Diffstat (limited to 'contrib/llvm/tools')
-rw-r--r--  contrib/llvm/tools/bugpoint/BugDriver.cpp  247
-rw-r--r--  contrib/llvm/tools/bugpoint/BugDriver.h  330
-rw-r--r--  contrib/llvm/tools/bugpoint/CrashDebugger.cpp  672
-rw-r--r--  contrib/llvm/tools/bugpoint/ExecutionDriver.cpp  472
-rw-r--r--  contrib/llvm/tools/bugpoint/ExtractFunction.cpp  418
-rw-r--r--  contrib/llvm/tools/bugpoint/FindBugs.cpp  113
-rw-r--r--  contrib/llvm/tools/bugpoint/ListReducer.h  201
-rw-r--r--  contrib/llvm/tools/bugpoint/Miscompilation.cpp  1079
-rw-r--r--  contrib/llvm/tools/bugpoint/OptimizerDriver.cpp  267
-rw-r--r--  contrib/llvm/tools/bugpoint/ToolRunner.cpp  890
-rw-r--r--  contrib/llvm/tools/bugpoint/ToolRunner.h  248
-rw-r--r--  contrib/llvm/tools/bugpoint/bugpoint.cpp  210
-rw-r--r--  contrib/llvm/tools/clang/LICENSE.TXT  63
-rw-r--r--  contrib/llvm/tools/clang/include/clang-c/Index.h  4748
-rw-r--r--  contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMT.h  122
-rw-r--r--  contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMTActions.h  77
-rw-r--r--  contrib/llvm/tools/clang/include/clang/ARCMigrate/FileRemapper.h  80
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/APValue.h  446
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/AST.h  28
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h  128
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/ASTContext.h  1998
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/ASTDiagnostic.h  50
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h  278
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h  84
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/ASTVector.h  397
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/Attr.h  254
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/BaseSubobject.h  87
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/BuiltinTypes.def  224
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h  370
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h  778
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/CharUnits.h  217
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/Decl.h  3343
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DeclAccessPair.h  72
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DeclBase.h  1637
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h  2938
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DeclContextInternals.h  223
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h  198
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DeclGroup.h  151
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DeclLookups.h  84
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h  1988
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h  2106
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DeclVisitor.h  54
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h  580
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/DependentDiagnostic.h  192
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/EvaluatedExprVisitor.h  83
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/Expr.h  4567
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h  3638
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h  1541
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h  522
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/GlobalDecl.h  124
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/LambdaMangleContext.h  36
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/Mangle.h  152
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/NSAPI.h  152
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h  481
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h  345
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/ParentMap.h  62
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h  146
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h  228
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h  2241
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h  181
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/SelectorLocationsKind.h  83
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/Stmt.h  1705
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h  295
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/StmtGraphTraits.h  83
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h  230
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h  381
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/StmtVisitor.h  189
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h  657
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/TemplateName.h  558
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/Type.h  4992
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h  1817
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/TypeLocNodes.def  41
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/TypeLocVisitor.h  62
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/TypeNodes.def  127
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/TypeOrdering.h  77
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/TypeVisitor.h  53
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/UnresolvedSet.h  186
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/VTTBuilder.h  176
-rw-r--r--  contrib/llvm/tools/clang/include/clang/AST/VTableBuilder.h  357
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/Analyses/CFGReachabilityAnalysis.h  49
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/Analyses/Dominators.h  212
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h  657
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/Analyses/LiveVariables.h  120
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h  111
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PseudoConstantAnalysis.h  45
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ReachableCode.h  56
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h  159
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValues.h  53
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h  432
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/AnalysisDiagnostic.h  28
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/CFG.h  938
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/CFGStmtMap.h  52
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/CallGraph.h  257
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/DomainSpecific/CocoaConventions.h  42
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/FlowSensitive/DataflowSolver.h  343
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h  172
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h  490
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/Support/BlkExprDeclBitVector.h  307
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/Support/BumpVector.h  244
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h  103
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtVisitor.h  59
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGStmtVisitor.h  175
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/ABI.h  126
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/AddressSpaces.h  44
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/AllDiagnostics.h  39
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Attr.td  716
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h  33
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Builtins.def  836
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Builtins.h  163
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/BuiltinsARM.def  52
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/BuiltinsHexagon.def  689
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPPC.def  209
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPTX.def  62
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def  633
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h  166
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td  77
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DelayedCleanupPool.h  110
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h  1207
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td  98
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td  211
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticAnalysisKinds.td  12
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCategories.h  26
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCategories.td  10
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td  109
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td  138
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td  134
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td  415
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h  279
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td  503
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td  718
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td  5489
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td  60
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/ExceptionSpecificationType.h  40
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/ExpressionTraits.h  25
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/FileManager.h  234
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/FileSystemOptions.h  31
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h  103
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h  809
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/LLVM.h  73
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Lambda.h  38
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def  171
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h  122
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Linkage.h  68
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/MacroBuilder.h  46
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Module.h  284
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h  486
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/OpenCL.h  28
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/OpenCLExtensions.def  32
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/OperatorKinds.def  106
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/OperatorKinds.h  35
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h  352
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/PrettyStackTrace.h  37
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h  426
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h  1402
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/SourceManagerInternals.h  130
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h  173
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td  169
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/TargetBuiltins.h  110
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h  680
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h  45
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/TemplateKinds.h  39
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def  596
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h  70
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h  95
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Version.h  78
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/VersionTuple.h  123
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/Visibility.h  48
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td  395
-rw-r--r--  contrib/llvm/tools/clang/include/clang/CodeGen/BackendUtil.h  40
-rw-r--r--  contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h  103
-rw-r--r--  contrib/llvm/tools/clang/include/clang/CodeGen/ModuleBuilder.h  46
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Action.h  254
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Arg.h  122
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/ArgList.h  426
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.h  32
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td  91
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/CC1Options.h  32
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td  847
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Compilation.h  163
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Driver.h  418
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/DriverDiagnostic.h  28
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Job.h  122
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/ObjCRuntime.h  49
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/OptParser.td  138
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/OptSpecifier.h  39
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/OptTable.h  186
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Option.h  318
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Options.h  32
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Options.td  968
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Phases.h  32
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Tool.h  75
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h  257
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Types.def  93
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Types.h  96
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Driver/Util.h  28
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Edit/Commit.h  140
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Edit/EditedSource.h  87
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Edit/EditsReceiver.h  35
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Edit/FileOffset.h  65
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Edit/Rewriters.h  33
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h  57
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h  801
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def  65
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h  137
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/ChainedDiagnosticConsumer.h  73
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/ChainedIncludesSource.h  75
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h  231
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/CommandLineSourceLoc.h  87
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h  664
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h  221
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/DependencyOutputOptions.h  56
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h  108
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h  149
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h  277
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h  218
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/FrontendDiagnostic.h  28
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h  209
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/FrontendPluginRegistry.h  23
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/HeaderSearchOptions.h  124
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/LangStandard.h  92
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def  120
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/LayoutOverrideSource.h  61
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/LogDiagnosticPrinter.h  79
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/MigratorOptions.h  31
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h  64
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOptions.h  224
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOutputOptions.h  37
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h  62
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h  120
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticBuffer.h  54
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h  59
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/Utils.h  108
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h  97
-rw-r--r--  contrib/llvm/tools/clang/include/clang/FrontendTool/Utils.h  30
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/ASTLocation.h  173
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/Analyzer.h  56
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/DeclReferenceMap.h  50
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/Entity.h  149
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/GlobalCallGraph.h  149
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/GlobalSelector.h  100
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/Handlers.h  82
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/IndexProvider.h  38
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/Indexer.h  71
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/Program.h  45
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/STLExtras.h  63
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/SelectorMap.h  57
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Index/TranslationUnit.h  41
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/CodeCompletionHandler.h  71
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/DirectoryLookup.h  170
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h  40
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h  72
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h  562
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/LexDiagnostic.h  28
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/Lexer.h  563
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h  239
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h  305
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/ModuleLoader.h  65
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h  237
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/MultipleIncludeOpt.h  130
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h  385
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/PTHLexer.h  105
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h  140
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/Pragma.h  126
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h  637
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h  1308
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h  180
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/ScratchBuffer.h  45
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/Token.h  299
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/TokenConcatenation.h  72
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Lex/TokenLexer.h  187
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Parse/ParseAST.h  49
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Parse/ParseDiagnostic.h  28
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Parse/Parser.h  2224
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h  48
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Rewrite/DeltaTree.h  48
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h  130
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h  78
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Rewrite/HTMLRewrite.h  81
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Rewrite/RewriteRope.h  231
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Rewrite/Rewriter.h  288
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Rewrite/Rewriters.h  30
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Rewrite/TokenRewriter.h  79
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/AnalysisBasedWarnings.h  102
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h  555
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/CXXFieldCollector.h  79
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h  992
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h  1984
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h  220
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/Designator.h  218
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h  183
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/IdentifierResolver.h  221
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/Initialization.h  999
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/LocInfoType.h  63
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/Lookup.h  715
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/ObjCMethodList.h  38
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/Overload.h  813
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/Ownership.h  469
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h  215
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/PrettyDeclStackTrace.h  47
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/Scope.h  334
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h  380
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/Sema.h  6798
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/SemaConsumer.h  49
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/SemaDiagnostic.h  28
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/SemaFixItUtils.h  91
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/SemaInternal.h  30
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/Template.h  491
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h  135
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/TypoCorrection.h  256
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Sema/Weak.h  46
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h  1277
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h  60
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h  1519
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h  737
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Serialization/ContinuousRangeMap.h  130
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Serialization/Module.h  358
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Serialization/ModuleManager.h  158
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Serialization/SerializationDiagnostic.h  28
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td  39
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/ClangCheckers.h  22
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/CommonBugCategories.h  24
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/DereferenceChecker.h  35
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/LocalCheckers.h  28
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h  453
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h  243
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h  67
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h  679
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h  441
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h  594
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerOptInfo.h  43
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerRegistry.h  134
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h  46
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h  220
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h  199
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h  62
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h  239
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h  43
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h  81
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h  541
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Environment.h  139
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h  480
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h  496
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h  107
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h  1230
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h  293
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h  796
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h  197
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h  43
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h  320
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h  517
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h  304
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h  51
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h  130
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SummaryManager.h  61
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h  668
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h  40
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintTag.h  27
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/WorkList.h  102
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Frontend/CheckerRegistration.h  33
-rw-r--r--  contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h  35
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h  164
-rw-r--r--  contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h  213
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp  626
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/ARCMTActions.cpp  60
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp  293
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h  170
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp  226
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/PlistReporter.cpp  195
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransAPIUses.cpp  109
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp  77
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp  434
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp  150
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp  258
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp  358
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp  84
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp  411
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp  303
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp  336
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp  77
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp  228
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp  731
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp  542
-rw-r--r--  contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h  207
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/APValue.cpp  607
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp  26
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ASTContext.cpp  6768
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp  331
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp  4676
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp  26
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/CXXABI.h  48
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp  718
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/Decl.cpp  3074
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclBase.cpp  1441
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp  2029
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp  48
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclGroup.cpp  32
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp  1326
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp  1072
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp  872
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp  627
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/DumpXML.cpp  1040
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/Expr.cpp  3903
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp  1335
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp  644
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp  6894
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ExternalASTSource.cpp  59
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/InheritViz.cpp  168
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp  73
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp  3576
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/LambdaMangleContext.cpp  30
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/Mangle.cpp  142
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp  71
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp  1191
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/NSAPI.cpp  312
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp  633
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/ParentMap.cpp  130
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp  89
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp  2488
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/SelectorLocationsKind.cpp  128
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/Stmt.cpp  865
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp  763
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/StmtIterator.cpp  155
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp  1885
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp  1178
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/StmtViz.cpp  62
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp  628
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/TemplateName.cpp  176
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/Type.cpp  2246
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp  332
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp  1232
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/VTTBuilder.cpp  212
-rw-r--r--  contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp  2405
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp  463
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/CFG.cpp  3972
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/CFGReachabilityAnalysis.cpp  76
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/CFGStmtMap.cpp  91
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp  184
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp  138
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/Dominators.cpp  14
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp  688
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/FormatStringParsing.h  74
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp  607
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/PostOrderCFGView.cpp  49
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp  679
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp  49
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp  227
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp  331
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp  499
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp  1726
-rw-r--r--  contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp  724
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Builtins.cpp  120
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c  564
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp  878
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp  697
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/FileManager.cpp  600
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/FileSystemStatCache.cpp  122
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp  524
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/LangOptions.cpp  32
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Module.cpp  274
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp  138
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp  1896
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp  490
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Targets.cpp  4205
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/TokenKinds.cpp  39
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/Version.cpp  146
-rw-r--r--  contrib/llvm/tools/clang/lib/Basic/VersionTuple.cpp  36
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h  181
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp  460
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp  2042
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h  229
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h  28
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp  4524
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCUDANV.cpp  126
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.cpp  55
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.h  54
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp  392
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp  199
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h  262
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp  2145
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCall.h  306
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp  1836
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp  1103
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h  539
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp  2668
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h  322
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp  1560
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp  464
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp  1595
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp  3249
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp  1343
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp  1830
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp  839
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp  1500
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp  2857
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp  2974
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp  2671
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp  6373
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp  374
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h  286
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGOpenCLRuntime.cpp  28
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGOpenCLRuntime.h  46
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp  1015
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h  281
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp  1170
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp  1676
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp  192
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp  733
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h  141
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CGValue.h  451
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp  448
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp  1144
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h  2701
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp  2667
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h  986
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp  185
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h  80
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp  676
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h  254
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp  1202
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp  95
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp  127
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp  3694
-rw-r--r--  contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h  170
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Action.cpp  122
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Arg.cpp  121
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/ArgList.cpp  333
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/CC1AsOptions.cpp  39
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/CC1Options.cpp  38
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Compilation.cpp  232
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Driver.cpp  1791
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp  37
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/InputInfo.h  88
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Job.cpp  42
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/OptTable.cpp  384
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Option.cpp  280
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Phases.cpp  27
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Tool.cpp  21
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp  288
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp  2335
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/ToolChains.h  596
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Tools.cpp  5588
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Tools.h  605
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/Types.cpp  254
-rw-r--r--  contrib/llvm/tools/clang/lib/Driver/WindowsToolChain.cpp  368
-rw-r--r--  contrib/llvm/tools/clang/lib/Edit/Commit.cpp  345
-rw-r--r--  contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp  329
-rw-r--r--  contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp  587
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp  422
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp  109
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp  2773
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp  653
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp  14
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp  240
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp  1097
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp  2242
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp  91
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp  231
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp  140
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp  386
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp  468
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp  572
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp  32
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp  126
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp  683
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp  763
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/LangStandards.cpp  43
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp  206
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp  177
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp  276
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp  628
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp  592
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp  881
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp  60
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp  178
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp  557
-rw-r--r--  contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp  191
-rw-r--r--  contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp  191
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/altivec.h  11856
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/avx2intrin.h  961
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/avxintrin.h  1235
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h  75
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/bmiintrin.h  115
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/cpuid.h  33
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/emmintrin.h  1424
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/float.h  124
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/fma4intrin.h  231
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/immintrin.h  75
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/iso646.h  43
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/limits.h  117
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/lzcntintrin.h  55
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/mm3dnow.h  161
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/mm_malloc.h  75
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/mmintrin.h  503
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/module.map  108
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/nmmintrin.h  35
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/pmmintrin.h  117
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/popcntintrin.h  45
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/smmintrin.h  467
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/stdalign.h  30
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/stdarg.h  50
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/stdbool.h  44
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/stddef.h  64
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/stdint.h  661
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/tgmath.h  1374
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/tmmintrin.h  225
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/unwind.h  124
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/varargs.h  26
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/wmmintrin.h  67
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/x86intrin.h  55
-rw-r--r--  contrib/llvm/tools/clang/lib/Headers/xmmintrin.h  990
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/ASTLocation.cpp  114
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/ASTVisitor.h  143
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/Analyzer.cpp  470
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/DeclReferenceMap.cpp  90
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/Entity.cpp  270
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/EntityImpl.h  71
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/GlobalCallGraph.cpp  152
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/GlobalSelector.cpp  71
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/Handlers.cpp  22
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/IndexProvider.cpp  20
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/Indexer.cpp  121
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/Program.cpp  50
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/ProgramImpl.h  56
-rw-r--r--  contrib/llvm/tools/clang/lib/Index/SelectorMap.cpp  84
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp  228
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp  1035
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/Lexer.cpp  3234
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp  1400
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp  317
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/MacroArgs.h  125
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp  133
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp  1437
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp  118
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPCallbacks.cpp  14
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp  2075
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp  786
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp  494
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp  1156
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp  710
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/Pragma.cpp  1292
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp  519
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp  666
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp  55
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/ScratchBuffer.cpp  73
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp  272
-rw-r--r--  contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp  756
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp  119
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp  685
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp  4838
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp  3013
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp  2433
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp  2848
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp  547
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp  2846
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp  568
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParsePragma.h  127
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp  2235
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp  1292
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp  1444
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/Parser.cpp  1700
-rw-r--r--  contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h  142
-rw-r--r--  contrib/llvm/tools/clang/lib/Rewrite/DeltaTree.cpp  467
-rw-r--r--  contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp  205
-rw-r--r--  contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp  183
-rw-r--r--  contrib/llvm/tools/clang/lib/Rewrite/HTMLPrint.cpp  94
-rw-r--r--  contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp  576
-rw-r--r--  contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp  217
-rw-r--r--  contrib/llvm/tools/clang/lib/Rewrite/RewriteModernObjC.cpp  7275
-rw-r--r--  contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp  6018
-rw-r--r--  contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp  806
-rw-r--r--  contrib/llvm/tools/clang/lib/Rewrite/RewriteTest.cpp  39
-rw-r--r--  contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp  414
-rw-r--r--  contrib/llvm/tools/clang/lib/Rewrite/TokenRewriter.cpp  99
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp  1016
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp  126
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp  641
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp  986
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp  56
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp  444
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp  770
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/Scope.cpp  71
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/Sema.cpp  1101
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp  1843
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp  426
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp  958
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp  2112
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp  5186
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp  7178
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaConsumer.cpp  14
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp  10461
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp  4171
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp  11095
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp  3121
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp  729
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp  11289
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp  5315
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp  1625
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp  3049
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp  204
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp  6167
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp  820
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp  4090
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp  1953
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp  11227
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp  1351
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp  2654
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp  7188
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp  4496
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp  2556
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp  3411
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp  794
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/SemaType.cpp  4514
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp  278
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.h  27
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/TreeTransform.h  9220
-rw-r--r--  contrib/llvm/tools/clang/lib/Sema/TypeLocBuilder.h  201
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp  77
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h  63
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp  6369
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp  2474
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTReaderInternals.h  248
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp  2214
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp  4548
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp  1725
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp  1659
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp  69
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/Module.cpp  115
-rw-r--r--  contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp  254
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp  92
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp  140
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp  92
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp  318
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp  134
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp  672
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp  157
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp  82
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp  1981
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp  191
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp  385
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp  86
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp  74
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp  291
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp  146
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp  786
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp  92
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp  233
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td  487
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp  158
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp  32
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h  37
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CommonBugCategories.cpp  18
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp  386
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp  146
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp  216
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp  96
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp  67
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp  740
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp  747
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h  22
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp  603
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp  314
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp  681
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp  116
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp  1463
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp  267
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp  211
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp  89
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp  334
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp  146
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp  218
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp  96
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp  174
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp  159
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp  381
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp  186
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp  69
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp  76
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp  198
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp  3702
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp  91
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp  65
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp  230
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp  475
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp  62
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp  112
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp  105
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp  91
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp  55
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp  88
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp  353
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp  247
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp  162
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp  241
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp  100
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp  367
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp  291
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BlockCounter.cpp  86
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp  2056
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp  784
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp  31
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp  83
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp  80
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp  678
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerRegistry.cpp  150
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp  689
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp  295
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp  405
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp  2075
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp  811
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp  300
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp  482
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp  273
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/FunctionSummary.cpp  38
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp  578
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp  1101
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ObjCMessage.cpp  90
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp  755
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp  513
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp  709
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp  442
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp  2009
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp  386
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp  331
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp  307
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h  101
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp  973
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp  362
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SubEngine.cpp  14
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp  540
-rw-r--r--  contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp  69
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp681
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h43
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp133
-rw-r--r--contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/FrontendActions.cpp23
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp230
-rw-r--r--contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp296
-rw-r--r--contrib/llvm/tools/clang/tools/driver/cc1_main.cpp189
-rw-r--r--contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp451
-rw-r--r--contrib/llvm/tools/clang/tools/driver/driver.cpp490
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangASTNodesEmitter.cpp168
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangASTNodesEmitter.h84
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp1092
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.h153
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp385
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.h54
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.cpp319
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.h31
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp1574
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.h210
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.cpp194
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.h34
-rw-r--r--contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp194
-rw-r--r--contrib/llvm/tools/llc/llc.cpp547
-rw-r--r--contrib/llvm/tools/lli/lli.cpp315
-rw-r--r--contrib/llvm/tools/llvm-ar/llvm-ar.cpp781
-rw-r--r--contrib/llvm/tools/llvm-as/llvm-as.cpp119
-rw-r--r--contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp626
-rw-r--r--contrib/llvm/tools/llvm-diff/DiffConsumer.cpp215
-rw-r--r--contrib/llvm/tools/llvm-diff/DiffConsumer.h93
-rw-r--r--contrib/llvm/tools/llvm-diff/DiffLog.cpp53
-rw-r--r--contrib/llvm/tools/llvm-diff/DiffLog.h80
-rw-r--r--contrib/llvm/tools/llvm-diff/DifferenceEngine.cpp683
-rw-r--r--contrib/llvm/tools/llvm-diff/DifferenceEngine.h93
-rw-r--r--contrib/llvm/tools/llvm-diff/llvm-diff.cpp98
-rw-r--r--contrib/llvm/tools/llvm-dis/llvm-dis.cpp193
-rw-r--r--contrib/llvm/tools/llvm-extract/llvm-extract.cpp238
-rw-r--r--contrib/llvm/tools/llvm-ld/Optimize.cpp130
-rw-r--r--contrib/llvm/tools/llvm-ld/llvm-ld.cpp732
-rw-r--r--contrib/llvm/tools/llvm-link/llvm-link.cpp142
-rw-r--r--contrib/llvm/tools/llvm-mc/Disassembler.cpp382
-rw-r--r--contrib/llvm/tools/llvm-mc/Disassembler.h42
-rw-r--r--contrib/llvm/tools/llvm-mc/llvm-mc.cpp532
-rw-r--r--contrib/llvm/tools/llvm-nm/llvm-nm.cpp412
-rw-r--r--contrib/llvm/tools/llvm-objdump/MCFunction.cpp138
-rw-r--r--contrib/llvm/tools/llvm-objdump/MCFunction.h100
-rw-r--r--contrib/llvm/tools/llvm-objdump/MachODump.cpp639
-rw-r--r--contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp657
-rw-r--r--contrib/llvm/tools/llvm-objdump/llvm-objdump.h46
-rw-r--r--contrib/llvm/tools/llvm-prof/llvm-prof.cpp293
-rw-r--r--contrib/llvm/tools/llvm-ranlib/llvm-ranlib.cpp101
-rw-r--r--contrib/llvm/tools/llvm-readobj/llvm-readobj.cpp218
-rw-r--r--contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp156
-rw-r--r--contrib/llvm/tools/llvm-stress/llvm-stress.cpp702
-rw-r--r--contrib/llvm/tools/llvm-stub/llvm-stub.c77
-rw-r--r--contrib/llvm/tools/macho-dump/macho-dump.cpp400
-rw-r--r--contrib/llvm/tools/opt/AnalysisWrappers.cpp94
-rw-r--r--contrib/llvm/tools/opt/GraphPrinters.cpp118
-rw-r--r--contrib/llvm/tools/opt/PrintSCC.cpp112
-rw-r--r--contrib/llvm/tools/opt/opt.cpp716
889 files changed, 609969 insertions, 0 deletions
diff --git a/contrib/llvm/tools/bugpoint/BugDriver.cpp b/contrib/llvm/tools/bugpoint/BugDriver.cpp
new file mode 100644
index 0000000..6b219bf
--- /dev/null
+++ b/contrib/llvm/tools/bugpoint/BugDriver.cpp
@@ -0,0 +1,247 @@
+//===- BugDriver.cpp - Top-Level BugPoint class implementation ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class contains all of the shared state and information that is used by
+// the BugPoint tool to track down errors in optimizations. This class is the
+// main driver class that invokes all sub-functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BugDriver.h"
+#include "ToolRunner.h"
+#include "llvm/Linker.h"
+#include "llvm/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/IRReader.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FileUtilities.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Host.h"
+#include <memory>
+using namespace llvm;
+
+namespace llvm {
+ Triple TargetTriple;
+}
+
+// Anonymous namespace to define command line options for debugging.
+//
+namespace {
+  // OutputFile - The user can specify a file containing the expected output of the
+ // program. If this filename is set, it is used as the reference diff source,
+ // otherwise the raw input run through an interpreter is used as the reference
+ // source.
+ //
+ cl::opt<std::string>
+ OutputFile("output", cl::desc("Specify a reference program output "
+ "(for miscompilation detection)"));
+}
+
+/// setNewProgram - If we reduce or update the program somehow, call this method
+/// to update bugdriver with it. This deletes the old module and sets the
+/// specified one as the current program.
+void BugDriver::setNewProgram(Module *M) {
+ delete Program;
+ Program = M;
+}
+
+
+/// getPassesString - Turn a list of passes into a string which indicates the
+/// command line options that must be passed to add the passes.
+///
+std::string llvm::getPassesString(const std::vector<std::string> &Passes) {
+ std::string Result;
+ for (unsigned i = 0, e = Passes.size(); i != e; ++i) {
+ if (i) Result += " ";
+ Result += "-";
+ Result += Passes[i];
+ }
+ return Result;
+}
+
+BugDriver::BugDriver(const char *toolname, bool find_bugs,
+ unsigned timeout, unsigned memlimit, bool use_valgrind,
+ LLVMContext& ctxt)
+ : Context(ctxt), ToolName(toolname), ReferenceOutputFile(OutputFile),
+ Program(0), Interpreter(0), SafeInterpreter(0), gcc(0),
+ run_find_bugs(find_bugs), Timeout(timeout),
+ MemoryLimit(memlimit), UseValgrind(use_valgrind) {}
+
+BugDriver::~BugDriver() {
+ delete Program;
+}
+
+
+/// ParseInputFile - Given a bitcode or assembly input filename, parse and
+/// return it, or return null if not possible.
+///
+Module *llvm::ParseInputFile(const std::string &Filename,
+ LLVMContext& Ctxt) {
+ SMDiagnostic Err;
+ Module *Result = ParseIRFile(Filename, Err, Ctxt);
+ if (!Result)
+ Err.print("bugpoint", errs());
+
+ // If we don't have an override triple, use the first one to configure
+ // bugpoint, or use the host triple if none provided.
+ if (Result) {
+ if (TargetTriple.getTriple().empty()) {
+ Triple TheTriple(Result->getTargetTriple());
+
+ if (TheTriple.getTriple().empty())
+ TheTriple.setTriple(sys::getDefaultTargetTriple());
+
+ TargetTriple.setTriple(TheTriple.getTriple());
+ }
+
+ Result->setTargetTriple(TargetTriple.getTriple()); // override the triple
+ }
+ return Result;
+}
+
+// This method takes the specified list of LLVM input files, attempts to load
+// them, either as assembly or bitcode, then link them together. It returns
+// true on failure (if, for example, an input bitcode file could not be
+// parsed), and false on success.
+//
+bool BugDriver::addSources(const std::vector<std::string> &Filenames) {
+ assert(Program == 0 && "Cannot call addSources multiple times!");
+  assert(!Filenames.empty() && "Must specify at least one input filename!");
+
+ // Load the first input file.
+ Program = ParseInputFile(Filenames[0], Context);
+ if (Program == 0) return true;
+
+ outs() << "Read input file : '" << Filenames[0] << "'\n";
+
+ for (unsigned i = 1, e = Filenames.size(); i != e; ++i) {
+ std::auto_ptr<Module> M(ParseInputFile(Filenames[i], Context));
+ if (M.get() == 0) return true;
+
+ outs() << "Linking in input file: '" << Filenames[i] << "'\n";
+ std::string ErrorMessage;
+ if (Linker::LinkModules(Program, M.get(), Linker::DestroySource,
+ &ErrorMessage)) {
+ errs() << ToolName << ": error linking in '" << Filenames[i] << "': "
+ << ErrorMessage << '\n';
+ return true;
+ }
+ }
+
+ outs() << "*** All input ok\n";
+
+ // All input files read successfully!
+ return false;
+}
+
+
+
+/// run - The top level method that is invoked after all of the instance
+/// variables are set up from command line arguments.
+///
+bool BugDriver::run(std::string &ErrMsg) {
+ if (run_find_bugs) {
+ // Rearrange the passes and apply them to the program. Repeat this process
+ // until the user kills the program or we find a bug.
+ return runManyPasses(PassesToRun, ErrMsg);
+ }
+
+  // The first thing that we must do is determine what the problem is.  Does
+  // the optimization series crash the compiler, or does it produce illegal
+  // code?  We make the top-level decision by trying to run all of the passes
+  // on the input program,
+ // which should generate a bitcode file. If it does generate a bitcode
+ // file, then we know the compiler didn't crash, so try to diagnose a
+ // miscompilation.
+ if (!PassesToRun.empty()) {
+ outs() << "Running selected passes on program to test for crash: ";
+ if (runPasses(Program, PassesToRun))
+ return debugOptimizerCrash();
+ }
+
+ // Set up the execution environment, selecting a method to run LLVM bitcode.
+ if (initializeExecutionEnvironment()) return true;
+
+ // Test to see if we have a code generator crash.
+ outs() << "Running the code generator to test for a crash: ";
+ std::string Error;
+ compileProgram(Program, &Error);
+ if (!Error.empty()) {
+ outs() << Error;
+ return debugCodeGeneratorCrash(ErrMsg);
+ }
+ outs() << '\n';
+
+ // Run the raw input to see where we are coming from. If a reference output
+ // was specified, make sure that the raw output matches it. If not, it's a
+ // problem in the front-end or the code generator.
+ //
+ bool CreatedOutput = false;
+ if (ReferenceOutputFile.empty()) {
+ outs() << "Generating reference output from raw program: ";
+ if (!createReferenceFile(Program)) {
+ return debugCodeGeneratorCrash(ErrMsg);
+ }
+ CreatedOutput = true;
+ }
+
+ // Make sure the reference output file gets deleted on exit from this
+ // function, if appropriate.
+ sys::Path ROF(ReferenceOutputFile);
+ FileRemover RemoverInstance(ROF.str(), CreatedOutput && !SaveTemps);
+
+ // Diff the output of the raw program against the reference output. If it
+ // matches, then we assume there is a miscompilation bug and try to
+ // diagnose it.
+ outs() << "*** Checking the code generator...\n";
+ bool Diff = diffProgram(Program, "", "", false, &Error);
+ if (!Error.empty()) {
+ errs() << Error;
+ return debugCodeGeneratorCrash(ErrMsg);
+ }
+ if (!Diff) {
+ outs() << "\n*** Output matches: Debugging miscompilation!\n";
+ debugMiscompilation(&Error);
+ if (!Error.empty()) {
+ errs() << Error;
+ return debugCodeGeneratorCrash(ErrMsg);
+ }
+ return false;
+ }
+
+ outs() << "\n*** Input program does not match reference diff!\n";
+ outs() << "Debugging code generator problem!\n";
+ bool Failure = debugCodeGenerator(&Error);
+ if (!Error.empty()) {
+ errs() << Error;
+ return debugCodeGeneratorCrash(ErrMsg);
+ }
+ return Failure;
+}
+
+void llvm::PrintFunctionList(const std::vector<Function*> &Funcs) {
+ unsigned NumPrint = Funcs.size();
+ if (NumPrint > 10) NumPrint = 10;
+ for (unsigned i = 0; i != NumPrint; ++i)
+ outs() << " " << Funcs[i]->getName();
+ if (NumPrint < Funcs.size())
+ outs() << "... <" << Funcs.size() << " total>";
+ outs().flush();
+}
+
+void llvm::PrintGlobalVariableList(const std::vector<GlobalVariable*> &GVs) {
+ unsigned NumPrint = GVs.size();
+ if (NumPrint > 10) NumPrint = 10;
+ for (unsigned i = 0; i != NumPrint; ++i)
+ outs() << " " << GVs[i]->getName();
+ if (NumPrint < GVs.size())
+ outs() << "... <" << GVs.size() << " total>";
+ outs().flush();
+}
diff --git a/contrib/llvm/tools/bugpoint/BugDriver.h b/contrib/llvm/tools/bugpoint/BugDriver.h
new file mode 100644
index 0000000..cc78489
--- /dev/null
+++ b/contrib/llvm/tools/bugpoint/BugDriver.h
@@ -0,0 +1,330 @@
+//===- BugDriver.h - Top-Level BugPoint class -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class contains all of the shared state and information that is used by
+// the BugPoint tool to track down errors in optimizations. This class is the
+// main driver class that invokes all sub-functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef BUGDRIVER_H
+#define BUGDRIVER_H
+
+#include "llvm/ADT/ValueMap.h"
+#include "llvm/Transforms/Utils/ValueMapper.h"
+#include <vector>
+#include <string>
+
+namespace llvm {
+
+class Value;
+class PassInfo;
+class Module;
+class GlobalVariable;
+class Function;
+class BasicBlock;
+class AbstractInterpreter;
+class Instruction;
+class LLVMContext;
+
+class DebugCrashes;
+
+class GCC;
+
+extern bool DisableSimplifyCFG;
+
+/// BugpointIsInterrupted - Set to true when the user presses ctrl-c.
+///
+extern bool BugpointIsInterrupted;
+
+class BugDriver {
+ LLVMContext& Context;
+ const char *ToolName; // argv[0] of bugpoint
+ std::string ReferenceOutputFile; // Name of `good' output file
+ Module *Program; // The raw program, linked together
+ std::vector<std::string> PassesToRun;
+ AbstractInterpreter *Interpreter; // How to run the program
+ AbstractInterpreter *SafeInterpreter; // To generate reference output, etc.
+ GCC *gcc;
+ bool run_find_bugs;
+ unsigned Timeout;
+ unsigned MemoryLimit;
+ bool UseValgrind;
+
+ // FIXME: sort out public/private distinctions...
+ friend class ReducePassList;
+ friend class ReduceMisCodegenFunctions;
+
+public:
+ BugDriver(const char *toolname, bool find_bugs,
+ unsigned timeout, unsigned memlimit, bool use_valgrind,
+ LLVMContext& ctxt);
+ ~BugDriver();
+
+ const char *getToolName() const { return ToolName; }
+
+ LLVMContext& getContext() const { return Context; }
+
+ // Set up methods... these methods are used to copy information about the
+ // command line arguments into instance variables of BugDriver.
+ //
+ bool addSources(const std::vector<std::string> &FileNames);
+ void addPass(std::string p) { PassesToRun.push_back(p); }
+ void setPassesToRun(const std::vector<std::string> &PTR) {
+ PassesToRun = PTR;
+ }
+ const std::vector<std::string> &getPassesToRun() const {
+ return PassesToRun;
+ }
+
+  /// run - The top level method that is invoked after all of the instance
+  /// variables are set up from command line arguments.
+ ///
+ bool run(std::string &ErrMsg);
+
+ /// debugOptimizerCrash - This method is called when some optimizer pass
+ /// crashes on input. It attempts to prune down the testcase to something
+ /// reasonable, and figure out exactly which pass is crashing.
+ ///
+ bool debugOptimizerCrash(const std::string &ID = "passes");
+
+ /// debugCodeGeneratorCrash - This method is called when the code generator
+ /// crashes on an input. It attempts to reduce the input as much as possible
+ /// while still causing the code generator to crash.
+ bool debugCodeGeneratorCrash(std::string &Error);
+
+ /// debugMiscompilation - This method is used when the passes selected are not
+ /// crashing, but the generated output is semantically different from the
+ /// input.
+ void debugMiscompilation(std::string *Error);
+
+ /// debugPassMiscompilation - This method is called when the specified pass
+ /// miscompiles Program as input. It tries to reduce the testcase to
+  /// something smaller that still miscompiles the program.
+ /// ReferenceOutput contains the filename of the file containing the output we
+ /// are to match.
+ ///
+ bool debugPassMiscompilation(const PassInfo *ThePass,
+ const std::string &ReferenceOutput);
+
+ /// compileSharedObject - This method creates a SharedObject from a given
+ /// BitcodeFile for debugging a code generator.
+ ///
+ std::string compileSharedObject(const std::string &BitcodeFile,
+ std::string &Error);
+
+ /// debugCodeGenerator - This method narrows down a module to a function or
+ /// set of functions, using the CBE as a ``safe'' code generator for other
+ /// functions that are not under consideration.
+ bool debugCodeGenerator(std::string *Error);
+
+ /// isExecutingJIT - Returns true if bugpoint is currently testing the JIT
+ ///
+ bool isExecutingJIT();
+
+ /// runPasses - Run all of the passes in the "PassesToRun" list, discard the
+ /// output, and return true if any of the passes crashed.
+ bool runPasses(Module *M) const {
+ return runPasses(M, PassesToRun);
+ }
+
+ Module *getProgram() const { return Program; }
+
+ /// swapProgramIn - Set the current module to the specified module, returning
+ /// the old one.
+ Module *swapProgramIn(Module *M) {
+ Module *OldProgram = Program;
+ Program = M;
+ return OldProgram;
+ }
+
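+  /// switchToSafeInterpreter - Make the "safe" backend the interpreter used
+  /// to run the program, returning the previously selected interpreter so it
+  /// can be restored later with switchToInterpreter().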
+ AbstractInterpreter *switchToSafeInterpreter() {
+ AbstractInterpreter *Old = Interpreter;
+ Interpreter = (AbstractInterpreter*)SafeInterpreter;
+ return Old;
+ }
+
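+  /// switchToInterpreter - Install the given interpreter as the one used to
+  /// run the program under test.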
+ void switchToInterpreter(AbstractInterpreter *AI) {
+ Interpreter = AI;
+ }
+
+ /// setNewProgram - If we reduce or update the program somehow, call this
+ /// method to update bugdriver with it. This deletes the old module and sets
+ /// the specified one as the current program.
+ void setNewProgram(Module *M);
+
+  /// compileProgram - Try to compile the specified module, setting Error if
+  /// an error occurs.  This is used for code generation crash testing.
+ ///
+ void compileProgram(Module *M, std::string *Error) const;
+
+ /// executeProgram - This method runs "Program", capturing the output of the
+ /// program to a file. A recommended filename may be optionally specified.
+ ///
+ std::string executeProgram(const Module *Program,
+ std::string OutputFilename,
+ std::string Bitcode,
+ const std::string &SharedObjects,
+ AbstractInterpreter *AI,
+ std::string *Error) const;
+
+ /// executeProgramSafely - Used to create reference output with the "safe"
+ /// backend, if reference output is not provided. If there is a problem with
+  /// the code generator (e.g., llc crashes), this will set Error.
+ ///
+ std::string executeProgramSafely(const Module *Program,
+ std::string OutputFile,
+ std::string *Error) const;
+
+ /// createReferenceFile - calls compileProgram and then records the output
+ /// into ReferenceOutputFile. Returns true if reference file created, false
+ /// otherwise. Note: initializeExecutionEnvironment should be called BEFORE
+ /// this function.
+ ///
+ bool createReferenceFile(Module *M, const std::string &Filename
+ = "bugpoint.reference.out");
+
+ /// diffProgram - This method executes the specified module and diffs the
+ /// output against the file specified by ReferenceOutputFile. If the output
+  /// is different, true is returned.  If there is a problem with the code
+  /// generator (e.g., llc crashes), this will set Error.
+ ///
+ bool diffProgram(const Module *Program,
+ const std::string &BitcodeFile = "",
+ const std::string &SharedObj = "",
+ bool RemoveBitcode = false,
+ std::string *Error = 0) const;
+
+ /// EmitProgressBitcode - This function is used to output M to a file named
+ /// "bugpoint-ID.bc".
+ ///
+ void EmitProgressBitcode(const Module *M, const std::string &ID,
+ bool NoFlyer = false) const;
+
+ /// deleteInstructionFromProgram - This method clones the current Program and
+ /// deletes the specified instruction from the cloned module. It then runs a
+ /// series of cleanup passes (ADCE and SimplifyCFG) to eliminate any code
+ /// which depends on the value. The modified module is then returned.
+ ///
+ Module *deleteInstructionFromProgram(const Instruction *I, unsigned Simp);
+
+ /// performFinalCleanups - This method clones the current Program and performs
+ /// a series of cleanups intended to get rid of extra cruft on the module. If
+  /// the MayModifySemantics argument is true, then the cleanups are allowed to
+ /// modify how the code behaves.
+ ///
+ Module *performFinalCleanups(Module *M, bool MayModifySemantics = false);
+
+ /// ExtractLoop - Given a module, extract up to one loop from it into a new
+ /// function. This returns null if there are no extractable loops in the
+ /// program or if the loop extractor crashes.
+ Module *ExtractLoop(Module *M);
+
+ /// ExtractMappedBlocksFromModule - Extract all but the specified basic blocks
+ /// into their own functions. The only detail is that M is actually a module
+ /// cloned from the one the BBs are in, so some mapping needs to be performed.
+  /// If this operation fails for some reason (i.e., the implementation is buggy),
+ /// this function should return null, otherwise it returns a new Module.
+ Module *ExtractMappedBlocksFromModule(const std::vector<BasicBlock*> &BBs,
+ Module *M);
+
+  /// runPassesOn - Carefully run the specified set of passes on the specified
+ /// module, returning the transformed module on success, or a null pointer on
+ /// failure. If AutoDebugCrashes is set to true, then bugpoint will
+ /// automatically attempt to track down a crashing pass if one exists, and
+ /// this method will never return null.
+ Module *runPassesOn(Module *M, const std::vector<std::string> &Passes,
+ bool AutoDebugCrashes = false, unsigned NumExtraArgs = 0,
+ const char * const *ExtraArgs = NULL);
+
+ /// runPasses - Run the specified passes on Program, outputting a bitcode
+  /// file and writing the filename into OutputFilename if successful. If the
+ /// optimizations fail for some reason (optimizer crashes), return true,
+ /// otherwise return false. If DeleteOutput is set to true, the bitcode is
+ /// deleted on success, and the filename string is undefined. This prints to
+ /// outs() a single line message indicating whether compilation was successful
+ /// or failed, unless Quiet is set. ExtraArgs specifies additional arguments
+ /// to pass to the child bugpoint instance.
+ ///
+ bool runPasses(Module *Program,
+ const std::vector<std::string> &PassesToRun,
+ std::string &OutputFilename, bool DeleteOutput = false,
+ bool Quiet = false, unsigned NumExtraArgs = 0,
+ const char * const *ExtraArgs = NULL) const;
+
+ /// runManyPasses - Take the specified pass list and create different
+ /// combinations of passes to compile the program with. Compile the program with
+  /// each set and check whether it compiled correctly.  If it did, output
+  /// nothing and rearrange the passes into a new order.  If the passes did
+  /// not compile correctly, output the command required to
+ /// recreate the failure. This returns true if a compiler error is found.
+ ///
+ bool runManyPasses(const std::vector<std::string> &AllPasses,
+ std::string &ErrMsg);
+
+ /// writeProgramToFile - This writes the current "Program" to the named
+ /// bitcode file. If an error occurs, true is returned.
+ ///
+ bool writeProgramToFile(const std::string &Filename, const Module *M) const;
+
+private:
+ /// runPasses - Just like the method above, but this just returns true or
+ /// false indicating whether or not the optimizer crashed on the specified
+ /// input (true = crashed).
+ ///
+ bool runPasses(Module *M,
+ const std::vector<std::string> &PassesToRun,
+ bool DeleteOutput = true) const {
+ std::string Filename;
+ return runPasses(M, PassesToRun, Filename, DeleteOutput);
+ }
+
+ /// initializeExecutionEnvironment - This method is used to set up the
+ /// environment for executing LLVM programs.
+ ///
+ bool initializeExecutionEnvironment();
+};
+
+/// ParseInputFile - Given a bitcode or assembly input filename, parse and
+/// return it, or return null if not possible.
+///
+Module *ParseInputFile(const std::string &InputFilename,
+ LLVMContext& ctxt);
+
+
+/// getPassesString - Turn a list of passes into a string which indicates the
+/// command line options that must be passed to add the passes.
+///
+std::string getPassesString(const std::vector<std::string> &Passes);
+
+/// PrintFunctionList - prints out list of problematic functions
+///
+void PrintFunctionList(const std::vector<Function*> &Funcs);
+
+/// PrintGlobalVariableList - prints out list of problematic global variables
+///
+void PrintGlobalVariableList(const std::vector<GlobalVariable*> &GVs);
+
+// DeleteFunctionBody - "Remove" the function by deleting all of its basic
+// blocks, making it external.
+//
+void DeleteFunctionBody(Function *F);
+
+/// SplitFunctionsOutOfModule - Given a module and a list of functions in the
+/// module, split the functions OUT of the specified module, and place them in
+/// the new module.
+Module *SplitFunctionsOutOfModule(Module *M, const std::vector<Function*> &F,
+ ValueToValueMapTy &VMap);
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/tools/bugpoint/CrashDebugger.cpp b/contrib/llvm/tools/bugpoint/CrashDebugger.cpp
new file mode 100644
index 0000000..aed16f4
--- /dev/null
+++ b/contrib/llvm/tools/bugpoint/CrashDebugger.cpp
@@ -0,0 +1,672 @@
+//===- CrashDebugger.cpp - Debug compilation crashes ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the bugpoint internals that narrow down compilation crashes
+//
+//===----------------------------------------------------------------------===//
+
+#include "BugDriver.h"
+#include "ToolRunner.h"
+#include "ListReducer.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/PassManager.h"
+#include "llvm/ValueSymbolTable.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include "llvm/Support/FileUtilities.h"
+#include "llvm/Support/CommandLine.h"
+#include <set>
+using namespace llvm;
+
+namespace {
+ cl::opt<bool>
+ KeepMain("keep-main",
+ cl::desc("Force function reduction to keep main"),
+ cl::init(false));
+ cl::opt<bool>
+ NoGlobalRM ("disable-global-remove",
+ cl::desc("Do not remove global variables"),
+ cl::init(false));
+}
+
+namespace llvm {
+ class ReducePassList : public ListReducer<std::string> {
+ BugDriver &BD;
+ public:
+ ReducePassList(BugDriver &bd) : BD(bd) {}
+
+ // doTest - Return true iff running the "removed" passes succeeds, and
+ // running the "Kept" passes fail when run on the output of the "removed"
+ // passes. If we return true, we update the current module of bugpoint.
+ //
+ virtual TestResult doTest(std::vector<std::string> &Removed,
+ std::vector<std::string> &Kept,
+ std::string &Error);
+ };
+}
+
+ReducePassList::TestResult
+ReducePassList::doTest(std::vector<std::string> &Prefix,
+ std::vector<std::string> &Suffix,
+ std::string &Error) {
+ sys::Path PrefixOutput;
+ Module *OrigProgram = 0;
+ if (!Prefix.empty()) {
+ outs() << "Checking to see if these passes crash: "
+ << getPassesString(Prefix) << ": ";
+ std::string PfxOutput;
+ if (BD.runPasses(BD.getProgram(), Prefix, PfxOutput))
+ return KeepPrefix;
+
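+    // The prefix passes ran without crashing; make their output the current
+    // program so that the suffix passes are tested on the prefix's result.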
+ PrefixOutput.set(PfxOutput);
+ OrigProgram = BD.Program;
+
+ BD.Program = ParseInputFile(PrefixOutput.str(), BD.getContext());
+ if (BD.Program == 0) {
+ errs() << BD.getToolName() << ": Error reading bitcode file '"
+ << PrefixOutput.str() << "'!\n";
+ exit(1);
+ }
+ PrefixOutput.eraseFromDisk();
+ }
+
+ outs() << "Checking to see if these passes crash: "
+ << getPassesString(Suffix) << ": ";
+
+ if (BD.runPasses(BD.getProgram(), Suffix)) {
+ delete OrigProgram; // The suffix crashes alone...
+ return KeepSuffix;
+ }
+
+ // Nothing failed, restore state...
+ if (OrigProgram) {
+ delete BD.Program;
+ BD.Program = OrigProgram;
+ }
+ return NoFailure;
+}
+
+namespace {
+  /// ReduceCrashingGlobalVariables - This works by removing global variable
+  /// initializers and seeing if the program still crashes.  If it
+ /// does, then we keep that program and try again.
+ ///
+ class ReduceCrashingGlobalVariables : public ListReducer<GlobalVariable*> {
+ BugDriver &BD;
+ bool (*TestFn)(const BugDriver &, Module *);
+ public:
+ ReduceCrashingGlobalVariables(BugDriver &bd,
+ bool (*testFn)(const BugDriver &, Module *))
+ : BD(bd), TestFn(testFn) {}
+
+ virtual TestResult doTest(std::vector<GlobalVariable*> &Prefix,
+ std::vector<GlobalVariable*> &Kept,
+ std::string &Error) {
+ if (!Kept.empty() && TestGlobalVariables(Kept))
+ return KeepSuffix;
+ if (!Prefix.empty() && TestGlobalVariables(Prefix))
+ return KeepPrefix;
+ return NoFailure;
+ }
+
+ bool TestGlobalVariables(std::vector<GlobalVariable*> &GVs);
+ };
+}
+
+bool
+ReduceCrashingGlobalVariables::TestGlobalVariables(
+ std::vector<GlobalVariable*> &GVs) {
+ // Clone the program to try hacking it apart...
+ ValueToValueMapTy VMap;
+ Module *M = CloneModule(BD.getProgram(), VMap);
+
+ // Convert list to set for fast lookup...
+ std::set<GlobalVariable*> GVSet;
+
+ for (unsigned i = 0, e = GVs.size(); i != e; ++i) {
+ GlobalVariable* CMGV = cast<GlobalVariable>(VMap[GVs[i]]);
+ assert(CMGV && "Global Variable not in module?!");
+ GVSet.insert(CMGV);
+ }
+
+ outs() << "Checking for crash with only these global variables: ";
+ PrintGlobalVariableList(GVs);
+ outs() << ": ";
+
+ // Loop over and delete any global variables which we aren't supposed to be
+ // playing with...
+ for (Module::global_iterator I = M->global_begin(), E = M->global_end();
+ I != E; ++I)
+ if (I->hasInitializer() && !GVSet.count(I)) {
+ I->setInitializer(0);
+ I->setLinkage(GlobalValue::ExternalLinkage);
+ }
+
+ // Try running the hacked up program...
+ if (TestFn(BD, M)) {
+ BD.setNewProgram(M); // It crashed, keep the trimmed version...
+
+ // Make sure to use global variable pointers that point into the now-current
+ // module.
+ GVs.assign(GVSet.begin(), GVSet.end());
+ return true;
+ }
+
+ delete M;
+ return false;
+}
+
+namespace {
+ /// ReduceCrashingFunctions reducer - This works by removing functions and
+ /// seeing if the program still crashes. If it does, then keep the newer,
+ /// smaller program.
+ ///
+ class ReduceCrashingFunctions : public ListReducer<Function*> {
+ BugDriver &BD;
+ bool (*TestFn)(const BugDriver &, Module *);
+ public:
+ ReduceCrashingFunctions(BugDriver &bd,
+ bool (*testFn)(const BugDriver &, Module *))
+ : BD(bd), TestFn(testFn) {}
+
+ virtual TestResult doTest(std::vector<Function*> &Prefix,
+ std::vector<Function*> &Kept,
+ std::string &Error) {
+ if (!Kept.empty() && TestFuncs(Kept))
+ return KeepSuffix;
+ if (!Prefix.empty() && TestFuncs(Prefix))
+ return KeepPrefix;
+ return NoFailure;
+ }
+
+ bool TestFuncs(std::vector<Function*> &Prefix);
+ };
+}
+
+bool ReduceCrashingFunctions::TestFuncs(std::vector<Function*> &Funcs) {
+
+  // If main isn't present, claim there is no problem.
+ if (KeepMain && find(Funcs.begin(), Funcs.end(),
+ BD.getProgram()->getFunction("main")) == Funcs.end())
+ return false;
+
+ // Clone the program to try hacking it apart...
+ ValueToValueMapTy VMap;
+ Module *M = CloneModule(BD.getProgram(), VMap);
+
+ // Convert list to set for fast lookup...
+ std::set<Function*> Functions;
+ for (unsigned i = 0, e = Funcs.size(); i != e; ++i) {
+ Function *CMF = cast<Function>(VMap[Funcs[i]]);
+ assert(CMF && "Function not in module?!");
+ assert(CMF->getFunctionType() == Funcs[i]->getFunctionType() && "wrong ty");
+ assert(CMF->getName() == Funcs[i]->getName() && "wrong name");
+ Functions.insert(CMF);
+ }
+
+ outs() << "Checking for crash with only these functions: ";
+ PrintFunctionList(Funcs);
+ outs() << ": ";
+
+ // Loop over and delete any functions which we aren't supposed to be playing
+ // with...
+ for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I)
+ if (!I->isDeclaration() && !Functions.count(I))
+ DeleteFunctionBody(I);
+
+ // Try running the hacked up program...
+ if (TestFn(BD, M)) {
+ BD.setNewProgram(M); // It crashed, keep the trimmed version...
+
+ // Make sure to use function pointers that point into the now-current
+ // module.
+ Funcs.assign(Functions.begin(), Functions.end());
+ return true;
+ }
+ delete M;
+ return false;
+}
+
+
+namespace {
+  /// ReduceCrashingBlocks reducer - This works by replacing the terminators
+  /// of all basic blocks except the specified ones with 'unreachable', then
+  /// running the simplify-cfg pass.  This chops up the CFG very quickly,
+  /// which can reduce large functions fast.
+ ///
+ class ReduceCrashingBlocks : public ListReducer<const BasicBlock*> {
+ BugDriver &BD;
+ bool (*TestFn)(const BugDriver &, Module *);
+ public:
+ ReduceCrashingBlocks(BugDriver &bd,
+ bool (*testFn)(const BugDriver &, Module *))
+ : BD(bd), TestFn(testFn) {}
+
+ virtual TestResult doTest(std::vector<const BasicBlock*> &Prefix,
+ std::vector<const BasicBlock*> &Kept,
+ std::string &Error) {
+ if (!Kept.empty() && TestBlocks(Kept))
+ return KeepSuffix;
+ if (!Prefix.empty() && TestBlocks(Prefix))
+ return KeepPrefix;
+ return NoFailure;
+ }
+
+ bool TestBlocks(std::vector<const BasicBlock*> &Prefix);
+ };
+}
+
+bool ReduceCrashingBlocks::TestBlocks(std::vector<const BasicBlock*> &BBs) {
+ // Clone the program to try hacking it apart...
+ ValueToValueMapTy VMap;
+ Module *M = CloneModule(BD.getProgram(), VMap);
+
+ // Convert list to set for fast lookup...
+ SmallPtrSet<BasicBlock*, 8> Blocks;
+ for (unsigned i = 0, e = BBs.size(); i != e; ++i)
+ Blocks.insert(cast<BasicBlock>(VMap[BBs[i]]));
+
+ outs() << "Checking for crash with only these blocks:";
+ unsigned NumPrint = Blocks.size();
+ if (NumPrint > 10) NumPrint = 10;
+ for (unsigned i = 0, e = NumPrint; i != e; ++i)
+ outs() << " " << BBs[i]->getName();
+ if (NumPrint < Blocks.size())
+ outs() << "... <" << Blocks.size() << " total>";
+ outs() << ": ";
+
+  // Loop over and hack up any blocks that are not listed...
+ for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I)
+ for (Function::iterator BB = I->begin(), E = I->end(); BB != E; ++BB)
+ if (!Blocks.count(BB) && BB->getTerminator()->getNumSuccessors()) {
+ // Loop over all of the successors of this block, deleting any PHI nodes
+ // that might include it.
+ for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)
+ (*SI)->removePredecessor(BB);
+
+ TerminatorInst *BBTerm = BB->getTerminator();
+
+ if (!BB->getTerminator()->getType()->isVoidTy())
+ BBTerm->replaceAllUsesWith(Constant::getNullValue(BBTerm->getType()));
+
+ // Replace the old terminator instruction.
+ BB->getInstList().pop_back();
+ new UnreachableInst(BB->getContext(), BB);
+ }
+
+ // The CFG Simplifier pass may delete one of the basic blocks we are
+ // interested in. If it does we need to take the block out of the list. Make
+ // a "persistent mapping" by turning basic blocks into <function, name> pairs.
+ // This won't work well if blocks are unnamed, but that is just the risk we
+ // have to take.
+ std::vector<std::pair<std::string, std::string> > BlockInfo;
+
+ for (SmallPtrSet<BasicBlock*, 8>::iterator I = Blocks.begin(),
+ E = Blocks.end(); I != E; ++I)
+ BlockInfo.push_back(std::make_pair((*I)->getParent()->getName(),
+ (*I)->getName()));
+
+ // Now run the CFG simplify pass on the function...
+ std::vector<std::string> Passes;
+ Passes.push_back("simplifycfg");
+ Passes.push_back("verify");
+ Module *New = BD.runPassesOn(M, Passes);
+ delete M;
+ if (!New) {
+ errs() << "simplifycfg failed!\n";
+ exit(1);
+ }
+ M = New;
+
+ // Try running on the hacked up program...
+ if (TestFn(BD, M)) {
+ BD.setNewProgram(M); // It crashed, keep the trimmed version...
+
+ // Make sure to use basic block pointers that point into the now-current
+ // module, and that they don't include any deleted blocks.
+ BBs.clear();
+ const ValueSymbolTable &GST = M->getValueSymbolTable();
+ for (unsigned i = 0, e = BlockInfo.size(); i != e; ++i) {
+ Function *F = cast<Function>(GST.lookup(BlockInfo[i].first));
+ ValueSymbolTable &ST = F->getValueSymbolTable();
+ Value* V = ST.lookup(BlockInfo[i].second);
+ if (V && V->getType() == Type::getLabelTy(V->getContext()))
+ BBs.push_back(cast<BasicBlock>(V));
+ }
+ return true;
+ }
+ delete M; // It didn't crash, try something else.
+ return false;
+}
+
+namespace {
+  /// ReduceCrashingInstructions reducer - This works by removing all
+  /// non-terminator instructions except the specified ones (replacing their
+  /// uses with undef) and seeing if the program still crashes.
+ ///
+ class ReduceCrashingInstructions : public ListReducer<const Instruction*> {
+ BugDriver &BD;
+ bool (*TestFn)(const BugDriver &, Module *);
+ public:
+ ReduceCrashingInstructions(BugDriver &bd,
+ bool (*testFn)(const BugDriver &, Module *))
+ : BD(bd), TestFn(testFn) {}
+
+ virtual TestResult doTest(std::vector<const Instruction*> &Prefix,
+ std::vector<const Instruction*> &Kept,
+ std::string &Error) {
+ if (!Kept.empty() && TestInsts(Kept))
+ return KeepSuffix;
+ if (!Prefix.empty() && TestInsts(Prefix))
+ return KeepPrefix;
+ return NoFailure;
+ }
+
+ bool TestInsts(std::vector<const Instruction*> &Prefix);
+ };
+}
+
+bool ReduceCrashingInstructions::TestInsts(std::vector<const Instruction*>
+ &Insts) {
+ // Clone the program to try hacking it apart...
+ ValueToValueMapTy VMap;
+ Module *M = CloneModule(BD.getProgram(), VMap);
+
+ // Convert list to set for fast lookup...
+ SmallPtrSet<Instruction*, 64> Instructions;
+ for (unsigned i = 0, e = Insts.size(); i != e; ++i) {
+ assert(!isa<TerminatorInst>(Insts[i]));
+ Instructions.insert(cast<Instruction>(VMap[Insts[i]]));
+ }
+
+ outs() << "Checking for crash with only " << Instructions.size();
+ if (Instructions.size() == 1)
+ outs() << " instruction: ";
+ else
+ outs() << " instructions: ";
+
+ for (Module::iterator MI = M->begin(), ME = M->end(); MI != ME; ++MI)
+ for (Function::iterator FI = MI->begin(), FE = MI->end(); FI != FE; ++FI)
+ for (BasicBlock::iterator I = FI->begin(), E = FI->end(); I != E;) {
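+        // Advance the iterator now; erasing Inst below would otherwise
+        // invalidate it.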
+ Instruction *Inst = I++;
+ if (!Instructions.count(Inst) && !isa<TerminatorInst>(Inst) &&
+ !isa<LandingPadInst>(Inst)) {
+ if (!Inst->getType()->isVoidTy())
+ Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
+ Inst->eraseFromParent();
+ }
+ }
+
+ // Verify that this is still valid.
+ PassManager Passes;
+ Passes.add(createVerifierPass());
+ Passes.run(*M);
+
+ // Try running on the hacked up program...
+ if (TestFn(BD, M)) {
+ BD.setNewProgram(M); // It crashed, keep the trimmed version...
+
+ // Make sure to use instruction pointers that point into the now-current
+    // module, and that they don't include any deleted instructions.
+ Insts.clear();
+ for (SmallPtrSet<Instruction*, 64>::const_iterator I = Instructions.begin(),
+ E = Instructions.end(); I != E; ++I)
+ Insts.push_back(*I);
+ return true;
+ }
+ delete M; // It didn't crash, try something else.
+ return false;
+}
+
+/// DebugACrash - Given a predicate that determines whether a component crashes
+/// on a program, try to destructively reduce the program while still keeping
+/// the predicate true.
+static bool DebugACrash(BugDriver &BD,
+ bool (*TestFn)(const BugDriver &, Module *),
+ std::string &Error) {
+ // See if we can get away with nuking some of the global variable initializers
+ // in the program...
+ if (!NoGlobalRM &&
+ BD.getProgram()->global_begin() != BD.getProgram()->global_end()) {
+ // Now try to reduce the number of global variable initializers in the
+ // module to something small.
+ Module *M = CloneModule(BD.getProgram());
+ bool DeletedInit = false;
+
+ for (Module::global_iterator I = M->global_begin(), E = M->global_end();
+ I != E; ++I)
+ if (I->hasInitializer()) {
+ I->setInitializer(0);
+ I->setLinkage(GlobalValue::ExternalLinkage);
+ DeletedInit = true;
+ }
+
+ if (!DeletedInit) {
+ delete M; // No change made...
+ } else {
+ // See if the program still causes a crash...
+ outs() << "\nChecking to see if we can delete global inits: ";
+
+ if (TestFn(BD, M)) { // Still crashes?
+ BD.setNewProgram(M);
+ outs() << "\n*** Able to remove all global initializers!\n";
+ } else { // No longer crashes?
+ outs() << " - Removing all global inits hides problem!\n";
+ delete M;
+
+ std::vector<GlobalVariable*> GVs;
+
+ for (Module::global_iterator I = BD.getProgram()->global_begin(),
+ E = BD.getProgram()->global_end(); I != E; ++I)
+ if (I->hasInitializer())
+ GVs.push_back(I);
+
+ if (GVs.size() > 1 && !BugpointIsInterrupted) {
+ outs() << "\n*** Attempting to reduce the number of global "
+ << "variables in the testcase\n";
+
+ unsigned OldSize = GVs.size();
+ ReduceCrashingGlobalVariables(BD, TestFn).reduceList(GVs, Error);
+ if (!Error.empty())
+ return true;
+
+ if (GVs.size() < OldSize)
+ BD.EmitProgressBitcode(BD.getProgram(), "reduced-global-variables");
+ }
+ }
+ }
+ }
+
+ // Now try to reduce the number of functions in the module to something small.
+ std::vector<Function*> Functions;
+ for (Module::iterator I = BD.getProgram()->begin(),
+ E = BD.getProgram()->end(); I != E; ++I)
+ if (!I->isDeclaration())
+ Functions.push_back(I);
+
+ if (Functions.size() > 1 && !BugpointIsInterrupted) {
+ outs() << "\n*** Attempting to reduce the number of functions "
+ "in the testcase\n";
+
+ unsigned OldSize = Functions.size();
+ ReduceCrashingFunctions(BD, TestFn).reduceList(Functions, Error);
+
+ if (Functions.size() < OldSize)
+ BD.EmitProgressBitcode(BD.getProgram(), "reduced-function");
+ }
+
+ // Attempt to delete entire basic blocks at a time to speed up
+  // convergence... this actually works by replacing the terminators of the
+  // blocks with 'unreachable' and then running simplifycfg, which can shrink
+  // the code dramatically and quickly.
+ //
+ if (!DisableSimplifyCFG && !BugpointIsInterrupted) {
+ std::vector<const BasicBlock*> Blocks;
+ for (Module::const_iterator I = BD.getProgram()->begin(),
+ E = BD.getProgram()->end(); I != E; ++I)
+ for (Function::const_iterator FI = I->begin(), E = I->end(); FI !=E; ++FI)
+ Blocks.push_back(FI);
+ unsigned OldSize = Blocks.size();
+ ReduceCrashingBlocks(BD, TestFn).reduceList(Blocks, Error);
+ if (Blocks.size() < OldSize)
+ BD.EmitProgressBitcode(BD.getProgram(), "reduced-blocks");
+ }
+
+ // Attempt to delete instructions using bisection. This should help out nasty
+ // cases with large basic blocks where the problem is at one end.
+ if (!BugpointIsInterrupted) {
+ std::vector<const Instruction*> Insts;
+ for (Module::const_iterator MI = BD.getProgram()->begin(),
+ ME = BD.getProgram()->end(); MI != ME; ++MI)
+ for (Function::const_iterator FI = MI->begin(), FE = MI->end(); FI != FE;
+ ++FI)
+ for (BasicBlock::const_iterator I = FI->begin(), E = FI->end();
+ I != E; ++I)
+ if (!isa<TerminatorInst>(I))
+ Insts.push_back(I);
+
+ ReduceCrashingInstructions(BD, TestFn).reduceList(Insts, Error);
+ }
+
+ // FIXME: This should use the list reducer to converge faster by deleting
+ // larger chunks of instructions at a time!
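+  // Two simplification rounds are attempted; the Simplification level is
+  // handed to deleteInstructionFromProgram, which runs cleanup passes (ADCE
+  // and SimplifyCFG) on the cloned module after each tentative deletion.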
+ unsigned Simplification = 2;
+ do {
+ if (BugpointIsInterrupted) break;
+ --Simplification;
+ outs() << "\n*** Attempting to reduce testcase by deleting instruc"
+ << "tions: Simplification Level #" << Simplification << '\n';
+
+ // Now that we have deleted the functions that are unnecessary for the
+ // program, try to remove instructions that are not necessary to cause the
+ // crash. To do this, we loop through all of the instructions in the
+ // remaining functions, deleting them (replacing any values produced with
+ // nulls), and then running ADCE and SimplifyCFG. If the transformed input
+ // still triggers failure, keep deleting until we cannot trigger failure
+ // anymore.
+ //
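+    // InstructionsToSkipBeforeDeleting records how far the scan below got
+    // before the last successful deletion, so that after swapping in the
+    // reduced module the search resumes near that point rather than starting
+    // over.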
+ unsigned InstructionsToSkipBeforeDeleting = 0;
+ TryAgain:
+
+ // Loop over all of the (non-terminator) instructions remaining in the
+ // function, attempting to delete them.
+ unsigned CurInstructionNum = 0;
+ for (Module::const_iterator FI = BD.getProgram()->begin(),
+ E = BD.getProgram()->end(); FI != E; ++FI)
+ if (!FI->isDeclaration())
+ for (Function::const_iterator BI = FI->begin(), E = FI->end(); BI != E;
+ ++BI)
+ for (BasicBlock::const_iterator I = BI->begin(), E = --BI->end();
+ I != E; ++I, ++CurInstructionNum) {
+ if (InstructionsToSkipBeforeDeleting) {
+ --InstructionsToSkipBeforeDeleting;
+ } else {
+ if (BugpointIsInterrupted) goto ExitLoops;
+
+ if (isa<LandingPadInst>(I))
+ continue;
+
+ outs() << "Checking instruction: " << *I;
+ Module *M = BD.deleteInstructionFromProgram(I, Simplification);
+
+            // Find out if the passes still crash with this instruction deleted...
+ if (TestFn(BD, M)) {
+ // Yup, it does, we delete the old module, and continue trying
+ // to reduce the testcase...
+ BD.setNewProgram(M);
+ InstructionsToSkipBeforeDeleting = CurInstructionNum;
+ goto TryAgain; // I wish I had a multi-level break here!
+ }
+
+ // This pass didn't crash without this instruction, try the next
+ // one.
+ delete M;
+ }
+ }
+
+ if (InstructionsToSkipBeforeDeleting) {
+ InstructionsToSkipBeforeDeleting = 0;
+ goto TryAgain;
+ }
+
+ } while (Simplification);
+ExitLoops:
+
+  // Try to clean up the testcase by running the final cleanup passes...
+ if (!BugpointIsInterrupted) {
+ outs() << "\n*** Attempting to perform final cleanups: ";
+ Module *M = CloneModule(BD.getProgram());
+ M = BD.performFinalCleanups(M, true);
+
+ // Find out if the pass still crashes on the cleaned up program...
+ if (TestFn(BD, M)) {
+ BD.setNewProgram(M); // Yup, it does, keep the reduced version...
+ } else {
+ delete M;
+ }
+ }
+
+ BD.EmitProgressBitcode(BD.getProgram(), "reduced-simplified");
+
+ return false;
+}
+
+static bool TestForOptimizerCrash(const BugDriver &BD, Module *M) {
+ return BD.runPasses(M);
+}
+
+/// debugOptimizerCrash - This method is called when some pass crashes on input.
+/// It attempts to prune down the testcase to something reasonable, and figure
+/// out exactly which pass is crashing.
+///
+bool BugDriver::debugOptimizerCrash(const std::string &ID) {
+ outs() << "\n*** Debugging optimizer crash!\n";
+
+ std::string Error;
+  // Reduce the list of passes that cause the optimizer to crash...
+ if (!BugpointIsInterrupted)
+ ReducePassList(*this).reduceList(PassesToRun, Error);
+ assert(Error.empty());
+
+ outs() << "\n*** Found crashing pass"
+ << (PassesToRun.size() == 1 ? ": " : "es: ")
+ << getPassesString(PassesToRun) << '\n';
+
+ EmitProgressBitcode(Program, ID);
+
+ bool Success = DebugACrash(*this, TestForOptimizerCrash, Error);
+ assert(Error.empty());
+ return Success;
+}
+
+static bool TestForCodeGenCrash(const BugDriver &BD, Module *M) {
+ std::string Error;
+ BD.compileProgram(M, &Error);
+ if (!Error.empty()) {
+ errs() << "<crash>\n";
+ return true; // Tool is still crashing.
+ }
+ errs() << '\n';
+ return false;
+}
+
+/// debugCodeGeneratorCrash - This method is called when the code generator
+/// crashes on an input. It attempts to reduce the input as much as possible
+/// while still causing the code generator to crash.
+bool BugDriver::debugCodeGeneratorCrash(std::string &Error) {
+ errs() << "*** Debugging code generator crash!\n";
+
+ return DebugACrash(*this, TestForCodeGenCrash, Error);
+}
diff --git a/contrib/llvm/tools/bugpoint/ExecutionDriver.cpp b/contrib/llvm/tools/bugpoint/ExecutionDriver.cpp
new file mode 100644
index 0000000..218a559
--- /dev/null
+++ b/contrib/llvm/tools/bugpoint/ExecutionDriver.cpp
@@ -0,0 +1,472 @@
+//===- ExecutionDriver.cpp - Allow execution of LLVM program --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code used to execute the program utilizing one of the
+// various ways of running LLVM bitcode.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BugDriver.h"
+#include "ToolRunner.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/FileUtilities.h"
+#include "llvm/Support/SystemUtils.h"
+#include "llvm/Support/raw_ostream.h"
+#include <fstream>
+
+using namespace llvm;
+
+namespace {
+ // OutputType - Allow the user to specify the way code should be run, to test
+ // for miscompilation.
+ //
+ enum OutputType {
+ AutoPick, RunLLI, RunJIT, RunLLC, RunLLCIA, LLC_Safe, CompileCustom, Custom
+ };
+
+ cl::opt<double>
+ AbsTolerance("abs-tolerance", cl::desc("Absolute error tolerated"),
+ cl::init(0.0));
+ cl::opt<double>
+ RelTolerance("rel-tolerance", cl::desc("Relative error tolerated"),
+ cl::init(0.0));
+
+ cl::opt<OutputType>
+ InterpreterSel(cl::desc("Specify the \"test\" i.e. suspect back-end:"),
+ cl::values(clEnumValN(AutoPick, "auto", "Use best guess"),
+ clEnumValN(RunLLI, "run-int",
+ "Execute with the interpreter"),
+ clEnumValN(RunJIT, "run-jit", "Execute with JIT"),
+ clEnumValN(RunLLC, "run-llc", "Compile with LLC"),
+ clEnumValN(RunLLCIA, "run-llc-ia",
+ "Compile with LLC with integrated assembler"),
+ clEnumValN(LLC_Safe, "llc-safe", "Use LLC for all"),
+ clEnumValN(CompileCustom, "compile-custom",
+ "Use -compile-command to define a command to "
+ "compile the bitcode. Useful to avoid linking."),
+ clEnumValN(Custom, "run-custom",
+ "Use -exec-command to define a command to execute "
+ "the bitcode. Useful for cross-compilation."),
+ clEnumValEnd),
+ cl::init(AutoPick));
+
+ cl::opt<OutputType>
+ SafeInterpreterSel(cl::desc("Specify \"safe\" i.e. known-good backend:"),
+ cl::values(clEnumValN(AutoPick, "safe-auto", "Use best guess"),
+ clEnumValN(RunLLC, "safe-run-llc", "Compile with LLC"),
+ clEnumValN(Custom, "safe-run-custom",
+ "Use -exec-command to define a command to execute "
+ "the bitcode. Useful for cross-compilation."),
+ clEnumValEnd),
+ cl::init(AutoPick));
+
+ cl::opt<std::string>
+ SafeInterpreterPath("safe-path",
+ cl::desc("Specify the path to the \"safe\" backend program"),
+ cl::init(""));
+
+ cl::opt<bool>
+ AppendProgramExitCode("append-exit-code",
+ cl::desc("Append the exit code to the output so it gets diff'd too"),
+ cl::init(false));
+
+ cl::opt<std::string>
+ InputFile("input", cl::init("/dev/null"),
+ cl::desc("Filename to pipe in as stdin (default: /dev/null)"));
+
+ cl::list<std::string>
+ AdditionalSOs("additional-so",
+ cl::desc("Additional shared objects to load "
+ "into executing programs"));
+
+ cl::list<std::string>
+ AdditionalLinkerArgs("Xlinker",
+ cl::desc("Additional arguments to pass to the linker"));
+
+ cl::opt<std::string>
+ CustomCompileCommand("compile-command", cl::init("llc"),
+ cl::desc("Command to compile the bitcode (use with -compile-custom) "
+ "(default: llc)"));
+
+ cl::opt<std::string>
+ CustomExecCommand("exec-command", cl::init("simulate"),
+ cl::desc("Command to execute the bitcode (use with -run-custom) "
+ "(default: simulate)"));
+}
+
+namespace llvm {
+  // Any arguments specified after the --args option are passed to the
+  // program being debugged.
+ cl::list<std::string>
+ InputArgv("args", cl::Positional, cl::desc("<program arguments>..."),
+ cl::ZeroOrMore, cl::PositionalEatsArgs);
+
+ cl::opt<std::string>
+ OutputPrefix("output-prefix", cl::init("bugpoint"),
+ cl::desc("Prefix to use for outputs (default: 'bugpoint')"));
+}
+
+namespace {
+ cl::list<std::string>
+ ToolArgv("tool-args", cl::Positional, cl::desc("<tool arguments>..."),
+ cl::ZeroOrMore, cl::PositionalEatsArgs);
+
+ cl::list<std::string>
+ SafeToolArgv("safe-tool-args", cl::Positional,
+ cl::desc("<safe-tool arguments>..."),
+ cl::ZeroOrMore, cl::PositionalEatsArgs);
+
+ cl::opt<std::string>
+ GCCBinary("gcc", cl::init("gcc"),
+ cl::desc("The gcc binary to use. (default 'gcc')"));
+
+ cl::list<std::string>
+ GCCToolArgv("gcc-tool-args", cl::Positional,
+ cl::desc("<gcc-tool arguments>..."),
+ cl::ZeroOrMore, cl::PositionalEatsArgs);
+}
+
+//===----------------------------------------------------------------------===//
+// BugDriver method implementation
+//
+
+/// initializeExecutionEnvironment - This method is used to set up the
+/// environment for executing LLVM programs.
+///
+bool BugDriver::initializeExecutionEnvironment() {
+ outs() << "Initializing execution environment: ";
+
+ // Create an instance of the AbstractInterpreter interface as specified on
+ // the command line
+ SafeInterpreter = 0;
+ std::string Message;
+
+ switch (InterpreterSel) {
+ case AutoPick:
+ if (!Interpreter) {
+ InterpreterSel = RunJIT;
+ Interpreter = AbstractInterpreter::createJIT(getToolName(), Message,
+ &ToolArgv);
+ }
+ if (!Interpreter) {
+ InterpreterSel = RunLLC;
+ Interpreter = AbstractInterpreter::createLLC(getToolName(), Message,
+ GCCBinary, &ToolArgv,
+ &GCCToolArgv);
+ }
+ if (!Interpreter) {
+ InterpreterSel = RunLLI;
+ Interpreter = AbstractInterpreter::createLLI(getToolName(), Message,
+ &ToolArgv);
+ }
+ if (!Interpreter) {
+ InterpreterSel = AutoPick;
+ Message = "Sorry, I can't automatically select an interpreter!\n";
+ }
+ break;
+ case RunLLI:
+ Interpreter = AbstractInterpreter::createLLI(getToolName(), Message,
+ &ToolArgv);
+ break;
+ case RunLLC:
+ case RunLLCIA:
+ case LLC_Safe:
+ Interpreter = AbstractInterpreter::createLLC(getToolName(), Message,
+ GCCBinary, &ToolArgv,
+ &GCCToolArgv,
+ InterpreterSel == RunLLCIA);
+ break;
+ case RunJIT:
+ Interpreter = AbstractInterpreter::createJIT(getToolName(), Message,
+ &ToolArgv);
+ break;
+ case CompileCustom:
+ Interpreter =
+ AbstractInterpreter::createCustomCompiler(Message, CustomCompileCommand);
+ break;
+ case Custom:
+ Interpreter =
+ AbstractInterpreter::createCustomExecutor(Message, CustomExecCommand);
+ break;
+ }
+ if (!Interpreter)
+ errs() << Message;
+ else // Display informational messages on stdout instead of stderr
+ outs() << Message;
+
+ std::string Path = SafeInterpreterPath;
+ if (Path.empty())
+ Path = getToolName();
+ std::vector<std::string> SafeToolArgs = SafeToolArgv;
+ switch (SafeInterpreterSel) {
+ case AutoPick:
+ // In "llc-safe" mode, default to using LLC as the "safe" backend.
+ if (!SafeInterpreter &&
+ InterpreterSel == LLC_Safe) {
+ SafeInterpreterSel = RunLLC;
+ SafeToolArgs.push_back("--relocation-model=pic");
+ SafeInterpreter = AbstractInterpreter::createLLC(Path.c_str(), Message,
+ GCCBinary,
+ &SafeToolArgs,
+ &GCCToolArgv);
+ }
+
+ if (!SafeInterpreter &&
+ InterpreterSel != RunLLC &&
+ InterpreterSel != RunJIT) {
+ SafeInterpreterSel = RunLLC;
+ SafeToolArgs.push_back("--relocation-model=pic");
+ SafeInterpreter = AbstractInterpreter::createLLC(Path.c_str(), Message,
+ GCCBinary,
+ &SafeToolArgs,
+ &GCCToolArgv);
+ }
+ if (!SafeInterpreter) {
+ SafeInterpreterSel = AutoPick;
+ Message = "Sorry, I can't automatically select an interpreter!\n";
+ }
+ break;
+ case RunLLC:
+ case RunLLCIA:
+ SafeToolArgs.push_back("--relocation-model=pic");
+ SafeInterpreter = AbstractInterpreter::createLLC(Path.c_str(), Message,
+ GCCBinary, &SafeToolArgs,
+ &GCCToolArgv,
+ SafeInterpreterSel == RunLLCIA);
+ break;
+ case Custom:
+ SafeInterpreter =
+ AbstractInterpreter::createCustomExecutor(Message, CustomExecCommand);
+ break;
+ default:
+ Message = "Sorry, this back-end is not supported by bugpoint as the "
+ "\"safe\" backend right now!\n";
+ break;
+ }
+ if (!SafeInterpreter) { outs() << Message << "\nExiting.\n"; exit(1); }
+
+ gcc = GCC::create(Message, GCCBinary, &GCCToolArgv);
+ if (!gcc) { outs() << Message << "\nExiting.\n"; exit(1); }
+
+ // If there was an error creating the selected interpreter, quit with error.
+ return Interpreter == 0;
+}
+
+/// compileProgram - Try to compile the specified module, setting Error if an
+/// error occurs. This is used for code generation crash testing.
+///
+void BugDriver::compileProgram(Module *M, std::string *Error) const {
+ // Emit the program to a bitcode file...
+ sys::Path BitcodeFile (OutputPrefix + "-test-program.bc");
+ std::string ErrMsg;
+ if (BitcodeFile.makeUnique(true, &ErrMsg)) {
+ errs() << ToolName << ": Error making unique filename: " << ErrMsg
+ << "\n";
+ exit(1);
+ }
+ if (writeProgramToFile(BitcodeFile.str(), M)) {
+ errs() << ToolName << ": Error emitting bitcode to file '"
+ << BitcodeFile.str() << "'!\n";
+ exit(1);
+ }
+
+ // Remove the temporary bitcode file when we are done.
+ FileRemover BitcodeFileRemover(BitcodeFile.str(), !SaveTemps);
+
+ // Actually compile the program!
+ Interpreter->compileProgram(BitcodeFile.str(), Error, Timeout, MemoryLimit);
+}
+
+
+/// executeProgram - This method runs "Program", capturing its output to a
+/// file and returning that file's name. A preferred output filename may
+/// optionally be specified.
+///
+std::string BugDriver::executeProgram(const Module *Program,
+ std::string OutputFile,
+ std::string BitcodeFile,
+ const std::string &SharedObj,
+ AbstractInterpreter *AI,
+ std::string *Error) const {
+ if (AI == 0) AI = Interpreter;
+ assert(AI && "Interpreter should have been created already!");
+ bool CreatedBitcode = false;
+ std::string ErrMsg;
+ if (BitcodeFile.empty()) {
+ // Emit the program to a bitcode file...
+ sys::Path uniqueFilename(OutputPrefix + "-test-program.bc");
+ if (uniqueFilename.makeUnique(true, &ErrMsg)) {
+ errs() << ToolName << ": Error making unique filename: "
+ << ErrMsg << "!\n";
+ exit(1);
+ }
+ BitcodeFile = uniqueFilename.str();
+
+ if (writeProgramToFile(BitcodeFile, Program)) {
+ errs() << ToolName << ": Error emitting bitcode to file '"
+ << BitcodeFile << "'!\n";
+ exit(1);
+ }
+ CreatedBitcode = true;
+ }
+
+ // Remove the temporary bitcode file when we are done.
+ sys::Path BitcodePath(BitcodeFile);
+ FileRemover BitcodeFileRemover(BitcodePath.str(),
+ CreatedBitcode && !SaveTemps);
+
+ if (OutputFile.empty()) OutputFile = OutputPrefix + "-execution-output";
+
+ // Check to see if this is a valid output filename...
+ sys::Path uniqueFile(OutputFile);
+ if (uniqueFile.makeUnique(true, &ErrMsg)) {
+ errs() << ToolName << ": Error making unique filename: "
+ << ErrMsg << "\n";
+ exit(1);
+ }
+ OutputFile = uniqueFile.str();
+
+ // Figure out which shared objects to run, if any.
+ std::vector<std::string> SharedObjs(AdditionalSOs);
+ if (!SharedObj.empty())
+ SharedObjs.push_back(SharedObj);
+
+ int RetVal = AI->ExecuteProgram(BitcodeFile, InputArgv, InputFile, OutputFile,
+ Error, AdditionalLinkerArgs, SharedObjs,
+ Timeout, MemoryLimit);
+ if (!Error->empty())
+ return OutputFile;
+
+ if (RetVal == -1) {
+ errs() << "<timeout>";
+ static bool FirstTimeout = true;
+ if (FirstTimeout) {
+ outs() << "\n"
+ "*** Program execution timed out! This mechanism is designed to handle\n"
+ " programs stuck in infinite loops gracefully. The -timeout option\n"
+ " can be used to change the timeout threshold or disable it completely\n"
+ " (with -timeout=0). This message is only displayed once.\n";
+ FirstTimeout = false;
+ }
+ }
+
+ if (AppendProgramExitCode) {
+ std::ofstream outFile(OutputFile.c_str(), std::ios_base::app);
+ outFile << "exit " << RetVal << '\n';
+ outFile.close();
+ }
+
+ // Return the filename we captured the output to.
+ return OutputFile;
+}
+
+/// executeProgramSafely - Used to create reference output with the "safe"
+/// backend, if reference output is not provided.
+///
+std::string BugDriver::executeProgramSafely(const Module *Program,
+ std::string OutputFile,
+ std::string *Error) const {
+ return executeProgram(Program, OutputFile, "", "", SafeInterpreter, Error);
+}
+
+std::string BugDriver::compileSharedObject(const std::string &BitcodeFile,
+ std::string &Error) {
+ assert(Interpreter && "Interpreter should have been created already!");
+ sys::Path OutputFile;
+
+ // Using the known-good backend.
+ GCC::FileType FT = SafeInterpreter->OutputCode(BitcodeFile, OutputFile,
+ Error);
+ if (!Error.empty())
+ return "";
+
+ std::string SharedObjectFile;
+ bool Failure = gcc->MakeSharedObject(OutputFile.str(), FT, SharedObjectFile,
+ AdditionalLinkerArgs, Error);
+ if (!Error.empty())
+ return "";
+ if (Failure)
+ exit(1);
+
+ // Remove the intermediate C file
+ OutputFile.eraseFromDisk();
+
+ return "./" + SharedObjectFile;
+}
+
+/// createReferenceFile - calls compileProgram and then records the output
+/// into ReferenceOutputFile. Returns true if reference file created, false
+/// otherwise. Note: initializeExecutionEnvironment should be called BEFORE
+/// this function.
+///
+bool BugDriver::createReferenceFile(Module *M, const std::string &Filename) {
+ std::string Error;
+ compileProgram(Program, &Error);
+ if (!Error.empty())
+ return false;
+
+ ReferenceOutputFile = executeProgramSafely(Program, Filename, &Error);
+ if (!Error.empty()) {
+ errs() << Error;
+ if (Interpreter != SafeInterpreter) {
+ errs() << "*** There is a bug running the \"safe\" backend. Either"
+ << " debug it (for example with the -run-jit bugpoint option,"
+ << " if JIT is being used as the \"safe\" backend), or fix the"
+ << " error some other way.\n";
+ }
+ return false;
+ }
+ outs() << "\nReference output is: " << ReferenceOutputFile << "\n\n";
+ return true;
+}
+
+/// diffProgram - This method executes the specified module and diffs the
+/// output against the file specified by ReferenceOutputFile. If the output
+/// is different, 1 is returned. If there is a problem with the code
+/// generator (e.g., llc crashes), this will set ErrMsg.
+///
+bool BugDriver::diffProgram(const Module *Program,
+ const std::string &BitcodeFile,
+ const std::string &SharedObject,
+ bool RemoveBitcode,
+ std::string *ErrMsg) const {
+ // Execute the program, generating an output file...
+ sys::Path Output(executeProgram(Program, "", BitcodeFile, SharedObject, 0,
+ ErrMsg));
+ if (!ErrMsg->empty())
+ return false;
+
+ std::string Error;
+ bool FilesDifferent = false;
+ if (int Diff = DiffFilesWithTolerance(sys::Path(ReferenceOutputFile),
+ sys::Path(Output.str()),
+ AbsTolerance, RelTolerance, &Error)) {
+ if (Diff == 2) {
+ errs() << "While diffing output: " << Error << '\n';
+ exit(1);
+ }
+ FilesDifferent = true;
+ } else {
+ // Remove the generated output if there are no differences.
+ Output.eraseFromDisk();
+ }
+
+ // Remove the bitcode file if we are supposed to.
+ if (RemoveBitcode)
+ sys::Path(BitcodeFile).eraseFromDisk();
+ return FilesDifferent;
+}
+
+bool BugDriver::isExecutingJIT() {
+ return InterpreterSel == RunJIT;
+}
+
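A minimal standalone sketch (plain C++, not the LLVM FileUtilities API; the file names are made up) of the append-and-compare idea used above: -append-exit-code tacks an "exit <code>" line onto the captured output so that a differing exit status also shows up when diffProgram compares the run against the reference.

// Minimal standalone sketch (not the LLVM API): append the exit code to a
// captured-output file and compare it against a reference file, mirroring
// the -append-exit-code / diffProgram flow above. File names are made up.
#include <fstream>
#include <iostream>
#include <iterator>
#include <string>

static void appendExitCode(const std::string &OutFile, int RetVal) {
  std::ofstream Out(OutFile.c_str(), std::ios_base::app);
  Out << "exit " << RetVal << '\n';          // same line format as above
}

static bool filesDiffer(const std::string &A, const std::string &B) {
  std::ifstream FA(A.c_str()), FB(B.c_str());
  std::string SA((std::istreambuf_iterator<char>(FA)),
                 std::istreambuf_iterator<char>());
  std::string SB((std::istreambuf_iterator<char>(FB)),
                 std::istreambuf_iterator<char>());
  return SA != SB;   // byte-for-byte; the real comparison allows numeric slack
}

int main() {
  appendExitCode("bugpoint-execution-output", 0);
  std::cout << (filesDiffer("bugpoint-execution-output", "bugpoint.reference")
                    ? "outputs differ\n" : "outputs match\n");
  return 0;
}

The real comparison above, DiffFilesWithTolerance, additionally permits bounded numeric differences controlled by the AbsTolerance/RelTolerance values passed to it.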
diff --git a/contrib/llvm/tools/bugpoint/ExtractFunction.cpp b/contrib/llvm/tools/bugpoint/ExtractFunction.cpp
new file mode 100644
index 0000000..ac8e159
--- /dev/null
+++ b/contrib/llvm/tools/bugpoint/ExtractFunction.cpp
@@ -0,0 +1,418 @@
+//===- ExtractFunction.cpp - Extract a function from Program --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements several methods that are used to extract functions,
+// loops, or portions of a module from the rest of the module.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BugDriver.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Pass.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Assembly/Writer.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include "llvm/Transforms/Utils/FunctionUtils.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/FileUtilities.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Signals.h"
+#include <set>
+using namespace llvm;
+
+namespace llvm {
+ bool DisableSimplifyCFG = false;
+ extern cl::opt<std::string> OutputPrefix;
+} // End llvm namespace
+
+namespace {
+ cl::opt<bool>
+ NoDCE ("disable-dce",
+ cl::desc("Do not use the -dce pass to reduce testcases"));
+ cl::opt<bool, true>
+ NoSCFG("disable-simplifycfg", cl::location(DisableSimplifyCFG),
+ cl::desc("Do not use the -simplifycfg pass to reduce testcases"));
+
+ Function* globalInitUsesExternalBA(GlobalVariable* GV) {
+ if (!GV->hasInitializer())
+ return 0;
+
+ Constant *I = GV->getInitializer();
+
+ // walk the values used by the initializer
+ // (and recurse into things like ConstantExpr)
+ std::vector<Constant*> Todo;
+ std::set<Constant*> Done;
+ Todo.push_back(I);
+
+ while (!Todo.empty()) {
+ Constant* V = Todo.back();
+ Todo.pop_back();
+ Done.insert(V);
+
+ if (BlockAddress *BA = dyn_cast<BlockAddress>(V)) {
+ Function *F = BA->getFunction();
+ if (F->isDeclaration())
+ return F;
+ }
+
+ for (User::op_iterator i = V->op_begin(), e = V->op_end(); i != e; ++i) {
+ Constant *C = dyn_cast<Constant>(*i);
+ if (C && !isa<GlobalValue>(C) && !Done.count(C))
+ Todo.push_back(C);
+ }
+ }
+ return 0;
+ }
+} // end anonymous namespace
+
+/// deleteInstructionFromProgram - This method clones the current Program and
+/// deletes the specified instruction from the cloned module. It then runs a
+/// series of cleanup passes (ADCE and SimplifyCFG) to eliminate any code which
+/// depends on the value. The modified module is then returned.
+///
+Module *BugDriver::deleteInstructionFromProgram(const Instruction *I,
+ unsigned Simplification) {
+ // FIXME, use vmap?
+ Module *Clone = CloneModule(Program);
+
+ const BasicBlock *PBB = I->getParent();
+ const Function *PF = PBB->getParent();
+
+ Module::iterator RFI = Clone->begin(); // Get iterator to corresponding fn
+ std::advance(RFI, std::distance(PF->getParent()->begin(),
+ Module::const_iterator(PF)));
+
+ Function::iterator RBI = RFI->begin(); // Get iterator to corresponding BB
+ std::advance(RBI, std::distance(PF->begin(), Function::const_iterator(PBB)));
+
+ BasicBlock::iterator RI = RBI->begin(); // Get iterator to corresponding inst
+ std::advance(RI, std::distance(PBB->begin(), BasicBlock::const_iterator(I)));
+ Instruction *TheInst = RI; // Got the corresponding instruction!
+
+ // If this instruction produces a value, replace any users with null values
+ if (!TheInst->getType()->isVoidTy())
+ TheInst->replaceAllUsesWith(Constant::getNullValue(TheInst->getType()));
+
+ // Remove the instruction from the program.
+ TheInst->getParent()->getInstList().erase(TheInst);
+
+ // Spiff up the output a little bit.
+ std::vector<std::string> Passes;
+
+ /// Can we get rid of the -disable-* options?
+ if (Simplification > 1 && !NoDCE)
+ Passes.push_back("dce");
+ if (Simplification && !DisableSimplifyCFG)
+ Passes.push_back("simplifycfg"); // Delete dead control flow
+
+ Passes.push_back("verify");
+ Module *New = runPassesOn(Clone, Passes);
+ delete Clone;
+ if (!New) {
+ errs() << "Instruction removal failed. Sorry. :( Please report a bug!\n";
+ exit(1);
+ }
+ return New;
+}
+
+/// performFinalCleanups - This method clones the current Program and performs
+/// a series of cleanups intended to get rid of extra cruft on the module
+/// before handing it to the user.
+///
+Module *BugDriver::performFinalCleanups(Module *M, bool MayModifySemantics) {
+ // Make all functions external, so GlobalDCE doesn't delete them...
+ for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I)
+ I->setLinkage(GlobalValue::ExternalLinkage);
+
+ std::vector<std::string> CleanupPasses;
+ CleanupPasses.push_back("globaldce");
+
+ if (MayModifySemantics)
+ CleanupPasses.push_back("deadarghaX0r");
+ else
+ CleanupPasses.push_back("deadargelim");
+
+ Module *New = runPassesOn(M, CleanupPasses);
+ if (New == 0) {
+ errs() << "Final cleanups failed. Sorry. :( Please report a bug!\n";
+ return M;
+ }
+ delete M;
+ return New;
+}
+
+
+/// ExtractLoop - Given a module, extract up to one loop from it into a new
+/// function. This returns null if there are no extractable loops in the
+/// program or if the loop extractor crashes.
+Module *BugDriver::ExtractLoop(Module *M) {
+ std::vector<std::string> LoopExtractPasses;
+ LoopExtractPasses.push_back("loop-extract-single");
+
+ Module *NewM = runPassesOn(M, LoopExtractPasses);
+ if (NewM == 0) {
+ outs() << "*** Loop extraction failed: ";
+ EmitProgressBitcode(M, "loopextraction", true);
+ outs() << "*** Sorry. :( Please report a bug!\n";
+ return 0;
+ }
+
+ // Check to see if we created any new functions. If not, no loops were
+ // extracted and we should return null. Limit the number of loops we extract
+ // to avoid taking forever.
+ static unsigned NumExtracted = 32;
+ if (M->size() == NewM->size() || --NumExtracted == 0) {
+ delete NewM;
+ return 0;
+ } else {
+ assert(M->size() < NewM->size() && "Loop extract removed functions?");
+ Module::iterator MI = NewM->begin();
+ for (unsigned i = 0, e = M->size(); i != e; ++i)
+ ++MI;
+ }
+
+ return NewM;
+}
+
+
+// DeleteFunctionBody - "Remove" the function by deleting all of its basic
+// blocks, making it external.
+//
+void llvm::DeleteFunctionBody(Function *F) {
+ // delete the body of the function...
+ F->deleteBody();
+ assert(F->isDeclaration() && "This didn't make the function external!");
+}
+
+/// GetTorInit - Given a list of entries for static ctors/dtors, return them
+/// as a constant array.
+static Constant *GetTorInit(std::vector<std::pair<Function*, int> > &TorList) {
+ assert(!TorList.empty() && "Don't create empty tor list!");
+ std::vector<Constant*> ArrayElts;
+ Type *Int32Ty = Type::getInt32Ty(TorList[0].first->getContext());
+
+ StructType *STy =
+ StructType::get(Int32Ty, TorList[0].first->getType(), NULL);
+ for (unsigned i = 0, e = TorList.size(); i != e; ++i) {
+ Constant *Elts[] = {
+ ConstantInt::get(Int32Ty, TorList[i].second),
+ TorList[i].first
+ };
+ ArrayElts.push_back(ConstantStruct::get(STy, Elts));
+ }
+ return ConstantArray::get(ArrayType::get(ArrayElts[0]->getType(),
+ ArrayElts.size()),
+ ArrayElts);
+}
+
+/// SplitStaticCtorDtor - A module was recently split into two parts, M1/M2, and
+/// M1 has all of the global variables. If M2 contains any functions that are
+/// static ctors/dtors, we need to add an llvm.global_[cd]tors global to M2, and
+/// prune the appropriate entries out of M1's list.
+static void SplitStaticCtorDtor(const char *GlobalName, Module *M1, Module *M2,
+ ValueToValueMapTy &VMap) {
+ GlobalVariable *GV = M1->getNamedGlobal(GlobalName);
+ if (!GV || GV->isDeclaration() || GV->hasLocalLinkage() ||
+ !GV->use_empty()) return;
+
+ std::vector<std::pair<Function*, int> > M1Tors, M2Tors;
+ ConstantArray *InitList = dyn_cast<ConstantArray>(GV->getInitializer());
+ if (!InitList) return;
+
+ for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i) {
+ if (ConstantStruct *CS = dyn_cast<ConstantStruct>(InitList->getOperand(i))){
+ if (CS->getNumOperands() != 2) return; // Not array of 2-element structs.
+
+ if (CS->getOperand(1)->isNullValue())
+ break; // Found a null terminator, stop here.
+
+ ConstantInt *CI = dyn_cast<ConstantInt>(CS->getOperand(0));
+ int Priority = CI ? CI->getSExtValue() : 0;
+
+ Constant *FP = CS->getOperand(1);
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(FP))
+ if (CE->isCast())
+ FP = CE->getOperand(0);
+ if (Function *F = dyn_cast<Function>(FP)) {
+ if (!F->isDeclaration())
+ M1Tors.push_back(std::make_pair(F, Priority));
+ else {
+ // Map to M2's version of the function.
+ F = cast<Function>(VMap[F]);
+ M2Tors.push_back(std::make_pair(F, Priority));
+ }
+ }
+ }
+ }
+
+ GV->eraseFromParent();
+ if (!M1Tors.empty()) {
+ Constant *M1Init = GetTorInit(M1Tors);
+ new GlobalVariable(*M1, M1Init->getType(), false,
+ GlobalValue::AppendingLinkage,
+ M1Init, GlobalName);
+ }
+
+ GV = M2->getNamedGlobal(GlobalName);
+ assert(GV && "Not a clone of M1?");
+ assert(GV->use_empty() && "llvm.ctors shouldn't have uses!");
+
+ GV->eraseFromParent();
+ if (!M2Tors.empty()) {
+ Constant *M2Init = GetTorInit(M2Tors);
+ new GlobalVariable(*M2, M2Init->getType(), false,
+ GlobalValue::AppendingLinkage,
+ M2Init, GlobalName);
+ }
+}
+
+
+/// SplitFunctionsOutOfModule - Given a module and a list of functions in the
+/// module, split the functions OUT of the specified module, and place them in
+/// the new module.
+Module *
+llvm::SplitFunctionsOutOfModule(Module *M,
+ const std::vector<Function*> &F,
+ ValueToValueMapTy &VMap) {
+ // Make sure functions & globals are all external so that linkage
+ // between the two modules will work.
+ for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I)
+ I->setLinkage(GlobalValue::ExternalLinkage);
+ for (Module::global_iterator I = M->global_begin(), E = M->global_end();
+ I != E; ++I) {
+ if (I->hasName() && I->getName()[0] == '\01')
+ I->setName(I->getName().substr(1));
+ I->setLinkage(GlobalValue::ExternalLinkage);
+ }
+
+ ValueToValueMapTy NewVMap;
+ Module *New = CloneModule(M, NewVMap);
+
+ // Remove the Test functions from the Safe module
+ std::set<Function *> TestFunctions;
+ for (unsigned i = 0, e = F.size(); i != e; ++i) {
+ Function *TNOF = cast<Function>(VMap[F[i]]);
+ DEBUG(errs() << "Removing function ");
+ DEBUG(WriteAsOperand(errs(), TNOF, false));
+ DEBUG(errs() << "\n");
+ TestFunctions.insert(cast<Function>(NewVMap[TNOF]));
+ DeleteFunctionBody(TNOF); // Function is now external in this module!
+ }
+
+
+ // Remove the Safe functions from the Test module
+ for (Module::iterator I = New->begin(), E = New->end(); I != E; ++I)
+ if (!TestFunctions.count(I))
+ DeleteFunctionBody(I);
+
+
+ // Try to split the global initializers evenly
+ for (Module::global_iterator I = M->global_begin(), E = M->global_end();
+ I != E; ++I) {
+ GlobalVariable *GV = cast<GlobalVariable>(NewVMap[I]);
+ if (Function *TestFn = globalInitUsesExternalBA(I)) {
+ if (Function *SafeFn = globalInitUsesExternalBA(GV)) {
+ errs() << "*** Error: when reducing functions, encountered "
+ "the global '";
+ WriteAsOperand(errs(), GV, false);
+ errs() << "' with an initializer that references blockaddresses "
+ "from safe function '" << SafeFn->getName()
+ << "' and from test function '" << TestFn->getName() << "'.\n";
+ exit(1);
+ }
+ I->setInitializer(0); // Delete the initializer to make it external
+ } else {
+ // If we keep it in the safe module, then delete it in the test module
+ GV->setInitializer(0);
+ }
+ }
+
+ // Make sure that there is a global ctor/dtor array in both halves of the
+ // module if they both have static ctor/dtor functions.
+ SplitStaticCtorDtor("llvm.global_ctors", M, New, NewVMap);
+ SplitStaticCtorDtor("llvm.global_dtors", M, New, NewVMap);
+
+ return New;
+}
+
+//===----------------------------------------------------------------------===//
+// Basic Block Extraction Code
+//===----------------------------------------------------------------------===//
+
+/// ExtractMappedBlocksFromModule - Extract all but the specified basic blocks
+/// into their own functions. The only detail is that M is actually a module
+/// cloned from the one the BBs are in, so some mapping needs to be performed.
+/// If this operation fails for some reason (i.e., the implementation is buggy),
+/// this function should return null, otherwise it returns a new Module.
+Module *BugDriver::ExtractMappedBlocksFromModule(const
+ std::vector<BasicBlock*> &BBs,
+ Module *M) {
+ sys::Path uniqueFilename(OutputPrefix + "-extractblocks");
+ std::string ErrMsg;
+ if (uniqueFilename.createTemporaryFileOnDisk(true, &ErrMsg)) {
+ outs() << "*** Basic Block extraction failed!\n";
+ errs() << "Error creating temporary file: " << ErrMsg << "\n";
+ EmitProgressBitcode(M, "basicblockextractfail", true);
+ return 0;
+ }
+ sys::RemoveFileOnSignal(uniqueFilename);
+
+ std::string ErrorInfo;
+ tool_output_file BlocksToNotExtractFile(uniqueFilename.c_str(), ErrorInfo);
+ if (!ErrorInfo.empty()) {
+ outs() << "*** Basic Block extraction failed!\n";
+ errs() << "Error writing list of blocks to not extract: " << ErrorInfo
+ << "\n";
+ EmitProgressBitcode(M, "basicblockextractfail", true);
+ return 0;
+ }
+ for (std::vector<BasicBlock*>::const_iterator I = BBs.begin(), E = BBs.end();
+ I != E; ++I) {
+ BasicBlock *BB = *I;
+ // If the BB doesn't have a name, give it one so we have something to key
+ // off of.
+ if (!BB->hasName()) BB->setName("tmpbb");
+ BlocksToNotExtractFile.os() << BB->getParent()->getName() << " "
+ << BB->getName() << "\n";
+ }
+ BlocksToNotExtractFile.os().close();
+ if (BlocksToNotExtractFile.os().has_error()) {
+ errs() << "Error writing list of blocks to not extract: " << ErrorInfo
+ << "\n";
+ EmitProgressBitcode(M, "basicblockextractfail", true);
+ BlocksToNotExtractFile.os().clear_error();
+ return 0;
+ }
+ BlocksToNotExtractFile.keep();
+
+ std::string uniqueFN = "--extract-blocks-file=" + uniqueFilename.str();
+ const char *ExtraArg = uniqueFN.c_str();
+
+ std::vector<std::string> PI;
+ PI.push_back("extract-blocks");
+ Module *Ret = runPassesOn(M, PI, false, 1, &ExtraArg);
+
+ uniqueFilename.eraseFromDisk(); // Free disk space
+
+ if (Ret == 0) {
+ outs() << "*** Basic Block extraction failed, please report a bug!\n";
+ EmitProgressBitcode(M, "basicblockextractfail", true);
+ }
+ return Ret;
+}
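A standalone sketch (no LLVM headers; the types are stand-ins) of the mapping trick used in deleteInstructionFromProgram above: the clone of an instruction is found by advancing iterators through the cloned module by the same distances as in the original (function, then block, then instruction).

// Standalone sketch (no LLVM headers) of the std::distance/std::advance
// mapping used by deleteInstructionFromProgram: locate the element of a
// cloned, structurally identical container that corresponds to an element
// of the original. Types here are stand-ins, not LLVM classes.
#include <cassert>
#include <iostream>
#include <iterator>
#include <list>

typedef std::list<int> Block;          // stands in for a BasicBlock
typedef std::list<Block> Func;         // stands in for a Function

int main() {
  Func Orig;
  Orig.push_back(Block());
  Orig.back().push_back(10);
  Orig.back().push_back(20);
  Orig.back().push_back(30);
  Func Clone(Orig);                    // structurally identical copy

  // Pick an "instruction" in the original: second element of the first block.
  Func::const_iterator OrigBegin = Orig.begin();
  Func::const_iterator OB = OrigBegin;
  Block::const_iterator OI = OB->begin();
  ++OI;

  // Map it into the clone by walking the same distances there.
  Func::iterator CB = Clone.begin();
  std::advance(CB, std::distance(OrigBegin, OB));
  Block::iterator CI = CB->begin();
  std::advance(CI, std::distance(OB->begin(), OI));

  assert(*CI == *OI && "clone element should match the original");
  std::cout << "mapped value: " << *CI << '\n';   // prints 20
  return 0;
}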
diff --git a/contrib/llvm/tools/bugpoint/FindBugs.cpp b/contrib/llvm/tools/bugpoint/FindBugs.cpp
new file mode 100644
index 0000000..a291f9f
--- /dev/null
+++ b/contrib/llvm/tools/bugpoint/FindBugs.cpp
@@ -0,0 +1,113 @@
+//===-- FindBugs.cpp - Run Many Different Optimizations -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an interface that allows bugpoint to choose different
+// combinations of optimizations to run on the selected input. Bugpoint will
+// run these optimizations and record the success/failure of each. This way
+// we can hopefully spot bugs in the optimizations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BugDriver.h"
+#include "ToolRunner.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <ctime>
+using namespace llvm;
+
+/// runManyPasses - Take the specified pass list and create different
+/// combinations of passes to compile the program with. Compile the program with
+/// each set and check whether it compiled correctly. If a combination compiles
+/// correctly, output nothing and rearrange the passes into a new order. If it
+/// does not compile correctly, output the command required to recreate the
+/// failure. This returns true if a compiler error is found.
+///
+bool BugDriver::runManyPasses(const std::vector<std::string> &AllPasses,
+ std::string &ErrMsg) {
+ setPassesToRun(AllPasses);
+ outs() << "Starting bug finding procedure...\n\n";
+
+ // Creating a reference output if necessary
+ if (initializeExecutionEnvironment()) return false;
+
+ outs() << "\n";
+ if (ReferenceOutputFile.empty()) {
+ outs() << "Generating reference output from raw program: \n";
+ if (!createReferenceFile(Program))
+ return false;
+ }
+
+ srand(time(NULL));
+
+ unsigned num = 1;
+ while (1) {
+ //
+ // Step 1: Randomize the order of the optimizer passes.
+ //
+ std::random_shuffle(PassesToRun.begin(), PassesToRun.end());
+
+ //
+ // Step 2: Run optimizer passes on the program and check for success.
+ //
+ outs() << "Running selected passes on program to test for crash: ";
+ for (int i = 0, e = PassesToRun.size(); i != e; i++) {
+ outs() << "-" << PassesToRun[i] << " ";
+ }
+
+ std::string Filename;
+ if (runPasses(Program, PassesToRun, Filename, false)) {
+ outs() << "\n";
+ outs() << "Optimizer passes caused failure!\n\n";
+ debugOptimizerCrash();
+ return true;
+ } else {
+ outs() << "Combination " << num << " optimized successfully!\n";
+ }
+
+ //
+ // Step 3: Compile the optimized code.
+ //
+ outs() << "Running the code generator to test for a crash: ";
+ std::string Error;
+ compileProgram(Program, &Error);
+ if (!Error.empty()) {
+ outs() << "\n*** compileProgram threw an exception: ";
+ outs() << Error;
+ return debugCodeGeneratorCrash(ErrMsg);
+ }
+ outs() << '\n';
+
+ //
+ // Step 4: Run the program and compare its output to the reference
+ // output (created above).
+ //
+ outs() << "*** Checking if passes caused miscompliation:\n";
+ bool Diff = diffProgram(Program, Filename, "", false, &Error);
+ if (Error.empty() && Diff) {
+ outs() << "\n*** diffProgram returned true!\n";
+ debugMiscompilation(&Error);
+ if (Error.empty())
+ return true;
+ }
+ if (!Error.empty()) {
+ errs() << Error;
+ debugCodeGeneratorCrash(ErrMsg);
+ return true;
+ }
+ outs() << "\n*** diff'd output matches!\n";
+
+ sys::Path(Filename).eraseFromDisk();
+
+ outs() << "\n\n";
+ num++;
+ } // end while
+
+ // Unreachable.
+}
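A stubbed standalone sketch of the search loop above: keep shuffling the pass order and re-running the pipeline until some ordering fails. The failingCombination predicate is a placeholder for bugpoint's real runPasses/compileProgram/diffProgram machinery, and the pass names are only examples.

// Standalone sketch (no LLVM): shuffle the pass order, "run" it, and stop when
// a failing combination is found. failingCombination is a stand-in predicate.
#include <algorithm>
#include <cstdlib>
#include <ctime>
#include <iostream>
#include <string>
#include <vector>

static bool failingCombination(const std::vector<std::string> &Passes) {
  // Placeholder: pretend the bug only shows up when "licm" runs right
  // before "gvn".
  for (size_t i = 0; i + 1 < Passes.size(); ++i)
    if (Passes[i] == "licm" && Passes[i + 1] == "gvn")
      return true;
  return false;
}

int main() {
  std::vector<std::string> Passes;
  Passes.push_back("gvn");
  Passes.push_back("licm");
  Passes.push_back("instcombine");

  std::srand(unsigned(std::time(0)));
  for (unsigned Attempt = 1; ; ++Attempt) {
    std::random_shuffle(Passes.begin(), Passes.end());
    if (failingCombination(Passes)) {
      std::cout << "Attempt " << Attempt << " failed with: ";
      for (size_t i = 0; i != Passes.size(); ++i)
        std::cout << "-" << Passes[i] << " ";
      std::cout << "\n";
      return 0;
    }
  }
}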
diff --git a/contrib/llvm/tools/bugpoint/ListReducer.h b/contrib/llvm/tools/bugpoint/ListReducer.h
new file mode 100644
index 0000000..bd1c5da
--- /dev/null
+++ b/contrib/llvm/tools/bugpoint/ListReducer.h
@@ -0,0 +1,201 @@
+//===- ListReducer.h - Trim down list while retaining property --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class is to be used as a base class for operations that want to zero in
+// on a subset of the input which still causes the bug we are tracking.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef BUGPOINT_LIST_REDUCER_H
+#define BUGPOINT_LIST_REDUCER_H
+
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <vector>
+#include <cstdlib>
+#include <algorithm>
+
+namespace llvm {
+
+ extern bool BugpointIsInterrupted;
+
+template<typename ElTy>
+struct ListReducer {
+ enum TestResult {
+ NoFailure, // No failure of the predicate was detected
+ KeepSuffix, // The suffix alone satisfies the predicate
+ KeepPrefix, // The prefix alone satisfies the predicate
+ InternalError // Encountered an error trying to run the predicate
+ };
+
+ virtual ~ListReducer() {}
+
+ // doTest - This virtual function should be overridden by subclasses to
+ // implement the desired test. The test is only required to check whether the
+ // Kept list still satisfies the property, but if it is going to check the
+ // prefix anyway, it can.
+ //
+ virtual TestResult doTest(std::vector<ElTy> &Prefix,
+ std::vector<ElTy> &Kept,
+ std::string &Error) = 0;
+
+ // reduceList - This function attempts to reduce the length of the specified
+ // list while still maintaining the "test" property. This is the core of the
+ // "work" that bugpoint does.
+ //
+ bool reduceList(std::vector<ElTy> &TheList, std::string &Error) {
+ std::vector<ElTy> empty;
+ std::srand(0x6e5ea738); // Seed the random number generator
+ switch (doTest(TheList, empty, Error)) {
+ case KeepPrefix:
+ if (TheList.size() == 1) // we are done, it's the base case and it fails
+ return true;
+ else
+ break; // there's definitely an error, but we need to narrow it down
+
+ case KeepSuffix:
+ // cannot be reached!
+ llvm_unreachable("bugpoint ListReducer internal error: "
+ "selected empty set.");
+
+ case NoFailure:
+ return false; // there is no failure with the full set of passes/funcs!
+
+ case InternalError:
+ assert(!Error.empty());
+ return true;
+ }
+
+ // Maximum number of allowed splitting iterations before the elements are
+ // randomly shuffled.
+ const unsigned MaxIterationsWithoutProgress = 3;
+ bool ShufflingEnabled = true;
+
+Backjump:
+ unsigned MidTop = TheList.size();
+ unsigned MaxIterations = MaxIterationsWithoutProgress;
+ unsigned NumOfIterationsWithoutProgress = 0;
+ while (MidTop > 1) { // Binary split reduction loop
+ // Halt if the user presses ctrl-c.
+ if (BugpointIsInterrupted) {
+ errs() << "\n\n*** Reduction Interrupted, cleaning up...\n\n";
+ return true;
+ }
+
+ // If the loop doesn't make satisfactory progress, try shuffling.
+ // The purpose of shuffling is to avoid the heavy tails of the
+ // distribution (improving the speed of convergence).
+ if (ShufflingEnabled &&
+ NumOfIterationsWithoutProgress > MaxIterations) {
+ std::vector<ElTy> ShuffledList(TheList);
+ std::random_shuffle(ShuffledList.begin(), ShuffledList.end());
+ errs() << "\n\n*** Testing shuffled set...\n\n";
+ // Check that the random shuffle doesn't lose the bug
+ if (doTest(ShuffledList, empty, Error) == KeepPrefix) {
+ // If the bug is still here, use the shuffled list.
+ TheList.swap(ShuffledList);
+ MidTop = TheList.size();
+ // Must increase the shuffling threshold to avoid the small
+ // probability of infinite looping without making progress.
+ MaxIterations += 2;
+ errs() << "\n\n*** Shuffling does not hide the bug...\n\n";
+ } else {
+ ShufflingEnabled = false; // Disable shuffling further on
+ errs() << "\n\n*** Shuffling hides the bug...\n\n";
+ }
+ NumOfIterationsWithoutProgress = 0;
+ }
+
+ unsigned Mid = MidTop / 2;
+ std::vector<ElTy> Prefix(TheList.begin(), TheList.begin()+Mid);
+ std::vector<ElTy> Suffix(TheList.begin()+Mid, TheList.end());
+
+ switch (doTest(Prefix, Suffix, Error)) {
+ case KeepSuffix:
+ // The property still holds. We can just drop the prefix elements, and
+ // shorten the list to the "kept" elements.
+ TheList.swap(Suffix);
+ MidTop = TheList.size();
+ // Reset the progress threshold and progress counter
+ MaxIterations = MaxIterationsWithoutProgress;
+ NumOfIterationsWithoutProgress = 0;
+ break;
+ case KeepPrefix:
+ // The predicate still holds, shorten the list to the prefix elements.
+ TheList.swap(Prefix);
+ MidTop = TheList.size();
+ // Reset the progress threshold and progress counter
+ MaxIterations = MaxIterationsWithoutProgress;
+ NumOfIterationsWithoutProgress = 0;
+ break;
+ case NoFailure:
+ // Otherwise the property doesn't hold. Some of the elements we removed
+ // must be necessary to maintain the property.
+ MidTop = Mid;
+ NumOfIterationsWithoutProgress++;
+ break;
+ case InternalError:
+ return true; // Error was set by doTest.
+ }
+ assert(Error.empty() && "doTest did not return InternalError for error");
+ }
+
+ // Probability of backjumping from the trimming loop back to the binary
+ // split reduction loop.
+ const int BackjumpProbability = 10;
+
+ // Okay, we trimmed as much off the top and the bottom of the list as we
+ // could. If there are more than two elements in the list, try deleting
+ // interior elements and testing that.
+ //
+ if (TheList.size() > 2) {
+ bool Changed = true;
+ std::vector<ElTy> EmptyList;
+ while (Changed) { // Trimming loop.
+ Changed = false;
+
+ // If the binary split reduction loop made an unfortunate sequence of
+ // splits, the trimming loop might be left off with a huge number of
+ // remaining elements (large search space). Backjumping out of that
+ // search space and attempting a different split can significantly
+ // improve the convergence speed.
+ if (std::rand() % 100 < BackjumpProbability)
+ goto Backjump;
+
+ for (unsigned i = 1; i < TheList.size()-1; ++i) { // Check interior elts
+ if (BugpointIsInterrupted) {
+ errs() << "\n\n*** Reduction Interrupted, cleaning up...\n\n";
+ return true;
+ }
+
+ std::vector<ElTy> TestList(TheList);
+ TestList.erase(TestList.begin()+i);
+
+ if (doTest(EmptyList, TestList, Error) == KeepSuffix) {
+ // We can trim down the list!
+ TheList.swap(TestList);
+ --i; // Don't skip an element of the list
+ Changed = true;
+ }
+ if (!Error.empty())
+ return true;
+ }
+ // This can take a long time if left uncontrolled. For now, don't
+ // iterate.
+ break;
+ }
+ }
+
+ return true; // there are some failures and we've narrowed them down
+ }
+};
+
+} // End llvm namespace
+
+#endif
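ListReducer's reduceList is essentially a delta-debugging loop: binary prefix/suffix splitting followed by one-at-a-time trimming, with shuffling and backjumping as escape hatches. Below is a minimal standalone sketch of just the split-and-trim core on a std::vector<int> with a toy predicate; the shuffle/backjump heuristics and the Error plumbing are deliberately omitted.

// Standalone sketch (not the LLVM class itself): the prefix/suffix splitting
// and interior-element trimming at the heart of ListReducer, applied to a
// plain std::vector<int> with a toy predicate.
#include <iostream>
#include <vector>

// Toy property: the list still "fails" while it contains both 3 and 7.
static bool stillFails(const std::vector<int> &L) {
  bool Has3 = false, Has7 = false;
  for (size_t i = 0; i != L.size(); ++i) {
    if (L[i] == 3) Has3 = true;
    if (L[i] == 7) Has7 = true;
  }
  return Has3 && Has7;
}

static void reduce(std::vector<int> &List) {
  // Binary split: keep whichever half still satisfies the property.
  size_t MidTop = List.size();
  while (MidTop > 1) {
    size_t Mid = MidTop / 2;
    std::vector<int> Prefix(List.begin(), List.begin() + Mid);
    std::vector<int> Suffix(List.begin() + Mid, List.end());
    if (stillFails(Suffix)) {
      List.swap(Suffix);              // KeepSuffix
      MidTop = List.size();
    } else if (stillFails(Prefix)) {
      List.swap(Prefix);              // KeepPrefix
      MidTop = List.size();
    } else {
      MidTop = Mid;                   // both halves needed; narrow the split
    }
  }
  // Trim remaining elements one at a time while the property still holds.
  for (size_t i = 0; i < List.size(); ) {
    std::vector<int> Test(List);
    Test.erase(Test.begin() + i);
    if (stillFails(Test))
      List.swap(Test);                // element was unnecessary, drop it
    else
      ++i;                            // element is needed, keep it
  }
}

int main() {
  int Init[] = {1, 9, 3, 4, 5, 7, 2};
  std::vector<int> List(Init, Init + 7);
  reduce(List);
  std::cout << "reduced to " << List.size() << " elements:";  // expect 3 and 7
  for (size_t i = 0; i != List.size(); ++i)
    std::cout << ' ' << List[i];
  std::cout << '\n';
  return 0;
}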
diff --git a/contrib/llvm/tools/bugpoint/Miscompilation.cpp b/contrib/llvm/tools/bugpoint/Miscompilation.cpp
new file mode 100644
index 0000000..82a3a86
--- /dev/null
+++ b/contrib/llvm/tools/bugpoint/Miscompilation.cpp
@@ -0,0 +1,1079 @@
+//===- Miscompilation.cpp - Debug program miscompilations -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements optimizer and code generation miscompilation debugging
+// support.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BugDriver.h"
+#include "ListReducer.h"
+#include "ToolRunner.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/Linker.h"
+#include "llvm/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FileUtilities.h"
+#include "llvm/Config/config.h" // for HAVE_LINK_R
+using namespace llvm;
+
+namespace llvm {
+ extern cl::opt<std::string> OutputPrefix;
+ extern cl::list<std::string> InputArgv;
+}
+
+namespace {
+ static llvm::cl::opt<bool>
+ DisableLoopExtraction("disable-loop-extraction",
+ cl::desc("Don't extract loops when searching for miscompilations"),
+ cl::init(false));
+ static llvm::cl::opt<bool>
+ DisableBlockExtraction("disable-block-extraction",
+ cl::desc("Don't extract blocks when searching for miscompilations"),
+ cl::init(false));
+
+ class ReduceMiscompilingPasses : public ListReducer<std::string> {
+ BugDriver &BD;
+ public:
+ ReduceMiscompilingPasses(BugDriver &bd) : BD(bd) {}
+
+ virtual TestResult doTest(std::vector<std::string> &Prefix,
+ std::vector<std::string> &Suffix,
+ std::string &Error);
+ };
+}
+
+/// TestResult - After passes have been split into a test group and a control
+/// group, see if they still break the program.
+///
+ReduceMiscompilingPasses::TestResult
+ReduceMiscompilingPasses::doTest(std::vector<std::string> &Prefix,
+ std::vector<std::string> &Suffix,
+ std::string &Error) {
+ // First, run the program with just the Suffix passes. If it is still broken
+ // with JUST the kept passes, discard the prefix passes.
+ outs() << "Checking to see if '" << getPassesString(Suffix)
+ << "' compiles correctly: ";
+
+ std::string BitcodeResult;
+ if (BD.runPasses(BD.getProgram(), Suffix, BitcodeResult, false/*delete*/,
+ true/*quiet*/)) {
+ errs() << " Error running this sequence of passes"
+ << " on the input program!\n";
+ BD.setPassesToRun(Suffix);
+ BD.EmitProgressBitcode(BD.getProgram(), "pass-error", false);
+ exit(BD.debugOptimizerCrash());
+ }
+
+ // Check to see if the finished program matches the reference output...
+ bool Diff = BD.diffProgram(BD.getProgram(), BitcodeResult, "",
+ true /*delete bitcode*/, &Error);
+ if (!Error.empty())
+ return InternalError;
+ if (Diff) {
+ outs() << " nope.\n";
+ if (Suffix.empty()) {
+ errs() << BD.getToolName() << ": I'm confused: the test fails when "
+ << "no passes are run, nondeterministic program?\n";
+ exit(1);
+ }
+ return KeepSuffix; // Miscompilation detected!
+ }
+ outs() << " yup.\n"; // No miscompilation!
+
+ if (Prefix.empty()) return NoFailure;
+
+ // Next, see if the program is broken if we run the "prefix" passes first,
+ // then separately run the "kept" passes.
+ outs() << "Checking to see if '" << getPassesString(Prefix)
+ << "' compiles correctly: ";
+
+ // If it is not broken with the kept passes, it's possible that the prefix
+ // passes must be run before the kept passes to break it. If the program
+ // WORKS after the prefix passes, but then fails if running the prefix AND
+ // kept passes, we can update our bitcode file to include the result of the
+ // prefix passes, then discard the prefix passes.
+ //
+ if (BD.runPasses(BD.getProgram(), Prefix, BitcodeResult, false/*delete*/,
+ true/*quiet*/)) {
+ errs() << " Error running this sequence of passes"
+ << " on the input program!\n";
+ BD.setPassesToRun(Prefix);
+ BD.EmitProgressBitcode(BD.getProgram(), "pass-error", false);
+ exit(BD.debugOptimizerCrash());
+ }
+
+ // If the prefix maintains the predicate by itself, only keep the prefix!
+ Diff = BD.diffProgram(BD.getProgram(), BitcodeResult, "", false, &Error);
+ if (!Error.empty())
+ return InternalError;
+ if (Diff) {
+ outs() << " nope.\n";
+ sys::Path(BitcodeResult).eraseFromDisk();
+ return KeepPrefix;
+ }
+ outs() << " yup.\n"; // No miscompilation!
+
+ // Ok, so now we know that the prefix passes work, try running the suffix
+ // passes on the result of the prefix passes.
+ //
+ OwningPtr<Module> PrefixOutput(ParseInputFile(BitcodeResult,
+ BD.getContext()));
+ if (PrefixOutput == 0) {
+ errs() << BD.getToolName() << ": Error reading bitcode file '"
+ << BitcodeResult << "'!\n";
+ exit(1);
+ }
+ sys::Path(BitcodeResult).eraseFromDisk(); // No longer need the file on disk
+
+ // Don't check if there are no passes in the suffix.
+ if (Suffix.empty())
+ return NoFailure;
+
+ outs() << "Checking to see if '" << getPassesString(Suffix)
+ << "' passes compile correctly after the '"
+ << getPassesString(Prefix) << "' passes: ";
+
+ OwningPtr<Module> OriginalInput(BD.swapProgramIn(PrefixOutput.take()));
+ if (BD.runPasses(BD.getProgram(), Suffix, BitcodeResult, false/*delete*/,
+ true/*quiet*/)) {
+ errs() << " Error running this sequence of passes"
+ << " on the input program!\n";
+ BD.setPassesToRun(Suffix);
+ BD.EmitProgressBitcode(BD.getProgram(), "pass-error", false);
+ exit(BD.debugOptimizerCrash());
+ }
+
+ // Run the result...
+ Diff = BD.diffProgram(BD.getProgram(), BitcodeResult, "",
+ true /*delete bitcode*/, &Error);
+ if (!Error.empty())
+ return InternalError;
+ if (Diff) {
+ outs() << " nope.\n";
+ return KeepSuffix;
+ }
+
+ // Otherwise, we must not be running the bad pass anymore.
+ outs() << " yup.\n"; // No miscompilation!
+ // Restore orig program & free test.
+ delete BD.swapProgramIn(OriginalInput.take());
+ return NoFailure;
+}
+
+namespace {
+ class ReduceMiscompilingFunctions : public ListReducer<Function*> {
+ BugDriver &BD;
+ bool (*TestFn)(BugDriver &, Module *, Module *, std::string &);
+ public:
+ ReduceMiscompilingFunctions(BugDriver &bd,
+ bool (*F)(BugDriver &, Module *, Module *,
+ std::string &))
+ : BD(bd), TestFn(F) {}
+
+ virtual TestResult doTest(std::vector<Function*> &Prefix,
+ std::vector<Function*> &Suffix,
+ std::string &Error) {
+ if (!Suffix.empty()) {
+ bool Ret = TestFuncs(Suffix, Error);
+ if (!Error.empty())
+ return InternalError;
+ if (Ret)
+ return KeepSuffix;
+ }
+ if (!Prefix.empty()) {
+ bool Ret = TestFuncs(Prefix, Error);
+ if (!Error.empty())
+ return InternalError;
+ if (Ret)
+ return KeepPrefix;
+ }
+ return NoFailure;
+ }
+
+ bool TestFuncs(const std::vector<Function*> &Prefix, std::string &Error);
+ };
+}
+
+/// TestMergedProgram - Given two modules, link them together and run the
+/// program, checking to see if its output matches the reference output. If
+/// there is an error, return NULL. If not, return the merged module. The
+/// Broken argument will be set to true if the output is different. If the
+/// DeleteInputs argument is set to true then this function deletes both input
+/// modules before it returns.
+///
+static Module *TestMergedProgram(const BugDriver &BD, Module *M1, Module *M2,
+ bool DeleteInputs, std::string &Error,
+ bool &Broken) {
+ // Link the two portions of the program back together.
+ std::string ErrorMsg;
+ if (!DeleteInputs) {
+ M1 = CloneModule(M1);
+ M2 = CloneModule(M2);
+ }
+ if (Linker::LinkModules(M1, M2, Linker::DestroySource, &ErrorMsg)) {
+ errs() << BD.getToolName() << ": Error linking modules together:"
+ << ErrorMsg << '\n';
+ exit(1);
+ }
+ delete M2; // We are done with this module.
+
+ // Execute the program.
+ Broken = BD.diffProgram(M1, "", "", false, &Error);
+ if (!Error.empty()) {
+ // Delete the linked module
+ delete M1;
+ return NULL;
+ }
+ return M1;
+}
+
+/// TestFuncs - split functions in a Module into two groups: those that are
+/// under consideration for miscompilation vs. those that are not, and test
+/// accordingly. Each group of functions becomes a separate Module.
+///
+bool ReduceMiscompilingFunctions::TestFuncs(const std::vector<Function*> &Funcs,
+ std::string &Error) {
+ // Test to see if the function is misoptimized if we ONLY run it on the
+ // functions listed in Funcs.
+ outs() << "Checking to see if the program is misoptimized when "
+ << (Funcs.size()==1 ? "this function is" : "these functions are")
+ << " run through the pass"
+ << (BD.getPassesToRun().size() == 1 ? "" : "es") << ":";
+ PrintFunctionList(Funcs);
+ outs() << '\n';
+
+ // Create a clone for two reasons:
+ // * If the optimization passes delete any function, the deleted function
+ // will be in the clone and Funcs will still point to valid memory
+ // * If the optimization passes use interprocedural information to break
+ // a function, we want to continue with the original function. Otherwise
+ // we can conclude that a function triggers the bug when in fact one
+ // needs a larger set of original functions to do so.
+ ValueToValueMapTy VMap;
+ Module *Clone = CloneModule(BD.getProgram(), VMap);
+ Module *Orig = BD.swapProgramIn(Clone);
+
+ std::vector<Function*> FuncsOnClone;
+ for (unsigned i = 0, e = Funcs.size(); i != e; ++i) {
+ Function *F = cast<Function>(VMap[Funcs[i]]);
+ FuncsOnClone.push_back(F);
+ }
+
+ // Split the module into the two halves of the program we want.
+ VMap.clear();
+ Module *ToNotOptimize = CloneModule(BD.getProgram(), VMap);
+ Module *ToOptimize = SplitFunctionsOutOfModule(ToNotOptimize, FuncsOnClone,
+ VMap);
+
+ // Run the predicate, note that the predicate will delete both input modules.
+ bool Broken = TestFn(BD, ToOptimize, ToNotOptimize, Error);
+
+ delete BD.swapProgramIn(Orig);
+
+ return Broken;
+}
+
+/// DisambiguateGlobalSymbols - Give anonymous global values names.
+///
+static void DisambiguateGlobalSymbols(Module *M) {
+ for (Module::global_iterator I = M->global_begin(), E = M->global_end();
+ I != E; ++I)
+ if (!I->hasName())
+ I->setName("anon_global");
+ for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I)
+ if (!I->hasName())
+ I->setName("anon_fn");
+}
+
+/// ExtractLoops - Given a reduced list of functions that still exposed the bug,
+/// check to see if we can extract the loops in the region without obscuring the
+/// bug. If so, it reduces the amount of code identified.
+///
+static bool ExtractLoops(BugDriver &BD,
+ bool (*TestFn)(BugDriver &, Module *, Module *,
+ std::string &),
+ std::vector<Function*> &MiscompiledFunctions,
+ std::string &Error) {
+ bool MadeChange = false;
+ while (1) {
+ if (BugpointIsInterrupted) return MadeChange;
+
+ ValueToValueMapTy VMap;
+ Module *ToNotOptimize = CloneModule(BD.getProgram(), VMap);
+ Module *ToOptimize = SplitFunctionsOutOfModule(ToNotOptimize,
+ MiscompiledFunctions,
+ VMap);
+ Module *ToOptimizeLoopExtracted = BD.ExtractLoop(ToOptimize);
+ if (!ToOptimizeLoopExtracted) {
+ // If the loop extractor crashed or if there were no extractable loops,
+ // then this chapter of our odyssey is over with.
+ delete ToNotOptimize;
+ delete ToOptimize;
+ return MadeChange;
+ }
+
+ errs() << "Extracted a loop from the breaking portion of the program.\n";
+
+ // Bugpoint is intentionally not very trusting of LLVM transformations. In
+ // particular, we're not going to assume that the loop extractor works, so
+ // we're going to test the newly loop extracted program to make sure nothing
+ // has broken. If something broke, then we'll inform the user and stop
+ // extraction.
+ AbstractInterpreter *AI = BD.switchToSafeInterpreter();
+ bool Failure;
+ Module *New = TestMergedProgram(BD, ToOptimizeLoopExtracted, ToNotOptimize,
+ false, Error, Failure);
+ if (!New)
+ return false;
+ // Delete the original and set the new program.
+ delete BD.swapProgramIn(New);
+ if (Failure) {
+ BD.switchToInterpreter(AI);
+
+ // Merged program doesn't work anymore!
+ errs() << " *** ERROR: Loop extraction broke the program. :("
+ << " Please report a bug!\n";
+ errs() << " Continuing on with un-loop-extracted version.\n";
+
+ BD.writeProgramToFile(OutputPrefix + "-loop-extract-fail-tno.bc",
+ ToNotOptimize);
+ BD.writeProgramToFile(OutputPrefix + "-loop-extract-fail-to.bc",
+ ToOptimize);
+ BD.writeProgramToFile(OutputPrefix + "-loop-extract-fail-to-le.bc",
+ ToOptimizeLoopExtracted);
+
+ errs() << "Please submit the "
+ << OutputPrefix << "-loop-extract-fail-*.bc files.\n";
+ delete ToOptimize;
+ delete ToNotOptimize;
+ delete ToOptimizeLoopExtracted;
+ return MadeChange;
+ }
+ delete ToOptimize;
+ BD.switchToInterpreter(AI);
+
+ outs() << " Testing after loop extraction:\n";
+ // Clone modules, the tester function will free them.
+ Module *TOLEBackup = CloneModule(ToOptimizeLoopExtracted);
+ Module *TNOBackup = CloneModule(ToNotOptimize);
+ Failure = TestFn(BD, ToOptimizeLoopExtracted, ToNotOptimize, Error);
+ if (!Error.empty())
+ return false;
+ if (!Failure) {
+ outs() << "*** Loop extraction masked the problem. Undoing.\n";
+ // If the program is not still broken, then loop extraction did something
+ // that masked the error. Stop loop extraction now.
+ delete TOLEBackup;
+ delete TNOBackup;
+ return MadeChange;
+ }
+ ToOptimizeLoopExtracted = TOLEBackup;
+ ToNotOptimize = TNOBackup;
+
+ outs() << "*** Loop extraction successful!\n";
+
+ std::vector<std::pair<std::string, FunctionType*> > MisCompFunctions;
+ for (Module::iterator I = ToOptimizeLoopExtracted->begin(),
+ E = ToOptimizeLoopExtracted->end(); I != E; ++I)
+ if (!I->isDeclaration())
+ MisCompFunctions.push_back(std::make_pair(I->getName(),
+ I->getFunctionType()));
+
+ // Okay, great! Now we know that we extracted a loop and that loop
+ // extraction both didn't break the program, and didn't mask the problem.
+ // Replace the current program with the loop extracted version, and try to
+ // extract another loop.
+ std::string ErrorMsg;
+ if (Linker::LinkModules(ToNotOptimize, ToOptimizeLoopExtracted,
+ Linker::DestroySource, &ErrorMsg)){
+ errs() << BD.getToolName() << ": Error linking modules together:"
+ << ErrorMsg << '\n';
+ exit(1);
+ }
+ delete ToOptimizeLoopExtracted;
+
+ // All of the Function*'s in the MiscompiledFunctions list are in the old
+ // module. Update this list to include all of the functions in the
+ // optimized and loop extracted module.
+ MiscompiledFunctions.clear();
+ for (unsigned i = 0, e = MisCompFunctions.size(); i != e; ++i) {
+ Function *NewF = ToNotOptimize->getFunction(MisCompFunctions[i].first);
+
+ assert(NewF && "Function not found??");
+ MiscompiledFunctions.push_back(NewF);
+ }
+
+ BD.setNewProgram(ToNotOptimize);
+ MadeChange = true;
+ }
+}
+
+namespace {
+ class ReduceMiscompiledBlocks : public ListReducer<BasicBlock*> {
+ BugDriver &BD;
+ bool (*TestFn)(BugDriver &, Module *, Module *, std::string &);
+ std::vector<Function*> FunctionsBeingTested;
+ public:
+ ReduceMiscompiledBlocks(BugDriver &bd,
+ bool (*F)(BugDriver &, Module *, Module *,
+ std::string &),
+ const std::vector<Function*> &Fns)
+ : BD(bd), TestFn(F), FunctionsBeingTested(Fns) {}
+
+ virtual TestResult doTest(std::vector<BasicBlock*> &Prefix,
+ std::vector<BasicBlock*> &Suffix,
+ std::string &Error) {
+ if (!Suffix.empty()) {
+ bool Ret = TestFuncs(Suffix, Error);
+ if (!Error.empty())
+ return InternalError;
+ if (Ret)
+ return KeepSuffix;
+ }
+ if (!Prefix.empty()) {
+ bool Ret = TestFuncs(Prefix, Error);
+ if (!Error.empty())
+ return InternalError;
+ if (Ret)
+ return KeepPrefix;
+ }
+ return NoFailure;
+ }
+
+ bool TestFuncs(const std::vector<BasicBlock*> &BBs, std::string &Error);
+ };
+}
+
+/// TestFuncs - Extract all blocks for the miscompiled functions except for the
+/// specified blocks. If the problem still exists, return true.
+///
+bool ReduceMiscompiledBlocks::TestFuncs(const std::vector<BasicBlock*> &BBs,
+ std::string &Error) {
+ // Test to see if the function is misoptimized if we ONLY run it on the
+ // functions listed in Funcs.
+ outs() << "Checking to see if the program is misoptimized when all ";
+ if (!BBs.empty()) {
+ outs() << "but these " << BBs.size() << " blocks are extracted: ";
+ for (unsigned i = 0, e = BBs.size() < 10 ? BBs.size() : 10; i != e; ++i)
+ outs() << BBs[i]->getName() << " ";
+ if (BBs.size() > 10) outs() << "...";
+ } else {
+ outs() << "blocks are extracted.";
+ }
+ outs() << '\n';
+
+ // Split the module into the two halves of the program we want.
+ ValueToValueMapTy VMap;
+ Module *Clone = CloneModule(BD.getProgram(), VMap);
+ Module *Orig = BD.swapProgramIn(Clone);
+ std::vector<Function*> FuncsOnClone;
+ std::vector<BasicBlock*> BBsOnClone;
+ for (unsigned i = 0, e = FunctionsBeingTested.size(); i != e; ++i) {
+ Function *F = cast<Function>(VMap[FunctionsBeingTested[i]]);
+ FuncsOnClone.push_back(F);
+ }
+ for (unsigned i = 0, e = BBs.size(); i != e; ++i) {
+ BasicBlock *BB = cast<BasicBlock>(VMap[BBs[i]]);
+ BBsOnClone.push_back(BB);
+ }
+ VMap.clear();
+
+ Module *ToNotOptimize = CloneModule(BD.getProgram(), VMap);
+ Module *ToOptimize = SplitFunctionsOutOfModule(ToNotOptimize,
+ FuncsOnClone,
+ VMap);
+
+ // Try the extraction. If it doesn't work, then the block extractor crashed
+ // or something, in which case bugpoint can't chase down this possibility.
+ if (Module *New = BD.ExtractMappedBlocksFromModule(BBsOnClone, ToOptimize)) {
+ delete ToOptimize;
+ // Run the predicate,
+ // note that the predicate will delete both input modules.
+ bool Ret = TestFn(BD, New, ToNotOptimize, Error);
+ delete BD.swapProgramIn(Orig);
+ return Ret;
+ }
+ delete BD.swapProgramIn(Orig);
+ delete ToOptimize;
+ delete ToNotOptimize;
+ return false;
+}
+
+
+/// ExtractBlocks - Given a reduced list of functions that still expose the bug,
+/// extract as many basic blocks from the region as possible without obscuring
+/// the bug.
+///
+static bool ExtractBlocks(BugDriver &BD,
+ bool (*TestFn)(BugDriver &, Module *, Module *,
+ std::string &),
+ std::vector<Function*> &MiscompiledFunctions,
+ std::string &Error) {
+ if (BugpointIsInterrupted) return false;
+
+ std::vector<BasicBlock*> Blocks;
+ for (unsigned i = 0, e = MiscompiledFunctions.size(); i != e; ++i)
+ for (Function::iterator I = MiscompiledFunctions[i]->begin(),
+ E = MiscompiledFunctions[i]->end(); I != E; ++I)
+ Blocks.push_back(I);
+
+ // Use the list reducer to identify blocks that can be extracted without
+ // obscuring the bug. The Blocks list will end up containing blocks that must
+ // be retained from the original program.
+ unsigned OldSize = Blocks.size();
+
+ // Check to see if all blocks are extractable first.
+ bool Ret = ReduceMiscompiledBlocks(BD, TestFn, MiscompiledFunctions)
+ .TestFuncs(std::vector<BasicBlock*>(), Error);
+ if (!Error.empty())
+ return false;
+ if (Ret) {
+ Blocks.clear();
+ } else {
+ ReduceMiscompiledBlocks(BD, TestFn,
+ MiscompiledFunctions).reduceList(Blocks, Error);
+ if (!Error.empty())
+ return false;
+ if (Blocks.size() == OldSize)
+ return false;
+ }
+
+ ValueToValueMapTy VMap;
+ Module *ProgClone = CloneModule(BD.getProgram(), VMap);
+ Module *ToExtract = SplitFunctionsOutOfModule(ProgClone,
+ MiscompiledFunctions,
+ VMap);
+ Module *Extracted = BD.ExtractMappedBlocksFromModule(Blocks, ToExtract);
+ if (Extracted == 0) {
+ // Weird, extraction should have worked.
+ errs() << "Nondeterministic problem extracting blocks??\n";
+ delete ProgClone;
+ delete ToExtract;
+ return false;
+ }
+
+ // Otherwise, block extraction succeeded. Link the two program fragments back
+ // together.
+ delete ToExtract;
+
+ std::vector<std::pair<std::string, FunctionType*> > MisCompFunctions;
+ for (Module::iterator I = Extracted->begin(), E = Extracted->end();
+ I != E; ++I)
+ if (!I->isDeclaration())
+ MisCompFunctions.push_back(std::make_pair(I->getName(),
+ I->getFunctionType()));
+
+ std::string ErrorMsg;
+ if (Linker::LinkModules(ProgClone, Extracted, Linker::DestroySource,
+ &ErrorMsg)) {
+ errs() << BD.getToolName() << ": Error linking modules together:"
+ << ErrorMsg << '\n';
+ exit(1);
+ }
+ delete Extracted;
+
+ // Set the new program and delete the old one.
+ BD.setNewProgram(ProgClone);
+
+ // Update the list of miscompiled functions.
+ MiscompiledFunctions.clear();
+
+ for (unsigned i = 0, e = MisCompFunctions.size(); i != e; ++i) {
+ Function *NewF = ProgClone->getFunction(MisCompFunctions[i].first);
+ assert(NewF && "Function not found??");
+ MiscompiledFunctions.push_back(NewF);
+ }
+
+ return true;
+}
+
+
+/// DebugAMiscompilation - This is a generic driver to narrow down
+/// miscompilations, either in an optimization or a code generator.
+///
+static std::vector<Function*>
+DebugAMiscompilation(BugDriver &BD,
+ bool (*TestFn)(BugDriver &, Module *, Module *,
+ std::string &),
+ std::string &Error) {
+ // Okay, now that we have reduced the list of passes which are causing the
+ // failure, see if we can pin down which functions are being
+ // miscompiled... first build a list of all of the non-external functions in
+ // the program.
+ std::vector<Function*> MiscompiledFunctions;
+ Module *Prog = BD.getProgram();
+ for (Module::iterator I = Prog->begin(), E = Prog->end(); I != E; ++I)
+ if (!I->isDeclaration())
+ MiscompiledFunctions.push_back(I);
+
+ // Do the reduction...
+ if (!BugpointIsInterrupted)
+ ReduceMiscompilingFunctions(BD, TestFn).reduceList(MiscompiledFunctions,
+ Error);
+ if (!Error.empty()) {
+ errs() << "\n***Cannot reduce functions: ";
+ return MiscompiledFunctions;
+ }
+ outs() << "\n*** The following function"
+ << (MiscompiledFunctions.size() == 1 ? " is" : "s are")
+ << " being miscompiled: ";
+ PrintFunctionList(MiscompiledFunctions);
+ outs() << '\n';
+
+ // See if we can rip any loops out of the miscompiled functions and still
+ // trigger the problem.
+
+ if (!BugpointIsInterrupted && !DisableLoopExtraction) {
+ bool Ret = ExtractLoops(BD, TestFn, MiscompiledFunctions, Error);
+ if (!Error.empty())
+ return MiscompiledFunctions;
+ if (Ret) {
+ // Okay, we extracted some loops and the problem still appears. See if
+ // we can eliminate some of the created functions from being candidates.
+ DisambiguateGlobalSymbols(BD.getProgram());
+
+ // Do the reduction...
+ if (!BugpointIsInterrupted)
+ ReduceMiscompilingFunctions(BD, TestFn).reduceList(MiscompiledFunctions,
+ Error);
+ if (!Error.empty())
+ return MiscompiledFunctions;
+
+ outs() << "\n*** The following function"
+ << (MiscompiledFunctions.size() == 1 ? " is" : "s are")
+ << " being miscompiled: ";
+ PrintFunctionList(MiscompiledFunctions);
+ outs() << '\n';
+ }
+ }
+
+ if (!BugpointIsInterrupted && !DisableBlockExtraction) {
+ bool Ret = ExtractBlocks(BD, TestFn, MiscompiledFunctions, Error);
+ if (!Error.empty())
+ return MiscompiledFunctions;
+ if (Ret) {
+ // Okay, we extracted some blocks and the problem still appears. See if
+ // we can eliminate some of the created functions from being candidates.
+ DisambiguateGlobalSymbols(BD.getProgram());
+
+ // Do the reduction...
+ ReduceMiscompilingFunctions(BD, TestFn).reduceList(MiscompiledFunctions,
+ Error);
+ if (!Error.empty())
+ return MiscompiledFunctions;
+
+ outs() << "\n*** The following function"
+ << (MiscompiledFunctions.size() == 1 ? " is" : "s are")
+ << " being miscompiled: ";
+ PrintFunctionList(MiscompiledFunctions);
+ outs() << '\n';
+ }
+ }
+
+ return MiscompiledFunctions;
+}
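+
+// The reductions above are driven by bugpoint's generic ListReducer (see
+// ListReducer.h), which repeatedly re-runs a test predicate on subsets of the
+// list. As a rough, hypothetical sketch of that idea (not the actual
+// ListReducer implementation), a predicate-driven bisection over a list of
+// candidates looks like this, assuming <vector> and a callable StillFails:
+//
+//   template <typename T, typename Pred>
+//   void bisectList(std::vector<T> &Items, Pred StillFails) {
+//     bool Shrunk = true;
+//     while (Shrunk && Items.size() > 1) {
+//       Shrunk = false;
+//       size_t Mid = Items.size() / 2;
+//       std::vector<T> Front(Items.begin(), Items.begin() + Mid);
+//       std::vector<T> Back(Items.begin() + Mid, Items.end());
+//       // Keep whichever half still reproduces the failure on its own.
+//       if (StillFails(Front))     { Items.swap(Front); Shrunk = true; }
+//       else if (StillFails(Back)) { Items.swap(Back);  Shrunk = true; }
+//     }
+//   }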
+
+/// TestOptimizer - This is the predicate function used to check to see if the
+/// "Test" portion of the program is misoptimized. If so, return true. In any
+/// case, both module arguments are deleted.
+///
+static bool TestOptimizer(BugDriver &BD, Module *Test, Module *Safe,
+ std::string &Error) {
+ // Run the optimization passes on ToOptimize, producing a transformed version
+ // of the functions being tested.
+ outs() << " Optimizing functions being tested: ";
+ Module *Optimized = BD.runPassesOn(Test, BD.getPassesToRun(),
+ /*AutoDebugCrashes*/true);
+ outs() << "done.\n";
+ delete Test;
+
+ outs() << " Checking to see if the merged program executes correctly: ";
+ bool Broken;
+ Module *New = TestMergedProgram(BD, Optimized, Safe, true, Error, Broken);
+ if (New) {
+ outs() << (Broken ? " nope.\n" : " yup.\n");
+ // Delete the original and set the new program.
+ delete BD.swapProgramIn(New);
+ }
+ return Broken;
+}
+
+
+/// debugMiscompilation - This method is used when the passes selected are not
+/// crashing, but the generated output is semantically different from the
+/// input.
+///
+void BugDriver::debugMiscompilation(std::string *Error) {
+ // Make sure something was miscompiled...
+ if (!BugpointIsInterrupted)
+ if (!ReduceMiscompilingPasses(*this).reduceList(PassesToRun, *Error)) {
+ if (Error->empty())
+ errs() << "*** Optimized program matches reference output! No problem"
+ << " detected...\nbugpoint can't help you with your problem!\n";
+ return;
+ }
+
+ outs() << "\n*** Found miscompiling pass"
+ << (getPassesToRun().size() == 1 ? "" : "es") << ": "
+ << getPassesString(getPassesToRun()) << '\n';
+ EmitProgressBitcode(Program, "passinput");
+
+ std::vector<Function *> MiscompiledFunctions =
+ DebugAMiscompilation(*this, TestOptimizer, *Error);
+ if (!Error->empty())
+ return;
+
+ // Output a bunch of bitcode files for the user...
+ outs() << "Outputting reduced bitcode files which expose the problem:\n";
+ ValueToValueMapTy VMap;
+ Module *ToNotOptimize = CloneModule(getProgram(), VMap);
+ Module *ToOptimize = SplitFunctionsOutOfModule(ToNotOptimize,
+ MiscompiledFunctions,
+ VMap);
+
+ outs() << " Non-optimized portion: ";
+ EmitProgressBitcode(ToNotOptimize, "tonotoptimize", true);
+ delete ToNotOptimize; // Delete hacked module.
+
+ outs() << " Portion that is input to optimizer: ";
+ EmitProgressBitcode(ToOptimize, "tooptimize");
+ delete ToOptimize; // Delete hacked module.
+
+ return;
+}
+
+/// CleanupAndPrepareModules - Get the specified modules ready for code
+/// generator testing.
+///
+static void CleanupAndPrepareModules(BugDriver &BD, Module *&Test,
+ Module *Safe) {
+ // Clean up the modules, removing extra cruft that we don't need anymore...
+ Test = BD.performFinalCleanups(Test);
+
+ // If we are executing the JIT, we have several nasty issues to take care of.
+ if (!BD.isExecutingJIT()) return;
+
+ // First, if the main function is in the Safe module, we must add a stub to
+ // the Test module to call into it. Thus, we create a new function `main'
+ // which just calls the old one.
+ if (Function *oldMain = Safe->getFunction("main"))
+ if (!oldMain->isDeclaration()) {
+ // Rename it
+ oldMain->setName("llvm_bugpoint_old_main");
+ // Create a NEW `main' function with the same type in the test module.
+ Function *newMain = Function::Create(oldMain->getFunctionType(),
+ GlobalValue::ExternalLinkage,
+ "main", Test);
+ // Create an `oldmain' prototype in the test module, which will
+ // correspond to the real main function in the Safe module.
+ Function *oldMainProto = Function::Create(oldMain->getFunctionType(),
+ GlobalValue::ExternalLinkage,
+ oldMain->getName(), Test);
+ // Set up and remember the argument list for the main function.
+ std::vector<Value*> args;
+ for (Function::arg_iterator
+ I = newMain->arg_begin(), E = newMain->arg_end(),
+ OI = oldMain->arg_begin(); I != E; ++I, ++OI) {
+ I->setName(OI->getName()); // Copy argument names from oldMain
+ args.push_back(I);
+ }
+
+ // Call the old main function and return its result
+ BasicBlock *BB = BasicBlock::Create(Safe->getContext(), "entry", newMain);
+ CallInst *call = CallInst::Create(oldMainProto, args, "", BB);
+
+ // If the old function's return type wasn't void, return the call's result.
+ ReturnInst::Create(Safe->getContext(), call, BB);
+ }
+
+ // The second nasty issue we must deal with in the JIT is that the Safe
+ // module cannot directly reference any functions defined in the test
+ // module. Instead, we use a JIT API call to dynamically resolve the
+ // symbol.
+
+ // Add the resolver to the Safe module.
+ // Prototype: void *getPointerToNamedFunction(const char* Name)
+ Constant *resolverFunc =
+ Safe->getOrInsertFunction("getPointerToNamedFunction",
+ Type::getInt8PtrTy(Safe->getContext()),
+ Type::getInt8PtrTy(Safe->getContext()),
+ (Type *)0);
+
+ // Use the function we just added to get addresses of functions we need.
+ for (Module::iterator F = Safe->begin(), E = Safe->end(); F != E; ++F) {
+ if (F->isDeclaration() && !F->use_empty() && &*F != resolverFunc &&
+ !F->isIntrinsic() /* ignore intrinsics */) {
+ Function *TestFn = Test->getFunction(F->getName());
+
+ // Don't forward functions that are also external in the test module.
+ if (TestFn && !TestFn->isDeclaration()) {
+ // 1. Add a string constant holding the function's name to the Safe module.
+ Constant *InitArray =
+ ConstantDataArray::getString(F->getContext(), F->getName());
+ GlobalVariable *funcName =
+ new GlobalVariable(*Safe, InitArray->getType(), true /*isConstant*/,
+ GlobalValue::InternalLinkage, InitArray,
+ F->getName() + "_name");
+
+ // 2. Use `GetElementPtr *funcName, 0, 0' to convert the string to an
+ // i8* so it matches the signature of the resolver function.
+
+ // GetElementPtr *funcName, ulong 0, ulong 0
+ std::vector<Constant*> GEPargs(2,
+ Constant::getNullValue(Type::getInt32Ty(F->getContext())));
+ Value *GEP = ConstantExpr::getGetElementPtr(funcName, GEPargs);
+ std::vector<Value*> ResolverArgs;
+ ResolverArgs.push_back(GEP);
+
+ // Rewrite uses of F in global initializers, etc. to uses of a wrapper
+ // function that dynamically resolves the calls to F via our JIT API
+ if (!F->use_empty()) {
+ // Create a new global to hold the cached function pointer.
+ Constant *NullPtr = ConstantPointerNull::get(F->getType());
+ GlobalVariable *Cache =
+ new GlobalVariable(*F->getParent(), F->getType(),
+ false, GlobalValue::InternalLinkage,
+ NullPtr,F->getName()+".fpcache");
+
+ // Construct a new stub function that will re-route calls to F
+ FunctionType *FuncTy = F->getFunctionType();
+ Function *FuncWrapper = Function::Create(FuncTy,
+ GlobalValue::InternalLinkage,
+ F->getName() + "_wrapper",
+ F->getParent());
+ BasicBlock *EntryBB = BasicBlock::Create(F->getContext(),
+ "entry", FuncWrapper);
+ BasicBlock *DoCallBB = BasicBlock::Create(F->getContext(),
+ "usecache", FuncWrapper);
+ BasicBlock *LookupBB = BasicBlock::Create(F->getContext(),
+ "lookupfp", FuncWrapper);
+
+ // Check to see if we already looked up the value.
+ Value *CachedVal = new LoadInst(Cache, "fpcache", EntryBB);
+ Value *IsNull = new ICmpInst(*EntryBB, ICmpInst::ICMP_EQ, CachedVal,
+ NullPtr, "isNull");
+ BranchInst::Create(LookupBB, DoCallBB, IsNull, EntryBB);
+
+ // Resolve the call to function F via the JIT API:
+ //
+ // call resolver(GetElementPtr...)
+ CallInst *Resolver =
+ CallInst::Create(resolverFunc, ResolverArgs, "resolver", LookupBB);
+
+ // Cast the result from the resolver to a correctly-typed function pointer.
+ CastInst *CastedResolver =
+ new BitCastInst(Resolver,
+ PointerType::getUnqual(F->getFunctionType()),
+ "resolverCast", LookupBB);
+
+ // Save the value in our cache.
+ new StoreInst(CastedResolver, Cache, LookupBB);
+ BranchInst::Create(DoCallBB, LookupBB);
+
+ PHINode *FuncPtr = PHINode::Create(NullPtr->getType(), 2,
+ "fp", DoCallBB);
+ FuncPtr->addIncoming(CastedResolver, LookupBB);
+ FuncPtr->addIncoming(CachedVal, EntryBB);
+
+ // Save the argument list.
+ std::vector<Value*> Args;
+ for (Function::arg_iterator i = FuncWrapper->arg_begin(),
+ e = FuncWrapper->arg_end(); i != e; ++i)
+ Args.push_back(i);
+
+ // Pass on the arguments to the real function, return its result
+ if (F->getReturnType()->isVoidTy()) {
+ CallInst::Create(FuncPtr, Args, "", DoCallBB);
+ ReturnInst::Create(F->getContext(), DoCallBB);
+ } else {
+ CallInst *Call = CallInst::Create(FuncPtr, Args,
+ "retval", DoCallBB);
+ ReturnInst::Create(F->getContext(),Call, DoCallBB);
+ }
+
+ // Use the wrapper function instead of the old function
+ F->replaceAllUsesWith(FuncWrapper);
+ }
+ }
+ }
+ }
+
+ if (verifyModule(*Test) || verifyModule(*Safe)) {
+ errs() << "Bugpoint has a bug, which corrupted a module!!\n";
+ abort();
+ }
+}
+
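+// For each external function F handled above, the generated IR is conceptually
+// equivalent to a hand-written lazy-resolving wrapper. As a hypothetical
+// source-level sketch for a function `int foo(int)' (the real code builds this
+// directly as IR; the cache and wrapper names below are illustrative):
+//
+//   extern "C" void *getPointerToNamedFunction(const char *Name);
+//   static int (*foo_fpcache)(int) = 0;           // the "foo.fpcache" global
+//   static int foo_wrapper(int x) {               // the "foo_wrapper" function
+//     if (!foo_fpcache)                           // "entry" -> "lookupfp"
+//       foo_fpcache = (int (*)(int))getPointerToNamedFunction("foo");
+//     return foo_fpcache(x);                      // "usecache" block
+//   }
+//
+// All uses of F in the Safe module are then redirected to the wrapper, so the
+// symbol is resolved from the Test module only when it is first called.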
+
+
+/// TestCodeGenerator - This is the predicate function used to check to see if
+/// the "Test" portion of the program is miscompiled by the code generator under
+/// test. If so, return true. In any case, both module arguments are deleted.
+///
+static bool TestCodeGenerator(BugDriver &BD, Module *Test, Module *Safe,
+ std::string &Error) {
+ CleanupAndPrepareModules(BD, Test, Safe);
+
+ sys::Path TestModuleBC("bugpoint.test.bc");
+ std::string ErrMsg;
+ if (TestModuleBC.makeUnique(true, &ErrMsg)) {
+ errs() << BD.getToolName() << ": Error making unique filename: "
+ << ErrMsg << "\n";
+ exit(1);
+ }
+ if (BD.writeProgramToFile(TestModuleBC.str(), Test)) {
+ errs() << "Error writing bitcode to `" << TestModuleBC.str()
+ << "'\nExiting.";
+ exit(1);
+ }
+ delete Test;
+
+ FileRemover TestModuleBCRemover(TestModuleBC.str(), !SaveTemps);
+
+ // Make the shared library
+ sys::Path SafeModuleBC("bugpoint.safe.bc");
+ if (SafeModuleBC.makeUnique(true, &ErrMsg)) {
+ errs() << BD.getToolName() << ": Error making unique filename: "
+ << ErrMsg << "\n";
+ exit(1);
+ }
+
+ if (BD.writeProgramToFile(SafeModuleBC.str(), Safe)) {
+ errs() << "Error writing bitcode to `" << SafeModuleBC.str()
+ << "'\nExiting.";
+ exit(1);
+ }
+
+ FileRemover SafeModuleBCRemover(SafeModuleBC.str(), !SaveTemps);
+
+ std::string SharedObject = BD.compileSharedObject(SafeModuleBC.str(), Error);
+ if (!Error.empty())
+ return false;
+ delete Safe;
+
+ FileRemover SharedObjectRemover(SharedObject, !SaveTemps);
+
+ // Run the code generator on the `Test' code, loading the shared library.
+ // The function returns whether or not the new output differs from reference.
+ bool Result = BD.diffProgram(BD.getProgram(), TestModuleBC.str(),
+ SharedObject, false, &Error);
+ if (!Error.empty())
+ return false;
+
+ if (Result)
+ errs() << ": still failing!\n";
+ else
+ errs() << ": didn't fail.\n";
+
+ return Result;
+}
+
+
+/// debugCodeGenerator - debug errors in LLC, LLI, or CBE.
+///
+bool BugDriver::debugCodeGenerator(std::string *Error) {
+ if ((void*)SafeInterpreter == (void*)Interpreter) {
+ std::string Result = executeProgramSafely(Program, "bugpoint.safe.out",
+ Error);
+ if (Error->empty()) {
+ outs() << "\n*** The \"safe\" i.e. 'known good' backend cannot match "
+ << "the reference diff. This may be due to a\n front-end "
+ << "bug or a bug in the original program, but this can also "
+ << "happen if bugpoint isn't running the program with the "
+ << "right flags or input.\n I left the result of executing "
+ << "the program with the \"safe\" backend in this file for "
+ << "you: '"
+ << Result << "'.\n";
+ }
+ return true;
+ }
+
+ DisambiguateGlobalSymbols(Program);
+
+ std::vector<Function*> Funcs = DebugAMiscompilation(*this, TestCodeGenerator,
+ *Error);
+ if (!Error->empty())
+ return true;
+
+ // Split the module into the two halves of the program we want.
+ ValueToValueMapTy VMap;
+ Module *ToNotCodeGen = CloneModule(getProgram(), VMap);
+ Module *ToCodeGen = SplitFunctionsOutOfModule(ToNotCodeGen, Funcs, VMap);
+
+ // Condition the modules
+ CleanupAndPrepareModules(*this, ToCodeGen, ToNotCodeGen);
+
+ sys::Path TestModuleBC("bugpoint.test.bc");
+ std::string ErrMsg;
+ if (TestModuleBC.makeUnique(true, &ErrMsg)) {
+ errs() << getToolName() << ": Error making unique filename: "
+ << ErrMsg << "\n";
+ exit(1);
+ }
+
+ if (writeProgramToFile(TestModuleBC.str(), ToCodeGen)) {
+ errs() << "Error writing bitcode to `" << TestModuleBC.str()
+ << "'\nExiting.";
+ exit(1);
+ }
+ delete ToCodeGen;
+
+ // Make the shared library
+ sys::Path SafeModuleBC("bugpoint.safe.bc");
+ if (SafeModuleBC.makeUnique(true, &ErrMsg)) {
+ errs() << getToolName() << ": Error making unique filename: "
+ << ErrMsg << "\n";
+ exit(1);
+ }
+
+ if (writeProgramToFile(SafeModuleBC.str(), ToNotCodeGen)) {
+ errs() << "Error writing bitcode to `" << SafeModuleBC.str()
+ << "'\nExiting.";
+ exit(1);
+ }
+ std::string SharedObject = compileSharedObject(SafeModuleBC.str(), *Error);
+ if (!Error->empty())
+ return true;
+ delete ToNotCodeGen;
+
+ outs() << "You can reproduce the problem with the command line: \n";
+ if (isExecutingJIT()) {
+ outs() << " lli -load " << SharedObject << " " << TestModuleBC.str();
+ } else {
+ outs() << " llc " << TestModuleBC.str() << " -o " << TestModuleBC.str()
+ << ".s\n";
+ outs() << " gcc " << SharedObject << " " << TestModuleBC.str()
+ << ".s -o " << TestModuleBC.str() << ".exe";
+#if defined (HAVE_LINK_R)
+ outs() << " -Wl,-R.";
+#endif
+ outs() << "\n";
+ outs() << " " << TestModuleBC.str() << ".exe";
+ }
+ for (unsigned i = 0, e = InputArgv.size(); i != e; ++i)
+ outs() << " " << InputArgv[i];
+ outs() << '\n';
+ outs() << "The shared object was created with:\n llc -march=c "
+ << SafeModuleBC.str() << " -o temporary.c\n"
+ << " gcc -xc temporary.c -O2 -o " << SharedObject;
+ if (TargetTriple.getArch() == Triple::sparc)
+ outs() << " -G"; // Compile a shared library, `-G' for Sparc
+ else
+ outs() << " -fPIC -shared"; // `-shared' for Linux/X86, maybe others
+
+ outs() << " -fno-strict-aliasing\n";
+
+ return false;
+}
diff --git a/contrib/llvm/tools/bugpoint/OptimizerDriver.cpp b/contrib/llvm/tools/bugpoint/OptimizerDriver.cpp
new file mode 100644
index 0000000..fb090ee
--- /dev/null
+++ b/contrib/llvm/tools/bugpoint/OptimizerDriver.cpp
@@ -0,0 +1,267 @@
+//===- OptimizerDriver.cpp - Allow BugPoint to run passes safely ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an interface that allows bugpoint to run various passes
+// without the threat of a buggy pass corrupting bugpoint (of course, bugpoint
+// may have its own bugs, but that's another story...). It achieves this by
+// running the passes in a child `opt' process. If that child process dies,
+// the parent bugpoint process survives and simply reports the failure. :)
+//
+//===----------------------------------------------------------------------===//
+
+#include "BugDriver.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Support/FileUtilities.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/SystemUtils.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
+
+#define DONT_GET_PLUGIN_LOADER_OPTION
+#include "llvm/Support/PluginLoader.h"
+
+#include <fstream>
+using namespace llvm;
+
+namespace llvm {
+ extern cl::opt<std::string> OutputPrefix;
+}
+
+namespace {
+ // ChildOutput - This option captures the name of the child output file that
+ // is set up by the parent bugpoint process
+ cl::opt<std::string> ChildOutput("child-output", cl::ReallyHidden);
+}
+
+/// writeProgramToFile - This writes the current "Program" to the named bitcode
+/// file. If an error occurs, true is returned.
+///
+bool BugDriver::writeProgramToFile(const std::string &Filename,
+ const Module *M) const {
+ std::string ErrInfo;
+ tool_output_file Out(Filename.c_str(), ErrInfo,
+ raw_fd_ostream::F_Binary);
+ if (ErrInfo.empty()) {
+ WriteBitcodeToFile(M, Out.os());
+ Out.os().close();
+ if (!Out.os().has_error()) {
+ Out.keep();
+ return false;
+ }
+ }
+ Out.os().clear_error();
+ return true;
+}
+
+
+/// EmitProgressBitcode - This function is used to output the current Program
+/// to a file named "bugpoint-ID.bc".
+///
+void BugDriver::EmitProgressBitcode(const Module *M,
+ const std::string &ID,
+ bool NoFlyer) const {
+ // Output the input to the current pass to a bitcode file, emit a message
+ // telling the user how to reproduce it: opt -foo blah.bc
+ //
+ std::string Filename = OutputPrefix + "-" + ID + ".bc";
+ if (writeProgramToFile(Filename, M)) {
+ errs() << "Error opening file '" << Filename << "' for writing!\n";
+ return;
+ }
+
+ outs() << "Emitted bitcode to '" << Filename << "'\n";
+ if (NoFlyer || PassesToRun.empty()) return;
+ outs() << "\n*** You can reproduce the problem with: ";
+ if (UseValgrind) outs() << "valgrind ";
+ outs() << "opt " << Filename;
+ for (unsigned i = 0, e = PluginLoader::getNumPlugins(); i != e; ++i) {
+ outs() << " -load " << PluginLoader::getPlugin(i);
+ }
+ outs() << " " << getPassesString(PassesToRun) << "\n";
+}
+
+cl::opt<bool> SilencePasses("silence-passes",
+ cl::desc("Suppress output of running passes (both stdout and stderr)"));
+
+static cl::list<std::string> OptArgs("opt-args", cl::Positional,
+ cl::desc("<opt arguments>..."),
+ cl::ZeroOrMore, cl::PositionalEatsArgs);
+
+/// runPasses - Run the specified passes on Program, outputting a bitcode file
+/// and writing the filename into OutputFile if successful. If the
+/// optimizations fail for some reason (optimizer crashes), return true,
+/// otherwise return false. If DeleteOutput is set to true, the bitcode is
+/// deleted on success, and the filename string is undefined. This prints to
+/// outs() a single line message indicating whether compilation was successful
+/// or failed.
+///
+bool BugDriver::runPasses(Module *Program,
+ const std::vector<std::string> &Passes,
+ std::string &OutputFilename, bool DeleteOutput,
+ bool Quiet, unsigned NumExtraArgs,
+ const char * const *ExtraArgs) const {
+ // set up the output file name
+ outs().flush();
+ sys::Path uniqueFilename(OutputPrefix + "-output.bc");
+ std::string ErrMsg;
+ if (uniqueFilename.makeUnique(true, &ErrMsg)) {
+ errs() << getToolName() << ": Error making unique filename: "
+ << ErrMsg << "\n";
+ return 1;
+ }
+ OutputFilename = uniqueFilename.str();
+
+ // set up the input file name
+ sys::Path inputFilename(OutputPrefix + "-input.bc");
+ if (inputFilename.makeUnique(true, &ErrMsg)) {
+ errs() << getToolName() << ": Error making unique filename: "
+ << ErrMsg << "\n";
+ return 1;
+ }
+
+ std::string ErrInfo;
+ tool_output_file InFile(inputFilename.c_str(), ErrInfo,
+ raw_fd_ostream::F_Binary);
+
+
+ if (!ErrInfo.empty()) {
+ errs() << "Error opening bitcode file: " << inputFilename.str() << "\n";
+ return 1;
+ }
+ WriteBitcodeToFile(Program, InFile.os());
+ InFile.os().close();
+ if (InFile.os().has_error()) {
+ errs() << "Error writing bitcode file: " << inputFilename.str() << "\n";
+ InFile.os().clear_error();
+ return 1;
+ }
+
+ sys::Path tool = sys::Program::FindProgramByName("opt");
+ if (tool.empty()) {
+ errs() << "Cannot find `opt' in PATH!\n";
+ return 1;
+ }
+
+ // Ok, everything that could go wrong before running opt is done.
+ InFile.keep();
+
+ // set up the child process' arguments
+ SmallVector<const char*, 8> Args;
+ std::string Opt = tool.str();
+ if (UseValgrind) {
+ Args.push_back("valgrind");
+ Args.push_back("--error-exitcode=1");
+ Args.push_back("-q");
+ Args.push_back(tool.c_str());
+ } else
+ Args.push_back(Opt.c_str());
+
+ Args.push_back("-o");
+ Args.push_back(OutputFilename.c_str());
+ for (unsigned i = 0, e = OptArgs.size(); i != e; ++i)
+ Args.push_back(OptArgs[i].c_str());
+ std::vector<std::string> pass_args;
+ for (unsigned i = 0, e = PluginLoader::getNumPlugins(); i != e; ++i) {
+ pass_args.push_back( std::string("-load"));
+ pass_args.push_back( PluginLoader::getPlugin(i));
+ }
+ for (std::vector<std::string>::const_iterator I = Passes.begin(),
+ E = Passes.end(); I != E; ++I )
+ pass_args.push_back( std::string("-") + (*I) );
+ for (std::vector<std::string>::const_iterator I = pass_args.begin(),
+ E = pass_args.end(); I != E; ++I )
+ Args.push_back(I->c_str());
+ Args.push_back(inputFilename.c_str());
+ for (unsigned i = 0; i < NumExtraArgs; ++i)
+ Args.push_back(*ExtraArgs);
+ Args.push_back(0);
+
+ DEBUG(errs() << "\nAbout to run:\t";
+ for (unsigned i = 0, e = Args.size()-1; i != e; ++i)
+ errs() << " " << Args[i];
+ errs() << "\n";
+ );
+
+ sys::Path prog;
+ if (UseValgrind)
+ prog = sys::Program::FindProgramByName("valgrind");
+ else
+ prog = tool;
+
+ // Redirect stdout and stderr to nowhere if SilencePasses is given
+ sys::Path Nowhere;
+ const sys::Path *Redirects[3] = {0, &Nowhere, &Nowhere};
+
+ int result = sys::Program::ExecuteAndWait(prog, Args.data(), 0,
+ (SilencePasses ? Redirects : 0),
+ Timeout, MemoryLimit, &ErrMsg);
+
+ // If we are supposed to delete the bitcode file or if the passes crashed,
+ // remove it now. This may fail if the file was never created, but that's ok.
+ if (DeleteOutput || result != 0)
+ sys::Path(OutputFilename).eraseFromDisk();
+
+ // Remove the temporary input file as well
+ inputFilename.eraseFromDisk();
+
+ if (!Quiet) {
+ if (result == 0)
+ outs() << "Success!\n";
+ else if (result > 0)
+ outs() << "Exited with error code '" << result << "'\n";
+ else if (result < 0) {
+ if (result == -1)
+ outs() << "Execute failed: " << ErrMsg << "\n";
+ else
+ outs() << "Crashed: " << ErrMsg << "\n";
+ }
+ if (result & 0x01000000)
+ outs() << "Dumped core\n";
+ }
+
+ // Was the child successful?
+ return result != 0;
+}
+
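+// The crash isolation above comes down to "run the passes in a separate
+// process and only inspect its exit status". As a hypothetical, minimal sketch
+// of that pattern in plain POSIX (unlike the portable sys::Program interface
+// used here, this ignores redirects, timeouts and memory limits):
+//
+//   #include <sys/types.h>
+//   #include <sys/wait.h>
+//   #include <unistd.h>
+//
+//   static int runInChild(char *const argv[]) {
+//     pid_t pid = fork();
+//     if (pid < 0) return -1;                               // fork failed
+//     if (pid == 0) { execvp(argv[0], argv); _exit(127); }  // child: run tool
+//     int status = 0;
+//     waitpid(pid, &status, 0);                             // parent survives
+//     return WIFEXITED(status) ? WEXITSTATUS(status) : -1;  // crash => -1
+//   }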
+
+/// runPassesOn - Carefully run the specified set of pass on the specified
+/// module, returning the transformed module on success, or a null pointer on
+/// failure.
+Module *BugDriver::runPassesOn(Module *M,
+ const std::vector<std::string> &Passes,
+ bool AutoDebugCrashes, unsigned NumExtraArgs,
+ const char * const *ExtraArgs) {
+ std::string BitcodeResult;
+ if (runPasses(M, Passes, BitcodeResult, false/*delete*/, true/*quiet*/,
+ NumExtraArgs, ExtraArgs)) {
+ if (AutoDebugCrashes) {
+ errs() << " Error running this sequence of passes"
+ << " on the input program!\n";
+ delete swapProgramIn(M);
+ EmitProgressBitcode(M, "pass-error", false);
+ exit(debugOptimizerCrash());
+ }
+ return 0;
+ }
+
+ Module *Ret = ParseInputFile(BitcodeResult, Context);
+ if (Ret == 0) {
+ errs() << getToolName() << ": Error reading bitcode file '"
+ << BitcodeResult << "'!\n";
+ exit(1);
+ }
+ sys::Path(BitcodeResult).eraseFromDisk(); // No longer need the file on disk
+ return Ret;
+}
diff --git a/contrib/llvm/tools/bugpoint/ToolRunner.cpp b/contrib/llvm/tools/bugpoint/ToolRunner.cpp
new file mode 100644
index 0000000..25a2bae
--- /dev/null
+++ b/contrib/llvm/tools/bugpoint/ToolRunner.cpp
@@ -0,0 +1,890 @@
+//===-- ToolRunner.cpp ----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the interfaces described in the ToolRunner.h file.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "toolrunner"
+#include "ToolRunner.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/FileUtilities.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Config/config.h" // for HAVE_LINK_R
+#include <fstream>
+#include <sstream>
+using namespace llvm;
+
+namespace llvm {
+ cl::opt<bool>
+ SaveTemps("save-temps", cl::init(false), cl::desc("Save temporary files"));
+}
+
+namespace {
+ cl::opt<std::string>
+ RemoteClient("remote-client",
+ cl::desc("Remote execution client (rsh/ssh)"));
+
+ cl::opt<std::string>
+ RemoteHost("remote-host",
+ cl::desc("Remote execution (rsh/ssh) host"));
+
+ cl::opt<std::string>
+ RemotePort("remote-port",
+ cl::desc("Remote execution (rsh/ssh) port"));
+
+ cl::opt<std::string>
+ RemoteUser("remote-user",
+ cl::desc("Remote execution (rsh/ssh) user id"));
+
+ cl::opt<std::string>
+ RemoteExtra("remote-extra-options",
+ cl::desc("Remote execution (rsh/ssh) extra options"));
+}
+
+/// RunProgramWithTimeout - This function provides an alternate interface
+/// to the sys::Program::ExecuteAndWait interface.
+/// @see sys::Program::ExecuteAndWait
+static int RunProgramWithTimeout(const sys::Path &ProgramPath,
+ const char **Args,
+ const sys::Path &StdInFile,
+ const sys::Path &StdOutFile,
+ const sys::Path &StdErrFile,
+ unsigned NumSeconds = 0,
+ unsigned MemoryLimit = 0,
+ std::string *ErrMsg = 0) {
+ const sys::Path* redirects[3];
+ redirects[0] = &StdInFile;
+ redirects[1] = &StdOutFile;
+ redirects[2] = &StdErrFile;
+
+#if 0 // For debug purposes
+ {
+ errs() << "RUN:";
+ for (unsigned i = 0; Args[i]; ++i)
+ errs() << " " << Args[i];
+ errs() << "\n";
+ }
+#endif
+
+ return
+ sys::Program::ExecuteAndWait(ProgramPath, Args, 0, redirects,
+ NumSeconds, MemoryLimit, ErrMsg);
+}
+
+/// RunProgramRemotelyWithTimeout - This function runs the given program
+/// remotely using the given remote client and the sys::Program::ExecuteAndWait.
+/// Returns the remote program's exit code, or reports a remote client error if
+/// the client fails. The remote client is required to return 255 on failure,
+/// and the program's exit code otherwise.
+/// @see sys::Program::ExecuteAndWait
+static int RunProgramRemotelyWithTimeout(const sys::Path &RemoteClientPath,
+ const char **Args,
+ const sys::Path &StdInFile,
+ const sys::Path &StdOutFile,
+ const sys::Path &StdErrFile,
+ unsigned NumSeconds = 0,
+ unsigned MemoryLimit = 0) {
+ const sys::Path* redirects[3];
+ redirects[0] = &StdInFile;
+ redirects[1] = &StdOutFile;
+ redirects[2] = &StdErrFile;
+
+#if 0 // For debug purposes
+ {
+ errs() << "RUN:";
+ for (unsigned i = 0; Args[i]; ++i)
+ errs() << " " << Args[i];
+ errs() << "\n";
+ }
+#endif
+
+ // Run the program remotely with the remote client
+ int ReturnCode = sys::Program::ExecuteAndWait(RemoteClientPath, Args,
+ 0, redirects, NumSeconds, MemoryLimit);
+
+ // Did the remote client fail?
+ if (255 == ReturnCode) {
+ std::ostringstream OS;
+ OS << "\nError running remote client:\n ";
+ for (const char **Arg = Args; *Arg; ++Arg)
+ OS << " " << *Arg;
+ OS << "\n";
+
+ // The error message is in the output file, let's print it out from there.
+ std::ifstream ErrorFile(StdOutFile.c_str());
+ if (ErrorFile) {
+ std::copy(std::istreambuf_iterator<char>(ErrorFile),
+ std::istreambuf_iterator<char>(),
+ std::ostreambuf_iterator<char>(OS));
+ ErrorFile.close();
+ }
+
+ errs() << OS.str();
+ }
+
+ return ReturnCode;
+}
+
+static std::string ProcessFailure(sys::Path ProgPath, const char** Args,
+ unsigned Timeout = 0,
+ unsigned MemoryLimit = 0) {
+ std::ostringstream OS;
+ OS << "\nError running tool:\n ";
+ for (const char **Arg = Args; *Arg; ++Arg)
+ OS << " " << *Arg;
+ OS << "\n";
+
+ // Rerun the compiler, capturing any error messages to print them.
+ sys::Path ErrorFilename("bugpoint.program_error_messages");
+ std::string ErrMsg;
+ if (ErrorFilename.makeUnique(true, &ErrMsg)) {
+ errs() << "Error making unique filename: " << ErrMsg << "\n";
+ exit(1);
+ }
+ RunProgramWithTimeout(ProgPath, Args, sys::Path(""), ErrorFilename,
+ ErrorFilename, Timeout, MemoryLimit);
+ // FIXME: check return code ?
+
+ // Print out the error messages generated by GCC if possible...
+ std::ifstream ErrorFile(ErrorFilename.c_str());
+ if (ErrorFile) {
+ std::copy(std::istreambuf_iterator<char>(ErrorFile),
+ std::istreambuf_iterator<char>(),
+ std::ostreambuf_iterator<char>(OS));
+ ErrorFile.close();
+ }
+
+ ErrorFilename.eraseFromDisk();
+ return OS.str();
+}
+
+//===---------------------------------------------------------------------===//
+// LLI Implementation of AbstractInterpreter interface
+//
+namespace {
+ class LLI : public AbstractInterpreter {
+ std::string LLIPath; // The path to the LLI executable
+ std::vector<std::string> ToolArgs; // Args to pass to LLI
+ public:
+ LLI(const std::string &Path, const std::vector<std::string> *Args)
+ : LLIPath(Path) {
+ ToolArgs.clear ();
+ if (Args) { ToolArgs = *Args; }
+ }
+
+ virtual int ExecuteProgram(const std::string &Bitcode,
+ const std::vector<std::string> &Args,
+ const std::string &InputFile,
+ const std::string &OutputFile,
+ std::string *Error,
+ const std::vector<std::string> &GCCArgs,
+ const std::vector<std::string> &SharedLibs =
+ std::vector<std::string>(),
+ unsigned Timeout = 0,
+ unsigned MemoryLimit = 0);
+ };
+}
+
+int LLI::ExecuteProgram(const std::string &Bitcode,
+ const std::vector<std::string> &Args,
+ const std::string &InputFile,
+ const std::string &OutputFile,
+ std::string *Error,
+ const std::vector<std::string> &GCCArgs,
+ const std::vector<std::string> &SharedLibs,
+ unsigned Timeout,
+ unsigned MemoryLimit) {
+ std::vector<const char*> LLIArgs;
+ LLIArgs.push_back(LLIPath.c_str());
+ LLIArgs.push_back("-force-interpreter=true");
+
+ for (std::vector<std::string>::const_iterator i = SharedLibs.begin(),
+ e = SharedLibs.end(); i != e; ++i) {
+ LLIArgs.push_back("-load");
+ LLIArgs.push_back((*i).c_str());
+ }
+
+ // Add any extra LLI args.
+ for (unsigned i = 0, e = ToolArgs.size(); i != e; ++i)
+ LLIArgs.push_back(ToolArgs[i].c_str());
+
+ LLIArgs.push_back(Bitcode.c_str());
+ // Add optional parameters to the running program from Argv
+ for (unsigned i=0, e = Args.size(); i != e; ++i)
+ LLIArgs.push_back(Args[i].c_str());
+ LLIArgs.push_back(0);
+
+ outs() << "<lli>"; outs().flush();
+ DEBUG(errs() << "\nAbout to run:\t";
+ for (unsigned i=0, e = LLIArgs.size()-1; i != e; ++i)
+ errs() << " " << LLIArgs[i];
+ errs() << "\n";
+ );
+ return RunProgramWithTimeout(sys::Path(LLIPath), &LLIArgs[0],
+ sys::Path(InputFile), sys::Path(OutputFile), sys::Path(OutputFile),
+ Timeout, MemoryLimit, Error);
+}
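+
+// The argument vector assembled above corresponds to an lli invocation of
+// roughly this shape (file names are illustrative):
+//
+//   lli -force-interpreter=true [-load libfoo.so ...] [tool args] prog.bc [args...]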
+
+void AbstractInterpreter::anchor() { }
+
+// LLI create method - Try to find the LLI executable
+AbstractInterpreter *AbstractInterpreter::createLLI(const char *Argv0,
+ std::string &Message,
+ const std::vector<std::string> *ToolArgs) {
+ std::string LLIPath =
+ PrependMainExecutablePath("lli", Argv0, (void *)(intptr_t)&createLLI).str();
+ if (!LLIPath.empty()) {
+ Message = "Found lli: " + LLIPath + "\n";
+ return new LLI(LLIPath, ToolArgs);
+ }
+
+ Message = "Cannot find `lli' in executable directory!\n";
+ return 0;
+}
+
+//===---------------------------------------------------------------------===//
+// Custom compiler command implementation of AbstractInterpreter interface
+//
+// Allows using a custom command for compiling the bitcode. This makes it
+// possible, for example, to compile a bitcode fragment without linking or
+// executing it, and then use a custom wrapper script to check for compiler
+// errors.
+namespace {
+ class CustomCompiler : public AbstractInterpreter {
+ std::string CompilerCommand;
+ std::vector<std::string> CompilerArgs;
+ public:
+ CustomCompiler(
+ const std::string &CompilerCmd, std::vector<std::string> CompArgs) :
+ CompilerCommand(CompilerCmd), CompilerArgs(CompArgs) {}
+
+ virtual void compileProgram(const std::string &Bitcode,
+ std::string *Error,
+ unsigned Timeout = 0,
+ unsigned MemoryLimit = 0);
+
+ virtual int ExecuteProgram(const std::string &Bitcode,
+ const std::vector<std::string> &Args,
+ const std::string &InputFile,
+ const std::string &OutputFile,
+ std::string *Error,
+ const std::vector<std::string> &GCCArgs =
+ std::vector<std::string>(),
+ const std::vector<std::string> &SharedLibs =
+ std::vector<std::string>(),
+ unsigned Timeout = 0,
+ unsigned MemoryLimit = 0) {
+ *Error = "Execution not supported with -compile-custom";
+ return -1;
+ }
+ };
+}
+
+void CustomCompiler::compileProgram(const std::string &Bitcode,
+ std::string *Error,
+ unsigned Timeout,
+ unsigned MemoryLimit) {
+
+ std::vector<const char*> ProgramArgs;
+ ProgramArgs.push_back(CompilerCommand.c_str());
+
+ for (std::size_t i = 0; i < CompilerArgs.size(); ++i)
+ ProgramArgs.push_back(CompilerArgs.at(i).c_str());
+ ProgramArgs.push_back(Bitcode.c_str());
+ ProgramArgs.push_back(0);
+
+ if (RunProgramWithTimeout( sys::Path(CompilerCommand), &ProgramArgs[0],
+ sys::Path(), sys::Path(), sys::Path(),
+ Timeout, MemoryLimit, Error))
+ *Error = ProcessFailure(sys::Path(CompilerCommand), &ProgramArgs[0],
+ Timeout, MemoryLimit);
+}
+
+//===---------------------------------------------------------------------===//
+// Custom execution command implementation of AbstractInterpreter interface
+//
+// Allows using a custom command for executing the bitcode. This makes it
+// possible, for example, to invoke a cross compiler for code generation,
+// followed by a simulator that executes the generated binary.
+namespace {
+ class CustomExecutor : public AbstractInterpreter {
+ std::string ExecutionCommand;
+ std::vector<std::string> ExecutorArgs;
+ public:
+ CustomExecutor(
+ const std::string &ExecutionCmd, std::vector<std::string> ExecArgs) :
+ ExecutionCommand(ExecutionCmd), ExecutorArgs(ExecArgs) {}
+
+ virtual int ExecuteProgram(const std::string &Bitcode,
+ const std::vector<std::string> &Args,
+ const std::string &InputFile,
+ const std::string &OutputFile,
+ std::string *Error,
+ const std::vector<std::string> &GCCArgs,
+ const std::vector<std::string> &SharedLibs =
+ std::vector<std::string>(),
+ unsigned Timeout = 0,
+ unsigned MemoryLimit = 0);
+ };
+}
+
+int CustomExecutor::ExecuteProgram(const std::string &Bitcode,
+ const std::vector<std::string> &Args,
+ const std::string &InputFile,
+ const std::string &OutputFile,
+ std::string *Error,
+ const std::vector<std::string> &GCCArgs,
+ const std::vector<std::string> &SharedLibs,
+ unsigned Timeout,
+ unsigned MemoryLimit) {
+
+ std::vector<const char*> ProgramArgs;
+ ProgramArgs.push_back(ExecutionCommand.c_str());
+
+ for (std::size_t i = 0; i < ExecutorArgs.size(); ++i)
+ ProgramArgs.push_back(ExecutorArgs.at(i).c_str());
+ ProgramArgs.push_back(Bitcode.c_str());
+
+ // Add optional parameters to the running program from Argv
+ for (unsigned i = 0, e = Args.size(); i != e; ++i)
+ ProgramArgs.push_back(Args[i].c_str());
+ ProgramArgs.push_back(0);
+
+ return RunProgramWithTimeout(
+ sys::Path(ExecutionCommand),
+ &ProgramArgs[0], sys::Path(InputFile), sys::Path(OutputFile),
+ sys::Path(OutputFile), Timeout, MemoryLimit, Error);
+}
+
+// Tokenize the CommandLine into the command and its arguments, so that a full
+// command line can be given as the command instead of just the program to
+// execute. We cannot simply pass the whole string after the command as a
+// single argument, because then the program would see only a single command
+// line argument (with spaces in it: "foo bar" instead of "foo" and "bar").
+//
+// code borrowed from:
+// http://oopweb.com/CPP/Documents/CPPHOWTO/Volume/C++Programming-HOWTO-7.html
+static void lexCommand(std::string &Message, const std::string &CommandLine,
+ std::string &CmdPath, std::vector<std::string> &Args) {
+
+ std::string Command = "";
+ std::string delimiters = " ";
+
+ std::string::size_type lastPos = CommandLine.find_first_not_of(delimiters, 0);
+ std::string::size_type pos = CommandLine.find_first_of(delimiters, lastPos);
+
+ while (std::string::npos != pos || std::string::npos != lastPos) {
+ std::string token = CommandLine.substr(lastPos, pos - lastPos);
+ if (Command == "")
+ Command = token;
+ else
+ Args.push_back(token);
+ // Skip delimiters. Note the "not_of"
+ lastPos = CommandLine.find_first_not_of(delimiters, pos);
+ // Find next "non-delimiter"
+ pos = CommandLine.find_first_of(delimiters, lastPos);
+ }
+
+ CmdPath = sys::Program::FindProgramByName(Command).str();
+ if (CmdPath.empty()) {
+ Message =
+ std::string("Cannot find '") + Command +
+ "' in PATH!\n";
+ return;
+ }
+
+ Message = "Found command in: " + CmdPath + "\n";
+}
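+
+// For example (hypothetical input), lexCommand(Msg, "clang -c -O2", Path, Args)
+// resolves Path to the `clang' binary found on PATH and fills Args with
+// {"-c", "-O2"}, while Msg reports what was found (or that nothing was).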
+
+// Custom execution environment create method, takes the execution command
+// as arguments
+AbstractInterpreter *AbstractInterpreter::createCustomCompiler(
+ std::string &Message,
+ const std::string &CompileCommandLine) {
+
+ std::string CmdPath;
+ std::vector<std::string> Args;
+ lexCommand(Message, CompileCommandLine, CmdPath, Args);
+ if (CmdPath.empty())
+ return 0;
+
+ return new CustomCompiler(CmdPath, Args);
+}
+
+// Custom execution environment create method, takes the execution command
+// as arguments
+AbstractInterpreter *AbstractInterpreter::createCustomExecutor(
+ std::string &Message,
+ const std::string &ExecCommandLine) {
+
+
+ std::string CmdPath;
+ std::vector<std::string> Args;
+ lexCommand(Message, ExecCommandLine, CmdPath, Args);
+ if (CmdPath.empty())
+ return 0;
+
+ return new CustomExecutor(CmdPath, Args);
+}
+
+//===----------------------------------------------------------------------===//
+// LLC Implementation of AbstractInterpreter interface
+//
+GCC::FileType LLC::OutputCode(const std::string &Bitcode,
+ sys::Path &OutputAsmFile, std::string &Error,
+ unsigned Timeout, unsigned MemoryLimit) {
+ const char *Suffix = (UseIntegratedAssembler ? ".llc.o" : ".llc.s");
+ sys::Path uniqueFile(Bitcode + Suffix);
+ std::string ErrMsg;
+ if (uniqueFile.makeUnique(true, &ErrMsg)) {
+ errs() << "Error making unique filename: " << ErrMsg << "\n";
+ exit(1);
+ }
+ OutputAsmFile = uniqueFile;
+ std::vector<const char *> LLCArgs;
+ LLCArgs.push_back(LLCPath.c_str());
+
+ // Add any extra LLC args.
+ for (unsigned i = 0, e = ToolArgs.size(); i != e; ++i)
+ LLCArgs.push_back(ToolArgs[i].c_str());
+
+ LLCArgs.push_back("-o");
+ LLCArgs.push_back(OutputAsmFile.c_str()); // Output to the Asm file
+ LLCArgs.push_back(Bitcode.c_str()); // This is the input bitcode
+
+ if (UseIntegratedAssembler)
+ LLCArgs.push_back("-filetype=obj");
+
+ LLCArgs.push_back (0);
+
+ outs() << (UseIntegratedAssembler ? "<llc-ia>" : "<llc>");
+ outs().flush();
+ DEBUG(errs() << "\nAbout to run:\t";
+ for (unsigned i = 0, e = LLCArgs.size()-1; i != e; ++i)
+ errs() << " " << LLCArgs[i];
+ errs() << "\n";
+ );
+ if (RunProgramWithTimeout(sys::Path(LLCPath), &LLCArgs[0],
+ sys::Path(), sys::Path(), sys::Path(),
+ Timeout, MemoryLimit))
+ Error = ProcessFailure(sys::Path(LLCPath), &LLCArgs[0],
+ Timeout, MemoryLimit);
+ return UseIntegratedAssembler ? GCC::ObjectFile : GCC::AsmFile;
+}
+
+void LLC::compileProgram(const std::string &Bitcode, std::string *Error,
+ unsigned Timeout, unsigned MemoryLimit) {
+ sys::Path OutputAsmFile;
+ OutputCode(Bitcode, OutputAsmFile, *Error, Timeout, MemoryLimit);
+ OutputAsmFile.eraseFromDisk();
+}
+
+int LLC::ExecuteProgram(const std::string &Bitcode,
+ const std::vector<std::string> &Args,
+ const std::string &InputFile,
+ const std::string &OutputFile,
+ std::string *Error,
+ const std::vector<std::string> &ArgsForGCC,
+ const std::vector<std::string> &SharedLibs,
+ unsigned Timeout,
+ unsigned MemoryLimit) {
+
+ sys::Path OutputAsmFile;
+ GCC::FileType FileKind = OutputCode(Bitcode, OutputAsmFile, *Error, Timeout,
+ MemoryLimit);
+ FileRemover OutFileRemover(OutputAsmFile.str(), !SaveTemps);
+
+ std::vector<std::string> GCCArgs(ArgsForGCC);
+ GCCArgs.insert(GCCArgs.end(), SharedLibs.begin(), SharedLibs.end());
+
+ // Assuming LLC worked, compile the result with GCC and run it.
+ return gcc->ExecuteProgram(OutputAsmFile.str(), Args, FileKind,
+ InputFile, OutputFile, Error, GCCArgs,
+ Timeout, MemoryLimit);
+}
+
+/// createLLC - Try to find the LLC executable
+///
+LLC *AbstractInterpreter::createLLC(const char *Argv0,
+ std::string &Message,
+ const std::string &GCCBinary,
+ const std::vector<std::string> *Args,
+ const std::vector<std::string> *GCCArgs,
+ bool UseIntegratedAssembler) {
+ std::string LLCPath =
+ PrependMainExecutablePath("llc", Argv0, (void *)(intptr_t)&createLLC).str();
+ if (LLCPath.empty()) {
+ Message = "Cannot find `llc' in executable directory!\n";
+ return 0;
+ }
+
+ Message = "Found llc: " + LLCPath + "\n";
+ GCC *gcc = GCC::create(Message, GCCBinary, GCCArgs);
+ if (!gcc) {
+ errs() << Message << "\n";
+ exit(1);
+ }
+ return new LLC(LLCPath, gcc, Args, UseIntegratedAssembler);
+}
+
+//===---------------------------------------------------------------------===//
+// JIT Implementation of AbstractInterpreter interface
+//
+namespace {
+ class JIT : public AbstractInterpreter {
+ std::string LLIPath; // The path to the LLI executable
+ std::vector<std::string> ToolArgs; // Args to pass to LLI
+ public:
+ JIT(const std::string &Path, const std::vector<std::string> *Args)
+ : LLIPath(Path) {
+ ToolArgs.clear ();
+ if (Args) { ToolArgs = *Args; }
+ }
+
+ virtual int ExecuteProgram(const std::string &Bitcode,
+ const std::vector<std::string> &Args,
+ const std::string &InputFile,
+ const std::string &OutputFile,
+ std::string *Error,
+ const std::vector<std::string> &GCCArgs =
+ std::vector<std::string>(),
+ const std::vector<std::string> &SharedLibs =
+ std::vector<std::string>(),
+ unsigned Timeout = 0,
+ unsigned MemoryLimit = 0);
+ };
+}
+
+int JIT::ExecuteProgram(const std::string &Bitcode,
+ const std::vector<std::string> &Args,
+ const std::string &InputFile,
+ const std::string &OutputFile,
+ std::string *Error,
+ const std::vector<std::string> &GCCArgs,
+ const std::vector<std::string> &SharedLibs,
+ unsigned Timeout,
+ unsigned MemoryLimit) {
+ // Construct a vector of parameters, incorporating those from the command-line
+ std::vector<const char*> JITArgs;
+ JITArgs.push_back(LLIPath.c_str());
+ JITArgs.push_back("-force-interpreter=false");
+
+ // Add any extra LLI args.
+ for (unsigned i = 0, e = ToolArgs.size(); i != e; ++i)
+ JITArgs.push_back(ToolArgs[i].c_str());
+
+ for (unsigned i = 0, e = SharedLibs.size(); i != e; ++i) {
+ JITArgs.push_back("-load");
+ JITArgs.push_back(SharedLibs[i].c_str());
+ }
+ JITArgs.push_back(Bitcode.c_str());
+ // Add optional parameters to the running program from Argv
+ for (unsigned i=0, e = Args.size(); i != e; ++i)
+ JITArgs.push_back(Args[i].c_str());
+ JITArgs.push_back(0);
+
+ outs() << "<jit>"; outs().flush();
+ DEBUG(errs() << "\nAbout to run:\t";
+ for (unsigned i=0, e = JITArgs.size()-1; i != e; ++i)
+ errs() << " " << JITArgs[i];
+ errs() << "\n";
+ );
+ DEBUG(errs() << "\nSending output to " << OutputFile << "\n");
+ return RunProgramWithTimeout(sys::Path(LLIPath), &JITArgs[0],
+ sys::Path(InputFile), sys::Path(OutputFile), sys::Path(OutputFile),
+ Timeout, MemoryLimit, Error);
+}
+
+/// createJIT - Try to find the LLI executable
+///
+AbstractInterpreter *AbstractInterpreter::createJIT(const char *Argv0,
+ std::string &Message, const std::vector<std::string> *Args) {
+ std::string LLIPath =
+ PrependMainExecutablePath("lli", Argv0, (void *)(intptr_t)&createJIT).str();
+ if (!LLIPath.empty()) {
+ Message = "Found lli: " + LLIPath + "\n";
+ return new JIT(LLIPath, Args);
+ }
+
+ Message = "Cannot find `lli' in executable directory!\n";
+ return 0;
+}
+
+//===---------------------------------------------------------------------===//
+// GCC abstraction
+//
+
+static bool IsARMArchitecture(std::vector<const char*> Args) {
+ for (std::vector<const char*>::const_iterator
+ I = Args.begin(), E = Args.end(); I != E; ++I) {
+ if (StringRef(*I).equals_lower("-arch")) {
+ ++I;
+ if (I != E && StringRef(*I).substr(0, strlen("arm")).equals_lower("arm"))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+int GCC::ExecuteProgram(const std::string &ProgramFile,
+ const std::vector<std::string> &Args,
+ FileType fileType,
+ const std::string &InputFile,
+ const std::string &OutputFile,
+ std::string *Error,
+ const std::vector<std::string> &ArgsForGCC,
+ unsigned Timeout,
+ unsigned MemoryLimit) {
+ std::vector<const char*> GCCArgs;
+
+ GCCArgs.push_back(GCCPath.c_str());
+
+ if (TargetTriple.getArch() == Triple::x86)
+ GCCArgs.push_back("-m32");
+
+ for (std::vector<std::string>::const_iterator
+ I = gccArgs.begin(), E = gccArgs.end(); I != E; ++I)
+ GCCArgs.push_back(I->c_str());
+
+ // Specify -x explicitly in case the extension is wonky
+ if (fileType != ObjectFile) {
+ GCCArgs.push_back("-x");
+ if (fileType == CFile) {
+ GCCArgs.push_back("c");
+ GCCArgs.push_back("-fno-strict-aliasing");
+ } else {
+ GCCArgs.push_back("assembler");
+
+ // For ARM architectures we don't want this flag. bugpoint isn't
+ // explicitly told what architecture it is working on, so we get
+ // it from gcc flags
+ if (TargetTriple.isOSDarwin() && !IsARMArchitecture(GCCArgs))
+ GCCArgs.push_back("-force_cpusubtype_ALL");
+ }
+ }
+
+ GCCArgs.push_back(ProgramFile.c_str()); // Specify the input filename.
+
+ GCCArgs.push_back("-x");
+ GCCArgs.push_back("none");
+ GCCArgs.push_back("-o");
+ sys::Path OutputBinary (ProgramFile+".gcc.exe");
+ std::string ErrMsg;
+ if (OutputBinary.makeUnique(true, &ErrMsg)) {
+ errs() << "Error making unique filename: " << ErrMsg << "\n";
+ exit(1);
+ }
+ GCCArgs.push_back(OutputBinary.c_str()); // Output to the right file...
+
+ // Add any arguments intended for GCC. We place them here because they are
+ // most likely -L and -l options that need to come before other libraries but
+ // after the source. Other options won't be sensitive to placement on the
+ // command line, so this should be safe.
+ for (unsigned i = 0, e = ArgsForGCC.size(); i != e; ++i)
+ GCCArgs.push_back(ArgsForGCC[i].c_str());
+
+ GCCArgs.push_back("-lm"); // Hard-code the math library...
+ GCCArgs.push_back("-O2"); // Optimize the program a bit...
+#if defined (HAVE_LINK_R)
+ GCCArgs.push_back("-Wl,-R."); // Search this dir for .so files
+#endif
+ if (TargetTriple.getArch() == Triple::sparc)
+ GCCArgs.push_back("-mcpu=v9");
+ GCCArgs.push_back(0); // NULL terminator
+
+ outs() << "<gcc>"; outs().flush();
+ DEBUG(errs() << "\nAbout to run:\t";
+ for (unsigned i = 0, e = GCCArgs.size()-1; i != e; ++i)
+ errs() << " " << GCCArgs[i];
+ errs() << "\n";
+ );
+ if (RunProgramWithTimeout(GCCPath, &GCCArgs[0], sys::Path(), sys::Path(),
+ sys::Path())) {
+ *Error = ProcessFailure(GCCPath, &GCCArgs[0]);
+ return -1;
+ }
+
+ std::vector<const char*> ProgramArgs;
+
+ // Declared here so that the destructor only runs after
+ // ProgramArgs is used.
+ std::string Exec;
+
+ if (RemoteClientPath.isEmpty())
+ ProgramArgs.push_back(OutputBinary.c_str());
+ else {
+ ProgramArgs.push_back(RemoteClientPath.c_str());
+ ProgramArgs.push_back(RemoteHost.c_str());
+ if (!RemoteUser.empty()) {
+ ProgramArgs.push_back("-l");
+ ProgramArgs.push_back(RemoteUser.c_str());
+ }
+ if (!RemotePort.empty()) {
+ ProgramArgs.push_back("-p");
+ ProgramArgs.push_back(RemotePort.c_str());
+ }
+ if (!RemoteExtra.empty()) {
+ ProgramArgs.push_back(RemoteExtra.c_str());
+ }
+
+ // Full path to the binary. We need to cd to the exec directory because
+ // there is a dylib there that the exec expects to find in the CWD
+ char* env_pwd = getenv("PWD");
+ Exec = "cd ";
+ Exec += env_pwd;
+ Exec += "; ./";
+ Exec += OutputBinary.c_str();
+ ProgramArgs.push_back(Exec.c_str());
+ }
+
+ // Add optional parameters to the running program from Argv
+ for (unsigned i = 0, e = Args.size(); i != e; ++i)
+ ProgramArgs.push_back(Args[i].c_str());
+ ProgramArgs.push_back(0); // NULL terminator
+
+ // Now that we have a binary, run it!
+ outs() << "<program>"; outs().flush();
+ DEBUG(errs() << "\nAbout to run:\t";
+ for (unsigned i = 0, e = ProgramArgs.size()-1; i != e; ++i)
+ errs() << " " << ProgramArgs[i];
+ errs() << "\n";
+ );
+
+ FileRemover OutputBinaryRemover(OutputBinary.str(), !SaveTemps);
+
+ if (RemoteClientPath.isEmpty()) {
+ DEBUG(errs() << "<run locally>");
+ int ExitCode = RunProgramWithTimeout(OutputBinary, &ProgramArgs[0],
+ sys::Path(InputFile), sys::Path(OutputFile), sys::Path(OutputFile),
+ Timeout, MemoryLimit, Error);
+ // Treat a signal (usually SIGSEGV) or timeout as part of the program output
+ // so that crash-causing miscompilation is handled seamlessly.
+ if (ExitCode < -1) {
+ std::ofstream outFile(OutputFile.c_str(), std::ios_base::app);
+ outFile << *Error << '\n';
+ outFile.close();
+ Error->clear();
+ }
+ return ExitCode;
+ } else {
+ outs() << "<run remotely>"; outs().flush();
+ return RunProgramRemotelyWithTimeout(sys::Path(RemoteClientPath),
+ &ProgramArgs[0], sys::Path(InputFile), sys::Path(OutputFile),
+ sys::Path(OutputFile), Timeout, MemoryLimit);
+ }
+}
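+
+// On a typical x86 Linux host, the command assembled above amounts to roughly
+// the following (file names are illustrative):
+//
+//   gcc -m32 -x assembler prog.llc.s -x none -o prog.gcc.exe <ArgsForGCC> -lm -O2
+//
+// followed by running ./prog.gcc.exe (locally, or through the remote client)
+// with the program's arguments and its I/O redirected to the requested files.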
+
+int GCC::MakeSharedObject(const std::string &InputFile, FileType fileType,
+ std::string &OutputFile,
+ const std::vector<std::string> &ArgsForGCC,
+ std::string &Error) {
+ sys::Path uniqueFilename(InputFile+LTDL_SHLIB_EXT);
+ std::string ErrMsg;
+ if (uniqueFilename.makeUnique(true, &ErrMsg)) {
+ errs() << "Error making unique filename: " << ErrMsg << "\n";
+ exit(1);
+ }
+ OutputFile = uniqueFilename.str();
+
+ std::vector<const char*> GCCArgs;
+
+ GCCArgs.push_back(GCCPath.c_str());
+
+ if (TargetTriple.getArch() == Triple::x86)
+ GCCArgs.push_back("-m32");
+
+ for (std::vector<std::string>::const_iterator
+ I = gccArgs.begin(), E = gccArgs.end(); I != E; ++I)
+ GCCArgs.push_back(I->c_str());
+
+ // Compile the C/asm file into a shared object
+ if (fileType != ObjectFile) {
+ GCCArgs.push_back("-x");
+ GCCArgs.push_back(fileType == AsmFile ? "assembler" : "c");
+ }
+ GCCArgs.push_back("-fno-strict-aliasing");
+ GCCArgs.push_back(InputFile.c_str()); // Specify the input filename.
+ GCCArgs.push_back("-x");
+ GCCArgs.push_back("none");
+ if (TargetTriple.getArch() == Triple::sparc)
+ GCCArgs.push_back("-G"); // Compile a shared library, `-G' for Sparc
+ else if (TargetTriple.isOSDarwin()) {
+ // link all source files into a single module in data segment, rather than
+ // generating blocks. dynamic_lookup requires that you set
+ // MACOSX_DEPLOYMENT_TARGET=10.3 in your env. FIXME: it would be better for
+ // bugpoint to just pass that in the environment of GCC.
+ GCCArgs.push_back("-single_module");
+ GCCArgs.push_back("-dynamiclib"); // `-dynamiclib' for MacOS X/PowerPC
+ GCCArgs.push_back("-undefined");
+ GCCArgs.push_back("dynamic_lookup");
+ } else
+ GCCArgs.push_back("-shared"); // `-shared' for Linux/X86, maybe others
+
+ if (TargetTriple.getArch() == Triple::x86_64)
+ GCCArgs.push_back("-fPIC"); // Requires shared objs to contain PIC
+
+ if (TargetTriple.getArch() == Triple::sparc)
+ GCCArgs.push_back("-mcpu=v9");
+
+ GCCArgs.push_back("-o");
+ GCCArgs.push_back(OutputFile.c_str()); // Output to the right filename.
+ GCCArgs.push_back("-O2"); // Optimize the program a bit.
+
+
+
+ // Add any arguments intended for GCC. We place them here because they are
+ // most likely -L and -l options that need to come before other libraries but
+ // after the source. Other options won't be sensitive to placement on the
+ // command line, so this should be safe.
+ for (unsigned i = 0, e = ArgsForGCC.size(); i != e; ++i)
+ GCCArgs.push_back(ArgsForGCC[i].c_str());
+ GCCArgs.push_back(0); // NULL terminator
+
+
+
+ outs() << "<gcc>"; outs().flush();
+ DEBUG(errs() << "\nAbout to run:\t";
+ for (unsigned i = 0, e = GCCArgs.size()-1; i != e; ++i)
+ errs() << " " << GCCArgs[i];
+ errs() << "\n";
+ );
+ if (RunProgramWithTimeout(GCCPath, &GCCArgs[0], sys::Path(), sys::Path(),
+ sys::Path())) {
+ Error = ProcessFailure(GCCPath, &GCCArgs[0]);
+ return 1;
+ }
+ return 0;
+}
+
+/// create - Try to find the `gcc' executable
+///
+GCC *GCC::create(std::string &Message,
+ const std::string &GCCBinary,
+ const std::vector<std::string> *Args) {
+ sys::Path GCCPath = sys::Program::FindProgramByName(GCCBinary);
+ if (GCCPath.isEmpty()) {
+ Message = "Cannot find `"+ GCCBinary +"' in PATH!\n";
+ return 0;
+ }
+
+ sys::Path RemoteClientPath;
+ if (!RemoteClient.empty())
+ RemoteClientPath = sys::Program::FindProgramByName(RemoteClient);
+
+ Message = "Found gcc: " + GCCPath.str() + "\n";
+ return new GCC(GCCPath, RemoteClientPath, Args);
+}
diff --git a/contrib/llvm/tools/bugpoint/ToolRunner.h b/contrib/llvm/tools/bugpoint/ToolRunner.h
new file mode 100644
index 0000000..7b93394
--- /dev/null
+++ b/contrib/llvm/tools/bugpoint/ToolRunner.h
@@ -0,0 +1,248 @@
+//===-- tools/bugpoint/ToolRunner.h -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file exposes an abstraction around a platform C compiler, used to
+// compile C and assembly code. It also exposes an "AbstractInterpreter"
+// interface, which is used to execute code using one of the LLVM execution
+// engines.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef BUGPOINT_TOOLRUNNER_H
+#define BUGPOINT_TOOLRUNNER_H
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/SystemUtils.h"
+#include "llvm/Support/Path.h"
+#include <exception>
+#include <vector>
+
+namespace llvm {
+
+extern cl::opt<bool> SaveTemps;
+extern Triple TargetTriple;
+
+class CBE;
+class LLC;
+
+//===---------------------------------------------------------------------===//
+// GCC abstraction
+//
+class GCC {
+ sys::Path GCCPath; // The path to the gcc executable.
+ sys::Path RemoteClientPath; // The path to the rsh / ssh executable.
+ std::vector<std::string> gccArgs; // GCC-specific arguments.
+ GCC(const sys::Path &gccPath, const sys::Path &RemotePath,
+ const std::vector<std::string> *GCCArgs)
+ : GCCPath(gccPath), RemoteClientPath(RemotePath) {
+ if (GCCArgs) gccArgs = *GCCArgs;
+ }
+public:
+ enum FileType { AsmFile, ObjectFile, CFile };
+
+ static GCC *create(std::string &Message,
+ const std::string &GCCBinary,
+ const std::vector<std::string> *Args);
+
+ /// ExecuteProgram - Execute the program specified by "ProgramFile" (which is
+ /// either a .s file, or a .c file, specified by FileType), with the specified
+ /// arguments. Standard input is specified with InputFile, and standard
+ /// Output is captured to the specified OutputFile location. The GCCArgs list
+ /// can also name optional native shared objects to be linked into the
+ /// program for execution.
+ ///
+ int ExecuteProgram(const std::string &ProgramFile,
+ const std::vector<std::string> &Args,
+ FileType fileType,
+ const std::string &InputFile,
+ const std::string &OutputFile,
+ std::string *Error = 0,
+ const std::vector<std::string> &GCCArgs =
+ std::vector<std::string>(),
+ unsigned Timeout = 0,
+ unsigned MemoryLimit = 0);
+
+ /// MakeSharedObject - This compiles the specified file (which is either a .c
+ /// file or a .s file) into a shared object.
+ ///
+ int MakeSharedObject(const std::string &InputFile, FileType fileType,
+ std::string &OutputFile,
+ const std::vector<std::string> &ArgsForGCC,
+ std::string &Error);
+};
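+
+// A minimal usage sketch for this wrapper (illustrative only; "test.c" is a
+// placeholder input and error handling is elided):
+//
+//   std::string Msg, Err;
+//   GCC *gcc = GCC::create(Msg, "gcc", /*Args=*/0);
+//   if (!gcc) { errs() << Msg; return; }
+//   std::vector<std::string> NoArgs;
+//   int RC = gcc->ExecuteProgram("test.c", NoArgs, GCC::CFile,
+//                                "/dev/null", "test.out", &Err);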
+
+
+//===---------------------------------------------------------------------===//
+/// AbstractInterpreter Class - Subclasses of this class are used to execute
+/// LLVM bitcode in a variety of ways. This abstract interface hides this
+/// complexity behind a simple interface.
+///
+class AbstractInterpreter {
+ virtual void anchor();
+public:
+ static CBE *createCBE(const char *Argv0, std::string &Message,
+ const std::string &GCCBinary,
+ const std::vector<std::string> *Args = 0,
+ const std::vector<std::string> *GCCArgs = 0);
+ static LLC *createLLC(const char *Argv0, std::string &Message,
+ const std::string &GCCBinary,
+ const std::vector<std::string> *Args = 0,
+ const std::vector<std::string> *GCCArgs = 0,
+ bool UseIntegratedAssembler = false);
+
+ static AbstractInterpreter* createLLI(const char *Argv0, std::string &Message,
+ const std::vector<std::string> *Args=0);
+
+ static AbstractInterpreter* createJIT(const char *Argv0, std::string &Message,
+ const std::vector<std::string> *Args=0);
+
+ static AbstractInterpreter*
+ createCustomCompiler(std::string &Message,
+ const std::string &CompileCommandLine);
+
+ static AbstractInterpreter*
+ createCustomExecutor(std::string &Message,
+ const std::string &ExecCommandLine);
+
+
+ virtual ~AbstractInterpreter() {}
+
+ /// compileProgram - Compile the specified program from bitcode to executable
+  /// code. This does not produce any output; it is only used when debugging
+  /// the code generator. If the code generator fails, it sets Error.
+ virtual void compileProgram(const std::string &Bitcode, std::string *Error,
+ unsigned Timeout = 0, unsigned MemoryLimit = 0) {}
+
+ /// OutputCode - Compile the specified program from bitcode to code
+ /// understood by the GCC driver (either C or asm). If the code generator
+ /// fails, it sets Error, otherwise, this function returns the type of code
+ /// emitted.
+ virtual GCC::FileType OutputCode(const std::string &Bitcode,
+ sys::Path &OutFile, std::string &Error,
+ unsigned Timeout = 0,
+ unsigned MemoryLimit = 0) {
+ Error = "OutputCode not supported by this AbstractInterpreter!";
+ return GCC::AsmFile;
+ }
+
+ /// ExecuteProgram - Run the specified bitcode file, emitting output to the
+  /// specified filename. This returns the exit code of the program, or sets
+  /// Error if a problem was encountered that prevented execution of
+ /// the program.
+ ///
+ virtual int ExecuteProgram(const std::string &Bitcode,
+ const std::vector<std::string> &Args,
+ const std::string &InputFile,
+ const std::string &OutputFile,
+ std::string *Error,
+ const std::vector<std::string> &GCCArgs =
+ std::vector<std::string>(),
+ const std::vector<std::string> &SharedLibs =
+ std::vector<std::string>(),
+ unsigned Timeout = 0,
+ unsigned MemoryLimit = 0) = 0;
+};
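+
+// A minimal usage sketch against this interface (illustrative only; Argv0,
+// "test.bc", and the output paths are placeholders, and error handling is
+// elided):
+//
+//   std::string Msg, Err;
+//   AbstractInterpreter *AI = AbstractInterpreter::createLLI(Argv0, Msg);
+//   if (!AI) { errs() << Msg; return; }
+//   std::vector<std::string> ProgArgs;
+//   int RC = AI->ExecuteProgram("test.bc", ProgArgs, "/dev/null",
+//                               "test.out", &Err);
+//   delete AI;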
+
+//===---------------------------------------------------------------------===//
+// CBE Implementation of AbstractInterpreter interface
+//
+class CBE : public AbstractInterpreter {
+ sys::Path LLCPath; // The path to the `llc' executable.
+ std::vector<std::string> ToolArgs; // Extra args to pass to LLC.
+ GCC *gcc;
+public:
+ CBE(const sys::Path &llcPath, GCC *Gcc,
+ const std::vector<std::string> *Args)
+ : LLCPath(llcPath), gcc(Gcc) {
+ ToolArgs.clear ();
+ if (Args) ToolArgs = *Args;
+ }
+ ~CBE() { delete gcc; }
+
+ /// compileProgram - Compile the specified program from bitcode to executable
+  /// code. This does not produce any output; it is only used when debugging
+  /// the code generator. If the code generator fails, it sets Error.
+ virtual void compileProgram(const std::string &Bitcode, std::string *Error,
+ unsigned Timeout = 0, unsigned MemoryLimit = 0);
+
+ virtual int ExecuteProgram(const std::string &Bitcode,
+ const std::vector<std::string> &Args,
+ const std::string &InputFile,
+ const std::string &OutputFile,
+ std::string *Error,
+ const std::vector<std::string> &GCCArgs =
+ std::vector<std::string>(),
+ const std::vector<std::string> &SharedLibs =
+ std::vector<std::string>(),
+ unsigned Timeout = 0,
+ unsigned MemoryLimit = 0);
+
+ /// OutputCode - Compile the specified program from bitcode to code
+ /// understood by the GCC driver (either C or asm). If the code generator
+ /// fails, it sets Error, otherwise, this function returns the type of code
+ /// emitted.
+ virtual GCC::FileType OutputCode(const std::string &Bitcode,
+ sys::Path &OutFile, std::string &Error,
+ unsigned Timeout = 0,
+ unsigned MemoryLimit = 0);
+};
+
+
+//===---------------------------------------------------------------------===//
+// LLC Implementation of AbstractInterpreter interface
+//
+class LLC : public AbstractInterpreter {
+ std::string LLCPath; // The path to the LLC executable.
+ std::vector<std::string> ToolArgs; // Extra args to pass to LLC.
+ GCC *gcc;
+ bool UseIntegratedAssembler;
+public:
+ LLC(const std::string &llcPath, GCC *Gcc,
+ const std::vector<std::string> *Args,
+ bool useIntegratedAssembler)
+ : LLCPath(llcPath), gcc(Gcc),
+ UseIntegratedAssembler(useIntegratedAssembler) {
+ ToolArgs.clear();
+ if (Args) ToolArgs = *Args;
+ }
+ ~LLC() { delete gcc; }
+
+ /// compileProgram - Compile the specified program from bitcode to executable
+  /// code. This does not produce any output; it is only used when debugging
+  /// the code generator. If the code generator fails, it sets Error.
+ virtual void compileProgram(const std::string &Bitcode, std::string *Error,
+ unsigned Timeout = 0, unsigned MemoryLimit = 0);
+
+ virtual int ExecuteProgram(const std::string &Bitcode,
+ const std::vector<std::string> &Args,
+ const std::string &InputFile,
+ const std::string &OutputFile,
+ std::string *Error,
+ const std::vector<std::string> &GCCArgs =
+ std::vector<std::string>(),
+ const std::vector<std::string> &SharedLibs =
+ std::vector<std::string>(),
+ unsigned Timeout = 0,
+ unsigned MemoryLimit = 0);
+
+ /// OutputCode - Compile the specified program from bitcode to code
+ /// understood by the GCC driver (either C or asm). If the code generator
+ /// fails, it sets Error, otherwise, this function returns the type of code
+ /// emitted.
+ virtual GCC::FileType OutputCode(const std::string &Bitcode,
+ sys::Path &OutFile, std::string &Error,
+ unsigned Timeout = 0,
+ unsigned MemoryLimit = 0);
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/tools/bugpoint/bugpoint.cpp b/contrib/llvm/tools/bugpoint/bugpoint.cpp
new file mode 100644
index 0000000..8f15b02
--- /dev/null
+++ b/contrib/llvm/tools/bugpoint/bugpoint.cpp
@@ -0,0 +1,210 @@
+//===- bugpoint.cpp - The LLVM Bugpoint utility ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This program is an automated compiler debugger tool. It is used to narrow
+// down miscompilations and crash problems to a specific pass in the compiler,
+// and the specific Module or Function input that is causing the problem.
+//
+//===----------------------------------------------------------------------===//
+
+#include "BugDriver.h"
+#include "ToolRunner.h"
+#include "llvm/LinkAllPasses.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/PassManager.h"
+#include "llvm/Support/PassNameParser.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/PluginLoader.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/Valgrind.h"
+#include "llvm/LinkAllVMCore.h"
+#include "llvm/Transforms/IPO/PassManagerBuilder.h"
+
+// Enable this macro to debug bugpoint itself.
+//#define DEBUG_BUGPOINT 1
+
+using namespace llvm;
+
+static cl::opt<bool>
+FindBugs("find-bugs", cl::desc("Run many different optimization sequences "
+                               "on the program to find bugs"), cl::init(false));
+
+static cl::list<std::string>
+InputFilenames(cl::Positional, cl::OneOrMore,
+ cl::desc("<input llvm ll/bc files>"));
+
+static cl::opt<unsigned>
+TimeoutValue("timeout", cl::init(300), cl::value_desc("seconds"),
+             cl::desc("Number of seconds the program is allowed to run before "
+                      "it is killed (default is 300s); 0 disables the timeout"));
+
+static cl::opt<int>
+MemoryLimit("mlimit", cl::init(-1), cl::value_desc("MBytes"),
+            cl::desc("Maximum amount of memory to use. 0 disables the check."
+ " Defaults to 100MB (800MB under valgrind)."));
+
+static cl::opt<bool>
+UseValgrind("enable-valgrind",
+ cl::desc("Run optimizations through valgrind"));
+
+// The PassList is automatically populated with registered Passes by the
+// PassNameParser.
+//
+static cl::list<const PassInfo*, bool, PassNameParser>
+PassList(cl::desc("Passes available:"), cl::ZeroOrMore);
+
+static cl::opt<bool>
+StandardCompileOpts("std-compile-opts",
+ cl::desc("Include the standard compile time optimizations"));
+
+static cl::opt<bool>
+StandardLinkOpts("std-link-opts",
+ cl::desc("Include the standard link time optimizations"));
+
+static cl::opt<bool>
+OptLevelO1("O1",
+ cl::desc("Optimization level 1. Similar to llvm-gcc -O1"));
+
+static cl::opt<bool>
+OptLevelO2("O2",
+ cl::desc("Optimization level 2. Similar to llvm-gcc -O2"));
+
+static cl::opt<bool>
+OptLevelO3("O3",
+ cl::desc("Optimization level 3. Similar to llvm-gcc -O3"));
+
+static cl::opt<std::string>
+OverrideTriple("mtriple", cl::desc("Override target triple for module"));
+
+/// BugpointIsInterrupted - Set to true when the user presses ctrl-c.
+bool llvm::BugpointIsInterrupted = false;
+
+#ifndef DEBUG_BUGPOINT
+static void BugpointInterruptFunction() {
+ BugpointIsInterrupted = true;
+}
+#endif
+
+// Hack to capture a pass list.
+namespace {
+ class AddToDriver : public FunctionPassManager {
+ BugDriver &D;
+ public:
+ AddToDriver(BugDriver &_D) : FunctionPassManager(0), D(_D) {}
+
+ virtual void add(Pass *P) {
+ const void *ID = P->getPassID();
+ const PassInfo *PI = PassRegistry::getPassRegistry()->getPassInfo(ID);
+ D.addPass(PI->getPassArgument());
+ }
+ };
+}
+
+int main(int argc, char **argv) {
+#ifndef DEBUG_BUGPOINT
+ llvm::sys::PrintStackTraceOnErrorSignal();
+ llvm::PrettyStackTraceProgram X(argc, argv);
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+#endif
+
+ // Initialize passes
+ PassRegistry &Registry = *PassRegistry::getPassRegistry();
+ initializeCore(Registry);
+ initializeScalarOpts(Registry);
+ initializeVectorization(Registry);
+ initializeIPO(Registry);
+ initializeAnalysis(Registry);
+ initializeIPA(Registry);
+ initializeTransformUtils(Registry);
+ initializeInstCombine(Registry);
+ initializeInstrumentation(Registry);
+ initializeTarget(Registry);
+
+ cl::ParseCommandLineOptions(argc, argv,
+ "LLVM automatic testcase reducer. See\nhttp://"
+ "llvm.org/cmds/bugpoint.html"
+ " for more information.\n");
+#ifndef DEBUG_BUGPOINT
+ sys::SetInterruptFunction(BugpointInterruptFunction);
+#endif
+
+ LLVMContext& Context = getGlobalContext();
+ // If we have an override, set it and then track the triple we want Modules
+ // to use.
+ if (!OverrideTriple.empty()) {
+ TargetTriple.setTriple(Triple::normalize(OverrideTriple));
+ outs() << "Override triple set to '" << TargetTriple.getTriple() << "'\n";
+ }
+
+ if (MemoryLimit < 0) {
+ // Set the default MemoryLimit. Be sure to update the flag's description if
+ // you change this.
+ if (sys::RunningOnValgrind() || UseValgrind)
+ MemoryLimit = 800;
+ else
+ MemoryLimit = 100;
+ }
+
+ BugDriver D(argv[0], FindBugs, TimeoutValue, MemoryLimit,
+ UseValgrind, Context);
+ if (D.addSources(InputFilenames)) return 1;
+
+ AddToDriver PM(D);
+ if (StandardCompileOpts) {
+ PassManagerBuilder Builder;
+ Builder.OptLevel = 3;
+ Builder.Inliner = createFunctionInliningPass();
+ Builder.populateModulePassManager(PM);
+ }
+
+ if (StandardLinkOpts) {
+ PassManagerBuilder Builder;
+ Builder.populateLTOPassManager(PM, /*Internalize=*/true,
+ /*RunInliner=*/true);
+ }
+
+ if (OptLevelO1 || OptLevelO2 || OptLevelO3) {
+ PassManagerBuilder Builder;
+ if (OptLevelO1)
+ Builder.Inliner = createAlwaysInlinerPass();
+ else if (OptLevelO2)
+ Builder.Inliner = createFunctionInliningPass(225);
+ else
+ Builder.Inliner = createFunctionInliningPass(275);
+
+ // Note that although clang/llvm-gcc use two separate passmanagers
+ // here, it shouldn't normally make a difference.
+ Builder.populateFunctionPassManager(PM);
+ Builder.populateModulePassManager(PM);
+ }
+
+ for (std::vector<const PassInfo*>::iterator I = PassList.begin(),
+ E = PassList.end();
+ I != E; ++I) {
+ const PassInfo* PI = *I;
+ D.addPass(PI->getPassArgument());
+ }
+
+  // Bugpoint can generate a plethora of core files, so to avoid filling up
+  // the disk, we prevent it from doing so.
+#ifndef DEBUG_BUGPOINT
+ sys::Process::PreventCoreFiles();
+#endif
+
+ std::string Error;
+ bool Failure = D.run(Error);
+ if (!Error.empty()) {
+ errs() << Error;
+ return 1;
+ }
+ return Failure;
+}
diff --git a/contrib/llvm/tools/clang/LICENSE.TXT b/contrib/llvm/tools/clang/LICENSE.TXT
new file mode 100644
index 0000000..6c224f8
--- /dev/null
+++ b/contrib/llvm/tools/clang/LICENSE.TXT
@@ -0,0 +1,63 @@
+==============================================================================
+LLVM Release License
+==============================================================================
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2007-2012 University of Illinois at Urbana-Champaign.
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+The LLVM software contains code written by third parties. Such software will
+have its own individual LICENSE.TXT file in the directory in which it appears.
+This file will describe the copyrights, license, and restrictions which apply
+to that code.
+
+The disclaimer of warranty in the University of Illinois Open Source License
+applies to all code in the LLVM Distribution, and nothing in any of the
+other licenses gives permission to use the names of the LLVM Team or the
+University of Illinois to endorse or promote products derived from this
+Software.
+
+The following pieces of software have additional or alternate copyrights,
+licenses, and/or restrictions:
+
+Program Directory
+------- ---------
+<none yet>
+
diff --git a/contrib/llvm/tools/clang/include/clang-c/Index.h b/contrib/llvm/tools/clang/include/clang-c/Index.h
new file mode 100644
index 0000000..13ba6ba
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang-c/Index.h
@@ -0,0 +1,4748 @@
+/*===-- clang-c/Index.h - Indexing Public C Interface -------------*- C -*-===*\
+|* *|
+|* The LLVM Compiler Infrastructure *|
+|* *|
+|* This file is distributed under the University of Illinois Open Source *|
+|* License. See LICENSE.TXT for details. *|
+|* *|
+|*===----------------------------------------------------------------------===*|
+|* *|
+|* This header provides a public interface to a Clang library for extracting  *|
+|* high-level symbol information from source files without exposing the full *|
+|* Clang C++ API. *|
+|* *|
+\*===----------------------------------------------------------------------===*/
+
+#ifndef CLANG_C_INDEX_H
+#define CLANG_C_INDEX_H
+
+#include <sys/stat.h>
+#include <time.h>
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* MSVC DLL import/export. */
+#ifdef _MSC_VER
+ #ifdef _CINDEX_LIB_
+ #define CINDEX_LINKAGE __declspec(dllexport)
+ #else
+ #define CINDEX_LINKAGE __declspec(dllimport)
+ #endif
+#else
+ #define CINDEX_LINKAGE
+#endif
+
+#ifdef __GNUC__
+ #define CINDEX_DEPRECATED __attribute__((deprecated))
+#else
+ #ifdef _MSC_VER
+ #define CINDEX_DEPRECATED __declspec(deprecated)
+ #else
+ #define CINDEX_DEPRECATED
+ #endif
+#endif
+
+/** \defgroup CINDEX libclang: C Interface to Clang
+ *
+ * The C Interface to Clang provides a relatively small API that exposes
+ * facilities for parsing source code into an abstract syntax tree (AST),
+ * loading already-parsed ASTs, traversing the AST, associating
+ * physical source locations with elements within the AST, and other
+ * facilities that support Clang-based development tools.
+ *
+ * This C interface to Clang will never provide all of the information
+ * representation stored in Clang's C++ AST, nor should it: the intent is to
+ * maintain an API that is relatively stable from one release to the next,
+ * providing only the basic functionality needed to support development tools.
+ *
+ * To avoid namespace pollution, data types are prefixed with "CX" and
+ * functions are prefixed with "clang_".
+ *
+ * @{
+ */
+
+/**
+ * \brief An "index" that consists of a set of translation units that would
+ * typically be linked together into an executable or library.
+ */
+typedef void *CXIndex;
+
+/**
+ * \brief A single translation unit, which resides in an index.
+ */
+typedef struct CXTranslationUnitImpl *CXTranslationUnit;
+
+/**
+ * \brief Opaque pointer representing client data that will be passed through
+ * to various callbacks and visitors.
+ */
+typedef void *CXClientData;
+
+/**
+ * \brief Provides the contents of a file that has not yet been saved to disk.
+ *
+ * Each CXUnsavedFile instance provides the name of a file on the
+ * system along with the current contents of that file that have not
+ * yet been saved to disk.
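+ *
+ * For example, a client might overlay an in-memory buffer on top of "t.c"
+ * (a minimal sketch; the file name and buffer contents are illustrative
+ * only):
+ *
+ * \code
+ * const char *Buf = "int main(void) { return 0; }";
+ * struct CXUnsavedFile UF;
+ * UF.Filename = "t.c";
+ * UF.Contents = Buf;
+ * UF.Length   = strlen(Buf);
+ * \endcode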
+ */
+struct CXUnsavedFile {
+ /**
+ * \brief The file whose contents have not yet been saved.
+ *
+ * This file must already exist in the file system.
+ */
+ const char *Filename;
+
+ /**
+ * \brief A buffer containing the unsaved contents of this file.
+ */
+ const char *Contents;
+
+ /**
+ * \brief The length of the unsaved contents of this buffer.
+ */
+ unsigned long Length;
+};
+
+/**
+ * \brief Describes the availability of a particular entity, which indicates
+ * whether the use of this entity will result in a warning or error due to
+ * it being deprecated or unavailable.
+ */
+enum CXAvailabilityKind {
+ /**
+ * \brief The entity is available.
+ */
+ CXAvailability_Available,
+ /**
+ * \brief The entity is available, but has been deprecated (and its use is
+ * not recommended).
+ */
+ CXAvailability_Deprecated,
+ /**
+ * \brief The entity is not available; any use of it will be an error.
+ */
+ CXAvailability_NotAvailable,
+ /**
+ * \brief The entity is available, but not accessible; any use of it will be
+ * an error.
+ */
+ CXAvailability_NotAccessible
+};
+
+/**
+ * \defgroup CINDEX_STRING String manipulation routines
+ *
+ * @{
+ */
+
+/**
+ * \brief A character string.
+ *
+ * The \c CXString type is used to return strings from the interface when
+ * the ownership of that string might differ from one call to the next.
+ * Use \c clang_getCString() to retrieve the string data and, once finished
+ * with the string data, call \c clang_disposeString() to free the string.
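+ *
+ * A typical use looks like this (a minimal sketch; \c TU is assumed to be a
+ * valid translation unit):
+ *
+ * \code
+ * CXString S = clang_getTranslationUnitSpelling(TU);
+ * printf("%s\n", clang_getCString(S));
+ * clang_disposeString(S);
+ * \endcode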
+ */
+typedef struct {
+ void *data;
+ unsigned private_flags;
+} CXString;
+
+/**
+ * \brief Retrieve the character data associated with the given string.
+ */
+CINDEX_LINKAGE const char *clang_getCString(CXString string);
+
+/**
+ * \brief Free the given string.
+ */
+CINDEX_LINKAGE void clang_disposeString(CXString string);
+
+/**
+ * @}
+ */
+
+/**
+ * \brief clang_createIndex() provides a shared context for creating
+ * translation units. It provides two options:
+ *
+ * - excludeDeclarationsFromPCH: When non-zero, allows enumeration of "local"
+ * declarations (when loading any new translation units). A "local" declaration
+ * is one that belongs in the translation unit itself and not in a precompiled
+ * header that was used by the translation unit. If zero, all declarations
+ * will be enumerated.
+ *
+ * - displayDiagnostics: When non-zero, diagnostics are printed as they are
+ * produced; when zero, they are only available programmatically through the
+ * CXDiagnostic routines below.
+ *
+ * Here is an example:
+ *
+ * // excludeDeclsFromPCH = 1, displayDiagnostics=1
+ * Idx = clang_createIndex(1, 1);
+ *
+ * // IndexTest.pch was produced with the following command:
+ * // "clang -x c IndexTest.h -emit-ast -o IndexTest.pch"
+ * TU = clang_createTranslationUnit(Idx, "IndexTest.pch");
+ *
+ * // This will load all the symbols from 'IndexTest.pch'
+ * clang_visitChildren(clang_getTranslationUnitCursor(TU),
+ * TranslationUnitVisitor, 0);
+ * clang_disposeTranslationUnit(TU);
+ *
+ * // This will load all the symbols from 'IndexTest.c', excluding symbols
+ * // from 'IndexTest.pch'.
+ * char *args[] = { "-Xclang", "-include-pch=IndexTest.pch" };
+ * TU = clang_createTranslationUnitFromSourceFile(Idx, "IndexTest.c", 2, args,
+ * 0, 0);
+ * clang_visitChildren(clang_getTranslationUnitCursor(TU),
+ * TranslationUnitVisitor, 0);
+ * clang_disposeTranslationUnit(TU);
+ *
+ * This process of creating the 'pch', loading it separately, and using it (via
+ * -include-pch) allows 'excludeDeclsFromPCH' to remove redundant callbacks
+ * (which gives the indexer the same performance benefit as the compiler).
+ */
+CINDEX_LINKAGE CXIndex clang_createIndex(int excludeDeclarationsFromPCH,
+ int displayDiagnostics);
+
+/**
+ * \brief Destroy the given index.
+ *
+ * The index must not be destroyed until all of the translation units created
+ * within that index have been destroyed.
+ */
+CINDEX_LINKAGE void clang_disposeIndex(CXIndex index);
+
+typedef enum {
+ /**
+ * \brief Used to indicate that no special CXIndex options are needed.
+ */
+ CXGlobalOpt_None = 0x0,
+
+ /**
+ * \brief Used to indicate that threads that libclang creates for indexing
+ * purposes should use background priority.
+ * Affects \see clang_indexSourceFile, \see clang_indexTranslationUnit,
+ * \see clang_parseTranslationUnit, \see clang_saveTranslationUnit.
+ */
+ CXGlobalOpt_ThreadBackgroundPriorityForIndexing = 0x1,
+
+ /**
+ * \brief Used to indicate that threads that libclang creates for editing
+ * purposes should use background priority.
+ * Affects \see clang_reparseTranslationUnit, \see clang_codeCompleteAt,
+ * \see clang_annotateTokens
+ */
+ CXGlobalOpt_ThreadBackgroundPriorityForEditing = 0x2,
+
+ /**
+ * \brief Used to indicate that all threads that libclang creates should use
+ * background priority.
+ */
+ CXGlobalOpt_ThreadBackgroundPriorityForAll =
+ CXGlobalOpt_ThreadBackgroundPriorityForIndexing |
+ CXGlobalOpt_ThreadBackgroundPriorityForEditing
+
+} CXGlobalOptFlags;
+
+/**
+ * \brief Sets general options associated with a CXIndex.
+ *
+ * For example:
+ * \code
+ * CXIndex idx = ...;
+ * clang_CXIndex_setGlobalOptions(idx,
+ * clang_CXIndex_getGlobalOptions(idx) |
+ * CXGlobalOpt_ThreadBackgroundPriorityForIndexing);
+ * \endcode
+ *
+ * \param options A bitmask of options, a bitwise OR of CXGlobalOpt_XXX flags.
+ */
+CINDEX_LINKAGE void clang_CXIndex_setGlobalOptions(CXIndex, unsigned options);
+
+/**
+ * \brief Gets the general options associated with a CXIndex.
+ *
+ * \returns A bitmask of options, a bitwise OR of CXGlobalOpt_XXX flags that
+ * are associated with the given CXIndex object.
+ */
+CINDEX_LINKAGE unsigned clang_CXIndex_getGlobalOptions(CXIndex);
+
+/**
+ * \defgroup CINDEX_FILES File manipulation routines
+ *
+ * @{
+ */
+
+/**
+ * \brief A particular source file that is part of a translation unit.
+ */
+typedef void *CXFile;
+
+
+/**
+ * \brief Retrieve the complete file and path name of the given file.
+ */
+CINDEX_LINKAGE CXString clang_getFileName(CXFile SFile);
+
+/**
+ * \brief Retrieve the last modification time of the given file.
+ */
+CINDEX_LINKAGE time_t clang_getFileTime(CXFile SFile);
+
+/**
+ * \brief Determine whether the given header is guarded against
+ * multiple inclusions, either with the conventional
+ * #ifndef/#define/#endif macro guards or with #pragma once.
+ */
+CINDEX_LINKAGE unsigned
+clang_isFileMultipleIncludeGuarded(CXTranslationUnit tu, CXFile file);
+
+/**
+ * \brief Retrieve a file handle within the given translation unit.
+ *
+ * \param tu the translation unit
+ *
+ * \param file_name the name of the file.
+ *
+ * \returns the file handle for the named file in the translation unit \p tu,
+ * or a NULL file handle if the file was not a part of this translation unit.
+ */
+CINDEX_LINKAGE CXFile clang_getFile(CXTranslationUnit tu,
+ const char *file_name);
+
+/**
+ * @}
+ */
+
+/**
+ * \defgroup CINDEX_LOCATIONS Physical source locations
+ *
+ * Clang represents physical source locations in its abstract syntax tree in
+ * great detail, with file, line, and column information for the majority of
+ * the tokens parsed in the source code. These data types and functions are
+ * used to represent source location information, either for a particular
+ * point in the program or for a range of points in the program, and extract
+ * specific location information from those data types.
+ *
+ * @{
+ */
+
+/**
+ * \brief Identifies a specific source location within a translation
+ * unit.
+ *
+ * Use clang_getExpansionLocation() or clang_getSpellingLocation()
+ * to map a source location to a particular file, line, and column.
+ */
+typedef struct {
+ void *ptr_data[2];
+ unsigned int_data;
+} CXSourceLocation;
+
+/**
+ * \brief Identifies a half-open character range in the source code.
+ *
+ * Use clang_getRangeStart() and clang_getRangeEnd() to retrieve the
+ * starting and end locations from a source range, respectively.
+ */
+typedef struct {
+ void *ptr_data[2];
+ unsigned begin_int_data;
+ unsigned end_int_data;
+} CXSourceRange;
+
+/**
+ * \brief Retrieve a NULL (invalid) source location.
+ */
+CINDEX_LINKAGE CXSourceLocation clang_getNullLocation();
+
+/**
+ * \brief Determine whether two source locations, which must refer into
+ * the same translation unit, refer to exactly the same point in the source
+ * code.
+ *
+ * \returns non-zero if the source locations refer to the same location, zero
+ * if they refer to different locations.
+ */
+CINDEX_LINKAGE unsigned clang_equalLocations(CXSourceLocation loc1,
+ CXSourceLocation loc2);
+
+/**
+ * \brief Retrieves the source location associated with a given file/line/column
+ * in a particular translation unit.
+ */
+CINDEX_LINKAGE CXSourceLocation clang_getLocation(CXTranslationUnit tu,
+ CXFile file,
+ unsigned line,
+ unsigned column);
+/**
+ * \brief Retrieves the source location associated with a given character offset
+ * in a particular translation unit.
+ */
+CINDEX_LINKAGE CXSourceLocation clang_getLocationForOffset(CXTranslationUnit tu,
+ CXFile file,
+ unsigned offset);
+
+/**
+ * \brief Retrieve a NULL (invalid) source range.
+ */
+CINDEX_LINKAGE CXSourceRange clang_getNullRange();
+
+/**
+ * \brief Retrieve a source range given the beginning and ending source
+ * locations.
+ */
+CINDEX_LINKAGE CXSourceRange clang_getRange(CXSourceLocation begin,
+ CXSourceLocation end);
+
+/**
+ * \brief Determine whether two ranges are equivalent.
+ *
+ * \returns non-zero if the ranges are the same, zero if they differ.
+ */
+CINDEX_LINKAGE unsigned clang_equalRanges(CXSourceRange range1,
+ CXSourceRange range2);
+
+/**
+ * \brief Returns non-zero if \arg range is null.
+ */
+CINDEX_LINKAGE int clang_Range_isNull(CXSourceRange range);
+
+/**
+ * \brief Retrieve the file, line, column, and offset represented by
+ * the given source location.
+ *
+ * If the location refers into a macro expansion, retrieves the
+ * location of the macro expansion.
+ *
+ * \param location the location within a source file that will be decomposed
+ * into its parts.
+ *
+ * \param file [out] if non-NULL, will be set to the file to which the given
+ * source location points.
+ *
+ * \param line [out] if non-NULL, will be set to the line to which the given
+ * source location points.
+ *
+ * \param column [out] if non-NULL, will be set to the column to which the given
+ * source location points.
+ *
+ * \param offset [out] if non-NULL, will be set to the offset into the
+ * buffer to which the given source location points.
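+ *
+ * For example, the following sketch prints the expansion location of a
+ * diagnostic \c D (assumed to be a valid CXDiagnostic):
+ *
+ * \code
+ * CXFile File;
+ * unsigned Line, Column;
+ * clang_getExpansionLocation(clang_getDiagnosticLocation(D),
+ *                            &File, &Line, &Column, 0);
+ * CXString Name = clang_getFileName(File);
+ * printf("%s:%u:%u\n", clang_getCString(Name), Line, Column);
+ * clang_disposeString(Name);
+ * \endcode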
+ */
+CINDEX_LINKAGE void clang_getExpansionLocation(CXSourceLocation location,
+ CXFile *file,
+ unsigned *line,
+ unsigned *column,
+ unsigned *offset);
+
+/**
+ * \brief Retrieve the file, line, column, and offset represented by
+ * the given source location, as specified in a # line directive.
+ *
+ * Example: given the following source code in a file somefile.c
+ *
+ * #123 "dummy.c" 1
+ *
+ * static int func(void)
+ * {
+ * return 0;
+ * }
+ *
+ * the location information returned by this function would be
+ *
+ * File: dummy.c Line: 124 Column: 12
+ *
+ * whereas clang_getExpansionLocation would have returned
+ *
+ * File: somefile.c Line: 3 Column: 12
+ *
+ * \param location the location within a source file that will be decomposed
+ * into its parts.
+ *
+ * \param filename [out] if non-NULL, will be set to the filename of the
+ * source location. Note that filenames returned will be for "virtual" files,
+ * which don't necessarily exist on the machine running clang - e.g. when
+ * parsing preprocessed output obtained from a different environment. If
+ * a non-NULL value is passed in, remember to dispose of the returned value
+ * using \c clang_disposeString() once you've finished with it. For an invalid
+ * source location, an empty string is returned.
+ *
+ * \param line [out] if non-NULL, will be set to the line number of the
+ * source location. For an invalid source location, zero is returned.
+ *
+ * \param column [out] if non-NULL, will be set to the column number of the
+ * source location. For an invalid source location, zero is returned.
+ */
+CINDEX_LINKAGE void clang_getPresumedLocation(CXSourceLocation location,
+ CXString *filename,
+ unsigned *line,
+ unsigned *column);
+
+/**
+ * \brief Legacy API to retrieve the file, line, column, and offset represented
+ * by the given source location.
+ *
+ * This interface has been replaced by the newer interface
+ * \see clang_getExpansionLocation(). See that interface's documentation for
+ * details.
+ */
+CINDEX_LINKAGE void clang_getInstantiationLocation(CXSourceLocation location,
+ CXFile *file,
+ unsigned *line,
+ unsigned *column,
+ unsigned *offset);
+
+/**
+ * \brief Retrieve the file, line, column, and offset represented by
+ * the given source location.
+ *
+ * If the location refers into a macro instantiation, return where the
+ * location was originally spelled in the source file.
+ *
+ * \param location the location within a source file that will be decomposed
+ * into its parts.
+ *
+ * \param file [out] if non-NULL, will be set to the file to which the given
+ * source location points.
+ *
+ * \param line [out] if non-NULL, will be set to the line to which the given
+ * source location points.
+ *
+ * \param column [out] if non-NULL, will be set to the column to which the given
+ * source location points.
+ *
+ * \param offset [out] if non-NULL, will be set to the offset into the
+ * buffer to which the given source location points.
+ */
+CINDEX_LINKAGE void clang_getSpellingLocation(CXSourceLocation location,
+ CXFile *file,
+ unsigned *line,
+ unsigned *column,
+ unsigned *offset);
+
+/**
+ * \brief Retrieve a source location representing the first character within a
+ * source range.
+ */
+CINDEX_LINKAGE CXSourceLocation clang_getRangeStart(CXSourceRange range);
+
+/**
+ * \brief Retrieve a source location representing the last character within a
+ * source range.
+ */
+CINDEX_LINKAGE CXSourceLocation clang_getRangeEnd(CXSourceRange range);
+
+/**
+ * @}
+ */
+
+/**
+ * \defgroup CINDEX_DIAG Diagnostic reporting
+ *
+ * @{
+ */
+
+/**
+ * \brief Describes the severity of a particular diagnostic.
+ */
+enum CXDiagnosticSeverity {
+ /**
+ * \brief A diagnostic that has been suppressed, e.g., by a command-line
+ * option.
+ */
+ CXDiagnostic_Ignored = 0,
+
+ /**
+ * \brief This diagnostic is a note that should be attached to the
+ * previous (non-note) diagnostic.
+ */
+ CXDiagnostic_Note = 1,
+
+ /**
+ * \brief This diagnostic indicates suspicious code that may not be
+ * wrong.
+ */
+ CXDiagnostic_Warning = 2,
+
+ /**
+ * \brief This diagnostic indicates that the code is ill-formed.
+ */
+ CXDiagnostic_Error = 3,
+
+ /**
+ * \brief This diagnostic indicates that the code is ill-formed such
+ * that future parser recovery is unlikely to produce useful
+ * results.
+ */
+ CXDiagnostic_Fatal = 4
+};
+
+/**
+ * \brief A single diagnostic, containing the diagnostic's severity,
+ * location, text, source ranges, and fix-it hints.
+ */
+typedef void *CXDiagnostic;
+
+/**
+ * \brief A group of CXDiagnostics.
+ */
+typedef void *CXDiagnosticSet;
+
+/**
+ * \brief Determine the number of diagnostics in a CXDiagnosticSet.
+ */
+CINDEX_LINKAGE unsigned clang_getNumDiagnosticsInSet(CXDiagnosticSet Diags);
+
+/**
+ * \brief Retrieve a diagnostic associated with the given CXDiagnosticSet.
+ *
+ * \param Diags the CXDiagnosticSet to query.
+ * \param Index the zero-based diagnostic number to retrieve.
+ *
+ * \returns the requested diagnostic. This diagnostic must be freed
+ * via a call to \c clang_disposeDiagnostic().
+ */
+CINDEX_LINKAGE CXDiagnostic clang_getDiagnosticInSet(CXDiagnosticSet Diags,
+ unsigned Index);
+
+
+/**
+ * \brief Describes the kind of error that occurred (if any) in a call to
+ * \c clang_loadDiagnostics.
+ */
+enum CXLoadDiag_Error {
+ /**
+ * \brief Indicates that no error occurred.
+ */
+ CXLoadDiag_None = 0,
+
+ /**
+ * \brief Indicates that an unknown error occurred while attempting to
+ * deserialize diagnostics.
+ */
+ CXLoadDiag_Unknown = 1,
+
+ /**
+ * \brief Indicates that the file containing the serialized diagnostics
+ * could not be opened.
+ */
+ CXLoadDiag_CannotLoad = 2,
+
+ /**
+ * \brief Indicates that the serialized diagnostics file is invalid or
+ * corrupt.
+ */
+ CXLoadDiag_InvalidFile = 3
+};
+
+/**
+ * \brief Deserialize a set of diagnostics from a Clang diagnostics bitcode
+ * file.
+ *
+ * \param file The name of the file to deserialize.
+ * \param error A pointer to an enum value recording if there was a problem
+ *        deserializing the diagnostics.
+ * \param errorString A pointer to a CXString for recording the error string
+ *        if the file was not successfully loaded.
+ *
+ * \returns A loaded CXDiagnosticSet if successful, and NULL otherwise. These
+ * diagnostics should be released using clang_disposeDiagnosticSet().
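+ *
+ * For example (a minimal sketch; "t.dia" names a serialized diagnostics
+ * file):
+ *
+ * \code
+ * enum CXLoadDiag_Error Err;
+ * CXString ErrStr;
+ * CXDiagnosticSet Set = clang_loadDiagnostics("t.dia", &Err, &ErrStr);
+ * if (!Set)
+ *   fprintf(stderr, "%s\n", clang_getCString(ErrStr));
+ * \endcode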
+ */
+CINDEX_LINKAGE CXDiagnosticSet clang_loadDiagnostics(const char *file,
+ enum CXLoadDiag_Error *error,
+ CXString *errorString);
+
+/**
+ * \brief Release a CXDiagnosticSet and all of its contained diagnostics.
+ */
+CINDEX_LINKAGE void clang_disposeDiagnosticSet(CXDiagnosticSet Diags);
+
+/**
+ * \brief Retrieve the child diagnostics of a CXDiagnostic. This
+ * CXDiagnosticSet does not need to be released by clang_disposeDiagnosticSet.
+ */
+CINDEX_LINKAGE CXDiagnosticSet clang_getChildDiagnostics(CXDiagnostic D);
+
+/**
+ * \brief Determine the number of diagnostics produced for the given
+ * translation unit.
+ */
+CINDEX_LINKAGE unsigned clang_getNumDiagnostics(CXTranslationUnit Unit);
+
+/**
+ * \brief Retrieve a diagnostic associated with the given translation unit.
+ *
+ * \param Unit the translation unit to query.
+ * \param Index the zero-based diagnostic number to retrieve.
+ *
+ * \returns the requested diagnostic. This diagnostic must be freed
+ * via a call to \c clang_disposeDiagnostic().
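+ *
+ * A typical loop over all diagnostics of a translation unit looks like this
+ * (a minimal sketch; \c TU is assumed to be a valid translation unit):
+ *
+ * \code
+ * unsigned I, N = clang_getNumDiagnostics(TU);
+ * for (I = 0; I != N; ++I) {
+ *   CXDiagnostic D = clang_getDiagnostic(TU, I);
+ *   CXString Msg = clang_formatDiagnostic(D,
+ *                      clang_defaultDiagnosticDisplayOptions());
+ *   fprintf(stderr, "%s\n", clang_getCString(Msg));
+ *   clang_disposeString(Msg);
+ *   clang_disposeDiagnostic(D);
+ * }
+ * \endcode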
+ */
+CINDEX_LINKAGE CXDiagnostic clang_getDiagnostic(CXTranslationUnit Unit,
+ unsigned Index);
+
+/**
+ * \brief Retrieve the complete set of diagnostics associated with a
+ * translation unit.
+ *
+ * \param Unit the translation unit to query.
+ */
+CINDEX_LINKAGE CXDiagnosticSet
+ clang_getDiagnosticSetFromTU(CXTranslationUnit Unit);
+
+/**
+ * \brief Destroy a diagnostic.
+ */
+CINDEX_LINKAGE void clang_disposeDiagnostic(CXDiagnostic Diagnostic);
+
+/**
+ * \brief Options to control the display of diagnostics.
+ *
+ * The values in this enum are meant to be combined to customize the
+ * behavior of \c clang_formatDiagnostic().
+ */
+enum CXDiagnosticDisplayOptions {
+ /**
+ * \brief Display the source-location information where the
+ * diagnostic was located.
+ *
+ * When set, diagnostics will be prefixed by the file, line, and
+ * (optionally) column to which the diagnostic refers. For example,
+ *
+ * \code
+ * test.c:28: warning: extra tokens at end of #endif directive
+ * \endcode
+ *
+ * This option corresponds to the clang flag \c -fshow-source-location.
+ */
+ CXDiagnostic_DisplaySourceLocation = 0x01,
+
+ /**
+ * \brief If displaying the source-location information of the
+ * diagnostic, also include the column number.
+ *
+ * This option corresponds to the clang flag \c -fshow-column.
+ */
+ CXDiagnostic_DisplayColumn = 0x02,
+
+ /**
+ * \brief If displaying the source-location information of the
+ * diagnostic, also include information about source ranges in a
+ * machine-parsable format.
+ *
+ * This option corresponds to the clang flag
+ * \c -fdiagnostics-print-source-range-info.
+ */
+ CXDiagnostic_DisplaySourceRanges = 0x04,
+
+ /**
+ * \brief Display the option name associated with this diagnostic, if any.
+ *
+ * The option name displayed (e.g., -Wconversion) will be placed in brackets
+ * after the diagnostic text. This option corresponds to the clang flag
+ * \c -fdiagnostics-show-option.
+ */
+ CXDiagnostic_DisplayOption = 0x08,
+
+ /**
+ * \brief Display the category number associated with this diagnostic, if any.
+ *
+ * The category number is displayed within brackets after the diagnostic text.
+ * This option corresponds to the clang flag
+ * \c -fdiagnostics-show-category=id.
+ */
+ CXDiagnostic_DisplayCategoryId = 0x10,
+
+ /**
+ * \brief Display the category name associated with this diagnostic, if any.
+ *
+ * The category name is displayed within brackets after the diagnostic text.
+ * This option corresponds to the clang flag
+ * \c -fdiagnostics-show-category=name.
+ */
+ CXDiagnostic_DisplayCategoryName = 0x20
+};
+
+/**
+ * \brief Format the given diagnostic in a manner that is suitable for display.
+ *
+ * This routine will format the given diagnostic to a string, rendering
+ * the diagnostic according to the various options given. The
+ * \c clang_defaultDiagnosticDisplayOptions() function returns the set of
+ * options that most closely mimics the behavior of the clang compiler.
+ *
+ * \param Diagnostic The diagnostic to print.
+ *
+ * \param Options A set of options that control the diagnostic display,
+ * created by combining \c CXDiagnosticDisplayOptions values.
+ *
+ * \returns A new string containing the formatted diagnostic.
+ */
+CINDEX_LINKAGE CXString clang_formatDiagnostic(CXDiagnostic Diagnostic,
+ unsigned Options);
+
+/**
+ * \brief Retrieve the set of display options most similar to the
+ * default behavior of the clang compiler.
+ *
+ * \returns A set of display options suitable for use with \c
+ * clang_formatDiagnostic().
+ */
+CINDEX_LINKAGE unsigned clang_defaultDiagnosticDisplayOptions(void);
+
+/**
+ * \brief Determine the severity of the given diagnostic.
+ */
+CINDEX_LINKAGE enum CXDiagnosticSeverity
+clang_getDiagnosticSeverity(CXDiagnostic);
+
+/**
+ * \brief Retrieve the source location of the given diagnostic.
+ *
+ * This location is where Clang would print the caret ('^') when
+ * displaying the diagnostic on the command line.
+ */
+CINDEX_LINKAGE CXSourceLocation clang_getDiagnosticLocation(CXDiagnostic);
+
+/**
+ * \brief Retrieve the text of the given diagnostic.
+ */
+CINDEX_LINKAGE CXString clang_getDiagnosticSpelling(CXDiagnostic);
+
+/**
+ * \brief Retrieve the name of the command-line option that enabled this
+ * diagnostic.
+ *
+ * \param Diag The diagnostic to be queried.
+ *
+ * \param Disable If non-NULL, will be set to the option that disables this
+ * diagnostic (if any).
+ *
+ * \returns A string that contains the command-line option used to enable this
+ * warning, such as "-Wconversion" or "-pedantic".
+ */
+CINDEX_LINKAGE CXString clang_getDiagnosticOption(CXDiagnostic Diag,
+ CXString *Disable);
+
+/**
+ * \brief Retrieve the category number for this diagnostic.
+ *
+ * Diagnostics can be categorized into groups along with other, related
+ * diagnostics (e.g., diagnostics under the same warning flag). This routine
+ * retrieves the category number for the given diagnostic.
+ *
+ * \returns The number of the category that contains this diagnostic, or zero
+ * if this diagnostic is uncategorized.
+ */
+CINDEX_LINKAGE unsigned clang_getDiagnosticCategory(CXDiagnostic);
+
+/**
+ * \brief Retrieve the name of a particular diagnostic category. This
+ * is now deprecated. Use clang_getDiagnosticCategoryText()
+ * instead.
+ *
+ * \param Category A diagnostic category number, as returned by
+ * \c clang_getDiagnosticCategory().
+ *
+ * \returns The name of the given diagnostic category.
+ */
+CINDEX_DEPRECATED CINDEX_LINKAGE
+CXString clang_getDiagnosticCategoryName(unsigned Category);
+
+/**
+ * \brief Retrieve the diagnostic category text for a given diagnostic.
+ *
+ *
+ * \returns The text of the given diagnostic category.
+ */
+CINDEX_LINKAGE CXString clang_getDiagnosticCategoryText(CXDiagnostic);
+
+/**
+ * \brief Determine the number of source ranges associated with the given
+ * diagnostic.
+ */
+CINDEX_LINKAGE unsigned clang_getDiagnosticNumRanges(CXDiagnostic);
+
+/**
+ * \brief Retrieve a source range associated with the diagnostic.
+ *
+ * A diagnostic's source ranges highlight important elements in the source
+ * code. On the command line, Clang displays source ranges by
+ * underlining them with '~' characters.
+ *
+ * \param Diagnostic the diagnostic whose range is being extracted.
+ *
+ * \param Range the zero-based index specifying which range to extract.
+ *
+ * \returns the requested source range.
+ */
+CINDEX_LINKAGE CXSourceRange clang_getDiagnosticRange(CXDiagnostic Diagnostic,
+ unsigned Range);
+
+/**
+ * \brief Determine the number of fix-it hints associated with the
+ * given diagnostic.
+ */
+CINDEX_LINKAGE unsigned clang_getDiagnosticNumFixIts(CXDiagnostic Diagnostic);
+
+/**
+ * \brief Retrieve the replacement information for a given fix-it.
+ *
+ * Fix-its are described in terms of a source range whose contents
+ * should be replaced by a string. This approach generalizes over
+ * three kinds of operations: removal of source code (the range covers
+ * the code to be removed and the replacement string is empty),
+ * replacement of source code (the range covers the code to be
+ * replaced and the replacement string provides the new code), and
+ * insertion (both the start and end of the range point at the
+ * insertion location, and the replacement string provides the text to
+ * insert).
+ *
+ * \param Diagnostic The diagnostic whose fix-its are being queried.
+ *
+ * \param FixIt The zero-based index of the fix-it.
+ *
+ * \param ReplacementRange The source range whose contents will be
+ * replaced with the returned replacement string. Note that source
+ * ranges are half-open ranges [a, b), so the source code should be
+ * replaced from a and up to (but not including) b.
+ *
+ * \returns A string containing text that should replace the source
+ * code indicated by the \c ReplacementRange.
+ */
+CINDEX_LINKAGE CXString clang_getDiagnosticFixIt(CXDiagnostic Diagnostic,
+ unsigned FixIt,
+ CXSourceRange *ReplacementRange);
+
+/**
+ * @}
+ */
+
+/**
+ * \defgroup CINDEX_TRANSLATION_UNIT Translation unit manipulation
+ *
+ * The routines in this group provide the ability to create and destroy
+ * translation units from files, either by parsing the contents of the files or
+ * by reading in a serialized representation of a translation unit.
+ *
+ * @{
+ */
+
+/**
+ * \brief Get the original translation unit source file name.
+ */
+CINDEX_LINKAGE CXString
+clang_getTranslationUnitSpelling(CXTranslationUnit CTUnit);
+
+/**
+ * \brief Return the CXTranslationUnit for a given source file and the provided
+ * command line arguments one would pass to the compiler.
+ *
+ * Note: The 'source_filename' argument is optional. If the caller provides a
+ * NULL pointer, the name of the source file is expected to reside in the
+ * specified command line arguments.
+ *
+ * Note: When encountered in 'clang_command_line_args', the following options
+ * are ignored:
+ *
+ * '-c'
+ * '-emit-ast'
+ * '-fsyntax-only'
+ * '-o <output file>' (both '-o' and '<output file>' are ignored)
+ *
+ * \param CIdx The index object with which the translation unit will be
+ * associated.
+ *
+ * \param source_filename - The name of the source file to load, or NULL if the
+ * source file is included in \p clang_command_line_args.
+ *
+ * \param num_clang_command_line_args The number of command-line arguments in
+ * \p clang_command_line_args.
+ *
+ * \param clang_command_line_args The command-line arguments that would be
+ * passed to the \c clang executable if it were being invoked out-of-process.
+ * These command-line options will be parsed and will affect how the translation
+ * unit is parsed. Note that the following options are ignored: '-c',
+ * '-emit-ast', '-fsyntax-only' (which is the default), and '-o <output file>'.
+ *
+ * \param num_unsaved_files the number of unsaved file entries in \p
+ * unsaved_files.
+ *
+ * \param unsaved_files the files that have not yet been saved to disk
+ * but may be required for code completion, including the contents of
+ * those files. The contents and name of these files (as specified by
+ * CXUnsavedFile) are copied when necessary, so the client only needs to
+ * guarantee their validity until the call to this function returns.
+ */
+CINDEX_LINKAGE CXTranslationUnit clang_createTranslationUnitFromSourceFile(
+ CXIndex CIdx,
+ const char *source_filename,
+ int num_clang_command_line_args,
+ const char * const *clang_command_line_args,
+ unsigned num_unsaved_files,
+ struct CXUnsavedFile *unsaved_files);
+
+/**
+ * \brief Create a translation unit from an AST file (-emit-ast).
+ */
+CINDEX_LINKAGE CXTranslationUnit clang_createTranslationUnit(CXIndex,
+ const char *ast_filename);
+
+/**
+ * \brief Flags that control the creation of translation units.
+ *
+ * The enumerators in this enumeration type are meant to be bitwise
+ * ORed together to specify which options should be used when
+ * constructing the translation unit.
+ */
+enum CXTranslationUnit_Flags {
+ /**
+ * \brief Used to indicate that no special translation-unit options are
+ * needed.
+ */
+ CXTranslationUnit_None = 0x0,
+
+ /**
+ * \brief Used to indicate that the parser should construct a "detailed"
+ * preprocessing record, including all macro definitions and instantiations.
+ *
+ * Constructing a detailed preprocessing record requires more memory
+ * and time to parse, since the information contained in the record
+ * is usually not retained. However, it can be useful for
+ * applications that require more detailed information about the
+ * behavior of the preprocessor.
+ */
+ CXTranslationUnit_DetailedPreprocessingRecord = 0x01,
+
+ /**
+ * \brief Used to indicate that the translation unit is incomplete.
+ *
+ * When a translation unit is considered "incomplete", semantic
+ * analysis that is typically performed at the end of the
+ * translation unit will be suppressed. For example, this suppresses
+ * the completion of tentative declarations in C and the implicit
+ * instantiation of function templates in
+ * C++. This option is typically used when parsing a header with the
+ * intent of producing a precompiled header.
+ */
+ CXTranslationUnit_Incomplete = 0x02,
+
+ /**
+ * \brief Used to indicate that the translation unit should be built with an
+ * implicit precompiled header for the preamble.
+ *
+ * An implicit precompiled header is used as an optimization when a
+ * particular translation unit is likely to be reparsed many times
+ * when the sources aren't changing that often. In this case, an
+ * implicit precompiled header will be built containing all of the
+ * initial includes at the top of the main file (what we refer to as
+ * the "preamble" of the file). In subsequent parses, if the
+ * preamble or the files in it have not changed, \c
+ * clang_reparseTranslationUnit() will re-use the implicit
+ * precompiled header to improve parsing performance.
+ */
+ CXTranslationUnit_PrecompiledPreamble = 0x04,
+
+ /**
+ * \brief Used to indicate that the translation unit should cache some
+ * code-completion results with each reparse of the source file.
+ *
+ * Caching of code-completion results is a performance optimization that
+ * introduces some overhead to reparsing but improves the performance of
+ * code-completion operations.
+ */
+ CXTranslationUnit_CacheCompletionResults = 0x08,
+ /**
+ * \brief DEPRECATED: Enable precompiled preambles in C++.
+ *
+ * Note: this is a *temporary* option that is available only while
+ * we are testing C++ precompiled preamble support. It is deprecated.
+ */
+ CXTranslationUnit_CXXPrecompiledPreamble = 0x10,
+
+ /**
+ * \brief DEPRECATED: Enable chained precompiled preambles in C++.
+ *
+ * Note: this is a *temporary* option that is available only while
+ * we are testing C++ precompiled preamble support. It is deprecated.
+ */
+ CXTranslationUnit_CXXChainedPCH = 0x20,
+
+ /**
+ * \brief Used to indicate that function/method bodies should be skipped while
+ * parsing.
+ *
+ * This option can be used to search for declarations/definitions while
+ * ignoring the usages.
+ */
+ CXTranslationUnit_SkipFunctionBodies = 0x40
+};
+
+/**
+ * \brief Returns the set of flags that is suitable for parsing a translation
+ * unit that is being edited.
+ *
+ * The set of flags returned provides options for \c clang_parseTranslationUnit()
+ * to indicate that the translation unit is likely to be reparsed many times,
+ * either explicitly (via \c clang_reparseTranslationUnit()) or implicitly
+ * (e.g., by code completion (\c clang_codeCompleteAt())). The returned flag
+ * set contains an unspecified set of optimizations (e.g., the precompiled
+ * preamble) geared toward improving the performance of these routines. The
+ * set of optimizations enabled may change from one version to the next.
+ */
+CINDEX_LINKAGE unsigned clang_defaultEditingTranslationUnitOptions(void);
+
+/**
+ * \brief Parse the given source file and produce the translation unit
+ * corresponding to that file.
+ *
+ * This routine is the main entry point for the Clang C API, providing the
+ * ability to parse a source file into a translation unit that can then be
+ * queried by other functions in the API. This routine accepts a set of
+ * command-line arguments so that the compilation can be configured in the same
+ * way that the compiler is configured on the command line.
+ *
+ * \param CIdx The index object with which the translation unit will be
+ * associated.
+ *
+ * \param source_filename The name of the source file to load, or NULL if the
+ * source file is included in \p command_line_args.
+ *
+ * \param command_line_args The command-line arguments that would be
+ * passed to the \c clang executable if it were being invoked out-of-process.
+ * These command-line options will be parsed and will affect how the translation
+ * unit is parsed. Note that the following options are ignored: '-c',
+ * '-emit-ast', '-fsyntax-only' (which is the default), and '-o <output file>'.
+ *
+ * \param num_command_line_args The number of command-line arguments in
+ * \p command_line_args.
+ *
+ * \param unsaved_files the files that have not yet been saved to disk
+ * but may be required for parsing, including the contents of
+ * those files. The contents and name of these files (as specified by
+ * CXUnsavedFile) are copied when necessary, so the client only needs to
+ * guarantee their validity until the call to this function returns.
+ *
+ * \param num_unsaved_files the number of unsaved file entries in \p
+ * unsaved_files.
+ *
+ * \param options A bitmask of options that affects how the translation unit
+ * is managed but not its compilation. This should be a bitwise OR of the
+ * CXTranslationUnit_XXX flags.
+ *
+ * \returns A new translation unit describing the parsed code and containing
+ * any diagnostics produced by the compiler. If there is a failure from which
+ * the compiler cannot recover, returns NULL.
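+ *
+ * For example (a minimal sketch; the file name and compiler flags are
+ * illustrative only):
+ *
+ * \code
+ * const char *Args[] = { "-I/usr/include", "-DNDEBUG" };
+ * CXIndex Idx = clang_createIndex(0, 1);
+ * CXTranslationUnit TU =
+ *     clang_parseTranslationUnit(Idx, "t.c", Args, 2, 0, 0,
+ *                                CXTranslationUnit_None);
+ * \endcode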
+ */
+CINDEX_LINKAGE CXTranslationUnit clang_parseTranslationUnit(CXIndex CIdx,
+ const char *source_filename,
+ const char * const *command_line_args,
+ int num_command_line_args,
+ struct CXUnsavedFile *unsaved_files,
+ unsigned num_unsaved_files,
+ unsigned options);
+
+/**
+ * \brief Flags that control how translation units are saved.
+ *
+ * The enumerators in this enumeration type are meant to be bitwise
+ * ORed together to specify which options should be used when
+ * saving the translation unit.
+ */
+enum CXSaveTranslationUnit_Flags {
+ /**
+ * \brief Used to indicate that no special saving options are needed.
+ */
+ CXSaveTranslationUnit_None = 0x0
+};
+
+/**
+ * \brief Returns the set of flags that is suitable for saving a translation
+ * unit.
+ *
+ * The set of flags returned provides options for
+ * \c clang_saveTranslationUnit() by default. The returned flag
+ * set contains an unspecified set of options that save translation units with
+ * the most commonly-requested data.
+ */
+CINDEX_LINKAGE unsigned clang_defaultSaveOptions(CXTranslationUnit TU);
+
+/**
+ * \brief Describes the kind of error that occurred (if any) in a call to
+ * \c clang_saveTranslationUnit().
+ */
+enum CXSaveError {
+ /**
+ * \brief Indicates that no error occurred while saving a translation unit.
+ */
+ CXSaveError_None = 0,
+
+ /**
+ * \brief Indicates that an unknown error occurred while attempting to save
+ * the file.
+ *
+ * This error typically indicates that file I/O failed when attempting to
+ * write the file.
+ */
+ CXSaveError_Unknown = 1,
+
+ /**
+ * \brief Indicates that errors during translation prevented this attempt
+ * to save the translation unit.
+ *
+ * Errors that prevent the translation unit from being saved can be
+ * extracted using \c clang_getNumDiagnostics() and \c clang_getDiagnostic().
+ */
+ CXSaveError_TranslationErrors = 2,
+
+ /**
+ * \brief Indicates that the translation unit to be saved was somehow
+ * invalid (e.g., NULL).
+ */
+ CXSaveError_InvalidTU = 3
+};
+
+/**
+ * \brief Saves a translation unit into a serialized representation of
+ * that translation unit on disk.
+ *
+ * Any translation unit that was parsed without error can be saved
+ * into a file. The translation unit can then be deserialized into a
+ * new \c CXTranslationUnit with \c clang_createTranslationUnit() or,
+ * if it is an incomplete translation unit that corresponds to a
+ * header, used as a precompiled header when parsing other translation
+ * units.
+ *
+ * \param TU The translation unit to save.
+ *
+ * \param FileName The file to which the translation unit will be saved.
+ *
+ * \param options A bitmask of options that affects how the translation unit
+ * is saved. This should be a bitwise OR of the
+ * CXSaveTranslationUnit_XXX flags.
+ *
+ * \returns A value that will match one of the enumerators of the CXSaveError
+ * enumeration. Zero (CXSaveError_None) indicates that the translation unit was
+ * saved successfully, while a non-zero value indicates that a problem occurred.
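+ *
+ * For example (a minimal sketch; \c TU is assumed to be a parsed translation
+ * unit):
+ *
+ * \code
+ * if (clang_saveTranslationUnit(TU, "t.ast", clang_defaultSaveOptions(TU))
+ *     != CXSaveError_None)
+ *   fprintf(stderr, "failed to save 't.ast'\n");
+ * \endcode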
+ */
+CINDEX_LINKAGE int clang_saveTranslationUnit(CXTranslationUnit TU,
+ const char *FileName,
+ unsigned options);
+
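+/* A minimal usage sketch (the file names and the "-I." flag are placeholders;
+ * clang_createIndex(), clang_disposeIndex() and CXTranslationUnit_None are
+ * declared earlier in this header): parse a source file, serialize the
+ * resulting translation unit, and dispose of it.
+ *
+ *   CXIndex idx = clang_createIndex(0, 0);
+ *   const char *args[] = { "-I." };
+ *   CXTranslationUnit tu =
+ *     clang_parseTranslationUnit(idx, "input.c", args, 1, 0, 0,
+ *                                CXTranslationUnit_None);
+ *   if (tu) {
+ *     if (clang_saveTranslationUnit(tu, "input.ast",
+ *                                   clang_defaultSaveOptions(tu)) !=
+ *         CXSaveError_None) {
+ *       // Inspect the diagnostics to see why saving failed.
+ *     }
+ *     clang_disposeTranslationUnit(tu);
+ *   }
+ *   clang_disposeIndex(idx);
+ */
+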
+/**
+ * \brief Destroy the specified CXTranslationUnit object.
+ */
+CINDEX_LINKAGE void clang_disposeTranslationUnit(CXTranslationUnit);
+
+/**
+ * \brief Flags that control the reparsing of translation units.
+ *
+ * The enumerators in this enumeration type are meant to be bitwise
+ * ORed together to specify which options should be used when
+ * reparsing the translation unit.
+ */
+enum CXReparse_Flags {
+ /**
+ * \brief Used to indicate that no special reparsing options are needed.
+ */
+ CXReparse_None = 0x0
+};
+
+/**
+ * \brief Returns the set of flags that is suitable for reparsing a translation
+ * unit.
+ *
+ * The set of flags returned provides options for
+ * \c clang_reparseTranslationUnit() by default. The returned flag
+ * set contains an unspecified set of optimizations geared toward common uses
+ * of reparsing. The set of optimizations enabled may change from one version
+ * to the next.
+ */
+CINDEX_LINKAGE unsigned clang_defaultReparseOptions(CXTranslationUnit TU);
+
+/**
+ * \brief Reparse the source files that produced this translation unit.
+ *
+ * This routine can be used to re-parse the source files that originally
+ * created the given translation unit, for example because those source files
+ * have changed (either on disk or as passed via \p unsaved_files). The
+ * source code will be reparsed with the same command-line options that were
+ * used when it was originally parsed.
+ *
+ * Reparsing a translation unit invalidates all cursors and source locations
+ * that refer into that translation unit. This makes reparsing a translation
+ * unit semantically equivalent to destroying the translation unit and then
+ * creating a new translation unit with the same command-line arguments.
+ * However, it may be more efficient to reparse a translation
+ * unit using this routine.
+ *
+ * \param TU The translation unit whose contents will be re-parsed. The
+ * translation unit must originally have been built with
+ * \c clang_createTranslationUnitFromSourceFile().
+ *
+ * \param num_unsaved_files The number of unsaved file entries in \p
+ * unsaved_files.
+ *
+ * \param unsaved_files The files that have not yet been saved to disk
+ * but may be required for parsing, including the contents of
+ * those files. The contents and name of these files (as specified by
+ * CXUnsavedFile) are copied when necessary, so the client only needs to
+ * guarantee their validity until the call to this function returns.
+ *
+ * \param options A bitset of options composed of the flags in CXReparse_Flags.
+ * The function \c clang_defaultReparseOptions() produces a default set of
+ * options recommended for most uses, based on the translation unit.
+ *
+ * \returns 0 if the sources could be reparsed. A non-zero value will be
+ * returned if reparsing was impossible, such that the translation unit is
+ * invalid. In such cases, the only valid call for \p TU is
+ * \c clang_disposeTranslationUnit(TU).
+ */
+CINDEX_LINKAGE int clang_reparseTranslationUnit(CXTranslationUnit TU,
+ unsigned num_unsaved_files,
+ struct CXUnsavedFile *unsaved_files,
+ unsigned options);
+
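+/* An illustrative sketch (tu is a previously parsed translation unit, the
+ * buffer contents are placeholders, strlen() comes from <string.h>, and
+ * CXUnsavedFile is declared earlier in this header): reparse against an
+ * in-memory buffer that has not been saved to disk.
+ *
+ *   struct CXUnsavedFile unsaved;
+ *   unsaved.Filename = "input.c";
+ *   unsaved.Contents = "int main(void) { return 0; }";
+ *   unsaved.Length = strlen(unsaved.Contents);
+ *   if (clang_reparseTranslationUnit(tu, 1, &unsaved,
+ *                                    clang_defaultReparseOptions(tu)) != 0) {
+ *     // The translation unit is no longer valid; it must be disposed.
+ *     clang_disposeTranslationUnit(tu);
+ *   }
+ */
+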
+/**
+ * \brief Categorizes how memory is being used by a translation unit.
+ */
+enum CXTUResourceUsageKind {
+ CXTUResourceUsage_AST = 1,
+ CXTUResourceUsage_Identifiers = 2,
+ CXTUResourceUsage_Selectors = 3,
+ CXTUResourceUsage_GlobalCompletionResults = 4,
+ CXTUResourceUsage_SourceManagerContentCache = 5,
+ CXTUResourceUsage_AST_SideTables = 6,
+ CXTUResourceUsage_SourceManager_Membuffer_Malloc = 7,
+ CXTUResourceUsage_SourceManager_Membuffer_MMap = 8,
+ CXTUResourceUsage_ExternalASTSource_Membuffer_Malloc = 9,
+ CXTUResourceUsage_ExternalASTSource_Membuffer_MMap = 10,
+ CXTUResourceUsage_Preprocessor = 11,
+ CXTUResourceUsage_PreprocessingRecord = 12,
+ CXTUResourceUsage_SourceManager_DataStructures = 13,
+ CXTUResourceUsage_Preprocessor_HeaderSearch = 14,
+ CXTUResourceUsage_MEMORY_IN_BYTES_BEGIN = CXTUResourceUsage_AST,
+ CXTUResourceUsage_MEMORY_IN_BYTES_END =
+ CXTUResourceUsage_Preprocessor_HeaderSearch,
+
+ CXTUResourceUsage_First = CXTUResourceUsage_AST,
+ CXTUResourceUsage_Last = CXTUResourceUsage_Preprocessor_HeaderSearch
+};
+
+/**
+ * \brief Returns the human-readable null-terminated C string that represents
+ * the name of the memory category. This string should never be freed.
+ */
+CINDEX_LINKAGE
+const char *clang_getTUResourceUsageName(enum CXTUResourceUsageKind kind);
+
+typedef struct CXTUResourceUsageEntry {
+ /* \brief The memory usage category. */
+ enum CXTUResourceUsageKind kind;
+ /* \brief Amount of resources used.
+ The units will depend on the resource kind. */
+ unsigned long amount;
+} CXTUResourceUsageEntry;
+
+/**
+ * \brief The memory usage of a CXTranslationUnit, broken into categories.
+ */
+typedef struct CXTUResourceUsage {
+ /* \brief Private data member, used for queries. */
+ void *data;
+
+ /* \brief The number of entries in the 'entries' array. */
+ unsigned numEntries;
+
+ /* \brief An array of key-value pairs, representing the breakdown of memory
+ usage. */
+ CXTUResourceUsageEntry *entries;
+
+} CXTUResourceUsage;
+
+/**
+ * \brief Return the memory usage of a translation unit. This object
+ * should be released with clang_disposeCXTUResourceUsage().
+ */
+CINDEX_LINKAGE CXTUResourceUsage clang_getCXTUResourceUsage(CXTranslationUnit TU);
+
+CINDEX_LINKAGE void clang_disposeCXTUResourceUsage(CXTUResourceUsage usage);
+
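+/* An illustrative sketch (tu is a valid translation unit; printf() is used
+ * only for illustration): print every memory category tracked for a
+ * translation unit.
+ *
+ *   CXTUResourceUsage usage = clang_getCXTUResourceUsage(tu);
+ *   unsigned i;
+ *   for (i = 0; i < usage.numEntries; ++i)
+ *     printf("%s: %lu\n",
+ *            clang_getTUResourceUsageName(usage.entries[i].kind),
+ *            usage.entries[i].amount);
+ *   clang_disposeCXTUResourceUsage(usage);
+ */
+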
+/**
+ * @}
+ */
+
+/**
+ * \brief Describes the kind of entity that a cursor refers to.
+ */
+enum CXCursorKind {
+ /* Declarations */
+ /**
+ * \brief A declaration whose specific kind is not exposed via this
+ * interface.
+ *
+ * Unexposed declarations have the same operations as any other kind
+ * of declaration; one can extract their location information,
+ * spelling, find their definitions, etc. However, the specific kind
+ * of the declaration is not reported.
+ */
+ CXCursor_UnexposedDecl = 1,
+ /** \brief A C or C++ struct. */
+ CXCursor_StructDecl = 2,
+ /** \brief A C or C++ union. */
+ CXCursor_UnionDecl = 3,
+ /** \brief A C++ class. */
+ CXCursor_ClassDecl = 4,
+ /** \brief An enumeration. */
+ CXCursor_EnumDecl = 5,
+ /**
+ * \brief A field (in C) or non-static data member (in C++) in a
+ * struct, union, or C++ class.
+ */
+ CXCursor_FieldDecl = 6,
+ /** \brief An enumerator constant. */
+ CXCursor_EnumConstantDecl = 7,
+ /** \brief A function. */
+ CXCursor_FunctionDecl = 8,
+ /** \brief A variable. */
+ CXCursor_VarDecl = 9,
+ /** \brief A function or method parameter. */
+ CXCursor_ParmDecl = 10,
+ /** \brief An Objective-C @interface. */
+ CXCursor_ObjCInterfaceDecl = 11,
+ /** \brief An Objective-C @interface for a category. */
+ CXCursor_ObjCCategoryDecl = 12,
+ /** \brief An Objective-C @protocol declaration. */
+ CXCursor_ObjCProtocolDecl = 13,
+ /** \brief An Objective-C @property declaration. */
+ CXCursor_ObjCPropertyDecl = 14,
+ /** \brief An Objective-C instance variable. */
+ CXCursor_ObjCIvarDecl = 15,
+ /** \brief An Objective-C instance method. */
+ CXCursor_ObjCInstanceMethodDecl = 16,
+ /** \brief An Objective-C class method. */
+ CXCursor_ObjCClassMethodDecl = 17,
+ /** \brief An Objective-C @implementation. */
+ CXCursor_ObjCImplementationDecl = 18,
+ /** \brief An Objective-C @implementation for a category. */
+ CXCursor_ObjCCategoryImplDecl = 19,
+ /** \brief A typedef */
+ CXCursor_TypedefDecl = 20,
+ /** \brief A C++ class method. */
+ CXCursor_CXXMethod = 21,
+ /** \brief A C++ namespace. */
+ CXCursor_Namespace = 22,
+ /** \brief A linkage specification, e.g. 'extern "C"'. */
+ CXCursor_LinkageSpec = 23,
+ /** \brief A C++ constructor. */
+ CXCursor_Constructor = 24,
+ /** \brief A C++ destructor. */
+ CXCursor_Destructor = 25,
+ /** \brief A C++ conversion function. */
+ CXCursor_ConversionFunction = 26,
+ /** \brief A C++ template type parameter. */
+ CXCursor_TemplateTypeParameter = 27,
+ /** \brief A C++ non-type template parameter. */
+ CXCursor_NonTypeTemplateParameter = 28,
+ /** \brief A C++ template template parameter. */
+ CXCursor_TemplateTemplateParameter = 29,
+ /** \brief A C++ function template. */
+ CXCursor_FunctionTemplate = 30,
+ /** \brief A C++ class template. */
+ CXCursor_ClassTemplate = 31,
+ /** \brief A C++ class template partial specialization. */
+ CXCursor_ClassTemplatePartialSpecialization = 32,
+ /** \brief A C++ namespace alias declaration. */
+ CXCursor_NamespaceAlias = 33,
+ /** \brief A C++ using directive. */
+ CXCursor_UsingDirective = 34,
+ /** \brief A C++ using declaration. */
+ CXCursor_UsingDeclaration = 35,
+ /** \brief A C++ alias declaration */
+ CXCursor_TypeAliasDecl = 36,
+ /** \brief An Objective-C @synthesize definition. */
+ CXCursor_ObjCSynthesizeDecl = 37,
+ /** \brief An Objective-C @dynamic definition. */
+ CXCursor_ObjCDynamicDecl = 38,
+ /** \brief An access specifier. */
+ CXCursor_CXXAccessSpecifier = 39,
+
+ CXCursor_FirstDecl = CXCursor_UnexposedDecl,
+ CXCursor_LastDecl = CXCursor_CXXAccessSpecifier,
+
+ /* References */
+ CXCursor_FirstRef = 40, /* Decl references */
+ CXCursor_ObjCSuperClassRef = 40,
+ CXCursor_ObjCProtocolRef = 41,
+ CXCursor_ObjCClassRef = 42,
+ /**
+ * \brief A reference to a type declaration.
+ *
+ * A type reference occurs anywhere where a type is named but not
+ * declared. For example, given:
+ *
+ * \code
+ * typedef unsigned size_type;
+ * size_type size;
+ * \endcode
+ *
+ * The typedef is a declaration of size_type (CXCursor_TypedefDecl),
+ * while the type of the variable "size" is referenced. The cursor
+ * referenced by the type of size is the typedef for size_type.
+ */
+ CXCursor_TypeRef = 43,
+ CXCursor_CXXBaseSpecifier = 44,
+ /**
+ * \brief A reference to a class template, function template, template
+ * template parameter, or class template partial specialization.
+ */
+ CXCursor_TemplateRef = 45,
+ /**
+ * \brief A reference to a namespace or namespace alias.
+ */
+ CXCursor_NamespaceRef = 46,
+ /**
+ * \brief A reference to a member of a struct, union, or class that occurs in
+ * some non-expression context, e.g., a designated initializer.
+ */
+ CXCursor_MemberRef = 47,
+ /**
+ * \brief A reference to a labeled statement.
+ *
+ * This cursor kind is used to describe the jump to "start_over" in the
+ * goto statement in the following example:
+ *
+ * \code
+ * start_over:
+ * ++counter;
+ *
+ * goto start_over;
+ * \endcode
+ *
+ * A label reference cursor refers to a label statement.
+ */
+ CXCursor_LabelRef = 48,
+
+ /**
+ * \brief A reference to a set of overloaded functions or function templates
+ * that has not yet been resolved to a specific function or function template.
+ *
+ * An overloaded declaration reference cursor occurs in C++ templates where
+ * a dependent name refers to a function. For example:
+ *
+ * \code
+ * template<typename T> void swap(T&, T&);
+ *
+ * struct X { ... };
+ * void swap(X&, X&);
+ *
+ * template<typename T>
+ * void reverse(T* first, T* last) {
+ * while (first < last - 1) {
+ * swap(*first, *--last);
+ * ++first;
+ * }
+ * }
+ *
+ * struct Y { };
+ * void swap(Y&, Y&);
+ * \endcode
+ *
+ * Here, the identifier "swap" is associated with an overloaded declaration
+ * reference. In the template definition, "swap" refers to either of the two
+ * "swap" functions declared above, so both results will be available. At
+ * instantiation time, "swap" may also refer to other functions found via
+ * argument-dependent lookup (e.g., the "swap" function at the end of the
+ * example).
+ *
+ * The functions \c clang_getNumOverloadedDecls() and
+ * \c clang_getOverloadedDecl() can be used to retrieve the definitions
+ * referenced by this cursor.
+ */
+ CXCursor_OverloadedDeclRef = 49,
+
+ /**
+ * \brief A reference to a variable that occurs in some non-expression
+ * context, e.g., a C++ lambda capture list.
+ */
+ CXCursor_VariableRef = 50,
+
+ CXCursor_LastRef = CXCursor_VariableRef,
+
+ /* Error conditions */
+ CXCursor_FirstInvalid = 70,
+ CXCursor_InvalidFile = 70,
+ CXCursor_NoDeclFound = 71,
+ CXCursor_NotImplemented = 72,
+ CXCursor_InvalidCode = 73,
+ CXCursor_LastInvalid = CXCursor_InvalidCode,
+
+ /* Expressions */
+ CXCursor_FirstExpr = 100,
+
+ /**
+ * \brief An expression whose specific kind is not exposed via this
+ * interface.
+ *
+ * Unexposed expressions have the same operations as any other kind
+ * of expression; one can extract their location information,
+ * spelling, children, etc. However, the specific kind of the
+ * expression is not reported.
+ */
+ CXCursor_UnexposedExpr = 100,
+
+ /**
+ * \brief An expression that refers to some value declaration, such
+   * as a function, variable, or enumerator.
+ */
+ CXCursor_DeclRefExpr = 101,
+
+ /**
+ * \brief An expression that refers to a member of a struct, union,
+ * class, Objective-C class, etc.
+ */
+ CXCursor_MemberRefExpr = 102,
+
+ /** \brief An expression that calls a function. */
+ CXCursor_CallExpr = 103,
+
+ /** \brief An expression that sends a message to an Objective-C
+ object or class. */
+ CXCursor_ObjCMessageExpr = 104,
+
+ /** \brief An expression that represents a block literal. */
+ CXCursor_BlockExpr = 105,
+
+ /** \brief An integer literal.
+ */
+ CXCursor_IntegerLiteral = 106,
+
+ /** \brief A floating point number literal.
+ */
+ CXCursor_FloatingLiteral = 107,
+
+ /** \brief An imaginary number literal.
+ */
+ CXCursor_ImaginaryLiteral = 108,
+
+ /** \brief A string literal.
+ */
+ CXCursor_StringLiteral = 109,
+
+ /** \brief A character literal.
+ */
+ CXCursor_CharacterLiteral = 110,
+
+ /** \brief A parenthesized expression, e.g. "(1)".
+ *
+ * This AST node is only formed if full location information is requested.
+ */
+ CXCursor_ParenExpr = 111,
+
+  /** \brief This represents a unary expression (except sizeof and
+   * alignof).
+ */
+ CXCursor_UnaryOperator = 112,
+
+ /** \brief [C99 6.5.2.1] Array Subscripting.
+ */
+ CXCursor_ArraySubscriptExpr = 113,
+
+ /** \brief A builtin binary operation expression such as "x + y" or
+ * "x <= y".
+ */
+ CXCursor_BinaryOperator = 114,
+
+ /** \brief Compound assignment such as "+=".
+ */
+ CXCursor_CompoundAssignOperator = 115,
+
+ /** \brief The ?: ternary operator.
+ */
+ CXCursor_ConditionalOperator = 116,
+
+ /** \brief An explicit cast in C (C99 6.5.4) or a C-style cast in C++
+ * (C++ [expr.cast]), which uses the syntax (Type)expr.
+ *
+ * For example: (int)f.
+ */
+ CXCursor_CStyleCastExpr = 117,
+
+ /** \brief [C99 6.5.2.5]
+ */
+ CXCursor_CompoundLiteralExpr = 118,
+
+  /** \brief Describes a C or C++ initializer list.
+ */
+ CXCursor_InitListExpr = 119,
+
+ /** \brief The GNU address of label extension, representing &&label.
+ */
+ CXCursor_AddrLabelExpr = 120,
+
+ /** \brief This is the GNU Statement Expression extension: ({int X=4; X;})
+ */
+ CXCursor_StmtExpr = 121,
+
+ /** \brief Represents a C11 generic selection.
+ */
+ CXCursor_GenericSelectionExpr = 122,
+
+ /** \brief Implements the GNU __null extension, which is a name for a null
+ * pointer constant that has integral type (e.g., int or long) and is the same
+ * size and alignment as a pointer.
+ *
+ * The __null extension is typically only used by system headers, which define
+ * NULL as __null in C++ rather than using 0 (which is an integer that may not
+ * match the size of a pointer).
+ */
+ CXCursor_GNUNullExpr = 123,
+
+ /** \brief C++'s static_cast<> expression.
+ */
+ CXCursor_CXXStaticCastExpr = 124,
+
+ /** \brief C++'s dynamic_cast<> expression.
+ */
+ CXCursor_CXXDynamicCastExpr = 125,
+
+ /** \brief C++'s reinterpret_cast<> expression.
+ */
+ CXCursor_CXXReinterpretCastExpr = 126,
+
+ /** \brief C++'s const_cast<> expression.
+ */
+ CXCursor_CXXConstCastExpr = 127,
+
+ /** \brief Represents an explicit C++ type conversion that uses "functional"
+   * notation (C++ [expr.type.conv]).
+ *
+ * Example:
+ * \code
+ * x = int(0.5);
+ * \endcode
+ */
+ CXCursor_CXXFunctionalCastExpr = 128,
+
+ /** \brief A C++ typeid expression (C++ [expr.typeid]).
+ */
+ CXCursor_CXXTypeidExpr = 129,
+
+ /** \brief [C++ 2.13.5] C++ Boolean Literal.
+ */
+ CXCursor_CXXBoolLiteralExpr = 130,
+
+ /** \brief [C++0x 2.14.7] C++ Pointer Literal.
+ */
+ CXCursor_CXXNullPtrLiteralExpr = 131,
+
+ /** \brief Represents the "this" expression in C++
+ */
+ CXCursor_CXXThisExpr = 132,
+
+ /** \brief [C++ 15] C++ Throw Expression.
+ *
+ * This handles 'throw' and 'throw' assignment-expression. When
+ * assignment-expression isn't present, Op will be null.
+ */
+ CXCursor_CXXThrowExpr = 133,
+
+  /** \brief A new expression for memory allocation and constructor calls, e.g.,
+ * "new CXXNewExpr(foo)".
+ */
+ CXCursor_CXXNewExpr = 134,
+
+ /** \brief A delete expression for memory deallocation and destructor calls,
+ * e.g. "delete[] pArray".
+ */
+ CXCursor_CXXDeleteExpr = 135,
+
+ /** \brief A unary expression.
+ */
+ CXCursor_UnaryExpr = 136,
+
+  /** \brief An Objective-C string literal, e.g. @"foo".
+ */
+ CXCursor_ObjCStringLiteral = 137,
+
+ /** \brief An Objective-C @encode expression.
+ */
+ CXCursor_ObjCEncodeExpr = 138,
+
+ /** \brief An Objective-C @selector expression.
+ */
+ CXCursor_ObjCSelectorExpr = 139,
+
+ /** \brief An Objective-C @protocol expression.
+ */
+ CXCursor_ObjCProtocolExpr = 140,
+
+ /** \brief An Objective-C "bridged" cast expression, which casts between
+ * Objective-C pointers and C pointers, transferring ownership in the process.
+ *
+ * \code
+ * NSString *str = (__bridge_transfer NSString *)CFCreateString();
+ * \endcode
+ */
+ CXCursor_ObjCBridgedCastExpr = 141,
+
+ /** \brief Represents a C++0x pack expansion that produces a sequence of
+ * expressions.
+ *
+ * A pack expansion expression contains a pattern (which itself is an
+ * expression) followed by an ellipsis. For example:
+ *
+ * \code
+ * template<typename F, typename ...Types>
+ * void forward(F f, Types &&...args) {
+ * f(static_cast<Types&&>(args)...);
+ * }
+ * \endcode
+ */
+ CXCursor_PackExpansionExpr = 142,
+
+ /** \brief Represents an expression that computes the length of a parameter
+ * pack.
+ *
+ * \code
+ * template<typename ...Types>
+ * struct count {
+ * static const unsigned value = sizeof...(Types);
+ * };
+ * \endcode
+ */
+ CXCursor_SizeOfPackExpr = 143,
+
+  /** \brief Represents a C++ lambda expression that produces a local function
+ * object.
+ *
+ * \code
+ * void abssort(float *x, unsigned N) {
+ * std::sort(x, x + N,
+ * [](float a, float b) {
+ * return std::abs(a) < std::abs(b);
+ * });
+ * }
+ * \endcode
+ */
+ CXCursor_LambdaExpr = 144,
+
+  /** \brief An Objective-C Boolean literal.
+ */
+ CXCursor_ObjCBoolLiteralExpr = 145,
+
+ CXCursor_LastExpr = CXCursor_ObjCBoolLiteralExpr,
+
+ /* Statements */
+ CXCursor_FirstStmt = 200,
+ /**
+ * \brief A statement whose specific kind is not exposed via this
+ * interface.
+ *
+ * Unexposed statements have the same operations as any other kind of
+ * statement; one can extract their location information, spelling,
+ * children, etc. However, the specific kind of the statement is not
+ * reported.
+ */
+ CXCursor_UnexposedStmt = 200,
+
+ /** \brief A labelled statement in a function.
+ *
+ * This cursor kind is used to describe the "start_over:" label statement in
+ * the following example:
+ *
+ * \code
+ * start_over:
+ * ++counter;
+ * \endcode
+ *
+ */
+ CXCursor_LabelStmt = 201,
+
+ /** \brief A group of statements like { stmt stmt }.
+ *
+ * This cursor kind is used to describe compound statements, e.g. function
+ * bodies.
+ */
+ CXCursor_CompoundStmt = 202,
+
+  /** \brief A case statement.
+ */
+ CXCursor_CaseStmt = 203,
+
+ /** \brief A default statement.
+ */
+ CXCursor_DefaultStmt = 204,
+
+ /** \brief An if statement
+ */
+ CXCursor_IfStmt = 205,
+
+ /** \brief A switch statement.
+ */
+ CXCursor_SwitchStmt = 206,
+
+ /** \brief A while statement.
+ */
+ CXCursor_WhileStmt = 207,
+
+ /** \brief A do statement.
+ */
+ CXCursor_DoStmt = 208,
+
+ /** \brief A for statement.
+ */
+ CXCursor_ForStmt = 209,
+
+ /** \brief A goto statement.
+ */
+ CXCursor_GotoStmt = 210,
+
+ /** \brief An indirect goto statement.
+ */
+ CXCursor_IndirectGotoStmt = 211,
+
+ /** \brief A continue statement.
+ */
+ CXCursor_ContinueStmt = 212,
+
+ /** \brief A break statement.
+ */
+ CXCursor_BreakStmt = 213,
+
+ /** \brief A return statement.
+ */
+ CXCursor_ReturnStmt = 214,
+
+ /** \brief A GNU inline assembly statement extension.
+ */
+ CXCursor_AsmStmt = 215,
+
+ /** \brief Objective-C's overall @try-@catch-@finally statement.
+ */
+ CXCursor_ObjCAtTryStmt = 216,
+
+ /** \brief Objective-C's @catch statement.
+ */
+ CXCursor_ObjCAtCatchStmt = 217,
+
+ /** \brief Objective-C's @finally statement.
+ */
+ CXCursor_ObjCAtFinallyStmt = 218,
+
+ /** \brief Objective-C's @throw statement.
+ */
+ CXCursor_ObjCAtThrowStmt = 219,
+
+ /** \brief Objective-C's @synchronized statement.
+ */
+ CXCursor_ObjCAtSynchronizedStmt = 220,
+
+ /** \brief Objective-C's autorelease pool statement.
+ */
+ CXCursor_ObjCAutoreleasePoolStmt = 221,
+
+ /** \brief Objective-C's collection statement.
+ */
+ CXCursor_ObjCForCollectionStmt = 222,
+
+ /** \brief C++'s catch statement.
+ */
+ CXCursor_CXXCatchStmt = 223,
+
+ /** \brief C++'s try statement.
+ */
+ CXCursor_CXXTryStmt = 224,
+
+  /** \brief C++'s range-based for (* : *) statement.
+ */
+ CXCursor_CXXForRangeStmt = 225,
+
+ /** \brief Windows Structured Exception Handling's try statement.
+ */
+ CXCursor_SEHTryStmt = 226,
+
+ /** \brief Windows Structured Exception Handling's except statement.
+ */
+ CXCursor_SEHExceptStmt = 227,
+
+ /** \brief Windows Structured Exception Handling's finally statement.
+ */
+ CXCursor_SEHFinallyStmt = 228,
+
+  /** \brief The null statement ";": C99 6.8.3p3.
+ *
+ * This cursor kind is used to describe the null statement.
+ */
+ CXCursor_NullStmt = 230,
+
+ /** \brief Adaptor class for mixing declarations with statements and
+ * expressions.
+ */
+ CXCursor_DeclStmt = 231,
+
+ CXCursor_LastStmt = CXCursor_DeclStmt,
+
+ /**
+ * \brief Cursor that represents the translation unit itself.
+ *
+ * The translation unit cursor exists primarily to act as the root
+ * cursor for traversing the contents of a translation unit.
+ */
+ CXCursor_TranslationUnit = 300,
+
+ /* Attributes */
+ CXCursor_FirstAttr = 400,
+ /**
+ * \brief An attribute whose specific kind is not exposed via this
+ * interface.
+ */
+ CXCursor_UnexposedAttr = 400,
+
+ CXCursor_IBActionAttr = 401,
+ CXCursor_IBOutletAttr = 402,
+ CXCursor_IBOutletCollectionAttr = 403,
+ CXCursor_CXXFinalAttr = 404,
+ CXCursor_CXXOverrideAttr = 405,
+ CXCursor_AnnotateAttr = 406,
+ CXCursor_AsmLabelAttr = 407,
+ CXCursor_LastAttr = CXCursor_AsmLabelAttr,
+
+ /* Preprocessing */
+ CXCursor_PreprocessingDirective = 500,
+ CXCursor_MacroDefinition = 501,
+ CXCursor_MacroExpansion = 502,
+ CXCursor_MacroInstantiation = CXCursor_MacroExpansion,
+ CXCursor_InclusionDirective = 503,
+ CXCursor_FirstPreprocessing = CXCursor_PreprocessingDirective,
+ CXCursor_LastPreprocessing = CXCursor_InclusionDirective
+};
+
+/**
+ * \brief A cursor representing some element in the abstract syntax tree for
+ * a translation unit.
+ *
+ * The cursor abstraction unifies the different kinds of entities in a
+ * program--declarations, statements, expressions, references to declarations,
+ * etc.--under a single "cursor" abstraction with a common set of operations.
+ * Common operations on a cursor include: getting the physical location in
+ * a source file where the cursor points, getting the name associated with a
+ * cursor, and retrieving cursors for any child nodes of a particular cursor.
+ *
+ * Cursors can be produced in two specific ways.
+ * clang_getTranslationUnitCursor() produces a cursor for a translation unit,
+ * from which one can use clang_visitChildren() to explore the rest of the
+ * translation unit. clang_getCursor() maps from a physical source location
+ * to the entity that resides at that location, allowing one to map from the
+ * source code into the AST.
+ */
+typedef struct {
+ enum CXCursorKind kind;
+ int xdata;
+ void *data[3];
+} CXCursor;
+
+/**
+ * \defgroup CINDEX_CURSOR_MANIP Cursor manipulations
+ *
+ * @{
+ */
+
+/**
+ * \brief Retrieve the NULL cursor, which represents no entity.
+ */
+CINDEX_LINKAGE CXCursor clang_getNullCursor(void);
+
+/**
+ * \brief Retrieve the cursor that represents the given translation unit.
+ *
+ * The translation unit cursor can be used to start traversing the
+ * various declarations within the given translation unit.
+ */
+CINDEX_LINKAGE CXCursor clang_getTranslationUnitCursor(CXTranslationUnit);
+
+/**
+ * \brief Determine whether two cursors are equivalent.
+ */
+CINDEX_LINKAGE unsigned clang_equalCursors(CXCursor, CXCursor);
+
+/**
+ * \brief Returns non-zero if \arg cursor is null.
+ */
+CINDEX_LINKAGE int clang_Cursor_isNull(CXCursor);
+
+/**
+ * \brief Compute a hash value for the given cursor.
+ */
+CINDEX_LINKAGE unsigned clang_hashCursor(CXCursor);
+
+/**
+ * \brief Retrieve the kind of the given cursor.
+ */
+CINDEX_LINKAGE enum CXCursorKind clang_getCursorKind(CXCursor);
+
+/**
+ * \brief Determine whether the given cursor kind represents a declaration.
+ */
+CINDEX_LINKAGE unsigned clang_isDeclaration(enum CXCursorKind);
+
+/**
+ * \brief Determine whether the given cursor kind represents a simple
+ * reference.
+ *
+ * Note that other kinds of cursors (such as expressions) can also refer to
+ * other cursors. Use clang_getCursorReferenced() to determine whether a
+ * particular cursor refers to another entity.
+ */
+CINDEX_LINKAGE unsigned clang_isReference(enum CXCursorKind);
+
+/**
+ * \brief Determine whether the given cursor kind represents an expression.
+ */
+CINDEX_LINKAGE unsigned clang_isExpression(enum CXCursorKind);
+
+/**
+ * \brief Determine whether the given cursor kind represents a statement.
+ */
+CINDEX_LINKAGE unsigned clang_isStatement(enum CXCursorKind);
+
+/**
+ * \brief Determine whether the given cursor kind represents an attribute.
+ */
+CINDEX_LINKAGE unsigned clang_isAttribute(enum CXCursorKind);
+
+/**
+ * \brief Determine whether the given cursor kind represents an invalid
+ * cursor.
+ */
+CINDEX_LINKAGE unsigned clang_isInvalid(enum CXCursorKind);
+
+/**
+ * \brief Determine whether the given cursor kind represents a translation
+ * unit.
+ */
+CINDEX_LINKAGE unsigned clang_isTranslationUnit(enum CXCursorKind);
+
+/***
+ * \brief Determine whether the given cursor represents a preprocessing
+ * element, such as a preprocessor directive or macro instantiation.
+ */
+CINDEX_LINKAGE unsigned clang_isPreprocessing(enum CXCursorKind);
+
+/***
+ * \brief Determine whether the given cursor represents a currently
+ * unexposed piece of the AST (e.g., CXCursor_UnexposedStmt).
+ */
+CINDEX_LINKAGE unsigned clang_isUnexposed(enum CXCursorKind);
+
+/**
+ * \brief Describe the linkage of the entity referred to by a cursor.
+ */
+enum CXLinkageKind {
+ /** \brief This value indicates that no linkage information is available
+ * for a provided CXCursor. */
+ CXLinkage_Invalid,
+ /**
+ * \brief This is the linkage for variables, parameters, and so on that
+ * have automatic storage. This covers normal (non-extern) local variables.
+ */
+ CXLinkage_NoLinkage,
+ /** \brief This is the linkage for static variables and static functions. */
+ CXLinkage_Internal,
+ /** \brief This is the linkage for entities with external linkage that live
+ * in C++ anonymous namespaces.*/
+ CXLinkage_UniqueExternal,
+ /** \brief This is the linkage for entities with true, external linkage. */
+ CXLinkage_External
+};
+
+/**
+ * \brief Determine the linkage of the entity referred to by a given cursor.
+ */
+CINDEX_LINKAGE enum CXLinkageKind clang_getCursorLinkage(CXCursor cursor);
+
+/**
+ * \brief Determine the availability of the entity that this cursor refers to.
+ *
+ * \param cursor The cursor to query.
+ *
+ * \returns The availability of the cursor.
+ */
+CINDEX_LINKAGE enum CXAvailabilityKind
+clang_getCursorAvailability(CXCursor cursor);
+
+/**
+ * \brief Describe the "language" of the entity referred to by a cursor.
+ */
+enum CXLanguageKind {
+ CXLanguage_Invalid = 0,
+ CXLanguage_C,
+ CXLanguage_ObjC,
+ CXLanguage_CPlusPlus
+};
+
+/**
+ * \brief Determine the "language" of the entity referred to by a given cursor.
+ */
+CINDEX_LINKAGE enum CXLanguageKind clang_getCursorLanguage(CXCursor cursor);
+
+/**
+ * \brief Returns the translation unit that a cursor originated from.
+ */
+CINDEX_LINKAGE CXTranslationUnit clang_Cursor_getTranslationUnit(CXCursor);
+
+
+/**
+ * \brief A fast container representing a set of CXCursors.
+ */
+typedef struct CXCursorSetImpl *CXCursorSet;
+
+/**
+ * \brief Creates an empty CXCursorSet.
+ */
+CINDEX_LINKAGE CXCursorSet clang_createCXCursorSet(void);
+
+/**
+ * \brief Disposes a CXCursorSet and releases its associated memory.
+ */
+CINDEX_LINKAGE void clang_disposeCXCursorSet(CXCursorSet cset);
+
+/**
+ * \brief Queries a CXCursorSet to see if it contains a specific CXCursor.
+ *
+ * \returns non-zero if the set contains the specified cursor.
+*/
+CINDEX_LINKAGE unsigned clang_CXCursorSet_contains(CXCursorSet cset,
+ CXCursor cursor);
+
+/**
+ * \brief Inserts a CXCursor into a CXCursorSet.
+ *
+ * \returns zero if the CXCursor was already in the set, and non-zero otherwise.
+*/
+CINDEX_LINKAGE unsigned clang_CXCursorSet_insert(CXCursorSet cset,
+ CXCursor cursor);
+
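+/* An illustrative sketch (c is any cursor obtained elsewhere): use a cursor
+ * set to process each distinct cursor only once.
+ *
+ *   CXCursorSet seen = clang_createCXCursorSet();
+ *   if (clang_CXCursorSet_insert(seen, c)) {
+ *     // First time this cursor has been encountered; process it here.
+ *   }
+ *   // clang_CXCursorSet_contains(seen, c) is now non-zero.
+ *   clang_disposeCXCursorSet(seen);
+ */
+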
+/**
+ * \brief Determine the semantic parent of the given cursor.
+ *
+ * The semantic parent of a cursor is the cursor that semantically contains
+ * the given \p cursor. For many declarations, the lexical and semantic parents
+ * are equivalent (the lexical parent is returned by
+ * \c clang_getCursorLexicalParent()). They diverge when declarations or
+ * definitions are provided out-of-line. For example:
+ *
+ * \code
+ * class C {
+ * void f();
+ * };
+ *
+ * void C::f() { }
+ * \endcode
+ *
+ * In the out-of-line definition of \c C::f, the semantic parent is the
+ * class \c C, of which this function is a member. The lexical parent is
+ * the place where the declaration actually occurs in the source code; in this
+ * case, the definition occurs in the translation unit. In general, the
+ * lexical parent for a given entity can change without affecting the semantics
+ * of the program, and the lexical parent of different declarations of the
+ * same entity may be different. Changing the semantic parent of a declaration,
+ * on the other hand, can have a major impact on semantics, and redeclarations
+ * of a particular entity should all have the same semantic context.
+ *
+ * In the example above, both declarations of \c C::f have \c C as their
+ * semantic context, while the lexical context of the first \c C::f is \c C
+ * and the lexical context of the second \c C::f is the translation unit.
+ *
+ * For global declarations, the semantic parent is the translation unit.
+ */
+CINDEX_LINKAGE CXCursor clang_getCursorSemanticParent(CXCursor cursor);
+
+/**
+ * \brief Determine the lexical parent of the given cursor.
+ *
+ * The lexical parent of a cursor is the cursor in which the given \p cursor
+ * was actually written. For many declarations, the lexical and semantic parents
+ * are equivalent (the semantic parent is returned by
+ * \c clang_getCursorSemanticParent()). They diverge when declarations or
+ * definitions are provided out-of-line. For example:
+ *
+ * \code
+ * class C {
+ * void f();
+ * };
+ *
+ * void C::f() { }
+ * \endcode
+ *
+ * In the out-of-line definition of \c C::f, the semantic parent is the
+ * class \c C, of which this function is a member. The lexical parent is
+ * the place where the declaration actually occurs in the source code; in this
+ * case, the definition occurs in the translation unit. In general, the
+ * lexical parent for a given entity can change without affecting the semantics
+ * of the program, and the lexical parent of different declarations of the
+ * same entity may be different. Changing the semantic parent of a declaration,
+ * on the other hand, can have a major impact on semantics, and redeclarations
+ * of a particular entity should all have the same semantic context.
+ *
+ * In the example above, both declarations of \c C::f have \c C as their
+ * semantic context, while the lexical context of the first \c C::f is \c C
+ * and the lexical context of the second \c C::f is the translation unit.
+ *
+ * For declarations written in the global scope, the lexical parent is
+ * the translation unit.
+ */
+CINDEX_LINKAGE CXCursor clang_getCursorLexicalParent(CXCursor cursor);
+
+/**
+ * \brief Determine the set of methods that are overridden by the given
+ * method.
+ *
+ * In both Objective-C and C++, a method (a.k.a. a virtual member function
+ * in C++) can override a virtual method in a base class. For
+ * Objective-C, a method is said to override any method in the class's
+ * base class, its protocols, or its categories' protocols, that has the same
+ * selector and is of the same kind (class or instance).
+ * If no such method exists, the search continues to the class's superclass,
+ * its protocols, and its categories, and so on. A method from an Objective-C
+ * implementation is considered to override the same methods as its
+ * corresponding method in the interface.
+ *
+ * For C++, a virtual member function overrides any virtual member
+ * function with the same signature that occurs in its base
+ * classes. With multiple inheritance, a virtual member function can
+ * override several virtual member functions coming from different
+ * base classes.
+ *
+ * In all cases, this function determines the immediate overridden
+ * method, rather than all of the overridden methods. For example, if
+ * a method is originally declared in a class A, then overridden in B
+ * (which inherits from A) and also in C (which inherits from B),
+ * then the only overridden method returned from this function when
+ * invoked on C's method will be B's method. The client may then
+ * invoke this function again, given the previously-found overridden
+ * methods, to map out the complete method-override set.
+ *
+ * \param cursor A cursor representing an Objective-C or C++
+ * method. This routine will compute the set of methods that this
+ * method overrides.
+ *
+ * \param overridden A pointer whose pointee will be replaced with a
+ * pointer to an array of cursors, representing the set of overridden
+ * methods. If there are no overridden methods, the pointee will be
+ * set to NULL. The pointee must be freed via a call to
+ * \c clang_disposeOverriddenCursors().
+ *
+ * \param num_overridden A pointer to the number of overridden
+ * functions; it will be set to the number of overridden functions in the
+ * array pointed to by \p overridden.
+ */
+CINDEX_LINKAGE void clang_getOverriddenCursors(CXCursor cursor,
+ CXCursor **overridden,
+ unsigned *num_overridden);
+
+/**
+ * \brief Free the set of overridden cursors returned by \c
+ * clang_getOverriddenCursors().
+ */
+CINDEX_LINKAGE void clang_disposeOverriddenCursors(CXCursor *overridden);
+
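+/* An illustrative sketch (method is a cursor for an Objective-C or C++
+ * method): walk the immediately overridden methods.
+ *
+ *   CXCursor *overridden = 0;
+ *   unsigned num_overridden = 0, i;
+ *   clang_getOverriddenCursors(method, &overridden, &num_overridden);
+ *   for (i = 0; i < num_overridden; ++i) {
+ *     // Inspect overridden[i], e.g. with clang_getCursorSpelling().
+ *   }
+ *   if (overridden)
+ *     clang_disposeOverriddenCursors(overridden);
+ */
+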
+/**
+ * \brief Retrieve the file that is included by the given inclusion directive
+ * cursor.
+ */
+CINDEX_LINKAGE CXFile clang_getIncludedFile(CXCursor cursor);
+
+/**
+ * @}
+ */
+
+/**
+ * \defgroup CINDEX_CURSOR_SOURCE Mapping between cursors and source code
+ *
+ * Cursors represent a location within the Abstract Syntax Tree (AST). These
+ * routines help map between cursors and the physical locations where the
+ * described entities occur in the source code. The mapping is provided in
+ * both directions, so one can map from source code to the AST and back.
+ *
+ * @{
+ */
+
+/**
+ * \brief Map a source location to the cursor that describes the entity at that
+ * location in the source code.
+ *
+ * clang_getCursor() maps an arbitrary source location within a translation
+ * unit down to the most specific cursor that describes the entity at that
+ * location. For example, given an expression \c x + y, invoking
+ * clang_getCursor() with a source location pointing to "x" will return the
+ * cursor for "x"; similarly for "y". If the source location points anywhere
+ * between "x" and "y" (e.g., on the + or the whitespace around it),
+ * clang_getCursor() will return a cursor referring to the "+" expression.
+ *
+ * \returns a cursor representing the entity at the given source location, or
+ * a NULL cursor if no such entity can be found.
+ */
+CINDEX_LINKAGE CXCursor clang_getCursor(CXTranslationUnit, CXSourceLocation);
+
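+/* An illustrative sketch (tu is a valid translation unit, the file name and
+ * position are placeholders, and clang_getFile() and clang_getLocation() are
+ * declared earlier in this header): map a file/line/column position to a
+ * cursor.
+ *
+ *   CXFile file = clang_getFile(tu, "input.c");
+ *   CXSourceLocation loc = clang_getLocation(tu, file, 10, 4);
+ *   CXCursor c = clang_getCursor(tu, loc);
+ *   if (!clang_Cursor_isNull(c)) {
+ *     // Inspect the cursor, e.g. clang_getCursorKind(c).
+ *   }
+ */
+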
+/**
+ * \brief Retrieve the physical location of the source construct referenced
+ * by the given cursor.
+ *
+ * The location of a declaration is typically the location of the name of that
+ * declaration (or, for an unnamed declaration, the place where its name would
+ * occur), or of some keyword that introduces that particular declaration.
+ * The location of a reference is where that reference occurs within the
+ * source code.
+ */
+CINDEX_LINKAGE CXSourceLocation clang_getCursorLocation(CXCursor);
+
+/**
+ * \brief Retrieve the physical extent of the source construct referenced by
+ * the given cursor.
+ *
+ * The extent of a cursor starts with the file/line/column pointing at the
+ * first character within the source construct that the cursor refers to and
+ * ends with the last character within that source construct. For a
+ * declaration, the extent covers the declaration itself. For a reference,
+ * the extent covers the location of the reference (e.g., where the referenced
+ * entity was actually used).
+ */
+CINDEX_LINKAGE CXSourceRange clang_getCursorExtent(CXCursor);
+
+/**
+ * @}
+ */
+
+/**
+ * \defgroup CINDEX_TYPES Type information for CXCursors
+ *
+ * @{
+ */
+
+/**
+ * \brief Describes the kind of type
+ */
+enum CXTypeKind {
+ /**
+   * \brief Represents an invalid type (e.g., where no type is available).
+ */
+ CXType_Invalid = 0,
+
+ /**
+ * \brief A type whose specific kind is not exposed via this
+ * interface.
+ */
+ CXType_Unexposed = 1,
+
+ /* Builtin types */
+ CXType_Void = 2,
+ CXType_Bool = 3,
+ CXType_Char_U = 4,
+ CXType_UChar = 5,
+ CXType_Char16 = 6,
+ CXType_Char32 = 7,
+ CXType_UShort = 8,
+ CXType_UInt = 9,
+ CXType_ULong = 10,
+ CXType_ULongLong = 11,
+ CXType_UInt128 = 12,
+ CXType_Char_S = 13,
+ CXType_SChar = 14,
+ CXType_WChar = 15,
+ CXType_Short = 16,
+ CXType_Int = 17,
+ CXType_Long = 18,
+ CXType_LongLong = 19,
+ CXType_Int128 = 20,
+ CXType_Float = 21,
+ CXType_Double = 22,
+ CXType_LongDouble = 23,
+ CXType_NullPtr = 24,
+ CXType_Overload = 25,
+ CXType_Dependent = 26,
+ CXType_ObjCId = 27,
+ CXType_ObjCClass = 28,
+ CXType_ObjCSel = 29,
+ CXType_FirstBuiltin = CXType_Void,
+ CXType_LastBuiltin = CXType_ObjCSel,
+
+ CXType_Complex = 100,
+ CXType_Pointer = 101,
+ CXType_BlockPointer = 102,
+ CXType_LValueReference = 103,
+ CXType_RValueReference = 104,
+ CXType_Record = 105,
+ CXType_Enum = 106,
+ CXType_Typedef = 107,
+ CXType_ObjCInterface = 108,
+ CXType_ObjCObjectPointer = 109,
+ CXType_FunctionNoProto = 110,
+ CXType_FunctionProto = 111,
+ CXType_ConstantArray = 112,
+ CXType_Vector = 113
+};
+
+/**
+ * \brief Describes the calling convention of a function type
+ */
+enum CXCallingConv {
+ CXCallingConv_Default = 0,
+ CXCallingConv_C = 1,
+ CXCallingConv_X86StdCall = 2,
+ CXCallingConv_X86FastCall = 3,
+ CXCallingConv_X86ThisCall = 4,
+ CXCallingConv_X86Pascal = 5,
+ CXCallingConv_AAPCS = 6,
+ CXCallingConv_AAPCS_VFP = 7,
+
+ CXCallingConv_Invalid = 100,
+ CXCallingConv_Unexposed = 200
+};
+
+
+/**
+ * \brief The type of an element in the abstract syntax tree.
+ *
+ */
+typedef struct {
+ enum CXTypeKind kind;
+ void *data[2];
+} CXType;
+
+/**
+ * \brief Retrieve the type of a CXCursor (if any).
+ */
+CINDEX_LINKAGE CXType clang_getCursorType(CXCursor C);
+
+/**
+ * \brief Retrieve the underlying type of a typedef declaration.
+ *
+ * If the cursor does not reference a typedef declaration, an invalid type is
+ * returned.
+ */
+CINDEX_LINKAGE CXType clang_getTypedefDeclUnderlyingType(CXCursor C);
+
+/**
+ * \brief Retrieve the integer type of an enum declaration.
+ *
+ * If the cursor does not reference an enum declaration, an invalid type is
+ * returned.
+ */
+CINDEX_LINKAGE CXType clang_getEnumDeclIntegerType(CXCursor C);
+
+/**
+ * \brief Retrieve the integer value of an enum constant declaration as a signed
+ * long long.
+ *
+ * If the cursor does not reference an enum constant declaration, LLONG_MIN is returned.
+ * Since this is also potentially a valid constant value, the kind of the cursor
+ * must be verified before calling this function.
+ */
+CINDEX_LINKAGE long long clang_getEnumConstantDeclValue(CXCursor C);
+
+/**
+ * \brief Retrieve the integer value of an enum constant declaration as an unsigned
+ * long long.
+ *
+ * If the cursor does not reference an enum constant declaration, ULLONG_MAX is returned.
+ * Since this is also potentially a valid constant value, the kind of the cursor
+ * must be verified before calling this function.
+ */
+CINDEX_LINKAGE unsigned long long clang_getEnumConstantDeclUnsignedValue(CXCursor C);
+
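+/* An illustrative sketch (c is a cursor obtained elsewhere): check the cursor
+ * kind before trusting the returned constant value.
+ *
+ *   if (clang_getCursorKind(c) == CXCursor_EnumConstantDecl) {
+ *     long long value = clang_getEnumConstantDeclValue(c);
+ *     // 'value' is meaningful only because the kind was verified first.
+ *   }
+ */
+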
+/**
+ * \brief Retrieve the number of non-variadic arguments associated with a given
+ * cursor.
+ *
+ * If a cursor that is not a function or method is passed in, -1 is returned.
+ */
+CINDEX_LINKAGE int clang_Cursor_getNumArguments(CXCursor C);
+
+/**
+ * \brief Retrieve the argument cursor of a function or method.
+ *
+ * If a cursor that is not a function or method is passed in or the index
+ * exceeds the number of arguments, an invalid cursor is returned.
+ */
+CINDEX_LINKAGE CXCursor clang_Cursor_getArgument(CXCursor C, unsigned i);
+
+/**
+ * \brief Determine whether two CXTypes represent the same type.
+ *
+ * \returns non-zero if the CXTypes represent the same type and
+ * zero otherwise.
+ */
+CINDEX_LINKAGE unsigned clang_equalTypes(CXType A, CXType B);
+
+/**
+ * \brief Return the canonical type for a CXType.
+ *
+ * Clang's type system explicitly models typedefs and all the ways
+ * a specific type can be represented. The canonical type is the underlying
+ * type with all the "sugar" removed. For example, if 'T' is a typedef
+ * for 'int', the canonical type for 'T' would be 'int'.
+ */
+CINDEX_LINKAGE CXType clang_getCanonicalType(CXType T);
+
+/**
+ * \brief Determine whether a CXType has the "const" qualifier set,
+ * without looking through typedefs that may have added "const" at a different level.
+ */
+CINDEX_LINKAGE unsigned clang_isConstQualifiedType(CXType T);
+
+/**
+ * \brief Determine whether a CXType has the "volatile" qualifier set,
+ * without looking through typedefs that may have added "volatile" at a different level.
+ */
+CINDEX_LINKAGE unsigned clang_isVolatileQualifiedType(CXType T);
+
+/**
+ * \brief Determine whether a CXType has the "restrict" qualifier set,
+ * without looking through typedefs that may have added "restrict" at a different level.
+ */
+CINDEX_LINKAGE unsigned clang_isRestrictQualifiedType(CXType T);
+
+/**
+ * \brief For pointer types, returns the type of the pointee.
+ *
+ */
+CINDEX_LINKAGE CXType clang_getPointeeType(CXType T);
+
+/**
+ * \brief Return the cursor for the declaration of the given type.
+ */
+CINDEX_LINKAGE CXCursor clang_getTypeDeclaration(CXType T);
+
+/**
+ * Returns the Objective-C type encoding for the specified declaration.
+ */
+CINDEX_LINKAGE CXString clang_getDeclObjCTypeEncoding(CXCursor C);
+
+/**
+ * \brief Retrieve the spelling of a given CXTypeKind.
+ */
+CINDEX_LINKAGE CXString clang_getTypeKindSpelling(enum CXTypeKind K);
+
+/**
+ * \brief Retrieve the calling convention associated with a function type.
+ *
+ * If a non-function type is passed in, CXCallingConv_Invalid is returned.
+ */
+CINDEX_LINKAGE enum CXCallingConv clang_getFunctionTypeCallingConv(CXType T);
+
+/**
+ * \brief Retrieve the result type associated with a function type.
+ *
+ * If a non-function type is passed in, an invalid type is returned.
+ */
+CINDEX_LINKAGE CXType clang_getResultType(CXType T);
+
+/**
+ * \brief Retrieve the number of non-variadic arguments associated with a function type.
+ *
+ * If a non-function type is passed in, -1 is returned.
+ */
+CINDEX_LINKAGE int clang_getNumArgTypes(CXType T);
+
+/**
+ * \brief Retrieve the type of an argument of a function type.
+ *
+ * If a non-function type is passed in or the function does not have enough parameters,
+ * an invalid type is returned.
+ */
+CINDEX_LINKAGE CXType clang_getArgType(CXType T, unsigned i);
+
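+/* An illustrative sketch (fn is a cursor for a function declaration, printf()
+ * is used only for illustration, and clang_getCString()/clang_disposeString()
+ * come from clang-c/CXString.h): inspect the result type and argument types
+ * of a function.
+ *
+ *   CXType fnType = clang_getCursorType(fn);
+ *   CXString result =
+ *     clang_getTypeKindSpelling(clang_getResultType(fnType).kind);
+ *   int i, numArgs = clang_getNumArgTypes(fnType);
+ *   printf("returns %s, takes %d argument(s)\n",
+ *          clang_getCString(result), numArgs);
+ *   clang_disposeString(result);
+ *   for (i = 0; i < numArgs; ++i) {
+ *     CXType arg = clang_getArgType(fnType, (unsigned)i);
+ *     // Inspect arg.kind, clang_getCanonicalType(arg), and so on.
+ *   }
+ */
+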
+/**
+ * \brief Return 1 if the CXType is a variadic function type, and 0 otherwise.
+ *
+ */
+CINDEX_LINKAGE unsigned clang_isFunctionTypeVariadic(CXType T);
+
+/**
+ * \brief Retrieve the result type associated with a given cursor.
+ *
+ * This only returns a valid type if the cursor refers to a function or method.
+ */
+CINDEX_LINKAGE CXType clang_getCursorResultType(CXCursor C);
+
+/**
+ * \brief Return 1 if the CXType is a POD (plain old data) type, and 0
+ * otherwise.
+ */
+CINDEX_LINKAGE unsigned clang_isPODType(CXType T);
+
+/**
+ * \brief Return the element type of an array, complex, or vector type.
+ *
+ * If a type is passed in that is not an array, complex, or vector type,
+ * an invalid type is returned.
+ */
+CINDEX_LINKAGE CXType clang_getElementType(CXType T);
+
+/**
+ * \brief Return the number of elements of an array or vector type.
+ *
+ * If a type is passed in that is not an array or vector type,
+ * -1 is returned.
+ */
+CINDEX_LINKAGE long long clang_getNumElements(CXType T);
+
+/**
+ * \brief Return the element type of an array type.
+ *
+ * If a non-array type is passed in, an invalid type is returned.
+ */
+CINDEX_LINKAGE CXType clang_getArrayElementType(CXType T);
+
+/**
+ * \brief Return the array size of a constant array.
+ *
+ * If a non-array type is passed in, -1 is returned.
+ */
+CINDEX_LINKAGE long long clang_getArraySize(CXType T);
+
+/**
+ * \brief Returns 1 if the base class specified by the cursor with kind
+ * CXCursor_CXXBaseSpecifier is virtual.
+ */
+CINDEX_LINKAGE unsigned clang_isVirtualBase(CXCursor);
+
+/**
+ * \brief Represents the C++ access control level to a base class for a
+ * cursor with kind CXCursor_CXXBaseSpecifier.
+ */
+enum CX_CXXAccessSpecifier {
+ CX_CXXInvalidAccessSpecifier,
+ CX_CXXPublic,
+ CX_CXXProtected,
+ CX_CXXPrivate
+};
+
+/**
+ * \brief Returns the access control level for the C++ base specifier
+ * represented by a cursor with kind CXCursor_CXXBaseSpecifier or
+ * CXCursor_CXXAccessSpecifier.
+ */
+CINDEX_LINKAGE enum CX_CXXAccessSpecifier clang_getCXXAccessSpecifier(CXCursor);
+
+/**
+ * \brief Determine the number of overloaded declarations referenced by a
+ * \c CXCursor_OverloadedDeclRef cursor.
+ *
+ * \param cursor The cursor whose overloaded declarations are being queried.
+ *
+ * \returns The number of overloaded declarations referenced by \c cursor. If it
+ * is not a \c CXCursor_OverloadedDeclRef cursor, returns 0.
+ */
+CINDEX_LINKAGE unsigned clang_getNumOverloadedDecls(CXCursor cursor);
+
+/**
+ * \brief Retrieve a cursor for one of the overloaded declarations referenced
+ * by a \c CXCursor_OverloadedDeclRef cursor.
+ *
+ * \param cursor The cursor whose overloaded declarations are being queried.
+ *
+ * \param index The zero-based index into the set of overloaded declarations in
+ * the cursor.
+ *
+ * \returns A cursor representing the declaration referenced by the given
+ * \c cursor at the specified \c index. If the cursor does not have an
+ * associated set of overloaded declarations, or if the index is out of bounds,
+ * returns \c clang_getNullCursor().
+ */
+CINDEX_LINKAGE CXCursor clang_getOverloadedDecl(CXCursor cursor,
+ unsigned index);
+
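+/* An illustrative sketch (ref is a cursor of kind CXCursor_OverloadedDeclRef):
+ * enumerate the candidate declarations.
+ *
+ *   unsigned i, n = clang_getNumOverloadedDecls(ref);
+ *   for (i = 0; i < n; ++i) {
+ *     CXCursor decl = clang_getOverloadedDecl(ref, i);
+ *     // Each 'decl' is one candidate function or function template.
+ *   }
+ */
+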
+/**
+ * @}
+ */
+
+/**
+ * \defgroup CINDEX_ATTRIBUTES Information for attributes
+ *
+ * @{
+ */
+
+
+/**
+ * \brief For cursors representing an iboutletcollection attribute,
+ * this function returns the collection element type.
+ *
+ */
+CINDEX_LINKAGE CXType clang_getIBOutletCollectionType(CXCursor);
+
+/**
+ * @}
+ */
+
+/**
+ * \defgroup CINDEX_CURSOR_TRAVERSAL Traversing the AST with cursors
+ *
+ * These routines provide the ability to traverse the abstract syntax tree
+ * using cursors.
+ *
+ * @{
+ */
+
+/**
+ * \brief Describes how the traversal of the children of a particular
+ * cursor should proceed after visiting a particular child cursor.
+ *
+ * A value of this enumeration type should be returned by each
+ * \c CXCursorVisitor to indicate how clang_visitChildren() should proceed.
+ */
+enum CXChildVisitResult {
+ /**
+ * \brief Terminates the cursor traversal.
+ */
+ CXChildVisit_Break,
+ /**
+ * \brief Continues the cursor traversal with the next sibling of
+ * the cursor just visited, without visiting its children.
+ */
+ CXChildVisit_Continue,
+ /**
+ * \brief Recursively traverse the children of this cursor, using
+ * the same visitor and client data.
+ */
+ CXChildVisit_Recurse
+};
+
+/**
+ * \brief Visitor invoked for each cursor found by a traversal.
+ *
+ * This visitor function will be invoked for each cursor found by
+ * clang_visitChildren(). Its first argument is the cursor being
+ * visited, its second argument is the parent of that cursor,
+ * and its third argument is the client data provided to
+ * clang_visitChildren().
+ *
+ * The visitor should return one of the \c CXChildVisitResult values
+ * to direct clang_visitCursorChildren().
+ */
+typedef enum CXChildVisitResult (*CXCursorVisitor)(CXCursor cursor,
+ CXCursor parent,
+ CXClientData client_data);
+
+/**
+ * \brief Visit the children of a particular cursor.
+ *
+ * This function visits all the direct children of the given cursor,
+ * invoking the given \p visitor function with the cursors of each
+ * visited child. The traversal may be recursive, if the visitor returns
+ * \c CXChildVisit_Recurse. The traversal may also be ended prematurely, if
+ * the visitor returns \c CXChildVisit_Break.
+ *
+ * \param parent the cursor whose children may be visited. All kinds of
+ * cursors can be visited, including invalid cursors (which, by
+ * definition, have no children).
+ *
+ * \param visitor the visitor function that will be invoked for each
+ * child of \p parent.
+ *
+ * \param client_data pointer data supplied by the client, which will
+ * be passed to the visitor each time it is invoked.
+ *
+ * \returns a non-zero value if the traversal was terminated
+ * prematurely by the visitor returning \c CXChildVisit_Break.
+ */
+CINDEX_LINKAGE unsigned clang_visitChildren(CXCursor parent,
+ CXCursorVisitor visitor,
+ CXClientData client_data);
+#ifdef __has_feature
+# if __has_feature(blocks)
+/**
+ * \brief Visitor invoked for each cursor found by a traversal.
+ *
+ * This visitor block will be invoked for each cursor found by
+ * clang_visitChildrenWithBlock(). Its first argument is the cursor being
+ * visited, and its second argument is the parent of that cursor.
+ *
+ * The visitor should return one of the \c CXChildVisitResult values
+ * to direct clang_visitChildrenWithBlock().
+ */
+typedef enum CXChildVisitResult
+ (^CXCursorVisitorBlock)(CXCursor cursor, CXCursor parent);
+
+/**
+ * Visits the children of a cursor using the specified block. Behaves
+ * identically to clang_visitChildren() in all other respects.
+ */
+CINDEX_LINKAGE unsigned clang_visitChildrenWithBlock(CXCursor parent,
+ CXCursorVisitorBlock block);
+# endif
+#endif
+
+/**
+ * @}
+ */
+
+/**
+ * \defgroup CINDEX_CURSOR_XREF Cross-referencing in the AST
+ *
+ * These routines provide the ability to determine references within and
+ * across translation units, by providing the names of the entities referenced
+ * by cursors, follow reference cursors to the declarations they reference,
+ * and associate declarations with their definitions.
+ *
+ * @{
+ */
+
+/**
+ * \brief Retrieve a Unified Symbol Resolution (USR) for the entity referenced
+ * by the given cursor.
+ *
+ * A Unified Symbol Resolution (USR) is a string that identifies a particular
+ * entity (function, class, variable, etc.) within a program. USRs can be
+ * compared across translation units to determine, e.g., when references in
+ * one translation unit refer to an entity defined in another translation unit.
+ */
+CINDEX_LINKAGE CXString clang_getCursorUSR(CXCursor);
+
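+/* An illustrative sketch (a and b are cursors, possibly from different
+ * translation units; clang_getCString()/clang_disposeString() come from
+ * clang-c/CXString.h and strcmp() from <string.h>): decide whether two
+ * cursors refer to the same entity by comparing their USRs.
+ *
+ *   CXString ua = clang_getCursorUSR(a), ub = clang_getCursorUSR(b);
+ *   int same_entity = strcmp(clang_getCString(ua), clang_getCString(ub)) == 0;
+ *   clang_disposeString(ua);
+ *   clang_disposeString(ub);
+ */
+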
+/**
+ * \brief Construct a USR for a specified Objective-C class.
+ */
+CINDEX_LINKAGE CXString clang_constructUSR_ObjCClass(const char *class_name);
+
+/**
+ * \brief Construct a USR for a specified Objective-C category.
+ */
+CINDEX_LINKAGE CXString
+ clang_constructUSR_ObjCCategory(const char *class_name,
+ const char *category_name);
+
+/**
+ * \brief Construct a USR for a specified Objective-C protocol.
+ */
+CINDEX_LINKAGE CXString
+ clang_constructUSR_ObjCProtocol(const char *protocol_name);
+
+
+/**
+ * \brief Construct a USR for a specified Objective-C instance variable and
+ * the USR for its containing class.
+ */
+CINDEX_LINKAGE CXString clang_constructUSR_ObjCIvar(const char *name,
+ CXString classUSR);
+
+/**
+ * \brief Construct a USR for a specified Objective-C method and
+ * the USR for its containing class.
+ */
+CINDEX_LINKAGE CXString clang_constructUSR_ObjCMethod(const char *name,
+ unsigned isInstanceMethod,
+ CXString classUSR);
+
+/**
+ * \brief Construct a USR for a specified Objective-C property and the USR
+ * for its containing class.
+ */
+CINDEX_LINKAGE CXString clang_constructUSR_ObjCProperty(const char *property,
+ CXString classUSR);
+
+/**
+ * \brief Retrieve a name for the entity referenced by this cursor.
+ */
+CINDEX_LINKAGE CXString clang_getCursorSpelling(CXCursor);
+
+/**
+ * \brief Retrieve a range for a piece that forms the cursor's spelling name.
+ * Most of the time there is only one range for the complete spelling, but for
+ * Objective-C methods and Objective-C message expressions there are multiple
+ * pieces, one for each selector identifier.
+ *
+ * \param pieceIndex the index of the spelling name piece. If this is greater
+ * than the actual number of pieces, it will return a NULL (invalid) range.
+ *
+ * \param options Reserved.
+ */
+CINDEX_LINKAGE CXSourceRange clang_Cursor_getSpellingNameRange(CXCursor,
+ unsigned pieceIndex,
+ unsigned options);
+
+/**
+ * \brief Retrieve the display name for the entity referenced by this cursor.
+ *
+ * The display name contains extra information that helps identify the cursor,
+ * such as the parameters of a function or template or the arguments of a
+ * class template specialization.
+ */
+CINDEX_LINKAGE CXString clang_getCursorDisplayName(CXCursor);
+
+/** \brief For a cursor that is a reference, retrieve a cursor representing the
+ * entity that it references.
+ *
+ * Reference cursors refer to other entities in the AST. For example, an
+ * Objective-C superclass reference cursor refers to an Objective-C class.
+ * This function produces the cursor for the Objective-C class from the
+ * cursor for the superclass reference. If the input cursor is a declaration or
+ * definition, it returns that declaration or definition unchanged.
+ * Otherwise, returns the NULL cursor.
+ */
+CINDEX_LINKAGE CXCursor clang_getCursorReferenced(CXCursor);
+
+/**
+ * \brief For a cursor that is either a reference to or a declaration
+ * of some entity, retrieve a cursor that describes the definition of
+ * that entity.
+ *
+ * Some entities can be declared multiple times within a translation
+ * unit, but only one of those declarations can also be a
+ * definition. For example, given:
+ *
+ * \code
+ * int f(int, int);
+ * int g(int x, int y) { return f(x, y); }
+ * int f(int a, int b) { return a + b; }
+ * int f(int, int);
+ * \endcode
+ *
+ * there are three declarations of the function "f", but only the
+ * second one is a definition. The clang_getCursorDefinition()
+ * function will take any cursor pointing to a declaration of "f"
+ * (the first or fourth lines of the example) or a cursor for a reference
+ * that uses "f" (the call to "f" inside "g") and will return a
+ * declaration cursor pointing to the definition (the second "f"
+ * declaration).
+ *
+ * If given a cursor for which there is no corresponding definition,
+ * e.g., because there is no definition of that entity within this
+ * translation unit, returns a NULL cursor.
+ */
+CINDEX_LINKAGE CXCursor clang_getCursorDefinition(CXCursor);
+
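+/* An illustrative sketch (use is a cursor for a reference to, or a
+ * declaration of, some entity): jump from a use to the entity's definition.
+ *
+ *   CXCursor def = clang_getCursorDefinition(use);
+ *   if (!clang_Cursor_isNull(def)) {
+ *     CXSourceLocation where = clang_getCursorLocation(def);
+ *     // Map 'where' back to file/line/column with the source-location API.
+ *   }
+ */
+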
+/**
+ * \brief Determine whether the declaration pointed to by this cursor
+ * is also a definition of that entity.
+ */
+CINDEX_LINKAGE unsigned clang_isCursorDefinition(CXCursor);
+
+/**
+ * \brief Retrieve the canonical cursor corresponding to the given cursor.
+ *
+ * In the C family of languages, many kinds of entities can be declared several
+ * times within a single translation unit. For example, a structure type can
+ * be forward-declared (possibly multiple times) and later defined:
+ *
+ * \code
+ * struct X;
+ * struct X;
+ * struct X {
+ * int member;
+ * };
+ * \endcode
+ *
+ * The declarations and the definition of \c X are represented by three
+ * different cursors, all of which are declarations of the same underlying
+ * entity. One of these cursors is considered the "canonical" cursor, which
+ * is effectively the representative for the underlying entity. One can
+ * determine if two cursors are declarations of the same underlying entity by
+ * comparing their canonical cursors.
+ *
+ * \returns The canonical cursor for the entity referred to by the given cursor.
+ */
+CINDEX_LINKAGE CXCursor clang_getCanonicalCursor(CXCursor);
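+
+/*
+ * An illustrative sketch, not part of the API: two cursors \c c1 and \c c2
+ * (obtained earlier, e.g. via clang_getCursor()) refer to the same underlying
+ * entity exactly when their canonical cursors compare equal.
+ *
+ * \code
+ * int sameEntity(CXCursor c1, CXCursor c2) {
+ *   return clang_equalCursors(clang_getCanonicalCursor(c1),
+ *                             clang_getCanonicalCursor(c2));
+ * }
+ * \endcode
+ */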
+
+
+/**
+ * \brief If the cursor points to a selector identifier in an Objective-C
+ * method or message expression, this returns the selector index.
+ *
+ * After getting a cursor with \see clang_getCursor, this can be called to
+ * determine if the location points to a selector identifier.
+ *
+ * \returns The selector index if the cursor is an Objective-C method or message
+ * expression and the cursor is pointing to a selector identifier, or -1
+ * otherwise.
+ */
+CINDEX_LINKAGE int clang_Cursor_getObjCSelectorIndex(CXCursor);
+
+/**
+ * @}
+ */
+
+/**
+ * \defgroup CINDEX_CPP C++ AST introspection
+ *
+ * The routines in this group provide access to information in the ASTs specific
+ * to C++ language features.
+ *
+ * @{
+ */
+
+/**
+ * \brief Determine if a C++ member function or member function template is
+ * declared 'static'.
+ */
+CINDEX_LINKAGE unsigned clang_CXXMethod_isStatic(CXCursor C);
+
+/**
+ * \brief Determine if a C++ member function or member function template is
+ * explicitly declared 'virtual' or if it overrides a virtual method from
+ * one of the base classes.
+ */
+CINDEX_LINKAGE unsigned clang_CXXMethod_isVirtual(CXCursor C);
+
+/**
+ * \brief Given a cursor that represents a template, determine
+ * the cursor kind of the specializations that would be generated by
+ * instantiating the template.
+ *
+ * This routine can be used to determine what flavor of function template,
+ * class template, or class template partial specialization is stored in the
+ * cursor. For example, it can describe whether a class template cursor is
+ * declared with "struct", "class" or "union".
+ *
+ * \param C The cursor to query. This cursor should represent a template
+ * declaration.
+ *
+ * \returns The cursor kind of the specializations that would be generated
+ * by instantiating the template \p C. If \p C is not a template, returns
+ * \c CXCursor_NoDeclFound.
+ */
+CINDEX_LINKAGE enum CXCursorKind clang_getTemplateCursorKind(CXCursor C);
+
+/**
+ * \brief Given a cursor that may represent a specialization or instantiation
+ * of a template, retrieve the cursor that represents the template that it
+ * specializes or from which it was instantiated.
+ *
+ * This routine determines the template involved both for explicit
+ * specializations of templates and for implicit instantiations of the template,
+ * both of which are referred to as "specializations". For a class template
+ * specialization (e.g., \c std::vector<bool>), this routine will return
+ * either the primary template (\c std::vector) or, if the specialization was
+ * instantiated from a class template partial specialization, the class template
+ * partial specialization. For a class template partial specialization and a
+ * function template specialization (including instantiations), this
+ * routine will return the specialized template.
+ *
+ * For members of a class template (e.g., member functions, member classes, or
+ * static data members), returns the specialized or instantiated member.
+ * Although not strictly "templates" in the C++ language, members of class
+ * templates have the same notions of specializations and instantiations that
+ * templates do, so this routine treats them similarly.
+ *
+ * \param C A cursor that may be a specialization of a template or a member
+ * of a template.
+ *
+ * \returns If the given cursor is a specialization or instantiation of a
+ * template or a member thereof, the template or member that it specializes or
+ * from which it was instantiated. Otherwise, returns a NULL cursor.
+ */
+CINDEX_LINKAGE CXCursor clang_getSpecializedCursorTemplate(CXCursor C);
+
+/**
+ * \brief Given a cursor that references something else, return the source range
+ * covering that reference.
+ *
+ * \param C A cursor pointing to a member reference, a declaration reference, or
+ * an operator call.
+ * \param NameFlags A bitset with three independent flags:
+ * CXNameRange_WantQualifier, CXNameRange_WantTemplateArgs, and
+ * CXNameRange_WantSinglePiece.
+ * \param PieceIndex For contiguous names or when passing the flag
+ * CXNameRange_WantSinglePiece, only one piece with index 0 is
+ * available. When the CXNameRange_WantSinglePiece flag is not passed for
+ * non-contiguous names, this index can be used to retrieve the individual
+ * pieces of the name. See also CXNameRange_WantSinglePiece.
+ *
+ * \returns The piece of the name pointed to by the given cursor. If there is no
+ * name, or if the PieceIndex is out-of-range, an invalid (null) range is
+ * returned.
+ */
+CINDEX_LINKAGE CXSourceRange clang_getCursorReferenceNameRange(CXCursor C,
+ unsigned NameFlags,
+ unsigned PieceIndex);
+
+enum CXNameRefFlags {
+ /**
+ * \brief Include the nested-name-specifier, e.g. Foo:: in x.Foo::y, in the
+ * range.
+ */
+ CXNameRange_WantQualifier = 0x1,
+
+ /**
+ * \brief Include the explicit template arguments, e.g. <int> in x.f<int>, in
+ * the range.
+ */
+ CXNameRange_WantTemplateArgs = 0x2,
+
+ /**
+ * \brief If the name is non-contiguous, return the full spanning range.
+ *
+ * Non-contiguous names occur in Objective-C when a selector with two or more
+ * parameters is used, or in C++ when using an operator:
+ * \code
+ * [object doSomething:here withValue:there]; // ObjC
+ * return some_vector[1]; // C++
+ * \endcode
+ */
+ CXNameRange_WantSinglePiece = 0x4
+};
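+
+/*
+ * An illustrative sketch, not part of the API: assuming \c refCursor is a
+ * reference cursor obtained earlier, request a single spanning range for the
+ * referenced name, including any qualifier and explicit template arguments.
+ *
+ * \code
+ * unsigned flags = CXNameRange_WantQualifier |
+ *                  CXNameRange_WantTemplateArgs |
+ *                  CXNameRange_WantSinglePiece;
+ * CXSourceRange range =
+ *     clang_getCursorReferenceNameRange(refCursor, flags, 0);
+ * \endcode
+ */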
+
+/**
+ * @}
+ */
+
+/**
+ * \defgroup CINDEX_LEX Token extraction and manipulation
+ *
+ * The routines in this group provide access to the tokens within a
+ * translation unit, along with a semantic mapping of those tokens to
+ * their corresponding cursors.
+ *
+ * @{
+ */
+
+/**
+ * \brief Describes a kind of token.
+ */
+typedef enum CXTokenKind {
+ /**
+ * \brief A token that contains some kind of punctuation.
+ */
+ CXToken_Punctuation,
+
+ /**
+ * \brief A language keyword.
+ */
+ CXToken_Keyword,
+
+ /**
+ * \brief An identifier (that is not a keyword).
+ */
+ CXToken_Identifier,
+
+ /**
+ * \brief A numeric, string, or character literal.
+ */
+ CXToken_Literal,
+
+ /**
+ * \brief A comment.
+ */
+ CXToken_Comment
+} CXTokenKind;
+
+/**
+ * \brief Describes a single preprocessing token.
+ */
+typedef struct {
+ unsigned int_data[4];
+ void *ptr_data;
+} CXToken;
+
+/**
+ * \brief Determine the kind of the given token.
+ */
+CINDEX_LINKAGE CXTokenKind clang_getTokenKind(CXToken);
+
+/**
+ * \brief Determine the spelling of the given token.
+ *
+ * The spelling of a token is the textual representation of that token, e.g.,
+ * the text of an identifier or keyword.
+ */
+CINDEX_LINKAGE CXString clang_getTokenSpelling(CXTranslationUnit, CXToken);
+
+/**
+ * \brief Retrieve the source location of the given token.
+ */
+CINDEX_LINKAGE CXSourceLocation clang_getTokenLocation(CXTranslationUnit,
+ CXToken);
+
+/**
+ * \brief Retrieve a source range that covers the given token.
+ */
+CINDEX_LINKAGE CXSourceRange clang_getTokenExtent(CXTranslationUnit, CXToken);
+
+/**
+ * \brief Tokenize the source code described by the given range into raw
+ * lexical tokens.
+ *
+ * \param TU the translation unit whose text is being tokenized.
+ *
+ * \param Range the source range in which text should be tokenized. All of the
+ * tokens produced by tokenization will fall within this source range.
+ *
+ * \param Tokens this pointer will be set to point to the array of tokens
+ * that occur within the given source range. The returned pointer must be
+ * freed with clang_disposeTokens() before the translation unit is destroyed.
+ *
+ * \param NumTokens will be set to the number of tokens in the \c *Tokens
+ * array.
+ *
+ */
+CINDEX_LINKAGE void clang_tokenize(CXTranslationUnit TU, CXSourceRange Range,
+ CXToken **Tokens, unsigned *NumTokens);
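+
+/*
+ * An illustrative sketch, not part of the API: assuming a valid \c tu and
+ * \c range obtained earlier (error handling omitted), tokenize the range and
+ * print each token's spelling.
+ *
+ * \code
+ * CXToken *tokens = NULL;
+ * unsigned numTokens = 0;
+ * clang_tokenize(tu, range, &tokens, &numTokens);
+ * for (unsigned i = 0; i < numTokens; ++i) {
+ *   CXString spelling = clang_getTokenSpelling(tu, tokens[i]);
+ *   printf("%s\n", clang_getCString(spelling));
+ *   clang_disposeString(spelling);
+ * }
+ * clang_disposeTokens(tu, tokens, numTokens);
+ * \endcode
+ */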
+
+/**
+ * \brief Annotate the given set of tokens by providing cursors for each token
+ * that can be mapped to a specific entity within the abstract syntax tree.
+ *
+ * This token-annotation routine is equivalent to invoking
+ * clang_getCursor() for the source locations of each of the
+ * tokens. The cursors provided are filtered, so that only those
+ * cursors that have a direct correspondence to the token are
+ * accepted. For example, given a function call \c f(x),
+ * clang_getCursor() would provide the following cursors:
+ *
+ * * when the cursor is over the 'f', a DeclRefExpr cursor referring to 'f'.
+ * * when the cursor is over the '(' or the ')', a CallExpr referring to 'f'.
+ * * when the cursor is over the 'x', a DeclRefExpr cursor referring to 'x'.
+ *
+ * Only the first and last of these cursors will occur within the
+ * annotation, since the tokens "f" and "x" directly refer to a function
+ * and a variable, respectively, but the parentheses are just a small
+ * part of the full syntax of the function call expression, which is
+ * not provided as an annotation.
+ *
+ * \param TU the translation unit that owns the given tokens.
+ *
+ * \param Tokens the set of tokens to annotate.
+ *
+ * \param NumTokens the number of tokens in \p Tokens.
+ *
+ * \param Cursors an array of \p NumTokens cursors, whose contents will be
+ * replaced with the cursors corresponding to each token.
+ */
+CINDEX_LINKAGE void clang_annotateTokens(CXTranslationUnit TU,
+ CXToken *Tokens, unsigned NumTokens,
+ CXCursor *Cursors);
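+
+/*
+ * An illustrative sketch, not part of the API: continuing the clang_tokenize()
+ * example above (\c tokens and \c numTokens come from clang_tokenize()), map
+ * each token to its corresponding cursor and print both.
+ *
+ * \code
+ * CXCursor *cursors = (CXCursor *)malloc(numTokens * sizeof(CXCursor));
+ * clang_annotateTokens(tu, tokens, numTokens, cursors);
+ * for (unsigned i = 0; i < numTokens; ++i) {
+ *   CXString spelling = clang_getTokenSpelling(tu, tokens[i]);
+ *   CXString kind = clang_getCursorKindSpelling(clang_getCursorKind(cursors[i]));
+ *   printf("%s -> %s\n", clang_getCString(spelling), clang_getCString(kind));
+ *   clang_disposeString(spelling);
+ *   clang_disposeString(kind);
+ * }
+ * free(cursors);
+ * \endcode
+ */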
+
+/**
+ * \brief Free the given set of tokens.
+ */
+CINDEX_LINKAGE void clang_disposeTokens(CXTranslationUnit TU,
+ CXToken *Tokens, unsigned NumTokens);
+
+/**
+ * @}
+ */
+
+/**
+ * \defgroup CINDEX_DEBUG Debugging facilities
+ *
+ * These routines are used for testing and debugging only, and should not
+ * be relied upon.
+ *
+ * @{
+ */
+
+/* for debug/testing */
+CINDEX_LINKAGE CXString clang_getCursorKindSpelling(enum CXCursorKind Kind);
+CINDEX_LINKAGE void clang_getDefinitionSpellingAndExtent(CXCursor,
+ const char **startBuf,
+ const char **endBuf,
+ unsigned *startLine,
+ unsigned *startColumn,
+ unsigned *endLine,
+ unsigned *endColumn);
+CINDEX_LINKAGE void clang_enableStackTraces(void);
+CINDEX_LINKAGE void clang_executeOnThread(void (*fn)(void*), void *user_data,
+ unsigned stack_size);
+
+/**
+ * @}
+ */
+
+/**
+ * \defgroup CINDEX_CODE_COMPLET Code completion
+ *
+ * Code completion involves taking an (incomplete) source file, along with
+ * knowledge of where the user is actively editing that file, and suggesting
+ * syntactically- and semantically-valid constructs that the user might want to
+ * use at that particular point in the source code. These data structures and
+ * routines provide support for code completion.
+ *
+ * @{
+ */
+
+/**
+ * \brief A semantic string that describes a code-completion result.
+ *
+ * A semantic string that describes the formatting of a code-completion
+ * result as a single "template" of text that should be inserted into the
+ * source buffer when a particular code-completion result is selected.
+ * Each semantic string is made up of some number of "chunks", each of which
+ * contains some text along with a description of what that text means, e.g.,
+ * the name of the entity being referenced, whether the text chunk is part of
+ * the template, or whether it is a "placeholder" that the user should replace
+ * with actual code of a specific kind. See \c CXCompletionChunkKind for a
+ * description of the different kinds of chunks.
+ */
+typedef void *CXCompletionString;
+
+/**
+ * \brief A single result of code completion.
+ */
+typedef struct {
+ /**
+ * \brief The kind of entity that this completion refers to.
+ *
+ * The cursor kind will be a macro, keyword, or a declaration (one of the
+ * *Decl cursor kinds), describing the entity that the completion is
+ * referring to.
+ *
+ * \todo In the future, we would like to provide a full cursor, to allow
+   * the client to extract additional information from the declaration.
+ */
+ enum CXCursorKind CursorKind;
+
+ /**
+ * \brief The code-completion string that describes how to insert this
+ * code-completion result into the editing buffer.
+ */
+ CXCompletionString CompletionString;
+} CXCompletionResult;
+
+/**
+ * \brief Describes a single piece of text within a code-completion string.
+ *
+ * Each "chunk" within a code-completion string (\c CXCompletionString) is
+ * either a piece of text with a specific "kind" that describes how that text
+ * should be interpreted by the client or is another completion string.
+ */
+enum CXCompletionChunkKind {
+ /**
+ * \brief A code-completion string that describes "optional" text that
+ * could be a part of the template (but is not required).
+ *
+ * The Optional chunk is the only kind of chunk that has a code-completion
+ * string for its representation, which is accessible via
+ * \c clang_getCompletionChunkCompletionString(). The code-completion string
+ * describes an additional part of the template that is completely optional.
+ * For example, optional chunks can be used to describe the placeholders for
+ * arguments that match up with defaulted function parameters, e.g. given:
+ *
+ * \code
+ * void f(int x, float y = 3.14, double z = 2.71828);
+ * \endcode
+ *
+ * The code-completion string for this function would contain:
+ * - a TypedText chunk for "f".
+ * - a LeftParen chunk for "(".
+ * - a Placeholder chunk for "int x"
+ * - an Optional chunk containing the remaining defaulted arguments, e.g.,
+ * - a Comma chunk for ","
+ * - a Placeholder chunk for "float y"
+ * - an Optional chunk containing the last defaulted argument:
+ * - a Comma chunk for ","
+ * - a Placeholder chunk for "double z"
+ * - a RightParen chunk for ")"
+ *
+ * There are many ways to handle Optional chunks. Two simple approaches are:
+ * - Completely ignore optional chunks, in which case the template for the
+ * function "f" would only include the first parameter ("int x").
+ * - Fully expand all optional chunks, in which case the template for the
+ * function "f" would have all of the parameters.
+ */
+ CXCompletionChunk_Optional,
+ /**
+ * \brief Text that a user would be expected to type to get this
+ * code-completion result.
+ *
+ * There will be exactly one "typed text" chunk in a semantic string, which
+ * will typically provide the spelling of a keyword or the name of a
+ * declaration that could be used at the current code point. Clients are
+ * expected to filter the code-completion results based on the text in this
+ * chunk.
+ */
+ CXCompletionChunk_TypedText,
+ /**
+ * \brief Text that should be inserted as part of a code-completion result.
+ *
+ * A "text" chunk represents text that is part of the template to be
+ * inserted into user code should this particular code-completion result
+ * be selected.
+ */
+ CXCompletionChunk_Text,
+ /**
+ * \brief Placeholder text that should be replaced by the user.
+ *
+ * A "placeholder" chunk marks a place where the user should insert text
+ * into the code-completion template. For example, placeholders might mark
+ * the function parameters for a function declaration, to indicate that the
+ * user should provide arguments for each of those parameters. The actual
+ * text in a placeholder is a suggestion for the text to display before
+ * the user replaces the placeholder with real code.
+ */
+ CXCompletionChunk_Placeholder,
+ /**
+ * \brief Informative text that should be displayed but never inserted as
+ * part of the template.
+ *
+ * An "informative" chunk contains annotations that can be displayed to
+ * help the user decide whether a particular code-completion result is the
+ * right option, but which is not part of the actual template to be inserted
+ * by code completion.
+ */
+ CXCompletionChunk_Informative,
+ /**
+ * \brief Text that describes the current parameter when code-completion is
+   * referring to a function call, message send, or template specialization.
+ *
+ * A "current parameter" chunk occurs when code-completion is providing
+ * information about a parameter corresponding to the argument at the
+ * code-completion point. For example, given a function
+ *
+ * \code
+ * int add(int x, int y);
+ * \endcode
+ *
+ * and the source code \c add(, where the code-completion point is after the
+ * "(", the code-completion string will contain a "current parameter" chunk
+ * for "int x", indicating that the current argument will initialize that
+ * parameter. After typing further, to \c add(17, (where the code-completion
+ * point is after the ","), the code-completion string will contain a
+ * "current paremeter" chunk to "int y".
+ */
+ CXCompletionChunk_CurrentParameter,
+ /**
+ * \brief A left parenthesis ('('), used to initiate a function call or
+ * signal the beginning of a function parameter list.
+ */
+ CXCompletionChunk_LeftParen,
+ /**
+ * \brief A right parenthesis (')'), used to finish a function call or
+ * signal the end of a function parameter list.
+ */
+ CXCompletionChunk_RightParen,
+ /**
+ * \brief A left bracket ('[').
+ */
+ CXCompletionChunk_LeftBracket,
+ /**
+ * \brief A right bracket (']').
+ */
+ CXCompletionChunk_RightBracket,
+ /**
+ * \brief A left brace ('{').
+ */
+ CXCompletionChunk_LeftBrace,
+ /**
+ * \brief A right brace ('}').
+ */
+ CXCompletionChunk_RightBrace,
+ /**
+ * \brief A left angle bracket ('<').
+ */
+ CXCompletionChunk_LeftAngle,
+ /**
+ * \brief A right angle bracket ('>').
+ */
+ CXCompletionChunk_RightAngle,
+ /**
+ * \brief A comma separator (',').
+ */
+ CXCompletionChunk_Comma,
+ /**
+ * \brief Text that specifies the result type of a given result.
+ *
+ * This special kind of informative chunk is not meant to be inserted into
+ * the text buffer. Rather, it is meant to illustrate the type that an
+ * expression using the given completion string would have.
+ */
+ CXCompletionChunk_ResultType,
+ /**
+ * \brief A colon (':').
+ */
+ CXCompletionChunk_Colon,
+ /**
+ * \brief A semicolon (';').
+ */
+ CXCompletionChunk_SemiColon,
+ /**
+ * \brief An '=' sign.
+ */
+ CXCompletionChunk_Equal,
+ /**
+ * Horizontal space (' ').
+ */
+ CXCompletionChunk_HorizontalSpace,
+ /**
+ * Vertical space ('\n'), after which it is generally a good idea to
+ * perform indentation.
+ */
+ CXCompletionChunk_VerticalSpace
+};
+
+/**
+ * \brief Determine the kind of a particular chunk within a completion string.
+ *
+ * \param completion_string the completion string to query.
+ *
+ * \param chunk_number the 0-based index of the chunk in the completion string.
+ *
+ * \returns the kind of the chunk at the index \c chunk_number.
+ */
+CINDEX_LINKAGE enum CXCompletionChunkKind
+clang_getCompletionChunkKind(CXCompletionString completion_string,
+ unsigned chunk_number);
+
+/**
+ * \brief Retrieve the text associated with a particular chunk within a
+ * completion string.
+ *
+ * \param completion_string the completion string to query.
+ *
+ * \param chunk_number the 0-based index of the chunk in the completion string.
+ *
+ * \returns the text associated with the chunk at index \c chunk_number.
+ */
+CINDEX_LINKAGE CXString
+clang_getCompletionChunkText(CXCompletionString completion_string,
+ unsigned chunk_number);
+
+/**
+ * \brief Retrieve the completion string associated with a particular chunk
+ * within a completion string.
+ *
+ * \param completion_string the completion string to query.
+ *
+ * \param chunk_number the 0-based index of the chunk in the completion string.
+ *
+ * \returns the completion string associated with the chunk at index
+ * \c chunk_number.
+ */
+CINDEX_LINKAGE CXCompletionString
+clang_getCompletionChunkCompletionString(CXCompletionString completion_string,
+ unsigned chunk_number);
+
+/**
+ * \brief Retrieve the number of chunks in the given code-completion string.
+ */
+CINDEX_LINKAGE unsigned
+clang_getNumCompletionChunks(CXCompletionString completion_string);
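+
+/*
+ * An illustrative sketch, not part of the API: assuming \c cs is a
+ * CXCompletionString taken from a completion result, walk its chunks and
+ * print the typed-text chunk, which is what clients typically filter on.
+ *
+ * \code
+ * unsigned n = clang_getNumCompletionChunks(cs);
+ * for (unsigned i = 0; i < n; ++i) {
+ *   if (clang_getCompletionChunkKind(cs, i) == CXCompletionChunk_TypedText) {
+ *     CXString text = clang_getCompletionChunkText(cs, i);
+ *     printf("%s\n", clang_getCString(text));
+ *     clang_disposeString(text);
+ *   }
+ * }
+ * \endcode
+ */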
+
+/**
+ * \brief Determine the priority of this code completion.
+ *
+ * The priority of a code completion indicates how likely it is that this
+ * particular completion is the completion that the user will select. The
+ * priority is selected by various internal heuristics.
+ *
+ * \param completion_string The completion string to query.
+ *
+ * \returns The priority of this completion string. Smaller values indicate
+ * higher-priority (more likely) completions.
+ */
+CINDEX_LINKAGE unsigned
+clang_getCompletionPriority(CXCompletionString completion_string);
+
+/**
+ * \brief Determine the availability of the entity that this code-completion
+ * string refers to.
+ *
+ * \param completion_string The completion string to query.
+ *
+ * \returns The availability of the completion string.
+ */
+CINDEX_LINKAGE enum CXAvailabilityKind
+clang_getCompletionAvailability(CXCompletionString completion_string);
+
+/**
+ * \brief Retrieve the number of annotations associated with the given
+ * completion string.
+ *
+ * \param completion_string the completion string to query.
+ *
+ * \returns the number of annotations associated with the given completion
+ * string.
+ */
+CINDEX_LINKAGE unsigned
+clang_getCompletionNumAnnotations(CXCompletionString completion_string);
+
+/**
+ * \brief Retrieve the annotation associated with the given completion string.
+ *
+ * \param completion_string the completion string to query.
+ *
+ * \param annotation_number the 0-based index of the annotation of the
+ * completion string.
+ *
+ * \returns annotation string associated with the completion at index
+ * \c annotation_number, or a NULL string if that annotation is not available.
+ */
+CINDEX_LINKAGE CXString
+clang_getCompletionAnnotation(CXCompletionString completion_string,
+ unsigned annotation_number);
+
+/**
+ * \brief Retrieve the parent context of the given completion string.
+ *
+ * The parent context of a completion string is the semantic parent of
+ * the declaration (if any) that the code completion represents. For example,
+ * a code completion for an Objective-C method would have the method's class
+ * or protocol as its context.
+ *
+ * \param completion_string The code completion string whose parent is
+ * being queried.
+ *
+ * \param kind If non-NULL, will be set to the kind of the parent context,
+ * or CXCursor_NotImplemented if there is no context.
+ *
+ * \returns The name of the completion parent, e.g., "NSObject" if
+ * the completion string represents a method in the NSObject class.
+ */
+CINDEX_LINKAGE CXString
+clang_getCompletionParent(CXCompletionString completion_string,
+ enum CXCursorKind *kind);
+/**
+ * \brief Retrieve a completion string for an arbitrary declaration or macro
+ * definition cursor.
+ *
+ * \param cursor The cursor to query.
+ *
+ * \returns A non-context-sensitive completion string for declaration and macro
+ * definition cursors, or NULL for other kinds of cursors.
+ */
+CINDEX_LINKAGE CXCompletionString
+clang_getCursorCompletionString(CXCursor cursor);
+
+/**
+ * \brief Contains the results of code-completion.
+ *
+ * This data structure contains the results of code completion, as
+ * produced by \c clang_codeCompleteAt(). Its contents must be freed by
+ * \c clang_disposeCodeCompleteResults.
+ */
+typedef struct {
+ /**
+ * \brief The code-completion results.
+ */
+ CXCompletionResult *Results;
+
+ /**
+ * \brief The number of code-completion results stored in the
+ * \c Results array.
+ */
+ unsigned NumResults;
+} CXCodeCompleteResults;
+
+/**
+ * \brief Flags that can be passed to \c clang_codeCompleteAt() to
+ * modify its behavior.
+ *
+ * The enumerators in this enumeration can be bitwise-OR'd together to
+ * provide multiple options to \c clang_codeCompleteAt().
+ */
+enum CXCodeComplete_Flags {
+ /**
+ * \brief Whether to include macros within the set of code
+ * completions returned.
+ */
+ CXCodeComplete_IncludeMacros = 0x01,
+
+ /**
+ * \brief Whether to include code patterns for language constructs
+ * within the set of code completions, e.g., for loops.
+ */
+ CXCodeComplete_IncludeCodePatterns = 0x02
+};
+
+/**
+ * \brief Bits that represent the context under which completion is occurring.
+ *
+ * The enumerators in this enumeration may be bitwise-OR'd together if multiple
+ * contexts are occurring simultaneously.
+ */
+enum CXCompletionContext {
+ /**
+ * \brief The context for completions is unexposed, as only Clang results
+ * should be included. (This is equivalent to having no context bits set.)
+ */
+ CXCompletionContext_Unexposed = 0,
+
+ /**
+ * \brief Completions for any possible type should be included in the results.
+ */
+ CXCompletionContext_AnyType = 1 << 0,
+
+ /**
+ * \brief Completions for any possible value (variables, function calls, etc.)
+ * should be included in the results.
+ */
+ CXCompletionContext_AnyValue = 1 << 1,
+ /**
+ * \brief Completions for values that resolve to an Objective-C object should
+ * be included in the results.
+ */
+ CXCompletionContext_ObjCObjectValue = 1 << 2,
+ /**
+ * \brief Completions for values that resolve to an Objective-C selector
+ * should be included in the results.
+ */
+ CXCompletionContext_ObjCSelectorValue = 1 << 3,
+ /**
+ * \brief Completions for values that resolve to a C++ class type should be
+ * included in the results.
+ */
+ CXCompletionContext_CXXClassTypeValue = 1 << 4,
+
+ /**
+ * \brief Completions for fields of the member being accessed using the dot
+ * operator should be included in the results.
+ */
+ CXCompletionContext_DotMemberAccess = 1 << 5,
+ /**
+ * \brief Completions for fields of the member being accessed using the arrow
+ * operator should be included in the results.
+ */
+ CXCompletionContext_ArrowMemberAccess = 1 << 6,
+ /**
+ * \brief Completions for properties of the Objective-C object being accessed
+ * using the dot operator should be included in the results.
+ */
+ CXCompletionContext_ObjCPropertyAccess = 1 << 7,
+
+ /**
+ * \brief Completions for enum tags should be included in the results.
+ */
+ CXCompletionContext_EnumTag = 1 << 8,
+ /**
+ * \brief Completions for union tags should be included in the results.
+ */
+ CXCompletionContext_UnionTag = 1 << 9,
+ /**
+ * \brief Completions for struct tags should be included in the results.
+ */
+ CXCompletionContext_StructTag = 1 << 10,
+
+ /**
+ * \brief Completions for C++ class names should be included in the results.
+ */
+ CXCompletionContext_ClassTag = 1 << 11,
+ /**
+ * \brief Completions for C++ namespaces and namespace aliases should be
+ * included in the results.
+ */
+ CXCompletionContext_Namespace = 1 << 12,
+ /**
+ * \brief Completions for C++ nested name specifiers should be included in
+ * the results.
+ */
+ CXCompletionContext_NestedNameSpecifier = 1 << 13,
+
+ /**
+ * \brief Completions for Objective-C interfaces (classes) should be included
+ * in the results.
+ */
+ CXCompletionContext_ObjCInterface = 1 << 14,
+ /**
+ * \brief Completions for Objective-C protocols should be included in
+ * the results.
+ */
+ CXCompletionContext_ObjCProtocol = 1 << 15,
+ /**
+ * \brief Completions for Objective-C categories should be included in
+ * the results.
+ */
+ CXCompletionContext_ObjCCategory = 1 << 16,
+ /**
+ * \brief Completions for Objective-C instance messages should be included
+ * in the results.
+ */
+ CXCompletionContext_ObjCInstanceMessage = 1 << 17,
+ /**
+ * \brief Completions for Objective-C class messages should be included in
+ * the results.
+ */
+ CXCompletionContext_ObjCClassMessage = 1 << 18,
+ /**
+ * \brief Completions for Objective-C selector names should be included in
+ * the results.
+ */
+ CXCompletionContext_ObjCSelectorName = 1 << 19,
+
+ /**
+ * \brief Completions for preprocessor macro names should be included in
+ * the results.
+ */
+ CXCompletionContext_MacroName = 1 << 20,
+
+ /**
+ * \brief Natural language completions should be included in the results.
+ */
+ CXCompletionContext_NaturalLanguage = 1 << 21,
+
+ /**
+ * \brief The current context is unknown, so set all contexts.
+ */
+ CXCompletionContext_Unknown = ((1 << 22) - 1)
+};
+
+/**
+ * \brief Returns a default set of code-completion options that can be
+ * passed to \c clang_codeCompleteAt().
+ */
+CINDEX_LINKAGE unsigned clang_defaultCodeCompleteOptions(void);
+
+/**
+ * \brief Perform code completion at a given location in a translation unit.
+ *
+ * This function performs code completion at a particular file, line, and
+ * column within source code, providing results that suggest potential
+ * code snippets based on the context of the completion. The basic model
+ * for code completion is that Clang will parse a complete source file,
+ * performing syntax checking up to the location where code-completion has
+ * been requested. At that point, a special code-completion token is passed
+ * to the parser, which recognizes this token and determines, based on the
+ * current location in the C/Objective-C/C++ grammar and the state of
+ * semantic analysis, what completions to provide. These completions are
+ * returned via a new \c CXCodeCompleteResults structure.
+ *
+ * Code completion itself is meant to be triggered by the client when the
+ * user types punctuation characters or whitespace, at which point the
+ * code-completion location will coincide with the cursor. For example, if \c p
+ * is a pointer, code-completion might be triggered after the "-" and then
+ * after the ">" in \c p->. When the code-completion location is afer the ">",
+ * the completion results will provide, e.g., the members of the struct that
+ * "p" points to. The client is responsible for placing the cursor at the
+ * beginning of the token currently being typed, then filtering the results
+ * based on the contents of the token. For example, when code-completing for
+ * the expression \c p->get, the client should provide the location just after
+ * the ">" (e.g., pointing at the "g") to this code-completion hook. Then, the
+ * client can filter the results based on the current token text ("get"), only
+ * showing those results that start with "get". The intent of this interface
+ * is to separate the relatively high-latency acquisition of code-completion
+ * results from the filtering of results on a per-character basis, which must
+ * have a lower latency.
+ *
+ * \param TU The translation unit in which code-completion should
+ * occur. The source files for this translation unit need not be
+ * completely up-to-date (and the contents of those source files may
+ * be overridden via \p unsaved_files). Cursors referring into the
+ * translation unit may be invalidated by this invocation.
+ *
+ * \param complete_filename The name of the source file where code
+ * completion should be performed. This filename may be any file
+ * included in the translation unit.
+ *
+ * \param complete_line The line at which code-completion should occur.
+ *
+ * \param complete_column The column at which code-completion should occur.
+ * Note that the column should point just after the syntactic construct that
+ * initiated code completion, and not in the middle of a lexical token.
+ *
+ * \param unsaved_files the files that have not yet been saved to disk
+ * but may be required for parsing or code completion, including the
+ * contents of those files. The contents and name of these files (as
+ * specified by CXUnsavedFile) are copied when necessary, so the
+ * client only needs to guarantee their validity until the call to
+ * this function returns.
+ *
+ * \param num_unsaved_files The number of unsaved file entries in \p
+ * unsaved_files.
+ *
+ * \param options Extra options that control the behavior of code
+ * completion, expressed as a bitwise OR of the enumerators of the
+ * CXCodeComplete_Flags enumeration. The
+ * \c clang_defaultCodeCompleteOptions() function returns a default set
+ * of code-completion options.
+ *
+ * \returns If successful, a new \c CXCodeCompleteResults structure
+ * containing code-completion results, which should eventually be
+ * freed with \c clang_disposeCodeCompleteResults(). If code
+ * completion fails, returns NULL.
+ */
+CINDEX_LINKAGE
+CXCodeCompleteResults *clang_codeCompleteAt(CXTranslationUnit TU,
+ const char *complete_filename,
+ unsigned complete_line,
+ unsigned complete_column,
+ struct CXUnsavedFile *unsaved_files,
+ unsigned num_unsaved_files,
+ unsigned options);
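+
+/*
+ * An illustrative sketch, not part of the API: assuming a valid \c tu (the
+ * file name, line, and column are placeholders), request completions, sort
+ * them, inspect each result's completion string, and free the results.
+ *
+ * \code
+ * CXCodeCompleteResults *results =
+ *     clang_codeCompleteAt(tu, "input.c", 10, 5, NULL, 0,
+ *                          clang_defaultCodeCompleteOptions());
+ * if (results) {
+ *   clang_sortCodeCompletionResults(results->Results, results->NumResults);
+ *   for (unsigned i = 0; i < results->NumResults; ++i) {
+ *     CXCompletionString cs = results->Results[i].CompletionString;
+ *     // e.g., walk the chunks as shown for clang_getNumCompletionChunks()
+ *     (void)cs;
+ *   }
+ *   clang_disposeCodeCompleteResults(results);
+ * }
+ * \endcode
+ */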
+
+/**
+ * \brief Sort the code-completion results in case-insensitive alphabetical
+ * order.
+ *
+ * \param Results The set of results to sort.
+ * \param NumResults The number of results in \p Results.
+ */
+CINDEX_LINKAGE
+void clang_sortCodeCompletionResults(CXCompletionResult *Results,
+ unsigned NumResults);
+
+/**
+ * \brief Free the given set of code-completion results.
+ */
+CINDEX_LINKAGE
+void clang_disposeCodeCompleteResults(CXCodeCompleteResults *Results);
+
+/**
+ * \brief Determine the number of diagnostics produced prior to the
+ * location where code completion was performed.
+ */
+CINDEX_LINKAGE
+unsigned clang_codeCompleteGetNumDiagnostics(CXCodeCompleteResults *Results);
+
+/**
+ * \brief Retrieve a diagnostic associated with the given code completion.
+ *
+ * \param Result the code completion results to query.
+ * \param Index the zero-based diagnostic number to retrieve.
+ *
+ * \returns the requested diagnostic. This diagnostic must be freed
+ * via a call to \c clang_disposeDiagnostic().
+ */
+CINDEX_LINKAGE
+CXDiagnostic clang_codeCompleteGetDiagnostic(CXCodeCompleteResults *Results,
+ unsigned Index);
+
+/**
+ * \brief Determines what completions are appropriate for the context of
+ * the given code completion.
+ *
+ * \param Results the code completion results to query
+ *
+ * \returns the kinds of completions that are appropriate for use
+ * along with the given code completion results.
+ */
+CINDEX_LINKAGE
+unsigned long long clang_codeCompleteGetContexts(
+ CXCodeCompleteResults *Results);
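+
+/*
+ * An illustrative sketch, not part of the API: assuming \c results came from
+ * clang_codeCompleteAt(), test whether the completion point is a member
+ * access through the dot operator.
+ *
+ * \code
+ * unsigned long long contexts = clang_codeCompleteGetContexts(results);
+ * if (contexts & CXCompletionContext_DotMemberAccess) {
+ *   // completions are for members accessed with '.'
+ * }
+ * \endcode
+ */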
+
+/**
+ * \brief Returns the cursor kind for the container for the current code
+ * completion context. The container is only guaranteed to be set for
+ * contexts where a container exists (i.e. member accesses or Objective-C
+ * message sends); if there is not a container, this function will return
+ * CXCursor_InvalidCode.
+ *
+ * \param Results the code completion results to query
+ *
+ * \param IsIncomplete on return, this value will be false if Clang has complete
+ * information about the container. If Clang does not have complete
+ * information, this value will be true.
+ *
+ * \returns the container kind, or CXCursor_InvalidCode if there is not a
+ * container
+ */
+CINDEX_LINKAGE
+enum CXCursorKind clang_codeCompleteGetContainerKind(
+ CXCodeCompleteResults *Results,
+ unsigned *IsIncomplete);
+
+/**
+ * \brief Returns the USR for the container for the current code completion
+ * context. If there is not a container for the current context, this
+ * function will return the empty string.
+ *
+ * \param Results the code completion results to query
+ *
+ * \returns the USR for the container
+ */
+CINDEX_LINKAGE
+CXString clang_codeCompleteGetContainerUSR(CXCodeCompleteResults *Results);
+
+
+/**
+ * \brief Returns the currently-entered selector for an Objective-C message
+ * send, formatted like "initWithFoo:bar:". Only guaranteed to return a
+ * non-empty string for CXCompletionContext_ObjCInstanceMessage and
+ * CXCompletionContext_ObjCClassMessage.
+ *
+ * \param Results the code completion results to query
+ *
+ * \returns the selector (or partial selector) that has been entered thus far
+ * for an Objective-C message send.
+ */
+CINDEX_LINKAGE
+CXString clang_codeCompleteGetObjCSelector(CXCodeCompleteResults *Results);
+
+/**
+ * @}
+ */
+
+
+/**
+ * \defgroup CINDEX_MISC Miscellaneous utility functions
+ *
+ * @{
+ */
+
+/**
+ * \brief Return a version string, suitable for showing to a user, but not
+ * intended to be parsed (the format is not guaranteed to be stable).
+ */
+CINDEX_LINKAGE CXString clang_getClangVersion();
+
+
+/**
+ * \brief Enable/disable crash recovery.
+ *
+ * \param isEnabled Flag to indicate if crash recovery is enabled. A non-zero value
+ * enables crash recovery, while 0 disables it.
+ */
+CINDEX_LINKAGE void clang_toggleCrashRecovery(unsigned isEnabled);
+
+ /**
+ * \brief Visitor invoked for each file in a translation unit
+ * (used with clang_getInclusions()).
+ *
+ * This visitor function will be invoked by clang_getInclusions() for each
+ * file included (either at the top-level or by #include directives) within
+ * a translation unit. The first argument is the file being included, and
+ * the second and third arguments provide the inclusion stack. The
+ * array is sorted in order of immediate inclusion. For example,
+ * the first element refers to the location that included 'included_file'.
+ */
+typedef void (*CXInclusionVisitor)(CXFile included_file,
+ CXSourceLocation* inclusion_stack,
+ unsigned include_len,
+ CXClientData client_data);
+
+/**
+ * \brief Visit the set of preprocessor inclusions in a translation unit.
+ * The visitor function is called with the provided data for every included
+ * file. This does not include headers included by the PCH file (unless one
+ * is inspecting the inclusions in the PCH file itself).
+ */
+CINDEX_LINKAGE void clang_getInclusions(CXTranslationUnit tu,
+ CXInclusionVisitor visitor,
+ CXClientData client_data);
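+
+/*
+ * An illustrative sketch, not part of the API (the visitor name is arbitrary):
+ * print every file included, directly or transitively, in a translation unit.
+ *
+ * \code
+ * static void printInclusion(CXFile included_file,
+ *                            CXSourceLocation *inclusion_stack,
+ *                            unsigned include_len,
+ *                            CXClientData client_data) {
+ *   CXString name = clang_getFileName(included_file);
+ *   printf("included: %s (depth %u)\n", clang_getCString(name), include_len);
+ *   clang_disposeString(name);
+ * }
+ *
+ * // later: clang_getInclusions(tu, printInclusion, NULL);
+ * \endcode
+ */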
+
+/**
+ * @}
+ */
+
+/** \defgroup CINDEX_REMAPPING Remapping functions
+ *
+ * @{
+ */
+
+/**
+ * \brief A remapping of original source files and their translated files.
+ */
+typedef void *CXRemapping;
+
+/**
+ * \brief Retrieve a remapping.
+ *
+ * \param path the path that contains metadata about remappings.
+ *
+ * \returns the requested remapping. This remapping must be freed
+ * via a call to \c clang_remap_dispose(). Can return NULL if an error occurred.
+ */
+CINDEX_LINKAGE CXRemapping clang_getRemappings(const char *path);
+
+/**
+ * \brief Retrieve a remapping.
+ *
+ * \param filePaths pointer to an array of file paths containing remapping info.
+ *
+ * \param numFiles number of file paths.
+ *
+ * \returns the requested remapping. This remapping must be freed
+ * via a call to \c clang_remap_dispose(). Can return NULL if an error occurred.
+ */
+CINDEX_LINKAGE
+CXRemapping clang_getRemappingsFromFileList(const char **filePaths,
+ unsigned numFiles);
+
+/**
+ * \brief Determine the number of remappings.
+ */
+CINDEX_LINKAGE unsigned clang_remap_getNumFiles(CXRemapping);
+
+/**
+ * \brief Get the original and the associated filename from the remapping.
+ *
+ * \param original If non-NULL, will be set to the original filename.
+ *
+ * \param transformed If non-NULL, will be set to the filename that the original
+ * is associated with.
+ */
+CINDEX_LINKAGE void clang_remap_getFilenames(CXRemapping, unsigned index,
+ CXString *original, CXString *transformed);
+
+/**
+ * \brief Dispose the remapping.
+ */
+CINDEX_LINKAGE void clang_remap_dispose(CXRemapping);
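+
+/*
+ * An illustrative sketch, not part of the API (the path is a placeholder):
+ * load a remapping, print each original/transformed file pair, and dispose
+ * of it.
+ *
+ * \code
+ * CXRemapping remap = clang_getRemappings("/path/to/remap-metadata");
+ * if (remap) {
+ *   unsigned n = clang_remap_getNumFiles(remap);
+ *   for (unsigned i = 0; i < n; ++i) {
+ *     CXString original, transformed;
+ *     clang_remap_getFilenames(remap, i, &original, &transformed);
+ *     printf("%s -> %s\n", clang_getCString(original),
+ *            clang_getCString(transformed));
+ *     clang_disposeString(original);
+ *     clang_disposeString(transformed);
+ *   }
+ *   clang_remap_dispose(remap);
+ * }
+ * \endcode
+ */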
+
+/**
+ * @}
+ */
+
+/** \defgroup CINDEX_HIGH Higher level API functions
+ *
+ * @{
+ */
+
+enum CXVisitorResult {
+ CXVisit_Break,
+ CXVisit_Continue
+};
+
+typedef struct {
+ void *context;
+ enum CXVisitorResult (*visit)(void *context, CXCursor, CXSourceRange);
+} CXCursorAndRangeVisitor;
+
+/**
+ * \brief Find references of a declaration in a specific file.
+ *
+ * \param cursor pointing to a declaration or a reference to one.
+ *
+ * \param file to search for references.
+ *
+ * \param visitor callback that will receive pairs of CXCursor/CXSourceRange for
+ * each reference found.
+ * The CXSourceRange will point inside the file; if the reference is inside
+ * a macro (and not a macro argument) the CXSourceRange will be invalid.
+ */
+CINDEX_LINKAGE void clang_findReferencesInFile(CXCursor cursor, CXFile file,
+ CXCursorAndRangeVisitor visitor);
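+
+/*
+ * An illustrative sketch, not part of the API (the visitor and variable names
+ * are arbitrary; \c cursor and \c tu are assumed to exist): count references
+ * to a declaration within a single file.
+ *
+ * \code
+ * static enum CXVisitorResult countRef(void *context, CXCursor c,
+ *                                      CXSourceRange range) {
+ *   ++*(unsigned *)context;
+ *   return CXVisit_Continue;
+ * }
+ *
+ * // later:
+ * //   unsigned count = 0;
+ * //   CXCursorAndRangeVisitor visitor = { &count, countRef };
+ * //   clang_findReferencesInFile(cursor, clang_getFile(tu, "input.c"), visitor);
+ * \endcode
+ */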
+
+#ifdef __has_feature
+# if __has_feature(blocks)
+
+typedef enum CXVisitorResult
+ (^CXCursorAndRangeVisitorBlock)(CXCursor, CXSourceRange);
+
+CINDEX_LINKAGE
+void clang_findReferencesInFileWithBlock(CXCursor, CXFile,
+ CXCursorAndRangeVisitorBlock);
+
+# endif
+#endif
+
+/**
+ * \brief The client's data object that is associated with a CXFile.
+ */
+typedef void *CXIdxClientFile;
+
+/**
+ * \brief The client's data object that is associated with a semantic entity.
+ */
+typedef void *CXIdxClientEntity;
+
+/**
+ * \brief The client's data object that is associated with a semantic container
+ * of entities.
+ */
+typedef void *CXIdxClientContainer;
+
+/**
+ * \brief The client's data object that is associated with an AST file (PCH
+ * or module).
+ */
+typedef void *CXIdxClientASTFile;
+
+/**
+ * \brief Source location passed to index callbacks.
+ */
+typedef struct {
+ void *ptr_data[2];
+ unsigned int_data;
+} CXIdxLoc;
+
+/**
+ * \brief Data for \see ppIncludedFile callback.
+ */
+typedef struct {
+ /**
+ * \brief Location of '#' in the #include/#import directive.
+ */
+ CXIdxLoc hashLoc;
+ /**
+ * \brief Filename as written in the #include/#import directive.
+ */
+ const char *filename;
+ /**
+ * \brief The actual file that the #include/#import directive resolved to.
+ */
+ CXFile file;
+ int isImport;
+ int isAngled;
+} CXIdxIncludedFileInfo;
+
+/**
+ * \brief Data for \see importedASTFile callback.
+ */
+typedef struct {
+ CXFile file;
+ /**
+ * \brief Location where the file is imported. It is useful mostly for
+ * modules.
+ */
+ CXIdxLoc loc;
+ /**
+   * \brief Non-zero if the AST file is a module, otherwise it is a PCH.
+ */
+ int isModule;
+} CXIdxImportedASTFileInfo;
+
+typedef enum {
+ CXIdxEntity_Unexposed = 0,
+ CXIdxEntity_Typedef = 1,
+ CXIdxEntity_Function = 2,
+ CXIdxEntity_Variable = 3,
+ CXIdxEntity_Field = 4,
+ CXIdxEntity_EnumConstant = 5,
+
+ CXIdxEntity_ObjCClass = 6,
+ CXIdxEntity_ObjCProtocol = 7,
+ CXIdxEntity_ObjCCategory = 8,
+
+ CXIdxEntity_ObjCInstanceMethod = 9,
+ CXIdxEntity_ObjCClassMethod = 10,
+ CXIdxEntity_ObjCProperty = 11,
+ CXIdxEntity_ObjCIvar = 12,
+
+ CXIdxEntity_Enum = 13,
+ CXIdxEntity_Struct = 14,
+ CXIdxEntity_Union = 15,
+
+ CXIdxEntity_CXXClass = 16,
+ CXIdxEntity_CXXNamespace = 17,
+ CXIdxEntity_CXXNamespaceAlias = 18,
+ CXIdxEntity_CXXStaticVariable = 19,
+ CXIdxEntity_CXXStaticMethod = 20,
+ CXIdxEntity_CXXInstanceMethod = 21,
+ CXIdxEntity_CXXConstructor = 22,
+ CXIdxEntity_CXXDestructor = 23,
+ CXIdxEntity_CXXConversionFunction = 24,
+ CXIdxEntity_CXXTypeAlias = 25
+
+} CXIdxEntityKind;
+
+typedef enum {
+ CXIdxEntityLang_None = 0,
+ CXIdxEntityLang_C = 1,
+ CXIdxEntityLang_ObjC = 2,
+ CXIdxEntityLang_CXX = 3
+} CXIdxEntityLanguage;
+
+/**
+ * \brief Extra C++ template information for an entity. This can apply to:
+ * CXIdxEntity_Function
+ * CXIdxEntity_CXXClass
+ * CXIdxEntity_CXXStaticMethod
+ * CXIdxEntity_CXXInstanceMethod
+ * CXIdxEntity_CXXConstructor
+ * CXIdxEntity_CXXConversionFunction
+ * CXIdxEntity_CXXTypeAlias
+ */
+typedef enum {
+ CXIdxEntity_NonTemplate = 0,
+ CXIdxEntity_Template = 1,
+ CXIdxEntity_TemplatePartialSpecialization = 2,
+ CXIdxEntity_TemplateSpecialization = 3
+} CXIdxEntityCXXTemplateKind;
+
+typedef enum {
+ CXIdxAttr_Unexposed = 0,
+ CXIdxAttr_IBAction = 1,
+ CXIdxAttr_IBOutlet = 2,
+ CXIdxAttr_IBOutletCollection = 3
+} CXIdxAttrKind;
+
+typedef struct {
+ CXIdxAttrKind kind;
+ CXCursor cursor;
+ CXIdxLoc loc;
+} CXIdxAttrInfo;
+
+typedef struct {
+ CXIdxEntityKind kind;
+ CXIdxEntityCXXTemplateKind templateKind;
+ CXIdxEntityLanguage lang;
+ const char *name;
+ const char *USR;
+ CXCursor cursor;
+ const CXIdxAttrInfo *const *attributes;
+ unsigned numAttributes;
+} CXIdxEntityInfo;
+
+typedef struct {
+ CXCursor cursor;
+} CXIdxContainerInfo;
+
+typedef struct {
+ const CXIdxAttrInfo *attrInfo;
+ const CXIdxEntityInfo *objcClass;
+ CXCursor classCursor;
+ CXIdxLoc classLoc;
+} CXIdxIBOutletCollectionAttrInfo;
+
+typedef struct {
+ const CXIdxEntityInfo *entityInfo;
+ CXCursor cursor;
+ CXIdxLoc loc;
+ const CXIdxContainerInfo *semanticContainer;
+ /**
+   * \brief Generally the same as \see semanticContainer but can be different in
+ * cases like out-of-line C++ member functions.
+ */
+ const CXIdxContainerInfo *lexicalContainer;
+ int isRedeclaration;
+ int isDefinition;
+ int isContainer;
+ const CXIdxContainerInfo *declAsContainer;
+ /**
+ * \brief Whether the declaration exists in code or was created implicitly
+   * by the compiler, e.g., implicit Objective-C methods for properties.
+ */
+ int isImplicit;
+ const CXIdxAttrInfo *const *attributes;
+ unsigned numAttributes;
+} CXIdxDeclInfo;
+
+typedef enum {
+ CXIdxObjCContainer_ForwardRef = 0,
+ CXIdxObjCContainer_Interface = 1,
+ CXIdxObjCContainer_Implementation = 2
+} CXIdxObjCContainerKind;
+
+typedef struct {
+ const CXIdxDeclInfo *declInfo;
+ CXIdxObjCContainerKind kind;
+} CXIdxObjCContainerDeclInfo;
+
+typedef struct {
+ const CXIdxEntityInfo *base;
+ CXCursor cursor;
+ CXIdxLoc loc;
+} CXIdxBaseClassInfo;
+
+typedef struct {
+ const CXIdxEntityInfo *protocol;
+ CXCursor cursor;
+ CXIdxLoc loc;
+} CXIdxObjCProtocolRefInfo;
+
+typedef struct {
+ const CXIdxObjCProtocolRefInfo *const *protocols;
+ unsigned numProtocols;
+} CXIdxObjCProtocolRefListInfo;
+
+typedef struct {
+ const CXIdxObjCContainerDeclInfo *containerInfo;
+ const CXIdxBaseClassInfo *superInfo;
+ const CXIdxObjCProtocolRefListInfo *protocols;
+} CXIdxObjCInterfaceDeclInfo;
+
+typedef struct {
+ const CXIdxObjCContainerDeclInfo *containerInfo;
+ const CXIdxEntityInfo *objcClass;
+ CXCursor classCursor;
+ CXIdxLoc classLoc;
+ const CXIdxObjCProtocolRefListInfo *protocols;
+} CXIdxObjCCategoryDeclInfo;
+
+typedef struct {
+ const CXIdxDeclInfo *declInfo;
+ const CXIdxEntityInfo *getter;
+ const CXIdxEntityInfo *setter;
+} CXIdxObjCPropertyDeclInfo;
+
+typedef struct {
+ const CXIdxDeclInfo *declInfo;
+ const CXIdxBaseClassInfo *const *bases;
+ unsigned numBases;
+} CXIdxCXXClassDeclInfo;
+
+/**
+ * \brief The kind of reference reported via the \see indexEntityReference
+ * callback.
+ */
+typedef enum {
+ /**
+   * \brief The entity is referenced directly in the user's code.
+ */
+ CXIdxEntityRef_Direct = 1,
+ /**
+   * \brief An implicit reference, e.g., a reference to an Objective-C method
+   * via the dot syntax.
+ */
+ CXIdxEntityRef_Implicit = 2
+} CXIdxEntityRefKind;
+
+/**
+ * \brief Data for \see indexEntityReference callback.
+ */
+typedef struct {
+ CXIdxEntityRefKind kind;
+ /**
+ * \brief Reference cursor.
+ */
+ CXCursor cursor;
+ CXIdxLoc loc;
+ /**
+ * \brief The entity that gets referenced.
+ */
+ const CXIdxEntityInfo *referencedEntity;
+ /**
+ * \brief Immediate "parent" of the reference. For example:
+ *
+ * \code
+ * Foo *var;
+ * \endcode
+ *
+   * The parent of the reference to type 'Foo' is the variable 'var'.
+ * For references inside statement bodies of functions/methods,
+ * the parentEntity will be the function/method.
+ */
+ const CXIdxEntityInfo *parentEntity;
+ /**
+ * \brief Lexical container context of the reference.
+ */
+ const CXIdxContainerInfo *container;
+} CXIdxEntityRefInfo;
+
+typedef struct {
+ /**
+ * \brief Called periodically to check whether indexing should be aborted.
+ * Should return 0 to continue, and non-zero to abort.
+ */
+ int (*abortQuery)(CXClientData client_data, void *reserved);
+
+ /**
+ * \brief Called at the end of indexing; passes the complete diagnostic set.
+ */
+ void (*diagnostic)(CXClientData client_data,
+ CXDiagnosticSet, void *reserved);
+
+ CXIdxClientFile (*enteredMainFile)(CXClientData client_data,
+ CXFile mainFile, void *reserved);
+
+ /**
+ * \brief Called when a file gets #included/#imported.
+ */
+ CXIdxClientFile (*ppIncludedFile)(CXClientData client_data,
+ const CXIdxIncludedFileInfo *);
+
+ /**
+   * \brief Called when an AST file (PCH or module) gets imported.
+ *
+ * AST files will not get indexed (there will not be callbacks to index all
+   * the entities in an AST file). The recommended action is, if the AST file
+   * is not already indexed, to block further indexing and initiate a new
+ * indexing job specific to the AST file.
+ */
+ CXIdxClientASTFile (*importedASTFile)(CXClientData client_data,
+ const CXIdxImportedASTFileInfo *);
+
+ /**
+ * \brief Called at the beginning of indexing a translation unit.
+ */
+ CXIdxClientContainer (*startedTranslationUnit)(CXClientData client_data,
+ void *reserved);
+
+ void (*indexDeclaration)(CXClientData client_data,
+ const CXIdxDeclInfo *);
+
+ /**
+ * \brief Called to index a reference of an entity.
+ */
+ void (*indexEntityReference)(CXClientData client_data,
+ const CXIdxEntityRefInfo *);
+
+} IndexerCallbacks;
+
+CINDEX_LINKAGE int clang_index_isEntityObjCContainerKind(CXIdxEntityKind);
+CINDEX_LINKAGE const CXIdxObjCContainerDeclInfo *
+clang_index_getObjCContainerDeclInfo(const CXIdxDeclInfo *);
+
+CINDEX_LINKAGE const CXIdxObjCInterfaceDeclInfo *
+clang_index_getObjCInterfaceDeclInfo(const CXIdxDeclInfo *);
+
+CINDEX_LINKAGE
+const CXIdxObjCCategoryDeclInfo *
+clang_index_getObjCCategoryDeclInfo(const CXIdxDeclInfo *);
+
+CINDEX_LINKAGE const CXIdxObjCProtocolRefListInfo *
+clang_index_getObjCProtocolRefListInfo(const CXIdxDeclInfo *);
+
+CINDEX_LINKAGE const CXIdxObjCPropertyDeclInfo *
+clang_index_getObjCPropertyDeclInfo(const CXIdxDeclInfo *);
+
+CINDEX_LINKAGE const CXIdxIBOutletCollectionAttrInfo *
+clang_index_getIBOutletCollectionAttrInfo(const CXIdxAttrInfo *);
+
+CINDEX_LINKAGE const CXIdxCXXClassDeclInfo *
+clang_index_getCXXClassDeclInfo(const CXIdxDeclInfo *);
+
+/**
+ * \brief For retrieving a custom CXIdxClientContainer attached to a
+ * container.
+ */
+CINDEX_LINKAGE CXIdxClientContainer
+clang_index_getClientContainer(const CXIdxContainerInfo *);
+
+/**
+ * \brief For setting a custom CXIdxClientContainer attached to a
+ * container.
+ */
+CINDEX_LINKAGE void
+clang_index_setClientContainer(const CXIdxContainerInfo *,CXIdxClientContainer);
+
+/**
+ * \brief For retrieving a custom CXIdxClientEntity attached to an entity.
+ */
+CINDEX_LINKAGE CXIdxClientEntity
+clang_index_getClientEntity(const CXIdxEntityInfo *);
+
+/**
+ * \brief For setting a custom CXIdxClientEntity attached to an entity.
+ */
+CINDEX_LINKAGE void
+clang_index_setClientEntity(const CXIdxEntityInfo *, CXIdxClientEntity);
+
+/**
+ * \brief An indexing action, to be applied to one or multiple translation units
+ * but not on concurrent threads. If there are threads doing indexing
+ * concurrently, they should use different CXIndexAction objects.
+ */
+typedef void *CXIndexAction;
+
+/**
+ * \brief Create an indexing action that can be applied to one or multiple
+ * translation units, but not used on concurrent threads. If there are threads
+ * doing indexing concurrently, they should use different CXIndexAction objects.
+ *
+ * \param CIdx The index object with which the index action will be associated.
+ */
+CINDEX_LINKAGE CXIndexAction clang_IndexAction_create(CXIndex CIdx);
+
+/**
+ * \brief Destroy the given index action.
+ *
+ * The index action must not be destroyed until all of the translation units
+ * created within that index action have been destroyed.
+ */
+CINDEX_LINKAGE void clang_IndexAction_dispose(CXIndexAction);
+
+typedef enum {
+ /**
+ * \brief Used to indicate that no special indexing options are needed.
+ */
+ CXIndexOpt_None = 0x0,
+
+ /**
+ * \brief Used to indicate that \see indexEntityReference should be invoked
+ * for only one reference of an entity per source file that does not also
+ * include a declaration/definition of the entity.
+ */
+ CXIndexOpt_SuppressRedundantRefs = 0x1,
+
+ /**
+   * \brief Function-local symbols should be indexed. If this is not set,
+ * function-local symbols will be ignored.
+ */
+ CXIndexOpt_IndexFunctionLocalSymbols = 0x2,
+
+ /**
+ * \brief Implicit function/class template instantiations should be indexed.
+ * If this is not set, implicit instantiations will be ignored.
+ */
+ CXIndexOpt_IndexImplicitTemplateInstantiations = 0x4,
+
+ /**
+ * \brief Suppress all compiler warnings when parsing for indexing.
+ */
+ CXIndexOpt_SuppressWarnings = 0x8
+} CXIndexOptFlags;
+
+/**
+ * \brief Index the given source file and the translation unit corresponding
+ * to that file via callbacks implemented through \see IndexerCallbacks.
+ *
+ * \param client_data pointer data supplied by the client, which will
+ * be passed to the invoked callbacks.
+ *
+ * \param index_callbacks Pointer to indexing callbacks that the client
+ * implements.
+ *
+ * \param index_callbacks_size Size of \see IndexerCallbacks structure that gets
+ * passed in index_callbacks.
+ *
+ * \param index_options A bitmask of options that affects how indexing is
+ * performed. This should be a bitwise OR of the CXIndexOpt_XXX flags.
+ *
+ * \param out_TU [out] pointer to store a CXTranslationUnit that can be reused
+ * after indexing is finished. Set to NULL if you do not require it.
+ *
+ * \returns If there is a failure from which there is no recovery, returns
+ * non-zero, otherwise returns 0.
+ *
+ * The rest of the parameters are the same as \see clang_parseTranslationUnit.
+ */
+CINDEX_LINKAGE int clang_indexSourceFile(CXIndexAction,
+ CXClientData client_data,
+ IndexerCallbacks *index_callbacks,
+ unsigned index_callbacks_size,
+ unsigned index_options,
+ const char *source_filename,
+ const char * const *command_line_args,
+ int num_command_line_args,
+ struct CXUnsavedFile *unsaved_files,
+ unsigned num_unsaved_files,
+ CXTranslationUnit *out_TU,
+ unsigned TU_options);
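+
+/*
+ * An illustrative sketch, not part of the API (callback and variable names are
+ * arbitrary; the source file is a placeholder): index a single file with a
+ * callback that reports every declaration it sees.
+ *
+ * \code
+ * static void printDecl(CXClientData client_data, const CXIdxDeclInfo *info) {
+ *   if (info->entityInfo && info->entityInfo->name)
+ *     printf("decl: %s\n", info->entityInfo->name);
+ * }
+ *
+ * // later:
+ * //   CXIndex idx = clang_createIndex(0, 0);
+ * //   CXIndexAction action = clang_IndexAction_create(idx);
+ * //   IndexerCallbacks cb = { 0 };
+ * //   cb.indexDeclaration = printDecl;
+ * //   clang_indexSourceFile(action, NULL, &cb, sizeof(cb), CXIndexOpt_None,
+ * //                         "input.c", NULL, 0, NULL, 0, NULL,
+ * //                         clang_defaultEditingTranslationUnitOptions());
+ * //   clang_IndexAction_dispose(action);
+ * //   clang_disposeIndex(idx);
+ * \endcode
+ */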
+
+/**
+ * \brief Index the given translation unit via callbacks implemented through
+ * \see IndexerCallbacks.
+ *
+ * The order of callback invocations is not guaranteed to be the same as
+ * when indexing a source file. The high-level order will be:
+ *
+ *  - Preprocessor callback invocations
+ *  - Declaration/reference callback invocations
+ *  - Diagnostic callback invocations
+ *
+ * The parameters are the same as \see clang_indexSourceFile.
+ *
+ * \returns If there is a failure from which there is no recovery, returns
+ * non-zero, otherwise returns 0.
+ */
+CINDEX_LINKAGE int clang_indexTranslationUnit(CXIndexAction,
+ CXClientData client_data,
+ IndexerCallbacks *index_callbacks,
+ unsigned index_callbacks_size,
+ unsigned index_options,
+ CXTranslationUnit);
+
+/**
+ * \brief Retrieve the CXIdxClientFile, file, line, column, and offset
+ * represented by
+ * the given CXIdxLoc.
+ *
+ * If the location refers into a macro expansion, retrieves the
+ * location of the macro expansion and if it refers into a macro argument
+ * retrieves the location of the argument.
+ */
+CINDEX_LINKAGE void clang_indexLoc_getFileLocation(CXIdxLoc loc,
+ CXIdxClientFile *indexFile,
+ CXFile *file,
+ unsigned *line,
+ unsigned *column,
+ unsigned *offset);
+
+/**
+ * \brief Retrieve the CXSourceLocation represented by the given CXIdxLoc.
+ */
+CINDEX_LINKAGE
+CXSourceLocation clang_indexLoc_getCXSourceLocation(CXIdxLoc loc);
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+#ifdef __cplusplus
+}
+#endif
+#endif
+
diff --git a/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMT.h b/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMT.h
new file mode 100644
index 0000000..86a6cbb
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMT.h
@@ -0,0 +1,122 @@
+//===-- ARCMT.h - ARC Migration Rewriter ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ARCMIGRATE_ARCMT_H
+#define LLVM_CLANG_ARCMIGRATE_ARCMT_H
+
+#include "clang/ARCMigrate/FileRemapper.h"
+#include "clang/Frontend/CompilerInvocation.h"
+
+namespace clang {
+ class ASTContext;
+ class DiagnosticConsumer;
+
+namespace arcmt {
+ class MigrationPass;
+
+/// \brief Creates an AST with the provided CompilerInvocation but with these
+/// changes:
+///  - if a PCH/PTH is set, the original header is used instead
+///  - Automatic Reference Counting mode is enabled
+///
+/// It then checks the AST and produces errors/warning for ARC migration issues
+/// that the user needs to handle manually.
+///
+/// \param emitPremigrationARCErrors if true all ARC errors will get emitted
+/// even if the migrator can fix them, but the function will still return false
+/// if all ARC errors can be fixed.
+///
+/// \param plistOut if non-empty, it is the file path to store the plist with
+/// the pre-migration ARC diagnostics.
+///
+/// \returns false if no error is produced, true otherwise.
+bool checkForManualIssues(CompilerInvocation &CI,
+ const FrontendInputFile &Input,
+ DiagnosticConsumer *DiagClient,
+ bool emitPremigrationARCErrors = false,
+ StringRef plistOut = StringRef());
+
+/// \brief Works similarly to checkForManualIssues but instead of checking, it
+/// applies automatic modifications to source files to conform to ARC.
+///
+/// \returns false if no error is produced, true otherwise.
+bool applyTransformations(CompilerInvocation &origCI,
+ const FrontendInputFile &Input,
+ DiagnosticConsumer *DiagClient);
+
+/// \brief Applies automatic modifications and produces temporary files
+/// and metadata into the \arg outputDir path.
+///
+/// \param emitPremigrationARCErrors if true, all ARC errors will be emitted
+/// even if the migrator can fix them; the function will still return false
+/// if all ARC errors can be fixed.
+///
+/// \param plistOut if non-empty, it is the file path to store the plist with
+/// the pre-migration ARC diagnostics.
+///
+/// \returns false if no error is produced, true otherwise.
+bool migrateWithTemporaryFiles(CompilerInvocation &origCI,
+ const FrontendInputFile &Input,
+ DiagnosticConsumer *DiagClient,
+ StringRef outputDir,
+ bool emitPremigrationARCErrors,
+ StringRef plistOut);
+
+/// \brief Get the set of file remappings from the \arg outputDir path that
+/// migrateWithTemporaryFiles produced.
+///
+/// \returns false if no error is produced, true otherwise.
+bool getFileRemappings(std::vector<std::pair<std::string,std::string> > &remap,
+ StringRef outputDir,
+ DiagnosticConsumer *DiagClient);
+
+/// \brief Get the set of file remappings from a list of files with remapping
+/// info.
+///
+/// \returns false if no error is produced, true otherwise.
+bool getFileRemappingsFromFileList(
+ std::vector<std::pair<std::string,std::string> > &remap,
+ ArrayRef<StringRef> remapFiles,
+ DiagnosticConsumer *DiagClient);
+
+typedef void (*TransformFn)(MigrationPass &pass);
+
+std::vector<TransformFn> getAllTransformations(LangOptions::GCMode OrigGCMode,
+ bool NoFinalizeRemoval);
+
+class MigrationProcess {
+ CompilerInvocation OrigCI;
+ DiagnosticConsumer *DiagClient;
+ FileRemapper Remapper;
+
+public:
+ MigrationProcess(const CompilerInvocation &CI, DiagnosticConsumer *diagClient,
+ StringRef outputDir = StringRef());
+
+ class RewriteListener {
+ public:
+ virtual ~RewriteListener();
+
+ virtual void start(ASTContext &Ctx) { }
+ virtual void finish() { }
+
+ virtual void insert(SourceLocation loc, StringRef text) { }
+ virtual void remove(CharSourceRange range) { }
+ };
+
+ bool applyTransform(TransformFn trans, RewriteListener *listener = 0);
+
+ FileRemapper &getRemapper() { return Remapper; }
+};
+
+} // end namespace arcmt
+
+} // end namespace clang
+
+#endif
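
A minimal sketch of driving the checking entry point above, assuming the caller has already built a CompilerInvocation, a FrontendInputFile, and a DiagnosticConsumer (their construction lives outside this header and is not shown here):

    #include "clang/ARCMigrate/ARCMT.h"

    // Hypothetical wrapper: returns true when checkForManualIssues reported
    // errors, i.e. when the user has ARC migration issues to fix by hand.
    bool hasARCMigrationIssues(clang::CompilerInvocation &CI,
                               const clang::FrontendInputFile &Input,
                               clang::DiagnosticConsumer *DiagClient) {
      return clang::arcmt::checkForManualIssues(CI, Input, DiagClient);
    }
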
diff --git a/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMTActions.h b/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMTActions.h
new file mode 100644
index 0000000..e075252
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/ARCMigrate/ARCMTActions.h
@@ -0,0 +1,77 @@
+//===--- ARCMTActions.h - ARC Migrate Tool Frontend Actions -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ARCMIGRATE_ARCMT_ACTION_H
+#define LLVM_CLANG_ARCMIGRATE_ARCMT_ACTION_H
+
+#include "clang/Frontend/FrontendAction.h"
+#include "clang/ARCMigrate/FileRemapper.h"
+#include "llvm/ADT/OwningPtr.h"
+
+namespace clang {
+namespace arcmt {
+
+class CheckAction : public WrapperFrontendAction {
+protected:
+ virtual bool BeginInvocation(CompilerInstance &CI);
+
+public:
+ CheckAction(FrontendAction *WrappedAction);
+};
+
+class ModifyAction : public WrapperFrontendAction {
+protected:
+ virtual bool BeginInvocation(CompilerInstance &CI);
+
+public:
+ ModifyAction(FrontendAction *WrappedAction);
+};
+
+class MigrateSourceAction : public ASTFrontendAction {
+ FileRemapper Remapper;
+protected:
+ virtual bool BeginInvocation(CompilerInstance &CI);
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+};
+
+class MigrateAction : public WrapperFrontendAction {
+ std::string MigrateDir;
+ std::string PlistOut;
+ bool EmitPremigrationARCErros;
+protected:
+ virtual bool BeginInvocation(CompilerInstance &CI);
+
+public:
+ MigrateAction(FrontendAction *WrappedAction, StringRef migrateDir,
+ StringRef plistOut,
+ bool emitPremigrationARCErrors);
+};
+
+/// \brief Migrates to modern ObjC syntax.
+class ObjCMigrateAction : public WrapperFrontendAction {
+ std::string MigrateDir;
+ bool MigrateLiterals;
+ bool MigrateSubscripting;
+ FileRemapper Remapper;
+ CompilerInstance *CompInst;
+public:
+ ObjCMigrateAction(FrontendAction *WrappedAction, StringRef migrateDir,
+ bool migrateLiterals,
+ bool migrateSubscripting);
+
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,StringRef InFile);
+ virtual bool BeginInvocation(CompilerInstance &CI);
+};
+
+}
+}
+
+#endif
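
The wrapper actions above follow the usual pattern of decorating another FrontendAction. As a sketch, assuming clang::SyntaxOnlyAction from clang/Frontend/FrontendActions.h (not part of this hunk), a caller might layer the ARC check on top of a plain syntax-only run:

    #include "clang/ARCMigrate/ARCMTActions.h"
    #include "clang/Frontend/FrontendActions.h"

    // Build a frontend action that runs the ARC migration checks around an
    // ordinary syntax-only pass; CheckAction wraps the inner action in the
    // usual WrapperFrontendAction fashion.
    clang::FrontendAction *makeARCCheckAction() {
      return new clang::arcmt::CheckAction(new clang::SyntaxOnlyAction());
    }
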
diff --git a/contrib/llvm/tools/clang/include/clang/ARCMigrate/FileRemapper.h b/contrib/llvm/tools/clang/include/clang/ARCMigrate/FileRemapper.h
new file mode 100644
index 0000000..fe7cfad
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/ARCMigrate/FileRemapper.h
@@ -0,0 +1,80 @@
+//===-- FileRemapper.h - File Remapping Helper ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ARCMIGRATE_FILEREMAPPER_H
+#define LLVM_CLANG_ARCMIGRATE_FILEREMAPPER_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+ class MemoryBuffer;
+}
+
+namespace clang {
+ class FileManager;
+ class FileEntry;
+ class DiagnosticsEngine;
+ class PreprocessorOptions;
+
+namespace arcmt {
+
+class FileRemapper {
+ // FIXME: Reuse the same FileManager for multiple ASTContexts.
+ OwningPtr<FileManager> FileMgr;
+
+ typedef llvm::PointerUnion<const FileEntry *, llvm::MemoryBuffer *> Target;
+ typedef llvm::DenseMap<const FileEntry *, Target> MappingsTy;
+ MappingsTy FromToMappings;
+
+ llvm::DenseMap<const FileEntry *, const FileEntry *> ToFromMappings;
+
+public:
+ FileRemapper();
+ ~FileRemapper();
+
+ bool initFromDisk(StringRef outputDir, DiagnosticsEngine &Diag,
+ bool ignoreIfFilesChanged);
+ bool initFromFile(StringRef filePath, DiagnosticsEngine &Diag,
+ bool ignoreIfFilesChanged);
+ bool flushToDisk(StringRef outputDir, DiagnosticsEngine &Diag);
+ bool flushToFile(StringRef outputPath, DiagnosticsEngine &Diag);
+
+ bool overwriteOriginal(DiagnosticsEngine &Diag,
+ StringRef outputDir = StringRef());
+
+ void remap(StringRef filePath, llvm::MemoryBuffer *memBuf);
+ void remap(StringRef filePath, StringRef newPath);
+
+ void applyMappings(PreprocessorOptions &PPOpts) const;
+
+ void transferMappingsAndClear(PreprocessorOptions &PPOpts);
+
+ void clear(StringRef outputDir = StringRef());
+
+private:
+ void remap(const FileEntry *file, llvm::MemoryBuffer *memBuf);
+ void remap(const FileEntry *file, const FileEntry *newfile);
+
+ const FileEntry *getOriginalFile(StringRef filePath);
+ void resetTarget(Target &targ);
+
+ bool report(const Twine &err, DiagnosticsEngine &Diag);
+
+ std::string getRemapInfoFile(StringRef outputDir);
+};
+
+} // end namespace arcmt
+
+} // end namespace clang
+
+#endif
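
A sketch of the intended round trip, assuming 'Diag' and 'PPOpts' come from an existing CompilerInstance and that FileRemapper follows the same "returns true on error" convention documented for the functions in ARCMT.h (this header does not spell that out):

    #include "clang/ARCMigrate/FileRemapper.h"

    // Load the remappings a previous migration run wrote into 'outputDir' and
    // feed them to the preprocessor options of a new invocation.
    bool loadAndApplyRemappings(llvm::StringRef outputDir,
                                clang::DiagnosticsEngine &Diag,
                                clang::PreprocessorOptions &PPOpts) {
      clang::arcmt::FileRemapper Remapper;
      if (Remapper.initFromDisk(outputDir, Diag,
                                /*ignoreIfFilesChanged=*/false))
        return true;  // assumed error convention: true means failure
      Remapper.applyMappings(PPOpts);
      return false;
    }
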
diff --git a/contrib/llvm/tools/clang/include/clang/AST/APValue.h b/contrib/llvm/tools/clang/include/clang/AST/APValue.h
new file mode 100644
index 0000000..1b6e90c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/APValue.h
@@ -0,0 +1,446 @@
+//===--- APValue.h - Union class for APFloat/APSInt/Complex -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the APValue class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_APVALUE_H
+#define LLVM_CLANG_AST_APVALUE_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
+
+namespace clang {
+ class AddrLabelExpr;
+ class ASTContext;
+ class CharUnits;
+ class DiagnosticBuilder;
+ class Expr;
+ class FieldDecl;
+ class Decl;
+ class ValueDecl;
+ class CXXRecordDecl;
+ class QualType;
+
+/// APValue - This class implements a discriminated union of [uninitialized]
+/// [APSInt] [APFloat], [Complex APSInt] [Complex APFloat], [Expr + Offset],
+/// [Vector: N * APValue], [Array: N * APValue]
+class APValue {
+ typedef llvm::APSInt APSInt;
+ typedef llvm::APFloat APFloat;
+public:
+ enum ValueKind {
+ Uninitialized,
+ Int,
+ Float,
+ ComplexInt,
+ ComplexFloat,
+ LValue,
+ Vector,
+ Array,
+ Struct,
+ Union,
+ MemberPointer,
+ AddrLabelDiff
+ };
+ typedef llvm::PointerUnion<const ValueDecl *, const Expr *> LValueBase;
+ typedef llvm::PointerIntPair<const Decl *, 1, bool> BaseOrMemberType;
+ union LValuePathEntry {
+ /// BaseOrMember - The FieldDecl or CXXRecordDecl indicating the next item
+ /// in the path. An opaque value of type BaseOrMemberType.
+ void *BaseOrMember;
+ /// ArrayIndex - The array index of the next item in the path.
+ uint64_t ArrayIndex;
+ };
+ struct NoLValuePath {};
+ struct UninitArray {};
+ struct UninitStruct {};
+private:
+ ValueKind Kind;
+
+ struct ComplexAPSInt {
+ APSInt Real, Imag;
+ ComplexAPSInt() : Real(1), Imag(1) {}
+ };
+ struct ComplexAPFloat {
+ APFloat Real, Imag;
+ ComplexAPFloat() : Real(0.0), Imag(0.0) {}
+ };
+ struct LV;
+ struct Vec {
+ APValue *Elts;
+ unsigned NumElts;
+ Vec() : Elts(0), NumElts(0) {}
+ ~Vec() { delete[] Elts; }
+ };
+ struct Arr {
+ APValue *Elts;
+ unsigned NumElts, ArrSize;
+ Arr(unsigned NumElts, unsigned ArrSize);
+ ~Arr();
+ };
+ struct StructData {
+ APValue *Elts;
+ unsigned NumBases;
+ unsigned NumFields;
+ StructData(unsigned NumBases, unsigned NumFields);
+ ~StructData();
+ };
+ struct UnionData {
+ const FieldDecl *Field;
+ APValue *Value;
+ UnionData();
+ ~UnionData();
+ };
+ struct AddrLabelDiffData {
+ const AddrLabelExpr* LHSExpr;
+ const AddrLabelExpr* RHSExpr;
+ };
+ struct MemberPointerData;
+
+ enum {
+ MaxSize = (sizeof(ComplexAPSInt) > sizeof(ComplexAPFloat) ?
+ sizeof(ComplexAPSInt) : sizeof(ComplexAPFloat))
+ };
+
+ union {
+ void *Aligner;
+ char Data[MaxSize];
+ };
+
+public:
+ APValue() : Kind(Uninitialized) {}
+ explicit APValue(const APSInt &I) : Kind(Uninitialized) {
+ MakeInt(); setInt(I);
+ }
+ explicit APValue(const APFloat &F) : Kind(Uninitialized) {
+ MakeFloat(); setFloat(F);
+ }
+ explicit APValue(const APValue *E, unsigned N) : Kind(Uninitialized) {
+ MakeVector(); setVector(E, N);
+ }
+ APValue(const APSInt &R, const APSInt &I) : Kind(Uninitialized) {
+ MakeComplexInt(); setComplexInt(R, I);
+ }
+ APValue(const APFloat &R, const APFloat &I) : Kind(Uninitialized) {
+ MakeComplexFloat(); setComplexFloat(R, I);
+ }
+ APValue(const APValue &RHS);
+ APValue(LValueBase B, const CharUnits &O, NoLValuePath N, unsigned CallIndex)
+ : Kind(Uninitialized) {
+ MakeLValue(); setLValue(B, O, N, CallIndex);
+ }
+ APValue(LValueBase B, const CharUnits &O, ArrayRef<LValuePathEntry> Path,
+ bool OnePastTheEnd, unsigned CallIndex)
+ : Kind(Uninitialized) {
+ MakeLValue(); setLValue(B, O, Path, OnePastTheEnd, CallIndex);
+ }
+ APValue(UninitArray, unsigned InitElts, unsigned Size) : Kind(Uninitialized) {
+ MakeArray(InitElts, Size);
+ }
+ APValue(UninitStruct, unsigned B, unsigned M) : Kind(Uninitialized) {
+ MakeStruct(B, M);
+ }
+ explicit APValue(const FieldDecl *D, const APValue &V = APValue())
+ : Kind(Uninitialized) {
+ MakeUnion(); setUnion(D, V);
+ }
+ APValue(const ValueDecl *Member, bool IsDerivedMember,
+ ArrayRef<const CXXRecordDecl*> Path) : Kind(Uninitialized) {
+ MakeMemberPointer(Member, IsDerivedMember, Path);
+ }
+ APValue(const AddrLabelExpr* LHSExpr, const AddrLabelExpr* RHSExpr)
+ : Kind(Uninitialized) {
+ MakeAddrLabelDiff(); setAddrLabelDiff(LHSExpr, RHSExpr);
+ }
+
+ ~APValue() {
+ MakeUninit();
+ }
+
+ /// \brief Swaps the contents of this and the given APValue.
+ void swap(APValue &RHS);
+
+ ValueKind getKind() const { return Kind; }
+ bool isUninit() const { return Kind == Uninitialized; }
+ bool isInt() const { return Kind == Int; }
+ bool isFloat() const { return Kind == Float; }
+ bool isComplexInt() const { return Kind == ComplexInt; }
+ bool isComplexFloat() const { return Kind == ComplexFloat; }
+ bool isLValue() const { return Kind == LValue; }
+ bool isVector() const { return Kind == Vector; }
+ bool isArray() const { return Kind == Array; }
+ bool isStruct() const { return Kind == Struct; }
+ bool isUnion() const { return Kind == Union; }
+ bool isMemberPointer() const { return Kind == MemberPointer; }
+ bool isAddrLabelDiff() const { return Kind == AddrLabelDiff; }
+
+ void dump() const;
+ void dump(raw_ostream &OS) const;
+
+ void printPretty(raw_ostream &OS, ASTContext &Ctx, QualType Ty) const;
+ std::string getAsString(ASTContext &Ctx, QualType Ty) const;
+
+ APSInt &getInt() {
+ assert(isInt() && "Invalid accessor");
+ return *(APSInt*)(char*)Data;
+ }
+ const APSInt &getInt() const {
+ return const_cast<APValue*>(this)->getInt();
+ }
+
+ APFloat &getFloat() {
+ assert(isFloat() && "Invalid accessor");
+ return *(APFloat*)(char*)Data;
+ }
+ const APFloat &getFloat() const {
+ return const_cast<APValue*>(this)->getFloat();
+ }
+
+ APSInt &getComplexIntReal() {
+ assert(isComplexInt() && "Invalid accessor");
+ return ((ComplexAPSInt*)(char*)Data)->Real;
+ }
+ const APSInt &getComplexIntReal() const {
+ return const_cast<APValue*>(this)->getComplexIntReal();
+ }
+
+ APSInt &getComplexIntImag() {
+ assert(isComplexInt() && "Invalid accessor");
+ return ((ComplexAPSInt*)(char*)Data)->Imag;
+ }
+ const APSInt &getComplexIntImag() const {
+ return const_cast<APValue*>(this)->getComplexIntImag();
+ }
+
+ APFloat &getComplexFloatReal() {
+ assert(isComplexFloat() && "Invalid accessor");
+ return ((ComplexAPFloat*)(char*)Data)->Real;
+ }
+ const APFloat &getComplexFloatReal() const {
+ return const_cast<APValue*>(this)->getComplexFloatReal();
+ }
+
+ APFloat &getComplexFloatImag() {
+ assert(isComplexFloat() && "Invalid accessor");
+ return ((ComplexAPFloat*)(char*)Data)->Imag;
+ }
+ const APFloat &getComplexFloatImag() const {
+ return const_cast<APValue*>(this)->getComplexFloatImag();
+ }
+
+ const LValueBase getLValueBase() const;
+ CharUnits &getLValueOffset();
+ const CharUnits &getLValueOffset() const {
+ return const_cast<APValue*>(this)->getLValueOffset();
+ }
+ bool isLValueOnePastTheEnd() const;
+ bool hasLValuePath() const;
+ ArrayRef<LValuePathEntry> getLValuePath() const;
+ unsigned getLValueCallIndex() const;
+
+ APValue &getVectorElt(unsigned I) {
+ assert(isVector() && "Invalid accessor");
+ assert(I < getVectorLength() && "Index out of range");
+ return ((Vec*)(char*)Data)->Elts[I];
+ }
+ const APValue &getVectorElt(unsigned I) const {
+ return const_cast<APValue*>(this)->getVectorElt(I);
+ }
+ unsigned getVectorLength() const {
+ assert(isVector() && "Invalid accessor");
+ return ((const Vec*)(const void *)Data)->NumElts;
+ }
+
+ APValue &getArrayInitializedElt(unsigned I) {
+ assert(isArray() && "Invalid accessor");
+ assert(I < getArrayInitializedElts() && "Index out of range");
+ return ((Arr*)(char*)Data)->Elts[I];
+ }
+ const APValue &getArrayInitializedElt(unsigned I) const {
+ return const_cast<APValue*>(this)->getArrayInitializedElt(I);
+ }
+ bool hasArrayFiller() const {
+ return getArrayInitializedElts() != getArraySize();
+ }
+ APValue &getArrayFiller() {
+ assert(isArray() && "Invalid accessor");
+ assert(hasArrayFiller() && "No array filler");
+ return ((Arr*)(char*)Data)->Elts[getArrayInitializedElts()];
+ }
+ const APValue &getArrayFiller() const {
+ return const_cast<APValue*>(this)->getArrayFiller();
+ }
+ unsigned getArrayInitializedElts() const {
+ assert(isArray() && "Invalid accessor");
+ return ((const Arr*)(const void *)Data)->NumElts;
+ }
+ unsigned getArraySize() const {
+ assert(isArray() && "Invalid accessor");
+ return ((const Arr*)(const void *)Data)->ArrSize;
+ }
+
+ unsigned getStructNumBases() const {
+ assert(isStruct() && "Invalid accessor");
+ return ((const StructData*)(const char*)Data)->NumBases;
+ }
+ unsigned getStructNumFields() const {
+ assert(isStruct() && "Invalid accessor");
+ return ((const StructData*)(const char*)Data)->NumFields;
+ }
+ APValue &getStructBase(unsigned i) {
+ assert(isStruct() && "Invalid accessor");
+ return ((StructData*)(char*)Data)->Elts[i];
+ }
+ APValue &getStructField(unsigned i) {
+ assert(isStruct() && "Invalid accessor");
+ return ((StructData*)(char*)Data)->Elts[getStructNumBases() + i];
+ }
+ const APValue &getStructBase(unsigned i) const {
+ return const_cast<APValue*>(this)->getStructBase(i);
+ }
+ const APValue &getStructField(unsigned i) const {
+ return const_cast<APValue*>(this)->getStructField(i);
+ }
+
+ const FieldDecl *getUnionField() const {
+ assert(isUnion() && "Invalid accessor");
+ return ((const UnionData*)(const char*)Data)->Field;
+ }
+ APValue &getUnionValue() {
+ assert(isUnion() && "Invalid accessor");
+ return *((UnionData*)(char*)Data)->Value;
+ }
+ const APValue &getUnionValue() const {
+ return const_cast<APValue*>(this)->getUnionValue();
+ }
+
+ const ValueDecl *getMemberPointerDecl() const;
+ bool isMemberPointerToDerivedMember() const;
+ ArrayRef<const CXXRecordDecl*> getMemberPointerPath() const;
+
+ const AddrLabelExpr* getAddrLabelDiffLHS() const {
+ assert(isAddrLabelDiff() && "Invalid accessor");
+ return ((const AddrLabelDiffData*)(const char*)Data)->LHSExpr;
+ }
+ const AddrLabelExpr* getAddrLabelDiffRHS() const {
+ assert(isAddrLabelDiff() && "Invalid accessor");
+ return ((const AddrLabelDiffData*)(const char*)Data)->RHSExpr;
+ }
+
+ void setInt(const APSInt &I) {
+ assert(isInt() && "Invalid accessor");
+ *(APSInt*)(char*)Data = I;
+ }
+ void setFloat(const APFloat &F) {
+ assert(isFloat() && "Invalid accessor");
+ *(APFloat*)(char*)Data = F;
+ }
+ void setVector(const APValue *E, unsigned N) {
+ assert(isVector() && "Invalid accessor");
+ ((Vec*)(char*)Data)->Elts = new APValue[N];
+ ((Vec*)(char*)Data)->NumElts = N;
+ for (unsigned i = 0; i != N; ++i)
+ ((Vec*)(char*)Data)->Elts[i] = E[i];
+ }
+ void setComplexInt(const APSInt &R, const APSInt &I) {
+ assert(R.getBitWidth() == I.getBitWidth() &&
+ "Invalid complex int (type mismatch).");
+ assert(isComplexInt() && "Invalid accessor");
+ ((ComplexAPSInt*)(char*)Data)->Real = R;
+ ((ComplexAPSInt*)(char*)Data)->Imag = I;
+ }
+ void setComplexFloat(const APFloat &R, const APFloat &I) {
+ assert(&R.getSemantics() == &I.getSemantics() &&
+ "Invalid complex float (type mismatch).");
+ assert(isComplexFloat() && "Invalid accessor");
+ ((ComplexAPFloat*)(char*)Data)->Real = R;
+ ((ComplexAPFloat*)(char*)Data)->Imag = I;
+ }
+ void setLValue(LValueBase B, const CharUnits &O, NoLValuePath,
+ unsigned CallIndex);
+ void setLValue(LValueBase B, const CharUnits &O,
+ ArrayRef<LValuePathEntry> Path, bool OnePastTheEnd,
+ unsigned CallIndex);
+ void setUnion(const FieldDecl *Field, const APValue &Value) {
+ assert(isUnion() && "Invalid accessor");
+ ((UnionData*)(char*)Data)->Field = Field;
+ *((UnionData*)(char*)Data)->Value = Value;
+ }
+ void setAddrLabelDiff(const AddrLabelExpr* LHSExpr,
+ const AddrLabelExpr* RHSExpr) {
+ ((AddrLabelDiffData*)(char*)Data)->LHSExpr = LHSExpr;
+ ((AddrLabelDiffData*)(char*)Data)->RHSExpr = RHSExpr;
+ }
+
+ /// Assign by swapping from a copy of the RHS.
+ APValue &operator=(APValue RHS) {
+ swap(RHS);
+ return *this;
+ }
+
+private:
+ void DestroyDataAndMakeUninit();
+ void MakeUninit() {
+ if (Kind != Uninitialized)
+ DestroyDataAndMakeUninit();
+ }
+ void MakeInt() {
+ assert(isUninit() && "Bad state change");
+ new ((void*)Data) APSInt(1);
+ Kind = Int;
+ }
+ void MakeFloat() {
+ assert(isUninit() && "Bad state change");
+ new ((void*)(char*)Data) APFloat(0.0);
+ Kind = Float;
+ }
+ void MakeVector() {
+ assert(isUninit() && "Bad state change");
+ new ((void*)(char*)Data) Vec();
+ Kind = Vector;
+ }
+ void MakeComplexInt() {
+ assert(isUninit() && "Bad state change");
+ new ((void*)(char*)Data) ComplexAPSInt();
+ Kind = ComplexInt;
+ }
+ void MakeComplexFloat() {
+ assert(isUninit() && "Bad state change");
+ new ((void*)(char*)Data) ComplexAPFloat();
+ Kind = ComplexFloat;
+ }
+ void MakeLValue();
+ void MakeArray(unsigned InitElts, unsigned Size);
+ void MakeStruct(unsigned B, unsigned M) {
+ assert(isUninit() && "Bad state change");
+ new ((void*)(char*)Data) StructData(B, M);
+ Kind = Struct;
+ }
+ void MakeUnion() {
+ assert(isUninit() && "Bad state change");
+ new ((void*)(char*)Data) UnionData();
+ Kind = Union;
+ }
+ void MakeMemberPointer(const ValueDecl *Member, bool IsDerivedMember,
+ ArrayRef<const CXXRecordDecl*> Path);
+ void MakeAddrLabelDiff() {
+ assert(isUninit() && "Bad state change");
+ new ((void*)(char*)Data) AddrLabelDiffData();
+ Kind = AddrLabelDiff;
+ }
+};
+
+} // end namespace clang.
+
+#endif
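
A small sketch of the discriminated union in use: construct an integer APValue and a complex-integer APValue from llvm::APSInt, then read them back through the kind-checked accessors declared above (nothing beyond APValue.h and LLVM's APSInt is assumed):

    #include "clang/AST/APValue.h"

    void apValueDemo() {
      llvm::APSInt I(llvm::APInt(32, 42), /*isUnsigned=*/false);

      clang::APValue IntVal(I);               // Kind == Int
      llvm::APSInt Stored = IntVal.getInt();  // kind-checked accessor

      clang::APValue ComplexVal(I, I);        // Kind == ComplexInt
      bool IsComplex = ComplexVal.isComplexInt();

      (void)Stored; (void)IsComplex;
    }
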
diff --git a/contrib/llvm/tools/clang/include/clang/AST/AST.h b/contrib/llvm/tools/clang/include/clang/AST/AST.h
new file mode 100644
index 0000000..164c5fb
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/AST.h
@@ -0,0 +1,28 @@
+//===--- AST.h - "Umbrella" header for AST library --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface to the AST classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_AST_H
+#define LLVM_CLANG_AST_AST_H
+
+// This header exports all AST interfaces.
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/StmtVisitor.h"
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h b/contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h
new file mode 100644
index 0000000..69a3866
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTConsumer.h
@@ -0,0 +1,128 @@
+//===--- ASTConsumer.h - Abstract interface for reading ASTs ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTConsumer class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_ASTCONSUMER_H
+#define LLVM_CLANG_AST_ASTCONSUMER_H
+
+namespace clang {
+ class ASTContext;
+ class CXXRecordDecl;
+ class DeclGroupRef;
+ class HandleTagDeclDefinition;
+ class ASTMutationListener;
+ class ASTDeserializationListener; // layering violation because void* is ugly
+ class SemaConsumer; // layering violation required for safe SemaConsumer
+ class TagDecl;
+ class VarDecl;
+ class FunctionDecl;
+
+/// ASTConsumer - This is an abstract interface that should be implemented by
+/// clients that read ASTs. This abstraction layer allows the client to be
+/// independent of the AST producer (e.g. parser vs AST dump file reader, etc).
+class ASTConsumer {
+ /// \brief Whether this AST consumer also requires information about
+ /// semantic analysis.
+ bool SemaConsumer;
+
+ friend class SemaConsumer;
+
+public:
+ ASTConsumer() : SemaConsumer(false) { }
+
+ virtual ~ASTConsumer() {}
+
+ /// Initialize - This is called to initialize the consumer, providing the
+ /// ASTContext.
+ virtual void Initialize(ASTContext &Context) {}
+
+ /// HandleTopLevelDecl - Handle the specified top-level declaration. This is
+ /// called by the parser to process every top-level Decl*. Note that D can be
+ /// the head of a chain of Decls (e.g. for `int a, b` the chain will have two
+ /// elements). Use Decl::getNextDeclarator() to walk the chain.
+ ///
+ /// \returns true to continue parsing, or false to abort parsing.
+ virtual bool HandleTopLevelDecl(DeclGroupRef D);
+
+ /// HandleInterestingDecl - Handle the specified interesting declaration. This
+ /// is called by the AST reader when deserializing things that might interest
+ /// the consumer. The default implementation forwards to HandleTopLevelDecl.
+ virtual void HandleInterestingDecl(DeclGroupRef D);
+
+ /// HandleTranslationUnit - This method is called when the ASTs for the entire
+ /// translation unit have been parsed.
+ virtual void HandleTranslationUnit(ASTContext &Ctx) {}
+
+ /// HandleTagDeclDefinition - This callback is invoked each time a TagDecl
+ /// (e.g. struct, union, enum, class) is completed. This allows the client to
+ /// hack on the type, which can occur at any point in the file (because these
+ /// can be defined in declspecs).
+ virtual void HandleTagDeclDefinition(TagDecl *D) {}
+
+ /// \brief Invoked when a function is implicitly instantiated.
+ /// Note that at this point it does not have a body; its body is
+ /// instantiated at the end of the translation unit and passed to
+ /// HandleTopLevelDecl.
+ virtual void HandleCXXImplicitFunctionInstantiation(FunctionDecl *D) {}
+
+ /// \brief Handle the specified top-level declaration that occurred inside
+ /// an ObjC container.
+ /// The default implementation ignores them.
+ virtual void HandleTopLevelDeclInObjCContainer(DeclGroupRef D);
+
+ /// CompleteTentativeDefinition - Callback invoked at the end of a translation
+ /// unit to notify the consumer that the given tentative definition should be
+ /// completed.
+ ///
+ /// The variable declaration itself will be a tentative
+ /// definition. If it had an incomplete array type, its type will
+ /// have already been changed to an array of size 1. However, the
+ /// declaration remains a tentative definition and has not been
+ /// modified by the introduction of an implicit zero initializer.
+ virtual void CompleteTentativeDefinition(VarDecl *D) {}
+
+ /// HandleCXXStaticMemberVarInstantiation - Tell the consumer that this
+ /// variable has been instantiated.
+ virtual void HandleCXXStaticMemberVarInstantiation(VarDecl *D) {}
+
+ /// \brief Callback invoked at the end of a translation unit to
+ /// notify the consumer that a vtable for the given C++ class is
+ /// required.
+ ///
+ /// \param RD The class whose vtable was used.
+ ///
+ /// \param DefinitionRequired Whether a definition of this vtable is
+ /// required in this translation unit; otherwise, it is only needed if
+ /// it was actually used.
+ virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) {}
+
+ /// \brief If the consumer is interested in entities getting modified after
+ /// their initial creation, it should return a pointer to
+ /// an ASTMutationListener here.
+ virtual ASTMutationListener *GetASTMutationListener() { return 0; }
+
+ /// \brief If the consumer is interested in entities being deserialized from
+ /// AST files, it should return a pointer to an ASTDeserializationListener here.
+ virtual ASTDeserializationListener *GetASTDeserializationListener() {
+ return 0;
+ }
+
+ /// PrintStats - If desired, print any statistics.
+ virtual void PrintStats() {}
+
+ // Support isa/cast/dyn_cast
+ static bool classof(const ASTConsumer *) { return true; }
+};
+
+} // end namespace clang.
+
+#endif
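
As a sketch of a client built against the interface above, the consumer below counts top-level declarations as the parser hands them over; only the virtual methods declared in this header plus DeclGroupRef's iterators (from clang/AST/DeclGroup.h) are assumed:

    #include "clang/AST/ASTConsumer.h"
    #include "clang/AST/DeclGroup.h"

    class CountingConsumer : public clang::ASTConsumer {
      unsigned NumTopLevelDecls;
    public:
      CountingConsumer() : NumTopLevelDecls(0) {}

      // Called for every top-level declaration group; return true to continue.
      virtual bool HandleTopLevelDecl(clang::DeclGroupRef D) {
        for (clang::DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I)
          ++NumTopLevelDecls;
        return true;
      }

      // Called once the whole translation unit has been parsed.
      virtual void HandleTranslationUnit(clang::ASTContext &Ctx) {
        (void)Ctx;  // NumTopLevelDecls now holds the total for this TU.
      }
    };
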
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h b/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h
new file mode 100644
index 0000000..96e41c5
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTContext.h
@@ -0,0 +1,1998 @@
+//===--- ASTContext.h - Context to hold long-lived AST nodes ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTContext interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_ASTCONTEXT_H
+#define LLVM_CLANG_AST_ASTCONTEXT_H
+
+#include "clang/Basic/AddressSpaces.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/OperatorKinds.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/VersionTuple.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/LambdaMangleContext.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/TemplateName.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/CanonicalType.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/TinyPtrVector.h"
+#include "llvm/Support/Allocator.h"
+#include <vector>
+
+namespace llvm {
+ struct fltSemantics;
+}
+
+namespace clang {
+ class FileManager;
+ class ASTRecordLayout;
+ class BlockExpr;
+ class CharUnits;
+ class DiagnosticsEngine;
+ class Expr;
+ class ExternalASTSource;
+ class ASTMutationListener;
+ class IdentifierTable;
+ class SelectorTable;
+ class SourceManager;
+ class TargetInfo;
+ class CXXABI;
+ // Decls
+ class DeclContext;
+ class CXXConversionDecl;
+ class CXXMethodDecl;
+ class CXXRecordDecl;
+ class Decl;
+ class FieldDecl;
+ class MangleContext;
+ class ObjCIvarDecl;
+ class ObjCIvarRefExpr;
+ class ObjCPropertyDecl;
+ class ParmVarDecl;
+ class RecordDecl;
+ class StoredDeclsMap;
+ class TagDecl;
+ class TemplateTemplateParmDecl;
+ class TemplateTypeParmDecl;
+ class TranslationUnitDecl;
+ class TypeDecl;
+ class TypedefNameDecl;
+ class UsingDecl;
+ class UsingShadowDecl;
+ class UnresolvedSetIterator;
+
+ namespace Builtin { class Context; }
+
+/// ASTContext - This class holds long-lived AST nodes (such as types and
+/// decls) that can be referred to throughout the semantic analysis of a file.
+class ASTContext : public RefCountedBase<ASTContext> {
+ ASTContext &this_() { return *this; }
+
+ mutable std::vector<Type*> Types;
+ mutable llvm::FoldingSet<ExtQuals> ExtQualNodes;
+ mutable llvm::FoldingSet<ComplexType> ComplexTypes;
+ mutable llvm::FoldingSet<PointerType> PointerTypes;
+ mutable llvm::FoldingSet<BlockPointerType> BlockPointerTypes;
+ mutable llvm::FoldingSet<LValueReferenceType> LValueReferenceTypes;
+ mutable llvm::FoldingSet<RValueReferenceType> RValueReferenceTypes;
+ mutable llvm::FoldingSet<MemberPointerType> MemberPointerTypes;
+ mutable llvm::FoldingSet<ConstantArrayType> ConstantArrayTypes;
+ mutable llvm::FoldingSet<IncompleteArrayType> IncompleteArrayTypes;
+ mutable std::vector<VariableArrayType*> VariableArrayTypes;
+ mutable llvm::FoldingSet<DependentSizedArrayType> DependentSizedArrayTypes;
+ mutable llvm::FoldingSet<DependentSizedExtVectorType>
+ DependentSizedExtVectorTypes;
+ mutable llvm::FoldingSet<VectorType> VectorTypes;
+ mutable llvm::FoldingSet<FunctionNoProtoType> FunctionNoProtoTypes;
+ mutable llvm::ContextualFoldingSet<FunctionProtoType, ASTContext&>
+ FunctionProtoTypes;
+ mutable llvm::FoldingSet<DependentTypeOfExprType> DependentTypeOfExprTypes;
+ mutable llvm::FoldingSet<DependentDecltypeType> DependentDecltypeTypes;
+ mutable llvm::FoldingSet<TemplateTypeParmType> TemplateTypeParmTypes;
+ mutable llvm::FoldingSet<SubstTemplateTypeParmType>
+ SubstTemplateTypeParmTypes;
+ mutable llvm::FoldingSet<SubstTemplateTypeParmPackType>
+ SubstTemplateTypeParmPackTypes;
+ mutable llvm::ContextualFoldingSet<TemplateSpecializationType, ASTContext&>
+ TemplateSpecializationTypes;
+ mutable llvm::FoldingSet<ParenType> ParenTypes;
+ mutable llvm::FoldingSet<ElaboratedType> ElaboratedTypes;
+ mutable llvm::FoldingSet<DependentNameType> DependentNameTypes;
+ mutable llvm::ContextualFoldingSet<DependentTemplateSpecializationType,
+ ASTContext&>
+ DependentTemplateSpecializationTypes;
+ llvm::FoldingSet<PackExpansionType> PackExpansionTypes;
+ mutable llvm::FoldingSet<ObjCObjectTypeImpl> ObjCObjectTypes;
+ mutable llvm::FoldingSet<ObjCObjectPointerType> ObjCObjectPointerTypes;
+ mutable llvm::FoldingSet<AutoType> AutoTypes;
+ mutable llvm::FoldingSet<AtomicType> AtomicTypes;
+ llvm::FoldingSet<AttributedType> AttributedTypes;
+
+ mutable llvm::FoldingSet<QualifiedTemplateName> QualifiedTemplateNames;
+ mutable llvm::FoldingSet<DependentTemplateName> DependentTemplateNames;
+ mutable llvm::FoldingSet<SubstTemplateTemplateParmStorage>
+ SubstTemplateTemplateParms;
+ mutable llvm::ContextualFoldingSet<SubstTemplateTemplateParmPackStorage,
+ ASTContext&>
+ SubstTemplateTemplateParmPacks;
+
+ /// \brief The set of nested name specifiers.
+ ///
+ /// This set is managed by the NestedNameSpecifier class.
+ mutable llvm::FoldingSet<NestedNameSpecifier> NestedNameSpecifiers;
+ mutable NestedNameSpecifier *GlobalNestedNameSpecifier;
+ friend class NestedNameSpecifier;
+
+ /// ASTRecordLayouts - A cache mapping from RecordDecls to ASTRecordLayouts.
+ /// This is lazily created. This is intentionally not serialized.
+ mutable llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>
+ ASTRecordLayouts;
+ mutable llvm::DenseMap<const ObjCContainerDecl*, const ASTRecordLayout*>
+ ObjCLayouts;
+
+ /// TypeInfoMap - A cache from types to size and alignment information.
+ typedef llvm::DenseMap<const Type*,
+ std::pair<uint64_t, unsigned> > TypeInfoMap;
+ mutable TypeInfoMap MemoizedTypeInfo;
+
+ /// KeyFunctions - A cache mapping from CXXRecordDecls to key functions.
+ llvm::DenseMap<const CXXRecordDecl*, const CXXMethodDecl*> KeyFunctions;
+
+ /// \brief Mapping from ObjCContainers to their ObjCImplementations.
+ llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*> ObjCImpls;
+
+ /// \brief Mapping from ObjCMethod to its duplicate declaration in the same
+ /// interface.
+ llvm::DenseMap<const ObjCMethodDecl*,const ObjCMethodDecl*> ObjCMethodRedecls;
+
+ /// \brief Mapping from __block VarDecls to their copy initialization expr.
+ llvm::DenseMap<const VarDecl*, Expr*> BlockVarCopyInits;
+
+ /// \brief Mapping from class-scope function specializations to their
+ /// template patterns.
+ llvm::DenseMap<const FunctionDecl*, FunctionDecl*>
+ ClassScopeSpecializationPattern;
+
+ /// \brief Representation of a "canonical" template template parameter that
+ /// is used in canonical template names.
+ class CanonicalTemplateTemplateParm : public llvm::FoldingSetNode {
+ TemplateTemplateParmDecl *Parm;
+
+ public:
+ CanonicalTemplateTemplateParm(TemplateTemplateParmDecl *Parm)
+ : Parm(Parm) { }
+
+ TemplateTemplateParmDecl *getParam() const { return Parm; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) { Profile(ID, Parm); }
+
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ TemplateTemplateParmDecl *Parm);
+ };
+ mutable llvm::FoldingSet<CanonicalTemplateTemplateParm>
+ CanonTemplateTemplateParms;
+
+ TemplateTemplateParmDecl *
+ getCanonicalTemplateTemplateParmDecl(TemplateTemplateParmDecl *TTP) const;
+
+ /// \brief The typedef for the __int128_t type.
+ mutable TypedefDecl *Int128Decl;
+
+ /// \brief The typedef for the __uint128_t type.
+ mutable TypedefDecl *UInt128Decl;
+
+ /// BuiltinVaListType - built-in va list type.
+ /// This is initially null and set by Sema::LazilyCreateBuiltin when
+ /// a builtin that takes a valist is encountered.
+ QualType BuiltinVaListType;
+
+ /// \brief The typedef for the predefined 'id' type.
+ mutable TypedefDecl *ObjCIdDecl;
+
+ /// \brief The typedef for the predefined 'SEL' type.
+ mutable TypedefDecl *ObjCSelDecl;
+
+ /// \brief The typedef for the predefined 'Class' type.
+ mutable TypedefDecl *ObjCClassDecl;
+
+ /// \brief The typedef for the predefined 'Protocol' class in Objective-C.
+ mutable ObjCInterfaceDecl *ObjCProtocolClassDecl;
+
+ // Typedefs which may be provided defining the structure of Objective-C
+ // pseudo-builtins
+ QualType ObjCIdRedefinitionType;
+ QualType ObjCClassRedefinitionType;
+ QualType ObjCSelRedefinitionType;
+
+ QualType ObjCConstantStringType;
+ mutable RecordDecl *CFConstantStringTypeDecl;
+
+ QualType ObjCNSStringType;
+
+ /// \brief The typedef declaration for the Objective-C "instancetype" type.
+ TypedefDecl *ObjCInstanceTypeDecl;
+
+ /// \brief The type for the C FILE type.
+ TypeDecl *FILEDecl;
+
+ /// \brief The type for the C jmp_buf type.
+ TypeDecl *jmp_bufDecl;
+
+ /// \brief The type for the C sigjmp_buf type.
+ TypeDecl *sigjmp_bufDecl;
+
+ /// \brief The type for the C ucontext_t type.
+ TypeDecl *ucontext_tDecl;
+
+ /// \brief Type for the Block descriptor for Blocks CodeGen.
+ ///
+ /// Since this is only used for generation of debug info, it is not
+ /// serialized.
+ mutable RecordDecl *BlockDescriptorType;
+
+ /// \brief Type for the extended Block descriptor for Blocks CodeGen.
+ ///
+ /// Since this is only used for generation of debug info, it is not
+ /// serialized.
+ mutable RecordDecl *BlockDescriptorExtendedType;
+
+ /// \brief Declaration for the CUDA cudaConfigureCall function.
+ FunctionDecl *cudaConfigureCallDecl;
+
+ TypeSourceInfo NullTypeSourceInfo;
+
+ /// \brief Keeps track of all declaration attributes.
+ ///
+ /// Since so few decls have attrs, we keep them in a hash map instead of
+ /// wasting space in the Decl class.
+ llvm::DenseMap<const Decl*, AttrVec*> DeclAttrs;
+
+ /// \brief Keeps track of the static data member templates from which
+ /// static data members of class template specializations were instantiated.
+ ///
+ /// This data structure stores the mapping from instantiations of static
+ /// data members to the static data member representations within the
+ /// class template from which they were instantiated along with the kind
+ /// of instantiation or specialization (a TemplateSpecializationKind - 1).
+ ///
+ /// Given the following example:
+ ///
+ /// \code
+ /// template<typename T>
+ /// struct X {
+ /// static T value;
+ /// };
+ ///
+ /// template<typename T>
+ /// T X<T>::value = T(17);
+ ///
+ /// int *x = &X<int>::value;
+ /// \endcode
+ ///
+ /// This mapping will contain an entry that maps from the VarDecl for
+ /// X<int>::value to the corresponding VarDecl for X<T>::value (within the
+ /// class template X) and will be marked TSK_ImplicitInstantiation.
+ llvm::DenseMap<const VarDecl *, MemberSpecializationInfo *>
+ InstantiatedFromStaticDataMember;
+
+ /// \brief Keeps track of the declaration from which a UsingDecl was
+ /// created during instantiation. The source declaration is always
+ /// a UsingDecl, an UnresolvedUsingValueDecl, or an
+ /// UnresolvedUsingTypenameDecl.
+ ///
+ /// For example:
+ /// \code
+ /// template<typename T>
+ /// struct A {
+ /// void f();
+ /// };
+ ///
+ /// template<typename T>
+ /// struct B : A<T> {
+ /// using A<T>::f;
+ /// };
+ ///
+ /// template struct B<int>;
+ /// \endcode
+ ///
+ /// This mapping will contain an entry that maps from the UsingDecl in
+ /// B<int> to the UnresolvedUsingDecl in B<T>.
+ llvm::DenseMap<UsingDecl *, NamedDecl *> InstantiatedFromUsingDecl;
+
+ llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>
+ InstantiatedFromUsingShadowDecl;
+
+ llvm::DenseMap<FieldDecl *, FieldDecl *> InstantiatedFromUnnamedFieldDecl;
+
+ /// \brief Mapping that stores the methods overridden by a given C++
+ /// member function.
+ ///
+ /// Since most C++ member functions aren't virtual and therefore
+ /// don't override anything, we store the overridden functions in
+ /// this map on the side rather than within the CXXMethodDecl structure.
+ typedef llvm::TinyPtrVector<const CXXMethodDecl*> CXXMethodVector;
+ llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector> OverriddenMethods;
+
+ /// \brief Mapping from each declaration context to its corresponding lambda
+ /// mangling context.
+ llvm::DenseMap<const DeclContext *, LambdaMangleContext> LambdaMangleContexts;
+
+ /// \brief Mapping that stores parameterIndex values for ParmVarDecls
+ /// when that value exceeds the bitfield size of
+ /// ParmVarDeclBits.ParameterIndex.
+ typedef llvm::DenseMap<const VarDecl *, unsigned> ParameterIndexTable;
+ ParameterIndexTable ParamIndices;
+
+ ImportDecl *FirstLocalImport;
+ ImportDecl *LastLocalImport;
+
+ TranslationUnitDecl *TUDecl;
+
+ /// SourceMgr - The associated SourceManager object.
+ SourceManager &SourceMgr;
+
+ /// LangOpts - The language options used to create the AST associated with
+ /// this ASTContext object.
+ LangOptions &LangOpts;
+
+ /// \brief The allocator used to create AST objects.
+ ///
+ /// AST objects are never destructed; rather, all memory associated with the
+ /// AST objects will be released when the ASTContext itself is destroyed.
+ mutable llvm::BumpPtrAllocator BumpAlloc;
+
+ /// \brief Allocator for partial diagnostics.
+ PartialDiagnostic::StorageAllocator DiagAllocator;
+
+ /// \brief The current C++ ABI.
+ OwningPtr<CXXABI> ABI;
+ CXXABI *createCXXABI(const TargetInfo &T);
+
+ /// \brief The logical -> physical address space map.
+ const LangAS::Map *AddrSpaceMap;
+
+ friend class ASTDeclReader;
+ friend class ASTReader;
+ friend class ASTWriter;
+ friend class CXXRecordDecl;
+
+ const TargetInfo *Target;
+ clang::PrintingPolicy PrintingPolicy;
+
+public:
+ IdentifierTable &Idents;
+ SelectorTable &Selectors;
+ Builtin::Context &BuiltinInfo;
+ mutable DeclarationNameTable DeclarationNames;
+ OwningPtr<ExternalASTSource> ExternalSource;
+ ASTMutationListener *Listener;
+
+ clang::PrintingPolicy getPrintingPolicy() const { return PrintingPolicy; }
+
+ void setPrintingPolicy(clang::PrintingPolicy Policy) {
+ PrintingPolicy = Policy;
+ }
+
+ SourceManager& getSourceManager() { return SourceMgr; }
+ const SourceManager& getSourceManager() const { return SourceMgr; }
+ void *Allocate(unsigned Size, unsigned Align = 8) const {
+ return BumpAlloc.Allocate(Size, Align);
+ }
+ void Deallocate(void *Ptr) const { }
+
+ /// Return the total amount of physical memory allocated for representing
+ /// AST nodes and type information.
+ size_t getASTAllocatedMemory() const {
+ return BumpAlloc.getTotalMemory();
+ }
+ /// Return the total memory used for various side tables.
+ size_t getSideTableAllocatedMemory() const;
+
+ PartialDiagnostic::StorageAllocator &getDiagAllocator() {
+ return DiagAllocator;
+ }
+
+ const TargetInfo &getTargetInfo() const { return *Target; }
+
+ const LangOptions& getLangOpts() const { return LangOpts; }
+
+ DiagnosticsEngine &getDiagnostics() const;
+
+ FullSourceLoc getFullLoc(SourceLocation Loc) const {
+ return FullSourceLoc(Loc,SourceMgr);
+ }
+
+ /// \brief Retrieve the attributes for the given declaration.
+ AttrVec& getDeclAttrs(const Decl *D);
+
+ /// \brief Erase the attributes corresponding to the given declaration.
+ void eraseDeclAttrs(const Decl *D);
+
+ /// \brief If this variable is an instantiated static data member of a
+ /// class template specialization, returns the templated static data member
+ /// from which it was instantiated.
+ MemberSpecializationInfo *getInstantiatedFromStaticDataMember(
+ const VarDecl *Var);
+
+ FunctionDecl *getClassScopeSpecializationPattern(const FunctionDecl *FD);
+
+ void setClassScopeSpecializationPattern(FunctionDecl *FD,
+ FunctionDecl *Pattern);
+
+ /// \brief Note that the static data member \p Inst is an instantiation of
+ /// the static data member template \p Tmpl of a class template.
+ void setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
+ TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation = SourceLocation());
+
+ /// \brief If the given using decl is an instantiation of a
+ /// (possibly unresolved) using decl from a template instantiation,
+ /// return it.
+ NamedDecl *getInstantiatedFromUsingDecl(UsingDecl *Inst);
+
+ /// \brief Remember that the using decl \p Inst is an instantiation
+ /// of the using decl \p Pattern of a class template.
+ void setInstantiatedFromUsingDecl(UsingDecl *Inst, NamedDecl *Pattern);
+
+ void setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
+ UsingShadowDecl *Pattern);
+ UsingShadowDecl *getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst);
+
+ FieldDecl *getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field);
+
+ void setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst, FieldDecl *Tmpl);
+
+ /// ZeroBitfieldFollowsNonBitfield - return 'true' if 'FD' is a zero-length
+ /// bitfield which follows the non-bitfield 'LastFD'.
+ bool ZeroBitfieldFollowsNonBitfield(const FieldDecl *FD,
+ const FieldDecl *LastFD) const;
+
+ /// ZeroBitfieldFollowsBitfield - return 'true' if 'FD' is a zero-length
+ /// bitfield which follows the bitfield 'LastFD'.
+ bool ZeroBitfieldFollowsBitfield(const FieldDecl *FD,
+ const FieldDecl *LastFD) const;
+
+ /// BitfieldFollowsBitfield - return 'true' if 'FD' is a
+ /// bitfield which follows the bitfield 'LastFD'.
+ bool BitfieldFollowsBitfield(const FieldDecl *FD,
+ const FieldDecl *LastFD) const;
+
+ /// NonBitfieldFollowsBitfield - return 'true' if 'FD' is not a
+ /// bitfield which follows the bitfield 'LastFD'.
+ bool NonBitfieldFollowsBitfield(const FieldDecl *FD,
+ const FieldDecl *LastFD) const;
+
+ /// BitfieldFollowsNonBitfield - return 'true' if 'FD' is a
+ /// bitfield which follows the non-bitfield 'LastFD'.
+ bool BitfieldFollowsNonBitfield(const FieldDecl *FD,
+ const FieldDecl *LastFD) const;
+
+ // Access to the set of methods overridden by the given C++ method.
+ typedef CXXMethodVector::const_iterator overridden_cxx_method_iterator;
+ overridden_cxx_method_iterator
+ overridden_methods_begin(const CXXMethodDecl *Method) const;
+
+ overridden_cxx_method_iterator
+ overridden_methods_end(const CXXMethodDecl *Method) const;
+
+ unsigned overridden_methods_size(const CXXMethodDecl *Method) const;
+
+ /// \brief Note that the given C++ \p Method overrides the given \p
+ /// Overridden method.
+ void addOverriddenMethod(const CXXMethodDecl *Method,
+ const CXXMethodDecl *Overridden);
+
+ /// \brief Notify the AST context that a new import declaration has been
+ /// parsed or implicitly created within this translation unit.
+ void addedLocalImportDecl(ImportDecl *Import);
+
+ static ImportDecl *getNextLocalImport(ImportDecl *Import) {
+ return Import->NextLocalImport;
+ }
+
+ /// \brief Iterator that visits import declarations.
+ class import_iterator {
+ ImportDecl *Import;
+
+ public:
+ typedef ImportDecl *value_type;
+ typedef ImportDecl *reference;
+ typedef ImportDecl *pointer;
+ typedef int difference_type;
+ typedef std::forward_iterator_tag iterator_category;
+
+ import_iterator() : Import() { }
+ explicit import_iterator(ImportDecl *Import) : Import(Import) { }
+
+ reference operator*() const { return Import; }
+ pointer operator->() const { return Import; }
+
+ import_iterator &operator++() {
+ Import = ASTContext::getNextLocalImport(Import);
+ return *this;
+ }
+
+ import_iterator operator++(int) {
+ import_iterator Other(*this);
+ ++(*this);
+ return Other;
+ }
+
+ friend bool operator==(import_iterator X, import_iterator Y) {
+ return X.Import == Y.Import;
+ }
+
+ friend bool operator!=(import_iterator X, import_iterator Y) {
+ return X.Import != Y.Import;
+ }
+ };
+
+ import_iterator local_import_begin() const {
+ return import_iterator(FirstLocalImport);
+ }
+ import_iterator local_import_end() const { return import_iterator(); }
+
+ TranslationUnitDecl *getTranslationUnitDecl() const { return TUDecl; }
+
+
+ // Builtin Types.
+ CanQualType VoidTy;
+ CanQualType BoolTy;
+ CanQualType CharTy;
+ CanQualType WCharTy; // [C++ 3.9.1p5], integer type in C99.
+ CanQualType Char16Ty; // [C++0x 3.9.1p5], integer type in C99.
+ CanQualType Char32Ty; // [C++0x 3.9.1p5], integer type in C99.
+ CanQualType SignedCharTy, ShortTy, IntTy, LongTy, LongLongTy, Int128Ty;
+ CanQualType UnsignedCharTy, UnsignedShortTy, UnsignedIntTy, UnsignedLongTy;
+ CanQualType UnsignedLongLongTy, UnsignedInt128Ty;
+ CanQualType FloatTy, DoubleTy, LongDoubleTy;
+ CanQualType HalfTy; // [OpenCL 6.1.1.1], ARM NEON
+ CanQualType FloatComplexTy, DoubleComplexTy, LongDoubleComplexTy;
+ CanQualType VoidPtrTy, NullPtrTy;
+ CanQualType DependentTy, OverloadTy, BoundMemberTy, UnknownAnyTy;
+ CanQualType PseudoObjectTy, ARCUnbridgedCastTy;
+ CanQualType ObjCBuiltinIdTy, ObjCBuiltinClassTy, ObjCBuiltinSelTy;
+ CanQualType ObjCBuiltinBoolTy;
+
+ // Types for deductions in C++0x [stmt.ranged]'s desugaring. Built on demand.
+ mutable QualType AutoDeductTy; // Deduction against 'auto'.
+ mutable QualType AutoRRefDeductTy; // Deduction against 'auto &&'.
+
+ ASTContext(LangOptions& LOpts, SourceManager &SM, const TargetInfo *t,
+ IdentifierTable &idents, SelectorTable &sels,
+ Builtin::Context &builtins,
+ unsigned size_reserve,
+ bool DelayInitialization = false);
+
+ ~ASTContext();
+
+ /// \brief Attach an external AST source to the AST context.
+ ///
+ /// The external AST source provides the ability to load parts of
+ /// the abstract syntax tree as needed from some external storage,
+ /// e.g., a precompiled header.
+ void setExternalSource(OwningPtr<ExternalASTSource> &Source);
+
+ /// \brief Retrieve a pointer to the external AST source associated
+ /// with this AST context, if any.
+ ExternalASTSource *getExternalSource() const { return ExternalSource.get(); }
+
+ /// \brief Attach an AST mutation listener to the AST context.
+ ///
+ /// The AST mutation listener provides the ability to track modifications to
+ /// the abstract syntax tree entities committed after they were initially
+ /// created.
+ void setASTMutationListener(ASTMutationListener *Listener) {
+ this->Listener = Listener;
+ }
+
+ /// \brief Retrieve a pointer to the AST mutation listener associated
+ /// with this AST context, if any.
+ ASTMutationListener *getASTMutationListener() const { return Listener; }
+
+ void PrintStats() const;
+ const std::vector<Type*>& getTypes() const { return Types; }
+
+ /// \brief Retrieve the declaration for the 128-bit signed integer type.
+ TypedefDecl *getInt128Decl() const;
+
+ /// \brief Retrieve the declaration for the 128-bit unsigned integer type.
+ TypedefDecl *getUInt128Decl() const;
+
+ //===--------------------------------------------------------------------===//
+ // Type Constructors
+ //===--------------------------------------------------------------------===//
+
+private:
+ /// getExtQualType - Return a type with extended qualifiers.
+ QualType getExtQualType(const Type *Base, Qualifiers Quals) const;
+
+ QualType getTypeDeclTypeSlow(const TypeDecl *Decl) const;
+
+public:
+ /// getAddrSpaceQualType - Return the uniqued reference to the type for an
+ /// address space qualified type with the specified type and address space.
+ /// The resulting type has a union of the qualifiers from T and the address
+ /// space. If T already has an address space specifier, it is silently
+ /// replaced.
+ QualType getAddrSpaceQualType(QualType T, unsigned AddressSpace) const;
+
+ /// getObjCGCQualType - Returns the uniqued reference to the type for an
+ /// objc gc qualified type. The resulting type has a union of the qualifiers
+ /// from T and the gc attribute.
+ QualType getObjCGCQualType(QualType T, Qualifiers::GC gcAttr) const;
+
+ /// getRestrictType - Returns the uniqued reference to the type for a
+ /// 'restrict' qualified type. The resulting type has a union of the
+ /// qualifiers from T and 'restrict'.
+ QualType getRestrictType(QualType T) const {
+ return T.withFastQualifiers(Qualifiers::Restrict);
+ }
+
+ /// getVolatileType - Returns the uniqued reference to the type for a
+ /// 'volatile' qualified type. The resulting type has a union of the
+ /// qualifiers from T and 'volatile'.
+ QualType getVolatileType(QualType T) const {
+ return T.withFastQualifiers(Qualifiers::Volatile);
+ }
+
+ /// getConstType - Returns the uniqued reference to the type for a
+ /// 'const' qualified type. The resulting type has a union of the
+ /// qualifiers from T and 'const'.
+ ///
+ /// It can be reasonably expected that this will always be
+ /// equivalent to calling T.withConst().
+ QualType getConstType(QualType T) const { return T.withConst(); }
+
+ /// adjustFunctionType - Change the ExtInfo on a function type.
+ const FunctionType *adjustFunctionType(const FunctionType *Fn,
+ FunctionType::ExtInfo EInfo);
+
+ /// getComplexType - Return the uniqued reference to the type for a complex
+ /// number with the specified element type.
+ QualType getComplexType(QualType T) const;
+ CanQualType getComplexType(CanQualType T) const {
+ return CanQualType::CreateUnsafe(getComplexType((QualType) T));
+ }
+
+ /// getPointerType - Return the uniqued reference to the type for a pointer to
+ /// the specified type.
+ QualType getPointerType(QualType T) const;
+ CanQualType getPointerType(CanQualType T) const {
+ return CanQualType::CreateUnsafe(getPointerType((QualType) T));
+ }
+
+ /// getAtomicType - Return the uniqued reference to the atomic type for
+ /// the specified type.
+ QualType getAtomicType(QualType T) const;
+
+ /// getBlockPointerType - Return the uniqued reference to the type for a block
+ /// of the specified type.
+ QualType getBlockPointerType(QualType T) const;
+
+ /// This gets the struct used to keep track of the descriptor for pointer to
+ /// blocks.
+ QualType getBlockDescriptorType() const;
+
+ /// This gets the struct used to keep track of the extended descriptor for
+ /// pointer to blocks.
+ QualType getBlockDescriptorExtendedType() const;
+
+ void setcudaConfigureCallDecl(FunctionDecl *FD) {
+ cudaConfigureCallDecl = FD;
+ }
+ FunctionDecl *getcudaConfigureCallDecl() {
+ return cudaConfigureCallDecl;
+ }
+
+ /// This builds the struct used for __block variables.
+ QualType BuildByRefType(StringRef DeclName, QualType Ty) const;
+
+ /// Returns true iff we need copy/dispose helpers for the given type.
+ bool BlockRequiresCopying(QualType Ty) const;
+
+ /// getLValueReferenceType - Return the uniqued reference to the type for an
+ /// lvalue reference to the specified type.
+ QualType getLValueReferenceType(QualType T, bool SpelledAsLValue = true)
+ const;
+
+ /// getRValueReferenceType - Return the uniqued reference to the type for an
+ /// rvalue reference to the specified type.
+ QualType getRValueReferenceType(QualType T) const;
+
+ /// getMemberPointerType - Return the uniqued reference to the type for a
+ /// member pointer to the specified type in the specified class. The class
+ /// is a Type because it could be a dependent name.
+ QualType getMemberPointerType(QualType T, const Type *Cls) const;
+
+ /// getVariableArrayType - Returns a non-unique reference to the type for a
+ /// variable array of the specified element type.
+ QualType getVariableArrayType(QualType EltTy, Expr *NumElts,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned IndexTypeQuals,
+ SourceRange Brackets) const;
+
+ /// getDependentSizedArrayType - Returns a non-unique reference to
+ /// the type for a dependently-sized array of the specified element
+ /// type. FIXME: We will need these to be uniqued, or at least
+ /// comparable, at some point.
+ QualType getDependentSizedArrayType(QualType EltTy, Expr *NumElts,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned IndexTypeQuals,
+ SourceRange Brackets) const;
+
+ /// getIncompleteArrayType - Returns a unique reference to the type for an
+ /// incomplete array of the specified element type.
+ QualType getIncompleteArrayType(QualType EltTy,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned IndexTypeQuals) const;
+
+ /// getConstantArrayType - Return the unique reference to the type for a
+ /// constant array of the specified element type.
+ QualType getConstantArrayType(QualType EltTy, const llvm::APInt &ArySize,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned IndexTypeQuals) const;
+
+ /// getVariableArrayDecayedType - Returns a vla type where known sizes
+ /// are replaced with [*].
+ QualType getVariableArrayDecayedType(QualType Ty) const;
+
+ /// getVectorType - Return the unique reference to a vector type of
+ /// the specified element type and size. VectorType must be a built-in type.
+ QualType getVectorType(QualType VectorType, unsigned NumElts,
+ VectorType::VectorKind VecKind) const;
+
+ /// getExtVectorType - Return the unique reference to an extended vector type
+ /// of the specified element type and size. VectorType must be a built-in
+ /// type.
+ QualType getExtVectorType(QualType VectorType, unsigned NumElts) const;
+
+ /// getDependentSizedExtVectorType - Returns a non-unique reference to
+ /// the type for a dependently-sized vector of the specified element
+ /// type. FIXME: We will need these to be uniqued, or at least
+ /// comparable, at some point.
+ QualType getDependentSizedExtVectorType(QualType VectorType,
+ Expr *SizeExpr,
+ SourceLocation AttrLoc) const;
+
+ /// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
+ ///
+ QualType getFunctionNoProtoType(QualType ResultTy,
+ const FunctionType::ExtInfo &Info) const;
+
+ QualType getFunctionNoProtoType(QualType ResultTy) const {
+ return getFunctionNoProtoType(ResultTy, FunctionType::ExtInfo());
+ }
+
+ /// getFunctionType - Return a normal function type with a typed
+ /// argument list.
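+ ///
+ /// Illustrative sketch (editor's addition), assuming an ASTContext 'Ctx'
+ /// is in scope; this builds the prototype 'int (float, char)' with a
+ /// default-constructed ext-proto-info:
+ /// \code
+ ///   QualType Args[] = { Ctx.FloatTy, Ctx.CharTy };
+ ///   FunctionProtoType::ExtProtoInfo EPI;
+ ///   QualType FnTy = Ctx.getFunctionType(Ctx.IntTy, Args, 2, EPI);
+ /// \endcode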
+ QualType getFunctionType(QualType ResultTy,
+ const QualType *Args, unsigned NumArgs,
+ const FunctionProtoType::ExtProtoInfo &EPI) const;
+
+ /// getTypeDeclType - Return the unique reference to the type for
+ /// the specified type declaration.
+ QualType getTypeDeclType(const TypeDecl *Decl,
+ const TypeDecl *PrevDecl = 0) const {
+ assert(Decl && "Passed null for Decl param");
+ if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+
+ if (PrevDecl) {
+ assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
+ Decl->TypeForDecl = PrevDecl->TypeForDecl;
+ return QualType(PrevDecl->TypeForDecl, 0);
+ }
+
+ return getTypeDeclTypeSlow(Decl);
+ }
+
+ /// getTypedefType - Return the unique reference to the type for the
+ /// specified typedef-name decl.
+ QualType getTypedefType(const TypedefNameDecl *Decl,
+ QualType Canon = QualType()) const;
+
+ QualType getRecordType(const RecordDecl *Decl) const;
+
+ QualType getEnumType(const EnumDecl *Decl) const;
+
+ QualType getInjectedClassNameType(CXXRecordDecl *Decl, QualType TST) const;
+
+ QualType getAttributedType(AttributedType::Kind attrKind,
+ QualType modifiedType,
+ QualType equivalentType);
+
+ QualType getSubstTemplateTypeParmType(const TemplateTypeParmType *Replaced,
+ QualType Replacement) const;
+ QualType getSubstTemplateTypeParmPackType(
+ const TemplateTypeParmType *Replaced,
+ const TemplateArgument &ArgPack);
+
+ QualType getTemplateTypeParmType(unsigned Depth, unsigned Index,
+ bool ParameterPack,
+ TemplateTypeParmDecl *ParmDecl = 0) const;
+
+ QualType getTemplateSpecializationType(TemplateName T,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ QualType Canon = QualType()) const;
+
+ QualType getCanonicalTemplateSpecializationType(TemplateName T,
+ const TemplateArgument *Args,
+ unsigned NumArgs) const;
+
+ QualType getTemplateSpecializationType(TemplateName T,
+ const TemplateArgumentListInfo &Args,
+ QualType Canon = QualType()) const;
+
+ TypeSourceInfo *
+ getTemplateSpecializationTypeInfo(TemplateName T, SourceLocation TLoc,
+ const TemplateArgumentListInfo &Args,
+ QualType Canon = QualType()) const;
+
+ QualType getParenType(QualType NamedType) const;
+
+ QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ QualType NamedType) const;
+ QualType getDependentNameType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ QualType Canon = QualType()) const;
+
+ QualType getDependentTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ const TemplateArgumentListInfo &Args) const;
+ QualType getDependentTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ unsigned NumArgs,
+ const TemplateArgument *Args) const;
+
+ QualType getPackExpansionType(QualType Pattern,
+ llvm::Optional<unsigned> NumExpansions);
+
+ QualType getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
+ ObjCInterfaceDecl *PrevDecl = 0) const;
+
+ QualType getObjCObjectType(QualType Base,
+ ObjCProtocolDecl * const *Protocols,
+ unsigned NumProtocols) const;
+
+ /// getObjCObjectPointerType - Return a ObjCObjectPointerType type
+ /// for the given ObjCObjectType.
+ QualType getObjCObjectPointerType(QualType OIT) const;
+
+ /// getTypeOfExprType, getTypeOfType - GCC 'typeof' extension.
+ QualType getTypeOfExprType(Expr *e) const;
+ QualType getTypeOfType(QualType t) const;
+
+ /// getDecltypeType - C++0x decltype.
+ QualType getDecltypeType(Expr *e, QualType UnderlyingType) const;
+
+ /// getUnaryTransformType - Unary type transforms (e.g. __underlying_type).
+ QualType getUnaryTransformType(QualType BaseType, QualType UnderlyingType,
+ UnaryTransformType::UTTKind UKind) const;
+
+ /// getAutoType - C++0x deduced auto type.
+ QualType getAutoType(QualType DeducedType) const;
+
+ /// getAutoDeductType - C++0x deduction pattern for 'auto' type.
+ QualType getAutoDeductType() const;
+
+ /// getAutoRRefDeductType - C++0x deduction pattern for 'auto &&' type.
+ QualType getAutoRRefDeductType() const;
+
+ /// getTagDeclType - Return the unique reference to the type for the
+ /// specified TagDecl (struct/union/class/enum) decl.
+ QualType getTagDeclType(const TagDecl *Decl) const;
+
+ /// getSizeType - Return the unique type for "size_t" (C99 7.17), defined
+ /// in <stddef.h>. The sizeof operator requires this (C99 6.5.3.4p4).
+ CanQualType getSizeType() const;
+
+ /// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5),
+ /// defined in <stdint.h>.
+ CanQualType getIntMaxType() const;
+
+ /// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5),
+ /// defined in <stdint.h>.
+ CanQualType getUIntMaxType() const;
+
+ /// getWCharType - In C++, this returns the unique wchar_t type. In C99, this
+ /// returns a type compatible with the type defined in <stddef.h> as defined
+ /// by the target.
+ QualType getWCharType() const { return WCharTy; }
+
+ /// getSignedWCharType - Return the type of "signed wchar_t".
+ /// Used in C++ as a GCC extension.
+ QualType getSignedWCharType() const;
+
+ /// getUnsignedWCharType - Return the type of "unsigned wchar_t".
+ /// Used in C++ as a GCC extension.
+ QualType getUnsignedWCharType() const;
+
+ /// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
+ /// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
+ QualType getPointerDiffType() const;
+
+ // getCFConstantStringType - Return the C structure type used to represent
+ // constant CFStrings.
+ QualType getCFConstantStringType() const;
+
+ /// Get the structure type used to represent CFStrings, or NULL
+ /// if it hasn't yet been built.
+ QualType getRawCFConstantStringType() const {
+ if (CFConstantStringTypeDecl)
+ return getTagDeclType(CFConstantStringTypeDecl);
+ return QualType();
+ }
+ void setCFConstantStringType(QualType T);
+
+ // This setter/getter represents the ObjC type for an NSConstantString.
+ void setObjCConstantStringInterface(ObjCInterfaceDecl *Decl);
+ QualType getObjCConstantStringInterface() const {
+ return ObjCConstantStringType;
+ }
+
+ QualType getObjCNSStringType() const {
+ return ObjCNSStringType;
+ }
+
+ void setObjCNSStringType(QualType T) {
+ ObjCNSStringType = T;
+ }
+
+ /// \brief Retrieve the type that 'id' has been defined to, which may be
+ /// different from the built-in 'id' if 'id' has been typedef'd.
+ QualType getObjCIdRedefinitionType() const {
+ if (ObjCIdRedefinitionType.isNull())
+ return getObjCIdType();
+ return ObjCIdRedefinitionType;
+ }
+
+ /// \brief Set the user-written type that redefines 'id'.
+ void setObjCIdRedefinitionType(QualType RedefType) {
+ ObjCIdRedefinitionType = RedefType;
+ }
+
+ /// \brief Retrieve the type that 'Class' has been defined to, which may be
+ /// different from the built-in 'Class' if 'Class' has been typedef'd.
+ QualType getObjCClassRedefinitionType() const {
+ if (ObjCClassRedefinitionType.isNull())
+ return getObjCClassType();
+ return ObjCClassRedefinitionType;
+ }
+
+ /// \brief Set the user-written type that redefines 'Class'.
+ void setObjCClassRedefinitionType(QualType RedefType) {
+ ObjCClassRedefinitionType = RedefType;
+ }
+
+ /// \brief Retrieve the type that 'SEL' has been defined to, which may be
+ /// different from the built-in 'SEL' if 'SEL' has been typedef'd.
+ QualType getObjCSelRedefinitionType() const {
+ if (ObjCSelRedefinitionType.isNull())
+ return getObjCSelType();
+ return ObjCSelRedefinitionType;
+ }
+
+ /// \brief Set the user-written type that redefines 'SEL'.
+ void setObjCSelRedefinitionType(QualType RedefType) {
+ ObjCSelRedefinitionType = RedefType;
+ }
+
+ /// \brief Retrieve the Objective-C "instancetype" type, if already known;
+ /// otherwise, returns a NULL type.
+ QualType getObjCInstanceType() {
+ return getTypeDeclType(getObjCInstanceTypeDecl());
+ }
+
+ /// \brief Retrieve the typedef declaration corresponding to the Objective-C
+ /// "instancetype" type.
+ TypedefDecl *getObjCInstanceTypeDecl();
+
+ /// \brief Set the type for the C FILE type.
+ void setFILEDecl(TypeDecl *FILEDecl) { this->FILEDecl = FILEDecl; }
+
+ /// \brief Retrieve the C FILE type.
+ QualType getFILEType() const {
+ if (FILEDecl)
+ return getTypeDeclType(FILEDecl);
+ return QualType();
+ }
+
+ /// \brief Set the type for the C jmp_buf type.
+ void setjmp_bufDecl(TypeDecl *jmp_bufDecl) {
+ this->jmp_bufDecl = jmp_bufDecl;
+ }
+
+ /// \brief Retrieve the C jmp_buf type.
+ QualType getjmp_bufType() const {
+ if (jmp_bufDecl)
+ return getTypeDeclType(jmp_bufDecl);
+ return QualType();
+ }
+
+ /// \brief Set the type for the C sigjmp_buf type.
+ void setsigjmp_bufDecl(TypeDecl *sigjmp_bufDecl) {
+ this->sigjmp_bufDecl = sigjmp_bufDecl;
+ }
+
+ /// \brief Retrieve the C sigjmp_buf type.
+ QualType getsigjmp_bufType() const {
+ if (sigjmp_bufDecl)
+ return getTypeDeclType(sigjmp_bufDecl);
+ return QualType();
+ }
+
+ /// \brief Set the type for the C ucontext_t type.
+ void setucontext_tDecl(TypeDecl *ucontext_tDecl) {
+ this->ucontext_tDecl = ucontext_tDecl;
+ }
+
+ /// \brief Retrieve the C ucontext_t type.
+ QualType getucontext_tType() const {
+ if (ucontext_tDecl)
+ return getTypeDeclType(ucontext_tDecl);
+ return QualType();
+ }
+
+ /// \brief The result type of logical operations, '<', '>', '!=', etc.
+ QualType getLogicalOperationType() const {
+ return getLangOpts().CPlusPlus ? BoolTy : IntTy;
+ }
+
+ /// getObjCEncodingForType - Emit the ObjC type encoding for the
+ /// given type into \arg S. If \arg Field is specified, then
+ /// record field names are also encoded.
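+ ///
+ /// Illustrative sketch (editor's addition), assuming an ASTContext 'Ctx'
+ /// and a QualType 'T' are in scope:
+ /// \code
+ ///   std::string Enc;
+ ///   Ctx.getObjCEncodingForType(T, Enc);   // e.g. "i" when T is 'int'
+ /// \endcode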
+ void getObjCEncodingForType(QualType t, std::string &S,
+ const FieldDecl *Field=0) const;
+
+ void getLegacyIntegralTypeEncoding(QualType &t) const;
+
+ // Put the string version of type qualifiers into S.
+ void getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
+ std::string &S) const;
+
+ /// getObjCEncodingForFunctionDecl - Returns the encoded type for this
+ /// function. This is in the same format as Objective-C method encodings.
+ ///
+ /// \returns true if an error occurred (e.g., because one of the parameter
+ /// types is incomplete), false otherwise.
+ bool getObjCEncodingForFunctionDecl(const FunctionDecl *Decl, std::string& S);
+
+ /// getObjCEncodingForMethodDecl - Return the encoded type for this method
+ /// declaration.
+ ///
+ /// \returns true if an error occurred (e.g., because one of the parameter
+ /// types is incomplete), false otherwise.
+ bool getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl, std::string &S,
+ bool Extended = false)
+ const;
+
+ /// getObjCEncodingForBlock - Return the encoded type for this block
+ /// declaration.
+ std::string getObjCEncodingForBlock(const BlockExpr *blockExpr) const;
+
+ /// getObjCEncodingForPropertyDecl - Return the encoded type for
+ /// this property declaration. If non-NULL, Container must be either
+ /// an ObjCCategoryImplDecl or ObjCImplementationDecl; it should
+ /// only be NULL when getting encodings for protocol properties.
+ void getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
+ const Decl *Container,
+ std::string &S) const;
+
+ bool ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
+ ObjCProtocolDecl *rProto) const;
+
+ /// getObjCEncodingTypeSize - Returns the size of the given type, in
+ /// characters, for Objective-C encoding purposes.
+ CharUnits getObjCEncodingTypeSize(QualType t) const;
+
+ /// \brief Retrieve the typedef corresponding to the predefined 'id' type
+ /// in Objective-C.
+ TypedefDecl *getObjCIdDecl() const;
+
+ /// This setter/getter represents the ObjC 'id' type. It is set up lazily by
+ /// Sema. 'id' is always a (typedef for a) pointer type, a pointer to a struct.
+ QualType getObjCIdType() const {
+ return getTypeDeclType(getObjCIdDecl());
+ }
+
+ /// \brief Retrieve the typedef corresponding to the predefined 'SEL' type
+ /// in Objective-C.
+ TypedefDecl *getObjCSelDecl() const;
+
+ /// \brief Retrieve the type that corresponds to the predefined Objective-C
+ /// 'SEL' type.
+ QualType getObjCSelType() const {
+ return getTypeDeclType(getObjCSelDecl());
+ }
+
+ /// \brief Retrieve the typedef declaration corresponding to the predefined
+ /// Objective-C 'Class' type.
+ TypedefDecl *getObjCClassDecl() const;
+
+ /// This setter/getter represents the ObjC 'Class' type. It is set up lazily
+ /// by Sema. 'Class' is always a (typedef for a) pointer type, a pointer to a
+ /// struct.
+ QualType getObjCClassType() const {
+ return getTypeDeclType(getObjCClassDecl());
+ }
+
+ /// \brief Retrieve the Objective-C class declaration corresponding to
+ /// the predefined 'Protocol' class.
+ ObjCInterfaceDecl *getObjCProtocolDecl() const;
+
+ /// \brief Retrieve the type of the Objective-C "Protocol" class.
+ QualType getObjCProtoType() const {
+ return getObjCInterfaceType(getObjCProtocolDecl());
+ }
+
+ void setBuiltinVaListType(QualType T);
+ QualType getBuiltinVaListType() const { return BuiltinVaListType; }
+
+ /// getCVRQualifiedType - Returns a type with additional const,
+ /// volatile, or restrict qualifiers.
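+ ///
+ /// Illustrative sketch (editor's addition), assuming an ASTContext 'Ctx'
+ /// is in scope; this produces 'const int':
+ /// \code
+ ///   QualType ConstInt = Ctx.getCVRQualifiedType(Ctx.IntTy, Qualifiers::Const);
+ /// \endcode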
+ QualType getCVRQualifiedType(QualType T, unsigned CVR) const {
+ return getQualifiedType(T, Qualifiers::fromCVRMask(CVR));
+ }
+
+ /// getQualifiedType - Un-split a SplitQualType.
+ QualType getQualifiedType(SplitQualType split) const {
+ return getQualifiedType(split.Ty, split.Quals);
+ }
+
+ /// getQualifiedType - Returns a type with additional qualifiers.
+ QualType getQualifiedType(QualType T, Qualifiers Qs) const {
+ if (!Qs.hasNonFastQualifiers())
+ return T.withFastQualifiers(Qs.getFastQualifiers());
+ QualifierCollector Qc(Qs);
+ const Type *Ptr = Qc.strip(T);
+ return getExtQualType(Ptr, Qc);
+ }
+
+ /// getQualifiedType - Returns a type with additional qualifiers.
+ QualType getQualifiedType(const Type *T, Qualifiers Qs) const {
+ if (!Qs.hasNonFastQualifiers())
+ return QualType(T, Qs.getFastQualifiers());
+ return getExtQualType(T, Qs);
+ }
+
+ /// getLifetimeQualifiedType - Returns a type with the given
+ /// lifetime qualifier.
+ QualType getLifetimeQualifiedType(QualType type,
+ Qualifiers::ObjCLifetime lifetime) {
+ assert(type.getObjCLifetime() == Qualifiers::OCL_None);
+ assert(lifetime != Qualifiers::OCL_None);
+
+ Qualifiers qs;
+ qs.addObjCLifetime(lifetime);
+ return getQualifiedType(type, qs);
+ }
+
+ DeclarationNameInfo getNameForTemplate(TemplateName Name,
+ SourceLocation NameLoc) const;
+
+ TemplateName getOverloadedTemplateName(UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) const;
+
+ TemplateName getQualifiedTemplateName(NestedNameSpecifier *NNS,
+ bool TemplateKeyword,
+ TemplateDecl *Template) const;
+
+ TemplateName getDependentTemplateName(NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name) const;
+ TemplateName getDependentTemplateName(NestedNameSpecifier *NNS,
+ OverloadedOperatorKind Operator) const;
+ TemplateName getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param,
+ TemplateName replacement) const;
+ TemplateName getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param,
+ const TemplateArgument &ArgPack) const;
+
+ enum GetBuiltinTypeError {
+ GE_None, //< No error
+ GE_Missing_stdio, //< Missing a type from <stdio.h>
+ GE_Missing_setjmp, //< Missing a type from <setjmp.h>
+ GE_Missing_ucontext //< Missing a type from <ucontext.h>
+ };
+
+ /// GetBuiltinType - Return the type for the specified builtin. If
+ /// IntegerConstantArgs is non-null, it is filled in with a bitmask of
+ /// arguments to the builtin that are required to be integer constant
+ /// expressions.
+ QualType GetBuiltinType(unsigned ID, GetBuiltinTypeError &Error,
+ unsigned *IntegerConstantArgs = 0) const;
+
+private:
+ CanQualType getFromTargetType(unsigned Type) const;
+ std::pair<uint64_t, unsigned> getTypeInfoImpl(const Type *T) const;
+
+ //===--------------------------------------------------------------------===//
+ // Type Predicates.
+ //===--------------------------------------------------------------------===//
+
+public:
+ /// getObjCGCAttrKind - Returns one of GCNone, Weak, or Strong: the
+ /// Objective-C garbage collection attribute of the given type.
+ ///
+ Qualifiers::GC getObjCGCAttrKind(QualType Ty) const;
+
+ /// areCompatibleVectorTypes - Return true if the given vector types
+ /// are of the same unqualified type or if they are equivalent to the same
+ /// GCC vector type, ignoring whether they are target-specific (AltiVec or
+ /// Neon) types.
+ bool areCompatibleVectorTypes(QualType FirstVec, QualType SecondVec);
+
+ /// isObjCNSObjectType - Return true if this is an NSObject object with
+ /// its NSObject attribute set.
+ static bool isObjCNSObjectType(QualType Ty) {
+ return Ty->isObjCNSObjectType();
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Type Sizing and Analysis
+ //===--------------------------------------------------------------------===//
+
+ /// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
+ /// scalar floating point type.
+ const llvm::fltSemantics &getFloatTypeSemantics(QualType T) const;
+
+ /// getTypeInfo - Get the size and alignment of the specified complete type in
+ /// bits.
+ std::pair<uint64_t, unsigned> getTypeInfo(const Type *T) const;
+ std::pair<uint64_t, unsigned> getTypeInfo(QualType T) const {
+ return getTypeInfo(T.getTypePtr());
+ }
+
+ /// getTypeSize - Return the size of the specified type, in bits. This method
+ /// does not work on incomplete types.
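+ ///
+ /// Illustrative sketch (editor's addition), assuming an ASTContext 'Ctx'
+ /// is in scope:
+ /// \code
+ ///   uint64_t IntBits  = Ctx.getTypeSize(Ctx.IntTy);        // e.g. 32
+ ///   CharUnits IntSize = Ctx.getTypeSizeInChars(Ctx.IntTy); // e.g. 4 chars
+ /// \endcode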
+ uint64_t getTypeSize(QualType T) const {
+ return getTypeInfo(T).first;
+ }
+ uint64_t getTypeSize(const Type *T) const {
+ return getTypeInfo(T).first;
+ }
+
+ /// getCharWidth - Return the size of the character type, in bits
+ uint64_t getCharWidth() const {
+ return getTypeSize(CharTy);
+ }
+
+ /// toCharUnitsFromBits - Convert a size in bits to a size in characters.
+ CharUnits toCharUnitsFromBits(int64_t BitSize) const;
+
+ /// toBits - Convert a size in characters to a size in bits.
+ int64_t toBits(CharUnits CharSize) const;
+
+ /// getTypeSizeInChars - Return the size of the specified type, in characters.
+ /// This method does not work on incomplete types.
+ CharUnits getTypeSizeInChars(QualType T) const;
+ CharUnits getTypeSizeInChars(const Type *T) const;
+
+ /// getTypeAlign - Return the ABI-specified alignment of a type, in bits.
+ /// This method does not work on incomplete types.
+ unsigned getTypeAlign(QualType T) const {
+ return getTypeInfo(T).second;
+ }
+ unsigned getTypeAlign(const Type *T) const {
+ return getTypeInfo(T).second;
+ }
+
+ /// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
+ /// characters. This method does not work on incomplete types.
+ CharUnits getTypeAlignInChars(QualType T) const;
+ CharUnits getTypeAlignInChars(const Type *T) const;
+
+ std::pair<CharUnits, CharUnits> getTypeInfoInChars(const Type *T) const;
+ std::pair<CharUnits, CharUnits> getTypeInfoInChars(QualType T) const;
+
+ /// getPreferredTypeAlign - Return the "preferred" alignment of the specified
+ /// type for the current target, in bits. This can differ from the ABI
+ /// alignment in cases where it is beneficial for performance to overalign
+ /// a data type.
+ unsigned getPreferredTypeAlign(const Type *T) const;
+
+ /// getDeclAlign - Return a conservative estimate of the alignment of
+ /// the specified decl. Note that bitfields do not have a valid alignment, so
+ /// this method will assert on them.
+ /// If @p RefAsPointee, references are treated like their underlying type
+ /// (for alignof), else they're treated like pointers (for CodeGen).
+ CharUnits getDeclAlign(const Decl *D, bool RefAsPointee = false) const;
+
+ /// getASTRecordLayout - Get or compute information about the layout of the
+ /// specified record (struct/union/class), which indicates its size and field
+ /// position information.
+ const ASTRecordLayout &getASTRecordLayout(const RecordDecl *D) const;
+
+ /// getASTObjCInterfaceLayout - Get or compute information about the
+ /// layout of the specified Objective-C interface.
+ const ASTRecordLayout &getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D)
+ const;
+
+ void DumpRecordLayout(const RecordDecl *RD, raw_ostream &OS,
+ bool Simple = false) const;
+
+ /// getASTObjCImplementationLayout - Get or compute information about
+ /// the layout of the specified Objective-C implementation. This may
+ /// differ from the interface if synthesized ivars are present.
+ const ASTRecordLayout &
+ getASTObjCImplementationLayout(const ObjCImplementationDecl *D) const;
+
+ /// getKeyFunction - Get the key function for the given record decl, or NULL
+ /// if there isn't one. The key function is, according to the Itanium C++ ABI
+ /// section 5.2.3:
+ ///
+ /// ...the first non-pure virtual function that is not inline at the point
+ /// of class definition.
+ const CXXMethodDecl *getKeyFunction(const CXXRecordDecl *RD);
+
+ /// Get the offset of a FieldDecl or IndirectFieldDecl, in bits.
+ uint64_t getFieldOffset(const ValueDecl *FD) const;
+
+ bool isNearlyEmpty(const CXXRecordDecl *RD) const;
+
+ MangleContext *createMangleContext();
+
+ void DeepCollectObjCIvars(const ObjCInterfaceDecl *OI, bool leafClass,
+ SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const;
+
+ unsigned CountNonClassIvars(const ObjCInterfaceDecl *OI) const;
+ void CollectInheritedProtocols(const Decl *CDecl,
+ llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols);
+
+ //===--------------------------------------------------------------------===//
+ // Type Operators
+ //===--------------------------------------------------------------------===//
+
+ /// getCanonicalType - Return the canonical (structural) type corresponding to
+ /// the specified potentially non-canonical type. A non-canonical type may
+ /// appear in many "decorated" forms; decorators include typedefs, 'typeof'
+ /// operators, etc. The returned type is guaranteed to be free of any of
+ /// these, allowing two canonical types to be compared for exact equality
+ /// with a simple pointer comparison.
+ CanQualType getCanonicalType(QualType T) const {
+ return CanQualType::CreateUnsafe(T.getCanonicalType());
+ }
+
+ const Type *getCanonicalType(const Type *T) const {
+ return T->getCanonicalTypeInternal().getTypePtr();
+ }
+
+ /// getCanonicalParamType - Return the canonical parameter type
+ /// corresponding to the specific potentially non-canonical one.
+ /// Qualifiers are stripped off, functions are turned into function
+ /// pointers, and arrays decay one level into pointers.
+ CanQualType getCanonicalParamType(QualType T) const;
+
+ /// \brief Determine whether the given types are equivalent.
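+ ///
+ /// Illustrative sketch (editor's addition): given a hypothetical QualType
+ /// 'MyIntTy' obtained from 'typedef int MyInt;', the typedef sugar is
+ /// ignored when comparing canonical types:
+ /// \code
+ ///   assert(Ctx.hasSameType(MyIntTy, Ctx.IntTy));
+ ///   assert(Ctx.getCanonicalType(MyIntTy) == Ctx.IntTy);
+ /// \endcode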
+ bool hasSameType(QualType T1, QualType T2) const {
+ return getCanonicalType(T1) == getCanonicalType(T2);
+ }
+
+ /// \brief Returns this type as a completely-unqualified array type,
+ /// capturing the qualifiers in Quals. This will remove the minimal amount of
+ /// sugaring from the types, similar to the behavior of
+ /// QualType::getUnqualifiedType().
+ ///
+ /// \param T is the qualified type, which may be an ArrayType
+ ///
+ /// \param Quals will receive the full set of qualifiers that were
+ /// applied to the array.
+ ///
+ /// \returns if this is an array type, the completely unqualified array type
+ /// that corresponds to it. Otherwise, returns T.getUnqualifiedType().
+ QualType getUnqualifiedArrayType(QualType T, Qualifiers &Quals);
+
+ /// \brief Determine whether the given types are equivalent after
+ /// cvr-qualifiers have been removed.
+ bool hasSameUnqualifiedType(QualType T1, QualType T2) const {
+ return getCanonicalType(T1).getTypePtr() ==
+ getCanonicalType(T2).getTypePtr();
+ }
+
+ bool UnwrapSimilarPointerTypes(QualType &T1, QualType &T2);
+
+ /// \brief Retrieves the "canonical" nested name specifier for a
+ /// given nested name specifier.
+ ///
+ /// The canonical nested name specifier is a nested name specifier
+ /// that uniquely identifies a type or namespace within the type
+ /// system. For example, given:
+ ///
+ /// \code
+ /// namespace N {
+ /// struct S {
+ /// template<typename T> struct X { typename T* type; };
+ /// };
+ /// }
+ ///
+ /// template<typename T> struct Y {
+ /// typename N::S::X<T>::type member;
+ /// };
+ /// \endcode
+ ///
+ /// Here, the nested-name-specifier for N::S::X<T>:: will be
+ /// S::X<template-param-0-0>, since 'S' and 'X' are uniquely defined
+ /// by declarations in the type system and the canonical type for
+ /// the template type parameter 'T' is template-param-0-0.
+ NestedNameSpecifier *
+ getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const;
+
+ /// \brief Retrieves the default calling convention to use for
+ /// C++ instance methods.
+ CallingConv getDefaultMethodCallConv();
+
+ /// \brief Retrieves the canonical representation of the given
+ /// calling convention.
+ CallingConv getCanonicalCallConv(CallingConv CC) const {
+ if (!LangOpts.MRTD && CC == CC_C)
+ return CC_Default;
+ return CC;
+ }
+
+ /// \brief Determines whether two calling conventions name the same
+ /// calling convention.
+ bool isSameCallConv(CallingConv lcc, CallingConv rcc) {
+ return (getCanonicalCallConv(lcc) == getCanonicalCallConv(rcc));
+ }
+
+ /// \brief Retrieves the "canonical" template name that refers to a
+ /// given template.
+ ///
+ /// The canonical template name is the simplest expression that can
+ /// be used to refer to a given template. For most templates, this
+ /// expression is just the template declaration itself. For example,
+ /// the template std::vector can be referred to via a variety of
+ /// names---std::vector, ::std::vector, vector (if vector is in
+ /// scope), etc.---but all of these names map down to the same
+ /// TemplateDecl, which is used to form the canonical template name.
+ ///
+ /// Dependent template names are more interesting. Here, the
+ /// template name could be something like T::template apply or
+ /// std::allocator<T>::template rebind, where the nested name
+ /// specifier itself is dependent. In this case, the canonical
+ /// template name uses the shortest form of the dependent
+ /// nested-name-specifier, which itself contains all canonical
+ /// types, values, and templates.
+ TemplateName getCanonicalTemplateName(TemplateName Name) const;
+
+ /// \brief Determine whether the given template names refer to the same
+ /// template.
+ bool hasSameTemplateName(TemplateName X, TemplateName Y);
+
+ /// \brief Retrieve the "canonical" template argument.
+ ///
+ /// The canonical template argument is the simplest template argument
+ /// (which may be a type, value, expression, or declaration) that
+ /// expresses the value of the argument.
+ TemplateArgument getCanonicalTemplateArgument(const TemplateArgument &Arg)
+ const;
+
+ /// Type Query functions. If the type is an instance of the specified class,
+ /// return the Type pointer for the underlying maximally pretty type. This
+ /// is a member of ASTContext because this may need to do some amount of
+ /// canonicalization, e.g. to move type qualifiers into the element type.
+ const ArrayType *getAsArrayType(QualType T) const;
+ const ConstantArrayType *getAsConstantArrayType(QualType T) const {
+ return dyn_cast_or_null<ConstantArrayType>(getAsArrayType(T));
+ }
+ const VariableArrayType *getAsVariableArrayType(QualType T) const {
+ return dyn_cast_or_null<VariableArrayType>(getAsArrayType(T));
+ }
+ const IncompleteArrayType *getAsIncompleteArrayType(QualType T) const {
+ return dyn_cast_or_null<IncompleteArrayType>(getAsArrayType(T));
+ }
+ const DependentSizedArrayType *getAsDependentSizedArrayType(QualType T)
+ const {
+ return dyn_cast_or_null<DependentSizedArrayType>(getAsArrayType(T));
+ }
+
+ /// getBaseElementType - Returns the innermost element type of an array type.
+ /// For example, it returns "int" for int[m][n].
+ QualType getBaseElementType(const ArrayType *VAT) const;
+
+ /// getBaseElementType - Returns the innermost element type of a type
+ /// (which needn't actually be an array type).
+ QualType getBaseElementType(QualType QT) const;
+
+ /// getConstantArrayElementCount - Returns number of constant array elements.
+ uint64_t getConstantArrayElementCount(const ConstantArrayType *CA) const;
+
+ /// \brief Perform adjustment on the parameter type of a function.
+ ///
+ /// This routine adjusts the given parameter type @p T to the actual
+ /// parameter type used by semantic analysis (C99 6.7.5.3p[7,8],
+ /// C++ [dcl.fct]p3). The adjusted parameter type is returned.
+ QualType getAdjustedParameterType(QualType T);
+
+ /// \brief Retrieve the parameter type as adjusted for use in the signature
+ /// of a function, decaying array and function types and removing top-level
+ /// cv-qualifiers.
+ QualType getSignatureParameterType(QualType T);
+
+ /// getArrayDecayedType - Return the properly qualified result of decaying the
+ /// specified array type to a pointer. This operation is non-trivial when
+ /// handling typedefs etc. The canonical type of "T" must be an array type,
+ /// this returns a pointer to a properly qualified element of the array.
+ ///
+ /// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
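+ ///
+ /// Illustrative sketch (editor's addition), assuming an ASTContext 'Ctx'
+ /// is in scope; 'int[10]' decays to 'int *':
+ /// \code
+ ///   QualType Arr = Ctx.getConstantArrayType(Ctx.IntTy, llvm::APInt(32, 10),
+ ///                                           ArrayType::Normal, 0);
+ ///   QualType Ptr = Ctx.getArrayDecayedType(Arr); // int *
+ /// \endcode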
+ QualType getArrayDecayedType(QualType T) const;
+
+ /// getPromotedIntegerType - Returns the type that Promotable will
+ /// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
+ /// integer type.
+ QualType getPromotedIntegerType(QualType PromotableType) const;
+
+ /// \brief Recurses through pointer/array types until it finds an Objective-C
+ /// retainable type, then returns its ownership.
+ Qualifiers::ObjCLifetime getInnerObjCOwnership(QualType T) const;
+
+ /// \brief Whether this is a promotable bitfield reference according
+ /// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
+ ///
+ /// \returns the type this bit-field will promote to, or NULL if no
+ /// promotion occurs.
+ QualType isPromotableBitField(Expr *E) const;
+
+ /// getIntegerTypeOrder - Returns the highest ranked integer type:
+ /// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
+ /// LHS < RHS, return -1.
+ int getIntegerTypeOrder(QualType LHS, QualType RHS) const;
+
+ /// getFloatingTypeOrder - Compare the rank of the two specified floating
+ /// point types, ignoring the domain of the type (i.e. 'double' ==
+ /// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
+ /// LHS < RHS, return -1.
+ int getFloatingTypeOrder(QualType LHS, QualType RHS) const;
+
+ /// getFloatingTypeOfSizeWithinDomain - Returns a real floating
+ /// point or a complex type (based on typeDomain/typeSize).
+ /// 'typeDomain' is a real floating point or complex type.
+ /// 'typeSize' is a real floating point or complex type.
+ QualType getFloatingTypeOfSizeWithinDomain(QualType typeSize,
+ QualType typeDomain) const;
+
+ unsigned getTargetAddressSpace(QualType T) const {
+ return getTargetAddressSpace(T.getQualifiers());
+ }
+
+ unsigned getTargetAddressSpace(Qualifiers Q) const {
+ return getTargetAddressSpace(Q.getAddressSpace());
+ }
+
+ unsigned getTargetAddressSpace(unsigned AS) const {
+ if (AS < LangAS::Offset || AS >= LangAS::Offset + LangAS::Count)
+ return AS;
+ else
+ return (*AddrSpaceMap)[AS - LangAS::Offset];
+ }
+
+private:
+ // Helper for integer ordering
+ unsigned getIntegerRank(const Type *T) const;
+
+public:
+
+ //===--------------------------------------------------------------------===//
+ // Type Compatibility Predicates
+ //===--------------------------------------------------------------------===//
+
+ /// Compatibility predicates used to check assignment expressions.
+ bool typesAreCompatible(QualType T1, QualType T2,
+ bool CompareUnqualified = false); // C99 6.2.7p1
+
+ bool propertyTypesAreCompatible(QualType, QualType);
+ bool typesAreBlockPointerCompatible(QualType, QualType);
+
+ bool isObjCIdType(QualType T) const {
+ return T == getObjCIdType();
+ }
+ bool isObjCClassType(QualType T) const {
+ return T == getObjCClassType();
+ }
+ bool isObjCSelType(QualType T) const {
+ return T == getObjCSelType();
+ }
+ bool QualifiedIdConformsQualifiedId(QualType LHS, QualType RHS);
+ bool ObjCQualifiedIdTypesAreCompatible(QualType LHS, QualType RHS,
+ bool ForCompare);
+
+ bool ObjCQualifiedClassTypesAreCompatible(QualType LHS, QualType RHS);
+
+ // Check the safety of assignment from LHS to RHS
+ bool canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
+ const ObjCObjectPointerType *RHSOPT);
+ bool canAssignObjCInterfaces(const ObjCObjectType *LHS,
+ const ObjCObjectType *RHS);
+ bool canAssignObjCInterfacesInBlockPointer(
+ const ObjCObjectPointerType *LHSOPT,
+ const ObjCObjectPointerType *RHSOPT,
+ bool BlockReturnType);
+ bool areComparableObjCPointerTypes(QualType LHS, QualType RHS);
+ QualType areCommonBaseCompatible(const ObjCObjectPointerType *LHSOPT,
+ const ObjCObjectPointerType *RHSOPT);
+ bool canBindObjCObjectType(QualType To, QualType From);
+
+ // Functions for calculating composite types
+ QualType mergeTypes(QualType, QualType, bool OfBlockPointer=false,
+ bool Unqualified = false, bool BlockReturnType = false);
+ QualType mergeFunctionTypes(QualType, QualType, bool OfBlockPointer=false,
+ bool Unqualified = false);
+ QualType mergeFunctionArgumentTypes(QualType, QualType,
+ bool OfBlockPointer=false,
+ bool Unqualified = false);
+ QualType mergeTransparentUnionType(QualType, QualType,
+ bool OfBlockPointer=false,
+ bool Unqualified = false);
+
+ QualType mergeObjCGCQualifiers(QualType, QualType);
+
+ bool FunctionTypesMatchOnNSConsumedAttrs(
+ const FunctionProtoType *FromFunctionType,
+ const FunctionProtoType *ToFunctionType);
+
+ void ResetObjCLayout(const ObjCContainerDecl *CD) {
+ ObjCLayouts[CD] = 0;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Integer Predicates
+ //===--------------------------------------------------------------------===//
+
+ // The width of an integer, as defined in C99 6.2.6.2. This is the number
+ // of bits in an integer type excluding any padding bits.
+ unsigned getIntWidth(QualType T) const;
+
+ // Per C99 6.2.5p6, for every signed integer type, there is a corresponding
+ // unsigned integer type. This method takes a signed type, and returns the
+ // corresponding unsigned integer type.
+ QualType getCorrespondingUnsignedType(QualType T);
+
+ //===--------------------------------------------------------------------===//
+ // Type Iterators.
+ //===--------------------------------------------------------------------===//
+
+ typedef std::vector<Type*>::iterator type_iterator;
+ typedef std::vector<Type*>::const_iterator const_type_iterator;
+
+ type_iterator types_begin() { return Types.begin(); }
+ type_iterator types_end() { return Types.end(); }
+ const_type_iterator types_begin() const { return Types.begin(); }
+ const_type_iterator types_end() const { return Types.end(); }
+
+ //===--------------------------------------------------------------------===//
+ // Integer Values
+ //===--------------------------------------------------------------------===//
+
+ /// MakeIntValue - Make an APSInt of the appropriate width and
+ /// signedness for the given \arg Value and integer \arg Type.
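+ ///
+ /// Illustrative sketch (editor's addition), assuming an ASTContext 'Ctx'
+ /// is in scope; this makes a signed APSInt of 'int' width holding 42:
+ /// \code
+ ///   llvm::APSInt FortyTwo = Ctx.MakeIntValue(42, Ctx.IntTy);
+ /// \endcode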
+ llvm::APSInt MakeIntValue(uint64_t Value, QualType Type) const {
+ llvm::APSInt Res(getIntWidth(Type),
+ !Type->isSignedIntegerOrEnumerationType());
+ Res = Value;
+ return Res;
+ }
+
+ bool isSentinelNullExpr(const Expr *E);
+
+ /// \brief Get the implementation of ObjCInterfaceDecl, or NULL if none exists.
+ ObjCImplementationDecl *getObjCImplementation(ObjCInterfaceDecl *D);
+ /// \brief Get the implementation of ObjCCategoryDecl, or NULL if none exists.
+ ObjCCategoryImplDecl *getObjCImplementation(ObjCCategoryDecl *D);
+
+ /// \brief Returns true if there is at least one @implementation in the TU.
+ bool AnyObjCImplementation() {
+ return !ObjCImpls.empty();
+ }
+
+ /// \brief Set the implementation of ObjCInterfaceDecl.
+ void setObjCImplementation(ObjCInterfaceDecl *IFaceD,
+ ObjCImplementationDecl *ImplD);
+ /// \brief Set the implementation of ObjCCategoryDecl.
+ void setObjCImplementation(ObjCCategoryDecl *CatD,
+ ObjCCategoryImplDecl *ImplD);
+
+ /// \brief Get the duplicate declaration of an ObjCMethod in the same
+ /// interface, or null if none exists.
+ const ObjCMethodDecl *getObjCMethodRedeclaration(
+ const ObjCMethodDecl *MD) const {
+ llvm::DenseMap<const ObjCMethodDecl*, const ObjCMethodDecl*>::const_iterator
+ I = ObjCMethodRedecls.find(MD);
+ if (I == ObjCMethodRedecls.end())
+ return 0;
+ return I->second;
+ }
+
+ void setObjCMethodRedeclaration(const ObjCMethodDecl *MD,
+ const ObjCMethodDecl *Redecl) {
+ ObjCMethodRedecls[MD] = Redecl;
+ }
+
+ /// \brief Returns the Objective-C interface that \arg ND belongs to if it is
+ /// an Objective-C method/property/ivar etc. that is part of an interface;
+ /// otherwise returns null.
+ ObjCInterfaceDecl *getObjContainingInterface(NamedDecl *ND) const;
+
+ /// \brief Set the copy initialization expression of a block var decl.
+ void setBlockVarCopyInits(VarDecl*VD, Expr* Init);
+ /// \brief Get the copy initialization expression of a VarDecl, or NULL if
+ /// none exists.
+ Expr *getBlockVarCopyInits(const VarDecl*VD);
+
+ /// \brief Allocate an uninitialized TypeSourceInfo.
+ ///
+ /// The caller should initialize the memory held by TypeSourceInfo using
+ /// the TypeLoc wrappers.
+ ///
+ /// \param T the type that will be the basis for type source info. This type
+ /// should refer to how the declarator was written in source code, not to
+ /// what type semantic analysis resolved the declarator to.
+ ///
+ /// \param Size the size of the type info to create, or 0 if the size
+ /// should be calculated based on the type.
+ TypeSourceInfo *CreateTypeSourceInfo(QualType T, unsigned Size = 0) const;
+
+ /// \brief Allocate a TypeSourceInfo where all locations have been
+ /// initialized to a given location, which defaults to the empty
+ /// location.
+ TypeSourceInfo *
+ getTrivialTypeSourceInfo(QualType T,
+ SourceLocation Loc = SourceLocation()) const;
+
+ TypeSourceInfo *getNullTypeSourceInfo() { return &NullTypeSourceInfo; }
+
+ /// \brief Add a deallocation callback that will be invoked when the
+ /// ASTContext is destroyed.
+ ///
+ /// \param Callback A callback function that will be invoked on destruction.
+ ///
+ /// \param Data Pointer data that will be provided to the callback function
+ /// when it is called.
+ void AddDeallocation(void (*Callback)(void*), void *Data);
+
+ GVALinkage GetGVALinkageForFunction(const FunctionDecl *FD);
+ GVALinkage GetGVALinkageForVariable(const VarDecl *VD);
+
+ /// \brief Determines if the decl can be CodeGen'ed or deserialized from PCH
+ /// lazily, only when used; this is only relevant for function or file scoped
+ /// var definitions.
+ ///
+ /// \returns true if the function/var must be CodeGen'ed/deserialized even if
+ /// it is not used.
+ bool DeclMustBeEmitted(const Decl *D);
+
+ /// \brief Retrieve the lambda mangling number for a lambda expression.
+ unsigned getLambdaManglingNumber(CXXMethodDecl *CallOperator);
+
+ /// \brief Used by ParmVarDecl to store on the side the
+ /// index of the parameter when it exceeds the size of the normal bitfield.
+ void setParameterIndex(const ParmVarDecl *D, unsigned index);
+
+ /// \brief Used by ParmVarDecl to retrieve on the side the
+ /// index of the parameter when it exceeds the size of the normal bitfield.
+ unsigned getParameterIndex(const ParmVarDecl *D) const;
+
+ //===--------------------------------------------------------------------===//
+ // Statistics
+ //===--------------------------------------------------------------------===//
+
+ /// \brief The number of implicitly-declared default constructors.
+ static unsigned NumImplicitDefaultConstructors;
+
+ /// \brief The number of implicitly-declared default constructors for
+ /// which declarations were built.
+ static unsigned NumImplicitDefaultConstructorsDeclared;
+
+ /// \brief The number of implicitly-declared copy constructors.
+ static unsigned NumImplicitCopyConstructors;
+
+ /// \brief The number of implicitly-declared copy constructors for
+ /// which declarations were built.
+ static unsigned NumImplicitCopyConstructorsDeclared;
+
+ /// \brief The number of implicitly-declared move constructors.
+ static unsigned NumImplicitMoveConstructors;
+
+ /// \brief The number of implicitly-declared move constructors for
+ /// which declarations were built.
+ static unsigned NumImplicitMoveConstructorsDeclared;
+
+ /// \brief The number of implicitly-declared copy assignment operators.
+ static unsigned NumImplicitCopyAssignmentOperators;
+
+ /// \brief The number of implicitly-declared copy assignment operators for
+ /// which declarations were built.
+ static unsigned NumImplicitCopyAssignmentOperatorsDeclared;
+
+ /// \brief The number of implicitly-declared move assignment operators.
+ static unsigned NumImplicitMoveAssignmentOperators;
+
+ /// \brief The number of implicitly-declared move assignment operators for
+ /// which declarations were built.
+ static unsigned NumImplicitMoveAssignmentOperatorsDeclared;
+
+ /// \brief The number of implicitly-declared destructors.
+ static unsigned NumImplicitDestructors;
+
+ /// \brief The number of implicitly-declared destructors for which
+ /// declarations were built.
+ static unsigned NumImplicitDestructorsDeclared;
+
+private:
+ ASTContext(const ASTContext&); // DO NOT IMPLEMENT
+ void operator=(const ASTContext&); // DO NOT IMPLEMENT
+
+public:
+ /// \brief Initialize built-in types.
+ ///
+ /// This routine may only be invoked once for a given ASTContext object.
+ /// It is normally invoked by the ASTContext constructor. However, the
+ /// constructor can be asked to delay initialization, which places the burden
+ /// of calling this function on the user of that object.
+ ///
+ /// \param Target The target whose properties determine the built-in types.
+ void InitBuiltinTypes(const TargetInfo &Target);
+
+private:
+ void InitBuiltinType(CanQualType &R, BuiltinType::Kind K);
+
+ // Return the ObjC type encoding for a given type.
+ void getObjCEncodingForTypeImpl(QualType t, std::string &S,
+ bool ExpandPointedToStructures,
+ bool ExpandStructures,
+ const FieldDecl *Field,
+ bool OutermostType = false,
+ bool EncodingProperty = false,
+ bool StructField = false,
+ bool EncodeBlockParameters = false,
+ bool EncodeClassNames = false) const;
+
+ // Adds the encoding of the structure's members.
+ void getObjCEncodingForStructureImpl(RecordDecl *RD, std::string &S,
+ const FieldDecl *Field,
+ bool includeVBases = true) const;
+
+ // Adds the encoding of a method parameter or return type.
+ void getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
+ QualType T, std::string& S,
+ bool Extended) const;
+
+ const ASTRecordLayout &
+ getObjCLayout(const ObjCInterfaceDecl *D,
+ const ObjCImplementationDecl *Impl) const;
+
+private:
+ /// \brief A set of deallocations that should be performed when the
+ /// ASTContext is destroyed.
+ SmallVector<std::pair<void (*)(void*), void *>, 16> Deallocations;
+
+ // FIXME: This currently contains the set of StoredDeclMaps used
+ // by DeclContext objects. This probably should not be in ASTContext,
+ // but we include it here so that ASTContext can quickly deallocate them.
+ llvm::PointerIntPair<StoredDeclsMap*,1> LastSDM;
+
+ /// \brief A counter used to uniquely identify "blocks".
+ mutable unsigned int UniqueBlockByRefTypeID;
+
+ friend class DeclContext;
+ friend class DeclarationNameTable;
+ void ReleaseDeclContextMaps();
+};
+
+/// @brief Utility function for constructing a nullary selector.
+static inline Selector GetNullarySelector(StringRef name, ASTContext& Ctx) {
+ IdentifierInfo* II = &Ctx.Idents.get(name);
+ return Ctx.Selectors.getSelector(0, &II);
+}
+
+ /// @brief Utility function for constructing a unary selector.
+static inline Selector GetUnarySelector(StringRef name, ASTContext& Ctx) {
+ IdentifierInfo* II = &Ctx.Idents.get(name);
+ return Ctx.Selectors.getSelector(1, &II);
+}
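+
+// Illustrative usage sketch (editor's addition), assuming an ASTContext 'Ctx'
+// is in scope:
+//   Selector Alloc   = GetNullarySelector("alloc", Ctx);
+//   Selector SetName = GetUnarySelector("setName", Ctx);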
+
+} // end namespace clang
+
+// operator new and delete aren't allowed inside namespaces.
+
+/// @brief Placement new for using the ASTContext's allocator.
+///
+/// This placement form of operator new uses the ASTContext's allocator for
+/// obtaining memory.
+///
+/// IMPORTANT: These are also declared in clang/AST/Attr.h! Any changes here
+/// need to also be made there.
+///
+/// We intentionally avoid using a nothrow specification here so that the calls
+/// to this operator will not perform a null check on the result -- the
+/// underlying allocator never returns null pointers.
+///
+/// Usage looks like this (assuming there's an ASTContext 'Context' in scope):
+/// @code
+/// // Default alignment (8)
+/// IntegerLiteral *Ex = new (Context) IntegerLiteral(arguments);
+/// // Specific alignment
+/// IntegerLiteral *Ex2 = new (Context, 4) IntegerLiteral(arguments);
+/// @endcode
+/// Please note that you cannot use delete on the pointer; it must be
+/// deallocated using an explicit destructor call followed by
+/// @c Context.Deallocate(Ptr).
+///
+/// @param Bytes The number of bytes to allocate. Calculated by the compiler.
+/// @param C The ASTContext that provides the allocator.
+/// @param Alignment The alignment of the allocated memory (if the underlying
+/// allocator supports it).
+/// @return The allocated memory. Could be NULL.
+inline void *operator new(size_t Bytes, const clang::ASTContext &C,
+ size_t Alignment) {
+ return C.Allocate(Bytes, Alignment);
+}
+/// @brief Placement delete companion to the new above.
+///
+/// This operator is just a companion to the new above. There is no way of
+/// invoking it directly; see the new operator for more details. This operator
+/// is called implicitly by the compiler if a placement new expression using
+/// the ASTContext throws in the object constructor.
+inline void operator delete(void *Ptr, const clang::ASTContext &C, size_t) {
+ C.Deallocate(Ptr);
+}
+
+/// This placement form of operator new[] uses the ASTContext's allocator for
+/// obtaining memory.
+///
+/// We intentionally avoid using a nothrow specification here so that the calls
+/// to this operator will not perform a null check on the result -- the
+/// underlying allocator never returns null pointers.
+///
+/// Usage looks like this (assuming there's an ASTContext 'Context' in scope):
+/// @code
+/// // Default alignment (8)
+/// char *data = new (Context) char[10];
+/// // Specific alignment
+/// char *data = new (Context, 4) char[10];
+/// @endcode
+/// Please note that you cannot use delete on the pointer; it must be
+/// deallocated using an explicit destructor call followed by
+/// @c Context.Deallocate(Ptr).
+///
+/// @param Bytes The number of bytes to allocate. Calculated by the compiler.
+/// @param C The ASTContext that provides the allocator.
+/// @param Alignment The alignment of the allocated memory (if the underlying
+/// allocator supports it).
+/// @return The allocated memory. Could be NULL.
+inline void *operator new[](size_t Bytes, const clang::ASTContext& C,
+ size_t Alignment = 8) {
+ return C.Allocate(Bytes, Alignment);
+}
+
+/// @brief Placement delete[] companion to the new[] above.
+///
+/// This operator is just a companion to the new[] above. There is no way of
+/// invoking it directly; see the new[] operator for more details. This operator
+/// is called implicitly by the compiler if a placement new[] expression using
+/// the ASTContext throws in the object constructor.
+inline void operator delete[](void *Ptr, const clang::ASTContext &C, size_t) {
+ C.Deallocate(Ptr);
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTDiagnostic.h b/contrib/llvm/tools/clang/include/clang/AST/ASTDiagnostic.h
new file mode 100644
index 0000000..64e955e
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTDiagnostic.h
@@ -0,0 +1,50 @@
+//===--- ASTDiagnostic.h - Diagnostics for the AST library ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_DIAGNOSTICAST_H
+#define LLVM_CLANG_DIAGNOSTICAST_H
+
+#include "clang/Basic/Diagnostic.h"
+
+namespace clang {
+ namespace diag {
+ enum {
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
+#define ASTSTART
+#include "clang/Basic/DiagnosticASTKinds.inc"
+#undef DIAG
+ NUM_BUILTIN_AST_DIAGNOSTICS
+ };
+ } // end namespace diag
+
+ /// \brief DiagnosticsEngine argument formatting function for diagnostics that
+ /// involve AST nodes.
+ ///
+ /// This function formats diagnostic arguments for various AST nodes,
+ /// including types, declaration names, nested name specifiers, and
+ /// declaration contexts, into strings that can be printed as part of
+ /// diagnostics. It is meant to be used as the argument to
+ /// \c DiagnosticsEngine::SetArgToStringFn(), where the cookie is an \c
+ /// ASTContext pointer.
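+ ///
+ /// Illustrative sketch (editor's addition) of the intended hook-up, assuming
+ /// a DiagnosticsEngine 'Diags' and an ASTContext 'Context' are in scope:
+ /// \code
+ ///   Diags.SetArgToStringFn(FormatASTNodeDiagnosticArgument,
+ ///                          reinterpret_cast<intptr_t>(&Context));
+ /// \endcode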
+ void FormatASTNodeDiagnosticArgument(
+ DiagnosticsEngine::ArgumentKind Kind,
+ intptr_t Val,
+ const char *Modifier,
+ unsigned ModLen,
+ const char *Argument,
+ unsigned ArgLen,
+ const DiagnosticsEngine::ArgumentValue *PrevArgs,
+ unsigned NumPrevArgs,
+ SmallVectorImpl<char> &Output,
+ void *Cookie,
+ ArrayRef<intptr_t> QualTypeVals);
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h b/contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h
new file mode 100644
index 0000000..7157efe
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTImporter.h
@@ -0,0 +1,278 @@
+//===--- ASTImporter.h - Importing ASTs from other Contexts -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTImporter class which imports AST nodes from one
+// context into another context.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_AST_ASTIMPORTER_H
+#define LLVM_CLANG_AST_ASTIMPORTER_H
+
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+ class ASTContext;
+ class Decl;
+ class DeclContext;
+ class DiagnosticsEngine;
+ class Expr;
+ class FileManager;
+ class IdentifierInfo;
+ class NestedNameSpecifier;
+ class Stmt;
+ class TypeSourceInfo;
+
+ /// \brief Imports selected nodes from one AST context into another context,
+ /// merging AST nodes where appropriate.
+ class ASTImporter {
+ public:
+ typedef llvm::DenseSet<std::pair<Decl *, Decl *> > NonEquivalentDeclSet;
+
+ private:
+ /// \brief The contexts we're importing to and from.
+ ASTContext &ToContext, &FromContext;
+
+ /// \brief The file managers we're importing to and from.
+ FileManager &ToFileManager, &FromFileManager;
+
+ /// \brief Whether to perform a minimal import.
+ bool Minimal;
+
+ /// \brief Mapping from the already-imported types in the "from" context
+ /// to the corresponding types in the "to" context.
+ llvm::DenseMap<const Type *, const Type *> ImportedTypes;
+
+ /// \brief Mapping from the already-imported declarations in the "from"
+ /// context to the corresponding declarations in the "to" context.
+ llvm::DenseMap<Decl *, Decl *> ImportedDecls;
+
+ /// \brief Mapping from the already-imported statements in the "from"
+ /// context to the corresponding statements in the "to" context.
+ llvm::DenseMap<Stmt *, Stmt *> ImportedStmts;
+
+ /// \brief Mapping from the already-imported FileIDs in the "from" source
+ /// manager to the corresponding FileIDs in the "to" source manager.
+ llvm::DenseMap<FileID, FileID> ImportedFileIDs;
+
+ /// \brief Imported, anonymous tag declarations that are missing their
+ /// corresponding typedefs.
+ SmallVector<TagDecl *, 4> AnonTagsWithPendingTypedefs;
+
+ /// \brief Declaration (from, to) pairs that are known not to be equivalent
+ /// (which we have already complained about).
+ NonEquivalentDeclSet NonEquivalentDecls;
+
+ public:
+ /// \brief Create a new AST importer.
+ ///
+ /// \param ToContext The context we'll be importing into.
+ ///
+ /// \param ToFileManager The file manager we'll be importing into.
+ ///
+ /// \param FromContext The context we'll be importing from.
+ ///
+ /// \param FromFileManager The file manager we'll be importing from.
+ ///
+ /// \param MinimalImport If true, the importer will attempt to import
+ /// as little as it can, e.g., by importing declarations as forward
+ /// declarations that can be completed at a later point.
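+ ///
+ /// Illustrative sketch (editor's addition): importing a declaration between
+ /// two translation units ('ToCtx', 'ToFM', 'FromCtx', 'FromFM', and 'FromD'
+ /// are assumed to exist):
+ /// \code
+ ///   ASTImporter Importer(ToCtx, ToFM, FromCtx, FromFM,
+ ///                        /*MinimalImport=*/false);
+ ///   Decl *ToD = Importer.Import(FromD);
+ /// \endcode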
+ ASTImporter(ASTContext &ToContext, FileManager &ToFileManager,
+ ASTContext &FromContext, FileManager &FromFileManager,
+ bool MinimalImport);
+
+ virtual ~ASTImporter();
+
+ /// \brief Whether the importer will perform a minimal import, creating
+ /// to-be-completed forward declarations when possible.
+ bool isMinimalImport() const { return Minimal; }
+
+ /// \brief Import the given type from the "from" context into the "to"
+ /// context.
+ ///
+ /// \returns the equivalent type in the "to" context, or a NULL type if
+ /// an error occurred.
+ QualType Import(QualType FromT);
+
+ /// \brief Import the given type source information from the
+ /// "from" context into the "to" context.
+ ///
+ /// \returns the equivalent type source information in the "to"
+ /// context, or NULL if an error occurred.
+ TypeSourceInfo *Import(TypeSourceInfo *FromTSI);
+
+ /// \brief Import the given declaration from the "from" context into the
+ /// "to" context.
+ ///
+ /// \returns the equivalent declaration in the "to" context, or a NULL type
+ /// if an error occurred.
+ Decl *Import(Decl *FromD);
+
+ /// \brief Import the given declaration context from the "from"
+ /// AST context into the "to" AST context.
+ ///
+ /// \returns the equivalent declaration context in the "to"
+ /// context, or a NULL type if an error occurred.
+ DeclContext *ImportContext(DeclContext *FromDC);
+
+ /// \brief Import the given expression from the "from" context into the
+ /// "to" context.
+ ///
+ /// \returns the equivalent expression in the "to" context, or NULL if
+ /// an error occurred.
+ Expr *Import(Expr *FromE);
+
+ /// \brief Import the given statement from the "from" context into the
+ /// "to" context.
+ ///
+ /// \returns the equivalent statement in the "to" context, or NULL if
+ /// an error occurred.
+ Stmt *Import(Stmt *FromS);
+
+ /// \brief Import the given nested-name-specifier from the "from"
+ /// context into the "to" context.
+ ///
+ /// \returns the equivalent nested-name-specifier in the "to"
+ /// context, or NULL if an error occurred.
+ NestedNameSpecifier *Import(NestedNameSpecifier *FromNNS);
+
+ /// \brief Import the given nested-name-specifier from the "from"
+ /// context into the "to" context.
+ ///
+ /// \returns the equivalent nested-name-specifier in the "to"
+ /// context.
+ NestedNameSpecifierLoc Import(NestedNameSpecifierLoc FromNNS);
+
+ /// \brief Import the given template name from the "from" context into the
+ /// "to" context.
+ TemplateName Import(TemplateName From);
+
+ /// \brief Import the given source location from the "from" context into
+ /// the "to" context.
+ ///
+ /// \returns the equivalent source location in the "to" context, or an
+ /// invalid source location if an error occurred.
+ SourceLocation Import(SourceLocation FromLoc);
+
+ /// \brief Import the given source range from the "from" context into
+ /// the "to" context.
+ ///
+ /// \returns the equivalent source range in the "to" context, or an
+ /// invalid source location if an error occurred.
+ SourceRange Import(SourceRange FromRange);
+
+ /// \brief Import the given declaration name from the "from"
+ /// context into the "to" context.
+ ///
+ /// \returns the equivalent declaration name in the "to" context,
+ /// or an empty declaration name if an error occurred.
+ DeclarationName Import(DeclarationName FromName);
+
+ /// \brief Import the given identifier from the "from" context
+ /// into the "to" context.
+ ///
+ /// \returns the equivalent identifier in the "to" context.
+ IdentifierInfo *Import(const IdentifierInfo *FromId);
+
+ /// \brief Import the given Objective-C selector from the "from"
+ /// context into the "to" context.
+ ///
+ /// \returns the equivalent selector in the "to" context.
+ Selector Import(Selector FromSel);
+
+ /// \brief Import the given file ID from the "from" context into the
+ /// "to" context.
+ ///
+ /// \returns the equivalent file ID in the source manager of the "to"
+ /// context.
+ FileID Import(FileID);
+
+ /// \brief Import the definition of the given declaration, including all of
+ /// the declarations it contains.
+ ///
+ /// This routine is intended to be used when a complete definition of the
+ /// declaration is needed, e.g., to complete a declaration that was only
+ /// brought over as a forward declaration by a minimal import.
+ void ImportDefinition(Decl *From);
+
+ /// \brief Cope with a name conflict when importing a declaration into the
+ /// given context.
+ ///
+ /// This routine is invoked whenever there is a name conflict while
+ /// importing a declaration. The returned name will become the name of the
+ /// imported declaration. By default, the returned name is the same as the
+ /// original name, leaving the conflict unresolved, so that name lookup
+ /// for this name is likely to find an ambiguity later.
+ ///
+ /// Subclasses may override this routine to resolve the conflict, e.g., by
+ /// renaming the declaration being imported.
+ ///
+ /// \param Name the name of the declaration being imported, which conflicts
+ /// with other declarations.
+ ///
+ /// \param DC the declaration context (in the "to" AST context) in which
+ /// the name is being imported.
+ ///
+ /// \param IDNS the identifier namespace in which the name will be found.
+ ///
+ /// \param Decls the set of declarations with the same name as the
+ /// declaration being imported.
+ ///
+ /// \param NumDecls the number of conflicting declarations in \p Decls.
+ ///
+ /// \returns the name that the newly-imported declaration should have.
+ virtual DeclarationName HandleNameConflict(DeclarationName Name,
+ DeclContext *DC,
+ unsigned IDNS,
+ NamedDecl **Decls,
+ unsigned NumDecls);
+
+ /// \brief Retrieve the context that AST nodes are being imported into.
+ ASTContext &getToContext() const { return ToContext; }
+
+ /// \brief Retrieve the context that AST nodes are being imported from.
+ ASTContext &getFromContext() const { return FromContext; }
+
+ /// \brief Retrieve the file manager that AST nodes are being imported into.
+ FileManager &getToFileManager() const { return ToFileManager; }
+
+ /// \brief Retrieve the file manager that AST nodes are being imported from.
+ FileManager &getFromFileManager() const { return FromFileManager; }
+
+ /// \brief Report a diagnostic in the "to" context.
+ DiagnosticBuilder ToDiag(SourceLocation Loc, unsigned DiagID);
+
+ /// \brief Report a diagnostic in the "from" context.
+ DiagnosticBuilder FromDiag(SourceLocation Loc, unsigned DiagID);
+
+ /// \brief Return the set of declarations that we know are not equivalent.
+ NonEquivalentDeclSet &getNonEquivalentDecls() { return NonEquivalentDecls; }
+
+ /// \brief Called for ObjCInterfaceDecl, ObjCProtocolDecl, and TagDecl.
+ /// Mark the Decl as complete, filling it in as much as possible.
+ ///
+ /// \param D A declaration in the "to" context.
+ virtual void CompleteDecl(Decl* D);
+
+ /// \brief Note that we have imported the "from" declaration by mapping it
+ /// to the (potentially-newly-created) "to" declaration.
+ ///
+ /// Subclasses can override this function to observe all of the \c From ->
+ /// \c To declaration mappings as they are imported.
+ virtual Decl *Imported(Decl *From, Decl *To);
+
+ /// \brief Determine whether the given types are structurally
+ /// equivalent.
+ bool IsStructurallyEquivalent(QualType From, QualType To);
+ };
+}
+
+#endif // LLVM_CLANG_AST_ASTIMPORTER_H
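For orientation, a minimal usage sketch of the interface above (not part of the patch); the two ASTContext/FileManager pairs are assumed to be owned by the caller, and only calls declared in this header are used:

#include "clang/AST/ASTImporter.h"

// Import FromD into the "to" context and force a complete definition.
clang::Decl *importDecl(clang::ASTContext &ToCtx, clang::FileManager &ToFM,
                        clang::ASTContext &FromCtx, clang::FileManager &FromFM,
                        clang::Decl *FromD) {
  // MinimalImport = true: referenced declarations come over as forward
  // declarations and are completed on demand.
  clang::ASTImporter Importer(ToCtx, ToFM, FromCtx, FromFM,
                              /*MinimalImport=*/true);
  clang::Decl *ToD = Importer.Import(FromD);   // NULL on error
  if (ToD)
    Importer.ImportDefinition(FromD);          // pull in the full definition
  return ToD;
}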
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h b/contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h
new file mode 100644
index 0000000..cb038a0
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTMutationListener.h
@@ -0,0 +1,84 @@
+//===--- ASTMutationListener.h - AST Mutation Interface --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTMutationListener interface.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_AST_ASTMUTATIONLISTENER_H
+#define LLVM_CLANG_AST_ASTMUTATIONLISTENER_H
+
+namespace clang {
+ class Decl;
+ class DeclContext;
+ class TagDecl;
+ class CXXRecordDecl;
+ class ClassTemplateDecl;
+ class ClassTemplateSpecializationDecl;
+ class FunctionDecl;
+ class FunctionTemplateDecl;
+ class ObjCCategoryDecl;
+ class ObjCInterfaceDecl;
+ class ObjCContainerDecl;
+ class ObjCPropertyDecl;
+ class VarDecl;
+
+/// \brief An abstract interface that should be implemented by listeners
+/// that want to be notified when an AST entity gets modified after its
+/// initial creation.
+class ASTMutationListener {
+public:
+ virtual ~ASTMutationListener();
+
+ /// \brief A new TagDecl definition was completed.
+ virtual void CompletedTagDefinition(const TagDecl *D) { }
+
+ /// \brief A new declaration with a name has been added to a DeclContext.
+ virtual void AddedVisibleDecl(const DeclContext *DC, const Decl *D) {}
+
+ /// \brief An implicit member was added after the definition was completed.
+ virtual void AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D) {}
+
+ /// \brief A template specialization (or partial one) was added to the
+ /// template declaration.
+ virtual void AddedCXXTemplateSpecialization(const ClassTemplateDecl *TD,
+ const ClassTemplateSpecializationDecl *D) {}
+
+ /// \brief A template specialization (or partial one) was added to the
+ /// template declaration.
+ virtual void AddedCXXTemplateSpecialization(const FunctionTemplateDecl *TD,
+ const FunctionDecl *D) {}
+
+ /// \brief An implicit member got a definition.
+ virtual void CompletedImplicitDefinition(const FunctionDecl *D) {}
+
+ /// \brief A static data member was implicitly instantiated.
+ virtual void StaticDataMemberInstantiated(const VarDecl *D) {}
+
+ /// \brief A new Objective-C category was added to an interface.
+ virtual void AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
+ const ObjCInterfaceDecl *IFD) {}
+
+ /// \brief An Objective-C class extension redeclared or introduced a property.
+ ///
+ /// \param Prop the property in the class extension
+ ///
+ /// \param OrigProp the property from the original interface that the class
+ /// extension redeclares, or null if the property was newly introduced.
+ ///
+ /// \param ClassExt the class extension.
+ virtual void AddedObjCPropertyInClassExtension(const ObjCPropertyDecl *Prop,
+ const ObjCPropertyDecl *OrigProp,
+ const ObjCCategoryDecl *ClassExt) {}
+
+ // NOTE: If new methods are added they should also be added to
+ // MultiplexASTMutationListener.
+};
+
+} // end namespace clang
+
+#endif
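A short sketch (not part of the patch) of a listener implementation; how the listener is registered with the AST is not shown and is outside this header:

#include "clang/AST/ASTMutationListener.h"

// A trivial listener that counts completed tag (struct/union/class/enum)
// definitions.
class TagCountingListener : public clang::ASTMutationListener {
  unsigned NumTags;
public:
  TagCountingListener() : NumTags(0) {}

  virtual void CompletedTagDefinition(const clang::TagDecl *D) {
    ++NumTags;  // one more definition was completed
  }

  unsigned getNumTags() const { return NumTags; }
};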
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ASTVector.h b/contrib/llvm/tools/clang/include/clang/AST/ASTVector.h
new file mode 100644
index 0000000..217dfad
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/ASTVector.h
@@ -0,0 +1,397 @@
+//===- ASTVector.h - Vector that uses ASTContext for allocation --*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides ASTVector, a vector ADT whose contents are
+// allocated using the allocator associated with an ASTContext.
+//
+//===----------------------------------------------------------------------===//
+
+// FIXME: Most of this is copy-and-paste from BumpVector.h and SmallVector.h.
+// We can refactor this core logic into something common.
+
+#ifndef LLVM_CLANG_AST_VECTOR
+#define LLVM_CLANG_AST_VECTOR
+
+#include "llvm/Support/type_traits.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include <algorithm>
+#include <memory>
+#include <cstring>
+
+#ifdef _MSC_VER
+namespace std {
+#if _MSC_VER <= 1310
+ // Work around flawed VC++ implementation of std::uninitialized_copy. Define
+ // additional overloads so that elements with pointer types are recognized as
+ // scalars and not objects, causing bizarre type conversion errors.
+ template<class T1, class T2>
+ inline _Scalar_ptr_iterator_tag _Ptr_cat(T1 **, T2 **) {
+ _Scalar_ptr_iterator_tag _Cat;
+ return _Cat;
+ }
+
+ template<class T1, class T2>
+ inline _Scalar_ptr_iterator_tag _Ptr_cat(T1* const *, T2 **) {
+ _Scalar_ptr_iterator_tag _Cat;
+ return _Cat;
+ }
+#else
+ // FIXME: It is not clear if the problem is fixed in VS 2005. What is clear
+ // is that the above hack won't work if it wasn't fixed.
+#endif
+}
+#endif
+
+namespace clang {
+ class ASTContext;
+
+template<typename T>
+class ASTVector {
+ T *Begin, *End, *Capacity;
+
+ void setEnd(T *P) { this->End = P; }
+
+public:
+ // Default ctor - Initialize to empty.
+ explicit ASTVector(ASTContext &C, unsigned N = 0)
+ : Begin(NULL), End(NULL), Capacity(NULL) {
+ reserve(C, N);
+ }
+
+ ~ASTVector() {
+ if (llvm::is_class<T>::value) {
+ // Destroy the constructed elements in the vector.
+ destroy_range(Begin, End);
+ }
+ }
+
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef T value_type;
+ typedef T* iterator;
+ typedef const T* const_iterator;
+
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+
+ // forward iterator creation methods.
+ iterator begin() { return Begin; }
+ const_iterator begin() const { return Begin; }
+ iterator end() { return End; }
+ const_iterator end() const { return End; }
+
+ // reverse iterator creation methods.
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+ const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+ const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
+
+ bool empty() const { return Begin == End; }
+ size_type size() const { return End-Begin; }
+
+ reference operator[](unsigned idx) {
+ assert(Begin + idx < End);
+ return Begin[idx];
+ }
+ const_reference operator[](unsigned idx) const {
+ assert(Begin + idx < End);
+ return Begin[idx];
+ }
+
+ reference front() {
+ return begin()[0];
+ }
+ const_reference front() const {
+ return begin()[0];
+ }
+
+ reference back() {
+ return end()[-1];
+ }
+ const_reference back() const {
+ return end()[-1];
+ }
+
+ void pop_back() {
+ --End;
+ End->~T();
+ }
+
+ T pop_back_val() {
+ T Result = back();
+ pop_back();
+ return Result;
+ }
+
+ void clear() {
+ if (llvm::is_class<T>::value) {
+ destroy_range(Begin, End);
+ }
+ End = Begin;
+ }
+
+ /// data - Return a pointer to the vector's buffer, even if empty().
+ pointer data() {
+ return pointer(Begin);
+ }
+
+ /// data - Return a pointer to the vector's buffer, even if empty().
+ const_pointer data() const {
+ return const_pointer(Begin);
+ }
+
+ void push_back(const_reference Elt, ASTContext &C) {
+ if (End < Capacity) {
+ Retry:
+ new (End) T(Elt);
+ ++End;
+ return;
+ }
+ grow(C);
+ goto Retry;
+ }
+
+ void reserve(ASTContext &C, unsigned N) {
+ if (unsigned(Capacity-Begin) < N)
+ grow(C, N);
+ }
+
+ /// capacity - Return the total number of elements in the currently allocated
+ /// buffer.
+ size_t capacity() const { return Capacity - Begin; }
+
+ /// append - Add the specified range to the end of the SmallVector.
+ ///
+ template<typename in_iter>
+ void append(ASTContext &C, in_iter in_start, in_iter in_end) {
+ size_type NumInputs = std::distance(in_start, in_end);
+
+ if (NumInputs == 0)
+ return;
+
+ // Grow allocated space if needed.
+ if (NumInputs > size_type(this->capacity_ptr()-this->end()))
+ this->grow(C, this->size()+NumInputs);
+
+ // Copy the new elements over.
+ // TODO: NEED To compile time dispatch on whether in_iter is a random access
+ // iterator to use the fast uninitialized_copy.
+ std::uninitialized_copy(in_start, in_end, this->end());
+ this->setEnd(this->end() + NumInputs);
+ }
+
+ /// append - Add the specified range to the end of the SmallVector.
+ ///
+ void append(ASTContext &C, size_type NumInputs, const T &Elt) {
+ // Grow allocated space if needed.
+ if (NumInputs > size_type(this->capacity_ptr()-this->end()))
+ this->grow(C, this->size()+NumInputs);
+
+ // Copy the new elements over.
+ std::uninitialized_fill_n(this->end(), NumInputs, Elt);
+ this->setEnd(this->end() + NumInputs);
+ }
+
+ /// uninitialized_copy - Copy the range [I, E) onto the uninitialized memory
+ /// starting with "Dest", constructing elements into it as needed.
+ template<typename It1, typename It2>
+ static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
+ std::uninitialized_copy(I, E, Dest);
+ }
+
+ iterator insert(ASTContext &C, iterator I, const T &Elt) {
+ if (I == this->end()) { // Important special case for empty vector.
+ push_back(Elt, C);
+ return this->end()-1;
+ }
+
+ if (this->End < this->Capacity) {
+ Retry:
+ new (this->end()) T(this->back());
+ this->setEnd(this->end()+1);
+ // Push everything else over.
+ std::copy_backward(I, this->end()-1, this->end());
+ *I = Elt;
+ return I;
+ }
+ size_t EltNo = I-this->begin();
+ this->grow(C);
+ I = this->begin()+EltNo;
+ goto Retry;
+ }
+
+ iterator insert(ASTContext &C, iterator I, size_type NumToInsert,
+ const T &Elt) {
+ if (I == this->end()) { // Important special case for empty vector.
+ append(C, NumToInsert, Elt);
+ return this->end()-1;
+ }
+
+ // Convert iterator to elt# to avoid invalidating iterator when we reserve()
+ size_t InsertElt = I - this->begin();
+
+ // Ensure there is enough space.
+ reserve(C, static_cast<unsigned>(this->size() + NumToInsert));
+
+ // Uninvalidate the iterator.
+ I = this->begin()+InsertElt;
+
+ // If there are more elements between the insertion point and the end of the
+ // range than there are being inserted, we can use a simple approach to
+ // insertion. Since we already reserved space, we know that this won't
+ // reallocate the vector.
+ if (size_t(this->end()-I) >= NumToInsert) {
+ T *OldEnd = this->end();
+ append(C, this->end()-NumToInsert, this->end());
+
+ // Copy the existing elements that get replaced.
+ std::copy_backward(I, OldEnd-NumToInsert, OldEnd);
+
+ std::fill_n(I, NumToInsert, Elt);
+ return I;
+ }
+
+ // Otherwise, we're inserting more elements than exist already, and we're
+ // not inserting at the end.
+
+ // Copy over the elements that we're about to overwrite.
+ T *OldEnd = this->end();
+ this->setEnd(this->end() + NumToInsert);
+ size_t NumOverwritten = OldEnd-I;
+ this->uninitialized_copy(I, OldEnd, this->end()-NumOverwritten);
+
+ // Replace the overwritten part.
+ std::fill_n(I, NumOverwritten, Elt);
+
+ // Insert the non-overwritten middle part.
+ std::uninitialized_fill_n(OldEnd, NumToInsert-NumOverwritten, Elt);
+ return I;
+ }
+
+ template<typename ItTy>
+ iterator insert(ASTContext &C, iterator I, ItTy From, ItTy To) {
+ if (I == this->end()) { // Important special case for empty vector.
+ append(C, From, To);
+ return this->end()-1;
+ }
+
+ size_t NumToInsert = std::distance(From, To);
+ // Convert iterator to elt# to avoid invalidating iterator when we reserve()
+ size_t InsertElt = I - this->begin();
+
+ // Ensure there is enough space.
+ reserve(C, static_cast<unsigned>(this->size() + NumToInsert));
+
+ // Uninvalidate the iterator.
+ I = this->begin()+InsertElt;
+
+ // If there are more elements between the insertion point and the end of the
+ // range than there are being inserted, we can use a simple approach to
+ // insertion. Since we already reserved space, we know that this won't
+ // reallocate the vector.
+ if (size_t(this->end()-I) >= NumToInsert) {
+ T *OldEnd = this->end();
+ append(C, this->end()-NumToInsert, this->end());
+
+ // Copy the existing elements that get replaced.
+ std::copy_backward(I, OldEnd-NumToInsert, OldEnd);
+
+ std::copy(From, To, I);
+ return I;
+ }
+
+ // Otherwise, we're inserting more elements than exist already, and we're
+ // not inserting at the end.
+
+ // Copy over the elements that we're about to overwrite.
+ T *OldEnd = this->end();
+ this->setEnd(this->end() + NumToInsert);
+ size_t NumOverwritten = OldEnd-I;
+ this->uninitialized_copy(I, OldEnd, this->end()-NumOverwritten);
+
+ // Replace the overwritten part.
+ for (; NumOverwritten > 0; --NumOverwritten) {
+ *I = *From;
+ ++I; ++From;
+ }
+
+ // Insert the non-overwritten middle part.
+ this->uninitialized_copy(From, To, OldEnd);
+ return I;
+ }
+
+ void resize(ASTContext &C, unsigned N, const T &NV) {
+ if (N < this->size()) {
+ this->destroy_range(this->begin()+N, this->end());
+ this->setEnd(this->begin()+N);
+ } else if (N > this->size()) {
+ if (this->capacity() < N)
+ this->grow(C, N);
+ construct_range(this->end(), this->begin()+N, NV);
+ this->setEnd(this->begin()+N);
+ }
+ }
+
+private:
+ /// grow - double the size of the allocated memory, guaranteeing space for at
+ /// least one more element or MinSize if specified.
+ void grow(ASTContext &C, size_type MinSize = 1);
+
+ void construct_range(T *S, T *E, const T &Elt) {
+ for (; S != E; ++S)
+ new (S) T(Elt);
+ }
+
+ void destroy_range(T *S, T *E) {
+ while (S != E) {
+ --E;
+ E->~T();
+ }
+ }
+
+protected:
+ iterator capacity_ptr() { return (iterator)this->Capacity; }
+};
+
+// Define this out-of-line to dissuade the C++ compiler from inlining it.
+template <typename T>
+void ASTVector<T>::grow(ASTContext &C, size_t MinSize) {
+ size_t CurCapacity = Capacity-Begin;
+ size_t CurSize = size();
+ size_t NewCapacity = 2*CurCapacity;
+ if (NewCapacity < MinSize)
+ NewCapacity = MinSize;
+
+ // Allocate the memory from the ASTContext.
+ T *NewElts = new (C) T[NewCapacity];
+
+ // Copy the elements over.
+ if (llvm::is_class<T>::value) {
+ std::uninitialized_copy(Begin, End, NewElts);
+ // Destroy the original elements.
+ destroy_range(Begin, End);
+ }
+ else {
+ // Use memcpy for PODs (std::uninitialized_copy optimizes to memmove).
+ memcpy(NewElts, Begin, CurSize * sizeof(T));
+ }
+
+ C.Deallocate(Begin);
+ Begin = NewElts;
+ End = NewElts+CurSize;
+ Capacity = Begin+NewCapacity;
+}
+
+} // end: clang namespace
+#endif
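A minimal usage sketch (not part of the patch); note that, unlike SmallVector, every operation that may allocate takes the ASTContext explicitly:

#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTVector.h"

// Fill an ASTVector whose storage is backed by the ASTContext's allocator.
void collectValues(clang::ASTContext &Ctx) {
  clang::ASTVector<unsigned> V(Ctx, /*N=*/4);  // reserve space for 4 elements
  for (unsigned i = 0; i != 10; ++i)
    V.push_back(i, Ctx);                       // may grow via Ctx's allocator
  // The buffer stays with the ASTContext allocator; the destructor only runs
  // element destructors, which is a no-op for a POD element type like unsigned.
}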
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Attr.h b/contrib/llvm/tools/clang/include/clang/AST/Attr.h
new file mode 100644
index 0000000..ef1aa25
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/Attr.h
@@ -0,0 +1,254 @@
+//===--- Attr.h - Classes for representing attributes -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Attr interface and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_ATTR_H
+#define LLVM_CLANG_AST_ATTR_H
+
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/AttrKinds.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/VersionTuple.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <cstring>
+#include <algorithm>
+
+namespace clang {
+ class ASTContext;
+ class IdentifierInfo;
+ class ObjCInterfaceDecl;
+ class Expr;
+ class QualType;
+ class FunctionDecl;
+ class TypeSourceInfo;
+}
+
+// Defined in ASTContext.h
+void *operator new(size_t Bytes, const clang::ASTContext &C,
+ size_t Alignment = 16);
+// FIXME: Being forced to not have a default argument here due to redeclaration
+// rules on default arguments sucks
+void *operator new[](size_t Bytes, const clang::ASTContext &C,
+ size_t Alignment);
+
+// It is good practice to pair new/delete operators. Also, MSVC gives many
+// warnings if a matching delete overload is not declared, even though the
+// throw() spec guarantees it will not be implicitly called.
+void operator delete(void *Ptr, const clang::ASTContext &C, size_t);
+void operator delete[](void *Ptr, const clang::ASTContext &C, size_t);
+
+namespace clang {
+
+/// Attr - This represents one attribute.
+class Attr {
+private:
+ SourceRange Range;
+ unsigned AttrKind : 16;
+
+protected:
+ bool Inherited : 1;
+
+ virtual ~Attr();
+
+ void* operator new(size_t bytes) throw() {
+ llvm_unreachable("Attrs cannot be allocated with regular 'new'.");
+ }
+ void operator delete(void* data) throw() {
+ llvm_unreachable("Attrs cannot be released with regular 'delete'.");
+ }
+
+public:
+ // Forward so that the regular new and delete do not hide global ones.
+ void* operator new(size_t Bytes, ASTContext &C,
+ size_t Alignment = 16) throw() {
+ return ::operator new(Bytes, C, Alignment);
+ }
+ void operator delete(void *Ptr, ASTContext &C,
+ size_t Alignment) throw() {
+ return ::operator delete(Ptr, C, Alignment);
+ }
+
+protected:
+ Attr(attr::Kind AK, SourceRange R)
+ : Range(R), AttrKind(AK), Inherited(false) {}
+
+public:
+
+ attr::Kind getKind() const {
+ return static_cast<attr::Kind>(AttrKind);
+ }
+
+ SourceLocation getLocation() const { return Range.getBegin(); }
+ SourceRange getRange() const { return Range; }
+ void setRange(SourceRange R) { Range = R; }
+
+ bool isInherited() const { return Inherited; }
+
+ // Clone this attribute.
+ virtual Attr* clone(ASTContext &C) const = 0;
+
+ virtual bool isLateParsed() const { return false; }
+
+ // Pretty print this attribute.
+ virtual void printPretty(llvm::raw_ostream &OS, ASTContext &C) const = 0;
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Attr *) { return true; }
+};
+
+class InheritableAttr : public Attr {
+ virtual void anchor();
+protected:
+ InheritableAttr(attr::Kind AK, SourceRange R)
+ : Attr(AK, R) {}
+
+public:
+ void setInherited(bool I) { Inherited = I; }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Attr *A) {
+ return A->getKind() <= attr::LAST_INHERITABLE;
+ }
+ static bool classof(const InheritableAttr *) { return true; }
+};
+
+class InheritableParamAttr : public InheritableAttr {
+ virtual void anchor();
+protected:
+ InheritableParamAttr(attr::Kind AK, SourceRange R)
+ : InheritableAttr(AK, R) {}
+
+public:
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Attr *A) {
+ return A->getKind() <= attr::LAST_INHERITABLE_PARAM;
+ }
+ static bool classof(const InheritableParamAttr *) { return true; }
+};
+
+#include "clang/AST/Attrs.inc"
+
+/// AttrVec - A vector of Attr, which is how they are stored on the AST.
+typedef SmallVector<Attr*, 2> AttrVec;
+typedef SmallVector<const Attr*, 2> ConstAttrVec;
+
+/// DestroyAttrs - Destroy the contents of an AttrVec.
+inline void DestroyAttrs (AttrVec& V, ASTContext &C) {
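+ // Intentionally a no-op: attributes are allocated through the ASTContext
+ // operator new overloads above, so individual Attr objects are never
+ // freed here.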
+}
+
+/// specific_attr_iterator - Iterates over a subrange of an AttrVec, only
+/// providing attributes that are of a specific type.
+template <typename SpecificAttr>
+class specific_attr_iterator {
+ /// Current - The current, underlying iterator.
+ /// In order to ensure we don't dereference an invalid iterator unless
+ /// specifically requested, we don't necessarily advance this all the
+ /// way. Instead, we advance it when an operation is requested; if the
+ /// operation is acting on what should be a past-the-end iterator,
+ /// then we offer no guarantees, but this way we do not dereference a
+ /// past-the-end iterator when we move to a past-the-end position.
+ mutable AttrVec::const_iterator Current;
+
+ void AdvanceToNext() const {
+ while (!isa<SpecificAttr>(*Current))
+ ++Current;
+ }
+
+ void AdvanceToNext(AttrVec::const_iterator I) const {
+ while (Current != I && !isa<SpecificAttr>(*Current))
+ ++Current;
+ }
+
+public:
+ typedef SpecificAttr* value_type;
+ typedef SpecificAttr* reference;
+ typedef SpecificAttr* pointer;
+ typedef std::forward_iterator_tag iterator_category;
+ typedef std::ptrdiff_t difference_type;
+
+ specific_attr_iterator() : Current() { }
+ explicit specific_attr_iterator(AttrVec::const_iterator i) : Current(i) { }
+
+ reference operator*() const {
+ AdvanceToNext();
+ return cast<SpecificAttr>(*Current);
+ }
+ pointer operator->() const {
+ AdvanceToNext();
+ return cast<SpecificAttr>(*Current);
+ }
+
+ specific_attr_iterator& operator++() {
+ ++Current;
+ return *this;
+ }
+ specific_attr_iterator operator++(int) {
+ specific_attr_iterator Tmp(*this);
+ ++(*this);
+ return Tmp;
+ }
+
+ friend bool operator==(specific_attr_iterator Left,
+ specific_attr_iterator Right) {
+ if (Left.Current < Right.Current)
+ Left.AdvanceToNext(Right.Current);
+ else
+ Right.AdvanceToNext(Left.Current);
+ return Left.Current == Right.Current;
+ }
+ friend bool operator!=(specific_attr_iterator Left,
+ specific_attr_iterator Right) {
+ return !(Left == Right);
+ }
+};
+
+template <typename T>
+inline specific_attr_iterator<T> specific_attr_begin(const AttrVec& vec) {
+ return specific_attr_iterator<T>(vec.begin());
+}
+template <typename T>
+inline specific_attr_iterator<T> specific_attr_end(const AttrVec& vec) {
+ return specific_attr_iterator<T>(vec.end());
+}
+
+template <typename T>
+inline bool hasSpecificAttr(const AttrVec& vec) {
+ return specific_attr_begin<T>(vec) != specific_attr_end<T>(vec);
+}
+template <typename T>
+inline T *getSpecificAttr(const AttrVec& vec) {
+ specific_attr_iterator<T> i = specific_attr_begin<T>(vec);
+ if (i != specific_attr_end<T>(vec))
+ return *i;
+ else
+ return 0;
+}
+
+/// getMaxAttrAlignment - Returns the highest alignment value found among
+/// AlignedAttrs in an AttrVec, or 0 if there are none.
+inline unsigned getMaxAttrAlignment(const AttrVec& V, ASTContext &Ctx) {
+ unsigned Align = 0;
+ specific_attr_iterator<AlignedAttr> i(V.begin()), e(V.end());
+ for(; i != e; ++i)
+ Align = std::max(Align, i->getAlignment(Ctx));
+ return Align;
+}
+
+} // end namespace clang
+
+#endif
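A usage sketch (not part of the patch) of the filtered-iteration helpers above; AnnotateAttr is one of the generated attribute classes pulled in via Attrs.inc and is used here purely as an example:

#include "clang/AST/Attr.h"

// Count the annotate attributes in an attribute vector.
unsigned countAnnotations(const clang::AttrVec &Attrs) {
  unsigned N = 0;
  // specific_attr_iterator silently skips attributes of other kinds.
  for (clang::specific_attr_iterator<clang::AnnotateAttr>
         I = clang::specific_attr_begin<clang::AnnotateAttr>(Attrs),
         E = clang::specific_attr_end<clang::AnnotateAttr>(Attrs);
       I != E; ++I)
    ++N;
  return N;
}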
diff --git a/contrib/llvm/tools/clang/include/clang/AST/BaseSubobject.h b/contrib/llvm/tools/clang/include/clang/AST/BaseSubobject.h
new file mode 100644
index 0000000..6a036bb
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/BaseSubobject.h
@@ -0,0 +1,87 @@
+//===--- BaseSubobject.h - BaseSubobject class ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides a definition of the BaseSubobject class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_BASESUBOBJECT_H
+#define LLVM_CLANG_AST_BASESUBOBJECT_H
+
+#include "clang/AST/CharUnits.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/type_traits.h"
+
+namespace clang {
+ class CXXRecordDecl;
+
+// BaseSubobject - Uniquely identifies a direct or indirect base class.
+// Stores both the base class decl and the offset from the most derived class to
+// the base class. Used for vtable and VTT generation.
+class BaseSubobject {
+ /// Base - The base class declaration.
+ const CXXRecordDecl *Base;
+
+ /// BaseOffset - The offset from the most derived class to the base class.
+ CharUnits BaseOffset;
+
+public:
+ BaseSubobject() { }
+ BaseSubobject(const CXXRecordDecl *Base, CharUnits BaseOffset)
+ : Base(Base), BaseOffset(BaseOffset) { }
+
+ /// getBase - Returns the base class declaration.
+ const CXXRecordDecl *getBase() const { return Base; }
+
+ /// getBaseOffset - Returns the base class offset.
+ CharUnits getBaseOffset() const { return BaseOffset; }
+
+ friend bool operator==(const BaseSubobject &LHS, const BaseSubobject &RHS) {
+ return LHS.Base == RHS.Base && LHS.BaseOffset == RHS.BaseOffset;
+ }
+};
+
+} // end namespace clang
+
+namespace llvm {
+
+template<> struct DenseMapInfo<clang::BaseSubobject> {
+ static clang::BaseSubobject getEmptyKey() {
+ return clang::BaseSubobject(
+ DenseMapInfo<const clang::CXXRecordDecl *>::getEmptyKey(),
+ clang::CharUnits::fromQuantity(DenseMapInfo<int64_t>::getEmptyKey()));
+ }
+
+ static clang::BaseSubobject getTombstoneKey() {
+ return clang::BaseSubobject(
+ DenseMapInfo<const clang::CXXRecordDecl *>::getTombstoneKey(),
+ clang::CharUnits::fromQuantity(DenseMapInfo<int64_t>::getTombstoneKey()));
+ }
+
+ static unsigned getHashValue(const clang::BaseSubobject &Base) {
+ return
+ DenseMapInfo<const clang::CXXRecordDecl *>::getHashValue(Base.getBase()) ^
+ DenseMapInfo<int64_t>::getHashValue(Base.getBaseOffset().getQuantity());
+ }
+
+ static bool isEqual(const clang::BaseSubobject &LHS,
+ const clang::BaseSubobject &RHS) {
+ return LHS == RHS;
+ }
+};
+
+// It's OK to treat BaseSubobject as a POD type.
+template <> struct isPodLike<clang::BaseSubobject> {
+ static const bool value = true;
+};
+
+}
+
+#endif
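A small sketch (not part of the patch) showing the point of the DenseMapInfo specialization above: BaseSubobject can be used directly as a DenseMap key, which is how per-base data is typically indexed during vtable/VTT generation:

#include "clang/AST/BaseSubobject.h"
#include "llvm/ADT/DenseMap.h"

// Remember the index assigned to a particular base subobject.
void recordIndex(llvm::DenseMap<clang::BaseSubobject, unsigned> &Indices,
                 const clang::CXXRecordDecl *Base, clang::CharUnits Offset,
                 unsigned Index) {
  Indices[clang::BaseSubobject(Base, Offset)] = Index;
}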
diff --git a/contrib/llvm/tools/clang/include/clang/AST/BuiltinTypes.def b/contrib/llvm/tools/clang/include/clang/AST/BuiltinTypes.def
new file mode 100644
index 0000000..34e6fc5
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/BuiltinTypes.def
@@ -0,0 +1,224 @@
+//===-- BuiltinTypes.def - Metadata about BuiltinTypes ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the database about various builtin singleton types.
+//
+// BuiltinType::Id is the enumerator defining the type.
+//
+// Context.SingletonId is the global singleton of this type. Some global
+// singletons are shared by multiple types.
+//
+// BUILTIN_TYPE(Id, SingletonId) - A builtin type that has not been
+// covered by any other #define. Defining this macro covers all
+// the builtins.
+//
+// SIGNED_TYPE(Id, SingletonId) - A signed integral type.
+//
+// UNSIGNED_TYPE(Id, SingletonId) - An unsigned integral type.
+//
+// FLOATING_TYPE(Id, SingletonId) - A floating-point type.
+//
+// PLACEHOLDER_TYPE(Id, SingletonId) - A placeholder type. Placeholder
+// types are used to perform context-sensitive checking of specific
+// forms of expression.
+//
+// SHARED_SINGLETON_TYPE(Expansion) - The given expansion corresponds
+// to a builtin which uses a shared singleton type.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SIGNED_TYPE
+#define SIGNED_TYPE(Id, SingletonId) BUILTIN_TYPE(Id, SingletonId)
+#endif
+
+#ifndef UNSIGNED_TYPE
+#define UNSIGNED_TYPE(Id, SingletonId) BUILTIN_TYPE(Id, SingletonId)
+#endif
+
+#ifndef FLOATING_TYPE
+#define FLOATING_TYPE(Id, SingletonId) BUILTIN_TYPE(Id, SingletonId)
+#endif
+
+#ifndef PLACEHOLDER_TYPE
+#define PLACEHOLDER_TYPE(Id, SingletonId) BUILTIN_TYPE(Id, SingletonId)
+#endif
+
+#ifndef SHARED_SINGLETON_TYPE
+#define SHARED_SINGLETON_TYPE(Expansion) Expansion
+#endif
+
+//===- Builtin Types ------------------------------------------------------===//
+
+// void
+BUILTIN_TYPE(Void, VoidTy)
+
+//===- Unsigned Types -----------------------------------------------------===//
+
+// 'bool' in C++, '_Bool' in C99
+UNSIGNED_TYPE(Bool, BoolTy)
+
+// 'char' for targets where it's unsigned
+SHARED_SINGLETON_TYPE(UNSIGNED_TYPE(Char_U, CharTy))
+
+// 'unsigned char', explicitly qualified
+UNSIGNED_TYPE(UChar, UnsignedCharTy)
+
+// 'wchar_t' for targets where it's unsigned
+SHARED_SINGLETON_TYPE(UNSIGNED_TYPE(WChar_U, WCharTy))
+
+// 'char16_t' in C++
+UNSIGNED_TYPE(Char16, Char16Ty)
+
+// 'char32_t' in C++
+UNSIGNED_TYPE(Char32, Char32Ty)
+
+// 'unsigned short'
+UNSIGNED_TYPE(UShort, UnsignedShortTy)
+
+// 'unsigned int'
+UNSIGNED_TYPE(UInt, UnsignedIntTy)
+
+// 'unsigned long'
+UNSIGNED_TYPE(ULong, UnsignedLongTy)
+
+// 'unsigned long long'
+UNSIGNED_TYPE(ULongLong, UnsignedLongLongTy)
+
+// '__uint128_t'
+UNSIGNED_TYPE(UInt128, UnsignedInt128Ty)
+
+//===- Signed Types -------------------------------------------------------===//
+
+// 'char' for targets where it's signed
+SHARED_SINGLETON_TYPE(SIGNED_TYPE(Char_S, CharTy))
+
+// 'signed char', explicitly qualified
+SIGNED_TYPE(SChar, SignedCharTy)
+
+// 'wchar_t' for targets where it's signed
+SHARED_SINGLETON_TYPE(SIGNED_TYPE(WChar_S, WCharTy))
+
+// 'short' or 'signed short'
+SIGNED_TYPE(Short, ShortTy)
+
+// 'int' or 'signed int'
+SIGNED_TYPE(Int, IntTy)
+
+// 'long' or 'signed long'
+SIGNED_TYPE(Long, LongTy)
+
+// 'long long' or 'signed long long'
+SIGNED_TYPE(LongLong, LongLongTy)
+
+// '__int128_t'
+SIGNED_TYPE(Int128, Int128Ty)
+
+//===- Floating point types -----------------------------------------------===//
+
+// 'half' in OpenCL, '__fp16' in ARM NEON.
+FLOATING_TYPE(Half, HalfTy)
+
+// 'float'
+FLOATING_TYPE(Float, FloatTy)
+
+// 'double'
+FLOATING_TYPE(Double, DoubleTy)
+
+// 'long double'
+FLOATING_TYPE(LongDouble, LongDoubleTy)
+
+//===- Language-specific types --------------------------------------------===//
+
+// This is the type of C++0x 'nullptr'.
+BUILTIN_TYPE(NullPtr, NullPtrTy)
+
+// The primitive Objective C 'id' type. The user-visible 'id'
+// type is a typedef of an ObjCObjectPointerType to an
+// ObjCObjectType with this as its base. In fact, this only ever
+// shows up in an AST as the base type of an ObjCObjectType.
+BUILTIN_TYPE(ObjCId, ObjCBuiltinIdTy)
+
+// The primitive Objective C 'Class' type. The user-visible
+// 'Class' type is a typedef of an ObjCObjectPointerType to an
+// ObjCObjectType with this as its base. In fact, this only ever
+// shows up in an AST as the base type of an ObjCObjectType.
+BUILTIN_TYPE(ObjCClass, ObjCBuiltinClassTy)
+
+// The primitive Objective C 'SEL' type. The user-visible 'SEL'
+// type is a typedef of a PointerType to this.
+BUILTIN_TYPE(ObjCSel, ObjCBuiltinSelTy)
+
+// This represents the type of an expression whose type is
+// totally unknown, e.g. 'T::foo'. It is permitted for this to
+// appear in situations where the structure of the type is
+// theoretically deducible.
+BUILTIN_TYPE(Dependent, DependentTy)
+
+// The type of an unresolved overload set. A placeholder type.
+// Expressions with this type have one of the following basic
+// forms, with parentheses generally permitted:
+// foo # possibly qualified, not if an implicit access
+// foo<int> # possibly qualified, not if an implicit access
+// &foo # possibly qualified, not if an implicit access
+// x->foo # only if might be a static member function
+// &x->foo # only if might be a static member function
+// &Class::foo # when a pointer-to-member; sub-expr also has this type
+// OverloadExpr::find can be used to analyze the expression.
+//
+// Overload should be the first placeholder type, or else change
+// BuiltinType::isNonOverloadPlaceholderType()
+PLACEHOLDER_TYPE(Overload, OverloadTy)
+
+// The type of a bound C++ non-static member function.
+// A placeholder type. Expressions with this type have one of the
+// following basic forms:
+// foo # if an implicit access
+// x->foo # if only contains non-static members
+PLACEHOLDER_TYPE(BoundMember, BoundMemberTy)
+
+// The type of an expression which refers to a pseudo-object,
+// such as those introduced by Objective C's @property or
+// VS.NET's __property declarations. A placeholder type. The
+// pseudo-object is actually accessed by emitting a call to
+// some sort of function or method; typically there is a pair
+// of a setter and a getter, with the setter used if the
+// pseudo-object reference is used syntactically as the
+// left-hand-side of an assignment operator.
+//
+// A pseudo-object reference naming an Objective-C @property is
+// always a dot access with a base of object-pointer type,
+// e.g. 'x.foo'.
+//
+// In VS.NET, a __property declaration creates an implicit
+// member with an associated name, which can then be named
+// in any of the normal ways an ordinary member could be.
+PLACEHOLDER_TYPE(PseudoObject, PseudoObjectTy)
+
+// __builtin_any_type. A placeholder type. Useful for clients
+// like debuggers that don't know what type to give something.
+// Only a small number of operations are valid on expressions of
+// unknown type, most notably explicit casts.
+PLACEHOLDER_TYPE(UnknownAny, UnknownAnyTy)
+
+// The type of a cast which, in ARC, would normally require a
+// __bridge, but which might be okay depending on the immediate
+// context.
+PLACEHOLDER_TYPE(ARCUnbridgedCast, ARCUnbridgedCastTy)
+
+#ifdef LAST_BUILTIN_TYPE
+LAST_BUILTIN_TYPE(ARCUnbridgedCast)
+#undef LAST_BUILTIN_TYPE
+#endif
+
+#undef SHARED_SINGLETON_TYPE
+#undef PLACEHOLDER_TYPE
+#undef FLOATING_TYPE
+#undef SIGNED_TYPE
+#undef UNSIGNED_TYPE
+#undef BUILTIN_TYPE
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h b/contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h
new file mode 100644
index 0000000..44c554b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/CXXInheritance.h
@@ -0,0 +1,370 @@
+//===------ CXXInheritance.h - C++ Inheritance ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides routines that help analyzing C++ inheritance hierarchies.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_CXXINHERITANCE_H
+#define LLVM_CLANG_AST_CXXINHERITANCE_H
+
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeOrdering.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include <list>
+#include <map>
+#include <cassert>
+
+namespace clang {
+
+class CXXBaseSpecifier;
+class CXXMethodDecl;
+class CXXRecordDecl;
+class NamedDecl;
+
+/// \brief Represents an element in a path from a derived class to a
+/// base class.
+///
+/// Each step in the path references the link from a
+/// derived class to one of its direct base classes, along with a
+/// base "number" that identifies which base subobject of the
+/// original derived class we are referencing.
+struct CXXBasePathElement {
+ /// \brief The base specifier that states the link from a derived
+ /// class to a base class, which will be followed by this base
+ /// path element.
+ const CXXBaseSpecifier *Base;
+
+ /// \brief The record decl of the class that the base is a base of.
+ const CXXRecordDecl *Class;
+
+ /// \brief Identifies which base class subobject (of type
+ /// \c Base->getType()) this base path element refers to.
+ ///
+ /// This value is only valid if \c !Base->isVirtual(), because there
+ /// is no base numbering for the zero or one virtual bases of a
+ /// given type.
+ int SubobjectNumber;
+};
+
+/// \brief Represents a path from a specific derived class
+/// (which is not represented as part of the path) to a particular
+/// (direct or indirect) base class subobject.
+///
+/// Individual elements in the path are described by the \c CXXBasePathElement
+/// structure, which captures both the link from a derived class to one of its
+/// direct bases and identification describing which base class
+/// subobject is being used.
+class CXXBasePath : public SmallVector<CXXBasePathElement, 4> {
+public:
+ CXXBasePath() : Access(AS_public) {}
+
+ /// \brief The access along this inheritance path. This is only
+ /// calculated when recording paths. AS_none is a special value
+ /// used to indicate a path which permits no legal access.
+ AccessSpecifier Access;
+
+ /// \brief The set of declarations found inside this base class
+ /// subobject.
+ DeclContext::lookup_result Decls;
+
+ void clear() {
+ SmallVectorImpl<CXXBasePathElement>::clear();
+ Access = AS_public;
+ }
+};
+
+/// BasePaths - Represents the set of paths from a derived class to
+/// one of its (direct or indirect) bases. For example, given the
+/// following class hierarchy:
+///
+/// @code
+/// class A { };
+/// class B : public A { };
+/// class C : public A { };
+/// class D : public B, public C { };
+/// @endcode
+///
+/// There are two potential BasePaths to represent paths from D to a
+/// base subobject of type A. One path is (D,0) -> (B,0) -> (A,0)
+/// and another is (D,0)->(C,0)->(A,1). These two paths actually
+/// refer to two different base class subobjects of the same type,
+/// so the BasePaths object refers to an ambiguous path. On the
+/// other hand, consider the following class hierarchy:
+///
+/// @code
+/// class A { };
+/// class B : public virtual A { };
+/// class C : public virtual A { };
+/// class D : public B, public C { };
+/// @endcode
+///
+/// Here, there are two potential BasePaths again, (D, 0) -> (B, 0)
+/// -> (A,v) and (D, 0) -> (C, 0) -> (A, v), but since both of them
+/// refer to the same base class subobject of type A (the virtual
+/// one), there is no ambiguity.
+class CXXBasePaths {
+ /// \brief The type from which this search originated.
+ CXXRecordDecl *Origin;
+
+ /// Paths - The actual set of paths that can be taken from the
+ /// derived class to the same base class.
+ std::list<CXXBasePath> Paths;
+
+ /// ClassSubobjects - Records the class subobjects for each class
+ /// type that we've seen. The first element in the pair says
+ /// whether we found a path to a virtual base for that class type,
+ /// while the element contains the number of non-virtual base
+ /// class subobjects for that class type. The key of the map is
+ /// the cv-unqualified canonical type of the base class subobject.
+ std::map<QualType, std::pair<bool, unsigned>, QualTypeOrdering>
+ ClassSubobjects;
+
+ /// FindAmbiguities - Whether Sema::IsDerivedFrom should try to find
+ /// ambiguous paths while it is looking for a path from a derived
+ /// type to a base type.
+ bool FindAmbiguities;
+
+ /// RecordPaths - Whether Sema::IsDerivedFrom should record paths
+ /// while it is determining whether there are paths from a derived
+ /// type to a base type.
+ bool RecordPaths;
+
+ /// DetectVirtual - Whether Sema::IsDerivedFrom should abort the search
+ /// if it finds a path that goes across a virtual base. The virtual class
+ /// is also recorded.
+ bool DetectVirtual;
+
+ /// ScratchPath - A BasePath that is used by Sema::lookupInBases
+ /// to help build the set of paths.
+ CXXBasePath ScratchPath;
+
+ /// DetectedVirtual - The base class that is virtual.
+ const RecordType *DetectedVirtual;
+
+ /// \brief Array of the declarations that have been found. This
+ /// array is constructed only if needed, e.g., to iterate over the
+ /// results within LookupResult.
+ NamedDecl **DeclsFound;
+ unsigned NumDeclsFound;
+
+ friend class CXXRecordDecl;
+
+ void ComputeDeclsFound();
+
+ bool lookupInBases(ASTContext &Context,
+ const CXXRecordDecl *Record,
+ CXXRecordDecl::BaseMatchesCallback *BaseMatches,
+ void *UserData);
+public:
+ typedef std::list<CXXBasePath>::iterator paths_iterator;
+ typedef std::list<CXXBasePath>::const_iterator const_paths_iterator;
+ typedef NamedDecl **decl_iterator;
+
+ /// BasePaths - Construct a new BasePaths structure to record the
+ /// paths for a derived-to-base search.
+ explicit CXXBasePaths(bool FindAmbiguities = true,
+ bool RecordPaths = true,
+ bool DetectVirtual = true)
+ : FindAmbiguities(FindAmbiguities), RecordPaths(RecordPaths),
+ DetectVirtual(DetectVirtual), DetectedVirtual(0), DeclsFound(0),
+ NumDeclsFound(0) { }
+
+ ~CXXBasePaths() { delete [] DeclsFound; }
+
+ paths_iterator begin() { return Paths.begin(); }
+ paths_iterator end() { return Paths.end(); }
+ const_paths_iterator begin() const { return Paths.begin(); }
+ const_paths_iterator end() const { return Paths.end(); }
+
+ CXXBasePath& front() { return Paths.front(); }
+ const CXXBasePath& front() const { return Paths.front(); }
+
+ decl_iterator found_decls_begin();
+ decl_iterator found_decls_end();
+
+ /// \brief Determine whether the path from the most-derived type to the
+ /// given base type is ambiguous (i.e., it refers to multiple subobjects of
+ /// the same base type).
+ bool isAmbiguous(CanQualType BaseType);
+
+ /// \brief Whether we are finding multiple paths to detect ambiguities.
+ bool isFindingAmbiguities() const { return FindAmbiguities; }
+
+ /// \brief Whether we are recording paths.
+ bool isRecordingPaths() const { return RecordPaths; }
+
+ /// \brief Specify whether we should be recording paths or not.
+ void setRecordingPaths(bool RP) { RecordPaths = RP; }
+
+ /// \brief Whether we are detecting virtual bases.
+ bool isDetectingVirtual() const { return DetectVirtual; }
+
+ /// \brief The virtual base discovered on the path (if we are merely
+ /// detecting virtuals).
+ const RecordType* getDetectedVirtual() const {
+ return DetectedVirtual;
+ }
+
+ /// \brief Retrieve the type from which this base-paths search
+ /// began.
+ CXXRecordDecl *getOrigin() const { return Origin; }
+ void setOrigin(CXXRecordDecl *Rec) { Origin = Rec; }
+
+ /// \brief Clear the base-paths results.
+ void clear();
+
+ /// \brief Swap this data structure's contents with another CXXBasePaths
+ /// object.
+ void swap(CXXBasePaths &Other);
+};
+
+/// \brief Uniquely identifies a virtual method within a class
+/// hierarchy by the method itself and a class subobject number.
+struct UniqueVirtualMethod {
+ UniqueVirtualMethod() : Method(0), Subobject(0), InVirtualSubobject(0) { }
+
+ UniqueVirtualMethod(CXXMethodDecl *Method, unsigned Subobject,
+ const CXXRecordDecl *InVirtualSubobject)
+ : Method(Method), Subobject(Subobject),
+ InVirtualSubobject(InVirtualSubobject) { }
+
+ /// \brief The overriding virtual method.
+ CXXMethodDecl *Method;
+
+ /// \brief The subobject in which the overriding virtual method
+ /// resides.
+ unsigned Subobject;
+
+ /// \brief The virtual base class subobject of which this overridden
+ /// virtual method is a part. Note that this records the closest
+ /// derived virtual base class subobject.
+ const CXXRecordDecl *InVirtualSubobject;
+
+ friend bool operator==(const UniqueVirtualMethod &X,
+ const UniqueVirtualMethod &Y) {
+ return X.Method == Y.Method && X.Subobject == Y.Subobject &&
+ X.InVirtualSubobject == Y.InVirtualSubobject;
+ }
+
+ friend bool operator!=(const UniqueVirtualMethod &X,
+ const UniqueVirtualMethod &Y) {
+ return !(X == Y);
+ }
+};
+
+/// \brief The set of methods that override a given virtual method in
+/// each subobject where it occurs.
+///
+/// The first part of the pair is the subobject in which the
+/// overridden virtual function occurs, while the second part of the
+/// pair is the virtual method that overrides it (including the
+/// subobject in which that virtual function occurs).
+class OverridingMethods {
+ llvm::DenseMap<unsigned, SmallVector<UniqueVirtualMethod, 4> >
+ Overrides;
+
+public:
+ // Iterate over the set of subobjects that have overriding methods.
+ typedef llvm::DenseMap<unsigned, SmallVector<UniqueVirtualMethod, 4> >
+ ::iterator iterator;
+ typedef llvm::DenseMap<unsigned, SmallVector<UniqueVirtualMethod, 4> >
+ ::const_iterator const_iterator;
+ iterator begin() { return Overrides.begin(); }
+ const_iterator begin() const { return Overrides.begin(); }
+ iterator end() { return Overrides.end(); }
+ const_iterator end() const { return Overrides.end(); }
+ unsigned size() const { return Overrides.size(); }
+
+ // Iterate over the set of overriding virtual methods in a given
+ // subobject.
+ typedef SmallVector<UniqueVirtualMethod, 4>::iterator
+ overriding_iterator;
+ typedef SmallVector<UniqueVirtualMethod, 4>::const_iterator
+ overriding_const_iterator;
+
+ // Add a new overriding method for a particular subobject.
+ void add(unsigned OverriddenSubobject, UniqueVirtualMethod Overriding);
+
+ // Add all of the overriding methods from "other" into overrides for
+ // this method. Used when merging the overrides from multiple base
+ // class subobjects.
+ void add(const OverridingMethods &Other);
+
+ // Replace all overriding virtual methods in all subobjects with the
+ // given virtual method.
+ void replaceAll(UniqueVirtualMethod Overriding);
+};
+
+/// \brief A mapping from each virtual member function to its set of
+/// final overriders.
+///
+/// Within a class hierarchy for a given derived class, each virtual
+/// member function in that hierarchy has one or more "final
+/// overriders" (C++ [class.virtual]p2). A final overrider for a
+/// virtual function "f" is the virtual function that will actually be
+/// invoked when dispatching a call to "f" through the
+/// vtable. Well-formed classes have a single final overrider for each
+/// virtual function; in abstract classes, the final overrider for at
+/// least one virtual function is a pure virtual function. Due to
+/// multiple, virtual inheritance, it is possible for a class to have
+/// more than one final overrider. Although this is an error (per C++
+/// [class.virtual]p2), it is not considered an error here: the final
+/// overrider map can represent multiple final overriders for a
+/// method, and it is up to the client to determine whether they are
+/// a problem. For example, the following class \c D has two final
+/// overriders for the virtual function \c A::f(), one in \c C and one
+/// in \c D:
+///
+/// \code
+/// struct A { virtual void f(); };
+/// struct B : virtual A { virtual void f(); };
+/// struct C : virtual A { virtual void f(); };
+/// struct D : B, C { };
+/// \endcode
+///
+/// This data structure contains a mapping from every virtual
+/// function *that does not override an existing virtual function* and
+/// in every subobject where that virtual function occurs to the set
+/// of virtual functions that override it. Thus, the same virtual
+/// function \c A::f can actually occur in multiple subobjects of type
+/// \c A due to multiple inheritance, and may be overridden by
+/// different virtual functions in each, as in the following example:
+///
+/// \code
+/// struct A { virtual void f(); };
+/// struct B : A { virtual void f(); };
+/// struct C : A { virtual void f(); };
+/// struct D : B, C { };
+/// \endcode
+///
+/// Unlike in the previous example, where the virtual functions \c
+/// B::f and \c C::f both overrode \c A::f in the same subobject of
+/// type \c A, in this example the two virtual functions both override
+/// \c A::f but in *different* subobjects of type A. This is
+/// represented by numbering the subobjects in which the overridden
+/// and the overriding virtual member functions are located. Subobject
+/// 0 represents the virtual base class subobject of that type, while
+/// subobject numbers greater than 0 refer to non-virtual base class
+/// subobjects of that type.
+class CXXFinalOverriderMap
+ : public llvm::DenseMap<const CXXMethodDecl *, OverridingMethods> { };
+
+/// \brief A set of all the primary bases for a class.
+class CXXIndirectPrimaryBaseSet
+ : public llvm::SmallSet<const CXXRecordDecl*, 32> { };
+
+} // end namespace clang
+
+#endif
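A sketch (not part of the patch) of how CXXBasePaths is typically filled in and inspected; CXXRecordDecl::isDerivedFrom(Base, Paths) is assumed to be the DeclCXX.h entry point that performs the search described above:

#include "clang/AST/CXXInheritance.h"
#include "clang/AST/DeclCXX.h"

// Record every derived-to-base path from Derived down to Base.
bool collectPaths(clang::CXXRecordDecl *Derived, clang::CXXRecordDecl *Base) {
  clang::CXXBasePaths Paths(/*FindAmbiguities=*/true,
                            /*RecordPaths=*/true,
                            /*DetectVirtual=*/false);
  if (!Derived->isDerivedFrom(Base, Paths))
    return false;                    // Base is not a base class at all
  for (clang::CXXBasePaths::paths_iterator P = Paths.begin(), E = Paths.end();
       P != E; ++P) {
    // Each CXXBasePath element records one derived-to-base link; Access is
    // the access specifier combined along the whole path.
    (void)P->Access;
  }
  return true;
}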
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h b/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h
new file mode 100644
index 0000000..6cce888
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/CanonicalType.h
@@ -0,0 +1,778 @@
+//===-- CanonicalType.h - C Language Family Type Representation -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CanQual class template, which provides access to
+// canonical types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_CANONICAL_TYPE_H
+#define LLVM_CLANG_AST_CANONICAL_TYPE_H
+
+#include "clang/AST/Type.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/type_traits.h"
+#include <iterator>
+
+namespace clang {
+
+template<typename T> class CanProxy;
+template<typename T> struct CanProxyAdaptor;
+
+//----------------------------------------------------------------------------//
+// Canonical, qualified type template
+//----------------------------------------------------------------------------//
+
+/// \brief Represents a canonical, potentially-qualified type.
+///
+/// The CanQual template is a lightweight smart pointer that provides access
+/// to the canonical representation of a type, where all typedefs and other
+/// syntactic sugar has been eliminated. A CanQualType may also have various
+/// qualifiers (const, volatile, restrict) attached to it.
+///
+/// The template type parameter @p T is one of the Type classes (PointerType,
+/// BuiltinType, etc.). The type stored within @c CanQual<T> will be of that
+/// type (or some subclass of that type). The typedef @c CanQualType is just
+/// a shorthand for @c CanQual<Type>.
+///
+/// An instance of @c CanQual<T> can be implicitly converted to a
+/// @c CanQual<U> when T is derived from U, which essentially provides an
+/// implicit upcast. For example, @c CanQual<LValueReferenceType> can be
+/// converted to @c CanQual<ReferenceType>. Note that any @c CanQual type can
+/// be implicitly converted to a QualType, but the reverse operation requires
+/// a call to ASTContext::getCanonicalType().
+///
+///
+template<typename T = Type>
+class CanQual {
+ /// \brief The actual, canonical type.
+ QualType Stored;
+
+public:
+ /// \brief Constructs a NULL canonical type.
+ CanQual() : Stored() { }
+
+ /// \brief Converting constructor that permits implicit upcasting of
+ /// canonical type pointers.
+ template<typename U>
+ CanQual(const CanQual<U>& Other,
+ typename llvm::enable_if<llvm::is_base_of<T, U>, int>::type = 0);
+
+ /// \brief Retrieve the underlying type pointer, which refers to a
+ /// canonical type.
+ ///
+ /// The underlying pointer must not be NULL.
+ const T *getTypePtr() const { return cast<T>(Stored.getTypePtr()); }
+
+ /// \brief Retrieve the underlying type pointer, which refers to a
+ /// canonical type, or NULL.
+ ///
+ const T *getTypePtrOrNull() const {
+ return cast_or_null<T>(Stored.getTypePtrOrNull());
+ }
+
+ /// \brief Implicit conversion to a qualified type.
+ operator QualType() const { return Stored; }
+
+ /// \brief Implicit conversion to bool.
+ operator bool() const { return !isNull(); }
+
+ bool isNull() const {
+ return Stored.isNull();
+ }
+
+ SplitQualType split() const { return Stored.split(); }
+
+ /// \brief Retrieve a canonical type pointer with a different static type,
+ /// upcasting or downcasting as needed.
+ ///
+ /// The getAs() function is typically used to try to downcast to a
+ /// more specific (canonical) type in the type system. For example:
+ ///
+ /// @code
+ /// void f(CanQual<Type> T) {
+ /// if (CanQual<PointerType> Ptr = T->getAs<PointerType>()) {
+ /// // look at Ptr's pointee type
+ /// }
+ /// }
+ /// @endcode
+ ///
+ /// \returns A proxy pointer to the same type, but with the specified
+ /// static type (@p U). If the dynamic type is not the specified static type
+ /// or a derived class thereof, a NULL canonical type.
+ template<typename U> CanProxy<U> getAs() const;
+
+ template<typename U> CanProxy<U> castAs() const;
+
+ /// \brief Overloaded arrow operator that produces a canonical type
+ /// proxy.
+ CanProxy<T> operator->() const;
+
+ /// \brief Retrieve all qualifiers.
+ Qualifiers getQualifiers() const { return Stored.getLocalQualifiers(); }
+
+ /// \brief Retrieve the const/volatile/restrict qualifiers.
+ unsigned getCVRQualifiers() const { return Stored.getLocalCVRQualifiers(); }
+
+ /// \brief Determines whether this type has any qualifiers
+ bool hasQualifiers() const { return Stored.hasLocalQualifiers(); }
+
+ bool isConstQualified() const {
+ return Stored.isLocalConstQualified();
+ }
+ bool isVolatileQualified() const {
+ return Stored.isLocalVolatileQualified();
+ }
+ bool isRestrictQualified() const {
+ return Stored.isLocalRestrictQualified();
+ }
+
+ /// \brief Determines if this canonical type is furthermore
+ /// canonical as a parameter. The parameter-canonicalization
+ /// process decays arrays to pointers and drops top-level qualifiers.
+ bool isCanonicalAsParam() const {
+ return Stored.isCanonicalAsParam();
+ }
+
+ /// \brief Retrieve the unqualified form of this type.
+ CanQual<T> getUnqualifiedType() const;
+
+ /// \brief Retrieves a version of this type with const applied.
+ /// Note that this does not always yield a canonical type.
+ QualType withConst() const {
+ return Stored.withConst();
+ }
+
+ /// \brief Determines whether this canonical type is more qualified than
+ /// the @p Other canonical type.
+ bool isMoreQualifiedThan(CanQual<T> Other) const {
+ return Stored.isMoreQualifiedThan(Other.Stored);
+ }
+
+ /// \brief Determines whether this canonical type is at least as qualified as
+ /// the @p Other canonical type.
+ bool isAtLeastAsQualifiedAs(CanQual<T> Other) const {
+ return Stored.isAtLeastAsQualifiedAs(Other.Stored);
+ }
+
+ /// \brief If the canonical type is a reference type, returns the type that
+ /// it refers to; otherwise, returns the type itself.
+ CanQual<Type> getNonReferenceType() const;
+
+ /// \brief Retrieve the internal representation of this canonical type.
+ void *getAsOpaquePtr() const { return Stored.getAsOpaquePtr(); }
+
+ /// \brief Construct a canonical type from its internal representation.
+ static CanQual<T> getFromOpaquePtr(void *Ptr);
+
+ /// \brief Builds a canonical type from a QualType.
+ ///
+ /// This routine is inherently unsafe, because it requires the user to
+ /// ensure that the given type is a canonical type with the correct
+ /// (dynamic) type.
+ static CanQual<T> CreateUnsafe(QualType Other);
+
+ void dump() const { Stored.dump(); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddPointer(getAsOpaquePtr());
+ }
+};
+
+template<typename T, typename U>
+inline bool operator==(CanQual<T> x, CanQual<U> y) {
+ return x.getAsOpaquePtr() == y.getAsOpaquePtr();
+}
+
+template<typename T, typename U>
+inline bool operator!=(CanQual<T> x, CanQual<U> y) {
+ return x.getAsOpaquePtr() != y.getAsOpaquePtr();
+}
+
+/// \brief Represents a canonical, potentially-qualified type.
+typedef CanQual<Type> CanQualType;
+
+inline CanQualType Type::getCanonicalTypeUnqualified() const {
+ return CanQualType::CreateUnsafe(getCanonicalTypeInternal());
+}
+
+inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ CanQualType T) {
+ DB << static_cast<QualType>(T);
+ return DB;
+}
+
+//----------------------------------------------------------------------------//
+// Internal proxy classes used by canonical types
+//----------------------------------------------------------------------------//
+
+#define LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(Accessor) \
+CanQualType Accessor() const { \
+return CanQualType::CreateUnsafe(this->getTypePtr()->Accessor()); \
+}
+
+#define LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(Type, Accessor) \
+Type Accessor() const { return this->getTypePtr()->Accessor(); }
+
+/// \brief Base class of all canonical proxy types, which is responsible for
+/// storing the underlying canonical type and providing basic conversions.
+template<typename T>
+class CanProxyBase {
+protected:
+ CanQual<T> Stored;
+
+public:
+ /// \brief Retrieve the pointer to the underlying Type
+ const T *getTypePtr() const { return Stored.getTypePtr(); }
+
+ /// \brief Implicit conversion to the underlying pointer.
+ ///
+ /// Also provides the ability to use canonical type proxies in a Boolean
+ /// context, e.g.,
+ /// @code
+ /// if (CanQual<PointerType> Ptr = T->getAs<PointerType>()) { ... }
+ /// @endcode
+ operator const T*() const { return this->Stored.getTypePtrOrNull(); }
+
+ /// \brief Try to convert the given canonical type to a specific structural
+ /// type.
+ template<typename U> CanProxy<U> getAs() const {
+ return this->Stored.template getAs<U>();
+ }
+
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(Type::TypeClass, getTypeClass)
+
+ // Type predicates
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isObjectType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isIncompleteType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isIncompleteOrObjectType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isVariablyModifiedType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isIntegerType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isEnumeralType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isBooleanType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isCharType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isWideCharType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isIntegralType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isIntegralOrEnumerationType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isRealFloatingType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isComplexType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isAnyComplexType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isFloatingType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isRealType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isArithmeticType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isVoidType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isDerivedType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isScalarType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isAggregateType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isAnyPointerType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isVoidPointerType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isFunctionPointerType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isMemberFunctionPointerType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isClassType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isStructureType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isStructureOrClassType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isUnionType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isComplexIntegerType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isNullPtrType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isDependentType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isOverloadableType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isArrayType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, hasPointerRepresentation)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, hasObjCPointerRepresentation)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, hasIntegerRepresentation)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, hasSignedIntegerRepresentation)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, hasUnsignedIntegerRepresentation)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, hasFloatingRepresentation)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isPromotableIntegerType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isSignedIntegerType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isUnsignedIntegerType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isSignedIntegerOrEnumerationType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isUnsignedIntegerOrEnumerationType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isConstantSizeType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isSpecifierType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(CXXRecordDecl*, getAsCXXRecordDecl)
+
+ /// \brief Retrieve the proxy-adaptor type.
+ ///
+ /// This arrow operator is used when CanProxyAdaptor has been specialized
+ /// for the given type T. In that case, we reference members of the
+ /// CanProxyAdaptor specialization. Otherwise, this operator will be hidden
+ /// by the arrow operator in the primary CanProxyAdaptor template.
+ const CanProxyAdaptor<T> *operator->() const {
+ return static_cast<const CanProxyAdaptor<T> *>(this);
+ }
+};
+
+/// \brief Replaceable canonical proxy adaptor class that provides the link
+/// between a canonical type and the accessors of the type.
+///
+/// The CanProxyAdaptor is a replaceable class template that is instantiated
+/// as part of each canonical proxy type. The primary template merely provides
+/// redirection to the underlying type (T), e.g., @c PointerType. One can
+/// provide specializations of this class template for each underlying type
+/// that provide accessors returning canonical types (@c CanQualType) rather
+/// than the more typical @c QualType, to propagate the notion of "canonical"
+/// through the system.
+template<typename T>
+struct CanProxyAdaptor : CanProxyBase<T> { };
+
+/// \brief Canonical proxy type returned when retrieving the members of a
+/// canonical type or as the result of the @c CanQual<T>::getAs member
+/// function.
+///
+/// The CanProxy type mainly exists as a proxy through which operator-> will
+/// look to either map down to a raw T* (e.g., PointerType*) or to a proxy
+/// type that provides canonical-type access to the fields of the type.
+template<typename T>
+class CanProxy : public CanProxyAdaptor<T> {
+public:
+ /// \brief Build a NULL proxy.
+ CanProxy() { }
+
+ /// \brief Build a proxy to the given canonical type.
+ CanProxy(CanQual<T> Stored) { this->Stored = Stored; }
+
+ /// \brief Implicit conversion to the stored canonical type.
+ operator CanQual<T>() const { return this->Stored; }
+};
+
+} // end namespace clang
+
+namespace llvm {
+
+/// Implement simplify_type for CanQual<T>, so that we can dyn_cast from
+/// CanQual<T> to a specific Type class. We'd prefer isa/dyn_cast/cast/etc.
+/// to return smart pointers (proxies?).
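+///
+/// For example (an illustrative sketch; assumes the stored type is non-null):
+/// @code
+/// void inspect(clang::CanQualType T) {
+///   if (const clang::BuiltinType *BT = dyn_cast<clang::BuiltinType>(T))
+///     (void)BT->getKind();
+/// }
+/// @endcode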
+template<typename T>
+struct simplify_type<const ::clang::CanQual<T> > {
+ typedef const T *SimpleType;
+ static SimpleType getSimplifiedValue(const ::clang::CanQual<T> &Val) {
+ return Val.getTypePtr();
+ }
+};
+template<typename T>
+struct simplify_type< ::clang::CanQual<T> >
+: public simplify_type<const ::clang::CanQual<T> > {};
+
+// Teach SmallPtrSet that CanQual<T> is "basically a pointer".
+template<typename T>
+class PointerLikeTypeTraits<clang::CanQual<T> > {
+public:
+ static inline void *getAsVoidPointer(clang::CanQual<T> P) {
+ return P.getAsOpaquePtr();
+ }
+ static inline clang::CanQual<T> getFromVoidPointer(void *P) {
+ return clang::CanQual<T>::getFromOpaquePtr(P);
+ }
+ // qualifier information is encoded in the low bits.
+ enum { NumLowBitsAvailable = 0 };
+};
+
+} // end namespace llvm
+
+namespace clang {
+
+//----------------------------------------------------------------------------//
+// Canonical proxy adaptors for canonical type nodes.
+//----------------------------------------------------------------------------//
+
+/// \brief Iterator adaptor that turns an iterator over canonical QualTypes
+/// into an iterator over CanQualTypes.
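+///
+/// For example (an illustrative sketch), iterating the canonical parameter
+/// types of a canonical function prototype:
+/// @code
+/// void g(CanQual<FunctionProtoType> Proto) {
+///   for (CanProxyAdaptor<FunctionProtoType>::arg_type_iterator
+///          I = Proto->arg_type_begin(), E = Proto->arg_type_end();
+///        I != E; ++I) {
+///     CanQualType Arg = *I; // each element is already canonical
+///   }
+/// }
+/// @endcode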
+template<typename InputIterator>
+class CanTypeIterator {
+ InputIterator Iter;
+
+public:
+ typedef CanQualType value_type;
+ typedef value_type reference;
+ typedef CanProxy<Type> pointer;
+ typedef typename std::iterator_traits<InputIterator>::difference_type
+ difference_type;
+ typedef typename std::iterator_traits<InputIterator>::iterator_category
+ iterator_category;
+
+ CanTypeIterator() : Iter() { }
+ explicit CanTypeIterator(InputIterator Iter) : Iter(Iter) { }
+
+ // Input iterator
+ reference operator*() const {
+ return CanQualType::CreateUnsafe(*Iter);
+ }
+
+ pointer operator->() const;
+
+ CanTypeIterator &operator++() {
+ ++Iter;
+ return *this;
+ }
+
+ CanTypeIterator operator++(int) {
+ CanTypeIterator Tmp(*this);
+ ++Iter;
+ return Tmp;
+ }
+
+ friend bool operator==(const CanTypeIterator& X, const CanTypeIterator &Y) {
+ return X.Iter == Y.Iter;
+ }
+ friend bool operator!=(const CanTypeIterator& X, const CanTypeIterator &Y) {
+ return X.Iter != Y.Iter;
+ }
+
+ // Bidirectional iterator
+ CanTypeIterator &operator--() {
+ --Iter;
+ return *this;
+ }
+
+ CanTypeIterator operator--(int) {
+ CanTypeIterator Tmp(*this);
+ --Iter;
+ return Tmp;
+ }
+
+ // Random access iterator
+ reference operator[](difference_type n) const {
+ return CanQualType::CreateUnsafe(Iter[n]);
+ }
+
+ CanTypeIterator &operator+=(difference_type n) {
+ Iter += n;
+ return *this;
+ }
+
+ CanTypeIterator &operator-=(difference_type n) {
+ Iter -= n;
+ return *this;
+ }
+
+ friend CanTypeIterator operator+(CanTypeIterator X, difference_type n) {
+ X += n;
+ return X;
+ }
+
+ friend CanTypeIterator operator+(difference_type n, CanTypeIterator X) {
+ X += n;
+ return X;
+ }
+
+ friend CanTypeIterator operator-(CanTypeIterator X, difference_type n) {
+ X -= n;
+ return X;
+ }
+
+ friend difference_type operator-(const CanTypeIterator &X,
+ const CanTypeIterator &Y) {
+ return X.Iter - Y.Iter;
+ }
+};
+
+template<>
+struct CanProxyAdaptor<ComplexType> : public CanProxyBase<ComplexType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getElementType)
+};
+
+template<>
+struct CanProxyAdaptor<PointerType> : public CanProxyBase<PointerType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getPointeeType)
+};
+
+template<>
+struct CanProxyAdaptor<BlockPointerType>
+ : public CanProxyBase<BlockPointerType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getPointeeType)
+};
+
+template<>
+struct CanProxyAdaptor<ReferenceType> : public CanProxyBase<ReferenceType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getPointeeType)
+};
+
+template<>
+struct CanProxyAdaptor<LValueReferenceType>
+ : public CanProxyBase<LValueReferenceType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getPointeeType)
+};
+
+template<>
+struct CanProxyAdaptor<RValueReferenceType>
+ : public CanProxyBase<RValueReferenceType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getPointeeType)
+};
+
+template<>
+struct CanProxyAdaptor<MemberPointerType>
+ : public CanProxyBase<MemberPointerType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getPointeeType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(const Type *, getClass)
+};
+
+template<>
+struct CanProxyAdaptor<ArrayType> : public CanProxyBase<ArrayType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getElementType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(ArrayType::ArraySizeModifier,
+ getSizeModifier)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(Qualifiers, getIndexTypeQualifiers)
+};
+
+template<>
+struct CanProxyAdaptor<ConstantArrayType>
+ : public CanProxyBase<ConstantArrayType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getElementType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(ArrayType::ArraySizeModifier,
+ getSizeModifier)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(Qualifiers, getIndexTypeQualifiers)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(const llvm::APInt &, getSize)
+};
+
+template<>
+struct CanProxyAdaptor<IncompleteArrayType>
+ : public CanProxyBase<IncompleteArrayType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getElementType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(ArrayType::ArraySizeModifier,
+ getSizeModifier)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(Qualifiers, getIndexTypeQualifiers)
+};
+
+template<>
+struct CanProxyAdaptor<VariableArrayType>
+ : public CanProxyBase<VariableArrayType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getElementType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(ArrayType::ArraySizeModifier,
+ getSizeModifier)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(Qualifiers, getIndexTypeQualifiers)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(Expr *, getSizeExpr)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(SourceRange, getBracketsRange)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(SourceLocation, getLBracketLoc)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(SourceLocation, getRBracketLoc)
+};
+
+template<>
+struct CanProxyAdaptor<DependentSizedArrayType>
+ : public CanProxyBase<DependentSizedArrayType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getElementType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(Expr *, getSizeExpr)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(SourceRange, getBracketsRange)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(SourceLocation, getLBracketLoc)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(SourceLocation, getRBracketLoc)
+};
+
+template<>
+struct CanProxyAdaptor<DependentSizedExtVectorType>
+ : public CanProxyBase<DependentSizedExtVectorType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getElementType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(const Expr *, getSizeExpr)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(SourceLocation, getAttributeLoc)
+};
+
+template<>
+struct CanProxyAdaptor<VectorType> : public CanProxyBase<VectorType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getElementType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(unsigned, getNumElements)
+};
+
+template<>
+struct CanProxyAdaptor<ExtVectorType> : public CanProxyBase<ExtVectorType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getElementType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(unsigned, getNumElements)
+};
+
+template<>
+struct CanProxyAdaptor<FunctionType> : public CanProxyBase<FunctionType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getResultType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(FunctionType::ExtInfo, getExtInfo)
+};
+
+template<>
+struct CanProxyAdaptor<FunctionNoProtoType>
+ : public CanProxyBase<FunctionNoProtoType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getResultType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(FunctionType::ExtInfo, getExtInfo)
+};
+
+template<>
+struct CanProxyAdaptor<FunctionProtoType>
+ : public CanProxyBase<FunctionProtoType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getResultType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(FunctionType::ExtInfo, getExtInfo)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(unsigned, getNumArgs)
+ CanQualType getArgType(unsigned i) const {
+ return CanQualType::CreateUnsafe(this->getTypePtr()->getArgType(i));
+ }
+
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isVariadic)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(unsigned, getTypeQuals)
+
+ typedef CanTypeIterator<FunctionProtoType::arg_type_iterator>
+ arg_type_iterator;
+
+ arg_type_iterator arg_type_begin() const {
+ return arg_type_iterator(this->getTypePtr()->arg_type_begin());
+ }
+
+ arg_type_iterator arg_type_end() const {
+ return arg_type_iterator(this->getTypePtr()->arg_type_end());
+ }
+
+ // Note: canonical function types never have exception specifications
+};
+
+template<>
+struct CanProxyAdaptor<TypeOfType> : public CanProxyBase<TypeOfType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getUnderlyingType)
+};
+
+template<>
+struct CanProxyAdaptor<DecltypeType> : public CanProxyBase<DecltypeType> {
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(Expr *, getUnderlyingExpr)
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getUnderlyingType)
+};
+
+template <>
+struct CanProxyAdaptor<UnaryTransformType>
+ : public CanProxyBase<UnaryTransformType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getBaseType)
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getUnderlyingType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(UnaryTransformType::UTTKind, getUTTKind)
+};
+
+template<>
+struct CanProxyAdaptor<TagType> : public CanProxyBase<TagType> {
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(TagDecl *, getDecl)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isBeingDefined)
+};
+
+template<>
+struct CanProxyAdaptor<RecordType> : public CanProxyBase<RecordType> {
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(RecordDecl *, getDecl)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isBeingDefined)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, hasConstFields)
+};
+
+template<>
+struct CanProxyAdaptor<EnumType> : public CanProxyBase<EnumType> {
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(EnumDecl *, getDecl)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isBeingDefined)
+};
+
+template<>
+struct CanProxyAdaptor<TemplateTypeParmType>
+ : public CanProxyBase<TemplateTypeParmType> {
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(unsigned, getDepth)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(unsigned, getIndex)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isParameterPack)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(TemplateTypeParmDecl *, getDecl)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(IdentifierInfo *, getIdentifier)
+};
+
+template<>
+struct CanProxyAdaptor<ObjCObjectType>
+ : public CanProxyBase<ObjCObjectType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getBaseType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(const ObjCInterfaceDecl *,
+ getInterface)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isObjCUnqualifiedId)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isObjCUnqualifiedClass)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isObjCQualifiedId)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isObjCQualifiedClass)
+
+ typedef ObjCObjectPointerType::qual_iterator qual_iterator;
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(qual_iterator, qual_begin)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(qual_iterator, qual_end)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, qual_empty)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(unsigned, getNumProtocols)
+};
+
+template<>
+struct CanProxyAdaptor<ObjCObjectPointerType>
+ : public CanProxyBase<ObjCObjectPointerType> {
+ LLVM_CLANG_CANPROXY_TYPE_ACCESSOR(getPointeeType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(const ObjCInterfaceType *,
+ getInterfaceType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isObjCIdType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isObjCClassType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isObjCQualifiedIdType)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, isObjCQualifiedClassType)
+
+ typedef ObjCObjectPointerType::qual_iterator qual_iterator;
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(qual_iterator, qual_begin)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(qual_iterator, qual_end)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(bool, qual_empty)
+ LLVM_CLANG_CANPROXY_SIMPLE_ACCESSOR(unsigned, getNumProtocols)
+};
+
+//----------------------------------------------------------------------------//
+// Method and function definitions
+//----------------------------------------------------------------------------//
+template<typename T>
+inline CanQual<T> CanQual<T>::getUnqualifiedType() const {
+ return CanQual<T>::CreateUnsafe(Stored.getLocalUnqualifiedType());
+}
+
+template<typename T>
+inline CanQual<Type> CanQual<T>::getNonReferenceType() const {
+ if (CanQual<ReferenceType> RefType = getAs<ReferenceType>())
+ return RefType->getPointeeType();
+ else
+ return *this;
+}
+
+template<typename T>
+CanQual<T> CanQual<T>::getFromOpaquePtr(void *Ptr) {
+ CanQual<T> Result;
+ Result.Stored = QualType::getFromOpaquePtr(Ptr);
+ assert((!Result || Result.Stored.getAsOpaquePtr() == (void*)-1 ||
+ Result.Stored.isCanonical()) && "Type is not canonical!");
+ return Result;
+}
+
+template<typename T>
+CanQual<T> CanQual<T>::CreateUnsafe(QualType Other) {
+ assert((Other.isNull() || Other.isCanonical()) && "Type is not canonical!");
+ assert((Other.isNull() || isa<T>(Other.getTypePtr())) &&
+ "Dynamic type does not meet the static type's requires");
+ CanQual<T> Result;
+ Result.Stored = Other;
+ return Result;
+}
+
+template<typename T>
+template<typename U>
+CanProxy<U> CanQual<T>::getAs() const {
+ if (Stored.isNull())
+ return CanProxy<U>();
+
+ if (isa<U>(Stored.getTypePtr()))
+ return CanQual<U>::CreateUnsafe(Stored);
+
+ return CanProxy<U>();
+}
+
+template<typename T>
+template<typename U>
+CanProxy<U> CanQual<T>::castAs() const {
+ assert(!Stored.isNull() && isa<U>(Stored.getTypePtr()));
+ return CanQual<U>::CreateUnsafe(Stored);
+}
+
+template<typename T>
+CanProxy<T> CanQual<T>::operator->() const {
+ return CanProxy<T>(*this);
+}
+
+template<typename InputIterator>
+typename CanTypeIterator<InputIterator>::pointer
+CanTypeIterator<InputIterator>::operator->() const {
+ return CanProxy<Type>(**this);
+}
+
+}
+
+
+#endif // LLVM_CLANG_AST_CANONICAL_TYPE_H
diff --git a/contrib/llvm/tools/clang/include/clang/AST/CharUnits.h b/contrib/llvm/tools/clang/include/clang/AST/CharUnits.h
new file mode 100644
index 0000000..5be3582
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/CharUnits.h
@@ -0,0 +1,217 @@
+//===--- CharUnits.h - Character units for sizes and offsets ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CharUnits class
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_CHARUNITS_H
+#define LLVM_CLANG_AST_CHARUNITS_H
+
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/MathExtras.h"
+
+namespace clang {
+
+ /// CharUnits - This is an opaque type for sizes expressed in character units.
+ /// Instances of this type represent a quantity as a multiple of the size
+ /// of the standard C type, char, on the target architecture. As an opaque
+ /// type, CharUnits protects you from accidentally combining operations on
+ /// quantities in bit units and character units.
+ ///
+ /// It should be noted that characters and bytes are distinct concepts. Bytes
+ /// refer to addressable units of data storage on the target machine, and
+ /// characters are members of a set of elements used for the organization,
+ /// control, or representation of data. According to C99, bytes are allowed
+ /// to exceed characters in size, although currently, clang only supports
+ /// architectures where the two are the same size.
+ ///
+ /// For portability, never assume that a target character is 8 bits wide. Use
+ /// CharUnits values wherever you calculate sizes, offsets, or alignments
+ /// in character units.
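+ ///
+ /// A brief usage sketch (illustrative only; the values are arbitrary):
+ /// @code
+ ///   CharUnits FieldSize = CharUnits::fromQuantity(10);
+ ///   CharUnits Align     = CharUnits::fromQuantity(4);
+ ///   CharUnits Offset    = FieldSize.RoundUpToAlignment(Align); // 12
+ ///   int64_t Raw         = Offset.getQuantity();  // back to a raw int64_t
+ /// @endcode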
+ class CharUnits {
+ public:
+ typedef int64_t QuantityType;
+
+ private:
+ QuantityType Quantity;
+
+ explicit CharUnits(QuantityType C) : Quantity(C) {}
+
+ public:
+
+ /// CharUnits - A default constructor.
+ CharUnits() : Quantity(0) {}
+
+ /// Zero - Construct a CharUnits quantity of zero.
+ static CharUnits Zero() {
+ return CharUnits(0);
+ }
+
+ /// One - Construct a CharUnits quantity of one.
+ static CharUnits One() {
+ return CharUnits(1);
+ }
+
+ /// fromQuantity - Construct a CharUnits quantity from a raw integer type.
+ static CharUnits fromQuantity(QuantityType Quantity) {
+ return CharUnits(Quantity);
+ }
+
+ // Compound assignment.
+ CharUnits& operator+= (const CharUnits &Other) {
+ Quantity += Other.Quantity;
+ return *this;
+ }
+ CharUnits& operator++ () {
+ ++Quantity;
+ return *this;
+ }
+ CharUnits operator++ (int) {
+ return CharUnits(Quantity++);
+ }
+ CharUnits& operator-= (const CharUnits &Other) {
+ Quantity -= Other.Quantity;
+ return *this;
+ }
+ CharUnits& operator-- () {
+ --Quantity;
+ return *this;
+ }
+ CharUnits operator-- (int) {
+ return CharUnits(Quantity--);
+ }
+
+ // Comparison operators.
+ bool operator== (const CharUnits &Other) const {
+ return Quantity == Other.Quantity;
+ }
+ bool operator!= (const CharUnits &Other) const {
+ return Quantity != Other.Quantity;
+ }
+
+ // Relational operators.
+ bool operator< (const CharUnits &Other) const {
+ return Quantity < Other.Quantity;
+ }
+ bool operator<= (const CharUnits &Other) const {
+ return Quantity <= Other.Quantity;
+ }
+ bool operator> (const CharUnits &Other) const {
+ return Quantity > Other.Quantity;
+ }
+ bool operator>= (const CharUnits &Other) const {
+ return Quantity >= Other.Quantity;
+ }
+
+ // Other predicates.
+
+ /// isZero - Test whether the quantity equals zero.
+ bool isZero() const { return Quantity == 0; }
+
+ /// isOne - Test whether the quantity equals one.
+ bool isOne() const { return Quantity == 1; }
+
+ /// isPositive - Test whether the quantity is greater than zero.
+ bool isPositive() const { return Quantity > 0; }
+
+ /// isNegative - Test whether the quantity is less than zero.
+ bool isNegative() const { return Quantity < 0; }
+
+ /// isPowerOfTwo - Test whether the quantity is a power of two.
+ /// Zero is not a power of two.
+ bool isPowerOfTwo() const {
+ return Quantity > 0 && (Quantity & -Quantity) == Quantity;
+ }
+
+ // Arithmetic operators.
+ CharUnits operator* (QuantityType N) const {
+ return CharUnits(Quantity * N);
+ }
+ CharUnits operator/ (QuantityType N) const {
+ return CharUnits(Quantity / N);
+ }
+ QuantityType operator/ (const CharUnits &Other) const {
+ return Quantity / Other.Quantity;
+ }
+ CharUnits operator% (QuantityType N) const {
+ return CharUnits(Quantity % N);
+ }
+ QuantityType operator% (const CharUnits &Other) const {
+ return Quantity % Other.Quantity;
+ }
+ CharUnits operator+ (const CharUnits &Other) const {
+ return CharUnits(Quantity + Other.Quantity);
+ }
+ CharUnits operator- (const CharUnits &Other) const {
+ return CharUnits(Quantity - Other.Quantity);
+ }
+ CharUnits operator- () const {
+ return CharUnits(-Quantity);
+ }
+
+
+ // Conversions.
+
+ /// getQuantity - Get the raw integer representation of this quantity.
+ QuantityType getQuantity() const { return Quantity; }
+
+ /// RoundUpToAlignment - Returns the next integer (mod 2**64) that is
+ /// greater than or equal to this quantity and is a multiple of \arg
+ /// Align. Align must be non-zero.
+ CharUnits RoundUpToAlignment(const CharUnits &Align) {
+ return CharUnits(llvm::RoundUpToAlignment(Quantity,
+ Align.Quantity));
+ }
+
+
+ }; // class CharUnits
+} // namespace clang
+
+inline clang::CharUnits operator* (clang::CharUnits::QuantityType Scale,
+ const clang::CharUnits &CU) {
+ return CU * Scale;
+}
+
+namespace llvm {
+
+template<> struct DenseMapInfo<clang::CharUnits> {
+ static clang::CharUnits getEmptyKey() {
+ clang::CharUnits::QuantityType Quantity =
+ DenseMapInfo<clang::CharUnits::QuantityType>::getEmptyKey();
+
+ return clang::CharUnits::fromQuantity(Quantity);
+ }
+
+ static clang::CharUnits getTombstoneKey() {
+ clang::CharUnits::QuantityType Quantity =
+ DenseMapInfo<clang::CharUnits::QuantityType>::getTombstoneKey();
+
+ return clang::CharUnits::fromQuantity(Quantity);
+ }
+
+ static unsigned getHashValue(const clang::CharUnits &CU) {
+ clang::CharUnits::QuantityType Quantity = CU.getQuantity();
+ return DenseMapInfo<clang::CharUnits::QuantityType>::getHashValue(Quantity);
+ }
+
+ static bool isEqual(const clang::CharUnits &LHS,
+ const clang::CharUnits &RHS) {
+ return LHS == RHS;
+ }
+};
+
+template <> struct isPodLike<clang::CharUnits> {
+ static const bool value = true;
+};
+
+} // end namespace llvm
+
+#endif // LLVM_CLANG_AST_CHARUNITS_H
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Decl.h b/contrib/llvm/tools/clang/include/clang/AST/Decl.h
new file mode 100644
index 0000000..11696db
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/Decl.h
@@ -0,0 +1,3343 @@
+//===--- Decl.h - Classes for representing declarations ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Decl subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_DECL_H
+#define LLVM_CLANG_AST_DECL_H
+
+#include "clang/AST/APValue.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/Redeclarable.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/Basic/Linkage.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+class CXXTemporary;
+class Expr;
+class FunctionTemplateDecl;
+class Stmt;
+class CompoundStmt;
+class StringLiteral;
+class NestedNameSpecifier;
+class TemplateParameterList;
+class TemplateArgumentList;
+struct ASTTemplateArgumentListInfo;
+class MemberSpecializationInfo;
+class FunctionTemplateSpecializationInfo;
+class DependentFunctionTemplateSpecializationInfo;
+class TypeLoc;
+class UnresolvedSetImpl;
+class LabelStmt;
+class Module;
+
+/// \brief A container of type source information.
+///
+/// A client can read the relevant info using TypeLoc wrappers, e.g:
+/// @code
+/// TypeLoc TL = TypeSourceInfo->getTypeLoc();
+/// if (PointerTypeLoc *PL = dyn_cast<PointerTypeLoc>(&TL))
+/// PL->getStarLoc().print(OS, SrcMgr);
+/// @endcode
+///
+class TypeSourceInfo {
+ QualType Ty;
+ // Contains a memory block after the class, used for type source information,
+ // allocated by ASTContext.
+ friend class ASTContext;
+ TypeSourceInfo(QualType ty) : Ty(ty) { }
+public:
+ /// \brief Return the type wrapped by this type source info.
+ QualType getType() const { return Ty; }
+
+ /// \brief Return the TypeLoc wrapper for the type source info.
+ TypeLoc getTypeLoc() const; // implemented in TypeLoc.h
+};
+
+/// TranslationUnitDecl - The top declaration context.
+class TranslationUnitDecl : public Decl, public DeclContext {
+ virtual void anchor();
+ ASTContext &Ctx;
+
+ /// The (most recently entered) anonymous namespace for this
+ /// translation unit, if one has been created.
+ NamespaceDecl *AnonymousNamespace;
+
+ explicit TranslationUnitDecl(ASTContext &ctx)
+ : Decl(TranslationUnit, 0, SourceLocation()),
+ DeclContext(TranslationUnit),
+ Ctx(ctx), AnonymousNamespace(0) {}
+public:
+ ASTContext &getASTContext() const { return Ctx; }
+
+ NamespaceDecl *getAnonymousNamespace() const { return AnonymousNamespace; }
+ void setAnonymousNamespace(NamespaceDecl *D) { AnonymousNamespace = D; }
+
+ static TranslationUnitDecl *Create(ASTContext &C);
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const TranslationUnitDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == TranslationUnit; }
+ static DeclContext *castToDeclContext(const TranslationUnitDecl *D) {
+ return static_cast<DeclContext *>(const_cast<TranslationUnitDecl*>(D));
+ }
+ static TranslationUnitDecl *castFromDeclContext(const DeclContext *DC) {
+ return static_cast<TranslationUnitDecl *>(const_cast<DeclContext*>(DC));
+ }
+};
+
+/// NamedDecl - This represents a decl with a name. Many decls have names such
+/// as ObjCMethodDecl, but not @class, etc.
+class NamedDecl : public Decl {
+ virtual void anchor();
+ /// Name - The name of this declaration, which is typically a normal
+ /// identifier but may also be a special kind of name (C++
+ /// constructor, Objective-C selector, etc.)
+ DeclarationName Name;
+
+private:
+ NamedDecl *getUnderlyingDeclImpl();
+
+protected:
+ NamedDecl(Kind DK, DeclContext *DC, SourceLocation L, DeclarationName N)
+ : Decl(DK, DC, L), Name(N) { }
+
+public:
+ /// getIdentifier - Get the identifier that names this declaration,
+ /// if there is one. This will return NULL if this declaration has
+ /// no name (e.g., for an unnamed class) or if the name is a special
+ /// name (C++ constructor, Objective-C selector, etc.).
+ IdentifierInfo *getIdentifier() const { return Name.getAsIdentifierInfo(); }
+
+ /// getName - Get the name of the identifier for this declaration as a
+ /// StringRef.
+ /// This requires that the declaration have a name and that it be a simple
+ /// identifier.
+ StringRef getName() const {
+ assert(Name.isIdentifier() && "Name is not a simple identifier");
+ return getIdentifier() ? getIdentifier()->getName() : "";
+ }
+
+ /// getNameAsString - Get a human-readable name for the declaration, even if
+ /// it is one of the special kinds of names (C++ constructor, Objective-C
+ /// selector, etc). Creating this name requires expensive string
+ /// manipulation, so it should be called only when performance doesn't matter.
+ /// For simple declarations, getNameAsCString() should suffice.
+ //
+ // FIXME: This function should be renamed to indicate that it is not just an
+ // alternate form of getName(), and clients should move as appropriate.
+ //
+ // FIXME: Deprecated, move clients to getName().
+ std::string getNameAsString() const { return Name.getAsString(); }
+
+ void printName(raw_ostream &os) const { return Name.printName(os); }
+
+ /// getDeclName - Get the actual, stored name of the declaration,
+ /// which may be a special name.
+ DeclarationName getDeclName() const { return Name; }
+
+ /// \brief Set the name of this declaration.
+ void setDeclName(DeclarationName N) { Name = N; }
+
+ /// getQualifiedNameAsString - Returns a human-readable qualified name for
+ /// this declaration, like A::B::i, for i being a member of namespace A::B.
+ /// If the declaration is not a member of a context that can be named
+ /// (record, namespace), it returns the same result as getNameAsString().
+ /// Creating this name is expensive, so it should be called only when
+ /// performance doesn't matter.
+ std::string getQualifiedNameAsString() const;
+ std::string getQualifiedNameAsString(const PrintingPolicy &Policy) const;
+
+ /// getNameForDiagnostic - Appends a human-readable name for this
+ /// declaration into the given string.
+ ///
+ /// This is the method invoked by Sema when displaying a NamedDecl
+ /// in a diagnostic. It does not necessarily produce the same
+ /// result as getNameAsString(); for example, class template
+ /// specializations are printed with their template arguments.
+ ///
+ /// TODO: use an API that doesn't require so many temporary strings
+ virtual void getNameForDiagnostic(std::string &S,
+ const PrintingPolicy &Policy,
+ bool Qualified) const {
+ if (Qualified)
+ S += getQualifiedNameAsString(Policy);
+ else
+ S += getNameAsString();
+ }
+
+ /// declarationReplaces - Determine whether this declaration, if
+ /// known to be well-formed within its context, will replace the
+ /// declaration OldD if introduced into scope. A declaration will
+ /// replace another declaration if, for example, it is a
+ /// redeclaration of the same variable or function, but not if it is
+ /// a declaration of a different kind (function vs. class) or an
+ /// overloaded function.
+ bool declarationReplaces(NamedDecl *OldD) const;
+
+ /// \brief Determine whether this declaration has linkage.
+ bool hasLinkage() const;
+
+ using Decl::isModulePrivate;
+ using Decl::setModulePrivate;
+
+ /// \brief Determine whether this declaration is hidden from name lookup.
+ bool isHidden() const { return Hidden; }
+
+ /// \brief Determine whether this declaration is a C++ class member.
+ bool isCXXClassMember() const {
+ const DeclContext *DC = getDeclContext();
+
+ // C++0x [class.mem]p1:
+ // The enumerators of an unscoped enumeration defined in
+ // the class are members of the class.
+ // FIXME: support C++0x scoped enumerations.
+ if (isa<EnumDecl>(DC))
+ DC = DC->getParent();
+
+ return DC->isRecord();
+ }
+
+ /// \brief Determine whether the given declaration is an instance member of
+ /// a C++ class.
+ bool isCXXInstanceMember() const;
+
+ class LinkageInfo {
+ Linkage linkage_;
+ Visibility visibility_;
+ bool explicit_;
+
+ public:
+ LinkageInfo() : linkage_(ExternalLinkage), visibility_(DefaultVisibility),
+ explicit_(false) {}
+ LinkageInfo(Linkage L, Visibility V, bool E)
+ : linkage_(L), visibility_(V), explicit_(E) {}
+
+ static LinkageInfo external() {
+ return LinkageInfo();
+ }
+ static LinkageInfo internal() {
+ return LinkageInfo(InternalLinkage, DefaultVisibility, false);
+ }
+ static LinkageInfo uniqueExternal() {
+ return LinkageInfo(UniqueExternalLinkage, DefaultVisibility, false);
+ }
+ static LinkageInfo none() {
+ return LinkageInfo(NoLinkage, DefaultVisibility, false);
+ }
+
+ Linkage linkage() const { return linkage_; }
+ Visibility visibility() const { return visibility_; }
+ bool visibilityExplicit() const { return explicit_; }
+
+ void setLinkage(Linkage L) { linkage_ = L; }
+ void setVisibility(Visibility V) { visibility_ = V; }
+ void setVisibility(Visibility V, bool E) { visibility_ = V; explicit_ = E; }
+ void setVisibility(LinkageInfo Other) {
+ setVisibility(Other.visibility(), Other.visibilityExplicit());
+ }
+
+ void mergeLinkage(Linkage L) {
+ setLinkage(minLinkage(linkage(), L));
+ }
+ void mergeLinkage(LinkageInfo Other) {
+ mergeLinkage(Other.linkage());
+ }
+
+ // Merge the visibility V giving preference to explicit ones.
+ // This is used, for example, when merging the visibility of a class
+ // down to one of its members. If the member has no explicit visibility,
+ // the class visibility wins.
+ void mergeVisibility(Visibility V, bool E = false) {
+ // If one has explicit visibility and the other doesn't, keep the
+ // explicit one.
+ if (visibilityExplicit() && !E)
+ return;
+ if (!visibilityExplicit() && E)
+ setVisibility(V, E);
+
+ // If both are explicit or both are implicit, keep the minimum.
+ setVisibility(minVisibility(visibility(), V), visibilityExplicit() || E);
+ }
+ // Merge the visibility V, keeping the most restrictive one.
+ // This is used for cases like merging the visibility of a template
+ // argument to an instantiation. If we already have a hidden class,
+ // no argument should give it default visibility.
+ void mergeVisibilityWithMin(Visibility V, bool E = false) {
+ // Never increase the visibility
+ if (visibility() < V)
+ return;
+
+ // If this visibility is explicit, keep it.
+ if (visibilityExplicit() && !E)
+ return;
+ setVisibility(V, E);
+ }
+ void mergeVisibility(LinkageInfo Other) {
+ mergeVisibility(Other.visibility(), Other.visibilityExplicit());
+ }
+ void mergeVisibilityWithMin(LinkageInfo Other) {
+ mergeVisibilityWithMin(Other.visibility(), Other.visibilityExplicit());
+ }
+
+ void merge(LinkageInfo Other) {
+ mergeLinkage(Other);
+ mergeVisibility(Other);
+ }
+ void mergeWithMin(LinkageInfo Other) {
+ mergeLinkage(Other);
+ mergeVisibilityWithMin(Other);
+ }
+
+ friend LinkageInfo merge(LinkageInfo L, LinkageInfo R) {
+ L.merge(R);
+ return L;
+ }
+ };
+
+ /// \brief Determine what kind of linkage this entity has.
+ Linkage getLinkage() const;
+
+ /// \brief Determines the visibility of this entity.
+ Visibility getVisibility() const {
+ return getLinkageAndVisibility().visibility();
+ }
+
+ /// \brief Determines the linkage and visibility of this entity.
+ LinkageInfo getLinkageAndVisibility() const;
+
+ /// \brief If visibility was explicitly specified for this
+ /// declaration, return that visibility.
+ llvm::Optional<Visibility> getExplicitVisibility() const;
+
+ /// \brief Clear the linkage cache in response to a change
+ /// to the declaration.
+ void ClearLinkageCache();
+
+ /// \brief Looks through UsingDecls and ObjCCompatibleAliasDecls for
+ /// the underlying named decl.
+ NamedDecl *getUnderlyingDecl() {
+ // Fast-path the common case.
+ if (this->getKind() != UsingShadow &&
+ this->getKind() != ObjCCompatibleAlias)
+ return this;
+
+ return getUnderlyingDeclImpl();
+ }
+ const NamedDecl *getUnderlyingDecl() const {
+ return const_cast<NamedDecl*>(this)->getUnderlyingDecl();
+ }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const NamedDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K >= firstNamed && K <= lastNamed; }
+};
+
+inline raw_ostream &operator<<(raw_ostream &OS, const NamedDecl &ND) {
+ ND.printName(OS);
+ return OS;
+}
+
+/// LabelDecl - Represents the declaration of a label. Labels also have a
+/// corresponding LabelStmt, which indicates the position that the label was
+/// defined at. For normal labels, the location of the decl is the same as the
+/// location of the statement. For GNU local labels (__label__), the decl
+/// location is where the __label__ is.
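+///
+/// For example (an illustrative GNU C sketch):
+/// @code
+/// void f() {
+///   __label__ retry;   // LabelDecl's location: the __label__ keyword
+///   retry:             // the corresponding LabelStmt is defined here
+///   if (0) goto retry;
+/// }
+/// @endcode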
+class LabelDecl : public NamedDecl {
+ virtual void anchor();
+ LabelStmt *TheStmt;
+ /// LocStart - For normal labels, this is the same as the main declaration
+ /// label, i.e., the location of the identifier; for GNU local labels,
+ /// this is the location of the __label__ keyword.
+ SourceLocation LocStart;
+
+ LabelDecl(DeclContext *DC, SourceLocation IdentL, IdentifierInfo *II,
+ LabelStmt *S, SourceLocation StartL)
+ : NamedDecl(Label, DC, IdentL, II), TheStmt(S), LocStart(StartL) {}
+
+public:
+ static LabelDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation IdentL, IdentifierInfo *II);
+ static LabelDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation IdentL, IdentifierInfo *II,
+ SourceLocation GnuLabelL);
+ static LabelDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ LabelStmt *getStmt() const { return TheStmt; }
+ void setStmt(LabelStmt *T) { TheStmt = T; }
+
+ bool isGnuLocal() const { return LocStart != getLocation(); }
+ void setLocStart(SourceLocation L) { LocStart = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(LocStart, getLocation());
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const LabelDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == Label; }
+};
+
+/// NamespaceDecl - Represent a C++ namespace.
+class NamespaceDecl : public NamedDecl, public DeclContext,
+ public Redeclarable<NamespaceDecl>
+{
+ virtual void anchor();
+
+ /// LocStart - The starting location of the source range, pointing
+ /// to either the namespace or the inline keyword.
+ SourceLocation LocStart;
+ /// RBraceLoc - The ending location of the source range.
+ SourceLocation RBraceLoc;
+
+ /// \brief A pointer to either the anonymous namespace that lives just inside
+ /// this namespace or to the first namespace in the chain (the latter case
+ /// only when this is not the first in the chain), along with a
+ /// boolean value indicating whether this is an inline namespace.
+ llvm::PointerIntPair<NamespaceDecl *, 1, bool> AnonOrFirstNamespaceAndInline;
+
+ NamespaceDecl(DeclContext *DC, bool Inline, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ NamespaceDecl *PrevDecl);
+
+ typedef Redeclarable<NamespaceDecl> redeclarable_base;
+ virtual NamespaceDecl *getNextRedeclaration() {
+ return RedeclLink.getNext();
+ }
+ virtual NamespaceDecl *getPreviousDeclImpl() {
+ return getPreviousDecl();
+ }
+ virtual NamespaceDecl *getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+ }
+
+public:
+ static NamespaceDecl *Create(ASTContext &C, DeclContext *DC,
+ bool Inline, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ NamespaceDecl *PrevDecl);
+
+ static NamespaceDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ typedef redeclarable_base::redecl_iterator redecl_iterator;
+ using redeclarable_base::redecls_begin;
+ using redeclarable_base::redecls_end;
+ using redeclarable_base::getPreviousDecl;
+ using redeclarable_base::getMostRecentDecl;
+
+ /// \brief Returns true if this is an anonymous namespace declaration.
+ ///
+ /// For example:
+ /// \code
+ /// namespace {
+ /// ...
+ /// };
+ /// \endcode
+ /// q.v. C++ [namespace.unnamed]
+ bool isAnonymousNamespace() const {
+ return !getIdentifier();
+ }
+
+ /// \brief Returns true if this is an inline namespace declaration.
+ bool isInline() const {
+ return AnonOrFirstNamespaceAndInline.getInt();
+ }
+
+ /// \brief Set whether this is an inline namespace declaration.
+ void setInline(bool Inline) {
+ AnonOrFirstNamespaceAndInline.setInt(Inline);
+ }
+
+ /// \brief Get the original (first) namespace declaration.
+ NamespaceDecl *getOriginalNamespace() {
+ if (isFirstDeclaration())
+ return this;
+
+ return AnonOrFirstNamespaceAndInline.getPointer();
+ }
+
+ /// \brief Get the original (first) namespace declaration.
+ const NamespaceDecl *getOriginalNamespace() const {
+ if (isFirstDeclaration())
+ return this;
+
+ return AnonOrFirstNamespaceAndInline.getPointer();
+ }
+
+ /// \brief Return true if this declaration is an original (first) declaration
+ /// of the namespace. This is false for non-original (subsequent) namespace
+ /// declarations and anonymous namespaces.
+ bool isOriginalNamespace() const {
+ return isFirstDeclaration();
+ }
+
+ /// \brief Retrieve the anonymous namespace nested inside this namespace,
+ /// if any.
+ NamespaceDecl *getAnonymousNamespace() const {
+ return getOriginalNamespace()->AnonOrFirstNamespaceAndInline.getPointer();
+ }
+
+ void setAnonymousNamespace(NamespaceDecl *D) {
+ getOriginalNamespace()->AnonOrFirstNamespaceAndInline.setPointer(D);
+ }
+
+ /// Retrieves the canonical declaration of this namespace.
+ NamespaceDecl *getCanonicalDecl() {
+ return getOriginalNamespace();
+ }
+ const NamespaceDecl *getCanonicalDecl() const {
+ return getOriginalNamespace();
+ }
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(LocStart, RBraceLoc);
+ }
+
+ SourceLocation getLocStart() const LLVM_READONLY { return LocStart; }
+ SourceLocation getRBraceLoc() const { return RBraceLoc; }
+ void setLocStart(SourceLocation L) { LocStart = L; }
+ void setRBraceLoc(SourceLocation L) { RBraceLoc = L; }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const NamespaceDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == Namespace; }
+ static DeclContext *castToDeclContext(const NamespaceDecl *D) {
+ return static_cast<DeclContext *>(const_cast<NamespaceDecl*>(D));
+ }
+ static NamespaceDecl *castFromDeclContext(const DeclContext *DC) {
+ return static_cast<NamespaceDecl *>(const_cast<DeclContext*>(DC));
+ }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
+
+/// ValueDecl - Represent the declaration of a variable (in which case it is
+/// an lvalue), a function (in which case it is a function designator), or
+/// an enum constant.
+class ValueDecl : public NamedDecl {
+ virtual void anchor();
+ QualType DeclType;
+
+protected:
+ ValueDecl(Kind DK, DeclContext *DC, SourceLocation L,
+ DeclarationName N, QualType T)
+ : NamedDecl(DK, DC, L, N), DeclType(T) {}
+public:
+ QualType getType() const { return DeclType; }
+ void setType(QualType newType) { DeclType = newType; }
+
+ /// \brief Determine whether this symbol is weakly-imported,
+ /// or declared with the weak or weak-ref attr.
+ bool isWeak() const {
+ return hasAttr<WeakAttr>() || hasAttr<WeakRefAttr>() || isWeakImported();
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ValueDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K >= firstValue && K <= lastValue; }
+};
+
+/// QualifierInfo - A struct with extended info about a syntactic
+/// name qualifier, to be used for the case of out-of-line declarations.
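+///
+/// For example (an illustrative sketch), in
+/// @code
+/// template<typename T> struct A { void f(); };
+/// template<typename T> void A<T>::f() { }
+/// @endcode
+/// the out-of-line definition of f() carries the qualifier A<T>:: and one
+/// "outer" template parameter list (template<typename T>).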
+struct QualifierInfo {
+ NestedNameSpecifierLoc QualifierLoc;
+
+ /// NumTemplParamLists - The number of "outer" template parameter lists.
+ /// The count includes all of the template parameter lists that were matched
+ /// against the template-ids occurring in the NNS and possibly (in the
+ /// case of an explicit specialization) a final "template <>".
+ unsigned NumTemplParamLists;
+
+ /// TemplParamLists - A new-allocated array of size NumTemplParamLists,
+ /// containing pointers to the "outer" template parameter lists.
+ /// It includes all of the template parameter lists that were matched
+ /// against the template-ids occurring in the NNS and possibly (in the
+ /// case of an explicit specialization) a final "template <>".
+ TemplateParameterList** TemplParamLists;
+
+ /// Default constructor.
+ QualifierInfo() : QualifierLoc(), NumTemplParamLists(0), TemplParamLists(0) {}
+
+ /// setTemplateParameterListsInfo - Sets info about "outer" template
+ /// parameter lists.
+ void setTemplateParameterListsInfo(ASTContext &Context,
+ unsigned NumTPLists,
+ TemplateParameterList **TPLists);
+
+private:
+ // Copy constructor and copy assignment are disabled.
+ QualifierInfo(const QualifierInfo&);
+ QualifierInfo& operator=(const QualifierInfo&);
+};
+
+/// \brief Represents a ValueDecl that came out of a declarator.
+/// Contains type source information through TypeSourceInfo.
+class DeclaratorDecl : public ValueDecl {
+ // A struct representing both a TInfo and a syntactic qualifier,
+ // to be used for the (uncommon) case of out-of-line declarations.
+ struct ExtInfo : public QualifierInfo {
+ TypeSourceInfo *TInfo;
+ };
+
+ llvm::PointerUnion<TypeSourceInfo*, ExtInfo*> DeclInfo;
+
+ /// InnerLocStart - The start of the source range for this declaration,
+ /// ignoring outer template declarations.
+ SourceLocation InnerLocStart;
+
+ bool hasExtInfo() const { return DeclInfo.is<ExtInfo*>(); }
+ ExtInfo *getExtInfo() { return DeclInfo.get<ExtInfo*>(); }
+ const ExtInfo *getExtInfo() const { return DeclInfo.get<ExtInfo*>(); }
+
+protected:
+ DeclaratorDecl(Kind DK, DeclContext *DC, SourceLocation L,
+ DeclarationName N, QualType T, TypeSourceInfo *TInfo,
+ SourceLocation StartL)
+ : ValueDecl(DK, DC, L, N, T), DeclInfo(TInfo), InnerLocStart(StartL) {
+ }
+
+public:
+ TypeSourceInfo *getTypeSourceInfo() const {
+ return hasExtInfo()
+ ? getExtInfo()->TInfo
+ : DeclInfo.get<TypeSourceInfo*>();
+ }
+ void setTypeSourceInfo(TypeSourceInfo *TI) {
+ if (hasExtInfo())
+ getExtInfo()->TInfo = TI;
+ else
+ DeclInfo = TI;
+ }
+
+ /// getInnerLocStart - Return SourceLocation representing start of source
+ /// range ignoring outer template declarations.
+ SourceLocation getInnerLocStart() const { return InnerLocStart; }
+ void setInnerLocStart(SourceLocation L) { InnerLocStart = L; }
+
+ /// getOuterLocStart - Return SourceLocation representing start of source
+ /// range taking into account any outer template declarations.
+ SourceLocation getOuterLocStart() const;
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY;
+ SourceLocation getLocStart() const LLVM_READONLY {
+ return getOuterLocStart();
+ }
+
+ /// \brief Retrieve the nested-name-specifier that qualifies the name of this
+ /// declaration, if it was present in the source.
+ NestedNameSpecifier *getQualifier() const {
+ return hasExtInfo() ? getExtInfo()->QualifierLoc.getNestedNameSpecifier()
+ : 0;
+ }
+
+ /// \brief Retrieve the nested-name-specifier (with source-location
+ /// information) that qualifies the name of this declaration, if it was
+ /// present in the source.
+ NestedNameSpecifierLoc getQualifierLoc() const {
+ return hasExtInfo() ? getExtInfo()->QualifierLoc
+ : NestedNameSpecifierLoc();
+ }
+
+ void setQualifierInfo(NestedNameSpecifierLoc QualifierLoc);
+
+ unsigned getNumTemplateParameterLists() const {
+ return hasExtInfo() ? getExtInfo()->NumTemplParamLists : 0;
+ }
+ TemplateParameterList *getTemplateParameterList(unsigned index) const {
+ assert(index < getNumTemplateParameterLists());
+ return getExtInfo()->TemplParamLists[index];
+ }
+ void setTemplateParameterListsInfo(ASTContext &Context, unsigned NumTPLists,
+ TemplateParameterList **TPLists);
+
+ SourceLocation getTypeSpecStartLoc() const;
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const DeclaratorDecl *D) { return true; }
+ static bool classofKind(Kind K) {
+ return K >= firstDeclarator && K <= lastDeclarator;
+ }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
+
+/// \brief Structure used to store a statement, the constant value to
+/// which it was evaluated (if any), and whether or not the statement
+/// is an integral constant expression (if known).
+struct EvaluatedStmt {
+ EvaluatedStmt() : WasEvaluated(false), IsEvaluating(false), CheckedICE(false),
+ CheckingICE(false), IsICE(false) { }
+
+ /// \brief Whether this statement was already evaluated.
+ bool WasEvaluated : 1;
+
+ /// \brief Whether this statement is being evaluated.
+ bool IsEvaluating : 1;
+
+ /// \brief Whether we already checked whether this statement was an
+ /// integral constant expression.
+ bool CheckedICE : 1;
+
+ /// \brief Whether we are checking whether this statement is an
+ /// integral constant expression.
+ bool CheckingICE : 1;
+
+ /// \brief Whether this statement is an integral constant expression,
+ /// or in C++11, whether the statement is a constant expression. Only
+ /// valid if CheckedICE is true.
+ bool IsICE : 1;
+
+ Stmt *Value;
+ APValue Evaluated;
+};
+
+/// VarDecl - An instance of this class is created to represent a variable
+/// declaration or definition.
+class VarDecl : public DeclaratorDecl, public Redeclarable<VarDecl> {
+public:
+ typedef clang::StorageClass StorageClass;
+
+ /// getStorageClassSpecifierString - Return the string used to
+ /// specify the storage class \arg SC.
+ ///
+ /// It is illegal to call this function with SC == None.
+ static const char *getStorageClassSpecifierString(StorageClass SC);
+
+ /// \brief Initialization styles.
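+ ///
+ /// For example (an illustrative sketch):
+ /// @code
+ ///   int a = 1;  // CInit
+ ///   int b(1);   // CallInit (C++98 direct-initialization)
+ ///   int c{1};   // ListInit (C++11)
+ /// @endcode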
+ enum InitializationStyle {
+ CInit, ///< C-style initialization with assignment
+ CallInit, ///< Call-style initialization (C++98)
+ ListInit ///< Direct list-initialization (C++11)
+ };
+
+protected:
+ /// \brief Placeholder type used in Init to denote an unparsed C++ default
+ /// argument.
+ struct UnparsedDefaultArgument;
+
+ /// \brief Placeholder type used in Init to denote an uninstantiated C++
+ /// default argument.
+ struct UninstantiatedDefaultArgument;
+
+ typedef llvm::PointerUnion4<Stmt *, EvaluatedStmt *,
+ UnparsedDefaultArgument *,
+ UninstantiatedDefaultArgument *> InitType;
+
+ /// \brief The initializer for this variable or, for a ParmVarDecl, the
+ /// C++ default argument.
+ mutable InitType Init;
+
+private:
+ class VarDeclBitfields {
+ friend class VarDecl;
+ friend class ASTDeclReader;
+
+ unsigned SClass : 3;
+ unsigned SClassAsWritten : 3;
+ unsigned ThreadSpecified : 1;
+ unsigned InitStyle : 2;
+
+ /// \brief Whether this variable is the exception variable in a C++ catch
+ /// or an Objective-C @catch statement.
+ unsigned ExceptionVar : 1;
+
+ /// \brief Whether this local variable could be allocated in the return
+ /// slot of its function, enabling the named return value optimization
+ /// (NRVO).
+ unsigned NRVOVariable : 1;
+
+ /// \brief Whether this variable is the for-range-declaration in a C++0x
+ /// for-range statement.
+ unsigned CXXForRangeDecl : 1;
+
+ /// \brief Whether this variable is an ARC pseudo-__strong
+ /// variable; see isARCPseudoStrong() for details.
+ unsigned ARCPseudoStrong : 1;
+
+ /// \brief Whether this variable is (C++0x) constexpr.
+ unsigned IsConstexpr : 1;
+ };
+ enum { NumVarDeclBits = 14 };
+
+ friend class ASTDeclReader;
+ friend class StmtIteratorBase;
+
+protected:
+ enum { NumParameterIndexBits = 8 };
+
+ class ParmVarDeclBitfields {
+ friend class ParmVarDecl;
+ friend class ASTDeclReader;
+
+ unsigned : NumVarDeclBits;
+
+ /// Whether this parameter inherits a default argument from a
+ /// prior declaration.
+ unsigned HasInheritedDefaultArg : 1;
+
+ /// Whether this parameter undergoes K&R argument promotion.
+ unsigned IsKNRPromoted : 1;
+
+ /// Whether this parameter is an ObjC method parameter or not.
+ unsigned IsObjCMethodParam : 1;
+
+ /// If IsObjCMethodParam, a Decl::ObjCDeclQualifier.
+ /// Otherwise, the number of function parameter scopes enclosing
+ /// the function parameter scope in which this parameter was
+ /// declared.
+ unsigned ScopeDepthOrObjCQuals : 7;
+
+ /// The number of parameters preceding this parameter in the
+ /// function parameter scope in which it was declared.
+ unsigned ParameterIndex : NumParameterIndexBits;
+ };
+
+ union {
+ unsigned AllBits;
+ VarDeclBitfields VarDeclBits;
+ ParmVarDeclBitfields ParmVarDeclBits;
+ };
+
+ VarDecl(Kind DK, DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ QualType T, TypeSourceInfo *TInfo, StorageClass SC,
+ StorageClass SCAsWritten)
+ : DeclaratorDecl(DK, DC, IdLoc, Id, T, TInfo, StartLoc), Init() {
+ assert(sizeof(VarDeclBitfields) <= sizeof(unsigned));
+ assert(sizeof(ParmVarDeclBitfields) <= sizeof(unsigned));
+ AllBits = 0;
+ VarDeclBits.SClass = SC;
+ VarDeclBits.SClassAsWritten = SCAsWritten;
+ // Everything else is implicitly initialized to false.
+ }
+
+ typedef Redeclarable<VarDecl> redeclarable_base;
+ virtual VarDecl *getNextRedeclaration() { return RedeclLink.getNext(); }
+ virtual VarDecl *getPreviousDeclImpl() {
+ return getPreviousDecl();
+ }
+ virtual VarDecl *getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+ }
+
+public:
+ typedef redeclarable_base::redecl_iterator redecl_iterator;
+ using redeclarable_base::redecls_begin;
+ using redeclarable_base::redecls_end;
+ using redeclarable_base::getPreviousDecl;
+ using redeclarable_base::getMostRecentDecl;
+
+ static VarDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
+ StorageClass S, StorageClass SCAsWritten);
+
+ static VarDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY;
+
+ StorageClass getStorageClass() const {
+ return (StorageClass) VarDeclBits.SClass;
+ }
+ StorageClass getStorageClassAsWritten() const {
+ return (StorageClass) VarDeclBits.SClassAsWritten;
+ }
+ void setStorageClass(StorageClass SC);
+ void setStorageClassAsWritten(StorageClass SC) {
+ assert(isLegalForVariable(SC));
+ VarDeclBits.SClassAsWritten = SC;
+ }
+
+ void setThreadSpecified(bool T) { VarDeclBits.ThreadSpecified = T; }
+ bool isThreadSpecified() const {
+ return VarDeclBits.ThreadSpecified;
+ }
+
+ /// hasLocalStorage - Returns true if a variable with function scope
+ /// is a non-static local variable.
+ bool hasLocalStorage() const {
+ if (getStorageClass() == SC_None)
+ return !isFileVarDecl();
+
+ // Return true for: Auto, Register.
+ // Return false for: Extern, Static, PrivateExtern, OpenCLWorkGroupLocal.
+
+ return getStorageClass() >= SC_Auto;
+ }
+
+ /// isStaticLocal - Returns true if a variable with function scope is a
+ /// static local variable.
+ bool isStaticLocal() const {
+ return getStorageClass() == SC_Static && !isFileVarDecl();
+ }
+
+ /// hasExternalStorage - Returns true if a variable has extern or
+ /// __private_extern__ storage.
+ bool hasExternalStorage() const {
+ return getStorageClass() == SC_Extern ||
+ getStorageClass() == SC_PrivateExtern;
+ }
+
+ /// hasGlobalStorage - Returns true for all variables that do not
+ /// have local storage. This includes all global variables as well
+ /// as static variables declared within a function.
+ bool hasGlobalStorage() const { return !hasLocalStorage(); }
+
+ /// \brief Determines whether this variable is a variable with
+ /// external, C linkage.
+ bool isExternC() const;
+
+ /// isLocalVarDecl - Returns true for local variable declarations
+ /// other than parameters. Note that this includes static variables
+ /// inside of functions. It also includes variables inside blocks.
+ ///
+ /// void foo() { int x; static int y; extern int z; }
+ ///
+ bool isLocalVarDecl() const {
+ if (getKind() != Decl::Var)
+ return false;
+ if (const DeclContext *DC = getDeclContext())
+ return DC->getRedeclContext()->isFunctionOrMethod();
+ return false;
+ }
+
+ /// isFunctionOrMethodVarDecl - Similar to isLocalVarDecl, but
+ /// excludes variables declared in blocks.
+ bool isFunctionOrMethodVarDecl() const {
+ if (getKind() != Decl::Var)
+ return false;
+ const DeclContext *DC = getDeclContext()->getRedeclContext();
+ return DC->isFunctionOrMethod() && DC->getDeclKind() != Decl::Block;
+ }
+
+ /// \brief Determines whether this is a static data member.
+ ///
+ /// This will only be true in C++, and applies to, e.g., the
+ /// variable 'x' in:
+ /// \code
+ /// struct S {
+ /// static int x;
+ /// };
+ /// \endcode
+ bool isStaticDataMember() const {
+ // If it wasn't static, it would be a FieldDecl.
+ return getKind() != Decl::ParmVar && getDeclContext()->isRecord();
+ }
+
+ virtual VarDecl *getCanonicalDecl();
+ const VarDecl *getCanonicalDecl() const {
+ return const_cast<VarDecl*>(this)->getCanonicalDecl();
+ }
+
+ enum DefinitionKind {
+ DeclarationOnly, ///< This declaration is only a declaration.
+ TentativeDefinition, ///< This declaration is a tentative definition.
+ Definition ///< This declaration is definitely a definition.
+ };
+
+ /// \brief Check whether this declaration is a definition. If this could be
+ /// a tentative definition (in C), don't check whether there's an overriding
+ /// definition.
+ DefinitionKind isThisDeclarationADefinition(ASTContext &) const;
+ DefinitionKind isThisDeclarationADefinition() const {
+ return isThisDeclarationADefinition(getASTContext());
+ }
+
+ /// \brief Check whether this variable is defined in this
+ /// translation unit.
+ DefinitionKind hasDefinition(ASTContext &) const;
+ DefinitionKind hasDefinition() const {
+ return hasDefinition(getASTContext());
+ }
+
+ /// \brief Get the tentative definition that acts as the real definition in
+ /// a TU. Returns null if there is a proper definition available.
+ VarDecl *getActingDefinition();
+ const VarDecl *getActingDefinition() const {
+ return const_cast<VarDecl*>(this)->getActingDefinition();
+ }
+
+ /// \brief Determine whether this is a tentative definition of a
+ /// variable in C.
+ bool isTentativeDefinitionNow() const;
+
+ /// \brief Get the real (not just tentative) definition for this declaration.
+ VarDecl *getDefinition(ASTContext &);
+ const VarDecl *getDefinition(ASTContext &C) const {
+ return const_cast<VarDecl*>(this)->getDefinition(C);
+ }
+ VarDecl *getDefinition() {
+ return getDefinition(getASTContext());
+ }
+ const VarDecl *getDefinition() const {
+ return const_cast<VarDecl*>(this)->getDefinition();
+ }
+
+ /// \brief Determine whether this is or was instantiated from an out-of-line
+ /// definition of a static data member.
+ virtual bool isOutOfLine() const;
+
+ /// \brief If this is a static data member, find its out-of-line definition.
+ VarDecl *getOutOfLineDefinition();
+
+ /// isFileVarDecl - Returns true for file-scoped variable declarations.
+ bool isFileVarDecl() const {
+ if (getKind() != Decl::Var)
+ return false;
+
+ if (getDeclContext()->getRedeclContext()->isFileContext())
+ return true;
+
+ if (isStaticDataMember())
+ return true;
+
+ return false;
+ }
+
+ /// getAnyInitializer - Get the initializer for this variable, no matter which
+ /// declaration it is attached to.
+ const Expr *getAnyInitializer() const {
+ const VarDecl *D;
+ return getAnyInitializer(D);
+ }
+
+ /// getAnyInitializer - Get the initializer for this variable, no matter which
+ /// declaration it is attached to. Also get that declaration.
+ const Expr *getAnyInitializer(const VarDecl *&D) const;
+
+ bool hasInit() const {
+ return !Init.isNull() && (Init.is<Stmt *>() || Init.is<EvaluatedStmt *>());
+ }
+ const Expr *getInit() const {
+ if (Init.isNull())
+ return 0;
+
+ const Stmt *S = Init.dyn_cast<Stmt *>();
+ if (!S) {
+ if (EvaluatedStmt *ES = Init.dyn_cast<EvaluatedStmt*>())
+ S = ES->Value;
+ }
+ return (const Expr*) S;
+ }
+ Expr *getInit() {
+ if (Init.isNull())
+ return 0;
+
+ Stmt *S = Init.dyn_cast<Stmt *>();
+ if (!S) {
+ if (EvaluatedStmt *ES = Init.dyn_cast<EvaluatedStmt*>())
+ S = ES->Value;
+ }
+
+ return (Expr*) S;
+ }
+
+ /// \brief Retrieve the address of the initializer expression.
+ Stmt **getInitAddress() {
+ if (EvaluatedStmt *ES = Init.dyn_cast<EvaluatedStmt*>())
+ return &ES->Value;
+
+ // This union hack tip-toes around strict-aliasing rules.
+ union {
+ InitType *InitPtr;
+ Stmt **StmtPtr;
+ };
+
+ InitPtr = &Init;
+ return StmtPtr;
+ }
+
+ void setInit(Expr *I);
+
+ /// \brief Determine whether this variable is a reference that
+ /// extends the lifetime of its temporary initializer.
+ ///
+ /// A reference extends the lifetime of its temporary initializer if
+ /// its initializer is an rvalue that would normally go out of scope
+ /// at the end of the initializer (a full expression). In such cases,
+ /// the reference itself takes ownership of the temporary, which will
+ /// be destroyed when the reference goes out of scope. For example:
+ ///
+ /// \code
+ /// const int &r = 1.0; // creates a temporary of type 'int'
+ /// \endcode
+ bool extendsLifetimeOfTemporary() const;
+
+ /// \brief Determine whether this variable's value can be used in a
+ /// constant expression, according to the relevant language standard.
+ /// This only checks properties of the declaration, and does not check
+ /// whether the initializer is in fact a constant expression.
+ bool isUsableInConstantExpressions(ASTContext &C) const;
+
+ EvaluatedStmt *ensureEvaluatedStmt() const;
+
+ /// \brief Attempt to evaluate the value of the initializer attached to this
+ /// declaration, and produce notes explaining why it cannot be evaluated or is
+ /// not a constant expression. Returns a pointer to the value if evaluation
+ /// succeeded, 0 otherwise.
+ APValue *evaluateValue() const;
+ APValue *evaluateValue(
+ llvm::SmallVectorImpl<PartialDiagnosticAt> &Notes) const;
+
+ /// \brief Return the already-evaluated value of this variable's
+ /// initializer, or NULL if the value is not yet known. Returns a pointer
+ /// to an untyped APValue if the value could not be evaluated.
+ APValue *getEvaluatedValue() const {
+ if (EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>())
+ if (Eval->WasEvaluated)
+ return &Eval->Evaluated;
+
+ return 0;
+ }
+
+ /// \brief Determines whether it is already known whether the
+ /// initializer is an integral constant expression or not.
+ bool isInitKnownICE() const {
+ if (EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>())
+ return Eval->CheckedICE;
+
+ return false;
+ }
+
+ /// \brief Determines whether the initializer is an integral constant
+ /// expression, or in C++11, whether the initializer is a constant
+ /// expression.
+ ///
+ /// \pre isInitKnownICE()
+ bool isInitICE() const {
+ assert(isInitKnownICE() &&
+ "Check whether we already know that the initializer is an ICE");
+ return Init.get<EvaluatedStmt *>()->IsICE;
+ }
+
+ /// \brief Determine whether the value of the initializer attached to this
+ /// declaration is an integral constant expression.
+ bool checkInitIsICE() const;
+
+ void setInitStyle(InitializationStyle Style) {
+ VarDeclBits.InitStyle = Style;
+ }
+
+ /// \brief The style of initialization for this declaration.
+ ///
+ /// C-style initialization is "int x = 1;". Call-style initialization is
+ /// a C++98 direct-initializer, e.g. "int x(1);". The Init expression will be
+ /// the expression inside the parens or a "ClassType(a,b,c)" class constructor
+ /// expression for class types. List-style initialization is C++11 syntax,
+ /// e.g. "int x{1};". Clients can distinguish between different forms of
+ /// initialization by checking this value. In particular, "int x = {1};" is
+ /// C-style, "int x({1})" is call-style, and "int x{1};" is list-style; the
+ /// Init expression in all three cases is an InitListExpr.
+ InitializationStyle getInitStyle() const {
+ return static_cast<InitializationStyle>(VarDeclBits.InitStyle);
+ }
+
+ /// \brief Whether the initializer is a direct-initializer (list or call).
+ bool isDirectInit() const {
+ return getInitStyle() != CInit;
+ }
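+
+ // For example (illustrative), given the accessors above:
+ //   int a = 1;   // getInitStyle() == CInit,    isDirectInit() == false
+ //   int b(1);    // getInitStyle() == CallInit, isDirectInit() == true
+ //   int c{1};    // getInitStyle() == ListInit, isDirectInit() == true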
+
+ /// \brief Determine whether this variable is the exception variable in a
+ /// C++ catch statement or an Objective-C @catch statement.
+ bool isExceptionVariable() const {
+ return VarDeclBits.ExceptionVar;
+ }
+ void setExceptionVariable(bool EV) { VarDeclBits.ExceptionVar = EV; }
+
+ /// \brief Determine whether this local variable can be used with the named
+ /// return value optimization (NRVO).
+ ///
+ /// The named return value optimization (NRVO) works by marking certain
+ /// non-volatile local variables of class type as NRVO objects. These
+ /// locals can be allocated within the return slot of their containing
+ /// function, in which case there is no need to copy the object to the
+ /// return slot when returning from the function. Within the function body,
+ /// each return that returns the NRVO object will have this variable as its
+ /// NRVO candidate.
+ bool isNRVOVariable() const { return VarDeclBits.NRVOVariable; }
+ void setNRVOVariable(bool NRVO) { VarDeclBits.NRVOVariable = NRVO; }
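+
+ // For example (illustrative):
+ //   X f() { X x; return x; }   // 'x' is an NRVO candidate in f's body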
+
+ /// \brief Determine whether this variable is the for-range-declaration in
+ /// a C++0x for-range statement.
+ bool isCXXForRangeDecl() const { return VarDeclBits.CXXForRangeDecl; }
+ void setCXXForRangeDecl(bool FRD) { VarDeclBits.CXXForRangeDecl = FRD; }
+
+ /// \brief Determine whether this variable is an ARC pseudo-__strong
+ /// variable. A pseudo-__strong variable has a __strong-qualified
+ /// type but does not actually retain the object written into it.
+ /// Generally such variables are also 'const' for safety.
+ bool isARCPseudoStrong() const { return VarDeclBits.ARCPseudoStrong; }
+ void setARCPseudoStrong(bool ps) { VarDeclBits.ARCPseudoStrong = ps; }
+
+ /// Whether this variable is (C++0x) constexpr.
+ bool isConstexpr() const { return VarDeclBits.IsConstexpr; }
+ void setConstexpr(bool IC) { VarDeclBits.IsConstexpr = IC; }
+
+ /// \brief If this variable is an instantiated static data member of a
+ /// class template specialization, returns the templated static data member
+ /// from which it was instantiated.
+ VarDecl *getInstantiatedFromStaticDataMember() const;
+
+ /// \brief If this variable is a static data member, determine what kind of
+ /// template specialization or instantiation this is.
+ TemplateSpecializationKind getTemplateSpecializationKind() const;
+
+ /// \brief If this variable is an instantiation of a static data member of a
+ /// class template specialization, retrieves the member specialization
+ /// information.
+ MemberSpecializationInfo *getMemberSpecializationInfo() const;
+
+ /// \brief For a static data member that was instantiated from a static
+ /// data member of a class template, set the template specialiation kind.
+ void setTemplateSpecializationKind(TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation = SourceLocation());
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const VarDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K >= firstVar && K <= lastVar; }
+};
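+
+// Usage sketch (illustrative only; describeVar is a hypothetical helper)
+// combining the storage, initializer, and evaluation queries declared above:
+//
+//   static void describeVar(const clang::VarDecl *VD) {
+//     if (VD->hasGlobalStorage() || VD->isStaticDataMember()) {
+//       // Prefer a cached value; otherwise try to evaluate the initializer.
+//       const clang::APValue *V = VD->getEvaluatedValue();
+//       if (!V && VD->getAnyInitializer())
+//         V = VD->evaluateValue();
+//       (void)V; // constant value of the initializer, if it could be computed
+//     }
+//   }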
+
+class ImplicitParamDecl : public VarDecl {
+ virtual void anchor();
+public:
+ static ImplicitParamDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ QualType T);
+
+ static ImplicitParamDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ ImplicitParamDecl(DeclContext *DC, SourceLocation IdLoc,
+ IdentifierInfo *Id, QualType Type)
+ : VarDecl(ImplicitParam, DC, IdLoc, IdLoc, Id, Type,
+ /*tinfo*/ 0, SC_None, SC_None) {
+ setImplicit();
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const ImplicitParamDecl *D) { return true; }
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classofKind(Kind K) { return K == ImplicitParam; }
+};
+
+/// ParmVarDecl - Represents a parameter to a function.
+class ParmVarDecl : public VarDecl {
+public:
+ enum { MaxFunctionScopeDepth = 255 };
+ enum { MaxFunctionScopeIndex = 255 };
+
+protected:
+ ParmVarDecl(Kind DK, DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ QualType T, TypeSourceInfo *TInfo,
+ StorageClass S, StorageClass SCAsWritten, Expr *DefArg)
+ : VarDecl(DK, DC, StartLoc, IdLoc, Id, T, TInfo, S, SCAsWritten) {
+ assert(ParmVarDeclBits.HasInheritedDefaultArg == false);
+ assert(ParmVarDeclBits.IsKNRPromoted == false);
+ assert(ParmVarDeclBits.IsObjCMethodParam == false);
+ setDefaultArg(DefArg);
+ }
+
+public:
+ static ParmVarDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ QualType T, TypeSourceInfo *TInfo,
+ StorageClass S, StorageClass SCAsWritten,
+ Expr *DefArg);
+
+ static ParmVarDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY;
+
+ void setObjCMethodScopeInfo(unsigned parameterIndex) {
+ ParmVarDeclBits.IsObjCMethodParam = true;
+ setParameterIndex(parameterIndex);
+ }
+
+ void setScopeInfo(unsigned scopeDepth, unsigned parameterIndex) {
+ assert(!ParmVarDeclBits.IsObjCMethodParam);
+
+ ParmVarDeclBits.ScopeDepthOrObjCQuals = scopeDepth;
+ assert(ParmVarDeclBits.ScopeDepthOrObjCQuals == scopeDepth
+ && "truncation!");
+
+ setParameterIndex(parameterIndex);
+ }
+
+ bool isObjCMethodParameter() const {
+ return ParmVarDeclBits.IsObjCMethodParam;
+ }
+
+ unsigned getFunctionScopeDepth() const {
+ if (ParmVarDeclBits.IsObjCMethodParam) return 0;
+ return ParmVarDeclBits.ScopeDepthOrObjCQuals;
+ }
+
+ /// Returns the index of this parameter in its prototype or method scope.
+ unsigned getFunctionScopeIndex() const {
+ return getParameterIndex();
+ }
+
+ ObjCDeclQualifier getObjCDeclQualifier() const {
+ if (!ParmVarDeclBits.IsObjCMethodParam) return OBJC_TQ_None;
+ return ObjCDeclQualifier(ParmVarDeclBits.ScopeDepthOrObjCQuals);
+ }
+ void setObjCDeclQualifier(ObjCDeclQualifier QTVal) {
+ assert(ParmVarDeclBits.IsObjCMethodParam);
+ ParmVarDeclBits.ScopeDepthOrObjCQuals = QTVal;
+ }
+
+ /// True if the value passed to this parameter must undergo
+ /// K&R-style default argument promotion:
+ ///
+ /// C99 6.5.2.2.
+ /// If the expression that denotes the called function has a type
+ /// that does not include a prototype, the integer promotions are
+ /// performed on each argument, and arguments that have type float
+ /// are promoted to double.
+ bool isKNRPromoted() const {
+ return ParmVarDeclBits.IsKNRPromoted;
+ }
+ void setKNRPromoted(bool promoted) {
+ ParmVarDeclBits.IsKNRPromoted = promoted;
+ }
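+
+ // For example (illustrative): when such a call site passes a float
+ // argument, it is converted to double (and char/short arguments undergo
+ // the integer promotions) before the call, per the C99 wording above.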
+
+ Expr *getDefaultArg();
+ const Expr *getDefaultArg() const {
+ return const_cast<ParmVarDecl *>(this)->getDefaultArg();
+ }
+
+ void setDefaultArg(Expr *defarg) {
+ Init = reinterpret_cast<Stmt *>(defarg);
+ }
+
+ /// \brief Retrieve the source range that covers the entire default
+ /// argument.
+ SourceRange getDefaultArgRange() const;
+ void setUninstantiatedDefaultArg(Expr *arg) {
+ Init = reinterpret_cast<UninstantiatedDefaultArgument *>(arg);
+ }
+ Expr *getUninstantiatedDefaultArg() {
+ return (Expr *)Init.get<UninstantiatedDefaultArgument *>();
+ }
+ const Expr *getUninstantiatedDefaultArg() const {
+ return (const Expr *)Init.get<UninstantiatedDefaultArgument *>();
+ }
+
+ /// hasDefaultArg - Determines whether this parameter has a default argument,
+ /// either parsed or not.
+ bool hasDefaultArg() const {
+ return getInit() || hasUnparsedDefaultArg() ||
+ hasUninstantiatedDefaultArg();
+ }
+
+ /// hasUnparsedDefaultArg - Determines whether this parameter has a
+ /// default argument that has not yet been parsed. This will occur
+ /// during the processing of a C++ class whose member functions have
+ /// default arguments, e.g.,
+ /// @code
+ /// class X {
+ /// public:
+ /// void f(int x = 17); // x has an unparsed default argument now
+ /// }; // x has a regular default argument now
+ /// @endcode
+ bool hasUnparsedDefaultArg() const {
+ return Init.is<UnparsedDefaultArgument*>();
+ }
+
+ bool hasUninstantiatedDefaultArg() const {
+ return Init.is<UninstantiatedDefaultArgument*>();
+ }
+
+ /// setUnparsedDefaultArg - Specify that this parameter has an
+ /// unparsed default argument. The argument will be replaced with a
+ /// real default argument via setDefaultArg when the class
+ /// definition enclosing the function declaration that owns this
+ /// default argument is completed.
+ void setUnparsedDefaultArg() {
+ Init = (UnparsedDefaultArgument *)0;
+ }
+
+ bool hasInheritedDefaultArg() const {
+ return ParmVarDeclBits.HasInheritedDefaultArg;
+ }
+
+ void setHasInheritedDefaultArg(bool I = true) {
+ ParmVarDeclBits.HasInheritedDefaultArg = I;
+ }
+
+ QualType getOriginalType() const {
+ if (getTypeSourceInfo())
+ return getTypeSourceInfo()->getType();
+ return getType();
+ }
+
+ /// \brief Determine whether this parameter is actually a function
+ /// parameter pack.
+ bool isParameterPack() const;
+
+ /// setOwningFunction - Sets the function declaration that owns this
+ /// ParmVarDecl. Since ParmVarDecls are often created before the
+ /// FunctionDecls that own them, this routine is required to update
+ /// the DeclContext appropriately.
+ void setOwningFunction(DeclContext *FD) { setDeclContext(FD); }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ParmVarDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == ParmVar; }
+
+private:
+ enum { ParameterIndexSentinel = (1 << NumParameterIndexBits) - 1 };
+
+ void setParameterIndex(unsigned parameterIndex) {
+ if (parameterIndex >= ParameterIndexSentinel) {
+ setParameterIndexLarge(parameterIndex);
+ return;
+ }
+
+ ParmVarDeclBits.ParameterIndex = parameterIndex;
+ assert(ParmVarDeclBits.ParameterIndex == parameterIndex && "truncation!");
+ }
+ unsigned getParameterIndex() const {
+ unsigned d = ParmVarDeclBits.ParameterIndex;
+ return d == ParameterIndexSentinel ? getParameterIndexLarge() : d;
+ }
+
+ void setParameterIndexLarge(unsigned parameterIndex);
+ unsigned getParameterIndexLarge() const;
+};
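+
+// Usage sketch (illustrative only; paramHasUsableDefault is a hypothetical
+// helper) showing the intended order of the default-argument queries above:
+// an unparsed or uninstantiated default argument still counts for
+// hasDefaultArg(), but it is not yet usable as an expression.
+//
+//   static bool paramHasUsableDefault(const clang::ParmVarDecl *P) {
+//     return P->hasDefaultArg() &&
+//            !P->hasUnparsedDefaultArg() &&
+//            !P->hasUninstantiatedDefaultArg();
+//   }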
+
+/// FunctionDecl - An instance of this class is created to represent a
+/// function declaration or definition.
+///
+/// Since a given function can be declared several times in a program,
+/// there may be several FunctionDecls that correspond to that
+/// function. Only one of those FunctionDecls will be found when
+/// traversing the list of declarations in the context of the
+/// FunctionDecl (e.g., the translation unit); this FunctionDecl
+/// contains all of the information known about the function. Other,
+/// previous declarations of the function are available via the
+/// getPreviousDecl() chain.
+class FunctionDecl : public DeclaratorDecl, public DeclContext,
+ public Redeclarable<FunctionDecl> {
+public:
+ typedef clang::StorageClass StorageClass;
+
+ /// \brief The kind of templated function a FunctionDecl can be.
+ enum TemplatedKind {
+ TK_NonTemplate,
+ TK_FunctionTemplate,
+ TK_MemberSpecialization,
+ TK_FunctionTemplateSpecialization,
+ TK_DependentFunctionTemplateSpecialization
+ };
+
+private:
+ /// ParamInfo - new[]'d array of pointers to VarDecls for the formal
+ /// parameters of this function. This is null if a prototype or if there are
+ /// no formals.
+ ParmVarDecl **ParamInfo;
+
+ /// DeclsInPrototypeScope - Array of pointers to NamedDecls for
+ /// decls defined in the function prototype that are not parameters. E.g.
+ /// 'enum Y' in 'void f(enum Y {AA} x) {}'.
+ llvm::ArrayRef<NamedDecl*> DeclsInPrototypeScope;
+
+ LazyDeclStmtPtr Body;
+
+ // FIXME: This can be packed into the bitfields in Decl.
+ // NOTE: VC++ treats enums as signed, avoid using the StorageClass enum
+ unsigned SClass : 2;
+ unsigned SClassAsWritten : 2;
+ bool IsInline : 1;
+ bool IsInlineSpecified : 1;
+ bool IsVirtualAsWritten : 1;
+ bool IsPure : 1;
+ bool HasInheritedPrototype : 1;
+ bool HasWrittenPrototype : 1;
+ bool IsDeleted : 1;
+ bool IsTrivial : 1; // sunk from CXXMethodDecl
+ bool IsDefaulted : 1; // sunk from CXXMethodDecl
+ bool IsExplicitlyDefaulted : 1; // sunk from CXXMethodDecl
+ bool HasImplicitReturnZero : 1;
+ bool IsLateTemplateParsed : 1;
+ bool IsConstexpr : 1;
+
+ /// \brief End part of this FunctionDecl's source range.
+ ///
+ /// We could compute the full range in getSourceRange(). However, when we're
+ /// dealing with a function definition deserialized from a PCH/AST file,
+ /// we can only compute the full range once the function body has been
+ /// de-serialized, so it's far better to have the (sometimes-redundant)
+ /// EndRangeLoc.
+ SourceLocation EndRangeLoc;
+
+ /// \brief The template or declaration that this declaration
+ /// describes or was instantiated from, respectively.
+ ///
+ /// For non-templates, this value will be NULL. For function
+ /// declarations that describe a function template, this will be a
+ /// pointer to a FunctionTemplateDecl. For member functions
+ /// of class template specializations, this will be a MemberSpecializationInfo
+ /// pointer containing information about the specialization.
+ /// For function template specializations, this will be a
+ /// FunctionTemplateSpecializationInfo, which contains information about
+ /// the template being specialized and the template arguments involved in
+ /// that specialization.
+ llvm::PointerUnion4<FunctionTemplateDecl *,
+ MemberSpecializationInfo *,
+ FunctionTemplateSpecializationInfo *,
+ DependentFunctionTemplateSpecializationInfo *>
+ TemplateOrSpecialization;
+
+ /// DNLoc - Provides source/type location info for the
+ /// declaration name embedded in the DeclaratorDecl base class.
+ DeclarationNameLoc DNLoc;
+
+ /// \brief Specify that this function declaration is actually a function
+ /// template specialization.
+ ///
+ /// \param C the ASTContext.
+ ///
+ /// \param Template the function template that this function template
+ /// specialization specializes.
+ ///
+ /// \param TemplateArgs the template arguments that produced this
+ /// function template specialization from the template.
+ ///
+ /// \param InsertPos If non-NULL, the position in the function template
+ /// specialization set where the function template specialization data will
+ /// be inserted.
+ ///
+ /// \param TSK the kind of template specialization this is.
+ ///
+ /// \param TemplateArgsAsWritten location info of template arguments.
+ ///
+ /// \param PointOfInstantiation point at which the function template
+ /// specialization was first instantiated.
+ void setFunctionTemplateSpecialization(ASTContext &C,
+ FunctionTemplateDecl *Template,
+ const TemplateArgumentList *TemplateArgs,
+ void *InsertPos,
+ TemplateSpecializationKind TSK,
+ const TemplateArgumentListInfo *TemplateArgsAsWritten,
+ SourceLocation PointOfInstantiation);
+
+ /// \brief Specify that this record is an instantiation of the
+ /// member function FD.
+ void setInstantiationOfMemberFunction(ASTContext &C, FunctionDecl *FD,
+ TemplateSpecializationKind TSK);
+
+ void setParams(ASTContext &C, llvm::ArrayRef<ParmVarDecl *> NewParamInfo);
+
+protected:
+ FunctionDecl(Kind DK, DeclContext *DC, SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ StorageClass S, StorageClass SCAsWritten, bool isInlineSpecified,
+ bool isConstexprSpecified)
+ : DeclaratorDecl(DK, DC, NameInfo.getLoc(), NameInfo.getName(), T, TInfo,
+ StartLoc),
+ DeclContext(DK),
+ ParamInfo(0), Body(),
+ SClass(S), SClassAsWritten(SCAsWritten),
+ IsInline(isInlineSpecified), IsInlineSpecified(isInlineSpecified),
+ IsVirtualAsWritten(false), IsPure(false), HasInheritedPrototype(false),
+ HasWrittenPrototype(true), IsDeleted(false), IsTrivial(false),
+ IsDefaulted(false), IsExplicitlyDefaulted(false),
+ HasImplicitReturnZero(false), IsLateTemplateParsed(false),
+ IsConstexpr(isConstexprSpecified), EndRangeLoc(NameInfo.getEndLoc()),
+ TemplateOrSpecialization(),
+ DNLoc(NameInfo.getInfo()) {}
+
+ typedef Redeclarable<FunctionDecl> redeclarable_base;
+ virtual FunctionDecl *getNextRedeclaration() { return RedeclLink.getNext(); }
+ virtual FunctionDecl *getPreviousDeclImpl() {
+ return getPreviousDecl();
+ }
+ virtual FunctionDecl *getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+ }
+
+public:
+ typedef redeclarable_base::redecl_iterator redecl_iterator;
+ using redeclarable_base::redecls_begin;
+ using redeclarable_base::redecls_end;
+ using redeclarable_base::getPreviousDecl;
+ using redeclarable_base::getMostRecentDecl;
+
+ static FunctionDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation NLoc,
+ DeclarationName N, QualType T,
+ TypeSourceInfo *TInfo,
+ StorageClass SC = SC_None,
+ StorageClass SCAsWritten = SC_None,
+ bool isInlineSpecified = false,
+ bool hasWrittenPrototype = true,
+ bool isConstexprSpecified = false) {
+ DeclarationNameInfo NameInfo(N, NLoc);
+ return FunctionDecl::Create(C, DC, StartLoc, NameInfo, T, TInfo,
+ SC, SCAsWritten,
+ isInlineSpecified, hasWrittenPrototype,
+ isConstexprSpecified);
+ }
+
+ static FunctionDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ StorageClass SC = SC_None,
+ StorageClass SCAsWritten = SC_None,
+ bool isInlineSpecified = false,
+ bool hasWrittenPrototype = true,
+ bool isConstexprSpecified = false);
+
+ static FunctionDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ DeclarationNameInfo getNameInfo() const {
+ return DeclarationNameInfo(getDeclName(), getLocation(), DNLoc);
+ }
+
+ virtual void getNameForDiagnostic(std::string &S,
+ const PrintingPolicy &Policy,
+ bool Qualified) const;
+
+ void setRangeEnd(SourceLocation E) { EndRangeLoc = E; }
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY;
+
+ /// \brief Returns true if the function has a body (definition). The
+ /// function body might be in any of the (re-)declarations of this
+ /// function. The variant that accepts a FunctionDecl pointer will
+ /// set that function declaration to the actual declaration
+ /// containing the body (if there is one).
+ bool hasBody(const FunctionDecl *&Definition) const;
+
+ virtual bool hasBody() const {
+ const FunctionDecl* Definition;
+ return hasBody(Definition);
+ }
+
+ /// hasTrivialBody - Returns whether the function has a trivial body that does
+ /// not require any specific codegen.
+ bool hasTrivialBody() const;
+
+ /// isDefined - Returns true if the function is defined at all, including
+ /// a deleted definition. Except for the behavior when the function is
+ /// deleted, behaves like hasBody.
+ bool isDefined(const FunctionDecl *&Definition) const;
+
+ virtual bool isDefined() const {
+ const FunctionDecl* Definition;
+ return isDefined(Definition);
+ }
+
+ /// getBody - Retrieve the body (definition) of the function. The
+ /// function body might be in any of the (re-)declarations of this
+ /// function. The variant that accepts a FunctionDecl pointer will
+ /// set that function declaration to the actual declaration
+ /// containing the body (if there is one).
+ /// NOTE: For checking if there is a body, use hasBody() instead, to avoid
+ /// unnecessary AST de-serialization of the body.
+ Stmt *getBody(const FunctionDecl *&Definition) const;
+
+ virtual Stmt *getBody() const {
+ const FunctionDecl* Definition;
+ return getBody(Definition);
+ }
+
+ /// isThisDeclarationADefinition - Returns whether this specific
+ /// declaration of the function is also a definition. This does not
+ /// determine whether the function has been defined (e.g., in a
+ /// previous definition); for that information, use isDefined. Note
+ /// that this returns false for a defaulted function unless that function
+ /// has been implicitly defined (possibly as deleted).
+ bool isThisDeclarationADefinition() const {
+ return IsDeleted || Body || IsLateTemplateParsed;
+ }
+
+ /// doesThisDeclarationHaveABody - Returns whether this specific
+ /// declaration of the function has a body - that is, if it is a non-
+ /// deleted definition.
+ bool doesThisDeclarationHaveABody() const {
+ return Body || IsLateTemplateParsed;
+ }
+
+ void setBody(Stmt *B);
+ void setLazyBody(uint64_t Offset) { Body = Offset; }
+
+ /// Whether this function is variadic.
+ bool isVariadic() const;
+
+ /// Whether this function is marked as virtual explicitly.
+ bool isVirtualAsWritten() const { return IsVirtualAsWritten; }
+ void setVirtualAsWritten(bool V) { IsVirtualAsWritten = V; }
+
+ /// Whether this virtual function is pure, i.e. makes the containing class
+ /// abstract.
+ bool isPure() const { return IsPure; }
+ void setPure(bool P = true);
+
+ /// Whether this templated function will be late parsed.
+ bool isLateTemplateParsed() const { return IsLateTemplateParsed; }
+ void setLateTemplateParsed(bool ILT = true) { IsLateTemplateParsed = ILT; }
+
+ /// Whether this function is "trivial" in some specialized C++ senses.
+ /// Can only be true for default constructors, copy constructors,
+ /// copy assignment operators, and destructors. Not meaningful until
+ /// the class has been fully built by Sema.
+ bool isTrivial() const { return IsTrivial; }
+ void setTrivial(bool IT) { IsTrivial = IT; }
+
+ /// Whether this function is defaulted per C++0x. Only valid for
+ /// special member functions.
+ bool isDefaulted() const { return IsDefaulted; }
+ void setDefaulted(bool D = true) { IsDefaulted = D; }
+
+ /// Whether this function is explicitly defaulted per C++0x. Only valid
+ /// for special member functions.
+ bool isExplicitlyDefaulted() const { return IsExplicitlyDefaulted; }
+ void setExplicitlyDefaulted(bool ED = true) { IsExplicitlyDefaulted = ED; }
+
+ /// Whether falling off this function implicitly returns null/zero.
+ /// If a more specific implicit return value is required, front-ends
+ /// should synthesize the appropriate return statements.
+ bool hasImplicitReturnZero() const { return HasImplicitReturnZero; }
+ void setHasImplicitReturnZero(bool IRZ) { HasImplicitReturnZero = IRZ; }
+
+ /// \brief Whether this function has a prototype, either because one
+ /// was explicitly written or because it was "inherited" by merging
+ /// a declaration without a prototype with a declaration that has a
+ /// prototype.
+ bool hasPrototype() const {
+ return HasWrittenPrototype || HasInheritedPrototype;
+ }
+
+ bool hasWrittenPrototype() const { return HasWrittenPrototype; }
+
+ /// \brief Whether this function inherited its prototype from a
+ /// previous declaration.
+ bool hasInheritedPrototype() const { return HasInheritedPrototype; }
+ void setHasInheritedPrototype(bool P = true) { HasInheritedPrototype = P; }
+
+ /// Whether this is a (C++0x) constexpr function or constexpr constructor.
+ bool isConstexpr() const { return IsConstexpr; }
+ void setConstexpr(bool IC) { IsConstexpr = IC; }
+
+ /// \brief Whether this function has been deleted.
+ ///
+ /// A function that is "deleted" (via the C++0x "= delete" syntax)
+ /// acts like a normal function, except that it cannot actually be
+ /// called or have its address taken. Deleted functions are
+ /// typically used in C++ overload resolution to attract arguments
+ /// whose type or lvalue/rvalue-ness would permit the use of a
+ /// different overload that would behave incorrectly. For example,
+ /// one might use deleted functions to ban implicit conversion from
+ /// a floating-point number to an Integer type:
+ ///
+ /// @code
+ /// struct Integer {
+ /// Integer(long); // construct from a long
+ /// Integer(double) = delete; // no construction from float or double
+ /// Integer(long double) = delete; // no construction from long double
+ /// };
+ /// @endcode
+ // If a function is deleted, its first declaration must be.
+ bool isDeleted() const { return getCanonicalDecl()->IsDeleted; }
+ bool isDeletedAsWritten() const { return IsDeleted && !IsDefaulted; }
+ void setDeletedAsWritten(bool D = true) { IsDeleted = D; }
+
+ /// \brief Determines whether this function is "main", which is the
+ /// entry point into an executable program.
+ bool isMain() const;
+
+ /// \brief Determines whether this operator new or delete is one
+ /// of the reserved global placement operators:
+ /// void *operator new(size_t, void *);
+ /// void *operator new[](size_t, void *);
+ /// void operator delete(void *, void *);
+ /// void operator delete[](void *, void *);
+ /// These functions have special behavior under [new.delete.placement]:
+ /// These functions are reserved, a C++ program may not define
+ /// functions that displace the versions in the Standard C++ library.
+ /// The provisions of [basic.stc.dynamic] do not apply to these
+ /// reserved placement forms of operator new and operator delete.
+ ///
+ /// This function must be an allocation or deallocation function.
+ bool isReservedGlobalPlacementOperator() const;
+
+ /// \brief Determines whether this function is a function with
+ /// external, C linkage.
+ bool isExternC() const;
+
+ /// \brief Determines whether this is a global function.
+ bool isGlobal() const;
+
+ void setPreviousDeclaration(FunctionDecl * PrevDecl);
+
+ virtual const FunctionDecl *getCanonicalDecl() const;
+ virtual FunctionDecl *getCanonicalDecl();
+
+ unsigned getBuiltinID() const;
+
+ // Iterator access to formal parameters.
+ unsigned param_size() const { return getNumParams(); }
+ typedef ParmVarDecl **param_iterator;
+ typedef ParmVarDecl * const *param_const_iterator;
+
+ param_iterator param_begin() { return ParamInfo; }
+ param_iterator param_end() { return ParamInfo+param_size(); }
+
+ param_const_iterator param_begin() const { return ParamInfo; }
+ param_const_iterator param_end() const { return ParamInfo+param_size(); }
+
+ /// getNumParams - Return the number of parameters this function must have
+ /// based on its FunctionType. This is the length of the ParamInfo array
+ /// after it has been created.
+ unsigned getNumParams() const;
+
+ const ParmVarDecl *getParamDecl(unsigned i) const {
+ assert(i < getNumParams() && "Illegal param #");
+ return ParamInfo[i];
+ }
+ ParmVarDecl *getParamDecl(unsigned i) {
+ assert(i < getNumParams() && "Illegal param #");
+ return ParamInfo[i];
+ }
+ void setParams(llvm::ArrayRef<ParmVarDecl *> NewParamInfo) {
+ setParams(getASTContext(), NewParamInfo);
+ }
+
+ const llvm::ArrayRef<NamedDecl*> &getDeclsInPrototypeScope() const {
+ return DeclsInPrototypeScope;
+ }
+ void setDeclsInPrototypeScope(llvm::ArrayRef<NamedDecl *> NewDecls);
+
+ /// getMinRequiredArguments - Returns the minimum number of arguments
+ /// needed to call this function. This may be fewer than the number of
+ /// function parameters, if some of the parameters have default
+ /// arguments (in C++).
+ unsigned getMinRequiredArguments() const;
+
+ QualType getResultType() const {
+ return getType()->getAs<FunctionType>()->getResultType();
+ }
+
+ /// \brief Determine the type of an expression that calls this function.
+ QualType getCallResultType() const {
+ return getType()->getAs<FunctionType>()->getCallResultType(getASTContext());
+ }
+
+ StorageClass getStorageClass() const { return StorageClass(SClass); }
+ void setStorageClass(StorageClass SC);
+
+ StorageClass getStorageClassAsWritten() const {
+ return StorageClass(SClassAsWritten);
+ }
+
+ /// \brief Determine whether the "inline" keyword was specified for this
+ /// function.
+ bool isInlineSpecified() const { return IsInlineSpecified; }
+
+ /// Set whether the "inline" keyword was specified for this function.
+ void setInlineSpecified(bool I) {
+ IsInlineSpecified = I;
+ IsInline = I;
+ }
+
+ /// Flag that this function is implicitly inline.
+ void setImplicitlyInline() {
+ IsInline = true;
+ }
+
+ /// \brief Determine whether this function should be inlined, because it is
+ /// either marked "inline" or "constexpr" or is a member function of a class
+ /// that was defined in the class body.
+ bool isInlined() const;
+
+ bool isInlineDefinitionExternallyVisible() const;
+
+ bool doesDeclarationForceExternallyVisibleDefinition() const;
+
+ /// isOverloadedOperator - Whether this function declaration
+ /// represents a C++ overloaded operator, e.g., "operator+".
+ bool isOverloadedOperator() const {
+ return getOverloadedOperator() != OO_None;
+ }
+
+ OverloadedOperatorKind getOverloadedOperator() const;
+
+ const IdentifierInfo *getLiteralIdentifier() const;
+
+ /// \brief If this function is an instantiation of a member function
+ /// of a class template specialization, retrieves the function from
+ /// which it was instantiated.
+ ///
+ /// This routine will return non-NULL for (non-templated) member
+ /// functions of class templates and for instantiations of function
+ /// templates. For example, given:
+ ///
+ /// \code
+ /// template<typename T>
+ /// struct X {
+ /// void f(T);
+ /// };
+ /// \endcode
+ ///
+ /// The declaration for X<int>::f is a (non-templated) FunctionDecl
+ /// whose parent is the class template specialization X<int>. For
+ /// this declaration, getInstantiatedFromMemberFunction() will return
+ /// the FunctionDecl X<T>::f. When a complete definition of
+ /// X<int>::f is required, it will be instantiated from the
+ /// declaration returned by getInstantiatedFromMemberFunction().
+ FunctionDecl *getInstantiatedFromMemberFunction() const;
+
+ /// \brief What kind of templated function this is.
+ TemplatedKind getTemplatedKind() const;
+
+ /// \brief If this function is an instantiation of a member function of a
+ /// class template specialization, retrieves the member specialization
+ /// information.
+ MemberSpecializationInfo *getMemberSpecializationInfo() const;
+
+ /// \brief Specify that this record is an instantiation of the
+ /// member function FD.
+ void setInstantiationOfMemberFunction(FunctionDecl *FD,
+ TemplateSpecializationKind TSK) {
+ setInstantiationOfMemberFunction(getASTContext(), FD, TSK);
+ }
+
+ /// \brief Retrieves the function template that is described by this
+ /// function declaration.
+ ///
+ /// Every function template is represented as a FunctionTemplateDecl
+ /// and a FunctionDecl (or something derived from FunctionDecl). The
+ /// former contains template properties (such as the template
+ /// parameter lists) while the latter contains the actual
+ /// description of the template's
+ /// contents. FunctionTemplateDecl::getTemplatedDecl() retrieves the
+ /// FunctionDecl that describes the function template,
+ /// getDescribedFunctionTemplate() retrieves the
+ /// FunctionTemplateDecl from a FunctionDecl.
+ FunctionTemplateDecl *getDescribedFunctionTemplate() const {
+ return TemplateOrSpecialization.dyn_cast<FunctionTemplateDecl*>();
+ }
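+
+ // For example (illustrative): for "template<typename T> void f(T);" the
+ // templated FunctionDecl and its FunctionTemplateDecl refer to each other:
+ //   FD->getDescribedFunctionTemplate()->getTemplatedDecl() == FD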
+
+ void setDescribedFunctionTemplate(FunctionTemplateDecl *Template) {
+ TemplateOrSpecialization = Template;
+ }
+
+ /// \brief Determine whether this function is a function template
+ /// specialization.
+ bool isFunctionTemplateSpecialization() const {
+ return getPrimaryTemplate() != 0;
+ }
+
+ /// \brief Retrieve the class scope template pattern that this function
+ /// template specialization is instantiated from.
+ FunctionDecl *getClassScopeSpecializationPattern() const;
+
+ /// \brief If this function is actually a function template specialization,
+ /// retrieve information about this function template specialization.
+ /// Otherwise, returns NULL.
+ FunctionTemplateSpecializationInfo *getTemplateSpecializationInfo() const {
+ return TemplateOrSpecialization.
+ dyn_cast<FunctionTemplateSpecializationInfo*>();
+ }
+
+ /// \brief Determines whether this function is a function template
+ /// specialization or a member of a class template specialization that can
+ /// be implicitly instantiated.
+ bool isImplicitlyInstantiable() const;
+
+ /// \brief Determines if the given function was instantiated from a
+ /// function template.
+ bool isTemplateInstantiation() const;
+
+ /// \brief Retrieve the function declaration from which this function could
+ /// be instantiated, if it is an instantiation (rather than a non-template
+ /// or a specialization, for example).
+ FunctionDecl *getTemplateInstantiationPattern() const;
+
+ /// \brief Retrieve the primary template that this function template
+ /// specialization either specializes or was instantiated from.
+ ///
+ /// If this function declaration is not a function template specialization,
+ /// returns NULL.
+ FunctionTemplateDecl *getPrimaryTemplate() const;
+
+ /// \brief Retrieve the template arguments used to produce this function
+ /// template specialization from the primary template.
+ ///
+ /// If this function declaration is not a function template specialization,
+ /// returns NULL.
+ const TemplateArgumentList *getTemplateSpecializationArgs() const;
+
+ /// \brief Retrieve the template argument list as written in the sources,
+ /// if any.
+ ///
+ /// If this function declaration is not a function template specialization
+ /// or if it had no explicit template argument list, returns NULL.
+ /// Note that an explicit template argument list may be written empty,
+ /// e.g., template<> void foo<>(char* s);
+ const ASTTemplateArgumentListInfo*
+ getTemplateSpecializationArgsAsWritten() const;
+
+ /// \brief Specify that this function declaration is actually a function
+ /// template specialization.
+ ///
+ /// \param Template the function template that this function template
+ /// specialization specializes.
+ ///
+ /// \param TemplateArgs the template arguments that produced this
+ /// function template specialization from the template.
+ ///
+ /// \param InsertPos If non-NULL, the position in the function template
+ /// specialization set where the function template specialization data will
+ /// be inserted.
+ ///
+ /// \param TSK the kind of template specialization this is.
+ ///
+ /// \param TemplateArgsAsWritten location info of template arguments.
+ ///
+ /// \param PointOfInstantiation point at which the function template
+ /// specialization was first instantiated.
+ void setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
+ const TemplateArgumentList *TemplateArgs,
+ void *InsertPos,
+ TemplateSpecializationKind TSK = TSK_ImplicitInstantiation,
+ const TemplateArgumentListInfo *TemplateArgsAsWritten = 0,
+ SourceLocation PointOfInstantiation = SourceLocation()) {
+ setFunctionTemplateSpecialization(getASTContext(), Template, TemplateArgs,
+ InsertPos, TSK, TemplateArgsAsWritten,
+ PointOfInstantiation);
+ }
+
+ /// \brief Specifies that this function declaration is actually a
+ /// dependent function template specialization.
+ void setDependentTemplateSpecialization(ASTContext &Context,
+ const UnresolvedSetImpl &Templates,
+ const TemplateArgumentListInfo &TemplateArgs);
+
+ DependentFunctionTemplateSpecializationInfo *
+ getDependentSpecializationInfo() const {
+ return TemplateOrSpecialization.
+ dyn_cast<DependentFunctionTemplateSpecializationInfo*>();
+ }
+
+ /// \brief Determine what kind of template instantiation this function
+ /// represents.
+ TemplateSpecializationKind getTemplateSpecializationKind() const;
+
+ /// \brief Determine what kind of template instantiation this function
+ /// represents.
+ void setTemplateSpecializationKind(TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation = SourceLocation());
+
+ /// \brief Retrieve the (first) point of instantiation of a function template
+ /// specialization or a member of a class template specialization.
+ ///
+ /// \returns the first point of instantiation, if this function was
+ /// instantiated from a template; otherwise, returns an invalid source
+ /// location.
+ SourceLocation getPointOfInstantiation() const;
+
+ /// \brief Determine whether this is or was instantiated from an out-of-line
+ /// definition of a member function.
+ virtual bool isOutOfLine() const;
+
+ /// \brief Identify a memory copying or setting function.
+ /// If the given function is a memory copy or setting function, returns
+ /// the corresponding Builtin ID. If the function is not a memory function,
+ /// returns 0.
+ unsigned getMemoryFunctionKind() const;
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const FunctionDecl *D) { return true; }
+ static bool classofKind(Kind K) {
+ return K >= firstFunction && K <= lastFunction;
+ }
+ static DeclContext *castToDeclContext(const FunctionDecl *D) {
+ return static_cast<DeclContext *>(const_cast<FunctionDecl*>(D));
+ }
+ static FunctionDecl *castFromDeclContext(const DeclContext *DC) {
+ return static_cast<FunctionDecl *>(const_cast<DeclContext*>(DC));
+ }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
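+
+// Usage sketch (illustrative only; countDefinedParams is a hypothetical
+// helper). hasBody() searches every redeclaration, so the body may live on a
+// FunctionDecl other than the one in hand:
+//
+//   static unsigned countDefinedParams(const clang::FunctionDecl *FD) {
+//     const clang::FunctionDecl *Def = 0;
+//     if (!FD->hasBody(Def))
+//       return 0;                     // no definition anywhere in the chain
+//     unsigned N = 0;
+//     for (clang::FunctionDecl::param_const_iterator
+//            I = Def->param_begin(), E = Def->param_end(); I != E; ++I)
+//       ++N;
+//     return N;                       // same as Def->getNumParams()
+//   }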
+
+
+/// FieldDecl - An instance of this class is created by Sema::ActOnField to
+/// represent a member of a struct/union/class.
+class FieldDecl : public DeclaratorDecl {
+ // FIXME: This can be packed into the bitfields in Decl.
+ bool Mutable : 1;
+ mutable unsigned CachedFieldIndex : 31;
+
+ /// \brief A pointer to either the in-class initializer for this field (if
+ /// the boolean value is false), or the bit width expression for this bit
+ /// field (if the boolean value is true).
+ ///
+ /// We can safely combine these two because in-class initializers are not
+ /// permitted for bit-fields.
+ ///
+ /// If the boolean is false and the initializer is null, then this field has
+ /// an in-class initializer which has not yet been parsed and attached.
+ llvm::PointerIntPair<Expr *, 1, bool> InitializerOrBitWidth;
+protected:
+ FieldDecl(Kind DK, DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ QualType T, TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
+ bool HasInit)
+ : DeclaratorDecl(DK, DC, IdLoc, Id, T, TInfo, StartLoc),
+ Mutable(Mutable), CachedFieldIndex(0),
+ InitializerOrBitWidth(BW, !HasInit) {
+ assert(!(BW && HasInit) && "got initializer for bitfield");
+ }
+
+public:
+ static FieldDecl *Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, QualType T,
+ TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
+ bool HasInit);
+
+ static FieldDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ /// getFieldIndex - Returns the index of this field within its record,
+ /// as appropriate for passing to ASTRecordLayout::getFieldOffset.
+ unsigned getFieldIndex() const;
+
+ /// isMutable - Determines whether this field is mutable (C++ only).
+ bool isMutable() const { return Mutable; }
+
+ /// \brief Set whether this field is mutable (C++ only).
+ void setMutable(bool M) { Mutable = M; }
+
+ /// isBitfield - Determines whether this field is a bitfield.
+ bool isBitField() const {
+ return InitializerOrBitWidth.getInt() && InitializerOrBitWidth.getPointer();
+ }
+
+ /// @brief Determines whether this is an unnamed bitfield.
+ bool isUnnamedBitfield() const { return isBitField() && !getDeclName(); }
+
+ /// isAnonymousStructOrUnion - Determines whether this field is a
+ /// representative for an anonymous struct or union. Such fields are
+ /// unnamed and are implicitly generated by the implementation to
+ /// store the data for the anonymous union or struct.
+ bool isAnonymousStructOrUnion() const;
+
+ Expr *getBitWidth() const {
+ return isBitField() ? InitializerOrBitWidth.getPointer() : 0;
+ }
+ unsigned getBitWidthValue(const ASTContext &Ctx) const;
+ void setBitWidth(Expr *BW) {
+ assert(!InitializerOrBitWidth.getPointer() &&
+ "bit width or initializer already set");
+ InitializerOrBitWidth.setPointer(BW);
+ InitializerOrBitWidth.setInt(1);
+ }
+ /// removeBitWidth - Remove the bitfield width from this member.
+ void removeBitWidth() {
+ assert(isBitField() && "no bit width to remove");
+ InitializerOrBitWidth.setPointer(0);
+ }
+
+ /// hasInClassInitializer - Determine whether this member has a C++0x in-class
+ /// initializer.
+ bool hasInClassInitializer() const {
+ return !InitializerOrBitWidth.getInt();
+ }
+ /// getInClassInitializer - Get the C++0x in-class initializer for this
+ /// member, or null if one has not been set. If a valid declaration has an
+ /// in-class initializer, but this returns null, then we have not parsed and
+ /// attached it yet.
+ Expr *getInClassInitializer() const {
+ return hasInClassInitializer() ? InitializerOrBitWidth.getPointer() : 0;
+ }
+ /// setInClassInitializer - Set the C++0x in-class initializer for this
+ /// member.
+ void setInClassInitializer(Expr *Init);
+ /// removeInClassInitializer - Remove the C++0x in-class initializer from this
+ /// member.
+ void removeInClassInitializer() {
+ assert(!InitializerOrBitWidth.getInt() && "no initializer to remove");
+ InitializerOrBitWidth.setPointer(0);
+ InitializerOrBitWidth.setInt(1);
+ }
+
+ /// getParent - Returns the parent of this field declaration, which
+ /// is the struct in which this field is defined.
+ const RecordDecl *getParent() const {
+ return cast<RecordDecl>(getDeclContext());
+ }
+
+ RecordDecl *getParent() {
+ return cast<RecordDecl>(getDeclContext());
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const FieldDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K >= firstField && K <= lastField; }
+};
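+
+// Usage sketch (illustrative only; fieldPayload is a hypothetical helper).
+// A FieldDecl stores either a bit width or an in-class initializer, never
+// both, which is why the accessors above are mutually exclusive:
+//
+//   static const clang::Expr *fieldPayload(const clang::FieldDecl *FD) {
+//     if (FD->isBitField())
+//       return FD->getBitWidth();           // the width expression
+//     if (FD->hasInClassInitializer())
+//       return FD->getInClassInitializer(); // null if not yet parsed/attached
+//     return 0;
+//   }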
+
+/// EnumConstantDecl - An instance of this object exists for each enum constant
+/// that is defined. For example, in "enum X {a,b}", each of a/b are
+/// EnumConstantDecl's, X is an instance of EnumDecl, and the type of a/b is a
+/// TagType for the X EnumDecl.
+class EnumConstantDecl : public ValueDecl {
+ Stmt *Init; // an integer constant expression
+ llvm::APSInt Val; // The value.
+protected:
+ EnumConstantDecl(DeclContext *DC, SourceLocation L,
+ IdentifierInfo *Id, QualType T, Expr *E,
+ const llvm::APSInt &V)
+ : ValueDecl(EnumConstant, DC, L, Id, T), Init((Stmt*)E), Val(V) {}
+
+public:
+
+ static EnumConstantDecl *Create(ASTContext &C, EnumDecl *DC,
+ SourceLocation L, IdentifierInfo *Id,
+ QualType T, Expr *E,
+ const llvm::APSInt &V);
+ static EnumConstantDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ const Expr *getInitExpr() const { return (const Expr*) Init; }
+ Expr *getInitExpr() { return (Expr*) Init; }
+ const llvm::APSInt &getInitVal() const { return Val; }
+
+ void setInitExpr(Expr *E) { Init = (Stmt*) E; }
+ void setInitVal(const llvm::APSInt &V) { Val = V; }
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const EnumConstantDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == EnumConstant; }
+
+ friend class StmtIteratorBase;
+};
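+
+// Usage sketch (illustrative only; enumeratorValue is a hypothetical helper).
+// Every enumerator has a value, even when no initializer was written, so
+// getInitVal() is always meaningful while getInitExpr() may be null:
+//
+//   static llvm::APSInt enumeratorValue(const clang::EnumConstantDecl *ECD) {
+//     return ECD->getInitVal();
+//   }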
+
+/// IndirectFieldDecl - An instance of this class is created to represent a
+/// field injected from an anonymous union/struct into the parent scope.
+/// IndirectFieldDecls are always implicit.
+class IndirectFieldDecl : public ValueDecl {
+ virtual void anchor();
+ NamedDecl **Chaining;
+ unsigned ChainingSize;
+
+ IndirectFieldDecl(DeclContext *DC, SourceLocation L,
+ DeclarationName N, QualType T,
+ NamedDecl **CH, unsigned CHS)
+ : ValueDecl(IndirectField, DC, L, N, T), Chaining(CH), ChainingSize(CHS) {}
+
+public:
+ static IndirectFieldDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *Id,
+ QualType T, NamedDecl **CH, unsigned CHS);
+
+ static IndirectFieldDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ typedef NamedDecl * const *chain_iterator;
+ chain_iterator chain_begin() const { return Chaining; }
+ chain_iterator chain_end() const { return Chaining+ChainingSize; }
+
+ unsigned getChainingSize() const { return ChainingSize; }
+
+ FieldDecl *getAnonField() const {
+ assert(ChainingSize >= 2);
+ return cast<FieldDecl>(Chaining[ChainingSize - 1]);
+ }
+
+ VarDecl *getVarDecl() const {
+ assert(ChainingSize >= 2);
+ return dyn_cast<VarDecl>(*chain_begin());
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const IndirectFieldDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == IndirectField; }
+ friend class ASTDeclReader;
+};
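+// Example (illustrative sketch): walking the chain of an IndirectFieldDecl
+// from the anonymous object down to the named field; 'IFD' is a hypothetical
+// IndirectFieldDecl*.
+//
+//   for (IndirectFieldDecl::chain_iterator I = IFD->chain_begin(),
+//                                          E = IFD->chain_end(); I != E; ++I)
+//     (*I)->dump();                        // each link is a NamedDecl
+//   FieldDecl *Leaf = IFD->getAnonField(); // the field actually referenced
+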
+
+/// TypeDecl - Represents a declaration of a type.
+///
+class TypeDecl : public NamedDecl {
+ virtual void anchor();
+ /// TypeForDecl - This indicates the Type object that represents
+ /// this TypeDecl. It is a cache maintained by
+ /// ASTContext::getTypedefType, ASTContext::getTagDeclType, and
+ /// ASTContext::getTemplateTypeParmType, and TemplateTypeParmDecl.
+ mutable const Type *TypeForDecl;
+ /// LocStart - The start of the source range for this declaration.
+ SourceLocation LocStart;
+ friend class ASTContext;
+ friend class DeclContext;
+ friend class TagDecl;
+ friend class TemplateTypeParmDecl;
+ friend class TagType;
+ friend class ASTReader;
+
+protected:
+ TypeDecl(Kind DK, DeclContext *DC, SourceLocation L, IdentifierInfo *Id,
+ SourceLocation StartL = SourceLocation())
+ : NamedDecl(DK, DC, L, Id), TypeForDecl(0), LocStart(StartL) {}
+
+public:
+ // Low-level accessor
+ const Type *getTypeForDecl() const { return TypeForDecl; }
+ void setTypeForDecl(const Type *TD) { TypeForDecl = TD; }
+
+ SourceLocation getLocStart() const LLVM_READONLY { return LocStart; }
+ void setLocStart(SourceLocation L) { LocStart = L; }
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
+ if (LocStart.isValid())
+ return SourceRange(LocStart, getLocation());
+ else
+ return SourceRange(getLocation());
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const TypeDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K >= firstType && K <= lastType; }
+};
+
+
+/// Base class for declarations which introduce a typedef-name.
+class TypedefNameDecl : public TypeDecl, public Redeclarable<TypedefNameDecl> {
+ virtual void anchor();
+ /// UnderlyingType - This is the type the typedef is set to.
+ TypeSourceInfo *TInfo;
+
+protected:
+ TypedefNameDecl(Kind DK, DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ TypeSourceInfo *TInfo)
+ : TypeDecl(DK, DC, IdLoc, Id, StartLoc), TInfo(TInfo) {}
+
+ typedef Redeclarable<TypedefNameDecl> redeclarable_base;
+ virtual TypedefNameDecl *getNextRedeclaration() {
+ return RedeclLink.getNext();
+ }
+ virtual TypedefNameDecl *getPreviousDeclImpl() {
+ return getPreviousDecl();
+ }
+ virtual TypedefNameDecl *getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+ }
+
+public:
+ typedef redeclarable_base::redecl_iterator redecl_iterator;
+ using redeclarable_base::redecls_begin;
+ using redeclarable_base::redecls_end;
+ using redeclarable_base::getPreviousDecl;
+ using redeclarable_base::getMostRecentDecl;
+
+ TypeSourceInfo *getTypeSourceInfo() const {
+ return TInfo;
+ }
+
+ /// Retrieves the canonical declaration of this typedef-name.
+ TypedefNameDecl *getCanonicalDecl() {
+ return getFirstDeclaration();
+ }
+ const TypedefNameDecl *getCanonicalDecl() const {
+ return getFirstDeclaration();
+ }
+
+ QualType getUnderlyingType() const {
+ return TInfo->getType();
+ }
+ void setTypeSourceInfo(TypeSourceInfo *newType) {
+ TInfo = newType;
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const TypedefNameDecl *D) { return true; }
+ static bool classofKind(Kind K) {
+ return K >= firstTypedefName && K <= lastTypedefName;
+ }
+};
+
+/// TypedefDecl - Represents the declaration of a typedef-name via the 'typedef'
+/// type specifier.
+class TypedefDecl : public TypedefNameDecl {
+ TypedefDecl(DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, TypeSourceInfo *TInfo)
+ : TypedefNameDecl(Typedef, DC, StartLoc, IdLoc, Id, TInfo) {}
+
+public:
+ static TypedefDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, TypeSourceInfo *TInfo);
+ static TypedefDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const TypedefDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == Typedef; }
+};
+
+/// TypeAliasDecl - Represents the declaration of a typedef-name via a C++0x
+/// alias-declaration.
+class TypeAliasDecl : public TypedefNameDecl {
+ TypeAliasDecl(DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, TypeSourceInfo *TInfo)
+ : TypedefNameDecl(TypeAlias, DC, StartLoc, IdLoc, Id, TInfo) {}
+
+public:
+ static TypeAliasDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, TypeSourceInfo *TInfo);
+ static TypeAliasDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const TypeAliasDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == TypeAlias; }
+};
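+// Example (illustrative sketch): TypedefDecl ("typedef int T;") and
+// TypeAliasDecl ("using T = int;") share the TypedefNameDecl interface, so
+// the underlying type is read the same way for both; 'TND' is a hypothetical
+// TypedefNameDecl*.
+//
+//   QualType Underlying = TND->getUnderlyingType();
+//   TypeSourceInfo *TSI = TND->getTypeSourceInfo();  // type as written
+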
+
+/// TagDecl - Represents the declaration of a struct/union/class/enum.
+class TagDecl
+ : public TypeDecl, public DeclContext, public Redeclarable<TagDecl> {
+public:
+ // This is really ugly.
+ typedef TagTypeKind TagKind;
+
+private:
+ // FIXME: This can be packed into the bitfields in Decl.
+ /// TagDeclKind - The TagKind enum.
+ unsigned TagDeclKind : 2;
+
+ /// IsCompleteDefinition - True if this is a definition ("struct foo
+ /// {};"), false if it is a declaration ("struct foo;"). It is not
+ /// a definition until the definition has been fully processed.
+ bool IsCompleteDefinition : 1;
+
+protected:
+ /// IsBeingDefined - True if this is currently being defined.
+ bool IsBeingDefined : 1;
+
+private:
+ /// IsEmbeddedInDeclarator - True if this tag declaration is
+ /// "embedded" (i.e., defined or declared for the very first time)
+ /// in the syntax of a declarator.
+ bool IsEmbeddedInDeclarator : 1;
+
+ /// \brief True if this tag is free standing, e.g. "struct foo;".
+ bool IsFreeStanding : 1;
+
+protected:
+ // These are used by (and only defined for) EnumDecl.
+ unsigned NumPositiveBits : 8;
+ unsigned NumNegativeBits : 8;
+
+ /// IsScoped - True if this tag declaration is a scoped enumeration. Only
+ /// possible in C++11 mode.
+ bool IsScoped : 1;
+ /// IsScopedUsingClassTag - If this tag declaration is a scoped enum,
+ /// then this is true if the scoped enum was declared using the class
+ /// tag, false if it was declared with the struct tag. No meaning is
+ /// associated if this tag declaration is not a scoped enum.
+ bool IsScopedUsingClassTag : 1;
+
+ /// IsFixed - True if this is an enumeration with fixed underlying type. Only
+ /// possible in C++11 or Microsoft extensions mode.
+ bool IsFixed : 1;
+
+private:
+ SourceLocation RBraceLoc;
+
+ // A struct representing syntactic qualifier info,
+ // to be used for the (uncommon) case of out-of-line declarations.
+ typedef QualifierInfo ExtInfo;
+
+ /// TypedefNameDeclOrQualifier - If the (out-of-line) tag declaration name
+ /// is qualified, it points to the qualifier info (nns and range);
+ /// otherwise, if the tag declaration is anonymous and it is part of
+ /// a typedef or alias, it points to the TypedefNameDecl (used for mangling);
+ /// otherwise, it is a null (TypedefNameDecl) pointer.
+ llvm::PointerUnion<TypedefNameDecl*, ExtInfo*> TypedefNameDeclOrQualifier;
+
+ bool hasExtInfo() const { return TypedefNameDeclOrQualifier.is<ExtInfo*>(); }
+ ExtInfo *getExtInfo() { return TypedefNameDeclOrQualifier.get<ExtInfo*>(); }
+ const ExtInfo *getExtInfo() const {
+ return TypedefNameDeclOrQualifier.get<ExtInfo*>();
+ }
+
+protected:
+ TagDecl(Kind DK, TagKind TK, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *Id,
+ TagDecl *PrevDecl, SourceLocation StartL)
+ : TypeDecl(DK, DC, L, Id, StartL), DeclContext(DK),
+ TypedefNameDeclOrQualifier((TypedefNameDecl*) 0) {
+ assert((DK != Enum || TK == TTK_Enum) &&
+ "EnumDecl not matched with TTK_Enum");
+ TagDeclKind = TK;
+ IsCompleteDefinition = false;
+ IsBeingDefined = false;
+ IsEmbeddedInDeclarator = false;
+ IsFreeStanding = false;
+ setPreviousDeclaration(PrevDecl);
+ }
+
+ typedef Redeclarable<TagDecl> redeclarable_base;
+ virtual TagDecl *getNextRedeclaration() { return RedeclLink.getNext(); }
+ virtual TagDecl *getPreviousDeclImpl() {
+ return getPreviousDecl();
+ }
+ virtual TagDecl *getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+ }
+
+ /// @brief Completes the definition of this tag declaration.
+ ///
+ /// This is a helper function for derived classes.
+ void completeDefinition();
+
+public:
+ typedef redeclarable_base::redecl_iterator redecl_iterator;
+ using redeclarable_base::redecls_begin;
+ using redeclarable_base::redecls_end;
+ using redeclarable_base::getPreviousDecl;
+ using redeclarable_base::getMostRecentDecl;
+
+ SourceLocation getRBraceLoc() const { return RBraceLoc; }
+ void setRBraceLoc(SourceLocation L) { RBraceLoc = L; }
+
+ /// getInnerLocStart - Return SourceLocation representing start of source
+ /// range ignoring outer template declarations.
+ SourceLocation getInnerLocStart() const { return getLocStart(); }
+
+ /// getOuterLocStart - Return SourceLocation representing start of source
+ /// range taking into account any outer template declarations.
+ SourceLocation getOuterLocStart() const;
+ virtual SourceRange getSourceRange() const LLVM_READONLY;
+
+ virtual TagDecl* getCanonicalDecl();
+ const TagDecl* getCanonicalDecl() const {
+ return const_cast<TagDecl*>(this)->getCanonicalDecl();
+ }
+
+ /// isThisDeclarationADefinition() - Return true if this declaration
+ /// is a complete definition of the type. Provided for consistency.
+ bool isThisDeclarationADefinition() const {
+ return isCompleteDefinition();
+ }
+
+ /// isCompleteDefinition - Return true if this decl has its body
+ /// fully specified.
+ bool isCompleteDefinition() const {
+ return IsCompleteDefinition;
+ }
+
+ /// isBeingDefined - Return true if this decl is currently being defined.
+ bool isBeingDefined() const {
+ return IsBeingDefined;
+ }
+
+ bool isEmbeddedInDeclarator() const {
+ return IsEmbeddedInDeclarator;
+ }
+ void setEmbeddedInDeclarator(bool isInDeclarator) {
+ IsEmbeddedInDeclarator = isInDeclarator;
+ }
+
+ bool isFreeStanding() const { return IsFreeStanding; }
+ void setFreeStanding(bool isFreeStanding = true) {
+ IsFreeStanding = isFreeStanding;
+ }
+
+ /// \brief Whether this declaration declares a type that is
+ /// dependent, i.e., a type that somehow depends on template
+ /// parameters.
+ bool isDependentType() const { return isDependentContext(); }
+
+ /// @brief Starts the definition of this tag declaration.
+ ///
+ /// This method should be invoked at the beginning of the definition
+ /// of this tag declaration. It will set the tag type into a state
+ /// where it is in the process of being defined.
+ void startDefinition();
+
+ /// getDefinition - Returns the TagDecl that actually defines this
+ /// struct/union/class/enum. When determining whether or not a
+ /// struct/union/class/enum has a definition, one should use this
+ /// method as opposed to 'isCompleteDefinition'. 'isCompleteDefinition'
+ /// indicates whether or not a specific TagDecl is the defining declaration,
+ /// not whether or not the struct/union/class/enum type is defined.
+ /// This method returns NULL if there is no TagDecl that defines
+ /// the struct/union/class/enum.
+ TagDecl *getDefinition() const;
+
+ void setCompleteDefinition(bool V) { IsCompleteDefinition = V; }
+
+ const char *getKindName() const {
+ return TypeWithKeyword::getTagTypeKindName(getTagKind());
+ }
+
+ TagKind getTagKind() const {
+ return TagKind(TagDeclKind);
+ }
+
+ void setTagKind(TagKind TK) { TagDeclKind = TK; }
+
+ bool isStruct() const { return getTagKind() == TTK_Struct; }
+ bool isClass() const { return getTagKind() == TTK_Class; }
+ bool isUnion() const { return getTagKind() == TTK_Union; }
+ bool isEnum() const { return getTagKind() == TTK_Enum; }
+
+ TypedefNameDecl *getTypedefNameForAnonDecl() const {
+ return hasExtInfo() ? 0 :
+ TypedefNameDeclOrQualifier.get<TypedefNameDecl*>();
+ }
+
+ void setTypedefNameForAnonDecl(TypedefNameDecl *TDD);
+
+ /// \brief Retrieve the nested-name-specifier that qualifies the name of this
+ /// declaration, if it was present in the source.
+ NestedNameSpecifier *getQualifier() const {
+ return hasExtInfo() ? getExtInfo()->QualifierLoc.getNestedNameSpecifier()
+ : 0;
+ }
+
+ /// \brief Retrieve the nested-name-specifier (with source-location
+ /// information) that qualifies the name of this declaration, if it was
+ /// present in the source.
+ NestedNameSpecifierLoc getQualifierLoc() const {
+ return hasExtInfo() ? getExtInfo()->QualifierLoc
+ : NestedNameSpecifierLoc();
+ }
+
+ void setQualifierInfo(NestedNameSpecifierLoc QualifierLoc);
+
+ unsigned getNumTemplateParameterLists() const {
+ return hasExtInfo() ? getExtInfo()->NumTemplParamLists : 0;
+ }
+ TemplateParameterList *getTemplateParameterList(unsigned i) const {
+ assert(i < getNumTemplateParameterLists());
+ return getExtInfo()->TemplParamLists[i];
+ }
+ void setTemplateParameterListsInfo(ASTContext &Context, unsigned NumTPLists,
+ TemplateParameterList **TPLists);
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const TagDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K >= firstTag && K <= lastTag; }
+
+ static DeclContext *castToDeclContext(const TagDecl *D) {
+ return static_cast<DeclContext *>(const_cast<TagDecl*>(D));
+ }
+ static TagDecl *castFromDeclContext(const DeclContext *DC) {
+ return static_cast<TagDecl *>(const_cast<DeclContext*>(DC));
+ }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
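+// Example (illustrative sketch): distinguishing a mere declaration of a tag
+// from its definition; 'TD' is a hypothetical TagDecl*.
+//
+//   if (TagDecl *Def = TD->getDefinition()) {
+//     // 'Def' is the declaration that owns the body ("struct foo { ... };").
+//     llvm::errs() << Def->getKindName() << "\n";  // "struct", "union", ...
+//   } else if (TD->isBeingDefined()) {
+//     ;  // currently inside the braces of the definition
+//   }
+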
+
+/// EnumDecl - Represents an enum. In C++11, enums can be forward-declared
+/// with a fixed underlying type, and in C we allow them to be forward-declared
+/// with no underlying type as an extension.
+class EnumDecl : public TagDecl {
+ virtual void anchor();
+ /// IntegerType - This represent the integer type that the enum corresponds
+ /// to for code generation purposes. Note that the enumerator constants may
+ /// have a different type than this does.
+ ///
+ /// If the underlying integer type was explicitly stated in the source
+ /// code, this is a TypeSourceInfo* for that type. Otherwise this type
+ /// was automatically deduced somehow, and this is a Type*.
+ ///
+ /// Normally if IsFixed(), this would contain a TypeSourceInfo*, but in
+ /// some cases it won't.
+ ///
+ /// The underlying type of an enumeration never has any qualifiers, so
+ /// we can get away with just storing a raw Type*, and thus save an
+ /// extra pointer when TypeSourceInfo is needed.
+
+ llvm::PointerUnion<const Type*, TypeSourceInfo*> IntegerType;
+
+ /// PromotionType - The integer type that values of this type should
+ /// promote to. In C, enumerators are generally of an integer type
+ /// directly, but gcc-style large enumerators (and all enumerators
+ /// in C++) are of the enum type instead.
+ QualType PromotionType;
+
+ /// \brief If this enumeration is an instantiation of a member enumeration
+ /// of a class template specialization, this is the member specialization
+ /// information.
+ MemberSpecializationInfo *SpecializationInfo;
+
+ EnumDecl(DeclContext *DC, SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, EnumDecl *PrevDecl,
+ bool Scoped, bool ScopedUsingClassTag, bool Fixed)
+ : TagDecl(Enum, TTK_Enum, DC, IdLoc, Id, PrevDecl, StartLoc),
+ SpecializationInfo(0) {
+ assert(Scoped || !ScopedUsingClassTag);
+ IntegerType = (const Type*)0;
+ NumNegativeBits = 0;
+ NumPositiveBits = 0;
+ IsScoped = Scoped;
+ IsScopedUsingClassTag = ScopedUsingClassTag;
+ IsFixed = Fixed;
+ }
+
+ void setInstantiationOfMemberEnum(ASTContext &C, EnumDecl *ED,
+ TemplateSpecializationKind TSK);
+public:
+ EnumDecl *getCanonicalDecl() {
+ return cast<EnumDecl>(TagDecl::getCanonicalDecl());
+ }
+ const EnumDecl *getCanonicalDecl() const {
+ return cast<EnumDecl>(TagDecl::getCanonicalDecl());
+ }
+
+ const EnumDecl *getPreviousDecl() const {
+ return cast_or_null<EnumDecl>(TagDecl::getPreviousDecl());
+ }
+ EnumDecl *getPreviousDecl() {
+ return cast_or_null<EnumDecl>(TagDecl::getPreviousDecl());
+ }
+
+ const EnumDecl *getMostRecentDecl() const {
+ return cast<EnumDecl>(TagDecl::getMostRecentDecl());
+ }
+ EnumDecl *getMostRecentDecl() {
+ return cast<EnumDecl>(TagDecl::getMostRecentDecl());
+ }
+
+ EnumDecl *getDefinition() const {
+ return cast_or_null<EnumDecl>(TagDecl::getDefinition());
+ }
+
+ static EnumDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, EnumDecl *PrevDecl,
+ bool IsScoped, bool IsScopedUsingClassTag,
+ bool IsFixed);
+ static EnumDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ /// completeDefinition - When created, the EnumDecl corresponds to a
+ /// forward-declared enum. This method is used to mark the
+ /// declaration as being defined; its enumerators have already been
+ /// added (via DeclContext::addDecl). NewType is the new underlying
+ /// type of the enumeration type.
+ void completeDefinition(QualType NewType,
+ QualType PromotionType,
+ unsigned NumPositiveBits,
+ unsigned NumNegativeBits);
+
+ // enumerator_iterator - Iterates through the enumerators of this
+ // enumeration.
+ typedef specific_decl_iterator<EnumConstantDecl> enumerator_iterator;
+
+ enumerator_iterator enumerator_begin() const {
+ const EnumDecl *E = getDefinition();
+ if (!E)
+ E = this;
+ return enumerator_iterator(E->decls_begin());
+ }
+
+ enumerator_iterator enumerator_end() const {
+ const EnumDecl *E = getDefinition();
+ if (!E)
+ E = this;
+ return enumerator_iterator(E->decls_end());
+ }
+
+ /// getPromotionType - Return the integer type that enumerators
+ /// should promote to.
+ QualType getPromotionType() const { return PromotionType; }
+
+ /// \brief Set the promotion type.
+ void setPromotionType(QualType T) { PromotionType = T; }
+
+ /// getIntegerType - Return the integer type this enum decl corresponds to.
+ /// This returns a null QualType for a forward-declared enum.
+ QualType getIntegerType() const {
+ if (!IntegerType)
+ return QualType();
+ if (const Type* T = IntegerType.dyn_cast<const Type*>())
+ return QualType(T, 0);
+ return IntegerType.get<TypeSourceInfo*>()->getType();
+ }
+
+ /// \brief Set the underlying integer type.
+ void setIntegerType(QualType T) { IntegerType = T.getTypePtrOrNull(); }
+
+ /// \brief Set the underlying integer type source info.
+ void setIntegerTypeSourceInfo(TypeSourceInfo* TInfo) { IntegerType = TInfo; }
+
+ /// \brief Return the type source info for the underlying integer type,
+ /// or null if no type source info exists.
+ TypeSourceInfo* getIntegerTypeSourceInfo() const {
+ return IntegerType.dyn_cast<TypeSourceInfo*>();
+ }
+
+ /// \brief Returns the width in bits required to store all the
+ /// non-negative enumerators of this enum.
+ unsigned getNumPositiveBits() const {
+ return NumPositiveBits;
+ }
+ void setNumPositiveBits(unsigned Num) {
+ NumPositiveBits = Num;
+ assert(NumPositiveBits == Num && "can't store this bitcount");
+ }
+
+ /// \brief Returns the width in bits required to store all the
+ /// negative enumerators of this enum. These widths include
+ /// the rightmost leading 1; that is:
+ ///
+ /// MOST NEGATIVE ENUMERATOR PATTERN NUM NEGATIVE BITS
+ /// ------------------------ ------- -----------------
+ /// -1 1111111 1
+ /// -10 1110110 5
+ /// -101 1001011 8
+ unsigned getNumNegativeBits() const {
+ return NumNegativeBits;
+ }
+ void setNumNegativeBits(unsigned Num) {
+ NumNegativeBits = Num;
+ }
+
+ /// \brief Returns true if this is a C++0x scoped enumeration.
+ bool isScoped() const {
+ return IsScoped;
+ }
+
+ /// \brief Returns true if this is a C++0x scoped enumeration that was
+ /// declared using the 'class' tag rather than the 'struct' tag.
+ bool isScopedUsingClassTag() const {
+ return IsScopedUsingClassTag;
+ }
+
+ /// \brief Returns true if this is a C++0x enumeration with fixed underlying
+ /// type.
+ bool isFixed() const {
+ return IsFixed;
+ }
+
+ /// \brief Returns true if this can be considered a complete type.
+ bool isComplete() const {
+ return isCompleteDefinition() || isFixed();
+ }
+
+ /// \brief Returns the enumeration (declared within the template)
+ /// from which this enumeration type was instantiated, or NULL if
+ /// this enumeration was not instantiated from any template.
+ EnumDecl *getInstantiatedFromMemberEnum() const;
+
+ /// \brief If this enumeration is a member of a specialization of a
+ /// templated class, determine what kind of template specialization
+ /// or instantiation this is.
+ TemplateSpecializationKind getTemplateSpecializationKind() const;
+
+ /// \brief For an enumeration member that was instantiated from a member
+ /// enumeration of a templated class, set the template specialization kind.
+ void setTemplateSpecializationKind(TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation = SourceLocation());
+
+ /// \brief If this enumeration is an instantiation of a member enumeration of
+ /// a class template specialization, retrieves the member specialization
+ /// information.
+ MemberSpecializationInfo *getMemberSpecializationInfo() const {
+ return SpecializationInfo;
+ }
+
+ /// \brief Specify that this enumeration is an instantiation of the
+ /// member enumeration ED.
+ void setInstantiationOfMemberEnum(EnumDecl *ED,
+ TemplateSpecializationKind TSK) {
+ setInstantiationOfMemberEnum(getASTContext(), ED, TSK);
+ }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const EnumDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == Enum; }
+
+ friend class ASTDeclReader;
+};
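+// Example (illustrative sketch): iterating the enumerators of an EnumDecl and
+// reading their values; 'ED' is a hypothetical EnumDecl*.
+//
+//   if (ED->isComplete()) {
+//     for (EnumDecl::enumerator_iterator I = ED->enumerator_begin(),
+//                                        E = ED->enumerator_end(); I != E; ++I)
+//       llvm::errs() << I->getNameAsString() << " = "
+//                    << I->getInitVal().toString(10) << "\n";
+//   }
+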
+
+
+/// RecordDecl - Represents a struct/union/class. For example:
+/// struct X; // Forward declaration, no "body".
+/// union Y { int A, B; }; // Has body with members A and B (FieldDecls).
+/// This decl will be marked invalid if *any* members are invalid.
+///
+class RecordDecl : public TagDecl {
+ // FIXME: This can be packed into the bitfields in Decl.
+ /// HasFlexibleArrayMember - This is true if this struct ends with a flexible
+ /// array member (e.g. int X[]) or if this union contains a struct that does.
+ /// If so, this cannot be contained in arrays or other structs as a member.
+ bool HasFlexibleArrayMember : 1;
+
+ /// AnonymousStructOrUnion - Whether this is the type of an anonymous struct
+ /// or union.
+ bool AnonymousStructOrUnion : 1;
+
+ /// HasObjectMember - This is true if this struct has at least one member
+ /// containing an Objective-C object pointer type.
+ bool HasObjectMember : 1;
+
+ /// \brief Whether the field declarations of this record have been loaded
+ /// from external storage. To avoid unnecessary deserialization of
+ /// methods/nested types we allow deserialization of just the fields
+ /// when needed.
+ mutable bool LoadedFieldsFromExternalStorage : 1;
+ friend class DeclContext;
+
+protected:
+ RecordDecl(Kind DK, TagKind TK, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, RecordDecl *PrevDecl);
+
+public:
+ static RecordDecl *Create(const ASTContext &C, TagKind TK, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, RecordDecl* PrevDecl = 0);
+ static RecordDecl *CreateDeserialized(const ASTContext &C, unsigned ID);
+
+ const RecordDecl *getPreviousDecl() const {
+ return cast_or_null<RecordDecl>(TagDecl::getPreviousDecl());
+ }
+ RecordDecl *getPreviousDecl() {
+ return cast_or_null<RecordDecl>(TagDecl::getPreviousDecl());
+ }
+
+ const RecordDecl *getMostRecentDecl() const {
+ return cast<RecordDecl>(TagDecl::getMostRecentDecl());
+ }
+ RecordDecl *getMostRecentDecl() {
+ return cast<RecordDecl>(TagDecl::getMostRecentDecl());
+ }
+
+ bool hasFlexibleArrayMember() const { return HasFlexibleArrayMember; }
+ void setHasFlexibleArrayMember(bool V) { HasFlexibleArrayMember = V; }
+
+ /// isAnonymousStructOrUnion - Whether this is an anonymous struct
+ /// or union. To be an anonymous struct or union, it must have been
+ /// declared without a name and there must be no objects of this
+ /// type declared, e.g.,
+ /// @code
+ /// union { int i; float f; };
+ /// @endcode
+ /// is an anonymous union but neither of the following are:
+ /// @code
+ /// union X { int i; float f; };
+ /// union { int i; float f; } obj;
+ /// @endcode
+ bool isAnonymousStructOrUnion() const { return AnonymousStructOrUnion; }
+ void setAnonymousStructOrUnion(bool Anon) {
+ AnonymousStructOrUnion = Anon;
+ }
+
+ bool hasObjectMember() const { return HasObjectMember; }
+ void setHasObjectMember (bool val) { HasObjectMember = val; }
+
+ /// \brief Determines whether this declaration represents the
+ /// injected class name.
+ ///
+ /// The injected class name in C++ is the name of the class that
+ /// appears inside the class itself. For example:
+ ///
+ /// \code
+ /// struct C {
+ /// // C is implicitly declared here as a synonym for the class name.
+ /// };
+ ///
+ /// C::C c; // same as "C c;"
+ /// \endcode
+ bool isInjectedClassName() const;
+
+ /// getDefinition - Returns the RecordDecl that actually defines
+ /// this struct/union/class. When determining whether or not a
+ /// struct/union/class is completely defined, one should use this
+ /// method as opposed to 'isCompleteDefinition'.
+ /// 'isCompleteDefinition' indicates whether or not a specific
+ /// RecordDecl is a completed definition, not whether or not the
+ /// record type is defined. This method returns NULL if there is
+ /// no RecordDecl that defines the struct/union/class.
+ RecordDecl *getDefinition() const {
+ return cast_or_null<RecordDecl>(TagDecl::getDefinition());
+ }
+
+ // Iterator access to field members. The field iterator only visits
+ // the non-static data members of this class, ignoring any static
+ // data members, functions, constructors, destructors, etc.
+ typedef specific_decl_iterator<FieldDecl> field_iterator;
+
+ field_iterator field_begin() const;
+
+ field_iterator field_end() const {
+ return field_iterator(decl_iterator());
+ }
+
+ // field_empty - Whether there are any fields (non-static data
+ // members) in this record.
+ bool field_empty() const {
+ return field_begin() == field_end();
+ }
+
+ /// completeDefinition - Notes that the definition of this type is
+ /// now complete.
+ virtual void completeDefinition();
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const RecordDecl *D) { return true; }
+ static bool classofKind(Kind K) {
+ return K >= firstRecord && K <= lastRecord;
+ }
+
+private:
+ /// \brief Deserialize just the fields.
+ void LoadFieldsFromExternalStorage() const;
+};
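+// Example (illustrative sketch): visiting the non-static data members of a
+// record; 'RD' is a hypothetical RecordDecl*.
+//
+//   if (const RecordDecl *Def = RD->getDefinition()) {
+//     for (RecordDecl::field_iterator I = Def->field_begin(),
+//                                     E = Def->field_end(); I != E; ++I)
+//       if (I->hasInClassInitializer())
+//         ;  // a field with a C++0x in-class initializer
+//   }
+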
+
+class FileScopeAsmDecl : public Decl {
+ virtual void anchor();
+ StringLiteral *AsmString;
+ SourceLocation RParenLoc;
+ FileScopeAsmDecl(DeclContext *DC, StringLiteral *asmstring,
+ SourceLocation StartL, SourceLocation EndL)
+ : Decl(FileScopeAsm, DC, StartL), AsmString(asmstring), RParenLoc(EndL) {}
+public:
+ static FileScopeAsmDecl *Create(ASTContext &C, DeclContext *DC,
+ StringLiteral *Str, SourceLocation AsmLoc,
+ SourceLocation RParenLoc);
+
+ static FileScopeAsmDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ SourceLocation getAsmLoc() const { return getLocation(); }
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getAsmLoc(), getRParenLoc());
+ }
+
+ const StringLiteral *getAsmString() const { return AsmString; }
+ StringLiteral *getAsmString() { return AsmString; }
+ void setAsmString(StringLiteral *Asm) { AsmString = Asm; }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const FileScopeAsmDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == FileScopeAsm; }
+};
+
+/// BlockDecl - This represents a block literal declaration, which is like an
+/// unnamed FunctionDecl. For example:
+/// ^{ statement-body } or ^(int arg1, float arg2){ statement-body }
+///
+class BlockDecl : public Decl, public DeclContext {
+public:
+ /// A class which contains all the information about a particular
+ /// captured value.
+ class Capture {
+ enum {
+ flag_isByRef = 0x1,
+ flag_isNested = 0x2
+ };
+
+ /// The variable being captured.
+ llvm::PointerIntPair<VarDecl*, 2> VariableAndFlags;
+
+ /// The copy expression, expressed in terms of a DeclRef (or
+ /// BlockDeclRef) to the captured variable. Only required if the
+ /// variable has a C++ class type.
+ Expr *CopyExpr;
+
+ public:
+ Capture(VarDecl *variable, bool byRef, bool nested, Expr *copy)
+ : VariableAndFlags(variable,
+ (byRef ? flag_isByRef : 0) | (nested ? flag_isNested : 0)),
+ CopyExpr(copy) {}
+
+ /// The variable being captured.
+ VarDecl *getVariable() const { return VariableAndFlags.getPointer(); }
+
+ /// Whether this is a "by ref" capture, i.e. a capture of a __block
+ /// variable.
+ bool isByRef() const { return VariableAndFlags.getInt() & flag_isByRef; }
+
+ /// Whether this is a nested capture, i.e. the variable captured
+ /// is not from outside the immediately enclosing function/block.
+ bool isNested() const { return VariableAndFlags.getInt() & flag_isNested; }
+
+ bool hasCopyExpr() const { return CopyExpr != 0; }
+ Expr *getCopyExpr() const { return CopyExpr; }
+ void setCopyExpr(Expr *e) { CopyExpr = e; }
+ };
+
+private:
+ // FIXME: This can be packed into the bitfields in Decl.
+ bool IsVariadic : 1;
+ bool CapturesCXXThis : 1;
+ bool BlockMissingReturnType : 1;
+ bool IsConversionFromLambda : 1;
+ /// ParamInfo - new[]'d array of pointers to ParmVarDecls for the formal
+ /// parameters of this block. This is null if there are no formal
+ /// parameters.
+ ParmVarDecl **ParamInfo;
+ unsigned NumParams;
+
+ Stmt *Body;
+ TypeSourceInfo *SignatureAsWritten;
+
+ Capture *Captures;
+ unsigned NumCaptures;
+
+protected:
+ BlockDecl(DeclContext *DC, SourceLocation CaretLoc)
+ : Decl(Block, DC, CaretLoc), DeclContext(Block),
+ IsVariadic(false), CapturesCXXThis(false),
+ BlockMissingReturnType(true), IsConversionFromLambda(false),
+ ParamInfo(0), NumParams(0), Body(0),
+ SignatureAsWritten(0), Captures(0), NumCaptures(0) {}
+
+public:
+ static BlockDecl *Create(ASTContext &C, DeclContext *DC, SourceLocation L);
+ static BlockDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ SourceLocation getCaretLocation() const { return getLocation(); }
+
+ bool isVariadic() const { return IsVariadic; }
+ void setIsVariadic(bool value) { IsVariadic = value; }
+
+ CompoundStmt *getCompoundBody() const { return (CompoundStmt*) Body; }
+ Stmt *getBody() const { return (Stmt*) Body; }
+ void setBody(CompoundStmt *B) { Body = (Stmt*) B; }
+
+ void setSignatureAsWritten(TypeSourceInfo *Sig) { SignatureAsWritten = Sig; }
+ TypeSourceInfo *getSignatureAsWritten() const { return SignatureAsWritten; }
+
+ // Iterator access to formal parameters.
+ unsigned param_size() const { return getNumParams(); }
+ typedef ParmVarDecl **param_iterator;
+ typedef ParmVarDecl * const *param_const_iterator;
+
+ bool param_empty() const { return NumParams == 0; }
+ param_iterator param_begin() { return ParamInfo; }
+ param_iterator param_end() { return ParamInfo+param_size(); }
+
+ param_const_iterator param_begin() const { return ParamInfo; }
+ param_const_iterator param_end() const { return ParamInfo+param_size(); }
+
+ unsigned getNumParams() const { return NumParams; }
+ const ParmVarDecl *getParamDecl(unsigned i) const {
+ assert(i < getNumParams() && "Illegal param #");
+ return ParamInfo[i];
+ }
+ ParmVarDecl *getParamDecl(unsigned i) {
+ assert(i < getNumParams() && "Illegal param #");
+ return ParamInfo[i];
+ }
+ void setParams(llvm::ArrayRef<ParmVarDecl *> NewParamInfo);
+
+ /// hasCaptures - True if this block (or its nested blocks) captures
+ /// anything of local storage from its enclosing scopes.
+ bool hasCaptures() const { return NumCaptures != 0 || CapturesCXXThis; }
+
+ /// getNumCaptures - Returns the number of captured variables.
+ /// Does not include an entry for 'this'.
+ unsigned getNumCaptures() const { return NumCaptures; }
+
+ typedef const Capture *capture_iterator;
+ typedef const Capture *capture_const_iterator;
+ capture_iterator capture_begin() { return Captures; }
+ capture_iterator capture_end() { return Captures + NumCaptures; }
+ capture_const_iterator capture_begin() const { return Captures; }
+ capture_const_iterator capture_end() const { return Captures + NumCaptures; }
+
+ bool capturesCXXThis() const { return CapturesCXXThis; }
+ bool blockMissingReturnType() const { return BlockMissingReturnType; }
+ void setBlockMissingReturnType(bool val) { BlockMissingReturnType = val; }
+
+ bool isConversionFromLambda() const { return IsConversionFromLambda; }
+ void setIsConversionFromLambda(bool val) { IsConversionFromLambda = val; }
+
+ bool capturesVariable(const VarDecl *var) const;
+
+ void setCaptures(ASTContext &Context,
+ const Capture *begin,
+ const Capture *end,
+ bool capturesCXXThis);
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY;
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const BlockDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == Block; }
+ static DeclContext *castToDeclContext(const BlockDecl *D) {
+ return static_cast<DeclContext *>(const_cast<BlockDecl*>(D));
+ }
+ static BlockDecl *castFromDeclContext(const DeclContext *DC) {
+ return static_cast<BlockDecl *>(const_cast<DeclContext*>(DC));
+ }
+};
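+// Example (illustrative sketch): inspecting what a block captures; 'BD' is a
+// hypothetical BlockDecl*.
+//
+//   if (BD->hasCaptures()) {
+//     for (BlockDecl::capture_const_iterator I = BD->capture_begin(),
+//                                            E = BD->capture_end(); I != E; ++I)
+//       if (I->isByRef())
+//         ;  // a __block variable: I->getVariable()
+//     if (BD->capturesCXXThis())
+//       ;  // 'this' is captured as well
+//   }
+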
+
+/// \brief Describes a module import declaration, which makes the contents
+/// of the named module visible in the current translation unit.
+///
+/// An import declaration imports the named module (or submodule). For example:
+/// \code
+/// @__experimental_modules_import std.vector;
+/// \endcode
+///
+/// Import declarations can also be implicitly generated from #include/#import
+/// directives.
+class ImportDecl : public Decl {
+ /// \brief The imported module, along with a bit that indicates whether
+ /// we have source-location information for each identifier in the module
+ /// name.
+ ///
+ /// When the bit is false, we only have a single source location for the
+ /// end of the import declaration.
+ llvm::PointerIntPair<Module *, 1, bool> ImportedAndComplete;
+
+ /// \brief The next import in the list of imports local to the translation
+ /// unit being parsed (not loaded from an AST file).
+ ImportDecl *NextLocalImport;
+
+ friend class ASTReader;
+ friend class ASTDeclReader;
+ friend class ASTContext;
+
+ ImportDecl(DeclContext *DC, SourceLocation StartLoc, Module *Imported,
+ ArrayRef<SourceLocation> IdentifierLocs);
+
+ ImportDecl(DeclContext *DC, SourceLocation StartLoc, Module *Imported,
+ SourceLocation EndLoc);
+
+ ImportDecl(EmptyShell Empty) : Decl(Import, Empty), NextLocalImport() { }
+
+public:
+ /// \brief Create a new module import declaration.
+ static ImportDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, Module *Imported,
+ ArrayRef<SourceLocation> IdentifierLocs);
+
+ /// \brief Create a new module import declaration for an implicitly-generated
+ /// import.
+ static ImportDecl *CreateImplicit(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, Module *Imported,
+ SourceLocation EndLoc);
+
+ /// \brief Create a new, deserialized module import declaration.
+ static ImportDecl *CreateDeserialized(ASTContext &C, unsigned ID,
+ unsigned NumLocations);
+
+ /// \brief Retrieve the module that was imported by the import declaration.
+ Module *getImportedModule() const { return ImportedAndComplete.getPointer(); }
+
+ /// \brief Retrieves the locations of each of the identifiers that make up
+ /// the complete module name in the import declaration.
+ ///
+ /// This will return an empty array if the locations of the individual
+ /// identifiers aren't available.
+ ArrayRef<SourceLocation> getIdentifierLocs() const;
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY;
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ImportDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == Import; }
+};
+
+
+/// Insertion operator for diagnostics. This allows sending a NamedDecl
+/// into a diagnostic with <<.
+inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ const NamedDecl* ND) {
+ DB.AddTaggedVal(reinterpret_cast<intptr_t>(ND),
+ DiagnosticsEngine::ak_nameddecl);
+ return DB;
+}
+inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
+ const NamedDecl* ND) {
+ PD.AddTaggedVal(reinterpret_cast<intptr_t>(ND),
+ DiagnosticsEngine::ak_nameddecl);
+ return PD;
+}
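+// Example (illustrative sketch): with these overloads a NamedDecl can be
+// streamed straight into a diagnostic and the engine will print its name.
+// 'Diag', 'Loc' and the diagnostic ID below are hypothetical.
+//
+//   Diag(Loc, diag::err_example) << ND;
+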
+
+template<typename decl_type>
+void Redeclarable<decl_type>::setPreviousDeclaration(decl_type *PrevDecl) {
+ // Note: This routine is implemented here because we need both NamedDecl
+ // and Redeclarable to be defined.
+
+ decl_type *First;
+
+ if (PrevDecl) {
+ // Point to previous. Make sure that this is actually the most recent
+ // redeclaration, or we can build invalid chains. If the most recent
+ // redeclaration is invalid, it won't be PrevDecl, but we want it anyway.
+ RedeclLink = PreviousDeclLink(
+ llvm::cast<decl_type>(PrevDecl->getMostRecentDecl()));
+ First = PrevDecl->getFirstDeclaration();
+ assert(First->RedeclLink.NextIsLatest() && "Expected first");
+ } else {
+ // Make this first.
+ First = static_cast<decl_type*>(this);
+ }
+
+ // First one will point to this one as latest.
+ First->RedeclLink = LatestDeclLink(static_cast<decl_type*>(this));
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(static_cast<decl_type*>(this)))
+ ND->ClearLinkageCache();
+}
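+// Example (illustrative sketch): once declarations are chained with
+// setPreviousDeclaration, the whole redeclaration chain can be visited through
+// the Redeclarable interface; 'TD' is a hypothetical TagDecl*.
+//
+//   for (TagDecl::redecl_iterator I = TD->redecls_begin(),
+//                                 E = TD->redecls_end(); I != E; ++I)
+//     ;  // *I visits every redeclaration, including TD itself
+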
+
+// Inline function definitions.
+
+/// \brief Check if the given decl is complete.
+///
+/// We use this function to break a cycle between the inline definitions in
+/// Type.h and Decl.h.
+inline bool IsEnumDeclComplete(EnumDecl *ED) {
+ return ED->isComplete();
+}
+
+/// \brief Check if the given decl is scoped.
+///
+/// We use this function to break a cycle between the inline definitions in
+/// Type.h and Decl.h.
+inline bool IsEnumDeclScoped(EnumDecl *ED) {
+ return ED->isScoped();
+}
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclAccessPair.h b/contrib/llvm/tools/clang/include/clang/AST/DeclAccessPair.h
new file mode 100644
index 0000000..7ecd8f8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclAccessPair.h
@@ -0,0 +1,72 @@
+//===--- DeclAccessPair.h - A decl bundled with its path access -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DeclAccessPair class, which provides an
+// efficient representation of a pair of a NamedDecl* and an
+// AccessSpecifier. Generally the access specifier gives the
+// natural access of a declaration when named in a class, as
+// defined in C++ [class.access.base]p1.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_DECLACCESSPAIR_H
+#define LLVM_CLANG_AST_DECLACCESSPAIR_H
+
+#include "clang/Basic/Specifiers.h"
+
+namespace clang {
+
+class NamedDecl;
+
+/// A POD class for pairing a NamedDecl* with an access specifier.
+/// Can be put into unions.
+class DeclAccessPair {
+ NamedDecl *Ptr; // we'd use llvm::PointerUnion, but it isn't trivial
+
+ enum { Mask = 0x3 };
+
+public:
+ static DeclAccessPair make(NamedDecl *D, AccessSpecifier AS) {
+ DeclAccessPair p;
+ p.set(D, AS);
+ return p;
+ }
+
+ NamedDecl *getDecl() const {
+ return (NamedDecl*) (~Mask & (uintptr_t) Ptr);
+ }
+ AccessSpecifier getAccess() const {
+ return AccessSpecifier(Mask & (uintptr_t) Ptr);
+ }
+
+ void setDecl(NamedDecl *D) {
+ set(D, getAccess());
+ }
+ void setAccess(AccessSpecifier AS) {
+ set(getDecl(), AS);
+ }
+ void set(NamedDecl *D, AccessSpecifier AS) {
+ Ptr = reinterpret_cast<NamedDecl*>(uintptr_t(AS) |
+ reinterpret_cast<uintptr_t>(D));
+ }
+
+ operator NamedDecl*() const { return getDecl(); }
+ NamedDecl *operator->() const { return getDecl(); }
+};
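+// Example (illustrative sketch): packing a declaration together with the
+// access it is seen with; 'ND' is a hypothetical NamedDecl*.
+//
+//   DeclAccessPair DAP = DeclAccessPair::make(ND, AS_public);
+//   NamedDecl *D = DAP.getDecl();
+//   AccessSpecifier AS = DAP.getAccess();   // AS_public
+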
+}
+
+// Take a moment to tell SmallVector that DeclAccessPair is POD.
+namespace llvm {
+template<typename> struct isPodLike;
+template<> struct isPodLike<clang::DeclAccessPair> {
+ static const bool value = true;
+};
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h b/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h
new file mode 100644
index 0000000..4c675aed
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclBase.h
@@ -0,0 +1,1637 @@
+//===-- DeclBase.h - Base Classes for representing declarations -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Decl and DeclContext interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_DECLBASE_H
+#define LLVM_CLANG_AST_DECLBASE_H
+
+#include "clang/AST/Attr.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/Specifiers.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/PrettyStackTrace.h"
+
+namespace clang {
+class DeclContext;
+class TranslationUnitDecl;
+class NamespaceDecl;
+class UsingDirectiveDecl;
+class NamedDecl;
+class FunctionDecl;
+class CXXRecordDecl;
+class EnumDecl;
+class ObjCMethodDecl;
+class ObjCContainerDecl;
+class ObjCInterfaceDecl;
+class ObjCCategoryDecl;
+class ObjCProtocolDecl;
+class ObjCImplementationDecl;
+class ObjCCategoryImplDecl;
+class ObjCImplDecl;
+class LinkageSpecDecl;
+class BlockDecl;
+class DeclarationName;
+class CompoundStmt;
+class StoredDeclsMap;
+class DependentDiagnostic;
+class ASTMutationListener;
+}
+
+namespace llvm {
+// DeclContext* is only 4-byte aligned on 32-bit systems.
+template<>
+ class PointerLikeTypeTraits<clang::DeclContext*> {
+ typedef clang::DeclContext* PT;
+public:
+ static inline void *getAsVoidPointer(PT P) { return P; }
+ static inline PT getFromVoidPointer(void *P) {
+ return static_cast<PT>(P);
+ }
+ enum { NumLowBitsAvailable = 2 };
+};
+}
+
+namespace clang {
+
+ /// \brief Captures the result of checking the availability of a
+ /// declaration.
+ enum AvailabilityResult {
+ AR_Available = 0,
+ AR_NotYetIntroduced,
+ AR_Deprecated,
+ AR_Unavailable
+ };
+
+/// Decl - This represents one declaration (or definition), e.g. a variable,
+/// typedef, function, struct, etc.
+///
+class Decl {
+public:
+ /// \brief Lists the kind of concrete classes of Decl.
+ enum Kind {
+#define DECL(DERIVED, BASE) DERIVED,
+#define ABSTRACT_DECL(DECL)
+#define DECL_RANGE(BASE, START, END) \
+ first##BASE = START, last##BASE = END,
+#define LAST_DECL_RANGE(BASE, START, END) \
+ first##BASE = START, last##BASE = END
+#include "clang/AST/DeclNodes.inc"
+ };
+
+ /// \brief A placeholder type used to construct an empty shell of a
+ /// decl-derived type that will be filled in later (e.g., by some
+ /// deserialization method).
+ struct EmptyShell { };
+
+ /// IdentifierNamespace - The different namespaces in which
+ /// declarations may appear. According to C99 6.2.3, there are
+/// four namespaces: labels, tags, members and ordinary
+ /// identifiers. C++ describes lookup completely differently:
+ /// certain lookups merely "ignore" certain kinds of declarations,
+ /// usually based on whether the declaration is of a type, etc.
+ ///
+ /// These are meant as bitmasks, so that searches in
+ /// C++ can look into the "tag" namespace during ordinary lookup.
+ ///
+ /// Decl currently provides 15 bits of IDNS bits.
+ enum IdentifierNamespace {
+ /// Labels, declared with 'x:' and referenced with 'goto x'.
+ IDNS_Label = 0x0001,
+
+ /// Tags, declared with 'struct foo;' and referenced with
+ /// 'struct foo'. All tags are also types. This is what
+ /// elaborated-type-specifiers look for in C.
+ IDNS_Tag = 0x0002,
+
+ /// Types, declared with 'struct foo', typedefs, etc.
+ /// This is what elaborated-type-specifiers look for in C++,
+ /// but note that it's ill-formed to find a non-tag.
+ IDNS_Type = 0x0004,
+
+ /// Members, declared with object declarations within tag
+ /// definitions. In C, these can only be found by "qualified"
+ /// lookup in member expressions. In C++, they're found by
+ /// normal lookup.
+ IDNS_Member = 0x0008,
+
+ /// Namespaces, declared with 'namespace foo {}'.
+ /// Lookup for nested-name-specifiers find these.
+ IDNS_Namespace = 0x0010,
+
+ /// Ordinary names. In C, everything that's not a label, tag,
+ /// or member ends up here.
+ IDNS_Ordinary = 0x0020,
+
+ /// Objective C @protocol.
+ IDNS_ObjCProtocol = 0x0040,
+
+ /// This declaration is a friend function. A friend function
+ /// declaration is always in this namespace but may also be in
+ /// IDNS_Ordinary if it was previously declared.
+ IDNS_OrdinaryFriend = 0x0080,
+
+ /// This declaration is a friend class. A friend class
+ /// declaration is always in this namespace but may also be in
+ /// IDNS_Tag|IDNS_Type if it was previously declared.
+ IDNS_TagFriend = 0x0100,
+
+ /// This declaration is a using declaration. A using declaration
+ /// *introduces* a number of other declarations into the current
+ /// scope, and those declarations use the IDNS of their targets,
+ /// but the actual using declarations go in this namespace.
+ IDNS_Using = 0x0200,
+
+ /// This declaration is a C++ operator declared in a non-class
+ /// context. All such operators are also in IDNS_Ordinary.
+ /// C++ lexical operator lookup looks for these.
+ IDNS_NonMemberOperator = 0x0400
+ };
+
+ /// ObjCDeclQualifier - 'Qualifiers' written next to the return and
+ /// parameter types in method declarations. Other than remembering
+ /// them and mangling them into the method's signature string, these
+ /// are ignored by the compiler; they are consumed by certain
+ /// remote-messaging frameworks.
+ ///
+ /// in, inout, and out are mutually exclusive and apply only to
+ /// method parameters. bycopy and byref are mutually exclusive and
+ /// apply only to method parameters (?). oneway applies only to
+ /// results. All of these expect their corresponding parameter to
+ /// have a particular type. None of this is currently enforced by
+ /// clang.
+ ///
+ /// This should be kept in sync with ObjCDeclSpec::ObjCDeclQualifier.
+ enum ObjCDeclQualifier {
+ OBJC_TQ_None = 0x0,
+ OBJC_TQ_In = 0x1,
+ OBJC_TQ_Inout = 0x2,
+ OBJC_TQ_Out = 0x4,
+ OBJC_TQ_Bycopy = 0x8,
+ OBJC_TQ_Byref = 0x10,
+ OBJC_TQ_Oneway = 0x20
+ };
+
+protected:
+ // Enumeration values used in the bits stored in NextInContextAndBits.
+ enum {
+ /// \brief Whether this declaration is a top-level declaration (function,
+ /// global variable, etc.) that is lexically inside an objc container
+ /// definition.
+ TopLevelDeclInObjCContainerFlag = 0x01,
+
+ /// \brief Whether this declaration is private to the module in which it was
+ /// defined.
+ ModulePrivateFlag = 0x02
+ };
+
+ /// \brief The next declaration within the same lexical
+ /// DeclContext. These pointers form the linked list that is
+ /// traversed via DeclContext's decls_begin()/decls_end().
+ ///
+ /// The extra two bits are used for the TopLevelDeclInObjCContainer and
+ /// ModulePrivate bits.
+ llvm::PointerIntPair<Decl *, 2, unsigned> NextInContextAndBits;
+
+private:
+ friend class DeclContext;
+
+ struct MultipleDC {
+ DeclContext *SemanticDC;
+ DeclContext *LexicalDC;
+ };
+
+
+ /// DeclCtx - Holds either a DeclContext* or a MultipleDC*.
+ /// For declarations that don't contain C++ scope specifiers, it contains
+ /// the DeclContext where the Decl was declared.
+ /// For declarations with C++ scope specifiers, it contains a MultipleDC*
+ /// with the context where it semantically belongs (SemanticDC) and the
+ /// context where it was lexically declared (LexicalDC).
+ /// e.g.:
+ ///
+ /// namespace A {
+ /// void f(); // SemanticDC == LexicalDC == 'namespace A'
+ /// }
+ /// void A::f(); // SemanticDC == namespace 'A'
+ /// // LexicalDC == global namespace
+ llvm::PointerUnion<DeclContext*, MultipleDC*> DeclCtx;
+
+ inline bool isInSemaDC() const { return DeclCtx.is<DeclContext*>(); }
+ inline bool isOutOfSemaDC() const { return DeclCtx.is<MultipleDC*>(); }
+ inline MultipleDC *getMultipleDC() const {
+ return DeclCtx.get<MultipleDC*>();
+ }
+ inline DeclContext *getSemanticDC() const {
+ return DeclCtx.get<DeclContext*>();
+ }
+
+ /// Loc - The location of this decl.
+ SourceLocation Loc;
+
+ /// DeclKind - This indicates which class this is.
+ unsigned DeclKind : 8;
+
+ /// InvalidDecl - This indicates a semantic error occurred.
+ unsigned InvalidDecl : 1;
+
+ /// HasAttrs - This indicates whether the decl has attributes or not.
+ unsigned HasAttrs : 1;
+
+ /// Implicit - Whether this declaration was implicitly generated by
+ /// the implementation rather than explicitly written by the user.
+ unsigned Implicit : 1;
+
+ /// \brief Whether this declaration was "used", meaning that a definition is
+ /// required.
+ unsigned Used : 1;
+
+ /// \brief Whether this declaration was "referenced".
+ /// The difference with 'Used' is whether the reference appears in a
+ /// evaluated context or not, e.g. functions used in uninstantiated templates
+ /// are regarded as "referenced" but not "used".
+ unsigned Referenced : 1;
+
+ /// \brief Whether statistic collection is enabled.
+ static bool StatisticsEnabled;
+
+protected:
+ /// Access - Used by C++ decls for the access specifier.
+ // NOTE: VC++ treats enums as signed, avoid using the AccessSpecifier enum
+ unsigned Access : 2;
+ friend class CXXClassMemberWrapper;
+
+ /// \brief Whether this declaration was loaded from an AST file.
+ unsigned FromASTFile : 1;
+
+ /// \brief Whether this declaration is hidden from normal name lookup, e.g.,
+ /// because it was loaded from an AST file and is either module-private or
+ /// because its submodule has not been made visible.
+ unsigned Hidden : 1;
+
+ /// IdentifierNamespace - This specifies what IDNS_* namespace this lives in.
+ unsigned IdentifierNamespace : 12;
+
+ /// \brief Whether the \c CachedLinkage field is active.
+ ///
+ /// This field is only valid for NamedDecls subclasses.
+ mutable unsigned HasCachedLinkage : 1;
+
+ /// \brief If \c HasCachedLinkage, the linkage of this declaration.
+ ///
+ /// This field is only valid for NamedDecls subclasses.
+ mutable unsigned CachedLinkage : 2;
+
+ friend class ASTDeclWriter;
+ friend class ASTDeclReader;
+ friend class ASTReader;
+
+private:
+ void CheckAccessDeclContext() const;
+
+protected:
+
+ Decl(Kind DK, DeclContext *DC, SourceLocation L)
+ : NextInContextAndBits(), DeclCtx(DC),
+ Loc(L), DeclKind(DK), InvalidDecl(0),
+ HasAttrs(false), Implicit(false), Used(false), Referenced(false),
+ Access(AS_none), FromASTFile(0), Hidden(0),
+ IdentifierNamespace(getIdentifierNamespaceForKind(DK)),
+ HasCachedLinkage(0)
+ {
+ if (StatisticsEnabled) add(DK);
+ }
+
+ Decl(Kind DK, EmptyShell Empty)
+ : NextInContextAndBits(), DeclKind(DK), InvalidDecl(0),
+ HasAttrs(false), Implicit(false), Used(false), Referenced(false),
+ Access(AS_none), FromASTFile(0), Hidden(0),
+ IdentifierNamespace(getIdentifierNamespaceForKind(DK)),
+ HasCachedLinkage(0)
+ {
+ if (StatisticsEnabled) add(DK);
+ }
+
+ virtual ~Decl();
+
+ /// \brief Allocate memory for a deserialized declaration.
+ ///
+ /// This routine must be used to allocate memory for any declaration that is
+ /// deserialized from a module file.
+ ///
+ /// \param Context The context in which we will allocate memory.
+ /// \param ID The global ID of the deserialized declaration.
+ /// \param Size The size of the allocated object.
+ static void *AllocateDeserializedDecl(const ASTContext &Context,
+ unsigned ID,
+ unsigned Size);
+
+public:
+
+ /// \brief Source range that this declaration covers.
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getLocation(), getLocation());
+ }
+ SourceLocation getLocStart() const LLVM_READONLY {
+ return getSourceRange().getBegin();
+ }
+ SourceLocation getLocEnd() const LLVM_READONLY {
+ return getSourceRange().getEnd();
+ }
+
+ SourceLocation getLocation() const { return Loc; }
+ void setLocation(SourceLocation L) { Loc = L; }
+
+ Kind getKind() const { return static_cast<Kind>(DeclKind); }
+ const char *getDeclKindName() const;
+
+ Decl *getNextDeclInContext() { return NextInContextAndBits.getPointer(); }
+ const Decl *getNextDeclInContext() const {return NextInContextAndBits.getPointer();}
+
+ DeclContext *getDeclContext() {
+ if (isInSemaDC())
+ return getSemanticDC();
+ return getMultipleDC()->SemanticDC;
+ }
+ const DeclContext *getDeclContext() const {
+ return const_cast<Decl*>(this)->getDeclContext();
+ }
+
+ /// Finds the innermost non-closure context of this declaration.
+ /// That is, walk out the DeclContext chain, skipping any blocks.
+ DeclContext *getNonClosureContext();
+ const DeclContext *getNonClosureContext() const {
+ return const_cast<Decl*>(this)->getNonClosureContext();
+ }
+
+ TranslationUnitDecl *getTranslationUnitDecl();
+ const TranslationUnitDecl *getTranslationUnitDecl() const {
+ return const_cast<Decl*>(this)->getTranslationUnitDecl();
+ }
+
+ bool isInAnonymousNamespace() const;
+
+ ASTContext &getASTContext() const LLVM_READONLY;
+
+ void setAccess(AccessSpecifier AS) {
+ Access = AS;
+#ifndef NDEBUG
+ CheckAccessDeclContext();
+#endif
+ }
+
+ AccessSpecifier getAccess() const {
+#ifndef NDEBUG
+ CheckAccessDeclContext();
+#endif
+ return AccessSpecifier(Access);
+ }
+
+ bool hasAttrs() const { return HasAttrs; }
+ void setAttrs(const AttrVec& Attrs) {
+ return setAttrsImpl(Attrs, getASTContext());
+ }
+ AttrVec &getAttrs() {
+ return const_cast<AttrVec&>(const_cast<const Decl*>(this)->getAttrs());
+ }
+ const AttrVec &getAttrs() const;
+ void swapAttrs(Decl *D);
+ void dropAttrs();
+
+ void addAttr(Attr *A) {
+ if (hasAttrs())
+ getAttrs().push_back(A);
+ else
+ setAttrs(AttrVec(1, A));
+ }
+
+ typedef AttrVec::const_iterator attr_iterator;
+
+ // FIXME: Do not rely on iterators having comparable singular values.
+ // Note that this should error out if they do not.
+ attr_iterator attr_begin() const {
+ return hasAttrs() ? getAttrs().begin() : 0;
+ }
+ attr_iterator attr_end() const {
+ return hasAttrs() ? getAttrs().end() : 0;
+ }
+
+ template <typename T>
+ void dropAttr() {
+ if (!HasAttrs) return;
+
+ AttrVec &Attrs = getAttrs();
+ for (unsigned i = 0, e = Attrs.size(); i != e; /* in loop */) {
+ if (isa<T>(Attrs[i])) {
+ Attrs.erase(Attrs.begin() + i);
+ --e;
+ }
+ else
+ ++i;
+ }
+ if (Attrs.empty())
+ HasAttrs = false;
+ }
+
+ template <typename T>
+ specific_attr_iterator<T> specific_attr_begin() const {
+ return specific_attr_iterator<T>(attr_begin());
+ }
+ template <typename T>
+ specific_attr_iterator<T> specific_attr_end() const {
+ return specific_attr_iterator<T>(attr_end());
+ }
+
+ template<typename T> T *getAttr() const {
+ return hasAttrs() ? getSpecificAttr<T>(getAttrs()) : 0;
+ }
+ template<typename T> bool hasAttr() const {
+ return hasAttrs() && hasSpecificAttr<T>(getAttrs());
+ }
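+ // Example (illustrative sketch): querying attributes through the helpers
+ // above, assuming a hypothetical Decl* 'D' and an attribute class such as
+ // DeprecatedAttr.
+ //
+ //   if (D->hasAttr<DeprecatedAttr>())
+ //     ;  // at least one such attribute is present
+ //   if (DeprecatedAttr *DA = D->getAttr<DeprecatedAttr>())
+ //     ;  // first matching attribute, or null
+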
+
+ /// getMaxAlignment - Returns the maximum alignment specified by attributes
+ /// on this decl, or 0 if there are none.
+ unsigned getMaxAlignment() const {
+ return hasAttrs() ? getMaxAttrAlignment(getAttrs(), getASTContext()) : 0;
+ }
+
+ /// setInvalidDecl - Indicates the Decl had a semantic error. This
+ /// allows for graceful error recovery.
+ void setInvalidDecl(bool Invalid = true);
+ bool isInvalidDecl() const { return (bool) InvalidDecl; }
+
+ /// isImplicit - Indicates whether the declaration was implicitly
+ /// generated by the implementation. If false, this declaration
+ /// was written explicitly in the source code.
+ bool isImplicit() const { return Implicit; }
+ void setImplicit(bool I = true) { Implicit = I; }
+
+ /// \brief Whether this declaration was used, meaning that a definition
+ /// is required.
+ ///
+ /// \param CheckUsedAttr When true, also consider the "used" attribute
+ /// (in addition to the "used" bit set by \c setUsed()) when determining
+ /// whether the function is used.
+ bool isUsed(bool CheckUsedAttr = true) const;
+
+ void setUsed(bool U = true) { Used = U; }
+
+ /// \brief Whether this declaration was referenced.
+ bool isReferenced() const;
+
+ void setReferenced(bool R = true) { Referenced = R; }
+
+ /// \brief Whether this declaration is a top-level declaration (function,
+ /// global variable, etc.) that is lexically inside an objc container
+ /// definition.
+ bool isTopLevelDeclInObjCContainer() const {
+ return NextInContextAndBits.getInt() & TopLevelDeclInObjCContainerFlag;
+ }
+
+ void setTopLevelDeclInObjCContainer(bool V = true) {
+ unsigned Bits = NextInContextAndBits.getInt();
+ if (V)
+ Bits |= TopLevelDeclInObjCContainerFlag;
+ else
+ Bits &= ~TopLevelDeclInObjCContainerFlag;
+ NextInContextAndBits.setInt(Bits);
+ }
+
+protected:
+ /// \brief Whether this declaration was marked as being private to the
+ /// module in which it was defined.
+ bool isModulePrivate() const {
+ return NextInContextAndBits.getInt() & ModulePrivateFlag;
+ }
+
+ /// \brief Specify whether this declaration was marked as being private
+ /// to the module in which it was defined.
+ void setModulePrivate(bool MP = true) {
+ unsigned Bits = NextInContextAndBits.getInt();
+ if (MP)
+ Bits |= ModulePrivateFlag;
+ else
+ Bits &= ~ModulePrivateFlag;
+ NextInContextAndBits.setInt(Bits);
+ }
+
+ /// \brief Set the owning module ID.
+ void setOwningModuleID(unsigned ID) {
+ assert(isFromASTFile() && "Only works on a deserialized declaration");
+ *((unsigned*)this - 2) = ID;
+ }
+
+public:
+
+ /// \brief Determine the availability of the given declaration.
+ ///
+ /// This routine will determine the most restrictive availability of
+ /// the given declaration (e.g., preferring 'unavailable' to
+ /// 'deprecated').
+ ///
+ /// \param Message If non-NULL and the result is not \c
+ /// AR_Available, will be set to a (possibly empty) message
+ /// describing why the declaration has not been introduced, is
+ /// deprecated, or is unavailable.
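+ ///
+ /// A minimal usage sketch (editor's addition, not part of the original
+ /// header), for an arbitrary declaration \c D:
+ /// \code
+ ///   std::string Msg;
+ ///   if (D->getAvailability(&Msg) == AR_Deprecated)
+ ///     llvm::errs() << "deprecated: " << Msg << "\n";
+ /// \endcode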
+ AvailabilityResult getAvailability(std::string *Message = 0) const;
+
+ /// \brief Determine whether this declaration is marked 'deprecated'.
+ ///
+ /// \param Message If non-NULL and the declaration is deprecated,
+ /// this will be set to the message describing why the declaration
+ /// was deprecated (which may be empty).
+ bool isDeprecated(std::string *Message = 0) const {
+ return getAvailability(Message) == AR_Deprecated;
+ }
+
+ /// \brief Determine whether this declaration is marked 'unavailable'.
+ ///
+ /// \param Message If non-NULL and the declaration is unavailable,
+ /// this will be set to the message describing why the declaration
+ /// was made unavailable (which may be empty).
+ bool isUnavailable(std::string *Message = 0) const {
+ return getAvailability(Message) == AR_Unavailable;
+ }
+
+ /// \brief Determine whether this is a weak-imported symbol.
+ ///
+ /// Weak-imported symbols are typically marked with the
+ /// 'weak_import' attribute, but may also be marked with an
+ /// 'availability' attribute where we're targeting a platform prior to
+ /// the introduction of this feature.
+ bool isWeakImported() const;
+
+ /// \brief Determines whether this symbol can be weak-imported,
+ /// e.g., whether it would be well-formed to add the weak_import
+ /// attribute.
+ ///
+ /// \param IsDefinition Set to \c true to indicate that this
+ /// declaration cannot be weak-imported because it has a definition.
+ bool canBeWeakImported(bool &IsDefinition) const;
+
+ /// \brief Determine whether this declaration came from an AST file (such as
+ /// a precompiled header or module) rather than having been parsed.
+ bool isFromASTFile() const { return FromASTFile; }
+
+ /// \brief Retrieve the global declaration ID associated with this
+ /// declaration, which specifies where this declaration was loaded from
+ /// within its AST file. Returns 0 if this declaration did not come from
+ /// an AST file.
+ unsigned getGlobalID() const {
+ if (isFromASTFile())
+ return *((const unsigned*)this - 1);
+ return 0;
+ }
+
+ /// \brief Retrieve the global ID of the module that owns this particular
+ /// declaration.
+ unsigned getOwningModuleID() const {
+ if (isFromASTFile())
+ return *((const unsigned*)this - 2);
+
+ return 0;
+ }
+
+ unsigned getIdentifierNamespace() const {
+ return IdentifierNamespace;
+ }
+ bool isInIdentifierNamespace(unsigned NS) const {
+ return getIdentifierNamespace() & NS;
+ }
+ static unsigned getIdentifierNamespaceForKind(Kind DK);
+
+ bool hasTagIdentifierNamespace() const {
+ return isTagIdentifierNamespace(getIdentifierNamespace());
+ }
+ static bool isTagIdentifierNamespace(unsigned NS) {
+ // TagDecls have Tag and Type set and may also have TagFriend.
+ return (NS & ~IDNS_TagFriend) == (IDNS_Tag | IDNS_Type);
+ }
+
+ /// getLexicalDeclContext - The declaration context where this Decl was
+ /// lexically declared (LexicalDC). May be different from
+ /// getDeclContext() (SemanticDC).
+ /// e.g.:
+ ///
+ /// namespace A {
+ /// void f(); // SemanticDC == LexicalDC == 'namespace A'
+ /// }
+ /// void A::f(); // SemanticDC == namespace 'A'
+ /// // LexicalDC == global namespace
+ DeclContext *getLexicalDeclContext() {
+ if (isInSemaDC())
+ return getSemanticDC();
+ return getMultipleDC()->LexicalDC;
+ }
+ const DeclContext *getLexicalDeclContext() const {
+ return const_cast<Decl*>(this)->getLexicalDeclContext();
+ }
+
+ virtual bool isOutOfLine() const {
+ return getLexicalDeclContext() != getDeclContext();
+ }
+
+ /// setDeclContext - Set both the semantic and lexical DeclContext
+ /// to DC.
+ void setDeclContext(DeclContext *DC);
+
+ void setLexicalDeclContext(DeclContext *DC);
+
+ /// isDefinedOutsideFunctionOrMethod - This predicate returns true if this
+ /// scoped decl is defined outside the current function or method. This is
+ /// roughly global variables and functions, but also handles enums (which
+ /// could be defined inside or outside a function etc).
+ bool isDefinedOutsideFunctionOrMethod() const {
+ return getParentFunctionOrMethod() == 0;
+ }
+
+ /// \brief If this decl is defined inside a function/method/block it returns
+ /// the corresponding DeclContext, otherwise it returns null.
+ const DeclContext *getParentFunctionOrMethod() const;
+ DeclContext *getParentFunctionOrMethod() {
+ return const_cast<DeclContext*>(
+ const_cast<const Decl*>(this)->getParentFunctionOrMethod());
+ }
+
+ /// \brief Retrieves the "canonical" declaration of the given declaration.
+ virtual Decl *getCanonicalDecl() { return this; }
+ const Decl *getCanonicalDecl() const {
+ return const_cast<Decl*>(this)->getCanonicalDecl();
+ }
+
+ /// \brief Whether this particular Decl is a canonical one.
+ bool isCanonicalDecl() const { return getCanonicalDecl() == this; }
+
+protected:
+ /// \brief Returns the next redeclaration or itself if this is the only decl.
+ ///
+ /// Decl subclasses that can be redeclared should override this method so that
+ /// Decl::redecl_iterator can iterate over them.
+ virtual Decl *getNextRedeclaration() { return this; }
+
+ /// \brief Implementation of getPreviousDecl(), to be overridden by any
+ /// subclass that has a redeclaration chain.
+ virtual Decl *getPreviousDeclImpl() { return 0; }
+
+ /// \brief Implementation of getMostRecentDecl(), to be overridden by any
+ /// subclass that has a redeclaration chain.
+ virtual Decl *getMostRecentDeclImpl() { return this; }
+
+public:
+ /// \brief Iterates through all the redeclarations of the same decl.
+ class redecl_iterator {
+ /// Current - The current declaration.
+ Decl *Current;
+ Decl *Starter;
+
+ public:
+ typedef Decl* value_type;
+ typedef Decl* reference;
+ typedef Decl* pointer;
+ typedef std::forward_iterator_tag iterator_category;
+ typedef std::ptrdiff_t difference_type;
+
+ redecl_iterator() : Current(0), Starter(0) { }
+ explicit redecl_iterator(Decl *C) : Current(C), Starter(C) { }
+
+ reference operator*() const { return Current; }
+ pointer operator->() const { return Current; }
+
+ redecl_iterator& operator++() {
+ assert(Current && "Advancing while iterator has reached end");
+ // Get either previous decl or latest decl.
+ Decl *Next = Current->getNextRedeclaration();
+ assert(Next && "Should return next redeclaration or itself, never null!");
+ Current = (Next != Starter ? Next : 0);
+ return *this;
+ }
+
+ redecl_iterator operator++(int) {
+ redecl_iterator tmp(*this);
+ ++(*this);
+ return tmp;
+ }
+
+ friend bool operator==(redecl_iterator x, redecl_iterator y) {
+ return x.Current == y.Current;
+ }
+ friend bool operator!=(redecl_iterator x, redecl_iterator y) {
+ return x.Current != y.Current;
+ }
+ };
+
+ /// \brief Returns an iterator for all the redeclarations of the same decl.
+ /// It will iterate at least once (when this decl is the only one).
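+ ///
+ /// Editor's illustration (not part of the original header): visiting every
+ /// redeclaration of an arbitrary Decl *D, where 'visit' is a hypothetical
+ /// callback.
+ /// \code
+ ///   for (Decl::redecl_iterator I = D->redecls_begin(), E = D->redecls_end();
+ ///        I != E; ++I)
+ ///     visit(*I);
+ /// \endcode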
+ redecl_iterator redecls_begin() const {
+ return redecl_iterator(const_cast<Decl*>(this));
+ }
+ redecl_iterator redecls_end() const { return redecl_iterator(); }
+
+ /// \brief Retrieve the previous declaration that declares the same entity
+ /// as this declaration, or NULL if there is no previous declaration.
+ Decl *getPreviousDecl() { return getPreviousDeclImpl(); }
+
+ /// \brief Retrieve the previous declaration that declares the same entity
+ /// as this declaration, or NULL if there is no previous declaration.
+ const Decl *getPreviousDecl() const {
+ return const_cast<Decl *>(this)->getPreviousDeclImpl();
+ }
+
+ /// \brief Retrieve the most recent declaration that declares the same entity
+ /// as this declaration (which may be this declaration).
+ Decl *getMostRecentDecl() { return getMostRecentDeclImpl(); }
+
+ /// \brief Retrieve the most recent declaration that declares the same entity
+ /// as this declaration (which may be this declaration).
+ const Decl *getMostRecentDecl() const {
+ return const_cast<Decl *>(this)->getMostRecentDeclImpl();
+ }
+
+ /// getBody - If this Decl represents a declaration for a body of code,
+ /// such as a function or method definition, this method returns the
+ /// top-level Stmt* of that body. Otherwise this method returns null.
+ virtual Stmt* getBody() const { return 0; }
+
+ /// \brief Returns true if this Decl represents a declaration for a body of
+ /// code, such as a function or method definition.
+ virtual bool hasBody() const { return getBody() != 0; }
+
+ /// getBodyRBrace - Gets the right brace of the body, if a body exists.
+ /// This works whether the body is a CompoundStmt or a CXXTryStmt.
+ SourceLocation getBodyRBrace() const;
+
+ // global temp stats (until we have a per-module visitor)
+ static void add(Kind k);
+ static void EnableStatistics();
+ static void PrintStats();
+
+ /// isTemplateParameter - Determines whether this declaration is a
+ /// template parameter.
+ bool isTemplateParameter() const;
+
+ /// isTemplateParameterPack - Determines whether this declaration is a
+ /// template parameter pack.
+ bool isTemplateParameterPack() const;
+
+ /// \brief Whether this declaration is a parameter pack.
+ bool isParameterPack() const;
+
+ /// \brief Returns true if this declaration is a template.
+ bool isTemplateDecl() const;
+
+ /// \brief Whether this declaration is a function or function template.
+ bool isFunctionOrFunctionTemplate() const;
+
+ /// \brief Changes the namespace of this declaration to reflect that it's
+ /// the object of a friend declaration.
+ ///
+ /// These declarations appear in the lexical context of the friending
+ /// class, but in the semantic context of the actual entity. This property
+ /// applies only to a specific decl object; other redeclarations of the
+ /// same entity may not (and probably don't) share this property.
+ void setObjectOfFriendDecl(bool PreviouslyDeclared) {
+ unsigned OldNS = IdentifierNamespace;
+ assert((OldNS & (IDNS_Tag | IDNS_Ordinary |
+ IDNS_TagFriend | IDNS_OrdinaryFriend)) &&
+ "namespace includes neither ordinary nor tag");
+ assert(!(OldNS & ~(IDNS_Tag | IDNS_Ordinary | IDNS_Type |
+ IDNS_TagFriend | IDNS_OrdinaryFriend)) &&
+ "namespace includes other than ordinary or tag");
+
+ IdentifierNamespace = 0;
+ if (OldNS & (IDNS_Tag | IDNS_TagFriend)) {
+ IdentifierNamespace |= IDNS_TagFriend;
+ if (PreviouslyDeclared) IdentifierNamespace |= IDNS_Tag | IDNS_Type;
+ }
+
+ if (OldNS & (IDNS_Ordinary | IDNS_OrdinaryFriend)) {
+ IdentifierNamespace |= IDNS_OrdinaryFriend;
+ if (PreviouslyDeclared) IdentifierNamespace |= IDNS_Ordinary;
+ }
+ }
+
+ enum FriendObjectKind {
+ FOK_None, // not a friend object
+ FOK_Declared, // a friend of a previously-declared entity
+ FOK_Undeclared // a friend of a previously-undeclared entity
+ };
+
+ /// \brief Determines whether this declaration is the object of a
+ /// friend declaration and, if so, what kind.
+ ///
+ /// There is currently no direct way to find the associated FriendDecl.
+ FriendObjectKind getFriendObjectKind() const {
+ unsigned mask
+ = (IdentifierNamespace & (IDNS_TagFriend | IDNS_OrdinaryFriend));
+ if (!mask) return FOK_None;
+ return (IdentifierNamespace & (IDNS_Tag | IDNS_Ordinary) ?
+ FOK_Declared : FOK_Undeclared);
+ }
+
+ /// Specifies that this declaration is a C++ overloaded non-member operator.
+ void setNonMemberOperator() {
+ assert(getKind() == Function || getKind() == FunctionTemplate);
+ assert((IdentifierNamespace & IDNS_Ordinary) &&
+ "visible non-member operators should be in ordinary namespace");
+ IdentifierNamespace |= IDNS_NonMemberOperator;
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *) { return true; }
+ static bool classofKind(Kind K) { return true; }
+ static DeclContext *castToDeclContext(const Decl *);
+ static Decl *castFromDeclContext(const DeclContext *);
+
+ void print(raw_ostream &Out, unsigned Indentation = 0,
+ bool PrintInstantiation = false) const;
+ void print(raw_ostream &Out, const PrintingPolicy &Policy,
+ unsigned Indentation = 0, bool PrintInstantiation = false) const;
+ static void printGroup(Decl** Begin, unsigned NumDecls,
+ raw_ostream &Out, const PrintingPolicy &Policy,
+ unsigned Indentation = 0);
+ LLVM_ATTRIBUTE_USED void dump() const;
+ LLVM_ATTRIBUTE_USED void dumpXML() const;
+ void dumpXML(raw_ostream &OS) const;
+
+private:
+ const Attr *getAttrsImpl() const;
+ void setAttrsImpl(const AttrVec& Attrs, ASTContext &Ctx);
+ void setDeclContextsImpl(DeclContext *SemaDC, DeclContext *LexicalDC,
+ ASTContext &Ctx);
+
+protected:
+ ASTMutationListener *getASTMutationListener() const;
+};
+
+/// \brief Determine whether two declarations declare the same entity.
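+///
+/// Editor's illustration (not part of the original header): two
+/// redeclarations of the same function share a canonical declaration, so
+/// \code
+///   void f();       // FD1
+///   void f() { }    // FD2
+/// \endcode
+/// declaresSameEntity(FD1, FD2) returns true for the corresponding
+/// FunctionDecl pointers.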
+inline bool declaresSameEntity(const Decl *D1, const Decl *D2) {
+ if (!D1 || !D2)
+ return false;
+
+ if (D1 == D2)
+ return true;
+
+ return D1->getCanonicalDecl() == D2->getCanonicalDecl();
+}
+
+/// PrettyStackTraceDecl - If a crash occurs, indicate that it happened when
+/// doing something to a specific decl.
+class PrettyStackTraceDecl : public llvm::PrettyStackTraceEntry {
+ const Decl *TheDecl;
+ SourceLocation Loc;
+ SourceManager &SM;
+ const char *Message;
+public:
+ PrettyStackTraceDecl(const Decl *theDecl, SourceLocation L,
+ SourceManager &sm, const char *Msg)
+ : TheDecl(theDecl), Loc(L), SM(sm), Message(Msg) {}
+
+ virtual void print(raw_ostream &OS) const;
+};
+
+class DeclContextLookupResult
+ : public std::pair<NamedDecl**,NamedDecl**> {
+public:
+ DeclContextLookupResult(NamedDecl **I, NamedDecl **E)
+ : std::pair<NamedDecl**,NamedDecl**>(I, E) {}
+ DeclContextLookupResult()
+ : std::pair<NamedDecl**,NamedDecl**>() {}
+
+ using std::pair<NamedDecl**,NamedDecl**>::operator=;
+};
+
+class DeclContextLookupConstResult
+ : public std::pair<NamedDecl*const*, NamedDecl*const*> {
+public:
+ DeclContextLookupConstResult(std::pair<NamedDecl**,NamedDecl**> R)
+ : std::pair<NamedDecl*const*, NamedDecl*const*>(R) {}
+ DeclContextLookupConstResult(NamedDecl * const *I, NamedDecl * const *E)
+ : std::pair<NamedDecl*const*, NamedDecl*const*>(I, E) {}
+ DeclContextLookupConstResult()
+ : std::pair<NamedDecl*const*, NamedDecl*const*>() {}
+
+ using std::pair<NamedDecl*const*,NamedDecl*const*>::operator=;
+};
+
+/// DeclContext - This is used only as a base class of specific decl types that
+/// can act as declaration contexts. These decls are (only the top classes
+/// that directly derive from DeclContext are mentioned, not their subclasses):
+///
+/// TranslationUnitDecl
+/// NamespaceDecl
+/// FunctionDecl
+/// TagDecl
+/// ObjCMethodDecl
+/// ObjCContainerDecl
+/// LinkageSpecDecl
+/// BlockDecl
+///
+class DeclContext {
+ /// DeclKind - This indicates which class this is.
+ unsigned DeclKind : 8;
+
+ /// \brief Whether this declaration context also has some external
+ /// storage that contains additional declarations that are lexically
+ /// part of this context.
+ mutable unsigned ExternalLexicalStorage : 1;
+
+ /// \brief Whether this declaration context also has some external
+ /// storage that contains additional declarations that are visible
+ /// in this context.
+ mutable unsigned ExternalVisibleStorage : 1;
+
+ /// \brief Pointer to the data structure used to lookup declarations
+ /// within this context (or a DependentStoredDeclsMap if this is a
+ /// dependent context), and a bool indicating whether we have lazily
+ /// omitted any declarations from the map. We maintain the invariant
+ /// that, if the map contains an entry for a DeclarationName, then it
+ /// contains all relevant entries for that name.
+ mutable llvm::PointerIntPair<StoredDeclsMap*, 1, bool> LookupPtr;
+
+protected:
+ /// FirstDecl - The first declaration stored within this declaration
+ /// context.
+ mutable Decl *FirstDecl;
+
+ /// LastDecl - The last declaration stored within this declaration
+ /// context. FIXME: We could probably cache this value somewhere
+ /// outside of the DeclContext, to reduce the size of DeclContext by
+ /// another pointer.
+ mutable Decl *LastDecl;
+
+ friend class ExternalASTSource;
+ friend class ASTWriter;
+
+ /// \brief Build up a chain of declarations.
+ ///
+ /// \returns the first/last pair of declarations.
+ static std::pair<Decl *, Decl *>
+ BuildDeclChain(ArrayRef<Decl*> Decls, bool FieldsAlreadyLoaded);
+
+ DeclContext(Decl::Kind K)
+ : DeclKind(K), ExternalLexicalStorage(false),
+ ExternalVisibleStorage(false), LookupPtr(0, false), FirstDecl(0),
+ LastDecl(0) { }
+
+public:
+ ~DeclContext();
+
+ Decl::Kind getDeclKind() const {
+ return static_cast<Decl::Kind>(DeclKind);
+ }
+ const char *getDeclKindName() const;
+
+ /// getParent - Returns the containing DeclContext.
+ DeclContext *getParent() {
+ return cast<Decl>(this)->getDeclContext();
+ }
+ const DeclContext *getParent() const {
+ return const_cast<DeclContext*>(this)->getParent();
+ }
+
+ /// getLexicalParent - Returns the containing lexical DeclContext. May be
+ /// different from getParent, e.g.:
+ ///
+ /// namespace A {
+ /// struct S;
+ /// }
+ /// struct A::S {}; // getParent() == namespace 'A'
+ /// // getLexicalParent() == translation unit
+ ///
+ DeclContext *getLexicalParent() {
+ return cast<Decl>(this)->getLexicalDeclContext();
+ }
+ const DeclContext *getLexicalParent() const {
+ return const_cast<DeclContext*>(this)->getLexicalParent();
+ }
+
+ DeclContext *getLookupParent();
+
+ const DeclContext *getLookupParent() const {
+ return const_cast<DeclContext*>(this)->getLookupParent();
+ }
+
+ ASTContext &getParentASTContext() const {
+ return cast<Decl>(this)->getASTContext();
+ }
+
+ bool isClosure() const {
+ return DeclKind == Decl::Block;
+ }
+
+ bool isObjCContainer() const {
+ switch (DeclKind) {
+ case Decl::ObjCCategory:
+ case Decl::ObjCCategoryImpl:
+ case Decl::ObjCImplementation:
+ case Decl::ObjCInterface:
+ case Decl::ObjCProtocol:
+ return true;
+ }
+ return false;
+ }
+
+ bool isFunctionOrMethod() const {
+ switch (DeclKind) {
+ case Decl::Block:
+ case Decl::ObjCMethod:
+ return true;
+ default:
+ return DeclKind >= Decl::firstFunction && DeclKind <= Decl::lastFunction;
+ }
+ }
+
+ bool isFileContext() const {
+ return DeclKind == Decl::TranslationUnit || DeclKind == Decl::Namespace;
+ }
+
+ bool isTranslationUnit() const {
+ return DeclKind == Decl::TranslationUnit;
+ }
+
+ bool isRecord() const {
+ return DeclKind >= Decl::firstRecord && DeclKind <= Decl::lastRecord;
+ }
+
+ bool isNamespace() const {
+ return DeclKind == Decl::Namespace;
+ }
+
+ bool isInlineNamespace() const;
+
+ /// \brief Determines whether this context is dependent on a
+ /// template parameter.
+ bool isDependentContext() const;
+
+ /// isTransparentContext - Determines whether this context is a
+ /// "transparent" context, meaning that the members declared in this
+ /// context are semantically declared in the nearest enclosing
+ /// non-transparent (opaque) context but are lexically declared in
+ /// this context. For example, consider the enumerators of an
+ /// enumeration type:
+ /// @code
+ /// enum E {
+ /// Val1
+ /// };
+ /// @endcode
+ /// Here, E is a transparent context, so its enumerator (Val1) will
+ /// appear (semantically) as if it were declared in the same context as E
+ /// itself. Examples of transparent contexts include enumerations (except for
+ /// C++0x scoped enums) and C++ linkage specifications.
+ bool isTransparentContext() const;
+
+ /// \brief Determines whether this context is, or is nested within,
+ /// a C++ extern "C" linkage spec.
+ bool isExternCContext() const;
+
+ /// \brief Determine whether this declaration context is equivalent
+ /// to the declaration context DC.
+ bool Equals(const DeclContext *DC) const {
+ return DC && this->getPrimaryContext() == DC->getPrimaryContext();
+ }
+
+ /// \brief Determine whether this declaration context encloses the
+ /// declaration context DC.
+ bool Encloses(const DeclContext *DC) const;
+
+ /// \brief Find the nearest non-closure ancestor of this context,
+ /// i.e. the innermost semantic parent of this context which is not
+ /// a closure. A context may be its own non-closure ancestor.
+ DeclContext *getNonClosureAncestor();
+ const DeclContext *getNonClosureAncestor() const {
+ return const_cast<DeclContext*>(this)->getNonClosureAncestor();
+ }
+
+ /// getPrimaryContext - There may be many different
+ /// declarations of the same entity (including forward declarations
+ /// of classes, multiple definitions of namespaces, etc.), each with
+ /// a different set of declarations. This routine returns the
+ /// "primary" DeclContext structure, which will contain the
+ /// information needed to perform name lookup into this context.
+ DeclContext *getPrimaryContext();
+ const DeclContext *getPrimaryContext() const {
+ return const_cast<DeclContext*>(this)->getPrimaryContext();
+ }
+
+ /// getRedeclContext - Retrieve the context in which an entity conflicts with
+ /// other entities of the same name, or where it is a redeclaration if the
+ /// two entities are compatible. This skips through transparent contexts.
+ DeclContext *getRedeclContext();
+ const DeclContext *getRedeclContext() const {
+ return const_cast<DeclContext *>(this)->getRedeclContext();
+ }
+
+ /// \brief Retrieve the nearest enclosing namespace context.
+ DeclContext *getEnclosingNamespaceContext();
+ const DeclContext *getEnclosingNamespaceContext() const {
+ return const_cast<DeclContext *>(this)->getEnclosingNamespaceContext();
+ }
+
+ /// \brief Test if this context is part of the enclosing namespace set of
+ /// the context NS, as defined in C++0x [namespace.def]p9. If either context
+ /// isn't a namespace, this is equivalent to Equals().
+ ///
+ /// The enclosing namespace set of a namespace is the namespace and, if it is
+ /// inline, its enclosing namespace, recursively.
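+ ///
+ /// Editor's illustration (not part of the original header):
+ /// \code
+ ///   namespace Outer { inline namespace Inner { } }
+ /// \endcode
+ /// Because Inner is inline, the enclosing namespace set of Inner is
+ /// {Inner, Outer}, so the context of Outer is in the enclosing namespace
+ /// set of the context of Inner.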
+ bool InEnclosingNamespaceSetOf(const DeclContext *NS) const;
+
+ /// \brief Collects all of the declaration contexts that are semantically
+ /// connected to this declaration context.
+ ///
+ /// For declaration contexts that have multiple semantically connected but
+ /// syntactically distinct contexts, such as C++ namespaces, this routine
+ /// retrieves the complete set of such declaration contexts in source order.
+ /// For example, given:
+ ///
+ /// \code
+ /// namespace N {
+ /// int x;
+ /// }
+ /// namespace N {
+ /// int y;
+ /// }
+ /// \endcode
+ ///
+ /// The \c Contexts parameter will contain both definitions of N.
+ ///
+ /// \param Contexts Will be cleared and set to the set of declaration
+ /// contexts that are semantically connected to this declaration context,
+ /// in source order, including this context (which may be the only result,
+ /// for non-namespace contexts).
+ void collectAllContexts(llvm::SmallVectorImpl<DeclContext *> &Contexts);
+
+ /// decl_iterator - Iterates through the declarations stored
+ /// within this context.
+ class decl_iterator {
+ /// Current - The current declaration.
+ Decl *Current;
+
+ public:
+ typedef Decl* value_type;
+ typedef Decl* reference;
+ typedef Decl* pointer;
+ typedef std::forward_iterator_tag iterator_category;
+ typedef std::ptrdiff_t difference_type;
+
+ decl_iterator() : Current(0) { }
+ explicit decl_iterator(Decl *C) : Current(C) { }
+
+ reference operator*() const { return Current; }
+ pointer operator->() const { return Current; }
+
+ decl_iterator& operator++() {
+ Current = Current->getNextDeclInContext();
+ return *this;
+ }
+
+ decl_iterator operator++(int) {
+ decl_iterator tmp(*this);
+ ++(*this);
+ return tmp;
+ }
+
+ friend bool operator==(decl_iterator x, decl_iterator y) {
+ return x.Current == y.Current;
+ }
+ friend bool operator!=(decl_iterator x, decl_iterator y) {
+ return x.Current != y.Current;
+ }
+ };
+
+ /// decls_begin/decls_end - Iterate over the declarations stored in
+ /// this context.
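+ ///
+ /// Editor's illustration (not part of the original header): walking the
+ /// declarations lexically stored in an arbitrary DeclContext *DC, where
+ /// 'consume' is a hypothetical callback.
+ /// \code
+ ///   for (DeclContext::decl_iterator I = DC->decls_begin(),
+ ///                                   E = DC->decls_end(); I != E; ++I)
+ ///     consume(*I);
+ /// \endcode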
+ decl_iterator decls_begin() const;
+ decl_iterator decls_end() const;
+ bool decls_empty() const;
+
+ /// noload_decls_begin/end - Iterate over the declarations stored in this
+ /// context that are currently loaded; don't attempt to retrieve anything
+ /// from an external source.
+ decl_iterator noload_decls_begin() const;
+ decl_iterator noload_decls_end() const;
+
+ /// specific_decl_iterator - Iterates over a subrange of
+ /// declarations stored in a DeclContext, providing only those that
+ /// are of type SpecificDecl (or a class derived from it). This
+ /// iterator is used, for example, to provide iteration over just
+ /// the fields within a RecordDecl (with SpecificDecl = FieldDecl).
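+ ///
+ /// Editor's illustration (not part of the original header): iterating the
+ /// FieldDecls of an arbitrary RecordDecl *RD, where 'handleField' is a
+ /// hypothetical callback.
+ /// \code
+ ///   typedef DeclContext::specific_decl_iterator<FieldDecl> field_iter;
+ ///   for (field_iter I(RD->decls_begin()), E(RD->decls_end()); I != E; ++I)
+ ///     handleField(*I);
+ /// \endcode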
+ template<typename SpecificDecl>
+ class specific_decl_iterator {
+ /// Current - The current, underlying declaration iterator, which
+ /// will either be NULL or will point to a declaration of
+ /// type SpecificDecl.
+ DeclContext::decl_iterator Current;
+
+ /// SkipToNextDecl - Advances the current position up to the next
+ /// declaration of type SpecificDecl that also meets the criteria
+ /// required by Acceptable.
+ void SkipToNextDecl() {
+ while (*Current && !isa<SpecificDecl>(*Current))
+ ++Current;
+ }
+
+ public:
+ typedef SpecificDecl* value_type;
+ typedef SpecificDecl* reference;
+ typedef SpecificDecl* pointer;
+ typedef std::iterator_traits<DeclContext::decl_iterator>::difference_type
+ difference_type;
+ typedef std::forward_iterator_tag iterator_category;
+
+ specific_decl_iterator() : Current() { }
+
+ /// specific_decl_iterator - Construct a new iterator over a
+ /// subset of the declarations in the range [C,
+ /// end-of-declarations), visiting only those declarations that are
+ /// of type SpecificDecl (or a class derived from it). For example,
+ /// to iterate over the fields of a RecordDecl, SpecificDecl will be
+ /// FieldDecl.
+ explicit specific_decl_iterator(DeclContext::decl_iterator C) : Current(C) {
+ SkipToNextDecl();
+ }
+
+ reference operator*() const { return cast<SpecificDecl>(*Current); }
+ pointer operator->() const { return cast<SpecificDecl>(*Current); }
+
+ specific_decl_iterator& operator++() {
+ ++Current;
+ SkipToNextDecl();
+ return *this;
+ }
+
+ specific_decl_iterator operator++(int) {
+ specific_decl_iterator tmp(*this);
+ ++(*this);
+ return tmp;
+ }
+
+ friend bool operator==(const specific_decl_iterator& x,
+ const specific_decl_iterator& y) {
+ return x.Current == y.Current;
+ }
+
+ friend bool operator!=(const specific_decl_iterator& x,
+ const specific_decl_iterator& y) {
+ return x.Current != y.Current;
+ }
+ };
+
+ /// \brief Iterates over a filtered subrange of declarations stored
+ /// in a DeclContext.
+ ///
+ /// This iterator visits only those declarations that are of type
+ /// SpecificDecl (or a class derived from it) and that meet some
+ /// additional run-time criteria. This iterator is used, for
+ /// example, to provide access to the instance methods within an
+ /// Objective-C interface (with SpecificDecl = ObjCMethodDecl and
+ /// Acceptable = ObjCMethodDecl::isInstanceMethod).
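+ ///
+ /// Editor's illustration (not part of the original header): visiting only
+ /// the Objective-C instance methods of an arbitrary ObjCInterfaceDecl *ID,
+ /// where 'handleMethod' is a hypothetical callback.
+ /// \code
+ ///   typedef DeclContext::filtered_decl_iterator<
+ ///       ObjCMethodDecl, &ObjCMethodDecl::isInstanceMethod> method_iter;
+ ///   for (method_iter I(ID->decls_begin()), E(ID->decls_end()); I != E; ++I)
+ ///     handleMethod(*I);
+ /// \endcode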
+ template<typename SpecificDecl, bool (SpecificDecl::*Acceptable)() const>
+ class filtered_decl_iterator {
+ /// Current - The current, underlying declaration iterator, which
+ /// will either be NULL or will point to a declaration of
+ /// type SpecificDecl.
+ DeclContext::decl_iterator Current;
+
+ /// SkipToNextDecl - Advances the current position up to the next
+ /// declaration of type SpecificDecl that also meets the criteria
+ /// required by Acceptable.
+ void SkipToNextDecl() {
+ while (*Current &&
+ (!isa<SpecificDecl>(*Current) ||
+ (Acceptable && !(cast<SpecificDecl>(*Current)->*Acceptable)())))
+ ++Current;
+ }
+
+ public:
+ typedef SpecificDecl* value_type;
+ typedef SpecificDecl* reference;
+ typedef SpecificDecl* pointer;
+ typedef std::iterator_traits<DeclContext::decl_iterator>::difference_type
+ difference_type;
+ typedef std::forward_iterator_tag iterator_category;
+
+ filtered_decl_iterator() : Current() { }
+
+ /// filtered_decl_iterator - Construct a new iterator over a
+ /// subset of the declarations in the range [C,
+ /// end-of-declarations). The Acceptable template parameter is a
+ /// pointer to a member function of SpecificDecl that should return
+ /// true for all of the SpecificDecl instances that will be in the
+ /// subset of iterators. For example, if you want Objective-C
+ /// instance methods, SpecificDecl will be ObjCMethodDecl and
+ /// Acceptable will be &ObjCMethodDecl::isInstanceMethod.
+ explicit filtered_decl_iterator(DeclContext::decl_iterator C) : Current(C) {
+ SkipToNextDecl();
+ }
+
+ reference operator*() const { return cast<SpecificDecl>(*Current); }
+ pointer operator->() const { return cast<SpecificDecl>(*Current); }
+
+ filtered_decl_iterator& operator++() {
+ ++Current;
+ SkipToNextDecl();
+ return *this;
+ }
+
+ filtered_decl_iterator operator++(int) {
+ filtered_decl_iterator tmp(*this);
+ ++(*this);
+ return tmp;
+ }
+
+ friend bool operator==(const filtered_decl_iterator& x,
+ const filtered_decl_iterator& y) {
+ return x.Current == y.Current;
+ }
+
+ friend bool operator!=(const filtered_decl_iterator& x,
+ const filtered_decl_iterator& y) {
+ return x.Current != y.Current;
+ }
+ };
+
+ /// @brief Add the declaration D into this context.
+ ///
+ /// This routine should be invoked when the declaration D has first
+ /// been declared, to place D into the context where it was
+ /// (lexically) defined. Every declaration must be added to one
+ /// (and only one!) context, where it can be visited via
+ /// [decls_begin(), decls_end()). Once a declaration has been added
+ /// to its lexical context, the corresponding DeclContext owns the
+ /// declaration.
+ ///
+ /// If D is also a NamedDecl, it will be made visible within its
+ /// semantic context via makeDeclVisibleInContext.
+ void addDecl(Decl *D);
+
+ /// @brief Add the declaration D into this context, but suppress
+ /// searches for external declarations with the same name.
+ ///
+ /// Although analogous in function to addDecl, this removes an
+ /// important check. This is only useful if the Decl is being
+ /// added in response to an external search; in all other cases,
+ /// addDecl() is the right function to use.
+ /// See the ASTImporter for use cases.
+ void addDeclInternal(Decl *D);
+
+ /// @brief Add the declaration D to this context without modifying
+ /// any lookup tables.
+ ///
+ /// This is useful for some operations in dependent contexts where
+ /// the semantic context might not be dependent; this basically
+ /// only happens with friends.
+ void addHiddenDecl(Decl *D);
+
+ /// @brief Removes a declaration from this context.
+ void removeDecl(Decl *D);
+
+ /// lookup_iterator - An iterator that provides access to the results
+ /// of looking up a name within this context.
+ typedef NamedDecl **lookup_iterator;
+
+ /// lookup_const_iterator - An iterator that provides non-mutable
+ /// access to the results of looking up a name within this context.
+ typedef NamedDecl * const * lookup_const_iterator;
+
+ typedef DeclContextLookupResult lookup_result;
+ typedef DeclContextLookupConstResult lookup_const_result;
+
+ /// lookup - Find the declarations (if any) with the given Name in
+ /// this context. Returns a range of iterators that contains all of
+ /// the declarations with this name, with object, function, member,
+ /// and enumerator names preceding any tag name. Note that this
+ /// routine will not look into parent contexts.
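+ ///
+ /// Minimal usage sketch (editor's addition, not part of the original
+ /// header), for an arbitrary DeclContext *DC and DeclarationName Name;
+ /// 'considerCandidate' is a hypothetical callback.
+ /// \code
+ ///   DeclContext::lookup_result R = DC->lookup(Name);
+ ///   for (DeclContext::lookup_iterator I = R.first, E = R.second; I != E; ++I)
+ ///     considerCandidate(*I);
+ /// \endcode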
+ lookup_result lookup(DeclarationName Name);
+ lookup_const_result lookup(DeclarationName Name) const;
+
+ /// \brief A simplistic name lookup mechanism that performs name lookup
+ /// into this declaration context without consulting the external source.
+ ///
+ /// This function should almost never be used, because it subverts the
+ /// usual relationship between a DeclContext and the external source.
+ /// See the ASTImporter for the (few, but important) use cases.
+ void localUncachedLookup(DeclarationName Name,
+ llvm::SmallVectorImpl<NamedDecl *> &Results);
+
+ /// @brief Makes a declaration visible within this context.
+ ///
+ /// This routine makes the declaration D visible to name lookup
+ /// within this context and, if this is a transparent context,
+ /// within its parent contexts up to the first enclosing
+ /// non-transparent context. Making a declaration visible within a
+ /// context does not transfer ownership of a declaration, and a
+ /// declaration can be visible in many contexts that aren't its
+ /// lexical context.
+ ///
+ /// If D is a redeclaration of an existing declaration that is
+ /// visible from this context, as determined by
+ /// NamedDecl::declarationReplaces, the previous declaration will be
+ /// replaced with D.
+ void makeDeclVisibleInContext(NamedDecl *D);
+
+ /// all_lookups_iterator - An iterator that provides a view over the results
+ /// of looking up every possible name.
+ class all_lookups_iterator;
+
+ all_lookups_iterator lookups_begin() const;
+
+ all_lookups_iterator lookups_end() const;
+
+ /// udir_iterator - Iterates through the using-directives stored
+ /// within this context.
+ typedef UsingDirectiveDecl * const * udir_iterator;
+
+ typedef std::pair<udir_iterator, udir_iterator> udir_iterator_range;
+
+ udir_iterator_range getUsingDirectives() const;
+
+ udir_iterator using_directives_begin() const {
+ return getUsingDirectives().first;
+ }
+
+ udir_iterator using_directives_end() const {
+ return getUsingDirectives().second;
+ }
+
+ // These are all defined in DependentDiagnostic.h.
+ class ddiag_iterator;
+ inline ddiag_iterator ddiag_begin() const;
+ inline ddiag_iterator ddiag_end() const;
+
+ // Low-level accessors
+
+ /// \brief Retrieve the internal representation of the lookup structure.
+ /// This may omit some names if we are lazily building the structure.
+ StoredDeclsMap *getLookupPtr() const { return LookupPtr.getPointer(); }
+
+ /// \brief Ensure the lookup structure is fully-built and return it.
+ StoredDeclsMap *buildLookup();
+
+ /// \brief Whether this DeclContext has external storage containing
+ /// additional declarations that are lexically in this context.
+ bool hasExternalLexicalStorage() const { return ExternalLexicalStorage; }
+
+ /// \brief State whether this DeclContext has external storage for
+ /// declarations lexically in this context.
+ void setHasExternalLexicalStorage(bool ES = true) {
+ ExternalLexicalStorage = ES;
+ }
+
+ /// \brief Whether this DeclContext has external storage containing
+ /// additional declarations that are visible in this context.
+ bool hasExternalVisibleStorage() const { return ExternalVisibleStorage; }
+
+ /// \brief State whether this DeclContext has external storage for
+ /// declarations visible in this context.
+ void setHasExternalVisibleStorage(bool ES = true) {
+ ExternalVisibleStorage = ES;
+ }
+
+ /// \brief Determine whether the given declaration is stored in the list of
+ /// declarations lexically within this context.
+ bool isDeclInLexicalTraversal(const Decl *D) const {
+ return D && (D->NextInContextAndBits.getPointer() || D == FirstDecl ||
+ D == LastDecl);
+ }
+
+ static bool classof(const Decl *D);
+ static bool classof(const DeclContext *D) { return true; }
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT(NAME) \
+ static bool classof(const NAME##Decl *D) { return true; }
+#include "clang/AST/DeclNodes.inc"
+
+ LLVM_ATTRIBUTE_USED void dumpDeclContext() const;
+
+private:
+ void LoadLexicalDeclsFromExternalStorage() const;
+
+ /// @brief Makes a declaration visible within this context, but
+ /// suppresses searches for external declarations with the same
+ /// name.
+ ///
+ /// Analogous to makeDeclVisibleInContext, but for the exclusive
+ /// use of addDeclInternal().
+ void makeDeclVisibleInContextInternal(NamedDecl *D);
+
+ friend class DependentDiagnostic;
+ StoredDeclsMap *CreateStoredDeclsMap(ASTContext &C) const;
+
+ void buildLookupImpl(DeclContext *DCtx);
+ void makeDeclVisibleInContextWithFlags(NamedDecl *D, bool Internal,
+ bool Rediscoverable);
+ void makeDeclVisibleInContextImpl(NamedDecl *D, bool Internal);
+};
+
+inline bool Decl::isTemplateParameter() const {
+ return getKind() == TemplateTypeParm || getKind() == NonTypeTemplateParm ||
+ getKind() == TemplateTemplateParm;
+}
+
+// Specialization selected when ToTy is not a known subclass of DeclContext.
+template <class ToTy,
+ bool IsKnownSubtype = ::llvm::is_base_of< DeclContext, ToTy>::value>
+struct cast_convert_decl_context {
+ static const ToTy *doit(const DeclContext *Val) {
+ return static_cast<const ToTy*>(Decl::castFromDeclContext(Val));
+ }
+
+ static ToTy *doit(DeclContext *Val) {
+ return static_cast<ToTy*>(Decl::castFromDeclContext(Val));
+ }
+};
+
+// Specialization selected when ToTy is a known subclass of DeclContext.
+template <class ToTy>
+struct cast_convert_decl_context<ToTy, true> {
+ static const ToTy *doit(const DeclContext *Val) {
+ return static_cast<const ToTy*>(Val);
+ }
+
+ static ToTy *doit(DeclContext *Val) {
+ return static_cast<ToTy*>(Val);
+ }
+};
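+
+// Editor's illustration (not part of the original header): together with the
+// llvm::cast_convert_val specializations below, these traits let clients cast
+// a DeclContext back to a specific Decl subclass, e.g. for an arbitrary
+// DeclContext *DC ('useFunction' is a hypothetical helper):
+//   if (FunctionDecl *FD = dyn_cast<FunctionDecl>(DC))
+//     useFunction(FD);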
+
+
+} // end namespace clang
+
+namespace llvm {
+
+/// isa<T>(DeclContext*)
+template <typename To>
+struct isa_impl<To, ::clang::DeclContext> {
+ static bool doit(const ::clang::DeclContext &Val) {
+ return To::classofKind(Val.getDeclKind());
+ }
+};
+
+/// cast<T>(DeclContext*)
+template<class ToTy>
+struct cast_convert_val<ToTy,
+ const ::clang::DeclContext,const ::clang::DeclContext> {
+ static const ToTy &doit(const ::clang::DeclContext &Val) {
+ return *::clang::cast_convert_decl_context<ToTy>::doit(&Val);
+ }
+};
+template<class ToTy>
+struct cast_convert_val<ToTy, ::clang::DeclContext, ::clang::DeclContext> {
+ static ToTy &doit(::clang::DeclContext &Val) {
+ return *::clang::cast_convert_decl_context<ToTy>::doit(&Val);
+ }
+};
+template<class ToTy>
+struct cast_convert_val<ToTy,
+ const ::clang::DeclContext*, const ::clang::DeclContext*> {
+ static const ToTy *doit(const ::clang::DeclContext *Val) {
+ return ::clang::cast_convert_decl_context<ToTy>::doit(Val);
+ }
+};
+template<class ToTy>
+struct cast_convert_val<ToTy, ::clang::DeclContext*, ::clang::DeclContext*> {
+ static ToTy *doit(::clang::DeclContext *Val) {
+ return ::clang::cast_convert_decl_context<ToTy>::doit(Val);
+ }
+};
+
+/// Implement cast_convert_val for Decl -> DeclContext conversions.
+template<class FromTy>
+struct cast_convert_val< ::clang::DeclContext, FromTy, FromTy> {
+ static ::clang::DeclContext &doit(const FromTy &Val) {
+ return *FromTy::castToDeclContext(&Val);
+ }
+};
+
+template<class FromTy>
+struct cast_convert_val< ::clang::DeclContext, FromTy*, FromTy*> {
+ static ::clang::DeclContext *doit(const FromTy *Val) {
+ return FromTy::castToDeclContext(Val);
+ }
+};
+
+template<class FromTy>
+struct cast_convert_val< const ::clang::DeclContext, FromTy, FromTy> {
+ static const ::clang::DeclContext &doit(const FromTy &Val) {
+ return *FromTy::castToDeclContext(&Val);
+ }
+};
+
+template<class FromTy>
+struct cast_convert_val< const ::clang::DeclContext, FromTy*, FromTy*> {
+ static const ::clang::DeclContext *doit(const FromTy *Val) {
+ return FromTy::castToDeclContext(Val);
+ }
+};
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h b/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h
new file mode 100644
index 0000000..7f3ec4c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclCXX.h
@@ -0,0 +1,2938 @@
+//===-- DeclCXX.h - Classes for representing C++ declarations -*- C++ -*-=====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the C++ Decl subclasses, other than those for
+// templates (in DeclTemplate.h) and friends (in DeclFriend.h).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_DECLCXX_H
+#define LLVM_CLANG_AST_DECLCXX_H
+
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/UnresolvedSet.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+
+class ClassTemplateDecl;
+class ClassTemplateSpecializationDecl;
+class CXXBasePath;
+class CXXBasePaths;
+class CXXConstructorDecl;
+class CXXConversionDecl;
+class CXXDestructorDecl;
+class CXXMethodDecl;
+class CXXRecordDecl;
+class CXXMemberLookupCriteria;
+class CXXFinalOverriderMap;
+class CXXIndirectPrimaryBaseSet;
+class FriendDecl;
+class LambdaExpr;
+
+/// \brief Represents any kind of function declaration, whether it is a
+/// concrete function or a function template.
+class AnyFunctionDecl {
+ NamedDecl *Function;
+
+ AnyFunctionDecl(NamedDecl *ND) : Function(ND) { }
+
+public:
+ AnyFunctionDecl(FunctionDecl *FD) : Function(FD) { }
+ AnyFunctionDecl(FunctionTemplateDecl *FTD);
+
+ /// \brief Implicitly converts any function or function template into a
+ /// named declaration.
+ operator NamedDecl *() const { return Function; }
+
+ /// \brief Retrieve the underlying function or function template.
+ NamedDecl *get() const { return Function; }
+
+ static AnyFunctionDecl getFromNamedDecl(NamedDecl *ND) {
+ return AnyFunctionDecl(ND);
+ }
+};
+
+} // end namespace clang
+
+namespace llvm {
+ /// Implement simplify_type for AnyFunctionDecl, so that we can dyn_cast from
+ /// AnyFunctionDecl to any function or function template declaration.
+ template<> struct simplify_type<const ::clang::AnyFunctionDecl> {
+ typedef ::clang::NamedDecl* SimpleType;
+ static SimpleType getSimplifiedValue(const ::clang::AnyFunctionDecl &Val) {
+ return Val;
+ }
+ };
+ template<> struct simplify_type< ::clang::AnyFunctionDecl>
+ : public simplify_type<const ::clang::AnyFunctionDecl> {};
+
+ // Provide PointerLikeTypeTraits for non-cvr pointers.
+ template<>
+ class PointerLikeTypeTraits< ::clang::AnyFunctionDecl> {
+ public:
+ static inline void *getAsVoidPointer(::clang::AnyFunctionDecl F) {
+ return F.get();
+ }
+ static inline ::clang::AnyFunctionDecl getFromVoidPointer(void *P) {
+ return ::clang::AnyFunctionDecl::getFromNamedDecl(
+ static_cast< ::clang::NamedDecl*>(P));
+ }
+
+ enum { NumLowBitsAvailable = 2 };
+ };
+
+} // end namespace llvm
+
+namespace clang {
+
+/// AccessSpecDecl - An access specifier followed by a colon (':').
+///
+/// An object of this class represents sugar for the syntactic occurrence
+/// of an access specifier followed by a colon in the list of member
+/// specifiers of a C++ class definition.
+///
+/// Note that they do not represent other uses of access specifiers,
+/// such as those occurring in a list of base specifiers.
+/// Also note that this class has nothing to do with so-called
+/// "access declarations" (C++98 11.3 [class.access.dcl]).
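+///
+/// Editor's illustration (not part of the original header):
+/// \code
+///   class C {
+///   public:      // the "public:" here is represented by an AccessSpecDecl
+///     int x;
+///   };
+/// \endcode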
+class AccessSpecDecl : public Decl {
+ virtual void anchor();
+ /// ColonLoc - The location of the ':'.
+ SourceLocation ColonLoc;
+
+ AccessSpecDecl(AccessSpecifier AS, DeclContext *DC,
+ SourceLocation ASLoc, SourceLocation ColonLoc)
+ : Decl(AccessSpec, DC, ASLoc), ColonLoc(ColonLoc) {
+ setAccess(AS);
+ }
+ AccessSpecDecl(EmptyShell Empty)
+ : Decl(AccessSpec, Empty) { }
+public:
+ /// getAccessSpecifierLoc - The location of the access specifier.
+ SourceLocation getAccessSpecifierLoc() const { return getLocation(); }
+ /// setAccessSpecifierLoc - Sets the location of the access specifier.
+ void setAccessSpecifierLoc(SourceLocation ASLoc) { setLocation(ASLoc); }
+
+ /// getColonLoc - The location of the colon following the access specifier.
+ SourceLocation getColonLoc() const { return ColonLoc; }
+ /// setColonLoc - Sets the location of the colon.
+ void setColonLoc(SourceLocation CLoc) { ColonLoc = CLoc; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getAccessSpecifierLoc(), getColonLoc());
+ }
+
+ static AccessSpecDecl *Create(ASTContext &C, AccessSpecifier AS,
+ DeclContext *DC, SourceLocation ASLoc,
+ SourceLocation ColonLoc) {
+ return new (C) AccessSpecDecl(AS, DC, ASLoc, ColonLoc);
+ }
+ static AccessSpecDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const AccessSpecDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == AccessSpec; }
+};
+
+
+/// CXXBaseSpecifier - A base class of a C++ class.
+///
+/// Each CXXBaseSpecifier represents a single, direct base class (or
+/// struct) of a C++ class (or struct). It specifies the type of that
+/// base class, whether it is a virtual or non-virtual base, and what
+/// level of access (public, protected, private) is used for the
+/// derivation. For example:
+///
+/// @code
+/// class A { };
+/// class B { };
+/// class C : public virtual A, protected B { };
+/// @endcode
+///
+/// In this code, C will have two CXXBaseSpecifiers, one for "public
+/// virtual A" and the other for "protected B".
+class CXXBaseSpecifier {
+ /// Range - The source code range that covers the full base
+ /// specifier, including the "virtual" (if present) and access
+ /// specifier (if present).
+ SourceRange Range;
+
+ /// \brief The source location of the ellipsis, if this is a pack
+ /// expansion.
+ SourceLocation EllipsisLoc;
+
+ /// Virtual - Whether this is a virtual base class or not.
+ bool Virtual : 1;
+
+ /// BaseOfClass - Whether this is the base of a class (true) or of a
+ /// struct (false). This determines the mapping from the access
+ /// specifier as written in the source code to the access specifier
+ /// used for semantic analysis.
+ bool BaseOfClass : 1;
+
+ /// Access - Access specifier as written in the source code (which
+ /// may be AS_none). The actual type of data stored here is an
+ /// AccessSpecifier, but we use "unsigned" here to work around a
+ /// VC++ bug.
+ unsigned Access : 2;
+
+ /// InheritConstructors - Whether the class contains a using declaration
+ /// to inherit the named class's constructors.
+ bool InheritConstructors : 1;
+
+ /// BaseTypeInfo - The type of the base class. This will be a class or struct
+ /// (or a typedef of such). The source code range does not include the
+ /// "virtual" or access specifier.
+ TypeSourceInfo *BaseTypeInfo;
+
+public:
+ CXXBaseSpecifier() { }
+
+ CXXBaseSpecifier(SourceRange R, bool V, bool BC, AccessSpecifier A,
+ TypeSourceInfo *TInfo, SourceLocation EllipsisLoc)
+ : Range(R), EllipsisLoc(EllipsisLoc), Virtual(V), BaseOfClass(BC),
+ Access(A), InheritConstructors(false), BaseTypeInfo(TInfo) { }
+
+ /// getSourceRange - Retrieves the source range that contains the
+ /// entire base specifier.
+ SourceRange getSourceRange() const LLVM_READONLY { return Range; }
+ SourceLocation getLocStart() const LLVM_READONLY { return Range.getBegin(); }
+ SourceLocation getLocEnd() const LLVM_READONLY { return Range.getEnd(); }
+
+ /// isVirtual - Determines whether the base class is a virtual base
+ /// class (or not).
+ bool isVirtual() const { return Virtual; }
+
+ /// \brief Determine whether this base class is a base of a class declared
+ /// with the 'class' keyword (vs. one declared with the 'struct' keyword).
+ bool isBaseOfClass() const { return BaseOfClass; }
+
+ /// \brief Determine whether this base specifier is a pack expansion.
+ bool isPackExpansion() const { return EllipsisLoc.isValid(); }
+
+ /// \brief Determine whether this base class's constructors get inherited.
+ bool getInheritConstructors() const { return InheritConstructors; }
+
+ /// \brief Set that this base class's constructors should be inherited.
+ void setInheritConstructors(bool Inherit = true) {
+ InheritConstructors = Inherit;
+ }
+
+ /// \brief For a pack expansion, determine the location of the ellipsis.
+ SourceLocation getEllipsisLoc() const {
+ return EllipsisLoc;
+ }
+
+ /// getAccessSpecifier - Returns the access specifier for this base
+ /// specifier. This is the actual base specifier as used for
+ /// semantic analysis, so the result can never be AS_none. To
+ /// retrieve the access specifier as written in the source code, use
+ /// getAccessSpecifierAsWritten().
+ AccessSpecifier getAccessSpecifier() const {
+ if ((AccessSpecifier)Access == AS_none)
+ return BaseOfClass? AS_private : AS_public;
+ else
+ return (AccessSpecifier)Access;
+ }
+
+ /// getAccessSpecifierAsWritten - Retrieves the access specifier as
+ /// written in the source code (which may mean that no access
+ /// specifier was explicitly written). Use getAccessSpecifier() to
+ /// retrieve the access specifier for use in semantic analysis.
+ AccessSpecifier getAccessSpecifierAsWritten() const {
+ return (AccessSpecifier)Access;
+ }
+
+ /// getType - Retrieves the type of the base class. This type will
+ /// always be an unqualified class type.
+ QualType getType() const { return BaseTypeInfo->getType(); }
+
+ /// getTypeSourceInfo - Retrieves the type and source location of the
+ /// base class.
+ TypeSourceInfo *getTypeSourceInfo() const { return BaseTypeInfo; }
+};
+
+/// CXXRecordDecl - Represents a C++ struct/union/class.
+/// FIXME: This class will disappear once we've properly taught RecordDecl
+/// to deal with C++-specific things.
+class CXXRecordDecl : public RecordDecl {
+
+ friend void TagDecl::startDefinition();
+
+ struct DefinitionData {
+ DefinitionData(CXXRecordDecl *D);
+
+ /// UserDeclaredConstructor - True when this class has a
+ /// user-declared constructor.
+ bool UserDeclaredConstructor : 1;
+
+ /// UserDeclaredCopyConstructor - True when this class has a
+ /// user-declared copy constructor.
+ bool UserDeclaredCopyConstructor : 1;
+
+ /// UserDeclaredMoveConstructor - True when this class has a
+ /// user-declared move constructor.
+ bool UserDeclaredMoveConstructor : 1;
+
+ /// UserDeclaredCopyAssignment - True when this class has a
+ /// user-declared copy assignment operator.
+ bool UserDeclaredCopyAssignment : 1;
+
+ /// UserDeclaredMoveAssignment - True when this class has a
+ /// user-declared move assignment operator.
+ bool UserDeclaredMoveAssignment : 1;
+
+ /// UserDeclaredDestructor - True when this class has a
+ /// user-declared destructor.
+ bool UserDeclaredDestructor : 1;
+
+ /// Aggregate - True when this class is an aggregate.
+ bool Aggregate : 1;
+
+ /// PlainOldData - True when this class is a POD-type.
+ bool PlainOldData : 1;
+
+ /// Empty - true when this class is empty for traits purposes,
+ /// i.e. has no data members other than 0-width bit-fields, has no
+ /// virtual function/base, and doesn't inherit from a non-empty
+ /// class. Doesn't take union-ness into account.
+ bool Empty : 1;
+
+ /// Polymorphic - True when this class is polymorphic, i.e. has at
+ /// least one virtual member or derives from a polymorphic class.
+ bool Polymorphic : 1;
+
+ /// Abstract - True when this class is abstract, i.e. has at least
+ /// one pure virtual function (which may come from a base class).
+ bool Abstract : 1;
+
+ /// IsStandardLayout - True when this class has standard layout.
+ ///
+ /// C++0x [class]p7. A standard-layout class is a class that:
+ /// * has no non-static data members of type non-standard-layout class (or
+ /// array of such types) or reference,
+ /// * has no virtual functions (10.3) and no virtual base classes (10.1),
+ /// * has the same access control (Clause 11) for all non-static data
+ /// members
+ /// * has no non-standard-layout base classes,
+ /// * either has no non-static data members in the most derived class and at
+ /// most one base class with non-static data members, or has no base
+ /// classes with non-static data members, and
+ /// * has no base classes of the same type as the first non-static data
+ /// member.
+ bool IsStandardLayout : 1;
+
+ /// HasNoNonEmptyBases - True when there are no non-empty base classes.
+ ///
+ /// This is a helper bit of state used to implement IsStandardLayout more
+ /// efficiently.
+ bool HasNoNonEmptyBases : 1;
+
+ /// HasPrivateFields - True when there are private non-static data members.
+ bool HasPrivateFields : 1;
+
+ /// HasProtectedFields - True when there are protected non-static data
+ /// members.
+ bool HasProtectedFields : 1;
+
+ /// HasPublicFields - True when there are public non-static data members.
+ bool HasPublicFields : 1;
+
+ /// \brief True if this class (or any subobject) has mutable fields.
+ bool HasMutableFields : 1;
+
+ /// \brief True if there are no non-field members declared by the user.
+ bool HasOnlyCMembers : 1;
+
+ /// HasTrivialDefaultConstructor - True when, if this class has a default
+ /// constructor, this default constructor is trivial.
+ ///
+ /// C++0x [class.ctor]p5
+ /// A default constructor is trivial if it is not user-provided and if
+ /// -- its class has no virtual functions and no virtual base classes,
+ /// and
+ /// -- no non-static data member of its class has a
+ /// brace-or-equal-initializer, and
+ /// -- all the direct base classes of its class have trivial
+ /// default constructors, and
+ /// -- for all the nonstatic data members of its class that are of class
+ /// type (or array thereof), each such class has a trivial
+ /// default constructor.
+ bool HasTrivialDefaultConstructor : 1;
+
+ /// HasConstexprNonCopyMoveConstructor - True when this class has at least
+ /// one user-declared constexpr constructor which is neither the copy nor
+ /// move constructor.
+ bool HasConstexprNonCopyMoveConstructor : 1;
+
+ /// DefaultedDefaultConstructorIsConstexpr - True if a defaulted default
+ /// constructor for this class would be constexpr.
+ bool DefaultedDefaultConstructorIsConstexpr : 1;
+
+ /// DefaultedCopyConstructorIsConstexpr - True if a defaulted copy
+ /// constructor for this class would be constexpr.
+ bool DefaultedCopyConstructorIsConstexpr : 1;
+
+ /// DefaultedMoveConstructorIsConstexpr - True if a defaulted move
+ /// constructor for this class would be constexpr.
+ bool DefaultedMoveConstructorIsConstexpr : 1;
+
+ /// HasConstexprDefaultConstructor - True if this class has a constexpr
+ /// default constructor (either user-declared or implicitly declared).
+ bool HasConstexprDefaultConstructor : 1;
+
+ /// HasConstexprCopyConstructor - True if this class has a constexpr copy
+ /// constructor (either user-declared or implicitly declared).
+ bool HasConstexprCopyConstructor : 1;
+
+ /// HasConstexprMoveConstructor - True if this class has a constexpr move
+ /// constructor (either user-declared or implicitly declared).
+ bool HasConstexprMoveConstructor : 1;
+
+ /// HasTrivialCopyConstructor - True when this class has a trivial copy
+ /// constructor.
+ ///
+ /// C++0x [class.copy]p13:
+ /// A copy/move constructor for class X is trivial if it is not
+ /// user-provided and if
+ /// -- class X has no virtual functions and no virtual base classes, and
+ /// -- the constructor selected to copy/move each direct base class
+ /// subobject is trivial, and
+ /// -- for each non-static data member of X that is of class type (or an
+ /// array thereof), the constructor selected to copy/move that member
+ /// is trivial;
+ /// otherwise the copy/move constructor is non-trivial.
+ bool HasTrivialCopyConstructor : 1;
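+ // A minimal sketch of C++0x [class.copy]p13 (illustrative types only):
+ //   struct P { int x; };             // implicit copy constructor -> trivial
+ //   struct Q { Q(const Q &); };      // user-provided -> not trivial
+ //   struct R { virtual void f(); };  // virtual function -> not trivial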
+
+ /// HasTrivialMoveConstructor - True when this class has a trivial move
+ /// constructor.
+ ///
+ /// C++0x [class.copy]p13:
+ /// A copy/move constructor for class X is trivial if it is not
+ /// user-provided and if
+ /// -- class X has no virtual functions and no virtual base classes, and
+ /// -- the constructor selected to copy/move each direct base class
+ /// subobject is trivial, and
+ /// -- for each non-static data member of X that is of class type (or an
+ /// array thereof), the constructor selected to copy/move that member
+ /// is trivial;
+ /// otherwise the copy/move constructor is non-trivial.
+ bool HasTrivialMoveConstructor : 1;
+
+ /// HasTrivialCopyAssignment - True when this class has a trivial copy
+ /// assignment operator.
+ ///
+ /// C++0x [class.copy]p27:
+ /// A copy/move assignment operator for class X is trivial if it is
+ /// neither user-provided nor deleted and if
+ /// -- class X has no virtual functions and no virtual base classes, and
+ /// -- the assignment operator selected to copy/move each direct base
+ /// class subobject is trivial, and
+ /// -- for each non-static data member of X that is of class type (or an
+ /// array thereof), the assignment operator selected to copy/move
+ /// that member is trivial;
+ /// otherwise the copy/move assignment operator is non-trivial.
+ bool HasTrivialCopyAssignment : 1;
+
+ /// HasTrivialMoveAssignment - True when this class has a trivial move
+ /// assignment operator.
+ ///
+ /// C++0x [class.copy]p27:
+ /// A copy/move assignment operator for class X is trivial if it is
+ /// neither user-provided nor deleted and if
+ /// -- class X has no virtual functions and no virtual base classes, and
+ /// -- the assignment operator selected to copy/move each direct base
+ /// class subobject is trivial, and
+ /// -- for each non-static data member of X that is of class type (or an
+ /// array thereof), the assignment operator selected to copy/move
+ /// that member is trivial;
+ /// otherwise the copy/move assignment operator is non-trivial.
+ bool HasTrivialMoveAssignment : 1;
+
+ /// HasTrivialDestructor - True when this class has a trivial destructor.
+ ///
+ /// C++ [class.dtor]p3. A destructor is trivial if it is an
+ /// implicitly-declared destructor and if:
+ /// * all of the direct base classes of its class have trivial destructors
+ /// and
+ /// * for all of the non-static data members of its class that are of class
+ /// type (or array thereof), each such class has a trivial destructor.
+ bool HasTrivialDestructor : 1;
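+ // A minimal sketch of C++ [class.dtor]p3 (illustrative types only):
+ //   struct D { int x; };    // implicitly-declared destructor -> trivial
+ //   struct E { ~E() {} };   // user-declared destructor -> not trivial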
+
+ /// HasIrrelevantDestructor - True when this class has a destructor with no
+ /// semantic effect.
+ bool HasIrrelevantDestructor : 1;
+
+ /// HasNonLiteralTypeFieldsOrBases - True when this class contains at least
+ /// one non-static data member or base class of non-literal or volatile
+ /// type.
+ bool HasNonLiteralTypeFieldsOrBases : 1;
+
+ /// ComputedVisibleConversions - True when visible conversion functions are
+ /// already computed and are available.
+ bool ComputedVisibleConversions : 1;
+
+ /// \brief Whether we have a C++0x user-provided default constructor (not
+ /// explicitly deleted or defaulted).
+ bool UserProvidedDefaultConstructor : 1;
+
+ /// \brief Whether we have already declared the default constructor.
+ bool DeclaredDefaultConstructor : 1;
+
+ /// \brief Whether we have already declared the copy constructor.
+ bool DeclaredCopyConstructor : 1;
+
+ /// \brief Whether we have already declared the move constructor.
+ bool DeclaredMoveConstructor : 1;
+
+ /// \brief Whether we have already declared the copy-assignment operator.
+ bool DeclaredCopyAssignment : 1;
+
+ /// \brief Whether we have already declared the move-assignment operator.
+ bool DeclaredMoveAssignment : 1;
+
+ /// \brief Whether we have already declared a destructor within the class.
+ bool DeclaredDestructor : 1;
+
+ /// \brief Whether an implicit move constructor was attempted to be declared
+ /// but would have been deleted.
+ bool FailedImplicitMoveConstructor : 1;
+
+ /// \brief Whether an implicit move assignment operator was attempted to be
+ /// declared but would have been deleted.
+ bool FailedImplicitMoveAssignment : 1;
+
+ /// \brief Whether this class describes a C++ lambda.
+ bool IsLambda : 1;
+
+ /// NumBases - The number of base class specifiers in Bases.
+ unsigned NumBases;
+
+ /// NumVBases - The number of virtual base class specifiers in VBases.
+ unsigned NumVBases;
+
+ /// Bases - Base classes of this class.
+ /// FIXME: This is wasted space for a union.
+ LazyCXXBaseSpecifiersPtr Bases;
+
+ /// VBases - direct and indirect virtual base classes of this class.
+ LazyCXXBaseSpecifiersPtr VBases;
+
+ /// Conversions - Overload set containing the conversion functions
+ /// of this C++ class (but not its inherited conversion
+ /// functions). Each of the entries in this overload set is a
+ /// CXXConversionDecl.
+ UnresolvedSet<4> Conversions;
+
+ /// VisibleConversions - Overload set containing the conversion
+ /// functions of this C++ class and all those inherited conversion
+ /// functions that are visible in this class. Each of the entries
+ /// in this overload set is a CXXConversionDecl or a
+ /// FunctionTemplateDecl.
+ UnresolvedSet<4> VisibleConversions;
+
+ /// Definition - The declaration which defines this record.
+ CXXRecordDecl *Definition;
+
+ /// FirstFriend - The first friend declaration in this class, or
+ /// null if there aren't any. This is actually currently stored
+ /// in reverse order.
+ FriendDecl *FirstFriend;
+
+ /// \brief Retrieve the set of direct base classes.
+ CXXBaseSpecifier *getBases() const {
+ return Bases.get(Definition->getASTContext().getExternalSource());
+ }
+
+ /// \brief Retrieve the set of virtual base classes.
+ CXXBaseSpecifier *getVBases() const {
+ return VBases.get(Definition->getASTContext().getExternalSource());
+ }
+ } *DefinitionData;
+
+ /// \brief Describes a C++ closure type (generated by a lambda expression).
+ struct LambdaDefinitionData : public DefinitionData {
+ typedef LambdaExpr::Capture Capture;
+
+ LambdaDefinitionData(CXXRecordDecl *D, bool Dependent)
+ : DefinitionData(D), Dependent(Dependent), NumCaptures(0),
+ NumExplicitCaptures(0), ManglingNumber(0), ContextDecl(0), Captures(0)
+ {
+ IsLambda = true;
+ }
+
+ /// \brief Whether this lambda is known to be dependent, even if its
+ /// context isn't dependent.
+ ///
+ /// A lambda with a non-dependent context can be dependent if it occurs
+ /// within the default argument of a function template, because the
+ /// lambda will have been created with the enclosing context as its
+ /// declaration context, rather than the function. This is an unfortunate
+ /// artifact of having to parse the default arguments before their enclosing
+ /// function declarations have been created.
+ unsigned Dependent : 1;
+
+ /// \brief The number of captures in this lambda.
+ unsigned NumCaptures : 16;
+
+ /// \brief The number of explicit captures in this lambda.
+ unsigned NumExplicitCaptures : 15;
+
+ /// \brief The number used to indicate this lambda expression for name
+ /// mangling in the Itanium C++ ABI.
+ unsigned ManglingNumber;
+
+ /// \brief The declaration that provides context for this lambda, if the
+ /// actual DeclContext does not suffice. This is used for lambdas that
+ /// occur within default arguments of function parameters within the class
+ /// or within a data member initializer.
+ Decl *ContextDecl;
+
+ /// \brief The list of captures, both explicit and implicit, for this
+ /// lambda.
+ Capture *Captures;
+ };
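+ // A sketch of how this data is populated (hypothetical source, not an API
+ // guarantee): for
+ //   int n = 0;
+ //   auto f = [n](int i) { return n + i; };
+ // the closure type is a CXXRecordDecl whose LambdaDefinitionData records the
+ // single explicit capture of 'n' in Captures, with NumCaptures == 1 and
+ // NumExplicitCaptures == 1, and a ManglingNumber used for Itanium name
+ // mangling when one is needed.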
+
+ struct DefinitionData &data() {
+ assert(DefinitionData && "queried property of class with no definition");
+ return *DefinitionData;
+ }
+
+ const struct DefinitionData &data() const {
+ assert(DefinitionData && "queried property of class with no definition");
+ return *DefinitionData;
+ }
+
+ struct LambdaDefinitionData &getLambdaData() const {
+ assert(DefinitionData && "queried property of lambda with no definition");
+ assert(DefinitionData->IsLambda &&
+ "queried lambda property of non-lambda class");
+ return static_cast<LambdaDefinitionData &>(*DefinitionData);
+ }
+
+ /// \brief The template or declaration that this declaration
+ /// describes or was instantiated from, respectively.
+ ///
+ /// For non-templates, this value will be NULL. For record
+ /// declarations that describe a class template, this will be a
+ /// pointer to a ClassTemplateDecl. For member
+ /// classes of class template specializations, this will be the
+ /// MemberSpecializationInfo referring to the member class that was
+ /// instantiated or specialized.
+ llvm::PointerUnion<ClassTemplateDecl*, MemberSpecializationInfo*>
+ TemplateOrInstantiation;
+
+ friend class DeclContext;
+ friend class LambdaExpr;
+
+ /// \brief Notify the class that a member has been added.
+ ///
+ /// This routine helps maintain information about the class based on which
+ /// members have been added. It will be invoked by DeclContext::addDecl()
+ /// whenever a member is added to this record.
+ void addedMember(Decl *D);
+
+ void markedVirtualFunctionPure();
+ friend void FunctionDecl::setPure(bool);
+
+ friend class ASTNodeImporter;
+
+protected:
+ CXXRecordDecl(Kind K, TagKind TK, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, CXXRecordDecl *PrevDecl);
+
+public:
+ /// base_class_iterator - Iterator that traverses the base classes
+ /// of a class.
+ typedef CXXBaseSpecifier* base_class_iterator;
+
+ /// base_class_const_iterator - Iterator that traverses the base
+ /// classes of a class.
+ typedef const CXXBaseSpecifier* base_class_const_iterator;
+
+ /// reverse_base_class_iterator - Iterator that traverses the base classes
+ /// of a class in reverse order.
+ typedef std::reverse_iterator<base_class_iterator>
+ reverse_base_class_iterator;
+
+ /// reverse_base_class_const_iterator - Iterator that traverses the base
+ /// classes of a class in reverse order.
+ typedef std::reverse_iterator<base_class_const_iterator>
+ reverse_base_class_const_iterator;
+
+ virtual CXXRecordDecl *getCanonicalDecl() {
+ return cast<CXXRecordDecl>(RecordDecl::getCanonicalDecl());
+ }
+ virtual const CXXRecordDecl *getCanonicalDecl() const {
+ return cast<CXXRecordDecl>(RecordDecl::getCanonicalDecl());
+ }
+
+ const CXXRecordDecl *getPreviousDecl() const {
+ return cast_or_null<CXXRecordDecl>(RecordDecl::getPreviousDecl());
+ }
+ CXXRecordDecl *getPreviousDecl() {
+ return cast_or_null<CXXRecordDecl>(RecordDecl::getPreviousDecl());
+ }
+
+ const CXXRecordDecl *getMostRecentDecl() const {
+ return cast_or_null<CXXRecordDecl>(RecordDecl::getMostRecentDecl());
+ }
+ CXXRecordDecl *getMostRecentDecl() {
+ return cast_or_null<CXXRecordDecl>(RecordDecl::getMostRecentDecl());
+ }
+
+ CXXRecordDecl *getDefinition() const {
+ if (!DefinitionData) return 0;
+ return data().Definition;
+ }
+
+ bool hasDefinition() const { return DefinitionData != 0; }
+
+ static CXXRecordDecl *Create(const ASTContext &C, TagKind TK, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, CXXRecordDecl* PrevDecl=0,
+ bool DelayTypeCreation = false);
+ static CXXRecordDecl *CreateLambda(const ASTContext &C, DeclContext *DC,
+ SourceLocation Loc, bool DependentLambda);
+ static CXXRecordDecl *CreateDeserialized(const ASTContext &C, unsigned ID);
+
+ bool isDynamicClass() const {
+ return data().Polymorphic || data().NumVBases != 0;
+ }
+
+ /// setBases - Sets the base classes of this struct or class.
+ void setBases(CXXBaseSpecifier const * const *Bases, unsigned NumBases);
+
+ /// getNumBases - Retrieves the number of base classes of this
+ /// class.
+ unsigned getNumBases() const { return data().NumBases; }
+
+ base_class_iterator bases_begin() { return data().getBases(); }
+ base_class_const_iterator bases_begin() const { return data().getBases(); }
+ base_class_iterator bases_end() { return bases_begin() + data().NumBases; }
+ base_class_const_iterator bases_end() const {
+ return bases_begin() + data().NumBases;
+ }
+ reverse_base_class_iterator bases_rbegin() {
+ return reverse_base_class_iterator(bases_end());
+ }
+ reverse_base_class_const_iterator bases_rbegin() const {
+ return reverse_base_class_const_iterator(bases_end());
+ }
+ reverse_base_class_iterator bases_rend() {
+ return reverse_base_class_iterator(bases_begin());
+ }
+ reverse_base_class_const_iterator bases_rend() const {
+ return reverse_base_class_const_iterator(bases_begin());
+ }
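+ // Usage sketch (assuming RD is a CXXRecordDecl* with a definition): print the
+ // type of every direct base class.
+ //   for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ //                                                 E = RD->bases_end();
+ //        I != E; ++I)
+ //     llvm::errs() << I->getType().getAsString() << "\n";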
+
+ /// getNumVBases - Retrieves the number of virtual base classes of this
+ /// class.
+ unsigned getNumVBases() const { return data().NumVBases; }
+
+ base_class_iterator vbases_begin() { return data().getVBases(); }
+ base_class_const_iterator vbases_begin() const { return data().getVBases(); }
+ base_class_iterator vbases_end() { return vbases_begin() + data().NumVBases; }
+ base_class_const_iterator vbases_end() const {
+ return vbases_begin() + data().NumVBases;
+ }
+ reverse_base_class_iterator vbases_rbegin() {
+ return reverse_base_class_iterator(vbases_end());
+ }
+ reverse_base_class_const_iterator vbases_rbegin() const {
+ return reverse_base_class_const_iterator(vbases_end());
+ }
+ reverse_base_class_iterator vbases_rend() {
+ return reverse_base_class_iterator(vbases_begin());
+ }
+ reverse_base_class_const_iterator vbases_rend() const {
+ return reverse_base_class_const_iterator(vbases_begin());
+ }
+
+ /// \brief Determine whether this class has any dependent base classes.
+ bool hasAnyDependentBases() const;
+
+ /// Iterator access to method members. The method iterator visits
+ /// all method members of the class, including non-instance methods,
+ /// special methods, etc.
+ typedef specific_decl_iterator<CXXMethodDecl> method_iterator;
+
+ /// method_begin - Method begin iterator. Iterates in the order the methods
+ /// were declared.
+ method_iterator method_begin() const {
+ return method_iterator(decls_begin());
+ }
+ /// method_end - Method end iterator.
+ method_iterator method_end() const {
+ return method_iterator(decls_end());
+ }
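+ // Usage sketch (assuming RD is a defined CXXRecordDecl*): count the virtual
+ // methods of this class.
+ //   unsigned NumVirtual = 0;
+ //   for (CXXRecordDecl::method_iterator M = RD->method_begin(),
+ //                                       MEnd = RD->method_end();
+ //        M != MEnd; ++M)
+ //     if (M->isVirtual())
+ //       ++NumVirtual;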
+
+ /// Iterator access to constructor members.
+ typedef specific_decl_iterator<CXXConstructorDecl> ctor_iterator;
+
+ ctor_iterator ctor_begin() const {
+ return ctor_iterator(decls_begin());
+ }
+ ctor_iterator ctor_end() const {
+ return ctor_iterator(decls_end());
+ }
+
+ /// An iterator over friend declarations. All of these are defined
+ /// in DeclFriend.h.
+ class friend_iterator;
+ friend_iterator friend_begin() const;
+ friend_iterator friend_end() const;
+ void pushFriendDecl(FriendDecl *FD);
+
+ /// Determines whether this record has any friends.
+ bool hasFriends() const {
+ return data().FirstFriend != 0;
+ }
+
+ /// \brief Determine if we need to declare a default constructor for
+ /// this class.
+ ///
+ /// This value is used for lazy creation of default constructors.
+ bool needsImplicitDefaultConstructor() const {
+ return !data().UserDeclaredConstructor &&
+ !data().DeclaredDefaultConstructor;
+ }
+
+ /// hasDeclaredDefaultConstructor - Whether this class's default constructor
+ /// has been declared (either explicitly or implicitly).
+ bool hasDeclaredDefaultConstructor() const {
+ return data().DeclaredDefaultConstructor;
+ }
+
+ /// hasConstCopyConstructor - Determines whether this class has a
+ /// copy constructor that accepts a const-qualified argument.
+ bool hasConstCopyConstructor() const;
+
+ /// getCopyConstructor - Returns the copy constructor for this class
+ CXXConstructorDecl *getCopyConstructor(unsigned TypeQuals) const;
+
+ /// getMoveConstructor - Returns the move constructor for this class
+ CXXConstructorDecl *getMoveConstructor() const;
+
+ /// \brief Retrieve the copy-assignment operator for this class, if available.
+ ///
+ /// This routine attempts to find the copy-assignment operator for this
+ /// class, using a simplistic form of overload resolution.
+ ///
+ /// \param ArgIsConst Whether the argument to the copy-assignment operator
+ /// is const-qualified.
+ ///
+ /// \returns The copy-assignment operator that can be invoked, or NULL if
+ /// a unique copy-assignment operator could not be found.
+ CXXMethodDecl *getCopyAssignmentOperator(bool ArgIsConst) const;
+
+ /// getMoveAssignmentOperator - Returns the move assignment operator for this
+ /// class
+ CXXMethodDecl *getMoveAssignmentOperator() const;
+
+ /// hasUserDeclaredConstructor - Whether this class has any
+ /// user-declared constructors. When true, a default constructor
+ /// will not be implicitly declared.
+ bool hasUserDeclaredConstructor() const {
+ return data().UserDeclaredConstructor;
+ }
+
+ /// hasUserProvidedDefaultConstructor - Whether this class has a
+ /// user-provided default constructor per C++0x.
+ bool hasUserProvidedDefaultConstructor() const {
+ return data().UserProvidedDefaultConstructor;
+ }
+
+ /// hasUserDeclaredCopyConstructor - Whether this class has a
+ /// user-declared copy constructor. When false, a copy constructor
+ /// will be implicitly declared.
+ bool hasUserDeclaredCopyConstructor() const {
+ return data().UserDeclaredCopyConstructor;
+ }
+
+ /// \brief Determine whether this class has had its copy constructor
+ /// declared, either via the user or via an implicit declaration.
+ ///
+ /// This value is used for lazy creation of copy constructors.
+ bool hasDeclaredCopyConstructor() const {
+ return data().DeclaredCopyConstructor;
+ }
+
+ /// hasUserDeclaredMoveOperation - Whether this class has a user-
+ /// declared move constructor or assignment operator. When false, a
+ /// move constructor and assignment operator may be implicitly declared.
+ bool hasUserDeclaredMoveOperation() const {
+ return data().UserDeclaredMoveConstructor ||
+ data().UserDeclaredMoveAssignment;
+ }
+
+ /// \brief Determine whether this class has had a move constructor
+ /// declared by the user.
+ bool hasUserDeclaredMoveConstructor() const {
+ return data().UserDeclaredMoveConstructor;
+ }
+
+ /// \brief Determine whether this class has had a move constructor
+ /// declared.
+ bool hasDeclaredMoveConstructor() const {
+ return data().DeclaredMoveConstructor;
+ }
+
+ /// \brief Determine whether implicit move constructor generation for this
+ /// class has failed before.
+ bool hasFailedImplicitMoveConstructor() const {
+ return data().FailedImplicitMoveConstructor;
+ }
+
+ /// \brief Set whether implicit move constructor generation for this class
+ /// has failed before.
+ void setFailedImplicitMoveConstructor(bool Failed = true) {
+ data().FailedImplicitMoveConstructor = Failed;
+ }
+
+ /// \brief Determine whether this class should get an implicit move
+ /// constructor or if any existing special member function inhibits this.
+ ///
+ /// Covers all bullets of C++0x [class.copy]p9 except the last, that the
+ /// constructor wouldn't be deleted, which is only looked up from a cached
+ /// result.
+ bool needsImplicitMoveConstructor() const {
+ return !hasFailedImplicitMoveConstructor() &&
+ !hasDeclaredMoveConstructor() &&
+ !hasUserDeclaredCopyConstructor() &&
+ !hasUserDeclaredCopyAssignment() &&
+ !hasUserDeclaredMoveAssignment() &&
+ !hasUserDeclaredDestructor();
+ }
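+ // Illustrative consequence (sketch): for
+ //   struct M { ~M(); };
+ // the user-declared destructor makes needsImplicitMoveConstructor() return
+ // false, so no implicit move constructor is declared for M, while a plain
+ //   struct N { int x; };
+ // still qualifies for one.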
+
+ /// hasUserDeclaredCopyAssignment - Whether this class has a
+ /// user-declared copy assignment operator. When false, a copy
+ /// assignment operator will be implicitly declared.
+ bool hasUserDeclaredCopyAssignment() const {
+ return data().UserDeclaredCopyAssignment;
+ }
+
+ /// \brief Determine whether this class has had its copy assignment operator
+ /// declared, either via the user or via an implicit declaration.
+ ///
+ /// This value is used for lazy creation of copy assignment operators.
+ bool hasDeclaredCopyAssignment() const {
+ return data().DeclaredCopyAssignment;
+ }
+
+ /// \brief Determine whether this class has had a move assignment
+ /// declared by the user.
+ bool hasUserDeclaredMoveAssignment() const {
+ return data().UserDeclaredMoveAssignment;
+ }
+
+ /// hasDeclaredMoveAssignment - Whether this class has a
+ /// declared move assignment operator.
+ bool hasDeclaredMoveAssignment() const {
+ return data().DeclaredMoveAssignment;
+ }
+
+ /// \brief Determine whether implicit move assignment generation for this
+ /// class has failed before.
+ bool hasFailedImplicitMoveAssignment() const {
+ return data().FailedImplicitMoveAssignment;
+ }
+
+ /// \brief Set whether implicit move assignment generation for this class
+ /// has failed before.
+ void setFailedImplicitMoveAssignment(bool Failed = true) {
+ data().FailedImplicitMoveAssignment = Failed;
+ }
+
+ /// \brief Determine whether this class should get an implicit move
+ /// assignment operator or if any existing special member function inhibits
+ /// this.
+ ///
+ /// Covers all bullets of C++0x [class.copy]p20 except the last, that the
+ /// assignment operator wouldn't be deleted.
+ bool needsImplicitMoveAssignment() const {
+ return !hasFailedImplicitMoveAssignment() &&
+ !hasDeclaredMoveAssignment() &&
+ !hasUserDeclaredCopyConstructor() &&
+ !hasUserDeclaredCopyAssignment() &&
+ !hasUserDeclaredMoveConstructor() &&
+ !hasUserDeclaredDestructor();
+ }
+
+ /// hasUserDeclaredDestructor - Whether this class has a
+ /// user-declared destructor. When false, a destructor will be
+ /// implicitly declared.
+ bool hasUserDeclaredDestructor() const {
+ return data().UserDeclaredDestructor;
+ }
+
+ /// \brief Determine whether this class has had its destructor declared,
+ /// either via the user or via an implicit declaration.
+ ///
+ /// This value is used for lazy creation of destructors.
+ bool hasDeclaredDestructor() const { return data().DeclaredDestructor; }
+
+ /// \brief Determine whether this class describes a lambda function object.
+ bool isLambda() const { return hasDefinition() && data().IsLambda; }
+
+ /// \brief For a closure type, retrieve the mapping from captured
+ /// variables and 'this' to the non-static data members that store the
+ /// values or references of the captures.
+ ///
+ /// \param Captures Will be populated with the mapping from captured
+ /// variables to the corresponding fields.
+ ///
+ /// \param ThisCapture Will be set to the field declaration for the
+ /// 'this' capture.
+ void getCaptureFields(llvm::DenseMap<const VarDecl *, FieldDecl *> &Captures,
+ FieldDecl *&ThisCapture) const;
+
+ typedef const LambdaExpr::Capture* capture_const_iterator;
+ capture_const_iterator captures_begin() const {
+ return isLambda() ? getLambdaData().Captures : NULL;
+ }
+ capture_const_iterator captures_end() const {
+ return isLambda() ? captures_begin() + getLambdaData().NumCaptures : NULL;
+ }
+
+ /// getConversionFunctions - Retrieve the overload set containing all of the
+ /// conversion functions in this class.
+ UnresolvedSetImpl *getConversionFunctions() {
+ return &data().Conversions;
+ }
+ const UnresolvedSetImpl *getConversionFunctions() const {
+ return &data().Conversions;
+ }
+
+ typedef UnresolvedSetImpl::iterator conversion_iterator;
+ conversion_iterator conversion_begin() const {
+ return getConversionFunctions()->begin();
+ }
+ conversion_iterator conversion_end() const {
+ return getConversionFunctions()->end();
+ }
+
+ /// Removes a conversion function from this class. The conversion
+ /// function must currently be a member of this class. Furthermore,
+ /// this class must currently be in the process of being defined.
+ void removeConversion(const NamedDecl *Old);
+
+ /// getVisibleConversionFunctions - get all conversion functions visible
+ /// in the current class, including conversion function templates.
+ const UnresolvedSetImpl *getVisibleConversionFunctions();
+
+ /// isAggregate - Whether this class is an aggregate (C++
+ /// [dcl.init.aggr]), which is a class with no user-declared
+ /// constructors, no private or protected non-static data members,
+ /// no base classes, and no virtual functions (C++ [dcl.init.aggr]p1).
+ bool isAggregate() const { return data().Aggregate; }
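+ // A minimal sketch of C++ [dcl.init.aggr]p1 (illustrative types only):
+ //   struct Agg     { int x; double y; };   // aggregate
+ //   struct NotAgg  { NotAgg(int); };       // user-declared constructor
+ //   struct AlsoNot : Agg {};               // base class -> not an aggregate
+ // Only Agg is an aggregate here.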
+
+ /// isPOD - Whether this class is a POD-type (C++ [class]p4), which is a class
+ /// that is an aggregate that has no non-static non-POD data members, no
+ /// reference data members, no user-defined copy assignment operator and no
+ /// user-defined destructor.
+ bool isPOD() const { return data().PlainOldData; }
+
+ /// \brief True if this class is C-like, without C++-specific features, e.g.
+ /// it contains only public fields, no bases, tag kind is not 'class', etc.
+ bool isCLike() const;
+
+ /// isEmpty - Whether this class is empty (C++0x [meta.unary.prop]), which
+ /// means it has no virtual functions, no virtual base classes, no non-static
+ /// data members other than 0-width bit-fields, and does not inherit from a
+ /// non-empty class. Does NOT include a check for union-ness.
+ bool isEmpty() const { return data().Empty; }
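+ // A minimal sketch (illustrative types only):
+ //   struct Tag {};                           // empty
+ //   struct Derived : Tag {};                 // still empty (empty base)
+ //   struct NotEmpty { int x; };              // data member -> not empty
+ //   struct AlsoNot { virtual ~AlsoNot(); };  // virtual function -> not empty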
+
+ /// isPolymorphic - Whether this class is polymorphic (C++ [class.virtual]),
+ /// which means that the class contains or inherits a virtual function.
+ bool isPolymorphic() const { return data().Polymorphic; }
+
+ /// isAbstract - Whether this class is abstract (C++ [class.abstract]),
+ /// which means that the class contains or inherits a pure virtual function.
+ bool isAbstract() const { return data().Abstract; }
+
+ /// isStandardLayout - Whether this class has standard layout
+ /// (C++ [class]p7)
+ bool isStandardLayout() const { return data().IsStandardLayout; }
+
+ /// \brief Whether this class, or any of its class subobjects, contains a
+ /// mutable field.
+ bool hasMutableFields() const { return data().HasMutableFields; }
+
+ /// hasTrivialDefaultConstructor - Whether this class has a trivial default
+ /// constructor (C++11 [class.ctor]p5).
+ bool hasTrivialDefaultConstructor() const {
+ return data().HasTrivialDefaultConstructor &&
+ (!data().UserDeclaredConstructor ||
+ data().DeclaredDefaultConstructor);
+ }
+
+ /// hasConstexprNonCopyMoveConstructor - Whether this class has at least one
+ /// constexpr constructor other than the copy or move constructors.
+ bool hasConstexprNonCopyMoveConstructor() const {
+ return data().HasConstexprNonCopyMoveConstructor ||
+ (!hasUserDeclaredConstructor() &&
+ defaultedDefaultConstructorIsConstexpr());
+ }
+
+ /// defaultedDefaultConstructorIsConstexpr - Whether a defaulted default
+ /// constructor for this class would be constexpr.
+ bool defaultedDefaultConstructorIsConstexpr() const {
+ return data().DefaultedDefaultConstructorIsConstexpr;
+ }
+
+ /// defaultedCopyConstructorIsConstexpr - Whether a defaulted copy
+ /// constructor for this class would be constexpr.
+ bool defaultedCopyConstructorIsConstexpr() const {
+ return data().DefaultedCopyConstructorIsConstexpr;
+ }
+
+ /// defaultedMoveConstructorIsConstexpr - Whether a defaulted move
+ /// constructor for this class would be constexpr.
+ bool defaultedMoveConstructorIsConstexpr() const {
+ return data().DefaultedMoveConstructorIsConstexpr;
+ }
+
+ /// hasConstexprDefaultConstructor - Whether this class has a constexpr
+ /// default constructor.
+ bool hasConstexprDefaultConstructor() const {
+ return data().HasConstexprDefaultConstructor ||
+ (!data().UserDeclaredConstructor &&
+ data().DefaultedDefaultConstructorIsConstexpr && isLiteral());
+ }
+
+ /// hasConstexprCopyConstructor - Whether this class has a constexpr copy
+ /// constructor.
+ bool hasConstexprCopyConstructor() const {
+ return data().HasConstexprCopyConstructor ||
+ (!data().DeclaredCopyConstructor &&
+ data().DefaultedCopyConstructorIsConstexpr && isLiteral());
+ }
+
+ /// hasConstexprMoveConstructor - Whether this class has a constexpr move
+ /// constructor.
+ bool hasConstexprMoveConstructor() const {
+ return data().HasConstexprMoveConstructor ||
+ (needsImplicitMoveConstructor() &&
+ data().DefaultedMoveConstructorIsConstexpr && isLiteral());
+ }
+
+ // hasTrivialCopyConstructor - Whether this class has a trivial copy
+ // constructor (C++ [class.copy]p6, C++0x [class.copy]p13)
+ bool hasTrivialCopyConstructor() const {
+ return data().HasTrivialCopyConstructor;
+ }
+
+ // hasTrivialMoveConstructor - Whether this class has a trivial move
+ // constructor (C++0x [class.copy]p13)
+ bool hasTrivialMoveConstructor() const {
+ return data().HasTrivialMoveConstructor;
+ }
+
+ // hasTrivialCopyAssignment - Whether this class has a trivial copy
+ // assignment operator (C++ [class.copy]p11, C++0x [class.copy]p27)
+ bool hasTrivialCopyAssignment() const {
+ return data().HasTrivialCopyAssignment;
+ }
+
+ // hasTrivialMoveAssignment - Whether this class has a trivial move
+ // assignment operator (C++0x [class.copy]p27)
+ bool hasTrivialMoveAssignment() const {
+ return data().HasTrivialMoveAssignment;
+ }
+
+ // hasTrivialDestructor - Whether this class has a trivial destructor
+ // (C++ [class.dtor]p3)
+ bool hasTrivialDestructor() const { return data().HasTrivialDestructor; }
+
+ // hasIrrelevantDestructor - Whether this class has a destructor which has no
+ // semantic effect. Any such destructor will be trivial, public, defaulted
+ // and not deleted, and will call only irrelevant destructors.
+ bool hasIrrelevantDestructor() const {
+ return data().HasIrrelevantDestructor;
+ }
+
+ // hasNonLiteralTypeFieldsOrBases - Whether this class has a non-literal or
+ // volatile type non-static data member or base class.
+ bool hasNonLiteralTypeFieldsOrBases() const {
+ return data().HasNonLiteralTypeFieldsOrBases;
+ }
+
+ // isTriviallyCopyable - Whether this class is considered trivially copyable
+ // (C++0x [class]p6).
+ bool isTriviallyCopyable() const;
+
+ // isTrivial - Whether this class is considered trivial
+ //
+ // C++0x [class]p6
+ // A trivial class is a class that has a trivial default constructor and
+ // is trivially copyable.
+ bool isTrivial() const {
+ return isTriviallyCopyable() && hasTrivialDefaultConstructor();
+ }
+
+ // isLiteral - Whether this class is a literal type.
+ //
+ // C++11 [basic.types]p10
+ // A class type that has all the following properties:
+ // -- it has a trivial destructor
+ // -- every constructor call and full-expression in the
+ // brace-or-equal-initializers for non-static data members (if any) is
+ // a constant expression.
+ // -- it is an aggregate type or has at least one constexpr constructor or
+ // constructor template that is not a copy or move constructor, and
+ // -- all of its non-static data members and base classes are of literal
+ // types
+ //
+ // We resolve DR1361 by ignoring the second bullet. We resolve DR1452 by
+ // treating types with trivial default constructors as literal types.
+ bool isLiteral() const {
+ return hasTrivialDestructor() &&
+ (isAggregate() || hasConstexprNonCopyMoveConstructor() ||
+ hasTrivialDefaultConstructor()) &&
+ !hasNonLiteralTypeFieldsOrBases();
+ }
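+ // A minimal sketch of the literal-type rule as resolved above (illustrative
+ // types only):
+ //   struct Lit     { int x; constexpr Lit(int v) : x(v) {} };  // literal
+ //   struct AlsoLit { int x; };       // trivial default ctor (DR1452)
+ //   struct NotLit  { ~NotLit(); };   // non-trivial destructor -> not literal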
+
+ /// \brief If this record is an instantiation of a member class,
+ /// retrieves the member class from which it was instantiated.
+ ///
+ /// This routine will return non-NULL for (non-templated) member
+ /// classes of class templates. For example, given:
+ ///
+ /// \code
+ /// template<typename T>
+ /// struct X {
+ /// struct A { };
+ /// };
+ /// \endcode
+ ///
+ /// The declaration for X<int>::A is a (non-templated) CXXRecordDecl
+ /// whose parent is the class template specialization X<int>. For
+ /// this declaration, getInstantiatedFromMemberClass() will return
+ /// the CXXRecordDecl X<T>::A. When a complete definition of
+ /// X<int>::A is required, it will be instantiated from the
+ /// declaration returned by getInstantiatedFromMemberClass().
+ CXXRecordDecl *getInstantiatedFromMemberClass() const;
+
+ /// \brief If this class is an instantiation of a member class of a
+ /// class template specialization, retrieves the member specialization
+ /// information.
+ MemberSpecializationInfo *getMemberSpecializationInfo() const;
+
+ /// \brief Specify that this record is an instantiation of the
+ /// member class RD.
+ void setInstantiationOfMemberClass(CXXRecordDecl *RD,
+ TemplateSpecializationKind TSK);
+
+ /// \brief Retrieves the class template that is described by this
+ /// class declaration.
+ ///
+ /// Every class template is represented as a ClassTemplateDecl and a
+ /// CXXRecordDecl. The former contains template properties (such as
+ /// the template parameter lists) while the latter contains the
+ /// actual description of the template's
+ /// contents. ClassTemplateDecl::getTemplatedDecl() retrieves the
+ /// CXXRecordDecl from a ClassTemplateDecl, while
+ /// getDescribedClassTemplate() retrieves the ClassTemplateDecl from
+ /// a CXXRecordDecl.
+ ClassTemplateDecl *getDescribedClassTemplate() const {
+ return TemplateOrInstantiation.dyn_cast<ClassTemplateDecl*>();
+ }
+
+ void setDescribedClassTemplate(ClassTemplateDecl *Template) {
+ TemplateOrInstantiation = Template;
+ }
+
+ /// \brief Determine whether this particular class is a specialization or
+ /// instantiation of a class template or member class of a class template,
+ /// and how it was instantiated or specialized.
+ TemplateSpecializationKind getTemplateSpecializationKind() const;
+
+ /// \brief Set the kind of specialization or template instantiation this is.
+ void setTemplateSpecializationKind(TemplateSpecializationKind TSK);
+
+ /// getDestructor - Returns the destructor decl for this class.
+ CXXDestructorDecl *getDestructor() const;
+
+ /// isLocalClass - If the class is a local class [class.local], returns
+ /// the enclosing function declaration.
+ const FunctionDecl *isLocalClass() const {
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(getDeclContext()))
+ return RD->isLocalClass();
+
+ return dyn_cast<FunctionDecl>(getDeclContext());
+ }
+
+ /// \brief Determine whether this class is derived from the class \p Base.
+ ///
+ /// This routine only determines whether this class is derived from \p Base,
+ /// but does not account for factors that may make a Derived -> Base
+ /// conversion ill-formed, such as private/protected inheritance or
+ /// multiple, ambiguous base class subobjects.
+ ///
+ /// \param Base the base class we are searching for.
+ ///
+ /// \returns true if this class is derived from Base, false otherwise.
+ bool isDerivedFrom(const CXXRecordDecl *Base) const;
+
+ /// \brief Determine whether this class is derived from the type \p Base.
+ ///
+ /// This routine only determines whether this class is derived from \p Base,
+ /// but does not account for factors that may make a Derived -> Base
+ /// conversion ill-formed, such as private/protected inheritance or
+ /// multiple, ambiguous base class subobjects.
+ ///
+ /// \param Base the base class we are searching for.
+ ///
+ /// \param Paths will contain the paths taken from the current class to the
+ /// given \p Base class.
+ ///
+ /// \returns true if this class is derived from Base, false otherwise.
+ ///
+ /// \todo add a separate parameter to configure IsDerivedFrom, rather than
+ /// tangling input and output in \p Paths
+ bool isDerivedFrom(const CXXRecordDecl *Base, CXXBasePaths &Paths) const;
+
+ /// \brief Determine whether this class is virtually derived from
+ /// the class \p Base.
+ ///
+ /// This routine only determines whether this class is virtually
+ /// derived from \p Base, but does not account for factors that may
+ /// make a Derived -> Base conversion ill-formed, such as
+ /// private/protected inheritance or multiple, ambiguous base class
+ /// subobjects.
+ ///
+ /// \param Base the base class we are searching for.
+ ///
+ /// \returns true if this class is virtually derived from Base,
+ /// false otherwise.
+ bool isVirtuallyDerivedFrom(CXXRecordDecl *Base) const;
+
+ /// \brief Determine whether this class is provably not derived from
+ /// the type \p Base.
+ bool isProvablyNotDerivedFrom(const CXXRecordDecl *Base) const;
+
+ /// \brief Function type used by forallBases() as a callback.
+ ///
+ /// \param Base the definition of the base class
+ ///
+ /// \returns true if this base matched the search criteria
+ typedef bool ForallBasesCallback(const CXXRecordDecl *BaseDefinition,
+ void *UserData);
+
+ /// \brief Determines if the given callback holds for all the direct
+ /// or indirect base classes of this type.
+ ///
+ /// The class itself does not count as a base class. This routine
+ /// returns false if the class has non-computable base classes.
+ ///
+ /// \param AllowShortCircuit if false, forces the callback to be called
+ /// for every base class, even if a dependent or non-matching base was
+ /// found.
+ bool forallBases(ForallBasesCallback *BaseMatches, void *UserData,
+ bool AllowShortCircuit = true) const;
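+ // Usage sketch with a hypothetical callback (not part of this header),
+ // assuming RD is a defined CXXRecordDecl*:
+ //   static bool BaseHasNoFields(const CXXRecordDecl *Base, void *) {
+ //     return Base->field_begin() == Base->field_end();
+ //   }
+ //   ...
+ //   bool AllBasesFieldless = RD->forallBases(&BaseHasNoFields, 0);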
+
+ /// \brief Function type used by lookupInBases() to determine whether a
+ /// specific base class subobject matches the lookup criteria.
+ ///
+ /// \param Specifier the base-class specifier that describes the inheritance
+ /// from the base class we are trying to match.
+ ///
+ /// \param Path the current path, from the most-derived class down to the
+ /// base named by the \p Specifier.
+ ///
+ /// \param UserData a single pointer to user-specified data, provided to
+ /// lookupInBases().
+ ///
+ /// \returns true if this base matched the search criteria, false otherwise.
+ typedef bool BaseMatchesCallback(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ void *UserData);
+
+ /// \brief Look for entities within the base classes of this C++ class,
+ /// transitively searching all base class subobjects.
+ ///
+ /// This routine uses the callback function \p BaseMatches to find base
+ /// classes meeting some search criteria, walking all base class subobjects
+ /// and populating the given \p Paths structure with the paths through the
+ /// inheritance hierarchy that resulted in a match. On a successful search,
+ /// the \p Paths structure can be queried to retrieve the matching paths and
+ /// to determine if there were any ambiguities.
+ ///
+ /// \param BaseMatches callback function used to determine whether a given
+ /// base matches the user-defined search criteria.
+ ///
+ /// \param UserData user data pointer that will be provided to \p BaseMatches.
+ ///
+ /// \param Paths used to record the paths from this class to its base class
+ /// subobjects that match the search criteria.
+ ///
+ /// \returns true if there exists any path from this class to a base class
+ /// subobject that matches the search criteria.
+ bool lookupInBases(BaseMatchesCallback *BaseMatches, void *UserData,
+ CXXBasePaths &Paths) const;
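+ // Usage sketch (assuming RD and Base are defined CXXRecordDecl*): find every
+ // inheritance path from RD to Base.
+ //   CXXBasePaths Paths;
+ //   if (RD->lookupInBases(&CXXRecordDecl::FindBaseClass,
+ //                         Base->getCanonicalDecl(), Paths)) {
+ //     // Paths can now be queried for matching paths and ambiguities.
+ //   }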
+
+ /// \brief Base-class lookup callback that determines whether the given
+ /// base class specifier refers to a specific class declaration.
+ ///
+ /// This callback can be used with \c lookupInBases() to determine whether
+ /// a given derived class has a base class subobject of a particular type.
+ /// The user data pointer should refer to the canonical CXXRecordDecl of the
+ /// base class that we are searching for.
+ static bool FindBaseClass(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path, void *BaseRecord);
+
+ /// \brief Base-class lookup callback that determines whether the
+ /// given base class specifier refers to a specific class
+ /// declaration and describes virtual derivation.
+ ///
+ /// This callback can be used with \c lookupInBases() to determine
+ /// whether a given derived class has a virtual base class
+ /// subobject of a particular type. The user data pointer should
+ /// refer to the canonical CXXRecordDecl of the base class that we
+ /// are searching for.
+ static bool FindVirtualBaseClass(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path, void *BaseRecord);
+
+ /// \brief Base-class lookup callback that determines whether there exists
+ /// a tag with the given name.
+ ///
+ /// This callback can be used with \c lookupInBases() to find tag members
+ /// of the given name within a C++ class hierarchy. The user data pointer
+ /// is an opaque \c DeclarationName pointer.
+ static bool FindTagMember(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path, void *Name);
+
+ /// \brief Base-class lookup callback that determines whether there exists
+ /// a member with the given name.
+ ///
+ /// This callback can be used with \c lookupInBases() to find members
+ /// of the given name within a C++ class hierarchy. The user data pointer
+ /// is an opaque \c DeclarationName pointer.
+ static bool FindOrdinaryMember(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path, void *Name);
+
+ /// \brief Base-class lookup callback that determines whether there exists
+ /// a member with the given name that can be used in a nested-name-specifier.
+ ///
+ /// This callback can be used with \c lookupInBases() to find members of
+ /// the given name within a C++ class hierarchy that can occur within
+ /// nested-name-specifiers.
+ static bool FindNestedNameSpecifierMember(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ void *UserData);
+
+ /// \brief Retrieve the final overriders for each virtual member
+ /// function in the class hierarchy where this class is the
+ /// most-derived class in the class hierarchy.
+ void getFinalOverriders(CXXFinalOverriderMap &FinalOverriders) const;
+
+ /// \brief Get the indirect primary bases for this class.
+ void getIndirectPrimaryBases(CXXIndirectPrimaryBaseSet& Bases) const;
+
+ /// viewInheritance - Renders and displays an inheritance diagram
+ /// for this C++ class and all of its base classes (transitively) using
+ /// GraphViz.
+ void viewInheritance(ASTContext& Context) const;
+
+ /// MergeAccess - Calculates the access of a decl that is reached
+ /// along a path.
+ static AccessSpecifier MergeAccess(AccessSpecifier PathAccess,
+ AccessSpecifier DeclAccess) {
+ assert(DeclAccess != AS_none);
+ if (DeclAccess == AS_private) return AS_none;
+ return (PathAccess > DeclAccess ? PathAccess : DeclAccess);
+ }
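+ // Worked examples (sketch): a public member reached along a protected base
+ // path merges to the more restrictive access,
+ //   MergeAccess(AS_protected, AS_public) == AS_protected
+ // while a private member is never accessible along any path,
+ //   MergeAccess(AS_public, AS_private) == AS_none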
+
+ /// \brief Indicates that the definition of this class is now complete.
+ virtual void completeDefinition();
+
+ /// \brief Indicates that the definition of this class is now complete,
+ /// and provides a final overrider map to help determine whether the class
+ /// is abstract.
+ ///
+ /// \param FinalOverriders The final overrider map for this class, which can
+ /// be provided as an optimization for abstract-class checking. If NULL,
+ /// final overriders will be computed if they are needed to complete the
+ /// definition.
+ void completeDefinition(CXXFinalOverriderMap *FinalOverriders);
+
+ /// \brief Determine whether this class may end up being abstract, even though
+ /// it is not yet known to be abstract.
+ ///
+ /// \returns true if this class is not known to be abstract but has any
+ /// base classes that are abstract. In this case, \c completeDefinition()
+ /// will need to compute final overriders to determine whether the class is
+ /// actually abstract.
+ bool mayBeAbstract() const;
+
+ /// \brief If this is the closure type of a lambda expression, retrieve the
+ /// number to be used for name mangling in the Itanium C++ ABI.
+ ///
+ /// Zero indicates that this closure type has internal linkage, so the
+ /// mangling number does not matter, while a non-zero value indicates which
+ /// lambda expression this is in this particular context.
+ unsigned getLambdaManglingNumber() const {
+ assert(isLambda() && "Not a lambda closure type!");
+ return getLambdaData().ManglingNumber;
+ }
+
+ /// \brief Retrieve the declaration that provides additional context for a
+ /// lambda, when the normal declaration context is not specific enough.
+ ///
+ /// Certain contexts (default arguments of in-class function parameters and
+ /// the initializers of data members) have separate name mangling rules for
+ /// lambdas within the Itanium C++ ABI. For these cases, this routine provides
+ /// the declaration in which the lambda occurs, e.g., the function parameter
+ /// or the non-static data member. Otherwise, it returns NULL to imply that
+ /// the declaration context suffices.
+ Decl *getLambdaContextDecl() const {
+ assert(isLambda() && "Not a lambda closure type!");
+ return getLambdaData().ContextDecl;
+ }
+
+ /// \brief Set the mangling number and context declaration for a lambda
+ /// class.
+ void setLambdaMangling(unsigned ManglingNumber, Decl *ContextDecl) {
+ getLambdaData().ManglingNumber = ManglingNumber;
+ getLambdaData().ContextDecl = ContextDecl;
+ }
+
+ /// \brief Determine whether this lambda expression was known to be dependent
+ /// at the time it was created, even if its context does not appear to be
+ /// dependent.
+ ///
+ /// This flag is a workaround for an issue with parsing, where default
+ /// arguments are parsed before their enclosing function declarations have
+ /// been created. This means that any lambda expressions within those
+ /// default arguments will have as their DeclContext the context enclosing
+ /// the function declaration, which may be non-dependent even when the
+ /// function declaration itself is dependent. This flag indicates when we
+ /// know that the lambda is dependent despite that.
+ bool isDependentLambda() const {
+ return isLambda() && getLambdaData().Dependent;
+ }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classofKind(Kind K) {
+ return K >= firstCXXRecord && K <= lastCXXRecord;
+ }
+ static bool classof(const CXXRecordDecl *D) { return true; }
+ static bool classof(const ClassTemplateSpecializationDecl *D) {
+ return true;
+ }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+ friend class ASTReader;
+ friend class ASTWriter;
+};
+
+/// CXXMethodDecl - Represents a static or instance method of a
+/// struct/union/class.
+class CXXMethodDecl : public FunctionDecl {
+ virtual void anchor();
+protected:
+ CXXMethodDecl(Kind DK, CXXRecordDecl *RD, SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isStatic, StorageClass SCAsWritten, bool isInline,
+ bool isConstexpr, SourceLocation EndLocation)
+ : FunctionDecl(DK, RD, StartLoc, NameInfo, T, TInfo,
+ (isStatic ? SC_Static : SC_None),
+ SCAsWritten, isInline, isConstexpr) {
+ if (EndLocation.isValid())
+ setRangeEnd(EndLocation);
+ }
+
+public:
+ static CXXMethodDecl *Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isStatic,
+ StorageClass SCAsWritten,
+ bool isInline,
+ bool isConstexpr,
+ SourceLocation EndLocation);
+
+ static CXXMethodDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ bool isStatic() const { return getStorageClass() == SC_Static; }
+ bool isInstance() const { return !isStatic(); }
+
+ bool isVirtual() const {
+ CXXMethodDecl *CD =
+ cast<CXXMethodDecl>(const_cast<CXXMethodDecl*>(this)->getCanonicalDecl());
+
+ if (CD->isVirtualAsWritten())
+ return true;
+
+ return (CD->begin_overridden_methods() != CD->end_overridden_methods());
+ }
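+ // Illustrative consequence (sketch): given
+ //   struct B { virtual void f(); };
+ //   struct D : B { void f(); };   // overrides B::f without 'virtual'
+ // D::f is not virtual-as-written, but isVirtual() still returns true because
+ // B::f appears in its overridden-methods set.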
+
+ /// \brief Determine whether this is a usual deallocation function
+ /// (C++ [basic.stc.dynamic.deallocation]p2), which is an overloaded
+ /// delete or delete[] operator with a particular signature.
+ bool isUsualDeallocationFunction() const;
+
+ /// \brief Determine whether this is a copy-assignment operator, regardless
+ /// of whether it was declared implicitly or explicitly.
+ bool isCopyAssignmentOperator() const;
+
+ /// \brief Determine whether this is a move assignment operator.
+ bool isMoveAssignmentOperator() const;
+
+ const CXXMethodDecl *getCanonicalDecl() const {
+ return cast<CXXMethodDecl>(FunctionDecl::getCanonicalDecl());
+ }
+ CXXMethodDecl *getCanonicalDecl() {
+ return cast<CXXMethodDecl>(FunctionDecl::getCanonicalDecl());
+ }
+
+ /// isUserProvided - True if this method is user-declared and was not
+ /// deleted or defaulted on its first declaration.
+ bool isUserProvided() const {
+ return !(isDeleted() || getCanonicalDecl()->isDefaulted());
+ }
+
+ /// \brief Add \p MD to the set of methods that this method overrides.
+ void addOverriddenMethod(const CXXMethodDecl *MD);
+
+ typedef const CXXMethodDecl *const* method_iterator;
+
+ method_iterator begin_overridden_methods() const;
+ method_iterator end_overridden_methods() const;
+ unsigned size_overridden_methods() const;
+
+ /// getParent - Returns the parent of this method declaration, which
+ /// is the class in which this method is defined.
+ const CXXRecordDecl *getParent() const {
+ return cast<CXXRecordDecl>(FunctionDecl::getParent());
+ }
+
+ /// getParent - Returns the parent of this method declaration, which
+ /// is the class in which this method is defined.
+ CXXRecordDecl *getParent() {
+ return const_cast<CXXRecordDecl *>(
+ cast<CXXRecordDecl>(FunctionDecl::getParent()));
+ }
+
+ /// getThisType - Returns the type of the 'this' pointer.
+ /// Should only be called for instance methods.
+ QualType getThisType(ASTContext &C) const;
+
+ unsigned getTypeQualifiers() const {
+ return getType()->getAs<FunctionProtoType>()->getTypeQuals();
+ }
+
+ /// \brief Retrieve the ref-qualifier associated with this method.
+ ///
+ /// In the following example, \c f() has an lvalue ref-qualifier, \c g()
+ /// has an rvalue ref-qualifier, and \c h() has no ref-qualifier.
+ /// \code
+ /// struct X {
+ /// void f() &;
+ /// void g() &&;
+ /// void h();
+ /// };
+ /// \endcode
+ RefQualifierKind getRefQualifier() const {
+ return getType()->getAs<FunctionProtoType>()->getRefQualifier();
+ }
+
+ bool hasInlineBody() const;
+
+ /// \brief Determine whether this is a lambda closure type's static member
+ /// function that is used for the result of the lambda's conversion to
+ /// function pointer (for a lambda with no captures).
+ ///
+ /// The function itself, if used, will have a placeholder body that will be
+ /// supplied by IR generation to either forward to the function call operator
+ /// or clone the function call operator.
+ bool isLambdaStaticInvoker() const;
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const CXXMethodDecl *D) { return true; }
+ static bool classofKind(Kind K) {
+ return K >= firstCXXMethod && K <= lastCXXMethod;
+ }
+};
+
+/// CXXCtorInitializer - Represents a C++ base or member
+/// initializer, which is part of a constructor initializer that
+/// initializes one non-static member variable or one base class. For
+/// example, in the following, both 'A(a)' and 'f(3.14159)' are member
+/// initializers:
+///
+/// @code
+/// class A { };
+/// class B : public A {
+/// float f;
+/// public:
+/// B(A& a) : A(a), f(3.14159) { }
+/// };
+/// @endcode
+class CXXCtorInitializer {
+ /// \brief Either the base class name/delegating constructor type (stored as
+ /// a TypeSourceInfo*), a normal field (FieldDecl), or an anonymous field
+ /// (IndirectFieldDecl*) being initialized.
+ llvm::PointerUnion3<TypeSourceInfo *, FieldDecl *, IndirectFieldDecl *>
+ Initializee;
+
+ /// \brief The source location for the field name or, for a base initializer
+ /// pack expansion, the location of the ellipsis. In the case of a delegating
+ /// constructor, it will still include the type's source location as the
+ /// Initializee points to the CXXConstructorDecl (to allow loop detection).
+ SourceLocation MemberOrEllipsisLocation;
+
+ /// \brief The argument used to initialize the base or member, which may
+ /// end up constructing an object (when multiple arguments are involved).
+ /// If 0, this is a field initializer, and the in-class member initializer
+ /// will be used.
+ Stmt *Init;
+
+ /// LParenLoc - Location of the left paren of the ctor-initializer.
+ SourceLocation LParenLoc;
+
+ /// RParenLoc - Location of the right paren of the ctor-initializer.
+ SourceLocation RParenLoc;
+
+ /// \brief If the initializee is a type, whether that type makes this
+ /// a delegating initialization.
+ bool IsDelegating : 1;
+
+ /// IsVirtual - If the initializer is a base initializer, this keeps track
+ /// of whether the base is virtual or not.
+ bool IsVirtual : 1;
+
+ /// IsWritten - Whether or not the initializer is explicitly written
+ /// in the sources.
+ bool IsWritten : 1;
+
+ /// SourceOrderOrNumArrayIndices - If IsWritten is true, then this
+ /// number keeps track of the textual order of this initializer in the
+ /// original sources, counting from 0; otherwise, if IsWritten is false,
+ /// it stores the number of array index variables stored after this
+ /// object in memory.
+ unsigned SourceOrderOrNumArrayIndices : 13;
+
+ CXXCtorInitializer(ASTContext &Context, FieldDecl *Member,
+ SourceLocation MemberLoc, SourceLocation L, Expr *Init,
+ SourceLocation R, VarDecl **Indices, unsigned NumIndices);
+
+public:
+ /// CXXCtorInitializer - Creates a new base-class initializer.
+ explicit
+ CXXCtorInitializer(ASTContext &Context, TypeSourceInfo *TInfo, bool IsVirtual,
+ SourceLocation L, Expr *Init, SourceLocation R,
+ SourceLocation EllipsisLoc);
+
+ /// CXXCtorInitializer - Creates a new member initializer.
+ explicit
+ CXXCtorInitializer(ASTContext &Context, FieldDecl *Member,
+ SourceLocation MemberLoc, SourceLocation L, Expr *Init,
+ SourceLocation R);
+
+ /// CXXCtorInitializer - Creates a new anonymous field initializer.
+ explicit
+ CXXCtorInitializer(ASTContext &Context, IndirectFieldDecl *Member,
+ SourceLocation MemberLoc, SourceLocation L, Expr *Init,
+ SourceLocation R);
+
+ /// CXXCtorInitializer - Creates a new delegating Initializer.
+ explicit
+ CXXCtorInitializer(ASTContext &Context, TypeSourceInfo *TInfo,
+ SourceLocation L, Expr *Init, SourceLocation R);
+
+ /// \brief Creates a new member initializer that optionally contains
+ /// array indices used to describe an elementwise initialization.
+ static CXXCtorInitializer *Create(ASTContext &Context, FieldDecl *Member,
+ SourceLocation MemberLoc, SourceLocation L,
+ Expr *Init, SourceLocation R,
+ VarDecl **Indices, unsigned NumIndices);
+
+ /// isBaseInitializer - Returns true when this initializer is
+ /// initializing a base class.
+ bool isBaseInitializer() const {
+ return Initializee.is<TypeSourceInfo*>() && !IsDelegating;
+ }
+
+ /// isMemberInitializer - Returns true when this initializer is
+ /// initializing a non-static data member.
+ bool isMemberInitializer() const { return Initializee.is<FieldDecl*>(); }
+
+ bool isAnyMemberInitializer() const {
+ return isMemberInitializer() || isIndirectMemberInitializer();
+ }
+
+ bool isIndirectMemberInitializer() const {
+ return Initializee.is<IndirectFieldDecl*>();
+ }
+
+ /// isInClassMemberInitializer - Returns true when this initializer is an
+ /// implicit ctor initializer generated for a field with an initializer
+ /// defined on the member declaration.
+ bool isInClassMemberInitializer() const {
+ return !Init;
+ }
+
+ /// isDelegatingInitializer - Returns true when this initializer is creating
+ /// a delegating constructor.
+ bool isDelegatingInitializer() const {
+ return Initializee.is<TypeSourceInfo*>() && IsDelegating;
+ }
+
+ /// \brief Determine whether this initializer is a pack expansion.
+ bool isPackExpansion() const {
+ return isBaseInitializer() && MemberOrEllipsisLocation.isValid();
+ }
+
+ /// \brief For a pack expansion, returns the location of the ellipsis.
+ SourceLocation getEllipsisLoc() const {
+ assert(isPackExpansion() && "Initializer is not a pack expansion");
+ return MemberOrEllipsisLocation;
+ }
+
+ /// If this is a base class initializer, returns the type of the
+ /// base class with location information. Otherwise, returns a NULL
+ /// type location.
+ TypeLoc getBaseClassLoc() const;
+
+ /// If this is a base class initializer, returns the type of the base class.
+ /// Otherwise, returns NULL.
+ const Type *getBaseClass() const;
+
+ /// Returns whether the base is virtual or not.
+ bool isBaseVirtual() const {
+ assert(isBaseInitializer() && "Must call this on base initializer!");
+
+ return IsVirtual;
+ }
+
+ /// \brief Returns the declarator information for a base class or delegating
+ /// initializer.
+ TypeSourceInfo *getTypeSourceInfo() const {
+ return Initializee.dyn_cast<TypeSourceInfo *>();
+ }
+
+ /// getMember - If this is a member initializer, returns the
+ /// declaration of the non-static data member being
+ /// initialized. Otherwise, returns NULL.
+ FieldDecl *getMember() const {
+ if (isMemberInitializer())
+ return Initializee.get<FieldDecl*>();
+ return 0;
+ }
+ FieldDecl *getAnyMember() const {
+ if (isMemberInitializer())
+ return Initializee.get<FieldDecl*>();
+ if (isIndirectMemberInitializer())
+ return Initializee.get<IndirectFieldDecl*>()->getAnonField();
+ return 0;
+ }
+
+ IndirectFieldDecl *getIndirectMember() const {
+ if (isIndirectMemberInitializer())
+ return Initializee.get<IndirectFieldDecl*>();
+ return 0;
+ }
+
+ SourceLocation getMemberLocation() const {
+ return MemberOrEllipsisLocation;
+ }
+
+ /// \brief Determine the source location of the initializer.
+ SourceLocation getSourceLocation() const;
+
+ /// \brief Determine the source range covering the entire initializer.
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ /// isWritten - Returns true if this initializer is explicitly written
+ /// in the source code.
+ bool isWritten() const { return IsWritten; }
+
+ /// \brief Return the source position of the initializer, counting from 0.
+ /// If the initializer was implicit, -1 is returned.
+ int getSourceOrder() const {
+ return IsWritten ? static_cast<int>(SourceOrderOrNumArrayIndices) : -1;
+ }
+
+ /// \brief Set the source order of this initializer. This method can only
+ /// be called once for each initializer; it cannot be called on an
+ /// initializer having a positive number of (implicit) array indices.
+ void setSourceOrder(int pos) {
+ assert(!IsWritten &&
+ "calling twice setSourceOrder() on the same initializer");
+ assert(SourceOrderOrNumArrayIndices == 0 &&
+ "setSourceOrder() used when there are implicit array indices");
+ assert(pos >= 0 &&
+ "setSourceOrder() used to make an initializer implicit");
+ IsWritten = true;
+ SourceOrderOrNumArrayIndices = static_cast<unsigned>(pos);
+ }
+
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+
+ /// \brief Determine the number of implicit array indices used while
+ /// describing an array member initialization.
+ unsigned getNumArrayIndices() const {
+ return IsWritten ? 0 : SourceOrderOrNumArrayIndices;
+ }
+
+ /// \brief Retrieve a particular array index variable used to
+ /// describe an array member initialization.
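+ ///
+ /// (The index variables are kept in trailing storage allocated directly
+ /// after the CXXCtorInitializer object itself, which is what the
+ /// "this + 1" casts below refer to.)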
+ VarDecl *getArrayIndex(unsigned I) {
+ assert(I < getNumArrayIndices() && "Out of bounds member array index");
+ return reinterpret_cast<VarDecl **>(this + 1)[I];
+ }
+ const VarDecl *getArrayIndex(unsigned I) const {
+ assert(I < getNumArrayIndices() && "Out of bounds member array index");
+ return reinterpret_cast<const VarDecl * const *>(this + 1)[I];
+ }
+ void setArrayIndex(unsigned I, VarDecl *Index) {
+ assert(I < getNumArrayIndices() && "Out of bounds member array index");
+ reinterpret_cast<VarDecl **>(this + 1)[I] = Index;
+ }
+ ArrayRef<VarDecl *> getArrayIndexes() {
+ assert(getNumArrayIndices() != 0 && "Getting indexes for non-array init");
+ return ArrayRef<VarDecl *>(reinterpret_cast<VarDecl **>(this + 1),
+ getNumArrayIndices());
+ }
+
+ /// \brief Get the initializer. This is 0 if this is an in-class initializer
+ /// for a non-static data member which has not yet been parsed.
+ Expr *getInit() const {
+ if (!Init)
+ return getAnyMember()->getInClassInitializer();
+
+ return static_cast<Expr*>(Init);
+ }
+};
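+
+// Illustrative note: in a constructor definition such as
+//
+//   struct S : Base { int x;  S() : Base(), x(0) {} };
+//
+// the mem-initializers "Base()" and "x(0)" are each represented by one
+// CXXCtorInitializer (a base-class initializer and a member initializer,
+// respectively).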
+
+/// CXXConstructorDecl - Represents a C++ constructor within a
+/// class. For example:
+///
+/// @code
+/// class X {
+/// public:
+/// explicit X(int); // represented by a CXXConstructorDecl.
+/// };
+/// @endcode
+class CXXConstructorDecl : public CXXMethodDecl {
+ virtual void anchor();
+ /// IsExplicitSpecified - Whether this constructor declaration has the
+ /// 'explicit' keyword specified.
+ bool IsExplicitSpecified : 1;
+
+ /// ImplicitlyDefined - Whether this constructor was implicitly
+ /// defined by the compiler. When false, the constructor was defined
+ /// by the user. In C++03, this flag will have the same value as
+ /// Implicit. In C++0x, however, a constructor that is
+ /// explicitly defaulted (i.e., defined with " = default") will have
+ /// @c !Implicit && ImplicitlyDefined.
+ bool ImplicitlyDefined : 1;
+
+ /// Support for base and member initializers.
+ /// CtorInitializers - The initializers for the bases and members of
+ /// this constructor.
+ CXXCtorInitializer **CtorInitializers;
+ unsigned NumCtorInitializers;
+
+ CXXConstructorDecl(CXXRecordDecl *RD, SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isExplicitSpecified, bool isInline,
+ bool isImplicitlyDeclared, bool isConstexpr)
+ : CXXMethodDecl(CXXConstructor, RD, StartLoc, NameInfo, T, TInfo, false,
+ SC_None, isInline, isConstexpr, SourceLocation()),
+ IsExplicitSpecified(isExplicitSpecified), ImplicitlyDefined(false),
+ CtorInitializers(0), NumCtorInitializers(0) {
+ setImplicit(isImplicitlyDeclared);
+ }
+
+public:
+ static CXXConstructorDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+ static CXXConstructorDecl *Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isExplicit,
+ bool isInline, bool isImplicitlyDeclared,
+ bool isConstexpr);
+
+ /// isExplicitSpecified - Whether this constructor declaration has the
+ /// 'explicit' keyword specified.
+ bool isExplicitSpecified() const { return IsExplicitSpecified; }
+
+ /// isExplicit - Whether this constructor was marked "explicit" or not.
+ bool isExplicit() const {
+ return cast<CXXConstructorDecl>(getFirstDeclaration())
+ ->isExplicitSpecified();
+ }
+
+ /// isImplicitlyDefined - Whether this constructor was implicitly
+ /// defined. If false, then this constructor was defined by the
+ /// user. This operation can only be invoked if the constructor has
+ /// already been defined.
+ bool isImplicitlyDefined() const {
+ assert(isThisDeclarationADefinition() &&
+ "Can only get the implicit-definition flag once the "
+ "constructor has been defined");
+ return ImplicitlyDefined;
+ }
+
+ /// setImplicitlyDefined - Set whether this constructor was
+ /// implicitly defined or not.
+ void setImplicitlyDefined(bool ID) {
+ assert(isThisDeclarationADefinition() &&
+ "Can only set the implicit-definition flag once the constructor "
+ "has been defined");
+ ImplicitlyDefined = ID;
+ }
+
+ /// init_iterator - Iterates through the member/base initializer list.
+ typedef CXXCtorInitializer **init_iterator;
+
+ /// init_const_iterator - Iterates through the member/base initializer list.
+ typedef CXXCtorInitializer * const * init_const_iterator;
+
+ /// init_begin() - Retrieve an iterator to the first initializer.
+ init_iterator init_begin() { return CtorInitializers; }
+ /// init_begin() - Retrieve an iterator to the first initializer.
+ init_const_iterator init_begin() const { return CtorInitializers; }
+
+ /// init_end() - Retrieve an iterator past the last initializer.
+ init_iterator init_end() {
+ return CtorInitializers + NumCtorInitializers;
+ }
+ /// init_end() - Retrieve an iterator past the last initializer.
+ init_const_iterator init_end() const {
+ return CtorInitializers + NumCtorInitializers;
+ }
+
+ typedef std::reverse_iterator<init_iterator> init_reverse_iterator;
+ typedef std::reverse_iterator<init_const_iterator>
+ init_const_reverse_iterator;
+
+ init_reverse_iterator init_rbegin() {
+ return init_reverse_iterator(init_end());
+ }
+ init_const_reverse_iterator init_rbegin() const {
+ return init_const_reverse_iterator(init_end());
+ }
+
+ init_reverse_iterator init_rend() {
+ return init_reverse_iterator(init_begin());
+ }
+ init_const_reverse_iterator init_rend() const {
+ return init_const_reverse_iterator(init_begin());
+ }
+
+ /// getNumCtorInitializers - Determine the number of constructor
+ /// initializers for this constructor.
+ unsigned getNumCtorInitializers() const {
+ return NumCtorInitializers;
+ }
+
+ void setNumCtorInitializers(unsigned numCtorInitializers) {
+ NumCtorInitializers = numCtorInitializers;
+ }
+
+ void setCtorInitializers(CXXCtorInitializer ** initializers) {
+ CtorInitializers = initializers;
+ }
+
+ /// isDelegatingConstructor - Whether this constructor is a
+ /// delegating constructor.
+ bool isDelegatingConstructor() const {
+ return (getNumCtorInitializers() == 1) &&
+ CtorInitializers[0]->isDelegatingInitializer();
+ }
+
+ /// getTargetConstructor - When this constructor delegates to
+ /// another, retrieve the target constructor.
+ CXXConstructorDecl *getTargetConstructor() const;
+
+ /// isDefaultConstructor - Whether this constructor is a default
+ /// constructor (C++ [class.ctor]p5), which can be used to
+ /// default-initialize a class of this type.
+ bool isDefaultConstructor() const;
+
+ /// isCopyConstructor - Whether this constructor is a copy
+ /// constructor (C++ [class.copy]p2), which can be used to copy the
+ /// class. @p TypeQuals will be set to the qualifiers on the
+ /// argument type. For example, @p TypeQuals would be set to @c
+ /// QualType::Const for the following copy constructor:
+ ///
+ /// @code
+ /// class X {
+ /// public:
+ /// X(const X&);
+ /// };
+ /// @endcode
+ bool isCopyConstructor(unsigned &TypeQuals) const;
+
+ /// isCopyConstructor - Whether this constructor is a copy
+ /// constructor (C++ [class.copy]p2), which can be used to copy the
+ /// class.
+ bool isCopyConstructor() const {
+ unsigned TypeQuals = 0;
+ return isCopyConstructor(TypeQuals);
+ }
+
+ /// \brief Determine whether this constructor is a move constructor
+ /// (C++0x [class.copy]p3), which can be used to move values of the class.
+ ///
+ /// \param TypeQuals If this constructor is a move constructor, will be set
+ /// to the type qualifiers on the referent of the first parameter's type.
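+ ///
+ /// Illustrative example of a move constructor:
+ ///
+ /// @code
+ /// class X {
+ /// public:
+ ///   X(X&&);
+ /// };
+ /// @endcode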
+ bool isMoveConstructor(unsigned &TypeQuals) const;
+
+ /// \brief Determine whether this constructor is a move constructor
+ /// (C++0x [class.copy]p3), which can be used to move values of the class.
+ bool isMoveConstructor() const {
+ unsigned TypeQuals = 0;
+ return isMoveConstructor(TypeQuals);
+ }
+
+ /// \brief Determine whether this is a copy or move constructor.
+ ///
+ /// \param TypeQuals Will be set to the type qualifiers on the reference
+ /// parameter, if in fact this is a copy or move constructor.
+ bool isCopyOrMoveConstructor(unsigned &TypeQuals) const;
+
+ /// \brief Determine whether this is a copy or move constructor.
+ bool isCopyOrMoveConstructor() const {
+ unsigned Quals;
+ return isCopyOrMoveConstructor(Quals);
+ }
+
+ /// isConvertingConstructor - Whether this constructor is a
+ /// converting constructor (C++ [class.conv.ctor]), which can be
+ /// used for user-defined conversions.
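+ ///
+ /// Illustrative example: a non-explicit constructor callable with a single
+ /// argument, such as
+ ///
+ /// @code
+ /// class Y {
+ /// public:
+ ///   Y(int);  // converting constructor
+ /// };
+ /// @endcode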
+ bool isConvertingConstructor(bool AllowExplicit) const;
+
+ /// \brief Determine whether this is a member template specialization that
+ /// would copy the object to itself. Such constructors are never used to copy
+ /// an object.
+ bool isSpecializationCopyingObject() const;
+
+ /// \brief Get the constructor that this inheriting constructor is based on.
+ const CXXConstructorDecl *getInheritedConstructor() const;
+
+ /// \brief Set the constructor that this inheriting constructor is based on.
+ void setInheritedConstructor(const CXXConstructorDecl *BaseCtor);
+
+ const CXXConstructorDecl *getCanonicalDecl() const {
+ return cast<CXXConstructorDecl>(FunctionDecl::getCanonicalDecl());
+ }
+ CXXConstructorDecl *getCanonicalDecl() {
+ return cast<CXXConstructorDecl>(FunctionDecl::getCanonicalDecl());
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const CXXConstructorDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == CXXConstructor; }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
+
+/// CXXDestructorDecl - Represents a C++ destructor within a
+/// class. For example:
+///
+/// @code
+/// class X {
+/// public:
+/// ~X(); // represented by a CXXDestructorDecl.
+/// };
+/// @endcode
+class CXXDestructorDecl : public CXXMethodDecl {
+ virtual void anchor();
+ /// ImplicitlyDefined - Whether this destructor was implicitly
+ /// defined by the compiler. When false, the destructor was defined
+ /// by the user. In C++03, this flag will have the same value as
+ /// Implicit. In C++0x, however, a destructor that is
+ /// explicitly defaulted (i.e., defined with " = default") will have
+ /// @c !Implicit && ImplicitlyDefined.
+ bool ImplicitlyDefined : 1;
+
+ FunctionDecl *OperatorDelete;
+
+ CXXDestructorDecl(CXXRecordDecl *RD, SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isInline, bool isImplicitlyDeclared)
+ : CXXMethodDecl(CXXDestructor, RD, StartLoc, NameInfo, T, TInfo, false,
+ SC_None, isInline, /*isConstexpr=*/false, SourceLocation()),
+ ImplicitlyDefined(false), OperatorDelete(0) {
+ setImplicit(isImplicitlyDeclared);
+ }
+
+public:
+ static CXXDestructorDecl *Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo* TInfo,
+ bool isInline,
+ bool isImplicitlyDeclared);
+ static CXXDestructorDecl *CreateDeserialized(ASTContext & C, unsigned ID);
+
+ /// isImplicitlyDefined - Whether this destructor was implicitly
+ /// defined. If false, then this destructor was defined by the
+ /// user. This operation can only be invoked if the destructor has
+ /// already been defined.
+ bool isImplicitlyDefined() const {
+ assert(isThisDeclarationADefinition() &&
+ "Can only get the implicit-definition flag once the destructor has "
+ "been defined");
+ return ImplicitlyDefined;
+ }
+
+ /// setImplicitlyDefined - Set whether this destructor was
+ /// implicitly defined or not.
+ void setImplicitlyDefined(bool ID) {
+ assert(isThisDeclarationADefinition() &&
+ "Can only set the implicit-definition flag once the destructor has "
+ "been defined");
+ ImplicitlyDefined = ID;
+ }
+
+ void setOperatorDelete(FunctionDecl *OD) { OperatorDelete = OD; }
+ const FunctionDecl *getOperatorDelete() const { return OperatorDelete; }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const CXXDestructorDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == CXXDestructor; }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
+
+/// CXXConversionDecl - Represents a C++ conversion function within a
+/// class. For example:
+///
+/// @code
+/// class X {
+/// public:
+/// operator bool();
+/// };
+/// @endcode
+class CXXConversionDecl : public CXXMethodDecl {
+ virtual void anchor();
+ /// IsExplicitSpecified - Whether this conversion function declaration is
+ /// marked "explicit", meaning that it can only be applied when the user
+ /// explicitly wrote a cast. This is a C++0x feature.
+ bool IsExplicitSpecified : 1;
+
+ CXXConversionDecl(CXXRecordDecl *RD, SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isInline, bool isExplicitSpecified,
+ bool isConstexpr, SourceLocation EndLocation)
+ : CXXMethodDecl(CXXConversion, RD, StartLoc, NameInfo, T, TInfo, false,
+ SC_None, isInline, isConstexpr, EndLocation),
+ IsExplicitSpecified(isExplicitSpecified) { }
+
+public:
+ static CXXConversionDecl *Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isInline, bool isExplicit,
+ bool isConstexpr,
+ SourceLocation EndLocation);
+ static CXXConversionDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ /// isExplicitSpecified - Whether this conversion function declaration is
+ /// marked "explicit", meaning that it can only be applied when the user
+ /// explicitly wrote a cast. This is a C++0x feature.
+ bool isExplicitSpecified() const { return IsExplicitSpecified; }
+
+ /// isExplicit - Whether this is an explicit conversion operator
+ /// (C++0x only). Explicit conversion operators are only considered
+ /// when the user has explicitly written a cast.
+ bool isExplicit() const {
+ return cast<CXXConversionDecl>(getFirstDeclaration())
+ ->isExplicitSpecified();
+ }
+
+ /// getConversionType - Returns the type that this conversion
+ /// function is converting to.
+ QualType getConversionType() const {
+ return getType()->getAs<FunctionType>()->getResultType();
+ }
+
+ /// \brief Determine whether this conversion function is a conversion from
+ /// a lambda closure type to a block pointer.
+ bool isLambdaToBlockPointerConversion() const;
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const CXXConversionDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == CXXConversion; }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
+
+/// LinkageSpecDecl - This represents a linkage specification. For example:
+/// extern "C" void foo();
+///
+class LinkageSpecDecl : public Decl, public DeclContext {
+ virtual void anchor();
+public:
+ /// LanguageIDs - Used to represent the language in a linkage
+ /// specification. The values are part of the serialization ABI for
+ /// ASTs and cannot be changed without altering that ABI. To help
+ /// ensure a stable ABI for this, we choose the DW_LANG_ encodings
+ /// from the DWARF standard.
+ enum LanguageIDs {
+ lang_c = /* DW_LANG_C */ 0x0002,
+ lang_cxx = /* DW_LANG_C_plus_plus */ 0x0004
+ };
+private:
+ /// Language - The language for this linkage specification.
+ LanguageIDs Language;
+ /// ExternLoc - The source location for the extern keyword.
+ SourceLocation ExternLoc;
+ /// RBraceLoc - The source location for the right brace (if valid).
+ SourceLocation RBraceLoc;
+
+ LinkageSpecDecl(DeclContext *DC, SourceLocation ExternLoc,
+ SourceLocation LangLoc, LanguageIDs lang,
+ SourceLocation RBLoc)
+ : Decl(LinkageSpec, DC, LangLoc), DeclContext(LinkageSpec),
+ Language(lang), ExternLoc(ExternLoc), RBraceLoc(RBLoc) { }
+
+public:
+ static LinkageSpecDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation ExternLoc,
+ SourceLocation LangLoc, LanguageIDs Lang,
+ SourceLocation RBraceLoc = SourceLocation());
+ static LinkageSpecDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ /// \brief Return the language specified by this linkage specification.
+ LanguageIDs getLanguage() const { return Language; }
+ /// \brief Set the language specified by this linkage specification.
+ void setLanguage(LanguageIDs L) { Language = L; }
+
+ /// \brief Determines whether this linkage specification had braces in
+ /// its syntactic form.
+ bool hasBraces() const { return RBraceLoc.isValid(); }
+
+ SourceLocation getExternLoc() const { return ExternLoc; }
+ SourceLocation getRBraceLoc() const { return RBraceLoc; }
+ void setExternLoc(SourceLocation L) { ExternLoc = L; }
+ void setRBraceLoc(SourceLocation L) { RBraceLoc = L; }
+
+ SourceLocation getLocEnd() const LLVM_READONLY {
+ if (hasBraces())
+ return getRBraceLoc();
+ // No braces: get the end location of the (only) declaration in context
+ // (if present).
+ return decls_empty() ? getLocation() : decls_begin()->getLocEnd();
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(ExternLoc, getLocEnd());
+ }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const LinkageSpecDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == LinkageSpec; }
+ static DeclContext *castToDeclContext(const LinkageSpecDecl *D) {
+ return static_cast<DeclContext *>(const_cast<LinkageSpecDecl*>(D));
+ }
+ static LinkageSpecDecl *castFromDeclContext(const DeclContext *DC) {
+ return static_cast<LinkageSpecDecl *>(const_cast<DeclContext*>(DC));
+ }
+};
+
+/// UsingDirectiveDecl - Represents C++ using-directive. For example:
+///
+/// using namespace std;
+///
+// NB: UsingDirectiveDecl should be Decl not NamedDecl, but we provide
+// artificial names for all using-directives in order to store
+// them in DeclContext effectively.
+class UsingDirectiveDecl : public NamedDecl {
+ virtual void anchor();
+ /// \brief The location of the "using" keyword.
+ SourceLocation UsingLoc;
+
+ /// NamespaceLoc - Location of the 'namespace' token.
+ SourceLocation NamespaceLoc;
+
+ /// \brief The nested-name-specifier that precedes the namespace.
+ NestedNameSpecifierLoc QualifierLoc;
+
+ /// NominatedNamespace - Namespace nominated by using-directive.
+ NamedDecl *NominatedNamespace;
+
+ /// Enclosing context containing both using-directive and nominated
+ /// namespace.
+ DeclContext *CommonAncestor;
+
+ /// getName - Returns the special DeclarationName used by
+ /// using-directives. This is only used by DeclContext for storing
+ /// UsingDirectiveDecls in its lookup structure.
+ static DeclarationName getName() {
+ return DeclarationName::getUsingDirectiveName();
+ }
+
+ UsingDirectiveDecl(DeclContext *DC, SourceLocation UsingLoc,
+ SourceLocation NamespcLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation IdentLoc,
+ NamedDecl *Nominated,
+ DeclContext *CommonAncestor)
+ : NamedDecl(UsingDirective, DC, IdentLoc, getName()), UsingLoc(UsingLoc),
+ NamespaceLoc(NamespcLoc), QualifierLoc(QualifierLoc),
+ NominatedNamespace(Nominated), CommonAncestor(CommonAncestor) { }
+
+public:
+ /// \brief Retrieve the nested-name-specifier that qualifies the
+ /// name of the namespace, with source-location information.
+ NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
+
+ /// \brief Retrieve the nested-name-specifier that qualifies the
+ /// name of the namespace.
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
+ }
+
+ NamedDecl *getNominatedNamespaceAsWritten() { return NominatedNamespace; }
+ const NamedDecl *getNominatedNamespaceAsWritten() const {
+ return NominatedNamespace;
+ }
+
+ /// getNominatedNamespace - Returns namespace nominated by using-directive.
+ NamespaceDecl *getNominatedNamespace();
+
+ const NamespaceDecl *getNominatedNamespace() const {
+ return const_cast<UsingDirectiveDecl*>(this)->getNominatedNamespace();
+ }
+
+ /// \brief Returns the common ancestor context of this using-directive and
+ /// its nominated namespace.
+ DeclContext *getCommonAncestor() { return CommonAncestor; }
+ const DeclContext *getCommonAncestor() const { return CommonAncestor; }
+
+ /// \brief Return the location of the "using" keyword.
+ SourceLocation getUsingLoc() const { return UsingLoc; }
+
+ // FIXME: Could omit 'Key' in name.
+ /// getNamespaceKeyLocation - Returns location of namespace keyword.
+ SourceLocation getNamespaceKeyLocation() const { return NamespaceLoc; }
+
+ /// getIdentLocation - Returns location of identifier.
+ SourceLocation getIdentLocation() const { return getLocation(); }
+
+ static UsingDirectiveDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation UsingLoc,
+ SourceLocation NamespaceLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation IdentLoc,
+ NamedDecl *Nominated,
+ DeclContext *CommonAncestor);
+ static UsingDirectiveDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(UsingLoc, getLocation());
+ }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const UsingDirectiveDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == UsingDirective; }
+
+ // Friend for getUsingDirectiveName.
+ friend class DeclContext;
+
+ friend class ASTDeclReader;
+};
+
+/// NamespaceAliasDecl - Represents a C++ namespace alias. For example:
+///
+/// @code
+/// namespace Foo = Bar;
+/// @endcode
+class NamespaceAliasDecl : public NamedDecl {
+ virtual void anchor();
+
+ /// \brief The location of the "namespace" keyword.
+ SourceLocation NamespaceLoc;
+
+ /// IdentLoc - Location of the namespace identifier. Accessed via
+ /// getTargetNameLoc().
+ SourceLocation IdentLoc;
+
+ /// \brief The nested-name-specifier that precedes the namespace.
+ NestedNameSpecifierLoc QualifierLoc;
+
+ /// Namespace - The Decl that this alias points to. Can either be a
+ /// NamespaceDecl or a NamespaceAliasDecl.
+ NamedDecl *Namespace;
+
+ NamespaceAliasDecl(DeclContext *DC, SourceLocation NamespaceLoc,
+ SourceLocation AliasLoc, IdentifierInfo *Alias,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation IdentLoc, NamedDecl *Namespace)
+ : NamedDecl(NamespaceAlias, DC, AliasLoc, Alias),
+ NamespaceLoc(NamespaceLoc), IdentLoc(IdentLoc),
+ QualifierLoc(QualifierLoc), Namespace(Namespace) { }
+
+ friend class ASTDeclReader;
+
+public:
+ /// \brief Retrieve the nested-name-specifier that qualifies the
+ /// name of the namespace, with source-location information.
+ NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
+
+ /// \brief Retrieve the nested-name-specifier that qualifies the
+ /// name of the namespace.
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
+ }
+
+ /// \brief Retrieve the namespace declaration aliased by this directive.
+ NamespaceDecl *getNamespace() {
+ if (NamespaceAliasDecl *AD = dyn_cast<NamespaceAliasDecl>(Namespace))
+ return AD->getNamespace();
+
+ return cast<NamespaceDecl>(Namespace);
+ }
+
+ const NamespaceDecl *getNamespace() const {
+ return const_cast<NamespaceAliasDecl*>(this)->getNamespace();
+ }
+
+ /// Returns the location of the alias name, i.e. 'foo' in
+ /// "namespace foo = ns::bar;".
+ SourceLocation getAliasLoc() const { return getLocation(); }
+
+ /// Returns the location of the 'namespace' keyword.
+ SourceLocation getNamespaceLoc() const { return NamespaceLoc; }
+
+ /// Returns the location of the identifier in the named namespace.
+ SourceLocation getTargetNameLoc() const { return IdentLoc; }
+
+ /// \brief Retrieve the namespace that this alias refers to, which
+ /// may either be a NamespaceDecl or a NamespaceAliasDecl.
+ NamedDecl *getAliasedNamespace() const { return Namespace; }
+
+ static NamespaceAliasDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation NamespaceLoc,
+ SourceLocation AliasLoc,
+ IdentifierInfo *Alias,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation IdentLoc,
+ NamedDecl *Namespace);
+
+ static NamespaceAliasDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(NamespaceLoc, IdentLoc);
+ }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const NamespaceAliasDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == NamespaceAlias; }
+};
+
+/// UsingShadowDecl - Represents a shadow declaration introduced into
+/// a scope by a (resolved) using declaration. For example,
+///
+/// namespace A {
+/// void foo();
+/// }
+/// namespace B {
+/// using A::foo; // <- a UsingDecl
+/// // Also creates a UsingShadowDecl for A::foo in B
+/// }
+///
+class UsingShadowDecl : public NamedDecl {
+ virtual void anchor();
+
+ /// The referenced declaration.
+ NamedDecl *Underlying;
+
+ /// \brief The using declaration which introduced this decl or the next using
+ /// shadow declaration contained in the aforementioned using declaration.
+ NamedDecl *UsingOrNextShadow;
+ friend class UsingDecl;
+
+ UsingShadowDecl(DeclContext *DC, SourceLocation Loc, UsingDecl *Using,
+ NamedDecl *Target)
+ : NamedDecl(UsingShadow, DC, Loc, DeclarationName()),
+ Underlying(Target),
+ UsingOrNextShadow(reinterpret_cast<NamedDecl *>(Using)) {
+ if (Target) {
+ setDeclName(Target->getDeclName());
+ IdentifierNamespace = Target->getIdentifierNamespace();
+ }
+ setImplicit();
+ }
+
+public:
+ static UsingShadowDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation Loc, UsingDecl *Using,
+ NamedDecl *Target) {
+ return new (C) UsingShadowDecl(DC, Loc, Using, Target);
+ }
+
+ static UsingShadowDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ /// \brief Gets the underlying declaration which has been brought into the
+ /// local scope.
+ NamedDecl *getTargetDecl() const { return Underlying; }
+
+ /// \brief Sets the underlying declaration which has been brought into the
+ /// local scope.
+ void setTargetDecl(NamedDecl* ND) {
+ assert(ND && "Target decl is null!");
+ Underlying = ND;
+ IdentifierNamespace = ND->getIdentifierNamespace();
+ }
+
+ /// \brief Gets the using declaration to which this declaration is tied.
+ UsingDecl *getUsingDecl() const;
+
+ /// \brief The next using shadow declaration contained in the shadow decl
+ /// chain of the using declaration which introduced this decl.
+ UsingShadowDecl *getNextUsingShadowDecl() const {
+ return dyn_cast_or_null<UsingShadowDecl>(UsingOrNextShadow);
+ }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const UsingShadowDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == Decl::UsingShadow; }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
+
+/// UsingDecl - Represents a C++ using-declaration. For example:
+/// using someNameSpace::someIdentifier;
+class UsingDecl : public NamedDecl {
+ virtual void anchor();
+
+ /// \brief The source location of the "using" location itself.
+ SourceLocation UsingLocation;
+
+ /// \brief The nested-name-specifier that precedes the name.
+ NestedNameSpecifierLoc QualifierLoc;
+
+ /// DNLoc - Provides source/type location info for the
+ /// declaration name embedded in the NamedDecl base class.
+ DeclarationNameLoc DNLoc;
+
+ /// \brief The first shadow declaration of the shadow decl chain associated
+ /// with this using declaration. The bool member of the pair stores whether
+ /// this decl has the 'typename' keyword.
+ llvm::PointerIntPair<UsingShadowDecl *, 1, bool> FirstUsingShadow;
+
+ UsingDecl(DeclContext *DC, SourceLocation UL,
+ NestedNameSpecifierLoc QualifierLoc,
+ const DeclarationNameInfo &NameInfo, bool IsTypeNameArg)
+ : NamedDecl(Using, DC, NameInfo.getLoc(), NameInfo.getName()),
+ UsingLocation(UL), QualifierLoc(QualifierLoc),
+ DNLoc(NameInfo.getInfo()), FirstUsingShadow(0, IsTypeNameArg) {
+ }
+
+public:
+ /// \brief Returns the source location of the "using" keyword.
+ SourceLocation getUsingLocation() const { return UsingLocation; }
+
+ /// \brief Set the source location of the 'using' keyword.
+ void setUsingLocation(SourceLocation L) { UsingLocation = L; }
+
+ /// \brief Retrieve the nested-name-specifier that qualifies the name,
+ /// with source-location information.
+ NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
+
+ /// \brief Retrieve the nested-name-specifier that qualifies the name.
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
+ }
+
+ DeclarationNameInfo getNameInfo() const {
+ return DeclarationNameInfo(getDeclName(), getLocation(), DNLoc);
+ }
+
+ /// \brief Return true if the using declaration has 'typename'.
+ bool isTypeName() const { return FirstUsingShadow.getInt(); }
+
+ /// \brief Sets whether the using declaration has 'typename'.
+ void setTypeName(bool TN) { FirstUsingShadow.setInt(TN); }
+
+ /// \brief Iterates through the using shadow declarations associated with
+ /// this using declaration.
+ class shadow_iterator {
+ /// \brief The current using shadow declaration.
+ UsingShadowDecl *Current;
+
+ public:
+ typedef UsingShadowDecl* value_type;
+ typedef UsingShadowDecl* reference;
+ typedef UsingShadowDecl* pointer;
+ typedef std::forward_iterator_tag iterator_category;
+ typedef std::ptrdiff_t difference_type;
+
+ shadow_iterator() : Current(0) { }
+ explicit shadow_iterator(UsingShadowDecl *C) : Current(C) { }
+
+ reference operator*() const { return Current; }
+ pointer operator->() const { return Current; }
+
+ shadow_iterator& operator++() {
+ Current = Current->getNextUsingShadowDecl();
+ return *this;
+ }
+
+ shadow_iterator operator++(int) {
+ shadow_iterator tmp(*this);
+ ++(*this);
+ return tmp;
+ }
+
+ friend bool operator==(shadow_iterator x, shadow_iterator y) {
+ return x.Current == y.Current;
+ }
+ friend bool operator!=(shadow_iterator x, shadow_iterator y) {
+ return x.Current != y.Current;
+ }
+ };
+
+ shadow_iterator shadow_begin() const {
+ return shadow_iterator(FirstUsingShadow.getPointer());
+ }
+ shadow_iterator shadow_end() const { return shadow_iterator(); }
+
+ /// \brief Return the number of shadowed declarations associated with this
+ /// using declaration.
+ unsigned shadow_size() const {
+ return std::distance(shadow_begin(), shadow_end());
+ }
+
+ void addShadowDecl(UsingShadowDecl *S);
+ void removeShadowDecl(UsingShadowDecl *S);
+
+ static UsingDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation UsingL,
+ NestedNameSpecifierLoc QualifierLoc,
+ const DeclarationNameInfo &NameInfo,
+ bool IsTypeNameArg);
+
+ static UsingDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(UsingLocation, getNameInfo().getEndLoc());
+ }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const UsingDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == Using; }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
+
+/// UnresolvedUsingValueDecl - Represents a dependent using
+/// declaration which was not marked with 'typename'. Unlike
+/// non-dependent using declarations, these *only* bring through
+/// non-types; otherwise they would break two-phase lookup.
+///
+/// template <class T> class A : public Base<T> {
+/// using Base<T>::foo;
+/// };
+class UnresolvedUsingValueDecl : public ValueDecl {
+ virtual void anchor();
+
+ /// \brief The source location of the 'using' keyword
+ SourceLocation UsingLocation;
+
+ /// \brief The nested-name-specifier that precedes the name.
+ NestedNameSpecifierLoc QualifierLoc;
+
+ /// DNLoc - Provides source/type location info for the
+ /// declaration name embedded in the ValueDecl base class.
+ DeclarationNameLoc DNLoc;
+
+ UnresolvedUsingValueDecl(DeclContext *DC, QualType Ty,
+ SourceLocation UsingLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ const DeclarationNameInfo &NameInfo)
+ : ValueDecl(UnresolvedUsingValue, DC,
+ NameInfo.getLoc(), NameInfo.getName(), Ty),
+ UsingLocation(UsingLoc), QualifierLoc(QualifierLoc),
+ DNLoc(NameInfo.getInfo())
+ { }
+
+public:
+ /// \brief Returns the source location of the 'using' keyword.
+ SourceLocation getUsingLoc() const { return UsingLocation; }
+
+ /// \brief Set the source location of the 'using' keyword.
+ void setUsingLoc(SourceLocation L) { UsingLocation = L; }
+
+ /// \brief Retrieve the nested-name-specifier that qualifies the name,
+ /// with source-location information.
+ NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
+
+ /// \brief Retrieve the nested-name-specifier that qualifies the name.
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
+ }
+
+ DeclarationNameInfo getNameInfo() const {
+ return DeclarationNameInfo(getDeclName(), getLocation(), DNLoc);
+ }
+
+ static UnresolvedUsingValueDecl *
+ Create(ASTContext &C, DeclContext *DC, SourceLocation UsingLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ const DeclarationNameInfo &NameInfo);
+
+ static UnresolvedUsingValueDecl *
+ CreateDeserialized(ASTContext &C, unsigned ID);
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(UsingLocation, getNameInfo().getEndLoc());
+ }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const UnresolvedUsingValueDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == UnresolvedUsingValue; }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
+
+/// UnresolvedUsingTypenameDecl - Represents a dependent using
+/// declaration which was marked with 'typename'.
+///
+/// template <class T> class A : public Base<T> {
+/// using typename Base<T>::foo;
+/// };
+///
+/// The type associated with an unresolved using typename decl is
+/// currently always a typename type.
+class UnresolvedUsingTypenameDecl : public TypeDecl {
+ virtual void anchor();
+
+ /// \brief The source location of the 'using' keyword
+ SourceLocation UsingLocation;
+
+ /// \brief The source location of the 'typename' keyword
+ SourceLocation TypenameLocation;
+
+ /// \brief The nested-name-specifier that precedes the name.
+ NestedNameSpecifierLoc QualifierLoc;
+
+ UnresolvedUsingTypenameDecl(DeclContext *DC, SourceLocation UsingLoc,
+ SourceLocation TypenameLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TargetNameLoc,
+ IdentifierInfo *TargetName)
+ : TypeDecl(UnresolvedUsingTypename, DC, TargetNameLoc, TargetName,
+ UsingLoc),
+ TypenameLocation(TypenameLoc), QualifierLoc(QualifierLoc) { }
+
+ friend class ASTDeclReader;
+
+public:
+ /// \brief Returns the source location of the 'using' keyword.
+ SourceLocation getUsingLoc() const { return getLocStart(); }
+
+ /// \brief Returns the source location of the 'typename' keyword.
+ SourceLocation getTypenameLoc() const { return TypenameLocation; }
+
+ /// \brief Retrieve the nested-name-specifier that qualifies the name,
+ /// with source-location information.
+ NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
+
+ /// \brief Retrieve the nested-name-specifier that qualifies the name.
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
+ }
+
+ static UnresolvedUsingTypenameDecl *
+ Create(ASTContext &C, DeclContext *DC, SourceLocation UsingLoc,
+ SourceLocation TypenameLoc, NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TargetNameLoc, DeclarationName TargetName);
+
+ static UnresolvedUsingTypenameDecl *
+ CreateDeserialized(ASTContext &C, unsigned ID);
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const UnresolvedUsingTypenameDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == UnresolvedUsingTypename; }
+};
+
+/// StaticAssertDecl - Represents a C++0x static_assert declaration.
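+/// For example:
+///
+/// @code
+/// static_assert(sizeof(void *) >= 4, "unexpected pointer size");
+/// @endcode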
+class StaticAssertDecl : public Decl {
+ virtual void anchor();
+ Expr *AssertExpr;
+ StringLiteral *Message;
+ SourceLocation RParenLoc;
+
+ StaticAssertDecl(DeclContext *DC, SourceLocation StaticAssertLoc,
+ Expr *assertexpr, StringLiteral *message,
+ SourceLocation RParenLoc)
+ : Decl(StaticAssert, DC, StaticAssertLoc), AssertExpr(assertexpr),
+ Message(message), RParenLoc(RParenLoc) { }
+
+public:
+ static StaticAssertDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StaticAssertLoc,
+ Expr *AssertExpr, StringLiteral *Message,
+ SourceLocation RParenLoc);
+ static StaticAssertDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ Expr *getAssertExpr() { return AssertExpr; }
+ const Expr *getAssertExpr() const { return AssertExpr; }
+
+ StringLiteral *getMessage() { return Message; }
+ const StringLiteral *getMessage() const { return Message; }
+
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getLocation(), getRParenLoc());
+ }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const StaticAssertDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == StaticAssert; }
+
+ friend class ASTDeclReader;
+};
+
+/// Insertion operator for diagnostics. This allows sending AccessSpecifier's
+/// into a diagnostic with <<.
+const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ AccessSpecifier AS);
+
+const PartialDiagnostic &operator<<(const PartialDiagnostic &DB,
+ AccessSpecifier AS);
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclContextInternals.h b/contrib/llvm/tools/clang/include/clang/AST/DeclContextInternals.h
new file mode 100644
index 0000000..c5f2aa0
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclContextInternals.h
@@ -0,0 +1,223 @@
+//===-- DeclContextInternals.h - DeclContext Representation -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the data structures used in the implementation
+// of DeclContext.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_AST_DECLCONTEXTINTERNALS_H
+#define LLVM_CLANG_AST_DECLCONTEXTINTERNALS_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/DeclCXX.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallVector.h"
+#include <algorithm>
+
+namespace clang {
+
+class DependentDiagnostic;
+
+/// StoredDeclsList - This is an array of decls optimized for the common case
+/// of only containing one entry.
+struct StoredDeclsList {
+
+ /// DeclsTy - When in vector form, this is what the Data pointer points to.
+ typedef SmallVector<NamedDecl *, 4> DeclsTy;
+
+ /// \brief The stored data, which will be either a pointer to a NamedDecl,
+ /// or a pointer to a vector.
+ llvm::PointerUnion<NamedDecl *, DeclsTy *> Data;
+
+public:
+ StoredDeclsList() {}
+
+ StoredDeclsList(const StoredDeclsList &RHS) : Data(RHS.Data) {
+ if (DeclsTy *RHSVec = RHS.getAsVector())
+ Data = new DeclsTy(*RHSVec);
+ }
+
+ ~StoredDeclsList() {
+ // If this is a vector-form, free the vector.
+ if (DeclsTy *Vector = getAsVector())
+ delete Vector;
+ }
+
+ StoredDeclsList &operator=(const StoredDeclsList &RHS) {
+ if (DeclsTy *Vector = getAsVector())
+ delete Vector;
+ Data = RHS.Data;
+ if (DeclsTy *RHSVec = RHS.getAsVector())
+ Data = new DeclsTy(*RHSVec);
+ return *this;
+ }
+
+ bool isNull() const { return Data.isNull(); }
+
+ NamedDecl *getAsDecl() const {
+ return Data.dyn_cast<NamedDecl *>();
+ }
+
+ DeclsTy *getAsVector() const {
+ return Data.dyn_cast<DeclsTy *>();
+ }
+
+ void setOnlyValue(NamedDecl *ND) {
+ assert(!getAsVector() && "Not inline");
+ Data = ND;
+ // Make sure that Data is a plain NamedDecl* so we can use its address
+ // at getLookupResult.
+ assert(*(NamedDecl **)&Data == ND &&
+ "PointerUnion mangles the NamedDecl pointer!");
+ }
+
+ void remove(NamedDecl *D) {
+ assert(!isNull() && "removing from empty list");
+ if (NamedDecl *Singleton = getAsDecl()) {
+ assert(Singleton == D && "list is different singleton");
+ (void)Singleton;
+ Data = (NamedDecl *)0;
+ return;
+ }
+
+ DeclsTy &Vec = *getAsVector();
+ DeclsTy::iterator I = std::find(Vec.begin(), Vec.end(), D);
+ assert(I != Vec.end() && "list does not contain decl");
+ Vec.erase(I);
+
+ assert(std::find(Vec.begin(), Vec.end(), D)
+ == Vec.end() && "list still contains decl");
+ }
+
+ /// getLookupResult - Return an array of all the decls that this list
+ /// represents.
+ DeclContext::lookup_result getLookupResult() {
+ if (isNull())
+ return DeclContext::lookup_result(DeclContext::lookup_iterator(0),
+ DeclContext::lookup_iterator(0));
+
+ // If we have a single NamedDecl, return it.
+ if (getAsDecl()) {
+ assert(!isNull() && "Empty list isn't allowed");
+
+ // Data is a raw pointer to a NamedDecl*, return it.
+ void *Ptr = &Data;
+ return DeclContext::lookup_result((NamedDecl**)Ptr, (NamedDecl**)Ptr+1);
+ }
+
+ assert(getAsVector() && "Must have a vector at this point");
+ DeclsTy &Vector = *getAsVector();
+
+ // Otherwise, we have a range result.
+ return DeclContext::lookup_result(&Vector[0], &Vector[0]+Vector.size());
+ }
+
+ /// HandleRedeclaration - If this is a redeclaration of an existing decl,
+ /// replace the old one with D and return true. Otherwise return false.
+ bool HandleRedeclaration(NamedDecl *D) {
+ // Most decls only have one entry in their list, special case it.
+ if (NamedDecl *OldD = getAsDecl()) {
+ if (!D->declarationReplaces(OldD))
+ return false;
+ setOnlyValue(D);
+ return true;
+ }
+
+ // Determine if this declaration is actually a redeclaration.
+ DeclsTy &Vec = *getAsVector();
+ for (DeclsTy::iterator OD = Vec.begin(), ODEnd = Vec.end();
+ OD != ODEnd; ++OD) {
+ NamedDecl *OldD = *OD;
+ if (D->declarationReplaces(OldD)) {
+ *OD = D;
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ /// AddSubsequentDecl - This is called on the second and later decls added to
+ /// the list when they are not redeclarations, to merge them into the
+ /// appropriate place in our list.
+ ///
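+ /// The resulting order (summarizing the logic below) is: resolved using
+ /// declarations first, then unresolved using declarations, then all other
+ /// declarations, with the (at most one) tag declaration kept at the end.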
+ void AddSubsequentDecl(NamedDecl *D) {
+ // If this is the second decl added to the list, convert this to vector
+ // form.
+ if (NamedDecl *OldD = getAsDecl()) {
+ DeclsTy *VT = new DeclsTy();
+ VT->push_back(OldD);
+ Data = VT;
+ }
+
+ DeclsTy &Vec = *getAsVector();
+
+ // Using directives end up in a special entry which contains only
+ // other using directives, so all this logic is wasted for them.
+ // But avoiding the logic wastes time in the far-more-common case
+ // that we're *not* adding a new using directive.
+
+ // Tag declarations always go at the end of the list so that an
+ // iterator which points at the first tag will start a span of
+ // decls that only contains tags.
+ if (D->hasTagIdentifierNamespace())
+ Vec.push_back(D);
+
+ // Resolved using declarations go at the front of the list so that
+ // they won't show up in other lookup results. Unresolved using
+ // declarations (which are always in IDNS_Using | IDNS_Ordinary)
+ // follow that so that the using declarations will be contiguous.
+ else if (D->getIdentifierNamespace() & Decl::IDNS_Using) {
+ DeclsTy::iterator I = Vec.begin();
+ if (D->getIdentifierNamespace() != Decl::IDNS_Using) {
+ while (I != Vec.end() &&
+ (*I)->getIdentifierNamespace() == Decl::IDNS_Using)
+ ++I;
+ }
+ Vec.insert(I, D);
+
+ // All other declarations go at the end of the list, but before any
+ // tag declarations. But we can be clever about tag declarations
+ // because there can only ever be one in a scope.
+ } else if (Vec.back()->hasTagIdentifierNamespace()) {
+ NamedDecl *TagD = Vec.back();
+ Vec.back() = D;
+ Vec.push_back(TagD);
+ } else
+ Vec.push_back(D);
+ }
+};
+
+class StoredDeclsMap
+ : public llvm::DenseMap<DeclarationName, StoredDeclsList> {
+
+public:
+ static void DestroyAll(StoredDeclsMap *Map, bool Dependent);
+
+private:
+ friend class ASTContext; // walks the chain deleting these
+ friend class DeclContext;
+ llvm::PointerIntPair<StoredDeclsMap*, 1> Previous;
+};
+
+class DependentStoredDeclsMap : public StoredDeclsMap {
+public:
+ DependentStoredDeclsMap() : FirstDiagnostic(0) {}
+
+private:
+ friend class DependentDiagnostic;
+ friend class DeclContext; // iterates over diagnostics
+
+ DependentDiagnostic *FirstDiagnostic;
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h b/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h
new file mode 100644
index 0000000..ba1eb8d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclFriend.h
@@ -0,0 +1,198 @@
+//===-- DeclFriend.h - Classes for C++ friend declarations -*- C++ -*------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the section of the AST representing C++ friend
+// declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_DECLFRIEND_H
+#define LLVM_CLANG_AST_DECLFRIEND_H
+
+#include "clang/AST/DeclCXX.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+
+/// FriendDecl - Represents the declaration of a friend entity,
+/// which can be a function, a type, or a templated function or type.
+/// For example:
+///
+/// @code
+/// template <typename T> class A {
+/// friend int foo(T);
+/// friend class B;
+/// friend T; // only in C++0x
+/// template <typename U> friend class C;
+/// template <typename U> friend A& operator+=(A&, const U&) { ... }
+/// };
+/// @endcode
+///
+/// The semantic context of a friend decl is its declaring class.
+class FriendDecl : public Decl {
+ virtual void anchor();
+public:
+ typedef llvm::PointerUnion<NamedDecl*,TypeSourceInfo*> FriendUnion;
+
+private:
+ // The declaration that's a friend of this class.
+ FriendUnion Friend;
+
+ // A pointer to the next friend in the sequence.
+ LazyDeclPtr NextFriend;
+
+ // Location of the 'friend' specifier.
+ SourceLocation FriendLoc;
+
+ /// True if this 'friend' declaration is unsupported. Eventually we
+ /// will support every possible friend declaration, but for now we
+ /// silently ignore some and set this flag to authorize all access.
+ bool UnsupportedFriend;
+
+ friend class CXXRecordDecl::friend_iterator;
+ friend class CXXRecordDecl;
+
+ FriendDecl(DeclContext *DC, SourceLocation L, FriendUnion Friend,
+ SourceLocation FriendL)
+ : Decl(Decl::Friend, DC, L),
+ Friend(Friend),
+ NextFriend(),
+ FriendLoc(FriendL),
+ UnsupportedFriend(false) {
+ }
+
+ explicit FriendDecl(EmptyShell Empty)
+ : Decl(Decl::Friend, Empty), NextFriend() { }
+
+ FriendDecl *getNextFriend() {
+ return cast_or_null<FriendDecl>(
+ NextFriend.get(getASTContext().getExternalSource()));
+ }
+
+public:
+ static FriendDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, FriendUnion Friend_,
+ SourceLocation FriendL);
+ static FriendDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ /// If this friend declaration names an (untemplated but possibly
+ /// dependent) type, return the type; otherwise return null. This
+ /// is used for elaborated-type-specifiers and, in C++0x, for
+ /// arbitrary friend type declarations.
+ TypeSourceInfo *getFriendType() const {
+ return Friend.dyn_cast<TypeSourceInfo*>();
+ }
+
+ /// If this friend declaration doesn't name a type, return the inner
+ /// declaration.
+ NamedDecl *getFriendDecl() const {
+ return Friend.dyn_cast<NamedDecl*>();
+ }
+
+ /// Retrieves the location of the 'friend' keyword.
+ SourceLocation getFriendLoc() const {
+ return FriendLoc;
+ }
+
+ /// Retrieves the source range for the friend declaration.
+ SourceRange getSourceRange() const LLVM_READONLY {
+ /* FIXME: consider the case of templates wrt start of range. */
+ if (NamedDecl *ND = getFriendDecl())
+ return SourceRange(getFriendLoc(), ND->getLocEnd());
+ else if (TypeSourceInfo *TInfo = getFriendType())
+ return SourceRange(getFriendLoc(), TInfo->getTypeLoc().getEndLoc());
+ else
+ return SourceRange(getFriendLoc(), getLocation());
+ }
+
+ /// Determines if this friend kind is unsupported.
+ bool isUnsupportedFriend() const {
+ return UnsupportedFriend;
+ }
+ void setUnsupportedFriend(bool Unsupported) {
+ UnsupportedFriend = Unsupported;
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const FriendDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == Decl::Friend; }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
+
+/// An iterator over the friend declarations of a class.
+class CXXRecordDecl::friend_iterator {
+ FriendDecl *Ptr;
+
+ friend class CXXRecordDecl;
+ explicit friend_iterator(FriendDecl *Ptr) : Ptr(Ptr) {}
+public:
+ friend_iterator() {}
+
+ typedef FriendDecl *value_type;
+ typedef FriendDecl *reference;
+ typedef FriendDecl *pointer;
+ typedef int difference_type;
+ typedef std::forward_iterator_tag iterator_category;
+
+ reference operator*() const { return Ptr; }
+
+ friend_iterator &operator++() {
+ assert(Ptr && "attempt to increment past end of friend list");
+ Ptr = Ptr->getNextFriend();
+ return *this;
+ }
+
+ friend_iterator operator++(int) {
+ friend_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ bool operator==(const friend_iterator &Other) const {
+ return Ptr == Other.Ptr;
+ }
+
+ bool operator!=(const friend_iterator &Other) const {
+ return Ptr != Other.Ptr;
+ }
+
+ friend_iterator &operator+=(difference_type N) {
+ assert(N >= 0 && "cannot rewind a CXXRecordDecl::friend_iterator");
+ while (N--)
+ ++*this;
+ return *this;
+ }
+
+ friend_iterator operator+(difference_type N) const {
+ friend_iterator tmp = *this;
+ tmp += N;
+ return tmp;
+ }
+};
+
+inline CXXRecordDecl::friend_iterator CXXRecordDecl::friend_begin() const {
+ return friend_iterator(data().FirstFriend);
+}
+
+inline CXXRecordDecl::friend_iterator CXXRecordDecl::friend_end() const {
+ return friend_iterator(0);
+}
+
+inline void CXXRecordDecl::pushFriendDecl(FriendDecl *FD) {
+ assert(FD->NextFriend == 0 && "friend already has next friend?");
+ FD->NextFriend = data().FirstFriend;
+ data().FirstFriend = FD;
+}
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclGroup.h b/contrib/llvm/tools/clang/include/clang/AST/DeclGroup.h
new file mode 100644
index 0000000..63cdac5
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclGroup.h
@@ -0,0 +1,151 @@
+//===--- DeclGroup.h - Classes for representing groups of Decls -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DeclGroup, DeclGroupRef, and OwningDeclGroup classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_DECLGROUP_H
+#define LLVM_CLANG_AST_DECLGROUP_H
+
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+
+namespace clang {
+
+class ASTContext;
+class Decl;
+class DeclGroup;
+class DeclGroupIterator;
+
+class DeclGroup {
+ // FIXME: Include a TypeSpecifier object.
+ unsigned NumDecls;
+
+private:
+ DeclGroup() : NumDecls(0) {}
+ DeclGroup(unsigned numdecls, Decl** decls);
+
+public:
+ static DeclGroup *Create(ASTContext &C, Decl **Decls, unsigned NumDecls);
+
+ unsigned size() const { return NumDecls; }
+
+ Decl*& operator[](unsigned i) {
+ assert (i < NumDecls && "Out-of-bounds access.");
+ return ((Decl**) (this+1))[i];
+ }
+
+ Decl* const& operator[](unsigned i) const {
+ assert (i < NumDecls && "Out-of-bounds access.");
+ return ((Decl* const*) (this+1))[i];
+ }
+};
+
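+/// DeclGroupRef - A lightweight reference to either a single Decl or a
+/// DeclGroup, discriminated by the low bit of the stored pointer.
+///
+/// Illustrative usage sketch (Ctx, SomeDecls, and N are hypothetical):
+///
+/// @code
+/// DeclGroupRef DG = DeclGroupRef::Create(Ctx, SomeDecls, N);
+/// for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I) {
+///   Decl *D = *I;
+///   // ... inspect D ...
+/// }
+/// @endcode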
+class DeclGroupRef {
+ // Note this is not a PointerIntPair because we need the address of the
+ // non-group case to be valid as a Decl** for iteration.
+ enum Kind { SingleDeclKind=0x0, DeclGroupKind=0x1, Mask=0x1 };
+ Decl* D;
+
+ Kind getKind() const {
+ return (Kind) (reinterpret_cast<uintptr_t>(D) & Mask);
+ }
+
+public:
+ DeclGroupRef() : D(0) {}
+
+ explicit DeclGroupRef(Decl* d) : D(d) {}
+ explicit DeclGroupRef(DeclGroup* dg)
+ : D((Decl*) (reinterpret_cast<uintptr_t>(dg) | DeclGroupKind)) {}
+
+ static DeclGroupRef Create(ASTContext &C, Decl **Decls, unsigned NumDecls) {
+ if (NumDecls == 0)
+ return DeclGroupRef();
+ if (NumDecls == 1)
+ return DeclGroupRef(Decls[0]);
+ return DeclGroupRef(DeclGroup::Create(C, Decls, NumDecls));
+ }
+
+ typedef Decl** iterator;
+ typedef Decl* const * const_iterator;
+
+ bool isNull() const { return D == 0; }
+ bool isSingleDecl() const { return getKind() == SingleDeclKind; }
+ bool isDeclGroup() const { return getKind() == DeclGroupKind; }
+
+ Decl *getSingleDecl() {
+ assert(isSingleDecl() && "Isn't a single decl");
+ return D;
+ }
+ const Decl *getSingleDecl() const {
+ return const_cast<DeclGroupRef*>(this)->getSingleDecl();
+ }
+
+ DeclGroup &getDeclGroup() {
+ assert(isDeclGroup() && "Isn't a declgroup");
+ return *((DeclGroup*)(reinterpret_cast<uintptr_t>(D) & ~Mask));
+ }
+ const DeclGroup &getDeclGroup() const {
+ return const_cast<DeclGroupRef*>(this)->getDeclGroup();
+ }
+
+ iterator begin() {
+ if (isSingleDecl())
+ return D ? &D : 0;
+ return &getDeclGroup()[0];
+ }
+
+ iterator end() {
+ if (isSingleDecl())
+ return D ? &D+1 : 0;
+ DeclGroup &G = getDeclGroup();
+ return &G[0] + G.size();
+ }
+
+ const_iterator begin() const {
+ if (isSingleDecl())
+ return D ? &D : 0;
+ return &getDeclGroup()[0];
+ }
+
+ const_iterator end() const {
+ if (isSingleDecl())
+ return D ? &D+1 : 0;
+ const DeclGroup &G = getDeclGroup();
+ return &G[0] + G.size();
+ }
+
+ void *getAsOpaquePtr() const { return D; }
+ static DeclGroupRef getFromOpaquePtr(void *Ptr) {
+ DeclGroupRef X;
+ X.D = static_cast<Decl*>(Ptr);
+ return X;
+ }
+};
+
+} // end clang namespace
+
+namespace llvm {
+ // DeclGroupRef is "like a pointer", implement PointerLikeTypeTraits.
+ template <typename T>
+ class PointerLikeTypeTraits;
+ template <>
+ class PointerLikeTypeTraits<clang::DeclGroupRef> {
+ public:
+ static inline void *getAsVoidPointer(clang::DeclGroupRef P) {
+ return P.getAsOpaquePtr();
+ }
+ static inline clang::DeclGroupRef getFromVoidPointer(void *P) {
+ return clang::DeclGroupRef::getFromOpaquePtr(P);
+ }
+ enum { NumLowBitsAvailable = 0 };
+ };
+}
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclLookups.h b/contrib/llvm/tools/clang/include/clang/AST/DeclLookups.h
new file mode 100644
index 0000000..66d190f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclLookups.h
@@ -0,0 +1,84 @@
+//===-- DeclLookups.h - Low-level interface to all names in a DC-*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines DeclContext::all_lookups_iterator.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_DECLLOOKUPS_H
+#define LLVM_CLANG_AST_DECLLOOKUPS_H
+
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclContextInternals.h"
+#include "clang/AST/DeclarationName.h"
+
+namespace clang {
+
+/// all_lookups_iterator - An iterator that provides a view over the results
+/// of looking up every possible name.
+class DeclContext::all_lookups_iterator {
+ StoredDeclsMap::iterator It, End;
+public:
+ typedef lookup_result value_type;
+ typedef lookup_result reference;
+ typedef lookup_result pointer;
+ typedef std::forward_iterator_tag iterator_category;
+ typedef std::ptrdiff_t difference_type;
+
+ all_lookups_iterator() {}
+ all_lookups_iterator(StoredDeclsMap::iterator It,
+ StoredDeclsMap::iterator End)
+ : It(It), End(End) {}
+
+ reference operator*() const { return It->second.getLookupResult(); }
+ pointer operator->() const { return It->second.getLookupResult(); }
+
+ all_lookups_iterator& operator++() {
+    // Filter out using directives. They don't belong as results from name
+    // lookup anyway, except as an implementation detail. Users of the API
+    // should not expect to get them (or worse, rely on them).
+ do {
+ ++It;
+ } while (It != End &&
+ It->first == DeclarationName::getUsingDirectiveName());
+
+ return *this;
+ }
+
+ all_lookups_iterator operator++(int) {
+ all_lookups_iterator tmp(*this);
+ ++(*this);
+ return tmp;
+ }
+
+ friend bool operator==(all_lookups_iterator x, all_lookups_iterator y) {
+ return x.It == y.It;
+ }
+ friend bool operator!=(all_lookups_iterator x, all_lookups_iterator y) {
+ return x.It != y.It;
+ }
+};
+
+inline DeclContext::all_lookups_iterator DeclContext::lookups_begin() const {
+ DeclContext *Primary = const_cast<DeclContext*>(this)->getPrimaryContext();
+ if (StoredDeclsMap *Map = Primary->buildLookup())
+ return all_lookups_iterator(Map->begin(), Map->end());
+ return all_lookups_iterator();
+}
+
+inline DeclContext::all_lookups_iterator DeclContext::lookups_end() const {
+ DeclContext *Primary = const_cast<DeclContext*>(this)->getPrimaryContext();
+ if (StoredDeclsMap *Map = Primary->buildLookup())
+ return all_lookups_iterator(Map->end(), Map->end());
+ return all_lookups_iterator();
+}
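+
+// Illustrative sketch (not part of the original header): enumerating every
+// name visible in a context through the iterators defined above. 'DC' is a
+// hypothetical DeclContext pointer supplied by the caller.
+//
+//   for (DeclContext::all_lookups_iterator I = DC->lookups_begin(),
+//                                          E = DC->lookups_end();
+//        I != E; ++I) {
+//     DeclContext::lookup_result R = *I;  // all decls sharing one name
+//   }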
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h b/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h
new file mode 100644
index 0000000..4ae073e
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclObjC.h
@@ -0,0 +1,1988 @@
+//===--- DeclObjC.h - Classes for representing declarations -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DeclObjC interface and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_DECLOBJC_H
+#define LLVM_CLANG_AST_DECLOBJC_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/SelectorLocationsKind.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+class Expr;
+class Stmt;
+class FunctionDecl;
+class RecordDecl;
+class ObjCIvarDecl;
+class ObjCMethodDecl;
+class ObjCProtocolDecl;
+class ObjCCategoryDecl;
+class ObjCPropertyDecl;
+class ObjCPropertyImplDecl;
+class CXXCtorInitializer;
+
+class ObjCListBase {
+ void operator=(const ObjCListBase &); // DO NOT IMPLEMENT
+ ObjCListBase(const ObjCListBase&); // DO NOT IMPLEMENT
+protected:
+ /// List is an array of pointers to objects that are not owned by this object.
+ void **List;
+ unsigned NumElts;
+
+public:
+ ObjCListBase() : List(0), NumElts(0) {}
+ unsigned size() const { return NumElts; }
+ bool empty() const { return NumElts == 0; }
+
+protected:
+ void set(void *const* InList, unsigned Elts, ASTContext &Ctx);
+};
+
+
+/// ObjCList - This is a simple template class used to hold various lists of
+/// decls etc, which is heavily used by the ObjC front-end. The only use case
+/// this supports is setting the list all at once and then reading elements out
+/// of it.
+template <typename T>
+class ObjCList : public ObjCListBase {
+public:
+ void set(T* const* InList, unsigned Elts, ASTContext &Ctx) {
+ ObjCListBase::set(reinterpret_cast<void*const*>(InList), Elts, Ctx);
+ }
+
+ typedef T* const * iterator;
+ iterator begin() const { return (iterator)List; }
+ iterator end() const { return (iterator)List+NumElts; }
+
+ T* operator[](unsigned Idx) const {
+ assert(Idx < NumElts && "Invalid access");
+ return (T*)List[Idx];
+ }
+};
+
+/// \brief A list of Objective-C protocols, along with the source
+/// locations at which they were referenced.
+class ObjCProtocolList : public ObjCList<ObjCProtocolDecl> {
+ SourceLocation *Locations;
+
+ using ObjCList<ObjCProtocolDecl>::set;
+
+public:
+ ObjCProtocolList() : ObjCList<ObjCProtocolDecl>(), Locations(0) { }
+
+ typedef const SourceLocation *loc_iterator;
+ loc_iterator loc_begin() const { return Locations; }
+ loc_iterator loc_end() const { return Locations + size(); }
+
+ void set(ObjCProtocolDecl* const* InList, unsigned Elts,
+ const SourceLocation *Locs, ASTContext &Ctx);
+};
+
+
+/// ObjCMethodDecl - Represents an instance or class method declaration.
+/// ObjC methods can be declared within 4 contexts: class interfaces,
+/// categories, protocols, and class implementations. While C++ member
+/// functions leverage C syntax, Objective-C method syntax is modeled after
+/// Smalltalk (using colons to specify argument types/expressions).
+/// Here are some brief examples:
+///
+/// Setter/getter instance methods:
+/// - (void)setMenu:(NSMenu *)menu;
+/// - (NSMenu *)menu;
+///
+/// Instance method that takes 2 NSView arguments:
+/// - (void)replaceSubview:(NSView *)oldView with:(NSView *)newView;
+///
+/// Getter class method:
+/// + (NSMenu *)defaultMenu;
+///
+/// A selector represents a unique name for a method. The selector names for
+/// the above methods are setMenu:, menu, replaceSubview:with:, and defaultMenu.
+///
+class ObjCMethodDecl : public NamedDecl, public DeclContext {
+public:
+ enum ImplementationControl { None, Required, Optional };
+private:
+ // The conventional meaning of this method; an ObjCMethodFamily.
+ // This is not serialized; instead, it is computed on demand and
+ // cached.
+ mutable unsigned Family : ObjCMethodFamilyBitWidth;
+
+ /// instance (true) or class (false) method.
+ unsigned IsInstance : 1;
+ unsigned IsVariadic : 1;
+
+ // Synthesized declaration method for a property setter/getter
+ unsigned IsSynthesized : 1;
+
+ // Method has a definition.
+ unsigned IsDefined : 1;
+
+ /// \brief Method redeclaration in the same interface.
+ unsigned IsRedeclaration : 1;
+
+ /// \brief Is redeclared in the same interface.
+ mutable unsigned HasRedeclaration : 1;
+
+ // NOTE: VC++ treats enums as signed, avoid using ImplementationControl enum
+ /// @required/@optional
+ unsigned DeclImplementation : 2;
+
+ // NOTE: VC++ treats enums as signed, avoid using the ObjCDeclQualifier enum
+ /// in, inout, etc.
+ unsigned objcDeclQualifier : 6;
+
+ /// \brief Indicates whether this method has a related result type.
+ unsigned RelatedResultType : 1;
+
+  /// \brief Whether the locations of the selector identifiers are in a
+  /// "standard" position; an enum SelectorLocationsKind value.
+ unsigned SelLocsKind : 2;
+
+ // Result type of this method.
+ QualType MethodDeclType;
+
+ // Type source information for the result type.
+ TypeSourceInfo *ResultTInfo;
+
+ /// \brief Array of ParmVarDecls for the formal parameters of this method
+ /// and optionally followed by selector locations.
+ void *ParamsAndSelLocs;
+ unsigned NumParams;
+
+  SourceLocation EndLoc; // the location of the ';' or '}'.
+
+ // The following are only used for method definitions, null otherwise.
+ // FIXME: space savings opportunity, consider a sub-class.
+ Stmt *Body;
+
+ /// SelfDecl - Decl for the implicit self parameter. This is lazily
+ /// constructed by createImplicitParams.
+ ImplicitParamDecl *SelfDecl;
+ /// CmdDecl - Decl for the implicit _cmd parameter. This is lazily
+ /// constructed by createImplicitParams.
+ ImplicitParamDecl *CmdDecl;
+
+ SelectorLocationsKind getSelLocsKind() const {
+ return (SelectorLocationsKind)SelLocsKind;
+ }
+ bool hasStandardSelLocs() const {
+ return getSelLocsKind() != SelLoc_NonStandard;
+ }
+
+  /// \brief Get a pointer to the stored selector identifier locations array.
+ /// No locations will be stored if HasStandardSelLocs is true.
+ SourceLocation *getStoredSelLocs() {
+ return reinterpret_cast<SourceLocation*>(getParams() + NumParams);
+ }
+ const SourceLocation *getStoredSelLocs() const {
+ return reinterpret_cast<const SourceLocation*>(getParams() + NumParams);
+ }
+
+  /// \brief Get a pointer to the stored formal parameters array.
+ ParmVarDecl **getParams() {
+ return reinterpret_cast<ParmVarDecl **>(ParamsAndSelLocs);
+ }
+ const ParmVarDecl *const *getParams() const {
+ return reinterpret_cast<const ParmVarDecl *const *>(ParamsAndSelLocs);
+ }
+
+  /// \brief Get the number of stored selector identifier locations.
+ /// No locations will be stored if HasStandardSelLocs is true.
+ unsigned getNumStoredSelLocs() const {
+ if (hasStandardSelLocs())
+ return 0;
+ return getNumSelectorLocs();
+ }
+
+ void setParamsAndSelLocs(ASTContext &C,
+ ArrayRef<ParmVarDecl*> Params,
+ ArrayRef<SourceLocation> SelLocs);
+
+ ObjCMethodDecl(SourceLocation beginLoc, SourceLocation endLoc,
+ Selector SelInfo, QualType T,
+ TypeSourceInfo *ResultTInfo,
+ DeclContext *contextDecl,
+ bool isInstance = true,
+ bool isVariadic = false,
+ bool isSynthesized = false,
+ bool isImplicitlyDeclared = false,
+ bool isDefined = false,
+ ImplementationControl impControl = None,
+ bool HasRelatedResultType = false)
+ : NamedDecl(ObjCMethod, contextDecl, beginLoc, SelInfo),
+ DeclContext(ObjCMethod), Family(InvalidObjCMethodFamily),
+ IsInstance(isInstance), IsVariadic(isVariadic),
+ IsSynthesized(isSynthesized),
+ IsDefined(isDefined), IsRedeclaration(0), HasRedeclaration(0),
+ DeclImplementation(impControl), objcDeclQualifier(OBJC_TQ_None),
+ RelatedResultType(HasRelatedResultType),
+ SelLocsKind(SelLoc_StandardNoSpace),
+ MethodDeclType(T), ResultTInfo(ResultTInfo),
+ ParamsAndSelLocs(0), NumParams(0),
+ EndLoc(endLoc), Body(0), SelfDecl(0), CmdDecl(0) {
+ setImplicit(isImplicitlyDeclared);
+ }
+
+ /// \brief A definition will return its interface declaration.
+ /// An interface declaration will return its definition.
+ /// Otherwise it will return itself.
+ virtual ObjCMethodDecl *getNextRedeclaration();
+
+public:
+ static ObjCMethodDecl *Create(ASTContext &C,
+ SourceLocation beginLoc,
+ SourceLocation endLoc,
+ Selector SelInfo,
+ QualType T,
+ TypeSourceInfo *ResultTInfo,
+ DeclContext *contextDecl,
+ bool isInstance = true,
+ bool isVariadic = false,
+ bool isSynthesized = false,
+ bool isImplicitlyDeclared = false,
+ bool isDefined = false,
+ ImplementationControl impControl = None,
+ bool HasRelatedResultType = false);
+
+ static ObjCMethodDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ virtual ObjCMethodDecl *getCanonicalDecl();
+ const ObjCMethodDecl *getCanonicalDecl() const {
+ return const_cast<ObjCMethodDecl*>(this)->getCanonicalDecl();
+ }
+
+ ObjCDeclQualifier getObjCDeclQualifier() const {
+ return ObjCDeclQualifier(objcDeclQualifier);
+ }
+ void setObjCDeclQualifier(ObjCDeclQualifier QV) { objcDeclQualifier = QV; }
+
+ /// \brief Determine whether this method has a result type that is related
+ /// to the message receiver's type.
+ bool hasRelatedResultType() const { return RelatedResultType; }
+
+ /// \brief Note whether this method has a related result type.
+ void SetRelatedResultType(bool RRT = true) { RelatedResultType = RRT; }
+
+ /// \brief True if this is a method redeclaration in the same interface.
+ bool isRedeclaration() const { return IsRedeclaration; }
+ void setAsRedeclaration(const ObjCMethodDecl *PrevMethod);
+
+ // Location information, modeled after the Stmt API.
+ SourceLocation getLocStart() const LLVM_READONLY { return getLocation(); }
+ SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }
+ void setEndLoc(SourceLocation Loc) { EndLoc = Loc; }
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getLocation(), EndLoc);
+ }
+
+ SourceLocation getSelectorStartLoc() const {
+ if (isImplicit())
+ return getLocStart();
+ return getSelectorLoc(0);
+ }
+ SourceLocation getSelectorLoc(unsigned Index) const {
+ assert(Index < getNumSelectorLocs() && "Index out of range!");
+ if (hasStandardSelLocs())
+ return getStandardSelectorLoc(Index, getSelector(),
+ getSelLocsKind() == SelLoc_StandardWithSpace,
+ llvm::makeArrayRef(const_cast<ParmVarDecl**>(getParams()),
+ NumParams),
+ EndLoc);
+ return getStoredSelLocs()[Index];
+ }
+
+ void getSelectorLocs(SmallVectorImpl<SourceLocation> &SelLocs) const;
+
+ unsigned getNumSelectorLocs() const {
+ if (isImplicit())
+ return 0;
+ Selector Sel = getSelector();
+ if (Sel.isUnarySelector())
+ return 1;
+ return Sel.getNumArgs();
+ }
+
+ ObjCInterfaceDecl *getClassInterface();
+ const ObjCInterfaceDecl *getClassInterface() const {
+ return const_cast<ObjCMethodDecl*>(this)->getClassInterface();
+ }
+
+ Selector getSelector() const { return getDeclName().getObjCSelector(); }
+
+ QualType getResultType() const { return MethodDeclType; }
+ void setResultType(QualType T) { MethodDeclType = T; }
+
+ /// \brief Determine the type of an expression that sends a message to this
+ /// function.
+ QualType getSendResultType() const {
+ return getResultType().getNonLValueExprType(getASTContext());
+ }
+
+ TypeSourceInfo *getResultTypeSourceInfo() const { return ResultTInfo; }
+ void setResultTypeSourceInfo(TypeSourceInfo *TInfo) { ResultTInfo = TInfo; }
+
+ // Iterator access to formal parameters.
+ unsigned param_size() const { return NumParams; }
+ typedef const ParmVarDecl *const *param_const_iterator;
+ typedef ParmVarDecl *const *param_iterator;
+ param_const_iterator param_begin() const { return getParams(); }
+ param_const_iterator param_end() const { return getParams() + NumParams; }
+ param_iterator param_begin() { return getParams(); }
+ param_iterator param_end() { return getParams() + NumParams; }
+  // This method returns an iterator past the end of the parameters that are
+  // part of the selector name (i.e. the selector's formal arguments).
+ param_const_iterator sel_param_end() const {
+ return param_begin() + getSelector().getNumArgs();
+ }
+
+ /// \brief Sets the method's parameters and selector source locations.
+ /// If the method is implicit (not coming from source) \arg SelLocs is
+ /// ignored.
+ void setMethodParams(ASTContext &C,
+ ArrayRef<ParmVarDecl*> Params,
+ ArrayRef<SourceLocation> SelLocs =
+ ArrayRef<SourceLocation>());
+
+ // Iterator access to parameter types.
+ typedef std::const_mem_fun_t<QualType, ParmVarDecl> deref_fun;
+ typedef llvm::mapped_iterator<param_const_iterator, deref_fun>
+ arg_type_iterator;
+
+ arg_type_iterator arg_type_begin() const {
+ return llvm::map_iterator(param_begin(), deref_fun(&ParmVarDecl::getType));
+ }
+ arg_type_iterator arg_type_end() const {
+ return llvm::map_iterator(param_end(), deref_fun(&ParmVarDecl::getType));
+ }
+
+  /// createImplicitParams - Used to lazily create the self and cmd
+  /// implicit parameters. This must be called prior to using getSelfDecl()
+  /// or getCmdDecl(). The call is ignored if the implicit parameters
+  /// have already been created.
+ void createImplicitParams(ASTContext &Context, const ObjCInterfaceDecl *ID);
+
+ ImplicitParamDecl * getSelfDecl() const { return SelfDecl; }
+ void setSelfDecl(ImplicitParamDecl *SD) { SelfDecl = SD; }
+ ImplicitParamDecl * getCmdDecl() const { return CmdDecl; }
+ void setCmdDecl(ImplicitParamDecl *CD) { CmdDecl = CD; }
+
+ /// Determines the family of this method.
+ ObjCMethodFamily getMethodFamily() const;
+
+ bool isInstanceMethod() const { return IsInstance; }
+ void setInstanceMethod(bool isInst) { IsInstance = isInst; }
+ bool isVariadic() const { return IsVariadic; }
+ void setVariadic(bool isVar) { IsVariadic = isVar; }
+
+ bool isClassMethod() const { return !IsInstance; }
+
+ bool isSynthesized() const { return IsSynthesized; }
+ void setSynthesized(bool isSynth) { IsSynthesized = isSynth; }
+
+ bool isDefined() const { return IsDefined; }
+ void setDefined(bool isDefined) { IsDefined = isDefined; }
+
+ // Related to protocols declared in @protocol
+ void setDeclImplementation(ImplementationControl ic) {
+ DeclImplementation = ic;
+ }
+ ImplementationControl getImplementationControl() const {
+ return ImplementationControl(DeclImplementation);
+ }
+
+ virtual Stmt *getBody() const {
+ return (Stmt*) Body;
+ }
+ CompoundStmt *getCompoundBody() { return (CompoundStmt*)Body; }
+ void setBody(Stmt *B) { Body = B; }
+
+ /// \brief Returns whether this specific method is a definition.
+ bool isThisDeclarationADefinition() const { return Body; }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ObjCMethodDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == ObjCMethod; }
+ static DeclContext *castToDeclContext(const ObjCMethodDecl *D) {
+ return static_cast<DeclContext *>(const_cast<ObjCMethodDecl*>(D));
+ }
+ static ObjCMethodDecl *castFromDeclContext(const DeclContext *DC) {
+ return static_cast<ObjCMethodDecl *>(const_cast<DeclContext*>(DC));
+ }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
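+
+// Illustrative sketch (not part of the original header): typical read-only
+// use of an ObjCMethodDecl. 'M' is a hypothetical pointer supplied by the
+// caller.
+//
+//   void describe(const ObjCMethodDecl *M) {
+//     Selector Sel = M->getSelector();
+//     bool IsInstance = M->isInstanceMethod();
+//     for (ObjCMethodDecl::param_const_iterator P = M->param_begin(),
+//                                               E = M->param_end();
+//          P != E; ++P) {
+//       const ParmVarDecl *Param = *P;  // one formal parameter
+//     }
+//   }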
+
+/// ObjCContainerDecl - Represents a container for method declarations.
+/// Current sub-classes are ObjCInterfaceDecl, ObjCCategoryDecl,
+/// ObjCProtocolDecl, and ObjCImplDecl.
+///
+class ObjCContainerDecl : public NamedDecl, public DeclContext {
+ virtual void anchor();
+
+ SourceLocation AtStart;
+
+ // These two locations in the range mark the end of the method container.
+ // The first points to the '@' token, and the second to the 'end' token.
+ SourceRange AtEnd;
+public:
+
+ ObjCContainerDecl(Kind DK, DeclContext *DC,
+ IdentifierInfo *Id, SourceLocation nameLoc,
+ SourceLocation atStartLoc)
+ : NamedDecl(DK, DC, nameLoc, Id), DeclContext(DK), AtStart(atStartLoc) {}
+
+ // Iterator access to properties.
+ typedef specific_decl_iterator<ObjCPropertyDecl> prop_iterator;
+ prop_iterator prop_begin() const {
+ return prop_iterator(decls_begin());
+ }
+ prop_iterator prop_end() const {
+ return prop_iterator(decls_end());
+ }
+
+ // Iterator access to instance/class methods.
+ typedef specific_decl_iterator<ObjCMethodDecl> method_iterator;
+ method_iterator meth_begin() const {
+ return method_iterator(decls_begin());
+ }
+ method_iterator meth_end() const {
+ return method_iterator(decls_end());
+ }
+
+ typedef filtered_decl_iterator<ObjCMethodDecl,
+ &ObjCMethodDecl::isInstanceMethod>
+ instmeth_iterator;
+ instmeth_iterator instmeth_begin() const {
+ return instmeth_iterator(decls_begin());
+ }
+ instmeth_iterator instmeth_end() const {
+ return instmeth_iterator(decls_end());
+ }
+
+ typedef filtered_decl_iterator<ObjCMethodDecl,
+ &ObjCMethodDecl::isClassMethod>
+ classmeth_iterator;
+ classmeth_iterator classmeth_begin() const {
+ return classmeth_iterator(decls_begin());
+ }
+ classmeth_iterator classmeth_end() const {
+ return classmeth_iterator(decls_end());
+ }
+
+ // Get the local instance/class method declared in this interface.
+ ObjCMethodDecl *getMethod(Selector Sel, bool isInstance) const;
+ ObjCMethodDecl *getInstanceMethod(Selector Sel) const {
+ return getMethod(Sel, true/*isInstance*/);
+ }
+ ObjCMethodDecl *getClassMethod(Selector Sel) const {
+ return getMethod(Sel, false/*isInstance*/);
+ }
+ ObjCIvarDecl *getIvarDecl(IdentifierInfo *Id) const;
+
+ ObjCPropertyDecl *FindPropertyDeclaration(IdentifierInfo *PropertyId) const;
+
+ SourceLocation getAtStartLoc() const { return AtStart; }
+ void setAtStartLoc(SourceLocation Loc) { AtStart = Loc; }
+
+ // Marks the end of the container.
+ SourceRange getAtEndRange() const {
+ return AtEnd;
+ }
+ void setAtEndRange(SourceRange atEnd) {
+ AtEnd = atEnd;
+ }
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(AtStart, getAtEndRange().getEnd());
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ObjCContainerDecl *D) { return true; }
+ static bool classofKind(Kind K) {
+ return K >= firstObjCContainer &&
+ K <= lastObjCContainer;
+ }
+
+ static DeclContext *castToDeclContext(const ObjCContainerDecl *D) {
+ return static_cast<DeclContext *>(const_cast<ObjCContainerDecl*>(D));
+ }
+ static ObjCContainerDecl *castFromDeclContext(const DeclContext *DC) {
+ return static_cast<ObjCContainerDecl *>(const_cast<DeclContext*>(DC));
+ }
+};
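+
+// Illustrative sketch (not part of the original header): the filtered
+// iterators above make it easy to walk, e.g., only the instance methods of a
+// container. 'CD' is a hypothetical ObjCContainerDecl pointer.
+//
+//   for (ObjCContainerDecl::instmeth_iterator I = CD->instmeth_begin(),
+//                                             E = CD->instmeth_end();
+//        I != E; ++I) {
+//     ObjCMethodDecl *IM = *I;  // an instance method of the container
+//   }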
+
+/// ObjCInterfaceDecl - Represents an ObjC class declaration. For example:
+///
+/// // MostPrimitive declares no super class (not particularly useful).
+/// @interface MostPrimitive
+/// // no instance variables or methods.
+/// @end
+///
+/// // NSResponder inherits from NSObject & implements NSCoding (a protocol).
+/// @interface NSResponder : NSObject <NSCoding>
+/// { // instance variables are represented by ObjCIvarDecl.
+/// id nextResponder; // nextResponder instance variable.
+/// }
+/// - (NSResponder *)nextResponder; // return a pointer to NSResponder.
+/// - (void)mouseMoved:(NSEvent *)theEvent; // return void, takes a pointer
+/// @end // to an NSEvent.
+///
+/// Unlike C/C++, forward class declarations are accomplished with @class.
+/// Unlike C/C++, @class allows for a list of classes to be forward declared.
+/// Unlike C++, ObjC is a single-rooted class model. In Cocoa, classes
+/// typically inherit from NSObject (an exception is NSProxy).
+///
+class ObjCInterfaceDecl : public ObjCContainerDecl
+ , public Redeclarable<ObjCInterfaceDecl> {
+ virtual void anchor();
+
+  /// TypeForDecl - This indicates the Type object that represents this
+  /// declaration. It is a cache maintained by ASTContext::getObjCInterfaceType.
+ mutable const Type *TypeForDecl;
+ friend class ASTContext;
+
+ struct DefinitionData {
+ /// \brief The definition of this class, for quick access from any
+ /// declaration.
+ ObjCInterfaceDecl *Definition;
+
+ /// Class's super class.
+ ObjCInterfaceDecl *SuperClass;
+
+ /// Protocols referenced in the @interface declaration
+ ObjCProtocolList ReferencedProtocols;
+
+    /// Protocols referenced in both the @interface and class extensions.
+ ObjCList<ObjCProtocolDecl> AllReferencedProtocols;
+
+ /// \brief List of categories and class extensions defined for this class.
+ ///
+ /// Categories are stored as a linked list in the AST, since the categories
+ /// and class extensions come long after the initial interface declaration,
+ /// and we avoid dynamically-resized arrays in the AST wherever possible.
+ ObjCCategoryDecl *CategoryList;
+
+ /// IvarList - List of all ivars defined by this class; including class
+ /// extensions and implementation. This list is built lazily.
+ ObjCIvarDecl *IvarList;
+
+ /// \brief Indicates that the contents of this Objective-C class will be
+ /// completed by the external AST source when required.
+ mutable bool ExternallyCompleted : 1;
+
+ /// \brief The location of the superclass, if any.
+ SourceLocation SuperClassLoc;
+
+    /// \brief The location of the last token in this declaration, before
+    /// the properties/methods. For example, this will be the '>', '}', or
+    /// identifier.
+ SourceLocation EndLoc;
+
+ DefinitionData() : Definition(), SuperClass(), CategoryList(), IvarList(),
+ ExternallyCompleted() { }
+ };
+
+ ObjCInterfaceDecl(DeclContext *DC, SourceLocation atLoc, IdentifierInfo *Id,
+ SourceLocation CLoc, ObjCInterfaceDecl *PrevDecl,
+ bool isInternal);
+
+ void LoadExternalDefinition() const;
+
+ /// \brief Contains a pointer to the data associated with this class,
+ /// which will be NULL if this class has not yet been defined.
+ DefinitionData *Data;
+
+ DefinitionData &data() const {
+ assert(Data != 0 && "Declaration has no definition!");
+ return *Data;
+ }
+
+ /// \brief Allocate the definition data for this class.
+ void allocateDefinitionData();
+
+ typedef Redeclarable<ObjCInterfaceDecl> redeclarable_base;
+ virtual ObjCInterfaceDecl *getNextRedeclaration() {
+ return RedeclLink.getNext();
+ }
+ virtual ObjCInterfaceDecl *getPreviousDeclImpl() {
+ return getPreviousDecl();
+ }
+ virtual ObjCInterfaceDecl *getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+ }
+
+public:
+ static ObjCInterfaceDecl *Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation atLoc,
+ IdentifierInfo *Id,
+ ObjCInterfaceDecl *PrevDecl,
+ SourceLocation ClassLoc = SourceLocation(),
+ bool isInternal = false);
+
+ static ObjCInterfaceDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
+ if (isThisDeclarationADefinition())
+ return ObjCContainerDecl::getSourceRange();
+
+ return SourceRange(getAtStartLoc(), getLocation());
+ }
+
+ /// \brief Indicate that this Objective-C class is complete, but that
+ /// the external AST source will be responsible for filling in its contents
+ /// when a complete class is required.
+ void setExternallyCompleted();
+
+ const ObjCProtocolList &getReferencedProtocols() const {
+ assert(hasDefinition() && "Caller did not check for forward reference!");
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ return data().ReferencedProtocols;
+ }
+
+ ObjCImplementationDecl *getImplementation() const;
+ void setImplementation(ObjCImplementationDecl *ImplD);
+
+ ObjCCategoryDecl *FindCategoryDeclaration(IdentifierInfo *CategoryId) const;
+
+ // Get the local instance/class method declared in a category.
+ ObjCMethodDecl *getCategoryInstanceMethod(Selector Sel) const;
+ ObjCMethodDecl *getCategoryClassMethod(Selector Sel) const;
+ ObjCMethodDecl *getCategoryMethod(Selector Sel, bool isInstance) const {
+ return isInstance ? getInstanceMethod(Sel)
+ : getClassMethod(Sel);
+ }
+
+ typedef ObjCProtocolList::iterator protocol_iterator;
+
+ protocol_iterator protocol_begin() const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return protocol_iterator();
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ return data().ReferencedProtocols.begin();
+ }
+ protocol_iterator protocol_end() const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return protocol_iterator();
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ return data().ReferencedProtocols.end();
+ }
+
+ typedef ObjCProtocolList::loc_iterator protocol_loc_iterator;
+
+ protocol_loc_iterator protocol_loc_begin() const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return protocol_loc_iterator();
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ return data().ReferencedProtocols.loc_begin();
+ }
+
+ protocol_loc_iterator protocol_loc_end() const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return protocol_loc_iterator();
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ return data().ReferencedProtocols.loc_end();
+ }
+
+ typedef ObjCList<ObjCProtocolDecl>::iterator all_protocol_iterator;
+
+ all_protocol_iterator all_referenced_protocol_begin() const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return all_protocol_iterator();
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ return data().AllReferencedProtocols.empty()
+ ? protocol_begin()
+ : data().AllReferencedProtocols.begin();
+ }
+ all_protocol_iterator all_referenced_protocol_end() const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return all_protocol_iterator();
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ return data().AllReferencedProtocols.empty()
+ ? protocol_end()
+ : data().AllReferencedProtocols.end();
+ }
+
+ typedef specific_decl_iterator<ObjCIvarDecl> ivar_iterator;
+
+ ivar_iterator ivar_begin() const {
+ if (const ObjCInterfaceDecl *Def = getDefinition())
+ return ivar_iterator(Def->decls_begin());
+
+ // FIXME: Should make sure no callers ever do this.
+ return ivar_iterator();
+ }
+ ivar_iterator ivar_end() const {
+ if (const ObjCInterfaceDecl *Def = getDefinition())
+ return ivar_iterator(Def->decls_end());
+
+ // FIXME: Should make sure no callers ever do this.
+ return ivar_iterator();
+ }
+
+ unsigned ivar_size() const {
+ return std::distance(ivar_begin(), ivar_end());
+ }
+
+ bool ivar_empty() const { return ivar_begin() == ivar_end(); }
+
+ ObjCIvarDecl *all_declared_ivar_begin();
+ const ObjCIvarDecl *all_declared_ivar_begin() const {
+ // Even though this modifies IvarList, it's conceptually const:
+ // the ivar chain is essentially a cached property of ObjCInterfaceDecl.
+ return const_cast<ObjCInterfaceDecl *>(this)->all_declared_ivar_begin();
+ }
+ void setIvarList(ObjCIvarDecl *ivar) { data().IvarList = ivar; }
+
+ /// setProtocolList - Set the list of protocols that this interface
+ /// implements.
+ void setProtocolList(ObjCProtocolDecl *const* List, unsigned Num,
+ const SourceLocation *Locs, ASTContext &C) {
+ data().ReferencedProtocols.set(List, Num, Locs, C);
+ }
+
+ /// mergeClassExtensionProtocolList - Merge class extension's protocol list
+ /// into the protocol list for this class.
+ void mergeClassExtensionProtocolList(ObjCProtocolDecl *const* List,
+ unsigned Num,
+ ASTContext &C);
+
+ /// \brief Determine whether this particular declaration of this class is
+ /// actually also a definition.
+ bool isThisDeclarationADefinition() const {
+ return Data && Data->Definition == this;
+ }
+
+ /// \brief Determine whether this class has been defined.
+ bool hasDefinition() const { return Data; }
+
+ /// \brief Retrieve the definition of this class, or NULL if this class
+ /// has been forward-declared (with @class) but not yet defined (with
+ /// @interface).
+ ObjCInterfaceDecl *getDefinition() {
+ return hasDefinition()? Data->Definition : 0;
+ }
+
+ /// \brief Retrieve the definition of this class, or NULL if this class
+ /// has been forward-declared (with @class) but not yet defined (with
+ /// @interface).
+ const ObjCInterfaceDecl *getDefinition() const {
+ return hasDefinition()? Data->Definition : 0;
+ }
+
+ /// \brief Starts the definition of this Objective-C class, taking it from
+ /// a forward declaration (@class) to a definition (@interface).
+ void startDefinition();
+
+ ObjCInterfaceDecl *getSuperClass() const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ return data().SuperClass;
+ }
+
+ void setSuperClass(ObjCInterfaceDecl * superCls) {
+ data().SuperClass =
+ (superCls && superCls->hasDefinition()) ? superCls->getDefinition()
+ : superCls;
+ }
+
+ ObjCCategoryDecl* getCategoryList() const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ return data().CategoryList;
+ }
+
+ void setCategoryList(ObjCCategoryDecl *category) {
+ data().CategoryList = category;
+ }
+
+ ObjCCategoryDecl* getFirstClassExtension() const;
+
+ ObjCPropertyDecl
+ *FindPropertyVisibleInPrimaryClass(IdentifierInfo *PropertyId) const;
+
+ /// isSuperClassOf - Return true if this class is the specified class or is a
+ /// super class of the specified interface class.
+ bool isSuperClassOf(const ObjCInterfaceDecl *I) const {
+ // If RHS is derived from LHS it is OK; else it is not OK.
+ while (I != NULL) {
+ if (declaresSameEntity(this, I))
+ return true;
+
+ I = I->getSuperClass();
+ }
+ return false;
+ }
+
+  /// isArcWeakrefUnavailable - Checks whether this class, or one of its super
+  /// classes, is incompatible with __weak references. Returns true if it is.
+ bool isArcWeakrefUnavailable() const {
+ const ObjCInterfaceDecl *Class = this;
+ while (Class) {
+ if (Class->hasAttr<ArcWeakrefUnavailableAttr>())
+ return true;
+ Class = Class->getSuperClass();
+ }
+ return false;
+ }
+
+  /// isObjCRequiresPropertyDefs - Checks whether this class, or one of its
+  /// super classes, requires that its properties are not auto-synthesized.
+  /// Returns that class declaration if so; 0 otherwise.
+ const ObjCInterfaceDecl *isObjCRequiresPropertyDefs() const {
+ const ObjCInterfaceDecl *Class = this;
+ while (Class) {
+ if (Class->hasAttr<ObjCRequiresPropertyDefsAttr>())
+ return Class;
+ Class = Class->getSuperClass();
+ }
+ return 0;
+ }
+
+ ObjCIvarDecl *lookupInstanceVariable(IdentifierInfo *IVarName,
+ ObjCInterfaceDecl *&ClassDeclared);
+ ObjCIvarDecl *lookupInstanceVariable(IdentifierInfo *IVarName) {
+ ObjCInterfaceDecl *ClassDeclared;
+ return lookupInstanceVariable(IVarName, ClassDeclared);
+ }
+
+ // Lookup a method. First, we search locally. If a method isn't
+ // found, we search referenced protocols and class categories.
+ ObjCMethodDecl *lookupMethod(Selector Sel, bool isInstance,
+ bool shallowCategoryLookup= false) const;
+ ObjCMethodDecl *lookupInstanceMethod(Selector Sel,
+ bool shallowCategoryLookup = false) const {
+ return lookupMethod(Sel, true/*isInstance*/, shallowCategoryLookup);
+ }
+ ObjCMethodDecl *lookupClassMethod(Selector Sel,
+ bool shallowCategoryLookup = false) const {
+ return lookupMethod(Sel, false/*isInstance*/, shallowCategoryLookup);
+ }
+ ObjCInterfaceDecl *lookupInheritedClass(const IdentifierInfo *ICName);
+
+  // Lookup a method in the class's implementation hierarchy.
+ ObjCMethodDecl *lookupPrivateMethod(const Selector &Sel, bool Instance=true);
+
+ SourceLocation getEndOfDefinitionLoc() const {
+ if (!hasDefinition())
+ return getLocation();
+
+ return data().EndLoc;
+ }
+
+ void setEndOfDefinitionLoc(SourceLocation LE) { data().EndLoc = LE; }
+
+ void setSuperClassLoc(SourceLocation Loc) { data().SuperClassLoc = Loc; }
+ SourceLocation getSuperClassLoc() const { return data().SuperClassLoc; }
+
+  /// isImplicitInterfaceDecl - Checks whether this is an implicitly declared
+  /// ObjCInterfaceDecl node. This is used for a legacy Objective-C
+  /// @implementation declaration without an @interface declaration.
+ bool isImplicitInterfaceDecl() const {
+ return hasDefinition() ? Data->Definition->isImplicit() : isImplicit();
+ }
+
+  /// ClassImplementsProtocol - Checks whether the 'lProto' protocol
+  /// has been implemented by this class, its super classes, or its categories
+  /// (if lookupCategory is true).
+ bool ClassImplementsProtocol(ObjCProtocolDecl *lProto,
+ bool lookupCategory,
+ bool RHSIsQualifiedID = false);
+
+ typedef redeclarable_base::redecl_iterator redecl_iterator;
+ using redeclarable_base::redecls_begin;
+ using redeclarable_base::redecls_end;
+ using redeclarable_base::getPreviousDecl;
+ using redeclarable_base::getMostRecentDecl;
+
+ /// Retrieves the canonical declaration of this Objective-C class.
+ ObjCInterfaceDecl *getCanonicalDecl() {
+ return getFirstDeclaration();
+ }
+ const ObjCInterfaceDecl *getCanonicalDecl() const {
+ return getFirstDeclaration();
+ }
+
+ // Low-level accessor
+ const Type *getTypeForDecl() const { return TypeForDecl; }
+ void setTypeForDecl(const Type *TD) const { TypeForDecl = TD; }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ObjCInterfaceDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == ObjCInterface; }
+
+ friend class ASTReader;
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
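+
+// Illustrative sketch (not part of the original header): most accessors above
+// require a definition, so callers typically test hasDefinition() before
+// walking, e.g., the superclass chain. 'ID' is a hypothetical pointer.
+//
+//   for (const ObjCInterfaceDecl *C = ID; C; C = C->getSuperClass()) {
+//     if (!C->hasDefinition())
+//       break;  // only forward-declared with @class; nothing more to inspect
+//     // ... inspect C's protocols, ivars, methods ...
+//   }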
+
+/// ObjCIvarDecl - Represents an ObjC instance variable. In general, ObjC
+/// instance variables are identical to C fields. The only exception is that
+/// Objective-C supports C++-style access control. For example:
+///
+/// @interface IvarExample : NSObject
+/// {
+/// id defaultToProtected;
+/// @public:
+/// id canBePublic; // same as C++.
+/// @protected:
+/// id canBeProtected; // same as C++.
+/// @package:
+/// id canBePackage; // framework visibility (not available in C++).
+/// }
+///
+class ObjCIvarDecl : public FieldDecl {
+ virtual void anchor();
+
+public:
+ enum AccessControl {
+ None, Private, Protected, Public, Package
+ };
+
+private:
+ ObjCIvarDecl(ObjCContainerDecl *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ QualType T, TypeSourceInfo *TInfo, AccessControl ac, Expr *BW,
+ bool synthesized)
+ : FieldDecl(ObjCIvar, DC, StartLoc, IdLoc, Id, T, TInfo, BW,
+ /*Mutable=*/false, /*HasInit=*/false),
+ NextIvar(0), DeclAccess(ac), Synthesized(synthesized) {}
+
+public:
+ static ObjCIvarDecl *Create(ASTContext &C, ObjCContainerDecl *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, QualType T,
+ TypeSourceInfo *TInfo,
+ AccessControl ac, Expr *BW = NULL,
+ bool synthesized=false);
+
+ static ObjCIvarDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ /// \brief Return the class interface that this ivar is logically contained
+ /// in; this is either the interface where the ivar was declared, or the
+ /// interface the ivar is conceptually a part of in the case of synthesized
+ /// ivars.
+ const ObjCInterfaceDecl *getContainingInterface() const;
+
+ ObjCIvarDecl *getNextIvar() { return NextIvar; }
+ const ObjCIvarDecl *getNextIvar() const { return NextIvar; }
+ void setNextIvar(ObjCIvarDecl *ivar) { NextIvar = ivar; }
+
+ void setAccessControl(AccessControl ac) { DeclAccess = ac; }
+
+ AccessControl getAccessControl() const { return AccessControl(DeclAccess); }
+
+ AccessControl getCanonicalAccessControl() const {
+ return DeclAccess == None ? Protected : AccessControl(DeclAccess);
+ }
+
+ void setSynthesize(bool synth) { Synthesized = synth; }
+ bool getSynthesize() const { return Synthesized; }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ObjCIvarDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == ObjCIvar; }
+private:
+  /// NextIvar - Next ivar in the list of ivars declared in this class, its
+  /// class extensions, and its implementation.
+ ObjCIvarDecl *NextIvar;
+
+ // NOTE: VC++ treats enums as signed, avoid using the AccessControl enum
+ unsigned DeclAccess : 3;
+ unsigned Synthesized : 1;
+};
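+
+// Illustrative sketch (not part of the original header): the lazily built
+// ivar chain can be walked through the NextIvar links. 'ID' is a hypothetical
+// ObjCInterfaceDecl pointer that has a definition.
+//
+//   for (const ObjCIvarDecl *IV = ID->all_declared_ivar_begin(); IV;
+//        IV = IV->getNextIvar()) {
+//     // IV covers ivars from the class, its extensions, and implementation.
+//   }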
+
+
+/// ObjCAtDefsFieldDecl - Represents a field declaration created by an
+/// @defs(...).
+class ObjCAtDefsFieldDecl : public FieldDecl {
+ virtual void anchor();
+ ObjCAtDefsFieldDecl(DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ QualType T, Expr *BW)
+ : FieldDecl(ObjCAtDefsField, DC, StartLoc, IdLoc, Id, T,
+ /*TInfo=*/0, // FIXME: Do ObjCAtDefs have declarators ?
+ BW, /*Mutable=*/false, /*HasInit=*/false) {}
+
+public:
+ static ObjCAtDefsFieldDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ QualType T, Expr *BW);
+
+ static ObjCAtDefsFieldDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ObjCAtDefsFieldDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == ObjCAtDefsField; }
+};
+
+/// ObjCProtocolDecl - Represents a protocol declaration. ObjC protocols
+/// declare a pure abstract type (i.e., no instance variables are permitted).
+/// Protocols originally drew inspiration from C++ pure virtual functions (a C++
+/// feature with nice semantics and lousy syntax:-). Here is an example:
+///
+/// @protocol NSDraggingInfo <refproto1, refproto2>
+/// - (NSWindow *)draggingDestinationWindow;
+/// - (NSImage *)draggedImage;
+/// @end
+///
+/// This says that NSDraggingInfo requires two methods and requires everything
+/// that the two "referenced protocols" 'refproto1' and 'refproto2' require as
+/// well.
+///
+/// @interface ImplementsNSDraggingInfo : NSObject <NSDraggingInfo>
+/// @end
+///
+/// ObjC protocols inspired Java interfaces. Unlike Java, ObjC classes and
+/// protocols are in distinct namespaces. For example, Cocoa defines both
+/// an NSObject protocol and class (which isn't allowed in Java). As a result,
+/// protocols are referenced using angle brackets as follows:
+///
+/// id <NSDraggingInfo> anyObjectThatImplementsNSDraggingInfo;
+///
+class ObjCProtocolDecl : public ObjCContainerDecl,
+ public Redeclarable<ObjCProtocolDecl> {
+ virtual void anchor();
+
+ struct DefinitionData {
+    /// \brief The declaration that defines this protocol.
+ ObjCProtocolDecl *Definition;
+
+ /// \brief Referenced protocols
+ ObjCProtocolList ReferencedProtocols;
+ };
+
+ DefinitionData *Data;
+
+ DefinitionData &data() const {
+ assert(Data && "Objective-C protocol has no definition!");
+ return *Data;
+ }
+
+ ObjCProtocolDecl(DeclContext *DC, IdentifierInfo *Id,
+ SourceLocation nameLoc, SourceLocation atStartLoc,
+ ObjCProtocolDecl *PrevDecl);
+
+ void allocateDefinitionData();
+
+ typedef Redeclarable<ObjCProtocolDecl> redeclarable_base;
+ virtual ObjCProtocolDecl *getNextRedeclaration() {
+ return RedeclLink.getNext();
+ }
+ virtual ObjCProtocolDecl *getPreviousDeclImpl() {
+ return getPreviousDecl();
+ }
+ virtual ObjCProtocolDecl *getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+ }
+
+public:
+ static ObjCProtocolDecl *Create(ASTContext &C, DeclContext *DC,
+ IdentifierInfo *Id,
+ SourceLocation nameLoc,
+ SourceLocation atStartLoc,
+ ObjCProtocolDecl *PrevDecl);
+
+ static ObjCProtocolDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ const ObjCProtocolList &getReferencedProtocols() const {
+ assert(hasDefinition() && "No definition available!");
+ return data().ReferencedProtocols;
+ }
+ typedef ObjCProtocolList::iterator protocol_iterator;
+ protocol_iterator protocol_begin() const {
+ if (!hasDefinition())
+ return protocol_iterator();
+
+ return data().ReferencedProtocols.begin();
+ }
+ protocol_iterator protocol_end() const {
+ if (!hasDefinition())
+ return protocol_iterator();
+
+ return data().ReferencedProtocols.end();
+ }
+ typedef ObjCProtocolList::loc_iterator protocol_loc_iterator;
+ protocol_loc_iterator protocol_loc_begin() const {
+ if (!hasDefinition())
+ return protocol_loc_iterator();
+
+ return data().ReferencedProtocols.loc_begin();
+ }
+ protocol_loc_iterator protocol_loc_end() const {
+ if (!hasDefinition())
+ return protocol_loc_iterator();
+
+ return data().ReferencedProtocols.loc_end();
+ }
+ unsigned protocol_size() const {
+ if (!hasDefinition())
+ return 0;
+
+ return data().ReferencedProtocols.size();
+ }
+
+ /// setProtocolList - Set the list of protocols that this interface
+ /// implements.
+ void setProtocolList(ObjCProtocolDecl *const*List, unsigned Num,
+ const SourceLocation *Locs, ASTContext &C) {
+ assert(Data && "Protocol is not defined");
+ data().ReferencedProtocols.set(List, Num, Locs, C);
+ }
+
+ ObjCProtocolDecl *lookupProtocolNamed(IdentifierInfo *PName);
+
+ // Lookup a method. First, we search locally. If a method isn't
+ // found, we search referenced protocols and class categories.
+ ObjCMethodDecl *lookupMethod(Selector Sel, bool isInstance) const;
+ ObjCMethodDecl *lookupInstanceMethod(Selector Sel) const {
+ return lookupMethod(Sel, true/*isInstance*/);
+ }
+ ObjCMethodDecl *lookupClassMethod(Selector Sel) const {
+ return lookupMethod(Sel, false/*isInstance*/);
+ }
+
+ /// \brief Determine whether this protocol has a definition.
+ bool hasDefinition() const { return Data != 0; }
+
+ /// \brief Retrieve the definition of this protocol, if any.
+ ObjCProtocolDecl *getDefinition() {
+ return Data? Data->Definition : 0;
+ }
+
+ /// \brief Retrieve the definition of this protocol, if any.
+ const ObjCProtocolDecl *getDefinition() const {
+ return Data? Data->Definition : 0;
+ }
+
+ /// \brief Determine whether this particular declaration is also the
+ /// definition.
+ bool isThisDeclarationADefinition() const {
+ return getDefinition() == this;
+ }
+
+ /// \brief Starts the definition of this Objective-C protocol.
+ void startDefinition();
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
+ if (isThisDeclarationADefinition())
+ return ObjCContainerDecl::getSourceRange();
+
+ return SourceRange(getAtStartLoc(), getLocation());
+ }
+
+ typedef redeclarable_base::redecl_iterator redecl_iterator;
+ using redeclarable_base::redecls_begin;
+ using redeclarable_base::redecls_end;
+ using redeclarable_base::getPreviousDecl;
+ using redeclarable_base::getMostRecentDecl;
+
+ /// Retrieves the canonical declaration of this Objective-C protocol.
+ ObjCProtocolDecl *getCanonicalDecl() {
+ return getFirstDeclaration();
+ }
+ const ObjCProtocolDecl *getCanonicalDecl() const {
+ return getFirstDeclaration();
+ }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ObjCProtocolDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == ObjCProtocol; }
+
+ friend class ASTReader;
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
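+
+// Illustrative sketch (not part of the original header): protocols, like
+// interfaces, may be forward-declared, so lookups are gated on
+// hasDefinition(). 'PD' and 'Sel' are hypothetical values from the caller.
+//
+//   if (PD->hasDefinition())
+//     if (ObjCMethodDecl *MD = PD->lookupInstanceMethod(Sel)) {
+//       // MD is declared by this protocol or one of its referenced protocols
+//     }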
+
+/// ObjCCategoryDecl - Represents a category declaration. A category allows
+/// you to add methods to an existing class (without subclassing or modifying
+/// the original class interface or implementation:-). Categories don't allow
+/// you to add instance data. The following example adds "myMethod" to all
+/// NSView's within a process:
+///
+/// @interface NSView (MyViewMethods)
+/// - myMethod;
+/// @end
+///
+/// Categories also allow you to split the implementation of a class across
+/// several files (a feature more naturally supported in C++).
+///
+/// Categories were originally inspired by dynamic languages such as Common
+/// Lisp and Smalltalk. More traditional class-based languages (C++, Java)
+/// don't support this level of dynamism, which is both powerful and dangerous.
+///
+class ObjCCategoryDecl : public ObjCContainerDecl {
+ virtual void anchor();
+
+ /// Interface belonging to this category
+ ObjCInterfaceDecl *ClassInterface;
+
+ /// referenced protocols in this category.
+ ObjCProtocolList ReferencedProtocols;
+
+ /// Next category belonging to this class.
+ /// FIXME: this should not be a singly-linked list. Move storage elsewhere.
+ ObjCCategoryDecl *NextClassCategory;
+
+  /// true if the class extension has at least one bitfield ivar.
+ bool HasSynthBitfield : 1;
+
+ /// \brief The location of the category name in this declaration.
+ SourceLocation CategoryNameLoc;
+
+ /// class extension may have private ivars.
+ SourceLocation IvarLBraceLoc;
+ SourceLocation IvarRBraceLoc;
+
+ ObjCCategoryDecl(DeclContext *DC, SourceLocation AtLoc,
+ SourceLocation ClassNameLoc, SourceLocation CategoryNameLoc,
+ IdentifierInfo *Id, ObjCInterfaceDecl *IDecl,
+ SourceLocation IvarLBraceLoc=SourceLocation(),
+ SourceLocation IvarRBraceLoc=SourceLocation())
+ : ObjCContainerDecl(ObjCCategory, DC, Id, ClassNameLoc, AtLoc),
+ ClassInterface(IDecl), NextClassCategory(0), HasSynthBitfield(false),
+ CategoryNameLoc(CategoryNameLoc),
+ IvarLBraceLoc(IvarLBraceLoc), IvarRBraceLoc(IvarRBraceLoc) {
+ }
+public:
+
+ static ObjCCategoryDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation AtLoc,
+ SourceLocation ClassNameLoc,
+ SourceLocation CategoryNameLoc,
+ IdentifierInfo *Id,
+ ObjCInterfaceDecl *IDecl,
+ SourceLocation IvarLBraceLoc=SourceLocation(),
+ SourceLocation IvarRBraceLoc=SourceLocation());
+ static ObjCCategoryDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ ObjCInterfaceDecl *getClassInterface() { return ClassInterface; }
+ const ObjCInterfaceDecl *getClassInterface() const { return ClassInterface; }
+
+ ObjCCategoryImplDecl *getImplementation() const;
+ void setImplementation(ObjCCategoryImplDecl *ImplD);
+
+ /// setProtocolList - Set the list of protocols that this interface
+ /// implements.
+ void setProtocolList(ObjCProtocolDecl *const*List, unsigned Num,
+ const SourceLocation *Locs, ASTContext &C) {
+ ReferencedProtocols.set(List, Num, Locs, C);
+ }
+
+ const ObjCProtocolList &getReferencedProtocols() const {
+ return ReferencedProtocols;
+ }
+
+ typedef ObjCProtocolList::iterator protocol_iterator;
+ protocol_iterator protocol_begin() const {return ReferencedProtocols.begin();}
+ protocol_iterator protocol_end() const { return ReferencedProtocols.end(); }
+ unsigned protocol_size() const { return ReferencedProtocols.size(); }
+ typedef ObjCProtocolList::loc_iterator protocol_loc_iterator;
+ protocol_loc_iterator protocol_loc_begin() const {
+ return ReferencedProtocols.loc_begin();
+ }
+ protocol_loc_iterator protocol_loc_end() const {
+ return ReferencedProtocols.loc_end();
+ }
+
+ ObjCCategoryDecl *getNextClassCategory() const { return NextClassCategory; }
+
+ bool IsClassExtension() const { return getIdentifier() == 0; }
+ const ObjCCategoryDecl *getNextClassExtension() const;
+
+ bool hasSynthBitfield() const { return HasSynthBitfield; }
+ void setHasSynthBitfield (bool val) { HasSynthBitfield = val; }
+
+ typedef specific_decl_iterator<ObjCIvarDecl> ivar_iterator;
+ ivar_iterator ivar_begin() const {
+ return ivar_iterator(decls_begin());
+ }
+ ivar_iterator ivar_end() const {
+ return ivar_iterator(decls_end());
+ }
+ unsigned ivar_size() const {
+ return std::distance(ivar_begin(), ivar_end());
+ }
+ bool ivar_empty() const {
+ return ivar_begin() == ivar_end();
+ }
+
+ SourceLocation getCategoryNameLoc() const { return CategoryNameLoc; }
+ void setCategoryNameLoc(SourceLocation Loc) { CategoryNameLoc = Loc; }
+
+ void setIvarLBraceLoc(SourceLocation Loc) { IvarLBraceLoc = Loc; }
+ SourceLocation getIvarLBraceLoc() const { return IvarLBraceLoc; }
+ void setIvarRBraceLoc(SourceLocation Loc) { IvarRBraceLoc = Loc; }
+ SourceLocation getIvarRBraceLoc() const { return IvarRBraceLoc; }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ObjCCategoryDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == ObjCCategory; }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
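+
+// Illustrative sketch (not part of the original header): categories hang off
+// their class as a singly-linked list (see NextClassCategory above), so they
+// are visited like this. 'ID' is a hypothetical ObjCInterfaceDecl pointer
+// with a definition.
+//
+//   for (ObjCCategoryDecl *Cat = ID->getCategoryList(); Cat;
+//        Cat = Cat->getNextClassCategory()) {
+//     if (Cat->IsClassExtension()) {
+//       // anonymous category:  @interface Foo () ... @end
+//     }
+//   }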
+
+class ObjCImplDecl : public ObjCContainerDecl {
+ virtual void anchor();
+
+ /// Class interface for this class/category implementation
+ ObjCInterfaceDecl *ClassInterface;
+
+protected:
+ ObjCImplDecl(Kind DK, DeclContext *DC,
+ ObjCInterfaceDecl *classInterface,
+ SourceLocation nameLoc, SourceLocation atStartLoc)
+ : ObjCContainerDecl(DK, DC,
+ classInterface? classInterface->getIdentifier() : 0,
+ nameLoc, atStartLoc),
+ ClassInterface(classInterface) {}
+
+public:
+ const ObjCInterfaceDecl *getClassInterface() const { return ClassInterface; }
+ ObjCInterfaceDecl *getClassInterface() { return ClassInterface; }
+ void setClassInterface(ObjCInterfaceDecl *IFace);
+
+ void addInstanceMethod(ObjCMethodDecl *method) {
+ // FIXME: Context should be set correctly before we get here.
+ method->setLexicalDeclContext(this);
+ addDecl(method);
+ }
+ void addClassMethod(ObjCMethodDecl *method) {
+ // FIXME: Context should be set correctly before we get here.
+ method->setLexicalDeclContext(this);
+ addDecl(method);
+ }
+
+ void addPropertyImplementation(ObjCPropertyImplDecl *property);
+
+ ObjCPropertyImplDecl *FindPropertyImplDecl(IdentifierInfo *propertyId) const;
+ ObjCPropertyImplDecl *FindPropertyImplIvarDecl(IdentifierInfo *ivarId) const;
+
+ // Iterator access to properties.
+ typedef specific_decl_iterator<ObjCPropertyImplDecl> propimpl_iterator;
+ propimpl_iterator propimpl_begin() const {
+ return propimpl_iterator(decls_begin());
+ }
+ propimpl_iterator propimpl_end() const {
+ return propimpl_iterator(decls_end());
+ }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ObjCImplDecl *D) { return true; }
+ static bool classofKind(Kind K) {
+ return K >= firstObjCImpl && K <= lastObjCImpl;
+ }
+};
+
+/// ObjCCategoryImplDecl - An object of this class encapsulates a category
+/// @implementation declaration. If a category declares a property, its
+/// implementation must be specified in the category's @implementation
+/// declaration. Example:
+/// @interface I @end
+/// @interface I(CATEGORY)
+/// @property int p1, d1;
+/// @end
+/// @implementation I(CATEGORY)
+/// @dynamic p1,d1;
+/// @end
+///
+/// ObjCCategoryImplDecl
+class ObjCCategoryImplDecl : public ObjCImplDecl {
+ virtual void anchor();
+
+ // Category name
+ IdentifierInfo *Id;
+
+ // Category name location
+ SourceLocation CategoryNameLoc;
+
+ ObjCCategoryImplDecl(DeclContext *DC, IdentifierInfo *Id,
+ ObjCInterfaceDecl *classInterface,
+ SourceLocation nameLoc, SourceLocation atStartLoc,
+ SourceLocation CategoryNameLoc)
+ : ObjCImplDecl(ObjCCategoryImpl, DC, classInterface, nameLoc, atStartLoc),
+ Id(Id), CategoryNameLoc(CategoryNameLoc) {}
+public:
+ static ObjCCategoryImplDecl *Create(ASTContext &C, DeclContext *DC,
+ IdentifierInfo *Id,
+ ObjCInterfaceDecl *classInterface,
+ SourceLocation nameLoc,
+ SourceLocation atStartLoc,
+ SourceLocation CategoryNameLoc);
+ static ObjCCategoryImplDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ /// getIdentifier - Get the identifier that names the category
+ /// interface associated with this implementation.
+ /// FIXME: This is a bad API, we are overriding the NamedDecl::getIdentifier()
+ /// to mean something different. For example:
+ /// ((NamedDecl *)SomeCategoryImplDecl)->getIdentifier()
+ /// returns the class interface name, whereas
+ /// ((ObjCCategoryImplDecl *)SomeCategoryImplDecl)->getIdentifier()
+ /// returns the category name.
+ IdentifierInfo *getIdentifier() const {
+ return Id;
+ }
+ void setIdentifier(IdentifierInfo *II) { Id = II; }
+
+ ObjCCategoryDecl *getCategoryDecl() const;
+
+ SourceLocation getCategoryNameLoc() const { return CategoryNameLoc; }
+
+  /// getName - Get the name of the identifier for the category associated
+  /// with this implementation, as a StringRef.
+ //
+ // FIXME: This is a bad API, we are overriding the NamedDecl::getName, to mean
+ // something different.
+ StringRef getName() const {
+ return Id ? Id->getNameStart() : "";
+ }
+
+  /// getNameAsCString - Get the name of the identifier for the category
+  /// associated with this implementation as a C string
+  /// (const char*).
+ //
+ // FIXME: Deprecated, move clients to getName().
+ const char *getNameAsCString() const {
+ return Id ? Id->getNameStart() : "";
+ }
+
+  /// @brief Get the name of the category associated with this implementation.
+ //
+ // FIXME: Deprecated, move clients to getName().
+ std::string getNameAsString() const {
+ return getName();
+ }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ObjCCategoryImplDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == ObjCCategoryImpl;}
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const ObjCCategoryImplDecl &CID);
+
+/// ObjCImplementationDecl - Represents a class definition - this is where
+/// method definitions are specified. For example:
+///
+/// @code
+/// @implementation MyClass
+/// - (void)myMethod { /* do something */ }
+/// @end
+/// @endcode
+///
+/// Typically, instance variables are specified in the class interface,
+/// *not* in the implementation. Nevertheless (for legacy reasons), we
+/// allow instance variables to be specified in the implementation. When
+/// specified, they need to be *identical* to the interface.
+///
+class ObjCImplementationDecl : public ObjCImplDecl {
+ virtual void anchor();
+ /// Implementation Class's super class.
+ ObjCInterfaceDecl *SuperClass;
+ /// @implementation may have private ivars.
+ SourceLocation IvarLBraceLoc;
+ SourceLocation IvarRBraceLoc;
+
+ /// Support for ivar initialization.
+ /// IvarInitializers - The arguments used to initialize the ivars
+ CXXCtorInitializer **IvarInitializers;
+ unsigned NumIvarInitializers;
+
+ /// true if class has a .cxx_[construct,destruct] method.
+ bool HasCXXStructors : 1;
+
+ /// true if the class extension has at least one bitfield ivar.
+ bool HasSynthBitfield : 1;
+
+ ObjCImplementationDecl(DeclContext *DC,
+ ObjCInterfaceDecl *classInterface,
+ ObjCInterfaceDecl *superDecl,
+ SourceLocation nameLoc, SourceLocation atStartLoc,
+ SourceLocation IvarLBraceLoc=SourceLocation(),
+ SourceLocation IvarRBraceLoc=SourceLocation())
+ : ObjCImplDecl(ObjCImplementation, DC, classInterface, nameLoc, atStartLoc),
+ SuperClass(superDecl), IvarLBraceLoc(IvarLBraceLoc),
+ IvarRBraceLoc(IvarRBraceLoc),
+ IvarInitializers(0), NumIvarInitializers(0),
+ HasCXXStructors(false), HasSynthBitfield(false){}
+public:
+ static ObjCImplementationDecl *Create(ASTContext &C, DeclContext *DC,
+ ObjCInterfaceDecl *classInterface,
+ ObjCInterfaceDecl *superDecl,
+ SourceLocation nameLoc,
+ SourceLocation atStartLoc,
+ SourceLocation IvarLBraceLoc=SourceLocation(),
+ SourceLocation IvarRBraceLoc=SourceLocation());
+
+ static ObjCImplementationDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ /// init_iterator - Iterates through the ivar initializer list.
+ typedef CXXCtorInitializer **init_iterator;
+
+ /// init_const_iterator - Iterates through the ivar initializer list.
+ typedef CXXCtorInitializer * const * init_const_iterator;
+
+ /// init_begin() - Retrieve an iterator to the first initializer.
+ init_iterator init_begin() { return IvarInitializers; }
+ /// init_begin() - Retrieve an iterator to the first initializer.
+ init_const_iterator init_begin() const { return IvarInitializers; }
+
+ /// init_end() - Retrieve an iterator past the last initializer.
+ init_iterator init_end() {
+ return IvarInitializers + NumIvarInitializers;
+ }
+ /// init_end() - Retrieve an iterator past the last initializer.
+ init_const_iterator init_end() const {
+ return IvarInitializers + NumIvarInitializers;
+ }
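+
+ // A minimal usage sketch (assuming a hypothetical ObjCImplementationDecl
+ // *ImpD whose ivar initializers have already been set):
+ //   for (ObjCImplementationDecl::init_const_iterator
+ //          I = ImpD->init_begin(), E = ImpD->init_end(); I != E; ++I) {
+ //     const CXXCtorInitializer *Init = *I;
+ //     // ...inspect the ivar initializer Init...
+ //   }
+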
+ /// getNumIvarInitializers - Number of ivars which must be initialized.
+ unsigned getNumIvarInitializers() const {
+ return NumIvarInitializers;
+ }
+
+ void setNumIvarInitializers(unsigned numIvarInitializers) {
+ NumIvarInitializers = numIvarInitializers;
+ }
+
+ void setIvarInitializers(ASTContext &C,
+ CXXCtorInitializer ** initializers,
+ unsigned numInitializers);
+
+ bool hasCXXStructors() const { return HasCXXStructors; }
+ void setHasCXXStructors(bool val) { HasCXXStructors = val; }
+
+ bool hasSynthBitfield() const { return HasSynthBitfield; }
+ void setHasSynthBitfield (bool val) { HasSynthBitfield = val; }
+
+ /// getIdentifier - Get the identifier that names the class
+ /// interface associated with this implementation.
+ IdentifierInfo *getIdentifier() const {
+ return getClassInterface()->getIdentifier();
+ }
+
+ /// getName - Get the name of identifier for the class interface associated
+ /// with this implementation as a StringRef.
+ //
+ // FIXME: This is a bad API; we are overriding NamedDecl::getName to mean
+ // something different.
+ StringRef getName() const {
+ assert(getIdentifier() && "Name is not a simple identifier");
+ return getIdentifier()->getName();
+ }
+
+ /// getNameAsCString - Get the name of identifier for the class
+ /// interface associated with this implementation as a C string
+ /// (const char*).
+ //
+ // FIXME: Move to StringRef API.
+ const char *getNameAsCString() const {
+ return getName().data();
+ }
+
+ /// @brief Get the name of the class associated with this implementation.
+ //
+ // FIXME: Move to StringRef API.
+ std::string getNameAsString() const {
+ return getName();
+ }
+
+ const ObjCInterfaceDecl *getSuperClass() const { return SuperClass; }
+ ObjCInterfaceDecl *getSuperClass() { return SuperClass; }
+
+ void setSuperClass(ObjCInterfaceDecl * superCls) { SuperClass = superCls; }
+
+ void setIvarLBraceLoc(SourceLocation Loc) { IvarLBraceLoc = Loc; }
+ SourceLocation getIvarLBraceLoc() const { return IvarLBraceLoc; }
+ void setIvarRBraceLoc(SourceLocation Loc) { IvarRBraceLoc = Loc; }
+ SourceLocation getIvarRBraceLoc() const { return IvarRBraceLoc; }
+
+ typedef specific_decl_iterator<ObjCIvarDecl> ivar_iterator;
+ ivar_iterator ivar_begin() const {
+ return ivar_iterator(decls_begin());
+ }
+ ivar_iterator ivar_end() const {
+ return ivar_iterator(decls_end());
+ }
+ unsigned ivar_size() const {
+ return std::distance(ivar_begin(), ivar_end());
+ }
+ bool ivar_empty() const {
+ return ivar_begin() == ivar_end();
+ }
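+
+ // A minimal usage sketch (assuming a hypothetical ObjCImplementationDecl
+ // *ImpD): list the ivars declared directly within the @implementation.
+ //   for (ivar_iterator I = ImpD->ivar_begin(), E = ImpD->ivar_end();
+ //        I != E; ++I)
+ //     llvm::errs() << (*I)->getName() << "\n";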
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ObjCImplementationDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == ObjCImplementation; }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
+
+raw_ostream &operator<<(raw_ostream &OS, const ObjCImplementationDecl &ID);
+
+/// ObjCCompatibleAliasDecl - Represents an alias of a class. The alias is
+/// declared as "@compatibility_alias alias class".
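+/// For example (an illustrative sketch with hypothetical class names):
+/// @code
+/// @compatibility_alias WidgetAlias Widget;
+/// @endcode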
+class ObjCCompatibleAliasDecl : public NamedDecl {
+ virtual void anchor();
+ /// Class that this is an alias of.
+ ObjCInterfaceDecl *AliasedClass;
+
+ ObjCCompatibleAliasDecl(DeclContext *DC, SourceLocation L, IdentifierInfo *Id,
+ ObjCInterfaceDecl* aliasedClass)
+ : NamedDecl(ObjCCompatibleAlias, DC, L, Id), AliasedClass(aliasedClass) {}
+public:
+ static ObjCCompatibleAliasDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L, IdentifierInfo *Id,
+ ObjCInterfaceDecl* aliasedClass);
+
+ static ObjCCompatibleAliasDecl *CreateDeserialized(ASTContext &C,
+ unsigned ID);
+
+ const ObjCInterfaceDecl *getClassInterface() const { return AliasedClass; }
+ ObjCInterfaceDecl *getClassInterface() { return AliasedClass; }
+ void setClassInterface(ObjCInterfaceDecl *D) { AliasedClass = D; }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ObjCCompatibleAliasDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == ObjCCompatibleAlias; }
+
+};
+
+/// ObjCPropertyDecl - Represents one property declaration in an interface.
+/// For example:
+/// @property (assign, readwrite) int MyProperty;
+///
+class ObjCPropertyDecl : public NamedDecl {
+ virtual void anchor();
+public:
+ enum PropertyAttributeKind {
+ OBJC_PR_noattr = 0x00,
+ OBJC_PR_readonly = 0x01,
+ OBJC_PR_getter = 0x02,
+ OBJC_PR_assign = 0x04,
+ OBJC_PR_readwrite = 0x08,
+ OBJC_PR_retain = 0x10,
+ OBJC_PR_copy = 0x20,
+ OBJC_PR_nonatomic = 0x40,
+ OBJC_PR_setter = 0x80,
+ OBJC_PR_atomic = 0x100,
+ OBJC_PR_weak = 0x200,
+ OBJC_PR_strong = 0x400,
+ OBJC_PR_unsafe_unretained = 0x800
+ // Adding a property attribute should change NumPropertyAttrsBits
+ };
+
+ enum {
+ /// \brief Number of bits fitting all the property attributes.
+ NumPropertyAttrsBits = 12
+ };
+
+ enum SetterKind { Assign, Retain, Copy, Weak };
+ enum PropertyControl { None, Required, Optional };
+private:
+ SourceLocation AtLoc; // location of @property
+ SourceLocation LParenLoc; // location of '(' starting attribute list or null.
+ TypeSourceInfo *DeclType;
+ unsigned PropertyAttributes : NumPropertyAttrsBits;
+ unsigned PropertyAttributesAsWritten : NumPropertyAttrsBits;
+ // @required/@optional
+ unsigned PropertyImplementation : 2;
+
+ Selector GetterName; // getter name, or NULL if no getter
+ Selector SetterName; // setter name, or NULL if no setter
+
+ ObjCMethodDecl *GetterMethodDecl; // Declaration of getter instance method
+ ObjCMethodDecl *SetterMethodDecl; // Declaration of setter instance method
+ ObjCIvarDecl *PropertyIvarDecl; // Synthesized ivar for this property
+
+ ObjCPropertyDecl(DeclContext *DC, SourceLocation L, IdentifierInfo *Id,
+ SourceLocation AtLocation, SourceLocation LParenLocation,
+ TypeSourceInfo *T)
+ : NamedDecl(ObjCProperty, DC, L, Id), AtLoc(AtLocation),
+ LParenLoc(LParenLocation), DeclType(T),
+ PropertyAttributes(OBJC_PR_noattr),
+ PropertyAttributesAsWritten(OBJC_PR_noattr),
+ PropertyImplementation(None),
+ GetterName(Selector()),
+ SetterName(Selector()),
+ GetterMethodDecl(0), SetterMethodDecl(0) , PropertyIvarDecl(0) {}
+public:
+ static ObjCPropertyDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ IdentifierInfo *Id, SourceLocation AtLocation,
+ SourceLocation LParenLocation,
+ TypeSourceInfo *T,
+ PropertyControl propControl = None);
+
+ static ObjCPropertyDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ SourceLocation getAtLoc() const { return AtLoc; }
+ void setAtLoc(SourceLocation L) { AtLoc = L; }
+
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+ void setLParenLoc(SourceLocation L) { LParenLoc = L; }
+
+ TypeSourceInfo *getTypeSourceInfo() const { return DeclType; }
+ QualType getType() const { return DeclType->getType(); }
+ void setType(TypeSourceInfo *T) { DeclType = T; }
+
+ PropertyAttributeKind getPropertyAttributes() const {
+ return PropertyAttributeKind(PropertyAttributes);
+ }
+ void setPropertyAttributes(PropertyAttributeKind PRVal) {
+ PropertyAttributes |= PRVal;
+ }
+
+ PropertyAttributeKind getPropertyAttributesAsWritten() const {
+ return PropertyAttributeKind(PropertyAttributesAsWritten);
+ }
+
+ bool hasWrittenStorageAttribute() const {
+ return PropertyAttributesAsWritten & (OBJC_PR_assign | OBJC_PR_copy |
+ OBJC_PR_unsafe_unretained | OBJC_PR_retain | OBJC_PR_strong |
+ OBJC_PR_weak);
+ }
+
+ void setPropertyAttributesAsWritten(PropertyAttributeKind PRVal) {
+ PropertyAttributesAsWritten = PRVal;
+ }
+
+ void makeitReadWriteAttribute(void) {
+ PropertyAttributes &= ~OBJC_PR_readonly;
+ PropertyAttributes |= OBJC_PR_readwrite;
+ }
+
+ // Helper methods for accessing attributes.
+
+ /// isReadOnly - Return true iff the property is read-only (has no setter).
+ bool isReadOnly() const {
+ return (PropertyAttributes & OBJC_PR_readonly);
+ }
+
+ /// isAtomic - Return true if the property is atomic.
+ bool isAtomic() const {
+ return (PropertyAttributes & OBJC_PR_atomic);
+ }
+
+ /// isRetaining - Return true if the property retains its value.
+ bool isRetaining() const {
+ return (PropertyAttributes &
+ (OBJC_PR_retain | OBJC_PR_strong | OBJC_PR_copy));
+ }
+
+ /// getSetterKind - Return the method used for doing assignment in
+ /// the property setter. This is only valid if the property has been
+ /// defined to have a setter.
+ SetterKind getSetterKind() const {
+ if (PropertyAttributes & OBJC_PR_strong)
+ return getType()->isBlockPointerType() ? Copy : Retain;
+ if (PropertyAttributes & OBJC_PR_retain)
+ return Retain;
+ if (PropertyAttributes & OBJC_PR_copy)
+ return Copy;
+ if (PropertyAttributes & OBJC_PR_weak)
+ return Weak;
+ return Assign;
+ }
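+ // For example (illustrative): a property declared "@property (copy)
+ // NSString *title;" has setter kind Copy, while "@property (weak) id
+ // delegate;" has setter kind Weak, per the attribute checks above.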
+
+ Selector getGetterName() const { return GetterName; }
+ void setGetterName(Selector Sel) { GetterName = Sel; }
+
+ Selector getSetterName() const { return SetterName; }
+ void setSetterName(Selector Sel) { SetterName = Sel; }
+
+ ObjCMethodDecl *getGetterMethodDecl() const { return GetterMethodDecl; }
+ void setGetterMethodDecl(ObjCMethodDecl *gDecl) { GetterMethodDecl = gDecl; }
+
+ ObjCMethodDecl *getSetterMethodDecl() const { return SetterMethodDecl; }
+ void setSetterMethodDecl(ObjCMethodDecl *gDecl) { SetterMethodDecl = gDecl; }
+
+ // Related to @optional/@required declared in @protocol
+ void setPropertyImplementation(PropertyControl pc) {
+ PropertyImplementation = pc;
+ }
+ PropertyControl getPropertyImplementation() const {
+ return PropertyControl(PropertyImplementation);
+ }
+
+ void setPropertyIvarDecl(ObjCIvarDecl *Ivar) {
+ PropertyIvarDecl = Ivar;
+ }
+ ObjCIvarDecl *getPropertyIvarDecl() const {
+ return PropertyIvarDecl;
+ }
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(AtLoc, getLocation());
+ }
+
+ /// Lookup a property by name in the specified DeclContext.
+ static ObjCPropertyDecl *findPropertyDecl(const DeclContext *DC,
+ IdentifierInfo *propertyID);
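+ // A minimal usage sketch (assuming a hypothetical DeclContext *DC and
+ // IdentifierInfo *PropId):
+ //   if (ObjCPropertyDecl *PD = ObjCPropertyDecl::findPropertyDecl(DC, PropId))
+ //     llvm::errs() << "found property " << PD->getName() << "\n";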
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ObjCPropertyDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == ObjCProperty; }
+};
+
+/// ObjCPropertyImplDecl - Represents implementation declaration of a property
+/// in a class or category implementation block. For example:
+/// @synthesize prop1 = ivar1;
+///
+class ObjCPropertyImplDecl : public Decl {
+public:
+ enum Kind {
+ Synthesize,
+ Dynamic
+ };
+private:
+ SourceLocation AtLoc; // location of @synthesize or @dynamic
+
+ /// \brief For @synthesize, the location of the ivar, if it was written in
+ /// the source code.
+ ///
+ /// \code
+ /// @synthesize a = b;
+ /// \endcode
+ SourceLocation IvarLoc;
+
+ /// Property declaration being implemented
+ ObjCPropertyDecl *PropertyDecl;
+
+ /// Null for @dynamic. Required for @synthesize.
+ ObjCIvarDecl *PropertyIvarDecl;
+
+ /// Null for @dynamic. Non-null if property must be copy-constructed in getter
+ Expr *GetterCXXConstructor;
+
+ /// Null for @dynamic. Non-null if property has assignment operator to call
+ /// in Setter synthesis.
+ Expr *SetterCXXAssignment;
+
+ ObjCPropertyImplDecl(DeclContext *DC, SourceLocation atLoc, SourceLocation L,
+ ObjCPropertyDecl *property,
+ Kind PK,
+ ObjCIvarDecl *ivarDecl,
+ SourceLocation ivarLoc)
+ : Decl(ObjCPropertyImpl, DC, L), AtLoc(atLoc),
+ IvarLoc(ivarLoc), PropertyDecl(property), PropertyIvarDecl(ivarDecl),
+ GetterCXXConstructor(0), SetterCXXAssignment(0) {
+ assert (PK == Dynamic || PropertyIvarDecl);
+ }
+
+public:
+ static ObjCPropertyImplDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation atLoc, SourceLocation L,
+ ObjCPropertyDecl *property,
+ Kind PK,
+ ObjCIvarDecl *ivarDecl,
+ SourceLocation ivarLoc);
+
+ static ObjCPropertyImplDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY;
+
+ SourceLocation getLocStart() const LLVM_READONLY { return AtLoc; }
+ void setAtLoc(SourceLocation Loc) { AtLoc = Loc; }
+
+ ObjCPropertyDecl *getPropertyDecl() const {
+ return PropertyDecl;
+ }
+ void setPropertyDecl(ObjCPropertyDecl *Prop) { PropertyDecl = Prop; }
+
+ Kind getPropertyImplementation() const {
+ return PropertyIvarDecl ? Synthesize : Dynamic;
+ }
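+ // For example (illustrative): an ObjCPropertyImplDecl created for
+ // "@synthesize prop = ivar;" has a non-null ivar and reports Synthesize,
+ // while one created for "@dynamic prop;" has no ivar and reports Dynamic.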
+
+ ObjCIvarDecl *getPropertyIvarDecl() const {
+ return PropertyIvarDecl;
+ }
+ SourceLocation getPropertyIvarDeclLoc() const { return IvarLoc; }
+
+ void setPropertyIvarDecl(ObjCIvarDecl *Ivar,
+ SourceLocation IvarLoc) {
+ PropertyIvarDecl = Ivar;
+ this->IvarLoc = IvarLoc;
+ }
+
+ Expr *getGetterCXXConstructor() const {
+ return GetterCXXConstructor;
+ }
+ void setGetterCXXConstructor(Expr *getterCXXConstructor) {
+ GetterCXXConstructor = getterCXXConstructor;
+ }
+
+ Expr *getSetterCXXAssignment() const {
+ return SetterCXXAssignment;
+ }
+ void setSetterCXXAssignment(Expr *setterCXXAssignment) {
+ SetterCXXAssignment = setterCXXAssignment;
+ }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ObjCPropertyImplDecl *D) { return true; }
+ static bool classofKind(Decl::Kind K) { return K == ObjCPropertyImpl; }
+
+ friend class ASTDeclReader;
+};
+
+} // end namespace clang
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h b/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h
new file mode 100644
index 0000000..36549ea
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclTemplate.h
@@ -0,0 +1,2106 @@
+//===-- DeclTemplate.h - Classes for representing C++ templates -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the C++ template declaration subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_DECLTEMPLATE_H
+#define LLVM_CLANG_AST_DECLTEMPLATE_H
+
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Redeclarable.h"
+#include "clang/AST/TemplateBase.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/Support/Compiler.h"
+#include <limits>
+
+namespace clang {
+
+class TemplateParameterList;
+class TemplateDecl;
+class RedeclarableTemplateDecl;
+class FunctionTemplateDecl;
+class ClassTemplateDecl;
+class ClassTemplatePartialSpecializationDecl;
+class TemplateTypeParmDecl;
+class NonTypeTemplateParmDecl;
+class TemplateTemplateParmDecl;
+class TypeAliasTemplateDecl;
+
+/// \brief Stores a template parameter of any kind.
+typedef llvm::PointerUnion3<TemplateTypeParmDecl*, NonTypeTemplateParmDecl*,
+ TemplateTemplateParmDecl*> TemplateParameter;
+
+/// TemplateParameterList - Stores a list of template parameters for a
+/// TemplateDecl and its derived classes.
+class TemplateParameterList {
+ /// The location of the 'template' keyword.
+ SourceLocation TemplateLoc;
+
+ /// The locations of the '<' and '>' angle brackets.
+ SourceLocation LAngleLoc, RAngleLoc;
+
+ /// The number of template parameters in this template
+ /// parameter list.
+ unsigned NumParams;
+
+protected:
+ TemplateParameterList(SourceLocation TemplateLoc, SourceLocation LAngleLoc,
+ NamedDecl **Params, unsigned NumParams,
+ SourceLocation RAngleLoc);
+
+public:
+ static TemplateParameterList *Create(const ASTContext &C,
+ SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ NamedDecl **Params,
+ unsigned NumParams,
+ SourceLocation RAngleLoc);
+
+ /// iterator - Iterates through the template parameters in this list.
+ typedef NamedDecl** iterator;
+
+ /// const_iterator - Iterates through the template parameters in this list.
+ typedef NamedDecl* const* const_iterator;
+
+ iterator begin() { return reinterpret_cast<NamedDecl **>(this + 1); }
+ const_iterator begin() const {
+ return reinterpret_cast<NamedDecl * const *>(this + 1);
+ }
+ iterator end() { return begin() + NumParams; }
+ const_iterator end() const { return begin() + NumParams; }
+
+ unsigned size() const { return NumParams; }
+
+ NamedDecl* getParam(unsigned Idx) {
+ assert(Idx < size() && "Template parameter index out-of-range");
+ return begin()[Idx];
+ }
+
+ const NamedDecl* getParam(unsigned Idx) const {
+ assert(Idx < size() && "Template parameter index out-of-range");
+ return begin()[Idx];
+ }
+
+ /// \brief Returns the minimum number of arguments needed to form a
+ /// template specialization. This may be fewer than the number of
+ /// template parameters, if some of the parameters have default
+ /// arguments or if there is a parameter pack.
+ unsigned getMinRequiredArguments() const;
+
+ /// \brief Get the depth of this template parameter list in the set of
+ /// template parameter lists.
+ ///
+ /// The first template parameter list in a declaration will have depth 0,
+ /// the second template parameter list will have depth 1, etc.
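+ ///
+ /// For example (an illustrative sketch):
+ /// \code
+ /// template<typename T>     // depth 0
+ /// struct X {
+ ///   template<typename U>   // depth 1
+ ///   void f(T, U);
+ /// };
+ /// \endcode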
+ unsigned getDepth() const;
+
+ SourceLocation getTemplateLoc() const { return TemplateLoc; }
+ SourceLocation getLAngleLoc() const { return LAngleLoc; }
+ SourceLocation getRAngleLoc() const { return RAngleLoc; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(TemplateLoc, RAngleLoc);
+ }
+};
+
+/// FixedSizeTemplateParameterList - Stores a list of template parameters for a
+/// TemplateDecl and its derived classes. Suitable for creating on the stack.
+template<size_t N>
+class FixedSizeTemplateParameterList : public TemplateParameterList {
+ NamedDecl *Params[N];
+
+public:
+ FixedSizeTemplateParameterList(SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ NamedDecl **Params, SourceLocation RAngleLoc) :
+ TemplateParameterList(TemplateLoc, LAngleLoc, Params, N, RAngleLoc) {
+ }
+};
+
+/// \brief A template argument list.
+class TemplateArgumentList {
+ /// \brief The template argument list.
+ ///
+ /// The integer value will be non-zero to indicate that this
+ /// template argument list does own the pointer.
+ llvm::PointerIntPair<const TemplateArgument *, 1> Arguments;
+
+ /// \brief The number of template arguments in this template
+ /// argument list.
+ unsigned NumArguments;
+
+ TemplateArgumentList(const TemplateArgumentList &Other); // DO NOT IMPL
+ void operator=(const TemplateArgumentList &Other); // DO NOT IMPL
+
+ TemplateArgumentList(const TemplateArgument *Args, unsigned NumArgs,
+ bool Owned)
+ : Arguments(Args, Owned), NumArguments(NumArgs) { }
+
+public:
+ /// \brief Type used to indicate that the template argument list itself is a
+ /// stack object. It does not own its template arguments.
+ enum OnStackType { OnStack };
+
+ /// \brief Create a new template argument list that copies the given set of
+ /// template arguments.
+ static TemplateArgumentList *CreateCopy(ASTContext &Context,
+ const TemplateArgument *Args,
+ unsigned NumArgs);
+
+ /// \brief Construct a new, temporary template argument list on the stack.
+ ///
+ /// The template argument list does not own the template arguments
+ /// provided.
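+ ///
+ /// A minimal usage sketch (assuming \c Args points at \c NumArgs template
+ /// arguments that outlive the list):
+ /// \code
+ /// TemplateArgumentList ArgList(TemplateArgumentList::OnStack, Args, NumArgs);
+ /// \endcode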
+ explicit TemplateArgumentList(OnStackType,
+ const TemplateArgument *Args, unsigned NumArgs)
+ : Arguments(Args, false), NumArguments(NumArgs) { }
+
+ /// \brief Produces a shallow copy of the given template argument list.
+ ///
+ /// This operation assumes that the input argument list outlives it.
+ /// This takes the list as a pointer to avoid looking like a copy
+ /// constructor, since this really isn't safe to use that way.
+ explicit TemplateArgumentList(const TemplateArgumentList *Other)
+ : Arguments(Other->data(), false), NumArguments(Other->size()) { }
+
+ /// \brief Retrieve the template argument at a given index.
+ const TemplateArgument &get(unsigned Idx) const {
+ assert(Idx < NumArguments && "Invalid template argument index");
+ return data()[Idx];
+ }
+
+ /// \brief Retrieve the template argument at a given index.
+ const TemplateArgument &operator[](unsigned Idx) const { return get(Idx); }
+
+ /// \brief Retrieve the number of template arguments in this
+ /// template argument list.
+ unsigned size() const { return NumArguments; }
+
+ /// \brief Retrieve a pointer to the template argument list.
+ const TemplateArgument *data() const {
+ return Arguments.getPointer();
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Kinds of Templates
+//===----------------------------------------------------------------------===//
+
+/// TemplateDecl - The base class of all kinds of template declarations (e.g.,
+/// class, function, etc.). The TemplateDecl class stores the list of template
+/// parameters and a reference to the templated scoped declaration: the
+/// underlying AST node.
+class TemplateDecl : public NamedDecl {
+ virtual void anchor();
+protected:
+ // This is probably never used.
+ TemplateDecl(Kind DK, DeclContext *DC, SourceLocation L,
+ DeclarationName Name)
+ : NamedDecl(DK, DC, L, Name), TemplatedDecl(0), TemplateParams(0) { }
+
+ // Construct a template decl with the given name and parameters.
+ // Used when there is no templated element (tt-params, alias?).
+ TemplateDecl(Kind DK, DeclContext *DC, SourceLocation L,
+ DeclarationName Name, TemplateParameterList *Params)
+ : NamedDecl(DK, DC, L, Name), TemplatedDecl(0), TemplateParams(Params) { }
+
+ // Construct a template decl with name, parameters, and templated element.
+ TemplateDecl(Kind DK, DeclContext *DC, SourceLocation L,
+ DeclarationName Name, TemplateParameterList *Params,
+ NamedDecl *Decl)
+ : NamedDecl(DK, DC, L, Name), TemplatedDecl(Decl),
+ TemplateParams(Params) { }
+public:
+ /// Get the list of template parameters
+ TemplateParameterList *getTemplateParameters() const {
+ return TemplateParams;
+ }
+
+ /// Get the underlying, templated declaration.
+ NamedDecl *getTemplatedDecl() const { return TemplatedDecl; }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const TemplateDecl *D) { return true; }
+ static bool classof(const RedeclarableTemplateDecl *D) { return true; }
+ static bool classof(const FunctionTemplateDecl *D) { return true; }
+ static bool classof(const ClassTemplateDecl *D) { return true; }
+ static bool classof(const TemplateTemplateParmDecl *D) { return true; }
+ static bool classof(const TypeAliasTemplateDecl *D) { return true; }
+ static bool classofKind(Kind K) {
+ return K >= firstTemplate && K <= lastTemplate;
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(TemplateParams->getTemplateLoc(),
+ TemplatedDecl->getSourceRange().getEnd());
+ }
+
+protected:
+ NamedDecl *TemplatedDecl;
+ TemplateParameterList* TemplateParams;
+
+public:
+ /// \brief Initialize the underlying templated declaration and
+ /// template parameters.
+ void init(NamedDecl *templatedDecl, TemplateParameterList* templateParams) {
+ assert(TemplatedDecl == 0 && "TemplatedDecl already set!");
+ assert(TemplateParams == 0 && "TemplateParams already set!");
+ TemplatedDecl = templatedDecl;
+ TemplateParams = templateParams;
+ }
+};
+
+/// \brief Provides information about a function template specialization,
+/// which is a FunctionDecl that has been explicitly specialized or
+/// instantiated from a function template.
+class FunctionTemplateSpecializationInfo : public llvm::FoldingSetNode {
+ FunctionTemplateSpecializationInfo(FunctionDecl *FD,
+ FunctionTemplateDecl *Template,
+ TemplateSpecializationKind TSK,
+ const TemplateArgumentList *TemplateArgs,
+ const ASTTemplateArgumentListInfo *TemplateArgsAsWritten,
+ SourceLocation POI)
+ : Function(FD),
+ Template(Template, TSK - 1),
+ TemplateArguments(TemplateArgs),
+ TemplateArgumentsAsWritten(TemplateArgsAsWritten),
+ PointOfInstantiation(POI) { }
+
+public:
+ static FunctionTemplateSpecializationInfo *
+ Create(ASTContext &C, FunctionDecl *FD, FunctionTemplateDecl *Template,
+ TemplateSpecializationKind TSK,
+ const TemplateArgumentList *TemplateArgs,
+ const TemplateArgumentListInfo *TemplateArgsAsWritten,
+ SourceLocation POI);
+
+ /// \brief The function template specialization that this structure
+ /// describes.
+ FunctionDecl *Function;
+
+ /// \brief The function template from which this function template
+ /// specialization was generated.
+ ///
+ /// The two bits contain the top 4 values of TemplateSpecializationKind,
+ /// stored as the kind minus 1 (TSK_Undeclared cannot occur here).
+ llvm::PointerIntPair<FunctionTemplateDecl *, 2> Template;
+
+ /// \brief The template arguments used to produce the function template
+ /// specialization from the function template.
+ const TemplateArgumentList *TemplateArguments;
+
+ /// \brief The template arguments as written in the sources, if provided.
+ const ASTTemplateArgumentListInfo *TemplateArgumentsAsWritten;
+
+ /// \brief The point at which this function template specialization was
+ /// first instantiated.
+ SourceLocation PointOfInstantiation;
+
+ /// \brief Retrieve the template from which this function was specialized.
+ FunctionTemplateDecl *getTemplate() const { return Template.getPointer(); }
+
+ /// \brief Determine what kind of template specialization this is.
+ TemplateSpecializationKind getTemplateSpecializationKind() const {
+ return (TemplateSpecializationKind)(Template.getInt() + 1);
+ }
+
+ bool isExplicitSpecialization() const {
+ return getTemplateSpecializationKind() == TSK_ExplicitSpecialization;
+ }
+
+ /// \brief Set the template specialization kind.
+ void setTemplateSpecializationKind(TemplateSpecializationKind TSK) {
+ assert(TSK != TSK_Undeclared &&
+ "Cannot encode TSK_Undeclared for a function template specialization");
+ Template.setInt(TSK - 1);
+ }
+
+ /// \brief Retrieve the first point of instantiation of this function
+ /// template specialization.
+ ///
+ /// The point of instantiation may be an invalid source location if this
+ /// function has yet to be instantiated.
+ SourceLocation getPointOfInstantiation() const {
+ return PointOfInstantiation;
+ }
+
+ /// \brief Set the (first) point of instantiation of this function template
+ /// specialization.
+ void setPointOfInstantiation(SourceLocation POI) {
+ PointOfInstantiation = POI;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, TemplateArguments->data(),
+ TemplateArguments->size(),
+ Function->getASTContext());
+ }
+
+ static void
+ Profile(llvm::FoldingSetNodeID &ID, const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs, ASTContext &Context) {
+ ID.AddInteger(NumTemplateArgs);
+ for (unsigned Arg = 0; Arg != NumTemplateArgs; ++Arg)
+ TemplateArgs[Arg].Profile(ID, Context);
+ }
+};
+
+/// \brief Provides information about a specialization of a member of a class
+/// template, which may be a member function, static data member,
+/// member class or member enumeration.
+class MemberSpecializationInfo {
+ // The member declaration from which this member was instantiated, and the
+ // manner in which the instantiation occurred (in the lower two bits).
+ llvm::PointerIntPair<NamedDecl *, 2> MemberAndTSK;
+
+ // The point at which this member was first instantiated.
+ SourceLocation PointOfInstantiation;
+
+public:
+ explicit
+ MemberSpecializationInfo(NamedDecl *IF, TemplateSpecializationKind TSK,
+ SourceLocation POI = SourceLocation())
+ : MemberAndTSK(IF, TSK - 1), PointOfInstantiation(POI) {
+ assert(TSK != TSK_Undeclared &&
+ "Cannot encode undeclared template specializations for members");
+ }
+
+ /// \brief Retrieve the member declaration from which this member was
+ /// instantiated.
+ NamedDecl *getInstantiatedFrom() const { return MemberAndTSK.getPointer(); }
+
+ /// \brief Determine what kind of template specialization this is.
+ TemplateSpecializationKind getTemplateSpecializationKind() const {
+ return (TemplateSpecializationKind)(MemberAndTSK.getInt() + 1);
+ }
+
+ /// \brief Set the template specialization kind.
+ void setTemplateSpecializationKind(TemplateSpecializationKind TSK) {
+ assert(TSK != TSK_Undeclared &&
+ "Cannot encode undeclared template specializations for members");
+ MemberAndTSK.setInt(TSK - 1);
+ }
+
+ /// \brief Retrieve the first point of instantiation of this member.
+ /// If the point of instantiation is an invalid location, then this member
+ /// has not yet been instantiated.
+ SourceLocation getPointOfInstantiation() const {
+ return PointOfInstantiation;
+ }
+
+ /// \brief Set the first point of instantiation.
+ void setPointOfInstantiation(SourceLocation POI) {
+ PointOfInstantiation = POI;
+ }
+};
+
+/// \brief Provides information about a dependent function-template
+/// specialization declaration. Since explicit function template
+/// specialization and instantiation declarations can only appear in
+/// namespace scope, and you can only specialize a member of a
+/// fully-specialized class, the only way to get one of these is in
+/// a friend declaration like the following:
+///
+/// template <class T> void foo(T);
+/// template <class T> class A {
+/// friend void foo<>(T);
+/// };
+class DependentFunctionTemplateSpecializationInfo {
+ union {
+ // Force sizeof to be a multiple of sizeof(void*) so that the
+ // trailing data is aligned.
+ void *Aligner;
+
+ struct {
+ /// The number of potential template candidates.
+ unsigned NumTemplates;
+
+ /// The number of template arguments.
+ unsigned NumArgs;
+ } d;
+ };
+
+ /// The locations of the left and right angle brackets.
+ SourceRange AngleLocs;
+
+ FunctionTemplateDecl * const *getTemplates() const {
+ return reinterpret_cast<FunctionTemplateDecl*const*>(this+1);
+ }
+
+public:
+ DependentFunctionTemplateSpecializationInfo(
+ const UnresolvedSetImpl &Templates,
+ const TemplateArgumentListInfo &TemplateArgs);
+
+ /// \brief Returns the number of function templates that this might
+ /// be a specialization of.
+ unsigned getNumTemplates() const {
+ return d.NumTemplates;
+ }
+
+ /// \brief Returns the i'th template candidate.
+ FunctionTemplateDecl *getTemplate(unsigned I) const {
+ assert(I < getNumTemplates() && "template index out of range");
+ return getTemplates()[I];
+ }
+
+ /// \brief Returns the explicit template arguments that were given.
+ const TemplateArgumentLoc *getTemplateArgs() const {
+ return reinterpret_cast<const TemplateArgumentLoc*>(
+ &getTemplates()[getNumTemplates()]);
+ }
+
+ /// \brief Returns the number of explicit template arguments that were given.
+ unsigned getNumTemplateArgs() const {
+ return d.NumArgs;
+ }
+
+ /// \brief Returns the nth template argument.
+ const TemplateArgumentLoc &getTemplateArg(unsigned I) const {
+ assert(I < getNumTemplateArgs() && "template arg index out of range");
+ return getTemplateArgs()[I];
+ }
+
+ SourceLocation getLAngleLoc() const {
+ return AngleLocs.getBegin();
+ }
+
+ SourceLocation getRAngleLoc() const {
+ return AngleLocs.getEnd();
+ }
+};
+
+/// Declaration of a redeclarable template.
+class RedeclarableTemplateDecl : public TemplateDecl,
+ public Redeclarable<RedeclarableTemplateDecl>
+{
+ typedef Redeclarable<RedeclarableTemplateDecl> redeclarable_base;
+ virtual RedeclarableTemplateDecl *getNextRedeclaration() {
+ return RedeclLink.getNext();
+ }
+ virtual RedeclarableTemplateDecl *getPreviousDeclImpl() {
+ return getPreviousDecl();
+ }
+ virtual RedeclarableTemplateDecl *getMostRecentDeclImpl() {
+ return getMostRecentDecl();
+ }
+
+protected:
+ template <typename EntryType> struct SpecEntryTraits {
+ typedef EntryType DeclType;
+
+ static DeclType *getMostRecentDecl(EntryType *D) {
+ return D->getMostRecentDecl();
+ }
+ };
+
+ template <typename EntryType,
+ typename _SETraits = SpecEntryTraits<EntryType>,
+ typename _DeclType = typename _SETraits::DeclType>
+ class SpecIterator : public std::iterator<std::forward_iterator_tag,
+ _DeclType*, ptrdiff_t,
+ _DeclType*, _DeclType*> {
+ typedef _SETraits SETraits;
+ typedef _DeclType DeclType;
+
+ typedef typename llvm::FoldingSet<EntryType>::iterator SetIteratorType;
+
+ SetIteratorType SetIter;
+
+ public:
+ SpecIterator() : SetIter() {}
+ SpecIterator(SetIteratorType SetIter) : SetIter(SetIter) {}
+
+ DeclType *operator*() const {
+ return SETraits::getMostRecentDecl(&*SetIter);
+ }
+ DeclType *operator->() const { return **this; }
+
+ SpecIterator &operator++() { ++SetIter; return *this; }
+ SpecIterator operator++(int) {
+ SpecIterator tmp(*this);
+ ++(*this);
+ return tmp;
+ }
+
+ bool operator==(SpecIterator Other) const {
+ return SetIter == Other.SetIter;
+ }
+ bool operator!=(SpecIterator Other) const {
+ return SetIter != Other.SetIter;
+ }
+ };
+
+ template <typename EntryType>
+ SpecIterator<EntryType> makeSpecIterator(llvm::FoldingSet<EntryType> &Specs,
+ bool isEnd) {
+ return SpecIterator<EntryType>(isEnd ? Specs.end() : Specs.begin());
+ }
+
+ template <class EntryType> typename SpecEntryTraits<EntryType>::DeclType*
+ findSpecializationImpl(llvm::FoldingSet<EntryType> &Specs,
+ const TemplateArgument *Args, unsigned NumArgs,
+ void *&InsertPos);
+
+ struct CommonBase {
+ CommonBase() : InstantiatedFromMember(0, false) { }
+
+ /// \brief The template from which this was most
+ /// directly instantiated (or null).
+ ///
+ /// The boolean value indicates whether this template
+ /// was explicitly specialized.
+ llvm::PointerIntPair<RedeclarableTemplateDecl*, 1, bool>
+ InstantiatedFromMember;
+ };
+
+ /// \brief Pointer to the common data shared by all declarations of this
+ /// template.
+ CommonBase *Common;
+
+ /// \brief Retrieves the "common" pointer shared by all (re-)declarations of
+ /// the same template. Calling this routine may implicitly allocate memory
+ /// for the common pointer.
+ CommonBase *getCommonPtr();
+
+ virtual CommonBase *newCommon(ASTContext &C) = 0;
+
+ // Construct a template decl with name, parameters, and templated element.
+ RedeclarableTemplateDecl(Kind DK, DeclContext *DC, SourceLocation L,
+ DeclarationName Name, TemplateParameterList *Params,
+ NamedDecl *Decl)
+ : TemplateDecl(DK, DC, L, Name, Params, Decl), Common() { }
+
+public:
+ template <class decl_type> friend class RedeclarableTemplate;
+
+ /// Retrieves the canonical declaration of this template.
+ RedeclarableTemplateDecl *getCanonicalDecl() { return getFirstDeclaration(); }
+ const RedeclarableTemplateDecl *getCanonicalDecl() const {
+ return getFirstDeclaration();
+ }
+
+ /// \brief Determines whether this template was a specialization of a
+ /// member template.
+ ///
+ /// In the following example, the function template \c X<int>::f and the
+ /// member template \c X<int>::Inner are member specializations.
+ ///
+ /// \code
+ /// template<typename T>
+ /// struct X {
+ /// template<typename U> void f(T, U);
+ /// template<typename U> struct Inner;
+ /// };
+ ///
+ /// template<> template<typename T>
+ /// void X<int>::f(int, T);
+ /// template<> template<typename T>
+ /// struct X<int>::Inner { /* ... */ };
+ /// \endcode
+ bool isMemberSpecialization() {
+ return getCommonPtr()->InstantiatedFromMember.getInt();
+ }
+
+ /// \brief Note that this member template is a specialization.
+ void setMemberSpecialization() {
+ assert(getCommonPtr()->InstantiatedFromMember.getPointer() &&
+ "Only member templates can be member template specializations");
+ getCommonPtr()->InstantiatedFromMember.setInt(true);
+ }
+
+ /// \brief Retrieve the member template from which this template was
+ /// instantiated, or NULL if this template was not instantiated from a
+ /// member template.
+ ///
+ /// A template is instantiated from a member template when the member
+ /// template itself is part of a class template (or member thereof). For
+ /// example, given
+ ///
+ /// \code
+ /// template<typename T>
+ /// struct X {
+ /// template<typename U> void f(T, U);
+ /// };
+ ///
+ /// void test(X<int> x) {
+ /// x.f(1, 'a');
+ /// };
+ /// \endcode
+ ///
+ /// \c X<int>::f is a FunctionTemplateDecl that describes the function
+ /// template
+ ///
+ /// \code
+ /// template<typename U> void X<int>::f(int, U);
+ /// \endcode
+ ///
+ /// which was itself created during the instantiation of \c X<int>. Calling
+ /// getInstantiatedFromMemberTemplate() on this FunctionTemplateDecl will
+ /// retrieve the FunctionTemplateDecl for the original template "f" within
+ /// the class template \c X<T>, i.e.,
+ ///
+ /// \code
+ /// template<typename T>
+ /// template<typename U>
+ /// void X<T>::f(T, U);
+ /// \endcode
+ RedeclarableTemplateDecl *getInstantiatedFromMemberTemplate() {
+ return getCommonPtr()->InstantiatedFromMember.getPointer();
+ }
+
+ void setInstantiatedFromMemberTemplate(RedeclarableTemplateDecl *TD) {
+ assert(!getCommonPtr()->InstantiatedFromMember.getPointer());
+ getCommonPtr()->InstantiatedFromMember.setPointer(TD);
+ }
+
+ typedef redeclarable_base::redecl_iterator redecl_iterator;
+ using redeclarable_base::redecls_begin;
+ using redeclarable_base::redecls_end;
+ using redeclarable_base::getPreviousDecl;
+ using redeclarable_base::getMostRecentDecl;
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const RedeclarableTemplateDecl *D) { return true; }
+ static bool classof(const FunctionTemplateDecl *D) { return true; }
+ static bool classof(const ClassTemplateDecl *D) { return true; }
+ static bool classof(const TypeAliasTemplateDecl *D) { return true; }
+ static bool classofKind(Kind K) {
+ return K >= firstRedeclarableTemplate && K <= lastRedeclarableTemplate;
+ }
+
+ friend class ASTReader;
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
+
+template <> struct RedeclarableTemplateDecl::
+SpecEntryTraits<FunctionTemplateSpecializationInfo> {
+ typedef FunctionDecl DeclType;
+
+ static DeclType *
+ getMostRecentDecl(FunctionTemplateSpecializationInfo *I) {
+ return I->Function->getMostRecentDecl();
+ }
+};
+
+/// Declaration of a template function.
+class FunctionTemplateDecl : public RedeclarableTemplateDecl {
+ static void DeallocateCommon(void *Ptr);
+
+protected:
+ /// \brief Data that is common to all of the declarations of a given
+ /// function template.
+ struct Common : CommonBase {
+ Common() : InjectedArgs(0) { }
+
+ /// \brief The function template specializations for this function
+ /// template, including explicit specializations and instantiations.
+ llvm::FoldingSet<FunctionTemplateSpecializationInfo> Specializations;
+
+ /// \brief The set of "injected" template arguments used within this
+ /// function template.
+ ///
+ /// This pointer refers to the template arguments (there are as
+ /// many template arguments as template parameters) for the function
+ /// template, and is allocated lazily, since most function templates do not
+ /// require the use of this information.
+ TemplateArgument *InjectedArgs;
+ };
+
+ FunctionTemplateDecl(DeclContext *DC, SourceLocation L, DeclarationName Name,
+ TemplateParameterList *Params, NamedDecl *Decl)
+ : RedeclarableTemplateDecl(FunctionTemplate, DC, L, Name, Params, Decl) { }
+
+ CommonBase *newCommon(ASTContext &C);
+
+ Common *getCommonPtr() {
+ return static_cast<Common *>(RedeclarableTemplateDecl::getCommonPtr());
+ }
+
+ friend class FunctionDecl;
+
+ /// \brief Retrieve the set of function template specializations of this
+ /// function template.
+ llvm::FoldingSet<FunctionTemplateSpecializationInfo> &getSpecializations() {
+ return getCommonPtr()->Specializations;
+ }
+
+ /// \brief Add a specialization of this function template.
+ ///
+ /// \param InsertPos Insert position in the FoldingSet, must have been
+ /// retrieved by an earlier call to findSpecialization().
+ void addSpecialization(FunctionTemplateSpecializationInfo* Info,
+ void *InsertPos);
+
+public:
+ /// Get the underlying function declaration of the template.
+ FunctionDecl *getTemplatedDecl() const {
+ return static_cast<FunctionDecl*>(TemplatedDecl);
+ }
+
+ /// Returns whether this template declaration defines the primary
+ /// pattern.
+ bool isThisDeclarationADefinition() const {
+ return getTemplatedDecl()->isThisDeclarationADefinition();
+ }
+
+ /// \brief Return the specialization with the provided arguments if it
+ /// exists; otherwise return NULL and set \p InsertPos to the position at
+ /// which a new specialization should be inserted.
+ FunctionDecl *findSpecialization(const TemplateArgument *Args,
+ unsigned NumArgs, void *&InsertPos);
+
+ FunctionTemplateDecl *getCanonicalDecl() {
+ return cast<FunctionTemplateDecl>(
+ RedeclarableTemplateDecl::getCanonicalDecl());
+ }
+ const FunctionTemplateDecl *getCanonicalDecl() const {
+ return cast<FunctionTemplateDecl>(
+ RedeclarableTemplateDecl::getCanonicalDecl());
+ }
+
+ /// \brief Retrieve the previous declaration of this function template, or
+ /// NULL if no such declaration exists.
+ FunctionTemplateDecl *getPreviousDecl() {
+ return cast_or_null<FunctionTemplateDecl>(
+ RedeclarableTemplateDecl::getPreviousDecl());
+ }
+
+ /// \brief Retrieve the previous declaration of this function template, or
+ /// NULL if no such declaration exists.
+ const FunctionTemplateDecl *getPreviousDecl() const {
+ return cast_or_null<FunctionTemplateDecl>(
+ RedeclarableTemplateDecl::getPreviousDecl());
+ }
+
+ FunctionTemplateDecl *getInstantiatedFromMemberTemplate() {
+ return cast_or_null<FunctionTemplateDecl>(
+ RedeclarableTemplateDecl::getInstantiatedFromMemberTemplate());
+ }
+
+ typedef SpecIterator<FunctionTemplateSpecializationInfo> spec_iterator;
+
+ spec_iterator spec_begin() {
+ return makeSpecIterator(getSpecializations(), false);
+ }
+
+ spec_iterator spec_end() {
+ return makeSpecIterator(getSpecializations(), true);
+ }
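+
+ // A minimal usage sketch (assuming a hypothetical FunctionTemplateDecl
+ // *FTD): visit every specialization recorded for this function template.
+ //   for (spec_iterator I = FTD->spec_begin(), E = FTD->spec_end();
+ //        I != E; ++I) {
+ //     FunctionDecl *Spec = *I;
+ //     // ...inspect Spec...
+ //   }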
+
+ /// \brief Retrieve the "injected" template arguments that correspond to the
+ /// template parameters of this function template.
+ ///
+ /// Although the C++ standard has no notion of the "injected" template
+ /// arguments for a function template, the notion is convenient when
+ /// we need to perform substitutions inside the definition of a function
+ /// template.
+ std::pair<const TemplateArgument *, unsigned> getInjectedTemplateArgs();
+
+ /// \brief Create a function template node.
+ static FunctionTemplateDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ DeclarationName Name,
+ TemplateParameterList *Params,
+ NamedDecl *Decl);
+
+ /// \brief Create an empty function template node.
+ static FunctionTemplateDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ // Implement isa/cast/dyncast support
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const FunctionTemplateDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == FunctionTemplate; }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
+
+//===----------------------------------------------------------------------===//
+// Kinds of Template Parameters
+//===----------------------------------------------------------------------===//
+
+/// The TemplateParmPosition class defines the position of a template parameter
+/// within a template parameter list. Because template parameters can be listed
+/// sequentially for out-of-line template members, each template parameter is
+/// given a Depth - the nesting of template parameter scopes - and a Position -
+/// the occurrence within the parameter list.
+/// This class is inherited privately by the different kinds of template
+/// parameters and is not part of the Decl hierarchy; it is just a facility.
+class TemplateParmPosition {
+protected:
+ // FIXME: This should probably never be called, but it's here as a
+ // placeholder.
+ TemplateParmPosition()
+ : Depth(0), Position(0)
+ { /* llvm_unreachable("Cannot create positionless template parameter"); */ }
+
+ TemplateParmPosition(unsigned D, unsigned P)
+ : Depth(D), Position(P)
+ { }
+
+ // FIXME: These probably don't need to be ints. int:5 for depth, int:8 for
+ // position? Maybe?
+ unsigned Depth;
+ unsigned Position;
+
+public:
+ /// Get the nesting depth of the template parameter.
+ unsigned getDepth() const { return Depth; }
+ void setDepth(unsigned D) { Depth = D; }
+
+ /// Get the position of the template parameter within its parameter list.
+ unsigned getPosition() const { return Position; }
+ void setPosition(unsigned P) { Position = P; }
+
+ /// Get the index of the template parameter within its parameter list.
+ unsigned getIndex() const { return Position; }
+};
+
+/// TemplateTypeParmDecl - Declaration of a template type parameter,
+/// e.g., "T" in
+/// @code
+/// template<typename T> class vector;
+/// @endcode
+class TemplateTypeParmDecl : public TypeDecl {
+ /// \brief Whether this template type parameter was declared with
+ /// the 'typename' keyword. If false, it was declared with the
+ /// 'class' keyword.
+ bool Typename : 1;
+
+ /// \brief Whether this template type parameter inherited its
+ /// default argument.
+ bool InheritedDefault : 1;
+
+ /// \brief The default template argument, if any.
+ TypeSourceInfo *DefaultArgument;
+
+ TemplateTypeParmDecl(DeclContext *DC, SourceLocation KeyLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ bool Typename)
+ : TypeDecl(TemplateTypeParm, DC, IdLoc, Id, KeyLoc), Typename(Typename),
+ InheritedDefault(false), DefaultArgument() { }
+
+ /// Sema creates these on the stack during auto type deduction.
+ friend class Sema;
+
+public:
+ static TemplateTypeParmDecl *Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation KeyLoc,
+ SourceLocation NameLoc,
+ unsigned D, unsigned P,
+ IdentifierInfo *Id, bool Typename,
+ bool ParameterPack);
+ static TemplateTypeParmDecl *CreateDeserialized(const ASTContext &C,
+ unsigned ID);
+
+ /// \brief Whether this template type parameter was declared with
+ /// the 'typename' keyword. If not, it was declared with the 'class'
+ /// keyword.
+ bool wasDeclaredWithTypename() const { return Typename; }
+
+ /// \brief Determine whether this template parameter has a default
+ /// argument.
+ bool hasDefaultArgument() const { return DefaultArgument != 0; }
+
+ /// \brief Retrieve the default argument, if any.
+ QualType getDefaultArgument() const { return DefaultArgument->getType(); }
+
+ /// \brief Retrieves the default argument's source information, if any.
+ TypeSourceInfo *getDefaultArgumentInfo() const { return DefaultArgument; }
+
+ /// \brief Retrieves the location of the default argument declaration.
+ SourceLocation getDefaultArgumentLoc() const;
+
+ /// \brief Determines whether the default argument was inherited
+ /// from a previous declaration of this template.
+ bool defaultArgumentWasInherited() const { return InheritedDefault; }
+
+ /// \brief Set the default argument for this template parameter, and
+ /// whether that default argument was inherited from another
+ /// declaration.
+ void setDefaultArgument(TypeSourceInfo *DefArg, bool Inherited) {
+ DefaultArgument = DefArg;
+ InheritedDefault = Inherited;
+ }
+
+ /// \brief Removes the default argument of this template parameter.
+ void removeDefaultArgument() {
+ DefaultArgument = 0;
+ InheritedDefault = false;
+ }
+
+ /// \brief Set whether this template type parameter was declared with
+ /// the 'typename' or 'class' keyword.
+ void setDeclaredWithTypename(bool withTypename) { Typename = withTypename; }
+
+ /// \brief Retrieve the depth of the template parameter.
+ unsigned getDepth() const;
+
+ /// \brief Retrieve the index of the template parameter.
+ unsigned getIndex() const;
+
+ /// \brief Returns whether this is a parameter pack.
+ bool isParameterPack() const;
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const TemplateTypeParmDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == TemplateTypeParm; }
+};
+
+/// NonTypeTemplateParmDecl - Declares a non-type template parameter,
+/// e.g., "Size" in
+/// @code
+/// template<int Size> class array { };
+/// @endcode
+class NonTypeTemplateParmDecl
+ : public DeclaratorDecl, protected TemplateParmPosition {
+ /// \brief The default template argument, if any, and whether or not
+ /// it was inherited.
+ llvm::PointerIntPair<Expr*, 1, bool> DefaultArgumentAndInherited;
+
+ // FIXME: Collapse this into TemplateParmPosition; or just move depth/index
+ // down here to save memory.
+
+ /// \brief Whether this non-type template parameter is a parameter pack.
+ bool ParameterPack;
+
+ /// \brief Whether this non-type template parameter is an "expanded"
+ /// parameter pack, meaning that its type is a pack expansion and we
+ /// already know the set of types that expansion expands to.
+ bool ExpandedParameterPack;
+
+ /// \brief The number of types in an expanded parameter pack.
+ unsigned NumExpandedTypes;
+
+ NonTypeTemplateParmDecl(DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, unsigned D, unsigned P,
+ IdentifierInfo *Id, QualType T,
+ bool ParameterPack, TypeSourceInfo *TInfo)
+ : DeclaratorDecl(NonTypeTemplateParm, DC, IdLoc, Id, T, TInfo, StartLoc),
+ TemplateParmPosition(D, P), DefaultArgumentAndInherited(0, false),
+ ParameterPack(ParameterPack), ExpandedParameterPack(false),
+ NumExpandedTypes(0)
+ { }
+
+ NonTypeTemplateParmDecl(DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, unsigned D, unsigned P,
+ IdentifierInfo *Id, QualType T,
+ TypeSourceInfo *TInfo,
+ const QualType *ExpandedTypes,
+ unsigned NumExpandedTypes,
+ TypeSourceInfo **ExpandedTInfos);
+
+ friend class ASTDeclReader;
+
+public:
+ static NonTypeTemplateParmDecl *
+ Create(const ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, unsigned D, unsigned P, IdentifierInfo *Id,
+ QualType T, bool ParameterPack, TypeSourceInfo *TInfo);
+
+ static NonTypeTemplateParmDecl *
+ Create(const ASTContext &C, DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, unsigned D, unsigned P, IdentifierInfo *Id,
+ QualType T, TypeSourceInfo *TInfo,
+ const QualType *ExpandedTypes, unsigned NumExpandedTypes,
+ TypeSourceInfo **ExpandedTInfos);
+
+ static NonTypeTemplateParmDecl *CreateDeserialized(ASTContext &C,
+ unsigned ID);
+ static NonTypeTemplateParmDecl *CreateDeserialized(ASTContext &C,
+ unsigned ID,
+ unsigned NumExpandedTypes);
+
+ using TemplateParmPosition::getDepth;
+ using TemplateParmPosition::setDepth;
+ using TemplateParmPosition::getPosition;
+ using TemplateParmPosition::setPosition;
+ using TemplateParmPosition::getIndex;
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ /// \brief Determine whether this template parameter has a default
+ /// argument.
+ bool hasDefaultArgument() const {
+ return DefaultArgumentAndInherited.getPointer() != 0;
+ }
+
+ /// \brief Retrieve the default argument, if any.
+ Expr *getDefaultArgument() const {
+ return DefaultArgumentAndInherited.getPointer();
+ }
+
+ /// \brief Retrieve the location of the default argument, if any.
+ SourceLocation getDefaultArgumentLoc() const;
+
+ /// \brief Determines whether the default argument was inherited
+ /// from a previous declaration of this template.
+ bool defaultArgumentWasInherited() const {
+ return DefaultArgumentAndInherited.getInt();
+ }
+
+ /// \brief Set the default argument for this template parameter, and
+ /// whether that default argument was inherited from another
+ /// declaration.
+ void setDefaultArgument(Expr *DefArg, bool Inherited) {
+ DefaultArgumentAndInherited.setPointer(DefArg);
+ DefaultArgumentAndInherited.setInt(Inherited);
+ }
+
+ /// \brief Removes the default argument of this template parameter.
+ void removeDefaultArgument() {
+ DefaultArgumentAndInherited.setPointer(0);
+ DefaultArgumentAndInherited.setInt(false);
+ }
+
+ /// \brief Whether this parameter is a non-type template parameter pack.
+ ///
+ /// If the parameter is a parameter pack, the type may be a
+ /// \c PackExpansionType. In the following example, the \c Dims parameter
+ /// is a parameter pack (whose type is 'unsigned').
+ ///
+ /// \code
+ /// template<typename T, unsigned ...Dims> struct multi_array;
+ /// \endcode
+ bool isParameterPack() const { return ParameterPack; }
+
+ /// \brief Whether this parameter is a non-type template parameter pack
+ /// that has different types at different positions.
+ ///
+ /// A parameter pack is an expanded parameter pack when the original
+ /// parameter pack's type was itself a pack expansion, and that expansion
+ /// has already been expanded. For example, given:
+ ///
+ /// \code
+ /// template<typename ...Types>
+ /// struct X {
+ /// template<Types ...Values>
+ /// struct Y { /* ... */ };
+ /// };
+ /// \endcode
+ ///
+ /// The parameter pack \c Values has a \c PackExpansionType as its type,
+ /// which expands \c Types. When \c Types is supplied with template arguments
+ /// by instantiating \c X, the instantiation of \c Values becomes an
+ /// expanded parameter pack. For example, instantiating
+ /// \c X<int, unsigned int> results in \c Values being an expanded parameter
+ /// pack with expansion types \c int and \c unsigned int.
+ ///
+ /// The \c getExpansionType() and \c getExpansionTypeSourceInfo() functions
+ /// return the expansion types.
+ bool isExpandedParameterPack() const { return ExpandedParameterPack; }
+
+ /// \brief Retrieves the number of expansion types in an expanded parameter
+ /// pack.
+ unsigned getNumExpansionTypes() const {
+ assert(ExpandedParameterPack && "Not an expansion parameter pack");
+ return NumExpandedTypes;
+ }
+
+ /// \brief Retrieve a particular expansion type within an expanded parameter
+ /// pack.
+ QualType getExpansionType(unsigned I) const {
+ assert(I < NumExpandedTypes && "Out-of-range expansion type index");
+ void * const *TypesAndInfos = reinterpret_cast<void * const*>(this + 1);
+ return QualType::getFromOpaquePtr(TypesAndInfos[2*I]);
+ }
+
+ /// \brief Retrieve a particular expansion type source info within an
+ /// expanded parameter pack.
+ TypeSourceInfo *getExpansionTypeSourceInfo(unsigned I) const {
+ assert(I < NumExpandedTypes && "Out-of-range expansion type index");
+ void * const *TypesAndInfos = reinterpret_cast<void * const*>(this + 1);
+ return static_cast<TypeSourceInfo *>(TypesAndInfos[2*I+1]);
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const NonTypeTemplateParmDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == NonTypeTemplateParm; }
+};
+
+/// TemplateTemplateParmDecl - Declares a template template parameter,
+/// e.g., "T" in
+/// @code
+/// template <template <typename> class T> class container { };
+/// @endcode
+/// A template template parameter is a TemplateDecl because it defines the
+/// name of a template and the template parameters allowable for substitution.
+class TemplateTemplateParmDecl : public TemplateDecl,
+ protected TemplateParmPosition
+{
+ virtual void anchor();
+
+ /// DefaultArgument - The default template argument, if any.
+ TemplateArgumentLoc DefaultArgument;
+ /// Whether or not the default argument was inherited.
+ bool DefaultArgumentWasInherited;
+
+ /// \brief Whether this parameter is a parameter pack.
+ bool ParameterPack;
+
+ TemplateTemplateParmDecl(DeclContext *DC, SourceLocation L,
+ unsigned D, unsigned P, bool ParameterPack,
+ IdentifierInfo *Id, TemplateParameterList *Params)
+ : TemplateDecl(TemplateTemplateParm, DC, L, Id, Params),
+ TemplateParmPosition(D, P), DefaultArgument(),
+ DefaultArgumentWasInherited(false), ParameterPack(ParameterPack)
+ { }
+
+public:
+ static TemplateTemplateParmDecl *Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation L, unsigned D,
+ unsigned P, bool ParameterPack,
+ IdentifierInfo *Id,
+ TemplateParameterList *Params);
+
+ static TemplateTemplateParmDecl *CreateDeserialized(ASTContext &C,
+ unsigned ID);
+
+ using TemplateParmPosition::getDepth;
+ using TemplateParmPosition::getPosition;
+ using TemplateParmPosition::getIndex;
+
+ /// \brief Whether this template template parameter is a template
+ /// parameter pack.
+ ///
+ /// \code
+  /// template<template<class T> class ...MetaFunctions> struct Apply;
+ /// \endcode
+ bool isParameterPack() const { return ParameterPack; }
+
+ /// \brief Determine whether this template parameter has a default
+ /// argument.
+ bool hasDefaultArgument() const {
+ return !DefaultArgument.getArgument().isNull();
+ }
+
+ /// \brief Retrieve the default argument, if any.
+ const TemplateArgumentLoc &getDefaultArgument() const {
+ return DefaultArgument;
+ }
+
+ /// \brief Retrieve the location of the default argument, if any.
+ SourceLocation getDefaultArgumentLoc() const;
+
+ /// \brief Determines whether the default argument was inherited
+ /// from a previous declaration of this template.
+ bool defaultArgumentWasInherited() const {
+ return DefaultArgumentWasInherited;
+ }
+
+ /// \brief Set the default argument for this template parameter, and
+ /// whether that default argument was inherited from another
+ /// declaration.
+ void setDefaultArgument(const TemplateArgumentLoc &DefArg, bool Inherited) {
+ DefaultArgument = DefArg;
+ DefaultArgumentWasInherited = Inherited;
+ }
+
+ /// \brief Removes the default argument of this template parameter.
+ void removeDefaultArgument() {
+ DefaultArgument = TemplateArgumentLoc();
+ DefaultArgumentWasInherited = false;
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ SourceLocation End = getLocation();
+ if (hasDefaultArgument() && !defaultArgumentWasInherited())
+ End = getDefaultArgument().getSourceRange().getEnd();
+ return SourceRange(getTemplateParameters()->getTemplateLoc(), End);
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const TemplateTemplateParmDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == TemplateTemplateParm; }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
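+
+// Illustrative sketch (hypothetical helper, not part of the original header):
+// per the accessors above, a parameter only "owns" its default argument when
+// one is present and it was not inherited from an earlier declaration.
+inline bool hasOwnDefaultArgument(const TemplateTemplateParmDecl *TTP) {
+  return TTP->hasDefaultArgument() && !TTP->defaultArgumentWasInherited();
+}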
+
+/// \brief Represents a class template specialization, which refers to
+/// a class template with a given set of template arguments.
+///
+/// Class template specializations represent both explicit
+/// specialization of class templates, as in the example below, and
+/// implicit instantiations of class templates.
+///
+/// \code
+/// template<typename T> class array;
+///
+/// template<>
+/// class array<bool> { }; // class template specialization array<bool>
+/// \endcode
+class ClassTemplateSpecializationDecl
+ : public CXXRecordDecl, public llvm::FoldingSetNode {
+
+ /// \brief Structure that stores information about a class template
+ /// specialization that was instantiated from a class template partial
+ /// specialization.
+ struct SpecializedPartialSpecialization {
+ /// \brief The class template partial specialization from which this
+ /// class template specialization was instantiated.
+ ClassTemplatePartialSpecializationDecl *PartialSpecialization;
+
+ /// \brief The template argument list deduced for the class template
+ /// partial specialization itself.
+ TemplateArgumentList *TemplateArgs;
+ };
+
+ /// \brief The template that this specialization specializes
+ llvm::PointerUnion<ClassTemplateDecl *, SpecializedPartialSpecialization *>
+ SpecializedTemplate;
+
+ /// \brief Further info for explicit template specialization/instantiation.
+ struct ExplicitSpecializationInfo {
+ /// \brief The type-as-written.
+ TypeSourceInfo *TypeAsWritten;
+ /// \brief The location of the extern keyword.
+ SourceLocation ExternLoc;
+ /// \brief The location of the template keyword.
+ SourceLocation TemplateKeywordLoc;
+
+ ExplicitSpecializationInfo()
+ : TypeAsWritten(0), ExternLoc(), TemplateKeywordLoc() {}
+ };
+
+ /// \brief Further info for explicit template specialization/instantiation.
+ /// Does not apply to implicit specializations.
+ ExplicitSpecializationInfo *ExplicitInfo;
+
+ /// \brief The template arguments used to describe this specialization.
+ TemplateArgumentList *TemplateArgs;
+
+ /// \brief The point where this template was instantiated (if any)
+ SourceLocation PointOfInstantiation;
+
+ /// \brief The kind of specialization this declaration refers to.
+ /// Really a value of type TemplateSpecializationKind.
+ unsigned SpecializationKind : 3;
+
+protected:
+ ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK, TagKind TK,
+ DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ ClassTemplateDecl *SpecializedTemplate,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ ClassTemplateSpecializationDecl *PrevDecl);
+
+ explicit ClassTemplateSpecializationDecl(Kind DK);
+
+public:
+ static ClassTemplateSpecializationDecl *
+ Create(ASTContext &Context, TagKind TK, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ ClassTemplateDecl *SpecializedTemplate,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ ClassTemplateSpecializationDecl *PrevDecl);
+ static ClassTemplateSpecializationDecl *
+ CreateDeserialized(ASTContext &C, unsigned ID);
+
+ virtual void getNameForDiagnostic(std::string &S,
+ const PrintingPolicy &Policy,
+ bool Qualified) const;
+
+ ClassTemplateSpecializationDecl *getMostRecentDecl() {
+ CXXRecordDecl *Recent
+ = cast<CXXRecordDecl>(CXXRecordDecl::getMostRecentDecl());
+ if (!isa<ClassTemplateSpecializationDecl>(Recent)) {
+ // FIXME: Does injected class name need to be in the redeclarations chain?
+ assert(Recent->isInjectedClassName() && Recent->getPreviousDecl());
+ Recent = Recent->getPreviousDecl();
+ }
+ return cast<ClassTemplateSpecializationDecl>(Recent);
+ }
+
+ /// \brief Retrieve the template that this specialization specializes.
+ ClassTemplateDecl *getSpecializedTemplate() const;
+
+ /// \brief Retrieve the template arguments of the class template
+ /// specialization.
+ const TemplateArgumentList &getTemplateArgs() const {
+ return *TemplateArgs;
+ }
+
+ /// \brief Determine the kind of specialization that this
+ /// declaration represents.
+ TemplateSpecializationKind getSpecializationKind() const {
+ return static_cast<TemplateSpecializationKind>(SpecializationKind);
+ }
+
+ bool isExplicitSpecialization() const {
+ return getSpecializationKind() == TSK_ExplicitSpecialization;
+ }
+
+ void setSpecializationKind(TemplateSpecializationKind TSK) {
+ SpecializationKind = TSK;
+ }
+
+ /// \brief Get the point of instantiation (if any), or null if none.
+ SourceLocation getPointOfInstantiation() const {
+ return PointOfInstantiation;
+ }
+
+ void setPointOfInstantiation(SourceLocation Loc) {
+ assert(Loc.isValid() && "point of instantiation must be valid!");
+ PointOfInstantiation = Loc;
+ }
+
+ /// \brief If this class template specialization is an instantiation of
+ /// a template (rather than an explicit specialization), return the
+ /// class template or class template partial specialization from which it
+ /// was instantiated.
+ llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *>
+ getInstantiatedFrom() const {
+ if (getSpecializationKind() != TSK_ImplicitInstantiation &&
+ getSpecializationKind() != TSK_ExplicitInstantiationDefinition &&
+ getSpecializationKind() != TSK_ExplicitInstantiationDeclaration)
+ return llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *>();
+
+ if (SpecializedPartialSpecialization *PartialSpec
+ = SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization*>())
+ return PartialSpec->PartialSpecialization;
+
+ return const_cast<ClassTemplateDecl*>(
+ SpecializedTemplate.get<ClassTemplateDecl*>());
+ }
+
+ /// \brief Retrieve the class template or class template partial
+ /// specialization which was specialized by this.
+ llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *>
+ getSpecializedTemplateOrPartial() const {
+ if (SpecializedPartialSpecialization *PartialSpec
+ = SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization*>())
+ return PartialSpec->PartialSpecialization;
+
+ return const_cast<ClassTemplateDecl*>(
+ SpecializedTemplate.get<ClassTemplateDecl*>());
+ }
+
+ /// \brief Retrieve the set of template arguments that should be used
+ /// to instantiate members of the class template or class template partial
+ /// specialization from which this class template specialization was
+ /// instantiated.
+ ///
+ /// \returns For a class template specialization instantiated from the primary
+ /// template, this function will return the same template arguments as
+ /// getTemplateArgs(). For a class template specialization instantiated from
+ /// a class template partial specialization, this function will return the
+ /// deduced template arguments for the class template partial specialization
+ /// itself.
+ const TemplateArgumentList &getTemplateInstantiationArgs() const {
+ if (SpecializedPartialSpecialization *PartialSpec
+ = SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization*>())
+ return *PartialSpec->TemplateArgs;
+
+ return getTemplateArgs();
+ }
+
+ /// \brief Note that this class template specialization is actually an
+ /// instantiation of the given class template partial specialization whose
+ /// template arguments have been deduced.
+ void setInstantiationOf(ClassTemplatePartialSpecializationDecl *PartialSpec,
+ TemplateArgumentList *TemplateArgs) {
+ assert(!SpecializedTemplate.is<SpecializedPartialSpecialization*>() &&
+ "Already set to a class template partial specialization!");
+ SpecializedPartialSpecialization *PS
+ = new (getASTContext()) SpecializedPartialSpecialization();
+ PS->PartialSpecialization = PartialSpec;
+ PS->TemplateArgs = TemplateArgs;
+ SpecializedTemplate = PS;
+ }
+
+ /// \brief Note that this class template specialization is an instantiation
+ /// of the given class template.
+ void setInstantiationOf(ClassTemplateDecl *TemplDecl) {
+ assert(!SpecializedTemplate.is<SpecializedPartialSpecialization*>() &&
+ "Previously set to a class template partial specialization!");
+ SpecializedTemplate = TemplDecl;
+ }
+
+ /// \brief Sets the type of this specialization as it was written by
+ /// the user. This will be a class template specialization type.
+ void setTypeAsWritten(TypeSourceInfo *T) {
+ if (!ExplicitInfo)
+ ExplicitInfo = new (getASTContext()) ExplicitSpecializationInfo;
+ ExplicitInfo->TypeAsWritten = T;
+ }
+ /// \brief Gets the type of this specialization as it was written by
+ /// the user, if it was so written.
+ TypeSourceInfo *getTypeAsWritten() const {
+ return ExplicitInfo ? ExplicitInfo->TypeAsWritten : 0;
+ }
+
+ /// \brief Gets the location of the extern keyword, if present.
+ SourceLocation getExternLoc() const {
+ return ExplicitInfo ? ExplicitInfo->ExternLoc : SourceLocation();
+ }
+ /// \brief Sets the location of the extern keyword.
+ void setExternLoc(SourceLocation Loc) {
+ if (!ExplicitInfo)
+ ExplicitInfo = new (getASTContext()) ExplicitSpecializationInfo;
+ ExplicitInfo->ExternLoc = Loc;
+ }
+
+ /// \brief Sets the location of the template keyword.
+ void setTemplateKeywordLoc(SourceLocation Loc) {
+ if (!ExplicitInfo)
+ ExplicitInfo = new (getASTContext()) ExplicitSpecializationInfo;
+ ExplicitInfo->TemplateKeywordLoc = Loc;
+ }
+ /// \brief Gets the location of the template keyword, if present.
+ SourceLocation getTemplateKeywordLoc() const {
+ return ExplicitInfo ? ExplicitInfo->TemplateKeywordLoc : SourceLocation();
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID, TemplateArgs->data(), TemplateArgs->size(), getASTContext());
+ }
+
+ static void
+ Profile(llvm::FoldingSetNodeID &ID, const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs, ASTContext &Context) {
+ ID.AddInteger(NumTemplateArgs);
+ for (unsigned Arg = 0; Arg != NumTemplateArgs; ++Arg)
+ TemplateArgs[Arg].Profile(ID, Context);
+ }
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classofKind(Kind K) {
+ return K >= firstClassTemplateSpecialization &&
+ K <= lastClassTemplateSpecialization;
+ }
+
+ static bool classof(const ClassTemplateSpecializationDecl *) {
+ return true;
+ }
+
+ static bool classof(const ClassTemplatePartialSpecializationDecl *) {
+ return true;
+ }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
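+
+// Illustrative sketch (hypothetical helper): classifying a specialization via
+// the PointerUnion returned by getInstantiatedFrom(), which is null for
+// explicit specializations and otherwise holds either the primary template or
+// a partial specialization.
+inline const char *
+describeInstantiationSource(const ClassTemplateSpecializationDecl *Spec) {
+  llvm::PointerUnion<ClassTemplateDecl *,
+                     ClassTemplatePartialSpecializationDecl *>
+    From = Spec->getInstantiatedFrom();
+  if (From.isNull())
+    return "explicit specialization";
+  if (From.is<ClassTemplatePartialSpecializationDecl *>())
+    return "instantiated from a class template partial specialization";
+  return "instantiated from the primary class template";
+}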
+
+class ClassTemplatePartialSpecializationDecl
+ : public ClassTemplateSpecializationDecl {
+ virtual void anchor();
+
+ /// \brief The list of template parameters
+ TemplateParameterList* TemplateParams;
+
+ /// \brief The source info for the template arguments as written.
+ /// FIXME: redundant with TypeAsWritten?
+ TemplateArgumentLoc *ArgsAsWritten;
+ unsigned NumArgsAsWritten;
+
+ /// \brief Sequence number indicating when this class template partial
+ /// specialization was added to the set of partial specializations for
+ /// its owning class template.
+ unsigned SequenceNumber;
+
+ /// \brief The class template partial specialization from which this
+ /// class template partial specialization was instantiated.
+ ///
+ /// The boolean value will be true to indicate that this class template
+ /// partial specialization was specialized at this level.
+ llvm::PointerIntPair<ClassTemplatePartialSpecializationDecl *, 1, bool>
+ InstantiatedFromMember;
+
+ ClassTemplatePartialSpecializationDecl(ASTContext &Context, TagKind TK,
+ DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ TemplateParameterList *Params,
+ ClassTemplateDecl *SpecializedTemplate,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ TemplateArgumentLoc *ArgInfos,
+ unsigned NumArgInfos,
+ ClassTemplatePartialSpecializationDecl *PrevDecl,
+ unsigned SequenceNumber);
+
+ ClassTemplatePartialSpecializationDecl()
+ : ClassTemplateSpecializationDecl(ClassTemplatePartialSpecialization),
+ TemplateParams(0), ArgsAsWritten(0),
+ NumArgsAsWritten(0), SequenceNumber(0),
+ InstantiatedFromMember(0, false) { }
+
+public:
+ static ClassTemplatePartialSpecializationDecl *
+ Create(ASTContext &Context, TagKind TK, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ TemplateParameterList *Params,
+ ClassTemplateDecl *SpecializedTemplate,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ const TemplateArgumentListInfo &ArgInfos,
+ QualType CanonInjectedType,
+ ClassTemplatePartialSpecializationDecl *PrevDecl,
+ unsigned SequenceNumber);
+
+ static ClassTemplatePartialSpecializationDecl *
+ CreateDeserialized(ASTContext &C, unsigned ID);
+
+ ClassTemplatePartialSpecializationDecl *getMostRecentDecl() {
+ return cast<ClassTemplatePartialSpecializationDecl>(
+ ClassTemplateSpecializationDecl::getMostRecentDecl());
+ }
+
+ /// Get the list of template parameters
+ TemplateParameterList *getTemplateParameters() const {
+ return TemplateParams;
+ }
+
+ /// Get the template arguments as written.
+ TemplateArgumentLoc *getTemplateArgsAsWritten() const {
+ return ArgsAsWritten;
+ }
+
+ /// Get the number of template arguments as written.
+ unsigned getNumTemplateArgsAsWritten() const {
+ return NumArgsAsWritten;
+ }
+
+ /// \brief Get the sequence number for this class template partial
+ /// specialization.
+ unsigned getSequenceNumber() const { return SequenceNumber; }
+
+ /// \brief Retrieve the member class template partial specialization from
+ /// which this particular class template partial specialization was
+ /// instantiated.
+ ///
+ /// \code
+ /// template<typename T>
+ /// struct Outer {
+ /// template<typename U> struct Inner;
+ /// template<typename U> struct Inner<U*> { }; // #1
+ /// };
+ ///
+ /// Outer<float>::Inner<int*> ii;
+ /// \endcode
+ ///
+ /// In this example, the instantiation of \c Outer<float>::Inner<int*> will
+ /// end up instantiating the partial specialization
+ /// \c Outer<float>::Inner<U*>, which itself was instantiated from the class
+ /// template partial specialization \c Outer<T>::Inner<U*>. Given
+ /// \c Outer<float>::Inner<U*>, this function would return
+ /// \c Outer<T>::Inner<U*>.
+ ClassTemplatePartialSpecializationDecl *getInstantiatedFromMember() {
+ ClassTemplatePartialSpecializationDecl *First
+ = cast<ClassTemplatePartialSpecializationDecl>(getFirstDeclaration());
+ return First->InstantiatedFromMember.getPointer();
+ }
+
+ void setInstantiatedFromMember(
+ ClassTemplatePartialSpecializationDecl *PartialSpec) {
+ ClassTemplatePartialSpecializationDecl *First
+ = cast<ClassTemplatePartialSpecializationDecl>(getFirstDeclaration());
+ First->InstantiatedFromMember.setPointer(PartialSpec);
+ }
+
+ /// \brief Determines whether this class template partial specialization
+ /// template was a specialization of a member partial specialization.
+ ///
+ /// In the following example, the member template partial specialization
+ /// \c X<int>::Inner<T*> is a member specialization.
+ ///
+ /// \code
+ /// template<typename T>
+ /// struct X {
+ /// template<typename U> struct Inner;
+ /// template<typename U> struct Inner<U*>;
+ /// };
+ ///
+ /// template<> template<typename T>
+ /// struct X<int>::Inner<T*> { /* ... */ };
+ /// \endcode
+ bool isMemberSpecialization() {
+ ClassTemplatePartialSpecializationDecl *First
+ = cast<ClassTemplatePartialSpecializationDecl>(getFirstDeclaration());
+ return First->InstantiatedFromMember.getInt();
+ }
+
+ /// \brief Note that this member template is a specialization.
+ void setMemberSpecialization() {
+ ClassTemplatePartialSpecializationDecl *First
+ = cast<ClassTemplatePartialSpecializationDecl>(getFirstDeclaration());
+ assert(First->InstantiatedFromMember.getPointer() &&
+ "Only member templates can be member template specializations");
+ return First->InstantiatedFromMember.setInt(true);
+ }
+
+ /// Retrieves the injected specialization type for this partial
+ /// specialization. This is not the same as the type-decl-type for
+ /// this partial specialization, which is an InjectedClassNameType.
+ QualType getInjectedSpecializationType() const {
+ assert(getTypeForDecl() && "partial specialization has no type set!");
+ return cast<InjectedClassNameType>(getTypeForDecl())
+ ->getInjectedSpecializationType();
+ }
+
+ // FIXME: Add Profile support!
+
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classofKind(Kind K) {
+ return K == ClassTemplatePartialSpecialization;
+ }
+
+ static bool classof(const ClassTemplatePartialSpecializationDecl *) {
+ return true;
+ }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
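+
+// Illustrative sketch (hypothetical helper): resolving an instantiated partial
+// specialization back to the member pattern it was instantiated from, per the
+// getInstantiatedFromMember() documentation above; falls back to the
+// declaration itself when it is not a member instantiation.
+inline ClassTemplatePartialSpecializationDecl *
+getPartialSpecializationPattern(ClassTemplatePartialSpecializationDecl *PS) {
+  if (ClassTemplatePartialSpecializationDecl *Member =
+        PS->getInstantiatedFromMember())
+    return Member;
+  return PS;
+}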
+
+/// Declaration of a class template.
+class ClassTemplateDecl : public RedeclarableTemplateDecl {
+ static void DeallocateCommon(void *Ptr);
+
+protected:
+ /// \brief Data that is common to all of the declarations of a given
+ /// class template.
+ struct Common : CommonBase {
+ Common() : LazySpecializations() { }
+
+ /// \brief The class template specializations for this class
+ /// template, including explicit specializations and instantiations.
+ llvm::FoldingSet<ClassTemplateSpecializationDecl> Specializations;
+
+ /// \brief The class template partial specializations for this class
+ /// template.
+ llvm::FoldingSet<ClassTemplatePartialSpecializationDecl>
+ PartialSpecializations;
+
+ /// \brief The injected-class-name type for this class template.
+ QualType InjectedClassNameType;
+
+ /// \brief If non-null, points to an array of specializations (including
+    /// partial specializations) known only by their external declaration IDs.
+ ///
+    /// The first value in the array is the number of specializations/
+ /// partial specializations that follow.
+ uint32_t *LazySpecializations;
+ };
+
+ /// \brief Load any lazily-loaded specializations from the external source.
+ void LoadLazySpecializations();
+
+ /// \brief Retrieve the set of specializations of this class template.
+ llvm::FoldingSet<ClassTemplateSpecializationDecl> &getSpecializations();
+
+ /// \brief Retrieve the set of partial specializations of this class
+ /// template.
+ llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> &
+ getPartialSpecializations();
+
+ ClassTemplateDecl(DeclContext *DC, SourceLocation L, DeclarationName Name,
+ TemplateParameterList *Params, NamedDecl *Decl)
+ : RedeclarableTemplateDecl(ClassTemplate, DC, L, Name, Params, Decl) { }
+
+ ClassTemplateDecl(EmptyShell Empty)
+ : RedeclarableTemplateDecl(ClassTemplate, 0, SourceLocation(),
+ DeclarationName(), 0, 0) { }
+
+ CommonBase *newCommon(ASTContext &C);
+
+ Common *getCommonPtr() {
+ return static_cast<Common *>(RedeclarableTemplateDecl::getCommonPtr());
+ }
+
+public:
+  /// Get the underlying class declaration of the template.
+ CXXRecordDecl *getTemplatedDecl() const {
+ return static_cast<CXXRecordDecl *>(TemplatedDecl);
+ }
+
+ /// Returns whether this template declaration defines the primary
+ /// class pattern.
+ bool isThisDeclarationADefinition() const {
+ return getTemplatedDecl()->isThisDeclarationADefinition();
+ }
+
+ /// Create a class template node.
+ static ClassTemplateDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ DeclarationName Name,
+ TemplateParameterList *Params,
+ NamedDecl *Decl,
+ ClassTemplateDecl *PrevDecl);
+
+ /// Create an empty class template node.
+ static ClassTemplateDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+  /// \brief Return the specialization with the provided arguments if it
+  /// exists; otherwise return null and set \p InsertPos to the point where
+  /// the new specialization should be inserted.
+ ClassTemplateSpecializationDecl *
+ findSpecialization(const TemplateArgument *Args, unsigned NumArgs,
+ void *&InsertPos);
+
+  /// \brief Insert the specified specialization, which must not already be
+  /// present; \p InsertPos must be obtained from findSpecialization().
+ void AddSpecialization(ClassTemplateSpecializationDecl *D, void *InsertPos);
+
+ ClassTemplateDecl *getCanonicalDecl() {
+ return cast<ClassTemplateDecl>(
+ RedeclarableTemplateDecl::getCanonicalDecl());
+ }
+ const ClassTemplateDecl *getCanonicalDecl() const {
+ return cast<ClassTemplateDecl>(
+ RedeclarableTemplateDecl::getCanonicalDecl());
+ }
+
+ /// \brief Retrieve the previous declaration of this class template, or
+ /// NULL if no such declaration exists.
+ ClassTemplateDecl *getPreviousDecl() {
+ return cast_or_null<ClassTemplateDecl>(
+ RedeclarableTemplateDecl::getPreviousDecl());
+ }
+
+ /// \brief Retrieve the previous declaration of this class template, or
+ /// NULL if no such declaration exists.
+ const ClassTemplateDecl *getPreviousDecl() const {
+ return cast_or_null<ClassTemplateDecl>(
+ RedeclarableTemplateDecl::getPreviousDecl());
+ }
+
+ ClassTemplateDecl *getInstantiatedFromMemberTemplate() {
+ return cast_or_null<ClassTemplateDecl>(
+ RedeclarableTemplateDecl::getInstantiatedFromMemberTemplate());
+ }
+
+  /// \brief Return the partial specialization with the provided arguments if
+  /// it exists; otherwise return null and set \p InsertPos to the point where
+  /// the new partial specialization should be inserted.
+ ClassTemplatePartialSpecializationDecl *
+ findPartialSpecialization(const TemplateArgument *Args, unsigned NumArgs,
+ void *&InsertPos);
+
+  /// \brief Insert the specified partial specialization, which must not
+  /// already be present; \p InsertPos must be obtained from
+  /// findPartialSpecialization().
+ void AddPartialSpecialization(ClassTemplatePartialSpecializationDecl *D,
+ void *InsertPos);
+
+ /// \brief Return the next partial specialization sequence number.
+ unsigned getNextPartialSpecSequenceNumber() {
+ return getPartialSpecializations().size();
+ }
+
+ /// \brief Retrieve the partial specializations as an ordered list.
+ void getPartialSpecializations(
+ SmallVectorImpl<ClassTemplatePartialSpecializationDecl *> &PS);
+
+ /// \brief Find a class template partial specialization with the given
+ /// type T.
+ ///
+ /// \param T a dependent type that names a specialization of this class
+ /// template.
+ ///
+ /// \returns the class template partial specialization that exactly matches
+ /// the type \p T, or NULL if no such partial specialization exists.
+ ClassTemplatePartialSpecializationDecl *findPartialSpecialization(QualType T);
+
+ /// \brief Find a class template partial specialization which was instantiated
+ /// from the given member partial specialization.
+ ///
+ /// \param D a member class template partial specialization.
+ ///
+ /// \returns the class template partial specialization which was instantiated
+ /// from the given member partial specialization, or NULL if no such partial
+ /// specialization exists.
+ ClassTemplatePartialSpecializationDecl *
+ findPartialSpecInstantiatedFromMember(
+ ClassTemplatePartialSpecializationDecl *D);
+
+ /// \brief Retrieve the template specialization type of the
+ /// injected-class-name for this class template.
+ ///
+ /// The injected-class-name for a class template \c X is \c
+ /// X<template-args>, where \c template-args is formed from the
+ /// template arguments that correspond to the template parameters of
+ /// \c X. For example:
+ ///
+ /// \code
+ /// template<typename T, int N>
+ /// struct array {
+ /// typedef array this_type; // "array" is equivalent to "array<T, N>"
+ /// };
+ /// \endcode
+ QualType getInjectedClassNameSpecialization();
+
+ typedef SpecIterator<ClassTemplateSpecializationDecl> spec_iterator;
+
+ spec_iterator spec_begin() {
+ return makeSpecIterator(getSpecializations(), false);
+ }
+
+ spec_iterator spec_end() {
+ return makeSpecIterator(getSpecializations(), true);
+ }
+
+ typedef SpecIterator<ClassTemplatePartialSpecializationDecl>
+ partial_spec_iterator;
+
+ partial_spec_iterator partial_spec_begin() {
+ return makeSpecIterator(getPartialSpecializations(), false);
+ }
+
+ partial_spec_iterator partial_spec_end() {
+ return makeSpecIterator(getPartialSpecializations(), true);
+ }
+
+ // Implement isa/cast/dyncast support
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const ClassTemplateDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == ClassTemplate; }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
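+
+// Illustrative sketch (hypothetical helper): the find-then-insert idiom implied
+// by findSpecialization()/AddSpecialization() above. The InsertPos cookie from
+// a failed lookup is handed back when registering the newly built declaration;
+// the BuildSpec callback is an assumption of the example.
+template <typename BuildSpecFn>
+ClassTemplateSpecializationDecl *
+getOrCreateSpecialization(ClassTemplateDecl *CT, const TemplateArgument *Args,
+                          unsigned NumArgs, BuildSpecFn BuildSpec) {
+  void *InsertPos = 0;
+  if (ClassTemplateSpecializationDecl *Existing =
+        CT->findSpecialization(Args, NumArgs, InsertPos))
+    return Existing;
+  ClassTemplateSpecializationDecl *New = BuildSpec();
+  CT->AddSpecialization(New, InsertPos);
+  return New;
+}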
+
+/// Declaration of a friend template. For example:
+///
+/// template <typename T> class A {
+/// friend class MyVector<T>; // not a friend template
+/// template <typename U> friend class B; // not a friend template
+/// template <typename U> friend class Foo<T>::Nested; // friend template
+/// };
+/// NOTE: This class is not currently in use. All of the above
+/// will yield a FriendDecl, not a FriendTemplateDecl.
+class FriendTemplateDecl : public Decl {
+ virtual void anchor();
+public:
+ typedef llvm::PointerUnion<NamedDecl*,TypeSourceInfo*> FriendUnion;
+
+private:
+ // The number of template parameters; always non-zero.
+ unsigned NumParams;
+
+ // The parameter list.
+ TemplateParameterList **Params;
+
+ // The declaration that's a friend of this class.
+ FriendUnion Friend;
+
+ // Location of the 'friend' specifier.
+ SourceLocation FriendLoc;
+
+
+ FriendTemplateDecl(DeclContext *DC, SourceLocation Loc,
+ unsigned NParams,
+ TemplateParameterList **Params,
+ FriendUnion Friend,
+ SourceLocation FriendLoc)
+ : Decl(Decl::FriendTemplate, DC, Loc),
+ NumParams(NParams),
+ Params(Params),
+ Friend(Friend),
+ FriendLoc(FriendLoc)
+ {}
+
+ FriendTemplateDecl(EmptyShell Empty)
+ : Decl(Decl::FriendTemplate, Empty),
+ NumParams(0),
+ Params(0)
+ {}
+
+public:
+ static FriendTemplateDecl *Create(ASTContext &Context,
+ DeclContext *DC, SourceLocation Loc,
+ unsigned NParams,
+ TemplateParameterList **Params,
+ FriendUnion Friend,
+ SourceLocation FriendLoc);
+
+ static FriendTemplateDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ /// If this friend declaration names a templated type (or
+ /// a dependent member type of a templated type), return that
+ /// type; otherwise return null.
+ TypeSourceInfo *getFriendType() const {
+ return Friend.dyn_cast<TypeSourceInfo*>();
+ }
+
+ /// If this friend declaration names a templated function (or
+ /// a member function of a templated type), return that type;
+ /// otherwise return null.
+ NamedDecl *getFriendDecl() const {
+ return Friend.dyn_cast<NamedDecl*>();
+ }
+
+ /// Retrieves the location of the 'friend' keyword.
+ SourceLocation getFriendLoc() const {
+ return FriendLoc;
+ }
+
+ TemplateParameterList *getTemplateParameterList(unsigned i) const {
+    assert(i < NumParams && "template parameter list index out of range");
+ return Params[i];
+ }
+
+ unsigned getNumTemplateParameters() const {
+ return NumParams;
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classofKind(Kind K) { return K == Decl::FriendTemplate; }
+ static bool classof(const FriendTemplateDecl *D) { return true; }
+
+ friend class ASTDeclReader;
+};
+
+/// Declaration of an alias template. For example:
+///
+/// template <typename T> using V = std::map<T*, int, MyCompare<T>>;
+class TypeAliasTemplateDecl : public RedeclarableTemplateDecl {
+ static void DeallocateCommon(void *Ptr);
+
+protected:
+ typedef CommonBase Common;
+
+ TypeAliasTemplateDecl(DeclContext *DC, SourceLocation L, DeclarationName Name,
+ TemplateParameterList *Params, NamedDecl *Decl)
+ : RedeclarableTemplateDecl(TypeAliasTemplate, DC, L, Name, Params, Decl) { }
+
+ CommonBase *newCommon(ASTContext &C);
+
+ Common *getCommonPtr() {
+ return static_cast<Common *>(RedeclarableTemplateDecl::getCommonPtr());
+ }
+
+public:
+  /// Get the underlying type alias declaration of the template.
+ TypeAliasDecl *getTemplatedDecl() const {
+ return static_cast<TypeAliasDecl*>(TemplatedDecl);
+ }
+
+
+ TypeAliasTemplateDecl *getCanonicalDecl() {
+ return cast<TypeAliasTemplateDecl>(
+ RedeclarableTemplateDecl::getCanonicalDecl());
+ }
+ const TypeAliasTemplateDecl *getCanonicalDecl() const {
+ return cast<TypeAliasTemplateDecl>(
+ RedeclarableTemplateDecl::getCanonicalDecl());
+ }
+
+  /// \brief Retrieve the previous declaration of this alias template, or
+  /// NULL if no such declaration exists.
+ TypeAliasTemplateDecl *getPreviousDecl() {
+ return cast_or_null<TypeAliasTemplateDecl>(
+ RedeclarableTemplateDecl::getPreviousDecl());
+ }
+
+  /// \brief Retrieve the previous declaration of this alias template, or
+  /// NULL if no such declaration exists.
+ const TypeAliasTemplateDecl *getPreviousDecl() const {
+ return cast_or_null<TypeAliasTemplateDecl>(
+ RedeclarableTemplateDecl::getPreviousDecl());
+ }
+
+ TypeAliasTemplateDecl *getInstantiatedFromMemberTemplate() {
+ return cast_or_null<TypeAliasTemplateDecl>(
+ RedeclarableTemplateDecl::getInstantiatedFromMemberTemplate());
+ }
+
+
+  /// \brief Create an alias template node.
+ static TypeAliasTemplateDecl *Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ DeclarationName Name,
+ TemplateParameterList *Params,
+ NamedDecl *Decl);
+
+ /// \brief Create an empty alias template node.
+ static TypeAliasTemplateDecl *CreateDeserialized(ASTContext &C, unsigned ID);
+
+ // Implement isa/cast/dyncast support
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classof(const TypeAliasTemplateDecl *D) { return true; }
+ static bool classofKind(Kind K) { return K == TypeAliasTemplate; }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
+
+/// Declaration of a function specialization at template class scope.
+/// This is a non-standard extension needed to support MSVC.
+/// For example:
+/// template <class T>
+/// class A {
+/// template <class U> void foo(U a) { }
+/// template<> void foo(int a) { }
+/// };
+///
+/// "template<> foo(int a)" will be saved in Specialization as a normal
+/// CXXMethodDecl. Then during an instantiation of class A, it will be
+/// transformed into an actual function specialization.
+class ClassScopeFunctionSpecializationDecl : public Decl {
+ virtual void anchor();
+
+ ClassScopeFunctionSpecializationDecl(DeclContext *DC, SourceLocation Loc,
+ CXXMethodDecl *FD)
+ : Decl(Decl::ClassScopeFunctionSpecialization, DC, Loc),
+ Specialization(FD) {}
+
+ ClassScopeFunctionSpecializationDecl(EmptyShell Empty)
+ : Decl(Decl::ClassScopeFunctionSpecialization, Empty) {}
+
+ CXXMethodDecl *Specialization;
+
+public:
+ CXXMethodDecl *getSpecialization() const { return Specialization; }
+
+ static ClassScopeFunctionSpecializationDecl *Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation Loc,
+ CXXMethodDecl *FD) {
+    return new (C) ClassScopeFunctionSpecializationDecl(DC, Loc, FD);
+ }
+
+ static ClassScopeFunctionSpecializationDecl *
+ CreateDeserialized(ASTContext &Context, unsigned ID);
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Decl *D) { return classofKind(D->getKind()); }
+ static bool classofKind(Kind K) {
+ return K == Decl::ClassScopeFunctionSpecialization;
+ }
+ static bool classof(const ClassScopeFunctionSpecializationDecl *D) {
+ return true;
+ }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
+
+/// Implementation of inline functions that require the template declarations
+inline AnyFunctionDecl::AnyFunctionDecl(FunctionTemplateDecl *FTD)
+ : Function(FTD) { }
+
+} /* end of namespace clang */
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/DeclVisitor.h
new file mode 100644
index 0000000..62654b8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclVisitor.h
@@ -0,0 +1,54 @@
+//===--- DeclVisitor.h - Visitor for Decl subclasses ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DeclVisitor interface.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_AST_DECLVISITOR_H
+#define LLVM_CLANG_AST_DECLVISITOR_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclTemplate.h"
+
+namespace clang {
+
+#define DISPATCH(NAME, CLASS) \
+ return static_cast<ImplClass*>(this)-> Visit##NAME(static_cast<CLASS*>(D))
+
+/// \brief A simple visitor class that helps create declaration visitors.
+template<typename ImplClass, typename RetTy=void>
+class DeclVisitor {
+public:
+ RetTy Visit(Decl *D) {
+ switch (D->getKind()) {
+#define DECL(DERIVED, BASE) \
+ case Decl::DERIVED: DISPATCH(DERIVED##Decl, DERIVED##Decl);
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ }
+ llvm_unreachable("Decl that isn't part of DeclNodes.inc!");
+ }
+
+ // If the implementation chooses not to implement a certain visit
+ // method, fall back to the parent.
+#define DECL(DERIVED, BASE) \
+ RetTy Visit##DERIVED##Decl(DERIVED##Decl *D) { DISPATCH(BASE, BASE); }
+#include "clang/AST/DeclNodes.inc"
+
+ RetTy VisitDecl(Decl *D) { return RetTy(); }
+};
+
+#undef DISPATCH
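+
+// Illustrative sketch (hypothetical visitor, not part of the original header):
+// a CRTP client that classifies a few declaration kinds and falls back to
+// VisitDecl() for everything else through the generated Visit##BASE chain.
+// Usage: const char *Kind = ExampleDeclClassifier().Visit(SomeDecl);
+class ExampleDeclClassifier
+  : public DeclVisitor<ExampleDeclClassifier, const char *> {
+public:
+  const char *VisitFunctionDecl(FunctionDecl *D) { return "function"; }
+  const char *VisitCXXRecordDecl(CXXRecordDecl *D) { return "C++ class"; }
+  const char *VisitVarDecl(VarDecl *D) { return "variable"; }
+  const char *VisitDecl(Decl *D) { return "other declaration"; }
+};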
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_AST_DECLVISITOR_H
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h b/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h
new file mode 100644
index 0000000..6349d9c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/DeclarationName.h
@@ -0,0 +1,580 @@
+//===-- DeclarationName.h - Representation of declaration names -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the DeclarationName and DeclarationNameTable classes.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_AST_DECLARATIONNAME_H
+#define LLVM_CLANG_AST_DECLARATIONNAME_H
+
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/CanonicalType.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "llvm/Support/Compiler.h"
+
+namespace llvm {
+ template <typename T> struct DenseMapInfo;
+}
+
+namespace clang {
+ class CXXSpecialName;
+ class CXXOperatorIdName;
+ class CXXLiteralOperatorIdName;
+ class DeclarationNameExtra;
+ class IdentifierInfo;
+ class MultiKeywordSelector;
+ class UsingDirectiveDecl;
+ class TypeSourceInfo;
+
+/// DeclarationName - The name of a declaration. In the common case,
+/// this just stores an IdentifierInfo pointer to a normal
+/// name. However, it also provides encodings for Objective-C
+/// selectors (optimizing zero- and one-argument selectors, which make
+/// up 78% of all selectors in Cocoa.h) and special C++ names
+/// for constructors, destructors, and conversion functions.
+class DeclarationName {
+public:
+ /// NameKind - The kind of name this object contains.
+ enum NameKind {
+ Identifier,
+ ObjCZeroArgSelector,
+ ObjCOneArgSelector,
+ ObjCMultiArgSelector,
+ CXXConstructorName,
+ CXXDestructorName,
+ CXXConversionFunctionName,
+ CXXOperatorName,
+ CXXLiteralOperatorName,
+ CXXUsingDirective
+ };
+
+private:
+ /// StoredNameKind - The kind of name that is actually stored in the
+ /// upper bits of the Ptr field. This is only used internally.
+ enum StoredNameKind {
+ StoredIdentifier = 0,
+ StoredObjCZeroArgSelector,
+ StoredObjCOneArgSelector,
+ StoredDeclarationNameExtra,
+ PtrMask = 0x03
+ };
+
+ /// Ptr - The lowest two bits are used to express what kind of name
+ /// we're actually storing, using the values of NameKind. Depending
+ /// on the kind of name this is, the upper bits of Ptr may have one
+ /// of several different meanings:
+ ///
+ /// StoredIdentifier - The name is a normal identifier, and Ptr is
+ /// a normal IdentifierInfo pointer.
+ ///
+ /// StoredObjCZeroArgSelector - The name is an Objective-C
+ /// selector with zero arguments, and Ptr is an IdentifierInfo
+ /// pointer pointing to the selector name.
+ ///
+ /// StoredObjCOneArgSelector - The name is an Objective-C selector
+ /// with one argument, and Ptr is an IdentifierInfo pointer
+ /// pointing to the selector name.
+ ///
+ /// StoredDeclarationNameExtra - Ptr is actually a pointer to a
+ /// DeclarationNameExtra structure, whose first value will tell us
+ /// whether this is an Objective-C selector, C++ operator-id name,
+ /// or special C++ name.
+ uintptr_t Ptr;
+
+ /// getStoredNameKind - Return the kind of object that is stored in
+ /// Ptr.
+ StoredNameKind getStoredNameKind() const {
+ return static_cast<StoredNameKind>(Ptr & PtrMask);
+ }
+
+ /// getExtra - Get the "extra" information associated with this
+ /// multi-argument selector or C++ special name.
+ DeclarationNameExtra *getExtra() const {
+ assert(getStoredNameKind() == StoredDeclarationNameExtra &&
+ "Declaration name does not store an Extra structure");
+ return reinterpret_cast<DeclarationNameExtra *>(Ptr & ~PtrMask);
+ }
+
+ /// getAsCXXSpecialName - If the stored pointer is actually a
+ /// CXXSpecialName, returns a pointer to it. Otherwise, returns
+ /// a NULL pointer.
+ CXXSpecialName *getAsCXXSpecialName() const {
+ if (getNameKind() >= CXXConstructorName &&
+ getNameKind() <= CXXConversionFunctionName)
+ return reinterpret_cast<CXXSpecialName *>(Ptr & ~PtrMask);
+ return 0;
+ }
+
+ /// getAsCXXOperatorIdName
+ CXXOperatorIdName *getAsCXXOperatorIdName() const {
+ if (getNameKind() == CXXOperatorName)
+ return reinterpret_cast<CXXOperatorIdName *>(Ptr & ~PtrMask);
+ return 0;
+ }
+
+ CXXLiteralOperatorIdName *getAsCXXLiteralOperatorIdName() const {
+ if (getNameKind() == CXXLiteralOperatorName)
+ return reinterpret_cast<CXXLiteralOperatorIdName *>(Ptr & ~PtrMask);
+ return 0;
+ }
+
+ // Construct a declaration name from the name of a C++ constructor,
+ // destructor, or conversion function.
+ DeclarationName(CXXSpecialName *Name)
+ : Ptr(reinterpret_cast<uintptr_t>(Name)) {
+ assert((Ptr & PtrMask) == 0 && "Improperly aligned CXXSpecialName");
+ Ptr |= StoredDeclarationNameExtra;
+ }
+
+ // Construct a declaration name from the name of a C++ overloaded
+ // operator.
+ DeclarationName(CXXOperatorIdName *Name)
+ : Ptr(reinterpret_cast<uintptr_t>(Name)) {
+ assert((Ptr & PtrMask) == 0 && "Improperly aligned CXXOperatorId");
+ Ptr |= StoredDeclarationNameExtra;
+ }
+
+ DeclarationName(CXXLiteralOperatorIdName *Name)
+ : Ptr(reinterpret_cast<uintptr_t>(Name)) {
+ assert((Ptr & PtrMask) == 0 && "Improperly aligned CXXLiteralOperatorId");
+ Ptr |= StoredDeclarationNameExtra;
+ }
+
+ /// Construct a declaration name from a raw pointer.
+ DeclarationName(uintptr_t Ptr) : Ptr(Ptr) { }
+
+ friend class DeclarationNameTable;
+ friend class NamedDecl;
+
+ /// getFETokenInfoAsVoid - Retrieves the front end-specified pointer
+ /// for this name as a void pointer.
+ void *getFETokenInfoAsVoid() const;
+
+public:
+ /// DeclarationName - Used to create an empty selector.
+ DeclarationName() : Ptr(0) { }
+
+ // Construct a declaration name from an IdentifierInfo *.
+ DeclarationName(const IdentifierInfo *II)
+ : Ptr(reinterpret_cast<uintptr_t>(II)) {
+ assert((Ptr & PtrMask) == 0 && "Improperly aligned IdentifierInfo");
+ }
+
+ // Construct a declaration name from an Objective-C selector.
+ DeclarationName(Selector Sel);
+
+ /// getUsingDirectiveName - Return name for all using-directives.
+ static DeclarationName getUsingDirectiveName();
+
+ // operator bool() - Evaluates true when this declaration name is
+ // non-empty.
+ operator bool() const {
+ return ((Ptr & PtrMask) != 0) ||
+ (reinterpret_cast<IdentifierInfo *>(Ptr & ~PtrMask));
+ }
+
+ /// Predicate functions for querying what type of name this is.
+ bool isIdentifier() const { return getStoredNameKind() == StoredIdentifier; }
+ bool isObjCZeroArgSelector() const {
+ return getStoredNameKind() == StoredObjCZeroArgSelector;
+ }
+ bool isObjCOneArgSelector() const {
+ return getStoredNameKind() == StoredObjCOneArgSelector;
+ }
+
+ /// getNameKind - Determine what kind of name this is.
+ NameKind getNameKind() const;
+
+ /// \brief Determines whether the name itself is dependent, e.g., because it
+ /// involves a C++ type that is itself dependent.
+ ///
+ /// Note that this does not capture all of the notions of "dependent name",
+ /// because an identifier can be a dependent name if it is used as the
+ /// callee in a call expression with dependent arguments.
+ bool isDependentName() const;
+
+ /// getNameAsString - Retrieve the human-readable string for this name.
+ std::string getAsString() const;
+
+ /// printName - Print the human-readable name to a stream.
+ void printName(raw_ostream &OS) const;
+
+ /// getAsIdentifierInfo - Retrieve the IdentifierInfo * stored in
+ /// this declaration name, or NULL if this declaration name isn't a
+ /// simple identifier.
+ IdentifierInfo *getAsIdentifierInfo() const {
+ if (isIdentifier())
+ return reinterpret_cast<IdentifierInfo *>(Ptr);
+ return 0;
+ }
+
+ /// getAsOpaqueInteger - Get the representation of this declaration
+ /// name as an opaque integer.
+ uintptr_t getAsOpaqueInteger() const { return Ptr; }
+
+ /// getAsOpaquePtr - Get the representation of this declaration name as
+ /// an opaque pointer.
+ void *getAsOpaquePtr() const { return reinterpret_cast<void*>(Ptr); }
+
+ static DeclarationName getFromOpaquePtr(void *P) {
+ DeclarationName N;
+ N.Ptr = reinterpret_cast<uintptr_t> (P);
+ return N;
+ }
+
+ static DeclarationName getFromOpaqueInteger(uintptr_t P) {
+ DeclarationName N;
+ N.Ptr = P;
+ return N;
+ }
+
+ /// getCXXNameType - If this name is one of the C++ names (of a
+ /// constructor, destructor, or conversion function), return the
+ /// type associated with that name.
+ QualType getCXXNameType() const;
+
+ /// getCXXOverloadedOperator - If this name is the name of an
+ /// overloadable operator in C++ (e.g., @c operator+), retrieve the
+ /// kind of overloaded operator.
+ OverloadedOperatorKind getCXXOverloadedOperator() const;
+
+ /// getCXXLiteralIdentifier - If this name is the name of a literal
+ /// operator, retrieve the identifier associated with it.
+ IdentifierInfo *getCXXLiteralIdentifier() const;
+
+ /// getObjCSelector - Get the Objective-C selector stored in this
+ /// declaration name.
+ Selector getObjCSelector() const;
+
+ /// getFETokenInfo/setFETokenInfo - The language front-end is
+ /// allowed to associate arbitrary metadata with some kinds of
+ /// declaration names, including normal identifiers and C++
+ /// constructors, destructors, and conversion functions.
+ template<typename T>
+ T *getFETokenInfo() const { return static_cast<T*>(getFETokenInfoAsVoid()); }
+
+ void setFETokenInfo(void *T);
+
+  /// operator== - Determine whether the specified names are identical.
+ friend bool operator==(DeclarationName LHS, DeclarationName RHS) {
+ return LHS.Ptr == RHS.Ptr;
+ }
+
+ /// operator!= - Determine whether the specified names are different.
+ friend bool operator!=(DeclarationName LHS, DeclarationName RHS) {
+ return LHS.Ptr != RHS.Ptr;
+ }
+
+ static DeclarationName getEmptyMarker() {
+ return DeclarationName(uintptr_t(-1));
+ }
+
+ static DeclarationName getTombstoneMarker() {
+ return DeclarationName(uintptr_t(-2));
+ }
+
+ static int compare(DeclarationName LHS, DeclarationName RHS);
+
+ void dump() const;
+};
+
+/// Ordering on two declaration names. If both names are identifiers,
+/// this provides a lexicographical ordering.
+inline bool operator<(DeclarationName LHS, DeclarationName RHS) {
+ return DeclarationName::compare(LHS, RHS) < 0;
+}
+
+/// Ordering on two declaration names. If both names are identifiers,
+/// this provides a lexicographical ordering.
+inline bool operator>(DeclarationName LHS, DeclarationName RHS) {
+ return DeclarationName::compare(LHS, RHS) > 0;
+}
+
+/// Ordering on two declaration names. If both names are identifiers,
+/// this provides a lexicographical ordering.
+inline bool operator<=(DeclarationName LHS, DeclarationName RHS) {
+ return DeclarationName::compare(LHS, RHS) <= 0;
+}
+
+/// Ordering on two declaration names. If both names are identifiers,
+/// this provides a lexicographical ordering.
+inline bool operator>=(DeclarationName LHS, DeclarationName RHS) {
+ return DeclarationName::compare(LHS, RHS) >= 0;
+}
+
+/// DeclarationNameTable - Used to store and retrieve DeclarationName
+/// instances for the various kinds of declaration names, e.g., normal
+/// identifiers, C++ constructor names, etc. This class contains
+/// uniqued versions of each of the C++ special names, which can be
+/// retrieved using its member functions (e.g.,
+/// getCXXConstructorName).
+class DeclarationNameTable {
+ const ASTContext &Ctx;
+ void *CXXSpecialNamesImpl; // Actually a FoldingSet<CXXSpecialName> *
+ CXXOperatorIdName *CXXOperatorNames; // Operator names
+  void *CXXLiteralOperatorNames; // Actually a FoldingSet<CXXLiteralOperatorIdName> *
+
+ DeclarationNameTable(const DeclarationNameTable&); // NONCOPYABLE
+ DeclarationNameTable& operator=(const DeclarationNameTable&); // NONCOPYABLE
+
+public:
+ DeclarationNameTable(const ASTContext &C);
+ ~DeclarationNameTable();
+
+ /// getIdentifier - Create a declaration name that is a simple
+ /// identifier.
+ DeclarationName getIdentifier(const IdentifierInfo *ID) {
+ return DeclarationName(ID);
+ }
+
+ /// getCXXConstructorName - Returns the name of a C++ constructor
+ /// for the given Type.
+ DeclarationName getCXXConstructorName(CanQualType Ty) {
+ return getCXXSpecialName(DeclarationName::CXXConstructorName,
+ Ty.getUnqualifiedType());
+ }
+
+ /// getCXXDestructorName - Returns the name of a C++ destructor
+ /// for the given Type.
+ DeclarationName getCXXDestructorName(CanQualType Ty) {
+ return getCXXSpecialName(DeclarationName::CXXDestructorName,
+ Ty.getUnqualifiedType());
+ }
+
+ /// getCXXConversionFunctionName - Returns the name of a C++
+ /// conversion function for the given Type.
+ DeclarationName getCXXConversionFunctionName(CanQualType Ty) {
+ return getCXXSpecialName(DeclarationName::CXXConversionFunctionName, Ty);
+ }
+
+ /// getCXXSpecialName - Returns a declaration name for special kind
+ /// of C++ name, e.g., for a constructor, destructor, or conversion
+ /// function.
+ DeclarationName getCXXSpecialName(DeclarationName::NameKind Kind,
+ CanQualType Ty);
+
+ /// getCXXOperatorName - Get the name of the overloadable C++
+ /// operator corresponding to Op.
+ DeclarationName getCXXOperatorName(OverloadedOperatorKind Op);
+
+ /// getCXXLiteralOperatorName - Get the name of the literal operator function
+ /// with II as the identifier.
+ DeclarationName getCXXLiteralOperatorName(IdentifierInfo *II);
+};
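+
+// Illustrative sketch (hypothetical helper): constructor names are built from
+// the class's canonical type via the ASTContext-owned DeclarationNameTable and
+// can then be compared as ordinary DeclarationNames. Assumes ASTContext.h and
+// DeclCXX.h are available at the point of use.
+inline bool isConstructorNameOf(const NamedDecl *D, ASTContext &Ctx,
+                                const CXXRecordDecl *Class) {
+  CanQualType Ty = Ctx.getCanonicalType(Ctx.getRecordType(Class));
+  DeclarationName CtorName = Ctx.DeclarationNames.getCXXConstructorName(Ty);
+  return D->getDeclName() == CtorName;
+}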
+
+/// DeclarationNameLoc - Additional source/type location info
+/// for a declaration name. Needs a DeclarationName in order
+/// to be interpreted correctly.
+struct DeclarationNameLoc {
+ union {
+ // The source location for identifier stored elsewhere.
+ // struct {} Identifier;
+
+ // Type info for constructors, destructors and conversion functions.
+ // Locations (if any) for the tilde (destructor) or operator keyword
+ // (conversion) are stored elsewhere.
+ struct {
+ TypeSourceInfo* TInfo;
+ } NamedType;
+
+ // The location (if any) of the operator keyword is stored elsewhere.
+ struct {
+ unsigned BeginOpNameLoc;
+ unsigned EndOpNameLoc;
+ } CXXOperatorName;
+
+ // The location (if any) of the operator keyword is stored elsewhere.
+ struct {
+ unsigned OpNameLoc;
+ } CXXLiteralOperatorName;
+
+ // struct {} CXXUsingDirective;
+ // struct {} ObjCZeroArgSelector;
+ // struct {} ObjCOneArgSelector;
+ // struct {} ObjCMultiArgSelector;
+ };
+
+ DeclarationNameLoc(DeclarationName Name);
+ // FIXME: this should go away once all DNLocs are properly initialized.
+ DeclarationNameLoc() { memset((void*) this, 0, sizeof(*this)); }
+}; // struct DeclarationNameLoc
+
+
+/// DeclarationNameInfo - A collector data type for bundling together
+/// a DeclarationName and the corresponding source/type location info.
+struct DeclarationNameInfo {
+private:
+ /// Name - The declaration name, also encoding name kind.
+ DeclarationName Name;
+ /// Loc - The main source location for the declaration name.
+ SourceLocation NameLoc;
+ /// Info - Further source/type location info for special kinds of names.
+ DeclarationNameLoc LocInfo;
+
+public:
+ // FIXME: remove it.
+ DeclarationNameInfo() {}
+
+ DeclarationNameInfo(DeclarationName Name, SourceLocation NameLoc)
+ : Name(Name), NameLoc(NameLoc), LocInfo(Name) {}
+
+ DeclarationNameInfo(DeclarationName Name, SourceLocation NameLoc,
+ DeclarationNameLoc LocInfo)
+ : Name(Name), NameLoc(NameLoc), LocInfo(LocInfo) {}
+
+ /// getName - Returns the embedded declaration name.
+ DeclarationName getName() const { return Name; }
+ /// setName - Sets the embedded declaration name.
+ void setName(DeclarationName N) { Name = N; }
+
+ /// getLoc - Returns the main location of the declaration name.
+ SourceLocation getLoc() const { return NameLoc; }
+ /// setLoc - Sets the main location of the declaration name.
+ void setLoc(SourceLocation L) { NameLoc = L; }
+
+ const DeclarationNameLoc &getInfo() const { return LocInfo; }
+ DeclarationNameLoc &getInfo() { return LocInfo; }
+ void setInfo(const DeclarationNameLoc &Info) { LocInfo = Info; }
+
+ /// getNamedTypeInfo - Returns the source type info associated to
+ /// the name. Assumes it is a constructor, destructor or conversion.
+ TypeSourceInfo *getNamedTypeInfo() const {
+ assert(Name.getNameKind() == DeclarationName::CXXConstructorName ||
+ Name.getNameKind() == DeclarationName::CXXDestructorName ||
+ Name.getNameKind() == DeclarationName::CXXConversionFunctionName);
+ return LocInfo.NamedType.TInfo;
+ }
+ /// setNamedTypeInfo - Sets the source type info associated to
+ /// the name. Assumes it is a constructor, destructor or conversion.
+ void setNamedTypeInfo(TypeSourceInfo *TInfo) {
+ assert(Name.getNameKind() == DeclarationName::CXXConstructorName ||
+ Name.getNameKind() == DeclarationName::CXXDestructorName ||
+ Name.getNameKind() == DeclarationName::CXXConversionFunctionName);
+ LocInfo.NamedType.TInfo = TInfo;
+ }
+
+ /// getCXXOperatorNameRange - Gets the range of the operator name
+ /// (without the operator keyword). Assumes it is a (non-literal) operator.
+ SourceRange getCXXOperatorNameRange() const {
+ assert(Name.getNameKind() == DeclarationName::CXXOperatorName);
+ return SourceRange(
+ SourceLocation::getFromRawEncoding(LocInfo.CXXOperatorName.BeginOpNameLoc),
+ SourceLocation::getFromRawEncoding(LocInfo.CXXOperatorName.EndOpNameLoc)
+ );
+ }
+ /// setCXXOperatorNameRange - Sets the range of the operator name
+ /// (without the operator keyword). Assumes it is a C++ operator.
+ void setCXXOperatorNameRange(SourceRange R) {
+ assert(Name.getNameKind() == DeclarationName::CXXOperatorName);
+ LocInfo.CXXOperatorName.BeginOpNameLoc = R.getBegin().getRawEncoding();
+ LocInfo.CXXOperatorName.EndOpNameLoc = R.getEnd().getRawEncoding();
+ }
+
+ /// getCXXLiteralOperatorNameLoc - Returns the location of the literal
+ /// operator name (not the operator keyword).
+ /// Assumes it is a literal operator.
+ SourceLocation getCXXLiteralOperatorNameLoc() const {
+ assert(Name.getNameKind() == DeclarationName::CXXLiteralOperatorName);
+ return SourceLocation::
+ getFromRawEncoding(LocInfo.CXXLiteralOperatorName.OpNameLoc);
+ }
+ /// setCXXLiteralOperatorNameLoc - Sets the location of the literal
+ /// operator name (not the operator keyword).
+ /// Assumes it is a literal operator.
+ void setCXXLiteralOperatorNameLoc(SourceLocation Loc) {
+ assert(Name.getNameKind() == DeclarationName::CXXLiteralOperatorName);
+ LocInfo.CXXLiteralOperatorName.OpNameLoc = Loc.getRawEncoding();
+ }
+
+ /// \brief Determine whether this name involves a template parameter.
+ bool isInstantiationDependent() const;
+
+ /// \brief Determine whether this name contains an unexpanded
+ /// parameter pack.
+ bool containsUnexpandedParameterPack() const;
+
+ /// getAsString - Retrieve the human-readable string for this name.
+ std::string getAsString() const;
+
+ /// printName - Print the human-readable name to a stream.
+ void printName(raw_ostream &OS) const;
+
+ /// getBeginLoc - Retrieve the location of the first token.
+ SourceLocation getBeginLoc() const { return NameLoc; }
+ /// getEndLoc - Retrieve the location of the last token.
+ SourceLocation getEndLoc() const;
+ /// getSourceRange - The range of the declaration name.
+ SourceRange getSourceRange() const LLVM_READONLY {
+ SourceLocation BeginLoc = getBeginLoc();
+ SourceLocation EndLoc = getEndLoc();
+ return SourceRange(BeginLoc, EndLoc.isValid() ? EndLoc : BeginLoc);
+ }
+ SourceLocation getLocStart() const LLVM_READONLY {
+ return getBeginLoc();
+ }
+ SourceLocation getLocEnd() const LLVM_READONLY {
+ SourceLocation EndLoc = getEndLoc();
+ return EndLoc.isValid() ? EndLoc : getLocStart();
+ }
+};
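+
+// Illustrative sketch (hypothetical helper): bundling a name with its main
+// location; with no extra location info, getSourceRange() collapses to the
+// single NameLoc as documented above.
+inline SourceRange getNameRange(DeclarationName N, SourceLocation NameLoc) {
+  DeclarationNameInfo NameInfo(N, NameLoc);
+  return NameInfo.getSourceRange();
+}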
+
+/// Insertion operator for diagnostics. This allows sending DeclarationName's
+/// into a diagnostic with <<.
+inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ DeclarationName N) {
+ DB.AddTaggedVal(N.getAsOpaqueInteger(),
+ DiagnosticsEngine::ak_declarationname);
+ return DB;
+}
+
+/// Insertion operator for partial diagnostics. This allows binding
+/// DeclarationName's into a partial diagnostic with <<.
+inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
+ DeclarationName N) {
+ PD.AddTaggedVal(N.getAsOpaqueInteger(),
+ DiagnosticsEngine::ak_declarationname);
+ return PD;
+}
+
+inline raw_ostream &operator<<(raw_ostream &OS,
+ DeclarationNameInfo DNInfo) {
+ DNInfo.printName(OS);
+ return OS;
+}
+
+} // end namespace clang
+
+namespace llvm {
+/// Define DenseMapInfo so that DeclarationNames can be used as keys
+/// in DenseMap and DenseSets.
+template<>
+struct DenseMapInfo<clang::DeclarationName> {
+ static inline clang::DeclarationName getEmptyKey() {
+ return clang::DeclarationName::getEmptyMarker();
+ }
+
+ static inline clang::DeclarationName getTombstoneKey() {
+ return clang::DeclarationName::getTombstoneMarker();
+ }
+
+ static unsigned getHashValue(clang::DeclarationName);
+
+ static inline bool
+ isEqual(clang::DeclarationName LHS, clang::DeclarationName RHS) {
+ return LHS == RHS;
+ }
+};
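+
+// A minimal sketch of what the traits above enable (illustrative only, not
+// part of the interface above): DeclarationName can key a DenseMap directly.
+//
+//   llvm::DenseMap<clang::DeclarationName, unsigned> Uses;
+//   ++Uses[ND->getDeclName()];   // ND is assumed to be a clang::NamedDecl*.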
+
+template <>
+struct isPodLike<clang::DeclarationName> { static const bool value = true; };
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/DependentDiagnostic.h b/contrib/llvm/tools/clang/include/clang/AST/DependentDiagnostic.h
new file mode 100644
index 0000000..948dcb4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/DependentDiagnostic.h
@@ -0,0 +1,192 @@
+//===-- DependentDiagnostic.h - Dependently-generated diagnostics -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines interfaces for diagnostics which may or may not
+// fire based on how a template is instantiated.
+//
+// At the moment, the only consumer of this interface is access
+// control.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_DEPENDENT_DIAGNOSTIC_H
+#define LLVM_CLANG_AST_DEPENDENT_DIAGNOSTIC_H
+
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclContextInternals.h"
+#include "clang/AST/Type.h"
+
+namespace clang {
+
+class ASTContext;
+class CXXRecordDecl;
+class NamedDecl;
+
+/// A dependently-generated diagnostic.
+class DependentDiagnostic {
+public:
+ enum AccessNonce { Access = 0 };
+
+ static DependentDiagnostic *Create(ASTContext &Context,
+ DeclContext *Parent,
+ AccessNonce _,
+ SourceLocation Loc,
+ bool IsMemberAccess,
+ AccessSpecifier AS,
+ NamedDecl *TargetDecl,
+ CXXRecordDecl *NamingClass,
+ QualType BaseObjectType,
+ const PartialDiagnostic &PDiag) {
+ DependentDiagnostic *DD = Create(Context, Parent, PDiag);
+ DD->AccessData.Loc = Loc.getRawEncoding();
+ DD->AccessData.IsMember = IsMemberAccess;
+ DD->AccessData.Access = AS;
+ DD->AccessData.TargetDecl = TargetDecl;
+ DD->AccessData.NamingClass = NamingClass;
+ DD->AccessData.BaseObjectType = BaseObjectType.getAsOpaquePtr();
+ return DD;
+ }
+
+ unsigned getKind() const {
+ return Access;
+ }
+
+ bool isAccessToMember() const {
+ assert(getKind() == Access);
+ return AccessData.IsMember;
+ }
+
+ AccessSpecifier getAccess() const {
+ assert(getKind() == Access);
+ return AccessSpecifier(AccessData.Access);
+ }
+
+ SourceLocation getAccessLoc() const {
+ assert(getKind() == Access);
+ return SourceLocation::getFromRawEncoding(AccessData.Loc);
+ }
+
+ NamedDecl *getAccessTarget() const {
+ assert(getKind() == Access);
+ return AccessData.TargetDecl;
+ }
+
+ NamedDecl *getAccessNamingClass() const {
+ assert(getKind() == Access);
+ return AccessData.NamingClass;
+ }
+
+ QualType getAccessBaseObjectType() const {
+ assert(getKind() == Access);
+ return QualType::getFromOpaquePtr(AccessData.BaseObjectType);
+ }
+
+ const PartialDiagnostic &getDiagnostic() const {
+ return Diag;
+ }
+
+private:
+ DependentDiagnostic(const PartialDiagnostic &PDiag,
+ PartialDiagnostic::Storage *Storage)
+ : Diag(PDiag, Storage) {}
+
+ static DependentDiagnostic *Create(ASTContext &Context,
+ DeclContext *Parent,
+ const PartialDiagnostic &PDiag);
+
+ friend class DependentStoredDeclsMap;
+ friend class DeclContext::ddiag_iterator;
+ DependentDiagnostic *NextDiagnostic;
+
+ PartialDiagnostic Diag;
+
+ union {
+ struct {
+ unsigned Loc;
+ unsigned Access : 2;
+ unsigned IsMember : 1;
+ NamedDecl *TargetDecl;
+ CXXRecordDecl *NamingClass;
+ void *BaseObjectType;
+ } AccessData;
+ };
+};
+
+/// An iterator over the dependent diagnostics in a dependent context.
+class DeclContext::ddiag_iterator {
+public:
+ ddiag_iterator() : Ptr(0) {}
+ explicit ddiag_iterator(DependentDiagnostic *Ptr) : Ptr(Ptr) {}
+
+ typedef DependentDiagnostic *value_type;
+ typedef DependentDiagnostic *reference;
+ typedef DependentDiagnostic *pointer;
+ typedef int difference_type;
+ typedef std::forward_iterator_tag iterator_category;
+
+ reference operator*() const { return Ptr; }
+
+ ddiag_iterator &operator++() {
+ assert(Ptr && "attempt to increment past end of diag list");
+ Ptr = Ptr->NextDiagnostic;
+ return *this;
+ }
+
+ ddiag_iterator operator++(int) {
+ ddiag_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ bool operator==(ddiag_iterator Other) const {
+ return Ptr == Other.Ptr;
+ }
+
+ bool operator!=(ddiag_iterator Other) const {
+ return Ptr != Other.Ptr;
+ }
+
+ ddiag_iterator &operator+=(difference_type N) {
+ assert(N >= 0 && "cannot rewind a DeclContext::ddiag_iterator");
+ while (N--)
+ ++*this;
+ return *this;
+ }
+
+ ddiag_iterator operator+(difference_type N) const {
+ ddiag_iterator tmp = *this;
+ tmp += N;
+ return tmp;
+ }
+
+private:
+ DependentDiagnostic *Ptr;
+};
+
+inline DeclContext::ddiag_iterator DeclContext::ddiag_begin() const {
+ assert(isDependentContext()
+ && "cannot iterate dependent diagnostics of non-dependent context");
+ const DependentStoredDeclsMap *Map
+ = static_cast<DependentStoredDeclsMap*>(getPrimaryContext()->getLookupPtr());
+
+ if (!Map) return ddiag_iterator();
+ return ddiag_iterator(Map->FirstDiagnostic);
+}
+
+inline DeclContext::ddiag_iterator DeclContext::ddiag_end() const {
+ return ddiag_iterator();
+}
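+
+// Hedged usage sketch (illustrative only, not part of the interface above):
+// the accessors above allow walking the dependent diagnostics of a context.
+// 'DC' is assumed to be a DeclContext* for which isDependentContext() holds.
+//
+//   for (clang::DeclContext::ddiag_iterator I = DC->ddiag_begin(),
+//                                           E = DC->ddiag_end();
+//        I != E; ++I)
+//     if ((*I)->getKind() == clang::DependentDiagnostic::Access)
+//       (void)(*I)->getAccessTarget();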
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/EvaluatedExprVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/EvaluatedExprVisitor.h
new file mode 100644
index 0000000..bab1606
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/EvaluatedExprVisitor.h
@@ -0,0 +1,83 @@
+//===--- EvaluatedExprVisitor.h - Evaluated expression visitor --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the EvaluatedExprVisitor class template, which visits
+// the potentially-evaluated subexpressions of a potentially-evaluated
+// expression.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_AST_EVALUATEDEXPRVISITOR_H
+#define LLVM_CLANG_AST_EVALUATEDEXPRVISITOR_H
+
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+
+namespace clang {
+
+class ASTContext;
+
+/// \brief Given a potentially-evaluated expression, this visitor visits all
+/// of its potentially-evaluated subexpressions, recursively.
+template<typename ImplClass>
+class EvaluatedExprVisitor : public StmtVisitor<ImplClass> {
+ ASTContext &Context;
+
+public:
+ explicit EvaluatedExprVisitor(ASTContext &Context) : Context(Context) { }
+
+ // Expressions that have no potentially-evaluated subexpressions (but may have
+ // other sub-expressions).
+ void VisitDeclRefExpr(DeclRefExpr *E) { }
+ void VisitOffsetOfExpr(OffsetOfExpr *E) { }
+ void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) { }
+ void VisitExpressionTraitExpr(ExpressionTraitExpr *E) { }
+ void VisitBlockExpr(BlockExpr *E) { }
+ void VisitCXXUuidofExpr(CXXUuidofExpr *E) { }
+ void VisitCXXNoexceptExpr(CXXNoexceptExpr *E) { }
+
+ void VisitMemberExpr(MemberExpr *E) {
+ // Only the base matters.
+ return this->Visit(E->getBase());
+ }
+
+ void VisitChooseExpr(ChooseExpr *E) {
+ // Only the selected subexpression matters; the other one is not evaluated.
+ return this->Visit(E->getChosenSubExpr(Context));
+ }
+
+ void VisitDesignatedInitExpr(DesignatedInitExpr *E) {
+ // Only the actual initializer matters; the designators are all constant
+ // expressions.
+ return this->Visit(E->getInit());
+ }
+
+ void VisitCXXTypeidExpr(CXXTypeidExpr *E) {
+ // typeid(expression) is potentially evaluated when the argument is
+ // a glvalue of polymorphic type. (C++ 5.2.8p2-3)
+ if (!E->isTypeOperand() && E->Classify(Context).isGLValue())
+ if (const RecordType *Record
+ = E->getExprOperand()->getType()->template getAs<RecordType>())
+ if (cast<CXXRecordDecl>(Record->getDecl())->isPolymorphic())
+ return this->Visit(E->getExprOperand());
+ }
+
+  /// \brief The base case walks all of the children of the statement or
+ /// expression, assuming they are all potentially evaluated.
+ void VisitStmt(Stmt *S) {
+ for (Stmt::child_range C = S->children(); C; ++C)
+ if (*C)
+ this->Visit(*C);
+ }
+};
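+
+// A hedged sketch of the usual CRTP usage (illustrative only, not part of the
+// interface above): derive from the visitor and override the Visit* methods
+// of interest; everything else falls through to VisitStmt above.
+//
+//   struct RefCollector : clang::EvaluatedExprVisitor<RefCollector> {
+//     llvm::SmallVector<clang::DeclRefExpr *, 8> Refs;
+//     explicit RefCollector(clang::ASTContext &Ctx)
+//         : clang::EvaluatedExprVisitor<RefCollector>(Ctx) {}
+//     void VisitDeclRefExpr(clang::DeclRefExpr *E) { Refs.push_back(E); }
+//   };
+//
+//   RefCollector Collector(Ctx);
+//   Collector.Visit(E);   // E: an Expr*; Collector.Refs now holds the
+//                         // potentially-evaluated DeclRefExprs under E.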
+
+}
+
+#endif // LLVM_CLANG_AST_EVALUATEDEXPRVISITOR_H
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Expr.h b/contrib/llvm/tools/clang/include/clang/AST/Expr.h
new file mode 100644
index 0000000..558bd00
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/Expr.h
@@ -0,0 +1,4567 @@
+//===--- Expr.h - Classes for representing expressions ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Expr interface and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_EXPR_H
+#define LLVM_CLANG_AST_EXPR_H
+
+#include "clang/AST/APValue.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/DeclAccessPair.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/ASTVector.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TypeTraits.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Compiler.h"
+#include <cctype>
+
+namespace clang {
+ class ASTContext;
+ class APValue;
+ class Decl;
+ class IdentifierInfo;
+ class ParmVarDecl;
+ class NamedDecl;
+ class ValueDecl;
+ class BlockDecl;
+ class CXXBaseSpecifier;
+ class CXXOperatorCallExpr;
+ class CXXMemberCallExpr;
+ class ObjCPropertyRefExpr;
+ class OpaqueValueExpr;
+
+/// \brief A simple array of base specifiers.
+typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
+
+/// Expr - This represents one expression. Note that Exprs are subclasses of
+/// Stmt. This allows an expression to be transparently used any place a Stmt
+/// is required.
+///
+class Expr : public Stmt {
+ QualType TR;
+
+protected:
+ Expr(StmtClass SC, QualType T, ExprValueKind VK, ExprObjectKind OK,
+ bool TD, bool VD, bool ID, bool ContainsUnexpandedParameterPack)
+ : Stmt(SC)
+ {
+ ExprBits.TypeDependent = TD;
+ ExprBits.ValueDependent = VD;
+ ExprBits.InstantiationDependent = ID;
+ ExprBits.ValueKind = VK;
+ ExprBits.ObjectKind = OK;
+ ExprBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
+ setType(T);
+ }
+
+ /// \brief Construct an empty expression.
+ explicit Expr(StmtClass SC, EmptyShell) : Stmt(SC) { }
+
+public:
+ QualType getType() const { return TR; }
+ void setType(QualType t) {
+ // In C++, the type of an expression is always adjusted so that it
+    // will not have reference type; an expression will never have
+    // reference type (C++ [expr]p6). Use
+    // QualType::getNonReferenceType() to retrieve the non-reference
+    // type. Additionally, inspect Expr::isLValue to determine whether
+    // an expression that is adjusted in this manner should be
+ // considered an lvalue.
+ assert((t.isNull() || !t->isReferenceType()) &&
+ "Expressions can't have reference type");
+
+ TR = t;
+ }
+
+ /// isValueDependent - Determines whether this expression is
+ /// value-dependent (C++ [temp.dep.constexpr]). For example, the
+ /// array bound of "Chars" in the following example is
+ /// value-dependent.
+ /// @code
+ /// template<int Size, char (&Chars)[Size]> struct meta_string;
+ /// @endcode
+ bool isValueDependent() const { return ExprBits.ValueDependent; }
+
+ /// \brief Set whether this expression is value-dependent or not.
+ void setValueDependent(bool VD) {
+ ExprBits.ValueDependent = VD;
+ if (VD)
+ ExprBits.InstantiationDependent = true;
+ }
+
+ /// isTypeDependent - Determines whether this expression is
+ /// type-dependent (C++ [temp.dep.expr]), which means that its type
+ /// could change from one template instantiation to the next. For
+ /// example, the expressions "x" and "x + y" are type-dependent in
+ /// the following code, but "y" is not type-dependent:
+ /// @code
+ /// template<typename T>
+ /// void add(T x, int y) {
+ /// x + y;
+ /// }
+ /// @endcode
+ bool isTypeDependent() const { return ExprBits.TypeDependent; }
+
+ /// \brief Set whether this expression is type-dependent or not.
+ void setTypeDependent(bool TD) {
+ ExprBits.TypeDependent = TD;
+ if (TD)
+ ExprBits.InstantiationDependent = true;
+ }
+
+ /// \brief Whether this expression is instantiation-dependent, meaning that
+ /// it depends in some way on a template parameter, even if neither its type
+ /// nor (constant) value can change due to the template instantiation.
+ ///
+ /// In the following example, the expression \c sizeof(sizeof(T() + T())) is
+ /// instantiation-dependent (since it involves a template parameter \c T), but
+ /// is neither type- nor value-dependent, since the type of the inner
+ /// \c sizeof is known (\c std::size_t) and therefore the size of the outer
+ /// \c sizeof is known.
+ ///
+ /// \code
+ /// template<typename T>
+ /// void f(T x, T y) {
+  ///     sizeof(sizeof(T() + T()));
+ /// }
+ /// \endcode
+ ///
+ bool isInstantiationDependent() const {
+ return ExprBits.InstantiationDependent;
+ }
+
+ /// \brief Set whether this expression is instantiation-dependent or not.
+ void setInstantiationDependent(bool ID) {
+ ExprBits.InstantiationDependent = ID;
+ }
+
+ /// \brief Whether this expression contains an unexpanded parameter
+ /// pack (for C++0x variadic templates).
+ ///
+ /// Given the following function template:
+ ///
+ /// \code
+ /// template<typename F, typename ...Types>
+ /// void forward(const F &f, Types &&...args) {
+ /// f(static_cast<Types&&>(args)...);
+ /// }
+ /// \endcode
+ ///
+ /// The expressions \c args and \c static_cast<Types&&>(args) both
+ /// contain parameter packs.
+ bool containsUnexpandedParameterPack() const {
+ return ExprBits.ContainsUnexpandedParameterPack;
+ }
+
+ /// \brief Set the bit that describes whether this expression
+ /// contains an unexpanded parameter pack.
+ void setContainsUnexpandedParameterPack(bool PP = true) {
+ ExprBits.ContainsUnexpandedParameterPack = PP;
+ }
+
+ /// getExprLoc - Return the preferred location for the arrow when diagnosing
+ /// a problem with a generic expression.
+ SourceLocation getExprLoc() const LLVM_READONLY;
+
+ /// isUnusedResultAWarning - Return true if this immediate expression should
+ /// be warned about if the result is unused. If so, fill in Loc and Ranges
+ /// with location to warn on and the source range[s] to report with the
+ /// warning.
+ bool isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
+ SourceRange &R2, ASTContext &Ctx) const;
+
+ /// isLValue - True if this expression is an "l-value" according to
+ /// the rules of the current language. C and C++ give somewhat
+ /// different rules for this concept, but in general, the result of
+ /// an l-value expression identifies a specific object whereas the
+ /// result of an r-value expression is a value detached from any
+ /// specific storage.
+ ///
+ /// C++0x divides the concept of "r-value" into pure r-values
+ /// ("pr-values") and so-called expiring values ("x-values"), which
+ /// identify specific objects that can be safely cannibalized for
+ /// their resources. This is an unfortunate abuse of terminology on
+ /// the part of the C++ committee. In Clang, when we say "r-value",
+ /// we generally mean a pr-value.
+ bool isLValue() const { return getValueKind() == VK_LValue; }
+ bool isRValue() const { return getValueKind() == VK_RValue; }
+ bool isXValue() const { return getValueKind() == VK_XValue; }
+ bool isGLValue() const { return getValueKind() != VK_RValue; }
+
+ enum LValueClassification {
+ LV_Valid,
+ LV_NotObjectType,
+ LV_IncompleteVoidType,
+ LV_DuplicateVectorComponents,
+ LV_InvalidExpression,
+ LV_InvalidMessageExpression,
+ LV_MemberFunction,
+ LV_SubObjCPropertySetting,
+ LV_ClassTemporary
+ };
+ /// Reasons why an expression might not be an l-value.
+ LValueClassification ClassifyLValue(ASTContext &Ctx) const;
+
+ /// isModifiableLvalue - C99 6.3.2.1: an lvalue that does not have array type,
+ /// does not have an incomplete type, does not have a const-qualified type,
+ /// and if it is a structure or union, does not have any member (including,
+ /// recursively, any member or element of all contained aggregates or unions)
+ /// with a const-qualified type.
+ ///
+ /// \param Loc [in] [out] - A source location which *may* be filled
+ /// in with the location of the expression making this a
+ /// non-modifiable lvalue, if specified.
+ enum isModifiableLvalueResult {
+ MLV_Valid,
+ MLV_NotObjectType,
+ MLV_IncompleteVoidType,
+ MLV_DuplicateVectorComponents,
+ MLV_InvalidExpression,
+ MLV_LValueCast, // Specialized form of MLV_InvalidExpression.
+ MLV_IncompleteType,
+ MLV_ConstQualified,
+ MLV_ArrayType,
+ MLV_ReadonlyProperty,
+ MLV_NoSetterProperty,
+ MLV_MemberFunction,
+ MLV_SubObjCPropertySetting,
+ MLV_InvalidMessageExpression,
+ MLV_ClassTemporary
+ };
+ isModifiableLvalueResult isModifiableLvalue(ASTContext &Ctx,
+ SourceLocation *Loc = 0) const;
+
+ /// \brief The return type of classify(). Represents the C++0x expression
+ /// taxonomy.
+ class Classification {
+ public:
+ /// \brief The various classification results. Most of these mean prvalue.
+ enum Kinds {
+ CL_LValue,
+ CL_XValue,
+ CL_Function, // Functions cannot be lvalues in C.
+ CL_Void, // Void cannot be an lvalue in C.
+ CL_AddressableVoid, // Void expression whose address can be taken in C.
+ CL_DuplicateVectorComponents, // A vector shuffle with dupes.
+ CL_MemberFunction, // An expression referring to a member function
+ CL_SubObjCPropertySetting,
+ CL_ClassTemporary, // A prvalue of class type
+ CL_ObjCMessageRValue, // ObjC message is an rvalue
+ CL_PRValue // A prvalue for any other reason, of any other type
+ };
+ /// \brief The results of modification testing.
+ enum ModifiableType {
+ CM_Untested, // testModifiable was false.
+ CM_Modifiable,
+ CM_RValue, // Not modifiable because it's an rvalue
+ CM_Function, // Not modifiable because it's a function; C++ only
+ CM_LValueCast, // Same as CM_RValue, but indicates GCC cast-as-lvalue ext
+ CM_NoSetterProperty,// Implicit assignment to ObjC property without setter
+ CM_ConstQualified,
+ CM_ArrayType,
+ CM_IncompleteType
+ };
+
+ private:
+ friend class Expr;
+
+ unsigned short Kind;
+ unsigned short Modifiable;
+
+ explicit Classification(Kinds k, ModifiableType m)
+ : Kind(k), Modifiable(m)
+ {}
+
+ public:
+ Classification() {}
+
+ Kinds getKind() const { return static_cast<Kinds>(Kind); }
+ ModifiableType getModifiable() const {
+ assert(Modifiable != CM_Untested && "Did not test for modifiability.");
+ return static_cast<ModifiableType>(Modifiable);
+ }
+ bool isLValue() const { return Kind == CL_LValue; }
+ bool isXValue() const { return Kind == CL_XValue; }
+ bool isGLValue() const { return Kind <= CL_XValue; }
+ bool isPRValue() const { return Kind >= CL_Function; }
+ bool isRValue() const { return Kind >= CL_XValue; }
+ bool isModifiable() const { return getModifiable() == CM_Modifiable; }
+
+    /// \brief Create a simple, modifiable lvalue
+ static Classification makeSimpleLValue() {
+ return Classification(CL_LValue, CM_Modifiable);
+ }
+
+ };
+ /// \brief Classify - Classify this expression according to the C++0x
+ /// expression taxonomy.
+ ///
+ /// C++0x defines ([basic.lval]) a new taxonomy of expressions to replace the
+ /// old lvalue vs rvalue. This function determines the type of expression this
+ /// is. There are three expression types:
+ /// - lvalues are classical lvalues as in C++03.
+ /// - prvalues are equivalent to rvalues in C++03.
+ /// - xvalues are expressions yielding unnamed rvalue references, e.g. a
+ /// function returning an rvalue reference.
+ /// lvalues and xvalues are collectively referred to as glvalues, while
+ /// prvalues and xvalues together form rvalues.
+ Classification Classify(ASTContext &Ctx) const {
+ return ClassifyImpl(Ctx, 0);
+ }
+
+ /// \brief ClassifyModifiable - Classify this expression according to the
+ /// C++0x expression taxonomy, and see if it is valid on the left side
+ /// of an assignment.
+ ///
+ /// This function extends classify in that it also tests whether the
+ /// expression is modifiable (C99 6.3.2.1p1).
+ /// \param Loc A source location that might be filled with a relevant location
+ /// if the expression is not modifiable.
+ Classification ClassifyModifiable(ASTContext &Ctx, SourceLocation &Loc) const{
+ return ClassifyImpl(Ctx, &Loc);
+ }
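+
+  // Illustrative sketch (not part of the interface above): a caller that only
+  // cares whether an expression is a glvalue can classify and test, assuming
+  // 'E' is an Expr* and 'Ctx' an ASTContext:
+  //
+  //   if (E->Classify(Ctx).isGLValue())
+  //     /* handle lvalues and xvalues */;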
+
+ /// getValueKindForType - Given a formal return or parameter type,
+ /// give its value kind.
+ static ExprValueKind getValueKindForType(QualType T) {
+ if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ return (isa<LValueReferenceType>(RT)
+ ? VK_LValue
+ : (RT->getPointeeType()->isFunctionType()
+ ? VK_LValue : VK_XValue));
+ return VK_RValue;
+ }
+
+ /// getValueKind - The value kind that this expression produces.
+ ExprValueKind getValueKind() const {
+ return static_cast<ExprValueKind>(ExprBits.ValueKind);
+ }
+
+ /// getObjectKind - The object kind that this expression produces.
+ /// Object kinds are meaningful only for expressions that yield an
+ /// l-value or x-value.
+ ExprObjectKind getObjectKind() const {
+ return static_cast<ExprObjectKind>(ExprBits.ObjectKind);
+ }
+
+ bool isOrdinaryOrBitFieldObject() const {
+ ExprObjectKind OK = getObjectKind();
+ return (OK == OK_Ordinary || OK == OK_BitField);
+ }
+
+ /// setValueKind - Set the value kind produced by this expression.
+ void setValueKind(ExprValueKind Cat) { ExprBits.ValueKind = Cat; }
+
+ /// setObjectKind - Set the object kind produced by this expression.
+ void setObjectKind(ExprObjectKind Cat) { ExprBits.ObjectKind = Cat; }
+
+private:
+ Classification ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const;
+
+public:
+
+ /// \brief If this expression refers to a bit-field, retrieve the
+ /// declaration of that bit-field.
+ FieldDecl *getBitField();
+
+ const FieldDecl *getBitField() const {
+ return const_cast<Expr*>(this)->getBitField();
+ }
+
+ /// \brief If this expression is an l-value for an Objective C
+ /// property, find the underlying property reference expression.
+ const ObjCPropertyRefExpr *getObjCProperty() const;
+
+ /// \brief Returns whether this expression refers to a vector element.
+ bool refersToVectorElement() const;
+
+ /// \brief Returns whether this expression has a placeholder type.
+ bool hasPlaceholderType() const {
+ return getType()->isPlaceholderType();
+ }
+
+ /// \brief Returns whether this expression has a specific placeholder type.
+ bool hasPlaceholderType(BuiltinType::Kind K) const {
+ assert(BuiltinType::isPlaceholderTypeKind(K));
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(getType()))
+ return BT->getKind() == K;
+ return false;
+ }
+
+ /// isKnownToHaveBooleanValue - Return true if this is an integer expression
+ /// that is known to return 0 or 1. This happens for _Bool/bool expressions
+ /// but also int expressions which are produced by things like comparisons in
+ /// C.
+ bool isKnownToHaveBooleanValue() const;
+
+ /// isIntegerConstantExpr - Return true if this expression is a valid integer
+ /// constant expression, and, if so, return its value in Result. If not a
+ /// valid i-c-e, return false and fill in Loc (if specified) with the location
+ /// of the invalid expression.
+ ///
+ /// Note: This does not perform the implicit conversions required by C++11
+ /// [expr.const]p5.
+ bool isIntegerConstantExpr(llvm::APSInt &Result, ASTContext &Ctx,
+ SourceLocation *Loc = 0,
+ bool isEvaluated = true) const;
+ bool isIntegerConstantExpr(ASTContext &Ctx, SourceLocation *Loc = 0) const;
+
+ /// isCXX98IntegralConstantExpr - Return true if this expression is an
+ /// integral constant expression in C++98. Can only be used in C++.
+ bool isCXX98IntegralConstantExpr(ASTContext &Ctx) const;
+
+ /// isCXX11ConstantExpr - Return true if this expression is a constant
+ /// expression in C++11. Can only be used in C++.
+ ///
+ /// Note: This does not perform the implicit conversions required by C++11
+ /// [expr.const]p5.
+ bool isCXX11ConstantExpr(ASTContext &Ctx, APValue *Result = 0,
+ SourceLocation *Loc = 0) const;
+
+ /// isPotentialConstantExpr - Return true if this function's definition
+ /// might be usable in a constant expression in C++11, if it were marked
+ /// constexpr. Return false if the function can never produce a constant
+ /// expression, along with diagnostics describing why not.
+ static bool isPotentialConstantExpr(const FunctionDecl *FD,
+ llvm::SmallVectorImpl<
+ PartialDiagnosticAt> &Diags);
+
+ /// isConstantInitializer - Returns true if this expression can be emitted to
+ /// IR as a constant, and thus can be used as a constant initializer in C.
+ bool isConstantInitializer(ASTContext &Ctx, bool ForRef) const;
+
+ /// EvalStatus is a struct with detailed info about an evaluation in progress.
+ struct EvalStatus {
+ /// HasSideEffects - Whether the evaluated expression has side effects.
+ /// For example, (f() && 0) can be folded, but it still has side effects.
+ bool HasSideEffects;
+
+ /// Diag - If this is non-null, it will be filled in with a stack of notes
+ /// indicating why evaluation failed (or why it failed to produce a constant
+ /// expression).
+ /// If the expression is unfoldable, the notes will indicate why it's not
+ /// foldable. If the expression is foldable, but not a constant expression,
+    /// the notes will describe why it isn't a constant expression. If the
+ /// expression *is* a constant expression, no notes will be produced.
+ llvm::SmallVectorImpl<PartialDiagnosticAt> *Diag;
+
+ EvalStatus() : HasSideEffects(false), Diag(0) {}
+
+ // hasSideEffects - Return true if the evaluated expression has
+ // side effects.
+ bool hasSideEffects() const {
+ return HasSideEffects;
+ }
+ };
+
+ /// EvalResult is a struct with detailed info about an evaluated expression.
+ struct EvalResult : EvalStatus {
+ /// Val - This is the value the expression can be folded to.
+ APValue Val;
+
+ // isGlobalLValue - Return true if the evaluated lvalue expression
+ // is global.
+ bool isGlobalLValue() const;
+ };
+
+ /// EvaluateAsRValue - Return true if this is a constant which we can fold to
+ /// an rvalue using any crazy technique (that has nothing to do with language
+ /// standards) that we want to, even if the expression has side-effects. If
+ /// this function returns true, it returns the folded constant in Result. If
+ /// the expression is a glvalue, an lvalue-to-rvalue conversion will be
+ /// applied.
+ bool EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx) const;
+
+ /// EvaluateAsBooleanCondition - Return true if this is a constant
+  /// which we can fold and convert to a boolean condition using
+ /// any crazy technique that we want to, even if the expression has
+ /// side-effects.
+ bool EvaluateAsBooleanCondition(bool &Result, const ASTContext &Ctx) const;
+
+ enum SideEffectsKind { SE_NoSideEffects, SE_AllowSideEffects };
+
+ /// EvaluateAsInt - Return true if this is a constant which we can fold and
+ /// convert to an integer, using any crazy technique that we want to.
+ bool EvaluateAsInt(llvm::APSInt &Result, const ASTContext &Ctx,
+ SideEffectsKind AllowSideEffects = SE_NoSideEffects) const;
+
+ /// isEvaluatable - Call EvaluateAsRValue to see if this expression can be
+ /// constant folded without side-effects, but discard the result.
+ bool isEvaluatable(const ASTContext &Ctx) const;
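+
+  // Hedged usage sketch (illustrative only, not part of the interface above):
+  // folding an expression to an integer with the API above, assuming 'Size'
+  // is an Expr* and 'Ctx' an ASTContext.
+  //
+  //   llvm::APSInt Value;
+  //   if (Size->EvaluateAsInt(Value, Ctx)) {
+  //     // Value now holds the folded constant; e.g. Value.getZExtValue().
+  //   }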
+
+ /// HasSideEffects - This routine returns true for all those expressions
+ /// which must be evaluated each time and must not be optimized away
+ /// or evaluated at compile time. Example is a function call, volatile
+ /// variable read.
+ bool HasSideEffects(const ASTContext &Ctx) const;
+
+ /// \brief Determine whether this expression involves a call to any function
+ /// that is not trivial.
+ bool hasNonTrivialCall(ASTContext &Ctx);
+
+ /// EvaluateKnownConstInt - Call EvaluateAsRValue and return the folded
+ /// integer. This must be called on an expression that constant folds to an
+ /// integer.
+ llvm::APSInt EvaluateKnownConstInt(const ASTContext &Ctx) const;
+
+ /// EvaluateAsLValue - Evaluate an expression to see if we can fold it to an
+ /// lvalue with link time known address, with no side-effects.
+ bool EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx) const;
+
+ /// EvaluateAsInitializer - Evaluate an expression as if it were the
+ /// initializer of the given declaration. Returns true if the initializer
+ /// can be folded to a constant, and produces any relevant notes. In C++11,
+ /// notes will be produced if the expression is not a constant expression.
+ bool EvaluateAsInitializer(APValue &Result, const ASTContext &Ctx,
+ const VarDecl *VD,
+ llvm::SmallVectorImpl<PartialDiagnosticAt> &Notes) const;
+
+ /// \brief Enumeration used to describe the kind of Null pointer constant
+ /// returned from \c isNullPointerConstant().
+ enum NullPointerConstantKind {
+ /// \brief Expression is not a Null pointer constant.
+ NPCK_NotNull = 0,
+
+ /// \brief Expression is a Null pointer constant built from a zero integer.
+ NPCK_ZeroInteger,
+
+ /// \brief Expression is a C++0X nullptr.
+ NPCK_CXX0X_nullptr,
+
+ /// \brief Expression is a GNU-style __null constant.
+ NPCK_GNUNull
+ };
+
+ /// \brief Enumeration used to describe how \c isNullPointerConstant()
+ /// should cope with value-dependent expressions.
+ enum NullPointerConstantValueDependence {
+ /// \brief Specifies that the expression should never be value-dependent.
+ NPC_NeverValueDependent = 0,
+
+ /// \brief Specifies that a value-dependent expression of integral or
+ /// dependent type should be considered a null pointer constant.
+ NPC_ValueDependentIsNull,
+
+ /// \brief Specifies that a value-dependent expression should be considered
+ /// to never be a null pointer constant.
+ NPC_ValueDependentIsNotNull
+ };
+
+ /// isNullPointerConstant - C99 6.3.2.3p3 - Test if this reduces down to
+ /// a Null pointer constant. The return value can further distinguish the
+ /// kind of NULL pointer constant that was detected.
+ NullPointerConstantKind isNullPointerConstant(
+ ASTContext &Ctx,
+ NullPointerConstantValueDependence NPC) const;
+
+ /// isOBJCGCCandidate - Return true if this expression may be used in a read/
+ /// write barrier.
+ bool isOBJCGCCandidate(ASTContext &Ctx) const;
+
+ /// \brief Returns true if this expression is a bound member function.
+ bool isBoundMemberFunction(ASTContext &Ctx) const;
+
+ /// \brief Given an expression of bound-member type, find the type
+ /// of the member. Returns null if this is an *overloaded* bound
+ /// member expression.
+ static QualType findBoundMemberType(const Expr *expr);
+
+ /// \brief Result type of CanThrow().
+ enum CanThrowResult {
+ CT_Cannot,
+ CT_Dependent,
+ CT_Can
+ };
+ /// \brief Test if this expression, if evaluated, might throw, according to
+ /// the rules of C++ [expr.unary.noexcept].
+ CanThrowResult CanThrow(ASTContext &C) const;
+
+ /// IgnoreImpCasts - Skip past any implicit casts which might
+ /// surround this expression. Only skips ImplicitCastExprs.
+ Expr *IgnoreImpCasts() LLVM_READONLY;
+
+ /// IgnoreImplicit - Skip past any implicit AST nodes which might
+ /// surround this expression.
+ Expr *IgnoreImplicit() LLVM_READONLY {
+ return cast<Expr>(Stmt::IgnoreImplicit());
+ }
+
+ /// IgnoreParens - Ignore parentheses. If this Expr is a ParenExpr, return
+ /// its subexpression. If that subexpression is also a ParenExpr,
+ /// then this method recursively returns its subexpression, and so forth.
+ /// Otherwise, the method returns the current Expr.
+ Expr *IgnoreParens() LLVM_READONLY;
+
+ /// IgnoreParenCasts - Ignore parentheses and casts. Strip off any ParenExpr
+ /// or CastExprs, returning their operand.
+ Expr *IgnoreParenCasts() LLVM_READONLY;
+
+ /// IgnoreParenImpCasts - Ignore parentheses and implicit casts. Strip off
+ /// any ParenExpr or ImplicitCastExprs, returning their operand.
+ Expr *IgnoreParenImpCasts() LLVM_READONLY;
+
+ /// IgnoreConversionOperator - Ignore conversion operator. If this Expr is a
+ /// call to a conversion operator, return the argument.
+ Expr *IgnoreConversionOperator() LLVM_READONLY;
+
+ const Expr *IgnoreConversionOperator() const LLVM_READONLY {
+ return const_cast<Expr*>(this)->IgnoreConversionOperator();
+ }
+
+ const Expr *IgnoreParenImpCasts() const LLVM_READONLY {
+ return const_cast<Expr*>(this)->IgnoreParenImpCasts();
+ }
+
+ /// Ignore parentheses and lvalue casts. Strip off any ParenExpr and
+ /// CastExprs that represent lvalue casts, returning their operand.
+ Expr *IgnoreParenLValueCasts() LLVM_READONLY;
+
+ const Expr *IgnoreParenLValueCasts() const LLVM_READONLY {
+ return const_cast<Expr*>(this)->IgnoreParenLValueCasts();
+ }
+
+ /// IgnoreParenNoopCasts - Ignore parentheses and casts that do not change the
+ /// value (including ptr->int casts of the same size). Strip off any
+ /// ParenExpr or CastExprs, returning their operand.
+ Expr *IgnoreParenNoopCasts(ASTContext &Ctx) LLVM_READONLY;
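+
+  // Illustrative sketch (not part of the interface above): a common pattern
+  // is to strip parentheses and implicit casts before pattern-matching on an
+  // expression, assuming 'E' is an Expr*:
+  //
+  //   if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
+  //     (void)DRE->getDecl();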
+
+ /// \brief Determine whether this expression is a default function argument.
+ ///
+ /// Default arguments are implicitly generated in the abstract syntax tree
+ /// by semantic analysis for function calls, object constructions, etc. in
+ /// C++. Default arguments are represented by \c CXXDefaultArgExpr nodes;
+ /// this routine also looks through any implicit casts to determine whether
+ /// the expression is a default argument.
+ bool isDefaultArgument() const;
+
+ /// \brief Determine whether the result of this expression is a
+ /// temporary object of the given class type.
+ bool isTemporaryObject(ASTContext &Ctx, const CXXRecordDecl *TempTy) const;
+
+ /// \brief Whether this expression is an implicit reference to 'this' in C++.
+ bool isImplicitCXXThis() const;
+
+ const Expr *IgnoreImpCasts() const LLVM_READONLY {
+ return const_cast<Expr*>(this)->IgnoreImpCasts();
+ }
+ const Expr *IgnoreParens() const LLVM_READONLY {
+ return const_cast<Expr*>(this)->IgnoreParens();
+ }
+ const Expr *IgnoreParenCasts() const LLVM_READONLY {
+ return const_cast<Expr*>(this)->IgnoreParenCasts();
+ }
+ const Expr *IgnoreParenNoopCasts(ASTContext &Ctx) const LLVM_READONLY {
+ return const_cast<Expr*>(this)->IgnoreParenNoopCasts(Ctx);
+ }
+
+ static bool hasAnyTypeDependentArguments(llvm::ArrayRef<Expr *> Exprs);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() >= firstExprConstant &&
+ T->getStmtClass() <= lastExprConstant;
+ }
+ static bool classof(const Expr *) { return true; }
+};
+
+
+//===----------------------------------------------------------------------===//
+// Primary Expressions.
+//===----------------------------------------------------------------------===//
+
+/// OpaqueValueExpr - An expression referring to an opaque object of a
+/// fixed type and value class. These don't correspond to concrete
+/// syntax; instead they're used to express operations (usually copy
+/// operations) on values whose source is generally obvious from
+/// context.
+class OpaqueValueExpr : public Expr {
+ friend class ASTStmtReader;
+ Expr *SourceExpr;
+ SourceLocation Loc;
+
+public:
+ OpaqueValueExpr(SourceLocation Loc, QualType T, ExprValueKind VK,
+ ExprObjectKind OK = OK_Ordinary,
+ Expr *SourceExpr = 0)
+ : Expr(OpaqueValueExprClass, T, VK, OK,
+ T->isDependentType(),
+ T->isDependentType() ||
+ (SourceExpr && SourceExpr->isValueDependent()),
+ T->isInstantiationDependentType(),
+ false),
+ SourceExpr(SourceExpr), Loc(Loc) {
+ }
+
+ /// Given an expression which invokes a copy constructor --- i.e. a
+ /// CXXConstructExpr, possibly wrapped in an ExprWithCleanups ---
+ /// find the OpaqueValueExpr that's the source of the construction.
+ static const OpaqueValueExpr *findInCopyConstruct(const Expr *expr);
+
+ explicit OpaqueValueExpr(EmptyShell Empty)
+ : Expr(OpaqueValueExprClass, Empty) { }
+
+ /// \brief Retrieve the location of this expression.
+ SourceLocation getLocation() const { return Loc; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ if (SourceExpr) return SourceExpr->getSourceRange();
+ return Loc;
+ }
+ SourceLocation getExprLoc() const LLVM_READONLY {
+ if (SourceExpr) return SourceExpr->getExprLoc();
+ return Loc;
+ }
+
+ child_range children() { return child_range(); }
+
+ /// The source expression of an opaque value expression is the
+ /// expression which originally generated the value. This is
+ /// provided as a convenience for analyses that don't wish to
+ /// precisely model the execution behavior of the program.
+ ///
+ /// The source expression is typically set when building the
+ /// expression which binds the opaque value expression in the first
+ /// place.
+ Expr *getSourceExpr() const { return SourceExpr; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OpaqueValueExprClass;
+ }
+ static bool classof(const OpaqueValueExpr *) { return true; }
+};
+
+/// \brief A reference to a declared variable, function, enum, etc.
+/// [C99 6.5.1p2]
+///
+/// This encodes all the information about how a declaration is referenced
+/// within an expression.
+///
+/// There are several optional constructs attached to DeclRefExprs only when
+/// they apply in order to conserve memory. These are laid out past the end of
+/// the object, and flags in the DeclRefExprBitfield track whether they exist:
+///
+/// DeclRefExprBits.HasQualifier:
+/// Specifies when this declaration reference expression has a C++
+/// nested-name-specifier.
+/// DeclRefExprBits.HasFoundDecl:
+/// Specifies when this declaration reference expression has a record of
+/// a NamedDecl (different from the referenced ValueDecl) which was found
+/// during name lookup and/or overload resolution.
+/// DeclRefExprBits.HasTemplateKWAndArgsInfo:
+/// Specifies when this declaration reference expression has an explicit
+/// C++ template keyword and/or template argument list.
+/// DeclRefExprBits.RefersToEnclosingLocal:
+/// Specifies when this declaration reference expression (validly)
+/// refers to a local variable from a different function.
+class DeclRefExpr : public Expr {
+ /// \brief The declaration that we are referencing.
+ ValueDecl *D;
+
+ /// \brief The location of the declaration name itself.
+ SourceLocation Loc;
+
+ /// \brief Provides source/type location info for the declaration name
+ /// embedded in D.
+ DeclarationNameLoc DNLoc;
+
+ /// \brief Helper to retrieve the optional NestedNameSpecifierLoc.
+ NestedNameSpecifierLoc &getInternalQualifierLoc() {
+ assert(hasQualifier());
+ return *reinterpret_cast<NestedNameSpecifierLoc *>(this + 1);
+ }
+
+ /// \brief Helper to retrieve the optional NestedNameSpecifierLoc.
+ const NestedNameSpecifierLoc &getInternalQualifierLoc() const {
+ return const_cast<DeclRefExpr *>(this)->getInternalQualifierLoc();
+ }
+
+ /// \brief Test whether there is a distinct FoundDecl attached to the end of
+ /// this DRE.
+ bool hasFoundDecl() const { return DeclRefExprBits.HasFoundDecl; }
+
+ /// \brief Helper to retrieve the optional NamedDecl through which this
+  /// reference occurred.
+ NamedDecl *&getInternalFoundDecl() {
+ assert(hasFoundDecl());
+ if (hasQualifier())
+ return *reinterpret_cast<NamedDecl **>(&getInternalQualifierLoc() + 1);
+ return *reinterpret_cast<NamedDecl **>(this + 1);
+ }
+
+ /// \brief Helper to retrieve the optional NamedDecl through which this
+  /// reference occurred.
+ NamedDecl *getInternalFoundDecl() const {
+ return const_cast<DeclRefExpr *>(this)->getInternalFoundDecl();
+ }
+
+ DeclRefExpr(ASTContext &Ctx,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *D, bool refersToEnclosingLocal,
+ const DeclarationNameInfo &NameInfo,
+ NamedDecl *FoundD,
+ const TemplateArgumentListInfo *TemplateArgs,
+ QualType T, ExprValueKind VK);
+
+ /// \brief Construct an empty declaration reference expression.
+ explicit DeclRefExpr(EmptyShell Empty)
+ : Expr(DeclRefExprClass, Empty) { }
+
+ /// \brief Computes the type- and value-dependence flags for this
+ /// declaration reference expression.
+ void computeDependence(ASTContext &C);
+
+public:
+ DeclRefExpr(ValueDecl *D, bool refersToEnclosingLocal, QualType T,
+ ExprValueKind VK, SourceLocation L,
+ const DeclarationNameLoc &LocInfo = DeclarationNameLoc())
+ : Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
+ D(D), Loc(L), DNLoc(LocInfo) {
+ DeclRefExprBits.HasQualifier = 0;
+ DeclRefExprBits.HasTemplateKWAndArgsInfo = 0;
+ DeclRefExprBits.HasFoundDecl = 0;
+ DeclRefExprBits.HadMultipleCandidates = 0;
+ DeclRefExprBits.RefersToEnclosingLocal = refersToEnclosingLocal;
+ computeDependence(D->getASTContext());
+ }
+
+ static DeclRefExpr *Create(ASTContext &Context,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *D,
+ bool isEnclosingLocal,
+ SourceLocation NameLoc,
+ QualType T, ExprValueKind VK,
+ NamedDecl *FoundD = 0,
+ const TemplateArgumentListInfo *TemplateArgs = 0);
+
+ static DeclRefExpr *Create(ASTContext &Context,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *D,
+ bool isEnclosingLocal,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, ExprValueKind VK,
+ NamedDecl *FoundD = 0,
+ const TemplateArgumentListInfo *TemplateArgs = 0);
+
+ /// \brief Construct an empty declaration reference expression.
+ static DeclRefExpr *CreateEmpty(ASTContext &Context,
+ bool HasQualifier,
+ bool HasFoundDecl,
+ bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs);
+
+ ValueDecl *getDecl() { return D; }
+ const ValueDecl *getDecl() const { return D; }
+ void setDecl(ValueDecl *NewD) { D = NewD; }
+
+ DeclarationNameInfo getNameInfo() const {
+ return DeclarationNameInfo(getDecl()->getDeclName(), Loc, DNLoc);
+ }
+
+ SourceLocation getLocation() const { return Loc; }
+ void setLocation(SourceLocation L) { Loc = L; }
+ SourceRange getSourceRange() const LLVM_READONLY;
+ SourceLocation getLocStart() const LLVM_READONLY;
+ SourceLocation getLocEnd() const LLVM_READONLY;
+
+ /// \brief Determine whether this declaration reference was preceded by a
+ /// C++ nested-name-specifier, e.g., \c N::foo.
+ bool hasQualifier() const { return DeclRefExprBits.HasQualifier; }
+
+ /// \brief If the name was qualified, retrieves the nested-name-specifier
+ /// that precedes the name. Otherwise, returns NULL.
+ NestedNameSpecifier *getQualifier() const {
+ if (!hasQualifier())
+ return 0;
+
+ return getInternalQualifierLoc().getNestedNameSpecifier();
+ }
+
+ /// \brief If the name was qualified, retrieves the nested-name-specifier
+ /// that precedes the name, with source-location information.
+ NestedNameSpecifierLoc getQualifierLoc() const {
+ if (!hasQualifier())
+ return NestedNameSpecifierLoc();
+
+ return getInternalQualifierLoc();
+ }
+
+ /// \brief Get the NamedDecl through which this reference occured.
+ ///
+ /// This Decl may be different from the ValueDecl actually referred to in the
+ /// presence of using declarations, etc. It always returns non-NULL, and may
+  /// simply return the ValueDecl when appropriate.
+ NamedDecl *getFoundDecl() {
+ return hasFoundDecl() ? getInternalFoundDecl() : D;
+ }
+
+ /// \brief Get the NamedDecl through which this reference occurred.
+ /// See non-const variant.
+ const NamedDecl *getFoundDecl() const {
+ return hasFoundDecl() ? getInternalFoundDecl() : D;
+ }
+
+ bool hasTemplateKWAndArgsInfo() const {
+ return DeclRefExprBits.HasTemplateKWAndArgsInfo;
+ }
+
+ /// \brief Return the optional template keyword and arguments info.
+ ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() {
+ if (!hasTemplateKWAndArgsInfo())
+ return 0;
+
+ if (hasFoundDecl())
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo *>(
+ &getInternalFoundDecl() + 1);
+
+ if (hasQualifier())
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo *>(
+ &getInternalQualifierLoc() + 1);
+
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo *>(this + 1);
+ }
+
+ /// \brief Return the optional template keyword and arguments info.
+ const ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() const {
+ return const_cast<DeclRefExpr*>(this)->getTemplateKWAndArgsInfo();
+ }
+
+ /// \brief Retrieve the location of the template keyword preceding
+ /// this name, if any.
+ SourceLocation getTemplateKeywordLoc() const {
+ if (!hasTemplateKWAndArgsInfo()) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->getTemplateKeywordLoc();
+ }
+
+ /// \brief Retrieve the location of the left angle bracket starting the
+ /// explicit template argument list following the name, if any.
+ SourceLocation getLAngleLoc() const {
+ if (!hasTemplateKWAndArgsInfo()) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->LAngleLoc;
+ }
+
+ /// \brief Retrieve the location of the right angle bracket ending the
+ /// explicit template argument list following the name, if any.
+ SourceLocation getRAngleLoc() const {
+ if (!hasTemplateKWAndArgsInfo()) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->RAngleLoc;
+ }
+
+ /// \brief Determines whether the name in this declaration reference
+ /// was preceded by the template keyword.
+ bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
+
+ /// \brief Determines whether this declaration reference was followed by an
+ /// explicit template argument list.
+ bool hasExplicitTemplateArgs() const { return getLAngleLoc().isValid(); }
+
+ /// \brief Retrieve the explicit template argument list that followed the
+ /// member template name.
+ ASTTemplateArgumentListInfo &getExplicitTemplateArgs() {
+ assert(hasExplicitTemplateArgs());
+ return *getTemplateKWAndArgsInfo();
+ }
+
+ /// \brief Retrieve the explicit template argument list that followed the
+ /// member template name.
+ const ASTTemplateArgumentListInfo &getExplicitTemplateArgs() const {
+ return const_cast<DeclRefExpr *>(this)->getExplicitTemplateArgs();
+ }
+
+ /// \brief Retrieves the optional explicit template arguments.
+ /// This points to the same data as getExplicitTemplateArgs(), but
+ /// returns null if there are no explicit template arguments.
+ const ASTTemplateArgumentListInfo *getOptionalExplicitTemplateArgs() const {
+ if (!hasExplicitTemplateArgs()) return 0;
+ return &getExplicitTemplateArgs();
+ }
+
+ /// \brief Copies the template arguments (if present) into the given
+ /// structure.
+ void copyTemplateArgumentsInto(TemplateArgumentListInfo &List) const {
+ if (hasExplicitTemplateArgs())
+ getExplicitTemplateArgs().copyInto(List);
+ }
+
+ /// \brief Retrieve the template arguments provided as part of this
+ /// template-id.
+ const TemplateArgumentLoc *getTemplateArgs() const {
+ if (!hasExplicitTemplateArgs())
+ return 0;
+
+ return getExplicitTemplateArgs().getTemplateArgs();
+ }
+
+ /// \brief Retrieve the number of template arguments provided as part of this
+ /// template-id.
+ unsigned getNumTemplateArgs() const {
+ if (!hasExplicitTemplateArgs())
+ return 0;
+
+ return getExplicitTemplateArgs().NumTemplateArgs;
+ }
+
+ /// \brief Returns true if this expression refers to a function that
+ /// was resolved from an overloaded set having size greater than 1.
+ bool hadMultipleCandidates() const {
+ return DeclRefExprBits.HadMultipleCandidates;
+ }
+ /// \brief Sets the flag telling whether this expression refers to
+ /// a function that was resolved from an overloaded set having size
+ /// greater than 1.
+ void setHadMultipleCandidates(bool V = true) {
+ DeclRefExprBits.HadMultipleCandidates = V;
+ }
+
+ /// Does this DeclRefExpr refer to a local declaration from an
+ /// enclosing function scope?
+ bool refersToEnclosingLocal() const {
+ return DeclRefExprBits.RefersToEnclosingLocal;
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == DeclRefExprClass;
+ }
+ static bool classof(const DeclRefExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+};
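+
+// A minimal usage sketch (illustrative only, not part of the interface
+// above): inspecting a DeclRefExpr for an optional C++ qualifier and for the
+// declaration found by name lookup, assuming 'DRE' is a DeclRefExpr*.
+//
+//   if (DRE->hasQualifier())
+//     (void)DRE->getQualifierLoc().getNestedNameSpecifier();
+//   NamedDecl *Found = DRE->getFoundDecl();   // never returns null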
+
+/// PredefinedExpr - [C99 6.4.2.2] - A predefined identifier such as __func__.
+class PredefinedExpr : public Expr {
+public:
+ enum IdentType {
+ Func,
+ Function,
+ PrettyFunction,
+ /// PrettyFunctionNoVirtual - The same as PrettyFunction, except that the
+ /// 'virtual' keyword is omitted for virtual member functions.
+ PrettyFunctionNoVirtual
+ };
+
+private:
+ SourceLocation Loc;
+ IdentType Type;
+public:
+ PredefinedExpr(SourceLocation l, QualType type, IdentType IT)
+ : Expr(PredefinedExprClass, type, VK_LValue, OK_Ordinary,
+ type->isDependentType(), type->isDependentType(),
+ type->isInstantiationDependentType(),
+ /*ContainsUnexpandedParameterPack=*/false),
+ Loc(l), Type(IT) {}
+
+ /// \brief Construct an empty predefined expression.
+ explicit PredefinedExpr(EmptyShell Empty)
+ : Expr(PredefinedExprClass, Empty) { }
+
+ IdentType getIdentType() const { return Type; }
+ void setIdentType(IdentType IT) { Type = IT; }
+
+ SourceLocation getLocation() const { return Loc; }
+ void setLocation(SourceLocation L) { Loc = L; }
+
+ static std::string ComputeName(IdentType IT, const Decl *CurrentDecl);
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc); }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == PredefinedExprClass;
+ }
+ static bool classof(const PredefinedExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// \brief Used by IntegerLiteral/FloatingLiteral to store the numeric value
+/// without leaking memory.
+///
+/// For large floats/integers, APFloat/APInt will allocate memory from the heap
+/// to represent these numbers. Unfortunately, when we use a BumpPtrAllocator
+/// to allocate IntegerLiteral/FloatingLiteral nodes the memory associated with
+/// the APFloat/APInt values will never get freed. APNumericStorage uses
+/// ASTContext's allocator for memory allocation.
+class APNumericStorage {
+ union {
+ uint64_t VAL; ///< Used to store the <= 64 bits integer value.
+ uint64_t *pVal; ///< Used to store the >64 bits integer value.
+ };
+ unsigned BitWidth;
+
+ bool hasAllocation() const { return llvm::APInt::getNumWords(BitWidth) > 1; }
+
+ APNumericStorage(const APNumericStorage&); // do not implement
+ APNumericStorage& operator=(const APNumericStorage&); // do not implement
+
+protected:
+ APNumericStorage() : VAL(0), BitWidth(0) { }
+
+ llvm::APInt getIntValue() const {
+ unsigned NumWords = llvm::APInt::getNumWords(BitWidth);
+ if (NumWords > 1)
+ return llvm::APInt(BitWidth, NumWords, pVal);
+ else
+ return llvm::APInt(BitWidth, VAL);
+ }
+ void setIntValue(ASTContext &C, const llvm::APInt &Val);
+};
+
+class APIntStorage : private APNumericStorage {
+public:
+ llvm::APInt getValue() const { return getIntValue(); }
+ void setValue(ASTContext &C, const llvm::APInt &Val) { setIntValue(C, Val); }
+};
+
+class APFloatStorage : private APNumericStorage {
+public:
+ llvm::APFloat getValue(bool IsIEEE) const {
+ return llvm::APFloat(getIntValue(), IsIEEE);
+ }
+ void setValue(ASTContext &C, const llvm::APFloat &Val) {
+ setIntValue(C, Val.bitcastToAPInt());
+ }
+};
+
+class IntegerLiteral : public Expr, public APIntStorage {
+ SourceLocation Loc;
+
+ /// \brief Construct an empty integer literal.
+ explicit IntegerLiteral(EmptyShell Empty)
+ : Expr(IntegerLiteralClass, Empty) { }
+
+public:
+ // type should be IntTy, LongTy, LongLongTy, UnsignedIntTy, UnsignedLongTy,
+ // or UnsignedLongLongTy
+ IntegerLiteral(ASTContext &C, const llvm::APInt &V,
+ QualType type, SourceLocation l)
+ : Expr(IntegerLiteralClass, type, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ Loc(l) {
+ assert(type->isIntegerType() && "Illegal type in IntegerLiteral");
+ assert(V.getBitWidth() == C.getIntWidth(type) &&
+ "Integer type is not the correct size for constant.");
+ setValue(C, V);
+ }
+
+ /// \brief Returns a new integer literal with value 'V' and type 'type'.
+ /// \param type - either IntTy, LongTy, LongLongTy, UnsignedIntTy,
+ /// UnsignedLongTy, or UnsignedLongLongTy which should match the size of V
+ /// \param V - the value that the returned integer literal contains.
+ static IntegerLiteral *Create(ASTContext &C, const llvm::APInt &V,
+ QualType type, SourceLocation l);
+ /// \brief Returns a new empty integer literal.
+ static IntegerLiteral *Create(ASTContext &C, EmptyShell Empty);
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc); }
+
+ /// \brief Retrieve the location of the literal.
+ SourceLocation getLocation() const { return Loc; }
+
+ void setLocation(SourceLocation Location) { Loc = Location; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == IntegerLiteralClass;
+ }
+ static bool classof(const IntegerLiteral *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
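+
+// Hedged sketch (illustrative only, not part of the interface above):
+// building an IntegerLiteral through the Create method above, assuming 'Ctx'
+// is an ASTContext and 'Loc' a SourceLocation.
+//
+//   llvm::APInt Val(Ctx.getIntWidth(Ctx.IntTy), 42);
+//   IntegerLiteral *IL = IntegerLiteral::Create(Ctx, Val, Ctx.IntTy, Loc);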
+
+class CharacterLiteral : public Expr {
+public:
+ enum CharacterKind {
+ Ascii,
+ Wide,
+ UTF16,
+ UTF32
+ };
+
+private:
+ unsigned Value;
+ SourceLocation Loc;
+public:
+ // type should be IntTy
+ CharacterLiteral(unsigned value, CharacterKind kind, QualType type,
+ SourceLocation l)
+ : Expr(CharacterLiteralClass, type, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ Value(value), Loc(l) {
+ CharacterLiteralBits.Kind = kind;
+ }
+
+ /// \brief Construct an empty character literal.
+ CharacterLiteral(EmptyShell Empty) : Expr(CharacterLiteralClass, Empty) { }
+
+ SourceLocation getLocation() const { return Loc; }
+ CharacterKind getKind() const {
+ return static_cast<CharacterKind>(CharacterLiteralBits.Kind);
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc); }
+
+ unsigned getValue() const { return Value; }
+
+ void setLocation(SourceLocation Location) { Loc = Location; }
+ void setKind(CharacterKind kind) { CharacterLiteralBits.Kind = kind; }
+ void setValue(unsigned Val) { Value = Val; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CharacterLiteralClass;
+ }
+ static bool classof(const CharacterLiteral *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+class FloatingLiteral : public Expr, private APFloatStorage {
+ SourceLocation Loc;
+
+ FloatingLiteral(ASTContext &C, const llvm::APFloat &V, bool isexact,
+ QualType Type, SourceLocation L)
+ : Expr(FloatingLiteralClass, Type, VK_RValue, OK_Ordinary, false, false,
+ false, false), Loc(L) {
+ FloatingLiteralBits.IsIEEE =
+ &C.getTargetInfo().getLongDoubleFormat() == &llvm::APFloat::IEEEquad;
+ FloatingLiteralBits.IsExact = isexact;
+ setValue(C, V);
+ }
+
+ /// \brief Construct an empty floating-point literal.
+ explicit FloatingLiteral(ASTContext &C, EmptyShell Empty)
+ : Expr(FloatingLiteralClass, Empty) {
+ FloatingLiteralBits.IsIEEE =
+ &C.getTargetInfo().getLongDoubleFormat() == &llvm::APFloat::IEEEquad;
+ FloatingLiteralBits.IsExact = false;
+ }
+
+public:
+ static FloatingLiteral *Create(ASTContext &C, const llvm::APFloat &V,
+ bool isexact, QualType Type, SourceLocation L);
+ static FloatingLiteral *Create(ASTContext &C, EmptyShell Empty);
+
+ llvm::APFloat getValue() const {
+ return APFloatStorage::getValue(FloatingLiteralBits.IsIEEE);
+ }
+ void setValue(ASTContext &C, const llvm::APFloat &Val) {
+ APFloatStorage::setValue(C, Val);
+ }
+
+ bool isExact() const { return FloatingLiteralBits.IsExact; }
+ void setExact(bool E) { FloatingLiteralBits.IsExact = E; }
+
+ /// getValueAsApproximateDouble - This returns the value as an inaccurate
+ /// double. Note that this may cause loss of precision, but is useful for
+ /// debugging dumps, etc.
+ double getValueAsApproximateDouble() const;
+
+ SourceLocation getLocation() const { return Loc; }
+ void setLocation(SourceLocation L) { Loc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc); }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == FloatingLiteralClass;
+ }
+ static bool classof(const FloatingLiteral *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// ImaginaryLiteral - We support imaginary integer and floating point literals,
+/// like "1.0i". We represent these as a wrapper around FloatingLiteral and
+/// IntegerLiteral classes. Instances of this class always have a Complex type
+/// whose element type matches the subexpression.
+///
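+/// As an informal illustration: for the source expression "1.0i", the
+/// subexpression is a FloatingLiteral of type 'double', and the
+/// ImaginaryLiteral itself has type '_Complex double'.
+///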
+class ImaginaryLiteral : public Expr {
+ Stmt *Val;
+public:
+ ImaginaryLiteral(Expr *val, QualType Ty)
+ : Expr(ImaginaryLiteralClass, Ty, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ Val(val) {}
+
+ /// \brief Build an empty imaginary literal.
+ explicit ImaginaryLiteral(EmptyShell Empty)
+ : Expr(ImaginaryLiteralClass, Empty) { }
+
+ const Expr *getSubExpr() const { return cast<Expr>(Val); }
+ Expr *getSubExpr() { return cast<Expr>(Val); }
+ void setSubExpr(Expr *E) { Val = E; }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return Val->getSourceRange(); }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ImaginaryLiteralClass;
+ }
+ static bool classof(const ImaginaryLiteral *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&Val, &Val+1); }
+};
+
+/// StringLiteral - This represents a string literal expression, e.g. "foo"
+/// or L"bar" (wide strings). The actual string is returned by getStrData()
+/// is NOT null-terminated, and the length of the string is determined by
+/// calling getByteLength(). The C type for a string is always a
+/// ConstantArrayType. In C++, the char type is const qualified, in C it is
+/// not.
+///
+/// Note that strings in C can be formed by concatenation of multiple string
+/// literal pptokens in translation phase #6. This keeps track of the locations
+/// of each of these pieces.
+///
+/// Strings in C can also be truncated and extended by assigning into arrays,
+/// e.g. with constructs like:
+/// char X[2] = "foobar";
+/// In this case, getByteLength() will return 6, but the string literal will
+/// have type "char[2]".
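+///
+/// As an informal sketch of the concatenation case, the adjacent literals
+/// @code
+///   "foo" "bar"
+/// @endcode
+/// form a single StringLiteral whose getNumConcatenated() returns 2 and
+/// which records one token location per original piece (see
+/// getStrTokenLoc()).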
+class StringLiteral : public Expr {
+public:
+ enum StringKind {
+ Ascii,
+ Wide,
+ UTF8,
+ UTF16,
+ UTF32
+ };
+
+private:
+ friend class ASTStmtReader;
+
+ union {
+ const char *asChar;
+ const uint16_t *asUInt16;
+ const uint32_t *asUInt32;
+ } StrData;
+ unsigned Length;
+ unsigned CharByteWidth : 4;
+ unsigned Kind : 3;
+ unsigned IsPascal : 1;
+ unsigned NumConcatenated;
+ SourceLocation TokLocs[1];
+
+ StringLiteral(QualType Ty) :
+ Expr(StringLiteralClass, Ty, VK_LValue, OK_Ordinary, false, false, false,
+ false) {}
+
+ static int mapCharByteWidth(TargetInfo const &target, StringKind k);
+
+public:
+ /// This is the "fully general" constructor that allows representation of
+ /// strings formed from multiple concatenated tokens.
+ static StringLiteral *Create(ASTContext &C, StringRef Str, StringKind Kind,
+ bool Pascal, QualType Ty,
+ const SourceLocation *Loc, unsigned NumStrs);
+
+ /// Simple constructor for string literals made from one token.
+ static StringLiteral *Create(ASTContext &C, StringRef Str, StringKind Kind,
+ bool Pascal, QualType Ty,
+ SourceLocation Loc) {
+ return Create(C, Str, Kind, Pascal, Ty, &Loc, 1);
+ }
+
+ /// \brief Construct an empty string literal.
+ static StringLiteral *CreateEmpty(ASTContext &C, unsigned NumStrs);
+
+ StringRef getString() const {
+ assert(CharByteWidth==1
+ && "This function is used in places that assume strings use char");
+ return StringRef(StrData.asChar, getByteLength());
+ }
+
+ /// Allow clients that need the byte representation, such as ASTWriterStmt
+ /// ::VisitStringLiteral(), access.
+ StringRef getBytes() const {
+ // FIXME: StringRef may not be the right type to use as a result for this.
+ if (CharByteWidth == 1)
+ return StringRef(StrData.asChar, getByteLength());
+ if (CharByteWidth == 4)
+ return StringRef(reinterpret_cast<const char*>(StrData.asUInt32),
+ getByteLength());
+ assert(CharByteWidth == 2 && "unsupported CharByteWidth");
+ return StringRef(reinterpret_cast<const char*>(StrData.asUInt16),
+ getByteLength());
+ }
+
+ uint32_t getCodeUnit(size_t i) const {
+ assert(i < Length && "out of bounds access");
+ if (CharByteWidth == 1)
+ return static_cast<unsigned char>(StrData.asChar[i]);
+ if (CharByteWidth == 4)
+ return StrData.asUInt32[i];
+ assert(CharByteWidth == 2 && "unsupported CharByteWidth");
+ return StrData.asUInt16[i];
+ }
+
+ unsigned getByteLength() const { return CharByteWidth*Length; }
+ unsigned getLength() const { return Length; }
+ unsigned getCharByteWidth() const { return CharByteWidth; }
+
+ /// \brief Sets the string data to the given string data.
+ void setString(ASTContext &C, StringRef Str,
+ StringKind Kind, bool IsPascal);
+
+ StringKind getKind() const { return static_cast<StringKind>(Kind); }
+
+ bool isAscii() const { return Kind == Ascii; }
+ bool isWide() const { return Kind == Wide; }
+ bool isUTF8() const { return Kind == UTF8; }
+ bool isUTF16() const { return Kind == UTF16; }
+ bool isUTF32() const { return Kind == UTF32; }
+ bool isPascal() const { return IsPascal; }
+
+ bool containsNonAsciiOrNull() const {
+ StringRef Str = getString();
+ for (unsigned i = 0, e = Str.size(); i != e; ++i)
+ if (!isascii(Str[i]) || !Str[i])
+ return true;
+ return false;
+ }
+
+ /// getNumConcatenated - Get the number of string literal tokens that were
+ /// concatenated in translation phase #6 to form this string literal.
+ unsigned getNumConcatenated() const { return NumConcatenated; }
+
+ SourceLocation getStrTokenLoc(unsigned TokNum) const {
+ assert(TokNum < NumConcatenated && "Invalid tok number");
+ return TokLocs[TokNum];
+ }
+ void setStrTokenLoc(unsigned TokNum, SourceLocation L) {
+ assert(TokNum < NumConcatenated && "Invalid tok number");
+ TokLocs[TokNum] = L;
+ }
+
+ /// getLocationOfByte - Return a source location that points to the specified
+ /// byte of this string literal.
+ ///
+ /// Strings are amazingly complex. They can be formed from multiple tokens
+ /// and can have escape sequences in them in addition to the usual trigraph
+ /// and escaped newline business. This routine handles this complexity.
+ ///
+ SourceLocation getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
+ const LangOptions &Features,
+ const TargetInfo &Target) const;
+
+ typedef const SourceLocation *tokloc_iterator;
+ tokloc_iterator tokloc_begin() const { return TokLocs; }
+ tokloc_iterator tokloc_end() const { return TokLocs+NumConcatenated; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(TokLocs[0], TokLocs[NumConcatenated-1]);
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == StringLiteralClass;
+ }
+ static bool classof(const StringLiteral *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// ParenExpr - This represents a parenthesized expression, e.g. "(1)". This
+/// AST node is only formed if full location information is requested.
+class ParenExpr : public Expr {
+ SourceLocation L, R;
+ Stmt *Val;
+public:
+ ParenExpr(SourceLocation l, SourceLocation r, Expr *val)
+ : Expr(ParenExprClass, val->getType(),
+ val->getValueKind(), val->getObjectKind(),
+ val->isTypeDependent(), val->isValueDependent(),
+ val->isInstantiationDependent(),
+ val->containsUnexpandedParameterPack()),
+ L(l), R(r), Val(val) {}
+
+ /// \brief Construct an empty parenthesized expression.
+ explicit ParenExpr(EmptyShell Empty)
+ : Expr(ParenExprClass, Empty) { }
+
+ const Expr *getSubExpr() const { return cast<Expr>(Val); }
+ Expr *getSubExpr() { return cast<Expr>(Val); }
+ void setSubExpr(Expr *E) { Val = E; }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(L, R); }
+
+ /// \brief Get the location of the left parenthesis '('.
+ SourceLocation getLParen() const { return L; }
+ void setLParen(SourceLocation Loc) { L = Loc; }
+
+ /// \brief Get the location of the right parenthesis ')'.
+ SourceLocation getRParen() const { return R; }
+ void setRParen(SourceLocation Loc) { R = Loc; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ParenExprClass;
+ }
+ static bool classof(const ParenExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&Val, &Val+1); }
+};
+
+
+/// UnaryOperator - This represents the unary-expression's (except sizeof and
+/// alignof), the postinc/postdec operators from postfix-expression, and various
+/// extensions.
+///
+/// Notes on various nodes:
+///
+/// Real/Imag - These return the real/imag part of a complex operand. If
+/// applied to a non-complex value, the former returns its operand and the
+/// latter returns zero in the type of the operand.
+///
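+/// As an informal example of the Real/Imag note: given a non-complex
+/// 'double d', '__real__ d' yields d itself, while '__imag__ d' yields a
+/// zero of the operand's type.
+///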
+class UnaryOperator : public Expr {
+public:
+ typedef UnaryOperatorKind Opcode;
+
+private:
+ unsigned Opc : 5;
+ SourceLocation Loc;
+ Stmt *Val;
+public:
+
+ UnaryOperator(Expr *input, Opcode opc, QualType type,
+ ExprValueKind VK, ExprObjectKind OK, SourceLocation l)
+ : Expr(UnaryOperatorClass, type, VK, OK,
+ input->isTypeDependent() || type->isDependentType(),
+ input->isValueDependent(),
+ (input->isInstantiationDependent() ||
+ type->isInstantiationDependentType()),
+ input->containsUnexpandedParameterPack()),
+ Opc(opc), Loc(l), Val(input) {}
+
+ /// \brief Build an empty unary operator.
+ explicit UnaryOperator(EmptyShell Empty)
+ : Expr(UnaryOperatorClass, Empty), Opc(UO_AddrOf) { }
+
+ Opcode getOpcode() const { return static_cast<Opcode>(Opc); }
+ void setOpcode(Opcode O) { Opc = O; }
+
+ Expr *getSubExpr() const { return cast<Expr>(Val); }
+ void setSubExpr(Expr *E) { Val = E; }
+
+ /// getOperatorLoc - Return the location of the operator.
+ SourceLocation getOperatorLoc() const { return Loc; }
+ void setOperatorLoc(SourceLocation L) { Loc = L; }
+
+ /// isPostfix - Return true if this is a postfix operation, like x++.
+ static bool isPostfix(Opcode Op) {
+ return Op == UO_PostInc || Op == UO_PostDec;
+ }
+
+ /// isPrefix - Return true if this is a prefix operation, like --x.
+ static bool isPrefix(Opcode Op) {
+ return Op == UO_PreInc || Op == UO_PreDec;
+ }
+
+ bool isPrefix() const { return isPrefix(getOpcode()); }
+ bool isPostfix() const { return isPostfix(getOpcode()); }
+
+ static bool isIncrementOp(Opcode Op) {
+ return Op == UO_PreInc || Op == UO_PostInc;
+ }
+ bool isIncrementOp() const {
+ return isIncrementOp(getOpcode());
+ }
+
+ static bool isDecrementOp(Opcode Op) {
+ return Op == UO_PreDec || Op == UO_PostDec;
+ }
+ bool isDecrementOp() const {
+ return isDecrementOp(getOpcode());
+ }
+
+ static bool isIncrementDecrementOp(Opcode Op) { return Op <= UO_PreDec; }
+ bool isIncrementDecrementOp() const {
+ return isIncrementDecrementOp(getOpcode());
+ }
+
+ static bool isArithmeticOp(Opcode Op) {
+ return Op >= UO_Plus && Op <= UO_LNot;
+ }
+ bool isArithmeticOp() const { return isArithmeticOp(getOpcode()); }
+
+ /// getOpcodeStr - Turn an Opcode enum value into the punctuation string it
+ /// corresponds to, e.g. "[pre]++" or "!".
+ static const char *getOpcodeStr(Opcode Op);
+
+ /// \brief Retrieve the unary opcode that corresponds to the given
+ /// overloaded operator.
+ static Opcode getOverloadedOpcode(OverloadedOperatorKind OO, bool Postfix);
+
+ /// \brief Retrieve the overloaded operator kind that corresponds to
+ /// the given unary opcode.
+ static OverloadedOperatorKind getOverloadedOperator(Opcode Opc);
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ if (isPostfix())
+ return SourceRange(Val->getLocStart(), Loc);
+ else
+ return SourceRange(Loc, Val->getLocEnd());
+ }
+ SourceLocation getExprLoc() const LLVM_READONLY { return Loc; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == UnaryOperatorClass;
+ }
+ static bool classof(const UnaryOperator *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&Val, &Val+1); }
+};
+
+/// OffsetOfExpr - [C99 7.17] - This represents an expression of the form
+/// offsetof(record-type, member-designator). For example, given:
+/// @code
+/// struct S {
+/// float f;
+/// double d;
+/// };
+/// struct T {
+/// int i;
+/// struct S s[10];
+/// };
+/// @endcode
+/// we can represent and evaluate the expression @c offsetof(struct T, s[2].d).
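+///
+/// Informally, that designator decomposes into a sequence of OffsetOfNode
+/// components: a Field node for 's', an Array node whose index expression
+/// is '2', and a Field node for 'd' (see OffsetOfNode below).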
+
+class OffsetOfExpr : public Expr {
+public:
+ // __builtin_offsetof(type, identifier(.identifier|[expr])*)
+ class OffsetOfNode {
+ public:
+ /// \brief The kind of offsetof node we have.
+ enum Kind {
+ /// \brief An index into an array.
+ Array = 0x00,
+ /// \brief A field.
+ Field = 0x01,
+ /// \brief A field in a dependent type, known only by its name.
+ Identifier = 0x02,
+ /// \brief An implicit indirection through a C++ base class, when the
+ /// field found is in a base class.
+ Base = 0x03
+ };
+
+ private:
+ enum { MaskBits = 2, Mask = 0x03 };
+
+ /// \brief The source range that covers this part of the designator.
+ SourceRange Range;
+
+ /// \brief The data describing the designator, which comes in three
+ /// different forms, depending on the lower two bits.
+ /// - An unsigned index into the array of Expr*'s stored after this node
+ /// in memory, for [constant-expression] designators.
+ /// - A FieldDecl*, for references to a known field.
+ /// - An IdentifierInfo*, for references to a field with a given name
+ /// when the class type is dependent.
+ /// - A CXXBaseSpecifier*, for references that look at a field in a
+ /// base class.
+ uintptr_t Data;
+
+ public:
+ /// \brief Create an offsetof node that refers to an array element.
+ OffsetOfNode(SourceLocation LBracketLoc, unsigned Index,
+ SourceLocation RBracketLoc)
+ : Range(LBracketLoc, RBracketLoc), Data((Index << 2) | Array) { }
+
+ /// \brief Create an offsetof node that refers to a field.
+ OffsetOfNode(SourceLocation DotLoc, FieldDecl *Field,
+ SourceLocation NameLoc)
+ : Range(DotLoc.isValid()? DotLoc : NameLoc, NameLoc),
+ Data(reinterpret_cast<uintptr_t>(Field) | OffsetOfNode::Field) { }
+
+ /// \brief Create an offsetof node that refers to an identifier.
+ OffsetOfNode(SourceLocation DotLoc, IdentifierInfo *Name,
+ SourceLocation NameLoc)
+ : Range(DotLoc.isValid()? DotLoc : NameLoc, NameLoc),
+ Data(reinterpret_cast<uintptr_t>(Name) | Identifier) { }
+
+ /// \brief Create an offsetof node that refers into a C++ base class.
+ explicit OffsetOfNode(const CXXBaseSpecifier *Base)
+ : Range(), Data(reinterpret_cast<uintptr_t>(Base) | OffsetOfNode::Base) {}
+
+ /// \brief Determine what kind of offsetof node this is.
+ Kind getKind() const {
+ return static_cast<Kind>(Data & Mask);
+ }
+
+ /// \brief For an array element node, returns the index into the array
+ /// of expressions.
+ unsigned getArrayExprIndex() const {
+ assert(getKind() == Array);
+ return Data >> 2;
+ }
+
+ /// \brief For a field offsetof node, returns the field.
+ FieldDecl *getField() const {
+ assert(getKind() == Field);
+ return reinterpret_cast<FieldDecl *>(Data & ~(uintptr_t)Mask);
+ }
+
+ /// \brief For a field or identifier offsetof node, returns the name of
+ /// the field.
+ IdentifierInfo *getFieldName() const;
+
+ /// \brief For a base class node, returns the base specifier.
+ CXXBaseSpecifier *getBase() const {
+ assert(getKind() == Base);
+ return reinterpret_cast<CXXBaseSpecifier *>(Data & ~(uintptr_t)Mask);
+ }
+
+ /// \brief Retrieve the source range that covers this offsetof node.
+ ///
+ /// For an array element node, the source range contains the locations of
+ /// the square brackets. For a field or identifier node, the source range
+ /// contains the location of the period (if there is one) and the
+ /// identifier.
+ SourceRange getSourceRange() const LLVM_READONLY { return Range; }
+ };
+
+private:
+
+ SourceLocation OperatorLoc, RParenLoc;
+ // Base type.
+ TypeSourceInfo *TSInfo;
+ // Number of sub-components (i.e. instances of OffsetOfNode).
+ unsigned NumComps;
+ // Number of sub-expressions (i.e. array subscript expressions).
+ unsigned NumExprs;
+
+ OffsetOfExpr(ASTContext &C, QualType type,
+ SourceLocation OperatorLoc, TypeSourceInfo *tsi,
+ OffsetOfNode* compsPtr, unsigned numComps,
+ Expr** exprsPtr, unsigned numExprs,
+ SourceLocation RParenLoc);
+
+ explicit OffsetOfExpr(unsigned numComps, unsigned numExprs)
+ : Expr(OffsetOfExprClass, EmptyShell()),
+ TSInfo(0), NumComps(numComps), NumExprs(numExprs) {}
+
+public:
+
+ static OffsetOfExpr *Create(ASTContext &C, QualType type,
+ SourceLocation OperatorLoc, TypeSourceInfo *tsi,
+ OffsetOfNode* compsPtr, unsigned numComps,
+ Expr** exprsPtr, unsigned numExprs,
+ SourceLocation RParenLoc);
+
+ static OffsetOfExpr *CreateEmpty(ASTContext &C,
+ unsigned NumComps, unsigned NumExprs);
+
+ /// getOperatorLoc - Return the location of the operator.
+ SourceLocation getOperatorLoc() const { return OperatorLoc; }
+ void setOperatorLoc(SourceLocation L) { OperatorLoc = L; }
+
+ /// \brief Return the location of the right parenthesis.
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation R) { RParenLoc = R; }
+
+ TypeSourceInfo *getTypeSourceInfo() const {
+ return TSInfo;
+ }
+ void setTypeSourceInfo(TypeSourceInfo *tsi) {
+ TSInfo = tsi;
+ }
+
+ const OffsetOfNode &getComponent(unsigned Idx) const {
+ assert(Idx < NumComps && "Subscript out of range");
+ return reinterpret_cast<const OffsetOfNode *> (this + 1)[Idx];
+ }
+
+ void setComponent(unsigned Idx, OffsetOfNode ON) {
+ assert(Idx < NumComps && "Subscript out of range");
+ reinterpret_cast<OffsetOfNode *> (this + 1)[Idx] = ON;
+ }
+
+ unsigned getNumComponents() const {
+ return NumComps;
+ }
+
+ Expr* getIndexExpr(unsigned Idx) {
+ assert(Idx < NumExprs && "Subscript out of range");
+ return reinterpret_cast<Expr **>(
+ reinterpret_cast<OffsetOfNode *>(this+1) + NumComps)[Idx];
+ }
+ const Expr *getIndexExpr(unsigned Idx) const {
+ return const_cast<OffsetOfExpr*>(this)->getIndexExpr(Idx);
+ }
+
+ void setIndexExpr(unsigned Idx, Expr* E) {
+ assert(Idx < NumExprs && "Subscript out of range");
+ reinterpret_cast<Expr **>(
+ reinterpret_cast<OffsetOfNode *>(this+1) + NumComps)[Idx] = E;
+ }
+
+ unsigned getNumExpressions() const {
+ return NumExprs;
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(OperatorLoc, RParenLoc);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == OffsetOfExprClass;
+ }
+
+ static bool classof(const OffsetOfExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ Stmt **begin =
+ reinterpret_cast<Stmt**>(reinterpret_cast<OffsetOfNode*>(this + 1)
+ + NumComps);
+ return child_range(begin, begin + NumExprs);
+ }
+};
+
+/// UnaryExprOrTypeTraitExpr - expression with either a type or (unevaluated)
+/// expression operand. Used for sizeof/alignof (C99 6.5.3.4) and
+/// vec_step (OpenCL 1.1 6.11.12).
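+///
+/// As a rough example: for 'sizeof(int)' the argument is a type, so
+/// isArgumentType() is true and getArgumentType() is 'int'; for 'sizeof x'
+/// the argument is an unevaluated expression, so getArgumentExpr() returns
+/// the expression naming 'x'.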
+class UnaryExprOrTypeTraitExpr : public Expr {
+ union {
+ TypeSourceInfo *Ty;
+ Stmt *Ex;
+ } Argument;
+ SourceLocation OpLoc, RParenLoc;
+
+public:
+ UnaryExprOrTypeTraitExpr(UnaryExprOrTypeTrait ExprKind, TypeSourceInfo *TInfo,
+ QualType resultType, SourceLocation op,
+ SourceLocation rp) :
+ Expr(UnaryExprOrTypeTraitExprClass, resultType, VK_RValue, OK_Ordinary,
+ false, // Never type-dependent (C++ [temp.dep.expr]p3).
+ // Value-dependent if the argument is type-dependent.
+ TInfo->getType()->isDependentType(),
+ TInfo->getType()->isInstantiationDependentType(),
+ TInfo->getType()->containsUnexpandedParameterPack()),
+ OpLoc(op), RParenLoc(rp) {
+ UnaryExprOrTypeTraitExprBits.Kind = ExprKind;
+ UnaryExprOrTypeTraitExprBits.IsType = true;
+ Argument.Ty = TInfo;
+ }
+
+ UnaryExprOrTypeTraitExpr(UnaryExprOrTypeTrait ExprKind, Expr *E,
+ QualType resultType, SourceLocation op,
+ SourceLocation rp) :
+ Expr(UnaryExprOrTypeTraitExprClass, resultType, VK_RValue, OK_Ordinary,
+ false, // Never type-dependent (C++ [temp.dep.expr]p3).
+ // Value-dependent if the argument is type-dependent.
+ E->isTypeDependent(),
+ E->isInstantiationDependent(),
+ E->containsUnexpandedParameterPack()),
+ OpLoc(op), RParenLoc(rp) {
+ UnaryExprOrTypeTraitExprBits.Kind = ExprKind;
+ UnaryExprOrTypeTraitExprBits.IsType = false;
+ Argument.Ex = E;
+ }
+
+ /// \brief Construct an empty sizeof/alignof expression.
+ explicit UnaryExprOrTypeTraitExpr(EmptyShell Empty)
+ : Expr(UnaryExprOrTypeTraitExprClass, Empty) { }
+
+ UnaryExprOrTypeTrait getKind() const {
+ return static_cast<UnaryExprOrTypeTrait>(UnaryExprOrTypeTraitExprBits.Kind);
+ }
+ void setKind(UnaryExprOrTypeTrait K) { UnaryExprOrTypeTraitExprBits.Kind = K;}
+
+ bool isArgumentType() const { return UnaryExprOrTypeTraitExprBits.IsType; }
+ QualType getArgumentType() const {
+ return getArgumentTypeInfo()->getType();
+ }
+ TypeSourceInfo *getArgumentTypeInfo() const {
+ assert(isArgumentType() && "calling getArgumentType() when arg is expr");
+ return Argument.Ty;
+ }
+ Expr *getArgumentExpr() {
+ assert(!isArgumentType() && "calling getArgumentExpr() when arg is type");
+ return static_cast<Expr*>(Argument.Ex);
+ }
+ const Expr *getArgumentExpr() const {
+ return const_cast<UnaryExprOrTypeTraitExpr*>(this)->getArgumentExpr();
+ }
+
+ void setArgument(Expr *E) {
+ Argument.Ex = E;
+ UnaryExprOrTypeTraitExprBits.IsType = false;
+ }
+ void setArgument(TypeSourceInfo *TInfo) {
+ Argument.Ty = TInfo;
+ UnaryExprOrTypeTraitExprBits.IsType = true;
+ }
+
+ /// Gets the argument type, or the type of the argument expression, whichever
+ /// is appropriate.
+ QualType getTypeOfArgument() const {
+ return isArgumentType() ? getArgumentType() : getArgumentExpr()->getType();
+ }
+
+ SourceLocation getOperatorLoc() const { return OpLoc; }
+ void setOperatorLoc(SourceLocation L) { OpLoc = L; }
+
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(OpLoc, RParenLoc);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == UnaryExprOrTypeTraitExprClass;
+ }
+ static bool classof(const UnaryExprOrTypeTraitExpr *) { return true; }
+
+ // Iterators
+ child_range children();
+};
+
+//===----------------------------------------------------------------------===//
+// Postfix Operators.
+//===----------------------------------------------------------------------===//
+
+/// ArraySubscriptExpr - [C99 6.5.2.1] Array Subscripting.
+class ArraySubscriptExpr : public Expr {
+ enum { LHS, RHS, END_EXPR=2 };
+ Stmt* SubExprs[END_EXPR];
+ SourceLocation RBracketLoc;
+public:
+ ArraySubscriptExpr(Expr *lhs, Expr *rhs, QualType t,
+ ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation rbracketloc)
+ : Expr(ArraySubscriptExprClass, t, VK, OK,
+ lhs->isTypeDependent() || rhs->isTypeDependent(),
+ lhs->isValueDependent() || rhs->isValueDependent(),
+ (lhs->isInstantiationDependent() ||
+ rhs->isInstantiationDependent()),
+ (lhs->containsUnexpandedParameterPack() ||
+ rhs->containsUnexpandedParameterPack())),
+ RBracketLoc(rbracketloc) {
+ SubExprs[LHS] = lhs;
+ SubExprs[RHS] = rhs;
+ }
+
+ /// \brief Create an empty array subscript expression.
+ explicit ArraySubscriptExpr(EmptyShell Shell)
+ : Expr(ArraySubscriptExprClass, Shell) { }
+
+ /// An array access can be written A[4] or 4[A] (both are equivalent).
+ /// - getBase() and getIdx() always present the normalized view: A[4].
+ /// In this case getBase() returns "A" and getIdx() returns "4".
+ /// - getLHS() and getRHS() present the syntactic view, e.g. for
+ /// 4[A] getLHS() returns "4".
+ /// Note: Because vector element access is also written A[4], we must
+ /// predicate the format conversion in getBase and getIdx only on the
+ /// type of the RHS, as it is possible for the LHS to be a vector of
+ /// integer type.
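+ ///
+ /// As an informal example, for the equivalent subscript '4[A]': getLHS()
+ /// is "4" and getRHS() is "A", while the normalized accessors give
+ /// getBase() == "A" and getIdx() == "4".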
+ Expr *getLHS() { return cast<Expr>(SubExprs[LHS]); }
+ const Expr *getLHS() const { return cast<Expr>(SubExprs[LHS]); }
+ void setLHS(Expr *E) { SubExprs[LHS] = E; }
+
+ Expr *getRHS() { return cast<Expr>(SubExprs[RHS]); }
+ const Expr *getRHS() const { return cast<Expr>(SubExprs[RHS]); }
+ void setRHS(Expr *E) { SubExprs[RHS] = E; }
+
+ Expr *getBase() {
+ return cast<Expr>(getRHS()->getType()->isIntegerType() ? getLHS():getRHS());
+ }
+
+ const Expr *getBase() const {
+ return cast<Expr>(getRHS()->getType()->isIntegerType() ? getLHS():getRHS());
+ }
+
+ Expr *getIdx() {
+ return cast<Expr>(getRHS()->getType()->isIntegerType() ? getRHS():getLHS());
+ }
+
+ const Expr *getIdx() const {
+ return cast<Expr>(getRHS()->getType()->isIntegerType() ? getRHS():getLHS());
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getLHS()->getLocStart(), RBracketLoc);
+ }
+
+ SourceLocation getRBracketLoc() const { return RBracketLoc; }
+ void setRBracketLoc(SourceLocation L) { RBracketLoc = L; }
+
+ SourceLocation getExprLoc() const LLVM_READONLY { return getBase()->getExprLoc(); }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ArraySubscriptExprClass;
+ }
+ static bool classof(const ArraySubscriptExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
+};
+
+
+/// CallExpr - Represents a function call (C99 6.5.2.2, C++ [expr.call]).
+/// CallExpr itself represents a normal function call, e.g., "f(x, 2)",
+/// while its subclasses may represent alternative syntax that (semantically)
+/// results in a function call. For example, CXXOperatorCallExpr is
+/// a subclass for overloaded operator calls that use operator syntax, e.g.,
+/// "str1 + str2" to resolve to a function call.
+class CallExpr : public Expr {
+ enum { FN=0, PREARGS_START=1 };
+ Stmt **SubExprs;
+ unsigned NumArgs;
+ SourceLocation RParenLoc;
+
+protected:
+ // These versions of the constructor are for derived classes.
+ CallExpr(ASTContext& C, StmtClass SC, Expr *fn, unsigned NumPreArgs,
+ Expr **args, unsigned numargs, QualType t, ExprValueKind VK,
+ SourceLocation rparenloc);
+ CallExpr(ASTContext &C, StmtClass SC, unsigned NumPreArgs, EmptyShell Empty);
+
+ Stmt *getPreArg(unsigned i) {
+ assert(i < getNumPreArgs() && "Prearg access out of range!");
+ return SubExprs[PREARGS_START+i];
+ }
+ const Stmt *getPreArg(unsigned i) const {
+ assert(i < getNumPreArgs() && "Prearg access out of range!");
+ return SubExprs[PREARGS_START+i];
+ }
+ void setPreArg(unsigned i, Stmt *PreArg) {
+ assert(i < getNumPreArgs() && "Prearg access out of range!");
+ SubExprs[PREARGS_START+i] = PreArg;
+ }
+
+ unsigned getNumPreArgs() const { return CallExprBits.NumPreArgs; }
+
+public:
+ CallExpr(ASTContext& C, Expr *fn, Expr **args, unsigned numargs, QualType t,
+ ExprValueKind VK, SourceLocation rparenloc);
+
+ /// \brief Build an empty call expression.
+ CallExpr(ASTContext &C, StmtClass SC, EmptyShell Empty);
+
+ const Expr *getCallee() const { return cast<Expr>(SubExprs[FN]); }
+ Expr *getCallee() { return cast<Expr>(SubExprs[FN]); }
+ void setCallee(Expr *F) { SubExprs[FN] = F; }
+
+ Decl *getCalleeDecl();
+ const Decl *getCalleeDecl() const {
+ return const_cast<CallExpr*>(this)->getCalleeDecl();
+ }
+
+ /// \brief If the callee is a FunctionDecl, return it. Otherwise return 0.
+ FunctionDecl *getDirectCallee();
+ const FunctionDecl *getDirectCallee() const {
+ return const_cast<CallExpr*>(this)->getDirectCallee();
+ }
+
+ /// getNumArgs - Return the number of actual arguments to this call.
+ ///
+ unsigned getNumArgs() const { return NumArgs; }
+
+ /// \brief Retrieve the call arguments.
+ Expr **getArgs() {
+ return reinterpret_cast<Expr **>(SubExprs+getNumPreArgs()+PREARGS_START);
+ }
+ const Expr *const *getArgs() const {
+ return const_cast<CallExpr*>(this)->getArgs();
+ }
+
+ /// getArg - Return the specified argument.
+ Expr *getArg(unsigned Arg) {
+ assert(Arg < NumArgs && "Arg access out of range!");
+ return cast<Expr>(SubExprs[Arg+getNumPreArgs()+PREARGS_START]);
+ }
+ const Expr *getArg(unsigned Arg) const {
+ assert(Arg < NumArgs && "Arg access out of range!");
+ return cast<Expr>(SubExprs[Arg+getNumPreArgs()+PREARGS_START]);
+ }
+
+ /// setArg - Set the specified argument.
+ void setArg(unsigned Arg, Expr *ArgExpr) {
+ assert(Arg < NumArgs && "Arg access out of range!");
+ SubExprs[Arg+getNumPreArgs()+PREARGS_START] = ArgExpr;
+ }
+
+ /// setNumArgs - This changes the number of arguments present in this call.
+ /// Any orphaned expressions are deleted by this, and any new operands are set
+ /// to null.
+ void setNumArgs(ASTContext& C, unsigned NumArgs);
+
+ typedef ExprIterator arg_iterator;
+ typedef ConstExprIterator const_arg_iterator;
+
+ arg_iterator arg_begin() { return SubExprs+PREARGS_START+getNumPreArgs(); }
+ arg_iterator arg_end() {
+ return SubExprs+PREARGS_START+getNumPreArgs()+getNumArgs();
+ }
+ const_arg_iterator arg_begin() const {
+ return SubExprs+PREARGS_START+getNumPreArgs();
+ }
+ const_arg_iterator arg_end() const {
+ return SubExprs+PREARGS_START+getNumPreArgs()+getNumArgs();
+ }
+
+ /// getNumCommas - Return the number of commas that must have been present in
+ /// this function call.
+ unsigned getNumCommas() const { return NumArgs ? NumArgs - 1 : 0; }
+
+ /// isBuiltinCall - If this is a call to a builtin, return the builtin ID. If
+ /// not, return 0.
+ unsigned isBuiltinCall() const;
+
+ /// getCallReturnType - Get the return type of the call expr. This is not
+ /// always the type of the expr itself, if the return type is a reference
+ /// type.
+ QualType getCallReturnType() const;
+
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+ SourceLocation getLocStart() const LLVM_READONLY;
+ SourceLocation getLocEnd() const LLVM_READONLY;
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() >= firstCallExprConstant &&
+ T->getStmtClass() <= lastCallExprConstant;
+ }
+ static bool classof(const CallExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(&SubExprs[0],
+ &SubExprs[0]+NumArgs+getNumPreArgs()+PREARGS_START);
+ }
+};
+
+/// MemberExpr - [C99 6.5.2.3] Structure and Union Members. X->F and X.F.
+///
+class MemberExpr : public Expr {
+ /// Extra data stored in some member expressions.
+ struct MemberNameQualifier {
+ /// \brief The nested-name-specifier that qualifies the name, including
+ /// source-location information.
+ NestedNameSpecifierLoc QualifierLoc;
+
+ /// \brief The DeclAccessPair through which the MemberDecl was found due to
+ /// name qualifiers.
+ DeclAccessPair FoundDecl;
+ };
+
+ /// Base - the expression for the base pointer or structure references. In
+ /// X.F, this is "X".
+ Stmt *Base;
+
+ /// MemberDecl - This is the decl being referenced by the field/member name.
+ /// In X.F, this is the decl referenced by F.
+ ValueDecl *MemberDecl;
+
+ /// MemberDNLoc - Provides source/type location info for the
+ /// declaration name embedded in MemberDecl.
+ DeclarationNameLoc MemberDNLoc;
+
+ /// MemberLoc - This is the location of the member name.
+ SourceLocation MemberLoc;
+
+ /// IsArrow - True if this is "X->F", false if this is "X.F".
+ bool IsArrow : 1;
+
+ /// \brief True if this member expression used a nested-name-specifier to
+ /// refer to the member, e.g., "x->Base::f", or found its member via a using
+ /// declaration. When true, a MemberNameQualifier
+ /// structure is allocated immediately after the MemberExpr.
+ bool HasQualifierOrFoundDecl : 1;
+
+ /// \brief True if this member expression specified a template keyword
+ /// and/or a template argument list explicitly, e.g., x->f<int>,
+ /// x->template f, x->template f<int>.
+ /// When true, an ASTTemplateKWAndArgsInfo structure and its
+ /// TemplateArguments (if any) are allocated immediately after
+ /// the MemberExpr or, if the member expression also has a qualifier,
+ /// after the MemberNameQualifier structure.
+ bool HasTemplateKWAndArgsInfo : 1;
+
+ /// \brief True if this member expression refers to a method that
+ /// was resolved from an overloaded set having size greater than 1.
+ bool HadMultipleCandidates : 1;
+
+ /// \brief Retrieve the qualifier that preceded the member name, if any.
+ MemberNameQualifier *getMemberQualifier() {
+ assert(HasQualifierOrFoundDecl);
+ return reinterpret_cast<MemberNameQualifier *> (this + 1);
+ }
+
+ /// \brief Retrieve the qualifier that preceded the member name, if any.
+ const MemberNameQualifier *getMemberQualifier() const {
+ return const_cast<MemberExpr *>(this)->getMemberQualifier();
+ }
+
+public:
+ MemberExpr(Expr *base, bool isarrow, ValueDecl *memberdecl,
+ const DeclarationNameInfo &NameInfo, QualType ty,
+ ExprValueKind VK, ExprObjectKind OK)
+ : Expr(MemberExprClass, ty, VK, OK,
+ base->isTypeDependent(),
+ base->isValueDependent(),
+ base->isInstantiationDependent(),
+ base->containsUnexpandedParameterPack()),
+ Base(base), MemberDecl(memberdecl), MemberDNLoc(NameInfo.getInfo()),
+ MemberLoc(NameInfo.getLoc()), IsArrow(isarrow),
+ HasQualifierOrFoundDecl(false), HasTemplateKWAndArgsInfo(false),
+ HadMultipleCandidates(false) {
+ assert(memberdecl->getDeclName() == NameInfo.getName());
+ }
+
+ // NOTE: this constructor should be used only when it is known that
+ // the member name can not provide additional syntactic info
+ // (i.e., source locations for C++ operator names or type source info
+ // for constructors, destructors and conversion operators).
+ MemberExpr(Expr *base, bool isarrow, ValueDecl *memberdecl,
+ SourceLocation l, QualType ty,
+ ExprValueKind VK, ExprObjectKind OK)
+ : Expr(MemberExprClass, ty, VK, OK,
+ base->isTypeDependent(), base->isValueDependent(),
+ base->isInstantiationDependent(),
+ base->containsUnexpandedParameterPack()),
+ Base(base), MemberDecl(memberdecl), MemberDNLoc(), MemberLoc(l),
+ IsArrow(isarrow),
+ HasQualifierOrFoundDecl(false), HasTemplateKWAndArgsInfo(false),
+ HadMultipleCandidates(false) {}
+
+ static MemberExpr *Create(ASTContext &C, Expr *base, bool isarrow,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *memberdecl, DeclAccessPair founddecl,
+ DeclarationNameInfo MemberNameInfo,
+ const TemplateArgumentListInfo *targs,
+ QualType ty, ExprValueKind VK, ExprObjectKind OK);
+
+ void setBase(Expr *E) { Base = E; }
+ Expr *getBase() const { return cast<Expr>(Base); }
+
+ /// \brief Retrieve the member declaration to which this expression refers.
+ ///
+ /// The returned declaration will either be a FieldDecl or (in C++)
+ /// a CXXMethodDecl.
+ ValueDecl *getMemberDecl() const { return MemberDecl; }
+ void setMemberDecl(ValueDecl *D) { MemberDecl = D; }
+
+ /// \brief Retrieves the declaration found by lookup.
+ DeclAccessPair getFoundDecl() const {
+ if (!HasQualifierOrFoundDecl)
+ return DeclAccessPair::make(getMemberDecl(),
+ getMemberDecl()->getAccess());
+ return getMemberQualifier()->FoundDecl;
+ }
+
+ /// \brief Determines whether this member expression actually had
+ /// a C++ nested-name-specifier prior to the name of the member, e.g.,
+ /// x->Base::foo.
+ bool hasQualifier() const { return getQualifier() != 0; }
+
+ /// \brief If the member name was qualified, retrieves the
+ /// nested-name-specifier that precedes the member name. Otherwise, returns
+ /// NULL.
+ NestedNameSpecifier *getQualifier() const {
+ if (!HasQualifierOrFoundDecl)
+ return 0;
+
+ return getMemberQualifier()->QualifierLoc.getNestedNameSpecifier();
+ }
+
+ /// \brief If the member name was qualified, retrieves the
+ /// nested-name-specifier that precedes the member name, with source-location
+ /// information.
+ NestedNameSpecifierLoc getQualifierLoc() const {
+ if (!hasQualifier())
+ return NestedNameSpecifierLoc();
+
+ return getMemberQualifier()->QualifierLoc;
+ }
+
+ /// \brief Return the optional template keyword and arguments info.
+ ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() {
+ if (!HasTemplateKWAndArgsInfo)
+ return 0;
+
+ if (!HasQualifierOrFoundDecl)
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo *>(this + 1);
+
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo *>(
+ getMemberQualifier() + 1);
+ }
+
+ /// \brief Return the optional template keyword and arguments info.
+ const ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() const {
+ return const_cast<MemberExpr*>(this)->getTemplateKWAndArgsInfo();
+ }
+
+ /// \brief Retrieve the location of the template keyword preceding
+ /// the member name, if any.
+ SourceLocation getTemplateKeywordLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->getTemplateKeywordLoc();
+ }
+
+ /// \brief Retrieve the location of the left angle bracket starting the
+ /// explicit template argument list following the member name, if any.
+ SourceLocation getLAngleLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->LAngleLoc;
+ }
+
+ /// \brief Retrieve the location of the right angle bracket ending the
+ /// explicit template argument list following the member name, if any.
+ SourceLocation getRAngleLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->RAngleLoc;
+ }
+
+ /// Determines whether the member name was preceded by the template keyword.
+ bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
+
+ /// \brief Determines whether the member name was followed by an
+ /// explicit template argument list.
+ bool hasExplicitTemplateArgs() const { return getLAngleLoc().isValid(); }
+
+ /// \brief Copies the template arguments (if present) into the given
+ /// structure.
+ void copyTemplateArgumentsInto(TemplateArgumentListInfo &List) const {
+ if (hasExplicitTemplateArgs())
+ getExplicitTemplateArgs().copyInto(List);
+ }
+
+ /// \brief Retrieve the explicit template argument list that
+ /// follows the member template name. This must only be called on an
+ /// expression with explicit template arguments.
+ ASTTemplateArgumentListInfo &getExplicitTemplateArgs() {
+ assert(hasExplicitTemplateArgs());
+ return *getTemplateKWAndArgsInfo();
+ }
+
+ /// \brief Retrieve the explicit template argument list that
+ /// followed the member template name. This must only be called on
+ /// an expression with explicit template arguments.
+ const ASTTemplateArgumentListInfo &getExplicitTemplateArgs() const {
+ return const_cast<MemberExpr *>(this)->getExplicitTemplateArgs();
+ }
+
+ /// \brief Retrieves the optional explicit template arguments.
+ /// This points to the same data as getExplicitTemplateArgs(), but
+ /// returns null if there are no explicit template arguments.
+ const ASTTemplateArgumentListInfo *getOptionalExplicitTemplateArgs() const {
+ if (!hasExplicitTemplateArgs()) return 0;
+ return &getExplicitTemplateArgs();
+ }
+
+ /// \brief Retrieve the template arguments provided as part of this
+ /// template-id.
+ const TemplateArgumentLoc *getTemplateArgs() const {
+ if (!hasExplicitTemplateArgs())
+ return 0;
+
+ return getExplicitTemplateArgs().getTemplateArgs();
+ }
+
+ /// \brief Retrieve the number of template arguments provided as part of this
+ /// template-id.
+ unsigned getNumTemplateArgs() const {
+ if (!hasExplicitTemplateArgs())
+ return 0;
+
+ return getExplicitTemplateArgs().NumTemplateArgs;
+ }
+
+ /// \brief Retrieve the member declaration name info.
+ DeclarationNameInfo getMemberNameInfo() const {
+ return DeclarationNameInfo(MemberDecl->getDeclName(),
+ MemberLoc, MemberDNLoc);
+ }
+
+ bool isArrow() const { return IsArrow; }
+ void setArrow(bool A) { IsArrow = A; }
+
+ /// getMemberLoc - Return the location of the "member"; in X->F, it is the
+ /// location of 'F'.
+ SourceLocation getMemberLoc() const { return MemberLoc; }
+ void setMemberLoc(SourceLocation L) { MemberLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+ SourceLocation getLocStart() const LLVM_READONLY;
+ SourceLocation getLocEnd() const LLVM_READONLY;
+
+ SourceLocation getExprLoc() const LLVM_READONLY { return MemberLoc; }
+
+ /// \brief Determine whether the base of this member access is implicit.
+ bool isImplicitAccess() const {
+ return getBase() && getBase()->isImplicitCXXThis();
+ }
+
+ /// \brief Returns true if this member expression refers to a method that
+ /// was resolved from an overloaded set having size greater than 1.
+ bool hadMultipleCandidates() const {
+ return HadMultipleCandidates;
+ }
+ /// \brief Sets the flag telling whether this expression refers to
+ /// a method that was resolved from an overloaded set having size
+ /// greater than 1.
+ void setHadMultipleCandidates(bool V = true) {
+ HadMultipleCandidates = V;
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == MemberExprClass;
+ }
+ static bool classof(const MemberExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&Base, &Base+1); }
+
+ friend class ASTReader;
+ friend class ASTStmtWriter;
+};
+
+/// CompoundLiteralExpr - [C99 6.5.2.5]
+///
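+/// As an informal example, "(int){4}" is a compound literal whose
+/// initializer is the expression '4'. As noted on the members below, the
+/// type as written may be an incomplete array type, e.g. "(int[]){1,2,3}",
+/// in which case the type of the expression itself differs from the type
+/// as written.
+///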
+class CompoundLiteralExpr : public Expr {
+ /// LParenLoc - If non-null, this is the location of the left paren in a
+ /// compound literal like "(int){4}". This can be null if this is a
+ /// synthesized compound expression.
+ SourceLocation LParenLoc;
+
+ /// The type as written. This can be an incomplete array type, in
+ /// which case the actual expression type will be different.
+ /// The int part of the pair stores whether this expr is file scope.
+ llvm::PointerIntPair<TypeSourceInfo *, 1, bool> TInfoAndScope;
+ Stmt *Init;
+public:
+ CompoundLiteralExpr(SourceLocation lparenloc, TypeSourceInfo *tinfo,
+ QualType T, ExprValueKind VK, Expr *init, bool fileScope)
+ : Expr(CompoundLiteralExprClass, T, VK, OK_Ordinary,
+ tinfo->getType()->isDependentType(),
+ init->isValueDependent(),
+ (init->isInstantiationDependent() ||
+ tinfo->getType()->isInstantiationDependentType()),
+ init->containsUnexpandedParameterPack()),
+ LParenLoc(lparenloc), TInfoAndScope(tinfo, fileScope), Init(init) {}
+
+ /// \brief Construct an empty compound literal.
+ explicit CompoundLiteralExpr(EmptyShell Empty)
+ : Expr(CompoundLiteralExprClass, Empty) { }
+
+ const Expr *getInitializer() const { return cast<Expr>(Init); }
+ Expr *getInitializer() { return cast<Expr>(Init); }
+ void setInitializer(Expr *E) { Init = E; }
+
+ bool isFileScope() const { return TInfoAndScope.getInt(); }
+ void setFileScope(bool FS) { TInfoAndScope.setInt(FS); }
+
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+ void setLParenLoc(SourceLocation L) { LParenLoc = L; }
+
+ TypeSourceInfo *getTypeSourceInfo() const {
+ return TInfoAndScope.getPointer();
+ }
+ void setTypeSourceInfo(TypeSourceInfo *tinfo) {
+ TInfoAndScope.setPointer(tinfo);
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ // FIXME: Init should never be null.
+ if (!Init)
+ return SourceRange();
+ if (LParenLoc.isInvalid())
+ return Init->getSourceRange();
+ return SourceRange(LParenLoc, Init->getLocEnd());
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CompoundLiteralExprClass;
+ }
+ static bool classof(const CompoundLiteralExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&Init, &Init+1); }
+};
+
+/// CastExpr - Base class for type casts, including both implicit
+/// casts (ImplicitCastExpr) and explicit casts that have some
+/// representation in the source code (ExplicitCastExpr's derived
+/// classes).
+class CastExpr : public Expr {
+public:
+ typedef clang::CastKind CastKind;
+
+private:
+ Stmt *Op;
+
+ void CheckCastConsistency() const;
+
+ const CXXBaseSpecifier * const *path_buffer() const {
+ return const_cast<CastExpr*>(this)->path_buffer();
+ }
+ CXXBaseSpecifier **path_buffer();
+
+ void setBasePathSize(unsigned basePathSize) {
+ CastExprBits.BasePathSize = basePathSize;
+ assert(CastExprBits.BasePathSize == basePathSize &&
+ "basePathSize doesn't fit in bits of CastExprBits.BasePathSize!");
+ }
+
+protected:
+ CastExpr(StmtClass SC, QualType ty, ExprValueKind VK,
+ const CastKind kind, Expr *op, unsigned BasePathSize) :
+ Expr(SC, ty, VK, OK_Ordinary,
+ // Cast expressions are type-dependent if the type is
+ // dependent (C++ [temp.dep.expr]p3).
+ ty->isDependentType(),
+ // Cast expressions are value-dependent if the type is
+ // dependent or if the subexpression is value-dependent.
+ ty->isDependentType() || (op && op->isValueDependent()),
+ (ty->isInstantiationDependentType() ||
+ (op && op->isInstantiationDependent())),
+ (ty->containsUnexpandedParameterPack() ||
+ op->containsUnexpandedParameterPack())),
+ Op(op) {
+ assert(kind != CK_Invalid && "creating cast with invalid cast kind");
+ CastExprBits.Kind = kind;
+ setBasePathSize(BasePathSize);
+#ifndef NDEBUG
+ CheckCastConsistency();
+#endif
+ }
+
+ /// \brief Construct an empty cast.
+ CastExpr(StmtClass SC, EmptyShell Empty, unsigned BasePathSize)
+ : Expr(SC, Empty) {
+ setBasePathSize(BasePathSize);
+ }
+
+public:
+ CastKind getCastKind() const { return (CastKind) CastExprBits.Kind; }
+ void setCastKind(CastKind K) { CastExprBits.Kind = K; }
+ const char *getCastKindName() const;
+
+ Expr *getSubExpr() { return cast<Expr>(Op); }
+ const Expr *getSubExpr() const { return cast<Expr>(Op); }
+ void setSubExpr(Expr *E) { Op = E; }
+
+ /// \brief Retrieve the cast subexpression as it was written in the source
+ /// code, looking through any implicit casts or other intermediate nodes
+ /// introduced by semantic analysis.
+ Expr *getSubExprAsWritten();
+ const Expr *getSubExprAsWritten() const {
+ return const_cast<CastExpr *>(this)->getSubExprAsWritten();
+ }
+
+ typedef CXXBaseSpecifier **path_iterator;
+ typedef const CXXBaseSpecifier * const *path_const_iterator;
+ bool path_empty() const { return CastExprBits.BasePathSize == 0; }
+ unsigned path_size() const { return CastExprBits.BasePathSize; }
+ path_iterator path_begin() { return path_buffer(); }
+ path_iterator path_end() { return path_buffer() + path_size(); }
+ path_const_iterator path_begin() const { return path_buffer(); }
+ path_const_iterator path_end() const { return path_buffer() + path_size(); }
+
+ void setCastPath(const CXXCastPath &Path);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() >= firstCastExprConstant &&
+ T->getStmtClass() <= lastCastExprConstant;
+ }
+ static bool classof(const CastExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&Op, &Op+1); }
+};
+
+/// ImplicitCastExpr - Allows us to explicitly represent implicit type
+/// conversions, which have no direct representation in the original
+/// source code. For example: converting T[]->T*, void f()->void
+/// (*f)(), float->double, short->int, etc.
+///
+/// In C, implicit casts always produce rvalues. However, in C++, an
+/// implicit cast whose result is being bound to a reference will be
+/// an lvalue or xvalue. For example:
+///
+/// @code
+/// class Base { };
+/// class Derived : public Base { };
+/// Derived &&ref();
+/// void f(Derived d) {
+/// Base& b = d; // initializer is an ImplicitCastExpr
+/// // to an lvalue of type Base
+/// Base&& r = ref(); // initializer is an ImplicitCastExpr
+/// // to an xvalue of type Base
+/// }
+/// @endcode
+class ImplicitCastExpr : public CastExpr {
+private:
+ ImplicitCastExpr(QualType ty, CastKind kind, Expr *op,
+ unsigned BasePathLength, ExprValueKind VK)
+ : CastExpr(ImplicitCastExprClass, ty, VK, kind, op, BasePathLength) {
+ }
+
+ /// \brief Construct an empty implicit cast.
+ explicit ImplicitCastExpr(EmptyShell Shell, unsigned PathSize)
+ : CastExpr(ImplicitCastExprClass, Shell, PathSize) { }
+
+public:
+ enum OnStack_t { OnStack };
+ ImplicitCastExpr(OnStack_t _, QualType ty, CastKind kind, Expr *op,
+ ExprValueKind VK)
+ : CastExpr(ImplicitCastExprClass, ty, VK, kind, op, 0) {
+ }
+
+ static ImplicitCastExpr *Create(ASTContext &Context, QualType T,
+ CastKind Kind, Expr *Operand,
+ const CXXCastPath *BasePath,
+ ExprValueKind Cat);
+
+ static ImplicitCastExpr *CreateEmpty(ASTContext &Context, unsigned PathSize);
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return getSubExpr()->getSourceRange();
+ }
+ SourceLocation getLocStart() const LLVM_READONLY {
+ return getSubExpr()->getLocStart();
+ }
+ SourceLocation getLocEnd() const LLVM_READONLY {
+ return getSubExpr()->getLocEnd();
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ImplicitCastExprClass;
+ }
+ static bool classof(const ImplicitCastExpr *) { return true; }
+};
+
+inline Expr *Expr::IgnoreImpCasts() {
+ Expr *e = this;
+ while (ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(e))
+ e = ice->getSubExpr();
+ return e;
+}
+
+/// ExplicitCastExpr - An explicit cast written in the source
+/// code.
+///
+/// This class is effectively an abstract class, because it provides
+/// the basic representation of an explicitly-written cast without
+/// specifying which kind of cast (C cast, functional cast, static
+/// cast, etc.) was written; specific derived classes represent the
+/// particular style of cast and its location information.
+///
+/// Unlike implicit casts, explicit cast nodes have two different
+/// types: the type that was written into the source code, and the
+/// actual type of the expression as determined by semantic
+/// analysis. These types may differ slightly. For example, in C++ one
+/// can cast to a reference type, which indicates that the resulting
+/// expression will be an lvalue or xvalue. The reference type, however,
+/// will not be used as the type of the expression.
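+///
+/// As an informal illustration of the written vs. actual type distinction,
+/// in a C-style cast to a reference type such as
+/// @code
+///   int i;
+///   (int&)i = 5;
+/// @endcode
+/// the type as written is 'int&', while the cast expression itself has
+/// type 'int' and is an lvalue.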
+class ExplicitCastExpr : public CastExpr {
+ /// TInfo - Source type info for the (written) type
+ /// this expression is casting to.
+ TypeSourceInfo *TInfo;
+
+protected:
+ ExplicitCastExpr(StmtClass SC, QualType exprTy, ExprValueKind VK,
+ CastKind kind, Expr *op, unsigned PathSize,
+ TypeSourceInfo *writtenTy)
+ : CastExpr(SC, exprTy, VK, kind, op, PathSize), TInfo(writtenTy) {}
+
+ /// \brief Construct an empty explicit cast.
+ ExplicitCastExpr(StmtClass SC, EmptyShell Shell, unsigned PathSize)
+ : CastExpr(SC, Shell, PathSize) { }
+
+public:
+ /// getTypeInfoAsWritten - Returns the type source info for the type
+ /// that this expression is casting to.
+ TypeSourceInfo *getTypeInfoAsWritten() const { return TInfo; }
+ void setTypeInfoAsWritten(TypeSourceInfo *writtenTy) { TInfo = writtenTy; }
+
+ /// getTypeAsWritten - Returns the type that this expression is
+ /// casting to, as written in the source code.
+ QualType getTypeAsWritten() const { return TInfo->getType(); }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() >= firstExplicitCastExprConstant &&
+ T->getStmtClass() <= lastExplicitCastExprConstant;
+ }
+ static bool classof(const ExplicitCastExpr *) { return true; }
+};
+
+/// CStyleCastExpr - An explicit cast in C (C99 6.5.4) or a C-style
+/// cast in C++ (C++ [expr.cast]), which uses the syntax
+/// (Type)expr. For example: @c (int)f.
+class CStyleCastExpr : public ExplicitCastExpr {
+ SourceLocation LPLoc; // the location of the left paren
+ SourceLocation RPLoc; // the location of the right paren
+
+ CStyleCastExpr(QualType exprTy, ExprValueKind vk, CastKind kind, Expr *op,
+ unsigned PathSize, TypeSourceInfo *writtenTy,
+ SourceLocation l, SourceLocation r)
+ : ExplicitCastExpr(CStyleCastExprClass, exprTy, vk, kind, op, PathSize,
+ writtenTy), LPLoc(l), RPLoc(r) {}
+
+ /// \brief Construct an empty C-style explicit cast.
+ explicit CStyleCastExpr(EmptyShell Shell, unsigned PathSize)
+ : ExplicitCastExpr(CStyleCastExprClass, Shell, PathSize) { }
+
+public:
+ static CStyleCastExpr *Create(ASTContext &Context, QualType T,
+ ExprValueKind VK, CastKind K,
+ Expr *Op, const CXXCastPath *BasePath,
+ TypeSourceInfo *WrittenTy, SourceLocation L,
+ SourceLocation R);
+
+ static CStyleCastExpr *CreateEmpty(ASTContext &Context, unsigned PathSize);
+
+ SourceLocation getLParenLoc() const { return LPLoc; }
+ void setLParenLoc(SourceLocation L) { LPLoc = L; }
+
+ SourceLocation getRParenLoc() const { return RPLoc; }
+ void setRParenLoc(SourceLocation L) { RPLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(LPLoc, getSubExpr()->getSourceRange().getEnd());
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CStyleCastExprClass;
+ }
+ static bool classof(const CStyleCastExpr *) { return true; }
+};
+
+/// \brief A builtin binary operation expression such as "x + y" or "x <= y".
+///
+/// This expression node kind describes a builtin binary operation,
+/// such as "x + y" for integer values "x" and "y". The operands will
+/// already have been converted to appropriate types (e.g., by
+/// performing promotions or conversions).
+///
+/// In C++, where operators may be overloaded, a different kind of
+/// expression node (CXXOperatorCallExpr) is used to express the
+/// invocation of an overloaded operator with operator syntax. Within
+/// a C++ template, whether BinaryOperator or CXXOperatorCallExpr is
+/// used to store an expression "x + y" depends on the subexpressions
+/// for x and y. If neither x or y is type-dependent, and the "+"
+/// operator resolves to a built-in operation, BinaryOperator will be
+/// used to express the computation (x and y may still be
+/// value-dependent). If either x or y is type-dependent, or if the
+/// "+" resolves to an overloaded operator, CXXOperatorCallExpr will
+/// be used to express the computation.
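+///
+/// As an informal example: given 'int a, b;', the expression "a + b" is
+/// represented by a BinaryOperator with opcode BO_Add, whereas given
+/// 'std::string s1, s2;', the expression "s1 + s2" resolves to an
+/// overloaded operator and is therefore represented by a
+/// CXXOperatorCallExpr.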
+class BinaryOperator : public Expr {
+public:
+ typedef BinaryOperatorKind Opcode;
+
+private:
+ unsigned Opc : 6;
+ SourceLocation OpLoc;
+
+ enum { LHS, RHS, END_EXPR };
+ Stmt* SubExprs[END_EXPR];
+public:
+
+ BinaryOperator(Expr *lhs, Expr *rhs, Opcode opc, QualType ResTy,
+ ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation opLoc)
+ : Expr(BinaryOperatorClass, ResTy, VK, OK,
+ lhs->isTypeDependent() || rhs->isTypeDependent(),
+ lhs->isValueDependent() || rhs->isValueDependent(),
+ (lhs->isInstantiationDependent() ||
+ rhs->isInstantiationDependent()),
+ (lhs->containsUnexpandedParameterPack() ||
+ rhs->containsUnexpandedParameterPack())),
+ Opc(opc), OpLoc(opLoc) {
+ SubExprs[LHS] = lhs;
+ SubExprs[RHS] = rhs;
+ assert(!isCompoundAssignmentOp() &&
+ "Use ArithAssignBinaryOperator for compound assignments");
+ }
+
+ /// \brief Construct an empty binary operator.
+ explicit BinaryOperator(EmptyShell Empty)
+ : Expr(BinaryOperatorClass, Empty), Opc(BO_Comma) { }
+
+ SourceLocation getExprLoc() const LLVM_READONLY { return OpLoc; }
+ SourceLocation getOperatorLoc() const { return OpLoc; }
+ void setOperatorLoc(SourceLocation L) { OpLoc = L; }
+
+ Opcode getOpcode() const { return static_cast<Opcode>(Opc); }
+ void setOpcode(Opcode O) { Opc = O; }
+
+ Expr *getLHS() const { return cast<Expr>(SubExprs[LHS]); }
+ void setLHS(Expr *E) { SubExprs[LHS] = E; }
+ Expr *getRHS() const { return cast<Expr>(SubExprs[RHS]); }
+ void setRHS(Expr *E) { SubExprs[RHS] = E; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getLHS()->getLocStart(), getRHS()->getLocEnd());
+ }
+
+ /// getOpcodeStr - Turn an Opcode enum value into the punctuation char it
+ /// corresponds to, e.g. "<<=".
+ static const char *getOpcodeStr(Opcode Op);
+
+ const char *getOpcodeStr() const { return getOpcodeStr(getOpcode()); }
+
+ /// \brief Retrieve the binary opcode that corresponds to the given
+ /// overloaded operator.
+ static Opcode getOverloadedOpcode(OverloadedOperatorKind OO);
+
+ /// \brief Retrieve the overloaded operator kind that corresponds to
+ /// the given binary opcode.
+ static OverloadedOperatorKind getOverloadedOperator(Opcode Opc);
+
+ /// predicates to categorize the respective opcodes.
+ bool isPtrMemOp() const { return Opc == BO_PtrMemD || Opc == BO_PtrMemI; }
+ bool isMultiplicativeOp() const { return Opc >= BO_Mul && Opc <= BO_Rem; }
+ static bool isAdditiveOp(Opcode Opc) { return Opc == BO_Add || Opc==BO_Sub; }
+ bool isAdditiveOp() const { return isAdditiveOp(getOpcode()); }
+ static bool isShiftOp(Opcode Opc) { return Opc == BO_Shl || Opc == BO_Shr; }
+ bool isShiftOp() const { return isShiftOp(getOpcode()); }
+
+ static bool isBitwiseOp(Opcode Opc) { return Opc >= BO_And && Opc <= BO_Or; }
+ bool isBitwiseOp() const { return isBitwiseOp(getOpcode()); }
+
+ static bool isRelationalOp(Opcode Opc) { return Opc >= BO_LT && Opc<=BO_GE; }
+ bool isRelationalOp() const { return isRelationalOp(getOpcode()); }
+
+ static bool isEqualityOp(Opcode Opc) { return Opc == BO_EQ || Opc == BO_NE; }
+ bool isEqualityOp() const { return isEqualityOp(getOpcode()); }
+
+ static bool isComparisonOp(Opcode Opc) { return Opc >= BO_LT && Opc<=BO_NE; }
+ bool isComparisonOp() const { return isComparisonOp(getOpcode()); }
+
+ static bool isLogicalOp(Opcode Opc) { return Opc == BO_LAnd || Opc==BO_LOr; }
+ bool isLogicalOp() const { return isLogicalOp(getOpcode()); }
+
+ static bool isAssignmentOp(Opcode Opc) {
+ return Opc >= BO_Assign && Opc <= BO_OrAssign;
+ }
+ bool isAssignmentOp() const { return isAssignmentOp(getOpcode()); }
+
+ static bool isCompoundAssignmentOp(Opcode Opc) {
+ return Opc > BO_Assign && Opc <= BO_OrAssign;
+ }
+ bool isCompoundAssignmentOp() const {
+ return isCompoundAssignmentOp(getOpcode());
+ }
+ static Opcode getOpForCompoundAssignment(Opcode Opc) {
+ assert(isCompoundAssignmentOp(Opc));
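+ // The compound-assignment opcodes follow the same relative order as their
+ // underlying arithmetic opcodes, so the mapping is a constant offset. Two
+ // runs are needed (BO_MulAssign..BO_ShrAssign and BO_AndAssign..BO_OrAssign)
+ // because the comparison opcodes sit between BO_Shr and BO_And in the
+ // enumeration.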
+ if (Opc >= BO_AndAssign)
+ return Opcode(unsigned(Opc) - BO_AndAssign + BO_And);
+ else
+ return Opcode(unsigned(Opc) - BO_MulAssign + BO_Mul);
+ }
+
+ static bool isShiftAssignOp(Opcode Opc) {
+ return Opc == BO_ShlAssign || Opc == BO_ShrAssign;
+ }
+ bool isShiftAssignOp() const {
+ return isShiftAssignOp(getOpcode());
+ }
+
+ static bool classof(const Stmt *S) {
+ return S->getStmtClass() >= firstBinaryOperatorConstant &&
+ S->getStmtClass() <= lastBinaryOperatorConstant;
+ }
+ static bool classof(const BinaryOperator *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
+
+protected:
+ BinaryOperator(Expr *lhs, Expr *rhs, Opcode opc, QualType ResTy,
+ ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation opLoc, bool dead)
+ : Expr(CompoundAssignOperatorClass, ResTy, VK, OK,
+ lhs->isTypeDependent() || rhs->isTypeDependent(),
+ lhs->isValueDependent() || rhs->isValueDependent(),
+ (lhs->isInstantiationDependent() ||
+ rhs->isInstantiationDependent()),
+ (lhs->containsUnexpandedParameterPack() ||
+ rhs->containsUnexpandedParameterPack())),
+ Opc(opc), OpLoc(opLoc) {
+ SubExprs[LHS] = lhs;
+ SubExprs[RHS] = rhs;
+ }
+
+ BinaryOperator(StmtClass SC, EmptyShell Empty)
+ : Expr(SC, Empty), Opc(BO_MulAssign) { }
+};
+
+/// CompoundAssignOperator - For compound assignments (e.g. +=), we keep
+/// track of the type the operation is performed in. Due to the semantics of
+/// these operators, the operands are promoted, the arithmetic is performed, an
+/// implicit conversion back to the result type is done, and then the assignment
+/// takes place. This captures the intermediate type in which the computation is
+/// performed.
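+///
+/// For example (an illustrative sketch; the variable is assumed):
+/// @code
+///   char c;
+///   c += 1;  // 'c' is promoted to int, the addition is performed in int,
+///            // and the result is converted back to char for the store
+/// @endcode
+/// Here both computation types are 'int', while the type of the assignment
+/// expression itself is 'char'.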
+class CompoundAssignOperator : public BinaryOperator {
+ QualType ComputationLHSType;
+ QualType ComputationResultType;
+public:
+ CompoundAssignOperator(Expr *lhs, Expr *rhs, Opcode opc, QualType ResType,
+ ExprValueKind VK, ExprObjectKind OK,
+ QualType CompLHSType, QualType CompResultType,
+ SourceLocation OpLoc)
+ : BinaryOperator(lhs, rhs, opc, ResType, VK, OK, OpLoc, true),
+ ComputationLHSType(CompLHSType),
+ ComputationResultType(CompResultType) {
+ assert(isCompoundAssignmentOp() &&
+ "Only should be used for compound assignments");
+ }
+
+ /// \brief Build an empty compound assignment operator expression.
+ explicit CompoundAssignOperator(EmptyShell Empty)
+ : BinaryOperator(CompoundAssignOperatorClass, Empty) { }
+
+ // The two computation types are the type the LHS is converted
+ // to for the computation and the type of the result; the two are
+ // distinct in a few cases (specifically, int+=ptr and ptr-=ptr).
+ QualType getComputationLHSType() const { return ComputationLHSType; }
+ void setComputationLHSType(QualType T) { ComputationLHSType = T; }
+
+ QualType getComputationResultType() const { return ComputationResultType; }
+ void setComputationResultType(QualType T) { ComputationResultType = T; }
+
+ static bool classof(const CompoundAssignOperator *) { return true; }
+ static bool classof(const Stmt *S) {
+ return S->getStmtClass() == CompoundAssignOperatorClass;
+ }
+};
+
+/// AbstractConditionalOperator - An abstract base class for
+/// ConditionalOperator and BinaryConditionalOperator.
+class AbstractConditionalOperator : public Expr {
+ SourceLocation QuestionLoc, ColonLoc;
+ friend class ASTStmtReader;
+
+protected:
+ AbstractConditionalOperator(StmtClass SC, QualType T,
+ ExprValueKind VK, ExprObjectKind OK,
+ bool TD, bool VD, bool ID,
+ bool ContainsUnexpandedParameterPack,
+ SourceLocation qloc,
+ SourceLocation cloc)
+ : Expr(SC, T, VK, OK, TD, VD, ID, ContainsUnexpandedParameterPack),
+ QuestionLoc(qloc), ColonLoc(cloc) {}
+
+ AbstractConditionalOperator(StmtClass SC, EmptyShell Empty)
+ : Expr(SC, Empty) { }
+
+public:
+ // getCond - Return the expression representing the condition for
+ // the ?: operator.
+ Expr *getCond() const;
+
+ // getTrueExpr - Return the subexpression representing the value of
+ // the expression if the condition evaluates to true.
+ Expr *getTrueExpr() const;
+
+ // getFalseExpr - Return the subexpression representing the value of
+ // the expression if the condition evaluates to false. This is
+ // the same as getRHS.
+ Expr *getFalseExpr() const;
+
+ SourceLocation getQuestionLoc() const { return QuestionLoc; }
+ SourceLocation getColonLoc() const { return ColonLoc; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ConditionalOperatorClass ||
+ T->getStmtClass() == BinaryConditionalOperatorClass;
+ }
+ static bool classof(const AbstractConditionalOperator *) { return true; }
+};
+
+/// ConditionalOperator - The ?: ternary operator. The GNU "missing
+/// middle" extension is a BinaryConditionalOperator.
+class ConditionalOperator : public AbstractConditionalOperator {
+ enum { COND, LHS, RHS, END_EXPR };
+ Stmt* SubExprs[END_EXPR]; // Left/Middle/Right hand sides.
+
+ friend class ASTStmtReader;
+public:
+ ConditionalOperator(Expr *cond, SourceLocation QLoc, Expr *lhs,
+ SourceLocation CLoc, Expr *rhs,
+ QualType t, ExprValueKind VK, ExprObjectKind OK)
+ : AbstractConditionalOperator(ConditionalOperatorClass, t, VK, OK,
+ // FIXME: the type of the conditional operator doesn't
+ // depend on the type of the conditional, but the standard
+ // seems to imply that it could. File a bug!
+ (lhs->isTypeDependent() || rhs->isTypeDependent()),
+ (cond->isValueDependent() || lhs->isValueDependent() ||
+ rhs->isValueDependent()),
+ (cond->isInstantiationDependent() ||
+ lhs->isInstantiationDependent() ||
+ rhs->isInstantiationDependent()),
+ (cond->containsUnexpandedParameterPack() ||
+ lhs->containsUnexpandedParameterPack() ||
+ rhs->containsUnexpandedParameterPack()),
+ QLoc, CLoc) {
+ SubExprs[COND] = cond;
+ SubExprs[LHS] = lhs;
+ SubExprs[RHS] = rhs;
+ }
+
+ /// \brief Build an empty conditional operator.
+ explicit ConditionalOperator(EmptyShell Empty)
+ : AbstractConditionalOperator(ConditionalOperatorClass, Empty) { }
+
+ // getCond - Return the expression representing the condition for
+ // the ?: operator.
+ Expr *getCond() const { return cast<Expr>(SubExprs[COND]); }
+
+ // getTrueExpr - Return the subexpression representing the value of
+ // the expression if the condition evaluates to true.
+ Expr *getTrueExpr() const { return cast<Expr>(SubExprs[LHS]); }
+
+ // getFalseExpr - Return the subexpression representing the value of
+ // the expression if the condition evaluates to false. This is
+ // the same as getRHS.
+ Expr *getFalseExpr() const { return cast<Expr>(SubExprs[RHS]); }
+
+ Expr *getLHS() const { return cast<Expr>(SubExprs[LHS]); }
+ Expr *getRHS() const { return cast<Expr>(SubExprs[RHS]); }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getCond()->getLocStart(), getRHS()->getLocEnd());
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ConditionalOperatorClass;
+ }
+ static bool classof(const ConditionalOperator *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
+};
+
+/// BinaryConditionalOperator - The GNU extension to the conditional
+/// operator which allows the middle operand to be omitted.
+///
+/// This is a different expression kind on the assumption that almost
+/// every client ends up needing to know that these are different.
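+///
+/// An illustrative example (assuming 'x' and 'y' are integers):
+/// @code
+///   int z = x ?: y;  // behaves like "x ? x : y", but 'x' is evaluated once
+/// @endcode
+/// The shared subexpression is bound to an OpaqueValueExpr so that both the
+/// condition and the true branch can refer to it without re-evaluating it.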
+class BinaryConditionalOperator : public AbstractConditionalOperator {
+ enum { COMMON, COND, LHS, RHS, NUM_SUBEXPRS };
+
+ /// The subexpressions, in order, are:
+ /// - the common condition/left-hand-side expression, which will be
+ /// evaluated as the opaque value
+ /// - the condition, expressed in terms of the opaque value
+ /// - the left-hand-side, expressed in terms of the opaque value
+ /// - the right-hand-side
+ Stmt *SubExprs[NUM_SUBEXPRS];
+ OpaqueValueExpr *OpaqueValue;
+
+ friend class ASTStmtReader;
+public:
+ BinaryConditionalOperator(Expr *common, OpaqueValueExpr *opaqueValue,
+ Expr *cond, Expr *lhs, Expr *rhs,
+ SourceLocation qloc, SourceLocation cloc,
+ QualType t, ExprValueKind VK, ExprObjectKind OK)
+ : AbstractConditionalOperator(BinaryConditionalOperatorClass, t, VK, OK,
+ (common->isTypeDependent() || rhs->isTypeDependent()),
+ (common->isValueDependent() || rhs->isValueDependent()),
+ (common->isInstantiationDependent() ||
+ rhs->isInstantiationDependent()),
+ (common->containsUnexpandedParameterPack() ||
+ rhs->containsUnexpandedParameterPack()),
+ qloc, cloc),
+ OpaqueValue(opaqueValue) {
+ SubExprs[COMMON] = common;
+ SubExprs[COND] = cond;
+ SubExprs[LHS] = lhs;
+ SubExprs[RHS] = rhs;
+ assert(OpaqueValue->getSourceExpr() == common && "Wrong opaque value");
+ }
+
+ /// \brief Build an empty conditional operator.
+ explicit BinaryConditionalOperator(EmptyShell Empty)
+ : AbstractConditionalOperator(BinaryConditionalOperatorClass, Empty) { }
+
+ /// \brief getCommon - Return the common expression, written to the
+ /// left of the condition. The opaque value will be bound to the
+ /// result of this expression.
+ Expr *getCommon() const { return cast<Expr>(SubExprs[COMMON]); }
+
+ /// \brief getOpaqueValue - Return the opaque value placeholder.
+ OpaqueValueExpr *getOpaqueValue() const { return OpaqueValue; }
+
+ /// \brief getCond - Return the condition expression; this is defined
+ /// in terms of the opaque value.
+ Expr *getCond() const { return cast<Expr>(SubExprs[COND]); }
+
+ /// \brief getTrueExpr - Return the subexpression which will be
+ /// evaluated if the condition evaluates to true; this is defined
+ /// in terms of the opaque value.
+ Expr *getTrueExpr() const {
+ return cast<Expr>(SubExprs[LHS]);
+ }
+
+ /// \brief getFalseExpr - Return the subexpression which will be
+ /// evaluated if the condition evaluates to false; this is
+ /// defined in terms of the opaque value.
+ Expr *getFalseExpr() const {
+ return cast<Expr>(SubExprs[RHS]);
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getCommon()->getLocStart(), getFalseExpr()->getLocEnd());
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == BinaryConditionalOperatorClass;
+ }
+ static bool classof(const BinaryConditionalOperator *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(SubExprs, SubExprs + NUM_SUBEXPRS);
+ }
+};
+
+inline Expr *AbstractConditionalOperator::getCond() const {
+ if (const ConditionalOperator *co = dyn_cast<ConditionalOperator>(this))
+ return co->getCond();
+ return cast<BinaryConditionalOperator>(this)->getCond();
+}
+
+inline Expr *AbstractConditionalOperator::getTrueExpr() const {
+ if (const ConditionalOperator *co = dyn_cast<ConditionalOperator>(this))
+ return co->getTrueExpr();
+ return cast<BinaryConditionalOperator>(this)->getTrueExpr();
+}
+
+inline Expr *AbstractConditionalOperator::getFalseExpr() const {
+ if (const ConditionalOperator *co = dyn_cast<ConditionalOperator>(this))
+ return co->getFalseExpr();
+ return cast<BinaryConditionalOperator>(this)->getFalseExpr();
+}
+
+/// AddrLabelExpr - The GNU address of label extension, representing &&label.
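+///
+/// For example (illustrative):
+/// @code
+///   void *target = &&done;  // AddrLabelExpr naming the label 'done'
+///   goto *target;           // computed goto through the stored address
+/// done:
+///   ;
+/// @endcode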
+class AddrLabelExpr : public Expr {
+ SourceLocation AmpAmpLoc, LabelLoc;
+ LabelDecl *Label;
+public:
+ AddrLabelExpr(SourceLocation AALoc, SourceLocation LLoc, LabelDecl *L,
+ QualType t)
+ : Expr(AddrLabelExprClass, t, VK_RValue, OK_Ordinary, false, false, false,
+ false),
+ AmpAmpLoc(AALoc), LabelLoc(LLoc), Label(L) {}
+
+ /// \brief Build an empty address of a label expression.
+ explicit AddrLabelExpr(EmptyShell Empty)
+ : Expr(AddrLabelExprClass, Empty) { }
+
+ SourceLocation getAmpAmpLoc() const { return AmpAmpLoc; }
+ void setAmpAmpLoc(SourceLocation L) { AmpAmpLoc = L; }
+ SourceLocation getLabelLoc() const { return LabelLoc; }
+ void setLabelLoc(SourceLocation L) { LabelLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(AmpAmpLoc, LabelLoc);
+ }
+
+ LabelDecl *getLabel() const { return Label; }
+ void setLabel(LabelDecl *L) { Label = L; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == AddrLabelExprClass;
+ }
+ static bool classof(const AddrLabelExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// StmtExpr - This is the GNU Statement Expression extension: ({int X=4; X;}).
+/// The StmtExpr contains a single CompoundStmt node, which it evaluates and
+/// takes the value of the last subexpression.
+///
+/// A StmtExpr is always an r-value; values "returned" out of a
+/// StmtExpr will be copied.
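+///
+/// For example (illustrative):
+/// @code
+///   int y = ({ int x = 4; x + 1; });  // y == 5; the value of the StmtExpr is
+///                                     // that of its last expression statement
+/// @endcode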
+class StmtExpr : public Expr {
+ Stmt *SubStmt;
+ SourceLocation LParenLoc, RParenLoc;
+public:
+ // FIXME: Does type-dependence need to be computed differently?
+ // FIXME: Do we need to compute instantiation-dependence for
+ // statements? (ugh!)
+ StmtExpr(CompoundStmt *substmt, QualType T,
+ SourceLocation lp, SourceLocation rp) :
+ Expr(StmtExprClass, T, VK_RValue, OK_Ordinary,
+ T->isDependentType(), false, false, false),
+ SubStmt(substmt), LParenLoc(lp), RParenLoc(rp) { }
+
+ /// \brief Build an empty statement expression.
+ explicit StmtExpr(EmptyShell Empty) : Expr(StmtExprClass, Empty) { }
+
+ CompoundStmt *getSubStmt() { return cast<CompoundStmt>(SubStmt); }
+ const CompoundStmt *getSubStmt() const { return cast<CompoundStmt>(SubStmt); }
+ void setSubStmt(CompoundStmt *S) { SubStmt = S; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(LParenLoc, RParenLoc);
+ }
+
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+ void setLParenLoc(SourceLocation L) { LParenLoc = L; }
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == StmtExprClass;
+ }
+ static bool classof(const StmtExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&SubStmt, &SubStmt+1); }
+};
+
+
+/// ShuffleVectorExpr - clang-specific built-in function
+/// __builtin_shufflevector.
+/// This AST node represents an operator that does a constant
+/// shuffle, similar to LLVM's shufflevector instruction. It takes
+/// two vectors and a variable number of constant indices,
+/// and returns the appropriately shuffled vector.
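+///
+/// An illustrative example (the typedef and variables are assumed):
+/// @code
+///   typedef float float4 __attribute__((ext_vector_type(4)));
+///   float4 a, b;
+///   // Indices 0-3 select from 'a', 4-7 select from 'b'.
+///   float4 lo = __builtin_shufflevector(a, b, 0, 1, 4, 5);
+/// @endcode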
+class ShuffleVectorExpr : public Expr {
+ SourceLocation BuiltinLoc, RParenLoc;
+
+ // SubExprs - the list of values passed to the __builtin_shufflevector
+ // function. The first two are vectors, and the rest are constant
+ // indices. The number of values in this list is always
+ // 2+the number of indices in the vector type.
+ Stmt **SubExprs;
+ unsigned NumExprs;
+
+public:
+ ShuffleVectorExpr(ASTContext &C, Expr **args, unsigned nexpr,
+ QualType Type, SourceLocation BLoc,
+ SourceLocation RP);
+
+ /// \brief Build an empty vector-shuffle expression.
+ explicit ShuffleVectorExpr(EmptyShell Empty)
+ : Expr(ShuffleVectorExprClass, Empty), SubExprs(0) { }
+
+ SourceLocation getBuiltinLoc() const { return BuiltinLoc; }
+ void setBuiltinLoc(SourceLocation L) { BuiltinLoc = L; }
+
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(BuiltinLoc, RParenLoc);
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ShuffleVectorExprClass;
+ }
+ static bool classof(const ShuffleVectorExpr *) { return true; }
+
+ /// getNumSubExprs - Return the size of the SubExprs array, i.e. the two
+ /// vector operands plus the constant shuffle indices passed to
+ /// __builtin_shufflevector.
+ unsigned getNumSubExprs() const { return NumExprs; }
+
+ /// \brief Retrieve the array of expressions.
+ Expr **getSubExprs() { return reinterpret_cast<Expr **>(SubExprs); }
+
+ /// getExpr - Return the Expr at the specified index.
+ Expr *getExpr(unsigned Index) {
+ assert((Index < NumExprs) && "Arg access out of range!");
+ return cast<Expr>(SubExprs[Index]);
+ }
+ const Expr *getExpr(unsigned Index) const {
+ assert((Index < NumExprs) && "Arg access out of range!");
+ return cast<Expr>(SubExprs[Index]);
+ }
+
+ void setExprs(ASTContext &C, Expr ** Exprs, unsigned NumExprs);
+
+ unsigned getShuffleMaskIdx(ASTContext &Ctx, unsigned N) {
+ assert((N < NumExprs - 2) && "Shuffle idx out of range!");
+ return getExpr(N+2)->EvaluateKnownConstInt(Ctx).getZExtValue();
+ }
+
+ // Iterators
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+NumExprs);
+ }
+};
+
+/// ChooseExpr - GNU built-in function __builtin_choose_expr.
+/// This AST node is similar to the conditional operator (?:) in C, with
+/// the following exceptions:
+/// - the test expression must be an integer constant expression.
+/// - the expression returned acts like the chosen subexpression in every
+/// visible way: the type is the same as that of the chosen subexpression,
+/// and all predicates (whether it's an l-value, whether it's an integer
+/// constant expression, etc.) return the same result as for the chosen
+/// sub-expression.
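+///
+/// For example (illustrative):
+/// @code
+///   int r = __builtin_choose_expr(sizeof(long) == 8, 1, 2);
+/// @endcode
+/// The chosen subexpression determines both the type and the value of the
+/// whole expression, exactly as if it had been written directly.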
+class ChooseExpr : public Expr {
+ enum { COND, LHS, RHS, END_EXPR };
+ Stmt* SubExprs[END_EXPR]; // Left/Middle/Right hand sides.
+ SourceLocation BuiltinLoc, RParenLoc;
+public:
+ ChooseExpr(SourceLocation BLoc, Expr *cond, Expr *lhs, Expr *rhs,
+ QualType t, ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation RP, bool TypeDependent, bool ValueDependent)
+ : Expr(ChooseExprClass, t, VK, OK, TypeDependent, ValueDependent,
+ (cond->isInstantiationDependent() ||
+ lhs->isInstantiationDependent() ||
+ rhs->isInstantiationDependent()),
+ (cond->containsUnexpandedParameterPack() ||
+ lhs->containsUnexpandedParameterPack() ||
+ rhs->containsUnexpandedParameterPack())),
+ BuiltinLoc(BLoc), RParenLoc(RP) {
+ SubExprs[COND] = cond;
+ SubExprs[LHS] = lhs;
+ SubExprs[RHS] = rhs;
+ }
+
+ /// \brief Build an empty __builtin_choose_expr.
+ explicit ChooseExpr(EmptyShell Empty) : Expr(ChooseExprClass, Empty) { }
+
+ /// isConditionTrue - Return whether the condition is true (i.e. not
+ /// equal to zero).
+ bool isConditionTrue(const ASTContext &C) const;
+
+ /// getChosenSubExpr - Return the subexpression chosen according to the
+ /// condition.
+ Expr *getChosenSubExpr(const ASTContext &C) const {
+ return isConditionTrue(C) ? getLHS() : getRHS();
+ }
+
+ Expr *getCond() const { return cast<Expr>(SubExprs[COND]); }
+ void setCond(Expr *E) { SubExprs[COND] = E; }
+ Expr *getLHS() const { return cast<Expr>(SubExprs[LHS]); }
+ void setLHS(Expr *E) { SubExprs[LHS] = E; }
+ Expr *getRHS() const { return cast<Expr>(SubExprs[RHS]); }
+ void setRHS(Expr *E) { SubExprs[RHS] = E; }
+
+ SourceLocation getBuiltinLoc() const { return BuiltinLoc; }
+ void setBuiltinLoc(SourceLocation L) { BuiltinLoc = L; }
+
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(BuiltinLoc, RParenLoc);
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ChooseExprClass;
+ }
+ static bool classof(const ChooseExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
+};
+
+/// GNUNullExpr - Implements the GNU __null extension, which is a name
+/// for a null pointer constant that has integral type (e.g., int or
+/// long) and is the same size and alignment as a pointer. The __null
+/// extension is typically only used by system headers, which define
+/// NULL as __null in C++ rather than using 0 (which is an integer
+/// that may not match the size of a pointer).
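+///
+/// For example (illustrative):
+/// @code
+///   int *p = __null;  // GNUNullExpr: a null pointer constant of integral type
+/// @endcode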
+class GNUNullExpr : public Expr {
+ /// TokenLoc - The location of the __null keyword.
+ SourceLocation TokenLoc;
+
+public:
+ GNUNullExpr(QualType Ty, SourceLocation Loc)
+ : Expr(GNUNullExprClass, Ty, VK_RValue, OK_Ordinary, false, false, false,
+ false),
+ TokenLoc(Loc) { }
+
+ /// \brief Build an empty GNU __null expression.
+ explicit GNUNullExpr(EmptyShell Empty) : Expr(GNUNullExprClass, Empty) { }
+
+ /// getTokenLocation - The location of the __null token.
+ SourceLocation getTokenLocation() const { return TokenLoc; }
+ void setTokenLocation(SourceLocation L) { TokenLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(TokenLoc);
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == GNUNullExprClass;
+ }
+ static bool classof(const GNUNullExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// VAArgExpr, used for the builtin function __builtin_va_arg.
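+///
+/// For example (an illustrative sketch):
+/// @code
+///   #include <stdarg.h>
+///   int first_arg(int count, ...) {
+///     va_list ap;
+///     va_start(ap, count);
+///     int first = __builtin_va_arg(ap, int);  // represented as a VAArgExpr
+///     va_end(ap);
+///     return first;
+///   }
+/// @endcode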
+class VAArgExpr : public Expr {
+ Stmt *Val;
+ TypeSourceInfo *TInfo;
+ SourceLocation BuiltinLoc, RParenLoc;
+public:
+ VAArgExpr(SourceLocation BLoc, Expr* e, TypeSourceInfo *TInfo,
+ SourceLocation RPLoc, QualType t)
+ : Expr(VAArgExprClass, t, VK_RValue, OK_Ordinary,
+ t->isDependentType(), false,
+ (TInfo->getType()->isInstantiationDependentType() ||
+ e->isInstantiationDependent()),
+ (TInfo->getType()->containsUnexpandedParameterPack() ||
+ e->containsUnexpandedParameterPack())),
+ Val(e), TInfo(TInfo),
+ BuiltinLoc(BLoc),
+ RParenLoc(RPLoc) { }
+
+ /// \brief Create an empty __builtin_va_arg expression.
+ explicit VAArgExpr(EmptyShell Empty) : Expr(VAArgExprClass, Empty) { }
+
+ const Expr *getSubExpr() const { return cast<Expr>(Val); }
+ Expr *getSubExpr() { return cast<Expr>(Val); }
+ void setSubExpr(Expr *E) { Val = E; }
+
+ TypeSourceInfo *getWrittenTypeInfo() const { return TInfo; }
+ void setWrittenTypeInfo(TypeSourceInfo *TI) { TInfo = TI; }
+
+ SourceLocation getBuiltinLoc() const { return BuiltinLoc; }
+ void setBuiltinLoc(SourceLocation L) { BuiltinLoc = L; }
+
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(BuiltinLoc, RParenLoc);
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == VAArgExprClass;
+ }
+ static bool classof(const VAArgExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&Val, &Val+1); }
+};
+
+/// @brief Describes a C or C++ initializer list.
+///
+/// InitListExpr describes an initializer list, which can be used to
+/// initialize objects of different types, including
+/// struct/class/union types, arrays, and vectors. For example:
+///
+/// @code
+/// struct foo x = { 1, { 2, 3 } };
+/// @endcode
+///
+/// Prior to semantic analysis, an initializer list will represent the
+/// initializer list as written by the user, but will have the
+/// placeholder type "void". This initializer list is called the
+/// syntactic form of the initializer, and may contain C99 designated
+/// initializers (represented as DesignatedInitExprs), initializations
+/// of subobject members without explicit braces, and so on. Clients
+/// interested in the original syntax of the initializer list should
+/// use the syntactic form of the initializer list.
+///
+/// After semantic analysis, the initializer list will represent the
+/// semantic form of the initializer, where the initializations of all
+/// subobjects are made explicit with nested InitListExpr nodes and
+/// C99 designators have been eliminated by placing the designated
+/// initializations into the subobject they initialize. Additionally,
+/// any "holes" in the initialization, where no initializer has been
+/// specified for a particular subobject, will be replaced with
+/// implicitly-generated ImplicitValueInitExpr expressions that
+/// value-initialize the subobjects. Note, however, that the
+/// initializer lists may still have fewer initializers than there are
+/// elements to initialize within the object.
+///
+/// Given the semantic form of the initializer list, one can retrieve
+/// the original syntactic form of that initializer list (if it
+/// exists) using getSyntacticForm(). Since many initializer lists
+/// have the same syntactic and semantic forms, getSyntacticForm() may
+/// return NULL, indicating that the current initializer list also
+/// serves as its syntactic form.
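+///
+/// An illustrative example of the two forms:
+/// @code
+///   int a[3] = { 1 };
+/// @endcode
+/// The syntactic form records the single initializer '1' as written; the
+/// semantic form additionally records an array filler (an
+/// ImplicitValueInitExpr) used to value-initialize the remaining elements.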
+class InitListExpr : public Expr {
+ // FIXME: Eliminate this vector in favor of ASTContext allocation
+ typedef ASTVector<Stmt *> InitExprsTy;
+ InitExprsTy InitExprs;
+ SourceLocation LBraceLoc, RBraceLoc;
+
+ /// Contains the initializer list that describes the syntactic form
+ /// written in the source code.
+ InitListExpr *SyntacticForm;
+
+ /// \brief Either:
+ /// If this initializer list initializes an array with more elements than
+ /// there are initializers in the list, specifies an expression to be used
+ /// for value initialization of the rest of the elements.
+ /// Or
+ /// If this initializer list initializes a union, specifies which
+ /// field within the union will be initialized.
+ llvm::PointerUnion<Expr *, FieldDecl *> ArrayFillerOrUnionFieldInit;
+
+public:
+ InitListExpr(ASTContext &C, SourceLocation lbraceloc,
+ Expr **initexprs, unsigned numinits,
+ SourceLocation rbraceloc);
+
+ /// \brief Build an empty initializer list.
+ explicit InitListExpr(ASTContext &C, EmptyShell Empty)
+ : Expr(InitListExprClass, Empty), InitExprs(C) { }
+
+ unsigned getNumInits() const { return InitExprs.size(); }
+
+ /// \brief Retrieve the set of initializers.
+ Expr **getInits() { return reinterpret_cast<Expr **>(InitExprs.data()); }
+
+ const Expr *getInit(unsigned Init) const {
+ assert(Init < getNumInits() && "Initializer access out of range!");
+ return cast_or_null<Expr>(InitExprs[Init]);
+ }
+
+ Expr *getInit(unsigned Init) {
+ assert(Init < getNumInits() && "Initializer access out of range!");
+ return cast_or_null<Expr>(InitExprs[Init]);
+ }
+
+ void setInit(unsigned Init, Expr *expr) {
+ assert(Init < getNumInits() && "Initializer access out of range!");
+ InitExprs[Init] = expr;
+ }
+
+ /// \brief Reserve space for some number of initializers.
+ void reserveInits(ASTContext &C, unsigned NumInits);
+
+ /// @brief Specify the number of initializers
+ ///
+ /// If there are more than @p NumInits initializers, the remaining
+ /// initializers will be destroyed. If there are fewer than @p
+ /// NumInits initializers, NULL expressions will be added for the
+ /// unknown initializers.
+ void resizeInits(ASTContext &Context, unsigned NumInits);
+
+ /// @brief Updates the initializer at index @p Init with the new
+ /// expression @p expr, and returns the old expression at that
+ /// location.
+ ///
+ /// When @p Init is out of range for this initializer list, the
+ /// initializer list will be extended with NULL expressions to
+ /// accommodate the new entry.
+ Expr *updateInit(ASTContext &C, unsigned Init, Expr *expr);
+
+ /// \brief If this initializer list initializes an array with more elements
+ /// than there are initializers in the list, specifies an expression to be
+ /// used for value initialization of the rest of the elements.
+ Expr *getArrayFiller() {
+ return ArrayFillerOrUnionFieldInit.dyn_cast<Expr *>();
+ }
+ const Expr *getArrayFiller() const {
+ return const_cast<InitListExpr *>(this)->getArrayFiller();
+ }
+ void setArrayFiller(Expr *filler);
+
+ /// \brief Return true if this is an array initializer and its array "filler"
+ /// has been set.
+ bool hasArrayFiller() const { return getArrayFiller(); }
+
+ /// \brief If this initializes a union, specifies which field in the
+ /// union to initialize.
+ ///
+ /// Typically, this field is the first named field within the
+ /// union. However, a designated initializer can specify the
+ /// initialization of a different field within the union.
+ FieldDecl *getInitializedFieldInUnion() {
+ return ArrayFillerOrUnionFieldInit.dyn_cast<FieldDecl *>();
+ }
+ const FieldDecl *getInitializedFieldInUnion() const {
+ return const_cast<InitListExpr *>(this)->getInitializedFieldInUnion();
+ }
+ void setInitializedFieldInUnion(FieldDecl *FD) {
+ ArrayFillerOrUnionFieldInit = FD;
+ }
+
+ // Explicit InitListExprs originate from source code (and have valid source
+ // locations). Implicit InitListExprs are created by the semantic analyzer.
+ bool isExplicit() {
+ return LBraceLoc.isValid() && RBraceLoc.isValid();
+ }
+
+ SourceLocation getLBraceLoc() const { return LBraceLoc; }
+ void setLBraceLoc(SourceLocation Loc) { LBraceLoc = Loc; }
+ SourceLocation getRBraceLoc() const { return RBraceLoc; }
+ void setRBraceLoc(SourceLocation Loc) { RBraceLoc = Loc; }
+
+ /// @brief Retrieve the initializer list that describes the
+ /// syntactic form of the initializer.
+ ///
+ InitListExpr *getSyntacticForm() const { return SyntacticForm; }
+ void setSyntacticForm(InitListExpr *Init) { SyntacticForm = Init; }
+
+ bool hadArrayRangeDesignator() const {
+ return InitListExprBits.HadArrayRangeDesignator != 0;
+ }
+ void sawArrayRangeDesignator(bool ARD = true) {
+ InitListExprBits.HadArrayRangeDesignator = ARD;
+ }
+
+ bool initializesStdInitializerList() const {
+ return InitListExprBits.InitializesStdInitializerList != 0;
+ }
+ void setInitializesStdInitializerList(bool ISIL = true) {
+ InitListExprBits.InitializesStdInitializerList = ISIL;
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == InitListExprClass;
+ }
+ static bool classof(const InitListExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ if (InitExprs.empty()) return child_range();
+ return child_range(&InitExprs[0], &InitExprs[0] + InitExprs.size());
+ }
+
+ typedef InitExprsTy::iterator iterator;
+ typedef InitExprsTy::const_iterator const_iterator;
+ typedef InitExprsTy::reverse_iterator reverse_iterator;
+ typedef InitExprsTy::const_reverse_iterator const_reverse_iterator;
+
+ iterator begin() { return InitExprs.begin(); }
+ const_iterator begin() const { return InitExprs.begin(); }
+ iterator end() { return InitExprs.end(); }
+ const_iterator end() const { return InitExprs.end(); }
+ reverse_iterator rbegin() { return InitExprs.rbegin(); }
+ const_reverse_iterator rbegin() const { return InitExprs.rbegin(); }
+ reverse_iterator rend() { return InitExprs.rend(); }
+ const_reverse_iterator rend() const { return InitExprs.rend(); }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+};
+
+/// @brief Represents a C99 designated initializer expression.
+///
+/// A designated initializer expression (C99 6.7.8) contains one or
+/// more designators (which can be field designators, array
+/// designators, or GNU array-range designators) followed by an
+/// expression that initializes the field or element(s) that the
+/// designators refer to. For example, given:
+///
+/// @code
+/// struct point {
+/// double x;
+/// double y;
+/// };
+/// struct point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 };
+/// @endcode
+///
+/// The InitListExpr contains three DesignatedInitExprs, the first of
+/// which covers @c [2].y=1.0. This DesignatedInitExpr will have two
+/// designators, one array designator for @c [2] followed by one field
+/// designator for @c .y. The initialization expression will be 1.0.
+class DesignatedInitExpr : public Expr {
+public:
+ /// \brief Forward declaration of the Designator class.
+ class Designator;
+
+private:
+ /// The location of the '=' or ':' prior to the actual initializer
+ /// expression.
+ SourceLocation EqualOrColonLoc;
+
+ /// Whether this designated initializer used the GNU deprecated
+ /// syntax rather than the C99 '=' syntax.
+ bool GNUSyntax : 1;
+
+ /// The number of designators in this initializer expression.
+ unsigned NumDesignators : 15;
+
+ /// The number of subexpressions of this initializer expression,
+ /// which contains both the initializer and any additional
+ /// expressions used by array and array-range designators.
+ unsigned NumSubExprs : 16;
+
+ /// \brief The designators in this designated initialization
+ /// expression.
+ Designator *Designators;
+
+
+ DesignatedInitExpr(ASTContext &C, QualType Ty, unsigned NumDesignators,
+ const Designator *Designators,
+ SourceLocation EqualOrColonLoc, bool GNUSyntax,
+ Expr **IndexExprs, unsigned NumIndexExprs,
+ Expr *Init);
+
+ explicit DesignatedInitExpr(unsigned NumSubExprs)
+ : Expr(DesignatedInitExprClass, EmptyShell()),
+ NumDesignators(0), NumSubExprs(NumSubExprs), Designators(0) { }
+
+public:
+ /// A field designator, e.g., ".x".
+ struct FieldDesignator {
+ /// Refers to the field that is being initialized. The low bit
+ /// of this field determines whether this is actually a pointer
+ /// to an IdentifierInfo (if 1) or a FieldDecl (if 0). When
+ /// initially constructed, a field designator will store an
+ /// IdentifierInfo*. After semantic analysis has resolved that
+ /// name, the field designator will instead store a FieldDecl*.
+ uintptr_t NameOrField;
+
+ /// The location of the '.' in the designated initializer.
+ unsigned DotLoc;
+
+ /// The location of the field name in the designated initializer.
+ unsigned FieldLoc;
+ };
+
+ /// An array or GNU array-range designator, e.g., "[9]" or "[10..15]".
+ struct ArrayOrRangeDesignator {
+ /// Location of the first index expression within the designated
+ /// initializer expression's list of subexpressions.
+ unsigned Index;
+ /// The location of the '[' starting the array range designator.
+ unsigned LBracketLoc;
+ /// The location of the ellipsis separating the start and end
+ /// indices. Only valid for GNU array-range designators.
+ unsigned EllipsisLoc;
+ /// The location of the ']' terminating the array range designator.
+ unsigned RBracketLoc;
+ };
+
+ /// @brief Represents a single C99 designator.
+ ///
+ /// @todo This class is infuriatingly similar to clang::Designator,
+ /// but minor differences (storing indices vs. storing pointers)
+ /// keep us from reusing it. Try harder, later, to rectify these
+ /// differences.
+ class Designator {
+ /// @brief The kind of designator this describes.
+ enum {
+ FieldDesignator,
+ ArrayDesignator,
+ ArrayRangeDesignator
+ } Kind;
+
+ union {
+ /// A field designator, e.g., ".x".
+ struct FieldDesignator Field;
+ /// An array or GNU array-range designator, e.g., "[9]" or "[10..15]".
+ struct ArrayOrRangeDesignator ArrayOrRange;
+ };
+ friend class DesignatedInitExpr;
+
+ public:
+ Designator() {}
+
+ /// @brief Initializes a field designator.
+ Designator(const IdentifierInfo *FieldName, SourceLocation DotLoc,
+ SourceLocation FieldLoc)
+ : Kind(FieldDesignator) {
+ Field.NameOrField = reinterpret_cast<uintptr_t>(FieldName) | 0x01;
+ Field.DotLoc = DotLoc.getRawEncoding();
+ Field.FieldLoc = FieldLoc.getRawEncoding();
+ }
+
+ /// @brief Initializes an array designator.
+ Designator(unsigned Index, SourceLocation LBracketLoc,
+ SourceLocation RBracketLoc)
+ : Kind(ArrayDesignator) {
+ ArrayOrRange.Index = Index;
+ ArrayOrRange.LBracketLoc = LBracketLoc.getRawEncoding();
+ ArrayOrRange.EllipsisLoc = SourceLocation().getRawEncoding();
+ ArrayOrRange.RBracketLoc = RBracketLoc.getRawEncoding();
+ }
+
+ /// @brief Initializes a GNU array-range designator.
+ Designator(unsigned Index, SourceLocation LBracketLoc,
+ SourceLocation EllipsisLoc, SourceLocation RBracketLoc)
+ : Kind(ArrayRangeDesignator) {
+ ArrayOrRange.Index = Index;
+ ArrayOrRange.LBracketLoc = LBracketLoc.getRawEncoding();
+ ArrayOrRange.EllipsisLoc = EllipsisLoc.getRawEncoding();
+ ArrayOrRange.RBracketLoc = RBracketLoc.getRawEncoding();
+ }
+
+ bool isFieldDesignator() const { return Kind == FieldDesignator; }
+ bool isArrayDesignator() const { return Kind == ArrayDesignator; }
+ bool isArrayRangeDesignator() const { return Kind == ArrayRangeDesignator; }
+
+ IdentifierInfo *getFieldName() const;
+
+ FieldDecl *getField() const {
+ assert(Kind == FieldDesignator && "Only valid on a field designator");
+ if (Field.NameOrField & 0x01)
+ return 0;
+ else
+ return reinterpret_cast<FieldDecl *>(Field.NameOrField);
+ }
+
+ void setField(FieldDecl *FD) {
+ assert(Kind == FieldDesignator && "Only valid on a field designator");
+ Field.NameOrField = reinterpret_cast<uintptr_t>(FD);
+ }
+
+ SourceLocation getDotLoc() const {
+ assert(Kind == FieldDesignator && "Only valid on a field designator");
+ return SourceLocation::getFromRawEncoding(Field.DotLoc);
+ }
+
+ SourceLocation getFieldLoc() const {
+ assert(Kind == FieldDesignator && "Only valid on a field designator");
+ return SourceLocation::getFromRawEncoding(Field.FieldLoc);
+ }
+
+ SourceLocation getLBracketLoc() const {
+ assert((Kind == ArrayDesignator || Kind == ArrayRangeDesignator) &&
+ "Only valid on an array or array-range designator");
+ return SourceLocation::getFromRawEncoding(ArrayOrRange.LBracketLoc);
+ }
+
+ SourceLocation getRBracketLoc() const {
+ assert((Kind == ArrayDesignator || Kind == ArrayRangeDesignator) &&
+ "Only valid on an array or array-range designator");
+ return SourceLocation::getFromRawEncoding(ArrayOrRange.RBracketLoc);
+ }
+
+ SourceLocation getEllipsisLoc() const {
+ assert(Kind == ArrayRangeDesignator &&
+ "Only valid on an array-range designator");
+ return SourceLocation::getFromRawEncoding(ArrayOrRange.EllipsisLoc);
+ }
+
+ unsigned getFirstExprIndex() const {
+ assert((Kind == ArrayDesignator || Kind == ArrayRangeDesignator) &&
+ "Only valid on an array or array-range designator");
+ return ArrayOrRange.Index;
+ }
+
+ SourceLocation getStartLocation() const {
+ if (Kind == FieldDesignator)
+ return getDotLoc().isInvalid()? getFieldLoc() : getDotLoc();
+ else
+ return getLBracketLoc();
+ }
+ SourceLocation getEndLocation() const {
+ return Kind == FieldDesignator ? getFieldLoc() : getRBracketLoc();
+ }
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getStartLocation(), getEndLocation());
+ }
+ };
+
+ static DesignatedInitExpr *Create(ASTContext &C, Designator *Designators,
+ unsigned NumDesignators,
+ Expr **IndexExprs, unsigned NumIndexExprs,
+ SourceLocation EqualOrColonLoc,
+ bool GNUSyntax, Expr *Init);
+
+ static DesignatedInitExpr *CreateEmpty(ASTContext &C, unsigned NumIndexExprs);
+
+ /// @brief Returns the number of designators in this initializer.
+ unsigned size() const { return NumDesignators; }
+
+ // Iterator access to the designators.
+ typedef Designator *designators_iterator;
+ designators_iterator designators_begin() { return Designators; }
+ designators_iterator designators_end() {
+ return Designators + NumDesignators;
+ }
+
+ typedef const Designator *const_designators_iterator;
+ const_designators_iterator designators_begin() const { return Designators; }
+ const_designators_iterator designators_end() const {
+ return Designators + NumDesignators;
+ }
+
+ typedef std::reverse_iterator<designators_iterator>
+ reverse_designators_iterator;
+ reverse_designators_iterator designators_rbegin() {
+ return reverse_designators_iterator(designators_end());
+ }
+ reverse_designators_iterator designators_rend() {
+ return reverse_designators_iterator(designators_begin());
+ }
+
+ typedef std::reverse_iterator<const_designators_iterator>
+ const_reverse_designators_iterator;
+ const_reverse_designators_iterator designators_rbegin() const {
+ return const_reverse_designators_iterator(designators_end());
+ }
+ const_reverse_designators_iterator designators_rend() const {
+ return const_reverse_designators_iterator(designators_begin());
+ }
+
+ Designator *getDesignator(unsigned Idx) { return &designators_begin()[Idx]; }
+
+ void setDesignators(ASTContext &C, const Designator *Desigs,
+ unsigned NumDesigs);
+
+ Expr *getArrayIndex(const Designator& D);
+ Expr *getArrayRangeStart(const Designator& D);
+ Expr *getArrayRangeEnd(const Designator& D);
+
+ /// @brief Retrieve the location of the '=' that precedes the
+ /// initializer value itself, if present.
+ SourceLocation getEqualOrColonLoc() const { return EqualOrColonLoc; }
+ void setEqualOrColonLoc(SourceLocation L) { EqualOrColonLoc = L; }
+
+ /// @brief Determines whether this designated initializer used the
+ /// deprecated GNU syntax for designated initializers.
+ bool usesGNUSyntax() const { return GNUSyntax; }
+ void setGNUSyntax(bool GNU) { GNUSyntax = GNU; }
+
+ /// @brief Retrieve the initializer value.
+ Expr *getInit() const {
+ return cast<Expr>(*const_cast<DesignatedInitExpr*>(this)->child_begin());
+ }
+
+ void setInit(Expr *init) {
+ *child_begin() = init;
+ }
+
+ /// \brief Retrieve the total number of subexpressions in this
+ /// designated initializer expression, including the actual
+ /// initialized value and any expressions that occur within array
+ /// and array-range designators.
+ unsigned getNumSubExprs() const { return NumSubExprs; }
+
+ Expr *getSubExpr(unsigned Idx) {
+ assert(Idx < NumSubExprs && "Subscript out of range");
+ char* Ptr = static_cast<char*>(static_cast<void *>(this));
+ Ptr += sizeof(DesignatedInitExpr);
+ return reinterpret_cast<Expr**>(reinterpret_cast<void**>(Ptr))[Idx];
+ }
+
+ void setSubExpr(unsigned Idx, Expr *E) {
+ assert(Idx < NumSubExprs && "Subscript out of range");
+ char* Ptr = static_cast<char*>(static_cast<void *>(this));
+ Ptr += sizeof(DesignatedInitExpr);
+ reinterpret_cast<Expr**>(reinterpret_cast<void**>(Ptr))[Idx] = E;
+ }
+
+ /// \brief Replaces the designator at index @p Idx with the series
+ /// of designators in [First, Last).
+ void ExpandDesignator(ASTContext &C, unsigned Idx, const Designator *First,
+ const Designator *Last);
+
+ SourceRange getDesignatorsSourceRange() const;
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == DesignatedInitExprClass;
+ }
+ static bool classof(const DesignatedInitExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ Stmt **begin = reinterpret_cast<Stmt**>(this + 1);
+ return child_range(begin, begin + NumSubExprs);
+ }
+};
+
+/// \brief Represents an implicitly-generated value initialization of
+/// an object of a given type.
+///
+/// Implicit value initializations occur within semantic initializer
+/// list expressions (InitListExpr) as placeholders for subobject
+/// initializations not explicitly specified by the user.
+///
+/// \see InitListExpr
+class ImplicitValueInitExpr : public Expr {
+public:
+ explicit ImplicitValueInitExpr(QualType ty)
+ : Expr(ImplicitValueInitExprClass, ty, VK_RValue, OK_Ordinary,
+ false, false, ty->isInstantiationDependentType(), false) { }
+
+ /// \brief Construct an empty implicit value initialization.
+ explicit ImplicitValueInitExpr(EmptyShell Empty)
+ : Expr(ImplicitValueInitExprClass, Empty) { }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ImplicitValueInitExprClass;
+ }
+ static bool classof(const ImplicitValueInitExpr *) { return true; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange();
+ }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+
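+/// ParenListExpr - A parenthesized, comma-separated list of expressions,
+/// e.g. "(a, b, 3)"; holds sub-expressions that have not yet been resolved
+/// into a more specific expression form.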
+class ParenListExpr : public Expr {
+ Stmt **Exprs;
+ unsigned NumExprs;
+ SourceLocation LParenLoc, RParenLoc;
+
+public:
+ ParenListExpr(ASTContext& C, SourceLocation lparenloc, Expr **exprs,
+ unsigned numexprs, SourceLocation rparenloc);
+
+ /// \brief Build an empty paren list.
+ explicit ParenListExpr(EmptyShell Empty) : Expr(ParenListExprClass, Empty) { }
+
+ unsigned getNumExprs() const { return NumExprs; }
+
+ const Expr* getExpr(unsigned Init) const {
+ assert(Init < getNumExprs() && "Initializer access out of range!");
+ return cast_or_null<Expr>(Exprs[Init]);
+ }
+
+ Expr* getExpr(unsigned Init) {
+ assert(Init < getNumExprs() && "Initializer access out of range!");
+ return cast_or_null<Expr>(Exprs[Init]);
+ }
+
+ Expr **getExprs() { return reinterpret_cast<Expr **>(Exprs); }
+
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(LParenLoc, RParenLoc);
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ParenListExprClass;
+ }
+ static bool classof(const ParenListExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(&Exprs[0], &Exprs[0]+NumExprs);
+ }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+};
+
+
+/// \brief Represents a C11 generic selection.
+///
+/// A generic selection (C11 6.5.1.1) contains an unevaluated controlling
+/// expression, followed by one or more generic associations. Each generic
+/// association specifies a type name and an expression, or "default" and an
+/// expression (in which case it is known as a default generic association).
+/// The type and value of the generic selection are identical to those of its
+/// result expression, which is defined as the expression in the generic
+/// association with a type name that is compatible with the type of the
+/// controlling expression, or the expression in the default generic association
+/// if no types are compatible. For example:
+///
+/// @code
+/// _Generic(X, double: 1, float: 2, default: 3)
+/// @endcode
+///
+/// The above expression evaluates to 1 if 1.0 is substituted for X, 2 if 1.0f
+/// is substituted, or 3 if "hello" is substituted.
+///
+/// As an extension, generic selections are allowed in C++, where the following
+/// additional semantics apply:
+///
+/// Any generic selection whose controlling expression is type-dependent or
+/// which names a dependent type in its association list is result-dependent,
+/// which means that the choice of result expression is dependent.
+/// Result-dependent generic associations are both type- and value-dependent.
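+///
+/// An illustrative (assumed) result-dependent case in C++:
+/// @code
+///   template <typename T> int classify(T x) {
+///     return _Generic(x, int: 1, default: 2);  // result-dependent until T is known
+///   }
+/// @endcode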
+class GenericSelectionExpr : public Expr {
+ enum { CONTROLLING, END_EXPR };
+ TypeSourceInfo **AssocTypes;
+ Stmt **SubExprs;
+ unsigned NumAssocs, ResultIndex;
+ SourceLocation GenericLoc, DefaultLoc, RParenLoc;
+
+public:
+ GenericSelectionExpr(ASTContext &Context,
+ SourceLocation GenericLoc, Expr *ControllingExpr,
+ TypeSourceInfo **AssocTypes, Expr **AssocExprs,
+ unsigned NumAssocs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ bool ContainsUnexpandedParameterPack,
+ unsigned ResultIndex);
+
+ /// This constructor is used in the result-dependent case.
+ GenericSelectionExpr(ASTContext &Context,
+ SourceLocation GenericLoc, Expr *ControllingExpr,
+ TypeSourceInfo **AssocTypes, Expr **AssocExprs,
+ unsigned NumAssocs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ bool ContainsUnexpandedParameterPack);
+
+ explicit GenericSelectionExpr(EmptyShell Empty)
+ : Expr(GenericSelectionExprClass, Empty) { }
+
+ unsigned getNumAssocs() const { return NumAssocs; }
+
+ SourceLocation getGenericLoc() const { return GenericLoc; }
+ SourceLocation getDefaultLoc() const { return DefaultLoc; }
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+
+ const Expr *getAssocExpr(unsigned i) const {
+ return cast<Expr>(SubExprs[END_EXPR+i]);
+ }
+ Expr *getAssocExpr(unsigned i) { return cast<Expr>(SubExprs[END_EXPR+i]); }
+
+ const TypeSourceInfo *getAssocTypeSourceInfo(unsigned i) const {
+ return AssocTypes[i];
+ }
+ TypeSourceInfo *getAssocTypeSourceInfo(unsigned i) { return AssocTypes[i]; }
+
+ QualType getAssocType(unsigned i) const {
+ if (const TypeSourceInfo *TS = getAssocTypeSourceInfo(i))
+ return TS->getType();
+ else
+ return QualType();
+ }
+
+ const Expr *getControllingExpr() const {
+ return cast<Expr>(SubExprs[CONTROLLING]);
+ }
+ Expr *getControllingExpr() { return cast<Expr>(SubExprs[CONTROLLING]); }
+
+ /// Whether this generic selection is result-dependent.
+ bool isResultDependent() const { return ResultIndex == -1U; }
+
+ /// The zero-based index of the result expression's generic association in
+ /// the generic selection's association list. Defined only if the
+ /// generic selection is not result-dependent.
+ unsigned getResultIndex() const {
+ assert(!isResultDependent() && "Generic selection is result-dependent");
+ return ResultIndex;
+ }
+
+ /// The generic selection's result expression. Defined only if the
+ /// generic selection is not result-dependent.
+ const Expr *getResultExpr() const { return getAssocExpr(getResultIndex()); }
+ Expr *getResultExpr() { return getAssocExpr(getResultIndex()); }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(GenericLoc, RParenLoc);
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == GenericSelectionExprClass;
+ }
+ static bool classof(const GenericSelectionExpr *) { return true; }
+
+ child_range children() {
+ return child_range(SubExprs, SubExprs+END_EXPR+NumAssocs);
+ }
+
+ friend class ASTStmtReader;
+};
+
+//===----------------------------------------------------------------------===//
+// Clang Extensions
+//===----------------------------------------------------------------------===//
+
+
+/// ExtVectorElementExpr - This represents access to specific elements of a
+/// vector, and may occur on the left hand side or right hand side. For example
+/// the following is legal: "V.xy = V.zw" if V is a 4 element extended vector.
+///
+/// Note that the base may have either vector or pointer to vector type, just
+/// like a struct field reference.
+///
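+/// An illustrative example (the typedef is assumed):
+/// @code
+///   typedef float float4 __attribute__((ext_vector_type(4)));
+///   float4 v;
+///   v.xy = v.zw;    // element accesses on both sides of the assignment
+///   float s = v.w;  // single-element access
+/// @endcode
+///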
+class ExtVectorElementExpr : public Expr {
+ Stmt *Base;
+ IdentifierInfo *Accessor;
+ SourceLocation AccessorLoc;
+public:
+ ExtVectorElementExpr(QualType ty, ExprValueKind VK, Expr *base,
+ IdentifierInfo &accessor, SourceLocation loc)
+ : Expr(ExtVectorElementExprClass, ty, VK,
+ (VK == VK_RValue ? OK_Ordinary : OK_VectorComponent),
+ base->isTypeDependent(), base->isValueDependent(),
+ base->isInstantiationDependent(),
+ base->containsUnexpandedParameterPack()),
+ Base(base), Accessor(&accessor), AccessorLoc(loc) {}
+
+ /// \brief Build an empty vector element expression.
+ explicit ExtVectorElementExpr(EmptyShell Empty)
+ : Expr(ExtVectorElementExprClass, Empty) { }
+
+ const Expr *getBase() const { return cast<Expr>(Base); }
+ Expr *getBase() { return cast<Expr>(Base); }
+ void setBase(Expr *E) { Base = E; }
+
+ IdentifierInfo &getAccessor() const { return *Accessor; }
+ void setAccessor(IdentifierInfo *II) { Accessor = II; }
+
+ SourceLocation getAccessorLoc() const { return AccessorLoc; }
+ void setAccessorLoc(SourceLocation L) { AccessorLoc = L; }
+
+ /// getNumElements - Get the number of components being selected.
+ unsigned getNumElements() const;
+
+ /// containsDuplicateElements - Return true if any element access is
+ /// repeated.
+ bool containsDuplicateElements() const;
+
+ /// getEncodedElementAccess - Encode the elements accessed into an llvm
+ /// aggregate Constant of ConstantInt(s).
+ void getEncodedElementAccess(SmallVectorImpl<unsigned> &Elts) const;
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getBase()->getLocStart(), AccessorLoc);
+ }
+
+ /// isArrow - Return true if the base expression is a pointer to vector,
+ /// return false if the base expression is a vector.
+ bool isArrow() const;
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ExtVectorElementExprClass;
+ }
+ static bool classof(const ExtVectorElementExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&Base, &Base+1); }
+};
+
+
+/// BlockExpr - Adaptor class for mixing a BlockDecl with expressions.
+/// ^{ statement-body } or ^(int arg1, float arg2){ statement-body }
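+///
+/// For example (illustrative):
+/// @code
+///   int (^add)(int, int) = ^(int a, int b) { return a + b; };
+///   int three = add(1, 2);
+/// @endcode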
+class BlockExpr : public Expr {
+protected:
+ BlockDecl *TheBlock;
+public:
+ BlockExpr(BlockDecl *BD, QualType ty)
+ : Expr(BlockExprClass, ty, VK_RValue, OK_Ordinary,
+ ty->isDependentType(), ty->isDependentType(),
+ ty->isInstantiationDependentType() || BD->isDependentContext(),
+ false),
+ TheBlock(BD) {}
+
+ /// \brief Build an empty block expression.
+ explicit BlockExpr(EmptyShell Empty) : Expr(BlockExprClass, Empty) { }
+
+ const BlockDecl *getBlockDecl() const { return TheBlock; }
+ BlockDecl *getBlockDecl() { return TheBlock; }
+ void setBlockDecl(BlockDecl *BD) { TheBlock = BD; }
+
+ // Convenience functions for probing the underlying BlockDecl.
+ SourceLocation getCaretLocation() const;
+ const Stmt *getBody() const;
+ Stmt *getBody();
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getCaretLocation(), getBody()->getLocEnd());
+ }
+
+ /// getFunctionType - Return the underlying function type for this block.
+ const FunctionProtoType *getFunctionType() const;
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == BlockExprClass;
+ }
+ static bool classof(const BlockExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// AsTypeExpr - Clang builtin function __builtin_astype [OpenCL 6.2.4.2]
+/// This AST node provides support for reinterpreting a type to another
+/// type of the same size.
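+///
+/// For example (illustrative OpenCL code):
+/// @code
+///   float4 f;
+///   int4 i = __builtin_astype(f, int4);  // reinterpret the bits of 'f' as int4
+/// @endcode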
+class AsTypeExpr : public Expr { // Should this be an ExplicitCastExpr?
+private:
+ Stmt *SrcExpr;
+ SourceLocation BuiltinLoc, RParenLoc;
+
+ friend class ASTReader;
+ friend class ASTStmtReader;
+ explicit AsTypeExpr(EmptyShell Empty) : Expr(AsTypeExprClass, Empty) {}
+
+public:
+ AsTypeExpr(Expr* SrcExpr, QualType DstType,
+ ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation BuiltinLoc, SourceLocation RParenLoc)
+ : Expr(AsTypeExprClass, DstType, VK, OK,
+ DstType->isDependentType(),
+ DstType->isDependentType() || SrcExpr->isValueDependent(),
+ (DstType->isInstantiationDependentType() ||
+ SrcExpr->isInstantiationDependent()),
+ (DstType->containsUnexpandedParameterPack() ||
+ SrcExpr->containsUnexpandedParameterPack())),
+ SrcExpr(SrcExpr), BuiltinLoc(BuiltinLoc), RParenLoc(RParenLoc) {}
+
+ /// getSrcExpr - Return the Expr to be converted.
+ Expr *getSrcExpr() const { return cast<Expr>(SrcExpr); }
+
+ /// getBuiltinLoc - Return the location of the __builtin_astype token.
+ SourceLocation getBuiltinLoc() const { return BuiltinLoc; }
+
+ /// getRParenLoc - Return the location of final right parenthesis.
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(BuiltinLoc, RParenLoc);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == AsTypeExprClass;
+ }
+ static bool classof(const AsTypeExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&SrcExpr, &SrcExpr+1); }
+};
+
+/// PseudoObjectExpr - An expression which accesses a pseudo-object
+/// l-value. A pseudo-object is an abstract object, accesses to which
+/// are translated to calls. The pseudo-object expression has a
+/// syntactic form, which shows how the expression was actually
+/// written in the source code, and a semantic form, which is a series
+/// of expressions to be executed in order which detail how the
+/// operation is actually evaluated. Optionally, one of the semantic
+/// forms may also provide a result value for the expression.
+///
+/// If any of the semantic-form expressions is an OpaqueValueExpr,
+/// that OVE is required to have a source expression, and it is bound
+/// to the result of that source expression. Such OVEs may appear
+/// only in subsequent semantic-form expressions and as
+/// sub-expressions of the syntactic form.
+///
+/// PseudoObjectExpr should be used only when an operation can be
+/// usefully described in terms of fairly simple rewrite rules on
+/// objects and functions that are meant to be used by end-developers.
+/// For example, under the Itanium ABI, dynamic casts are implemented
+/// as a call to a runtime function called __dynamic_cast; using this
+/// class to describe that would be inappropriate because that call is
+/// not really part of the user-visible semantics, and instead the
+/// cast is properly reflected in the AST and IR-generation has been
+/// taught to generate the call as necessary. In contrast, an
+/// Objective-C property access is semantically defined to be
+/// equivalent to a particular message send, and this is very much
+/// part of the user model. The name of this class encourages this
+/// modelling design.
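+///
+/// An illustrative (assumed) Objective-C example:
+/// @code
+///   obj.count += 1;
+/// @endcode
+/// The syntactic form is the compound assignment as written; the semantic
+/// form binds 'obj' to an OpaqueValueExpr and expresses the operation as the
+/// corresponding getter and setter message sends.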
+class PseudoObjectExpr : public Expr {
+ // PseudoObjectExprBits.NumSubExprs - The number of sub-expressions.
+ // Always at least two, because the first sub-expression is the
+ // syntactic form.
+
+ // PseudoObjectExprBits.ResultIndex - The index of the
+ // sub-expression holding the result. 0 means the result is void,
+ // which is unambiguous because it's the index of the syntactic
+ // form. Note that this is therefore 1 higher than the value passed
+ // in to Create, which is an index within the semantic forms.
+ // Note also that ASTStmtWriter assumes this encoding.
+
+ Expr **getSubExprsBuffer() { return reinterpret_cast<Expr**>(this + 1); }
+ const Expr * const *getSubExprsBuffer() const {
+ return reinterpret_cast<const Expr * const *>(this + 1);
+ }
+
+ friend class ASTStmtReader;
+
+ PseudoObjectExpr(QualType type, ExprValueKind VK,
+ Expr *syntactic, ArrayRef<Expr*> semantic,
+ unsigned resultIndex);
+
+ PseudoObjectExpr(EmptyShell shell, unsigned numSemanticExprs);
+
+ unsigned getNumSubExprs() const {
+ return PseudoObjectExprBits.NumSubExprs;
+ }
+
+public:
+ /// NoResult - A value for the result index indicating that there is
+ /// no semantic result.
+ enum { NoResult = ~0U };
+
+ static PseudoObjectExpr *Create(ASTContext &Context, Expr *syntactic,
+ ArrayRef<Expr*> semantic,
+ unsigned resultIndex);
+
+ static PseudoObjectExpr *Create(ASTContext &Context, EmptyShell shell,
+ unsigned numSemanticExprs);
+
+ /// Return the syntactic form of this expression, i.e. the
+ /// expression it actually looks like. Likely to be expressed in
+ /// terms of OpaqueValueExprs bound in the semantic form.
+ Expr *getSyntacticForm() { return getSubExprsBuffer()[0]; }
+ const Expr *getSyntacticForm() const { return getSubExprsBuffer()[0]; }
+
+ /// Return the index of the result-bearing expression into the semantic
+ /// expressions, or PseudoObjectExpr::NoResult if there is none.
+ unsigned getResultExprIndex() const {
+ if (PseudoObjectExprBits.ResultIndex == 0) return NoResult;
+ return PseudoObjectExprBits.ResultIndex - 1;
+ }
+
+ /// Return the result-bearing expression, or null if there is none.
+ Expr *getResultExpr() {
+ if (PseudoObjectExprBits.ResultIndex == 0)
+ return 0;
+ return getSubExprsBuffer()[PseudoObjectExprBits.ResultIndex];
+ }
+ const Expr *getResultExpr() const {
+ return const_cast<PseudoObjectExpr*>(this)->getResultExpr();
+ }
+
+ unsigned getNumSemanticExprs() const { return getNumSubExprs() - 1; }
+
+ typedef Expr * const *semantics_iterator;
+ typedef const Expr * const *const_semantics_iterator;
+ semantics_iterator semantics_begin() {
+ return getSubExprsBuffer() + 1;
+ }
+ const_semantics_iterator semantics_begin() const {
+ return getSubExprsBuffer() + 1;
+ }
+ semantics_iterator semantics_end() {
+ return getSubExprsBuffer() + getNumSubExprs();
+ }
+ const_semantics_iterator semantics_end() const {
+ return getSubExprsBuffer() + getNumSubExprs();
+ }
+ Expr *getSemanticExpr(unsigned index) {
+ assert(index + 1 < getNumSubExprs());
+ return getSubExprsBuffer()[index + 1];
+ }
+ const Expr *getSemanticExpr(unsigned index) const {
+ return const_cast<PseudoObjectExpr*>(this)->getSemanticExpr(index);
+ }
+
+ SourceLocation getExprLoc() const LLVM_READONLY {
+ return getSyntacticForm()->getExprLoc();
+ }
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return getSyntacticForm()->getSourceRange();
+ }
+
+ child_range children() {
+ Stmt **cs = reinterpret_cast<Stmt**>(getSubExprsBuffer());
+ return child_range(cs, cs + getNumSubExprs());
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == PseudoObjectExprClass;
+ }
+ static bool classof(const PseudoObjectExpr *) { return true; }
+};
+
+/// AtomicExpr - Variadic atomic builtins: __atomic_exchange, __atomic_fetch_*,
+/// __atomic_load, __atomic_store, and __atomic_compare_exchange_*, for the
+/// similarly-named C++11 atomic operations, and __c11 variants for
+/// <stdatomic.h>. All of these builtins take one primary pointer and at least
+/// one memory order argument.
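+///
+/// For example (an illustrative sketch, assuming a variable
+/// '_Atomic(int) counter'):
+/// \code
+///   int old = __c11_atomic_fetch_add(&counter, 1, __ATOMIC_SEQ_CST);
+/// \endcode
+/// Here '&counter' is the primary pointer and '__ATOMIC_SEQ_CST' is the
+/// memory order argument.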
+class AtomicExpr : public Expr {
+public:
+ enum AtomicOp {
+#define BUILTIN(ID, TYPE, ATTRS)
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) AO ## ID,
+#include "clang/Basic/Builtins.def"
+ // Avoid trailing comma
+ BI_First = 0
+ };
+
+private:
+ enum { PTR, ORDER, VAL1, ORDER_FAIL, VAL2, WEAK, END_EXPR };
+ Stmt* SubExprs[END_EXPR];
+ unsigned NumSubExprs;
+ SourceLocation BuiltinLoc, RParenLoc;
+ AtomicOp Op;
+
+ friend class ASTStmtReader;
+
+public:
+ AtomicExpr(SourceLocation BLoc, Expr **args, unsigned nexpr, QualType t,
+ AtomicOp op, SourceLocation RP);
+
+ /// \brief Determine the number of arguments the specified atomic builtin
+ /// should have.
+ static unsigned getNumSubExprs(AtomicOp Op);
+
+ /// \brief Build an empty AtomicExpr.
+ explicit AtomicExpr(EmptyShell Empty) : Expr(AtomicExprClass, Empty) { }
+
+ Expr *getPtr() const {
+ return cast<Expr>(SubExprs[PTR]);
+ }
+ Expr *getOrder() const {
+ return cast<Expr>(SubExprs[ORDER]);
+ }
+ Expr *getVal1() const {
+ if (Op == AO__c11_atomic_init)
+ return cast<Expr>(SubExprs[ORDER]);
+ assert(NumSubExprs > VAL1);
+ return cast<Expr>(SubExprs[VAL1]);
+ }
+ Expr *getOrderFail() const {
+ assert(NumSubExprs > ORDER_FAIL);
+ return cast<Expr>(SubExprs[ORDER_FAIL]);
+ }
+ Expr *getVal2() const {
+ if (Op == AO__atomic_exchange)
+ return cast<Expr>(SubExprs[ORDER_FAIL]);
+ assert(NumSubExprs > VAL2);
+ return cast<Expr>(SubExprs[VAL2]);
+ }
+ Expr *getWeak() const {
+ assert(NumSubExprs > WEAK);
+ return cast<Expr>(SubExprs[WEAK]);
+ }
+
+ AtomicOp getOp() const { return Op; }
+ unsigned getNumSubExprs() { return NumSubExprs; }
+
+ Expr **getSubExprs() { return reinterpret_cast<Expr **>(SubExprs); }
+
+ bool isVolatile() const {
+ return getPtr()->getType()->getPointeeType().isVolatileQualified();
+ }
+
+ bool isCmpXChg() const {
+ return getOp() == AO__c11_atomic_compare_exchange_strong ||
+ getOp() == AO__c11_atomic_compare_exchange_weak ||
+ getOp() == AO__atomic_compare_exchange ||
+ getOp() == AO__atomic_compare_exchange_n;
+ }
+
+ SourceLocation getBuiltinLoc() const { return BuiltinLoc; }
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(BuiltinLoc, RParenLoc);
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == AtomicExprClass;
+ }
+ static bool classof(const AtomicExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(SubExprs, SubExprs+NumSubExprs);
+ }
+};
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h b/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h
new file mode 100644
index 0000000..b69693d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/ExprCXX.h
@@ -0,0 +1,3638 @@
+//===--- ExprCXX.h - Classes for representing expressions -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Expr interface and subclasses for C++ expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_EXPRCXX_H
+#define LLVM_CLANG_AST_EXPRCXX_H
+
+#include "clang/AST/Expr.h"
+#include "clang/AST/UnresolvedSet.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/Basic/ExpressionTraits.h"
+#include "clang/Basic/Lambda.h"
+#include "clang/Basic/TypeTraits.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+
+class CXXConstructorDecl;
+class CXXDestructorDecl;
+class CXXMethodDecl;
+class CXXTemporary;
+class TemplateArgumentListInfo;
+
+//===--------------------------------------------------------------------===//
+// C++ Expressions.
+//===--------------------------------------------------------------------===//
+
+/// \brief A call to an overloaded operator written using operator
+/// syntax.
+///
+/// Represents a call to an overloaded operator written using operator
+/// syntax, e.g., "x + y" or "*p". While semantically equivalent to a
+/// normal call, this AST node provides better information about the
+/// syntactic representation of the call.
+///
+/// In a C++ template, this expression node kind will be used whenever
+/// any of the arguments are type-dependent. In this case, the
+/// function itself will be a (possibly empty) set of functions and
+/// function templates that were found by name lookup at template
+/// definition time.
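+///
+/// For example (an illustrative sketch):
+/// \code
+///   struct S { S operator+(const S &) const; };
+///   S a, b;
+///   S c = a + b; // 'a + b' is a CXXOperatorCallExpr invoking S::operator+
+/// \endcode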
+class CXXOperatorCallExpr : public CallExpr {
+ /// \brief The overloaded operator.
+ OverloadedOperatorKind Operator;
+
+public:
+ CXXOperatorCallExpr(ASTContext& C, OverloadedOperatorKind Op, Expr *fn,
+ Expr **args, unsigned numargs, QualType t,
+ ExprValueKind VK, SourceLocation operatorloc)
+ : CallExpr(C, CXXOperatorCallExprClass, fn, 0, args, numargs, t, VK,
+ operatorloc),
+ Operator(Op) {}
+ explicit CXXOperatorCallExpr(ASTContext& C, EmptyShell Empty) :
+ CallExpr(C, CXXOperatorCallExprClass, Empty) { }
+
+
+ /// getOperator - Returns the kind of overloaded operator that this
+ /// expression refers to.
+ OverloadedOperatorKind getOperator() const { return Operator; }
+ void setOperator(OverloadedOperatorKind Kind) { Operator = Kind; }
+
+ /// getOperatorLoc - Returns the location of the operator symbol in
+ /// the expression. When @c getOperator()==OO_Call, this is the
+ /// location of the right parenthesis; when @c
+ /// getOperator()==OO_Subscript, this is the location of the right
+ /// bracket.
+ SourceLocation getOperatorLoc() const { return getRParenLoc(); }
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXOperatorCallExprClass;
+ }
+ static bool classof(const CXXOperatorCallExpr *) { return true; }
+};
+
+/// CXXMemberCallExpr - Represents a call to a member function that
+/// may be written either with member call syntax (e.g., "obj.func()"
+/// or "objptr->func()") or with normal function-call syntax
+/// ("func()") within a member function that ends up calling a member
+/// function. The callee in either case is a MemberExpr that contains
+/// both the object argument and the member function, while the
+/// arguments are the arguments within the parentheses (not including
+/// the object argument).
+class CXXMemberCallExpr : public CallExpr {
+public:
+ CXXMemberCallExpr(ASTContext &C, Expr *fn, Expr **args, unsigned numargs,
+ QualType t, ExprValueKind VK, SourceLocation RP)
+ : CallExpr(C, CXXMemberCallExprClass, fn, 0, args, numargs, t, VK, RP) {}
+
+ CXXMemberCallExpr(ASTContext &C, EmptyShell Empty)
+ : CallExpr(C, CXXMemberCallExprClass, Empty) { }
+
+ /// getImplicitObjectArgument - Retrieves the implicit object
+ /// argument for the member call. For example, in "x.f(5)", this
+ /// operation would return "x".
+ Expr *getImplicitObjectArgument() const;
+
+ /// Retrieves the declaration of the called method.
+ CXXMethodDecl *getMethodDecl() const;
+
+ /// getRecordDecl - Retrieves the CXXRecordDecl for the underlying type of
+ /// the implicit object argument. Note that this may not be the same
+ /// declaration as that of the class context of the CXXMethodDecl which this
+ /// function is calling.
+ /// FIXME: Returns 0 for member pointer call exprs.
+ CXXRecordDecl *getRecordDecl();
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXMemberCallExprClass;
+ }
+ static bool classof(const CXXMemberCallExpr *) { return true; }
+};
+
+/// CUDAKernelCallExpr - Represents a call to a CUDA kernel function.
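+///
+/// For example (an illustrative sketch, with hypothetical names):
+/// \code
+///   my_kernel<<<numBlocks, threadsPerBlock>>>(devPtr, n);
+/// \endcode
+/// The execution configuration written between '<<<' and '>>>' is stored as
+/// the config CallExpr (see getConfig()).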
+class CUDAKernelCallExpr : public CallExpr {
+private:
+ enum { CONFIG, END_PREARG };
+
+public:
+ CUDAKernelCallExpr(ASTContext &C, Expr *fn, CallExpr *Config,
+ Expr **args, unsigned numargs, QualType t,
+ ExprValueKind VK, SourceLocation RP)
+ : CallExpr(C, CUDAKernelCallExprClass, fn, END_PREARG, args, numargs, t, VK,
+ RP) {
+ setConfig(Config);
+ }
+
+ CUDAKernelCallExpr(ASTContext &C, EmptyShell Empty)
+ : CallExpr(C, CUDAKernelCallExprClass, END_PREARG, Empty) { }
+
+ const CallExpr *getConfig() const {
+ return cast_or_null<CallExpr>(getPreArg(CONFIG));
+ }
+ CallExpr *getConfig() { return cast_or_null<CallExpr>(getPreArg(CONFIG)); }
+ void setConfig(CallExpr *E) { setPreArg(CONFIG, E); }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CUDAKernelCallExprClass;
+ }
+ static bool classof(const CUDAKernelCallExpr *) { return true; }
+};
+
+/// CXXNamedCastExpr - Abstract class common to all of the C++ "named"
+/// casts, @c static_cast, @c dynamic_cast, @c reinterpret_cast, or @c
+/// const_cast.
+///
+/// This abstract class is inherited by all of the classes
+/// representing "named" casts, e.g., CXXStaticCastExpr,
+/// CXXDynamicCastExpr, CXXReinterpretCastExpr, and CXXConstCastExpr.
+class CXXNamedCastExpr : public ExplicitCastExpr {
+private:
+ SourceLocation Loc; // the location of the casting op
+ SourceLocation RParenLoc; // the location of the right parenthesis
+
+protected:
+ CXXNamedCastExpr(StmtClass SC, QualType ty, ExprValueKind VK,
+ CastKind kind, Expr *op, unsigned PathSize,
+ TypeSourceInfo *writtenTy, SourceLocation l,
+ SourceLocation RParenLoc)
+ : ExplicitCastExpr(SC, ty, VK, kind, op, PathSize, writtenTy), Loc(l),
+ RParenLoc(RParenLoc) {}
+
+ explicit CXXNamedCastExpr(StmtClass SC, EmptyShell Shell, unsigned PathSize)
+ : ExplicitCastExpr(SC, Shell, PathSize) { }
+
+ friend class ASTStmtReader;
+
+public:
+ const char *getCastName() const;
+
+ /// \brief Retrieve the location of the cast operator keyword, e.g.,
+ /// "static_cast".
+ SourceLocation getOperatorLoc() const { return Loc; }
+
+ /// \brief Retrieve the location of the closing parenthesis.
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(Loc, RParenLoc);
+ }
+ static bool classof(const Stmt *T) {
+ switch (T->getStmtClass()) {
+ case CXXStaticCastExprClass:
+ case CXXDynamicCastExprClass:
+ case CXXReinterpretCastExprClass:
+ case CXXConstCastExprClass:
+ return true;
+ default:
+ return false;
+ }
+ }
+ static bool classof(const CXXNamedCastExpr *) { return true; }
+};
+
+/// CXXStaticCastExpr - A C++ @c static_cast expression
+/// (C++ [expr.static.cast]).
+///
+/// This expression node represents a C++ static cast, e.g.,
+/// @c static_cast<int>(1.0).
+class CXXStaticCastExpr : public CXXNamedCastExpr {
+ CXXStaticCastExpr(QualType ty, ExprValueKind vk, CastKind kind, Expr *op,
+ unsigned pathSize, TypeSourceInfo *writtenTy,
+ SourceLocation l, SourceLocation RParenLoc)
+ : CXXNamedCastExpr(CXXStaticCastExprClass, ty, vk, kind, op, pathSize,
+ writtenTy, l, RParenLoc) {}
+
+ explicit CXXStaticCastExpr(EmptyShell Empty, unsigned PathSize)
+ : CXXNamedCastExpr(CXXStaticCastExprClass, Empty, PathSize) { }
+
+public:
+ static CXXStaticCastExpr *Create(ASTContext &Context, QualType T,
+ ExprValueKind VK, CastKind K, Expr *Op,
+ const CXXCastPath *Path,
+ TypeSourceInfo *Written, SourceLocation L,
+ SourceLocation RParenLoc);
+ static CXXStaticCastExpr *CreateEmpty(ASTContext &Context,
+ unsigned PathSize);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXStaticCastExprClass;
+ }
+ static bool classof(const CXXStaticCastExpr *) { return true; }
+};
+
+/// CXXDynamicCastExpr - A C++ @c dynamic_cast expression
+/// (C++ [expr.dynamic.cast]), which may perform a run-time check to
+/// determine how to perform the type cast.
+///
+/// This expression node represents a dynamic cast, e.g.,
+/// @c dynamic_cast<Derived*>(BasePtr).
+class CXXDynamicCastExpr : public CXXNamedCastExpr {
+ CXXDynamicCastExpr(QualType ty, ExprValueKind VK, CastKind kind,
+ Expr *op, unsigned pathSize, TypeSourceInfo *writtenTy,
+ SourceLocation l, SourceLocation RParenLoc)
+ : CXXNamedCastExpr(CXXDynamicCastExprClass, ty, VK, kind, op, pathSize,
+ writtenTy, l, RParenLoc) {}
+
+ explicit CXXDynamicCastExpr(EmptyShell Empty, unsigned pathSize)
+ : CXXNamedCastExpr(CXXDynamicCastExprClass, Empty, pathSize) { }
+
+public:
+ static CXXDynamicCastExpr *Create(ASTContext &Context, QualType T,
+ ExprValueKind VK, CastKind Kind, Expr *Op,
+ const CXXCastPath *Path,
+ TypeSourceInfo *Written, SourceLocation L,
+ SourceLocation RParenLoc);
+
+ static CXXDynamicCastExpr *CreateEmpty(ASTContext &Context,
+ unsigned pathSize);
+
+ bool isAlwaysNull() const;
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXDynamicCastExprClass;
+ }
+ static bool classof(const CXXDynamicCastExpr *) { return true; }
+};
+
+/// CXXReinterpretCastExpr - A C++ @c reinterpret_cast expression (C++
+/// [expr.reinterpret.cast]), which provides a differently-typed view
+/// of a value but performs no actual work at run time.
+///
+/// This expression node represents a reinterpret cast, e.g.,
+/// @c reinterpret_cast<int>(VoidPtr).
+class CXXReinterpretCastExpr : public CXXNamedCastExpr {
+ CXXReinterpretCastExpr(QualType ty, ExprValueKind vk, CastKind kind,
+ Expr *op, unsigned pathSize,
+ TypeSourceInfo *writtenTy, SourceLocation l,
+ SourceLocation RParenLoc)
+ : CXXNamedCastExpr(CXXReinterpretCastExprClass, ty, vk, kind, op,
+ pathSize, writtenTy, l, RParenLoc) {}
+
+ CXXReinterpretCastExpr(EmptyShell Empty, unsigned pathSize)
+ : CXXNamedCastExpr(CXXReinterpretCastExprClass, Empty, pathSize) { }
+
+public:
+ static CXXReinterpretCastExpr *Create(ASTContext &Context, QualType T,
+ ExprValueKind VK, CastKind Kind,
+ Expr *Op, const CXXCastPath *Path,
+ TypeSourceInfo *WrittenTy, SourceLocation L,
+ SourceLocation RParenLoc);
+ static CXXReinterpretCastExpr *CreateEmpty(ASTContext &Context,
+ unsigned pathSize);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXReinterpretCastExprClass;
+ }
+ static bool classof(const CXXReinterpretCastExpr *) { return true; }
+};
+
+/// CXXConstCastExpr - A C++ @c const_cast expression (C++ [expr.const.cast]),
+/// which can remove type qualifiers but does not change the underlying value.
+///
+/// This expression node represents a const cast, e.g.,
+/// @c const_cast<char*>(PtrToConstChar).
+class CXXConstCastExpr : public CXXNamedCastExpr {
+ CXXConstCastExpr(QualType ty, ExprValueKind VK, Expr *op,
+ TypeSourceInfo *writtenTy, SourceLocation l,
+ SourceLocation RParenLoc)
+ : CXXNamedCastExpr(CXXConstCastExprClass, ty, VK, CK_NoOp, op,
+ 0, writtenTy, l, RParenLoc) {}
+
+ explicit CXXConstCastExpr(EmptyShell Empty)
+ : CXXNamedCastExpr(CXXConstCastExprClass, Empty, 0) { }
+
+public:
+ static CXXConstCastExpr *Create(ASTContext &Context, QualType T,
+ ExprValueKind VK, Expr *Op,
+ TypeSourceInfo *WrittenTy, SourceLocation L,
+ SourceLocation RParenLoc);
+ static CXXConstCastExpr *CreateEmpty(ASTContext &Context);
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXConstCastExprClass;
+ }
+ static bool classof(const CXXConstCastExpr *) { return true; }
+};
+
+/// UserDefinedLiteral - A call to a literal operator (C++11 [over.literal])
+/// written as a user-defined literal (C++11 [lit.ext]).
+///
+/// Represents a user-defined literal, e.g. "foo"_bar or 1.23_xyz. While this
+/// is semantically equivalent to a normal call, this AST node provides better
+/// information about the syntactic representation of the literal.
+///
+/// Since literal operators are never found by ADL and can only be declared at
+/// namespace scope, a user-defined literal is never dependent.
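+///
+/// For example (an illustrative sketch, using a hypothetical '_deg' suffix
+/// declared at namespace scope):
+/// \code
+///   long double operator"" _deg(long double d) {
+///     return d * 3.141592653589793L / 180.0L;
+///   }
+///   long double r = 90.0_deg; // UserDefinedLiteral invoking operator"" _deg
+/// \endcode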
+class UserDefinedLiteral : public CallExpr {
+ /// \brief The location of a ud-suffix within the literal.
+ SourceLocation UDSuffixLoc;
+
+public:
+ UserDefinedLiteral(ASTContext &C, Expr *Fn, Expr **Args, unsigned NumArgs,
+ QualType T, ExprValueKind VK, SourceLocation LitEndLoc,
+ SourceLocation SuffixLoc)
+ : CallExpr(C, UserDefinedLiteralClass, Fn, 0, Args, NumArgs, T, VK,
+ LitEndLoc), UDSuffixLoc(SuffixLoc) {}
+ explicit UserDefinedLiteral(ASTContext &C, EmptyShell Empty)
+ : CallExpr(C, UserDefinedLiteralClass, Empty) {}
+
+ /// The kind of literal operator which is invoked.
+ enum LiteralOperatorKind {
+ LOK_Raw, ///< Raw form: operator "" X (const char *)
+ LOK_Template, ///< Raw form: operator "" X<cs...> ()
+ LOK_Integer, ///< operator "" X (unsigned long long)
+ LOK_Floating, ///< operator "" X (long double)
+ LOK_String, ///< operator "" X (const CharT *, size_t)
+ LOK_Character ///< operator "" X (CharT)
+ };
+
+ /// getLiteralOperatorKind - Returns the kind of literal operator invocation
+ /// which this expression represents.
+ LiteralOperatorKind getLiteralOperatorKind() const;
+
+ /// getCookedLiteral - If this is not a raw user-defined literal, get the
+ /// underlying cooked literal (representing the literal with the suffix
+ /// removed).
+ Expr *getCookedLiteral();
+ const Expr *getCookedLiteral() const {
+ return const_cast<UserDefinedLiteral*>(this)->getCookedLiteral();
+ }
+
+ /// getUDSuffixLoc - Returns the location of a ud-suffix in the expression.
+ /// For a string literal, there may be multiple identical suffixes. This
+ /// returns the first.
+ SourceLocation getUDSuffixLoc() const { return getRParenLoc(); }
+
+ /// getUDSuffix - Returns the ud-suffix specified for this literal.
+ const IdentifierInfo *getUDSuffix() const;
+
+ static bool classof(const Stmt *S) {
+ return S->getStmtClass() == UserDefinedLiteralClass;
+ }
+ static bool classof(const UserDefinedLiteral *) { return true; }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+};
+
+/// CXXBoolLiteralExpr - [C++ 2.13.5] C++ Boolean Literal.
+///
+class CXXBoolLiteralExpr : public Expr {
+ bool Value;
+ SourceLocation Loc;
+public:
+ CXXBoolLiteralExpr(bool val, QualType Ty, SourceLocation l) :
+ Expr(CXXBoolLiteralExprClass, Ty, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ Value(val), Loc(l) {}
+
+ explicit CXXBoolLiteralExpr(EmptyShell Empty)
+ : Expr(CXXBoolLiteralExprClass, Empty) { }
+
+ bool getValue() const { return Value; }
+ void setValue(bool V) { Value = V; }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc); }
+
+ SourceLocation getLocation() const { return Loc; }
+ void setLocation(SourceLocation L) { Loc = L; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXBoolLiteralExprClass;
+ }
+ static bool classof(const CXXBoolLiteralExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// CXXNullPtrLiteralExpr - [C++0x 2.14.7] C++ Pointer Literal
+class CXXNullPtrLiteralExpr : public Expr {
+ SourceLocation Loc;
+public:
+ CXXNullPtrLiteralExpr(QualType Ty, SourceLocation l) :
+ Expr(CXXNullPtrLiteralExprClass, Ty, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ Loc(l) {}
+
+ explicit CXXNullPtrLiteralExpr(EmptyShell Empty)
+ : Expr(CXXNullPtrLiteralExprClass, Empty) { }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc); }
+
+ SourceLocation getLocation() const { return Loc; }
+ void setLocation(SourceLocation L) { Loc = L; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXNullPtrLiteralExprClass;
+ }
+ static bool classof(const CXXNullPtrLiteralExpr *) { return true; }
+
+ child_range children() { return child_range(); }
+};
+
+/// CXXTypeidExpr - A C++ @c typeid expression (C++ [expr.typeid]), which gets
+/// the type_info that corresponds to the supplied type, or the (possibly
+/// dynamic) type of the supplied expression.
+///
+/// This represents code like @c typeid(int) or @c typeid(*objPtr)
+class CXXTypeidExpr : public Expr {
+private:
+ llvm::PointerUnion<Stmt *, TypeSourceInfo *> Operand;
+ SourceRange Range;
+
+public:
+ CXXTypeidExpr(QualType Ty, TypeSourceInfo *Operand, SourceRange R)
+ : Expr(CXXTypeidExprClass, Ty, VK_LValue, OK_Ordinary,
+ // typeid is never type-dependent (C++ [temp.dep.expr]p4)
+ false,
+ // typeid is value-dependent if the type or expression are dependent
+ Operand->getType()->isDependentType(),
+ Operand->getType()->isInstantiationDependentType(),
+ Operand->getType()->containsUnexpandedParameterPack()),
+ Operand(Operand), Range(R) { }
+
+ CXXTypeidExpr(QualType Ty, Expr *Operand, SourceRange R)
+ : Expr(CXXTypeidExprClass, Ty, VK_LValue, OK_Ordinary,
+ // typeid is never type-dependent (C++ [temp.dep.expr]p4)
+ false,
+ // typeid is value-dependent if the type or expression are dependent
+ Operand->isTypeDependent() || Operand->isValueDependent(),
+ Operand->isInstantiationDependent(),
+ Operand->containsUnexpandedParameterPack()),
+ Operand(Operand), Range(R) { }
+
+ CXXTypeidExpr(EmptyShell Empty, bool isExpr)
+ : Expr(CXXTypeidExprClass, Empty) {
+ if (isExpr)
+ Operand = (Expr*)0;
+ else
+ Operand = (TypeSourceInfo*)0;
+ }
+
+ bool isTypeOperand() const { return Operand.is<TypeSourceInfo *>(); }
+
+ /// \brief Retrieves the type operand of this typeid() expression after
+ /// various required adjustments (removing reference types, cv-qualifiers).
+ QualType getTypeOperand() const;
+
+ /// \brief Retrieve source information for the type operand.
+ TypeSourceInfo *getTypeOperandSourceInfo() const {
+ assert(isTypeOperand() && "Cannot call getTypeOperand for typeid(expr)");
+ return Operand.get<TypeSourceInfo *>();
+ }
+
+ void setTypeOperandSourceInfo(TypeSourceInfo *TSI) {
+ assert(isTypeOperand() && "Cannot call getTypeOperand for typeid(expr)");
+ Operand = TSI;
+ }
+
+ Expr *getExprOperand() const {
+ assert(!isTypeOperand() && "Cannot call getExprOperand for typeid(type)");
+ return static_cast<Expr*>(Operand.get<Stmt *>());
+ }
+
+ void setExprOperand(Expr *E) {
+ assert(!isTypeOperand() && "Cannot call getExprOperand for typeid(type)");
+ Operand = E;
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return Range; }
+ void setSourceRange(SourceRange R) { Range = R; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXTypeidExprClass;
+ }
+ static bool classof(const CXXTypeidExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ if (isTypeOperand()) return child_range();
+ Stmt **begin = reinterpret_cast<Stmt**>(&Operand);
+ return child_range(begin, begin + 1);
+ }
+};
+
+/// CXXUuidofExpr - A Microsoft C++ @c __uuidof expression, which gets
+/// the _GUID that corresponds to the supplied type or expression.
+///
+/// This represents code like @c __uuidof(COMTYPE) or @c __uuidof(*comPtr)
+class CXXUuidofExpr : public Expr {
+private:
+ llvm::PointerUnion<Stmt *, TypeSourceInfo *> Operand;
+ SourceRange Range;
+
+public:
+ CXXUuidofExpr(QualType Ty, TypeSourceInfo *Operand, SourceRange R)
+ : Expr(CXXUuidofExprClass, Ty, VK_LValue, OK_Ordinary,
+ false, Operand->getType()->isDependentType(),
+ Operand->getType()->isInstantiationDependentType(),
+ Operand->getType()->containsUnexpandedParameterPack()),
+ Operand(Operand), Range(R) { }
+
+ CXXUuidofExpr(QualType Ty, Expr *Operand, SourceRange R)
+ : Expr(CXXUuidofExprClass, Ty, VK_LValue, OK_Ordinary,
+ false, Operand->isTypeDependent(),
+ Operand->isInstantiationDependent(),
+ Operand->containsUnexpandedParameterPack()),
+ Operand(Operand), Range(R) { }
+
+ CXXUuidofExpr(EmptyShell Empty, bool isExpr)
+ : Expr(CXXUuidofExprClass, Empty) {
+ if (isExpr)
+ Operand = (Expr*)0;
+ else
+ Operand = (TypeSourceInfo*)0;
+ }
+
+ bool isTypeOperand() const { return Operand.is<TypeSourceInfo *>(); }
+
+ /// \brief Retrieves the type operand of this __uuidof() expression after
+ /// various required adjustments (removing reference types, cv-qualifiers).
+ QualType getTypeOperand() const;
+
+ /// \brief Retrieve source information for the type operand.
+ TypeSourceInfo *getTypeOperandSourceInfo() const {
+ assert(isTypeOperand() && "Cannot call getTypeOperand for __uuidof(expr)");
+ return Operand.get<TypeSourceInfo *>();
+ }
+
+ void setTypeOperandSourceInfo(TypeSourceInfo *TSI) {
+ assert(isTypeOperand() && "Cannot call getTypeOperand for __uuidof(expr)");
+ Operand = TSI;
+ }
+
+ Expr *getExprOperand() const {
+ assert(!isTypeOperand() && "Cannot call getExprOperand for __uuidof(type)");
+ return static_cast<Expr*>(Operand.get<Stmt *>());
+ }
+
+ void setExprOperand(Expr *E) {
+ assert(!isTypeOperand() && "Cannot call getExprOperand for __uuidof(type)");
+ Operand = E;
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return Range; }
+ void setSourceRange(SourceRange R) { Range = R; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXUuidofExprClass;
+ }
+ static bool classof(const CXXUuidofExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ if (isTypeOperand()) return child_range();
+ Stmt **begin = reinterpret_cast<Stmt**>(&Operand);
+ return child_range(begin, begin + 1);
+ }
+};
+
+/// CXXThisExpr - Represents the "this" expression in C++, which is a
+/// pointer to the object on which the current member function is
+/// executing (C++ [expr.prim]p3). Example:
+///
+/// @code
+/// class Foo {
+/// public:
+/// void bar();
+/// void test() { this->bar(); }
+/// };
+/// @endcode
+class CXXThisExpr : public Expr {
+ SourceLocation Loc;
+ bool Implicit : 1;
+
+public:
+ CXXThisExpr(SourceLocation L, QualType Type, bool isImplicit)
+ : Expr(CXXThisExprClass, Type, VK_RValue, OK_Ordinary,
+ // 'this' is type-dependent if the class type of the enclosing
+ // member function is dependent (C++ [temp.dep.expr]p2)
+ Type->isDependentType(), Type->isDependentType(),
+ Type->isInstantiationDependentType(),
+ /*ContainsUnexpandedParameterPack=*/false),
+ Loc(L), Implicit(isImplicit) { }
+
+ CXXThisExpr(EmptyShell Empty) : Expr(CXXThisExprClass, Empty) {}
+
+ SourceLocation getLocation() const { return Loc; }
+ void setLocation(SourceLocation L) { Loc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc); }
+
+ bool isImplicit() const { return Implicit; }
+ void setImplicit(bool I) { Implicit = I; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXThisExprClass;
+ }
+ static bool classof(const CXXThisExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// CXXThrowExpr - [C++ 15] C++ Throw Expression. This handles
+/// 'throw' and 'throw' assignment-expression. When
+/// assignment-expression isn't present, Op will be null.
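+///
+/// For example (illustrative):
+/// \code
+///   throw 42;  // the sub-expression is the literal 42
+///   throw;     // rethrow: the sub-expression is null
+/// \endcode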
+///
+class CXXThrowExpr : public Expr {
+ Stmt *Op;
+ SourceLocation ThrowLoc;
+ /// \brief Whether the thrown variable (if any) is in scope.
+ unsigned IsThrownVariableInScope : 1;
+
+ friend class ASTStmtReader;
+
+public:
+ // Ty is the void type which is used as the result type of the
+ // expression. l is the location of the throw keyword. expr
+ // can be null, if the optional expression to throw isn't present.
+ CXXThrowExpr(Expr *expr, QualType Ty, SourceLocation l,
+ bool IsThrownVariableInScope) :
+ Expr(CXXThrowExprClass, Ty, VK_RValue, OK_Ordinary, false, false,
+ expr && expr->isInstantiationDependent(),
+ expr && expr->containsUnexpandedParameterPack()),
+ Op(expr), ThrowLoc(l), IsThrownVariableInScope(IsThrownVariableInScope) {}
+ CXXThrowExpr(EmptyShell Empty) : Expr(CXXThrowExprClass, Empty) {}
+
+ const Expr *getSubExpr() const { return cast_or_null<Expr>(Op); }
+ Expr *getSubExpr() { return cast_or_null<Expr>(Op); }
+
+ SourceLocation getThrowLoc() const { return ThrowLoc; }
+
+ /// \brief Determines whether the variable thrown by this expression (if any!)
+ /// is within the innermost try block.
+ ///
+ /// This information is required to determine whether the NRVO can apply to
+ /// this variable.
+ bool isThrownVariableInScope() const { return IsThrownVariableInScope; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ if (getSubExpr() == 0)
+ return SourceRange(ThrowLoc, ThrowLoc);
+ return SourceRange(ThrowLoc, getSubExpr()->getSourceRange().getEnd());
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXThrowExprClass;
+ }
+ static bool classof(const CXXThrowExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(&Op, Op ? &Op+1 : &Op);
+ }
+};
+
+/// CXXDefaultArgExpr - C++ [dcl.fct.default]. This wraps up a
+/// function call argument that was created from the corresponding
+/// parameter's default argument, when the call did not explicitly
+/// supply arguments for all of the parameters.
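+///
+/// For example (an illustrative sketch):
+/// \code
+///   void f(int x, int y = 7);
+///   void g() { f(3); } // the missing 'y' argument is a CXXDefaultArgExpr
+/// \endcode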
+class CXXDefaultArgExpr : public Expr {
+ /// \brief The parameter whose default is being used.
+ ///
+ /// When the bit is set, the subexpression is stored after the
+ /// CXXDefaultArgExpr itself. When the bit is clear, the parameter's
+ /// actual default expression is the subexpression.
+ llvm::PointerIntPair<ParmVarDecl *, 1, bool> Param;
+
+ /// \brief The location where the default argument expression was used.
+ SourceLocation Loc;
+
+ CXXDefaultArgExpr(StmtClass SC, SourceLocation Loc, ParmVarDecl *param)
+ : Expr(SC,
+ param->hasUnparsedDefaultArg()
+ ? param->getType().getNonReferenceType()
+ : param->getDefaultArg()->getType(),
+ param->getDefaultArg()->getValueKind(),
+ param->getDefaultArg()->getObjectKind(), false, false, false, false),
+ Param(param, false), Loc(Loc) { }
+
+ CXXDefaultArgExpr(StmtClass SC, SourceLocation Loc, ParmVarDecl *param,
+ Expr *SubExpr)
+ : Expr(SC, SubExpr->getType(),
+ SubExpr->getValueKind(), SubExpr->getObjectKind(),
+ false, false, false, false),
+ Param(param, true), Loc(Loc) {
+ *reinterpret_cast<Expr **>(this + 1) = SubExpr;
+ }
+
+public:
+ CXXDefaultArgExpr(EmptyShell Empty) : Expr(CXXDefaultArgExprClass, Empty) {}
+
+
+ // Param is the parameter whose default argument is used by this
+ // expression.
+ static CXXDefaultArgExpr *Create(ASTContext &C, SourceLocation Loc,
+ ParmVarDecl *Param) {
+ return new (C) CXXDefaultArgExpr(CXXDefaultArgExprClass, Loc, Param);
+ }
+
+ // Param is the parameter whose default argument is used by this
+ // expression, and SubExpr is the expression that will actually be used.
+ static CXXDefaultArgExpr *Create(ASTContext &C,
+ SourceLocation Loc,
+ ParmVarDecl *Param,
+ Expr *SubExpr);
+
+ // Retrieve the parameter that the argument was created from.
+ const ParmVarDecl *getParam() const { return Param.getPointer(); }
+ ParmVarDecl *getParam() { return Param.getPointer(); }
+
+ // Retrieve the actual argument to the function call.
+ const Expr *getExpr() const {
+ if (Param.getInt())
+ return *reinterpret_cast<Expr const * const*> (this + 1);
+ return getParam()->getDefaultArg();
+ }
+ Expr *getExpr() {
+ if (Param.getInt())
+ return *reinterpret_cast<Expr **> (this + 1);
+ return getParam()->getDefaultArg();
+ }
+
+ /// \brief Retrieve the location where this default argument was actually
+ /// used.
+ SourceLocation getUsedLocation() const { return Loc; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ // Default argument expressions have no representation in the
+ // source, so they have an empty source range.
+ return SourceRange();
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXDefaultArgExprClass;
+ }
+ static bool classof(const CXXDefaultArgExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+};
+
+/// CXXTemporary - Represents a C++ temporary.
+class CXXTemporary {
+ /// Destructor - The destructor that needs to be called.
+ const CXXDestructorDecl *Destructor;
+
+ CXXTemporary(const CXXDestructorDecl *destructor)
+ : Destructor(destructor) { }
+
+public:
+ static CXXTemporary *Create(ASTContext &C,
+ const CXXDestructorDecl *Destructor);
+
+ const CXXDestructorDecl *getDestructor() const { return Destructor; }
+ void setDestructor(const CXXDestructorDecl *Dtor) {
+ Destructor = Dtor;
+ }
+};
+
+/// \brief Represents binding an expression to a temporary.
+///
+/// This ensures the destructor is called for the temporary. It should only be
+/// needed for non-POD, non-trivially destructible class types. For example:
+///
+/// \code
+/// struct S {
+/// S() { } // User defined constructor makes S non-POD.
+/// ~S() { } // User defined destructor makes it non-trivial.
+/// };
+/// void test() {
+/// const S &s_ref = S(); // Requires a CXXBindTemporaryExpr.
+/// }
+/// \endcode
+class CXXBindTemporaryExpr : public Expr {
+ CXXTemporary *Temp;
+
+ Stmt *SubExpr;
+
+ CXXBindTemporaryExpr(CXXTemporary *temp, Expr* SubExpr)
+ : Expr(CXXBindTemporaryExprClass, SubExpr->getType(),
+ VK_RValue, OK_Ordinary, SubExpr->isTypeDependent(),
+ SubExpr->isValueDependent(),
+ SubExpr->isInstantiationDependent(),
+ SubExpr->containsUnexpandedParameterPack()),
+ Temp(temp), SubExpr(SubExpr) { }
+
+public:
+ CXXBindTemporaryExpr(EmptyShell Empty)
+ : Expr(CXXBindTemporaryExprClass, Empty), Temp(0), SubExpr(0) {}
+
+ static CXXBindTemporaryExpr *Create(ASTContext &C, CXXTemporary *Temp,
+ Expr* SubExpr);
+
+ CXXTemporary *getTemporary() { return Temp; }
+ const CXXTemporary *getTemporary() const { return Temp; }
+ void setTemporary(CXXTemporary *T) { Temp = T; }
+
+ const Expr *getSubExpr() const { return cast<Expr>(SubExpr); }
+ Expr *getSubExpr() { return cast<Expr>(SubExpr); }
+ void setSubExpr(Expr *E) { SubExpr = E; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SubExpr->getSourceRange();
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXBindTemporaryExprClass;
+ }
+ static bool classof(const CXXBindTemporaryExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&SubExpr, &SubExpr + 1); }
+};
+
+/// CXXConstructExpr - Represents a call to a C++ constructor.
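+///
+/// For example (an illustrative sketch):
+/// \code
+///   struct S { S(int); };
+///   S s(42); // the initialization of 's' is a CXXConstructExpr calling S::S(int)
+/// \endcode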
+class CXXConstructExpr : public Expr {
+public:
+ enum ConstructionKind {
+ CK_Complete,
+ CK_NonVirtualBase,
+ CK_VirtualBase,
+ CK_Delegating
+ };
+
+private:
+ CXXConstructorDecl *Constructor;
+
+ SourceLocation Loc;
+ SourceRange ParenRange;
+ unsigned NumArgs : 16;
+ bool Elidable : 1;
+ bool HadMultipleCandidates : 1;
+ bool ListInitialization : 1;
+ bool ZeroInitialization : 1;
+ unsigned ConstructKind : 2;
+ Stmt **Args;
+
+protected:
+ CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T,
+ SourceLocation Loc,
+ CXXConstructorDecl *d, bool elidable,
+ Expr **args, unsigned numargs,
+ bool HadMultipleCandidates,
+ bool ListInitialization,
+ bool ZeroInitialization,
+ ConstructionKind ConstructKind,
+ SourceRange ParenRange);
+
+ /// \brief Construct an empty C++ construction expression.
+ CXXConstructExpr(StmtClass SC, EmptyShell Empty)
+ : Expr(SC, Empty), Constructor(0), NumArgs(0), Elidable(false),
+ HadMultipleCandidates(false), ListInitialization(false),
+ ZeroInitialization(false), ConstructKind(0), Args(0)
+ { }
+
+public:
+ /// \brief Construct an empty C++ construction expression.
+ explicit CXXConstructExpr(EmptyShell Empty)
+ : Expr(CXXConstructExprClass, Empty), Constructor(0),
+ NumArgs(0), Elidable(false), HadMultipleCandidates(false),
+ ListInitialization(false), ZeroInitialization(false),
+ ConstructKind(0), Args(0)
+ { }
+
+ static CXXConstructExpr *Create(ASTContext &C, QualType T,
+ SourceLocation Loc,
+ CXXConstructorDecl *D, bool Elidable,
+ Expr **Args, unsigned NumArgs,
+ bool HadMultipleCandidates,
+ bool ListInitialization,
+ bool ZeroInitialization,
+ ConstructionKind ConstructKind,
+ SourceRange ParenRange);
+
+ CXXConstructorDecl* getConstructor() const { return Constructor; }
+ void setConstructor(CXXConstructorDecl *C) { Constructor = C; }
+
+ SourceLocation getLocation() const { return Loc; }
+ void setLocation(SourceLocation Loc) { this->Loc = Loc; }
+
+ /// \brief Whether this construction is elidable.
+ bool isElidable() const { return Elidable; }
+ void setElidable(bool E) { Elidable = E; }
+
+ /// \brief Whether the referred constructor was resolved from
+ /// an overloaded set having size greater than 1.
+ bool hadMultipleCandidates() const { return HadMultipleCandidates; }
+ void setHadMultipleCandidates(bool V) { HadMultipleCandidates = V; }
+
+ /// \brief Whether this constructor call was written as list-initialization.
+ bool isListInitialization() const { return ListInitialization; }
+ void setListInitialization(bool V) { ListInitialization = V; }
+
+ /// \brief Whether this construction first requires
+ /// zero-initialization before the initializer is called.
+ bool requiresZeroInitialization() const { return ZeroInitialization; }
+ void setRequiresZeroInitialization(bool ZeroInit) {
+ ZeroInitialization = ZeroInit;
+ }
+
+ /// \brief Determines whether this constructor is actually constructing
+ /// a base class (rather than a complete object).
+ ConstructionKind getConstructionKind() const {
+ return (ConstructionKind)ConstructKind;
+ }
+ void setConstructionKind(ConstructionKind CK) {
+ ConstructKind = CK;
+ }
+
+ typedef ExprIterator arg_iterator;
+ typedef ConstExprIterator const_arg_iterator;
+
+ arg_iterator arg_begin() { return Args; }
+ arg_iterator arg_end() { return Args + NumArgs; }
+ const_arg_iterator arg_begin() const { return Args; }
+ const_arg_iterator arg_end() const { return Args + NumArgs; }
+
+ Expr **getArgs() const { return reinterpret_cast<Expr **>(Args); }
+ unsigned getNumArgs() const { return NumArgs; }
+
+ /// getArg - Return the specified argument.
+ Expr *getArg(unsigned Arg) {
+ assert(Arg < NumArgs && "Arg access out of range!");
+ return cast<Expr>(Args[Arg]);
+ }
+ const Expr *getArg(unsigned Arg) const {
+ assert(Arg < NumArgs && "Arg access out of range!");
+ return cast<Expr>(Args[Arg]);
+ }
+
+ /// setArg - Set the specified argument.
+ void setArg(unsigned Arg, Expr *ArgExpr) {
+ assert(Arg < NumArgs && "Arg access out of range!");
+ Args[Arg] = ArgExpr;
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+ SourceRange getParenRange() const { return ParenRange; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXConstructExprClass ||
+ T->getStmtClass() == CXXTemporaryObjectExprClass;
+ }
+ static bool classof(const CXXConstructExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(&Args[0], &Args[0]+NumArgs);
+ }
+
+ friend class ASTStmtReader;
+};
+
+/// CXXFunctionalCastExpr - Represents an explicit C++ type conversion
+/// that uses "functional" notion (C++ [expr.type.conv]). Example: @c
+/// x = int(0.5);
+class CXXFunctionalCastExpr : public ExplicitCastExpr {
+ SourceLocation TyBeginLoc;
+ SourceLocation RParenLoc;
+
+ CXXFunctionalCastExpr(QualType ty, ExprValueKind VK,
+ TypeSourceInfo *writtenTy,
+ SourceLocation tyBeginLoc, CastKind kind,
+ Expr *castExpr, unsigned pathSize,
+ SourceLocation rParenLoc)
+ : ExplicitCastExpr(CXXFunctionalCastExprClass, ty, VK, kind,
+ castExpr, pathSize, writtenTy),
+ TyBeginLoc(tyBeginLoc), RParenLoc(rParenLoc) {}
+
+ explicit CXXFunctionalCastExpr(EmptyShell Shell, unsigned PathSize)
+ : ExplicitCastExpr(CXXFunctionalCastExprClass, Shell, PathSize) { }
+
+public:
+ static CXXFunctionalCastExpr *Create(ASTContext &Context, QualType T,
+ ExprValueKind VK,
+ TypeSourceInfo *Written,
+ SourceLocation TyBeginLoc,
+ CastKind Kind, Expr *Op,
+ const CXXCastPath *Path,
+ SourceLocation RPLoc);
+ static CXXFunctionalCastExpr *CreateEmpty(ASTContext &Context,
+ unsigned PathSize);
+
+ SourceLocation getTypeBeginLoc() const { return TyBeginLoc; }
+ void setTypeBeginLoc(SourceLocation L) { TyBeginLoc = L; }
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(TyBeginLoc, RParenLoc);
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXFunctionalCastExprClass;
+ }
+ static bool classof(const CXXFunctionalCastExpr *) { return true; }
+};
+
+/// @brief Represents a C++ functional cast expression that builds a
+/// temporary object.
+///
+/// This expression type represents a C++ "functional" cast
+/// (C++ [expr.type.conv]) with N != 1 arguments that invokes a
+/// constructor to build a temporary object. With N == 1 arguments the
+/// functional cast expression will be represented by CXXFunctionalCastExpr.
+/// Example:
+/// @code
+/// struct X { X(int, float); };
+///
+/// X create_X() {
+///   return X(1, 3.14f); // creates a CXXTemporaryObjectExpr
+/// }
+/// @endcode
+class CXXTemporaryObjectExpr : public CXXConstructExpr {
+ TypeSourceInfo *Type;
+
+public:
+ CXXTemporaryObjectExpr(ASTContext &C, CXXConstructorDecl *Cons,
+ TypeSourceInfo *Type,
+ Expr **Args,unsigned NumArgs,
+ SourceRange parenRange,
+ bool HadMultipleCandidates,
+ bool ZeroInitialization = false);
+ explicit CXXTemporaryObjectExpr(EmptyShell Empty)
+ : CXXConstructExpr(CXXTemporaryObjectExprClass, Empty), Type() { }
+
+ TypeSourceInfo *getTypeSourceInfo() const { return Type; }
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXTemporaryObjectExprClass;
+ }
+ static bool classof(const CXXTemporaryObjectExpr *) { return true; }
+
+ friend class ASTStmtReader;
+};
+
+/// \brief A C++ lambda expression, which produces a function object
+/// (of unspecified type) that can be invoked later.
+///
+/// Example:
+/// \code
+/// void low_pass_filter(std::vector<double> &values, double cutoff) {
+///   values.erase(std::remove_if(values.begin(), values.end(),
+///                               [=](double value) { return value > cutoff; }),
+///                values.end());
+/// }
+/// \endcode
+///
+/// Lambda expressions can capture local variables, either by copying
+/// the values of those local variables at the time the function
+/// object is constructed (not when it is called!) or by holding a
+/// reference to the local variable. These captures can occur either
+/// implicitly or can be written explicitly between the square
+/// brackets ([...]) that start the lambda expression.
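+///
+/// For example (an illustrative sketch):
+/// \code
+///   int n = 0;
+///   auto by_copy = [n]  { return n; }; // explicit by-copy capture of 'n'
+///   auto by_ref  = [&n] { ++n; };      // explicit by-reference capture of 'n'
+/// \endcode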
+class LambdaExpr : public Expr {
+ enum {
+ /// \brief Flag used by the Capture class to indicate that the given
+ /// capture was implicit.
+ Capture_Implicit = 0x01,
+
+ /// \brief Flag used by the Capture class to indicate that the
+ /// given capture was by-copy.
+ Capture_ByCopy = 0x02
+ };
+
+ /// \brief The source range that covers the lambda introducer ([...]).
+ SourceRange IntroducerRange;
+
+ /// \brief The number of captures.
+ unsigned NumCaptures : 16;
+
+ /// \brief The default capture kind, which is a value of type
+ /// LambdaCaptureDefault.
+ unsigned CaptureDefault : 2;
+
+ /// \brief Whether this lambda had an explicit parameter list vs. an
+ /// implicit (and empty) parameter list.
+ unsigned ExplicitParams : 1;
+
+ /// \brief Whether this lambda had the result type explicitly specified.
+ unsigned ExplicitResultType : 1;
+
+ /// \brief Whether there are any array index variables stored at the end of
+ /// this lambda expression.
+ unsigned HasArrayIndexVars : 1;
+
+ /// \brief The location of the closing brace ('}') that completes
+ /// the lambda.
+ ///
+ /// The location of the brace is also available by looking up the
+ /// function call operator in the lambda class. However, it is
+ /// stored here to improve the performance of getSourceRange(), and
+ /// to avoid having to deserialize the function call operator from a
+ /// module file just to determine the source range.
+ SourceLocation ClosingBrace;
+
+ // Note: The capture initializers are stored directly after the lambda
+ // expression, along with the index variables used to initialize by-copy
+ // array captures.
+
+public:
+ /// \brief Describes the capture of either a variable or 'this'.
+ class Capture {
+ llvm::PointerIntPair<VarDecl *, 2> VarAndBits;
+ SourceLocation Loc;
+ SourceLocation EllipsisLoc;
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+
+ public:
+ /// \brief Create a new capture.
+ ///
+ /// \param Loc The source location associated with this capture.
+ ///
+ /// \param Kind The kind of capture (this, byref, bycopy).
+ ///
+ /// \param Implicit Whether the capture was implicit or explicit.
+ ///
+ /// \param Var The local variable being captured, or null if capturing this.
+ ///
+ /// \param EllipsisLoc The location of the ellipsis (...) for a
+ /// capture that is a pack expansion, or an invalid source
+ /// location to indicate that this is not a pack expansion.
+ Capture(SourceLocation Loc, bool Implicit,
+ LambdaCaptureKind Kind, VarDecl *Var = 0,
+ SourceLocation EllipsisLoc = SourceLocation());
+
+ /// \brief Determine the kind of capture.
+ LambdaCaptureKind getCaptureKind() const;
+
+ /// \brief Determine whether this capture handles the C++ 'this'
+ /// pointer.
+ bool capturesThis() const { return VarAndBits.getPointer() == 0; }
+
+ /// \brief Determine whether this capture handles a variable.
+ bool capturesVariable() const { return VarAndBits.getPointer() != 0; }
+
+ /// \brief Retrieve the declaration of the local variable being
+ /// captured.
+ ///
+ /// This operation is only valid if this capture does not capture
+ /// 'this'.
+ VarDecl *getCapturedVar() const {
+ assert(!capturesThis() && "No variable available for 'this' capture");
+ return VarAndBits.getPointer();
+ }
+
+ /// \brief Determine whether this was an implicit capture (not
+ /// written between the square brackets introducing the lambda).
+ bool isImplicit() const { return VarAndBits.getInt() & Capture_Implicit; }
+
+ /// \brief Determine whether this was an explicit capture, written
+ /// between the square brackets introducing the lambda.
+ bool isExplicit() const { return !isImplicit(); }
+
+ /// \brief Retrieve the source location of the capture.
+ ///
+ /// For an explicit capture, this returns the location of the
+ /// explicit capture in the source. For an implicit capture, this
+ /// returns the location at which the variable or 'this' was first
+ /// used.
+ SourceLocation getLocation() const { return Loc; }
+
+ /// \brief Determine whether this capture is a pack expansion,
+ /// which captures a function parameter pack.
+ bool isPackExpansion() const { return EllipsisLoc.isValid(); }
+
+ /// \brief Retrieve the location of the ellipsis for a capture
+ /// that is a pack expansion.
+ SourceLocation getEllipsisLoc() const {
+ assert(isPackExpansion() && "No ellipsis location for a non-expansion");
+ return EllipsisLoc;
+ }
+ };
+
+private:
+ /// \brief Construct a lambda expression.
+ LambdaExpr(QualType T, SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ ArrayRef<Capture> Captures,
+ bool ExplicitParams,
+ bool ExplicitResultType,
+ ArrayRef<Expr *> CaptureInits,
+ ArrayRef<VarDecl *> ArrayIndexVars,
+ ArrayRef<unsigned> ArrayIndexStarts,
+ SourceLocation ClosingBrace);
+
+ /// \brief Construct an empty lambda expression.
+ LambdaExpr(EmptyShell Empty, unsigned NumCaptures, bool HasArrayIndexVars)
+ : Expr(LambdaExprClass, Empty),
+ NumCaptures(NumCaptures), CaptureDefault(LCD_None), ExplicitParams(false),
+ ExplicitResultType(false), HasArrayIndexVars(true) {
+ getStoredStmts()[NumCaptures] = 0;
+ }
+
+ Stmt **getStoredStmts() const {
+ return reinterpret_cast<Stmt **>(const_cast<LambdaExpr *>(this) + 1);
+ }
+
+ /// \brief Retrieve the mapping from captures to the first array index
+ /// variable.
+ unsigned *getArrayIndexStarts() const {
+ return reinterpret_cast<unsigned *>(getStoredStmts() + NumCaptures + 1);
+ }
+
+ /// \brief Retrieve the complete set of array-index variables.
+ VarDecl **getArrayIndexVars() const {
+ return reinterpret_cast<VarDecl **>(
+ getArrayIndexStarts() + NumCaptures + 1);
+ }
+
+public:
+ /// \brief Construct a new lambda expression.
+ static LambdaExpr *Create(ASTContext &C,
+ CXXRecordDecl *Class,
+ SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ ArrayRef<Capture> Captures,
+ bool ExplicitParams,
+ bool ExplicitResultType,
+ ArrayRef<Expr *> CaptureInits,
+ ArrayRef<VarDecl *> ArrayIndexVars,
+ ArrayRef<unsigned> ArrayIndexStarts,
+ SourceLocation ClosingBrace);
+
+ /// \brief Construct a new lambda expression that will be deserialized from
+ /// an external source.
+ static LambdaExpr *CreateDeserialized(ASTContext &C, unsigned NumCaptures,
+ unsigned NumArrayIndexVars);
+
+ /// \brief Determine the default capture kind for this lambda.
+ LambdaCaptureDefault getCaptureDefault() const {
+ return static_cast<LambdaCaptureDefault>(CaptureDefault);
+ }
+
+ /// \brief An iterator that walks over the captures of the lambda,
+ /// both implicit and explicit.
+ typedef const Capture *capture_iterator;
+
+ /// \brief Retrieve an iterator pointing to the first lambda capture.
+ capture_iterator capture_begin() const;
+
+ /// \brief Retrieve an iterator pointing past the end of the
+ /// sequence of lambda captures.
+ capture_iterator capture_end() const;
+
+ /// \brief Determine the number of captures in this lambda.
+ unsigned capture_size() const { return NumCaptures; }
+
+ /// \brief Retrieve an iterator pointing to the first explicit
+ /// lambda capture.
+ capture_iterator explicit_capture_begin() const;
+
+ /// \brief Retrieve an iterator pointing past the end of the sequence of
+ /// explicit lambda captures.
+ capture_iterator explicit_capture_end() const;
+
+ /// \brief Retrieve an iterator pointing to the first implicit
+ /// lambda capture.
+ capture_iterator implicit_capture_begin() const;
+
+ /// \brief Retrieve an iterator pointing past the end of the sequence of
+ /// implicit lambda captures.
+ capture_iterator implicit_capture_end() const;
+
+ /// \brief Iterator that walks over the capture initialization
+ /// arguments.
+ typedef Expr **capture_init_iterator;
+
+ /// \brief Retrieve the first initialization argument for this
+ /// lambda expression (which initializes the first capture field).
+ capture_init_iterator capture_init_begin() const {
+ return reinterpret_cast<Expr **>(getStoredStmts());
+ }
+
+ /// \brief Retrieve the iterator pointing one past the last
+ /// initialization argument for this lambda expression.
+ capture_init_iterator capture_init_end() const {
+ return capture_init_begin() + NumCaptures;
+ }
+
+ /// \brief Retrieve the set of index variables used in the capture
+ /// initializer of an array captured by copy.
+ ///
+ /// \param Iter The iterator that points at the capture initializer for
+ /// which we are extracting the corresponding index variables.
+ ArrayRef<VarDecl *> getCaptureInitIndexVars(capture_init_iterator Iter) const;
+
+ /// \brief Retrieve the source range covering the lambda introducer,
+ /// which contains the explicit capture list surrounded by square
+ /// brackets ([...]).
+ SourceRange getIntroducerRange() const { return IntroducerRange; }
+
+ /// \brief Retrieve the class that corresponds to the lambda, which
+ /// stores the captures in its fields and provides the various
+ /// operations permitted on a lambda (copying, calling).
+ CXXRecordDecl *getLambdaClass() const;
+
+ /// \brief Retrieve the function call operator associated with this
+ /// lambda expression.
+ CXXMethodDecl *getCallOperator() const;
+
+ /// \brief Retrieve the body of the lambda.
+ CompoundStmt *getBody() const;
+
+ /// \brief Determine whether the lambda is mutable, meaning that any
+ /// captured values can be modified.
+ bool isMutable() const;
+
+ /// \brief Determine whether this lambda has an explicit parameter
+ /// list vs. an implicit (empty) parameter list.
+ bool hasExplicitParameters() const { return ExplicitParams; }
+
+ /// \brief Whether this lambda had its result type explicitly specified.
+ bool hasExplicitResultType() const { return ExplicitResultType; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == LambdaExprClass;
+ }
+ static bool classof(const LambdaExpr *) { return true; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(IntroducerRange.getBegin(), ClosingBrace);
+ }
+
+ child_range children() {
+ return child_range(getStoredStmts(), getStoredStmts() + NumCaptures + 1);
+ }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+};
+
+/// CXXScalarValueInitExpr - [C++ 5.2.3p2]
+/// Expression "T()" which creates a value-initialized rvalue of type
+/// T, which is a non-class type.
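+///
+/// For example (illustrative):
+/// \code
+///   int i = int(); // value-initializes 'i' to 0
+/// \endcode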
+///
+class CXXScalarValueInitExpr : public Expr {
+ SourceLocation RParenLoc;
+ TypeSourceInfo *TypeInfo;
+
+ friend class ASTStmtReader;
+
+public:
+ /// \brief Create an explicitly-written scalar-value initialization
+ /// expression.
+ CXXScalarValueInitExpr(QualType Type,
+ TypeSourceInfo *TypeInfo,
+ SourceLocation rParenLoc ) :
+ Expr(CXXScalarValueInitExprClass, Type, VK_RValue, OK_Ordinary,
+ false, false, Type->isInstantiationDependentType(), false),
+ RParenLoc(rParenLoc), TypeInfo(TypeInfo) {}
+
+ explicit CXXScalarValueInitExpr(EmptyShell Shell)
+ : Expr(CXXScalarValueInitExprClass, Shell) { }
+
+ TypeSourceInfo *getTypeSourceInfo() const {
+ return TypeInfo;
+ }
+
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXScalarValueInitExprClass;
+ }
+ static bool classof(const CXXScalarValueInitExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// CXXNewExpr - A new expression for memory allocation and constructor calls,
+/// e.g., "new CXXNewExpr(foo)".
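+///
+/// For example (an illustrative sketch, assuming suitable 'buf' and 'n'
+/// variables and that <new> has been included):
+/// \code
+///   int *p = new int(42);       // non-array form with a paren-initializer
+///   int *q = new (buf) int[n];  // array form with one placement argument
+/// \endcode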
+class CXXNewExpr : public Expr {
+ // Contains an optional array size expression, an optional initialization
+ // expression, and any number of optional placement arguments, in that order.
+ Stmt **SubExprs;
+ // Points to the allocation function used.
+ FunctionDecl *OperatorNew;
+ // Points to the deallocation function used in case of error. May be null.
+ FunctionDecl *OperatorDelete;
+
+ /// \brief The allocated type-source information, as written in the source.
+ TypeSourceInfo *AllocatedTypeInfo;
+
+ /// \brief If the allocated type was expressed as a parenthesized type-id,
+ /// the source range covering the parenthesized type-id.
+ SourceRange TypeIdParens;
+
+ /// \brief Location of the first token.
+ SourceLocation StartLoc;
+
+ /// \brief Source-range of a paren-delimited initializer.
+ SourceRange DirectInitRange;
+
+ // Was the usage ::new, i.e. is the global new to be used?
+ bool GlobalNew : 1;
+ // Do we allocate an array? If so, the first SubExpr is the size expression.
+ bool Array : 1;
+ // If this is an array allocation, does the usual deallocation
+ // function for the allocated type want to know the allocated size?
+ bool UsualArrayDeleteWantsSize : 1;
+ // The number of placement new arguments.
+ unsigned NumPlacementArgs : 13;
+ // What kind of initializer do we have? Could be none, parens, or braces.
+ // In storage, we distinguish between "none, and no initializer expr", and
+ // "none, but an implicit initializer expr".
+ unsigned StoredInitializationStyle : 2;
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+public:
+ enum InitializationStyle {
+ NoInit, ///< New-expression has no initializer as written.
+ CallInit, ///< New-expression has a C++98 paren-delimited initializer.
+ ListInit ///< New-expression has a C++11 list-initializer.
+ };
+
+ CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
+ FunctionDecl *operatorDelete, bool usualArrayDeleteWantsSize,
+ Expr **placementArgs, unsigned numPlaceArgs,
+ SourceRange typeIdParens, Expr *arraySize,
+ InitializationStyle initializationStyle, Expr *initializer,
+ QualType ty, TypeSourceInfo *AllocatedTypeInfo,
+ SourceLocation startLoc, SourceRange directInitRange);
+ explicit CXXNewExpr(EmptyShell Shell)
+ : Expr(CXXNewExprClass, Shell), SubExprs(0) { }
+
+ void AllocateArgsArray(ASTContext &C, bool isArray, unsigned numPlaceArgs,
+ bool hasInitializer);
+
+ QualType getAllocatedType() const {
+ assert(getType()->isPointerType());
+ return getType()->getAs<PointerType>()->getPointeeType();
+ }
+
+ TypeSourceInfo *getAllocatedTypeSourceInfo() const {
+ return AllocatedTypeInfo;
+ }
+
+ /// \brief True if the allocation result needs to be null-checked.
+ /// C++0x [expr.new]p13:
+ /// If the allocation function returns null, initialization shall
+ /// not be done, the deallocation function shall not be called,
+ /// and the value of the new-expression shall be null.
+ /// An allocation function is not allowed to return null unless it
+ /// has a non-throwing exception-specification. The '03 rule is
+ /// identical except that the definition of a non-throwing
+ /// exception specification is just "is it throw()?".
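+ ///
+ /// For illustration, an allocation function declared with a non-throwing
+ /// exception specification, e.g.
+ /// \code
+ /// void *operator new(std::size_t size) throw();
+ /// \endcode
+ /// is allowed to return null, so the result of the new-expression must be
+ /// checked before initialization.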
+ bool shouldNullCheckAllocation(ASTContext &Ctx) const;
+
+ FunctionDecl *getOperatorNew() const { return OperatorNew; }
+ void setOperatorNew(FunctionDecl *D) { OperatorNew = D; }
+ FunctionDecl *getOperatorDelete() const { return OperatorDelete; }
+ void setOperatorDelete(FunctionDecl *D) { OperatorDelete = D; }
+
+ bool isArray() const { return Array; }
+ Expr *getArraySize() {
+ return Array ? cast<Expr>(SubExprs[0]) : 0;
+ }
+ const Expr *getArraySize() const {
+ return Array ? cast<Expr>(SubExprs[0]) : 0;
+ }
+
+ unsigned getNumPlacementArgs() const { return NumPlacementArgs; }
+ Expr **getPlacementArgs() {
+ return reinterpret_cast<Expr **>(SubExprs + Array + hasInitializer());
+ }
+
+ Expr *getPlacementArg(unsigned i) {
+ assert(i < NumPlacementArgs && "Index out of range");
+ return getPlacementArgs()[i];
+ }
+ const Expr *getPlacementArg(unsigned i) const {
+ assert(i < NumPlacementArgs && "Index out of range");
+ return const_cast<CXXNewExpr*>(this)->getPlacementArg(i);
+ }
+
+ bool isParenTypeId() const { return TypeIdParens.isValid(); }
+ SourceRange getTypeIdParens() const { return TypeIdParens; }
+
+ bool isGlobalNew() const { return GlobalNew; }
+
+ /// \brief Whether this new-expression has any initializer at all.
+ bool hasInitializer() const { return StoredInitializationStyle > 0; }
+
+ /// \brief The kind of initializer this new-expression has.
+ InitializationStyle getInitializationStyle() const {
+ if (StoredInitializationStyle == 0)
+ return NoInit;
+ return static_cast<InitializationStyle>(StoredInitializationStyle-1);
+ }
+
+ /// \brief The initializer of this new-expression.
+ Expr *getInitializer() {
+ return hasInitializer() ? cast<Expr>(SubExprs[Array]) : 0;
+ }
+ const Expr *getInitializer() const {
+ return hasInitializer() ? cast<Expr>(SubExprs[Array]) : 0;
+ }
+
+ /// \brief Returns the CXXConstructExpr from this new-expression, or NULL.
+ const CXXConstructExpr* getConstructExpr() {
+ return dyn_cast_or_null<CXXConstructExpr>(getInitializer());
+ }
+
+ /// Answers whether the usual array deallocation function for the
+ /// allocated type expects the size of the allocation as a
+ /// parameter.
+ bool doesUsualArrayDeleteWantSize() const {
+ return UsualArrayDeleteWantsSize;
+ }
+
+ typedef ExprIterator arg_iterator;
+ typedef ConstExprIterator const_arg_iterator;
+
+ arg_iterator placement_arg_begin() {
+ return SubExprs + Array + hasInitializer();
+ }
+ arg_iterator placement_arg_end() {
+ return SubExprs + Array + hasInitializer() + getNumPlacementArgs();
+ }
+ const_arg_iterator placement_arg_begin() const {
+ return SubExprs + Array + hasInitializer();
+ }
+ const_arg_iterator placement_arg_end() const {
+ return SubExprs + Array + hasInitializer() + getNumPlacementArgs();
+ }
+
+ typedef Stmt **raw_arg_iterator;
+ raw_arg_iterator raw_arg_begin() { return SubExprs; }
+ raw_arg_iterator raw_arg_end() {
+ return SubExprs + Array + hasInitializer() + getNumPlacementArgs();
+ }
+ const_arg_iterator raw_arg_begin() const { return SubExprs; }
+ const_arg_iterator raw_arg_end() const {
+ return SubExprs + Array + hasInitializer() + getNumPlacementArgs();
+ }
+
+ SourceLocation getStartLoc() const { return StartLoc; }
+ SourceLocation getEndLoc() const;
+
+ SourceRange getDirectInitRange() const { return DirectInitRange; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getStartLoc(), getEndLoc());
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXNewExprClass;
+ }
+ static bool classof(const CXXNewExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(raw_arg_begin(), raw_arg_end());
+ }
+};
+
+/// CXXDeleteExpr - A delete expression for memory deallocation and destructor
+/// calls, e.g. "delete[] pArray".
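+///
+/// For illustration, each of the following is represented by this node:
+/// \code
+/// delete p;        // non-array form
+/// delete[] pArray; // array form
+/// ::delete p;      // forced use of the global operator delete
+/// \endcode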
+class CXXDeleteExpr : public Expr {
+ // Points to the operator delete overload that is used. Could be a member.
+ FunctionDecl *OperatorDelete;
+ // The pointer expression to be deleted.
+ Stmt *Argument;
+ // Location of the expression.
+ SourceLocation Loc;
+ // Is this a forced global delete, i.e. "::delete"?
+ bool GlobalDelete : 1;
+ // Is this the array form of delete, i.e. "delete[]"?
+ bool ArrayForm : 1;
+ // ArrayFormAsWritten can be different from ArrayForm if 'delete' is applied
+ // to pointer-to-array type (ArrayFormAsWritten will be false while ArrayForm
+ // will be true).
+ bool ArrayFormAsWritten : 1;
+ // Does the usual deallocation function for the element type require
+ // a size_t argument?
+ bool UsualArrayDeleteWantsSize : 1;
+public:
+ CXXDeleteExpr(QualType ty, bool globalDelete, bool arrayForm,
+ bool arrayFormAsWritten, bool usualArrayDeleteWantsSize,
+ FunctionDecl *operatorDelete, Expr *arg, SourceLocation loc)
+ : Expr(CXXDeleteExprClass, ty, VK_RValue, OK_Ordinary, false, false,
+ arg->isInstantiationDependent(),
+ arg->containsUnexpandedParameterPack()),
+ OperatorDelete(operatorDelete), Argument(arg), Loc(loc),
+ GlobalDelete(globalDelete),
+ ArrayForm(arrayForm), ArrayFormAsWritten(arrayFormAsWritten),
+ UsualArrayDeleteWantsSize(usualArrayDeleteWantsSize) { }
+ explicit CXXDeleteExpr(EmptyShell Shell)
+ : Expr(CXXDeleteExprClass, Shell), OperatorDelete(0), Argument(0) { }
+
+ bool isGlobalDelete() const { return GlobalDelete; }
+ bool isArrayForm() const { return ArrayForm; }
+ bool isArrayFormAsWritten() const { return ArrayFormAsWritten; }
+
+ /// Answers whether the usual array deallocation function for the
+ /// allocated type expects the size of the allocation as a
+ /// parameter. This can be true even if the actual deallocation
+ /// function that we're using doesn't want a size.
+ bool doesUsualArrayDeleteWantSize() const {
+ return UsualArrayDeleteWantsSize;
+ }
+
+ FunctionDecl *getOperatorDelete() const { return OperatorDelete; }
+
+ Expr *getArgument() { return cast<Expr>(Argument); }
+ const Expr *getArgument() const { return cast<Expr>(Argument); }
+
+ /// \brief Retrieve the type being destroyed. If the type being
+ /// destroyed is a dependent type which may or may not be a pointer,
+ /// return an invalid type.
+ QualType getDestroyedType() const;
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(Loc, Argument->getLocEnd());
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXDeleteExprClass;
+ }
+ static bool classof(const CXXDeleteExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&Argument, &Argument+1); }
+
+ friend class ASTStmtReader;
+};
+
+/// \brief Structure used to store the type being destroyed by a
+/// pseudo-destructor expression.
+class PseudoDestructorTypeStorage {
+ /// \brief Either the type source information or the name of the type, if
+ /// it couldn't be resolved due to type-dependence.
+ llvm::PointerUnion<TypeSourceInfo *, IdentifierInfo *> Type;
+
+ /// \brief The starting source location of the pseudo-destructor type.
+ SourceLocation Location;
+
+public:
+ PseudoDestructorTypeStorage() { }
+
+ PseudoDestructorTypeStorage(IdentifierInfo *II, SourceLocation Loc)
+ : Type(II), Location(Loc) { }
+
+ PseudoDestructorTypeStorage(TypeSourceInfo *Info);
+
+ TypeSourceInfo *getTypeSourceInfo() const {
+ return Type.dyn_cast<TypeSourceInfo *>();
+ }
+
+ IdentifierInfo *getIdentifier() const {
+ return Type.dyn_cast<IdentifierInfo *>();
+ }
+
+ SourceLocation getLocation() const { return Location; }
+};
+
+/// \brief Represents a C++ pseudo-destructor (C++ [expr.pseudo]).
+///
+/// A pseudo-destructor is an expression that looks like a member access to a
+/// destructor of a scalar type, except that scalar types don't have
+/// destructors. For example:
+///
+/// \code
+/// typedef int T;
+/// void f(int *p) {
+/// p->T::~T();
+/// }
+/// \endcode
+///
+/// Pseudo-destructors typically occur when instantiating templates such as:
+///
+/// \code
+/// template<typename T>
+/// void destroy(T* ptr) {
+/// ptr->T::~T();
+/// }
+/// \endcode
+///
+/// for scalar types. A pseudo-destructor expression has no run-time semantics
+/// beyond evaluating the base expression.
+class CXXPseudoDestructorExpr : public Expr {
+ /// \brief The base expression (that is being destroyed).
+ Stmt *Base;
+
+ /// \brief Whether the operator was an arrow ('->'); otherwise, it was a
+ /// period ('.').
+ bool IsArrow : 1;
+
+ /// \brief The location of the '.' or '->' operator.
+ SourceLocation OperatorLoc;
+
+ /// \brief The nested-name-specifier that follows the operator, if present.
+ NestedNameSpecifierLoc QualifierLoc;
+
+ /// \brief The type that precedes the '::' in a qualified pseudo-destructor
+ /// expression.
+ TypeSourceInfo *ScopeType;
+
+ /// \brief The location of the '::' in a qualified pseudo-destructor
+ /// expression.
+ SourceLocation ColonColonLoc;
+
+ /// \brief The location of the '~'.
+ SourceLocation TildeLoc;
+
+ /// \brief The type being destroyed, or its name if we were unable to
+ /// resolve the name.
+ PseudoDestructorTypeStorage DestroyedType;
+
+ friend class ASTStmtReader;
+
+public:
+ CXXPseudoDestructorExpr(ASTContext &Context,
+ Expr *Base, bool isArrow, SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ TypeSourceInfo *ScopeType,
+ SourceLocation ColonColonLoc,
+ SourceLocation TildeLoc,
+ PseudoDestructorTypeStorage DestroyedType);
+
+ explicit CXXPseudoDestructorExpr(EmptyShell Shell)
+ : Expr(CXXPseudoDestructorExprClass, Shell),
+ Base(0), IsArrow(false), QualifierLoc(), ScopeType(0) { }
+
+ Expr *getBase() const { return cast<Expr>(Base); }
+
+ /// \brief Determines whether this member expression actually had
+ /// a C++ nested-name-specifier prior to the name of the member, e.g.,
+ /// x->Base::foo.
+ bool hasQualifier() const { return QualifierLoc; }
+
+ /// \brief Retrieves the nested-name-specifier that qualifies the type name,
+ /// with source-location information.
+ NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
+
+ /// \brief If the member name was qualified, retrieves the
+ /// nested-name-specifier that precedes the member name. Otherwise, returns
+ /// NULL.
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
+ }
+
+ /// \brief Determine whether this pseudo-destructor expression was written
+ /// using an '->' (otherwise, it used a '.').
+ bool isArrow() const { return IsArrow; }
+
+ /// \brief Retrieve the location of the '.' or '->' operator.
+ SourceLocation getOperatorLoc() const { return OperatorLoc; }
+
+ /// \brief Retrieve the scope type in a qualified pseudo-destructor
+ /// expression.
+ ///
+ /// Pseudo-destructor expressions can have extra qualification within them
+ /// that is not part of the nested-name-specifier, e.g., \c p->T::~T().
+ /// Here, if the object type of the expression is (or may be) a scalar type,
+ /// \p T may also be a scalar type and, therefore, cannot be part of a
+ /// nested-name-specifier. It is stored as the "scope type" of the pseudo-
+ /// destructor expression.
+ TypeSourceInfo *getScopeTypeInfo() const { return ScopeType; }
+
+ /// \brief Retrieve the location of the '::' in a qualified pseudo-destructor
+ /// expression.
+ SourceLocation getColonColonLoc() const { return ColonColonLoc; }
+
+ /// \brief Retrieve the location of the '~'.
+ SourceLocation getTildeLoc() const { return TildeLoc; }
+
+ /// \brief Retrieve the source location information for the type
+ /// being destroyed.
+ ///
+ /// This type-source information is available for non-dependent
+ /// pseudo-destructor expressions and some dependent pseudo-destructor
+ /// expressions. Returns NULL if we only have the identifier for a
+ /// dependent pseudo-destructor expression.
+ TypeSourceInfo *getDestroyedTypeInfo() const {
+ return DestroyedType.getTypeSourceInfo();
+ }
+
+ /// \brief In a dependent pseudo-destructor expression for which we do not
+ /// have full type information on the destroyed type, provides the name
+ /// of the destroyed type.
+ IdentifierInfo *getDestroyedTypeIdentifier() const {
+ return DestroyedType.getIdentifier();
+ }
+
+ /// \brief Retrieve the type being destroyed.
+ QualType getDestroyedType() const;
+
+ /// \brief Retrieve the starting location of the type being destroyed.
+ SourceLocation getDestroyedTypeLoc() const {
+ return DestroyedType.getLocation();
+ }
+
+ /// \brief Set the name of destroyed type for a dependent pseudo-destructor
+ /// expression.
+ void setDestroyedType(IdentifierInfo *II, SourceLocation Loc) {
+ DestroyedType = PseudoDestructorTypeStorage(II, Loc);
+ }
+
+ /// \brief Set the destroyed type.
+ void setDestroyedType(TypeSourceInfo *Info) {
+ DestroyedType = PseudoDestructorTypeStorage(Info);
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXPseudoDestructorExprClass;
+ }
+ static bool classof(const CXXPseudoDestructorExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&Base, &Base + 1); }
+};
+
+/// UnaryTypeTraitExpr - A GCC or MS unary type trait, as used in the
+/// implementation of TR1/C++0x type trait templates.
+/// Example:
+/// __is_pod(int) == true
+/// __is_enum(std::string) == false
+class UnaryTypeTraitExpr : public Expr {
+ /// UTT - The trait. A UnaryTypeTrait enum, stored as an unsigned for
+ /// MSVC compatibility.
+ unsigned UTT : 31;
+ /// The value of the type trait. Unspecified if dependent.
+ bool Value : 1;
+
+ /// Loc - The location of the type trait keyword.
+ SourceLocation Loc;
+
+ /// RParen - The location of the closing paren.
+ SourceLocation RParen;
+
+ /// The type being queried.
+ TypeSourceInfo *QueriedType;
+
+public:
+ UnaryTypeTraitExpr(SourceLocation loc, UnaryTypeTrait utt,
+ TypeSourceInfo *queried, bool value,
+ SourceLocation rparen, QualType ty)
+ : Expr(UnaryTypeTraitExprClass, ty, VK_RValue, OK_Ordinary,
+ false, queried->getType()->isDependentType(),
+ queried->getType()->isInstantiationDependentType(),
+ queried->getType()->containsUnexpandedParameterPack()),
+ UTT(utt), Value(value), Loc(loc), RParen(rparen), QueriedType(queried) { }
+
+ explicit UnaryTypeTraitExpr(EmptyShell Empty)
+ : Expr(UnaryTypeTraitExprClass, Empty), UTT(0), Value(false),
+ QueriedType() { }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc, RParen);}
+
+ UnaryTypeTrait getTrait() const { return static_cast<UnaryTypeTrait>(UTT); }
+
+ QualType getQueriedType() const { return QueriedType->getType(); }
+
+ TypeSourceInfo *getQueriedTypeSourceInfo() const { return QueriedType; }
+
+ bool getValue() const { return Value; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == UnaryTypeTraitExprClass;
+ }
+ static bool classof(const UnaryTypeTraitExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+
+ friend class ASTStmtReader;
+};
+
+/// BinaryTypeTraitExpr - A GCC or MS binary type trait, as used in the
+/// implementation of TR1/C++0x type trait templates.
+/// Example:
+/// __is_base_of(Base, Derived) == true
+class BinaryTypeTraitExpr : public Expr {
+ /// BTT - The trait. A BinaryTypeTrait enum, stored as an unsigned for
+ /// MSVC compatibility.
+ unsigned BTT : 8;
+
+ /// The value of the type trait. Unspecified if dependent.
+ bool Value : 1;
+
+ /// Loc - The location of the type trait keyword.
+ SourceLocation Loc;
+
+ /// RParen - The location of the closing paren.
+ SourceLocation RParen;
+
+ /// The lhs type being queried.
+ TypeSourceInfo *LhsType;
+
+ /// The rhs type being queried.
+ TypeSourceInfo *RhsType;
+
+public:
+ BinaryTypeTraitExpr(SourceLocation loc, BinaryTypeTrait btt,
+ TypeSourceInfo *lhsType, TypeSourceInfo *rhsType,
+ bool value, SourceLocation rparen, QualType ty)
+ : Expr(BinaryTypeTraitExprClass, ty, VK_RValue, OK_Ordinary, false,
+ lhsType->getType()->isDependentType() ||
+ rhsType->getType()->isDependentType(),
+ (lhsType->getType()->isInstantiationDependentType() ||
+ rhsType->getType()->isInstantiationDependentType()),
+ (lhsType->getType()->containsUnexpandedParameterPack() ||
+ rhsType->getType()->containsUnexpandedParameterPack())),
+ BTT(btt), Value(value), Loc(loc), RParen(rparen),
+ LhsType(lhsType), RhsType(rhsType) { }
+
+
+ explicit BinaryTypeTraitExpr(EmptyShell Empty)
+ : Expr(BinaryTypeTraitExprClass, Empty), BTT(0), Value(false),
+ LhsType(), RhsType() { }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(Loc, RParen);
+ }
+
+ BinaryTypeTrait getTrait() const {
+ return static_cast<BinaryTypeTrait>(BTT);
+ }
+
+ QualType getLhsType() const { return LhsType->getType(); }
+ QualType getRhsType() const { return RhsType->getType(); }
+
+ TypeSourceInfo *getLhsTypeSourceInfo() const { return LhsType; }
+ TypeSourceInfo *getRhsTypeSourceInfo() const { return RhsType; }
+
+ bool getValue() const { assert(!isTypeDependent()); return Value; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == BinaryTypeTraitExprClass;
+ }
+ static bool classof(const BinaryTypeTraitExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+
+ friend class ASTStmtReader;
+};
+
+/// \brief A type trait used in the implementation of various C++11 and
+/// Library TR1 trait templates.
+///
+/// \code
+/// __is_trivially_constructible(vector<int>, int*, int*)
+/// \endcode
+class TypeTraitExpr : public Expr {
+ /// \brief The location of the type trait keyword.
+ SourceLocation Loc;
+
+ /// \brief The location of the closing parenthesis.
+ SourceLocation RParenLoc;
+
+ // Note: The TypeSourceInfos for the arguments are allocated after the
+ // TypeTraitExpr.
+
+ TypeTraitExpr(QualType T, SourceLocation Loc, TypeTrait Kind,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc,
+ bool Value);
+
+ TypeTraitExpr(EmptyShell Empty) : Expr(TypeTraitExprClass, Empty) { }
+
+ /// \brief Retrieve the argument types.
+ TypeSourceInfo **getTypeSourceInfos() {
+ return reinterpret_cast<TypeSourceInfo **>(this+1);
+ }
+
+ /// \brief Retrieve the argument types.
+ TypeSourceInfo * const *getTypeSourceInfos() const {
+ return reinterpret_cast<TypeSourceInfo * const*>(this+1);
+ }
+
+public:
+ /// \brief Create a new type trait expression.
+ static TypeTraitExpr *Create(ASTContext &C, QualType T, SourceLocation Loc,
+ TypeTrait Kind,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc,
+ bool Value);
+
+ static TypeTraitExpr *CreateDeserialized(ASTContext &C, unsigned NumArgs);
+
+ /// \brief Determine which type trait this expression uses.
+ TypeTrait getTrait() const {
+ return static_cast<TypeTrait>(TypeTraitExprBits.Kind);
+ }
+
+ bool getValue() const {
+ assert(!isValueDependent());
+ return TypeTraitExprBits.Value;
+ }
+
+ /// \brief Determine the number of arguments to this type trait.
+ unsigned getNumArgs() const { return TypeTraitExprBits.NumArgs; }
+
+ /// \brief Retrieve the Ith argument.
+ TypeSourceInfo *getArg(unsigned I) const {
+ assert(I < getNumArgs() && "Argument out-of-range");
+ return getArgs()[I];
+ }
+
+ /// \brief Retrieve the argument types.
+ ArrayRef<TypeSourceInfo *> getArgs() const {
+ return ArrayRef<TypeSourceInfo *>(getTypeSourceInfos(), getNumArgs());
+ }
+
+ typedef TypeSourceInfo **arg_iterator;
+ arg_iterator arg_begin() {
+ return getTypeSourceInfos();
+ }
+ arg_iterator arg_end() {
+ return getTypeSourceInfos() + getNumArgs();
+ }
+
+ typedef TypeSourceInfo const * const *arg_const_iterator;
+ arg_const_iterator arg_begin() const { return getTypeSourceInfos(); }
+ arg_const_iterator arg_end() const {
+ return getTypeSourceInfos() + getNumArgs();
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc, RParenLoc); }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == TypeTraitExprClass;
+ }
+ static bool classof(const TypeTraitExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+
+};
+
+/// ArrayTypeTraitExpr - An Embarcadero array type trait, as used in the
+/// implementation of __array_rank and __array_extent.
+/// Example:
+/// __array_rank(int[10][20]) == 2
+/// __array_extent(int[10][20], 1) == 20
+class ArrayTypeTraitExpr : public Expr {
+ virtual void anchor();
+
+ /// ATT - The trait. An ArrayTypeTrait enum, stored as an unsigned for
+ /// MSVC compatibility.
+ unsigned ATT : 2;
+
+ /// The value of the type trait. Unspecified if dependent.
+ uint64_t Value;
+
+ /// The array dimension being queried, or -1 if not used
+ Expr *Dimension;
+
+ /// Loc - The location of the type trait keyword.
+ SourceLocation Loc;
+
+ /// RParen - The location of the closing paren.
+ SourceLocation RParen;
+
+ /// The type being queried.
+ TypeSourceInfo *QueriedType;
+
+public:
+ ArrayTypeTraitExpr(SourceLocation loc, ArrayTypeTrait att,
+ TypeSourceInfo *queried, uint64_t value,
+ Expr *dimension, SourceLocation rparen, QualType ty)
+ : Expr(ArrayTypeTraitExprClass, ty, VK_RValue, OK_Ordinary,
+ false, queried->getType()->isDependentType(),
+ (queried->getType()->isInstantiationDependentType() ||
+ (dimension && dimension->isInstantiationDependent())),
+ queried->getType()->containsUnexpandedParameterPack()),
+ ATT(att), Value(value), Dimension(dimension),
+ Loc(loc), RParen(rparen), QueriedType(queried) { }
+
+
+ explicit ArrayTypeTraitExpr(EmptyShell Empty)
+ : Expr(ArrayTypeTraitExprClass, Empty), ATT(0), Value(false),
+ QueriedType() { }
+
+ virtual ~ArrayTypeTraitExpr() { }
+
+ virtual SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(Loc, RParen);
+ }
+
+ ArrayTypeTrait getTrait() const { return static_cast<ArrayTypeTrait>(ATT); }
+
+ QualType getQueriedType() const { return QueriedType->getType(); }
+
+ TypeSourceInfo *getQueriedTypeSourceInfo() const { return QueriedType; }
+
+ uint64_t getValue() const { assert(!isTypeDependent()); return Value; }
+
+ Expr *getDimensionExpression() const { return Dimension; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ArrayTypeTraitExprClass;
+ }
+ static bool classof(const ArrayTypeTraitExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+
+ friend class ASTStmtReader;
+};
+
+/// ExpressionTraitExpr - An expression trait intrinsic
+/// Example:
+/// __is_lvalue_expr(std::cout) == true
+/// __is_lvalue_expr(1) == false
+class ExpressionTraitExpr : public Expr {
+ /// ET - The trait. An ExpressionTrait enum, stored as an unsigned for
+ /// MSVC compatibility.
+ unsigned ET : 31;
+ /// The value of the expression trait. Unspecified if dependent.
+ bool Value : 1;
+
+ /// Loc - The location of the type trait keyword.
+ SourceLocation Loc;
+
+ /// RParen - The location of the closing paren.
+ SourceLocation RParen;
+
+ Expr* QueriedExpression;
+public:
+ ExpressionTraitExpr(SourceLocation loc, ExpressionTrait et,
+ Expr *queried, bool value,
+ SourceLocation rparen, QualType resultType)
+ : Expr(ExpressionTraitExprClass, resultType, VK_RValue, OK_Ordinary,
+ false, // Not type-dependent
+ // Value-dependent if the argument is type-dependent.
+ queried->isTypeDependent(),
+ queried->isInstantiationDependent(),
+ queried->containsUnexpandedParameterPack()),
+ ET(et), Value(value), Loc(loc), RParen(rparen),
+ QueriedExpression(queried) { }
+
+ explicit ExpressionTraitExpr(EmptyShell Empty)
+ : Expr(ExpressionTraitExprClass, Empty), ET(0), Value(false),
+ QueriedExpression() { }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc, RParen);}
+
+ ExpressionTrait getTrait() const { return static_cast<ExpressionTrait>(ET); }
+
+ Expr *getQueriedExpression() const { return QueriedExpression; }
+
+ bool getValue() const { return Value; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ExpressionTraitExprClass;
+ }
+ static bool classof(const ExpressionTraitExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+
+ friend class ASTStmtReader;
+};
+
+
+/// \brief A reference to an overloaded function set, either an
+/// \c UnresolvedLookupExpr or an \c UnresolvedMemberExpr.
+class OverloadExpr : public Expr {
+ /// The common name of these declarations.
+ DeclarationNameInfo NameInfo;
+
+ /// \brief The nested-name-specifier that qualifies the name, if any.
+ NestedNameSpecifierLoc QualifierLoc;
+
+ /// The results. These are undesugared, which is to say, they may
+ /// include UsingShadowDecls. Access is relative to the naming
+ /// class.
+ // FIXME: Allocate this data after the OverloadExpr subclass.
+ DeclAccessPair *Results;
+ unsigned NumResults;
+
+protected:
+ /// \brief Whether the name includes info for explicit template
+ /// keyword and arguments.
+ bool HasTemplateKWAndArgsInfo;
+
+ /// \brief Return the optional template keyword and arguments info.
+ ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo(); // defined far below.
+
+ /// \brief Return the optional template keyword and arguments info.
+ const ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() const {
+ return const_cast<OverloadExpr*>(this)->getTemplateKWAndArgsInfo();
+ }
+
+ OverloadExpr(StmtClass K, ASTContext &C,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs,
+ UnresolvedSetIterator Begin, UnresolvedSetIterator End,
+ bool KnownDependent,
+ bool KnownInstantiationDependent,
+ bool KnownContainsUnexpandedParameterPack);
+
+ OverloadExpr(StmtClass K, EmptyShell Empty)
+ : Expr(K, Empty), QualifierLoc(), Results(0), NumResults(0),
+ HasTemplateKWAndArgsInfo(false) { }
+
+ void initializeResults(ASTContext &C,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End);
+
+public:
+ struct FindResult {
+ OverloadExpr *Expression;
+ bool IsAddressOfOperand;
+ bool HasFormOfMemberPointer;
+ };
+
+ /// Finds the overloaded expression in the given expression of
+ /// OverloadTy.
+ ///
+ /// \return the expression (which must be there) and true if it has
+ /// the particular form of a member pointer expression
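+ ///
+ /// For illustration, when \c f names an overload set:
+ /// \code
+ /// &X::f // HasFormOfMemberPointer is true (qualified, no extra parens)
+ /// &(f)  // IsAddressOfOperand is true, but not the member-pointer form
+ /// f     // neither flag is set
+ /// \endcode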
+ static FindResult find(Expr *E) {
+ assert(E->getType()->isSpecificBuiltinType(BuiltinType::Overload));
+
+ FindResult Result;
+
+ E = E->IgnoreParens();
+ if (isa<UnaryOperator>(E)) {
+ assert(cast<UnaryOperator>(E)->getOpcode() == UO_AddrOf);
+ E = cast<UnaryOperator>(E)->getSubExpr();
+ OverloadExpr *Ovl = cast<OverloadExpr>(E->IgnoreParens());
+
+ Result.HasFormOfMemberPointer = (E == Ovl && Ovl->getQualifier());
+ Result.IsAddressOfOperand = true;
+ Result.Expression = Ovl;
+ } else {
+ Result.HasFormOfMemberPointer = false;
+ Result.IsAddressOfOperand = false;
+ Result.Expression = cast<OverloadExpr>(E);
+ }
+
+ return Result;
+ }
+
+ /// Gets the naming class of this lookup, if any.
+ CXXRecordDecl *getNamingClass() const;
+
+ typedef UnresolvedSetImpl::iterator decls_iterator;
+ decls_iterator decls_begin() const { return UnresolvedSetIterator(Results); }
+ decls_iterator decls_end() const {
+ return UnresolvedSetIterator(Results + NumResults);
+ }
+
+ /// Gets the number of declarations in the unresolved set.
+ unsigned getNumDecls() const { return NumResults; }
+
+ /// Gets the full name info.
+ const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
+
+ /// Gets the name looked up.
+ DeclarationName getName() const { return NameInfo.getName(); }
+
+ /// Gets the location of the name.
+ SourceLocation getNameLoc() const { return NameInfo.getLoc(); }
+
+ /// Fetches the nested-name qualifier, if one was given.
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
+ }
+
+ /// Fetches the nested-name qualifier with source-location information, if
+ /// one was given.
+ NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
+
+ /// \brief Retrieve the location of the template keyword preceding
+ /// this name, if any.
+ SourceLocation getTemplateKeywordLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->getTemplateKeywordLoc();
+ }
+
+ /// \brief Retrieve the location of the left angle bracket starting the
+ /// explicit template argument list following the name, if any.
+ SourceLocation getLAngleLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->LAngleLoc;
+ }
+
+ /// \brief Retrieve the location of the right angle bracket ending the
+ /// explicit template argument list following the name, if any.
+ SourceLocation getRAngleLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->RAngleLoc;
+ }
+
+ /// Determines whether the name was preceded by the template keyword.
+ bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
+
+ /// Determines whether this expression had explicit template arguments.
+ bool hasExplicitTemplateArgs() const { return getLAngleLoc().isValid(); }
+
+ // Note that, inconsistently with the explicit-template-argument AST
+ // nodes, users are *forbidden* from calling these methods on objects
+ // without explicit template arguments.
+
+ ASTTemplateArgumentListInfo &getExplicitTemplateArgs() {
+ assert(hasExplicitTemplateArgs());
+ return *getTemplateKWAndArgsInfo();
+ }
+
+ const ASTTemplateArgumentListInfo &getExplicitTemplateArgs() const {
+ return const_cast<OverloadExpr*>(this)->getExplicitTemplateArgs();
+ }
+
+ TemplateArgumentLoc const *getTemplateArgs() const {
+ return getExplicitTemplateArgs().getTemplateArgs();
+ }
+
+ unsigned getNumTemplateArgs() const {
+ return getExplicitTemplateArgs().NumTemplateArgs;
+ }
+
+ /// Copies the template arguments into the given structure.
+ void copyTemplateArgumentsInto(TemplateArgumentListInfo &List) const {
+ getExplicitTemplateArgs().copyInto(List);
+ }
+
+ /// \brief Retrieves the optional explicit template arguments.
+ /// This points to the same data as getExplicitTemplateArgs(), but
+ /// returns null if there are no explicit template arguments.
+ const ASTTemplateArgumentListInfo *getOptionalExplicitTemplateArgs() {
+ if (!hasExplicitTemplateArgs()) return 0;
+ return &getExplicitTemplateArgs();
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == UnresolvedLookupExprClass ||
+ T->getStmtClass() == UnresolvedMemberExprClass;
+ }
+ static bool classof(const OverloadExpr *) { return true; }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+};
+
+/// \brief A reference to a name which we were able to look up during
+/// parsing but could not resolve to a specific declaration. This
+/// arises in several ways:
+/// * we might be waiting for argument-dependent lookup
+/// * the name might resolve to an overloaded function
+/// and eventually:
+/// * the lookup might have included a function template
+/// These never include UnresolvedUsingValueDecls, which are always
+/// class members and therefore appear only in
+/// UnresolvedMemberLookupExprs.
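+///
+/// For illustration, in
+/// \code
+/// template<typename T> void g(T t) { f(t); }
+/// \endcode
+/// the reference to \c f may be represented by an UnresolvedLookupExpr whose
+/// candidate set is completed by argument-dependent lookup when the template
+/// is instantiated.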
+class UnresolvedLookupExpr : public OverloadExpr {
+ /// True if these lookup results should be extended by
+ /// argument-dependent lookup if this is the operand of a function
+ /// call.
+ bool RequiresADL;
+
+ /// True if namespace ::std should be considered an associated namespace
+ /// for the purposes of argument-dependent lookup. See C++0x [stmt.ranged]p1.
+ bool StdIsAssociatedNamespace;
+
+ /// True if these lookup results are overloaded. This is pretty
+ /// trivially rederivable if we urgently need to kill this field.
+ bool Overloaded;
+
+ /// The naming class (C++ [class.access.base]p5) of the lookup, if
+ /// any. This can generally be recalculated from the context chain,
+ /// but that can be fairly expensive for unqualified lookups. If we
+ /// want to improve memory use here, this could go in a union
+ /// against the qualified-lookup bits.
+ CXXRecordDecl *NamingClass;
+
+ UnresolvedLookupExpr(ASTContext &C,
+ CXXRecordDecl *NamingClass,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ bool RequiresADL, bool Overloaded,
+ const TemplateArgumentListInfo *TemplateArgs,
+ UnresolvedSetIterator Begin, UnresolvedSetIterator End,
+ bool StdIsAssociatedNamespace)
+ : OverloadExpr(UnresolvedLookupExprClass, C, QualifierLoc, TemplateKWLoc,
+ NameInfo, TemplateArgs, Begin, End, false, false, false),
+ RequiresADL(RequiresADL),
+ StdIsAssociatedNamespace(StdIsAssociatedNamespace),
+ Overloaded(Overloaded), NamingClass(NamingClass)
+ {}
+
+ UnresolvedLookupExpr(EmptyShell Empty)
+ : OverloadExpr(UnresolvedLookupExprClass, Empty),
+ RequiresADL(false), StdIsAssociatedNamespace(false), Overloaded(false),
+ NamingClass(0)
+ {}
+
+ friend class ASTStmtReader;
+
+public:
+ static UnresolvedLookupExpr *Create(ASTContext &C,
+ CXXRecordDecl *NamingClass,
+ NestedNameSpecifierLoc QualifierLoc,
+ const DeclarationNameInfo &NameInfo,
+ bool ADL, bool Overloaded,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End,
+ bool StdIsAssociatedNamespace = false) {
+ assert((ADL || !StdIsAssociatedNamespace) &&
+ "std considered associated namespace when not performing ADL");
+ return new(C) UnresolvedLookupExpr(C, NamingClass, QualifierLoc,
+ SourceLocation(), NameInfo,
+ ADL, Overloaded, 0, Begin, End,
+ StdIsAssociatedNamespace);
+ }
+
+ static UnresolvedLookupExpr *Create(ASTContext &C,
+ CXXRecordDecl *NamingClass,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ bool ADL,
+ const TemplateArgumentListInfo *Args,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End);
+
+ static UnresolvedLookupExpr *CreateEmpty(ASTContext &C,
+ bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs);
+
+ /// True if this declaration should be extended by
+ /// argument-dependent lookup.
+ bool requiresADL() const { return RequiresADL; }
+
+ /// True if namespace ::std should be artificially added to the set of
+ /// associated namespaces for argument-dependent lookup purposes.
+ bool isStdAssociatedNamespace() const { return StdIsAssociatedNamespace; }
+
+ /// True if this lookup is overloaded.
+ bool isOverloaded() const { return Overloaded; }
+
+ /// Gets the 'naming class' (in the sense of C++0x
+ /// [class.access.base]p5) of the lookup. This is the scope
+ /// that was looked in to find these results.
+ CXXRecordDecl *getNamingClass() const { return NamingClass; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ SourceRange Range(getNameInfo().getSourceRange());
+ if (getQualifierLoc())
+ Range.setBegin(getQualifierLoc().getBeginLoc());
+ if (hasExplicitTemplateArgs())
+ Range.setEnd(getRAngleLoc());
+ return Range;
+ }
+
+ child_range children() { return child_range(); }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == UnresolvedLookupExprClass;
+ }
+ static bool classof(const UnresolvedLookupExpr *) { return true; }
+};
+
+/// \brief A qualified reference to a name whose declaration cannot
+/// yet be resolved.
+///
+/// DependentScopeDeclRefExpr is similar to DeclRefExpr in that
+/// it expresses a reference to a declaration such as
+/// X<T>::value. The difference, however, is that an
+/// DependentScopeDeclRefExpr node is used only within C++ templates when
+/// the qualification (e.g., X<T>::) refers to a dependent type. In
+/// this case, X<T>::value cannot resolve to a declaration because the
+/// declaration will differ from one instantiation of X<T> to the
+/// next. Therefore, DependentScopeDeclRefExpr keeps track of the
+/// qualifier (X<T>::) and the name of the entity being referenced
+/// ("value"). Such expressions will instantiate to a DeclRefExpr once the
+/// declaration can be found.
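+///
+/// For illustration:
+/// \code
+/// template<typename T> int get() { return X<T>::value; }
+/// \endcode
+/// Here \c X<T>::value is represented by a DependentScopeDeclRefExpr until
+/// the template is instantiated and the declaration can be found.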
+class DependentScopeDeclRefExpr : public Expr {
+ /// \brief The nested-name-specifier that qualifies this unresolved
+ /// declaration name.
+ NestedNameSpecifierLoc QualifierLoc;
+
+ /// The name of the entity we will be referencing.
+ DeclarationNameInfo NameInfo;
+
+ /// \brief Whether the name includes info for explicit template
+ /// keyword and arguments.
+ bool HasTemplateKWAndArgsInfo;
+
+ /// \brief Return the optional template keyword and arguments info.
+ ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() {
+ if (!HasTemplateKWAndArgsInfo) return 0;
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo*>(this + 1);
+ }
+ /// \brief Return the optional template keyword and arguments info.
+ const ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() const {
+ return const_cast<DependentScopeDeclRefExpr*>(this)
+ ->getTemplateKWAndArgsInfo();
+ }
+
+ DependentScopeDeclRefExpr(QualType T,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *Args);
+
+public:
+ static DependentScopeDeclRefExpr *Create(ASTContext &C,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs);
+
+ static DependentScopeDeclRefExpr *CreateEmpty(ASTContext &C,
+ bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs);
+
+ /// \brief Retrieve the name that this expression refers to.
+ const DeclarationNameInfo &getNameInfo() const { return NameInfo; }
+
+ /// \brief Retrieve the name that this expression refers to.
+ DeclarationName getDeclName() const { return NameInfo.getName(); }
+
+ /// \brief Retrieve the location of the name within the expression.
+ SourceLocation getLocation() const { return NameInfo.getLoc(); }
+
+ /// \brief Retrieve the nested-name-specifier that qualifies the
+ /// name, with source location information.
+ NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
+
+
+ /// \brief Retrieve the nested-name-specifier that qualifies this
+ /// declaration.
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
+ }
+
+ /// \brief Retrieve the location of the template keyword preceding
+ /// this name, if any.
+ SourceLocation getTemplateKeywordLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->getTemplateKeywordLoc();
+ }
+
+ /// \brief Retrieve the location of the left angle bracket starting the
+ /// explicit template argument list following the name, if any.
+ SourceLocation getLAngleLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->LAngleLoc;
+ }
+
+ /// \brief Retrieve the location of the right angle bracket ending the
+ /// explicit template argument list following the name, if any.
+ SourceLocation getRAngleLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->RAngleLoc;
+ }
+
+ /// Determines whether the name was preceded by the template keyword.
+ bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
+
+ /// Determines whether this lookup had explicit template arguments.
+ bool hasExplicitTemplateArgs() const { return getLAngleLoc().isValid(); }
+
+ // Note that, inconsistently with the explicit-template-argument AST
+ // nodes, users are *forbidden* from calling these methods on objects
+ // without explicit template arguments.
+
+ ASTTemplateArgumentListInfo &getExplicitTemplateArgs() {
+ assert(hasExplicitTemplateArgs());
+ return *reinterpret_cast<ASTTemplateArgumentListInfo*>(this + 1);
+ }
+
+ /// Gets a reference to the explicit template argument list.
+ const ASTTemplateArgumentListInfo &getExplicitTemplateArgs() const {
+ assert(hasExplicitTemplateArgs());
+ return *reinterpret_cast<const ASTTemplateArgumentListInfo*>(this + 1);
+ }
+
+ /// \brief Retrieves the optional explicit template arguments.
+ /// This points to the same data as getExplicitTemplateArgs(), but
+ /// returns null if there are no explicit template arguments.
+ const ASTTemplateArgumentListInfo *getOptionalExplicitTemplateArgs() {
+ if (!hasExplicitTemplateArgs()) return 0;
+ return &getExplicitTemplateArgs();
+ }
+
+ /// \brief Copies the template arguments (if present) into the given
+ /// structure.
+ void copyTemplateArgumentsInto(TemplateArgumentListInfo &List) const {
+ getExplicitTemplateArgs().copyInto(List);
+ }
+
+ TemplateArgumentLoc const *getTemplateArgs() const {
+ return getExplicitTemplateArgs().getTemplateArgs();
+ }
+
+ unsigned getNumTemplateArgs() const {
+ return getExplicitTemplateArgs().NumTemplateArgs;
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ SourceRange Range(QualifierLoc.getBeginLoc(), getLocation());
+ if (hasExplicitTemplateArgs())
+ Range.setEnd(getRAngleLoc());
+ return Range;
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == DependentScopeDeclRefExprClass;
+ }
+ static bool classof(const DependentScopeDeclRefExpr *) { return true; }
+
+ child_range children() { return child_range(); }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+};
+
+/// Represents an expression --- generally a full-expression --- which
+/// introduces cleanups to be run at the end of the sub-expression's
+/// evaluation. The most common source of expression-introduced
+/// cleanups is temporary objects in C++, but several other kinds of
+/// expressions can create cleanups, including basically every
+/// call in ARC that returns an Objective-C pointer.
+///
+/// This expression also tracks whether the sub-expression contains a
+/// potentially-evaluated block literal. The lifetime of a block
+/// literal is the extent of the enclosing scope.
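+///
+/// For illustration, an initialization such as
+/// \code
+/// std::string s = std::string("hello") + ", world";
+/// \endcode
+/// creates temporaries whose destructors must run at the end of the
+/// full-expression, so the initializer is wrapped in an ExprWithCleanups.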
+class ExprWithCleanups : public Expr {
+public:
+ /// The type of objects that are kept in the cleanup.
+ /// It's useful to remember the set of blocks; we could also
+ /// remember the set of temporaries, but there's currently
+ /// no need.
+ typedef BlockDecl *CleanupObject;
+
+private:
+ Stmt *SubExpr;
+
+ ExprWithCleanups(EmptyShell, unsigned NumObjects);
+ ExprWithCleanups(Expr *SubExpr, ArrayRef<CleanupObject> Objects);
+
+ CleanupObject *getObjectsBuffer() {
+ return reinterpret_cast<CleanupObject*>(this + 1);
+ }
+ const CleanupObject *getObjectsBuffer() const {
+ return reinterpret_cast<const CleanupObject*>(this + 1);
+ }
+ friend class ASTStmtReader;
+
+public:
+ static ExprWithCleanups *Create(ASTContext &C, EmptyShell empty,
+ unsigned numObjects);
+
+ static ExprWithCleanups *Create(ASTContext &C, Expr *subexpr,
+ ArrayRef<CleanupObject> objects);
+
+ ArrayRef<CleanupObject> getObjects() const {
+ return ArrayRef<CleanupObject>(getObjectsBuffer(), getNumObjects());
+ }
+
+ unsigned getNumObjects() const { return ExprWithCleanupsBits.NumObjects; }
+
+ CleanupObject getObject(unsigned i) const {
+ assert(i < getNumObjects() && "Index out of range");
+ return getObjects()[i];
+ }
+
+ Expr *getSubExpr() { return cast<Expr>(SubExpr); }
+ const Expr *getSubExpr() const { return cast<Expr>(SubExpr); }
+
+ /// setSubExpr - As with any mutator of the AST, be very careful
+ /// when modifying an existing AST to preserve its invariants.
+ void setSubExpr(Expr *E) { SubExpr = E; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SubExpr->getSourceRange();
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ExprWithCleanupsClass;
+ }
+ static bool classof(const ExprWithCleanups *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&SubExpr, &SubExpr + 1); }
+};
+
+/// \brief Describes an explicit type conversion that uses functional
+/// notation but could not be resolved because one or more arguments are
+/// type-dependent.
+///
+/// The explicit type conversions expressed by
+/// CXXUnresolvedConstructExpr have the form \c T(a1, a2, ..., aN),
+/// where \c T is some type and \c a1, a2, ..., aN are values, and
+/// either \c T is a dependent type or one or more of the \c a's is
+/// type-dependent. For example, this would occur in a template such
+/// as:
+///
+/// \code
+/// template<typename T, typename A1>
+/// inline T make_a(const A1& a1) {
+/// return T(a1);
+/// }
+/// \endcode
+///
+/// When the returned expression is instantiated, it may resolve to a
+/// constructor call, conversion function call, or some kind of type
+/// conversion.
+class CXXUnresolvedConstructExpr : public Expr {
+ /// \brief The type being constructed.
+ TypeSourceInfo *Type;
+
+ /// \brief The location of the left parenthesis ('(').
+ SourceLocation LParenLoc;
+
+ /// \brief The location of the right parenthesis (')').
+ SourceLocation RParenLoc;
+
+ /// \brief The number of arguments used to construct the type.
+ unsigned NumArgs;
+
+ CXXUnresolvedConstructExpr(TypeSourceInfo *Type,
+ SourceLocation LParenLoc,
+ Expr **Args,
+ unsigned NumArgs,
+ SourceLocation RParenLoc);
+
+ CXXUnresolvedConstructExpr(EmptyShell Empty, unsigned NumArgs)
+ : Expr(CXXUnresolvedConstructExprClass, Empty), Type(), NumArgs(NumArgs) { }
+
+ friend class ASTStmtReader;
+
+public:
+ static CXXUnresolvedConstructExpr *Create(ASTContext &C,
+ TypeSourceInfo *Type,
+ SourceLocation LParenLoc,
+ Expr **Args,
+ unsigned NumArgs,
+ SourceLocation RParenLoc);
+
+ static CXXUnresolvedConstructExpr *CreateEmpty(ASTContext &C,
+ unsigned NumArgs);
+
+ /// \brief Retrieve the type that is being constructed, as specified
+ /// in the source code.
+ QualType getTypeAsWritten() const { return Type->getType(); }
+
+ /// \brief Retrieve the type source information for the type being
+ /// constructed.
+ TypeSourceInfo *getTypeSourceInfo() const { return Type; }
+
+ /// \brief Retrieve the location of the left parenthesis ('(') that
+ /// precedes the argument list.
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+ void setLParenLoc(SourceLocation L) { LParenLoc = L; }
+
+ /// \brief Retrieve the location of the right parenthesis (')') that
+ /// follows the argument list.
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
+ /// \brief Retrieve the number of arguments.
+ unsigned arg_size() const { return NumArgs; }
+
+ typedef Expr** arg_iterator;
+ arg_iterator arg_begin() { return reinterpret_cast<Expr**>(this + 1); }
+ arg_iterator arg_end() { return arg_begin() + NumArgs; }
+
+ typedef const Expr* const * const_arg_iterator;
+ const_arg_iterator arg_begin() const {
+ return reinterpret_cast<const Expr* const *>(this + 1);
+ }
+ const_arg_iterator arg_end() const {
+ return arg_begin() + NumArgs;
+ }
+
+ Expr *getArg(unsigned I) {
+ assert(I < NumArgs && "Argument index out-of-range");
+ return *(arg_begin() + I);
+ }
+
+ const Expr *getArg(unsigned I) const {
+ assert(I < NumArgs && "Argument index out-of-range");
+ return *(arg_begin() + I);
+ }
+
+ void setArg(unsigned I, Expr *E) {
+ assert(I < NumArgs && "Argument index out-of-range");
+ *(arg_begin() + I) = E;
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXUnresolvedConstructExprClass;
+ }
+ static bool classof(const CXXUnresolvedConstructExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ Stmt **begin = reinterpret_cast<Stmt**>(this+1);
+ return child_range(begin, begin + NumArgs);
+ }
+};
+
+/// \brief Represents a C++ member access expression where the actual
+/// member referenced could not be resolved because the base
+/// expression or the member name was dependent.
+///
+/// Like UnresolvedMemberExprs, these can be either implicit or
+/// explicit accesses. It is only possible to get one of these with
+/// an implicit access if a qualifier is provided.
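+///
+/// For illustration:
+/// \code
+/// template<typename T> void f(T t) { t.foo(); t->bar; }
+/// \endcode
+/// Here \c t.foo and \c t->bar cannot be resolved until \c T is known, so
+/// both member accesses are represented by this node.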
+class CXXDependentScopeMemberExpr : public Expr {
+ /// \brief The expression for the base pointer or class reference,
+ /// e.g., the \c x in x.f. Can be null in implicit accesses.
+ Stmt *Base;
+
+ /// \brief The type of the base expression. Never null, even for
+ /// implicit accesses.
+ QualType BaseType;
+
+ /// \brief Whether this member expression used the '->' operator or
+ /// the '.' operator.
+ bool IsArrow : 1;
+
+ /// \brief Whether this member expression has info for explicit template
+ /// keyword and arguments.
+ bool HasTemplateKWAndArgsInfo : 1;
+
+ /// \brief The location of the '->' or '.' operator.
+ SourceLocation OperatorLoc;
+
+ /// \brief The nested-name-specifier that precedes the member name, if any.
+ NestedNameSpecifierLoc QualifierLoc;
+
+ /// \brief In a qualified member access expression such as t->Base::f, this
+ /// member stores the results of name lookup in the context of the member
+ /// access expression, to be used at instantiation time.
+ ///
+ /// FIXME: This member, along with the QualifierLoc, could
+ /// be stuck into a structure that is optionally allocated at the end of
+ /// the CXXDependentScopeMemberExpr, to save space in the common case.
+ NamedDecl *FirstQualifierFoundInScope;
+
+ /// \brief The member to which this member expression refers, which
+ /// can be a name, an overloaded operator, or a destructor.
+ /// FIXME: could also be a template-id
+ DeclarationNameInfo MemberNameInfo;
+
+ /// \brief Return the optional template keyword and arguments info.
+ ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() {
+ if (!HasTemplateKWAndArgsInfo) return 0;
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo*>(this + 1);
+ }
+ /// \brief Return the optional template keyword and arguments info.
+ const ASTTemplateKWAndArgsInfo *getTemplateKWAndArgsInfo() const {
+ return const_cast<CXXDependentScopeMemberExpr*>(this)
+ ->getTemplateKWAndArgsInfo();
+ }
+
+ CXXDependentScopeMemberExpr(ASTContext &C,
+ Expr *Base, QualType BaseType, bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierFoundInScope,
+ DeclarationNameInfo MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs);
+
+public:
+ CXXDependentScopeMemberExpr(ASTContext &C,
+ Expr *Base, QualType BaseType,
+ bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ NamedDecl *FirstQualifierFoundInScope,
+ DeclarationNameInfo MemberNameInfo);
+
+ static CXXDependentScopeMemberExpr *
+ Create(ASTContext &C,
+ Expr *Base, QualType BaseType, bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierFoundInScope,
+ DeclarationNameInfo MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs);
+
+ static CXXDependentScopeMemberExpr *
+ CreateEmpty(ASTContext &C, bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs);
+
+ /// \brief True if this is an implicit access, i.e. one in which the
+ /// member being accessed was not written in the source. The source
+ /// location of the operator is invalid in this case.
+ bool isImplicitAccess() const;
+
+ /// \brief Retrieve the base object of this member expression,
+ /// e.g., the \c x in \c x.m.
+ Expr *getBase() const {
+ assert(!isImplicitAccess());
+ return cast<Expr>(Base);
+ }
+
+ QualType getBaseType() const { return BaseType; }
+
+ /// \brief Determine whether this member expression used the '->'
+ /// operator; otherwise, it used the '.' operator.
+ bool isArrow() const { return IsArrow; }
+
+ /// \brief Retrieve the location of the '->' or '.' operator.
+ SourceLocation getOperatorLoc() const { return OperatorLoc; }
+
+ /// \brief Retrieve the nested-name-specifier that qualifies the member
+ /// name.
+ NestedNameSpecifier *getQualifier() const {
+ return QualifierLoc.getNestedNameSpecifier();
+ }
+
+ /// \brief Retrieve the nested-name-specifier that qualifies the member
+ /// name, with source location information.
+ NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
+
+
+ /// \brief Retrieve the first part of the nested-name-specifier that was
+ /// found in the scope of the member access expression when the member access
+ /// was initially parsed.
+ ///
+ /// This function only returns a useful result when the member access
+ /// expression uses a qualified member name, e.g., "x.Base::f". Here, the
+ /// declaration
+ /// uses a qualified member name, e.g., "x.Base::f". Here, the declaration
+ /// returned by this function describes what was found by unqualified name
+ /// lookup for the identifier "Base" within the scope of the member access
+ /// expression itself. At template instantiation time, this information is
+ /// combined with the results of name lookup into the type of the object
+ /// expression itself (the class type of x).
+ NamedDecl *getFirstQualifierFoundInScope() const {
+ return FirstQualifierFoundInScope;
+ }
+
+ /// \brief Retrieve the name of the member that this expression
+ /// refers to.
+ const DeclarationNameInfo &getMemberNameInfo() const {
+ return MemberNameInfo;
+ }
+
+ /// \brief Retrieve the name of the member that this expression
+ /// refers to.
+ DeclarationName getMember() const { return MemberNameInfo.getName(); }
+
+ /// \brief Retrieve the location of the name of the member that this
+ /// expression refers to.
+ SourceLocation getMemberLoc() const { return MemberNameInfo.getLoc(); }
+
+ /// \brief Retrieve the location of the template keyword preceding the
+ /// member name, if any.
+ SourceLocation getTemplateKeywordLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->getTemplateKeywordLoc();
+ }
+
+ /// \brief Retrieve the location of the left angle bracket starting the
+ /// explicit template argument list following the member name, if any.
+ SourceLocation getLAngleLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->LAngleLoc;
+ }
+
+ /// \brief Retrieve the location of the right angle bracket ending the
+ /// explicit template argument list following the member name, if any.
+ SourceLocation getRAngleLoc() const {
+ if (!HasTemplateKWAndArgsInfo) return SourceLocation();
+ return getTemplateKWAndArgsInfo()->RAngleLoc;
+ }
+
+ /// Determines whether the member name was preceded by the template keyword.
+ bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
+
+ /// \brief Determines whether this member expression actually had a C++
+ /// template argument list explicitly specified, e.g., x.f<int>.
+ bool hasExplicitTemplateArgs() const { return getLAngleLoc().isValid(); }
+
+ /// \brief Retrieve the explicit template argument list that followed the
+ /// member template name, if any.
+ ASTTemplateArgumentListInfo &getExplicitTemplateArgs() {
+ assert(hasExplicitTemplateArgs());
+ return *reinterpret_cast<ASTTemplateArgumentListInfo *>(this + 1);
+ }
+
+ /// \brief Retrieve the explicit template argument list that followed the
+ /// member template name, if any.
+ const ASTTemplateArgumentListInfo &getExplicitTemplateArgs() const {
+ return const_cast<CXXDependentScopeMemberExpr *>(this)
+ ->getExplicitTemplateArgs();
+ }
+
+ /// \brief Retrieves the optional explicit template arguments.
+ /// This points to the same data as getExplicitTemplateArgs(), but
+ /// returns null if there are no explicit template arguments.
+ const ASTTemplateArgumentListInfo *getOptionalExplicitTemplateArgs() {
+ if (!hasExplicitTemplateArgs()) return 0;
+ return &getExplicitTemplateArgs();
+ }
+
+ /// \brief Copies the template arguments (if present) into the given
+ /// structure.
+ void copyTemplateArgumentsInto(TemplateArgumentListInfo &List) const {
+ getExplicitTemplateArgs().copyInto(List);
+ }
+
+ /// \brief Initializes the template arguments using the given structure.
+ void initializeTemplateArgumentsFrom(const TemplateArgumentListInfo &List) {
+ getExplicitTemplateArgs().initializeFrom(List);
+ }
+
+ /// \brief Retrieve the template arguments provided as part of this
+ /// template-id.
+ const TemplateArgumentLoc *getTemplateArgs() const {
+ return getExplicitTemplateArgs().getTemplateArgs();
+ }
+
+ /// \brief Retrieve the number of template arguments provided as part of this
+ /// template-id.
+ unsigned getNumTemplateArgs() const {
+ return getExplicitTemplateArgs().NumTemplateArgs;
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ SourceRange Range;
+ if (!isImplicitAccess())
+ Range.setBegin(Base->getSourceRange().getBegin());
+ else if (getQualifier())
+ Range.setBegin(getQualifierLoc().getBeginLoc());
+ else
+ Range.setBegin(MemberNameInfo.getBeginLoc());
+
+ if (hasExplicitTemplateArgs())
+ Range.setEnd(getRAngleLoc());
+ else
+ Range.setEnd(MemberNameInfo.getEndLoc());
+ return Range;
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXDependentScopeMemberExprClass;
+ }
+ static bool classof(const CXXDependentScopeMemberExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ if (isImplicitAccess()) return child_range();
+ return child_range(&Base, &Base + 1);
+ }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+};
+
+/// \brief Represents a C++ member access expression for which lookup
+/// produced a set of overloaded functions.
+///
+/// The member access may be explicit or implicit:
+///
+/// \code
+/// struct A {
+///   int a, b;
+///   int explicitAccess() { return this->a + this->A::b; }
+///   int implicitAccess() { return a + A::b; }
+/// };
+/// \endcode
+///
+/// In the final AST, an explicit access always becomes a MemberExpr.
+/// An implicit access may become either a MemberExpr or a
+/// DeclRefExpr, depending on whether the member is static.
+class UnresolvedMemberExpr : public OverloadExpr {
+ /// \brief Whether this member expression used the '->' operator or
+ /// the '.' operator.
+ bool IsArrow : 1;
+
+ /// \brief Whether the lookup results contain an unresolved using
+ /// declaration.
+ bool HasUnresolvedUsing : 1;
+
+ /// \brief The expression for the base pointer or class reference,
+ /// e.g., the \c x in \c x.f. This can be null if this is an 'unbased'
+ /// member expression.
+ Stmt *Base;
+
+ /// \brief The type of the base expression; never null.
+ QualType BaseType;
+
+ /// \brief The location of the '->' or '.' operator.
+ SourceLocation OperatorLoc;
+
+ UnresolvedMemberExpr(ASTContext &C, bool HasUnresolvedUsing,
+ Expr *Base, QualType BaseType, bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs,
+ UnresolvedSetIterator Begin, UnresolvedSetIterator End);
+
+ UnresolvedMemberExpr(EmptyShell Empty)
+ : OverloadExpr(UnresolvedMemberExprClass, Empty), IsArrow(false),
+ HasUnresolvedUsing(false), Base(0) { }
+
+ friend class ASTStmtReader;
+
+public:
+ static UnresolvedMemberExpr *
+ Create(ASTContext &C, bool HasUnresolvedUsing,
+ Expr *Base, QualType BaseType, bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs,
+ UnresolvedSetIterator Begin, UnresolvedSetIterator End);
+
+ static UnresolvedMemberExpr *
+ CreateEmpty(ASTContext &C, bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs);
+
+ /// \brief True if this is an implicit access, i.e. one in which the
+ /// member being accessed was not written in the source. The source
+ /// location of the operator is invalid in this case.
+ bool isImplicitAccess() const;
+
+ /// \brief Retrieve the base object of this member expression,
+ /// e.g., the \c x in \c x.m.
+ Expr *getBase() {
+ assert(!isImplicitAccess());
+ return cast<Expr>(Base);
+ }
+ const Expr *getBase() const {
+ assert(!isImplicitAccess());
+ return cast<Expr>(Base);
+ }
+
+ QualType getBaseType() const { return BaseType; }
+
+ /// \brief Determine whether the lookup results contain an unresolved using
+ /// declaration.
+ bool hasUnresolvedUsing() const { return HasUnresolvedUsing; }
+
+ /// \brief Determine whether this member expression used the '->'
+ /// operator; otherwise, it used the '.' operator.
+ bool isArrow() const { return IsArrow; }
+
+ /// \brief Retrieve the location of the '->' or '.' operator.
+ SourceLocation getOperatorLoc() const { return OperatorLoc; }
+
+ /// \brief Retrieves the naming class of this lookup.
+ CXXRecordDecl *getNamingClass() const;
+
+ /// \brief Retrieve the full name info for the member that this expression
+ /// refers to.
+ const DeclarationNameInfo &getMemberNameInfo() const { return getNameInfo(); }
+
+ /// \brief Retrieve the name of the member that this expression
+ /// refers to.
+ DeclarationName getMemberName() const { return getName(); }
+
+ /// \brief Retrieve the location of the name of the member that this
+ /// expression refers to.
+ SourceLocation getMemberLoc() const { return getNameLoc(); }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ SourceRange Range = getMemberNameInfo().getSourceRange();
+ if (!isImplicitAccess())
+ Range.setBegin(Base->getSourceRange().getBegin());
+ else if (getQualifierLoc())
+ Range.setBegin(getQualifierLoc().getBeginLoc());
+
+ if (hasExplicitTemplateArgs())
+ Range.setEnd(getRAngleLoc());
+ return Range;
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == UnresolvedMemberExprClass;
+ }
+ static bool classof(const UnresolvedMemberExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ if (isImplicitAccess()) return child_range();
+ return child_range(&Base, &Base + 1);
+ }
+};
+
+/// \brief Represents a C++0x noexcept expression (C++ [expr.unary.noexcept]).
+///
+/// The noexcept expression tests whether a given expression might throw. Its
+/// result is a boolean constant.
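+///
+/// A minimal illustrative sketch (hypothetical function \c g):
+///
+/// \code
+/// void g() noexcept;
+/// bool b = noexcept(g()); // 'noexcept(g())' is a CXXNoexceptExpr; true here
+/// \endcode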
+class CXXNoexceptExpr : public Expr {
+ bool Value : 1;
+ Stmt *Operand;
+ SourceRange Range;
+
+ friend class ASTStmtReader;
+
+public:
+ CXXNoexceptExpr(QualType Ty, Expr *Operand, CanThrowResult Val,
+ SourceLocation Keyword, SourceLocation RParen)
+ : Expr(CXXNoexceptExprClass, Ty, VK_RValue, OK_Ordinary,
+ /*TypeDependent*/false,
+ /*ValueDependent*/Val == CT_Dependent,
+ Val == CT_Dependent || Operand->isInstantiationDependent(),
+ Operand->containsUnexpandedParameterPack()),
+ Value(Val == CT_Cannot), Operand(Operand), Range(Keyword, RParen)
+ { }
+
+ CXXNoexceptExpr(EmptyShell Empty)
+ : Expr(CXXNoexceptExprClass, Empty)
+ { }
+
+ Expr *getOperand() const { return static_cast<Expr*>(Operand); }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return Range; }
+
+ bool getValue() const { return Value; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXNoexceptExprClass;
+ }
+ static bool classof(const CXXNoexceptExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&Operand, &Operand + 1); }
+};
+
+/// \brief Represents a C++0x pack expansion that produces a sequence of
+/// expressions.
+///
+/// A pack expansion expression contains a pattern (which itself is an
+/// expression) followed by an ellipsis. For example:
+///
+/// \code
+/// template<typename F, typename ...Types>
+/// void forward(F f, Types &&...args) {
+/// f(static_cast<Types&&>(args)...);
+/// }
+/// \endcode
+///
+/// Here, the argument to the function object \c f is a pack expansion whose
+/// pattern is \c static_cast<Types&&>(args). When the \c forward function
+/// template is instantiated, the pack expansion will instantiate to zero or
+/// more function arguments to the function object \c f.
+class PackExpansionExpr : public Expr {
+ SourceLocation EllipsisLoc;
+
+ /// \brief The number of expansions that will be produced by this pack
+ /// expansion expression, if known.
+ ///
+ /// When zero, the number of expansions is not known. Otherwise, this value
+ /// is the number of expansions + 1.
+ unsigned NumExpansions;
+
+ Stmt *Pattern;
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+
+public:
+ PackExpansionExpr(QualType T, Expr *Pattern, SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions)
+ : Expr(PackExpansionExprClass, T, Pattern->getValueKind(),
+ Pattern->getObjectKind(), /*TypeDependent=*/true,
+ /*ValueDependent=*/true, /*InstantiationDependent=*/true,
+ /*ContainsUnexpandedParameterPack=*/false),
+ EllipsisLoc(EllipsisLoc),
+ NumExpansions(NumExpansions? *NumExpansions + 1 : 0),
+ Pattern(Pattern) { }
+
+ PackExpansionExpr(EmptyShell Empty) : Expr(PackExpansionExprClass, Empty) { }
+
+ /// \brief Retrieve the pattern of the pack expansion.
+ Expr *getPattern() { return reinterpret_cast<Expr *>(Pattern); }
+
+ /// \brief Retrieve the pattern of the pack expansion.
+ const Expr *getPattern() const { return reinterpret_cast<Expr *>(Pattern); }
+
+ /// \brief Retrieve the location of the ellipsis that describes this pack
+ /// expansion.
+ SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
+
+ /// \brief Determine the number of expansions that will be produced when
+ /// this pack expansion is instantiated, if already known.
+ llvm::Optional<unsigned> getNumExpansions() const {
+ if (NumExpansions)
+ return NumExpansions - 1;
+
+ return llvm::Optional<unsigned>();
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(Pattern->getLocStart(), EllipsisLoc);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == PackExpansionExprClass;
+ }
+ static bool classof(const PackExpansionExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(&Pattern, &Pattern + 1);
+ }
+};
+
+inline ASTTemplateKWAndArgsInfo *OverloadExpr::getTemplateKWAndArgsInfo() {
+ if (!HasTemplateKWAndArgsInfo) return 0;
+ if (isa<UnresolvedLookupExpr>(this))
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo*>
+ (cast<UnresolvedLookupExpr>(this) + 1);
+ else
+ return reinterpret_cast<ASTTemplateKWAndArgsInfo*>
+ (cast<UnresolvedMemberExpr>(this) + 1);
+}
+
+/// \brief Represents an expression that computes the length of a parameter
+/// pack.
+///
+/// \code
+/// template<typename ...Types>
+/// struct count {
+/// static const unsigned value = sizeof...(Types);
+/// };
+/// \endcode
+class SizeOfPackExpr : public Expr {
+ /// \brief The location of the 'sizeof' keyword.
+ SourceLocation OperatorLoc;
+
+ /// \brief The location of the name of the parameter pack.
+ SourceLocation PackLoc;
+
+ /// \brief The location of the closing parenthesis.
+ SourceLocation RParenLoc;
+
+ /// \brief The length of the parameter pack, if known.
+ ///
+ /// When this expression is value-dependent, the length of the parameter pack
+ /// is unknown. When this expression is not value-dependent, the length is
+ /// known.
+ unsigned Length;
+
+ /// \brief The parameter pack itself.
+ NamedDecl *Pack;
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+
+public:
+ /// \brief Creates a value-dependent expression that computes the length of
+ /// the given parameter pack.
+ SizeOfPackExpr(QualType SizeType, SourceLocation OperatorLoc, NamedDecl *Pack,
+ SourceLocation PackLoc, SourceLocation RParenLoc)
+ : Expr(SizeOfPackExprClass, SizeType, VK_RValue, OK_Ordinary,
+ /*TypeDependent=*/false, /*ValueDependent=*/true,
+ /*InstantiationDependent=*/true,
+ /*ContainsUnexpandedParameterPack=*/false),
+ OperatorLoc(OperatorLoc), PackLoc(PackLoc), RParenLoc(RParenLoc),
+ Length(0), Pack(Pack) { }
+
+ /// \brief Creates an expression that computes the length of
+ /// the given parameter pack, which is already known.
+ SizeOfPackExpr(QualType SizeType, SourceLocation OperatorLoc, NamedDecl *Pack,
+ SourceLocation PackLoc, SourceLocation RParenLoc,
+ unsigned Length)
+ : Expr(SizeOfPackExprClass, SizeType, VK_RValue, OK_Ordinary,
+ /*TypeDependent=*/false, /*ValueDependent=*/false,
+ /*InstantiationDependent=*/false,
+ /*ContainsUnexpandedParameterPack=*/false),
+ OperatorLoc(OperatorLoc), PackLoc(PackLoc), RParenLoc(RParenLoc),
+ Length(Length), Pack(Pack) { }
+
+ /// \brief Create an empty expression.
+ SizeOfPackExpr(EmptyShell Empty) : Expr(SizeOfPackExprClass, Empty) { }
+
+ /// \brief Determine the location of the 'sizeof' keyword.
+ SourceLocation getOperatorLoc() const { return OperatorLoc; }
+
+ /// \brief Determine the location of the parameter pack.
+ SourceLocation getPackLoc() const { return PackLoc; }
+
+ /// \brief Determine the location of the right parenthesis.
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+
+ /// \brief Retrieve the parameter pack.
+ NamedDecl *getPack() const { return Pack; }
+
+ /// \brief Retrieve the length of the parameter pack.
+ ///
+ /// This routine may only be invoked when the expression is not
+ /// value-dependent.
+ unsigned getPackLength() const {
+ assert(!isValueDependent() &&
+ "Cannot get the length of a value-dependent pack size expression");
+ return Length;
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(OperatorLoc, RParenLoc);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == SizeOfPackExprClass;
+ }
+ static bool classof(const SizeOfPackExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// \brief Represents a reference to a non-type template parameter
+/// that has been substituted with a template argument.
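+///
+/// A minimal illustrative sketch (hypothetical template \c twice):
+///
+/// \code
+/// template<int N> int twice() { return N + N; }
+/// int four = twice<2>(); // in the instantiation, each 'N' becomes a
+///                        // SubstNonTypeTemplateParmExpr wrapping '2'
+/// \endcode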
+class SubstNonTypeTemplateParmExpr : public Expr {
+ /// \brief The replaced parameter.
+ NonTypeTemplateParmDecl *Param;
+
+ /// \brief The replacement expression.
+ Stmt *Replacement;
+
+ /// \brief The location of the non-type template parameter reference.
+ SourceLocation NameLoc;
+
+ friend class ASTReader;
+ friend class ASTStmtReader;
+ explicit SubstNonTypeTemplateParmExpr(EmptyShell Empty)
+ : Expr(SubstNonTypeTemplateParmExprClass, Empty) { }
+
+public:
+ SubstNonTypeTemplateParmExpr(QualType type,
+ ExprValueKind valueKind,
+ SourceLocation loc,
+ NonTypeTemplateParmDecl *param,
+ Expr *replacement)
+ : Expr(SubstNonTypeTemplateParmExprClass, type, valueKind, OK_Ordinary,
+ replacement->isTypeDependent(), replacement->isValueDependent(),
+ replacement->isInstantiationDependent(),
+ replacement->containsUnexpandedParameterPack()),
+ Param(param), Replacement(replacement), NameLoc(loc) {}
+
+ SourceLocation getNameLoc() const { return NameLoc; }
+ SourceRange getSourceRange() const LLVM_READONLY { return NameLoc; }
+
+ Expr *getReplacement() const { return cast<Expr>(Replacement); }
+
+ NonTypeTemplateParmDecl *getParameter() const { return Param; }
+
+ static bool classof(const Stmt *s) {
+ return s->getStmtClass() == SubstNonTypeTemplateParmExprClass;
+ }
+ static bool classof(const SubstNonTypeTemplateParmExpr *) {
+ return true;
+ }
+
+ // Iterators
+ child_range children() { return child_range(&Replacement, &Replacement+1); }
+};
+
+/// \brief Represents a reference to a non-type template parameter pack that
+/// has been substituted with a non-template argument pack.
+///
+/// When a pack expansion in the source code contains multiple parameter packs
+/// and those parameter packs correspond to different levels of template
+/// parameter lists, this node is used to represent a non-type template
+/// parameter pack from an outer level, which has already had its argument pack
+/// substituted but that still lives within a pack expansion that itself
+/// could not be instantiated. When actually performing a substitution into
+/// that pack expansion (e.g., when all template parameters have corresponding
+/// arguments), this type will be replaced with the appropriate underlying
+/// expression at the current pack substitution index.
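+///
+/// One way this can arise, roughly (hypothetical templates \c A and \c B):
+///
+/// \code
+/// template<int...> struct B { };
+/// template<int ...Ns> struct A {
+///   template<int ...Ms> using add = B<(Ns + Ms)...>;
+/// };
+/// // In A<1, 2>, the 'Ns' inside the still-unexpanded pack expansion is a
+/// // SubstNonTypeTemplateParmPackExpr carrying the argument pack {1, 2}.
+/// \endcode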
+class SubstNonTypeTemplateParmPackExpr : public Expr {
+ /// \brief The non-type template parameter pack itself.
+ NonTypeTemplateParmDecl *Param;
+
+ /// \brief A pointer to the set of template arguments that this
+ /// parameter pack is instantiated with.
+ const TemplateArgument *Arguments;
+
+ /// \brief The number of template arguments in \c Arguments.
+ unsigned NumArguments;
+
+ /// \brief The location of the non-type template parameter pack reference.
+ SourceLocation NameLoc;
+
+ friend class ASTReader;
+ friend class ASTStmtReader;
+ explicit SubstNonTypeTemplateParmPackExpr(EmptyShell Empty)
+ : Expr(SubstNonTypeTemplateParmPackExprClass, Empty) { }
+
+public:
+ SubstNonTypeTemplateParmPackExpr(QualType T,
+ NonTypeTemplateParmDecl *Param,
+ SourceLocation NameLoc,
+ const TemplateArgument &ArgPack);
+
+ /// \brief Retrieve the non-type template parameter pack being substituted.
+ NonTypeTemplateParmDecl *getParameterPack() const { return Param; }
+
+ /// \brief Retrieve the location of the parameter pack name.
+ SourceLocation getParameterPackLocation() const { return NameLoc; }
+
+ /// \brief Retrieve the template argument pack containing the substituted
+ /// template arguments.
+ TemplateArgument getArgumentPack() const;
+
+ SourceRange getSourceRange() const LLVM_READONLY { return NameLoc; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == SubstNonTypeTemplateParmPackExprClass;
+ }
+ static bool classof(const SubstNonTypeTemplateParmPackExpr *) {
+ return true;
+ }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// \brief Represents a prvalue temporary that is written into memory so that
+/// a reference can bind to it.
+///
+/// Prvalue expressions are materialized when they need to have an address
+/// in memory for a reference to bind to. This happens when binding a
+/// reference to the result of a conversion, e.g.,
+///
+/// \code
+/// const int &r = 1.0;
+/// \endcode
+///
+/// Here, 1.0 is implicitly converted to an \c int. That resulting \c int is
+/// then materialized via a \c MaterializeTemporaryExpr, and the reference
+/// binds to the temporary. \c MaterializeTemporaryExprs are always glvalues
+/// (either an lvalue or an xvalue, depending on the kind of reference binding
+/// to it), maintaining the invariant that references always bind to glvalues.
+class MaterializeTemporaryExpr : public Expr {
+ /// \brief The temporary-generating expression whose value will be
+ /// materialized.
+ Stmt *Temporary;
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+
+public:
+ MaterializeTemporaryExpr(QualType T, Expr *Temporary,
+ bool BoundToLvalueReference)
+ : Expr(MaterializeTemporaryExprClass, T,
+ BoundToLvalueReference? VK_LValue : VK_XValue, OK_Ordinary,
+ Temporary->isTypeDependent(), Temporary->isValueDependent(),
+ Temporary->isInstantiationDependent(),
+ Temporary->containsUnexpandedParameterPack()),
+ Temporary(Temporary) { }
+
+ MaterializeTemporaryExpr(EmptyShell Empty)
+ : Expr(MaterializeTemporaryExprClass, Empty) { }
+
+ /// \brief Retrieve the temporary-generating subexpression whose value will
+ /// be materialized into a glvalue.
+ Expr *GetTemporaryExpr() const { return reinterpret_cast<Expr *>(Temporary); }
+
+ /// \brief Determine whether this materialized temporary is bound to an
+ /// lvalue reference; otherwise, it's bound to an rvalue reference.
+ bool isBoundToLvalueReference() const {
+ return getValueKind() == VK_LValue;
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return Temporary->getSourceRange();
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == MaterializeTemporaryExprClass;
+ }
+ static bool classof(const MaterializeTemporaryExpr *) {
+ return true;
+ }
+
+ // Iterators
+ child_range children() { return child_range(&Temporary, &Temporary + 1); }
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h b/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h
new file mode 100644
index 0000000..4bfd12c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/ExprObjC.h
@@ -0,0 +1,1541 @@
+//===--- ExprObjC.h - Classes for representing ObjC expressions -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ExprObjC interface and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_EXPROBJC_H
+#define LLVM_CLANG_AST_EXPROBJC_H
+
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/SelectorLocationsKind.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+ class IdentifierInfo;
+ class ASTContext;
+
+/// ObjCStringLiteral, used for Objective-C string literals
+/// i.e. @"foo".
+class ObjCStringLiteral : public Expr {
+ Stmt *String;
+ SourceLocation AtLoc;
+public:
+ ObjCStringLiteral(StringLiteral *SL, QualType T, SourceLocation L)
+ : Expr(ObjCStringLiteralClass, T, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ String(SL), AtLoc(L) {}
+ explicit ObjCStringLiteral(EmptyShell Empty)
+ : Expr(ObjCStringLiteralClass, Empty) {}
+
+ StringLiteral *getString() { return cast<StringLiteral>(String); }
+ const StringLiteral *getString() const { return cast<StringLiteral>(String); }
+ void setString(StringLiteral *S) { String = S; }
+
+ SourceLocation getAtLoc() const { return AtLoc; }
+ void setAtLoc(SourceLocation L) { AtLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(AtLoc, String->getLocEnd());
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCStringLiteralClass;
+ }
+ static bool classof(const ObjCStringLiteral *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&String, &String+1); }
+};
+
+/// ObjCBoolLiteralExpr - Objective-C Boolean Literal.
+///
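+/// A minimal illustrative sketch:
+///
+/// \code
+/// BOOL flag = __objc_yes; // the initializer is an ObjCBoolLiteralExpr
+/// \endcode
+///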
+class ObjCBoolLiteralExpr : public Expr {
+ bool Value;
+ SourceLocation Loc;
+public:
+ ObjCBoolLiteralExpr(bool val, QualType Ty, SourceLocation l) :
+ Expr(ObjCBoolLiteralExprClass, Ty, VK_RValue, OK_Ordinary, false, false,
+ false, false), Value(val), Loc(l) {}
+
+ explicit ObjCBoolLiteralExpr(EmptyShell Empty)
+ : Expr(ObjCBoolLiteralExprClass, Empty) { }
+
+ bool getValue() const { return Value; }
+ void setValue(bool V) { Value = V; }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(Loc); }
+
+ SourceLocation getLocation() const { return Loc; }
+ void setLocation(SourceLocation L) { Loc = L; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCBoolLiteralExprClass;
+ }
+ static bool classof(const ObjCBoolLiteralExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// ObjCNumericLiteral - used for objective-c numeric literals;
+/// as in: @42 or @true (c++/objc++) or @__yes (c/objc)
+class ObjCNumericLiteral : public Expr {
+ /// Number - expression AST node for the numeric literal
+ Stmt *Number;
+ ObjCMethodDecl *ObjCNumericLiteralMethod;
+ SourceLocation AtLoc;
+public:
+ ObjCNumericLiteral(Stmt *NL, QualType T, ObjCMethodDecl *method,
+ SourceLocation L)
+ : Expr(ObjCNumericLiteralClass, T, VK_RValue, OK_Ordinary,
+ false, false, false, false), Number(NL),
+ ObjCNumericLiteralMethod(method), AtLoc(L) {}
+ explicit ObjCNumericLiteral(EmptyShell Empty)
+ : Expr(ObjCNumericLiteralClass, Empty) {}
+
+ Expr *getNumber() { return cast<Expr>(Number); }
+ const Expr *getNumber() const { return cast<Expr>(Number); }
+
+ ObjCMethodDecl *getObjCNumericLiteralMethod() const {
+ return ObjCNumericLiteralMethod;
+ }
+
+ SourceLocation getAtLoc() const { return AtLoc; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(AtLoc, Number->getSourceRange().getEnd());
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCNumericLiteralClass;
+ }
+ static bool classof(const ObjCNumericLiteral *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&Number, &Number+1); }
+
+ friend class ASTStmtReader;
+};
+
+/// ObjCArrayLiteral - used for objective-c array containers; as in:
+/// @[@"Hello", NSApp, [NSNumber numberWithInt:42]];
+class ObjCArrayLiteral : public Expr {
+ unsigned NumElements;
+ SourceRange Range;
+ ObjCMethodDecl *ArrayWithObjectsMethod;
+
+ ObjCArrayLiteral(llvm::ArrayRef<Expr *> Elements,
+ QualType T, ObjCMethodDecl * Method,
+ SourceRange SR);
+
+ explicit ObjCArrayLiteral(EmptyShell Empty, unsigned NumElements)
+ : Expr(ObjCArrayLiteralClass, Empty), NumElements(NumElements) {}
+
+public:
+ static ObjCArrayLiteral *Create(ASTContext &C,
+ llvm::ArrayRef<Expr *> Elements,
+ QualType T, ObjCMethodDecl * Method,
+ SourceRange SR);
+
+ static ObjCArrayLiteral *CreateEmpty(ASTContext &C, unsigned NumElements);
+
+ SourceRange getSourceRange() const LLVM_READONLY { return Range; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCArrayLiteralClass;
+ }
+ static bool classof(const ObjCArrayLiteral *) { return true; }
+
+ /// \brief Retrieve elements of array of literals.
+ Expr **getElements() { return reinterpret_cast<Expr **>(this + 1); }
+
+ /// \brief Retrieve elements of array of literals.
+ const Expr * const *getElements() const {
+ return reinterpret_cast<const Expr * const*>(this + 1);
+ }
+
+ /// getNumElements - Return number of elements of objective-c array literal.
+ unsigned getNumElements() const { return NumElements; }
+
+ /// getElement - Return the Expr at the specified index.
+ Expr *getElement(unsigned Index) {
+ assert((Index < NumElements) && "Arg access out of range!");
+ return cast<Expr>(getElements()[Index]);
+ }
+ const Expr *getElement(unsigned Index) const {
+ assert((Index < NumElements) && "Arg access out of range!");
+ return cast<Expr>(getElements()[Index]);
+ }
+
+ ObjCMethodDecl *getArrayWithObjectsMethod() const {
+ return ArrayWithObjectsMethod;
+ }
+
+ // Iterators
+ child_range children() {
+ return child_range((Stmt **)getElements(),
+ (Stmt **)getElements() + NumElements);
+ }
+
+ friend class ASTStmtReader;
+};
+
+/// \brief An element in an Objective-C dictionary literal.
+///
+struct ObjCDictionaryElement {
+ /// \brief The key for the dictionary element.
+ Expr *Key;
+
+ /// \brief The value of the dictionary element.
+ Expr *Value;
+
+ /// \brief The location of the ellipsis, if this is a pack expansion.
+ SourceLocation EllipsisLoc;
+
+ /// \brief The number of elements this pack expansion will expand to, if
+ /// this is a pack expansion and is known.
+ llvm::Optional<unsigned> NumExpansions;
+
+ /// \brief Determines whether this dictionary element is a pack expansion.
+ bool isPackExpansion() const { return EllipsisLoc.isValid(); }
+};
+
+/// ObjCDictionaryLiteral - AST node to represent objective-c dictionary
+/// literals; as in: @{@"name" : NSUserName(), @"date" : [NSDate date] };
+class ObjCDictionaryLiteral : public Expr {
+ /// \brief Key/value pair used to store the key and value of a given element.
+ ///
+ /// Objects of this type are stored directly after the expression.
+ struct KeyValuePair {
+ Expr *Key;
+ Expr *Value;
+ };
+
+ /// \brief Data that describes an element that is a pack expansion, used if any
+ /// of the elements in the dictionary literal are pack expansions.
+ struct ExpansionData {
+ /// \brief The location of the ellipsis, if this element is a pack
+ /// expansion.
+ SourceLocation EllipsisLoc;
+
+ /// \brief If non-zero, the number of elements that this pack
+ /// expansion will expand to (+1).
+ unsigned NumExpansionsPlusOne;
+ };
+
+ /// \brief The number of elements in this dictionary literal.
+ unsigned NumElements : 31;
+
+ /// \brief Determine whether this dictionary literal has any pack expansions.
+ ///
+ /// If the dictionary literal has pack expansions, then there will
+ /// be an array of pack expansion data following the array of
+ /// key/value pairs, which provide the locations of the ellipses (if
+ /// any) and number of elements in the expansion (if known). If
+ /// there are no pack expansions, we optimize away this storage.
+ unsigned HasPackExpansions : 1;
+
+ SourceRange Range;
+ ObjCMethodDecl *DictWithObjectsMethod;
+
+ ObjCDictionaryLiteral(ArrayRef<ObjCDictionaryElement> VK,
+ bool HasPackExpansions,
+ QualType T, ObjCMethodDecl *method,
+ SourceRange SR);
+
+ explicit ObjCDictionaryLiteral(EmptyShell Empty, unsigned NumElements,
+ bool HasPackExpansions)
+ : Expr(ObjCDictionaryLiteralClass, Empty), NumElements(NumElements),
+ HasPackExpansions(HasPackExpansions) {}
+
+ KeyValuePair *getKeyValues() {
+ return reinterpret_cast<KeyValuePair *>(this + 1);
+ }
+
+ const KeyValuePair *getKeyValues() const {
+ return reinterpret_cast<const KeyValuePair *>(this + 1);
+ }
+
+ ExpansionData *getExpansionData() {
+ if (!HasPackExpansions)
+ return 0;
+
+ return reinterpret_cast<ExpansionData *>(getKeyValues() + NumElements);
+ }
+
+ const ExpansionData *getExpansionData() const {
+ if (!HasPackExpansions)
+ return 0;
+
+ return reinterpret_cast<const ExpansionData *>(getKeyValues()+NumElements);
+ }
+
+public:
+ static ObjCDictionaryLiteral *Create(ASTContext &C,
+ ArrayRef<ObjCDictionaryElement> VK,
+ bool HasPackExpansions,
+ QualType T, ObjCMethodDecl *method,
+ SourceRange SR);
+
+ static ObjCDictionaryLiteral *CreateEmpty(ASTContext &C,
+ unsigned NumElements,
+ bool HasPackExpansions);
+
+ /// getNumElements - Return number of elements of objective-c dictionary
+ /// literal.
+ unsigned getNumElements() const { return NumElements; }
+
+ ObjCDictionaryElement getKeyValueElement(unsigned Index) const {
+ assert((Index < NumElements) && "Arg access out of range!");
+ const KeyValuePair &KV = getKeyValues()[Index];
+ ObjCDictionaryElement Result = { KV.Key, KV.Value, SourceLocation(),
+ llvm::Optional<unsigned>() };
+ if (HasPackExpansions) {
+ const ExpansionData &Expansion = getExpansionData()[Index];
+ Result.EllipsisLoc = Expansion.EllipsisLoc;
+ if (Expansion.NumExpansionsPlusOne > 0)
+ Result.NumExpansions = Expansion.NumExpansionsPlusOne - 1;
+ }
+ return Result;
+ }
+
+ ObjCMethodDecl *getDictWithObjectsMethod() const
+ { return DictWithObjectsMethod; }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return Range; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCDictionaryLiteralClass;
+ }
+ static bool classof(const ObjCDictionaryLiteral *) { return true; }
+
+ // Iterators
+ child_range children() {
+ // Note: we're taking advantage of the layout of the KeyValuePair struct
+ // here. If that struct changes, this code will need to change as well.
+ return child_range(reinterpret_cast<Stmt **>(this + 1),
+ reinterpret_cast<Stmt **>(this + 1) + NumElements * 2);
+ }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+};
+
+
+/// ObjCEncodeExpr, used for @encode in Objective-C. @encode has the same type
+/// and behavior as StringLiteral except that the string initializer is obtained
+/// from ASTContext with the encoding type as an argument.
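+///
+/// A minimal illustrative sketch:
+///
+/// \code
+/// const char *enc = @encode(int); // the type encoding string, e.g. "i"
+/// \endcode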
+class ObjCEncodeExpr : public Expr {
+ TypeSourceInfo *EncodedType;
+ SourceLocation AtLoc, RParenLoc;
+public:
+ ObjCEncodeExpr(QualType T, TypeSourceInfo *EncodedType,
+ SourceLocation at, SourceLocation rp)
+ : Expr(ObjCEncodeExprClass, T, VK_LValue, OK_Ordinary,
+ EncodedType->getType()->isDependentType(),
+ EncodedType->getType()->isDependentType(),
+ EncodedType->getType()->isInstantiationDependentType(),
+ EncodedType->getType()->containsUnexpandedParameterPack()),
+ EncodedType(EncodedType), AtLoc(at), RParenLoc(rp) {}
+
+ explicit ObjCEncodeExpr(EmptyShell Empty) : Expr(ObjCEncodeExprClass, Empty){}
+
+
+ SourceLocation getAtLoc() const { return AtLoc; }
+ void setAtLoc(SourceLocation L) { AtLoc = L; }
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
+ QualType getEncodedType() const { return EncodedType->getType(); }
+
+ TypeSourceInfo *getEncodedTypeSourceInfo() const { return EncodedType; }
+ void setEncodedTypeSourceInfo(TypeSourceInfo *EncType) {
+ EncodedType = EncType;
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(AtLoc, RParenLoc);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCEncodeExprClass;
+ }
+ static bool classof(const ObjCEncodeExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// ObjCSelectorExpr used for @selector in Objective-C.
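+///
+/// A minimal illustrative sketch:
+///
+/// \code
+/// SEL s = @selector(initWithString:); // an ObjCSelectorExpr of type SEL
+/// \endcode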
+class ObjCSelectorExpr : public Expr {
+ Selector SelName;
+ SourceLocation AtLoc, RParenLoc;
+public:
+ ObjCSelectorExpr(QualType T, Selector selInfo,
+ SourceLocation at, SourceLocation rp)
+ : Expr(ObjCSelectorExprClass, T, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ SelName(selInfo), AtLoc(at), RParenLoc(rp){}
+ explicit ObjCSelectorExpr(EmptyShell Empty)
+ : Expr(ObjCSelectorExprClass, Empty) {}
+
+ Selector getSelector() const { return SelName; }
+ void setSelector(Selector S) { SelName = S; }
+
+ SourceLocation getAtLoc() const { return AtLoc; }
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setAtLoc(SourceLocation L) { AtLoc = L; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(AtLoc, RParenLoc);
+ }
+
+ /// getNumArgs - Return the number of arguments that the selector takes.
+ unsigned getNumArgs() const { return SelName.getNumArgs(); }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCSelectorExprClass;
+ }
+ static bool classof(const ObjCSelectorExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// ObjCProtocolExpr used for protocol expression in Objective-C. This is used
+/// as: @protocol(foo), as in:
+/// [obj conformsToProtocol:@protocol(foo)]
+/// The return type is "Protocol*".
+class ObjCProtocolExpr : public Expr {
+ ObjCProtocolDecl *TheProtocol;
+ SourceLocation AtLoc, RParenLoc;
+public:
+ ObjCProtocolExpr(QualType T, ObjCProtocolDecl *protocol,
+ SourceLocation at, SourceLocation rp)
+ : Expr(ObjCProtocolExprClass, T, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ TheProtocol(protocol), AtLoc(at), RParenLoc(rp) {}
+ explicit ObjCProtocolExpr(EmptyShell Empty)
+ : Expr(ObjCProtocolExprClass, Empty) {}
+
+ ObjCProtocolDecl *getProtocol() const { return TheProtocol; }
+ void setProtocol(ObjCProtocolDecl *P) { TheProtocol = P; }
+
+ SourceLocation getAtLoc() const { return AtLoc; }
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setAtLoc(SourceLocation L) { AtLoc = L; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(AtLoc, RParenLoc);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCProtocolExprClass;
+ }
+ static bool classof(const ObjCProtocolExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// ObjCIvarRefExpr - A reference to an ObjC instance variable.
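+///
+/// A minimal illustrative sketch (hypothetical class \c Point):
+///
+/// \code
+/// @interface Point : NSObject { @public int _x; }
+/// @end
+/// int readX(Point *p) { return p->_x; } // 'p->_x' is an ObjCIvarRefExpr
+/// \endcode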
+class ObjCIvarRefExpr : public Expr {
+ ObjCIvarDecl *D;
+ Stmt *Base;
+ SourceLocation Loc;
+ bool IsArrow:1; // True if this is "X->F", false if this is "X.F".
+ bool IsFreeIvar:1; // True if ivar reference has no base (self assumed).
+
+public:
+ ObjCIvarRefExpr(ObjCIvarDecl *d, QualType t,
+ SourceLocation l, Expr *base,
+ bool arrow = false, bool freeIvar = false) :
+ Expr(ObjCIvarRefExprClass, t, VK_LValue, OK_Ordinary,
+ /*TypeDependent=*/false, base->isValueDependent(),
+ base->isInstantiationDependent(),
+ base->containsUnexpandedParameterPack()),
+ D(d), Base(base), Loc(l), IsArrow(arrow), IsFreeIvar(freeIvar) {}
+
+ explicit ObjCIvarRefExpr(EmptyShell Empty)
+ : Expr(ObjCIvarRefExprClass, Empty) {}
+
+ ObjCIvarDecl *getDecl() { return D; }
+ const ObjCIvarDecl *getDecl() const { return D; }
+ void setDecl(ObjCIvarDecl *d) { D = d; }
+
+ const Expr *getBase() const { return cast<Expr>(Base); }
+ Expr *getBase() { return cast<Expr>(Base); }
+ void setBase(Expr * base) { Base = base; }
+
+ bool isArrow() const { return IsArrow; }
+ bool isFreeIvar() const { return IsFreeIvar; }
+ void setIsArrow(bool A) { IsArrow = A; }
+ void setIsFreeIvar(bool A) { IsFreeIvar = A; }
+
+ SourceLocation getLocation() const { return Loc; }
+ void setLocation(SourceLocation L) { Loc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return isFreeIvar() ? SourceRange(Loc)
+ : SourceRange(getBase()->getLocStart(), Loc);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCIvarRefExprClass;
+ }
+ static bool classof(const ObjCIvarRefExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&Base, &Base+1); }
+};
+
+/// ObjCPropertyRefExpr - A dot-syntax expression to access an ObjC
+/// property.
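+///
+/// A minimal illustrative sketch (hypothetical class \c Widget):
+///
+/// \code
+/// @interface Widget : NSObject
+/// @property (assign) int size;
+/// @end
+/// int f(Widget *w) { return w.size; } // 'w.size' is an ObjCPropertyRefExpr
+/// \endcode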
+class ObjCPropertyRefExpr : public Expr {
+private:
+ /// If the bool is true, this is an implicit property reference; the
+ /// pointer is an (optional) ObjCMethodDecl and Setter may be set.
+ /// If the bool is false, this is an explicit property reference;
+ /// the pointer is an ObjCPropertyDecl and Setter is always null.
+ llvm::PointerIntPair<NamedDecl*, 1, bool> PropertyOrGetter;
+
+ /// \brief Indicates whether the property reference will result in a message
+ /// to the getter, the setter, or both.
+ /// This applies to both implicit and explicit property references.
+ enum MethodRefFlags {
+ MethodRef_None = 0,
+ MethodRef_Getter = 0x1,
+ MethodRef_Setter = 0x2
+ };
+
+ /// \brief Contains the Setter method pointer and MethodRefFlags bit flags.
+ llvm::PointerIntPair<ObjCMethodDecl *, 2, unsigned> SetterAndMethodRefFlags;
+
+ // FIXME: Maybe we should store the property identifier here,
+ // because it's not rederivable from the other data when there's an
+ // implicit property with no getter (because the 'foo' -> 'setFoo:'
+ // transformation is lossy on the first character).
+
+ SourceLocation IdLoc;
+
+ /// \brief When the receiver in property access is 'super', this is
+ /// the location of the 'super' keyword. When it's an interface,
+ /// this is that interface.
+ SourceLocation ReceiverLoc;
+ llvm::PointerUnion3<Stmt*, const Type*, ObjCInterfaceDecl*> Receiver;
+
+public:
+ ObjCPropertyRefExpr(ObjCPropertyDecl *PD, QualType t,
+ ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation l, Expr *base)
+ : Expr(ObjCPropertyRefExprClass, t, VK, OK,
+ /*TypeDependent=*/false, base->isValueDependent(),
+ base->isInstantiationDependent(),
+ base->containsUnexpandedParameterPack()),
+ PropertyOrGetter(PD, false), SetterAndMethodRefFlags(),
+ IdLoc(l), ReceiverLoc(), Receiver(base) {
+ assert(t->isSpecificPlaceholderType(BuiltinType::PseudoObject));
+ }
+
+ ObjCPropertyRefExpr(ObjCPropertyDecl *PD, QualType t,
+ ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation l, SourceLocation sl, QualType st)
+ : Expr(ObjCPropertyRefExprClass, t, VK, OK,
+ /*TypeDependent=*/false, false, st->isInstantiationDependentType(),
+ st->containsUnexpandedParameterPack()),
+ PropertyOrGetter(PD, false), SetterAndMethodRefFlags(),
+ IdLoc(l), ReceiverLoc(sl), Receiver(st.getTypePtr()) {
+ assert(t->isSpecificPlaceholderType(BuiltinType::PseudoObject));
+ }
+
+ ObjCPropertyRefExpr(ObjCMethodDecl *Getter, ObjCMethodDecl *Setter,
+ QualType T, ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation IdLoc, Expr *Base)
+ : Expr(ObjCPropertyRefExprClass, T, VK, OK, false,
+ Base->isValueDependent(), Base->isInstantiationDependent(),
+ Base->containsUnexpandedParameterPack()),
+ PropertyOrGetter(Getter, true), SetterAndMethodRefFlags(Setter, 0),
+ IdLoc(IdLoc), ReceiverLoc(), Receiver(Base) {
+ assert(T->isSpecificPlaceholderType(BuiltinType::PseudoObject));
+ }
+
+ ObjCPropertyRefExpr(ObjCMethodDecl *Getter, ObjCMethodDecl *Setter,
+ QualType T, ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation IdLoc,
+ SourceLocation SuperLoc, QualType SuperTy)
+ : Expr(ObjCPropertyRefExprClass, T, VK, OK, false, false, false, false),
+ PropertyOrGetter(Getter, true), SetterAndMethodRefFlags(Setter, 0),
+ IdLoc(IdLoc), ReceiverLoc(SuperLoc), Receiver(SuperTy.getTypePtr()) {
+ assert(T->isSpecificPlaceholderType(BuiltinType::PseudoObject));
+ }
+
+ ObjCPropertyRefExpr(ObjCMethodDecl *Getter, ObjCMethodDecl *Setter,
+ QualType T, ExprValueKind VK, ExprObjectKind OK,
+ SourceLocation IdLoc,
+ SourceLocation ReceiverLoc, ObjCInterfaceDecl *Receiver)
+ : Expr(ObjCPropertyRefExprClass, T, VK, OK, false, false, false, false),
+ PropertyOrGetter(Getter, true), SetterAndMethodRefFlags(Setter, 0),
+ IdLoc(IdLoc), ReceiverLoc(ReceiverLoc), Receiver(Receiver) {
+ assert(T->isSpecificPlaceholderType(BuiltinType::PseudoObject));
+ }
+
+ explicit ObjCPropertyRefExpr(EmptyShell Empty)
+ : Expr(ObjCPropertyRefExprClass, Empty) {}
+
+ bool isImplicitProperty() const { return PropertyOrGetter.getInt(); }
+ bool isExplicitProperty() const { return !PropertyOrGetter.getInt(); }
+
+ ObjCPropertyDecl *getExplicitProperty() const {
+ assert(!isImplicitProperty());
+ return cast<ObjCPropertyDecl>(PropertyOrGetter.getPointer());
+ }
+
+ ObjCMethodDecl *getImplicitPropertyGetter() const {
+ assert(isImplicitProperty());
+ return cast_or_null<ObjCMethodDecl>(PropertyOrGetter.getPointer());
+ }
+
+ ObjCMethodDecl *getImplicitPropertySetter() const {
+ assert(isImplicitProperty());
+ return SetterAndMethodRefFlags.getPointer();
+ }
+
+ Selector getGetterSelector() const {
+ if (isImplicitProperty())
+ return getImplicitPropertyGetter()->getSelector();
+ return getExplicitProperty()->getGetterName();
+ }
+
+ Selector getSetterSelector() const {
+ if (isImplicitProperty())
+ return getImplicitPropertySetter()->getSelector();
+ return getExplicitProperty()->getSetterName();
+ }
+
+ /// \brief True if the property reference will result in a message to the
+ /// getter.
+ /// This applies to both implicit and explicit property references.
+ bool isMessagingGetter() const {
+ return SetterAndMethodRefFlags.getInt() & MethodRef_Getter;
+ }
+
+ /// \brief True if the property reference will result in a message to the
+ /// setter.
+ /// This applies to both implicit and explicit property references.
+ bool isMessagingSetter() const {
+ return SetterAndMethodRefFlags.getInt() & MethodRef_Setter;
+ }
+
+ void setIsMessagingGetter(bool val = true) {
+ setMethodRefFlag(MethodRef_Getter, val);
+ }
+
+ void setIsMessagingSetter(bool val = true) {
+ setMethodRefFlag(MethodRef_Setter, val);
+ }
+
+ const Expr *getBase() const {
+ return cast<Expr>(Receiver.get<Stmt*>());
+ }
+ Expr *getBase() {
+ return cast<Expr>(Receiver.get<Stmt*>());
+ }
+
+ SourceLocation getLocation() const { return IdLoc; }
+
+ SourceLocation getReceiverLocation() const { return ReceiverLoc; }
+ QualType getSuperReceiverType() const {
+ return QualType(Receiver.get<const Type*>(), 0);
+ }
+ QualType getGetterResultType() const {
+ QualType ResultType;
+ if (isExplicitProperty()) {
+ const ObjCPropertyDecl *PDecl = getExplicitProperty();
+ if (const ObjCMethodDecl *Getter = PDecl->getGetterMethodDecl())
+ ResultType = Getter->getResultType();
+ else
+ ResultType = PDecl->getType();
+ } else {
+ const ObjCMethodDecl *Getter = getImplicitPropertyGetter();
+ if (Getter)
+ ResultType = Getter->getResultType(); // with reference!
+ }
+ return ResultType;
+ }
+
+ QualType getSetterArgType() const {
+ QualType ArgType;
+ if (isImplicitProperty()) {
+ const ObjCMethodDecl *Setter = getImplicitPropertySetter();
+ ObjCMethodDecl::param_const_iterator P = Setter->param_begin();
+ ArgType = (*P)->getType();
+ } else {
+ if (ObjCPropertyDecl *PDecl = getExplicitProperty())
+ if (const ObjCMethodDecl *Setter = PDecl->getSetterMethodDecl()) {
+ ObjCMethodDecl::param_const_iterator P = Setter->param_begin();
+ ArgType = (*P)->getType();
+ }
+ if (ArgType.isNull())
+ ArgType = getType();
+ }
+ return ArgType;
+ }
+
+ ObjCInterfaceDecl *getClassReceiver() const {
+ return Receiver.get<ObjCInterfaceDecl*>();
+ }
+ bool isObjectReceiver() const { return Receiver.is<Stmt*>(); }
+ bool isSuperReceiver() const { return Receiver.is<const Type*>(); }
+ bool isClassReceiver() const { return Receiver.is<ObjCInterfaceDecl*>(); }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange((isObjectReceiver() ? getBase()->getLocStart()
+ : getReceiverLocation()),
+ IdLoc);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCPropertyRefExprClass;
+ }
+ static bool classof(const ObjCPropertyRefExpr *) { return true; }
+
+ // Iterators
+ child_range children() {
+ if (Receiver.is<Stmt*>()) {
+ Stmt **begin = reinterpret_cast<Stmt**>(&Receiver); // hack!
+ return child_range(begin, begin+1);
+ }
+ return child_range();
+ }
+
+private:
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+ void setExplicitProperty(ObjCPropertyDecl *D, unsigned methRefFlags) {
+ PropertyOrGetter.setPointer(D);
+ PropertyOrGetter.setInt(false);
+ SetterAndMethodRefFlags.setPointer(0);
+ SetterAndMethodRefFlags.setInt(methRefFlags);
+ }
+ void setImplicitProperty(ObjCMethodDecl *Getter, ObjCMethodDecl *Setter,
+ unsigned methRefFlags) {
+ PropertyOrGetter.setPointer(Getter);
+ PropertyOrGetter.setInt(true);
+ SetterAndMethodRefFlags.setPointer(Setter);
+ SetterAndMethodRefFlags.setInt(methRefFlags);
+ }
+ void setBase(Expr *Base) { Receiver = Base; }
+ void setSuperReceiver(QualType T) { Receiver = T.getTypePtr(); }
+ void setClassReceiver(ObjCInterfaceDecl *D) { Receiver = D; }
+
+ void setLocation(SourceLocation L) { IdLoc = L; }
+ void setReceiverLocation(SourceLocation Loc) { ReceiverLoc = Loc; }
+
+ void setMethodRefFlag(MethodRefFlags flag, bool val) {
+ unsigned f = SetterAndMethodRefFlags.getInt();
+ if (val)
+ f |= flag;
+ else
+ f &= ~flag;
+ SetterAndMethodRefFlags.setInt(f);
+ }
+};
+
+/// ObjCSubscriptRefExpr - used for array and dictionary subscripting.
+/// array[4] = array[3]; dictionary[key] = dictionary[alt_key];
+///
+class ObjCSubscriptRefExpr : public Expr {
+ // Location of ']' in an indexing expression.
+ SourceLocation RBracket;
+ // The base (array or dictionary) and key expressions. For arrays, the key
+ // is a numeric index expression; for dictionaries, it is an Objective-C
+ // object pointer expression.
+ enum { BASE, KEY, END_EXPR };
+ Stmt* SubExprs[END_EXPR];
+
+ ObjCMethodDecl *GetAtIndexMethodDecl;
+
+ // This is null for immutable objects, and also when the
+ // ObjCSubscriptRefExpr is only used to read from an indexed object.
+ ObjCMethodDecl *SetAtIndexMethodDecl;
+
+public:
+
+ ObjCSubscriptRefExpr(Expr *base, Expr *key, QualType T,
+ ExprValueKind VK, ExprObjectKind OK,
+ ObjCMethodDecl *getMethod,
+ ObjCMethodDecl *setMethod, SourceLocation RB)
+ : Expr(ObjCSubscriptRefExprClass, T, VK, OK,
+ base->isTypeDependent() || key->isTypeDependent(),
+ base->isValueDependent() || key->isValueDependent(),
+ base->isInstantiationDependent() || key->isInstantiationDependent(),
+ (base->containsUnexpandedParameterPack() ||
+ key->containsUnexpandedParameterPack())),
+ RBracket(RB),
+ GetAtIndexMethodDecl(getMethod),
+ SetAtIndexMethodDecl(setMethod)
+ {SubExprs[BASE] = base; SubExprs[KEY] = key;}
+
+ explicit ObjCSubscriptRefExpr(EmptyShell Empty)
+ : Expr(ObjCSubscriptRefExprClass, Empty) {}
+
+ static ObjCSubscriptRefExpr *Create(ASTContext &C,
+ Expr *base,
+ Expr *key, QualType T,
+ ObjCMethodDecl *getMethod,
+ ObjCMethodDecl *setMethod,
+ SourceLocation RB);
+
+ SourceLocation getRBracket() const { return RBracket; }
+ void setRBracket(SourceLocation RB) { RBracket = RB; }
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(SubExprs[BASE]->getLocStart(), RBracket);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCSubscriptRefExprClass;
+ }
+ static bool classof(const ObjCSubscriptRefExpr *) { return true; }
+
+ Expr *getBaseExpr() const { return cast<Expr>(SubExprs[BASE]); }
+ void setBaseExpr(Stmt *S) { SubExprs[BASE] = S; }
+
+ Expr *getKeyExpr() const { return cast<Expr>(SubExprs[KEY]); }
+ void setKeyExpr(Stmt *S) { SubExprs[KEY] = S; }
+
+ ObjCMethodDecl *getAtIndexMethodDecl() const {
+ return GetAtIndexMethodDecl;
+ }
+
+ ObjCMethodDecl *setAtIndexMethodDecl() const {
+ return SetAtIndexMethodDecl;
+ }
+
+ bool isArraySubscriptRefExpr() const {
+ return getKeyExpr()->getType()->isIntegralOrEnumerationType();
+ }
+
+ child_range children() {
+ return child_range(SubExprs, SubExprs+END_EXPR);
+ }
+private:
+ friend class ASTStmtReader;
+};
+
+
+/// \brief An expression that sends a message to the given Objective-C
+/// object or class.
+///
+/// The following contains two message send expressions:
+///
+/// \code
+/// [[NSString alloc] initWithString:@"Hello"]
+/// \endcode
+///
+/// The innermost message send invokes the "alloc" class method on the
+/// NSString class, while the outermost message send invokes the
+/// "initWithString" instance method on the object returned from
+/// NSString's "alloc". In all, an Objective-C message send can take
+/// on four different (although related) forms:
+///
+/// 1. Send to an object instance.
+/// 2. Send to a class.
+/// 3. Send to the superclass instance of the current class.
+/// 4. Send to the superclass of the current class.
+///
+/// All four kinds of message sends are modeled by the ObjCMessageExpr
+/// class, and can be distinguished via \c getReceiverKind(). Example:
+///
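+/// A sketch of the four forms (hypothetical class \c Widget):
+///
+/// \code
+/// id w = [Widget alloc];   // (2) send to a class
+/// w = [w init];            // (1) send to an object instance
+/// // In an instance method of a subclass: [super init]  is form (3).
+/// // In a class method of a subclass:     [super alloc] is form (4).
+/// \endcode
+///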
+class ObjCMessageExpr : public Expr {
+ /// \brief Stores either the selector that this message is sending
+ /// to (when \c HasMethod is zero) or an \c ObjCMethodDecl pointer
+ /// referring to the method that we type-checked against.
+ uintptr_t SelectorOrMethod;
+
+ enum { NumArgsBitWidth = 16 };
+
+ /// \brief The number of arguments in the message send, not
+ /// including the receiver.
+ unsigned NumArgs : NumArgsBitWidth;
+
+ void setNumArgs(unsigned Num) {
+ assert((Num >> NumArgsBitWidth) == 0 && "Num of args is out of range!");
+ NumArgs = Num;
+ }
+
+ /// \brief The kind of message send this is, which is one of the
+ /// ReceiverKind values.
+ ///
+ /// We pad this out to a byte to avoid excessive masking and shifting.
+ unsigned Kind : 8;
+
+ /// \brief Whether we have an actual method prototype in \c
+ /// SelectorOrMethod.
+ ///
+ /// When non-zero, we have a method declaration; otherwise, we just
+ /// have a selector.
+ unsigned HasMethod : 1;
+
+ /// \brief Whether this message send is a "delegate init call",
+ /// i.e. a call of an init method on self from within an init method.
+ unsigned IsDelegateInitCall : 1;
+
+ /// \brief Whether this message send was implicitly generated by
+ /// the implementation rather than explicitly written by the user.
+ unsigned IsImplicit : 1;
+
+ /// \brief Whether the locations of the selector identifiers are in a
+ /// "standard" position, a enum SelectorLocationsKind.
+ unsigned SelLocsKind : 2;
+
+ /// \brief When the message expression is a send to 'super', this is
+ /// the location of the 'super' keyword.
+ SourceLocation SuperLoc;
+
+ /// \brief The source locations of the open and close square
+ /// brackets ('[' and ']', respectively).
+ SourceLocation LBracLoc, RBracLoc;
+
+ ObjCMessageExpr(EmptyShell Empty, unsigned NumArgs)
+ : Expr(ObjCMessageExprClass, Empty), SelectorOrMethod(0), Kind(0),
+ HasMethod(0), IsDelegateInitCall(0), IsImplicit(0), SelLocsKind(0) {
+ setNumArgs(NumArgs);
+ }
+
+ ObjCMessageExpr(QualType T, ExprValueKind VK,
+ SourceLocation LBracLoc,
+ SourceLocation SuperLoc,
+ bool IsInstanceSuper,
+ QualType SuperType,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit);
+ ObjCMessageExpr(QualType T, ExprValueKind VK,
+ SourceLocation LBracLoc,
+ TypeSourceInfo *Receiver,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit);
+ ObjCMessageExpr(QualType T, ExprValueKind VK,
+ SourceLocation LBracLoc,
+ Expr *Receiver,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit);
+
+ void initArgsAndSelLocs(ArrayRef<Expr *> Args,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK);
+
+ /// \brief Retrieve the pointer value of the message receiver.
+ void *getReceiverPointer() const {
+ return *const_cast<void **>(
+ reinterpret_cast<const void * const*>(this + 1));
+ }
+
+ /// \brief Set the pointer value of the message receiver.
+ void setReceiverPointer(void *Value) {
+ *reinterpret_cast<void **>(this + 1) = Value;
+ }
+
+ SelectorLocationsKind getSelLocsKind() const {
+ return (SelectorLocationsKind)SelLocsKind;
+ }
+ bool hasStandardSelLocs() const {
+ return getSelLocsKind() != SelLoc_NonStandard;
+ }
+
+ /// \brief Get a pointer to the stored selector identifier locations array.
+ /// No locations will be stored if HasStandardSelLocs is true.
+ SourceLocation *getStoredSelLocs() {
+ return reinterpret_cast<SourceLocation*>(getArgs() + getNumArgs());
+ }
+ const SourceLocation *getStoredSelLocs() const {
+ return reinterpret_cast<const SourceLocation*>(getArgs() + getNumArgs());
+ }
+
+ /// \brief Get the number of stored selector identifier locations.
+ /// No locations will be stored if HasStandardSelLocs is true.
+ unsigned getNumStoredSelLocs() const {
+ if (hasStandardSelLocs())
+ return 0;
+ return getNumSelectorLocs();
+ }
+
+ static ObjCMessageExpr *alloc(ASTContext &C,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBraceLoc,
+ ArrayRef<SourceLocation> SelLocs,
+ Selector Sel,
+ SelectorLocationsKind &SelLocsK);
+ static ObjCMessageExpr *alloc(ASTContext &C,
+ unsigned NumArgs,
+ unsigned NumStoredSelLocs);
+
+public:
+ /// \brief The kind of receiver this message is sending to.
+ enum ReceiverKind {
+ /// \brief The receiver is a class.
+ Class = 0,
+ /// \brief The receiver is an object instance.
+ Instance,
+ /// \brief The receiver is a superclass.
+ SuperClass,
+ /// \brief The receiver is the instance of the superclass object.
+ SuperInstance
+ };
+
+ /// \brief Create a message send to super.
+ ///
+ /// \param Context The ASTContext in which this expression will be created.
+ ///
+ /// \param T The result type of this message.
+ ///
+ /// \param VK The value kind of this message. A message returning
+ /// an l-value or r-value reference will be an l-value or x-value,
+ /// respectively.
+ ///
+ /// \param LBrac The location of the open square bracket '['.
+ ///
+ /// \param SuperLoc The location of the "super" keyword.
+ ///
+ /// \param IsInstanceSuper Whether this is an instance "super"
+ /// message (otherwise, it's a class "super" message).
+ ///
+ /// \param Sel The selector used to determine which method gets called.
+ ///
+ /// \param Method The Objective-C method against which this message
+ /// send was type-checked. May be NULL.
+ ///
+ /// \param Args The message send arguments.
+ ///
+ /// \param RBracLoc The location of the closing square bracket ']'.
+ static ObjCMessageExpr *Create(ASTContext &Context, QualType T,
+ ExprValueKind VK,
+ SourceLocation LBracLoc,
+ SourceLocation SuperLoc,
+ bool IsInstanceSuper,
+ QualType SuperType,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit);
+
+ /// \brief Create a class message send.
+ ///
+ /// \param Context The ASTContext in which this expression will be created.
+ ///
+ /// \param T The result type of this message.
+ ///
+ /// \param VK The value kind of this message. A message returning
+  /// an l-value or r-value reference will be an l-value or x-value,
+ /// respectively.
+ ///
+  /// \param LBracLoc The location of the open square bracket '['.
+ ///
+ /// \param Receiver The type of the receiver, including
+ /// source-location information.
+ ///
+ /// \param Sel The selector used to determine which method gets called.
+ ///
+ /// \param Method The Objective-C method against which this message
+ /// send was type-checked. May be NULL.
+ ///
+ /// \param Args The message send arguments.
+ ///
+ /// \param RBracLoc The location of the closing square bracket ']'.
+ static ObjCMessageExpr *Create(ASTContext &Context, QualType T,
+ ExprValueKind VK,
+ SourceLocation LBracLoc,
+ TypeSourceInfo *Receiver,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit);
+
+ /// \brief Create an instance message send.
+ ///
+ /// \param Context The ASTContext in which this expression will be created.
+ ///
+ /// \param T The result type of this message.
+ ///
+ /// \param VK The value kind of this message. A message returning
+  /// an l-value or r-value reference will be an l-value or x-value,
+ /// respectively.
+ ///
+  /// \param LBracLoc The location of the open square bracket '['.
+ ///
+ /// \param Receiver The expression used to produce the object that
+ /// will receive this message.
+ ///
+ /// \param Sel The selector used to determine which method gets called.
+ ///
+ /// \param Method The Objective-C method against which this message
+ /// send was type-checked. May be NULL.
+ ///
+ /// \param Args The message send arguments.
+ ///
+ /// \param RBracLoc The location of the closing square bracket ']'.
+ static ObjCMessageExpr *Create(ASTContext &Context, QualType T,
+ ExprValueKind VK,
+ SourceLocation LBracLoc,
+ Expr *Receiver,
+ Selector Sel,
+                                 ArrayRef<SourceLocation> SelLocs,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit);
+
+ /// \brief Create an empty Objective-C message expression, to be
+ /// filled in by subsequent calls.
+ ///
+ /// \param Context The context in which the message send will be created.
+ ///
+ /// \param NumArgs The number of message arguments, not including
+ /// the receiver.
+ static ObjCMessageExpr *CreateEmpty(ASTContext &Context,
+ unsigned NumArgs,
+ unsigned NumStoredSelLocs);
+
+ /// \brief Indicates whether the message send was implicitly
+ /// generated by the implementation. If false, it was written explicitly
+ /// in the source code.
+ bool isImplicit() const { return IsImplicit; }
+
+ /// \brief Determine the kind of receiver that this message is being
+ /// sent to.
+ ReceiverKind getReceiverKind() const { return (ReceiverKind)Kind; }
+
+ /// \brief Source range of the receiver.
+ SourceRange getReceiverRange() const;
+
+ /// \brief Determine whether this is an instance message to either a
+ /// computed object or to super.
+ bool isInstanceMessage() const {
+ return getReceiverKind() == Instance || getReceiverKind() == SuperInstance;
+ }
+
+  /// \brief Determine whether this is a class message to either a
+ /// specified class or to super.
+ bool isClassMessage() const {
+ return getReceiverKind() == Class || getReceiverKind() == SuperClass;
+ }
+
+  /// \brief Returns the receiver of an instance message.
+  ///
+  /// \returns The object expression for an instance message, or
+  /// NULL for a message that is not an instance message.
+ Expr *getInstanceReceiver() {
+ if (getReceiverKind() == Instance)
+ return static_cast<Expr *>(getReceiverPointer());
+
+ return 0;
+ }
+ const Expr *getInstanceReceiver() const {
+ return const_cast<ObjCMessageExpr*>(this)->getInstanceReceiver();
+ }
+
+ /// \brief Turn this message send into an instance message that
+ /// computes the receiver object with the given expression.
+ void setInstanceReceiver(Expr *rec) {
+ Kind = Instance;
+ setReceiverPointer(rec);
+ }
+
+ /// \brief Returns the type of a class message send, or NULL if the
+ /// message is not a class message.
+ QualType getClassReceiver() const {
+ if (TypeSourceInfo *TSInfo = getClassReceiverTypeInfo())
+ return TSInfo->getType();
+
+ return QualType();
+ }
+
+ /// \brief Returns a type-source information of a class message
+ /// send, or NULL if the message is not a class message.
+ TypeSourceInfo *getClassReceiverTypeInfo() const {
+ if (getReceiverKind() == Class)
+ return reinterpret_cast<TypeSourceInfo *>(getReceiverPointer());
+ return 0;
+ }
+
+ void setClassReceiver(TypeSourceInfo *TSInfo) {
+ Kind = Class;
+ setReceiverPointer(TSInfo);
+ }
+
+ /// \brief Retrieve the location of the 'super' keyword for a class
+ /// or instance message to 'super', otherwise an invalid source location.
+ SourceLocation getSuperLoc() const {
+ if (getReceiverKind() == SuperInstance || getReceiverKind() == SuperClass)
+ return SuperLoc;
+
+ return SourceLocation();
+ }
+
+ /// \brief Retrieve the Objective-C interface to which this message
+ /// is being directed, if known.
+ ///
+ /// This routine cross-cuts all of the different kinds of message
+ /// sends to determine what the underlying (statically known) type
+ /// of the receiver will be; use \c getReceiverKind() to determine
+ /// whether the message is a class or an instance method, whether it
+ /// is a send to super or not, etc.
+ ///
+ /// \returns The Objective-C interface if known, otherwise NULL.
+ ObjCInterfaceDecl *getReceiverInterface() const;
+
+ /// \brief Retrieve the type referred to by 'super'.
+ ///
+  /// The returned type will either be an ObjCInterfaceType (for a
+  /// class message to super) or an ObjCObjectPointerType that refers
+  /// to a class (for an instance message to super).
+ QualType getSuperType() const {
+ if (getReceiverKind() == SuperInstance || getReceiverKind() == SuperClass)
+ return QualType::getFromOpaquePtr(getReceiverPointer());
+
+ return QualType();
+ }
+
+ void setSuper(SourceLocation Loc, QualType T, bool IsInstanceSuper) {
+ Kind = IsInstanceSuper? SuperInstance : SuperClass;
+ SuperLoc = Loc;
+ setReceiverPointer(T.getAsOpaquePtr());
+ }
+
+ Selector getSelector() const;
+
+ void setSelector(Selector S) {
+ HasMethod = false;
+ SelectorOrMethod = reinterpret_cast<uintptr_t>(S.getAsOpaquePtr());
+ }
+
+ const ObjCMethodDecl *getMethodDecl() const {
+ if (HasMethod)
+ return reinterpret_cast<const ObjCMethodDecl *>(SelectorOrMethod);
+
+ return 0;
+ }
+
+ ObjCMethodDecl *getMethodDecl() {
+ if (HasMethod)
+ return reinterpret_cast<ObjCMethodDecl *>(SelectorOrMethod);
+
+ return 0;
+ }
+
+ void setMethodDecl(ObjCMethodDecl *MD) {
+ HasMethod = true;
+ SelectorOrMethod = reinterpret_cast<uintptr_t>(MD);
+ }
+
+ ObjCMethodFamily getMethodFamily() const {
+ if (HasMethod) return getMethodDecl()->getMethodFamily();
+ return getSelector().getMethodFamily();
+ }
+
+ /// \brief Return the number of actual arguments in this message,
+ /// not counting the receiver.
+ unsigned getNumArgs() const { return NumArgs; }
+
+ /// \brief Retrieve the arguments to this message, not including the
+ /// receiver.
+ Expr **getArgs() {
+ return reinterpret_cast<Expr **>(this + 1) + 1;
+ }
+ const Expr * const *getArgs() const {
+ return reinterpret_cast<const Expr * const *>(this + 1) + 1;
+ }
+
+ /// getArg - Return the specified argument.
+ Expr *getArg(unsigned Arg) {
+ assert(Arg < NumArgs && "Arg access out of range!");
+ return cast<Expr>(getArgs()[Arg]);
+ }
+ const Expr *getArg(unsigned Arg) const {
+ assert(Arg < NumArgs && "Arg access out of range!");
+ return cast<Expr>(getArgs()[Arg]);
+ }
+ /// setArg - Set the specified argument.
+ void setArg(unsigned Arg, Expr *ArgExpr) {
+ assert(Arg < NumArgs && "Arg access out of range!");
+ getArgs()[Arg] = ArgExpr;
+ }
+
+ /// isDelegateInitCall - Answers whether this message send has been
+ /// tagged as a "delegate init call", i.e. a call to a method in the
+ /// -init family on self from within an -init method implementation.
+ bool isDelegateInitCall() const { return IsDelegateInitCall; }
+ void setDelegateInitCall(bool isDelegate) { IsDelegateInitCall = isDelegate; }
+
+ SourceLocation getLeftLoc() const { return LBracLoc; }
+ SourceLocation getRightLoc() const { return RBracLoc; }
+
+ SourceLocation getSelectorStartLoc() const {
+ if (isImplicit())
+ return getLocStart();
+ return getSelectorLoc(0);
+ }
+ SourceLocation getSelectorLoc(unsigned Index) const {
+ assert(Index < getNumSelectorLocs() && "Index out of range!");
+ if (hasStandardSelLocs())
+ return getStandardSelectorLoc(Index, getSelector(),
+ getSelLocsKind() == SelLoc_StandardWithSpace,
+ llvm::makeArrayRef(const_cast<Expr**>(getArgs()),
+ getNumArgs()),
+ RBracLoc);
+ return getStoredSelLocs()[Index];
+ }
+
+ void getSelectorLocs(SmallVectorImpl<SourceLocation> &SelLocs) const;
+
+ unsigned getNumSelectorLocs() const {
+ if (isImplicit())
+ return 0;
+ Selector Sel = getSelector();
+ if (Sel.isUnarySelector())
+ return 1;
+ return Sel.getNumArgs();
+ }
+
+ void setSourceRange(SourceRange R) {
+ LBracLoc = R.getBegin();
+ RBracLoc = R.getEnd();
+ }
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(LBracLoc, RBracLoc);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCMessageExprClass;
+ }
+ static bool classof(const ObjCMessageExpr *) { return true; }
+
+ // Iterators
+ child_range children();
+
+ typedef ExprIterator arg_iterator;
+ typedef ConstExprIterator const_arg_iterator;
+
+ arg_iterator arg_begin() { return reinterpret_cast<Stmt **>(getArgs()); }
+ arg_iterator arg_end() {
+ return reinterpret_cast<Stmt **>(getArgs() + NumArgs);
+ }
+ const_arg_iterator arg_begin() const {
+ return reinterpret_cast<Stmt const * const*>(getArgs());
+ }
+ const_arg_iterator arg_end() const {
+ return reinterpret_cast<Stmt const * const*>(getArgs() + NumArgs);
+ }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+};
+
+/// ObjCIsaExpr - Represent X->isa and X.isa when X is an ObjC 'id' type.
+/// (similar in spirit to MemberExpr).
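+/// For example (assuming \c obj has type \c id):
+/// \code
+///   Class c1 = obj->isa;
+///   Class c2 = obj.isa;
+/// \endcode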
+class ObjCIsaExpr : public Expr {
+ /// Base - the expression for the base object pointer.
+ Stmt *Base;
+
+ /// IsaMemberLoc - This is the location of the 'isa'.
+ SourceLocation IsaMemberLoc;
+
+ /// IsArrow - True if this is "X->F", false if this is "X.F".
+ bool IsArrow;
+public:
+ ObjCIsaExpr(Expr *base, bool isarrow, SourceLocation l, QualType ty)
+ : Expr(ObjCIsaExprClass, ty, VK_LValue, OK_Ordinary,
+ /*TypeDependent=*/false, base->isValueDependent(),
+ base->isInstantiationDependent(),
+ /*ContainsUnexpandedParameterPack=*/false),
+ Base(base), IsaMemberLoc(l), IsArrow(isarrow) {}
+
+ /// \brief Build an empty expression.
+ explicit ObjCIsaExpr(EmptyShell Empty) : Expr(ObjCIsaExprClass, Empty) { }
+
+ void setBase(Expr *E) { Base = E; }
+ Expr *getBase() const { return cast<Expr>(Base); }
+
+ bool isArrow() const { return IsArrow; }
+ void setArrow(bool A) { IsArrow = A; }
+
+  /// getIsaMemberLoc - Return the location of the 'isa' member; in X->isa,
+  /// it is the location of 'isa'.
+ SourceLocation getIsaMemberLoc() const { return IsaMemberLoc; }
+ void setIsaMemberLoc(SourceLocation L) { IsaMemberLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getBase()->getLocStart(), IsaMemberLoc);
+ }
+
+ SourceLocation getExprLoc() const LLVM_READONLY { return IsaMemberLoc; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCIsaExprClass;
+ }
+ static bool classof(const ObjCIsaExpr *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&Base, &Base+1); }
+};
+
+
+/// ObjCIndirectCopyRestoreExpr - Represents the passing of a function
+/// argument by indirect copy-restore in ARC. This is used to support
+/// passing indirect arguments with the wrong lifetime, e.g. when
+/// passing the address of a __strong local variable to an 'out'
+/// parameter. This expression kind is only valid in an "argument"
+/// position to some sort of call expression.
+///
+/// The parameter must have type 'pointer to T', and the argument must
+/// have type 'pointer to U', where T and U agree except possibly in
+/// qualification. If the argument value is null, then a null pointer
+/// is passed; otherwise it points to an object A, and:
+/// 1. A temporary object B of type T is initialized, either by
+/// zero-initialization (used when initializing an 'out' parameter)
+/// or copy-initialization (used when initializing an 'inout'
+/// parameter).
+/// 2. The address of the temporary is passed to the function.
+/// 3. If the call completes normally, A is move-assigned from B.
+/// 4. Finally, A is destroyed immediately.
+///
+/// Currently 'T' must be a retainable object lifetime and must be
+/// __autoreleasing; this qualifier is ignored when initializing
+/// the value.
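+///
+/// For illustration (hypothetical names): under ARC the call below passes
+/// \c &err by indirect copy-restore, because \c err is __strong while the
+/// parameter is 'NSError * __autoreleasing *':
+/// \code
+///   NSError *err = nil;
+///   BOOL ok = [obj doSomethingReturningError:&err];
+/// \endcode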
+class ObjCIndirectCopyRestoreExpr : public Expr {
+ Stmt *Operand;
+
+ // unsigned ObjCIndirectCopyRestoreBits.ShouldCopy : 1;
+
+ friend class ASTReader;
+ friend class ASTStmtReader;
+
+ void setShouldCopy(bool shouldCopy) {
+ ObjCIndirectCopyRestoreExprBits.ShouldCopy = shouldCopy;
+ }
+
+ explicit ObjCIndirectCopyRestoreExpr(EmptyShell Empty)
+ : Expr(ObjCIndirectCopyRestoreExprClass, Empty) { }
+
+public:
+ ObjCIndirectCopyRestoreExpr(Expr *operand, QualType type, bool shouldCopy)
+ : Expr(ObjCIndirectCopyRestoreExprClass, type, VK_LValue, OK_Ordinary,
+ operand->isTypeDependent(), operand->isValueDependent(),
+ operand->isInstantiationDependent(),
+ operand->containsUnexpandedParameterPack()),
+ Operand(operand) {
+ setShouldCopy(shouldCopy);
+ }
+
+ Expr *getSubExpr() { return cast<Expr>(Operand); }
+ const Expr *getSubExpr() const { return cast<Expr>(Operand); }
+
+ /// shouldCopy - True if we should do the 'copy' part of the
+ /// copy-restore. If false, the temporary will be zero-initialized.
+ bool shouldCopy() const { return ObjCIndirectCopyRestoreExprBits.ShouldCopy; }
+
+ child_range children() { return child_range(&Operand, &Operand+1); }
+
+ // Source locations are determined by the subexpression.
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return Operand->getSourceRange();
+ }
+ SourceLocation getExprLoc() const LLVM_READONLY {
+ return getSubExpr()->getExprLoc();
+ }
+
+ static bool classof(const Stmt *s) {
+ return s->getStmtClass() == ObjCIndirectCopyRestoreExprClass;
+ }
+ static bool classof(const ObjCIndirectCopyRestoreExpr *) { return true; }
+};
+
+/// \brief An Objective-C "bridged" cast expression, which casts between
+/// Objective-C pointers and C pointers, transferring ownership in the process.
+///
+/// \code
+/// NSString *str = (__bridge_transfer NSString *)CFCreateString();
+/// \endcode
+class ObjCBridgedCastExpr : public ExplicitCastExpr {
+ SourceLocation LParenLoc;
+ SourceLocation BridgeKeywordLoc;
+ unsigned Kind : 2;
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+
+public:
+ ObjCBridgedCastExpr(SourceLocation LParenLoc, ObjCBridgeCastKind Kind,
+ CastKind CK, SourceLocation BridgeKeywordLoc,
+ TypeSourceInfo *TSInfo, Expr *Operand)
+ : ExplicitCastExpr(ObjCBridgedCastExprClass, TSInfo->getType(), VK_RValue,
+ CK, Operand, 0, TSInfo),
+ LParenLoc(LParenLoc), BridgeKeywordLoc(BridgeKeywordLoc), Kind(Kind) { }
+
+ /// \brief Construct an empty Objective-C bridged cast.
+ explicit ObjCBridgedCastExpr(EmptyShell Shell)
+ : ExplicitCastExpr(ObjCBridgedCastExprClass, Shell, 0) { }
+
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+
+ /// \brief Determine which kind of bridge is being performed via this cast.
+ ObjCBridgeCastKind getBridgeKind() const {
+ return static_cast<ObjCBridgeCastKind>(Kind);
+ }
+
+ /// \brief Retrieve the kind of bridge being performed as a string.
+ StringRef getBridgeKindName() const;
+
+ /// \brief The location of the bridge keyword.
+ SourceLocation getBridgeKeywordLoc() const { return BridgeKeywordLoc; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(LParenLoc, getSubExpr()->getLocEnd());
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCBridgedCastExprClass;
+ }
+ static bool classof(const ObjCBridgedCastExpr *) { return true; }
+
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h b/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h
new file mode 100644
index 0000000..18a1432
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/ExternalASTSource.h
@@ -0,0 +1,522 @@
+//===--- ExternalASTSource.h - Abstract External AST Interface --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ExternalASTSource interface, which enables
+// construction of AST nodes from some external source.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_AST_EXTERNAL_AST_SOURCE_H
+#define LLVM_CLANG_AST_EXTERNAL_AST_SOURCE_H
+
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/CharUnits.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace clang {
+
+class ASTConsumer;
+class CXXBaseSpecifier;
+class DeclarationName;
+class ExternalSemaSource; // layering violation required for downcasting
+class NamedDecl;
+class Selector;
+class Stmt;
+class TagDecl;
+
+/// \brief Enumeration describing the result of loading information from
+/// an external source.
+enum ExternalLoadResult {
+ /// \brief Loading the external information has succeeded.
+ ELR_Success,
+
+ /// \brief Loading the external information has failed.
+ ELR_Failure,
+
+ /// \brief The external information has already been loaded, and therefore
+ /// no additional processing is required.
+ ELR_AlreadyLoaded
+};
+
+/// \brief Abstract interface for external sources of AST nodes.
+///
+/// External AST sources provide AST nodes constructed from some
+/// external source, such as a precompiled header. External AST
+/// sources can resolve types and declarations from abstract IDs into
+/// actual type and declaration nodes, and read parts of declaration
+/// contexts.
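+///
+/// A subclass typically overrides only the hooks it needs; a minimal sketch
+/// (the class name is hypothetical):
+/// \code
+///   class MyPCHSource : public ExternalASTSource {
+///     virtual void CompleteType(TagDecl *Tag) { /* provide Tag's members */ }
+///   };
+/// \endcode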
+class ExternalASTSource {
+ /// \brief Whether this AST source also provides information for
+ /// semantic analysis.
+ bool SemaSource;
+
+ friend class ExternalSemaSource;
+
+public:
+ ExternalASTSource() : SemaSource(false) { }
+
+ virtual ~ExternalASTSource();
+
+ /// \brief RAII class for safely pairing a StartedDeserializing call
+ /// with FinishedDeserializing.
+ class Deserializing {
+ ExternalASTSource *Source;
+ public:
+ explicit Deserializing(ExternalASTSource *source) : Source(source) {
+ assert(Source);
+ Source->StartedDeserializing();
+ }
+ ~Deserializing() {
+ Source->FinishedDeserializing();
+ }
+ };
+
+ /// \brief Resolve a declaration ID into a declaration, potentially
+ /// building a new declaration.
+ ///
+ /// This method only needs to be implemented if the AST source ever
+ /// passes back decl sets as VisibleDeclaration objects.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual Decl *GetExternalDecl(uint32_t ID);
+
+ /// \brief Resolve a selector ID into a selector.
+ ///
+ /// This operation only needs to be implemented if the AST source
+ /// returns non-zero for GetNumKnownSelectors().
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual Selector GetExternalSelector(uint32_t ID);
+
+ /// \brief Returns the number of selectors known to the external AST
+ /// source.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual uint32_t GetNumExternalSelectors();
+
+ /// \brief Resolve the offset of a statement in the decl stream into
+ /// a statement.
+ ///
+ /// This operation is meant to be used via a LazyOffsetPtr. It only
+ /// needs to be implemented if the AST source uses methods like
+ /// FunctionDecl::setLazyBody when building decls.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual Stmt *GetExternalDeclStmt(uint64_t Offset);
+
+ /// \brief Resolve the offset of a set of C++ base specifiers in the decl
+ /// stream into an array of specifiers.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual CXXBaseSpecifier *GetExternalCXXBaseSpecifiers(uint64_t Offset);
+
+ /// \brief Finds all declarations with the given name in the
+ /// given context.
+ ///
+ /// Generally the final step of this method is either to call
+ /// SetExternalVisibleDeclsForName or to recursively call lookup on
+ /// the DeclContext after calling SetExternalVisibleDecls.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual DeclContextLookupResult
+ FindExternalVisibleDeclsByName(const DeclContext *DC, DeclarationName Name);
+
+ /// \brief Finds all declarations lexically contained within the given
+ /// DeclContext, after applying an optional filter predicate.
+ ///
+ /// \param isKindWeWant a predicate function that returns true if the passed
+ /// declaration kind is one we are looking for. If NULL, all declarations
+ /// are returned.
+ ///
+ /// \return an indication of whether the load succeeded or failed.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual ExternalLoadResult FindExternalLexicalDecls(const DeclContext *DC,
+ bool (*isKindWeWant)(Decl::Kind),
+ SmallVectorImpl<Decl*> &Result);
+
+ /// \brief Finds all declarations lexically contained within the given
+ /// DeclContext.
+ ///
+  /// \return an indication of whether the load succeeded or failed.
+ ExternalLoadResult FindExternalLexicalDecls(const DeclContext *DC,
+ SmallVectorImpl<Decl*> &Result) {
+ return FindExternalLexicalDecls(DC, 0, Result);
+ }
+
+ template <typename DeclTy>
+ ExternalLoadResult FindExternalLexicalDeclsBy(const DeclContext *DC,
+ SmallVectorImpl<Decl*> &Result) {
+ return FindExternalLexicalDecls(DC, DeclTy::classofKind, Result);
+ }
+
+ /// \brief Get the decls that are contained in a file in the Offset/Length
+ /// range. \arg Length can be 0 to indicate a point at \arg Offset instead of
+ /// a range.
+  virtual void FindFileRegionDecls(FileID File, unsigned Offset,
+                                   unsigned Length,
+                                   SmallVectorImpl<Decl *> &Decls) {}
+
+ /// \brief Gives the external AST source an opportunity to complete
+ /// an incomplete type.
+ virtual void CompleteType(TagDecl *Tag) {}
+
+ /// \brief Gives the external AST source an opportunity to complete an
+ /// incomplete Objective-C class.
+ ///
+ /// This routine will only be invoked if the "externally completed" bit is
+ /// set on the ObjCInterfaceDecl via the function
+ /// \c ObjCInterfaceDecl::setExternallyCompleted().
+ virtual void CompleteType(ObjCInterfaceDecl *Class) { }
+
+ /// \brief Notify ExternalASTSource that we started deserialization of
+ /// a decl or type so until FinishedDeserializing is called there may be
+ /// decls that are initializing. Must be paired with FinishedDeserializing.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual void StartedDeserializing() { }
+
+ /// \brief Notify ExternalASTSource that we finished the deserialization of
+ /// a decl or type. Must be paired with StartedDeserializing.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual void FinishedDeserializing() { }
+
+ /// \brief Function that will be invoked when we begin parsing a new
+ /// translation unit involving this external AST source.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual void StartTranslationUnit(ASTConsumer *Consumer) { }
+
+ /// \brief Print any statistics that have been gathered regarding
+ /// the external AST source.
+ ///
+ /// The default implementation of this method is a no-op.
+ virtual void PrintStats();
+
+
+ /// \brief Perform layout on the given record.
+ ///
+  /// This routine allows the external AST source to provide a specific
+ /// layout for a record, overriding the layout that would normally be
+ /// constructed. It is intended for clients who receive specific layout
+ /// details rather than source code (such as LLDB). The client is expected
+ /// to fill in the field offsets, base offsets, virtual base offsets, and
+ /// complete object size.
+ ///
+ /// \param Record The record whose layout is being requested.
+ ///
+ /// \param Size The final size of the record, in bits.
+ ///
+ /// \param Alignment The final alignment of the record, in bits.
+ ///
+ /// \param FieldOffsets The offset of each of the fields within the record,
+ /// expressed in bits. All of the fields must be provided with offsets.
+ ///
+ /// \param BaseOffsets The offset of each of the direct, non-virtual base
+ /// classes. If any bases are not given offsets, the bases will be laid
+ /// out according to the ABI.
+ ///
+ /// \param VirtualBaseOffsets The offset of each of the virtual base classes
+  /// (whether direct or indirect). If any bases are not given offsets, the
+  /// bases will be laid out according to the ABI.
+ ///
+ /// \returns true if the record layout was provided, false otherwise.
+ virtual bool
+ layoutRecordType(const RecordDecl *Record,
+ uint64_t &Size, uint64_t &Alignment,
+ llvm::DenseMap<const FieldDecl *, uint64_t> &FieldOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &BaseOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &VirtualBaseOffsets)
+ {
+ return false;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Queries for performance analysis.
+ //===--------------------------------------------------------------------===//
+
+ struct MemoryBufferSizes {
+ size_t malloc_bytes;
+ size_t mmap_bytes;
+
+ MemoryBufferSizes(size_t malloc_bytes, size_t mmap_bytes)
+ : malloc_bytes(malloc_bytes), mmap_bytes(mmap_bytes) {}
+ };
+
+ /// Return the amount of memory used by memory buffers, breaking down
+ /// by heap-backed versus mmap'ed memory.
+ MemoryBufferSizes getMemoryBufferSizes() const {
+ MemoryBufferSizes sizes(0, 0);
+ getMemoryBufferSizes(sizes);
+ return sizes;
+ }
+
+ virtual void getMemoryBufferSizes(MemoryBufferSizes &sizes) const;
+
+protected:
+ static DeclContextLookupResult
+ SetExternalVisibleDeclsForName(const DeclContext *DC,
+ DeclarationName Name,
+ ArrayRef<NamedDecl*> Decls);
+
+ static DeclContextLookupResult
+ SetNoExternalVisibleDeclsForName(const DeclContext *DC,
+ DeclarationName Name);
+};
+
+/// \brief A lazy pointer to an AST node (of base type T) that resides
+/// within an external AST source.
+///
+/// The AST node is identified within the external AST source by a
+/// 63-bit offset, and can be retrieved via an operation on the
+/// external AST source itself.
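+///
+/// For example, \c LazyDeclStmtPtr (defined below) holds either a \c Stmt*
+/// directly or an offset that is resolved through
+/// \c ExternalASTSource::GetExternalDeclStmt on first access; a sketch with
+/// hypothetical names:
+/// \code
+///   LazyDeclStmtPtr Body(Offset);   // stored as (Offset << 1) | 1
+///   Stmt *S = Body.get(Source);     // deserializes through Source if needed
+/// \endcode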
+template<typename T, typename OffsT, T* (ExternalASTSource::*Get)(OffsT Offset)>
+struct LazyOffsetPtr {
+ /// \brief Either a pointer to an AST node or the offset within the
+ /// external AST source where the AST node can be found.
+ ///
+ /// If the low bit is clear, a pointer to the AST node. If the low
+ /// bit is set, the upper 63 bits are the offset.
+ mutable uint64_t Ptr;
+
+public:
+ LazyOffsetPtr() : Ptr(0) { }
+
+ explicit LazyOffsetPtr(T *Ptr) : Ptr(reinterpret_cast<uint64_t>(Ptr)) { }
+ explicit LazyOffsetPtr(uint64_t Offset) : Ptr((Offset << 1) | 0x01) {
+ assert((Offset << 1 >> 1) == Offset && "Offsets must require < 63 bits");
+ if (Offset == 0)
+ Ptr = 0;
+ }
+
+ LazyOffsetPtr &operator=(T *Ptr) {
+ this->Ptr = reinterpret_cast<uint64_t>(Ptr);
+ return *this;
+ }
+
+ LazyOffsetPtr &operator=(uint64_t Offset) {
+ assert((Offset << 1 >> 1) == Offset && "Offsets must require < 63 bits");
+ if (Offset == 0)
+ Ptr = 0;
+ else
+ Ptr = (Offset << 1) | 0x01;
+
+ return *this;
+ }
+
+ /// \brief Whether this pointer is non-NULL.
+ ///
+ /// This operation does not require the AST node to be deserialized.
+ operator bool() const { return Ptr != 0; }
+
+ /// \brief Whether this pointer is currently stored as an offset.
+ bool isOffset() const { return Ptr & 0x01; }
+
+  /// \brief Retrieve the pointer to the AST node that this lazy pointer points
+  /// to, deserializing it from the external AST source if necessary.
+ ///
+ /// \param Source the external AST source.
+ ///
+ /// \returns a pointer to the AST node.
+ T* get(ExternalASTSource *Source) const {
+ if (isOffset()) {
+ assert(Source &&
+ "Cannot deserialize a lazy pointer without an AST source");
+ Ptr = reinterpret_cast<uint64_t>((Source->*Get)(Ptr >> 1));
+ }
+ return reinterpret_cast<T*>(Ptr);
+ }
+};
+
+/// \brief Represents a lazily-loaded vector of data.
+///
+/// The lazily-loaded vector of data contains data that is partially loaded
+/// from an external source and partially added by local translation. The
+/// items loaded from the external source are loaded lazily, when needed for
+/// iteration over the complete vector.
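+///
+/// A usage sketch (the source type and its loader method are hypothetical):
+/// \code
+///   typedef LazyVector<Decl *, MySource, &MySource::loadAllDecls> DeclVector;
+///   DeclVector Decls;
+///   Decls.push_back(D);                         // locally added element
+///   for (DeclVector::iterator I = Decls.begin(ExtSource), E = Decls.end();
+///        I != E; ++I)
+///     visit(*I);                                // loaded elements come first
+/// \endcode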
+template<typename T, typename Source,
+ void (Source::*Loader)(SmallVectorImpl<T>&),
+ unsigned LoadedStorage = 2, unsigned LocalStorage = 4>
+class LazyVector {
+ SmallVector<T, LoadedStorage> Loaded;
+ SmallVector<T, LocalStorage> Local;
+
+public:
+ // Iteration over the elements in the vector.
+ class iterator {
+ LazyVector *Self;
+
+    /// \brief Position within the vector.
+ ///
+ /// In a complete iteration, the Position field walks the range [-M, N),
+ /// where negative values are used to indicate elements
+ /// loaded from the external source while non-negative values are used to
+ /// indicate elements added via \c push_back().
+ /// However, to provide iteration in source order (for, e.g., chained
+ /// precompiled headers), dereferencing the iterator flips the negative
+ /// values (corresponding to loaded entities), so that position -M
+ /// corresponds to element 0 in the loaded entities vector, position -M+1
+ /// corresponds to element 1 in the loaded entities vector, etc. This
+ /// gives us a reasonably efficient, source-order walk.
+ int Position;
+
+ friend class LazyVector;
+
+ public:
+ typedef T value_type;
+ typedef value_type& reference;
+ typedef value_type* pointer;
+ typedef std::random_access_iterator_tag iterator_category;
+ typedef int difference_type;
+
+ iterator() : Self(0), Position(0) { }
+
+ iterator(LazyVector *Self, int Position)
+ : Self(Self), Position(Position) { }
+
+ reference operator*() const {
+ if (Position < 0)
+ return Self->Loaded.end()[Position];
+ return Self->Local[Position];
+ }
+
+ pointer operator->() const {
+ if (Position < 0)
+ return &Self->Loaded.end()[Position];
+
+ return &Self->Local[Position];
+ }
+
+ reference operator[](difference_type D) {
+ return *(*this + D);
+ }
+
+ iterator &operator++() {
+ ++Position;
+ return *this;
+ }
+
+ iterator operator++(int) {
+ iterator Prev(*this);
+ ++Position;
+ return Prev;
+ }
+
+ iterator &operator--() {
+ --Position;
+ return *this;
+ }
+
+ iterator operator--(int) {
+ iterator Prev(*this);
+ --Position;
+ return Prev;
+ }
+
+ friend bool operator==(const iterator &X, const iterator &Y) {
+ return X.Position == Y.Position;
+ }
+
+ friend bool operator!=(const iterator &X, const iterator &Y) {
+ return X.Position != Y.Position;
+ }
+
+ friend bool operator<(const iterator &X, const iterator &Y) {
+ return X.Position < Y.Position;
+ }
+
+ friend bool operator>(const iterator &X, const iterator &Y) {
+ return X.Position > Y.Position;
+ }
+
+ friend bool operator<=(const iterator &X, const iterator &Y) {
+      return X.Position <= Y.Position;
+ }
+
+ friend bool operator>=(const iterator &X, const iterator &Y) {
+      return X.Position >= Y.Position;
+ }
+
+ friend iterator& operator+=(iterator &X, difference_type D) {
+ X.Position += D;
+ return X;
+ }
+
+ friend iterator& operator-=(iterator &X, difference_type D) {
+ X.Position -= D;
+ return X;
+ }
+
+ friend iterator operator+(iterator X, difference_type D) {
+ X.Position += D;
+ return X;
+ }
+
+ friend iterator operator+(difference_type D, iterator X) {
+ X.Position += D;
+ return X;
+ }
+
+ friend difference_type operator-(const iterator &X, const iterator &Y) {
+ return X.Position - Y.Position;
+ }
+
+ friend iterator operator-(iterator X, difference_type D) {
+ X.Position -= D;
+ return X;
+ }
+ };
+ friend class iterator;
+
+ iterator begin(Source *source, bool LocalOnly = false) {
+ if (LocalOnly)
+ return iterator(this, 0);
+
+ if (source)
+ (source->*Loader)(Loaded);
+ return iterator(this, -(int)Loaded.size());
+ }
+
+ iterator end() {
+ return iterator(this, Local.size());
+ }
+
+ void push_back(const T& LocalValue) {
+ Local.push_back(LocalValue);
+ }
+
+ void erase(iterator From, iterator To) {
+ if (From.Position < 0 && To.Position < 0) {
+ Loaded.erase(Loaded.end() + From.Position, Loaded.end() + To.Position);
+ return;
+ }
+
+ if (From.Position < 0) {
+ Loaded.erase(Loaded.end() + From.Position, Loaded.end());
+ From = begin(0, true);
+ }
+
+ Local.erase(Local.begin() + From.Position, Local.begin() + To.Position);
+ }
+};
+
+/// \brief A lazy pointer to a statement.
+typedef LazyOffsetPtr<Stmt, uint64_t, &ExternalASTSource::GetExternalDeclStmt>
+ LazyDeclStmtPtr;
+
+/// \brief A lazy pointer to a declaration.
+typedef LazyOffsetPtr<Decl, uint32_t, &ExternalASTSource::GetExternalDecl>
+ LazyDeclPtr;
+
+/// \brief A lazy pointer to a set of CXXBaseSpecifiers.
+typedef LazyOffsetPtr<CXXBaseSpecifier, uint64_t,
+ &ExternalASTSource::GetExternalCXXBaseSpecifiers>
+ LazyCXXBaseSpecifiersPtr;
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_AST_EXTERNAL_AST_SOURCE_H
diff --git a/contrib/llvm/tools/clang/include/clang/AST/GlobalDecl.h b/contrib/llvm/tools/clang/include/clang/AST/GlobalDecl.h
new file mode 100644
index 0000000..c43e44c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/GlobalDecl.h
@@ -0,0 +1,124 @@
+//===--- GlobalDecl.h - Global declaration holder ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A GlobalDecl can hold either a regular variable/function or a C++ ctor/dtor
+// together with its type.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_GLOBALDECL_H
+#define LLVM_CLANG_AST_GLOBALDECL_H
+
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/ABI.h"
+
+namespace clang {
+
+/// GlobalDecl - represents a global declaration. This can either be a
+/// CXXConstructorDecl and the constructor type (Base, Complete),
+/// a CXXDestructorDecl and the destructor type (Base, Complete), or
+/// a VarDecl, a FunctionDecl or a BlockDecl.
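+///
+/// For example (the declarations are hypothetical):
+/// \code
+///   GlobalDecl FnGD(FD);                    // a plain FunctionDecl
+///   GlobalDecl CtorGD(CD, Ctor_Complete);   // a complete-object constructor
+/// \endcode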
+class GlobalDecl {
+ llvm::PointerIntPair<const Decl*, 2> Value;
+
+ void Init(const Decl *D) {
+ assert(!isa<CXXConstructorDecl>(D) && "Use other ctor with ctor decls!");
+ assert(!isa<CXXDestructorDecl>(D) && "Use other ctor with dtor decls!");
+
+ Value.setPointer(D);
+ }
+
+public:
+ GlobalDecl() {}
+
+ GlobalDecl(const VarDecl *D) { Init(D);}
+ GlobalDecl(const FunctionDecl *D) { Init(D); }
+ GlobalDecl(const BlockDecl *D) { Init(D); }
+ GlobalDecl(const ObjCMethodDecl *D) { Init(D); }
+
+ GlobalDecl(const CXXConstructorDecl *D, CXXCtorType Type)
+ : Value(D, Type) {}
+ GlobalDecl(const CXXDestructorDecl *D, CXXDtorType Type)
+ : Value(D, Type) {}
+
+ GlobalDecl getCanonicalDecl() const {
+ GlobalDecl CanonGD;
+ CanonGD.Value.setPointer(Value.getPointer()->getCanonicalDecl());
+ CanonGD.Value.setInt(Value.getInt());
+
+ return CanonGD;
+ }
+
+ const Decl *getDecl() const { return Value.getPointer(); }
+
+ CXXCtorType getCtorType() const {
+ assert(isa<CXXConstructorDecl>(getDecl()) && "Decl is not a ctor!");
+ return static_cast<CXXCtorType>(Value.getInt());
+ }
+
+ CXXDtorType getDtorType() const {
+ assert(isa<CXXDestructorDecl>(getDecl()) && "Decl is not a dtor!");
+ return static_cast<CXXDtorType>(Value.getInt());
+ }
+
+ friend bool operator==(const GlobalDecl &LHS, const GlobalDecl &RHS) {
+ return LHS.Value == RHS.Value;
+ }
+
+ void *getAsOpaquePtr() const { return Value.getOpaqueValue(); }
+
+ static GlobalDecl getFromOpaquePtr(void *P) {
+ GlobalDecl GD;
+ GD.Value.setFromOpaqueValue(P);
+ return GD;
+ }
+
+ GlobalDecl getWithDecl(const Decl *D) {
+ GlobalDecl Result(*this);
+ Result.Value.setPointer(D);
+ return Result;
+ }
+};
+
+} // end namespace clang
+
+namespace llvm {
+ template<class> struct DenseMapInfo;
+
+ template<> struct DenseMapInfo<clang::GlobalDecl> {
+ static inline clang::GlobalDecl getEmptyKey() {
+ return clang::GlobalDecl();
+ }
+
+ static inline clang::GlobalDecl getTombstoneKey() {
+ return clang::GlobalDecl::
+ getFromOpaquePtr(reinterpret_cast<void*>(-1));
+ }
+
+ static unsigned getHashValue(clang::GlobalDecl GD) {
+ return DenseMapInfo<void*>::getHashValue(GD.getAsOpaquePtr());
+ }
+
+ static bool isEqual(clang::GlobalDecl LHS,
+ clang::GlobalDecl RHS) {
+ return LHS == RHS;
+ }
+
+ };
+
+ // GlobalDecl isn't *technically* a POD type. However, its copy constructor,
+ // copy assignment operator, and destructor are all trivial.
+ template <>
+ struct isPodLike<clang::GlobalDecl> {
+ static const bool value = true;
+ };
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/LambdaMangleContext.h b/contrib/llvm/tools/clang/include/clang/AST/LambdaMangleContext.h
new file mode 100644
index 0000000..3e2fbad
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/LambdaMangleContext.h
@@ -0,0 +1,36 @@
+//===--- LambdaMangleContext.h - Context for mangling lambdas ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LambdaMangleContext interface, which keeps track of
+// the Itanium C++ ABI mangling numbers for lambda expressions.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_LAMBDAMANGLECONTEXT_H
+#define LLVM_CLANG_LAMBDAMANGLECONTEXT_H
+
+#include "llvm/ADT/DenseMap.h"
+
+namespace clang {
+
+class CXXMethodDecl;
+class FunctionProtoType;
+
+/// \brief Keeps track of the Itanium C++ ABI mangling numbers of lambda
+/// expressions within a particular context.
+class LambdaMangleContext {
+ llvm::DenseMap<const FunctionProtoType *, unsigned> ManglingNumbers;
+
+public:
+ /// \brief Retrieve the mangling number of a new lambda expression with the
+ /// given call operator within this lambda context.
+ unsigned getManglingNumber(CXXMethodDecl *CallOperator);
+};
+
+} // end namespace clang
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Mangle.h b/contrib/llvm/tools/clang/include/clang/AST/Mangle.h
new file mode 100644
index 0000000..ca22ed6
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/Mangle.h
@@ -0,0 +1,152 @@
+//===--- Mangle.h - Mangle C++ Names ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the C++ name mangling interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_MANGLE_H
+#define LLVM_CLANG_AST_MANGLE_H
+
+#include "clang/AST/Type.h"
+#include "clang/Basic/ABI.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+ class ASTContext;
+ class BlockDecl;
+ class CXXConstructorDecl;
+ class CXXDestructorDecl;
+ class CXXMethodDecl;
+ class FunctionDecl;
+ class NamedDecl;
+ class ObjCMethodDecl;
+ class VarDecl;
+ struct ThisAdjustment;
+ struct ThunkInfo;
+
+/// MangleBuffer - a convenient class for storing a name which is
+/// either the result of a mangling or is a constant string with
+/// external memory ownership.
+class MangleBuffer {
+public:
+ void setString(StringRef Ref) {
+ String = Ref;
+ }
+
+ SmallVectorImpl<char> &getBuffer() {
+ return Buffer;
+ }
+
+ StringRef getString() const {
+ if (!String.empty()) return String;
+ return Buffer.str();
+ }
+
+ operator StringRef() const {
+ return getString();
+ }
+
+private:
+ StringRef String;
+ SmallString<256> Buffer;
+};
+
+/// MangleContext - Context for tracking state which persists across multiple
+/// calls to the C++ name mangler.
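+///
+/// A usage sketch (\c Ctx, \c Diags and the declaration \c FD are assumed
+/// to exist):
+/// \code
+///   llvm::OwningPtr<MangleContext>
+///       MC(createItaniumMangleContext(Ctx, Diags));
+///   llvm::SmallString<128> Name;
+///   llvm::raw_svector_ostream Out(Name);
+///   if (MC->shouldMangleDeclName(FD))
+///     MC->mangleName(FD, Out);
+/// \endcode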
+class MangleContext {
+ virtual void anchor();
+
+ ASTContext &Context;
+ DiagnosticsEngine &Diags;
+
+ llvm::DenseMap<const BlockDecl*, unsigned> GlobalBlockIds;
+ llvm::DenseMap<const BlockDecl*, unsigned> LocalBlockIds;
+
+public:
+ explicit MangleContext(ASTContext &Context,
+ DiagnosticsEngine &Diags)
+ : Context(Context), Diags(Diags) { }
+
+ virtual ~MangleContext() { }
+
+ ASTContext &getASTContext() const { return Context; }
+
+ DiagnosticsEngine &getDiags() const { return Diags; }
+
+ virtual void startNewFunction() { LocalBlockIds.clear(); }
+
+ unsigned getBlockId(const BlockDecl *BD, bool Local) {
+ llvm::DenseMap<const BlockDecl *, unsigned> &BlockIds
+ = Local? LocalBlockIds : GlobalBlockIds;
+ std::pair<llvm::DenseMap<const BlockDecl *, unsigned>::iterator, bool>
+ Result = BlockIds.insert(std::make_pair(BD, BlockIds.size()));
+ return Result.first->second;
+ }
+
+ /// @name Mangler Entry Points
+ /// @{
+
+ virtual bool shouldMangleDeclName(const NamedDecl *D) = 0;
+  virtual void mangleName(const NamedDecl *D, raw_ostream &) = 0;
+ virtual void mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ raw_ostream &) = 0;
+ virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ raw_ostream &) = 0;
+ virtual void mangleReferenceTemporary(const VarDecl *D,
+ raw_ostream &) = 0;
+ virtual void mangleCXXVTable(const CXXRecordDecl *RD,
+ raw_ostream &) = 0;
+ virtual void mangleCXXVTT(const CXXRecordDecl *RD,
+ raw_ostream &) = 0;
+ virtual void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
+ const CXXRecordDecl *Type,
+ raw_ostream &) = 0;
+ virtual void mangleCXXRTTI(QualType T, raw_ostream &) = 0;
+ virtual void mangleCXXRTTIName(QualType T, raw_ostream &) = 0;
+ virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ raw_ostream &) = 0;
+ virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ raw_ostream &) = 0;
+
+ void mangleGlobalBlock(const BlockDecl *BD,
+ raw_ostream &Out);
+ void mangleCtorBlock(const CXXConstructorDecl *CD, CXXCtorType CT,
+ const BlockDecl *BD, raw_ostream &Out);
+ void mangleDtorBlock(const CXXDestructorDecl *CD, CXXDtorType DT,
+ const BlockDecl *BD, raw_ostream &Out);
+ void mangleBlock(const DeclContext *DC, const BlockDecl *BD,
+ raw_ostream &Out);
+ // Do the right thing.
+ void mangleBlock(const BlockDecl *BD, raw_ostream &Out);
+
+ void mangleObjCMethodName(const ObjCMethodDecl *MD,
+ raw_ostream &);
+
+ // This is pretty lame.
+ virtual void mangleItaniumGuardVariable(const VarDecl *D,
+ raw_ostream &) {
+ llvm_unreachable("Target does not support mangling guard variables");
+ }
+ /// @}
+};
+
+MangleContext *createItaniumMangleContext(ASTContext &Context,
+ DiagnosticsEngine &Diags);
+MangleContext *createMicrosoftMangleContext(ASTContext &Context,
+ DiagnosticsEngine &Diags);
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/NSAPI.h b/contrib/llvm/tools/clang/include/clang/AST/NSAPI.h
new file mode 100644
index 0000000..40e9759
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/NSAPI.h
@@ -0,0 +1,152 @@
+//===--- NSAPI.h - NSFoundation APIs ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_NSAPI_H
+#define LLVM_CLANG_AST_NSAPI_H
+
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/Optional.h"
+
+namespace clang {
+ class ASTContext;
+ class QualType;
+
+/// \brief Provides info and caches identifiers/selectors for NSFoundation API.
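+///
+/// For example (assuming an ASTContext \c Ctx):
+/// \code
+///   NSAPI NS(Ctx);
+///   Selector S = NS.getNSArraySelector(NSAPI::NSArr_arrayWithObjects);
+/// \endcode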
+class NSAPI {
+public:
+ explicit NSAPI(ASTContext &Ctx);
+
+ ASTContext &getASTContext() const { return Ctx; }
+
+ enum NSClassIdKindKind {
+ ClassId_NSObject,
+ ClassId_NSString,
+ ClassId_NSArray,
+ ClassId_NSMutableArray,
+ ClassId_NSDictionary,
+ ClassId_NSMutableDictionary,
+ ClassId_NSNumber
+ };
+ static const unsigned NumClassIds = 7;
+
+ enum NSStringMethodKind {
+ NSStr_stringWithString,
+ NSStr_initWithString
+ };
+ static const unsigned NumNSStringMethods = 2;
+
+ IdentifierInfo *getNSClassId(NSClassIdKindKind K) const;
+
+ /// \brief The Objective-C NSString selectors.
+ Selector getNSStringSelector(NSStringMethodKind MK) const;
+
+ /// \brief Enumerates the NSArray methods used to generate literals.
+ enum NSArrayMethodKind {
+ NSArr_array,
+ NSArr_arrayWithArray,
+ NSArr_arrayWithObject,
+ NSArr_arrayWithObjects,
+ NSArr_arrayWithObjectsCount,
+ NSArr_initWithArray,
+ NSArr_initWithObjects,
+ NSArr_objectAtIndex,
+ NSMutableArr_replaceObjectAtIndex
+ };
+ static const unsigned NumNSArrayMethods = 9;
+
+ /// \brief The Objective-C NSArray selectors.
+ Selector getNSArraySelector(NSArrayMethodKind MK) const;
+
+ /// \brief Return NSArrayMethodKind if \arg Sel is such a selector.
+ llvm::Optional<NSArrayMethodKind> getNSArrayMethodKind(Selector Sel);
+
+ /// \brief Enumerates the NSDictionary methods used to generate literals.
+ enum NSDictionaryMethodKind {
+ NSDict_dictionary,
+ NSDict_dictionaryWithDictionary,
+ NSDict_dictionaryWithObjectForKey,
+ NSDict_dictionaryWithObjectsForKeys,
+ NSDict_dictionaryWithObjectsForKeysCount,
+ NSDict_dictionaryWithObjectsAndKeys,
+ NSDict_initWithDictionary,
+ NSDict_initWithObjectsAndKeys,
+ NSDict_objectForKey,
+ NSMutableDict_setObjectForKey
+ };
+ static const unsigned NumNSDictionaryMethods = 10;
+
+ /// \brief The Objective-C NSDictionary selectors.
+ Selector getNSDictionarySelector(NSDictionaryMethodKind MK) const;
+
+ /// \brief Return NSDictionaryMethodKind if \arg Sel is such a selector.
+ llvm::Optional<NSDictionaryMethodKind>
+ getNSDictionaryMethodKind(Selector Sel);
+
+ /// \brief Enumerates the NSNumber methods used to generate literals.
+ enum NSNumberLiteralMethodKind {
+ NSNumberWithChar,
+ NSNumberWithUnsignedChar,
+ NSNumberWithShort,
+ NSNumberWithUnsignedShort,
+ NSNumberWithInt,
+ NSNumberWithUnsignedInt,
+ NSNumberWithLong,
+ NSNumberWithUnsignedLong,
+ NSNumberWithLongLong,
+ NSNumberWithUnsignedLongLong,
+ NSNumberWithFloat,
+ NSNumberWithDouble,
+ NSNumberWithBool,
+ NSNumberWithInteger,
+ NSNumberWithUnsignedInteger
+ };
+ static const unsigned NumNSNumberLiteralMethods = 15;
+
+ /// \brief The Objective-C NSNumber selectors used to create NSNumber literals.
+  /// \param Instance If true, returns the selector for the init* method;
+  /// otherwise, returns the selector for the number* method.
+ Selector getNSNumberLiteralSelector(NSNumberLiteralMethodKind MK,
+ bool Instance) const;
+
+ bool isNSNumberLiteralSelector(NSNumberLiteralMethodKind MK,
+ Selector Sel) const {
+ return Sel == getNSNumberLiteralSelector(MK, false) ||
+ Sel == getNSNumberLiteralSelector(MK, true);
+ }
+
+ /// \brief Return NSNumberLiteralMethodKind if \arg Sel is such a selector.
+ llvm::Optional<NSNumberLiteralMethodKind>
+ getNSNumberLiteralMethodKind(Selector Sel) const;
+
+ /// \brief Determine the appropriate NSNumber factory method kind for a
+ /// literal of the given type.
+ static llvm::Optional<NSNumberLiteralMethodKind>
+ getNSNumberFactoryMethodKind(QualType T);
+
+private:
+ ASTContext &Ctx;
+
+ mutable IdentifierInfo *ClassIds[NumClassIds];
+
+ mutable Selector NSStringSelectors[NumNSStringMethods];
+
+ /// \brief The selectors for Objective-C NSArray methods.
+ mutable Selector NSArraySelectors[NumNSArrayMethods];
+
+ /// \brief The selectors for Objective-C NSDictionary methods.
+ mutable Selector NSDictionarySelectors[NumNSDictionaryMethods];
+
+ /// \brief The Objective-C NSNumber selectors used to create NSNumber literals.
+ mutable Selector NSNumberClassSelectors[NumNSNumberLiteralMethods];
+ mutable Selector NSNumberInstanceSelectors[NumNSNumberLiteralMethods];
+};
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_AST_NSAPI_H
diff --git a/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h b/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h
new file mode 100644
index 0000000..b5bd824
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/NestedNameSpecifier.h
@@ -0,0 +1,481 @@
+//===--- NestedNameSpecifier.h - C++ nested name specifiers -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the NestedNameSpecifier class, which represents
+// a C++ nested-name-specifier.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_AST_NESTEDNAMESPECIFIER_H
+#define LLVM_CLANG_AST_NESTEDNAMESPECIFIER_H
+
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+
+class ASTContext;
+class NamespaceAliasDecl;
+class NamespaceDecl;
+class IdentifierInfo;
+struct PrintingPolicy;
+class Type;
+class TypeLoc;
+class LangOptions;
+
+/// \brief Represents a C++ nested name specifier, such as
+/// "::std::vector<int>::".
+///
+/// C++ nested name specifiers are the prefixes to qualified
+/// namespaces. For example, "foo::" in "foo::x" is a nested name
+/// specifier. Nested name specifiers are made up of a sequence of
+/// specifiers, each of which can be a namespace, type, identifier
+/// (for dependent names), decltype specifier, or the global specifier ('::').
+/// The last two specifiers can only appear at the start of a
+/// nested-name-specifier.
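+///
+/// For example, a specifier for \c ::std::vector<int>:: could be built up as
+/// follows (sketch only; \c Ctx, \c StdNS and \c VectorIntTy are assumed):
+/// \code
+///   NestedNameSpecifier *Global = NestedNameSpecifier::GlobalSpecifier(Ctx);
+///   NestedNameSpecifier *Std =
+///       NestedNameSpecifier::Create(Ctx, Global, StdNS);
+///   NestedNameSpecifier *Vec =
+///       NestedNameSpecifier::Create(Ctx, Std, /*Template=*/false, VectorIntTy);
+/// \endcode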
+class NestedNameSpecifier : public llvm::FoldingSetNode {
+
+  /// \brief Enumeration describing how the last component of the nested name
+  /// specifier is stored; the value is kept in the low bits of \c Prefix.
+ enum StoredSpecifierKind {
+ StoredIdentifier = 0,
+ StoredNamespaceOrAlias = 1,
+ StoredTypeSpec = 2,
+ StoredTypeSpecWithTemplate = 3
+ };
+
+ /// \brief The nested name specifier that precedes this nested name
+ /// specifier.
+ ///
+ /// The pointer is the nested-name-specifier that precedes this
+ /// one. The integer stores one of the first four values of type
+ /// SpecifierKind.
+ llvm::PointerIntPair<NestedNameSpecifier *, 2, StoredSpecifierKind> Prefix;
+
+ /// \brief The last component in the nested name specifier, which
+ /// can be an identifier, a declaration, or a type.
+ ///
+ /// When the pointer is NULL, this specifier represents the global
+ /// specifier '::'. Otherwise, the pointer is one of
+ /// IdentifierInfo*, Namespace*, or Type*, depending on the kind of
+ /// specifier as encoded within the prefix.
+ void* Specifier;
+
+public:
+ /// \brief The kind of specifier that completes this nested name
+ /// specifier.
+ enum SpecifierKind {
+ /// \brief An identifier, stored as an IdentifierInfo*.
+ Identifier,
+ /// \brief A namespace, stored as a NamespaceDecl*.
+ Namespace,
+ /// \brief A namespace alias, stored as a NamespaceAliasDecl*.
+ NamespaceAlias,
+ /// \brief A type, stored as a Type*.
+ TypeSpec,
+ /// \brief A type that was preceded by the 'template' keyword,
+ /// stored as a Type*.
+ TypeSpecWithTemplate,
+ /// \brief The global specifier '::'. There is no stored value.
+ Global
+ };
+
+private:
+ /// \brief Builds the global specifier.
+ NestedNameSpecifier() : Prefix(0, StoredIdentifier), Specifier(0) { }
+
+ /// \brief Copy constructor used internally to clone nested name
+ /// specifiers.
+ NestedNameSpecifier(const NestedNameSpecifier &Other)
+ : llvm::FoldingSetNode(Other), Prefix(Other.Prefix),
+ Specifier(Other.Specifier) {
+ }
+
+ NestedNameSpecifier &operator=(const NestedNameSpecifier &); // do not
+ // implement
+
+ /// \brief Either find or insert the given nested name specifier
+ /// mockup in the given context.
+ static NestedNameSpecifier *FindOrInsert(const ASTContext &Context,
+ const NestedNameSpecifier &Mockup);
+
+public:
+ /// \brief Builds a specifier combining a prefix and an identifier.
+ ///
+ /// The prefix must be dependent, since nested name specifiers
+ /// referencing an identifier are only permitted when the identifier
+ /// cannot be resolved.
+ static NestedNameSpecifier *Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix,
+ IdentifierInfo *II);
+
+ /// \brief Builds a nested name specifier that names a namespace.
+ static NestedNameSpecifier *Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix,
+ NamespaceDecl *NS);
+
+ /// \brief Builds a nested name specifier that names a namespace alias.
+ static NestedNameSpecifier *Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix,
+ NamespaceAliasDecl *Alias);
+
+ /// \brief Builds a nested name specifier that names a type.
+ static NestedNameSpecifier *Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix,
+ bool Template, const Type *T);
+
+ /// \brief Builds a specifier that consists of just an identifier.
+ ///
+ /// The nested-name-specifier is assumed to be dependent, but has no
+ /// prefix because the prefix is implied by something outside of the
+ /// nested name specifier, e.g., in "x->Base::f", the "x" has a dependent
+ /// type.
+ static NestedNameSpecifier *Create(const ASTContext &Context,
+ IdentifierInfo *II);
+
+ /// \brief Returns the nested name specifier representing the global
+ /// scope.
+ static NestedNameSpecifier *GlobalSpecifier(const ASTContext &Context);
+
+ /// \brief Return the prefix of this nested name specifier.
+ ///
+ /// The prefix contains all of the parts of the nested name
+  /// specifier that precede this current specifier. For example, for a
+ /// nested name specifier that represents "foo::bar::", the current
+ /// specifier will contain "bar::" and the prefix will contain
+ /// "foo::".
+ NestedNameSpecifier *getPrefix() const { return Prefix.getPointer(); }
+
+ /// \brief Determine what kind of nested name specifier is stored.
+ SpecifierKind getKind() const;
+
+ /// \brief Retrieve the identifier stored in this nested name
+ /// specifier.
+ IdentifierInfo *getAsIdentifier() const {
+ if (Prefix.getInt() == StoredIdentifier)
+ return (IdentifierInfo *)Specifier;
+
+ return 0;
+ }
+
+ /// \brief Retrieve the namespace stored in this nested name
+ /// specifier.
+ NamespaceDecl *getAsNamespace() const;
+
+ /// \brief Retrieve the namespace alias stored in this nested name
+ /// specifier.
+ NamespaceAliasDecl *getAsNamespaceAlias() const;
+
+ /// \brief Retrieve the type stored in this nested name specifier.
+ const Type *getAsType() const {
+ if (Prefix.getInt() == StoredTypeSpec ||
+ Prefix.getInt() == StoredTypeSpecWithTemplate)
+ return (const Type *)Specifier;
+
+ return 0;
+ }
+
+ /// \brief Whether this nested name specifier refers to a dependent
+ /// type or not.
+ bool isDependent() const;
+
+ /// \brief Whether this nested name specifier involves a template
+ /// parameter.
+ bool isInstantiationDependent() const;
+
+ /// \brief Whether this nested-name-specifier contains an unexpanded
+ /// parameter pack (for C++0x variadic templates).
+ bool containsUnexpandedParameterPack() const;
+
+ /// \brief Print this nested name specifier to the given output
+ /// stream.
+ void print(raw_ostream &OS, const PrintingPolicy &Policy) const;
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddPointer(Prefix.getOpaqueValue());
+ ID.AddPointer(Specifier);
+ }
+
+ /// \brief Dump the nested name specifier to standard output to aid
+ /// in debugging.
+ void dump(const LangOptions &LO);
+};
+
+/// \brief A C++ nested-name-specifier augmented with source location
+/// information.
+class NestedNameSpecifierLoc {
+ NestedNameSpecifier *Qualifier;
+ void *Data;
+
+ /// \brief Determines the data length for the last component in the
+ /// given nested-name-specifier.
+ static unsigned getLocalDataLength(NestedNameSpecifier *Qualifier);
+
+ /// \brief Determines the data length for the entire
+ /// nested-name-specifier.
+ static unsigned getDataLength(NestedNameSpecifier *Qualifier);
+
+public:
+ /// \brief Construct an empty nested-name-specifier.
+ NestedNameSpecifierLoc() : Qualifier(0), Data(0) { }
+
+  /// \brief Construct a nested-name-specifier with source location information
+  /// from the given nested-name-specifier and its opaque source-location data.
+ NestedNameSpecifierLoc(NestedNameSpecifier *Qualifier, void *Data)
+ : Qualifier(Qualifier), Data(Data) { }
+
+  /// \brief Evaluates to true when this nested-name-specifier location is
+ /// non-empty.
+ operator bool() const { return Qualifier; }
+
+ /// \brief Retrieve the nested-name-specifier to which this instance
+ /// refers.
+ NestedNameSpecifier *getNestedNameSpecifier() const {
+ return Qualifier;
+ }
+
+ /// \brief Retrieve the opaque pointer that refers to source-location data.
+ void *getOpaqueData() const { return Data; }
+
+ /// \brief Retrieve the source range covering the entirety of this
+ /// nested-name-specifier.
+ ///
+ /// For example, if this instance refers to a nested-name-specifier
+ /// \c ::std::vector<int>::, the returned source range would cover
+ /// from the initial '::' to the last '::'.
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ /// \brief Retrieve the source range covering just the last part of
+ /// this nested-name-specifier, not including the prefix.
+ ///
+ /// For example, if this instance refers to a nested-name-specifier
+ /// \c ::std::vector<int>::, the returned source range would cover
+ /// from "vector" to the last '::'.
+ SourceRange getLocalSourceRange() const;
+
+ /// \brief Retrieve the location of the beginning of this
+ /// nested-name-specifier.
+ SourceLocation getBeginLoc() const {
+ return getSourceRange().getBegin();
+ }
+
+ /// \brief Retrieve the location of the end of this
+ /// nested-name-specifier.
+ SourceLocation getEndLoc() const {
+ return getSourceRange().getEnd();
+ }
+
+ /// \brief Retrieve the location of the beginning of this
+ /// component of the nested-name-specifier.
+ SourceLocation getLocalBeginLoc() const {
+ return getLocalSourceRange().getBegin();
+ }
+
+ /// \brief Retrieve the location of the end of this component of the
+ /// nested-name-specifier.
+ SourceLocation getLocalEndLoc() const {
+ return getLocalSourceRange().getEnd();
+ }
+
+ /// \brief Return the prefix of this nested-name-specifier.
+ ///
+ /// For example, if this instance refers to a nested-name-specifier
+ /// \c ::std::vector<int>::, the prefix is \c ::std::. Note that the
+ /// returned prefix may be empty, if this is the first component of
+ /// the nested-name-specifier.
+ NestedNameSpecifierLoc getPrefix() const {
+ if (!Qualifier)
+ return *this;
+
+ return NestedNameSpecifierLoc(Qualifier->getPrefix(), Data);
+ }
+
+ /// \brief For a nested-name-specifier that refers to a type,
+ /// retrieve the type with source-location information.
+ TypeLoc getTypeLoc() const;
+
+ /// \brief Determines the data length for the entire
+ /// nested-name-specifier.
+ unsigned getDataLength() const { return getDataLength(Qualifier); }
+
+ friend bool operator==(NestedNameSpecifierLoc X,
+ NestedNameSpecifierLoc Y) {
+ return X.Qualifier == Y.Qualifier && X.Data == Y.Data;
+ }
+
+ friend bool operator!=(NestedNameSpecifierLoc X,
+ NestedNameSpecifierLoc Y) {
+ return !(X == Y);
+ }
+};
+
+/// \brief Class that aids in the construction of nested-name-specifiers along
+/// with source-location information for all of the components of the
+/// nested-name-specifier.
+class NestedNameSpecifierLocBuilder {
+ /// \brief The current representation of the nested-name-specifier we're
+ /// building.
+ NestedNameSpecifier *Representation;
+
+ /// \brief Buffer used to store source-location information for the
+ /// nested-name-specifier.
+ ///
+ /// Note that we explicitly manage the buffer (rather than using a
+ /// SmallVector) because \c Declarator expects it to be possible to memcpy()
+ /// a \c CXXScopeSpec, and CXXScopeSpec uses a NestedNameSpecifierLocBuilder.
+ char *Buffer;
+
+ /// \brief The size of the buffer used to store source-location information
+ /// for the nested-name-specifier.
+ unsigned BufferSize;
+
+ /// \brief The capacity of the buffer used to store source-location
+ /// information for the nested-name-specifier.
+ unsigned BufferCapacity;
+
+public:
+ NestedNameSpecifierLocBuilder()
+ : Representation(0), Buffer(0), BufferSize(0), BufferCapacity(0) { }
+
+ NestedNameSpecifierLocBuilder(const NestedNameSpecifierLocBuilder &Other);
+
+ NestedNameSpecifierLocBuilder &
+ operator=(const NestedNameSpecifierLocBuilder &Other);
+
+ ~NestedNameSpecifierLocBuilder() {
+ if (BufferCapacity)
+ free(Buffer);
+ }
+
+ /// \brief Retrieve the representation of the nested-name-specifier.
+ NestedNameSpecifier *getRepresentation() const { return Representation; }
+
+ /// \brief Extend the current nested-name-specifier by another
+ /// nested-name-specifier component of the form 'type::'.
+ ///
+ /// \param Context The AST context in which this nested-name-specifier
+ /// resides.
+ ///
+ /// \param TemplateKWLoc The location of the 'template' keyword, if present.
+ ///
+ /// \param TL The TypeLoc that describes the type preceding the '::'.
+ ///
+ /// \param ColonColonLoc The location of the trailing '::'.
+ void Extend(ASTContext &Context, SourceLocation TemplateKWLoc, TypeLoc TL,
+ SourceLocation ColonColonLoc);
+
+ /// \brief Extend the current nested-name-specifier by another
+ /// nested-name-specifier component of the form 'identifier::'.
+ ///
+ /// \param Context The AST context in which this nested-name-specifier
+ /// resides.
+ ///
+ /// \param Identifier The identifier.
+ ///
+ /// \param IdentifierLoc The location of the identifier.
+ ///
+ /// \param ColonColonLoc The location of the trailing '::'.
+ void Extend(ASTContext &Context, IdentifierInfo *Identifier,
+ SourceLocation IdentifierLoc, SourceLocation ColonColonLoc);
+
+ /// \brief Extend the current nested-name-specifier by another
+ /// nested-name-specifier component of the form 'namespace::'.
+ ///
+ /// \param Context The AST context in which this nested-name-specifier
+ /// resides.
+ ///
+ /// \param Namespace The namespace.
+ ///
+ /// \param NamespaceLoc The location of the namespace name.
+ ///
+ /// \param ColonColonLoc The location of the trailing '::'.
+ void Extend(ASTContext &Context, NamespaceDecl *Namespace,
+ SourceLocation NamespaceLoc, SourceLocation ColonColonLoc);
+
+ /// \brief Extend the current nested-name-specifier by another
+ /// nested-name-specifier component of the form 'namespace-alias::'.
+ ///
+ /// \param Context The AST context in which this nested-name-specifier
+ /// resides.
+ ///
+ /// \param Alias The namespace alias.
+ ///
+ /// \param AliasLoc The location of the namespace alias
+ /// name.
+ ///
+ /// \param ColonColonLoc The location of the trailing '::'.
+ void Extend(ASTContext &Context, NamespaceAliasDecl *Alias,
+ SourceLocation AliasLoc, SourceLocation ColonColonLoc);
+
+ /// \brief Turn this (empty) nested-name-specifier into the global
+ /// nested-name-specifier '::'.
+ void MakeGlobal(ASTContext &Context, SourceLocation ColonColonLoc);
+
+ /// \brief Make a new nested-name-specifier from incomplete source-location
+ /// information.
+ ///
+ /// This routine should be used very, very rarely, in cases where we
+ /// need to synthesize a nested-name-specifier. Most code should instead use
+ /// \c Adopt() with a proper \c NestedNameSpecifierLoc.
+ void MakeTrivial(ASTContext &Context, NestedNameSpecifier *Qualifier,
+ SourceRange R);
+
+ /// \brief Adopt an existing nested-name-specifier (with source-range
+ /// information).
+ void Adopt(NestedNameSpecifierLoc Other);
+
+ /// \brief Retrieve the source range covered by this nested-name-specifier.
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return NestedNameSpecifierLoc(Representation, Buffer).getSourceRange();
+ }
+
+ /// \brief Retrieve a nested-name-specifier with location information,
+ /// copied into the given AST context.
+ ///
+ /// \param Context The context into which this nested-name-specifier will be
+ /// copied.
+ NestedNameSpecifierLoc getWithLocInContext(ASTContext &Context) const;
+
+ /// \brief Retrieve a nested-name-specifier with location
+ /// information based on the information in this builder. This loc
+ /// will contain references to the builder's internal data and may
+ /// be invalidated by any change to the builder.
+ NestedNameSpecifierLoc getTemporary() const {
+ return NestedNameSpecifierLoc(Representation, Buffer);
+ }
+
+ /// \brief Clear out this builder, and prepare it to build another
+ /// nested-name-specifier with source-location information.
+ void Clear() {
+ Representation = 0;
+ BufferSize = 0;
+ }
+
+ /// \brief Retrieve the underlying buffer.
+ ///
+ /// \returns A pair containing a pointer to the buffer of source-location
+ /// data and the size of the source-location data that resides in that
+ /// buffer.
+ std::pair<char *, unsigned> getBuffer() const {
+ return std::make_pair(Buffer, BufferSize);
+ }
+};
+
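+// A minimal usage sketch (illustrative, not part of this header), assuming
+// the caller provides an ASTContext 'Context', a NamespaceDecl 'StdNS', and
+// the relevant SourceLocations; all of these names are placeholders.
+//
+//   NestedNameSpecifierLocBuilder Builder;
+//   Builder.MakeGlobal(Context, FirstCCLoc);             // "::"
+//   Builder.Extend(Context, StdNS, StdLoc, SecondCCLoc); // "::std::"
+//   NestedNameSpecifierLoc QualLoc = Builder.getWithLocInContext(Context);
+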
+/// Insertion operator for diagnostics. This allows sending
+/// NestedNameSpecifiers into a diagnostic with <<.
+inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ NestedNameSpecifier *NNS) {
+ DB.AddTaggedVal(reinterpret_cast<intptr_t>(NNS),
+ DiagnosticsEngine::ak_nestednamespec);
+ return DB;
+}
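+
+// Illustrative use (the diagnostic method, location, and diagnostic ID below
+// are placeholders, not defined by this header):
+//   Diag(Loc, diag::err_example) << NNS;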
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h b/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h
new file mode 100644
index 0000000..258637d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/OperationKinds.h
@@ -0,0 +1,345 @@
+//===- OperationKinds.h - Operation enums -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file enumerates the different kinds of operations that can be
+// performed by various expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_OPERATION_KINDS_H
+#define LLVM_CLANG_AST_OPERATION_KINDS_H
+
+namespace clang {
+
+/// CastKind - The kind of operation required for a conversion.
+enum CastKind {
+ /// CK_Dependent - A conversion which cannot yet be analyzed because
+ /// either the expression or target type is dependent. These are
+ /// created only for explicit casts; dependent ASTs aren't required
+ /// to even approximately type-check.
+ /// (T*) malloc(sizeof(T))
+ /// reinterpret_cast<intptr_t>(A<T>::alloc());
+ CK_Dependent,
+
+ /// CK_BitCast - A conversion which causes a bit pattern of one type
+ /// to be reinterpreted as a bit pattern of another type. Generally
+ /// the operands must have equivalent size and unrelated types.
+ ///
+ /// The pointer conversion char* -> int* is a bitcast. A conversion
+ /// from any pointer type to a C pointer type is a bitcast unless
+ /// it's actually BaseToDerived or DerivedToBase. A conversion to a
+ /// block pointer or ObjC pointer type is a bitcast only if the
+ /// operand has the same type kind; otherwise, it's one of the
+ /// specialized casts below.
+ ///
+ /// Vector coercions are bitcasts.
+ CK_BitCast,
+
+ /// CK_LValueBitCast - A conversion which reinterprets the address of
+ /// an l-value as an l-value of a different kind. Used for
+ /// reinterpret_casts of l-value expressions to reference types.
+ /// bool b; reinterpret_cast<char&>(b) = 'a';
+ CK_LValueBitCast,
+
+ /// CK_LValueToRValue - A conversion which causes the extraction of
+ /// an r-value from the operand gl-value. The result of an r-value
+ /// conversion is always unqualified.
+ CK_LValueToRValue,
+
+ /// CK_NoOp - A conversion which does not affect the type other than
+ /// (possibly) adding qualifiers.
+ /// int -> int
+ /// char** -> const char * const *
+ CK_NoOp,
+
+ /// CK_BaseToDerived - A conversion from a C++ class pointer/reference
+ /// to a derived class pointer/reference.
+ /// B *b = static_cast<B*>(a);
+ CK_BaseToDerived,
+
+ /// CK_DerivedToBase - A conversion from a C++ class pointer
+ /// to a base class pointer.
+ /// A *a = new B();
+ CK_DerivedToBase,
+
+ /// CK_UncheckedDerivedToBase - A conversion from a C++ class
+ /// pointer/reference to a base class that can assume that the
+ /// derived pointer is not null.
+ /// const A &a = B();
+ /// b->method_from_a();
+ CK_UncheckedDerivedToBase,
+
+ /// CK_Dynamic - A C++ dynamic_cast.
+ CK_Dynamic,
+
+ /// CK_ToUnion - The GCC cast-to-union extension.
+ /// int -> union { int x; float y; }
+ /// float -> union { int x; float y; }
+ CK_ToUnion,
+
+ /// CK_ArrayToPointerDecay - Array to pointer decay.
+ /// int[10] -> int*
+ /// char[5][6] -> char(*)[6]
+ CK_ArrayToPointerDecay,
+
+ /// CK_FunctionToPointerDecay - Function to pointer decay.
+ /// void(int) -> void(*)(int)
+ CK_FunctionToPointerDecay,
+
+ /// CK_NullToPointer - Null pointer constant to pointer, ObjC
+ /// pointer, or block pointer.
+ /// (void*) 0
+ /// void (^block)() = 0;
+ CK_NullToPointer,
+
+ /// CK_NullToMemberPointer - Null pointer constant to member pointer.
+ /// int A::*mptr = 0;
+ /// int (A::*fptr)(int) = nullptr;
+ CK_NullToMemberPointer,
+
+ /// CK_BaseToDerivedMemberPointer - Member pointer in base class to
+ /// member pointer in derived class.
+ /// int B::*mptr = &A::member;
+ CK_BaseToDerivedMemberPointer,
+
+ /// CK_DerivedToBaseMemberPointer - Member pointer in derived class to
+ /// member pointer in base class.
+ /// int A::*mptr = static_cast<int A::*>(&B::member);
+ CK_DerivedToBaseMemberPointer,
+
+ /// CK_MemberPointerToBoolean - Member pointer to boolean. A check
+ /// against the null member pointer.
+ CK_MemberPointerToBoolean,
+
+ /// CK_ReinterpretMemberPointer - Reinterpret a member pointer as a
+ /// different kind of member pointer. C++ forbids this from
+ /// crossing between function and object types, but otherwise does
+ /// not restrict it. However, the only operation that is permitted
+ /// on a "punned" member pointer is casting it back to the original
+ /// type, which is required to be a lossless operation (although
+ /// many ABIs do not guarantee this on all possible intermediate types).
+ CK_ReinterpretMemberPointer,
+
+ /// CK_UserDefinedConversion - Conversion using a user defined type
+ /// conversion function.
+ /// struct A { operator int(); }; int i = int(A());
+ CK_UserDefinedConversion,
+
+ /// CK_ConstructorConversion - Conversion by constructor.
+ /// struct A { A(int); }; A a = A(10);
+ CK_ConstructorConversion,
+
+ /// CK_IntegralToPointer - Integral to pointer. A special kind of
+ /// reinterpreting conversion. Applies to normal, ObjC, and block
+ /// pointers.
+ /// (char*) 0x1001aab0
+ /// reinterpret_cast<int*>(0)
+ CK_IntegralToPointer,
+
+ /// CK_PointerToIntegral - Pointer to integral. A special kind of
+ /// reinterpreting conversion. Applies to normal, ObjC, and block
+ /// pointers.
+ /// (intptr_t) "help!"
+ CK_PointerToIntegral,
+
+ /// CK_PointerToBoolean - Pointer to boolean conversion. A check
+ /// against null. Applies to normal, ObjC, and block pointers.
+ CK_PointerToBoolean,
+
+ /// CK_ToVoid - Cast to void, discarding the computed value.
+ /// (void) malloc(2048)
+ CK_ToVoid,
+
+ /// CK_VectorSplat - A conversion from an arithmetic type to a
+ /// vector of that element type. Fills all elements ("splats") with
+ /// the source value.
+ /// __attribute__((ext_vector_type(4))) int v = 5;
+ CK_VectorSplat,
+
+ /// CK_IntegralCast - A cast between integral types (other than to
+ /// boolean). Variously a bitcast, a truncation, a sign-extension,
+ /// or a zero-extension.
+ /// long l = 5;
+ /// (unsigned) i
+ CK_IntegralCast,
+
+ /// CK_IntegralToBoolean - Integral to boolean. A check against zero.
+ /// (bool) i
+ CK_IntegralToBoolean,
+
+ /// CK_IntegralToFloating - Integral to floating point.
+ /// float f = i;
+ CK_IntegralToFloating,
+
+ /// CK_FloatingToIntegral - Floating point to integral. Rounds
+ /// towards zero, discarding any fractional component.
+ /// (int) f
+ CK_FloatingToIntegral,
+
+ /// CK_FloatingToBoolean - Floating point to boolean.
+ /// (bool) f
+ CK_FloatingToBoolean,
+
+ /// CK_FloatingCast - Casting between floating types of different size.
+ /// (double) f
+ /// (float) ld
+ CK_FloatingCast,
+
+ /// CK_CPointerToObjCPointerCast - Casting a C pointer kind to an
+ /// Objective-C pointer.
+ CK_CPointerToObjCPointerCast,
+
+ /// CK_BlockPointerToObjCPointerCast - Casting a block pointer to an
+ /// ObjC pointer.
+ CK_BlockPointerToObjCPointerCast,
+
+ /// CK_AnyPointerToBlockPointerCast - Casting any non-block pointer
+ /// to a block pointer. Block-to-block casts are bitcasts.
+ CK_AnyPointerToBlockPointerCast,
+
+ /// \brief Converting between two Objective-C object types, which
+ /// can occur when performing reference binding to an Objective-C
+ /// object.
+ CK_ObjCObjectLValueCast,
+
+ /// \brief A conversion of a floating point real to a floating point
+ /// complex of the original type. Injects the value as the real
+ /// component with a zero imaginary component.
+ /// float -> _Complex float
+ CK_FloatingRealToComplex,
+
+ /// \brief Converts a floating point complex to floating point real
+ /// of the source's element type. Just discards the imaginary
+ /// component.
+ /// _Complex long double -> long double
+ CK_FloatingComplexToReal,
+
+ /// \brief Converts a floating point complex to bool by comparing
+ /// against 0+0i.
+ CK_FloatingComplexToBoolean,
+
+ /// \brief Converts between different floating point complex types.
+ /// _Complex float -> _Complex double
+ CK_FloatingComplexCast,
+
+ /// \brief Converts from a floating complex to an integral complex.
+ /// _Complex float -> _Complex int
+ CK_FloatingComplexToIntegralComplex,
+
+ /// \brief Converts from an integral real to an integral complex
+ /// whose element type matches the source. Injects the value as
+ /// the real component with a zero imaginary component.
+ /// long -> _Complex long
+ CK_IntegralRealToComplex,
+
+ /// \brief Converts an integral complex to an integral real of the
+ /// source's element type by discarding the imaginary component.
+ /// _Complex short -> short
+ CK_IntegralComplexToReal,
+
+ /// \brief Converts an integral complex to bool by comparing against
+ /// 0+0i.
+ CK_IntegralComplexToBoolean,
+
+ /// \brief Converts between different integral complex types.
+ /// _Complex char -> _Complex long long
+ /// _Complex unsigned int -> _Complex signed int
+ CK_IntegralComplexCast,
+
+ /// \brief Converts from an integral complex to a floating complex.
+ /// _Complex unsigned -> _Complex float
+ CK_IntegralComplexToFloatingComplex,
+
+ /// \brief [ARC] Produces a retainable object pointer so that it may
+ /// be consumed, e.g. by being passed to a consuming parameter.
+ /// Calls objc_retain.
+ CK_ARCProduceObject,
+
+ /// \brief [ARC] Consumes a retainable object pointer that has just
+ /// been produced, e.g. as the return value of a retaining call.
+ /// Enters a cleanup to call objc_release at some indefinite time.
+ CK_ARCConsumeObject,
+
+ /// \brief [ARC] Reclaim a retainable object pointer object that may
+ /// have been produced and autoreleased as part of a function return
+ /// sequence.
+ CK_ARCReclaimReturnedObject,
+
+ /// \brief [ARC] Causes a value of block type to be copied to the
+ /// heap, if it is not already there. A number of other operations
+ /// in ARC cause blocks to be copied; this is for cases where that
+ /// would not otherwise be guaranteed, such as when casting to a
+ /// non-block pointer type.
+ CK_ARCExtendBlockObject,
+
+ /// \brief Converts from _Atomic(T) to T.
+ CK_AtomicToNonAtomic,
+ /// \brief Converts from T to _Atomic(T).
+ CK_NonAtomicToAtomic,
+
+  /// \brief Causes a block literal to be copied to the heap and then
+ /// autoreleased.
+ ///
+ /// This particular cast kind is used for the conversion from a C++11
+ /// lambda expression to a block pointer.
+ CK_CopyAndAutoreleaseBlockObject
+};
+
+#define CK_Invalid ((CastKind) -1)
+
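+// Illustrative sketch (not part of this header): client code typically reads
+// the cast kind off a CastExpr and dispatches on it; 'E' and 'handleLoad'
+// below are placeholders.
+//
+//   if (const CastExpr *CE = dyn_cast<CastExpr>(E))
+//     if (CE->getCastKind() == CK_LValueToRValue)
+//       handleLoad(CE->getSubExpr());
+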
+enum BinaryOperatorKind {
+ // Operators listed in order of precedence.
+ // Note that additions to this should also update the StmtVisitor class.
+ BO_PtrMemD, BO_PtrMemI, // [C++ 5.5] Pointer-to-member operators.
+ BO_Mul, BO_Div, BO_Rem, // [C99 6.5.5] Multiplicative operators.
+ BO_Add, BO_Sub, // [C99 6.5.6] Additive operators.
+ BO_Shl, BO_Shr, // [C99 6.5.7] Bitwise shift operators.
+ BO_LT, BO_GT, BO_LE, BO_GE, // [C99 6.5.8] Relational operators.
+ BO_EQ, BO_NE, // [C99 6.5.9] Equality operators.
+ BO_And, // [C99 6.5.10] Bitwise AND operator.
+ BO_Xor, // [C99 6.5.11] Bitwise XOR operator.
+ BO_Or, // [C99 6.5.12] Bitwise OR operator.
+ BO_LAnd, // [C99 6.5.13] Logical AND operator.
+ BO_LOr, // [C99 6.5.14] Logical OR operator.
+ BO_Assign, BO_MulAssign, // [C99 6.5.16] Assignment operators.
+ BO_DivAssign, BO_RemAssign,
+ BO_AddAssign, BO_SubAssign,
+ BO_ShlAssign, BO_ShrAssign,
+ BO_AndAssign, BO_XorAssign,
+ BO_OrAssign,
+ BO_Comma // [C99 6.5.17] Comma operator.
+};
+
+enum UnaryOperatorKind {
+ // Note that additions to this should also update the StmtVisitor class.
+ UO_PostInc, UO_PostDec, // [C99 6.5.2.4] Postfix increment and decrement
+ UO_PreInc, UO_PreDec, // [C99 6.5.3.1] Prefix increment and decrement
+ UO_AddrOf, UO_Deref, // [C99 6.5.3.2] Address and indirection
+ UO_Plus, UO_Minus, // [C99 6.5.3.3] Unary arithmetic
+ UO_Not, UO_LNot, // [C99 6.5.3.3] Unary arithmetic
+ UO_Real, UO_Imag, // "__real expr"/"__imag expr" Extension.
+ UO_Extension // __extension__ marker.
+};
+
+/// \brief The kind of bridging performed by the Objective-C bridge cast.
+enum ObjCBridgeCastKind {
+ /// \brief Bridging via __bridge, which does nothing but reinterpret
+ /// the bits.
+ OBC_Bridge,
+ /// \brief Bridging via __bridge_transfer, which transfers ownership of an
+ /// Objective-C pointer into ARC.
+ OBC_BridgeTransfer,
+ /// \brief Bridging via __bridge_retain, which makes an ARC object available
+ /// as a +1 C pointer.
+ OBC_BridgeRetained
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/ParentMap.h b/contrib/llvm/tools/clang/include/clang/AST/ParentMap.h
new file mode 100644
index 0000000..62eae02
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/ParentMap.h
@@ -0,0 +1,62 @@
+//===--- ParentMap.h - Mappings from Stmts to their Parents -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ParentMap class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_PARENTMAP_H
+#define LLVM_CLANG_PARENTMAP_H
+
+namespace clang {
+class Stmt;
+class Expr;
+
+class ParentMap {
+ void* Impl;
+public:
+ ParentMap(Stmt* ASTRoot);
+ ~ParentMap();
+
+  /// \brief Adds and/or updates the parent/child relations of the complete
+  /// stmt tree of S. All children of S, including indirect descendants, are
+  /// visited and updated or inserted, but the parents of S are not.
+ void addStmt(Stmt* S);
+
+ Stmt *getParent(Stmt*) const;
+ Stmt *getParentIgnoreParens(Stmt *) const;
+ Stmt *getParentIgnoreParenCasts(Stmt *) const;
+ Stmt *getParentIgnoreParenImpCasts(Stmt *) const;
+ Stmt *getOuterParenParent(Stmt *) const;
+
+ const Stmt *getParent(const Stmt* S) const {
+ return getParent(const_cast<Stmt*>(S));
+ }
+
+ const Stmt *getParentIgnoreParens(const Stmt *S) const {
+ return getParentIgnoreParens(const_cast<Stmt*>(S));
+ }
+
+ const Stmt *getParentIgnoreParenCasts(const Stmt *S) const {
+ return getParentIgnoreParenCasts(const_cast<Stmt*>(S));
+ }
+
+ bool hasParent(Stmt* S) const {
+ return getParent(S) != 0;
+ }
+
+ bool isConsumedExpr(Expr *E) const;
+
+ bool isConsumedExpr(const Expr *E) const {
+ return isConsumedExpr(const_cast<Expr*>(E));
+ }
+};
+
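+// A minimal usage sketch (illustrative, not part of this header), assuming a
+// Stmt* 'Body' (e.g. a function body) and a Stmt* 'S' somewhere inside it:
+//
+//   ParentMap PM(Body);
+//   if (Stmt *P = PM.getParentIgnoreParens(S)) {
+//     // P is the nearest enclosing statement of S, skipping ParenExprs.
+//   }
+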
+} // end clang namespace
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h b/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h
new file mode 100644
index 0000000..2e34dc8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/PrettyPrinter.h
@@ -0,0 +1,146 @@
+//===--- PrettyPrinter.h - Classes for aiding with AST printing -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PrinterHelper interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_PRETTY_PRINTER_H
+#define LLVM_CLANG_AST_PRETTY_PRINTER_H
+
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/LLVM.h"
+
+namespace clang {
+
+class Stmt;
+class TagDecl;
+class LangOptions;
+
+class PrinterHelper {
+public:
+ virtual ~PrinterHelper();
+ virtual bool handledStmt(Stmt* E, raw_ostream& OS) = 0;
+};
+
+/// \brief Describes how types, statements, expressions, and
+/// declarations should be printed.
+struct PrintingPolicy {
+ /// \brief Create a default printing policy for C.
+ PrintingPolicy(const LangOptions &LO)
+ : Indentation(2), LangOpts(LO), SuppressSpecifiers(false),
+ SuppressTagKeyword(false), SuppressTag(false), SuppressScope(false),
+ SuppressUnwrittenScope(false), SuppressInitializers(false),
+ Dump(false), ConstantArraySizeAsWritten(false),
+ AnonymousTagLocations(true), SuppressStrongLifetime(false),
+ Bool(LO.Bool) { }
+
+ /// \brief The number of spaces to use to indent each line.
+ unsigned Indentation : 8;
+
+ /// \brief What language we're printing.
+ LangOptions LangOpts;
+
+ /// \brief Whether we should suppress printing of the actual specifiers for
+ /// the given type or declaration.
+ ///
+ /// This flag is only used when we are printing declarators beyond
+ /// the first declarator within a declaration group. For example, given:
+ ///
+ /// \code
+ /// const int *x, *y;
+ /// \endcode
+ ///
+ /// SuppressSpecifiers will be false when printing the
+ /// declaration for "x", so that we will print "int *x"; it will be
+ /// \c true when we print "y", so that we suppress printing the
+ /// "const int" type specifier and instead only print the "*y".
+ bool SuppressSpecifiers : 1;
+
+ /// \brief Whether type printing should skip printing the tag keyword.
+ ///
+  /// This is used when printing the inner type of elaborated types
+  /// (as the tag keyword is part of the elaborated type):
+ ///
+ /// \code
+ /// struct Geometry::Point;
+ /// \endcode
+ bool SuppressTagKeyword : 1;
+
+ /// \brief Whether type printing should skip printing the actual tag type.
+ ///
+ /// This is used when the caller needs to print a tag definition in front
+ /// of the type, as in constructs like the following:
+ ///
+ /// \code
+ /// typedef struct { int x, y; } Point;
+ /// \endcode
+ bool SuppressTag : 1;
+
+ /// \brief Suppresses printing of scope specifiers.
+ bool SuppressScope : 1;
+
+ /// \brief Suppress printing parts of scope specifiers that don't need
+ /// to be written, e.g., for inline or anonymous namespaces.
+ bool SuppressUnwrittenScope : 1;
+
+ /// \brief Suppress printing of variable initializers.
+ ///
+ /// This flag is used when printing the loop variable in a for-range
+ /// statement. For example, given:
+ ///
+ /// \code
+ /// for (auto x : coll)
+ /// \endcode
+ ///
+ /// SuppressInitializers will be true when printing "auto x", so that the
+ /// internal initializer constructed for x will not be printed.
+ bool SuppressInitializers : 1;
+
+ /// \brief True when we are "dumping" rather than "pretty-printing",
+ /// where dumping involves printing the internal details of the AST
+ /// and pretty-printing involves printing something similar to
+ /// source code.
+ bool Dump : 1;
+
+ /// \brief Whether we should print the sizes of constant array expressions
+ /// as written in the sources.
+ ///
+  /// This flag determines whether array types declared as
+ ///
+ /// \code
+ /// int a[4+10*10];
+ /// char a[] = "A string";
+ /// \endcode
+ ///
+ /// will be printed as written or as follows:
+ ///
+ /// \code
+ /// int a[104];
+ /// char a[9] = "A string";
+ /// \endcode
+ bool ConstantArraySizeAsWritten : 1;
+
+ /// \brief When printing an anonymous tag name, also print the location of
+ /// that entity (e.g., "enum <anonymous at t.h:10:5>"). Otherwise, just
+ /// prints "<anonymous>" for the name.
+ bool AnonymousTagLocations : 1;
+
+ /// \brief When true, suppress printing of the __strong lifetime qualifier in
+ /// ARC.
+ unsigned SuppressStrongLifetime : 1;
+
+ /// \brief Whether we can use 'bool' rather than '_Bool', even if the language
+ /// doesn't actually have 'bool' (because, e.g., it is defined as a macro).
+ unsigned Bool : 1;
+};
+
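+// A minimal usage sketch (illustrative, not part of this header), assuming a
+// LangOptions object 'LangOpts' and a Decl* 'D' supplied by the caller:
+//
+//   PrintingPolicy Policy(LangOpts);
+//   Policy.SuppressTagKeyword = true; // print "Point", not "struct Point"
+//   D->print(llvm::errs(), Policy);
+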
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h b/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h
new file mode 100644
index 0000000..ec07267
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/RecordLayout.h
@@ -0,0 +1,228 @@
+//===--- RecordLayout.h - Layout information for a struct/union -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RecordLayout interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_LAYOUTINFO_H
+#define LLVM_CLANG_AST_LAYOUTINFO_H
+
+#include "llvm/Support/DataTypes.h"
+#include "llvm/ADT/DenseMap.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclCXX.h"
+
+namespace clang {
+ class ASTContext;
+ class FieldDecl;
+ class RecordDecl;
+ class CXXRecordDecl;
+
+/// ASTRecordLayout -
+/// This class contains layout information for one RecordDecl,
+/// which is a struct/union/class. The decl represented must be a definition,
+/// not a forward declaration.
+/// This class is also used to contain layout information for one
+/// ObjCInterfaceDecl. FIXME - Find appropriate name.
+/// These objects are managed by ASTContext.
+class ASTRecordLayout {
+ /// Size - Size of record in characters.
+ CharUnits Size;
+
+ /// DataSize - Size of record in characters without tail padding.
+ CharUnits DataSize;
+
+ /// FieldOffsets - Array of field offsets in bits.
+ uint64_t *FieldOffsets;
+
+ // Alignment - Alignment of record in characters.
+ CharUnits Alignment;
+
+ // FieldCount - Number of fields.
+ unsigned FieldCount;
+
+ /// CXXRecordLayoutInfo - Contains C++ specific layout information.
+ struct CXXRecordLayoutInfo {
+ /// NonVirtualSize - The non-virtual size (in chars) of an object, which is
+ /// the size of the object without virtual bases.
+ CharUnits NonVirtualSize;
+
+ /// NonVirtualAlign - The non-virtual alignment (in chars) of an object,
+ /// which is the alignment of the object without virtual bases.
+ CharUnits NonVirtualAlign;
+
+ /// SizeOfLargestEmptySubobject - The size of the largest empty subobject
+ /// (either a base or a member). Will be zero if the class doesn't contain
+ /// any empty subobjects.
+ CharUnits SizeOfLargestEmptySubobject;
+
+ /// VFPtrOffset - Virtual function table offset (Microsoft-only).
+ CharUnits VFPtrOffset;
+
+ /// VBPtrOffset - Virtual base table offset (Microsoft-only).
+ CharUnits VBPtrOffset;
+
+ /// PrimaryBase - The primary base info for this record.
+ llvm::PointerIntPair<const CXXRecordDecl *, 1, bool> PrimaryBase;
+
+ /// FIXME: This should really use a SmallPtrMap, once we have one in LLVM :)
+ typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy;
+
+ /// BaseOffsets - Contains a map from base classes to their offset.
+ BaseOffsetsMapTy BaseOffsets;
+
+ /// VBaseOffsets - Contains a map from vbase classes to their offset.
+ BaseOffsetsMapTy VBaseOffsets;
+ };
+
+ /// CXXInfo - If the record layout is for a C++ record, this will have
+ /// C++ specific information about the record.
+ CXXRecordLayoutInfo *CXXInfo;
+
+ friend class ASTContext;
+
+ ASTRecordLayout(const ASTContext &Ctx, CharUnits size, CharUnits alignment,
+ CharUnits datasize, const uint64_t *fieldoffsets,
+ unsigned fieldcount);
+
+ // Constructor for C++ records.
+ typedef CXXRecordLayoutInfo::BaseOffsetsMapTy BaseOffsetsMapTy;
+ ASTRecordLayout(const ASTContext &Ctx,
+ CharUnits size, CharUnits alignment,
+ CharUnits vfptroffset, CharUnits vbptroffset,
+ CharUnits datasize,
+ const uint64_t *fieldoffsets, unsigned fieldcount,
+ CharUnits nonvirtualsize, CharUnits nonvirtualalign,
+ CharUnits SizeOfLargestEmptySubobject,
+ const CXXRecordDecl *PrimaryBase,
+ bool IsPrimaryBaseVirtual,
+ const BaseOffsetsMapTy& BaseOffsets,
+ const BaseOffsetsMapTy& VBaseOffsets);
+
+ ~ASTRecordLayout() {}
+
+ void Destroy(ASTContext &Ctx);
+
+ ASTRecordLayout(const ASTRecordLayout&); // DO NOT IMPLEMENT
+ void operator=(const ASTRecordLayout&); // DO NOT IMPLEMENT
+public:
+
+ /// getAlignment - Get the record alignment in characters.
+ CharUnits getAlignment() const { return Alignment; }
+
+ /// getSize - Get the record size in characters.
+ CharUnits getSize() const { return Size; }
+
+ /// getFieldCount - Get the number of fields in the layout.
+ unsigned getFieldCount() const { return FieldCount; }
+
+ /// getFieldOffset - Get the offset of the given field index, in
+ /// bits.
+ uint64_t getFieldOffset(unsigned FieldNo) const {
+ assert (FieldNo < FieldCount && "Invalid Field No");
+ return FieldOffsets[FieldNo];
+ }
+
+ /// getDataSize() - Get the record data size, which is the record size
+ /// without tail padding, in characters.
+ CharUnits getDataSize() const {
+ return DataSize;
+ }
+
+ /// getNonVirtualSize - Get the non-virtual size (in chars) of an object,
+ /// which is the size of the object without virtual bases.
+ CharUnits getNonVirtualSize() const {
+ assert(CXXInfo && "Record layout does not have C++ specific info!");
+
+ return CXXInfo->NonVirtualSize;
+ }
+
+  /// getNonVirtualAlign - Get the non-virtual alignment (in chars) of an object,
+ /// which is the alignment of the object without virtual bases.
+ CharUnits getNonVirtualAlign() const {
+ assert(CXXInfo && "Record layout does not have C++ specific info!");
+
+ return CXXInfo->NonVirtualAlign;
+ }
+
+ /// getPrimaryBase - Get the primary base for this record.
+ const CXXRecordDecl *getPrimaryBase() const {
+ assert(CXXInfo && "Record layout does not have C++ specific info!");
+
+ return CXXInfo->PrimaryBase.getPointer();
+ }
+
+ /// isPrimaryBaseVirtual - Get whether the primary base for this record
+ /// is virtual or not.
+ bool isPrimaryBaseVirtual() const {
+ assert(CXXInfo && "Record layout does not have C++ specific info!");
+
+ return CXXInfo->PrimaryBase.getInt();
+ }
+
+ /// getBaseClassOffset - Get the offset, in chars, for the given base class.
+ CharUnits getBaseClassOffset(const CXXRecordDecl *Base) const {
+ assert(CXXInfo && "Record layout does not have C++ specific info!");
+ assert(CXXInfo->BaseOffsets.count(Base) && "Did not find base!");
+
+ return CXXInfo->BaseOffsets[Base];
+ }
+
+  /// getVBaseClassOffset - Get the offset, in chars, for the given virtual
+  /// base class.
+ CharUnits getVBaseClassOffset(const CXXRecordDecl *VBase) const {
+ assert(CXXInfo && "Record layout does not have C++ specific info!");
+ assert(CXXInfo->VBaseOffsets.count(VBase) && "Did not find base!");
+
+ return CXXInfo->VBaseOffsets[VBase];
+ }
+
+ /// getBaseClassOffsetInBits - Get the offset, in bits, for the given
+ /// base class.
+ uint64_t getBaseClassOffsetInBits(const CXXRecordDecl *Base) const {
+ assert(CXXInfo && "Record layout does not have C++ specific info!");
+ assert(CXXInfo->BaseOffsets.count(Base) && "Did not find base!");
+
+ return getBaseClassOffset(Base).getQuantity() *
+ Base->getASTContext().getCharWidth();
+ }
+
+ /// getVBaseClassOffsetInBits - Get the offset, in bits, for the given
+  /// virtual base class.
+ uint64_t getVBaseClassOffsetInBits(const CXXRecordDecl *VBase) const {
+ assert(CXXInfo && "Record layout does not have C++ specific info!");
+ assert(CXXInfo->VBaseOffsets.count(VBase) && "Did not find base!");
+
+ return getVBaseClassOffset(VBase).getQuantity() *
+ VBase->getASTContext().getCharWidth();
+ }
+
+ CharUnits getSizeOfLargestEmptySubobject() const {
+ assert(CXXInfo && "Record layout does not have C++ specific info!");
+ return CXXInfo->SizeOfLargestEmptySubobject;
+ }
+
+ /// getVFPtrOffset - Get the offset for virtual function table pointer.
+ /// This is only meaningful with the Microsoft ABI.
+ CharUnits getVFPtrOffset() const {
+ assert(CXXInfo && "Record layout does not have C++ specific info!");
+ return CXXInfo->VFPtrOffset;
+ }
+
+ /// getVBPtrOffset - Get the offset for virtual base table pointer.
+ /// This is only meaningful with the Microsoft ABI.
+ CharUnits getVBPtrOffset() const {
+ assert(CXXInfo && "Record layout does not have C++ specific info!");
+ return CXXInfo->VBPtrOffset;
+ }
+};
+
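+// A minimal usage sketch (illustrative, not part of this header), assuming an
+// ASTContext 'Ctx' and a RecordDecl 'RD' that is a complete definition:
+//
+//   const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
+//   CharUnits Size = Layout.getSize();              // total size in chars
+//   uint64_t Field0Bits = Layout.getFieldOffset(0); // first field, in bits
+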
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h
new file mode 100644
index 0000000..a4ad525
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -0,0 +1,2241 @@
+//===--- RecursiveASTVisitor.h - Recursive AST Visitor ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RecursiveASTVisitor interface, which recursively
+// traverses the entire AST.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_AST_RECURSIVEASTVISITOR_H
+#define LLVM_CLANG_AST_RECURSIVEASTVISITOR_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/TemplateName.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
+
+// The following three macros are used for meta programming. The code
+// using them is responsible for defining macro OPERATOR().
+
+// All unary operators.
+#define UNARYOP_LIST() \
+ OPERATOR(PostInc) OPERATOR(PostDec) \
+ OPERATOR(PreInc) OPERATOR(PreDec) \
+ OPERATOR(AddrOf) OPERATOR(Deref) \
+ OPERATOR(Plus) OPERATOR(Minus) \
+ OPERATOR(Not) OPERATOR(LNot) \
+ OPERATOR(Real) OPERATOR(Imag) \
+ OPERATOR(Extension)
+
+// All binary operators (excluding compound assign operators).
+#define BINOP_LIST() \
+ OPERATOR(PtrMemD) OPERATOR(PtrMemI) \
+ OPERATOR(Mul) OPERATOR(Div) OPERATOR(Rem) \
+ OPERATOR(Add) OPERATOR(Sub) OPERATOR(Shl) \
+ OPERATOR(Shr) \
+ \
+ OPERATOR(LT) OPERATOR(GT) OPERATOR(LE) \
+ OPERATOR(GE) OPERATOR(EQ) OPERATOR(NE) \
+ OPERATOR(And) OPERATOR(Xor) OPERATOR(Or) \
+ OPERATOR(LAnd) OPERATOR(LOr) \
+ \
+ OPERATOR(Assign) \
+ OPERATOR(Comma)
+
+// All compound assign operators.
+#define CAO_LIST() \
+ OPERATOR(Mul) OPERATOR(Div) OPERATOR(Rem) OPERATOR(Add) OPERATOR(Sub) \
+ OPERATOR(Shl) OPERATOR(Shr) OPERATOR(And) OPERATOR(Or) OPERATOR(Xor)
+
+namespace clang {
+
+// A helper macro to implement short-circuiting when recursing. It
+// invokes CALL_EXPR, which must be a method call, on the derived
+// object (s.t. a user of RecursiveASTVisitor can override the method
+// in CALL_EXPR).
+#define TRY_TO(CALL_EXPR) \
+ do { if (!getDerived().CALL_EXPR) return false; } while (0)
+
+/// \brief A class that does preorder depth-first traversal on the
+/// entire Clang AST and visits each node.
+///
+/// This class performs three distinct tasks:
+/// 1. traverse the AST (i.e. go to each node);
+/// 2. at a given node, walk up the class hierarchy, starting from
+/// the node's dynamic type, until the top-most class (e.g. Stmt,
+/// Decl, or Type) is reached.
+/// 3. given a (node, class) combination, where 'class' is some base
+/// class of the dynamic type of 'node', call a user-overridable
+/// function to actually visit the node.
+///
+/// These tasks are done by three groups of methods, respectively:
+/// 1. TraverseDecl(Decl *x) does task #1. It is the entry point
+/// for traversing an AST rooted at x. This method simply
+/// dispatches (i.e. forwards) to TraverseFoo(Foo *x) where Foo
+/// is the dynamic type of *x, which calls WalkUpFromFoo(x) and
+/// then recursively visits the child nodes of x.
+/// TraverseStmt(Stmt *x) and TraverseType(QualType x) work
+/// similarly.
+/// 2. WalkUpFromFoo(Foo *x) does task #2. It does not try to visit
+/// any child node of x. Instead, it first calls WalkUpFromBar(x)
+/// where Bar is the direct parent class of Foo (unless Foo has
+/// no parent), and then calls VisitFoo(x) (see the next list item).
+/// 3. VisitFoo(Foo *x) does task #3.
+///
+/// These three method groups are tiered (Traverse* > WalkUpFrom* >
+/// Visit*). A method (e.g. Traverse*) may call methods from the same
+/// tier (e.g. other Traverse*) or one tier lower (e.g. WalkUpFrom*).
+/// It may not call methods from a higher tier.
+///
+/// Note that since WalkUpFromFoo() calls WalkUpFromBar() (where Bar
+/// is Foo's super class) before calling VisitFoo(), the result is
+/// that the Visit*() methods for a given node are called in the
+/// top-down order (e.g. for a node of type NamespaceDecl, the order will
+/// be VisitDecl(), VisitNamedDecl(), and then VisitNamespaceDecl()).
+///
+/// This scheme guarantees that all Visit*() calls for the same AST
+/// node are grouped together. In other words, Visit*() methods for
+/// different nodes are never interleaved.
+///
+/// Clients of this visitor should subclass the visitor (providing
+/// themselves as the template argument, using the curiously recurring
+/// template pattern) and override any of the Traverse*, WalkUpFrom*,
+/// and Visit* methods for declarations, types, statements,
+/// expressions, or other AST nodes where the visitor should customize
+/// behavior. Most users only need to override Visit*. Advanced
+/// users may override Traverse* and WalkUpFrom* to implement custom
+/// traversal strategies. Returning false from one of these overridden
+/// functions will abort the entire traversal.
+///
+/// By default, this visitor tries to visit every part of the explicit
+/// source code exactly once. The default policy towards templates
+/// is to descend into the 'pattern' class or function body, not any
+/// explicit or implicit instantiations. Explicit specializations
+/// are still visited, and the patterns of partial specializations
+/// are visited separately. This behavior can be changed by
+/// overriding shouldVisitTemplateInstantiations() in the derived class
+/// to return true, in which case all known implicit and explicit
+/// instantiations will be visited at the same time as the pattern
+/// from which they were produced.
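+///
+/// As a minimal sketch of a client (illustrative only; the class name and
+/// output stream are placeholders, not part of this header):
+/// \code
+///   class FunctionNameVisitor
+///       : public RecursiveASTVisitor<FunctionNameVisitor> {
+///   public:
+///     bool VisitFunctionDecl(FunctionDecl *FD) {
+///       llvm::errs() << FD->getQualifiedNameAsString() << "\n";
+///       return true;  // returning false would stop the traversal
+///     }
+///   };
+///
+///   // Typically driven from an ASTConsumer:
+///   //   FunctionNameVisitor V;
+///   //   V.TraverseDecl(Context.getTranslationUnitDecl());
+/// \endcode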
+template<typename Derived>
+class RecursiveASTVisitor {
+public:
+ /// \brief Return a reference to the derived class.
+ Derived &getDerived() { return *static_cast<Derived*>(this); }
+
+ /// \brief Return whether this visitor should recurse into
+ /// template instantiations.
+ bool shouldVisitTemplateInstantiations() const { return false; }
+
+ /// \brief Return whether this visitor should recurse into the types of
+ /// TypeLocs.
+ bool shouldWalkTypesOfTypeLocs() const { return true; }
+
+  /// \brief Return whether \c S should be traversed using data recursion
+  /// to avoid a stack overflow in extreme cases.
+ bool shouldUseDataRecursionFor(Stmt *S) const {
+ return isa<BinaryOperator>(S) || isa<UnaryOperator>(S) || isa<CaseStmt>(S);
+ }
+
+ /// \brief Recursively visit a statement or expression, by
+ /// dispatching to Traverse*() based on the argument's dynamic type.
+ ///
+ /// \returns false if the visitation was terminated early, true
+ /// otherwise (including when the argument is NULL).
+ bool TraverseStmt(Stmt *S);
+
+ /// \brief Recursively visit a type, by dispatching to
+ /// Traverse*Type() based on the argument's getTypeClass() property.
+ ///
+ /// \returns false if the visitation was terminated early, true
+ /// otherwise (including when the argument is a Null type).
+ bool TraverseType(QualType T);
+
+ /// \brief Recursively visit a type with location, by dispatching to
+ /// Traverse*TypeLoc() based on the argument type's getTypeClass() property.
+ ///
+ /// \returns false if the visitation was terminated early, true
+ /// otherwise (including when the argument is a Null type location).
+ bool TraverseTypeLoc(TypeLoc TL);
+
+ /// \brief Recursively visit a declaration, by dispatching to
+ /// Traverse*Decl() based on the argument's dynamic type.
+ ///
+ /// \returns false if the visitation was terminated early, true
+ /// otherwise (including when the argument is NULL).
+ bool TraverseDecl(Decl *D);
+
+ /// \brief Recursively visit a C++ nested-name-specifier.
+ ///
+ /// \returns false if the visitation was terminated early, true otherwise.
+ bool TraverseNestedNameSpecifier(NestedNameSpecifier *NNS);
+
+ /// \brief Recursively visit a C++ nested-name-specifier with location
+ /// information.
+ ///
+ /// \returns false if the visitation was terminated early, true otherwise.
+ bool TraverseNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS);
+
+ /// \brief Recursively visit a name with its location information.
+ ///
+ /// \returns false if the visitation was terminated early, true otherwise.
+ bool TraverseDeclarationNameInfo(DeclarationNameInfo NameInfo);
+
+ /// \brief Recursively visit a template name and dispatch to the
+ /// appropriate method.
+ ///
+ /// \returns false if the visitation was terminated early, true otherwise.
+ bool TraverseTemplateName(TemplateName Template);
+
+ /// \brief Recursively visit a template argument and dispatch to the
+ /// appropriate method for the argument type.
+ ///
+ /// \returns false if the visitation was terminated early, true otherwise.
+ // FIXME: migrate callers to TemplateArgumentLoc instead.
+ bool TraverseTemplateArgument(const TemplateArgument &Arg);
+
+ /// \brief Recursively visit a template argument location and dispatch to the
+ /// appropriate method for the argument type.
+ ///
+ /// \returns false if the visitation was terminated early, true otherwise.
+ bool TraverseTemplateArgumentLoc(const TemplateArgumentLoc &ArgLoc);
+
+ /// \brief Recursively visit a set of template arguments.
+  /// This can be overridden by a subclass, but it's not expected that this
+ /// will be needed -- this visitor always dispatches to another.
+ ///
+ /// \returns false if the visitation was terminated early, true otherwise.
+ // FIXME: take a TemplateArgumentLoc* (or TemplateArgumentListInfo) instead.
+ bool TraverseTemplateArguments(const TemplateArgument *Args,
+ unsigned NumArgs);
+
+ /// \brief Recursively visit a constructor initializer. This
+ /// automatically dispatches to another visitor for the initializer
+  /// expression, but not for the name of the initializer, so it may
+ /// be overridden for clients that need access to the name.
+ ///
+ /// \returns false if the visitation was terminated early, true otherwise.
+ bool TraverseConstructorInitializer(CXXCtorInitializer *Init);
+
+ /// \brief Recursively visit a lambda capture.
+ ///
+ /// \returns false if the visitation was terminated early, true otherwise.
+ bool TraverseLambdaCapture(LambdaExpr::Capture C);
+
+ // ---- Methods on Stmts ----
+
+ // Declare Traverse*() for all concrete Stmt classes.
+#define ABSTRACT_STMT(STMT)
+#define STMT(CLASS, PARENT) \
+ bool Traverse##CLASS(CLASS *S);
+#include "clang/AST/StmtNodes.inc"
+ // The above header #undefs ABSTRACT_STMT and STMT upon exit.
+
+ // Define WalkUpFrom*() and empty Visit*() for all Stmt classes.
+ bool WalkUpFromStmt(Stmt *S) { return getDerived().VisitStmt(S); }
+ bool VisitStmt(Stmt *S) { return true; }
+#define STMT(CLASS, PARENT) \
+ bool WalkUpFrom##CLASS(CLASS *S) { \
+ TRY_TO(WalkUpFrom##PARENT(S)); \
+ TRY_TO(Visit##CLASS(S)); \
+ return true; \
+ } \
+ bool Visit##CLASS(CLASS *S) { return true; }
+#include "clang/AST/StmtNodes.inc"
+
+ // Define Traverse*(), WalkUpFrom*(), and Visit*() for unary
+ // operator methods. Unary operators are not classes in themselves
+ // (they're all opcodes in UnaryOperator) but do have visitors.
+#define OPERATOR(NAME) \
+ bool TraverseUnary##NAME(UnaryOperator *S) { \
+ TRY_TO(WalkUpFromUnary##NAME(S)); \
+ TRY_TO(TraverseStmt(S->getSubExpr())); \
+ return true; \
+ } \
+ bool WalkUpFromUnary##NAME(UnaryOperator *S) { \
+ TRY_TO(WalkUpFromUnaryOperator(S)); \
+ TRY_TO(VisitUnary##NAME(S)); \
+ return true; \
+ } \
+ bool VisitUnary##NAME(UnaryOperator *S) { return true; }
+
+ UNARYOP_LIST()
+#undef OPERATOR
+
+ // Define Traverse*(), WalkUpFrom*(), and Visit*() for binary
+ // operator methods. Binary operators are not classes in themselves
+ // (they're all opcodes in BinaryOperator) but do have visitors.
+#define GENERAL_BINOP_FALLBACK(NAME, BINOP_TYPE) \
+ bool TraverseBin##NAME(BINOP_TYPE *S) { \
+ TRY_TO(WalkUpFromBin##NAME(S)); \
+ TRY_TO(TraverseStmt(S->getLHS())); \
+ TRY_TO(TraverseStmt(S->getRHS())); \
+ return true; \
+ } \
+ bool WalkUpFromBin##NAME(BINOP_TYPE *S) { \
+ TRY_TO(WalkUpFrom##BINOP_TYPE(S)); \
+ TRY_TO(VisitBin##NAME(S)); \
+ return true; \
+ } \
+ bool VisitBin##NAME(BINOP_TYPE *S) { return true; }
+
+#define OPERATOR(NAME) GENERAL_BINOP_FALLBACK(NAME, BinaryOperator)
+ BINOP_LIST()
+#undef OPERATOR
+
+ // Define Traverse*(), WalkUpFrom*(), and Visit*() for compound
+ // assignment methods. Compound assignment operators are not
+ // classes in themselves (they're all opcodes in
+ // CompoundAssignOperator) but do have visitors.
+#define OPERATOR(NAME) \
+ GENERAL_BINOP_FALLBACK(NAME##Assign, CompoundAssignOperator)
+
+ CAO_LIST()
+#undef OPERATOR
+#undef GENERAL_BINOP_FALLBACK
+
+ // ---- Methods on Types ----
+ // FIXME: revamp to take TypeLoc's rather than Types.
+
+ // Declare Traverse*() for all concrete Type classes.
+#define ABSTRACT_TYPE(CLASS, BASE)
+#define TYPE(CLASS, BASE) \
+ bool Traverse##CLASS##Type(CLASS##Type *T);
+#include "clang/AST/TypeNodes.def"
+ // The above header #undefs ABSTRACT_TYPE and TYPE upon exit.
+
+ // Define WalkUpFrom*() and empty Visit*() for all Type classes.
+ bool WalkUpFromType(Type *T) { return getDerived().VisitType(T); }
+ bool VisitType(Type *T) { return true; }
+#define TYPE(CLASS, BASE) \
+ bool WalkUpFrom##CLASS##Type(CLASS##Type *T) { \
+ TRY_TO(WalkUpFrom##BASE(T)); \
+ TRY_TO(Visit##CLASS##Type(T)); \
+ return true; \
+ } \
+ bool Visit##CLASS##Type(CLASS##Type *T) { return true; }
+#include "clang/AST/TypeNodes.def"
+
+ // ---- Methods on TypeLocs ----
+ // FIXME: this currently just calls the matching Type methods
+
+ // Declare Traverse*() for all concrete Type classes.
+#define ABSTRACT_TYPELOC(CLASS, BASE)
+#define TYPELOC(CLASS, BASE) \
+ bool Traverse##CLASS##TypeLoc(CLASS##TypeLoc TL);
+#include "clang/AST/TypeLocNodes.def"
+ // The above header #undefs ABSTRACT_TYPELOC and TYPELOC upon exit.
+
+ // Define WalkUpFrom*() and empty Visit*() for all TypeLoc classes.
+ bool WalkUpFromTypeLoc(TypeLoc TL) { return getDerived().VisitTypeLoc(TL); }
+ bool VisitTypeLoc(TypeLoc TL) { return true; }
+
+ // QualifiedTypeLoc and UnqualTypeLoc are not declared in
+ // TypeNodes.def and thus need to be handled specially.
+ bool WalkUpFromQualifiedTypeLoc(QualifiedTypeLoc TL) {
+ return getDerived().VisitUnqualTypeLoc(TL.getUnqualifiedLoc());
+ }
+ bool VisitQualifiedTypeLoc(QualifiedTypeLoc TL) { return true; }
+ bool WalkUpFromUnqualTypeLoc(UnqualTypeLoc TL) {
+ return getDerived().VisitUnqualTypeLoc(TL.getUnqualifiedLoc());
+ }
+ bool VisitUnqualTypeLoc(UnqualTypeLoc TL) { return true; }
+
+ // Note that BASE includes trailing 'Type' which CLASS doesn't.
+#define TYPE(CLASS, BASE) \
+ bool WalkUpFrom##CLASS##TypeLoc(CLASS##TypeLoc TL) { \
+ TRY_TO(WalkUpFrom##BASE##Loc(TL)); \
+ TRY_TO(Visit##CLASS##TypeLoc(TL)); \
+ return true; \
+ } \
+ bool Visit##CLASS##TypeLoc(CLASS##TypeLoc TL) { return true; }
+#include "clang/AST/TypeNodes.def"
+
+ // ---- Methods on Decls ----
+
+ // Declare Traverse*() for all concrete Decl classes.
+#define ABSTRACT_DECL(DECL)
+#define DECL(CLASS, BASE) \
+ bool Traverse##CLASS##Decl(CLASS##Decl *D);
+#include "clang/AST/DeclNodes.inc"
+ // The above header #undefs ABSTRACT_DECL and DECL upon exit.
+
+ // Define WalkUpFrom*() and empty Visit*() for all Decl classes.
+ bool WalkUpFromDecl(Decl *D) { return getDerived().VisitDecl(D); }
+ bool VisitDecl(Decl *D) { return true; }
+#define DECL(CLASS, BASE) \
+ bool WalkUpFrom##CLASS##Decl(CLASS##Decl *D) { \
+ TRY_TO(WalkUpFrom##BASE(D)); \
+ TRY_TO(Visit##CLASS##Decl(D)); \
+ return true; \
+ } \
+ bool Visit##CLASS##Decl(CLASS##Decl *D) { return true; }
+#include "clang/AST/DeclNodes.inc"
+
+private:
+ // These are helper methods used by more than one Traverse* method.
+ bool TraverseTemplateParameterListHelper(TemplateParameterList *TPL);
+ bool TraverseClassInstantiations(ClassTemplateDecl* D, Decl *Pattern);
+  bool TraverseFunctionInstantiations(FunctionTemplateDecl* D);
+ bool TraverseTemplateArgumentLocsHelper(const TemplateArgumentLoc *TAL,
+ unsigned Count);
+ bool TraverseArrayTypeLocHelper(ArrayTypeLoc TL);
+ bool TraverseRecordHelper(RecordDecl *D);
+ bool TraverseCXXRecordHelper(CXXRecordDecl *D);
+ bool TraverseDeclaratorHelper(DeclaratorDecl *D);
+ bool TraverseDeclContextHelper(DeclContext *DC);
+ bool TraverseFunctionHelper(FunctionDecl *D);
+ bool TraverseVarHelper(VarDecl *D);
+
+ bool Walk(Stmt *S);
+
+ struct EnqueueJob {
+ Stmt *S;
+ Stmt::child_iterator StmtIt;
+
+    EnqueueJob(Stmt *S) : S(S), StmtIt() {
+      if (Expr *E = dyn_cast_or_null<Expr>(S))
+        this->S = E->IgnoreParens(); // assign the member, not the parameter
+    }
+ };
+ bool dataTraverse(Stmt *S);
+};
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::dataTraverse(Stmt *S) {
+
+ SmallVector<EnqueueJob, 16> Queue;
+ Queue.push_back(S);
+
+ while (!Queue.empty()) {
+ EnqueueJob &job = Queue.back();
+ Stmt *CurrS = job.S;
+ if (!CurrS) {
+ Queue.pop_back();
+ continue;
+ }
+
+ if (getDerived().shouldUseDataRecursionFor(CurrS)) {
+ if (job.StmtIt == Stmt::child_iterator()) {
+ if (!Walk(CurrS)) return false;
+ job.StmtIt = CurrS->child_begin();
+ } else {
+ ++job.StmtIt;
+ }
+
+ if (job.StmtIt != CurrS->child_end())
+ Queue.push_back(*job.StmtIt);
+ else
+ Queue.pop_back();
+ continue;
+ }
+
+ Queue.pop_back();
+ TRY_TO(TraverseStmt(CurrS));
+ }
+
+ return true;
+}
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::Walk(Stmt *S) {
+
+#define DISPATCH_WALK(NAME, CLASS, VAR) \
+ return getDerived().WalkUpFrom##NAME(static_cast<CLASS*>(VAR));
+
+ if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(S)) {
+ switch (BinOp->getOpcode()) {
+#define OPERATOR(NAME) \
+ case BO_##NAME: DISPATCH_WALK(Bin##NAME, BinaryOperator, S);
+
+ BINOP_LIST()
+#undef OPERATOR
+
+#define OPERATOR(NAME) \
+ case BO_##NAME##Assign: \
+ DISPATCH_WALK(Bin##NAME##Assign, CompoundAssignOperator, S);
+
+ CAO_LIST()
+#undef OPERATOR
+ }
+ } else if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(S)) {
+ switch (UnOp->getOpcode()) {
+#define OPERATOR(NAME) \
+ case UO_##NAME: DISPATCH_WALK(Unary##NAME, UnaryOperator, S);
+
+ UNARYOP_LIST()
+#undef OPERATOR
+ }
+ }
+
+ // Top switch stmt: dispatch to TraverseFooStmt for each concrete FooStmt.
+ switch (S->getStmtClass()) {
+ case Stmt::NoStmtClass: break;
+#define ABSTRACT_STMT(STMT)
+#define STMT(CLASS, PARENT) \
+ case Stmt::CLASS##Class: DISPATCH_WALK(CLASS, CLASS, S);
+#include "clang/AST/StmtNodes.inc"
+ }
+
+#undef DISPATCH_WALK
+
+ return true;
+}
+
+#define DISPATCH(NAME, CLASS, VAR) \
+ return getDerived().Traverse##NAME(static_cast<CLASS*>(VAR))
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseStmt(Stmt *S) {
+ if (!S)
+ return true;
+
+ if (getDerived().shouldUseDataRecursionFor(S))
+ return dataTraverse(S);
+
+ // If we have a binary expr, dispatch to the subcode of the binop. A smart
+ // optimizer (e.g. LLVM) will fold this comparison into the switch stmt
+ // below.
+ if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(S)) {
+ switch (BinOp->getOpcode()) {
+#define OPERATOR(NAME) \
+ case BO_##NAME: DISPATCH(Bin##NAME, BinaryOperator, S);
+
+ BINOP_LIST()
+#undef OPERATOR
+#undef BINOP_LIST
+
+#define OPERATOR(NAME) \
+ case BO_##NAME##Assign: \
+ DISPATCH(Bin##NAME##Assign, CompoundAssignOperator, S);
+
+ CAO_LIST()
+#undef OPERATOR
+#undef CAO_LIST
+ }
+ } else if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(S)) {
+ switch (UnOp->getOpcode()) {
+#define OPERATOR(NAME) \
+ case UO_##NAME: DISPATCH(Unary##NAME, UnaryOperator, S);
+
+ UNARYOP_LIST()
+#undef OPERATOR
+#undef UNARYOP_LIST
+ }
+ }
+
+ // Top switch stmt: dispatch to TraverseFooStmt for each concrete FooStmt.
+ switch (S->getStmtClass()) {
+ case Stmt::NoStmtClass: break;
+#define ABSTRACT_STMT(STMT)
+#define STMT(CLASS, PARENT) \
+ case Stmt::CLASS##Class: DISPATCH(CLASS, CLASS, S);
+#include "clang/AST/StmtNodes.inc"
+ }
+
+ return true;
+}
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseType(QualType T) {
+ if (T.isNull())
+ return true;
+
+ switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, BASE)
+#define TYPE(CLASS, BASE) \
+ case Type::CLASS: DISPATCH(CLASS##Type, CLASS##Type, \
+ const_cast<Type*>(T.getTypePtr()));
+#include "clang/AST/TypeNodes.def"
+ }
+
+ return true;
+}
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseTypeLoc(TypeLoc TL) {
+ if (TL.isNull())
+ return true;
+
+ switch (TL.getTypeLocClass()) {
+#define ABSTRACT_TYPELOC(CLASS, BASE)
+#define TYPELOC(CLASS, BASE) \
+ case TypeLoc::CLASS: \
+ return getDerived().Traverse##CLASS##TypeLoc(*cast<CLASS##TypeLoc>(&TL));
+#include "clang/AST/TypeLocNodes.def"
+ }
+
+ return true;
+}
+
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseDecl(Decl *D) {
+ if (!D)
+ return true;
+
+ // As a syntax visitor, we want to ignore declarations for
+ // implicitly-defined declarations (ones not typed explicitly by the
+ // user).
+ if (D->isImplicit())
+ return true;
+
+ switch (D->getKind()) {
+#define ABSTRACT_DECL(DECL)
+#define DECL(CLASS, BASE) \
+ case Decl::CLASS: DISPATCH(CLASS##Decl, CLASS##Decl, D);
+#include "clang/AST/DeclNodes.inc"
+ }
+
+ return true;
+}
+
+#undef DISPATCH
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseNestedNameSpecifier(
+ NestedNameSpecifier *NNS) {
+ if (!NNS)
+ return true;
+
+ if (NNS->getPrefix())
+ TRY_TO(TraverseNestedNameSpecifier(NNS->getPrefix()));
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ case NestedNameSpecifier::Global:
+ return true;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ TRY_TO(TraverseType(QualType(NNS->getAsType(), 0)));
+ }
+
+ return true;
+}
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseNestedNameSpecifierLoc(
+ NestedNameSpecifierLoc NNS) {
+ if (!NNS)
+ return true;
+
+ if (NestedNameSpecifierLoc Prefix = NNS.getPrefix())
+ TRY_TO(TraverseNestedNameSpecifierLoc(Prefix));
+
+ switch (NNS.getNestedNameSpecifier()->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ case NestedNameSpecifier::Global:
+ return true;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ TRY_TO(TraverseTypeLoc(NNS.getTypeLoc()));
+ break;
+ }
+
+ return true;
+}
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseDeclarationNameInfo(
+ DeclarationNameInfo NameInfo) {
+ switch (NameInfo.getName().getNameKind()) {
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (TypeSourceInfo *TSInfo = NameInfo.getNamedTypeInfo())
+ TRY_TO(TraverseTypeLoc(TSInfo->getTypeLoc()));
+
+ break;
+
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ break;
+ }
+
+ return true;
+}
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseTemplateName(TemplateName Template) {
+ if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
+ TRY_TO(TraverseNestedNameSpecifier(DTN->getQualifier()));
+ else if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+ TRY_TO(TraverseNestedNameSpecifier(QTN->getQualifier()));
+
+ return true;
+}
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseTemplateArgument(
+ const TemplateArgument &Arg) {
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Integral:
+ return true;
+
+ case TemplateArgument::Type:
+ return getDerived().TraverseType(Arg.getAsType());
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ return getDerived().TraverseTemplateName(
+ Arg.getAsTemplateOrTemplatePattern());
+
+ case TemplateArgument::Expression:
+ return getDerived().TraverseStmt(Arg.getAsExpr());
+
+ case TemplateArgument::Pack:
+ return getDerived().TraverseTemplateArguments(Arg.pack_begin(),
+ Arg.pack_size());
+ }
+
+ return true;
+}
+
+// FIXME: no template name location?
+// FIXME: no source locations for a template argument pack?
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseTemplateArgumentLoc(
+ const TemplateArgumentLoc &ArgLoc) {
+ const TemplateArgument &Arg = ArgLoc.getArgument();
+
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Integral:
+ return true;
+
+ case TemplateArgument::Type: {
+ // FIXME: how can TSI ever be NULL?
+ if (TypeSourceInfo *TSI = ArgLoc.getTypeSourceInfo())
+ return getDerived().TraverseTypeLoc(TSI->getTypeLoc());
+ else
+ return getDerived().TraverseType(Arg.getAsType());
+ }
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ if (ArgLoc.getTemplateQualifierLoc())
+ TRY_TO(getDerived().TraverseNestedNameSpecifierLoc(
+ ArgLoc.getTemplateQualifierLoc()));
+ return getDerived().TraverseTemplateName(
+ Arg.getAsTemplateOrTemplatePattern());
+
+ case TemplateArgument::Expression:
+ return getDerived().TraverseStmt(ArgLoc.getSourceExpression());
+
+ case TemplateArgument::Pack:
+ return getDerived().TraverseTemplateArguments(Arg.pack_begin(),
+ Arg.pack_size());
+ }
+
+ return true;
+}
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseTemplateArguments(
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ TRY_TO(TraverseTemplateArgument(Args[I]));
+ }
+
+ return true;
+}
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseConstructorInitializer(
+ CXXCtorInitializer *Init) {
+ if (TypeSourceInfo *TInfo = Init->getTypeSourceInfo())
+ TRY_TO(TraverseTypeLoc(TInfo->getTypeLoc()));
+
+ if (Init->isWritten())
+ TRY_TO(TraverseStmt(Init->getInit()));
+ return true;
+}
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseLambdaCapture(LambdaExpr::Capture C){
+ return true;
+}
+
+// ----------------- Type traversal -----------------
+
+// This macro makes available a variable T, the passed-in type.
+#define DEF_TRAVERSE_TYPE(TYPE, CODE) \
+ template<typename Derived> \
+ bool RecursiveASTVisitor<Derived>::Traverse##TYPE (TYPE *T) { \
+ TRY_TO(WalkUpFrom##TYPE (T)); \
+ { CODE; } \
+ return true; \
+ }
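+
+// For orientation, the DEF_TRAVERSE_TYPE(PointerType, ...) use below expands
+// to roughly the following (illustrative, not emitted verbatim):
+//
+//   template<typename Derived>
+//   bool RecursiveASTVisitor<Derived>::TraversePointerType(PointerType *T) {
+//     TRY_TO(WalkUpFromPointerType(T));
+//     { TRY_TO(TraverseType(T->getPointeeType())); }
+//     return true;
+//   }
+//
+// i.e. each Traverse*Type() first walks up (firing the Visit* callbacks) and
+// then recurses into the component types named in CODE.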
+
+DEF_TRAVERSE_TYPE(BuiltinType, { })
+
+DEF_TRAVERSE_TYPE(ComplexType, {
+ TRY_TO(TraverseType(T->getElementType()));
+ })
+
+DEF_TRAVERSE_TYPE(PointerType, {
+ TRY_TO(TraverseType(T->getPointeeType()));
+ })
+
+DEF_TRAVERSE_TYPE(BlockPointerType, {
+ TRY_TO(TraverseType(T->getPointeeType()));
+ })
+
+DEF_TRAVERSE_TYPE(LValueReferenceType, {
+ TRY_TO(TraverseType(T->getPointeeType()));
+ })
+
+DEF_TRAVERSE_TYPE(RValueReferenceType, {
+ TRY_TO(TraverseType(T->getPointeeType()));
+ })
+
+DEF_TRAVERSE_TYPE(MemberPointerType, {
+ TRY_TO(TraverseType(QualType(T->getClass(), 0)));
+ TRY_TO(TraverseType(T->getPointeeType()));
+ })
+
+DEF_TRAVERSE_TYPE(ConstantArrayType, {
+ TRY_TO(TraverseType(T->getElementType()));
+ })
+
+DEF_TRAVERSE_TYPE(IncompleteArrayType, {
+ TRY_TO(TraverseType(T->getElementType()));
+ })
+
+DEF_TRAVERSE_TYPE(VariableArrayType, {
+ TRY_TO(TraverseType(T->getElementType()));
+ TRY_TO(TraverseStmt(T->getSizeExpr()));
+ })
+
+DEF_TRAVERSE_TYPE(DependentSizedArrayType, {
+ TRY_TO(TraverseType(T->getElementType()));
+ if (T->getSizeExpr())
+ TRY_TO(TraverseStmt(T->getSizeExpr()));
+ })
+
+DEF_TRAVERSE_TYPE(DependentSizedExtVectorType, {
+ if (T->getSizeExpr())
+ TRY_TO(TraverseStmt(T->getSizeExpr()));
+ TRY_TO(TraverseType(T->getElementType()));
+ })
+
+DEF_TRAVERSE_TYPE(VectorType, {
+ TRY_TO(TraverseType(T->getElementType()));
+ })
+
+DEF_TRAVERSE_TYPE(ExtVectorType, {
+ TRY_TO(TraverseType(T->getElementType()));
+ })
+
+DEF_TRAVERSE_TYPE(FunctionNoProtoType, {
+ TRY_TO(TraverseType(T->getResultType()));
+ })
+
+DEF_TRAVERSE_TYPE(FunctionProtoType, {
+ TRY_TO(TraverseType(T->getResultType()));
+
+ for (FunctionProtoType::arg_type_iterator A = T->arg_type_begin(),
+ AEnd = T->arg_type_end();
+ A != AEnd; ++A) {
+ TRY_TO(TraverseType(*A));
+ }
+
+ for (FunctionProtoType::exception_iterator E = T->exception_begin(),
+ EEnd = T->exception_end();
+ E != EEnd; ++E) {
+ TRY_TO(TraverseType(*E));
+ }
+ })
+
+DEF_TRAVERSE_TYPE(UnresolvedUsingType, { })
+DEF_TRAVERSE_TYPE(TypedefType, { })
+
+DEF_TRAVERSE_TYPE(TypeOfExprType, {
+ TRY_TO(TraverseStmt(T->getUnderlyingExpr()));
+ })
+
+DEF_TRAVERSE_TYPE(TypeOfType, {
+ TRY_TO(TraverseType(T->getUnderlyingType()));
+ })
+
+DEF_TRAVERSE_TYPE(DecltypeType, {
+ TRY_TO(TraverseStmt(T->getUnderlyingExpr()));
+ })
+
+DEF_TRAVERSE_TYPE(UnaryTransformType, {
+ TRY_TO(TraverseType(T->getBaseType()));
+ TRY_TO(TraverseType(T->getUnderlyingType()));
+ })
+
+DEF_TRAVERSE_TYPE(AutoType, {
+ TRY_TO(TraverseType(T->getDeducedType()));
+ })
+
+DEF_TRAVERSE_TYPE(RecordType, { })
+DEF_TRAVERSE_TYPE(EnumType, { })
+DEF_TRAVERSE_TYPE(TemplateTypeParmType, { })
+DEF_TRAVERSE_TYPE(SubstTemplateTypeParmType, { })
+DEF_TRAVERSE_TYPE(SubstTemplateTypeParmPackType, { })
+
+DEF_TRAVERSE_TYPE(TemplateSpecializationType, {
+ TRY_TO(TraverseTemplateName(T->getTemplateName()));
+ TRY_TO(TraverseTemplateArguments(T->getArgs(), T->getNumArgs()));
+ })
+
+DEF_TRAVERSE_TYPE(InjectedClassNameType, { })
+
+DEF_TRAVERSE_TYPE(AttributedType, {
+ TRY_TO(TraverseType(T->getModifiedType()));
+ })
+
+DEF_TRAVERSE_TYPE(ParenType, {
+ TRY_TO(TraverseType(T->getInnerType()));
+ })
+
+DEF_TRAVERSE_TYPE(ElaboratedType, {
+ if (T->getQualifier()) {
+ TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
+ }
+ TRY_TO(TraverseType(T->getNamedType()));
+ })
+
+DEF_TRAVERSE_TYPE(DependentNameType, {
+ TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
+ })
+
+DEF_TRAVERSE_TYPE(DependentTemplateSpecializationType, {
+ TRY_TO(TraverseNestedNameSpecifier(T->getQualifier()));
+ TRY_TO(TraverseTemplateArguments(T->getArgs(), T->getNumArgs()));
+ })
+
+DEF_TRAVERSE_TYPE(PackExpansionType, {
+ TRY_TO(TraverseType(T->getPattern()));
+ })
+
+DEF_TRAVERSE_TYPE(ObjCInterfaceType, { })
+
+DEF_TRAVERSE_TYPE(ObjCObjectType, {
+ // We have to watch out here because an ObjCInterfaceType's base
+ // type is itself.
+ if (T->getBaseType().getTypePtr() != T)
+ TRY_TO(TraverseType(T->getBaseType()));
+ })
+
+DEF_TRAVERSE_TYPE(ObjCObjectPointerType, {
+ TRY_TO(TraverseType(T->getPointeeType()));
+ })
+
+DEF_TRAVERSE_TYPE(AtomicType, {
+ TRY_TO(TraverseType(T->getValueType()));
+ })
+
+#undef DEF_TRAVERSE_TYPE
+
+// ----------------- TypeLoc traversal -----------------
+
+// This macro makes available a variable TL, the passed-in TypeLoc.
+// If requested, it calls WalkUpFrom* for the Type in the given TypeLoc,
+// in addition to WalkUpFrom* for the TypeLoc itself, such that existing
+// clients that override the WalkUpFrom*Type() and/or Visit*Type() methods
+// continue to work.
+#define DEF_TRAVERSE_TYPELOC(TYPE, CODE) \
+ template<typename Derived> \
+ bool RecursiveASTVisitor<Derived>::Traverse##TYPE##Loc(TYPE##Loc TL) { \
+ if (getDerived().shouldWalkTypesOfTypeLocs()) \
+ TRY_TO(WalkUpFrom##TYPE(const_cast<TYPE*>(TL.getTypePtr()))); \
+ TRY_TO(WalkUpFrom##TYPE##Loc(TL)); \
+ { CODE; } \
+ return true; \
+ }
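+
+// Illustrative sketch (hypothetical client code): the extra WalkUpFrom*Type()
+// call in this macro is guarded by shouldWalkTypesOfTypeLocs(), so a visitor
+// that only wants callbacks for spelled TypeLocs (and not the underlying
+// Type nodes as well) can shadow that hook to return false:
+//
+//   class SpelledTypeLocsOnlyVisitor
+//       : public RecursiveASTVisitor<SpelledTypeLocsOnlyVisitor> {
+//   public:
+//     bool shouldWalkTypesOfTypeLocs() const { return false; }
+//   };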
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseQualifiedTypeLoc(
+ QualifiedTypeLoc TL) {
+ // Move this over to the 'main' typeloc tree. Note that this is a
+ // move -- we pretend that we were really looking at the unqualified
+ // typeloc all along -- rather than a recursion, so we don't follow
+ // the normal CRTP plan of going through
+ // getDerived().TraverseTypeLoc. If we did, we'd be traversing
+ // twice for the same type (once as a QualifiedTypeLoc version of
+ // the type, once as an UnqualifiedTypeLoc version of the type),
+ // which in effect means we'd call VisitTypeLoc twice with the
+ // 'same' type. This solves that problem, at the cost of never
+ // seeing the qualified version of the type (unless the client
+ // subclasses TraverseQualifiedTypeLoc themselves). It's not a
+ // perfect solution. A perfect solution probably requires making
+ // QualifiedTypeLoc a wrapper around TypeLoc -- like QualType is a
+ // wrapper around Type* -- rather than being its own class in the
+ // type hierarchy.
+ return TraverseTypeLoc(TL.getUnqualifiedLoc());
+}
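+
+// For orientation: given a declaration such as "const int x;", the TypeLoc
+// for "const int" is a QualifiedTypeLoc wrapping the BuiltinTypeLoc for
+// "int"; the method above forwards straight to the unqualified loc, so the
+// Visit*TypeLoc callbacks fire once, for the BuiltinTypeLoc.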
+
+DEF_TRAVERSE_TYPELOC(BuiltinType, { })
+
+// FIXME: ComplexTypeLoc is unfinished
+DEF_TRAVERSE_TYPELOC(ComplexType, {
+ TRY_TO(TraverseType(TL.getTypePtr()->getElementType()));
+ })
+
+DEF_TRAVERSE_TYPELOC(PointerType, {
+ TRY_TO(TraverseTypeLoc(TL.getPointeeLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(BlockPointerType, {
+ TRY_TO(TraverseTypeLoc(TL.getPointeeLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(LValueReferenceType, {
+ TRY_TO(TraverseTypeLoc(TL.getPointeeLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(RValueReferenceType, {
+ TRY_TO(TraverseTypeLoc(TL.getPointeeLoc()));
+ })
+
+// FIXME: location of base class?
+// We traverse this in the type case as well, but how is it not reached through
+// the pointee type?
+DEF_TRAVERSE_TYPELOC(MemberPointerType, {
+ TRY_TO(TraverseType(QualType(TL.getTypePtr()->getClass(), 0)));
+ TRY_TO(TraverseTypeLoc(TL.getPointeeLoc()));
+ })
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseArrayTypeLocHelper(ArrayTypeLoc TL) {
+ // This isn't available for ArrayType, but is for the ArrayTypeLoc.
+ TRY_TO(TraverseStmt(TL.getSizeExpr()));
+ return true;
+}
+
+DEF_TRAVERSE_TYPELOC(ConstantArrayType, {
+ TRY_TO(TraverseTypeLoc(TL.getElementLoc()));
+ return TraverseArrayTypeLocHelper(TL);
+ })
+
+DEF_TRAVERSE_TYPELOC(IncompleteArrayType, {
+ TRY_TO(TraverseTypeLoc(TL.getElementLoc()));
+ return TraverseArrayTypeLocHelper(TL);
+ })
+
+DEF_TRAVERSE_TYPELOC(VariableArrayType, {
+ TRY_TO(TraverseTypeLoc(TL.getElementLoc()));
+ return TraverseArrayTypeLocHelper(TL);
+ })
+
+DEF_TRAVERSE_TYPELOC(DependentSizedArrayType, {
+ TRY_TO(TraverseTypeLoc(TL.getElementLoc()));
+ return TraverseArrayTypeLocHelper(TL);
+ })
+
+// FIXME: order? why not size expr first?
+// FIXME: base VectorTypeLoc is unfinished
+DEF_TRAVERSE_TYPELOC(DependentSizedExtVectorType, {
+ if (TL.getTypePtr()->getSizeExpr())
+ TRY_TO(TraverseStmt(TL.getTypePtr()->getSizeExpr()));
+ TRY_TO(TraverseType(TL.getTypePtr()->getElementType()));
+ })
+
+// FIXME: VectorTypeLoc is unfinished
+DEF_TRAVERSE_TYPELOC(VectorType, {
+ TRY_TO(TraverseType(TL.getTypePtr()->getElementType()));
+ })
+
+// FIXME: size and attributes
+// FIXME: base VectorTypeLoc is unfinished
+DEF_TRAVERSE_TYPELOC(ExtVectorType, {
+ TRY_TO(TraverseType(TL.getTypePtr()->getElementType()));
+ })
+
+DEF_TRAVERSE_TYPELOC(FunctionNoProtoType, {
+ TRY_TO(TraverseTypeLoc(TL.getResultLoc()));
+ })
+
+// FIXME: location of exception specifications (attributes?)
+DEF_TRAVERSE_TYPELOC(FunctionProtoType, {
+ TRY_TO(TraverseTypeLoc(TL.getResultLoc()));
+
+ const FunctionProtoType *T = TL.getTypePtr();
+
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
+ if (TL.getArg(I)) {
+ TRY_TO(TraverseDecl(TL.getArg(I)));
+ } else if (I < T->getNumArgs()) {
+ TRY_TO(TraverseType(T->getArgType(I)));
+ }
+ }
+
+ for (FunctionProtoType::exception_iterator E = T->exception_begin(),
+ EEnd = T->exception_end();
+ E != EEnd; ++E) {
+ TRY_TO(TraverseType(*E));
+ }
+ })
+
+DEF_TRAVERSE_TYPELOC(UnresolvedUsingType, { })
+DEF_TRAVERSE_TYPELOC(TypedefType, { })
+
+DEF_TRAVERSE_TYPELOC(TypeOfExprType, {
+ TRY_TO(TraverseStmt(TL.getUnderlyingExpr()));
+ })
+
+DEF_TRAVERSE_TYPELOC(TypeOfType, {
+ TRY_TO(TraverseTypeLoc(TL.getUnderlyingTInfo()->getTypeLoc()));
+ })
+
+// FIXME: location of underlying expr
+DEF_TRAVERSE_TYPELOC(DecltypeType, {
+ TRY_TO(TraverseStmt(TL.getTypePtr()->getUnderlyingExpr()));
+ })
+
+DEF_TRAVERSE_TYPELOC(UnaryTransformType, {
+ TRY_TO(TraverseTypeLoc(TL.getUnderlyingTInfo()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(AutoType, {
+ TRY_TO(TraverseType(TL.getTypePtr()->getDeducedType()));
+ })
+
+DEF_TRAVERSE_TYPELOC(RecordType, { })
+DEF_TRAVERSE_TYPELOC(EnumType, { })
+DEF_TRAVERSE_TYPELOC(TemplateTypeParmType, { })
+DEF_TRAVERSE_TYPELOC(SubstTemplateTypeParmType, { })
+DEF_TRAVERSE_TYPELOC(SubstTemplateTypeParmPackType, { })
+
+// FIXME: use the loc for the template name?
+DEF_TRAVERSE_TYPELOC(TemplateSpecializationType, {
+ TRY_TO(TraverseTemplateName(TL.getTypePtr()->getTemplateName()));
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
+ TRY_TO(TraverseTemplateArgumentLoc(TL.getArgLoc(I)));
+ }
+ })
+
+DEF_TRAVERSE_TYPELOC(InjectedClassNameType, { })
+
+DEF_TRAVERSE_TYPELOC(ParenType, {
+ TRY_TO(TraverseTypeLoc(TL.getInnerLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(AttributedType, {
+ TRY_TO(TraverseTypeLoc(TL.getModifiedLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(ElaboratedType, {
+ if (TL.getQualifierLoc()) {
+ TRY_TO(TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()));
+ }
+ TRY_TO(TraverseTypeLoc(TL.getNamedTypeLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(DependentNameType, {
+ TRY_TO(TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(DependentTemplateSpecializationType, {
+ if (TL.getQualifierLoc()) {
+ TRY_TO(TraverseNestedNameSpecifierLoc(TL.getQualifierLoc()));
+ }
+
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
+ TRY_TO(TraverseTemplateArgumentLoc(TL.getArgLoc(I)));
+ }
+ })
+
+DEF_TRAVERSE_TYPELOC(PackExpansionType, {
+ TRY_TO(TraverseTypeLoc(TL.getPatternLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(ObjCInterfaceType, { })
+
+DEF_TRAVERSE_TYPELOC(ObjCObjectType, {
+ // We have to watch out here because an ObjCInterfaceType's base
+ // type is itself.
+ if (TL.getTypePtr()->getBaseType().getTypePtr() != TL.getTypePtr())
+ TRY_TO(TraverseTypeLoc(TL.getBaseLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(ObjCObjectPointerType, {
+ TRY_TO(TraverseTypeLoc(TL.getPointeeLoc()));
+ })
+
+DEF_TRAVERSE_TYPELOC(AtomicType, {
+ TRY_TO(TraverseTypeLoc(TL.getValueLoc()));
+ })
+
+#undef DEF_TRAVERSE_TYPELOC
+
+// ----------------- Decl traversal -----------------
+//
+// For a Decl, we automate (in the DEF_TRAVERSE_DECL macro) traversing
+// the children that come from the DeclContext associated with it.
+// Therefore each Traverse* only needs to worry about children other
+// than those.
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseDeclContextHelper(DeclContext *DC) {
+ if (!DC)
+ return true;
+
+ for (DeclContext::decl_iterator Child = DC->decls_begin(),
+ ChildEnd = DC->decls_end();
+ Child != ChildEnd; ++Child) {
+ // BlockDecls are traversed through BlockExprs.
+ if (!isa<BlockDecl>(*Child))
+ TRY_TO(TraverseDecl(*Child));
+ }
+
+ return true;
+}
+
+// This macro makes available a variable D, the passed-in decl.
+#define DEF_TRAVERSE_DECL(DECL, CODE) \
+template<typename Derived> \
+bool RecursiveASTVisitor<Derived>::Traverse##DECL (DECL *D) { \
+ TRY_TO(WalkUpFrom##DECL (D)); \
+ { CODE; } \
+ TRY_TO(TraverseDeclContextHelper(dyn_cast<DeclContext>(D))); \
+ return true; \
+}
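+
+// For orientation (illustrative expansion): DEF_TRAVERSE_DECL(EnumDecl, ...)
+// below therefore yields a TraverseEnumDecl() that walks up (firing Visit*
+// callbacks), runs CODE, and finally recurses into the enumerators via
+// TraverseDeclContextHelper(), since EnumDecl is a DeclContext. A bare
+// "return true;" inside CODE (as in BlockDecl below) skips that last step.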
+
+DEF_TRAVERSE_DECL(AccessSpecDecl, { })
+
+DEF_TRAVERSE_DECL(BlockDecl, {
+ TRY_TO(TraverseTypeLoc(D->getSignatureAsWritten()->getTypeLoc()));
+ TRY_TO(TraverseStmt(D->getBody()));
+ // This return statement makes sure the traversal of nodes in
+ // decls_begin()/decls_end() (done in the DEF_TRAVERSE_DECL macro)
+ // is skipped - don't remove it.
+ return true;
+ })
+
+DEF_TRAVERSE_DECL(FileScopeAsmDecl, {
+ TRY_TO(TraverseStmt(D->getAsmString()));
+ })
+
+DEF_TRAVERSE_DECL(ImportDecl, { })
+
+DEF_TRAVERSE_DECL(FriendDecl, {
+    // A friend is either a decl or a type.
+ if (D->getFriendType())
+ TRY_TO(TraverseTypeLoc(D->getFriendType()->getTypeLoc()));
+ else
+ TRY_TO(TraverseDecl(D->getFriendDecl()));
+ })
+
+DEF_TRAVERSE_DECL(FriendTemplateDecl, {
+ if (D->getFriendType())
+ TRY_TO(TraverseTypeLoc(D->getFriendType()->getTypeLoc()));
+ else
+ TRY_TO(TraverseDecl(D->getFriendDecl()));
+ for (unsigned I = 0, E = D->getNumTemplateParameters(); I < E; ++I) {
+ TemplateParameterList *TPL = D->getTemplateParameterList(I);
+ for (TemplateParameterList::iterator ITPL = TPL->begin(),
+ ETPL = TPL->end();
+ ITPL != ETPL; ++ITPL) {
+ TRY_TO(TraverseDecl(*ITPL));
+ }
+ }
+ })
+
+DEF_TRAVERSE_DECL(ClassScopeFunctionSpecializationDecl, {
+ TRY_TO(TraverseDecl(D->getSpecialization()));
+ })
+
+DEF_TRAVERSE_DECL(LinkageSpecDecl, { })
+
+DEF_TRAVERSE_DECL(ObjCPropertyImplDecl, {
+ // FIXME: implement this
+ })
+
+DEF_TRAVERSE_DECL(StaticAssertDecl, {
+ TRY_TO(TraverseStmt(D->getAssertExpr()));
+ TRY_TO(TraverseStmt(D->getMessage()));
+ })
+
+DEF_TRAVERSE_DECL(TranslationUnitDecl, {
+ // Code in an unnamed namespace shows up automatically in
+ // decls_begin()/decls_end(). Thus we don't need to recurse on
+ // D->getAnonymousNamespace().
+ })
+
+DEF_TRAVERSE_DECL(NamespaceAliasDecl, {
+ // We shouldn't traverse an aliased namespace, since it will be
+ // defined (and, therefore, traversed) somewhere else.
+ //
+ // This return statement makes sure the traversal of nodes in
+ // decls_begin()/decls_end() (done in the DEF_TRAVERSE_DECL macro)
+ // is skipped - don't remove it.
+ return true;
+ })
+
+DEF_TRAVERSE_DECL(LabelDecl, {
+ // There is no code in a LabelDecl.
+})
+
+
+DEF_TRAVERSE_DECL(NamespaceDecl, {
+ // Code in an unnamed namespace shows up automatically in
+ // decls_begin()/decls_end(). Thus we don't need to recurse on
+ // D->getAnonymousNamespace().
+ })
+
+DEF_TRAVERSE_DECL(ObjCCompatibleAliasDecl, {
+ // FIXME: implement
+ })
+
+DEF_TRAVERSE_DECL(ObjCCategoryDecl, {
+ // FIXME: implement
+ })
+
+DEF_TRAVERSE_DECL(ObjCCategoryImplDecl, {
+ // FIXME: implement
+ })
+
+DEF_TRAVERSE_DECL(ObjCImplementationDecl, {
+ // FIXME: implement
+ })
+
+DEF_TRAVERSE_DECL(ObjCInterfaceDecl, {
+ // FIXME: implement
+ })
+
+DEF_TRAVERSE_DECL(ObjCProtocolDecl, {
+ // FIXME: implement
+ })
+
+DEF_TRAVERSE_DECL(ObjCMethodDecl, {
+ if (D->getResultTypeSourceInfo()) {
+ TRY_TO(TraverseTypeLoc(D->getResultTypeSourceInfo()->getTypeLoc()));
+ }
+ for (ObjCMethodDecl::param_iterator
+ I = D->param_begin(), E = D->param_end(); I != E; ++I) {
+ TRY_TO(TraverseDecl(*I));
+ }
+ if (D->isThisDeclarationADefinition()) {
+ TRY_TO(TraverseStmt(D->getBody()));
+ }
+ return true;
+ })
+
+DEF_TRAVERSE_DECL(ObjCPropertyDecl, {
+ // FIXME: implement
+ })
+
+DEF_TRAVERSE_DECL(UsingDecl, {
+ TRY_TO(TraverseNestedNameSpecifierLoc(D->getQualifierLoc()));
+ TRY_TO(TraverseDeclarationNameInfo(D->getNameInfo()));
+ })
+
+DEF_TRAVERSE_DECL(UsingDirectiveDecl, {
+ TRY_TO(TraverseNestedNameSpecifierLoc(D->getQualifierLoc()));
+ })
+
+DEF_TRAVERSE_DECL(UsingShadowDecl, { })
+
+// A helper method for TemplateDecl's children.
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseTemplateParameterListHelper(
+ TemplateParameterList *TPL) {
+ if (TPL) {
+ for (TemplateParameterList::iterator I = TPL->begin(), E = TPL->end();
+ I != E; ++I) {
+ TRY_TO(TraverseDecl(*I));
+ }
+ }
+ return true;
+}
+
+// A helper method for traversing the implicit instantiations of a
+// class template.
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseClassInstantiations(
+ ClassTemplateDecl* D, Decl *Pattern) {
+ assert(isa<ClassTemplateDecl>(Pattern) ||
+ isa<ClassTemplatePartialSpecializationDecl>(Pattern));
+
+ ClassTemplateDecl::spec_iterator end = D->spec_end();
+ for (ClassTemplateDecl::spec_iterator it = D->spec_begin(); it != end; ++it) {
+ ClassTemplateSpecializationDecl* SD = *it;
+
+ switch (SD->getSpecializationKind()) {
+ // Visit the implicit instantiations with the requested pattern.
+ case TSK_ImplicitInstantiation: {
+ llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *> U
+ = SD->getInstantiatedFrom();
+
+ bool ShouldVisit;
+ if (U.is<ClassTemplateDecl*>())
+ ShouldVisit = (U.get<ClassTemplateDecl*>() == Pattern);
+ else
+ ShouldVisit
+ = (U.get<ClassTemplatePartialSpecializationDecl*>() == Pattern);
+
+ if (ShouldVisit)
+ TRY_TO(TraverseDecl(SD));
+ break;
+ }
+
+ // We don't need to do anything on an explicit instantiation
+ // or explicit specialization because there will be an explicit
+ // node for it elsewhere.
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ExplicitInstantiationDefinition:
+ case TSK_ExplicitSpecialization:
+ break;
+
+ // We don't need to do anything for an uninstantiated
+ // specialization.
+ case TSK_Undeclared:
+ break;
+ }
+ }
+
+ return true;
+}
+
+DEF_TRAVERSE_DECL(ClassTemplateDecl, {
+ CXXRecordDecl* TempDecl = D->getTemplatedDecl();
+ TRY_TO(TraverseDecl(TempDecl));
+ TRY_TO(TraverseTemplateParameterListHelper(D->getTemplateParameters()));
+
+ // By default, we do not traverse the instantiations of
+ // class templates since they do not appear in the user code. The
+ // following code optionally traverses them.
+ if (getDerived().shouldVisitTemplateInstantiations()) {
+ // If this is the definition of the primary template, visit
+ // instantiations which were formed from this pattern.
+ if (D->isThisDeclarationADefinition())
+ TRY_TO(TraverseClassInstantiations(D, D));
+ }
+
+ // Note that getInstantiatedFromMemberTemplate() is just a link
+ // from a template instantiation back to the template from which
+ // it was instantiated, and thus should not be traversed.
+ })
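+
+// Illustrative sketch (hypothetical client code): visiting implicit template
+// instantiations, as done conditionally above, is opt-in; a derived visitor
+// enables it by shadowing the corresponding hook:
+//
+//   class InstantiationAwareVisitor
+//       : public RecursiveASTVisitor<InstantiationAwareVisitor> {
+//   public:
+//     bool shouldVisitTemplateInstantiations() const { return true; }
+//   };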
+
+// A helper method for traversing the implicit instantiations of a
+// function template, skipping its explicit specializations and instantiations.
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseFunctionInstantiations(
+ FunctionTemplateDecl* D) {
+ FunctionTemplateDecl::spec_iterator end = D->spec_end();
+ for (FunctionTemplateDecl::spec_iterator it = D->spec_begin(); it != end;
+ ++it) {
+ FunctionDecl* FD = *it;
+ switch (FD->getTemplateSpecializationKind()) {
+ case TSK_ImplicitInstantiation:
+ // We don't know what kind of FunctionDecl this is.
+ TRY_TO(TraverseDecl(FD));
+ break;
+
+ // No need to visit explicit instantiations, we'll find the node
+ // eventually.
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ExplicitInstantiationDefinition:
+ break;
+
+ case TSK_Undeclared: // Declaration of the template definition.
+ case TSK_ExplicitSpecialization:
+ break;
+ }
+ }
+
+ return true;
+}
+
+DEF_TRAVERSE_DECL(FunctionTemplateDecl, {
+ TRY_TO(TraverseDecl(D->getTemplatedDecl()));
+ TRY_TO(TraverseTemplateParameterListHelper(D->getTemplateParameters()));
+
+ // By default, we do not traverse the instantiations of
+    // function templates since they do not appear in the user code. The
+ // following code optionally traverses them.
+ if (getDerived().shouldVisitTemplateInstantiations()) {
+ // Explicit function specializations will be traversed from the
+ // context of their declaration. There is therefore no need to
+      // traverse them here.
+ //
+ // In addition, we only traverse the function instantiations when
+ // the function template is a function template definition.
+ if (D->isThisDeclarationADefinition()) {
+ TRY_TO(TraverseFunctionInstantiations(D));
+ }
+ }
+ })
+
+DEF_TRAVERSE_DECL(TemplateTemplateParmDecl, {
+ // D is the "T" in something like
+ // template <template <typename> class T> class container { };
+ TRY_TO(TraverseDecl(D->getTemplatedDecl()));
+ if (D->hasDefaultArgument()) {
+ TRY_TO(TraverseTemplateArgumentLoc(D->getDefaultArgument()));
+ }
+ TRY_TO(TraverseTemplateParameterListHelper(D->getTemplateParameters()));
+ })
+
+DEF_TRAVERSE_DECL(TemplateTypeParmDecl, {
+ // D is the "T" in something like "template<typename T> class vector;"
+ if (D->getTypeForDecl())
+ TRY_TO(TraverseType(QualType(D->getTypeForDecl(), 0)));
+ if (D->hasDefaultArgument())
+ TRY_TO(TraverseTypeLoc(D->getDefaultArgumentInfo()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_DECL(TypedefDecl, {
+ TRY_TO(TraverseTypeLoc(D->getTypeSourceInfo()->getTypeLoc()));
+ // We shouldn't traverse D->getTypeForDecl(); it's a result of
+ // declaring the typedef, not something that was written in the
+ // source.
+ })
+
+DEF_TRAVERSE_DECL(TypeAliasDecl, {
+ TRY_TO(TraverseTypeLoc(D->getTypeSourceInfo()->getTypeLoc()));
+ // We shouldn't traverse D->getTypeForDecl(); it's a result of
+ // declaring the type alias, not something that was written in the
+ // source.
+ })
+
+DEF_TRAVERSE_DECL(TypeAliasTemplateDecl, {
+ TRY_TO(TraverseDecl(D->getTemplatedDecl()));
+ TRY_TO(TraverseTemplateParameterListHelper(D->getTemplateParameters()));
+ })
+
+DEF_TRAVERSE_DECL(UnresolvedUsingTypenameDecl, {
+ // A dependent using declaration which was marked with 'typename'.
+ // template<class T> class A : public B<T> { using typename B<T>::foo; };
+ TRY_TO(TraverseNestedNameSpecifierLoc(D->getQualifierLoc()));
+ // We shouldn't traverse D->getTypeForDecl(); it's a result of
+ // declaring the type, not something that was written in the
+ // source.
+ })
+
+DEF_TRAVERSE_DECL(EnumDecl, {
+ if (D->getTypeForDecl())
+ TRY_TO(TraverseType(QualType(D->getTypeForDecl(), 0)));
+
+ TRY_TO(TraverseNestedNameSpecifierLoc(D->getQualifierLoc()));
+ // The enumerators are already traversed by
+ // decls_begin()/decls_end().
+ })
+
+
+// Helper methods for RecordDecl and its children.
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseRecordHelper(
+ RecordDecl *D) {
+ // We shouldn't traverse D->getTypeForDecl(); it's a result of
+ // declaring the type, not something that was written in the source.
+
+ TRY_TO(TraverseNestedNameSpecifierLoc(D->getQualifierLoc()));
+ return true;
+}
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseCXXRecordHelper(
+ CXXRecordDecl *D) {
+ if (!TraverseRecordHelper(D))
+ return false;
+ if (D->hasDefinition()) {
+ for (CXXRecordDecl::base_class_iterator I = D->bases_begin(),
+ E = D->bases_end();
+ I != E; ++I) {
+ TRY_TO(TraverseTypeLoc(I->getTypeSourceInfo()->getTypeLoc()));
+ }
+ // We don't traverse the friends or the conversions, as they are
+ // already in decls_begin()/decls_end().
+ }
+ return true;
+}
+
+DEF_TRAVERSE_DECL(RecordDecl, {
+ TRY_TO(TraverseRecordHelper(D));
+ })
+
+DEF_TRAVERSE_DECL(CXXRecordDecl, {
+ TRY_TO(TraverseCXXRecordHelper(D));
+ })
+
+DEF_TRAVERSE_DECL(ClassTemplateSpecializationDecl, {
+ // For implicit instantiations ("set<int> x;"), we don't want to
+    // recurse at all, since the instantiated class isn't written in
+    // the source code anywhere. (Note the instantiated *type* --
+    // set<int> -- is written, and will still get a
+    // TemplateSpecializationType callback). For explicit instantiations
+    // ("template class set<int>;"), we do need a callback, since this
+ // is the only callback that's made for this instantiation.
+ // We use getTypeAsWritten() to distinguish.
+ if (TypeSourceInfo *TSI = D->getTypeAsWritten())
+ TRY_TO(TraverseTypeLoc(TSI->getTypeLoc()));
+
+ if (!getDerived().shouldVisitTemplateInstantiations() &&
+ D->getTemplateSpecializationKind() != TSK_ExplicitSpecialization)
+ // Returning from here skips traversing the
+ // declaration context of the ClassTemplateSpecializationDecl
+ // (embedded in the DEF_TRAVERSE_DECL() macro)
+ // which contains the instantiated members of the class.
+ return true;
+ })
+
+template <typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseTemplateArgumentLocsHelper(
+ const TemplateArgumentLoc *TAL, unsigned Count) {
+ for (unsigned I = 0; I < Count; ++I) {
+ TRY_TO(TraverseTemplateArgumentLoc(TAL[I]));
+ }
+ return true;
+}
+
+DEF_TRAVERSE_DECL(ClassTemplatePartialSpecializationDecl, {
+ // The partial specialization.
+ if (TemplateParameterList *TPL = D->getTemplateParameters()) {
+ for (TemplateParameterList::iterator I = TPL->begin(), E = TPL->end();
+ I != E; ++I) {
+ TRY_TO(TraverseDecl(*I));
+ }
+ }
+    // The args that remain unspecialized.
+ TRY_TO(TraverseTemplateArgumentLocsHelper(
+ D->getTemplateArgsAsWritten(), D->getNumTemplateArgsAsWritten()));
+
+ // Don't need the ClassTemplatePartialSpecializationHelper, even
+ // though that's our parent class -- we already visit all the
+ // template args here.
+ TRY_TO(TraverseCXXRecordHelper(D));
+
+ // If we're visiting instantiations, visit the instantiations of
+ // this template now.
+ if (getDerived().shouldVisitTemplateInstantiations() &&
+ D->isThisDeclarationADefinition())
+ TRY_TO(TraverseClassInstantiations(D->getSpecializedTemplate(), D));
+ })
+
+DEF_TRAVERSE_DECL(EnumConstantDecl, {
+ TRY_TO(TraverseStmt(D->getInitExpr()));
+ })
+
+DEF_TRAVERSE_DECL(UnresolvedUsingValueDecl, {
+ // Like UnresolvedUsingTypenameDecl, but without the 'typename':
+    // template <class T> class A : public Base<T> { using Base<T>::foo; };
+ TRY_TO(TraverseNestedNameSpecifierLoc(D->getQualifierLoc()));
+ TRY_TO(TraverseDeclarationNameInfo(D->getNameInfo()));
+ })
+
+DEF_TRAVERSE_DECL(IndirectFieldDecl, {})
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseDeclaratorHelper(DeclaratorDecl *D) {
+ TRY_TO(TraverseNestedNameSpecifierLoc(D->getQualifierLoc()));
+ if (D->getTypeSourceInfo())
+ TRY_TO(TraverseTypeLoc(D->getTypeSourceInfo()->getTypeLoc()));
+ else
+ TRY_TO(TraverseType(D->getType()));
+ return true;
+}
+
+DEF_TRAVERSE_DECL(FieldDecl, {
+ TRY_TO(TraverseDeclaratorHelper(D));
+ if (D->isBitField())
+ TRY_TO(TraverseStmt(D->getBitWidth()));
+ else if (D->hasInClassInitializer())
+ TRY_TO(TraverseStmt(D->getInClassInitializer()));
+ })
+
+DEF_TRAVERSE_DECL(ObjCAtDefsFieldDecl, {
+ TRY_TO(TraverseDeclaratorHelper(D));
+ if (D->isBitField())
+ TRY_TO(TraverseStmt(D->getBitWidth()));
+ // FIXME: implement the rest.
+ })
+
+DEF_TRAVERSE_DECL(ObjCIvarDecl, {
+ TRY_TO(TraverseDeclaratorHelper(D));
+ if (D->isBitField())
+ TRY_TO(TraverseStmt(D->getBitWidth()));
+ // FIXME: implement the rest.
+ })
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseFunctionHelper(FunctionDecl *D) {
+ TRY_TO(TraverseNestedNameSpecifierLoc(D->getQualifierLoc()));
+ TRY_TO(TraverseDeclarationNameInfo(D->getNameInfo()));
+
+ // If we're an explicit template specialization, iterate over the
+ // template args that were explicitly specified. If we were doing
+ // this in typing order, we'd do it between the return type and
+ // the function args, but both are handled by the FunctionTypeLoc
+  // above, so we have to choose one side. I've decided to do it before.
+ if (const FunctionTemplateSpecializationInfo *FTSI =
+ D->getTemplateSpecializationInfo()) {
+ if (FTSI->getTemplateSpecializationKind() != TSK_Undeclared &&
+ FTSI->getTemplateSpecializationKind() != TSK_ImplicitInstantiation) {
+ // A specialization might not have explicit template arguments if it has
+ // a templated return type and concrete arguments.
+ if (const ASTTemplateArgumentListInfo *TALI =
+ FTSI->TemplateArgumentsAsWritten) {
+ TRY_TO(TraverseTemplateArgumentLocsHelper(TALI->getTemplateArgs(),
+ TALI->NumTemplateArgs));
+ }
+ }
+ }
+
+ // Visit the function type itself, which can be either
+ // FunctionNoProtoType or FunctionProtoType, or a typedef. This
+ // also covers the return type and the function parameters,
+ // including exception specifications.
+ TRY_TO(TraverseTypeLoc(D->getTypeSourceInfo()->getTypeLoc()));
+
+ if (CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(D)) {
+ // Constructor initializers.
+ for (CXXConstructorDecl::init_iterator I = Ctor->init_begin(),
+ E = Ctor->init_end();
+ I != E; ++I) {
+ TRY_TO(TraverseConstructorInitializer(*I));
+ }
+ }
+
+ if (D->isThisDeclarationADefinition()) {
+ TRY_TO(TraverseStmt(D->getBody())); // Function body.
+ }
+ return true;
+}
+
+DEF_TRAVERSE_DECL(FunctionDecl, {
+ // We skip decls_begin/decls_end, which are already covered by
+ // TraverseFunctionHelper().
+ return TraverseFunctionHelper(D);
+ })
+
+DEF_TRAVERSE_DECL(CXXMethodDecl, {
+ // We skip decls_begin/decls_end, which are already covered by
+ // TraverseFunctionHelper().
+ return TraverseFunctionHelper(D);
+ })
+
+DEF_TRAVERSE_DECL(CXXConstructorDecl, {
+ // We skip decls_begin/decls_end, which are already covered by
+ // TraverseFunctionHelper().
+ return TraverseFunctionHelper(D);
+ })
+
+// CXXConversionDecl is the declaration of a type conversion operator.
+// It's not a cast expression.
+DEF_TRAVERSE_DECL(CXXConversionDecl, {
+ // We skip decls_begin/decls_end, which are already covered by
+ // TraverseFunctionHelper().
+ return TraverseFunctionHelper(D);
+ })
+
+DEF_TRAVERSE_DECL(CXXDestructorDecl, {
+ // We skip decls_begin/decls_end, which are already covered by
+ // TraverseFunctionHelper().
+ return TraverseFunctionHelper(D);
+ })
+
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseVarHelper(VarDecl *D) {
+ TRY_TO(TraverseDeclaratorHelper(D));
+ // Default params are taken care of when we traverse the ParmVarDecl.
+ if (!isa<ParmVarDecl>(D))
+ TRY_TO(TraverseStmt(D->getInit()));
+ return true;
+}
+
+DEF_TRAVERSE_DECL(VarDecl, {
+ TRY_TO(TraverseVarHelper(D));
+ })
+
+DEF_TRAVERSE_DECL(ImplicitParamDecl, {
+ TRY_TO(TraverseVarHelper(D));
+ })
+
+DEF_TRAVERSE_DECL(NonTypeTemplateParmDecl, {
+ // A non-type template parameter, e.g. "S" in template<int S> class Foo ...
+ TRY_TO(TraverseDeclaratorHelper(D));
+ TRY_TO(TraverseStmt(D->getDefaultArgument()));
+ })
+
+DEF_TRAVERSE_DECL(ParmVarDecl, {
+ TRY_TO(TraverseVarHelper(D));
+
+ if (D->hasDefaultArg() &&
+ D->hasUninstantiatedDefaultArg() &&
+ !D->hasUnparsedDefaultArg())
+ TRY_TO(TraverseStmt(D->getUninstantiatedDefaultArg()));
+
+ if (D->hasDefaultArg() &&
+ !D->hasUninstantiatedDefaultArg() &&
+ !D->hasUnparsedDefaultArg())
+ TRY_TO(TraverseStmt(D->getDefaultArg()));
+ })
+
+#undef DEF_TRAVERSE_DECL
+
+// ----------------- Stmt traversal -----------------
+//
+// For stmts, we automate (in the DEF_TRAVERSE_STMT macro) iterating
+// over the children defined in children() (every stmt defines these,
+// though sometimes the range is empty). Each individual Traverse*
+// method only needs to worry about children other than those. To see
+// what children() does for a given class, see, e.g.,
+// http://clang.llvm.org/doxygen/Stmt_8cpp_source.html
+
+// This macro makes available a variable S, the passed-in stmt.
+#define DEF_TRAVERSE_STMT(STMT, CODE) \
+template<typename Derived> \
+bool RecursiveASTVisitor<Derived>::Traverse##STMT (STMT *S) { \
+ TRY_TO(WalkUpFrom##STMT(S)); \
+ { CODE; } \
+ for (Stmt::child_range range = S->children(); range; ++range) { \
+ TRY_TO(TraverseStmt(*range)); \
+ } \
+ return true; \
+}
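+
+// Illustrative sketch (hypothetical client code): because dispatch always
+// goes through getDerived(), a client can replace the traversal of one
+// statement class wholesale, e.g. to look only at if-conditions:
+//
+//   class IfConditionVisitor
+//       : public RecursiveASTVisitor<IfConditionVisitor> {
+//   public:
+//     bool TraverseIfStmt(IfStmt *S) {
+//       return TraverseStmt(S->getCond());  // deliberately skip then/else
+//     }
+//   };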
+
+DEF_TRAVERSE_STMT(AsmStmt, {
+ TRY_TO(TraverseStmt(S->getAsmString()));
+ for (unsigned I = 0, E = S->getNumInputs(); I < E; ++I) {
+ TRY_TO(TraverseStmt(S->getInputConstraintLiteral(I)));
+ }
+ for (unsigned I = 0, E = S->getNumOutputs(); I < E; ++I) {
+ TRY_TO(TraverseStmt(S->getOutputConstraintLiteral(I)));
+ }
+ for (unsigned I = 0, E = S->getNumClobbers(); I < E; ++I) {
+ TRY_TO(TraverseStmt(S->getClobber(I)));
+ }
+ // children() iterates over inputExpr and outputExpr.
+ })
+
+DEF_TRAVERSE_STMT(CXXCatchStmt, {
+ TRY_TO(TraverseDecl(S->getExceptionDecl()));
+ // children() iterates over the handler block.
+ })
+
+DEF_TRAVERSE_STMT(DeclStmt, {
+ for (DeclStmt::decl_iterator I = S->decl_begin(), E = S->decl_end();
+ I != E; ++I) {
+ TRY_TO(TraverseDecl(*I));
+ }
+ // Suppress the default iteration over children() by
+ // returning. Here's why: A DeclStmt looks like 'type var [=
+ // initializer]'. The decls above already traverse over the
+ // initializers, so we don't have to do it again (which
+ // children() would do).
+ return true;
+ })
+
+
+// These non-expr stmts (most of them), do not need any action except
+// iterating over the children.
+DEF_TRAVERSE_STMT(BreakStmt, { })
+DEF_TRAVERSE_STMT(CXXTryStmt, { })
+DEF_TRAVERSE_STMT(CaseStmt, { })
+DEF_TRAVERSE_STMT(CompoundStmt, { })
+DEF_TRAVERSE_STMT(ContinueStmt, { })
+DEF_TRAVERSE_STMT(DefaultStmt, { })
+DEF_TRAVERSE_STMT(DoStmt, { })
+DEF_TRAVERSE_STMT(ForStmt, { })
+DEF_TRAVERSE_STMT(GotoStmt, { })
+DEF_TRAVERSE_STMT(IfStmt, { })
+DEF_TRAVERSE_STMT(IndirectGotoStmt, { })
+DEF_TRAVERSE_STMT(LabelStmt, { })
+DEF_TRAVERSE_STMT(NullStmt, { })
+DEF_TRAVERSE_STMT(ObjCAtCatchStmt, { })
+DEF_TRAVERSE_STMT(ObjCAtFinallyStmt, { })
+DEF_TRAVERSE_STMT(ObjCAtSynchronizedStmt, { })
+DEF_TRAVERSE_STMT(ObjCAtThrowStmt, { })
+DEF_TRAVERSE_STMT(ObjCAtTryStmt, { })
+DEF_TRAVERSE_STMT(ObjCForCollectionStmt, { })
+DEF_TRAVERSE_STMT(ObjCAutoreleasePoolStmt, { })
+DEF_TRAVERSE_STMT(CXXForRangeStmt, { })
+DEF_TRAVERSE_STMT(MSDependentExistsStmt, {
+ TRY_TO(TraverseNestedNameSpecifierLoc(S->getQualifierLoc()));
+ TRY_TO(TraverseDeclarationNameInfo(S->getNameInfo()));
+})
+DEF_TRAVERSE_STMT(ReturnStmt, { })
+DEF_TRAVERSE_STMT(SwitchStmt, { })
+DEF_TRAVERSE_STMT(WhileStmt, { })
+
+
+DEF_TRAVERSE_STMT(CXXDependentScopeMemberExpr, {
+ TRY_TO(TraverseNestedNameSpecifierLoc(S->getQualifierLoc()));
+ TRY_TO(TraverseDeclarationNameInfo(S->getMemberNameInfo()));
+ if (S->hasExplicitTemplateArgs()) {
+ TRY_TO(TraverseTemplateArgumentLocsHelper(
+ S->getTemplateArgs(), S->getNumTemplateArgs()));
+ }
+ })
+
+DEF_TRAVERSE_STMT(DeclRefExpr, {
+ TRY_TO(TraverseNestedNameSpecifierLoc(S->getQualifierLoc()));
+ TRY_TO(TraverseDeclarationNameInfo(S->getNameInfo()));
+ TRY_TO(TraverseTemplateArgumentLocsHelper(
+ S->getTemplateArgs(), S->getNumTemplateArgs()));
+ })
+
+DEF_TRAVERSE_STMT(DependentScopeDeclRefExpr, {
+ TRY_TO(TraverseNestedNameSpecifierLoc(S->getQualifierLoc()));
+ TRY_TO(TraverseDeclarationNameInfo(S->getNameInfo()));
+ if (S->hasExplicitTemplateArgs()) {
+ TRY_TO(TraverseTemplateArgumentLocsHelper(
+ S->getExplicitTemplateArgs().getTemplateArgs(),
+ S->getNumTemplateArgs()));
+ }
+ })
+
+DEF_TRAVERSE_STMT(MemberExpr, {
+ TRY_TO(TraverseNestedNameSpecifierLoc(S->getQualifierLoc()));
+ TRY_TO(TraverseDeclarationNameInfo(S->getMemberNameInfo()));
+ TRY_TO(TraverseTemplateArgumentLocsHelper(
+ S->getTemplateArgs(), S->getNumTemplateArgs()));
+ })
+
+DEF_TRAVERSE_STMT(ImplicitCastExpr, {
+ // We don't traverse the cast type, as it's not written in the
+ // source code.
+ })
+
+DEF_TRAVERSE_STMT(CStyleCastExpr, {
+ TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(CXXFunctionalCastExpr, {
+ TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(CXXConstCastExpr, {
+ TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(CXXDynamicCastExpr, {
+ TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(CXXReinterpretCastExpr, {
+ TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(CXXStaticCastExpr, {
+ TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
+ })
+
+// InitListExpr is a tricky one, because we want to do all our work on
+// the syntactic form of the listexpr, but this method takes the
+// semantic form by default. We can't use the macro helper because it
+// calls WalkUp*() on the semantic form, before our code can convert
+// to the syntactic form.
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseInitListExpr(InitListExpr *S) {
+ if (InitListExpr *Syn = S->getSyntacticForm())
+ S = Syn;
+ TRY_TO(WalkUpFromInitListExpr(S));
+ // All we need are the default actions. FIXME: use a helper function.
+ for (Stmt::child_range range = S->children(); range; ++range) {
+ TRY_TO(TraverseStmt(*range));
+ }
+ return true;
+}
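+
+// For orientation: for an aggregate initializer such as
+//   struct P { int x, y; };  P p = { 1 };
+// the syntactic InitListExpr holds only the written "1", while the semantic
+// form also holds the implicitly value-initialized member; the override
+// above deliberately walks the syntactic (as-written) form when one exists.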
+
+// GenericSelectionExpr is a special case because the types and expressions
+// are interleaved. We also need to watch out for null types (default
+// generic associations).
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::
+TraverseGenericSelectionExpr(GenericSelectionExpr *S) {
+ TRY_TO(WalkUpFromGenericSelectionExpr(S));
+ TRY_TO(TraverseStmt(S->getControllingExpr()));
+ for (unsigned i = 0; i != S->getNumAssocs(); ++i) {
+ if (TypeSourceInfo *TS = S->getAssocTypeSourceInfo(i))
+ TRY_TO(TraverseTypeLoc(TS->getTypeLoc()));
+ TRY_TO(TraverseStmt(S->getAssocExpr(i)));
+ }
+ return true;
+}
+
+// PseudoObjectExpr is a special case because of the weirdness with
+// syntactic expressions and opaque values.
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::
+TraversePseudoObjectExpr(PseudoObjectExpr *S) {
+ TRY_TO(WalkUpFromPseudoObjectExpr(S));
+ TRY_TO(TraverseStmt(S->getSyntacticForm()));
+ for (PseudoObjectExpr::semantics_iterator
+ i = S->semantics_begin(), e = S->semantics_end(); i != e; ++i) {
+ Expr *sub = *i;
+ if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(sub))
+ sub = OVE->getSourceExpr();
+ TRY_TO(TraverseStmt(sub));
+ }
+ return true;
+}
+
+DEF_TRAVERSE_STMT(CXXScalarValueInitExpr, {
+ // This is called for code like 'return T()' where T is a built-in
+ // (i.e. non-class) type.
+ TRY_TO(TraverseTypeLoc(S->getTypeSourceInfo()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(CXXNewExpr, {
+ // The child-iterator will pick up the other arguments.
+ TRY_TO(TraverseTypeLoc(S->getAllocatedTypeSourceInfo()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(OffsetOfExpr, {
+ // The child-iterator will pick up the expression representing
+ // the field.
+    // FIXME: for code like offsetof(Foo, a.b.c), should we be making
+    // MemberExpr callbacks for Foo.a, Foo.a.b, and Foo.a.b.c?
+ TRY_TO(TraverseTypeLoc(S->getTypeSourceInfo()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(UnaryExprOrTypeTraitExpr, {
+ // The child-iterator will pick up the arg if it's an expression,
+ // but not if it's a type.
+ if (S->isArgumentType())
+ TRY_TO(TraverseTypeLoc(S->getArgumentTypeInfo()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(CXXTypeidExpr, {
+ // The child-iterator will pick up the arg if it's an expression,
+ // but not if it's a type.
+ if (S->isTypeOperand())
+ TRY_TO(TraverseTypeLoc(S->getTypeOperandSourceInfo()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(CXXUuidofExpr, {
+ // The child-iterator will pick up the arg if it's an expression,
+ // but not if it's a type.
+ if (S->isTypeOperand())
+ TRY_TO(TraverseTypeLoc(S->getTypeOperandSourceInfo()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(UnaryTypeTraitExpr, {
+ TRY_TO(TraverseTypeLoc(S->getQueriedTypeSourceInfo()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(BinaryTypeTraitExpr, {
+ TRY_TO(TraverseTypeLoc(S->getLhsTypeSourceInfo()->getTypeLoc()));
+ TRY_TO(TraverseTypeLoc(S->getRhsTypeSourceInfo()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(TypeTraitExpr, {
+ for (unsigned I = 0, N = S->getNumArgs(); I != N; ++I)
+ TRY_TO(TraverseTypeLoc(S->getArg(I)->getTypeLoc()));
+})
+
+DEF_TRAVERSE_STMT(ArrayTypeTraitExpr, {
+ TRY_TO(TraverseTypeLoc(S->getQueriedTypeSourceInfo()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(ExpressionTraitExpr, {
+ TRY_TO(TraverseStmt(S->getQueriedExpression()));
+ })
+
+DEF_TRAVERSE_STMT(VAArgExpr, {
+ // The child-iterator will pick up the expression argument.
+ TRY_TO(TraverseTypeLoc(S->getWrittenTypeInfo()->getTypeLoc()));
+ })
+
+DEF_TRAVERSE_STMT(CXXTemporaryObjectExpr, {
+ // This is called for code like 'return T()' where T is a class type.
+ TRY_TO(TraverseTypeLoc(S->getTypeSourceInfo()->getTypeLoc()));
+ })
+
+// Walk only the visible parts of lambda expressions.
+template<typename Derived>
+bool RecursiveASTVisitor<Derived>::TraverseLambdaExpr(LambdaExpr *S) {
+ for (LambdaExpr::capture_iterator C = S->explicit_capture_begin(),
+ CEnd = S->explicit_capture_end();
+ C != CEnd; ++C) {
+ TRY_TO(TraverseLambdaCapture(*C));
+ }
+
+ if (S->hasExplicitParameters() || S->hasExplicitResultType()) {
+ TypeLoc TL = S->getCallOperator()->getTypeSourceInfo()->getTypeLoc();
+ if (S->hasExplicitParameters() && S->hasExplicitResultType()) {
+ // Visit the whole type.
+ TRY_TO(TraverseTypeLoc(TL));
+ } else if (isa<FunctionProtoTypeLoc>(TL)) {
+ FunctionProtoTypeLoc Proto = cast<FunctionProtoTypeLoc>(TL);
+ if (S->hasExplicitParameters()) {
+ // Visit parameters.
+ for (unsigned I = 0, N = Proto.getNumArgs(); I != N; ++I) {
+ TRY_TO(TraverseDecl(Proto.getArg(I)));
+ }
+ } else {
+ TRY_TO(TraverseTypeLoc(Proto.getResultLoc()));
+ }
+ }
+ }
+
+ TRY_TO(TraverseStmt(S->getBody()));
+ return true;
+}
+
+DEF_TRAVERSE_STMT(CXXUnresolvedConstructExpr, {
+ // This is called for code like 'T()', where T is a template argument.
+ TRY_TO(TraverseTypeLoc(S->getTypeSourceInfo()->getTypeLoc()));
+ })
+
+// These expressions all might take explicit template arguments.
+// We traverse those if so. FIXME: implement these.
+DEF_TRAVERSE_STMT(CXXConstructExpr, { })
+DEF_TRAVERSE_STMT(CallExpr, { })
+DEF_TRAVERSE_STMT(CXXMemberCallExpr, { })
+
+// These exprs (most of them), do not need any action except iterating
+// over the children.
+DEF_TRAVERSE_STMT(AddrLabelExpr, { })
+DEF_TRAVERSE_STMT(ArraySubscriptExpr, { })
+DEF_TRAVERSE_STMT(BlockExpr, {
+ TRY_TO(TraverseDecl(S->getBlockDecl()));
+ return true; // no child statements to loop through.
+})
+DEF_TRAVERSE_STMT(ChooseExpr, { })
+DEF_TRAVERSE_STMT(CompoundLiteralExpr, { })
+DEF_TRAVERSE_STMT(CXXBindTemporaryExpr, { })
+DEF_TRAVERSE_STMT(CXXBoolLiteralExpr, { })
+DEF_TRAVERSE_STMT(CXXDefaultArgExpr, { })
+DEF_TRAVERSE_STMT(CXXDeleteExpr, { })
+DEF_TRAVERSE_STMT(ExprWithCleanups, { })
+DEF_TRAVERSE_STMT(CXXNullPtrLiteralExpr, { })
+DEF_TRAVERSE_STMT(CXXPseudoDestructorExpr, {
+ TRY_TO(TraverseNestedNameSpecifierLoc(S->getQualifierLoc()));
+ if (TypeSourceInfo *ScopeInfo = S->getScopeTypeInfo())
+ TRY_TO(TraverseTypeLoc(ScopeInfo->getTypeLoc()));
+ if (TypeSourceInfo *DestroyedTypeInfo = S->getDestroyedTypeInfo())
+ TRY_TO(TraverseTypeLoc(DestroyedTypeInfo->getTypeLoc()));
+})
+DEF_TRAVERSE_STMT(CXXThisExpr, { })
+DEF_TRAVERSE_STMT(CXXThrowExpr, { })
+DEF_TRAVERSE_STMT(UserDefinedLiteral, { })
+DEF_TRAVERSE_STMT(DesignatedInitExpr, { })
+DEF_TRAVERSE_STMT(ExtVectorElementExpr, { })
+DEF_TRAVERSE_STMT(GNUNullExpr, { })
+DEF_TRAVERSE_STMT(ImplicitValueInitExpr, { })
+DEF_TRAVERSE_STMT(ObjCBoolLiteralExpr, { })
+DEF_TRAVERSE_STMT(ObjCEncodeExpr, { })
+DEF_TRAVERSE_STMT(ObjCIsaExpr, { })
+DEF_TRAVERSE_STMT(ObjCIvarRefExpr, { })
+DEF_TRAVERSE_STMT(ObjCMessageExpr, { })
+DEF_TRAVERSE_STMT(ObjCPropertyRefExpr, { })
+DEF_TRAVERSE_STMT(ObjCSubscriptRefExpr, { })
+DEF_TRAVERSE_STMT(ObjCProtocolExpr, { })
+DEF_TRAVERSE_STMT(ObjCSelectorExpr, { })
+DEF_TRAVERSE_STMT(ObjCIndirectCopyRestoreExpr, { })
+DEF_TRAVERSE_STMT(ObjCBridgedCastExpr, {
+ TRY_TO(TraverseTypeLoc(S->getTypeInfoAsWritten()->getTypeLoc()));
+})
+DEF_TRAVERSE_STMT(ParenExpr, { })
+DEF_TRAVERSE_STMT(ParenListExpr, { })
+DEF_TRAVERSE_STMT(PredefinedExpr, { })
+DEF_TRAVERSE_STMT(ShuffleVectorExpr, { })
+DEF_TRAVERSE_STMT(StmtExpr, { })
+DEF_TRAVERSE_STMT(UnresolvedLookupExpr, {
+ TRY_TO(TraverseNestedNameSpecifierLoc(S->getQualifierLoc()));
+ if (S->hasExplicitTemplateArgs()) {
+ TRY_TO(TraverseTemplateArgumentLocsHelper(S->getTemplateArgs(),
+ S->getNumTemplateArgs()));
+ }
+})
+
+DEF_TRAVERSE_STMT(UnresolvedMemberExpr, {
+ TRY_TO(TraverseNestedNameSpecifierLoc(S->getQualifierLoc()));
+ if (S->hasExplicitTemplateArgs()) {
+ TRY_TO(TraverseTemplateArgumentLocsHelper(S->getTemplateArgs(),
+ S->getNumTemplateArgs()));
+ }
+})
+
+DEF_TRAVERSE_STMT(SEHTryStmt, { })
+DEF_TRAVERSE_STMT(SEHExceptStmt, { })
+DEF_TRAVERSE_STMT(SEHFinallyStmt, { })
+
+DEF_TRAVERSE_STMT(CXXOperatorCallExpr, { })
+DEF_TRAVERSE_STMT(OpaqueValueExpr, { })
+DEF_TRAVERSE_STMT(CUDAKernelCallExpr, { })
+
+// These operators (all of them) do not need any action except
+// iterating over the children.
+DEF_TRAVERSE_STMT(BinaryConditionalOperator, { })
+DEF_TRAVERSE_STMT(ConditionalOperator, { })
+DEF_TRAVERSE_STMT(UnaryOperator, { })
+DEF_TRAVERSE_STMT(BinaryOperator, { })
+DEF_TRAVERSE_STMT(CompoundAssignOperator, { })
+DEF_TRAVERSE_STMT(CXXNoexceptExpr, { })
+DEF_TRAVERSE_STMT(PackExpansionExpr, { })
+DEF_TRAVERSE_STMT(SizeOfPackExpr, { })
+DEF_TRAVERSE_STMT(SubstNonTypeTemplateParmPackExpr, { })
+DEF_TRAVERSE_STMT(SubstNonTypeTemplateParmExpr, { })
+DEF_TRAVERSE_STMT(MaterializeTemporaryExpr, { })
+DEF_TRAVERSE_STMT(AtomicExpr, { })
+
+// These literals (all of them) do not need any action.
+DEF_TRAVERSE_STMT(IntegerLiteral, { })
+DEF_TRAVERSE_STMT(CharacterLiteral, { })
+DEF_TRAVERSE_STMT(FloatingLiteral, { })
+DEF_TRAVERSE_STMT(ImaginaryLiteral, { })
+DEF_TRAVERSE_STMT(StringLiteral, { })
+DEF_TRAVERSE_STMT(ObjCStringLiteral, { })
+DEF_TRAVERSE_STMT(ObjCNumericLiteral, { })
+DEF_TRAVERSE_STMT(ObjCArrayLiteral, { })
+DEF_TRAVERSE_STMT(ObjCDictionaryLiteral, { })
+
+// Traverse OpenCL: AsType, Convert.
+DEF_TRAVERSE_STMT(AsTypeExpr, { })
+
+// FIXME: look at the following tricky-seeming exprs to see if we
+// need to recurse on anything. These are ones that have methods
+// returning decls or qualtypes or nestednamespecifier -- though I'm
+// not sure if they own them -- or just seemed very complicated, or
+// had lots of sub-types to explore.
+//
+// VisitOverloadExpr and its children: recurse on template args? etc?
+
+// FIXME: go through all the stmts and exprs again, and see which of them
+// create new types, and recurse on the types (TypeLocs?) of those.
+// Candidates:
+//
+// http://clang.llvm.org/doxygen/classclang_1_1CXXTypeidExpr.html
+// http://clang.llvm.org/doxygen/classclang_1_1UnaryExprOrTypeTraitExpr.html
+// http://clang.llvm.org/doxygen/classclang_1_1TypesCompatibleExpr.html
+// Every class that has getQualifier.
+
+#undef DEF_TRAVERSE_STMT
+
+#undef TRY_TO
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_AST_RECURSIVEASTVISITOR_H
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h b/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h
new file mode 100644
index 0000000..88abadb
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/Redeclarable.h
@@ -0,0 +1,181 @@
+//===-- Redeclarable.h - Base for Decls that can be redeclared -*- C++ -*-====//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Redeclarable interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_REDECLARABLE_H
+#define LLVM_CLANG_AST_REDECLARABLE_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/Casting.h"
+#include <iterator>
+
+namespace clang {
+
+/// \brief Provides common interface for the Decls that can be redeclared.
+template<typename decl_type>
+class Redeclarable {
+
+protected:
+ // FIXME: PointerIntPair is a value class that should not be inherited from.
+ // This should change to using containment.
+ struct DeclLink : public llvm::PointerIntPair<decl_type *, 1, bool> {
+ DeclLink(decl_type *D, bool isLatest)
+ : llvm::PointerIntPair<decl_type *, 1, bool>(D, isLatest) { }
+
+ typedef llvm::PointerIntPair<decl_type *, 1, bool> base_type;
+
+ bool NextIsPrevious() const { return base_type::getInt() == false; }
+ bool NextIsLatest() const { return base_type::getInt() == true; }
+ decl_type *getNext() const { return base_type::getPointer(); }
+ };
+
+ struct PreviousDeclLink : public DeclLink {
+ PreviousDeclLink(decl_type *D) : DeclLink(D, false) { }
+ };
+
+ struct LatestDeclLink : public DeclLink {
+ LatestDeclLink(decl_type *D) : DeclLink(D, true) { }
+ };
+
+ /// \brief Points to the next redeclaration in the chain.
+ ///
+ /// If NextIsPrevious() is true, this is a link to the previous declaration
+ /// of this same Decl. If NextIsLatest() is true, this is the first
+  /// declaration and the link points to the latest declaration. For example:
+ ///
+ /// #1 int f(int x, int y = 1); // <pointer to #3, true>
+ /// #2 int f(int x = 0, int y); // <pointer to #1, false>
+ /// #3 int f(int x, int y) { return x + y; } // <pointer to #2, false>
+ ///
+ /// If there is only one declaration, it is <pointer to self, true>
+ DeclLink RedeclLink;
+
+public:
+ Redeclarable() : RedeclLink(LatestDeclLink(static_cast<decl_type*>(this))) { }
+
+ /// \brief Return the previous declaration of this declaration or NULL if this
+ /// is the first declaration.
+ decl_type *getPreviousDecl() {
+ if (RedeclLink.NextIsPrevious())
+ return RedeclLink.getNext();
+ return 0;
+ }
+ const decl_type *getPreviousDecl() const {
+ return const_cast<decl_type *>(
+ static_cast<const decl_type*>(this))->getPreviousDecl();
+ }
+
+ /// \brief Return the first declaration of this declaration or itself if this
+ /// is the only declaration.
+ decl_type *getFirstDeclaration() {
+ decl_type *D = static_cast<decl_type*>(this);
+ while (D->getPreviousDecl())
+ D = D->getPreviousDecl();
+ return D;
+ }
+
+ /// \brief Return the first declaration of this declaration or itself if this
+ /// is the only declaration.
+ const decl_type *getFirstDeclaration() const {
+ const decl_type *D = static_cast<const decl_type*>(this);
+ while (D->getPreviousDecl())
+ D = D->getPreviousDecl();
+ return D;
+ }
+
+ /// \brief Returns true if this is the first declaration.
+ bool isFirstDeclaration() const {
+ return RedeclLink.NextIsLatest();
+ }
+
+ /// \brief Returns the most recent (re)declaration of this declaration.
+ decl_type *getMostRecentDecl() {
+ return getFirstDeclaration()->RedeclLink.getNext();
+ }
+
+ /// \brief Returns the most recent (re)declaration of this declaration.
+ const decl_type *getMostRecentDecl() const {
+ return getFirstDeclaration()->RedeclLink.getNext();
+ }
+
+ /// \brief Set the previous declaration. If PrevDecl is NULL, set this as the
+ /// first and only declaration.
+ void setPreviousDeclaration(decl_type *PrevDecl);
+
+ /// \brief Iterates through all the redeclarations of the same decl.
+ class redecl_iterator {
+ /// Current - The current declaration.
+ decl_type *Current;
+ decl_type *Starter;
+ bool PassedFirst;
+
+ public:
+ typedef decl_type* value_type;
+ typedef decl_type* reference;
+ typedef decl_type* pointer;
+ typedef std::forward_iterator_tag iterator_category;
+ typedef std::ptrdiff_t difference_type;
+
+ redecl_iterator() : Current(0) { }
+ explicit redecl_iterator(decl_type *C)
+ : Current(C), Starter(C), PassedFirst(false) { }
+
+ reference operator*() const { return Current; }
+ pointer operator->() const { return Current; }
+
+ redecl_iterator& operator++() {
+ assert(Current && "Advancing while iterator has reached end");
+ // Sanity check to avoid infinite loop on invalid redecl chain.
+ if (Current->isFirstDeclaration()) {
+ if (PassedFirst) {
+ assert(0 && "Passed first decl twice, invalid redecl chain!");
+ Current = 0;
+ return *this;
+ }
+ PassedFirst = true;
+ }
+
+ // Get either previous decl or latest decl.
+ decl_type *Next = Current->RedeclLink.getNext();
+ Current = (Next != Starter ? Next : 0);
+ return *this;
+ }
+
+ redecl_iterator operator++(int) {
+ redecl_iterator tmp(*this);
+ ++(*this);
+ return tmp;
+ }
+
+ friend bool operator==(redecl_iterator x, redecl_iterator y) {
+ return x.Current == y.Current;
+ }
+ friend bool operator!=(redecl_iterator x, redecl_iterator y) {
+ return x.Current != y.Current;
+ }
+ };
+
+  /// \brief Returns an iterator over all the redeclarations of the same decl.
+  /// The iteration always visits at least one declaration (this one), even
+  /// when it is the only declaration.
+ redecl_iterator redecls_begin() const {
+ return redecl_iterator(const_cast<decl_type*>(
+ static_cast<const decl_type*>(this)));
+ }
+ redecl_iterator redecls_end() const { return redecl_iterator(); }
+
+ friend class ASTDeclReader;
+ friend class ASTDeclWriter;
+};
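+
+// Illustrative sketch only (a hypothetical helper, not part of the upstream
+// header): walks a redeclaration chain through the iterator above. Any
+// redeclarable decl can be passed via its Redeclarable base; the chain
+// always contains at least this declaration, so the result is >= 1.
+template<typename decl_type>
+unsigned countRedeclarations(const Redeclarable<decl_type> *D) {
+  unsigned N = 0;
+  for (typename Redeclarable<decl_type>::redecl_iterator
+         I = D->redecls_begin(), E = D->redecls_end(); I != E; ++I)
+    ++N;  // Each step follows RedeclLink until it wraps back to the start.
+  return N;
+}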
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/SelectorLocationsKind.h b/contrib/llvm/tools/clang/include/clang/AST/SelectorLocationsKind.h
new file mode 100644
index 0000000..cd43a5c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/SelectorLocationsKind.h
@@ -0,0 +1,83 @@
+//===--- SelectorLocationsKind.h - Kind of selector locations ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Describes whether the identifier locations for a selector are "standard"
+// or not.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_SELECTORLOCATIONSKIND_H
+#define LLVM_CLANG_AST_SELECTORLOCATIONSKIND_H
+
+#include "clang/Basic/LLVM.h"
+
+namespace clang {
+ class Selector;
+ class SourceLocation;
+ class Expr;
+ class ParmVarDecl;
+
+/// \brief Whether all locations of the selector identifiers are in a
+/// "standard" position.
+enum SelectorLocationsKind {
+ /// \brief Non-standard.
+ SelLoc_NonStandard = 0,
+
+ /// \brief For nullary selectors, immediately before the end:
+ /// "[foo release]" / "-(void)release;"
+ /// Or immediately before the arguments:
+ /// "[foo first:1 second:2]" / "-(id)first:(int)x second:(int)y;
+ SelLoc_StandardNoSpace = 1,
+
+ /// \brief For nullary selectors, immediately before the end:
+ /// "[foo release]" / "-(void)release;"
+ /// Or with a space between the arguments:
+ /// "[foo first: 1 second: 2]" / "-(id)first: (int)x second: (int)y;
+ SelLoc_StandardWithSpace = 2
+};
+
+/// \brief Returns which "standard" position, if any, all \arg SelLocs are in.
+SelectorLocationsKind hasStandardSelectorLocs(Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ArrayRef<Expr *> Args,
+ SourceLocation EndLoc);
+
+/// \brief Get the "standard" location of a selector identifier, e.g:
+/// For nullary selectors, immediately before ']': "[foo release]"
+///
+/// \param WithArgSpace if true the standard location is with a space apart
+/// before arguments: "[foo first: 1 second: 2]"
+/// If false: "[foo first:1 second:2]"
+SourceLocation getStandardSelectorLoc(unsigned Index,
+ Selector Sel,
+ bool WithArgSpace,
+ ArrayRef<Expr *> Args,
+ SourceLocation EndLoc);
+
+/// \brief Returns which "standard" position, if any, all \arg SelLocs are in.
+SelectorLocationsKind hasStandardSelectorLocs(Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ArrayRef<ParmVarDecl *> Args,
+ SourceLocation EndLoc);
+
+/// \brief Get the "standard" location of a selector identifier, e.g:
+/// For nullary selectors, immediately before ']': "[foo release]"
+///
+/// \param WithArgSpace if true the standard location is with a space apart
+/// before arguments: "-(id)first: (int)x second: (int)y;"
+/// If false: "-(id)first:(int)x second:(int)y;"
+SourceLocation getStandardSelectorLoc(unsigned Index,
+ Selector Sel,
+ bool WithArgSpace,
+ ArrayRef<ParmVarDecl *> Args,
+ SourceLocation EndLoc);
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Stmt.h b/contrib/llvm/tools/clang/include/clang/AST/Stmt.h
new file mode 100644
index 0000000..84bdfb8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/Stmt.h
@@ -0,0 +1,1705 @@
+//===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Stmt interface and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_STMT_H
+#define LLVM_CLANG_AST_STMT_H
+
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/StmtIterator.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/AST/ASTContext.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/raw_ostream.h"
+#include <string>
+
+namespace llvm {
+ class FoldingSetNodeID;
+}
+
+namespace clang {
+ class ASTContext;
+ class Expr;
+ class Decl;
+ class ParmVarDecl;
+ class QualType;
+ class IdentifierInfo;
+ class SourceManager;
+ class StringLiteral;
+ class SwitchStmt;
+
+ //===--------------------------------------------------------------------===//
+ // ExprIterator - Iterators for iterating over Stmt* arrays that contain
+ // only Expr*. This is needed because AST nodes use Stmt* arrays to store
+ // references to children (to be compatible with StmtIterator).
+ //===--------------------------------------------------------------------===//
+
+ class Stmt;
+ class Expr;
+
+ class ExprIterator {
+ Stmt** I;
+ public:
+ ExprIterator(Stmt** i) : I(i) {}
+ ExprIterator() : I(0) {}
+ ExprIterator& operator++() { ++I; return *this; }
+ ExprIterator operator-(size_t i) { return I-i; }
+ ExprIterator operator+(size_t i) { return I+i; }
+ Expr* operator[](size_t idx);
+ // FIXME: Verify that this will correctly return a signed distance.
+ signed operator-(const ExprIterator& R) const { return I - R.I; }
+ Expr* operator*() const;
+ Expr* operator->() const;
+ bool operator==(const ExprIterator& R) const { return I == R.I; }
+ bool operator!=(const ExprIterator& R) const { return I != R.I; }
+ bool operator>(const ExprIterator& R) const { return I > R.I; }
+ bool operator>=(const ExprIterator& R) const { return I >= R.I; }
+ };
+
+ class ConstExprIterator {
+ const Stmt * const *I;
+ public:
+ ConstExprIterator(const Stmt * const *i) : I(i) {}
+ ConstExprIterator() : I(0) {}
+ ConstExprIterator& operator++() { ++I; return *this; }
+ ConstExprIterator operator+(size_t i) const { return I+i; }
+ ConstExprIterator operator-(size_t i) const { return I-i; }
+ const Expr * operator[](size_t idx) const;
+ signed operator-(const ConstExprIterator& R) const { return I - R.I; }
+ const Expr * operator*() const;
+ const Expr * operator->() const;
+ bool operator==(const ConstExprIterator& R) const { return I == R.I; }
+ bool operator!=(const ConstExprIterator& R) const { return I != R.I; }
+ bool operator>(const ConstExprIterator& R) const { return I > R.I; }
+ bool operator>=(const ConstExprIterator& R) const { return I >= R.I; }
+ };
+
+//===----------------------------------------------------------------------===//
+// AST classes for statements.
+//===----------------------------------------------------------------------===//
+
+/// Stmt - This represents one statement.
+///
+class Stmt {
+public:
+ enum StmtClass {
+ NoStmtClass = 0,
+#define STMT(CLASS, PARENT) CLASS##Class,
+#define STMT_RANGE(BASE, FIRST, LAST) \
+ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
+#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
+ first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
+#define ABSTRACT_STMT(STMT)
+#include "clang/AST/StmtNodes.inc"
+ };
+
+ // Make vanilla 'new' and 'delete' illegal for Stmts.
+protected:
+ void* operator new(size_t bytes) throw() {
+ llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
+ }
+ void operator delete(void* data) throw() {
+ llvm_unreachable("Stmts cannot be released with regular 'delete'.");
+ }
+
+ class StmtBitfields {
+ friend class Stmt;
+
+ /// \brief The statement class.
+ unsigned sClass : 8;
+ };
+ enum { NumStmtBits = 8 };
+
+ class CompoundStmtBitfields {
+ friend class CompoundStmt;
+ unsigned : NumStmtBits;
+
+ unsigned NumStmts : 32 - NumStmtBits;
+ };
+
+ class ExprBitfields {
+ friend class Expr;
+ friend class DeclRefExpr; // computeDependence
+ friend class InitListExpr; // ctor
+ friend class DesignatedInitExpr; // ctor
+ friend class BlockDeclRefExpr; // ctor
+ friend class ASTStmtReader; // deserialization
+ friend class CXXNewExpr; // ctor
+ friend class DependentScopeDeclRefExpr; // ctor
+ friend class CXXConstructExpr; // ctor
+ friend class CallExpr; // ctor
+ friend class OffsetOfExpr; // ctor
+ friend class ObjCMessageExpr; // ctor
+ friend class ObjCArrayLiteral; // ctor
+ friend class ObjCDictionaryLiteral; // ctor
+ friend class ShuffleVectorExpr; // ctor
+ friend class ParenListExpr; // ctor
+ friend class CXXUnresolvedConstructExpr; // ctor
+ friend class CXXDependentScopeMemberExpr; // ctor
+ friend class OverloadExpr; // ctor
+ friend class PseudoObjectExpr; // ctor
+ friend class AtomicExpr; // ctor
+ unsigned : NumStmtBits;
+
+ unsigned ValueKind : 2;
+ unsigned ObjectKind : 2;
+ unsigned TypeDependent : 1;
+ unsigned ValueDependent : 1;
+ unsigned InstantiationDependent : 1;
+ unsigned ContainsUnexpandedParameterPack : 1;
+ };
+ enum { NumExprBits = 16 };
+
+ class CharacterLiteralBitfields {
+ friend class CharacterLiteral;
+ unsigned : NumExprBits;
+
+ unsigned Kind : 2;
+ };
+
+ class FloatingLiteralBitfields {
+ friend class FloatingLiteral;
+ unsigned : NumExprBits;
+
+ unsigned IsIEEE : 1; // Distinguishes between PPC128 and IEEE128.
+ unsigned IsExact : 1;
+ };
+
+ class UnaryExprOrTypeTraitExprBitfields {
+ friend class UnaryExprOrTypeTraitExpr;
+ unsigned : NumExprBits;
+
+ unsigned Kind : 2;
+ unsigned IsType : 1; // true if operand is a type, false if an expression.
+ };
+
+ class DeclRefExprBitfields {
+ friend class DeclRefExpr;
+ friend class ASTStmtReader; // deserialization
+ unsigned : NumExprBits;
+
+ unsigned HasQualifier : 1;
+ unsigned HasTemplateKWAndArgsInfo : 1;
+ unsigned HasFoundDecl : 1;
+ unsigned HadMultipleCandidates : 1;
+ unsigned RefersToEnclosingLocal : 1;
+ };
+
+ class CastExprBitfields {
+ friend class CastExpr;
+ unsigned : NumExprBits;
+
+ unsigned Kind : 6;
+ unsigned BasePathSize : 32 - 6 - NumExprBits;
+ };
+
+ class CallExprBitfields {
+ friend class CallExpr;
+ unsigned : NumExprBits;
+
+ unsigned NumPreArgs : 1;
+ };
+
+ class ExprWithCleanupsBitfields {
+ friend class ExprWithCleanups;
+ friend class ASTStmtReader; // deserialization
+
+ unsigned : NumExprBits;
+
+ unsigned NumObjects : 32 - NumExprBits;
+ };
+
+ class PseudoObjectExprBitfields {
+ friend class PseudoObjectExpr;
+ friend class ASTStmtReader; // deserialization
+
+ unsigned : NumExprBits;
+
+ // These don't need to be particularly wide, because they're
+ // strictly limited by the forms of expressions we permit.
+ unsigned NumSubExprs : 8;
+ unsigned ResultIndex : 32 - 8 - NumExprBits;
+ };
+
+ class ObjCIndirectCopyRestoreExprBitfields {
+ friend class ObjCIndirectCopyRestoreExpr;
+ unsigned : NumExprBits;
+
+ unsigned ShouldCopy : 1;
+ };
+
+ class InitListExprBitfields {
+ friend class InitListExpr;
+
+ unsigned : NumExprBits;
+
+ /// Whether this initializer list originally had a GNU array-range
+ /// designator in it. This is a temporary marker used by CodeGen.
+ unsigned HadArrayRangeDesignator : 1;
+
+ /// Whether this initializer list initializes a std::initializer_list
+ /// object.
+ unsigned InitializesStdInitializerList : 1;
+ };
+
+ class TypeTraitExprBitfields {
+ friend class TypeTraitExpr;
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+
+ unsigned : NumExprBits;
+
+ /// \brief The kind of type trait, which is a value of a TypeTrait enumerator.
+ unsigned Kind : 8;
+
+ /// \brief If this expression is not value-dependent, this indicates whether
+ /// the trait evaluated true or false.
+ unsigned Value : 1;
+
+ /// \brief The number of arguments to this type trait.
+ unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
+ };
+
+ union {
+ // FIXME: this is wasteful on 64-bit platforms.
+ void *Aligner;
+
+ StmtBitfields StmtBits;
+ CompoundStmtBitfields CompoundStmtBits;
+ ExprBitfields ExprBits;
+ CharacterLiteralBitfields CharacterLiteralBits;
+ FloatingLiteralBitfields FloatingLiteralBits;
+ UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
+ DeclRefExprBitfields DeclRefExprBits;
+ CastExprBitfields CastExprBits;
+ CallExprBitfields CallExprBits;
+ ExprWithCleanupsBitfields ExprWithCleanupsBits;
+ PseudoObjectExprBitfields PseudoObjectExprBits;
+ ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
+ InitListExprBitfields InitListExprBits;
+ TypeTraitExprBitfields TypeTraitExprBits;
+ };
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+
+public:
+ // Only allow allocation of Stmts using the allocator in ASTContext
+ // or by doing a placement new.
+ void* operator new(size_t bytes, ASTContext& C,
+ unsigned alignment = 8) throw() {
+ return ::operator new(bytes, C, alignment);
+ }
+
+ void* operator new(size_t bytes, ASTContext* C,
+ unsigned alignment = 8) throw() {
+ return ::operator new(bytes, *C, alignment);
+ }
+
+ void* operator new(size_t bytes, void* mem) throw() {
+ return mem;
+ }
+
+ void operator delete(void*, ASTContext&, unsigned) throw() { }
+ void operator delete(void*, ASTContext*, unsigned) throw() { }
+ void operator delete(void*, std::size_t) throw() { }
+ void operator delete(void*, void*) throw() { }
+
+public:
+ /// \brief A placeholder type used to construct an empty shell of a
+  /// type that will be filled in later (e.g., by some
+ /// de-serialization).
+ struct EmptyShell { };
+
+private:
+ /// \brief Whether statistic collection is enabled.
+ static bool StatisticsEnabled;
+
+protected:
+ /// \brief Construct an empty statement.
+ explicit Stmt(StmtClass SC, EmptyShell) {
+ StmtBits.sClass = SC;
+ if (StatisticsEnabled) Stmt::addStmtClass(SC);
+ }
+
+public:
+ Stmt(StmtClass SC) {
+ StmtBits.sClass = SC;
+ if (StatisticsEnabled) Stmt::addStmtClass(SC);
+ }
+
+ StmtClass getStmtClass() const {
+ return static_cast<StmtClass>(StmtBits.sClass);
+ }
+ const char *getStmtClassName() const;
+
+  /// SourceLocation tokens are not useful in isolation - they are low-level
+ /// value objects created/interpreted by SourceManager. We assume AST
+ /// clients will have a pointer to the respective SourceManager.
+ SourceRange getSourceRange() const LLVM_READONLY;
+ SourceLocation getLocStart() const LLVM_READONLY;
+ SourceLocation getLocEnd() const LLVM_READONLY;
+
+ // global temp stats (until we have a per-module visitor)
+ static void addStmtClass(const StmtClass s);
+ static void EnableStatistics();
+ static void PrintStats();
+
+ /// dump - This does a local dump of the specified AST fragment. It dumps the
+ /// specified node and a few nodes underneath it, but not the whole subtree.
+ /// This is useful in a debugger.
+ LLVM_ATTRIBUTE_USED void dump() const;
+ LLVM_ATTRIBUTE_USED void dump(SourceManager &SM) const;
+ void dump(raw_ostream &OS, SourceManager &SM) const;
+
+ /// dumpAll - This does a dump of the specified AST fragment and all subtrees.
+ void dumpAll() const;
+ void dumpAll(SourceManager &SM) const;
+
+ /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
+ /// back to its original source language syntax.
+ void dumpPretty(ASTContext& Context) const;
+ void printPretty(raw_ostream &OS, PrinterHelper *Helper,
+ const PrintingPolicy &Policy,
+ unsigned Indentation = 0) const {
+ printPretty(OS, *(ASTContext*)0, Helper, Policy, Indentation);
+ }
+ void printPretty(raw_ostream &OS, ASTContext &Context,
+ PrinterHelper *Helper,
+ const PrintingPolicy &Policy,
+ unsigned Indentation = 0) const;
+
+ /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
+ /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
+ void viewAST() const;
+
+ /// Skip past any implicit AST nodes which might surround this
+ /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes.
+ Stmt *IgnoreImplicit();
+
+ const Stmt *stripLabelLikeStatements() const;
+ Stmt *stripLabelLikeStatements() {
+ return const_cast<Stmt*>(
+ const_cast<const Stmt*>(this)->stripLabelLikeStatements());
+ }
+
+ // Implement isa<T> support.
+ static bool classof(const Stmt *) { return true; }
+
+ /// hasImplicitControlFlow - Some statements (e.g. short circuited operations)
+ /// contain implicit control-flow in the order their subexpressions
+ /// are evaluated. This predicate returns true if this statement has
+ /// such implicit control-flow. Such statements are also specially handled
+ /// within CFGs.
+ bool hasImplicitControlFlow() const;
+
+  /// Child Iterators: All subclasses must implement 'children'
+  /// to permit easy iteration over the substatements/subexpressions of an
+  /// AST node, and hence over all nodes in the AST.
+ typedef StmtIterator child_iterator;
+ typedef ConstStmtIterator const_child_iterator;
+
+ typedef StmtRange child_range;
+ typedef ConstStmtRange const_child_range;
+
+ child_range children();
+ const_child_range children() const {
+ return const_cast<Stmt*>(this)->children();
+ }
+
+ child_iterator child_begin() { return children().first; }
+ child_iterator child_end() { return children().second; }
+
+ const_child_iterator child_begin() const { return children().first; }
+ const_child_iterator child_end() const { return children().second; }
+
+  /// \brief Produce a unique representation of the given statement.
+  ///
+  /// \param ID once the profiling operation is complete, will contain
+  /// the unique representation of the given statement.
+  ///
+  /// \param Context the AST context in which the statement resides.
+  ///
+  /// \param Canonical whether the profile should be based on the canonical
+  /// representation of this statement (e.g., where non-type template
+  /// parameters are identified by index/level rather than their
+  /// declaration pointers) or the exact representation of the statement as
+  /// written in the source.
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ bool Canonical) const;
+};
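+
+// Illustrative sketch only (a hypothetical helper, not part of the upstream
+// header): a recursive pre-order count of the nodes reachable through the
+// generic children() interface described above. Null children (for example
+// a missing 'else' branch stored as a null slot) must be skipped.
+inline unsigned countSubStmts(Stmt *S) {
+  unsigned Count = 1;  // Count this node itself.
+  for (Stmt::child_iterator I = S->child_begin(), E = S->child_end();
+       I != E; ++I)
+    if (Stmt *Child = *I)
+      Count += countSubStmts(Child);
+  return Count;
+}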
+
+/// DeclStmt - Adaptor class for mixing declarations with statements and
+/// expressions. For example, CompoundStmt mixes statements, expressions
+/// and declarations (variables, types). Another example is ForStmt, where
+/// the first statement can be an expression or a declaration.
+///
+class DeclStmt : public Stmt {
+ DeclGroupRef DG;
+ SourceLocation StartLoc, EndLoc;
+
+public:
+ DeclStmt(DeclGroupRef dg, SourceLocation startLoc,
+ SourceLocation endLoc) : Stmt(DeclStmtClass), DG(dg),
+ StartLoc(startLoc), EndLoc(endLoc) {}
+
+ /// \brief Build an empty declaration statement.
+ explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { }
+
+ /// isSingleDecl - This method returns true if this DeclStmt refers
+ /// to a single Decl.
+ bool isSingleDecl() const {
+ return DG.isSingleDecl();
+ }
+
+ const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
+ Decl *getSingleDecl() { return DG.getSingleDecl(); }
+
+ const DeclGroupRef getDeclGroup() const { return DG; }
+ DeclGroupRef getDeclGroup() { return DG; }
+ void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }
+
+ SourceLocation getStartLoc() const { return StartLoc; }
+ void setStartLoc(SourceLocation L) { StartLoc = L; }
+ SourceLocation getEndLoc() const { return EndLoc; }
+ void setEndLoc(SourceLocation L) { EndLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(StartLoc, EndLoc);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == DeclStmtClass;
+ }
+ static bool classof(const DeclStmt *) { return true; }
+
+ // Iterators over subexpressions.
+ child_range children() {
+ return child_range(child_iterator(DG.begin(), DG.end()),
+ child_iterator(DG.end(), DG.end()));
+ }
+
+ typedef DeclGroupRef::iterator decl_iterator;
+ typedef DeclGroupRef::const_iterator const_decl_iterator;
+
+ decl_iterator decl_begin() { return DG.begin(); }
+ decl_iterator decl_end() { return DG.end(); }
+ const_decl_iterator decl_begin() const { return DG.begin(); }
+ const_decl_iterator decl_end() const { return DG.end(); }
+};
+
+/// NullStmt - This is the null statement ";": C99 6.8.3p3.
+///
+class NullStmt : public Stmt {
+ SourceLocation SemiLoc;
+
+  /// \brief True if the null statement was preceded by an empty macro, e.g.:
+ /// @code
+ /// #define CALL(x)
+ /// CALL(0);
+ /// @endcode
+ bool HasLeadingEmptyMacro;
+public:
+ NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
+ : Stmt(NullStmtClass), SemiLoc(L),
+ HasLeadingEmptyMacro(hasLeadingEmptyMacro) {}
+
+ /// \brief Build an empty null statement.
+ explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty),
+ HasLeadingEmptyMacro(false) { }
+
+ SourceLocation getSemiLoc() const { return SemiLoc; }
+ void setSemiLoc(SourceLocation L) { SemiLoc = L; }
+
+ bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(SemiLoc); }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == NullStmtClass;
+ }
+ static bool classof(const NullStmt *) { return true; }
+
+ child_range children() { return child_range(); }
+
+ friend class ASTStmtReader;
+ friend class ASTStmtWriter;
+};
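+
+// Allocation sketch (illustrative only; 'createNullStmt' is a hypothetical
+// helper, not part of the upstream header): plain 'new' is disabled for
+// Stmt, so nodes are created with the ASTContext placement form declared
+// above, e.g. for a NullStmt:
+inline NullStmt *createNullStmt(ASTContext &Ctx, SourceLocation SemiLoc) {
+  // Memory comes from the ASTContext's allocator and is released with the
+  // context, not with 'delete' (the delete overloads above are no-ops).
+  return new (Ctx) NullStmt(SemiLoc);
+}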
+
+/// CompoundStmt - This represents a group of statements like { stmt stmt }.
+///
+class CompoundStmt : public Stmt {
+ Stmt** Body;
+ SourceLocation LBracLoc, RBracLoc;
+public:
+ CompoundStmt(ASTContext& C, Stmt **StmtStart, unsigned NumStmts,
+ SourceLocation LB, SourceLocation RB)
+ : Stmt(CompoundStmtClass), LBracLoc(LB), RBracLoc(RB) {
+ CompoundStmtBits.NumStmts = NumStmts;
+ assert(CompoundStmtBits.NumStmts == NumStmts &&
+ "NumStmts doesn't fit in bits of CompoundStmtBits.NumStmts!");
+
+ if (NumStmts == 0) {
+ Body = 0;
+ return;
+ }
+
+ Body = new (C) Stmt*[NumStmts];
+ memcpy(Body, StmtStart, NumStmts * sizeof(*Body));
+ }
+
+  /// \brief Build an empty compound statement.
+ explicit CompoundStmt(EmptyShell Empty)
+ : Stmt(CompoundStmtClass, Empty), Body(0) {
+ CompoundStmtBits.NumStmts = 0;
+ }
+
+ void setStmts(ASTContext &C, Stmt **Stmts, unsigned NumStmts);
+
+ bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
+ unsigned size() const { return CompoundStmtBits.NumStmts; }
+
+ typedef Stmt** body_iterator;
+ body_iterator body_begin() { return Body; }
+ body_iterator body_end() { return Body + size(); }
+ Stmt *body_back() { return !body_empty() ? Body[size()-1] : 0; }
+
+ void setLastStmt(Stmt *S) {
+ assert(!body_empty() && "setLastStmt");
+ Body[size()-1] = S;
+ }
+
+ typedef Stmt* const * const_body_iterator;
+ const_body_iterator body_begin() const { return Body; }
+ const_body_iterator body_end() const { return Body + size(); }
+ const Stmt *body_back() const { return !body_empty() ? Body[size()-1] : 0; }
+
+ typedef std::reverse_iterator<body_iterator> reverse_body_iterator;
+ reverse_body_iterator body_rbegin() {
+ return reverse_body_iterator(body_end());
+ }
+ reverse_body_iterator body_rend() {
+ return reverse_body_iterator(body_begin());
+ }
+
+ typedef std::reverse_iterator<const_body_iterator>
+ const_reverse_body_iterator;
+
+ const_reverse_body_iterator body_rbegin() const {
+ return const_reverse_body_iterator(body_end());
+ }
+
+ const_reverse_body_iterator body_rend() const {
+ return const_reverse_body_iterator(body_begin());
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(LBracLoc, RBracLoc);
+ }
+
+ SourceLocation getLBracLoc() const { return LBracLoc; }
+ void setLBracLoc(SourceLocation L) { LBracLoc = L; }
+ SourceLocation getRBracLoc() const { return RBracLoc; }
+ void setRBracLoc(SourceLocation L) { RBracLoc = L; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CompoundStmtClass;
+ }
+ static bool classof(const CompoundStmt *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(&Body[0], &Body[0]+CompoundStmtBits.NumStmts);
+ }
+
+ const_child_range children() const {
+ return child_range(&Body[0], &Body[0]+CompoundStmtBits.NumStmts);
+ }
+};
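+
+// Illustrative sketch only (a hypothetical helper, not part of the upstream
+// header): the statements of a block live in an ASTContext-allocated array,
+// so queries like "does this block end in a return?" are direct lookups.
+inline bool endsWithReturn(const CompoundStmt &CS) {
+  const Stmt *Last = CS.body_back();  // Null when the block is empty.
+  return Last && Last->getStmtClass() == Stmt::ReturnStmtClass;
+}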
+
+/// SwitchCase is the base class for CaseStmt and DefaultStmt.
+class SwitchCase : public Stmt {
+protected:
+  // A pointer to the next CaseStmt or DefaultStmt in the switch's case
+  // list, used by SwitchStmt.
+ SwitchCase *NextSwitchCase;
+
+ SwitchCase(StmtClass SC) : Stmt(SC), NextSwitchCase(0) {}
+
+public:
+ const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
+
+ SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
+
+ void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
+
+ Stmt *getSubStmt();
+ const Stmt *getSubStmt() const {
+ return const_cast<SwitchCase*>(this)->getSubStmt();
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(); }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CaseStmtClass ||
+ T->getStmtClass() == DefaultStmtClass;
+ }
+ static bool classof(const SwitchCase *) { return true; }
+};
+
+class CaseStmt : public SwitchCase {
+ enum { LHS, RHS, SUBSTMT, END_EXPR };
+  Stmt* SubExprs[END_EXPR];  // The expression for the RHS is non-null only
+                             // for the GNU "case 1 ... 4" extension.
+ SourceLocation CaseLoc;
+ SourceLocation EllipsisLoc;
+ SourceLocation ColonLoc;
+public:
+ CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
+ SourceLocation ellipsisLoc, SourceLocation colonLoc)
+ : SwitchCase(CaseStmtClass) {
+ SubExprs[SUBSTMT] = 0;
+ SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs);
+ SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs);
+ CaseLoc = caseLoc;
+ EllipsisLoc = ellipsisLoc;
+ ColonLoc = colonLoc;
+ }
+
+ /// \brief Build an empty switch case statement.
+ explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass) { }
+
+ SourceLocation getCaseLoc() const { return CaseLoc; }
+ void setCaseLoc(SourceLocation L) { CaseLoc = L; }
+ SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
+ void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; }
+ SourceLocation getColonLoc() const { return ColonLoc; }
+ void setColonLoc(SourceLocation L) { ColonLoc = L; }
+
+ Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); }
+ Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); }
+ Stmt *getSubStmt() { return SubExprs[SUBSTMT]; }
+
+ const Expr *getLHS() const {
+ return reinterpret_cast<const Expr*>(SubExprs[LHS]);
+ }
+ const Expr *getRHS() const {
+ return reinterpret_cast<const Expr*>(SubExprs[RHS]);
+ }
+ const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; }
+
+ void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; }
+ void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); }
+ void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); }
+
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ // Handle deeply nested case statements with iteration instead of recursion.
+ const CaseStmt *CS = this;
+ while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
+ CS = CS2;
+
+ return SourceRange(CaseLoc, CS->getSubStmt()->getLocEnd());
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CaseStmtClass;
+ }
+ static bool classof(const CaseStmt *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[END_EXPR]);
+ }
+};
+
+class DefaultStmt : public SwitchCase {
+ Stmt* SubStmt;
+ SourceLocation DefaultLoc;
+ SourceLocation ColonLoc;
+public:
+ DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) :
+ SwitchCase(DefaultStmtClass), SubStmt(substmt), DefaultLoc(DL),
+ ColonLoc(CL) {}
+
+ /// \brief Build an empty default statement.
+ explicit DefaultStmt(EmptyShell) : SwitchCase(DefaultStmtClass) { }
+
+ Stmt *getSubStmt() { return SubStmt; }
+ const Stmt *getSubStmt() const { return SubStmt; }
+ void setSubStmt(Stmt *S) { SubStmt = S; }
+
+ SourceLocation getDefaultLoc() const { return DefaultLoc; }
+ void setDefaultLoc(SourceLocation L) { DefaultLoc = L; }
+ SourceLocation getColonLoc() const { return ColonLoc; }
+ void setColonLoc(SourceLocation L) { ColonLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(DefaultLoc, SubStmt->getLocEnd());
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == DefaultStmtClass;
+ }
+ static bool classof(const DefaultStmt *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&SubStmt, &SubStmt+1); }
+};
+
+
+/// LabelStmt - Represents a label, which has a substatement. For example:
+/// foo: return;
+///
+class LabelStmt : public Stmt {
+ LabelDecl *TheDecl;
+ Stmt *SubStmt;
+ SourceLocation IdentLoc;
+public:
+ LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
+ : Stmt(LabelStmtClass), TheDecl(D), SubStmt(substmt), IdentLoc(IL) {
+ }
+
+  /// \brief Build an empty label statement.
+ explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { }
+
+ SourceLocation getIdentLoc() const { return IdentLoc; }
+ LabelDecl *getDecl() const { return TheDecl; }
+ void setDecl(LabelDecl *D) { TheDecl = D; }
+ const char *getName() const;
+ Stmt *getSubStmt() { return SubStmt; }
+ const Stmt *getSubStmt() const { return SubStmt; }
+ void setIdentLoc(SourceLocation L) { IdentLoc = L; }
+ void setSubStmt(Stmt *SS) { SubStmt = SS; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(IdentLoc, SubStmt->getLocEnd());
+ }
+ child_range children() { return child_range(&SubStmt, &SubStmt+1); }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == LabelStmtClass;
+ }
+ static bool classof(const LabelStmt *) { return true; }
+};
+
+
+/// IfStmt - This represents an if/then/else.
+///
+class IfStmt : public Stmt {
+ enum { VAR, COND, THEN, ELSE, END_EXPR };
+ Stmt* SubExprs[END_EXPR];
+
+ SourceLocation IfLoc;
+ SourceLocation ElseLoc;
+
+public:
+ IfStmt(ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
+ Stmt *then, SourceLocation EL = SourceLocation(), Stmt *elsev = 0);
+
+  /// \brief Build an empty if/then/else statement.
+ explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { }
+
+ /// \brief Retrieve the variable declared in this "if" statement, if any.
+ ///
+ /// In the following example, "x" is the condition variable.
+ /// \code
+ /// if (int x = foo()) {
+ /// printf("x is %d", x);
+ /// }
+ /// \endcode
+ VarDecl *getConditionVariable() const;
+ void setConditionVariable(ASTContext &C, VarDecl *V);
+
+ /// If this IfStmt has a condition variable, return the faux DeclStmt
+ /// associated with the creation of that condition variable.
+ const DeclStmt *getConditionVariableDeclStmt() const {
+ return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
+ }
+
+ const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
+ void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
+ const Stmt *getThen() const { return SubExprs[THEN]; }
+ void setThen(Stmt *S) { SubExprs[THEN] = S; }
+ const Stmt *getElse() const { return SubExprs[ELSE]; }
+ void setElse(Stmt *S) { SubExprs[ELSE] = S; }
+
+ Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
+ Stmt *getThen() { return SubExprs[THEN]; }
+ Stmt *getElse() { return SubExprs[ELSE]; }
+
+ SourceLocation getIfLoc() const { return IfLoc; }
+ void setIfLoc(SourceLocation L) { IfLoc = L; }
+ SourceLocation getElseLoc() const { return ElseLoc; }
+ void setElseLoc(SourceLocation L) { ElseLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ if (SubExprs[ELSE])
+ return SourceRange(IfLoc, SubExprs[ELSE]->getLocEnd());
+ else
+ return SourceRange(IfLoc, SubExprs[THEN]->getLocEnd());
+ }
+
+ // Iterators over subexpressions. The iterators will include iterating
+ // over the initialization expression referenced by the condition variable.
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == IfStmtClass;
+ }
+ static bool classof(const IfStmt *) { return true; }
+};
+
+/// SwitchStmt - This represents a 'switch' stmt.
+///
+class SwitchStmt : public Stmt {
+ enum { VAR, COND, BODY, END_EXPR };
+ Stmt* SubExprs[END_EXPR];
+ // This points to a linked list of case and default statements.
+ SwitchCase *FirstCase;
+ SourceLocation SwitchLoc;
+
+ /// If the SwitchStmt is a switch on an enum value, this records whether
+ /// all the enum values were covered by CaseStmts. This value is meant to
+ /// be a hint for possible clients.
+ unsigned AllEnumCasesCovered : 1;
+
+public:
+ SwitchStmt(ASTContext &C, VarDecl *Var, Expr *cond);
+
+  /// \brief Build an empty switch statement.
+ explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { }
+
+ /// \brief Retrieve the variable declared in this "switch" statement, if any.
+ ///
+ /// In the following example, "x" is the condition variable.
+ /// \code
+ /// switch (int x = foo()) {
+ /// case 0: break;
+ /// // ...
+ /// }
+ /// \endcode
+ VarDecl *getConditionVariable() const;
+ void setConditionVariable(ASTContext &C, VarDecl *V);
+
+ /// If this SwitchStmt has a condition variable, return the faux DeclStmt
+ /// associated with the creation of that condition variable.
+ const DeclStmt *getConditionVariableDeclStmt() const {
+ return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
+ }
+
+ const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
+ const Stmt *getBody() const { return SubExprs[BODY]; }
+ const SwitchCase *getSwitchCaseList() const { return FirstCase; }
+
+ Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]);}
+ void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
+ Stmt *getBody() { return SubExprs[BODY]; }
+ void setBody(Stmt *S) { SubExprs[BODY] = S; }
+ SwitchCase *getSwitchCaseList() { return FirstCase; }
+
+ /// \brief Set the case list for this switch statement.
+ ///
+ /// The caller is responsible for incrementing the retain counts on
+ /// all of the SwitchCase statements in this list.
+ void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }
+
+ SourceLocation getSwitchLoc() const { return SwitchLoc; }
+ void setSwitchLoc(SourceLocation L) { SwitchLoc = L; }
+
+ void setBody(Stmt *S, SourceLocation SL) {
+ SubExprs[BODY] = S;
+ SwitchLoc = SL;
+ }
+ void addSwitchCase(SwitchCase *SC) {
+ assert(!SC->getNextSwitchCase()
+ && "case/default already added to a switch");
+ SC->setNextSwitchCase(FirstCase);
+ FirstCase = SC;
+ }
+
+ /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
+ /// switch over an enum value then all cases have been explicitly covered.
+ void setAllEnumCasesCovered() {
+ AllEnumCasesCovered = 1;
+ }
+
+ /// Returns true if the SwitchStmt is a switch of an enum value and all cases
+ /// have been explicitly covered.
+ bool isAllEnumCasesCovered() const {
+ return (bool) AllEnumCasesCovered;
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(SwitchLoc, SubExprs[BODY]->getLocEnd());
+ }
+ // Iterators
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == SwitchStmtClass;
+ }
+ static bool classof(const SwitchStmt *) { return true; }
+};
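+
+// Illustrative sketch only (a hypothetical helper, not part of the upstream
+// header): the case/default statements of a switch form a singly linked
+// list threaded through getNextSwitchCase() and built in reverse source
+// order by addSwitchCase() above. Walking that list:
+inline unsigned countSwitchCases(const SwitchStmt &S) {
+  unsigned N = 0;
+  for (const SwitchCase *SC = S.getSwitchCaseList(); SC;
+       SC = SC->getNextSwitchCase())
+    ++N;
+  return N;
+}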
+
+
+/// WhileStmt - This represents a 'while' stmt.
+///
+class WhileStmt : public Stmt {
+ enum { VAR, COND, BODY, END_EXPR };
+ Stmt* SubExprs[END_EXPR];
+ SourceLocation WhileLoc;
+public:
+ WhileStmt(ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
+ SourceLocation WL);
+
+ /// \brief Build an empty while statement.
+ explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { }
+
+ /// \brief Retrieve the variable declared in this "while" statement, if any.
+ ///
+ /// In the following example, "x" is the condition variable.
+ /// \code
+ /// while (int x = random()) {
+ /// // ...
+ /// }
+ /// \endcode
+ VarDecl *getConditionVariable() const;
+ void setConditionVariable(ASTContext &C, VarDecl *V);
+
+ /// If this WhileStmt has a condition variable, return the faux DeclStmt
+ /// associated with the creation of that condition variable.
+ const DeclStmt *getConditionVariableDeclStmt() const {
+ return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
+ }
+
+ Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
+ const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
+ void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
+ Stmt *getBody() { return SubExprs[BODY]; }
+ const Stmt *getBody() const { return SubExprs[BODY]; }
+ void setBody(Stmt *S) { SubExprs[BODY] = S; }
+
+ SourceLocation getWhileLoc() const { return WhileLoc; }
+ void setWhileLoc(SourceLocation L) { WhileLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(WhileLoc, SubExprs[BODY]->getLocEnd());
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == WhileStmtClass;
+ }
+ static bool classof(const WhileStmt *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
+};
+
+/// DoStmt - This represents a 'do/while' stmt.
+///
+class DoStmt : public Stmt {
+ enum { BODY, COND, END_EXPR };
+ Stmt* SubExprs[END_EXPR];
+ SourceLocation DoLoc;
+ SourceLocation WhileLoc;
+ SourceLocation RParenLoc; // Location of final ')' in do stmt condition.
+
+public:
+ DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL,
+ SourceLocation RP)
+ : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) {
+ SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
+ SubExprs[BODY] = body;
+ }
+
+ /// \brief Build an empty do-while statement.
+ explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) { }
+
+ Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
+ const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
+ void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
+ Stmt *getBody() { return SubExprs[BODY]; }
+ const Stmt *getBody() const { return SubExprs[BODY]; }
+ void setBody(Stmt *S) { SubExprs[BODY] = S; }
+
+ SourceLocation getDoLoc() const { return DoLoc; }
+ void setDoLoc(SourceLocation L) { DoLoc = L; }
+ SourceLocation getWhileLoc() const { return WhileLoc; }
+ void setWhileLoc(SourceLocation L) { WhileLoc = L; }
+
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(DoLoc, RParenLoc);
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == DoStmtClass;
+ }
+ static bool classof(const DoStmt *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
+};
+
+
+/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
+/// the init/cond/inc parts of the ForStmt will be null if they were not
+/// specified in the source.
+///
+class ForStmt : public Stmt {
+ enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
+ Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
+ SourceLocation ForLoc;
+ SourceLocation LParenLoc, RParenLoc;
+
+public:
+ ForStmt(ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar, Expr *Inc,
+ Stmt *Body, SourceLocation FL, SourceLocation LP, SourceLocation RP);
+
+ /// \brief Build an empty for statement.
+ explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { }
+
+ Stmt *getInit() { return SubExprs[INIT]; }
+
+ /// \brief Retrieve the variable declared in this "for" statement, if any.
+ ///
+ /// In the following example, "y" is the condition variable.
+ /// \code
+ /// for (int x = random(); int y = mangle(x); ++x) {
+ /// // ...
+ /// }
+ /// \endcode
+ VarDecl *getConditionVariable() const;
+ void setConditionVariable(ASTContext &C, VarDecl *V);
+
+ /// If this ForStmt has a condition variable, return the faux DeclStmt
+ /// associated with the creation of that condition variable.
+ const DeclStmt *getConditionVariableDeclStmt() const {
+ return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
+ }
+
+ Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
+ Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
+ Stmt *getBody() { return SubExprs[BODY]; }
+
+ const Stmt *getInit() const { return SubExprs[INIT]; }
+ const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
+ const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
+ const Stmt *getBody() const { return SubExprs[BODY]; }
+
+ void setInit(Stmt *S) { SubExprs[INIT] = S; }
+ void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
+ void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
+ void setBody(Stmt *S) { SubExprs[BODY] = S; }
+
+ SourceLocation getForLoc() const { return ForLoc; }
+ void setForLoc(SourceLocation L) { ForLoc = L; }
+ SourceLocation getLParenLoc() const { return LParenLoc; }
+ void setLParenLoc(SourceLocation L) { LParenLoc = L; }
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(ForLoc, SubExprs[BODY]->getLocEnd());
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ForStmtClass;
+ }
+ static bool classof(const ForStmt *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
+ }
+};
+
+/// GotoStmt - This represents a direct goto.
+///
+class GotoStmt : public Stmt {
+ LabelDecl *Label;
+ SourceLocation GotoLoc;
+ SourceLocation LabelLoc;
+public:
+ GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
+ : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {}
+
+ /// \brief Build an empty goto statement.
+ explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { }
+
+ LabelDecl *getLabel() const { return Label; }
+ void setLabel(LabelDecl *D) { Label = D; }
+
+ SourceLocation getGotoLoc() const { return GotoLoc; }
+ void setGotoLoc(SourceLocation L) { GotoLoc = L; }
+ SourceLocation getLabelLoc() const { return LabelLoc; }
+ void setLabelLoc(SourceLocation L) { LabelLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(GotoLoc, LabelLoc);
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == GotoStmtClass;
+ }
+ static bool classof(const GotoStmt *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// IndirectGotoStmt - This represents an indirect goto.
+///
+class IndirectGotoStmt : public Stmt {
+ SourceLocation GotoLoc;
+ SourceLocation StarLoc;
+ Stmt *Target;
+public:
+ IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc,
+ Expr *target)
+ : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc),
+ Target((Stmt*)target) {}
+
+ /// \brief Build an empty indirect goto statement.
+ explicit IndirectGotoStmt(EmptyShell Empty)
+ : Stmt(IndirectGotoStmtClass, Empty) { }
+
+ void setGotoLoc(SourceLocation L) { GotoLoc = L; }
+ SourceLocation getGotoLoc() const { return GotoLoc; }
+ void setStarLoc(SourceLocation L) { StarLoc = L; }
+ SourceLocation getStarLoc() const { return StarLoc; }
+
+ Expr *getTarget() { return reinterpret_cast<Expr*>(Target); }
+ const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);}
+ void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); }
+
+ /// getConstantTarget - Returns the fixed target of this indirect
+ /// goto, if one exists.
+ LabelDecl *getConstantTarget();
+ const LabelDecl *getConstantTarget() const {
+ return const_cast<IndirectGotoStmt*>(this)->getConstantTarget();
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(GotoLoc, Target->getLocEnd());
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == IndirectGotoStmtClass;
+ }
+ static bool classof(const IndirectGotoStmt *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(&Target, &Target+1); }
+};
+
+
+/// ContinueStmt - This represents a continue.
+///
+class ContinueStmt : public Stmt {
+ SourceLocation ContinueLoc;
+public:
+ ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {}
+
+ /// \brief Build an empty continue statement.
+ explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) { }
+
+ SourceLocation getContinueLoc() const { return ContinueLoc; }
+ void setContinueLoc(SourceLocation L) { ContinueLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(ContinueLoc);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ContinueStmtClass;
+ }
+ static bool classof(const ContinueStmt *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+/// BreakStmt - This represents a break.
+///
+class BreakStmt : public Stmt {
+ SourceLocation BreakLoc;
+public:
+ BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) {}
+
+ /// \brief Build an empty break statement.
+ explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) { }
+
+ SourceLocation getBreakLoc() const { return BreakLoc; }
+ void setBreakLoc(SourceLocation L) { BreakLoc = L; }
+
+ SourceRange getSourceRange() const LLVM_READONLY { return SourceRange(BreakLoc); }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == BreakStmtClass;
+ }
+ static bool classof(const BreakStmt *) { return true; }
+
+ // Iterators
+ child_range children() { return child_range(); }
+};
+
+
+/// ReturnStmt - This represents a return, optionally of an expression:
+/// return;
+/// return 4;
+///
+/// Note that GCC allows return with no argument in a function declared to
+/// return a value, and it allows returning a value in functions declared to
+/// return void. We explicitly model this in the AST, which means you can't
+/// rely on the function's return type to tell you whether a return argument
+/// is present.
+///
+class ReturnStmt : public Stmt {
+ Stmt *RetExpr;
+ SourceLocation RetLoc;
+ const VarDecl *NRVOCandidate;
+
+public:
+ ReturnStmt(SourceLocation RL)
+ : Stmt(ReturnStmtClass), RetExpr(0), RetLoc(RL), NRVOCandidate(0) { }
+
+ ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate)
+ : Stmt(ReturnStmtClass), RetExpr((Stmt*) E), RetLoc(RL),
+ NRVOCandidate(NRVOCandidate) {}
+
+ /// \brief Build an empty return expression.
+ explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { }
+
+ const Expr *getRetValue() const;
+ Expr *getRetValue();
+ void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); }
+
+ SourceLocation getReturnLoc() const { return RetLoc; }
+ void setReturnLoc(SourceLocation L) { RetLoc = L; }
+
+ /// \brief Retrieve the variable that might be used for the named return
+ /// value optimization.
+ ///
+ /// The optimization itself can only be performed if the variable is
+ /// also marked as an NRVO object.
+ const VarDecl *getNRVOCandidate() const { return NRVOCandidate; }
+ void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; }
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ReturnStmtClass;
+ }
+ static bool classof(const ReturnStmt *) { return true; }
+
+ // Iterators
+ child_range children() {
+ if (RetExpr) return child_range(&RetExpr, &RetExpr+1);
+ return child_range();
+ }
+};
+
+/// AsmStmt - This represents a GNU inline-assembly statement extension.
+///
+class AsmStmt : public Stmt {
+ SourceLocation AsmLoc, RParenLoc;
+ StringLiteral *AsmStr;
+
+ bool IsSimple;
+ bool IsVolatile;
+ bool MSAsm;
+
+ unsigned NumOutputs;
+ unsigned NumInputs;
+ unsigned NumClobbers;
+
+ // FIXME: If we wanted to, we could allocate all of these in one big array.
+ IdentifierInfo **Names;
+ StringLiteral **Constraints;
+ Stmt **Exprs;
+ StringLiteral **Clobbers;
+
+public:
+ AsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile,
+ bool msasm, unsigned numoutputs, unsigned numinputs,
+ IdentifierInfo **names, StringLiteral **constraints,
+ Expr **exprs, StringLiteral *asmstr, unsigned numclobbers,
+ StringLiteral **clobbers, SourceLocation rparenloc);
+
+ /// \brief Build an empty inline-assembly statement.
+ explicit AsmStmt(EmptyShell Empty) : Stmt(AsmStmtClass, Empty),
+ Names(0), Constraints(0), Exprs(0), Clobbers(0) { }
+
+ SourceLocation getAsmLoc() const { return AsmLoc; }
+ void setAsmLoc(SourceLocation L) { AsmLoc = L; }
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation L) { RParenLoc = L; }
+
+ bool isVolatile() const { return IsVolatile; }
+ void setVolatile(bool V) { IsVolatile = V; }
+ bool isSimple() const { return IsSimple; }
+ void setSimple(bool V) { IsSimple = V; }
+ bool isMSAsm() const { return MSAsm; }
+ void setMSAsm(bool V) { MSAsm = V; }
+
+ //===--- Asm String Analysis ---===//
+
+ const StringLiteral *getAsmString() const { return AsmStr; }
+ StringLiteral *getAsmString() { return AsmStr; }
+ void setAsmString(StringLiteral *E) { AsmStr = E; }
+
+ /// AsmStringPiece - this is part of a decomposed asm string specification
+ /// (for use with the AnalyzeAsmString function below). An asm string is
+ /// considered to be a concatenation of these parts.
+ class AsmStringPiece {
+ public:
+ enum Kind {
+ String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
+ Operand // Operand reference, with optional modifier %c4.
+ };
+ private:
+ Kind MyKind;
+ std::string Str;
+ unsigned OperandNo;
+ public:
+ AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
+ AsmStringPiece(unsigned OpNo, char Modifier)
+ : MyKind(Operand), Str(), OperandNo(OpNo) {
+ Str += Modifier;
+ }
+
+ bool isString() const { return MyKind == String; }
+ bool isOperand() const { return MyKind == Operand; }
+
+ const std::string &getString() const {
+ assert(isString());
+ return Str;
+ }
+
+ unsigned getOperandNo() const {
+ assert(isOperand());
+ return OperandNo;
+ }
+
+ /// getModifier - Get the modifier for this operand, if present. This
+ /// returns '\0' if there was no modifier.
+ char getModifier() const {
+ assert(isOperand());
+ return Str[0];
+ }
+ };
+
+ /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
+ /// it into pieces. If the asm string is erroneous, emit errors and return
+ /// true, otherwise return false. This handles canonicalization and
+ /// translation of strings from GCC syntax to LLVM IR syntax, and handles
+  /// flattening of named references like %[foo] to Operand AsmStringPieces.
+ unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
+ ASTContext &C, unsigned &DiagOffs) const;
+
+
+ //===--- Output operands ---===//
+
+ unsigned getNumOutputs() const { return NumOutputs; }
+
+ IdentifierInfo *getOutputIdentifier(unsigned i) const {
+ return Names[i];
+ }
+
+ StringRef getOutputName(unsigned i) const {
+ if (IdentifierInfo *II = getOutputIdentifier(i))
+ return II->getName();
+
+ return StringRef();
+ }
+
+ /// getOutputConstraint - Return the constraint string for the specified
+ /// output operand. All output constraints are known to be non-empty (either
+ /// '=' or '+').
+ StringRef getOutputConstraint(unsigned i) const;
+
+ const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
+ return Constraints[i];
+ }
+ StringLiteral *getOutputConstraintLiteral(unsigned i) {
+ return Constraints[i];
+ }
+
+ Expr *getOutputExpr(unsigned i);
+
+ const Expr *getOutputExpr(unsigned i) const {
+ return const_cast<AsmStmt*>(this)->getOutputExpr(i);
+ }
+
+ /// isOutputPlusConstraint - Return true if the specified output constraint
+ /// is a "+" constraint (which is both an input and an output) or false if it
+ /// is an "=" constraint (just an output).
+ bool isOutputPlusConstraint(unsigned i) const {
+ return getOutputConstraint(i)[0] == '+';
+ }
+
+ /// getNumPlusOperands - Return the number of output operands that have a "+"
+ /// constraint.
+ unsigned getNumPlusOperands() const;
+
+ //===--- Input operands ---===//
+
+ unsigned getNumInputs() const { return NumInputs; }
+
+ IdentifierInfo *getInputIdentifier(unsigned i) const {
+ return Names[i + NumOutputs];
+ }
+
+ StringRef getInputName(unsigned i) const {
+ if (IdentifierInfo *II = getInputIdentifier(i))
+ return II->getName();
+
+ return StringRef();
+ }
+
+ /// getInputConstraint - Return the specified input constraint. Unlike output
+ /// constraints, these can be empty.
+ StringRef getInputConstraint(unsigned i) const;
+
+ const StringLiteral *getInputConstraintLiteral(unsigned i) const {
+ return Constraints[i + NumOutputs];
+ }
+ StringLiteral *getInputConstraintLiteral(unsigned i) {
+ return Constraints[i + NumOutputs];
+ }
+
+ Expr *getInputExpr(unsigned i);
+ void setInputExpr(unsigned i, Expr *E);
+
+ const Expr *getInputExpr(unsigned i) const {
+ return const_cast<AsmStmt*>(this)->getInputExpr(i);
+ }
+
+ void setOutputsAndInputsAndClobbers(ASTContext &C,
+ IdentifierInfo **Names,
+ StringLiteral **Constraints,
+ Stmt **Exprs,
+ unsigned NumOutputs,
+ unsigned NumInputs,
+ StringLiteral **Clobbers,
+ unsigned NumClobbers);
+
+ //===--- Other ---===//
+
+ /// getNamedOperand - Given a symbolic operand reference like %[foo],
+ /// translate this into a numeric value needed to reference the same operand.
+ /// This returns -1 if the operand name is invalid.
+ int getNamedOperand(StringRef SymbolicName) const;
+
+ unsigned getNumClobbers() const { return NumClobbers; }
+ StringLiteral *getClobber(unsigned i) { return Clobbers[i]; }
+ const StringLiteral *getClobber(unsigned i) const { return Clobbers[i]; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(AsmLoc, RParenLoc);
+ }
+
+ static bool classof(const Stmt *T) {return T->getStmtClass() == AsmStmtClass;}
+ static bool classof(const AsmStmt *) { return true; }
+
+ // Input expr iterators.
+
+ typedef ExprIterator inputs_iterator;
+ typedef ConstExprIterator const_inputs_iterator;
+
+ inputs_iterator begin_inputs() {
+ return &Exprs[0] + NumOutputs;
+ }
+
+ inputs_iterator end_inputs() {
+ return &Exprs[0] + NumOutputs + NumInputs;
+ }
+
+ const_inputs_iterator begin_inputs() const {
+ return &Exprs[0] + NumOutputs;
+ }
+
+ const_inputs_iterator end_inputs() const {
+ return &Exprs[0] + NumOutputs + NumInputs;
+ }
+
+ // Output expr iterators.
+
+ typedef ExprIterator outputs_iterator;
+ typedef ConstExprIterator const_outputs_iterator;
+
+ outputs_iterator begin_outputs() {
+ return &Exprs[0];
+ }
+ outputs_iterator end_outputs() {
+ return &Exprs[0] + NumOutputs;
+ }
+
+ const_outputs_iterator begin_outputs() const {
+ return &Exprs[0];
+ }
+ const_outputs_iterator end_outputs() const {
+ return &Exprs[0] + NumOutputs;
+ }
+
+ child_range children() {
+ return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
+ }
+};
+
+class SEHExceptStmt : public Stmt {
+ SourceLocation Loc;
+ Stmt *Children[2];
+
+ enum { FILTER_EXPR, BLOCK };
+
+ SEHExceptStmt(SourceLocation Loc,
+ Expr *FilterExpr,
+ Stmt *Block);
+
+ friend class ASTReader;
+ friend class ASTStmtReader;
+ explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { }
+
+public:
+ static SEHExceptStmt* Create(ASTContext &C,
+ SourceLocation ExceptLoc,
+ Expr *FilterExpr,
+ Stmt *Block);
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getExceptLoc(), getEndLoc());
+ }
+
+ SourceLocation getExceptLoc() const { return Loc; }
+ SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); }
+
+ Expr *getFilterExpr() const {
+ return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
+ }
+
+ CompoundStmt *getBlock() const {
+ return llvm::cast<CompoundStmt>(Children[BLOCK]);
+ }
+
+ child_range children() {
+ return child_range(Children,Children+2);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == SEHExceptStmtClass;
+ }
+
+ static bool classof(SEHExceptStmt *) { return true; }
+
+};
+
+class SEHFinallyStmt : public Stmt {
+ SourceLocation Loc;
+ Stmt *Block;
+
+ SEHFinallyStmt(SourceLocation Loc,
+ Stmt *Block);
+
+ friend class ASTReader;
+ friend class ASTStmtReader;
+ explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { }
+
+public:
+ static SEHFinallyStmt* Create(ASTContext &C,
+ SourceLocation FinallyLoc,
+ Stmt *Block);
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getFinallyLoc(), getEndLoc());
+ }
+
+ SourceLocation getFinallyLoc() const { return Loc; }
+ SourceLocation getEndLoc() const { return Block->getLocEnd(); }
+
+ CompoundStmt *getBlock() const { return llvm::cast<CompoundStmt>(Block); }
+
+ child_range children() {
+ return child_range(&Block,&Block+1);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == SEHFinallyStmtClass;
+ }
+
+ static bool classof(SEHFinallyStmt *) { return true; }
+
+};
+
+class SEHTryStmt : public Stmt {
+ bool IsCXXTry;
+ SourceLocation TryLoc;
+ Stmt *Children[2];
+
+ enum { TRY = 0, HANDLER = 1 };
+
+ SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
+ SourceLocation TryLoc,
+ Stmt *TryBlock,
+ Stmt *Handler);
+
+ friend class ASTReader;
+ friend class ASTStmtReader;
+ explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { }
+
+public:
+ static SEHTryStmt* Create(ASTContext &C,
+ bool isCXXTry,
+ SourceLocation TryLoc,
+ Stmt *TryBlock,
+ Stmt *Handler);
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getTryLoc(), getEndLoc());
+ }
+
+ SourceLocation getTryLoc() const { return TryLoc; }
+ SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); }
+
+ bool getIsCXXTry() const { return IsCXXTry; }
+
+ CompoundStmt* getTryBlock() const {
+ return llvm::cast<CompoundStmt>(Children[TRY]);
+ }
+
+ Stmt *getHandler() const { return Children[HANDLER]; }
+
+ /// Returns 0 if not defined
+ SEHExceptStmt *getExceptHandler() const;
+ SEHFinallyStmt *getFinallyHandler() const;
+
+ child_range children() {
+ return child_range(Children,Children+2);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == SEHTryStmtClass;
+ }
+
+ static bool classof(SEHTryStmt *) { return true; }
+};
+
+} // end namespace clang
+
+#endif
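A minimal usage sketch for the AsmStmt operand accessors declared above (not part of the imported header; dumpAsmOperands is a hypothetical helper):

    #include "clang/AST/Stmt.h"
    #include "llvm/Support/raw_ostream.h"

    // Print each output and input operand of an inline-asm statement,
    // using only the accessors declared in the class above.
    static void dumpAsmOperands(const clang::AsmStmt *S) {
      for (unsigned i = 0, e = S->getNumOutputs(); i != e; ++i)
        llvm::errs() << "output " << i << " '" << S->getOutputName(i)
                     << "' constraint '" << S->getOutputConstraint(i) << "'\n";
      for (unsigned i = 0, e = S->getNumInputs(); i != e; ++i)
        llvm::errs() << "input " << i << " '" << S->getInputName(i)
                     << "' constraint '" << S->getInputConstraint(i) << "'\n";
    }

As the header notes via isOutputPlusConstraint, outputs with a '+' constraint act as both outputs and inputs.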
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h b/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h
new file mode 100644
index 0000000..a948722
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/StmtCXX.h
@@ -0,0 +1,295 @@
+//===--- StmtCXX.h - Classes for representing C++ statements ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the C++ statement AST node classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_STMTCXX_H
+#define LLVM_CLANG_AST_STMTCXX_H
+
+#include "clang/AST/Stmt.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+
+class VarDecl;
+
+/// CXXCatchStmt - This represents a C++ catch block.
+///
+class CXXCatchStmt : public Stmt {
+ SourceLocation CatchLoc;
+ /// The exception-declaration of the type.
+ VarDecl *ExceptionDecl;
+ /// The handler block.
+ Stmt *HandlerBlock;
+
+public:
+ CXXCatchStmt(SourceLocation catchLoc, VarDecl *exDecl, Stmt *handlerBlock)
+ : Stmt(CXXCatchStmtClass), CatchLoc(catchLoc), ExceptionDecl(exDecl),
+ HandlerBlock(handlerBlock) {}
+
+ CXXCatchStmt(EmptyShell Empty)
+ : Stmt(CXXCatchStmtClass), ExceptionDecl(0), HandlerBlock(0) {}
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(CatchLoc, HandlerBlock->getLocEnd());
+ }
+
+ SourceLocation getCatchLoc() const { return CatchLoc; }
+ VarDecl *getExceptionDecl() const { return ExceptionDecl; }
+ QualType getCaughtType() const;
+ Stmt *getHandlerBlock() const { return HandlerBlock; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXCatchStmtClass;
+ }
+ static bool classof(const CXXCatchStmt *) { return true; }
+
+ child_range children() { return child_range(&HandlerBlock, &HandlerBlock+1); }
+
+ friend class ASTStmtReader;
+};
+
+/// CXXTryStmt - A C++ try block, including all handlers.
+///
+class CXXTryStmt : public Stmt {
+ SourceLocation TryLoc;
+ unsigned NumHandlers;
+
+ CXXTryStmt(SourceLocation tryLoc, Stmt *tryBlock, Stmt **handlers,
+ unsigned numHandlers);
+
+ CXXTryStmt(EmptyShell Empty, unsigned numHandlers)
+ : Stmt(CXXTryStmtClass), NumHandlers(numHandlers) { }
+
+ Stmt const * const *getStmts() const {
+ return reinterpret_cast<Stmt const * const*>(this + 1);
+ }
+ Stmt **getStmts() {
+ return reinterpret_cast<Stmt **>(this + 1);
+ }
+
+public:
+ static CXXTryStmt *Create(ASTContext &C, SourceLocation tryLoc,
+ Stmt *tryBlock, Stmt **handlers,
+ unsigned numHandlers);
+
+ static CXXTryStmt *Create(ASTContext &C, EmptyShell Empty,
+ unsigned numHandlers);
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getTryLoc(), getEndLoc());
+ }
+
+ SourceLocation getTryLoc() const { return TryLoc; }
+ SourceLocation getEndLoc() const {
+ return getStmts()[NumHandlers]->getLocEnd();
+ }
+
+ CompoundStmt *getTryBlock() {
+ return llvm::cast<CompoundStmt>(getStmts()[0]);
+ }
+ const CompoundStmt *getTryBlock() const {
+ return llvm::cast<CompoundStmt>(getStmts()[0]);
+ }
+
+ unsigned getNumHandlers() const { return NumHandlers; }
+ CXXCatchStmt *getHandler(unsigned i) {
+ return llvm::cast<CXXCatchStmt>(getStmts()[i + 1]);
+ }
+ const CXXCatchStmt *getHandler(unsigned i) const {
+ return llvm::cast<CXXCatchStmt>(getStmts()[i + 1]);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXTryStmtClass;
+ }
+ static bool classof(const CXXTryStmt *) { return true; }
+
+ child_range children() {
+ return child_range(getStmts(), getStmts() + getNumHandlers() + 1);
+ }
+
+ friend class ASTStmtReader;
+};
+
+/// CXXForRangeStmt - This represents C++0x [stmt.ranged]'s ranged for
+/// statement, represented as 'for (range-declarator : range-expression)'.
+///
+/// This is stored in a partially-desugared form to allow full semantic
+/// analysis of the constituent components. The original syntactic components
+/// can be extracted using getLoopVariable and getRangeInit.
+class CXXForRangeStmt : public Stmt {
+ enum { RANGE, BEGINEND, COND, INC, LOOPVAR, BODY, END };
+ // SubExprs[RANGE] is an expression or declstmt.
+ // SubExprs[COND] and SubExprs[INC] are expressions.
+ Stmt *SubExprs[END];
+ SourceLocation ForLoc;
+ SourceLocation ColonLoc;
+ SourceLocation RParenLoc;
+public:
+ CXXForRangeStmt(DeclStmt *Range, DeclStmt *BeginEnd,
+ Expr *Cond, Expr *Inc, DeclStmt *LoopVar, Stmt *Body,
+ SourceLocation FL, SourceLocation CL, SourceLocation RPL);
+ CXXForRangeStmt(EmptyShell Empty) : Stmt(CXXForRangeStmtClass, Empty) { }
+
+
+ VarDecl *getLoopVariable();
+ Expr *getRangeInit();
+
+ const VarDecl *getLoopVariable() const;
+ const Expr *getRangeInit() const;
+
+
+ DeclStmt *getRangeStmt() { return cast<DeclStmt>(SubExprs[RANGE]); }
+ DeclStmt *getBeginEndStmt() {
+ return cast_or_null<DeclStmt>(SubExprs[BEGINEND]);
+ }
+ Expr *getCond() { return cast_or_null<Expr>(SubExprs[COND]); }
+ Expr *getInc() { return cast_or_null<Expr>(SubExprs[INC]); }
+ DeclStmt *getLoopVarStmt() { return cast<DeclStmt>(SubExprs[LOOPVAR]); }
+ Stmt *getBody() { return SubExprs[BODY]; }
+
+ const DeclStmt *getRangeStmt() const {
+ return cast<DeclStmt>(SubExprs[RANGE]);
+ }
+ const DeclStmt *getBeginEndStmt() const {
+ return cast_or_null<DeclStmt>(SubExprs[BEGINEND]);
+ }
+ const Expr *getCond() const {
+ return cast_or_null<Expr>(SubExprs[COND]);
+ }
+ const Expr *getInc() const {
+ return cast_or_null<Expr>(SubExprs[INC]);
+ }
+ const DeclStmt *getLoopVarStmt() const {
+ return cast<DeclStmt>(SubExprs[LOOPVAR]);
+ }
+ const Stmt *getBody() const { return SubExprs[BODY]; }
+
+ void setRangeInit(Expr *E) { SubExprs[RANGE] = reinterpret_cast<Stmt*>(E); }
+ void setRangeStmt(Stmt *S) { SubExprs[RANGE] = S; }
+ void setBeginEndStmt(Stmt *S) { SubExprs[BEGINEND] = S; }
+ void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
+ void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
+ void setLoopVarStmt(Stmt *S) { SubExprs[LOOPVAR] = S; }
+ void setBody(Stmt *S) { SubExprs[BODY] = S; }
+
+
+ SourceLocation getForLoc() const { return ForLoc; }
+ void setForLoc(SourceLocation Loc) { ForLoc = Loc; }
+ SourceLocation getColonLoc() const { return ColonLoc; }
+ void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; }
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(ForLoc, SubExprs[BODY]->getLocEnd());
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == CXXForRangeStmtClass;
+ }
+ static bool classof(const CXXForRangeStmt *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[END]);
+ }
+};
+
+/// \brief Representation of a Microsoft __if_exists or __if_not_exists
+/// statement with a dependent name.
+///
+/// The __if_exists statement can be used to include a sequence of statements
+/// in the program only when a particular dependent name exists. For
+/// example:
+///
+/// \code
+/// template<typename T>
+/// void call_foo(T &t) {
+/// __if_exists (T::foo) {
+/// t.foo(); // okay: only called when T::foo exists.
+/// }
+/// }
+/// \endcode
+///
+/// Similarly, the __if_not_exists statement can be used to include the
+/// statements when a particular name does not exist.
+///
+/// Note that this statement only captures __if_exists and __if_not_exists
+/// statements whose name is dependent. All non-dependent cases are handled
+/// directly in the parser, so that they don't introduce a new scope. Clang
+/// introduces scopes in the dependent case to keep names inside the compound
+/// statement from leaking out into the surrounding statements, which would
+/// compromise the template instantiation model. This behavior differs from
+/// Visual C++ (which never introduces a scope), but is a fairly reasonable
+/// approximation of the VC++ behavior.
+class MSDependentExistsStmt : public Stmt {
+ SourceLocation KeywordLoc;
+ bool IsIfExists;
+ NestedNameSpecifierLoc QualifierLoc;
+ DeclarationNameInfo NameInfo;
+ Stmt *SubStmt;
+
+ friend class ASTReader;
+ friend class ASTStmtReader;
+
+public:
+ MSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists,
+ NestedNameSpecifierLoc QualifierLoc,
+ DeclarationNameInfo NameInfo,
+ CompoundStmt *SubStmt)
+ : Stmt(MSDependentExistsStmtClass),
+ KeywordLoc(KeywordLoc), IsIfExists(IsIfExists),
+ QualifierLoc(QualifierLoc), NameInfo(NameInfo),
+ SubStmt(reinterpret_cast<Stmt *>(SubStmt)) { }
+
+ /// \brief Retrieve the location of the __if_exists or __if_not_exists
+ /// keyword.
+ SourceLocation getKeywordLoc() const { return KeywordLoc; }
+
+ /// \brief Determine whether this is an __if_exists statement.
+ bool isIfExists() const { return IsIfExists; }
+
+ /// \brief Determine whether this is an __if_not_exists statement.
+ bool isIfNotExists() const { return !IsIfExists; }
+
+ /// \brief Retrieve the nested-name-specifier that qualifies this name, if
+ /// any.
+ NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; }
+
+ /// \brief Retrieve the name of the entity we're testing for, along with
+ /// location information
+ DeclarationNameInfo getNameInfo() const { return NameInfo; }
+
+ /// \brief Retrieve the compound statement that will be included in the
+ /// program only if the existence of the symbol matches the initial keyword.
+ CompoundStmt *getSubStmt() const {
+ return reinterpret_cast<CompoundStmt *>(SubStmt);
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(KeywordLoc, SubStmt->getLocEnd());
+ }
+
+ child_range children() {
+ return child_range(&SubStmt, &SubStmt+1);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == MSDependentExistsStmtClass;
+ }
+
+ static bool classof(MSDependentExistsStmt *) { return true; }
+};
+
+} // end namespace clang
+
+#endif
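A sketch against the CXXTryStmt interface above, assuming (as elsewhere in Clang) that a handler with a null exception-declaration corresponds to 'catch (...)'; countCatchAll is a hypothetical helper, not part of the header:

    #include "clang/AST/StmtCXX.h"

    // Count the catch-all handlers of a C++ try block.
    static unsigned countCatchAll(const clang::CXXTryStmt *Try) {
      unsigned N = 0;
      for (unsigned i = 0, e = Try->getNumHandlers(); i != e; ++i)
        if (Try->getHandler(i)->getExceptionDecl() == 0)
          ++N;
      return N;
    }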
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtGraphTraits.h b/contrib/llvm/tools/clang/include/clang/AST/StmtGraphTraits.h
new file mode 100644
index 0000000..25d0152
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/StmtGraphTraits.h
@@ -0,0 +1,83 @@
+//===--- StmtGraphTraits.h - Graph Traits for the class Stmt ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a template specialization of llvm::GraphTraits to
+// treat ASTs (Stmt*) as graphs
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_STMT_GRAPHTRAITS_H
+#define LLVM_CLANG_AST_STMT_GRAPHTRAITS_H
+
+#include "clang/AST/Stmt.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+
+namespace llvm {
+
+//template <typename T> struct GraphTraits;
+
+
+template <> struct GraphTraits<clang::Stmt*> {
+ typedef clang::Stmt NodeType;
+ typedef clang::Stmt::child_iterator ChildIteratorType;
+ typedef llvm::df_iterator<clang::Stmt*> nodes_iterator;
+
+ static NodeType* getEntryNode(clang::Stmt* S) { return S; }
+
+ static inline ChildIteratorType child_begin(NodeType* N) {
+ if (N) return N->child_begin();
+ else return ChildIteratorType();
+ }
+
+ static inline ChildIteratorType child_end(NodeType* N) {
+ if (N) return N->child_end();
+ else return ChildIteratorType();
+ }
+
+ static nodes_iterator nodes_begin(clang::Stmt* S) {
+ return df_begin(S);
+ }
+
+ static nodes_iterator nodes_end(clang::Stmt* S) {
+ return df_end(S);
+ }
+};
+
+
+template <> struct GraphTraits<const clang::Stmt*> {
+ typedef const clang::Stmt NodeType;
+ typedef clang::Stmt::const_child_iterator ChildIteratorType;
+ typedef llvm::df_iterator<const clang::Stmt*> nodes_iterator;
+
+ static NodeType* getEntryNode(const clang::Stmt* S) { return S; }
+
+ static inline ChildIteratorType child_begin(NodeType* N) {
+ if (N) return N->child_begin();
+ else return ChildIteratorType();
+ }
+
+ static inline ChildIteratorType child_end(NodeType* N) {
+ if (N) return N->child_end();
+ else return ChildIteratorType();
+ }
+
+ static nodes_iterator nodes_begin(const clang::Stmt* S) {
+ return df_begin(S);
+ }
+
+ static nodes_iterator nodes_end(const clang::Stmt* S) {
+ return df_end(S);
+ }
+};
+
+
+} // end namespace llvm
+
+#endif
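With the two specializations above in scope, the generic llvm::df_iterator machinery can walk a statement subtree; a minimal sketch (countStmtNodes is a hypothetical helper):

    #include "clang/AST/StmtGraphTraits.h"

    // Count the nodes reachable from Root by a depth-first traversal.
    // Null children, which the traits above tolerate, are visited too.
    static unsigned countStmtNodes(clang::Stmt *Root) {
      unsigned N = 0;
      for (llvm::df_iterator<clang::Stmt*> I = llvm::df_begin(Root),
                                           E = llvm::df_end(Root);
           I != E; ++I)
        ++N;
      return N;
    }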
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h b/contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h
new file mode 100644
index 0000000..b933ed0
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/StmtIterator.h
@@ -0,0 +1,230 @@
+//===--- StmtIterator.h - Iterators for Statements --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the StmtIterator and ConstStmtIterator classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_STMT_ITR_H
+#define LLVM_CLANG_AST_STMT_ITR_H
+
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <utility>
+
+namespace clang {
+
+class Stmt;
+class Decl;
+class VariableArrayType;
+
+class StmtIteratorBase {
+protected:
+ enum { DeclMode = 0x1, SizeOfTypeVAMode = 0x2, DeclGroupMode = 0x3,
+ Flags = 0x3 };
+
+ Stmt **stmt;
+ union { Decl *decl; Decl **DGI; };
+ uintptr_t RawVAPtr;
+ Decl **DGE;
+
+ bool inDecl() const {
+ return (RawVAPtr & Flags) == DeclMode;
+ }
+
+ bool inDeclGroup() const {
+ return (RawVAPtr & Flags) == DeclGroupMode;
+ }
+
+ bool inSizeOfTypeVA() const {
+ return (RawVAPtr & Flags) == SizeOfTypeVAMode;
+ }
+
+ bool inStmt() const {
+ return (RawVAPtr & Flags) == 0;
+ }
+
+ const VariableArrayType *getVAPtr() const {
+ return reinterpret_cast<const VariableArrayType*>(RawVAPtr & ~Flags);
+ }
+
+ void setVAPtr(const VariableArrayType *P) {
+ assert (inDecl() || inDeclGroup() || inSizeOfTypeVA());
+ RawVAPtr = reinterpret_cast<uintptr_t>(P) | (RawVAPtr & Flags);
+ }
+
+ void NextDecl(bool ImmediateAdvance = true);
+ bool HandleDecl(Decl* D);
+ void NextVA();
+
+ Stmt*& GetDeclExpr() const;
+
+ StmtIteratorBase(Stmt **s) : stmt(s), decl(0), RawVAPtr(0) {}
+ StmtIteratorBase(Decl *d, Stmt **s);
+ StmtIteratorBase(const VariableArrayType *t);
+ StmtIteratorBase(Decl **dgi, Decl **dge);
+ StmtIteratorBase() : stmt(0), decl(0), RawVAPtr(0) {}
+};
+
+
+template <typename DERIVED, typename REFERENCE>
+class StmtIteratorImpl : public StmtIteratorBase,
+ public std::iterator<std::forward_iterator_tag,
+ REFERENCE, ptrdiff_t,
+ REFERENCE, REFERENCE> {
+protected:
+ StmtIteratorImpl(const StmtIteratorBase& RHS) : StmtIteratorBase(RHS) {}
+public:
+ StmtIteratorImpl() {}
+ StmtIteratorImpl(Stmt **s) : StmtIteratorBase(s) {}
+ StmtIteratorImpl(Decl **dgi, Decl **dge) : StmtIteratorBase(dgi, dge) {}
+ StmtIteratorImpl(Decl *d, Stmt **s) : StmtIteratorBase(d, s) {}
+ StmtIteratorImpl(const VariableArrayType *t) : StmtIteratorBase(t) {}
+
+ DERIVED& operator++() {
+ if (inStmt())
+ ++stmt;
+ else if (getVAPtr())
+ NextVA();
+ else
+ NextDecl();
+
+ return static_cast<DERIVED&>(*this);
+ }
+
+ DERIVED operator++(int) {
+ DERIVED tmp = static_cast<DERIVED&>(*this);
+ operator++();
+ return tmp;
+ }
+
+ bool operator==(const DERIVED& RHS) const {
+ return stmt == RHS.stmt && decl == RHS.decl && RawVAPtr == RHS.RawVAPtr;
+ }
+
+ bool operator!=(const DERIVED& RHS) const {
+ return stmt != RHS.stmt || decl != RHS.decl || RawVAPtr != RHS.RawVAPtr;
+ }
+
+ REFERENCE operator*() const {
+ return (REFERENCE) (inStmt() ? *stmt : GetDeclExpr());
+ }
+
+ REFERENCE operator->() const { return operator*(); }
+};
+
+struct StmtIterator : public StmtIteratorImpl<StmtIterator,Stmt*&> {
+ explicit StmtIterator() : StmtIteratorImpl<StmtIterator,Stmt*&>() {}
+
+ StmtIterator(Stmt** S) : StmtIteratorImpl<StmtIterator,Stmt*&>(S) {}
+
+ StmtIterator(Decl** dgi, Decl** dge)
+ : StmtIteratorImpl<StmtIterator,Stmt*&>(dgi, dge) {}
+
+ StmtIterator(const VariableArrayType *t)
+ : StmtIteratorImpl<StmtIterator,Stmt*&>(t) {}
+
+ StmtIterator(Decl* D, Stmt **s = 0)
+ : StmtIteratorImpl<StmtIterator,Stmt*&>(D, s) {}
+};
+
+struct ConstStmtIterator : public StmtIteratorImpl<ConstStmtIterator,
+ const Stmt*> {
+ explicit ConstStmtIterator() :
+ StmtIteratorImpl<ConstStmtIterator,const Stmt*>() {}
+
+ ConstStmtIterator(const StmtIterator& RHS) :
+ StmtIteratorImpl<ConstStmtIterator,const Stmt*>(RHS) {}
+};
+
+/// A range of statement iterators.
+///
+/// This class provides some extra functionality beyond std::pair
+/// in order to allow the following idiom:
+/// for (StmtRange range = stmt->children(); range; ++range)
+struct StmtRange : std::pair<StmtIterator,StmtIterator> {
+ StmtRange() {}
+ StmtRange(const StmtIterator &begin, const StmtIterator &end)
+ : std::pair<StmtIterator,StmtIterator>(begin, end) {}
+
+ bool empty() const { return first == second; }
+ operator bool() const { return !empty(); }
+
+ Stmt *operator->() const { return first.operator->(); }
+ Stmt *&operator*() const { return first.operator*(); }
+
+ StmtRange &operator++() {
+ assert(!empty() && "incrementing on empty range");
+ ++first;
+ return *this;
+ }
+
+ StmtRange operator++(int) {
+ assert(!empty() && "incrementing on empty range");
+ StmtRange copy = *this;
+ ++first;
+ return copy;
+ }
+
+ friend const StmtIterator &begin(const StmtRange &range) {
+ return range.first;
+ }
+ friend const StmtIterator &end(const StmtRange &range) {
+ return range.second;
+ }
+};
+
+/// A range of const statement iterators.
+///
+/// This class provides some extra functionality beyond std::pair
+/// in order to allow the following idiom:
+/// for (ConstStmtRange range = stmt->children(); range; ++range)
+struct ConstStmtRange : std::pair<ConstStmtIterator,ConstStmtIterator> {
+ ConstStmtRange() {}
+ ConstStmtRange(const ConstStmtIterator &begin,
+ const ConstStmtIterator &end)
+ : std::pair<ConstStmtIterator,ConstStmtIterator>(begin, end) {}
+ ConstStmtRange(const StmtRange &range)
+ : std::pair<ConstStmtIterator,ConstStmtIterator>(range.first, range.second)
+ {}
+ ConstStmtRange(const StmtIterator &begin, const StmtIterator &end)
+ : std::pair<ConstStmtIterator,ConstStmtIterator>(begin, end) {}
+
+ bool empty() const { return first == second; }
+ operator bool() const { return !empty(); }
+
+ const Stmt *operator->() const { return first.operator->(); }
+ const Stmt *operator*() const { return first.operator*(); }
+
+ ConstStmtRange &operator++() {
+ assert(!empty() && "incrementing on empty range");
+ ++first;
+ return *this;
+ }
+
+ ConstStmtRange operator++(int) {
+ assert(!empty() && "incrementing on empty range");
+ ConstStmtRange copy = *this;
+ ++first;
+ return copy;
+ }
+
+ friend const ConstStmtIterator &begin(const ConstStmtRange &range) {
+ return range.first;
+ }
+ friend const ConstStmtIterator &end(const ConstStmtRange &range) {
+ return range.second;
+ }
+};
+
+} // end namespace clang
+
+#endif
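The range idiom documented for StmtRange above, written out as a small sketch; countNonNullChildren is a hypothetical helper, and Stmt::child_range is assumed to be the StmtRange typedef from Stmt.h:

    #include "clang/AST/Stmt.h"

    // Visit the immediate children of S and count the non-null ones
    // (a statement's child slots may legitimately be null).
    static unsigned countNonNullChildren(clang::Stmt *S) {
      unsigned N = 0;
      for (clang::Stmt::child_range Range = S->children(); Range; ++Range)
        if (*Range)
          ++N;
      return N;
    }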
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h b/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h
new file mode 100644
index 0000000..a321041
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/StmtObjC.h
@@ -0,0 +1,381 @@
+//===--- StmtObjC.h - Classes for representing ObjC statements --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Objective-C statement AST node classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_STMTOBJC_H
+#define LLVM_CLANG_AST_STMTOBJC_H
+
+#include "clang/AST/Stmt.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+
+/// ObjCForCollectionStmt - This represents Objective-C's collection statement,
+/// written as 'for (element in collection-expression)'.
+///
+class ObjCForCollectionStmt : public Stmt {
+ enum { ELEM, COLLECTION, BODY, END_EXPR };
+ Stmt* SubExprs[END_EXPR]; // SubExprs[ELEM] is an expression or declstmt.
+ SourceLocation ForLoc;
+ SourceLocation RParenLoc;
+public:
+ ObjCForCollectionStmt(Stmt *Elem, Expr *Collect, Stmt *Body,
+ SourceLocation FCL, SourceLocation RPL);
+ explicit ObjCForCollectionStmt(EmptyShell Empty) :
+ Stmt(ObjCForCollectionStmtClass, Empty) { }
+
+ Stmt *getElement() { return SubExprs[ELEM]; }
+ Expr *getCollection() {
+ return reinterpret_cast<Expr*>(SubExprs[COLLECTION]);
+ }
+ Stmt *getBody() { return SubExprs[BODY]; }
+
+ const Stmt *getElement() const { return SubExprs[ELEM]; }
+ const Expr *getCollection() const {
+ return reinterpret_cast<Expr*>(SubExprs[COLLECTION]);
+ }
+ const Stmt *getBody() const { return SubExprs[BODY]; }
+
+ void setElement(Stmt *S) { SubExprs[ELEM] = S; }
+ void setCollection(Expr *E) {
+ SubExprs[COLLECTION] = reinterpret_cast<Stmt*>(E);
+ }
+ void setBody(Stmt *S) { SubExprs[BODY] = S; }
+
+ SourceLocation getForLoc() const { return ForLoc; }
+ void setForLoc(SourceLocation Loc) { ForLoc = Loc; }
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(ForLoc, SubExprs[BODY]->getLocEnd());
+ }
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCForCollectionStmtClass;
+ }
+ static bool classof(const ObjCForCollectionStmt *) { return true; }
+
+ // Iterators
+ child_range children() {
+ return child_range(&SubExprs[0], &SubExprs[END_EXPR]);
+ }
+};
+
+/// ObjCAtCatchStmt - This represents Objective-C's @catch statement.
+class ObjCAtCatchStmt : public Stmt {
+private:
+ VarDecl *ExceptionDecl;
+ Stmt *Body;
+ SourceLocation AtCatchLoc, RParenLoc;
+
+public:
+ ObjCAtCatchStmt(SourceLocation atCatchLoc, SourceLocation rparenloc,
+ VarDecl *catchVarDecl,
+ Stmt *atCatchStmt)
+ : Stmt(ObjCAtCatchStmtClass), ExceptionDecl(catchVarDecl),
+ Body(atCatchStmt), AtCatchLoc(atCatchLoc), RParenLoc(rparenloc) { }
+
+ explicit ObjCAtCatchStmt(EmptyShell Empty) :
+ Stmt(ObjCAtCatchStmtClass, Empty) { }
+
+ const Stmt *getCatchBody() const { return Body; }
+ Stmt *getCatchBody() { return Body; }
+ void setCatchBody(Stmt *S) { Body = S; }
+
+ const VarDecl *getCatchParamDecl() const {
+ return ExceptionDecl;
+ }
+ VarDecl *getCatchParamDecl() {
+ return ExceptionDecl;
+ }
+ void setCatchParamDecl(VarDecl *D) { ExceptionDecl = D; }
+
+ SourceLocation getAtCatchLoc() const { return AtCatchLoc; }
+ void setAtCatchLoc(SourceLocation Loc) { AtCatchLoc = Loc; }
+ SourceLocation getRParenLoc() const { return RParenLoc; }
+ void setRParenLoc(SourceLocation Loc) { RParenLoc = Loc; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(AtCatchLoc, Body->getLocEnd());
+ }
+
+ bool hasEllipsis() const { return getCatchParamDecl() == 0; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCAtCatchStmtClass;
+ }
+ static bool classof(const ObjCAtCatchStmt *) { return true; }
+
+ child_range children() { return child_range(&Body, &Body + 1); }
+};
+
+/// ObjCAtFinallyStmt - This represents Objective-C's @finally statement.
+class ObjCAtFinallyStmt : public Stmt {
+ Stmt *AtFinallyStmt;
+ SourceLocation AtFinallyLoc;
+public:
+ ObjCAtFinallyStmt(SourceLocation atFinallyLoc, Stmt *atFinallyStmt)
+ : Stmt(ObjCAtFinallyStmtClass),
+ AtFinallyStmt(atFinallyStmt), AtFinallyLoc(atFinallyLoc) {}
+
+ explicit ObjCAtFinallyStmt(EmptyShell Empty) :
+ Stmt(ObjCAtFinallyStmtClass, Empty) { }
+
+ const Stmt *getFinallyBody() const { return AtFinallyStmt; }
+ Stmt *getFinallyBody() { return AtFinallyStmt; }
+ void setFinallyBody(Stmt *S) { AtFinallyStmt = S; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(AtFinallyLoc, AtFinallyStmt->getLocEnd());
+ }
+
+ SourceLocation getAtFinallyLoc() const { return AtFinallyLoc; }
+ void setAtFinallyLoc(SourceLocation Loc) { AtFinallyLoc = Loc; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCAtFinallyStmtClass;
+ }
+ static bool classof(const ObjCAtFinallyStmt *) { return true; }
+
+ child_range children() {
+ return child_range(&AtFinallyStmt, &AtFinallyStmt+1);
+ }
+};
+
+/// ObjCAtTryStmt - This represents Objective-C's overall
+/// @try ... @catch ... @finally statement.
+class ObjCAtTryStmt : public Stmt {
+private:
+ // The location of the @ in the @try.
+ SourceLocation AtTryLoc;
+
+ // The number of catch blocks in this statement.
+ unsigned NumCatchStmts : 16;
+
+ // Whether this statement has a @finally statement.
+ bool HasFinally : 1;
+
+ /// \brief Retrieve the statements that are stored after this @try statement.
+ ///
+ /// The order of the statements in memory follows the order in the source,
+ /// with the @try body first, followed by the @catch statements (if any) and,
+ /// finally, the @finally (if it exists).
+ Stmt **getStmts() { return reinterpret_cast<Stmt **> (this + 1); }
+ const Stmt* const *getStmts() const {
+ return reinterpret_cast<const Stmt * const*> (this + 1);
+ }
+
+ ObjCAtTryStmt(SourceLocation atTryLoc, Stmt *atTryStmt,
+ Stmt **CatchStmts, unsigned NumCatchStmts,
+ Stmt *atFinallyStmt);
+
+ explicit ObjCAtTryStmt(EmptyShell Empty, unsigned NumCatchStmts,
+ bool HasFinally)
+ : Stmt(ObjCAtTryStmtClass, Empty), NumCatchStmts(NumCatchStmts),
+ HasFinally(HasFinally) { }
+
+public:
+ static ObjCAtTryStmt *Create(ASTContext &Context, SourceLocation atTryLoc,
+ Stmt *atTryStmt,
+ Stmt **CatchStmts, unsigned NumCatchStmts,
+ Stmt *atFinallyStmt);
+ static ObjCAtTryStmt *CreateEmpty(ASTContext &Context,
+ unsigned NumCatchStmts,
+ bool HasFinally);
+
+ /// \brief Retrieve the location of the @ in the @try.
+ SourceLocation getAtTryLoc() const { return AtTryLoc; }
+ void setAtTryLoc(SourceLocation Loc) { AtTryLoc = Loc; }
+
+ /// \brief Retrieve the @try body.
+ const Stmt *getTryBody() const { return getStmts()[0]; }
+ Stmt *getTryBody() { return getStmts()[0]; }
+ void setTryBody(Stmt *S) { getStmts()[0] = S; }
+
+ /// \brief Retrieve the number of @catch statements in this try-catch-finally
+ /// block.
+ unsigned getNumCatchStmts() const { return NumCatchStmts; }
+
+ /// \brief Retrieve a @catch statement.
+ const ObjCAtCatchStmt *getCatchStmt(unsigned I) const {
+ assert(I < NumCatchStmts && "Out-of-bounds @catch index");
+ return cast_or_null<ObjCAtCatchStmt>(getStmts()[I + 1]);
+ }
+
+ /// \brief Retrieve a @catch statement.
+ ObjCAtCatchStmt *getCatchStmt(unsigned I) {
+ assert(I < NumCatchStmts && "Out-of-bounds @catch index");
+ return cast_or_null<ObjCAtCatchStmt>(getStmts()[I + 1]);
+ }
+
+ /// \brief Set a particular catch statement.
+ void setCatchStmt(unsigned I, ObjCAtCatchStmt *S) {
+ assert(I < NumCatchStmts && "Out-of-bounds @catch index");
+ getStmts()[I + 1] = S;
+ }
+
+ /// Retrieve the @finally statement, if any.
+ const ObjCAtFinallyStmt *getFinallyStmt() const {
+ if (!HasFinally)
+ return 0;
+
+ return cast_or_null<ObjCAtFinallyStmt>(getStmts()[1 + NumCatchStmts]);
+ }
+ ObjCAtFinallyStmt *getFinallyStmt() {
+ if (!HasFinally)
+ return 0;
+
+ return cast_or_null<ObjCAtFinallyStmt>(getStmts()[1 + NumCatchStmts]);
+ }
+ void setFinallyStmt(Stmt *S) {
+ assert(HasFinally && "@try does not have a @finally slot!");
+ getStmts()[1 + NumCatchStmts] = S;
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCAtTryStmtClass;
+ }
+ static bool classof(const ObjCAtTryStmt *) { return true; }
+
+ child_range children() {
+ return child_range(getStmts(),
+ getStmts() + 1 + NumCatchStmts + HasFinally);
+ }
+};
+
+/// ObjCAtSynchronizedStmt - This represents Objective-C's @synchronized statement.
+/// Example: @synchronized (sem) {
+/// do-something;
+/// }
+///
+class ObjCAtSynchronizedStmt : public Stmt {
+private:
+ enum { SYNC_EXPR, SYNC_BODY, END_EXPR };
+ Stmt* SubStmts[END_EXPR];
+ SourceLocation AtSynchronizedLoc;
+
+public:
+ ObjCAtSynchronizedStmt(SourceLocation atSynchronizedLoc, Stmt *synchExpr,
+ Stmt *synchBody)
+ : Stmt(ObjCAtSynchronizedStmtClass) {
+ SubStmts[SYNC_EXPR] = synchExpr;
+ SubStmts[SYNC_BODY] = synchBody;
+ AtSynchronizedLoc = atSynchronizedLoc;
+ }
+ explicit ObjCAtSynchronizedStmt(EmptyShell Empty) :
+ Stmt(ObjCAtSynchronizedStmtClass, Empty) { }
+
+ SourceLocation getAtSynchronizedLoc() const { return AtSynchronizedLoc; }
+ void setAtSynchronizedLoc(SourceLocation Loc) { AtSynchronizedLoc = Loc; }
+
+ const CompoundStmt *getSynchBody() const {
+ return reinterpret_cast<CompoundStmt*>(SubStmts[SYNC_BODY]);
+ }
+ CompoundStmt *getSynchBody() {
+ return reinterpret_cast<CompoundStmt*>(SubStmts[SYNC_BODY]);
+ }
+ void setSynchBody(Stmt *S) { SubStmts[SYNC_BODY] = S; }
+
+ const Expr *getSynchExpr() const {
+ return reinterpret_cast<Expr*>(SubStmts[SYNC_EXPR]);
+ }
+ Expr *getSynchExpr() {
+ return reinterpret_cast<Expr*>(SubStmts[SYNC_EXPR]);
+ }
+ void setSynchExpr(Stmt *S) { SubStmts[SYNC_EXPR] = S; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(AtSynchronizedLoc, getSynchBody()->getLocEnd());
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCAtSynchronizedStmtClass;
+ }
+ static bool classof(const ObjCAtSynchronizedStmt *) { return true; }
+
+ child_range children() {
+ return child_range(&SubStmts[0], &SubStmts[0]+END_EXPR);
+ }
+};
+
+/// ObjCAtThrowStmt - This represents Objective-C's @throw statement.
+class ObjCAtThrowStmt : public Stmt {
+ Stmt *Throw;
+ SourceLocation AtThrowLoc;
+public:
+ ObjCAtThrowStmt(SourceLocation atThrowLoc, Stmt *throwExpr)
+ : Stmt(ObjCAtThrowStmtClass), Throw(throwExpr) {
+ AtThrowLoc = atThrowLoc;
+ }
+ explicit ObjCAtThrowStmt(EmptyShell Empty) :
+ Stmt(ObjCAtThrowStmtClass, Empty) { }
+
+ const Expr *getThrowExpr() const { return reinterpret_cast<Expr*>(Throw); }
+ Expr *getThrowExpr() { return reinterpret_cast<Expr*>(Throw); }
+ void setThrowExpr(Stmt *S) { Throw = S; }
+
+ SourceLocation getThrowLoc() { return AtThrowLoc; }
+ void setThrowLoc(SourceLocation Loc) { AtThrowLoc = Loc; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ if (Throw)
+ return SourceRange(AtThrowLoc, Throw->getLocEnd());
+ else
+ return SourceRange(AtThrowLoc);
+ }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCAtThrowStmtClass;
+ }
+ static bool classof(const ObjCAtThrowStmt *) { return true; }
+
+ child_range children() { return child_range(&Throw, &Throw+1); }
+};
+
+/// ObjCAutoreleasePoolStmt - This represents Objective-C's
+/// @autoreleasepool statement.
+class ObjCAutoreleasePoolStmt : public Stmt {
+ Stmt *SubStmt;
+ SourceLocation AtLoc;
+public:
+ ObjCAutoreleasePoolStmt(SourceLocation atLoc,
+ Stmt *subStmt)
+ : Stmt(ObjCAutoreleasePoolStmtClass),
+ SubStmt(subStmt), AtLoc(atLoc) {}
+
+ explicit ObjCAutoreleasePoolStmt(EmptyShell Empty) :
+ Stmt(ObjCAutoreleasePoolStmtClass, Empty) { }
+
+ const Stmt *getSubStmt() const { return SubStmt; }
+ Stmt *getSubStmt() { return SubStmt; }
+ void setSubStmt(Stmt *S) { SubStmt = S; }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(AtLoc, SubStmt->getLocEnd());
+ }
+
+ SourceLocation getAtLoc() const { return AtLoc; }
+ void setAtLoc(SourceLocation Loc) { AtLoc = Loc; }
+
+ static bool classof(const Stmt *T) {
+ return T->getStmtClass() == ObjCAutoreleasePoolStmtClass;
+ }
+ static bool classof(const ObjCAutoreleasePoolStmt *) { return true; }
+
+ child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
+};
+
+} // end namespace clang
+
+#endif
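A sketch of inspecting an ObjCAtTryStmt with the accessors above; describeAtTry is a hypothetical helper, not part of the header:

    #include "clang/AST/StmtObjC.h"
    #include "llvm/Support/raw_ostream.h"

    // Report the shape of an @try: how many @catch clauses it has, whether
    // one of them is a catch-all, and whether a @finally is present.
    static void describeAtTry(const clang::ObjCAtTryStmt *Try) {
      bool HasCatchAll = false;
      for (unsigned i = 0, e = Try->getNumCatchStmts(); i != e; ++i)
        if (const clang::ObjCAtCatchStmt *Catch = Try->getCatchStmt(i))
          if (Catch->hasEllipsis())
            HasCatchAll = true;
      llvm::errs() << Try->getNumCatchStmts() << " @catch clause(s), "
                   << (HasCatchAll ? "with" : "without") << " a catch-all, "
                   << (Try->getFinallyStmt() ? "with" : "without")
                   << " @finally\n";
    }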
diff --git a/contrib/llvm/tools/clang/include/clang/AST/StmtVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/StmtVisitor.h
new file mode 100644
index 0000000..38c4c02
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/StmtVisitor.h
@@ -0,0 +1,189 @@
+//===--- StmtVisitor.h - Visitor for Stmt subclasses ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the StmtVisitor and ConstStmtVisitor interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_STMTVISITOR_H
+#define LLVM_CLANG_AST_STMTVISITOR_H
+
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+
+namespace clang {
+
+template <typename T> struct make_ptr { typedef T *type; };
+template <typename T> struct make_const_ptr { typedef const T *type; };
+
+/// StmtVisitorBase - This class implements a simple visitor for Stmt
+/// subclasses. Since Expr derives from Stmt, this also includes support for
+/// visiting Exprs.
+template<template <typename> class Ptr, typename ImplClass, typename RetTy=void>
+class StmtVisitorBase {
+public:
+
+#define PTR(CLASS) typename Ptr<CLASS>::type
+#define DISPATCH(NAME, CLASS) \
+ return static_cast<ImplClass*>(this)->Visit ## NAME(static_cast<PTR(CLASS)>(S))
+
+ RetTy Visit(PTR(Stmt) S) {
+
+ // If we have a binary expr, dispatch to the subcode of the binop. A smart
+ // optimizer (e.g. LLVM) will fold this comparison into the switch stmt
+ // below.
+ if (PTR(BinaryOperator) BinOp = dyn_cast<BinaryOperator>(S)) {
+ switch (BinOp->getOpcode()) {
+ case BO_PtrMemD: DISPATCH(BinPtrMemD, BinaryOperator);
+ case BO_PtrMemI: DISPATCH(BinPtrMemI, BinaryOperator);
+ case BO_Mul: DISPATCH(BinMul, BinaryOperator);
+ case BO_Div: DISPATCH(BinDiv, BinaryOperator);
+ case BO_Rem: DISPATCH(BinRem, BinaryOperator);
+ case BO_Add: DISPATCH(BinAdd, BinaryOperator);
+ case BO_Sub: DISPATCH(BinSub, BinaryOperator);
+ case BO_Shl: DISPATCH(BinShl, BinaryOperator);
+ case BO_Shr: DISPATCH(BinShr, BinaryOperator);
+
+ case BO_LT: DISPATCH(BinLT, BinaryOperator);
+ case BO_GT: DISPATCH(BinGT, BinaryOperator);
+ case BO_LE: DISPATCH(BinLE, BinaryOperator);
+ case BO_GE: DISPATCH(BinGE, BinaryOperator);
+ case BO_EQ: DISPATCH(BinEQ, BinaryOperator);
+ case BO_NE: DISPATCH(BinNE, BinaryOperator);
+
+ case BO_And: DISPATCH(BinAnd, BinaryOperator);
+ case BO_Xor: DISPATCH(BinXor, BinaryOperator);
+ case BO_Or : DISPATCH(BinOr, BinaryOperator);
+ case BO_LAnd: DISPATCH(BinLAnd, BinaryOperator);
+ case BO_LOr : DISPATCH(BinLOr, BinaryOperator);
+ case BO_Assign: DISPATCH(BinAssign, BinaryOperator);
+ case BO_MulAssign: DISPATCH(BinMulAssign, CompoundAssignOperator);
+ case BO_DivAssign: DISPATCH(BinDivAssign, CompoundAssignOperator);
+ case BO_RemAssign: DISPATCH(BinRemAssign, CompoundAssignOperator);
+ case BO_AddAssign: DISPATCH(BinAddAssign, CompoundAssignOperator);
+ case BO_SubAssign: DISPATCH(BinSubAssign, CompoundAssignOperator);
+ case BO_ShlAssign: DISPATCH(BinShlAssign, CompoundAssignOperator);
+ case BO_ShrAssign: DISPATCH(BinShrAssign, CompoundAssignOperator);
+ case BO_AndAssign: DISPATCH(BinAndAssign, CompoundAssignOperator);
+ case BO_OrAssign: DISPATCH(BinOrAssign, CompoundAssignOperator);
+ case BO_XorAssign: DISPATCH(BinXorAssign, CompoundAssignOperator);
+ case BO_Comma: DISPATCH(BinComma, BinaryOperator);
+ }
+ } else if (PTR(UnaryOperator) UnOp = dyn_cast<UnaryOperator>(S)) {
+ switch (UnOp->getOpcode()) {
+ case UO_PostInc: DISPATCH(UnaryPostInc, UnaryOperator);
+ case UO_PostDec: DISPATCH(UnaryPostDec, UnaryOperator);
+ case UO_PreInc: DISPATCH(UnaryPreInc, UnaryOperator);
+ case UO_PreDec: DISPATCH(UnaryPreDec, UnaryOperator);
+ case UO_AddrOf: DISPATCH(UnaryAddrOf, UnaryOperator);
+ case UO_Deref: DISPATCH(UnaryDeref, UnaryOperator);
+ case UO_Plus: DISPATCH(UnaryPlus, UnaryOperator);
+ case UO_Minus: DISPATCH(UnaryMinus, UnaryOperator);
+ case UO_Not: DISPATCH(UnaryNot, UnaryOperator);
+ case UO_LNot: DISPATCH(UnaryLNot, UnaryOperator);
+ case UO_Real: DISPATCH(UnaryReal, UnaryOperator);
+ case UO_Imag: DISPATCH(UnaryImag, UnaryOperator);
+ case UO_Extension: DISPATCH(UnaryExtension, UnaryOperator);
+ }
+ }
+
+ // Top switch stmt: dispatch to VisitFooStmt for each FooStmt.
+ switch (S->getStmtClass()) {
+ default: llvm_unreachable("Unknown stmt kind!");
+#define ABSTRACT_STMT(STMT)
+#define STMT(CLASS, PARENT) \
+ case Stmt::CLASS ## Class: DISPATCH(CLASS, CLASS);
+#include "clang/AST/StmtNodes.inc"
+ }
+ }
+
+ // If the implementation chooses not to implement a certain visit method, fall
+ // back on VisitExpr or whatever else is the superclass.
+#define STMT(CLASS, PARENT) \
+ RetTy Visit ## CLASS(PTR(CLASS) S) { DISPATCH(PARENT, PARENT); }
+#include "clang/AST/StmtNodes.inc"
+
+ // If the implementation doesn't implement binary operator methods, fall back
+ // on VisitBinaryOperator.
+#define BINOP_FALLBACK(NAME) \
+ RetTy VisitBin ## NAME(PTR(BinaryOperator) S) { \
+ DISPATCH(BinaryOperator, BinaryOperator); \
+ }
+ BINOP_FALLBACK(PtrMemD) BINOP_FALLBACK(PtrMemI)
+ BINOP_FALLBACK(Mul) BINOP_FALLBACK(Div) BINOP_FALLBACK(Rem)
+ BINOP_FALLBACK(Add) BINOP_FALLBACK(Sub) BINOP_FALLBACK(Shl)
+ BINOP_FALLBACK(Shr)
+
+ BINOP_FALLBACK(LT) BINOP_FALLBACK(GT) BINOP_FALLBACK(LE)
+ BINOP_FALLBACK(GE) BINOP_FALLBACK(EQ) BINOP_FALLBACK(NE)
+ BINOP_FALLBACK(And) BINOP_FALLBACK(Xor) BINOP_FALLBACK(Or)
+ BINOP_FALLBACK(LAnd) BINOP_FALLBACK(LOr)
+
+ BINOP_FALLBACK(Assign)
+ BINOP_FALLBACK(Comma)
+#undef BINOP_FALLBACK
+
+ // If the implementation doesn't implement compound assignment operator
+ // methods, fall back on VisitCompoundAssignOperator.
+#define CAO_FALLBACK(NAME) \
+ RetTy VisitBin ## NAME(PTR(CompoundAssignOperator) S) { \
+ DISPATCH(CompoundAssignOperator, CompoundAssignOperator); \
+ }
+ CAO_FALLBACK(MulAssign) CAO_FALLBACK(DivAssign) CAO_FALLBACK(RemAssign)
+ CAO_FALLBACK(AddAssign) CAO_FALLBACK(SubAssign) CAO_FALLBACK(ShlAssign)
+ CAO_FALLBACK(ShrAssign) CAO_FALLBACK(AndAssign) CAO_FALLBACK(OrAssign)
+ CAO_FALLBACK(XorAssign)
+#undef CAO_FALLBACK
+
+ // If the implementation doesn't implement unary operator methods, fall back
+ // on VisitUnaryOperator.
+#define UNARYOP_FALLBACK(NAME) \
+ RetTy VisitUnary ## NAME(PTR(UnaryOperator) S) { \
+ DISPATCH(UnaryOperator, UnaryOperator); \
+ }
+ UNARYOP_FALLBACK(PostInc) UNARYOP_FALLBACK(PostDec)
+ UNARYOP_FALLBACK(PreInc) UNARYOP_FALLBACK(PreDec)
+ UNARYOP_FALLBACK(AddrOf) UNARYOP_FALLBACK(Deref)
+
+ UNARYOP_FALLBACK(Plus) UNARYOP_FALLBACK(Minus)
+ UNARYOP_FALLBACK(Not) UNARYOP_FALLBACK(LNot)
+ UNARYOP_FALLBACK(Real) UNARYOP_FALLBACK(Imag)
+ UNARYOP_FALLBACK(Extension)
+#undef UNARYOP_FALLBACK
+
+ // Base case, ignore it. :)
+ RetTy VisitStmt(PTR(Stmt) Node) { return RetTy(); }
+
+#undef PTR
+#undef DISPATCH
+};
+
+/// StmtVisitor - This class implements a simple visitor for Stmt subclasses.
+/// Since Expr derives from Stmt, this also includes support for visiting Exprs.
+///
+/// This class does not preserve constness of Stmt pointers (see also
+/// ConstStmtVisitor).
+template<typename ImplClass, typename RetTy=void>
+class StmtVisitor
+ : public StmtVisitorBase<make_ptr, ImplClass, RetTy> {};
+
+/// ConstStmtVisitor - This class implements a simple visitor for Stmt
+/// subclasses. Since Expr derives from Stmt, this also includes support for
+/// visiting Exprs.
+///
+/// This class preserves constness of Stmt pointers (see also StmtVisitor).
+template<typename ImplClass, typename RetTy=void>
+class ConstStmtVisitor
+ : public StmtVisitorBase<make_const_ptr, ImplClass, RetTy> {};
+
+} // end namespace clang
+
+#endif
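A minimal sketch of a ConstStmtVisitor subclass (ReturnChecker is a hypothetical name, not part of the header). The visitor classifies a single node; it does not recurse on its own:

    #include "clang/AST/StmtVisitor.h"

    // Returns 1 for a return statement, 0 for anything else. Node kinds
    // without an explicit Visit method fall back to VisitStmt via the
    // generated dispatch methods above.
    class ReturnChecker
      : public clang::ConstStmtVisitor<ReturnChecker, unsigned> {
    public:
      unsigned VisitReturnStmt(const clang::ReturnStmt *S) { return 1; }
      unsigned VisitStmt(const clang::Stmt *S) { return 0; }
    };

    // Usage: unsigned IsReturn = ReturnChecker().Visit(SomeStmt);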
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h b/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h
new file mode 100644
index 0000000..65f5460
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/TemplateBase.h
@@ -0,0 +1,657 @@
+//===-- TemplateBase.h - Core classes for C++ templates ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides definitions which are common for all kinds of
+// template representation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_TEMPLATEBASE_H
+#define LLVM_CLANG_AST_TEMPLATEBASE_H
+
+#include "clang/AST/Type.h"
+#include "clang/AST/TemplateName.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace llvm {
+ class FoldingSetNodeID;
+}
+
+namespace clang {
+
+class Decl;
+class DiagnosticBuilder;
+class Expr;
+struct PrintingPolicy;
+class TypeSourceInfo;
+
+/// \brief Represents a template argument within a class template
+/// specialization.
+class TemplateArgument {
+public:
+ /// \brief The kind of template argument we're storing.
+ enum ArgKind {
+ /// \brief Represents an empty template argument, e.g., one that has not
+ /// been deduced.
+ Null = 0,
+ /// The template argument is a type. Its value is stored in the
+ /// TypeOrValue field.
+ Type,
+ /// The template argument is a declaration that was provided for a pointer
+ /// or reference non-type template parameter.
+ Declaration,
+ /// The template argument is an integral value stored in an llvm::APSInt
+ /// that was provided for an integral non-type template parameter.
+ Integral,
+ /// The template argument is a template name that was provided for a
+ /// template template parameter.
+ Template,
+ /// The template argument is a pack expansion of a template name that was
+ /// provided for a template template parameter.
+ TemplateExpansion,
+ /// The template argument is a value- or type-dependent expression
+ /// stored in an Expr*.
+ Expression,
+ /// The template argument is actually a parameter pack. Arguments are stored
+ /// in the Args struct.
+ Pack
+ };
+
+private:
+ /// \brief The kind of template argument we're storing.
+ unsigned Kind;
+
+ union {
+ uintptr_t TypeOrValue;
+ struct {
+ char Value[sizeof(llvm::APSInt)];
+ void *Type;
+ } Integer;
+ struct {
+ const TemplateArgument *Args;
+ unsigned NumArgs;
+ } Args;
+ struct {
+ void *Name;
+ unsigned NumExpansions;
+ } TemplateArg;
+ };
+
+ TemplateArgument(TemplateName, bool); // DO NOT USE
+
+public:
+ /// \brief Construct an empty, invalid template argument.
+ TemplateArgument() : Kind(Null), TypeOrValue(0) { }
+
+ /// \brief Construct a template type argument.
+ TemplateArgument(QualType T) : Kind(Type) {
+ TypeOrValue = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
+ }
+
+ /// \brief Construct a template argument that refers to a
+ /// declaration, which is either an external declaration or a
+ /// template declaration.
+ TemplateArgument(Decl *D) : Kind(Declaration) {
+ TypeOrValue = reinterpret_cast<uintptr_t>(D);
+ }
+
+ /// \brief Construct an integral constant template argument.
+ TemplateArgument(const llvm::APSInt &Value, QualType Type) : Kind(Integral) {
+ // FIXME: Large integral values will get leaked. Do something
+ // similar to what we did with IntegerLiteral.
+ new (Integer.Value) llvm::APSInt(Value);
+ Integer.Type = Type.getAsOpaquePtr();
+ }
+
+ /// \brief Construct a template argument that is a template.
+ ///
+ /// This form of template argument is generally used for template template
+ /// parameters. However, the template name could be a dependent template
+ /// name that ends up being instantiated to a function template whose address
+ /// is taken.
+ ///
+ /// \param Name The template name.
+ TemplateArgument(TemplateName Name) : Kind(Template)
+ {
+ TemplateArg.Name = Name.getAsVoidPointer();
+ TemplateArg.NumExpansions = 0;
+ }
+
+ /// \brief Construct a template argument that is a template pack expansion.
+ ///
+ /// This form of template argument is generally used for template template
+ /// parameters. However, the template name could be a dependent template
+ /// name that ends up being instantiated to a function template whose address
+ /// is taken.
+ ///
+ /// \param Name The template name.
+ ///
+ /// \param NumExpansions The number of expansions that will be generated by
+ /// instantiating this pack expansion, if known.
+ TemplateArgument(TemplateName Name, llvm::Optional<unsigned> NumExpansions)
+ : Kind(TemplateExpansion)
+ {
+ TemplateArg.Name = Name.getAsVoidPointer();
+ if (NumExpansions)
+ TemplateArg.NumExpansions = *NumExpansions + 1;
+ else
+ TemplateArg.NumExpansions = 0;
+ }
+
+ /// \brief Construct a template argument that is an expression.
+ ///
+ /// This form of template argument only occurs in template argument
+ /// lists used for dependent types and for expressions; it will not
+ /// occur in a non-dependent, canonical template argument list.
+ TemplateArgument(Expr *E) : Kind(Expression) {
+ TypeOrValue = reinterpret_cast<uintptr_t>(E);
+ }
+
+ /// \brief Construct a template argument that is a template argument pack.
+ ///
+ /// We assume that storage for the template arguments provided
+ /// outlives the TemplateArgument itself.
+ TemplateArgument(const TemplateArgument *Args, unsigned NumArgs) : Kind(Pack){
+ this->Args.Args = Args;
+ this->Args.NumArgs = NumArgs;
+ }
+
+ /// \brief Copy constructor for a template argument.
+ TemplateArgument(const TemplateArgument &Other) : Kind(Other.Kind) {
+ // FIXME: Large integral values will get leaked. Do something
+ // similar to what we did with IntegerLiteral.
+ if (Kind == Integral) {
+ new (Integer.Value) llvm::APSInt(*Other.getAsIntegral());
+ Integer.Type = Other.Integer.Type;
+ } else if (Kind == Pack) {
+ Args.NumArgs = Other.Args.NumArgs;
+ Args.Args = Other.Args.Args;
+ } else if (Kind == Template || Kind == TemplateExpansion) {
+ TemplateArg.Name = Other.TemplateArg.Name;
+ TemplateArg.NumExpansions = Other.TemplateArg.NumExpansions;
+ } else
+ TypeOrValue = Other.TypeOrValue;
+ }
+
+ TemplateArgument& operator=(const TemplateArgument& Other) {
+ using llvm::APSInt;
+
+ if (Kind == Other.Kind && Kind == Integral) {
+ // Copy integral values.
+ *this->getAsIntegral() = *Other.getAsIntegral();
+ Integer.Type = Other.Integer.Type;
+ return *this;
+ }
+
+ // Destroy the current integral value, if that's what we're holding.
+ if (Kind == Integral)
+ getAsIntegral()->~APSInt();
+
+ Kind = Other.Kind;
+
+ if (Other.Kind == Integral) {
+ new (Integer.Value) llvm::APSInt(*Other.getAsIntegral());
+ Integer.Type = Other.Integer.Type;
+ } else if (Other.Kind == Pack) {
+ Args.NumArgs = Other.Args.NumArgs;
+ Args.Args = Other.Args.Args;
+ } else if (Kind == Template || Kind == TemplateExpansion) {
+ TemplateArg.Name = Other.TemplateArg.Name;
+ TemplateArg.NumExpansions = Other.TemplateArg.NumExpansions;
+ } else {
+ TypeOrValue = Other.TypeOrValue;
+ }
+
+ return *this;
+ }
+
+ ~TemplateArgument() {
+ using llvm::APSInt;
+
+ if (Kind == Integral)
+ getAsIntegral()->~APSInt();
+ }
+
+ /// \brief Create a new template argument pack by copying the given set of
+ /// template arguments.
+ static TemplateArgument CreatePackCopy(ASTContext &Context,
+ const TemplateArgument *Args,
+ unsigned NumArgs);
+
+ /// \brief Return the kind of stored template argument.
+ ArgKind getKind() const { return (ArgKind)Kind; }
+
+ /// \brief Determine whether this template argument has no value.
+ bool isNull() const { return Kind == Null; }
+
+ /// \brief Whether this template argument is dependent on a template
+ /// parameter such that its result can change from one instantiation to
+ /// another.
+ bool isDependent() const;
+
+ /// \brief Whether this template argument is dependent on a template
+ /// parameter.
+ bool isInstantiationDependent() const;
+
+ /// \brief Whether this template argument contains an unexpanded
+ /// parameter pack.
+ bool containsUnexpandedParameterPack() const;
+
+ /// \brief Determine whether this template argument is a pack expansion.
+ bool isPackExpansion() const;
+
+ /// \brief Retrieve the template argument as a type.
+ QualType getAsType() const {
+ if (Kind != Type)
+ return QualType();
+
+ return QualType::getFromOpaquePtr(reinterpret_cast<void*>(TypeOrValue));
+ }
+
+ /// \brief Retrieve the template argument as a declaration.
+ Decl *getAsDecl() const {
+ if (Kind != Declaration)
+ return 0;
+ return reinterpret_cast<Decl *>(TypeOrValue);
+ }
+
+ /// \brief Retrieve the template argument as a template name.
+ TemplateName getAsTemplate() const {
+ if (Kind != Template)
+ return TemplateName();
+
+ return TemplateName::getFromVoidPointer(TemplateArg.Name);
+ }
+
+ /// \brief Retrieve the template argument as a template name; if the argument
+ /// is a pack expansion, return the pattern as a template name.
+ TemplateName getAsTemplateOrTemplatePattern() const {
+ if (Kind != Template && Kind != TemplateExpansion)
+ return TemplateName();
+
+ return TemplateName::getFromVoidPointer(TemplateArg.Name);
+ }
+
+ /// \brief Retrieve the number of expansions that a template template argument
+ /// expansion will produce, if known.
+ llvm::Optional<unsigned> getNumTemplateExpansions() const;
+
+ /// \brief Retrieve the template argument as an integral value.
+ llvm::APSInt *getAsIntegral() {
+ if (Kind != Integral)
+ return 0;
+ return reinterpret_cast<llvm::APSInt*>(&Integer.Value[0]);
+ }
+
+ const llvm::APSInt *getAsIntegral() const {
+ return const_cast<TemplateArgument*>(this)->getAsIntegral();
+ }
+
+ /// \brief Retrieve the type of the integral value.
+ QualType getIntegralType() const {
+ if (Kind != Integral)
+ return QualType();
+
+ return QualType::getFromOpaquePtr(Integer.Type);
+ }
+
+ void setIntegralType(QualType T) {
+ assert(Kind == Integral &&
+ "Cannot set the integral type of a non-integral template argument");
+ Integer.Type = T.getAsOpaquePtr();
+ }
+
+ /// \brief Retrieve the template argument as an expression.
+ Expr *getAsExpr() const {
+ if (Kind != Expression)
+ return 0;
+
+ return reinterpret_cast<Expr *>(TypeOrValue);
+ }
+
+ /// \brief Iterator that traverses the elements of a template argument pack.
+ typedef const TemplateArgument * pack_iterator;
+
+ /// \brief Iterator referencing the first argument of a template argument
+ /// pack.
+ pack_iterator pack_begin() const {
+ assert(Kind == Pack);
+ return Args.Args;
+ }
+
+ /// \brief Iterator referencing one past the last argument of a template
+ /// argument pack.
+ pack_iterator pack_end() const {
+ assert(Kind == Pack);
+ return Args.Args + Args.NumArgs;
+ }
+
+ /// \brief The number of template arguments in the given template argument
+ /// pack.
+ unsigned pack_size() const {
+ assert(Kind == Pack);
+ return Args.NumArgs;
+ }
+
+ /// Determines whether two template arguments are superficially the
+ /// same.
+ bool structurallyEquals(const TemplateArgument &Other) const;
+
+ /// \brief When the template argument is a pack expansion, returns
+ /// the pattern of the pack expansion.
+ TemplateArgument getPackExpansionPattern() const;
+
+ /// \brief Print this template argument to the given output stream.
+ void print(const PrintingPolicy &Policy, raw_ostream &Out) const;
+
+ /// \brief Used to insert TemplateArguments into FoldingSets.
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) const;
+};
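+
+// For illustration, a sketch of how the pack accessors above are typically
+// used; 'Arg' is a hypothetical TemplateArgument whose kind is known to be
+// Pack, and the loop simply inspects each element of the pack.
+//
+//   void visitPack(const TemplateArgument &Arg) {
+//     for (TemplateArgument::pack_iterator P = Arg.pack_begin(),
+//                                          PEnd = Arg.pack_end();
+//          P != PEnd; ++P) {
+//       if (P->getKind() == TemplateArgument::Type)
+//         (void)P->getAsType(); // a type argument within the pack
+//     }
+//   }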
+
+/// Location information for a TemplateArgument.
+struct TemplateArgumentLocInfo {
+private:
+ union {
+ Expr *Expression;
+ TypeSourceInfo *Declarator;
+ struct {
+ // FIXME: We'd like to just use the qualifier in the TemplateName,
+ // but template arguments get canonicalized too quickly.
+ NestedNameSpecifier *Qualifier;
+ void *QualifierLocData;
+ unsigned TemplateNameLoc;
+ unsigned EllipsisLoc;
+ } Template;
+ };
+
+public:
+ TemplateArgumentLocInfo();
+
+ TemplateArgumentLocInfo(TypeSourceInfo *TInfo) : Declarator(TInfo) {}
+
+ TemplateArgumentLocInfo(Expr *E) : Expression(E) {}
+
+ TemplateArgumentLocInfo(NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateNameLoc,
+ SourceLocation EllipsisLoc)
+ {
+ Template.Qualifier = QualifierLoc.getNestedNameSpecifier();
+ Template.QualifierLocData = QualifierLoc.getOpaqueData();
+ Template.TemplateNameLoc = TemplateNameLoc.getRawEncoding();
+ Template.EllipsisLoc = EllipsisLoc.getRawEncoding();
+ }
+
+ TypeSourceInfo *getAsTypeSourceInfo() const {
+ return Declarator;
+ }
+
+ Expr *getAsExpr() const {
+ return Expression;
+ }
+
+ NestedNameSpecifierLoc getTemplateQualifierLoc() const {
+ return NestedNameSpecifierLoc(Template.Qualifier,
+ Template.QualifierLocData);
+ }
+
+ SourceLocation getTemplateNameLoc() const {
+ return SourceLocation::getFromRawEncoding(Template.TemplateNameLoc);
+ }
+
+ SourceLocation getTemplateEllipsisLoc() const {
+ return SourceLocation::getFromRawEncoding(Template.EllipsisLoc);
+ }
+};
+
+/// Location wrapper for a TemplateArgument. TemplateArgument is to
+/// TemplateArgumentLoc as Type is to TypeLoc.
+class TemplateArgumentLoc {
+ TemplateArgument Argument;
+ TemplateArgumentLocInfo LocInfo;
+
+public:
+ TemplateArgumentLoc() {}
+
+ TemplateArgumentLoc(const TemplateArgument &Argument,
+ TemplateArgumentLocInfo Opaque)
+ : Argument(Argument), LocInfo(Opaque) {
+ }
+
+ TemplateArgumentLoc(const TemplateArgument &Argument, TypeSourceInfo *TInfo)
+ : Argument(Argument), LocInfo(TInfo) {
+ assert(Argument.getKind() == TemplateArgument::Type);
+ }
+
+ TemplateArgumentLoc(const TemplateArgument &Argument, Expr *E)
+ : Argument(Argument), LocInfo(E) {
+ assert(Argument.getKind() == TemplateArgument::Expression);
+ }
+
+ TemplateArgumentLoc(const TemplateArgument &Argument,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateNameLoc,
+ SourceLocation EllipsisLoc = SourceLocation())
+ : Argument(Argument), LocInfo(QualifierLoc, TemplateNameLoc, EllipsisLoc) {
+ assert(Argument.getKind() == TemplateArgument::Template ||
+ Argument.getKind() == TemplateArgument::TemplateExpansion);
+ }
+
+ /// \brief Fetches the primary location of the argument.
+ SourceLocation getLocation() const {
+ if (Argument.getKind() == TemplateArgument::Template ||
+ Argument.getKind() == TemplateArgument::TemplateExpansion)
+ return getTemplateNameLoc();
+
+ return getSourceRange().getBegin();
+ }
+
+ /// \brief Fetches the full source range of the argument.
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ const TemplateArgument &getArgument() const {
+ return Argument;
+ }
+
+ TemplateArgumentLocInfo getLocInfo() const {
+ return LocInfo;
+ }
+
+ TypeSourceInfo *getTypeSourceInfo() const {
+ assert(Argument.getKind() == TemplateArgument::Type);
+ return LocInfo.getAsTypeSourceInfo();
+ }
+
+ Expr *getSourceExpression() const {
+ assert(Argument.getKind() == TemplateArgument::Expression);
+ return LocInfo.getAsExpr();
+ }
+
+ Expr *getSourceDeclExpression() const {
+ assert(Argument.getKind() == TemplateArgument::Declaration);
+ return LocInfo.getAsExpr();
+ }
+
+ NestedNameSpecifierLoc getTemplateQualifierLoc() const {
+ assert(Argument.getKind() == TemplateArgument::Template ||
+ Argument.getKind() == TemplateArgument::TemplateExpansion);
+ return LocInfo.getTemplateQualifierLoc();
+ }
+
+ SourceLocation getTemplateNameLoc() const {
+ assert(Argument.getKind() == TemplateArgument::Template ||
+ Argument.getKind() == TemplateArgument::TemplateExpansion);
+ return LocInfo.getTemplateNameLoc();
+ }
+
+ SourceLocation getTemplateEllipsisLoc() const {
+ assert(Argument.getKind() == TemplateArgument::TemplateExpansion);
+ return LocInfo.getTemplateEllipsisLoc();
+ }
+
+ /// \brief When the template argument is a pack expansion, returns
+ /// the pattern of the pack expansion.
+ ///
+ /// \param Ellipsis Will be set to the location of the ellipsis.
+ ///
+ /// \param NumExpansions Will be set to the number of expansions that will
+ /// be generated from this pack expansion, if known a priori.
+ TemplateArgumentLoc getPackExpansionPattern(SourceLocation &Ellipsis,
+ llvm::Optional<unsigned> &NumExpansions,
+ ASTContext &Context) const;
+};
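+
+// For illustration, a sketch of wrapping a type template argument together
+// with its source information; 'TInfo' is a hypothetical TypeSourceInfo*
+// describing the type as written.
+//
+//   TemplateArgument Arg(TInfo->getType());
+//   TemplateArgumentLoc ArgLoc(Arg, TInfo);
+//   SourceLocation Loc = ArgLoc.getLocation(); // points at the written type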
+
+/// A convenient class for passing around template argument
+/// information. Designed to be passed by reference.
+class TemplateArgumentListInfo {
+ SmallVector<TemplateArgumentLoc, 8> Arguments;
+ SourceLocation LAngleLoc;
+ SourceLocation RAngleLoc;
+
+ // This can leak if used in an AST node; use ASTTemplateArgumentListInfo
+ // instead.
+ void* operator new(size_t bytes, ASTContext& C);
+
+public:
+ TemplateArgumentListInfo() {}
+
+ TemplateArgumentListInfo(SourceLocation LAngleLoc,
+ SourceLocation RAngleLoc)
+ : LAngleLoc(LAngleLoc), RAngleLoc(RAngleLoc) {}
+
+ SourceLocation getLAngleLoc() const { return LAngleLoc; }
+ SourceLocation getRAngleLoc() const { return RAngleLoc; }
+
+ void setLAngleLoc(SourceLocation Loc) { LAngleLoc = Loc; }
+ void setRAngleLoc(SourceLocation Loc) { RAngleLoc = Loc; }
+
+ unsigned size() const { return Arguments.size(); }
+
+ const TemplateArgumentLoc *getArgumentArray() const {
+ return Arguments.data();
+ }
+
+ const TemplateArgumentLoc &operator[](unsigned I) const {
+ return Arguments[I];
+ }
+
+ void addArgument(const TemplateArgumentLoc &Loc) {
+ Arguments.push_back(Loc);
+ }
+};
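+
+// For illustration, a sketch of collecting explicitly written template
+// arguments; 'LAngle', 'RAngle', and 'ArgLoc' are hypothetical values
+// produced by a parser.
+//
+//   TemplateArgumentListInfo TemplateArgs(LAngle, RAngle);
+//   TemplateArgs.addArgument(ArgLoc);
+//   for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+//     (void)TemplateArgs[I].getArgument();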
+
+/// \brief Represents an explicit template argument list in C++, e.g.,
+/// the "<int>" in "sort<int>".
+/// This is safe to use inside an AST node, in contrast with
+/// TemplateArgumentListInfo.
+struct ASTTemplateArgumentListInfo {
+ /// \brief The source location of the left angle bracket ('<').
+ SourceLocation LAngleLoc;
+
+ /// \brief The source location of the right angle bracket ('>').
+ SourceLocation RAngleLoc;
+
+ /// \brief The number of template arguments in TemplateArgs.
+ /// The actual template arguments (if any) are stored after the
+ /// ASTTemplateArgumentListInfo structure.
+ unsigned NumTemplateArgs;
+
+ /// \brief Retrieve the template arguments
+ TemplateArgumentLoc *getTemplateArgs() {
+ return reinterpret_cast<TemplateArgumentLoc *> (this + 1);
+ }
+
+ /// \brief Retrieve the template arguments
+ const TemplateArgumentLoc *getTemplateArgs() const {
+ return reinterpret_cast<const TemplateArgumentLoc *> (this + 1);
+ }
+
+ const TemplateArgumentLoc &operator[](unsigned I) const {
+ return getTemplateArgs()[I];
+ }
+
+ static const ASTTemplateArgumentListInfo *Create(ASTContext &C,
+ const TemplateArgumentListInfo &List);
+
+ void initializeFrom(const TemplateArgumentListInfo &List);
+ void initializeFrom(const TemplateArgumentListInfo &List,
+ bool &Dependent, bool &InstantiationDependent,
+ bool &ContainsUnexpandedParameterPack);
+ void copyInto(TemplateArgumentListInfo &List) const;
+ static std::size_t sizeFor(unsigned NumTemplateArgs);
+};
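+
+// For illustration, a sketch of storing a TemplateArgumentListInfo in the
+// AST and reading it back; 'Context' and 'TemplateArgs' are hypothetical.
+//
+//   const ASTTemplateArgumentListInfo *Stored =
+//       ASTTemplateArgumentListInfo::Create(Context, TemplateArgs);
+//   for (unsigned I = 0; I != Stored->NumTemplateArgs; ++I)
+//     (void)(*Stored)[I]; // each element is a TemplateArgumentLoc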
+
+/// \brief Extends ASTTemplateArgumentListInfo with the source location
+/// information for the template keyword; this is used as part of the
+/// representation of qualified identifiers, such as S<T>::template apply<T>.
+struct ASTTemplateKWAndArgsInfo : public ASTTemplateArgumentListInfo {
+ typedef ASTTemplateArgumentListInfo Base;
+
+ // NOTE: the source location of the (optional) template keyword is
+ // stored after all template arguments.
+
+ /// \brief Get the source location of the template keyword.
+ SourceLocation getTemplateKeywordLoc() const {
+ return *reinterpret_cast<const SourceLocation*>
+ (getTemplateArgs() + NumTemplateArgs);
+ }
+
+ /// \brief Sets the source location of the template keyword.
+ void setTemplateKeywordLoc(SourceLocation TemplateKWLoc) {
+ *reinterpret_cast<SourceLocation*>
+ (getTemplateArgs() + NumTemplateArgs) = TemplateKWLoc;
+ }
+
+ static const ASTTemplateKWAndArgsInfo*
+ Create(ASTContext &C, SourceLocation TemplateKWLoc,
+ const TemplateArgumentListInfo &List);
+
+ void initializeFrom(SourceLocation TemplateKWLoc,
+ const TemplateArgumentListInfo &List);
+ void initializeFrom(SourceLocation TemplateKWLoc,
+ const TemplateArgumentListInfo &List,
+ bool &Dependent, bool &InstantiationDependent,
+ bool &ContainsUnexpandedParameterPack);
+ void initializeFrom(SourceLocation TemplateKWLoc);
+
+ static std::size_t sizeFor(unsigned NumTemplateArgs);
+};
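+
+// For illustration, the template keyword location stored after the argument
+// array is reached through the accessor above; 'Info' is a hypothetical,
+// fully initialized ASTTemplateKWAndArgsInfo*.
+//
+//   SourceLocation KWLoc = Info->getTemplateKeywordLoc();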
+
+const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ const TemplateArgument &Arg);
+
+inline TemplateSpecializationType::iterator
+ TemplateSpecializationType::end() const {
+ return getArgs() + getNumArgs();
+}
+
+inline DependentTemplateSpecializationType::iterator
+ DependentTemplateSpecializationType::end() const {
+ return getArgs() + getNumArgs();
+}
+
+inline const TemplateArgument &
+ TemplateSpecializationType::getArg(unsigned Idx) const {
+ assert(Idx < getNumArgs() && "Template argument out of range");
+ return getArgs()[Idx];
+}
+
+inline const TemplateArgument &
+ DependentTemplateSpecializationType::getArg(unsigned Idx) const {
+ assert(Idx < getNumArgs() && "Template argument out of range");
+ return getArgs()[Idx];
+}
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TemplateName.h b/contrib/llvm/tools/clang/include/clang/AST/TemplateName.h
new file mode 100644
index 0000000..7dc75b1
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/TemplateName.h
@@ -0,0 +1,558 @@
+//===--- TemplateName.h - C++ Template Name Representation ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TemplateName interface and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_TEMPLATENAME_H
+#define LLVM_CLANG_AST_TEMPLATENAME_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "clang/Basic/OperatorKinds.h"
+
+namespace clang {
+
+class ASTContext;
+class DependentTemplateName;
+class DiagnosticBuilder;
+class IdentifierInfo;
+class NestedNameSpecifier;
+class OverloadedTemplateStorage;
+struct PrintingPolicy;
+class QualifiedTemplateName;
+class NamedDecl;
+class SubstTemplateTemplateParmStorage;
+class SubstTemplateTemplateParmPackStorage;
+class TemplateArgument;
+class TemplateDecl;
+class TemplateTemplateParmDecl;
+
+/// \brief Implementation class used to describe either a set of overloaded
+/// template names or an already-substituted template template parameter pack.
+class UncommonTemplateNameStorage {
+protected:
+ enum Kind {
+ Overloaded,
+ SubstTemplateTemplateParm,
+ SubstTemplateTemplateParmPack
+ };
+
+ union {
+ struct {
+ /// \brief The kind of uncommon template name stored (a Kind value).
+ unsigned Kind : 2;
+
+ /// \brief The number of stored templates or template arguments,
+ /// depending on which subclass we have.
+ unsigned Size : 30;
+ } Bits;
+
+ void *PointerAlignment;
+ };
+
+ UncommonTemplateNameStorage(Kind kind, unsigned size) {
+ Bits.Kind = kind;
+ Bits.Size = size;
+ }
+
+public:
+ unsigned size() const { return Bits.Size; }
+
+ OverloadedTemplateStorage *getAsOverloadedStorage() {
+ return Bits.Kind == Overloaded
+ ? reinterpret_cast<OverloadedTemplateStorage *>(this)
+ : 0;
+ }
+
+ SubstTemplateTemplateParmStorage *getAsSubstTemplateTemplateParm() {
+ return Bits.Kind == SubstTemplateTemplateParm
+ ? reinterpret_cast<SubstTemplateTemplateParmStorage *>(this)
+ : 0;
+ }
+
+ SubstTemplateTemplateParmPackStorage *getAsSubstTemplateTemplateParmPack() {
+ return Bits.Kind == SubstTemplateTemplateParmPack
+ ? reinterpret_cast<SubstTemplateTemplateParmPackStorage *>(this)
+ : 0;
+ }
+};
+
+/// \brief A structure for storing the information associated with an
+/// overloaded template name.
+class OverloadedTemplateStorage : public UncommonTemplateNameStorage {
+ friend class ASTContext;
+
+ OverloadedTemplateStorage(unsigned size)
+ : UncommonTemplateNameStorage(Overloaded, size) { }
+
+ NamedDecl **getStorage() {
+ return reinterpret_cast<NamedDecl **>(this + 1);
+ }
+ NamedDecl * const *getStorage() const {
+ return reinterpret_cast<NamedDecl *const *>(this + 1);
+ }
+
+public:
+ typedef NamedDecl *const *iterator;
+
+ iterator begin() const { return getStorage(); }
+ iterator end() const { return getStorage() + size(); }
+};
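+
+// For illustration, a sketch of enumerating the overloaded function templates
+// behind a template name; 'Name' is a hypothetical TemplateName.
+//
+//   if (OverloadedTemplateStorage *Ovl = Name.getAsOverloadedTemplate())
+//     for (OverloadedTemplateStorage::iterator I = Ovl->begin(),
+//                                              E = Ovl->end();
+//          I != E; ++I)
+//       (void)*I; // each element is a NamedDecl*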
+
+/// \brief A structure for storing an already-substituted template template
+/// parameter pack.
+///
+/// This kind of template name occurs when the parameter pack has been
+/// provided with a template template argument pack in a context where its
+/// enclosing pack expansion could not be fully expanded.
+class SubstTemplateTemplateParmPackStorage
+ : public UncommonTemplateNameStorage, public llvm::FoldingSetNode
+{
+ TemplateTemplateParmDecl *Parameter;
+ const TemplateArgument *Arguments;
+
+public:
+ SubstTemplateTemplateParmPackStorage(TemplateTemplateParmDecl *Parameter,
+ unsigned Size,
+ const TemplateArgument *Arguments)
+ : UncommonTemplateNameStorage(SubstTemplateTemplateParmPack, Size),
+ Parameter(Parameter), Arguments(Arguments) { }
+
+ /// \brief Retrieve the template template parameter pack being substituted.
+ TemplateTemplateParmDecl *getParameterPack() const {
+ return Parameter;
+ }
+
+ /// \brief Retrieve the template template argument pack with which this
+ /// parameter was substituted.
+ TemplateArgument getArgumentPack() const;
+
+ void Profile(llvm::FoldingSetNodeID &ID, ASTContext &Context);
+
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ ASTContext &Context,
+ TemplateTemplateParmDecl *Parameter,
+ const TemplateArgument &ArgPack);
+};
+
+/// \brief Represents a C++ template name within the type system.
+///
+/// A C++ template name refers to a template within the C++ type
+/// system. In most cases, a template name is simply a reference to a
+/// class template, e.g.
+///
+/// \code
+/// template<typename T> class X { };
+///
+/// X<int> xi;
+/// \endcode
+///
+/// Here, the 'X' in \c X<int> is a template name that refers to the
+/// declaration of the class template X, above. Template names can
+/// also refer to function templates, C++0x template aliases, etc.
+///
+/// Some template names are dependent. For example, consider:
+///
+/// \code
+/// template<typename MetaFun, typename T1, typename T2> struct apply2 {
+/// typedef typename MetaFun::template apply<T1, T2>::type type;
+/// };
+/// \endcode
+///
+/// Here, "apply" is treated as a template name within the typename
+/// specifier in the typedef. "apply" is a nested template, and can
+/// only be understood in the context of a particular instantiation, once
+/// MetaFun is known.
+class TemplateName {
+ typedef llvm::PointerUnion4<TemplateDecl *,
+ UncommonTemplateNameStorage *,
+ QualifiedTemplateName *,
+ DependentTemplateName *> StorageType;
+
+ StorageType Storage;
+
+ explicit TemplateName(void *Ptr) {
+ Storage = StorageType::getFromOpaqueValue(Ptr);
+ }
+
+public:
+ /// \brief Kind of name that is actually stored.
+ enum NameKind {
+ /// \brief A single template declaration.
+ Template,
+ /// \brief A set of overloaded template declarations.
+ OverloadedTemplate,
+ /// \brief A qualified template name, where the qualification is kept
+ /// to describe the source code as written.
+ QualifiedTemplate,
+ /// \brief A dependent template name that has not been resolved to a
+ /// template (or set of templates).
+ DependentTemplate,
+ /// \brief A template template parameter that has been substituted
+ /// for some other template name.
+ SubstTemplateTemplateParm,
+ /// \brief A template template parameter pack that has been substituted for
+ /// a template template argument pack, but has not yet been expanded into
+ /// individual arguments.
+ SubstTemplateTemplateParmPack
+ };
+
+ TemplateName() : Storage() { }
+ explicit TemplateName(TemplateDecl *Template) : Storage(Template) { }
+ explicit TemplateName(OverloadedTemplateStorage *Storage)
+ : Storage(Storage) { }
+ explicit TemplateName(SubstTemplateTemplateParmStorage *Storage);
+ explicit TemplateName(SubstTemplateTemplateParmPackStorage *Storage)
+ : Storage(Storage) { }
+ explicit TemplateName(QualifiedTemplateName *Qual) : Storage(Qual) { }
+ explicit TemplateName(DependentTemplateName *Dep) : Storage(Dep) { }
+
+ /// \brief Determine whether this template name is NULL.
+ bool isNull() const { return Storage.isNull(); }
+
+ /// \brief Get the kind of name that is actually stored.
+ NameKind getKind() const;
+
+ /// \brief Retrieve the underlying template declaration that
+ /// this template name refers to, if known.
+ ///
+ /// \returns The template declaration that this template name refers
+ /// to, if any. If the template name does not refer to a specific
+ /// declaration because it is a dependent name, or if it refers to a
+ /// set of function templates, returns NULL.
+ TemplateDecl *getAsTemplateDecl() const;
+
+ /// \brief Retrieve the underlying, overloaded function template
+ /// declarations that this template name refers to, if known.
+ ///
+ /// \returns The set of overloaded function templates that this template
+ /// name refers to, if known. If the template name does not refer to a
+ /// specific set of function templates because it is a dependent name or
+ /// refers to a single template, returns NULL.
+ OverloadedTemplateStorage *getAsOverloadedTemplate() const {
+ if (UncommonTemplateNameStorage *Uncommon =
+ Storage.dyn_cast<UncommonTemplateNameStorage *>())
+ return Uncommon->getAsOverloadedStorage();
+
+ return 0;
+ }
+
+ /// \brief Retrieve the substituted template template parameter, if
+ /// known.
+ ///
+ /// \returns The storage for the substituted template template parameter,
+ /// if known. Otherwise, returns NULL.
+ SubstTemplateTemplateParmStorage *getAsSubstTemplateTemplateParm() const {
+ if (UncommonTemplateNameStorage *uncommon =
+ Storage.dyn_cast<UncommonTemplateNameStorage *>())
+ return uncommon->getAsSubstTemplateTemplateParm();
+
+ return 0;
+ }
+
+ /// \brief Retrieve the substituted template template parameter pack, if
+ /// known.
+ ///
+ /// \returns The storage for the substituted template template parameter pack,
+ /// if known. Otherwise, returns NULL.
+ SubstTemplateTemplateParmPackStorage *
+ getAsSubstTemplateTemplateParmPack() const {
+ if (UncommonTemplateNameStorage *Uncommon =
+ Storage.dyn_cast<UncommonTemplateNameStorage *>())
+ return Uncommon->getAsSubstTemplateTemplateParmPack();
+
+ return 0;
+ }
+
+ /// \brief Retrieve the underlying qualified template name
+ /// structure, if any.
+ QualifiedTemplateName *getAsQualifiedTemplateName() const {
+ return Storage.dyn_cast<QualifiedTemplateName *>();
+ }
+
+ /// \brief Retrieve the underlying dependent template name
+ /// structure, if any.
+ DependentTemplateName *getAsDependentTemplateName() const {
+ return Storage.dyn_cast<DependentTemplateName *>();
+ }
+
+ TemplateName getUnderlying() const;
+
+ /// \brief Determines whether this is a dependent template name.
+ bool isDependent() const;
+
+ /// \brief Determines whether this is a template name that somehow
+ /// depends on a template parameter.
+ bool isInstantiationDependent() const;
+
+ /// \brief Determines whether this template name contains an
+ /// unexpanded parameter pack (for C++0x variadic templates).
+ bool containsUnexpandedParameterPack() const;
+
+ /// \brief Print the template name.
+ ///
+ /// \param OS the output stream to which the template name will be
+ /// printed.
+ ///
+ /// \param SuppressNNS if true, don't print the
+ /// nested-name-specifier that precedes the template name (if it has
+ /// one).
+ void print(raw_ostream &OS, const PrintingPolicy &Policy,
+ bool SuppressNNS = false) const;
+
+ /// \brief Debugging aid that dumps the template name to standard
+ /// error.
+ void dump() const;
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ ID.AddPointer(Storage.getOpaqueValue());
+ }
+
+ /// \brief Retrieve the template name as a void pointer.
+ void *getAsVoidPointer() const { return Storage.getOpaqueValue(); }
+
+ /// \brief Build a template name from a void pointer.
+ static TemplateName getFromVoidPointer(void *Ptr) {
+ return TemplateName(Ptr);
+ }
+};
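+
+// For illustration, a sketch of inspecting a template name; 'Name' is a
+// hypothetical TemplateName taken from a TemplateSpecializationType.
+//
+//   if (TemplateDecl *TD = Name.getAsTemplateDecl()) {
+//     (void)TD; // resolved to a single template declaration
+//   } else if (Name.isDependent()) {
+//     // e.g. MetaFun::template apply; cannot be resolved until
+//     // instantiation.
+//   }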
+
+/// Insertion operator for diagnostics. This allows sending TemplateNames
+/// into a diagnostic with <<.
+const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ TemplateName N);
+
+/// \brief A structure for storing the information associated with a
+/// substituted template template parameter.
+class SubstTemplateTemplateParmStorage
+ : public UncommonTemplateNameStorage, public llvm::FoldingSetNode {
+ friend class ASTContext;
+
+ TemplateTemplateParmDecl *Parameter;
+ TemplateName Replacement;
+
+ SubstTemplateTemplateParmStorage(TemplateTemplateParmDecl *parameter,
+ TemplateName replacement)
+ : UncommonTemplateNameStorage(SubstTemplateTemplateParm, 0),
+ Parameter(parameter), Replacement(replacement) {}
+
+public:
+ TemplateTemplateParmDecl *getParameter() const { return Parameter; }
+ TemplateName getReplacement() const { return Replacement; }
+
+ void Profile(llvm::FoldingSetNodeID &ID);
+
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ TemplateTemplateParmDecl *parameter,
+ TemplateName replacement);
+};
+
+inline TemplateName::TemplateName(SubstTemplateTemplateParmStorage *Storage)
+ : Storage(Storage) { }
+
+inline TemplateName TemplateName::getUnderlying() const {
+ if (SubstTemplateTemplateParmStorage *subst
+ = getAsSubstTemplateTemplateParm())
+ return subst->getReplacement().getUnderlying();
+ return *this;
+}
+
+/// \brief Represents a template name that was expressed as a
+/// qualified name.
+///
+/// This kind of template name refers to a template name that was
+/// preceded by a nested name specifier, e.g., \c std::vector. Here,
+/// the nested name specifier is "std::" and the template name is the
+/// declaration for "vector". The QualifiedTemplateName class is only
+/// used to provide "sugar" for template names that were expressed
+/// with a qualified name, and has no semantic meaning. In this
+/// manner, it is to TemplateName what ElaboratedType is to Type,
+/// providing extra syntactic sugar for downstream clients.
+class QualifiedTemplateName : public llvm::FoldingSetNode {
+ /// \brief The nested name specifier that qualifies the template name.
+ ///
+ /// The bit is used to indicate whether the "template" keyword was
+ /// present before the template name itself. Note that the
+ /// "template" keyword is always redundant in this case (otherwise,
+ /// the template name would be a dependent name and we would express
+ /// this name with DependentTemplateName).
+ llvm::PointerIntPair<NestedNameSpecifier *, 1> Qualifier;
+
+ /// \brief The template declaration or set of overloaded function templates
+ /// that this qualified name refers to.
+ TemplateDecl *Template;
+
+ friend class ASTContext;
+
+ QualifiedTemplateName(NestedNameSpecifier *NNS, bool TemplateKeyword,
+ TemplateDecl *Template)
+ : Qualifier(NNS, TemplateKeyword? 1 : 0),
+ Template(Template) { }
+
+public:
+ /// \brief Return the nested name specifier that qualifies this name.
+ NestedNameSpecifier *getQualifier() const { return Qualifier.getPointer(); }
+
+ /// \brief Whether the template name was prefixed by the "template"
+ /// keyword.
+ bool hasTemplateKeyword() const { return Qualifier.getInt(); }
+
+ /// \brief The template declaration that this qualified name refers
+ /// to.
+ TemplateDecl *getDecl() const { return Template; }
+
+ /// \brief The template declaration to which this qualified name
+ /// refers.
+ TemplateDecl *getTemplateDecl() const { return Template; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getQualifier(), hasTemplateKeyword(), getTemplateDecl());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, NestedNameSpecifier *NNS,
+ bool TemplateKeyword, TemplateDecl *Template) {
+ ID.AddPointer(NNS);
+ ID.AddBoolean(TemplateKeyword);
+ ID.AddPointer(Template);
+ }
+};
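+
+// For illustration, a sketch of recovering the written qualifier from a
+// qualified template name; 'Name' is a hypothetical TemplateName.
+//
+//   if (QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName()) {
+//     NestedNameSpecifier *NNS = QTN->getQualifier(); // e.g. "std::"
+//     (void)NNS;
+//     (void)QTN->getTemplateDecl();                   // e.g. "vector"
+//   }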
+
+/// \brief Represents a dependent template name that cannot be
+/// resolved prior to template instantiation.
+///
+/// This kind of template name refers to a dependent template name,
+/// including its nested name specifier (if any). For example,
+/// DependentTemplateName can refer to "MetaFun::template apply",
+/// where "MetaFun::" is the nested name specifier and "apply" is the
+/// template name referenced. The "template" keyword is implied.
+class DependentTemplateName : public llvm::FoldingSetNode {
+ /// \brief The nested name specifier that qualifies the template
+ /// name.
+ ///
+ /// The bit stored in this qualifier describes whether the name below
+ /// is interpreted as an IdentifierInfo pointer (when clear) or as an
+ /// overloaded operator kind (when set).
+ llvm::PointerIntPair<NestedNameSpecifier *, 1, bool> Qualifier;
+
+ /// \brief The dependent template name.
+ union {
+ /// \brief The identifier template name.
+ ///
+ /// Only valid when the bit on \c Qualifier is clear.
+ const IdentifierInfo *Identifier;
+
+ /// \brief The overloaded operator name.
+ ///
+ /// Only valid when the bit on \c Qualifier is set.
+ OverloadedOperatorKind Operator;
+ };
+
+ /// \brief The canonical template name to which this dependent
+ /// template name refers.
+ ///
+ /// The canonical template name for a dependent template name is
+ /// another dependent template name whose nested name specifier is
+ /// canonical.
+ TemplateName CanonicalTemplateName;
+
+ friend class ASTContext;
+
+ DependentTemplateName(NestedNameSpecifier *Qualifier,
+ const IdentifierInfo *Identifier)
+ : Qualifier(Qualifier, false), Identifier(Identifier),
+ CanonicalTemplateName(this) { }
+
+ DependentTemplateName(NestedNameSpecifier *Qualifier,
+ const IdentifierInfo *Identifier,
+ TemplateName Canon)
+ : Qualifier(Qualifier, false), Identifier(Identifier),
+ CanonicalTemplateName(Canon) { }
+
+ DependentTemplateName(NestedNameSpecifier *Qualifier,
+ OverloadedOperatorKind Operator)
+ : Qualifier(Qualifier, true), Operator(Operator),
+ CanonicalTemplateName(this) { }
+
+ DependentTemplateName(NestedNameSpecifier *Qualifier,
+ OverloadedOperatorKind Operator,
+ TemplateName Canon)
+ : Qualifier(Qualifier, true), Operator(Operator),
+ CanonicalTemplateName(Canon) { }
+
+public:
+ /// \brief Return the nested name specifier that qualifies this name.
+ NestedNameSpecifier *getQualifier() const { return Qualifier.getPointer(); }
+
+ /// \brief Determine whether this template name refers to an identifier.
+ bool isIdentifier() const { return !Qualifier.getInt(); }
+
+ /// \brief Returns the identifier to which this template name refers.
+ const IdentifierInfo *getIdentifier() const {
+ assert(isIdentifier() && "Template name isn't an identifier?");
+ return Identifier;
+ }
+
+ /// \brief Determine whether this template name refers to an overloaded
+ /// operator.
+ bool isOverloadedOperator() const { return Qualifier.getInt(); }
+
+ /// \brief Return the overloaded operator to which this template name refers.
+ OverloadedOperatorKind getOperator() const {
+ assert(isOverloadedOperator() &&
+ "Template name isn't an overloaded operator?");
+ return Operator;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ if (isIdentifier())
+ Profile(ID, getQualifier(), getIdentifier());
+ else
+ Profile(ID, getQualifier(), getOperator());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, NestedNameSpecifier *NNS,
+ const IdentifierInfo *Identifier) {
+ ID.AddPointer(NNS);
+ ID.AddBoolean(false);
+ ID.AddPointer(Identifier);
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, NestedNameSpecifier *NNS,
+ OverloadedOperatorKind Operator) {
+ ID.AddPointer(NNS);
+ ID.AddBoolean(true);
+ ID.AddInteger(Operator);
+ }
+};
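+
+// For illustration, a sketch of examining a dependent template name; 'Name'
+// is a hypothetical TemplateName.
+//
+//   if (DependentTemplateName *DTN = Name.getAsDependentTemplateName()) {
+//     if (DTN->isIdentifier())
+//       (void)DTN->getIdentifier(); // e.g. "apply" in MetaFun::template apply
+//     else
+//       (void)DTN->getOperator();   // an overloaded operator kind
+//   }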
+
+} // end namespace clang.
+
+namespace llvm {
+
+/// \brief The clang::TemplateName class is effectively a pointer.
+template<>
+class PointerLikeTypeTraits<clang::TemplateName> {
+public:
+ static inline void *getAsVoidPointer(clang::TemplateName TN) {
+ return TN.getAsVoidPointer();
+ }
+
+ static inline clang::TemplateName getFromVoidPointer(void *Ptr) {
+ return clang::TemplateName::getFromVoidPointer(Ptr);
+ }
+
+ // No bits are available!
+ enum { NumLowBitsAvailable = 0 };
+};
+
+} // end namespace llvm.
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/Type.h b/contrib/llvm/tools/clang/include/clang/AST/Type.h
new file mode 100644
index 0000000..7bd367c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/Type.h
@@ -0,0 +1,4992 @@
+//===--- Type.h - C Language Family Type Representation ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Type interface and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_TYPE_H
+#define LLVM_CLANG_AST_TYPE_H
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/ExceptionSpecificationType.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/Linkage.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/Visibility.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/TemplateName.h"
+#include "llvm/Support/type_traits.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "clang/Basic/LLVM.h"
+
+namespace clang {
+ enum {
+ TypeAlignmentInBits = 4,
+ TypeAlignment = 1 << TypeAlignmentInBits
+ };
+ class Type;
+ class ExtQuals;
+ class QualType;
+}
+
+namespace llvm {
+ template <typename T>
+ class PointerLikeTypeTraits;
+ template<>
+ class PointerLikeTypeTraits< ::clang::Type*> {
+ public:
+ static inline void *getAsVoidPointer(::clang::Type *P) { return P; }
+ static inline ::clang::Type *getFromVoidPointer(void *P) {
+ return static_cast< ::clang::Type*>(P);
+ }
+ enum { NumLowBitsAvailable = clang::TypeAlignmentInBits };
+ };
+ template<>
+ class PointerLikeTypeTraits< ::clang::ExtQuals*> {
+ public:
+ static inline void *getAsVoidPointer(::clang::ExtQuals *P) { return P; }
+ static inline ::clang::ExtQuals *getFromVoidPointer(void *P) {
+ return static_cast< ::clang::ExtQuals*>(P);
+ }
+ enum { NumLowBitsAvailable = clang::TypeAlignmentInBits };
+ };
+
+ template <>
+ struct isPodLike<clang::QualType> { static const bool value = true; };
+}
+
+namespace clang {
+ class ASTContext;
+ class TypedefNameDecl;
+ class TemplateDecl;
+ class TemplateTypeParmDecl;
+ class NonTypeTemplateParmDecl;
+ class TemplateTemplateParmDecl;
+ class TagDecl;
+ class RecordDecl;
+ class CXXRecordDecl;
+ class EnumDecl;
+ class FieldDecl;
+ class ObjCInterfaceDecl;
+ class ObjCProtocolDecl;
+ class ObjCMethodDecl;
+ class UnresolvedUsingTypenameDecl;
+ class Expr;
+ class Stmt;
+ class SourceLocation;
+ class StmtIteratorBase;
+ class TemplateArgument;
+ class TemplateArgumentLoc;
+ class TemplateArgumentListInfo;
+ class ElaboratedType;
+ class ExtQuals;
+ class ExtQualsTypeCommonBase;
+ struct PrintingPolicy;
+
+ template <typename> class CanQual;
+ typedef CanQual<Type> CanQualType;
+
+ // Provide forward declarations for all of the *Type classes
+#define TYPE(Class, Base) class Class##Type;
+#include "clang/AST/TypeNodes.def"
+
+/// Qualifiers - The collection of all type qualifiers we support.
+/// Clang supports six independent qualifiers:
+/// * C99: const, volatile, and restrict
+/// * Embedded C (TR18037): address spaces
+/// * Objective C: the GC attributes (none, weak, or strong) and the
+/// ARC lifetime qualifiers
+class Qualifiers {
+public:
+ enum TQ { // NOTE: These flags must be kept in sync with DeclSpec::TQ.
+ Const = 0x1,
+ Restrict = 0x2,
+ Volatile = 0x4,
+ CVRMask = Const | Volatile | Restrict
+ };
+
+ enum GC {
+ GCNone = 0,
+ Weak,
+ Strong
+ };
+
+ enum ObjCLifetime {
+ /// There is no lifetime qualification on this type.
+ OCL_None,
+
+ /// This object can be modified without requiring retains or
+ /// releases.
+ OCL_ExplicitNone,
+
+ /// Assigning into this object requires the old value to be
+ /// released and the new value to be retained. The timing of the
+ /// release of the old value is inexact: it may be moved to
+ /// immediately after the last known point where the value is
+ /// live.
+ OCL_Strong,
+
+ /// Reading or writing from this object requires a barrier call.
+ OCL_Weak,
+
+ /// Assigning into this object requires a lifetime extension.
+ OCL_Autoreleasing
+ };
+
+ enum {
+ /// The maximum supported address space number.
+ /// 24 bits should be enough for anyone.
+ MaxAddressSpace = 0xffffffu,
+
+ /// The width of the "fast" qualifier mask.
+ FastWidth = 3,
+
+ /// The fast qualifier mask.
+ FastMask = (1 << FastWidth) - 1
+ };
+
+ Qualifiers() : Mask(0) {}
+
+ static Qualifiers fromFastMask(unsigned Mask) {
+ Qualifiers Qs;
+ Qs.addFastQualifiers(Mask);
+ return Qs;
+ }
+
+ static Qualifiers fromCVRMask(unsigned CVR) {
+ Qualifiers Qs;
+ Qs.addCVRQualifiers(CVR);
+ return Qs;
+ }
+
+ // Deserialize qualifiers from an opaque representation.
+ static Qualifiers fromOpaqueValue(unsigned opaque) {
+ Qualifiers Qs;
+ Qs.Mask = opaque;
+ return Qs;
+ }
+
+ // Serialize these qualifiers into an opaque representation.
+ unsigned getAsOpaqueValue() const {
+ return Mask;
+ }
+
+ bool hasConst() const { return Mask & Const; }
+ void setConst(bool flag) {
+ Mask = (Mask & ~Const) | (flag ? Const : 0);
+ }
+ void removeConst() { Mask &= ~Const; }
+ void addConst() { Mask |= Const; }
+
+ bool hasVolatile() const { return Mask & Volatile; }
+ void setVolatile(bool flag) {
+ Mask = (Mask & ~Volatile) | (flag ? Volatile : 0);
+ }
+ void removeVolatile() { Mask &= ~Volatile; }
+ void addVolatile() { Mask |= Volatile; }
+
+ bool hasRestrict() const { return Mask & Restrict; }
+ void setRestrict(bool flag) {
+ Mask = (Mask & ~Restrict) | (flag ? Restrict : 0);
+ }
+ void removeRestrict() { Mask &= ~Restrict; }
+ void addRestrict() { Mask |= Restrict; }
+
+ bool hasCVRQualifiers() const { return getCVRQualifiers(); }
+ unsigned getCVRQualifiers() const { return Mask & CVRMask; }
+ void setCVRQualifiers(unsigned mask) {
+ assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits");
+ Mask = (Mask & ~CVRMask) | mask;
+ }
+ void removeCVRQualifiers(unsigned mask) {
+ assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits");
+ Mask &= ~mask;
+ }
+ void removeCVRQualifiers() {
+ removeCVRQualifiers(CVRMask);
+ }
+ void addCVRQualifiers(unsigned mask) {
+ assert(!(mask & ~CVRMask) && "bitmask contains non-CVR bits");
+ Mask |= mask;
+ }
+
+ bool hasObjCGCAttr() const { return Mask & GCAttrMask; }
+ GC getObjCGCAttr() const { return GC((Mask & GCAttrMask) >> GCAttrShift); }
+ void setObjCGCAttr(GC type) {
+ Mask = (Mask & ~GCAttrMask) | (type << GCAttrShift);
+ }
+ void removeObjCGCAttr() { setObjCGCAttr(GCNone); }
+ void addObjCGCAttr(GC type) {
+ assert(type);
+ setObjCGCAttr(type);
+ }
+ Qualifiers withoutObjCGCAttr() const {
+ Qualifiers qs = *this;
+ qs.removeObjCGCAttr();
+ return qs;
+ }
+ Qualifiers withoutObjCLifetime() const {
+ Qualifiers qs = *this;
+ qs.removeObjCLifetime();
+ return qs;
+ }
+
+ bool hasObjCLifetime() const { return Mask & LifetimeMask; }
+ ObjCLifetime getObjCLifetime() const {
+ return ObjCLifetime((Mask & LifetimeMask) >> LifetimeShift);
+ }
+ void setObjCLifetime(ObjCLifetime type) {
+ Mask = (Mask & ~LifetimeMask) | (type << LifetimeShift);
+ }
+ void removeObjCLifetime() { setObjCLifetime(OCL_None); }
+ void addObjCLifetime(ObjCLifetime type) {
+ assert(type);
+ assert(!hasObjCLifetime());
+ Mask |= (type << LifetimeShift);
+ }
+
+ /// True if the lifetime is neither None nor ExplicitNone.
+ bool hasNonTrivialObjCLifetime() const {
+ ObjCLifetime lifetime = getObjCLifetime();
+ return (lifetime > OCL_ExplicitNone);
+ }
+
+ /// True if the lifetime is either strong or weak.
+ bool hasStrongOrWeakObjCLifetime() const {
+ ObjCLifetime lifetime = getObjCLifetime();
+ return (lifetime == OCL_Strong || lifetime == OCL_Weak);
+ }
+
+ bool hasAddressSpace() const { return Mask & AddressSpaceMask; }
+ unsigned getAddressSpace() const { return Mask >> AddressSpaceShift; }
+ void setAddressSpace(unsigned space) {
+ assert(space <= MaxAddressSpace);
+ Mask = (Mask & ~AddressSpaceMask)
+ | (((uint32_t) space) << AddressSpaceShift);
+ }
+ void removeAddressSpace() { setAddressSpace(0); }
+ void addAddressSpace(unsigned space) {
+ assert(space);
+ setAddressSpace(space);
+ }
+
+ // Fast qualifiers are those that can be allocated directly
+ // on a QualType object.
+ bool hasFastQualifiers() const { return getFastQualifiers(); }
+ unsigned getFastQualifiers() const { return Mask & FastMask; }
+ void setFastQualifiers(unsigned mask) {
+ assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits");
+ Mask = (Mask & ~FastMask) | mask;
+ }
+ void removeFastQualifiers(unsigned mask) {
+ assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits");
+ Mask &= ~mask;
+ }
+ void removeFastQualifiers() {
+ removeFastQualifiers(FastMask);
+ }
+ void addFastQualifiers(unsigned mask) {
+ assert(!(mask & ~FastMask) && "bitmask contains non-fast qualifier bits");
+ Mask |= mask;
+ }
+
+ /// hasNonFastQualifiers - Return true if the set contains any
+ /// qualifiers which require an ExtQuals node to be allocated.
+ bool hasNonFastQualifiers() const { return Mask & ~FastMask; }
+ Qualifiers getNonFastQualifiers() const {
+ Qualifiers Quals = *this;
+ Quals.setFastQualifiers(0);
+ return Quals;
+ }
+
+ /// hasQualifiers - Return true if the set contains any qualifiers.
+ bool hasQualifiers() const { return Mask; }
+ bool empty() const { return !Mask; }
+
+ /// \brief Add the qualifiers from the given set to this set.
+ void addQualifiers(Qualifiers Q) {
+ // If the other set doesn't have any non-boolean qualifiers, just
+ // bit-or it in.
+ if (!(Q.Mask & ~CVRMask))
+ Mask |= Q.Mask;
+ else {
+ Mask |= (Q.Mask & CVRMask);
+ if (Q.hasAddressSpace())
+ addAddressSpace(Q.getAddressSpace());
+ if (Q.hasObjCGCAttr())
+ addObjCGCAttr(Q.getObjCGCAttr());
+ if (Q.hasObjCLifetime())
+ addObjCLifetime(Q.getObjCLifetime());
+ }
+ }
+
+ /// \brief Add the qualifiers from the given set to this set, given that
+ /// they don't conflict.
+ void addConsistentQualifiers(Qualifiers qs) {
+ assert(getAddressSpace() == qs.getAddressSpace() ||
+ !hasAddressSpace() || !qs.hasAddressSpace());
+ assert(getObjCGCAttr() == qs.getObjCGCAttr() ||
+ !hasObjCGCAttr() || !qs.hasObjCGCAttr());
+ assert(getObjCLifetime() == qs.getObjCLifetime() ||
+ !hasObjCLifetime() || !qs.hasObjCLifetime());
+ Mask |= qs.Mask;
+ }
+
+ /// \brief Determines if these qualifiers compatibly include another set.
+ /// Generally this answers the question of whether an object with the other
+ /// qualifiers can be safely used as an object with these qualifiers.
+ bool compatiblyIncludes(Qualifiers other) const {
+ return
+ // Address spaces must match exactly.
+ getAddressSpace() == other.getAddressSpace() &&
+ // ObjC GC qualifiers can match, be added, or be removed, but can't be
+ // changed.
+ (getObjCGCAttr() == other.getObjCGCAttr() ||
+ !hasObjCGCAttr() || !other.hasObjCGCAttr()) &&
+ // ObjC lifetime qualifiers must match exactly.
+ getObjCLifetime() == other.getObjCLifetime() &&
+ // CVR qualifiers may subset.
+ (((Mask & CVRMask) | (other.Mask & CVRMask)) == (Mask & CVRMask));
+ }
+
+ /// \brief Determines if these qualifiers compatibly include another set of
+ /// qualifiers from the narrow perspective of Objective-C ARC lifetime.
+ ///
+ /// One set of Objective-C lifetime qualifiers compatibly includes the other
+ /// if the lifetime qualifiers match, or if both are non-__weak and the
+ /// including set also contains the 'const' qualifier.
+ bool compatiblyIncludesObjCLifetime(Qualifiers other) const {
+ if (getObjCLifetime() == other.getObjCLifetime())
+ return true;
+
+ if (getObjCLifetime() == OCL_Weak || other.getObjCLifetime() == OCL_Weak)
+ return false;
+
+ return hasConst();
+ }
+
+ bool isSupersetOf(Qualifiers Other) const;
+
+ /// \brief Determine whether this set of qualifiers is a strict superset of
+ /// another set of qualifiers, not considering qualifier compatibility.
+ bool isStrictSupersetOf(Qualifiers Other) const;
+
+ bool operator==(Qualifiers Other) const { return Mask == Other.Mask; }
+ bool operator!=(Qualifiers Other) const { return Mask != Other.Mask; }
+
+ operator bool() const { return hasQualifiers(); }
+
+ Qualifiers &operator+=(Qualifiers R) {
+ addQualifiers(R);
+ return *this;
+ }
+
+ // Union two qualifier sets. If an enumerated qualifier appears
+ // in both sets, use the one from the right.
+ friend Qualifiers operator+(Qualifiers L, Qualifiers R) {
+ L += R;
+ return L;
+ }
+
+ Qualifiers &operator-=(Qualifiers R) {
+ Mask = Mask & ~(R.Mask);
+ return *this;
+ }
+
+ /// \brief Compute the difference between two qualifier sets.
+ friend Qualifiers operator-(Qualifiers L, Qualifiers R) {
+ L -= R;
+ return L;
+ }
+
+ std::string getAsString() const;
+ std::string getAsString(const PrintingPolicy &Policy) const {
+ std::string Buffer;
+ getAsStringInternal(Buffer, Policy);
+ return Buffer;
+ }
+ void getAsStringInternal(std::string &S, const PrintingPolicy &Policy) const;
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(Mask);
+ }
+
+private:
+
+ // bits: |0 1 2|3 .. 4|5  ..  7|8   ...   31|
+ //       |C R V|GCAttr|Lifetime|AddressSpace|
+ uint32_t Mask;
+
+ static const uint32_t GCAttrMask = 0x18;
+ static const uint32_t GCAttrShift = 3;
+ static const uint32_t LifetimeMask = 0xE0;
+ static const uint32_t LifetimeShift = 5;
+ static const uint32_t AddressSpaceMask = ~(CVRMask|GCAttrMask|LifetimeMask);
+ static const uint32_t AddressSpaceShift = 8;
+};
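+
+// For illustration, a sketch of building and comparing qualifier sets with
+// the interface above; all values here are local and hypothetical.
+//
+//   Qualifiers Quals = Qualifiers::fromCVRMask(Qualifiers::Const);
+//   Quals.addVolatile();
+//   assert(Quals.hasConst() && Quals.hasVolatile());
+//   Qualifiers Other = Qualifiers::fromCVRMask(Qualifiers::Const);
+//   assert(Quals.compatiblyIncludes(Other)); // 'const volatile' includes 'const'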
+
+/// CallingConv - Specifies the calling convention that a function uses.
+enum CallingConv {
+ CC_Default,
+ CC_C, // __attribute__((cdecl))
+ CC_X86StdCall, // __attribute__((stdcall))
+ CC_X86FastCall, // __attribute__((fastcall))
+ CC_X86ThisCall, // __attribute__((thiscall))
+ CC_X86Pascal, // __attribute__((pascal))
+ CC_AAPCS, // __attribute__((pcs("aapcs")))
+ CC_AAPCS_VFP // __attribute__((pcs("aapcs-vfp")))
+};
+
+/// A std::pair-like structure for storing a qualified type split
+/// into its local qualifiers and its locally-unqualified type.
+struct SplitQualType {
+ /// The locally-unqualified type.
+ const Type *Ty;
+
+ /// The local qualifiers.
+ Qualifiers Quals;
+
+ SplitQualType() : Ty(0), Quals() {}
+ SplitQualType(const Type *ty, Qualifiers qs) : Ty(ty), Quals(qs) {}
+
+ SplitQualType getSingleStepDesugaredType() const; // defined at the end of this file
+
+ // Make llvm::tie work.
+ operator std::pair<const Type *,Qualifiers>() const {
+ return std::pair<const Type *,Qualifiers>(Ty, Quals);
+ }
+
+ friend bool operator==(SplitQualType a, SplitQualType b) {
+ return a.Ty == b.Ty && a.Quals == b.Quals;
+ }
+ friend bool operator!=(SplitQualType a, SplitQualType b) {
+ return a.Ty != b.Ty || a.Quals != b.Quals;
+ }
+};
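+
+// For illustration, a sketch of splitting a qualified type into its two
+// halves; 'QT' is a hypothetical QualType.
+//
+//   SplitQualType Split = QT.split();
+//   const Type *Ty = Split.Ty;      // the locally-unqualified type
+//   Qualifiers Quals = Split.Quals; // the local qualifiers
+//   (void)Ty; (void)Quals;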
+
+/// QualType - For efficiency, we don't store CV-qualified types as nodes on
+/// their own: instead each reference to a type stores the qualifiers. This
+/// greatly reduces the number of nodes we need to allocate for types (for
+/// example we only need one for 'int', 'const int', 'volatile int',
+/// 'const volatile int', etc).
+///
+/// As an added efficiency bonus, instead of making this a pair, we
+/// just store the two bits we care about in the low bits of the
+/// pointer. To handle the packing/unpacking, we make QualType be a
+/// simple wrapper class that acts like a smart pointer. A third bit
+/// indicates whether there are extended qualifiers present, in which
+/// case the pointer points to a special structure.
+class QualType {
+ // Thankfully, these are efficiently composable.
+ llvm::PointerIntPair<llvm::PointerUnion<const Type*,const ExtQuals*>,
+ Qualifiers::FastWidth> Value;
+
+ const ExtQuals *getExtQualsUnsafe() const {
+ return Value.getPointer().get<const ExtQuals*>();
+ }
+
+ const Type *getTypePtrUnsafe() const {
+ return Value.getPointer().get<const Type*>();
+ }
+
+ const ExtQualsTypeCommonBase *getCommonPtr() const {
+ assert(!isNull() && "Cannot retrieve a NULL type pointer");
+ uintptr_t CommonPtrVal
+ = reinterpret_cast<uintptr_t>(Value.getOpaqueValue());
+ CommonPtrVal &= ~(uintptr_t)((1 << TypeAlignmentInBits) - 1);
+ return reinterpret_cast<ExtQualsTypeCommonBase*>(CommonPtrVal);
+ }
+
+ friend class QualifierCollector;
+public:
+ QualType() {}
+
+ QualType(const Type *Ptr, unsigned Quals)
+ : Value(Ptr, Quals) {}
+ QualType(const ExtQuals *Ptr, unsigned Quals)
+ : Value(Ptr, Quals) {}
+
+ unsigned getLocalFastQualifiers() const { return Value.getInt(); }
+ void setLocalFastQualifiers(unsigned Quals) { Value.setInt(Quals); }
+
+ /// Retrieves a pointer to the underlying (unqualified) type.
+ /// This should really return a const Type, but it's not worth
+ /// changing all the users right now.
+ ///
+ /// This function requires that the type not be NULL. If the type might be
+ /// NULL, use the (slightly less efficient) \c getTypePtrOrNull().
+ const Type *getTypePtr() const;
+
+ const Type *getTypePtrOrNull() const;
+
+ /// Retrieves a pointer to the name of the base type.
+ const IdentifierInfo *getBaseTypeIdentifier() const;
+
+ /// Divides a QualType into its unqualified type and a set of local
+ /// qualifiers.
+ SplitQualType split() const;
+
+ void *getAsOpaquePtr() const { return Value.getOpaqueValue(); }
+ static QualType getFromOpaquePtr(const void *Ptr) {
+ QualType T;
+ T.Value.setFromOpaqueValue(const_cast<void*>(Ptr));
+ return T;
+ }
+
+ const Type &operator*() const {
+ return *getTypePtr();
+ }
+
+ const Type *operator->() const {
+ return getTypePtr();
+ }
+
+ bool isCanonical() const;
+ bool isCanonicalAsParam() const;
+
+ /// isNull - Return true if this QualType doesn't point to a type yet.
+ bool isNull() const {
+ return Value.getPointer().isNull();
+ }
+
+ /// \brief Determine whether this particular QualType instance has the
+ /// "const" qualifier set, without looking through typedefs that may have
+ /// added "const" at a different level.
+ bool isLocalConstQualified() const {
+ return (getLocalFastQualifiers() & Qualifiers::Const);
+ }
+
+ /// \brief Determine whether this type is const-qualified.
+ bool isConstQualified() const;
+
+ /// \brief Determine whether this particular QualType instance has the
+ /// "restrict" qualifier set, without looking through typedefs that may have
+ /// added "restrict" at a different level.
+ bool isLocalRestrictQualified() const {
+ return (getLocalFastQualifiers() & Qualifiers::Restrict);
+ }
+
+ /// \brief Determine whether this type is restrict-qualified.
+ bool isRestrictQualified() const;
+
+ /// \brief Determine whether this particular QualType instance has the
+ /// "volatile" qualifier set, without looking through typedefs that may have
+ /// added "volatile" at a different level.
+ bool isLocalVolatileQualified() const {
+ return (getLocalFastQualifiers() & Qualifiers::Volatile);
+ }
+
+ /// \brief Determine whether this type is volatile-qualified.
+ bool isVolatileQualified() const;
+
+ /// \brief Determine whether this particular QualType instance has any
+ /// qualifiers, without looking through any typedefs that might add
+ /// qualifiers at a different level.
+ bool hasLocalQualifiers() const {
+ return getLocalFastQualifiers() || hasLocalNonFastQualifiers();
+ }
+
+ /// \brief Determine whether this type has any qualifiers.
+ bool hasQualifiers() const;
+
+ /// \brief Determine whether this particular QualType instance has any
+ /// "non-fast" qualifiers, e.g., those that are stored in an ExtQualType
+ /// instance.
+ bool hasLocalNonFastQualifiers() const {
+ return Value.getPointer().is<const ExtQuals*>();
+ }
+
+ /// \brief Retrieve the set of qualifiers local to this particular QualType
+ /// instance, not including any qualifiers acquired through typedefs or
+ /// other sugar.
+ Qualifiers getLocalQualifiers() const;
+
+ /// \brief Retrieve the set of qualifiers applied to this type.
+ Qualifiers getQualifiers() const;
+
+ /// \brief Retrieve the set of CVR (const-volatile-restrict) qualifiers
+ /// local to this particular QualType instance, not including any qualifiers
+ /// acquired through typedefs or other sugar.
+ unsigned getLocalCVRQualifiers() const {
+ return getLocalFastQualifiers();
+ }
+
+ /// \brief Retrieve the set of CVR (const-volatile-restrict) qualifiers
+ /// applied to this type.
+ unsigned getCVRQualifiers() const;
+
+ bool isConstant(ASTContext& Ctx) const {
+ return QualType::isConstant(*this, Ctx);
+ }
+
+ /// \brief Determine whether this is a Plain Old Data (POD) type (C++ 3.9p10).
+ bool isPODType(ASTContext &Context) const;
+
+ /// isCXX11PODType() - Return true if this is a POD type according to the
+ /// more relaxed rules of the C++11 standard, regardless of the current
+ /// compilation's language.
+ /// (C++0x [basic.types]p9)
+ bool isCXX11PODType(ASTContext &Context) const;
+
+ /// isTrivialType - Return true if this is a trivial type
+ /// (C++0x [basic.types]p9)
+ bool isTrivialType(ASTContext &Context) const;
+
+ /// isTriviallyCopyableType - Return true if this is a trivially
+ /// copyable type (C++0x [basic.types]p9)
+ bool isTriviallyCopyableType(ASTContext &Context) const;
+
+ // Don't promise in the API that anything besides 'const' can be
+ // easily added.
+
+ /// addConst - add the specified type qualifier to this QualType.
+ void addConst() {
+ addFastQualifiers(Qualifiers::Const);
+ }
+ QualType withConst() const {
+ return withFastQualifiers(Qualifiers::Const);
+ }
+
+ /// addVolatile - add the specified type qualifier to this QualType.
+ void addVolatile() {
+ addFastQualifiers(Qualifiers::Volatile);
+ }
+ QualType withVolatile() const {
+ return withFastQualifiers(Qualifiers::Volatile);
+ }
+
+ /// Add the restrict qualifier to this QualType.
+ void addRestrict() {
+ addFastQualifiers(Qualifiers::Restrict);
+ }
+ QualType withRestrict() const {
+ return withFastQualifiers(Qualifiers::Restrict);
+ }
+
+ QualType withCVRQualifiers(unsigned CVR) const {
+ return withFastQualifiers(CVR);
+ }
+
+ void addFastQualifiers(unsigned TQs) {
+ assert(!(TQs & ~Qualifiers::FastMask)
+ && "non-fast qualifier bits set in mask!");
+ Value.setInt(Value.getInt() | TQs);
+ }
+
+ void removeLocalConst();
+ void removeLocalVolatile();
+ void removeLocalRestrict();
+ void removeLocalCVRQualifiers(unsigned Mask);
+
+ void removeLocalFastQualifiers() { Value.setInt(0); }
+ void removeLocalFastQualifiers(unsigned Mask) {
+ assert(!(Mask & ~Qualifiers::FastMask) && "mask has non-fast qualifiers");
+ Value.setInt(Value.getInt() & ~Mask);
+ }
+
+ // Creates a type with the given qualifiers in addition to any
+ // qualifiers already on this type.
+ QualType withFastQualifiers(unsigned TQs) const {
+ QualType T = *this;
+ T.addFastQualifiers(TQs);
+ return T;
+ }
+
+ // Creates a type with exactly the given fast qualifiers, removing
+ // any existing fast qualifiers.
+ QualType withExactLocalFastQualifiers(unsigned TQs) const {
+ return withoutLocalFastQualifiers().withFastQualifiers(TQs);
+ }
+
+ // Removes fast qualifiers, but leaves any extended qualifiers in place.
+ QualType withoutLocalFastQualifiers() const {
+ QualType T = *this;
+ T.removeLocalFastQualifiers();
+ return T;
+ }
+
+ QualType getCanonicalType() const;
+
+ /// \brief Return this type with all of the instance-specific qualifiers
+ /// removed, but without removing any qualifiers that may have been applied
+ /// through typedefs.
+ QualType getLocalUnqualifiedType() const { return QualType(getTypePtr(), 0); }
+
+ /// \brief Retrieve the unqualified variant of the given type,
+ /// removing as little sugar as possible.
+ ///
+ /// This routine looks through various kinds of sugar to find the
+ /// least-desugared type that is unqualified. For example, given:
+ ///
+ /// \code
+ /// typedef int Integer;
+ /// typedef const Integer CInteger;
+ /// typedef CInteger DifferenceType;
+ /// \endcode
+ ///
+ /// Executing \c getUnqualifiedType() on the type \c DifferenceType will
+ /// desugar until we hit the type \c Integer, which has no qualifiers on it.
+ ///
+ /// The resulting type might still be qualified if it's an array
+ /// type. To strip qualifiers even from within an array type, use
+ /// ASTContext::getUnqualifiedArrayType.
+ inline QualType getUnqualifiedType() const;
+
+ /// getSplitUnqualifiedType - Retrieve the unqualified variant of the
+ /// given type, removing as little sugar as possible.
+ ///
+ /// Like getUnqualifiedType(), but also returns the set of
+ /// qualifiers that were built up.
+ ///
+ /// The resulting type might still be qualified if it's an array
+ /// type. To strip qualifiers even from within an array type, use
+ /// ASTContext::getUnqualifiedArrayType.
+ inline SplitQualType getSplitUnqualifiedType() const;
+
+ /// \brief Determine whether this type is more qualified than the other
+ /// given type, requiring exact equality for non-CVR qualifiers.
+ bool isMoreQualifiedThan(QualType Other) const;
+
+ /// \brief Determine whether this type is at least as qualified as the other
+ /// given type, requiring exact equality for non-CVR qualifiers.
+ bool isAtLeastAsQualifiedAs(QualType Other) const;
+
+ QualType getNonReferenceType() const;
+
+ /// \brief Determine the type of a (typically non-lvalue) expression with the
+ /// specified result type.
+ ///
+ /// This routine should be used for expressions for which the return type is
+ /// explicitly specified (e.g., in a cast or call) and isn't necessarily
+ /// an lvalue. It removes a top-level reference (since there are no
+ /// expressions of reference type) and deletes top-level cvr-qualifiers
+ /// from non-class types (in C++) or all types (in C).
+ QualType getNonLValueExprType(ASTContext &Context) const;
+
+ /// getDesugaredType - Return the specified type with any "sugar" removed from
+ /// the type. This takes off typedefs, typeof's etc. If the outer level of
+ /// the type is already concrete, it returns it unmodified. This is similar
+ /// to getting the canonical type, but it doesn't remove *all* typedefs. For
+ /// example, it returns "T*" as "T*", (not as "int*"), because the pointer is
+ /// concrete.
+ ///
+ /// Qualifiers are left in place.
+ QualType getDesugaredType(const ASTContext &Context) const {
+ return getDesugaredType(*this, Context);
+ }
+
+ SplitQualType getSplitDesugaredType() const {
+ return getSplitDesugaredType(*this);
+ }
+
+ /// \brief Return the specified type with one level of "sugar" removed from
+ /// the type.
+ ///
+ /// This routine takes off the first typedef, typeof, etc. If the outer level
+ /// of the type is already concrete, it returns it unmodified.
+ QualType getSingleStepDesugaredType(const ASTContext &Context) const {
+ return getSingleStepDesugaredTypeImpl(*this, Context);
+ }
+
+ /// IgnoreParens - Returns the specified type after dropping any
+ /// outer-level parentheses.
+ QualType IgnoreParens() const {
+ if (isa<ParenType>(*this))
+ return QualType::IgnoreParens(*this);
+ return *this;
+ }
+
+ /// operator==/!= - Indicate whether the specified types and qualifiers are
+ /// identical.
+ friend bool operator==(const QualType &LHS, const QualType &RHS) {
+ return LHS.Value == RHS.Value;
+ }
+ friend bool operator!=(const QualType &LHS, const QualType &RHS) {
+ return LHS.Value != RHS.Value;
+ }
+ std::string getAsString() const {
+ return getAsString(split());
+ }
+ static std::string getAsString(SplitQualType split) {
+ return getAsString(split.Ty, split.Quals);
+ }
+ static std::string getAsString(const Type *ty, Qualifiers qs);
+
+ std::string getAsString(const PrintingPolicy &Policy) const {
+ std::string S;
+ getAsStringInternal(S, Policy);
+ return S;
+ }
+ void getAsStringInternal(std::string &Str,
+ const PrintingPolicy &Policy) const {
+ return getAsStringInternal(split(), Str, Policy);
+ }
+ static void getAsStringInternal(SplitQualType split, std::string &out,
+ const PrintingPolicy &policy) {
+ return getAsStringInternal(split.Ty, split.Quals, out, policy);
+ }
+ static void getAsStringInternal(const Type *ty, Qualifiers qs,
+ std::string &out,
+ const PrintingPolicy &policy);
+
+ void dump(const char *s) const;
+ void dump() const;
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddPointer(getAsOpaquePtr());
+ }
+
+ /// getAddressSpace - Return the address space of this type.
+ inline unsigned getAddressSpace() const;
+
+ /// getObjCGCAttr - Returns gc attribute of this type.
+ inline Qualifiers::GC getObjCGCAttr() const;
+
+ /// isObjCGCWeak - Returns true when this type's Objective-C GC attribute is Weak.
+ bool isObjCGCWeak() const {
+ return getObjCGCAttr() == Qualifiers::Weak;
+ }
+
+ /// isObjCGCStrong - Returns true when this type's Objective-C GC attribute is Strong.
+ bool isObjCGCStrong() const {
+ return getObjCGCAttr() == Qualifiers::Strong;
+ }
+
+ /// getObjCLifetime - Returns lifetime attribute of this type.
+ Qualifiers::ObjCLifetime getObjCLifetime() const {
+ return getQualifiers().getObjCLifetime();
+ }
+
+ bool hasNonTrivialObjCLifetime() const {
+ return getQualifiers().hasNonTrivialObjCLifetime();
+ }
+
+ bool hasStrongOrWeakObjCLifetime() const {
+ return getQualifiers().hasStrongOrWeakObjCLifetime();
+ }
+
+ enum DestructionKind {
+ DK_none,
+ DK_cxx_destructor,
+ DK_objc_strong_lifetime,
+ DK_objc_weak_lifetime
+ };
+
+ /// isDestructedType - nonzero if objects of this type require
+ /// non-trivial work to clean up after. Non-zero because it's
+ /// conceivable that qualifiers (objc_gc(weak)?) could make
+ /// something require destruction.
+ DestructionKind isDestructedType() const {
+ return isDestructedTypeImpl(*this);
+ }
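+ // Rough mapping, for illustration only:
+ //   - a C++ class with a non-trivial destructor   -> DK_cxx_destructor
+ //   - a __strong Objective-C pointer under ARC    -> DK_objc_strong_lifetime
+ //   - a __weak Objective-C pointer under ARC      -> DK_objc_weak_lifetime
+ //   - trivially destructible types ('int', 'int*') -> DK_none (zero)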
+
+ /// \brief Determine whether expressions of the given type are forbidden
+ /// from being lvalues in C.
+ ///
+ /// The expression types that are forbidden to be lvalues are:
+ /// - 'void', but not qualified void
+ /// - function types
+ ///
+ /// The exact rule here is C99 6.3.2.1:
+ /// An lvalue is an expression with an object type or an incomplete
+ /// type other than void.
+ bool isCForbiddenLValueType() const;
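+ // For example: unqualified 'void' and function types such as 'void(int)' are
+ // forbidden from being lvalues in C, whereas qualified void ('const void')
+ // and an incomplete 'struct S' are not covered by this predicate.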
+
+ /// \brief Determine whether this type has trivial copy/move-assignment
+ /// semantics.
+ bool hasTrivialAssignment(ASTContext &Context, bool Copying) const;
+
+private:
+ // These methods are implemented in a separate translation unit;
+ // "static"-ize them to avoid creating temporary QualTypes in the
+ // caller.
+ static bool isConstant(QualType T, ASTContext& Ctx);
+ static QualType getDesugaredType(QualType T, const ASTContext &Context);
+ static SplitQualType getSplitDesugaredType(QualType T);
+ static SplitQualType getSplitUnqualifiedTypeImpl(QualType type);
+ static QualType getSingleStepDesugaredTypeImpl(QualType type,
+ const ASTContext &C);
+ static QualType IgnoreParens(QualType T);
+ static DestructionKind isDestructedTypeImpl(QualType type);
+};
+
+} // end namespace clang
+
+namespace llvm {
+/// Implement simplify_type for QualType, so that we can dyn_cast from QualType
+/// to a specific Type class.
+template<> struct simplify_type<const ::clang::QualType> {
+ typedef const ::clang::Type *SimpleType;
+ static SimpleType getSimplifiedValue(const ::clang::QualType &Val) {
+ return Val.getTypePtr();
+ }
+};
+template<> struct simplify_type< ::clang::QualType>
+ : public simplify_type<const ::clang::QualType> {};
+
+// Teach SmallPtrSet that QualType is "basically a pointer".
+template<>
+class PointerLikeTypeTraits<clang::QualType> {
+public:
+ static inline void *getAsVoidPointer(clang::QualType P) {
+ return P.getAsOpaquePtr();
+ }
+ static inline clang::QualType getFromVoidPointer(void *P) {
+ return clang::QualType::getFromOpaquePtr(P);
+ }
+ // Various qualifiers go in low bits.
+ enum { NumLowBitsAvailable = 0 };
+};
+
+} // end namespace llvm
+
+namespace clang {
+
+/// \brief Base class that is common to both the \c ExtQuals and \c Type
+/// classes, which allows \c QualType to access the common fields between the
+/// two.
+///
+class ExtQualsTypeCommonBase {
+ ExtQualsTypeCommonBase(const Type *baseType, QualType canon)
+ : BaseType(baseType), CanonicalType(canon) {}
+
+ /// \brief The "base" type of an extended qualifiers type (\c ExtQuals) or
+ /// a self-referential pointer (for \c Type).
+ ///
+ /// This pointer allows an efficient mapping from a QualType to its
+ /// underlying type pointer.
+ const Type *const BaseType;
+
+ /// \brief The canonical type of this type. A QualType.
+ QualType CanonicalType;
+
+ friend class QualType;
+ friend class Type;
+ friend class ExtQuals;
+};
+
+/// ExtQuals - We can encode up to four bits in the low bits of a
+/// type pointer, but there are many more type qualifiers that we want
+/// to be able to apply to an arbitrary type. Therefore we have this
+/// struct, intended to be heap-allocated and used by QualType to
+/// store qualifiers.
+///
+/// The current design tags the 'const', 'restrict', and 'volatile' qualifiers
+/// in three low bits on the QualType pointer; a fourth bit records whether
+/// the pointer is an ExtQuals node. The extended qualifiers (address spaces,
+/// Objective-C GC attributes) are much more rare.
+class ExtQuals : public ExtQualsTypeCommonBase, public llvm::FoldingSetNode {
+ // NOTE: changing the fast qualifiers should be straightforward as
+ // long as you don't make 'const' non-fast.
+ // 1. Qualifiers:
+ // a) Modify the bitmasks (Qualifiers::TQ and DeclSpec::TQ).
+ // Fast qualifiers must occupy the low-order bits.
+ // b) Update Qualifiers::FastWidth and FastMask.
+ // 2. QualType:
+ // a) Update is{Volatile,Restrict}Qualified(), defined inline.
+ // b) Update remove{Volatile,Restrict}, defined near the end of
+ // this header.
+ // 3. ASTContext:
+ // a) Update get{Volatile,Restrict}Type.
+
+ /// Quals - the immutable set of qualifiers applied by this
+ /// node; always contains extended qualifiers.
+ Qualifiers Quals;
+
+ ExtQuals *this_() { return this; }
+
+public:
+ ExtQuals(const Type *baseType, QualType canon, Qualifiers quals)
+ : ExtQualsTypeCommonBase(baseType,
+ canon.isNull() ? QualType(this_(), 0) : canon),
+ Quals(quals)
+ {
+ assert(Quals.hasNonFastQualifiers()
+ && "ExtQuals created with no fast qualifiers");
+ assert(!Quals.hasFastQualifiers()
+ && "ExtQuals created with fast qualifiers");
+ }
+
+ Qualifiers getQualifiers() const { return Quals; }
+
+ bool hasObjCGCAttr() const { return Quals.hasObjCGCAttr(); }
+ Qualifiers::GC getObjCGCAttr() const { return Quals.getObjCGCAttr(); }
+
+ bool hasObjCLifetime() const { return Quals.hasObjCLifetime(); }
+ Qualifiers::ObjCLifetime getObjCLifetime() const {
+ return Quals.getObjCLifetime();
+ }
+
+ bool hasAddressSpace() const { return Quals.hasAddressSpace(); }
+ unsigned getAddressSpace() const { return Quals.getAddressSpace(); }
+
+ const Type *getBaseType() const { return BaseType; }
+
+public:
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID, getBaseType(), Quals);
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const Type *BaseType,
+ Qualifiers Quals) {
+ assert(!Quals.hasFastQualifiers() && "fast qualifiers in ExtQuals hash!");
+ ID.AddPointer(BaseType);
+ Quals.Profile(ID);
+ }
+};
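+// Illustration of the split described above: for a hypothetical type
+// 'const __attribute__((address_space(1))) int', the 'const' is a fast
+// qualifier stored in the low bits of the QualType pointer, while the address
+// space cannot fit there, so the QualType instead points at a heap-allocated
+// ExtQuals node recording it together with the base 'int' type.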
+
+/// \brief The kind of C++0x ref-qualifier associated with a function type,
+/// which determines whether a member function's "this" object can be an
+/// lvalue, rvalue, or neither.
+enum RefQualifierKind {
+ /// \brief No ref-qualifier was provided.
+ RQ_None = 0,
+ /// \brief An lvalue ref-qualifier was provided (\c &).
+ RQ_LValue,
+ /// \brief An rvalue ref-qualifier was provided (\c &&).
+ RQ_RValue
+};
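+// For example, in a hypothetical class:
+//   struct X {
+//     void f() &;    // RQ_LValue: callable only on lvalue objects
+//     void g() &&;   // RQ_RValue: callable only on rvalue objects
+//     void h();      // RQ_None:   no ref-qualifier written
+//   };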
+
+/// Type - This is the base class of the type hierarchy. A central concept
+/// with types is that each type always has a canonical type. A canonical type
+/// is the type with any typedef names stripped out of it or the types it
+/// references. For example, consider:
+///
+/// typedef int foo;
+/// typedef foo* bar;
+/// 'int *' 'foo *' 'bar'
+///
+/// There will be a Type object created for 'int'. Since int is canonical, its
+ /// CanonicalType pointer points to itself. There is also a Type for 'foo' (a
+/// TypedefType). Its CanonicalType pointer points to the 'int' Type. Next
+/// there is a PointerType that represents 'int*', which, like 'int', is
+/// canonical. Finally, there is a PointerType type for 'foo*' whose canonical
+/// type is 'int*', and there is a TypedefType for 'bar', whose canonical type
+/// is also 'int*'.
+///
+/// Non-canonical types are useful for emitting diagnostics, without losing
+/// information about typedefs being used. Canonical types are useful for type
+/// comparisons (they allow by-pointer equality tests) and useful for reasoning
+/// about whether something has a particular form (e.g. is a function type),
+/// because they implicitly, recursively, strip all typedefs out of a type.
+///
+/// Types, once created, are immutable.
+///
+class Type : public ExtQualsTypeCommonBase {
+public:
+ enum TypeClass {
+#define TYPE(Class, Base) Class,
+#define LAST_TYPE(Class) TypeLast = Class,
+#define ABSTRACT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+ TagFirst = Record, TagLast = Enum
+ };
+
+private:
+ Type(const Type&); // DO NOT IMPLEMENT.
+ void operator=(const Type&); // DO NOT IMPLEMENT.
+
+ /// Bitfields required by the Type class.
+ class TypeBitfields {
+ friend class Type;
+ template <class T> friend class TypePropertyCache;
+
+ /// TypeClass bitfield - Enum that specifies what subclass this belongs to.
+ unsigned TC : 8;
+
+ /// Dependent - Whether this type is a dependent type (C++ [temp.dep.type]).
+ /// Note that this should stay at the end of the ivars for Type so that
+ /// subclasses can pack their bitfields into the same word.
+ unsigned Dependent : 1;
+
+ /// \brief Whether this type somehow involves a template parameter, even
+ /// if the resolution of the type does not depend on a template parameter.
+ unsigned InstantiationDependent : 1;
+
+ /// \brief Whether this type is a variably-modified type (C99 6.7.5).
+ unsigned VariablyModified : 1;
+
+ /// \brief Whether this type contains an unexpanded parameter pack
+ /// (for C++0x variadic templates).
+ unsigned ContainsUnexpandedParameterPack : 1;
+
+ /// \brief Nonzero if the cache (i.e. the bitfields here starting
+ /// with 'Cache') is valid. If so, then this is a
+ /// LangOptions::VisibilityMode+1.
+ mutable unsigned CacheValidAndVisibility : 2;
+
+ /// \brief True if the visibility was set explicitly in the source code.
+ mutable unsigned CachedExplicitVisibility : 1;
+
+ /// \brief Linkage of this type.
+ mutable unsigned CachedLinkage : 2;
+
+ /// \brief Whether this type involves any local or unnamed types.
+ mutable unsigned CachedLocalOrUnnamed : 1;
+
+ /// \brief FromAST - Whether this type comes from an AST file.
+ mutable unsigned FromAST : 1;
+
+ bool isCacheValid() const {
+ return (CacheValidAndVisibility != 0);
+ }
+ Visibility getVisibility() const {
+ assert(isCacheValid() && "getting visibility from invalid cache");
+ return static_cast<Visibility>(CacheValidAndVisibility-1);
+ }
+ bool isVisibilityExplicit() const {
+ assert(isCacheValid() && "getting visibility from invalid cache");
+ return CachedExplicitVisibility;
+ }
+ Linkage getLinkage() const {
+ assert(isCacheValid() && "getting linkage from invalid cache");
+ return static_cast<Linkage>(CachedLinkage);
+ }
+ bool hasLocalOrUnnamedType() const {
+ assert(isCacheValid() && "getting linkage from invalid cache");
+ return CachedLocalOrUnnamed;
+ }
+ };
+ enum { NumTypeBits = 19 };
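+ // NumTypeBits is the total width of the TypeBitfields members above:
+ // 8 (TC) + 1 (Dependent) + 1 (InstantiationDependent) + 1 (VariablyModified)
+ // + 1 (ContainsUnexpandedParameterPack) + 2 (CacheValidAndVisibility)
+ // + 1 (CachedExplicitVisibility) + 2 (CachedLinkage)
+ // + 1 (CachedLocalOrUnnamed) + 1 (FromAST) = 19 bits.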
+
+protected:
+ // These classes allow subclasses to somewhat cleanly pack bitfields
+ // into Type.
+
+ class ArrayTypeBitfields {
+ friend class ArrayType;
+
+ unsigned : NumTypeBits;
+
+ /// IndexTypeQuals - CVR qualifiers from declarations like
+ /// 'int X[static restrict 4]'. For function parameters only.
+ unsigned IndexTypeQuals : 3;
+
+ /// SizeModifier - storage class qualifiers from declarations like
+ /// 'int X[static restrict 4]'. For function parameters only.
+ /// Actually an ArrayType::ArraySizeModifier.
+ unsigned SizeModifier : 3;
+ };
+
+ class BuiltinTypeBitfields {
+ friend class BuiltinType;
+
+ unsigned : NumTypeBits;
+
+ /// The kind (BuiltinType::Kind) of builtin type this is.
+ unsigned Kind : 8;
+ };
+
+ class FunctionTypeBitfields {
+ friend class FunctionType;
+
+ unsigned : NumTypeBits;
+
+ /// Extra information which affects how the function is called, like
+ /// regparm and the calling convention.
+ unsigned ExtInfo : 8;
+
+ /// TypeQuals - Used only by FunctionProtoType, put here to pack with the
+ /// other bitfields.
+ /// The qualifiers are part of FunctionProtoType because...
+ ///
+ /// C++ 8.3.5p4: The return type, the parameter type list and the
+ /// cv-qualifier-seq, [...], are part of the function type.
+ unsigned TypeQuals : 3;
+
+ /// \brief The ref-qualifier associated with a \c FunctionProtoType.
+ ///
+ /// This is a value of type \c RefQualifierKind.
+ unsigned RefQualifier : 2;
+ };
+
+ class ObjCObjectTypeBitfields {
+ friend class ObjCObjectType;
+
+ unsigned : NumTypeBits;
+
+ /// NumProtocols - The number of protocols stored directly on this
+ /// object type.
+ unsigned NumProtocols : 32 - NumTypeBits;
+ };
+
+ class ReferenceTypeBitfields {
+ friend class ReferenceType;
+
+ unsigned : NumTypeBits;
+
+ /// True if the type was originally spelled with an lvalue sigil.
+ /// This is never true of rvalue references but can also be false
+ /// on lvalue references because of C++0x [dcl.typedef]p9,
+ /// as follows:
+ ///
+ /// typedef int &ref; // lvalue, spelled lvalue
+ /// typedef int &&rvref; // rvalue
+ /// ref &a; // lvalue, inner ref, spelled lvalue
+ /// ref &&a; // lvalue, inner ref
+ /// rvref &a; // lvalue, inner ref, spelled lvalue
+ /// rvref &&a; // rvalue, inner ref
+ unsigned SpelledAsLValue : 1;
+
+ /// True if the inner type is a reference type. This only happens
+ /// in non-canonical forms.
+ unsigned InnerRef : 1;
+ };
+
+ class TypeWithKeywordBitfields {
+ friend class TypeWithKeyword;
+
+ unsigned : NumTypeBits;
+
+ /// An ElaboratedTypeKeyword. 8 bits for efficient access.
+ unsigned Keyword : 8;
+ };
+
+ class VectorTypeBitfields {
+ friend class VectorType;
+
+ unsigned : NumTypeBits;
+
+ /// VecKind - The kind of vector, either a generic vector type or some
+ /// target-specific vector type such as for AltiVec or Neon.
+ unsigned VecKind : 3;
+
+ /// NumElements - The number of elements in the vector.
+ unsigned NumElements : 29 - NumTypeBits;
+ };
+
+ class AttributedTypeBitfields {
+ friend class AttributedType;
+
+ unsigned : NumTypeBits;
+
+ /// AttrKind - an AttributedType::Kind
+ unsigned AttrKind : 32 - NumTypeBits;
+ };
+
+ union {
+ TypeBitfields TypeBits;
+ ArrayTypeBitfields ArrayTypeBits;
+ AttributedTypeBitfields AttributedTypeBits;
+ BuiltinTypeBitfields BuiltinTypeBits;
+ FunctionTypeBitfields FunctionTypeBits;
+ ObjCObjectTypeBitfields ObjCObjectTypeBits;
+ ReferenceTypeBitfields ReferenceTypeBits;
+ TypeWithKeywordBitfields TypeWithKeywordBits;
+ VectorTypeBitfields VectorTypeBits;
+ };
+
+private:
+ /// \brief Set whether this type comes from an AST file.
+ void setFromAST(bool V = true) const {
+ TypeBits.FromAST = V;
+ }
+
+ template <class T> friend class TypePropertyCache;
+
+protected:
+ // silence VC++ warning C4355: 'this' : used in base member initializer list
+ Type *this_() { return this; }
+ Type(TypeClass tc, QualType canon, bool Dependent,
+ bool InstantiationDependent, bool VariablyModified,
+ bool ContainsUnexpandedParameterPack)
+ : ExtQualsTypeCommonBase(this,
+ canon.isNull() ? QualType(this_(), 0) : canon) {
+ TypeBits.TC = tc;
+ TypeBits.Dependent = Dependent;
+ TypeBits.InstantiationDependent = Dependent || InstantiationDependent;
+ TypeBits.VariablyModified = VariablyModified;
+ TypeBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
+ TypeBits.CacheValidAndVisibility = 0;
+ TypeBits.CachedExplicitVisibility = false;
+ TypeBits.CachedLocalOrUnnamed = false;
+ TypeBits.CachedLinkage = NoLinkage;
+ TypeBits.FromAST = false;
+ }
+ friend class ASTContext;
+
+ void setDependent(bool D = true) {
+ TypeBits.Dependent = D;
+ if (D)
+ TypeBits.InstantiationDependent = true;
+ }
+ void setInstantiationDependent(bool D = true) {
+ TypeBits.InstantiationDependent = D;
+ }
+ void setVariablyModified(bool VM = true) {
+ TypeBits.VariablyModified = VM;
+ }
+ void setContainsUnexpandedParameterPack(bool PP = true) {
+ TypeBits.ContainsUnexpandedParameterPack = PP;
+ }
+
+public:
+ TypeClass getTypeClass() const { return static_cast<TypeClass>(TypeBits.TC); }
+
+ /// \brief Whether this type comes from an AST file.
+ bool isFromAST() const { return TypeBits.FromAST; }
+
+ /// \brief Whether this type is or contains an unexpanded parameter
+ /// pack, used to support C++0x variadic templates.
+ ///
+ /// A type that contains a parameter pack shall be expanded by the
+ /// ellipsis operator at some point. For example, the typedef in the
+ /// following example contains an unexpanded parameter pack 'T':
+ ///
+ /// \code
+ /// template<typename ...T>
+ /// struct X {
+ /// typedef T* pointer_types; // ill-formed; T is a parameter pack.
+ /// };
+ /// \endcode
+ ///
+ /// Note that this routine does not specify which parameter packs within the
+ /// type are unexpanded; it only indicates whether any are present.
+ bool containsUnexpandedParameterPack() const {
+ return TypeBits.ContainsUnexpandedParameterPack;
+ }
+
+ /// Determines if this type would be canonical if it had no further
+ /// qualification.
+ bool isCanonicalUnqualified() const {
+ return CanonicalType == QualType(this, 0);
+ }
+
+ /// Pull a single level of sugar off of this locally-unqualified type.
+ /// Users should generally prefer SplitQualType::getSingleStepDesugaredType()
+ /// or QualType::getSingleStepDesugaredType(const ASTContext&).
+ QualType getLocallyUnqualifiedSingleStepDesugaredType() const;
+
+ /// Types are partitioned into 3 broad categories (C99 6.2.5p1):
+ /// object types, function types, and incomplete types.
+
+ /// isIncompleteType - Return true if this is an incomplete type.
+ /// A type that can describe objects, but which lacks information needed to
+ /// determine its size (e.g. void, or a fwd declared struct). Clients of this
+ /// routine will need to determine if the size is actually required.
+ ///
+ /// \param Def If non-NULL, and the type refers to some kind of declaration
+ /// that can be completed (such as a C struct, C++ class, or Objective-C
+ /// class), will be set to the declaration.
+ bool isIncompleteType(NamedDecl **Def = 0) const;
+
+ /// isIncompleteOrObjectType - Return true if this is an incomplete or object
+ /// type, in other words, not a function type.
+ bool isIncompleteOrObjectType() const {
+ return !isFunctionType();
+ }
+
+ /// \brief Determine whether this type is an object type.
+ bool isObjectType() const {
+ // C++ [basic.types]p8:
+ // An object type is a (possibly cv-qualified) type that is not a
+ // function type, not a reference type, and not a void type.
+ return !isReferenceType() && !isFunctionType() && !isVoidType();
+ }
+
+ /// isLiteralType - Return true if this is a literal type
+ /// (C++0x [basic.types]p10)
+ bool isLiteralType() const;
+
+ /// \brief Test if this type is a standard-layout type.
+ /// (C++0x [basic.types]p9)
+ bool isStandardLayoutType() const;
+
+ /// Helper methods to distinguish type categories. All type predicates
+ /// operate on the canonical type, ignoring typedefs and qualifiers.
+
+ /// isBuiltinType - returns true if the type is a builtin type.
+ bool isBuiltinType() const;
+
+ /// isSpecificBuiltinType - Test for a particular builtin type.
+ bool isSpecificBuiltinType(unsigned K) const;
+
+ /// isPlaceholderType - Test for a type which does not represent an
+ /// actual type-system type but is instead used as a placeholder for
+ /// various convenient purposes within Clang. All such types are
+ /// BuiltinTypes.
+ bool isPlaceholderType() const;
+ const BuiltinType *getAsPlaceholderType() const;
+
+ /// isSpecificPlaceholderType - Test for a specific placeholder type.
+ bool isSpecificPlaceholderType(unsigned K) const;
+
+ /// isNonOverloadPlaceholderType - Test for a placeholder type
+ /// other than Overload; see BuiltinType::isNonOverloadPlaceholderType.
+ bool isNonOverloadPlaceholderType() const;
+
+ /// isIntegerType() does *not* include complex integers (a GCC extension).
+ /// isComplexIntegerType() can be used to test for complex integers.
+ bool isIntegerType() const; // C99 6.2.5p17 (int, char, bool, enum)
+ bool isEnumeralType() const;
+ bool isBooleanType() const;
+ bool isCharType() const;
+ bool isWideCharType() const;
+ bool isChar16Type() const;
+ bool isChar32Type() const;
+ bool isAnyCharacterType() const;
+ bool isIntegralType(ASTContext &Ctx) const;
+
+ /// \brief Determine whether this type is an integral or enumeration type.
+ bool isIntegralOrEnumerationType() const;
+ /// \brief Determine whether this type is an integral or unscoped enumeration
+ /// type.
+ bool isIntegralOrUnscopedEnumerationType() const;
+
+ /// Floating point categories.
+ bool isRealFloatingType() const; // C99 6.2.5p10 (float, double, long double)
+ /// isComplexType() does *not* include complex integers (a GCC extension).
+ /// isComplexIntegerType() can be used to test for complex integers.
+ bool isComplexType() const; // C99 6.2.5p11 (complex)
+ bool isAnyComplexType() const; // C99 6.2.5p11 (complex) + Complex Int.
+ bool isFloatingType() const; // C99 6.2.5p11 (real floating + complex)
+ bool isHalfType() const; // OpenCL 6.1.1.1, NEON (IEEE 754-2008 half)
+ bool isRealType() const; // C99 6.2.5p17 (real floating + integer)
+ bool isArithmeticType() const; // C99 6.2.5p18 (integer + floating)
+ bool isVoidType() const; // C99 6.2.5p19
+ bool isDerivedType() const; // C99 6.2.5p20
+ bool isScalarType() const; // C99 6.2.5p21 (arithmetic + pointers)
+ bool isAggregateType() const;
+ bool isFundamentalType() const;
+ bool isCompoundType() const;
+
+ // Type Predicates: Check to see if this type is structurally the specified
+ // type, ignoring typedefs and qualifiers.
+ bool isFunctionType() const;
+ bool isFunctionNoProtoType() const { return getAs<FunctionNoProtoType>(); }
+ bool isFunctionProtoType() const { return getAs<FunctionProtoType>(); }
+ bool isPointerType() const;
+ bool isAnyPointerType() const; // Any C pointer or ObjC object pointer
+ bool isBlockPointerType() const;
+ bool isVoidPointerType() const;
+ bool isReferenceType() const;
+ bool isLValueReferenceType() const;
+ bool isRValueReferenceType() const;
+ bool isFunctionPointerType() const;
+ bool isMemberPointerType() const;
+ bool isMemberFunctionPointerType() const;
+ bool isMemberDataPointerType() const;
+ bool isArrayType() const;
+ bool isConstantArrayType() const;
+ bool isIncompleteArrayType() const;
+ bool isVariableArrayType() const;
+ bool isDependentSizedArrayType() const;
+ bool isRecordType() const;
+ bool isClassType() const;
+ bool isStructureType() const;
+ bool isStructureOrClassType() const;
+ bool isUnionType() const;
+ bool isComplexIntegerType() const; // GCC _Complex integer type.
+ bool isVectorType() const; // GCC vector type.
+ bool isExtVectorType() const; // Extended vector type.
+ bool isObjCObjectPointerType() const; // pointer to ObjC object
+ bool isObjCRetainableType() const; // ObjC object or block pointer
+ bool isObjCLifetimeType() const; // (array of)* retainable type
+ bool isObjCIndirectLifetimeType() const; // (pointer to)* lifetime type
+ bool isObjCNSObjectType() const; // __attribute__((NSObject))
+ // FIXME: change this to 'raw' interface type, so we can use 'interface' type
+ // for the common case.
+ bool isObjCObjectType() const; // NSString or typeof(*(id)0)
+ bool isObjCQualifiedInterfaceType() const; // NSString<foo>
+ bool isObjCQualifiedIdType() const; // id<foo>
+ bool isObjCQualifiedClassType() const; // Class<foo>
+ bool isObjCObjectOrInterfaceType() const;
+ bool isObjCIdType() const; // id
+ bool isObjCClassType() const; // Class
+ bool isObjCSelType() const; // SEL
+ bool isObjCBuiltinType() const; // 'id' or 'Class'
+ bool isObjCARCBridgableType() const;
+ bool isCARCBridgableType() const;
+ bool isTemplateTypeParmType() const; // C++ template type parameter
+ bool isNullPtrType() const; // C++0x nullptr_t
+ bool isAtomicType() const; // C11 _Atomic()
+
+ /// Determines if this type, which must satisfy
+ /// isObjCLifetimeType(), is implicitly __unsafe_unretained rather
+ /// than implicitly __strong.
+ bool isObjCARCImplicitlyUnretainedType() const;
+
+ /// Return the implicit lifetime for this type, which must not be dependent.
+ Qualifiers::ObjCLifetime getObjCARCImplicitLifetime() const;
+
+ enum ScalarTypeKind {
+ STK_CPointer,
+ STK_BlockPointer,
+ STK_ObjCObjectPointer,
+ STK_MemberPointer,
+ STK_Bool,
+ STK_Integral,
+ STK_Floating,
+ STK_IntegralComplex,
+ STK_FloatingComplex
+ };
+ /// getScalarTypeKind - Given that this is a scalar type, classify it.
+ ScalarTypeKind getScalarTypeKind() const;
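+ // Illustrative classification: 'int*' -> STK_CPointer, 'bool' -> STK_Bool,
+ // 'unsigned' -> STK_Integral, 'double' -> STK_Floating,
+ // '_Complex int' -> STK_IntegralComplex, and 'int X::*' (for a hypothetical
+ // class X) -> STK_MemberPointer.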
+
+ /// isDependentType - Whether this type is a dependent type, meaning
+ /// that its definition somehow depends on a template parameter
+ /// (C++ [temp.dep.type]).
+ bool isDependentType() const { return TypeBits.Dependent; }
+
+ /// \brief Determine whether this type is an instantiation-dependent type,
+ /// meaning that the type involves a template parameter (even if the
+ /// definition does not actually depend on the type substituted for that
+ /// template parameter).
+ bool isInstantiationDependentType() const {
+ return TypeBits.InstantiationDependent;
+ }
+
+ /// \brief Whether this type is a variably-modified type (C99 6.7.5).
+ bool isVariablyModifiedType() const { return TypeBits.VariablyModified; }
+
+ /// \brief Whether this type involves a variable-length array type
+ /// with a definite size.
+ bool hasSizedVLAType() const;
+
+ /// \brief Whether this type is or contains a local or unnamed type.
+ bool hasUnnamedOrLocalType() const;
+
+ bool isOverloadableType() const;
+
+ /// \brief Determine whether this type is a C++ elaborated-type-specifier.
+ bool isElaboratedTypeSpecifier() const;
+
+ bool canDecayToPointerType() const;
+
+ /// hasPointerRepresentation - Whether this type is represented
+ /// natively as a pointer; this includes pointers, references, block
+ /// pointers, and Objective-C interface, qualified id, and qualified
+ /// interface types, as well as nullptr_t.
+ bool hasPointerRepresentation() const;
+
+ /// hasObjCPointerRepresentation - Whether this type can represent
+ /// an Objective-C pointer type for the purpose of GC'ability.
+ bool hasObjCPointerRepresentation() const;
+
+ /// \brief Determine whether this type has an integer representation
+ /// of some sort, e.g., it is an integer type or a vector.
+ bool hasIntegerRepresentation() const;
+
+ /// \brief Determine whether this type has a signed integer representation
+ /// of some sort, e.g., it is a signed integer type or a vector.
+ bool hasSignedIntegerRepresentation() const;
+
+ /// \brief Determine whether this type has an unsigned integer representation
+ /// of some sort, e.g., it is an unsigned integer type or a vector.
+ bool hasUnsignedIntegerRepresentation() const;
+
+ /// \brief Determine whether this type has a floating-point representation
+ /// of some sort, e.g., it is a floating-point type or a vector thereof.
+ bool hasFloatingRepresentation() const;
+
+ // Type Checking Functions: Check to see if this type is structurally the
+ // specified type, ignoring typedefs and qualifiers, and return a pointer to
+ // the best type we can.
+ const RecordType *getAsStructureType() const;
+ /// NOTE: getAs*ArrayType are methods on ASTContext.
+ const RecordType *getAsUnionType() const;
+ const ComplexType *getAsComplexIntegerType() const; // GCC complex int type.
+ // The following is a convenience method that returns an ObjCObjectPointerType
+ // for object declared using an interface.
+ const ObjCObjectPointerType *getAsObjCInterfacePointerType() const;
+ const ObjCObjectPointerType *getAsObjCQualifiedIdType() const;
+ const ObjCObjectPointerType *getAsObjCQualifiedClassType() const;
+ const ObjCObjectType *getAsObjCQualifiedInterfaceType() const;
+ const CXXRecordDecl *getCXXRecordDeclForPointerType() const;
+
+ /// \brief Retrieves the CXXRecordDecl that this type refers to, either
+ /// because the type is a RecordType or because it is the injected-class-name
+ /// type of a class template or class template partial specialization.
+ CXXRecordDecl *getAsCXXRecordDecl() const;
+
+ /// \brief Get the AutoType whose type will be deduced for a variable with
+ /// an initializer of this type. This looks through declarators like pointer
+ /// types, but not through decltype or typedefs.
+ AutoType *getContainedAutoType() const;
+
+ /// Member-template getAs<specific type>. Look through sugar for
+ /// an instance of <specific type>. This scheme will eventually
+ /// replace the specific getAsXXXX methods above.
+ ///
+ /// There are some specializations of this member template listed
+ /// immediately following this class.
+ template <typename T> const T *getAs() const;
+
+ /// A variant of getAs<> for array types which silently discards
+ /// qualifiers from the outermost type.
+ const ArrayType *getAsArrayTypeUnsafe() const;
+
+ /// Member-template castAs<specific type>. Look through sugar for
+ /// the underlying instance of <specific type>.
+ ///
+ /// This method has the same relationship to getAs<T> as cast<T> has
+ /// to dyn_cast<T>; which is to say, the underlying type *must*
+ /// have the intended type, and this method will never return null.
+ template <typename T> const T *castAs() const;
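+ // Typical usage, with a hypothetical QualType 'QT':
+ //   if (const PointerType *PT = QT->getAs<PointerType>())
+ //     QualType Pointee = PT->getPointeeType();          // sugar looked through
+ //   const FunctionType *FT = QT->castAs<FunctionType>(); // must be a function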
+
+ /// A variant of castAs<> for array type which silently discards
+ /// qualifiers from the outermost type.
+ const ArrayType *castAsArrayTypeUnsafe() const;
+
+ /// getBaseElementTypeUnsafe - Get the base element type of this
+ /// type, potentially discarding type qualifiers. This method
+ /// should never be used when type qualifiers are meaningful.
+ const Type *getBaseElementTypeUnsafe() const;
+
+ /// getArrayElementTypeNoTypeQual - If this is an array type, return the
+ /// element type of the array, potentially with type qualifiers missing.
+ /// This method should never be used when type qualifiers are meaningful.
+ const Type *getArrayElementTypeNoTypeQual() const;
+
+ /// getPointeeType - If this is a pointer, ObjC object pointer, or block
+ /// pointer, this returns the respective pointee.
+ QualType getPointeeType() const;
+
+ /// getUnqualifiedDesugaredType() - Return the specified type with
+ /// any "sugar" removed from the type, removing any typedefs,
+ /// typeofs, etc., as well as any qualifiers.
+ const Type *getUnqualifiedDesugaredType() const;
+
+ /// More type predicates useful for type checking/promotion
+ bool isPromotableIntegerType() const; // C99 6.3.1.1p2
+
+ /// isSignedIntegerType - Return true if this is an integer type that is
+ /// signed, according to C99 6.2.5p4 [char, signed char, short, int, long..],
+ /// or an enum decl which has a signed representation.
+ bool isSignedIntegerType() const;
+
+ /// isUnsignedIntegerType - Return true if this is an integer type that is
+ /// unsigned, according to C99 6.2.5p6 [which returns true for _Bool],
+ /// or an enum decl which has an unsigned representation.
+ bool isUnsignedIntegerType() const;
+
+ /// Determines whether this is an integer type that is signed or an
+ /// enumeration type whose underlying type is a signed integer type.
+ bool isSignedIntegerOrEnumerationType() const;
+
+ /// Determines whether this is an integer type that is unsigned or an
+ /// enumeration type whose underlying type is an unsigned integer type.
+ bool isUnsignedIntegerOrEnumerationType() const;
+
+ /// isConstantSizeType - Return true if this is not a variable sized type,
+ /// according to the rules of C99 6.7.5p3. It is not legal to call this on
+ /// incomplete types.
+ bool isConstantSizeType() const;
+
+ /// isSpecifierType - Returns true if this type can be represented by some
+ /// set of type specifiers.
+ bool isSpecifierType() const;
+
+ /// \brief Determine the linkage of this type.
+ Linkage getLinkage() const;
+
+ /// \brief Determine the visibility of this type.
+ Visibility getVisibility() const;
+
+ /// \brief Return true if the visibility was explicitly set in the code.
+ bool isVisibilityExplicit() const;
+
+ /// \brief Determine the linkage and visibility of this type.
+ std::pair<Linkage,Visibility> getLinkageAndVisibility() const;
+
+ /// \brief Note that the linkage is no longer known.
+ void ClearLinkageCache();
+
+ const char *getTypeClassName() const;
+
+ QualType getCanonicalTypeInternal() const {
+ return CanonicalType;
+ }
+ CanQualType getCanonicalTypeUnqualified() const; // in CanonicalType.h
+ LLVM_ATTRIBUTE_USED void dump() const;
+
+ static bool classof(const Type *) { return true; }
+
+ friend class ASTReader;
+ friend class ASTWriter;
+};
+
+template <> inline const TypedefType *Type::getAs() const {
+ return dyn_cast<TypedefType>(this);
+}
+
+// We can do canonical leaf types faster, because we don't have to
+// worry about preserving child type decoration.
+#define TYPE(Class, Base)
+#define LEAF_TYPE(Class) \
+template <> inline const Class##Type *Type::getAs() const { \
+ return dyn_cast<Class##Type>(CanonicalType); \
+} \
+template <> inline const Class##Type *Type::castAs() const { \
+ return cast<Class##Type>(CanonicalType); \
+}
+#include "clang/AST/TypeNodes.def"
+
+
+/// BuiltinType - This class is used for builtin types like 'int'. Builtin
+/// types are always canonical and have a literal name field.
+class BuiltinType : public Type {
+public:
+ enum Kind {
+#define BUILTIN_TYPE(Id, SingletonId) Id,
+#define LAST_BUILTIN_TYPE(Id) LastKind = Id
+#include "clang/AST/BuiltinTypes.def"
+ };
+
+public:
+ BuiltinType(Kind K)
+ : Type(Builtin, QualType(), /*Dependent=*/(K == Dependent),
+ /*InstantiationDependent=*/(K == Dependent),
+ /*VariablyModified=*/false,
+ /*Unexpanded parameter pack=*/false) {
+ BuiltinTypeBits.Kind = K;
+ }
+
+ Kind getKind() const { return static_cast<Kind>(BuiltinTypeBits.Kind); }
+ const char *getName(const PrintingPolicy &Policy) const;
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ bool isInteger() const {
+ return getKind() >= Bool && getKind() <= Int128;
+ }
+
+ bool isSignedInteger() const {
+ return getKind() >= Char_S && getKind() <= Int128;
+ }
+
+ bool isUnsignedInteger() const {
+ return getKind() >= Bool && getKind() <= UInt128;
+ }
+
+ bool isFloatingPoint() const {
+ return getKind() >= Half && getKind() <= LongDouble;
+ }
+
+ /// Determines whether the given kind corresponds to a placeholder type.
+ static bool isPlaceholderTypeKind(Kind K) {
+ return K >= Overload;
+ }
+
+ /// Determines whether this type is a placeholder type, i.e. a type
+ /// which cannot appear in arbitrary positions in a fully-formed
+ /// expression.
+ bool isPlaceholderType() const {
+ return isPlaceholderTypeKind(getKind());
+ }
+
+ /// Determines whether this type is a placeholder type other than
+ /// Overload. Most placeholder types require only syntactic
+ /// information about their context in order to be resolved (e.g.
+ /// whether it is a call expression), which means they can (and
+ /// should) be resolved in an earlier "phase" of analysis.
+ /// Overload expressions sometimes pick up further information
+ /// from their context, like whether the context expects a
+ /// specific function-pointer type, and so frequently need
+ /// special treatment.
+ bool isNonOverloadPlaceholderType() const {
+ return getKind() > Overload;
+ }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Builtin; }
+ static bool classof(const BuiltinType *) { return true; }
+};
+
+/// ComplexType - C99 6.2.5p11 - Complex values. This supports the C99 complex
+/// types (_Complex float etc) as well as the GCC integer complex extensions.
+///
+class ComplexType : public Type, public llvm::FoldingSetNode {
+ QualType ElementType;
+ ComplexType(QualType Element, QualType CanonicalPtr) :
+ Type(Complex, CanonicalPtr, Element->isDependentType(),
+ Element->isInstantiationDependentType(),
+ Element->isVariablyModifiedType(),
+ Element->containsUnexpandedParameterPack()),
+ ElementType(Element) {
+ }
+ friend class ASTContext; // ASTContext creates these.
+
+public:
+ QualType getElementType() const { return ElementType; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getElementType());
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Element) {
+ ID.AddPointer(Element.getAsOpaquePtr());
+ }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Complex; }
+ static bool classof(const ComplexType *) { return true; }
+};
+
+/// ParenType - Sugar for parentheses used when specifying types.
+///
+class ParenType : public Type, public llvm::FoldingSetNode {
+ QualType Inner;
+
+ ParenType(QualType InnerType, QualType CanonType) :
+ Type(Paren, CanonType, InnerType->isDependentType(),
+ InnerType->isInstantiationDependentType(),
+ InnerType->isVariablyModifiedType(),
+ InnerType->containsUnexpandedParameterPack()),
+ Inner(InnerType) {
+ }
+ friend class ASTContext; // ASTContext creates these.
+
+public:
+
+ QualType getInnerType() const { return Inner; }
+
+ bool isSugared() const { return true; }
+ QualType desugar() const { return getInnerType(); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getInnerType());
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Inner) {
+ Inner.Profile(ID);
+ }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Paren; }
+ static bool classof(const ParenType *) { return true; }
+};
+
+/// PointerType - C99 6.7.5.1 - Pointer Declarators.
+///
+class PointerType : public Type, public llvm::FoldingSetNode {
+ QualType PointeeType;
+
+ PointerType(QualType Pointee, QualType CanonicalPtr) :
+ Type(Pointer, CanonicalPtr, Pointee->isDependentType(),
+ Pointee->isInstantiationDependentType(),
+ Pointee->isVariablyModifiedType(),
+ Pointee->containsUnexpandedParameterPack()),
+ PointeeType(Pointee) {
+ }
+ friend class ASTContext; // ASTContext creates these.
+
+public:
+
+ QualType getPointeeType() const { return PointeeType; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getPointeeType());
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee) {
+ ID.AddPointer(Pointee.getAsOpaquePtr());
+ }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Pointer; }
+ static bool classof(const PointerType *) { return true; }
+};
+
+/// BlockPointerType - pointer to a block type.
+/// This type is to represent types syntactically represented as
+/// "void (^)(int)", etc. Pointee is required to always be a function type.
+///
+class BlockPointerType : public Type, public llvm::FoldingSetNode {
+ QualType PointeeType; // Block is some kind of pointer type
+ BlockPointerType(QualType Pointee, QualType CanonicalCls) :
+ Type(BlockPointer, CanonicalCls, Pointee->isDependentType(),
+ Pointee->isInstantiationDependentType(),
+ Pointee->isVariablyModifiedType(),
+ Pointee->containsUnexpandedParameterPack()),
+ PointeeType(Pointee) {
+ }
+ friend class ASTContext; // ASTContext creates these.
+
+public:
+
+ /// Get the pointee type. Pointee is required to always be a function type.
+ QualType getPointeeType() const { return PointeeType; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getPointeeType());
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee) {
+ ID.AddPointer(Pointee.getAsOpaquePtr());
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == BlockPointer;
+ }
+ static bool classof(const BlockPointerType *) { return true; }
+};
+
+/// ReferenceType - Base for LValueReferenceType and RValueReferenceType
+///
+class ReferenceType : public Type, public llvm::FoldingSetNode {
+ QualType PointeeType;
+
+protected:
+ ReferenceType(TypeClass tc, QualType Referencee, QualType CanonicalRef,
+ bool SpelledAsLValue) :
+ Type(tc, CanonicalRef, Referencee->isDependentType(),
+ Referencee->isInstantiationDependentType(),
+ Referencee->isVariablyModifiedType(),
+ Referencee->containsUnexpandedParameterPack()),
+ PointeeType(Referencee)
+ {
+ ReferenceTypeBits.SpelledAsLValue = SpelledAsLValue;
+ ReferenceTypeBits.InnerRef = Referencee->isReferenceType();
+ }
+
+public:
+ bool isSpelledAsLValue() const { return ReferenceTypeBits.SpelledAsLValue; }
+ bool isInnerRef() const { return ReferenceTypeBits.InnerRef; }
+
+ QualType getPointeeTypeAsWritten() const { return PointeeType; }
+ QualType getPointeeType() const {
+ // FIXME: this might strip inner qualifiers; okay?
+ const ReferenceType *T = this;
+ while (T->isInnerRef())
+ T = T->PointeeType->castAs<ReferenceType>();
+ return T->PointeeType;
+ }
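+ // For example, given the hypothetical 'typedef int &ref;', the written type
+ // 'ref &&' is a ReferenceType whose getPointeeTypeAsWritten() is 'ref' (itself
+ // a reference, so InnerRef is set), while getPointeeType() walks through the
+ // inner reference and returns 'int'.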
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, PointeeType, isSpelledAsLValue());
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ QualType Referencee,
+ bool SpelledAsLValue) {
+ ID.AddPointer(Referencee.getAsOpaquePtr());
+ ID.AddBoolean(SpelledAsLValue);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == LValueReference ||
+ T->getTypeClass() == RValueReference;
+ }
+ static bool classof(const ReferenceType *) { return true; }
+};
+
+/// LValueReferenceType - C++ [dcl.ref] - Lvalue reference
+///
+class LValueReferenceType : public ReferenceType {
+ LValueReferenceType(QualType Referencee, QualType CanonicalRef,
+ bool SpelledAsLValue) :
+ ReferenceType(LValueReference, Referencee, CanonicalRef, SpelledAsLValue)
+ {}
+ friend class ASTContext; // ASTContext creates these
+public:
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == LValueReference;
+ }
+ static bool classof(const LValueReferenceType *) { return true; }
+};
+
+/// RValueReferenceType - C++0x [dcl.ref] - Rvalue reference
+///
+class RValueReferenceType : public ReferenceType {
+ RValueReferenceType(QualType Referencee, QualType CanonicalRef) :
+ ReferenceType(RValueReference, Referencee, CanonicalRef, false) {
+ }
+ friend class ASTContext; // ASTContext creates these
+public:
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == RValueReference;
+ }
+ static bool classof(const RValueReferenceType *) { return true; }
+};
+
+/// MemberPointerType - C++ 8.3.3 - Pointers to members
+///
+class MemberPointerType : public Type, public llvm::FoldingSetNode {
+ QualType PointeeType;
+ /// The class of which the pointee is a member. Must ultimately be a
+ /// RecordType, but could be a typedef or a template parameter too.
+ const Type *Class;
+
+ MemberPointerType(QualType Pointee, const Type *Cls, QualType CanonicalPtr) :
+ Type(MemberPointer, CanonicalPtr,
+ Cls->isDependentType() || Pointee->isDependentType(),
+ (Cls->isInstantiationDependentType() ||
+ Pointee->isInstantiationDependentType()),
+ Pointee->isVariablyModifiedType(),
+ (Cls->containsUnexpandedParameterPack() ||
+ Pointee->containsUnexpandedParameterPack())),
+ PointeeType(Pointee), Class(Cls) {
+ }
+ friend class ASTContext; // ASTContext creates these.
+
+public:
+ QualType getPointeeType() const { return PointeeType; }
+
+ /// Returns true if the member type (i.e. the pointee type) is a
+ /// function type rather than a data-member type.
+ bool isMemberFunctionPointer() const {
+ return PointeeType->isFunctionProtoType();
+ }
+
+ /// Returns true if the member type (i.e. the pointee type) is a
+ /// data type rather than a function type.
+ bool isMemberDataPointer() const {
+ return !PointeeType->isFunctionProtoType();
+ }
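+ // For illustration: given a hypothetical 'struct X', the member pointer type
+ // 'int (X::*)(int)' satisfies isMemberFunctionPointer(), whereas the
+ // data-member pointer type 'int X::*' satisfies isMemberDataPointer().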
+
+ const Type *getClass() const { return Class; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getPointeeType(), getClass());
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Pointee,
+ const Type *Class) {
+ ID.AddPointer(Pointee.getAsOpaquePtr());
+ ID.AddPointer(Class);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == MemberPointer;
+ }
+ static bool classof(const MemberPointerType *) { return true; }
+};
+
+/// ArrayType - C99 6.7.5.2 - Array Declarators.
+///
+class ArrayType : public Type, public llvm::FoldingSetNode {
+public:
+ /// ArraySizeModifier - Capture whether this is a normal array (e.g. int X[4])
+ /// an array with a static size (e.g. int X[static 4]), or an array
+ /// with a star size (e.g. int X[*]).
+ /// 'static' is only allowed on function parameters.
+ enum ArraySizeModifier {
+ Normal, Static, Star
+ };
+private:
+ /// ElementType - The element type of the array.
+ QualType ElementType;
+
+protected:
+ // C++ [temp.dep.type]p1:
+ // A type is dependent if it is...
+ // - an array type constructed from any dependent type or whose
+ // size is specified by a constant expression that is
+ // value-dependent,
+ ArrayType(TypeClass tc, QualType et, QualType can,
+ ArraySizeModifier sm, unsigned tq,
+ bool ContainsUnexpandedParameterPack)
+ : Type(tc, can, et->isDependentType() || tc == DependentSizedArray,
+ et->isInstantiationDependentType() || tc == DependentSizedArray,
+ (tc == VariableArray || et->isVariablyModifiedType()),
+ ContainsUnexpandedParameterPack),
+ ElementType(et) {
+ ArrayTypeBits.IndexTypeQuals = tq;
+ ArrayTypeBits.SizeModifier = sm;
+ }
+
+ friend class ASTContext; // ASTContext creates these.
+
+public:
+ QualType getElementType() const { return ElementType; }
+ ArraySizeModifier getSizeModifier() const {
+ return ArraySizeModifier(ArrayTypeBits.SizeModifier);
+ }
+ Qualifiers getIndexTypeQualifiers() const {
+ return Qualifiers::fromCVRMask(getIndexTypeCVRQualifiers());
+ }
+ unsigned getIndexTypeCVRQualifiers() const {
+ return ArrayTypeBits.IndexTypeQuals;
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ConstantArray ||
+ T->getTypeClass() == VariableArray ||
+ T->getTypeClass() == IncompleteArray ||
+ T->getTypeClass() == DependentSizedArray;
+ }
+ static bool classof(const ArrayType *) { return true; }
+};
+
+/// ConstantArrayType - This class represents the canonical version of
+/// C arrays with a specified constant size. For example, the canonical
+/// type for 'int A[4 + 4*100]' is a ConstantArrayType where the element
+/// type is 'int' and the size is 404.
+class ConstantArrayType : public ArrayType {
+ llvm::APInt Size; // Allows us to unique the type.
+
+ ConstantArrayType(QualType et, QualType can, const llvm::APInt &size,
+ ArraySizeModifier sm, unsigned tq)
+ : ArrayType(ConstantArray, et, can, sm, tq,
+ et->containsUnexpandedParameterPack()),
+ Size(size) {}
+protected:
+ ConstantArrayType(TypeClass tc, QualType et, QualType can,
+ const llvm::APInt &size, ArraySizeModifier sm, unsigned tq)
+ : ArrayType(tc, et, can, sm, tq, et->containsUnexpandedParameterPack()),
+ Size(size) {}
+ friend class ASTContext; // ASTContext creates these.
+public:
+ const llvm::APInt &getSize() const { return Size; }
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+
+ /// \brief Determine the number of bits required to address a member of
+ /// an array with the given element type and number of elements.
+ static unsigned getNumAddressingBits(ASTContext &Context,
+ QualType ElementType,
+ const llvm::APInt &NumElements);
+
+ /// \brief Determine the maximum number of active bits that an array's size
+ /// can require, which limits the maximum size of the array.
+ static unsigned getMaxSizeBits(ASTContext &Context);
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getElementType(), getSize(),
+ getSizeModifier(), getIndexTypeCVRQualifiers());
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType ET,
+ const llvm::APInt &ArraySize, ArraySizeModifier SizeMod,
+ unsigned TypeQuals) {
+ ID.AddPointer(ET.getAsOpaquePtr());
+ ID.AddInteger(ArraySize.getZExtValue());
+ ID.AddInteger(SizeMod);
+ ID.AddInteger(TypeQuals);
+ }
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ConstantArray;
+ }
+ static bool classof(const ConstantArrayType *) { return true; }
+};
+
+/// IncompleteArrayType - This class represents C arrays with an unspecified
+/// size. For example 'int A[]' has an IncompleteArrayType where the element
+/// type is 'int' and the size is unspecified.
+class IncompleteArrayType : public ArrayType {
+
+ IncompleteArrayType(QualType et, QualType can,
+ ArraySizeModifier sm, unsigned tq)
+ : ArrayType(IncompleteArray, et, can, sm, tq,
+ et->containsUnexpandedParameterPack()) {}
+ friend class ASTContext; // ASTContext creates these.
+public:
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == IncompleteArray;
+ }
+ static bool classof(const IncompleteArrayType *) { return true; }
+
+ friend class StmtIteratorBase;
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getElementType(), getSizeModifier(),
+ getIndexTypeCVRQualifiers());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType ET,
+ ArraySizeModifier SizeMod, unsigned TypeQuals) {
+ ID.AddPointer(ET.getAsOpaquePtr());
+ ID.AddInteger(SizeMod);
+ ID.AddInteger(TypeQuals);
+ }
+};
+
+/// VariableArrayType - This class represents C arrays with a specified size
+/// which is not an integer-constant-expression. For example, 'int s[x+foo()]'.
+/// Since the size expression is an arbitrary expression, we store it as such.
+///
+/// Note: VariableArrayTypes aren't uniqued (since the expressions aren't) and
+/// should not be: two lexically equivalent variable array types could mean
+/// different things, for example, these variables do not have the same type
+/// dynamically:
+///
+/// void foo(int x) {
+/// int Y[x];
+/// ++x;
+/// int Z[x];
+/// }
+///
+class VariableArrayType : public ArrayType {
+ /// SizeExpr - An assignment expression. VLA's are only permitted within
+ /// a function block.
+ Stmt *SizeExpr;
+ /// Brackets - The left and right array brackets.
+ SourceRange Brackets;
+
+ VariableArrayType(QualType et, QualType can, Expr *e,
+ ArraySizeModifier sm, unsigned tq,
+ SourceRange brackets)
+ : ArrayType(VariableArray, et, can, sm, tq,
+ et->containsUnexpandedParameterPack()),
+ SizeExpr((Stmt*) e), Brackets(brackets) {}
+ friend class ASTContext; // ASTContext creates these.
+
+public:
+ Expr *getSizeExpr() const {
+ // We use C-style casts instead of cast<> here because we do not wish
+ // to have a dependency of Type.h on Stmt.h/Expr.h.
+ return (Expr*) SizeExpr;
+ }
+ SourceRange getBracketsRange() const { return Brackets; }
+ SourceLocation getLBracketLoc() const { return Brackets.getBegin(); }
+ SourceLocation getRBracketLoc() const { return Brackets.getEnd(); }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == VariableArray;
+ }
+ static bool classof(const VariableArrayType *) { return true; }
+
+ friend class StmtIteratorBase;
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ llvm_unreachable("Cannot unique VariableArrayTypes.");
+ }
+};
+
+/// DependentSizedArrayType - This type represents an array type in
+/// C++ whose size is a value-dependent expression. For example:
+///
+/// \code
+/// template<typename T, int Size>
+/// class array {
+/// T data[Size];
+/// };
+/// \endcode
+///
+/// For these types, we won't actually know what the array bound is
+/// until template instantiation occurs, at which point this will
+/// become either a ConstantArrayType or a VariableArrayType.
+class DependentSizedArrayType : public ArrayType {
+ const ASTContext &Context;
+
+ /// \brief An assignment expression that will instantiate to the
+ /// size of the array.
+ ///
+ /// The expression itself might be NULL, in which case the array
+ /// type will have its size deduced from an initializer.
+ Stmt *SizeExpr;
+
+ /// Brackets - The left and right array brackets.
+ SourceRange Brackets;
+
+ DependentSizedArrayType(const ASTContext &Context, QualType et, QualType can,
+ Expr *e, ArraySizeModifier sm, unsigned tq,
+ SourceRange brackets);
+
+ friend class ASTContext; // ASTContext creates these.
+
+public:
+ Expr *getSizeExpr() const {
+ // We use C-style casts instead of cast<> here because we do not wish
+ // to have a dependency of Type.h on Stmt.h/Expr.h.
+ return (Expr*) SizeExpr;
+ }
+ SourceRange getBracketsRange() const { return Brackets; }
+ SourceLocation getLBracketLoc() const { return Brackets.getBegin(); }
+ SourceLocation getRBracketLoc() const { return Brackets.getEnd(); }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == DependentSizedArray;
+ }
+ static bool classof(const DependentSizedArrayType *) { return true; }
+
+ friend class StmtIteratorBase;
+
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, Context, getElementType(),
+ getSizeModifier(), getIndexTypeCVRQualifiers(), getSizeExpr());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ QualType ET, ArraySizeModifier SizeMod,
+ unsigned TypeQuals, Expr *E);
+};
+
+/// DependentSizedExtVectorType - This type represent an extended vector type
+/// where either the type or size is dependent. For example:
+/// @code
+/// template<typename T, int Size>
+/// class vector {
+/// typedef T __attribute__((ext_vector_type(Size))) type;
+/// }
+/// @endcode
+class DependentSizedExtVectorType : public Type, public llvm::FoldingSetNode {
+ const ASTContext &Context;
+ Expr *SizeExpr;
+ /// ElementType - The element type of the array.
+ QualType ElementType;
+ SourceLocation loc;
+
+ DependentSizedExtVectorType(const ASTContext &Context, QualType ElementType,
+ QualType can, Expr *SizeExpr, SourceLocation loc);
+
+ friend class ASTContext;
+
+public:
+ Expr *getSizeExpr() const { return SizeExpr; }
+ QualType getElementType() const { return ElementType; }
+ SourceLocation getAttributeLoc() const { return loc; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == DependentSizedExtVector;
+ }
+ static bool classof(const DependentSizedExtVectorType *) { return true; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, Context, getElementType(), getSizeExpr());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ QualType ElementType, Expr *SizeExpr);
+};
+
+
+/// VectorType - GCC generic vector type. This type is created using
+/// __attribute__((vector_size(n))), where "n" specifies the vector size in
+/// bytes, or from an AltiVec __vector or vector declaration.
+/// Since the constructor takes the number of vector elements, the
+/// client is responsible for converting the size into the number of elements.
+class VectorType : public Type, public llvm::FoldingSetNode {
+public:
+ enum VectorKind {
+ GenericVector, // not a target-specific vector type
+ AltiVecVector, // is AltiVec vector
+ AltiVecPixel, // is AltiVec 'vector Pixel'
+ AltiVecBool, // is AltiVec 'vector bool ...'
+ NeonVector, // is ARM Neon vector
+ NeonPolyVector // is ARM Neon polynomial vector
+ };
+protected:
+ /// ElementType - The element type of the vector.
+ QualType ElementType;
+
+ VectorType(QualType vecType, unsigned nElements, QualType canonType,
+ VectorKind vecKind);
+
+ VectorType(TypeClass tc, QualType vecType, unsigned nElements,
+ QualType canonType, VectorKind vecKind);
+
+ friend class ASTContext; // ASTContext creates these.
+
+public:
+
+ QualType getElementType() const { return ElementType; }
+ unsigned getNumElements() const { return VectorTypeBits.NumElements; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ VectorKind getVectorKind() const {
+ return VectorKind(VectorTypeBits.VecKind);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getElementType(), getNumElements(),
+ getTypeClass(), getVectorKind());
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType ElementType,
+ unsigned NumElements, TypeClass TypeClass,
+ VectorKind VecKind) {
+ ID.AddPointer(ElementType.getAsOpaquePtr());
+ ID.AddInteger(NumElements);
+ ID.AddInteger(TypeClass);
+ ID.AddInteger(VecKind);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == Vector || T->getTypeClass() == ExtVector;
+ }
+ static bool classof(const VectorType *) { return true; }
+};
+
+/// ExtVectorType - Extended vector type. This type is created using
+/// __attribute__((ext_vector_type(n))), where "n" is the number of elements.
+/// Unlike vector_size, ext_vector_type is only allowed on typedefs. This
+/// class enables syntactic extensions, like Vector Components for accessing
+/// points, colors, and textures (modeled after OpenGL Shading Language).
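+///
+/// For illustration (the typedef name 'float4' is arbitrary):
+/// \code
+/// typedef float float4 __attribute__((ext_vector_type(4)));
+/// float4 v = {1, 2, 3, 4};
+/// float4 w = v.wzyx;   // swizzle via vector components
+/// float s = v.x;       // single point accessor
+/// \endcode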
+class ExtVectorType : public VectorType {
+ ExtVectorType(QualType vecType, unsigned nElements, QualType canonType) :
+ VectorType(ExtVector, vecType, nElements, canonType, GenericVector) {}
+ friend class ASTContext; // ASTContext creates these.
+public:
+ static int getPointAccessorIdx(char c) {
+ switch (c) {
+ default: return -1;
+ case 'x': return 0;
+ case 'y': return 1;
+ case 'z': return 2;
+ case 'w': return 3;
+ }
+ }
+ static int getNumericAccessorIdx(char c) {
+ switch (c) {
+ default: return -1;
+ case '0': return 0;
+ case '1': return 1;
+ case '2': return 2;
+ case '3': return 3;
+ case '4': return 4;
+ case '5': return 5;
+ case '6': return 6;
+ case '7': return 7;
+ case '8': return 8;
+ case '9': return 9;
+ case 'A':
+ case 'a': return 10;
+ case 'B':
+ case 'b': return 11;
+ case 'C':
+ case 'c': return 12;
+ case 'D':
+ case 'd': return 13;
+ case 'E':
+ case 'e': return 14;
+ case 'F':
+ case 'f': return 15;
+ }
+ }
+
+ static int getAccessorIdx(char c) {
+ if (int idx = getPointAccessorIdx(c)+1) return idx-1;
+ return getNumericAccessorIdx(c);
+ }
+
+ bool isAccessorWithinNumElements(char c) const {
+ if (int idx = getAccessorIdx(c)+1)
+ return unsigned(idx-1) < getNumElements();
+ return false;
+ }
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ExtVector;
+ }
+ static bool classof(const ExtVectorType *) { return true; }
+};
+
+/// FunctionType - C99 6.7.5.3 - Function Declarators. This is the common base
+/// class of FunctionNoProtoType and FunctionProtoType.
+///
+class FunctionType : public Type {
+ // The type returned by the function.
+ QualType ResultType;
+
+ public:
+ /// ExtInfo - A class which abstracts out some details necessary for
+ /// making a call.
+ ///
+ /// It is not actually used directly for storing this information in
+ /// a FunctionType, although FunctionType does currently use the
+ /// same bit-pattern.
+ ///
+ // If you add a field (say Foo), then besides the obvious places (both
+ // constructors, and wherever a compile failure points you), you need to update
+ // * Operator==
+ // * getFoo
+ // * withFoo
+ // * FunctionType. Add Foo, getFoo.
+ // * ASTContext::getFooType
+ // * ASTContext::mergeFunctionTypes
+ // * FunctionNoProtoType::Profile
+ // * FunctionProtoType::Profile
+ // * TypePrinter::PrintFunctionProto
+ // * AST read and write
+ // * Codegen
+ class ExtInfo {
+ // Feel free to rearrange or add bits, but if you go over 8,
+ // you'll need to adjust both the Bits field below and
+ // Type::FunctionTypeBitfields.
+
+ // | CC |noreturn|produces|regparm|
+ // |0 .. 2| 3 | 4 | 5 .. 7|
+ //
+ // regparm is either 0 (no regparm attribute) or the regparm value+1.
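+ //
+ // For illustration, using the masks below: a noreturn function with
+ // regparm 2 and calling-convention value cc is encoded as
+ //   Bits = cc | NoReturnMask | ((2 + 1) << RegParmOffset)
+ //        = cc | 0x8 | 0x60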
+ enum { CallConvMask = 0x7 };
+ enum { NoReturnMask = 0x8 };
+ enum { ProducesResultMask = 0x10 };
+ enum { RegParmMask = ~(CallConvMask | NoReturnMask | ProducesResultMask),
+ RegParmOffset = 5 }; // Assumed to be the last field
+
+ uint16_t Bits;
+
+ ExtInfo(unsigned Bits) : Bits(static_cast<uint16_t>(Bits)) {}
+
+ friend class FunctionType;
+
+ public:
+ // Constructor with no defaults. Use this when you know that you
+ // have all the elements (when reading an AST file for example).
+ ExtInfo(bool noReturn, bool hasRegParm, unsigned regParm, CallingConv cc,
+ bool producesResult) {
+ assert((!hasRegParm || regParm < 7) && "Invalid regparm value");
+ Bits = ((unsigned) cc) |
+ (noReturn ? NoReturnMask : 0) |
+ (producesResult ? ProducesResultMask : 0) |
+ (hasRegParm ? ((regParm + 1) << RegParmOffset) : 0);
+ }
+
+ // Constructor with all defaults. Use this when, for example, creating a
+ // function known to use the defaults.
+ ExtInfo() : Bits(0) {}
+
+ bool getNoReturn() const { return Bits & NoReturnMask; }
+ bool getProducesResult() const { return Bits & ProducesResultMask; }
+ bool getHasRegParm() const { return (Bits >> RegParmOffset) != 0; }
+ unsigned getRegParm() const {
+ unsigned RegParm = Bits >> RegParmOffset;
+ if (RegParm > 0)
+ --RegParm;
+ return RegParm;
+ }
+ CallingConv getCC() const { return CallingConv(Bits & CallConvMask); }
+
+ bool operator==(ExtInfo Other) const {
+ return Bits == Other.Bits;
+ }
+ bool operator!=(ExtInfo Other) const {
+ return Bits != Other.Bits;
+ }
+
+ // Note that we don't have setters. That is by design; use the following
+ // 'with' methods to obtain modified copies instead of mutating these
+ // objects.
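+ //
+ // For example (illustrative only; CC_X86StdCall is one of the
+ // CallingConv enumerators):
+ //   ExtInfo Info = ExtInfo().withNoReturn(true)
+ //                           .withCallingConv(CC_X86StdCall);
+ //   assert(Info.getNoReturn() && Info.getCC() == CC_X86StdCall);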
+
+ ExtInfo withNoReturn(bool noReturn) const {
+ if (noReturn)
+ return ExtInfo(Bits | NoReturnMask);
+ else
+ return ExtInfo(Bits & ~NoReturnMask);
+ }
+
+ ExtInfo withProducesResult(bool producesResult) const {
+ if (producesResult)
+ return ExtInfo(Bits | ProducesResultMask);
+ else
+ return ExtInfo(Bits & ~ProducesResultMask);
+ }
+
+ ExtInfo withRegParm(unsigned RegParm) const {
+ assert(RegParm < 7 && "Invalid regparm value");
+ return ExtInfo((Bits & ~RegParmMask) |
+ ((RegParm + 1) << RegParmOffset));
+ }
+
+ ExtInfo withCallingConv(CallingConv cc) const {
+ return ExtInfo((Bits & ~CallConvMask) | (unsigned) cc);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(Bits);
+ }
+ };
+
+protected:
+ FunctionType(TypeClass tc, QualType res,
+ unsigned typeQuals, RefQualifierKind RefQualifier,
+ QualType Canonical, bool Dependent,
+ bool InstantiationDependent,
+ bool VariablyModified, bool ContainsUnexpandedParameterPack,
+ ExtInfo Info)
+ : Type(tc, Canonical, Dependent, InstantiationDependent, VariablyModified,
+ ContainsUnexpandedParameterPack),
+ ResultType(res) {
+ FunctionTypeBits.ExtInfo = Info.Bits;
+ FunctionTypeBits.TypeQuals = typeQuals;
+ FunctionTypeBits.RefQualifier = static_cast<unsigned>(RefQualifier);
+ }
+ unsigned getTypeQuals() const { return FunctionTypeBits.TypeQuals; }
+
+ RefQualifierKind getRefQualifier() const {
+ return static_cast<RefQualifierKind>(FunctionTypeBits.RefQualifier);
+ }
+
+public:
+
+ QualType getResultType() const { return ResultType; }
+
+ bool getHasRegParm() const { return getExtInfo().getHasRegParm(); }
+ unsigned getRegParmType() const { return getExtInfo().getRegParm(); }
+ bool getNoReturnAttr() const { return getExtInfo().getNoReturn(); }
+ CallingConv getCallConv() const { return getExtInfo().getCC(); }
+ ExtInfo getExtInfo() const { return ExtInfo(FunctionTypeBits.ExtInfo); }
+
+ /// \brief Determine the type of an expression that calls a function of
+ /// this type.
+ QualType getCallResultType(ASTContext &Context) const {
+ return getResultType().getNonLValueExprType(Context);
+ }
+
+ static StringRef getNameForCallConv(CallingConv CC);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == FunctionNoProto ||
+ T->getTypeClass() == FunctionProto;
+ }
+ static bool classof(const FunctionType *) { return true; }
+};
+
+/// FunctionNoProtoType - Represents a K&R-style 'int foo()' function, which has
+/// no information available about its arguments.
+class FunctionNoProtoType : public FunctionType, public llvm::FoldingSetNode {
+ FunctionNoProtoType(QualType Result, QualType Canonical, ExtInfo Info)
+ : FunctionType(FunctionNoProto, Result, 0, RQ_None, Canonical,
+ /*Dependent=*/false, /*InstantiationDependent=*/false,
+ Result->isVariablyModifiedType(),
+ /*ContainsUnexpandedParameterPack=*/false, Info) {}
+
+ friend class ASTContext; // ASTContext creates these.
+
+public:
+ // No additional state past what FunctionType provides.
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getResultType(), getExtInfo());
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType ResultType,
+ ExtInfo Info) {
+ Info.Profile(ID);
+ ID.AddPointer(ResultType.getAsOpaquePtr());
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == FunctionNoProto;
+ }
+ static bool classof(const FunctionNoProtoType *) { return true; }
+};
+
+/// FunctionProtoType - Represents a prototype with argument type info, e.g.
+/// 'int foo(int)' or 'int foo(void)'. 'void' is represented as having no
+/// arguments, not as having a single void argument. Such a type can have an
+/// exception specification, but this specification is not part of the canonical
+/// type.
+class FunctionProtoType : public FunctionType, public llvm::FoldingSetNode {
+public:
+ /// ExtProtoInfo - Extra information about a function prototype.
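+ ///
+ /// For illustration, a sketch of how a caller might describe a variadic,
+ /// const-qualified prototype before handing it, together with the result
+ /// and argument types, to ASTContext::getFunctionType:
+ /// \code
+ /// FunctionProtoType::ExtProtoInfo EPI;
+ /// EPI.Variadic = true;
+ /// EPI.TypeQuals = Qualifiers::Const;
+ /// EPI.RefQualifier = RQ_LValue;
+ /// \endcode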
+ struct ExtProtoInfo {
+ ExtProtoInfo() :
+ Variadic(false), HasTrailingReturn(false), TypeQuals(0),
+ ExceptionSpecType(EST_None), RefQualifier(RQ_None),
+ NumExceptions(0), Exceptions(0), NoexceptExpr(0), ConsumedArguments(0) {}
+
+ FunctionType::ExtInfo ExtInfo;
+ bool Variadic : 1;
+ bool HasTrailingReturn : 1;
+ unsigned char TypeQuals;
+ ExceptionSpecificationType ExceptionSpecType;
+ RefQualifierKind RefQualifier;
+ unsigned NumExceptions;
+ const QualType *Exceptions;
+ Expr *NoexceptExpr;
+ const bool *ConsumedArguments;
+ };
+
+private:
+ /// \brief Determine whether there are any argument types that
+ /// contain an unexpanded parameter pack.
+ static bool containsAnyUnexpandedParameterPack(const QualType *ArgArray,
+ unsigned numArgs) {
+ for (unsigned Idx = 0; Idx < numArgs; ++Idx)
+ if (ArgArray[Idx]->containsUnexpandedParameterPack())
+ return true;
+
+ return false;
+ }
+
+ FunctionProtoType(QualType result, const QualType *args, unsigned numArgs,
+ QualType canonical, const ExtProtoInfo &epi);
+
+ /// NumArgs - The number of arguments this function has, not counting '...'.
+ unsigned NumArgs : 17;
+
+ /// NumExceptions - The number of types in the exception spec, if any.
+ unsigned NumExceptions : 9;
+
+ /// ExceptionSpecType - The type of exception specification this function has.
+ unsigned ExceptionSpecType : 3;
+
+ /// HasAnyConsumedArgs - Whether this function has any consumed arguments.
+ unsigned HasAnyConsumedArgs : 1;
+
+ /// Variadic - Whether the function is variadic.
+ unsigned Variadic : 1;
+
+ /// HasTrailingReturn - Whether this function has a trailing return type.
+ unsigned HasTrailingReturn : 1;
+
+ // ArgInfo - There is a variable size array after the class in memory that
+ // holds the argument types.
+
+ // Exceptions - There is another variable size array after ArgInfo that
+ // holds the exception types.
+
+ // NoexceptExpr - Instead of Exceptions, there may be a single Expr* pointing
+ // to the expression in the noexcept() specifier.
+
+ // ConsumedArgs - A variable size array, following Exceptions
+ // and of length NumArgs, holding flags indicating which arguments
+ // are consumed. This only appears if HasAnyConsumedArgs is true.
+
+ friend class ASTContext; // ASTContext creates these.
+
+ const bool *getConsumedArgsBuffer() const {
+ assert(hasAnyConsumedArgs());
+
+ // Find the end of the exceptions.
+ Expr * const *eh_end = reinterpret_cast<Expr * const *>(arg_type_end());
+ if (getExceptionSpecType() != EST_ComputedNoexcept)
+ eh_end += NumExceptions;
+ else
+ eh_end += 1; // NoexceptExpr
+
+ return reinterpret_cast<const bool*>(eh_end);
+ }
+
+public:
+ unsigned getNumArgs() const { return NumArgs; }
+ QualType getArgType(unsigned i) const {
+ assert(i < NumArgs && "Invalid argument number!");
+ return arg_type_begin()[i];
+ }
+
+ ExtProtoInfo getExtProtoInfo() const {
+ ExtProtoInfo EPI;
+ EPI.ExtInfo = getExtInfo();
+ EPI.Variadic = isVariadic();
+ EPI.HasTrailingReturn = hasTrailingReturn();
+ EPI.ExceptionSpecType = getExceptionSpecType();
+ EPI.TypeQuals = static_cast<unsigned char>(getTypeQuals());
+ EPI.RefQualifier = getRefQualifier();
+ if (EPI.ExceptionSpecType == EST_Dynamic) {
+ EPI.NumExceptions = NumExceptions;
+ EPI.Exceptions = exception_begin();
+ } else if (EPI.ExceptionSpecType == EST_ComputedNoexcept) {
+ EPI.NoexceptExpr = getNoexceptExpr();
+ }
+ if (hasAnyConsumedArgs())
+ EPI.ConsumedArguments = getConsumedArgsBuffer();
+ return EPI;
+ }
+
+ /// \brief Get the kind of exception specification on this function.
+ ExceptionSpecificationType getExceptionSpecType() const {
+ return static_cast<ExceptionSpecificationType>(ExceptionSpecType);
+ }
+ /// \brief Return whether this function has any kind of exception spec.
+ bool hasExceptionSpec() const {
+ return getExceptionSpecType() != EST_None;
+ }
+ /// \brief Return whether this function has a dynamic (throw) exception spec.
+ bool hasDynamicExceptionSpec() const {
+ return isDynamicExceptionSpec(getExceptionSpecType());
+ }
+ /// \brief Return whether this function has a noexcept exception spec.
+ bool hasNoexceptExceptionSpec() const {
+ return isNoexceptExceptionSpec(getExceptionSpecType());
+ }
+ /// \brief Result type of getNoexceptSpec().
+ enum NoexceptResult {
+ NR_NoNoexcept, ///< There is no noexcept specifier.
+ NR_BadNoexcept, ///< The noexcept specifier has a bad expression.
+ NR_Dependent, ///< The noexcept specifier is dependent.
+ NR_Throw, ///< The noexcept specifier evaluates to false.
+ NR_Nothrow ///< The noexcept specifier evaluates to true.
+ };
+ /// \brief Get the meaning of the noexcept spec on this function, if any.
+ NoexceptResult getNoexceptSpec(ASTContext &Ctx) const;
+ unsigned getNumExceptions() const { return NumExceptions; }
+ QualType getExceptionType(unsigned i) const {
+ assert(i < NumExceptions && "Invalid exception number!");
+ return exception_begin()[i];
+ }
+ Expr *getNoexceptExpr() const {
+ if (getExceptionSpecType() != EST_ComputedNoexcept)
+ return 0;
+ // NoexceptExpr sits where the arguments end.
+ return *reinterpret_cast<Expr *const *>(arg_type_end());
+ }
+ bool isNothrow(ASTContext &Ctx) const {
+ ExceptionSpecificationType EST = getExceptionSpecType();
+ assert(EST != EST_Delayed);
+ if (EST == EST_DynamicNone || EST == EST_BasicNoexcept)
+ return true;
+ if (EST != EST_ComputedNoexcept)
+ return false;
+ return getNoexceptSpec(Ctx) == NR_Nothrow;
+ }
+
+ bool isVariadic() const { return Variadic; }
+
+ /// \brief Determines whether this function prototype contains a
+ /// parameter pack at the end.
+ ///
+ /// A function template whose last parameter is a parameter pack can be
+ /// called with an arbitrary number of arguments, much like a variadic
+ /// function.
+ bool isTemplateVariadic() const;
+
+ bool hasTrailingReturn() const { return HasTrailingReturn; }
+
+ unsigned getTypeQuals() const { return FunctionType::getTypeQuals(); }
+
+
+ /// \brief Retrieve the ref-qualifier associated with this function type.
+ RefQualifierKind getRefQualifier() const {
+ return FunctionType::getRefQualifier();
+ }
+
+ typedef const QualType *arg_type_iterator;
+ arg_type_iterator arg_type_begin() const {
+ return reinterpret_cast<const QualType *>(this+1);
+ }
+ arg_type_iterator arg_type_end() const { return arg_type_begin()+NumArgs; }
+
+ typedef const QualType *exception_iterator;
+ exception_iterator exception_begin() const {
+ // exceptions begin where arguments end
+ return arg_type_end();
+ }
+ exception_iterator exception_end() const {
+ if (getExceptionSpecType() != EST_Dynamic)
+ return exception_begin();
+ return exception_begin() + NumExceptions;
+ }
+
+ bool hasAnyConsumedArgs() const {
+ return HasAnyConsumedArgs;
+ }
+ bool isArgConsumed(unsigned I) const {
+ assert(I < getNumArgs() && "argument index out of range!");
+ if (hasAnyConsumedArgs())
+ return getConsumedArgsBuffer()[I];
+ return false;
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void printExceptionSpecification(std::string &S,
+ PrintingPolicy Policy) const;
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == FunctionProto;
+ }
+ static bool classof(const FunctionProtoType *) { return true; }
+
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx);
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Result,
+ arg_type_iterator ArgTys, unsigned NumArgs,
+ const ExtProtoInfo &EPI, const ASTContext &Context);
+};
+
+
+/// \brief Represents the dependent type named by a dependently-scoped
+/// typename using declaration, e.g.
+/// using typename Base<T>::foo;
+/// Template instantiation turns these into the underlying type.
+class UnresolvedUsingType : public Type {
+ UnresolvedUsingTypenameDecl *Decl;
+
+ UnresolvedUsingType(const UnresolvedUsingTypenameDecl *D)
+ : Type(UnresolvedUsing, QualType(), true, true, false,
+ /*ContainsUnexpandedParameterPack=*/false),
+ Decl(const_cast<UnresolvedUsingTypenameDecl*>(D)) {}
+ friend class ASTContext; // ASTContext creates these.
+public:
+
+ UnresolvedUsingTypenameDecl *getDecl() const { return Decl; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == UnresolvedUsing;
+ }
+ static bool classof(const UnresolvedUsingType *) { return true; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ return Profile(ID, Decl);
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ UnresolvedUsingTypenameDecl *D) {
+ ID.AddPointer(D);
+ }
+};
+
+
+class TypedefType : public Type {
+ TypedefNameDecl *Decl;
+protected:
+ TypedefType(TypeClass tc, const TypedefNameDecl *D, QualType can)
+ : Type(tc, can, can->isDependentType(),
+ can->isInstantiationDependentType(),
+ can->isVariablyModifiedType(),
+ /*ContainsUnexpandedParameterPack=*/false),
+ Decl(const_cast<TypedefNameDecl*>(D)) {
+ assert(!isa<TypedefType>(can) && "Invalid canonical type");
+ }
+ friend class ASTContext; // ASTContext creates these.
+public:
+
+ TypedefNameDecl *getDecl() const { return Decl; }
+
+ bool isSugared() const { return true; }
+ QualType desugar() const;
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Typedef; }
+ static bool classof(const TypedefType *) { return true; }
+};
+
+/// TypeOfExprType (GCC extension).
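+/// For example:
+/// \code
+/// int n;
+/// typeof(n + 1) m;   // 'typeof(n + 1)' is a TypeOfExprType, sugar for 'int'
+/// \endcode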
+class TypeOfExprType : public Type {
+ Expr *TOExpr;
+
+protected:
+ TypeOfExprType(Expr *E, QualType can = QualType());
+ friend class ASTContext; // ASTContext creates these.
+public:
+ Expr *getUnderlyingExpr() const { return TOExpr; }
+
+ /// \brief Remove a single level of sugar.
+ QualType desugar() const;
+
+ /// \brief Returns whether this type directly provides sugar.
+ bool isSugared() const;
+
+ static bool classof(const Type *T) { return T->getTypeClass() == TypeOfExpr; }
+ static bool classof(const TypeOfExprType *) { return true; }
+};
+
+/// \brief Internal representation of canonical, dependent
+/// typeof(expr) types.
+///
+/// This class is used internally by the ASTContext to manage
+/// canonical, dependent types only. Clients will only see instances
+/// of this class via TypeOfExprType nodes.
+class DependentTypeOfExprType
+ : public TypeOfExprType, public llvm::FoldingSetNode {
+ const ASTContext &Context;
+
+public:
+ DependentTypeOfExprType(const ASTContext &Context, Expr *E)
+ : TypeOfExprType(E), Context(Context) { }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, Context, getUnderlyingExpr());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ Expr *E);
+};
+
+/// TypeOfType (GCC extension).
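+/// For example:
+/// \code
+/// typeof(int *) p;   // 'typeof(int *)' is a TypeOfType, sugar for 'int *'
+/// \endcode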
+class TypeOfType : public Type {
+ QualType TOType;
+ TypeOfType(QualType T, QualType can)
+ : Type(TypeOf, can, T->isDependentType(),
+ T->isInstantiationDependentType(),
+ T->isVariablyModifiedType(),
+ T->containsUnexpandedParameterPack()),
+ TOType(T) {
+ assert(!isa<TypedefType>(can) && "Invalid canonical type");
+ }
+ friend class ASTContext; // ASTContext creates these.
+public:
+ QualType getUnderlyingType() const { return TOType; }
+
+ /// \brief Remove a single level of sugar.
+ QualType desugar() const { return getUnderlyingType(); }
+
+ /// \brief Returns whether this type directly provides sugar.
+ bool isSugared() const { return true; }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == TypeOf; }
+ static bool classof(const TypeOfType *) { return true; }
+};
+
+/// DecltypeType (C++0x)
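+/// For example:
+/// \code
+/// int n;
+/// decltype(n) m = n;     // decltype(n) is 'int'
+/// decltype((n)) r = n;   // decltype((n)) is 'int &'
+/// \endcode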
+class DecltypeType : public Type {
+ Expr *E;
+ QualType UnderlyingType;
+
+protected:
+ DecltypeType(Expr *E, QualType underlyingType, QualType can = QualType());
+ friend class ASTContext; // ASTContext creates these.
+public:
+ Expr *getUnderlyingExpr() const { return E; }
+ QualType getUnderlyingType() const { return UnderlyingType; }
+
+ /// \brief Remove a single level of sugar.
+ QualType desugar() const;
+
+ /// \brief Returns whether this type directly provides sugar.
+ bool isSugared() const;
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Decltype; }
+ static bool classof(const DecltypeType *) { return true; }
+};
+
+/// \brief Internal representation of canonical, dependent
+/// decltype(expr) types.
+///
+/// This class is used internally by the ASTContext to manage
+/// canonical, dependent types only. Clients will only see instances
+/// of this class via DecltypeType nodes.
+class DependentDecltypeType : public DecltypeType, public llvm::FoldingSetNode {
+ const ASTContext &Context;
+
+public:
+ DependentDecltypeType(const ASTContext &Context, Expr *E);
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, Context, getUnderlyingExpr());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ Expr *E);
+};
+
+/// \brief A unary type transform, which is a type constructed from another
+/// type.
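+///
+/// For example, the \c __underlying_type type trait (the EnumUnderlyingType
+/// kind below):
+/// \code
+/// enum E : short { e };
+/// typedef __underlying_type(E) T;   // T is 'short'
+/// \endcode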
+class UnaryTransformType : public Type {
+public:
+ enum UTTKind {
+ EnumUnderlyingType
+ };
+
+private:
+ /// The untransformed type.
+ QualType BaseType;
+ /// The transformed type if not dependent, otherwise the same as BaseType.
+ QualType UnderlyingType;
+
+ UTTKind UKind;
+protected:
+ UnaryTransformType(QualType BaseTy, QualType UnderlyingTy, UTTKind UKind,
+ QualType CanonicalTy);
+ friend class ASTContext;
+public:
+ bool isSugared() const { return !isDependentType(); }
+ QualType desugar() const { return UnderlyingType; }
+
+ QualType getUnderlyingType() const { return UnderlyingType; }
+ QualType getBaseType() const { return BaseType; }
+
+ UTTKind getUTTKind() const { return UKind; }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == UnaryTransform;
+ }
+ static bool classof(const UnaryTransformType *) { return true; }
+};
+
+class TagType : public Type {
+ /// Stores the TagDecl associated with this type. The decl may point to any
+ /// TagDecl that declares the entity.
+ TagDecl * decl;
+
+ friend class ASTReader;
+
+protected:
+ TagType(TypeClass TC, const TagDecl *D, QualType can);
+
+public:
+ TagDecl *getDecl() const;
+
+ /// @brief Determines whether this type is in the process of being
+ /// defined.
+ bool isBeingDefined() const;
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() >= TagFirst && T->getTypeClass() <= TagLast;
+ }
+ static bool classof(const TagType *) { return true; }
+};
+
+/// RecordType - This is a helper class that allows the use of isa/cast/dyncast
+/// to detect TagType objects of structs/unions/classes.
+class RecordType : public TagType {
+protected:
+ explicit RecordType(const RecordDecl *D)
+ : TagType(Record, reinterpret_cast<const TagDecl*>(D), QualType()) { }
+ explicit RecordType(TypeClass TC, RecordDecl *D)
+ : TagType(TC, reinterpret_cast<const TagDecl*>(D), QualType()) { }
+ friend class ASTContext; // ASTContext creates these.
+public:
+
+ RecordDecl *getDecl() const {
+ return reinterpret_cast<RecordDecl*>(TagType::getDecl());
+ }
+
+ // FIXME: This predicate is a helper to QualType/Type. It needs to
+ // recursively check all fields for const-ness. If any field is declared
+ // const, it needs to return true.
+ bool hasConstFields() const { return false; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Record; }
+ static bool classof(const RecordType *) { return true; }
+};
+
+/// EnumType - This is a helper class that allows the use of isa/cast/dyncast
+/// to detect TagType objects of enums.
+class EnumType : public TagType {
+ explicit EnumType(const EnumDecl *D)
+ : TagType(Enum, reinterpret_cast<const TagDecl*>(D), QualType()) { }
+ friend class ASTContext; // ASTContext creates these.
+public:
+
+ EnumDecl *getDecl() const {
+ return reinterpret_cast<EnumDecl*>(TagType::getDecl());
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) { return T->getTypeClass() == Enum; }
+ static bool classof(const EnumType *) { return true; }
+};
+
+/// AttributedType - An attributed type is a type to which a type
+/// attribute has been applied. The "modified type" is the
+/// fully-sugared type to which the attributed type was applied;
+/// generally it is not canonically equivalent to the attributed type.
+/// The "equivalent type" is the minimally-desugared type which the
+/// type is canonically equivalent to.
+///
+/// For example, in the following attributed type:
+/// int32_t __attribute__((vector_size(16)))
+/// - the modified type is the TypedefType for int32_t
+/// - the equivalent type is VectorType(16, int32_t)
+/// - the canonical type is VectorType(16, int)
+class AttributedType : public Type, public llvm::FoldingSetNode {
+public:
+ // It is really silly to have yet another attribute-kind enum, but
+ // clang::attr::Kind doesn't currently cover the pure type attrs.
+ enum Kind {
+ // Expression operand.
+ attr_address_space,
+ attr_regparm,
+ attr_vector_size,
+ attr_neon_vector_type,
+ attr_neon_polyvector_type,
+
+ FirstExprOperandKind = attr_address_space,
+ LastExprOperandKind = attr_neon_polyvector_type,
+
+ // Enumerated operand (string or keyword).
+ attr_objc_gc,
+ attr_objc_ownership,
+ attr_pcs,
+
+ FirstEnumOperandKind = attr_objc_gc,
+ LastEnumOperandKind = attr_pcs,
+
+ // No operand.
+ attr_noreturn,
+ attr_cdecl,
+ attr_fastcall,
+ attr_stdcall,
+ attr_thiscall,
+ attr_pascal
+ };
+
+private:
+ QualType ModifiedType;
+ QualType EquivalentType;
+
+ friend class ASTContext; // creates these
+
+ AttributedType(QualType canon, Kind attrKind,
+ QualType modified, QualType equivalent)
+ : Type(Attributed, canon, canon->isDependentType(),
+ canon->isInstantiationDependentType(),
+ canon->isVariablyModifiedType(),
+ canon->containsUnexpandedParameterPack()),
+ ModifiedType(modified), EquivalentType(equivalent) {
+ AttributedTypeBits.AttrKind = attrKind;
+ }
+
+public:
+ Kind getAttrKind() const {
+ return static_cast<Kind>(AttributedTypeBits.AttrKind);
+ }
+
+ QualType getModifiedType() const { return ModifiedType; }
+ QualType getEquivalentType() const { return EquivalentType; }
+
+ bool isSugared() const { return true; }
+ QualType desugar() const { return getEquivalentType(); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getAttrKind(), ModifiedType, EquivalentType);
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, Kind attrKind,
+ QualType modified, QualType equivalent) {
+ ID.AddInteger(attrKind);
+ ID.AddPointer(modified.getAsOpaquePtr());
+ ID.AddPointer(equivalent.getAsOpaquePtr());
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == Attributed;
+ }
+ static bool classof(const AttributedType *T) { return true; }
+};
+
+class TemplateTypeParmType : public Type, public llvm::FoldingSetNode {
+ // Helper data collector for canonical types.
+ struct CanonicalTTPTInfo {
+ unsigned Depth : 15;
+ unsigned ParameterPack : 1;
+ unsigned Index : 16;
+ };
+
+ union {
+ // Info for the canonical type.
+ CanonicalTTPTInfo CanTTPTInfo;
+ // Info for the non-canonical type.
+ TemplateTypeParmDecl *TTPDecl;
+ };
+
+ /// Build a non-canonical type.
+ TemplateTypeParmType(TemplateTypeParmDecl *TTPDecl, QualType Canon)
+ : Type(TemplateTypeParm, Canon, /*Dependent=*/true,
+ /*InstantiationDependent=*/true,
+ /*VariablyModified=*/false,
+ Canon->containsUnexpandedParameterPack()),
+ TTPDecl(TTPDecl) { }
+
+ /// Build the canonical type.
+ TemplateTypeParmType(unsigned D, unsigned I, bool PP)
+ : Type(TemplateTypeParm, QualType(this, 0),
+ /*Dependent=*/true,
+ /*InstantiationDependent=*/true,
+ /*VariablyModified=*/false, PP) {
+ CanTTPTInfo.Depth = D;
+ CanTTPTInfo.Index = I;
+ CanTTPTInfo.ParameterPack = PP;
+ }
+
+ friend class ASTContext; // ASTContext creates these
+
+ const CanonicalTTPTInfo& getCanTTPTInfo() const {
+ QualType Can = getCanonicalTypeInternal();
+ return Can->castAs<TemplateTypeParmType>()->CanTTPTInfo;
+ }
+
+public:
+ unsigned getDepth() const { return getCanTTPTInfo().Depth; }
+ unsigned getIndex() const { return getCanTTPTInfo().Index; }
+ bool isParameterPack() const { return getCanTTPTInfo().ParameterPack; }
+
+ TemplateTypeParmDecl *getDecl() const {
+ return isCanonicalUnqualified() ? 0 : TTPDecl;
+ }
+
+ IdentifierInfo *getIdentifier() const;
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getDepth(), getIndex(), isParameterPack(), getDecl());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, unsigned Depth,
+ unsigned Index, bool ParameterPack,
+ TemplateTypeParmDecl *TTPDecl) {
+ ID.AddInteger(Depth);
+ ID.AddInteger(Index);
+ ID.AddBoolean(ParameterPack);
+ ID.AddPointer(TTPDecl);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == TemplateTypeParm;
+ }
+ static bool classof(const TemplateTypeParmType *T) { return true; }
+};
+
+/// \brief Represents the result of substituting a type for a template
+/// type parameter.
+///
+/// Within an instantiated template, all template type parameters have
+/// been replaced with these. They are used solely to record that a
+/// type was originally written as a template type parameter;
+/// therefore they are never canonical.
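+///
+/// For illustration:
+/// \code
+/// template<typename T> void f(T t);
+/// // In the instantiation f<int>, the parameter is still written as 'T',
+/// // so its type is a SubstTemplateTypeParmType whose replaced parameter
+/// // is T and whose canonical (replacement) type is 'int'.
+/// \endcode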
+class SubstTemplateTypeParmType : public Type, public llvm::FoldingSetNode {
+ // The original type parameter.
+ const TemplateTypeParmType *Replaced;
+
+ SubstTemplateTypeParmType(const TemplateTypeParmType *Param, QualType Canon)
+ : Type(SubstTemplateTypeParm, Canon, Canon->isDependentType(),
+ Canon->isInstantiationDependentType(),
+ Canon->isVariablyModifiedType(),
+ Canon->containsUnexpandedParameterPack()),
+ Replaced(Param) { }
+
+ friend class ASTContext;
+
+public:
+ /// Gets the template parameter that was substituted for.
+ const TemplateTypeParmType *getReplacedParameter() const {
+ return Replaced;
+ }
+
+ /// Gets the type that was substituted for the template
+ /// parameter.
+ QualType getReplacementType() const {
+ return getCanonicalTypeInternal();
+ }
+
+ bool isSugared() const { return true; }
+ QualType desugar() const { return getReplacementType(); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getReplacedParameter(), getReplacementType());
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const TemplateTypeParmType *Replaced,
+ QualType Replacement) {
+ ID.AddPointer(Replaced);
+ ID.AddPointer(Replacement.getAsOpaquePtr());
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == SubstTemplateTypeParm;
+ }
+ static bool classof(const SubstTemplateTypeParmType *T) { return true; }
+};
+
+/// \brief Represents the result of substituting a set of types for a template
+/// type parameter pack.
+///
+/// When a pack expansion in the source code contains multiple parameter packs
+/// and those parameter packs correspond to different levels of template
+/// parameter lists, this type node is used to represent a template type
+/// parameter pack from an outer level, which has already had its argument pack
+/// substituted but that still lives within a pack expansion that itself
+/// could not be instantiated. When actually performing a substitution into
+/// that pack expansion (e.g., when all template parameters have corresponding
+/// arguments), this type will be replaced with the \c SubstTemplateTypeParmType
+/// at the current pack substitution index.
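+///
+/// For illustration, a sketch of one situation in which this arises
+/// (\c Pair is just a placeholder class template):
+/// \code
+/// template<typename ...Ts> struct Outer {
+///   template<typename ...Us> static void f(Pair<Ts, Us> ...ps);
+/// };
+/// // In Outer<int, float>, 'Ts' is known but Pair<Ts, Us>... still cannot
+/// // be expanded because 'Us' is not; the occurrence of 'Ts' inside that
+/// // pattern is a SubstTemplateTypeParmPackType holding the pack
+/// // <int, float>.
+/// \endcode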
+class SubstTemplateTypeParmPackType : public Type, public llvm::FoldingSetNode {
+ /// \brief The original type parameter.
+ const TemplateTypeParmType *Replaced;
+
+ /// \brief A pointer to the set of template arguments that this
+ /// parameter pack is instantiated with.
+ const TemplateArgument *Arguments;
+
+ /// \brief The number of template arguments in \c Arguments.
+ unsigned NumArguments;
+
+ SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
+ QualType Canon,
+ const TemplateArgument &ArgPack);
+
+ friend class ASTContext;
+
+public:
+ IdentifierInfo *getIdentifier() const { return Replaced->getIdentifier(); }
+
+ /// Gets the template parameter that was substituted for.
+ const TemplateTypeParmType *getReplacedParameter() const {
+ return Replaced;
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ TemplateArgument getArgumentPack() const;
+
+ void Profile(llvm::FoldingSetNodeID &ID);
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const TemplateTypeParmType *Replaced,
+ const TemplateArgument &ArgPack);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == SubstTemplateTypeParmPack;
+ }
+ static bool classof(const SubstTemplateTypeParmPackType *T) { return true; }
+};
+
+/// \brief Represents a C++0x auto type.
+///
+/// These types are usually a placeholder for a deduced type. However, within
+/// templates and before the initializer is attached, there is no deduced type
+/// and an auto type is type-dependent and canonical.
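+///
+/// For example:
+/// \code
+/// auto i = 0;   // deduced: the AutoType is sugar for 'int'
+///
+/// template<typename T> void g(T t) {
+///   auto x = t; // undeduced until instantiation: dependent and canonical
+/// }
+/// \endcode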
+class AutoType : public Type, public llvm::FoldingSetNode {
+ AutoType(QualType DeducedType)
+ : Type(Auto, DeducedType.isNull() ? QualType(this, 0) : DeducedType,
+ /*Dependent=*/DeducedType.isNull(),
+ /*InstantiationDependent=*/DeducedType.isNull(),
+ /*VariablyModified=*/false, /*ContainsUnexpandedParameterPack=*/false) {
+ assert((DeducedType.isNull() || !DeducedType->isDependentType()) &&
+ "deduced a dependent type for auto");
+ }
+
+ friend class ASTContext; // ASTContext creates these
+
+public:
+ bool isSugared() const { return isDeduced(); }
+ QualType desugar() const { return getCanonicalTypeInternal(); }
+
+ QualType getDeducedType() const {
+ return isDeduced() ? getCanonicalTypeInternal() : QualType();
+ }
+ bool isDeduced() const {
+ return !isDependentType();
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getDeducedType());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ QualType Deduced) {
+ ID.AddPointer(Deduced.getAsOpaquePtr());
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == Auto;
+ }
+ static bool classof(const AutoType *T) { return true; }
+};
+
+/// \brief Represents a type template specialization; the template
+/// must be a class template, a type alias template, or a template
+/// template parameter. A template which cannot be resolved to one of
+/// these, e.g. because it is written with a dependent scope
+/// specifier, is instead represented as a
+/// @c DependentTemplateSpecializationType.
+///
+/// A non-dependent template specialization type is always "sugar",
+/// typically for a @c RecordType. For example, a class template
+/// specialization type of @c vector<int> will refer to a tag type for
+/// the instantiation @c std::vector<int, std::allocator<int>>
+///
+/// Template specializations are dependent if either the template or
+/// any of the template arguments are dependent, in which case the
+/// type may also be canonical.
+///
+/// Instances of this type are allocated with a trailing array of
+/// TemplateArguments, followed by a QualType representing the
+/// non-canonical aliased type when the template is a type alias
+/// template.
+class TemplateSpecializationType
+ : public Type, public llvm::FoldingSetNode {
+ /// \brief The name of the template being specialized. This is
+ /// either a TemplateName::Template (in which case it is a
+ /// ClassTemplateDecl*, a TemplateTemplateParmDecl*, or a
+ /// TypeAliasTemplateDecl*), a
+ /// TemplateName::SubstTemplateTemplateParmPack, or a
+ /// TemplateName::SubstTemplateTemplateParm (in which case the
+ /// replacement must, recursively, be one of these).
+ TemplateName Template;
+
+ /// \brief The number of template arguments named in this class
+ /// template specialization.
+ unsigned NumArgs : 31;
+
+ /// \brief Whether this template specialization type is a substituted
+ /// type alias.
+ bool TypeAlias : 1;
+
+ TemplateSpecializationType(TemplateName T,
+ const TemplateArgument *Args,
+ unsigned NumArgs, QualType Canon,
+ QualType Aliased);
+
+ friend class ASTContext; // ASTContext creates these
+
+public:
+ /// \brief Determine whether any of the given template arguments are
+ /// dependent.
+ static bool anyDependentTemplateArguments(const TemplateArgument *Args,
+ unsigned NumArgs,
+ bool &InstantiationDependent);
+
+ static bool anyDependentTemplateArguments(const TemplateArgumentLoc *Args,
+ unsigned NumArgs,
+ bool &InstantiationDependent);
+
+ static bool anyDependentTemplateArguments(const TemplateArgumentListInfo &,
+ bool &InstantiationDependent);
+
+ /// \brief Print a template argument list, including the '<' and '>'
+ /// enclosing the template arguments.
+ static std::string PrintTemplateArgumentList(const TemplateArgument *Args,
+ unsigned NumArgs,
+ const PrintingPolicy &Policy,
+ bool SkipBrackets = false);
+
+ static std::string PrintTemplateArgumentList(const TemplateArgumentLoc *Args,
+ unsigned NumArgs,
+ const PrintingPolicy &Policy);
+
+ static std::string PrintTemplateArgumentList(const TemplateArgumentListInfo &,
+ const PrintingPolicy &Policy);
+
+ /// True if this template specialization type matches a current
+ /// instantiation in the context in which it is found.
+ bool isCurrentInstantiation() const {
+ return isa<InjectedClassNameType>(getCanonicalTypeInternal());
+ }
+
+ /// \brief Determine if this template specialization type is for a type alias
+ /// template that has been substituted.
+ ///
+ /// Nearly every template specialization type whose template is an alias
+ /// template will be substituted. However, this is not the case when
+ /// the specialization contains a pack expansion but the template alias
+ /// does not have a corresponding parameter pack, e.g.,
+ ///
+ /// \code
+ /// template<typename T, typename U, typename V> struct S;
+ /// template<typename T, typename U> using A = S<T, int, U>;
+ /// template<typename... Ts> struct X {
+ /// typedef A<Ts...> type; // not a type alias
+ /// };
+ /// \endcode
+ bool isTypeAlias() const { return TypeAlias; }
+
+ /// Get the aliased type, if this is a specialization of a type alias
+ /// template.
+ QualType getAliasedType() const {
+ assert(isTypeAlias() && "not a type alias template specialization");
+ return *reinterpret_cast<const QualType*>(end());
+ }
+
+ typedef const TemplateArgument * iterator;
+
+ iterator begin() const { return getArgs(); }
+ iterator end() const; // defined inline in TemplateBase.h
+
+ /// \brief Retrieve the name of the template that we are specializing.
+ TemplateName getTemplateName() const { return Template; }
+
+ /// \brief Retrieve the template arguments.
+ const TemplateArgument *getArgs() const {
+ return reinterpret_cast<const TemplateArgument *>(this + 1);
+ }
+
+ /// \brief Retrieve the number of template arguments.
+ unsigned getNumArgs() const { return NumArgs; }
+
+ /// \brief Retrieve a specific template argument as a type.
+ /// \pre @c isArgType(Arg)
+ const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h
+
+ bool isSugared() const {
+ return !isDependentType() || isCurrentInstantiation() || isTypeAlias();
+ }
+ QualType desugar() const { return getCanonicalTypeInternal(); }
+
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Ctx) {
+ Profile(ID, Template, getArgs(), NumArgs, Ctx);
+ if (isTypeAlias())
+ getAliasedType().Profile(ID);
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, TemplateName T,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ const ASTContext &Context);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == TemplateSpecialization;
+ }
+ static bool classof(const TemplateSpecializationType *T) { return true; }
+};
+
+/// \brief The injected class name of a C++ class template or class
+/// template partial specialization. Used to record that a type was
+/// spelled with a bare identifier rather than as a template-id; the
+/// equivalent for non-templated classes is just RecordType.
+///
+/// Injected class name types are always dependent. Template
+/// instantiation turns these into RecordTypes.
+///
+/// Injected class name types are always canonical. This works
+/// because it is impossible to compare an injected class name type
+/// with the corresponding non-injected template type, for the same
+/// reason that it is impossible to directly compare template
+/// parameters from different dependent contexts: injected class name
+/// types can only occur within the scope of a particular templated
+/// declaration, and within that scope every template specialization
+/// will canonicalize to the injected class name (when appropriate
+/// according to the rules of the language).
+class InjectedClassNameType : public Type {
+ CXXRecordDecl *Decl;
+
+ /// The template specialization which this type represents.
+ /// For example, in
+ /// template <class T> class A { ... };
+ /// this is A<T>, whereas in
+ /// template <class X, class Y> class A<B<X,Y> > { ... };
+ /// this is A<B<X,Y> >.
+ ///
+ /// It is always unqualified, always a template specialization type,
+ /// and always dependent.
+ QualType InjectedType;
+
+ friend class ASTContext; // ASTContext creates these.
+ friend class ASTReader; // FIXME: ASTContext::getInjectedClassNameType is not
+ // currently suitable for AST reading; there are too many
+ // interdependencies.
+ InjectedClassNameType(CXXRecordDecl *D, QualType TST)
+ : Type(InjectedClassName, QualType(), /*Dependent=*/true,
+ /*InstantiationDependent=*/true,
+ /*VariablyModified=*/false,
+ /*ContainsUnexpandedParameterPack=*/false),
+ Decl(D), InjectedType(TST) {
+ assert(isa<TemplateSpecializationType>(TST));
+ assert(!TST.hasQualifiers());
+ assert(TST->isDependentType());
+ }
+
+public:
+ QualType getInjectedSpecializationType() const { return InjectedType; }
+ const TemplateSpecializationType *getInjectedTST() const {
+ return cast<TemplateSpecializationType>(InjectedType.getTypePtr());
+ }
+
+ CXXRecordDecl *getDecl() const;
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == InjectedClassName;
+ }
+ static bool classof(const InjectedClassNameType *T) { return true; }
+};
+
+/// \brief The kind of a tag type.
+enum TagTypeKind {
+ /// \brief The "struct" keyword.
+ TTK_Struct,
+ /// \brief The "union" keyword.
+ TTK_Union,
+ /// \brief The "class" keyword.
+ TTK_Class,
+ /// \brief The "enum" keyword.
+ TTK_Enum
+};
+
+/// \brief The elaboration keyword that precedes a qualified type name or
+/// introduces an elaborated-type-specifier.
+enum ElaboratedTypeKeyword {
+ /// \brief The "struct" keyword introduces the elaborated-type-specifier.
+ ETK_Struct,
+ /// \brief The "union" keyword introduces the elaborated-type-specifier.
+ ETK_Union,
+ /// \brief The "class" keyword introduces the elaborated-type-specifier.
+ ETK_Class,
+ /// \brief The "enum" keyword introduces the elaborated-type-specifier.
+ ETK_Enum,
+ /// \brief The "typename" keyword precedes the qualified type name, e.g.,
+ /// \c typename T::type.
+ ETK_Typename,
+ /// \brief No keyword precedes the qualified type name.
+ ETK_None
+};
+
+/// A helper class for Type nodes having an ElaboratedTypeKeyword.
+/// The keyword is stored in the free bits of the base class.
+/// Also provides a few static helpers for converting and printing
+/// elaborated type keyword and tag type kind enumerations.
+class TypeWithKeyword : public Type {
+protected:
+ TypeWithKeyword(ElaboratedTypeKeyword Keyword, TypeClass tc,
+ QualType Canonical, bool Dependent,
+ bool InstantiationDependent, bool VariablyModified,
+ bool ContainsUnexpandedParameterPack)
+ : Type(tc, Canonical, Dependent, InstantiationDependent, VariablyModified,
+ ContainsUnexpandedParameterPack) {
+ TypeWithKeywordBits.Keyword = Keyword;
+ }
+
+public:
+ ElaboratedTypeKeyword getKeyword() const {
+ return static_cast<ElaboratedTypeKeyword>(TypeWithKeywordBits.Keyword);
+ }
+
+ /// getKeywordForTypeSpec - Converts a type specifier (DeclSpec::TST)
+ /// into an elaborated type keyword.
+ static ElaboratedTypeKeyword getKeywordForTypeSpec(unsigned TypeSpec);
+
+ /// getTagTypeKindForTypeSpec - Converts a type specifier (DeclSpec::TST)
+ /// into a tag type kind. It is an error to provide a type specifier
+ /// which *isn't* a tag kind here.
+ static TagTypeKind getTagTypeKindForTypeSpec(unsigned TypeSpec);
+
+ /// getKeywordForTagTypeKind - Converts a TagTypeKind into an
+ /// elaborated type keyword.
+ static ElaboratedTypeKeyword getKeywordForTagTypeKind(TagTypeKind Tag);
+
+ /// getTagTypeKindForKeyword - Converts an elaborated type keyword into
+ /// a TagTypeKind. It is an error to provide an elaborated type keyword
+ /// which *isn't* a tag kind here.
+ static TagTypeKind getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword);
+
+ static bool KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword);
+
+ static const char *getKeywordName(ElaboratedTypeKeyword Keyword);
+
+ static const char *getTagTypeKindName(TagTypeKind Kind) {
+ return getKeywordName(getKeywordForTagTypeKind(Kind));
+ }
+
+ class CannotCastToThisType {};
+ static CannotCastToThisType classof(const Type *);
+};
+
+/// \brief Represents a type that was referred to using an elaborated type
+/// keyword, e.g., struct S, or via a qualified name, e.g., N::M::type,
+/// or both.
+///
+/// This type is used to keep track of a type name as written in the
+/// source code, including tag keywords and any nested-name-specifiers.
+/// The type itself is always "sugar", used to express what was written
+/// in the source code but containing no additional semantic information.
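+///
+/// For example:
+/// \code
+/// struct S { typedef int type; };
+/// struct S s;      // 'struct S' is an ElaboratedType (tag keyword, no qualifier)
+/// S::type i = 0;   // 'S::type' is an ElaboratedType (qualifier, no keyword)
+/// \endcode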
+class ElaboratedType : public TypeWithKeyword, public llvm::FoldingSetNode {
+
+ /// \brief The nested name specifier containing the qualifier.
+ NestedNameSpecifier *NNS;
+
+ /// \brief The type that this qualified name refers to.
+ QualType NamedType;
+
+ ElaboratedType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
+ QualType NamedType, QualType CanonType)
+ : TypeWithKeyword(Keyword, Elaborated, CanonType,
+ NamedType->isDependentType(),
+ NamedType->isInstantiationDependentType(),
+ NamedType->isVariablyModifiedType(),
+ NamedType->containsUnexpandedParameterPack()),
+ NNS(NNS), NamedType(NamedType) {
+ assert(!(Keyword == ETK_None && NNS == 0) &&
+ "ElaboratedType cannot have elaborated type keyword "
+ "and name qualifier both null.");
+ }
+
+ friend class ASTContext; // ASTContext creates these
+
+public:
+ ~ElaboratedType();
+
+ /// \brief Retrieve the qualification on this type.
+ NestedNameSpecifier *getQualifier() const { return NNS; }
+
+ /// \brief Retrieve the type named by the qualified-id.
+ QualType getNamedType() const { return NamedType; }
+
+ /// \brief Remove a single level of sugar.
+ QualType desugar() const { return getNamedType(); }
+
+ /// \brief Returns whether this type directly provides sugar.
+ bool isSugared() const { return true; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getKeyword(), NNS, NamedType);
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS, QualType NamedType) {
+ ID.AddInteger(Keyword);
+ ID.AddPointer(NNS);
+ NamedType.Profile(ID);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == Elaborated;
+ }
+ static bool classof(const ElaboratedType *T) { return true; }
+};
+
+/// \brief Represents a qualified type name for which the type name is
+/// dependent.
+///
+/// DependentNameType represents a class of dependent types that involve a
+/// dependent nested-name-specifier (e.g., "T::") followed by a (dependent)
+/// name of a type. The DependentNameType may start with a "typename" (for a
+/// typename-specifier), "class", "struct", "union", or "enum" (for a
+/// dependent elaborated-type-specifier), or nothing (in contexts where we
+/// know that we must be referring to a type, e.g., in a base class specifier).
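+///
+/// For example:
+/// \code
+/// template<typename T> struct S {
+///   typedef typename T::value_type type; // 'typename T::value_type' is a
+///                                         // DependentNameType
+/// };
+/// \endcode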
+class DependentNameType : public TypeWithKeyword, public llvm::FoldingSetNode {
+
+ /// \brief The nested name specifier containing the qualifier.
+ NestedNameSpecifier *NNS;
+
+ /// \brief The type that this typename specifier refers to.
+ const IdentifierInfo *Name;
+
+ DependentNameType(ElaboratedTypeKeyword Keyword, NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name, QualType CanonType)
+ : TypeWithKeyword(Keyword, DependentName, CanonType, /*Dependent=*/true,
+ /*InstantiationDependent=*/true,
+ /*VariablyModified=*/false,
+ NNS->containsUnexpandedParameterPack()),
+ NNS(NNS), Name(Name) {
+ assert(NNS->isDependent() &&
+ "DependentNameType requires a dependent nested-name-specifier");
+ }
+
+ friend class ASTContext; // ASTContext creates these
+
+public:
+ /// \brief Retrieve the qualification on this type.
+ NestedNameSpecifier *getQualifier() const { return NNS; }
+
+ /// \brief Retrieve the type named by the typename specifier as an
+ /// identifier.
+ ///
+ /// This routine will return a non-NULL identifier pointer when the
+ /// form of the original typename was terminated by an identifier,
+ /// e.g., "typename T::type".
+ const IdentifierInfo *getIdentifier() const {
+ return Name;
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getKeyword(), NNS, Name);
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS, const IdentifierInfo *Name) {
+ ID.AddInteger(Keyword);
+ ID.AddPointer(NNS);
+ ID.AddPointer(Name);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == DependentName;
+ }
+ static bool classof(const DependentNameType *T) { return true; }
+};
+
+/// DependentTemplateSpecializationType - Represents a template
+/// specialization type whose template cannot be resolved, e.g.
+/// A<T>::template B<T>
+class DependentTemplateSpecializationType :
+ public TypeWithKeyword, public llvm::FoldingSetNode {
+
+ /// \brief The nested name specifier containing the qualifier.
+ NestedNameSpecifier *NNS;
+
+ /// \brief The identifier of the template.
+ const IdentifierInfo *Name;
+
+ /// \brief The number of template arguments named in this class
+ /// template specialization.
+ unsigned NumArgs;
+
+ const TemplateArgument *getArgBuffer() const {
+ return reinterpret_cast<const TemplateArgument*>(this+1);
+ }
+ TemplateArgument *getArgBuffer() {
+ return reinterpret_cast<TemplateArgument*>(this+1);
+ }
+
+ DependentTemplateSpecializationType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ unsigned NumArgs,
+ const TemplateArgument *Args,
+ QualType Canon);
+
+ friend class ASTContext; // ASTContext creates these
+
+public:
+ NestedNameSpecifier *getQualifier() const { return NNS; }
+ const IdentifierInfo *getIdentifier() const { return Name; }
+
+ /// \brief Retrieve the template arguments.
+ const TemplateArgument *getArgs() const {
+ return getArgBuffer();
+ }
+
+ /// \brief Retrieve the number of template arguments.
+ unsigned getNumArgs() const { return NumArgs; }
+
+ const TemplateArgument &getArg(unsigned Idx) const; // in TemplateBase.h
+
+ typedef const TemplateArgument * iterator;
+ iterator begin() const { return getArgs(); }
+ iterator end() const; // inline in TemplateBase.h
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context) {
+ Profile(ID, Context, getKeyword(), NNS, Name, NumArgs, getArgs());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context,
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *Qualifier,
+ const IdentifierInfo *Name,
+ unsigned NumArgs,
+ const TemplateArgument *Args);
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == DependentTemplateSpecialization;
+ }
+ static bool classof(const DependentTemplateSpecializationType *T) {
+ return true;
+ }
+};
+
+/// \brief Represents a pack expansion of types.
+///
+/// Pack expansions are part of C++0x variadic templates. A pack
+/// expansion contains a pattern, which itself contains one or more
+/// "unexpanded" parameter packs. When instantiated, a pack expansion
+/// produces a series of types, each instantiated from the pattern of
+/// the expansion, where the Ith instantiation of the pattern uses the
+/// Ith arguments bound to each of the unexpanded parameter packs. The
+/// pack expansion is considered to "expand" these unexpanded
+/// parameter packs.
+///
+/// \code
+/// template<typename ...Types> struct tuple;
+///
+/// template<typename ...Types>
+/// struct tuple_of_references {
+/// typedef tuple<Types&...> type;
+/// };
+/// \endcode
+///
+/// Here, the pack expansion \c Types&... is represented via a
+/// PackExpansionType whose pattern is Types&.
+class PackExpansionType : public Type, public llvm::FoldingSetNode {
+ /// \brief The pattern of the pack expansion.
+ QualType Pattern;
+
+ /// \brief The number of expansions that this pack expansion will
+ /// generate when substituted (+1), or 0 if the number of expansions is
+ /// not yet known.
+ ///
+ /// This field will only have a non-zero value when some of the parameter
+ /// packs that occur within the pattern have been substituted but others have
+ /// not.
+ unsigned NumExpansions;
+
+ PackExpansionType(QualType Pattern, QualType Canon,
+ llvm::Optional<unsigned> NumExpansions)
+ : Type(PackExpansion, Canon, /*Dependent=*/true,
+ /*InstantiationDependent=*/true,
+ /*VariablyModified=*/Pattern->isVariablyModifiedType(),
+ /*ContainsUnexpandedParameterPack=*/false),
+ Pattern(Pattern),
+ NumExpansions(NumExpansions? *NumExpansions + 1: 0) { }
+
+ friend class ASTContext; // ASTContext creates these
+
+public:
+ /// \brief Retrieve the pattern of this pack expansion, which is the
+ /// type that will be repeatedly instantiated when instantiating the
+ /// pack expansion itself.
+ QualType getPattern() const { return Pattern; }
+
+ /// \brief Retrieve the number of expansions that this pack expansion will
+ /// generate, if known.
+ llvm::Optional<unsigned> getNumExpansions() const {
+ if (NumExpansions)
+ return NumExpansions - 1;
+
+ return llvm::Optional<unsigned>();
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getPattern(), getNumExpansions());
+ }
+
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType Pattern,
+ llvm::Optional<unsigned> NumExpansions) {
+ ID.AddPointer(Pattern.getAsOpaquePtr());
+ ID.AddBoolean(NumExpansions);
+ if (NumExpansions)
+ ID.AddInteger(*NumExpansions);
+ }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == PackExpansion;
+ }
+ static bool classof(const PackExpansionType *T) {
+ return true;
+ }
+};
+
+/// ObjCObjectType - Represents a class type in Objective C.
+/// Every Objective C type is a combination of a base type and a
+/// list of protocols.
+///
+/// Given the following declarations:
+/// @class C;
+/// @protocol P;
+///
+/// 'C' is an ObjCInterfaceType C. It is sugar for an ObjCObjectType
+/// with base C and no protocols.
+///
+/// 'C<P>' is an ObjCObjectType with base C and protocol list [P].
+///
+/// 'id' is a TypedefType which is sugar for an ObjCObjectPointerType whose
+/// pointee is an ObjCObjectType with base BuiltinType::ObjCIdType
+/// and no protocols.
+///
+/// 'id<P>' is an ObjCObjectPointerType whose pointee is an ObjCObjectType
+/// with base BuiltinType::ObjCIdType and protocol list [P]. Eventually
+/// this should get its own sugar class to better represent the source.
+class ObjCObjectType : public Type {
+ // ObjCObjectType.NumProtocols - the number of protocols stored
+  // after the ObjCObjectTypeImpl node.
+ //
+ // These protocols are those written directly on the type. If
+ // protocol qualifiers ever become additive, the iterators will need
+  // to get kind of complicated.
+ //
+ // In the canonical object type, these are sorted alphabetically
+ // and uniqued.
+
+  /// Either a BuiltinType or an ObjCInterfaceType, or sugar for either.
+ QualType BaseType;
+
+ ObjCProtocolDecl * const *getProtocolStorage() const {
+ return const_cast<ObjCObjectType*>(this)->getProtocolStorage();
+ }
+
+ ObjCProtocolDecl **getProtocolStorage();
+
+protected:
+ ObjCObjectType(QualType Canonical, QualType Base,
+ ObjCProtocolDecl * const *Protocols, unsigned NumProtocols);
+
+ enum Nonce_ObjCInterface { Nonce_ObjCInterface };
+ ObjCObjectType(enum Nonce_ObjCInterface)
+ : Type(ObjCInterface, QualType(), false, false, false, false),
+ BaseType(QualType(this_(), 0)) {
+ ObjCObjectTypeBits.NumProtocols = 0;
+ }
+
+public:
+ /// getBaseType - Gets the base type of this object type. This is
+ /// always (possibly sugar for) one of:
+ /// - the 'id' builtin type (as opposed to the 'id' type visible to the
+  ///     user, which is a typedef for an ObjCObjectPointerType)
+ /// - the 'Class' builtin type (same caveat)
+ /// - an ObjCObjectType (currently always an ObjCInterfaceType)
+ QualType getBaseType() const { return BaseType; }
+
+ bool isObjCId() const {
+ return getBaseType()->isSpecificBuiltinType(BuiltinType::ObjCId);
+ }
+ bool isObjCClass() const {
+ return getBaseType()->isSpecificBuiltinType(BuiltinType::ObjCClass);
+ }
+ bool isObjCUnqualifiedId() const { return qual_empty() && isObjCId(); }
+ bool isObjCUnqualifiedClass() const { return qual_empty() && isObjCClass(); }
+ bool isObjCUnqualifiedIdOrClass() const {
+ if (!qual_empty()) return false;
+ if (const BuiltinType *T = getBaseType()->getAs<BuiltinType>())
+ return T->getKind() == BuiltinType::ObjCId ||
+ T->getKind() == BuiltinType::ObjCClass;
+ return false;
+ }
+ bool isObjCQualifiedId() const { return !qual_empty() && isObjCId(); }
+ bool isObjCQualifiedClass() const { return !qual_empty() && isObjCClass(); }
+
+ /// Gets the interface declaration for this object type, if the base type
+ /// really is an interface.
+ ObjCInterfaceDecl *getInterface() const;
+
+ typedef ObjCProtocolDecl * const *qual_iterator;
+
+ qual_iterator qual_begin() const { return getProtocolStorage(); }
+ qual_iterator qual_end() const { return qual_begin() + getNumProtocols(); }
+
+ bool qual_empty() const { return getNumProtocols() == 0; }
+
+ /// getNumProtocols - Return the number of qualifying protocols in this
+ /// interface type, or 0 if there are none.
+ unsigned getNumProtocols() const { return ObjCObjectTypeBits.NumProtocols; }
+
+ /// \brief Fetch a protocol by index.
+ ObjCProtocolDecl *getProtocol(unsigned I) const {
+ assert(I < getNumProtocols() && "Out-of-range protocol access");
+ return qual_begin()[I];
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ObjCObject ||
+ T->getTypeClass() == ObjCInterface;
+ }
+ static bool classof(const ObjCObjectType *) { return true; }
+};
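+
+// For illustration only: given a hypothetical 'const ObjCObjectType *OT', the
+// protocols written directly on the type can be walked with the qual_* API
+// declared above.
+//
+//   for (ObjCObjectType::qual_iterator I = OT->qual_begin(),
+//                                      E = OT->qual_end(); I != E; ++I) {
+//     ObjCProtocolDecl *Proto = *I;
+//     // ...
+//   }
+//   if (ObjCInterfaceDecl *IFace = OT->getInterface()) {
+//     // the base type is (sugar for) an interface type
+//   }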
+
+/// ObjCObjectTypeImpl - A class providing a concrete implementation
+/// of ObjCObjectType, so as to not increase the footprint of
+/// ObjCInterfaceType. Code outside of ASTContext and the core type
+/// system should not reference this type.
+class ObjCObjectTypeImpl : public ObjCObjectType, public llvm::FoldingSetNode {
+ friend class ASTContext;
+
+ // If anyone adds fields here, ObjCObjectType::getProtocolStorage()
+ // will need to be modified.
+
+ ObjCObjectTypeImpl(QualType Canonical, QualType Base,
+ ObjCProtocolDecl * const *Protocols,
+ unsigned NumProtocols)
+ : ObjCObjectType(Canonical, Base, Protocols, NumProtocols) {}
+
+public:
+ void Profile(llvm::FoldingSetNodeID &ID);
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ QualType Base,
+ ObjCProtocolDecl *const *protocols,
+ unsigned NumProtocols);
+};
+
+inline ObjCProtocolDecl **ObjCObjectType::getProtocolStorage() {
+ return reinterpret_cast<ObjCProtocolDecl**>(
+ static_cast<ObjCObjectTypeImpl*>(this) + 1);
+}
+
+/// ObjCInterfaceType - Interfaces are the core concept in Objective-C for
+/// object oriented design. They basically correspond to C++ classes. There
+/// are two kinds of interface types, normal interfaces like "NSString" and
+/// qualified interfaces, which are qualified with a protocol list like
+/// "NSString<NSCopyable, NSAmazing>".
+///
+/// ObjCInterfaceType guarantees the following properties when considered
+/// as a subtype of its superclass, ObjCObjectType:
+/// - There are no protocol qualifiers. To reinforce this, code which
+/// tries to invoke the protocol methods via an ObjCInterfaceType will
+/// fail to compile.
+/// - It is its own base type. That is, if T is an ObjCInterfaceType*,
+/// T->getBaseType() == QualType(T, 0).
+class ObjCInterfaceType : public ObjCObjectType {
+ mutable ObjCInterfaceDecl *Decl;
+
+ ObjCInterfaceType(const ObjCInterfaceDecl *D)
+ : ObjCObjectType(Nonce_ObjCInterface),
+ Decl(const_cast<ObjCInterfaceDecl*>(D)) {}
+ friend class ASTContext; // ASTContext creates these.
+ friend class ASTReader;
+ friend class ObjCInterfaceDecl;
+
+public:
+ /// getDecl - Get the declaration of this interface.
+ ObjCInterfaceDecl *getDecl() const { return Decl; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ObjCInterface;
+ }
+ static bool classof(const ObjCInterfaceType *) { return true; }
+
+ // Nonsense to "hide" certain members of ObjCObjectType within this
+ // class. People asking for protocols on an ObjCInterfaceType are
+ // not going to get what they want: ObjCInterfaceTypes are
+ // guaranteed to have no protocols.
+ enum {
+ qual_iterator,
+ qual_begin,
+ qual_end,
+ getNumProtocols,
+ getProtocol
+ };
+};
+
+inline ObjCInterfaceDecl *ObjCObjectType::getInterface() const {
+ if (const ObjCInterfaceType *T =
+ getBaseType()->getAs<ObjCInterfaceType>())
+ return T->getDecl();
+ return 0;
+}
+
+/// ObjCObjectPointerType - Used to represent a pointer to an
+/// Objective C object. These are constructed from pointer
+/// declarators when the pointee type is an ObjCObjectType (or sugar
+/// for one). In addition, the 'id' and 'Class' types are typedefs
+/// for these, and the protocol-qualified types 'id<P>' and 'Class<P>'
+/// are translated into these.
+///
+/// Pointers to pointers to Objective C objects are still PointerTypes;
+/// only the first level of pointer gets its own type implementation.
+class ObjCObjectPointerType : public Type, public llvm::FoldingSetNode {
+ QualType PointeeType;
+
+ ObjCObjectPointerType(QualType Canonical, QualType Pointee)
+ : Type(ObjCObjectPointer, Canonical, false, false, false, false),
+ PointeeType(Pointee) {}
+ friend class ASTContext; // ASTContext creates these.
+
+public:
+ /// getPointeeType - Gets the type pointed to by this ObjC pointer.
+ /// The result will always be an ObjCObjectType or sugar thereof.
+ QualType getPointeeType() const { return PointeeType; }
+
+ /// getObjCObjectType - Gets the type pointed to by this ObjC
+ /// pointer. This method always returns non-null.
+ ///
+ /// This method is equivalent to getPointeeType() except that
+ /// it discards any typedefs (or other sugar) between this
+ /// type and the "outermost" object type. So for:
+ /// @class A; @protocol P; @protocol Q;
+ /// typedef A<P> AP;
+ /// typedef A A1;
+ /// typedef A1<P> A1P;
+ /// typedef A1P<Q> A1PQ;
+ /// For 'A*', getObjectType() will return 'A'.
+ /// For 'A<P>*', getObjectType() will return 'A<P>'.
+ /// For 'AP*', getObjectType() will return 'A<P>'.
+ /// For 'A1*', getObjectType() will return 'A'.
+ /// For 'A1<P>*', getObjectType() will return 'A1<P>'.
+ /// For 'A1P*', getObjectType() will return 'A1<P>'.
+ /// For 'A1PQ*', getObjectType() will return 'A1<Q>', because
+ /// adding protocols to a protocol-qualified base discards the
+ /// old qualifiers (for now). But if it didn't, getObjectType()
+ /// would return 'A1P<Q>' (and we'd have to make iterating over
+ /// qualifiers more complicated).
+ const ObjCObjectType *getObjectType() const {
+ return PointeeType->castAs<ObjCObjectType>();
+ }
+
+ /// getInterfaceType - If this pointer points to an Objective C
+ /// @interface type, gets the type for that interface. Any protocol
+ /// qualifiers on the interface are ignored.
+ ///
+ /// \return null if the base type for this pointer is 'id' or 'Class'
+ const ObjCInterfaceType *getInterfaceType() const {
+ return getObjectType()->getBaseType()->getAs<ObjCInterfaceType>();
+ }
+
+  /// getInterfaceDecl - If this pointer points to an Objective-C @interface
+ /// type, gets the declaration for that interface.
+ ///
+ /// \return null if the base type for this pointer is 'id' or 'Class'
+ ObjCInterfaceDecl *getInterfaceDecl() const {
+ return getObjectType()->getInterface();
+ }
+
+ /// isObjCIdType - True if this is equivalent to the 'id' type, i.e. if
+ /// its object type is the primitive 'id' type with no protocols.
+ bool isObjCIdType() const {
+ return getObjectType()->isObjCUnqualifiedId();
+ }
+
+ /// isObjCClassType - True if this is equivalent to the 'Class' type,
+  /// i.e. if its object type is the primitive 'Class' type with no protocols.
+ bool isObjCClassType() const {
+ return getObjectType()->isObjCUnqualifiedClass();
+ }
+
+ /// isObjCQualifiedIdType - True if this is equivalent to 'id<P>' for some
+ /// non-empty set of protocols.
+ bool isObjCQualifiedIdType() const {
+ return getObjectType()->isObjCQualifiedId();
+ }
+
+ /// isObjCQualifiedClassType - True if this is equivalent to 'Class<P>' for
+ /// some non-empty set of protocols.
+ bool isObjCQualifiedClassType() const {
+ return getObjectType()->isObjCQualifiedClass();
+ }
+
+ /// An iterator over the qualifiers on the object type. Provided
+ /// for convenience. This will always iterate over the full set of
+ /// protocols on a type, not just those provided directly.
+ typedef ObjCObjectType::qual_iterator qual_iterator;
+
+ qual_iterator qual_begin() const {
+ return getObjectType()->qual_begin();
+ }
+ qual_iterator qual_end() const {
+ return getObjectType()->qual_end();
+ }
+ bool qual_empty() const { return getObjectType()->qual_empty(); }
+
+ /// getNumProtocols - Return the number of qualifying protocols on
+ /// the object type.
+ unsigned getNumProtocols() const {
+ return getObjectType()->getNumProtocols();
+ }
+
+ /// \brief Retrieve a qualifying protocol by index on the object
+ /// type.
+ ObjCProtocolDecl *getProtocol(unsigned I) const {
+ return getObjectType()->getProtocol(I);
+ }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getPointeeType());
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType T) {
+ ID.AddPointer(T.getAsOpaquePtr());
+ }
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == ObjCObjectPointer;
+ }
+ static bool classof(const ObjCObjectPointerType *) { return true; }
+};
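+
+// For illustration only: a sketch of inspecting a hypothetical QualType 'T'
+// that may be an Objective-C object pointer.
+//
+//   if (const ObjCObjectPointerType *OPT = T->getAs<ObjCObjectPointerType>()) {
+//     if (OPT->isObjCIdType()) {
+//       // plain 'id' with no protocol qualifiers
+//     } else if (ObjCInterfaceDecl *IFace = OPT->getInterfaceDecl()) {
+//       // pointer to a concrete @interface, e.g. 'NSString *'
+//     }
+//     unsigned NumProtos = OPT->getNumProtocols();   // protocol qualifiers
+//   }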
+
+class AtomicType : public Type, public llvm::FoldingSetNode {
+ QualType ValueType;
+
+ AtomicType(QualType ValTy, QualType Canonical)
+ : Type(Atomic, Canonical, ValTy->isDependentType(),
+ ValTy->isInstantiationDependentType(),
+ ValTy->isVariablyModifiedType(),
+ ValTy->containsUnexpandedParameterPack()),
+ ValueType(ValTy) {}
+ friend class ASTContext; // ASTContext creates these.
+
+ public:
+ /// getValueType - Gets the type contained by this atomic type, i.e.
+ /// the type returned by performing an atomic load of this atomic type.
+ QualType getValueType() const { return ValueType; }
+
+ bool isSugared() const { return false; }
+ QualType desugar() const { return QualType(this, 0); }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getValueType());
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID, QualType T) {
+ ID.AddPointer(T.getAsOpaquePtr());
+ }
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == Atomic;
+ }
+ static bool classof(const AtomicType *) { return true; }
+};
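+
+// For illustration only: for a hypothetical QualType 'T' written as
+// '_Atomic(int)', the underlying value type can be recovered as follows.
+//
+//   if (const AtomicType *AT = T->getAs<AtomicType>()) {
+//     QualType ValueTy = AT->getValueType();   // 'int'
+//   }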
+
+/// A qualifier set used to collect the qualifiers stripped from a chain of
+/// types, so that they can later be re-applied to some other type.
+class QualifierCollector : public Qualifiers {
+public:
+ QualifierCollector(Qualifiers Qs = Qualifiers()) : Qualifiers(Qs) {}
+
+ /// Collect any qualifiers on the given type and return an
+ /// unqualified type. The qualifiers are assumed to be consistent
+ /// with those already in the type.
+ const Type *strip(QualType type) {
+ addFastQualifiers(type.getLocalFastQualifiers());
+ if (!type.hasLocalNonFastQualifiers())
+ return type.getTypePtrUnsafe();
+
+ const ExtQuals *extQuals = type.getExtQualsUnsafe();
+ addConsistentQualifiers(extQuals->getQualifiers());
+ return extQuals->getBaseType();
+ }
+
+ /// Apply the collected qualifiers to the given type.
+ QualType apply(const ASTContext &Context, QualType QT) const;
+
+ /// Apply the collected qualifiers to the given type.
+ QualType apply(const ASTContext &Context, const Type* T) const;
+};
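+
+// For illustration only: the usual strip/apply pattern, assuming a
+// hypothetical ASTContext 'Ctx' and QualTypes 'From' and 'To'.
+//
+//   QualifierCollector Quals;
+//   const Type *Bare = Quals.strip(From);           // collect From's qualifiers
+//   QualType Requalified = Quals.apply(Ctx, To);    // re-apply them to To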
+
+
+// Inline function definitions.
+
+inline SplitQualType SplitQualType::getSingleStepDesugaredType() const {
+ SplitQualType desugar =
+ Ty->getLocallyUnqualifiedSingleStepDesugaredType().split();
+ desugar.Quals.addConsistentQualifiers(Quals);
+ return desugar;
+}
+
+inline const Type *QualType::getTypePtr() const {
+ return getCommonPtr()->BaseType;
+}
+
+inline const Type *QualType::getTypePtrOrNull() const {
+ return (isNull() ? 0 : getCommonPtr()->BaseType);
+}
+
+inline SplitQualType QualType::split() const {
+ if (!hasLocalNonFastQualifiers())
+ return SplitQualType(getTypePtrUnsafe(),
+ Qualifiers::fromFastMask(getLocalFastQualifiers()));
+
+ const ExtQuals *eq = getExtQualsUnsafe();
+ Qualifiers qs = eq->getQualifiers();
+ qs.addFastQualifiers(getLocalFastQualifiers());
+ return SplitQualType(eq->getBaseType(), qs);
+}
+
+inline Qualifiers QualType::getLocalQualifiers() const {
+ Qualifiers Quals;
+ if (hasLocalNonFastQualifiers())
+ Quals = getExtQualsUnsafe()->getQualifiers();
+ Quals.addFastQualifiers(getLocalFastQualifiers());
+ return Quals;
+}
+
+inline Qualifiers QualType::getQualifiers() const {
+ Qualifiers quals = getCommonPtr()->CanonicalType.getLocalQualifiers();
+ quals.addFastQualifiers(getLocalFastQualifiers());
+ return quals;
+}
+
+inline unsigned QualType::getCVRQualifiers() const {
+ unsigned cvr = getCommonPtr()->CanonicalType.getLocalCVRQualifiers();
+ cvr |= getLocalCVRQualifiers();
+ return cvr;
+}
+
+inline QualType QualType::getCanonicalType() const {
+ QualType canon = getCommonPtr()->CanonicalType;
+ return canon.withFastQualifiers(getLocalFastQualifiers());
+}
+
+inline bool QualType::isCanonical() const {
+ return getTypePtr()->isCanonicalUnqualified();
+}
+
+inline bool QualType::isCanonicalAsParam() const {
+ if (!isCanonical()) return false;
+ if (hasLocalQualifiers()) return false;
+
+ const Type *T = getTypePtr();
+ if (T->isVariablyModifiedType() && T->hasSizedVLAType())
+ return false;
+
+ return !isa<FunctionType>(T) && !isa<ArrayType>(T);
+}
+
+inline bool QualType::isConstQualified() const {
+ return isLocalConstQualified() ||
+ getCommonPtr()->CanonicalType.isLocalConstQualified();
+}
+
+inline bool QualType::isRestrictQualified() const {
+ return isLocalRestrictQualified() ||
+ getCommonPtr()->CanonicalType.isLocalRestrictQualified();
+}
+
+
+inline bool QualType::isVolatileQualified() const {
+ return isLocalVolatileQualified() ||
+ getCommonPtr()->CanonicalType.isLocalVolatileQualified();
+}
+
+inline bool QualType::hasQualifiers() const {
+ return hasLocalQualifiers() ||
+ getCommonPtr()->CanonicalType.hasLocalQualifiers();
+}
+
+inline QualType QualType::getUnqualifiedType() const {
+ if (!getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers())
+ return QualType(getTypePtr(), 0);
+
+ return QualType(getSplitUnqualifiedTypeImpl(*this).Ty, 0);
+}
+
+inline SplitQualType QualType::getSplitUnqualifiedType() const {
+ if (!getTypePtr()->getCanonicalTypeInternal().hasLocalQualifiers())
+ return split();
+
+ return getSplitUnqualifiedTypeImpl(*this);
+}
+
+inline void QualType::removeLocalConst() {
+ removeLocalFastQualifiers(Qualifiers::Const);
+}
+
+inline void QualType::removeLocalRestrict() {
+ removeLocalFastQualifiers(Qualifiers::Restrict);
+}
+
+inline void QualType::removeLocalVolatile() {
+ removeLocalFastQualifiers(Qualifiers::Volatile);
+}
+
+inline void QualType::removeLocalCVRQualifiers(unsigned Mask) {
+ assert(!(Mask & ~Qualifiers::CVRMask) && "mask has non-CVR bits");
+ assert((int)Qualifiers::CVRMask == (int)Qualifiers::FastMask);
+
+ // Fast path: we don't need to touch the slow qualifiers.
+ removeLocalFastQualifiers(Mask);
+}
+
+/// getAddressSpace - Return the address space of this type.
+inline unsigned QualType::getAddressSpace() const {
+ return getQualifiers().getAddressSpace();
+}
+
+/// getObjCGCAttr - Return the gc attribute of this type.
+inline Qualifiers::GC QualType::getObjCGCAttr() const {
+ return getQualifiers().getObjCGCAttr();
+}
+
+inline FunctionType::ExtInfo getFunctionExtInfo(const Type &t) {
+ if (const PointerType *PT = t.getAs<PointerType>()) {
+ if (const FunctionType *FT = PT->getPointeeType()->getAs<FunctionType>())
+ return FT->getExtInfo();
+ } else if (const FunctionType *FT = t.getAs<FunctionType>())
+ return FT->getExtInfo();
+
+ return FunctionType::ExtInfo();
+}
+
+inline FunctionType::ExtInfo getFunctionExtInfo(QualType t) {
+ return getFunctionExtInfo(*t);
+}
+
+/// isMoreQualifiedThan - Determine whether this type is more
+/// qualified than the Other type. For example, "const volatile int"
+/// is more qualified than "const int", "volatile int", and
+/// "int". However, it is not more qualified than "const volatile
+/// int".
+inline bool QualType::isMoreQualifiedThan(QualType other) const {
+ Qualifiers myQuals = getQualifiers();
+ Qualifiers otherQuals = other.getQualifiers();
+ return (myQuals != otherQuals && myQuals.compatiblyIncludes(otherQuals));
+}
+
+/// isAtLeastAsQualifiedAs - Determine whether this type is at least
+/// as qualified as the Other type. For example, "const volatile
+/// int" is at least as qualified as "const int", "volatile int",
+/// "int", and "const volatile int".
+inline bool QualType::isAtLeastAsQualifiedAs(QualType other) const {
+ return getQualifiers().compatiblyIncludes(other.getQualifiers());
+}
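+
+// For illustration only, with hypothetical QualTypes Q1 = 'const volatile int'
+// and Q2 = 'const int':
+//
+//   Q1.isMoreQualifiedThan(Q2)     == true
+//   Q2.isMoreQualifiedThan(Q1)     == false
+//   Q1.isAtLeastAsQualifiedAs(Q1)  == true
+//   Q2.isAtLeastAsQualifiedAs(Q1)  == false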
+
+/// getNonReferenceType - If Type is a reference type (e.g., const
+/// int&), returns the type that the reference refers to ("const
+/// int"). Otherwise, returns the type itself. This routine is used
+/// throughout Sema to implement C++ 5p6:
+///
+/// If an expression initially has the type "reference to T" (8.3.2,
+/// 8.5.3), the type is adjusted to "T" prior to any further
+/// analysis, the expression designates the object or function
+/// denoted by the reference, and the expression is an lvalue.
+inline QualType QualType::getNonReferenceType() const {
+ if (const ReferenceType *RefType = (*this)->getAs<ReferenceType>())
+ return RefType->getPointeeType();
+ else
+ return *this;
+}
+
+inline bool QualType::isCForbiddenLValueType() const {
+ return ((getTypePtr()->isVoidType() && !hasQualifiers()) ||
+ getTypePtr()->isFunctionType());
+}
+
+/// \brief Tests whether the type is categorized as a fundamental type.
+///
+/// \returns True for types specified in C++0x [basic.fundamental].
+inline bool Type::isFundamentalType() const {
+ return isVoidType() ||
+ // FIXME: It's really annoying that we don't have an
+ // 'isArithmeticType()' which agrees with the standard definition.
+ (isArithmeticType() && !isEnumeralType());
+}
+
+/// \brief Tests whether the type is categorized as a compound type.
+///
+/// \returns True for types specified in C++0x [basic.compound].
+inline bool Type::isCompoundType() const {
+ // C++0x [basic.compound]p1:
+ // Compound types can be constructed in the following ways:
+ // -- arrays of objects of a given type [...];
+ return isArrayType() ||
+ // -- functions, which have parameters of given types [...];
+ isFunctionType() ||
+ // -- pointers to void or objects or functions [...];
+ isPointerType() ||
+ // -- references to objects or functions of a given type. [...]
+ isReferenceType() ||
+ // -- classes containing a sequence of objects of various types, [...];
+ isRecordType() ||
+ // -- unions, which are classes capable of containing objects of different
+ // types at different times;
+ isUnionType() ||
+ // -- enumerations, which comprise a set of named constant values. [...];
+ isEnumeralType() ||
+ // -- pointers to non-static class members, [...].
+ isMemberPointerType();
+}
+
+inline bool Type::isFunctionType() const {
+ return isa<FunctionType>(CanonicalType);
+}
+inline bool Type::isPointerType() const {
+ return isa<PointerType>(CanonicalType);
+}
+inline bool Type::isAnyPointerType() const {
+ return isPointerType() || isObjCObjectPointerType();
+}
+inline bool Type::isBlockPointerType() const {
+ return isa<BlockPointerType>(CanonicalType);
+}
+inline bool Type::isReferenceType() const {
+ return isa<ReferenceType>(CanonicalType);
+}
+inline bool Type::isLValueReferenceType() const {
+ return isa<LValueReferenceType>(CanonicalType);
+}
+inline bool Type::isRValueReferenceType() const {
+ return isa<RValueReferenceType>(CanonicalType);
+}
+inline bool Type::isFunctionPointerType() const {
+ if (const PointerType *T = getAs<PointerType>())
+ return T->getPointeeType()->isFunctionType();
+ else
+ return false;
+}
+inline bool Type::isMemberPointerType() const {
+ return isa<MemberPointerType>(CanonicalType);
+}
+inline bool Type::isMemberFunctionPointerType() const {
+ if (const MemberPointerType* T = getAs<MemberPointerType>())
+ return T->isMemberFunctionPointer();
+ else
+ return false;
+}
+inline bool Type::isMemberDataPointerType() const {
+ if (const MemberPointerType* T = getAs<MemberPointerType>())
+ return T->isMemberDataPointer();
+ else
+ return false;
+}
+inline bool Type::isArrayType() const {
+ return isa<ArrayType>(CanonicalType);
+}
+inline bool Type::isConstantArrayType() const {
+ return isa<ConstantArrayType>(CanonicalType);
+}
+inline bool Type::isIncompleteArrayType() const {
+ return isa<IncompleteArrayType>(CanonicalType);
+}
+inline bool Type::isVariableArrayType() const {
+ return isa<VariableArrayType>(CanonicalType);
+}
+inline bool Type::isDependentSizedArrayType() const {
+ return isa<DependentSizedArrayType>(CanonicalType);
+}
+inline bool Type::isBuiltinType() const {
+ return isa<BuiltinType>(CanonicalType);
+}
+inline bool Type::isRecordType() const {
+ return isa<RecordType>(CanonicalType);
+}
+inline bool Type::isEnumeralType() const {
+ return isa<EnumType>(CanonicalType);
+}
+inline bool Type::isAnyComplexType() const {
+ return isa<ComplexType>(CanonicalType);
+}
+inline bool Type::isVectorType() const {
+ return isa<VectorType>(CanonicalType);
+}
+inline bool Type::isExtVectorType() const {
+ return isa<ExtVectorType>(CanonicalType);
+}
+inline bool Type::isObjCObjectPointerType() const {
+ return isa<ObjCObjectPointerType>(CanonicalType);
+}
+inline bool Type::isObjCObjectType() const {
+ return isa<ObjCObjectType>(CanonicalType);
+}
+inline bool Type::isObjCObjectOrInterfaceType() const {
+ return isa<ObjCInterfaceType>(CanonicalType) ||
+ isa<ObjCObjectType>(CanonicalType);
+}
+inline bool Type::isAtomicType() const {
+ return isa<AtomicType>(CanonicalType);
+}
+
+inline bool Type::isObjCQualifiedIdType() const {
+ if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>())
+ return OPT->isObjCQualifiedIdType();
+ return false;
+}
+inline bool Type::isObjCQualifiedClassType() const {
+ if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>())
+ return OPT->isObjCQualifiedClassType();
+ return false;
+}
+inline bool Type::isObjCIdType() const {
+ if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>())
+ return OPT->isObjCIdType();
+ return false;
+}
+inline bool Type::isObjCClassType() const {
+ if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>())
+ return OPT->isObjCClassType();
+ return false;
+}
+inline bool Type::isObjCSelType() const {
+ if (const PointerType *OPT = getAs<PointerType>())
+ return OPT->getPointeeType()->isSpecificBuiltinType(BuiltinType::ObjCSel);
+ return false;
+}
+inline bool Type::isObjCBuiltinType() const {
+ return isObjCIdType() || isObjCClassType() || isObjCSelType();
+}
+inline bool Type::isTemplateTypeParmType() const {
+ return isa<TemplateTypeParmType>(CanonicalType);
+}
+
+inline bool Type::isSpecificBuiltinType(unsigned K) const {
+ if (const BuiltinType *BT = getAs<BuiltinType>())
+ if (BT->getKind() == (BuiltinType::Kind) K)
+ return true;
+ return false;
+}
+
+inline bool Type::isPlaceholderType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(this))
+ return BT->isPlaceholderType();
+ return false;
+}
+
+inline const BuiltinType *Type::getAsPlaceholderType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(this))
+ if (BT->isPlaceholderType())
+ return BT;
+ return 0;
+}
+
+inline bool Type::isSpecificPlaceholderType(unsigned K) const {
+ assert(BuiltinType::isPlaceholderTypeKind((BuiltinType::Kind) K));
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(this))
+ return (BT->getKind() == (BuiltinType::Kind) K);
+ return false;
+}
+
+inline bool Type::isNonOverloadPlaceholderType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(this))
+ return BT->isNonOverloadPlaceholderType();
+ return false;
+}
+
+inline bool Type::isVoidType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Void;
+ return false;
+}
+
+inline bool Type::isHalfType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Half;
+ // FIXME: Should we allow complex __fp16? Probably not.
+ return false;
+}
+
+inline bool Type::isNullPtrType() const {
+ if (const BuiltinType *BT = getAs<BuiltinType>())
+ return BT->getKind() == BuiltinType::NullPtr;
+ return false;
+}
+
+extern bool IsEnumDeclComplete(EnumDecl *);
+extern bool IsEnumDeclScoped(EnumDecl *);
+
+inline bool Type::isIntegerType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::Int128;
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ // Incomplete enum types are not treated as integer types.
+ // FIXME: In C++, enum types are never integer types.
+ return IsEnumDeclComplete(ET->getDecl()) &&
+ !IsEnumDeclScoped(ET->getDecl());
+ }
+ return false;
+}
+
+inline bool Type::isScalarType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() > BuiltinType::Void &&
+ BT->getKind() <= BuiltinType::NullPtr;
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ // Enums are scalar types, but only if they are defined. Incomplete enums
+ // are not treated as scalar types.
+ return IsEnumDeclComplete(ET->getDecl());
+ return isa<PointerType>(CanonicalType) ||
+ isa<BlockPointerType>(CanonicalType) ||
+ isa<MemberPointerType>(CanonicalType) ||
+ isa<ComplexType>(CanonicalType) ||
+ isa<ObjCObjectPointerType>(CanonicalType);
+}
+
+inline bool Type::isIntegralOrEnumerationType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::Int128;
+
+ // Check for a complete enum type; incomplete enum types are not properly an
+ // enumeration type in the sense required here.
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ return IsEnumDeclComplete(ET->getDecl());
+
+ return false;
+}
+
+inline bool Type::isBooleanType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Bool;
+ return false;
+}
+
+/// \brief Determines whether this is a type for which one can define
+/// an overloaded operator.
+inline bool Type::isOverloadableType() const {
+ return isDependentType() || isRecordType() || isEnumeralType();
+}
+
+/// \brief Determines whether this type can decay to a pointer type.
+inline bool Type::canDecayToPointerType() const {
+ return isFunctionType() || isArrayType();
+}
+
+inline bool Type::hasPointerRepresentation() const {
+ return (isPointerType() || isReferenceType() || isBlockPointerType() ||
+ isObjCObjectPointerType() || isNullPtrType());
+}
+
+inline bool Type::hasObjCPointerRepresentation() const {
+ return isObjCObjectPointerType();
+}
+
+inline const Type *Type::getBaseElementTypeUnsafe() const {
+ const Type *type = this;
+ while (const ArrayType *arrayType = type->getAsArrayTypeUnsafe())
+ type = arrayType->getElementType().getTypePtr();
+ return type;
+}
+
+/// Insertion operator for diagnostics. This allows sending QualTypes into a
+/// diagnostic with <<.
+inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ QualType T) {
+ DB.AddTaggedVal(reinterpret_cast<intptr_t>(T.getAsOpaquePtr()),
+ DiagnosticsEngine::ak_qualtype);
+ return DB;
+}
+
+/// Insertion operator for partial diagnostics. This allows sending QualTypes
+/// into a diagnostic with <<.
+inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
+ QualType T) {
+ PD.AddTaggedVal(reinterpret_cast<intptr_t>(T.getAsOpaquePtr()),
+ DiagnosticsEngine::ak_qualtype);
+ return PD;
+}
+
+// Helper class template that is used by Type::getAs to ensure that one does
+// not try to look through a qualified type to get to an array type.
+template<typename T,
+ bool isArrayType = (llvm::is_same<T, ArrayType>::value ||
+ llvm::is_base_of<ArrayType, T>::value)>
+struct ArrayType_cannot_be_used_with_getAs { };
+
+template<typename T>
+struct ArrayType_cannot_be_used_with_getAs<T, true>;
+
+/// Member-template getAs<specific type>.
+template <typename T> const T *Type::getAs() const {
+ ArrayType_cannot_be_used_with_getAs<T> at;
+ (void)at;
+
+ // If this is directly a T type, return it.
+ if (const T *Ty = dyn_cast<T>(this))
+ return Ty;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<T>(CanonicalType))
+ return 0;
+
+ // If this is a typedef for the type, strip the typedef off without
+ // losing all typedef information.
+ return cast<T>(getUnqualifiedDesugaredType());
+}
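+
+// For illustration only: getAs<> looks through sugar, so given a hypothetical
+// 'typedef int *IntPtr;' and a QualType 'T' spelled 'IntPtr':
+//
+//   if (const PointerType *PT = T->getAs<PointerType>()) {
+//     QualType Pointee = PT->getPointeeType();   // 'int'
+//   }
+//
+// whereas dyn_cast<PointerType>(T.getTypePtr()) would fail, because the
+// written type node is the TypedefType rather than the PointerType.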
+
+inline const ArrayType *Type::getAsArrayTypeUnsafe() const {
+ // If this is directly an array type, return it.
+ if (const ArrayType *arr = dyn_cast<ArrayType>(this))
+ return arr;
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<ArrayType>(CanonicalType))
+ return 0;
+
+ // If this is a typedef for the type, strip the typedef off without
+ // losing all typedef information.
+ return cast<ArrayType>(getUnqualifiedDesugaredType());
+}
+
+template <typename T> const T *Type::castAs() const {
+ ArrayType_cannot_be_used_with_getAs<T> at;
+ (void) at;
+
+ assert(isa<T>(CanonicalType));
+ if (const T *ty = dyn_cast<T>(this)) return ty;
+ return cast<T>(getUnqualifiedDesugaredType());
+}
+
+inline const ArrayType *Type::castAsArrayTypeUnsafe() const {
+ assert(isa<ArrayType>(CanonicalType));
+ if (const ArrayType *arr = dyn_cast<ArrayType>(this)) return arr;
+ return cast<ArrayType>(getUnqualifiedDesugaredType());
+}
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h b/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
new file mode 100644
index 0000000..aab87be
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/TypeLoc.h
@@ -0,0 +1,1817 @@
+//===--- TypeLoc.h - Type Source Info Wrapper -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TypeLoc interface and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_TYPELOC_H
+#define LLVM_CLANG_AST_TYPELOC_H
+
+#include "clang/AST/Type.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/Basic/Specifiers.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+ class ASTContext;
+ class ParmVarDecl;
+ class TypeSourceInfo;
+ class UnqualTypeLoc;
+
+// Predeclare all the type nodes.
+#define ABSTRACT_TYPELOC(Class, Base)
+#define TYPELOC(Class, Base) \
+ class Class##TypeLoc;
+#include "clang/AST/TypeLocNodes.def"
+
+/// \brief Base wrapper for a particular "section" of type source info.
+///
+/// A client should use the TypeLoc subclasses through cast/dyn_cast in order to
+/// get at the actual information.
+class TypeLoc {
+protected:
+ // The correctness of this relies on the property that, for Type *Ty,
+ // QualType(Ty, 0).getAsOpaquePtr() == (void*) Ty
+ const void *Ty;
+ void *Data;
+
+public:
+ /// The kinds of TypeLocs. Equivalent to the Type::TypeClass enum,
+ /// except it also defines a Qualified enum that corresponds to the
+  /// QualifiedTypeLoc class.
+ enum TypeLocClass {
+#define ABSTRACT_TYPE(Class, Base)
+#define TYPE(Class, Base) \
+ Class = Type::Class,
+#include "clang/AST/TypeNodes.def"
+ Qualified
+ };
+
+ TypeLoc() : Ty(0), Data(0) { }
+ TypeLoc(QualType ty, void *opaqueData)
+ : Ty(ty.getAsOpaquePtr()), Data(opaqueData) { }
+ TypeLoc(const Type *ty, void *opaqueData)
+ : Ty(ty), Data(opaqueData) { }
+
+ TypeLocClass getTypeLocClass() const {
+ if (getType().hasLocalQualifiers()) return Qualified;
+ return (TypeLocClass) getType()->getTypeClass();
+ }
+
+ bool isNull() const { return !Ty; }
+ operator bool() const { return Ty; }
+
+ /// \brief Returns the size of type source info data block for the given type.
+ static unsigned getFullDataSizeForType(QualType Ty);
+
+ /// \brief Get the type for which this source info wrapper provides
+ /// information.
+ QualType getType() const {
+ return QualType::getFromOpaquePtr(Ty);
+ }
+
+ const Type *getTypePtr() const {
+ return QualType::getFromOpaquePtr(Ty).getTypePtr();
+ }
+
+ /// \brief Get the pointer where source information is stored.
+ void *getOpaqueData() const {
+ return Data;
+ }
+
+ /// \brief Get the begin source location.
+ SourceLocation getBeginLoc() const;
+
+ /// \brief Get the end source location.
+ SourceLocation getEndLoc() const;
+
+ /// \brief Get the full source range.
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(getBeginLoc(), getEndLoc());
+ }
+ SourceLocation getLocStart() const LLVM_READONLY { return getBeginLoc(); }
+ SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); }
+
+ /// \brief Get the local source range.
+ SourceRange getLocalSourceRange() const {
+ return getLocalSourceRangeImpl(*this);
+ }
+
+ /// \brief Returns the size of the type source info data block.
+ unsigned getFullDataSize() const {
+ return getFullDataSizeForType(getType());
+ }
+
+  /// \brief Get the next TypeLoc pointed to by this TypeLoc, e.g. for "int*"
+  /// the TypeLoc is a PointerTypeLoc and the next TypeLoc is for "int".
+ TypeLoc getNextTypeLoc() const {
+ return getNextTypeLocImpl(*this);
+ }
+
+ /// \brief Skips past any qualifiers, if this is qualified.
+ UnqualTypeLoc getUnqualifiedLoc() const; // implemented in this header
+
+ TypeLoc IgnoreParens() const {
+ if (isa<ParenTypeLoc>(this))
+ return IgnoreParensImpl(*this);
+ return *this;
+ }
+
+ /// \brief Initializes this to state that every location in this
+ /// type is the given location.
+ ///
+ /// This method exists to provide a simple transition for code that
+ /// relies on location-less types.
+ void initialize(ASTContext &Context, SourceLocation Loc) const {
+ initializeImpl(Context, *this, Loc);
+ }
+
+ /// \brief Initializes this by copying its information from another
+ /// TypeLoc of the same type.
+ void initializeFullCopy(TypeLoc Other) const {
+ assert(getType() == Other.getType());
+ size_t Size = getFullDataSize();
+ memcpy(getOpaqueData(), Other.getOpaqueData(), Size);
+ }
+
+ /// \brief Initializes this by copying its information from another
+ /// TypeLoc of the same type. The given size must be the full data
+ /// size.
+ void initializeFullCopy(TypeLoc Other, unsigned Size) const {
+ assert(getType() == Other.getType());
+ assert(getFullDataSize() == Size);
+ memcpy(getOpaqueData(), Other.getOpaqueData(), Size);
+ }
+
+ friend bool operator==(const TypeLoc &LHS, const TypeLoc &RHS) {
+ return LHS.Ty == RHS.Ty && LHS.Data == RHS.Data;
+ }
+
+ friend bool operator!=(const TypeLoc &LHS, const TypeLoc &RHS) {
+ return !(LHS == RHS);
+ }
+
+ static bool classof(const TypeLoc *TL) { return true; }
+
+private:
+ static void initializeImpl(ASTContext &Context, TypeLoc TL,
+ SourceLocation Loc);
+ static TypeLoc getNextTypeLocImpl(TypeLoc TL);
+ static TypeLoc IgnoreParensImpl(TypeLoc TL);
+ static SourceRange getLocalSourceRangeImpl(TypeLoc TL);
+};
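+
+// For illustration only: walking a TypeLoc chain from the outermost wrapper
+// inward, starting from a hypothetical 'TypeSourceInfo *TSI'.
+//
+//   for (TypeLoc TL = TSI->getTypeLoc(); !TL.isNull(); TL = TL.getNextTypeLoc()) {
+//     SourceLocation Begin = TL.getBeginLoc();
+//     // e.g. for 'int *' the first TL is a PointerTypeLoc and the next
+//     // TypeLoc describes 'int'.
+//   }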
+
+/// \brief Return the TypeLoc for a type source info.
+inline TypeLoc TypeSourceInfo::getTypeLoc() const {
+ return TypeLoc(Ty, const_cast<void*>(static_cast<const void*>(this + 1)));
+}
+
+/// \brief Wrapper of type source information for a type with
+/// no direct qualifiers.
+class UnqualTypeLoc : public TypeLoc {
+public:
+ UnqualTypeLoc() {}
+ UnqualTypeLoc(const Type *Ty, void *Data) : TypeLoc(Ty, Data) {}
+
+ const Type *getTypePtr() const {
+ return reinterpret_cast<const Type*>(Ty);
+ }
+
+ TypeLocClass getTypeLocClass() const {
+ return (TypeLocClass) getTypePtr()->getTypeClass();
+ }
+
+ static bool classof(const TypeLoc *TL) {
+ return !TL->getType().hasLocalQualifiers();
+ }
+ static bool classof(const UnqualTypeLoc *TL) { return true; }
+};
+
+/// \brief Wrapper of type source information for a type with
+/// non-trivial direct qualifiers.
+///
+/// Currently, we intentionally do not provide source location for
+/// type qualifiers.
+class QualifiedTypeLoc : public TypeLoc {
+public:
+ SourceRange getLocalSourceRange() const {
+ return SourceRange();
+ }
+
+ UnqualTypeLoc getUnqualifiedLoc() const {
+ return UnqualTypeLoc(getTypePtr(), Data);
+ }
+
+ /// Initializes the local data of this type source info block to
+ /// provide no information.
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ // do nothing
+ }
+
+ TypeLoc getNextTypeLoc() const {
+ return getUnqualifiedLoc();
+ }
+
+ /// \brief Returns the size of the type source info data block that is
+ /// specific to this type.
+ unsigned getLocalDataSize() const {
+ // In fact, we don't currently preserve any location information
+ // for qualifiers.
+ return 0;
+ }
+
+ /// \brief Returns the size of the type source info data block.
+ unsigned getFullDataSize() const {
+ return getLocalDataSize() +
+ getFullDataSizeForType(getType().getLocalUnqualifiedType());
+ }
+
+ static bool classof(const TypeLoc *TL) {
+ return TL->getType().hasLocalQualifiers();
+ }
+ static bool classof(const QualifiedTypeLoc *TL) { return true; }
+};
+
+inline UnqualTypeLoc TypeLoc::getUnqualifiedLoc() const {
+ if (isa<QualifiedTypeLoc>(this))
+ return cast<QualifiedTypeLoc>(this)->getUnqualifiedLoc();
+ return cast<UnqualTypeLoc>(*this);
+}
+
+/// A metaprogramming base class for TypeLoc classes which correspond
+/// to a particular Type subclass. It is accepted for a single
+/// TypeLoc class to correspond to multiple Type classes.
+///
+/// \param Base a class from which to derive
+/// \param Derived the class deriving from this one
+/// \param TypeClass the concrete Type subclass associated with this
+/// location type
+/// \param LocalData the structure type of local location data for
+/// this type
+///
+/// sizeof(LocalData) needs to be a multiple of sizeof(void*) or
+/// else the world will end.
+///
+/// TypeLocs with non-constant amounts of local data should override
+/// getExtraLocalDataSize(); getExtraLocalData() will then point to
+/// this extra memory.
+///
+/// TypeLocs with an inner type should define
+/// QualType getInnerType() const
+/// and getInnerTypeLoc() will then point to this inner type's
+/// location data.
+///
+/// A word about hierarchies: this template is not designed to be
+/// derived from multiple times in a hierarchy. It is also not
+/// designed to be used for classes where subtypes might provide
+/// different amounts of source information. It should be subclassed
+/// only at the deepest portion of the hierarchy where all children
+/// have identical source information; if that's an abstract type,
+/// then further descendants should inherit from
+/// InheritingConcreteTypeLoc instead.
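+///
+/// For illustration only, a minimal sketch of a subclass following these
+/// conventions (it mirrors PointerTypeLoc below; the names used here are
+/// hypothetical):
+///
+/// \code
+///   struct MySigilLocInfo { SourceLocation SigilLoc; };
+///
+///   class MyPointerTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
+///                                                   MyPointerTypeLoc,
+///                                                   PointerType,
+///                                                   MySigilLocInfo> {
+///   public:
+///     SourceRange getLocalSourceRange() const {
+///       return SourceRange(getLocalData()->SigilLoc);
+///     }
+///     void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+///       getLocalData()->SigilLoc = Loc;
+///     }
+///     QualType getInnerType() const {
+///       return getTypePtr()->getPointeeType();
+///     }
+///   };
+/// \endcode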
+template <class Base, class Derived, class TypeClass, class LocalData>
+class ConcreteTypeLoc : public Base {
+
+ const Derived *asDerived() const {
+ return static_cast<const Derived*>(this);
+ }
+
+public:
+ unsigned getLocalDataSize() const {
+ return sizeof(LocalData) + asDerived()->getExtraLocalDataSize();
+ }
+ // Give a default implementation that's useful for leaf types.
+ unsigned getFullDataSize() const {
+ return asDerived()->getLocalDataSize() + getInnerTypeSize();
+ }
+
+ static bool classofType(const Type *Ty) {
+ return TypeClass::classof(Ty);
+ }
+
+ static bool classof(const TypeLoc *TL) {
+ return Derived::classofType(TL->getTypePtr());
+ }
+ static bool classof(const UnqualTypeLoc *TL) {
+ return Derived::classofType(TL->getTypePtr());
+ }
+ static bool classof(const Derived *TL) {
+ return true;
+ }
+
+ TypeLoc getNextTypeLoc() const {
+ return getNextTypeLoc(asDerived()->getInnerType());
+ }
+
+ const TypeClass *getTypePtr() const {
+ return cast<TypeClass>(Base::getTypePtr());
+ }
+
+protected:
+ unsigned getExtraLocalDataSize() const {
+ return 0;
+ }
+
+ LocalData *getLocalData() const {
+ return static_cast<LocalData*>(Base::Data);
+ }
+
+ /// Gets a pointer past the Info structure; useful for classes with
+ /// local data that can't be captured in the Info (e.g. because it's
+ /// of variable size).
+ void *getExtraLocalData() const {
+ return getLocalData() + 1;
+ }
+
+ void *getNonLocalData() const {
+ return static_cast<char*>(Base::Data) + asDerived()->getLocalDataSize();
+ }
+
+ struct HasNoInnerType {};
+ HasNoInnerType getInnerType() const { return HasNoInnerType(); }
+
+ TypeLoc getInnerTypeLoc() const {
+ return TypeLoc(asDerived()->getInnerType(), getNonLocalData());
+ }
+
+private:
+ unsigned getInnerTypeSize() const {
+ return getInnerTypeSize(asDerived()->getInnerType());
+ }
+
+ unsigned getInnerTypeSize(HasNoInnerType _) const {
+ return 0;
+ }
+
+ unsigned getInnerTypeSize(QualType _) const {
+ return getInnerTypeLoc().getFullDataSize();
+ }
+
+ TypeLoc getNextTypeLoc(HasNoInnerType _) const {
+ return TypeLoc();
+ }
+
+ TypeLoc getNextTypeLoc(QualType T) const {
+ return TypeLoc(T, getNonLocalData());
+ }
+};
+
+/// A metaprogramming class designed for concrete subtypes of abstract
+/// types where all subtypes share equivalently-structured source
+/// information. See the note on ConcreteTypeLoc.
+template <class Base, class Derived, class TypeClass>
+class InheritingConcreteTypeLoc : public Base {
+public:
+ static bool classofType(const Type *Ty) {
+ return TypeClass::classof(Ty);
+ }
+
+ static bool classof(const TypeLoc *TL) {
+ return Derived::classofType(TL->getTypePtr());
+ }
+ static bool classof(const UnqualTypeLoc *TL) {
+ return Derived::classofType(TL->getTypePtr());
+ }
+ static bool classof(const Derived *TL) {
+ return true;
+ }
+
+ const TypeClass *getTypePtr() const {
+ return cast<TypeClass>(Base::getTypePtr());
+ }
+};
+
+
+struct TypeSpecLocInfo {
+ SourceLocation NameLoc;
+};
+
+/// \brief A reasonable base class for TypeLocs that correspond to
+/// types that are written as a type-specifier.
+class TypeSpecTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
+ TypeSpecTypeLoc,
+ Type,
+ TypeSpecLocInfo> {
+public:
+ enum { LocalDataSize = sizeof(TypeSpecLocInfo) };
+
+ SourceLocation getNameLoc() const {
+ return this->getLocalData()->NameLoc;
+ }
+ void setNameLoc(SourceLocation Loc) {
+ this->getLocalData()->NameLoc = Loc;
+ }
+ SourceRange getLocalSourceRange() const {
+ return SourceRange(getNameLoc(), getNameLoc());
+ }
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setNameLoc(Loc);
+ }
+
+ static bool classof(const TypeLoc *TL);
+ static bool classof(const TypeSpecTypeLoc *TL) { return true; }
+};
+
+
+struct BuiltinLocInfo {
+ SourceLocation BuiltinLoc;
+};
+
+/// \brief Wrapper for source info for builtin types.
+class BuiltinTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
+ BuiltinTypeLoc,
+ BuiltinType,
+ BuiltinLocInfo> {
+public:
+ enum { LocalDataSize = sizeof(BuiltinLocInfo) };
+
+ SourceLocation getBuiltinLoc() const {
+ return getLocalData()->BuiltinLoc;
+ }
+ void setBuiltinLoc(SourceLocation Loc) {
+ getLocalData()->BuiltinLoc = Loc;
+ }
+
+ SourceLocation getNameLoc() const { return getBuiltinLoc(); }
+
+ WrittenBuiltinSpecs& getWrittenBuiltinSpecs() {
+ return *(static_cast<WrittenBuiltinSpecs*>(getExtraLocalData()));
+ }
+ const WrittenBuiltinSpecs& getWrittenBuiltinSpecs() const {
+ return *(static_cast<WrittenBuiltinSpecs*>(getExtraLocalData()));
+ }
+
+ bool needsExtraLocalData() const {
+ BuiltinType::Kind bk = getTypePtr()->getKind();
+ return (bk >= BuiltinType::UShort && bk <= BuiltinType::UInt128)
+ || (bk >= BuiltinType::Short && bk <= BuiltinType::LongDouble)
+ || bk == BuiltinType::UChar
+ || bk == BuiltinType::SChar;
+ }
+
+ unsigned getExtraLocalDataSize() const {
+ return needsExtraLocalData() ? sizeof(WrittenBuiltinSpecs) : 0;
+ }
+
+ SourceRange getLocalSourceRange() const {
+ return SourceRange(getBuiltinLoc(), getBuiltinLoc());
+ }
+
+ TypeSpecifierSign getWrittenSignSpec() const {
+ if (needsExtraLocalData())
+ return static_cast<TypeSpecifierSign>(getWrittenBuiltinSpecs().Sign);
+ else
+ return TSS_unspecified;
+ }
+ bool hasWrittenSignSpec() const {
+ return getWrittenSignSpec() != TSS_unspecified;
+ }
+ void setWrittenSignSpec(TypeSpecifierSign written) {
+ if (needsExtraLocalData())
+ getWrittenBuiltinSpecs().Sign = written;
+ }
+
+ TypeSpecifierWidth getWrittenWidthSpec() const {
+ if (needsExtraLocalData())
+ return static_cast<TypeSpecifierWidth>(getWrittenBuiltinSpecs().Width);
+ else
+ return TSW_unspecified;
+ }
+ bool hasWrittenWidthSpec() const {
+ return getWrittenWidthSpec() != TSW_unspecified;
+ }
+ void setWrittenWidthSpec(TypeSpecifierWidth written) {
+ if (needsExtraLocalData())
+ getWrittenBuiltinSpecs().Width = written;
+ }
+
+ TypeSpecifierType getWrittenTypeSpec() const;
+ bool hasWrittenTypeSpec() const {
+ return getWrittenTypeSpec() != TST_unspecified;
+ }
+ void setWrittenTypeSpec(TypeSpecifierType written) {
+ if (needsExtraLocalData())
+ getWrittenBuiltinSpecs().Type = written;
+ }
+
+ bool hasModeAttr() const {
+ if (needsExtraLocalData())
+ return getWrittenBuiltinSpecs().ModeAttr;
+ else
+ return false;
+ }
+ void setModeAttr(bool written) {
+ if (needsExtraLocalData())
+ getWrittenBuiltinSpecs().ModeAttr = written;
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setBuiltinLoc(Loc);
+ if (needsExtraLocalData()) {
+ WrittenBuiltinSpecs &wbs = getWrittenBuiltinSpecs();
+ wbs.Sign = TSS_unspecified;
+ wbs.Width = TSW_unspecified;
+ wbs.Type = TST_unspecified;
+ wbs.ModeAttr = false;
+ }
+ }
+};
+
+
+/// \brief Wrapper for source info for typedefs.
+class TypedefTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
+ TypedefTypeLoc,
+ TypedefType> {
+public:
+ TypedefNameDecl *getTypedefNameDecl() const {
+ return getTypePtr()->getDecl();
+ }
+};
+
+/// \brief Wrapper for source info for injected class names of class
+/// templates.
+class InjectedClassNameTypeLoc :
+ public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
+ InjectedClassNameTypeLoc,
+ InjectedClassNameType> {
+public:
+ CXXRecordDecl *getDecl() const {
+ return getTypePtr()->getDecl();
+ }
+};
+
+/// \brief Wrapper for source info for unresolved typename using decls.
+class UnresolvedUsingTypeLoc :
+ public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
+ UnresolvedUsingTypeLoc,
+ UnresolvedUsingType> {
+public:
+ UnresolvedUsingTypenameDecl *getDecl() const {
+ return getTypePtr()->getDecl();
+ }
+};
+
+/// \brief Wrapper for source info for tag types. Note that this only
+/// records source info for the name itself; a type written 'struct foo'
+/// should be represented as an ElaboratedTypeLoc. We currently
+/// only do that when C++ is enabled because of the expense of
+/// creating an ElaboratedType node for so many type references in C.
+class TagTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
+ TagTypeLoc,
+ TagType> {
+public:
+ TagDecl *getDecl() const { return getTypePtr()->getDecl(); }
+
+ /// \brief True if the tag was defined in this type specifier.
+ bool isDefinition() const {
+ TagDecl *D = getDecl();
+ return D->isCompleteDefinition() &&
+ (D->getIdentifier() == 0 || D->getLocation() == getNameLoc());
+ }
+};
+
+/// \brief Wrapper for source info for record types.
+class RecordTypeLoc : public InheritingConcreteTypeLoc<TagTypeLoc,
+ RecordTypeLoc,
+ RecordType> {
+public:
+ RecordDecl *getDecl() const { return getTypePtr()->getDecl(); }
+};
+
+/// \brief Wrapper for source info for enum types.
+class EnumTypeLoc : public InheritingConcreteTypeLoc<TagTypeLoc,
+ EnumTypeLoc,
+ EnumType> {
+public:
+ EnumDecl *getDecl() const { return getTypePtr()->getDecl(); }
+};
+
+/// \brief Wrapper for template type parameters.
+class TemplateTypeParmTypeLoc :
+ public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
+ TemplateTypeParmTypeLoc,
+ TemplateTypeParmType> {
+public:
+ TemplateTypeParmDecl *getDecl() const { return getTypePtr()->getDecl(); }
+};
+
+/// \brief Wrapper for substituted template type parameters.
+class SubstTemplateTypeParmTypeLoc :
+ public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
+ SubstTemplateTypeParmTypeLoc,
+ SubstTemplateTypeParmType> {
+};
+
+/// \brief Wrapper for substituted template type parameter packs.
+class SubstTemplateTypeParmPackTypeLoc :
+ public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
+ SubstTemplateTypeParmPackTypeLoc,
+ SubstTemplateTypeParmPackType> {
+};
+
+struct AttributedLocInfo {
+ union {
+ Expr *ExprOperand;
+
+ /// A raw SourceLocation.
+ unsigned EnumOperandLoc;
+ };
+
+ SourceRange OperandParens;
+
+ SourceLocation AttrLoc;
+};
+
+/// \brief Type source information for an attributed type.
+class AttributedTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
+ AttributedTypeLoc,
+ AttributedType,
+ AttributedLocInfo> {
+public:
+ AttributedType::Kind getAttrKind() const {
+ return getTypePtr()->getAttrKind();
+ }
+
+ bool hasAttrExprOperand() const {
+ return (getAttrKind() >= AttributedType::FirstExprOperandKind &&
+ getAttrKind() <= AttributedType::LastExprOperandKind);
+ }
+
+ bool hasAttrEnumOperand() const {
+ return (getAttrKind() >= AttributedType::FirstEnumOperandKind &&
+ getAttrKind() <= AttributedType::LastEnumOperandKind);
+ }
+
+ bool hasAttrOperand() const {
+ return hasAttrExprOperand() || hasAttrEnumOperand();
+ }
+
+ /// The modified type, which is generally canonically different from
+ /// the attribute type.
+ /// int main(int, char**) __attribute__((noreturn))
+ /// ~~~ ~~~~~~~~~~~~~
+ TypeLoc getModifiedLoc() const {
+ return getInnerTypeLoc();
+ }
+
+ /// The location of the attribute name, i.e.
+ /// __attribute__((regparm(1000)))
+ /// ^~~~~~~
+ SourceLocation getAttrNameLoc() const {
+ return getLocalData()->AttrLoc;
+ }
+ void setAttrNameLoc(SourceLocation loc) {
+ getLocalData()->AttrLoc = loc;
+ }
+
+ /// The attribute's expression operand, if it has one.
+ /// void *cur_thread __attribute__((address_space(21)))
+ /// ^~
+ Expr *getAttrExprOperand() const {
+ assert(hasAttrExprOperand());
+ return getLocalData()->ExprOperand;
+ }
+ void setAttrExprOperand(Expr *e) {
+ assert(hasAttrExprOperand());
+ getLocalData()->ExprOperand = e;
+ }
+
+ /// The location of the attribute's enumerated operand, if it has one.
+ /// void * __attribute__((objc_gc(weak)))
+ /// ^~~~
+ SourceLocation getAttrEnumOperandLoc() const {
+ assert(hasAttrEnumOperand());
+ return SourceLocation::getFromRawEncoding(getLocalData()->EnumOperandLoc);
+ }
+ void setAttrEnumOperandLoc(SourceLocation loc) {
+ assert(hasAttrEnumOperand());
+ getLocalData()->EnumOperandLoc = loc.getRawEncoding();
+ }
+
+ /// The location of the parentheses around the operand, if there is
+ /// an operand.
+ /// void * __attribute__((objc_gc(weak)))
+ /// ^ ^
+ SourceRange getAttrOperandParensRange() const {
+ assert(hasAttrOperand());
+ return getLocalData()->OperandParens;
+ }
+ void setAttrOperandParensRange(SourceRange range) {
+ assert(hasAttrOperand());
+ getLocalData()->OperandParens = range;
+ }
+
+ SourceRange getLocalSourceRange() const {
+ // Note that this does *not* include the range of the attribute
+ // enclosure, e.g.:
+ // __attribute__((foo(bar)))
+ // ^~~~~~~~~~~~~~~ ~~
+ // or
+ // [[foo(bar)]]
+ // ^~ ~~
+ // That enclosure doesn't necessarily belong to a single attribute
+ // anyway.
+ SourceRange range(getAttrNameLoc());
+ if (hasAttrOperand())
+ range.setEnd(getAttrOperandParensRange().getEnd());
+ return range;
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation loc) {
+ setAttrNameLoc(loc);
+ if (hasAttrExprOperand()) {
+ setAttrOperandParensRange(SourceRange(loc));
+ setAttrExprOperand(0);
+ } else if (hasAttrEnumOperand()) {
+ setAttrOperandParensRange(SourceRange(loc));
+ setAttrEnumOperandLoc(loc);
+ }
+ }
+
+ QualType getInnerType() const {
+ return getTypePtr()->getModifiedType();
+ }
+};
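+
+// For illustration only: reading source information back from a hypothetical
+// AttributedTypeLoc 'ATL', using only the accessors declared above.
+//
+//   SourceLocation NameLoc = ATL.getAttrNameLoc();
+//   if (ATL.hasAttrExprOperand()) {
+//     Expr *Arg = ATL.getAttrExprOperand();
+//     SourceRange Parens = ATL.getAttrOperandParensRange();
+//   }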
+
+
+struct ObjCProtocolListLocInfo {
+ SourceLocation LAngleLoc;
+ SourceLocation RAngleLoc;
+ bool HasBaseTypeAsWritten;
+};
+
+// A helper class for defining ObjC TypeLocs that can be qualified with
+// protocols.
+//
+// TypeClass basically has to be either ObjCInterfaceType or
+// ObjCObjectPointerType.
+class ObjCObjectTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
+ ObjCObjectTypeLoc,
+ ObjCObjectType,
+ ObjCProtocolListLocInfo> {
+ // SourceLocations are stored after Info, one for each Protocol.
+ SourceLocation *getProtocolLocArray() const {
+ return (SourceLocation*) this->getExtraLocalData();
+ }
+
+public:
+ SourceLocation getLAngleLoc() const {
+ return this->getLocalData()->LAngleLoc;
+ }
+ void setLAngleLoc(SourceLocation Loc) {
+ this->getLocalData()->LAngleLoc = Loc;
+ }
+
+ SourceLocation getRAngleLoc() const {
+ return this->getLocalData()->RAngleLoc;
+ }
+ void setRAngleLoc(SourceLocation Loc) {
+ this->getLocalData()->RAngleLoc = Loc;
+ }
+
+ unsigned getNumProtocols() const {
+ return this->getTypePtr()->getNumProtocols();
+ }
+
+ SourceLocation getProtocolLoc(unsigned i) const {
+ assert(i < getNumProtocols() && "Index is out of bounds!");
+ return getProtocolLocArray()[i];
+ }
+ void setProtocolLoc(unsigned i, SourceLocation Loc) {
+ assert(i < getNumProtocols() && "Index is out of bounds!");
+ getProtocolLocArray()[i] = Loc;
+ }
+
+ ObjCProtocolDecl *getProtocol(unsigned i) const {
+ assert(i < getNumProtocols() && "Index is out of bounds!");
+ return *(this->getTypePtr()->qual_begin() + i);
+ }
+
+ bool hasBaseTypeAsWritten() const {
+ return getLocalData()->HasBaseTypeAsWritten;
+ }
+
+ void setHasBaseTypeAsWritten(bool HasBaseType) {
+ getLocalData()->HasBaseTypeAsWritten = HasBaseType;
+ }
+
+ TypeLoc getBaseLoc() const {
+ return getInnerTypeLoc();
+ }
+
+ SourceRange getLocalSourceRange() const {
+ return SourceRange(getLAngleLoc(), getRAngleLoc());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setHasBaseTypeAsWritten(true);
+ setLAngleLoc(Loc);
+ setRAngleLoc(Loc);
+ for (unsigned i = 0, e = getNumProtocols(); i != e; ++i)
+ setProtocolLoc(i, Loc);
+ }
+
+ unsigned getExtraLocalDataSize() const {
+ return this->getNumProtocols() * sizeof(SourceLocation);
+ }
+
+ QualType getInnerType() const {
+ return getTypePtr()->getBaseType();
+ }
+};
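+
+// For illustration only: the angle-bracket and per-protocol locations of a
+// hypothetical ObjCObjectTypeLoc 'OTL' can be read back as follows.
+//
+//   SourceRange Angles(OTL.getLAngleLoc(), OTL.getRAngleLoc());
+//   for (unsigned i = 0, e = OTL.getNumProtocols(); i != e; ++i) {
+//     SourceLocation ProtoLoc = OTL.getProtocolLoc(i);
+//     ObjCProtocolDecl *Proto = OTL.getProtocol(i);
+//   }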
+
+
+struct ObjCInterfaceLocInfo {
+ SourceLocation NameLoc;
+};
+
+/// \brief Wrapper for source info for ObjC interfaces.
+class ObjCInterfaceTypeLoc : public ConcreteTypeLoc<ObjCObjectTypeLoc,
+ ObjCInterfaceTypeLoc,
+ ObjCInterfaceType,
+ ObjCInterfaceLocInfo> {
+public:
+ ObjCInterfaceDecl *getIFaceDecl() const {
+ return getTypePtr()->getDecl();
+ }
+
+ SourceLocation getNameLoc() const {
+ return getLocalData()->NameLoc;
+ }
+
+ void setNameLoc(SourceLocation Loc) {
+ getLocalData()->NameLoc = Loc;
+ }
+
+ SourceRange getLocalSourceRange() const {
+ return SourceRange(getNameLoc());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setNameLoc(Loc);
+ }
+};
+
+struct ParenLocInfo {
+ SourceLocation LParenLoc;
+ SourceLocation RParenLoc;
+};
+
+class ParenTypeLoc
+ : public ConcreteTypeLoc<UnqualTypeLoc, ParenTypeLoc, ParenType,
+ ParenLocInfo> {
+public:
+ SourceLocation getLParenLoc() const {
+ return this->getLocalData()->LParenLoc;
+ }
+ SourceLocation getRParenLoc() const {
+ return this->getLocalData()->RParenLoc;
+ }
+ void setLParenLoc(SourceLocation Loc) {
+ this->getLocalData()->LParenLoc = Loc;
+ }
+ void setRParenLoc(SourceLocation Loc) {
+ this->getLocalData()->RParenLoc = Loc;
+ }
+
+ SourceRange getLocalSourceRange() const {
+ return SourceRange(getLParenLoc(), getRParenLoc());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setLParenLoc(Loc);
+ setRParenLoc(Loc);
+ }
+
+ TypeLoc getInnerLoc() const {
+ return getInnerTypeLoc();
+ }
+
+ QualType getInnerType() const {
+ return this->getTypePtr()->getInnerType();
+ }
+};
+
+
+struct PointerLikeLocInfo {
+ SourceLocation StarLoc;
+};
+
+/// A base class for the TypeLocs of pointer-like types, which all record
+/// the location of a single sigil character (e.g. '*', '^', or '&').
+template <class Derived, class TypeClass, class LocalData = PointerLikeLocInfo>
+class PointerLikeTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc, Derived,
+ TypeClass, LocalData> {
+public:
+ SourceLocation getSigilLoc() const {
+ return this->getLocalData()->StarLoc;
+ }
+ void setSigilLoc(SourceLocation Loc) {
+ this->getLocalData()->StarLoc = Loc;
+ }
+
+ TypeLoc getPointeeLoc() const {
+ return this->getInnerTypeLoc();
+ }
+
+ SourceRange getLocalSourceRange() const {
+ return SourceRange(getSigilLoc(), getSigilLoc());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setSigilLoc(Loc);
+ }
+
+ QualType getInnerType() const {
+ return this->getTypePtr()->getPointeeType();
+ }
+};
+
+
+/// \brief Wrapper for source info for pointers.
+class PointerTypeLoc : public PointerLikeTypeLoc<PointerTypeLoc,
+ PointerType> {
+public:
+ SourceLocation getStarLoc() const {
+ return getSigilLoc();
+ }
+ void setStarLoc(SourceLocation Loc) {
+ setSigilLoc(Loc);
+ }
+};
+
+
+/// \brief Wrapper for source info for block pointers.
+class BlockPointerTypeLoc : public PointerLikeTypeLoc<BlockPointerTypeLoc,
+ BlockPointerType> {
+public:
+ SourceLocation getCaretLoc() const {
+ return getSigilLoc();
+ }
+ void setCaretLoc(SourceLocation Loc) {
+ setSigilLoc(Loc);
+ }
+};
+
+struct MemberPointerLocInfo : public PointerLikeLocInfo {
+ TypeSourceInfo *ClassTInfo;
+};
+
+/// \brief Wrapper for source info for member pointers.
+class MemberPointerTypeLoc : public PointerLikeTypeLoc<MemberPointerTypeLoc,
+ MemberPointerType,
+ MemberPointerLocInfo> {
+public:
+ SourceLocation getStarLoc() const {
+ return getSigilLoc();
+ }
+ void setStarLoc(SourceLocation Loc) {
+ setSigilLoc(Loc);
+ }
+
+ const Type *getClass() const {
+ return getTypePtr()->getClass();
+ }
+ TypeSourceInfo *getClassTInfo() const {
+ return getLocalData()->ClassTInfo;
+ }
+ void setClassTInfo(TypeSourceInfo* TI) {
+ getLocalData()->ClassTInfo = TI;
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setSigilLoc(Loc);
+ setClassTInfo(0);
+ }
+
+ SourceRange getLocalSourceRange() const {
+ if (TypeSourceInfo *TI = getClassTInfo())
+ return SourceRange(TI->getTypeLoc().getBeginLoc(), getStarLoc());
+ else
+ return SourceRange(getStarLoc());
+ }
+};
+
+/// Wraps an ObjCObjectPointerType with source location information.
+class ObjCObjectPointerTypeLoc :
+ public PointerLikeTypeLoc<ObjCObjectPointerTypeLoc,
+ ObjCObjectPointerType> {
+public:
+ SourceLocation getStarLoc() const {
+ return getSigilLoc();
+ }
+
+ void setStarLoc(SourceLocation Loc) {
+ setSigilLoc(Loc);
+ }
+};
+
+
+class ReferenceTypeLoc : public PointerLikeTypeLoc<ReferenceTypeLoc,
+ ReferenceType> {
+public:
+ QualType getInnerType() const {
+ return getTypePtr()->getPointeeTypeAsWritten();
+ }
+};
+
+class LValueReferenceTypeLoc :
+ public InheritingConcreteTypeLoc<ReferenceTypeLoc,
+ LValueReferenceTypeLoc,
+ LValueReferenceType> {
+public:
+ SourceLocation getAmpLoc() const {
+ return getSigilLoc();
+ }
+ void setAmpLoc(SourceLocation Loc) {
+ setSigilLoc(Loc);
+ }
+};
+
+class RValueReferenceTypeLoc :
+ public InheritingConcreteTypeLoc<ReferenceTypeLoc,
+ RValueReferenceTypeLoc,
+ RValueReferenceType> {
+public:
+ SourceLocation getAmpAmpLoc() const {
+ return getSigilLoc();
+ }
+ void setAmpAmpLoc(SourceLocation Loc) {
+ setSigilLoc(Loc);
+ }
+};
+
+
+struct FunctionLocInfo {
+ SourceLocation LocalRangeBegin;
+ SourceLocation LocalRangeEnd;
+ bool TrailingReturn;
+};
+
+/// \brief Wrapper for source info for functions.
+class FunctionTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
+ FunctionTypeLoc,
+ FunctionType,
+ FunctionLocInfo> {
+public:
+ SourceLocation getLocalRangeBegin() const {
+ return getLocalData()->LocalRangeBegin;
+ }
+ void setLocalRangeBegin(SourceLocation L) {
+ getLocalData()->LocalRangeBegin = L;
+ }
+
+ SourceLocation getLocalRangeEnd() const {
+ return getLocalData()->LocalRangeEnd;
+ }
+ void setLocalRangeEnd(SourceLocation L) {
+ getLocalData()->LocalRangeEnd = L;
+ }
+
+ bool getTrailingReturn() const {
+ return getLocalData()->TrailingReturn;
+ }
+ void setTrailingReturn(bool Trailing) {
+ getLocalData()->TrailingReturn = Trailing;
+ }
+
+ ArrayRef<ParmVarDecl *> getParams() const {
+ return ArrayRef<ParmVarDecl *>(getParmArray(), getNumArgs());
+ }
+
+ // ParmVarDecls* are stored after Info, one for each argument.
+ ParmVarDecl **getParmArray() const {
+ return (ParmVarDecl**) getExtraLocalData();
+ }
+
+ unsigned getNumArgs() const {
+ if (isa<FunctionNoProtoType>(getTypePtr()))
+ return 0;
+ return cast<FunctionProtoType>(getTypePtr())->getNumArgs();
+ }
+ ParmVarDecl *getArg(unsigned i) const { return getParmArray()[i]; }
+ void setArg(unsigned i, ParmVarDecl *VD) { getParmArray()[i] = VD; }
+
+ TypeLoc getResultLoc() const {
+ return getInnerTypeLoc();
+ }
+
+ SourceRange getLocalSourceRange() const {
+ return SourceRange(getLocalRangeBegin(), getLocalRangeEnd());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setLocalRangeBegin(Loc);
+ setLocalRangeEnd(Loc);
+ setTrailingReturn(false);
+ for (unsigned i = 0, e = getNumArgs(); i != e; ++i)
+ setArg(i, NULL);
+ }
+
+ /// \brief Returns the size of the type source info data block that is
+ /// specific to this type.
+ unsigned getExtraLocalDataSize() const {
+ return getNumArgs() * sizeof(ParmVarDecl*);
+ }
+
+ QualType getInnerType() const { return getTypePtr()->getResultType(); }
+};
+
+class FunctionProtoTypeLoc :
+ public InheritingConcreteTypeLoc<FunctionTypeLoc,
+ FunctionProtoTypeLoc,
+ FunctionProtoType> {
+};
+
+class FunctionNoProtoTypeLoc :
+ public InheritingConcreteTypeLoc<FunctionTypeLoc,
+ FunctionNoProtoTypeLoc,
+ FunctionNoProtoType> {
+};
+
+
+struct ArrayLocInfo {
+ SourceLocation LBracketLoc, RBracketLoc;
+ Expr *Size;
+};
+
+/// \brief Wrapper for source info for arrays.
+class ArrayTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
+ ArrayTypeLoc,
+ ArrayType,
+ ArrayLocInfo> {
+public:
+ SourceLocation getLBracketLoc() const {
+ return getLocalData()->LBracketLoc;
+ }
+ void setLBracketLoc(SourceLocation Loc) {
+ getLocalData()->LBracketLoc = Loc;
+ }
+
+ SourceLocation getRBracketLoc() const {
+ return getLocalData()->RBracketLoc;
+ }
+ void setRBracketLoc(SourceLocation Loc) {
+ getLocalData()->RBracketLoc = Loc;
+ }
+
+ SourceRange getBracketsRange() const {
+ return SourceRange(getLBracketLoc(), getRBracketLoc());
+ }
+
+ Expr *getSizeExpr() const {
+ return getLocalData()->Size;
+ }
+ void setSizeExpr(Expr *Size) {
+ getLocalData()->Size = Size;
+ }
+
+ TypeLoc getElementLoc() const {
+ return getInnerTypeLoc();
+ }
+
+ SourceRange getLocalSourceRange() const {
+ return SourceRange(getLBracketLoc(), getRBracketLoc());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setLBracketLoc(Loc);
+ setRBracketLoc(Loc);
+ setSizeExpr(NULL);
+ }
+
+ QualType getInnerType() const { return getTypePtr()->getElementType(); }
+};
+
+class ConstantArrayTypeLoc :
+ public InheritingConcreteTypeLoc<ArrayTypeLoc,
+ ConstantArrayTypeLoc,
+ ConstantArrayType> {
+};
+
+class IncompleteArrayTypeLoc :
+ public InheritingConcreteTypeLoc<ArrayTypeLoc,
+ IncompleteArrayTypeLoc,
+ IncompleteArrayType> {
+};
+
+class DependentSizedArrayTypeLoc :
+ public InheritingConcreteTypeLoc<ArrayTypeLoc,
+ DependentSizedArrayTypeLoc,
+ DependentSizedArrayType> {
+
+};
+
+class VariableArrayTypeLoc :
+ public InheritingConcreteTypeLoc<ArrayTypeLoc,
+ VariableArrayTypeLoc,
+ VariableArrayType> {
+};
+
+
+// Location information for a TemplateName. Rudimentary for now.
+struct TemplateNameLocInfo {
+ SourceLocation NameLoc;
+};
+
+struct TemplateSpecializationLocInfo : TemplateNameLocInfo {
+ SourceLocation TemplateKWLoc;
+ SourceLocation LAngleLoc;
+ SourceLocation RAngleLoc;
+};
+
+class TemplateSpecializationTypeLoc :
+ public ConcreteTypeLoc<UnqualTypeLoc,
+ TemplateSpecializationTypeLoc,
+ TemplateSpecializationType,
+ TemplateSpecializationLocInfo> {
+public:
+ SourceLocation getTemplateKeywordLoc() const {
+ return getLocalData()->TemplateKWLoc;
+ }
+ void setTemplateKeywordLoc(SourceLocation Loc) {
+ getLocalData()->TemplateKWLoc = Loc;
+ }
+
+ SourceLocation getLAngleLoc() const {
+ return getLocalData()->LAngleLoc;
+ }
+ void setLAngleLoc(SourceLocation Loc) {
+ getLocalData()->LAngleLoc = Loc;
+ }
+
+ SourceLocation getRAngleLoc() const {
+ return getLocalData()->RAngleLoc;
+ }
+ void setRAngleLoc(SourceLocation Loc) {
+ getLocalData()->RAngleLoc = Loc;
+ }
+
+ unsigned getNumArgs() const {
+ return getTypePtr()->getNumArgs();
+ }
+ void setArgLocInfo(unsigned i, TemplateArgumentLocInfo AI) {
+ getArgInfos()[i] = AI;
+ }
+ TemplateArgumentLocInfo getArgLocInfo(unsigned i) const {
+ return getArgInfos()[i];
+ }
+
+ TemplateArgumentLoc getArgLoc(unsigned i) const {
+ return TemplateArgumentLoc(getTypePtr()->getArg(i), getArgLocInfo(i));
+ }
+
+ SourceLocation getTemplateNameLoc() const {
+ return getLocalData()->NameLoc;
+ }
+ void setTemplateNameLoc(SourceLocation Loc) {
+ getLocalData()->NameLoc = Loc;
+ }
+
+ /// \brief Copy the location information from the given info.
+ void copy(TemplateSpecializationTypeLoc Loc) {
+ unsigned size = getFullDataSize();
+ assert(size == Loc.getFullDataSize());
+
+ // We're potentially copying Expr references here. We don't
+ // bother retaining them because TypeSourceInfos live forever, so
+ // as long as the Expr was retained when originally written into
+ // the TypeLoc, we're okay.
+ memcpy(Data, Loc.Data, size);
+ }
+
+ SourceRange getLocalSourceRange() const {
+ if (getTemplateKeywordLoc().isValid())
+ return SourceRange(getTemplateKeywordLoc(), getRAngleLoc());
+ else
+ return SourceRange(getTemplateNameLoc(), getRAngleLoc());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setTemplateKeywordLoc(Loc);
+ setTemplateNameLoc(Loc);
+ setLAngleLoc(Loc);
+ setRAngleLoc(Loc);
+ initializeArgLocs(Context, getNumArgs(), getTypePtr()->getArgs(),
+ getArgInfos(), Loc);
+ }
+
+ static void initializeArgLocs(ASTContext &Context, unsigned NumArgs,
+ const TemplateArgument *Args,
+ TemplateArgumentLocInfo *ArgInfos,
+ SourceLocation Loc);
+
+ unsigned getExtraLocalDataSize() const {
+ return getNumArgs() * sizeof(TemplateArgumentLocInfo);
+ }
+
+private:
+ TemplateArgumentLocInfo *getArgInfos() const {
+ return static_cast<TemplateArgumentLocInfo*>(getExtraLocalData());
+ }
+};
+
+//===----------------------------------------------------------------------===//
+//
+// All of these need proper implementations.
+//
+//===----------------------------------------------------------------------===//
+
+// FIXME: size expression and attribute locations (or keyword if we
+// ever fully support altivec syntax).
+class VectorTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
+ VectorTypeLoc,
+ VectorType> {
+};
+
+// FIXME: size expression and attribute locations.
+class ExtVectorTypeLoc : public InheritingConcreteTypeLoc<VectorTypeLoc,
+ ExtVectorTypeLoc,
+ ExtVectorType> {
+};
+
+// FIXME: attribute locations.
+// For some reason, this isn't a subtype of VectorType.
+class DependentSizedExtVectorTypeLoc :
+ public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
+ DependentSizedExtVectorTypeLoc,
+ DependentSizedExtVectorType> {
+};
+
+// FIXME: location of the '_Complex' keyword.
+class ComplexTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
+ ComplexTypeLoc,
+ ComplexType> {
+};
+
+struct TypeofLocInfo {
+ SourceLocation TypeofLoc;
+ SourceLocation LParenLoc;
+ SourceLocation RParenLoc;
+};
+
+struct TypeOfExprTypeLocInfo : public TypeofLocInfo {
+};
+
+struct TypeOfTypeLocInfo : public TypeofLocInfo {
+ TypeSourceInfo* UnderlyingTInfo;
+};
+
+template <class Derived, class TypeClass, class LocalData = TypeofLocInfo>
+class TypeofLikeTypeLoc
+ : public ConcreteTypeLoc<UnqualTypeLoc, Derived, TypeClass, LocalData> {
+public:
+ SourceLocation getTypeofLoc() const {
+ return this->getLocalData()->TypeofLoc;
+ }
+ void setTypeofLoc(SourceLocation Loc) {
+ this->getLocalData()->TypeofLoc = Loc;
+ }
+
+ SourceLocation getLParenLoc() const {
+ return this->getLocalData()->LParenLoc;
+ }
+ void setLParenLoc(SourceLocation Loc) {
+ this->getLocalData()->LParenLoc = Loc;
+ }
+
+ SourceLocation getRParenLoc() const {
+ return this->getLocalData()->RParenLoc;
+ }
+ void setRParenLoc(SourceLocation Loc) {
+ this->getLocalData()->RParenLoc = Loc;
+ }
+
+ SourceRange getParensRange() const {
+ return SourceRange(getLParenLoc(), getRParenLoc());
+ }
+ void setParensRange(SourceRange range) {
+ setLParenLoc(range.getBegin());
+ setRParenLoc(range.getEnd());
+ }
+
+ SourceRange getLocalSourceRange() const {
+ return SourceRange(getTypeofLoc(), getRParenLoc());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setTypeofLoc(Loc);
+ setLParenLoc(Loc);
+ setRParenLoc(Loc);
+ }
+};
+
+class TypeOfExprTypeLoc : public TypeofLikeTypeLoc<TypeOfExprTypeLoc,
+ TypeOfExprType,
+ TypeOfExprTypeLocInfo> {
+public:
+ Expr* getUnderlyingExpr() const {
+ return getTypePtr()->getUnderlyingExpr();
+ }
+ // Reimplemented to account for the GNU/C++ extension
+ // typeof unary-expression
+ // where there are no parentheses.
+ SourceRange getLocalSourceRange() const;
+};
+
+class TypeOfTypeLoc
+ : public TypeofLikeTypeLoc<TypeOfTypeLoc, TypeOfType, TypeOfTypeLocInfo> {
+public:
+ QualType getUnderlyingType() const {
+ return this->getTypePtr()->getUnderlyingType();
+ }
+ TypeSourceInfo* getUnderlyingTInfo() const {
+ return this->getLocalData()->UnderlyingTInfo;
+ }
+ void setUnderlyingTInfo(TypeSourceInfo* TI) const {
+ this->getLocalData()->UnderlyingTInfo = TI;
+ }
+};
+
+// FIXME: location of the 'decltype' and parens.
+class DecltypeTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
+ DecltypeTypeLoc,
+ DecltypeType> {
+public:
+ Expr *getUnderlyingExpr() const { return getTypePtr()->getUnderlyingExpr(); }
+};
+
+struct UnaryTransformTypeLocInfo {
+ // FIXME: While there's only one unary transform right now, future ones may
+ // need different representations
+ SourceLocation KWLoc, LParenLoc, RParenLoc;
+ TypeSourceInfo *UnderlyingTInfo;
+};
+
+class UnaryTransformTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
+ UnaryTransformTypeLoc,
+ UnaryTransformType,
+ UnaryTransformTypeLocInfo> {
+public:
+ SourceLocation getKWLoc() const { return getLocalData()->KWLoc; }
+ void setKWLoc(SourceLocation Loc) { getLocalData()->KWLoc = Loc; }
+
+ SourceLocation getLParenLoc() const { return getLocalData()->LParenLoc; }
+ void setLParenLoc(SourceLocation Loc) { getLocalData()->LParenLoc = Loc; }
+
+ SourceLocation getRParenLoc() const { return getLocalData()->RParenLoc; }
+ void setRParenLoc(SourceLocation Loc) { getLocalData()->RParenLoc = Loc; }
+
+ TypeSourceInfo* getUnderlyingTInfo() const {
+ return getLocalData()->UnderlyingTInfo;
+ }
+ void setUnderlyingTInfo(TypeSourceInfo *TInfo) {
+ getLocalData()->UnderlyingTInfo = TInfo;
+ }
+
+ SourceRange getLocalSourceRange() const {
+ return SourceRange(getKWLoc(), getRParenLoc());
+ }
+
+ SourceRange getParensRange() const {
+ return SourceRange(getLParenLoc(), getRParenLoc());
+ }
+ void setParensRange(SourceRange Range) {
+ setLParenLoc(Range.getBegin());
+ setRParenLoc(Range.getEnd());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setKWLoc(Loc);
+ setRParenLoc(Loc);
+ setLParenLoc(Loc);
+ }
+};
+
+class AutoTypeLoc : public InheritingConcreteTypeLoc<TypeSpecTypeLoc,
+ AutoTypeLoc,
+ AutoType> {
+};
+
+struct ElaboratedLocInfo {
+ SourceLocation ElaboratedKWLoc;
+ /// \brief Data associated with the nested-name-specifier location.
+ void *QualifierData;
+};
+
+class ElaboratedTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
+ ElaboratedTypeLoc,
+ ElaboratedType,
+ ElaboratedLocInfo> {
+public:
+ SourceLocation getElaboratedKeywordLoc() const {
+ return this->getLocalData()->ElaboratedKWLoc;
+ }
+ void setElaboratedKeywordLoc(SourceLocation Loc) {
+ this->getLocalData()->ElaboratedKWLoc = Loc;
+ }
+
+ NestedNameSpecifierLoc getQualifierLoc() const {
+ return NestedNameSpecifierLoc(getTypePtr()->getQualifier(),
+ getLocalData()->QualifierData);
+ }
+
+ void setQualifierLoc(NestedNameSpecifierLoc QualifierLoc) {
+ assert(QualifierLoc.getNestedNameSpecifier()
+ == getTypePtr()->getQualifier() &&
+ "Inconsistent nested-name-specifier pointer");
+ getLocalData()->QualifierData = QualifierLoc.getOpaqueData();
+ }
+
+ SourceRange getLocalSourceRange() const {
+ if (getElaboratedKeywordLoc().isValid())
+ if (getQualifierLoc())
+ return SourceRange(getElaboratedKeywordLoc(),
+ getQualifierLoc().getEndLoc());
+ else
+ return SourceRange(getElaboratedKeywordLoc());
+ else
+ return getQualifierLoc().getSourceRange();
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc);
+
+ TypeLoc getNamedTypeLoc() const {
+ return getInnerTypeLoc();
+ }
+
+ QualType getInnerType() const {
+ return getTypePtr()->getNamedType();
+ }
+
+ void copy(ElaboratedTypeLoc Loc) {
+ unsigned size = getFullDataSize();
+ assert(size == Loc.getFullDataSize());
+ memcpy(Data, Loc.Data, size);
+ }
+};
+
+// This is exactly the structure of an ElaboratedTypeLoc whose inner
+// type is some sort of TypeDeclTypeLoc.
+struct DependentNameLocInfo : ElaboratedLocInfo {
+ SourceLocation NameLoc;
+};
+
+class DependentNameTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc,
+ DependentNameTypeLoc,
+ DependentNameType,
+ DependentNameLocInfo> {
+public:
+ SourceLocation getElaboratedKeywordLoc() const {
+ return this->getLocalData()->ElaboratedKWLoc;
+ }
+ void setElaboratedKeywordLoc(SourceLocation Loc) {
+ this->getLocalData()->ElaboratedKWLoc = Loc;
+ }
+
+ NestedNameSpecifierLoc getQualifierLoc() const {
+ return NestedNameSpecifierLoc(getTypePtr()->getQualifier(),
+ getLocalData()->QualifierData);
+ }
+
+ void setQualifierLoc(NestedNameSpecifierLoc QualifierLoc) {
+ assert(QualifierLoc.getNestedNameSpecifier()
+ == getTypePtr()->getQualifier() &&
+ "Inconsistent nested-name-specifier pointer");
+ getLocalData()->QualifierData = QualifierLoc.getOpaqueData();
+ }
+
+ SourceLocation getNameLoc() const {
+ return this->getLocalData()->NameLoc;
+ }
+ void setNameLoc(SourceLocation Loc) {
+ this->getLocalData()->NameLoc = Loc;
+ }
+
+ SourceRange getLocalSourceRange() const {
+ if (getElaboratedKeywordLoc().isValid())
+ return SourceRange(getElaboratedKeywordLoc(), getNameLoc());
+ else
+ return SourceRange(getQualifierLoc().getBeginLoc(), getNameLoc());
+ }
+
+ void copy(DependentNameTypeLoc Loc) {
+ unsigned size = getFullDataSize();
+ assert(size == Loc.getFullDataSize());
+ memcpy(Data, Loc.Data, size);
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc);
+};
+
+struct DependentTemplateSpecializationLocInfo : DependentNameLocInfo {
+ SourceLocation TemplateKWLoc;
+ SourceLocation LAngleLoc;
+ SourceLocation RAngleLoc;
+ // followed by a TemplateArgumentLocInfo[]
+};
+
+class DependentTemplateSpecializationTypeLoc :
+ public ConcreteTypeLoc<UnqualTypeLoc,
+ DependentTemplateSpecializationTypeLoc,
+ DependentTemplateSpecializationType,
+ DependentTemplateSpecializationLocInfo> {
+public:
+ SourceLocation getElaboratedKeywordLoc() const {
+ return this->getLocalData()->ElaboratedKWLoc;
+ }
+ void setElaboratedKeywordLoc(SourceLocation Loc) {
+ this->getLocalData()->ElaboratedKWLoc = Loc;
+ }
+
+ NestedNameSpecifierLoc getQualifierLoc() const {
+ if (!getLocalData()->QualifierData)
+ return NestedNameSpecifierLoc();
+
+ return NestedNameSpecifierLoc(getTypePtr()->getQualifier(),
+ getLocalData()->QualifierData);
+ }
+
+ void setQualifierLoc(NestedNameSpecifierLoc QualifierLoc) {
+ if (!QualifierLoc) {
+ // Even if we have a nested-name-specifier in the dependent
+ // template specialization type, we won't record the nested-name-specifier
+ // location information when this type-source location information is
+ // part of a nested-name-specifier.
+ getLocalData()->QualifierData = 0;
+ return;
+ }
+
+ assert(QualifierLoc.getNestedNameSpecifier()
+ == getTypePtr()->getQualifier() &&
+ "Inconsistent nested-name-specifier pointer");
+ getLocalData()->QualifierData = QualifierLoc.getOpaqueData();
+ }
+
+ SourceLocation getTemplateKeywordLoc() const {
+ return getLocalData()->TemplateKWLoc;
+ }
+ void setTemplateKeywordLoc(SourceLocation Loc) {
+ getLocalData()->TemplateKWLoc = Loc;
+ }
+
+ SourceLocation getTemplateNameLoc() const {
+ return this->getLocalData()->NameLoc;
+ }
+ void setTemplateNameLoc(SourceLocation Loc) {
+ this->getLocalData()->NameLoc = Loc;
+ }
+
+ SourceLocation getLAngleLoc() const {
+ return this->getLocalData()->LAngleLoc;
+ }
+ void setLAngleLoc(SourceLocation Loc) {
+ this->getLocalData()->LAngleLoc = Loc;
+ }
+
+ SourceLocation getRAngleLoc() const {
+ return this->getLocalData()->RAngleLoc;
+ }
+ void setRAngleLoc(SourceLocation Loc) {
+ this->getLocalData()->RAngleLoc = Loc;
+ }
+
+ unsigned getNumArgs() const {
+ return getTypePtr()->getNumArgs();
+ }
+
+ void setArgLocInfo(unsigned i, TemplateArgumentLocInfo AI) {
+ getArgInfos()[i] = AI;
+ }
+ TemplateArgumentLocInfo getArgLocInfo(unsigned i) const {
+ return getArgInfos()[i];
+ }
+
+ TemplateArgumentLoc getArgLoc(unsigned i) const {
+ return TemplateArgumentLoc(getTypePtr()->getArg(i), getArgLocInfo(i));
+ }
+
+ SourceRange getLocalSourceRange() const {
+ if (getElaboratedKeywordLoc().isValid())
+ return SourceRange(getElaboratedKeywordLoc(), getRAngleLoc());
+ else if (getQualifierLoc())
+ return SourceRange(getQualifierLoc().getBeginLoc(), getRAngleLoc());
+ else if (getTemplateKeywordLoc().isValid())
+ return SourceRange(getTemplateKeywordLoc(), getRAngleLoc());
+ else
+ return SourceRange(getTemplateNameLoc(), getRAngleLoc());
+ }
+
+ void copy(DependentTemplateSpecializationTypeLoc Loc) {
+ unsigned size = getFullDataSize();
+ assert(size == Loc.getFullDataSize());
+ memcpy(Data, Loc.Data, size);
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc);
+
+ unsigned getExtraLocalDataSize() const {
+ return getNumArgs() * sizeof(TemplateArgumentLocInfo);
+ }
+
+private:
+ TemplateArgumentLocInfo *getArgInfos() const {
+ return static_cast<TemplateArgumentLocInfo*>(getExtraLocalData());
+ }
+};
+
+
+struct PackExpansionTypeLocInfo {
+ SourceLocation EllipsisLoc;
+};
+
+class PackExpansionTypeLoc
+ : public ConcreteTypeLoc<UnqualTypeLoc, PackExpansionTypeLoc,
+ PackExpansionType, PackExpansionTypeLocInfo> {
+public:
+ SourceLocation getEllipsisLoc() const {
+ return this->getLocalData()->EllipsisLoc;
+ }
+
+ void setEllipsisLoc(SourceLocation Loc) {
+ this->getLocalData()->EllipsisLoc = Loc;
+ }
+
+ SourceRange getLocalSourceRange() const {
+ return SourceRange(getEllipsisLoc(), getEllipsisLoc());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setEllipsisLoc(Loc);
+ }
+
+ TypeLoc getPatternLoc() const {
+ return getInnerTypeLoc();
+ }
+
+ QualType getInnerType() const {
+ return this->getTypePtr()->getPattern();
+ }
+};
+
+struct AtomicTypeLocInfo {
+ SourceLocation KWLoc, LParenLoc, RParenLoc;
+};
+
+class AtomicTypeLoc : public ConcreteTypeLoc<UnqualTypeLoc, AtomicTypeLoc,
+ AtomicType, AtomicTypeLocInfo> {
+public:
+ TypeLoc getValueLoc() const {
+ return this->getInnerTypeLoc();
+ }
+
+ SourceRange getLocalSourceRange() const {
+ return SourceRange(getKWLoc(), getRParenLoc());
+ }
+
+ SourceLocation getKWLoc() const {
+ return this->getLocalData()->KWLoc;
+ }
+ void setKWLoc(SourceLocation Loc) {
+ this->getLocalData()->KWLoc = Loc;
+ }
+
+ SourceLocation getLParenLoc() const {
+ return this->getLocalData()->LParenLoc;
+ }
+ void setLParenLoc(SourceLocation Loc) {
+ this->getLocalData()->LParenLoc = Loc;
+ }
+
+ SourceLocation getRParenLoc() const {
+ return this->getLocalData()->RParenLoc;
+ }
+ void setRParenLoc(SourceLocation Loc) {
+ this->getLocalData()->RParenLoc = Loc;
+ }
+
+ SourceRange getParensRange() const {
+ return SourceRange(getLParenLoc(), getRParenLoc());
+ }
+ void setParensRange(SourceRange Range) {
+ setLParenLoc(Range.getBegin());
+ setRParenLoc(Range.getEnd());
+ }
+
+ void initializeLocal(ASTContext &Context, SourceLocation Loc) {
+ setKWLoc(Loc);
+ setLParenLoc(Loc);
+ setRParenLoc(Loc);
+ }
+
+ QualType getInnerType() const {
+ return this->getTypePtr()->getValueType();
+ }
+};
+
+
+}
+
+#endif
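
As a rough illustration of how the accessors above compose (a sketch; the
helper name is ours), a client that already holds a FunctionTypeLoc can walk
the written parameter declarations directly:

  #include "clang/AST/Decl.h"
  #include "clang/AST/TypeLoc.h"

  // Count the parameters that were written with a name. getArg() may still be
  // null for positions that have not been filled in yet, so guard the access.
  static unsigned countNamedParams(clang::FunctionTypeLoc FTL) {
    unsigned Named = 0;
    for (unsigned i = 0, e = FTL.getNumArgs(); i != e; ++i)
      if (clang::ParmVarDecl *Parm = FTL.getArg(i))
        if (Parm->getIdentifier())
          ++Named;
    return Named;
  }
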
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeLocNodes.def b/contrib/llvm/tools/clang/include/clang/AST/TypeLocNodes.def
new file mode 100644
index 0000000..4590e48
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/TypeLocNodes.def
@@ -0,0 +1,41 @@
+//===-- TypeLocNodes.def - Metadata about TypeLoc wrappers ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TypeLoc info database. Each node is
+// enumerated by providing its core name (e.g., "Pointer" for "PointerTypeLoc")
+// and base class (e.g., "DeclaratorLoc"). All nodes except QualifiedTypeLoc
+// are associated with a Type subclass of the same name.
+//
+// TYPELOC(Class, Base) - A TypeLoc subclass. If UNQUAL_TYPELOC is
+// provided, there will be exactly one of these, Qualified.
+//
+// UNQUAL_TYPELOC(Class, Base) - An UnqualTypeLoc subclass.
+//
+// ABSTRACT_TYPELOC(Class, Base) - A TypeLoc subclass that, like its Type,
+// has no concrete instances of its own.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef UNQUAL_TYPELOC
+# define UNQUAL_TYPELOC(Class, Base) TYPELOC(Class, Base)
+#endif
+
+#ifndef ABSTRACT_TYPELOC
+# define ABSTRACT_TYPELOC(Class, Base) UNQUAL_TYPELOC(Class, Base)
+#endif
+
+TYPELOC(Qualified, TypeLoc)
+#define TYPE(Class, Base) UNQUAL_TYPELOC(Class, Base##Loc)
+#define ABSTRACT_TYPE(Class, Base) ABSTRACT_TYPELOC(Class, Base##Loc)
+#include "clang/AST/TypeNodes.def"
+
+#undef DECLARATOR_TYPELOC
+#undef TYPESPEC_TYPELOC
+#undef ABSTRACT_TYPELOC
+#undef UNQUAL_TYPELOC
+#undef TYPELOC
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeLocVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/TypeLocVisitor.h
new file mode 100644
index 0000000..50fc439
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/TypeLocVisitor.h
@@ -0,0 +1,62 @@
+//===--- TypeLocVisitor.h - Visitor for TypeLoc subclasses ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TypeLocVisitor interface.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_AST_TYPELOCVISITOR_H
+#define LLVM_CLANG_AST_TYPELOCVISITOR_H
+
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/TypeVisitor.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace clang {
+
+#define DISPATCH(CLASSNAME) \
+ return static_cast<ImplClass*>(this)-> \
+ Visit##CLASSNAME(cast<CLASSNAME>(TyLoc))
+
+template<typename ImplClass, typename RetTy=void>
+class TypeLocVisitor {
+public:
+ RetTy Visit(TypeLoc TyLoc) {
+ switch (TyLoc.getTypeLocClass()) {
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ case TypeLoc::CLASS: DISPATCH(CLASS##TypeLoc);
+#include "clang/AST/TypeLocNodes.def"
+ }
+ llvm_unreachable("unexpected type loc class!");
+ }
+
+ RetTy Visit(UnqualTypeLoc TyLoc) {
+ switch (TyLoc.getTypeLocClass()) {
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ case TypeLoc::CLASS: DISPATCH(CLASS##TypeLoc);
+#include "clang/AST/TypeLocNodes.def"
+ }
+ llvm_unreachable("unexpected type loc class!");
+ }
+
+#define TYPELOC(CLASS, PARENT) \
+ RetTy Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+ DISPATCH(PARENT); \
+ }
+#include "clang/AST/TypeLocNodes.def"
+
+ RetTy VisitTypeLoc(TypeLoc TyLoc) { return RetTy(); }
+};
+
+#undef DISPATCH
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_AST_TYPELOCVISITOR_H
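
A concrete visitor only overrides the cases it cares about; every other node
falls through the generated Visit##PARENT chain to VisitTypeLoc. A small
sketch (the class name is ours):

  #include "clang/AST/TypeLocVisitor.h"
  #include "llvm/Support/raw_ostream.h"

  // Reports where each pointer '*' was written; all other TypeLocs hit the
  // default VisitTypeLoc and are ignored.
  class StarLocReporter : public clang::TypeLocVisitor<StarLocReporter> {
  public:
    void VisitPointerTypeLoc(clang::PointerTypeLoc TL) {
      llvm::outs() << "star at raw loc " << TL.getStarLoc().getRawEncoding()
                   << "\n";
    }
  };
  // Usage: StarLocReporter().Visit(SomeTypeLoc);
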
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeNodes.def b/contrib/llvm/tools/clang/include/clang/AST/TypeNodes.def
new file mode 100644
index 0000000..d5c485f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/TypeNodes.def
@@ -0,0 +1,127 @@
+//===-- TypeNodes.def - Metadata about Type AST nodes -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AST type info database. Each type node is
+// enumerated by providing its name (e.g., "Builtin" or "Enum") and
+// base class (e.g., "Type" or "TagType"). Depending on where in the
+// abstract syntax tree the type will show up, the enumeration uses
+// one of five different macros:
+//
+// TYPE(Class, Base) - A type that can show up anywhere in the AST,
+// and might be dependent, canonical, or non-canonical. All clients
+// will need to understand these types.
+//
+// ABSTRACT_TYPE(Class, Base) - An abstract class that shows up in
+// the type hierarchy but has no concrete instances.
+//
+// NON_CANONICAL_TYPE(Class, Base) - A type that can show up
+// anywhere in the AST but will never be a part of a canonical
+// type. Clients that only need to deal with canonical types
+// (ignoring, e.g., typedefs and other type aliases used for
+// pretty-printing) can ignore these types.
+//
+// DEPENDENT_TYPE(Class, Base) - A type that will only show up
+// within a C++ template that has not been instantiated, e.g., a
+// type that is always dependent. Clients that do not need to deal
+// with uninstantiated C++ templates can ignore these types.
+//
+// NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) - A type that
+// is non-canonical unless it is dependent. Defaults to TYPE because
+// it is neither reliably dependent nor reliably non-canonical.
+//
+// There is a sixth macro, independent of the others. Most clients
+// will not need to use it.
+//
+// LEAF_TYPE(Class) - A type that never has inner types. Clients
+// which can operate on such types more efficiently may wish to do so.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ABSTRACT_TYPE
+# define ABSTRACT_TYPE(Class, Base) TYPE(Class, Base)
+#endif
+
+#ifndef NON_CANONICAL_TYPE
+# define NON_CANONICAL_TYPE(Class, Base) TYPE(Class, Base)
+#endif
+
+#ifndef DEPENDENT_TYPE
+# define DEPENDENT_TYPE(Class, Base) TYPE(Class, Base)
+#endif
+
+#ifndef NON_CANONICAL_UNLESS_DEPENDENT_TYPE
+# define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) TYPE(Class, Base)
+#endif
+
+TYPE(Builtin, Type)
+TYPE(Complex, Type)
+TYPE(Pointer, Type)
+TYPE(BlockPointer, Type)
+ABSTRACT_TYPE(Reference, Type)
+TYPE(LValueReference, ReferenceType)
+TYPE(RValueReference, ReferenceType)
+TYPE(MemberPointer, Type)
+ABSTRACT_TYPE(Array, Type)
+TYPE(ConstantArray, ArrayType)
+TYPE(IncompleteArray, ArrayType)
+TYPE(VariableArray, ArrayType)
+DEPENDENT_TYPE(DependentSizedArray, ArrayType)
+DEPENDENT_TYPE(DependentSizedExtVector, Type)
+TYPE(Vector, Type)
+TYPE(ExtVector, VectorType)
+ABSTRACT_TYPE(Function, Type)
+TYPE(FunctionProto, FunctionType)
+TYPE(FunctionNoProto, FunctionType)
+DEPENDENT_TYPE(UnresolvedUsing, Type)
+NON_CANONICAL_TYPE(Paren, Type)
+NON_CANONICAL_TYPE(Typedef, Type)
+NON_CANONICAL_UNLESS_DEPENDENT_TYPE(TypeOfExpr, Type)
+NON_CANONICAL_UNLESS_DEPENDENT_TYPE(TypeOf, Type)
+NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Decltype, Type)
+NON_CANONICAL_UNLESS_DEPENDENT_TYPE(UnaryTransform, Type)
+ABSTRACT_TYPE(Tag, Type)
+TYPE(Record, TagType)
+TYPE(Enum, TagType)
+NON_CANONICAL_TYPE(Elaborated, Type)
+NON_CANONICAL_TYPE(Attributed, Type)
+DEPENDENT_TYPE(TemplateTypeParm, Type)
+NON_CANONICAL_TYPE(SubstTemplateTypeParm, Type)
+DEPENDENT_TYPE(SubstTemplateTypeParmPack, Type)
+NON_CANONICAL_UNLESS_DEPENDENT_TYPE(TemplateSpecialization, Type)
+NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Auto, Type)
+DEPENDENT_TYPE(InjectedClassName, Type)
+DEPENDENT_TYPE(DependentName, Type)
+DEPENDENT_TYPE(DependentTemplateSpecialization, Type)
+DEPENDENT_TYPE(PackExpansion, Type)
+TYPE(ObjCObject, Type)
+TYPE(ObjCInterface, ObjCObjectType)
+TYPE(ObjCObjectPointer, Type)
+TYPE(Atomic, Type)
+
+#ifdef LAST_TYPE
+LAST_TYPE(Atomic)
+#undef LAST_TYPE
+#endif
+
+// These types are always leaves in the type hierarchy.
+#ifdef LEAF_TYPE
+LEAF_TYPE(Enum)
+LEAF_TYPE(Builtin)
+LEAF_TYPE(Record)
+LEAF_TYPE(InjectedClassName)
+LEAF_TYPE(ObjCInterface)
+LEAF_TYPE(TemplateTypeParm)
+#undef LEAF_TYPE
+#endif
+
+#undef NON_CANONICAL_UNLESS_DEPENDENT_TYPE
+#undef DEPENDENT_TYPE
+#undef NON_CANONICAL_TYPE
+#undef ABSTRACT_TYPE
+#undef TYPE
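
Clients define the macros they distinguish (TYPE at minimum, even if empty)
and then include the file; the fallbacks above collapse the rest into TYPE.
For example, a list of just the dependent-only node names (llvm::outs() is
our assumption for output):

  #include "llvm/Support/raw_ostream.h"

  static void listDependentOnlyTypes() {
  #define TYPE(Class, Base)                 // ignore the general case
  #define ABSTRACT_TYPE(Class, Base)        // and abstract nodes
  #define DEPENDENT_TYPE(Class, Base) llvm::outs() << #Class "Type\n";
  #include "clang/AST/TypeNodes.def"
  }
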
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeOrdering.h b/contrib/llvm/tools/clang/include/clang/AST/TypeOrdering.h
new file mode 100644
index 0000000..7cf0d5e
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/TypeOrdering.h
@@ -0,0 +1,77 @@
+//===-------------- TypeOrdering.h - Total ordering for types -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides function objects and specializations that allow
+// QualType values to be sorted and used as keys in std::map, std::set,
+// llvm::DenseMap, and llvm::DenseSet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TYPE_ORDERING_H
+#define LLVM_CLANG_TYPE_ORDERING_H
+
+#include "clang/AST/Type.h"
+#include "clang/AST/CanonicalType.h"
+#include <functional>
+
+namespace clang {
+
+/// QualTypeOrdering - Function object that provides a total ordering
+/// on QualType values.
+struct QualTypeOrdering : std::binary_function<QualType, QualType, bool> {
+ bool operator()(QualType T1, QualType T2) const {
+ return std::less<void*>()(T1.getAsOpaquePtr(), T2.getAsOpaquePtr());
+ }
+};
+
+}
+
+namespace llvm {
+ template<class> struct DenseMapInfo;
+
+ template<> struct DenseMapInfo<clang::QualType> {
+ static inline clang::QualType getEmptyKey() { return clang::QualType(); }
+
+ static inline clang::QualType getTombstoneKey() {
+ using clang::QualType;
+ return QualType::getFromOpaquePtr(reinterpret_cast<clang::Type *>(-1));
+ }
+
+ static unsigned getHashValue(clang::QualType Val) {
+ return (unsigned)((uintptr_t)Val.getAsOpaquePtr()) ^
+ ((unsigned)((uintptr_t)Val.getAsOpaquePtr() >> 9));
+ }
+
+ static bool isEqual(clang::QualType LHS, clang::QualType RHS) {
+ return LHS == RHS;
+ }
+ };
+
+ template<> struct DenseMapInfo<clang::CanQualType> {
+ static inline clang::CanQualType getEmptyKey() {
+ return clang::CanQualType();
+ }
+
+ static inline clang::CanQualType getTombstoneKey() {
+ using clang::CanQualType;
+ return CanQualType::getFromOpaquePtr(reinterpret_cast<clang::Type *>(-1));
+ }
+
+ static unsigned getHashValue(clang::CanQualType Val) {
+ return (unsigned)((uintptr_t)Val.getAsOpaquePtr()) ^
+ ((unsigned)((uintptr_t)Val.getAsOpaquePtr() >> 9));
+ }
+
+ static bool isEqual(clang::CanQualType LHS, clang::CanQualType RHS) {
+ return LHS == RHS;
+ }
+ };
+}
+
+#endif
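
With QualTypeOrdering and the DenseMapInfo specializations in scope, QualType
can serve directly as an ordered or hashed key. A minimal sketch (names are
ours; T is assumed non-null, since the empty QualType doubles as the DenseMap
empty key):

  #include "clang/AST/TypeOrdering.h"
  #include "llvm/ADT/DenseMap.h"
  #include <set>

  typedef std::set<clang::QualType, clang::QualTypeOrdering> QualTypeSet;
  typedef llvm::DenseMap<clang::QualType, unsigned> QualTypeUseCount;

  static void noteUse(QualTypeSet &Seen, QualTypeUseCount &Counts,
                      clang::QualType T) {
    Seen.insert(T);
    ++Counts[T];   // operator[] value-initializes the count on first use
  }
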
diff --git a/contrib/llvm/tools/clang/include/clang/AST/TypeVisitor.h b/contrib/llvm/tools/clang/include/clang/AST/TypeVisitor.h
new file mode 100644
index 0000000..242aa58
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/TypeVisitor.h
@@ -0,0 +1,53 @@
+//===--- TypeVisitor.h - Visitor for Type subclasses ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TypeVisitor interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_TYPEVISITOR_H
+#define LLVM_CLANG_AST_TYPEVISITOR_H
+
+#include "clang/AST/Type.h"
+
+namespace clang {
+
+#define DISPATCH(CLASS) \
+ return static_cast<ImplClass*>(this)-> \
+ Visit##CLASS(static_cast<const CLASS*>(T))
+
+template<typename ImplClass, typename RetTy=void>
+class TypeVisitor {
+public:
+ RetTy Visit(const Type *T) {
+ // Top switch stmt: dispatch to VisitFooType for each FooType.
+ switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) case Type::CLASS: DISPATCH(CLASS##Type);
+#include "clang/AST/TypeNodes.def"
+ }
+ llvm_unreachable("Unknown type class!");
+ }
+
+ // If the implementation chooses not to implement a certain visit method, fall
+// back on the superclass.
+#define TYPE(CLASS, PARENT) RetTy Visit##CLASS##Type(const CLASS##Type *T) { \
+ DISPATCH(PARENT); \
+}
+#include "clang/AST/TypeNodes.def"
+
+ // Base case, ignore it. :)
+ RetTy VisitType(const Type*) { return RetTy(); }
+};
+
+#undef DISPATCH
+
+} // end namespace clang
+
+#endif
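
TypeVisitor follows the same CRTP pattern: unhandled nodes chain through the
generated Visit##PARENT methods until they reach VisitType. A sketch (the
class name is ours):

  #include "clang/AST/TypeVisitor.h"

  // Returns a short tag for a few node kinds; everything else falls back to
  // VisitType.
  class KindTagger : public clang::TypeVisitor<KindTagger, const char *> {
  public:
    const char *VisitPointerType(const clang::PointerType *) { return "ptr"; }
    const char *VisitReferenceType(const clang::ReferenceType *) { return "ref"; }
    const char *VisitType(const clang::Type *) { return "other"; }
  };
  // Usage: KindTagger().Visit(QT.getTypePtr());
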
diff --git a/contrib/llvm/tools/clang/include/clang/AST/UnresolvedSet.h b/contrib/llvm/tools/clang/include/clang/AST/UnresolvedSet.h
new file mode 100644
index 0000000..0918dc4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/UnresolvedSet.h
@@ -0,0 +1,186 @@
+//===-- UnresolvedSet.h - Unresolved sets of declarations ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the UnresolvedSet class, which is used to store
+// collections of declarations in the AST.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_UNRESOLVEDSET_H
+#define LLVM_CLANG_AST_UNRESOLVEDSET_H
+
+#include <iterator>
+#include "llvm/ADT/SmallVector.h"
+#include "clang/AST/DeclAccessPair.h"
+
+namespace clang {
+
+/// The iterator over UnresolvedSets. Serves as both the const and
+/// non-const iterator.
+class UnresolvedSetIterator {
+private:
+ typedef SmallVectorImpl<DeclAccessPair> DeclsTy;
+ typedef DeclsTy::iterator IteratorTy;
+
+ IteratorTy ir;
+
+ friend class UnresolvedSetImpl;
+ friend class OverloadExpr;
+ explicit UnresolvedSetIterator(DeclsTy::iterator ir) : ir(ir) {}
+ explicit UnresolvedSetIterator(DeclsTy::const_iterator ir) :
+ ir(const_cast<DeclsTy::iterator>(ir)) {}
+
+ IteratorTy getIterator() const { return ir; }
+
+public:
+ UnresolvedSetIterator() {}
+
+ typedef std::iterator_traits<IteratorTy>::difference_type difference_type;
+ typedef NamedDecl *value_type;
+ typedef NamedDecl **pointer;
+ typedef NamedDecl *reference;
+ typedef std::iterator_traits<IteratorTy>::iterator_category iterator_category;
+
+ NamedDecl *getDecl() const { return ir->getDecl(); }
+ AccessSpecifier getAccess() const { return ir->getAccess(); }
+ void setAccess(AccessSpecifier AS) { ir->setAccess(AS); }
+ DeclAccessPair getPair() const { return *ir; }
+
+ NamedDecl *operator*() const { return getDecl(); }
+
+ UnresolvedSetIterator &operator++() { ++ir; return *this; }
+ UnresolvedSetIterator operator++(int) { return UnresolvedSetIterator(ir++); }
+ UnresolvedSetIterator &operator--() { --ir; return *this; }
+ UnresolvedSetIterator operator--(int) { return UnresolvedSetIterator(ir--); }
+
+ UnresolvedSetIterator &operator+=(difference_type d) {
+ ir += d; return *this;
+ }
+ UnresolvedSetIterator operator+(difference_type d) const {
+ return UnresolvedSetIterator(ir + d);
+ }
+ UnresolvedSetIterator &operator-=(difference_type d) {
+ ir -= d; return *this;
+ }
+ UnresolvedSetIterator operator-(difference_type d) const {
+ return UnresolvedSetIterator(ir - d);
+ }
+ value_type operator[](difference_type d) const { return *(*this + d); }
+
+ difference_type operator-(const UnresolvedSetIterator &o) const {
+ return ir - o.ir;
+ }
+
+ bool operator==(const UnresolvedSetIterator &o) const { return ir == o.ir; }
+ bool operator!=(const UnresolvedSetIterator &o) const { return ir != o.ir; }
+ bool operator<(const UnresolvedSetIterator &o) const { return ir < o.ir; }
+ bool operator<=(const UnresolvedSetIterator &o) const { return ir <= o.ir; }
+ bool operator>=(const UnresolvedSetIterator &o) const { return ir >= o.ir; }
+ bool operator>(const UnresolvedSetIterator &o) const { return ir > o.ir; }
+};
+
+/// UnresolvedSet - A set of unresolved declarations.
+class UnresolvedSetImpl {
+ typedef UnresolvedSetIterator::DeclsTy DeclsTy;
+
+ // Don't allow direct construction, and only permit subclassing by
+ // UnresolvedSet.
+private:
+ template <unsigned N> friend class UnresolvedSet;
+ UnresolvedSetImpl() {}
+ UnresolvedSetImpl(const UnresolvedSetImpl &) {}
+
+public:
+ // We don't currently support assignment through this iterator, so we might
+ // as well use the same implementation twice.
+ typedef UnresolvedSetIterator iterator;
+ typedef UnresolvedSetIterator const_iterator;
+
+ iterator begin() { return iterator(decls().begin()); }
+ iterator end() { return iterator(decls().end()); }
+
+ const_iterator begin() const { return const_iterator(decls().begin()); }
+ const_iterator end() const { return const_iterator(decls().end()); }
+
+ void addDecl(NamedDecl *D) {
+ addDecl(D, AS_none);
+ }
+
+ void addDecl(NamedDecl *D, AccessSpecifier AS) {
+ decls().push_back(DeclAccessPair::make(D, AS));
+ }
+
+ /// Replaces the given declaration with the new one, once.
+ ///
+ /// \return true if the set changed
+ bool replace(const NamedDecl* Old, NamedDecl *New) {
+ for (DeclsTy::iterator I = decls().begin(), E = decls().end(); I != E; ++I)
+ if (I->getDecl() == Old)
+ return (I->setDecl(New), true);
+ return false;
+ }
+
+ /// Replaces the declaration at the given iterator with the new one,
+ /// preserving the original access bits.
+ void replace(iterator I, NamedDecl *New) {
+ I.ir->setDecl(New);
+ }
+
+ void replace(iterator I, NamedDecl *New, AccessSpecifier AS) {
+ I.ir->set(New, AS);
+ }
+
+ void erase(unsigned I) {
+ decls()[I] = decls().back();
+ decls().pop_back();
+ }
+
+ void erase(iterator I) {
+ *I.ir = decls().back();
+ decls().pop_back();
+ }
+
+ void setAccess(iterator I, AccessSpecifier AS) {
+ I.ir->setAccess(AS);
+ }
+
+ void clear() { decls().clear(); }
+ void set_size(unsigned N) { decls().set_size(N); }
+
+ bool empty() const { return decls().empty(); }
+ unsigned size() const { return decls().size(); }
+
+ void append(iterator I, iterator E) {
+ decls().append(I.ir, E.ir);
+ }
+
+ DeclAccessPair &operator[](unsigned I) { return decls()[I]; }
+ const DeclAccessPair &operator[](unsigned I) const { return decls()[I]; }
+
+private:
+ // These work because the only permitted subclass, UnresolvedSet, holds
+ // nothing but the SmallVector that 'this' is reinterpreted as.
+
+ DeclsTy &decls() {
+ return *reinterpret_cast<DeclsTy*>(this);
+ }
+ const DeclsTy &decls() const {
+ return *reinterpret_cast<const DeclsTy*>(this);
+ }
+};
+
+/// A set of unresolved declarations.
+template <unsigned InlineCapacity> class UnresolvedSet :
+ public UnresolvedSetImpl {
+ SmallVector<DeclAccessPair, InlineCapacity> Decls;
+};
+
+
+} // namespace clang
+
+#endif
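
A typical client accumulates candidate declarations together with the access
under which each was found, then walks them through the shared iterator. A
small sketch (the helper names are ours):

  #include "clang/AST/Decl.h"
  #include "clang/AST/UnresolvedSet.h"
  #include "llvm/Support/raw_ostream.h"

  static void collect(clang::UnresolvedSetImpl &Set, clang::NamedDecl *D,
                      clang::AccessSpecifier AS) {
    Set.addDecl(D, AS);
  }

  static void dump(const clang::UnresolvedSetImpl &Set) {
    for (clang::UnresolvedSetIterator I = Set.begin(), E = Set.end();
         I != E; ++I)
      llvm::outs() << (*I)->getNameAsString() << "\n";
  }

  // The concrete storage lives in the templated subclass:
  //   clang::UnresolvedSet<4> Candidates;
  //   collect(Candidates, SomeDecl, clang::AS_public);
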
diff --git a/contrib/llvm/tools/clang/include/clang/AST/VTTBuilder.h b/contrib/llvm/tools/clang/include/clang/AST/VTTBuilder.h
new file mode 100644
index 0000000..6756dd1
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/VTTBuilder.h
@@ -0,0 +1,176 @@
+//===--- VTTBuilder.h - C++ VTT layout builder --------------------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with generation of the layout of virtual table
+// tables (VTT).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_VTTBUILDER_H
+#define LLVM_CLANG_AST_VTTBUILDER_H
+
+#include "clang/AST/BaseSubobject.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/GlobalDecl.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/ABI.h"
+#include "llvm/ADT/SetVector.h"
+#include <utility>
+
+namespace clang {
+
+class VTTVTable {
+ llvm::PointerIntPair<const CXXRecordDecl *, 1, bool> BaseAndIsVirtual;
+ CharUnits BaseOffset;
+
+public:
+ VTTVTable() {}
+ VTTVTable(const CXXRecordDecl *Base, CharUnits BaseOffset, bool BaseIsVirtual)
+ : BaseAndIsVirtual(Base, BaseIsVirtual), BaseOffset(BaseOffset) {}
+ VTTVTable(BaseSubobject Base, bool BaseIsVirtual)
+ : BaseAndIsVirtual(Base.getBase(), BaseIsVirtual),
+ BaseOffset(Base.getBaseOffset()) {}
+
+ const CXXRecordDecl *getBase() const {
+ return BaseAndIsVirtual.getPointer();
+ }
+
+ CharUnits getBaseOffset() const {
+ return BaseOffset;
+ }
+
+ bool isVirtual() const {
+ return BaseAndIsVirtual.getInt();
+ }
+
+ BaseSubobject getBaseSubobject() const {
+ return BaseSubobject(getBase(), getBaseOffset());
+ }
+};
+
+struct VTTComponent {
+ uint64_t VTableIndex;
+ BaseSubobject VTableBase;
+
+ VTTComponent() {}
+ VTTComponent(uint64_t VTableIndex, BaseSubobject VTableBase)
+ : VTableIndex(VTableIndex), VTableBase(VTableBase) {}
+};
+
+/// VTT builder - Class for building VTT layout information.
+class VTTBuilder {
+
+ ASTContext &Ctx;
+
+ /// MostDerivedClass - The most derived class for which we're building this
+ /// VTT.
+ const CXXRecordDecl *MostDerivedClass;
+
+ typedef SmallVector<VTTVTable, 64> VTTVTablesVectorTy;
+
+ /// VTTVTables - The VTT vtables.
+ VTTVTablesVectorTy VTTVTables;
+
+ typedef SmallVector<VTTComponent, 64> VTTComponentsVectorTy;
+
+ /// VTTComponents - The VTT components.
+ VTTComponentsVectorTy VTTComponents;
+
+ /// MostDerivedClassLayout - the AST record layout of the most derived class.
+ const ASTRecordLayout &MostDerivedClassLayout;
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+
+ typedef llvm::DenseMap<BaseSubobject, uint64_t> AddressPointsMapTy;
+
+ /// SubVTTIndicies - The sub-VTT indices for the bases of the most derived
+ /// class.
+ llvm::DenseMap<BaseSubobject, uint64_t> SubVTTIndicies;
+
+ /// SecondaryVirtualPointerIndices - The secondary virtual pointer indices of
+ /// all subobjects of the most derived class.
+ llvm::DenseMap<BaseSubobject, uint64_t> SecondaryVirtualPointerIndices;
+
+ /// GenerateDefinition - Whether the VTT builder should generate LLVM IR for
+ /// the VTT.
+ bool GenerateDefinition;
+
+ /// AddVTablePointer - Add a vtable pointer to the VTT currently being built.
+ ///
+ /// \param AddressPoints - If the vtable is a construction vtable, this has
+ /// the address points for it.
+ void AddVTablePointer(BaseSubobject Base, uint64_t VTableIndex,
+ const CXXRecordDecl *VTableClass);
+
+ /// LayoutSecondaryVTTs - Lay out the secondary VTTs of the given base
+ /// subobject.
+ void LayoutSecondaryVTTs(BaseSubobject Base);
+
+ /// LayoutSecondaryVirtualPointers - Lay out the secondary virtual pointers
+ /// for the given base subobject.
+ ///
+ /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
+ /// or a direct or indirect base of a virtual base.
+ ///
+ /// \param AddressPoints - If the vtable is a construction vtable, this has
+ /// the address points for it.
+ void LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ uint64_t VTableIndex,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// LayoutSecondaryVirtualPointers - Lay out the secondary virtual pointers
+ /// for the given base subobject.
+ ///
+ /// \param AddressPoints - If the vtable is a construction vtable, this has
+ /// the address points for it.
+ void LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ uint64_t VTableIndex);
+
+ /// LayoutVirtualVTTs - Lay out the VTTs for the virtual base classes of the
+ /// given record decl.
+ void LayoutVirtualVTTs(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// LayoutVTT - Will lay out the VTT for the given subobject, including any
+ /// secondary VTTs, secondary virtual pointers and virtual VTTs.
+ void LayoutVTT(BaseSubobject Base, bool BaseIsVirtual);
+
+public:
+ VTTBuilder(ASTContext &Ctx, const CXXRecordDecl *MostDerivedClass,
+ bool GenerateDefinition);
+
+ // getVTTComponents - Returns a reference to the VTT components.
+ const VTTComponentsVectorTy &getVTTComponents() const {
+ return VTTComponents;
+ }
+
+ // getVTTVTables - Returns a reference to the VTT vtables.
+ const VTTVTablesVectorTy &getVTTVTables() const {
+ return VTTVTables;
+ }
+
+ /// getSubVTTIndicies - Returns a reference to the sub-VTT indices.
+ const llvm::DenseMap<BaseSubobject, uint64_t> &getSubVTTIndicies() const {
+ return SubVTTIndicies;
+ }
+
+ /// getSecondaryVirtualPointerIndices - Returns a reference to the secondary
+ /// virtual pointer indices.
+ const llvm::DenseMap<BaseSubobject, uint64_t> &
+ getSecondaryVirtualPointerIndices() const {
+ return SecondaryVirtualPointerIndices;
+ }
+
+};
+
+}
+
+#endif
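
The builder does all of its work in the constructor; callers then read the
computed tables through the getters. A rough sketch (assuming RD genuinely
needs a VTT, i.e. has virtual bases):

  #include "clang/AST/VTTBuilder.h"
  #include "llvm/Support/raw_ostream.h"

  static void reportVTTShape(clang::ASTContext &Ctx,
                             const clang::CXXRecordDecl *RD) {
    // Layout information only; nothing is emitted.
    clang::VTTBuilder Builder(Ctx, RD, /*GenerateDefinition=*/false);
    llvm::outs() << RD->getNameAsString() << ": "
                 << Builder.getVTTComponents().size() << " components, "
                 << Builder.getVTTVTables().size() << " vtables\n";
  }
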
diff --git a/contrib/llvm/tools/clang/include/clang/AST/VTableBuilder.h b/contrib/llvm/tools/clang/include/clang/AST/VTableBuilder.h
new file mode 100644
index 0000000..2aa9a3d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/AST/VTableBuilder.h
@@ -0,0 +1,357 @@
+//===--- VTableBuilder.h - C++ vtable layout builder --------------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with generation of the layout of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_VTABLEBUILDER_H
+#define LLVM_CLANG_AST_VTABLEBUILDER_H
+
+#include "clang/AST/BaseSubobject.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/GlobalDecl.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/ABI.h"
+#include "llvm/ADT/SetVector.h"
+#include <utility>
+
+namespace clang {
+ class CXXRecordDecl;
+
+/// VTableComponent - Represents a single component in a vtable.
+class VTableComponent {
+public:
+ enum Kind {
+ CK_VCallOffset,
+ CK_VBaseOffset,
+ CK_OffsetToTop,
+ CK_RTTI,
+ CK_FunctionPointer,
+
+ /// CK_CompleteDtorPointer - A pointer to the complete destructor.
+ CK_CompleteDtorPointer,
+
+ /// CK_DeletingDtorPointer - A pointer to the deleting destructor.
+ CK_DeletingDtorPointer,
+
+ /// CK_UnusedFunctionPointer - In some cases, a vtable function pointer
+ /// will end up never being called. Such vtable function pointers are
+ /// represented as a CK_UnusedFunctionPointer.
+ CK_UnusedFunctionPointer
+ };
+
+ VTableComponent() { }
+
+ static VTableComponent MakeVCallOffset(CharUnits Offset) {
+ return VTableComponent(CK_VCallOffset, Offset);
+ }
+
+ static VTableComponent MakeVBaseOffset(CharUnits Offset) {
+ return VTableComponent(CK_VBaseOffset, Offset);
+ }
+
+ static VTableComponent MakeOffsetToTop(CharUnits Offset) {
+ return VTableComponent(CK_OffsetToTop, Offset);
+ }
+
+ static VTableComponent MakeRTTI(const CXXRecordDecl *RD) {
+ return VTableComponent(CK_RTTI, reinterpret_cast<uintptr_t>(RD));
+ }
+
+ static VTableComponent MakeFunction(const CXXMethodDecl *MD) {
+ assert(!isa<CXXDestructorDecl>(MD) &&
+ "Don't use MakeFunction with destructors!");
+
+ return VTableComponent(CK_FunctionPointer,
+ reinterpret_cast<uintptr_t>(MD));
+ }
+
+ static VTableComponent MakeCompleteDtor(const CXXDestructorDecl *DD) {
+ return VTableComponent(CK_CompleteDtorPointer,
+ reinterpret_cast<uintptr_t>(DD));
+ }
+
+ static VTableComponent MakeDeletingDtor(const CXXDestructorDecl *DD) {
+ return VTableComponent(CK_DeletingDtorPointer,
+ reinterpret_cast<uintptr_t>(DD));
+ }
+
+ static VTableComponent MakeUnusedFunction(const CXXMethodDecl *MD) {
+ assert(!isa<CXXDestructorDecl>(MD) &&
+ "Don't use MakeUnusedFunction with destructors!");
+ return VTableComponent(CK_UnusedFunctionPointer,
+ reinterpret_cast<uintptr_t>(MD));
+ }
+
+ static VTableComponent getFromOpaqueInteger(uint64_t I) {
+ return VTableComponent(I);
+ }
+
+ /// getKind - Get the kind of this vtable component.
+ Kind getKind() const {
+ return (Kind)(Value & 0x7);
+ }
+
+ CharUnits getVCallOffset() const {
+ assert(getKind() == CK_VCallOffset && "Invalid component kind!");
+
+ return getOffset();
+ }
+
+ CharUnits getVBaseOffset() const {
+ assert(getKind() == CK_VBaseOffset && "Invalid component kind!");
+
+ return getOffset();
+ }
+
+ CharUnits getOffsetToTop() const {
+ assert(getKind() == CK_OffsetToTop && "Invalid component kind!");
+
+ return getOffset();
+ }
+
+ const CXXRecordDecl *getRTTIDecl() const {
+ assert(getKind() == CK_RTTI && "Invalid component kind!");
+
+ return reinterpret_cast<CXXRecordDecl *>(getPointer());
+ }
+
+ const CXXMethodDecl *getFunctionDecl() const {
+ assert(getKind() == CK_FunctionPointer);
+
+ return reinterpret_cast<CXXMethodDecl *>(getPointer());
+ }
+
+ const CXXDestructorDecl *getDestructorDecl() const {
+ assert((getKind() == CK_CompleteDtorPointer ||
+ getKind() == CK_DeletingDtorPointer) && "Invalid component kind!");
+
+ return reinterpret_cast<CXXDestructorDecl *>(getPointer());
+ }
+
+ const CXXMethodDecl *getUnusedFunctionDecl() const {
+ assert(getKind() == CK_UnusedFunctionPointer);
+
+ return reinterpret_cast<CXXMethodDecl *>(getPointer());
+ }
+
+private:
+ VTableComponent(Kind ComponentKind, CharUnits Offset) {
+ assert((ComponentKind == CK_VCallOffset ||
+ ComponentKind == CK_VBaseOffset ||
+ ComponentKind == CK_OffsetToTop) && "Invalid component kind!");
+ assert(Offset.getQuantity() <= ((1LL << 56) - 1) && "Offset is too big!");
+
+ Value = ((Offset.getQuantity() << 3) | ComponentKind);
+ }
+
+ VTableComponent(Kind ComponentKind, uintptr_t Ptr) {
+ assert((ComponentKind == CK_RTTI ||
+ ComponentKind == CK_FunctionPointer ||
+ ComponentKind == CK_CompleteDtorPointer ||
+ ComponentKind == CK_DeletingDtorPointer ||
+ ComponentKind == CK_UnusedFunctionPointer) &&
+ "Invalid component kind!");
+
+ assert((Ptr & 7) == 0 && "Pointer not sufficiently aligned!");
+
+ Value = Ptr | ComponentKind;
+ }
+
+ CharUnits getOffset() const {
+ assert((getKind() == CK_VCallOffset || getKind() == CK_VBaseOffset ||
+ getKind() == CK_OffsetToTop) && "Invalid component kind!");
+
+ return CharUnits::fromQuantity(Value >> 3);
+ }
+
+ uintptr_t getPointer() const {
+ assert((getKind() == CK_RTTI ||
+ getKind() == CK_FunctionPointer ||
+ getKind() == CK_CompleteDtorPointer ||
+ getKind() == CK_DeletingDtorPointer ||
+ getKind() == CK_UnusedFunctionPointer) &&
+ "Invalid component kind!");
+
+ return static_cast<uintptr_t>(Value & ~7ULL);
+ }
+
+ explicit VTableComponent(uint64_t Value)
+ : Value(Value) { }
+
+ /// The kind is stored in the lower 3 bits of the value. For offsets, we
+ /// make use of the fact that classes can't be larger than 2^55 bytes,
+ /// so we store the offset in the lower part of the 61 bits that remain.
+ /// (The reason that we're not simply using a PointerIntPair here is that we
+ /// need the offsets to be 64-bit, even on a 32-bit machine.)
+ int64_t Value;
+};
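
  // Worked instance of the Value encoding documented above (the enumerators
  // take their default values 0..7 in declaration order, so CK_VBaseOffset == 1):
  //   pack a 16-char vbase offset:  Value = (16 << 3) | CK_VBaseOffset  // 129
  //   read the kind back:           Value & 0x7   // == 1 == CK_VBaseOffset
  //   read the offset back:         Value >> 3    // == 16 chars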
+
+class VTableLayout {
+public:
+ typedef std::pair<uint64_t, ThunkInfo> VTableThunkTy;
+ typedef SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
+
+ typedef const VTableComponent *vtable_component_iterator;
+ typedef const VTableThunkTy *vtable_thunk_iterator;
+
+ typedef llvm::DenseMap<BaseSubobject, uint64_t> AddressPointsMapTy;
+private:
+ uint64_t NumVTableComponents;
+ VTableComponent *VTableComponents;
+
+ /// VTableThunks - Contains thunks needed by vtables.
+ uint64_t NumVTableThunks;
+ VTableThunkTy *VTableThunks;
+
+ /// Address points - Address points for all vtables.
+ AddressPointsMapTy AddressPoints;
+
+public:
+ VTableLayout(uint64_t NumVTableComponents,
+ const VTableComponent *VTableComponents,
+ uint64_t NumVTableThunks,
+ const VTableThunkTy *VTableThunks,
+ const AddressPointsMapTy &AddressPoints);
+ ~VTableLayout();
+
+ uint64_t getNumVTableComponents() const {
+ return NumVTableComponents;
+ }
+
+ vtable_component_iterator vtable_component_begin() const {
+ return VTableComponents;
+ }
+
+ vtable_component_iterator vtable_component_end() const {
+ return VTableComponents+NumVTableComponents;
+ }
+
+ uint64_t getNumVTableThunks() const {
+ return NumVTableThunks;
+ }
+
+ vtable_thunk_iterator vtable_thunk_begin() const {
+ return VTableThunks;
+ }
+
+ vtable_thunk_iterator vtable_thunk_end() const {
+ return VTableThunks+NumVTableThunks;
+ }
+
+ uint64_t getAddressPoint(BaseSubobject Base) const {
+ assert(AddressPoints.count(Base) &&
+ "Did not find address point!");
+
+ uint64_t AddressPoint = AddressPoints.lookup(Base);
+ assert(AddressPoint && "Address point must not be zero!");
+
+ return AddressPoint;
+ }
+
+ const AddressPointsMapTy &getAddressPoints() const {
+ return AddressPoints;
+ }
+};
+
+class VTableContext {
+ ASTContext &Context;
+
+public:
+ typedef SmallVector<std::pair<uint64_t, ThunkInfo>, 1>
+ VTableThunksTy;
+ typedef SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
+
+private:
+ /// MethodVTableIndices - Contains the index (relative to the vtable address
+ /// point) where the function pointer for a virtual function is stored.
+ typedef llvm::DenseMap<GlobalDecl, int64_t> MethodVTableIndicesTy;
+ MethodVTableIndicesTy MethodVTableIndices;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, const VTableLayout *>
+ VTableLayoutMapTy;
+ VTableLayoutMapTy VTableLayouts;
+
+ /// NumVirtualFunctionPointers - Contains the number of virtual function
+ /// pointers in the vtable for a given record decl.
+ llvm::DenseMap<const CXXRecordDecl *, uint64_t> NumVirtualFunctionPointers;
+
+ typedef std::pair<const CXXRecordDecl *,
+ const CXXRecordDecl *> ClassPairTy;
+
+ /// VirtualBaseClassOffsetOffsets - Contains the vtable offset (relative to
+ /// the address point) in chars where the offsets for virtual bases of a class
+ /// are stored.
+ typedef llvm::DenseMap<ClassPairTy, CharUnits>
+ VirtualBaseClassOffsetOffsetsMapTy;
+ VirtualBaseClassOffsetOffsetsMapTy VirtualBaseClassOffsetOffsets;
+
+ typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy;
+
+ /// Thunks - Contains all thunks that a given method decl will need.
+ ThunksMapTy Thunks;
+
+ void ComputeMethodVTableIndices(const CXXRecordDecl *RD);
+
+ /// ComputeVTableRelatedInformation - Compute and store all vtable related
+ /// information (vtable layout, vbase offset offsets, thunks etc) for the
+ /// given record decl.
+ void ComputeVTableRelatedInformation(const CXXRecordDecl *RD);
+
+public:
+ VTableContext(ASTContext &Context) : Context(Context) {}
+ ~VTableContext();
+
+ const VTableLayout &getVTableLayout(const CXXRecordDecl *RD) {
+ ComputeVTableRelatedInformation(RD);
+ assert(VTableLayouts.count(RD) && "No layout for this record decl!");
+
+ return *VTableLayouts[RD];
+ }
+
+ VTableLayout *
+ createConstructionVTableLayout(const CXXRecordDecl *MostDerivedClass,
+ CharUnits MostDerivedClassOffset,
+ bool MostDerivedClassIsVirtual,
+ const CXXRecordDecl *LayoutClass);
+
+ const ThunkInfoVectorTy *getThunkInfo(const CXXMethodDecl *MD) {
+ ComputeVTableRelatedInformation(MD->getParent());
+
+ ThunksMapTy::const_iterator I = Thunks.find(MD);
+ if (I == Thunks.end()) {
+ // We did not find a thunk for this method.
+ return 0;
+ }
+
+ return &I->second;
+ }
+
+ /// getNumVirtualFunctionPointers - Return the number of virtual function
+ /// pointers in the vtable for a given record decl.
+ uint64_t getNumVirtualFunctionPointers(const CXXRecordDecl *RD);
+
+ /// getMethodVTableIndex - Return the index (relative to the vtable address
+ /// point) where the function pointer for the given virtual function is
+ /// stored.
+ uint64_t getMethodVTableIndex(GlobalDecl GD);
+
+ /// getVirtualBaseOffsetOffset - Return the offset in chars (relative to the
+ /// vtable address point) where the offset of the virtual base that contains
+ /// the given base is stored; if no virtual base contains the given
+ /// class, return 0. Base must be a virtual base class or an unambiguous
+ /// base.
+ CharUnits getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *VBase);
+};
+
+}
+
+#endif
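As a rough illustration only: a hypothetical helper that walks the components of a layout obtained through the VTableContext interface above. Only members declared in this header are assumed; the include of the header itself is omitted since its path is not shown here.

    // Include the vtable header declared above (path omitted here), plus:
    #include "llvm/Support/raw_ostream.h"

    using namespace clang;

    // Print a short description of each component in RD's vtable layout.
    static void dumpVTableComponents(VTableContext &VTables,
                                     const CXXRecordDecl *RD) {
      const VTableLayout &Layout = VTables.getVTableLayout(RD);
      for (VTableLayout::vtable_component_iterator
             I = Layout.vtable_component_begin(),
             E = Layout.vtable_component_end(); I != E; ++I) {
        switch (I->getKind()) {
        case VTableComponent::CK_OffsetToTop:
          llvm::errs() << "offset-to-top: "
                       << I->getOffsetToTop().getQuantity() << "\n";
          break;
        case VTableComponent::CK_RTTI:
          llvm::errs() << "rtti: "
                       << I->getRTTIDecl()->getQualifiedNameAsString() << "\n";
          break;
        case VTableComponent::CK_FunctionPointer:
          llvm::errs() << "method: "
                       << I->getFunctionDecl()->getQualifiedNameAsString() << "\n";
          break;
        default:
          // Offsets, destructor pointers and unused function pointers.
          llvm::errs() << "other component\n";
          break;
        }
      }
    }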
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/CFGReachabilityAnalysis.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/CFGReachabilityAnalysis.h
new file mode 100644
index 0000000..a61d9e4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/CFGReachabilityAnalysis.h
@@ -0,0 +1,49 @@
+//==- CFGReachabilityAnalysis.h - Basic reachability analysis ----*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a flow-sensitive, (mostly) path-insensitive reachability
+// analysis based on Clang's CFGs. Clients can query if a given basic block
+// is reachable within the CFG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_ANALYSIS_CFG_REACHABILITY
+#define CLANG_ANALYSIS_CFG_REACHABILITY
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace clang {
+
+class CFG;
+class CFGBlock;
+
+// A class that performs reachability queries for CFGBlocks. Several internal
+// checks in this checker require reachability information. The requests all
+// tend to have a common destination, so we lazily do a predecessor search
+// from the destination node and cache the results to prevent work
+// duplication.
+class CFGReverseBlockReachabilityAnalysis {
+ typedef llvm::BitVector ReachableSet;
+ typedef llvm::DenseMap<unsigned, ReachableSet> ReachableMap;
+ ReachableSet analyzed;
+ ReachableMap reachable;
+public:
+ CFGReverseBlockReachabilityAnalysis(const CFG &cfg);
+
+ /// Returns true if the block 'Dst' can be reached from block 'Src'.
+ bool isReachable(const CFGBlock *Src, const CFGBlock *Dst);
+
+private:
+ void mapReachability(const CFGBlock *Dst);
+};
+
+}
+
+#endif
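A minimal usage sketch for the reachability class above; the helper name entryReachesExit is invented, and a CFG built elsewhere (for example by AnalysisDeclContext::getCFG()) is assumed.

    #include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
    #include "clang/Analysis/CFG.h"

    using namespace clang;

    // Returns true if the exit block can be reached from the entry block.
    static bool entryReachesExit(const CFG &cfg) {
      CFGReverseBlockReachabilityAnalysis Reachability(cfg);
      return Reachability.isReachable(&cfg.getEntry(), &cfg.getExit());
    }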
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/Dominators.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/Dominators.h
new file mode 100644
index 0000000..e9a431a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/Dominators.h
@@ -0,0 +1,212 @@
+//==- Dominators.h - Implementation of dominators tree for Clang CFG C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the dominators tree functionality for Clang CFGs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_DOMINATORS_H
+#define LLVM_CLANG_DOMINATORS_H
+
+#include "clang/Analysis/AnalysisContext.h"
+
+#include "llvm/Module.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "clang/Analysis/CFG.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/DominatorInternals.h"
+
+namespace clang {
+
+class CFGBlock;
+typedef llvm::DomTreeNodeBase<CFGBlock> DomTreeNode;
+
+/// \brief Concrete subclass of DominatorTreeBase for Clang
+/// This class implements the dominators tree functionality given a Clang CFG.
+///
+class DominatorTree : public ManagedAnalysis {
+ virtual void anchor();
+public:
+ llvm::DominatorTreeBase<CFGBlock>* DT;
+
+ DominatorTree() {
+ DT = new llvm::DominatorTreeBase<CFGBlock>(false);
+ }
+
+ ~DominatorTree() {
+ delete DT;
+ }
+
+ llvm::DominatorTreeBase<CFGBlock>& getBase() { return *DT; }
+
+ /// \brief This method returns the root CFGBlock of the dominators tree.
+ ///
+ inline CFGBlock *getRoot() const {
+ return DT->getRoot();
+ }
+
+ /// \brief This method returns the root DomTreeNode, which is the wrapper
+ /// for CFGBlock.
+ inline DomTreeNode *getRootNode() const {
+ return DT->getRootNode();
+ }
+
+ /// \brief This method compares two dominator trees.
+ /// It returns false if the other dominator tree matches this dominator
+ /// tree, and true otherwise.
+ ///
+ inline bool compare(DominatorTree &Other) const {
+ DomTreeNode *R = getRootNode();
+ DomTreeNode *OtherR = Other.getRootNode();
+
+ if (!R || !OtherR || R->getBlock() != OtherR->getBlock())
+ return true;
+
+ if (DT->compare(Other.getBase()))
+ return true;
+
+ return false;
+ }
+
+ /// \brief This method builds the dominator tree for a given CFG
+ /// The CFG information is passed via AnalysisDeclContext
+ ///
+ void buildDominatorTree(AnalysisDeclContext &AC) {
+ cfg = AC.getCFG();
+ DT->recalculate(*cfg);
+ }
+
+ /// \brief This method dumps immediate dominators for each block,
+ /// mainly used for debug purposes.
+ ///
+ void dump() {
+ llvm::errs() << "Immediate dominance tree (Node#,IDom#):\n";
+ for (CFG::const_iterator I = cfg->begin(),
+ E = cfg->end(); I != E; ++I) {
+ if(DT->getNode(*I)->getIDom())
+ llvm::errs() << "(" << (*I)->getBlockID()
+ << ","
+ << DT->getNode(*I)->getIDom()->getBlock()->getBlockID()
+ << ")\n";
+ else llvm::errs() << "(" << (*I)->getBlockID()
+ << "," << (*I)->getBlockID() << ")\n";
+ }
+ }
+
+ /// \brief This method tests if one CFGBlock dominates the other.
+ /// The method returns true if A dominates B, false otherwise.
+ /// Note a block always dominates itself.
+ ///
+ inline bool dominates(const CFGBlock* A, const CFGBlock* B) const {
+ return DT->dominates(A, B);
+ }
+
+ /// \brief This method tests if one CFGBlock properly dominates the other.
+ /// The method returns true if A properly dominates B, false otherwise.
+ ///
+ bool properlyDominates(const CFGBlock *A, const CFGBlock *B) const {
+ return DT->properlyDominates(A, B);
+ }
+
+ /// \brief This method finds the nearest common dominator CFG block
+ /// for CFG block A and B. If there is no such block then return NULL.
+ ///
+ inline CFGBlock *findNearestCommonDominator(CFGBlock *A, CFGBlock *B) {
+ return DT->findNearestCommonDominator(A, B);
+ }
+
+ inline const CFGBlock *findNearestCommonDominator(const CFGBlock *A,
+ const CFGBlock *B) {
+ return DT->findNearestCommonDominator(A, B);
+ }
+
+ /// \brief This method is used to update the dominator
+ /// tree information when a node's immediate dominator changes.
+ ///
+ inline void changeImmediateDominator(CFGBlock *N, CFGBlock *NewIDom) {
+ DT->changeImmediateDominator(N, NewIDom);
+ }
+
+ /// \brief This method tests if the given CFGBlock is reachable from the root.
+ /// Returns true if reachable, false otherwise.
+ ///
+ bool isReachableFromEntry(const CFGBlock *A) {
+ return DT->isReachableFromEntry(A);
+ }
+
+ /// \brief This method releases the memory held by the dominator tree.
+ ///
+ virtual void releaseMemory() {
+ DT->releaseMemory();
+ }
+
+ /// \brief This method converts the dominator tree to human readable form.
+ ///
+ virtual void print(raw_ostream &OS, const llvm::Module *M = 0) const {
+ DT->print(OS);
+ }
+
+private:
+ CFG *cfg;
+};
+
+inline void WriteAsOperand(raw_ostream &OS, const CFGBlock *BB,
+ bool t) {
+ OS << "BB#" << BB->getBlockID();
+}
+
+} // end namespace clang
+
+//===-------------------------------------
+/// DominatorTree GraphTraits specialization so the DominatorTree can be
+/// iterated by generic graph iterators.
+///
+namespace llvm {
+template <> struct GraphTraits< ::clang::DomTreeNode* > {
+ typedef ::clang::DomTreeNode NodeType;
+ typedef NodeType::iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(NodeType *N) {
+ return N;
+ }
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return N->begin();
+ }
+ static inline ChildIteratorType child_end(NodeType *N) {
+ return N->end();
+ }
+
+ typedef df_iterator< ::clang::DomTreeNode* > nodes_iterator;
+
+ static nodes_iterator nodes_begin(::clang::DomTreeNode *N) {
+ return df_begin(getEntryNode(N));
+ }
+
+ static nodes_iterator nodes_end(::clang::DomTreeNode *N) {
+ return df_end(getEntryNode(N));
+ }
+};
+
+template <> struct GraphTraits< ::clang::DominatorTree* >
+ : public GraphTraits< ::clang::DomTreeNode* > {
+ static NodeType *getEntryNode(::clang::DominatorTree *DT) {
+ return DT->getRootNode();
+ }
+
+ static nodes_iterator nodes_begin(::clang::DominatorTree *N) {
+ return df_begin(getEntryNode(N));
+ }
+
+ static nodes_iterator nodes_end(::clang::DominatorTree *N) {
+ return df_end(getEntryNode(N));
+ }
+};
+} // end namespace llvm
+
+#endif
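To make the intended use concrete, a small hypothetical helper that builds the tree for a declaration's CFG and runs one dominance query; only the members declared above are assumed.

    #include "clang/Analysis/Analyses/Dominators.h"

    using namespace clang;

    // Build the dominator tree for the context's CFG and test A dom B.
    static bool dominatesInCFG(AnalysisDeclContext &AC,
                               const CFGBlock *A, const CFGBlock *B) {
      DominatorTree Dom;
      Dom.buildDominatorTree(AC); // recalculates the tree over AC.getCFG()
      return Dom.dominates(A, B); // note: a block always dominates itself
    }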
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h
new file mode 100644
index 0000000..d4d8dc0
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/FormatString.h
@@ -0,0 +1,657 @@
+//= FormatString.h - Analysis of printf/fprintf format strings --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines APIs for analyzing the format strings of printf, fscanf,
+// and friends.
+//
+// The structure of format strings for fprintf are described in C99 7.19.6.1.
+//
+// The structure of format strings for fscanf are described in C99 7.19.6.2.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FORMAT_H
+#define LLVM_CLANG_FORMAT_H
+
+#include "clang/AST/CanonicalType.h"
+
+namespace clang {
+
+//===----------------------------------------------------------------------===//
+/// Common components of both fprintf and fscanf format strings.
+namespace analyze_format_string {
+
+/// Class representing optional flags with location and representation
+/// information.
+class OptionalFlag {
+public:
+ OptionalFlag(const char *Representation)
+ : representation(Representation), flag(false) {}
+ bool isSet() { return flag; }
+ void set() { flag = true; }
+ void clear() { flag = false; }
+ void setPosition(const char *position) {
+ assert(position);
+ this->position = position;
+ }
+ const char *getPosition() const {
+ assert(position);
+ return position;
+ }
+ const char *toString() const { return representation; }
+
+ // Overloaded operators for bool-like qualities.
+ operator bool() const { return flag; }
+ OptionalFlag& operator=(const bool &rhs) {
+ flag = rhs;
+ return *this; // Return a reference to myself.
+ }
+private:
+ const char *representation;
+ const char *position;
+ bool flag;
+};
+
+/// Represents the length modifier in a format string in scanf/printf.
+class LengthModifier {
+public:
+ enum Kind {
+ None,
+ AsChar, // 'hh'
+ AsShort, // 'h'
+ AsLong, // 'l'
+ AsLongLong, // 'll'
+ AsQuad, // 'q' (BSD, deprecated, for 64-bit integer types)
+ AsIntMax, // 'j'
+ AsSizeT, // 'z'
+ AsPtrDiff, // 't'
+ AsLongDouble, // 'L'
+ AsAllocate, // for '%as', GNU extension to C90 scanf
+ AsMAllocate, // for '%ms', GNU extension to scanf
+ AsWideChar = AsLong // for '%ls', only makes sense for printf
+ };
+
+ LengthModifier()
+ : Position(0), kind(None) {}
+ LengthModifier(const char *pos, Kind k)
+ : Position(pos), kind(k) {}
+
+ const char *getStart() const {
+ return Position;
+ }
+
+ unsigned getLength() const {
+ switch (kind) {
+ default:
+ return 1;
+ case AsLongLong:
+ case AsChar:
+ return 2;
+ case None:
+ return 0;
+ }
+ }
+
+ Kind getKind() const { return kind; }
+ void setKind(Kind k) { kind = k; }
+
+ const char *toString() const;
+
+private:
+ const char *Position;
+ Kind kind;
+};
+
+class ConversionSpecifier {
+public:
+ enum Kind {
+ InvalidSpecifier = 0,
+ // C99 conversion specifiers.
+ cArg,
+ dArg,
+ iArg,
+ IntArgBeg = cArg, IntArgEnd = iArg,
+
+ oArg,
+ uArg,
+ xArg,
+ XArg,
+ UIntArgBeg = oArg, UIntArgEnd = XArg,
+
+ fArg,
+ FArg,
+ eArg,
+ EArg,
+ gArg,
+ GArg,
+ aArg,
+ AArg,
+ DoubleArgBeg = fArg, DoubleArgEnd = AArg,
+
+ sArg,
+ pArg,
+ nArg,
+ PercentArg,
+ CArg,
+ SArg,
+
+ // ** Printf-specific **
+
+ // Objective-C specific specifiers.
+ ObjCObjArg, // '@'
+ ObjCBeg = ObjCObjArg, ObjCEnd = ObjCObjArg,
+
+ // FreeBSD specific specifiers
+ bArg,
+ DArg,
+ rArg,
+
+ // GlibC specific specifiers.
+ PrintErrno, // 'm'
+
+ PrintfConvBeg = ObjCObjArg, PrintfConvEnd = PrintErrno,
+
+ // ** Scanf-specific **
+ ScanListArg, // '['
+ ScanfConvBeg = ScanListArg, ScanfConvEnd = ScanListArg
+ };
+
+ ConversionSpecifier(bool isPrintf)
+ : IsPrintf(isPrintf), Position(0), EndScanList(0), kind(InvalidSpecifier) {}
+
+ ConversionSpecifier(bool isPrintf, const char *pos, Kind k)
+ : IsPrintf(isPrintf), Position(pos), EndScanList(0), kind(k) {}
+
+ const char *getStart() const {
+ return Position;
+ }
+
+ StringRef getCharacters() const {
+ return StringRef(getStart(), getLength());
+ }
+
+ bool consumesDataArgument() const {
+ switch (kind) {
+ case PrintErrno:
+ assert(IsPrintf);
+ case PercentArg:
+ return false;
+ default:
+ return true;
+ }
+ }
+
+ Kind getKind() const { return kind; }
+ void setKind(Kind k) { kind = k; }
+ unsigned getLength() const {
+ return EndScanList ? EndScanList - Position : 1;
+ }
+
+ bool isUIntArg() const { return kind >= UIntArgBeg && kind <= UIntArgEnd; }
+ const char *toString() const;
+
+ bool isPrintfKind() const { return IsPrintf; }
+
+protected:
+ bool IsPrintf;
+ const char *Position;
+ const char *EndScanList;
+ Kind kind;
+};
+
+class ArgTypeResult {
+public:
+ enum Kind { UnknownTy, InvalidTy, SpecificTy, ObjCPointerTy, CPointerTy,
+ AnyCharTy, CStrTy, WCStrTy, WIntTy };
+private:
+ const Kind K;
+ QualType T;
+ const char *Name;
+ ArgTypeResult(bool) : K(InvalidTy), Name(0) {}
+public:
+ ArgTypeResult(Kind k = UnknownTy) : K(k), Name(0) {}
+ ArgTypeResult(Kind k, const char *n) : K(k), Name(n) {}
+ ArgTypeResult(QualType t) : K(SpecificTy), T(t), Name(0) {}
+ ArgTypeResult(QualType t, const char *n) : K(SpecificTy), T(t), Name(n) {}
+ ArgTypeResult(CanQualType t) : K(SpecificTy), T(t), Name(0) {}
+
+ static ArgTypeResult Invalid() { return ArgTypeResult(true); }
+
+ bool isValid() const { return K != InvalidTy; }
+
+ const QualType *getSpecificType() const {
+ return K == SpecificTy ? &T : 0;
+ }
+
+ bool matchesType(ASTContext &C, QualType argTy) const;
+
+ bool matchesAnyObjCObjectRef() const { return K == ObjCPointerTy; }
+
+ QualType getRepresentativeType(ASTContext &C) const;
+
+ std::string getRepresentativeTypeName(ASTContext &C) const;
+};
+
+class OptionalAmount {
+public:
+ enum HowSpecified { NotSpecified, Constant, Arg, Invalid };
+
+ OptionalAmount(HowSpecified howSpecified,
+ unsigned amount,
+ const char *amountStart,
+ unsigned amountLength,
+ bool usesPositionalArg)
+ : start(amountStart), length(amountLength), hs(howSpecified), amt(amount),
+ UsesPositionalArg(usesPositionalArg), UsesDotPrefix(0) {}
+
+ OptionalAmount(bool valid = true)
+ : start(0),length(0), hs(valid ? NotSpecified : Invalid), amt(0),
+ UsesPositionalArg(0), UsesDotPrefix(0) {}
+
+ bool isInvalid() const {
+ return hs == Invalid;
+ }
+
+ HowSpecified getHowSpecified() const { return hs; }
+ void setHowSpecified(HowSpecified h) { hs = h; }
+
+ bool hasDataArgument() const { return hs == Arg; }
+
+ unsigned getArgIndex() const {
+ assert(hasDataArgument());
+ return amt;
+ }
+
+ unsigned getConstantAmount() const {
+ assert(hs == Constant);
+ return amt;
+ }
+
+ const char *getStart() const {
+ // We include the . character if it is given.
+ return start - UsesDotPrefix;
+ }
+
+ unsigned getConstantLength() const {
+ assert(hs == Constant);
+ return length + UsesDotPrefix;
+ }
+
+ ArgTypeResult getArgType(ASTContext &Ctx) const;
+
+ void toString(raw_ostream &os) const;
+
+ bool usesPositionalArg() const { return (bool) UsesPositionalArg; }
+ unsigned getPositionalArgIndex() const {
+ assert(hasDataArgument());
+ return amt + 1;
+ }
+
+ bool usesDotPrefix() const { return UsesDotPrefix; }
+ void setUsesDotPrefix() { UsesDotPrefix = true; }
+
+private:
+ const char *start;
+ unsigned length;
+ HowSpecified hs;
+ unsigned amt;
+ bool UsesPositionalArg : 1;
+ bool UsesDotPrefix;
+};
+
+
+class FormatSpecifier {
+protected:
+ LengthModifier LM;
+ OptionalAmount FieldWidth;
+ ConversionSpecifier CS;
+ /// Positional arguments, an IEEE extension:
+ /// IEEE Std 1003.1, 2004 Edition
+ /// http://www.opengroup.org/onlinepubs/009695399/functions/printf.html
+ bool UsesPositionalArg;
+ unsigned argIndex;
+public:
+ FormatSpecifier(bool isPrintf)
+ : CS(isPrintf), UsesPositionalArg(false), argIndex(0) {}
+
+ void setLengthModifier(LengthModifier lm) {
+ LM = lm;
+ }
+
+ void setUsesPositionalArg() { UsesPositionalArg = true; }
+
+ void setArgIndex(unsigned i) {
+ argIndex = i;
+ }
+
+ unsigned getArgIndex() const {
+ return argIndex;
+ }
+
+ unsigned getPositionalArgIndex() const {
+ return argIndex + 1;
+ }
+
+ const LengthModifier &getLengthModifier() const {
+ return LM;
+ }
+
+ const OptionalAmount &getFieldWidth() const {
+ return FieldWidth;
+ }
+
+ void setFieldWidth(const OptionalAmount &Amt) {
+ FieldWidth = Amt;
+ }
+
+ bool usesPositionalArg() const { return UsesPositionalArg; }
+
+ bool hasValidLengthModifier() const;
+
+ bool hasStandardLengthModifier() const;
+
+ bool hasStandardConversionSpecifier(const LangOptions &LangOpt) const;
+
+ bool hasStandardLengthConversionCombination() const;
+};
+
+} // end analyze_format_string namespace
+
+//===----------------------------------------------------------------------===//
+/// Pieces specific to fprintf format strings.
+
+namespace analyze_printf {
+
+class PrintfConversionSpecifier :
+ public analyze_format_string::ConversionSpecifier {
+public:
+ PrintfConversionSpecifier()
+ : ConversionSpecifier(true, 0, InvalidSpecifier) {}
+
+ PrintfConversionSpecifier(const char *pos, Kind k)
+ : ConversionSpecifier(true, pos, k) {}
+
+ bool isObjCArg() const { return kind >= ObjCBeg && kind <= ObjCEnd; }
+ bool isIntArg() const { return kind >= IntArgBeg && kind <= IntArgEnd; }
+ bool isDoubleArg() const { return kind >= DoubleArgBeg &&
+ kind <= DoubleArgEnd; }
+ unsigned getLength() const {
+ // Conversion specifiers are currently represented only by single
+ // characters, but we leave room to be more flexible.
+ return 1;
+ }
+
+ static bool classof(const analyze_format_string::ConversionSpecifier *CS) {
+ return CS->isPrintfKind();
+ }
+};
+
+using analyze_format_string::ArgTypeResult;
+using analyze_format_string::LengthModifier;
+using analyze_format_string::OptionalAmount;
+using analyze_format_string::OptionalFlag;
+
+class PrintfSpecifier : public analyze_format_string::FormatSpecifier {
+ OptionalFlag HasThousandsGrouping; // ''', POSIX extension.
+ OptionalFlag IsLeftJustified; // '-'
+ OptionalFlag HasPlusPrefix; // '+'
+ OptionalFlag HasSpacePrefix; // ' '
+ OptionalFlag HasAlternativeForm; // '#'
+ OptionalFlag HasLeadingZeroes; // '0'
+ OptionalAmount Precision;
+public:
+ PrintfSpecifier() :
+ FormatSpecifier(/* isPrintf = */ true),
+ HasThousandsGrouping("'"), IsLeftJustified("-"), HasPlusPrefix("+"),
+ HasSpacePrefix(" "), HasAlternativeForm("#"), HasLeadingZeroes("0") {}
+
+ static PrintfSpecifier Parse(const char *beg, const char *end);
+
+ // Methods for incrementally constructing the PrintfSpecifier.
+ void setConversionSpecifier(const PrintfConversionSpecifier &cs) {
+ CS = cs;
+ }
+ void setHasThousandsGrouping(const char *position) {
+ HasThousandsGrouping = true;
+ HasThousandsGrouping.setPosition(position);
+ }
+ void setIsLeftJustified(const char *position) {
+ IsLeftJustified = true;
+ IsLeftJustified.setPosition(position);
+ }
+ void setHasPlusPrefix(const char *position) {
+ HasPlusPrefix = true;
+ HasPlusPrefix.setPosition(position);
+ }
+ void setHasSpacePrefix(const char *position) {
+ HasSpacePrefix = true;
+ HasSpacePrefix.setPosition(position);
+ }
+ void setHasAlternativeForm(const char *position) {
+ HasAlternativeForm = true;
+ HasAlternativeForm.setPosition(position);
+ }
+ void setHasLeadingZeros(const char *position) {
+ HasLeadingZeroes = true;
+ HasLeadingZeroes.setPosition(position);
+ }
+ void setUsesPositionalArg() { UsesPositionalArg = true; }
+
+ // Methods for querying the format specifier.
+
+ const PrintfConversionSpecifier &getConversionSpecifier() const {
+ return cast<PrintfConversionSpecifier>(CS);
+ }
+
+ void setPrecision(const OptionalAmount &Amt) {
+ Precision = Amt;
+ Precision.setUsesDotPrefix();
+ }
+
+ const OptionalAmount &getPrecision() const {
+ return Precision;
+ }
+
+ bool consumesDataArgument() const {
+ return getConversionSpecifier().consumesDataArgument();
+ }
+
+ /// \brief Returns the builtin type that a data argument
+ /// paired with this format specifier should have. This method
+ /// will return null if the format specifier does not have
+ /// a matching data argument or the matching argument matches
+ /// more than one type.
+ ArgTypeResult getArgType(ASTContext &Ctx, bool IsObjCLiteral) const;
+
+ const OptionalFlag &hasThousandsGrouping() const {
+ return HasThousandsGrouping;
+ }
+ const OptionalFlag &isLeftJustified() const { return IsLeftJustified; }
+ const OptionalFlag &hasPlusPrefix() const { return HasPlusPrefix; }
+ const OptionalFlag &hasAlternativeForm() const { return HasAlternativeForm; }
+ const OptionalFlag &hasLeadingZeros() const { return HasLeadingZeroes; }
+ const OptionalFlag &hasSpacePrefix() const { return HasSpacePrefix; }
+ bool usesPositionalArg() const { return UsesPositionalArg; }
+
+ /// Changes the specifier and length according to a QualType, retaining any
+ /// flags or options. Returns true on success, or false when a conversion
+ /// was not successful.
+ bool fixType(QualType QT, const LangOptions &LangOpt, ASTContext &Ctx,
+ bool IsObjCLiteral);
+
+ void toString(raw_ostream &os) const;
+
+ // Validation methods - to check if any element results in undefined behavior
+ bool hasValidPlusPrefix() const;
+ bool hasValidAlternativeForm() const;
+ bool hasValidLeadingZeros() const;
+ bool hasValidSpacePrefix() const;
+ bool hasValidLeftJustified() const;
+ bool hasValidThousandsGroupingPrefix() const;
+
+ bool hasValidPrecision() const;
+ bool hasValidFieldWidth() const;
+};
+} // end analyze_printf namespace
+
+//===----------------------------------------------------------------------===//
+/// Pieces specific to fscanf format strings.
+
+namespace analyze_scanf {
+
+class ScanfConversionSpecifier :
+ public analyze_format_string::ConversionSpecifier {
+public:
+ ScanfConversionSpecifier()
+ : ConversionSpecifier(false, 0, InvalidSpecifier) {}
+
+ ScanfConversionSpecifier(const char *pos, Kind k)
+ : ConversionSpecifier(false, pos, k) {}
+
+ void setEndScanList(const char *pos) { EndScanList = pos; }
+
+ static bool classof(const analyze_format_string::ConversionSpecifier *CS) {
+ return !CS->isPrintfKind();
+ }
+};
+
+using analyze_format_string::ArgTypeResult;
+using analyze_format_string::LengthModifier;
+using analyze_format_string::OptionalAmount;
+using analyze_format_string::OptionalFlag;
+
+class ScanfArgTypeResult : public ArgTypeResult {
+public:
+ enum Kind { UnknownTy, InvalidTy, CStrTy, WCStrTy, PtrToArgTypeResultTy };
+private:
+ Kind K;
+ ArgTypeResult A;
+ const char *Name;
+ QualType getRepresentativeType(ASTContext &C) const;
+public:
+ ScanfArgTypeResult(Kind k = UnknownTy, const char* n = 0) : K(k), Name(n) {}
+ ScanfArgTypeResult(ArgTypeResult a, const char *n = 0)
+ : K(PtrToArgTypeResultTy), A(a), Name(n) {
+ assert(A.isValid());
+ }
+
+ static ScanfArgTypeResult Invalid() { return ScanfArgTypeResult(InvalidTy); }
+
+ bool isValid() const { return K != InvalidTy; }
+
+ bool matchesType(ASTContext& C, QualType argTy) const;
+
+ std::string getRepresentativeTypeName(ASTContext& C) const;
+};
+
+class ScanfSpecifier : public analyze_format_string::FormatSpecifier {
+ OptionalFlag SuppressAssignment; // '*'
+public:
+ ScanfSpecifier() :
+ FormatSpecifier(/* isPrintf = */ false),
+ SuppressAssignment("*") {}
+
+ void setSuppressAssignment(const char *position) {
+ SuppressAssignment = true;
+ SuppressAssignment.setPosition(position);
+ }
+
+ const OptionalFlag &getSuppressAssignment() const {
+ return SuppressAssignment;
+ }
+
+ void setConversionSpecifier(const ScanfConversionSpecifier &cs) {
+ CS = cs;
+ }
+
+ const ScanfConversionSpecifier &getConversionSpecifier() const {
+ return cast<ScanfConversionSpecifier>(CS);
+ }
+
+ bool consumesDataArgument() const {
+ return CS.consumesDataArgument() && !SuppressAssignment;
+ }
+
+ ScanfArgTypeResult getArgType(ASTContext &Ctx) const;
+
+ bool fixType(QualType QT, const LangOptions &LangOpt, ASTContext &Ctx);
+
+ void toString(raw_ostream &os) const;
+
+ static ScanfSpecifier Parse(const char *beg, const char *end);
+};
+
+} // end analyze_scanf namespace
+
+//===----------------------------------------------------------------------===//
+// Parsing and processing of format strings (both fprintf and fscanf).
+
+namespace analyze_format_string {
+
+enum PositionContext { FieldWidthPos = 0, PrecisionPos = 1 };
+
+class FormatStringHandler {
+public:
+ FormatStringHandler() {}
+ virtual ~FormatStringHandler();
+
+ virtual void HandleNullChar(const char *nullCharacter) {}
+
+ virtual void HandlePosition(const char *startPos, unsigned posLen) {}
+
+ virtual void HandleInvalidPosition(const char *startPos, unsigned posLen,
+ PositionContext p) {}
+
+ virtual void HandleZeroPosition(const char *startPos, unsigned posLen) {}
+
+ virtual void HandleIncompleteSpecifier(const char *startSpecifier,
+ unsigned specifierLen) {}
+
+ // Printf-specific handlers.
+
+ virtual bool HandleInvalidPrintfConversionSpecifier(
+ const analyze_printf::PrintfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+ return true;
+ }
+
+ virtual bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+ return true;
+ }
+
+ // Scanf-specific handlers.
+
+ virtual bool HandleInvalidScanfConversionSpecifier(
+ const analyze_scanf::ScanfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+ return true;
+ }
+
+ virtual bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+ return true;
+ }
+
+ virtual void HandleIncompleteScanList(const char *start, const char *end) {}
+};
+
+bool ParsePrintfString(FormatStringHandler &H,
+ const char *beg, const char *end, const LangOptions &LO);
+
+bool ParseScanfString(FormatStringHandler &H,
+ const char *beg, const char *end, const LangOptions &LO);
+
+} // end analyze_format_string namespace
+} // end clang namespace
+#endif
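A hedged sketch of a client for the parsing entry points above; the PrintfArgCounter class and countPrintfDataArgs helper are invented, and only the virtual hooks and ParsePrintfString() declared in this header are assumed.

    #include "clang/Analysis/Analyses/FormatString.h"
    #include "clang/Basic/LangOptions.h"
    #include "llvm/ADT/StringRef.h"

    using namespace clang;
    using namespace clang::analyze_format_string;

    // Counts the printf conversion specifiers that consume a data argument.
    class PrintfArgCounter : public FormatStringHandler {
      unsigned Count;
    public:
      PrintfArgCounter() : Count(0) {}

      virtual bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
                                         const char *startSpecifier,
                                         unsigned specifierLen) {
        if (FS.consumesDataArgument())
          ++Count;
        return true; // keep scanning the rest of the format string
      }

      unsigned getCount() const { return Count; }
    };

    static unsigned countPrintfDataArgs(StringRef Fmt, const LangOptions &LO) {
      PrintfArgCounter Counter;
      ParsePrintfString(Counter, Fmt.begin(), Fmt.end(), LO);
      return Counter.getCount();
    }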
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/LiveVariables.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/LiveVariables.h
new file mode 100644
index 0000000..c9f39b4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/LiveVariables.h
@@ -0,0 +1,120 @@
+//===- LiveVariables.h - Live Variable Analysis for Source CFGs -*- C++ --*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements Live Variables analysis for source-level CFGs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIVEVARIABLES_H
+#define LLVM_CLANG_LIVEVARIABLES_H
+
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/AST/Decl.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/ImmutableSet.h"
+
+namespace clang {
+
+class CFG;
+class CFGBlock;
+class Stmt;
+class DeclRefExpr;
+class SourceManager;
+
+class LiveVariables : public ManagedAnalysis {
+public:
+ class LivenessValues {
+ public:
+
+ llvm::ImmutableSet<const Stmt *> liveStmts;
+ llvm::ImmutableSet<const VarDecl *> liveDecls;
+
+ bool equals(const LivenessValues &V) const;
+
+ LivenessValues()
+ : liveStmts(0), liveDecls(0) {}
+
+ LivenessValues(llvm::ImmutableSet<const Stmt *> LiveStmts,
+ llvm::ImmutableSet<const VarDecl *> LiveDecls)
+ : liveStmts(LiveStmts), liveDecls(LiveDecls) {}
+
+ ~LivenessValues() {}
+
+ bool isLive(const Stmt *S) const;
+ bool isLive(const VarDecl *D) const;
+
+ friend class LiveVariables;
+ };
+
+ class Observer {
+ virtual void anchor();
+ public:
+ virtual ~Observer() {}
+
+ /// A callback invoked right before invoking the
+ /// liveness transfer function on the given statement.
+ virtual void observeStmt(const Stmt *S,
+ const CFGBlock *currentBlock,
+ const LivenessValues& V) {}
+
+ /// Called when the live variables analysis registers
+ /// that a variable is killed.
+ virtual void observerKill(const DeclRefExpr *DR) {}
+ };
+
+
+ virtual ~LiveVariables();
+
+ /// Compute the liveness information for a given CFG.
+ static LiveVariables *computeLiveness(AnalysisDeclContext &analysisContext,
+ bool killAtAssign);
+
+ /// Return true if a variable is live at the end of a
+ /// specified block.
+ bool isLive(const CFGBlock *B, const VarDecl *D);
+
+ /// Returns true if a variable is live at the beginning of the
+ /// statement. This query only works if liveness information
+ /// has been recorded at the statement level (see runOnAllBlocks), and
+ /// only returns liveness information for block-level expressions.
+ bool isLive(const Stmt *S, const VarDecl *D);
+
+ /// Returns true if the block-level expression "value" is live
+ /// before the given block-level expression (see runOnAllBlocks).
+ bool isLive(const Stmt *Loc, const Stmt *StmtVal);
+
+ /// Print to stderr the liveness information associated with
+ /// each basic block.
+ void dumpBlockLiveness(const SourceManager& M);
+
+ void runOnAllBlocks(Observer &obs);
+
+ static LiveVariables *create(AnalysisDeclContext &analysisContext) {
+ return computeLiveness(analysisContext, true);
+ }
+
+ static const void *getTag();
+
+private:
+ LiveVariables(void *impl);
+ void *impl;
+};
+
+class RelaxedLiveVariables : public LiveVariables {
+public:
+ static LiveVariables *create(AnalysisDeclContext &analysisContext) {
+ return computeLiveness(analysisContext, false);
+ }
+
+ static const void *getTag();
+};
+
+} // end namespace clang
+
+#endif
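A brief hypothetical example of driving the analysis above through computeLiveness(); in practice the object is more commonly obtained via AnalysisDeclContext::getAnalysis<LiveVariables>() (declared further below in AnalysisContext.h).

    #include "clang/Analysis/Analyses/LiveVariables.h"

    using namespace clang;

    // Computes liveness for the context's CFG and asks whether VD is live at
    // the end of block B. Returns false if the analysis could not be built.
    static bool isLiveAtBlockExit(AnalysisDeclContext &AC,
                                  const CFGBlock *B, const VarDecl *VD) {
      LiveVariables *LV = LiveVariables::computeLiveness(AC, /*killAtAssign=*/true);
      if (!LV)
        return false;
      bool Result = LV->isLive(B, VD);
      delete LV;
      return Result;
    }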
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h
new file mode 100644
index 0000000..4e3244e
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PostOrderCFGView.h
@@ -0,0 +1,111 @@
+//===- PostOrderCFGView.h - Post order view of CFG blocks ---------*- C++ --*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements post order view of the blocks in a CFG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_POSTORDER_CFGVIEW
+#define LLVM_CLANG_POSTORDER_CFGVIEW
+
+#include <vector>
+//#include <algorithm>
+
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/BitVector.h"
+
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+
+namespace clang {
+
+class PostOrderCFGView : public ManagedAnalysis {
+ virtual void anchor();
+public:
+ /// \brief Implements a set of CFGBlocks using a BitVector.
+ ///
+ /// This class contains a minimal interface, primarily dictated by the SetType
+ /// template parameter of the llvm::po_iterator template, as used with
+ /// external storage. We also use this set to keep track of which CFGBlocks we
+ /// visit during the analysis.
+ class CFGBlockSet {
+ llvm::BitVector VisitedBlockIDs;
+ public:
+ // po_iterator requires this iterator, but the only interface needed is the
+ // value_type typedef.
+ struct iterator { typedef const CFGBlock *value_type; };
+
+ CFGBlockSet() {}
+ CFGBlockSet(const CFG *G) : VisitedBlockIDs(G->getNumBlockIDs(), false) {}
+
+ /// \brief Set the bit associated with a particular CFGBlock.
+ /// This is the important method for the SetType template parameter.
+ bool insert(const CFGBlock *Block) {
+ // Note that insert() is called by po_iterator, which doesn't check to
+ // make sure that Block is non-null. Moreover, the CFGBlock iterator will
+ // occasionally hand out null pointers for pruned edges, so we catch those
+ // here.
+ if (Block == 0)
+ return false; // if an edge is trivially false.
+ if (VisitedBlockIDs.test(Block->getBlockID()))
+ return false;
+ VisitedBlockIDs.set(Block->getBlockID());
+ return true;
+ }
+
+ /// \brief Check if the bit for a CFGBlock has already been set.
+ /// This method is for tracking visited blocks in the main thread-safety
+ /// loop. Block must not be null.
+ bool alreadySet(const CFGBlock *Block) {
+ return VisitedBlockIDs.test(Block->getBlockID());
+ }
+ };
+
+private:
+ typedef llvm::po_iterator<const CFG*, CFGBlockSet, true> po_iterator;
+ std::vector<const CFGBlock*> Blocks;
+
+ typedef llvm::DenseMap<const CFGBlock *, unsigned> BlockOrderTy;
+ BlockOrderTy BlockOrder;
+
+public:
+ typedef std::vector<const CFGBlock*>::reverse_iterator iterator;
+
+ PostOrderCFGView(const CFG *cfg);
+
+ iterator begin() { return Blocks.rbegin(); }
+ iterator end() { return Blocks.rend(); }
+
+ bool empty() { return begin() == end(); }
+
+ struct BlockOrderCompare;
+ friend struct BlockOrderCompare;
+
+ struct BlockOrderCompare {
+ const PostOrderCFGView &POV;
+ public:
+ BlockOrderCompare(const PostOrderCFGView &pov) : POV(pov) {}
+ bool operator()(const CFGBlock *b1, const CFGBlock *b2) const;
+ };
+
+ BlockOrderCompare getComparator() const {
+ return BlockOrderCompare(*this);
+ }
+
+ // Used by AnalysisDeclContext to construct this object.
+ static const void *getTag();
+
+ static PostOrderCFGView *create(AnalysisDeclContext &analysisContext);
+};
+
+} // end clang namespace
+
+#endif
+
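An illustrative sketch of iterating the view; direct construction is shown for brevity, though the class is normally created for an AnalysisDeclContext via its create() hook. The helper name is hypothetical, and begin()/end() walk the stored post-order sequence in reverse, per the reverse_iterator typedef above.

    #include "clang/Analysis/Analyses/PostOrderCFGView.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace clang;

    // Prints block IDs in the traversal order exposed by PostOrderCFGView.
    static void printTraversalOrder(const CFG *cfg) {
      PostOrderCFGView POV(cfg);
      for (PostOrderCFGView::iterator I = POV.begin(), E = POV.end(); I != E; ++I)
        llvm::errs() << "B" << (*I)->getBlockID() << "\n";
    }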
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PseudoConstantAnalysis.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PseudoConstantAnalysis.h
new file mode 100644
index 0000000..cb73850
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/PseudoConstantAnalysis.h
@@ -0,0 +1,45 @@
+//== PseudoConstantAnalysis.h - Find Pseudo-constants in the AST -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file tracks the usage of variables in a Decl body to see if they are
+// never written to, implying that they are constant. This is useful in static
+// analysis to see if a developer might have intended a variable to be const.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_PSEUDOCONSTANTANALYSIS
+#define LLVM_CLANG_ANALYSIS_PSEUDOCONSTANTANALYSIS
+
+#include "clang/AST/Stmt.h"
+
+namespace clang {
+
+class PseudoConstantAnalysis {
+public:
+ PseudoConstantAnalysis(const Stmt *DeclBody);
+ ~PseudoConstantAnalysis();
+
+ bool isPseudoConstant(const VarDecl *VD);
+ bool wasReferenced(const VarDecl *VD);
+
+private:
+ void RunAnalysis();
+ inline static const Decl *getDecl(const Expr *E);
+
+ // for storing the result of analyzed ValueDecls
+ void *NonConstantsImpl;
+ void *UsedVarsImpl;
+
+ const Stmt *DeclBody;
+ bool Analyzed;
+};
+
+}
+
+#endif
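A short hypothetical example matching the file comment above: ask whether a local variable could have been declared const. Only the two members declared in this header are assumed.

    #include "clang/Analysis/Analyses/PseudoConstantAnalysis.h"
    #include "clang/AST/Decl.h"

    using namespace clang;

    // Returns true if VD is never written to in FD's body, i.e. the developer
    // could likely have declared it const.
    static bool couldHaveBeenConst(const FunctionDecl *FD, const VarDecl *VD) {
      if (!FD->hasBody())
        return false;
      PseudoConstantAnalysis PCA(FD->getBody());
      return PCA.isPseudoConstant(VD);
    }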
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ReachableCode.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ReachableCode.h
new file mode 100644
index 0000000..30c5b2d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ReachableCode.h
@@ -0,0 +1,56 @@
+//===- ReachableCode.h -----------------------------------------*- C++ --*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// A flow-sensitive, path-insensitive analysis of unreachable code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_REACHABLECODE_H
+#define LLVM_CLANG_REACHABLECODE_H
+
+#include "clang/Basic/SourceLocation.h"
+
+//===----------------------------------------------------------------------===//
+// Forward declarations.
+//===----------------------------------------------------------------------===//
+
+namespace llvm {
+ class BitVector;
+}
+
+namespace clang {
+ class AnalysisDeclContext;
+ class CFGBlock;
+}
+
+//===----------------------------------------------------------------------===//
+// API.
+//===----------------------------------------------------------------------===//
+
+namespace clang {
+namespace reachable_code {
+
+class Callback {
+ virtual void anchor();
+public:
+ virtual ~Callback() {}
+ virtual void HandleUnreachable(SourceLocation L, SourceRange R1,
+ SourceRange R2) = 0;
+};
+
+/// ScanReachableFromBlock - Mark all blocks reachable from Start.
+/// Returns the total number of blocks that were marked reachable.
+unsigned ScanReachableFromBlock(const CFGBlock *Start,
+ llvm::BitVector &Reachable);
+
+void FindUnreachableCode(AnalysisDeclContext &AC, Callback &CB);
+
+}} // end namespace clang::reachable_code
+
+#endif
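A sketch of a Callback client; the reporter class and helper are invented, and only HandleUnreachable() and FindUnreachableCode() from this header are assumed.

    #include "clang/Analysis/Analyses/ReachableCode.h"
    #include "clang/Analysis/AnalysisContext.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace clang;

    // Reports each unreachable region found in the context's CFG.
    class UnreachableReporter : public reachable_code::Callback {
    public:
      virtual void HandleUnreachable(SourceLocation L, SourceRange R1,
                                     SourceRange R2) {
        llvm::errs() << "unreachable code (raw loc "
                     << L.getRawEncoding() << ")\n";
      }
    };

    static void reportUnreachableCode(AnalysisDeclContext &AC) {
      UnreachableReporter CB;
      reachable_code::FindUnreachableCode(AC, CB);
    }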
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h
new file mode 100644
index 0000000..26e258d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/ThreadSafety.h
@@ -0,0 +1,159 @@
+//===- ThreadSafety.h ------------------------------------------*- C++ --*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+//
+// An intra-procedural analysis for thread safety (e.g. deadlocks and race
+// conditions), based on an annotation system.
+//
+// See http://clang.llvm.org/docs/LanguageExtensions.html#threadsafety for more
+// information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_THREADSAFETY_H
+#define LLVM_CLANG_THREADSAFETY_H
+
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+namespace thread_safety {
+
+/// This enum distinguishes between different kinds of operations that may
+/// need to be protected by locks. We use this enum in error handling.
+enum ProtectedOperationKind {
+ POK_VarDereference, /// Dereferencing a variable (e.g. p in *p = 5;)
+ POK_VarAccess, /// Reading or writing a variable (e.g. x in x = 5;)
+ POK_FunctionCall /// Making a function call (e.g. fool())
+};
+
+/// This enum distinguishes between different kinds of lock actions. For
+/// example, it is an error to write a variable protected by the shared version
+/// of a mutex.
+enum LockKind {
+ LK_Shared, /// Shared/reader lock of a mutex
+ LK_Exclusive /// Exclusive/writer lock of a mutex
+};
+
+/// This enum distinguishes between different ways to access (read or write) a
+/// variable.
+enum AccessKind {
+ AK_Read, /// Reading a variable
+ AK_Written /// Writing a variable
+};
+
+/// This enum distinguishes between different situations where we warn due to
+/// inconsistent locking.
+/// \enum LEK_LockedSomeLoopIterations -- a mutex is locked for some but not all
+/// loop iterations.
+/// \enum LEK_LockedSomePredecessors -- a mutex is locked in some but not all
+/// predecessors of a CFGBlock.
+/// \enum LEK_LockedAtEndOfFunction -- a mutex is still locked at the end of a
+/// function.
+enum LockErrorKind {
+ LEK_LockedSomeLoopIterations,
+ LEK_LockedSomePredecessors,
+ LEK_LockedAtEndOfFunction
+};
+
+/// Handler class for thread safety warnings.
+class ThreadSafetyHandler {
+public:
+ typedef llvm::StringRef Name;
+ virtual ~ThreadSafetyHandler();
+
+ /// Warn about lock expressions which fail to resolve to lockable objects.
+ /// \param Loc -- the SourceLocation of the unresolved expression.
+ virtual void handleInvalidLockExp(SourceLocation Loc) {}
+
+ /// Warn about unlock function calls that do not have a prior matching lock
+ /// expression.
+ /// \param LockName -- A StringRef name for the lock expression, to be printed
+ /// in the error message.
+ /// \param Loc -- The SourceLocation of the Unlock
+ virtual void handleUnmatchedUnlock(Name LockName, SourceLocation Loc) {}
+
+ /// Warn about lock function calls for locks which are already held.
+ /// \param LockName -- A StringRef name for the lock expression, to be printed
+ /// in the error message.
+ /// \param Loc -- The location of the second lock expression.
+ virtual void handleDoubleLock(Name LockName, SourceLocation Loc) {}
+
+ /// Warn about situations where a mutex is sometimes held and sometimes not.
+ /// The three situations are:
+ /// 1. a mutex is locked on an "if" branch but not the "else" branch,
+ /// 2. a mutex is only held at the start of some loop iterations,
+ /// 3. a mutex is locked but not unlocked inside a function.
+ /// \param LockName -- A StringRef name for the lock expression, to be printed
+ /// in the error message.
+ /// \param LocLocked -- The location of the lock expression where the mutex is
+ /// locked
+ /// \param LocEndOfScope -- The location of the end of the scope where the
+ /// mutex is no longer held
+ /// \param LEK -- which of the three above cases we should warn for
+ virtual void handleMutexHeldEndOfScope(Name LockName,
+ SourceLocation LocLocked,
+ SourceLocation LocEndOfScope,
+ LockErrorKind LEK){}
+
+ /// Warn when a mutex is held exclusively and shared at the same point. For
+ /// example, if a mutex is locked exclusively during an if branch and shared
+ /// during the else branch.
+ /// \param LockName -- A StringRef name for the lock expression, to be printed
+ /// in the error message.
+ /// \param Loc1 -- The location of the first lock expression.
+ /// \param Loc2 -- The location of the second lock expression.
+ virtual void handleExclusiveAndShared(Name LockName, SourceLocation Loc1,
+ SourceLocation Loc2) {}
+
+ /// Warn when a protected operation occurs while no locks are held.
+ /// \param D -- The decl for the protected variable or function
+ /// \param POK -- The kind of protected operation (e.g. variable access)
+ /// \param AK -- The kind of access (i.e. read or write) that occurred
+ /// \param Loc -- The location of the protected operation.
+ virtual void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK,
+ AccessKind AK, SourceLocation Loc) {}
+
+ /// Warn when a protected operation occurs while the specific mutex protecting
+ /// the operation is not locked.
+ /// \param LockName -- A StringRef name for the lock expression, to be printed
+ /// in the error message.
+ /// \param D -- The decl for the protected variable or function
+ /// \param POK -- The kind of protected operation (e.g. variable access)
+ /// \param AK -- The kind of access (i.e. read or write) that occurred
+ /// \param Loc -- The location of the protected operation.
+ virtual void handleMutexNotHeld(const NamedDecl *D,
+ ProtectedOperationKind POK, Name LockName,
+ LockKind LK, SourceLocation Loc) {}
+
+ /// Warn when a function is called while an excluded mutex is locked. For
+ /// example, the mutex may be locked inside the function.
+ /// \param FunName -- The name of the function
+ /// \param LockName -- A StringRef name for the lock expression, to be printed
+ /// in the error message.
+ /// \param Loc -- The location of the function call.
+ virtual void handleFunExcludesLock(Name FunName, Name LockName,
+ SourceLocation Loc) {}
+};
+
+/// \brief Check a function's CFG for thread-safety violations.
+///
+/// We traverse the blocks in the CFG, compute the set of mutexes that are held
+/// at the end of each block, and issue warnings for thread safety violations.
+/// Each block in the CFG is traversed exactly once.
+void runThreadSafetyAnalysis(AnalysisDeclContext &AC,
+ ThreadSafetyHandler &Handler);
+
+/// \brief Helper function that returns a LockKind required for the given level
+/// of access.
+LockKind getLockKindFromAccessKind(AccessKind AK);
+
+}} // end namespace clang::thread_safety
+#endif
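For orientation, a minimal handler that overrides two of the hooks above and runs the analysis; the class and function names are hypothetical, and all other hooks keep their empty default implementations.

    #include "clang/Analysis/Analyses/ThreadSafety.h"
    #include "llvm/Support/raw_ostream.h"

    using namespace clang;
    using namespace clang::thread_safety;

    // Logs double-lock and unmatched-unlock warnings to stderr.
    class LoggingThreadSafetyHandler : public ThreadSafetyHandler {
    public:
      virtual void handleDoubleLock(Name LockName, SourceLocation Loc) {
        llvm::errs() << "mutex '" << LockName << "' acquired twice\n";
      }
      virtual void handleUnmatchedUnlock(Name LockName, SourceLocation Loc) {
        llvm::errs() << "mutex '" << LockName << "' released but not held\n";
      }
    };

    static void checkFunctionThreadSafety(AnalysisDeclContext &AC) {
      LoggingThreadSafetyHandler Handler;
      runThreadSafetyAnalysis(AC, Handler);
    }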
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValues.h b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValues.h
new file mode 100644
index 0000000..4ee6698
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Analyses/UninitializedValues.h
@@ -0,0 +1,53 @@
+//= UninitializedValues.h - Finding uses of uninitialized values -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines APIs for invoking and reporting uninitialized values
+// warnings.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_UNINIT_VALS_H
+#define LLVM_CLANG_UNINIT_VALS_H
+
+namespace clang {
+
+class AnalysisDeclContext;
+class CFG;
+class DeclContext;
+class Expr;
+class VarDecl;
+
+class UninitVariablesHandler {
+public:
+ UninitVariablesHandler() {}
+ virtual ~UninitVariablesHandler();
+
+ /// Called when the uninitialized variable is used at the given expression.
+ virtual void handleUseOfUninitVariable(const Expr *ex,
+ const VarDecl *vd,
+ bool isAlwaysUninit) {}
+
+ /// Called when the uninitialized variable analysis detects the
+ /// idiom 'int x = x'. All other uses of 'x' within the initializer
+ /// are handled by handleUseOfUninitVariable.
+ virtual void handleSelfInit(const VarDecl *vd) {}
+};
+
+struct UninitVariablesAnalysisStats {
+ unsigned NumVariablesAnalyzed;
+ unsigned NumBlockVisits;
+};
+
+void runUninitializedVariablesAnalysis(const DeclContext &dc, const CFG &cfg,
+ AnalysisDeclContext &ac,
+ UninitVariablesHandler &handler,
+ UninitVariablesAnalysisStats &stats);
+
+}
+#endif
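A hypothetical handler that simply counts reported uses, wired to runUninitializedVariablesAnalysis() exactly as declared above; the CFG and contexts are assumed to come from elsewhere.

    #include "clang/Analysis/Analyses/UninitializedValues.h"
    #include "clang/Analysis/AnalysisContext.h"

    using namespace clang;

    // Counts every use of an uninitialized variable reported by the analysis.
    class UninitUseCounter : public UninitVariablesHandler {
    public:
      unsigned Uses;
      UninitUseCounter() : Uses(0) {}

      virtual void handleUseOfUninitVariable(const Expr *ex, const VarDecl *vd,
                                             bool isAlwaysUninit) {
        ++Uses;
      }
    };

    static unsigned countUninitUses(const DeclContext &dc, const CFG &cfg,
                                    AnalysisDeclContext &ac) {
      UninitUseCounter Handler;
      UninitVariablesAnalysisStats Stats = { 0, 0 };
      runUninitializedVariablesAnalysis(dc, cfg, ac, Handler, Stats);
      return Handler.Uses;
    }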
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h b/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h
new file mode 100644
index 0000000..6b6f8ef
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisContext.h
@@ -0,0 +1,432 @@
+//=== AnalysisContext.h - Analysis context for Path Sens analysis --*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines AnalysisDeclContext, a class that manages the analysis
+// context data for path sensitive analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_ANALYSISCONTEXT_H
+#define LLVM_CLANG_ANALYSIS_ANALYSISCONTEXT_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/Analysis/CFG.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Allocator.h"
+
+namespace clang {
+
+class Decl;
+class Stmt;
+class CFGReverseBlockReachabilityAnalysis;
+class CFGStmtMap;
+class LiveVariables;
+class ManagedAnalysis;
+class ParentMap;
+class PseudoConstantAnalysis;
+class ImplicitParamDecl;
+class LocationContextManager;
+class StackFrameContext;
+class AnalysisDeclContextManager;
+class LocationContext;
+
+namespace idx { class TranslationUnit; }
+
+/// The base class of a hierarchy of objects representing analyses tied
+/// to AnalysisDeclContext.
+class ManagedAnalysis {
+protected:
+ ManagedAnalysis() {}
+public:
+ virtual ~ManagedAnalysis();
+
+ // Subclasses need to implement:
+ //
+ // static const void *getTag();
+ //
+ // Which returns a fixed pointer address to distinguish classes of
+ // analysis objects. They also need to implement:
+ //
+ // static [Derived*] create(AnalysisDeclContext &Ctx);
+ //
+ // which creates the analysis object given an AnalysisDeclContext.
+};
+
+
+/// AnalysisDeclContext contains the context data for the function or method
+/// under analysis.
+class AnalysisDeclContext {
+ /// Backpoint to the AnalysisManager object that created this
+ /// AnalysisDeclContext. This may be null.
+ AnalysisDeclContextManager *Manager;
+
+ const Decl *D;
+
+ // TranslationUnit is NULL if we don't have multiple translation units.
+ idx::TranslationUnit *TU;
+
+ OwningPtr<CFG> cfg, completeCFG;
+ OwningPtr<CFGStmtMap> cfgStmtMap;
+
+ CFG::BuildOptions cfgBuildOptions;
+ CFG::BuildOptions::ForcedBlkExprs *forcedBlkExprs;
+
+ bool builtCFG, builtCompleteCFG;
+
+ OwningPtr<LiveVariables> liveness;
+ OwningPtr<LiveVariables> relaxedLiveness;
+ OwningPtr<ParentMap> PM;
+ OwningPtr<PseudoConstantAnalysis> PCA;
+ OwningPtr<CFGReverseBlockReachabilityAnalysis> CFA;
+
+ llvm::BumpPtrAllocator A;
+
+ llvm::DenseMap<const BlockDecl*,void*> *ReferencedBlockVars;
+
+ void *ManagedAnalyses;
+
+public:
+ AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
+ const Decl *D,
+ idx::TranslationUnit *TU);
+
+ AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
+ const Decl *D,
+ idx::TranslationUnit *TU,
+ const CFG::BuildOptions &BuildOptions);
+
+ ~AnalysisDeclContext();
+
+ ASTContext &getASTContext() { return D->getASTContext(); }
+ const Decl *getDecl() const { return D; }
+
+ idx::TranslationUnit *getTranslationUnit() const { return TU; }
+
+ /// Return the build options used to construct the CFG.
+ CFG::BuildOptions &getCFGBuildOptions() {
+ return cfgBuildOptions;
+ }
+
+ const CFG::BuildOptions &getCFGBuildOptions() const {
+ return cfgBuildOptions;
+ }
+
+ /// getAddEHEdges - Return true iff we are adding exceptional edges from
+ /// callExprs. If this is false, then try/catch statements and blocks
+ /// reachable from them can appear to be dead in the CFG; analysis passes must
+ /// cope with that.
+ bool getAddEHEdges() const { return cfgBuildOptions.AddEHEdges; }
+ bool getUseUnoptimizedCFG() const {
+ return !cfgBuildOptions.PruneTriviallyFalseEdges;
+ }
+ bool getAddImplicitDtors() const { return cfgBuildOptions.AddImplicitDtors; }
+ bool getAddInitializers() const { return cfgBuildOptions.AddInitializers; }
+
+ void registerForcedBlockExpression(const Stmt *stmt);
+ const CFGBlock *getBlockForRegisteredExpression(const Stmt *stmt);
+
+ Stmt *getBody() const;
+ CFG *getCFG();
+
+ CFGStmtMap *getCFGStmtMap();
+
+ CFGReverseBlockReachabilityAnalysis *getCFGReachablityAnalysis();
+
+ /// Return a version of the CFG without any edges pruned.
+ CFG *getUnoptimizedCFG();
+
+ void dumpCFG(bool ShowColors);
+
+ /// \brief Returns true if we have built a CFG for this analysis context.
+ /// Note that this doesn't correspond to whether or not a valid CFG exists; it
+ /// corresponds to whether we *attempted* to build one.
+ bool isCFGBuilt() const { return builtCFG; }
+
+ ParentMap &getParentMap();
+ PseudoConstantAnalysis *getPseudoConstantAnalysis();
+
+ typedef const VarDecl * const * referenced_decls_iterator;
+
+ std::pair<referenced_decls_iterator, referenced_decls_iterator>
+ getReferencedBlockVars(const BlockDecl *BD);
+
+ /// Return the ImplicitParamDecl* associated with 'self' if this
+ /// AnalysisDeclContext wraps an ObjCMethodDecl. Returns NULL otherwise.
+ const ImplicitParamDecl *getSelfDecl() const;
+
+ const StackFrameContext *getStackFrame(LocationContext const *Parent,
+ const Stmt *S,
+ const CFGBlock *Blk,
+ unsigned Idx);
+
+ /// Return the specified analysis object, lazily running the analysis if
+ /// necessary. Return NULL if the analysis could not run.
+ template <typename T>
+ T *getAnalysis() {
+ const void *tag = T::getTag();
+ ManagedAnalysis *&data = getAnalysisImpl(tag);
+ if (!data) {
+ data = T::create(*this);
+ }
+ return static_cast<T*>(data);
+ }
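+ // Illustrative usage (assuming a hypothetical 'ExampleAnalysis' that
+ // implements the ManagedAnalysis contract described above, and 'AC' an
+ // AnalysisDeclContext):
+ //
+ //   if (ExampleAnalysis *EA = AC.getAnalysis<ExampleAnalysis>()) {
+ //     // ... query the lazily computed analysis results ...
+ //   }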
+private:
+ ManagedAnalysis *&getAnalysisImpl(const void* tag);
+
+ LocationContextManager &getLocationContextManager();
+};
+
+class LocationContext : public llvm::FoldingSetNode {
+public:
+ enum ContextKind { StackFrame, Scope, Block };
+
+private:
+ ContextKind Kind;
+
+ // AnalysisDeclContext can't be const since some methods may modify its
+ // members.
+ AnalysisDeclContext *Ctx;
+
+ const LocationContext *Parent;
+
+protected:
+ LocationContext(ContextKind k, AnalysisDeclContext *ctx,
+ const LocationContext *parent)
+ : Kind(k), Ctx(ctx), Parent(parent) {}
+
+public:
+ virtual ~LocationContext();
+
+ ContextKind getKind() const { return Kind; }
+
+ AnalysisDeclContext *getAnalysisDeclContext() const { return Ctx; }
+
+ idx::TranslationUnit *getTranslationUnit() const {
+ return Ctx->getTranslationUnit();
+ }
+
+ const LocationContext *getParent() const { return Parent; }
+
+ bool isParentOf(const LocationContext *LC) const;
+
+ const Decl *getDecl() const { return getAnalysisDeclContext()->getDecl(); }
+
+ CFG *getCFG() const { return getAnalysisDeclContext()->getCFG(); }
+
+ template <typename T>
+ T *getAnalysis() const {
+ return getAnalysisDeclContext()->getAnalysis<T>();
+ }
+
+ ParentMap &getParentMap() const {
+ return getAnalysisDeclContext()->getParentMap();
+ }
+
+ const ImplicitParamDecl *getSelfDecl() const {
+ return Ctx->getSelfDecl();
+ }
+
+ const StackFrameContext *getCurrentStackFrame() const;
+ const StackFrameContext *
+ getStackFrameForDeclContext(const DeclContext *DC) const;
+
+ virtual void Profile(llvm::FoldingSetNodeID &ID) = 0;
+
+ static bool classof(const LocationContext*) { return true; }
+
+public:
+ static void ProfileCommon(llvm::FoldingSetNodeID &ID,
+ ContextKind ck,
+ AnalysisDeclContext *ctx,
+ const LocationContext *parent,
+ const void *data);
+};
+
+class StackFrameContext : public LocationContext {
+ // The callsite where this stack frame is established.
+ const Stmt *CallSite;
+
+ // The parent block of the callsite.
+ const CFGBlock *Block;
+
+ // The index of the callsite in the CFGBlock.
+ unsigned Index;
+
+ friend class LocationContextManager;
+ StackFrameContext(AnalysisDeclContext *ctx, const LocationContext *parent,
+ const Stmt *s, const CFGBlock *blk,
+ unsigned idx)
+ : LocationContext(StackFrame, ctx, parent), CallSite(s),
+ Block(blk), Index(idx) {}
+
+public:
+ ~StackFrameContext() {}
+
+ const Stmt *getCallSite() const { return CallSite; }
+
+ const CFGBlock *getCallSiteBlock() const { return Block; }
+
+ unsigned getIndex() const { return Index; }
+
+ void Profile(llvm::FoldingSetNodeID &ID);
+
+ static void Profile(llvm::FoldingSetNodeID &ID, AnalysisDeclContext *ctx,
+ const LocationContext *parent, const Stmt *s,
+ const CFGBlock *blk, unsigned idx) {
+ ProfileCommon(ID, StackFrame, ctx, parent, s);
+ ID.AddPointer(blk);
+ ID.AddInteger(idx);
+ }
+
+ static bool classof(const LocationContext *Ctx) {
+ return Ctx->getKind() == StackFrame;
+ }
+};
+
+class ScopeContext : public LocationContext {
+ const Stmt *Enter;
+
+ friend class LocationContextManager;
+ ScopeContext(AnalysisDeclContext *ctx, const LocationContext *parent,
+ const Stmt *s)
+ : LocationContext(Scope, ctx, parent), Enter(s) {}
+
+public:
+ ~ScopeContext() {}
+
+ void Profile(llvm::FoldingSetNodeID &ID);
+
+ static void Profile(llvm::FoldingSetNodeID &ID, AnalysisDeclContext *ctx,
+ const LocationContext *parent, const Stmt *s) {
+ ProfileCommon(ID, Scope, ctx, parent, s);
+ }
+
+ static bool classof(const LocationContext *Ctx) {
+ return Ctx->getKind() == Scope;
+ }
+};
+
+class BlockInvocationContext : public LocationContext {
+ // FIXME: Add back context-sensivity (we don't want libAnalysis to know
+ // about MemRegion).
+ const BlockDecl *BD;
+
+ friend class LocationContextManager;
+
+ BlockInvocationContext(AnalysisDeclContext *ctx,
+ const LocationContext *parent,
+ const BlockDecl *bd)
+ : LocationContext(Block, ctx, parent), BD(bd) {}
+
+public:
+ ~BlockInvocationContext() {}
+
+ const BlockDecl *getBlockDecl() const { return BD; }
+
+ void Profile(llvm::FoldingSetNodeID &ID);
+
+ static void Profile(llvm::FoldingSetNodeID &ID, AnalysisDeclContext *ctx,
+ const LocationContext *parent, const BlockDecl *bd) {
+ ProfileCommon(ID, Block, ctx, parent, bd);
+ }
+
+ static bool classof(const LocationContext *Ctx) {
+ return Ctx->getKind() == Block;
+ }
+};
+
+class LocationContextManager {
+ llvm::FoldingSet<LocationContext> Contexts;
+public:
+ ~LocationContextManager();
+
+ const StackFrameContext *getStackFrame(AnalysisDeclContext *ctx,
+ const LocationContext *parent,
+ const Stmt *s,
+ const CFGBlock *blk, unsigned idx);
+
+ const ScopeContext *getScope(AnalysisDeclContext *ctx,
+ const LocationContext *parent,
+ const Stmt *s);
+
+ /// Discard all previously created LocationContext objects.
+ void clear();
+private:
+ template <typename LOC, typename DATA>
+ const LOC *getLocationContext(AnalysisDeclContext *ctx,
+ const LocationContext *parent,
+ const DATA *d);
+};
+
+class AnalysisDeclContextManager {
+ typedef llvm::DenseMap<const Decl*, AnalysisDeclContext*> ContextMap;
+
+ ContextMap Contexts;
+ LocationContextManager LocContexts;
+ CFG::BuildOptions cfgBuildOptions;
+
+public:
+ AnalysisDeclContextManager(bool useUnoptimizedCFG = false,
+ bool addImplicitDtors = false,
+ bool addInitializers = false);
+
+ ~AnalysisDeclContextManager();
+
+ AnalysisDeclContext *getContext(const Decl *D, idx::TranslationUnit *TU = 0);
+
+ bool getUseUnoptimizedCFG() const {
+ return !cfgBuildOptions.PruneTriviallyFalseEdges;
+ }
+
+ CFG::BuildOptions &getCFGBuildOptions() {
+ return cfgBuildOptions;
+ }
+
+ const StackFrameContext *getStackFrame(AnalysisDeclContext *Ctx,
+ LocationContext const *Parent,
+ const Stmt *S,
+ const CFGBlock *Blk,
+ unsigned Idx) {
+ return LocContexts.getStackFrame(Ctx, Parent, S, Blk, Idx);
+ }
+
+ // Get the top level stack frame.
+ const StackFrameContext *getStackFrame(Decl const *D,
+ idx::TranslationUnit *TU) {
+ return LocContexts.getStackFrame(getContext(D, TU), 0, 0, 0, 0);
+ }
+
+ // Get a stack frame with parent.
+ StackFrameContext const *getStackFrame(const Decl *D,
+ LocationContext const *Parent,
+ const Stmt *S,
+ const CFGBlock *Blk,
+ unsigned Idx) {
+ return LocContexts.getStackFrame(getContext(D), Parent, S, Blk, Idx);
+ }
+
+
+ /// Discard all previously created AnalysisDeclContexts.
+ void clear();
+
+private:
+ friend class AnalysisDeclContext;
+
+ LocationContextManager &getLocationContextManager() {
+ return LocContexts;
+ }
+};
+
+} // end clang namespace
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisDiagnostic.h
new file mode 100644
index 0000000..d4e1f5f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/AnalysisDiagnostic.h
@@ -0,0 +1,28 @@
+//===--- DiagnosticAnalysis.h - Diagnostics for libanalysis -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_DIAGNOSTICANALYSIS_H
+#define LLVM_CLANG_DIAGNOSTICANALYSIS_H
+
+#include "clang/Basic/Diagnostic.h"
+
+namespace clang {
+ namespace diag {
+ enum {
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
+#define ANALYSISSTART
+#include "clang/Basic/DiagnosticAnalysisKinds.inc"
+#undef DIAG
+ NUM_BUILTIN_ANALYSIS_DIAGNOSTICS
+ };
+ } // end namespace diag
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h b/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h
new file mode 100644
index 0000000..27b22b8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/CFG.h
@@ -0,0 +1,938 @@
+//===--- CFG.h - Classes for representing and building CFGs------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CFG and CFGBuilder classes for representing and
+// building Control-Flow Graphs (CFGs) from ASTs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_CFG_H
+#define LLVM_CLANG_CFG_H
+
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/BitVector.h"
+#include "clang/AST/Stmt.h"
+#include "clang/Analysis/Support/BumpVector.h"
+#include "clang/Basic/SourceLocation.h"
+#include <cassert>
+#include <iterator>
+
+namespace clang {
+ class CXXDestructorDecl;
+ class Decl;
+ class Stmt;
+ class Expr;
+ class FieldDecl;
+ class VarDecl;
+ class CXXCtorInitializer;
+ class CXXBaseSpecifier;
+ class CXXBindTemporaryExpr;
+ class CFG;
+ class PrinterHelper;
+ class LangOptions;
+ class ASTContext;
+
+/// CFGElement - Represents a top-level expression in a basic block.
+class CFGElement {
+public:
+ enum Kind {
+ // main kind
+ Invalid,
+ Statement,
+ Initializer,
+ // dtor kind
+ AutomaticObjectDtor,
+ BaseDtor,
+ MemberDtor,
+ TemporaryDtor,
+ DTOR_BEGIN = AutomaticObjectDtor,
+ DTOR_END = TemporaryDtor
+ };
+
+protected:
+ // The int bits are used to mark the kind.
+ llvm::PointerIntPair<void *, 2> Data1;
+ llvm::PointerIntPair<void *, 2> Data2;
+
+ CFGElement(Kind kind, const void *Ptr1, const void *Ptr2 = 0)
+ : Data1(const_cast<void*>(Ptr1), ((unsigned) kind) & 0x3),
+ Data2(const_cast<void*>(Ptr2), (((unsigned) kind) >> 2) & 0x3) {}
+
+public:
+ CFGElement() {}
+
+ Kind getKind() const {
+ unsigned x = Data2.getInt();
+ x <<= 2;
+ x |= Data1.getInt();
+ return (Kind) x;
+ }
+
+ bool isValid() const { return getKind() != Invalid; }
+
+ operator bool() const { return isValid(); }
+
+ template<class ElemTy> const ElemTy *getAs() const {
+ if (llvm::isa<ElemTy>(this))
+ return static_cast<const ElemTy*>(this);
+ return 0;
+ }
+
+ static bool classof(const CFGElement *E) { return true; }
+};
+
+class CFGStmt : public CFGElement {
+public:
+ CFGStmt(Stmt *S) : CFGElement(Statement, S) {}
+
+ const Stmt *getStmt() const {
+ return static_cast<const Stmt *>(Data1.getPointer());
+ }
+
+ static bool classof(const CFGElement *E) {
+ return E->getKind() == Statement;
+ }
+};
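+
+// Illustrative usage (not part of the interface above): extracting the
+// statement from a CFGElement when it is a CFGStmt, via getAs<>. 'Element'
+// is an arbitrary CFGElement taken from a block:
+//
+//   if (const CFGStmt *CS = Element.getAs<CFGStmt>()) {
+//     const Stmt *S = CS->getStmt();
+//     // ... inspect or visit S ...
+//   }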
+
+/// CFGInitializer - Represents C++ base or member initializer from
+/// constructor's initialization list.
+class CFGInitializer : public CFGElement {
+public:
+ CFGInitializer(CXXCtorInitializer *initializer)
+ : CFGElement(Initializer, initializer) {}
+
+ CXXCtorInitializer* getInitializer() const {
+ return static_cast<CXXCtorInitializer*>(Data1.getPointer());
+ }
+
+ static bool classof(const CFGElement *E) {
+ return E->getKind() == Initializer;
+ }
+};
+
+/// CFGImplicitDtor - Represents C++ object destructor implicitly generated
+/// by compiler on various occasions.
+class CFGImplicitDtor : public CFGElement {
+protected:
+ CFGImplicitDtor(Kind kind, const void *data1, const void *data2 = 0)
+ : CFGElement(kind, data1, data2) {
+ assert(kind >= DTOR_BEGIN && kind <= DTOR_END);
+ }
+
+public:
+ const CXXDestructorDecl *getDestructorDecl(ASTContext &astContext) const;
+ bool isNoReturn(ASTContext &astContext) const;
+
+ static bool classof(const CFGElement *E) {
+ Kind kind = E->getKind();
+ return kind >= DTOR_BEGIN && kind <= DTOR_END;
+ }
+};
+
+/// CFGAutomaticObjDtor - Represents C++ object destructor implicitly generated
+/// for automatic object or temporary bound to const reference at the point
+/// of leaving its local scope.
+class CFGAutomaticObjDtor: public CFGImplicitDtor {
+public:
+ CFGAutomaticObjDtor(const VarDecl *var, const Stmt *stmt)
+ : CFGImplicitDtor(AutomaticObjectDtor, var, stmt) {}
+
+ const VarDecl *getVarDecl() const {
+ return static_cast<VarDecl*>(Data1.getPointer());
+ }
+
+ // Get the statement whose end triggered the destructor call.
+ const Stmt *getTriggerStmt() const {
+ return static_cast<Stmt*>(Data2.getPointer());
+ }
+
+ static bool classof(const CFGElement *elem) {
+ return elem->getKind() == AutomaticObjectDtor;
+ }
+};
+
+/// CFGBaseDtor - Represents C++ object destructor implicitly generated for
+/// base object in destructor.
+class CFGBaseDtor : public CFGImplicitDtor {
+public:
+ CFGBaseDtor(const CXXBaseSpecifier *base)
+ : CFGImplicitDtor(BaseDtor, base) {}
+
+ const CXXBaseSpecifier *getBaseSpecifier() const {
+ return static_cast<const CXXBaseSpecifier*>(Data1.getPointer());
+ }
+
+ static bool classof(const CFGElement *E) {
+ return E->getKind() == BaseDtor;
+ }
+};
+
+/// CFGMemberDtor - Represents C++ object destructor implicitly generated for
+/// member object in destructor.
+class CFGMemberDtor : public CFGImplicitDtor {
+public:
+ CFGMemberDtor(const FieldDecl *field)
+ : CFGImplicitDtor(MemberDtor, field, 0) {}
+
+ const FieldDecl *getFieldDecl() const {
+ return static_cast<const FieldDecl*>(Data1.getPointer());
+ }
+
+ static bool classof(const CFGElement *E) {
+ return E->getKind() == MemberDtor;
+ }
+};
+
+/// CFGTemporaryDtor - Represents C++ object destructor implicitly generated
+/// at the end of full expression for temporary object.
+class CFGTemporaryDtor : public CFGImplicitDtor {
+public:
+ CFGTemporaryDtor(CXXBindTemporaryExpr *expr)
+ : CFGImplicitDtor(TemporaryDtor, expr, 0) {}
+
+ const CXXBindTemporaryExpr *getBindTemporaryExpr() const {
+ return static_cast<const CXXBindTemporaryExpr *>(Data1.getPointer());
+ }
+
+ static bool classof(const CFGElement *E) {
+ return E->getKind() == TemporaryDtor;
+ }
+};
+
+/// CFGTerminator - Represents CFGBlock terminator statement.
+///
+/// TemporaryDtorsBranch bit is set to true if the terminator marks a branch
+/// in control flow of destructors of temporaries. In this case terminator
+/// statement is the same statement that branches control flow in evaluation
+/// of matching full expression.
+class CFGTerminator {
+ llvm::PointerIntPair<Stmt *, 1> Data;
+public:
+ CFGTerminator() {}
+ CFGTerminator(Stmt *S, bool TemporaryDtorsBranch = false)
+ : Data(S, TemporaryDtorsBranch) {}
+
+ Stmt *getStmt() { return Data.getPointer(); }
+ const Stmt *getStmt() const { return Data.getPointer(); }
+
+ bool isTemporaryDtorsBranch() const { return Data.getInt(); }
+
+ operator Stmt *() { return getStmt(); }
+ operator const Stmt *() const { return getStmt(); }
+
+ Stmt *operator->() { return getStmt(); }
+ const Stmt *operator->() const { return getStmt(); }
+
+ Stmt &operator*() { return *getStmt(); }
+ const Stmt &operator*() const { return *getStmt(); }
+
+ operator bool() const { return getStmt(); }
+};
+
+/// CFGBlock - Represents a single basic block in a source-level CFG.
+/// It consists of:
+///
+/// (1) A set of statements/expressions (which may contain subexpressions).
+/// (2) A "terminator" statement (not in the set of statements).
+/// (3) A list of successors and predecessors.
+///
+/// Terminator: The terminator represents the type of control-flow that occurs
+/// at the end of the basic block. The terminator is a Stmt* referring to an
+/// AST node that has control-flow: if-statements, breaks, loops, etc.
+/// If the control-flow is conditional, the condition expression will appear
+/// within the set of statements in the block (usually the last statement).
+///
+/// Predecessors: the order in the set of predecessors is arbitrary.
+///
+/// Successors: the order in the set of successors is NOT arbitrary. We
+/// currently have the following orderings based on the terminator:
+///
+/// Terminator Successor Ordering
+/// -----------------------------------------------------
+/// if Then Block; Else Block
+/// ? operator LHS expression; RHS expression
+/// &&, || expression that uses result of && or ||, RHS
+///
+ /// Note that any of these successors may be NULL in the case of
+ /// optimized-out edges.
+///
+class CFGBlock {
+ class ElementList {
+ typedef BumpVector<CFGElement> ImplTy;
+ ImplTy Impl;
+ public:
+ ElementList(BumpVectorContext &C) : Impl(C, 4) {}
+
+ typedef std::reverse_iterator<ImplTy::iterator> iterator;
+ typedef std::reverse_iterator<ImplTy::const_iterator> const_iterator;
+ typedef ImplTy::iterator reverse_iterator;
+ typedef ImplTy::const_iterator const_reverse_iterator;
+
+ void push_back(CFGElement e, BumpVectorContext &C) { Impl.push_back(e, C); }
+ reverse_iterator insert(reverse_iterator I, size_t Cnt, CFGElement E,
+ BumpVectorContext &C) {
+ return Impl.insert(I, Cnt, E, C);
+ }
+
+ CFGElement front() const { return Impl.back(); }
+ CFGElement back() const { return Impl.front(); }
+
+ iterator begin() { return Impl.rbegin(); }
+ iterator end() { return Impl.rend(); }
+ const_iterator begin() const { return Impl.rbegin(); }
+ const_iterator end() const { return Impl.rend(); }
+ reverse_iterator rbegin() { return Impl.begin(); }
+ reverse_iterator rend() { return Impl.end(); }
+ const_reverse_iterator rbegin() const { return Impl.begin(); }
+ const_reverse_iterator rend() const { return Impl.end(); }
+
+ CFGElement operator[](size_t i) const {
+ assert(i < Impl.size());
+ return Impl[Impl.size() - 1 - i];
+ }
+
+ size_t size() const { return Impl.size(); }
+ bool empty() const { return Impl.empty(); }
+ };
+
+ /// Stmts - The set of statements in the basic block.
+ ElementList Elements;
+
+ /// Label - An (optional) label that prefixes the executable
+ /// statements in the block. When this variable is non-NULL, it is
+ /// either an instance of LabelStmt, SwitchCase or CXXCatchStmt.
+ Stmt *Label;
+
+ /// Terminator - The terminator for a basic block that
+ /// indicates the type of control-flow that occurs between a block
+ /// and its successors.
+ CFGTerminator Terminator;
+
+ /// LoopTarget - Some blocks are used to represent the "loop edge" to
+ /// the start of a loop from within the loop body. This Stmt* will
+ /// refer to the loop statement for such blocks (and be null otherwise).
+ const Stmt *LoopTarget;
+
+ /// BlockID - A numerical ID assigned to a CFGBlock during construction
+ /// of the CFG.
+ unsigned BlockID;
+
+ /// Predecessors/Successors - Keep track of the predecessor / successor
+ /// CFG blocks.
+ typedef BumpVector<CFGBlock*> AdjacentBlocks;
+ AdjacentBlocks Preds;
+ AdjacentBlocks Succs;
+
+ /// NoReturn - This bit is set when the basic block contains a function call
+ /// or implicit destructor that is attributed as 'noreturn'. In that case,
+ /// control cannot technically ever proceed past this block. All such blocks
+ /// will have a single immediate successor: the exit block. This allows them
+ /// to be easily reached from the exit block and, using this bit, to be
+ /// quickly recognized without scanning the contents of the block.
+ ///
+ /// Optimization Note: This bit could be profitably folded with Terminator's
+ /// storage if the memory usage of CFGBlock becomes an issue.
+ unsigned HasNoReturnElement : 1;
+
+ /// Parent - The parent CFG that owns this CFGBlock.
+ CFG *Parent;
+
+public:
+ explicit CFGBlock(unsigned blockid, BumpVectorContext &C, CFG *parent)
+ : Elements(C), Label(NULL), Terminator(NULL), LoopTarget(NULL),
+ BlockID(blockid), Preds(C, 1), Succs(C, 1), HasNoReturnElement(false),
+ Parent(parent) {}
+ ~CFGBlock() {}
+
+ // Statement iterators
+ typedef ElementList::iterator iterator;
+ typedef ElementList::const_iterator const_iterator;
+ typedef ElementList::reverse_iterator reverse_iterator;
+ typedef ElementList::const_reverse_iterator const_reverse_iterator;
+
+ CFGElement front() const { return Elements.front(); }
+ CFGElement back() const { return Elements.back(); }
+
+ iterator begin() { return Elements.begin(); }
+ iterator end() { return Elements.end(); }
+ const_iterator begin() const { return Elements.begin(); }
+ const_iterator end() const { return Elements.end(); }
+
+ reverse_iterator rbegin() { return Elements.rbegin(); }
+ reverse_iterator rend() { return Elements.rend(); }
+ const_reverse_iterator rbegin() const { return Elements.rbegin(); }
+ const_reverse_iterator rend() const { return Elements.rend(); }
+
+ unsigned size() const { return Elements.size(); }
+ bool empty() const { return Elements.empty(); }
+
+ CFGElement operator[](size_t i) const { return Elements[i]; }
+
+ // CFG iterators
+ typedef AdjacentBlocks::iterator pred_iterator;
+ typedef AdjacentBlocks::const_iterator const_pred_iterator;
+ typedef AdjacentBlocks::reverse_iterator pred_reverse_iterator;
+ typedef AdjacentBlocks::const_reverse_iterator const_pred_reverse_iterator;
+
+ typedef AdjacentBlocks::iterator succ_iterator;
+ typedef AdjacentBlocks::const_iterator const_succ_iterator;
+ typedef AdjacentBlocks::reverse_iterator succ_reverse_iterator;
+ typedef AdjacentBlocks::const_reverse_iterator const_succ_reverse_iterator;
+
+ pred_iterator pred_begin() { return Preds.begin(); }
+ pred_iterator pred_end() { return Preds.end(); }
+ const_pred_iterator pred_begin() const { return Preds.begin(); }
+ const_pred_iterator pred_end() const { return Preds.end(); }
+
+ pred_reverse_iterator pred_rbegin() { return Preds.rbegin(); }
+ pred_reverse_iterator pred_rend() { return Preds.rend(); }
+ const_pred_reverse_iterator pred_rbegin() const { return Preds.rbegin(); }
+ const_pred_reverse_iterator pred_rend() const { return Preds.rend(); }
+
+ succ_iterator succ_begin() { return Succs.begin(); }
+ succ_iterator succ_end() { return Succs.end(); }
+ const_succ_iterator succ_begin() const { return Succs.begin(); }
+ const_succ_iterator succ_end() const { return Succs.end(); }
+
+ succ_reverse_iterator succ_rbegin() { return Succs.rbegin(); }
+ succ_reverse_iterator succ_rend() { return Succs.rend(); }
+ const_succ_reverse_iterator succ_rbegin() const { return Succs.rbegin(); }
+ const_succ_reverse_iterator succ_rend() const { return Succs.rend(); }
+
+ unsigned succ_size() const { return Succs.size(); }
+ bool succ_empty() const { return Succs.empty(); }
+
+ unsigned pred_size() const { return Preds.size(); }
+ bool pred_empty() const { return Preds.empty(); }
+
+
+ class FilterOptions {
+ public:
+ FilterOptions() {
+ IgnoreDefaultsWithCoveredEnums = 0;
+ }
+
+ unsigned IgnoreDefaultsWithCoveredEnums : 1;
+ };
+
+ static bool FilterEdge(const FilterOptions &F, const CFGBlock *Src,
+ const CFGBlock *Dst);
+
+ template <typename IMPL, bool IsPred>
+ class FilteredCFGBlockIterator {
+ private:
+ IMPL I, E;
+ const FilterOptions F;
+ const CFGBlock *From;
+ public:
+ explicit FilteredCFGBlockIterator(const IMPL &i, const IMPL &e,
+ const CFGBlock *from,
+ const FilterOptions &f)
+ : I(i), E(e), F(f), From(from) {}
+
+ bool hasMore() const { return I != E; }
+
+ FilteredCFGBlockIterator &operator++() {
+ do { ++I; } while (hasMore() && Filter(*I));
+ return *this;
+ }
+
+ const CFGBlock *operator*() const { return *I; }
+ private:
+ bool Filter(const CFGBlock *To) {
+ return IsPred ? FilterEdge(F, To, From) : FilterEdge(F, From, To);
+ }
+ };
+
+ typedef FilteredCFGBlockIterator<const_pred_iterator, true>
+ filtered_pred_iterator;
+
+ typedef FilteredCFGBlockIterator<const_succ_iterator, false>
+ filtered_succ_iterator;
+
+ filtered_pred_iterator filtered_pred_start_end(const FilterOptions &f) const {
+ return filtered_pred_iterator(pred_begin(), pred_end(), this, f);
+ }
+
+ filtered_succ_iterator filtered_succ_start_end(const FilterOptions &f) const {
+ return filtered_succ_iterator(succ_begin(), succ_end(), this, f);
+ }
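+
+ // Illustrative usage of the filtered iterators (hypothetical 'Blk' is a
+ // const CFGBlock*): visit successors while skipping filtered edges.
+ //
+ //   CFGBlock::FilterOptions FO;
+ //   FO.IgnoreDefaultsWithCoveredEnums = 1;
+ //   for (CFGBlock::filtered_succ_iterator
+ //          I = Blk->filtered_succ_start_end(FO); I.hasMore(); ++I) {
+ //     const CFGBlock *Succ = *I;
+ //     // ... process Succ ...
+ //   }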
+
+ // Manipulation of block contents
+
+ void setTerminator(Stmt *Statement) { Terminator = Statement; }
+ void setLabel(Stmt *Statement) { Label = Statement; }
+ void setLoopTarget(const Stmt *loopTarget) { LoopTarget = loopTarget; }
+ void setHasNoReturnElement() { HasNoReturnElement = true; }
+
+ CFGTerminator getTerminator() { return Terminator; }
+ const CFGTerminator getTerminator() const { return Terminator; }
+
+ Stmt *getTerminatorCondition();
+
+ const Stmt *getTerminatorCondition() const {
+ return const_cast<CFGBlock*>(this)->getTerminatorCondition();
+ }
+
+ const Stmt *getLoopTarget() const { return LoopTarget; }
+
+ Stmt *getLabel() { return Label; }
+ const Stmt *getLabel() const { return Label; }
+
+ bool hasNoReturnElement() const { return HasNoReturnElement; }
+
+ unsigned getBlockID() const { return BlockID; }
+
+ CFG *getParent() const { return Parent; }
+
+ void dump(const CFG *cfg, const LangOptions &LO, bool ShowColors = false) const;
+ void print(raw_ostream &OS, const CFG* cfg, const LangOptions &LO,
+ bool ShowColors) const;
+ void printTerminator(raw_ostream &OS, const LangOptions &LO) const;
+
+ void addSuccessor(CFGBlock *Block, BumpVectorContext &C) {
+ if (Block)
+ Block->Preds.push_back(this, C);
+ Succs.push_back(Block, C);
+ }
+
+ void appendStmt(Stmt *statement, BumpVectorContext &C) {
+ Elements.push_back(CFGStmt(statement), C);
+ }
+
+ void appendInitializer(CXXCtorInitializer *initializer,
+ BumpVectorContext &C) {
+ Elements.push_back(CFGInitializer(initializer), C);
+ }
+
+ void appendBaseDtor(const CXXBaseSpecifier *BS, BumpVectorContext &C) {
+ Elements.push_back(CFGBaseDtor(BS), C);
+ }
+
+ void appendMemberDtor(FieldDecl *FD, BumpVectorContext &C) {
+ Elements.push_back(CFGMemberDtor(FD), C);
+ }
+
+ void appendTemporaryDtor(CXXBindTemporaryExpr *E, BumpVectorContext &C) {
+ Elements.push_back(CFGTemporaryDtor(E), C);
+ }
+
+ void appendAutomaticObjDtor(VarDecl *VD, Stmt *S, BumpVectorContext &C) {
+ Elements.push_back(CFGAutomaticObjDtor(VD, S), C);
+ }
+
+ // Destructors must be inserted in reverse order, so insertion happens in two
+ // steps. First we prepare space for some number of elements, then we insert
+ // the elements beginning at the last position in the prepared space (see the
+ // sketch below).
+ iterator beginAutomaticObjDtorsInsert(iterator I, size_t Cnt,
+ BumpVectorContext &C) {
+ return iterator(Elements.insert(I.base(), Cnt, CFGElement(), C));
+ }
+ iterator insertAutomaticObjDtor(iterator I, VarDecl *VD, Stmt *S) {
+ *I = CFGAutomaticObjDtor(VD, S);
+ return ++I;
+ }
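+
+ // A sketch of the two-step insertion described above (hypothetical position
+ // 'I', count 'N', VarDecls 'Vars', trigger statement 'S', BumpVectorContext
+ // 'C', and CFGBlock* 'Blk'):
+ //
+ //   CFGBlock::iterator Pos = Blk->beginAutomaticObjDtorsInsert(I, N, C);
+ //   for (unsigned i = 0; i < N; ++i)
+ //     Pos = Blk->insertAutomaticObjDtor(Pos, Vars[i], S);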
+};
+
+/// CFG - Represents a source-level, intra-procedural CFG that represents the
+/// control-flow of a Stmt. The Stmt can represent an entire function body,
+/// or a single expression. A CFG will always contain one empty block that
+/// represents the Exit point of the CFG. A CFG will also contain a designated
+/// Entry block. The CFG solely represents control-flow; it consists of
+/// CFGBlocks which are simply containers of Stmt*'s in the AST the CFG
+/// was constructed from.
+class CFG {
+public:
+ //===--------------------------------------------------------------------===//
+ // CFG Construction & Manipulation.
+ //===--------------------------------------------------------------------===//
+
+ class BuildOptions {
+ llvm::BitVector alwaysAddMask;
+ public:
+ typedef llvm::DenseMap<const Stmt *, const CFGBlock*> ForcedBlkExprs;
+ ForcedBlkExprs **forcedBlkExprs;
+
+ bool PruneTriviallyFalseEdges;
+ bool AddEHEdges;
+ bool AddInitializers;
+ bool AddImplicitDtors;
+
+ bool alwaysAdd(const Stmt *stmt) const {
+ return alwaysAddMask[stmt->getStmtClass()];
+ }
+
+ BuildOptions &setAlwaysAdd(Stmt::StmtClass stmtClass, bool val = true) {
+ alwaysAddMask[stmtClass] = val;
+ return *this;
+ }
+
+ BuildOptions &setAllAlwaysAdd() {
+ alwaysAddMask.set();
+ return *this;
+ }
+
+ BuildOptions()
+ : alwaysAddMask(Stmt::lastStmtConstant, false)
+ ,forcedBlkExprs(0), PruneTriviallyFalseEdges(true)
+ ,AddEHEdges(false)
+ ,AddInitializers(false)
+ ,AddImplicitDtors(false) {}
+ };
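+
+ // Illustrative usage (not part of the interface): configuring BuildOptions
+ // before constructing a CFG. The setters chain by returning *this; the
+ // Stmt::StmtClass enumerators shown are just examples.
+ //
+ //   CFG::BuildOptions BO;
+ //   BO.AddImplicitDtors = true;
+ //   BO.setAlwaysAdd(Stmt::DeclRefExprClass)
+ //     .setAlwaysAdd(Stmt::UnaryOperatorClass);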
+
+ /// \brief Provides a custom implementation of the iterator class to have the
+ /// same interface as Function::iterator - iterator returns CFGBlock
+ /// (not a pointer to CFGBlock).
+ class graph_iterator {
+ public:
+ typedef const CFGBlock value_type;
+ typedef value_type& reference;
+ typedef value_type* pointer;
+ typedef BumpVector<CFGBlock*>::iterator ImplTy;
+
+ graph_iterator(const ImplTy &i) : I(i) {}
+
+ bool operator==(const graph_iterator &X) const { return I == X.I; }
+ bool operator!=(const graph_iterator &X) const { return I != X.I; }
+
+ reference operator*() const { return **I; }
+ pointer operator->() const { return *I; }
+ operator CFGBlock* () { return *I; }
+
+ graph_iterator &operator++() { ++I; return *this; }
+ graph_iterator &operator--() { --I; return *this; }
+
+ private:
+ ImplTy I;
+ };
+
+ class const_graph_iterator {
+ public:
+ typedef const CFGBlock value_type;
+ typedef value_type& reference;
+ typedef value_type* pointer;
+ typedef BumpVector<CFGBlock*>::const_iterator ImplTy;
+
+ const_graph_iterator(const ImplTy &i) : I(i) {}
+
+ bool operator==(const const_graph_iterator &X) const { return I == X.I; }
+ bool operator!=(const const_graph_iterator &X) const { return I != X.I; }
+
+ reference operator*() const { return **I; }
+ pointer operator->() const { return *I; }
+ operator CFGBlock* () const { return *I; }
+
+ const_graph_iterator &operator++() { ++I; return *this; }
+ const_graph_iterator &operator--() { --I; return *this; }
+
+ private:
+ ImplTy I;
+ };
+
+ /// buildCFG - Builds a CFG from an AST. The responsibility to free the
+ /// constructed CFG belongs to the caller.
+ static CFG* buildCFG(const Decl *D, Stmt *AST, ASTContext *C,
+ const BuildOptions &BO);
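+
+ // Illustrative usage (assuming 'FD' is a FunctionDecl* with a body and
+ // 'Ctx' is its ASTContext): build a CFG and let an OwningPtr release it.
+ //
+ //   CFG::BuildOptions BO;
+ //   OwningPtr<CFG> cfg(CFG::buildCFG(FD, FD->getBody(), &Ctx, BO));
+ //   if (cfg) {
+ //     // ... iterate cfg->begin() .. cfg->end() ...
+ //   }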
+
+ /// createBlock - Create a new block in the CFG. The CFG owns the block;
+ /// the caller should not directly free it.
+ CFGBlock *createBlock();
+
+ /// setEntry - Set the entry block of the CFG. This is typically used
+ /// only during CFG construction. Most CFG clients expect that the
+ /// entry block has no predecessors and contains no statements.
+ void setEntry(CFGBlock *B) { Entry = B; }
+
+ /// setIndirectGotoBlock - Set the block used for indirect goto jumps.
+ /// This is typically used only during CFG construction.
+ void setIndirectGotoBlock(CFGBlock *B) { IndirectGotoBlock = B; }
+
+ //===--------------------------------------------------------------------===//
+ // Block Iterators
+ //===--------------------------------------------------------------------===//
+
+ typedef BumpVector<CFGBlock*> CFGBlockListTy;
+ typedef CFGBlockListTy::iterator iterator;
+ typedef CFGBlockListTy::const_iterator const_iterator;
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+
+ CFGBlock & front() { return *Blocks.front(); }
+ CFGBlock & back() { return *Blocks.back(); }
+
+ iterator begin() { return Blocks.begin(); }
+ iterator end() { return Blocks.end(); }
+ const_iterator begin() const { return Blocks.begin(); }
+ const_iterator end() const { return Blocks.end(); }
+
+ graph_iterator nodes_begin() { return graph_iterator(Blocks.begin()); }
+ graph_iterator nodes_end() { return graph_iterator(Blocks.end()); }
+ const_graph_iterator nodes_begin() const {
+ return const_graph_iterator(Blocks.begin());
+ }
+ const_graph_iterator nodes_end() const {
+ return const_graph_iterator(Blocks.end());
+ }
+
+ reverse_iterator rbegin() { return Blocks.rbegin(); }
+ reverse_iterator rend() { return Blocks.rend(); }
+ const_reverse_iterator rbegin() const { return Blocks.rbegin(); }
+ const_reverse_iterator rend() const { return Blocks.rend(); }
+
+ CFGBlock & getEntry() { return *Entry; }
+ const CFGBlock & getEntry() const { return *Entry; }
+ CFGBlock & getExit() { return *Exit; }
+ const CFGBlock & getExit() const { return *Exit; }
+
+ CFGBlock * getIndirectGotoBlock() { return IndirectGotoBlock; }
+ const CFGBlock * getIndirectGotoBlock() const { return IndirectGotoBlock; }
+
+ typedef std::vector<const CFGBlock*>::const_iterator try_block_iterator;
+ try_block_iterator try_blocks_begin() const {
+ return TryDispatchBlocks.begin();
+ }
+ try_block_iterator try_blocks_end() const {
+ return TryDispatchBlocks.end();
+ }
+
+ void addTryDispatchBlock(const CFGBlock *block) {
+ TryDispatchBlocks.push_back(block);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Member templates useful for various batch operations over CFGs.
+ //===--------------------------------------------------------------------===//
+
+ template <typename CALLBACK>
+ void VisitBlockStmts(CALLBACK& O) const {
+ for (const_iterator I=begin(), E=end(); I != E; ++I)
+ for (CFGBlock::const_iterator BI=(*I)->begin(), BE=(*I)->end();
+ BI != BE; ++BI) {
+ if (const CFGStmt *stmt = BI->getAs<CFGStmt>())
+ O(const_cast<Stmt*>(stmt->getStmt()));
+ }
+ }
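+
+ // Illustrative usage (hypothetical 'StmtCounter' callback, 'cfg' a CFG):
+ // count all block-level statements in the CFG.
+ //
+ //   struct StmtCounter {
+ //     unsigned Count;
+ //     StmtCounter() : Count(0) {}
+ //     void operator()(Stmt *S) { ++Count; }
+ //   };
+ //
+ //   StmtCounter Counter;
+ //   cfg.VisitBlockStmts(Counter);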
+
+ //===--------------------------------------------------------------------===//
+ // CFG Introspection.
+ //===--------------------------------------------------------------------===//
+
+ struct BlkExprNumTy {
+ const signed Idx;
+ explicit BlkExprNumTy(signed idx) : Idx(idx) {}
+ explicit BlkExprNumTy() : Idx(-1) {}
+ operator bool() const { return Idx >= 0; }
+ operator unsigned() const { assert(Idx >=0); return (unsigned) Idx; }
+ };
+
+ bool isBlkExpr(const Stmt *S) { return getBlkExprNum(S); }
+ bool isBlkExpr(const Stmt *S) const {
+ return const_cast<CFG*>(this)->isBlkExpr(S);
+ }
+ BlkExprNumTy getBlkExprNum(const Stmt *S);
+ unsigned getNumBlkExprs();
+
+ /// getNumBlockIDs - Returns the total number of BlockIDs allocated (which
+ /// start at 0).
+ unsigned getNumBlockIDs() const { return NumBlockIDs; }
+
+ /// size - Return the total number of CFGBlocks within the CFG.
+ /// This is simply an alias for getNumBlockIDs(); it is necessary
+ /// because the dominator implementation needs such an interface.
+ unsigned size() const { return NumBlockIDs; }
+
+ //===--------------------------------------------------------------------===//
+ // CFG Debugging: Pretty-Printing and Visualization.
+ //===--------------------------------------------------------------------===//
+
+ void viewCFG(const LangOptions &LO) const;
+ void print(raw_ostream &OS, const LangOptions &LO, bool ShowColors) const;
+ void dump(const LangOptions &LO, bool ShowColors) const;
+
+ //===--------------------------------------------------------------------===//
+ // Internal: constructors and data.
+ //===--------------------------------------------------------------------===//
+
+ CFG() : Entry(NULL), Exit(NULL), IndirectGotoBlock(NULL), NumBlockIDs(0),
+ BlkExprMap(NULL), Blocks(BlkBVC, 10) {}
+
+ ~CFG();
+
+ llvm::BumpPtrAllocator& getAllocator() {
+ return BlkBVC.getAllocator();
+ }
+
+ BumpVectorContext &getBumpVectorContext() {
+ return BlkBVC;
+ }
+
+private:
+ CFGBlock *Entry;
+ CFGBlock *Exit;
+ CFGBlock* IndirectGotoBlock; // Special block to contain collective dispatch
+ // for indirect gotos
+ unsigned NumBlockIDs;
+
+ // BlkExprMap - An opaque pointer to prevent inclusion of DenseMap.h.
+ // It represents a map from Expr* to integers to record the set of
+ // block-level expressions and their "statement number" in the CFG.
+ void * BlkExprMap;
+
+ BumpVectorContext BlkBVC;
+
+ CFGBlockListTy Blocks;
+
+ /// C++ 'try' statements are modeled with an indirect dispatch block.
+ /// This is the collection of such blocks present in the CFG.
+ std::vector<const CFGBlock *> TryDispatchBlocks;
+
+};
+} // end namespace clang
+
+//===----------------------------------------------------------------------===//
+// GraphTraits specializations for CFG basic block graphs (source-level CFGs)
+//===----------------------------------------------------------------------===//
+
+namespace llvm {
+
+/// Implement simplify_type for CFGTerminator, so that we can dyn_cast from
+/// CFGTerminator to a specific Stmt class.
+template <> struct simplify_type<const ::clang::CFGTerminator> {
+ typedef const ::clang::Stmt *SimpleType;
+ static SimpleType getSimplifiedValue(const ::clang::CFGTerminator &Val) {
+ return Val.getStmt();
+ }
+};
+
+template <> struct simplify_type< ::clang::CFGTerminator> {
+ typedef ::clang::Stmt *SimpleType;
+ static SimpleType getSimplifiedValue(const ::clang::CFGTerminator &Val) {
+ return const_cast<SimpleType>(Val.getStmt());
+ }
+};
+
+// Traits for: CFGBlock
+
+template <> struct GraphTraits< ::clang::CFGBlock *> {
+ typedef ::clang::CFGBlock NodeType;
+ typedef ::clang::CFGBlock::succ_iterator ChildIteratorType;
+
+ static NodeType* getEntryNode(::clang::CFGBlock *BB)
+ { return BB; }
+
+ static inline ChildIteratorType child_begin(NodeType* N)
+ { return N->succ_begin(); }
+
+ static inline ChildIteratorType child_end(NodeType* N)
+ { return N->succ_end(); }
+};
+
+template <> struct GraphTraits< const ::clang::CFGBlock *> {
+ typedef const ::clang::CFGBlock NodeType;
+ typedef ::clang::CFGBlock::const_succ_iterator ChildIteratorType;
+
+ static NodeType* getEntryNode(const clang::CFGBlock *BB)
+ { return BB; }
+
+ static inline ChildIteratorType child_begin(NodeType* N)
+ { return N->succ_begin(); }
+
+ static inline ChildIteratorType child_end(NodeType* N)
+ { return N->succ_end(); }
+};
+
+template <> struct GraphTraits<Inverse< ::clang::CFGBlock*> > {
+ typedef ::clang::CFGBlock NodeType;
+ typedef ::clang::CFGBlock::const_pred_iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(Inverse< ::clang::CFGBlock*> G)
+ { return G.Graph; }
+
+ static inline ChildIteratorType child_begin(NodeType* N)
+ { return N->pred_begin(); }
+
+ static inline ChildIteratorType child_end(NodeType* N)
+ { return N->pred_end(); }
+};
+
+template <> struct GraphTraits<Inverse<const ::clang::CFGBlock*> > {
+ typedef const ::clang::CFGBlock NodeType;
+ typedef ::clang::CFGBlock::const_pred_iterator ChildIteratorType;
+
+ static NodeType *getEntryNode(Inverse<const ::clang::CFGBlock*> G)
+ { return G.Graph; }
+
+ static inline ChildIteratorType child_begin(NodeType* N)
+ { return N->pred_begin(); }
+
+ static inline ChildIteratorType child_end(NodeType* N)
+ { return N->pred_end(); }
+};
+
+// Traits for: CFG
+
+template <> struct GraphTraits< ::clang::CFG* >
+ : public GraphTraits< ::clang::CFGBlock *> {
+
+ typedef ::clang::CFG::graph_iterator nodes_iterator;
+
+ static NodeType *getEntryNode(::clang::CFG* F) { return &F->getEntry(); }
+ static nodes_iterator nodes_begin(::clang::CFG* F) { return F->nodes_begin();}
+ static nodes_iterator nodes_end(::clang::CFG* F) { return F->nodes_end(); }
+ static unsigned size(::clang::CFG* F) { return F->size(); }
+};
+
+template <> struct GraphTraits<const ::clang::CFG* >
+ : public GraphTraits<const ::clang::CFGBlock *> {
+
+ typedef ::clang::CFG::const_graph_iterator nodes_iterator;
+
+ static NodeType *getEntryNode( const ::clang::CFG* F) {
+ return &F->getEntry();
+ }
+ static nodes_iterator nodes_begin( const ::clang::CFG* F) {
+ return F->nodes_begin();
+ }
+ static nodes_iterator nodes_end( const ::clang::CFG* F) {
+ return F->nodes_end();
+ }
+ static unsigned size(const ::clang::CFG* F) {
+ return F->size();
+ }
+};
+
+template <> struct GraphTraits<Inverse< ::clang::CFG*> >
+ : public GraphTraits<Inverse< ::clang::CFGBlock*> > {
+
+ typedef ::clang::CFG::graph_iterator nodes_iterator;
+
+ static NodeType *getEntryNode( ::clang::CFG* F) { return &F->getExit(); }
+ static nodes_iterator nodes_begin( ::clang::CFG* F) {return F->nodes_begin();}
+ static nodes_iterator nodes_end( ::clang::CFG* F) { return F->nodes_end(); }
+};
+
+template <> struct GraphTraits<Inverse<const ::clang::CFG*> >
+ : public GraphTraits<Inverse<const ::clang::CFGBlock*> > {
+
+ typedef ::clang::CFG::const_graph_iterator nodes_iterator;
+
+ static NodeType *getEntryNode(const ::clang::CFG* F) { return &F->getExit(); }
+ static nodes_iterator nodes_begin(const ::clang::CFG* F) {
+ return F->nodes_begin();
+ }
+ static nodes_iterator nodes_end(const ::clang::CFG* F) {
+ return F->nodes_end();
+ }
+};
+} // end llvm namespace
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/CFGStmtMap.h b/contrib/llvm/tools/clang/include/clang/Analysis/CFGStmtMap.h
new file mode 100644
index 0000000..6e8e140
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/CFGStmtMap.h
@@ -0,0 +1,52 @@
+//===--- CFGStmtMap.h - Map from Stmt* to CFGBlock* -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CFGStmtMap class, which defines a mapping from
+// Stmt* to CFGBlock*
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_CFGSTMTMAP_H
+#define LLVM_CLANG_CFGSTMTMAP_H
+
+#include "clang/Analysis/CFG.h"
+
+namespace clang {
+
+class CFG;
+class CFGBlock;
+class ParentMap;
+class Stmt;
+
+class CFGStmtMap {
+ ParentMap *PM;
+ void *M;
+
+ CFGStmtMap(ParentMap *pm, void *m) : PM(pm), M(m) {}
+
+public:
+ ~CFGStmtMap();
+
+ /// Returns a new CFGStmtMap for the given CFG. It is the caller's
+ /// responsibility to 'delete' this object when done using it.
+ static CFGStmtMap *Build(CFG* C, ParentMap *PM);
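+
+ /// Illustrative usage (assuming 'cfg' and 'PM' were built for the same
+ /// body, and 'S' is a Stmt* of interest):
+ ///
+ ///   OwningPtr<CFGStmtMap> Map(CFGStmtMap::Build(cfg, PM));
+ ///   if (const CFGBlock *B = Map->getBlock(S)) {
+ ///     // ... S is mapped to block B ...
+ ///   }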
+
+ /// Returns the CFGBlock the specified Stmt* appears in. For Stmt* that
+ /// are terminators, the CFGBlock is the block in which they appear as a
+ /// terminator, not the block in which they appear as a block-level
+ /// expression (e.g., '&&'). CaseStmts and LabelStmts map to the CFGBlock
+ /// they label.
+ CFGBlock *getBlock(Stmt * S);
+
+ const CFGBlock *getBlock(const Stmt * S) const {
+ return const_cast<CFGStmtMap*>(this)->getBlock(const_cast<Stmt*>(S));
+ }
+};
+
+} // end clang namespace
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/CallGraph.h b/contrib/llvm/tools/clang/include/clang/Analysis/CallGraph.h
new file mode 100644
index 0000000..9b68073
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/CallGraph.h
@@ -0,0 +1,257 @@
+//== CallGraph.h - AST-based Call graph ------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the AST-based CallGraph.
+//
+// A call graph for functions whose definitions/bodies are available in the
+// current translation unit. The graph has a "virtual" root node that contains
+// edges to all externally available functions.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_CALLGRAPH
+#define LLVM_CLANG_ANALYSIS_CALLGRAPH
+
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/SetVector.h"
+
+namespace clang {
+class CallGraphNode;
+
+/// \brief The AST-based call graph.
+///
+/// The call graph extends itself with the given declarations by implementing
+/// the recursive AST visitor, which constructs the graph by visiting the given
+/// declarations.
+class CallGraph : public RecursiveASTVisitor<CallGraph> {
+ friend class CallGraphNode;
+
+ typedef llvm::DenseMap<const Decl *, CallGraphNode *> FunctionMapTy;
+
+ /// FunctionMap owns all CallGraphNodes.
+ FunctionMapTy FunctionMap;
+
+ /// This is a virtual root node that has edges to all the global functions -
+ /// 'main' or functions accessible from other translation units.
+ CallGraphNode *Root;
+
+ /// The list of nodes that have no parent. These are unreachable from Root.
+ /// Declarations can get to this list due to imprecision in the graph; for
+ /// example, we do not track functions whose addresses were taken.
+ llvm::SetVector<CallGraphNode *> ParentlessNodes;
+
+public:
+ CallGraph();
+ ~CallGraph();
+
+ /// \brief Populate the call graph with the functions in the given
+ /// declaration.
+ ///
+ /// Recursively walks the declaration to find all the dependent Decls as well.
+ void addToCallGraph(Decl *D) {
+ TraverseDecl(D);
+ }
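+
+ /// Illustrative usage (assuming 'TU' is the TranslationUnitDecl* and 'D'
+ /// is a Decl* whose body is defined in this translation unit):
+ ///
+ ///   CallGraph CG;
+ ///   CG.addToCallGraph(TU);
+ ///   if (CallGraphNode *N = CG.getNode(D))
+ ///     for (CallGraphNode::iterator I = N->begin(), E = N->end(); I != E; ++I)
+ ///       (*I)->print(llvm::errs());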
+
+ /// \brief Determine if a declaration should be included in the graph.
+ static bool includeInGraph(const Decl *D);
+
+ /// \brief Lookup the node for the given declaration.
+ CallGraphNode *getNode(const Decl *) const;
+
+ /// \brief Lookup the node for the given declaration. If none found, insert
+ /// one into the graph.
+ CallGraphNode *getOrInsertNode(Decl *);
+
+ /// Iterators through all the elements in the graph. Note, this gives
+ /// non-deterministic order.
+ typedef FunctionMapTy::iterator iterator;
+ typedef FunctionMapTy::const_iterator const_iterator;
+ iterator begin() { return FunctionMap.begin(); }
+ iterator end() { return FunctionMap.end(); }
+ const_iterator begin() const { return FunctionMap.begin(); }
+ const_iterator end() const { return FunctionMap.end(); }
+
+ /// \brief Get the number of nodes in the graph.
+ unsigned size() const { return FunctionMap.size(); }
+
+ /// \brief Get the virtual root of the graph; all the functions available
+ /// externally are represented as callees of this node.
+ CallGraphNode *getRoot() const { return Root; }
+
+ /// Iterators through all the nodes of the graph that have no parent. These
+ /// are the unreachable nodes, which are either unused or arise because we
+ /// failed to add a call edge due to analysis imprecision.
+ typedef llvm::SetVector<CallGraphNode *>::iterator nodes_iterator;
+ typedef llvm::SetVector<CallGraphNode *>::const_iterator const_nodes_iterator;
+ nodes_iterator parentless_begin() { return ParentlessNodes.begin(); }
+ nodes_iterator parentless_end() { return ParentlessNodes.end(); }
+ const_nodes_iterator
+ parentless_begin() const { return ParentlessNodes.begin(); }
+ const_nodes_iterator
+ parentless_end() const { return ParentlessNodes.end(); }
+
+ void print(raw_ostream &os) const;
+ void dump() const;
+ void viewGraph() const;
+
+ /// Part of recursive declaration visitation.
+ bool VisitFunctionDecl(FunctionDecl *FD) {
+ // We skip function template definitions, as their semantics is
+ // only determined when they are instantiated.
+ if (includeInGraph(FD))
+ // If this function has external linkage, anything could call it.
+ // Note, we are not precise here. For example, the function could have
+ // its address taken.
+ addNodeForDecl(FD, FD->isGlobal());
+ return true;
+ }
+
+ /// Part of recursive declaration visitation.
+ bool VisitObjCMethodDecl(ObjCMethodDecl *MD) {
+ if (includeInGraph(MD))
+ addNodeForDecl(MD, true);
+ return true;
+ }
+
+private:
+ /// \brief Add the given declaration to the call graph.
+ void addNodeForDecl(Decl *D, bool IsGlobal);
+
+ /// \brief Allocate a new node in the graph.
+ CallGraphNode *allocateNewNode(Decl *);
+};
+
+class CallGraphNode {
+public:
+ typedef CallGraphNode* CallRecord;
+
+private:
+ /// \brief The function/method declaration.
+ Decl *FD;
+
+ /// \brief The list of functions called from this node.
+ // Small vector might be more efficient since we are only tracking functions
+ // whose definition is in the current TU.
+ llvm::SmallVector<CallRecord, 5> CalledFunctions;
+
+public:
+ CallGraphNode(Decl *D) : FD(D) {}
+
+ typedef llvm::SmallVector<CallRecord, 5>::iterator iterator;
+ typedef llvm::SmallVector<CallRecord, 5>::const_iterator const_iterator;
+
+ /// Iterators through all the callees/children of the node.
+ inline iterator begin() { return CalledFunctions.begin(); }
+ inline iterator end() { return CalledFunctions.end(); }
+ inline const_iterator begin() const { return CalledFunctions.begin(); }
+ inline const_iterator end() const { return CalledFunctions.end(); }
+
+ inline bool empty() const {return CalledFunctions.empty(); }
+ inline unsigned size() const {return CalledFunctions.size(); }
+
+ void addCallee(CallGraphNode *N, CallGraph *CG) {
+ CalledFunctions.push_back(N);
+ CG->ParentlessNodes.remove(N);
+ }
+
+ Decl *getDecl() const { return FD; }
+
+ StringRef getName() const;
+
+ void print(raw_ostream &os) const;
+ void dump() const;
+};
+
+} // end clang namespace
+
+// Graph traits for iteration, viewing.
+namespace llvm {
+template <> struct GraphTraits<clang::CallGraphNode*> {
+ typedef clang::CallGraphNode NodeType;
+ typedef clang::CallGraphNode::CallRecord CallRecordTy;
+ typedef std::pointer_to_unary_function<CallRecordTy,
+ clang::CallGraphNode*> CGNDerefFun;
+ static NodeType *getEntryNode(clang::CallGraphNode *CGN) { return CGN; }
+ typedef mapped_iterator<NodeType::iterator, CGNDerefFun> ChildIteratorType;
+ static inline ChildIteratorType child_begin(NodeType *N) {
+ return map_iterator(N->begin(), CGNDerefFun(CGNDeref));
+ }
+ static inline ChildIteratorType child_end (NodeType *N) {
+ return map_iterator(N->end(), CGNDerefFun(CGNDeref));
+ }
+ static clang::CallGraphNode *CGNDeref(CallRecordTy P) {
+ return P;
+ }
+};
+
+template <> struct GraphTraits<const clang::CallGraphNode*> {
+ typedef const clang::CallGraphNode NodeType;
+ typedef NodeType::const_iterator ChildIteratorType;
+ static NodeType *getEntryNode(const clang::CallGraphNode *CGN) { return CGN; }
+ static inline ChildIteratorType child_begin(NodeType *N) { return N->begin();}
+ static inline ChildIteratorType child_end (NodeType *N) { return N->end(); }
+};
+
+template <> struct GraphTraits<clang::CallGraph*>
+ : public GraphTraits<clang::CallGraphNode*> {
+
+ static NodeType *getEntryNode(clang::CallGraph *CGN) {
+ return CGN->getRoot(); // Start at the external node!
+ }
+ typedef std::pair<const clang::Decl*, clang::CallGraphNode*> PairTy;
+ typedef std::pointer_to_unary_function<PairTy, clang::CallGraphNode&> DerefFun;
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+ typedef mapped_iterator<clang::CallGraph::iterator, DerefFun> nodes_iterator;
+
+ static nodes_iterator nodes_begin(clang::CallGraph *CG) {
+ return map_iterator(CG->begin(), DerefFun(CGdereference));
+ }
+ static nodes_iterator nodes_end (clang::CallGraph *CG) {
+ return map_iterator(CG->end(), DerefFun(CGdereference));
+ }
+ static clang::CallGraphNode &CGdereference(PairTy P) {
+ return *(P.second);
+ }
+
+ static unsigned size(clang::CallGraph *CG) {
+ return CG->size();
+ }
+};
+
+template <> struct GraphTraits<const clang::CallGraph*> :
+ public GraphTraits<const clang::CallGraphNode*> {
+ static NodeType *getEntryNode(const clang::CallGraph *CGN) {
+ return CGN->getRoot();
+ }
+ typedef std::pair<const clang::Decl*, clang::CallGraphNode*> PairTy;
+ typedef std::pointer_to_unary_function<PairTy, clang::CallGraphNode&> DerefFun;
+ // nodes_iterator/begin/end - Allow iteration over all nodes in the graph
+ typedef mapped_iterator<clang::CallGraph::const_iterator,
+ DerefFun> nodes_iterator;
+
+ static nodes_iterator nodes_begin(const clang::CallGraph *CG) {
+ return map_iterator(CG->begin(), DerefFun(CGdereference));
+ }
+ static nodes_iterator nodes_end(const clang::CallGraph *CG) {
+ return map_iterator(CG->end(), DerefFun(CGdereference));
+ }
+ static clang::CallGraphNode &CGdereference(PairTy P) {
+ return *(P.second);
+ }
+
+ static unsigned size(const clang::CallGraph *CG) {
+ return CG->size();
+ }
+};
+
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/DomainSpecific/CocoaConventions.h b/contrib/llvm/tools/clang/include/clang/Analysis/DomainSpecific/CocoaConventions.h
new file mode 100644
index 0000000..e6a2f13
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/DomainSpecific/CocoaConventions.h
@@ -0,0 +1,42 @@
+//===- CocoaConventions.h - Special handling of Cocoa conventions -*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements cocoa naming convention analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_DS_COCOA
+#define LLVM_CLANG_ANALYSIS_DS_COCOA
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+class FunctionDecl;
+class QualType;
+
+namespace ento {
+namespace cocoa {
+
+ bool isRefType(QualType RetTy, StringRef Prefix,
+ StringRef Name = StringRef());
+
+ bool isCocoaObjectRef(QualType T);
+
+}
+
+namespace coreFoundation {
+ bool isCFObjectRef(QualType T);
+
+ bool followsCreateRule(const FunctionDecl *FD);
+}
+
+}} // end: "clang:ento"
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/FlowSensitive/DataflowSolver.h b/contrib/llvm/tools/clang/include/clang/Analysis/FlowSensitive/DataflowSolver.h
new file mode 100644
index 0000000..017da63
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/FlowSensitive/DataflowSolver.h
@@ -0,0 +1,343 @@
+//===--- DataflowSolver.h - Skeleton Dataflow Analysis Code -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines skeleton code for implementing dataflow analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSES_DATAFLOW_SOLVER
+#define LLVM_CLANG_ANALYSES_DATAFLOW_SOLVER
+
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/Analysis/FlowSensitive/DataflowValues.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "functional" // STL
+
+namespace clang {
+
+//===----------------------------------------------------------------------===//
+/// DataflowWorkListTy - Data structure representing the worklist used for
+/// dataflow algorithms.
+//===----------------------------------------------------------------------===//
+
+class DataflowWorkListTy {
+ llvm::DenseMap<const CFGBlock*, unsigned char> BlockSet;
+ SmallVector<const CFGBlock *, 10> BlockQueue;
+public:
+ /// enqueue - Add a block to the worklist. Blocks already on the
+ /// worklist are not added a second time.
+ void enqueue(const CFGBlock *B) {
+ unsigned char &x = BlockSet[B];
+ if (x == 1)
+ return;
+ x = 1;
+ BlockQueue.push_back(B);
+ }
+
+ /// dequeue - Remove a block from the worklist.
+ const CFGBlock *dequeue() {
+ assert(!BlockQueue.empty());
+ const CFGBlock *B = BlockQueue.back();
+ BlockQueue.pop_back();
+ BlockSet[B] = 0;
+ return B;
+ }
+
+ /// isEmpty - Return true if the worklist is empty.
+ bool isEmpty() const { return BlockQueue.empty(); }
+};
+
+//===----------------------------------------------------------------------===//
+// BlockItrTraits - Traits classes that allow transparent iteration
+// over successors/predecessors of a block depending on the direction
+// of our dataflow analysis.
+//===----------------------------------------------------------------------===//
+
+namespace dataflow {
+template<typename Tag> struct ItrTraits {};
+
+template <> struct ItrTraits<forward_analysis_tag> {
+ typedef CFGBlock::const_pred_iterator PrevBItr;
+ typedef CFGBlock::const_succ_iterator NextBItr;
+ typedef CFGBlock::const_iterator StmtItr;
+
+ static PrevBItr PrevBegin(const CFGBlock *B) { return B->pred_begin(); }
+ static PrevBItr PrevEnd(const CFGBlock *B) { return B->pred_end(); }
+
+ static NextBItr NextBegin(const CFGBlock *B) { return B->succ_begin(); }
+ static NextBItr NextEnd(const CFGBlock *B) { return B->succ_end(); }
+
+ static StmtItr StmtBegin(const CFGBlock *B) { return B->begin(); }
+ static StmtItr StmtEnd(const CFGBlock *B) { return B->end(); }
+
+ static BlockEdge PrevEdge(const CFGBlock *B, const CFGBlock *Prev) {
+ return BlockEdge(Prev, B, 0);
+ }
+
+ static BlockEdge NextEdge(const CFGBlock *B, const CFGBlock *Next) {
+ return BlockEdge(B, Next, 0);
+ }
+};
+
+template <> struct ItrTraits<backward_analysis_tag> {
+ typedef CFGBlock::const_succ_iterator PrevBItr;
+ typedef CFGBlock::const_pred_iterator NextBItr;
+ typedef CFGBlock::const_reverse_iterator StmtItr;
+
+ static PrevBItr PrevBegin(const CFGBlock *B) { return B->succ_begin(); }
+ static PrevBItr PrevEnd(const CFGBlock *B) { return B->succ_end(); }
+
+ static NextBItr NextBegin(const CFGBlock *B) { return B->pred_begin(); }
+ static NextBItr NextEnd(const CFGBlock *B) { return B->pred_end(); }
+
+ static StmtItr StmtBegin(const CFGBlock *B) { return B->rbegin(); }
+ static StmtItr StmtEnd(const CFGBlock *B) { return B->rend(); }
+
+ static BlockEdge PrevEdge(const CFGBlock *B, const CFGBlock *Prev) {
+ return BlockEdge(B, Prev, 0);
+ }
+
+ static BlockEdge NextEdge(const CFGBlock *B, const CFGBlock *Next) {
+ return BlockEdge(Next, B, 0);
+ }
+};
+} // end namespace dataflow
+
+//===----------------------------------------------------------------------===//
+/// DataflowSolver - Generic dataflow solver.
+//===----------------------------------------------------------------------===//
+
+template <typename _DFValuesTy, // Usually a subclass of DataflowValues
+ typename _TransferFuncsTy,
+ typename _MergeOperatorTy,
+ typename _Equal = std::equal_to<typename _DFValuesTy::ValTy> >
+class DataflowSolver {
+
+ //===----------------------------------------------------===//
+ // Type declarations.
+ //===----------------------------------------------------===//
+
+public:
+ typedef _DFValuesTy DFValuesTy;
+ typedef _TransferFuncsTy TransferFuncsTy;
+ typedef _MergeOperatorTy MergeOperatorTy;
+
+ typedef typename _DFValuesTy::AnalysisDirTag AnalysisDirTag;
+ typedef typename _DFValuesTy::ValTy ValTy;
+ typedef typename _DFValuesTy::EdgeDataMapTy EdgeDataMapTy;
+ typedef typename _DFValuesTy::BlockDataMapTy BlockDataMapTy;
+
+ typedef dataflow::ItrTraits<AnalysisDirTag> ItrTraits;
+ typedef typename ItrTraits::NextBItr NextBItr;
+ typedef typename ItrTraits::PrevBItr PrevBItr;
+ typedef typename ItrTraits::StmtItr StmtItr;
+
+ //===----------------------------------------------------===//
+ // External interface: constructing and running the solver.
+ //===----------------------------------------------------===//
+
+public:
+ DataflowSolver(DFValuesTy& d) : D(d), TF(d.getAnalysisData()) {}
+ ~DataflowSolver() {}
+
+ /// runOnCFG - Computes dataflow values for all blocks in a CFG.
+ void runOnCFG(CFG& cfg, bool recordStmtValues = false) {
+ // Set initial dataflow values and boundary conditions.
+ D.InitializeValues(cfg);
+ // Solve the dataflow equations. This will populate D.EdgeDataMap
+ // with dataflow values.
+ SolveDataflowEquations(cfg, recordStmtValues);
+ }
+
+ /// runOnBlock - Computes dataflow values for a given block. This
+ /// should usually be invoked only after previously computing
+ /// dataflow values using runOnCFG, as runOnBlock is intended
+ /// only for querying the dataflow values within a block
+ /// with an Observer object.
+ void runOnBlock(const CFGBlock *B, bool recordStmtValues) {
+ BlockDataMapTy& M = D.getBlockDataMap();
+ typename BlockDataMapTy::iterator I = M.find(B);
+
+ if (I != M.end()) {
+ TF.getVal().copyValues(I->second);
+ ProcessBlock(B, recordStmtValues, AnalysisDirTag());
+ }
+ }
+
+ void runOnBlock(const CFGBlock &B, bool recordStmtValues) {
+ runOnBlock(&B, recordStmtValues);
+ }
+ void runOnBlock(CFG::iterator &I, bool recordStmtValues) {
+ runOnBlock(*I, recordStmtValues);
+ }
+ void runOnBlock(CFG::const_iterator &I, bool recordStmtValues) {
+ runOnBlock(*I, recordStmtValues);
+ }
+
+ void runOnAllBlocks(const CFG& cfg, bool recordStmtValues = false) {
+ for (CFG::const_iterator I=cfg.begin(), E=cfg.end(); I!=E; ++I)
+ runOnBlock(I, recordStmtValues);
+ }
+
+ //===----------------------------------------------------===//
+ // Internal solver logic.
+ //===----------------------------------------------------===//
+
+private:
+
+ /// SolveDataflowEquations - Perform the actual worklist algorithm
+ /// to compute dataflow values.
+ void SolveDataflowEquations(CFG& cfg, bool recordStmtValues) {
+ EnqueueBlocksOnWorklist(cfg, AnalysisDirTag());
+
+ while (!WorkList.isEmpty()) {
+ const CFGBlock *B = WorkList.dequeue();
+ ProcessMerge(cfg, B);
+ ProcessBlock(B, recordStmtValues, AnalysisDirTag());
+ UpdateEdges(cfg, B, TF.getVal());
+ }
+ }
+
+ void EnqueueBlocksOnWorklist(CFG &cfg, dataflow::forward_analysis_tag) {
+ // Enqueue all blocks to ensure the dataflow values are computed
+ // for every block. Not all blocks are guaranteed to reach the exit block.
+ for (CFG::iterator I=cfg.begin(), E=cfg.end(); I!=E; ++I)
+ WorkList.enqueue(&**I);
+ }
+
+ void EnqueueBlocksOnWorklist(CFG &cfg, dataflow::backward_analysis_tag) {
+ // Enqueue all blocks to ensure the dataflow values are computed
+ // for every block. Not all blocks are guaranteed to reach the exit block.
+ // Enqueue the blocks in reverse order, since that more closely matches
+ // the order in which they should ideally be processed by the dataflow
+ // algorithm.
+ for (CFG::reverse_iterator I=cfg.rbegin(), E=cfg.rend(); I!=E; ++I)
+ WorkList.enqueue(&**I);
+ }
+
+ void ProcessMerge(CFG& cfg, const CFGBlock *B) {
+ ValTy& V = TF.getVal();
+ TF.SetTopValue(V);
+
+ // Merge dataflow values from all predecessors of this block.
+ MergeOperatorTy Merge;
+
+ EdgeDataMapTy& M = D.getEdgeDataMap();
+ bool firstMerge = true;
+ bool noEdges = true;
+ for (PrevBItr I=ItrTraits::PrevBegin(B),E=ItrTraits::PrevEnd(B); I!=E; ++I){
+
+ CFGBlock *PrevBlk = *I;
+
+ if (!PrevBlk)
+ continue;
+
+ typename EdgeDataMapTy::iterator EI =
+ M.find(ItrTraits::PrevEdge(B, PrevBlk));
+
+ if (EI != M.end()) {
+ noEdges = false;
+ if (firstMerge) {
+ firstMerge = false;
+ V.copyValues(EI->second);
+ }
+ else
+ Merge(V, EI->second);
+ }
+ }
+
+ bool isInitialized = true;
+ typename BlockDataMapTy::iterator BI = D.getBlockDataMap().find(B);
+ if(BI == D.getBlockDataMap().end()) {
+ isInitialized = false;
+ BI = D.getBlockDataMap().insert( std::make_pair(B,ValTy()) ).first;
+ }
+ // If no edges have been found, this is the first time the solver has been
+ // called on block B; copy the initialization values (if any) into V, which
+ // will then be used as the edge data.
+ if(noEdges && isInitialized)
+ Merge(V, BI->second);
+
+ // Set the data for the block.
+ BI->second.copyValues(V);
+ }
+
+ /// ProcessBlock - Process the transfer functions for a given block.
+ void ProcessBlock(const CFGBlock *B, bool recordStmtValues,
+ dataflow::forward_analysis_tag) {
+
+ TF.setCurrentBlock(B);
+
+ for (StmtItr I=ItrTraits::StmtBegin(B), E=ItrTraits::StmtEnd(B); I!=E;++I) {
+ CFGElement El = *I;
+ if (const CFGStmt *S = El.getAs<CFGStmt>())
+ ProcessStmt(S->getStmt(), recordStmtValues, AnalysisDirTag());
+ }
+
+ TF.VisitTerminator(const_cast<CFGBlock*>(B));
+ }
+
+ void ProcessBlock(const CFGBlock *B, bool recordStmtValues,
+ dataflow::backward_analysis_tag) {
+
+ TF.setCurrentBlock(B);
+
+ TF.VisitTerminator(const_cast<CFGBlock*>(B));
+
+ for (StmtItr I=ItrTraits::StmtBegin(B), E=ItrTraits::StmtEnd(B); I!=E;++I) {
+ CFGElement El = *I;
+ if (const CFGStmt *S = El.getAs<CFGStmt>())
+ ProcessStmt(S->getStmt(), recordStmtValues, AnalysisDirTag());
+ }
+ }
+
+ void ProcessStmt(const Stmt *S, bool record, dataflow::forward_analysis_tag) {
+ if (record) D.getStmtDataMap()[S] = TF.getVal();
+ TF.BlockStmt_Visit(const_cast<Stmt*>(S));
+ }
+
+ void ProcessStmt(const Stmt *S, bool record, dataflow::backward_analysis_tag){
+ TF.BlockStmt_Visit(const_cast<Stmt*>(S));
+ if (record) D.getStmtDataMap()[S] = TF.getVal();
+ }
+
+ /// UpdateEdges - After processing the transfer functions for a
+ /// block, update the dataflow value associated with the block's
+ /// outgoing/incoming edges (depending on whether we are performing a
+ /// forward or backward analysis, respectively).
+ void UpdateEdges(CFG& cfg, const CFGBlock *B, ValTy& V) {
+ for (NextBItr I=ItrTraits::NextBegin(B), E=ItrTraits::NextEnd(B); I!=E; ++I)
+ if (CFGBlock *NextBlk = *I)
+ UpdateEdgeValue(ItrTraits::NextEdge(B, NextBlk),V, NextBlk);
+ }
+
+ /// UpdateEdgeValue - Update the value associated with a given edge.
+ void UpdateEdgeValue(BlockEdge E, ValTy& V, const CFGBlock *TargetBlock) {
+ EdgeDataMapTy& M = D.getEdgeDataMap();
+ typename EdgeDataMapTy::iterator I = M.find(E);
+
+ if (I == M.end()) { // First computed value for this edge?
+ M[E].copyValues(V);
+ WorkList.enqueue(TargetBlock);
+ }
+ else if (!_Equal()(V,I->second)) {
+ I->second.copyValues(V);
+ WorkList.enqueue(TargetBlock);
+ }
+ }
+
+private:
+ DFValuesTy& D;
+ DataflowWorkListTy WorkList;
+ TransferFuncsTy TF;
+};
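+
+// Illustrative usage sketch (hypothetical MyValues/MyTransferFuncs types, not
+// part of this header): a client bundles its value, transfer-function, and
+// merge types and hands them to the solver.
+//
+//   MyValues V;                                  // a DataflowValues subclass
+//   DataflowSolver<MyValues, MyTransferFuncs,
+//                  DeclBitVector_Types::Union> Solver(V);
+//   Solver.runOnCFG(cfg);                        // fills V's edge/block maps
+//   const MyValues::ValTy &AtEdge = V.getEdgeData(SomeEdge);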
+
+} // end namespace clang
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h b/contrib/llvm/tools/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h
new file mode 100644
index 0000000..f86b2b0
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/FlowSensitive/DataflowValues.h
@@ -0,0 +1,172 @@
+//===--- DataflowValues.h - Data structure for dataflow values --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a skeleton data structure for encapsulating the dataflow
+// values for a CFG. Typically this is subclassed to provide methods for
+// computing these values from a CFG.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSES_DATAFLOW_VALUES
+#define LLVM_CLANG_ANALYSES_DATAFLOW_VALUES
+
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "llvm/ADT/DenseMap.h"
+
+//===----------------------------------------------------------------------===//
+/// Dataflow Directional Tag Classes. These are used for tag dispatching
+/// within the dataflow solver/transfer functions to determine what direction
+/// a dataflow analysis flows.
+//===----------------------------------------------------------------------===//
+
+namespace clang {
+namespace dataflow {
+ struct forward_analysis_tag {};
+ struct backward_analysis_tag {};
+} // end namespace dataflow
+
+//===----------------------------------------------------------------------===//
+/// DataflowValues. Container class to store dataflow values for a CFG.
+//===----------------------------------------------------------------------===//
+
+template <typename ValueTypes,
+ typename _AnalysisDirTag = dataflow::forward_analysis_tag >
+class DataflowValues {
+
+ //===--------------------------------------------------------------------===//
+ // Type declarations.
+ //===--------------------------------------------------------------------===//
+
+public:
+ typedef typename ValueTypes::ValTy ValTy;
+ typedef typename ValueTypes::AnalysisDataTy AnalysisDataTy;
+ typedef _AnalysisDirTag AnalysisDirTag;
+ typedef llvm::DenseMap<ProgramPoint, ValTy> EdgeDataMapTy;
+ typedef llvm::DenseMap<const CFGBlock*, ValTy> BlockDataMapTy;
+ typedef llvm::DenseMap<const Stmt*, ValTy> StmtDataMapTy;
+
+ //===--------------------------------------------------------------------===//
+ // Predicates.
+ //===--------------------------------------------------------------------===//
+
+public:
+ /// isForwardAnalysis - Returns true if the dataflow values are computed
+ /// from a forward analysis.
+ bool isForwardAnalysis() { return isForwardAnalysis(AnalysisDirTag()); }
+
+ /// isBackwardAnalysis - Returns true if the dataflow values are computed
+ /// from a backward analysis.
+ bool isBackwardAnalysis() { return !isForwardAnalysis(); }
+
+private:
+ bool isForwardAnalysis(dataflow::forward_analysis_tag) { return true; }
+ bool isForwardAnalysis(dataflow::backward_analysis_tag) { return false; }
+
+ //===--------------------------------------------------------------------===//
+ // Initialization and accessors methods.
+ //===--------------------------------------------------------------------===//
+
+public:
+ DataflowValues() : StmtDataMap(NULL) {}
+ ~DataflowValues() { delete StmtDataMap; }
+
+ /// InitializeValues - Invoked by the solver to initialize state needed for
+ /// dataflow analysis. This method is usually specialized by subclasses.
+ void InitializeValues(const CFG& cfg) {}
+
+
+ /// getEdgeData - Retrieves the dataflow values associated with a
+ /// CFG edge.
+ ValTy& getEdgeData(const BlockEdge &E) {
+ typename EdgeDataMapTy::iterator I = EdgeDataMap.find(E);
+ assert (I != EdgeDataMap.end() && "No data associated with Edge.");
+ return I->second;
+ }
+
+ const ValTy& getEdgeData(const BlockEdge &E) const {
+ return const_cast<DataflowValues*>(this)->getEdgeData(E);
+ }
+
+ /// getBlockData - Retrieves the dataflow values associated with a
+ /// specified CFGBlock. If the dataflow analysis is a forward analysis,
+ /// this data is associated with the ENTRY of the block. If the analysis
+ /// is a backward analysis, it is associated with the EXIT of the block.
+ ValTy& getBlockData(const CFGBlock *B) {
+ typename BlockDataMapTy::iterator I = BlockDataMap.find(B);
+ assert (I != BlockDataMap.end() && "No data associated with block.");
+ return I->second;
+ }
+
+ const ValTy& getBlockData(const CFGBlock *B) const {
+ return const_cast<DataflowValues*>(this)->getBlockData(B);
+ }
+
+ /// getStmtData - Retrieves the dataflow values associated with a
+ /// specified Stmt. If the dataflow analysis is a forward analysis,
+ /// this data corresponds to the point immediately before a Stmt.
+ /// If the analysis is a backwards analysis, it is associated with
+ /// the point after a Stmt. This data is only computed for block-level
+ /// expressions, and only when requested when the analysis is executed.
+ ValTy& getStmtData(const Stmt *S) {
+ assert (StmtDataMap && "Dataflow values were not computed for statements.");
+ typename StmtDataMapTy::iterator I = StmtDataMap->find(S);
+ assert (I != StmtDataMap->end() && "No data associated with statement.");
+ return I->second;
+ }
+
+ const ValTy& getStmtData(const Stmt *S) const {
+ return const_cast<DataflowValues*>(this)->getStmtData(S);
+ }
+
+ /// getEdgeDataMap - Retrieves the internal map between CFG edges and
+ /// dataflow values. Usually used by a dataflow solver to compute
+ /// values for blocks.
+ EdgeDataMapTy& getEdgeDataMap() { return EdgeDataMap; }
+ const EdgeDataMapTy& getEdgeDataMap() const { return EdgeDataMap; }
+
+ /// getBlockDataMap - Retrieves the internal map between CFGBlocks and
+ /// dataflow values. If the dataflow analysis operates in the forward
+ /// direction, the values correspond to the dataflow values at the start
+ /// of the block. Otherwise, for a backward analysis, the values correspond
+ /// to the dataflow values at the end of the block.
+ BlockDataMapTy& getBlockDataMap() { return BlockDataMap; }
+ const BlockDataMapTy& getBlockDataMap() const { return BlockDataMap; }
+
+ /// getStmtDataMap - Retrieves the internal map between Stmts and
+ /// dataflow values.
+ StmtDataMapTy& getStmtDataMap() {
+ if (!StmtDataMap) StmtDataMap = new StmtDataMapTy();
+ return *StmtDataMap;
+ }
+
+ const StmtDataMapTy& getStmtDataMap() const {
+ return const_cast<DataflowValues*>(this)->getStmtDataMap();
+ }
+
+ /// getAnalysisData - Retrieves the meta data associated with a
+ /// dataflow analysis for analyzing a particular CFG.
+ /// This is typically consumed by transfer function code (via the solver).
+ /// This can also be used by subclasses to interpret the dataflow values.
+ AnalysisDataTy& getAnalysisData() { return AnalysisData; }
+ const AnalysisDataTy& getAnalysisData() const { return AnalysisData; }
+
+ //===--------------------------------------------------------------------===//
+ // Internal data.
+ //===--------------------------------------------------------------------===//
+
+protected:
+ EdgeDataMapTy EdgeDataMap;
+ BlockDataMapTy BlockDataMap;
+ StmtDataMapTy* StmtDataMap;
+ AnalysisDataTy AnalysisData;
+};
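+
+// Illustrative sketch (hypothetical analysis, not part of this header): a
+// concrete analysis usually subclasses DataflowValues to fix its value types
+// and direction, and provides its own InitializeValues to set boundary
+// conditions.
+//
+//   class MyLiveness
+//     : public DataflowValues<StmtDeclBitVector_Types,
+//                             dataflow::backward_analysis_tag> {
+//   public:
+//     void InitializeValues(const CFG &cfg) { /* set boundary conditions */ }
+//   };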
+
+} // end namespace clang
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h b/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h
new file mode 100644
index 0000000..b2200c6
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/ProgramPoint.h
@@ -0,0 +1,490 @@
+//==- ProgramPoint.h - Program Points for Path-Sensitive Analysis --*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface ProgramPoint, which identifies a
+// distinct location in a function.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_PROGRAM_POINT
+#define LLVM_CLANG_ANALYSIS_PROGRAM_POINT
+
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/ADT/StringRef.h"
+#include <cassert>
+#include <utility>
+#include <string>
+
+namespace clang {
+
+class AnalysisDeclContext;
+class FunctionDecl;
+class LocationContext;
+class ProgramPointTag;
+
+class ProgramPoint {
+public:
+ enum Kind { BlockEdgeKind,
+ BlockEntranceKind,
+ BlockExitKind,
+ PreStmtKind,
+ PostStmtKind,
+ PreLoadKind,
+ PostLoadKind,
+ PreStoreKind,
+ PostStoreKind,
+ PostPurgeDeadSymbolsKind,
+ PostConditionKind,
+ PostLValueKind,
+ PostInitializerKind,
+ CallEnterKind,
+ CallExitKind,
+ MinPostStmtKind = PostStmtKind,
+ MaxPostStmtKind = CallExitKind,
+ EpsilonKind};
+
+private:
+ llvm::PointerIntPair<const void *, 2, unsigned> Data1;
+ llvm::PointerIntPair<const void *, 2, unsigned> Data2;
+
+ // The LocationContext could be NULL to allow ProgramPoint to be used in
+ // a context-insensitive analysis.
+ llvm::PointerIntPair<const LocationContext *, 2, unsigned> L;
+
+ const ProgramPointTag *Tag;
+
+ ProgramPoint();
+
+protected:
+ ProgramPoint(const void *P,
+ Kind k,
+ const LocationContext *l,
+ const ProgramPointTag *tag = 0)
+ : Data1(P, ((unsigned) k) & 0x3),
+ Data2(0, (((unsigned) k) >> 2) & 0x3),
+ L(l, (((unsigned) k) >> 4) & 0x3),
+ Tag(tag) {
+ assert(getKind() == k);
+ assert(getLocationContext() == l);
+ assert(getData1() == P);
+ }
+
+ ProgramPoint(const void *P1,
+ const void *P2,
+ Kind k,
+ const LocationContext *l,
+ const ProgramPointTag *tag = 0)
+ : Data1(P1, ((unsigned) k) & 0x3),
+ Data2(P2, (((unsigned) k) >> 2) & 0x3),
+ L(l, (((unsigned) k) >> 4) & 0x3),
+ Tag(tag) {}
+
+protected:
+ const void *getData1() const { return Data1.getPointer(); }
+ const void *getData2() const { return Data2.getPointer(); }
+ void setData2(const void *d) { Data2.setPointer(d); }
+
+public:
+ /// Create a new ProgramPoint object that is the same as the original
+ /// except for using the specified tag value.
+ ProgramPoint withTag(const ProgramPointTag *tag) const {
+ return ProgramPoint(getData1(), getData2(), getKind(),
+ getLocationContext(), tag);
+ }
+
+ Kind getKind() const {
+ unsigned x = L.getInt();
+ x <<= 2;
+ x |= Data2.getInt();
+ x <<= 2;
+ x |= Data1.getInt();
+ return (Kind) x;
+ }
+
+ const ProgramPointTag *getTag() const { return Tag; }
+
+ const LocationContext *getLocationContext() const {
+ return L.getPointer();
+ }
+
+ // For use with DenseMap. This hash is probably slow.
+ unsigned getHashValue() const {
+ llvm::FoldingSetNodeID ID;
+ Profile(ID);
+ return ID.ComputeHash();
+ }
+
+ static bool classof(const ProgramPoint*) { return true; }
+
+ bool operator==(const ProgramPoint & RHS) const {
+ return Data1 == RHS.Data1 &&
+ Data2 == RHS.Data2 &&
+ L == RHS.L &&
+ Tag == RHS.Tag;
+ }
+
+ bool operator!=(const ProgramPoint &RHS) const {
+ return Data1 != RHS.Data1 ||
+ Data2 != RHS.Data2 ||
+ L != RHS.L ||
+ Tag != RHS.Tag;
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.AddInteger((unsigned) getKind());
+ ID.AddPointer(getData1());
+ ID.AddPointer(getData2());
+ ID.AddPointer(getLocationContext());
+ ID.AddPointer(Tag);
+ }
+
+ static ProgramPoint getProgramPoint(const Stmt *S, ProgramPoint::Kind K,
+ const LocationContext *LC,
+ const ProgramPointTag *tag);
+};
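+
+// Note on the encoding above (an illustrative reading of the constructors):
+// the Kind value is split across the low two bits of each of the three
+// PointerIntPairs.  For example, PostStoreKind == 8 == 0b001000 is stored as
+// Data1 bits = 00, Data2 bits = 10, L bits = 00, and getKind() reassembles
+// ((00 << 2 | 10) << 2) | 00 == 8.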
+
+class BlockEntrance : public ProgramPoint {
+public:
+ BlockEntrance(const CFGBlock *B, const LocationContext *L,
+ const ProgramPointTag *tag = 0)
+ : ProgramPoint(B, BlockEntranceKind, L, tag) {
+ assert(B && "BlockEntrance requires non-null block");
+ }
+
+ const CFGBlock *getBlock() const {
+ return reinterpret_cast<const CFGBlock*>(getData1());
+ }
+
+ const CFGElement getFirstElement() const {
+ const CFGBlock *B = getBlock();
+ return B->empty() ? CFGElement() : B->front();
+ }
+
+ static bool classof(const ProgramPoint* Location) {
+ return Location->getKind() == BlockEntranceKind;
+ }
+};
+
+class BlockExit : public ProgramPoint {
+public:
+ BlockExit(const CFGBlock *B, const LocationContext *L)
+ : ProgramPoint(B, BlockExitKind, L) {}
+
+ const CFGBlock *getBlock() const {
+ return reinterpret_cast<const CFGBlock*>(getData1());
+ }
+
+ const Stmt *getTerminator() const {
+ return getBlock()->getTerminator();
+ }
+
+ static bool classof(const ProgramPoint* Location) {
+ return Location->getKind() == BlockExitKind;
+ }
+};
+
+class StmtPoint : public ProgramPoint {
+public:
+ StmtPoint(const Stmt *S, const void *p2, Kind k, const LocationContext *L,
+ const ProgramPointTag *tag)
+ : ProgramPoint(S, p2, k, L, tag) {}
+
+ const Stmt *getStmt() const { return (const Stmt*) getData1(); }
+
+ template <typename T>
+ const T* getStmtAs() const { return llvm::dyn_cast<T>(getStmt()); }
+
+ static bool classof(const ProgramPoint* Location) {
+ unsigned k = Location->getKind();
+ return k >= PreStmtKind && k <= MaxPostStmtKind;
+ }
+};
+
+
+class PreStmt : public StmtPoint {
+public:
+ PreStmt(const Stmt *S, const LocationContext *L, const ProgramPointTag *tag,
+ const Stmt *SubStmt = 0)
+ : StmtPoint(S, SubStmt, PreStmtKind, L, tag) {}
+
+ const Stmt *getSubStmt() const { return (const Stmt*) getData2(); }
+
+ static bool classof(const ProgramPoint* Location) {
+ return Location->getKind() == PreStmtKind;
+ }
+};
+
+class PostStmt : public StmtPoint {
+protected:
+ PostStmt(const Stmt *S, const void *data, Kind k, const LocationContext *L,
+ const ProgramPointTag *tag = 0)
+ : StmtPoint(S, data, k, L, tag) {}
+
+public:
+ explicit PostStmt(const Stmt *S, Kind k,
+ const LocationContext *L, const ProgramPointTag *tag = 0)
+ : StmtPoint(S, NULL, k, L, tag) {}
+
+ explicit PostStmt(const Stmt *S, const LocationContext *L,
+ const ProgramPointTag *tag = 0)
+ : StmtPoint(S, NULL, PostStmtKind, L, tag) {}
+
+ static bool classof(const ProgramPoint* Location) {
+ unsigned k = Location->getKind();
+ return k >= MinPostStmtKind && k <= MaxPostStmtKind;
+ }
+};
+
+// PostCondition represents the post program point of a branch condition.
+class PostCondition : public PostStmt {
+public:
+ PostCondition(const Stmt *S, const LocationContext *L,
+ const ProgramPointTag *tag = 0)
+ : PostStmt(S, PostConditionKind, L, tag) {}
+
+ static bool classof(const ProgramPoint* Location) {
+ return Location->getKind() == PostConditionKind;
+ }
+};
+
+class LocationCheck : public StmtPoint {
+protected:
+ LocationCheck(const Stmt *S, const LocationContext *L,
+ ProgramPoint::Kind K, const ProgramPointTag *tag)
+ : StmtPoint(S, NULL, K, L, tag) {}
+
+ static bool classof(const ProgramPoint *location) {
+ unsigned k = location->getKind();
+ return k == PreLoadKind || k == PreStoreKind;
+ }
+};
+
+class PreLoad : public LocationCheck {
+public:
+ PreLoad(const Stmt *S, const LocationContext *L,
+ const ProgramPointTag *tag = 0)
+ : LocationCheck(S, L, PreLoadKind, tag) {}
+
+ static bool classof(const ProgramPoint *location) {
+ return location->getKind() == PreLoadKind;
+ }
+};
+
+class PreStore : public LocationCheck {
+public:
+ PreStore(const Stmt *S, const LocationContext *L,
+ const ProgramPointTag *tag = 0)
+ : LocationCheck(S, L, PreStoreKind, tag) {}
+
+ static bool classof(const ProgramPoint *location) {
+ return location->getKind() == PreStoreKind;
+ }
+};
+
+class PostLoad : public PostStmt {
+public:
+ PostLoad(const Stmt *S, const LocationContext *L,
+ const ProgramPointTag *tag = 0)
+ : PostStmt(S, PostLoadKind, L, tag) {}
+
+ static bool classof(const ProgramPoint* Location) {
+ return Location->getKind() == PostLoadKind;
+ }
+};
+
+/// Represents a program point after a store evaluation.
+class PostStore : public PostStmt {
+public:
+ /// Construct the post store point.
+ /// \param Loc can be used to store the information about the location
+ /// used in the form it was uttered in the code.
+ PostStore(const Stmt *S, const LocationContext *L, const void *Loc,
+ const ProgramPointTag *tag = 0)
+ : PostStmt(S, PostStoreKind, L, tag) {
+ assert(getData2() == 0);
+ setData2(Loc);
+ }
+
+ static bool classof(const ProgramPoint* Location) {
+ return Location->getKind() == PostStoreKind;
+ }
+
+ /// \brief Returns the information about the location used in the store,
+ /// how it was uttered in the code.
+ const void *getLocationValue() const {
+ return getData2();
+ }
+
+};
+
+class PostLValue : public PostStmt {
+public:
+ PostLValue(const Stmt *S, const LocationContext *L,
+ const ProgramPointTag *tag = 0)
+ : PostStmt(S, PostLValueKind, L, tag) {}
+
+ static bool classof(const ProgramPoint* Location) {
+ return Location->getKind() == PostLValueKind;
+ }
+};
+
+class PostPurgeDeadSymbols : public PostStmt {
+public:
+ PostPurgeDeadSymbols(const Stmt *S, const LocationContext *L,
+ const ProgramPointTag *tag = 0)
+ : PostStmt(S, PostPurgeDeadSymbolsKind, L, tag) {}
+
+ static bool classof(const ProgramPoint* Location) {
+ return Location->getKind() == PostPurgeDeadSymbolsKind;
+ }
+};
+
+class BlockEdge : public ProgramPoint {
+public:
+ BlockEdge(const CFGBlock *B1, const CFGBlock *B2, const LocationContext *L)
+ : ProgramPoint(B1, B2, BlockEdgeKind, L) {
+ assert(B1 && "BlockEdge: source block must be non-null");
+ assert(B2 && "BlockEdge: destination block must be non-null");
+ }
+
+ const CFGBlock *getSrc() const {
+ return static_cast<const CFGBlock*>(getData1());
+ }
+
+ const CFGBlock *getDst() const {
+ return static_cast<const CFGBlock*>(getData2());
+ }
+
+ static bool classof(const ProgramPoint* Location) {
+ return Location->getKind() == BlockEdgeKind;
+ }
+};
+
+class PostInitializer : public ProgramPoint {
+public:
+ PostInitializer(const CXXCtorInitializer *I,
+ const LocationContext *L)
+ : ProgramPoint(I, PostInitializerKind, L) {}
+
+ static bool classof(const ProgramPoint *Location) {
+ return Location->getKind() == PostInitializerKind;
+ }
+};
+
+class CallEnter : public StmtPoint {
+public:
+ CallEnter(const Stmt *stmt, const StackFrameContext *calleeCtx,
+ const LocationContext *callerCtx)
+ : StmtPoint(stmt, calleeCtx, CallEnterKind, callerCtx, 0) {}
+
+ const Stmt *getCallExpr() const {
+ return static_cast<const Stmt *>(getData1());
+ }
+
+ const StackFrameContext *getCalleeContext() const {
+ return static_cast<const StackFrameContext *>(getData2());
+ }
+
+ static bool classof(const ProgramPoint *Location) {
+ return Location->getKind() == CallEnterKind;
+ }
+};
+
+class CallExit : public StmtPoint {
+public:
+ // CallExit uses the callee's location context.
+ CallExit(const Stmt *S, const LocationContext *L)
+ : StmtPoint(S, 0, CallExitKind, L, 0) {}
+
+ static bool classof(const ProgramPoint *Location) {
+ return Location->getKind() == CallExitKind;
+ }
+};
+
+/// This is a meta program point, which should be skipped by diagnostic
+/// reasoning and similar clients.
+class EpsilonPoint : public ProgramPoint {
+public:
+ EpsilonPoint(const LocationContext *L, const void *Data1,
+ const void *Data2 = 0, const ProgramPointTag *tag = 0)
+ : ProgramPoint(Data1, Data2, EpsilonKind, L, tag) {}
+
+ const void *getData() const { return getData1(); }
+
+ static bool classof(const ProgramPoint* Location) {
+ return Location->getKind() == EpsilonKind;
+ }
+};
+
+/// ProgramPoints can be "tagged" as representing points specific to a given
+/// analysis entity. Tags are abstract annotations, with an associated
+/// description and potentially other information.
+class ProgramPointTag {
+public:
+ ProgramPointTag(void *tagKind = 0) : TagKind(tagKind) {}
+ virtual ~ProgramPointTag();
+ virtual StringRef getTagDescription() const = 0;
+
+protected:
+ /// Used to implement 'classof' in subclasses.
+ const void *getTagKind() { return TagKind; }
+
+private:
+ const void *TagKind;
+};
+
+class SimpleProgramPointTag : public ProgramPointTag {
+ std::string desc;
+public:
+ SimpleProgramPointTag(StringRef description);
+ StringRef getTagDescription() const;
+};
+
+} // end namespace clang
+
+
+namespace llvm { // Traits specialization for DenseMap
+
+template <> struct DenseMapInfo<clang::ProgramPoint> {
+
+static inline clang::ProgramPoint getEmptyKey() {
+ uintptr_t x =
+ reinterpret_cast<uintptr_t>(DenseMapInfo<void*>::getEmptyKey()) & ~0x7;
+ return clang::BlockEntrance(reinterpret_cast<clang::CFGBlock*>(x), 0);
+}
+
+static inline clang::ProgramPoint getTombstoneKey() {
+ uintptr_t x =
+ reinterpret_cast<uintptr_t>(DenseMapInfo<void*>::getTombstoneKey()) & ~0x7;
+ return clang::BlockEntrance(reinterpret_cast<clang::CFGBlock*>(x), 0);
+}
+
+static unsigned getHashValue(const clang::ProgramPoint &Loc) {
+ return Loc.getHashValue();
+}
+
+static bool isEqual(const clang::ProgramPoint &L,
+ const clang::ProgramPoint &R) {
+ return L == R;
+}
+
+};
+
+template <>
+struct isPodLike<clang::ProgramPoint> { static const bool value = true; };
+
+} // end namespace llvm
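+
+// Illustrative usage (not part of this header): with the traits above,
+// ProgramPoint works directly as a DenseMap key, e.g.
+//
+//   llvm::DenseMap<clang::ProgramPoint, unsigned> Visits;
+//   ++Visits[clang::BlockEntrance(B, LC)];   // B and LC assumed to exist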
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Support/BlkExprDeclBitVector.h b/contrib/llvm/tools/clang/include/clang/Analysis/Support/BlkExprDeclBitVector.h
new file mode 100644
index 0000000..d25b848
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Support/BlkExprDeclBitVector.h
@@ -0,0 +1,307 @@
+// BlkExprDeclBitVector.h - Dataflow types for Bitvector Analysis --*- C++ --*--
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides definitions of the dataflow types used by analyses such
+// as LiveVariables and UninitializedValues. The underlying dataflow values
+// are implemented as bitvectors, but the definitions in this file include
+// the necessary boilerplate to use with our dataflow framework.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_STMTDECLBVDVAL_H
+#define LLVM_CLANG_STMTDECLBVDVAL_H
+
+#include "clang/Analysis/CFG.h"
+#include "clang/AST/Decl.h" // for Decl* -> NamedDecl* conversion
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace clang {
+
+ class Stmt;
+ class ASTContext;
+
+struct DeclBitVector_Types {
+
+ class Idx {
+ unsigned I;
+ public:
+ explicit Idx(unsigned i) : I(i) {}
+ Idx() : I(~0U) {}
+
+ bool isValid() const {
+ return I != ~0U;
+ }
+ operator unsigned() const {
+ assert (isValid());
+ return I;
+ }
+ };
+
+ //===--------------------------------------------------------------------===//
+ // AnalysisDataTy - Whole-function meta data.
+ //===--------------------------------------------------------------------===//
+
+ class AnalysisDataTy {
+ public:
+ typedef llvm::DenseMap<const NamedDecl*, unsigned > DMapTy;
+ typedef DMapTy::const_iterator decl_iterator;
+
+ protected:
+ DMapTy DMap;
+ unsigned NDecls;
+
+ public:
+
+ AnalysisDataTy() : NDecls(0) {}
+ virtual ~AnalysisDataTy() {}
+
+ bool isTracked(const NamedDecl *SD) { return DMap.find(SD) != DMap.end(); }
+
+ Idx getIdx(const NamedDecl *SD) const {
+ DMapTy::const_iterator I = DMap.find(SD);
+ return I == DMap.end() ? Idx() : Idx(I->second);
+ }
+
+ unsigned getNumDecls() const { return NDecls; }
+
+ void Register(const NamedDecl *SD) {
+ if (!isTracked(SD)) DMap[SD] = NDecls++;
+ }
+
+ decl_iterator begin_decl() const { return DMap.begin(); }
+ decl_iterator end_decl() const { return DMap.end(); }
+ };
+
+ //===--------------------------------------------------------------------===//
+ // ValTy - Dataflow value.
+ //===--------------------------------------------------------------------===//
+
+ class ValTy {
+ llvm::BitVector DeclBV;
+ public:
+
+ void resetDeclValues(AnalysisDataTy& AD) {
+ DeclBV.resize(AD.getNumDecls());
+ DeclBV.reset();
+ }
+
+ void setDeclValues(AnalysisDataTy& AD) {
+ DeclBV.resize(AD.getNumDecls());
+ DeclBV.set();
+ }
+
+ void resetValues(AnalysisDataTy& AD) {
+ resetDeclValues(AD);
+ }
+
+ bool operator==(const ValTy& RHS) const {
+ assert (sizesEqual(RHS));
+ return DeclBV == RHS.DeclBV;
+ }
+
+ void copyValues(const ValTy& RHS) { DeclBV = RHS.DeclBV; }
+
+ llvm::BitVector::reference getBit(unsigned i) {
+ return DeclBV[i];
+ }
+
+ bool getBit(unsigned i) const {
+ return DeclBV[i];
+ }
+
+ llvm::BitVector::reference
+ operator()(const NamedDecl *ND, const AnalysisDataTy& AD) {
+ return getBit(AD.getIdx(ND));
+ }
+
+ bool operator()(const NamedDecl *ND, const AnalysisDataTy& AD) const {
+ return getBit(AD.getIdx(ND));
+ }
+
+ llvm::BitVector::reference getDeclBit(unsigned i) { return DeclBV[i]; }
+ const llvm::BitVector::reference getDeclBit(unsigned i) const {
+ return const_cast<llvm::BitVector&>(DeclBV)[i];
+ }
+
+ ValTy& operator|=(const ValTy& RHS) {
+ assert (sizesEqual(RHS));
+ DeclBV |= RHS.DeclBV;
+ return *this;
+ }
+
+ ValTy& operator&=(const ValTy& RHS) {
+ assert (sizesEqual(RHS));
+ DeclBV &= RHS.DeclBV;
+ return *this;
+ }
+
+ ValTy& OrDeclBits(const ValTy& RHS) {
+ return operator|=(RHS);
+ }
+
+ ValTy& AndDeclBits(const ValTy& RHS) {
+ return operator&=(RHS);
+ }
+
+ bool sizesEqual(const ValTy& RHS) const {
+ return DeclBV.size() == RHS.DeclBV.size();
+ }
+ };
+
+ //===--------------------------------------------------------------------===//
+ // Some useful merge operations.
+ //===--------------------------------------------------------------------===//
+
+ struct Union { void operator()(ValTy& Dst, ValTy& Src) { Dst |= Src; } };
+ struct Intersect { void operator()(ValTy& Dst, ValTy& Src) { Dst &= Src; } };
+};
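+
+// Illustrative sketch (hypothetical VD, not part of this header): the
+// analysis meta data registers the declarations it tracks, and dataflow
+// values are then indexed through those registrations.
+//
+//   DeclBitVector_Types::AnalysisDataTy AD;
+//   AD.Register(VD);                  // VD is some NamedDecl* of interest
+//   DeclBitVector_Types::ValTy V;
+//   V.resetValues(AD);                // size the bitvector, clear all bits
+//   V(VD, AD) = true;                 // mark VD in this dataflow value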
+
+
+struct StmtDeclBitVector_Types {
+
+ //===--------------------------------------------------------------------===//
+ // AnalysisDataTy - Whole-function meta data.
+ //===--------------------------------------------------------------------===//
+
+ class AnalysisDataTy : public DeclBitVector_Types::AnalysisDataTy {
+ ASTContext *ctx;
+ CFG* cfg;
+ public:
+ AnalysisDataTy() : ctx(0), cfg(0) {}
+ virtual ~AnalysisDataTy() {}
+
+ void setContext(ASTContext &c) { ctx = &c; }
+ ASTContext &getContext() {
+ assert(ctx && "ASTContext should not be NULL.");
+ return *ctx;
+ }
+
+ void setCFG(CFG& c) { cfg = &c; }
+ CFG& getCFG() { assert(cfg && "CFG should not be NULL."); return *cfg; }
+
+ bool isTracked(const Stmt *S) { return cfg->isBlkExpr(S); }
+ using DeclBitVector_Types::AnalysisDataTy::isTracked;
+
+ unsigned getIdx(const Stmt *S) const {
+ CFG::BlkExprNumTy I = cfg->getBlkExprNum(S);
+ assert(I && "Stmtession not tracked for bitvector.");
+ return I;
+ }
+ using DeclBitVector_Types::AnalysisDataTy::getIdx;
+
+ unsigned getNumBlkExprs() const { return cfg->getNumBlkExprs(); }
+ };
+
+ //===--------------------------------------------------------------------===//
+ // ValTy - Dataflow value.
+ //===--------------------------------------------------------------------===//
+
+ class ValTy : public DeclBitVector_Types::ValTy {
+ llvm::BitVector BlkExprBV;
+ typedef DeclBitVector_Types::ValTy ParentTy;
+
+ static inline ParentTy& ParentRef(ValTy& X) {
+ return static_cast<ParentTy&>(X);
+ }
+
+ static inline const ParentTy& ParentRef(const ValTy& X) {
+ return static_cast<const ParentTy&>(X);
+ }
+
+ public:
+
+ void resetBlkExprValues(AnalysisDataTy& AD) {
+ BlkExprBV.resize(AD.getNumBlkExprs());
+ BlkExprBV.reset();
+ }
+
+ void setBlkExprValues(AnalysisDataTy& AD) {
+ BlkExprBV.resize(AD.getNumBlkExprs());
+ BlkExprBV.set();
+ }
+
+ void resetValues(AnalysisDataTy& AD) {
+ resetDeclValues(AD);
+ resetBlkExprValues(AD);
+ }
+
+ void setValues(AnalysisDataTy& AD) {
+ setDeclValues(AD);
+ setBlkExprValues(AD);
+ }
+
+ bool operator==(const ValTy& RHS) const {
+ return ParentRef(*this) == ParentRef(RHS)
+ && BlkExprBV == RHS.BlkExprBV;
+ }
+
+ void copyValues(const ValTy& RHS) {
+ ParentRef(*this).copyValues(ParentRef(RHS));
+ BlkExprBV = RHS.BlkExprBV;
+ }
+
+ llvm::BitVector::reference
+ operator()(const Stmt *S, const AnalysisDataTy& AD) {
+ return BlkExprBV[AD.getIdx(S)];
+ }
+ const llvm::BitVector::reference
+ operator()(const Stmt *S, const AnalysisDataTy& AD) const {
+ return const_cast<ValTy&>(*this)(S,AD);
+ }
+
+ using DeclBitVector_Types::ValTy::operator();
+
+
+ llvm::BitVector::reference getStmtBit(unsigned i) { return BlkExprBV[i]; }
+ const llvm::BitVector::reference getStmtBit(unsigned i) const {
+ return const_cast<llvm::BitVector&>(BlkExprBV)[i];
+ }
+
+ ValTy& OrBlkExprBits(const ValTy& RHS) {
+ BlkExprBV |= RHS.BlkExprBV;
+ return *this;
+ }
+
+ ValTy& AndBlkExprBits(const ValTy& RHS) {
+ BlkExprBV &= RHS.BlkExprBV;
+ return *this;
+ }
+
+ ValTy& operator|=(const ValTy& RHS) {
+ assert (sizesEqual(RHS));
+ ParentRef(*this) |= ParentRef(RHS);
+ BlkExprBV |= RHS.BlkExprBV;
+ return *this;
+ }
+
+ ValTy& operator&=(const ValTy& RHS) {
+ assert (sizesEqual(RHS));
+ ParentRef(*this) &= ParentRef(RHS);
+ BlkExprBV &= RHS.BlkExprBV;
+ return *this;
+ }
+
+ bool sizesEqual(const ValTy& RHS) const {
+ return ParentRef(*this).sizesEqual(ParentRef(RHS))
+ && BlkExprBV.size() == RHS.BlkExprBV.size();
+ }
+ };
+
+ //===--------------------------------------------------------------------===//
+ // Some useful merge operations.
+ //===--------------------------------------------------------------------===//
+
+ struct Union { void operator()(ValTy& Dst, ValTy& Src) { Dst |= Src; } };
+ struct Intersect { void operator()(ValTy& Dst, ValTy& Src) { Dst &= Src; } };
+
+};
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Support/BumpVector.h b/contrib/llvm/tools/clang/include/clang/Analysis/Support/BumpVector.h
new file mode 100644
index 0000000..83532e6
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Support/BumpVector.h
@@ -0,0 +1,244 @@
+//===-- BumpVector.h - Vector-like ADT that uses bump allocation --*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides BumpVector, a vector-like ADT whose contents are
+// allocated from a BumpPtrAllocator.
+//
+//===----------------------------------------------------------------------===//
+
+// FIXME: Most of this is copy-and-paste from SmallVector.h. We can
+// refactor this core logic into something common that is shared between
+// the two. The main thing that is different is the allocation strategy.
+
+#ifndef LLVM_CLANG_BUMP_VECTOR
+#define LLVM_CLANG_BUMP_VECTOR
+
+#include "llvm/Support/type_traits.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include <algorithm>
+#include <cstring>
+#include <iterator>
+#include <memory>
+
+namespace clang {
+
+class BumpVectorContext {
+ llvm::PointerIntPair<llvm::BumpPtrAllocator*, 1> Alloc;
+public:
+ /// Construct a new BumpVectorContext that creates a new BumpPtrAllocator
+ /// and destroys it when the BumpVectorContext object is destroyed.
+ BumpVectorContext() : Alloc(new llvm::BumpPtrAllocator(), 1) {}
+
+ /// Construct a new BumpVectorContext that reuses an existing
+ /// BumpPtrAllocator. This BumpPtrAllocator is not destroyed when the
+ /// BumpVectorContext object is destroyed.
+ BumpVectorContext(llvm::BumpPtrAllocator &A) : Alloc(&A, 0) {}
+
+ ~BumpVectorContext() {
+ if (Alloc.getInt())
+ delete Alloc.getPointer();
+ }
+
+ llvm::BumpPtrAllocator &getAllocator() { return *Alloc.getPointer(); }
+};
+
+template<typename T>
+class BumpVector {
+ T *Begin, *End, *Capacity;
+public:
+ // Construct an empty vector, reserving space for N elements.
+ explicit BumpVector(BumpVectorContext &C, unsigned N)
+ : Begin(NULL), End(NULL), Capacity(NULL) {
+ reserve(C, N);
+ }
+
+ ~BumpVector() {
+ if (llvm::is_class<T>::value) {
+ // Destroy the constructed elements in the vector.
+ destroy_range(Begin, End);
+ }
+ }
+
+ typedef size_t size_type;
+ typedef ptrdiff_t difference_type;
+ typedef T value_type;
+ typedef T* iterator;
+ typedef const T* const_iterator;
+
+ typedef std::reverse_iterator<const_iterator> const_reverse_iterator;
+ typedef std::reverse_iterator<iterator> reverse_iterator;
+
+ typedef T& reference;
+ typedef const T& const_reference;
+ typedef T* pointer;
+ typedef const T* const_pointer;
+
+ // forward iterator creation methods.
+ iterator begin() { return Begin; }
+ const_iterator begin() const { return Begin; }
+ iterator end() { return End; }
+ const_iterator end() const { return End; }
+
+ // reverse iterator creation methods.
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+ const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+ const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
+
+ bool empty() const { return Begin == End; }
+ size_type size() const { return End-Begin; }
+
+ reference operator[](unsigned idx) {
+ assert(Begin + idx < End);
+ return Begin[idx];
+ }
+ const_reference operator[](unsigned idx) const {
+ assert(Begin + idx < End);
+ return Begin[idx];
+ }
+
+ reference front() {
+ return begin()[0];
+ }
+ const_reference front() const {
+ return begin()[0];
+ }
+
+ reference back() {
+ return end()[-1];
+ }
+ const_reference back() const {
+ return end()[-1];
+ }
+
+ void pop_back() {
+ --End;
+ End->~T();
+ }
+
+ T pop_back_val() {
+ T Result = back();
+ pop_back();
+ return Result;
+ }
+
+ void clear() {
+ if (llvm::is_class<T>::value) {
+ destroy_range(Begin, End);
+ }
+ End = Begin;
+ }
+
+ /// data - Return a pointer to the vector's buffer, even if empty().
+ pointer data() {
+ return pointer(Begin);
+ }
+
+ /// data - Return a pointer to the vector's buffer, even if empty().
+ const_pointer data() const {
+ return const_pointer(Begin);
+ }
+
+ void push_back(const_reference Elt, BumpVectorContext &C) {
+ if (End < Capacity) {
+ Retry:
+ new (End) T(Elt);
+ ++End;
+ return;
+ }
+ grow(C);
+ goto Retry;
+ }
+
+ /// insert - Insert some number of copies of element into a position. Return
+ /// iterator to position after last inserted copy.
+ iterator insert(iterator I, size_t Cnt, const_reference E,
+ BumpVectorContext &C) {
+ assert (I >= Begin && I <= End && "Iterator out of bounds.");
+ if (End + Cnt <= Capacity) {
+ Retry:
+ move_range_right(I, End, Cnt);
+ construct_range(I, I + Cnt, E);
+ End += Cnt;
+ return I + Cnt;
+ }
+ ptrdiff_t D = I - Begin;
+ grow(C, size() + Cnt);
+ I = Begin + D;
+ goto Retry;
+ }
+
+ void reserve(BumpVectorContext &C, unsigned N) {
+ if (unsigned(Capacity-Begin) < N)
+ grow(C, N);
+ }
+
+ /// capacity - Return the total number of elements in the currently allocated
+ /// buffer.
+ size_t capacity() const { return Capacity - Begin; }
+
+private:
+ /// grow - double the size of the allocated memory, guaranteeing space for at
+ /// least one more element or MinSize if specified.
+ void grow(BumpVectorContext &C, size_type MinSize = 1);
+
+ void construct_range(T *S, T *E, const T &Elt) {
+ for (; S != E; ++S)
+ new (S) T(Elt);
+ }
+
+ void destroy_range(T *S, T *E) {
+ while (S != E) {
+ --E;
+ E->~T();
+ }
+ }
+
+ void move_range_right(T *S, T *E, size_t D) {
+ for (T *I = E + D - 1, *IL = S + D - 1; I != IL; --I) {
+ --E;
+ new (I) T(*E);
+ E->~T();
+ }
+ }
+};
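+
+// Illustrative usage (not part of this header):
+//
+//   BumpVectorContext C;          // owns a fresh BumpPtrAllocator
+//   BumpVector<int> V(C, 4);      // reserve space for four elements
+//   V.push_back(1, C);
+//   V.push_back(2, C);            // V.size() == 2
+//   // The storage comes from C's allocator and is reclaimed when that
+//   // allocator is destroyed, not by ~BumpVector.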
+
+// Define this out-of-line to dissuade the C++ compiler from inlining it.
+template <typename T>
+void BumpVector<T>::grow(BumpVectorContext &C, size_t MinSize) {
+ size_t CurCapacity = Capacity-Begin;
+ size_t CurSize = size();
+ size_t NewCapacity = 2*CurCapacity;
+ if (NewCapacity < MinSize)
+ NewCapacity = MinSize;
+
+ // Allocate the memory from the BumpPtrAllocator.
+ T *NewElts = C.getAllocator().template Allocate<T>(NewCapacity);
+
+ // Copy the elements over.
+ if (llvm::is_class<T>::value) {
+ std::uninitialized_copy(Begin, End, NewElts);
+ // Destroy the original elements.
+ destroy_range(Begin, End);
+ }
+ else {
+ // Use memcpy for PODs (std::uninitialized_copy optimizes to memmove).
+ memcpy(NewElts, Begin, CurSize * sizeof(T));
+ }
+
+ // For now, leak 'Begin'. We can add it back to a freelist in
+ // BumpVectorContext.
+ Begin = NewElts;
+ End = NewElts+CurSize;
+ Capacity = Begin+NewCapacity;
+}
+
+} // end: clang namespace
+#endif // end: LLVM_CLANG_BUMP_VECTOR
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h
new file mode 100644
index 0000000..97eb287
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h
@@ -0,0 +1,103 @@
+//= CFGRecStmtDeclVisitor - Recursive visitor of CFG stmts/decls -*- C++ --*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the template class CFGRecStmtDeclVisitor, which extends
+// CFGRecStmtVisitor by implementing (typed) visitation of decls.
+//
+// FIXME: This may not be fully complete. We currently explore only subtypes
+// of ScopedDecl.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_CFG_REC_STMT_DECL_VISITOR_H
+#define LLVM_CLANG_ANALYSIS_CFG_REC_STMT_DECL_VISITOR_H
+
+#include "clang/Analysis/Visitors/CFGRecStmtVisitor.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+
+#define DISPATCH_CASE(CLASS) \
+case Decl::CLASS: \
+static_cast<ImplClass*>(this)->Visit##CLASS##Decl( \
+ static_cast<CLASS##Decl*>(D)); \
+break;
+
+#define DEFAULT_DISPATCH(CLASS) void Visit##CLASS##Decl(CLASS##Decl *D) {}
+#define DEFAULT_DISPATCH_VARDECL(CLASS) void Visit##CLASS##Decl(CLASS##Decl *D)\
+ { static_cast<ImplClass*>(this)->VisitVarDecl(D); }
+
+
+namespace clang {
+template <typename ImplClass>
+class CFGRecStmtDeclVisitor : public CFGRecStmtVisitor<ImplClass> {
+public:
+
+ void VisitDeclRefExpr(DeclRefExpr *DR) {
+ static_cast<ImplClass*>(this)->VisitDecl(DR->getDecl());
+ }
+
+ void VisitDeclStmt(DeclStmt *DS) {
+ for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end();
+ DI != DE; ++DI) {
+ Decl *D = *DI;
+ static_cast<ImplClass*>(this)->VisitDecl(D);
+ // Visit the initializer.
+ if (VarDecl *VD = dyn_cast<VarDecl>(D))
+ if (Expr *I = VD->getInit())
+ static_cast<ImplClass*>(this)->Visit(I);
+ }
+ }
+
+ void VisitDecl(Decl *D) {
+ switch (D->getKind()) {
+ DISPATCH_CASE(Function)
+ DISPATCH_CASE(CXXMethod)
+ DISPATCH_CASE(Var)
+ DISPATCH_CASE(ParmVar) // FIXME: (same)
+ DISPATCH_CASE(ImplicitParam)
+ DISPATCH_CASE(EnumConstant)
+ DISPATCH_CASE(Typedef)
+ DISPATCH_CASE(Record) // FIXME: Refine. VisitStructDecl?
+ DISPATCH_CASE(CXXRecord)
+ DISPATCH_CASE(Enum)
+ DISPATCH_CASE(Field)
+ DISPATCH_CASE(UsingDirective)
+ DISPATCH_CASE(Using)
+ default:
+ llvm_unreachable("Subtype of ScopedDecl not handled.");
+ }
+ }
+
+ DEFAULT_DISPATCH(Var)
+ DEFAULT_DISPATCH(Function)
+ DEFAULT_DISPATCH(CXXMethod)
+ DEFAULT_DISPATCH_VARDECL(ParmVar)
+ DEFAULT_DISPATCH(ImplicitParam)
+ DEFAULT_DISPATCH(EnumConstant)
+ DEFAULT_DISPATCH(Typedef)
+ DEFAULT_DISPATCH(Record)
+ DEFAULT_DISPATCH(Enum)
+ DEFAULT_DISPATCH(Field)
+ DEFAULT_DISPATCH(ObjCInterface)
+ DEFAULT_DISPATCH(ObjCMethod)
+ DEFAULT_DISPATCH(ObjCProtocol)
+ DEFAULT_DISPATCH(ObjCCategory)
+ DEFAULT_DISPATCH(UsingDirective)
+ DEFAULT_DISPATCH(Using)
+
+ void VisitCXXRecordDecl(CXXRecordDecl *D) {
+ static_cast<ImplClass*>(this)->VisitRecordDecl(D);
+ }
+};
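+
+// Illustrative sketch (hypothetical subclass, not part of this header):
+//
+//   class CollectVars : public CFGRecStmtDeclVisitor<CollectVars> {
+//     CFG &cfg;
+//   public:
+//     llvm::SmallPtrSet<VarDecl*, 8> Vars;
+//     CollectVars(CFG &c) : cfg(c) {}
+//     CFG &getCFG() { return cfg; }   // required by CFGStmtVisitor::Visit
+//     void VisitVarDecl(VarDecl *VD) { Vars.insert(VD); }
+//   };
+//
+//   // CollectVars CV(cfg); CV(S);    // visit one block-level statement S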
+
+} // end namespace clang
+
+#undef DISPATCH_CASE
+#undef DEFAULT_DISPATCH
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtVisitor.h b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtVisitor.h
new file mode 100644
index 0000000..4d1cabf
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGRecStmtVisitor.h
@@ -0,0 +1,59 @@
+//==- CFGRecStmtVisitor - Recursive visitor of CFG statements ---*- C++ --*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the template class CFGRecStmtVisitor, which extends
+// CFGStmtVisitor by implementing a default recursive visit of all statements.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_CFG_REC_STMT_VISITOR_H
+#define LLVM_CLANG_ANALYSIS_CFG_REC_STMT_VISITOR_H
+
+#include "clang/Analysis/Visitors/CFGStmtVisitor.h"
+
+namespace clang {
+template <typename ImplClass>
+class CFGRecStmtVisitor : public CFGStmtVisitor<ImplClass,void> {
+public:
+
+ void VisitStmt(Stmt *S) {
+ static_cast< ImplClass* >(this)->VisitChildren(S);
+ }
+
+ void VisitCompoundStmt(CompoundStmt *S) {
+ // Do nothing. Everything in a CompoundStmt is inlined
+ // into the CFG.
+ }
+
+ void VisitConditionVariableInit(Stmt *S) {
+ assert(S == this->getCurrentBlkStmt());
+ VarDecl *CondVar = 0;
+ switch (S->getStmtClass()) {
+#define CONDVAR_CASE(CLASS) \
+case Stmt::CLASS ## Class:\
+CondVar = cast<CLASS>(S)->getConditionVariable();\
+break;
+ CONDVAR_CASE(IfStmt)
+ CONDVAR_CASE(ForStmt)
+ CONDVAR_CASE(SwitchStmt)
+ CONDVAR_CASE(WhileStmt)
+#undef CONDVAR_CASE
+ default:
+ llvm_unreachable("Infeasible");
+ }
+ static_cast<ImplClass*>(this)->Visit(CondVar->getInit());
+ }
+
+ // Defining operator() allows the visitor to be used as a C++ style functor.
+ void operator()(Stmt *S) { static_cast<ImplClass*>(this)->BlockStmt_Visit(S);}
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGStmtVisitor.h b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGStmtVisitor.h
new file mode 100644
index 0000000..b354ba7
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Analysis/Visitors/CFGStmtVisitor.h
@@ -0,0 +1,175 @@
+//===--- CFGStmtVisitor.h - Visitor for Stmts in a CFG ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CFGStmtVisitor interface, which extends
+// StmtVisitor. This interface is useful for visiting statements in a CFG
+// where some statements have implicit control-flow and thus should
+// be treated specially.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_CFGSTMTVISITOR_H
+#define LLVM_CLANG_ANALYSIS_CFGSTMTVISITOR_H
+
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/CFG.h"
+
+namespace clang {
+
+#define DISPATCH_CASE(CLASS) \
+case Stmt::CLASS ## Class: return \
+static_cast<ImplClass*>(this)->BlockStmt_Visit ## CLASS(static_cast<CLASS*>(S));
+
+#define DEFAULT_BLOCKSTMT_VISIT(CLASS) RetTy BlockStmt_Visit ## CLASS(CLASS *S)\
+{ return\
+ static_cast<ImplClass*>(this)->BlockStmt_VisitImplicitControlFlowExpr(\
+ cast<Expr>(S)); }
+
+template <typename ImplClass, typename RetTy=void>
+class CFGStmtVisitor : public StmtVisitor<ImplClass,RetTy> {
+ Stmt *CurrentBlkStmt;
+
+ struct NullifyStmt {
+ Stmt*& S;
+
+ NullifyStmt(Stmt*& s) : S(s) {}
+ ~NullifyStmt() { S = NULL; }
+ };
+
+public:
+ CFGStmtVisitor() : CurrentBlkStmt(NULL) {}
+
+ Stmt *getCurrentBlkStmt() const { return CurrentBlkStmt; }
+
+ RetTy Visit(Stmt *S) {
+ if (S == CurrentBlkStmt ||
+ !static_cast<ImplClass*>(this)->getCFG().isBlkExpr(S))
+ return StmtVisitor<ImplClass,RetTy>::Visit(S);
+ else
+ return RetTy();
+ }
+
+ /// VisitConditionVariableInit - Handle the initialization of condition
+ /// variables at branches. Valid statements include IfStmt, ForStmt,
+ /// WhileStmt, and SwitchStmt.
+ RetTy VisitConditionVariableInit(Stmt *S) {
+ return RetTy();
+ }
+
+ /// BlockVisit_XXX - Visitor methods for visiting the "root" statements in
+ /// CFGBlocks. Root statements are the statements that appear explicitly in
+ /// the list of statements in a CFGBlock. For substatements, or when there
+ /// is no implementation provided for a BlockStmt_XXX method, we default
+ /// to using StmtVisitor's Visit method.
+ RetTy BlockStmt_Visit(Stmt *S) {
+ CurrentBlkStmt = S;
+ NullifyStmt cleanup(CurrentBlkStmt);
+
+ switch (S->getStmtClass()) {
+ case Stmt::IfStmtClass:
+ case Stmt::ForStmtClass:
+ case Stmt::WhileStmtClass:
+ case Stmt::SwitchStmtClass:
+ return static_cast<ImplClass*>(this)->VisitConditionVariableInit(S);
+
+ DISPATCH_CASE(StmtExpr)
+ DISPATCH_CASE(ConditionalOperator)
+ DISPATCH_CASE(BinaryConditionalOperator)
+ DISPATCH_CASE(ObjCForCollectionStmt)
+ DISPATCH_CASE(CXXForRangeStmt)
+
+ case Stmt::BinaryOperatorClass: {
+ BinaryOperator* B = cast<BinaryOperator>(S);
+ if (B->isLogicalOp())
+ return static_cast<ImplClass*>(this)->BlockStmt_VisitLogicalOp(B);
+ else if (B->getOpcode() == BO_Comma)
+ return static_cast<ImplClass*>(this)->BlockStmt_VisitComma(B);
+ // Fall through.
+ }
+
+ default:
+ if (isa<Expr>(S))
+ return
+ static_cast<ImplClass*>(this)->BlockStmt_VisitExpr(cast<Expr>(S));
+ else
+ return static_cast<ImplClass*>(this)->BlockStmt_VisitStmt(S);
+ }
+ }
+
+ DEFAULT_BLOCKSTMT_VISIT(StmtExpr)
+ DEFAULT_BLOCKSTMT_VISIT(ConditionalOperator)
+ DEFAULT_BLOCKSTMT_VISIT(BinaryConditionalOperator)
+
+ RetTy BlockStmt_VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+ return static_cast<ImplClass*>(this)->BlockStmt_VisitStmt(S);
+ }
+
+ RetTy BlockStmt_VisitCXXForRangeStmt(CXXForRangeStmt *S) {
+ return static_cast<ImplClass*>(this)->BlockStmt_VisitStmt(S);
+ }
+
+ RetTy BlockStmt_VisitImplicitControlFlowExpr(Expr *E) {
+ return static_cast<ImplClass*>(this)->BlockStmt_VisitExpr(E);
+ }
+
+ RetTy BlockStmt_VisitExpr(Expr *E) {
+ return static_cast<ImplClass*>(this)->BlockStmt_VisitStmt(E);
+ }
+
+ RetTy BlockStmt_VisitStmt(Stmt *S) {
+ return static_cast<ImplClass*>(this)->Visit(S);
+ }
+
+ RetTy BlockStmt_VisitLogicalOp(BinaryOperator* B) {
+ return
+ static_cast<ImplClass*>(this)->BlockStmt_VisitImplicitControlFlowExpr(B);
+ }
+
+ RetTy BlockStmt_VisitComma(BinaryOperator* B) {
+ return
+ static_cast<ImplClass*>(this)->BlockStmt_VisitImplicitControlFlowExpr(B);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Utility methods. Not called by default (but subclasses may use them).
+ //===--------------------------------------------------------------------===//
+
+ /// VisitChildren: Call "Visit" on each child of S.
+ void VisitChildren(Stmt *S) {
+
+ switch (S->getStmtClass()) {
+ default:
+ break;
+
+ case Stmt::StmtExprClass: {
+ CompoundStmt *CS = cast<StmtExpr>(S)->getSubStmt();
+ if (CS->body_empty()) return;
+ static_cast<ImplClass*>(this)->Visit(CS->body_back());
+ return;
+ }
+
+ case Stmt::BinaryOperatorClass: {
+ BinaryOperator* B = cast<BinaryOperator>(S);
+ if (B->getOpcode() != BO_Comma) break;
+ static_cast<ImplClass*>(this)->Visit(B->getRHS());
+ return;
+ }
+ }
+
+ for (Stmt::child_range I = S->children(); I; ++I)
+ if (*I) static_cast<ImplClass*>(this)->Visit(*I);
+ }
+};
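+
+// Illustrative sketch (hypothetical subclass, not part of this header): a
+// visitor that counts block-level expressions carrying implicit control flow
+// (logical operators, conditional operators, statement expressions, and so on).
+//
+//   class CountImplicitCF : public CFGStmtVisitor<CountImplicitCF, void> {
+//     CFG &cfg;
+//   public:
+//     unsigned N;
+//     CountImplicitCF(CFG &c) : cfg(c), N(0) {}
+//     CFG &getCFG() { return cfg; }
+//     void BlockStmt_VisitImplicitControlFlowExpr(Expr *E) { ++N; }
+//   };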
+
+#undef DEFAULT_BLOCKSTMT_VISIT
+#undef DISPATCH_CASE
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/ABI.h b/contrib/llvm/tools/clang/include/clang/Basic/ABI.h
new file mode 100644
index 0000000..018f500
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/ABI.h
@@ -0,0 +1,126 @@
+//===----- ABI.h - ABI related declarations ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These enums/classes describe ABI related information about constructors,
+// destructors and thunks.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_BASIC_ABI_H
+#define CLANG_BASIC_ABI_H
+
+#include "llvm/Support/DataTypes.h"
+
+namespace clang {
+
+/// CXXCtorType - C++ constructor types
+enum CXXCtorType {
+ Ctor_Complete, // Complete object ctor
+ Ctor_Base, // Base object ctor
+ Ctor_CompleteAllocating // Complete object allocating ctor
+};
+
+/// CXXDtorType - C++ destructor types
+enum CXXDtorType {
+ Dtor_Deleting, // Deleting dtor
+ Dtor_Complete, // Complete object dtor
+ Dtor_Base // Base object dtor
+};
+
+/// ReturnAdjustment - A return adjustment.
+struct ReturnAdjustment {
+ /// NonVirtual - The non-virtual adjustment from the derived object to its
+ /// nearest virtual base.
+ int64_t NonVirtual;
+
+  /// VBaseOffsetOffset - The offset (in bytes), relative to the address point,
+  /// of the virtual base class offset.
+ int64_t VBaseOffsetOffset;
+
+ ReturnAdjustment() : NonVirtual(0), VBaseOffsetOffset(0) { }
+
+ bool isEmpty() const { return !NonVirtual && !VBaseOffsetOffset; }
+
+ friend bool operator==(const ReturnAdjustment &LHS,
+ const ReturnAdjustment &RHS) {
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VBaseOffsetOffset == RHS.VBaseOffsetOffset;
+ }
+
+ friend bool operator<(const ReturnAdjustment &LHS,
+ const ReturnAdjustment &RHS) {
+ if (LHS.NonVirtual < RHS.NonVirtual)
+ return true;
+
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VBaseOffsetOffset < RHS.VBaseOffsetOffset;
+ }
+};
+
+/// ThisAdjustment - A 'this' pointer adjustment.
+struct ThisAdjustment {
+ /// NonVirtual - The non-virtual adjustment from the derived object to its
+ /// nearest virtual base.
+ int64_t NonVirtual;
+
+ /// VCallOffsetOffset - The offset (in bytes), relative to the address point,
+ /// of the virtual call offset.
+ int64_t VCallOffsetOffset;
+
+ ThisAdjustment() : NonVirtual(0), VCallOffsetOffset(0) { }
+
+ bool isEmpty() const { return !NonVirtual && !VCallOffsetOffset; }
+
+ friend bool operator==(const ThisAdjustment &LHS,
+ const ThisAdjustment &RHS) {
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VCallOffsetOffset == RHS.VCallOffsetOffset;
+ }
+
+ friend bool operator<(const ThisAdjustment &LHS,
+ const ThisAdjustment &RHS) {
+ if (LHS.NonVirtual < RHS.NonVirtual)
+ return true;
+
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VCallOffsetOffset < RHS.VCallOffsetOffset;
+ }
+};
+
+/// ThunkInfo - The 'this' pointer adjustment as well as an optional return
+/// adjustment for a thunk.
+struct ThunkInfo {
+ /// This - The 'this' pointer adjustment.
+ ThisAdjustment This;
+
+ /// Return - The return adjustment.
+ ReturnAdjustment Return;
+
+ ThunkInfo() { }
+
+ ThunkInfo(const ThisAdjustment &This, const ReturnAdjustment &Return)
+ : This(This), Return(Return) { }
+
+ friend bool operator==(const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ return LHS.This == RHS.This && LHS.Return == RHS.Return;
+ }
+
+ friend bool operator<(const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ if (LHS.This < RHS.This)
+ return true;
+
+ return LHS.This == RHS.This && LHS.Return < RHS.Return;
+ }
+
+ bool isEmpty() const { return This.isEmpty() && Return.isEmpty(); }
+};
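+
+// Illustrative sketch only (hypothetical values): a non-virtual thunk that
+// shifts 'this' by -8 bytes and needs no return adjustment would be described
+// with these structs as
+//   ThisAdjustment TA;
+//   TA.NonVirtual = -8;
+//   ThunkInfo TI(TA, ReturnAdjustment());
+// where TI.isEmpty() is false and only the 'this' adjustment applies.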
+
+} // end namespace clang
+
+#endif // CLANG_BASIC_ABI_H
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/AddressSpaces.h b/contrib/llvm/tools/clang/include/clang/Basic/AddressSpaces.h
new file mode 100644
index 0000000..d44a9c3b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/AddressSpaces.h
@@ -0,0 +1,44 @@
+//===--- AddressSpaces.h - Language-specific address spaces -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides definitions for the various language-specific address
+// spaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_ADDRESSSPACES_H
+#define LLVM_CLANG_BASIC_ADDRESSSPACES_H
+
+namespace clang {
+
+namespace LangAS {
+
+/// This enum defines the set of possible language-specific address spaces.
+/// It uses a high starting offset so as not to conflict with any address
+/// space used by a target.
+enum ID {
+ Offset = 0xFFFF00,
+
+ opencl_global = Offset,
+ opencl_local,
+ opencl_constant,
+
+ Last,
+ Count = Last-Offset
+};
+
+/// The type of a lookup table which maps from language-specific address spaces
+/// to target-specific ones.
+typedef unsigned Map[Count];
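+
+// As a sketch only (hypothetical numbers): a target whose OpenCL global,
+// local, and constant memories live in target address spaces 1, 3, and 2
+// could provide a table such as
+//   static const Map FakeTargetASMap = { 1, 3, 2 };
+// indexed by (LangAS::ID value - LangAS::Offset).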
+
+}
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/AllDiagnostics.h b/contrib/llvm/tools/clang/include/clang/Basic/AllDiagnostics.h
new file mode 100644
index 0000000..7e77435
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/AllDiagnostics.h
@@ -0,0 +1,39 @@
+//===--- AllDiagnostics.h - Aggregate Diagnostic headers --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file includes all the separate Diagnostic headers and some related
+// helpers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ALL_DIAGNOSTICS_H
+#define LLVM_CLANG_ALL_DIAGNOSTICS_H
+
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/Analysis/AnalysisDiagnostic.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Serialization/SerializationDiagnostic.h"
+
+namespace clang {
+template <size_t SizeOfStr, typename FieldType>
+class StringSizerHelper {
+ char FIELD_TOO_SMALL[SizeOfStr <= FieldType(~0U) ? 1 : -1];
+public:
+ enum { Size = SizeOfStr };
+};
+} // end namespace clang
+
+#define STR_SIZE(str, fieldTy) clang::StringSizerHelper<sizeof(str)-1, \
+ fieldTy>::Size
+
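+// Usage sketch: STR_SIZE("invalid operand", uint16_t) evaluates to 15 at
+// compile time, and compilation fails if the string's length cannot be
+// represented in the given field type.
+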
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Attr.td b/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
new file mode 100644
index 0000000..e8e0f35
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Attr.td
@@ -0,0 +1,716 @@
+////////////////////////////////////////////////////////////////////////////////
+// Note: This file is a work in progress. Please do not apply non-trivial
+// updates unless you have talked to Sean Hunt <rideau3@gmail.com> first.
+// Merely adding a new attribute is a trivial update.
+////////////////////////////////////////////////////////////////////////////////
+
+// An attribute's subject is whatever it appertains to. In this file, it is
+// more accurately a list of things that an attribute can appertain to. All
+// Decls and Stmts are possibly AttrSubjects (even though the syntax may not
+// allow attributes on a given Decl or Stmt).
+class AttrSubject;
+
+include "clang/Basic/DeclNodes.td"
+include "clang/Basic/StmtNodes.td"
+
+// A subset-subject is an AttrSubject constrained to operate only on some subset
+// of that subject.
+//
+// The description is used in output messages to specify what the subject
+// represents. FIXME: Deal with translation issues.
+//
+// The code fragment is a boolean expression that will confirm that the subject
+// meets the requirements; the subject will have the name S, and will have the
+// type specified by the base. It should be a simple boolean expression.
+class SubsetSubject<AttrSubject base, string description, code check>
+ : AttrSubject {
+ AttrSubject Base = base;
+ string Description = description;
+ code CheckCode = check;
+}
+
+// This describes the kinds of variable that C++0x allows [[aligned()]] to
+// appertain to.
+def NormalVar : SubsetSubject<Var, "non-register, non-parameter variable",
+ [{S->getStorageClass() != VarDecl::Register &&
+ S->getKind() != Decl::ImplicitParam &&
+ S->getKind() != Decl::ParmVar &&
+ S->getKind() != Decl::NonTypeTemplateParm}]>;
+def CXXVirtualMethod : SubsetSubject<CXXRecord, "virtual member function",
+ [{S->isVirtual()}]>;
+def NonBitField : SubsetSubject<Field, "non-bit field",
+ [{!S->isBitField()}]>;
+
+// A single argument to an attribute
+class Argument<string name> {
+ string Name = name;
+}
+
+class BoolArgument<string name> : Argument<name>;
+class IdentifierArgument<string name> : Argument<name>;
+class IntArgument<string name> : Argument<name>;
+class StringArgument<string name> : Argument<name>;
+class ExprArgument<string name> : Argument<name>;
+class FunctionArgument<string name> : Argument<name>;
+class TypeArgument<string name> : Argument<name>;
+class UnsignedArgument<string name> : Argument<name>;
+class SourceLocArgument<string name> : Argument<name>;
+class VariadicUnsignedArgument<string name> : Argument<name>;
+class VariadicExprArgument<string name> : Argument<name>;
+
+// A version of the form major.minor[.subminor].
+class VersionArgument<string name> : Argument<name>;
+
+// This one's a doozy, so it gets its own special type
+// It can be an unsigned integer or a type, and either can be dependent.
+class AlignedArgument<string name> : Argument<name>;
+
+// An integer argument with a default value
+class DefaultIntArgument<string name, int default> : IntArgument<name> {
+ int Default = default;
+}
+
+// This argument is more complex: it includes the enumerator type name,
+// a list of strings to accept, and a list of enumerators to map them to.
+class EnumArgument<string name, string type, list<string> values,
+ list<string> enums> : Argument<name> {
+ string Type = type;
+ list<string> Values = values;
+ list<string> Enums = enums;
+}
+
+class Attr {
+ // The various ways in which an attribute can be spelled in source
+ list<string> Spellings;
+ // The things to which an attribute can appertain
+ list<AttrSubject> Subjects;
+ // The arguments allowed on an attribute
+ list<Argument> Args = [];
+ // The namespaces in which the attribute appears in C++0x attributes.
+ // The attribute will not be permitted in C++0x attribute-specifiers if
+ // this is empty; the empty string can be used as a namespace.
+ list<string> Namespaces = [];
+ // Set to true for attributes with arguments which require delayed parsing.
+ bit LateParsed = 0;
+  // Set to true for attributes which must be instantiated within templates.
+  bit TemplateDependent = 0;
+  // Set to true for attributes which have a handler in Sema.
+ bit SemaHandler = 1;
+ // Any additional text that should be included verbatim in the class.
+ code AdditionalMembers = [{}];
+}
+
+/// An inheritable attribute is inherited by later redeclarations.
+class InheritableAttr : Attr;
+
+/// An inheritable parameter attribute is inherited by later
+/// redeclarations, even when it's written on a parameter.
+class InheritableParamAttr : InheritableAttr;
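+
+// Purely illustrative (not a real attribute): a definition using the classes
+// above would look like
+//   def Example : InheritableAttr {
+//     let Spellings = ["example"];
+//     let Subjects = [Function];
+//     let Args = [UnsignedArgument<"Level">];
+//   }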
+
+//
+// Attributes begin here
+//
+
+def Alias : InheritableAttr {
+ let Spellings = ["alias"];
+ let Args = [StringArgument<"Aliasee">];
+}
+
+def Aligned : InheritableAttr {
+ let Spellings = ["aligned"];
+ let Subjects = [NonBitField, NormalVar, Tag];
+ let Args = [AlignedArgument<"Alignment">];
+ let Namespaces = ["", "std"];
+}
+
+def AlignMac68k : InheritableAttr {
+ let Spellings = [];
+ let SemaHandler = 0;
+}
+
+def AlwaysInline : InheritableAttr {
+ let Spellings = ["always_inline"];
+}
+
+def AnalyzerNoReturn : InheritableAttr {
+ let Spellings = ["analyzer_noreturn"];
+}
+
+def Annotate : InheritableParamAttr {
+ let Spellings = ["annotate"];
+ let Args = [StringArgument<"Annotation">];
+}
+
+def AsmLabel : InheritableAttr {
+ let Spellings = [];
+ let Args = [StringArgument<"Label">];
+ let SemaHandler = 0;
+}
+
+def Availability : InheritableAttr {
+ let Spellings = ["availability"];
+ let Args = [IdentifierArgument<"platform">, VersionArgument<"introduced">,
+ VersionArgument<"deprecated">, VersionArgument<"obsoleted">,
+ BoolArgument<"unavailable">, StringArgument<"message">];
+ let AdditionalMembers =
+[{static llvm::StringRef getPrettyPlatformName(llvm::StringRef Platform) {
+ return llvm::StringSwitch<llvm::StringRef>(Platform)
+ .Case("ios", "iOS")
+ .Case("macosx", "Mac OS X")
+ .Default(llvm::StringRef());
+} }];
+}
+
+def Blocks : InheritableAttr {
+ let Spellings = ["blocks"];
+ let Args = [EnumArgument<"Type", "BlockType", ["byref"], ["ByRef"]>];
+}
+
+def CarriesDependency : InheritableParamAttr {
+ let Spellings = ["carries_dependency"];
+ let Subjects = [ParmVar, Function];
+ let Namespaces = ["", "std"];
+}
+
+def CDecl : InheritableAttr {
+ let Spellings = ["cdecl", "__cdecl"];
+}
+
+// cf_audited_transfer indicates that the given function has been
+// audited and has been marked with the appropriate cf_consumed and
+// cf_returns_retained attributes. It is generally applied by
+// '#pragma clang arc_cf_code_audited' rather than explicitly.
+def CFAuditedTransfer : InheritableAttr {
+ let Spellings = ["cf_audited_transfer"];
+ let Subjects = [Function];
+}
+
+// cf_unknown_transfer is an explicit opt-out of cf_audited_transfer.
+// It indicates that the function has unknown or unautomatable
+// transfer semantics.
+def CFUnknownTransfer : InheritableAttr {
+ let Spellings = ["cf_unknown_transfer"];
+ let Subjects = [Function];
+}
+
+def CFReturnsRetained : InheritableAttr {
+ let Spellings = ["cf_returns_retained"];
+ let Subjects = [ObjCMethod, Function];
+}
+
+def CFReturnsNotRetained : InheritableAttr {
+ let Spellings = ["cf_returns_not_retained"];
+ let Subjects = [ObjCMethod, Function];
+}
+
+def CFConsumed : InheritableParamAttr {
+ let Spellings = ["cf_consumed"];
+ let Subjects = [ParmVar];
+}
+
+def Cleanup : InheritableAttr {
+ let Spellings = ["cleanup"];
+ let Args = [FunctionArgument<"FunctionDecl">];
+}
+
+def Common : InheritableAttr {
+ let Spellings = ["common"];
+}
+
+def Const : InheritableAttr {
+ let Spellings = ["const"];
+}
+
+def Constructor : InheritableAttr {
+ let Spellings = ["constructor"];
+ let Args = [IntArgument<"Priority">];
+}
+
+def CUDAConstant : InheritableAttr {
+ let Spellings = ["constant"];
+}
+
+def CUDADevice : Attr {
+ let Spellings = ["device"];
+}
+
+def CUDAGlobal : InheritableAttr {
+ let Spellings = ["global"];
+}
+
+def CUDAHost : Attr {
+ let Spellings = ["host"];
+}
+
+def CUDALaunchBounds : InheritableAttr {
+ let Spellings = ["launch_bounds"];
+ let Args = [IntArgument<"MaxThreads">, DefaultIntArgument<"MinBlocks", 0>];
+}
+
+def CUDAShared : InheritableAttr {
+ let Spellings = ["shared"];
+}
+
+def OpenCLKernel : Attr {
+ let Spellings = ["opencl_kernel_function"];
+}
+
+def Deprecated : InheritableAttr {
+ let Spellings = ["deprecated"];
+ let Args = [StringArgument<"Message">];
+}
+
+def Destructor : InheritableAttr {
+ let Spellings = ["destructor"];
+ let Args = [IntArgument<"Priority">];
+}
+
+def DLLExport : InheritableAttr {
+ let Spellings = ["dllexport"];
+}
+
+def DLLImport : InheritableAttr {
+ let Spellings = ["dllimport"];
+}
+
+def FastCall : InheritableAttr {
+ let Spellings = ["fastcall", "__fastcall"];
+}
+
+def Final : InheritableAttr {
+ let Spellings = [];
+ let SemaHandler = 0;
+}
+
+def MsStruct : InheritableAttr {
+ let Spellings = ["__ms_struct__"];
+}
+
+def Format : InheritableAttr {
+ let Spellings = ["format"];
+ let Args = [StringArgument<"Type">, IntArgument<"FormatIdx">,
+ IntArgument<"FirstArg">];
+}
+
+def FormatArg : InheritableAttr {
+ let Spellings = ["format_arg"];
+ let Args = [IntArgument<"FormatIdx">];
+}
+
+def GNUInline : InheritableAttr {
+ let Spellings = ["gnu_inline"];
+}
+
+def IBAction : InheritableAttr {
+ let Spellings = ["ibaction"];
+}
+
+def IBOutlet : InheritableAttr {
+ let Spellings = ["iboutlet"];
+}
+
+def IBOutletCollection : InheritableAttr {
+ let Spellings = ["iboutletcollection"];
+ let Args = [TypeArgument<"Interface">, SourceLocArgument<"InterfaceLoc">];
+}
+
+def Malloc : InheritableAttr {
+ let Spellings = ["malloc"];
+}
+
+def MaxFieldAlignment : InheritableAttr {
+ let Spellings = [];
+ let Args = [UnsignedArgument<"Alignment">];
+ let SemaHandler = 0;
+}
+
+def MayAlias : InheritableAttr {
+ let Spellings = ["may_alias"];
+}
+
+def MSP430Interrupt : InheritableAttr {
+ let Spellings = [];
+ let Args = [UnsignedArgument<"Number">];
+ let SemaHandler = 0;
+}
+
+def MBlazeInterruptHandler : InheritableAttr {
+ let Spellings = [];
+ let SemaHandler = 0;
+}
+
+def MBlazeSaveVolatiles : InheritableAttr {
+ let Spellings = [];
+ let SemaHandler = 0;
+}
+
+def Naked : InheritableAttr {
+ let Spellings = ["naked"];
+}
+
+def ReturnsTwice : InheritableAttr {
+ let Spellings = ["returns_twice"];
+}
+
+def NoCommon : InheritableAttr {
+ let Spellings = ["nocommon"];
+}
+
+def NoDebug : InheritableAttr {
+ let Spellings = ["nodebug"];
+}
+
+def NoInline : InheritableAttr {
+ let Spellings = ["noinline"];
+}
+
+def NonNull : InheritableAttr {
+ let Spellings = ["nonnull"];
+ let Args = [VariadicUnsignedArgument<"Args">];
+ let AdditionalMembers =
+[{bool isNonNull(unsigned idx) const {
+ for (args_iterator i = args_begin(), e = args_end();
+ i != e; ++i)
+ if (*i == idx)
+ return true;
+ return false;
+ } }];
+}
+
+def NoReturn : InheritableAttr {
+ let Spellings = ["noreturn"];
+ // FIXME: Does GCC allow this on the function instead?
+ let Subjects = [Function];
+ let Namespaces = ["", "std"];
+}
+
+def NoInstrumentFunction : InheritableAttr {
+ let Spellings = ["no_instrument_function"];
+ let Subjects = [Function];
+}
+
+def NoThrow : InheritableAttr {
+ let Spellings = ["nothrow"];
+}
+
+def NSBridged : InheritableAttr {
+ let Spellings = ["ns_bridged"];
+ let Subjects = [Record];
+ let Args = [IdentifierArgument<"BridgedType">];
+}
+
+def NSReturnsRetained : InheritableAttr {
+ let Spellings = ["ns_returns_retained"];
+ let Subjects = [ObjCMethod, Function];
+}
+
+def NSReturnsNotRetained : InheritableAttr {
+ let Spellings = ["ns_returns_not_retained"];
+ let Subjects = [ObjCMethod, Function];
+}
+
+def NSReturnsAutoreleased : InheritableAttr {
+ let Spellings = ["ns_returns_autoreleased"];
+ let Subjects = [ObjCMethod, Function];
+}
+
+def NSConsumesSelf : InheritableAttr {
+ let Spellings = ["ns_consumes_self"];
+ let Subjects = [ObjCMethod];
+}
+
+def NSConsumed : InheritableParamAttr {
+ let Spellings = ["ns_consumed"];
+ let Subjects = [ParmVar];
+}
+
+def ObjCException : InheritableAttr {
+ let Spellings = ["objc_exception"];
+}
+
+def ObjCMethodFamily : InheritableAttr {
+ let Spellings = ["objc_method_family"];
+ let Subjects = [ObjCMethod];
+ let Args = [EnumArgument<"Family", "FamilyKind",
+ ["none", "alloc", "copy", "init", "mutableCopy", "new"],
+ ["OMF_None", "OMF_alloc", "OMF_copy", "OMF_init",
+ "OMF_mutableCopy", "OMF_new"]>];
+}
+
+def ObjCNSObject : InheritableAttr {
+ let Spellings = ["NSObject"];
+}
+
+def ObjCPreciseLifetime : Attr {
+ let Spellings = ["objc_precise_lifetime"];
+ let Subjects = [Var];
+}
+
+def ObjCReturnsInnerPointer : Attr {
+ let Spellings = ["objc_returns_inner_pointer"];
+ let Subjects = [ObjCMethod];
+}
+
+def ObjCRootClass : Attr {
+ let Spellings = ["objc_root_class"];
+ let Subjects = [ObjCInterface];
+}
+
+def Overloadable : Attr {
+ let Spellings = ["overloadable"];
+}
+
+def Override : InheritableAttr {
+ let Spellings = [];
+ let SemaHandler = 0;
+}
+
+def Ownership : InheritableAttr {
+ let Spellings = ["ownership_holds", "ownership_returns", "ownership_takes"];
+ let Args = [EnumArgument<"OwnKind", "OwnershipKind",
+ ["ownership_holds", "ownership_returns", "ownership_takes"],
+ ["Holds", "Returns", "Takes"]>,
+ StringArgument<"Module">, VariadicUnsignedArgument<"Args">];
+}
+
+def Packed : InheritableAttr {
+ let Spellings = ["packed"];
+}
+
+def Pcs : InheritableAttr {
+ let Spellings = ["pcs"];
+ let Args = [EnumArgument<"PCS", "PCSType",
+ ["aapcs", "aapcs-vfp"],
+ ["AAPCS", "AAPCS_VFP"]>];
+}
+
+def Pure : InheritableAttr {
+ let Spellings = ["pure"];
+}
+
+def Regparm : InheritableAttr {
+ let Spellings = ["regparm"];
+ let Args = [UnsignedArgument<"NumParams">];
+}
+
+def ReqdWorkGroupSize : InheritableAttr {
+ let Spellings = ["reqd_work_group_size"];
+ let Args = [UnsignedArgument<"XDim">, UnsignedArgument<"YDim">,
+ UnsignedArgument<"ZDim">];
+}
+
+def InitPriority : InheritableAttr {
+ let Spellings = ["init_priority"];
+ let Args = [UnsignedArgument<"Priority">];
+}
+
+def Section : InheritableAttr {
+ let Spellings = ["section"];
+ let Args = [StringArgument<"Name">];
+}
+
+def Sentinel : InheritableAttr {
+ let Spellings = ["sentinel"];
+ let Args = [DefaultIntArgument<"Sentinel", 0>,
+ DefaultIntArgument<"NullPos", 0>];
+}
+
+def StdCall : InheritableAttr {
+ let Spellings = ["stdcall", "__stdcall"];
+}
+
+def ThisCall : InheritableAttr {
+ let Spellings = ["thiscall", "__thiscall"];
+}
+
+def Pascal : InheritableAttr {
+ let Spellings = ["pascal", "__pascal"];
+}
+
+def TransparentUnion : InheritableAttr {
+ let Spellings = ["transparent_union"];
+}
+
+def Unavailable : InheritableAttr {
+ let Spellings = ["unavailable"];
+ let Args = [StringArgument<"Message">];
+}
+
+def ArcWeakrefUnavailable : InheritableAttr {
+ let Spellings = ["objc_arc_weak_reference_unavailable"];
+ let Subjects = [ObjCInterface];
+}
+
+def ObjCRequiresPropertyDefs : InheritableAttr {
+ let Spellings = ["objc_requires_property_definitions"];
+ let Subjects = [ObjCInterface];
+}
+
+def Unused : InheritableAttr {
+ let Spellings = ["unused"];
+}
+
+def Used : InheritableAttr {
+ let Spellings = ["used"];
+}
+
+def Uuid : InheritableAttr {
+ let Spellings = ["uuid"];
+ let Args = [StringArgument<"Guid">];
+ let Subjects = [CXXRecord];
+}
+
+def Visibility : InheritableAttr {
+ let Spellings = ["visibility"];
+ let Args = [EnumArgument<"Visibility", "VisibilityType",
+ ["default", "hidden", "internal", "protected"],
+ ["Default", "Hidden", "Hidden", "Protected"]>];
+}
+
+def VecReturn : InheritableAttr {
+ let Spellings = ["vecreturn"];
+ let Subjects = [CXXRecord];
+}
+
+def WarnUnusedResult : InheritableAttr {
+ let Spellings = ["warn_unused_result"];
+}
+
+def Weak : InheritableAttr {
+ let Spellings = ["weak"];
+}
+
+def WeakImport : InheritableAttr {
+ let Spellings = ["weak_import"];
+}
+
+def WeakRef : InheritableAttr {
+ let Spellings = ["weakref"];
+}
+
+def X86ForceAlignArgPointer : InheritableAttr {
+ let Spellings = [];
+}
+
+// AddressSafety attribute (e.g. for AddressSanitizer)
+def NoAddressSafetyAnalysis : InheritableAttr {
+ let Spellings = ["no_address_safety_analysis"];
+}
+
+// C/C++ Thread safety attributes (e.g. for deadlock, data race checking)
+
+def GuardedVar : InheritableAttr {
+ let Spellings = ["guarded_var"];
+}
+
+def PtGuardedVar : InheritableAttr {
+ let Spellings = ["pt_guarded_var"];
+}
+
+def Lockable : InheritableAttr {
+ let Spellings = ["lockable"];
+}
+
+def ScopedLockable : InheritableAttr {
+ let Spellings = ["scoped_lockable"];
+}
+
+def NoThreadSafetyAnalysis : InheritableAttr {
+ let Spellings = ["no_thread_safety_analysis"];
+}
+
+def GuardedBy : InheritableAttr {
+ let Spellings = ["guarded_by"];
+ let Args = [ExprArgument<"Arg">];
+ let LateParsed = 1;
+ let TemplateDependent = 1;
+}
+
+def PtGuardedBy : InheritableAttr {
+ let Spellings = ["pt_guarded_by"];
+ let Args = [ExprArgument<"Arg">];
+ let LateParsed = 1;
+ let TemplateDependent = 1;
+}
+
+def AcquiredAfter : InheritableAttr {
+ let Spellings = ["acquired_after"];
+ let Args = [VariadicExprArgument<"Args">];
+ let LateParsed = 1;
+ let TemplateDependent = 1;
+}
+
+def AcquiredBefore : InheritableAttr {
+ let Spellings = ["acquired_before"];
+ let Args = [VariadicExprArgument<"Args">];
+ let LateParsed = 1;
+ let TemplateDependent = 1;
+}
+
+def ExclusiveLockFunction : InheritableAttr {
+ let Spellings = ["exclusive_lock_function"];
+ let Args = [VariadicExprArgument<"Args">];
+ let LateParsed = 1;
+ let TemplateDependent = 1;
+}
+
+def SharedLockFunction : InheritableAttr {
+ let Spellings = ["shared_lock_function"];
+ let Args = [VariadicExprArgument<"Args">];
+ let LateParsed = 1;
+ let TemplateDependent = 1;
+}
+
+// The first argument is an integer or boolean value specifying the return value
+// of a successful lock acquisition.
+def ExclusiveTrylockFunction : InheritableAttr {
+ let Spellings = ["exclusive_trylock_function"];
+ let Args = [ExprArgument<"SuccessValue">, VariadicExprArgument<"Args">];
+ let LateParsed = 1;
+ let TemplateDependent = 1;
+}
+
+// The first argument is an integer or boolean value specifying the return value
+// of a successful lock acquisition.
+def SharedTrylockFunction : InheritableAttr {
+ let Spellings = ["shared_trylock_function"];
+ let Args = [ExprArgument<"SuccessValue">, VariadicExprArgument<"Args">];
+ let LateParsed = 1;
+ let TemplateDependent = 1;
+}
+
+def UnlockFunction : InheritableAttr {
+ let Spellings = ["unlock_function"];
+ let Args = [VariadicExprArgument<"Args">];
+ let LateParsed = 1;
+ let TemplateDependent = 1;
+}
+
+def LockReturned : InheritableAttr {
+ let Spellings = ["lock_returned"];
+ let Args = [ExprArgument<"Arg">];
+ let LateParsed = 1;
+ let TemplateDependent = 1;
+}
+
+def LocksExcluded : InheritableAttr {
+ let Spellings = ["locks_excluded"];
+ let Args = [VariadicExprArgument<"Args">];
+ let LateParsed = 1;
+ let TemplateDependent = 1;
+}
+
+def ExclusiveLocksRequired : InheritableAttr {
+ let Spellings = ["exclusive_locks_required"];
+ let Args = [VariadicExprArgument<"Args">];
+ let LateParsed = 1;
+ let TemplateDependent = 1;
+}
+
+def SharedLocksRequired : InheritableAttr {
+ let Spellings = ["shared_locks_required"];
+ let Args = [VariadicExprArgument<"Args">];
+ let LateParsed = 1;
+ let TemplateDependent = 1;
+}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h b/contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h
new file mode 100644
index 0000000..9d5ae58
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/AttrKinds.h
@@ -0,0 +1,33 @@
+//===----- AttrKinds.h - Enum values for C Attribute Kinds -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the attr::Kind enum.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ATTRKINDS_H
+#define LLVM_CLANG_ATTRKINDS_H
+
+namespace clang {
+
+namespace attr {
+
+// Kind - This is a list of all the recognized kinds of attributes.
+enum Kind {
+#define ATTR(X) X,
+#define LAST_INHERITABLE_ATTR(X) X, LAST_INHERITABLE = X,
+#define LAST_INHERITABLE_PARAM_ATTR(X) X, LAST_INHERITABLE_PARAM = X,
+#include "clang/Basic/AttrList.inc"
+ NUM_ATTRS
+};
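+
+// For example, an attribute defined as 'Aligned' in Attr.td is expected to
+// appear here as the enumerator attr::Aligned via the generated AttrList.inc.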
+
+} // end namespace attr
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def
new file mode 100644
index 0000000..d1af218
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.def
@@ -0,0 +1,836 @@
+//===--- Builtins.def - Builtin function info database ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the standard builtin function database. Users of this file
+// must define the BUILTIN macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+// FIXME: This should really be a .td file, but that requires modifying tblgen.
+// Perhaps tblgen should have plugins.
+
+// The first value provided to the macro specifies the function name of the
+// builtin, and results in a clang::builtin::BIXX enum value for XX.
+
+// The second value provided to the macro specifies the type of the function
+// (result value, then each argument) as follows:
+// v -> void
+// b -> boolean
+// c -> char
+// s -> short
+// i -> int
+// f -> float
+// d -> double
+// z -> size_t
+// F -> constant CFString
+// G -> id
+// H -> SEL
+// a -> __builtin_va_list
+// A -> "reference" to __builtin_va_list
+//  V -> Vector, followed by the number of elements and the base type.
+// X -> _Complex, followed by the base type.
+// Y -> ptrdiff_t
+// P -> FILE
+// J -> jmp_buf
+// SJ -> sigjmp_buf
+// K -> ucontext_t
+// . -> "...". This may only occur at the end of the function list.
+//
+// Types may be prefixed with the following modifiers:
+// L -> long (e.g. Li for 'long int')
+// LL -> long long
+// LLL -> __int128_t (e.g. LLLi)
+// S -> signed
+// U -> unsigned
+// I -> Required to constant fold to an integer constant expression.
+//
+// Types may be postfixed with the following modifiers:
+// * -> pointer (optionally followed by an address space number)
+// & -> reference (optionally followed by an address space number)
+// C -> const
+// D -> volatile
+
+// The third value provided to the macro specifies information about attributes
+// of the function. These must be kept in sync with the predicates in the
+// Builtin::Context class. Currently we have:
+// n -> nothrow
+// r -> noreturn
+// c -> const
+// t -> signature is meaningless, use custom typechecking
+// F -> this is a libc/libm function with a '__builtin_' prefix added.
+// f -> this is a libc/libm function without the '__builtin_' prefix. It can
+// be followed by ':headername:' to state which header this function
+// comes from.
+// p:N: -> this is a printf-like function whose Nth argument is the format
+// string.
+// P:N: -> similar to the p:N: attribute, but the function is like vprintf
+// in that it accepts its arguments as a va_list rather than
+// through an ellipsis
+// s:N: -> this is a scanf-like function whose Nth argument is the format
+// string.
+// S:N: -> similar to the s:N: attribute, but the function is like vscanf
+// in that it accepts its arguments as a va_list rather than
+// through an ellipsis
+// e -> const, but only when -fmath-errno=0
+// j -> returns_twice (like setjmp)
+// FIXME: gcc has nonnull
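+//
+// As a worked example, the entry
+//   BUILTIN(__builtin_modf, "ddd*", "Fn")
+// below declares 'double __builtin_modf(double, double *)': 'd' result,
+// 'd' argument, 'd*' pointer-to-double argument; the attribute string "Fn"
+// marks it as a nothrow libm function carrying the '__builtin_' prefix.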
+
+#if defined(BUILTIN) && !defined(LIBBUILTIN)
+# define LIBBUILTIN(ID, TYPE, ATTRS, HEADER, BUILTIN_LANG) BUILTIN(ID, TYPE, ATTRS)
+#endif
+
+// Standard libc/libm functions:
+BUILTIN(__builtin_atan2 , "ddd" , "Fnc")
+BUILTIN(__builtin_atan2f, "fff" , "Fnc")
+BUILTIN(__builtin_atan2l, "LdLdLd", "Fnc")
+BUILTIN(__builtin_abs , "ii" , "ncF")
+BUILTIN(__builtin_copysign, "ddd", "ncF")
+BUILTIN(__builtin_copysignf, "fff", "ncF")
+BUILTIN(__builtin_copysignl, "LdLdLd", "ncF")
+BUILTIN(__builtin_fabs , "dd" , "ncF")
+BUILTIN(__builtin_fabsf, "ff" , "ncF")
+BUILTIN(__builtin_fabsl, "LdLd", "ncF")
+BUILTIN(__builtin_fmod , "ddd" , "Fnc")
+BUILTIN(__builtin_fmodf, "fff" , "Fnc")
+BUILTIN(__builtin_fmodl, "LdLdLd", "Fnc")
+BUILTIN(__builtin_frexp , "ddi*" , "Fn")
+BUILTIN(__builtin_frexpf, "ffi*" , "Fn")
+BUILTIN(__builtin_frexpl, "LdLdi*", "Fn")
+BUILTIN(__builtin_huge_val, "d", "nc")
+BUILTIN(__builtin_huge_valf, "f", "nc")
+BUILTIN(__builtin_huge_vall, "Ld", "nc")
+BUILTIN(__builtin_inf , "d" , "nc")
+BUILTIN(__builtin_inff , "f" , "nc")
+BUILTIN(__builtin_infl , "Ld" , "nc")
+BUILTIN(__builtin_labs , "LiLi" , "Fnc")
+BUILTIN(__builtin_llabs, "LLiLLi", "Fnc")
+BUILTIN(__builtin_ldexp , "ddi" , "Fnc")
+BUILTIN(__builtin_ldexpf, "ffi" , "Fnc")
+BUILTIN(__builtin_ldexpl, "LdLdi", "Fnc")
+BUILTIN(__builtin_modf , "ddd*" , "Fn")
+BUILTIN(__builtin_modff, "fff*" , "Fn")
+BUILTIN(__builtin_modfl, "LdLdLd*", "Fn")
+BUILTIN(__builtin_nan, "dcC*" , "ncF")
+BUILTIN(__builtin_nanf, "fcC*" , "ncF")
+BUILTIN(__builtin_nanl, "LdcC*", "ncF")
+BUILTIN(__builtin_nans, "dcC*" , "ncF")
+BUILTIN(__builtin_nansf, "fcC*" , "ncF")
+BUILTIN(__builtin_nansl, "LdcC*", "ncF")
+BUILTIN(__builtin_powi , "ddi" , "Fnc")
+BUILTIN(__builtin_powif, "ffi" , "Fnc")
+BUILTIN(__builtin_powil, "LdLdi", "Fnc")
+BUILTIN(__builtin_pow , "ddd" , "Fnc")
+BUILTIN(__builtin_powf, "fff" , "Fnc")
+BUILTIN(__builtin_powl, "LdLdLd", "Fnc")
+
+// Standard unary libc/libm functions with double/float/long double variants:
+BUILTIN(__builtin_acos , "dd" , "Fnc")
+BUILTIN(__builtin_acosf, "ff" , "Fnc")
+BUILTIN(__builtin_acosl, "LdLd", "Fnc")
+BUILTIN(__builtin_acosh , "dd" , "Fnc")
+BUILTIN(__builtin_acoshf, "ff" , "Fnc")
+BUILTIN(__builtin_acoshl, "LdLd", "Fnc")
+BUILTIN(__builtin_asin , "dd" , "Fnc")
+BUILTIN(__builtin_asinf, "ff" , "Fnc")
+BUILTIN(__builtin_asinl, "LdLd", "Fnc")
+BUILTIN(__builtin_asinh , "dd" , "Fnc")
+BUILTIN(__builtin_asinhf, "ff" , "Fnc")
+BUILTIN(__builtin_asinhl, "LdLd", "Fnc")
+BUILTIN(__builtin_atan , "dd" , "Fnc")
+BUILTIN(__builtin_atanf, "ff" , "Fnc")
+BUILTIN(__builtin_atanl, "LdLd", "Fnc")
+BUILTIN(__builtin_atanh , "dd", "Fnc")
+BUILTIN(__builtin_atanhf, "ff", "Fnc")
+BUILTIN(__builtin_atanhl, "LdLd", "Fnc")
+BUILTIN(__builtin_cbrt , "dd", "Fnc")
+BUILTIN(__builtin_cbrtf, "ff", "Fnc")
+BUILTIN(__builtin_cbrtl, "LdLd", "Fnc")
+BUILTIN(__builtin_ceil , "dd" , "Fnc")
+BUILTIN(__builtin_ceilf, "ff" , "Fnc")
+BUILTIN(__builtin_ceill, "LdLd", "Fnc")
+BUILTIN(__builtin_cos , "dd" , "Fnc")
+BUILTIN(__builtin_cosf, "ff" , "Fnc")
+BUILTIN(__builtin_cosh , "dd" , "Fnc")
+BUILTIN(__builtin_coshf, "ff" , "Fnc")
+BUILTIN(__builtin_coshl, "LdLd", "Fnc")
+BUILTIN(__builtin_cosl, "LdLd", "Fnc")
+BUILTIN(__builtin_erf , "dd", "Fnc")
+BUILTIN(__builtin_erff, "ff", "Fnc")
+BUILTIN(__builtin_erfl, "LdLd", "Fnc")
+BUILTIN(__builtin_erfc , "dd", "Fnc")
+BUILTIN(__builtin_erfcf, "ff", "Fnc")
+BUILTIN(__builtin_erfcl, "LdLd", "Fnc")
+BUILTIN(__builtin_exp , "dd" , "Fnc")
+BUILTIN(__builtin_expf, "ff" , "Fnc")
+BUILTIN(__builtin_expl, "LdLd", "Fnc")
+BUILTIN(__builtin_exp2 , "dd" , "Fnc")
+BUILTIN(__builtin_exp2f, "ff" , "Fnc")
+BUILTIN(__builtin_exp2l, "LdLd", "Fnc")
+BUILTIN(__builtin_expm1 , "dd", "Fnc")
+BUILTIN(__builtin_expm1f, "ff", "Fnc")
+BUILTIN(__builtin_expm1l, "LdLd", "Fnc")
+BUILTIN(__builtin_fdim, "ddd", "Fnc")
+BUILTIN(__builtin_fdimf, "fff", "Fnc")
+BUILTIN(__builtin_fdiml, "LdLdLd", "Fnc")
+BUILTIN(__builtin_floor , "dd" , "Fnc")
+BUILTIN(__builtin_floorf, "ff" , "Fnc")
+BUILTIN(__builtin_floorl, "LdLd", "Fnc")
+BUILTIN(__builtin_fma, "dddd", "Fnc")
+BUILTIN(__builtin_fmaf, "ffff", "Fnc")
+BUILTIN(__builtin_fmal, "LdLdLdLd", "Fnc")
+BUILTIN(__builtin_fmax, "ddd", "Fnc")
+BUILTIN(__builtin_fmaxf, "fff", "Fnc")
+BUILTIN(__builtin_fmaxl, "LdLdLd", "Fnc")
+BUILTIN(__builtin_fmin, "ddd", "Fnc")
+BUILTIN(__builtin_fminf, "fff", "Fnc")
+BUILTIN(__builtin_fminl, "LdLdLd", "Fnc")
+BUILTIN(__builtin_hypot , "ddd" , "Fnc")
+BUILTIN(__builtin_hypotf, "fff" , "Fnc")
+BUILTIN(__builtin_hypotl, "LdLdLd", "Fnc")
+BUILTIN(__builtin_ilogb , "id", "Fnc")
+BUILTIN(__builtin_ilogbf, "if", "Fnc")
+BUILTIN(__builtin_ilogbl, "iLd", "Fnc")
+BUILTIN(__builtin_lgamma , "dd", "Fnc")
+BUILTIN(__builtin_lgammaf, "ff", "Fnc")
+BUILTIN(__builtin_lgammal, "LdLd", "Fnc")
+BUILTIN(__builtin_llrint, "LLid", "Fnc")
+BUILTIN(__builtin_llrintf, "LLif", "Fnc")
+BUILTIN(__builtin_llrintl, "LLiLd", "Fnc")
+BUILTIN(__builtin_llround , "LLid", "Fnc")
+BUILTIN(__builtin_llroundf, "LLif", "Fnc")
+BUILTIN(__builtin_llroundl, "LLiLd", "Fnc")
+BUILTIN(__builtin_log , "dd" , "Fnc")
+BUILTIN(__builtin_log10 , "dd" , "Fnc")
+BUILTIN(__builtin_log10f, "ff" , "Fnc")
+BUILTIN(__builtin_log10l, "LdLd", "Fnc")
+BUILTIN(__builtin_log1p , "dd" , "Fnc")
+BUILTIN(__builtin_log1pf, "ff" , "Fnc")
+BUILTIN(__builtin_log1pl, "LdLd", "Fnc")
+BUILTIN(__builtin_log2, "dd" , "Fnc")
+BUILTIN(__builtin_log2f, "ff" , "Fnc")
+BUILTIN(__builtin_log2l, "LdLd" , "Fnc")
+BUILTIN(__builtin_logb , "dd", "Fnc")
+BUILTIN(__builtin_logbf, "ff", "Fnc")
+BUILTIN(__builtin_logbl, "LdLd", "Fnc")
+BUILTIN(__builtin_logf, "ff" , "Fnc")
+BUILTIN(__builtin_logl, "LdLd", "Fnc")
+BUILTIN(__builtin_lrint , "Lid", "Fnc")
+BUILTIN(__builtin_lrintf, "Lif", "Fnc")
+BUILTIN(__builtin_lrintl, "LiLd", "Fnc")
+BUILTIN(__builtin_lround , "Lid", "Fnc")
+BUILTIN(__builtin_lroundf, "Lif", "Fnc")
+BUILTIN(__builtin_lroundl, "LiLd", "Fnc")
+BUILTIN(__builtin_nearbyint , "dd", "Fnc")
+BUILTIN(__builtin_nearbyintf, "ff", "Fnc")
+BUILTIN(__builtin_nearbyintl, "LdLd", "Fnc")
+BUILTIN(__builtin_nextafter , "ddd", "Fnc")
+BUILTIN(__builtin_nextafterf, "fff", "Fnc")
+BUILTIN(__builtin_nextafterl, "LdLdLd", "Fnc")
+BUILTIN(__builtin_nexttoward , "ddd", "Fnc")
+BUILTIN(__builtin_nexttowardf, "fff", "Fnc")
+BUILTIN(__builtin_nexttowardl, "LdLdLd", "Fnc")
+BUILTIN(__builtin_remainder , "ddd", "Fnc")
+BUILTIN(__builtin_remainderf, "fff", "Fnc")
+BUILTIN(__builtin_remainderl, "LdLdLd", "Fnc")
+BUILTIN(__builtin_remquo , "dddi*", "Fn")
+BUILTIN(__builtin_remquof, "fffi*", "Fn")
+BUILTIN(__builtin_remquol, "LdLdLdi*", "Fn")
+BUILTIN(__builtin_rint , "dd", "Fnc")
+BUILTIN(__builtin_rintf, "ff", "Fnc")
+BUILTIN(__builtin_rintl, "LdLd", "Fnc")
+BUILTIN(__builtin_round, "dd" , "Fnc")
+BUILTIN(__builtin_roundf, "ff" , "Fnc")
+BUILTIN(__builtin_roundl, "LdLd" , "Fnc")
+BUILTIN(__builtin_scalbln , "ddLi", "Fnc")
+BUILTIN(__builtin_scalblnf, "ffLi", "Fnc")
+BUILTIN(__builtin_scalblnl, "LdLdLi", "Fnc")
+BUILTIN(__builtin_scalbn , "ddi", "Fnc")
+BUILTIN(__builtin_scalbnf, "ffi", "Fnc")
+BUILTIN(__builtin_scalbnl, "LdLdi", "Fnc")
+BUILTIN(__builtin_sin , "dd" , "Fnc")
+BUILTIN(__builtin_sinf, "ff" , "Fnc")
+BUILTIN(__builtin_sinh , "dd" , "Fnc")
+BUILTIN(__builtin_sinhf, "ff" , "Fnc")
+BUILTIN(__builtin_sinhl, "LdLd", "Fnc")
+BUILTIN(__builtin_sinl, "LdLd", "Fnc")
+BUILTIN(__builtin_sqrt , "dd" , "Fnc")
+BUILTIN(__builtin_sqrtf, "ff" , "Fnc")
+BUILTIN(__builtin_sqrtl, "LdLd", "Fnc")
+BUILTIN(__builtin_tan , "dd" , "Fnc")
+BUILTIN(__builtin_tanf, "ff" , "Fnc")
+BUILTIN(__builtin_tanh , "dd" , "Fnc")
+BUILTIN(__builtin_tanhf, "ff" , "Fnc")
+BUILTIN(__builtin_tanhl, "LdLd", "Fnc")
+BUILTIN(__builtin_tanl, "LdLd", "Fnc")
+BUILTIN(__builtin_tgamma , "dd", "Fnc")
+BUILTIN(__builtin_tgammaf, "ff", "Fnc")
+BUILTIN(__builtin_tgammal, "LdLd", "Fnc")
+BUILTIN(__builtin_trunc , "dd", "Fnc")
+BUILTIN(__builtin_truncf, "ff", "Fnc")
+BUILTIN(__builtin_truncl, "LdLd", "Fnc")
+
+// C99 complex builtins
+BUILTIN(__builtin_cabs, "dXd", "Fnc")
+BUILTIN(__builtin_cabsf, "fXf", "Fnc")
+BUILTIN(__builtin_cabsl, "LdXLd", "Fnc")
+BUILTIN(__builtin_cacos, "XdXd", "Fnc")
+BUILTIN(__builtin_cacosf, "XfXf", "Fnc")
+BUILTIN(__builtin_cacosh, "XdXd", "Fnc")
+BUILTIN(__builtin_cacoshf, "XfXf", "Fnc")
+BUILTIN(__builtin_cacoshl, "XLdXLd", "Fnc")
+BUILTIN(__builtin_cacosl, "XLdXLd", "Fnc")
+BUILTIN(__builtin_carg, "dXd", "Fnc")
+BUILTIN(__builtin_cargf, "fXf", "Fnc")
+BUILTIN(__builtin_cargl, "LdXLd", "Fnc")
+BUILTIN(__builtin_casin, "XdXd", "Fnc")
+BUILTIN(__builtin_casinf, "XfXf", "Fnc")
+BUILTIN(__builtin_casinh, "XdXd", "Fnc")
+BUILTIN(__builtin_casinhf, "XfXf", "Fnc")
+BUILTIN(__builtin_casinhl, "XLdXLd", "Fnc")
+BUILTIN(__builtin_casinl, "XLdXLd", "Fnc")
+BUILTIN(__builtin_catan, "XdXd", "Fnc")
+BUILTIN(__builtin_catanf, "XfXf", "Fnc")
+BUILTIN(__builtin_catanh, "XdXd", "Fnc")
+BUILTIN(__builtin_catanhf, "XfXf", "Fnc")
+BUILTIN(__builtin_catanhl, "XLdXLd", "Fnc")
+BUILTIN(__builtin_catanl, "XLdXLd", "Fnc")
+BUILTIN(__builtin_ccos, "XdXd", "Fnc")
+BUILTIN(__builtin_ccosf, "XfXf", "Fnc")
+BUILTIN(__builtin_ccosl, "XLdXLd", "Fnc")
+BUILTIN(__builtin_ccosh, "XdXd", "Fnc")
+BUILTIN(__builtin_ccoshf, "XfXf", "Fnc")
+BUILTIN(__builtin_ccoshl, "XLdXLd", "Fnc")
+BUILTIN(__builtin_cexp, "XdXd", "Fnc")
+BUILTIN(__builtin_cexpf, "XfXf", "Fnc")
+BUILTIN(__builtin_cexpl, "XLdXLd", "Fnc")
+BUILTIN(__builtin_cimag, "dXd", "Fnc")
+BUILTIN(__builtin_cimagf, "fXf", "Fnc")
+BUILTIN(__builtin_cimagl, "LdXLd", "Fnc")
+BUILTIN(__builtin_conj, "XdXd", "Fnc")
+BUILTIN(__builtin_conjf, "XfXf", "Fnc")
+BUILTIN(__builtin_conjl, "XLdXLd", "Fnc")
+BUILTIN(__builtin_clog, "XdXd", "Fnc")
+BUILTIN(__builtin_clogf, "XfXf", "Fnc")
+BUILTIN(__builtin_clogl, "XLdXLd", "Fnc")
+BUILTIN(__builtin_cproj, "XdXd", "Fnc")
+BUILTIN(__builtin_cprojf, "XfXf", "Fnc")
+BUILTIN(__builtin_cprojl, "XLdXLd", "Fnc")
+BUILTIN(__builtin_cpow, "XdXdXd", "Fnc")
+BUILTIN(__builtin_cpowf, "XfXfXf", "Fnc")
+BUILTIN(__builtin_cpowl, "XLdXLdXLd", "Fnc")
+BUILTIN(__builtin_creal, "dXd", "Fnc")
+BUILTIN(__builtin_crealf, "fXf", "Fnc")
+BUILTIN(__builtin_creall, "LdXLd", "Fnc")
+BUILTIN(__builtin_csin, "XdXd", "Fnc")
+BUILTIN(__builtin_csinf, "XfXf", "Fnc")
+BUILTIN(__builtin_csinl, "XLdXLd", "Fnc")
+BUILTIN(__builtin_csinh, "XdXd", "Fnc")
+BUILTIN(__builtin_csinhf, "XfXf", "Fnc")
+BUILTIN(__builtin_csinhl, "XLdXLd", "Fnc")
+BUILTIN(__builtin_csqrt, "XdXd", "Fnc")
+BUILTIN(__builtin_csqrtf, "XfXf", "Fnc")
+BUILTIN(__builtin_csqrtl, "XLdXLd", "Fnc")
+BUILTIN(__builtin_ctan, "XdXd", "Fnc")
+BUILTIN(__builtin_ctanf, "XfXf", "Fnc")
+BUILTIN(__builtin_ctanl, "XLdXLd", "Fnc")
+BUILTIN(__builtin_ctanh, "XdXd", "Fnc")
+BUILTIN(__builtin_ctanhf, "XfXf", "Fnc")
+BUILTIN(__builtin_ctanhl, "XLdXLd", "Fnc")
+
+// FP Comparisons.
+BUILTIN(__builtin_isgreater , "i.", "nc")
+BUILTIN(__builtin_isgreaterequal, "i.", "nc")
+BUILTIN(__builtin_isless , "i.", "nc")
+BUILTIN(__builtin_islessequal , "i.", "nc")
+BUILTIN(__builtin_islessgreater , "i.", "nc")
+BUILTIN(__builtin_isunordered , "i.", "nc")
+
+// Unary FP classification
+BUILTIN(__builtin_fpclassify, "iiiii.", "nc")
+BUILTIN(__builtin_isfinite, "i.", "nc")
+BUILTIN(__builtin_isinf, "i.", "nc")
+BUILTIN(__builtin_isinf_sign, "i.", "nc")
+BUILTIN(__builtin_isnan, "i.", "nc")
+BUILTIN(__builtin_isnormal, "i.", "nc")
+
+// FP signbit builtins
+BUILTIN(__builtin_signbit, "id", "nc")
+BUILTIN(__builtin_signbitf, "if", "nc")
+BUILTIN(__builtin_signbitl, "iLd", "nc")
+
+// Builtins for arithmetic.
+BUILTIN(__builtin_clzs , "iUs" , "nc")
+BUILTIN(__builtin_clz , "iUi" , "nc")
+BUILTIN(__builtin_clzl , "iULi" , "nc")
+BUILTIN(__builtin_clzll, "iULLi", "nc")
+// TODO: int clzimax(uintmax_t)
+BUILTIN(__builtin_ctzs , "iUs" , "nc")
+BUILTIN(__builtin_ctz , "iUi" , "nc")
+BUILTIN(__builtin_ctzl , "iULi" , "nc")
+BUILTIN(__builtin_ctzll, "iULLi", "nc")
+// TODO: int ctzimax(uintmax_t)
+BUILTIN(__builtin_ffs , "iUi" , "nc")
+BUILTIN(__builtin_ffsl , "iULi" , "nc")
+BUILTIN(__builtin_ffsll, "iULLi", "nc")
+BUILTIN(__builtin_parity , "iUi" , "nc")
+BUILTIN(__builtin_parityl , "iULi" , "nc")
+BUILTIN(__builtin_parityll, "iULLi", "nc")
+BUILTIN(__builtin_popcount , "iUi" , "nc")
+BUILTIN(__builtin_popcountl , "iULi" , "nc")
+BUILTIN(__builtin_popcountll, "iULLi", "nc")
+
+// FIXME: These type signatures are not correct for targets with int != 32 bits
+// or with ULL != 64 bits.
+BUILTIN(__builtin_bswap32, "UiUi", "nc")
+BUILTIN(__builtin_bswap64, "ULLiULLi", "nc")
+
+// Random GCC builtins
+BUILTIN(__builtin_constant_p, "i.", "nct")
+BUILTIN(__builtin_classify_type, "i.", "nct")
+BUILTIN(__builtin___CFStringMakeConstantString, "FC*cC*", "nc")
+BUILTIN(__builtin___NSStringMakeConstantString, "FC*cC*", "nc")
+BUILTIN(__builtin_va_start, "vA.", "nt")
+BUILTIN(__builtin_va_end, "vA", "n")
+BUILTIN(__builtin_va_copy, "vAA", "n")
+BUILTIN(__builtin_stdarg_start, "vA.", "n")
+BUILTIN(__builtin_bcmp, "iv*v*z", "n")
+BUILTIN(__builtin_bcopy, "vv*v*z", "n")
+BUILTIN(__builtin_bzero, "vv*z", "nF")
+BUILTIN(__builtin_fprintf, "iP*cC*.", "Fp:1:")
+BUILTIN(__builtin_memchr, "v*vC*iz", "nF")
+BUILTIN(__builtin_memcmp, "ivC*vC*z", "nF")
+BUILTIN(__builtin_memcpy, "v*v*vC*z", "nF")
+BUILTIN(__builtin_memmove, "v*v*vC*z", "nF")
+BUILTIN(__builtin_mempcpy, "v*v*vC*z", "nF")
+BUILTIN(__builtin_memset, "v*v*iz", "nF")
+BUILTIN(__builtin_printf, "icC*.", "Fp:0:")
+BUILTIN(__builtin_stpcpy, "c*c*cC*", "nF")
+BUILTIN(__builtin_stpncpy, "c*c*cC*z", "nF")
+BUILTIN(__builtin_strcasecmp, "icC*cC*", "nF")
+BUILTIN(__builtin_strcat, "c*c*cC*", "nF")
+BUILTIN(__builtin_strchr, "c*cC*i", "nF")
+BUILTIN(__builtin_strcmp, "icC*cC*", "nF")
+BUILTIN(__builtin_strcpy, "c*c*cC*", "nF")
+BUILTIN(__builtin_strcspn, "zcC*cC*", "nF")
+BUILTIN(__builtin_strdup, "c*cC*", "nF")
+BUILTIN(__builtin_strlen, "zcC*", "nF")
+BUILTIN(__builtin_strncasecmp, "icC*cC*z", "nF")
+BUILTIN(__builtin_strncat, "c*c*cC*z", "nF")
+BUILTIN(__builtin_strncmp, "icC*cC*z", "nF")
+BUILTIN(__builtin_strncpy, "c*c*cC*z", "nF")
+BUILTIN(__builtin_strndup, "c*cC*z", "nF")
+BUILTIN(__builtin_strpbrk, "c*cC*cC*", "nF")
+BUILTIN(__builtin_strrchr, "c*cC*i", "nF")
+BUILTIN(__builtin_strspn, "zcC*cC*", "nF")
+BUILTIN(__builtin_strstr, "c*cC*cC*", "nF")
+BUILTIN(__builtin_return_address, "v*IUi", "n")
+BUILTIN(__builtin_extract_return_addr, "v*v*", "n")
+BUILTIN(__builtin_frame_address, "v*IUi", "n")
+BUILTIN(__builtin_flt_rounds, "i", "nc")
+BUILTIN(__builtin_setjmp, "iv**", "j")
+BUILTIN(__builtin_longjmp, "vv**i", "r")
+BUILTIN(__builtin_unwind_init, "v", "")
+BUILTIN(__builtin_eh_return_data_regno, "iIi", "nc")
+BUILTIN(__builtin_snprintf, "ic*zcC*.", "nFp:2:")
+BUILTIN(__builtin_vsprintf, "ic*cC*a", "nFP:1:")
+BUILTIN(__builtin_vsnprintf, "ic*zcC*a", "nFP:2:")
+
+// GCC exception builtins
+BUILTIN(__builtin_eh_return, "vzv*", "r") // FIXME: Takes intptr_t, not size_t!
+BUILTIN(__builtin_frob_return_addr, "v*v*", "n")
+BUILTIN(__builtin_dwarf_cfa, "v*", "n")
+BUILTIN(__builtin_init_dwarf_reg_size_table, "vv*", "n")
+BUILTIN(__builtin_dwarf_sp_column, "Ui", "n")
+BUILTIN(__builtin_extend_pointer, "ULLiv*", "n") // _Unwind_Word == uint64_t
+
+// GCC Object size checking builtins
+BUILTIN(__builtin_object_size, "zvC*i", "n")
+BUILTIN(__builtin___memcpy_chk, "v*v*vC*zz", "nF")
+BUILTIN(__builtin___memccpy_chk, "v*v*vC*iz", "nF")
+BUILTIN(__builtin___memmove_chk, "v*v*vC*zz", "nF")
+BUILTIN(__builtin___mempcpy_chk, "v*v*vC*zz", "nF")
+BUILTIN(__builtin___memset_chk, "v*v*izz", "nF")
+BUILTIN(__builtin___stpcpy_chk, "c*c*cC*z", "nF")
+BUILTIN(__builtin___strcat_chk, "c*c*cC*z", "nF")
+BUILTIN(__builtin___strcpy_chk, "c*c*cC*z", "nF")
+BUILTIN(__builtin___strlcat_chk, "c*c*cC*zz", "nF")
+BUILTIN(__builtin___strlcpy_chk, "c*c*cC*zz", "nF")
+BUILTIN(__builtin___strncat_chk, "c*c*cC*zz", "nF")
+BUILTIN(__builtin___strncpy_chk, "c*c*cC*zz", "nF")
+BUILTIN(__builtin___stpncpy_chk, "c*c*cC*zz", "nF")
+BUILTIN(__builtin___snprintf_chk, "ic*zizcC*.", "Fp:4:")
+BUILTIN(__builtin___sprintf_chk, "ic*izcC*.", "Fp:3:")
+BUILTIN(__builtin___vsnprintf_chk, "ic*zizcC*a", "FP:4:")
+BUILTIN(__builtin___vsprintf_chk, "ic*izcC*a", "FP:3:")
+BUILTIN(__builtin___fprintf_chk, "iP*icC*.", "Fp:2:")
+BUILTIN(__builtin___printf_chk, "iicC*.", "Fp:1:")
+BUILTIN(__builtin___vfprintf_chk, "iP*icC*a", "FP:2:")
+BUILTIN(__builtin___vprintf_chk, "iicC*a", "FP:1:")
+
+BUILTIN(__builtin_expect, "LiLiLi" , "nc")
+BUILTIN(__builtin_prefetch, "vvC*.", "nc")
+BUILTIN(__builtin_trap, "v", "nr")
+BUILTIN(__builtin_unreachable, "v", "nr")
+BUILTIN(__builtin_shufflevector, "v." , "nc")
+BUILTIN(__builtin_alloca, "v*z" , "n")
+
+// "Overloaded" Atomic operator builtins. These are overloaded to support data
+// types of i8, i16, i32, i64, and i128. The front-end sees calls to the
+// non-suffixed version of these (which has a bogus type) and transforms them to
+// the right overloaded version in Sema (plus casts).
+
+// FIXME: These assume that char -> i8, short -> i16, int -> i32,
+// long long -> i64.
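+//
+// For instance, a call to __sync_fetch_and_add(&i, 1) on an 'int i' is
+// expected to be rewritten into __sync_fetch_and_add_4, whose signature
+// "iiD*i." below reads as 'int(volatile int *, int, ...)'.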
+
+BUILTIN(__sync_fetch_and_add, "v.", "t")
+BUILTIN(__sync_fetch_and_add_1, "ccD*c.", "nt")
+BUILTIN(__sync_fetch_and_add_2, "ssD*s.", "nt")
+BUILTIN(__sync_fetch_and_add_4, "iiD*i.", "nt")
+BUILTIN(__sync_fetch_and_add_8, "LLiLLiD*LLi.", "nt")
+BUILTIN(__sync_fetch_and_add_16, "LLLiLLLiD*LLLi.", "nt")
+
+BUILTIN(__sync_fetch_and_sub, "v.", "t")
+BUILTIN(__sync_fetch_and_sub_1, "ccD*c.", "nt")
+BUILTIN(__sync_fetch_and_sub_2, "ssD*s.", "nt")
+BUILTIN(__sync_fetch_and_sub_4, "iiD*i.", "nt")
+BUILTIN(__sync_fetch_and_sub_8, "LLiLLiD*LLi.", "nt")
+BUILTIN(__sync_fetch_and_sub_16, "LLLiLLLiD*LLLi.", "nt")
+
+BUILTIN(__sync_fetch_and_or, "v.", "t")
+BUILTIN(__sync_fetch_and_or_1, "ccD*c.", "nt")
+BUILTIN(__sync_fetch_and_or_2, "ssD*s.", "nt")
+BUILTIN(__sync_fetch_and_or_4, "iiD*i.", "nt")
+BUILTIN(__sync_fetch_and_or_8, "LLiLLiD*LLi.", "nt")
+BUILTIN(__sync_fetch_and_or_16, "LLLiLLLiD*LLLi.", "nt")
+
+BUILTIN(__sync_fetch_and_and, "v.", "t")
+BUILTIN(__sync_fetch_and_and_1, "ccD*c.", "tn")
+BUILTIN(__sync_fetch_and_and_2, "ssD*s.", "tn")
+BUILTIN(__sync_fetch_and_and_4, "iiD*i.", "tn")
+BUILTIN(__sync_fetch_and_and_8, "LLiLLiD*LLi.", "tn")
+BUILTIN(__sync_fetch_and_and_16, "LLLiLLLiD*LLLi.", "tn")
+
+BUILTIN(__sync_fetch_and_xor, "v.", "t")
+BUILTIN(__sync_fetch_and_xor_1, "ccD*c.", "tn")
+BUILTIN(__sync_fetch_and_xor_2, "ssD*s.", "tn")
+BUILTIN(__sync_fetch_and_xor_4, "iiD*i.", "tn")
+BUILTIN(__sync_fetch_and_xor_8, "LLiLLiD*LLi.", "tn")
+BUILTIN(__sync_fetch_and_xor_16, "LLLiLLLiD*LLLi.", "tn")
+
+
+BUILTIN(__sync_add_and_fetch, "v.", "t")
+BUILTIN(__sync_add_and_fetch_1, "ccD*c.", "tn")
+BUILTIN(__sync_add_and_fetch_2, "ssD*s.", "tn")
+BUILTIN(__sync_add_and_fetch_4, "iiD*i.", "tn")
+BUILTIN(__sync_add_and_fetch_8, "LLiLLiD*LLi.", "tn")
+BUILTIN(__sync_add_and_fetch_16, "LLLiLLLiD*LLLi.", "tn")
+
+BUILTIN(__sync_sub_and_fetch, "v.", "t")
+BUILTIN(__sync_sub_and_fetch_1, "ccD*c.", "tn")
+BUILTIN(__sync_sub_and_fetch_2, "ssD*s.", "tn")
+BUILTIN(__sync_sub_and_fetch_4, "iiD*i.", "tn")
+BUILTIN(__sync_sub_and_fetch_8, "LLiLLiD*LLi.", "tn")
+BUILTIN(__sync_sub_and_fetch_16, "LLLiLLLiD*LLLi.", "tn")
+
+BUILTIN(__sync_or_and_fetch, "v.", "t")
+BUILTIN(__sync_or_and_fetch_1, "ccD*c.", "tn")
+BUILTIN(__sync_or_and_fetch_2, "ssD*s.", "tn")
+BUILTIN(__sync_or_and_fetch_4, "iiD*i.", "tn")
+BUILTIN(__sync_or_and_fetch_8, "LLiLLiD*LLi.", "tn")
+BUILTIN(__sync_or_and_fetch_16, "LLLiLLLiD*LLLi.", "tn")
+
+BUILTIN(__sync_and_and_fetch, "v.", "t")
+BUILTIN(__sync_and_and_fetch_1, "ccD*c.", "tn")
+BUILTIN(__sync_and_and_fetch_2, "ssD*s.", "tn")
+BUILTIN(__sync_and_and_fetch_4, "iiD*i.", "tn")
+BUILTIN(__sync_and_and_fetch_8, "LLiLLiD*LLi.", "tn")
+BUILTIN(__sync_and_and_fetch_16, "LLLiLLLiD*LLLi.", "tn")
+
+BUILTIN(__sync_xor_and_fetch, "v.", "t")
+BUILTIN(__sync_xor_and_fetch_1, "ccD*c.", "tn")
+BUILTIN(__sync_xor_and_fetch_2, "ssD*s.", "tn")
+BUILTIN(__sync_xor_and_fetch_4, "iiD*i.", "tn")
+BUILTIN(__sync_xor_and_fetch_8, "LLiLLiD*LLi.", "tn")
+BUILTIN(__sync_xor_and_fetch_16, "LLLiLLLiD*LLLi.", "tn")
+
+BUILTIN(__sync_bool_compare_and_swap, "v.", "t")
+BUILTIN(__sync_bool_compare_and_swap_1, "bcD*cc.", "tn")
+BUILTIN(__sync_bool_compare_and_swap_2, "bsD*ss.", "tn")
+BUILTIN(__sync_bool_compare_and_swap_4, "biD*ii.", "tn")
+BUILTIN(__sync_bool_compare_and_swap_8, "bLLiD*LLiLLi.", "tn")
+BUILTIN(__sync_bool_compare_and_swap_16, "bLLLiD*LLLiLLLi.", "tn")
+
+BUILTIN(__sync_val_compare_and_swap, "v.", "t")
+BUILTIN(__sync_val_compare_and_swap_1, "ccD*cc.", "tn")
+BUILTIN(__sync_val_compare_and_swap_2, "ssD*ss.", "tn")
+BUILTIN(__sync_val_compare_and_swap_4, "iiD*ii.", "tn")
+BUILTIN(__sync_val_compare_and_swap_8, "LLiLLiD*LLiLLi.", "tn")
+BUILTIN(__sync_val_compare_and_swap_16, "LLLiLLLiD*LLLiLLLi.", "tn")
+
+BUILTIN(__sync_lock_test_and_set, "v.", "t")
+BUILTIN(__sync_lock_test_and_set_1, "ccD*c.", "tn")
+BUILTIN(__sync_lock_test_and_set_2, "ssD*s.", "tn")
+BUILTIN(__sync_lock_test_and_set_4, "iiD*i.", "tn")
+BUILTIN(__sync_lock_test_and_set_8, "LLiLLiD*LLi.", "tn")
+BUILTIN(__sync_lock_test_and_set_16, "LLLiLLLiD*LLLi.", "tn")
+
+BUILTIN(__sync_lock_release, "v.", "t")
+BUILTIN(__sync_lock_release_1, "vcD*.", "tn")
+BUILTIN(__sync_lock_release_2, "vsD*.", "tn")
+BUILTIN(__sync_lock_release_4, "viD*.", "tn")
+BUILTIN(__sync_lock_release_8, "vLLiD*.", "tn")
+BUILTIN(__sync_lock_release_16, "vLLLiD*.", "tn")
+
+BUILTIN(__sync_swap, "v.", "t")
+BUILTIN(__sync_swap_1, "ccD*c.", "tn")
+BUILTIN(__sync_swap_2, "ssD*s.", "tn")
+BUILTIN(__sync_swap_4, "iiD*i.", "tn")
+BUILTIN(__sync_swap_8, "LLiLLiD*LLi.", "tn")
+BUILTIN(__sync_swap_16, "LLLiLLLiD*LLLi.", "tn")
+
+// Some of our atomic builtins are handled by AtomicExpr rather than
+// as normal builtin CallExprs. This macro is used for such builtins.
+#ifndef ATOMIC_BUILTIN
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) BUILTIN(ID, TYPE, ATTRS)
+#endif
+
+// C11 _Atomic operations for <stdatomic.h>.
+ATOMIC_BUILTIN(__c11_atomic_init, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_load, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_store, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_exchange, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_compare_exchange_strong, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_compare_exchange_weak, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_add, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_sub, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_and, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_or, "v.", "t")
+ATOMIC_BUILTIN(__c11_atomic_fetch_xor, "v.", "t")
+BUILTIN(__c11_atomic_thread_fence, "vi", "n")
+BUILTIN(__c11_atomic_signal_fence, "vi", "n")
+BUILTIN(__c11_atomic_is_lock_free, "iz", "n")
+
+// GNU atomic builtins.
+ATOMIC_BUILTIN(__atomic_load, "v.", "t")
+ATOMIC_BUILTIN(__atomic_load_n, "v.", "t")
+ATOMIC_BUILTIN(__atomic_store, "v.", "t")
+ATOMIC_BUILTIN(__atomic_store_n, "v.", "t")
+ATOMIC_BUILTIN(__atomic_exchange, "v.", "t")
+ATOMIC_BUILTIN(__atomic_exchange_n, "v.", "t")
+ATOMIC_BUILTIN(__atomic_compare_exchange, "v.", "t")
+ATOMIC_BUILTIN(__atomic_compare_exchange_n, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_add, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_sub, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_and, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_or, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_xor, "v.", "t")
+ATOMIC_BUILTIN(__atomic_fetch_nand, "v.", "t")
+ATOMIC_BUILTIN(__atomic_add_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_sub_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_and_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_or_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_xor_fetch, "v.", "t")
+ATOMIC_BUILTIN(__atomic_nand_fetch, "v.", "t")
+BUILTIN(__atomic_test_and_set, "bvD*i", "n")
+BUILTIN(__atomic_clear, "vvD*i", "n")
+BUILTIN(__atomic_thread_fence, "vi", "n")
+BUILTIN(__atomic_signal_fence, "vi", "n")
+BUILTIN(__atomic_always_lock_free, "izvCD*", "n")
+BUILTIN(__atomic_is_lock_free, "izvCD*", "n")
+
+#undef ATOMIC_BUILTIN
+
+// Non-overloaded atomic builtins.
+BUILTIN(__sync_synchronize, "v.", "n")
+// GCC does not support these; they are a Clang extension.
+BUILTIN(__sync_fetch_and_min, "iiD*i", "n")
+BUILTIN(__sync_fetch_and_max, "iiD*i", "n")
+BUILTIN(__sync_fetch_and_umin, "UiUiD*Ui", "n")
+BUILTIN(__sync_fetch_and_umax, "UiUiD*Ui", "n")
+
+// Random libc builtins.
+BUILTIN(__builtin_abort, "v", "Fnr")
+BUILTIN(__builtin_index, "c*cC*i", "Fn")
+BUILTIN(__builtin_rindex, "c*cC*i", "Fn")
+
+// Microsoft builtins.
+BUILTIN(__assume, "vb", "n")
+BUILTIN(__noop, "v.", "n")
+BUILTIN(__debugbreak, "v", "n")
+
+
+// C99 library functions
+// C99 stdlib.h
+LIBBUILTIN(abort, "v", "fr", "stdlib.h", ALL_LANGUAGES)
+LIBBUILTIN(calloc, "v*zz", "f", "stdlib.h", ALL_LANGUAGES)
+LIBBUILTIN(exit, "vi", "fr", "stdlib.h", ALL_LANGUAGES)
+LIBBUILTIN(_Exit, "vi", "fr", "stdlib.h", ALL_LANGUAGES)
+LIBBUILTIN(malloc, "v*z", "f", "stdlib.h", ALL_LANGUAGES)
+LIBBUILTIN(realloc, "v*v*z", "f", "stdlib.h", ALL_LANGUAGES)
+// C99 string.h
+LIBBUILTIN(memcpy, "v*v*vC*z", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(memcmp, "ivC*vC*z", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(memmove, "v*v*vC*z", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strcpy, "c*c*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strncpy, "c*c*cC*z", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strcmp, "icC*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strncmp, "icC*cC*z", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strcat, "c*c*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strncat, "c*c*cC*z", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strxfrm, "zc*cC*z", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(memchr, "v*vC*iz", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strchr, "c*cC*i", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strcspn, "zcC*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strpbrk, "c*cC*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strrchr, "c*cC*i", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strspn, "zcC*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strstr, "c*cC*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strtok, "c*c*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(memset, "v*v*iz", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strerror, "c*i", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strlen, "zcC*", "f", "string.h", ALL_LANGUAGES)
+// C99 stdio.h
+LIBBUILTIN(printf, "icC*.", "fp:0:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(fprintf, "iP*cC*.", "fp:1:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(snprintf, "ic*zcC*.", "fp:2:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(sprintf, "ic*cC*.", "fp:1:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(vprintf, "icC*a", "fP:0:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(vfprintf, "i.", "fP:1:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(vsnprintf, "ic*zcC*a", "fP:2:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(vsprintf, "ic*cC*a", "fP:1:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(scanf, "icC*R.", "fs:0:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(fscanf, "iP*RcC*R.", "fs:1:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(sscanf, "icC*RcC*R.", "fs:1:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(vscanf, "icC*Ra", "fS:0:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(vfscanf, "iP*RcC*Ra", "fS:1:", "stdio.h", ALL_LANGUAGES)
+LIBBUILTIN(vsscanf, "icC*RcC*Ra", "fS:1:", "stdio.h", ALL_LANGUAGES)
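The attribute strings on the stdio entries above encode more than the plain library-function flag: the digit names the format-string argument, which is what the isPrintfLike/isScanfLike queries in Builtins.h (added later in this patch) report back to clients. A hand decoding of a few of them, offered as an illustration of the convention rather than a quote from any header:

// Attribute-string decoding (illustrative):
//   printf   "fp:0:" -> library function ('f'), printf-style format checking,
//                       format string is argument 0
//   fprintf  "fp:1:" -> format string is argument 1
//   vprintf  "fP:0:" -> printf-style but takes a va_list ('P' instead of 'p')
//   fscanf   "fs:1:" -> scanf-style, format string is argument 1
//   vscanf   "fS:0:" -> scanf-style taking a va_list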
+// C99 setjmp.h
+LIBBUILTIN(longjmp, "vJi", "fr", "setjmp.h", ALL_LANGUAGES)
+
+// Non-C library functions
+// FIXME: Non-C-standard stuff shouldn't be builtins in non-GNU mode!
+LIBBUILTIN(alloca, "v*z", "f", "stdlib.h", ALL_LANGUAGES)
+// POSIX string.h
+LIBBUILTIN(stpcpy, "c*c*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(stpncpy, "c*c*cC*z", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strdup, "c*cC*", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strndup, "c*cC*z", "f", "string.h", ALL_LANGUAGES)
+// POSIX strings.h
+LIBBUILTIN(index, "c*cC*i", "f", "strings.h", ALL_LANGUAGES)
+LIBBUILTIN(rindex, "c*cC*i", "f", "strings.h", ALL_LANGUAGES)
+LIBBUILTIN(bzero, "vv*z", "f", "strings.h", ALL_LANGUAGES)
+LIBBUILTIN(strcasecmp, "icC*cC*", "f", "strings.h", ALL_LANGUAGES)
+LIBBUILTIN(strncasecmp, "icC*cC*z", "f", "strings.h", ALL_LANGUAGES)
+// POSIX unistd.h
+LIBBUILTIN(_exit, "vi", "fr", "unistd.h", ALL_LANGUAGES)
+LIBBUILTIN(vfork, "i", "fj", "unistd.h", ALL_LANGUAGES)
+// POSIX setjmp.h
+
+// On some systems, setjmp is a macro that expands to _setjmp. We undefine
+// it here to avoid having two identical LIBBUILTIN entries.
+#undef setjmp
+LIBBUILTIN(_setjmp, "iJ", "fj", "setjmp.h", ALL_LANGUAGES)
+LIBBUILTIN(__sigsetjmp, "iSJi", "fj", "setjmp.h", ALL_LANGUAGES)
+LIBBUILTIN(setjmp, "iJ", "fj", "setjmp.h", ALL_LANGUAGES)
+LIBBUILTIN(sigsetjmp, "iSJi", "fj", "setjmp.h", ALL_LANGUAGES)
+LIBBUILTIN(setjmp_syscall, "iJ", "fj", "setjmp.h", ALL_LANGUAGES)
+LIBBUILTIN(savectx, "iJ", "fj", "setjmp.h", ALL_LANGUAGES)
+LIBBUILTIN(qsetjmp, "iJ", "fj", "setjmp.h", ALL_LANGUAGES)
+LIBBUILTIN(getcontext, "iK*", "fj", "setjmp.h", ALL_LANGUAGES)
+
+LIBBUILTIN(_longjmp, "vJi", "fr", "setjmp.h", ALL_LANGUAGES)
+LIBBUILTIN(siglongjmp, "vSJi", "fr", "setjmp.h", ALL_LANGUAGES)
+// non-standard but very common
+LIBBUILTIN(strlcpy, "zc*cC*z", "f", "string.h", ALL_LANGUAGES)
+LIBBUILTIN(strlcat, "zc*cC*z", "f", "string.h", ALL_LANGUAGES)
+// id objc_msgSend(id, SEL, ...)
+LIBBUILTIN(objc_msgSend, "GGH.", "f", "objc/message.h", OBJC_LANG)
+
+// long double objc_msgSend_fpret(id self, SEL op, ...)
+LIBBUILTIN(objc_msgSend_fpret, "LdGH.", "f", "objc/message.h", OBJC_LANG)
+// _Complex long double objc_msgSend_fp2ret(id self, SEL op, ...)
+LIBBUILTIN(objc_msgSend_fp2ret, "XLdGH.", "f", "objc/message.h", OBJC_LANG)
+// id objc_msgSend_stret (id, SEL, ...)
+LIBBUILTIN(objc_msgSend_stret, "GGH.", "f", "objc/message.h", OBJC_LANG)
+// id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
+LIBBUILTIN(objc_msgSendSuper, "Gv*H.", "f", "objc/message.h", OBJC_LANG)
+// void objc_msgSendSuper_stret(struct objc_super *super, SEL op, ...)
+LIBBUILTIN(objc_msgSendSuper_stret, "vv*H.", "f", "objc/message.h", OBJC_LANG)
+// id objc_getClass(const char *name)
+LIBBUILTIN(objc_getClass, "GcC*", "f", "objc/runtime.h", OBJC_LANG)
+// id objc_getMetaClass(const char *name)
+LIBBUILTIN(objc_getMetaClass, "GcC*", "f", "objc/runtime.h", OBJC_LANG)
+// void objc_enumerationMutation(id)
+LIBBUILTIN(objc_enumerationMutation, "vG", "f", "objc/runtime.h", OBJC_LANG)
+
+// id objc_read_weak(id *location)
+LIBBUILTIN(objc_read_weak, "GG*", "f", "objc/objc-auto.h", OBJC_LANG)
+// id objc_assign_weak(id value, id *location)
+LIBBUILTIN(objc_assign_weak, "GGG*", "f", "objc/objc-auto.h", OBJC_LANG)
+// id objc_assign_ivar(id value, id dest, ptrdiff_t offset)
+LIBBUILTIN(objc_assign_ivar, "GGGY", "f", "objc/objc-auto.h", OBJC_LANG)
+// id objc_assign_global(id val, id *dest)
+LIBBUILTIN(objc_assign_global, "GGG*", "f", "objc/objc-auto.h", OBJC_LANG)
+// id objc_assign_strongCast(id val, id *dest)
+LIBBUILTIN(objc_assign_strongCast, "GGG*", "f", "objc/objc-auto.h", OBJC_LANG)
+
+// id objc_exception_extract(void *localExceptionData)
+LIBBUILTIN(objc_exception_extract, "Gv*", "f", "objc/objc-exception.h", OBJC_LANG)
+// void objc_exception_try_enter(void *localExceptionData)
+LIBBUILTIN(objc_exception_try_enter, "vv*", "f", "objc/objc-exception.h", OBJC_LANG)
+// void objc_exception_try_exit(void *localExceptionData)
+LIBBUILTIN(objc_exception_try_exit, "vv*", "f", "objc/objc-exception.h", OBJC_LANG)
+// int objc_exception_match(Class exceptionClass, id exception)
+LIBBUILTIN(objc_exception_match, "iGG", "f", "objc/objc-exception.h", OBJC_LANG)
+// void objc_exception_throw(id exception)
+LIBBUILTIN(objc_exception_throw, "vG", "f", "objc/objc-exception.h", OBJC_LANG)
+
+// int objc_sync_enter(id obj)
+LIBBUILTIN(objc_sync_enter, "iG", "f", "objc/objc-sync.h", OBJC_LANG)
+// int objc_sync_exit(id obj)
+LIBBUILTIN(objc_sync_exit, "iG", "f", "objc/objc-sync.h", OBJC_LANG)
+
+BUILTIN(__builtin_objc_memmove_collectable, "v*v*vC*z", "nF")
+
+// void NSLog(NSString *fmt, ...)
+LIBBUILTIN(NSLog, "vG.", "fp:0:", "Foundation/NSObjCRuntime.h", OBJC_LANG)
+// void NSLogv(NSString *fmt, va_list args)
+LIBBUILTIN(NSLogv, "vGa", "fP:0:", "Foundation/NSObjCRuntime.h", OBJC_LANG)
+
+// Builtin math library functions
+LIBBUILTIN(pow, "ddd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(powl, "LdLdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(powf, "fff", "fe", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(sqrt, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(sqrtl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(sqrtf, "ff", "fe", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(sin, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(sinl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(sinf, "ff", "fe", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(cos, "dd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(cosl, "LdLd", "fe", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(cosf, "ff", "fe", "math.h", ALL_LANGUAGES)
+
+LIBBUILTIN(fma, "dddd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fmal, "LdLdLdLd", "fc", "math.h", ALL_LANGUAGES)
+LIBBUILTIN(fmaf, "ffff", "fc", "math.h", ALL_LANGUAGES)
+
+// Blocks runtime functions
+LIBBUILTIN(_Block_object_assign, "vv*vC*iC", "f", "Blocks.h", ALL_LANGUAGES)
+LIBBUILTIN(_Block_object_dispose, "vvC*iC", "f", "Blocks.h", ALL_LANGUAGES)
+// FIXME: Also declare NSConcreteGlobalBlock and NSConcreteStackBlock.
+
+// Annotation function
+BUILTIN(__builtin_annotation, "UiUicC*", "nc")
+
+#undef BUILTIN
+#undef LIBBUILTIN
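Everything above is consumed as an X-macro table: a client defines the BUILTIN family of macros, includes the file, and every entry expands in place (Builtins.h below uses exactly this trick to build its BI##ID enumerators). A minimal sketch of a hypothetical consumer that only collects the builtin names; the array name and the explicit ATOMIC_BUILTIN/LIBBUILTIN definitions are illustrative assumptions, not part of the patch:

// Hypothetical consumer of Builtins.def: gather every builtin's name.
// All three macros are defined up front so the sketch does not depend on the
// .def file's own fallback definitions.
static const char *const BuiltinNames[] = {
#define BUILTIN(ID, TYPE, ATTRS) #ID,
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) #ID,
#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER, LANGS) #ID,
#include "clang/Basic/Builtins.def"
};
// No trailing #undef is needed here: as the tail of the file shows,
// Builtins.def undefines BUILTIN, LIBBUILTIN and ATOMIC_BUILTIN itself.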
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Builtins.h b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.h
new file mode 100644
index 0000000..5afa020
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Builtins.h
@@ -0,0 +1,163 @@
+//===--- Builtins.h - Builtin function header -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines enum values for all the target-independent builtin
+// functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_BUILTINS_H
+#define LLVM_CLANG_BASIC_BUILTINS_H
+
+#include "clang/Basic/LLVM.h"
+#include <cstring>
+
+// VC++ defines 'alloca' as an object-like macro, which interferes with our
+// builtins.
+#undef alloca
+
+namespace clang {
+ class TargetInfo;
+ class IdentifierTable;
+ class ASTContext;
+ class QualType;
+ class LangOptions;
+
+ enum LanguageID {
+    C_LANG = 0x1,     // builtin for C only.
+    CXX_LANG = 0x2,   // builtin for C++ only.
+    OBJC_LANG = 0x4,  // builtin for Objective-C and Objective-C++.
+    ALL_LANGUAGES = (C_LANG|CXX_LANG|OBJC_LANG) // builtin is for all languages.
+ };
+
+namespace Builtin {
+enum ID {
+ NotBuiltin = 0, // This is not a builtin function.
+#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#include "clang/Basic/Builtins.def"
+ FirstTSBuiltin
+};
+
+struct Info {
+ const char *Name, *Type, *Attributes, *HeaderName;
+ LanguageID builtin_lang;
+
+ bool operator==(const Info &RHS) const {
+ return !strcmp(Name, RHS.Name) &&
+ !strcmp(Type, RHS.Type) &&
+ !strcmp(Attributes, RHS.Attributes);
+ }
+ bool operator!=(const Info &RHS) const { return !(*this == RHS); }
+};
+
+/// Builtin::Context - This holds information about target-independent and
+/// target-specific builtins, allowing easy queries by clients.
+class Context {
+ const Info *TSRecords;
+ unsigned NumTSRecords;
+public:
+ Context();
+
+ /// \brief Perform target-specific initialization
+ void InitializeTarget(const TargetInfo &Target);
+
+ /// InitializeBuiltins - Mark the identifiers for all the builtins with their
+ /// appropriate builtin ID # and mark any non-portable builtin identifiers as
+ /// such.
+ void InitializeBuiltins(IdentifierTable &Table, const LangOptions& LangOpts);
+
+  /// \brief Populate the vector with the names of all of the builtins.
+ void GetBuiltinNames(SmallVectorImpl<const char *> &Names,
+ bool NoBuiltins);
+
+ /// Builtin::GetName - Return the identifier name for the specified builtin,
+ /// e.g. "__builtin_abs".
+ const char *GetName(unsigned ID) const {
+ return GetRecord(ID).Name;
+ }
+
+ /// GetTypeString - Get the type descriptor string for the specified builtin.
+ const char *GetTypeString(unsigned ID) const {
+ return GetRecord(ID).Type;
+ }
+
+ /// isConst - Return true if this function has no side effects and doesn't
+ /// read memory.
+ bool isConst(unsigned ID) const {
+ return strchr(GetRecord(ID).Attributes, 'c') != 0;
+ }
+
+ /// isNoThrow - Return true if we know this builtin never throws an exception.
+ bool isNoThrow(unsigned ID) const {
+ return strchr(GetRecord(ID).Attributes, 'n') != 0;
+ }
+
+ /// isNoReturn - Return true if we know this builtin never returns.
+ bool isNoReturn(unsigned ID) const {
+ return strchr(GetRecord(ID).Attributes, 'r') != 0;
+ }
+
+ /// isReturnsTwice - Return true if we know this builtin can return twice.
+ bool isReturnsTwice(unsigned ID) const {
+ return strchr(GetRecord(ID).Attributes, 'j') != 0;
+ }
+
+ /// isLibFunction - Return true if this is a builtin for a libc/libm function,
+ /// with a "__builtin_" prefix (e.g. __builtin_abs).
+ bool isLibFunction(unsigned ID) const {
+ return strchr(GetRecord(ID).Attributes, 'F') != 0;
+ }
+
+ /// \brief Determines whether this builtin is a predefined libc/libm
+ /// function, such as "malloc", where we know the signature a
+ /// priori.
+ bool isPredefinedLibFunction(unsigned ID) const {
+ return strchr(GetRecord(ID).Attributes, 'f') != 0;
+ }
+
+ /// \brief Determines whether this builtin has custom typechecking.
+ bool hasCustomTypechecking(unsigned ID) const {
+ return strchr(GetRecord(ID).Attributes, 't') != 0;
+ }
+
+ /// \brief Completely forget that the given ID was ever considered a builtin,
+ /// e.g., because the user provided a conflicting signature.
+ void ForgetBuiltin(unsigned ID, IdentifierTable &Table);
+
+ /// \brief If this is a library function that comes from a specific
+ /// header, retrieve that header name.
+ const char *getHeaderName(unsigned ID) const {
+ return GetRecord(ID).HeaderName;
+ }
+
+ /// \brief Determine whether this builtin is like printf in its
+ /// formatting rules and, if so, set the index to the format string
+  /// argument and whether this function has a va_list argument.
+ bool isPrintfLike(unsigned ID, unsigned &FormatIdx, bool &HasVAListArg);
+
+ /// \brief Determine whether this builtin is like scanf in its
+ /// formatting rules and, if so, set the index to the format string
+  /// argument and whether this function has a va_list argument.
+ bool isScanfLike(unsigned ID, unsigned &FormatIdx, bool &HasVAListArg);
+
+ /// isConstWithoutErrno - Return true if this function has no side
+ /// effects and doesn't read memory, except for possibly errno. Such
+ /// functions can be const when the MathErrno lang option is
+ /// disabled.
+ bool isConstWithoutErrno(unsigned ID) const {
+ return strchr(GetRecord(ID).Attributes, 'e') != 0;
+ }
+
+private:
+ const Info &GetRecord(unsigned ID) const;
+};
+
+} // end namespace Builtin
+} // end namespace clang
+#endif
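The attribute letters that the queries above search for ('c', 'n', 'r', 'j', 'F', 'f', 't', 'e') are exactly the characters in the third field of each BUILTIN/LIBBUILTIN entry. A hedged usage sketch of the Context API, assuming a context that has already been initialized and a valid builtin ID; the function and variable names are illustrative:

#include "clang/Basic/Builtins.h"
#include "llvm/Support/raw_ostream.h"

// Sketch only: BuiltinID is assumed to come from an identifier looked up
// after InitializeBuiltins(); nothing here is part of the patch itself.
static void describeBuiltin(const clang::Builtin::Context &BuiltinInfo,
                            unsigned BuiltinID) {
  // GetName/GetTypeString return the Name and Type fields of the record.
  llvm::errs() << BuiltinInfo.GetName(BuiltinID) << " : "
               << BuiltinInfo.GetTypeString(BuiltinID) << "\n";
  // Each predicate below is a strchr over the Attributes string.
  if (BuiltinInfo.isConst(BuiltinID))                  // 'c'
    llvm::errs() << "  const: no side effects, no memory reads\n";
  if (BuiltinInfo.isNoThrow(BuiltinID))                // 'n'
    llvm::errs() << "  nothrow\n";
  if (BuiltinInfo.isPredefinedLibFunction(BuiltinID))  // 'f'
    llvm::errs() << "  library function declared in "
                 << BuiltinInfo.getHeaderName(BuiltinID) << "\n";
}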
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsARM.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsARM.def
new file mode 100644
index 0000000..888e529
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsARM.def
@@ -0,0 +1,52 @@
+//===--- BuiltinsARM.def - ARM Builtin function database ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ARM-specific builtin function database. Users of
+// this file must define the BUILTIN macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+// The format of this database matches clang/Basic/Builtins.def.
+
+// In libgcc
+BUILTIN(__clear_cache, "v.", "")
+BUILTIN(__builtin_thread_pointer, "v*", "")
+
+// Saturating arithmetic
+BUILTIN(__builtin_arm_qadd, "iii", "nc")
+BUILTIN(__builtin_arm_qsub, "iii", "nc")
+BUILTIN(__builtin_arm_ssat, "iiUi", "nc")
+BUILTIN(__builtin_arm_usat, "UiUiUi", "nc")
+
+// Store and load exclusive doubleword
+BUILTIN(__builtin_arm_ldrexd, "LLUiv*", "")
+BUILTIN(__builtin_arm_strexd, "iLLUiv*", "")
+
+// VFP
+BUILTIN(__builtin_arm_get_fpscr, "Ui", "nc")
+BUILTIN(__builtin_arm_set_fpscr, "vUi", "nc")
+BUILTIN(__builtin_arm_vcvtr_f, "ffi", "nc")
+BUILTIN(__builtin_arm_vcvtr_d, "fdi", "nc")
+
+// Coprocessor
+BUILTIN(__builtin_arm_mcr, "vUiUiUiUiUiUi", "")
+BUILTIN(__builtin_arm_mcr2, "vUiUiUiUiUiUi", "")
+BUILTIN(__builtin_arm_mrc, "UiUiUiUiUiUi", "")
+BUILTIN(__builtin_arm_mrc2, "UiUiUiUiUiUi", "")
+BUILTIN(__builtin_arm_cdp, "vUiUiUiUiUiUi", "")
+BUILTIN(__builtin_arm_cdp2, "vUiUiUiUiUiUi", "")
+BUILTIN(__builtin_arm_mcrr, "vUiUiUiUiUi", "")
+BUILTIN(__builtin_arm_mcrr2, "vUiUiUiUiUi", "")
+
+// NEON
+#define GET_NEON_BUILTINS
+#include "clang/Basic/arm_neon.inc"
+#undef GET_NEON_BUILTINS
+
+#undef BUILTIN
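The second field of every entry is the compact prototype encoding documented at the top of the generic Builtins.def. A hand decoding of a few ARM entries above, as an illustration of that convention rather than a quote from any header:

// Prototype-string decoding (illustrative): the first type is the return
// type, 'U'/'LL'/'S' are unsigned/long-long/signed prefixes, '*' turns the
// preceding type into a pointer, and '.' marks a variadic builtin.
//   __builtin_arm_qadd    "iii"      ->  int __builtin_arm_qadd(int, int)
//   __builtin_arm_usat    "UiUiUi"   ->  unsigned __builtin_arm_usat(unsigned, unsigned)
//   __builtin_arm_ldrexd  "LLUiv*"   ->  unsigned long long __builtin_arm_ldrexd(void *)
//   __builtin_arm_strexd  "iLLUiv*"  ->  int __builtin_arm_strexd(unsigned long long, void *)
//   __clear_cache         "v."       ->  void __clear_cache(...)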
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsHexagon.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsHexagon.def
new file mode 100644
index 0000000..334224f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsHexagon.def
@@ -0,0 +1,689 @@
+//==--- BuiltinsHexagon.def - Hexagon Builtin function database --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Hexagon-specific builtin function database.  Users of
+// this file must define the BUILTIN macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+BUILTIN(__builtin_HEXAGON_C2_cmpeq, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpgt, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpgtu, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpeqp, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpgtp, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpgtup, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_C2_bitsset, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_bitsclr, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpeqi, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpgti, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpgtui, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpgei, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpgeui, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmplt, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_cmpltu, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_bitsclri, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_and, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_or, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_xor, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_andn, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_not, "bi", "")
+BUILTIN(__builtin_HEXAGON_C2_orn, "bii", "")
+BUILTIN(__builtin_HEXAGON_C2_pxfer_map, "bi", "")
+BUILTIN(__builtin_HEXAGON_C2_any8, "bi", "")
+BUILTIN(__builtin_HEXAGON_C2_all8, "bi", "")
+BUILTIN(__builtin_HEXAGON_C2_vitpack, "iii", "")
+BUILTIN(__builtin_HEXAGON_C2_mux, "iiii", "")
+BUILTIN(__builtin_HEXAGON_C2_muxii, "iiii", "")
+BUILTIN(__builtin_HEXAGON_C2_muxir, "iiii", "")
+BUILTIN(__builtin_HEXAGON_C2_muxri, "iiii", "")
+BUILTIN(__builtin_HEXAGON_C2_vmux, "LLiiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_C2_mask, "LLii", "")
+BUILTIN(__builtin_HEXAGON_A2_vcmpbeq, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vcmpbgtu, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vcmpheq, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vcmphgt, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vcmphgtu, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vcmpweq, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vcmpwgt, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vcmpwgtu, "bLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_C2_tfrpr, "ii", "")
+BUILTIN(__builtin_HEXAGON_C2_tfrrp, "bi", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hl_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_hl_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_lh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_lh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_ll_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_ll_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hl_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_hl_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_lh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_lh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_ll_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_ll_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hl_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_hl_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_lh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_lh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_ll_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_acc_sat_ll_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_hh_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_hh_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_hl_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_hl_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_lh_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_lh_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_ll_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_ll_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hh_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hh_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hl_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_hl_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_lh_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_lh_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_ll_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_ll_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hh_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hh_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hl_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_hl_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_lh_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_lh_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_ll_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_rnd_ll_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hh_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hh_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hl_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_hl_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_lh_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_lh_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_ll_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_acc_ll_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hh_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hh_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hl_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_hl_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_lh_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_lh_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_ll_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_nac_ll_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_hh_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_hh_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_hl_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_hl_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_lh_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_lh_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_ll_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_ll_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hh_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hh_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hl_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_hl_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_lh_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_lh_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_ll_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyd_rnd_ll_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hl_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_hl_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_lh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_lh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_ll_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_acc_ll_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hl_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_hl_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_lh_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_lh_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_ll_s0, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_nac_ll_s1, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_hh_s0, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_hh_s1, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_hl_s0, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_hl_s1, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_lh_s0, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_lh_s1, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_ll_s0, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_ll_s1, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hh_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hh_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hl_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_hl_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_lh_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_lh_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_ll_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_acc_ll_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hh_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hh_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hl_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_hl_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_lh_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_lh_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_ll_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_nac_ll_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_hh_s0, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_hh_s1, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_hl_s0, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_hl_s1, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_lh_s0, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_lh_s1, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_ll_s0, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyud_ll_s1, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpysmi, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_macsip, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_macsin, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyss_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyss_acc_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyss_nac_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_s0, "ULLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_acc_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyuu_nac_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpy_up, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyu_up, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_M2_dpmpyss_rnd_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyi, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mpyui, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_maci, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_acci, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_accii, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_nacci, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_naccii, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_subacc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vmac2s_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vmac2s_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s0pack, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2s_s1pack, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_vmac2, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2es_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vmpy2es_s1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vmac2es_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vmac2es_s1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vmac2es, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrmac_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrmpy_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vdmpyrs_s0, "iLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vdmpyrs_s1, "iLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vdmacs_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vdmacs_s1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vdmpys_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vdmpys_s1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpyrs_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpyrs_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpyrsc_s0, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpyrsc_s1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmacs_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmacs_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmacsc_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmacsc_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpys_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpys_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpysc_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpysc_s1, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cnacs_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cnacs_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cnacsc_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cnacsc_s1, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpys_s1, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpys_acc_s1, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpys_s1rp, "iLLii", "")
+BUILTIN(__builtin_HEXAGON_M2_mmacls_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmacls_s1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmachs_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmachs_s1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyl_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyl_s1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyh_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyh_s1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmacls_rs0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmacls_rs1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmachs_rs0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmachs_rs1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyl_rs0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyl_rs1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyh_rs0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyh_rs1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_hmmpyl_rs1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_hmmpyh_rs1, "iii", "")
+BUILTIN(__builtin_HEXAGON_M2_mmaculs_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmaculs_s1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmacuhs_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmacuhs_s1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyul_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyul_s1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyuh_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyuh_s1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmaculs_rs0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmaculs_rs1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmacuhs_rs0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmacuhs_rs1, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyul_rs0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyul_rs1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyuh_rs0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_mmpyuh_rs1, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmaci_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmacr_s0, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmaci_s0c, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmacr_s0c, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_cmaci_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmacr_s0, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpyi_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpyr_s0, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpyi_s0c, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vrcmpyr_s0c, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpyi_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_cmpyr_s0, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_M2_vcmpy_s0_sat_i, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vcmpy_s0_sat_r, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vcmpy_s1_sat_i, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vcmpy_s1_sat_r, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vcmac_s0_sat_i, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vcmac_s0_sat_r, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vcrotate, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_A2_add, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_sub, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addsat, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subsat, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addi, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_l16_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_l16_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_l16_sat_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_l16_sat_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_l16_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_l16_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_l16_sat_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_l16_sat_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_lh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_hh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_lh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_addh_h16_sat_hh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_lh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_hh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_lh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_subh_h16_sat_hh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_aslh, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_asrh, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_addp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_addpsat, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_addsp, "LLiiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_subp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_neg, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_negsat, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_abs, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_abssat, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_vconj, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_negp, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_absp, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_max, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_maxu, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_A2_min, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_minu, "Uiii", "")
+BUILTIN(__builtin_HEXAGON_A2_maxp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_maxup, "ULLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_minp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_minup, "ULLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_tfr, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_tfrsi, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_tfrp, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_tfrpi, "LLii", "")
+BUILTIN(__builtin_HEXAGON_A2_zxtb, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_sxtb, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_zxth, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_sxth, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_combinew, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_A2_combineii, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_A2_combine_hh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_combine_hl, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_combine_lh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_combine_ll, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_tfril, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_tfrih, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_and, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_or, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_xor, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_not, "ii", "")
+BUILTIN(__builtin_HEXAGON_M2_xor_xacc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_A2_subri, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_andir, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_orir, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_andp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_orp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_xorp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_notp, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_sxtw, "LLii", "")
+BUILTIN(__builtin_HEXAGON_A2_sat, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_sath, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_satuh, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_satub, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_satb, "ii", "")
+BUILTIN(__builtin_HEXAGON_A2_vaddub, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vaddubs, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vaddh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vaddhs, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vadduhs, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vaddw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vaddws, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_svavgh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_svavghs, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_svnavgh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_svaddh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_svaddhs, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_svadduhs, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_svsubh, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_svsubhs, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_svsubuhs, "iii", "")
+BUILTIN(__builtin_HEXAGON_A2_vraddub, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vraddub_acc, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vradduh, "iLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vsubub, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vsububs, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vsubh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vsubhs, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vsubuhs, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vsubw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vsubws, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vabsh, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vabshsat, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vabsw, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vabswsat, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vabsdiffw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_M2_vabsdiffh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vrsadub, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vrsadub_acc, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavgub, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavguh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavgh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vnavgh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavgw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vnavgw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavgwr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vnavgwr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavgwcr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vnavgwcr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavghcr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vnavghcr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavguw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavguwr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavgubr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavguhr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vavghr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vnavghr, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vminh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vmaxh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vminub, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vmaxub, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vminuh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vmaxuh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vminw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vmaxw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vminuw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A2_vmaxuw, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_p, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_p, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_p, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_p, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r_acc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r_acc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_acc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_acc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_p_acc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_p_acc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_acc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_acc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r_nac, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r_nac, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_nac, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_nac, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_p_nac, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_p_nac, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_nac, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_nac, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_r_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_r_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_p_and, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_p_and, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_and, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_and, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_p_or, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_p_or, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_p_or, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_p_or, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_r_sat, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_r_sat, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_p, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_acc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_acc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_acc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_p_acc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_acc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p_acc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_nac, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_nac, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_nac, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_p_nac, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_nac, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p_nac, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_xacc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_xacc, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_xacc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p_xacc, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_r_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_p_and, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_and, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p_and, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_p_or, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_p_or, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_p_or, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_r_sat, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_rnd, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_addasl_rrri, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_valignib, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_valignrb, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vspliceib, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vsplicerb, "LLiLLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vsplatrh, "LLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vsplatrb, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_insert, "iiiii", "")
+BUILTIN(__builtin_HEXAGON_S2_tableidxb_goodsyntax, "iiiii", "")
+BUILTIN(__builtin_HEXAGON_S2_tableidxh_goodsyntax, "iiiii", "")
+BUILTIN(__builtin_HEXAGON_S2_tableidxw_goodsyntax, "iiiii", "")
+BUILTIN(__builtin_HEXAGON_S2_tableidxd_goodsyntax, "iiiii", "")
+BUILTIN(__builtin_HEXAGON_S2_extractu, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S2_insertp, "LLiLLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_S2_extractup, "LLiLLiii", "")
+BUILTIN(__builtin_HEXAGON_S2_insert_rp, "iiiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_extractu_rp, "iiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_insertp_rp, "LLiLLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_extractup_rp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_tstbit_i, "bii", "")
+BUILTIN(__builtin_HEXAGON_S2_setbit_i, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_togglebit_i, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_clrbit_i, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_tstbit_r, "bii", "")
+BUILTIN(__builtin_HEXAGON_S2_setbit_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_togglebit_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_clrbit_r, "iii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_vh, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_vh, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_vh, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_vh, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_vh, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_vh, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_vh, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_vw, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_i_svw_trun, "iLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_svw_trun, "iLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_i_vw, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_i_vw, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asr_r_vw, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_asl_r_vw, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsr_r_vw, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_lsl_r_vw, "LLiLLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vrndpackwh, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vrndpackwhs, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vsxtbh, "LLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vzxtbh, "LLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vsathub, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_svsathub, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_svsathb, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_vsathb, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vtrunohb, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vtrunewh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vtrunowh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vtrunehb, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vsxthw, "LLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vzxthw, "LLii", "")
+BUILTIN(__builtin_HEXAGON_S2_vsatwh, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vsatwuh, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_packhl, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_A2_swiz, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_vsathub_nopack, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vsathb_nopack, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vsatwh_nopack, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_vsatwuh_nopack, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_shuffob, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_shuffeb, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_shuffoh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_shuffeh, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_parityp, "iLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_lfsp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_clbnorm, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_clb, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_cl0, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_cl1, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_clbp, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_cl0p, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_cl1p, "iLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_brev, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_ct0, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_ct1, "ii", "")
+BUILTIN(__builtin_HEXAGON_S2_interleave, "LLiLLi", "")
+BUILTIN(__builtin_HEXAGON_S2_deinterleave, "LLiLLi", "")
+
+BUILTIN(__builtin_SI_to_SXTHI_asrh, "ii", "")
+
+BUILTIN(__builtin_M2_vrcmpys_s1, "LLiLLii", "")
+BUILTIN(__builtin_M2_vrcmpys_acc_s1, "LLiLLiLLii", "")
+BUILTIN(__builtin_M2_vrcmpys_s1rp, "iLLii", "")
+
+BUILTIN(__builtin_M2_vradduh, "iLLiLLi", "")
+BUILTIN(__builtin_A2_addsp, "LLiiLLi", "")
+BUILTIN(__builtin_A2_addpsat, "LLiLLiLLi", "")
+
+BUILTIN(__builtin_A2_maxp, "LLiLLiLLi", "")
+BUILTIN(__builtin_A2_maxup, "LLiLLiLLi", "")
+
+BUILTIN(__builtin_HEXAGON_A4_orn, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_andn, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_ornp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A4_andnp, "LLiLLiLLi", "")
+BUILTIN(__builtin_HEXAGON_A4_combineir, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_A4_combineri, "LLiii", "")
+BUILTIN(__builtin_HEXAGON_C4_cmpneqi, "bii", "")
+BUILTIN(__builtin_HEXAGON_C4_cmpneq, "bii", "")
+BUILTIN(__builtin_HEXAGON_C4_cmpltei, "bii", "")
+BUILTIN(__builtin_HEXAGON_C4_cmplte, "bii", "")
+BUILTIN(__builtin_HEXAGON_C4_cmplteui, "bii", "")
+BUILTIN(__builtin_HEXAGON_C4_cmplteu, "bii", "")
+BUILTIN(__builtin_HEXAGON_A4_rcmpneq, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_rcmpneqi, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_rcmpeq, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_rcmpeqi, "iii", "")
+BUILTIN(__builtin_HEXAGON_C4_fastcorner9, "bii", "")
+BUILTIN(__builtin_HEXAGON_C4_fastcorner9_not, "bii", "")
+BUILTIN(__builtin_HEXAGON_C4_and_andn, "biii", "")
+BUILTIN(__builtin_HEXAGON_C4_and_and, "biii", "")
+BUILTIN(__builtin_HEXAGON_C4_and_orn, "biii", "")
+BUILTIN(__builtin_HEXAGON_C4_and_or, "biii", "")
+BUILTIN(__builtin_HEXAGON_C4_or_andn, "biii", "")
+BUILTIN(__builtin_HEXAGON_C4_or_and, "biii", "")
+BUILTIN(__builtin_HEXAGON_C4_or_orn, "biii", "")
+BUILTIN(__builtin_HEXAGON_C4_or_or, "biii", "")
+BUILTIN(__builtin_HEXAGON_S4_addaddi, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S4_subaddi, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_xor_xacc, "LLiLLiLLiLLi", "")
+
+BUILTIN(__builtin_HEXAGON_M4_and_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_and_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_and_xor, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_and_andn, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_xor_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_xor_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_xor_andn, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_or_and, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_or_or, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_or_xor, "iiii", "")
+BUILTIN(__builtin_HEXAGON_M4_or_andn, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S4_or_andix, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S4_or_andi, "iiii", "")
+BUILTIN(__builtin_HEXAGON_S4_or_ori, "iiii", "")
+
+BUILTIN(__builtin_HEXAGON_A4_modwrapu, "iii", "")
+
+BUILTIN(__builtin_HEXAGON_A4_cround_ri, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_cround_rr, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_round_ri, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_round_rr, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_round_ri_sat, "iii", "")
+BUILTIN(__builtin_HEXAGON_A4_round_rr_sat, "iii", "")
+
+#undef BUILTIN
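Once the target is Hexagon, each of these entries is callable from C like an ordinary function with the decoded prototype. A small sketch, compiling only when targeting Hexagon; the wrapper names are illustrative:

// Sketch: assumes a Hexagon target; the builtins and their signatures come
// straight from the table above ("iii" = int(int, int), "LLiLLiLLi" =
// long long(long long, long long)).
static int saturating_add(int a, int b) {
  return __builtin_HEXAGON_A2_addsat(a, b);   // A2_addsat: "iii"
}

static long long pairwise_max(long long a, long long b) {
  return __builtin_HEXAGON_A2_maxp(a, b);     // A2_maxp: "LLiLLiLLi"
}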
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPPC.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPPC.def
new file mode 100644
index 0000000..8a751e4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPPC.def
@@ -0,0 +1,209 @@
+//===--- BuiltinsPPC.def - PowerPC Builtin function database ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PowerPC-specific builtin function database. Users of
+// this file must define the BUILTIN macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+// FIXME: this needs to be the full list supported by GCC. Right now, I'm just
+// adding stuff on demand.
+
+// The format of this database matches clang/Basic/Builtins.def.
+
+// This is just a placeholder; the types and attributes are wrong.
+BUILTIN(__builtin_altivec_vaddcuw, "V4UiV4UiV4Ui", "")
+
+BUILTIN(__builtin_altivec_vaddsbs, "V16ScV16ScV16Sc", "")
+BUILTIN(__builtin_altivec_vaddubs, "V16UcV16UcV16Uc", "")
+BUILTIN(__builtin_altivec_vaddshs, "V8SsV8SsV8Ss", "")
+BUILTIN(__builtin_altivec_vadduhs, "V8UsV8UsV8Us", "")
+BUILTIN(__builtin_altivec_vaddsws, "V4SiV4SiV4Si", "")
+BUILTIN(__builtin_altivec_vadduws, "V4UiV4UiV4Ui", "")
+
+BUILTIN(__builtin_altivec_vsubsbs, "V16ScV16ScV16Sc", "")
+BUILTIN(__builtin_altivec_vsububs, "V16UcV16UcV16Uc", "")
+BUILTIN(__builtin_altivec_vsubshs, "V8SsV8SsV8Ss", "")
+BUILTIN(__builtin_altivec_vsubuhs, "V8UsV8UsV8Us", "")
+BUILTIN(__builtin_altivec_vsubsws, "V4SiV4SiV4Si", "")
+BUILTIN(__builtin_altivec_vsubuws, "V4UiV4UiV4Ui", "")
+
+BUILTIN(__builtin_altivec_vavgsb, "V16ScV16ScV16Sc", "")
+BUILTIN(__builtin_altivec_vavgub, "V16UcV16UcV16Uc", "")
+BUILTIN(__builtin_altivec_vavgsh, "V8SsV8SsV8Ss", "")
+BUILTIN(__builtin_altivec_vavguh, "V8UsV8UsV8Us", "")
+BUILTIN(__builtin_altivec_vavgsw, "V4SiV4SiV4Si", "")
+BUILTIN(__builtin_altivec_vavguw, "V4UiV4UiV4Ui", "")
+
+BUILTIN(__builtin_altivec_vrfip, "V4fV4f", "")
+
+BUILTIN(__builtin_altivec_vcfsx, "V4fV4ii", "")
+BUILTIN(__builtin_altivec_vcfux, "V4fV4ii", "")
+BUILTIN(__builtin_altivec_vctsxs, "V4SiV4fi", "")
+BUILTIN(__builtin_altivec_vctuxs, "V4UiV4fi", "")
+
+BUILTIN(__builtin_altivec_dss, "vUi", "")
+BUILTIN(__builtin_altivec_dssall, "v", "")
+BUILTIN(__builtin_altivec_dst, "vvC*iUi", "")
+BUILTIN(__builtin_altivec_dstt, "vvC*iUi", "")
+BUILTIN(__builtin_altivec_dstst, "vvC*iUi", "")
+BUILTIN(__builtin_altivec_dststt, "vvC*iUi", "")
+
+BUILTIN(__builtin_altivec_vexptefp, "V4fV4f", "")
+
+BUILTIN(__builtin_altivec_vrfim, "V4fV4f", "")
+
+BUILTIN(__builtin_altivec_lvx, "V4iivC*", "")
+BUILTIN(__builtin_altivec_lvxl, "V4iivC*", "")
+BUILTIN(__builtin_altivec_lvebx, "V16civC*", "")
+BUILTIN(__builtin_altivec_lvehx, "V8sivC*", "")
+BUILTIN(__builtin_altivec_lvewx, "V4iivC*", "")
+
+BUILTIN(__builtin_altivec_vlogefp, "V4fV4f", "")
+
+BUILTIN(__builtin_altivec_lvsl, "V16cUcvC*", "")
+BUILTIN(__builtin_altivec_lvsr, "V16cUcvC*", "")
+
+BUILTIN(__builtin_altivec_vmaddfp, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_altivec_vmhaddshs, "V8sV8sV8sV8s", "")
+BUILTIN(__builtin_altivec_vmhraddshs, "V8sV8sV8sV8s", "")
+
+BUILTIN(__builtin_altivec_vmsumubm, "V4UiV16UcV16UcV4Ui", "")
+BUILTIN(__builtin_altivec_vmsummbm, "V4SiV16ScV16UcV4Si", "")
+BUILTIN(__builtin_altivec_vmsumuhm, "V4UiV8UsV8UsV4Ui", "")
+BUILTIN(__builtin_altivec_vmsumshm, "V4SiV8SsV8SsV4Si", "")
+BUILTIN(__builtin_altivec_vmsumuhs, "V4UiV8UsV8UsV4Ui", "")
+BUILTIN(__builtin_altivec_vmsumshs, "V4SiV8SsV8SsV4Si", "")
+
+BUILTIN(__builtin_altivec_vmuleub, "V8UsV16UcV16Uc", "")
+BUILTIN(__builtin_altivec_vmulesb, "V8SsV16ScV16Sc", "")
+BUILTIN(__builtin_altivec_vmuleuh, "V4UiV8UsV8Us", "")
+BUILTIN(__builtin_altivec_vmulesh, "V4SiV8SsV8Ss", "")
+BUILTIN(__builtin_altivec_vmuloub, "V8UsV16UcV16Uc", "")
+BUILTIN(__builtin_altivec_vmulosb, "V8SsV16ScV16Sc", "")
+BUILTIN(__builtin_altivec_vmulouh, "V4UiV8UsV8Us", "")
+BUILTIN(__builtin_altivec_vmulosh, "V4SiV8SsV8Ss", "")
+
+BUILTIN(__builtin_altivec_vnmsubfp, "V4fV4fV4fV4f", "")
+
+BUILTIN(__builtin_altivec_vpkpx, "V8sV4UiV4Ui", "")
+BUILTIN(__builtin_altivec_vpkuhus, "V16UcV8UsV8Us", "")
+BUILTIN(__builtin_altivec_vpkshss, "V16ScV8SsV8Ss", "")
+BUILTIN(__builtin_altivec_vpkuwus, "V8UsV4UiV4Ui", "")
+BUILTIN(__builtin_altivec_vpkswss, "V8SsV4SiV4Si", "")
+BUILTIN(__builtin_altivec_vpkshus, "V16UcV8SsV8Ss", "")
+BUILTIN(__builtin_altivec_vpkswus, "V8UsV4SiV4Si", "")
+
+BUILTIN(__builtin_altivec_vperm_4si, "V4iV4iV4iV16Uc", "")
+
+BUILTIN(__builtin_altivec_stvx, "vV4iiv*", "")
+BUILTIN(__builtin_altivec_stvxl, "vV4iiv*", "")
+BUILTIN(__builtin_altivec_stvebx, "vV16civ*", "")
+BUILTIN(__builtin_altivec_stvehx, "vV8siv*", "")
+BUILTIN(__builtin_altivec_stvewx, "vV4iiv*", "")
+
+BUILTIN(__builtin_altivec_vcmpbfp, "V4iV4fV4f", "")
+
+BUILTIN(__builtin_altivec_vcmpgefp, "V4iV4fV4f", "")
+
+BUILTIN(__builtin_altivec_vcmpequb, "V16cV16cV16c", "")
+BUILTIN(__builtin_altivec_vcmpequh, "V8sV8sV8s", "")
+BUILTIN(__builtin_altivec_vcmpequw, "V4iV4iV4i", "")
+BUILTIN(__builtin_altivec_vcmpeqfp, "V4iV4fV4f", "")
+
+BUILTIN(__builtin_altivec_vcmpgtsb, "V16cV16ScV16Sc", "")
+BUILTIN(__builtin_altivec_vcmpgtub, "V16cV16UcV16Uc", "")
+BUILTIN(__builtin_altivec_vcmpgtsh, "V8sV8SsV8Ss", "")
+BUILTIN(__builtin_altivec_vcmpgtuh, "V8sV8UsV8Us", "")
+BUILTIN(__builtin_altivec_vcmpgtsw, "V4iV4SiV4Si", "")
+BUILTIN(__builtin_altivec_vcmpgtuw, "V4iV4UiV4Ui", "")
+BUILTIN(__builtin_altivec_vcmpgtfp, "V4iV4fV4f", "")
+
+BUILTIN(__builtin_altivec_vmaxsb, "V16ScV16ScV16Sc", "")
+BUILTIN(__builtin_altivec_vmaxub, "V16UcV16UcV16Uc", "")
+BUILTIN(__builtin_altivec_vmaxsh, "V8SsV8SsV8Ss", "")
+BUILTIN(__builtin_altivec_vmaxuh, "V8UsV8UsV8Us", "")
+BUILTIN(__builtin_altivec_vmaxsw, "V4SiV4SiV4Si", "")
+BUILTIN(__builtin_altivec_vmaxuw, "V4UiV4UiV4Ui", "")
+BUILTIN(__builtin_altivec_vmaxfp, "V4fV4fV4f", "")
+
+BUILTIN(__builtin_altivec_mfvscr, "V8Us", "")
+
+BUILTIN(__builtin_altivec_vminsb, "V16ScV16ScV16Sc", "")
+BUILTIN(__builtin_altivec_vminub, "V16UcV16UcV16Uc", "")
+BUILTIN(__builtin_altivec_vminsh, "V8SsV8SsV8Ss", "")
+BUILTIN(__builtin_altivec_vminuh, "V8UsV8UsV8Us", "")
+BUILTIN(__builtin_altivec_vminsw, "V4SiV4SiV4Si", "")
+BUILTIN(__builtin_altivec_vminuw, "V4UiV4UiV4Ui", "")
+BUILTIN(__builtin_altivec_vminfp, "V4fV4fV4f", "")
+
+BUILTIN(__builtin_altivec_mtvscr, "vV4i", "")
+
+BUILTIN(__builtin_altivec_vrefp, "V4fV4f", "")
+
+BUILTIN(__builtin_altivec_vrlb, "V16cV16cV16Uc", "")
+BUILTIN(__builtin_altivec_vrlh, "V8sV8sV8Us", "")
+BUILTIN(__builtin_altivec_vrlw, "V4iV4iV4Ui", "")
+
+BUILTIN(__builtin_altivec_vsel_4si, "V4iV4iV4iV4Ui", "")
+
+BUILTIN(__builtin_altivec_vsl, "V4iV4iV4i", "")
+BUILTIN(__builtin_altivec_vslo, "V4iV4iV4i", "")
+
+BUILTIN(__builtin_altivec_vsrab, "V16cV16cV16Uc", "")
+BUILTIN(__builtin_altivec_vsrah, "V8sV8sV8Us", "")
+BUILTIN(__builtin_altivec_vsraw, "V4iV4iV4Ui", "")
+
+BUILTIN(__builtin_altivec_vsr, "V4iV4iV4i", "")
+BUILTIN(__builtin_altivec_vsro, "V4iV4iV4i", "")
+
+BUILTIN(__builtin_altivec_vrfin, "V4fV4f", "")
+
+BUILTIN(__builtin_altivec_vrsqrtefp, "V4fV4f", "")
+
+BUILTIN(__builtin_altivec_vsubcuw, "V4UiV4UiV4Ui", "")
+
+BUILTIN(__builtin_altivec_vsum4sbs, "V4SiV16ScV4Si", "")
+BUILTIN(__builtin_altivec_vsum4ubs, "V4UiV16UcV4Ui", "")
+BUILTIN(__builtin_altivec_vsum4shs, "V4SiV8SsV4Si", "")
+
+BUILTIN(__builtin_altivec_vsum2sws, "V4SiV4SiV4Si", "")
+
+BUILTIN(__builtin_altivec_vsumsws, "V4SiV4SiV4Si", "")
+
+BUILTIN(__builtin_altivec_vrfiz, "V4fV4f", "")
+
+BUILTIN(__builtin_altivec_vupkhsb, "V8sV16c", "")
+BUILTIN(__builtin_altivec_vupkhpx, "V4UiV8s", "")
+BUILTIN(__builtin_altivec_vupkhsh, "V4iV8s", "")
+
+BUILTIN(__builtin_altivec_vupklsb, "V8sV16c", "")
+BUILTIN(__builtin_altivec_vupklpx, "V4UiV8s", "")
+BUILTIN(__builtin_altivec_vupklsh, "V4iV8s", "")
+
+BUILTIN(__builtin_altivec_vcmpbfp_p, "iiV4fV4f", "")
+
+BUILTIN(__builtin_altivec_vcmpgefp_p, "iiV4fV4f", "")
+
+BUILTIN(__builtin_altivec_vcmpequb_p, "iiV16cV16c", "")
+BUILTIN(__builtin_altivec_vcmpequh_p, "iiV8sV8s", "")
+BUILTIN(__builtin_altivec_vcmpequw_p, "iiV4iV4i", "")
+BUILTIN(__builtin_altivec_vcmpeqfp_p, "iiV4fV4f", "")
+
+BUILTIN(__builtin_altivec_vcmpgtsb_p, "iiV16ScV16Sc", "")
+BUILTIN(__builtin_altivec_vcmpgtub_p, "iiV16UcV16Uc", "")
+BUILTIN(__builtin_altivec_vcmpgtsh_p, "iiV8SsV8Ss", "")
+BUILTIN(__builtin_altivec_vcmpgtuh_p, "iiV8UsV8Us", "")
+BUILTIN(__builtin_altivec_vcmpgtsw_p, "iiV4SiV4Si", "")
+BUILTIN(__builtin_altivec_vcmpgtuw_p, "iiV4UiV4Ui", "")
+BUILTIN(__builtin_altivec_vcmpgtfp_p, "iiV4fV4f", "")
+
+// FIXME: Obviously incomplete.
+
+#undef BUILTIN
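A note for readers of the tables above: the quoted prototype strings follow the compact encoding documented in clang/Basic/Builtins.def. The decoding below is an illustrative, hand-written sketch (not tool output), using __builtin_altivec_vavgsw from the list above:

// "V4SiV4SiV4Si" is read left to right: return type first, then parameters.
//   V4Si = vector of 4 signed ints, so the builtin behaves like:
typedef int v4si __attribute__((vector_size(16)));
v4si vavgsw_equivalent(v4si a, v4si b);      // stand-in for __builtin_altivec_vavgsw
// Other codes appearing above: v = void, c = char, s = short, f = float,
// U/S = unsigned/signed prefix, LLi = long long, * = pointer suffix,
// C = const suffix, and I marks an argument that must be a constant
// expression. The third BUILTIN field holds attributes such as n (nothrow)
// and c (const); it is left empty throughout this table.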
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPTX.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPTX.def
new file mode 100644
index 0000000..f90a43f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsPTX.def
@@ -0,0 +1,62 @@
+//===--- BuiltinsPTX.def - PTX Builtin function database ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PTX-specific builtin function database. Users of
+// this file must define the BUILTIN macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+// The format of this database matches clang/Basic/Builtins.def.
+
+BUILTIN(__builtin_ptx_read_tid_x, "i", "nc")
+BUILTIN(__builtin_ptx_read_tid_y, "i", "nc")
+BUILTIN(__builtin_ptx_read_tid_z, "i", "nc")
+BUILTIN(__builtin_ptx_read_tid_w, "i", "nc")
+
+BUILTIN(__builtin_ptx_read_ntid_x, "i", "nc")
+BUILTIN(__builtin_ptx_read_ntid_y, "i", "nc")
+BUILTIN(__builtin_ptx_read_ntid_z, "i", "nc")
+BUILTIN(__builtin_ptx_read_ntid_w, "i", "nc")
+
+BUILTIN(__builtin_ptx_read_ctaid_x, "i", "nc")
+BUILTIN(__builtin_ptx_read_ctaid_y, "i", "nc")
+BUILTIN(__builtin_ptx_read_ctaid_z, "i", "nc")
+BUILTIN(__builtin_ptx_read_ctaid_w, "i", "nc")
+
+BUILTIN(__builtin_ptx_read_nctaid_x, "i", "nc")
+BUILTIN(__builtin_ptx_read_nctaid_y, "i", "nc")
+BUILTIN(__builtin_ptx_read_nctaid_z, "i", "nc")
+BUILTIN(__builtin_ptx_read_nctaid_w, "i", "nc")
+
+BUILTIN(__builtin_ptx_read_laneid, "i", "nc")
+BUILTIN(__builtin_ptx_read_warpid, "i", "nc")
+BUILTIN(__builtin_ptx_read_nwarpid, "i", "nc")
+
+BUILTIN(__builtin_ptx_read_smid, "i", "nc")
+BUILTIN(__builtin_ptx_read_nsmid, "i", "nc")
+BUILTIN(__builtin_ptx_read_gridid, "i", "nc")
+
+BUILTIN(__builtin_ptx_read_lanemask_eq, "i", "nc")
+BUILTIN(__builtin_ptx_read_lanemask_le, "i", "nc")
+BUILTIN(__builtin_ptx_read_lanemask_lt, "i", "nc")
+BUILTIN(__builtin_ptx_read_lanemask_ge, "i", "nc")
+BUILTIN(__builtin_ptx_read_lanemask_gt, "i", "nc")
+
+BUILTIN(__builtin_ptx_read_clock, "i", "n")
+BUILTIN(__builtin_ptx_read_clock64, "Li", "n")
+
+BUILTIN(__builtin_ptx_read_pm0, "i", "n")
+BUILTIN(__builtin_ptx_read_pm1, "i", "n")
+BUILTIN(__builtin_ptx_read_pm2, "i", "n")
+BUILTIN(__builtin_ptx_read_pm3, "i", "n")
+
+BUILTIN(__builtin_ptx_bar_sync, "vi", "n")
+
+
+#undef BUILTIN
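As the header comment above says, consumers define BUILTIN before including the file. A minimal sketch of that X-macro pattern (the struct and table names are illustrative, not Clang's actual implementation):

// Expand every PTX builtin into a static description table.
struct PTXBuiltinInfo {
  const char *Name;   // e.g. "__builtin_ptx_read_tid_x"
  const char *Type;   // encoded prototype, e.g. "i"
  const char *Attrs;  // e.g. "nc" (nothrow, const)
};

static const PTXBuiltinInfo PTXBuiltins[] = {
#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS },
#include "clang/Basic/BuiltinsPTX.def"
};
// No #undef is needed here: the .def file undefines BUILTIN itself (see above).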
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def
new file mode 100644
index 0000000..f44aed6
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/BuiltinsX86.def
@@ -0,0 +1,633 @@
+//===--- BuiltinsX86.def - X86 Builtin function database --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the X86-specific builtin function database. Users of
+// this file must define the BUILTIN macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+// The format of this database matches clang/Basic/Builtins.def.
+
+// FIXME: In GCC, these builtins are defined depending on whether support for
+// MMX/SSE/etc is turned on. We should do this too.
+
+// FIXME: Ideally we would be able to pull this information from what
+// LLVM already knows about X86 builtins. We need to match the LLVM
+// definition anyway, since code generation will lower to the
+// intrinsic if one exists.
+
+// FIXME: Are these nothrow/const?
+
+// 3DNow!
+//
+BUILTIN(__builtin_ia32_femms, "v", "")
+BUILTIN(__builtin_ia32_pavgusb, "V8cV8cV8c", "nc")
+BUILTIN(__builtin_ia32_pf2id, "V2iV2f", "nc")
+BUILTIN(__builtin_ia32_pfacc, "V2fV2fV2f", "nc")
+BUILTIN(__builtin_ia32_pfadd, "V2fV2fV2f", "nc")
+BUILTIN(__builtin_ia32_pfcmpeq, "V2iV2fV2f", "nc")
+BUILTIN(__builtin_ia32_pfcmpge, "V2iV2fV2f", "nc")
+BUILTIN(__builtin_ia32_pfcmpgt, "V2iV2fV2f", "nc")
+BUILTIN(__builtin_ia32_pfmax, "V2fV2fV2f", "nc")
+BUILTIN(__builtin_ia32_pfmin, "V2fV2fV2f", "nc")
+BUILTIN(__builtin_ia32_pfmul, "V2fV2fV2f", "nc")
+BUILTIN(__builtin_ia32_pfrcp, "V2fV2f", "nc")
+BUILTIN(__builtin_ia32_pfrcpit1, "V2fV2fV2f", "nc")
+BUILTIN(__builtin_ia32_pfrcpit2, "V2fV2fV2f", "nc")
+BUILTIN(__builtin_ia32_pfrsqrt, "V2fV2f", "nc")
+BUILTIN(__builtin_ia32_pfrsqit1, "V2fV2fV2f", "nc")
+BUILTIN(__builtin_ia32_pfsub, "V2fV2fV2f", "nc")
+BUILTIN(__builtin_ia32_pfsubr, "V2fV2fV2f", "nc")
+BUILTIN(__builtin_ia32_pi2fd, "V2fV2i", "nc")
+BUILTIN(__builtin_ia32_pmulhrw, "V4sV4sV4s", "nc")
+// 3DNow! Extensions (3dnowa).
+BUILTIN(__builtin_ia32_pf2iw, "V2iV2f", "nc")
+BUILTIN(__builtin_ia32_pfnacc, "V2fV2fV2f", "nc")
+BUILTIN(__builtin_ia32_pfpnacc, "V2fV2fV2f", "nc")
+BUILTIN(__builtin_ia32_pi2fw, "V2fV2i", "nc")
+BUILTIN(__builtin_ia32_pswapdsf, "V2fV2f", "nc")
+BUILTIN(__builtin_ia32_pswapdsi, "V2iV2i", "nc")
+
+// MMX
+//
+// All MMX instructions will be generated via builtins. Any MMX vector
+// types (<1 x i64>, <2 x i32>, etc.) that aren't used by these builtins will be
+// expanded by the back-end.
+BUILTIN(__builtin_ia32_emms, "v", "")
+BUILTIN(__builtin_ia32_paddb, "V8cV8cV8c", "")
+BUILTIN(__builtin_ia32_paddw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_paddd, "V2iV2iV2i", "")
+BUILTIN(__builtin_ia32_paddsb, "V8cV8cV8c", "")
+BUILTIN(__builtin_ia32_paddsw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_paddusb, "V8cV8cV8c", "")
+BUILTIN(__builtin_ia32_paddusw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_psubb, "V8cV8cV8c", "")
+BUILTIN(__builtin_ia32_psubw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_psubd, "V2iV2iV2i", "")
+BUILTIN(__builtin_ia32_psubsb, "V8cV8cV8c", "")
+BUILTIN(__builtin_ia32_psubsw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_psubusb, "V8cV8cV8c", "")
+BUILTIN(__builtin_ia32_psubusw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_pmulhw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_pmullw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_pmaddwd, "V2iV4sV4s", "")
+BUILTIN(__builtin_ia32_pand, "V1LLiV1LLiV1LLi", "")
+BUILTIN(__builtin_ia32_pandn, "V1LLiV1LLiV1LLi", "")
+BUILTIN(__builtin_ia32_por, "V1LLiV1LLiV1LLi", "")
+BUILTIN(__builtin_ia32_pxor, "V1LLiV1LLiV1LLi", "")
+BUILTIN(__builtin_ia32_psllw, "V4sV4sV1LLi", "")
+BUILTIN(__builtin_ia32_pslld, "V2iV2iV1LLi", "")
+BUILTIN(__builtin_ia32_psllq, "V1LLiV1LLiV1LLi", "")
+BUILTIN(__builtin_ia32_psrlw, "V4sV4sV1LLi", "")
+BUILTIN(__builtin_ia32_psrld, "V2iV2iV1LLi", "")
+BUILTIN(__builtin_ia32_psrlq, "V1LLiV1LLiV1LLi", "")
+BUILTIN(__builtin_ia32_psraw, "V4sV4sV1LLi", "")
+BUILTIN(__builtin_ia32_psrad, "V2iV2iV1LLi", "")
+BUILTIN(__builtin_ia32_psllwi, "V4sV4si", "")
+BUILTIN(__builtin_ia32_pslldi, "V2iV2ii", "")
+BUILTIN(__builtin_ia32_psllqi, "V1LLiV1LLii", "")
+BUILTIN(__builtin_ia32_psrlwi, "V4sV4si", "")
+BUILTIN(__builtin_ia32_psrldi, "V2iV2ii", "")
+BUILTIN(__builtin_ia32_psrlqi, "V1LLiV1LLii", "")
+BUILTIN(__builtin_ia32_psrawi, "V4sV4si", "")
+BUILTIN(__builtin_ia32_psradi, "V2iV2ii", "")
+BUILTIN(__builtin_ia32_packsswb, "V8cV4sV4s", "")
+BUILTIN(__builtin_ia32_packssdw, "V4sV2iV2i", "")
+BUILTIN(__builtin_ia32_packuswb, "V8cV4sV4s", "")
+BUILTIN(__builtin_ia32_punpckhbw, "V8cV8cV8c", "")
+BUILTIN(__builtin_ia32_punpckhwd, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_punpckhdq, "V2iV2iV2i", "")
+BUILTIN(__builtin_ia32_punpcklbw, "V8cV8cV8c", "")
+BUILTIN(__builtin_ia32_punpcklwd, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_punpckldq, "V2iV2iV2i", "")
+BUILTIN(__builtin_ia32_pcmpeqb, "V8cV8cV8c", "")
+BUILTIN(__builtin_ia32_pcmpeqw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_pcmpeqd, "V2iV2iV2i", "")
+BUILTIN(__builtin_ia32_pcmpgtb, "V8cV8cV8c", "")
+BUILTIN(__builtin_ia32_pcmpgtw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_pcmpgtd, "V2iV2iV2i", "")
+BUILTIN(__builtin_ia32_maskmovq, "vV8cV8cc*", "")
+BUILTIN(__builtin_ia32_movntq, "vV1LLi*V1LLi", "")
+BUILTIN(__builtin_ia32_vec_init_v2si, "V2iii", "")
+BUILTIN(__builtin_ia32_vec_init_v4hi, "V4sssss", "")
+BUILTIN(__builtin_ia32_vec_init_v8qi, "V8ccccccccc", "")
+BUILTIN(__builtin_ia32_vec_ext_v2si, "iV2ii", "")
+
+// MMX2 (MMX+SSE) intrinsics
+BUILTIN(__builtin_ia32_cvtpi2ps, "V4fV4fV2i", "")
+BUILTIN(__builtin_ia32_cvtps2pi, "V2iV4f", "")
+BUILTIN(__builtin_ia32_cvttps2pi, "V2iV4f", "")
+BUILTIN(__builtin_ia32_pavgb, "V8cV8cV8c", "")
+BUILTIN(__builtin_ia32_pavgw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_pmaxsw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_pmaxub, "V8cV8cV8c", "")
+BUILTIN(__builtin_ia32_pminsw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_pminub, "V8cV8cV8c", "")
+BUILTIN(__builtin_ia32_pmovmskb, "iV8c", "")
+BUILTIN(__builtin_ia32_pmulhuw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_psadbw, "V4sV8cV8c", "")
+BUILTIN(__builtin_ia32_pshufw, "V4sV4sIc", "")
+
+// MMX+SSE2
+BUILTIN(__builtin_ia32_cvtpd2pi, "V2iV2d", "")
+BUILTIN(__builtin_ia32_cvtpi2pd, "V2dV2i", "")
+BUILTIN(__builtin_ia32_cvttpd2pi, "V2iV2d", "")
+BUILTIN(__builtin_ia32_paddq, "V1LLiV1LLiV1LLi", "")
+BUILTIN(__builtin_ia32_pmuludq, "V1LLiV2iV2i", "")
+BUILTIN(__builtin_ia32_psubq, "V1LLiV1LLiV1LLi", "")
+
+// MMX+SSSE3
+BUILTIN(__builtin_ia32_pabsb, "V8cV8c", "")
+BUILTIN(__builtin_ia32_pabsd, "V2iV2i", "")
+BUILTIN(__builtin_ia32_pabsw, "V4sV4s", "")
+BUILTIN(__builtin_ia32_palignr, "V8cV8cV8cIc", "")
+BUILTIN(__builtin_ia32_phaddd, "V2iV2iV2i", "")
+BUILTIN(__builtin_ia32_phaddsw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_phaddw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_phsubd, "V2iV2iV2i", "")
+BUILTIN(__builtin_ia32_phsubsw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_phsubw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_pmaddubsw, "V8cV8cV8c", "")
+BUILTIN(__builtin_ia32_pmulhrsw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_pshufb, "V8cV8cV8c", "")
+BUILTIN(__builtin_ia32_psignw, "V4sV4sV4s", "")
+BUILTIN(__builtin_ia32_psignb, "V8cV8cV8c", "")
+BUILTIN(__builtin_ia32_psignd, "V2iV2iV2i", "")
+
+// SSE intrinsics.
+BUILTIN(__builtin_ia32_comieq, "iV4fV4f", "")
+BUILTIN(__builtin_ia32_comilt, "iV4fV4f", "")
+BUILTIN(__builtin_ia32_comile, "iV4fV4f", "")
+BUILTIN(__builtin_ia32_comigt, "iV4fV4f", "")
+BUILTIN(__builtin_ia32_comige, "iV4fV4f", "")
+BUILTIN(__builtin_ia32_comineq, "iV4fV4f", "")
+BUILTIN(__builtin_ia32_ucomieq, "iV4fV4f", "")
+BUILTIN(__builtin_ia32_ucomilt, "iV4fV4f", "")
+BUILTIN(__builtin_ia32_ucomile, "iV4fV4f", "")
+BUILTIN(__builtin_ia32_ucomigt, "iV4fV4f", "")
+BUILTIN(__builtin_ia32_ucomige, "iV4fV4f", "")
+BUILTIN(__builtin_ia32_ucomineq, "iV4fV4f", "")
+BUILTIN(__builtin_ia32_comisdeq, "iV2dV2d", "")
+BUILTIN(__builtin_ia32_comisdlt, "iV2dV2d", "")
+BUILTIN(__builtin_ia32_comisdle, "iV2dV2d", "")
+BUILTIN(__builtin_ia32_comisdgt, "iV2dV2d", "")
+BUILTIN(__builtin_ia32_comisdge, "iV2dV2d", "")
+BUILTIN(__builtin_ia32_comisdneq, "iV2dV2d", "")
+BUILTIN(__builtin_ia32_ucomisdeq, "iV2dV2d", "")
+BUILTIN(__builtin_ia32_ucomisdlt, "iV2dV2d", "")
+BUILTIN(__builtin_ia32_ucomisdle, "iV2dV2d", "")
+BUILTIN(__builtin_ia32_ucomisdgt, "iV2dV2d", "")
+BUILTIN(__builtin_ia32_ucomisdge, "iV2dV2d", "")
+BUILTIN(__builtin_ia32_ucomisdneq, "iV2dV2d", "")
+BUILTIN(__builtin_ia32_cmpps, "V4fV4fV4fIc", "")
+BUILTIN(__builtin_ia32_cmpss, "V4fV4fV4fIc", "")
+BUILTIN(__builtin_ia32_minps, "V4fV4fV4f", "")
+BUILTIN(__builtin_ia32_maxps, "V4fV4fV4f", "")
+BUILTIN(__builtin_ia32_minss, "V4fV4fV4f", "")
+BUILTIN(__builtin_ia32_maxss, "V4fV4fV4f", "")
+BUILTIN(__builtin_ia32_cmppd, "V2dV2dV2dIc", "")
+BUILTIN(__builtin_ia32_cmpsd, "V2dV2dV2dIc", "")
+BUILTIN(__builtin_ia32_minpd, "V2dV2dV2d", "")
+BUILTIN(__builtin_ia32_maxpd, "V2dV2dV2d", "")
+BUILTIN(__builtin_ia32_minsd, "V2dV2dV2d", "")
+BUILTIN(__builtin_ia32_maxsd, "V2dV2dV2d", "")
+BUILTIN(__builtin_ia32_paddsb128, "V16cV16cV16c", "")
+BUILTIN(__builtin_ia32_paddsw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_psubsb128, "V16cV16cV16c", "")
+BUILTIN(__builtin_ia32_psubsw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_paddusb128, "V16cV16cV16c", "")
+BUILTIN(__builtin_ia32_paddusw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_psubusb128, "V16cV16cV16c", "")
+BUILTIN(__builtin_ia32_psubusw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_pmulhw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_pavgb128, "V16cV16cV16c", "")
+BUILTIN(__builtin_ia32_pavgw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_pmaxub128, "V16cV16cV16c", "")
+BUILTIN(__builtin_ia32_pmaxsw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_pminub128, "V16cV16cV16c", "")
+BUILTIN(__builtin_ia32_pminsw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_packsswb128, "V16cV8sV8s", "")
+BUILTIN(__builtin_ia32_packssdw128, "V8sV4iV4i", "")
+BUILTIN(__builtin_ia32_packuswb128, "V16cV8sV8s", "")
+BUILTIN(__builtin_ia32_pmulhuw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_addsubps, "V4fV4fV4f", "")
+BUILTIN(__builtin_ia32_addsubpd, "V2dV2dV2d", "")
+BUILTIN(__builtin_ia32_haddps, "V4fV4fV4f", "")
+BUILTIN(__builtin_ia32_haddpd, "V2dV2dV2d", "")
+BUILTIN(__builtin_ia32_hsubps, "V4fV4fV4f", "")
+BUILTIN(__builtin_ia32_hsubpd, "V2dV2dV2d", "")
+BUILTIN(__builtin_ia32_phaddw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_phaddd128, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_phaddsw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_phsubw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_phsubd128, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_phsubsw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_pmaddubsw128, "V8sV16cV16c", "")
+BUILTIN(__builtin_ia32_pmulhrsw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_pshufb128, "V16cV16cV16c", "")
+BUILTIN(__builtin_ia32_psignb128, "V16cV16cV16c", "")
+BUILTIN(__builtin_ia32_psignw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_psignd128, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_pabsb128, "V16cV16c", "")
+BUILTIN(__builtin_ia32_pabsw128, "V8sV8s", "")
+BUILTIN(__builtin_ia32_pabsd128, "V4iV4i", "")
+BUILTIN(__builtin_ia32_ldmxcsr, "vUi", "")
+BUILTIN(__builtin_ia32_stmxcsr, "Ui", "")
+BUILTIN(__builtin_ia32_cvtss2si, "iV4f", "")
+BUILTIN(__builtin_ia32_cvtss2si64, "LLiV4f", "")
+BUILTIN(__builtin_ia32_storeups, "vf*V4f", "")
+BUILTIN(__builtin_ia32_storehps, "vV2i*V4f", "")
+BUILTIN(__builtin_ia32_storelps, "vV2i*V4f", "")
+BUILTIN(__builtin_ia32_movmskps, "iV4f", "")
+BUILTIN(__builtin_ia32_movntps, "vf*V4f", "")
+BUILTIN(__builtin_ia32_sfence, "v", "")
+BUILTIN(__builtin_ia32_rcpps, "V4fV4f", "")
+BUILTIN(__builtin_ia32_rcpss, "V4fV4f", "")
+BUILTIN(__builtin_ia32_rsqrtps, "V4fV4f", "")
+BUILTIN(__builtin_ia32_rsqrtss, "V4fV4f", "")
+BUILTIN(__builtin_ia32_sqrtps, "V4fV4f", "")
+BUILTIN(__builtin_ia32_sqrtss, "V4fV4f", "")
+BUILTIN(__builtin_ia32_maskmovdqu, "vV16cV16cc*", "")
+BUILTIN(__builtin_ia32_storeupd, "vd*V2d", "")
+BUILTIN(__builtin_ia32_movmskpd, "iV2d", "")
+BUILTIN(__builtin_ia32_pmovmskb128, "iV16c", "")
+BUILTIN(__builtin_ia32_movnti, "vi*i", "")
+BUILTIN(__builtin_ia32_movntpd, "vd*V2d", "")
+BUILTIN(__builtin_ia32_movntdq, "vV2LLi*V2LLi", "")
+BUILTIN(__builtin_ia32_psadbw128, "V2LLiV16cV16c", "")
+BUILTIN(__builtin_ia32_sqrtpd, "V2dV2d", "")
+BUILTIN(__builtin_ia32_sqrtsd, "V2dV2d", "")
+BUILTIN(__builtin_ia32_cvtdq2pd, "V2dV4i", "")
+BUILTIN(__builtin_ia32_cvtdq2ps, "V4fV4i", "")
+BUILTIN(__builtin_ia32_cvtpd2dq, "V2LLiV2d", "")
+BUILTIN(__builtin_ia32_cvtpd2ps, "V4fV2d", "")
+BUILTIN(__builtin_ia32_cvttpd2dq, "V4iV2d", "")
+BUILTIN(__builtin_ia32_cvtsd2si, "iV2d", "")
+BUILTIN(__builtin_ia32_cvtsd2si64, "LLiV2d", "")
+BUILTIN(__builtin_ia32_cvtps2dq, "V4iV4f", "")
+BUILTIN(__builtin_ia32_cvtps2pd, "V2dV4f", "")
+BUILTIN(__builtin_ia32_cvttps2dq, "V4iV4f", "")
+BUILTIN(__builtin_ia32_clflush, "vvC*", "")
+BUILTIN(__builtin_ia32_lfence, "v", "")
+BUILTIN(__builtin_ia32_mfence, "v", "")
+BUILTIN(__builtin_ia32_storedqu, "vc*V16c", "")
+BUILTIN(__builtin_ia32_pmuludq128, "V2LLiV4iV4i", "")
+BUILTIN(__builtin_ia32_psraw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_psrad128, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_psrlw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_psrld128, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_pslldqi128, "V2LLiV2LLiIi", "")
+BUILTIN(__builtin_ia32_psrldqi128, "V2LLiV2LLiIi", "")
+BUILTIN(__builtin_ia32_psrlq128, "V2LLiV2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_psllw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_pslld128, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_psllq128, "V2LLiV2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_psllwi128, "V8sV8si", "")
+BUILTIN(__builtin_ia32_pslldi128, "V4iV4ii", "")
+BUILTIN(__builtin_ia32_psllqi128, "V2LLiV2LLii", "")
+BUILTIN(__builtin_ia32_psrlwi128, "V8sV8si", "")
+BUILTIN(__builtin_ia32_psrldi128, "V4iV4ii", "")
+BUILTIN(__builtin_ia32_psrlqi128, "V2LLiV2LLii", "")
+BUILTIN(__builtin_ia32_psrawi128, "V8sV8si", "")
+BUILTIN(__builtin_ia32_psradi128, "V4iV4ii", "")
+BUILTIN(__builtin_ia32_pmaddwd128, "V4iV8sV8s", "")
+BUILTIN(__builtin_ia32_monitor, "vv*UiUi", "")
+BUILTIN(__builtin_ia32_mwait, "vUiUi", "")
+BUILTIN(__builtin_ia32_lddqu, "V16ccC*", "")
+BUILTIN(__builtin_ia32_palignr128, "V16cV16cV16cIc", "")
+BUILTIN(__builtin_ia32_insertps128, "V4fV4fV4fi", "")
+
+BUILTIN(__builtin_ia32_storelv4si, "vV2i*V2LLi", "")
+
+BUILTIN(__builtin_ia32_pblendvb128, "V16cV16cV16cV16c", "")
+BUILTIN(__builtin_ia32_pblendw128, "V8sV8sV8sIi", "")
+BUILTIN(__builtin_ia32_blendpd, "V2dV2dV2dIi", "")
+BUILTIN(__builtin_ia32_blendps, "V4fV4fV4fIi", "")
+BUILTIN(__builtin_ia32_blendvpd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_blendvps, "V4fV4fV4fV4f", "")
+
+BUILTIN(__builtin_ia32_packusdw128, "V8sV4iV4i", "")
+BUILTIN(__builtin_ia32_pmaxsb128, "V16cV16cV16c", "")
+BUILTIN(__builtin_ia32_pmaxsd128, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_pmaxud128, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_pmaxuw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_pminsb128, "V16cV16cV16c", "")
+BUILTIN(__builtin_ia32_pminsd128, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_pminud128, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_pminuw128, "V8sV8sV8s", "")
+BUILTIN(__builtin_ia32_pmovsxbd128, "V4iV16c", "")
+BUILTIN(__builtin_ia32_pmovsxbq128, "V2LLiV16c", "")
+BUILTIN(__builtin_ia32_pmovsxbw128, "V8sV16c", "")
+BUILTIN(__builtin_ia32_pmovsxdq128, "V2LLiV4i", "")
+BUILTIN(__builtin_ia32_pmovsxwd128, "V4iV8s", "")
+BUILTIN(__builtin_ia32_pmovsxwq128, "V2LLiV8s", "")
+BUILTIN(__builtin_ia32_pmovzxbd128, "V4iV16c", "")
+BUILTIN(__builtin_ia32_pmovzxbq128, "V2LLiV16c", "")
+BUILTIN(__builtin_ia32_pmovzxbw128, "V8sV16c", "")
+BUILTIN(__builtin_ia32_pmovzxdq128, "V2LLiV4i", "")
+BUILTIN(__builtin_ia32_pmovzxwd128, "V4iV8s", "")
+BUILTIN(__builtin_ia32_pmovzxwq128, "V2LLiV8s", "")
+BUILTIN(__builtin_ia32_pmuldq128, "V2LLiV4iV4i", "")
+BUILTIN(__builtin_ia32_pmulld128, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_roundps, "V4fV4fi", "")
+BUILTIN(__builtin_ia32_roundss, "V4fV4fV4fi", "")
+BUILTIN(__builtin_ia32_roundsd, "V2dV2dV2di", "")
+BUILTIN(__builtin_ia32_roundpd, "V2dV2di", "")
+BUILTIN(__builtin_ia32_dpps, "V4fV4fV4fi", "")
+BUILTIN(__builtin_ia32_dppd, "V2dV2dV2di", "")
+BUILTIN(__builtin_ia32_movntdqa, "V2LLiV2LLi*", "")
+BUILTIN(__builtin_ia32_ptestz128, "iV2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_ptestc128, "iV2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_ptestnzc128, "iV2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_mpsadbw128, "V16cV16cV16ci", "")
+BUILTIN(__builtin_ia32_phminposuw128, "V8sV8s", "")
+
+// SSE 4.2
+BUILTIN(__builtin_ia32_pcmpistrm128, "V16cV16cV16cIc", "")
+BUILTIN(__builtin_ia32_pcmpistri128, "iV16cV16cIc", "")
+BUILTIN(__builtin_ia32_pcmpestrm128, "V16cV16ciV16ciIc", "")
+BUILTIN(__builtin_ia32_pcmpestri128, "iV16ciV16ciIc","")
+
+// FIXME: These builtins are horribly broken; reenable when PR11305 is fixed.
+//BUILTIN(__builtin_ia32_pcmpistria128, "iV16cV16cIc","")
+//BUILTIN(__builtin_ia32_pcmpistric128, "iV16cV16cIc","")
+//BUILTIN(__builtin_ia32_pcmpistrio128, "iV16cV16cIc","")
+//BUILTIN(__builtin_ia32_pcmpistris128, "iV16cV16cIc","")
+//BUILTIN(__builtin_ia32_pcmpistriz128, "iV16cV16cIc","")
+//BUILTIN(__builtin_ia32_pcmpestria128, "iV16ciV16ciIc","")
+//BUILTIN(__builtin_ia32_pcmpestric128, "iV16ciV16ciIc","")
+//BUILTIN(__builtin_ia32_pcmpestrio128, "iV16ciV16ciic","")
+//BUILTIN(__builtin_ia32_pcmpestris128, "iV16ciV16ciIc","")
+//BUILTIN(__builtin_ia32_pcmpestriz128, "iV16ciV16ciIc","")
+
+BUILTIN(__builtin_ia32_crc32qi, "UiUiUc", "")
+BUILTIN(__builtin_ia32_crc32hi, "UiUiUs", "")
+BUILTIN(__builtin_ia32_crc32si, "UiUiUi", "")
+BUILTIN(__builtin_ia32_crc32di, "ULLiULLiULLi", "")
+
+// AES
+BUILTIN(__builtin_ia32_aesenc128, "V2LLiV2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_aesenclast128, "V2LLiV2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_aesdec128, "V2LLiV2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_aesdeclast128, "V2LLiV2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_aesimc128, "V2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_aeskeygenassist128, "V2LLiV2LLiIc", "")
+
+// AVX
+BUILTIN(__builtin_ia32_addsubpd256, "V4dV4dV4d", "")
+BUILTIN(__builtin_ia32_addsubps256, "V8fV8fV8f", "")
+BUILTIN(__builtin_ia32_haddpd256, "V4dV4dV4d", "")
+BUILTIN(__builtin_ia32_hsubps256, "V8fV8fV8f", "")
+BUILTIN(__builtin_ia32_hsubpd256, "V4dV4dV4d", "")
+BUILTIN(__builtin_ia32_haddps256, "V8fV8fV8f", "")
+BUILTIN(__builtin_ia32_maxpd256, "V4dV4dV4d", "")
+BUILTIN(__builtin_ia32_maxps256, "V8fV8fV8f", "")
+BUILTIN(__builtin_ia32_minpd256, "V4dV4dV4d", "")
+BUILTIN(__builtin_ia32_minps256, "V8fV8fV8f", "")
+BUILTIN(__builtin_ia32_vpermilvarpd, "V2dV2dV2LLi", "")
+BUILTIN(__builtin_ia32_vpermilvarps, "V4fV4fV4i", "")
+BUILTIN(__builtin_ia32_vpermilvarpd256, "V4dV4dV4LLi", "")
+BUILTIN(__builtin_ia32_vpermilvarps256, "V8fV8fV8i", "")
+BUILTIN(__builtin_ia32_blendpd256, "V4dV4dV4dIi", "")
+BUILTIN(__builtin_ia32_blendps256, "V8fV8fV8fIi", "")
+BUILTIN(__builtin_ia32_blendvpd256, "V4dV4dV4dV4d", "")
+BUILTIN(__builtin_ia32_blendvps256, "V8fV8fV8fV8f", "")
+BUILTIN(__builtin_ia32_dpps256, "V8fV8fV8fIi", "")
+BUILTIN(__builtin_ia32_cmppd256, "V4dV4dV4dc", "")
+BUILTIN(__builtin_ia32_cmpps256, "V8fV8fV8fc", "")
+BUILTIN(__builtin_ia32_vextractf128_pd256, "V2dV4dIc", "")
+BUILTIN(__builtin_ia32_vextractf128_ps256, "V4fV8fIc", "")
+BUILTIN(__builtin_ia32_vextractf128_si256, "V4iV8iIc", "")
+BUILTIN(__builtin_ia32_cvtdq2pd256, "V4dV4i", "")
+BUILTIN(__builtin_ia32_cvtdq2ps256, "V8fV8i", "")
+BUILTIN(__builtin_ia32_cvtpd2ps256, "V4fV4d", "")
+BUILTIN(__builtin_ia32_cvtps2dq256, "V8iV8f", "")
+BUILTIN(__builtin_ia32_cvtps2pd256, "V4dV4f", "")
+BUILTIN(__builtin_ia32_cvttpd2dq256, "V4iV4d", "")
+BUILTIN(__builtin_ia32_cvtpd2dq256, "V4iV4d", "")
+BUILTIN(__builtin_ia32_cvttps2dq256, "V8iV8f", "")
+BUILTIN(__builtin_ia32_vinsertf128_pd256, "V4dV4dV2dIc", "")
+BUILTIN(__builtin_ia32_vinsertf128_ps256, "V8fV8fV4fIc", "")
+BUILTIN(__builtin_ia32_vinsertf128_si256, "V8iV8iV4iIc", "")
+BUILTIN(__builtin_ia32_sqrtpd256, "V4dV4d", "")
+BUILTIN(__builtin_ia32_sqrtps256, "V8fV8f", "")
+BUILTIN(__builtin_ia32_rsqrtps256, "V8fV8f", "")
+BUILTIN(__builtin_ia32_rcpps256, "V8fV8f", "")
+BUILTIN(__builtin_ia32_roundpd256, "V4dV4dIi", "")
+BUILTIN(__builtin_ia32_roundps256, "V8fV8fIi", "")
+BUILTIN(__builtin_ia32_vtestzpd, "iV2dV2d", "")
+BUILTIN(__builtin_ia32_vtestcpd, "iV2dV2d", "")
+BUILTIN(__builtin_ia32_vtestnzcpd, "iV2dV2d", "")
+BUILTIN(__builtin_ia32_vtestzps, "iV4fV4f", "")
+BUILTIN(__builtin_ia32_vtestcps, "iV4fV4f", "")
+BUILTIN(__builtin_ia32_vtestnzcps, "iV4fV4f", "")
+BUILTIN(__builtin_ia32_vtestzpd256, "iV4dV4d", "")
+BUILTIN(__builtin_ia32_vtestcpd256, "iV4dV4d", "")
+BUILTIN(__builtin_ia32_vtestnzcpd256, "iV4dV4d", "")
+BUILTIN(__builtin_ia32_vtestzps256, "iV8fV8f", "")
+BUILTIN(__builtin_ia32_vtestcps256, "iV8fV8f", "")
+BUILTIN(__builtin_ia32_vtestnzcps256, "iV8fV8f", "")
+BUILTIN(__builtin_ia32_ptestz256, "iV4LLiV4LLi", "")
+BUILTIN(__builtin_ia32_ptestc256, "iV4LLiV4LLi", "")
+BUILTIN(__builtin_ia32_ptestnzc256, "iV4LLiV4LLi", "")
+BUILTIN(__builtin_ia32_movmskpd256, "iV4d", "")
+BUILTIN(__builtin_ia32_movmskps256, "iV8f", "")
+BUILTIN(__builtin_ia32_vzeroall, "v", "")
+BUILTIN(__builtin_ia32_vzeroupper, "v", "")
+BUILTIN(__builtin_ia32_vbroadcastss, "V4ffC*", "")
+BUILTIN(__builtin_ia32_vbroadcastsd256, "V4ddC*", "")
+BUILTIN(__builtin_ia32_vbroadcastss256, "V8ffC*", "")
+BUILTIN(__builtin_ia32_vbroadcastf128_pd256, "V4dV2dC*", "")
+BUILTIN(__builtin_ia32_vbroadcastf128_ps256, "V8fV4fC*", "")
+BUILTIN(__builtin_ia32_storeupd256, "vd*V4d", "")
+BUILTIN(__builtin_ia32_storeups256, "vf*V8f", "")
+BUILTIN(__builtin_ia32_storedqu256, "vc*V32c", "")
+BUILTIN(__builtin_ia32_lddqu256, "V32ccC*", "")
+BUILTIN(__builtin_ia32_movntdq256, "vV4LLi*V4LLi", "")
+BUILTIN(__builtin_ia32_movntpd256, "vd*V4d", "")
+BUILTIN(__builtin_ia32_movntps256, "vf*V8f", "")
+BUILTIN(__builtin_ia32_maskloadpd, "V2dV2dC*V2d", "")
+BUILTIN(__builtin_ia32_maskloadps, "V4fV4fC*V4f", "")
+BUILTIN(__builtin_ia32_maskloadpd256, "V4dV4dC*V4d", "")
+BUILTIN(__builtin_ia32_maskloadps256, "V8fV8fC*V8f", "")
+BUILTIN(__builtin_ia32_maskstorepd, "vV2d*V2dV2d", "")
+BUILTIN(__builtin_ia32_maskstoreps, "vV4f*V4fV4f", "")
+BUILTIN(__builtin_ia32_maskstorepd256, "vV4d*V4dV4d", "")
+BUILTIN(__builtin_ia32_maskstoreps256, "vV8f*V8fV8f", "")
+
+// AVX2
+BUILTIN(__builtin_ia32_mpsadbw256, "V32cV32cV32ci", "")
+BUILTIN(__builtin_ia32_pabsb256, "V32cV32c", "")
+BUILTIN(__builtin_ia32_pabsw256, "V16sV16s", "")
+BUILTIN(__builtin_ia32_pabsd256, "V8iV8i", "")
+BUILTIN(__builtin_ia32_packsswb256, "V32cV16sV16s", "")
+BUILTIN(__builtin_ia32_packssdw256, "V16sV8iV8i", "")
+BUILTIN(__builtin_ia32_packuswb256, "V32cV16sV16s", "")
+BUILTIN(__builtin_ia32_packusdw256, "V16sV8iV8i", "")
+BUILTIN(__builtin_ia32_paddsb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_paddsw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_psubsb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_psubsw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_paddusb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_paddusw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_psubusb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_psubusw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_palignr256, "V32cV32cV32cIc", "")
+BUILTIN(__builtin_ia32_pavgb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_pavgw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pblendvb256, "V32cV32cV32cV32c", "")
+BUILTIN(__builtin_ia32_pblendw256, "V16sV16sV16sIi", "")
+BUILTIN(__builtin_ia32_phaddw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_phaddd256, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_phaddsw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_phsubw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_phsubd256, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_phsubsw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pmaddubsw256, "V16sV32cV32c", "")
+BUILTIN(__builtin_ia32_pmaddwd256, "V8iV16sV16s", "")
+BUILTIN(__builtin_ia32_pmaxub256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_pmaxuw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pmaxud256, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_pmaxsb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_pmaxsw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pmaxsd256, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_pminub256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_pminuw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pminud256, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_pminsb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_pminsw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pminsd256, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_pmovmskb256, "iV32c", "")
+BUILTIN(__builtin_ia32_pmovsxbw256, "V16sV16c", "")
+BUILTIN(__builtin_ia32_pmovsxbd256, "V8iV16c", "")
+BUILTIN(__builtin_ia32_pmovsxbq256, "V4LLiV16c", "")
+BUILTIN(__builtin_ia32_pmovsxwd256, "V8iV8s", "")
+BUILTIN(__builtin_ia32_pmovsxwq256, "V4LLiV8s", "")
+BUILTIN(__builtin_ia32_pmovsxdq256, "V4LLiV4i", "")
+BUILTIN(__builtin_ia32_pmovzxbw256, "V16sV16c", "")
+BUILTIN(__builtin_ia32_pmovzxbd256, "V8iV16c", "")
+BUILTIN(__builtin_ia32_pmovzxbq256, "V4LLiV16c", "")
+BUILTIN(__builtin_ia32_pmovzxwd256, "V8iV8s", "")
+BUILTIN(__builtin_ia32_pmovzxwq256, "V4LLiV8s", "")
+BUILTIN(__builtin_ia32_pmovzxdq256, "V4LLiV4i", "")
+BUILTIN(__builtin_ia32_pmuldq256, "V4LLiV8iV8i", "")
+BUILTIN(__builtin_ia32_pmulhrsw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pmulhuw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pmulhw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_pmuludq256, "V4LLiV8iV8i", "")
+BUILTIN(__builtin_ia32_psadbw256, "V4LLiV32cV32c", "")
+BUILTIN(__builtin_ia32_pshufb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_psignb256, "V32cV32cV32c", "")
+BUILTIN(__builtin_ia32_psignw256, "V16sV16sV16s", "")
+BUILTIN(__builtin_ia32_psignd256, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_pslldqi256, "V4LLiV4LLiIi", "")
+BUILTIN(__builtin_ia32_psllwi256, "V16sV16si", "")
+BUILTIN(__builtin_ia32_psllw256, "V16sV16sV8s", "")
+BUILTIN(__builtin_ia32_pslldi256, "V8iV8ii", "")
+BUILTIN(__builtin_ia32_pslld256, "V8iV8iV4i", "")
+BUILTIN(__builtin_ia32_psllqi256, "V4LLiV4LLii", "")
+BUILTIN(__builtin_ia32_psllq256, "V4LLiV4LLiV2LLi", "")
+BUILTIN(__builtin_ia32_psrawi256, "V16sV16si", "")
+BUILTIN(__builtin_ia32_psraw256, "V16sV16sV8s", "")
+BUILTIN(__builtin_ia32_psradi256, "V8iV8ii", "")
+BUILTIN(__builtin_ia32_psrad256, "V8iV8iV4i", "")
+BUILTIN(__builtin_ia32_psrldqi256, "V4LLiV4LLiIi", "")
+BUILTIN(__builtin_ia32_psrlwi256, "V16sV16si", "")
+BUILTIN(__builtin_ia32_psrlw256, "V16sV16sV8s", "")
+BUILTIN(__builtin_ia32_psrldi256, "V8iV8ii", "")
+BUILTIN(__builtin_ia32_psrld256, "V8iV8iV4i", "")
+BUILTIN(__builtin_ia32_psrlqi256, "V4LLiV4LLii", "")
+BUILTIN(__builtin_ia32_psrlq256, "V4LLiV4LLiV2LLi", "")
+BUILTIN(__builtin_ia32_movntdqa256, "V4LLiV4LLi*", "")
+BUILTIN(__builtin_ia32_vbroadcastss_ps, "V4fV4f", "")
+BUILTIN(__builtin_ia32_vbroadcastss_ps256, "V8fV4f", "")
+BUILTIN(__builtin_ia32_vbroadcastsd_pd256, "V4dV2d", "")
+BUILTIN(__builtin_ia32_vbroadcastsi256, "V4LLiV2LLiC*", "")
+BUILTIN(__builtin_ia32_pblendd128, "V4iV4iV4iIi", "")
+BUILTIN(__builtin_ia32_pblendd256, "V8iV8iV8iIi", "")
+BUILTIN(__builtin_ia32_pbroadcastb256, "V32cV16c", "")
+BUILTIN(__builtin_ia32_pbroadcastw256, "V16sV8s", "")
+BUILTIN(__builtin_ia32_pbroadcastd256, "V8iV4i", "")
+BUILTIN(__builtin_ia32_pbroadcastq256, "V4LLiV2LLi", "")
+BUILTIN(__builtin_ia32_pbroadcastb128, "V16cV16c", "")
+BUILTIN(__builtin_ia32_pbroadcastw128, "V8sV8s", "")
+BUILTIN(__builtin_ia32_pbroadcastd128, "V4iV4i", "")
+BUILTIN(__builtin_ia32_pbroadcastq128, "V2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_permvarsi256, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_permdf256, "V4dV4dIc", "")
+BUILTIN(__builtin_ia32_permvarsf256, "V8fV8fV8f", "")
+BUILTIN(__builtin_ia32_permdi256, "V4LLiV4LLiIc", "")
+BUILTIN(__builtin_ia32_extract128i256, "V2LLiV4LLiIc", "")
+BUILTIN(__builtin_ia32_insert128i256, "V4LLiV4LLiV2LLiIc", "")
+BUILTIN(__builtin_ia32_maskloadd256, "V8iV8iC*V8i", "")
+BUILTIN(__builtin_ia32_maskloadq256, "V4LLiV4LLiC*V4LLi", "")
+BUILTIN(__builtin_ia32_maskloadd, "V4iV4iC*V4i", "")
+BUILTIN(__builtin_ia32_maskloadq, "V2LLiV2LLiC*V2LLi", "")
+BUILTIN(__builtin_ia32_maskstored256, "vV8i*V8iV8i", "")
+BUILTIN(__builtin_ia32_maskstoreq256, "vV4LLi*V4LLiV4LLi", "")
+BUILTIN(__builtin_ia32_maskstored, "vV4i*V4iV4i", "")
+BUILTIN(__builtin_ia32_maskstoreq, "vV2LLi*V2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_psllv8si, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_psllv4si, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_psllv4di, "V4LLiV4LLiV4LLi", "")
+BUILTIN(__builtin_ia32_psllv2di, "V2LLiV2LLiV2LLi", "")
+BUILTIN(__builtin_ia32_psrav8si, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_psrav4si, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_psrlv8si, "V8iV8iV8i", "")
+BUILTIN(__builtin_ia32_psrlv4si, "V4iV4iV4i", "")
+BUILTIN(__builtin_ia32_psrlv4di, "V4LLiV4LLiV4LLi", "")
+BUILTIN(__builtin_ia32_psrlv2di, "V2LLiV2LLiV2LLi", "")
+
+// BMI
+BUILTIN(__builtin_ia32_bextr_u32, "UiUiUi", "")
+BUILTIN(__builtin_ia32_bextr_u64, "ULLiULLiULLi", "")
+
+// BMI2
+BUILTIN(__builtin_ia32_bzhi_si, "UiUiUi", "")
+BUILTIN(__builtin_ia32_bzhi_di, "ULLiULLiULLi", "")
+BUILTIN(__builtin_ia32_pdep_si, "UiUiUi", "")
+BUILTIN(__builtin_ia32_pdep_di, "ULLiULLiULLi", "")
+BUILTIN(__builtin_ia32_pext_si, "UiUiUi", "")
+BUILTIN(__builtin_ia32_pext_di, "ULLiULLiULLi", "")
+
+// FMA4
+BUILTIN(__builtin_ia32_vfmaddps, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfmaddpd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfmaddss, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfmaddsd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfmsubps, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfmsubpd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfmsubss, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfmsubsd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfnmaddps, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfnmaddpd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfnmaddss, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfnmaddsd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfnmsubps, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfnmsubpd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfnmsubss, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfnmsubsd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfmaddsubps, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfmaddsubpd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfmsubaddps, "V4fV4fV4fV4f", "")
+BUILTIN(__builtin_ia32_vfmsubaddpd, "V2dV2dV2dV2d", "")
+BUILTIN(__builtin_ia32_vfmaddps256, "V8fV8fV8fV8f", "")
+BUILTIN(__builtin_ia32_vfmaddpd256, "V4dV4dV4dV4d", "")
+BUILTIN(__builtin_ia32_vfmsubps256, "V8fV8fV8fV8f", "")
+BUILTIN(__builtin_ia32_vfmsubpd256, "V4dV4dV4dV4d", "")
+BUILTIN(__builtin_ia32_vfnmaddps256, "V8fV8fV8fV8f", "")
+BUILTIN(__builtin_ia32_vfnmaddpd256, "V4dV4dV4dV4d", "")
+BUILTIN(__builtin_ia32_vfnmsubps256, "V8fV8fV8fV8f", "")
+BUILTIN(__builtin_ia32_vfnmsubpd256, "V4dV4dV4dV4d", "")
+BUILTIN(__builtin_ia32_vfmaddsubps256, "V8fV8fV8fV8f", "")
+BUILTIN(__builtin_ia32_vfmaddsubpd256, "V4dV4dV4dV4d", "")
+BUILTIN(__builtin_ia32_vfmsubaddps256, "V8fV8fV8fV8f", "")
+BUILTIN(__builtin_ia32_vfmsubaddpd256, "V4dV4dV4dV4d", "")
+
+#undef BUILTIN
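In practice these builtins are reached through the wrapper headers (xmmintrin.h and friends), but they can also be called directly once the matching target feature is enabled. A hedged sketch using one of the SSE entries above:

// Build with SSE enabled (e.g. -msse); illustrative only.
typedef float v4sf __attribute__((vector_size(16)));

static inline v4sf approx_rsqrt(v4sf x) {
  // Prototype "V4fV4f" above: takes and returns a vector of 4 floats.
  return __builtin_ia32_rsqrtps(x);
}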
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h b/contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h
new file mode 100644
index 0000000..ec6b973
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/ConvertUTF.h
@@ -0,0 +1,166 @@
+/*===--- ConvertUTF.h - Universal Character Names conversions ---------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is distributed under the University of Illinois Open Source
+ * License. See LICENSE.TXT for details.
+ *
+ *==------------------------------------------------------------------------==*/
+/*
+ * Copyright 2001-2004 Unicode, Inc.
+ *
+ * Disclaimer
+ *
+ * This source code is provided as is by Unicode, Inc. No claims are
+ * made as to fitness for any particular purpose. No warranties of any
+ * kind are expressed or implied. The recipient agrees to determine
+ * applicability of information provided. If this file has been
+ * purchased on magnetic or optical media from Unicode, Inc., the
+ * sole remedy for any claim will be exchange of defective media
+ * within 90 days of receipt.
+ *
+ * Limitations on Rights to Redistribute This Code
+ *
+ * Unicode, Inc. hereby grants the right to freely use the information
+ * supplied in this file in the creation of products supporting the
+ * Unicode Standard, and to make copies of this file in any form
+ * for internal or external distribution as long as this notice
+ * remains attached.
+ */
+
+/* ---------------------------------------------------------------------
+
+    Conversions between UTF-32, UTF-16, and UTF-8.  Header file.
+
+    Several functions are included here, forming a complete set of
+ conversions between the three formats. UTF-7 is not included
+ here, but is handled in a separate source file.
+
+ Each of these routines takes pointers to input buffers and output
+ buffers. The input buffers are const.
+
+ Each routine converts the text between *sourceStart and sourceEnd,
+ putting the result into the buffer between *targetStart and
+ targetEnd. Note: the end pointers are *after* the last item: e.g.
+ *(sourceEnd - 1) is the last item.
+
+ The return result indicates whether the conversion was successful,
+ and if not, whether the problem was in the source or target buffers.
+ (Only the first encountered problem is indicated.)
+
+ After the conversion, *sourceStart and *targetStart are both
+    updated to point to the end of the last text successfully converted in
+ the respective buffers.
+
+ Input parameters:
+ sourceStart - pointer to a pointer to the source buffer.
+ The contents of this are modified on return so that
+ it points at the next thing to be converted.
+ targetStart - similarly, pointer to pointer to the target buffer.
+ sourceEnd, targetEnd - respectively pointers to the ends of the
+ two buffers, for overflow checking only.
+
+ These conversion functions take a ConversionFlags argument. When this
+ flag is set to strict, both irregular sequences and isolated surrogates
+ will cause an error. When the flag is set to lenient, both irregular
+ sequences and isolated surrogates are converted.
+
+ Whether the flag is strict or lenient, all illegal sequences will cause
+ an error return. This includes sequences such as: <F4 90 80 80>, <C0 80>,
+ or <A0> in UTF-8, and values above 0x10FFFF in UTF-32. Conformant code
+ must check for illegal sequences.
+
+ When the flag is set to lenient, characters over 0x10FFFF are converted
+ to the replacement character; otherwise (when the flag is set to strict)
+ they constitute an error.
+
+ Output parameters:
+ The value "sourceIllegal" is returned from some routines if the input
+ sequence is malformed. When "sourceIllegal" is returned, the source
+ value will point to the illegal value that caused the problem. E.g.,
+ in UTF-8 when a sequence is malformed, it points to the start of the
+ malformed sequence.
+
+ Author: Mark E. Davis, 1994.
+ Rev History: Rick McGowan, fixes & updates May 2001.
+ Fixes & updates, Sept 2001.
+
+------------------------------------------------------------------------ */
+
+#ifndef CLANG_BASIC_CONVERTUTF_H
+#define CLANG_BASIC_CONVERTUTF_H
+
+/* ---------------------------------------------------------------------
+ The following 4 definitions are compiler-specific.
+ The C standard does not guarantee that wchar_t has at least
+ 16 bits, so wchar_t is no less portable than unsigned short!
+ All should be unsigned values to avoid sign extension during
+ bit mask & shift operations.
+------------------------------------------------------------------------ */
+
+typedef unsigned int UTF32; /* at least 32 bits */
+typedef unsigned short UTF16; /* at least 16 bits */
+typedef unsigned char UTF8; /* typically 8 bits */
+typedef unsigned char Boolean; /* 0 or 1 */
+
+/* Some fundamental constants */
+#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD
+#define UNI_MAX_BMP (UTF32)0x0000FFFF
+#define UNI_MAX_UTF16 (UTF32)0x0010FFFF
+#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF
+#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF
+
+typedef enum {
+ conversionOK, /* conversion successful */
+ sourceExhausted, /* partial character in source, but hit end */
+ targetExhausted, /* insuff. room in target for conversion */
+ sourceIllegal /* source sequence is illegal/malformed */
+} ConversionResult;
+
+typedef enum {
+ strictConversion = 0,
+ lenientConversion
+} ConversionFlags;
+
+/* This is for C++ and does no harm in C */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ConversionResult ConvertUTF8toUTF16 (
+ const UTF8** sourceStart, const UTF8* sourceEnd,
+ UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags);
+
+ConversionResult ConvertUTF8toUTF32 (
+ const UTF8** sourceStart, const UTF8* sourceEnd,
+ UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
+
+#ifdef CLANG_NEEDS_THESE_ONE_DAY
+ConversionResult ConvertUTF16toUTF8 (
+ const UTF16** sourceStart, const UTF16* sourceEnd,
+ UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);
+
+ConversionResult ConvertUTF32toUTF8 (
+ const UTF32** sourceStart, const UTF32* sourceEnd,
+ UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags);
+
+ConversionResult ConvertUTF16toUTF32 (
+ const UTF16** sourceStart, const UTF16* sourceEnd,
+ UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags);
+
+ConversionResult ConvertUTF32toUTF16 (
+ const UTF32** sourceStart, const UTF32* sourceEnd,
+ UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags);
+
+Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd);
+#endif
+
+Boolean isLegalUTF8String(const UTF8 *source, const UTF8 *sourceEnd);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
+
+/* --------------------------------------------------------------------- */
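A minimal usage sketch of the calling convention documented in the header comment above (the wrapper name and buffer sizing are illustrative):

#include "clang/Basic/ConvertUTF.h"

// Convert a UTF-8 buffer to UTF-16. Both start pointers are advanced past
// the last unit consumed/produced; the end pointers are one past the end.
bool ConvertBuffer(const UTF8 *In, unsigned InLen,
                   UTF16 *Out, unsigned OutCap, unsigned &OutLen) {
  const UTF8 *Src = In;
  UTF16 *Dst = Out;
  ConversionResult Res = ConvertUTF8toUTF16(&Src, In + InLen,
                                            &Dst, Out + OutCap,
                                            strictConversion);
  OutLen = (unsigned)(Dst - Out);   // UTF-16 code units actually written
  return Res == conversionOK;       // else: source/target exhausted or illegal
}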
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td b/contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td
new file mode 100644
index 0000000..6f2bb35
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DeclNodes.td
@@ -0,0 +1,77 @@
+class AttrSubject;
+
+class Decl<bit abstract = 0> : AttrSubject {
+ bit Abstract = abstract;
+}
+
+class DDecl<Decl base, bit abstract = 0> : Decl<abstract> {
+ Decl Base = base;
+}
+
+class DeclContext { }
+
+def TranslationUnit : Decl, DeclContext;
+def Named : Decl<1>;
+ def Namespace : DDecl<Named>, DeclContext;
+ def UsingDirective : DDecl<Named>;
+ def NamespaceAlias : DDecl<Named>;
+ def Label : DDecl<Named>;
+ def Type : DDecl<Named, 1>;
+ def TypedefName : DDecl<Type, 1>;
+ def Typedef : DDecl<TypedefName>;
+ def TypeAlias : DDecl<TypedefName>;
+ def UnresolvedUsingTypename : DDecl<Type>;
+ def Tag : DDecl<Type, 1>, DeclContext;
+ def Enum : DDecl<Tag>;
+ def Record : DDecl<Tag>;
+ def CXXRecord : DDecl<Record>;
+ def ClassTemplateSpecialization : DDecl<CXXRecord>;
+ def ClassTemplatePartialSpecialization
+ : DDecl<ClassTemplateSpecialization>;
+ def TemplateTypeParm : DDecl<Type>;
+ def Value : DDecl<Named, 1>;
+ def EnumConstant : DDecl<Value>;
+ def UnresolvedUsingValue : DDecl<Value>;
+ def IndirectField : DDecl<Value>;
+ def Declarator : DDecl<Value, 1>;
+ def Function : DDecl<Declarator>, DeclContext;
+ def CXXMethod : DDecl<Function>;
+ def CXXConstructor : DDecl<CXXMethod>;
+ def CXXDestructor : DDecl<CXXMethod>;
+ def CXXConversion : DDecl<CXXMethod>;
+ def Field : DDecl<Declarator>;
+ def ObjCIvar : DDecl<Field>;
+ def ObjCAtDefsField : DDecl<Field>;
+ def Var : DDecl<Declarator>;
+ def ImplicitParam : DDecl<Var>;
+ def ParmVar : DDecl<Var>;
+ def NonTypeTemplateParm : DDecl<Declarator>;
+ def Template : DDecl<Named, 1>;
+ def RedeclarableTemplate : DDecl<Template, 1>;
+ def FunctionTemplate : DDecl<RedeclarableTemplate>;
+ def ClassTemplate : DDecl<RedeclarableTemplate>;
+ def TypeAliasTemplate : DDecl<RedeclarableTemplate>;
+ def TemplateTemplateParm : DDecl<Template>;
+ def Using : DDecl<Named>;
+ def UsingShadow : DDecl<Named>;
+ def ObjCMethod : DDecl<Named>, DeclContext;
+ def ObjCContainer : DDecl<Named, 1>, DeclContext;
+ def ObjCCategory : DDecl<ObjCContainer>;
+ def ObjCProtocol : DDecl<ObjCContainer>;
+ def ObjCInterface : DDecl<ObjCContainer>;
+ def ObjCImpl : DDecl<ObjCContainer, 1>;
+ def ObjCCategoryImpl : DDecl<ObjCImpl>;
+ def ObjCImplementation : DDecl<ObjCImpl>;
+ def ObjCProperty : DDecl<Named>;
+ def ObjCCompatibleAlias : DDecl<Named>;
+def LinkageSpec : Decl, DeclContext;
+def ObjCPropertyImpl : Decl;
+def FileScopeAsm : Decl;
+def AccessSpec : Decl;
+def Friend : Decl;
+def FriendTemplate : Decl;
+def StaticAssert : Decl;
+def Block : Decl, DeclContext;
+def ClassScopeFunctionSpecialization : Decl;
+def Import : Decl;
+
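This TableGen hierarchy is not consumed directly; tblgen processes it into DeclNodes.inc, which C++ code then includes through X-macros. A self-contained sketch of that general pattern (the macro and enumerator names here are hypothetical, not Clang's exact generated output):

// Hand-written stand-in for a generated node list.
#define EXAMPLE_DECL_NODES(DECL) \
  DECL(TranslationUnit)          \
  DECL(Namespace)                \
  DECL(Typedef)                  \
  DECL(Function)                 \
  DECL(Var)                      /* ...one entry per concrete node above... */

// Derive an enumeration (or a table, a visitor, ...) from the single list.
enum ExampleDeclKind {
#define DECL(NAME) DK_##NAME,
  EXAMPLE_DECL_NODES(DECL)
#undef DECL
};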
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DelayedCleanupPool.h b/contrib/llvm/tools/clang/include/clang/Basic/DelayedCleanupPool.h
new file mode 100644
index 0000000..8575bc2
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DelayedCleanupPool.h
@@ -0,0 +1,110 @@
+//=== DelayedCleanupPool.h - Delayed Clean-up Pool Implementation *- C++ -*===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a facility to delay calling cleanup methods until specific
+// points.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_DELAYEDCLEANUPPOOL_H
+#define LLVM_CLANG_BASIC_DELAYEDCLEANUPPOOL_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+
+/// \brief Gathers pairs of pointer-to-object/pointer-to-cleanup-function
+/// allowing the cleanup functions to get called (with the pointer as parameter)
+/// at specific points.
+///
+/// The use case is to simplify clean-up of resources whose lifetime is
+/// well-known and restricted, but which are easy to forget to clean up
+/// manually, causing a leak.
+///
+/// The same pointer can be added multiple times; its clean-up function will
+/// only be called once.
+class DelayedCleanupPool {
+public:
+ typedef void (*CleanupFn)(void *ptr);
+
+ /// \brief Adds a pointer and its associated cleanup function to be called
+ /// at a later point.
+ ///
+ /// \returns false if the pointer is already added, true otherwise.
+ bool delayCleanup(void *ptr, CleanupFn fn) {
+ assert(ptr && "Expected valid pointer to object");
+ assert(fn && "Expected valid pointer to function");
+
+ CleanupFn &mapFn = Ptrs[ptr];
+ assert((!mapFn || mapFn == fn) &&
+ "Adding a pointer with different cleanup function!");
+
+ if (!mapFn) {
+ mapFn = fn;
+ Cleanups.push_back(std::make_pair(ptr, fn));
+ return true;
+ }
+
+ return false;
+ }
+
+ template <typename T>
+ bool delayDelete(T *ptr) {
+ return delayCleanup(ptr, cleanupWithDelete<T>);
+ }
+
+ template <typename T, void (T::*Fn)()>
+ bool delayMemberFunc(T *ptr) {
+ return delayCleanup(ptr, cleanupWithMemberFunc<T, Fn>);
+ }
+
+ void doCleanup() {
+ for (SmallVector<std::pair<void *, CleanupFn>, 8>::reverse_iterator
+ I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I)
+ I->second(I->first);
+ Cleanups.clear();
+ Ptrs.clear();
+ }
+
+ ~DelayedCleanupPool() {
+ doCleanup();
+ }
+
+private:
+ llvm::DenseMap<void *, CleanupFn> Ptrs;
+ SmallVector<std::pair<void *, CleanupFn>, 8> Cleanups;
+
+ template <typename T>
+ static void cleanupWithDelete(void *ptr) {
+ delete static_cast<T *>(ptr);
+ }
+
+ template <typename T, void (T::*Fn)()>
+ static void cleanupWithMemberFunc(void *ptr) {
+ (static_cast<T *>(ptr)->*Fn)();
+ }
+};
+
+/// \brief RAII object for triggering a cleanup of a DelayedCleanupPool.
+class DelayedCleanupPoint {
+ DelayedCleanupPool &Pool;
+
+public:
+ DelayedCleanupPoint(DelayedCleanupPool &pool) : Pool(pool) { }
+
+ ~DelayedCleanupPoint() {
+ Pool.doCleanup();
+ }
+};
+
+} // end namespace clang
+
+#endif
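A usage sketch for the pool and its RAII trigger (Thing is a stand-in type for illustration):

#include "clang/Basic/DelayedCleanupPool.h"

struct Thing {};   // stand-in resource type

void processThings(clang::DelayedCleanupPool &Pool) {
  clang::DelayedCleanupPoint Scope(Pool);   // runs Pool.doCleanup() on scope exit

  Thing *T = new Thing();
  Pool.delayDelete(T);    // registered; deleted exactly once when Scope unwinds
  Pool.delayDelete(T);    // duplicate registration is harmless (returns false)

  // ... use T freely; no manual delete is needed on any return path ...
}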
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h
new file mode 100644
index 0000000..e157178
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.h
@@ -0,0 +1,1207 @@
+//===--- Diagnostic.h - C Language Family Diagnostic Handling ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Diagnostic-related interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_DIAGNOSTIC_H
+#define LLVM_CLANG_DIAGNOSTIC_H
+
+#include "clang/Basic/DiagnosticIDs.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/type_traits.h"
+
+#include <vector>
+#include <list>
+
+namespace clang {
+ class DiagnosticConsumer;
+ class DiagnosticBuilder;
+ class IdentifierInfo;
+ class DeclContext;
+ class LangOptions;
+ class Preprocessor;
+ class DiagnosticErrorTrap;
+ class StoredDiagnostic;
+
+/// \brief Annotates a diagnostic with some code that should be
+/// inserted, removed, or replaced to fix the problem.
+///
+/// This kind of hint should be used when we are certain that the
+/// introduction, removal, or modification of a particular (small!)
+/// amount of code will correct a compilation error. The compiler
+/// should also provide full recovery from such errors, such that
+/// suppressing the diagnostic output can still result in successful
+/// compilation.
+class FixItHint {
+public:
+ /// \brief Code that should be replaced to correct the error. Empty for an
+ /// insertion hint.
+ CharSourceRange RemoveRange;
+
+  /// \brief Code in the specified range that should be inserted at the
+  /// insertion location.
+ CharSourceRange InsertFromRange;
+
+ /// \brief The actual code to insert at the insertion location, as a
+ /// string.
+ std::string CodeToInsert;
+
+ bool BeforePreviousInsertions;
+
+ /// \brief Empty code modification hint, indicating that no code
+ /// modification is known.
+ FixItHint() : BeforePreviousInsertions(false) { }
+
+ bool isNull() const {
+ return !RemoveRange.isValid();
+ }
+
+ /// \brief Create a code modification hint that inserts the given
+ /// code string at a specific location.
+ static FixItHint CreateInsertion(SourceLocation InsertionLoc,
+ StringRef Code,
+ bool BeforePreviousInsertions = false) {
+ FixItHint Hint;
+ Hint.RemoveRange =
+ CharSourceRange(SourceRange(InsertionLoc, InsertionLoc), false);
+ Hint.CodeToInsert = Code;
+ Hint.BeforePreviousInsertions = BeforePreviousInsertions;
+ return Hint;
+ }
+
+ /// \brief Create a code modification hint that inserts the given
+ /// code from \arg FromRange at a specific location.
+ static FixItHint CreateInsertionFromRange(SourceLocation InsertionLoc,
+ CharSourceRange FromRange,
+ bool BeforePreviousInsertions = false) {
+ FixItHint Hint;
+ Hint.RemoveRange =
+ CharSourceRange(SourceRange(InsertionLoc, InsertionLoc), false);
+ Hint.InsertFromRange = FromRange;
+ Hint.BeforePreviousInsertions = BeforePreviousInsertions;
+ return Hint;
+ }
+
+ /// \brief Create a code modification hint that removes the given
+ /// source range.
+ static FixItHint CreateRemoval(CharSourceRange RemoveRange) {
+ FixItHint Hint;
+ Hint.RemoveRange = RemoveRange;
+ return Hint;
+ }
+ static FixItHint CreateRemoval(SourceRange RemoveRange) {
+ return CreateRemoval(CharSourceRange::getTokenRange(RemoveRange));
+ }
+
+ /// \brief Create a code modification hint that replaces the given
+ /// source range with the given code string.
+ static FixItHint CreateReplacement(CharSourceRange RemoveRange,
+ StringRef Code) {
+ FixItHint Hint;
+ Hint.RemoveRange = RemoveRange;
+ Hint.CodeToInsert = Code;
+ return Hint;
+ }
+
+ static FixItHint CreateReplacement(SourceRange RemoveRange,
+ StringRef Code) {
+ return CreateReplacement(CharSourceRange::getTokenRange(RemoveRange), Code);
+ }
+};
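Before moving on to the engine itself, a brief usage note: fix-it hints are streamed into an in-flight diagnostic. A hedged sketch (the diagnostic ID and location are assumed; Report() and the streaming operator are declared further down in this header):

// Suggest inserting a semicolon at Loc while reporting DiagID.
void reportMissingSemi(clang::DiagnosticsEngine &Diags,
                       clang::SourceLocation Loc, unsigned DiagID) {
  Diags.Report(Loc, DiagID)
      << clang::FixItHint::CreateInsertion(Loc, ";");
}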
+
+/// DiagnosticsEngine - This concrete class is used by the front-end to report
+/// problems and issues. It massages the diagnostics (e.g. handling things like
+/// "report warnings as errors" and passes them off to the DiagnosticConsumer
+/// for reporting to the user. DiagnosticsEngine is tied to one translation unit
+/// and one SourceManager.
+class DiagnosticsEngine : public RefCountedBase<DiagnosticsEngine> {
+public:
+ /// Level - The level of the diagnostic, after it has been through mapping.
+ enum Level {
+ Ignored = DiagnosticIDs::Ignored,
+ Note = DiagnosticIDs::Note,
+ Warning = DiagnosticIDs::Warning,
+ Error = DiagnosticIDs::Error,
+ Fatal = DiagnosticIDs::Fatal
+ };
+
+  /// ExtensionHandling - How do we handle otherwise-unmapped extensions?  This
+ /// is controlled by -pedantic and -pedantic-errors.
+ enum ExtensionHandling {
+ Ext_Ignore, Ext_Warn, Ext_Error
+ };
+
+ enum ArgumentKind {
+ ak_std_string, // std::string
+ ak_c_string, // const char *
+ ak_sint, // int
+ ak_uint, // unsigned
+ ak_identifierinfo, // IdentifierInfo
+ ak_qualtype, // QualType
+ ak_declarationname, // DeclarationName
+ ak_nameddecl, // NamedDecl *
+ ak_nestednamespec, // NestedNameSpecifier *
+ ak_declcontext // DeclContext *
+ };
+
+ /// Specifies which overload candidates to display when overload resolution
+ /// fails.
+ enum OverloadsShown {
+ Ovl_All, ///< Show all overloads.
+ Ovl_Best ///< Show just the "best" overload candidates.
+ };
+
+  /// ArgumentValue - This typedef represents one argument value, which is a
+ /// union discriminated by ArgumentKind, with a value.
+ typedef std::pair<ArgumentKind, intptr_t> ArgumentValue;
+
+private:
+ unsigned char AllExtensionsSilenced; // Used by __extension__
+ bool IgnoreAllWarnings; // Ignore all warnings: -w
+ bool WarningsAsErrors; // Treat warnings like errors.
+ bool EnableAllWarnings; // Enable all warnings.
+ bool ErrorsAsFatal; // Treat errors like fatal errors.
+ bool SuppressSystemWarnings; // Suppress warnings in system headers.
+ bool SuppressAllDiagnostics; // Suppress all diagnostics.
+ OverloadsShown ShowOverloads; // Which overload candidates to show.
+ unsigned ErrorLimit; // Cap of # errors emitted, 0 -> no limit.
+ unsigned TemplateBacktraceLimit; // Cap on depth of template backtrace stack,
+ // 0 -> no limit.
+ unsigned ConstexprBacktraceLimit; // Cap on depth of constexpr evaluation
+ // backtrace stack, 0 -> no limit.
+ ExtensionHandling ExtBehavior; // Map extensions onto warnings or errors?
+ IntrusiveRefCntPtr<DiagnosticIDs> Diags;
+ DiagnosticConsumer *Client;
+ bool OwnsDiagClient;
+ SourceManager *SourceMgr;
+
+ /// \brief Mapping information for diagnostics. Mapping info is
+ /// packed into four bits per diagnostic. The low three bits are the mapping
+ /// (an instance of diag::Mapping), or zero if unset. The high bit is set
+ /// when the mapping was established as a user mapping. If the high bit is
+ /// clear, then the low bits are set to the default value, and should be
+ /// mapped with -pedantic, -Werror, etc.
+ ///
+ /// A new DiagState is created and kept around when diagnostic pragmas modify
+ /// the state so that we know what the diagnostic state is at any given
+ /// source location.
+ class DiagState {
+ llvm::DenseMap<unsigned, DiagnosticMappingInfo> DiagMap;
+
+ public:
+ typedef llvm::DenseMap<unsigned, DiagnosticMappingInfo>::iterator
+ iterator;
+ typedef llvm::DenseMap<unsigned, DiagnosticMappingInfo>::const_iterator
+ const_iterator;
+
+ void setMappingInfo(diag::kind Diag, DiagnosticMappingInfo Info) {
+ DiagMap[Diag] = Info;
+ }
+
+ DiagnosticMappingInfo &getOrAddMappingInfo(diag::kind Diag);
+
+ const_iterator begin() const { return DiagMap.begin(); }
+ const_iterator end() const { return DiagMap.end(); }
+ };
+
+ /// \brief Keeps and automatically disposes all DiagStates that we create.
+ std::list<DiagState> DiagStates;
+
+ /// \brief Represents a point in source where the diagnostic state was
+ /// modified because of a pragma. 'Loc' can be null if the point represents
+ /// the diagnostic state modifications done through the command-line.
+ struct DiagStatePoint {
+ DiagState *State;
+ FullSourceLoc Loc;
+ DiagStatePoint(DiagState *State, FullSourceLoc Loc)
+ : State(State), Loc(Loc) { }
+
+ bool operator<(const DiagStatePoint &RHS) const {
+ // If Loc is invalid it means it came from <command-line>, in which case
+ // we regard it as coming before any valid source location.
+ if (RHS.Loc.isInvalid())
+ return false;
+ if (Loc.isInvalid())
+ return true;
+ return Loc.isBeforeInTranslationUnitThan(RHS.Loc);
+ }
+ };
+
+ /// \brief A vector of all DiagStatePoints representing changes in diagnostic
+ /// state due to diagnostic pragmas. The vector is always sorted according to
+ /// the SourceLocation of the DiagStatePoint.
+ typedef std::vector<DiagStatePoint> DiagStatePointsTy;
+ mutable DiagStatePointsTy DiagStatePoints;
+
+ /// \brief Keeps the DiagState that was active during each diagnostic 'push'
+ /// so we can get back at it when we 'pop'.
+ std::vector<DiagState *> DiagStateOnPushStack;
+
+ DiagState *GetCurDiagState() const {
+ assert(!DiagStatePoints.empty());
+ return DiagStatePoints.back().State;
+ }
+
+ void PushDiagStatePoint(DiagState *State, SourceLocation L) {
+ FullSourceLoc Loc(L, *SourceMgr);
+ // Make sure that DiagStatePoints is always sorted according to Loc.
+ assert((Loc.isValid() || DiagStatePoints.empty()) &&
+ "Adding invalid loc point after another point");
+ assert((Loc.isInvalid() || DiagStatePoints.empty() ||
+ DiagStatePoints.back().Loc.isInvalid() ||
+ DiagStatePoints.back().Loc.isBeforeInTranslationUnitThan(Loc)) &&
+ "Previous point loc comes after or is the same as new one");
+ DiagStatePoints.push_back(DiagStatePoint(State,
+ FullSourceLoc(Loc, *SourceMgr)));
+ }
+
+ /// \brief Finds the DiagStatePoint that contains the diagnostic state of
+ /// the given source location.
+ DiagStatePointsTy::iterator GetDiagStatePointForLoc(SourceLocation Loc) const;
+
+ /// ErrorOccurred / FatalErrorOccurred - This is set to true when an error or
+ /// fatal error is emitted, and is sticky.
+ bool ErrorOccurred;
+ bool FatalErrorOccurred;
+
+ /// \brief Indicates that an unrecoverable error has occurred.
+ bool UnrecoverableErrorOccurred;
+
+ /// \brief Counts for DiagnosticErrorTrap to check whether an error occurred
+ /// during a parsing section, e.g. during parsing a function.
+ unsigned TrapNumErrorsOccurred;
+ unsigned TrapNumUnrecoverableErrorsOccurred;
+
+ /// LastDiagLevel - This is the level of the last diagnostic emitted. This is
+ /// used to emit continuation diagnostics with the same level as the
+ /// diagnostic that they follow.
+ DiagnosticIDs::Level LastDiagLevel;
+
+ unsigned NumWarnings; // Number of warnings reported
+ unsigned NumErrors; // Number of errors reported
+ unsigned NumErrorsSuppressed; // Number of errors suppressed
+
+ /// ArgToStringFn - A function pointer that converts an opaque diagnostic
+ /// argument to a string. This takes the modifiers and argument that were
+ /// present in the diagnostic.
+ ///
+ /// The PrevArgs array (whose length is NumPrevArgs) indicates the previous
+ /// arguments formatted for this diagnostic. Implementations of this function
+ /// can use this information to avoid redundancy across arguments.
+ ///
+ /// This is a hack to avoid a layering violation between libbasic and libsema.
+ typedef void (*ArgToStringFnTy)(
+ ArgumentKind Kind, intptr_t Val,
+ const char *Modifier, unsigned ModifierLen,
+ const char *Argument, unsigned ArgumentLen,
+ const ArgumentValue *PrevArgs,
+ unsigned NumPrevArgs,
+ SmallVectorImpl<char> &Output,
+ void *Cookie,
+ ArrayRef<intptr_t> QualTypeVals);
+ void *ArgToStringCookie;
+ ArgToStringFnTy ArgToStringFn;
+
+ /// \brief ID of the "delayed" diagnostic, which is a (typically
+ /// fatal) diagnostic that had to be delayed because it was found
+ /// while emitting another diagnostic.
+ unsigned DelayedDiagID;
+
+ /// \brief First string argument for the delayed diagnostic.
+ std::string DelayedDiagArg1;
+
+ /// \brief Second string argument for the delayed diagnostic.
+ std::string DelayedDiagArg2;
+
+public:
+ explicit DiagnosticsEngine(
+ const IntrusiveRefCntPtr<DiagnosticIDs> &Diags,
+ DiagnosticConsumer *client = 0,
+ bool ShouldOwnClient = true);
+ ~DiagnosticsEngine();
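+
+ // Construction sketch (illustrative only; `MyConsumer` is a hypothetical
+ // DiagnosticConsumer subclass and `SM` an existing SourceManager):
+ //
+ //   IntrusiveRefCntPtr<DiagnosticIDs> IDs(new DiagnosticIDs());
+ //   DiagnosticsEngine Diags(IDs, new MyConsumer(), /*ShouldOwnClient=*/true);
+ //   Diags.setSourceManager(&SM);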
+
+ const IntrusiveRefCntPtr<DiagnosticIDs> &getDiagnosticIDs() const {
+ return Diags;
+ }
+
+ DiagnosticConsumer *getClient() { return Client; }
+ const DiagnosticConsumer *getClient() const { return Client; }
+
+ /// \brief Determine whether this \c DiagnosticsEngine object owns its client.
+ bool ownsClient() const { return OwnsDiagClient; }
+
+ /// \brief Return the current diagnostic client along with ownership of that
+ /// client.
+ DiagnosticConsumer *takeClient() {
+ OwnsDiagClient = false;
+ return Client;
+ }
+
+ bool hasSourceManager() const { return SourceMgr != 0; }
+ SourceManager &getSourceManager() const {
+ assert(SourceMgr && "SourceManager not set!");
+ return *SourceMgr;
+ }
+ void setSourceManager(SourceManager *SrcMgr) { SourceMgr = SrcMgr; }
+
+ //===--------------------------------------------------------------------===//
+ // DiagnosticsEngine characterization methods, used by a client to customize
+ // how diagnostics are emitted.
+ //
+
+ /// pushMappings - Copies the current DiagMappings and pushes the new copy
+ /// onto the top of the stack.
+ void pushMappings(SourceLocation Loc);
+
+ /// popMappings - Pops the current DiagMappings off the top of the stack
+ /// causing the new top of the stack to be the active mappings. Returns
+ /// true if the pop happens, false if there is only one DiagMapping on the
+ /// stack.
+ bool popMappings(SourceLocation Loc);
+
+ /// \brief Set the diagnostic client associated with this diagnostic object.
+ ///
+ /// \param ShouldOwnClient true if the diagnostic object should take
+ /// ownership of \c client.
+ void setClient(DiagnosticConsumer *client, bool ShouldOwnClient = true);
+
+ /// setErrorLimit - Specify a limit for the number of errors we should
+ /// emit before giving up. Zero disables the limit.
+ void setErrorLimit(unsigned Limit) { ErrorLimit = Limit; }
+
+ /// \brief Specify the maximum number of template instantiation
+ /// notes to emit along with a given diagnostic.
+ void setTemplateBacktraceLimit(unsigned Limit) {
+ TemplateBacktraceLimit = Limit;
+ }
+
+ /// \brief Retrieve the maximum number of template instantiation
+ /// notes to emit along with a given diagnostic.
+ unsigned getTemplateBacktraceLimit() const {
+ return TemplateBacktraceLimit;
+ }
+
+ /// \brief Specify the maximum number of constexpr evaluation
+ /// notes to emit along with a given diagnostic.
+ void setConstexprBacktraceLimit(unsigned Limit) {
+ ConstexprBacktraceLimit = Limit;
+ }
+
+ /// \brief Retrieve the maximum number of constexpr evaluation
+ /// notes to emit along with a given diagnostic.
+ unsigned getConstexprBacktraceLimit() const {
+ return ConstexprBacktraceLimit;
+ }
+
+ /// setIgnoreAllWarnings - When set to true, any unmapped warnings are
+ /// ignored. If this and WarningsAsErrors are both set, then this one wins.
+ void setIgnoreAllWarnings(bool Val) { IgnoreAllWarnings = Val; }
+ bool getIgnoreAllWarnings() const { return IgnoreAllWarnings; }
+
+ /// setEnableAllWarnings - When set to true, any unmapped ignored warnings
+ /// are no longer ignored. If this and IgnoreAllWarnings are both set,
+ /// then that one wins.
+ void setEnableAllWarnings(bool Val) { EnableAllWarnings = Val; }
+ bool getEnableAllWarnngs() const { return EnableAllWarnings; }
+
+ /// setWarningsAsErrors - When set to true, any warnings reported are issued
+ /// as errors.
+ void setWarningsAsErrors(bool Val) { WarningsAsErrors = Val; }
+ bool getWarningsAsErrors() const { return WarningsAsErrors; }
+
+ /// setErrorsAsFatal - When set to true, any error reported is made a
+ /// fatal error.
+ void setErrorsAsFatal(bool Val) { ErrorsAsFatal = Val; }
+ bool getErrorsAsFatal() const { return ErrorsAsFatal; }
+
+ /// setSuppressSystemWarnings - When set to true, mask warnings that
+ /// come from system headers.
+ void setSuppressSystemWarnings(bool Val) { SuppressSystemWarnings = Val; }
+ bool getSuppressSystemWarnings() const { return SuppressSystemWarnings; }
+
+ /// \brief Suppress all diagnostics, to silence the front end when we
+ /// know that we don't want any more diagnostics to be passed along to the
+ /// client.
+ void setSuppressAllDiagnostics(bool Val = true) {
+ SuppressAllDiagnostics = Val;
+ }
+ bool getSuppressAllDiagnostics() const { return SuppressAllDiagnostics; }
+
+ /// \brief Specify which overload candidates to show when overload resolution
+ /// fails. By default, we show all candidates.
+ void setShowOverloads(OverloadsShown Val) {
+ ShowOverloads = Val;
+ }
+ OverloadsShown getShowOverloads() const { return ShowOverloads; }
+
+ /// \brief Pretend that the last diagnostic issued was ignored. This can
+ /// be used by clients who suppress diagnostics themselves.
+ void setLastDiagnosticIgnored() {
+ LastDiagLevel = DiagnosticIDs::Ignored;
+ }
+
+ /// setExtensionHandlingBehavior - This controls whether otherwise-unmapped
+ /// extension diagnostics are mapped onto ignore/warning/error. This
+ /// corresponds to the GCC -pedantic and -pedantic-errors option.
+ void setExtensionHandlingBehavior(ExtensionHandling H) {
+ ExtBehavior = H;
+ }
+ ExtensionHandling getExtensionHandlingBehavior() const { return ExtBehavior; }
+
+ /// AllExtensionsSilenced - This is a counter bumped when an __extension__
+ /// block is encountered. When non-zero, all extension diagnostics are
+ /// entirely silenced, no matter how they are mapped.
+ void IncrementAllExtensionsSilenced() { ++AllExtensionsSilenced; }
+ void DecrementAllExtensionsSilenced() { --AllExtensionsSilenced; }
+ bool hasAllExtensionsSilenced() { return AllExtensionsSilenced != 0; }
+
+ /// \brief This allows the client to specify that certain
+ /// warnings are ignored. Notes can never be mapped, errors can only be
+ /// mapped to fatal, and WARNINGs and EXTENSIONs can be mapped arbitrarily.
+ ///
+ /// \param Loc The source location at which this change of diagnostic state
+ /// should take effect. It can be null if we are setting the latest state.
+ void setDiagnosticMapping(diag::kind Diag, diag::Mapping Map,
+ SourceLocation Loc);
+
+ /// setDiagnosticGroupMapping - Change an entire diagnostic group (e.g.
+ /// "unknown-pragmas" to have the specified mapping. This returns true and
+ /// ignores the request if "Group" was unknown, false otherwise.
+ ///
+ /// 'Loc' is the source location at which this change of diagnostic state
+ /// should take effect. It can be null if we are setting the state from the
+ /// command line.
+ bool setDiagnosticGroupMapping(StringRef Group, diag::Mapping Map,
+ SourceLocation Loc = SourceLocation());
+
+ /// \brief Set the warning-as-error flag for the given diagnostic. This
+ /// function always only operates on the current diagnostic state.
+ void setDiagnosticWarningAsError(diag::kind Diag, bool Enabled);
+
+ /// \brief Set the warning-as-error flag for the given diagnostic group. This
+ /// function always only operates on the current diagnostic state.
+ ///
+ /// \returns True if the given group is unknown, false otherwise.
+ bool setDiagnosticGroupWarningAsError(StringRef Group, bool Enabled);
+
+ /// \brief Set the error-as-fatal flag for the given diagnostic. This function
+ /// always only operates on the current diagnostic state.
+ void setDiagnosticErrorAsFatal(diag::kind Diag, bool Enabled);
+
+ /// \brief Set the error-as-fatal flag for the given diagnostic group. This
+ /// function always only operates on the current diagnostic state.
+ ///
+ /// \returns True if the given group is unknown, false otherwise.
+ bool setDiagnosticGroupErrorAsFatal(StringRef Group, bool Enabled);
+
+ /// \brief Add the specified mapping to all diagnostics. Mainly to be used
+ /// by -Wno-everything to disable all warnings but allow subsequent -W options
+ /// to enable specific warnings.
+ void setMappingToAllDiagnostics(diag::Mapping Map,
+ SourceLocation Loc = SourceLocation());
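+
+ // Configuration sketch (illustrative only; the group names are examples,
+ // and the diag::Mapping enumerators are assumed to be those declared in
+ // DiagnosticIDs.h). This approximates the effect of -Werror,
+ // -Wno-error=padded, and -Wno-unknown-pragmas:
+ //
+ //   Diags.setWarningsAsErrors(true);
+ //   Diags.setDiagnosticGroupWarningAsError("padded", false);
+ //   Diags.setDiagnosticGroupMapping("unknown-pragmas", diag::MAP_IGNORE,
+ //                                   SourceLocation());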
+
+ bool hasErrorOccurred() const { return ErrorOccurred; }
+ bool hasFatalErrorOccurred() const { return FatalErrorOccurred; }
+
+ /// \brief Determine whether any kind of unrecoverable error has occurred.
+ bool hasUnrecoverableErrorOccurred() const {
+ return FatalErrorOccurred || UnrecoverableErrorOccurred;
+ }
+
+ unsigned getNumWarnings() const { return NumWarnings; }
+
+ void setNumWarnings(unsigned NumWarnings) {
+ this->NumWarnings = NumWarnings;
+ }
+
+ /// getCustomDiagID - Return an ID for a diagnostic with the specified message
+ /// and level. If this is the first request for this diagnostic, it is
+ /// registered and created, otherwise the existing ID is returned.
+ unsigned getCustomDiagID(Level L, StringRef Message) {
+ return Diags->getCustomDiagID((DiagnosticIDs::Level)L, Message);
+ }
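+
+ // Usage sketch (illustrative only; `Loc` is a hypothetical valid
+ // SourceLocation):
+ //
+ //   unsigned ID = Diags.getCustomDiagID(DiagnosticsEngine::Warning,
+ //                                       "unexpected use of custom attribute");
+ //   Diags.Report(Loc, ID);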
+
+ /// ConvertArgToString - This method converts a diagnostic argument (as an
+ /// intptr_t) into the string that represents it.
+ void ConvertArgToString(ArgumentKind Kind, intptr_t Val,
+ const char *Modifier, unsigned ModLen,
+ const char *Argument, unsigned ArgLen,
+ const ArgumentValue *PrevArgs, unsigned NumPrevArgs,
+ SmallVectorImpl<char> &Output,
+ SmallVectorImpl<intptr_t> &QualTypeVals) const {
+ ArgToStringFn(Kind, Val, Modifier, ModLen, Argument, ArgLen,
+ PrevArgs, NumPrevArgs, Output, ArgToStringCookie,
+ QualTypeVals);
+ }
+
+ void SetArgToStringFn(ArgToStringFnTy Fn, void *Cookie) {
+ ArgToStringFn = Fn;
+ ArgToStringCookie = Cookie;
+ }
+
+ /// \brief Reset the state of the diagnostic object to its initial
+ /// configuration.
+ void Reset();
+
+ //===--------------------------------------------------------------------===//
+ // DiagnosticsEngine classification and reporting interfaces.
+ //
+
+ /// \brief Based on the way the client configured the DiagnosticsEngine
+ /// object, classify the specified diagnostic ID into a Level, consumable by
+ /// the DiagnosticConsumer.
+ ///
+ /// \param Loc The source location for which to determine the diagnostic
+ /// state. Can be null in order to query the latest state.
+ Level getDiagnosticLevel(unsigned DiagID, SourceLocation Loc) const {
+ return (Level)Diags->getDiagnosticLevel(DiagID, Loc, *this);
+ }
+
+ /// Report - Issue the message to the client. @c DiagID is a member of the
+ /// @c diag::kind enum. This actually returns an instance of DiagnosticBuilder
+ /// which emits the diagnostics (through @c ProcessDiag) when it is destroyed.
+ /// @c Pos represents the source location associated with the diagnostic,
+ /// which can be an invalid location if no position information is available.
+ inline DiagnosticBuilder Report(SourceLocation Pos, unsigned DiagID);
+ inline DiagnosticBuilder Report(unsigned DiagID);
+
+ void Report(const StoredDiagnostic &storedDiag);
+
+ /// \brief Determine whether there is already a diagnostic in flight.
+ bool isDiagnosticInFlight() const { return CurDiagID != ~0U; }
+
+ /// \brief Set the "delayed" diagnostic that will be emitted once
+ /// the current diagnostic completes.
+ ///
+ /// If a diagnostic is already in-flight but the front end must
+ /// report a problem (e.g., with an inconsistent file system
+ /// state), this routine sets a "delayed" diagnostic that will be
+ /// emitted after the current diagnostic completes. This should
+ /// only be used for fatal errors detected at inconvenient
+ /// times. If emitting a delayed diagnostic causes a second delayed
+ /// diagnostic to be introduced, that second delayed diagnostic
+ /// will be ignored.
+ ///
+ /// \param DiagID The ID of the diagnostic being delayed.
+ ///
+ /// \param Arg1 A string argument that will be provided to the
+ /// diagnostic. A copy of this string will be stored in the
+ /// DiagnosticsEngine object itself.
+ ///
+ /// \param Arg2 A string argument that will be provided to the
+ /// diagnostic. A copy of this string will be stored in the
+ /// DiagnosticsEngine object itself.
+ void SetDelayedDiagnostic(unsigned DiagID, StringRef Arg1 = "",
+ StringRef Arg2 = "");
+
+ /// \brief Clear out the current diagnostic.
+ void Clear() { CurDiagID = ~0U; }
+
+private:
+ /// \brief Report the delayed diagnostic.
+ void ReportDelayed();
+
+ // This is private state used by DiagnosticBuilder. We put it here instead of
+ // in DiagnosticBuilder in order to keep DiagnosticBuilder a small lightweight
+ // object. This implementation choice means that we can only have one
+ // diagnostic "in flight" at a time, but this seems to be a reasonable
+ // tradeoff to keep these objects small. Assertions verify that only one
+ // diagnostic is in flight at a time.
+ friend class DiagnosticIDs;
+ friend class DiagnosticBuilder;
+ friend class Diagnostic;
+ friend class PartialDiagnostic;
+ friend class DiagnosticErrorTrap;
+
+ /// CurDiagLoc - This is the location of the current diagnostic that is in
+ /// flight.
+ SourceLocation CurDiagLoc;
+
+ /// CurDiagID - This is the ID of the current diagnostic that is in flight.
+ /// This is set to ~0U when there is no diagnostic in flight.
+ unsigned CurDiagID;
+
+ enum {
+ /// MaxArguments - The maximum number of arguments we can hold. We currently
+ /// only support up to 10 arguments (%0-%9). A single diagnostic with more
+ /// than that almost certainly has to be simplified anyway.
+ MaxArguments = 10,
+
+ /// MaxRanges - The maximum number of ranges we can hold.
+ MaxRanges = 10,
+
+ /// MaxFixItHints - The maximum number of fix-it hints we can hold.
+ MaxFixItHints = 10
+ };
+
+ /// NumDiagArgs - This contains the number of entries in Arguments.
+ signed char NumDiagArgs;
+ /// NumDiagRanges - This is the number of ranges in the DiagRanges array.
+ unsigned char NumDiagRanges;
+ /// NumDiagFixItHints - This is the number of hints in the DiagFixItHints
+ /// array.
+ unsigned char NumDiagFixItHints;
+
+ /// DiagArgumentsKind - This is an array of ArgumentKind enum values, with
+ /// one for each argument. This specifies whether the argument is in
+ /// DiagArgumentsStr or in DiagArgumentsVal.
+ unsigned char DiagArgumentsKind[MaxArguments];
+
+ /// DiagArgumentsStr - This holds the values of each string argument for the
+ /// current diagnostic. This value is only used when the corresponding
+ /// ArgumentKind is ak_std_string.
+ std::string DiagArgumentsStr[MaxArguments];
+
+ /// DiagArgumentsVal - The values for the various substitution positions. This
+ /// is used when the argument is not an std::string. The specific value is
+ /// mangled into an intptr_t and the interpretation depends on exactly what
+ /// sort of argument kind it is.
+ intptr_t DiagArgumentsVal[MaxArguments];
+
+ /// DiagRanges - The list of ranges added to this diagnostic.
+ CharSourceRange DiagRanges[MaxRanges];
+
+ /// DiagFixItHints - If valid, provides a hint with some code to insert,
+ /// remove,
+ /// or modify at a particular position.
+ FixItHint DiagFixItHints[MaxFixItHints];
+
+ DiagnosticMappingInfo makeMappingInfo(diag::Mapping Map, SourceLocation L) {
+ bool isPragma = L.isValid();
+ DiagnosticMappingInfo MappingInfo = DiagnosticMappingInfo::Make(
+ Map, /*IsUser=*/true, isPragma);
+
+ // If this is a pragma mapping, then set the diagnostic mapping flags so
+ // that we override command line options.
+ if (isPragma) {
+ MappingInfo.setNoWarningAsError(true);
+ MappingInfo.setNoErrorAsFatal(true);
+ }
+
+ return MappingInfo;
+ }
+
+ /// ProcessDiag - This is the method used to report a diagnostic that is
+ /// finally fully formed.
+ ///
+ /// \returns true if the diagnostic was emitted, false if it was
+ /// suppressed.
+ bool ProcessDiag() {
+ return Diags->ProcessDiag(*this);
+ }
+
+ /// @name Diagnostic Emission
+ /// @{
+protected:
+ // Sema requires access to the following functions because the current design
+ // of SFINAE requires it to use its own SemaDiagnosticBuilder, which needs to
+ // access us directly to ensure we minimize the emitted code for the common
+ // Sema::Diag() patterns.
+ friend class Sema;
+
+ /// \brief Emit the current diagnostic and clear the diagnostic state.
+ bool EmitCurrentDiagnostic();
+
+ unsigned getCurrentDiagID() const { return CurDiagID; }
+
+ SourceLocation getCurrentDiagLoc() const { return CurDiagLoc; }
+
+ /// @}
+
+ friend class ASTReader;
+ friend class ASTWriter;
+};
+
+/// \brief RAII class that determines when any errors have occurred
+/// between the time the instance was created and the time it was
+/// queried.
+class DiagnosticErrorTrap {
+ DiagnosticsEngine &Diag;
+ unsigned NumErrors;
+ unsigned NumUnrecoverableErrors;
+
+public:
+ explicit DiagnosticErrorTrap(DiagnosticsEngine &Diag)
+ : Diag(Diag) { reset(); }
+
+ /// \brief Determine whether any errors have occurred since this
+ /// object instance was created.
+ bool hasErrorOccurred() const {
+ return Diag.TrapNumErrorsOccurred > NumErrors;
+ }
+
+ /// \brief Determine whether any unrecoverable errors have occurred since this
+ /// object instance was created.
+ bool hasUnrecoverableErrorOccurred() const {
+ return Diag.TrapNumUnrecoverableErrorsOccurred > NumUnrecoverableErrors;
+ }
+
+ // Set to initial state of "no errors occurred".
+ void reset() {
+ NumErrors = Diag.TrapNumErrorsOccurred;
+ NumUnrecoverableErrors = Diag.TrapNumUnrecoverableErrorsOccurred;
+ }
+};
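+
+// Usage sketch (illustrative only; `Diags` is an existing DiagnosticsEngine,
+// and `parseDeclarator()` / `recover()` are hypothetical caller routines):
+//
+//   DiagnosticErrorTrap Trap(Diags);
+//   parseDeclarator();
+//   if (Trap.hasErrorOccurred())
+//     recover();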
+
+//===----------------------------------------------------------------------===//
+// DiagnosticBuilder
+//===----------------------------------------------------------------------===//
+
+/// DiagnosticBuilder - This is a little helper class used to produce
+/// diagnostics. This is constructed by the DiagnosticsEngine::Report method,
+/// and allows insertion of extra information (arguments and source ranges) into
+/// the currently "in flight" diagnostic. When the temporary for the builder is
+/// destroyed, the diagnostic is issued.
+///
+/// Note that many of these will be created as temporary objects (many call
+/// sites), so we want them to be small and we never want their address taken.
+/// This ensures that compilers with somewhat reasonable optimizers will promote
+/// the common fields to registers, eliminating increments of the NumArgs field,
+/// for example.
+class DiagnosticBuilder {
+ mutable DiagnosticsEngine *DiagObj;
+ mutable unsigned NumArgs, NumRanges, NumFixits;
+
+ /// \brief Status variable indicating if this diagnostic is still active.
+ ///
+ // NOTE: This field is redundant with DiagObj (IsActive iff (DiagObj != 0)),
+ // but LLVM is not currently smart enough to eliminate the null check that
+ // Emit() would end up with if we used that as our status variable.
+ mutable bool IsActive;
+
+ void operator=(const DiagnosticBuilder&); // DO NOT IMPLEMENT
+ friend class DiagnosticsEngine;
+ explicit DiagnosticBuilder(DiagnosticsEngine *diagObj)
+ : DiagObj(diagObj), NumArgs(0), NumRanges(0), NumFixits(0), IsActive(true) {
+ assert(diagObj && "DiagnosticBuilder requires a valid DiagnosticsEngine!");
+ }
+
+ friend class PartialDiagnostic;
+
+protected:
+ void FlushCounts() {
+ DiagObj->NumDiagArgs = NumArgs;
+ DiagObj->NumDiagRanges = NumRanges;
+ DiagObj->NumDiagFixItHints = NumFixits;
+ }
+
+ /// \brief Clear out the current diagnostic.
+ void Clear() const {
+ DiagObj = 0;
+ IsActive = false;
+ }
+
+ /// isActive - Determine whether this diagnostic is still active.
+ bool isActive() const { return IsActive; }
+
+ /// \brief Force the diagnostic builder to emit the diagnostic now.
+ ///
+ /// Once this function has been called, the DiagnosticBuilder object
+ /// should not be used again before it is destroyed.
+ ///
+ /// \returns true if a diagnostic was emitted, false if the
+ /// diagnostic was suppressed.
+ bool Emit() {
+ // If this diagnostic is inactive, then its soul was stolen by the copy ctor
+ // (or by a subclass, as in SemaDiagnosticBuilder).
+ if (!isActive()) return false;
+
+ // When emitting diagnostics, we set the final argument count into
+ // the DiagnosticsEngine object.
+ FlushCounts();
+
+ // Process the diagnostic.
+ bool Result = DiagObj->EmitCurrentDiagnostic();
+
+ // This diagnostic is dead.
+ Clear();
+
+ return Result;
+ }
+
+public:
+ /// Copy constructor. When copied, this "takes" the diagnostic info from the
+ /// input and neuters it.
+ DiagnosticBuilder(const DiagnosticBuilder &D) {
+ DiagObj = D.DiagObj;
+ IsActive = D.IsActive;
+ D.Clear();
+ NumArgs = D.NumArgs;
+ NumRanges = D.NumRanges;
+ NumFixits = D.NumFixits;
+ }
+
+ /// Destructor - The dtor emits the diagnostic.
+ ~DiagnosticBuilder() {
+ Emit();
+ }
+
+ /// Operator bool: conversion of DiagnosticBuilder to bool always returns
+ /// true. This allows it to be used in boolean error contexts like:
+ /// return Diag(...);
+ operator bool() const { return true; }
+
+ void AddString(StringRef S) const {
+ assert(isActive() && "Clients must not add to cleared diagnostic!");
+ assert(NumArgs < DiagnosticsEngine::MaxArguments &&
+ "Too many arguments to diagnostic!");
+ DiagObj->DiagArgumentsKind[NumArgs] = DiagnosticsEngine::ak_std_string;
+ DiagObj->DiagArgumentsStr[NumArgs++] = S;
+ }
+
+ void AddTaggedVal(intptr_t V, DiagnosticsEngine::ArgumentKind Kind) const {
+ assert(isActive() && "Clients must not add to cleared diagnostic!");
+ assert(NumArgs < DiagnosticsEngine::MaxArguments &&
+ "Too many arguments to diagnostic!");
+ DiagObj->DiagArgumentsKind[NumArgs] = Kind;
+ DiagObj->DiagArgumentsVal[NumArgs++] = V;
+ }
+
+ void AddSourceRange(const CharSourceRange &R) const {
+ assert(isActive() && "Clients must not add to cleared diagnostic!");
+ assert(NumRanges < DiagnosticsEngine::MaxRanges &&
+ "Too many arguments to diagnostic!");
+ DiagObj->DiagRanges[NumRanges++] = R;
+ }
+
+ void AddFixItHint(const FixItHint &Hint) const {
+ assert(isActive() && "Clients must not add to cleared diagnostic!");
+ assert(NumFixits < DiagnosticsEngine::MaxFixItHints &&
+ "Too many arguments to diagnostic!");
+ DiagObj->DiagFixItHints[NumFixits++] = Hint;
+ }
+};
+
+inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ StringRef S) {
+ DB.AddString(S);
+ return DB;
+}
+
+inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ const char *Str) {
+ DB.AddTaggedVal(reinterpret_cast<intptr_t>(Str),
+ DiagnosticsEngine::ak_c_string);
+ return DB;
+}
+
+inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB, int I) {
+ DB.AddTaggedVal(I, DiagnosticsEngine::ak_sint);
+ return DB;
+}
+
+inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,bool I) {
+ DB.AddTaggedVal(I, DiagnosticsEngine::ak_sint);
+ return DB;
+}
+
+inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ unsigned I) {
+ DB.AddTaggedVal(I, DiagnosticsEngine::ak_uint);
+ return DB;
+}
+
+inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ const IdentifierInfo *II) {
+ DB.AddTaggedVal(reinterpret_cast<intptr_t>(II),
+ DiagnosticsEngine::ak_identifierinfo);
+ return DB;
+}
+
+// Adds a DeclContext to the diagnostic. The enable_if template magic is here
+// so that we only match those arguments that are (statically) DeclContexts;
+// other arguments that derive from DeclContext (e.g., RecordDecls) will not
+// match.
+template<typename T>
+inline
+typename llvm::enable_if<llvm::is_same<T, DeclContext>,
+ const DiagnosticBuilder &>::type
+operator<<(const DiagnosticBuilder &DB, T *DC) {
+ DB.AddTaggedVal(reinterpret_cast<intptr_t>(DC),
+ DiagnosticsEngine::ak_declcontext);
+ return DB;
+}
+
+inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ const SourceRange &R) {
+ DB.AddSourceRange(CharSourceRange::getTokenRange(R));
+ return DB;
+}
+
+inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ const CharSourceRange &R) {
+ DB.AddSourceRange(R);
+ return DB;
+}
+
+inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ const FixItHint &Hint) {
+ if (!Hint.isNull())
+ DB.AddFixItHint(Hint);
+ return DB;
+}
+
+/// Report - Issue the message to the client. DiagID is a member of the
+/// diag::kind enum. This actually returns a new instance of DiagnosticBuilder
+/// which emits the diagnostics (through ProcessDiag) when it is destroyed.
+inline DiagnosticBuilder DiagnosticsEngine::Report(SourceLocation Loc,
+ unsigned DiagID){
+ assert(CurDiagID == ~0U && "Multiple diagnostics in flight at once!");
+ CurDiagLoc = Loc;
+ CurDiagID = DiagID;
+ return DiagnosticBuilder(this);
+}
+inline DiagnosticBuilder DiagnosticsEngine::Report(unsigned DiagID) {
+ return Report(SourceLocation(), DiagID);
+}
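+
+// Usage sketch (illustrative only): the operator<< overloads above append
+// arguments, source ranges and fix-its to the in-flight diagnostic. `Loc`
+// and `Range` are hypothetical SourceLocation/SourceRange values, `II` a
+// hypothetical IdentifierInfo*, and the custom ID stands in for a real
+// diag::kind value.
+//
+//   unsigned ID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+//                                       "cannot use %0 here");
+//   Diags.Report(Loc, ID) << II
+//                         << Range
+//                         << FixItHint::CreateRemoval(Range);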
+
+//===----------------------------------------------------------------------===//
+// Diagnostic
+//===----------------------------------------------------------------------===//
+
+/// Diagnostic - This is a little helper class (which is basically a smart
+/// pointer that forwards info from DiagnosticsEngine) that allows clients to
+/// enquire about the currently in-flight diagnostic.
+class Diagnostic {
+ const DiagnosticsEngine *DiagObj;
+ StringRef StoredDiagMessage;
+public:
+ explicit Diagnostic(const DiagnosticsEngine *DO) : DiagObj(DO) {}
+ Diagnostic(const DiagnosticsEngine *DO, StringRef storedDiagMessage)
+ : DiagObj(DO), StoredDiagMessage(storedDiagMessage) {}
+
+ const DiagnosticsEngine *getDiags() const { return DiagObj; }
+ unsigned getID() const { return DiagObj->CurDiagID; }
+ const SourceLocation &getLocation() const { return DiagObj->CurDiagLoc; }
+ bool hasSourceManager() const { return DiagObj->hasSourceManager(); }
+ SourceManager &getSourceManager() const { return DiagObj->getSourceManager();}
+
+ unsigned getNumArgs() const { return DiagObj->NumDiagArgs; }
+
+ /// getArgKind - Return the kind of the argument at the specified index.
+ /// Based on the kind of argument, the accessors below can be used to get
+ /// the value.
+ DiagnosticsEngine::ArgumentKind getArgKind(unsigned Idx) const {
+ assert(Idx < getNumArgs() && "Argument index out of range!");
+ return (DiagnosticsEngine::ArgumentKind)DiagObj->DiagArgumentsKind[Idx];
+ }
+
+ /// getArgStdStr - Return the provided argument string specified by Idx.
+ const std::string &getArgStdStr(unsigned Idx) const {
+ assert(getArgKind(Idx) == DiagnosticsEngine::ak_std_string &&
+ "invalid argument accessor!");
+ return DiagObj->DiagArgumentsStr[Idx];
+ }
+
+ /// getArgCStr - Return the specified C string argument.
+ const char *getArgCStr(unsigned Idx) const {
+ assert(getArgKind(Idx) == DiagnosticsEngine::ak_c_string &&
+ "invalid argument accessor!");
+ return reinterpret_cast<const char*>(DiagObj->DiagArgumentsVal[Idx]);
+ }
+
+ /// getArgSInt - Return the specified signed integer argument.
+ int getArgSInt(unsigned Idx) const {
+ assert(getArgKind(Idx) == DiagnosticsEngine::ak_sint &&
+ "invalid argument accessor!");
+ return (int)DiagObj->DiagArgumentsVal[Idx];
+ }
+
+ /// getArgUInt - Return the specified unsigned integer argument.
+ unsigned getArgUInt(unsigned Idx) const {
+ assert(getArgKind(Idx) == DiagnosticsEngine::ak_uint &&
+ "invalid argument accessor!");
+ return (unsigned)DiagObj->DiagArgumentsVal[Idx];
+ }
+
+ /// getArgIdentifier - Return the specified IdentifierInfo argument.
+ const IdentifierInfo *getArgIdentifier(unsigned Idx) const {
+ assert(getArgKind(Idx) == DiagnosticsEngine::ak_identifierinfo &&
+ "invalid argument accessor!");
+ return reinterpret_cast<IdentifierInfo*>(DiagObj->DiagArgumentsVal[Idx]);
+ }
+
+ /// getRawArg - Return the specified non-string argument in an opaque form.
+ intptr_t getRawArg(unsigned Idx) const {
+ assert(getArgKind(Idx) != DiagnosticsEngine::ak_std_string &&
+ "invalid argument accessor!");
+ return DiagObj->DiagArgumentsVal[Idx];
+ }
+
+
+ /// getNumRanges - Return the number of source ranges associated with this
+ /// diagnostic.
+ unsigned getNumRanges() const {
+ return DiagObj->NumDiagRanges;
+ }
+
+ const CharSourceRange &getRange(unsigned Idx) const {
+ assert(Idx < DiagObj->NumDiagRanges && "Invalid diagnostic range index!");
+ return DiagObj->DiagRanges[Idx];
+ }
+
+ /// \brief Return an array reference for this diagnostic's ranges.
+ ArrayRef<CharSourceRange> getRanges() const {
+ return llvm::makeArrayRef(DiagObj->DiagRanges, DiagObj->NumDiagRanges);
+ }
+
+ unsigned getNumFixItHints() const {
+ return DiagObj->NumDiagFixItHints;
+ }
+
+ const FixItHint &getFixItHint(unsigned Idx) const {
+ assert(Idx < getNumFixItHints() && "Invalid index!");
+ return DiagObj->DiagFixItHints[Idx];
+ }
+
+ const FixItHint *getFixItHints() const {
+ return getNumFixItHints()? DiagObj->DiagFixItHints : 0;
+ }
+
+ /// FormatDiagnostic - Format this diagnostic into a string, substituting the
+ /// formal arguments into the %0 slots. The result is appended onto the
+ /// OutStr array.
+ void FormatDiagnostic(SmallVectorImpl<char> &OutStr) const;
+
+ /// FormatDiagnostic - Format the given format-string into the
+ /// output buffer using the arguments stored in this diagnostic.
+ void FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
+ SmallVectorImpl<char> &OutStr) const;
+};
+
+/**
+ * \brief Represents a diagnostic in a form that can be retained until its
+ * corresponding source manager is destroyed.
+ */
+class StoredDiagnostic {
+ unsigned ID;
+ DiagnosticsEngine::Level Level;
+ FullSourceLoc Loc;
+ std::string Message;
+ std::vector<CharSourceRange> Ranges;
+ std::vector<FixItHint> FixIts;
+
+public:
+ StoredDiagnostic();
+ StoredDiagnostic(DiagnosticsEngine::Level Level, const Diagnostic &Info);
+ StoredDiagnostic(DiagnosticsEngine::Level Level, unsigned ID,
+ StringRef Message);
+ StoredDiagnostic(DiagnosticsEngine::Level Level, unsigned ID,
+ StringRef Message, FullSourceLoc Loc,
+ ArrayRef<CharSourceRange> Ranges,
+ ArrayRef<FixItHint> Fixits);
+ ~StoredDiagnostic();
+
+ /// \brief Evaluates true when this object stores a diagnostic.
+ operator bool() const { return Message.size() > 0; }
+
+ unsigned getID() const { return ID; }
+ DiagnosticsEngine::Level getLevel() const { return Level; }
+ const FullSourceLoc &getLocation() const { return Loc; }
+ StringRef getMessage() const { return Message; }
+
+ void setLocation(FullSourceLoc Loc) { this->Loc = Loc; }
+
+ typedef std::vector<CharSourceRange>::const_iterator range_iterator;
+ range_iterator range_begin() const { return Ranges.begin(); }
+ range_iterator range_end() const { return Ranges.end(); }
+ unsigned range_size() const { return Ranges.size(); }
+
+ ArrayRef<CharSourceRange> getRanges() const {
+ return llvm::makeArrayRef(Ranges);
+ }
+
+
+ typedef std::vector<FixItHint>::const_iterator fixit_iterator;
+ fixit_iterator fixit_begin() const { return FixIts.begin(); }
+ fixit_iterator fixit_end() const { return FixIts.end(); }
+ unsigned fixit_size() const { return FixIts.size(); }
+
+ ArrayRef<FixItHint> getFixIts() const {
+ return llvm::makeArrayRef(FixIts);
+ }
+};
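+
+// Usage sketch (illustrative only; `Stored` is a hypothetical
+// std::vector<StoredDiagnostic> member of a DiagnosticConsumer subclass
+// named CachingConsumer):
+//
+//   void CachingConsumer::HandleDiagnostic(DiagnosticsEngine::Level Level,
+//                                          const Diagnostic &Info) {
+//     Stored.push_back(StoredDiagnostic(Level, Info));
+//   }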
+
+/// DiagnosticConsumer - This is an abstract interface implemented by clients of
+/// the front-end, which formats and prints fully processed diagnostics.
+class DiagnosticConsumer {
+protected:
+ unsigned NumWarnings; // Number of warnings reported
+ unsigned NumErrors; // Number of errors reported
+
+public:
+ DiagnosticConsumer() : NumWarnings(0), NumErrors(0) { }
+
+ unsigned getNumErrors() const { return NumErrors; }
+ unsigned getNumWarnings() const { return NumWarnings; }
+ virtual void clear() { NumWarnings = NumErrors = 0; }
+
+ virtual ~DiagnosticConsumer();
+
+ /// BeginSourceFile - Callback to inform the diagnostic client that processing
+ /// of a source file is beginning.
+ ///
+ /// Note that diagnostics may be emitted outside the processing of a source
+ /// file, for example during the parsing of command line options. However,
+ /// diagnostics with source range information are required to only be emitted
+ /// in between BeginSourceFile() and EndSourceFile().
+ ///
+ /// \arg LangOpts - The language options for the source file being processed.
+ /// \arg PP - The preprocessor object being used for the source; this is
+ /// optional and may not be present, for example when processing AST source
+ /// files.
+ virtual void BeginSourceFile(const LangOptions &LangOpts,
+ const Preprocessor *PP = 0) {}
+
+ /// EndSourceFile - Callback to inform the diagnostic client that processing
+ /// of a source file has ended. The diagnostic client should assume that any
+ /// objects made available via \see BeginSourceFile() are inaccessible.
+ virtual void EndSourceFile() {}
+
+ /// \brief Callback to inform the diagnostic client that processing of all
+ /// source files has ended.
+ virtual void finish() {}
+
+ /// IncludeInDiagnosticCounts - This method (whose default implementation
+ /// returns true) indicates whether the diagnostics handled by this
+ /// DiagnosticConsumer should be included in the number of diagnostics
+ /// reported by DiagnosticsEngine.
+ virtual bool IncludeInDiagnosticCounts() const;
+
+ /// HandleDiagnostic - Handle this diagnostic, reporting it to the user or
+ /// capturing it to a log as needed.
+ ///
+ /// Default implementation just keeps track of the total number of warnings
+ /// and errors.
+ virtual void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info);
+
+ /// \brief Clone the diagnostic consumer, producing an equivalent consumer
+ /// that can be used in a different context.
+ virtual DiagnosticConsumer *clone(DiagnosticsEngine &Diags) const = 0;
+};
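+
+// Implementation sketch (illustrative only): a minimal consumer that prints
+// the formatted message to llvm::errs() and calls the base class so that the
+// warning/error counts stay accurate.
+//
+//   class PrintingDiagConsumer : public DiagnosticConsumer {
+//   public:
+//     virtual void HandleDiagnostic(DiagnosticsEngine::Level Level,
+//                                   const Diagnostic &Info) {
+//       DiagnosticConsumer::HandleDiagnostic(Level, Info);
+//       SmallString<128> Message;
+//       Info.FormatDiagnostic(Message);
+//       llvm::errs() << Message.str() << "\n";
+//     }
+//     virtual DiagnosticConsumer *clone(DiagnosticsEngine &Diags) const {
+//       return new PrintingDiagConsumer();
+//     }
+//   };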
+
+/// IgnoringDiagConsumer - This is a diagnostic client that just ignores all
+/// diags.
+class IgnoringDiagConsumer : public DiagnosticConsumer {
+ virtual void anchor();
+ void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info) {
+ // Just ignore it.
+ }
+ DiagnosticConsumer *clone(DiagnosticsEngine &Diags) const {
+ return new IgnoringDiagConsumer();
+ }
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td
new file mode 100644
index 0000000..109cd08
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Diagnostic.td
@@ -0,0 +1,98 @@
+//===--- Diagnostic.td - C Language Family Diagnostic Handling ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TableGen core definitions for the diagnostics
+// and diagnostic control.
+//
+//===----------------------------------------------------------------------===//
+
+// Define the diagnostic mappings.
+class DiagMapping;
+def MAP_IGNORE : DiagMapping;
+def MAP_WARNING : DiagMapping;
+def MAP_ERROR : DiagMapping;
+def MAP_FATAL : DiagMapping;
+
+// Define the diagnostic classes.
+class DiagClass;
+def CLASS_NOTE : DiagClass;
+def CLASS_WARNING : DiagClass;
+def CLASS_EXTENSION : DiagClass;
+def CLASS_ERROR : DiagClass;
+
+// Diagnostic Categories. These can be applied to groups or individual
+// diagnostics to specify a category.
+class DiagCategory<string Name> {
+ string CategoryName = Name;
+}
+
+// Diagnostic Groups.
+class DiagGroup<string Name, list<DiagGroup> subgroups = []> {
+ string GroupName = Name;
+ list<DiagGroup> SubGroups = subgroups;
+ string CategoryName = "";
+}
+class InGroup<DiagGroup G> { DiagGroup Group = G; }
+//class IsGroup<string Name> { DiagGroup Group = DiagGroup<Name>; }
+
+
+// This defines all of the named diagnostic categories.
+include "DiagnosticCategories.td"
+
+// This defines all of the named diagnostic groups.
+include "DiagnosticGroups.td"
+
+
+// All diagnostics emitted by the compiler are an indirect subclass of this.
+class Diagnostic<string text, DiagClass DC, DiagMapping defaultmapping> {
+ /// Component is specified by the file with a big let directive.
+ string Component = ?;
+ string Text = text;
+ DiagClass Class = DC;
+ bit SFINAE = 1;
+ bit AccessControl = 0;
+ bit WarningNoWerror = 0;
+ bit WarningShowInSystemHeader = 0;
+ DiagMapping DefaultMapping = defaultmapping;
+ DiagGroup Group;
+ string CategoryName = "";
+}
+
+class Error<string str> : Diagnostic<str, CLASS_ERROR, MAP_ERROR>;
+class Warning<string str> : Diagnostic<str, CLASS_WARNING, MAP_WARNING>;
+class Extension<string str> : Diagnostic<str, CLASS_EXTENSION, MAP_IGNORE>;
+class ExtWarn<string str> : Diagnostic<str, CLASS_EXTENSION, MAP_WARNING>;
+class Note<string str> : Diagnostic<str, CLASS_NOTE, MAP_FATAL/*ignored*/>;
+
+
+class DefaultIgnore { DiagMapping DefaultMapping = MAP_IGNORE; }
+class DefaultWarn { DiagMapping DefaultMapping = MAP_WARNING; }
+class DefaultError { DiagMapping DefaultMapping = MAP_ERROR; }
+class DefaultFatal { DiagMapping DefaultMapping = MAP_FATAL; }
+class DefaultWarnNoWerror {
+ bit WarningNoWerror = 1;
+}
+class DefaultWarnShowInSystemHeader {
+ bit WarningShowInSystemHeader = 1;
+}
+
+class NoSFINAE { bit SFINAE = 0; }
+class AccessControl { bit AccessControl = 1; }
+
+// Definitions for Diagnostics.
+include "DiagnosticASTKinds.td"
+include "DiagnosticAnalysisKinds.td"
+include "DiagnosticCommonKinds.td"
+include "DiagnosticDriverKinds.td"
+include "DiagnosticFrontendKinds.td"
+include "DiagnosticLexKinds.td"
+include "DiagnosticParseKinds.td"
+include "DiagnosticSemaKinds.td"
+include "DiagnosticSerializationKinds.td"
+
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td
new file mode 100644
index 0000000..9cfe5ef
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticASTKinds.td
@@ -0,0 +1,211 @@
+//==--- DiagnosticASTKinds.td - libast diagnostics ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+let Component = "AST" in {
+
+// Constant expression diagnostics. These (and their users) belong in Sema.
+def note_expr_divide_by_zero : Note<"division by zero">;
+def note_constexpr_invalid_cast : Note<
+ "%select{reinterpret_cast|dynamic_cast|cast which performs the conversions of"
+ " a reinterpret_cast|cast from %1}0 is not allowed in a constant expression">;
+def note_constexpr_invalid_downcast : Note<
+ "cannot cast object of dynamic type %0 to type %1">;
+def note_constexpr_overflow : Note<
+ "value %0 is outside the range of representable values of type %1">;
+def note_constexpr_negative_shift : Note<"negative shift count %0">;
+def note_constexpr_large_shift : Note<
+ "shift count %0 >= width of type %1 (%2 bit%s2)">;
+def note_constexpr_lshift_of_negative : Note<"left shift of negative value %0">;
+def note_constexpr_lshift_discards : Note<"signed left shift discards bits">;
+def note_constexpr_invalid_function : Note<
+ "%select{non-constexpr|undefined}0 %select{function|constructor}1 %2 cannot "
+ "be used in a constant expression">;
+def note_constexpr_virtual_call : Note<
+ "cannot evaluate virtual function call in a constant expression">;
+def note_constexpr_virtual_base : Note<
+ "cannot construct object of type %0 with virtual base class "
+ "in a constant expression">;
+def note_constexpr_nonliteral : Note<
+ "non-literal type %0 cannot be used in a constant expression">;
+def note_constexpr_non_global : Note<
+ "%select{pointer|reference}0 to %select{|subobject of }1"
+ "%select{temporary|%3}2 is not a constant expression">;
+def note_constexpr_array_index : Note<"cannot refer to element %0 of "
+ "%select{array of %2 elements|non-array object}1 in a constant expression">;
+def note_constexpr_float_arithmetic : Note<
+ "floating point arithmetic produces %select{an infinity|a NaN}0">;
+def note_constexpr_pointer_subtraction_not_same_array : Note<
+ "subtracted pointers are not elements of the same array">;
+def note_constexpr_pointer_comparison_base_classes : Note<
+ "comparison of addresses of subobjects of different base classes "
+ "has unspecified value">;
+def note_constexpr_pointer_comparison_base_field : Note<
+ "comparison of address of base class subobject %0 of class %1 to field %2 "
+ "has unspecified value">;
+def note_constexpr_pointer_comparison_differing_access : Note<
+ "comparison of address of fields %0 and %2 of %4 with differing access "
+ "specifiers (%1 vs %3) has unspecified value">;
+def note_constexpr_compare_virtual_mem_ptr : Note<
+ "comparison of pointer to virtual member function %0 has unspecified value">;
+def note_constexpr_past_end : Note<
+ "dereferenced pointer past the end of %select{|subobject of }0"
+ "%select{temporary|%2}1 is not a constant expression">;
+def note_constexpr_past_end_subobject : Note<
+ "cannot %select{access base class of|access derived class of|access field of|"
+ "access array element of|ERROR|call member function on|"
+ "access real component of|access imaginary component of}0 "
+ "pointer past the end of object">;
+def note_constexpr_null_subobject : Note<
+ "cannot %select{access base class of|access derived class of|access field of|"
+ "access array element of|perform pointer arithmetic on|"
+ "call member function on|access real component of|"
+ "access imaginary component of}0 null pointer">;
+def note_constexpr_var_init_non_constant : Note<
+ "initializer of %0 is not a constant expression">;
+def note_constexpr_typeid_polymorphic : Note<
+ "typeid applied to expression of polymorphic type %0 is "
+ "not allowed in a constant expression">;
+def note_constexpr_void_comparison : Note<
+ "comparison between unequal pointers to void has unspecified result">;
+def note_constexpr_temporary_here : Note<"temporary created here">;
+def note_constexpr_conditional_never_const : Note<
+ "both arms of conditional operator are unable to produce a "
+ "constant expression">;
+def note_constexpr_depth_limit_exceeded : Note<
+ "constexpr evaluation exceeded maximum depth of %0 calls">;
+def note_constexpr_call_limit_exceeded : Note<
+ "constexpr evaluation hit maximum call limit">;
+def note_constexpr_lifetime_ended : Note<
+ "read of %select{temporary|variable}0 whose lifetime has ended">;
+def note_constexpr_ltor_volatile_type : Note<
+ "read of volatile-qualified type %0 is not allowed in a constant expression">;
+def note_constexpr_ltor_volatile_obj : Note<
+ "read of volatile %select{temporary|object %1|member %1}0 is not allowed in "
+ "a constant expression">;
+def note_constexpr_ltor_mutable : Note<
+ "read of mutable member %0 is not allowed in a constant expression">;
+def note_constexpr_ltor_non_const_int : Note<
+ "read of non-const variable %0 is not allowed in a constant expression">;
+def note_constexpr_ltor_non_constexpr : Note<
+ "read of non-constexpr variable %0 is not allowed in a constant expression">;
+def note_constexpr_read_past_end : Note<
+ "read of dereferenced one-past-the-end pointer is not allowed in a "
+ "constant expression">;
+def note_constexpr_read_inactive_union_member : Note<
+ "read of member %0 of union with %select{active member %2|no active member}1 "
+ "is not allowed in a constant expression">;
+def note_constexpr_read_uninit : Note<
+ "read of uninitialized object is not allowed in a constant expression">;
+def note_constexpr_calls_suppressed : Note<
+ "(skipping %0 call%s0 in backtrace; use -fconstexpr-backtrace-limit=0 to "
+ "see all)">;
+def note_constexpr_call_here : Note<"in call to '%0'">;
+
+// inline asm related.
+let CategoryName = "Inline Assembly Issue" in {
+ def err_asm_invalid_escape : Error<
+ "invalid %% escape in inline assembly string">;
+ def err_asm_unknown_symbolic_operand_name : Error<
+ "unknown symbolic operand name in inline assembly string">;
+
+ def err_asm_unterminated_symbolic_operand_name : Error<
+ "unterminated symbolic operand name in inline assembly string">;
+ def err_asm_empty_symbolic_operand_name : Error<
+ "empty symbolic operand name in inline assembly string">;
+ def err_asm_invalid_operand_number : Error<
+ "invalid operand number in inline asm string">;
+}
+
+
+// Importing ASTs
+def err_odr_variable_type_inconsistent : Error<
+ "external variable %0 declared with incompatible types in different "
+ "translation units (%1 vs. %2)">;
+def err_odr_variable_multiple_def : Error<
+ "external variable %0 defined in multiple translation units">;
+def note_odr_value_here : Note<"declared here with type %0">;
+def note_odr_defined_here : Note<"also defined here">;
+def err_odr_function_type_inconsistent : Error<
+ "external function %0 declared with incompatible types in different "
+ "translation units (%1 vs. %2)">;
+def warn_odr_tag_type_inconsistent : Warning<
+ "type %0 has incompatible definitions in different translation units">;
+def note_odr_tag_kind_here: Note<
+ "%0 is a %select{struct|union|class|enum}1 here">;
+def note_odr_field : Note<"field %0 has type %1 here">;
+def note_odr_missing_field : Note<"no corresponding field here">;
+def note_odr_bit_field : Note<"bit-field %0 with type %1 and length %2 here">;
+def note_odr_not_bit_field : Note<"field %0 is not a bit-field">;
+def note_odr_base : Note<"class has base type %0">;
+def note_odr_virtual_base : Note<
+ "%select{non-virtual|virtual}0 derivation here">;
+def note_odr_missing_base : Note<"no corresponding base class here">;
+def note_odr_number_of_bases : Note<
+ "class has %0 base %plural{1:class|:classes}0">;
+def note_odr_enumerator : Note<"enumerator %0 with value %1 here">;
+def note_odr_missing_enumerator : Note<"no corresponding enumerator here">;
+
+def err_odr_field_type_inconsistent : Error<
+ "field %0 declared with incompatible types in different "
+ "translation units (%1 vs. %2)">;
+
+// Importing Objective-C ASTs
+def err_odr_ivar_type_inconsistent : Error<
+ "instance variable %0 declared with incompatible types in different "
+ "translation units (%1 vs. %2)">;
+def err_odr_objc_superclass_inconsistent : Error<
+ "class %0 has incompatible superclasses">;
+def note_odr_objc_superclass : Note<"inherits from superclass %0 here">;
+def note_odr_objc_missing_superclass : Note<"no corresponding superclass here">;
+def err_odr_objc_method_result_type_inconsistent : Error<
+ "%select{class|instance}0 method %1 has incompatible result types in "
+ "different translation units (%2 vs. %3)">;
+def err_odr_objc_method_num_params_inconsistent : Error<
+ "%select{class|instance}0 method %1 has a different number of parameters in "
+ "different translation units (%2 vs. %3)">;
+def err_odr_objc_method_param_type_inconsistent : Error<
+ "%select{class|instance}0 method %1 has a parameter with a different types "
+ "in different translation units (%2 vs. %3)">;
+def err_odr_objc_method_variadic_inconsistent : Error<
+ "%select{class|instance}0 method %1 is variadic in one translation unit "
+ "and not variadic in another">;
+def note_odr_objc_method_here : Note<
+ "%select{class|instance}0 method %1 also declared here">;
+def err_odr_objc_property_type_inconsistent : Error<
+ "property %0 declared with incompatible types in different "
+ "translation units (%1 vs. %2)">;
+def err_odr_objc_property_impl_kind_inconsistent : Error<
+ "property %0 is implemented with %select{@synthesize|@dynamic}1 in one "
+ "translation but %select{@dynamic|@synthesize}1 in another translation unit">;
+def note_odr_objc_property_impl_kind : Note<
+ "property %0 is implemented with %select{@synthesize|@dynamic}1 here">;
+def err_odr_objc_synthesize_ivar_inconsistent : Error<
+ "property %0 is synthesized to different ivars in different translation "
+ "units (%1 vs. %2)">;
+def note_odr_objc_synthesize_ivar_here : Note<
+ "property is synthesized to ivar %0 here">;
+
+// Importing C++ ASTs
+def err_odr_different_num_template_parameters : Error<
+ "template parameter lists have a different number of parameters (%0 vs %1)">;
+def note_odr_template_parameter_list : Note<
+ "template parameter list also declared here">;
+def err_odr_different_template_parameter_kind : Error<
+ "template parameter has different kinds in different translation units">;
+def note_odr_template_parameter_here : Note<
+ "template parameter declared here">;
+def err_odr_parameter_pack_non_pack : Error<
+ "parameter kind mismatch; parameter is %select{not a|a}0 parameter pack">;
+def note_odr_parameter_pack_non_pack : Note<
+ "%select{parameter|parameter pack}0 declared here">;
+def err_odr_non_type_parameter_type_inconsistent : Error<
+ "non-type template parameter declared with incompatible types in different "
+ "translation units (%0 vs. %1)">;
+def err_unsupported_ast_node: Error<"cannot import unsupported AST node %0">;
+}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticAnalysisKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticAnalysisKinds.td
new file mode 100644
index 0000000..5461212
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticAnalysisKinds.td
@@ -0,0 +1,12 @@
+//==--- DiagnosticAnalysisKinds.td - libanalysis diagnostics --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+let Component = "Analysis" in {
+
+}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCategories.h b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCategories.h
new file mode 100644
index 0000000..4dd067b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCategories.h
@@ -0,0 +1,26 @@
+//===- DiagnosticCategories.h - Diagnostic Categories Enumerators-*- C++ -*===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_DIAGNOSTICCATEGORIES_H
+#define LLVM_CLANG_BASIC_DIAGNOSTICCATEGORIES_H
+
+namespace clang {
+ namespace diag {
+ enum {
+#define GET_CATEGORY_TABLE
+#define CATEGORY(X, ENUM) ENUM,
+#include "clang/Basic/DiagnosticGroups.inc"
+#undef CATEGORY
+#undef GET_CATEGORY_TABLE
+ DiagCat_NUM_CATEGORIES
+ };
+ } // end namespace diag
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCategories.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCategories.td
new file mode 100644
index 0000000..a02fbdf
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCategories.td
@@ -0,0 +1,10 @@
+//==--- DiagnosticCategories.td - Diagnostic Category Definitions ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+class CatInlineAsm : DiagCategory<"Inline Assembly Issue">;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td
new file mode 100644
index 0000000..103fc00
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticCommonKinds.td
@@ -0,0 +1,109 @@
+//==--- DiagnosticCommonKinds.td - common diagnostics ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Common Helpers
+//===----------------------------------------------------------------------===//
+
+let Component = "Common" in {
+
+// Basic.
+
+def fatal_too_many_errors
+ : Error<"too many errors emitted, stopping now">, DefaultFatal;
+
+def note_declared_at : Note<"declared here">;
+def note_previous_definition : Note<"previous definition is here">;
+def note_previous_declaration : Note<"previous declaration is here">;
+def note_previous_implicit_declaration : Note<
+ "previous implicit declaration is here">;
+def note_previous_use : Note<"previous use is here">;
+def note_duplicate_case_prev : Note<"previous case defined here">;
+def note_forward_declaration : Note<"forward declaration of %0">;
+def note_type_being_defined : Note<
+ "definition of %0 is not complete until the closing '}'">;
+/// note_matching - this is used as a continuation of a previous diagnostic,
+/// e.g. to specify the '(' when we expected a ')'.
+def note_matching : Note<"to match this '%0'">;
+
+def note_using : Note<"using">;
+def note_possibility : Note<"one possibility">;
+def note_also_found : Note<"also found">;
+
+// Parse && Lex
+def err_expected_colon : Error<"expected ':'">;
+def err_expected_colon_after_setter_name : Error<
+ "method name referenced in property setter attribute "
+ "must end with ':'">;
+def err_invalid_string_udl : Error<
+ "string literal with user-defined suffix cannot be used here">;
+def err_invalid_character_udl : Error<
+ "character literal with user-defined suffix cannot be used here">;
+def err_invalid_numeric_udl : Error<
+ "numeric literal with user-defined suffix cannot be used here">;
+
+// Parse && Sema
+def ext_no_declarators : ExtWarn<"declaration does not declare anything">,
+ InGroup<MissingDeclarations>;
+def err_param_redefinition : Error<"redefinition of parameter %0">;
+def warn_method_param_redefinition : Warning<"redefinition of method parameter %0">;
+def warn_method_param_declaration : Warning<"redeclaration of method parameter %0">,
+ InGroup<DuplicateArgDecl>, DefaultIgnore;
+def err_invalid_storage_class_in_func_decl : Error<
+ "invalid storage class specifier in function declarator">;
+def err_expected_namespace_name : Error<"expected namespace name">;
+def ext_variadic_templates : ExtWarn<
+ "variadic templates are a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_variadic_templates :
+ Warning<"variadic templates are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_default_special_members : Error<
+ "only special member functions may be defaulted">;
+def err_deleted_non_function : Error<
+ "only functions can have deleted definitions">;
+def err_module_not_found : Error<"module '%0' not found">, DefaultFatal;
+def err_module_not_built : Error<"could not build module '%0'">, DefaultFatal;
+def err_module_cycle : Error<"cyclic dependency in module '%0': %1">,
+ DefaultFatal;
+def warn_module_build : Warning<"building module '%0' from source">,
+ InGroup<ModuleBuild>, DefaultIgnore;
+def note_pragma_entered_here : Note<"#pragma entered here">;
+
+// Sema && Lex
+def ext_longlong : Extension<
+ "'long long' is an extension when C99 mode is not enabled">,
+ InGroup<LongLong>;
+def warn_cxx98_compat_longlong : Warning<
+ "'long long' is incompatible with C++98">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
+def warn_integer_too_large : Warning<
+ "integer constant is too large for its type">;
+def warn_integer_too_large_for_signed : Warning<
+ "integer constant is so large that it is unsigned">;
+
+// Sema && AST
+def note_invalid_subexpr_in_const_expr : Note<
+ "subexpression not valid in a constant expression">;
+
+// Targets
+
+def err_target_unknown_triple : Error<
+ "unknown target triple '%0', please use -triple or -arch">;
+def err_target_unknown_cpu : Error<"unknown target CPU '%0'">;
+def err_target_unknown_abi : Error<"unknown target ABI '%0'">;
+def err_target_unknown_cxxabi : Error<"unknown C++ ABI '%0'">;
+def err_target_invalid_feature : Error<"invalid target feature '%0'">;
+
+// Source manager
+def err_cannot_open_file : Error<"cannot open file '%0': %1">, DefaultFatal;
+def err_file_modified : Error<
+ "file '%0' modified since it was first processed">, DefaultFatal;
+def err_unsupported_bom : Error<"%0 byte order mark detected in '%1', but "
+ "encoding is not supported">, DefaultFatal;
+}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td
new file mode 100644
index 0000000..b443159
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -0,0 +1,138 @@
+//==--- DiagnosticDriverKinds.td - libdriver diagnostics ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+let Component = "Driver" in {
+
+def err_drv_no_such_file : Error<"no such file or directory: '%0'">;
+def err_drv_unsupported_opt : Error<"unsupported option '%0'">;
+def err_drv_unsupported_option_argument : Error<
+ "unsupported argument '%1' to option '%0'">;
+def err_drv_unknown_stdin_type : Error<
+ "-E or -x required when input is from standard input">;
+def err_drv_unknown_language : Error<"language not recognized: '%0'">;
+def err_drv_invalid_arch_name : Error<
+ "invalid arch name '%0'">;
+def err_drv_invalid_rtlib_name : Error<
+ "invalid runtime library name in argument '%0'">;
+def err_drv_unsupported_rtlib_for_platform : Error<
+ "unsupported runtime library '%0' for platform '%1'">;
+def err_drv_invalid_stdlib_name : Error<
+ "invalid library name in argument '%0'">;
+def err_drv_invalid_opt_with_multiple_archs : Error<
+ "option '%0' cannot be used with multiple -arch options">;
+def err_drv_invalid_output_with_multiple_archs : Error<
+ "cannot use '%0' output with multiple -arch options">;
+def err_drv_no_input_files : Error<"no input files">;
+def err_drv_use_of_Z_option : Error<
+ "unsupported use of internal gcc -Z option '%0'">;
+def err_drv_output_argument_with_multiple_files : Error<
+ "cannot specify -o when generating multiple output files">;
+def err_drv_unable_to_make_temp : Error<
+ "unable to make temporary file: %0">;
+def err_drv_unable_to_remove_file : Error<
+ "unable to remove file: %0">;
+def err_drv_command_failure : Error<
+ "unable to execute command: %0">;
+def err_drv_invalid_darwin_version : Error<
+ "invalid Darwin version number: %0">;
+def err_drv_missing_argument : Error<
+ "argument to '%0' is missing (expected %1 value%s1)">;
+def err_drv_invalid_Xarch_argument_with_args : Error<
+ "invalid Xarch argument: '%0', options requiring arguments are unsupported">;
+def err_drv_invalid_Xarch_argument_isdriver : Error<
+ "invalid Xarch argument: '%0', cannot change driver behavior inside Xarch argument">;
+def err_drv_argument_only_allowed_with : Error<
+ "invalid argument '%0' only allowed with '%1'">;
+def err_drv_argument_not_allowed_with : Error<
+ "invalid argument '%0' not allowed with '%1'">;
+def err_drv_invalid_version_number : Error<
+ "invalid version number in '%0'">;
+def err_drv_no_linker_llvm_support : Error<
+ "'%0': unable to pass LLVM bit-code files to linker">;
+def err_drv_no_ast_support : Error<
+ "'%0': unable to use AST files with this tool">;
+def err_drv_clang_unsupported : Error<
+ "the clang compiler does not support '%0'">;
+def err_drv_clang_unsupported_per_platform : Error<
+ "the clang compiler does not support '%0' on this platform">;
+def err_drv_clang_unsupported_opt_cxx_darwin_i386 : Error<
+ "the clang compiler does not support '%0' for C++ on Darwin/i386">;
+def err_drv_command_failed : Error<
+ "%0 command failed with exit code %1 (use -v to see invocation)">;
+def err_drv_command_signalled : Error<
+ "%0 command failed due to signal (use -v to see invocation)">;
+def err_drv_invalid_mfloat_abi : Error<
+ "invalid float ABI '%0'">;
+def err_drv_invalid_libcxx_deployment : Error<
+ "invalid deployment target for -stdlib=libc++ (requires %0 or later)">;
+def err_drv_invalid_feature : Error<
+ "invalid feature '%0' for CPU '%1'">;
+
+def err_drv_I_dash_not_supported : Error<
+ "'%0' not supported, please use -iquote instead">;
+def err_drv_unknown_argument : Error<"unknown argument: '%0'">;
+def err_drv_invalid_value : Error<"invalid value '%1' in '%0'">;
+def err_drv_invalid_int_value : Error<"invalid integral value '%1' in '%0'">;
+def err_drv_invalid_remap_file : Error<
+ "invalid option '%0' not of the form <from-file>;<to-file>">;
+def err_drv_invalid_gcc_output_type : Error<
+ "invalid output type '%0' for use with gcc tool">;
+def err_drv_cc_print_options_failure : Error<
+ "unable to open CC_PRINT_OPTIONS file: %0">;
+def err_drv_preamble_format : Error<
+ "incorrect format for -preamble-bytes=N,END">;
+def err_drv_conflicting_deployment_targets : Error<
+ "conflicting deployment targets, both '%0' and '%1' are present in environment">;
+def err_drv_invalid_arch_for_deployment_target : Error<
+ "invalid architecture '%0' for deployment target '%1'">;
+def err_drv_objc_gc_arr : Error<
+ "cannot specify both '-fobjc-arc' and '%0'">;
+def err_arc_nonfragile_abi : Error<
+ "-fobjc-arc is not supported with fragile abi">;
+def err_arc_unsupported : Error<
+ "-fobjc-arc is not supported on current deployment target">;
+def err_drv_mg_requires_m_or_mm : Error<
+ "option '-MG' requires '-M' or '-MM'">;
+
+def warn_c_kext : Warning<
+ "ignoring -fapple-kext which is valid for c++ and objective-c++ only">;
+def warn_drv_input_file_unused : Warning<
+ "%0: '%1' input unused when '%2' is present">;
+def warn_drv_preprocessed_input_file_unused : Warning<
+ "%0: previously preprocessed input unused when '%1' is present">;
+def warn_drv_unused_argument : Warning<
+ "argument unused during compilation: '%0'">,
+ InGroup<DiagGroup<"unused-command-line-argument">>;
+def warn_drv_empty_joined_argument : Warning<
+ "joined argument expects addition arg: '%0'">,
+ InGroup<DiagGroup<"unused-command-line-argument">>;
+def warn_drv_not_using_clang_cpp : Warning<
+ "not using the clang preprocessor due to user override">;
+def warn_drv_not_using_clang_cxx : Warning<
+ "not using the clang compiler for C++ inputs">;
+def warn_drv_not_using_clang_arch : Warning<
+ "not using the clang compiler for the '%0' architecture">;
+def warn_drv_clang_unsupported : Warning<
+ "the clang compiler does not support '%0'">;
+def warn_drv_assuming_mfloat_abi_is : Warning<
+ "unknown platform, assuming -mfloat-abi=%0">;
+def warn_ignoring_ftabstop_value : Warning<
+ "ignoring invalid -ftabstop value '%0', using default value %1">;
+def warn_drv_treating_input_as_cxx : Warning<
+ "treating '%0' input as '%1' when in C++ mode, this behavior is deprecated">,
+ InGroup<Deprecated>;
+def warn_drv_objc_gc_unsupported : Warning<
+ "Objective-C garbage collection is not supported on this platform, ignoring '%0'">;
+def warn_drv_pch_not_first_include : Warning<
+ "precompiled header '%0' was ignored because '%1' is not first '-include'">;
+
+def note_drv_command_failed_diag_msg : Note<
+ "diagnostic msg: %0">;
+
+}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td
new file mode 100644
index 0000000..5d6b887
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticFrontendKinds.td
@@ -0,0 +1,134 @@
+//==--- DiagnosticFrontendKinds.td - frontend diagnostics -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+let Component = "Frontend" in {
+
+def err_fe_error_opening : Error<"error opening '%0': %1">;
+def err_fe_error_reading : Error<"error reading '%0'">;
+def err_fe_error_reading_stdin : Error<"error reading stdin">;
+def err_fe_error_backend : Error<"error in backend: %0">, DefaultFatal;
+
+// Error generated by the backend.
+def err_fe_inline_asm : Error<"%0">, CatInlineAsm;
+def note_fe_inline_asm_here : Note<"instantiated into assembly here">;
+def err_fe_cannot_link_module : Error<"cannot link module '%0': %1">,
+ DefaultFatal;
+
+
+
+def err_fe_invalid_code_complete_file : Error<
+ "cannot locate code-completion file %0">, DefaultFatal;
+def err_fe_stdout_binary : Error<"unable to change standard output to binary">,
+ DefaultFatal;
+def err_fe_dependency_file_requires_MT : Error<
+ "-dependency-file requires at least one -MT or -MQ option">;
+def err_fe_invalid_plugin_name : Error<
+ "unable to find plugin '%0'">;
+def err_fe_expected_compiler_job : Error<
+ "unable to handle compilation, expected exactly one compiler job in '%0'">;
+def err_fe_expected_clang_command : Error<
+ "expected a clang compiler command">;
+def err_fe_remap_missing_to_file : Error<
+ "could not remap file '%0' to the contents of file '%1'">, DefaultFatal;
+def err_fe_remap_missing_from_file : Error<
+ "could not remap from missing file '%0'">, DefaultFatal;
+def err_fe_unable_to_load_pch : Error<
+ "unable to load PCH file">;
+def err_fe_unable_to_load_plugin : Error<
+ "unable to load plugin '%0': '%1'">;
+def err_fe_unable_to_create_target : Error<
+ "unable to create target: '%0'">;
+def err_fe_unable_to_interface_with_target : Error<
+ "unable to interface with target machine">;
+def err_fe_unable_to_open_output : Error<
+ "unable to open output file '%0': '%1'">;
+def err_fe_unable_to_rename_temp : Error<
+ "unable to rename temporary '%0' to output file '%1': '%2'">;
+def err_fe_unable_to_open_logfile : Error<
+ "unable to open logfile file '%0': '%1'">;
+def err_fe_pth_file_has_no_source_header : Error<
+ "PTH file '%0' does not designate an original source header file for -include-pth">;
+def warn_fe_macro_contains_embedded_newline : Warning<
+ "macro '%0' contains embedded newline, text after the newline is ignored.">;
+def warn_fe_cc_print_header_failure : Warning<
+ "unable to open CC_PRINT_HEADERS file: %0 (using stderr)">;
+def warn_fe_cc_log_diagnostics_failure : Warning<
+ "unable to open CC_LOG_DIAGNOSTICS file: %0 (using stderr)">;
+
+def warn_fe_serialized_diag_failure : Warning<
+ "unable to open file %0 for serializing diagnostics (%1)">,
+ InGroup<DiagGroup<"serialized-diagnostics">>;
+
+def err_verify_missing_start : Error<
+ "cannot find start ('{{') of expected %0">;
+def err_verify_missing_end : Error<
+ "cannot find end ('}}') of expected %0">;
+def err_verify_invalid_content : Error<
+ "invalid expected %0: %1">;
+def err_verify_inconsistent_diags : Error<
+ "'%0' diagnostics %select{expected|seen}1 but not %select{seen|expected}1: "
+ "%2">;
+
+def note_fixit_applied : Note<"FIX-IT applied suggested code changes">;
+def note_fixit_in_macro : Note<
+ "FIX-IT unable to apply suggested code changes in a macro">;
+def note_fixit_failed : Note<
+ "FIX-IT unable to apply suggested code changes">;
+def note_fixit_unfixed_error : Note<"FIX-IT detected an error it cannot fix">;
+def warn_fixit_no_changes : Note<
+ "FIX-IT detected errors it could not fix; no output will be generated">;
+
+// PCH reader
+def err_relocatable_without_isysroot : Error<
+ "must specify system root with -isysroot when building a relocatable "
+ "PCH file">;
+
+def warn_unknown_warning_option : Warning<
+ "unknown warning option '%0'">,
+ InGroup<DiagGroup<"unknown-warning-option"> >;
+def warn_unknown_negative_warning_option : Warning<
+ "unknown warning option '%0'?">,
+ InGroup<DiagGroup<"unknown-warning-option"> >, DefaultIgnore;
+def warn_unknown_warning_option_suggest : Warning<
+ "unknown warning option '%0'; did you mean '%1'?">,
+ InGroup<DiagGroup<"unknown-warning-option"> >;
+def warn_unknown_negative_warning_option_suggest : Warning<
+ "unknown warning option '%0'; did you mean '%1'?">,
+ InGroup<DiagGroup<"unknown-warning-option"> >, DefaultIgnore;
+def warn_unknown_warning_specifier : Warning<
+ "unknown %0 warning specifier: '%1'">,
+ InGroup<DiagGroup<"unknown-warning-option"> >;
+
+def warn_unknown_analyzer_checker : Warning<
+ "no analyzer checkers are associated with '%0'">;
+def warn_incompatible_analyzer_plugin_api : Warning<
+ "checker plugin '%0' is not compatible with this version of the analyzer">,
+ InGroup<DiagGroup<"analyzer-incompatible-plugin"> >;
+def note_incompatible_analyzer_plugin_api : Note<
+ "current API version is '%0', but plugin was compiled with version '%1'">;
+
+def err_module_map_not_found : Error<"module map file '%0' not found">,
+ DefaultFatal;
+def err_missing_module_name : Error<
+ "no module name provided; specify one with -fmodule-name=">,
+ DefaultFatal;
+def err_missing_module : Error<
+ "no module named '%0' declared in module map file '%1'">, DefaultFatal;
+def err_missing_umbrella_header : Error<
+ "cannot open umbrella header '%0': %1">, DefaultFatal;
+def err_no_submodule : Error<"no submodule named %0 in module '%1'">;
+def err_no_submodule_suggest : Error<
+ "no submodule named %0 in module '%1'; did you mean '%2'?">;
+def warn_missing_submodule : Warning<"missing submodule '%0'">,
+ InGroup<IncompleteUmbrella>;
+def err_module_map_temp_file : Error<
+ "unable to write temporary module map file '%0'">, DefaultFatal;
+def err_module_unavailable : Error<"module '%0' requires feature '%1'">;
+
+}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
new file mode 100644
index 0000000..c839853
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticGroups.td
@@ -0,0 +1,415 @@
+//==--- DiagnosticGroups.td - Diagnostic Group Definitions ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+def ImplicitFunctionDeclare : DiagGroup<"implicit-function-declaration">;
+def ImplicitInt : DiagGroup<"implicit-int">;
+
+// Aggregation warning settings.
+def Implicit : DiagGroup<"implicit", [
+ ImplicitFunctionDeclare,
+ ImplicitInt
+]>;
+
+// Empty DiagGroups are recognized by clang but ignored.
+def : DiagGroup<"abi">;
+def : DiagGroup<"address">;
+def AddressOfTemporary : DiagGroup<"address-of-temporary">;
+def : DiagGroup<"aggregate-return">;
+def AmbigMemberTemplate : DiagGroup<"ambiguous-member-template">;
+def : DiagGroup<"attributes">;
+def : DiagGroup<"bad-function-cast">;
+def Availability : DiagGroup<"availability">;
+def AutoImport : DiagGroup<"auto-import">;
+def ConstantConversion : DiagGroup<"constant-conversion">;
+def LiteralConversion : DiagGroup<"literal-conversion">;
+def StringConversion : DiagGroup<"string-conversion">;
+def SignConversion : DiagGroup<"sign-conversion">;
+def BoolConversion : DiagGroup<"bool-conversion">;
+def IntConversion : DiagGroup<"int-conversion">;
+def NullConversion : DiagGroup<"null-conversion">;
+def BuiltinRequiresHeader : DiagGroup<"builtin-requires-header">;
+def CXXCompat: DiagGroup<"c++-compat">;
+def CastAlign : DiagGroup<"cast-align">;
+def : DiagGroup<"cast-qual">;
+def : DiagGroup<"char-align">;
+def Comment : DiagGroup<"comment">;
+def : DiagGroup<"ctor-dtor-privacy">;
+def : DiagGroup<"declaration-after-statement">;
+def DefaultArgSpecialMember : DiagGroup<"default-arg-special-member">;
+def GNUDesignator : DiagGroup<"gnu-designator">;
+
+def DeleteNonVirtualDtor : DiagGroup<"delete-non-virtual-dtor">;
+
+def DeprecatedDeclarations : DiagGroup<"deprecated-declarations">;
+def DeprecatedWritableStr : DiagGroup<"deprecated-writable-strings">;
+def Deprecated : DiagGroup<"deprecated", [ DeprecatedDeclarations] >,
+ DiagCategory<"Deprecations">;
+
+def DeprecatedImplementations : DiagGroup<"deprecated-implementations">;
+
+def : DiagGroup<"disabled-optimization">;
+def : DiagGroup<"discard-qual">;
+def : DiagGroup<"div-by-zero">;
+def EmptyBody : DiagGroup<"empty-body">;
+def ExtraTokens : DiagGroup<"extra-tokens">;
+
+def FormatExtraArgs : DiagGroup<"format-extra-args">;
+def FormatZeroLength : DiagGroup<"format-zero-length">;
+
+def CXX98CompatBindToTemporaryCopy :
+ DiagGroup<"c++98-compat-bind-to-temporary-copy">;
+def CXX98CompatLocalTypeTemplateArgs :
+ DiagGroup<"c++98-compat-local-type-template-args">;
+def CXX98CompatUnnamedTypeTemplateArgs :
+ DiagGroup<"c++98-compat-unnamed-type-template-args">;
+
+def CXX98Compat : DiagGroup<"c++98-compat",
+ [CXX98CompatBindToTemporaryCopy,
+ CXX98CompatLocalTypeTemplateArgs,
+ CXX98CompatUnnamedTypeTemplateArgs]>;
+// Warnings for C++11 features which are Extensions in C++98 mode.
+def CXX98CompatPedantic : DiagGroup<"c++98-compat-pedantic", [CXX98Compat]>;
+
+def CXX11Narrowing : DiagGroup<"c++11-narrowing">;
+
+// Original name of this warning in Clang
+def : DiagGroup<"c++0x-narrowing", [CXX11Narrowing]>;
+
+// Name of this warning in GCC
+def : DiagGroup<"narrowing", [CXX11Narrowing]>;
+
+def CXX11CompatReservedUserDefinedLiteral :
+ DiagGroup<"c++11-compat-reserved-user-defined-literal">;
+def ReservedUserDefinedLiteral :
+ DiagGroup<"reserved-user-defined-literal",
+ [CXX11CompatReservedUserDefinedLiteral]>;
+
+def CXX11Compat : DiagGroup<"c++11-compat",
+ [CXX11Narrowing,
+ CXX11CompatReservedUserDefinedLiteral]>;
+def : DiagGroup<"c++0x-compat", [CXX11Compat]>;
+
+def : DiagGroup<"effc++">;
+def ExitTimeDestructors : DiagGroup<"exit-time-destructors">;
+def FlexibleArrayExtensions : DiagGroup<"flexible-array-extensions">;
+def FourByteMultiChar : DiagGroup<"four-char-constants">;
+def GlobalConstructors : DiagGroup<"global-constructors">;
+def : DiagGroup<"idiomatic-parentheses">;
+def BitwiseOpParentheses: DiagGroup<"bitwise-op-parentheses">;
+def LogicalOpParentheses: DiagGroup<"logical-op-parentheses">;
+def DanglingElse: DiagGroup<"dangling-else">;
+def IgnoredQualifiers : DiagGroup<"ignored-qualifiers">;
+def : DiagGroup<"import">;
+def IncompatiblePointerTypes : DiagGroup<"incompatible-pointer-types">;
+def IncompleteUmbrella : DiagGroup<"incomplete-umbrella">;
+def KNRPromotedParameter : DiagGroup<"knr-promoted-parameter">;
+def : DiagGroup<"init-self">;
+def : DiagGroup<"inline">;
+def : DiagGroup<"int-to-pointer-cast">;
+def : DiagGroup<"invalid-pch">;
+def LiteralRange : DiagGroup<"literal-range">;
+def LocalTypeTemplateArgs : DiagGroup<"local-type-template-args",
+ [CXX98CompatLocalTypeTemplateArgs]>;
+def MalformedWarningCheck : DiagGroup<"malformed-warning-check">;
+def Main : DiagGroup<"main">;
+def MainReturnType : DiagGroup<"main-return-type">;
+def MissingBraces : DiagGroup<"missing-braces">;
+def MissingDeclarations: DiagGroup<"missing-declarations">;
+def : DiagGroup<"missing-format-attribute">;
+def : DiagGroup<"missing-include-dirs">;
+def : DiagGroup<"missing-noreturn">;
+def MultiChar : DiagGroup<"multichar">;
+def : DiagGroup<"nested-externs">;
+def : DiagGroup<"newline-eof">;
+def LongLong : DiagGroup<"long-long">;
+def MismatchedTags : DiagGroup<"mismatched-tags">;
+def MissingFieldInitializers : DiagGroup<"missing-field-initializers">;
+def ModuleBuild : DiagGroup<"module-build">;
+def NullCharacter : DiagGroup<"null-character">;
+def NullDereference : DiagGroup<"null-dereference">;
+def InitializerOverrides : DiagGroup<"initializer-overrides">;
+def NonNull : DiagGroup<"nonnull">;
+def : DiagGroup<"nonportable-cfstrings">;
+def NonVirtualDtor : DiagGroup<"non-virtual-dtor">;
+def OveralignedType : DiagGroup<"over-aligned">;
+def : DiagGroup<"old-style-cast">;
+def : DiagGroup<"old-style-definition">;
+def OutOfLineDeclaration : DiagGroup<"out-of-line-declaration">;
+def : DiagGroup<"overflow">;
+def OverlengthStrings : DiagGroup<"overlength-strings">;
+def OverloadedVirtual : DiagGroup<"overloaded-virtual">;
+def ObjCPropertyImpl : DiagGroup<"objc-property-implementation">;
+def ObjCMissingSuperCalls : DiagGroup<"objc-missing-super-calls">;
+def ObjCRetainBlockProperty : DiagGroup<"objc-noncopy-retain-block-property">;
+def ObjCReadonlyPropertyHasSetter : DiagGroup<"objc-readonly-with-setter-property">;
+def ObjCRootClass : DiagGroup<"objc-root-class">;
+def Packed : DiagGroup<"packed">;
+def Padded : DiagGroup<"padded">;
+def PointerArith : DiagGroup<"pointer-arith">;
+def PoundWarning : DiagGroup<"#warnings">,
+ DiagCategory<"#warning Directive">;
+def PoundPragmaMessage : DiagGroup<"#pragma-messages">,
+ DiagCategory<"#pragma message Directive">;
+def : DiagGroup<"pointer-to-int-cast">;
+def : DiagGroup<"redundant-decls">;
+def ReturnTypeCLinkage : DiagGroup<"return-type-c-linkage">;
+def ReturnType : DiagGroup<"return-type", [ReturnTypeCLinkage]>;
+def BindToTemporaryCopy : DiagGroup<"bind-to-temporary-copy",
+ [CXX98CompatBindToTemporaryCopy]>;
+def SelfAssignment : DiagGroup<"self-assign">;
+def SemiBeforeMethodBody : DiagGroup<"semicolon-before-method-body">;
+def Sentinel : DiagGroup<"sentinel">;
+def MissingMethodReturnType : DiagGroup<"missing-method-return-type">;
+def : DiagGroup<"sequence-point">;
+def Shadow : DiagGroup<"shadow">;
+def : DiagGroup<"shorten-64-to-32">;
+def : DiagGroup<"sign-promo">;
+def SignCompare : DiagGroup<"sign-compare">;
+def : DiagGroup<"stack-protector">;
+def : DiagGroup<"switch-default">;
+def : DiagGroup<"synth">;
+def SizeofArrayArgument : DiagGroup<"sizeof-array-argument">;
+def StringPlusInt : DiagGroup<"string-plus-int">;
+def StrncatSize : DiagGroup<"strncat-size">;
+def TautologicalCompare : DiagGroup<"tautological-compare">;
+def HeaderHygiene : DiagGroup<"header-hygiene">;
+
+// Preprocessor warnings.
+def : DiagGroup<"builtin-macro-redefined">;
+
+// Just silence warnings about -Wstrict-aliasing for now.
+def : DiagGroup<"strict-aliasing=0">;
+def : DiagGroup<"strict-aliasing=1">;
+def : DiagGroup<"strict-aliasing=2">;
+def : DiagGroup<"strict-aliasing">;
+
+// Just silence warnings about -Wstrict-overflow for now.
+def : DiagGroup<"strict-overflow=0">;
+def : DiagGroup<"strict-overflow=1">;
+def : DiagGroup<"strict-overflow=2">;
+def : DiagGroup<"strict-overflow=3">;
+def : DiagGroup<"strict-overflow=4">;
+def : DiagGroup<"strict-overflow=5">;
+def : DiagGroup<"strict-overflow">;
+
+def InvalidOffsetof : DiagGroup<"invalid-offsetof">;
+def LambdaExtensions : DiagGroup<"lambda-extensions">;
+def : DiagGroup<"strict-prototypes">;
+def StrictSelector : DiagGroup<"strict-selector-match">;
+def MethodDuplicate : DiagGroup<"duplicate-method-match">;
+def CoveredSwitchDefault : DiagGroup<"covered-switch-default">;
+def SwitchEnum : DiagGroup<"switch-enum">;
+def Switch : DiagGroup<"switch">;
+def Trigraphs : DiagGroup<"trigraphs">;
+
+def : DiagGroup<"type-limits">;
+def Unicode : DiagGroup<"unicode">;
+def Uninitialized : DiagGroup<"uninitialized">;
+def UninitializedMaybe : DiagGroup<"conditional-uninitialized">;
+def UnknownPragmas : DiagGroup<"unknown-pragmas">;
+def NSobjectAttribute : DiagGroup<"NSObject-attribute">;
+def UnknownAttributes : DiagGroup<"attributes">;
+def IgnoredAttributes : DiagGroup<"ignored-attributes">;
+def UnnamedTypeTemplateArgs : DiagGroup<"unnamed-type-template-args",
+ [CXX98CompatUnnamedTypeTemplateArgs]>;
+def UnusedArgument : DiagGroup<"unused-argument">;
+def UnusedComparison : DiagGroup<"unused-comparison">;
+def UnusedExceptionParameter : DiagGroup<"unused-exception-parameter">;
+def UnneededInternalDecl : DiagGroup<"unneeded-internal-declaration">;
+def UnneededMemberFunction : DiagGroup<"unneeded-member-function">;
+def UnusedFunction : DiagGroup<"unused-function", [UnneededInternalDecl]>;
+def UnusedMemberFunction : DiagGroup<"unused-member-function",
+ [UnneededMemberFunction]>;
+def UnusedLabel : DiagGroup<"unused-label">;
+def UnusedParameter : DiagGroup<"unused-parameter">;
+def UnusedResult : DiagGroup<"unused-result">;
+def UnusedValue : DiagGroup<"unused-value", [UnusedComparison, UnusedResult]>;
+def UnusedVariable : DiagGroup<"unused-variable">;
+def UsedButMarkedUnused : DiagGroup<"used-but-marked-unused">;
+def UserDefinedLiterals : DiagGroup<"user-defined-literals">;
+def ReadOnlySetterAttrs : DiagGroup<"readonly-setter-attrs">;
+def Reorder : DiagGroup<"reorder">;
+def UndeclaredSelector : DiagGroup<"undeclared-selector">;
+def ImplicitAtomic : DiagGroup<"implicit-atomic-properties">;
+def CustomAtomic : DiagGroup<"custom-atomic-properties">;
+def AtomicProperties : DiagGroup<"atomic-properties",
+ [ImplicitAtomic, CustomAtomic]>;
+def AutomaticReferenceCountingABI : DiagGroup<"arc-abi">;
+def ARCUnsafeRetainedAssign : DiagGroup<"arc-unsafe-retained-assign">;
+def ARCRetainCycles : DiagGroup<"arc-retain-cycles">;
+def ARCNonPodMemAccess : DiagGroup<"arc-non-pod-memaccess">;
+def AutomaticReferenceCounting : DiagGroup<"arc",
+ [AutomaticReferenceCountingABI,
+ ARCUnsafeRetainedAssign,
+ ARCRetainCycles,
+ ARCNonPodMemAccess]>;
+def Selector : DiagGroup<"selector">;
+def NonfragileAbi2 : DiagGroup<"nonfragile-abi2">;
+def Protocol : DiagGroup<"protocol">;
+def SuperSubClassMismatch : DiagGroup<"super-class-method-mismatch">;
+def OverridingMethodMismatch : DiagGroup<"overriding-method-mismatch">;
+def : DiagGroup<"variadic-macros">;
+def VariadicMacros : DiagGroup<"variadic-macros">;
+def VectorConversion : DiagGroup<"vector-conversion">; // clang specific
+def VexingParse : DiagGroup<"vexing-parse">;
+def VLA : DiagGroup<"vla">;
+def VolatileRegisterVar : DiagGroup<"volatile-register-var">;
+def Visibility : DiagGroup<"visibility">;
+
+// GCC calls -Wdeprecated-writable-strings -Wwrite-strings.
+def GCCWriteStrings : DiagGroup<"write-strings" , [DeprecatedWritableStr]>;
+
+def CharSubscript : DiagGroup<"char-subscripts">;
+def LargeByValueCopy : DiagGroup<"large-by-value-copy">;
+def DuplicateArgDecl : DiagGroup<"duplicate-method-arg">;
+
+// Aggregation warning settings.
+
+// -Widiomatic-parentheses contains warnings about 'idiomatic'
+// missing parentheses; it is off by default. We do not include it
+// in -Wparentheses because most users who use -Wparentheses explicitly
+// do not want these warnings.
+def ParenthesesOnEquality : DiagGroup<"parentheses-equality">;
+def Parentheses : DiagGroup<"parentheses",
+ [LogicalOpParentheses,
+ BitwiseOpParentheses,
+ ParenthesesOnEquality,
+ DanglingElse]>;
+
+// -Wconversion has its own warnings, but we split a few out for
+// legacy reasons:
+// - some people want just 64-to-32 warnings
+// - conversion warnings with constant sources are on by default
+// - conversion warnings for literals are on by default
+// - bool-to-pointer conversion warnings are on by default
+// - __null-to-integer conversion warnings are on by default
+def Conversion : DiagGroup<"conversion",
+ [DiagGroup<"shorten-64-to-32">,
+ ConstantConversion,
+ LiteralConversion,
+ StringConversion,
+ SignConversion,
+ BoolConversion,
+ NullConversion,
+ IntConversion]>,
+ DiagCategory<"Value Conversion Issue">;
+
+def Unused : DiagGroup<"unused",
+ [UnusedArgument, UnusedFunction, UnusedLabel,
+ // UnusedParameter, (matches GCC's behavior)
+ // UnusedMemberFunction, (clean-up llvm before enabling)
+ UnusedValue, UnusedVariable]>,
+ DiagCategory<"Unused Entity Issue">;
+
+// Format settings.
+def FormatInvalidSpecifier : DiagGroup<"format-invalid-specifier">;
+def FormatSecurity : DiagGroup<"format-security">;
+def FormatNonStandard : DiagGroup<"format-non-iso">;
+def FormatY2K : DiagGroup<"format-y2k">;
+def Format : DiagGroup<"format",
+ [FormatExtraArgs, FormatZeroLength, NonNull,
+ FormatSecurity, FormatY2K, FormatInvalidSpecifier]>,
+ DiagCategory<"Format String Issue">;
+def FormatNonLiteral : DiagGroup<"format-nonliteral", [FormatSecurity]>;
+def Format2 : DiagGroup<"format=2",
+ [FormatNonLiteral, FormatSecurity, FormatY2K]>;
+
+def Extra : DiagGroup<"extra", [
+ MissingFieldInitializers,
+ IgnoredQualifiers,
+ InitializerOverrides,
+ SemiBeforeMethodBody,
+ MissingMethodReturnType,
+ SignCompare,
+ UnusedParameter
+ ]>;
+
+def Most : DiagGroup<"most", [
+ CharSubscript,
+ Comment,
+ DeleteNonVirtualDtor,
+ Format,
+ Implicit,
+ MismatchedTags,
+ MissingBraces,
+ MultiChar,
+ Reorder,
+ ReturnType,
+ SelfAssignment,
+ SizeofArrayArgument,
+ StringPlusInt,
+ Trigraphs,
+ Uninitialized,
+ UnknownPragmas,
+ Unused,
+ VolatileRegisterVar,
+ ObjCMissingSuperCalls,
+ OverloadedVirtual
+ ]>;
+
+// Thread Safety warnings
+def ThreadSafety : DiagGroup<"thread-safety">;
+
+// Note that putting warnings in -Wall will not disable them by default. If a
+// warning should be active _only_ when -Wall is passed in, mark it as
+// DefaultIgnore in addition to putting it here.
+def : DiagGroup<"all", [Most, Parentheses, Switch]>;
+
+// Aliases.
+def : DiagGroup<"", [Extra]>; // -W = -Wextra
+def : DiagGroup<"endif-labels", [ExtraTokens]>; // -Wendif-labels=-Wendif-tokens
+def : DiagGroup<"comments", [Comment]>; // -Wcomments = -Wcomment
+def : DiagGroup<"conversion-null",
+ [NullConversion]>; // -Wconversion-null = -Wnull-conversion
+def : DiagGroup<"bool-conversions",
+ [BoolConversion]>; // -Wbool-conversions = -Wbool-conversion
+def : DiagGroup<"int-conversions",
+ [IntConversion]>; // -Wint-conversions = -Wint-conversion
+def : DiagGroup<"vector-conversions",
+ [VectorConversion]>; // -Wvector-conversions = -Wvector-conversion
+
+// A warning group for warnings that we want to have on by default in clang,
+// but which aren't on by default in GCC.
+def NonGCC : DiagGroup<"non-gcc",
+ [SignCompare, Conversion, LiteralRange]>;
+
+// A warning group for warnings about using C++11 features as extensions in
+// earlier C++ versions.
+def CXX11 : DiagGroup<"c++11-extensions">;
+def : DiagGroup<"c++0x-extensions", [CXX11]>;
+def DelegatingCtorCycles :
+ DiagGroup<"delegating-ctor-cycles">;
+
+// A warning group for warnings about using C11 features as extensions.
+def C11 : DiagGroup<"c11-extensions">;
+
+// A warning group for warnings about using C99 features as extensions.
+def C99 : DiagGroup<"c99-extensions">;
+
+// A warning group for warnings about GCC extensions.
+def GNU : DiagGroup<"gnu", [GNUDesignator, VLA]>;
+// A warning group for warnings about code that clang accepts but gcc doesn't.
+def GccCompat : DiagGroup<"gcc-compat">;
+
+// A warning group for warnings about Microsoft extensions.
+def Microsoft : DiagGroup<"microsoft">;
+
+def ObjCNonUnifiedException : DiagGroup<"objc-nonunified-exceptions">;
+
+def ObjCProtocolMethodImpl : DiagGroup<"objc-protocol-method-implementation">;
+
+// ObjC API warning groups.
+def ObjCRedundantLiteralUse : DiagGroup<"objc-redundant-literal-use">;
+def ObjCRedundantAPIUse : DiagGroup<"objc-redundant-api-use", [
+ ObjCRedundantLiteralUse
+ ]>;
+
+def ObjCCocoaAPI : DiagGroup<"objc-cocoa-api", [
+ ObjCRedundantAPIUse
+ ]>;
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h
new file mode 100644
index 0000000..a6c22db
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticIDs.h
@@ -0,0 +1,279 @@
+//===--- DiagnosticIDs.h - Diagnostic IDs Handling --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Diagnostic IDs-related interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_DIAGNOSTICIDS_H
+#define LLVM_CLANG_DIAGNOSTICIDS_H
+
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/StringRef.h"
+#include "clang/Basic/LLVM.h"
+
+namespace llvm {
+ template<typename T, unsigned> class SmallVector;
+}
+
+namespace clang {
+ class DiagnosticsEngine;
+ class SourceLocation;
+ struct WarningOption;
+
+ // Import the diagnostic enums themselves.
+ namespace diag {
+ // Start position for diagnostics.
+ enum {
+ DIAG_START_DRIVER = 300,
+ DIAG_START_FRONTEND = DIAG_START_DRIVER + 100,
+ DIAG_START_SERIALIZATION = DIAG_START_FRONTEND + 100,
+ DIAG_START_LEX = DIAG_START_SERIALIZATION + 120,
+ DIAG_START_PARSE = DIAG_START_LEX + 300,
+ DIAG_START_AST = DIAG_START_PARSE + 400,
+ DIAG_START_SEMA = DIAG_START_AST + 100,
+ DIAG_START_ANALYSIS = DIAG_START_SEMA + 3000,
+ DIAG_UPPER_LIMIT = DIAG_START_ANALYSIS + 100
+ };
+
+ class CustomDiagInfo;
+
+ /// diag::kind - All of the diagnostics that can be emitted by the frontend.
+ typedef unsigned kind;
+
+ // Get typedefs for common diagnostics.
+ enum {
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
+ SFINAE,ACCESS,CATEGORY,NOWERROR,SHOWINSYSHEADER) ENUM,
+#include "clang/Basic/DiagnosticCommonKinds.inc"
+ NUM_BUILTIN_COMMON_DIAGNOSTICS
+#undef DIAG
+ };
+
+ /// Enum values that allow the client to map NOTEs, WARNINGs, and EXTENSIONs
+ /// to either MAP_IGNORE (nothing), MAP_WARNING (emit a warning), or
+ /// MAP_ERROR (emit as an error). It allows clients to map errors to
+ /// MAP_ERROR/MAP_DEFAULT or MAP_FATAL (stop emitting diagnostics after this
+ /// one).
+ enum Mapping {
+ // NOTE: 0 means "uncomputed".
+ MAP_IGNORE = 1, //< Map this diagnostic to nothing, ignore it.
+ MAP_WARNING = 2, //< Map this diagnostic to a warning.
+ MAP_ERROR = 3, //< Map this diagnostic to an error.
+ MAP_FATAL = 4 //< Map this diagnostic to a fatal error.
+ };
+ }
+
+class DiagnosticMappingInfo {
+ unsigned Mapping : 3;
+ unsigned IsUser : 1;
+ unsigned IsPragma : 1;
+ unsigned HasShowInSystemHeader : 1;
+ unsigned HasNoWarningAsError : 1;
+ unsigned HasNoErrorAsFatal : 1;
+
+public:
+ static DiagnosticMappingInfo Make(diag::Mapping Mapping, bool IsUser,
+ bool IsPragma) {
+ DiagnosticMappingInfo Result;
+ Result.Mapping = Mapping;
+ Result.IsUser = IsUser;
+ Result.IsPragma = IsPragma;
+ Result.HasShowInSystemHeader = 0;
+ Result.HasNoWarningAsError = 0;
+ Result.HasNoErrorAsFatal = 0;
+ return Result;
+ }
+
+ diag::Mapping getMapping() const { return diag::Mapping(Mapping); }
+ void setMapping(diag::Mapping Value) { Mapping = Value; }
+
+ bool isUser() const { return IsUser; }
+ bool isPragma() const { return IsPragma; }
+
+ bool hasShowInSystemHeader() const { return HasShowInSystemHeader; }
+ void setShowInSystemHeader(bool Value) { HasShowInSystemHeader = Value; }
+
+ bool hasNoWarningAsError() const { return HasNoWarningAsError; }
+ void setNoWarningAsError(bool Value) { HasNoWarningAsError = Value; }
+
+ bool hasNoErrorAsFatal() const { return HasNoErrorAsFatal; }
+ void setNoErrorAsFatal(bool Value) { HasNoErrorAsFatal = Value; }
+};
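
  As a rough, hypothetical sketch (not part of this change), the factory and setters above
  could be driven like this by warning-control code; the function name and the
  warning-to-error scenario are invented for illustration.

  #include "clang/Basic/DiagnosticIDs.h"

  static clang::DiagnosticMappingInfo makeUpgradedMapping() {
    // Start from a user-requested (non-pragma) warning mapping...
    clang::DiagnosticMappingInfo Info = clang::DiagnosticMappingInfo::Make(
        clang::diag::MAP_WARNING, /*IsUser=*/true, /*IsPragma=*/false);
    // ...and promote it to an error, as a -Werror=foo style request might.
    Info.setMapping(clang::diag::MAP_ERROR);
    return Info;
  }
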
+
+/// \brief Used for handling and querying diagnostic IDs. Can be used and shared
+/// by multiple Diagnostics for multiple translation units.
+class DiagnosticIDs : public RefCountedBase<DiagnosticIDs> {
+public:
+ /// Level - The level of the diagnostic, after it has been through mapping.
+ enum Level {
+ Ignored, Note, Warning, Error, Fatal
+ };
+
+private:
+ /// CustomDiagInfo - Information for uniquing and looking up custom diags.
+ diag::CustomDiagInfo *CustomDiagInfo;
+
+public:
+ DiagnosticIDs();
+ ~DiagnosticIDs();
+
+ /// getCustomDiagID - Return an ID for a diagnostic with the specified message
+ /// and level. If this is the first request for this diagnostic, it is
+ /// registered and created, otherwise the existing ID is returned.
+ unsigned getCustomDiagID(Level L, StringRef Message);
+
+ //===--------------------------------------------------------------------===//
+ // Diagnostic classification and reporting interfaces.
+ //
+
+ /// getDescription - Given a diagnostic ID, return a description of the
+ /// issue.
+ StringRef getDescription(unsigned DiagID) const;
+
+ /// isBuiltinWarningOrExtension - Return true if the unmapped diagnostic level
+ /// of the specified diagnostic ID is a Warning or Extension. This only works
+ /// on builtin diagnostics, not custom ones, and is not legal to call on
+ /// NOTEs.
+ static bool isBuiltinWarningOrExtension(unsigned DiagID);
+
+ /// \brief Return true if the specified diagnostic is mapped to errors by
+ /// default.
+ static bool isDefaultMappingAsError(unsigned DiagID);
+
+ /// \brief Determine whether the given built-in diagnostic ID is a
+ /// Note.
+ static bool isBuiltinNote(unsigned DiagID);
+
+ /// isBuiltinExtensionDiag - Determine whether the given built-in diagnostic
+ /// ID is for an extension of some sort.
+ ///
+ static bool isBuiltinExtensionDiag(unsigned DiagID) {
+ bool ignored;
+ return isBuiltinExtensionDiag(DiagID, ignored);
+ }
+
+ /// isBuiltinExtensionDiag - Determine whether the given built-in diagnostic
+ /// ID is for an extension of some sort. This also returns EnabledByDefault,
+ /// which is set to indicate whether the diagnostic is ignored by default (in
+ /// which case -pedantic enables it) or treated as a warning/error by default.
+ ///
+ static bool isBuiltinExtensionDiag(unsigned DiagID, bool &EnabledByDefault);
+
+
+ /// getWarningOptionForDiag - Return the lowest-level warning option that
+ /// enables the specified diagnostic. If there is no -Wfoo flag that controls
+ /// the diagnostic, this returns null.
+ static StringRef getWarningOptionForDiag(unsigned DiagID);
+
+ /// getCategoryNumberForDiag - Return the category number that a specified
+ /// DiagID belongs to, or 0 if no category.
+ static unsigned getCategoryNumberForDiag(unsigned DiagID);
+
+ /// getNumberOfCategories - Return the number of categories
+ static unsigned getNumberOfCategories();
+
+ /// getCategoryNameFromID - Given a category ID, return the name of the
+ /// category.
+ static StringRef getCategoryNameFromID(unsigned CategoryID);
+
+ /// isARCDiagnostic - Return true if a given diagnostic falls into an
+ /// ARC diagnostic category.
+ static bool isARCDiagnostic(unsigned DiagID);
+
+ /// \brief Enumeration describing how the emission of a diagnostic should
+ /// be treated when it occurs during C++ template argument deduction.
+ enum SFINAEResponse {
+ /// \brief The diagnostic should not be reported, but it should cause
+ /// template argument deduction to fail.
+ ///
+ /// The vast majority of errors that occur during template argument
+ /// deduction fall into this category.
+ SFINAE_SubstitutionFailure,
+
+ /// \brief The diagnostic should be suppressed entirely.
+ ///
+ /// Warnings generally fall into this category.
+ SFINAE_Suppress,
+
+ /// \brief The diagnostic should be reported.
+ ///
+ /// The diagnostic should be reported. Various fatal errors (e.g.,
+ /// template instantiation depth exceeded) fall into this category.
+ SFINAE_Report,
+
+ /// \brief The diagnostic is an access-control diagnostic, which will be
+ /// substitution failures in some contexts and reported in others.
+ SFINAE_AccessControl
+ };
+
+ /// \brief Determines whether the given built-in diagnostic ID is
+ /// for an error that is suppressed if it occurs during C++ template
+ /// argument deduction.
+ ///
+ /// When an error is suppressed due to SFINAE, the template argument
+ /// deduction fails but no diagnostic is emitted. Certain classes of
+ /// errors, such as those errors that involve C++ access control,
+ /// are not SFINAE errors.
+ static SFINAEResponse getDiagnosticSFINAEResponse(unsigned DiagID);
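
  A minimal sketch of how a caller might act on this classification, assuming a simplified
  policy for SFINAE_AccessControl (which, per the comments above, is really context
  dependent); the helper name is hypothetical and not part of the patch.

  #include "clang/Basic/DiagnosticIDs.h"

  static bool shouldSuppressDuringDeduction(unsigned DiagID) {
    switch (clang::DiagnosticIDs::getDiagnosticSFINAEResponse(DiagID)) {
    case clang::DiagnosticIDs::SFINAE_Suppress:
    case clang::DiagnosticIDs::SFINAE_SubstitutionFailure:
      return true;   // not emitted; deduction simply fails
    case clang::DiagnosticIDs::SFINAE_AccessControl:
    case clang::DiagnosticIDs::SFINAE_Report:
      return false;  // surfaced to the user (simplified policy)
    }
    return false;
  }
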
+
+ /// \brief Get the set of all diagnostic IDs in the group with the given name.
+ ///
+ /// \param Diags [out] - On return, the diagnostics in the group.
+ /// \returns True if the given group is unknown, false otherwise.
+ bool getDiagnosticsInGroup(StringRef Group,
+ llvm::SmallVectorImpl<diag::kind> &Diags) const;
+
+ /// \brief Get the set of all diagnostic IDs.
+ void getAllDiagnostics(llvm::SmallVectorImpl<diag::kind> &Diags) const;
+
+ /// \brief Get the warning option with the closest edit distance to the given
+ /// group name.
+ static StringRef getNearestWarningOption(StringRef Group);
+
+private:
+ /// \brief Get the set of all diagnostic IDs in the given group.
+ ///
+ /// \param Diags [out] - On return, the diagnostics in the group.
+ void getDiagnosticsInGroup(const WarningOption *Group,
+ llvm::SmallVectorImpl<diag::kind> &Diags) const;
+
+ /// \brief Based on the way the client configured the DiagnosticsEngine
+ /// object, classify the specified diagnostic ID into a Level, consumable by
+ /// the DiagnosticClient.
+ ///
+ /// \param Loc The source location we are interested in finding out the
+ /// diagnostic state. Can be null in order to query the latest state.
+ DiagnosticIDs::Level getDiagnosticLevel(unsigned DiagID, SourceLocation Loc,
+ const DiagnosticsEngine &Diag) const;
+
+ /// getDiagnosticLevel - This is an internal implementation helper used when
+ /// DiagClass is already known.
+ DiagnosticIDs::Level getDiagnosticLevel(unsigned DiagID,
+ unsigned DiagClass,
+ SourceLocation Loc,
+ const DiagnosticsEngine &Diag) const;
+
+ /// ProcessDiag - This is the method used to report a diagnostic that is
+ /// finally fully formed.
+ ///
+ /// \returns true if the diagnostic was emitted, false if it was
+ /// suppressed.
+ bool ProcessDiag(DiagnosticsEngine &Diag) const;
+
+ /// \brief Whether the diagnostic may leave the AST in a state where some
+ /// invariants can break.
+ bool isUnrecoverable(unsigned DiagID) const;
+
+ friend class DiagnosticsEngine;
+};
+
+} // end namespace clang
+
+#endif
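
  For orientation, a rough usage sketch of the interface declared above; it is illustrative
  only, exercises only members declared in this header, and the group name "unused" and the
  misspelling "unussed" are just example inputs.

  #include "clang/Basic/DiagnosticIDs.h"
  #include "llvm/ADT/IntrusiveRefCntPtr.h"
  #include "llvm/ADT/SmallVector.h"
  #include <iostream>

  int main() {
    llvm::IntrusiveRefCntPtr<clang::DiagnosticIDs> IDs(new clang::DiagnosticIDs());

    // Register (or look up) a custom diagnostic and query its description.
    unsigned MyID = IDs->getCustomDiagID(clang::DiagnosticIDs::Warning,
                                         "example custom warning");
    std::cout << "custom id " << MyID << ": "
              << IDs->getDescription(MyID).str() << "\n";

    // Expand a -W group name into the individual diagnostic IDs it controls.
    llvm::SmallVector<clang::diag::kind, 32> Members;
    if (!IDs->getDiagnosticsInGroup("unused", Members))
      std::cout << "-Wunused controls " << Members.size() << " diagnostics\n";

    // Typo correction for warning flags, e.g. "unussed" -> "unused".
    std::cout << "did you mean -W"
              << clang::DiagnosticIDs::getNearestWarningOption("unussed").str()
              << "?\n";
    return 0;
  }
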
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
new file mode 100644
index 0000000..670283e
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticLexKinds.td
@@ -0,0 +1,503 @@
+//==--- DiagnosticLexKinds.td - liblex diagnostics ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Lexer Diagnostics
+//===----------------------------------------------------------------------===//
+
+let Component = "Lex", CategoryName = "Lexical or Preprocessor Issue" in {
+
+def null_in_string : Warning<"null character(s) preserved in string literal">,
+ InGroup<NullCharacter>;
+def null_in_char : Warning<"null character(s) preserved in character literal">,
+ InGroup<NullCharacter>;
+def null_in_file : Warning<"null character ignored">, InGroup<NullCharacter>;
+def warn_nested_block_comment : Warning<"'/*' within block comment">,
+ InGroup<Comment>;
+def escaped_newline_block_comment_end : Warning<
+ "escaped newline between */ characters at block comment end">,
+ InGroup<Comment>;
+def backslash_newline_space : Warning<
+ "backslash and newline separated by space">,
+ InGroup<DiagGroup<"backslash-newline-escape">>;
+
+// Digraphs.
+def warn_cxx98_compat_less_colon_colon : Warning<
+ "'<::' is treated as digraph '<:' (aka '[') followed by ':' in C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+
+// Trigraphs.
+def trigraph_ignored : Warning<"trigraph ignored">, InGroup<Trigraphs>;
+def trigraph_ignored_block_comment : Warning<
+ "ignored trigraph would end block comment">, InGroup<Trigraphs>;
+def trigraph_ends_block_comment : Warning<"trigraph ends block comment">,
+ InGroup<Trigraphs>;
+def trigraph_converted : Warning<"trigraph converted to '%0' character">,
+ InGroup<Trigraphs>;
+
+def ext_multi_line_bcpl_comment : Extension<"multi-line // comment">,
+ InGroup<Comment>;
+def ext_bcpl_comment : Extension<
+ "// comments are not allowed in this language">,
+ InGroup<Comment>;
+def ext_no_newline_eof : Extension<"no newline at end of file">,
+ InGroup<DiagGroup<"newline-eof">>;
+
+def warn_cxx98_compat_no_newline_eof : Warning<
+ "C++98 requires newline at end of file">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
+
+def ext_dollar_in_identifier : Extension<"'$' in identifier">,
+ InGroup<DiagGroup<"dollar-in-identifier-extension">>;
+def ext_charize_microsoft : Extension<"@# is a microsoft extension">,
+ InGroup<Microsoft>;
+
+def ext_token_used : Extension<"extension used">,
+ InGroup<DiagGroup<"language-extension-token">>;
+
+def warn_cxx11_keyword : Warning<"'%0' is a keyword in C++11">,
+ InGroup<CXX11Compat>, DefaultIgnore;
+
+def warn_unterminated_string : ExtWarn<"missing terminating '\"' character">;
+def warn_unterminated_char : ExtWarn<"missing terminating ' character">;
+def err_empty_character : Error<"empty character constant">;
+def err_unterminated_block_comment : Error<"unterminated /* comment">;
+def err_invalid_character_to_charify : Error<
+ "invalid argument to convert to character">;
+def err_unterminated___pragma : Error<"missing terminating ')' character">;
+
+def err_conflict_marker : Error<"version control conflict marker in file">;
+
+def err_raw_delim_too_long : Error<
+ "raw string delimiter longer than 16 characters"
+ "; use PREFIX( )PREFIX to delimit raw string">;
+def err_invalid_char_raw_delim : Error<
+ "invalid character '%0' character in raw string delimiter"
+ "; use PREFIX( )PREFIX to delimit raw string">;
+def err_unterminated_raw_string : Error<
+ "raw string missing terminating delimiter )%0\"">;
+def warn_cxx98_compat_raw_string_literal : Warning<
+ "raw string literals are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+
+def ext_multichar_character_literal : ExtWarn<
+ "multi-character character constant">, InGroup<MultiChar>;
+def ext_four_char_character_literal : Extension<
+ "multi-character character constant">, InGroup<FourByteMultiChar>;
+
+
+// Literal
+def ext_nonstandard_escape : Extension<
+ "use of non-standard escape character '\\%0'">;
+def ext_unknown_escape : ExtWarn<"unknown escape sequence '\\%0'">;
+def err_hex_escape_no_digits : Error<"\\x used with no following hex digits">;
+def err_ucn_escape_no_digits : Error<"\\u used with no following hex digits">;
+def err_ucn_escape_invalid : Error<"invalid universal character">;
+def err_ucn_escape_incomplete : Error<"incomplete universal character name">;
+def err_ucn_escape_basic_scs : Error<
+ "character '%0' cannot be specified by a universal character name">;
+def err_ucn_control_character : Error<
+ "universal character name refers to a control character">;
+def warn_cxx98_compat_literal_ucn_escape_basic_scs : Warning<
+ "specifying character '%0' with a universal character name "
+ "is incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
+def warn_cxx98_compat_literal_ucn_control_character : Warning<
+ "universal character name referring to a control character "
+ "is incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
+def err_invalid_decimal_digit : Error<"invalid digit '%0' in decimal constant">;
+def err_invalid_binary_digit : Error<"invalid digit '%0' in binary constant">;
+def err_invalid_octal_digit : Error<"invalid digit '%0' in octal constant">;
+def err_invalid_suffix_integer_constant : Error<
+ "invalid suffix '%0' on integer constant">;
+def err_invalid_suffix_float_constant : Error<
+ "invalid suffix '%0' on floating constant">;
+def warn_extraneous_char_constant : Warning<
+ "extraneous characters in character constant ignored">;
+def warn_char_constant_too_large : Warning<
+ "character constant too long for its type">;
+def err_multichar_utf_character_literal : Error<
+ "Unicode character literals may not contain multiple characters">;
+def err_exponent_has_no_digits : Error<"exponent has no digits">;
+def ext_imaginary_constant : Extension<"imaginary constants are an extension">;
+def err_hexconstant_requires_exponent : Error<
+ "hexadecimal floating constants require an exponent">;
+def err_hexconstant_requires_digits : Error<
+ "hexadecimal floating constants require a significand">;
+def ext_hexconstant_invalid : Extension<
+ "hexadecimal floating constants are a C99 feature">;
+def ext_binary_literal : Extension<
+ "binary integer literals are an extension">;
+def err_pascal_string_too_long : Error<"Pascal string is too long">;
+def warn_octal_escape_too_large : ExtWarn<"octal escape sequence out of range">;
+def warn_hex_escape_too_large : ExtWarn<"hex escape sequence out of range">;
+def ext_string_too_long : Extension<"string literal of length %0 exceeds "
+ "maximum length %1 that %select{C90|ISO C99|C++}2 compilers are required to "
+ "support">, InGroup<OverlengthStrings>;
+def err_character_too_large : Error<
+ "character too large for enclosing character literal type">;
+def warn_ucn_not_valid_in_c89 : ExtWarn<
+ "unicode escape sequences are only valid in C99 or C++">, InGroup<Unicode>;
+def warn_cxx98_compat_unicode_literal : Warning<
+ "unicode literals are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def warn_cxx11_compat_user_defined_literal : Warning<
+ "identifier after literal will be treated as a user-defined literal suffix "
+ "in C++11">, InGroup<CXX11Compat>, DefaultIgnore;
+def warn_cxx11_compat_reserved_user_defined_literal : Warning<
+ "identifier after literal will be treated as a reserved user-defined literal "
+ "suffix in C++11">,
+ InGroup<CXX11CompatReservedUserDefinedLiteral>, DefaultIgnore;
+def ext_reserved_user_defined_literal : ExtWarn<
+ "invalid suffix on literal; C++11 requires a space between literal and "
+ "identifier">, InGroup<ReservedUserDefinedLiteral>, DefaultError;
+def ext_ms_reserved_user_defined_literal : ExtWarn<
+ "invalid suffix on literal; C++11 requires a space between literal and "
+ "identifier">, InGroup<ReservedUserDefinedLiteral>;
+def err_unsupported_string_concat : Error<
+ "unsupported non-standard concatenation of string literals">;
+def err_string_concat_mixed_suffix : Error<
+ "differing user-defined suffixes ('%0' and '%1') in string literal "
+ "concatenation">;
+def err_pp_invalid_udl : Error<
+ "%select{character|integer}0 literal with user-defined suffix "
+ "cannot be used in preprocessor constant expression">;
+def err_bad_string_encoding : Error<
+ "illegal character encoding in string literal">;
+def warn_bad_string_encoding : ExtWarn<
+ "illegal character encoding in string literal">,
+ InGroup<DiagGroup<"invalid-source-encoding">>;
+def err_bad_character_encoding : Error<
+ "illegal character encoding in character literal">;
+def warn_bad_character_encoding : ExtWarn<
+ "illegal character encoding in character literal">,
+ InGroup<DiagGroup<"invalid-source-encoding">>;
+
+
+//===----------------------------------------------------------------------===//
+// PTH Diagnostics
+//===----------------------------------------------------------------------===//
+def err_invalid_pth_file : Error<
+ "invalid or corrupt PTH file '%0'">;
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Diagnostics
+//===----------------------------------------------------------------------===//
+
+let CategoryName = "User Defined Issues" in {
+def pp_hash_warning : Warning<"%0">,
+ InGroup<PoundWarning>, DefaultWarnShowInSystemHeader;
+def err_pp_hash_error : Error<"%0">;
+}
+
+def pp_include_next_in_primary : Warning<
+ "#include_next in primary source file">;
+def pp_include_macros_out_of_predefines : Error<
+ "the #__include_macros directive is only for internal use by -imacros">;
+def pp_include_next_absolute_path : Warning<"#include_next with absolute path">;
+def ext_c99_whitespace_required_after_macro_name : ExtWarn<
+ "ISO C99 requires whitespace after the macro name">, InGroup<C99>;
+def ext_missing_whitespace_after_macro_name : ExtWarn<
+ "whitespace required after macro name">;
+def warn_missing_whitespace_after_macro_name : Warning<
+ "whitespace recommended after macro name">;
+
+def pp_pragma_once_in_main_file : Warning<"#pragma once in main file">;
+def pp_pragma_sysheader_in_main_file : Warning<
+ "#pragma system_header ignored in main file">;
+def pp_poisoning_existing_macro : Warning<"poisoning existing macro">;
+def pp_out_of_date_dependency : Warning<
+ "current file is older than dependency %0">;
+def pp_undef_builtin_macro : Warning<"undefining builtin macro">;
+def pp_redef_builtin_macro : Warning<"redefining builtin macro">,
+ InGroup<DiagGroup<"builtin-macro-redefined">>;
+def pp_disabled_macro_expansion : Warning<
+ "disabled expansion of recursive macro">, DefaultIgnore,
+ InGroup<DiagGroup<"disabled-macro-expansion">>;
+def pp_macro_not_used : Warning<"macro is not used">, DefaultIgnore,
+ InGroup<DiagGroup<"unused-macros">>;
+def warn_pp_undef_identifier : Warning<
+ "%0 is not defined, evaluates to 0">,
+ InGroup<DiagGroup<"undef">>, DefaultIgnore;
+
+def pp_invalid_string_literal : Warning<
+ "invalid string literal, ignoring final '\\'">;
+def warn_pp_expr_overflow : Warning<
+ "integer overflow in preprocessor expression">;
+def warn_pp_convert_lhs_to_positive : Warning<
+ "left side of operator converted from negative value to unsigned: %0">;
+def warn_pp_convert_rhs_to_positive : Warning<
+ "right side of operator converted from negative value to unsigned: %0">;
+
+def ext_pp_import_directive : Extension<"#import is a language extension">,
+ InGroup<DiagGroup<"import-preprocessor-directive-pedantic">>;
+def err_pp_import_directive_ms : Error<
+ "#import of type library is an unsupported Microsoft feature">;
+
+def ext_pp_ident_directive : Extension<"#ident is a language extension">;
+def ext_pp_include_next_directive : Extension<
+ "#include_next is a language extension">;
+def ext_pp_warning_directive : Extension<"#warning is a language extension">;
+
+def ext_pp_extra_tokens_at_eol : ExtWarn<
+ "extra tokens at end of #%0 directive">, InGroup<ExtraTokens>;
+
+def ext_pp_comma_expr : Extension<"comma operator in operand of #if">;
+def ext_pp_bad_vaargs_use : Extension<
+ "__VA_ARGS__ can only appear in the expansion of a C99 variadic macro">;
+def ext_pp_macro_redef : ExtWarn<"%0 macro redefined">;
+def ext_variadic_macro : Extension<"variadic macros were introduced in C99">,
+ InGroup<VariadicMacros>;
+def warn_cxx98_compat_variadic_macro : Warning<
+ "variadic macros are incompatible with C++98">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
+def ext_named_variadic_macro : Extension<
+ "named variadic macros are a GNU extension">, InGroup<VariadicMacros>;
+def err_embedded_include : Error<
+ "embedding a #%0 directive within macro arguments is not supported">;
+def ext_embedded_directive : Extension<
+ "embedding a directive within macro arguments has undefined behavior">,
+ InGroup<DiagGroup<"embedded-directive">>;
+def ext_missing_varargs_arg : Extension<
+ "varargs argument missing, but tolerated as an extension">;
+def ext_empty_fnmacro_arg : Extension<
+ "empty macro arguments were standardized in C99">;
+def warn_cxx98_compat_empty_fnmacro_arg : Warning<
+ "empty macro argument list is incompatible with C++98">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
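+// As a hypothetical illustration, macros like the following would typically draw
+// the variadic- and empty-argument diagnostics above under -pedantic in pre-C99
+// (or C++98) modes:
+//   #define DBG(...) fprintf(stderr, __VA_ARGS__)   // ext_variadic_macro
+//   #define SWALLOW(x)
+//   SWALLOW();                                      // ext_empty_fnmacro_arg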
+
+def err_pp_invalid_directive : Error<"invalid preprocessing directive">;
+def err_pp_file_not_found : Error<"'%0' file not found">, DefaultFatal;
+def err_pp_error_opening_file : Error<
+ "error opening file '%0': %1">, DefaultFatal;
+def err_pp_empty_filename : Error<"empty filename">;
+def err_pp_include_too_deep : Error<"#include nested too deeply">;
+def err_pp_expects_filename : Error<"expected \"FILENAME\" or <FILENAME>">;
+def err_pp_macro_not_identifier : Error<"macro names must be identifiers">;
+def err_pp_missing_macro_name : Error<"macro name missing">;
+def err_pp_missing_rparen_in_macro_def : Error<
+ "missing ')' in macro parameter list">;
+def err_pp_invalid_tok_in_arg_list : Error<
+ "invalid token in macro parameter list">;
+def err_pp_expected_ident_in_arg_list : Error<
+ "expected identifier in macro parameter list">;
+def err_pp_expected_comma_in_arg_list : Error<
+ "expected comma in macro parameter list">;
+def err_pp_duplicate_name_in_arg_list : Error<
+ "duplicate macro parameter name %0">;
+def err_pp_stringize_not_parameter : Error<
+ "'#' is not followed by a macro parameter">;
+def err_pp_malformed_ident : Error<"invalid #ident directive">;
+def err_pp_unterminated_conditional : Error<
+ "unterminated conditional directive">;
+def pp_err_else_after_else : Error<"#else after #else">;
+def pp_err_elif_after_else : Error<"#elif after #else">;
+def pp_err_else_without_if : Error<"#else without #if">;
+def pp_err_elif_without_if : Error<"#elif without #if">;
+def err_pp_endif_without_if : Error<"#endif without #if">;
+def err_pp_expected_value_in_expr : Error<"expected value in expression">;
+def err_pp_expected_rparen : Error<"expected ')' in preprocessor expression">;
+def err_pp_expected_eol : Error<
+ "expected end of line in preprocessor expression">;
+def err_pp_defined_requires_identifier : Error<
+ "operator 'defined' requires an identifier">;
+def err_pp_missing_lparen : Error<"missing '(' after '%0'">;
+def err_pp_missing_rparen : Error<"missing ')' after '%0'">;
+def err_pp_colon_without_question : Error<"':' without preceding '?'">;
+def err_pp_division_by_zero : Error<
+ "division by zero in preprocessor expression">;
+def err_pp_remainder_by_zero : Error<
+ "remainder by zero in preprocessor expression">;
+def err_pp_expr_bad_token_binop : Error<
+ "token is not a valid binary operator in a preprocessor subexpression">;
+def err_pp_expr_bad_token_start_expr : Error<
+ "invalid token at start of a preprocessor expression">;
+def err_pp_invalid_poison : Error<"can only poison identifier tokens">;
+def err_pp_used_poisoned_id : Error<"attempt to use a poisoned identifier">;
+
+def err_feature_check_malformed : Error<
+ "builtin feature check macro requires a parenthesized identifier">;
+
+def err_warning_check_malformed : Error<
+ "builtin warning check macro requires a parenthesized string">,
+ InGroup<MalformedWarningCheck>;
+def warn_has_warning_invalid_option :
+ ExtWarn<"__has_warning expected option name (e.g. \"-Wundef\")">,
+ InGroup<MalformedWarningCheck>;
+
+def warn_pragma_include_alias_mismatch_angle :
+ ExtWarn<"angle-bracketed include <%0> cannot be aliased to double-quoted "
+ "include \"%1\"">, InGroup<UnknownPragmas>;
+def warn_pragma_include_alias_mismatch_quote :
+ ExtWarn<"double-quoted include \"%0\" cannot be aliased to angle-bracketed "
+ "include <%1>">, InGroup<UnknownPragmas>;
+def warn_pragma_include_alias_expected :
+ ExtWarn<"pragma include_alias expected '%0'">,
+ InGroup<UnknownPragmas>;
+def warn_pragma_include_alias_expected_filename :
+ ExtWarn<"pragma include_alias expected include filename">,
+ InGroup<UnknownPragmas>;
+
+def err__Pragma_malformed : Error<
+ "_Pragma takes a parenthesized string literal">;
+def err_pragma_comment_malformed : Error<
+ "pragma comment requires parenthesized identifier and optional string">;
+def err_pragma_message_malformed : Error<
+ "pragma message requires parenthesized string">;
+def err_pragma_push_pop_macro_malformed : Error<
+ "pragma %0 requires a parenthesized string">;
+def warn_pragma_pop_macro_no_push : Warning<
+ "pragma pop_macro could not pop '%0', no matching push_macro">;
+def warn_pragma_message : Warning<"%0">,
+ InGroup<PoundPragmaMessage>, DefaultWarnNoWerror;
+def warn_pragma_ignored : Warning<"unknown pragma ignored">,
+ InGroup<UnknownPragmas>, DefaultIgnore;
+def ext_stdc_pragma_ignored : ExtWarn<"unknown pragma in STDC namespace">,
+ InGroup<UnknownPragmas>;
+def ext_on_off_switch_syntax :
+ ExtWarn<"expected 'ON' or 'OFF' or 'DEFAULT' in pragma">,
+ InGroup<UnknownPragmas>;
+def ext_pragma_syntax_eod :
+ ExtWarn<"expected end of directive in pragma">,
+ InGroup<UnknownPragmas>;
+def warn_stdc_fenv_access_not_supported :
+ Warning<"pragma STDC FENV_ACCESS ON is not supported, ignoring pragma">,
+ InGroup<UnknownPragmas>;
+def warn_pragma_diagnostic_invalid :
+ ExtWarn<"pragma diagnostic expected 'error', 'warning', 'ignored', 'fatal',"
+ " 'push', or 'pop'">,
+ InGroup<UnknownPragmas>;
+def warn_pragma_diagnostic_cannot_pop :
+ ExtWarn<"pragma diagnostic pop could not pop, no matching push">,
+ InGroup<UnknownPragmas>;
+def warn_pragma_diagnostic_invalid_option :
+ ExtWarn<"pragma diagnostic expected option name (e.g. \"-Wundef\")">,
+ InGroup<UnknownPragmas>;
+def warn_pragma_diagnostic_invalid_token :
+ ExtWarn<"unexpected token in pragma diagnostic">,
+ InGroup<UnknownPragmas>;
+def warn_pragma_diagnostic_unknown_warning :
+ ExtWarn<"unknown warning group '%0', ignored">,
+ InGroup<UnknownPragmas>;
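+// For reference, a hypothetical use of the pragmas checked above; a 'pop' with
+// no matching 'push' would draw warn_pragma_diagnostic_cannot_pop:
+//   #pragma clang diagnostic push
+//   #pragma clang diagnostic ignored "-Wunused-variable"
+//   static void probe(void) { int scratch; }
+//   #pragma clang diagnostic pop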
+// - #pragma __debug
+def warn_pragma_debug_unexpected_command : Warning<
+ "unexpected debug command '%0'">;
+
+def err_pragma_comment_unknown_kind : Error<"unknown kind of pragma comment">;
+def err_defined_macro_name : Error<"'defined' cannot be used as a macro name">;
+def err_paste_at_start : Error<
+ "'##' cannot appear at start of macro expansion">;
+def err_paste_at_end : Error<"'##' cannot appear at end of macro expansion">;
+def ext_paste_comma : Extension<
+ "use of comma pasting extension is non-portable">;
+def err_unterm_macro_invoc : Error<
+ "unterminated function-like macro invocation">;
+def err_too_many_args_in_macro_invoc : Error<
+ "too many arguments provided to function-like macro invocation">;
+def err_too_few_args_in_macro_invoc : Error<
+ "too few arguments provided to function-like macro invocation">;
+def err_pp_bad_paste : Error<
+ "pasting formed '%0', an invalid preprocessing token">;
+def err_pp_bad_paste_ms : Warning<
+ "pasting formed '%0', an invalid preprocessing token">, DefaultError,
+ InGroup<DiagGroup<"invalid-token-paste">>;
+def err_pp_operator_used_as_macro_name : Error<
+ "C++ operator '%0' cannot be used as a macro name">;
+def err_pp_illegal_floating_literal : Error<
+ "floating point literal in preprocessor expression">;
+def err_pp_line_requires_integer : Error<
+ "#line directive requires a positive integer argument">;
+def err_pp_line_invalid_filename : Error<
+ "invalid filename for #line directive">;
+def warn_pp_line_decimal : Warning<
+ "#line directive interprets number as decimal, not octal">;
+def err_pp_line_digit_sequence : Error<
+ "#line directive requires a simple digit sequence">;
+def err_pp_linemarker_requires_integer : Error<
+ "line marker directive requires a positive integer argument">;
+def err_pp_linemarker_invalid_filename : Error<
+ "invalid filename for line marker directive">;
+def err_pp_linemarker_invalid_flag : Error<
+ "invalid flag in line marker directive">;
+def err_pp_linemarker_invalid_pop : Error<
+ "invalid line marker flag '2': cannot pop empty include stack">;
+def ext_pp_line_too_big : Extension<
+ "C requires #line number to be less than %0, allowed as extension">;
+def warn_cxx98_compat_pp_line_too_big : Warning<
+ "#line number greater than 32767 is incompatible with C++98">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
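+// Hypothetical #line directives illustrating the checks above:
+//   #line 100 "generated.c"    // accepted
+//   #line 010 "generated.c"    // warn_pp_line_decimal: read as decimal 10, not octal
+//   #line 0x10 "generated.c"   // err_pp_line_digit_sequence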
+
+def err_pp_visibility_non_macro : Error<"no macro named %0">;
+
+def err_pp_arc_cf_code_audited_syntax : Error<"expected 'begin' or 'end'">;
+def err_pp_double_begin_of_arc_cf_code_audited : Error<
+ "already inside '#pragma clang arc_cf_code_audited'">;
+def err_pp_unmatched_end_of_arc_cf_code_audited : Error<
+ "not currently inside '#pragma clang arc_cf_code_audited'">;
+def err_pp_include_in_arc_cf_code_audited : Error<
+ "cannot #include files inside '#pragma clang arc_cf_code_audited'">;
+def err_pp_eof_in_arc_cf_code_audited : Error<
+ "'#pragma clang arc_cf_code_audited' was not ended within this file">;
+
+// Module map parsing
+def err_mmap_unknown_token : Error<"skipping stray token">;
+def err_mmap_expected_module : Error<"expected module declaration">;
+def err_mmap_expected_module_name : Error<"expected module name">;
+def err_mmap_expected_lbrace : Error<"expected '{' to start module '%0'">;
+def err_mmap_expected_rbrace : Error<"expected '}'">;
+def note_mmap_lbrace_match : Note<"to match this '{'">;
+def err_mmap_expected_rsquare : Error<"expected ']' to close attribute">;
+def note_mmap_lsquare_match : Note<"to match this '['">;
+def err_mmap_expected_member : Error<
+ "expected umbrella, header, submodule, or module export">;
+def err_mmap_expected_header : Error<"expected a header name after '%0'">;
+def err_mmap_module_redefinition : Error<
+ "redefinition of module '%0'">;
+def note_mmap_prev_definition : Note<"previously defined here">;
+def err_mmap_header_conflict : Error<
+ "header '%0' is already part of module '%1'">;
+def err_mmap_header_not_found : Error<
+ "%select{|umbrella }0header '%1' not found">;
+def err_mmap_umbrella_dir_not_found : Error<
+ "umbrella directory '%0' not found">;
+def err_mmap_umbrella_clash : Error<
+ "umbrella for module '%0' already covers this directory">;
+def err_mmap_export_module_id : Error<
+ "expected an exported module name or '*'">;
+def err_mmap_missing_module_unqualified : Error<
+ "no module named '%0' visible from '%1'">;
+def err_mmap_missing_module_qualified : Error<
+ "no module named '%0' in '%1'">;
+def err_mmap_top_level_inferred_submodule : Error<
+ "only submodules may be inferred with wildcard syntax">;
+def err_mmap_inferred_no_umbrella : Error<
+ "inferred submodules require a module with an umbrella">;
+def err_mmap_inferred_redef : Error<
+ "redefinition of inferred submodule">;
+def err_mmap_expected_lbrace_wildcard : Error<
+ "expected '{' to start inferred submodule">;
+def err_mmap_expected_wildcard_member : Error<
+ "expected module export wildcard">;
+def err_mmap_expected_export_wildcard : Error<
+ "only '*' can be exported from an inferred submodule">;
+def err_mmap_explicit_top_level : Error<
+ "'explicit' is not permitted on top-level modules">;
+def err_mmap_nested_submodule_id : Error<
+ "qualified module name can only be used to define modules at the top level">;
+def err_mmap_expected_feature : Error<"expected a feature name">;
+def err_mmap_expected_attribute : Error<"expected an attribute name">;
+def warn_mmap_unknown_attribute : Warning<"unknown attribute '%0'">,
+ InGroup<IgnoredAttributes>;
+
+def warn_auto_module_import : Warning<
+ "treating #%select{include|import|include_next|__include_macros}0 as an "
+ "import of module '%1'">, InGroup<AutoImport>, DefaultIgnore;
+def warn_uncovered_module_header : Warning<
+ "umbrella header does not include header '%0'">, InGroup<IncompleteUmbrella>;
+
+}
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
new file mode 100644
index 0000000..c183da7
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticParseKinds.td
@@ -0,0 +1,718 @@
+//==--- DiagnosticParseKinds.td - libparse diagnostics --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Parser Diagnostics
+//===----------------------------------------------------------------------===//
+
+let Component = "Parse" in {
+
+def w_asm_qualifier_ignored : Warning<"ignored %0 qualifier on asm">,
+ CatInlineAsm;
+def warn_file_asm_volatile : Warning<
+ "meaningless 'volatile' on asm outside function">, CatInlineAsm;
+
+let CategoryName = "Parse Issue" in {
+
+def ext_empty_source_file : Extension<"ISO C forbids an empty source file">;
+def ext_top_level_semi : Extension<
+ "extra ';' outside of a function">;
+def warn_cxx98_compat_top_level_semi : Warning<
+ "extra ';' outside of a function is incompatible with C++98">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
+def ext_extra_struct_semi : Extension<
+ "extra ';' inside a %0">;
+def ext_extra_ivar_semi : Extension<
+ "extra ';' inside instance variable list">;
+
+def ext_duplicate_declspec : Extension<"duplicate '%0' declaration specifier">;
+def ext_plain_complex : ExtWarn<
+ "plain '_Complex' requires a type specifier; assuming '_Complex double'">;
+def ext_integer_complex : Extension<
+ "complex integer types are an extension">;
+def ext_thread_before : Extension<"'__thread' before 'static'">;
+
+def ext_empty_struct_union : Extension<
+ "empty %select{struct|union}0 is a GNU extension">, InGroup<GNU>;
+def warn_empty_struct_union_compat : Warning<"empty %select{struct|union}0 "
+ "has size 0 in C, size 1 in C++">, InGroup<CXXCompat>, DefaultIgnore;
+def error_empty_enum : Error<"use of empty enum">;
+def err_invalid_sign_spec : Error<"'%0' cannot be signed or unsigned">;
+def err_invalid_short_spec : Error<"'short %0' is invalid">;
+def err_invalid_long_spec : Error<"'long %0' is invalid">;
+def err_invalid_longlong_spec : Error<"'long long %0' is invalid">;
+def err_invalid_complex_spec : Error<"'_Complex %0' is invalid">;
+def err_friend_storage_spec : Error<"'%0' is invalid in friend declarations">;
+
+def ext_ident_list_in_param : Extension<
+ "type-less parameter names in function declaration">;
+def ext_c99_variable_decl_in_for_loop : Extension<
+ "variable declaration in for loop is a C99-specific feature">, InGroup<C99>;
+def ext_c99_compound_literal : Extension<
+ "compound literals are a C99-specific feature">, InGroup<C99>;
+def ext_c99_flexible_array_member : Extension<
+ "flexible array members are a C99-specific feature">, InGroup<C99>;
+def ext_enumerator_list_comma : Extension<
+ "commas at the end of enumerator lists are a %select{C99|C++11}0-specific "
+ "feature">;
+def warn_cxx98_compat_enumerator_list_comma : Warning<
+ "commas at the end of enumerator lists are incompatible with C++98">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
+def err_enumerator_list_missing_comma : Error<
+ "missing ',' between enumerators">;
+def err_enumerator_unnamed_no_def : Error<
+ "unnamed enumeration must be a definition">;
+def ext_ms_enum_fixed_underlying_type : Extension<
+ "enumeration types with a fixed underlying type are a Microsoft extension">,
+ InGroup<Microsoft>;
+def warn_cxx98_compat_enum_fixed_underlying_type : Warning<
+ "enumeration types with a fixed underlying type are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def warn_cxx98_compat_alignof : Warning<
+ "alignof expressions are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+
+def warn_microsoft_dependent_exists : Warning<
+ "dependent %select{__if_not_exists|__if_exists}0 declarations are ignored">,
+ InGroup<DiagGroup<"microsoft-exists">>;
+
+def ext_c11_generic_selection : Extension<
+ "generic selections are a C11-specific feature">, InGroup<C11>;
+def err_duplicate_default_assoc : Error<
+ "duplicate default generic association">;
+def note_previous_default_assoc : Note<
+ "previous default generic association is here">;
+
+def ext_c11_alignas : Extension<
+ "_Alignas is a C11-specific feature">, InGroup<C11>;
+
+def ext_gnu_indirect_goto : Extension<
+ "use of GNU indirect-goto extension">, InGroup<GNU>;
+def ext_gnu_address_of_label : Extension<
+ "use of GNU address-of-label extension">, InGroup<GNU>;
+def ext_gnu_local_label : Extension<
+ "use of GNU locally declared label extension">, InGroup<GNU>;
+def ext_gnu_statement_expr : Extension<
+ "use of GNU statement expression extension">, InGroup<GNU>;
+def ext_gnu_conditional_expr : Extension<
+ "use of GNU ?: expression extension, eliding middle term">, InGroup<GNU>;
+def ext_gnu_empty_initializer : Extension<
+ "use of GNU empty initializer extension">, InGroup<GNU>;
+def ext_gnu_array_range : Extension<"use of GNU array range extension">,
+ InGroup<GNUDesignator>;
+def ext_gnu_missing_equal_designator : ExtWarn<
+ "use of GNU 'missing =' extension in designator">,
+ InGroup<GNUDesignator>;
+def err_expected_equal_designator : Error<"expected '=' or another designator">;
+def ext_gnu_old_style_field_designator : ExtWarn<
+ "use of GNU old-style field designator extension">,
+ InGroup<GNUDesignator>;
+def ext_gnu_case_range : Extension<"use of GNU case range extension">,
+ InGroup<GNU>;
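+// A hypothetical switch using the GNU case-range extension (ext_gnu_case_range):
+//   switch (c) {
+//   case '0' ... '9': return 1;
+//   default:          return 0;
+//   }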
+
+// Generic errors.
+def err_expected_expression : Error<"expected expression">;
+def err_expected_type : Error<"expected a type">;
+def err_expected_external_declaration : Error<"expected external declaration">;
+def err_extraneous_closing_brace : Error<"extraneous closing brace ('}')">;
+def err_expected_ident : Error<"expected identifier">;
+def err_expected_ident_lparen : Error<"expected identifier or '('">;
+def err_expected_ident_lbrace : Error<"expected identifier or '{'">;
+def err_expected_lbrace : Error<"expected '{'">;
+def err_expected_lparen : Error<"expected '('">;
+def err_expected_lparen_or_lbrace : Error<"expected '(' or '{'">;
+def err_expected_rparen : Error<"expected ')'">;
+def err_expected_lsquare : Error<"expected '['">;
+def err_expected_rsquare : Error<"expected ']'">;
+def err_expected_rbrace : Error<"expected '}'">;
+def err_expected_greater : Error<"expected '>'">;
+def err_expected_ggg : Error<"expected '>>>'">;
+def err_expected_semi_declaration : Error<
+ "expected ';' at end of declaration">;
+def err_expected_semi_decl_list : Error<
+ "expected ';' at end of declaration list">;
+def ext_expected_semi_decl_list : ExtWarn<
+ "expected ';' at end of declaration list">;
+def err_expected_member_name_or_semi : Error<
+ "expected member name or ';' after declaration specifiers">;
+def err_function_declared_typedef : Error<
+ "function definition declared 'typedef'">;
+def err_iboutletcollection_builtintype : Error<
+ "type argument of iboutletcollection attribute cannot be a builtin type">;
+def err_iboutletcollection_with_protocol : Error<
+ "invalid argument of iboutletcollection attribute">;
+def err_at_defs_cxx : Error<"@defs is not supported in Objective-C++">;
+def err_at_in_class : Error<"unexpected '@' in member specification">;
+
+def err_expected_fn_body : Error<
+ "expected function body after function declarator">;
+def warn_attribute_on_function_definition : Warning<
+ "GCC does not allow %0 attribute in this position on a function definition">,
+ InGroup<GccCompat>;
+def warn_attribute_no_decl : Warning<
+ "attribute %0 ignored, because it is not attached to a declaration">,
+ InGroup<IgnoredAttributes>;
+def err_expected_method_body : Error<"expected method body">;
+def err_invalid_token_after_toplevel_declarator : Error<
+ "expected ';' after top level declarator">;
+def err_invalid_token_after_declarator_suggest_equal : Error<
+ "invalid '%0' at end of declaration; did you mean '='?">;
+def err_expected_statement : Error<"expected statement">;
+def err_expected_lparen_after : Error<"expected '(' after '%0'">;
+def err_expected_lparen_after_id : Error<"expected '(' after %0">;
+def err_expected_less_after : Error<"expected '<' after '%0'">;
+def err_expected_equal_after : Error<"expected '=' after %0">;
+def err_expected_comma : Error<"expected ','">;
+def err_expected_lbrace_in_compound_literal : Error<
+ "expected '{' in compound literal">;
+def err_expected_while : Error<"expected 'while' in do/while loop">;
+
+def err_expected_semi_after : Error<"expected ';' after %0">;
+def err_expected_semi_after_stmt : Error<"expected ';' after %0 statement">;
+def err_expected_semi_after_expr : Error<"expected ';' after expression">;
+def err_extraneous_token_before_semi : Error<"extraneous '%0' before ';'">;
+
+def err_expected_semi_after_method_proto : Error<
+ "expected ';' after method prototype">;
+def err_expected_semi_after_namespace_name : Error<
+ "expected ';' after namespace name">;
+def err_unexpected_namespace_attributes_alias : Error<
+ "attributes cannot be specified on a namespace alias">;
+def err_inline_namespace_alias : Error<"namespace alias cannot be inline">;
+def err_namespace_nonnamespace_scope : Error<
+ "namespaces can only be defined in global or namespace scope">;
+def err_nested_namespaces_with_double_colon : Error<
+ "nested namespace definition must define each namespace separately">;
+def err_expected_semi_after_attribute_list : Error<
+ "expected ';' after attribute list">;
+def err_expected_semi_after_static_assert : Error<
+ "expected ';' after static_assert">;
+def err_expected_semi_for : Error<"expected ';' in 'for' statement specifier">;
+def err_expected_colon_after : Error<"expected ':' after %0">;
+def err_label_end_of_compound_statement : Error<
+ "label at end of compound statement: expected statement">;
+def err_address_of_label_outside_fn : Error<
+ "use of address-of-label extension outside of a function body">;
+def err_expected_string_literal : Error<"expected string literal">;
+def err_asm_operand_wide_string_literal : Error<
+ "cannot use %select{unicode|wide}0 string literal in 'asm'">;
+def err_expected_selector_for_method : Error<
+ "expected selector for Objective-C method">;
+def err_expected_property_name : Error<"expected property name">;
+
+def err_unexpected_at : Error<"unexpected '@' in program">;
+
+def err_invalid_reference_qualifier_application : Error<
+ "'%0' qualifier may not be applied to a reference">;
+def err_illegal_decl_reference_to_reference : Error<
+ "%0 declared as a reference to a reference">;
+def ext_rvalue_reference : ExtWarn<
+ "rvalue references are a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_rvalue_reference : Warning<
+ "rvalue references are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def ext_ref_qualifier : ExtWarn<
+ "reference qualifiers on functions are a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_ref_qualifier : Warning<
+ "reference qualifiers on functions are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def ext_inline_namespace : ExtWarn<
+ "inline namespaces are a C++11 feature">, InGroup<CXX11>;
+def warn_cxx98_compat_inline_namespace : Warning<
+ "inline namespaces are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def ext_generalized_initializer_lists : ExtWarn<
+ "generalized initializer lists are a C++11 extension">,
+ InGroup<CXX11>;
+def warn_cxx98_compat_generalized_initializer_lists : Warning<
+ "generalized initializer lists are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_init_list_bin_op : Error<"initializer list cannot be used on the "
+ "%select{left|right}0 hand side of operator '%1'">;
+def warn_cxx98_compat_trailing_return_type : Warning<
+ "trailing return types are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def ext_auto_type_specifier : ExtWarn<
+ "'auto' type specifier is a C++11 extension">, InGroup<CXX11>;
+def warn_auto_storage_class : Warning<
+ "'auto' storage class specifier is redundant and incompatible with C++11">,
+ InGroup<CXX11Compat>, DefaultIgnore;
+def ext_auto_storage_class : ExtWarn<
+ "'auto' storage class specifier is not permitted in C++11, and will not "
+ "be supported in future releases">, InGroup<DiagGroup<"auto-storage-class">>;
+def ext_for_range : ExtWarn<
+ "range-based for loop is a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_for_range : Warning<
+ "range-based for loop is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_for_range_expected_decl : Error<
+ "for range declaration must declare a variable">;
+def err_argument_required_after_attribute : Error<
+ "argument required after attribute">;
+def err_missing_param : Error<"expected parameter declarator">;
+def err_missing_comma_before_ellipsis : Error<
+ "C requires a comma prior to the ellipsis in a variadic function type">;
+def err_unexpected_typedef_ident : Error<
+ "unexpected type name %0: expected identifier">;
+def warn_cxx98_compat_decltype : Warning<
+ "'decltype' type specifier is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_unexpected_scope_on_base_decltype : Error<
+ "unexpected namespace scope prior to decltype">;
+def err_expected_class_name : Error<"expected class name">;
+def err_expected_class_name_not_template :
+ Error<"'typename' is redundant; base classes are implicitly types">;
+def err_unspecified_vla_size_with_static : Error<
+ "'static' may not be used with an unspecified variable length array size">;
+
+def err_expected_case_before_expression: Error<
+ "expected 'case' keyword before expression">;
+
+// Declarations.
+def err_typename_requires_specqual : Error<
+ "type name requires a specifier or qualifier">;
+def err_typename_invalid_storageclass : Error<
+ "type name does not allow storage class to be specified">;
+def err_typename_invalid_functionspec : Error<
+ "type name does not allow function specifier to be specified">;
+def err_typename_invalid_constexpr : Error<
+ "type name does not allow constexpr specifier to be specified">;
+def err_typename_identifiers_only : Error<
+ "typename is allowed for identifiers only">;
+
+def err_invalid_decl_spec_combination : Error<
+ "cannot combine with previous '%0' declaration specifier">;
+def err_invalid_vector_decl_spec_combination : Error<
+ "cannot combine with previous '%0' declaration specifier. "
+ "'__vector' must be first">;
+def err_invalid_pixel_decl_spec_combination : Error<
+ "'__pixel' must be preceded by '__vector'. "
+ "'%0' declaration specifier not allowed here">;
+def err_invalid_vector_decl_spec : Error<
+ "cannot use '%0' with '__vector'">;
+def err_invalid_vector_bool_decl_spec : Error<
+ "cannot use '%0' with '__vector bool'">;
+def warn_vector_long_decl_spec_combination : Warning<
+ "use of 'long' with '__vector' is deprecated">, InGroup<Deprecated>;
+def err_friend_invalid_in_context : Error<
+ "'friend' used outside of class">;
+def err_unknown_typename : Error<
+ "unknown type name %0">;
+def err_use_of_tag_name_without_tag : Error<
+ "must use '%1' tag to refer to type %0%select{| in this scope}2">;
+def err_templated_using_directive : Error<
+ "cannot template a using directive">;
+def err_templated_using_declaration : Error<
+ "cannot template a using declaration">;
+def err_unexected_colon_in_nested_name_spec : Error<
+ "unexpected ':' in nested name specifier">;
+def err_bool_redeclaration : Error<
+ "redeclaration of C++ built-in type 'bool'">;
+def ext_c11_static_assert : Extension<
+ "_Static_assert is a C11-specific feature">, InGroup<C11>;
+def warn_cxx98_compat_static_assert : Warning<
+ "static_assert declarations are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
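+// Hypothetical assertions matching the diagnostics above:
+//   _Static_assert(sizeof(long) >= 4, "long too small");  // ext_c11_static_assert pre-C11
+//   static_assert(sizeof(long) >= 4, "long too small");   // flagged by -Wc++98-compat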
+
+/// Objective-C parser diagnostics
+def err_expected_minus_or_plus : Error<
+ "method type specifier must start with '-' or '+'">;
+def err_objc_no_attributes_on_category : Error<
+ "attributes may not be specified on a category">;
+def err_objc_missing_end : Error<"missing '@end'">;
+def note_objc_container_start : Note<
+ "%select{class|protocol|category|class extension|implementation"
+ "|category implementation}0 started here">;
+def warn_objc_protocol_qualifier_missing_id : Warning<
+ "protocol qualifiers without 'id' are archaic">;
+def err_objc_unknown_at : Error<"expected an Objective-C directive after '@'">;
+def err_illegal_super_cast : Error<
+ "cannot cast 'super' (it isn't an expression)">;
+def err_nsnumber_nonliteral_unary : Error<
+ "@%0 must be followed by a number to form an NSNumber object">;
+
+let CategoryName = "ARC Parse Issue" in {
+def err_arc_bridge_retain : Error<
+ "unknown cast annotation __bridge_retain; did you mean __bridge_retained?">;
+// To be default mapped to an error later.
+def warn_arc_bridge_cast_nonarc : Warning<
+ "'%0' casts have no effect when not using ARC">,
+ InGroup<DiagGroup<"arc-bridge-casts-disallowed-in-nonarc">>;
+}
+
+def err_objc_illegal_visibility_spec : Error<
+ "illegal visibility specification">;
+def err_objc_illegal_interface_qual : Error<"illegal interface qualifier">;
+def err_objc_expected_equal_for_getter : Error<
+ "expected '=' for Objective-C getter">;
+def err_objc_expected_equal_for_setter : Error<
+ "expected '=' for Objective-C setter">;
+def err_objc_expected_selector_for_getter_setter : Error<
+ "expected selector for Objective-C %select{setter|getter}0">;
+def err_objc_property_requires_field_name : Error<
+ "property requires fields to be named">;
+def err_objc_property_bitfield : Error<"property name cannot be a bitfield">;
+def err_objc_expected_property_attr : Error<"unknown property attribute %0">;
+def err_objc_properties_require_objc2 : Error<
+ "properties are an Objective-C 2 feature">;
+def err_objc_unexpected_attr : Error<
+ "prefix attribute must be followed by an interface or protocol">;
+def err_objc_directive_only_in_protocol : Error<
+ "directive may only be specified in protocols">;
+def err_missing_catch_finally : Error<
+ "@try statement without a @catch or @finally clause">;
+def err_objc_concat_string : Error<"unexpected token after Objective-C string">;
+def err_expected_objc_container : Error<
+ "'@end' must appear in an Objective-C context">;
+def error_property_ivar_decl : Error<
+ "property synthesize requires specification of an ivar">;
+def err_synthesized_property_name : Error<
+ "expected a property name in @synthesize">;
+def warn_semicolon_before_method_body : Warning<
+ "semicolon before method body is ignored">,
+ InGroup<DiagGroup<"semicolon-before-method-body">>, DefaultIgnore;
+
+def err_expected_field_designator : Error<
+ "expected a field designator, such as '.field = 4'">;
+
+def err_declaration_does_not_declare_param : Error<
+ "declaration does not declare a parameter">;
+def err_no_matching_param : Error<"parameter named %0 is missing">;
+
+/// C++ parser diagnostics
+def err_expected_unqualified_id : Error<
+ "expected %select{identifier|unqualified-id}0">;
+def err_func_def_no_params : Error<
+ "function definition does not declare parameters">;
+def err_expected_lparen_after_type : Error<
+ "expected '(' for function-style cast or type construction">;
+def err_expected_init_in_condition : Error<
+ "variable declaration in condition must have an initializer">;
+def err_expected_init_in_condition_lparen : Error<
+ "variable declaration in condition cannot have a parenthesized initializer">;
+def warn_parens_disambiguated_as_function_decl : Warning<
+ "parentheses were disambiguated as a function declarator">,
+ InGroup<VexingParse>;
+def warn_dangling_else : Warning<
+ "add explicit braces to avoid dangling else">,
+ InGroup<DanglingElse>;
+def err_expected_member_or_base_name : Error<
+ "expected class member or base class name">;
+def err_expected_lbrace_after_base_specifiers : Error<
+ "expected '{' after base class list">;
+def ext_ellipsis_exception_spec : Extension<
+ "exception specification of '...' is a Microsoft extension">;
+def err_dynamic_and_noexcept_specification : Error<
+ "cannot have both throw() and noexcept() clauses on the same function">;
+def warn_cxx98_compat_noexcept_decl : Warning<
+ "noexcept specifications are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_expected_catch : Error<"expected catch">;
+def err_expected_lbrace_or_comma : Error<"expected '{' or ','">;
+def err_expected_rbrace_or_comma : Error<"expected '}' or ','">;
+def err_expected_rsquare_or_comma : Error<"expected ']' or ','">;
+def err_using_namespace_in_class : Error<
+ "'using namespace' is not allowed in classes">;
+def err_destructor_tilde_identifier : Error<
+ "expected a class name after '~' to name a destructor">;
+def err_destructor_template_id : Error<
+ "destructor name %0 does not refer to a template">;
+def err_default_arg_unparsed : Error<
+ "unexpected end of default argument expression">;
+def err_parser_impl_limit_overflow : Error<
+ "parser recursion limit reached, program too complex">, DefaultFatal;
+def err_misplaced_ellipsis_in_declaration : Error<
+ "'...' must %select{immediately precede declared identifier|"
+ "be innermost component of anonymous pack declaration}0">;
+
+// C++ derived classes
+def err_dup_virtual : Error<"duplicate 'virtual' in base specifier">;
+
+// C++ operator overloading
+def err_literal_operator_string_prefix : Error<
+ "string literal after 'operator' cannot have an encoding prefix">;
+def err_literal_operator_string_not_empty : Error<
+ "string literal after 'operator' must be '\"\"'">;
+def err_literal_operator_missing_space : Error<
+ "C++11 requires a space between the \"\" and the user-defined suffix in a "
+ "literal operator">;
+def warn_cxx98_compat_literal_operator : Warning<
+ "literal operators are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+
+// Classes.
+def err_anon_type_definition : Error<
+ "declaration of anonymous %0 must be a definition">;
+def err_default_delete_in_multiple_declaration : Error<
+ "'= %select{default|delete}0' is a function definition and must occur in a "
+ "standalone declaration">;
+
+def warn_cxx98_compat_noexcept_expr : Warning<
+ "noexcept expressions are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def warn_cxx98_compat_nullptr : Warning<
+ "'nullptr' is incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
+
+def warn_cxx98_compat_alignas : Warning<"'alignas' is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def warn_cxx98_compat_attribute : Warning<
+ "attributes are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_cxx11_attribute_forbids_arguments : Error<
+ "attribute '%0' cannot have an argument list">;
+def err_cxx11_attribute_forbids_ellipsis : Error<
+ "attribute '%0' cannot be used as an attribute pack">;
+def err_attributes_not_allowed : Error<"an attribute list cannot appear here">;
+def err_l_square_l_square_not_attribute : Error<
+ "C++11 only allows consecutive left square brackets when "
+ "introducing an attribute">;
+def err_alignas_pack_exp_unsupported : Error<
+ "pack expansions in alignment specifiers are not supported yet">;
+
+/// C++ Templates
+def err_expected_template : Error<"expected template">;
+def err_unknown_template_name : Error<
+ "unknown template name %0">;
+def err_expected_comma_greater : Error<
+ "expected ',' or '>' in template-parameter-list">;
+def err_class_on_template_template_param : Error<
+ "template template parameter requires 'class' after the parameter list">;
+def err_template_spec_syntax_non_template : Error<
+ "identifier followed by '<' indicates a class template specialization but "
+ "%0 %select{does not refer to a template|refers to a function "
+ "template|<unused>|refers to a template template parameter}1">;
+def err_id_after_template_in_nested_name_spec : Error<
+ "expected template name after 'template' keyword in nested name specifier">;
+def err_two_right_angle_brackets_need_space : Error<
+ "a space is required between consecutive right angle brackets (use '> >')">;
+def warn_cxx0x_right_shift_in_template_arg : Warning<
+ "use of right-shift operator ('>>') in template argument will require "
+ "parentheses in C++11">;
+def warn_cxx98_compat_two_right_angle_brackets : Warning<
+ "consecutive right angle brackets are incompatible with C++98 (use '> >')">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_multiple_template_declarators : Error<
+ "%select{|a template declaration|an explicit template specialization|"
+ "an explicit template instantiation}0 can "
+ "only %select{|declare|declare|instantiate}0 a single entity">;
+def err_explicit_instantiation_with_definition : Error<
+ "explicit template instantiation cannot have a definition; if this "
+ "definition is meant to be an explicit specialization, add '<>' after the "
+ "'template' keyword">;
+def err_enum_template : Error<"enumeration cannot be a template">;
+def err_explicit_instantiation_enum : Error<
+ "enumerations cannot be explicitly instantiated">;
+def err_expected_template_parameter : Error<"expected template parameter">;
+
+def err_missing_dependent_template_keyword : Error<
+ "use 'template' keyword to treat '%0' as a dependent template name">;
+def warn_missing_dependent_template_keyword : ExtWarn<
+ "use 'template' keyword to treat '%0' as a dependent template name">;
+
+def ext_extern_template : Extension<
+ "extern templates are a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_extern_template : Warning<
+ "extern templates are incompatible with C++98">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
+def warn_static_inline_explicit_inst_ignored : Warning<
+ "ignoring '%select{static|inline}0' keyword on explicit template "
+ "instantiation">;
+
+// Constructor template diagnostics.
+def err_out_of_line_constructor_template_id : Error<
+ "out-of-line constructor for %0 cannot have template arguments">;
+def err_out_of_line_template_id_names_constructor : Error<
+ "qualified reference to %0 is a constructor name rather than a "
+ "template name wherever a constructor can be declared">;
+def err_out_of_line_type_names_constructor : Error<
+ "qualified reference to %0 is a constructor name rather than a "
+ "type wherever a constructor can be declared">;
+
+def err_expected_qualified_after_typename : Error<
+ "expected a qualified name after 'typename'">;
+def warn_expected_qualified_after_typename : ExtWarn<
+ "expected a qualified name after 'typename'">;
+def err_expected_semi_after_tagdecl : Error<
+ "expected ';' after %0">;
+
+def err_typename_refers_to_non_type_template : Error<
+ "typename specifier refers to a non-template">;
+def err_expected_type_name_after_typename : Error<
+ "expected an identifier or template-id after '::'">;
+def err_explicit_spec_non_template : Error<
+ "explicit %select{specialization|instantiation}0 of non-template "
+ "%select{class|struct|union}1 %2">;
+
+def err_default_template_template_parameter_not_template : Error<
+ "default template argument for a template template parameter must be a class "
+ "template">;
+
+def err_ctor_init_missing_comma : Error<
+ "missing ',' between base or member initializers">;
+
+// C++ declarations
+def err_friend_decl_defines_type : Error<
+ "cannot define a type in a friend declaration">;
+def err_missing_whitespace_digraph : Error<
+ "found '<::' after a "
+ "%select{template name|const_cast|dynamic_cast|reinterpret_cast|static_cast}0"
+ " which forms the digraph '<:' (aka '[') and a ':', did you mean '< ::'?">;
+
+def ext_deleted_function : ExtWarn<
+ "deleted function definitions are a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_deleted_function : Warning<
+ "deleted function definitions are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def ext_defaulted_function : ExtWarn<
+ "defaulted function definitions are a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_defaulted_function : Warning<
+ "defaulted function definitions are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+
+// C++11 in-class member initialization
+def ext_nonstatic_member_init : ExtWarn<
+ "in-class initialization of non-static data member is a C++11 extension">,
+ InGroup<CXX11>;
+def warn_cxx98_compat_nonstatic_member_init : Warning<
+ "in-class initialization of non-static data members is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_bitfield_member_init: Error<
+ "bitfield member cannot have an in-class initializer">;
+def err_incomplete_array_member_init: Error<
+ "array bound cannot be deduced from an in-class initializer">;
+
+// C++11 alias-declaration
+def ext_alias_declaration : ExtWarn<
+ "alias declarations are a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_alias_declaration : Warning<
+ "alias declarations are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_alias_declaration_not_identifier : Error<
+ "name defined in alias declaration must be an identifier">;
+def err_alias_declaration_specialization : Error<
+ "%select{partial specialization|explicit specialization|explicit instantiation}0 of alias templates is not permitted">;
+
+// C++11 override control
+def ext_override_control_keyword : ExtWarn<
+ "'%0' keyword is a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_override_control_keyword : Warning<
+ "'%0' keyword is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+
+def err_duplicate_virt_specifier : Error<
+ "class member already marked '%0'">;
+
+def err_scoped_enum_missing_identifier : Error<
+ "scoped enumeration requires a name">;
+def warn_cxx98_compat_scoped_enum : Warning<
+ "scoped enumerations are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+
+def err_expected_parameter_pack : Error<
+ "expected the name of a parameter pack">;
+def err_paren_sizeof_parameter_pack : Error<
+ "missing parentheses around the size of parameter pack %0">;
+def err_sizeof_parameter_pack : Error<
+ "expected parenthesized parameter pack name in 'sizeof...' expression">;
+
+// C++11 lambda expressions
+def err_expected_comma_or_rsquare : Error<
+ "expected ',' or ']' in lambda capture list">;
+def err_this_captured_by_reference : Error<
+ "'this' cannot be captured by reference">;
+def err_expected_capture : Error<
+ "expected variable name or 'this' in lambda capture list">;
+def err_expected_lambda_body : Error<"expected body of lambda expression">;
+def warn_cxx98_compat_lambda : Warning<
+ "lambda expressions are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_lambda_missing_parens : Error<
+ "lambda requires '()' before %select{'mutable'|return type}0">;
+
+// Availability attribute
+def err_expected_version : Error<
+ "expected a version of the form 'major[.minor[.subminor]]'">;
+def err_zero_version : Error<
+ "version number must have non-zero major, minor, or sub-minor version">;
+def err_availability_expected_platform : Error<
+ "expected a platform name, e.g., 'macosx'">;
+def err_availability_expected_change : Error<
+ "expected 'introduced', 'deprecated', or 'obsoleted'">;
+def err_availability_unknown_change : Error<
+ "%0 is not an availability stage; use 'introduced', 'deprecated', or "
+ "'obsoleted'">;
+def err_availability_redundant : Error<
+ "redundant %0 availability change; only the last specified change will "
+ "be used">;
+def warn_availability_and_unavailable : Warning<
+ "'unavailable' availability overrides all other availability information">;
+
+// Language specific pragmas
+// - Generic warnings
+def warn_pragma_expected_lparen : Warning<
+ "missing '(' after '#pragma %0' - ignoring">;
+def warn_pragma_expected_rparen : Warning<
+ "missing ')' after '#pragma %0' - ignoring">;
+def warn_pragma_expected_identifier : Warning<
+ "expected identifier in '#pragma %0' - ignored">;
+def warn_pragma_ms_struct : Warning<
+ "incorrect use of '#pragma ms_struct on|off' - ignored">;
+def warn_pragma_extra_tokens_at_eol : Warning<
+ "extra tokens at end of '#pragma %0' - ignored">;
+// - #pragma options
+def warn_pragma_options_expected_align : Warning<
+ "expected 'align' following '#pragma options' - ignored">;
+def warn_pragma_align_expected_equal : Warning<
+ "expected '=' following '#pragma %select{align|options align}0' - ignored">;
+def warn_pragma_align_invalid_option : Warning<
+ "invalid alignment option in '#pragma %select{align|options align}0' - ignored">;
+// - #pragma pack
+def warn_pragma_pack_invalid_action : Warning<
+ "unknown action for '#pragma pack' - ignored">;
+def warn_pragma_pack_malformed : Warning<
+ "expected integer or identifier in '#pragma pack' - ignored">;
+// - #pragma unused
+def warn_pragma_unused_expected_var : Warning<
+ "expected '#pragma unused' argument to be a variable name">;
+def warn_pragma_unused_expected_punc : Warning<
+ "expected ')' or ',' in '#pragma unused'">;
+
+// OpenCL Section 6.8.g
+def err_not_opencl_storage_class_specifier : Error<
+ "OpenCL does not support the '%0' storage class specifier">;
+
+// OpenCL EXTENSION pragma (OpenCL 1.1 [9.1])
+def warn_pragma_expected_colon : Warning<
+ "missing ':' after %0 - ignoring">;
+def warn_pragma_expected_enable_disable : Warning<
+ "expected 'enable' or 'disable' - ignoring">;
+def warn_pragma_unknown_extension : Warning<
+ "unknown OpenCL extension %0 - ignoring">;
+
+def err_seh_expected_handler : Error<
+ "expected '__except' or '__finally' block">;
+
+def err_seh___except_block : Error<
+ "%0 only allowed in __except block">;
+
+def err_seh___except_filter : Error<
+ "%0 only allowed in __except filter expression">;
+
+def err_seh___finally_block : Error<
+ "%0 only allowed in __finally block">;
+
+} // end of Parse Issue category.
+
+let CategoryName = "Modules Issue" in {
+def err_module_expected_ident : Error<
+ "expected a module name after module import">;
+def err_module_expected_semi : Error<
+ "expected a semicolon after module name">;
+}
+
+} // end of Parser diagnostics
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
new file mode 100644
index 0000000..e553740
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -0,0 +1,5489 @@
+//==--- DiagnosticSemaKinds.td - libsema diagnostics ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+//===----------------------------------------------------------------------===//
+// Semantic Analysis
+//===----------------------------------------------------------------------===//
+
+let Component = "Sema" in {
+let CategoryName = "Semantic Issue" in {
+
+// Constant expressions
+def err_expr_not_ice : Error<
+ "expression is not an %select{integer|integral}0 constant expression">;
+def ext_expr_not_ice : Extension<
+ "expression is not an %select{integer|integral}0 constant expression; "
+ "folding it to a constant is a GNU extension">, InGroup<GNU>;
+def err_typecheck_converted_constant_expression : Error<
+ "value of type %0 is not implicitly convertible to %1">;
+def err_typecheck_converted_constant_expression_disallowed : Error<
+ "conversion from %0 to %1 is not allowed in a converted constant expression">;
+def err_expr_not_cce : Error<
+ "%select{case value|enumerator value|non-type template argument}0 "
+ "is not a constant expression">;
+def err_cce_narrowing : ExtWarn<
+ "%select{case value|enumerator value|non-type template argument}0 "
+ "%select{cannot be narrowed from type %2 to %3|"
+ "evaluates to %2, which cannot be narrowed to type %3}1">,
+ InGroup<CXX11Narrowing>, DefaultError;
+def err_cce_narrowing_sfinae : Error<
+ "%select{case value|enumerator value|non-type template argument}0 "
+ "%select{cannot be narrowed from type %2 to %3|"
+ "evaluates to %2, which cannot be narrowed to type %3}1">;
+def err_ice_not_integral : Error<
+ "integral constant expression must have integral or unscoped enumeration "
+ "type, not %0">;
+def err_ice_incomplete_type : Error<
+ "integral constant expression has incomplete class type %0">;
+def err_ice_explicit_conversion : Error<
+ "integral constant expression requires explicit conversion from %0 to %1">;
+def note_ice_conversion_here : Note<
+ "conversion to %select{integral|enumeration}0 type %1 declared here">;
+def err_ice_ambiguous_conversion : Error<
+ "ambiguous conversion from type %0 to an integral or unscoped "
+ "enumeration type">;
+
+// Semantic analysis of constant literals.
+def ext_predef_outside_function : Warning<
+ "predefined identifier is only valid inside a function">,
+ InGroup<DiagGroup<"predefined-identifier-outside-function">>;
+def warn_float_overflow : Warning<
+ "magnitude of floating-point constant too large for type %0; maximum is %1">,
+ InGroup<LiteralRange>;
+def warn_float_underflow : Warning<
+ "magnitude of floating-point constant too small for type %0; minimum is %1">,
+ InGroup<LiteralRange>;
+def warn_double_const_requires_fp64 : Warning<
+ "double precision constant requires cl_khr_fp64, casting to single precision">;
+
+// C99 variable-length arrays
+def ext_vla : Extension<"variable length arrays are a C99 feature">,
+ InGroup<VLA>;
+def err_vla_non_pod : Error<"variable length array of non-POD element type %0">;
+def err_vla_in_sfinae : Error<
+ "variable length array cannot be formed during template argument deduction">;
+def err_array_star_in_function_definition : Error<
+ "variable length array must be bound in function definition">;
+def err_vla_decl_in_file_scope : Error<
+ "variable length array declaration not allowed at file scope">;
+def err_vla_decl_has_static_storage : Error<
+ "variable length array declaration cannot have 'static' storage duration">;
+def err_vla_decl_has_extern_linkage : Error<
+ "variable length array declaration cannot have 'extern' linkage">;
+def ext_vla_folded_to_constant : Extension<
+ "variable length array folded to constant array as an extension">;
+
+// C99 variably modified types
+def err_variably_modified_template_arg : Error<
+ "variably modified type %0 cannot be used as a template argument">;
+def err_variably_modified_nontype_template_param : Error<
+ "non-type template parameter of variably modified type %0">;
+def err_variably_modified_new_type : Error<
+ "'new' cannot allocate object of variably modified type %0">;
+
+// C99 Designated Initializers
+def ext_designated_init : Extension<
+ "designated initializers are a C99 feature">;
+def err_array_designator_negative : Error<
+ "array designator value '%0' is negative">;
+def err_array_designator_empty_range : Error<
+ "array designator range [%0, %1] is empty">;
+def err_array_designator_non_array : Error<
+ "array designator cannot initialize non-array type %0">;
+def err_array_designator_too_large : Error<
+ "array designator index (%0) exceeds array bounds (%1)">;
+def err_field_designator_non_aggr : Error<
+ "field designator cannot initialize a "
+ "%select{non-struct, non-union|non-class}0 type %1">;
+def err_field_designator_unknown : Error<
+ "field designator %0 does not refer to any field in type %1">;
+def err_field_designator_nonfield : Error<
+ "field designator %0 does not refer to a non-static data member">;
+def note_field_designator_found : Note<"field designator refers here">;
+def err_designator_for_scalar_init : Error<
+ "designator in initializer for scalar type %0">;
+def warn_subobject_initializer_overrides : Warning<
+ "subobject initialization overrides initialization of other fields "
+ "within its enclosing subobject">, InGroup<InitializerOverrides>;
+def warn_initializer_overrides : Warning<
+ "initializer overrides prior initialization of this subobject">,
+ InGroup<InitializerOverrides>;
+def note_previous_initializer : Note<
+ "previous initialization %select{|with side effects }0is here"
+ "%select{| (side effects may not occur at run time)}0">;
+def err_designator_into_flexible_array_member : Error<
+ "designator into flexible array member subobject">;
+def note_flexible_array_member : Note<
+ "initialized flexible array member %0 is here">;
+def ext_flexible_array_init : Extension<
+ "flexible array initialization is a GNU extension">, InGroup<GNU>;
+
+// Declarations.
+def err_bad_variable_name : Error<
+ "%0 cannot be the name of a variable or data member">;
+def err_bad_parameter_name : Error<
+ "'%0' cannot be the name of a parameter">;
+def err_parameter_name_omitted : Error<"parameter name omitted">;
+def warn_unused_parameter : Warning<"unused parameter %0">,
+ InGroup<UnusedParameter>, DefaultIgnore;
+def warn_unused_variable : Warning<"unused variable %0">,
+ InGroup<UnusedVariable>, DefaultIgnore;
+def warn_unused_exception_param : Warning<"unused exception parameter %0">,
+ InGroup<UnusedExceptionParameter>, DefaultIgnore;
+def warn_decl_in_param_list : Warning<
+ "declaration of %0 will not be visible outside of this function">,
+ InGroup<Visibility>;
+def warn_redefinition_in_param_list : Warning<
+ "redefinition of %0 will not be visible outside of this function">,
+ InGroup<Visibility>;
+def warn_empty_parens_are_function_decl : Warning<
+ "empty parentheses interpreted as a function declaration">,
+ InGroup<VexingParse>;
+def note_empty_parens_function_call : Note<
+ "change this ',' to a ';' to call %0">;
+def note_empty_parens_default_ctor : Note<
+ "remove parentheses to declare a variable">;
+def note_empty_parens_zero_initialize : Note<
+ "replace parentheses with an initializer to declare a variable">;
+def warn_unused_function : Warning<"unused function %0">,
+ InGroup<UnusedFunction>, DefaultIgnore;
+def warn_unused_member_function : Warning<"unused member function %0">,
+ InGroup<UnusedMemberFunction>, DefaultIgnore;
+def warn_used_but_marked_unused: Warning<"%0 was marked unused but was used">,
+ InGroup<UsedButMarkedUnused>, DefaultIgnore;
+def warn_unneeded_internal_decl : Warning<
+ "%select{function|variable}0 %1 is not needed and will not be emitted">,
+ InGroup<UnneededInternalDecl>, DefaultIgnore;
+def warn_unneeded_member_function : Warning<
+ "member function %0 is not needed and will not be emitted">,
+ InGroup<UnneededMemberFunction>, DefaultIgnore;
+
+def warn_parameter_size: Warning<
+ "%0 is a large (%1 bytes) pass-by-value argument; "
+ "pass it by reference instead ?">, InGroup<LargeByValueCopy>;
+def warn_return_value_size: Warning<
+ "return value of %0 is a large (%1 bytes) pass-by-value object; "
+ "pass it by reference instead ?">, InGroup<LargeByValueCopy>;
+def warn_return_value_udt: Warning<
+ "%0 has C-linkage specified, but returns user-defined type %1 which is "
+ "incompatible with C">, InGroup<ReturnTypeCLinkage>;
+def warn_implicit_function_decl : Warning<
+ "implicit declaration of function %0">,
+ InGroup<ImplicitFunctionDeclare>, DefaultIgnore;
+def ext_implicit_function_decl : ExtWarn<
+ "implicit declaration of function %0 is invalid in C99">,
+ InGroup<ImplicitFunctionDeclare>;
+def note_function_suggestion : Note<"did you mean %0?">;
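+// For illustration (editorial note): in C99 mode, calling an undeclared
+// function, e.g. 'int main(void) { return foo(); }' with no prior
+// declaration of foo, produces ext_implicit_function_decl.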
+
+def err_ellipsis_first_arg : Error<
+ "ISO C requires a named argument before '...'">;
+def err_declarator_need_ident : Error<"declarator requires an identifier">;
+def err_bad_language : Error<"unknown linkage language">;
+def warn_use_out_of_scope_declaration : Warning<
+ "use of out-of-scope declaration of %0">;
+def err_inline_non_function : Error<
+ "'inline' can only appear on functions">;
+def warn_qual_return_type : Warning<
+ "'%0' type qualifier%s1 on return type %plural{1:has|:have}1 no effect">,
+ InGroup<IgnoredQualifiers>, DefaultIgnore;
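+// Illustrative example (editor's note): 'const int f();' carries a top-level
+// 'const' on the return type that has no effect, which is what
+// warn_qual_return_type reports under -Wignored-qualifiers.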
+
+def warn_decl_shadow :
+ Warning<"declaration shadows a %select{"
+ "local variable|"
+ "variable in %2|"
+ "static data member of %2|"
+ "field of %2}1">,
+ InGroup<Shadow>, DefaultIgnore;
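+// Editorial note, illustrative: 'void f() { int x = 0; { int x = 1; } }'
+// triggers warn_decl_shadow ("declaration shadows a local variable") when
+// -Wshadow is enabled.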
+
+// C++ using declarations
+def err_using_requires_qualname : Error<
+ "using declaration requires a qualified name">;
+def err_using_typename_non_type : Error<
+ "'typename' keyword used on a non-type">;
+def err_using_dependent_value_is_type : Error<
+ "dependent using declaration resolved to type without 'typename'">;
+def err_using_decl_nested_name_specifier_is_not_class : Error<
+ "using declaration in class refers into '%0', which is not a class">;
+def err_using_decl_nested_name_specifier_is_current_class : Error<
+ "using declaration refers to its own class">;
+def err_using_decl_nested_name_specifier_is_not_base_class : Error<
+ "using declaration refers into '%0', which is not a base class of %1">;
+def err_using_decl_constructor_not_in_direct_base : Error<
+ "%0 is not a direct base of %1, can not inherit constructors">;
+def err_using_decl_constructor_conflict : Error<
+ "can not inherit constructor, already inherited constructor with "
+ "the same signature">;
+def note_using_decl_constructor_conflict_current_ctor : Note<
+ "conflicting constructor">;
+def note_using_decl_constructor_conflict_previous_ctor : Note<
+ "previous constructor">;
+def note_using_decl_constructor_conflict_previous_using : Note<
+ "previously inherited here">;
+def err_using_decl_can_not_refer_to_class_member : Error<
+ "using declaration cannot refer to class member">;
+def err_using_decl_can_not_refer_to_namespace : Error<
+ "using declaration cannot refer to namespace">;
+def err_using_decl_constructor : Error<
+ "using declaration cannot refer to a constructor">;
+def warn_cxx98_compat_using_decl_constructor : Warning<
+ "inherited constructors are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_using_decl_destructor : Error<
+ "using declaration cannot refer to a destructor">;
+def err_using_decl_template_id : Error<
+ "using declaration cannot refer to a template specialization">;
+def note_using_decl_target : Note<"target of using declaration">;
+def note_using_decl_conflict : Note<"conflicting declaration">;
+def err_using_decl_redeclaration : Error<"redeclaration of using decl">;
+def err_using_decl_conflict : Error<
+ "target of using declaration conflicts with declaration already in scope">;
+def err_using_decl_conflict_reverse : Error<
+ "declaration conflicts with target of using declaration already in scope">;
+def note_using_decl : Note<"%select{|previous }0using declaration">;
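+// For illustration (editor's note): 'struct A { int x; }; struct B {
+// using A::x; };' is rejected by
+// err_using_decl_nested_name_specifier_is_not_base_class, since A is not a
+// base class of B.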
+
+def warn_access_decl_deprecated : Warning<
+ "access declarations are deprecated; use using declarations instead">,
+ InGroup<Deprecated>;
+
+def warn_global_constructor : Warning<
+ "declaration requires a global constructor">,
+ InGroup<GlobalConstructors>, DefaultIgnore;
+def warn_global_destructor : Warning<
+ "declaration requires a global destructor">,
+ InGroup<GlobalConstructors>, DefaultIgnore;
+def warn_exit_time_destructor : Warning<
+ "declaration requires an exit-time destructor">,
+ InGroup<ExitTimeDestructors>, DefaultIgnore;
+
+def err_invalid_thread : Error<
+ "'__thread' is only allowed on variable declarations">;
+def err_thread_non_global : Error<
+ "'__thread' variables must have global storage">;
+def err_thread_unsupported : Error<
+ "thread-local storage is unsupported for the current target">;
+
+def warn_maybe_falloff_nonvoid_function : Warning<
+ "control may reach end of non-void function">,
+ InGroup<ReturnType>;
+def warn_falloff_nonvoid_function : Warning<
+ "control reaches end of non-void function">,
+ InGroup<ReturnType>;
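+// Editor's note, illustrative: 'int f() { }' reaches the end of a non-void
+// function without returning a value and is reported by
+// warn_falloff_nonvoid_function (-Wreturn-type).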
+def err_maybe_falloff_nonvoid_block : Error<
+ "control may reach end of non-void block">;
+def err_falloff_nonvoid_block : Error<
+ "control reaches end of non-void block">;
+def warn_suggest_noreturn_function : Warning<
+ "%select{function|method}0 %1 could be declared with attribute 'noreturn'">,
+ InGroup<DiagGroup<"missing-noreturn">>, DefaultIgnore;
+def warn_suggest_noreturn_block : Warning<
+ "block could be declared with attribute 'noreturn'">,
+ InGroup<DiagGroup<"missing-noreturn">>, DefaultIgnore;
+def warn_unreachable : Warning<"will never be executed">,
+ InGroup<DiagGroup<"unreachable-code">>, DefaultIgnore;
+
+/// Built-in functions.
+def ext_implicit_lib_function_decl : ExtWarn<
+ "implicitly declaring library function '%0' with type %1">;
+def note_please_include_header : Note<
+ "please include the header <%0> or explicitly provide a "
+ "declaration for '%1'">;
+def note_previous_builtin_declaration : Note<"%0 is a builtin with type %1">;
+def warn_implicit_decl_requires_stdio : Warning<
+ "declaration of built-in function '%0' requires inclusion of the header "
+ "<stdio.h>">,
+ InGroup<BuiltinRequiresHeader>;
+def warn_implicit_decl_requires_setjmp : Warning<
+ "declaration of built-in function '%0' requires inclusion of the header "
+ "<setjmp.h>">,
+ InGroup<BuiltinRequiresHeader>;
+def warn_implicit_decl_requires_ucontext : Warning<
+ "declaration of built-in function '%0' requires inclusion of the header "
+ "<ucontext.h>">,
+ InGroup<BuiltinRequiresHeader>;
+def warn_redecl_library_builtin : Warning<
+ "incompatible redeclaration of library function %0">;
+def err_builtin_definition : Error<"definition of builtin function %0">;
+def err_types_compatible_p_in_cplusplus : Error<
+ "__builtin_types_compatible_p is not valid in C++">;
+def warn_builtin_unknown : Warning<"use of unknown builtin %0">,
+ InGroup<ImplicitFunctionDeclare>, DefaultError;
+def warn_dyn_class_memaccess : Warning<
+ "%select{destination for|source of|first operand of|second operand of}0 this "
+ "%1 call is a pointer to dynamic class %2; vtable pointer will be "
+ "%select{overwritten|copied|moved|compared}3">,
+ InGroup<DiagGroup<"dynamic-class-memaccess">>;
+def note_bad_memaccess_silence : Note<
+ "explicitly cast the pointer to silence this warning">;
+def warn_sizeof_pointer_expr_memaccess : Warning<
+ "argument to 'sizeof' in %0 call is the same expression as the "
+ "%select{destination|source}1; did you mean to "
+ "%select{dereference it|remove the addressof|provide an explicit length}2?">,
+ InGroup<DiagGroup<"sizeof-pointer-memaccess">>;
+def warn_sizeof_pointer_type_memaccess : Warning<
+ "argument to 'sizeof' in %0 call is the same pointer type %1 as the "
+ "%select{destination|source}2; expected %3 or an explicit length">,
+ InGroup<DiagGroup<"sizeof-pointer-memaccess">>;
+def warn_strlcpycat_wrong_size : Warning<
+ "size argument in %0 call appears to be size of the source; expected the size of "
+ "the destination">,
+ InGroup<DiagGroup<"strlcpy-strlcat-size">>;
+def note_strlcpycat_wrong_size : Note<
+ "change size argument to be the size of the destination">;
+
+def warn_strncat_large_size : Warning<
+ "the value of the size argument in 'strncat' is too large, might lead to a "
+ "buffer overflow">, InGroup<StrncatSize>, DefaultIgnore;
+def warn_strncat_src_size : Warning<"size argument in 'strncat' call appears "
+ "to be size of the source">, InGroup<StrncatSize>, DefaultIgnore;
+def note_strncat_wrong_size : Note<
+ "change the argument to be the free space in the destination buffer minus "
+ "the terminating null byte">;
+
+/// main()
+// static/inline main() are not errors in C, just in C++.
+def warn_static_main : Warning<"'main' should not be declared static">,
+ InGroup<Main>;
+def err_static_main : Error<"'main' is not allowed to be declared static">;
+def err_inline_main : Error<"'main' is not allowed to be declared inline">;
+def err_constexpr_main : Error<
+ "'main' is not allowed to be declared constexpr">;
+def err_main_template_decl : Error<"'main' cannot be a template">;
+def err_main_returns_nonint : Error<"'main' must return 'int'">;
+def ext_main_returns_nonint : ExtWarn<"return type of 'main' is not 'int'">,
+ InGroup<MainReturnType>;
+def err_main_surplus_args : Error<"too many parameters (%0) for 'main': "
+ "must be 0, 2, or 3">;
+def warn_main_one_arg : Warning<"only one parameter on 'main' declaration">,
+ InGroup<Main>;
+def err_main_arg_wrong : Error<"%select{first|second|third|fourth}0 "
+ "parameter of 'main' (%select{argument count|argument array|environment|"
+ "platform-specific data}0) must be of type %1">;
+
+/// parser diagnostics
+def ext_typedef_without_a_name : ExtWarn<"typedef requires a name">,
+ InGroup<MissingDeclarations>;
+def err_typedef_not_identifier : Error<"typedef name must be an identifier">;
+def err_statically_allocated_object : Error<
+ "interface type cannot be statically allocated">;
+def err_object_cannot_be_passed_returned_by_value : Error<
+ "interface type %1 cannot be %select{returned|passed}0 by value"
+ "; did you forget * in %1">;
+def err_parameters_retval_cannot_have_fp16_type : Error<
+ "%select{parameters|function return value}0 cannot have __fp16 type; did you forget * ?">;
+def warn_enum_value_overflow : Warning<"overflow in enumeration value">;
+def warn_pragma_options_align_unsupported_option : Warning<
+ "unsupported alignment option in '#pragma options align'">;
+def warn_pragma_options_align_reset_failed : Warning<
+ "#pragma options align=reset failed: %0">;
+def err_pragma_options_align_mac68k_target_unsupported : Error<
+ "mac68k alignment pragma is not supported on this target">;
+def warn_pragma_pack_invalid_alignment : Warning<
+ "expected #pragma pack parameter to be '1', '2', '4', '8', or '16'">;
+// Follow the MSVC implementation.
+def warn_pragma_pack_show : Warning<"value of #pragma pack(show) == %0">;
+def warn_pragma_pack_pop_identifer_and_alignment : Warning<
+ "specifying both a name and alignment to 'pop' is undefined">;
+def warn_pragma_pack_pop_failed : Warning<"#pragma pack(pop, ...) failed: %0">;
+
+def warn_pragma_unused_undeclared_var : Warning<
+ "undeclared variable %0 used as an argument for '#pragma unused'">;
+def warn_pragma_unused_expected_var_arg : Warning<
+ "only variables can be arguments to '#pragma unused'">;
+def err_pragma_push_visibility_mismatch : Error<
+ "#pragma visibility push with no matching #pragma visibility pop">;
+def note_surrounding_namespace_ends_here : Note<
+ "surrounding namespace with visibility attribute ends here">;
+def err_pragma_pop_visibility_mismatch : Error<
+ "#pragma visibility pop with no matching #pragma visibility push">;
+def note_surrounding_namespace_starts_here : Note<
+ "surrounding namespace with visibility attribute starts here">;
+
+/// Objective-C parser diagnostics
+def err_duplicate_class_def : Error<
+ "duplicate interface definition for class %0">;
+def err_undef_superclass : Error<
+ "cannot find interface declaration for %0, superclass of %1">;
+def err_forward_superclass : Error<
+ "attempting to use the forward class %0 as superclass of %1">;
+def err_no_nsconstant_string_class : Error<
+ "cannot find interface declaration for %0">;
+def err_recursive_superclass : Error<
+ "trying to recursively use %0 as superclass of %1">;
+def warn_previous_alias_decl : Warning<"previously declared alias is ignored">;
+def err_conflicting_aliasing_type : Error<"conflicting types for alias %0">;
+def warn_undef_interface : Warning<"cannot find interface declaration for %0">;
+def warn_duplicate_protocol_def : Warning<"duplicate protocol definition of %0 is ignored">;
+def err_protocol_has_circular_dependency : Error<
+ "protocol has circular dependency">;
+def err_undeclared_protocol : Error<"cannot find protocol declaration for %0">;
+def warn_undef_protocolref : Warning<"cannot find protocol definition for %0">;
+def warn_readonly_property : Warning<
+ "attribute 'readonly' of property %0 restricts attribute "
+ "'readwrite' of property inherited from %1">;
+
+def warn_property_attribute : Warning<
+ "property %0 '%1' attribute does not match the property inherited from %2">;
+def warn_property_types_are_incompatible : Warning<
+ "property type %0 is incompatible with type %1 inherited from %2">;
+def err_undef_interface : Error<"cannot find interface declaration for %0">;
+def err_category_forward_interface : Error<
+ "cannot define %select{category|class extension}0 for undefined class %1">;
+def err_class_extension_after_impl : Error<
+ "cannot declare class extension for %0 after class implementation">;
+def note_implementation_declared : Note<
+ "class implementation is declared here">;
+def note_class_declared : Note<
+ "class is declared here">;
+def note_receiver_is_id : Note<
+ "receiver is treated with 'id' type for purpose of method lookup">;
+def note_suppressed_class_declare : Note<
+ "class with specified objc_requires_property_definitions attribute is declared here">;
+def err_objc_root_class_subclass : Error<
+ "objc_root_class attribute may only be specified on a root class declaration">;
+def warn_objc_root_class_missing : Warning<
+ "class %0 defined without specifying a base class">,
+ InGroup<ObjCRootClass>, DefaultIgnore;
+def note_objc_needs_superclass : Note<
+ "add a super class to fix this problem">;
+def warn_dup_category_def : Warning<
+ "duplicate definition of category %1 on interface %0">;
+def err_conflicting_super_class : Error<"conflicting super class name %0">;
+def err_dup_implementation_class : Error<"reimplementation of class %0">;
+def err_dup_implementation_category : Error<
+ "reimplementation of category %1 for class %0">;
+def err_conflicting_ivar_type : Error<
+ "instance variable %0 has conflicting type: %1 vs %2">;
+def err_duplicate_ivar_declaration : Error<
+ "instance variable is already declared">;
+def warn_on_superclass_use : Warning<
+ "class implementation may not have super class">;
+def err_conflicting_ivar_bitwidth : Error<
+ "instance variable %0 has conflicting bit-field width">;
+def err_conflicting_ivar_name : Error<
+ "conflicting instance variable names: %0 vs %1">;
+def err_inconsistant_ivar_count : Error<
+ "inconsistent number of instance variables specified">;
+def warn_incomplete_impl : Warning<"incomplete implementation">,
+ InGroup<DiagGroup<"incomplete-implementation">>;
+def note_undef_method_impl : Note<"method definition for %0 not found">;
+def note_required_for_protocol_at :
+ Note<"required for direct or indirect protocol %0">;
+
+def warn_conflicting_overriding_ret_types : Warning<
+ "conflicting return type in "
+ "declaration of %0: %1 vs %2">,
+ InGroup<OverridingMethodMismatch>, DefaultIgnore;
+
+def warn_conflicting_ret_types : Warning<
+ "conflicting return type in "
+ "implementation of %0: %1 vs %2">;
+
+def warn_conflicting_overriding_ret_type_modifiers : Warning<
+ "conflicting distributed object modifiers on return type "
+ "in declaration of %0">,
+ InGroup<OverridingMethodMismatch>, DefaultIgnore;
+
+def warn_conflicting_ret_type_modifiers : Warning<
+ "conflicting distributed object modifiers on return type "
+ "in implementation of %0">,
+ InGroup<DiagGroup<"distributed-object-modifiers">>;
+
+def warn_non_covariant_overriding_ret_types : Warning<
+ "conflicting return type in "
+ "declaration of %0: %1 vs %2">,
+ InGroup<OverridingMethodMismatch>, DefaultIgnore;
+
+def warn_non_covariant_ret_types : Warning<
+ "conflicting return type in "
+ "implementation of %0: %1 vs %2">,
+ InGroup<DiagGroup<"method-signatures">>, DefaultIgnore;
+
+def warn_conflicting_overriding_param_types : Warning<
+ "conflicting parameter types in "
+ "declaration of %0: %1 vs %2">,
+ InGroup<OverridingMethodMismatch>, DefaultIgnore;
+
+def warn_conflicting_param_types : Warning<
+ "conflicting parameter types in "
+ "implementation of %0: %1 vs %2">;
+def warn_conflicting_param_modifiers : Warning<
+ "conflicting distributed object modifiers on parameter type "
+ "in implementation of %0">,
+ InGroup<DiagGroup<"distributed-object-modifiers">>;
+
+def warn_conflicting_overriding_param_modifiers : Warning<
+ "conflicting distributed object modifiers on parameter type "
+ "in declaration of %0">,
+ InGroup<OverridingMethodMismatch>, DefaultIgnore;
+
+def warn_non_contravariant_overriding_param_types : Warning<
+ "conflicting parameter types in "
+ "declaration of %0: %1 vs %2">,
+ InGroup<OverridingMethodMismatch>, DefaultIgnore;
+
+def warn_non_contravariant_param_types : Warning<
+ "conflicting parameter types in "
+ "implementation of %0: %1 vs %2">,
+ InGroup<DiagGroup<"method-signatures">>, DefaultIgnore;
+
+def warn_conflicting_overriding_variadic : Warning<
+ "conflicting variadic declaration of method and its "
+ "implementation">,
+ InGroup<OverridingMethodMismatch>, DefaultIgnore;
+
+def warn_conflicting_variadic : Warning<
+ "conflicting variadic declaration of method and its "
+ "implementation">;
+
+def warn_category_method_impl_match : Warning<
+ "category is implementing a method which will also be implemented"
+ " by its primary class">, InGroup<ObjCProtocolMethodImpl>;
+
+def warn_implements_nscopying : Warning<
+"default assign attribute on property %0 which implements "
+"NSCopying protocol is not appropriate with -fobjc-gc[-only]">;
+
+def warn_multiple_method_decl : Warning<"multiple methods named %0 found">;
+def warn_strict_multiple_method_decl : Warning<
+ "multiple methods named %0 found">, InGroup<StrictSelector>, DefaultIgnore;
+def warn_accessor_property_type_mismatch : Warning<
+ "type of property %0 does not match type of accessor %1">;
+def not_conv_function_declared_at : Note<"type conversion function declared here">;
+def note_method_declared_at : Note<"method %0 declared here">;
+def err_setter_type_void : Error<"type of setter must be void">;
+def err_duplicate_method_decl : Error<"duplicate declaration of method %0">;
+def warn_duplicate_method_decl :
+ Warning<"multiple declarations of method %0 found and ignored">,
+ InGroup<MethodDuplicate>, DefaultIgnore;
+def err_objc_var_decl_inclass :
+ Error<"cannot declare variable inside @interface or @protocol">;
+def error_missing_method_context : Error<
+ "missing context for method declaration">;
+def err_objc_property_attr_mutually_exclusive : Error<
+ "property attributes '%0' and '%1' are mutually exclusive">;
+def err_objc_property_requires_object : Error<
+ "property with '%0' attribute must be of object type">;
+def warn_objc_property_no_assignment_attribute : Warning<
+ "no 'assign', 'retain', or 'copy' attribute is specified - "
+ "'assign' is assumed">;
+def warn_objc_isa_use : Warning<
+ "direct access to objective-c's isa is deprecated "
+ "in favor of object_setClass() and object_getClass()">,
+ InGroup<DiagGroup<"deprecated-objc-isa-usage">>;
+def warn_objc_property_default_assign_on_object : Warning<
+ "default property attribute 'assign' not appropriate for non-gc object">;
+def warn_property_attr_mismatch : Warning<
+ "property attribute in continuation class does not match the primary class">;
+def warn_objc_property_copy_missing_on_block : Warning<
+ "'copy' attribute must be specified for the block property "
+ "when -fobjc-gc-only is specified">;
+def warn_objc_property_retain_of_block : Warning<
+ "retain'ed block property does not copy the block "
+ "- use copy attribute instead">, InGroup<ObjCRetainBlockProperty>;
+def warn_objc_readonly_property_has_setter : Warning<
+ "setter cannot be specified for a readonly property">,
+ InGroup<ObjCReadonlyPropertyHasSetter>;
+def warn_atomic_property_rule : Warning<
+ "writable atomic property %0 cannot pair a synthesized %select{getter|setter}1 "
+ "with a user defined %select{getter|setter}2">,
+ InGroup<DiagGroup<"atomic-property-with-user-defined-accessor">>;
+def note_atomic_property_fixup_suggest : Note<"setter and getter must both be "
+ "synthesized, or both be user defined,or the property must be nonatomic">;
+def err_atomic_property_nontrivial_assign_op : Error<
+ "atomic property of reference type %0 cannot have non-trivial assignment"
+ " operator">;
+def warn_owning_getter_rule : Warning<
+ "property's synthesized getter follows Cocoa naming"
+ " convention for returning 'owned' objects">,
+ InGroup<DiagGroup<"objc-property-matches-cocoa-ownership-rule">>;
+def warn_auto_synthesizing_protocol_property : Warning<
+ "auto property synthesis will not synthesize property"
+ " declared in a protocol">,
+ InGroup<DiagGroup<"objc-protocol-property-synthesis">>;
+def warn_property_getter_owning_mismatch : Warning<
+ "property declared as returning non-retained objects"
+ "; getter returning retained objects">;
+def err_ownin_getter_rule : Error<
+ "property's synthesized getter follows Cocoa naming"
+ " convention for returning 'owned' objects">;
+def warn_default_atomic_custom_getter_setter : Warning<
+ "atomic by default property %0 has a user defined %select{getter|setter}1 "
+ "(property should be marked 'atomic' if this is intended)">,
+ InGroup<CustomAtomic>, DefaultIgnore;
+def err_use_continuation_class : Error<
+ "illegal redeclaration of property in continuation class %0"
+ " (attribute must be 'readwrite', while its primary must be 'readonly')">;
+def err_type_mismatch_continuation_class : Error<
+ "type of property %0 in continuation class does not match "
+ "property type in primary class">;
+def err_use_continuation_class_redeclaration_readwrite : Error<
+ "illegal redeclaration of 'readwrite' property in continuation class %0"
+ " (perhaps you intended this to be a 'readwrite' redeclaration of a "
+ "'readonly' public property?)">;
+def err_continuation_class : Error<"continuation class has no primary class">;
+def err_property_type : Error<"property cannot have array or function type %0">;
+def error_missing_property_context : Error<
+ "missing context for property implementation declaration">;
+def error_bad_property_decl : Error<
+ "property implementation must have its declaration in interface %0">;
+def error_category_property : Error<
+ "property declared in category %0 cannot be implemented in "
+ "class implementation">;
+def note_property_declare : Note<
+ "property declared here">;
+def error_synthesize_category_decl : Error<
+ "@synthesize not allowed in a category's implementation">;
+def error_reference_property : Error<
+ "property of reference type is not supported">;
+def error_missing_property_interface : Error<
+ "property implementation in a category with no category declaration">;
+def error_bad_category_property_decl : Error<
+ "property implementation must have its declaration in the category %0">;
+def error_bad_property_context : Error<
+ "property implementation must be in a class or category implementation">;
+def error_missing_property_ivar_decl : Error<
+ "synthesized property %0 must either be named the same as a compatible"
+ " ivar or must explicitly name an ivar">;
+def error_synthesize_weak_non_arc_or_gc : Error<
+ "@synthesize of 'weak' property is only allowed in ARC or GC mode">;
+def err_arc_perform_selector_retains : Error<
+ "performSelector names a selector which retains the object">;
+def warn_arc_perform_selector_leaks : Warning<
+ "performSelector may cause a leak because its selector is unknown">,
+ InGroup<DiagGroup<"arc-performSelector-leaks">>;
+def err_gc_weak_property_strong_type : Error<
+ "weak attribute declared on a __strong type property in GC mode">;
+def warn_receiver_is_weak : Warning<
+ "weak receiver may be unpredictably null in ARC mode">,
+ InGroup<DiagGroup<"receiver-is-weak">>, DefaultIgnore;
+
+def error_synthesized_ivar_yet_not_supported : Error<
+ "instance variable synthesis not yet supported"
+ " (need to declare %0 explicitly)">;
+
+def error_property_ivar_type : Error<
+ "type of property %0 (%1) does not match type of ivar %2 (%3)">;
+def error_ivar_in_superclass_use : Error<
+ "property %0 attempting to use ivar %1 declared in super class %2">;
+def error_weak_property : Error<
+ "existing ivar %1 for __weak property %0 must be __weak">;
+def error_strong_property : Error<
+ "existing ivar %1 for strong property %0 may not be __weak">;
+def error_dynamic_property_ivar_decl : Error<
+ "dynamic property can not have ivar specification">;
+def error_duplicate_ivar_use : Error<
+ "synthesized properties %0 and %1 both claim ivar %2">;
+def error_property_implemented : Error<"property %0 is already implemented">;
+def warn_objc_property_attr_mutually_exclusive : Warning<
+ "property attributes '%0' and '%1' are mutually exclusive">,
+ InGroup<ReadOnlySetterAttrs>, DefaultIgnore;
+def warn_objc_missing_super_dealloc : Warning<
+ "method possibly missing a [super dealloc] call">,
+ InGroup<ObjCMissingSuperCalls>;
+def warn_objc_missing_super_finalize : Warning<
+ "method possibly missing a [super finalize] call">,
+ InGroup<ObjCMissingSuperCalls>;
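+// Editorial note (illustrative): in manual retain/release code, a -dealloc
+// override that never invokes [super dealloc] is flagged by
+// warn_objc_missing_super_dealloc.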
+def warn_undeclared_selector : Warning<
+ "undeclared selector %0">, InGroup<UndeclaredSelector>, DefaultIgnore;
+def warn_implicit_atomic_property : Warning<
+ "property is assumed atomic by default">, InGroup<ImplicitAtomic>, DefaultIgnore;
+def warn_auto_implicit_atomic_property : Warning<
+ "property is assumed atomic when auto-synthesizing the property">,
+ InGroup<ImplicitAtomic>, DefaultIgnore;
+def warn_unimplemented_selector : Warning<
+ "unimplemented selector %0">, InGroup<Selector>, DefaultIgnore;
+def warn_unimplemented_protocol_method : Warning<
+ "method %0 in protocol not implemented">, InGroup<Protocol>;
+
+// C++ declarations
+def err_static_assert_expression_is_not_constant : Error<
+ "static_assert expression is not an integral constant expression">;
+def err_static_assert_failed : Error<"static_assert failed %0">;
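+// For illustration (editor's note): 'static_assert(sizeof(int) >= 8, "too
+// small");' fails on typical targets and produces err_static_assert_failed
+// with the quoted message.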
+
+def warn_inline_namespace_reopened_noninline : Warning<
+ "inline namespace cannot be re-opened as a non-inline namespace">;
+def err_inline_namespace_mismatch : Error<
+ "%select{|non-}0inline namespace "
+ "cannot be reopened as %select{non-|}0inline">;
+
+def err_unexpected_friend : Error<
+ "friends can only be classes or functions">;
+def ext_enum_friend : ExtWarn<
+ "enumeration type %0 cannot be a friend">;
+def warn_cxx98_compat_enum_friend : Warning<
+ "befriending enumeration type %0 is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def ext_nonclass_type_friend : ExtWarn<
+ "non-class friend type %0 is a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_nonclass_type_friend : Warning<
+ "non-class friend type %0 is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_friend_is_member : Error<
+ "friends cannot be members of the declaring class">;
+def warn_cxx98_compat_friend_is_member : Warning<
+ "friend declaration naming a member of the declaring class is incompatible "
+ "with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
+def ext_unelaborated_friend_type : ExtWarn<
+ "unelaborated friend declaration is a C++11 extension; specify "
+ "'%select{struct|union|class|enum}0' to befriend %1">, InGroup<CXX11>;
+def warn_cxx98_compat_unelaborated_friend_type : Warning<
+ "befriending %1 without '%select{struct|union|class|enum}0' keyword is "
+ "incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
+def err_qualified_friend_not_found : Error<
+ "no function named %0 with type %1 was found in the specified scope">;
+def err_introducing_special_friend : Error<
+ "must use a qualified name when declaring a %select{constructor|"
+ "destructor|conversion operator}0 as a friend">;
+def err_tagless_friend_type_template : Error<
+ "friend type templates must use an elaborated type">;
+def err_no_matching_local_friend : Error<
+ "no matching function found in local scope">;
+def err_no_matching_local_friend_suggest : Error<
+ "no matching function %0 found in local scope; did you mean %2">;
+def err_partial_specialization_friend : Error<
+ "partial specialization cannot be declared as a friend">;
+def err_qualified_friend_def : Error<
+ "friend function definition cannot be qualified with '%0'">;
+def err_friend_def_in_local_class : Error<
+ "friend function cannot be defined in a local class">;
+
+def err_abstract_type_in_decl : Error<
+ "%select{return|parameter|variable|field}0 type %1 is an abstract class">;
+def err_allocation_of_abstract_type : Error<
+ "allocating an object of abstract class type %0">;
+def err_throw_abstract_type : Error<
+ "cannot throw an object of abstract type %0">;
+def err_array_of_abstract_type : Error<"array of abstract class type %0">;
+
+def err_multiple_final_overriders : Error<
+ "virtual function %q0 has more than one final overrider in %1">;
+def note_final_overrider : Note<"final overrider of %q0 in %1">;
+
+def err_type_defined_in_type_specifier : Error<
+ "%0 cannot be defined in a type specifier">;
+def err_type_defined_in_result_type : Error<
+ "%0 cannot be defined in the result type of a function">;
+def err_type_defined_in_param_type : Error<
+ "%0 cannot be defined in a parameter type">;
+def err_type_defined_in_alias_template : Error<
+ "%0 cannot be defined in a type alias template">;
+
+def note_pure_virtual_function : Note<
+ "unimplemented pure virtual method %0 in %1">;
+
+def err_deleted_decl_not_first : Error<
+ "deleted definition must be first declaration">;
+
+def warn_weak_vtable : Warning<
+ "%0 has no out-of-line virtual method definitions; its vtable will be "
+ "emitted in every translation unit">,
+ InGroup<DiagGroup<"weak-vtables">>, DefaultIgnore;
+def warn_weak_template_vtable : Warning<
+ "explicit template instantiation %0 will emit a vtable in every "
+ "translation unit">,
+ InGroup<DiagGroup<"weak-template-vtables">>, DefaultIgnore;
+
+def ext_using_undefined_std : ExtWarn<
+ "using directive refers to implicitly-defined namespace 'std'">;
+
+// C++ exception specifications
+def err_exception_spec_in_typedef : Error<
+ "exception specifications are not allowed in %select{typedefs|type aliases}0">;
+def err_distant_exception_spec : Error<
+ "exception specifications are not allowed beyond a single level "
+ "of indirection">;
+def err_incomplete_in_exception_spec : Error<
+ "%select{|pointer to |reference to }0incomplete type %1 is not allowed "
+ "in exception specification">;
+def err_mismatched_exception_spec : Error<
+ "exception specification in declaration does not match previous declaration">;
+def warn_mismatched_exception_spec : ExtWarn<
+ "exception specification in declaration does not match previous declaration">;
+def err_override_exception_spec : Error<
+ "exception specification of overriding function is more lax than "
+ "base version">;
+def warn_override_exception_spec : ExtWarn<
+ "exception specification of overriding function is more lax than "
+ "base version">, InGroup<Microsoft>;
+def err_incompatible_exception_specs : Error<
+ "target exception specification is not superset of source">;
+def err_deep_exception_specs_differ : Error<
+ "exception specifications of %select{return|argument}0 types differ">;
+def warn_missing_exception_specification : Warning<
+ "%0 is missing exception specification '%1'">;
+def err_noexcept_needs_constant_expression : Error<
+ "argument to noexcept specifier must be a constant expression">;
+def err_exception_spec_unknown : Error<
+ "exception specification is not available until end of class definition">;
+
+// C++ access checking
+def err_class_redeclared_with_different_access : Error<
+ "%0 redeclared with '%1' access">;
+def err_access : Error<
+ "%1 is a %select{private|protected}0 member of %3">, AccessControl;
+def ext_ms_using_declaration_inaccessible : ExtWarn<
+ "using declaration referring to inaccessible member '%0' (which refers "
+ "to accessible member '%1') is a Microsoft compatibility extension">,
+ AccessControl, InGroup<Microsoft>;
+def err_access_ctor : Error<
+ "calling a %select{private|protected}0 constructor of class %2">,
+ AccessControl;
+def ext_rvalue_to_reference_access_ctor : ExtWarn<
+ "C++98 requires an accessible copy constructor for class %2 when binding "
+ "a reference to a temporary; was %select{private|protected}0">,
+ AccessControl, InGroup<BindToTemporaryCopy>;
+def err_access_base_ctor : Error<
+ // The ERRORs represent other special members that aren't constructors, in
+ // hopes that someone will bother noticing and reporting if they appear
+ "%select{base class|inherited virtual base class}0 %1 has %select{private|"
+ "protected}3 %select{default |copy |move |*ERROR* |*ERROR* "
+ "|*ERROR*|}2constructor">, AccessControl;
+def err_access_field_ctor : Error<
+ // The ERRORs represent other special members that aren't constructors, in
+ // hopes that someone will bother noticing and reporting if they appear
+ "field of type %0 has %select{private|protected}2 "
+ "%select{default |copy |move |*ERROR* |*ERROR* |*ERROR* |}1constructor">,
+ AccessControl;
+
+def err_access_dtor : Error<
+ "calling a %select{private|protected}1 destructor of class %0">,
+ AccessControl;
+def err_access_dtor_base :
+ Error<"base class %0 has %select{private|protected}1 destructor">,
+ AccessControl;
+def err_access_dtor_vbase :
+ Error<"inherited virtual base class %0 has "
+ "%select{private|protected}1 destructor">,
+ AccessControl;
+def err_access_dtor_temp :
+ Error<"temporary of type %0 has %select{private|protected}1 destructor">,
+ AccessControl;
+def err_access_dtor_exception :
+ Error<"exception object of type %0 has %select{private|protected}1 "
+ "destructor">, AccessControl;
+def err_access_dtor_field :
+ Error<"field of type %1 has %select{private|protected}2 destructor">,
+ AccessControl;
+def err_access_dtor_var :
+ Error<"variable of type %1 has %select{private|protected}2 destructor">,
+ AccessControl;
+def err_access_dtor_ivar :
+ Error<"instance variable of type %0 has %select{private|protected}1 "
+ "destructor">,
+ AccessControl;
+def note_previous_access_declaration : Note<
+ "previously declared '%1' here">;
+def note_access_natural : Note<
+ "%select{|implicitly }1declared %select{private|protected}0 here">;
+def note_access_constrained_by_path : Note<
+ "constrained by %select{|implicitly }1%select{private|protected}0"
+ " inheritance here">;
+def note_access_protected_restricted_noobject : Note<
+ "must name member using the type of the current context %0">;
+def note_access_protected_restricted_ctordtor : Note<
+ "protected %select{constructor|destructor}0 can only be used to "
+ "%select{construct|destroy}0 a base class subobject">;
+def note_access_protected_restricted_object : Note<
+ "can only access this member on an object of type %0">;
+def warn_cxx98_compat_sfinae_access_control : Warning<
+ "substitution failure due to access control is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore, NoSFINAE;
+
+// C++ name lookup
+def err_incomplete_nested_name_spec : Error<
+ "incomplete type %0 named in nested name specifier">;
+def err_dependent_nested_name_spec : Error<
+ "nested name specifier for a declaration cannot depend on a template "
+ "parameter">;
+def err_nested_name_member_ref_lookup_ambiguous : Error<
+ "lookup of %0 in member access expression is ambiguous">;
+def ext_nested_name_member_ref_lookup_ambiguous : ExtWarn<
+ "lookup of %0 in member access expression is ambiguous; using member of %1">,
+ InGroup<AmbigMemberTemplate>;
+def note_ambig_member_ref_object_type : Note<
+ "lookup in the object type %0 refers here">;
+def note_ambig_member_ref_scope : Note<
+ "lookup from the current scope refers here">;
+def err_qualified_member_nonclass : Error<
+ "qualified member access refers to a member in %0">;
+def err_incomplete_member_access : Error<
+ "member access into incomplete type %0">;
+def err_incomplete_type : Error<
+ "incomplete type %0 where a complete type is required">;
+def warn_cxx98_compat_enum_nested_name_spec : Warning<
+ "enumeration type in nested name specifier is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+
+// C++ class members
+def err_storageclass_invalid_for_member : Error<
+ "storage class specified for a member declaration">;
+def err_mutable_function : Error<"'mutable' cannot be applied to functions">;
+def err_mutable_reference : Error<"'mutable' cannot be applied to references">;
+def err_mutable_const : Error<"'mutable' and 'const' cannot be mixed">;
+def err_mutable_nonmember : Error<
+ "'mutable' can only be applied to member variables">;
+def err_virtual_non_function : Error<
+ "'virtual' can only appear on non-static member functions">;
+def err_virtual_out_of_class : Error<
+ "'virtual' can only be specified inside the class definition">;
+def err_virtual_member_function_template : Error<
+ "'virtual' can not be specified on member function templates">;
+def err_static_overrides_virtual : Error<
+ "'static' member function %0 overrides a virtual function in a base class">;
+def err_explicit_non_function : Error<
+ "'explicit' can only appear on non-static member functions">;
+def err_explicit_out_of_class : Error<
+ "'explicit' can only be specified inside the class definition">;
+def err_explicit_non_ctor_or_conv_function : Error<
+ "'explicit' can only be applied to a constructor or conversion function">;
+def err_static_not_bitfield : Error<"static member %0 cannot be a bit-field">;
+def err_static_out_of_line : Error<
+ "'static' can only be specified inside the class definition">;
+def err_typedef_not_bitfield : Error<"typedef member %0 cannot be a bit-field">;
+def err_not_integral_type_bitfield : Error<
+ "bit-field %0 has non-integral type %1">;
+def err_not_integral_type_anon_bitfield : Error<
+ "anonymous bit-field has non-integral type %0">;
+def err_member_function_initialization : Error<
+ "initializer on function does not look like a pure-specifier">;
+def err_non_virtual_pure : Error<
+ "%0 is not virtual and cannot be declared pure">;
+def warn_pure_function_definition : ExtWarn<
+ "function definition with pure-specifier is a Microsoft extension">,
+ InGroup<Microsoft>;
+def err_implicit_object_parameter_init : Error<
+ "cannot initialize object parameter of type %0 with an expression "
+ "of type %1">;
+def err_qualified_member_of_unrelated : Error<
+ "%q0 is not a member of class %1">;
+
+def warn_call_to_pure_virtual_member_function_from_ctor_dtor : Warning<
+ "call to pure virtual member function %0; overrides of %0 in subclasses are "
+ "not available in the %select{constructor|destructor}1 of %2">;
+
+def note_field_decl : Note<"member is declared here">;
+def note_ivar_decl : Note<"ivar is declared here">;
+def note_bitfield_decl : Note<"bit-field is declared here">;
+def note_previous_decl : Note<"%0 declared here">;
+def note_member_synthesized_at : Note<
+ "implicit default %select{constructor|copy constructor|move constructor|copy "
+ "assignment operator|move assignment operator|destructor}0 for %1 first "
+ "required here">;
+def err_missing_default_ctor : Error<
+ "%select{|implicit default }0constructor for %1 must explicitly initialize "
+ "the %select{base class|member}2 %3 which does not have a default "
+ "constructor">;
+def err_illegal_union_or_anon_struct_member : Error<
+ "%select{anonymous struct|union}0 member %1 has a non-trivial "
+ "%select{constructor|copy constructor|move constructor|copy assignment "
+ "operator|move assignment operator|destructor}2">;
+def warn_cxx98_compat_nontrivial_union_or_anon_struct_member : Warning<
+ "%select{anonymous struct|union}0 member %1 with a non-trivial "
+ "%select{constructor|copy constructor|move constructor|copy assignment "
+ "operator|move assignment operator|destructor}2 is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def note_nontrivial_has_virtual : Note<
+ "because type %0 has a virtual %select{member function|base class}1">;
+def note_nontrivial_has_nontrivial : Note<
+ "because type %0 has a %select{member|base class}1 with a non-trivial "
+ "%select{constructor|copy constructor|move constructor|copy assignment "
+ "operator|move assignment operator|destructor}2">;
+def note_nontrivial_user_defined : Note<
+ "because type %0 has a user-declared %select{constructor|copy constructor|"
+ "move constructor|copy assignment operator|move assignment operator|"
+ "destructor}1">;
+def err_static_data_member_not_allowed_in_anon_struct : Error<
+ "static data member %0 not allowed in anonymous struct">;
+def ext_static_data_member_in_union : ExtWarn<
+ "static data member %0 in union is a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_static_data_member_in_union : Warning<
+ "static data member %0 in union is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_union_member_of_reference_type : Error<
+ "union member %0 has reference type %1">;
+def ext_anonymous_struct_union_qualified : Extension<
+ "anonymous %select{struct|union}0 cannot be '%select{const|volatile|"
+ "restrict}1'">;
+def err_different_return_type_for_overriding_virtual_function : Error<
+ "virtual function %0 has a different return type (%1) than the "
+ "function it overrides (which has return type %2)">;
+def note_overridden_virtual_function : Note<
+ "overridden virtual function is here">;
+
+def err_covariant_return_inaccessible_base : Error<
+ "invalid covariant return for virtual function: %1 is a "
+ "%select{private|protected}2 base class of %0">, AccessControl;
+def err_covariant_return_ambiguous_derived_to_base_conv : Error<
+ "return type of virtual function %3 is not covariant with the return type of "
+ "the function it overrides (ambiguous conversion from derived class "
+ "%0 to base class %1:%2)">;
+def err_covariant_return_not_derived : Error<
+ "return type of virtual function %0 is not covariant with the return type of "
+ "the function it overrides (%1 is not derived from %2)">;
+def err_covariant_return_incomplete : Error<
+ "return type of virtual function %0 is not covariant with the return type of "
+ "the function it overrides (%1 is incomplete)">;
+def err_covariant_return_type_different_qualifications : Error<
+ "return type of virtual function %0 is not covariant with the return type of "
+ "the function it overrides (%1 has different qualifiers than %2)">;
+def err_covariant_return_type_class_type_more_qualified : Error<
+ "return type of virtual function %0 is not covariant with the return type of "
+ "the function it overrides (class type %1 is more qualified than class "
+ "type %2">;
+
+// C++ constructors
+def err_constructor_cannot_be : Error<"constructor cannot be declared '%0'">;
+def err_invalid_qualified_constructor : Error<
+ "'%0' qualifier is not allowed on a constructor">;
+def err_ref_qualifier_constructor : Error<
+ "ref-qualifier '%select{&&|&}0' is not allowed on a constructor">;
+
+def err_constructor_return_type : Error<
+ "constructor cannot have a return type">;
+def err_constructor_redeclared : Error<"constructor cannot be redeclared">;
+def err_constructor_byvalue_arg : Error<
+ "copy constructor must pass its first argument by reference">;
+def warn_no_constructor_for_refconst : Warning<
+ "%select{struct|union|class|enum}0 %1 does not declare any constructor to "
+ "initialize its non-modifiable members">;
+def note_refconst_member_not_initialized : Note<
+ "%select{const|reference}0 member %1 will never be initialized">;
+def ext_ms_explicit_constructor_call : ExtWarn<
+ "explicit constructor calls are a Microsoft extension">, InGroup<Microsoft>;
+
+// C++ destructors
+def err_destructor_not_member : Error<
+ "destructor must be a non-static member function">;
+def err_destructor_cannot_be : Error<"destructor cannot be declared '%0'">;
+def err_invalid_qualified_destructor : Error<
+ "'%0' qualifier is not allowed on a destructor">;
+def err_ref_qualifier_destructor : Error<
+ "ref-qualifier '%select{&&|&}0' is not allowed on a destructor">;
+def err_destructor_return_type : Error<"destructor cannot have a return type">;
+def err_destructor_redeclared : Error<"destructor cannot be redeclared">;
+def err_destructor_with_params : Error<"destructor cannot have any parameters">;
+def err_destructor_variadic : Error<"destructor cannot be variadic">;
+def err_destructor_typedef_name : Error<
+ "destructor cannot be declared using a %select{typedef|type alias}1 %0 of the class name">;
+def err_destructor_name : Error<
+ "expected the class name after '~' to name the enclosing class">;
+def err_destructor_class_name : Error<
+ "expected the class name after '~' to name a destructor">;
+def err_ident_in_dtor_not_a_type : Error<
+ "identifier %0 in object destruction expression does not name a type">;
+def err_destructor_expr_type_mismatch : Error<
+ "destructor type %0 in object destruction expression does not match the "
+ "type %1 of the object being destroyed">;
+def note_destructor_type_here : Note<
+ "type %0 is declared here">;
+
+def err_destructor_template : Error<
+ "destructor cannot be declared as a template">;
+
+// C++ initialization
+def err_init_conversion_failed : Error<
+ "cannot initialize %select{a variable|a parameter|return object|an "
+ "exception object|a member subobject|an array element|a new value|a value|a "
+ "base class|a constructor delegation|a vector element}0 of type %1 with an "
+ "%select{rvalue|lvalue}2 of type %3"
+ "%select{|: different classes (%5 vs %6)"
+ "|: different number of parameters (%5 vs %6)"
+ "|: type mismatch at %ordinal5 parameter (%6 vs %7)"
+ "|: different return type (%5 vs %6)"
+ "|: different qualifiers ("
+ "%select{none|const|restrict|const and restrict|volatile|const and volatile|"
+ "volatile and restrict|const, volatile, and restrict}5 vs "
+ "%select{none|const|restrict|const and restrict|volatile|const and volatile|"
+ "volatile and restrict|const, volatile, and restrict}6)}4">;
+
+def err_lvalue_to_rvalue_ref : Error<"rvalue reference to type %0 cannot bind "
+ "to lvalue of type %1">;
+def err_lvalue_reference_bind_to_initlist : Error<
+ "%select{non-const|volatile}0 lvalue reference to type %1 cannot bind to an "
+ "initializer list temporary">;
+def err_lvalue_reference_bind_to_temporary : Error<
+ "%select{non-const|volatile}0 lvalue reference to type %1 cannot bind to a "
+ "temporary of type %2">;
+def err_lvalue_reference_bind_to_unrelated : Error<
+ "%select{non-const|volatile}0 lvalue reference to type %1 cannot bind to a "
+ "value of unrelated type %2">;
+def err_reference_bind_drops_quals : Error<
+ "binding of reference to type %0 to a value of type %1 drops qualifiers">;
+def err_reference_bind_failed : Error<
+ "reference to type %0 could not bind to an %select{rvalue|lvalue}1 of type "
+ "%2">;
+def err_reference_bind_init_list : Error<
+ "reference to type %0 cannot bind to an initializer list">;
+def err_init_list_bad_dest_type : Error<
+ "%select{|non-aggregate }0type %1 cannot be initialized with an initializer "
+ "list">;
+def err_member_function_call_bad_cvr : Error<"member function %0 not viable: "
+ "'this' argument has type %1, but function is not marked "
+ "%select{const|restrict|const or restrict|volatile|const or volatile|"
+ "volatile or restrict|const, volatile, or restrict}2">;
+
+def err_reference_bind_to_bitfield : Error<
+ "%select{non-const|volatile}0 reference cannot bind to bit-field %1">;
+def err_reference_bind_to_vector_element : Error<
+ "%select{non-const|volatile}0 reference cannot bind to vector element">;
+def err_reference_var_requires_init : Error<
+ "declaration of reference variable %0 requires an initializer">;
+def err_reference_without_init : Error<
+ "reference to type %0 requires an initializer">;
+def err_reference_has_multiple_inits : Error<
+ "reference cannot be initialized with multiple values">;
+def err_init_non_aggr_init_list : Error<
+ "initialization of non-aggregate type %0 with an initializer list">;
+def err_init_reference_member_uninitialized : Error<
+ "reference member of type %0 uninitialized">;
+def note_uninit_reference_member : Note<
+ "uninitialized reference member is here">;
+def warn_field_is_uninit : Warning<"field is uninitialized when used here">,
+ InGroup<Uninitialized>;
+def warn_uninit_self_reference_in_init : Warning<
+ "variable %0 is uninitialized when used within its own initialization">,
+ InGroup<Uninitialized>;
+def warn_uninit_var : Warning<
+ "variable %0 is uninitialized when used here">,
+ InGroup<Uninitialized>, DefaultIgnore;
+def warn_maybe_uninit_var :
+ Warning<"variable %0 may be uninitialized when used here">,
+ InGroup<UninitializedMaybe>, DefaultIgnore;
+def note_uninit_var_def : Note<
+ "variable %0 is declared here">;
+def warn_uninit_var_captured_by_block : Warning<
+ "variable %0 is uninitialized when captured by block">,
+ InGroup<Uninitialized>, DefaultIgnore;
+def warn_maybe_uninit_var_captured_by_block : Warning<
+ "variable %0 may be uninitialized when captured by block">,
+ InGroup<UninitializedMaybe>, DefaultIgnore;
+def warn_uninit_byref_blockvar_captured_by_block : Warning<
+ "block pointer variable %0 is uninitialized when captured by block">,
+ InGroup<Uninitialized>, DefaultIgnore;
+def note_block_var_fixit_add_initialization : Note<
+ "maybe you meant to use __block %0">;
+def note_var_fixit_add_initialization : Note<
+ "initialize the variable %0 to silence this warning">;
+def err_init_incomplete_type : Error<"initialization of incomplete type %0">;
+
+def err_temp_copy_no_viable : Error<
+ "no viable constructor %select{copying variable|copying parameter|"
+ "returning object|throwing object|copying member subobject|copying array "
+ "element|allocating object|copying temporary|initializing base subobject|"
+ "initializing vector element|capturing value}0 of type %1">;
+def ext_rvalue_to_reference_temp_copy_no_viable : ExtWarn<
+ "no viable constructor %select{copying variable|copying parameter|"
+ "returning object|throwing object|copying member subobject|copying array "
+ "element|allocating object|copying temporary|initializing base subobject|"
+ "initializing vector element|capturing value}0 of type %1; C++98 requires a copy "
+ "constructor when binding a reference to a temporary">,
+ InGroup<BindToTemporaryCopy>;
+def err_temp_copy_ambiguous : Error<
+ "ambiguous constructor call when %select{copying variable|copying "
+ "parameter|returning object|throwing object|copying member subobject|copying "
+ "array element|allocating object|copying temporary|initializing base subobject|"
+ "initializing vector element|capturing value}0 of type %1">;
+def err_temp_copy_deleted : Error<
+ "%select{copying variable|copying parameter|returning object|throwing "
+ "object|copying member subobject|copying array element|allocating object|"
+ "copying temporary|initializing base subobject|initializing vector element|"
+ "capturing value}0 of type %1 invokes deleted constructor">;
+def err_temp_copy_incomplete : Error<
+ "copying a temporary object of incomplete type %0">;
+def warn_cxx98_compat_temp_copy : Warning<
+ "%select{copying variable|copying parameter|returning object|throwing "
+ "object|copying member subobject|copying array element|allocating object|"
+ "copying temporary|initializing base subobject|initializing vector element}1 "
+ "of type %2 when binding a reference to a temporary would %select{invoke "
+ "an inaccessible constructor|find no viable constructor|find ambiguous "
+ "constructors|invoke a deleted constructor}0 in C++98">,
+ InGroup<CXX98CompatBindToTemporaryCopy>, DefaultIgnore;
+def err_selected_explicit_constructor : Error<
+ "chosen constructor is explicit in copy-initialization">;
+def note_constructor_declared_here : Note<
+ "constructor declared here">;
+
+// C++11 decltype
+def err_decltype_in_declarator : Error<
+ "'decltype' cannot be used to name a declaration">;
+
+// C++11 auto
+def warn_cxx98_compat_auto_type_specifier : Warning<
+ "'auto' type specifier is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_auto_variable_cannot_appear_in_own_initializer : Error<
+ "variable %0 declared with 'auto' type cannot appear in its own initializer">;
+def err_illegal_decl_array_of_auto : Error<
+ "'%0' declared as array of %1">;
+def err_new_array_of_auto : Error<
+ "cannot allocate array of 'auto'">;
+def err_auto_not_allowed : Error<
+ "'auto' not allowed %select{in function prototype|in non-static struct member"
+ "|in non-static union member|in non-static class member|in exception declaration"
+ "|in template parameter|in block literal|in template argument"
+ "|in typedef|in type alias|in function return type|here}0">;
+def err_auto_var_requires_init : Error<
+ "declaration of variable %0 with type %1 requires an initializer">;
+def err_auto_new_requires_ctor_arg : Error<
+ "new expression for type %0 requires a constructor argument">;
+def err_auto_new_requires_parens : Error<
+ "new expression for type %0 cannot use list-initialization">;
+def err_auto_var_init_no_expression : Error<
+ "initializer for variable %0 with type %1 is empty">;
+def err_auto_var_init_multiple_expressions : Error<
+ "initializer for variable %0 with type %1 contains multiple expressions">;
+def err_auto_new_ctor_multiple_expressions : Error<
+ "new expression for type %0 contains multiple constructor arguments">;
+def err_auto_missing_trailing_return : Error<
+ "'auto' return without trailing return type">;
+def err_trailing_return_without_auto : Error<
+ "function with trailing return type must specify return type 'auto', not %0">;
+def err_trailing_return_in_parens : Error<
+ "trailing return type may not be nested within parentheses">;
+def err_auto_var_deduction_failure : Error<
+ "variable %0 with type %1 has incompatible initializer of type %2">;
+def err_auto_var_deduction_failure_from_init_list : Error<
+ "cannot deduce actual type for variable %0 with type %1 from initializer list">;
+def err_auto_new_deduction_failure : Error<
+ "new expression for type %0 has incompatible constructor argument of type %1">;
+def err_auto_different_deductions : Error<
+ "'auto' deduced as %0 in declaration of %1 and deduced as %2 in declaration of %3">;
+def err_implied_std_initializer_list_not_found : Error<
+ "cannot deduce type of initializer list because std::initializer_list was "
+ "not found; include <initializer_list>">;
+def err_malformed_std_initializer_list : Error<
+ "std::initializer_list must be a class template with a single type parameter">;
+def warn_dangling_std_initializer_list : Warning<
+ "array backing the initializer list will be destroyed at the end of "
+ "%select{the full-expression|the constructor}0">,
+ InGroup<DiagGroup<"dangling-initializer-list">>;
+
+// C++11 override control
+def override_keyword_only_allowed_on_virtual_member_functions : Error<
+ "only virtual member functions can be marked '%0'">;
+def err_function_marked_override_not_overriding : Error<
+ "%0 marked 'override' but does not override any member functions">;
+def err_class_marked_final_used_as_base : Error<
+ "base %0 is marked 'final'">;
+
+// C++11 attributes
+def err_repeat_attribute : Error<"'%0' attribute cannot be repeated">;
+
+// C++11 final
+def err_final_function_overridden : Error<
+ "declaration of %0 overrides a 'final' function">;
+
+// C++11 scoped enumerations
+def err_enum_invalid_underlying : Error<
+ "non-integral type %0 is an invalid underlying type">;
+def err_enumerator_too_large : Error<
+ "enumerator value is not representable in the underlying type %0">;
+def ext_enumerator_too_large : ExtWarn<
+ "enumerator value is not representable in the underlying type %0">,
+ InGroup<Microsoft>;
+def err_enumerator_wrapped : Error<
+ "enumerator value %0 is not representable in the underlying type %1">;
+def err_enum_redeclare_type_mismatch : Error<
+ "enumeration redeclared with different underlying type %0 (was %1)">;
+def err_enum_redeclare_fixed_mismatch : Error<
+ "enumeration previously declared with %select{non|}0fixed underlying type">;
+def err_enum_redeclare_scoped_mismatch : Error<
+ "enumeration previously declared as %select{un|}0scoped">;
+def err_enum_class_reference : Error<
+ "reference to %select{|scoped }0enumeration must use 'enum' "
+ "not 'enum class'">;
+def err_only_enums_have_underlying_types : Error<
+ "only enumeration types have underlying types">;
+
+// C++11 delegating constructors
+def err_delegating_ctor : Error<
+ "delegating constructors are permitted only in C++11">;
+def warn_cxx98_compat_delegating_ctor : Warning<
+ "delegating constructors are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_delegating_initializer_alone : Error<
+ "an initializer for a delegating constructor must appear alone">;
+def warn_delegating_ctor_cycle : Warning<
+ "constructor for %0 creates a delegation cycle">, DefaultError,
+ InGroup<DelegatingCtorCycles>;
+def note_it_delegates_to : Note<
+ "it delegates to">, InGroup<DelegatingCtorCycles>;
+def note_which_delegates_to : Note<
+ "which delegates to">, InGroup<DelegatingCtorCycles>;
+
+// C++11 range-based for loop
+def err_for_range_decl_must_be_var : Error<
+ "for range declaration must declare a variable">;
+def err_for_range_storage_class : Error<
+ "loop variable %0 may not be declared %select{'extern'|'static'|"
+ "'__private_extern__'|'auto'|'register'|'constexpr'}1">;
+def err_type_defined_in_for_range : Error<
+ "types may not be defined in a for range declaration">;
+def err_for_range_deduction_failure : Error<
+ "cannot use type %0 as a range">;
+def err_for_range_incomplete_type : Error<
+ "cannot use incomplete type %0 as a range">;
+def err_for_range_iter_deduction_failure : Error<
+ "cannot use type %0 as an iterator">;
+def err_for_range_member_begin_end_mismatch : Error<
+ "range type %0 has '%select{begin|end}1' member but no '%select{end|begin}1' member">;
+def err_for_range_begin_end_types_differ : Error<
+ "'begin' and 'end' must return the same type (got %0 and %1)">;
+def note_for_range_type : Note<"range has type %0">;
+def note_for_range_begin_end : Note<
+ "selected '%select{begin|end}0' %select{function|template }1%2 with iterator type %3">;
+
+// C++11 constexpr
+def warn_cxx98_compat_constexpr : Warning<
+ "'constexpr' specifier is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_invalid_constexpr : Error<
+ "%select{function parameter|typedef|non-static data member}0 "
+ "cannot be constexpr">;
+def err_constexpr_tag : Error<
+ "%select{class|struct|union|enum}0 cannot be marked constexpr">;
+def err_constexpr_dtor : Error<"destructor cannot be marked constexpr">;
+def err_constexpr_no_declarators : Error<
+ "constexpr can only be used in variable and function declarations">;
+def err_invalid_constexpr_var_decl : Error<
+ "constexpr variable declaration must be a definition">;
+def err_constexpr_static_mem_var_requires_init : Error<
+ "declaration of constexpr static data member %0 requires an initializer">;
+def err_constexpr_var_non_literal : Error<
+ "constexpr variable cannot have non-literal type %0">;
+def err_constexpr_var_requires_const_init : Error<
+ "constexpr variable %0 must be initialized by a constant expression">;
+def err_constexpr_redecl_mismatch : Error<
+ "%select{non-constexpr declaration of %0 follows constexpr declaration"
+ "|constexpr declaration of %0 follows non-constexpr declaration}1">;
+def err_constexpr_virtual : Error<"virtual function cannot be constexpr">;
+def err_constexpr_virtual_base : Error<
+ "constexpr %select{member function|constructor}0 not allowed in "
+ "%select{class|struct}1 with virtual base %plural{1:class|:classes}2">;
+def note_non_literal_virtual_base : Note<"%select{class|struct}0 with virtual "
+ "base %plural{1:class|:classes}1 is not a literal type">;
+def note_constexpr_virtual_base_here : Note<"virtual base class declared here">;
+def err_constexpr_non_literal_return : Error<
+ "constexpr function's return type %0 is not a literal type">;
+def err_constexpr_non_literal_param : Error<
+ "constexpr %select{function|constructor}1's %ordinal0 parameter type %2 is "
+ "not a literal type">;
+def err_constexpr_body_invalid_stmt : Error<
+ "statement not allowed in constexpr %select{function|constructor}0">;
+def err_constexpr_type_definition : Error<
+ "types cannot be defined in a constexpr %select{function|constructor}0">;
+def err_constexpr_vla : Error<
+ "variably-modified type %0 cannot be used in a constexpr "
+ "%select{function|constructor}1">;
+def err_constexpr_var_declaration : Error<
+ "variables cannot be declared in a constexpr %select{function|constructor}0">;
+def err_constexpr_function_never_constant_expr : ExtWarn<
+ "constexpr %select{function|constructor}0 never produces a "
+ "constant expression">, InGroup<DiagGroup<"invalid-constexpr">>, DefaultError;
+def err_constexpr_body_no_return : Error<
+ "no return statement in constexpr function">;
+def err_constexpr_body_multiple_return : Error<
+ "multiple return statements in constexpr function">;
+def note_constexpr_body_previous_return : Note<
+ "previous return statement is here">;
+def err_constexpr_function_try_block : Error<
+ "function try block not allowed in constexpr %select{function|constructor}0">;
+def err_constexpr_union_ctor_no_init : Error<
+ "constexpr union constructor does not initialize any member">;
+def err_constexpr_ctor_missing_init : Error<
+ "constexpr constructor must initialize all members">;
+def note_constexpr_ctor_missing_init : Note<
+ "member not initialized by constructor">;
+def err_constexpr_method_non_literal : Error<
+ "non-literal type %0 cannot have constexpr members">;
+def note_non_literal_no_constexpr_ctors : Note<
+ "%0 is not literal because it is not an aggregate and has no constexpr "
+ "constructors other than copy or move constructors">;
+def note_non_literal_base_class : Note<
+ "%0 is not literal because it has base class %1 of non-literal type">;
+def note_non_literal_field : Note<
+ "%0 is not literal because it has data member %1 of "
+ "%select{non-literal|volatile}3 type %2">;
+def note_non_literal_user_provided_dtor : Note<
+ "%0 is not literal because it has a user-provided destructor">;
+def note_non_literal_nontrivial_dtor : Note<
+ "%0 is not literal because it has a non-trivial destructor">;
+
+// C++11 char16_t/char32_t
+def warn_cxx98_compat_unicode_type : Warning<
+ "'%0' type specifier is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+
+// Objective-C++
+def err_objc_decls_may_only_appear_in_global_scope : Error<
+ "Objective-C declarations may only appear in global scope">;
+// Attributes
+def err_nsobject_attribute : Error<
+ "__attribute ((NSObject)) is for pointer types only">;
+def err_attribute_can_be_applied_only_to_symbol_declaration : Error<
+ "%0 attribute can be applied only to symbol declaration">;
+def err_attributes_are_not_compatible : Error<
+ "%0 and %1 attributes are not compatible">;
+def err_attribute_wrong_number_arguments : Error<
+ "attribute %plural{0:takes no arguments|1:takes one argument|"
+ ":requires exactly %0 arguments}0">;
+def err_attribute_too_many_arguments : Error<
+ "attribute takes no more than %0 argument%s0">;
+def err_suppress_autosynthesis : Error<
+ "objc_requires_property_definitions attribute may only be specified on a "
+ "class declaration">;
+def err_attribute_too_few_arguments : Error<
+ "attribute takes at least %0 argument%s0">;
+def err_attribute_missing_parameter_name : Error<
+ "attribute requires unquoted parameter">;
+def err_attribute_invalid_vector_type : Error<"invalid vector element type %0">;
+def err_attribute_bad_neon_vector_size : Error<
+ "Neon vector size must be 64 or 128 bits">;
+def err_attribute_argument_not_int : Error<
+ "'%0' attribute requires integer constant">;
+def err_attribute_argument_not_class : Error<
+ "%0 attribute requires arguments that are class type or point to class type">;
+def err_attribute_first_argument_not_int_or_bool : Error<
+ "%0 attribute first argument must be of int or bool type">;
+def err_attribute_argument_outof_range : Error<
+ "init_priority attribute requires integer constant between "
+ "101 and 65535 inclusive">;
+def err_init_priority_object_attr : Error<
+ "can only use 'init_priority' attribute on file-scope definitions "
+ "of objects of class type">;
+def err_attribute_argument_n_not_int : Error<
+ "'%0' attribute requires parameter %1 to be an integer constant">;
+def err_attribute_argument_n_not_string : Error<
+ "'%0' attribute requires parameter %1 to be a string">;
+def err_attribute_argument_out_of_bounds : Error<
+ "'%0' attribute parameter %1 is out of bounds">;
+def err_attribute_requires_objc_interface : Error<
+ "attribute may only be applied to an Objective-C interface">;
+def err_attribute_uuid_malformed_guid : Error<
+ "uuid attribute contains a malformed GUID">;
+def warn_nonnull_pointers_only : Warning<
+ "nonnull attribute only applies to pointer arguments">;
+def err_attribute_invalid_implicit_this_argument : Error<
+ "'%0' attribute is invalid for the implicit this argument">;
+def err_ownership_type : Error<
+ "%0 attribute only applies to %1 arguments">;
+def err_format_strftime_third_parameter : Error<
+ "strftime format attribute requires 3rd parameter to be 0">;
+def err_format_attribute_requires_variadic : Error<
+ "format attribute requires variadic function">;
+def err_format_attribute_not : Error<"format argument not %0">;
+def err_format_attribute_result_not : Error<"function does not return %0">;
+def err_format_attribute_implicit_this_format_string : Error<
+ "format attribute cannot specify the implicit this argument as the format "
+ "string">;
+def warn_unknown_method_family : Warning<"unrecognized method family">;
+def err_init_method_bad_return_type : Error<
+ "init methods must return an object pointer type, not %0">;
+def err_attribute_invalid_size : Error<
+ "vector size not an integral multiple of component size">;
+def err_attribute_zero_size : Error<"zero vector size">;
+def err_typecheck_vector_not_convertable : Error<
+ "can't convert between vector values of different size (%0 and %1)">;
+def err_typecheck_ext_vector_not_typedef : Error<
+ "ext_vector_type only applies to types, not variables">;
+def err_ext_vector_component_exceeds_length : Error<
+ "vector component access exceeds type %0">;
+def err_ext_vector_component_name_illegal : Error<
+ "illegal vector component name '%0'">;
+def err_attribute_address_space_not_int : Error<
+ "address space attribute requires an integer constant">;
+def err_attribute_address_space_negative : Error<
+ "address space is negative">;
+def err_attribute_address_space_too_high : Error<
+ "address space is larger than the maximum supported (%0)">;
+def err_attribute_address_multiple_qualifiers : Error<
+ "multiple address spaces specified for type">;
+def err_attribute_address_function_type : Error<
+ "function type may not be qualified with an address space">;
+def err_as_qualified_auto_decl : Error<
+ "automatic variable qualified with an address space">;
+def err_arg_with_address_space : Error<
+ "parameter may not be qualified with an address space">;
+def err_attr_objc_ownership_redundant : Error<
+ "the type %0 is already explicitly ownership-qualified">;
+def err_attribute_not_string : Error<
+ "argument to %0 attribute was not a string literal">;
+def err_undeclared_nsnumber : Error<
+ "NSNumber must be available to use Objective-C literals">;
+def err_invalid_nsnumber_type : Error<
+ "%0 is not a valid literal type for NSNumber">;
+def err_undeclared_nsarray : Error<
+ "NSArray must be available to use Objective-C array literals">;
+def err_undeclared_nsdictionary : Error<
+ "NSDictionary must be available to use Objective-C dictionary "
+ "literals">;
+def err_undeclared_arraywithobjects : Error<
+ "declaration of %0 is missing in NSArray class">;
+def err_undeclared_dictwithobjects : Error<
+ "declaration of %0 is missing in NSDictionary class">;
+def err_undeclared_nsnumber_method : Error<
+ "declaration of %0 is missing in NSNumber class">;
+def err_objc_literal_method_sig : Error<
+ "literal construction method %0 has incompatible signature">;
+def note_objc_literal_method_param : Note<
+ "%select{first|second|third}0 parameter has unexpected type %1 "
+ "(should be %2)">;
+def note_objc_literal_method_return : Note<
+ "method returns unexpected type %0 (should be an object type)">;
+def err_invalid_collection_element : Error<
+ "collection element of type %0 is not an Objective-C object">;
+def err_box_literal_collection : Error<
+ "%select{string|character|boolean|numeric}0 literal must be prefixed by '@' "
+ "in a collection">;
+
+let CategoryName = "Cocoa API Issue" in {
+def warn_objc_redundant_literal_use : Warning<
+ "using %0 with a literal is redundant">, InGroup<ObjCRedundantLiteralUse>;
+}
+
+def warn_bool_for_boolean_literal : Warning<
+ "BOOL of type %0 is non-integral and unsuitable for a "
+ "boolean literal - ignored">, InGroup<DiagGroup<"numeric-literals">>;
+def err_only_annotate_after_access_spec : Error<
+ "access specifier can only have annotation attributes">;
+def err_attribute_section_invalid_for_target : Error<
+ "argument to 'section' attribute is not valid for this target: %0">;
+def err_attribute_section_local_variable : Error<
+ "'section' attribute is not valid on local variables">;
+def err_attribute_aligned_not_power_of_two : Error<
+ "requested alignment is not a power of 2">;
+def warn_redeclaration_without_attribute_prev_attribute_ignored : Warning<
+ "'%0' redeclared without %1 attribute: previous %1 ignored">;
+def warn_attribute_ignored : Warning<"%0 attribute ignored">;
+def warn_unknown_attribute_ignored : Warning<
+ "unknown attribute %0 ignored">, InGroup<UnknownAttributes>;
+def warn_declspec_attribute_ignored : Warning<
+ "attribute %0 is ignored, place it after "
+ "\"%select{class|struct|union|enum}1\" to apply attribute to type declaration">,
+ InGroup<IgnoredAttributes>;
+def warn_attribute_precede_definition : Warning<
+ "attribute declaration must precede definition">;
+def warn_attribute_void_function_method : Warning<
+ "attribute %0 cannot be applied to "
+ "%select{functions|Objective-C method}1 without return value">;
+def warn_attribute_weak_on_field : Warning<
+ "__weak attribute cannot be specified on a field declaration">;
+def warn_gc_attribute_weak_on_local : Warning<
+ "Objective-C GC does not allow weak variables on the stack">;
+def warn_nsobject_attribute : Warning<
+ "__attribute ((NSObject)) may be put on a typedef only, "
+ "attribute is ignored">, InGroup<NSobjectAttribute>;
+def warn_attribute_weak_on_local : Warning<
+ "__weak attribute cannot be specified on an automatic variable">;
+def warn_weak_identifier_undeclared : Warning<
+ "weak identifier %0 never declared">;
+def err_attribute_weak_static : Error<
+ "weak declaration cannot have internal linkage">;
+def warn_attribute_weak_import_invalid_on_definition : Warning<
+ "'weak_import' attribute cannot be specified on a definition">;
+def err_attribute_weakref_not_static : Error<
+ "weakref declaration must have internal linkage">;
+def err_attribute_weakref_not_global_context : Error<
+ "weakref declaration of '%0' must be in a global context">;
+def err_attribute_weakref_without_alias : Error<
+ "weakref declaration of '%0' must also have an alias attribute">;
+def err_alias_not_supported_on_darwin : Error <
+ "only weak aliases are supported on darwin">;
+def warn_attribute_wrong_decl_type : Warning<
+ "%0 attribute only applies to %select{functions|unions|"
+ "variables and functions|functions and methods|parameters|"
+ "functions, methods and blocks|functions, methods, and parameters|"
+ "classes|variables|methods|variables, functions and labels|"
+ "fields and global variables|structs}1">;
+def err_attribute_wrong_decl_type : Error<
+ "%0 attribute only applies to %select{functions|unions|"
+ "variables and functions|functions and methods|parameters|"
+ "functions, methods and blocks|functions, methods, and parameters|"
+ "classes|variables|methods|variables, functions and labels|"
+ "fields and global variables|structs}1">;
+def warn_function_attribute_wrong_type : Warning<
+ "'%0' only applies to function types; type here is %1">;
+def warn_pointer_attribute_wrong_type : Warning<
+ "'%0' only applies to pointer types; type here is %1">;
+def warn_objc_object_attribute_wrong_type : Warning<
+ "'%0' only applies to objective-c object or block pointer types; type here is %1">;
+def warn_gnu_inline_attribute_requires_inline : Warning<
+ "'gnu_inline' attribute requires function to be marked 'inline',"
+ " attribute ignored">;
+def err_attribute_vecreturn_only_vector_member : Error<
+ "the vecreturn attribute can only be used on a class or structure with one "
+ "member, which must be a vector">;
+def err_attribute_vecreturn_only_pod_record : Error<
+ "the vecreturn attribute can only be used on a POD (plain old data) class or "
+ "structure (i.e. no virtual functions)">;
+def err_cconv_change : Error<
+ "function declared '%0' here was previously declared "
+ "%select{'%2'|without calling convention}1">;
+def err_cconv_knr : Error<
+ "function with no prototype cannot use %0 calling convention">;
+def err_cconv_varargs : Error<
+ "variadic function cannot use %0 calling convention">;
+def err_regparm_mismatch : Error<"function declared with regparm(%0) "
+ "attribute was previously declared "
+ "%plural{0:without the regparm|:with the regparm(%1)}1 attribute">;
+def err_returns_retained_mismatch : Error<
+ "function declared with the ns_returns_retained attribute "
+ "was previously declared without the ns_returns_retained attribute">;
+def err_objc_precise_lifetime_bad_type : Error<
+ "objc_precise_lifetime only applies to retainable types; type here is %0">;
+def warn_objc_precise_lifetime_meaningless : Error<
+ "objc_precise_lifetime is not meaningful for "
+ "%select{__unsafe_unretained|__autoreleasing}0 objects">;
+def err_invalid_pcs : Error<"invalid PCS type">;
+def err_attribute_can_be_applied_only_to_value_decl : Error<
+ "%0 attribute can only be applied to value declarations">;
+def warn_attribute_not_on_decl : Error<
+ "%0 attribute ignored when parsing type">;
+
+
+// Availability attribute
+def warn_availability_unknown_platform : Warning<
+ "unknown platform %0 in availability macro">;
+def warn_availability_version_ordering : Warning<
+ "feature cannot be %select{introduced|deprecated|obsoleted}0 in %1 version "
+ "%2 before it was %select{introduced|deprecated|obsoleted}3 in version %4; "
+ "attribute ignored">;
+
+// Thread Safety Attributes
+// Errors when parsing the attributes
+def err_attribute_argument_out_of_range : Error<
+ "%0 attribute parameter %1 is out of bounds: "
+ "%plural{0:no parameters to index into|"
+ "1:can only be 1, since there is one parameter|"
+ ":must be between 1 and %2}2">;
+def warn_attribute_argument_not_lockable : Warning<
+ "%0 attribute requires arguments whose type is annotated "
+ "with 'lockable' attribute; type here is '%1'">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+def warn_attribute_decl_not_lockable : Warning<
+ "%0 attribute can only be applied in a context annotated "
+ "with 'lockable' attribute">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+def warn_attribute_argument_not_class : Warning<
+ "%0 attribute requires arguments that are class type or point to"
+ " class type; type here is '%1'">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+def warn_unlock_but_no_lock : Warning<
+ "unlocking '%0' that was not locked">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+def warn_double_lock : Warning<
+ "locking '%0' that is already locked">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+def warn_no_unlock : Warning<
+ "mutex '%0' is still locked at the end of function">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+// FIXME: improve the error message about locks not in scope
+def warn_lock_some_predecessors : Warning<
+ "mutex '%0' is not locked on every path through here">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+def warn_expecting_lock_held_on_loop : Warning<
+ "expecting mutex '%0' to be locked at start of each loop">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+def note_locked_here : Note<"mutex acquired here">;
+def warn_lock_exclusive_and_shared : Warning<
+ "mutex '%0' is locked exclusively and shared in the same scope">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+def note_lock_exclusive_and_shared : Note<
+ "the other lock of mutex '%0' is here">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+def warn_variable_requires_lock : Warning<
+ "%select{reading|writing}2 variable '%0' requires locking "
+ "%select{'%1'|'%1' exclusively}2">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+def warn_var_deref_requires_lock : Warning<
+ "%select{reading|writing}2 the value pointed to by '%0' requires locking "
+ "%select{'%1'|'%1' exclusively}2">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+def warn_variable_requires_any_lock : Warning<
+ "%select{reading|writing}1 variable '%0' requires locking "
+ "%select{any mutex|any mutex exclusively}1">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+def warn_var_deref_requires_any_lock : Warning<
+ "%select{reading|writing}1 the value pointed to by '%0' requires locking "
+ "%select{any mutex|any mutex exclusively}1">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+def warn_fun_requires_lock : Warning<
+ "calling function '%0' requires %select{shared|exclusive}2 lock on '%1'">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+def warn_fun_excludes_mutex : Warning<
+ "cannot call function '%0' while mutex '%1' is locked">,
+ InGroup<ThreadSafety>, DefaultIgnore;
+def warn_cannot_resolve_lock : Warning<
+ "cannot resolve lock expression">,
+ InGroup<ThreadSafety>, DefaultIgnore;
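+// Illustrative only: a minimal sketch (compiled with -Wthread-safety, since
+// the group is DefaultIgnore) expected to trigger warn_variable_requires_lock;
+// the Mutex type and 'mu' are hypothetical:
+//   struct __attribute__((lockable)) Mutex {
+//     void Lock()   __attribute__((exclusive_lock_function));
+//     void Unlock() __attribute__((unlock_function));
+//   };
+//   Mutex mu;
+//   int data __attribute__((guarded_by(mu)));
+//   void f() { data = 1; }   // writing 'data' requires locking 'mu' exclusively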
+
+
+def warn_impcast_vector_scalar : Warning<
+ "implicit conversion turns vector to scalar: %0 to %1">,
+ InGroup<DiagGroup<"conversion">>, DefaultIgnore;
+def warn_impcast_complex_scalar : Warning<
+ "implicit conversion discards imaginary component: %0 to %1">,
+ InGroup<DiagGroup<"conversion">>, DefaultIgnore;
+def warn_impcast_float_precision : Warning<
+ "implicit conversion loses floating-point precision: %0 to %1">,
+ InGroup<DiagGroup<"conversion">>, DefaultIgnore;
+def warn_impcast_float_integer : Warning<
+ "implicit conversion turns floating-point number into integer: %0 to %1">,
+ InGroup<DiagGroup<"conversion">>, DefaultIgnore;
+def warn_impcast_integer_sign : Warning<
+ "implicit conversion changes signedness: %0 to %1">,
+ InGroup<SignConversion>, DefaultIgnore;
+def warn_impcast_integer_sign_conditional : Warning<
+ "operand of ? changes signedness: %0 to %1">,
+ InGroup<SignConversion>, DefaultIgnore;
+def warn_impcast_integer_precision : Warning<
+ "implicit conversion loses integer precision: %0 to %1">,
+ InGroup<DiagGroup<"conversion">>, DefaultIgnore;
+def warn_impcast_integer_64_32 : Warning<
+ "implicit conversion loses integer precision: %0 to %1">,
+ InGroup<DiagGroup<"shorten-64-to-32">>, DefaultIgnore;
+def warn_impcast_integer_precision_constant : Warning<
+ "implicit conversion from %2 to %3 changes value from %0 to %1">,
+ InGroup<ConstantConversion>;
+def warn_impcast_bitfield_precision_constant : Warning<
+ "implicit truncation from %2 to bitfield changes value from %0 to %1">,
+ InGroup<ConstantConversion>;
+def warn_impcast_literal_float_to_integer : Warning<
+ "implicit conversion turns literal floating-point number into integer: "
+ "%0 to %1">,
+ InGroup<LiteralConversion>;
+def warn_impcast_string_literal_to_bool : Warning<
+ "implicit conversion turns string literal into bool: %0 to %1">,
+ InGroup<StringConversion>, DefaultIgnore;
+def warn_impcast_different_enum_types : Warning<
+ "implicit conversion from enumeration type %0 to different enumeration type "
+ "%1">, InGroup<DiagGroup<"conversion">>;
+def warn_impcast_bool_to_null_pointer : Warning<
+ "initialization of pointer of type %0 to null from a constant boolean "
+ "expression">, InGroup<BoolConversion>;
+def warn_impcast_null_pointer_to_integer : Warning<
+ "implicit conversion of NULL constant to %0">,
+ InGroup<NullConversion>;
+def warn_impcast_function_to_bool : Warning<
+ "address of function %q0 will always evaluate to 'true'">,
+ InGroup<BoolConversion>;
+def note_function_to_bool_silence : Note<
+ "prefix with the address-of operator to silence this warning">;
+def note_function_to_bool_call : Note<
+ "suffix with parentheses to turn this into a function call">;
+
+def warn_cast_align : Warning<
+ "cast from %0 to %1 increases required alignment from %2 to %3">,
+ InGroup<CastAlign>, DefaultIgnore;
+
+def warn_attribute_ignored_for_field_of_type : Warning<
+ "%0 attribute ignored for field of type %1">;
+def warn_transparent_union_attribute_field_size_align : Warning<
+ "%select{alignment|size}0 of field %1 (%2 bits) does not match the "
+ "%select{alignment|size}0 of the first field in transparent union; "
+ "transparent_union attribute ignored">;
+def note_transparent_union_first_field_size_align : Note<
+ "%select{alignment|size}0 of first field is %1 bits">;
+def warn_transparent_union_attribute_not_definition : Warning<
+ "transparent_union attribute can only be applied to a union definition; "
+ "attribute ignored">;
+def warn_transparent_union_attribute_floating : Warning<
+ "first field of a transparent union cannot have %select{floating point|"
+ "vector}0 type %1; transparent_union attribute ignored">;
+def warn_transparent_union_attribute_zero_fields : Warning<
+ "transparent union definition must contain at least one field; "
+ "transparent_union attribute ignored">;
+def warn_attribute_type_not_supported : Warning<
+ "'%0' attribute argument not supported: %1">;
+def warn_attribute_unknown_visibility : Warning<"unknown visibility '%0'">;
+def warn_attribute_protected_visibility :
+ Warning<"target does not support 'protected' visibility; using 'default'">,
+ InGroup<DiagGroup<"unsupported-visibility">>;
+def err_unknown_machine_mode : Error<"unknown machine mode %0">;
+def err_unsupported_machine_mode : Error<"unsupported machine mode %0">;
+def err_mode_not_primitive : Error<
+ "mode attribute only supported for integer and floating-point types">;
+def err_mode_wrong_type : Error<
+ "type of machine mode does not match type of base type">;
+def err_attr_wrong_decl : Error<
+ "'%0' attribute invalid on this declaration, requires typedef or value">;
+def warn_attribute_nonnull_no_pointers : Warning<
+ "'nonnull' attribute applied to function with no pointer arguments">;
+def warn_attribute_malloc_pointer_only : Warning<
+ "'malloc' attribute only applies to functions returning a pointer type">;
+def warn_attribute_sentinel_named_arguments : Warning<
+ "'sentinel' attribute requires named arguments">;
+def warn_attribute_sentinel_not_variadic : Warning<
+ "'sentinel' attribute only supported for variadic %select{functions|blocks}0">;
+def err_attribute_sentinel_less_than_zero : Error<
+ "'sentinel' parameter 1 less than zero">;
+def err_attribute_sentinel_not_zero_or_one : Error<
+ "'sentinel' parameter 2 not 0 or 1">;
+def err_attribute_cleanup_arg_not_found : Error<
+ "'cleanup' argument %0 not found">;
+def err_attribute_cleanup_arg_not_function : Error<
+ "'cleanup' argument %0 is not a function">;
+def err_attribute_cleanup_func_must_take_one_arg : Error<
+ "'cleanup' function %0 must take 1 parameter">;
+def err_attribute_cleanup_func_arg_incompatible_type : Error<
+ "'cleanup' function %0 parameter has type %1 which is incompatible with "
+ "type %2">;
+def err_attribute_regparm_wrong_platform : Error<
+ "'regparm' is not valid on this platform">;
+def err_attribute_regparm_invalid_number : Error<
+ "'regparm' parameter must be between 0 and %0 inclusive">;
+
+
+// Clang-Specific Attributes
+def warn_attribute_iboutlet : Warning<
+ "%0 attribute can only be applied to instance variables or properties">;
+def warn_attribute_ibaction: Warning<
+ "ibaction attribute can only be applied to Objective-C instance methods">;
+def err_iboutletcollection_type : Error<
+ "invalid type %0 as argument of iboutletcollection attribute">;
+def warn_iboutlet_object_type : Warning<
+ "%select{ivar|property}2 with %0 attribute must "
+ "be an object type (invalid %1)">,
+ InGroup<DiagGroup<"invalid-iboutlet">>;
+def err_attribute_overloadable_not_function : Error<
+ "'overloadable' attribute can only be applied to a function">;
+def err_attribute_overloadable_missing : Error<
+ "%select{overloaded function|redeclaration of}0 %1 must have the "
+ "'overloadable' attribute">;
+def note_attribute_overloadable_prev_overload : Note<
+ "previous overload of function is here">;
+def err_attribute_overloadable_no_prototype : Error<
+ "'overloadable' function %0 must have a prototype">;
+def warn_ns_attribute_wrong_return_type : Warning<
+ "%0 attribute only applies to %select{functions|methods}1 that "
+ "return %select{an Objective-C object|a pointer|a non-retainable pointer}2">;
+def warn_ns_attribute_wrong_parameter_type : Warning<
+ "%0 attribute only applies to %select{Objective-C object|pointer}1 "
+ "parameters">;
+def err_ns_bridged_not_interface : Error<
+ "parameter of 'ns_bridged' attribute does not name an Objective-C class">;
+
+// Function Parameter Semantic Analysis.
+def err_param_with_void_type : Error<"argument may not have 'void' type">;
+def err_void_only_param : Error<
+ "'void' must be the first and only parameter if specified">;
+def err_void_param_qualified : Error<
+ "'void' as parameter must not have type qualifiers">;
+def err_ident_list_in_fn_declaration : Error<
+ "a parameter list without types is only allowed in a function definition">;
+def ext_param_not_declared : Extension<
+ "parameter %0 was not declared, defaulting to type 'int'">;
+def err_param_typedef_of_void : Error<
+ "empty parameter list defined with a %select{typedef|type alias}0 of 'void' "
+ "not allowed%select{ in C++|}0">;
+def err_param_default_argument : Error<
+ "C does not support default arguments">;
+def err_param_default_argument_redefinition : Error<
+ "redefinition of default argument">;
+def warn_param_default_argument_redefinition : ExtWarn<
+ "redefinition of default argument">;
+def err_param_default_argument_missing : Error<
+ "missing default argument on parameter">;
+def err_param_default_argument_missing_name : Error<
+ "missing default argument on parameter %0">;
+def err_param_default_argument_references_param : Error<
+ "default argument references parameter %0">;
+def err_param_default_argument_references_local : Error<
+ "default argument references local variable %0 of enclosing function">;
+def err_param_default_argument_references_this : Error<
+ "default argument references 'this'">;
+def err_param_default_argument_nonfunc : Error<
+ "default arguments can only be specified for parameters in a function "
+ "declaration">;
+def err_param_default_argument_template_redecl : Error<
+ "default arguments cannot be added to a function template that has already "
+ "been declared">;
+def err_param_default_argument_member_template_redecl : Error<
+ "default arguments cannot be added to an out-of-line definition of a member "
+ "of a %select{class template|class template partial specialization|nested "
+ "class in a template}0">;
+def err_uninitialized_member_for_assign : Error<
+ "cannot define the implicit default assignment operator for %0, because "
+ "non-static %select{reference|const}1 member %2 can't use default "
+ "assignment operator">;
+def err_uninitialized_member_in_ctor : Error<
+ "%select{|implicit default }0constructor for %1 must explicitly initialize "
+ "the %select{reference|const}2 member %3">;
+def warn_default_arg_makes_ctor_special : Warning<
+ "addition of default argument on redeclaration makes this constructor a "
+ "%select{default|copy|move}0 constructor">, InGroup<DefaultArgSpecialMember>;
+def note_previous_declaration_special : Note<
+ // The ERRORs are in hopes that if they occur, they'll get reported.
+ "previous declaration was %select{*ERROR*|a copy constructor|a move "
+ "constructor|*ERROR*|*ERROR*|*ERROR*|not a special member function}0">;
+
+def err_use_of_default_argument_to_function_declared_later : Error<
+ "use of default argument to function %0 that is declared later in class %1">;
+def note_default_argument_declared_here : Note<
+ "default argument declared here">;
+
+def ext_param_promoted_not_compatible_with_prototype : ExtWarn<
+ "promoted type %0 of K&R function parameter is not compatible with the "
+ "parameter type %1 declared in a previous prototype">,
+ InGroup<KNRPromotedParameter>;
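+// Illustrative only: parameter-list forms expected to trigger the checks above:
+//   void f(void, int);   // err_void_only_param
+//   void g(int x = x);   // err_param_default_argument_references_param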
+
+
+// C++ Overloading Semantic Analysis.
+def err_ovl_diff_return_type : Error<
+ "functions that differ only in their return type cannot be overloaded">;
+def err_ovl_static_nonstatic_member : Error<
+ "static and non-static member functions with the same parameter types "
+ "cannot be overloaded">;
+
+def err_ovl_no_viable_function_in_call : Error<
+ "no matching function for call to %0">;
+def err_ovl_no_viable_member_function_in_call : Error<
+ "no matching member function for call to %0">;
+def err_ovl_ambiguous_call : Error<
+ "call to %0 is ambiguous">;
+def err_ovl_deleted_call : Error<
+ "call to %select{unavailable|deleted}0 function %1%2">;
+def err_ovl_ambiguous_member_call : Error<
+ "call to member function %0 is ambiguous">;
+def err_ovl_deleted_member_call : Error<
+ "call to %select{unavailable|deleted}0 member function %1%2">;
+def note_ovl_too_many_candidates : Note<
+ "remaining %0 candidate%s0 omitted; "
+ "pass -fshow-overloads=all to show them">;
+def note_ovl_candidate : Note<"candidate "
+ "%select{function|function|constructor|"
+ "function |function |constructor |"
+ "is the implicit default constructor|"
+ "is the implicit copy constructor|"
+ "is the implicit move constructor|"
+ "is the implicit copy assignment operator|"
+ "is the implicit move assignment operator|"
+ "is an inherited constructor}0%1"
+ "%select{| has different class (expected %3 but has %4)"
+ "| has different number of parameters (expected %3 but has %4)"
+ "| has type mismatch at %ordinal3 parameter (expected %4 but has %5)"
+ "| has different return type (%3 expected but has %4)"
+ "| has different qualifiers (expected "
+ "%select{none|const|restrict|const and restrict|volatile|const and volatile"
+ "|volatile and restrict|const, volatile, and restrict}3 but found "
+ "%select{none|const|restrict|const and restrict|volatile|const and volatile"
+ "|volatile and restrict|const, volatile, and restrict}4)}2">;
+
+def note_ovl_candidate_inherited_constructor : Note<"inherited from here">;
+def note_ovl_candidate_bad_deduction : Note<
+ "candidate template ignored: failed template argument deduction">;
+def note_ovl_candidate_incomplete_deduction : Note<"candidate template ignored: "
+ "couldn't infer template argument %0">;
+def note_ovl_candidate_inconsistent_deduction : Note<
+ "candidate template ignored: deduced conflicting %select{types|values|"
+ "templates}0 for parameter %1 (%2 vs. %3)">;
+def note_ovl_candidate_explicit_arg_mismatch_named : Note<
+ "candidate template ignored: invalid explicitly-specified argument "
+ "for template parameter %0">;
+def note_ovl_candidate_explicit_arg_mismatch_unnamed : Note<
+ "candidate template ignored: invalid explicitly-specified argument "
+ "for %ordinal0 template parameter">;
+def note_ovl_candidate_instantiation_depth : Note<
+ "candidate template ignored: substitution exceeded maximum template "
+ "instantiation depth">;
+def note_ovl_candidate_underqualified : Note<
+ "candidate template ignored: can't deduce a type for %0 which would "
+ "make %2 equal %1">;
+def note_ovl_candidate_substitution_failure : Note<
+ "candidate template ignored: substitution failure %0">;
+
+// Note that we don't treat templates differently for this diagnostic.
+def note_ovl_candidate_arity : Note<"candidate "
+ "%select{function|function|constructor|function|function|constructor|"
+ "constructor (the implicit default constructor)|"
+ "constructor (the implicit copy constructor)|"
+ "constructor (the implicit move constructor)|"
+ "function (the implicit copy assignment operator)|"
+ "function (the implicit move assignment operator)|"
+ "constructor (inherited)}0 %select{|template }1"
+ "not viable: requires%select{ at least| at most|}2 %3 argument%s3, but %4 "
+ "%plural{1:was|:were}4 provided">;
+
+def note_ovl_candidate_deleted : Note<
+ "candidate %select{function|function|constructor|"
+ "function |function |constructor |"
+ "constructor (the implicit default constructor)|"
+ "constructor (the implicit copy constructor)|"
+ "constructor (the implicit move constructor)|"
+ "function (the implicit copy assignment operator)|"
+ "function (the implicit move assignment operator)|"
+ "constructor (inherited)}0%1 has been "
+ "%select{explicitly made unavailable|explicitly deleted|"
+ "implicitly deleted}2">;
+
+// Giving the index of the bad argument really clutters this message, and
+// it's relatively unimportant because 1) it's generally obvious which
+// argument(s) are of the given object type and 2) the fix is usually
+// to complete the type, which doesn't involve changes to the call line
+// anyway. If people complain, we can change it.
+def note_ovl_candidate_bad_conv_incomplete : Note<"candidate "
+ "%select{function|function|constructor|"
+ "function |function |constructor |"
+ "constructor (the implicit default constructor)|"
+ "constructor (the implicit copy constructor)|"
+ "constructor (the implicit move constructor)|"
+ "function (the implicit copy assignment operator)|"
+ "function (the implicit move assignment operator)|"
+ "constructor (inherited)}0%1 "
+ "not viable: cannot convert argument of incomplete type %2 to %3">;
+def note_ovl_candidate_bad_list_argument : Note<"candidate "
+ "%select{function|function|constructor|"
+ "function |function |constructor |"
+ "constructor (the implicit default constructor)|"
+ "constructor (the implicit copy constructor)|"
+ "constructor (the implicit move constructor)|"
+ "function (the implicit copy assignment operator)|"
+ "function (the implicit move assignment operator)|"
+ "constructor (inherited)}0%1 "
+ "not viable: cannot convert initializer list argument to %3">;
+def note_ovl_candidate_bad_overload : Note<"candidate "
+ "%select{function|function|constructor|"
+ "function |function |constructor |"
+ "constructor (the implicit default constructor)|"
+ "constructor (the implicit copy constructor)|"
+ "constructor (the implicit move constructor)|"
+ "function (the implicit copy assignment operator)|"
+ "function (the implicit move assignment operator)|"
+ "constructor (inherited)}0%1"
+ " not viable: no overload of %3 matching %2 for %ordinal4 argument">;
+def note_ovl_candidate_bad_conv : Note<"candidate "
+ "%select{function|function|constructor|"
+ "function |function |constructor |"
+ "constructor (the implicit default constructor)|"
+ "constructor (the implicit copy constructor)|"
+ "constructor (the implicit move constructor)|"
+ "function (the implicit copy assignment operator)|"
+ "function (the implicit move assignment operator)|"
+ "constructor (inherited)}0%1"
+ " not viable: no known conversion from %2 to %3 for "
+ "%select{%ordinal5 argument|object argument}4; "
+ "%select{|dereference the argument with *|"
+ "take the address of the argument with &|"
+ "remove *|"
+ "remove &}6">;
+def note_ovl_candidate_bad_arc_conv : Note<"candidate "
+ "%select{function|function|constructor|"
+ "function |function |constructor |"
+ "constructor (the implicit default constructor)|"
+ "constructor (the implicit copy constructor)|"
+ "constructor (the implicit move constructor)|"
+ "function (the implicit copy assignment operator)|"
+ "function (the implicit move assignment operator)|"
+ "constructor (inherited)}0%1"
+ " not viable: cannot implicitly convert argument of type %2 to %3 for "
+ "%select{%ordinal5 argument|object argument}4 under ARC">;
+def note_ovl_candidate_bad_addrspace : Note<"candidate "
+ "%select{function|function|constructor|"
+ "function |function |constructor |"
+ "constructor (the implicit default constructor)|"
+ "constructor (the implicit copy constructor)|"
+ "constructor (the implicit move constructor)|"
+ "function (the implicit copy assignment operator)|"
+ "function (the implicit move assignment operator)|"
+ "constructor (inherited)}0%1 not viable: "
+ "%select{%ordinal6|'this'}5 argument (%2) is in "
+ "address space %3, but parameter must be in address space %4">;
+def note_ovl_candidate_bad_gc : Note<"candidate "
+ "%select{function|function|constructor|"
+ "function |function |constructor |"
+ "constructor (the implicit default constructor)|"
+ "constructor (the implicit copy constructor)|"
+ "constructor (the implicit move constructor)|"
+ "function (the implicit copy assignment operator)|"
+ "function (the implicit move assignment operator)|"
+ "constructor (inherited)}0%1 not viable: "
+ "%select{%ordinal6|'this'}5 argument (%2) has %select{no|__weak|__strong}3 "
+ "ownership, but parameter has %select{no|__weak|__strong}4 ownership">;
+def note_ovl_candidate_bad_ownership : Note<"candidate "
+ "%select{function|function|constructor|"
+ "function |function |constructor |"
+ "constructor (the implicit default constructor)|"
+ "constructor (the implicit copy constructor)|"
+ "constructor (the implicit move constructor)|"
+ "function (the implicit copy assignment operator)|"
+ "function (the implicit move assignment operator)|"
+ "constructor (inherited)}0%1 not viable: "
+ "%select{%ordinal6|'this'}5 argument (%2) has "
+ "%select{no|__unsafe_unretained|__strong|__weak|__autoreleasing}3 ownership,"
+ " but parameter has %select{no|__unsafe_unretained|__strong|__weak|"
+ "__autoreleasing}4 ownership">;
+def note_ovl_candidate_bad_cvr_this : Note<"candidate "
+ "%select{|function|||function|||||"
+ "function (the implicit copy assignment operator)|"
+ "function (the implicit move assignment operator)|}0 not viable: "
+ "'this' argument has type %2, but method is not marked "
+ "%select{const|restrict|const or restrict|volatile|const or volatile|"
+ "volatile or restrict|const, volatile, or restrict}3">;
+def note_ovl_candidate_bad_cvr : Note<"candidate "
+ "%select{function|function|constructor|"
+ "function |function |constructor |"
+ "constructor (the implicit default constructor)|"
+ "constructor (the implicit copy constructor)|"
+ "constructor (the implicit move constructor)|"
+ "function (the implicit copy assignment operator)|"
+ "function (the implicit move assignment operator)|"
+ "constructor (inherited)}0%1 not viable: "
+ "%ordinal4 argument (%2) would lose "
+ "%select{const|restrict|const and restrict|volatile|const and volatile|"
+ "volatile and restrict|const, volatile, and restrict}3 qualifier"
+ "%select{||s||s|s|s}3">;
+def note_ovl_candidate_bad_base_to_derived_conv : Note<"candidate "
+ "%select{function|function|constructor|"
+ "function |function |constructor |"
+ "constructor (the implicit default constructor)|"
+ "constructor (the implicit copy constructor)|"
+ "constructor (the implicit move constructor)|"
+ "function (the implicit copy assignment operator)|"
+ "function (the implicit move assignment operator)|"
+ "constructor (inherited)}0%1"
+ " not viable: cannot %select{convert from|convert from|bind}2 "
+ "%select{base class pointer|superclass|base class object of type}2 %3 to "
+ "%select{derived class pointer|subclass|derived class reference}2 %4 for "
+ "%ordinal5 argument">;
+def note_ovl_candidate_bad_target : Note<
+ "candidate %select{function|function|constructor|"
+ "function |function |constructor |"
+ "constructor (the implicit default constructor)|"
+ "constructor (the implicit copy constructor)|"
+ "constructor (the implicit move constructor)|"
+ "function (the implicit copy assignment operator)|"
+ "function (the implicit move assignment operator)|"
+ "constructor (inherited)}0 not viable: call to "
+ "%select{__device__|__global__|__host__|__host__ __device__}1 function from"
+ " %select{__device__|__global__|__host__|__host__ __device__}2 function">;
+
+def note_ambiguous_type_conversion: Note<
+ "because of ambiguity in conversion of %0 to %1">;
+def note_ovl_builtin_binary_candidate : Note<
+ "built-in candidate %0">;
+def note_ovl_builtin_unary_candidate : Note<
+ "built-in candidate %0">;
+def err_ovl_no_viable_function_in_init : Error<
+ "no matching constructor for initialization of %0">;
+def err_ovl_no_conversion_in_cast : Error<
+ "cannot convert %1 to %2 without a conversion operator">;
+def err_ovl_no_viable_conversion_in_cast : Error<
+ "no matching conversion for %select{|static_cast|reinterpret_cast|"
+ "dynamic_cast|C-style cast|functional-style cast}0 from %1 to %2">;
+def err_ovl_ambiguous_conversion_in_cast : Error<
+ "ambiguous conversion for %select{|static_cast|reinterpret_cast|"
+ "dynamic_cast|C-style cast|functional-style cast}0 from %1 to %2">;
+def err_ovl_deleted_conversion_in_cast : Error<
+ "%select{|static_cast|reinterpret_cast|dynamic_cast|C-style cast|"
+ "functional-style cast}0 from %1 to %2 uses deleted function">;
+def err_ovl_ambiguous_init : Error<"call to constructor of %0 is ambiguous">;
+def err_ref_init_ambiguous : Error<
+ "reference initialization of type %0 with initializer of type %1 is ambiguous">;
+def err_ovl_deleted_init : Error<
+ "call to %select{unavailable|deleted}0 constructor of %1">;
+def err_ovl_deleted_special_init : Error<
+ "call to implicitly-deleted %select{default constructor|copy constructor|"
+ "move constructor|copy assignment operator|move assignment operator|"
+ "destructor|function}0 of %1">;
+def err_ovl_ambiguous_oper_unary : Error<
+ "use of overloaded operator '%0' is ambiguous (operand type %1)">;
+def err_ovl_ambiguous_oper_binary : Error<
+ "use of overloaded operator '%0' is ambiguous (with operand types %1 and %2)">;
+def err_ovl_no_viable_oper : Error<"no viable overloaded '%0'">;
+def err_ovl_deleted_oper : Error<
+ "overload resolution selected %select{unavailable|deleted}0 operator '%1'%2">;
+def err_ovl_deleted_special_oper : Error<
+ "overload resolution selected implicitly-deleted %select{default constructor|"
+ "copy constructor|move constructor|copy assignment operator|move assignment "
+ "operator|destructor|'%1'}0%2">;
+def err_ovl_no_viable_subscript :
+ Error<"no viable overloaded operator[] for type %0">;
+def err_ovl_no_oper :
+ Error<"type %0 does not provide a %select{subscript|call}1 operator">;
+def err_ovl_unresolvable : Error<
+ "reference to overloaded function could not be resolved; "
+ "did you mean to call it%select{| with no arguments}0?">;
+def err_bound_member_function : Error<
+ "reference to non-static member function must be called"
+ "%select{|; did you mean to call it with no arguments?}0">;
+def note_possible_target_of_call : Note<"possible target for call">;
+
+def err_ovl_no_viable_object_call : Error<
+ "no matching function for call to object of type %0">;
+def err_ovl_ambiguous_object_call : Error<
+ "call to object of type %0 is ambiguous">;
+def err_ovl_deleted_object_call : Error<
+ "call to %select{unavailable|deleted}0 function call operator in type %1%2">;
+def note_ovl_surrogate_cand : Note<"conversion candidate of type %0">;
+def err_member_call_without_object : Error<
+ "call to non-static member function without an object argument">;
+
+// C++ Address of Overloaded Function
+def err_addr_ovl_no_viable : Error<
+ "address of overloaded function %0 does not match required type %1">;
+def err_addr_ovl_ambiguous : Error<
+ "address of overloaded function %0 is ambiguous">;
+def err_addr_ovl_not_func_ptrref : Error<
+ "address of overloaded function %0 cannot be converted to type %1">;
+def err_addr_ovl_no_qualifier : Error<
+ "can't form member pointer of type %0 without '&' and class name">;
+
+// C++11 Literal Operators
+def err_ovl_no_viable_literal_operator : Error<
+ "no matching literal operator for call to %0"
+ "%select{| with argument of type %2| with arguments of types %2 and %3}1"
+ "%select{| or 'const char *', and no matching literal operator template}4">;
+
+// C++ Template Declarations
+def err_template_param_shadow : Error<
+ "declaration of %0 shadows template parameter">;
+def note_template_param_here : Note<"template parameter is declared here">;
+def warn_template_export_unsupported : Warning<
+ "exported templates are unsupported">;
+def err_template_outside_namespace_or_class_scope : Error<
+ "templates can only be declared in namespace or class scope">;
+def err_template_linkage : Error<"templates must have C++ linkage">;
+def err_template_typedef : Error<"a typedef cannot be a template">;
+def err_template_unnamed_class : Error<
+ "cannot declare a class template with no name">;
+def err_template_param_list_different_arity : Error<
+ "%select{too few|too many}0 template parameters in template "
+ "%select{|template parameter }1redeclaration">;
+def note_template_param_list_different_arity : Note<
+ "%select{too few|too many}0 template parameters in template template "
+ "argument">;
+def note_template_prev_declaration : Note<
+ "previous template %select{declaration|template parameter}0 is here">;
+def err_template_param_different_kind : Error<
+ "template parameter has a different kind in template "
+ "%select{|template parameter }0redeclaration">;
+def note_template_param_different_kind : Note<
+ "template parameter has a different kind in template argument">;
+
+def err_template_nontype_parm_different_type : Error<
+ "template non-type parameter has a different type %0 in template "
+ "%select{|template parameter }1redeclaration">;
+
+def note_template_nontype_parm_different_type : Note<
+ "template non-type parameter has a different type %0 in template argument">;
+def note_template_nontype_parm_prev_declaration : Note<
+ "previous non-type template parameter with type %0 is here">;
+def err_template_nontype_parm_bad_type : Error<
+ "a non-type template parameter cannot have type %0">;
+def err_template_param_default_arg_redefinition : Error<
+ "template parameter redefines default argument">;
+def note_template_param_prev_default_arg : Note<
+ "previous default template argument defined here">;
+def err_template_param_default_arg_missing : Error<
+ "template parameter missing a default argument">;
+def ext_template_parameter_default_in_function_template : ExtWarn<
+ "default template arguments for a function template are a C++11 extension">,
+ InGroup<CXX11>;
+def warn_cxx98_compat_template_parameter_default_in_function_template : Warning<
+ "default template arguments for a function template are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_template_parameter_default_template_member : Error<
+ "cannot add a default template argument to the definition of a member of a "
+ "class template">;
+def err_template_parameter_default_friend_template : Error<
+ "default template argument not permitted on a friend template">;
+def err_template_template_parm_no_parms : Error<
+ "template template parameter must have its own template parameters">;
+
+def err_template_variable : Error<"variable %0 declared as a template">;
+def err_template_variable_noparams : Error<
+ "extraneous 'template<>' in declaration of variable %0">;
+def err_template_member : Error<"member %0 declared as a template">;
+def err_template_member_noparams : Error<
+ "extraneous 'template<>' in declaration of member %0">;
+def err_template_tag_noparams : Error<
+ "extraneous 'template<>' in declaration of %0 %1">;
+def err_template_decl_ref : Error<
+ "cannot refer to class template %0 without a template argument list">;
+
+// C++ Template Argument Lists
+def err_template_missing_args : Error<
+ "use of class template %0 requires template arguments">;
+def err_template_arg_list_different_arity : Error<
+ "%select{too few|too many}0 template arguments for "
+ "%select{class template|function template|template template parameter"
+ "|template}1 %2">;
+def note_template_decl_here : Note<"template is declared here">;
+def note_member_of_template_here : Note<"member is declared here">;
+def err_template_arg_must_be_type : Error<
+ "template argument for template type parameter must be a type">;
+def err_template_arg_must_be_expr : Error<
+ "template argument for non-type template parameter must be an expression">;
+def err_template_arg_nontype_ambig : Error<
+ "template argument for non-type template parameter is treated as type %0">;
+def err_template_arg_must_be_template : Error<
+ "template argument for template template parameter must be a class "
+ "template%select{| or type alias template}0">;
+def ext_template_arg_local_type : ExtWarn<
+ "template argument uses local type %0">, InGroup<LocalTypeTemplateArgs>;
+def ext_template_arg_unnamed_type : ExtWarn<
+ "template argument uses unnamed type">, InGroup<UnnamedTypeTemplateArgs>;
+def warn_cxx98_compat_template_arg_local_type : Warning<
+ "local type %0 as template argument is incompatible with C++98">,
+ InGroup<CXX98CompatLocalTypeTemplateArgs>, DefaultIgnore;
+def warn_cxx98_compat_template_arg_unnamed_type : Warning<
+ "unnamed type as template argument is incompatible with C++98">,
+ InGroup<CXX98CompatUnnamedTypeTemplateArgs>, DefaultIgnore;
+def note_template_unnamed_type_here : Note<
+ "unnamed type used in template argument was declared here">;
+def err_template_arg_overload_type : Error<
+ "template argument is the type of an unresolved overloaded function">;
+def err_template_arg_not_class_template : Error<
+ "template argument does not refer to a class template or template "
+ "template parameter">;
+def note_template_arg_refers_here_func : Note<
+ "template argument refers to function template %0, here">;
+def err_template_arg_template_params_mismatch : Error<
+ "template template argument has different template parameters than its "
+ "corresponding template template parameter">;
+def err_template_arg_not_integral_or_enumeral : Error<
+ "non-type template argument of type %0 must have an integral or enumeration"
+ " type">;
+def err_template_arg_not_ice : Error<
+ "non-type template argument of type %0 is not an integral constant "
+ "expression">;
+def err_template_arg_not_address_constant : Error<
+ "non-type template argument of type %0 is not a constant expression">;
+def err_template_arg_untyped_null_constant : Error<
+ "null non-type template argument must be cast to template parameter type %0">;
+def err_template_arg_wrongtype_null_constant : Error<
+ "null non-type template argument of type %0 does not match template parameter "
+ "of type %1">;
+def err_deduced_non_type_template_arg_type_mismatch : Error<
+ "deduced non-type template argument does not have the same type as its "
+ "corresponding template parameter (%0 vs %1)">;
+def err_template_arg_not_convertible : Error<
+ "non-type template argument of type %0 cannot be converted to a value "
+ "of type %1">;
+def warn_template_arg_negative : Warning<
+ "non-type template argument with value '%0' converted to '%1' for unsigned "
+ "template parameter of type %2">, InGroup<Conversion>, DefaultIgnore;
+def warn_template_arg_too_large : Warning<
+ "non-type template argument value '%0' truncated to '%1' for "
+ "template parameter of type %2">, InGroup<Conversion>, DefaultIgnore;
+def err_template_arg_no_ref_bind : Error<
+ "non-type template parameter of reference type %0 cannot bind to template "
+ "argument of type %1">;
+def err_template_arg_ref_bind_ignores_quals : Error<
+ "reference binding of non-type template parameter of type %0 to template "
+ "argument of type %1 ignores qualifiers">;
+def err_template_arg_not_decl_ref : Error<
+ "non-type template argument does not refer to any declaration">;
+def err_template_arg_not_object_or_func_form : Error<
+ "non-type template argument does not directly refer to an object or "
+ "function">;
+def err_template_arg_not_address_of : Error<
+ "non-type template argument for template parameter of pointer type %0 must "
+ "have its address taken">;
+def err_template_arg_address_of_non_pointer : Error<
+ "address taken in non-type template argument for template parameter of "
+ "reference type %0">;
+def err_template_arg_reference_var : Error<
+ "non-type template argument of reference type %0 is not an object">;
+def err_template_arg_field : Error<
+ "non-type template argument refers to non-static data member %0">;
+def err_template_arg_method : Error<
+ "non-type template argument refers to non-static member function %0">;
+def err_template_arg_object_no_linkage : Error<
+ "non-type template argument refers to %select{function|object}0 %1 that "
+ "does not have linkage">;
+def warn_cxx98_compat_template_arg_object_internal : Warning<
+ "non-type template argument referring to %select{function|object}0 %1 with "
+ "internal linkage is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def ext_template_arg_object_internal : ExtWarn<
+ "non-type template argument referring to %select{function|object}0 %1 with "
+ "internal linkage is a C++11 extension">, InGroup<CXX11>;
+def err_template_arg_thread_local : Error<
+ "non-type template argument refers to thread-local object">;
+def note_template_arg_internal_object : Note<
+ "non-type template argument refers to %select{function|object}0 here">;
+def note_template_arg_refers_here : Note<
+ "non-type template argument refers here">;
+def err_template_arg_not_object_or_func : Error<
+ "non-type template argument does not refer to an object or function">;
+def err_template_arg_not_pointer_to_member_form : Error<
+ "non-type template argument is not a pointer to member constant">;
+def ext_template_arg_extra_parens : ExtWarn<
+ "address non-type template argument cannot be surrounded by parentheses">;
+def warn_cxx98_compat_template_arg_extra_parens : Warning<
+ "redundant parentheses surrounding address non-type template argument are "
+ "incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
+def err_pointer_to_member_type : Error<
+ "invalid use of pointer to member type after %select{.*|->*}0">;
+def err_pointer_to_member_call_drops_quals : Error<
+ "call to pointer to member function of type %0 drops '%1' qualifier%s2">;
+def err_pointer_to_member_oper_value_classify: Error<
+ "pointer-to-member function type %0 can only be called on an "
+ "%select{rvalue|lvalue}1">;
+
+// C++ template specialization
+def err_template_spec_unknown_kind : Error<
+ "can only provide an explicit specialization for a class template, function "
+ "template, or a member function, static data member, "
+ "%select{or member class|member class, or member enumeration}0 of a "
+ "class template">;
+def note_specialized_entity : Note<
+ "explicitly specialized declaration is here">;
+def err_template_spec_decl_function_scope : Error<
+ "explicit specialization of %0 in function scope">;
+def err_template_spec_decl_class_scope : Error<
+ "explicit specialization of %0 in class scope">;
+def err_template_spec_decl_friend : Error<
+ "cannot declare an explicit specialization in a friend">;
+def err_template_spec_decl_out_of_scope_global : Error<
+ "%select{class template|class template partial|function template|member "
+ "function|static data member|member class|member enumeration}0 "
+ "specialization of %1 must originally be declared in the global scope">;
+def err_template_spec_decl_out_of_scope : Error<
+ "%select{class template|class template partial|function template|member "
+ "function|static data member|member class|member enumeration}0 "
+ "specialization of %1 must originally be declared in namespace %2">;
+def ext_template_spec_decl_out_of_scope : ExtWarn<
+ "first declaration of %select{class template|class template partial|"
+ "function template|member function|static data member|member class|"
+ "member enumeration}0 specialization of %1 outside namespace %2 is a "
+ "C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_template_spec_decl_out_of_scope : Warning<
+ "%select{class template|class template partial|function template|member "
+ "function|static data member|member class|member enumeration}0 "
+ "specialization of %1 outside namespace %2 is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_template_spec_redecl_out_of_scope : Error<
+ "%select{class template|class template partial|function template|member "
+ "function|static data member|member class|member enumeration}0 "
+ "specialization of %1 not in a namespace enclosing %2">;
+def err_template_spec_redecl_global_scope : Error<
+ "%select{class template|class template partial|function template|member "
+ "function|static data member|member class|member enumeration}0 "
+ "specialization of %1 must occur at global scope">;
+def err_spec_member_not_instantiated : Error<
+ "specialization of member %q0 does not specialize an instantiated member">;
+def note_specialized_decl : Note<"attempt to specialize declaration here">;
+def err_specialization_after_instantiation : Error<
+ "explicit specialization of %0 after instantiation">;
+def note_instantiation_required_here : Note<
+ "%select{implicit|explicit}0 instantiation first required here">;
+def err_template_spec_friend : Error<
+ "template specialization declaration cannot be a friend">;
+def err_template_spec_default_arg : Error<
+ "default argument not permitted on an explicit "
+ "%select{instantiation|specialization}0 of function %1">;
+def err_not_class_template_specialization : Error<
+ "cannot specialize a %select{dependent template|template template "
+ "parameter}0">;
+def err_function_specialization_in_class : Error<
+ "cannot specialize a function %0 within class scope">;
+def ext_function_specialization_in_class : ExtWarn<
+ "explicit specialization of %0 within class scope is a Microsoft extension">,
+ InGroup<Microsoft>;
+def ext_explicit_specialization_storage_class : ExtWarn<
+ "explicit specialization cannot have a storage class">;
+def err_explicit_specialization_inconsistent_storage_class : Error<
+ "explicit specialization has extraneous, inconsistent storage class "
+ "'%select{none|extern|static|__private_extern__|auto|register}0'">;
+
+// C++ class template specializations and out-of-line definitions
+def err_template_spec_needs_header : Error<
+ "template specialization requires 'template<>'">;
+def err_template_spec_needs_template_parameters : Error<
+ "template specialization or definition requires a template parameter list "
+ "corresponding to the nested type %0">;
+def err_template_param_list_matches_nontemplate : Error<
+ "template parameter list matching the non-templated nested type %0 should "
+ "be empty ('template<>')">;
+def err_alias_template_extra_headers : Error<
+ "extraneous template parameter list in alias template declaration">;
+def err_template_spec_extra_headers : Error<
+ "extraneous template parameter list in template specialization or "
+ "out-of-line template definition">;
+def warn_template_spec_extra_headers : Warning<
+ "extraneous template parameter list in template specialization">;
+def note_explicit_template_spec_does_not_need_header : Note<
+ "'template<>' header not required for explicitly-specialized class %0 "
+ "declared here">;
+def err_template_qualified_declarator_no_match : Error<
+ "nested name specifier '%0' for declaration does not refer into a class, "
+ "class template or class template partial specialization">;
+def err_specialize_member_of_template : Error<
+ "cannot specialize (with 'template<>') a member of an unspecialized "
+ "template">;
+
+// C++ Class Template Partial Specialization
+def err_default_arg_in_partial_spec : Error<
+ "default template argument in a class template partial specialization">;
+def err_dependent_non_type_arg_in_partial_spec : Error<
+ "non-type template argument depends on a template parameter of the "
+ "partial specialization">;
+def err_dependent_typed_non_type_arg_in_partial_spec : Error<
+ "non-type template argument specializes a template parameter with "
+ "dependent type %0">;
+def err_partial_spec_args_match_primary_template : Error<
+ "class template partial specialization does not specialize any template "
+ "argument; to %select{declare|define}0 the primary template, remove the "
+ "template argument list">;
+def warn_partial_specs_not_deducible : Warning<
+ "class template partial specialization contains "
+ "%select{a template parameter|template parameters}0 that can not be "
+ "deduced; this partial specialization will never be used">;
+def note_partial_spec_unused_parameter : Note<
+ "non-deducible template parameter %0">;
+def err_partial_spec_ordering_ambiguous : Error<
+ "ambiguous partial specializations of %0">;
+def note_partial_spec_match : Note<"partial specialization matches %0">;
+def err_partial_spec_redeclared : Error<
+ "class template partial specialization %0 cannot be redeclared">;
+def note_prev_partial_spec_here : Note<
+ "previous declaration of class template partial specialization %0 is here">;
+def err_partial_spec_fully_specialized : Error<
+ "partial specialization of %0 does not use any of its template parameters">;
+
+// C++ Function template specializations
+def err_function_template_spec_no_match : Error<
+ "no function template matches function template specialization %0">;
+def err_function_template_spec_ambiguous : Error<
+ "function template specialization %0 ambiguously refers to more than one "
+ "function template; explicitly specify%select{| additional}1 template "
+ "arguments to identify a particular function template">;
+def note_function_template_spec_matched : Note<
+ "function template matches specialization %0">;
+def err_function_template_partial_spec : Error<
+ "function template partial specialization is not allowed">;
+
+// C++ Template Instantiation
+def err_template_recursion_depth_exceeded : Error<
+ "recursive template instantiation exceeded maximum depth of %0">,
+ DefaultFatal, NoSFINAE;
+def note_template_recursion_depth : Note<
+ "use -ftemplate-depth=N to increase recursive template instantiation depth">;
+
+def err_template_instantiate_within_definition : Error<
+ "%select{implicit|explicit}0 instantiation of template %1 within its"
+ " own definition">;
+def err_template_instantiate_undefined : Error<
+ "%select{implicit|explicit}0 instantiation of undefined template %1">;
+def err_implicit_instantiate_member_undefined : Error<
+ "implicit instantiation of undefined member %0">;
+def note_template_class_instantiation_here : Note<
+ "in instantiation of template class %0 requested here">;
+def note_template_member_class_here : Note<
+ "in instantiation of member class %0 requested here">;
+def note_template_member_function_here : Note<
+ "in instantiation of member function %q0 requested here">;
+def note_function_template_spec_here : Note<
+ "in instantiation of function template specialization %q0 requested here">;
+def note_template_static_data_member_def_here : Note<
+ "in instantiation of static data member %q0 requested here">;
+def note_template_enum_def_here : Note<
+ "in instantiation of enumeration %q0 requested here">;
+def note_template_type_alias_instantiation_here : Note<
+ "in instantiation of template type alias %0 requested here">;
+
+def note_default_arg_instantiation_here : Note<
+ "in instantiation of default argument for '%0' required here">;
+def note_default_function_arg_instantiation_here : Note<
+ "in instantiation of default function argument expression "
+ "for '%0' required here">;
+def note_explicit_template_arg_substitution_here : Note<
+ "while substituting explicitly-specified template arguments into function "
+ "template %0 %1">;
+def note_function_template_deduction_instantiation_here : Note<
+ "while substituting deduced template arguments into function template %0 "
+ "%1">;
+def note_partial_spec_deduct_instantiation_here : Note<
+ "during template argument deduction for class template partial "
+ "specialization %0 %1">;
+def note_prior_template_arg_substitution : Note<
+ "while substituting prior template arguments into %select{non-type|template}0"
+ " template parameter%1 %2">;
+def note_template_default_arg_checking : Note<
+ "while checking a default template argument used here">;
+def note_instantiation_contexts_suppressed : Note<
+ "(skipping %0 context%s0 in backtrace; use -ftemplate-backtrace-limit=0 to "
+ "see all)">;
+
+def err_field_instantiates_to_function : Error<
+ "data member instantiated with function type %0">;
+def err_variable_instantiates_to_function : Error<
+ "%select{variable|static data member}0 instantiated with function type %1">;
+def err_nested_name_spec_non_tag : Error<
+ "type %0 cannot be used prior to '::' because it has no members">;
+
+// C++ Explicit Instantiation
+def err_explicit_instantiation_duplicate : Error<
+ "duplicate explicit instantiation of %0">;
+def note_previous_explicit_instantiation : Note<
+ "previous explicit instantiation is here">;
+def ext_explicit_instantiation_after_specialization : Extension<
+ "explicit instantiation of %0 that occurs after an explicit "
+ "specialization will be ignored (C++11 extension)">,
+ InGroup<CXX11>;
+def warn_cxx98_compat_explicit_instantiation_after_specialization : Warning<
+ "explicit instantiation of %0 that occurs after an explicit "
+ "specialization is incompatible with C++98">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
+def note_previous_template_specialization : Note<
+ "previous template specialization is here">;
+def err_explicit_instantiation_nontemplate_type : Error<
+ "explicit instantiation of non-templated type %0">;
+def note_nontemplate_decl_here : Note<
+ "non-templated declaration is here">;
+def err_explicit_instantiation_in_class : Error<
+ "explicit instantiation of %0 in class scope">;
+def err_explicit_instantiation_out_of_scope : Error<
+ "explicit instantiation of %0 not in a namespace enclosing %1">;
+def err_explicit_instantiation_must_be_global : Error<
+ "explicit instantiation of %0 must occur at global scope">;
+def warn_explicit_instantiation_out_of_scope_0x : Warning<
+ "explicit instantiation of %0 not in a namespace enclosing %1">,
+ InGroup<CXX11Compat>, DefaultIgnore;
+def warn_explicit_instantiation_must_be_global_0x : Warning<
+ "explicit instantiation of %0 must occur at global scope">,
+ InGroup<CXX11Compat>, DefaultIgnore;
+
+def err_explicit_instantiation_requires_name : Error<
+ "explicit instantiation declaration requires a name">;
+def err_explicit_instantiation_of_typedef : Error<
+ "explicit instantiation of typedef %0">;
+def err_explicit_instantiation_storage_class : Error<
+ "explicit instantiation cannot have a storage class">;
+def err_explicit_instantiation_not_known : Error<
+ "explicit instantiation of %0 does not refer to a function template, member "
+ "function, member class, or static data member">;
+def note_explicit_instantiation_here : Note<
+ "explicit instantiation refers here">;
+def err_explicit_instantiation_data_member_not_instantiated : Error<
+ "explicit instantiation refers to static data member %q0 that is not an "
+ "instantiation">;
+def err_explicit_instantiation_member_function_not_instantiated : Error<
+ "explicit instantiation refers to member function %q0 that is not an "
+ "instantiation">;
+def err_explicit_instantiation_ambiguous : Error<
+ "partial ordering for explicit instantiation of %0 is ambiguous">;
+def note_explicit_instantiation_candidate : Note<
+ "explicit instantiation candidate function template here %0">;
+def err_explicit_instantiation_inline : Error<
+ "explicit instantiation cannot be 'inline'">;
+def warn_explicit_instantiation_inline_0x : Warning<
+ "explicit instantiation cannot be 'inline'">, InGroup<CXX11Compat>,
+ DefaultIgnore;
+def err_explicit_instantiation_constexpr : Error<
+ "explicit instantiation cannot be 'constexpr'">;
+def ext_explicit_instantiation_without_qualified_id : Extension<
+ "qualifier in explicit instantiation of %q0 requires a template-id "
+ "(a typedef is not permitted)">;
+def err_explicit_instantiation_unqualified_wrong_namespace : Error<
+ "explicit instantiation of %q0 must occur in namespace %1">;
+def warn_explicit_instantiation_unqualified_wrong_namespace_0x : Warning<
+ "explicit instantiation of %q0 must occur in namespace %1">,
+ InGroup<CXX11Compat>, DefaultIgnore;
+def err_explicit_instantiation_undefined_member : Error<
+ "explicit instantiation of undefined %select{member class|member function|"
+ "static data member}0 %1 of class template %2">;
+def err_explicit_instantiation_undefined_func_template : Error<
+ "explicit instantiation of undefined function template %0">;
+def err_explicit_instantiation_declaration_after_definition : Error<
+ "explicit instantiation declaration (with 'extern') follows explicit "
+ "instantiation definition (without 'extern')">;
+def note_explicit_instantiation_definition_here : Note<
+ "explicit instantiation definition is here">;
+
+// C++ typename-specifiers
+def err_typename_nested_not_found : Error<"no type named %0 in %1">;
+def err_typename_nested_not_type : Error<
+ "typename specifier refers to non-type member %0 in %1">;
+def note_typename_refers_here : Note<
+ "referenced member %0 is declared here">;
+def err_typename_missing : Error<
+ "missing 'typename' prior to dependent type name '%0%1'">;
+def warn_typename_missing : ExtWarn<
+ "missing 'typename' prior to dependent type name '%0%1'">,
+ InGroup<DiagGroup<"typename-missing">>;
+def ext_typename_outside_of_template : ExtWarn<
+ "'typename' occurs outside of a template">, InGroup<CXX11>;
+def warn_cxx98_compat_typename_outside_of_template : Warning<
+ "use of 'typename' outside of a template is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_typename_refers_to_using_value_decl : Error<
+ "typename specifier refers to a dependent using declaration for a value "
+ "%0 in %1">;
+def note_using_value_decl_missing_typename : Note<
+ "add 'typename' to treat this using declaration as a type">;
+
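+// C++ 'template' keyword and template name diagnostics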
+def err_template_kw_refers_to_non_template : Error<
+ "%0 following the 'template' keyword does not refer to a template">;
+def err_template_kw_refers_to_class_template : Error<
+ "'%0%1' instantiated to a class template, not a function template">;
+def note_referenced_class_template : Note<
+ "class template declared here">;
+def err_template_kw_missing : Error<
+ "missing 'template' keyword prior to dependent template name '%0%1'">;
+def ext_template_outside_of_template : ExtWarn<
+ "'template' keyword outside of a template">, InGroup<CXX11>;
+def warn_cxx98_compat_template_outside_of_template : Warning<
+ "use of 'template' keyword outside of a template is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+
+def err_non_type_template_in_nested_name_specifier : Error<
+ "qualified name refers into a specialization of function template '%0'">;
+def err_template_id_not_a_type : Error<
+ "template name refers to non-type template '%0'">;
+def note_template_declared_here : Note<
+ "%select{function template|class template|type alias template|template template parameter}0 "
+ "%1 declared here">;
+def note_parameter_type : Note<
+ "parameter of type %0 is declared here">;
+
+// C++11 Variadic Templates
+def err_template_param_pack_default_arg : Error<
+ "template parameter pack cannot have a default argument">;
+def err_template_param_pack_must_be_last_template_parameter : Error<
+ "template parameter pack must be the last template parameter">;
+
+def err_template_parameter_pack_non_pack : Error<
+ "%select{template type|non-type template|template template}0 parameter"
+ "%select{| pack}1 conflicts with previous %select{template type|"
+ "non-type template|template template}0 parameter%select{ pack|}1">;
+def note_template_parameter_pack_non_pack : Note<
+ "%select{template type|non-type template|template template}0 parameter"
+ "%select{| pack}1 does not match %select{template type|non-type template"
+ "|template template}0 parameter%select{ pack|}1 in template argument">;
+def note_template_parameter_pack_here : Note<
+ "previous %select{template type|non-type template|template template}0 "
+ "parameter%select{| pack}1 declared here">;
+
+def err_unexpanded_parameter_pack_0 : Error<
+ "%select{expression|base type|declaration type|data member type|bit-field "
+ "size|static assertion|fixed underlying type|enumerator value|"
+ "using declaration|friend declaration|qualifier|initializer|default argument|"
+ "non-type template parameter type|exception type|partial specialization|"
+ "__if_exists name|__if_not_exists name}0 "
+ "contains an unexpanded parameter pack">;
+def err_unexpanded_parameter_pack_1 : Error<
+ "%select{expression|base type|declaration type|data member type|bit-field "
+ "size|static assertion|fixed underlying type|enumerator value|"
+ "using declaration|friend declaration|qualifier|initializer|default argument|"
+ "non-type template parameter type|exception type|partial specialization|"
+ "__if_exists name|__if_not_exists name}0 "
+ "contains unexpanded parameter pack %1">;
+def err_unexpanded_parameter_pack_2 : Error<
+ "%select{expression|base type|declaration type|data member type|bit-field "
+ "size|static assertion|fixed underlying type|enumerator value|"
+ "using declaration|friend declaration|qualifier|initializer|default argument|"
+ "non-type template parameter type|exception type|partial specialization|"
+ "__if_exists name|__if_not_exists name}0 "
+ "contains unexpanded parameter packs %1 and %2">;
+def err_unexpanded_parameter_pack_3_or_more : Error<
+ "%select{expression|base type|declaration type|data member type|bit-field "
+ "size|static assertion|fixed underlying type|enumerator value|"
+ "using declaration|friend declaration|qualifier|initializer|default argument|"
+ "non-type template parameter type|exception type|partial specialization|"
+ "__if_exists name|__if_not_exists name}0 "
+ "contains unexpanded parameter packs %1, %2, ...">;
+
+def err_pack_expansion_without_parameter_packs : Error<
+ "pack expansion does not contain any unexpanded parameter packs">;
+def err_pack_expansion_length_conflict : Error<
+ "pack expansion contains parameter packs %0 and %1 that have different "
+ "lengths (%2 vs. %3)">;
+def err_pack_expansion_length_conflict_multilevel : Error<
+ "pack expansion contains parameter pack %0 that has a different "
+ "length (%1 vs. %2) from outer parameter packs">;
+def err_pack_expansion_member_init : Error<
+ "pack expansion for initialization of member %0">;
+
+def err_function_parameter_pack_without_parameter_packs : Error<
+ "type %0 of function parameter pack does not contain any unexpanded "
+ "parameter packs">;
+def err_ellipsis_in_declarator_not_parameter : Error<
+ "only function and template parameters can be parameter packs">;
+
+def err_sizeof_pack_no_pack_name : Error<
+ "%0 does not refer to the name of a parameter pack">;
+
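+// Name lookup, availability, and redeclaration diagnostics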
+def err_unexpected_typedef : Error<
+ "unexpected type name %0: expected expression">;
+def err_unexpected_namespace : Error<
+ "unexpected namespace name %0: expected expression">;
+def err_undeclared_var_use : Error<"use of undeclared identifier %0">;
+def warn_found_via_dependent_bases_lookup : ExtWarn<"use of identifier %0 "
+ "found via unqualified lookup into dependent bases of class templates is a "
+ "Microsoft extension">, InGroup<Microsoft>;
+def note_dependent_var_use : Note<"must qualify identifier to find this "
+ "declaration in dependent base class">;
+def err_not_found_by_two_phase_lookup : Error<"call to function %0 that is neither "
+ "visible in the template definition nor found by argument-dependent lookup">;
+def note_not_found_by_two_phase_lookup : Note<"%0 should be declared prior to the "
+ "call site%select{| or in %2| or in an associated namespace of one of its arguments}1">;
+def err_undeclared_use : Error<"use of undeclared %0">;
+def warn_deprecated : Warning<"%0 is deprecated">,
+ InGroup<DeprecatedDeclarations>;
+def warn_deprecated_message : Warning<"%0 is deprecated: %1">,
+ InGroup<DeprecatedDeclarations>;
+def warn_deprecated_fwdclass_message : Warning<
+ "%0 maybe deprecated because receiver type is unknown">,
+ InGroup<DeprecatedDeclarations>;
+def warn_deprecated_def : Warning<
+ "Implementing deprecated %select{method|class|category}0">,
+ InGroup<DeprecatedImplementations>, DefaultIgnore;
+def err_unavailable : Error<"%0 is unavailable">;
+def err_unavailable_message : Error<"%0 is unavailable: %1">;
+def warn_unavailable_fwdclass_message : Warning<
+ "%0 maybe unavailable because receiver type is unknown">;
+def note_unavailable_here : Note<
+ "%select{declaration|function}0 has been explicitly marked "
+ "%select{unavailable|deleted|deprecated}1 here">;
+def note_implicitly_deleted : Note<
+ "explicitly defaulted function was implicitly deleted here">;
+def warn_not_enough_argument : Warning<
+ "not enough variable arguments in %0 declaration to fit a sentinel">,
+ InGroup<Sentinel>;
+def warn_missing_sentinel : Warning <
+ "missing sentinel in %select{function call|method dispatch|block call}0">,
+ InGroup<Sentinel>;
+def note_sentinel_here : Note<
+ "%select{function|method|block}0 has been explicitly marked sentinel here">;
+def warn_missing_prototype : Warning<
+ "no previous prototype for function %0">,
+ InGroup<DiagGroup<"missing-prototypes">>, DefaultIgnore;
+def err_redefinition : Error<"redefinition of %0">;
+def err_definition_of_implicitly_declared_member : Error<
+ "definition of implicitly declared %select{default constructor|copy "
+ "constructor|move constructor|copy assignment operator|move assignment "
+ "operator|destructor}1">;
+def err_definition_of_explicitly_defaulted_member : Error<
+ "definition of explicitly defaulted %select{default constructor|copy "
+ "constructor|move constructor|copy assignment operator|move assignment "
+ "operator|destructor}0">;
+def err_redefinition_extern_inline : Error<
+ "redefinition of a 'extern inline' function %0 is not supported in "
+ "%select{C99 mode|C++}1">;
+def warn_cxx98_compat_friend_redefinition : Warning<
+ "friend function %0 would be implicitly redefined in C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+
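+// Notes explaining implicitly-deleted special member functions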
+def note_deleted_dtor_no_operator_delete : Note<
+ "virtual destructor requires an unambiguous, accessible 'operator delete'">;
+def note_deleted_special_member_class_subobject : Note<
+ "%select{default constructor|copy constructor|move constructor|"
+ "copy assignment operator|move assignment operator|destructor}0 of "
+ "%select{||||union }4%1 is implicitly deleted because "
+ "%select{base class %3|field %3}2 has "
+ "%select{no|a deleted|multiple|an inaccessible|a non-trivial}4 "
+ "%select{%select{default constructor|copy constructor|move constructor|copy "
+ "assignment operator|move assignment operator|destructor}0|destructor}5"
+ "%select{||s||}4">;
+def note_deleted_default_ctor_uninit_field : Note<
+ "default constructor of %0 is implicitly deleted because field %1 of "
+ "%select{reference|const-qualified}3 type %2 would not be initialized">;
+def note_deleted_default_ctor_all_const : Note<
+ "default constructor of %0 is implicitly deleted because all "
+ "%select{data members|data members of an anonymous union member}1"
+ " are const-qualified">;
+def note_deleted_copy_ctor_rvalue_reference : Note<
+ "copy constructor of %0 is implicitly deleted because field %1 is of "
+ "rvalue reference type %2">;
+def note_deleted_copy_user_declared_move : Note<
+ "copy %select{constructor|assignment operator}0 is implicitly deleted because"
+ " %1 has a user-declared move %select{constructor|assignment operator}2">;
+def note_deleted_assign_field : Note<
+ "%select{copy|move}0 assignment operator of %0 is implicitly deleted "
+ "because field %1 is of %select{reference|const-qualified}3 type %2">;
+
+// This should eventually be an error.
+def warn_undefined_internal : Warning<
+ "%select{function|variable}0 %q1 has internal linkage but is not defined">,
+ DiagGroup<"undefined-internal">;
+def note_used_here : Note<"used here">;
+
+def warn_redefinition_of_typedef : ExtWarn<
+ "redefinition of typedef %0 is a C11 feature">,
+ InGroup<DiagGroup<"typedef-redefinition"> >;
+def err_redefinition_variably_modified_typedef : Error<
+ "redefinition of %select{typedef|type alias}0 for variably-modified type %1">;
+
+def err_inline_declaration_block_scope : Error<
+ "inline declaration of %0 not allowed in block scope">;
+def err_static_non_static : Error<
+ "static declaration of %0 follows non-static declaration">;
+def warn_weak_import : Warning <
+ "an already-declared variable is made a weak_import declaration %0">;
+def warn_static_non_static : ExtWarn<
+ "static declaration of %0 follows non-static declaration">;
+def err_non_static_static : Error<
+ "non-static declaration of %0 follows static declaration">;
+def err_extern_non_extern : Error<
+ "extern declaration of %0 follows non-extern declaration">;
+def err_non_extern_extern : Error<
+ "non-extern declaration of %0 follows extern declaration">;
+def err_non_thread_thread : Error<
+ "non-thread-local declaration of %0 follows thread-local declaration">;
+def err_thread_non_thread : Error<
+ "thread-local declaration of %0 follows non-thread-local declaration">;
+def err_redefinition_different_type : Error<
+ "redefinition of %0 with a different type">;
+def err_redefinition_different_kind : Error<
+ "redefinition of %0 as different kind of symbol">;
+def warn_forward_class_redefinition : Warning<
+ "redefinition of forward class %0 of a typedef name of an object type is ignored">,
+ InGroup<DiagGroup<"objc-forward-class-redefinition">>;
+def err_redefinition_different_typedef : Error<
+ "%select{typedef|type alias|type alias template}0 redefinition with different types (%1 vs %2)">;
+def err_tag_reference_non_tag : Error<
+ "elaborated type refers to %select{a non-tag type|a typedef|a type alias|a template|a type alias template}0">;
+def err_tag_reference_conflict : Error<
+ "implicit declaration introduced by elaborated type conflicts with "
+ "%select{a declaration|a typedef|a type alias|a template}0 of the same name">;
+def err_dependent_tag_decl : Error<
+ "%select{declaration|definition}0 of %select{struct|union|class|enum}1 "
+ "in a dependent scope">;
+def err_tag_definition_of_typedef : Error<
+ "definition of type %0 conflicts with %select{typedef|type alias}1 of the same name">;
+def err_conflicting_types : Error<"conflicting types for %0">;
+def err_nested_redefinition : Error<"nested redefinition of %0">;
+def err_use_with_wrong_tag : Error<
+ "use of %0 with tag type that does not match previous declaration">;
+def warn_struct_class_tag_mismatch : Warning<
+ "%select{struct|class}0%select{| template}1 %2 was previously declared "
+ "as a %select{class|struct}0%select{| template}1">,
+ InGroup<MismatchedTags>, DefaultIgnore;
+def warn_struct_class_previous_tag_mismatch : Warning<
+ "%2 defined as a %select{struct|class}0%select{| template}1 here but "
+ "previously declared as a %select{class|struct}0%select{| template}1">,
+ InGroup<MismatchedTags>, DefaultIgnore;
+def note_struct_class_suggestion : Note<
+ "did you mean %select{struct|class}0 here?">;
+def ext_forward_ref_enum : Extension<
+ "ISO C forbids forward references to 'enum' types">;
+def err_forward_ref_enum : Error<
+ "ISO C++ forbids forward references to 'enum' types">;
+def ext_ms_forward_ref_enum : Extension<
+ "forward references to 'enum' types are a Microsoft extension">, InGroup<Microsoft>;
+def ext_forward_ref_enum_def : Extension<
+ "redeclaration of already-defined enum %0 is a GNU extension">, InGroup<GNU>;
+
+def err_redefinition_of_enumerator : Error<"redefinition of enumerator %0">;
+def err_duplicate_member : Error<"duplicate member %0">;
+def err_misplaced_ivar : Error<
+ "ivars may not be placed in %select{categories|class extension}0">;
+def ext_enum_value_not_int : Extension<
+ "ISO C restricts enumerator values to range of 'int' (%0 is too "
+ "%select{small|large}1)">;
+def warn_enum_too_large : Warning<
+ "enumeration values exceed range of largest integer">;
+def warn_enumerator_too_large : Warning<
+ "enumerator value %0 is not representable in the largest integer type">;
+
+def warn_illegal_constant_array_size : Extension<
+ "size of static array must be an integer constant expression">;
+def err_vm_decl_in_file_scope : Error<
+ "variably modified type declaration not allowed at file scope">;
+def err_vm_decl_has_extern_linkage : Error<
+ "variably modified type declaration can not have 'extern' linkage">;
+def err_typecheck_field_variable_size : Error<
+ "fields must have a constant size: 'variable length array in structure' "
+ "extension will never be supported">;
+def err_vm_func_decl : Error<
+ "function declaration cannot have variably modified type">;
+def err_array_too_large : Error<
+ "array is too large (%0 elements)">;
+def warn_array_new_too_large : Warning<"array is too large (%0 elements)">,
+ // FIXME PR11644: ", will throw std::bad_array_new_length at runtime"
+ InGroup<DiagGroup<"bad-array-new-length">>;
+
+// -Wpadded, -Wpacked
+def warn_padded_struct_field : Warning<
+ "padding %select{struct|class}0 %1 with %2 %select{byte|bit}3%select{|s}4 "
+ "to align %5">, InGroup<Padded>, DefaultIgnore;
+def warn_padded_struct_anon_field : Warning<
+ "padding %select{struct|class}0 %1 with %2 %select{byte|bit}3%select{|s}4 "
+ "to align anonymous bit-field">, InGroup<Padded>, DefaultIgnore;
+def warn_padded_struct_size : Warning<
+ "padding size of %0 with %1 %select{byte|bit}2%select{|s}3 "
+ "to alignment boundary">, InGroup<Padded>, DefaultIgnore;
+def warn_unnecessary_packed : Warning<
+ "packed attribute is unnecessary for %0">, InGroup<Packed>, DefaultIgnore;
+
+def err_typecheck_negative_array_size : Error<"array size is negative">;
+def warn_typecheck_negative_array_new_size : Warning<"array size is negative">,
+ // FIXME PR11644: ", will throw std::bad_array_new_length at runtime"
+ InGroup<DiagGroup<"bad-array-new-length">>;
+def warn_typecheck_function_qualifiers : Warning<
+ "qualifier on function type %0 has unspecified behavior">;
+def err_typecheck_invalid_restrict_not_pointer : Error<
+ "restrict requires a pointer or reference (%0 is invalid)">;
+def err_typecheck_invalid_restrict_not_pointer_noarg : Error<
+ "restrict requires a pointer or reference">;
+def err_typecheck_invalid_restrict_invalid_pointee : Error<
+ "pointer to function type %0 may not be 'restrict' qualified">;
+def ext_typecheck_zero_array_size : Extension<
+ "zero size arrays are an extension">;
+def err_typecheck_zero_array_size : Error<
+ "zero-length arrays are not permitted in C++">;
+def warn_typecheck_zero_static_array_size : Warning<
+ "'static' has no effect on zero-length arrays">,
+ InGroup<DiagGroup<"array-bounds">>;
+def err_array_size_non_int : Error<"size of array has non-integer type %0">;
+def err_init_element_not_constant : Error<
+ "initializer element is not a compile-time constant">;
+def err_local_cant_init : Error<
+ "'__local' variable cannot have an initializer">;
+def err_block_extern_cant_init : Error<
+ "'extern' variable cannot have an initializer">;
+def warn_extern_init : Warning<"'extern' variable has an initializer">;
+def err_variable_object_no_init : Error<
+ "variable-sized object may not be initialized">;
+def err_excess_initializers : Error<
+ "excess elements in %select{array|vector|scalar|union|struct}0 initializer">;
+def warn_excess_initializers : ExtWarn<
+ "excess elements in %select{array|vector|scalar|union|struct}0 initializer">;
+def err_excess_initializers_in_char_array_initializer : Error<
+ "excess elements in char array initializer">;
+def warn_excess_initializers_in_char_array_initializer : ExtWarn<
+ "excess elements in char array initializer">;
+def err_initializer_string_for_char_array_too_long : Error<
+ "initializer-string for char array is too long">;
+def warn_initializer_string_for_char_array_too_long : ExtWarn<
+ "initializer-string for char array is too long">;
+def warn_missing_field_initializers : Warning<
+ "missing field '%0' initializer">,
+ InGroup<MissingFieldInitializers>, DefaultIgnore;
+def warn_braces_around_scalar_init : Warning<
+ "braces around scalar initializer">;
+def warn_many_braces_around_scalar_init : ExtWarn<
+ "too many braces around scalar initializer">;
+def ext_complex_component_init : Extension<
+ "complex initialization specifying real and imaginary components "
+ "is an extension">, InGroup<DiagGroup<"complex-component-init">>;
+def err_empty_scalar_initializer : Error<"scalar initializer cannot be empty">;
+def warn_cxx98_compat_empty_scalar_initializer : Warning<
+ "scalar initialized from empty initializer list is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_illegal_initializer : Error<
+ "illegal initializer (only variables can be initialized)">;
+def err_illegal_initializer_type : Error<"illegal initializer type %0">;
+def err_init_list_type_narrowing_sfinae : Error<
+ "type %0 cannot be narrowed to %1 in initializer list">;
+def err_init_list_type_narrowing : ExtWarn<
+ "type %0 cannot be narrowed to %1 in initializer list">,
+ InGroup<CXX11Narrowing>, DefaultError;
+def err_init_list_variable_narrowing_sfinae : Error<
+ "non-constant-expression cannot be narrowed from type %0 to %1 in "
+ "initializer list">;
+def err_init_list_variable_narrowing : ExtWarn<
+ "non-constant-expression cannot be narrowed from type %0 to %1 in "
+ "initializer list">, InGroup<CXX11Narrowing>, DefaultError;
+def err_init_list_constant_narrowing_sfinae : Error<
+ "constant expression evaluates to %0 which cannot be narrowed to type %1">;
+def err_init_list_constant_narrowing : ExtWarn<
+ "constant expression evaluates to %0 which cannot be narrowed to type %1">,
+ InGroup<CXX11Narrowing>, DefaultError;
+def warn_init_list_type_narrowing : Warning<
+ "type %0 cannot be narrowed to %1 in initializer list in C++11">,
+ InGroup<CXX11Narrowing>, DefaultIgnore;
+def warn_init_list_variable_narrowing : Warning<
+ "non-constant-expression cannot be narrowed from type %0 to %1 in "
+ "initializer list in C++11">,
+ InGroup<CXX11Narrowing>, DefaultIgnore;
+def warn_init_list_constant_narrowing : Warning<
+ "constant expression evaluates to %0 which cannot be narrowed to type %1 in "
+ "C++11">,
+ InGroup<CXX11Narrowing>, DefaultIgnore;
+def note_init_list_narrowing_override : Note<
+ "override this message by inserting an explicit cast">;
+def err_init_objc_class : Error<
+ "cannot initialize Objective-C class type %0">;
+def err_implicit_empty_initializer : Error<
+ "initializer for aggregate with no elements requires explicit braces">;
+def err_bitfield_has_negative_width : Error<
+ "bit-field %0 has negative width (%1)">;
+def err_anon_bitfield_has_negative_width : Error<
+ "anonymous bit-field has negative width (%0)">;
+def err_bitfield_has_zero_width : Error<"named bit-field %0 has zero width">;
+def err_bitfield_width_exceeds_type_size : Error<
+ "size of bit-field %0 (%1 bits) exceeds size of its type (%2 bits)">;
+def err_anon_bitfield_width_exceeds_type_size : Error<
+ "size of anonymous bit-field (%0 bits) exceeds size of its type (%1 bits)">;
+def err_incorrect_number_of_vector_initializers : Error<
+ "number of elements must be either one or match the size of the vector">;
+
+// Used by C++ which allows bit-fields that are wider than the type.
+def warn_bitfield_width_exceeds_type_size: Warning<
+ "size of bit-field %0 (%1 bits) exceeds the size of its type; value will be "
+ "truncated to %2 bits">;
+def warn_anon_bitfield_width_exceeds_type_size : Warning<
+ "size of anonymous bit-field (%0 bits) exceeds size of its type; value will "
+ "be truncated to %1 bits">;
+
+def warn_missing_braces : Warning<
+ "suggest braces around initialization of subobject">,
+ InGroup<DiagGroup<"missing-braces">>, DefaultIgnore;
+def err_missing_braces : Error<
+ "cannot omit braces around initialization of subobject when using direct "
+ "list-initialization">;
+
+def err_redefinition_of_label : Error<"redefinition of label %0">;
+def err_undeclared_label_use : Error<"use of undeclared label %0">;
+def warn_unused_label : Warning<"unused label %0">,
+ InGroup<UnusedLabel>, DefaultIgnore;
+
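+// Jump-scope diagnostics (goto, switch, indirect goto)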
+def err_goto_into_protected_scope : Error<"goto into protected scope">;
+def warn_goto_into_protected_scope : ExtWarn<"goto into protected scope">,
+ InGroup<Microsoft>;
+def warn_cxx98_compat_goto_into_protected_scope : Warning<
+ "goto would jump into protected scope in C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_switch_into_protected_scope : Error<
+ "switch case is in protected scope">;
+def warn_cxx98_compat_switch_into_protected_scope : Warning<
+ "switch case would be in a protected scope in C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def err_indirect_goto_without_addrlabel : Error<
+ "indirect goto in function with no address-of-label expressions">;
+def err_indirect_goto_in_protected_scope : Error<
+ "indirect goto might cross protected scopes">;
+def warn_cxx98_compat_indirect_goto_in_protected_scope : Warning<
+ "indirect goto might cross protected scopes in C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+def note_indirect_goto_target : Note<"possible target of indirect goto">;
+def note_protected_by_variable_init : Note<
+ "jump bypasses variable initialization">;
+def note_protected_by_variable_nontriv_destructor : Note<
+ "jump bypasses variable with a non-trivial destructor">;
+def note_protected_by_variable_non_pod : Note<
+ "jump bypasses initialization of non-POD variable">;
+def note_protected_by_cleanup : Note<
+ "jump bypasses initialization of variable with __attribute__((cleanup))">;
+def note_protected_by_vla_typedef : Note<
+ "jump bypasses initialization of VLA typedef">;
+def note_protected_by_vla_type_alias : Note<
+ "jump bypasses initialization of VLA type alias">;
+def note_protected_by_vla : Note<
+ "jump bypasses initialization of variable length array">;
+def note_protected_by_objc_try : Note<
+ "jump bypasses initialization of @try block">;
+def note_protected_by_objc_catch : Note<
+ "jump bypasses initialization of @catch block">;
+def note_protected_by_objc_finally : Note<
+ "jump bypasses initialization of @finally block">;
+def note_protected_by_objc_synchronized : Note<
+ "jump bypasses initialization of @synchronized block">;
+def note_protected_by_objc_autoreleasepool : Note<
+ "jump bypasses auto release push of @autoreleasepool block">;
+def note_protected_by_cxx_try : Note<
+ "jump bypasses initialization of try block">;
+def note_protected_by_cxx_catch : Note<
+ "jump bypasses initialization of catch block">;
+def note_protected_by___block : Note<
+ "jump bypasses setup of __block variable">;
+def note_protected_by_objc_ownership : Note<
+ "jump bypasses initialization of retaining variable">;
+def note_enters_block_captures_cxx_obj : Note<
+ "jump enters lifetime of block which captures a destructible c++ object">;
+def note_enters_block_captures_strong : Note<
+ "jump enters lifetime of block which strongly captures a variable">;
+def note_enters_block_captures_weak : Note<
+ "jump enters lifetime of block which weakly captures a variable">;
+
+def note_exits_cleanup : Note<
+ "jump exits scope of variable with __attribute__((cleanup))">;
+def note_exits_dtor : Note<
+ "jump exits scope of variable with non-trivial destructor">;
+def note_exits___block : Note<
+ "jump exits scope of __block variable">;
+def note_exits_objc_try : Note<
+ "jump exits @try block">;
+def note_exits_objc_catch : Note<
+ "jump exits @catch block">;
+def note_exits_objc_finally : Note<
+ "jump exits @finally block">;
+def note_exits_objc_synchronized : Note<
+ "jump exits @synchronized block">;
+def note_exits_cxx_try : Note<
+ "jump exits try block">;
+def note_exits_cxx_catch : Note<
+ "jump exits catch block">;
+def note_exits_objc_autoreleasepool : Note<
+ "jump exits autoreleasepool block">;
+def note_exits_objc_ownership : Note<
+ "jump exits scope of retaining variable">;
+def note_exits_block_captures_cxx_obj : Note<
+ "jump exits lifetime of block which captures a destructible c++ object">;
+def note_exits_block_captures_strong : Note<
+ "jump exits lifetime of block which strongly captures a variable">;
+def note_exits_block_captures_weak : Note<
+ "jump exits lifetime of block which weakly captures a variable">;
+
+def err_func_returning_array_function : Error<
+ "function cannot return %select{array|function}0 type %1">;
+def err_field_declared_as_function : Error<"field %0 declared as a function">;
+def err_field_incomplete : Error<"field has incomplete type %0">;
+def ext_variable_sized_type_in_struct : ExtWarn<
+ "field %0 with variable sized type %1 not at the end of a struct or class is"
+ " a GNU extension">, InGroup<GNU>;
+
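+// Flexible array members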
+def err_flexible_array_empty_struct : Error<
+ "flexible array %0 not allowed in otherwise empty struct">;
+def err_flexible_array_has_nonpod_type : Error<
+ "flexible array member %0 of non-POD element type %1">;
+def ext_flexible_array_in_struct : Extension<
+ "%0 may not be nested in a struct due to flexible array member">,
+ InGroup<FlexibleArrayExtensions>;
+def ext_flexible_array_in_array : Extension<
+ "%0 may not be used as an array element due to flexible array member">,
+ InGroup<FlexibleArrayExtensions>;
+def err_flexible_array_init : Error<
+ "initialization of flexible array member is not allowed">;
+def ext_flexible_array_empty_aggregate_ms : Extension<
+ "flexible array member %0 in otherwise empty %select{struct|class}1 "
+ "is a Microsoft extension">, InGroup<Microsoft>;
+def ext_flexible_array_union_ms : Extension<
+ "flexible array member %0 in a union is a Microsoft extension">,
+ InGroup<Microsoft>;
+def ext_flexible_array_empty_aggregate_gnu : Extension<
+ "flexible array member %0 in otherwise empty %select{struct|class}1 "
+ "is a GNU extension">, InGroup<GNU>;
+def ext_flexible_array_union_gnu : Extension<
+ "flexible array member %0 in a union is a GNU extension">, InGroup<GNU>;
+
+let CategoryName = "ARC Semantic Issue" in {
+
+// ARC-mode diagnostics.
+
+let CategoryName = "ARC Weak References" in {
+
+def err_arc_weak_no_runtime : Error<
+ "the current deployment target does not support automated __weak references">;
+def err_arc_unsupported_weak_class : Error<
+ "class is incompatible with __weak references">;
+def err_arc_weak_unavailable_assign : Error<
+ "assignment of a weak-unavailable object to a __weak object">;
+def err_arc_weak_unavailable_property : Error<
+ "synthesis of a weak-unavailable property is disallowed "
+ "because it requires synthesis of an ivar of the __weak object">;
+def err_arc_convesion_of_weak_unavailable : Error<
+ "%select{implicit conversion|cast}0 of weak-unavailable object of type %1 to"
+ " a __weak object of type %2">;
+
+} // end "ARC Weak References" category
+
+let CategoryName = "ARC Restrictions" in {
+
+def err_arc_illegal_explicit_message : Error<
+ "ARC forbids explicit message send of %0">;
+def err_arc_unused_init_message : Error<
+ "the result of a delegate init call must be immediately returned "
+ "or assigned to 'self'">;
+def err_arc_mismatched_cast : Error<
+ "%select{implicit conversion|cast}0 of "
+ "%select{%2|a non-Objective-C pointer type %2|a block pointer|"
+ "an Objective-C pointer|an indirect pointer to an Objective-C pointer}1"
+ " to %3 is disallowed with ARC">;
+def err_arc_nolifetime_behavior : Error<
+ "explicit ownership qualifier on cast result has no effect">;
+def err_arc_objc_object_in_struct : Error<
+ "ARC forbids %select{Objective-C objects|blocks}0 in structs or unions">;
+def err_arc_objc_property_default_assign_on_object : Error<
+ "ARC forbids synthesizing a property of an Objective-C object "
+ "with unspecified ownership or storage attribute">;
+def err_arc_illegal_selector : Error<
+ "ARC forbids use of %0 in a @selector">;
+def err_arc_illegal_method_def : Error<
+ "ARC forbids implementation of %0">;
+
+} // end "ARC Restrictions" category
+
+def err_arc_lost_method_convention : Error<
+ "method was declared as %select{an 'alloc'|a 'copy'|an 'init'|a 'new'}0 "
+ "method, but its implementation doesn't match because %select{"
+ "its result type is not an object pointer|"
+ "its result type is unrelated to its receiver type}1">;
+def note_arc_lost_method_convention : Note<"declaration in interface">;
+def err_arc_gained_method_convention : Error<
+ "method implementation does not match its declaration">;
+def note_arc_gained_method_convention : Note<
+ "declaration in interface is not in the '%select{alloc|copy|init|new}0' "
+ "family because %select{its result type is not an object pointer|"
+ "its result type is unrelated to its receiver type}1">;
+def err_typecheck_arc_assign_self : Error<
+ "cannot assign to 'self' outside of a method in the init family">;
+def err_typecheck_arc_assign_self_class_method : Error<
+ "cannot assign to 'self' in a class method">;
+def err_typecheck_arr_assign_enumeration : Error<
+ "fast enumeration variables can't be modified in ARC by default; "
+ "declare the variable __strong to allow this">;
+def warn_arc_non_pod_class_with_object_member : Warning<
+ "%0 cannot be shared between ARC and non-ARC "
+ "code; add a copy constructor, a copy assignment operator, and a destructor "
+ "to make it ABI-compatible">, InGroup<AutomaticReferenceCountingABI>,
+ DefaultIgnore;
+def warn_arc_retained_assign : Warning<
+ "assigning retained object to %select{weak|unsafe_unretained}0 variable"
+ "; object will be released after assignment">,
+ InGroup<ARCUnsafeRetainedAssign>;
+def warn_arc_retained_property_assign : Warning<
+ "assigning retained object to unsafe property"
+ "; object will be released after assignment">,
+ InGroup<ARCUnsafeRetainedAssign>;
+def warn_arc_trivial_member_function_with_object_member : Warning<
+ "%0 cannot be shared between ARC and non-ARC "
+ "code; add a non-trivial %select{copy constructor|copy assignment operator|"
+ "destructor}1 to make it ABI-compatible">,
+ InGroup<AutomaticReferenceCountingABI>, DefaultIgnore;
+def err_arc_new_array_without_ownership : Error<
+ "'new' cannot allocate an array of %0 with no explicit ownership">;
+def warn_err_new_delete_object_array : Warning<
+ "%select{allocating|destroying}0 an array of %1; this array must not "
+ "%select{be deleted in|have been allocated from}0 non-ARC code">,
+ InGroup<AutomaticReferenceCountingABI>, DefaultIgnore;
+def err_arc_autoreleasing_var : Error<
+ "%select{__block variables|global variables|fields|ivars}0 cannot have "
+ "__autoreleasing ownership">;
+def err_arc_autoreleasing_capture : Error<
+ "cannot capture __autoreleasing variable in a "
+ "%select{block|lambda by copy}0">;
+def err_arc_thread_ownership : Error<
+ "thread-local variable has non-trivial ownership: type is %0">;
+def err_arc_indirect_no_ownership : Error<
+ "%select{pointer|reference}1 to non-const type %0 with no explicit ownership">,
+ InGroup<AutomaticReferenceCounting>;
+def err_arc_array_param_no_ownership : Error<
+ "must explicitly describe intended ownership of an object array parameter">;
+def err_arc_pseudo_dtor_inconstant_quals : Error<
+ "pseudo-destructor destroys object of type %0 with inconsistently-qualified "
+ "type %1">;
+def err_arc_init_method_unrelated_result_type : Error<
+ "init methods must return a type related to the receiver type">;
+def err_arc_nonlocal_writeback : Error<
+ "passing address of %select{non-local|non-scalar}0 object to "
+ "__autoreleasing parameter for write-back">;
+def err_arc_method_not_found : Error<
+ "no known %select{instance|class}1 method for selector %0">;
+def err_arc_receiver_forward_class : Error<
+ "receiver %0 for class message is a forward declaration">;
+def err_arc_may_not_respond : Error<
+ "no visible @interface for %0 declares the selector %1">;
+def err_arc_receiver_forward_instance : Error<
+ "receiver type %0 for instance message is a forward declaration">;
+def warn_receiver_forward_instance : Warning<
+ "receiver type %0 for instance message is a forward declaration">,
+ InGroup<DiagGroup<"receiver-forward-class">>, DefaultIgnore;
+def err_arc_collection_forward : Error<
+ "collection expression type %0 is a forward declaration">;
+def err_arc_multiple_method_decl : Error<
+ "multiple methods named %0 found with mismatched result, "
+ "parameter type or attributes">;
+
+let CategoryName = "ARC Retain Cycle" in {
+
+def warn_arc_retain_cycle : Warning<
+ "capturing %0 strongly in this block is likely to lead to a retain cycle">,
+ InGroup<ARCRetainCycles>;
+def note_arc_retain_cycle_owner : Note<
+ "block will be retained by %select{the captured object|an object strongly "
+ "retained by the captured object}0">;
+
+} // end "ARC Retain Cycle" category
+
+def note_nontrivial_objc_ownership : Note<
+ "because type %0 has %select{no|no|__strong|__weak|__autoreleasing}1 "
+ "ownership">;
+def warn_arc_object_memaccess : Warning<
+ "%select{destination for|source of}0 this %1 call is a pointer to "
+ "ownership-qualified type %2">, InGroup<ARCNonPodMemAccess>;
+
+let CategoryName = "ARC and @properties" in {
+
+def err_arc_strong_property_ownership : Error<
+ "existing ivar %1 for strong property %0 may not be "
+ "%select{|__unsafe_unretained||__weak}2">;
+def err_arc_assign_property_ownership : Error<
+ "existing ivar %1 for property %0 with %select{unsafe_unretained| assign}2 "
+ "attribute must be __unsafe_unretained">;
+def err_arc_inconsistent_property_ownership : Error<
+ "%select{|unsafe_unretained|strong|weak}1 property %0 may not also be "
+ "declared %select{|__unsafe_unretained|__strong|__weak|__autoreleasing}2">;
+
+} // end "ARC and @properties" category
+
+def err_arc_atomic_ownership : Error<
+ "cannot perform atomic operation on a pointer to type %0: type has "
+ "non-trivial ownership">;
+
+let CategoryName = "ARC Casting Rules" in {
+
+def err_arc_bridge_cast_incompatible : Error<
+ "incompatible types casting %0 to %1 with a %select{__bridge|"
+ "__bridge_transfer|__bridge_retained}2 cast">;
+def err_arc_bridge_cast_wrong_kind : Error<
+ "cast of %select{Objective-C|block|C}0 pointer type %1 to "
+ "%select{Objective-C|block|C}2 pointer type %3 cannot use %select{__bridge|"
+ "__bridge_transfer|__bridge_retained}4">;
+def err_arc_cast_requires_bridge : Error<
+ "%select{cast|implicit conversion}0 of %select{Objective-C|block|C}1 "
+ "pointer type %2 to %select{Objective-C|block|C}3 pointer type %4 "
+ "requires a bridged cast">;
+def note_arc_bridge : Note<
+ "use __bridge to convert directly (no change in ownership)">;
+def note_arc_bridge_transfer : Note<
+ "use %select{__bridge_transfer|CFBridgingRelease call}1 to transfer "
+ "ownership of a +1 %0 into ARC">;
+def note_arc_bridge_retained : Note<
+ "use %select{__bridge_retained|CFBridgingRetain call}1 to make an "
+ "ARC object available as a +1 %0">;
+
+} // ARC Casting category
+
+} // ARC category name
+
+def err_flexible_array_init_needs_braces : Error<
+ "flexible array requires brace-enclosed initializer">;
+def err_illegal_decl_array_of_functions : Error<
+ "'%0' declared as array of functions of type %1">;
+def err_illegal_decl_array_incomplete_type : Error<
+ "array has incomplete element type %0">;
+def err_illegal_message_expr_incomplete_type : Error<
+ "objective-c message has incomplete result type %0">;
+def err_illegal_decl_array_of_references : Error<
+ "'%0' declared as array of references of type %1">;
+def err_decl_negative_array_size : Error<
+ "'%0' declared as an array with a negative size">;
+def err_array_star_outside_prototype : Error<
+ "star modifier used outside of function prototype">;
+def err_illegal_decl_pointer_to_reference : Error<
+ "'%0' declared as a pointer to a reference of type %1">;
+def err_illegal_decl_mempointer_to_reference : Error<
+ "'%0' declared as a member pointer to a reference of type %1">;
+def err_illegal_decl_mempointer_to_void : Error<
+ "'%0' declared as a member pointer to void">;
+def err_illegal_decl_mempointer_in_nonclass : Error<
+ "'%0' does not point into a class">;
+def err_mempointer_in_nonclass_type : Error<
+ "member pointer refers into non-class type %0">;
+def err_reference_to_void : Error<"cannot form a reference to 'void'">;
+def err_nonfunction_block_type : Error<
+ "block pointer to non-function type is invalid">;
+def err_return_block_has_expr : Error<"void block should not return a value">;
+def err_block_return_missing_expr : Error<
+ "non-void block should return a value">;
+def err_func_def_incomplete_result : Error<
+ "incomplete result type %0 in function definition">;
+def err_atomic_specifier_bad_type : Error<
+ "_Atomic cannot be applied to "
+ "%select{incomplete |array |function |reference |atomic |qualified |}0type "
+ "%1 %select{||||||which is not trivially copyable}0">;
+
+// Expressions.
+def ext_sizeof_function_type : Extension<
+ "invalid application of 'sizeof' to a function type">, InGroup<PointerArith>;
+def ext_sizeof_void_type : Extension<
+ "invalid application of '%select{sizeof|__alignof|vec_step}0' to a void "
+ "type">, InGroup<PointerArith>;
+def err_sizeof_alignof_incomplete_type : Error<
+ "invalid application of '%select{sizeof|__alignof|vec_step}0' to an "
+ "incomplete type %1">;
+def err_sizeof_alignof_bitfield : Error<
+ "invalid application of '%select{sizeof|__alignof}0' to bit-field">;
+def err_vecstep_non_scalar_vector_type : Error<
+ "'vec_step' requires built-in scalar or vector type, %0 invalid">;
+def err_offsetof_incomplete_type : Error<
+ "offsetof of incomplete type %0">;
+def err_offsetof_record_type : Error<
+ "offsetof requires struct, union, or class type, %0 invalid">;
+def err_offsetof_array_type : Error<"offsetof requires array type, %0 invalid">;
+def ext_offsetof_extended_field_designator : Extension<
+ "using extended field designator is an extension">,
+ InGroup<DiagGroup<"extended-offsetof">>;
+def warn_offsetof_non_pod_type : ExtWarn<"offsetof on non-POD type %0">,
+ InGroup<InvalidOffsetof>;
+def err_offsetof_bitfield : Error<"cannot compute offset of bit-field %0">;
+
+def warn_floatingpoint_eq : Warning<
+ "comparing floating point with == or != is unsafe">,
+ InGroup<DiagGroup<"float-equal">>, DefaultIgnore;
+
+def warn_division_by_zero : Warning<"division by zero is undefined">;
+def warn_remainder_by_zero : Warning<"remainder by zero is undefined">;
+def warn_shift_negative : Warning<"shift count is negative">,
+ InGroup<DiagGroup<"shift-count-negative">>;
+def warn_shift_gt_typewidth : Warning<"shift count >= width of type">,
+ InGroup<DiagGroup<"shift-count-overflow">>;
+def warn_shift_result_gt_typewidth : Warning<
+ "signed shift result (%0) requires %1 bits to represent, but %2 only has "
+ "%3 bits">, InGroup<DiagGroup<"shift-overflow">>;
+def warn_shift_result_sets_sign_bit : Warning<
+ "signed shift result (%0) sets the sign bit of the shift expression's "
+ "type (%1) and becomes negative">,
+ InGroup<DiagGroup<"shift-sign-overflow">>, DefaultIgnore;
+
+def warn_precedence_bitwise_rel : Warning<
+ "%0 has lower precedence than %1; %1 will be evaluated first">,
+ InGroup<Parentheses>;
+def note_precedence_bitwise_first : Note<
+ "place parentheses around the %0 expression to evaluate it first">;
+def note_precedence_bitwise_silence : Note<
+ "place parentheses around the %0 expression to silence this warning">;
+
+def warn_precedence_conditional : Warning<
+ "operator '?:' has lower precedence than '%0'; '%0' will be evaluated first">,
+ InGroup<Parentheses>;
+def note_precedence_conditional_first : Note<
+ "place parentheses around the '?:' expression to evaluate it first">;
+def note_precedence_conditional_silence : Note<
+ "place parentheses around the '%0' expression to silence this warning">;
+
+def warn_logical_instead_of_bitwise : Warning<
+ "use of logical '%0' with constant operand">,
+ InGroup<DiagGroup<"constant-logical-operand">>;
+def note_logical_instead_of_bitwise_change_operator : Note<
+ "use '%0' for a bitwise operation">;
+def note_logical_instead_of_bitwise_remove_constant : Note<
+ "remove constant to silence this warning">;
+
+def warn_bitwise_and_in_bitwise_or : Warning<
+ "'&' within '|'">, InGroup<BitwiseOpParentheses>;
+def note_bitwise_and_in_bitwise_or_silence : Note<
+ "place parentheses around the '&' expression to silence this warning">;
+
+def warn_logical_and_in_logical_or : Warning<
+ "'&&' within '||'">, InGroup<LogicalOpParentheses>;
+def note_logical_and_in_logical_or_silence : Note<
+ "place parentheses around the '&&' expression to silence this warning">;
+
+def warn_self_assignment : Warning<
+ "explicitly assigning a variable of type %0 to itself">,
+ InGroup<SelfAssignment>, DefaultIgnore;
+
+def warn_string_plus_int : Warning<
+ "adding %0 to a string does not append to the string">,
+ InGroup<StringPlusInt>;
+def note_string_plus_int_silence : Note<
+ "use array indexing to silence this warning">;
+
+def warn_sizeof_array_param : Warning<
+ "sizeof on array function parameter will return size of %0 instead of %1">,
+ InGroup<SizeofArrayArgument>;
+
+def err_sizeof_nonfragile_interface : Error<
+ "invalid application of '%select{alignof|sizeof}1' to interface %0 in "
+ "non-fragile ABI">;
+def err_atdef_nonfragile_interface : Error<
+ "invalid application of @defs in non-fragile ABI">;
+def err_subscript_nonfragile_interface : Error<
+ "subscript requires size of interface %0, which is not constant in "
+ "non-fragile ABI">;
+
+def err_arithmetic_nonfragile_interface : Error<
+ "arithmetic on pointer to interface %0, which is not a constant size in "
+ "non-fragile ABI">;
+
+def ext_subscript_non_lvalue : Extension<
+ "ISO C90 does not allow subscripting non-lvalue array">;
+def err_typecheck_subscript_value : Error<
+ "subscripted value is not an array, pointer, or vector">;
+def err_typecheck_subscript_not_integer : Error<
+ "array subscript is not an integer">;
+def err_subscript_function_type : Error<
+ "subscript of pointer to function type %0">;
+def err_subscript_incomplete_type : Error<
+ "subscript of pointer to incomplete type %0">;
+def ext_gnu_subscript_void_type : Extension<
+ "subscript of a pointer to void is a GNU extension">, InGroup<PointerArith>;
+def err_typecheck_member_reference_struct_union : Error<
+ "member reference base type %0 is not a structure or union">;
+def err_typecheck_member_reference_ivar : Error<
+ "%0 does not have a member named %1">;
+def error_arc_weak_ivar_access : Error<
+ "dereferencing a __weak pointer is not allowed due to a possible null "
+ "value caused by a race condition; assign it to a strong variable first">;
+def err_typecheck_member_reference_arrow : Error<
+ "member reference type %0 is not a pointer">;
+def err_typecheck_member_reference_suggestion : Error<
+ "member reference type %0 is %select{a|not a}1 pointer; maybe you meant to use '%select{->|.}1'?">;
+def err_typecheck_member_reference_type : Error<
+ "cannot refer to type member %0 in %1 with '%select{.|->}2'">;
+def err_typecheck_member_reference_unknown : Error<
+ "cannot refer to member %0 in %1 with '%select{.|->}2'">;
+def err_member_reference_needs_call : Error<
+ "base of member reference is a function; perhaps you meant to call "
+ "it%select{| with no arguments}0?">;
+def warn_subscript_is_char : Warning<"array subscript is of type 'char'">,
+ InGroup<CharSubscript>, DefaultIgnore;
+
+def err_typecheck_incomplete_tag : Error<"incomplete definition of type %0">;
+def err_no_member : Error<"no member named %0 in %1">;
+
+def err_member_not_yet_instantiated : Error<
+ "no member %0 in %1; it has not yet been instantiated">;
+def note_non_instantiated_member_here : Note<
+ "not-yet-instantiated member is declared here">;
+
+def err_enumerator_does_not_exist : Error<
+ "enumerator %0 does not exist in instantiation of %1">;
+def note_enum_specialized_here : Note<
+ "enum %0 was explicitly specialized here">;
+
+def err_member_redeclared : Error<"class member cannot be redeclared">;
+def err_member_name_of_class : Error<"member %0 has the same name as its class">;
+def err_member_def_undefined_record : Error<
+ "out-of-line definition of %0 from class %1 without definition">;
+def err_member_def_does_not_match : Error<
+ "out-of-line definition of %0 does not match any declaration in %1">;
+def err_member_def_does_not_match_suggest : Error<
+ "out-of-line definition of %0 does not match any declaration in %1; "
+ "did you mean %2?">;
+def err_member_def_does_not_match_ret_type : Error<
+ "out-of-line definition of %q0 differs from the declaration in the return type">;
+def err_nonstatic_member_out_of_line : Error<
+ "non-static data member defined out-of-line">;
+def err_qualified_typedef_declarator : Error<
+ "typedef declarator cannot be qualified">;
+def err_qualified_param_declarator : Error<
+ "parameter declarator cannot be qualified">;
+def ext_out_of_line_declaration : ExtWarn<
+ "out-of-line declaration of a member must be a definition">,
+ InGroup<OutOfLineDeclaration>, DefaultError;
+def warn_member_extra_qualification : Warning<
+ "extra qualification on member %0">;
+def err_member_qualification : Error<
+ "non-friend class member %0 cannot have a qualified name">;
+def note_member_def_close_match : Note<"member declaration nearly matches">;
+def note_member_def_close_const_match : Note<
+ "member declaration does not match because "
+ "it %select{is|is not}0 const qualified">;
+def note_member_def_close_param_match : Note<
+ "type of %ordinal0 parameter of member declaration does not match "
+ "definition (%1 vs %2)">;
+def err_typecheck_ivar_variable_size : Error<
+ "instance variables must have a constant size">;
+def err_ivar_reference_type : Error<
+ "instance variables cannot be of reference type">;
+def err_typecheck_illegal_increment_decrement : Error<
+ "cannot %select{decrement|increment}1 value of type %0">;
+def err_typecheck_arithmetic_incomplete_type : Error<
+ "arithmetic on a pointer to an incomplete type %0">;
+def err_typecheck_pointer_arith_function_type : Error<
+ "arithmetic on%select{ a|}0 pointer%select{|s}0 to%select{ the|}2 "
+ "function type%select{|s}2 %1%select{| and %3}2">;
+def err_typecheck_pointer_arith_void_type : Error<
+ "arithmetic on%select{ a|}0 pointer%select{|s}0 to void">;
+def err_typecheck_decl_incomplete_type : Error<
+ "variable has incomplete type %0">;
+def ext_typecheck_decl_incomplete_type : ExtWarn<
+ "tentative definition of variable with internal linkage has incomplete non-array type %0">,
+ InGroup<DiagGroup<"tentative-definition-incomplete-type">>;
+def err_tentative_def_incomplete_type : Error<
+ "tentative definition has type %0 that is never completed">;
+def err_tentative_def_incomplete_type_arr : Error<
+ "tentative definition has array of type %0 that is never completed">;
+def warn_tentative_incomplete_array : Warning<
+ "tentative array definition assumed to have one element">;
+def err_typecheck_incomplete_array_needs_initializer : Error<
+ "definition of variable with array type needs an explicit size "
+ "or an initializer">;
+def err_array_init_not_init_list : Error<
+ "array initializer must be an initializer "
+ "list%select{| or string literal}0">;
+def err_array_init_different_type : Error<
+ "cannot initialize array of type %0 with array of type %1">;
+def err_array_init_non_constant_array : Error<
+ "cannot initialize array of type %0 with non-constant array of type %1">;
+def ext_array_init_copy : Extension<
+ "initialization of an array of type %0 from a compound literal of type %1 is "
+ "a GNU extension">, InGroup<GNU>;
+// This is intentionally not disabled by -Wno-gnu.
+def ext_array_init_parens : ExtWarn<
+ "parenthesized initialization of a member array is a GNU extension">,
+ InGroup<DiagGroup<"gnu-array-member-paren-init">>, DefaultError;
+def warn_deprecated_string_literal_conversion : Warning<
+ "conversion from string literal to %0 is deprecated">, InGroup<DeprecatedWritableStr>;
+def err_realimag_invalid_type : Error<"invalid type %0 to %1 operator">;
+def err_typecheck_sclass_fscope : Error<
+ "illegal storage class on file-scoped variable">;
+def err_unsupported_global_register : Error<
+ "global register variables are not supported">;
+def warn_standalone_specifier : Warning<"'%0' ignored on this declaration">;
+def err_typecheck_sclass_func : Error<"illegal storage class on function">;
+def err_static_block_func : Error<
+ "function declared in block scope cannot have 'static' storage class">;
+def err_typecheck_address_of : Error<"address of %select{bit-field"
+ "|vector element|property expression|register variable}0 requested">;
+def ext_typecheck_addrof_void : Extension<
+ "ISO C forbids taking the address of an expression of type 'void'">;
+def err_unqualified_pointer_member_function : Error<
+ "must explicitly qualify name of member function when taking its address">;
+def err_invalid_form_pointer_member_function : Error<
+ "cannot create a non-constant pointer to member function">;
+def err_parens_pointer_member_function : Error<
+ "cannot parenthesize the name of a method when forming a member pointer">;
+def err_typecheck_invalid_lvalue_addrof : Error<
+ "address expression must be an lvalue or a function designator">;
+def ext_typecheck_addrof_class_temporary : ExtWarn<
+ "taking the address of a temporary object of type %0">,
+ InGroup<DiagGroup<"address-of-temporary">>, DefaultError;
+def err_typecheck_addrof_class_temporary : Error<
+ "taking the address of a temporary object of type %0">;
+def err_typecheck_unary_expr : Error<
+ "invalid argument type %0 to unary expression">;
+def err_typecheck_indirection_requires_pointer : Error<
+ "indirection requires pointer operand (%0 invalid)">;
+def warn_indirection_through_null : Warning<
+ "indirection of non-volatile null pointer will be deleted, not trap">, InGroup<NullDereference>;
+def note_indirection_through_null : Note<
+ "consider using __builtin_trap() or qualifying pointer with 'volatile'">;
+def warn_pointer_indirection_from_incompatible_type : Warning<
+ "dereference of type %1 that was reinterpret_cast from type %0 has undefined "
+ "behavior.">,
+ InGroup<DiagGroup<"undefined-reinterpret-cast">>, DefaultIgnore;
+
+def err_objc_object_assignment : Error<
+ "cannot assign to class object (%0 invalid)">;
+def err_typecheck_invalid_operands : Error<
+ "invalid operands to binary expression (%0 and %1)">;
+def err_typecheck_sub_ptr_compatible : Error<
+ "%0 and %1 are not pointers to compatible types">;
+def ext_typecheck_ordered_comparison_of_pointer_integer : ExtWarn<
+ "ordered comparison between pointer and integer (%0 and %1)">;
+def ext_typecheck_ordered_comparison_of_pointer_and_zero : Extension<
+ "ordered comparison between pointer and zero (%0 and %1) is an extension">;
+def ext_typecheck_ordered_comparison_of_function_pointers : ExtWarn<
+ "ordered comparison of function pointers (%0 and %1)">;
+def ext_typecheck_comparison_of_fptr_to_void : Extension<
+ "equality comparison between function pointer and void pointer (%0 and %1)">;
+def err_typecheck_comparison_of_fptr_to_void : Error<
+ "equality comparison between function pointer and void pointer (%0 and %1)">;
+def ext_typecheck_comparison_of_pointer_integer : ExtWarn<
+ "comparison between pointer and integer (%0 and %1)">;
+def err_typecheck_comparison_of_pointer_integer : Error<
+ "comparison between pointer and integer (%0 and %1)">;
+def ext_typecheck_comparison_of_distinct_pointers : ExtWarn<
+ "comparison of distinct pointer types (%0 and %1)">;
+def ext_typecheck_cond_incompatible_operands : ExtWarn<
+ "incompatible operand types (%0 and %1)">;
+def err_cond_voidptr_arc : Error <
+ "operands to conditional of types %0 and %1 are incompatible in ARC mode">;
+def err_typecheck_comparison_of_distinct_pointers : Error<
+ "comparison of distinct pointer types (%0 and %1)">;
+def ext_typecheck_comparison_of_distinct_pointers_nonstandard : ExtWarn<
+ "comparison of distinct pointer types (%0 and %1) uses non-standard "
+ "composite pointer type %2">;
+def err_typecheck_assign_const : Error<"read-only variable is not assignable">;
+def err_stmtexpr_file_scope : Error<
+ "statement expression not allowed at file scope">;
+def warn_mixed_sign_comparison : Warning<
+ "comparison of integers of different signs: %0 and %1">,
+ InGroup<SignCompare>, DefaultIgnore;
+def warn_lunsigned_always_true_comparison : Warning<
+ "comparison of unsigned%select{| enum}2 expression %0 is always %1">,
+ InGroup<TautologicalCompare>;
+def warn_runsigned_always_true_comparison : Warning<
+ "comparison of %0 unsigned%select{| enum}2 expression is always %1">,
+ InGroup<TautologicalCompare>;
+def warn_comparison_of_mixed_enum_types : Warning<
+ "comparison of two values with different enumeration types (%0 and %1)">,
+ InGroup<DiagGroup<"enum-compare">>;
+def warn_null_in_arithmetic_operation : Warning<
+ "use of NULL in arithmetic operation">,
+ InGroup<DiagGroup<"null-arithmetic">>;
+def warn_null_in_comparison_operation : Warning<
+ "comparison between NULL and non-pointer "
+ "%select{(%1 and NULL)|(NULL and %1)}0">,
+ InGroup<DiagGroup<"null-arithmetic">>;
+
+def err_invalid_this_use : Error<
+ "invalid use of 'this' outside of a non-static member function">;
+def err_invalid_member_use_in_static_method : Error<
+ "invalid use of member %0 in static member function">;
+def err_invalid_qualified_function_type : Error<
+ "%select{static |non-}0member function %select{of type %2 |}1"
+ "cannot have '%3' qualifier">;
+def err_compound_qualified_function_type : Error<
+ "%select{block pointer|pointer|reference}0 to function type %select{%2 |}1"
+ "cannot have '%3' qualifier">;
+
+def err_ref_qualifier_overload : Error<
+ "cannot overload a member function %select{without a ref-qualifier|with "
+ "ref-qualifier '&'|with ref-qualifier '&&'}0 with a member function %select{"
+ "without a ref-qualifier|with ref-qualifier '&'|with ref-qualifier '&&'}1">;
+
+def err_invalid_non_static_member_use : Error<
+ "invalid use of non-static data member %0">;
+def err_nested_non_static_member_use : Error<
+ "%select{call to non-static member function|use of non-static data member}0 "
+ "%2 of %1 from nested type %3">;
+def warn_cxx98_compat_non_static_member_use : Warning<
+ "use of non-static data member %0 in an unevaluated context is "
+ "incompatible with C++98">, InGroup<CXX98Compat>, DefaultIgnore;
+def err_invalid_incomplete_type_use : Error<
+ "invalid use of incomplete type %0">;
+def err_builtin_func_cast_more_than_one_arg : Error<
+ "function-style cast to a builtin type can only take one argument">;
+def err_value_init_for_array_type : Error<
+ "array types cannot be value-initialized">;
+def warn_format_nonliteral_noargs : Warning<
+ "format string is not a string literal (potentially insecure)">,
+ InGroup<FormatSecurity>;
+def warn_format_nonliteral : Warning<
+ "format string is not a string literal">,
+ InGroup<FormatNonLiteral>, DefaultIgnore;
+
+def err_unexpected_interface : Error<
+ "unexpected interface name %0: expected expression">;
+def err_ref_non_value : Error<"%0 does not refer to a value">;
+def err_ref_vm_type : Error<
+ "cannot refer to declaration with a variably modified type inside block">;
+def err_ref_array_type : Error<
+ "cannot refer to declaration with an array type inside block">;
+def err_property_not_found : Error<
+ "property %0 not found on object of type %1">;
+def err_invalid_property_name : Error<
+ "%0 is not a valid property name (accessing an object of type %1)">;
+def err_getter_not_found : Error<
+ "expected getter method not found on object of type %0">;
+def err_objc_subscript_method_not_found : Error<
+ "expected method to %select{read|write}1 %select{dictionary|array}2 element not "
+ "found on object of type %0">;
+def err_objc_subscript_index_type : Error<
+ "method index parameter type %0 is not an integral type">;
+def err_objc_subscript_key_type : Error<
+ "method key parameter type %0 is not an object type">;
+def err_objc_subscript_dic_object_type : Error<
+ "method object parameter type %0 is not an object type">;
+def err_objc_subscript_object_type : Error<
+ "cannot assign to this %select{dictionary|array}1 because the assigning "
+ "method's second parameter of type %0 is not an Objective-C pointer type">;
+def err_objc_subscript_base_type : Error<
+ "%select{dictionary|array}1 subscript base type %0 is not an Objective-C object">;
+def err_objc_multiple_subscript_type_conversion : Error<
+ "indexing expression is invalid because subscript type %0 has "
+ "multiple type conversion functions">;
+def err_objc_subscript_type_conversion : Error<
+ "indexing expression is invalid because subscript type %0 is not an integral"
+ " or Objective-C pointer type">;
+def err_objc_subscript_pointer : Error<
+ "indexing expression is invalid because subscript type %0 is not an"
+ " Objective-C pointer">;
+def err_objc_indexing_method_result_type : Error<
+ "method for accessing %select{dictionary|array}1 element must have Objective-C"
+ " object return type instead of %0">;
+def err_objc_index_incomplete_class_type : Error<
+ "Objective-C index expression has incomplete class type %0">;
+def err_illegal_container_subscripting_op : Error<
+ "illegal operation on Objective-C container subscripting">;
+def err_property_not_found_forward_class : Error<
+ "property %0 cannot be found in forward class object %1">;
+def err_property_not_as_forward_class : Error<
+ "property %0 refers to an incomplete Objective-C class %1 "
+ "(with no @interface available)">;
+def note_forward_class : Note<
+ "forward declaration of class here">;
+def err_duplicate_property : Error<
+ "property has a previous declaration">;
+def ext_gnu_void_ptr : Extension<
+ "arithmetic on%select{ a|}0 pointer%select{|s}0 to void is a GNU extension">,
+ InGroup<PointerArith>;
+def ext_gnu_ptr_func_arith : Extension<
+ "arithmetic on%select{ a|}0 pointer%select{|s}0 to%select{ the|}2 function "
+ "type%select{|s}2 %1%select{| and %3}2 is a GNU extension">,
+ InGroup<PointerArith>;
+def error_readonly_message_assignment : Error<
+ "assigning to 'readonly' return result of an objective-c message not allowed">;
+def ext_integer_increment_complex : Extension<
+ "ISO C does not support '++'/'--' on complex integer type %0">;
+def ext_integer_complement_complex : Extension<
+ "ISO C does not support '~' for complex conjugation of %0">;
+def err_nosetter_property_assignment : Error<
+ "%select{assignment to readonly property|"
+ "no setter method %1 for assignment to property}0">;
+def err_nosetter_property_incdec : Error<
+ "%select{%select{increment|decrement}1 of readonly property|"
+ "no setter method %2 for %select{increment|decrement}1 of property}0">;
+def err_nogetter_property_compound_assignment : Error<
+ "a getter method is needed to perform a compound assignment on a property">;
+def err_nogetter_property_incdec : Error<
+ "no getter method %1 for %select{increment|decrement} of property">;
+def error_no_subobject_property_setting : Error<
+ "expression is not assignable">;
+def err_qualified_objc_access : Error<
+ "%select{property|ivar}0 access cannot be qualified with '%1'">;
+
+def ext_freestanding_complex : Extension<
+ "complex numbers are an extension in a freestanding C99 implementation">;
+
+// FIXME: Remove when we support imaginary.
+def err_imaginary_not_supported : Error<"imaginary types are not supported">;
+
+// Obj-c expressions
+def warn_root_inst_method_not_found : Warning<
+ "instance method %0 is being used on 'Class' which is not in the root class">;
+def warn_class_method_not_found : Warning<
+ "class method %objcclass0 not found (return type defaults to 'id')">;
+def warn_instance_method_on_class_found : Warning<
+ "instance method %0 found instead of class method %1">;
+def warn_inst_method_not_found : Warning<
+ "instance method %objcinstance0 not found (return type defaults to 'id')">;
+def error_no_super_class_message : Error<
+ "no @interface declaration found in class messaging of %0">;
+def error_root_class_cannot_use_super : Error<
+ "%0 cannot use 'super' because it is a root class">;
+def err_invalid_receiver_to_message : Error<
+ "invalid receiver to message expression">;
+def err_invalid_receiver_to_message_super : Error<
+ "'super' is only valid in a method body">;
+def err_invalid_receiver_class_message : Error<
+ "receiver type %0 is not an Objective-C class">;
+def err_missing_open_square_message_send : Error<
+ "missing '[' at start of message send expression">;
+def warn_bad_receiver_type : Warning<
+ "receiver type %0 is not 'id' or interface pointer, consider "
+ "casting it to 'id'">;
+def err_bad_receiver_type : Error<"bad receiver type %0">;
+def err_unknown_receiver_suggest : Error<
+ "unknown receiver %0; did you mean %1?">;
+def error_objc_throw_expects_object : Error<
+ "@throw requires an Objective-C object type (%0 invalid)">;
+def error_objc_synchronized_expects_object : Error<
+ "@synchronized requires an Objective-C object type (%0 invalid)">;
+def error_rethrow_used_outside_catch : Error<
+ "@throw (rethrow) used outside of a @catch block">;
+def err_attribute_multiple_objc_gc : Error<
+ "multiple garbage collection attributes specified for type">;
+def err_catch_param_not_objc_type : Error<
+ "@catch parameter is not a pointer to an interface type">;
+def err_illegal_qualifiers_on_catch_parm : Error<
+ "illegal qualifiers on @catch parameter">;
+def err_storage_spec_on_catch_parm : Error<
+ "@catch parameter cannot have storage specifier %select{|'typedef'|'extern'|"
+ "'static'|'auto'|'register'|'__private_extern__'|'mutable'}0">;
+def warn_register_objc_catch_parm : Warning<
+ "'register' storage specifier on @catch parameter will be ignored">;
+def err_qualified_objc_catch_parm : Error<
+ "@catch parameter declarator cannot be qualified">;
+def warn_objc_pointer_cxx_catch_fragile : Warning<
+ "cannot catch an exception thrown with @throw in C++ in the non-unified "
+ "exception model">, InGroup<ObjCNonUnifiedException>;
+def err_objc_object_catch : Error<
+ "cannot catch an Objective-C object by value">;
+def err_incomplete_type_objc_at_encode : Error<
+ "'@encode' of incomplete type %0">;
+
+def warn_setter_getter_impl_required : Warning<
+ "property %0 requires method %1 to be defined - "
+ "use @synthesize, @dynamic or provide a method implementation "
+ "in this class implementation">,
+ InGroup<ObjCPropertyImpl>;
+def warn_setter_getter_impl_required_in_category : Warning<
+ "property %0 requires method %1 to be defined - "
+ "use @dynamic or provide a method implementation in this category">,
+ InGroup<ObjCPropertyImpl>;
+def note_parameter_named_here : Note<
+ "passing argument to parameter %0 here">;
+def note_parameter_here : Note<
+ "passing argument to parameter here">;
+
+// C++ casts
+// These messages adhere to the TryCast pattern: %0 is an int specifying the
+// cast type, %1 is the source type, %2 is the destination type.
+def err_bad_reinterpret_cast_overload : Error<
+ "reinterpret_cast cannot resolve overloaded function %0 to type %1">;
+
+def err_bad_static_cast_overload : Error<
+ "address of overloaded function %0 cannot be static_cast to type %1">;
+
+def err_bad_cstyle_cast_overload : Error<
+ "address of overloaded function %0 cannot be cast to type %1">;
+
+def err_bad_cxx_cast_generic : Error<
+ "%select{const_cast|static_cast|reinterpret_cast|dynamic_cast|C-style cast|"
+ "functional-style cast}0 from %1 to %2 is not allowed">;
+def err_bad_cxx_cast_rvalue : Error<
+ "%select{const_cast|static_cast|reinterpret_cast|dynamic_cast|C-style cast|"
+ "functional-style cast}0 from rvalue to reference type %2">;
+def err_bad_cxx_cast_qualifiers_away : Error<
+ "%select{const_cast|static_cast|reinterpret_cast|dynamic_cast|C-style cast|"
+ "functional-style cast}0 from %1 to %2 casts away qualifiers">;
+def err_bad_const_cast_dest : Error<
+ "%select{const_cast||||C-style cast|functional-style cast}0 to %2, "
+ "which is not a reference, pointer-to-object, or pointer-to-data-member">;
+def ext_cast_fn_obj : Extension<
+ "cast between pointer-to-function and pointer-to-object is an extension">;
+def warn_cxx98_compat_cast_fn_obj : Warning<
+ "cast between pointer-to-function and pointer-to-object is incompatible with C++98">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
+def err_bad_reinterpret_cast_small_int : Error<
+ "cast from pointer to smaller type %2 loses information">;
+def err_bad_cxx_cast_vector_to_scalar_different_size : Error<
+ "%select{||reinterpret_cast||C-style cast|}0 from vector %1 "
+ "to scalar %2 of different size">;
+def err_bad_cxx_cast_scalar_to_vector_different_size : Error<
+ "%select{||reinterpret_cast||C-style cast|}0 from scalar %1 "
+ "to vector %2 of different size">;
+def err_bad_cxx_cast_vector_to_vector_different_size : Error<
+ "%select{||reinterpret_cast||C-style cast|}0 from vector %1 "
+ "to vector %2 of different size">;
+def err_bad_lvalue_to_rvalue_cast : Error<
+ "cannot cast from lvalue of type %1 to rvalue reference type %2; types are "
+ "not compatible">;
+def err_bad_static_cast_pointer_nonpointer : Error<
+ "cannot cast from type %1 to pointer type %2">;
+def err_bad_static_cast_member_pointer_nonmp : Error<
+ "cannot cast from type %1 to member pointer type %2">;
+def err_bad_cxx_cast_member_pointer_size : Error<
+ "cannot %select{||reinterpret_cast||C-style cast|}0 from member pointer "
+ "type %1 to member pointer type %2 of different size">;
+def err_bad_reinterpret_cast_reference : Error<
+ "reinterpret_cast of a %0 to %1 needs its address which is not allowed">;
+def warn_undefined_reinterpret_cast : Warning<
+ "reinterpret_cast from %0 to %1 has undefined behavior.">,
+ InGroup<DiagGroup<"undefined-reinterpret-cast">>, DefaultIgnore;
+
+// These messages don't adhere to the pattern.
+// FIXME: Display the path somehow better.
+def err_ambiguous_base_to_derived_cast : Error<
+ "ambiguous cast from base %0 to derived %1:%2">;
+def err_static_downcast_via_virtual : Error<
+ "cannot cast %0 to %1 via virtual base %2">;
+def err_downcast_from_inaccessible_base : Error<
+ "cannot cast %select{private|protected}2 base class %1 to %0">;
+def err_upcast_to_inaccessible_base : Error<
+ "cannot cast %0 to its %select{private|protected}2 base class %1">;
+def err_bad_dynamic_cast_not_ref_or_ptr : Error<
+ "%0 is not a reference or pointer">;
+def err_bad_dynamic_cast_not_class : Error<"%0 is not a class">;
+def err_bad_dynamic_cast_incomplete : Error<"%0 is an incomplete type">;
+def err_bad_dynamic_cast_not_ptr : Error<"%0 is not a pointer">;
+def err_bad_dynamic_cast_not_polymorphic : Error<"%0 is not polymorphic">;
+
+// Other C++ expressions
+def err_need_header_before_typeid : Error<
+ "you need to include <typeinfo> before using the 'typeid' operator">;
+def err_need_header_before_ms_uuidof : Error<
+ "you need to include <guiddef.h> before using the '__uuidof' operator">;
+def err_uuidof_without_guid : Error<
+ "cannot call operator __uuidof on a type with no GUID">;
+def err_incomplete_typeid : Error<"'typeid' of incomplete type %0">;
+def err_static_illegal_in_new : Error<
+ "the 'static' modifier for the array size is not legal in new expressions">;
+def err_array_new_needs_size : Error<
+ "array size must be specified in new expressions">;
+def err_bad_new_type : Error<
+ "cannot allocate %select{function|reference}1 type %0 with new">;
+def err_new_incomplete_type : Error<
+ "allocation of incomplete type %0">;
+def err_new_array_nonconst : Error<
+ "only the first dimension of an allocated array may have dynamic size">;
+def err_new_array_init_args : Error<
+ "array 'new' cannot have initialization arguments">;
+def ext_new_paren_array_nonconst : ExtWarn<
+ "when type is in parentheses, array cannot have dynamic size">;
+def err_placement_new_non_placement_delete : Error<
+ "'new' expression with placement arguments refers to non-placement "
+ "'operator delete'">;
+def err_array_size_not_integral : Error<
+ "array size expression must have integral or %select{|unscoped }0"
+ "enumeration type, not %1">;
+def err_array_size_incomplete_type : Error<
+ "array size expression has incomplete class type %0">;
+def err_array_size_explicit_conversion : Error<
+ "array size expression of type %0 requires explicit conversion to type %1">;
+def note_array_size_conversion : Note<
+ "conversion to %select{integral|enumeration}0 type %1 declared here">;
+def err_array_size_ambiguous_conversion : Error<
+ "ambiguous conversion of array size expression of type %0 to an integral or "
+ "enumeration type">;
+def ext_array_size_conversion : Extension<
+ "implicit conversion from array size expression of type %0 to "
+ "%select{integral|enumeration}1 type %2 is a C++11 extension">,
+ InGroup<CXX11>;
+def warn_cxx98_compat_array_size_conversion : Warning<
+ "implicit conversion from array size expression of type %0 to "
+ "%select{integral|enumeration}1 type %2 is incompatible with C++98">,
+ InGroup<CXX98CompatPedantic>, DefaultIgnore;
+def err_address_space_qualified_new : Error<
+ "'new' cannot allocate objects of type %0 in address space '%1'">;
+def err_address_space_qualified_delete : Error<
+ "'delete' cannot delete objects of type %0 in address space '%1'">;
+
+def err_default_init_const : Error<
+ "default initialization of an object of const type %0"
+ "%select{| requires a user-provided default constructor}1">;
+def err_delete_operand : Error<"cannot delete expression of type %0">;
+def ext_delete_void_ptr_operand : ExtWarn<
+ "cannot delete expression with pointer-to-'void' type %0">;
+def err_ambiguous_delete_operand : Error<"ambiguous conversion of delete "
+ "expression of type %0 to a pointer">;
+def warn_delete_incomplete : Warning<
+ "deleting pointer to incomplete type %0 may cause undefined behaviour">,
+ InGroup<DiagGroup<"delete-incomplete">>;
+def err_delete_incomplete_class_type : Error<
+ "deleting incomplete class type %0; no conversions to pointer type">;
+def warn_delete_array_type : Warning<
+ "'delete' applied to a pointer-to-array type %0 treated as delete[]">;
+def err_no_suitable_delete_member_function_found : Error<
+ "no suitable member %0 in %1">;
+def err_ambiguous_suitable_delete_member_function_found : Error<
+ "multiple suitable %0 functions in %1">;
+def note_member_declared_here : Note<
+ "member %0 declared here">;
+def err_decrement_bool : Error<"cannot decrement expression of type bool">;
+def warn_increment_bool : Warning<
+ "incrementing expression of type bool is deprecated">, InGroup<Deprecated>;
+def err_catch_incomplete_ptr : Error<
+ "cannot catch pointer to incomplete type %0">;
+def err_catch_incomplete_ref : Error<
+ "cannot catch reference to incomplete type %0">;
+def err_catch_incomplete : Error<"cannot catch incomplete type %0">;
+def err_catch_rvalue_ref : Error<"cannot catch exceptions by rvalue reference">;
+def err_qualified_catch_declarator : Error<
+ "exception declarator cannot be qualified">;
+def err_early_catch_all : Error<"catch-all handler must come last">;
+def err_bad_memptr_rhs : Error<
+ "right hand operand to %0 has non pointer-to-member type %1">;
+def err_bad_memptr_lhs : Error<
+ "left hand operand to %0 must be a %select{|pointer to }1class "
+ "compatible with the right hand operand, but is %2">;
+def warn_exception_caught_by_earlier_handler : Warning<
+ "exception of type %0 will be caught by earlier handler">;
+def note_previous_exception_handler : Note<"for type %0">;
+def err_exceptions_disabled : Error<
+ "cannot use '%0' with exceptions disabled">;
+def err_objc_exceptions_disabled : Error<
+ "cannot use '%0' with Objective-C exceptions disabled">;
+def warn_non_virtual_dtor : Warning<
+ "%0 has virtual functions but non-virtual destructor">,
+ InGroup<NonVirtualDtor>, DefaultIgnore;
+def warn_delete_non_virtual_dtor : Warning<
+ "delete called on %0 that has virtual functions but non-virtual destructor">,
+ InGroup<DeleteNonVirtualDtor>, DefaultIgnore;
+def warn_delete_abstract_non_virtual_dtor : Warning<
+ "delete called on %0 that is abstract but has non-virtual destructor">,
+ InGroup<DeleteNonVirtualDtor>;
+def warn_overloaded_virtual : Warning<
+ "%q0 hides overloaded virtual %select{function|functions}1">,
+ InGroup<OverloadedVirtual>, DefaultIgnore;
+def note_hidden_overloaded_virtual_declared_here : Note<
+ "hidden overloaded virtual function %q0 declared here">;
+def warn_using_directive_in_header : Warning<
+ "using namespace directive in global context in header">,
+ InGroup<HeaderHygiene>, DefaultIgnore;
+def warn_overaligned_type : Warning<
+ "type %0 requires %1 bytes of alignment and the default allocator only "
+ "guarantees %2 bytes">,
+ InGroup<OveralignedType>, DefaultIgnore;
+
+def err_conditional_void_nonvoid : Error<
+ "%select{left|right}1 operand to ? is void, but %select{right|left}1 operand "
+ "is of type %0">;
+def err_conditional_ambiguous : Error<
+ "conditional expression is ambiguous; %0 can be converted to %1 "
+ "and vice versa">;
+def err_conditional_ambiguous_ovl : Error<
+ "conditional expression is ambiguous; %0 and %1 can be converted to several "
+ "common types">;
+
+def err_throw_incomplete : Error<
+ "cannot throw object of incomplete type %0">;
+def err_throw_incomplete_ptr : Error<
+ "cannot throw pointer to object of incomplete type %0">;
+def err_return_in_constructor_handler : Error<
+ "return in the catch of a function try block of a constructor is illegal">;
+
+let CategoryName = "Lambda Issue" in {
+ def err_capture_more_than_once : Error<
+ "%0 can appear only once in a capture list">;
+ def err_reference_capture_with_reference_default : Error<
+ "'&' cannot precede a capture when the capture default is '&'">;
+ def err_this_capture_with_copy_default : Error<
+ "'this' cannot be explicitly captured when the capture default is '='">;
+ def err_copy_capture_with_copy_default : Error<
+ "'&' must precede a capture when the capture default is '='">;
+ def err_capture_does_not_name_variable : Error<
+ "%0 in capture list does not name a variable">;
+ def err_capture_non_automatic_variable : Error<
+ "%0 cannot be captured because it does not have automatic storage "
+ "duration">;
+ def err_this_capture : Error<
+ "'this' cannot be %select{implicitly |}0captured in this context">;
+ def err_lambda_capture_block : Error<
+ "__block variable %0 cannot be captured in a lambda expression">;
+ def err_lambda_capture_anonymous_var : Error<
+ "unnamed variable cannot be implicitly captured in a lambda expression">;
+ def err_lambda_capture_vm_type : Error<
+ "variable %0 with variably modified type cannot be captured in "
+ "a lambda expression">;
+ def err_lambda_impcap : Error<
+ "variable %0 cannot be implicitly captured in a lambda with no "
+ "capture-default specified">;
+ def note_lambda_decl : Note<"lambda expression begins here">;
+ def err_lambda_unevaluated_operand : Error<
+ "lambda expression in an unevaluated operand">;
+ def ext_lambda_implies_void_return : ExtWarn<
+ "C++11 requires lambda with omitted result type to consist of a single "
+ "return statement">,
+ InGroup<LambdaExtensions>;
+ def err_lambda_return_init_list : Error<
+ "cannot deduce lambda return type from initializer list">;
+ def err_lambda_capture_default_arg : Error<
+ "lambda expression in default argument cannot capture any entity">;
+ def err_lambda_unexpanded_pack : Error<
+ "unexpanded function parameter pack capture is unsupported">;
+ def err_lambda_incomplete_result : Error<
+ "incomplete result type %0 in lambda expression">;
+ def err_lambda_objc_object_result : Error<
+ "non-pointer Objective-C class type %0 in lambda expression result">;
+ def ext_lambda_default_arguments : ExtWarn<
+ "C++11 forbids default arguments for lambda expressions">,
+ InGroup<LambdaExtensions>;
+ def err_noreturn_lambda_has_return_expr : Error<
+ "lambda declared 'noreturn' should not return">;
+ def warn_maybe_falloff_nonvoid_lambda : Warning<
+ "control may reach end of non-void lambda">,
+ InGroup<ReturnType>;
+ def warn_falloff_nonvoid_lambda : Warning<
+ "control reaches end of non-void lambda">,
+ InGroup<ReturnType>;
+ def err_access_lambda_capture : Error<
+ // The ERRORs represent other special members that aren't constructors, in
+ // hopes that someone will bother noticing and reporting if they appear
+ "capture of variable '%0' as type %1 calls %select{private|protected}3 "
+ "%select{default |copy |move |*ERROR* |*ERROR* |*ERROR* |}2constructor">,
+ AccessControl;
+ def note_lambda_to_block_conv : Note<
+ "implicit capture of lambda object due to conversion to block pointer "
+ "here">;
+}
+
+def err_operator_arrow_circular : Error<
+ "circular pointer delegation detected">;
+def err_pseudo_dtor_base_not_scalar : Error<
+ "object expression of non-scalar type %0 cannot be used in a "
+ "pseudo-destructor expression">;
+def ext_pseudo_dtor_on_void : ExtWarn<
+ "pseudo-destructors on type void are a Microsoft extension">,
+ InGroup<Microsoft>;
+def err_pseudo_dtor_type_mismatch : Error<
+ "the type of object expression (%0) does not match the type being destroyed "
+ "(%1) in pseudo-destructor expression">;
+def err_pseudo_dtor_call_with_args : Error<
+ "call to pseudo-destructor cannot have any arguments">;
+def err_dtor_expr_without_call : Error<
+ "%select{destructor reference|pseudo-destructor expression}0 must be "
+ "called immediately with '()'">;
+def err_pseudo_dtor_destructor_non_type : Error<
+ "%0 does not refer to a type name in pseudo-destructor expression; expected "
+ "the name of type %1">;
+def err_invalid_use_of_function_type : Error<
+ "a function type is not allowed here">;
+def err_invalid_use_of_array_type : Error<"an array type is not allowed here">;
+def err_type_defined_in_condition : Error<
+ "types may not be defined in conditions">;
+def err_typecheck_bool_condition : Error<
+ "value of type %0 is not contextually convertible to 'bool'">;
+def err_typecheck_ambiguous_condition : Error<
+ "conversion from %0 to %1 is ambiguous">;
+def err_typecheck_nonviable_condition : Error<
+ "no viable conversion from %0 to %1">;
+def err_typecheck_deleted_function : Error<
+ "conversion function from %0 to %1 invokes a deleted function">;
+
+def err_expected_class_or_namespace : Error<"expected a class or namespace">;
+def err_expected_class : Error<"%0 is not a class%select{ or namespace|, "
+ "namespace, or scoped enumeration}1">;
+def err_invalid_declarator_scope : Error<"cannot define or redeclare %0 here "
+ "because namespace %1 does not enclose namespace %2">;
+def err_invalid_declarator_global_scope : Error<
+ "definition or redeclaration of %0 cannot name the global scope">;
+def err_invalid_declarator_in_function : Error<
+ "definition or redeclaration of %0 not allowed inside a function">;
+def err_not_tag_in_scope : Error<
+ "no %select{struct|union|class|enum}0 named %1 in %2">;
+
+def err_cannot_form_pointer_to_member_of_reference_type : Error<
+ "cannot form a pointer-to-member to member %0 of reference type %1">;
+def err_incomplete_object_call : Error<
+ "incomplete type in call to object of type %0">;
+
+def warn_condition_is_assignment : Warning<"using the result of an "
+ "assignment as a condition without parentheses">,
+ InGroup<Parentheses>;
+// Completely identical except off by default.
+def warn_condition_is_idiomatic_assignment : Warning<"using the result "
+ "of an assignment as a condition without parentheses">,
+ InGroup<DiagGroup<"idiomatic-parentheses">>, DefaultIgnore;
+def note_condition_assign_to_comparison : Note<
+ "use '==' to turn this assignment into an equality comparison">;
+def note_condition_or_assign_to_comparison : Note<
+ "use '!=' to turn this compound assignment into an inequality comparison">;
+def note_condition_assign_silence : Note<
+ "place parentheses around the assignment to silence this warning">;
+
+def warn_equality_with_extra_parens : Warning<"equality comparison with "
+ "extraneous parentheses">, InGroup<ParenthesesOnEquality>;
+def note_equality_comparison_to_assign : Note<
+ "use '=' to turn this equality comparison into an assignment">;
+def note_equality_comparison_silence : Note<
+ "remove extraneous parentheses around the comparison to silence this warning">;
+
+// assignment related diagnostics (also for argument passing, returning, etc).
+// In most of these diagnostics the %2 is a value from the
+// Sema::AssignmentAction enumeration
+def err_typecheck_convert_incompatible : Error<
+ "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
+ " %0 "
+ "%select{from incompatible type|to parameter of incompatible type|"
+ "from a function with incompatible result type|to incompatible type|"
+ "with an expression of incompatible type|to parameter of incompatible type|"
+ "to incompatible type}2 %1"
+ "%select{|; dereference with *|"
+ "; take the address with &|"
+ "; remove *|"
+ "; remove &}3"
+ "%select{|: different classes (%5 vs %6)"
+ "|: different number of parameters (%5 vs %6)"
+ "|: type mismatch at %ordinal5 parameter (%6 vs %7)"
+ "|: different return type (%5 vs %6)"
+ "|: different qualifiers ("
+ "%select{none|const|restrict|const and restrict|volatile|const and volatile|"
+ "volatile and restrict|const, volatile, and restrict}5 vs "
+ "%select{none|const|restrict|const and restrict|volatile|const and volatile|"
+ "volatile and restrict|const, volatile, and restrict}6)}4">;
+def err_typecheck_missing_return_type_incompatible : Error<
+ "return type %0 must match previous return type %1 when %select{block "
+ "literal|lambda expression}2 has unspecified explicit return type">;
+
+def warn_incompatible_qualified_id : Warning<
+ "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
+ " %0 "
+ "%select{from incompatible type|to parameter of incompatible type|"
+ "from a function with incompatible result type|to incompatible type|"
+ "with an expression of incompatible type|to parameter of incompatible type|"
+ "to incompatible type}2 %1">;
+def ext_typecheck_convert_pointer_int : ExtWarn<
+ "incompatible pointer to integer conversion "
+ "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
+ " %0 "
+ "%select{from|to parameter of type|from a function with result type|to type|"
+ "with an expression of type|to parameter of type|to type}2 %1"
+ "%select{|; dereference with *|"
+ "; take the address with &|"
+ "; remove *|"
+ "; remove &}3">,
+ InGroup<IntConversion>;
+def ext_typecheck_convert_int_pointer : ExtWarn<
+ "incompatible integer to pointer conversion "
+ "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
+ " %0 "
+ "%select{from|to parameter of type|from a function with result type|to type|"
+ "with an expression of type|to parameter of type|to type}2 %1"
+ "%select{|; dereference with *|"
+ "; take the address with &|"
+ "; remove *|"
+ "; remove &}3">,
+ InGroup<IntConversion>;
+def ext_typecheck_convert_pointer_void_func : Extension<
+ "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
+ " %0 "
+ "%select{from|to parameter of type|from a function with result type|to type|"
+ "with an expression of type|to parameter of type|to type}2 %1 "
+ "converts between void pointer and function pointer">;
+def ext_typecheck_convert_incompatible_pointer_sign : ExtWarn<
+ "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
+ " %0 "
+ "%select{from|to parameter of type|from a function with result type|to type|"
+ "with an expression of type|to parameter of type|to type}2 %1 "
+ "converts between pointers to integer types with different sign">,
+ InGroup<DiagGroup<"pointer-sign">>;
+def ext_typecheck_convert_incompatible_pointer : ExtWarn<
+ "incompatible pointer types "
+ "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
+ " %0 "
+ "%select{from|to parameter of type|from a function with result type|to type|"
+ "with an expression of type|to parameter of type|to type}2 %1"
+ "%select{|; dereference with *|"
+ "; take the address with &|"
+ "; remove *|"
+ "; remove &}3">,
+ InGroup<IncompatiblePointerTypes>;
+def ext_typecheck_convert_discards_qualifiers : ExtWarn<
+ "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
+ " %0 "
+ "%select{from|to parameter of type|from a function with result type|to type|"
+ "with an expression of type|to parameter of type|to type}2 %1 discards "
+ "qualifiers">,
+ InGroup<IncompatiblePointerTypes>;
+def ext_nested_pointer_qualifier_mismatch : ExtWarn<
+ "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
+ " %0 "
+ "%select{from|to parameter of type|from a function with result type|to type|"
+ "with an expression of type|to parameter of type|to type}2 %1 discards "
+ "qualifiers in nested pointer types">,
+ InGroup<IncompatiblePointerTypes>;
+def warn_incompatible_vectors : Warning<
+ "incompatible vector types "
+ "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
+ " %0 "
+ "%select{from|to parameter of type|from a function with result type|to type|"
+ "with an expression of type|to parameter of type|to type}2 %1">,
+ InGroup<VectorConversion>, DefaultIgnore;
+def err_int_to_block_pointer : Error<
+ "invalid block pointer conversion "
+ "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
+ " %0 "
+ "%select{from|to parameter of type|from a function with result type|to type|"
+ "with an expression of type|to parameter of type|to type}2 %1">;
+def err_typecheck_convert_incompatible_block_pointer : Error<
+ "incompatible block pointer types "
+ "%select{assigning to|passing|returning|converting|initializing|sending|casting}2"
+ " %0 "
+ "%select{from|to parameter of type|from a function with result type|to type|"
+ "with an expression of type|to parameter of type|to type}2 %1">;
+def err_typecheck_incompatible_address_space : Error<
+ "%select{assigning %1 to %0"
+ "|passing %0 to parameter of type %1"
+ "|returning %0 from a function with result type %1"
+ "|converting %0 to type %1"
+ "|initializing %0 with an expression of type %1"
+ "|sending %0 to parameter of type %1"
+ "|casting %0 to type %1}2"
+ " changes address space of pointer">;
+def err_typecheck_incompatible_ownership : Error<
+ "%select{assigning %1 to %0"
+ "|passing %0 to parameter of type %1"
+ "|returning %0 from a function with result type %1"
+ "|converting %0 to type %1"
+ "|initializing %0 with an expression of type %1"
+ "|sending %0 to parameter of type %1"
+ "|casting %0 to type %1}2"
+ " changes retain/release properties of pointer">;
+def err_typecheck_comparison_of_distinct_blocks : Error<
+ "comparison of distinct block types (%0 and %1)">;
+
+def err_typecheck_array_not_modifiable_lvalue : Error<
+ "array type %0 is not assignable">;
+def err_typecheck_non_object_not_modifiable_lvalue : Error<
+ "non-object type %0 is not assignable">;
+def err_typecheck_expression_not_modifiable_lvalue : Error<
+ "expression is not assignable">;
+def err_typecheck_incomplete_type_not_modifiable_lvalue : Error<
+ "incomplete type %0 is not assignable">;
+def err_typecheck_lvalue_casts_not_supported : Error<
+ "assignment to cast is illegal, lvalue casts are not supported">;
+
+def err_typecheck_duplicate_vector_components_not_mlvalue : Error<
+ "vector is not assignable (contains duplicate components)">;
+def err_block_decl_ref_not_modifiable_lvalue : Error<
+ "variable is not assignable (missing __block type specifier)">;
+def err_lambda_decl_ref_not_modifiable_lvalue : Error<
+ "cannot assign to a variable captured by copy in a non-mutable lambda">;
+def err_typecheck_call_not_function : Error<
+ "called object type %0 is not a function or function pointer">;
+def err_call_incomplete_return : Error<
+ "calling function with incomplete return type %0">;
+def err_call_function_incomplete_return : Error<
+ "calling %0 with incomplete return type %1">;
+def note_function_with_incomplete_return_type_declared_here : Note<
+ "%0 declared here">;
+def err_call_incomplete_argument : Error<
+ "argument type %0 is incomplete">;
+def err_typecheck_call_too_few_args : Error<
+ "too few %select{|||execution configuration }0arguments to "
+ "%select{function|block|method|kernel function}0 call, "
+ "expected %1, have %2">;
+def err_typecheck_call_too_few_args_at_least : Error<
+ "too few %select{|||execution configuration }0arguments to "
+ "%select{function|block|method|kernel function}0 call, "
+ "expected at least %1, have %2">;
+def err_typecheck_call_too_many_args : Error<
+ "too many %select{|||execution configuration }0arguments to "
+ "%select{function|block|method|kernel function}0 call, "
+ "expected %1, have %2">;
+def err_typecheck_call_too_many_args_at_most : Error<
+ "too many %select{|||execution configuration }0arguments to "
+ "%select{function|block|method|kernel function}0 call, "
+ "expected at most %1, have %2">;
+def note_callee_decl : Note<
+ "%0 declared here">;
+def note_defined_here : Note<"%0 defined here">;
+
+def warn_call_wrong_number_of_arguments : Warning<
+ "too %select{few|many}0 arguments in call to %1">;
+def err_atomic_builtin_must_be_pointer : Error<
+ "first argument to atomic builtin must be a pointer (%0 invalid)">;
+def err_atomic_builtin_must_be_pointer_intptr : Error<
+ "first argument to atomic builtin must be a pointer to integer or pointer"
+ " (%0 invalid)">;
+def err_atomic_builtin_pointer_size : Error<
+ "first argument to atomic builtin must be a pointer to 1,2,4,8 or 16 byte "
+ "type (%0 invalid)">;
+def err_atomic_op_needs_atomic : Error<
+ "first argument to atomic operation must be a pointer to _Atomic "
+ "type (%0 invalid)">;
+def err_atomic_op_needs_trivial_copy : Error<
+ "first argument to atomic operation must be a pointer to a trivially-copyable"
+ " type (%0 invalid)">;
+def err_atomic_op_needs_atomic_int_or_ptr : Error<
+ "first argument to atomic operation must be a pointer to %select{|atomic }0"
+ "integer or pointer (%1 invalid)">;
+def err_atomic_op_bitwise_needs_atomic_int : Error<
+ "first argument to bitwise atomic operation must be a pointer to "
+ "%select{|atomic }0integer (%1 invalid)">;
+
+def err_deleted_function_use : Error<"attempt to use a deleted function">;
+
+def err_kern_type_not_void_return : Error<
+ "kernel function type %0 must have void return type">;
+def err_config_scalar_return : Error<
+ "CUDA special function 'cudaConfigureCall' must have scalar return type">;
+def err_kern_call_not_global_function : Error<
+ "kernel call to non-global function %0">;
+def err_global_call_not_config : Error<
+ "call to global function %0 not configured">;
+def err_ref_bad_target : Error<
+ "reference to %select{__device__|__global__|__host__|__host__ __device__}0 "
+ "function %1 in %select{__device__|__global__|__host__|__host__ __device__}2 function">;
+
+def err_cannot_pass_objc_interface_to_vararg : Error<
+ "cannot pass object with interface type %0 by-value through variadic "
+ "%select{function|block|method}1">;
+
+def warn_cannot_pass_non_pod_arg_to_vararg : Warning<
+ "cannot pass object of %select{non-POD|non-trivial}0 type %1 through variadic"
+ " %select{function|block|method|constructor}2; call will abort at runtime">,
+ InGroup<DiagGroup<"non-pod-varargs">>, DefaultError;
+def warn_cxx98_compat_pass_non_pod_arg_to_vararg : Warning<
+ "passing object of trivial but non-POD type %0 through variadic"
+ " %select{function|block|method|constructor}1 is incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+
+def err_typecheck_call_invalid_ordered_compare : Error<
+ "ordered compare requires two args of floating point type (%0 and %1)">;
+def err_typecheck_call_invalid_unary_fp : Error<
+ "floating point classification requires argument of floating point type "
+ "(passed in %0)">;
+def err_typecheck_cond_expect_scalar : Error<
+ "used type %0 where arithmetic or pointer type is required">;
+def ext_typecheck_cond_one_void : Extension<
+ "C99 forbids conditional expressions with only one void side">;
+def err_typecheck_cond_expect_scalar_or_vector : Error<
+ "used type %0 where arithmetic, pointer, or vector type is required">;
+def err_typecheck_cast_to_incomplete : Error<
+ "cast to incomplete type %0">;
+def ext_typecheck_cast_nonscalar : Extension<
+ "C99 forbids casting nonscalar type %0 to the same type">;
+def ext_typecheck_cast_to_union : Extension<"C99 forbids casts to union type">;
+def err_typecheck_cast_to_union_no_type : Error<
+ "cast to union type from type %0 not present in union">;
+def err_cast_pointer_from_non_pointer_int : Error<
+ "operand of type %0 cannot be cast to a pointer type">;
+def err_cast_pointer_to_non_pointer_int : Error<
+ "pointer cannot be cast to type %0">;
+def err_typecheck_expect_scalar_operand : Error<
+ "operand of type %0 where arithmetic or pointer type is required">;
+def err_typecheck_cond_incompatible_operands : Error<
+ "incompatible operand types (%0 and %1)">;
+def ext_typecheck_cond_incompatible_operands_nonstandard : ExtWarn<
+ "incompatible operand types (%0 and %1) use non-standard composite pointer "
+ "type %2">;
+def err_cast_selector_expr : Error<
+ "cannot type cast @selector expression">;
+def warn_typecheck_cond_incompatible_pointers : ExtWarn<
+ "pointer type mismatch (%0 and %1)">,
+ InGroup<DiagGroup<"pointer-type-mismatch">>;
+def warn_typecheck_cond_pointer_integer_mismatch : ExtWarn<
+ "pointer/integer type mismatch in conditional expression (%0 and %1)">,
+ InGroup<DiagGroup<"conditional-type-mismatch">>;
+def err_typecheck_choose_expr_requires_constant : Error<
+ "'__builtin_choose_expr' requires a constant expression">;
+def warn_unused_expr : Warning<"expression result unused">,
+ InGroup<UnusedValue>;
+def warn_unused_voidptr : Warning<
+ "expression result unused; should this cast be to 'void'?">,
+ InGroup<UnusedValue>;
+def warn_unused_property_expr : Warning<
+ "property access result unused - getters should not be used for side effects">,
+ InGroup<UnusedValue>;
+def warn_unused_container_subscript_expr : Warning<
+ "container access result unused - container access should not be used for side effects">,
+ InGroup<UnusedValue>;
+def warn_unused_call : Warning<
+ "ignoring return value of function declared with %0 attribute">,
+ InGroup<UnusedValue>;
+def warn_unused_result : Warning<
+ "ignoring return value of function declared with warn_unused_result "
+ "attribute">, InGroup<DiagGroup<"unused-result">>;
+def warn_unused_comparison : Warning<
+ "%select{equality|inequality}0 comparison result unused">,
+ InGroup<UnusedComparison>;
+def note_inequality_comparison_to_or_assign : Note<
+ "use '|=' to turn this inequality comparison into an or-assignment">;
+
+def err_incomplete_type_used_in_type_trait_expr : Error<
+ "incomplete type %0 used in type trait expression">;
+def err_type_trait_arity : Error<
+ "type trait requires %0%select{| or more}1 argument%select{|s}2; have "
+ "%3 argument%s3">;
+
+def err_dimension_expr_not_constant_integer : Error<
+ "dimension expression does not evaluate to a constant unsigned int">;
+def err_expected_ident_or_lparen : Error<"expected identifier or '('">;
+
+def err_typecheck_cond_incompatible_operands_null : Error<
+ "non-pointer operand type %0 incompatible with %select{NULL|nullptr}1">;
+} // End of general sema category.
+
+// inline asm.
+let CategoryName = "Inline Assembly Issue" in {
+ def err_asm_wide_character : Error<"wide string is invalid in 'asm'">;
+ def err_asm_invalid_lvalue_in_output : Error<"invalid lvalue in asm output">;
+ def err_asm_invalid_output_constraint : Error<
+ "invalid output constraint '%0' in asm">;
+ def err_asm_invalid_lvalue_in_input : Error<
+ "invalid lvalue in asm input for constraint '%0'">;
+ def err_asm_invalid_input_constraint : Error<
+ "invalid input constraint '%0' in asm">;
+ def err_asm_invalid_type_in_input : Error<
+ "invalid type %0 in asm input for constraint '%1'">;
+ def err_asm_tying_incompatible_types : Error<
+ "unsupported inline asm: input with type %0 matching output with type %1">;
+ def err_asm_unknown_register_name : Error<"unknown register name '%0' in asm">;
+ def warn_asm_label_on_auto_decl : Warning<
+ "ignored asm label '%0' on automatic variable">;
+ def err_invalid_asm_cast_lvalue : Error<
+    "invalid use of a cast in an inline asm context requiring an l-value: "
+ "remove the cast or build with -fheinous-gnu-extensions">;
+
+ def warn_invalid_asm_cast_lvalue : Warning<
+    "invalid use of a cast in an inline asm context requiring an l-value: "
+ "accepted due to -fheinous-gnu-extensions, but clang may remove support "
+ "for this in the future">;
+}
+
+let CategoryName = "Semantic Issue" in {
+
+def err_invalid_conversion_between_vectors : Error<
+ "invalid conversion between vector type %0 and %1 of different size">;
+def err_invalid_conversion_between_vector_and_integer : Error<
+ "invalid conversion between vector type %0 and integer type %1 "
+ "of different size">;
+
+def err_invalid_conversion_between_vector_and_scalar : Error<
+ "invalid conversion between vector type %0 and scalar type %1">;
+
+// C++ member initializers.
+def err_only_constructors_take_base_inits : Error<
+ "only constructors take base initializers">;
+
+def err_multiple_mem_initialization : Error <
+ "multiple initializations given for non-static member %0">;
+def err_multiple_mem_union_initialization : Error <
+ "initializing multiple members of union">;
+def err_multiple_base_initialization : Error <
+ "multiple initializations given for base %0">;
+
+def err_mem_init_not_member_or_class : Error<
+ "member initializer %0 does not name a non-static data member or base "
+ "class">;
+
+def warn_initializer_out_of_order : Warning<
+ "%select{field|base class}0 %1 will be initialized after "
+ "%select{field|base}2 %3">,
+ InGroup<Reorder>, DefaultIgnore;
+
+def err_base_init_does_not_name_class : Error<
+ "constructor initializer %0 does not name a class">;
+def err_base_init_direct_and_virtual : Error<
+ "base class initializer %0 names both a direct base class and an "
+ "inherited virtual base class">;
+def err_not_direct_base_or_virtual : Error<
+ "type %0 is not a direct or virtual base of %1">;
+
+def err_in_class_initializer_non_const : Error<
+ "non-const static data member must be initialized out of line">;
+def err_in_class_initializer_volatile : Error<
+ "static const volatile data member must be initialized out of line">;
+def err_in_class_initializer_bad_type : Error<
+ "static data member of type %0 must be initialized out of line">;
+def ext_in_class_initializer_float_type : ExtWarn<
+ "in-class initializer for static data member of type %0 is a GNU extension">,
+ InGroup<GNU>;
+def note_in_class_initializer_float_type_constexpr : Note<
+ "use 'constexpr' specifier to silence this warning">;
+def err_in_class_initializer_literal_type : Error<
+ "in-class initializer for static data member of type %0 requires "
+ "'constexpr' specifier">;
+def err_in_class_initializer_non_constant : Error<
+ "in-class initializer for static data member is not a constant expression">;
+
+def ext_in_class_initializer_non_constant : Extension<
+ "in-class initializer for static data member is not a constant expression; "
+ "folding it to a constant is a GNU extension">;
+
+// C++ anonymous unions and GNU anonymous structs/unions
+def ext_anonymous_union : Extension<
+ "anonymous unions are a C11 extension">, InGroup<C11>;
+def ext_gnu_anonymous_struct : Extension<
+ "anonymous structs are a GNU extension">, InGroup<GNU>;
+def ext_c11_anonymous_struct : Extension<
+ "anonymous structs are a C11 extension">, InGroup<C11>;
+def err_anonymous_union_not_static : Error<
+ "anonymous unions at namespace or global scope must be declared 'static'">;
+def err_anonymous_union_with_storage_spec : Error<
+ "anonymous union at class scope must not have a storage specifier">;
+def err_anonymous_struct_not_member : Error<
+ "anonymous %select{structs|structs and classes}0 must be "
+ "%select{struct or union|class}0 members">;
+def err_anonymous_union_member_redecl : Error<
+ "member of anonymous union redeclares %0">;
+def err_anonymous_struct_member_redecl : Error<
+ "member of anonymous struct redeclares %0">;
+def err_anonymous_record_with_type : Error<
+ "types cannot be declared in an anonymous %select{struct|union}0">;
+def ext_anonymous_record_with_type : Extension<
+ "types declared in an anonymous %select{struct|union}0 are a Microsoft "
+ "extension">, InGroup<Microsoft>;
+def err_anonymous_record_with_function : Error<
+ "functions cannot be declared in an anonymous %select{struct|union}0">;
+def err_anonymous_record_with_static : Error<
+ "static members cannot be declared in an anonymous %select{struct|union}0">;
+def err_anonymous_record_bad_member : Error<
+ "anonymous %select{struct|union}0 can only contain non-static data members">;
+def err_anonymous_record_nonpublic_member : Error<
+ "anonymous %select{struct|union}0 cannot contain a "
+ "%select{private|protected}1 data member">;
+def ext_ms_anonymous_struct : ExtWarn<
+ "anonymous structs are a Microsoft extension">, InGroup<Microsoft>;
+
+// C++ local classes
+def err_reference_to_local_var_in_enclosing_function : Error<
+ "reference to local variable %0 declared in enclosing function %1">;
+def err_reference_to_local_var_in_enclosing_block : Error<
+ "reference to local variable %0 declared in enclosing block literal">;
+def err_reference_to_local_var_in_enclosing_lambda : Error<
+ "reference to local variable %0 declared in enclosing lambda expression">;
+def err_reference_to_local_var_in_enclosing_context : Error<
+ "reference to local variable %0 declared in enclosing context">;
+
+def note_local_variable_declared_here : Note<
+ "%0 declared here">;
+def err_static_data_member_not_allowed_in_local_class : Error<
+ "static data member %0 not allowed in local class %1">;
+
+// C++ derived classes
+def err_base_clause_on_union : Error<"unions cannot have base classes">;
+def err_base_must_be_class : Error<"base specifier must name a class">;
+def err_union_as_base_class : Error<"unions cannot be base classes">;
+def err_incomplete_base_class : Error<"base class has incomplete type">;
+def err_duplicate_base_class : Error<
+ "base class %0 specified more than once as a direct base class">;
+// FIXME: better way to display derivation? Pass entire thing into diagclient?
+def err_ambiguous_derived_to_base_conv : Error<
+ "ambiguous conversion from derived class %0 to base class %1:%2">;
+def err_ambiguous_memptr_conv : Error<
+ "ambiguous conversion from pointer to member of %select{base|derived}0 "
+ "class %1 to pointer to member of %select{derived|base}0 class %2:%3">;
+
+def err_memptr_conv_via_virtual : Error<
+ "conversion from pointer to member of class %0 to pointer to member "
+ "of class %1 via virtual base %2 is not allowed">;
+
+// C++ member name lookup
+def err_ambiguous_member_multiple_subobjects : Error<
+ "non-static member %0 found in multiple base-class subobjects of type %1:%2">;
+def err_ambiguous_member_multiple_subobject_types : Error<
+ "member %0 found in multiple base classes of different types">;
+def note_ambiguous_member_found : Note<"member found by ambiguous name lookup">;
+def err_ambiguous_reference : Error<"reference to %0 is ambiguous">;
+def note_ambiguous_candidate : Note<"candidate found by name lookup is %q0">;
+def err_ambiguous_tag_hiding : Error<"a type named %0 is hidden by a "
+ "declaration in a different namespace">;
+def note_hidden_tag : Note<"type declaration hidden">;
+def note_hiding_object : Note<"declaration hides type">;
+
+// C++ operator overloading
+def err_operator_overload_needs_class_or_enum : Error<
+ "overloaded %0 must have at least one parameter of class "
+ "or enumeration type">;
+
+def err_operator_overload_variadic : Error<"overloaded %0 cannot be variadic">;
+def err_operator_overload_static : Error<
+ "overloaded %0 cannot be a static member function">;
+def err_operator_overload_default_arg : Error<
+ "parameter of overloaded %0 cannot have a default argument">;
+def err_operator_overload_must_be : Error<
+ "overloaded %0 must be a %select{unary|binary|unary or binary}2 operator "
+ "(has %1 parameter%s1)">;
+
+def err_operator_overload_must_be_member : Error<
+ "overloaded %0 must be a non-static member function">;
+def err_operator_overload_post_incdec_must_be_int : Error<
+ "parameter of overloaded post-%select{increment|decrement}1 operator must "
+ "have type 'int' (not %0)">;
+
+// C++ allocation and deallocation functions.
+def err_operator_new_delete_declared_in_namespace : Error<
+ "%0 cannot be declared inside a namespace">;
+def err_operator_new_delete_declared_static : Error<
+ "%0 cannot be declared static in global scope">;
+def err_operator_new_delete_invalid_result_type : Error<
+ "%0 must return type %1">;
+def err_operator_new_delete_dependent_result_type : Error<
+ "%0 cannot have a dependent return type; use %1 instead">;
+def err_operator_new_delete_too_few_parameters : Error<
+  "%0 must have at least one parameter">;
+def err_operator_new_delete_template_too_few_parameters : Error<
+  "%0 template must have at least two parameters">;
+
+def err_operator_new_dependent_param_type : Error<
+ "%0 cannot take a dependent type as first parameter; "
+ "use size_t (%1) instead">;
+def err_operator_new_param_type : Error<
+ "%0 takes type size_t (%1) as first parameter">;
+def err_operator_new_default_arg: Error<
+ "parameter of %0 cannot have a default argument">;
+def err_operator_delete_dependent_param_type : Error<
+ "%0 cannot take a dependent type as first parameter; use %1 instead">;
+def err_operator_delete_param_type : Error<
+ "first parameter of %0 must have type %1">;
+
+// C++ literal operators
+def err_literal_operator_outside_namespace : Error<
+ "literal operator %0 must be in a namespace or global scope">;
+def err_literal_operator_default_argument : Error<
+ "literal operator cannot have a default argument">;
+// FIXME: This diagnostic sucks
+def err_literal_operator_params : Error<
+ "parameter declaration for literal operator %0 is not valid">;
+def err_literal_operator_extern_c : Error<
+ "literal operator must have C++ linkage">;
+def warn_user_literal_reserved : Warning<
+ "user-defined literal suffixes not starting with '_' are reserved; "
+ "no literal will invoke this operator">,
+ InGroup<UserDefinedLiterals>;
+
+// C++ conversion functions
+def err_conv_function_not_member : Error<
+ "conversion function must be a non-static member function">;
+def err_conv_function_return_type : Error<
+ "conversion function cannot have a return type">;
+def err_conv_function_with_params : Error<
+ "conversion function cannot have any parameters">;
+def err_conv_function_variadic : Error<
+ "conversion function cannot be variadic">;
+def err_conv_function_to_array : Error<
+ "conversion function cannot convert to an array type">;
+def err_conv_function_to_function : Error<
+ "conversion function cannot convert to a function type">;
+def err_conv_function_with_complex_decl : Error<
+ "must use a typedef to declare a conversion to %0">;
+def err_conv_function_redeclared : Error<
+ "conversion function cannot be redeclared">;
+def warn_conv_to_self_not_used : Warning<
+ "conversion function converting %0 to itself will never be used">;
+def warn_conv_to_base_not_used : Warning<
+ "conversion function converting %0 to its base class %1 will never be used">;
+def warn_conv_to_void_not_used : Warning<
+ "conversion function converting %0 to %1 will never be used">;
+
+def warn_not_compound_assign : Warning<
+ "use of unary operator that may be intended as compound assignment (%0=)">;
+
+// C++11 explicit conversion operators
+def ext_explicit_conversion_functions : ExtWarn<
+ "explicit conversion functions are a C++11 extension">, InGroup<CXX11>;
+def warn_cxx98_compat_explicit_conversion_functions : Warning<
+ "explicit conversion functions are incompatible with C++98">,
+ InGroup<CXX98Compat>, DefaultIgnore;
+
+// C++11 defaulted functions
+def err_defaulted_default_ctor_params : Error<
+ "an explicitly-defaulted default constructor must have no parameters">;
+def err_defaulted_copy_ctor_params : Error<
+ "an explicitly-defaulted copy constructor must have exactly one parameter">;
+def err_defaulted_copy_ctor_volatile_param : Error<
+ "the parameter for an explicitly-defaulted copy constructor may not be "
+ "volatile">;
+def err_defaulted_copy_ctor_const_param : Error<
+ "the parameter for this explicitly-defaulted copy constructor is const, but "
+ "a member or base requires it to be non-const">;
+def err_defaulted_copy_assign_params : Error<
+ "an explicitly-defaulted copy assignment operator must have exactly one "
+ "parameter">;
+def err_defaulted_copy_assign_return_type : Error<
+ "an explicitly-defaulted copy assignment operator must return an unqualified "
+ "lvalue reference to its class type">;
+def err_defaulted_copy_assign_not_ref : Error<
+ "the parameter for an explicitly-defaulted copy assignment operator must be an "
+ "lvalue reference type">;
+def err_defaulted_copy_assign_volatile_param : Error<
+ "the parameter for an explicitly-defaulted copy assignment operator may not "
+ "be volatile">;
+def err_defaulted_copy_assign_const_param : Error<
+ "the parameter for this explicitly-defaulted copy assignment operator is "
+ "const, but a member or base requires it to be non-const">;
+def err_defaulted_copy_assign_quals : Error<
+ "an explicitly-defaulted copy assignment operator may not have 'const', "
+ "'constexpr' or 'volatile' qualifiers">;
+def err_defaulted_move_ctor_params : Error<
+ "an explicitly-defaulted move constructor must have exactly one parameter">;
+def err_defaulted_move_ctor_volatile_param : Error<
+ "the parameter for an explicitly-defaulted move constructor may not be "
+ "volatile">;
+def err_defaulted_move_ctor_const_param : Error<
+ "the parameter for an explicitly-defaulted move constructor may not be "
+ "const">;
+def err_defaulted_move_assign_params : Error<
+ "an explicitly-defaulted move assignment operator must have exactly one "
+ "parameter">;
+def err_defaulted_move_assign_return_type : Error<
+ "an explicitly-defaulted move assignment operator must return an unqualified "
+ "lvalue reference to its class type">;
+def err_defaulted_move_assign_not_ref : Error<
+ "the parameter for an explicitly-defaulted move assignment operator must be an "
+ "rvalue reference type">;
+def err_defaulted_move_assign_volatile_param : Error<
+ "the parameter for an explicitly-defaulted move assignment operator may not "
+ "be volatile">;
+def err_defaulted_move_assign_const_param : Error<
+ "the parameter for an explicitly-defaulted move assignment operator may not "
+ "be const">;
+def err_defaulted_move_assign_quals : Error<
+ "an explicitly-defaulted move assignment operator may not have 'const', "
+ "'constexpr' or 'volatile' qualifiers">;
+def err_incorrect_defaulted_exception_spec : Error<
+ "exception specification of explicitly defaulted %select{default constructor|"
+ "copy constructor|move constructor|copy assignment operator|move assignment "
+ "operator|destructor}0 does not match the "
+ "calculated one">;
+def err_incorrect_defaulted_constexpr : Error<
+ "defaulted definition of %select{default constructor|copy constructor|"
+ "move constructor}0 is not constexpr">;
+def err_out_of_line_default_deletes : Error<
+ "defaulting this %select{default constructor|copy constructor|move "
+ "constructor|copy assignment operator|move assignment operator|destructor}0 "
+ "would delete it after its first declaration">;
+
+def warn_ptr_arith_precedes_bounds : Warning<
+ "the pointer decremented by %0 refers before the beginning of the array">,
+ InGroup<DiagGroup<"array-bounds-pointer-arithmetic">>, DefaultIgnore;
+def warn_ptr_arith_exceeds_bounds : Warning<
+ "the pointer incremented by %0 refers past the end of the array (that "
+ "contains %1 element%s2)">,
+ InGroup<DiagGroup<"array-bounds-pointer-arithmetic">>, DefaultIgnore;
+def warn_array_index_precedes_bounds : Warning<
+ "array index %0 is before the beginning of the array">,
+ InGroup<DiagGroup<"array-bounds">>;
+def warn_array_index_exceeds_bounds : Warning<
+ "array index %0 is past the end of the array (which contains %1 "
+ "element%s2)">, InGroup<DiagGroup<"array-bounds">>;
+def note_array_index_out_of_bounds : Note<
+ "array %0 declared here">;
+
+def warn_printf_write_back : Warning<
+ "use of '%%n' in format string discouraged (potentially insecure)">,
+ InGroup<FormatSecurity>;
+def warn_printf_insufficient_data_args : Warning<
+ "more '%%' conversions than data arguments">, InGroup<Format>;
+def warn_printf_data_arg_not_used : Warning<
+ "data argument not used by format string">, InGroup<FormatExtraArgs>;
+def warn_format_invalid_conversion : Warning<
+ "invalid conversion specifier '%0'">, InGroup<FormatInvalidSpecifier>;
+def warn_printf_incomplete_specifier : Warning<
+ "incomplete format specifier">, InGroup<Format>;
+def warn_missing_format_string : Warning<
+ "format string missing">, InGroup<Format>;
+def warn_scanf_nonzero_width : Warning<
+ "zero field width in scanf format string is unused">,
+ InGroup<Format>;
+def warn_printf_conversion_argument_type_mismatch : Warning<
+ "format specifies type %0 but the argument has type %1">,
+ InGroup<Format>;
+def warn_printf_positional_arg_exceeds_data_args : Warning <
+ "data argument position '%0' exceeds the number of data arguments (%1)">,
+ InGroup<Format>;
+def warn_format_zero_positional_specifier : Warning<
+ "position arguments in format strings start counting at 1 (not 0)">,
+ InGroup<Format>;
+def warn_format_invalid_positional_specifier : Warning<
+ "invalid position specified for %select{field width|field precision}0">,
+ InGroup<Format>;
+def warn_format_mix_positional_nonpositional_args : Warning<
+ "cannot mix positional and non-positional arguments in format string">,
+ InGroup<Format>;
+def warn_static_array_too_small : Warning<
+ "array argument is too small; contains %0 elements, callee requires at least %1">,
+ InGroup<DiagGroup<"array-bounds">>;
+def note_callee_static_array : Note<
+ "callee declares array parameter as static here">;
+def warn_empty_format_string : Warning<
+ "format string is empty">, InGroup<FormatZeroLength>;
+def warn_format_string_is_wide_literal : Warning<
+ "format string should not be a wide string">, InGroup<Format>;
+def warn_printf_format_string_contains_null_char : Warning<
+ "format string contains '\\0' within the string body">, InGroup<Format>;
+def warn_printf_asterisk_missing_arg : Warning<
+ "'%select{*|.*}0' specified field %select{width|precision}0 is missing a matching 'int' argument">;
+def warn_printf_asterisk_wrong_type : Warning<
+ "field %select{width|precision}0 should have type %1, but argument has type %2">,
+ InGroup<Format>;
+def warn_printf_nonsensical_optional_amount: Warning<
+ "%select{field width|precision}0 used with '%1' conversion specifier, resulting in undefined behavior">,
+ InGroup<Format>;
+def warn_printf_nonsensical_flag: Warning<
+ "flag '%0' results in undefined behavior with '%1' conversion specifier">,
+ InGroup<Format>;
+def warn_format_nonsensical_length: Warning<
+ "length modifier '%0' results in undefined behavior or no effect with '%1' conversion specifier">,
+ InGroup<Format>;
+def warn_format_non_standard_positional_arg: ExtWarn<
+ "positional arguments are not supported by ISO C">, InGroup<FormatNonStandard>, DefaultIgnore;
+def warn_format_non_standard: ExtWarn<
+ "'%0' %select{length modifier|conversion specifier}1 is not supported by ISO C">,
+ InGroup<FormatNonStandard>, DefaultIgnore;
+def warn_format_non_standard_conversion_spec: ExtWarn<
+ "using length modifier '%0' with conversion specifier '%1' is not supported by ISO C">,
+ InGroup<FormatNonStandard>, DefaultIgnore;
+def warn_printf_ignored_flag: Warning<
+ "flag '%0' is ignored when flag '%1' is present">,
+ InGroup<Format>;
+def warn_scanf_scanlist_incomplete : Warning<
+ "no closing ']' for '%%[' in scanf format string">,
+ InGroup<Format>;
+def note_format_string_defined : Note<"format string is defined here">;
+
+def warn_null_arg : Warning<
+ "null passed to a callee which requires a non-null argument">,
+ InGroup<NonNull>;
+
+// CHECK: returning address/reference of stack memory
+def warn_ret_stack_addr : Warning<
+ "address of stack memory associated with local variable %0 returned">,
+ InGroup<DiagGroup<"return-stack-address">>;
+def warn_ret_stack_ref : Warning<
+ "reference to stack memory associated with local variable %0 returned">,
+ InGroup<DiagGroup<"return-stack-address">>;
+def warn_ret_local_temp_addr : Warning<
+ "returning address of local temporary object">,
+ InGroup<DiagGroup<"return-stack-address">>;
+def warn_ret_local_temp_ref : Warning<
+ "returning reference to local temporary object">,
+ InGroup<DiagGroup<"return-stack-address">>;
+def warn_ret_addr_label : Warning<
+ "returning address of label, which is local">,
+ InGroup<DiagGroup<"return-stack-address">>;
+def err_ret_local_block : Error<
+ "returning block that lives on the local stack">;
+def note_ref_var_local_bind : Note<
+ "binding reference variable %0 here">;
+
+// Check for initializing a member variable with the address or a reference to
+// a constructor parameter.
+def warn_bind_ref_member_to_parameter : Warning<
+ "binding reference member %0 to stack allocated parameter %1">,
+ InGroup<DiagGroup<"dangling-field">>;
+def warn_init_ptr_member_to_parameter_addr : Warning<
+ "initializing pointer member %0 with the stack address of parameter %1">,
+ InGroup<DiagGroup<"dangling-field">>;
+def warn_bind_ref_member_to_temporary : Warning<
+ "binding reference member %0 to a temporary value">,
+ InGroup<DiagGroup<"dangling-field">>;
+def note_ref_or_ptr_member_declared_here : Note<
+ "%select{reference|pointer}0 member declared here">;
+
+// For non-floating point, expressions of the form x == x or x != x
+// should result in a warning, since these always evaluate to a constant.
+// Array comparisons have similar warnings
+def warn_comparison_always : Warning<
+ "%select{self-|array }0comparison always evaluates to %select{false|true|a constant}1">,
+ InGroup<TautologicalCompare>;
+
+def warn_stringcompare : Warning<
+ "result of comparison against %select{a string literal|@encode}0 is "
+ "unspecified (use strncmp instead)">,
+ InGroup<DiagGroup<"string-compare">>;
+
+// Generic selections.
+def err_assoc_type_incomplete : Error<
+ "type %0 in generic association incomplete">;
+def err_assoc_type_nonobject : Error<
+ "type %0 in generic association not an object type">;
+def err_assoc_type_variably_modified : Error<
+ "type %0 in generic association is a variably modified type">;
+def err_assoc_compatible_types : Error<
+ "type %0 in generic association compatible with previously specified type %1">;
+def note_compat_assoc : Note<
+ "compatible type %0 specified here">;
+def err_generic_sel_no_match : Error<
+ "controlling expression type %0 not compatible with any generic association type">;
+def err_generic_sel_multi_match : Error<
+ "controlling expression type %0 compatible with %1 generic association types">;
+
+
+// Blocks
+def err_blocks_disable : Error<"blocks support disabled - compile with -fblocks"
+ " or pick a deployment target that supports them">;
+def err_block_returning_array_function : Error<
+ "block cannot return %select{array|function}0 type %1">;
+
+// Builtin annotation string.
+def err_builtin_annotation_not_string_constant : Error<
+  "__builtin_annotation requires a non-wide string constant">;
+
+// CFString checking
+def err_cfstring_literal_not_string_constant : Error<
+ "CFString literal is not a string constant">,
+ InGroup<DiagGroup<"CFString-literal">>;
+def warn_cfstring_truncated : Warning<
+ "input conversion stopped due to an input byte that does not "
+ "belong to the input codeset UTF-8">,
+ InGroup<DiagGroup<"CFString-literal">>;
+
+// Statements.
+def err_continue_not_in_loop : Error<
+ "'continue' statement not in loop statement">;
+def err_break_not_in_loop_or_switch : Error<
+ "'break' statement not in loop or switch statement">;
+def err_default_not_in_switch : Error<
+ "'default' statement not in switch statement">;
+def err_case_not_in_switch : Error<"'case' statement not in switch statement">;
+def warn_bool_switch_condition : Warning<
+ "switch condition has boolean value">;
+def warn_case_value_overflow : Warning<
+ "overflow converting case value to switch condition type (%0 to %1)">,
+ InGroup<DiagGroup<"switch">>;
+def err_duplicate_case : Error<"duplicate case value '%0'">;
+def warn_case_empty_range : Warning<"empty case range specified">;
+def warn_missing_case_for_condition :
+ Warning<"no case matching constant switch condition '%0'">;
+
+def warn_def_missing_case1 : Warning<
+ "enumeration value %0 not explicitly handled in switch">,
+ InGroup<SwitchEnum>, DefaultIgnore;
+def warn_def_missing_case2 : Warning<
+ "enumeration values %0 and %1 not explicitly handled in switch">,
+ InGroup<SwitchEnum>, DefaultIgnore;
+def warn_def_missing_case3 : Warning<
+ "enumeration values %0, %1, and %2 not explicitly handled in switch">,
+ InGroup<SwitchEnum>, DefaultIgnore;
+def warn_def_missing_cases : Warning<
+ "%0 enumeration values not explicitly handled in switch: %1, %2, %3...">,
+ InGroup<SwitchEnum>, DefaultIgnore;
+
+def warn_missing_case1 : Warning<"enumeration value %0 not handled in switch">,
+ InGroup<Switch>;
+def warn_missing_case2 : Warning<
+ "enumeration values %0 and %1 not handled in switch">,
+ InGroup<Switch>;
+def warn_missing_case3 : Warning<
+ "enumeration values %0, %1, and %2 not handled in switch">,
+ InGroup<Switch>;
+def warn_missing_cases : Warning<
+ "%0 enumeration values not handled in switch: %1, %2, %3...">,
+ InGroup<Switch>;
+
+def warn_unreachable_default : Warning<
+ "default label in switch which covers all enumeration values">,
+ InGroup<CoveredSwitchDefault>, DefaultIgnore;
+def warn_not_in_enum : Warning<"case value not in enumerated type %0">,
+ InGroup<Switch>;
+def err_typecheck_statement_requires_scalar : Error<
+ "statement requires expression of scalar type (%0 invalid)">;
+def err_typecheck_statement_requires_integer : Error<
+ "statement requires expression of integer type (%0 invalid)">;
+def err_multiple_default_labels_defined : Error<
+ "multiple default labels in one switch">;
+def err_switch_multiple_conversions : Error<
+ "multiple conversions from switch condition type %0 to an integral or "
+ "enumeration type">;
+def note_switch_conversion : Note<
+ "conversion to %select{integral|enumeration}0 type %1">;
+def err_switch_explicit_conversion : Error<
+ "switch condition type %0 requires explicit conversion to %1">;
+def err_switch_incomplete_class_type : Error<
+ "switch condition has incomplete class type %0">;
+
+def warn_empty_if_body : Warning<
+ "if statement has empty body">, InGroup<EmptyBody>;
+def warn_empty_for_body : Warning<
+ "for loop has empty body">, InGroup<EmptyBody>;
+def warn_empty_range_based_for_body : Warning<
+ "range-based for loop has empty body">, InGroup<EmptyBody>;
+def warn_empty_while_body : Warning<
+ "while loop has empty body">, InGroup<EmptyBody>;
+def warn_empty_switch_body : Warning<
+ "switch statement has empty body">, InGroup<EmptyBody>;
+def note_empty_body_on_separate_line : Note<
+ "put the semicolon on a separate line to silence this warning">,
+ InGroup<EmptyBody>;
+
+def err_va_start_used_in_non_variadic_function : Error<
+ "'va_start' used in function with fixed args">;
+def warn_second_parameter_of_va_start_not_last_named_argument : Warning<
+ "second parameter of 'va_start' not last named argument">;
+def err_first_argument_to_va_arg_not_of_type_va_list : Error<
+ "first argument to 'va_arg' is of type %0 and not 'va_list'">;
+def err_second_parameter_to_va_arg_incomplete: Error<
+ "second argument to 'va_arg' is of incomplete type %0">;
+def err_second_parameter_to_va_arg_abstract: Error<
+ "second argument to 'va_arg' is of abstract type %0">;
+def warn_second_parameter_to_va_arg_not_pod : Warning<
+ "second argument to 'va_arg' is of non-POD type %0">,
+ InGroup<DiagGroup<"non-pod-varargs">>, DefaultError;
+def warn_second_parameter_to_va_arg_ownership_qualified : Warning<
+ "second argument to 'va_arg' is of ARC ownership-qualified type %0">,
+ InGroup<DiagGroup<"non-pod-varargs">>, DefaultError;
+def warn_second_parameter_to_va_arg_never_compatible : Warning<
+ "second argument to 'va_arg' is of promotable type %0; this va_arg has "
+ "undefined behavior because arguments will be promoted to %1">;
+
+def warn_return_missing_expr : Warning<
+ "non-void %select{function|method}1 %0 should return a value">, DefaultError,
+ InGroup<ReturnType>;
+def ext_return_missing_expr : ExtWarn<
+ "non-void %select{function|method}1 %0 should return a value">, DefaultError,
+ InGroup<ReturnType>;
+def ext_return_has_expr : ExtWarn<
+ "%select{void function|void method|constructor|destructor}1 %0 "
+ "should not return a value">,
+ DefaultError, InGroup<ReturnType>;
+def ext_return_has_void_expr : Extension<
+ "void %select{function|method|block}1 %0 should not return void expression">;
+def err_return_init_list : Error<
+ "%select{void function|void method|constructor|destructor}1 %0 "
+ "must not return a value">;
+def warn_noreturn_function_has_return_expr : Warning<
+ "function %0 declared 'noreturn' should not return">,
+ InGroup<DiagGroup<"invalid-noreturn">>;
+def warn_falloff_noreturn_function : Warning<
+ "function declared 'noreturn' should not return">,
+ InGroup<DiagGroup<"invalid-noreturn">>;
+def err_noreturn_block_has_return_expr : Error<
+ "block declared 'noreturn' should not return">;
+def err_block_on_nonlocal : Error<
+ "__block attribute not allowed, only allowed on local variables">;
+def err_block_on_vm : Error<
+ "__block attribute not allowed on declaration with a variably modified type">;
+
+def err_shufflevector_non_vector : Error<
+ "first two arguments to __builtin_shufflevector must be vectors">;
+def err_shufflevector_incompatible_vector : Error<
+ "first two arguments to __builtin_shufflevector must have the same type">;
+def err_shufflevector_nonconstant_argument : Error<
+ "index for __builtin_shufflevector must be a constant integer">;
+def err_shufflevector_argument_too_large : Error<
+ "index for __builtin_shufflevector must be less than the total number "
+ "of vector elements">;
+
+def err_vector_incorrect_num_initializers : Error<
+ "%select{too many|too few}0 elements in vector initialization (expected %1 elements, have %2)">;
+def err_altivec_empty_initializer : Error<"expected initializer">;
+
+def err_invalid_neon_type_code : Error<
+ "incompatible constant for this __builtin_neon function">;
+def err_argument_invalid_range : Error<
+ "argument should be a value from %0 to %1">;
+
+def err_builtin_longjmp_invalid_val : Error<
+ "argument to __builtin_longjmp must be a constant 1">;
+
+def err_constant_integer_arg_type : Error<
+ "argument to %0 must be a constant integer">;
+
+def ext_mixed_decls_code : Extension<
+ "ISO C90 forbids mixing declarations and code">,
+ InGroup<DiagGroup<"declaration-after-statement">>;
+
+def err_non_variable_decl_in_for : Error<
+ "declaration of non-local variable in 'for' loop">;
+def err_toomany_element_decls : Error<
+ "only one element declaration is allowed">;
+def err_selector_element_not_lvalue : Error<
+ "selector element is not a valid lvalue">;
+def err_selector_element_type : Error<
+ "selector element type %0 is not a valid object">;
+def err_collection_expr_type : Error<
+ "collection expression type %0 is not a valid object">;
+def warn_collection_expr_type : Warning<
+ "collection expression type %0 may not respond to %1">;
+
+def err_invalid_conversion_between_ext_vectors : Error<
+ "invalid conversion between ext-vector type %0 and %1">;
+
+// Type
+def ext_invalid_sign_spec : Extension<"'%0' cannot be signed or unsigned">;
+def warn_receiver_forward_class : Warning<
+ "receiver %0 is a forward class and corresponding @interface may not exist">;
+def note_method_sent_forward_class : Note<"method %0 is used for the forward class">;
+def ext_missing_declspec : ExtWarn<
+ "declaration specifier missing, defaulting to 'int'">;
+def ext_missing_type_specifier : ExtWarn<
+ "type specifier missing, defaults to 'int'">,
+ InGroup<ImplicitInt>;
+def err_decimal_unsupported : Error<
+ "GNU decimal type extension not supported">;
+def err_missing_type_specifier : Error<
+ "C++ requires a type specifier for all declarations">;
+def err_objc_array_of_interfaces : Error<
+ "array of interface %0 is invalid (probably should be an array of pointers)">;
+def ext_c99_array_usage : Extension<
+ "%select{qualifier in |static |}0array size %select{||'[*] '}0is a C99 "
+ "feature">, InGroup<C99>;
+def err_c99_array_usage_cxx : Error<
+ "%select{qualifier in |static |}0array size %select{||'[*] '}0is a C99 "
+ "feature, not permitted in C++">;
+def err_double_requires_fp64 : Error<
+ "use of type 'double' requires cl_khr_fp64 extension to be enabled">;
+def err_nsconsumed_attribute_mismatch : Error<
+ "overriding method has mismatched ns_consumed attribute on its"
+ " parameter">;
+def err_nsreturns_retained_attribute_mismatch : Error<
+ "overriding method has mismatched ns_returns_%select{not_retained|retained}0"
+ " attributes">;
+
+def note_getter_unavailable : Note<
+ "or because setter is declared here, but no getter method %0 is found">;
+def err_invalid_protocol_qualifiers : Error<
+ "invalid protocol qualifiers on non-ObjC type">;
+def warn_ivar_use_hidden : Warning<
+ "local declaration of %0 hides instance variable">,
+ InGroup<DiagGroup<"shadow-ivar">>;
+def error_ivar_use_in_class_method : Error<
+ "instance variable %0 accessed in class method">;
+def error_implicit_ivar_access : Error<
+ "instance variable %0 cannot be accessed because 'self' has been redeclared">;
+def error_private_ivar_access : Error<"instance variable %0 is private">,
+ AccessControl;
+def error_protected_ivar_access : Error<"instance variable %0 is protected">,
+ AccessControl;
+def warn_maynot_respond : Warning<"%0 may not respond to %1">;
+def warn_attribute_method_def : Warning<
+ "attributes on method implementation and its declaration must match">,
+ InGroup<DiagGroup<"mismatched-method-attributes">>;
+def ext_typecheck_base_super : Warning<
+ "method parameter type %0 does not match "
+ "super class method parameter type %1">, InGroup<SuperSubClassMismatch>, DefaultIgnore;
+def warn_missing_method_return_type : Warning<
+ "method has no return type specified; defaults to 'id'">,
+ InGroup<MissingMethodReturnType>, DefaultIgnore;
+
+// Spell-checking diagnostics
+def err_unknown_typename_suggest : Error<
+ "unknown type name %0; did you mean %1?">;
+def err_unknown_nested_typename_suggest : Error<
+ "no type named %0 in %1; did you mean %2?">;
+def err_no_member_suggest : Error<"no member named %0 in %1; did you mean %2?">;
+def err_undeclared_use_suggest : Error<
+ "use of undeclared %0; did you mean %1?">;
+def err_undeclared_var_use_suggest : Error<
+ "use of undeclared identifier %0; did you mean %1?">;
+def err_no_template_suggest : Error<"no template named %0; did you mean %1?">;
+def err_no_member_template_suggest : Error<
+ "no template named %0 in %1; did you mean %2?">;
+def err_mem_init_not_member_or_class_suggest : Error<
+ "initializer %0 does not name a non-static data member or base "
+ "class; did you mean the %select{base class|member}1 %2?">;
+def err_field_designator_unknown_suggest : Error<
+ "field designator %0 does not refer to any field in type %1; did you mean "
+ "%2?">;
+def err_typecheck_member_reference_ivar_suggest : Error<
+ "%0 does not have a member named %1; did you mean %2?">;
+def err_property_not_found_suggest : Error<
+ "property %0 not found on object of type %1; did you mean %2?">;
+def err_ivar_access_using_property_syntax_suggest : Error<
+ "property %0 not found on object of type %1; did you mean to access ivar %2?">;
+def err_property_found_suggest : Error<
+ "property %0 found on object of type %1; did you mean to access "
+ "it with the \".\" operator?">;
+def err_undef_interface_suggest : Error<
+ "cannot find interface declaration for %0; did you mean %1?">;
+def warn_undef_interface_suggest : Warning<
+ "cannot find interface declaration for %0; did you mean %1?">;
+def err_undef_superclass_suggest : Error<
+ "cannot find interface declaration for %0, superclass of %1; did you mean "
+ "%2?">;
+def err_undeclared_protocol_suggest : Error<
+ "cannot find protocol declaration for %0; did you mean %1?">;
+def note_base_class_specified_here : Note<
+ "base class %0 specified here">;
+def err_using_directive_suggest : Error<
+ "no namespace named %0; did you mean %1?">;
+def err_using_directive_member_suggest : Error<
+ "no namespace named %0 in %1; did you mean %2?">;
+def note_namespace_defined_here : Note<"namespace %0 defined here">;
+def err_sizeof_pack_no_pack_name_suggest : Error<
+ "%0 does not refer to the name of a parameter pack; did you mean %1?">;
+def note_parameter_pack_here : Note<"parameter pack %0 declared here">;
+
+def err_uncasted_use_of_unknown_any : Error<
+ "%0 has unknown type; cast it to its declared type to use it">;
+def err_uncasted_call_of_unknown_any : Error<
+ "%0 has unknown return type; cast the call to its declared return type">;
+def err_uncasted_send_to_unknown_any_method : Error<
+ "no known method %select{%objcinstance1|%objcclass1}0; cast the "
+ "message send to the method's return type">;
+def err_unsupported_unknown_any_decl : Error<
+ "%0 has unknown type, which is unsupported for this kind of declaration">;
+def err_unsupported_unknown_any_expr : Error<
+ "unsupported expression with unknown type">;
+def err_unsupported_unknown_any_call : Error<
+ "call to unsupported expression with unknown type">;
+def err_unknown_any_addrof : Error<
+ "the address of a declaration with unknown type "
+ "can only be cast to a pointer type">;
+def err_unknown_any_var_function_type : Error<
+ "variable %0 with unknown type cannot be given a function type">;
+def err_unknown_any_function : Error<
+ "function %0 with unknown type must be given a function type">;
+
+def err_filter_expression_integral : Error<
+ "filter expression type should be an integral value not %0">;
+
+// OpenCL warnings and errors.
+def err_invalid_astype_of_different_size : Error<
+ "invalid reinterpretation: sizes of %0 and %1 must match">;
+
+} // end of sema category
+
+let CategoryName = "Related Result Type Issue" in {
+// Objective-C related result type compatibility
+def warn_related_result_type_compatibility_class : Warning<
+ "method is expected to return an instance of its class type %0, but "
+ "is declared to return %1">;
+def warn_related_result_type_compatibility_protocol : Warning<
+ "protocol method is expected to return an instance of the implementing "
+ "class, but is declared to return %0">;
+def note_related_result_type_overridden_family : Note<
+ "overridden method is part of the '%select{|alloc|copy|init|mutableCopy|"
+ "new|autorelease|dealloc|finalize|release|retain|retainCount|self}0' method "
+ "family">;
+def note_related_result_type_overridden : Note<
+ "overridden method returns an instance of its class type">;
+def note_related_result_type_inferred : Note<
+ "%select{class|instance}0 method %1 is assumed to return an instance of "
+ "its receiver type (%2)">;
+
+}
+
+let CategoryName = "Modules Issue" in {
+def err_module_private_specialization : Error<
+ "%select{template|partial|member}0 specialization cannot be "
+ "declared __module_private__">;
+def err_module_private_local : Error<
+ "%select{local variable|parameter|typedef}0 %1 cannot be declared "
+ "__module_private__">;
+def err_module_private_local_class : Error<
+ "local %select{struct|union|class|enum}0 cannot be declared "
+ "__module_private__">;
+def err_module_private_definition : Error<
+ "definition of %0 must be imported before it is required">;
+}
+
+} // end of sema component.
+
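
Editor's note: the %N placeholders and %select{...}N alternatives in the entries
above are filled in at the emission site in Sema. A minimal sketch of such a
call site is shown below; Loc, Callee, CalleeTarget and CurrentTarget are
illustrative names, not part of this commit, and the real CUDA target checks
live elsewhere in Sema.

    // Hypothetical Sema call site for err_ref_bad_target (defined above).
    // The integer arguments index into the %select{__device__|__global__|
    // __host__|__host__ __device__} lists; the callee declaration is %1.
    Diag(Loc, diag::err_ref_bad_target)
        << CalleeTarget    // %0: target of the referenced function
        << Callee          // %1: the callee, formatted by the diagnostic engine
        << CurrentTarget;  // %2: target of the enclosing function
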
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td
new file mode 100644
index 0000000..7f9fe26
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/DiagnosticSerializationKinds.td
@@ -0,0 +1,60 @@
+//==--- DiagnosticSerializationKinds.td - serialization diagnostics -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+let Component = "Serialization" in {
+
+def err_fe_unable_to_read_pch_file : Error<
+ "unable to read PCH file: '%0'">;
+def err_fe_not_a_pch_file : Error<
+ "input is not a PCH file: '%0'">;
+def err_fe_pch_malformed : Error<
+ "malformed or corrupted PCH file: '%0'">, DefaultFatal;
+def err_fe_pch_malformed_block : Error<
+ "malformed block record in PCH file: '%0'">, DefaultFatal;
+def err_fe_pch_error_at_end_block : Error<
+ "error at end of module block in PCH file: '%0'">, DefaultFatal;
+def err_fe_pch_file_modified : Error<
+ "file '%0' has been modified since the precompiled header was built">,
+ DefaultFatal;
+
+def warn_pch_target_triple : Error<
+ "PCH file was compiled for the target '%0' but the current translation "
+ "unit is being compiled for target '%1'">;
+def err_pch_langopt_mismatch : Error<"%0 was %select{disabled|enabled}1 in "
+ "PCH file but is currently %select{disabled|enabled}2">;
+def err_pch_langopt_value_mismatch : Error<
+ "%0 differs in PCH file vs. current file">;
+
+def warn_pch_version_too_old : Error<
+ "PCH file uses an older PCH format that is no longer supported">;
+def warn_pch_version_too_new : Error<
+ "PCH file uses a newer PCH format that cannot be read">;
+def warn_pch_different_branch : Error<
+ "PCH file built from a different branch (%0) than the compiler (%1)">;
+def err_pch_with_compiler_errors : Error<
+ "PCH file contains compiler errors">;
+def warn_cmdline_conflicting_macro_def : Error<
+ "definition of the macro '%0' conflicts with the definition used to "
+ "build the precompiled header">;
+def note_pch_macro_defined_as : Note<
+ "definition of macro '%0' in the precompiled header">;
+def warn_cmdline_missing_macro_defs : Warning<
+ "macro definitions used to build the precompiled header are missing">;
+def note_using_macro_def_from_pch : Note<
+ "using this macro definition from precompiled header">;
+def warn_macro_name_used_in_pch : Error<
+ "definition of macro %0 conflicts with an identifier used in the "
+ "precompiled header">;
+def warn_pch_compiler_options_mismatch : Error<
+ "compiler options used when building the precompiled header differ from "
+ "the options used when using the precompiled header">;
+
+def err_not_a_pch_file : Error<
+ "'%0' does not appear to be a precompiled header file">, DefaultFatal;
+}
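
Editor's note: these serialization diagnostics carry no source location, so they
are typically reported directly through the DiagnosticsEngine rather than via
Sema. A minimal sketch, where Diags and FileName are assumed names:

    // Hypothetical reporting of a PCH read failure. err_fe_unable_to_read_pch_file
    // takes a single %0 string argument; entries marked DefaultFatal (such as
    // err_fe_pch_malformed) additionally stop compilation once emitted.
    Diags.Report(diag::err_fe_unable_to_read_pch_file) << FileName;
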
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/ExceptionSpecificationType.h b/contrib/llvm/tools/clang/include/clang/Basic/ExceptionSpecificationType.h
new file mode 100644
index 0000000..98cfd29
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/ExceptionSpecificationType.h
@@ -0,0 +1,40 @@
+//===--- ExceptionSpecificationType.h ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ExceptionSpecificationType enumeration and various
+// utility functions.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_BASIC_EXCEPTIONSPECIFICATIONTYPE_H
+#define LLVM_CLANG_BASIC_EXCEPTIONSPECIFICATIONTYPE_H
+
+namespace clang {
+
+/// \brief The various types of exception specifications that exist in C++0x.
+enum ExceptionSpecificationType {
+ EST_None, ///< no exception specification
+ EST_DynamicNone, ///< throw()
+ EST_Dynamic, ///< throw(T1, T2)
+ EST_MSAny, ///< Microsoft throw(...) extension
+ EST_BasicNoexcept, ///< noexcept
+ EST_ComputedNoexcept, ///< noexcept(expression)
+ EST_Delayed ///< not known yet
+};
+
+inline bool isDynamicExceptionSpec(ExceptionSpecificationType ESpecType) {
+ return ESpecType >= EST_DynamicNone && ESpecType <= EST_MSAny;
+}
+
+inline bool isNoexceptExceptionSpec(ExceptionSpecificationType ESpecType) {
+ return ESpecType == EST_BasicNoexcept || ESpecType == EST_ComputedNoexcept;
+}
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_BASIC_EXCEPTIONSPECIFICATIONTYPE_H
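
Editor's note: a small standalone sketch of how the two predicates above
partition the enumeration; it assumes only this header plus a compiler with the
clang include directory on its search path.

    #include "clang/Basic/ExceptionSpecificationType.h"
    #include <cstdio>

    int main() {
      using namespace clang;
      // throw(), throw(T1, T2) and the MS throw(...) extension are "dynamic";
      // the noexcept forms are not, and vice versa.
      std::printf("dynamic:  throw(T) %d, noexcept(e) %d\n",
                  (int)isDynamicExceptionSpec(EST_Dynamic),
                  (int)isDynamicExceptionSpec(EST_ComputedNoexcept));
      std::printf("noexcept: noexcept %d, throw() %d\n",
                  (int)isNoexceptExceptionSpec(EST_BasicNoexcept),
                  (int)isNoexceptExceptionSpec(EST_DynamicNone));
      return 0;  // prints 1 and 0 on each line
    }
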
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/ExpressionTraits.h b/contrib/llvm/tools/clang/include/clang/Basic/ExpressionTraits.h
new file mode 100644
index 0000000..c4e6a1c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/ExpressionTraits.h
@@ -0,0 +1,25 @@
+//===- ExpressionTraits.h - C++ Expression Traits Support Enums -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines enumerations for expression traits intrinsics.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EXPRESSIONTRAITS_H
+#define LLVM_CLANG_EXPRESSIONTRAITS_H
+
+namespace clang {
+
+ enum ExpressionTrait {
+ ET_IsLValueExpr,
+ ET_IsRValueExpr
+ };
+}
+
+#endif
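
Editor's note: ET_IsLValueExpr and ET_IsRValueExpr back the Embarcadero-style
expression-trait intrinsics Clang accepts in C++. A short illustrative
translation unit (compiled with clang++ -std=c++11), offered as a sketch:

    // __is_lvalue_expr / __is_rvalue_expr evaluate to a constant bool that
    // describes the value category of the operand expression.
    int g;
    static_assert(__is_lvalue_expr(g), "a named variable is an lvalue expression");
    static_assert(__is_rvalue_expr(42), "a literal is an rvalue expression");
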
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h b/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h
new file mode 100644
index 0000000..5c7d9eb
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/FileManager.h
@@ -0,0 +1,234 @@
+//===--- FileManager.h - File System Probing and Caching --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FileManager interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FILEMANAGER_H
+#define LLVM_CLANG_FILEMANAGER_H
+
+#include "clang/Basic/FileSystemOptions.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/Allocator.h"
+// FIXME: Enhance libsystem to support inode and other fields in stat.
+#include <sys/types.h>
+
+#ifdef _MSC_VER
+typedef unsigned short mode_t;
+#endif
+
+struct stat;
+
+namespace llvm {
+class MemoryBuffer;
+namespace sys { class Path; }
+}
+
+namespace clang {
+class FileManager;
+class FileSystemStatCache;
+
+/// DirectoryEntry - Cached information about one directory (either on
+/// the disk or in the virtual file system).
+///
+class DirectoryEntry {
+ const char *Name; // Name of the directory.
+ friend class FileManager;
+public:
+ DirectoryEntry() : Name(0) {}
+ const char *getName() const { return Name; }
+};
+
+/// FileEntry - Cached information about one file (either on the disk
+/// or in the virtual file system). If the 'FD' member is valid, then
+/// this FileEntry has an open file descriptor for the file.
+///
+class FileEntry {
+ const char *Name; // Name of the file.
+ off_t Size; // File size in bytes.
+ time_t ModTime; // Modification time of file.
+ const DirectoryEntry *Dir; // Directory file lives in.
+ unsigned UID; // A unique (small) ID for the file.
+ dev_t Device; // ID for the device containing the file.
+ ino_t Inode; // Inode number for the file.
+ mode_t FileMode; // The file mode as returned by 'stat'.
+
+ /// FD - The file descriptor for the file entry if it is opened and owned
+ /// by the FileEntry. If not, this is set to -1.
+ mutable int FD;
+ friend class FileManager;
+
+public:
+ FileEntry(dev_t device, ino_t inode, mode_t m)
+ : Name(0), Device(device), Inode(inode), FileMode(m), FD(-1) {}
+ // Add a default constructor for use with llvm::StringMap
+ FileEntry() : Name(0), Device(0), Inode(0), FileMode(0), FD(-1) {}
+
+ FileEntry(const FileEntry &FE) {
+ memcpy(this, &FE, sizeof(FE));
+ assert(FD == -1 && "Cannot copy a file-owning FileEntry");
+ }
+
+ void operator=(const FileEntry &FE) {
+ memcpy(this, &FE, sizeof(FE));
+ assert(FD == -1 && "Cannot assign a file-owning FileEntry");
+ }
+
+ ~FileEntry();
+
+ const char *getName() const { return Name; }
+ off_t getSize() const { return Size; }
+ unsigned getUID() const { return UID; }
+ ino_t getInode() const { return Inode; }
+ dev_t getDevice() const { return Device; }
+ time_t getModificationTime() const { return ModTime; }
+ mode_t getFileMode() const { return FileMode; }
+
+ /// getDir - Return the directory the file lives in.
+ ///
+ const DirectoryEntry *getDir() const { return Dir; }
+
+ bool operator<(const FileEntry &RHS) const {
+ return Device < RHS.Device || (Device == RHS.Device && Inode < RHS.Inode);
+ }
+};
+
+/// FileManager - Implements support for file system lookup, file system
+/// caching, and directory search management. This also handles more advanced
+/// properties, such as uniquing files based on "inode", so that a file with two
+/// names (e.g. symlinked) will be treated as a single file.
+///
+class FileManager : public RefCountedBase<FileManager> {
+ FileSystemOptions FileSystemOpts;
+
+ class UniqueDirContainer;
+ class UniqueFileContainer;
+
+ /// UniqueRealDirs/UniqueRealFiles - Cache for existing real
+ /// directories/files.
+ ///
+ UniqueDirContainer &UniqueRealDirs;
+ UniqueFileContainer &UniqueRealFiles;
+
+ /// \brief The virtual directories that we have allocated. For each
+ /// virtual file (e.g. foo/bar/baz.cpp), we add all of its parent
+ /// directories (foo/ and foo/bar/) here.
+ SmallVector<DirectoryEntry*, 4> VirtualDirectoryEntries;
+ /// \brief The virtual files that we have allocated.
+ SmallVector<FileEntry*, 4> VirtualFileEntries;
+
+ /// SeenDirEntries/SeenFileEntries - This is a cache that maps paths
+ /// to directory/file entries (either real or virtual) we have
+ /// looked up. The actual Entries for real directories/files are
+ /// owned by UniqueRealDirs/UniqueRealFiles above, while the Entries
+ /// for virtual directories/files are owned by
+ /// VirtualDirectoryEntries/VirtualFileEntries above.
+ ///
+ llvm::StringMap<DirectoryEntry*, llvm::BumpPtrAllocator> SeenDirEntries;
+ llvm::StringMap<FileEntry*, llvm::BumpPtrAllocator> SeenFileEntries;
+
+ /// NextFileUID - Each FileEntry we create is assigned a unique ID #.
+ ///
+ unsigned NextFileUID;
+
+ // Statistics.
+ unsigned NumDirLookups, NumFileLookups;
+ unsigned NumDirCacheMisses, NumFileCacheMisses;
+
+ // Caching.
+ OwningPtr<FileSystemStatCache> StatCache;
+
+ bool getStatValue(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor);
+
+ /// Add all ancestors of the given path (pointing to either a file
+ /// or a directory) as virtual directories.
+ void addAncestorsAsVirtualDirs(StringRef Path);
+
+public:
+ FileManager(const FileSystemOptions &FileSystemOpts);
+ ~FileManager();
+
+ /// \brief Installs the provided FileSystemStatCache object within
+ /// the FileManager.
+ ///
+ /// Ownership of this object is transferred to the FileManager.
+ ///
+ /// \param statCache the new stat cache to install. Ownership of this
+ /// object is transferred to the FileManager.
+ ///
+ /// \param AtBeginning whether this new stat cache must be installed at the
+ /// beginning of the chain of stat caches. Otherwise, it will be added to
+ /// the end of the chain.
+ void addStatCache(FileSystemStatCache *statCache, bool AtBeginning = false);
+
+ /// \brief Removes the specified FileSystemStatCache object from the manager.
+ void removeStatCache(FileSystemStatCache *statCache);
+
+ /// getDirectory - Lookup, cache, and verify the specified directory
+ /// (real or virtual). This returns NULL if the directory doesn't exist.
+ ///
+ /// \param CacheFailure If true and the file does not exist, we'll cache
+ /// the failure to find this file.
+ const DirectoryEntry *getDirectory(StringRef DirName,
+ bool CacheFailure = true);
+
+ /// \brief Lookup, cache, and verify the specified file (real or
+ /// virtual). This returns NULL if the file doesn't exist.
+ ///
+ /// \param OpenFile if true and the file exists, it will be opened.
+ ///
+ /// \param CacheFailure If true and the file does not exist, we'll cache
+ /// the failure to find this file.
+ const FileEntry *getFile(StringRef Filename, bool OpenFile = false,
+ bool CacheFailure = true);
+
+ /// \brief Returns the current file system options
+ const FileSystemOptions &getFileSystemOptions() { return FileSystemOpts; }
+
+ /// \brief Retrieve a file entry for a "virtual" file that acts as
+ /// if there were a file with the given name on disk. The file
+ /// itself is not accessed.
+ const FileEntry *getVirtualFile(StringRef Filename, off_t Size,
+ time_t ModificationTime);
+
+ /// \brief Open the specified file as a MemoryBuffer, returning a new
+ /// MemoryBuffer if successful, otherwise returning null.
+ llvm::MemoryBuffer *getBufferForFile(const FileEntry *Entry,
+ std::string *ErrorStr = 0);
+ llvm::MemoryBuffer *getBufferForFile(StringRef Filename,
+ std::string *ErrorStr = 0);
+
+ // getNoncachedStatValue - Will get the 'stat' information for the given path.
+ // If the path is relative, it will be resolved against the WorkingDir of the
+ // FileManager's FileSystemOptions.
+ bool getNoncachedStatValue(StringRef Path, struct stat &StatBuf);
+
+ /// \brief If path is not absolute and FileSystemOptions set the working
+ /// directory, the path is modified to be relative to the given
+ /// working directory.
+ void FixupRelativePath(SmallVectorImpl<char> &path) const;
+
+ /// \brief Produce an array mapping from the unique IDs assigned to each
+ /// file to the corresponding FileEntry pointer.
+ void GetUniqueIDMapping(
+ SmallVectorImpl<const FileEntry *> &UIDToFiles) const;
+
+ void PrintStats() const;
+};
+
+} // end namespace clang
+
+#endif
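
Editor's note: a usage sketch for the interface declared above. The helper name
dumpFileInfo is illustrative; error handling is reduced to the NULL-on-failure
contract documented in the header, and the returned MemoryBuffer is placed in an
OwningPtr on the assumption that the caller owns it.

    #include "clang/Basic/FileManager.h"
    #include "clang/Basic/FileSystemOptions.h"
    #include "llvm/ADT/IntrusiveRefCntPtr.h"
    #include "llvm/ADT/OwningPtr.h"
    #include "llvm/Support/MemoryBuffer.h"
    #include <cstdio>

    static void dumpFileInfo(const char *Path) {
      clang::FileSystemOptions FSOpts;            // no working-dir override
      llvm::IntrusiveRefCntPtr<clang::FileManager> FM(
          new clang::FileManager(FSOpts));

      // getFile() caches the lookup; a later query for the same path, or for an
      // alias with the same device/inode pair, returns the same FileEntry.
      const clang::FileEntry *FE = FM->getFile(Path, /*OpenFile=*/false);
      if (!FE) {
        std::fprintf(stderr, "no such file: %s\n", Path);
        return;
      }
      std::printf("%s: %lld bytes, UID %u\n", FE->getName(),
                  (long long)FE->getSize(), FE->getUID());

      // Read the contents through the same manager.
      std::string Error;
      llvm::OwningPtr<llvm::MemoryBuffer> Buf(FM->getBufferForFile(FE, &Error));
      if (Buf.get())
        std::printf("read %zu bytes\n", Buf->getBufferSize());
      else
        std::fprintf(stderr, "read failed: %s\n", Error.c_str());
    }
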
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/FileSystemOptions.h b/contrib/llvm/tools/clang/include/clang/Basic/FileSystemOptions.h
new file mode 100644
index 0000000..81e928d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/FileSystemOptions.h
@@ -0,0 +1,31 @@
+//===--- FileSystemOptions.h - File System Options --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FileSystemOptions interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_FILESYSTEMOPTIONS_H
+#define LLVM_CLANG_BASIC_FILESYSTEMOPTIONS_H
+
+#include <string>
+
+namespace clang {
+
+/// \brief Keeps track of options that affect how file operations are performed.
+class FileSystemOptions {
+public:
+ /// \brief If set, paths are resolved as if the working directory was
+ /// set to the value of WorkingDir.
+ std::string WorkingDir;
+};
+
+} // end namespace clang
+
+#endif
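
A brief sketch of how WorkingDir is meant to be used (illustrative; "/tmp/build" and "obj/main.o" are invented, and the true-on-failure return convention is assumed to match the one documented for FileSystemStatCache::get below):

    #include "clang/Basic/FileManager.h"
    #include "clang/Basic/FileSystemOptions.h"
    #include <sys/stat.h>

    void statRelativeToWorkingDir() {
      clang::FileSystemOptions FSOpts;
      FSOpts.WorkingDir = "/tmp/build";       // resolve relative paths here
      clang::FileManager FileMgr(FSOpts);

      struct stat StatBuf;
      // "obj/main.o" is interpreted as "/tmp/build/obj/main.o".
      if (!FileMgr.getNoncachedStatValue("obj/main.o", StatBuf))
        (void)StatBuf.st_size;                // stat succeeded
    }
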
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h b/contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h
new file mode 100644
index 0000000..96a2f90
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/FileSystemStatCache.h
@@ -0,0 +1,103 @@
+//===--- FileSystemStatCache.h - Caching for 'stat' calls -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FileSystemStatCache interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FILESYSTEMSTATCACHE_H
+#define LLVM_CLANG_FILESYSTEMSTATCACHE_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/StringMap.h"
+#include <sys/types.h>
+#include <sys/stat.h>
+
+namespace clang {
+
+/// \brief Abstract interface for introducing a FileManager cache for 'stat'
+/// system calls, which is used by precompiled and pretokenized headers to
+/// improve performance.
+class FileSystemStatCache {
+ virtual void anchor();
+protected:
+ OwningPtr<FileSystemStatCache> NextStatCache;
+
+public:
+ virtual ~FileSystemStatCache() {}
+
+ enum LookupResult {
+    CacheExists,   ///< We know the file exists and have its cached stat data.
+    CacheMissing   ///< We know that the file doesn't exist.
+ };
+
+ /// FileSystemStatCache::get - Get the 'stat' information for the specified
+  /// path, using the cache to accelerate it if possible. This returns true if
+ /// the path does not exist or false if it exists.
+ ///
+ /// If FileDescriptor is non-null, then this lookup should only return success
+  /// for files (not directories). If it is null, this lookup should only return
+ /// success for directories (not files). On a successful file lookup, the
+ /// implementation can optionally fill in FileDescriptor with a valid
+ /// descriptor and the client guarantees that it will close it.
+ static bool get(const char *Path, struct stat &StatBuf, int *FileDescriptor,
+ FileSystemStatCache *Cache);
+
+
+ /// \brief Sets the next stat call cache in the chain of stat caches.
+ /// Takes ownership of the given stat cache.
+ void setNextStatCache(FileSystemStatCache *Cache) {
+ NextStatCache.reset(Cache);
+ }
+
+ /// \brief Retrieve the next stat call cache in the chain.
+ FileSystemStatCache *getNextStatCache() { return NextStatCache.get(); }
+
+ /// \brief Retrieve the next stat call cache in the chain, transferring
+ /// ownership of this cache (and, transitively, all of the remaining caches)
+ /// to the caller.
+ FileSystemStatCache *takeNextStatCache() { return NextStatCache.take(); }
+
+protected:
+ virtual LookupResult getStat(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor) = 0;
+
+ LookupResult statChained(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor) {
+ if (FileSystemStatCache *Next = getNextStatCache())
+ return Next->getStat(Path, StatBuf, FileDescriptor);
+
+ // If we hit the end of the list of stat caches to try, just compute and
+ // return it without a cache.
+ return get(Path, StatBuf, FileDescriptor, 0) ? CacheMissing : CacheExists;
+ }
+};
+
+/// \brief A stat "cache" that can be used by FileManager to keep
+/// track of the results of stat() calls that occur throughout the
+/// execution of the front end.
+class MemorizeStatCalls : public FileSystemStatCache {
+public:
+ /// \brief The set of stat() calls that have been seen.
+ llvm::StringMap<struct stat, llvm::BumpPtrAllocator> StatCalls;
+
+ typedef llvm::StringMap<struct stat, llvm::BumpPtrAllocator>::const_iterator
+ iterator;
+
+ iterator begin() const { return StatCalls.begin(); }
+ iterator end() const { return StatCalls.end(); }
+
+ virtual LookupResult getStat(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor);
+};
+
+} // end namespace clang
+
+#endif
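
A sketch of how a new cache plugs into the chain above (CountingStatCache is an invented name; it defers the real work to statChained(), and ownership passes to the FileManager per addStatCache()'s contract):

    #include "clang/Basic/FileManager.h"
    #include "clang/Basic/FileSystemStatCache.h"

    class CountingStatCache : public clang::FileSystemStatCache {
    public:
      unsigned NumLookups;
      CountingStatCache() : NumLookups(0) {}

    protected:
      virtual LookupResult getStat(const char *Path, struct stat &StatBuf,
                                   int *FileDescriptor) {
        ++NumLookups;                          // record the lookup...
        // ...then let the next cache (or the real file system) answer it.
        return statChained(Path, StatBuf, FileDescriptor);
      }
    };

    void installCache(clang::FileManager &FileMgr) {
      FileMgr.addStatCache(new CountingStatCache(), /*AtBeginning=*/true);
    }
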
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h b/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
new file mode 100644
index 0000000..cc0080b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/IdentifierTable.h
@@ -0,0 +1,809 @@
+//===--- IdentifierTable.h - Hash table for identifier lookup ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the IdentifierInfo, IdentifierTable, and Selector
+// interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_IDENTIFIERTABLE_H
+#define LLVM_CLANG_BASIC_IDENTIFIERTABLE_H
+
+#include "clang/Basic/OperatorKinds.h"
+#include "clang/Basic/TokenKinds.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
+#include <string>
+
+namespace llvm {
+ template <typename T> struct DenseMapInfo;
+}
+
+namespace clang {
+ class LangOptions;
+ class IdentifierInfo;
+ class IdentifierTable;
+ class SourceLocation;
+ class MultiKeywordSelector; // private class used by Selector
+ class DeclarationName; // AST class that stores declaration names
+
+ /// IdentifierLocPair - A simple pair of identifier info and location.
+ typedef std::pair<IdentifierInfo*, SourceLocation> IdentifierLocPair;
+
+
+/// IdentifierInfo - One of these records is kept for each identifier that
+/// is lexed. This contains information about whether the token was #define'd,
+/// is a language keyword, or if it is a front-end token of some sort (e.g. a
+/// variable or function name). The preprocessor keeps this information in a
+/// set, and all tok::identifier tokens have a pointer to one of these.
+class IdentifierInfo {
+ unsigned TokenID : 9; // Front-end token ID or tok::identifier.
+ // Objective-C keyword ('protocol' in '@protocol') or builtin (__builtin_inf).
+ // First NUM_OBJC_KEYWORDS values are for Objective-C, the remaining values
+ // are for builtins.
+ unsigned ObjCOrBuiltinID :11;
+ bool HasMacro : 1; // True if there is a #define for this.
+ bool IsExtension : 1; // True if identifier is a lang extension.
+ bool IsCXX11CompatKeyword : 1; // True if identifier is a keyword in C++11.
+ bool IsPoisoned : 1; // True if identifier is poisoned.
+ bool IsCPPOperatorKeyword : 1; // True if ident is a C++ operator keyword.
+ bool NeedsHandleIdentifier : 1; // See "RecomputeNeedsHandleIdentifier".
+ bool IsFromAST : 1; // True if identifier was loaded (at least
+ // partially) from an AST file.
+ bool ChangedAfterLoad : 1; // True if identifier has changed from the
+ // definition loaded from an AST file.
+ bool RevertedTokenID : 1; // True if RevertTokenIDToIdentifier was
+ // called.
+ bool OutOfDate : 1; // True if there may be additional
+ // information about this identifier
+ // stored externally.
+ bool IsModulesImport : 1; // True if this is the 'import' contextual
+ // keyword.
+ // 1 bit left in 32-bit word.
+
+ void *FETokenInfo; // Managed by the language front-end.
+ llvm::StringMapEntry<IdentifierInfo*> *Entry;
+
+ IdentifierInfo(const IdentifierInfo&); // NONCOPYABLE.
+ void operator=(const IdentifierInfo&); // NONASSIGNABLE.
+
+ friend class IdentifierTable;
+
+public:
+ IdentifierInfo();
+
+
+ /// isStr - Return true if this is the identifier for the specified string.
+ /// This is intended to be used for string literals only: II->isStr("foo").
+ template <std::size_t StrLen>
+ bool isStr(const char (&Str)[StrLen]) const {
+ return getLength() == StrLen-1 && !memcmp(getNameStart(), Str, StrLen-1);
+ }
+
+ /// getNameStart - Return the beginning of the actual string for this
+ /// identifier. The returned string is properly null terminated.
+ ///
+ const char *getNameStart() const {
+ if (Entry) return Entry->getKeyData();
+ // FIXME: This is gross. It would be best not to embed specific details
+ // of the PTH file format here.
+ // The 'this' pointer really points to a
+ // std::pair<IdentifierInfo, const char*>, where internal pointer
+ // points to the external string data.
+ typedef std::pair<IdentifierInfo, const char*> actualtype;
+ return ((const actualtype*) this)->second;
+ }
+
+ /// getLength - Efficiently return the length of this identifier info.
+ ///
+ unsigned getLength() const {
+ if (Entry) return Entry->getKeyLength();
+ // FIXME: This is gross. It would be best not to embed specific details
+ // of the PTH file format here.
+ // The 'this' pointer really points to a
+ // std::pair<IdentifierInfo, const char*>, where internal pointer
+ // points to the external string data.
+ typedef std::pair<IdentifierInfo, const char*> actualtype;
+ const char* p = ((const actualtype*) this)->second - 2;
+ return (((unsigned) p[0]) | (((unsigned) p[1]) << 8)) - 1;
+ }
+
+ /// getName - Return the actual identifier string.
+ StringRef getName() const {
+ return StringRef(getNameStart(), getLength());
+ }
+
+ /// hasMacroDefinition - Return true if this identifier is #defined to some
+ /// other value.
+ bool hasMacroDefinition() const {
+ return HasMacro;
+ }
+ void setHasMacroDefinition(bool Val) {
+ if (HasMacro == Val) return;
+
+ HasMacro = Val;
+ if (Val)
+ NeedsHandleIdentifier = 1;
+ else
+ RecomputeNeedsHandleIdentifier();
+ }
+
+ /// getTokenID - If this is a source-language token (e.g. 'for'), this API
+ /// can be used to cause the lexer to map identifiers to source-language
+ /// tokens.
+ tok::TokenKind getTokenID() const { return (tok::TokenKind)TokenID; }
+
+ /// \brief True if RevertTokenIDToIdentifier() was called.
+ bool hasRevertedTokenIDToIdentifier() const { return RevertedTokenID; }
+
+ /// \brief Revert TokenID to tok::identifier; used for GNU libstdc++ 4.2
+ /// compatibility.
+ ///
+ /// TokenID is normally read-only but there are 2 instances where we revert it
+ /// to tok::identifier for libstdc++ 4.2. Keep track of when this happens
+ /// using this method so we can inform serialization about it.
+ void RevertTokenIDToIdentifier() {
+ assert(TokenID != tok::identifier && "Already at tok::identifier");
+ TokenID = tok::identifier;
+ RevertedTokenID = true;
+ }
+
+ /// getPPKeywordID - Return the preprocessor keyword ID for this identifier.
+ /// For example, "define" will return tok::pp_define.
+ tok::PPKeywordKind getPPKeywordID() const;
+
+  /// getObjCKeywordID - Return the Objective-C keyword ID for this
+ /// identifier. For example, 'class' will return tok::objc_class if ObjC is
+ /// enabled.
+ tok::ObjCKeywordKind getObjCKeywordID() const {
+ if (ObjCOrBuiltinID < tok::NUM_OBJC_KEYWORDS)
+ return tok::ObjCKeywordKind(ObjCOrBuiltinID);
+ else
+ return tok::objc_not_keyword;
+ }
+ void setObjCKeywordID(tok::ObjCKeywordKind ID) { ObjCOrBuiltinID = ID; }
+
+ /// getBuiltinID - Return a value indicating whether this is a builtin
+ /// function. 0 is not-built-in. 1 is builtin-for-some-nonprimary-target.
+ /// 2+ are specific builtin functions.
+ unsigned getBuiltinID() const {
+ if (ObjCOrBuiltinID >= tok::NUM_OBJC_KEYWORDS)
+ return ObjCOrBuiltinID - tok::NUM_OBJC_KEYWORDS;
+ else
+ return 0;
+ }
+ void setBuiltinID(unsigned ID) {
+ ObjCOrBuiltinID = ID + tok::NUM_OBJC_KEYWORDS;
+ assert(ObjCOrBuiltinID - unsigned(tok::NUM_OBJC_KEYWORDS) == ID
+ && "ID too large for field!");
+ }
+
+ unsigned getObjCOrBuiltinID() const { return ObjCOrBuiltinID; }
+ void setObjCOrBuiltinID(unsigned ID) { ObjCOrBuiltinID = ID; }
+
+ /// get/setExtension - Initialize information about whether or not this
+ /// language token is an extension. This controls extension warnings, and is
+ /// only valid if a custom token ID is set.
+ bool isExtensionToken() const { return IsExtension; }
+ void setIsExtensionToken(bool Val) {
+ IsExtension = Val;
+ if (Val)
+ NeedsHandleIdentifier = 1;
+ else
+ RecomputeNeedsHandleIdentifier();
+ }
+
+ /// is/setIsCXX11CompatKeyword - Initialize information about whether or not
+ /// this language token is a keyword in C++11. This controls compatibility
+ /// warnings, and is only true when not parsing C++11. Once a compatibility
+ /// problem has been diagnosed with this keyword, the flag will be cleared.
+ bool isCXX11CompatKeyword() const { return IsCXX11CompatKeyword; }
+ void setIsCXX11CompatKeyword(bool Val) {
+ IsCXX11CompatKeyword = Val;
+ if (Val)
+ NeedsHandleIdentifier = 1;
+ else
+ RecomputeNeedsHandleIdentifier();
+ }
+
+ /// setIsPoisoned - Mark this identifier as poisoned. After poisoning, the
+ /// Preprocessor will emit an error every time this token is used.
+ void setIsPoisoned(bool Value = true) {
+ IsPoisoned = Value;
+ if (Value)
+ NeedsHandleIdentifier = 1;
+ else
+ RecomputeNeedsHandleIdentifier();
+ }
+
+ /// isPoisoned - Return true if this token has been poisoned.
+ bool isPoisoned() const { return IsPoisoned; }
+
+ /// isCPlusPlusOperatorKeyword/setIsCPlusPlusOperatorKeyword controls whether
+ /// this identifier is a C++ alternate representation of an operator.
+ void setIsCPlusPlusOperatorKeyword(bool Val = true) {
+ IsCPPOperatorKeyword = Val;
+ if (Val)
+ NeedsHandleIdentifier = 1;
+ else
+ RecomputeNeedsHandleIdentifier();
+ }
+ bool isCPlusPlusOperatorKeyword() const { return IsCPPOperatorKeyword; }
+
+ /// getFETokenInfo/setFETokenInfo - The language front-end is allowed to
+ /// associate arbitrary metadata with this token.
+ template<typename T>
+ T *getFETokenInfo() const { return static_cast<T*>(FETokenInfo); }
+ void setFETokenInfo(void *T) { FETokenInfo = T; }
+
+ /// isHandleIdentifierCase - Return true if the Preprocessor::HandleIdentifier
+ /// must be called on a token of this identifier. If this returns false, we
+ /// know that HandleIdentifier will not affect the token.
+ bool isHandleIdentifierCase() const { return NeedsHandleIdentifier; }
+
+ /// isFromAST - Return true if the identifier in its current state was loaded
+ /// from an AST file.
+ bool isFromAST() const { return IsFromAST; }
+
+ void setIsFromAST() { IsFromAST = true; }
+
+ /// \brief Determine whether this identifier has changed since it was loaded
+ /// from an AST file.
+ bool hasChangedSinceDeserialization() const {
+ return ChangedAfterLoad;
+ }
+
+ /// \brief Note that this identifier has changed since it was loaded from
+ /// an AST file.
+ void setChangedSinceDeserialization() {
+ ChangedAfterLoad = true;
+ }
+
+ /// \brief Determine whether the information for this identifier is out of
+ /// date with respect to the external source.
+ bool isOutOfDate() const { return OutOfDate; }
+
+ /// \brief Set whether the information for this identifier is out of
+ /// date with respect to the external source.
+ void setOutOfDate(bool OOD) {
+ OutOfDate = OOD;
+ if (OOD)
+ NeedsHandleIdentifier = true;
+ else
+ RecomputeNeedsHandleIdentifier();
+ }
+
+ /// \brief Determine whether this is the contextual keyword
+ /// '__experimental_modules_import'.
+ bool isModulesImport() const { return IsModulesImport; }
+
+ /// \brief Set whether this identifier is the contextual keyword
+ /// '__experimental_modules_import'.
+ void setModulesImport(bool I) {
+ IsModulesImport = I;
+ if (I)
+ NeedsHandleIdentifier = true;
+ else
+ RecomputeNeedsHandleIdentifier();
+ }
+
+private:
+ /// RecomputeNeedsHandleIdentifier - The Preprocessor::HandleIdentifier does
+ /// several special (but rare) things to identifiers of various sorts. For
+ /// example, it changes the "for" keyword token from tok::identifier to
+ /// tok::for.
+ ///
+ /// This method is very tied to the definition of HandleIdentifier. Any
+ /// change to it should be reflected here.
+ void RecomputeNeedsHandleIdentifier() {
+ NeedsHandleIdentifier =
+ (isPoisoned() | hasMacroDefinition() | isCPlusPlusOperatorKeyword() |
+ isExtensionToken() | isCXX11CompatKeyword() || isOutOfDate() ||
+ isModulesImport());
+ }
+};
+
+/// \brief An RAII object for [un]poisoning an identifier
+/// within a certain scope. II is allowed to be null, in
+/// which case, objects of this type have no effect.
+class PoisonIdentifierRAIIObject {
+ IdentifierInfo *const II;
+ const bool OldValue;
+public:
+ PoisonIdentifierRAIIObject(IdentifierInfo *II, bool NewValue)
+ : II(II), OldValue(II ? II->isPoisoned() : false) {
+ if(II)
+ II->setIsPoisoned(NewValue);
+ }
+
+ ~PoisonIdentifierRAIIObject() {
+ if(II)
+ II->setIsPoisoned(OldValue);
+ }
+};
+
+/// \brief An iterator that walks over all of the known identifiers
+/// in the lookup table.
+///
+/// Since this iterator uses an abstract interface via virtual
+/// functions, it uses an object-oriented interface rather than the
+/// more standard C++ STL iterator interface. In this OO-style
+/// iteration, the single function \c Next() provides dereference,
+/// advance, and end-of-sequence checking in a single
+/// operation. Subclasses of this iterator type will provide the
+/// actual functionality.
+class IdentifierIterator {
+private:
+ IdentifierIterator(const IdentifierIterator&); // Do not implement
+ IdentifierIterator &operator=(const IdentifierIterator&); // Do not implement
+
+protected:
+ IdentifierIterator() { }
+
+public:
+ virtual ~IdentifierIterator();
+
+ /// \brief Retrieve the next string in the identifier table and
+  /// advance the iterator to the following string.
+ ///
+ /// \returns The next string in the identifier table. If there is
+ /// no such string, returns an empty \c StringRef.
+ virtual StringRef Next() = 0;
+};
+
+/// IdentifierInfoLookup - An abstract class used by IdentifierTable that
+/// provides an interface for performing lookups from strings
+/// (const char *) to IdentifierInfo objects.
+class IdentifierInfoLookup {
+public:
+ virtual ~IdentifierInfoLookup();
+
+ /// get - Return the identifier token info for the specified named identifier.
+ /// Unlike the version in IdentifierTable, this returns a pointer instead
+ /// of a reference. If the pointer is NULL then the IdentifierInfo cannot
+ /// be found.
+ virtual IdentifierInfo* get(StringRef Name) = 0;
+
+ /// \brief Retrieve an iterator into the set of all identifiers
+ /// known to this identifier lookup source.
+ ///
+ /// This routine provides access to all of the identifiers known to
+ /// the identifier lookup, allowing access to the contents of the
+ /// identifiers without introducing the overhead of constructing
+ /// IdentifierInfo objects for each.
+ ///
+ /// \returns A new iterator into the set of known identifiers. The
+ /// caller is responsible for deleting this iterator.
+ virtual IdentifierIterator *getIdentifiers() const;
+};
+
+/// \brief An abstract class used to resolve numerical identifier
+/// references (meaningful only to some external source) into
+/// IdentifierInfo pointers.
+class ExternalIdentifierLookup {
+public:
+ virtual ~ExternalIdentifierLookup();
+
+ /// \brief Return the identifier associated with the given ID number.
+ ///
+ /// The ID 0 is associated with the NULL identifier.
+ virtual IdentifierInfo *GetIdentifier(unsigned ID) = 0;
+};
+
+/// IdentifierTable - This table implements an efficient mapping from strings to
+/// IdentifierInfo nodes. It has no other purpose, but this is an
+/// extremely performance-critical piece of the code, as each occurrence of
+/// every identifier goes through here when lexed.
+class IdentifierTable {
+ // Shark shows that using MallocAllocator is *much* slower than using this
+ // BumpPtrAllocator!
+ typedef llvm::StringMap<IdentifierInfo*, llvm::BumpPtrAllocator> HashTableTy;
+ HashTableTy HashTable;
+
+ IdentifierInfoLookup* ExternalLookup;
+
+public:
+ /// IdentifierTable ctor - Create the identifier table, populating it with
+ /// info about the language keywords for the language specified by LangOpts.
+ IdentifierTable(const LangOptions &LangOpts,
+ IdentifierInfoLookup* externalLookup = 0);
+
+ /// \brief Set the external identifier lookup mechanism.
+ void setExternalIdentifierLookup(IdentifierInfoLookup *IILookup) {
+ ExternalLookup = IILookup;
+ }
+
+ /// \brief Retrieve the external identifier lookup object, if any.
+ IdentifierInfoLookup *getExternalIdentifierLookup() const {
+ return ExternalLookup;
+ }
+
+ llvm::BumpPtrAllocator& getAllocator() {
+ return HashTable.getAllocator();
+ }
+
+ /// get - Return the identifier token info for the specified named identifier.
+ ///
+ IdentifierInfo &get(StringRef Name) {
+ llvm::StringMapEntry<IdentifierInfo*> &Entry =
+ HashTable.GetOrCreateValue(Name);
+
+ IdentifierInfo *II = Entry.getValue();
+ if (II) return *II;
+
+ // No entry; if we have an external lookup, look there first.
+ if (ExternalLookup) {
+ II = ExternalLookup->get(Name);
+ if (II) {
+ // Cache in the StringMap for subsequent lookups.
+ Entry.setValue(II);
+ return *II;
+ }
+ }
+
+ // Lookups failed, make a new IdentifierInfo.
+ void *Mem = getAllocator().Allocate<IdentifierInfo>();
+ II = new (Mem) IdentifierInfo();
+ Entry.setValue(II);
+
+ // Make sure getName() knows how to find the IdentifierInfo
+ // contents.
+ II->Entry = &Entry;
+
+ return *II;
+ }
+
+ IdentifierInfo &get(StringRef Name, tok::TokenKind TokenCode) {
+ IdentifierInfo &II = get(Name);
+ II.TokenID = TokenCode;
+ assert(II.TokenID == (unsigned) TokenCode && "TokenCode too large");
+ return II;
+ }
+
+ /// \brief Gets an IdentifierInfo for the given name without consulting
+ /// external sources.
+ ///
+ /// This is a version of get() meant for external sources that want to
+ /// introduce or modify an identifier. If they called get(), they would
+ /// likely end up in a recursion.
+ IdentifierInfo &getOwn(StringRef Name) {
+ llvm::StringMapEntry<IdentifierInfo*> &Entry =
+ HashTable.GetOrCreateValue(Name);
+
+ IdentifierInfo *II = Entry.getValue();
+ if (!II) {
+
+ // Lookups failed, make a new IdentifierInfo.
+ void *Mem = getAllocator().Allocate<IdentifierInfo>();
+ II = new (Mem) IdentifierInfo();
+ Entry.setValue(II);
+
+ // Make sure getName() knows how to find the IdentifierInfo
+ // contents.
+ II->Entry = &Entry;
+
+ // If this is the 'import' contextual keyword, mark it as such.
+ if (Name.equals("import"))
+ II->setModulesImport(true);
+ }
+
+ return *II;
+ }
+
+ typedef HashTableTy::const_iterator iterator;
+ typedef HashTableTy::const_iterator const_iterator;
+
+ iterator begin() const { return HashTable.begin(); }
+ iterator end() const { return HashTable.end(); }
+ unsigned size() const { return HashTable.size(); }
+
+ /// PrintStats - Print some statistics to stderr that indicate how well the
+ /// hashing is doing.
+ void PrintStats() const;
+
+ void AddKeywords(const LangOptions &LangOpts);
+};
+
+/// ObjCMethodFamily - A family of Objective-C methods. These
+/// families have no inherent meaning in the language, but are
+/// nonetheless central enough in the existing implementations to
+/// merit direct AST support. While, in theory, arbitrary methods can
+/// be considered to form families, we focus here on the methods
+/// involving allocation and retain-count management, as these are the
+/// most "core" and the most likely to be useful to diverse clients
+/// without extra information.
+///
+/// Both selectors and actual method declarations may be classified
+/// into families. Method families may impose additional restrictions
+/// beyond their selector name; for example, a method called '_init'
+/// that returns void is not considered to be in the 'init' family
+/// (but would be if it returned 'id'). It is also possible to
+/// explicitly change or remove a method's family. Therefore the
+/// method's family should be considered the single source of truth.
+enum ObjCMethodFamily {
+ /// \brief No particular method family.
+ OMF_None,
+
+ // Selectors in these families may have arbitrary arity, may be
+ // written with arbitrary leading underscores, and may have
+ // additional CamelCase "words" in their first selector chunk
+ // following the family name.
+ OMF_alloc,
+ OMF_copy,
+ OMF_init,
+ OMF_mutableCopy,
+ OMF_new,
+
+ // These families are singletons consisting only of the nullary
+ // selector with the given name.
+ OMF_autorelease,
+ OMF_dealloc,
+ OMF_finalize,
+ OMF_release,
+ OMF_retain,
+ OMF_retainCount,
+ OMF_self,
+
+ // performSelector families
+ OMF_performSelector
+};
+
+/// Enough bits to store any enumerator in ObjCMethodFamily or
+/// InvalidObjCMethodFamily.
+enum { ObjCMethodFamilyBitWidth = 4 };
+
+/// An invalid value of ObjCMethodFamily.
+enum { InvalidObjCMethodFamily = (1 << ObjCMethodFamilyBitWidth) - 1 };
+
+/// Selector - This smart pointer class efficiently represents Objective-C
+/// method names. This class will either point to an IdentifierInfo or a
+/// MultiKeywordSelector (which is private). This enables us to optimize
+/// selectors that take no arguments and selectors that take 1 argument, which
+/// accounts for 78% of all selectors in Cocoa.h.
+class Selector {
+ friend class Diagnostic;
+
+ enum IdentifierInfoFlag {
+ // MultiKeywordSelector = 0.
+ ZeroArg = 0x1,
+ OneArg = 0x2,
+ ArgFlags = ZeroArg|OneArg
+ };
+ uintptr_t InfoPtr; // a pointer to the MultiKeywordSelector or IdentifierInfo.
+
+ Selector(IdentifierInfo *II, unsigned nArgs) {
+ InfoPtr = reinterpret_cast<uintptr_t>(II);
+ assert((InfoPtr & ArgFlags) == 0 &&"Insufficiently aligned IdentifierInfo");
+ assert(nArgs < 2 && "nArgs not equal to 0/1");
+ InfoPtr |= nArgs+1;
+ }
+ Selector(MultiKeywordSelector *SI) {
+ InfoPtr = reinterpret_cast<uintptr_t>(SI);
+ assert((InfoPtr & ArgFlags) == 0 &&"Insufficiently aligned IdentifierInfo");
+ }
+
+ IdentifierInfo *getAsIdentifierInfo() const {
+ if (getIdentifierInfoFlag())
+ return reinterpret_cast<IdentifierInfo *>(InfoPtr & ~ArgFlags);
+ return 0;
+ }
+ unsigned getIdentifierInfoFlag() const {
+ return InfoPtr & ArgFlags;
+ }
+
+ static ObjCMethodFamily getMethodFamilyImpl(Selector sel);
+
+public:
+ friend class SelectorTable; // only the SelectorTable can create these
+ friend class DeclarationName; // and the AST's DeclarationName.
+
+ /// The default ctor should only be used when creating data structures that
+ /// will contain selectors.
+ Selector() : InfoPtr(0) {}
+ Selector(uintptr_t V) : InfoPtr(V) {}
+
+ /// operator==/!= - Indicate whether the specified selectors are identical.
+ bool operator==(Selector RHS) const {
+ return InfoPtr == RHS.InfoPtr;
+ }
+ bool operator!=(Selector RHS) const {
+ return InfoPtr != RHS.InfoPtr;
+ }
+ void *getAsOpaquePtr() const {
+ return reinterpret_cast<void*>(InfoPtr);
+ }
+
+ /// \brief Determine whether this is the empty selector.
+ bool isNull() const { return InfoPtr == 0; }
+
+ // Predicates to identify the selector type.
+ bool isKeywordSelector() const {
+ return getIdentifierInfoFlag() != ZeroArg;
+ }
+ bool isUnarySelector() const {
+ return getIdentifierInfoFlag() == ZeroArg;
+ }
+ unsigned getNumArgs() const;
+
+
+ /// \brief Retrieve the identifier at a given position in the selector.
+ ///
+ /// Note that the identifier pointer returned may be NULL. Clients that only
+ /// care about the text of the identifier string, and not the specific,
+ /// uniqued identifier pointer, should use \c getNameForSlot(), which returns
+ /// an empty string when the identifier pointer would be NULL.
+ ///
+ /// \param argIndex The index for which we want to retrieve the identifier.
+ /// This index shall be less than \c getNumArgs() unless this is a keyword
+ /// selector, in which case 0 is the only permissible value.
+ ///
+ /// \returns the uniqued identifier for this slot, or NULL if this slot has
+ /// no corresponding identifier.
+ IdentifierInfo *getIdentifierInfoForSlot(unsigned argIndex) const;
+
+ /// \brief Retrieve the name at a given position in the selector.
+ ///
+ /// \param argIndex The index for which we want to retrieve the name.
+ /// This index shall be less than \c getNumArgs() unless this is a keyword
+ /// selector, in which case 0 is the only permissible value.
+ ///
+ /// \returns the name for this slot, which may be the empty string if no
+ /// name was supplied.
+ StringRef getNameForSlot(unsigned argIndex) const;
+
+ /// getAsString - Derive the full selector name (e.g. "foo:bar:") and return
+ /// it as an std::string.
+ std::string getAsString() const;
+
+ /// getMethodFamily - Derive the conventional family of this method.
+ ObjCMethodFamily getMethodFamily() const {
+ return getMethodFamilyImpl(*this);
+ }
+
+ static Selector getEmptyMarker() {
+ return Selector(uintptr_t(-1));
+ }
+ static Selector getTombstoneMarker() {
+ return Selector(uintptr_t(-2));
+ }
+};
+
+/// SelectorTable - This table allows us to fully hide how we implement
+/// multi-keyword caching.
+class SelectorTable {
+ void *Impl; // Actually a SelectorTableImpl
+ SelectorTable(const SelectorTable&); // DISABLED: DO NOT IMPLEMENT
+ void operator=(const SelectorTable&); // DISABLED: DO NOT IMPLEMENT
+public:
+ SelectorTable();
+ ~SelectorTable();
+
+ /// getSelector - This can create any sort of selector. NumArgs indicates
+ /// whether this is a no argument selector "foo", a single argument selector
+ /// "foo:" or multi-argument "foo:bar:".
+ Selector getSelector(unsigned NumArgs, IdentifierInfo **IIV);
+
+ Selector getUnarySelector(IdentifierInfo *ID) {
+ return Selector(ID, 1);
+ }
+ Selector getNullarySelector(IdentifierInfo *ID) {
+ return Selector(ID, 0);
+ }
+
+ /// Return the total amount of memory allocated for managing selectors.
+ size_t getTotalMemory() const;
+
+ /// constructSetterName - Return the setter name for the given
+ /// identifier, i.e. "set" + Name where the initial character of Name
+ /// has been capitalized.
+ static Selector constructSetterName(IdentifierTable &Idents,
+ SelectorTable &SelTable,
+ const IdentifierInfo *Name);
+};
+
+/// DeclarationNameExtra - Common base of the MultiKeywordSelector,
+/// CXXSpecialName, and CXXOperatorIdName classes, all of which are
+/// private classes that describe different kinds of names.
+class DeclarationNameExtra {
+public:
+ /// ExtraKind - The kind of "extra" information stored in the
+ /// DeclarationName. See @c ExtraKindOrNumArgs for an explanation of
+ /// how these enumerator values are used.
+ enum ExtraKind {
+ CXXConstructor = 0,
+ CXXDestructor,
+ CXXConversionFunction,
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ CXXOperator##Name,
+#include "clang/Basic/OperatorKinds.def"
+ CXXLiteralOperator,
+ CXXUsingDirective,
+ NUM_EXTRA_KINDS
+ };
+
+  /// ExtraKindOrNumArgs - Either the kind of C++ special name or
+  /// operator-id (if the value is one of the CXX* enumerators of
+  /// ExtraKind), in which case the DeclarationNameExtra is also a
+  /// CXXSpecialName (for CXXConstructor, CXXDestructor, or
+  /// CXXConversionFunction), a CXXOperatorIdName, a CXXLiteralOperatorName,
+  /// or the name common to C++ using-directives (CXXUsingDirective);
+  /// otherwise it is NUM_EXTRA_KINDS+NumArgs, where NumArgs is the number of
+  /// arguments in the Objective-C selector, in which case the
+  /// DeclarationNameExtra is also a MultiKeywordSelector.
+ unsigned ExtraKindOrNumArgs;
+};
+
+} // end namespace clang
+
+namespace llvm {
+/// Define DenseMapInfo so that Selectors can be used as keys in DenseMap and
+/// DenseSets.
+template <>
+struct DenseMapInfo<clang::Selector> {
+ static inline clang::Selector getEmptyKey() {
+ return clang::Selector::getEmptyMarker();
+ }
+ static inline clang::Selector getTombstoneKey() {
+ return clang::Selector::getTombstoneMarker();
+ }
+
+ static unsigned getHashValue(clang::Selector S);
+
+ static bool isEqual(clang::Selector LHS, clang::Selector RHS) {
+ return LHS == RHS;
+ }
+};
+
+template <>
+struct isPodLike<clang::Selector> { static const bool value = true; };
+
+template<>
+class PointerLikeTypeTraits<clang::Selector> {
+public:
+ static inline const void *getAsVoidPointer(clang::Selector P) {
+ return P.getAsOpaquePtr();
+ }
+ static inline clang::Selector getFromVoidPointer(const void *P) {
+ return clang::Selector(reinterpret_cast<uintptr_t>(P));
+ }
+ enum { NumLowBitsAvailable = 0 };
+};
+
+// Provide PointerLikeTypeTraits for IdentifierInfo pointers, which
+// are not guaranteed to be 8-byte aligned.
+template<>
+class PointerLikeTypeTraits<clang::IdentifierInfo*> {
+public:
+ static inline void *getAsVoidPointer(clang::IdentifierInfo* P) {
+ return P;
+ }
+ static inline clang::IdentifierInfo *getFromVoidPointer(void *P) {
+ return static_cast<clang::IdentifierInfo*>(P);
+ }
+ enum { NumLowBitsAvailable = 1 };
+};
+
+template<>
+class PointerLikeTypeTraits<const clang::IdentifierInfo*> {
+public:
+ static inline const void *getAsVoidPointer(const clang::IdentifierInfo* P) {
+ return P;
+ }
+ static inline const clang::IdentifierInfo *getFromVoidPointer(const void *P) {
+ return static_cast<const clang::IdentifierInfo*>(P);
+ }
+ enum { NumLowBitsAvailable = 1 };
+};
+
+} // end namespace llvm
+#endif
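
A short sketch of the identifier and selector tables above (the identifier strings are invented for the example):

    #include "clang/Basic/IdentifierTable.h"
    #include "clang/Basic/LangOptions.h"

    void buildSelectors() {
      clang::LangOptions LangOpts;
      clang::IdentifierTable Idents(LangOpts);   // seeded with keywords for LangOpts
      clang::SelectorTable Selectors;

      clang::IdentifierInfo &Init = Idents.get("init");
      clang::IdentifierInfo *Parts[2] = { &Idents.get("setObject"),
                                          &Idents.get("forKey") };

      clang::Selector S0 = Selectors.getNullarySelector(&Init);  // "init"
      clang::Selector S1 = Selectors.getUnarySelector(&Init);    // "init:"
      clang::Selector S2 = Selectors.getSelector(2, Parts);      // "setObject:forKey:"

      (void)S0.getMethodFamily();   // conventional family (here, the 'init' family)
      (void)S1.getNumArgs();        // 1
      (void)S2.getAsString();       // "setObject:forKey:"
    }
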
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/LLVM.h b/contrib/llvm/tools/clang/include/clang/Basic/LLVM.h
new file mode 100644
index 0000000..813b49e
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/LLVM.h
@@ -0,0 +1,73 @@
+//===--- LLVM.h - Import various common LLVM datatypes ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file forward declares and imports various common LLVM datatypes that
+// clang wants to use unqualified.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_BASIC_LLVM_H
+#define CLANG_BASIC_LLVM_H
+
+// This should be the only #include, force #includes of all the others on
+// clients.
+#include "llvm/Support/Casting.h"
+
+namespace llvm {
+ // ADT's.
+ class StringRef;
+ class Twine;
+ template<typename T> class ArrayRef;
+ template<class T> class OwningPtr;
+ template<unsigned InternalLen> class SmallString;
+ template<typename T, unsigned N> class SmallVector;
+ template<typename T> class SmallVectorImpl;
+
+ template<typename T>
+ struct SaveAndRestore;
+
+ // Reference counting.
+ template <typename T> class IntrusiveRefCntPtr;
+ template <typename T> struct IntrusiveRefCntPtrInfo;
+ template <class Derived> class RefCountedBase;
+ class RefCountedBaseVPTR;
+
+ class raw_ostream;
+ // TODO: DenseMap, ...
+}
+
+
+namespace clang {
+ // Casting operators.
+ using llvm::isa;
+ using llvm::cast;
+ using llvm::dyn_cast;
+ using llvm::dyn_cast_or_null;
+ using llvm::cast_or_null;
+
+ // ADT's.
+ using llvm::StringRef;
+ using llvm::Twine;
+ using llvm::ArrayRef;
+ using llvm::OwningPtr;
+ using llvm::SmallString;
+ using llvm::SmallVector;
+ using llvm::SmallVectorImpl;
+ using llvm::SaveAndRestore;
+
+ // Reference counting.
+ using llvm::IntrusiveRefCntPtr;
+ using llvm::IntrusiveRefCntPtrInfo;
+ using llvm::RefCountedBase;
+ using llvm::RefCountedBaseVPTR;
+
+ using llvm::raw_ostream;
+} // end namespace clang.
+
+#endif
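
Once this header is included, clang code can spell the imported LLVM types without the llvm:: prefix; a small invented helper as illustration:

    #include "clang/Basic/LLVM.h"
    #include "llvm/ADT/SmallVector.h"
    #include "llvm/ADT/StringRef.h"
    #include <string>

    namespace clang {
    // joinNames is not a real clang API; it only shows the unqualified spelling
    // of StringRef and SmallVectorImpl made possible by LLVM.h.
    std::string joinNames(const SmallVectorImpl<StringRef> &Names) {
      std::string Result;
      for (unsigned I = 0, N = Names.size(); I != N; ++I) {
        if (I) Result += ", ";
        Result += Names[I].str();
      }
      return Result;
    }
    } // end namespace clang
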
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Lambda.h b/contrib/llvm/tools/clang/include/clang/Basic/Lambda.h
new file mode 100644
index 0000000..df50d94
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Lambda.h
@@ -0,0 +1,38 @@
+//===--- Lambda.h - Types for C++ Lambdas -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines several types used to describe C++ lambda
+// expressions that are shared between the parser and AST.
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_CLANG_BASIC_LAMBDA_H
+#define LLVM_CLANG_BASIC_LAMBDA_H
+
+namespace clang {
+
+/// LambdaCaptureDefault - The default, if any, capture method for a
+/// lambda expression.
+enum LambdaCaptureDefault {
+ LCD_None,
+ LCD_ByCopy,
+ LCD_ByRef
+};
+
+/// LambdaCaptureKind - The different capture forms in a lambda
+/// introducer: 'this' or a copied or referenced variable.
+enum LambdaCaptureKind {
+ LCK_This,
+ LCK_ByCopy,
+ LCK_ByRef
+};
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_BASIC_LAMBDA_H
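
For orientation, a tiny invented helper mapping lambda-introducer spellings to the enumerators above:

    #include "clang/Basic/Lambda.h"
    #include "llvm/ADT/StringRef.h"

    clang::LambdaCaptureDefault captureDefaultFor(llvm::StringRef Intro) {
      if (Intro == "=") return clang::LCD_ByCopy;  // [=]  default capture by copy
      if (Intro == "&") return clang::LCD_ByRef;   // [&]  default capture by reference
      return clang::LCD_None;                      // []   no capture-default
    }
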
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def
new file mode 100644
index 0000000..786ae12
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.def
@@ -0,0 +1,171 @@
+//===--- LangOptions.def - Language option database -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the language options. Users of this file must
+// define the LANGOPT macro to make use of this information.
+// Optionally, the user may also define BENIGN_LANGOPT
+// (for options that don't affect the construction of the AST in an
+// incompatible way), ENUM_LANGOPT (for options that have enumeration,
+// rather than unsigned, type), BENIGN_ENUM_LANGOPT (for benign
+// options that have enumeration type), and VALUE_LANGOPT (for options
+// that describe a value rather than a flag).
+//
+//===----------------------------------------------------------------------===//
+#ifndef LANGOPT
+# error Define the LANGOPT macro to handle language options
+#endif
+
+#ifndef VALUE_LANGOPT
+# define VALUE_LANGOPT(Name, Bits, Default, Description) \
+ LANGOPT(Name, Bits, Default, Description)
+#endif
+
+#ifndef BENIGN_LANGOPT
+# define BENIGN_LANGOPT(Name, Bits, Default, Description) \
+ LANGOPT(Name, Bits, Default, Description)
+#endif
+
+#ifndef ENUM_LANGOPT
+# define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ LANGOPT(Name, Bits, Default, Description)
+#endif
+
+#ifndef BENIGN_ENUM_LANGOPT
+# define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ ENUM_LANGOPT(Name, Type, Bits, Default, Description)
+#endif
+
+LANGOPT(C99 , 1, 0, "C99")
+LANGOPT(C11 , 1, 0, "C11")
+LANGOPT(MicrosoftExt , 1, 0, "Microsoft extensions")
+LANGOPT(MicrosoftMode , 1, 0, "Microsoft compatibility mode")
+LANGOPT(Borland , 1, 0, "Borland extensions")
+LANGOPT(CPlusPlus , 1, 0, "C++")
+LANGOPT(CPlusPlus0x , 1, 0, "C++0x")
+LANGOPT(ObjC1 , 1, 0, "Objective-C 1")
+LANGOPT(ObjC2 , 1, 0, "Objective-C 2")
+LANGOPT(ObjCNonFragileABI , 1, 0, "Objective-C modern abi")
+LANGOPT(ObjCNonFragileABI2 , 1, 0, "Objective-C enhanced modern abi")
+BENIGN_LANGOPT(ObjCDefaultSynthProperties , 1, 0,
+ "Objective-C auto-synthesized properties")
+BENIGN_LANGOPT(ObjCInferRelatedResultType , 1, 1,
+ "Objective-C related result type inference")
+LANGOPT(Trigraphs , 1, 0,"trigraphs")
+LANGOPT(BCPLComment , 1, 0, "BCPL-style '//' comments")
+LANGOPT(Bool , 1, 0, "bool, true, and false keywords")
+BENIGN_LANGOPT(DollarIdents , 1, 1, "'$' in identifiers")
+BENIGN_LANGOPT(AsmPreprocessor, 1, 0, "preprocessor in asm mode")
+BENIGN_LANGOPT(GNUMode , 1, 1, "GNU extensions")
+LANGOPT(GNUKeywords , 1, 1, "GNU keywords")
+BENIGN_LANGOPT(ImplicitInt, 1, !C99 && !CPlusPlus, "C89 implicit 'int'")
+LANGOPT(Digraphs , 1, 0, "digraphs")
+BENIGN_LANGOPT(HexFloats , 1, C99, "C99 hexadecimal float constants")
+LANGOPT(CXXOperatorNames , 1, 0, "C++ operator name keywords")
+LANGOPT(AppleKext , 1, 0, "Apple kext support")
+BENIGN_LANGOPT(PascalStrings, 1, 0, "Pascal string support")
+LANGOPT(WritableStrings , 1, 0, "writable string support")
+LANGOPT(ConstStrings , 1, 0, "const-qualified string support")
+LANGOPT(LaxVectorConversions , 1, 1, "lax vector conversions")
+LANGOPT(AltiVec , 1, 0, "AltiVec-style vector initializers")
+LANGOPT(Exceptions , 1, 0, "exception handling")
+LANGOPT(ObjCExceptions , 1, 0, "Objective-C exceptions")
+LANGOPT(CXXExceptions , 1, 0, "C++ exceptions")
+LANGOPT(SjLjExceptions    , 1, 0, "setjmp/longjmp exception handling")
+LANGOPT(TraditionalCPP , 1, 0, "traditional CPP emulation")
+LANGOPT(RTTI , 1, 1, "run-time type information")
+LANGOPT(MSBitfields , 1, 0, "Microsoft-compatible structure layout")
+LANGOPT(NeXTRuntime , 1, 1, "NeXT Objective-C runtime")
+LANGOPT(Freestanding, 1, 0, "freestanding implementation")
+LANGOPT(FormatExtensions , 1, 0, "FreeBSD format extensions")
+LANGOPT(NoBuiltin , 1, 0, "disable builtin functions")
+
+BENIGN_LANGOPT(ThreadsafeStatics , 1, 1, "thread-safe static initializers")
+LANGOPT(POSIXThreads , 1, 0, "POSIX thread support")
+LANGOPT(Blocks , 1, 0, "blocks extension to C")
+BENIGN_LANGOPT(EmitAllDecls , 1, 0, "support for emitting all declarations")
+LANGOPT(MathErrno , 1, 1, "errno support for math functions")
+BENIGN_LANGOPT(HeinousExtensions , 1, 0, "Extensions that we really don't like and may be ripped out at any time")
+LANGOPT(Modules , 1, 0, "modules extension to C")
+LANGOPT(Optimize , 1, 0, "__OPTIMIZE__ predefined macro")
+LANGOPT(OptimizeSize , 1, 0, "__OPTIMIZE_SIZE__ predefined macro")
+LANGOPT(Static , 1, 0, "__STATIC__ predefined macro (as opposed to __DYNAMIC__)")
+VALUE_LANGOPT(PackStruct , 32, 0,
+ "default struct packing maximum alignment")
+VALUE_LANGOPT(PICLevel , 2, 0, "__PIC__ level")
+VALUE_LANGOPT(PIELevel , 2, 0, "__PIE__ level")
+LANGOPT(GNUInline , 1, 0, "GNU inline semantics")
+LANGOPT(NoInlineDefine , 1, 0, "__NO_INLINE__ predefined macro")
+LANGOPT(Deprecated , 1, 0, "__DEPRECATED predefined macro")
+LANGOPT(FastMath , 1, 0, "__FAST_MATH__ predefined macro")
+
+BENIGN_LANGOPT(ObjCGCBitmapPrint , 1, 0, "printing of GC's bitmap layout for __weak/__strong ivars")
+
+BENIGN_LANGOPT(AccessControl , 1, 1, "C++ access control")
+LANGOPT(CharIsSigned , 1, 1, "signed char")
+LANGOPT(ShortWChar , 1, 0, "unsigned short wchar_t")
+
+LANGOPT(ShortEnums , 1, 0, "short enum types")
+
+LANGOPT(OpenCL , 1, 0, "OpenCL")
+LANGOPT(CUDA , 1, 0, "CUDA")
+
+LANGOPT(AssumeSaneOperatorNew , 1, 1, "implicit __attribute__((malloc)) for C++'s new operators")
+BENIGN_LANGOPT(ElideConstructors , 1, 1, "C++ copy constructor elision")
+BENIGN_LANGOPT(CatchUndefined , 1, 0, "catching undefined behavior at run time")
+BENIGN_LANGOPT(DumpRecordLayouts , 1, 0, "dumping the layout of IRgen'd records")
+BENIGN_LANGOPT(DumpRecordLayoutsSimple , 1, 0, "dumping the layout of IRgen'd records in a simple form")
+BENIGN_LANGOPT(DumpVTableLayouts , 1, 0, "dumping the layouts of emitted vtables")
+LANGOPT(NoConstantCFStrings , 1, 0, "no constant CoreFoundation strings")
+BENIGN_LANGOPT(InlineVisibilityHidden , 1, 0, "hidden default visibility for inline C++ methods")
+BENIGN_LANGOPT(ParseUnknownAnytype, 1, 0, "__unknown_anytype")
+BENIGN_LANGOPT(DebuggerSupport , 1, 0, "debugger support")
+BENIGN_LANGOPT(DebuggerCastResultToId, 1, 0, "for 'po' in the debugger, cast the result to id if it is of unknown type")
+BENIGN_LANGOPT(DebuggerObjCLiteral , 1, 0, "debugger objective-C literals and subscripting support")
+BENIGN_LANGOPT(AddressSanitizer , 1, 0, "AddressSanitizer enabled")
+BENIGN_LANGOPT(ThreadSanitizer , 1, 0, "ThreadSanitizer enabled")
+
+BENIGN_LANGOPT(SpellChecking , 1, 1, "spell-checking")
+LANGOPT(SinglePrecisionConstants , 1, 0, "treating double-precision floating point constants as single precision constants")
+LANGOPT(FastRelaxedMath , 1, 0, "OpenCL fast relaxed math")
+LANGOPT(DefaultFPContract , 1, 0, "FP_CONTRACT")
+LANGOPT(NoBitFieldTypeAlign , 1, 0, "bit-field type alignment")
+LANGOPT(HexagonQdsp6Compat , 1, 0, "hexagon-qdsp6 backward compatibility")
+LANGOPT(ObjCAutoRefCount , 1, 0, "Objective-C automated reference counting")
+LANGOPT(ObjCRuntimeHasWeak , 1, 0, "__weak support in the ARC runtime")
+LANGOPT(FakeAddressSpaceMap , 1, 0, "OpenCL fake address space map")
+
+LANGOPT(MRTD , 1, 0, "-mrtd calling convention")
+BENIGN_LANGOPT(DelayedTemplateParsing , 1, 0, "delayed template parsing")
+LANGOPT(BlocksRuntimeOptional , 1, 0, "optional blocks runtime")
+
+ENUM_LANGOPT(GC, GCMode, 2, NonGC, "Objective-C Garbage Collection mode")
+ENUM_LANGOPT(VisibilityMode, Visibility, 3, DefaultVisibility,
+ "symbol visibility")
+ENUM_LANGOPT(StackProtector, StackProtectorMode, 2, SSPOff,
+ "stack protector mode")
+ENUM_LANGOPT(SignedOverflowBehavior, SignedOverflowBehaviorTy, 2, SOB_Undefined,
+ "signed integer overflow handling")
+
+BENIGN_LANGOPT(InstantiationDepth, 32, 1024,
+ "maximum template instantiation depth")
+BENIGN_LANGOPT(ConstexprCallDepth, 32, 512,
+ "maximum constexpr call depth")
+BENIGN_LANGOPT(NumLargeByValueCopy, 32, 0,
+               "if non-zero, warn when a parameter or return value is larger in bytes than this setting. 0 disables the check.")
+VALUE_LANGOPT(MSCVersion, 32, 0,
+ "version of Microsoft Visual C/C++")
+
+LANGOPT(ApplePragmaPack, 1, 0, "Apple gcc-compatible #pragma pack handling")
+
+#undef LANGOPT
+#undef VALUE_LANGOPT
+#undef BENIGN_LANGOPT
+#undef ENUM_LANGOPT
+#undef BENIGN_ENUM_LANGOPT
+
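
The .def file is consumed with the usual X-macro pattern: a client defines LANGOPT (and, optionally, the other macros) before including the file. A hedged sketch of one such consumer (dumpLangOptionNames is invented; it prints only names and descriptions, since some default expressions refer to other options and are only meaningful inside LangOptions itself):

    #include "llvm/Support/raw_ostream.h"

    static void dumpLangOptionNames() {
    #define LANGOPT(Name, Bits, Default, Description) \
      llvm::errs() << #Name << ": " << Description << "\n";
    #define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
      LANGOPT(Name, Bits, Default, Description)
    #include "clang/Basic/LangOptions.def"
    }

The .def file #undefs its macros itself, so no cleanup is needed after the include.
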
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h
new file mode 100644
index 0000000..ce4ff06
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/LangOptions.h
@@ -0,0 +1,122 @@
+//===--- LangOptions.h - C Language Family Language Options -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LangOptions interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LANGOPTIONS_H
+#define LLVM_CLANG_LANGOPTIONS_H
+
+#include <string>
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/Visibility.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+
+namespace clang {
+
+/// Bitfields of LangOptions, split out from LangOptions in order to ensure that
+/// this large collection of bitfields is a trivial class type.
+class LangOptionsBase {
+public:
+ // Define simple language options (with no accessors).
+#define LANGOPT(Name, Bits, Default, Description) unsigned Name : Bits;
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description)
+#include "clang/Basic/LangOptions.def"
+
+protected:
+ // Define language options of enumeration type. These are private, and will
+ // have accessors (below).
+#define LANGOPT(Name, Bits, Default, Description)
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ unsigned Name : Bits;
+#include "clang/Basic/LangOptions.def"
+};
+
+/// LangOptions - This class keeps track of the various options that can be
+/// enabled, which controls the dialect of C that is accepted.
+class LangOptions : public RefCountedBase<LangOptions>, public LangOptionsBase {
+public:
+ typedef clang::Visibility Visibility;
+
+ enum GCMode { NonGC, GCOnly, HybridGC };
+ enum StackProtectorMode { SSPOff, SSPOn, SSPReq };
+
+ enum SignedOverflowBehaviorTy {
+ SOB_Undefined, // Default C standard behavior.
+ SOB_Defined, // -fwrapv
+ SOB_Trapping // -ftrapv
+ };
+
+public:
+ std::string ObjCConstantStringClass;
+
+ /// The name of the handler function to be called when -ftrapv is specified.
+ /// If none is specified, abort (GCC-compatible behaviour).
+ std::string OverflowHandler;
+
+ /// \brief The name of the current module.
+ std::string CurrentModule;
+
+ LangOptions();
+
+ // Define accessors/mutators for language options of enumeration type.
+#define LANGOPT(Name, Bits, Default, Description)
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ Type get##Name() const { return static_cast<Type>(Name); } \
+ void set##Name(Type Value) { Name = static_cast<unsigned>(Value); }
+#include "clang/Basic/LangOptions.def"
+
+ bool isSignedOverflowDefined() const {
+ return getSignedOverflowBehavior() == SOB_Defined;
+ }
+
+ /// \brief Reset all of the options that are not considered when building a
+ /// module.
+ void resetNonModularOptions();
+};
+
+/// Floating point control options
+class FPOptions {
+public:
+ unsigned fp_contract : 1;
+
+ FPOptions() : fp_contract(0) {}
+
+ FPOptions(const LangOptions &LangOpts) :
+ fp_contract(LangOpts.DefaultFPContract) {}
+};
+
+/// OpenCL extension options
+class OpenCLOptions {
+public:
+#define OPENCLEXT(nm) unsigned nm : 1;
+#include "clang/Basic/OpenCLExtensions.def"
+
+ OpenCLOptions() {
+#define OPENCLEXT(nm) nm = 0;
+#include "clang/Basic/OpenCLExtensions.def"
+ }
+};
+
+/// \brief Describes the kind of translation unit being processed.
+enum TranslationUnitKind {
+ /// \brief The translation unit is a complete translation unit.
+ TU_Complete,
+ /// \brief The translation unit is a prefix to a translation unit, and is
+ /// not complete.
+ TU_Prefix,
+ /// \brief The translation unit is a module.
+ TU_Module
+};
+
+} // end namespace clang
+
+#endif
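
A short sketch of configuring the options declared above, using both the plain LANGOPT bitfields and a generated ENUM_LANGOPT accessor:

    #include "clang/Basic/LangOptions.h"

    void configureCxx11GnuDialect(clang::LangOptions &Opts) {
      Opts.CPlusPlus   = 1;          // plain LANGOPT bitfields
      Opts.CPlusPlus0x = 1;
      Opts.GNUMode     = 1;
      // ENUM_LANGOPT accessor generated from LangOptions.def:
      Opts.setSignedOverflowBehavior(clang::LangOptions::SOB_Defined); // -fwrapv
      // Opts.isSignedOverflowDefined() now returns true.
    }
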
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Linkage.h b/contrib/llvm/tools/clang/include/clang/Basic/Linkage.h
new file mode 100644
index 0000000..09a5a0b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Linkage.h
@@ -0,0 +1,68 @@
+//===--- Linkage.h - Linkage enumeration and utilities ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Linkage enumeration and various utility
+// functions.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_BASIC_LINKAGE_H
+#define LLVM_CLANG_BASIC_LINKAGE_H
+
+namespace clang {
+
+/// \brief Describes the different kinds of linkage
+/// (C++ [basic.link], C99 6.2.2) that an entity may have.
+enum Linkage {
+ /// \brief No linkage, which means that the entity is unique and
+ /// can only be referred to from within its scope.
+ NoLinkage = 0,
+
+ /// \brief Internal linkage, which indicates that the entity can
+ /// be referred to from within the translation unit (but not other
+ /// translation units).
+ InternalLinkage,
+
+ /// \brief External linkage within a unique namespace. From the
+ /// language perspective, these entities have external
+ /// linkage. However, since they reside in an anonymous namespace,
+ /// their names are unique to this translation unit, which is
+ /// equivalent to having internal linkage from the code-generation
+ /// point of view.
+ UniqueExternalLinkage,
+
+ /// \brief External linkage, which indicates that the entity can
+ /// be referred to from other translation units.
+ ExternalLinkage
+};
+
+/// \brief A more specific kind of linkage. This is relevant to CodeGen and
+/// AST file reading.
+enum GVALinkage {
+ GVA_Internal,
+ GVA_C99Inline,
+ GVA_CXXInline,
+ GVA_StrongExternal,
+ GVA_TemplateInstantiation,
+ GVA_ExplicitTemplateInstantiation
+};
+
+/// \brief Determine whether the given linkage is semantically
+/// external.
+inline bool isExternalLinkage(Linkage L) {
+ return L == UniqueExternalLinkage || L == ExternalLinkage;
+}
+
+/// \brief Compute the minimum linkage given two linkages.
+static inline Linkage minLinkage(Linkage L1, Linkage L2) {
+ return L1 < L2? L1 : L2;
+}
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_BASIC_LINKAGE_H
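
Two small illustrations of the helpers above:

    #include "clang/Basic/Linkage.h"

    void linkageExamples() {
      using namespace clang;
      // Anonymous-namespace entities: external in language terms, but unique
      // to their translation unit.
      bool Ext = isExternalLinkage(UniqueExternalLinkage);       // true
      // Combining linkages keeps the most restrictive (smallest) value.
      Linkage L = minLinkage(InternalLinkage, ExternalLinkage);  // InternalLinkage
      (void)Ext; (void)L;
    }
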
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/MacroBuilder.h b/contrib/llvm/tools/clang/include/clang/Basic/MacroBuilder.h
new file mode 100644
index 0000000..1d0f1e8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/MacroBuilder.h
@@ -0,0 +1,46 @@
+//===--- MacroBuilder.h - CPP Macro building utility ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MacroBuilder utility class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_MACROBUILDER_H
+#define LLVM_CLANG_BASIC_MACROBUILDER_H
+
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+
+class MacroBuilder {
+ raw_ostream &Out;
+public:
+ MacroBuilder(raw_ostream &Output) : Out(Output) {}
+
+ /// Append a #define line for macro of the form "#define Name Value\n".
+ void defineMacro(const Twine &Name, const Twine &Value = "1") {
+ Out << "#define " << Name << ' ' << Value << '\n';
+ }
+
+ /// Append a #undef line for Name. Name should be of the form XXX
+ /// and we emit "#undef XXX".
+ void undefineMacro(const Twine &Name) {
+ Out << "#undef " << Name << '\n';
+ }
+
+ /// Directly append Str and a newline to the underlying buffer.
+ void append(const Twine &Str) {
+ Out << Str << '\n';
+ }
+};
+
+} // end namespace clang
+
+#endif
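
A sketch of driving MacroBuilder with an llvm::raw_string_ostream (the macro names are invented):

    #include "clang/Basic/MacroBuilder.h"
    #include "llvm/Support/raw_ostream.h"
    #include <string>

    std::string buildPredefines() {
      std::string Text;
      llvm::raw_string_ostream OS(Text);
      clang::MacroBuilder Builder(OS);

      Builder.defineMacro("__MY_TOOL__");        // "#define __MY_TOOL__ 1"
      Builder.defineMacro("MY_VERSION", "2");    // "#define MY_VERSION 2"
      Builder.undefineMacro("NDEBUG");           // "#undef NDEBUG"
      Builder.append("#pragma clang diagnostic push");

      return OS.str();                           // flush and return the text
    }
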
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Module.h b/contrib/llvm/tools/clang/include/clang/Basic/Module.h
new file mode 100644
index 0000000..82dbd5b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Module.h
@@ -0,0 +1,284 @@
+//===--- Module.h - Describe a module ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Module class, which describes a module in the source
+// code.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_BASIC_MODULE_H
+#define LLVM_CLANG_BASIC_MODULE_H
+
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+ class raw_ostream;
+}
+
+namespace clang {
+
+class DirectoryEntry;
+class FileEntry;
+class LangOptions;
+class TargetInfo;
+
+/// \brief Describes the name of a module.
+typedef llvm::SmallVector<std::pair<std::string, SourceLocation>, 2>
+ ModuleId;
+
+/// \brief Describes a module or submodule.
+class Module {
+public:
+ /// \brief The name of this module.
+ std::string Name;
+
+ /// \brief The location of the module definition.
+ SourceLocation DefinitionLoc;
+
+ /// \brief The parent of this module. This will be NULL for the top-level
+ /// module.
+ Module *Parent;
+
+ /// \brief The umbrella header or directory.
+ llvm::PointerUnion<const DirectoryEntry *, const FileEntry *> Umbrella;
+
+private:
+ /// \brief The submodules of this module, indexed by name.
+ std::vector<Module *> SubModules;
+
+ /// \brief A mapping from the submodule name to the index into the
+ /// \c SubModules vector at which that submodule resides.
+ llvm::StringMap<unsigned> SubModuleIndex;
+
+public:
+ /// \brief The headers that are part of this module.
+ llvm::SmallVector<const FileEntry *, 2> Headers;
+
+ /// \brief The set of language features required to use this module.
+ ///
+ /// If any of these features is not present, the \c IsAvailable bit
+ /// will be false to indicate that this (sub)module is not
+ /// available.
+ llvm::SmallVector<std::string, 2> Requires;
+
+ /// \brief Whether this module is available in the current
+ /// translation unit.
+ unsigned IsAvailable : 1;
+
+ /// \brief Whether this module was loaded from a module file.
+ unsigned IsFromModuleFile : 1;
+
+ /// \brief Whether this is a framework module.
+ unsigned IsFramework : 1;
+
+ /// \brief Whether this is an explicit submodule.
+ unsigned IsExplicit : 1;
+
+ /// \brief Whether this is a "system" module (which assumes that all
+ /// headers in it are system headers).
+ unsigned IsSystem : 1;
+
+ /// \brief Whether we should infer submodules for this module based on
+ /// the headers.
+ ///
+ /// Submodules can only be inferred for modules with an umbrella header.
+ unsigned InferSubmodules : 1;
+
+ /// \brief Whether, when inferring submodules, the inferred submodules
+ /// should be explicit.
+ unsigned InferExplicitSubmodules : 1;
+
+ /// \brief Whether, when inferring submodules, the inferred submodules should
+ /// export all modules they import (e.g., the equivalent of "export *").
+ unsigned InferExportWildcard : 1;
+
+ /// \brief Describes the visibility of the various names within a
+ /// particular module.
+ enum NameVisibilityKind {
+ /// \brief All of the names in this module are hidden.
+ ///
+ Hidden,
+ /// \brief Only the macro names in this module are visible.
+ MacrosVisible,
+ /// \brief All of the names in this module are visible.
+ AllVisible
+ };
+
+ /// \brief The visibility of names within this particular module.
+ NameVisibilityKind NameVisibility;
+
+ /// \brief The location of the inferred submodule.
+ SourceLocation InferredSubmoduleLoc;
+
+ /// \brief The set of modules imported by this module, and on which this
+ /// module depends.
+ llvm::SmallVector<Module *, 2> Imports;
+
+ /// \brief Describes an exported module.
+ ///
+ /// The pointer is the module being re-exported, while the bit will be true
+ /// to indicate that this is a wildcard export.
+ typedef llvm::PointerIntPair<Module *, 1, bool> ExportDecl;
+
+ /// \brief The set of export declarations.
+ llvm::SmallVector<ExportDecl, 2> Exports;
+
+ /// \brief Describes an exported module that has not yet been resolved
+ /// (perhaps because the module it refers to has not yet been loaded).
+ struct UnresolvedExportDecl {
+ /// \brief The location of the 'export' keyword in the module map file.
+ SourceLocation ExportLoc;
+
+ /// \brief The name of the module.
+ ModuleId Id;
+
+ /// \brief Whether this export declaration ends in a wildcard, indicating
+ /// that all of its submodules should be exported (rather than the named
+ /// module itself).
+ bool Wildcard;
+ };
+
+ /// \brief The set of export declarations that have yet to be resolved.
+ llvm::SmallVector<UnresolvedExportDecl, 2> UnresolvedExports;
+
+ /// \brief Construct a top-level module.
+ explicit Module(StringRef Name, SourceLocation DefinitionLoc,
+ bool IsFramework)
+ : Name(Name), DefinitionLoc(DefinitionLoc), Parent(0), Umbrella(),
+ IsAvailable(true), IsFromModuleFile(false), IsFramework(IsFramework),
+ IsExplicit(false), IsSystem(false),
+ InferSubmodules(false), InferExplicitSubmodules(false),
+ InferExportWildcard(false), NameVisibility(Hidden) { }
+
+ /// \brief Construct a new module or submodule.
+ Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
+ bool IsFramework, bool IsExplicit);
+
+ ~Module();
+
+ /// \brief Determine whether this module is available for use within the
+ /// current translation unit.
+ bool isAvailable() const { return IsAvailable; }
+
+ /// \brief Determine whether this module is available for use within the
+ /// current translation unit.
+ ///
+ /// \param LangOpts The language options used for the current
+ /// translation unit.
+ ///
+ /// \param Target The target options used for the current translation unit.
+ ///
+ /// \param Feature If this module is unavailable, this parameter
+ /// will be set to one of the features that is required for use of
+ /// this module (but is not available).
+ bool isAvailable(const LangOptions &LangOpts,
+ const TargetInfo &Target,
+ StringRef &Feature) const;
+
+ /// \brief Determine whether this module is a submodule.
+ bool isSubModule() const { return Parent != 0; }
+
+ /// \brief Determine whether this module is a submodule of the given other
+ /// module.
+ bool isSubModuleOf(Module *Other) const;
+
+ /// \brief Determine whether this module is a part of a framework,
+ /// either because it is a framework module or because it is a submodule
+ /// of a framework module.
+ bool isPartOfFramework() const {
+ for (const Module *Mod = this; Mod; Mod = Mod->Parent)
+ if (Mod->IsFramework)
+ return true;
+
+ return false;
+ }
+
+ /// \brief Retrieve the full name of this module, including the path from
+ /// its top-level module.
+ std::string getFullModuleName() const;
+
+ /// \brief Retrieve the top-level module for this (sub)module, which may
+ /// be this module.
+ Module *getTopLevelModule() {
+ return const_cast<Module *>(
+ const_cast<const Module *>(this)->getTopLevelModule());
+ }
+
+ /// \brief Retrieve the top-level module for this (sub)module, which may
+ /// be this module.
+ const Module *getTopLevelModule() const;
+
+ /// \brief Retrieve the name of the top-level module.
+ ///
+ StringRef getTopLevelModuleName() const {
+ return getTopLevelModule()->Name;
+ }
+
+ /// \brief Retrieve the directory for which this module serves as the
+ /// umbrella.
+ const DirectoryEntry *getUmbrellaDir() const;
+
+ /// \brief Retrieve the header that serves as the umbrella header for this
+ /// module.
+ const FileEntry *getUmbrellaHeader() const {
+ return Umbrella.dyn_cast<const FileEntry *>();
+ }
+
+ /// \brief Determine whether this module has an umbrella directory that is
+ /// not based on an umbrella header.
+ bool hasUmbrellaDir() const {
+ return Umbrella && Umbrella.is<const DirectoryEntry *>();
+ }
+
+ /// \brief Add the given feature requirement to the list of features
+ /// required by this module.
+ ///
+ /// \param Feature The feature that is required by this module (and
+ /// its submodules).
+ ///
+ /// \param LangOpts The set of language options that will be used to
+ /// evaluate the availability of this feature.
+ ///
+ /// \param Target The target options that will be used to evaluate the
+ /// availability of this feature.
+ void addRequirement(StringRef Feature, const LangOptions &LangOpts,
+ const TargetInfo &Target);
+
+ /// \brief Find the submodule with the given name.
+ ///
+ /// \returns The submodule if found, or NULL otherwise.
+ Module *findSubmodule(StringRef Name) const;
+
+ typedef std::vector<Module *>::iterator submodule_iterator;
+ typedef std::vector<Module *>::const_iterator submodule_const_iterator;
+
+ submodule_iterator submodule_begin() { return SubModules.begin(); }
+ submodule_const_iterator submodule_begin() const {return SubModules.begin();}
+ submodule_iterator submodule_end() { return SubModules.end(); }
+ submodule_const_iterator submodule_end() const { return SubModules.end(); }
+
+ /// \brief Print the module map for this module to the given stream.
+ ///
+ void print(llvm::raw_ostream &OS, unsigned Indent = 0) const;
+
+ /// \brief Dump the contents of this module to the given output stream.
+ void dump() const;
+};
+
+} // end namespace clang
+
+
+#endif // LLVM_CLANG_BASIC_MODULE_H
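
As a hedged usage sketch, the tree-walking interface above can be exercised like this; the helper function is hypothetical, and real callers obtain Module objects from clang's module map and link against the clang Basic library for getFullModuleName():

#include "clang/Basic/Module.h"
#include "llvm/Support/raw_ostream.h"

// Recursively print each (sub)module's fully qualified name together with a
// couple of the flags exposed by the class.
static void listModules(const clang::Module &M, llvm::raw_ostream &OS) {
  OS << M.getFullModuleName();
  if (!M.isAvailable())
    OS << " (unavailable)";
  if (M.isPartOfFramework())
    OS << " [framework]";
  OS << "\n";

  for (clang::Module::submodule_const_iterator I = M.submodule_begin(),
                                               E = M.submodule_end();
       I != E; ++I)
    listModules(**I, OS);
}
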
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h b/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h
new file mode 100644
index 0000000..8028a73
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/OnDiskHashTable.h
@@ -0,0 +1,486 @@
+//===--- OnDiskHashTable.h - On-Disk Hash Table Implementation --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines facilities for reading and writing on-disk hash
+// tables.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_BASIC_ON_DISK_HASH_TABLE_H
+#define LLVM_CLANG_BASIC_ON_DISK_HASH_TABLE_H
+
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Host.h"
+#include <cassert>
+#include <cstdlib>
+
+namespace clang {
+
+namespace io {
+
+typedef uint32_t Offset;
+
+inline void Emit8(raw_ostream& Out, uint32_t V) {
+ Out << (unsigned char)(V);
+}
+
+inline void Emit16(raw_ostream& Out, uint32_t V) {
+ Out << (unsigned char)(V);
+ Out << (unsigned char)(V >> 8);
+ assert((V >> 16) == 0);
+}
+
+inline void Emit24(raw_ostream& Out, uint32_t V) {
+ Out << (unsigned char)(V);
+ Out << (unsigned char)(V >> 8);
+ Out << (unsigned char)(V >> 16);
+ assert((V >> 24) == 0);
+}
+
+inline void Emit32(raw_ostream& Out, uint32_t V) {
+ Out << (unsigned char)(V);
+ Out << (unsigned char)(V >> 8);
+ Out << (unsigned char)(V >> 16);
+ Out << (unsigned char)(V >> 24);
+}
+
+inline void Emit64(raw_ostream& Out, uint64_t V) {
+ Out << (unsigned char)(V);
+ Out << (unsigned char)(V >> 8);
+ Out << (unsigned char)(V >> 16);
+ Out << (unsigned char)(V >> 24);
+ Out << (unsigned char)(V >> 32);
+ Out << (unsigned char)(V >> 40);
+ Out << (unsigned char)(V >> 48);
+ Out << (unsigned char)(V >> 56);
+}
+
+inline void Pad(raw_ostream& Out, unsigned A) {
+ Offset off = (Offset) Out.tell();
+ uint32_t n = ((uintptr_t)(off+A-1) & ~(uintptr_t)(A-1)) - off;
+ for (; n ; --n)
+ Emit8(Out, 0);
+}
+
+inline uint16_t ReadUnalignedLE16(const unsigned char *&Data) {
+ uint16_t V = ((uint16_t)Data[0]) |
+ ((uint16_t)Data[1] << 8);
+ Data += 2;
+ return V;
+}
+
+inline uint32_t ReadUnalignedLE32(const unsigned char *&Data) {
+ uint32_t V = ((uint32_t)Data[0]) |
+ ((uint32_t)Data[1] << 8) |
+ ((uint32_t)Data[2] << 16) |
+ ((uint32_t)Data[3] << 24);
+ Data += 4;
+ return V;
+}
+
+inline uint64_t ReadUnalignedLE64(const unsigned char *&Data) {
+ uint64_t V = ((uint64_t)Data[0]) |
+ ((uint64_t)Data[1] << 8) |
+ ((uint64_t)Data[2] << 16) |
+ ((uint64_t)Data[3] << 24) |
+ ((uint64_t)Data[4] << 32) |
+ ((uint64_t)Data[5] << 40) |
+ ((uint64_t)Data[6] << 48) |
+ ((uint64_t)Data[7] << 56);
+ Data += 8;
+ return V;
+}
+
+inline uint32_t ReadLE32(const unsigned char *&Data) {
+ // Hosts that directly support little-endian 32-bit loads can just
+ // use them. Big-endian hosts need a bswap.
+ uint32_t V = *((uint32_t*)Data);
+ if (llvm::sys::isBigEndianHost())
+ V = llvm::ByteSwap_32(V);
+ Data += 4;
+ return V;
+}
+
+} // end namespace io
+
+template<typename Info>
+class OnDiskChainedHashTableGenerator {
+ unsigned NumBuckets;
+ unsigned NumEntries;
+ llvm::BumpPtrAllocator BA;
+
+ class Item {
+ public:
+ typename Info::key_type key;
+ typename Info::data_type data;
+ Item *next;
+ const uint32_t hash;
+
+ Item(typename Info::key_type_ref k, typename Info::data_type_ref d,
+ Info &InfoObj)
+ : key(k), data(d), next(0), hash(InfoObj.ComputeHash(k)) {}
+ };
+
+ class Bucket {
+ public:
+ io::Offset off;
+ Item* head;
+ unsigned length;
+
+ Bucket() {}
+ };
+
+ Bucket* Buckets;
+
+private:
+ void insert(Bucket* b, size_t size, Item* E) {
+ unsigned idx = E->hash & (size - 1);
+ Bucket& B = b[idx];
+ E->next = B.head;
+ ++B.length;
+ B.head = E;
+ }
+
+ void resize(size_t newsize) {
+ Bucket* newBuckets = (Bucket*) std::calloc(newsize, sizeof(Bucket));
+ // Populate newBuckets with the old entries.
+ for (unsigned i = 0; i < NumBuckets; ++i)
+ for (Item* E = Buckets[i].head; E ; ) {
+ Item* N = E->next;
+ E->next = 0;
+ insert(newBuckets, newsize, E);
+ E = N;
+ }
+
+ free(Buckets);
+ NumBuckets = newsize;
+ Buckets = newBuckets;
+ }
+
+public:
+
+ void insert(typename Info::key_type_ref key,
+ typename Info::data_type_ref data) {
+ Info InfoObj;
+ insert(key, data, InfoObj);
+ }
+
+ void insert(typename Info::key_type_ref key,
+ typename Info::data_type_ref data, Info &InfoObj) {
+
+ ++NumEntries;
+ if (4*NumEntries >= 3*NumBuckets) resize(NumBuckets*2);
+ insert(Buckets, NumBuckets, new (BA.Allocate<Item>()) Item(key, data,
+ InfoObj));
+ }
+
+ io::Offset Emit(raw_ostream &out) {
+ Info InfoObj;
+ return Emit(out, InfoObj);
+ }
+
+ io::Offset Emit(raw_ostream &out, Info &InfoObj) {
+ using namespace clang::io;
+
+ // Emit the payload of the table.
+ for (unsigned i = 0; i < NumBuckets; ++i) {
+ Bucket& B = Buckets[i];
+ if (!B.head) continue;
+
+ // Store the offset for the data of this bucket.
+ B.off = out.tell();
+ assert(B.off && "Cannot write a bucket at offset 0. Please add padding.");
+
+ // Write out the number of items in the bucket.
+ Emit16(out, B.length);
+
+ // Write out the entries in the bucket.
+ for (Item *I = B.head; I ; I = I->next) {
+ Emit32(out, I->hash);
+ const std::pair<unsigned, unsigned>& Len =
+ InfoObj.EmitKeyDataLength(out, I->key, I->data);
+ InfoObj.EmitKey(out, I->key, Len.first);
+ InfoObj.EmitData(out, I->key, I->data, Len.second);
+ }
+ }
+
+ // Emit the hashtable itself.
+ Pad(out, 4);
+ io::Offset TableOff = out.tell();
+ Emit32(out, NumBuckets);
+ Emit32(out, NumEntries);
+ for (unsigned i = 0; i < NumBuckets; ++i) Emit32(out, Buckets[i].off);
+
+ return TableOff;
+ }
+
+ OnDiskChainedHashTableGenerator() {
+ NumEntries = 0;
+ NumBuckets = 64;
+ // Note that we do not need to run the constructors of the individual
+ // Bucket objects since 'calloc' returns bytes that are all 0.
+ Buckets = (Bucket*) std::calloc(NumBuckets, sizeof(Bucket));
+ }
+
+ ~OnDiskChainedHashTableGenerator() {
+ std::free(Buckets);
+ }
+};
+
+template<typename Info>
+class OnDiskChainedHashTable {
+ const unsigned NumBuckets;
+ const unsigned NumEntries;
+ const unsigned char* const Buckets;
+ const unsigned char* const Base;
+ Info InfoObj;
+
+public:
+ typedef typename Info::internal_key_type internal_key_type;
+ typedef typename Info::external_key_type external_key_type;
+ typedef typename Info::data_type data_type;
+
+ OnDiskChainedHashTable(unsigned numBuckets, unsigned numEntries,
+ const unsigned char* buckets,
+ const unsigned char* base,
+ const Info &InfoObj = Info())
+ : NumBuckets(numBuckets), NumEntries(numEntries),
+ Buckets(buckets), Base(base), InfoObj(InfoObj) {
+ assert((reinterpret_cast<uintptr_t>(buckets) & 0x3) == 0 &&
+ "'buckets' must have a 4-byte alignment");
+ }
+
+ unsigned getNumBuckets() const { return NumBuckets; }
+ unsigned getNumEntries() const { return NumEntries; }
+ const unsigned char* getBase() const { return Base; }
+ const unsigned char* getBuckets() const { return Buckets; }
+
+ bool isEmpty() const { return NumEntries == 0; }
+
+ class iterator {
+ internal_key_type key;
+ const unsigned char* const data;
+ const unsigned len;
+ Info *InfoObj;
+ public:
+ iterator() : data(0), len(0) {}
+ iterator(const internal_key_type k, const unsigned char* d, unsigned l,
+ Info *InfoObj)
+ : key(k), data(d), len(l), InfoObj(InfoObj) {}
+
+ data_type operator*() const { return InfoObj->ReadData(key, data, len); }
+ bool operator==(const iterator& X) const { return X.data == data; }
+ bool operator!=(const iterator& X) const { return X.data != data; }
+ };
+
+ iterator find(const external_key_type& eKey, Info *InfoPtr = 0) {
+ if (!InfoPtr)
+ InfoPtr = &InfoObj;
+
+ using namespace io;
+ const internal_key_type& iKey = InfoObj.GetInternalKey(eKey);
+ unsigned key_hash = InfoObj.ComputeHash(iKey);
+
+ // Each bucket is just a 32-bit offset into the hash table file.
+ unsigned idx = key_hash & (NumBuckets - 1);
+ const unsigned char* Bucket = Buckets + sizeof(uint32_t)*idx;
+
+ unsigned offset = ReadLE32(Bucket);
+ if (offset == 0) return iterator(); // Empty bucket.
+ const unsigned char* Items = Base + offset;
+
+ // 'Items' starts with a 16-bit unsigned integer representing the
+ // number of items in this bucket.
+ unsigned len = ReadUnalignedLE16(Items);
+
+ for (unsigned i = 0; i < len; ++i) {
+ // Read the hash.
+ uint32_t item_hash = ReadUnalignedLE32(Items);
+
+ // Determine the length of the key and the data.
+ const std::pair<unsigned, unsigned>& L = Info::ReadKeyDataLength(Items);
+ unsigned item_len = L.first + L.second;
+
+ // Compare the hashes. If they are not the same, skip the entry entirely.
+ if (item_hash != key_hash) {
+ Items += item_len;
+ continue;
+ }
+
+ // Read the key.
+ const internal_key_type& X =
+ InfoPtr->ReadKey((const unsigned char* const) Items, L.first);
+
+ // If the key doesn't match just skip reading the value.
+ if (!InfoPtr->EqualKey(X, iKey)) {
+ Items += item_len;
+ continue;
+ }
+
+ // The key matches!
+ return iterator(X, Items + L.first, L.second, InfoPtr);
+ }
+
+ return iterator();
+ }
+
+ iterator end() const { return iterator(); }
+
+ /// \brief Iterates over all of the keys in the table.
+ class key_iterator {
+ const unsigned char* Ptr;
+ unsigned NumItemsInBucketLeft;
+ unsigned NumEntriesLeft;
+ Info *InfoObj;
+ public:
+ typedef external_key_type value_type;
+
+ key_iterator(const unsigned char* const Ptr, unsigned NumEntries,
+ Info *InfoObj)
+ : Ptr(Ptr), NumItemsInBucketLeft(0), NumEntriesLeft(NumEntries),
+ InfoObj(InfoObj) { }
+ key_iterator()
+ : Ptr(0), NumItemsInBucketLeft(0), NumEntriesLeft(0), InfoObj(0) { }
+
+ friend bool operator==(const key_iterator &X, const key_iterator &Y) {
+ return X.NumEntriesLeft == Y.NumEntriesLeft;
+ }
+ friend bool operator!=(const key_iterator& X, const key_iterator &Y) {
+ return X.NumEntriesLeft != Y.NumEntriesLeft;
+ }
+
+ key_iterator& operator++() { // Preincrement
+ if (!NumItemsInBucketLeft) {
+ // 'Items' starts with a 16-bit unsigned integer representing the
+ // number of items in this bucket.
+ NumItemsInBucketLeft = io::ReadUnalignedLE16(Ptr);
+ }
+ Ptr += 4; // Skip the hash.
+ // Determine the length of the key and the data.
+ const std::pair<unsigned, unsigned>& L = Info::ReadKeyDataLength(Ptr);
+ Ptr += L.first + L.second;
+ assert(NumItemsInBucketLeft);
+ --NumItemsInBucketLeft;
+ assert(NumEntriesLeft);
+ --NumEntriesLeft;
+ return *this;
+ }
+ key_iterator operator++(int) { // Postincrement
+ key_iterator tmp = *this; ++*this; return tmp;
+ }
+
+ value_type operator*() const {
+ const unsigned char* LocalPtr = Ptr;
+ if (!NumItemsInBucketLeft)
+ LocalPtr += 2; // number of items in bucket
+ LocalPtr += 4; // Skip the hash.
+
+ // Determine the length of the key and the data.
+ const std::pair<unsigned, unsigned>& L
+ = Info::ReadKeyDataLength(LocalPtr);
+
+ // Read the key.
+ const internal_key_type& Key = InfoObj->ReadKey(LocalPtr, L.first);
+ return InfoObj->GetExternalKey(Key);
+ }
+ };
+
+ key_iterator key_begin() {
+ return key_iterator(Base + 4, getNumEntries(), &InfoObj);
+ }
+ key_iterator key_end() { return key_iterator(); }
+
+ /// \brief Iterates over all the entries in the table, returning
+ /// a key/data pair.
+ class item_iterator {
+ const unsigned char* Ptr;
+ unsigned NumItemsInBucketLeft;
+ unsigned NumEntriesLeft;
+ Info *InfoObj;
+ public:
+ typedef std::pair<external_key_type, data_type> value_type;
+
+ item_iterator(const unsigned char* const Ptr, unsigned NumEntries,
+ Info *InfoObj)
+ : Ptr(Ptr), NumItemsInBucketLeft(0), NumEntriesLeft(NumEntries),
+ InfoObj(InfoObj) { }
+ item_iterator()
+ : Ptr(0), NumItemsInBucketLeft(0), NumEntriesLeft(0), InfoObj(0) { }
+
+ bool operator==(const item_iterator& X) const {
+ return X.NumEntriesLeft == NumEntriesLeft;
+ }
+ bool operator!=(const item_iterator& X) const {
+ return X.NumEntriesLeft != NumEntriesLeft;
+ }
+
+ item_iterator& operator++() { // Preincrement
+ if (!NumItemsInBucketLeft) {
+ // 'Items' starts with a 16-bit unsigned integer representing the
+ // number of items in this bucket.
+ NumItemsInBucketLeft = io::ReadUnalignedLE16(Ptr);
+ }
+ Ptr += 4; // Skip the hash.
+ // Determine the length of the key and the data.
+ const std::pair<unsigned, unsigned>& L = Info::ReadKeyDataLength(Ptr);
+ Ptr += L.first + L.second;
+ assert(NumItemsInBucketLeft);
+ --NumItemsInBucketLeft;
+ assert(NumEntriesLeft);
+ --NumEntriesLeft;
+ return *this;
+ }
+ item_iterator operator++(int) { // Postincrement
+ item_iterator tmp = *this; ++*this; return tmp;
+ }
+
+ value_type operator*() const {
+ const unsigned char* LocalPtr = Ptr;
+ if (!NumItemsInBucketLeft)
+ LocalPtr += 2; // number of items in bucket
+ LocalPtr += 4; // Skip the hash.
+
+ // Determine the length of the key and the data.
+ const std::pair<unsigned, unsigned>& L = Info::ReadKeyDataLength(LocalPtr);
+
+ // Read the key.
+ const internal_key_type& Key =
+ InfoObj->ReadKey(LocalPtr, L.first);
+ return std::make_pair(InfoObj->GetExternalKey(Key),
+ InfoObj->ReadData(Key, LocalPtr + L.first, L.second));
+ }
+ };
+
+ item_iterator item_begin() {
+ return item_iterator(Base + 4, getNumEntries(), &InfoObj);
+ }
+ item_iterator item_end() { return item_iterator(); }
+
+ Info &getInfoObj() { return InfoObj; }
+
+ static OnDiskChainedHashTable* Create(const unsigned char* buckets,
+ const unsigned char* const base,
+ const Info &InfoObj = Info()) {
+ using namespace io;
+ assert(buckets > base);
+ assert((reinterpret_cast<uintptr_t>(buckets) & 0x3) == 0 &&
+ "buckets should be 4-byte aligned.");
+
+ unsigned numBuckets = ReadLE32(buckets);
+ unsigned numEntries = ReadLE32(buckets);
+ return new OnDiskChainedHashTable<Info>(numBuckets, numEntries, buckets,
+ base, InfoObj);
+ }
+};
+
+} // end namespace clang
+
+#endif
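
To make the Info-trait contract concrete, here is a hedged round-trip sketch: a hypothetical trait that maps string keys to 32-bit values, written out with OnDiskChainedHashTableGenerator and read back with OnDiskChainedHashTable. The trait name, keys, and buffer handling are illustrative; the only hard requirement is that the Emit* and Read* halves agree on the serialized layout.

#include "clang/Basic/OnDiskHashTable.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>
#include <string>
#include <utility>
#include <vector>

namespace {
// Hypothetical trait: StringRef keys, uint32_t payloads, with a 2-byte
// key-length prefix.  Emit* and Read* must mirror each other exactly.
struct StringToIndexTrait {
  typedef llvm::StringRef external_key_type;
  typedef llvm::StringRef internal_key_type;
  typedef llvm::StringRef key_type;
  typedef const llvm::StringRef &key_type_ref;
  typedef uint32_t data_type;
  typedef uint32_t data_type_ref;

  static unsigned ComputeHash(key_type_ref K) { return llvm::HashString(K); }
  static internal_key_type GetInternalKey(const external_key_type &K) {
    return K;
  }
  static bool EqualKey(key_type_ref A, key_type_ref B) { return A == B; }

  static std::pair<unsigned, unsigned>
  EmitKeyDataLength(llvm::raw_ostream &Out, key_type_ref K, data_type_ref) {
    unsigned KeyLen = K.size();
    clang::io::Emit16(Out, KeyLen);    // key-length prefix
    return std::make_pair(KeyLen, 4u); // the value is always 4 bytes
  }
  static void EmitKey(llvm::raw_ostream &Out, key_type_ref K, unsigned) {
    Out << K;
  }
  static void EmitData(llvm::raw_ostream &Out, key_type_ref, data_type_ref D,
                       unsigned) {
    clang::io::Emit32(Out, D);
  }

  static std::pair<unsigned, unsigned>
  ReadKeyDataLength(const unsigned char *&D) {
    unsigned KeyLen = clang::io::ReadUnalignedLE16(D);
    return std::make_pair(KeyLen, 4u);
  }
  static internal_key_type ReadKey(const unsigned char *D, unsigned N) {
    return llvm::StringRef(reinterpret_cast<const char *>(D), N);
  }
  static data_type ReadData(key_type_ref, const unsigned char *D, unsigned) {
    return clang::io::ReadUnalignedLE32(D);
  }
};
} // end anonymous namespace

int main() {
  // Write the table into an in-memory buffer.
  std::string Blob;
  llvm::raw_string_ostream OS(Blob);
  clang::io::Emit32(OS, 0); // padding: no bucket may start at offset 0

  clang::OnDiskChainedHashTableGenerator<StringToIndexTrait> Generator;
  Generator.insert("alpha", 10);
  Generator.insert("beta", 20);
  clang::io::Offset TableOff = Generator.Emit(OS);
  OS.flush();

  // The reader requires 4-byte alignment, so copy into an aligned buffer.
  std::vector<uint32_t> Aligned((Blob.size() + 3) / 4);
  std::memcpy(&Aligned[0], Blob.data(), Blob.size());
  const unsigned char *Base =
      reinterpret_cast<const unsigned char *>(&Aligned[0]);

  typedef clang::OnDiskChainedHashTable<StringToIndexTrait> Table;
  Table *T = Table::Create(Base + TableOff, Base);
  Table::iterator I = T->find("beta");
  if (I != T->end())
    llvm::outs() << "beta -> " << *I << "\n";
  delete T;
  return 0;
}
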
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/OpenCL.h b/contrib/llvm/tools/clang/include/clang/Basic/OpenCL.h
new file mode 100644
index 0000000..6f9785f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/OpenCL.h
@@ -0,0 +1,28 @@
+//===--- OpenCL.h - OpenCL enums --------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines some OpenCL-specific enums.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_OPENCL_H
+#define LLVM_CLANG_BASIC_OPENCL_H
+
+namespace clang {
+
+/// Names for the OpenCL image access qualifiers (OpenCL 1.1 6.6).
+enum OpenCLImageAccess {
+ CLIA_read_only = 1,
+ CLIA_write_only = 2,
+ CLIA_read_write = 3
+};
+
+}
+
+#endif
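
A small hedged sketch of how the qualifier enum might be mapped back to its OpenCL keyword; the helper name is hypothetical:

#include "clang/Basic/OpenCL.h"

// Hypothetical helper: map an image access qualifier to its source spelling.
static const char *getImageAccessSpelling(clang::OpenCLImageAccess Access) {
  switch (Access) {
  case clang::CLIA_read_only:  return "__read_only";
  case clang::CLIA_write_only: return "__write_only";
  case clang::CLIA_read_write: return "__read_write";
  }
  return ""; // unreachable for the values enumerated above
}
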
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/OpenCLExtensions.def b/contrib/llvm/tools/clang/include/clang/Basic/OpenCLExtensions.def
new file mode 100644
index 0000000..103fa83
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/OpenCLExtensions.def
@@ -0,0 +1,32 @@
+//===--- OpenCLExtensions.def - OpenCL extension list -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the list of supported OpenCL extensions.
+//
+//===----------------------------------------------------------------------===//
+
+// OpenCL 1.1.
+OPENCLEXT(cl_khr_fp64)
+OPENCLEXT(cl_khr_int64_base_atomics)
+OPENCLEXT(cl_khr_int64_extended_atomics)
+OPENCLEXT(cl_khr_fp16)
+OPENCLEXT(cl_khr_gl_sharing)
+OPENCLEXT(cl_khr_gl_event)
+OPENCLEXT(cl_khr_d3d10_sharing)
+OPENCLEXT(cl_khr_global_int32_base_atomics)
+OPENCLEXT(cl_khr_global_int32_extended_atomics)
+OPENCLEXT(cl_khr_local_int32_base_atomics)
+OPENCLEXT(cl_khr_local_int32_extended_atomics)
+OPENCLEXT(cl_khr_byte_addressable_store)
+OPENCLEXT(cl_khr_3d_image_writes)
+
+// Clang Extensions.
+OPENCLEXT(cl_clang_storage_class_specifiers)
+
+#undef OPENCLEXT
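
This .def file is consumed through the usual X-macro pattern: the includer defines OPENCLEXT, includes the file, and the file #undefs the macro at the end. A hedged sketch (the function name is hypothetical):

#include "llvm/Support/raw_ostream.h"

// Print the name of every known OpenCL extension, one per line.
static void printOpenCLExtensions(llvm::raw_ostream &OS) {
#define OPENCLEXT(Ext) OS << #Ext << "\n";
#include "clang/Basic/OpenCLExtensions.def"
}
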
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/OperatorKinds.def b/contrib/llvm/tools/clang/include/clang/Basic/OperatorKinds.def
new file mode 100644
index 0000000..d011e9d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/OperatorKinds.def
@@ -0,0 +1,106 @@
+//===--- OperatorKinds.def - C++ Overloaded Operator Database ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the OverloadedOperator database, which includes
+// all of the overloadable C++ operators.
+//
+//===----------------------------------------------------------------------===//
+//
+/// @file OperatorKinds.def
+///
+/// In this file, each of the overloadable C++ operators is enumerated
+/// with either the OVERLOADED_OPERATOR or OVERLOADED_OPERATOR_MULTI
+/// macro, each of which can be specified by the code including this
+/// file. OVERLOADED_OPERATOR is used for single-token operators
+/// (e.g., "+"), and has six arguments:
+///
+/// Name: The name of the token. OO_Name will be the name of the
+/// corresponding enumerator in OverloadedOperatorKind in
+/// OperatorKinds.h.
+///
+/// Spelling: A string that provides a canonical spelling for the
+/// operator, e.g., "operator+".
+///
+/// Token: The name of the token that specifies the operator, e.g.,
+/// "plus" for operator+ or "greatergreaterequal" for
+/// "operator>>=". With a "kw_" prefix, the token name can be used as
+/// an enumerator into the TokenKind enumeration.
+///
+/// Unary: True if the operator can be declared as a unary operator.
+///
+/// Binary: True if the operator can be declared as a binary
+/// operator. Note that some operators (e.g., "operator+" and
+/// "operator*") can be both unary and binary.
+///
+/// MemberOnly: True if this operator can only be declared as a
+/// non-static member function. False if the operator can be both a
+/// non-member function and a non-static member function.
+///
+/// OVERLOADED_OPERATOR_MULTI is used to enumerate the multi-token
+/// overloaded operator names, e.g., "operator delete []". The macro
+/// has all of the parameters of OVERLOADED_OPERATOR except Token,
+/// which is omitted.
+
+#ifndef OVERLOADED_OPERATOR
+# define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly)
+#endif
+
+#ifndef OVERLOADED_OPERATOR_MULTI
+# define OVERLOADED_OPERATOR_MULTI(Name,Spelling,Unary,Binary,MemberOnly) \
+ OVERLOADED_OPERATOR(Name,Spelling,unknown,Unary,Binary,MemberOnly)
+#endif
+
+OVERLOADED_OPERATOR_MULTI(New , "new" , true , true , false)
+OVERLOADED_OPERATOR_MULTI(Delete , "delete" , true , true , false)
+OVERLOADED_OPERATOR_MULTI(Array_New , "new[]" , true , true , false)
+OVERLOADED_OPERATOR_MULTI(Array_Delete , "delete[]" , true , true , false)
+OVERLOADED_OPERATOR(Plus , "+" , plus , true , true , false)
+OVERLOADED_OPERATOR(Minus , "-" , minus , true , true , false)
+OVERLOADED_OPERATOR(Star , "*" , star , true , true , false)
+OVERLOADED_OPERATOR(Slash , "/" , slash , false, true , false)
+OVERLOADED_OPERATOR(Percent , "%" , percent , false, true , false)
+OVERLOADED_OPERATOR(Caret , "^" , caret , false, true , false)
+OVERLOADED_OPERATOR(Amp , "&" , amp , true , true , false)
+OVERLOADED_OPERATOR(Pipe , "|" , pipe , false, true , false)
+OVERLOADED_OPERATOR(Tilde , "~" , tilde , true , false, false)
+OVERLOADED_OPERATOR(Exclaim , "!" , exclaim , true , false, false)
+OVERLOADED_OPERATOR(Equal , "=" , equal , false, true , true)
+OVERLOADED_OPERATOR(Less , "<" , less , false, true , false)
+OVERLOADED_OPERATOR(Greater , ">" , greater , false, true , false)
+OVERLOADED_OPERATOR(PlusEqual , "+=" , plusequal , false, true , false)
+OVERLOADED_OPERATOR(MinusEqual , "-=" , minusequal , false, true , false)
+OVERLOADED_OPERATOR(StarEqual , "*=" , starequal , false, true , false)
+OVERLOADED_OPERATOR(SlashEqual , "/=" , slashequal , false, true , false)
+OVERLOADED_OPERATOR(PercentEqual , "%=" , percentequal , false, true , false)
+OVERLOADED_OPERATOR(CaretEqual , "^=" , caretequal , false, true , false)
+OVERLOADED_OPERATOR(AmpEqual , "&=" , ampequal , false, true , false)
+OVERLOADED_OPERATOR(PipeEqual , "|=" , pipeequal , false, true , false)
+OVERLOADED_OPERATOR(LessLess , "<<" , lessless , false, true , false)
+OVERLOADED_OPERATOR(GreaterGreater , ">>" , greatergreater , false, true , false)
+OVERLOADED_OPERATOR(LessLessEqual , "<<=" , lesslessequal , false, true , false)
+OVERLOADED_OPERATOR(GreaterGreaterEqual , ">>=" , greatergreaterequal, false, true , false)
+OVERLOADED_OPERATOR(EqualEqual , "==" , equalequal , false, true , false)
+OVERLOADED_OPERATOR(ExclaimEqual , "!=" , exclaimequal , false, true , false)
+OVERLOADED_OPERATOR(LessEqual , "<=" , lessequal , false, true , false)
+OVERLOADED_OPERATOR(GreaterEqual , ">=" , greaterequal , false, true , false)
+OVERLOADED_OPERATOR(AmpAmp , "&&" , ampamp , false, true , false)
+OVERLOADED_OPERATOR(PipePipe , "||" , pipepipe , false, true , false)
+OVERLOADED_OPERATOR(PlusPlus , "++" , plusplus , true , true , false)
+OVERLOADED_OPERATOR(MinusMinus , "--" , minusminus , true , true , false)
+OVERLOADED_OPERATOR(Comma , "," , comma , false, true , false)
+OVERLOADED_OPERATOR(ArrowStar , "->*" , arrowstar , false, true , false)
+OVERLOADED_OPERATOR(Arrow , "->" , arrow , true , false, true)
+OVERLOADED_OPERATOR_MULTI(Call , "()" , true , true , true)
+OVERLOADED_OPERATOR_MULTI(Subscript , "[]" , false, true , true)
+// ?: can *not* be overloaded, but we need the overload
+// resolution machinery for it.
+OVERLOADED_OPERATOR_MULTI(Conditional , "?" , false, true , false)
+
+#undef OVERLOADED_OPERATOR_MULTI
+#undef OVERLOADED_OPERATOR
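
The same X-macro pattern applies here. As a hedged sketch, a spelling table indexed by the OverloadedOperatorKind enumerators that OperatorKinds.h (added below) generates from this file; it mirrors what getOperatorSpelling() provides, and the table name is hypothetical:

#include "clang/Basic/OperatorKinds.h"

// One canonical spelling per OverloadedOperatorKind value, starting with the
// empty string for OO_None; the array ends up with NUM_OVERLOADED_OPERATORS
// entries.
static const char *const OperatorSpellings[] = {
  "", // OO_None
#define OVERLOADED_OPERATOR(Name, Spelling, Token, Unary, Binary, MemberOnly) \
  Spelling,
#include "clang/Basic/OperatorKinds.def"
};
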
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/OperatorKinds.h b/contrib/llvm/tools/clang/include/clang/Basic/OperatorKinds.h
new file mode 100644
index 0000000..c0a9505
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/OperatorKinds.h
@@ -0,0 +1,35 @@
+//===--- OperatorKinds.h - C++ Overloaded Operators -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines C++ overloaded operators.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_OPERATOR_KINDS_H
+#define LLVM_CLANG_BASIC_OPERATOR_KINDS_H
+
+namespace clang {
+
+/// OverloadedOperatorKind - Enumeration specifying the different kinds of
+/// C++ overloaded operators.
+enum OverloadedOperatorKind {
+ OO_None, //< Not an overloaded operator
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ OO_##Name,
+#include "clang/Basic/OperatorKinds.def"
+ NUM_OVERLOADED_OPERATORS
+};
+
+/// \brief Retrieve the spelling of the given overloaded operator, without
+/// the preceding "operator" keyword.
+const char *getOperatorSpelling(OverloadedOperatorKind Operator);
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h
new file mode 100644
index 0000000..007e6a4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/PartialDiagnostic.h
@@ -0,0 +1,352 @@
+//===--- PartialDiagnostic.h - Diagnostic "closures" ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a partial diagnostic that can be emitted anywhere
+// in a DiagnosticBuilder stream.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_PARTIALDIAGNOSTIC_H
+#define LLVM_CLANG_PARTIALDIAGNOSTIC_H
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/DataTypes.h"
+#include <cassert>
+
+namespace clang {
+
+class PartialDiagnostic {
+public:
+ enum {
+ // The MaxArguments and MaxFixItHints member enum values from
+ // DiagnosticsEngine are private but DiagnosticsEngine declares
+ // PartialDiagnostic a friend. These enum values are redeclared
+ // here so that the nested Storage class below can access them.
+ MaxArguments = DiagnosticsEngine::MaxArguments
+ };
+
+ struct Storage {
+ Storage() : NumDiagArgs(0), NumDiagRanges(0) { }
+
+ enum {
+ /// MaxArguments - The maximum number of arguments we can hold. We
+ /// currently only support up to 10 arguments (%0-%9).
+ /// A single diagnostic with more than that almost certainly has to
+ /// be simplified anyway.
+ MaxArguments = PartialDiagnostic::MaxArguments
+ };
+
+ /// NumDiagArgs - This contains the number of entries in Arguments.
+ unsigned char NumDiagArgs;
+
+ /// NumDiagRanges - This is the number of ranges in the DiagRanges array.
+ unsigned char NumDiagRanges;
+
+ /// DiagArgumentsKind - This is an array of ArgumentKind::ArgumentKind enum
+ /// values, with one for each argument. This specifies whether the argument
+ /// is in DiagArgumentsStr or in DiagArgumentsVal.
+ unsigned char DiagArgumentsKind[MaxArguments];
+
+ /// DiagArgumentsVal - The values for the various substitution positions.
+ /// This is used when the argument is not an std::string. The specific value
+ /// is mangled into an intptr_t and the interpretation depends on exactly
+ /// what sort of argument kind it is.
+ intptr_t DiagArgumentsVal[MaxArguments];
+
+ /// \brief The values for the various substitution positions that have
+ /// string arguments.
+ std::string DiagArgumentsStr[MaxArguments];
+
+ /// DiagRanges - The list of ranges added to this diagnostic. It currently
+ /// only supports 10 ranges, but this could easily be extended if needed.
+ CharSourceRange DiagRanges[10];
+
+ /// FixItHints - If valid, provides a hint with some code
+ /// to insert, remove, or modify at a particular position.
+ SmallVector<FixItHint, 6> FixItHints;
+ };
+
+ /// \brief An allocator for Storage objects, which uses a small cache of
+ /// objects to reduce malloc()/free() traffic for partial diagnostics.
+ class StorageAllocator {
+ static const unsigned NumCached = 16;
+ Storage Cached[NumCached];
+ Storage *FreeList[NumCached];
+ unsigned NumFreeListEntries;
+
+ public:
+ StorageAllocator();
+ ~StorageAllocator();
+
+ /// \brief Allocate new storage.
+ Storage *Allocate() {
+ if (NumFreeListEntries == 0)
+ return new Storage;
+
+ Storage *Result = FreeList[--NumFreeListEntries];
+ Result->NumDiagArgs = 0;
+ Result->NumDiagRanges = 0;
+ Result->FixItHints.clear();
+ return Result;
+ }
+
+ /// \brief Free the given storage object.
+ void Deallocate(Storage *S) {
+ if (S >= Cached && S <= Cached + NumCached) {
+ FreeList[NumFreeListEntries++] = S;
+ return;
+ }
+
+ delete S;
+ }
+ };
+
+private:
+ // NOTE: Sema assumes that PartialDiagnostic is location-invariant
+ // in the sense that its bits can be safely memcpy'ed and destructed
+ // in the new location.
+
+ /// DiagID - The diagnostic ID.
+ mutable unsigned DiagID;
+
+ /// DiagStorage - Storage for args and ranges.
+ mutable Storage *DiagStorage;
+
+ /// \brief Allocator used to allocate storage for this diagnostic.
+ StorageAllocator *Allocator;
+
+ /// \brief Retrieve storage for this particular diagnostic.
+ Storage *getStorage() const {
+ if (DiagStorage)
+ return DiagStorage;
+
+ if (Allocator)
+ DiagStorage = Allocator->Allocate();
+ else {
+ assert(Allocator != reinterpret_cast<StorageAllocator *>(~uintptr_t(0)));
+ DiagStorage = new Storage;
+ }
+ return DiagStorage;
+ }
+
+ void freeStorage() {
+ if (!DiagStorage)
+ return;
+
+ // The hot path for PartialDiagnostic is when we just used it to wrap an ID
+ // (typically so we have the flexibility of passing a more complex
+ // diagnostic into the callee, but that does not commonly occur).
+ //
+ // Split this out into a slow function for silly compilers (*cough*) which
+ // can't do decent partial inlining.
+ freeStorageSlow();
+ }
+
+ void freeStorageSlow() {
+ if (Allocator)
+ Allocator->Deallocate(DiagStorage);
+ else if (Allocator != reinterpret_cast<StorageAllocator *>(~uintptr_t(0)))
+ delete DiagStorage;
+ DiagStorage = 0;
+ }
+
+ void AddSourceRange(const CharSourceRange &R) const {
+ if (!DiagStorage)
+ DiagStorage = getStorage();
+
+ assert(DiagStorage->NumDiagRanges <
+ llvm::array_lengthof(DiagStorage->DiagRanges) &&
+ "Too many arguments to diagnostic!");
+ DiagStorage->DiagRanges[DiagStorage->NumDiagRanges++] = R;
+ }
+
+ void AddFixItHint(const FixItHint &Hint) const {
+ if (Hint.isNull())
+ return;
+
+ if (!DiagStorage)
+ DiagStorage = getStorage();
+
+ DiagStorage->FixItHints.push_back(Hint);
+ }
+
+public:
+ PartialDiagnostic(unsigned DiagID, StorageAllocator &Allocator)
+ : DiagID(DiagID), DiagStorage(0), Allocator(&Allocator) { }
+
+ PartialDiagnostic(const PartialDiagnostic &Other)
+ : DiagID(Other.DiagID), DiagStorage(0), Allocator(Other.Allocator)
+ {
+ if (Other.DiagStorage) {
+ DiagStorage = getStorage();
+ *DiagStorage = *Other.DiagStorage;
+ }
+ }
+
+ PartialDiagnostic(const PartialDiagnostic &Other, Storage *DiagStorage)
+ : DiagID(Other.DiagID), DiagStorage(DiagStorage),
+ Allocator(reinterpret_cast<StorageAllocator *>(~uintptr_t(0)))
+ {
+ if (Other.DiagStorage)
+ *this->DiagStorage = *Other.DiagStorage;
+ }
+
+ PartialDiagnostic(const Diagnostic &Other, StorageAllocator &Allocator)
+ : DiagID(Other.getID()), DiagStorage(0), Allocator(&Allocator)
+ {
+ // Copy arguments.
+ for (unsigned I = 0, N = Other.getNumArgs(); I != N; ++I) {
+ if (Other.getArgKind(I) == DiagnosticsEngine::ak_std_string)
+ AddString(Other.getArgStdStr(I));
+ else
+ AddTaggedVal(Other.getRawArg(I), Other.getArgKind(I));
+ }
+
+ // Copy source ranges.
+ for (unsigned I = 0, N = Other.getNumRanges(); I != N; ++I)
+ AddSourceRange(Other.getRange(I));
+
+ // Copy fix-its.
+ for (unsigned I = 0, N = Other.getNumFixItHints(); I != N; ++I)
+ AddFixItHint(Other.getFixItHint(I));
+ }
+
+ PartialDiagnostic &operator=(const PartialDiagnostic &Other) {
+ DiagID = Other.DiagID;
+ if (Other.DiagStorage) {
+ if (!DiagStorage)
+ DiagStorage = getStorage();
+
+ *DiagStorage = *Other.DiagStorage;
+ } else {
+ freeStorage();
+ }
+
+ return *this;
+ }
+
+ ~PartialDiagnostic() {
+ freeStorage();
+ }
+
+ unsigned getDiagID() const { return DiagID; }
+
+ void AddTaggedVal(intptr_t V, DiagnosticsEngine::ArgumentKind Kind) const {
+ if (!DiagStorage)
+ DiagStorage = getStorage();
+
+ assert(DiagStorage->NumDiagArgs < Storage::MaxArguments &&
+ "Too many arguments to diagnostic!");
+ DiagStorage->DiagArgumentsKind[DiagStorage->NumDiagArgs] = Kind;
+ DiagStorage->DiagArgumentsVal[DiagStorage->NumDiagArgs++] = V;
+ }
+
+ void AddString(StringRef V) const {
+ if (!DiagStorage)
+ DiagStorage = getStorage();
+
+ assert(DiagStorage->NumDiagArgs < Storage::MaxArguments &&
+ "Too many arguments to diagnostic!");
+ DiagStorage->DiagArgumentsKind[DiagStorage->NumDiagArgs]
+ = DiagnosticsEngine::ak_std_string;
+ DiagStorage->DiagArgumentsStr[DiagStorage->NumDiagArgs++] = V;
+ }
+
+ void Emit(const DiagnosticBuilder &DB) const {
+ if (!DiagStorage)
+ return;
+
+ // Add all arguments.
+ for (unsigned i = 0, e = DiagStorage->NumDiagArgs; i != e; ++i) {
+ if ((DiagnosticsEngine::ArgumentKind)DiagStorage->DiagArgumentsKind[i]
+ == DiagnosticsEngine::ak_std_string)
+ DB.AddString(DiagStorage->DiagArgumentsStr[i]);
+ else
+ DB.AddTaggedVal(DiagStorage->DiagArgumentsVal[i],
+ (DiagnosticsEngine::ArgumentKind)DiagStorage->DiagArgumentsKind[i]);
+ }
+
+ // Add all ranges.
+ for (unsigned i = 0, e = DiagStorage->NumDiagRanges; i != e; ++i)
+ DB.AddSourceRange(DiagStorage->DiagRanges[i]);
+
+ // Add all fix-its.
+ for (unsigned i = 0, e = DiagStorage->FixItHints.size(); i != e; ++i)
+ DB.AddFixItHint(DiagStorage->FixItHints[i]);
+ }
+
+ /// \brief Clear out this partial diagnostic, giving it a new diagnostic ID
+ /// and removing all of its arguments, ranges, and fix-it hints.
+ void Reset(unsigned DiagID = 0) {
+ this->DiagID = DiagID;
+ freeStorage();
+ }
+
+ bool hasStorage() const { return DiagStorage != 0; }
+
+ friend const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
+ unsigned I) {
+ PD.AddTaggedVal(I, DiagnosticsEngine::ak_uint);
+ return PD;
+ }
+
+ friend const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
+ int I) {
+ PD.AddTaggedVal(I, DiagnosticsEngine::ak_sint);
+ return PD;
+ }
+
+ friend inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
+ const char *S) {
+ PD.AddTaggedVal(reinterpret_cast<intptr_t>(S),
+ DiagnosticsEngine::ak_c_string);
+ return PD;
+ }
+
+ friend inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
+ StringRef S) {
+
+ PD.AddString(S);
+ return PD;
+ }
+
+ friend inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
+ const SourceRange &R) {
+ PD.AddSourceRange(CharSourceRange::getTokenRange(R));
+ return PD;
+ }
+
+ friend inline const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
+ const CharSourceRange &R) {
+ PD.AddSourceRange(R);
+ return PD;
+ }
+
+ friend const PartialDiagnostic &operator<<(const PartialDiagnostic &PD,
+ const FixItHint &Hint) {
+ PD.AddFixItHint(Hint);
+ return PD;
+ }
+
+};
+
+inline const DiagnosticBuilder &operator<<(const DiagnosticBuilder &DB,
+ const PartialDiagnostic &PD) {
+ PD.Emit(DB);
+ return DB;
+}
+
+/// \brief A partial diagnostic along with the source location where this
+/// diagnostic occurs.
+typedef std::pair<SourceLocation, PartialDiagnostic> PartialDiagnosticAt;
+
+} // end namespace clang
+#endif
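
A hedged sketch of the intended usage pattern: build a PartialDiagnostic when no source location is available yet, then replay it into a DiagnosticBuilder once one is. The diagnostic ID, allocator, engine, and location are assumed to come from the surrounding compiler context:

#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/StringRef.h"

// Record a diagnostic now; the arguments are stored, not formatted yet.
static clang::PartialDiagnostic
describeProblem(unsigned DiagID, // assumed: a real diagnostic ID
                clang::PartialDiagnostic::StorageAllocator &Allocator,
                llvm::StringRef Name) {
  clang::PartialDiagnostic PD(DiagID, Allocator);
  PD << Name << 42u;
  return PD;
}

// Emit it later, once a location is known.  The operator<< defined above
// replays the stored arguments, ranges, and fix-its into the builder.
static void reportLater(clang::DiagnosticsEngine &Diags,
                        clang::SourceLocation Loc,
                        const clang::PartialDiagnostic &PD) {
  Diags.Report(Loc, PD.getDiagID()) << PD;
}
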
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/PrettyStackTrace.h b/contrib/llvm/tools/clang/include/clang/Basic/PrettyStackTrace.h
new file mode 100644
index 0000000..06a1264
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/PrettyStackTrace.h
@@ -0,0 +1,37 @@
+//===- clang/Basic/PrettyStackTrace.h - Pretty Crash Handling --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PrettyStackTraceEntry class, which is used to make
+// crashes give more contextual information about what the program was doing
+// when it crashed.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_BASIC_PRETTYSTACKTRACE_H
+#define CLANG_BASIC_PRETTYSTACKTRACE_H
+
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/Support/PrettyStackTrace.h"
+
+namespace clang {
+
+ /// PrettyStackTraceLoc - If a crash happens while one of these objects is
+ /// live, the message is printed out along with the specified source location.
+ class PrettyStackTraceLoc : public llvm::PrettyStackTraceEntry {
+ SourceManager &SM;
+ SourceLocation Loc;
+ const char *Message;
+ public:
+ PrettyStackTraceLoc(SourceManager &sm, SourceLocation L, const char *Msg)
+ : SM(sm), Loc(L), Message(Msg) {}
+ virtual void print(raw_ostream &OS) const;
+ };
+}
+
+#endif
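
A brief hedged sketch of the intended use: construct one of these objects on the stack around work that might crash, so the message and source location show up in the crash report. The function name and message are hypothetical:

#include "clang/Basic/PrettyStackTrace.h"

static void processDeclaration(clang::SourceManager &SM,
                               clang::SourceLocation Loc) {
  // While CrashInfo is live, any crash prints "processing declaration"
  // together with Loc as part of the pretty stack trace.
  clang::PrettyStackTraceLoc CrashInfo(SM, Loc, "processing declaration");
  // ... work that might crash goes here ...
}
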
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h b/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h
new file mode 100644
index 0000000..d5fa7e7
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/SourceLocation.h
@@ -0,0 +1,426 @@
+//===--- SourceLocation.h - Compact identifier for Source Files -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SourceLocation class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SOURCELOCATION_H
+#define LLVM_CLANG_SOURCELOCATION_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/Support/PointerLikeTypeTraits.h"
+#include "llvm/Support/Compiler.h"
+#include <utility>
+#include <functional>
+#include <cassert>
+
+namespace llvm {
+ class MemoryBuffer;
+ template <typename T> struct DenseMapInfo;
+ template <typename T> struct isPodLike;
+}
+
+namespace clang {
+
+class SourceManager;
+
+/// FileID - This is an opaque identifier used by SourceManager which refers to
+/// a source file (MemoryBuffer) along with its #include path and #line data.
+///
+class FileID {
+ /// ID - Opaque identifier, 0 is "invalid". >0 is this module, <-1 is
+ /// something loaded from another module.
+ int ID;
+public:
+ FileID() : ID(0) {}
+
+ bool isInvalid() const { return ID == 0; }
+
+ bool operator==(const FileID &RHS) const { return ID == RHS.ID; }
+ bool operator<(const FileID &RHS) const { return ID < RHS.ID; }
+ bool operator<=(const FileID &RHS) const { return ID <= RHS.ID; }
+ bool operator!=(const FileID &RHS) const { return !(*this == RHS); }
+ bool operator>(const FileID &RHS) const { return RHS < *this; }
+ bool operator>=(const FileID &RHS) const { return RHS <= *this; }
+
+ static FileID getSentinel() { return get(-1); }
+ unsigned getHashValue() const { return static_cast<unsigned>(ID); }
+
+private:
+ friend class SourceManager;
+ friend class ASTWriter;
+ friend class ASTReader;
+
+ static FileID get(int V) {
+ FileID F;
+ F.ID = V;
+ return F;
+ }
+ int getOpaqueValue() const { return ID; }
+};
+
+
+/// \brief Encodes a location in the source. The SourceManager can decode this
+/// to get at the full include stack, line and column information.
+///
+/// Technically, a source location is simply an offset into the manager's view
+/// of the input source, which is all input buffers (including macro
+/// expansions) concatenated in an effectively arbitrary order. The manager
+/// actually maintains two blocks of input buffers. One, starting at offset
+/// 0 and growing upwards, contains all buffers from this module. The other,
+/// starting at the highest possible offset and growing downwards, contains
+/// buffers of loaded modules.
+///
+/// In addition, one bit of SourceLocation is used for quick access to the
+/// information whether the location is in a file or a macro expansion.
+///
+/// It is important that this type remains small. It is currently 32 bits wide.
+class SourceLocation {
+ unsigned ID;
+ friend class SourceManager;
+ friend class ASTReader;
+ friend class ASTWriter;
+ enum {
+ MacroIDBit = 1U << 31
+ };
+public:
+
+ SourceLocation() : ID(0) {}
+
+ bool isFileID() const { return (ID & MacroIDBit) == 0; }
+ bool isMacroID() const { return (ID & MacroIDBit) != 0; }
+
+ /// \brief Return true if this is a valid SourceLocation object.
+ ///
+ /// Invalid SourceLocations are often used when events have no corresponding
+ /// location in the source (e.g. a diagnostic is required for a command line
+ /// option).
+ bool isValid() const { return ID != 0; }
+ bool isInvalid() const { return ID == 0; }
+
+private:
+ /// \brief Return the offset into the manager's global input view.
+ unsigned getOffset() const {
+ return ID & ~MacroIDBit;
+ }
+
+ static SourceLocation getFileLoc(unsigned ID) {
+ assert((ID & MacroIDBit) == 0 && "Ran out of source locations!");
+ SourceLocation L;
+ L.ID = ID;
+ return L;
+ }
+
+ static SourceLocation getMacroLoc(unsigned ID) {
+ assert((ID & MacroIDBit) == 0 && "Ran out of source locations!");
+ SourceLocation L;
+ L.ID = MacroIDBit | ID;
+ return L;
+ }
+public:
+
+ /// \brief Return a source location with the specified offset from this
+ /// SourceLocation.
+ SourceLocation getLocWithOffset(int Offset) const {
+ assert(((getOffset()+Offset) & MacroIDBit) == 0 && "offset overflow");
+ SourceLocation L;
+ L.ID = ID+Offset;
+ return L;
+ }
+
+ /// getRawEncoding - When a SourceLocation itself cannot be used, this returns
+ /// an (opaque) 32-bit integer encoding for it. This should only be passed
+ /// to SourceLocation::getFromRawEncoding, it should not be inspected
+ /// directly.
+ unsigned getRawEncoding() const { return ID; }
+
+ /// getFromRawEncoding - Turn a raw encoding of a SourceLocation object into
+ /// a real SourceLocation.
+ static SourceLocation getFromRawEncoding(unsigned Encoding) {
+ SourceLocation X;
+ X.ID = Encoding;
+ return X;
+ }
+
+ /// getPtrEncoding - When a SourceLocation itself cannot be used, this returns
+ /// an (opaque) pointer encoding for it. This should only be passed
+ /// to SourceLocation::getFromPtrEncoding, it should not be inspected
+ /// directly.
+ void* getPtrEncoding() const {
+ // Double cast to avoid a warning "cast to pointer from integer of different
+ // size".
+ return (void*)(uintptr_t)getRawEncoding();
+ }
+
+ /// getFromPtrEncoding - Turn a pointer encoding of a SourceLocation object
+ /// into a real SourceLocation.
+ static SourceLocation getFromPtrEncoding(void *Encoding) {
+ return getFromRawEncoding((unsigned)(uintptr_t)Encoding);
+ }
+
+ void print(raw_ostream &OS, const SourceManager &SM) const;
+ void dump(const SourceManager &SM) const;
+};
+
+inline bool operator==(const SourceLocation &LHS, const SourceLocation &RHS) {
+ return LHS.getRawEncoding() == RHS.getRawEncoding();
+}
+
+inline bool operator!=(const SourceLocation &LHS, const SourceLocation &RHS) {
+ return !(LHS == RHS);
+}
+
+inline bool operator<(const SourceLocation &LHS, const SourceLocation &RHS) {
+ return LHS.getRawEncoding() < RHS.getRawEncoding();
+}
+
+/// SourceRange - a trivial tuple used to represent a source range.
+class SourceRange {
+ SourceLocation B;
+ SourceLocation E;
+public:
+ SourceRange(): B(SourceLocation()), E(SourceLocation()) {}
+ SourceRange(SourceLocation loc) : B(loc), E(loc) {}
+ SourceRange(SourceLocation begin, SourceLocation end) : B(begin), E(end) {}
+
+ SourceLocation getBegin() const { return B; }
+ SourceLocation getEnd() const { return E; }
+
+ void setBegin(SourceLocation b) { B = b; }
+ void setEnd(SourceLocation e) { E = e; }
+
+ bool isValid() const { return B.isValid() && E.isValid(); }
+ bool isInvalid() const { return !isValid(); }
+
+ bool operator==(const SourceRange &X) const {
+ return B == X.B && E == X.E;
+ }
+
+ bool operator!=(const SourceRange &X) const {
+ return B != X.B || E != X.E;
+ }
+};
+
+/// CharSourceRange - This class represents a character granular source range.
+/// The underlying SourceRange can either specify the starting/ending character
+/// of the range, or it can specify the start of the range and the start of the
+/// last token of the range (a "token range"). In the token range case, the
+/// size of the last token must be measured to determine the actual end of the
+/// range.
+class CharSourceRange {
+ SourceRange Range;
+ bool IsTokenRange;
+public:
+ CharSourceRange() : IsTokenRange(false) {}
+ CharSourceRange(SourceRange R, bool ITR) : Range(R),IsTokenRange(ITR){}
+
+ static CharSourceRange getTokenRange(SourceRange R) {
+ CharSourceRange Result;
+ Result.Range = R;
+ Result.IsTokenRange = true;
+ return Result;
+ }
+
+ static CharSourceRange getCharRange(SourceRange R) {
+ CharSourceRange Result;
+ Result.Range = R;
+ Result.IsTokenRange = false;
+ return Result;
+ }
+
+ static CharSourceRange getTokenRange(SourceLocation B, SourceLocation E) {
+ return getTokenRange(SourceRange(B, E));
+ }
+ static CharSourceRange getCharRange(SourceLocation B, SourceLocation E) {
+ return getCharRange(SourceRange(B, E));
+ }
+
+ /// isTokenRange - Return true if the end of this range specifies the start of
+ /// the last token. Return false if the end of this range specifies the last
+ /// character in the range.
+ bool isTokenRange() const { return IsTokenRange; }
+ bool isCharRange() const { return !IsTokenRange; }
+
+ SourceLocation getBegin() const { return Range.getBegin(); }
+ SourceLocation getEnd() const { return Range.getEnd(); }
+ const SourceRange &getAsRange() const { return Range; }
+
+ void setBegin(SourceLocation b) { Range.setBegin(b); }
+ void setEnd(SourceLocation e) { Range.setEnd(e); }
+
+ bool isValid() const { return Range.isValid(); }
+ bool isInvalid() const { return !isValid(); }
+};
+
+/// FullSourceLoc - A SourceLocation and its associated SourceManager. Useful
+/// for argument passing to functions that expect both objects.
+class FullSourceLoc : public SourceLocation {
+ const SourceManager *SrcMgr;
+public:
+ /// Creates a FullSourceLoc where isValid() returns false.
+ explicit FullSourceLoc() : SrcMgr(0) {}
+
+ explicit FullSourceLoc(SourceLocation Loc, const SourceManager &SM)
+ : SourceLocation(Loc), SrcMgr(&SM) {}
+
+ const SourceManager &getManager() const {
+ assert(SrcMgr && "SourceManager is NULL.");
+ return *SrcMgr;
+ }
+
+ FileID getFileID() const;
+
+ FullSourceLoc getExpansionLoc() const;
+ FullSourceLoc getSpellingLoc() const;
+
+ unsigned getExpansionLineNumber(bool *Invalid = 0) const;
+ unsigned getExpansionColumnNumber(bool *Invalid = 0) const;
+
+ unsigned getSpellingLineNumber(bool *Invalid = 0) const;
+ unsigned getSpellingColumnNumber(bool *Invalid = 0) const;
+
+ const char *getCharacterData(bool *Invalid = 0) const;
+
+ const llvm::MemoryBuffer* getBuffer(bool *Invalid = 0) const;
+
+ /// getBufferData - Return a StringRef to the source buffer data for the
+ /// specified FileID.
+ StringRef getBufferData(bool *Invalid = 0) const;
+
+ /// getDecomposedLoc - Decompose the specified location into a raw FileID +
+ /// Offset pair. The first element is the FileID, the second is the
+ /// offset from the start of the buffer of the location.
+ std::pair<FileID, unsigned> getDecomposedLoc() const;
+
+ bool isInSystemHeader() const;
+
+ /// \brief Determines the order of 2 source locations in the translation unit.
+ ///
+ /// \returns true if this source location comes before 'Loc', false otherwise.
+ bool isBeforeInTranslationUnitThan(SourceLocation Loc) const;
+
+ /// \brief Determines the order of 2 source locations in the translation unit.
+ ///
+ /// \returns true if this source location comes before 'Loc', false otherwise.
+ bool isBeforeInTranslationUnitThan(FullSourceLoc Loc) const {
+ assert(Loc.isValid());
+ assert(SrcMgr == Loc.SrcMgr && "Loc comes from another SourceManager!");
+ return isBeforeInTranslationUnitThan((SourceLocation)Loc);
+ }
+
+ /// \brief Comparison function class, useful for sorting FullSourceLocs.
+ struct BeforeThanCompare : public std::binary_function<FullSourceLoc,
+ FullSourceLoc, bool> {
+ bool operator()(const FullSourceLoc& lhs, const FullSourceLoc& rhs) const {
+ return lhs.isBeforeInTranslationUnitThan(rhs);
+ }
+ };
+
+ /// Prints information about this FullSourceLoc to stderr. Useful for
+ /// debugging.
+ LLVM_ATTRIBUTE_USED void dump() const;
+
+ friend inline bool
+ operator==(const FullSourceLoc &LHS, const FullSourceLoc &RHS) {
+ return LHS.getRawEncoding() == RHS.getRawEncoding() &&
+ LHS.SrcMgr == RHS.SrcMgr;
+ }
+
+ friend inline bool
+ operator!=(const FullSourceLoc &LHS, const FullSourceLoc &RHS) {
+ return !(LHS == RHS);
+ }
+
+};
+
+/// PresumedLoc - This class represents an unpacked "presumed" location which
+/// can be presented to the user. A 'presumed' location can be modified by
+/// #line and GNU line marker directives and is always the expansion point of
+/// a normal location.
+///
+/// You can get a PresumedLoc from a SourceLocation with SourceManager.
+class PresumedLoc {
+ const char *Filename;
+ unsigned Line, Col;
+ SourceLocation IncludeLoc;
+public:
+ PresumedLoc() : Filename(0) {}
+ PresumedLoc(const char *FN, unsigned Ln, unsigned Co, SourceLocation IL)
+ : Filename(FN), Line(Ln), Col(Co), IncludeLoc(IL) {
+ }
+
+ /// isInvalid - Return true if this object is invalid or uninitialized. This
+ /// occurs when created with invalid source locations or when walking off
+ /// the top of a #include stack.
+ bool isInvalid() const { return Filename == 0; }
+ bool isValid() const { return Filename != 0; }
+
+ /// getFilename - Return the presumed filename of this location. This can be
+ /// affected by #line etc.
+ const char *getFilename() const { return Filename; }
+
+ /// getLine - Return the presumed line number of this location. This can be
+ /// affected by #line etc.
+ unsigned getLine() const { return Line; }
+
+ /// getColumn - Return the presumed column number of this location. This can
+ /// not be affected by #line, but is packaged here for convenience.
+ unsigned getColumn() const { return Col; }
+
+ /// getIncludeLoc - Return the presumed include location of this location.
+ /// This can be affected by GNU linemarker directives.
+ SourceLocation getIncludeLoc() const { return IncludeLoc; }
+};
+
+
+} // end namespace clang
+
+namespace llvm {
+  /// Define DenseMapInfo so that FileIDs can be used as keys in DenseMaps and
+  /// DenseSets.
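+  ///
+  /// For example (illustrative only):
+  /// \code
+  ///   llvm::DenseMap<clang::FileID, unsigned> VisitCount;
+  /// \endcode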
+ template <>
+ struct DenseMapInfo<clang::FileID> {
+ static inline clang::FileID getEmptyKey() {
+ return clang::FileID();
+ }
+ static inline clang::FileID getTombstoneKey() {
+ return clang::FileID::getSentinel();
+ }
+
+ static unsigned getHashValue(clang::FileID S) {
+ return S.getHashValue();
+ }
+
+ static bool isEqual(clang::FileID LHS, clang::FileID RHS) {
+ return LHS == RHS;
+ }
+ };
+
+ template <>
+ struct isPodLike<clang::SourceLocation> { static const bool value = true; };
+ template <>
+ struct isPodLike<clang::FileID> { static const bool value = true; };
+
+ // Teach SmallPtrSet how to handle SourceLocation.
+ template<>
+ class PointerLikeTypeTraits<clang::SourceLocation> {
+ public:
+ static inline void *getAsVoidPointer(clang::SourceLocation L) {
+ return L.getPtrEncoding();
+ }
+ static inline clang::SourceLocation getFromVoidPointer(void *P) {
+ return clang::SourceLocation::getFromRawEncoding((unsigned)(uintptr_t)P);
+ }
+ enum { NumLowBitsAvailable = 0 };
+ };
+
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h b/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h
new file mode 100644
index 0000000..bcb2d56
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/SourceManager.h
@@ -0,0 +1,1402 @@
+//===--- SourceManager.h - Track and cache source files ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SourceManager interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SOURCEMANAGER_H
+#define LLVM_CLANG_SOURCEMANAGER_H
+
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <map>
+#include <vector>
+#include <cassert>
+
+namespace clang {
+
+class DiagnosticsEngine;
+class SourceManager;
+class FileManager;
+class FileEntry;
+class LineTableInfo;
+class LangOptions;
+class ASTWriter;
+class ASTReader;
+
+/// There are three different types of locations in a file: a spelling
+/// location, an expansion location, and a presumed location.
+///
+/// For example, given:
+/// #define min(x, y) x < y ? x : y
+///
+/// and then later on a use of min:
+/// #line 17
+/// return min(a, b);
+///
+/// The expansion location is the line in the source code where the macro
+/// was expanded (the return statement), the spelling location is the
+/// location in the source where the macro was originally defined,
+/// and the presumed location is where the line directive states that
+/// the line is 17, or any other line.
+
+/// SrcMgr - Public enums and private classes that are part of the
+/// SourceManager implementation.
+///
+namespace SrcMgr {
+ /// CharacteristicKind - This is used to represent whether a file or directory
+ /// holds normal user code, system code, or system code which is implicitly
+ /// 'extern "C"' in C++ mode. Entire directories can be tagged with this
+ /// (this is maintained by DirectoryLookup and friends) as can specific
+  /// FileInfos when a #pragma system_header is seen or in various other cases.
+ ///
+ enum CharacteristicKind {
+ C_User, C_System, C_ExternCSystem
+ };
+
+ /// ContentCache - One instance of this struct is kept for every file
+ /// loaded or used. This object owns the MemoryBuffer object.
+ class ContentCache {
+ enum CCFlags {
+ /// \brief Whether the buffer is invalid.
+ InvalidFlag = 0x01,
+ /// \brief Whether the buffer should not be freed on destruction.
+ DoNotFreeFlag = 0x02
+ };
+
+ /// Buffer - The actual buffer containing the characters from the input
+ /// file. This is owned by the ContentCache object.
+    /// The bits indicate whether the buffer is invalid and whether it should
+    /// not be freed on destruction.
+ mutable llvm::PointerIntPair<const llvm::MemoryBuffer *, 2> Buffer;
+
+ public:
+ /// Reference to the file entry representing this ContentCache.
+ /// This reference does not own the FileEntry object.
+ /// It is possible for this to be NULL if
+ /// the ContentCache encapsulates an imaginary text buffer.
+ const FileEntry *OrigEntry;
+
+    /// \brief References the file from which the contents were actually loaded.
+    /// Can be different from 'OrigEntry' if we have overridden the contents of
+    /// one file with the contents of another file.
+ const FileEntry *ContentsEntry;
+
+ /// SourceLineCache - A bump pointer allocated array of offsets for each
+ /// source line. This is lazily computed. This is owned by the
+ /// SourceManager BumpPointerAllocator object.
+ unsigned *SourceLineCache;
+
+ /// NumLines - The number of lines in this ContentCache. This is only valid
+ /// if SourceLineCache is non-null.
+ unsigned NumLines : 31;
+
+ /// \brief Indicates whether the buffer itself was provided to override
+ /// the actual file contents.
+ ///
+ /// When true, the original entry may be a virtual file that does not
+ /// exist.
+ unsigned BufferOverridden : 1;
+
+ ContentCache(const FileEntry *Ent = 0)
+ : Buffer(0, false), OrigEntry(Ent), ContentsEntry(Ent),
+ SourceLineCache(0), NumLines(0), BufferOverridden(false) {}
+
+ ContentCache(const FileEntry *Ent, const FileEntry *contentEnt)
+ : Buffer(0, false), OrigEntry(Ent), ContentsEntry(contentEnt),
+ SourceLineCache(0), NumLines(0), BufferOverridden(false) {}
+
+ ~ContentCache();
+
+    /// The copy ctor does not allow copies where the source object has either
+    /// a non-NULL Buffer or SourceLineCache. Ownership of allocated memory
+    /// is not transferred, so this would be a logical error.
+ ContentCache(const ContentCache &RHS)
+ : Buffer(0, false), SourceLineCache(0), BufferOverridden(false)
+ {
+ OrigEntry = RHS.OrigEntry;
+ ContentsEntry = RHS.ContentsEntry;
+
+ assert (RHS.Buffer.getPointer() == 0 && RHS.SourceLineCache == 0 &&
+ "Passed ContentCache object cannot own a buffer.");
+
+ NumLines = RHS.NumLines;
+ }
+
+ /// getBuffer - Returns the memory buffer for the associated content.
+ ///
+ /// \param Diag Object through which diagnostics will be emitted if the
+ /// buffer cannot be retrieved.
+ ///
+ /// \param Loc If specified, is the location that invalid file diagnostics
+ /// will be emitted at.
+ ///
+ /// \param Invalid If non-NULL, will be set \c true if an error occurred.
+ const llvm::MemoryBuffer *getBuffer(DiagnosticsEngine &Diag,
+ const SourceManager &SM,
+ SourceLocation Loc = SourceLocation(),
+ bool *Invalid = 0) const;
+
+ /// getSize - Returns the size of the content encapsulated by this
+ /// ContentCache. This can be the size of the source file or the size of an
+ /// arbitrary scratch buffer. If the ContentCache encapsulates a source
+ /// file this size is retrieved from the file's FileEntry.
+ unsigned getSize() const;
+
+ /// getSizeBytesMapped - Returns the number of bytes actually mapped for
+ /// this ContentCache. This can be 0 if the MemBuffer was not actually
+ /// expanded.
+ unsigned getSizeBytesMapped() const;
+
+ /// Returns the kind of memory used to back the memory buffer for
+ /// this content cache. This is used for performance analysis.
+ llvm::MemoryBuffer::BufferKind getMemoryBufferKind() const;
+
+ void setBuffer(const llvm::MemoryBuffer *B) {
+ assert(!Buffer.getPointer() && "MemoryBuffer already set.");
+ Buffer.setPointer(B);
+ Buffer.setInt(false);
+ }
+
+ /// \brief Get the underlying buffer, returning NULL if the buffer is not
+ /// yet available.
+ const llvm::MemoryBuffer *getRawBuffer() const {
+ return Buffer.getPointer();
+ }
+
+ /// \brief Replace the existing buffer (which will be deleted)
+ /// with the given buffer.
+ void replaceBuffer(const llvm::MemoryBuffer *B, bool DoNotFree = false);
+
+ /// \brief Determine whether the buffer itself is invalid.
+ bool isBufferInvalid() const {
+ return Buffer.getInt() & InvalidFlag;
+ }
+
+ /// \brief Determine whether the buffer should be freed.
+ bool shouldFreeBuffer() const {
+ return (Buffer.getInt() & DoNotFreeFlag) == 0;
+ }
+
+ private:
+ // Disable assignments.
+ ContentCache &operator=(const ContentCache& RHS);
+ };
+
+ /// FileInfo - Information about a FileID, basically just the logical file
+ /// that it represents and include stack information.
+ ///
+ /// Each FileInfo has include stack information, indicating where it came
+ /// from. This information encodes the #include chain that a token was
+ /// expanded from. The main include file has an invalid IncludeLoc.
+ ///
+ /// FileInfos contain a "ContentCache *", with the contents of the file.
+ ///
+ class FileInfo {
+ /// IncludeLoc - The location of the #include that brought in this file.
+ /// This is an invalid SLOC for the main file (top of the #include chain).
+ unsigned IncludeLoc; // Really a SourceLocation
+
+ /// \brief Number of FileIDs (files and macros) that were created during
+ /// preprocessing of this #include, including this SLocEntry.
+ /// Zero means the preprocessor didn't provide such info for this SLocEntry.
+ unsigned NumCreatedFIDs;
+
+ /// Data - This contains the ContentCache* and the bits indicating the
+ /// characteristic of the file and whether it has #line info, all bitmangled
+ /// together.
+ uintptr_t Data;
+
+ friend class clang::SourceManager;
+ friend class clang::ASTWriter;
+ friend class clang::ASTReader;
+ public:
+ /// get - Return a FileInfo object.
+ static FileInfo get(SourceLocation IL, const ContentCache *Con,
+ CharacteristicKind FileCharacter) {
+ FileInfo X;
+ X.IncludeLoc = IL.getRawEncoding();
+ X.NumCreatedFIDs = 0;
+ X.Data = (uintptr_t)Con;
+ assert((X.Data & 7) == 0 &&"ContentCache pointer insufficiently aligned");
+ assert((unsigned)FileCharacter < 4 && "invalid file character");
+ X.Data |= (unsigned)FileCharacter;
+ return X;
+ }
+
+ SourceLocation getIncludeLoc() const {
+ return SourceLocation::getFromRawEncoding(IncludeLoc);
+ }
+ const ContentCache* getContentCache() const {
+ return reinterpret_cast<const ContentCache*>(Data & ~7UL);
+ }
+
+ /// getCharacteristic - Return whether this is a system header or not.
+ CharacteristicKind getFileCharacteristic() const {
+ return (CharacteristicKind)(Data & 3);
+ }
+
+ /// hasLineDirectives - Return true if this FileID has #line directives in
+ /// it.
+ bool hasLineDirectives() const { return (Data & 4) != 0; }
+
+ /// setHasLineDirectives - Set the flag that indicates that this FileID has
+ /// line table entries associated with it.
+ void setHasLineDirectives() {
+ Data |= 4;
+ }
+ };
+
+ /// ExpansionInfo - Each ExpansionInfo encodes the expansion location - where
+ /// the token was ultimately expanded, and the SpellingLoc - where the actual
+ /// character data for the token came from.
+ class ExpansionInfo {
+ // Really these are all SourceLocations.
+
+ /// SpellingLoc - Where the spelling for the token can be found.
+ unsigned SpellingLoc;
+
+ /// ExpansionLocStart/ExpansionLocEnd - In a macro expansion, these
+ /// indicate the start and end of the expansion. In object-like macros,
+ /// these will be the same. In a function-like macro expansion, the start
+ /// will be the identifier and the end will be the ')'. Finally, in
+    /// macro-argument expansions, the end will be 'SourceLocation()', an
+ /// invalid location.
+ unsigned ExpansionLocStart, ExpansionLocEnd;
+
+ public:
+ SourceLocation getSpellingLoc() const {
+ return SourceLocation::getFromRawEncoding(SpellingLoc);
+ }
+ SourceLocation getExpansionLocStart() const {
+ return SourceLocation::getFromRawEncoding(ExpansionLocStart);
+ }
+ SourceLocation getExpansionLocEnd() const {
+ SourceLocation EndLoc =
+ SourceLocation::getFromRawEncoding(ExpansionLocEnd);
+ return EndLoc.isInvalid() ? getExpansionLocStart() : EndLoc;
+ }
+
+ std::pair<SourceLocation,SourceLocation> getExpansionLocRange() const {
+ return std::make_pair(getExpansionLocStart(), getExpansionLocEnd());
+ }
+
+ bool isMacroArgExpansion() const {
+ // Note that this needs to return false for default constructed objects.
+ return getExpansionLocStart().isValid() &&
+ SourceLocation::getFromRawEncoding(ExpansionLocEnd).isInvalid();
+ }
+
+ bool isFunctionMacroExpansion() const {
+ return getExpansionLocStart().isValid() &&
+ getExpansionLocStart() != getExpansionLocEnd();
+ }
+
+ /// create - Return a ExpansionInfo for an expansion. Start and End specify
+ /// the expansion range (where the macro is expanded), and SpellingLoc
+ /// specifies the spelling location (where the characters from the token
+ /// come from). All three can refer to normal File SLocs or expansion
+ /// locations.
+ static ExpansionInfo create(SourceLocation SpellingLoc,
+ SourceLocation Start, SourceLocation End) {
+ ExpansionInfo X;
+ X.SpellingLoc = SpellingLoc.getRawEncoding();
+ X.ExpansionLocStart = Start.getRawEncoding();
+ X.ExpansionLocEnd = End.getRawEncoding();
+ return X;
+ }
+
+ /// createForMacroArg - Return a special ExpansionInfo for the expansion of
+ /// a macro argument into a function-like macro's body. ExpansionLoc
+ /// specifies the expansion location (where the macro is expanded). This
+ /// doesn't need to be a range because a macro is always expanded at
+ /// a macro parameter reference, and macro parameters are always exactly
+ /// one token. SpellingLoc specifies the spelling location (where the
+ /// characters from the token come from). ExpansionLoc and SpellingLoc can
+ /// both refer to normal File SLocs or expansion locations.
+ ///
+ /// Given the code:
+ /// \code
+ /// #define F(x) f(x)
+ /// F(42);
+ /// \endcode
+ ///
+    /// When expanding '\c F(42)', the '\c x' would call this with a
+    /// SpellingLoc pointing at '\c 42' and an ExpansionLoc pointing at its
+ /// location in the definition of '\c F'.
+ static ExpansionInfo createForMacroArg(SourceLocation SpellingLoc,
+ SourceLocation ExpansionLoc) {
+ // We store an intentionally invalid source location for the end of the
+      // expansion range to mark that this is a macro argument expansion
+      // rather than a normal one.
+ return create(SpellingLoc, ExpansionLoc, SourceLocation());
+ }
+ };
+
+ /// SLocEntry - This is a discriminated union of FileInfo and
+ /// ExpansionInfo. SourceManager keeps an array of these objects, and
+ /// they are uniquely identified by the FileID datatype.
+ class SLocEntry {
+ unsigned Offset; // low bit is set for expansion info.
+ union {
+ FileInfo File;
+ ExpansionInfo Expansion;
+ };
+ public:
+ unsigned getOffset() const { return Offset >> 1; }
+
+ bool isExpansion() const { return Offset & 1; }
+ bool isFile() const { return !isExpansion(); }
+
+ const FileInfo &getFile() const {
+ assert(isFile() && "Not a file SLocEntry!");
+ return File;
+ }
+
+ const ExpansionInfo &getExpansion() const {
+ assert(isExpansion() && "Not a macro expansion SLocEntry!");
+ return Expansion;
+ }
+
+ static SLocEntry get(unsigned Offset, const FileInfo &FI) {
+ SLocEntry E;
+ E.Offset = Offset << 1;
+ E.File = FI;
+ return E;
+ }
+
+ static SLocEntry get(unsigned Offset, const ExpansionInfo &Expansion) {
+ SLocEntry E;
+ E.Offset = (Offset << 1) | 1;
+ E.Expansion = Expansion;
+ return E;
+ }
+ };
+} // end SrcMgr namespace.
+
+/// \brief External source of source location entries.
+class ExternalSLocEntrySource {
+public:
+ virtual ~ExternalSLocEntrySource();
+
+ /// \brief Read the source location entry with index ID, which will always be
+ /// less than -1.
+ ///
+ /// \returns true if an error occurred that prevented the source-location
+ /// entry from being loaded.
+ virtual bool ReadSLocEntry(int ID) = 0;
+};
+
+
+/// IsBeforeInTranslationUnitCache - This class holds the cache used by
+/// isBeforeInTranslationUnit. The cache structure is complex enough to be
+/// worth breaking out of SourceManager.
+class IsBeforeInTranslationUnitCache {
+  /// L/R QueryFID - These are the FIDs of the cached query. If these match up
+ /// with a subsequent query, the result can be reused.
+ FileID LQueryFID, RQueryFID;
+
+ /// \brief True if LQueryFID was created before RQueryFID. This is used
+ /// to compare macro expansion locations.
+ bool IsLQFIDBeforeRQFID;
+
+ /// CommonFID - This is the file found in common between the two #include
+ /// traces. It is the nearest common ancestor of the #include tree.
+ FileID CommonFID;
+
+ /// L/R CommonOffset - This is the offset of the previous query in CommonFID.
+ /// Usually, this represents the location of the #include for QueryFID, but if
+  /// LQueryFID is a parent of RQueryFID (or vice versa) then these can be a
+ /// random token in the parent.
+ unsigned LCommonOffset, RCommonOffset;
+public:
+
+ /// isCacheValid - Return true if the currently cached values match up with
+ /// the specified LHS/RHS query. If not, we can't use the cache.
+ bool isCacheValid(FileID LHS, FileID RHS) const {
+ return LQueryFID == LHS && RQueryFID == RHS;
+ }
+
+ /// getCachedResult - If the cache is valid, compute the result given the
+ /// specified offsets in the LHS/RHS FID's.
+ bool getCachedResult(unsigned LOffset, unsigned ROffset) const {
+ // If one of the query files is the common file, use the offset. Otherwise,
+ // use the #include loc in the common file.
+ if (LQueryFID != CommonFID) LOffset = LCommonOffset;
+ if (RQueryFID != CommonFID) ROffset = RCommonOffset;
+
+ // It is common for multiple macro expansions to be "included" from the same
+ // location (expansion location), in which case use the order of the FileIDs
+    // to determine which came first. This will also take care of the case where
+ // one of the locations points at the inclusion/expansion point of the other
+ // in which case its FileID will come before the other.
+ if (LOffset == ROffset)
+ return IsLQFIDBeforeRQFID;
+
+ return LOffset < ROffset;
+ }
+
+ // Set up a new query.
+ void setQueryFIDs(FileID LHS, FileID RHS, bool isLFIDBeforeRFID) {
+ assert(LHS != RHS);
+ LQueryFID = LHS;
+ RQueryFID = RHS;
+ IsLQFIDBeforeRQFID = isLFIDBeforeRFID;
+ }
+
+ void clear() {
+ LQueryFID = RQueryFID = FileID();
+ IsLQFIDBeforeRQFID = false;
+ }
+
+ void setCommonLoc(FileID commonFID, unsigned lCommonOffset,
+ unsigned rCommonOffset) {
+ CommonFID = commonFID;
+ LCommonOffset = lCommonOffset;
+ RCommonOffset = rCommonOffset;
+ }
+
+};
+
+/// \brief This class handles loading and caching of source files into memory.
+///
+/// This object owns the MemoryBuffer objects for all of the loaded
+/// files and assigns unique FileIDs for each unique #include chain.
+///
+/// The SourceManager can be queried for information about SourceLocation
+/// objects, turning them into either spelling or expansion locations. Spelling
+/// locations represent where the bytes corresponding to a token came from and
+/// expansion locations represent where the location is in the user's view. In
+/// the case of a macro expansion, for example, the spelling location indicates
+/// where the expanded token came from and the expansion location specifies
+/// where it was expanded.
+class SourceManager : public RefCountedBase<SourceManager> {
+ /// \brief DiagnosticsEngine object.
+ DiagnosticsEngine &Diag;
+
+ FileManager &FileMgr;
+
+ mutable llvm::BumpPtrAllocator ContentCacheAlloc;
+
+ /// FileInfos - Memoized information about all of the files tracked by this
+ /// SourceManager. This set allows us to merge ContentCache entries based
+ /// on their FileEntry*. All ContentCache objects will thus have unique,
+ /// non-null, FileEntry pointers.
+ llvm::DenseMap<const FileEntry*, SrcMgr::ContentCache*> FileInfos;
+
+  /// \brief True if the ContentCache for files that are overridden by other
+  /// files should report the original file name. Defaults to true.
+ bool OverridenFilesKeepOriginalName;
+
+  /// \brief Files that have been overridden with the contents of another file.
+ llvm::DenseMap<const FileEntry *, const FileEntry *> OverriddenFiles;
+
+ /// MemBufferInfos - Information about various memory buffers that we have
+ /// read in. All FileEntry* within the stored ContentCache objects are NULL,
+ /// as they do not refer to a file.
+ std::vector<SrcMgr::ContentCache*> MemBufferInfos;
+
+ /// \brief The table of SLocEntries that are local to this module.
+ ///
+ /// Positive FileIDs are indexes into this table. Entry 0 indicates an invalid
+ /// expansion.
+ std::vector<SrcMgr::SLocEntry> LocalSLocEntryTable;
+
+ /// \brief The table of SLocEntries that are loaded from other modules.
+ ///
+ /// Negative FileIDs are indexes into this table. To get from ID to an index,
+ /// use (-ID - 2).
+ mutable std::vector<SrcMgr::SLocEntry> LoadedSLocEntryTable;
+
+ /// \brief The starting offset of the next local SLocEntry.
+ ///
+ /// This is LocalSLocEntryTable.back().Offset + the size of that entry.
+ unsigned NextLocalOffset;
+
+ /// \brief The starting offset of the latest batch of loaded SLocEntries.
+ ///
+ /// This is LoadedSLocEntryTable.back().Offset, except that that entry might
+ /// not have been loaded, so that value would be unknown.
+ unsigned CurrentLoadedOffset;
+
+ /// \brief The highest possible offset is 2^31-1, so CurrentLoadedOffset
+ /// starts at 2^31.
+ static const unsigned MaxLoadedOffset = 1U << 31U;
+
+ /// \brief A bitmap that indicates whether the entries of LoadedSLocEntryTable
+ /// have already been loaded from the external source.
+ ///
+ /// Same indexing as LoadedSLocEntryTable.
+ std::vector<bool> SLocEntryLoaded;
+
+ /// \brief An external source for source location entries.
+ ExternalSLocEntrySource *ExternalSLocEntries;
+
+ /// LastFileIDLookup - This is a one-entry cache to speed up getFileID.
+ /// LastFileIDLookup records the last FileID looked up or created, because it
+ /// is very common to look up many tokens from the same file.
+ mutable FileID LastFileIDLookup;
+
+ /// LineTable - This holds information for #line directives. It is referenced
+ /// by indices from SLocEntryTable.
+ LineTableInfo *LineTable;
+
+  /// LastLineNo - These ivars serve as a cache used by the getLineNumber
+  /// method to speed up getLineNumber calls to nearby locations.
+ mutable FileID LastLineNoFileIDQuery;
+ mutable SrcMgr::ContentCache *LastLineNoContentCache;
+ mutable unsigned LastLineNoFilePos;
+ mutable unsigned LastLineNoResult;
+
+ /// MainFileID - The file ID for the main source file of the translation unit.
+ FileID MainFileID;
+
+  /// \brief The file ID for the precompiled preamble, if there is one.
+ FileID PreambleFileID;
+
+ // Statistics for -print-stats.
+ mutable unsigned NumLinearScans, NumBinaryProbes;
+
+ // Cache results for the isBeforeInTranslationUnit method.
+ mutable IsBeforeInTranslationUnitCache IsBeforeInTUCache;
+
+ // Cache for the "fake" buffer used for error-recovery purposes.
+ mutable llvm::MemoryBuffer *FakeBufferForRecovery;
+
+ mutable SrcMgr::ContentCache *FakeContentCacheForRecovery;
+
+ /// \brief Lazily computed map of macro argument chunks to their expanded
+ /// source location.
+ typedef std::map<unsigned, SourceLocation> MacroArgsMap;
+
+ mutable llvm::DenseMap<FileID, MacroArgsMap *> MacroArgsCacheMap;
+
+ // SourceManager doesn't support copy construction.
+ explicit SourceManager(const SourceManager&);
+ void operator=(const SourceManager&);
+public:
+ SourceManager(DiagnosticsEngine &Diag, FileManager &FileMgr);
+ ~SourceManager();
+
+ void clearIDTables();
+
+ DiagnosticsEngine &getDiagnostics() const { return Diag; }
+
+ FileManager &getFileManager() const { return FileMgr; }
+
+ /// \brief Set true if the SourceManager should report the original file name
+  /// for contents of files that were overridden by other files. Defaults to true.
+ void setOverridenFilesKeepOriginalName(bool value) {
+ OverridenFilesKeepOriginalName = value;
+ }
+
+  /// createMainFileIDForMemBuffer - Create the FileID for a memory buffer
+ /// that will represent the FileID for the main source. One example
+ /// of when this would be used is when the main source is read from STDIN.
+ FileID createMainFileIDForMemBuffer(const llvm::MemoryBuffer *Buffer) {
+ assert(MainFileID.isInvalid() && "MainFileID already set!");
+ MainFileID = createFileIDForMemBuffer(Buffer);
+ return MainFileID;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // MainFileID creation and querying methods.
+ //===--------------------------------------------------------------------===//
+
+ /// getMainFileID - Returns the FileID of the main source file.
+ FileID getMainFileID() const { return MainFileID; }
+
+ /// createMainFileID - Create the FileID for the main source file.
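+  ///
+  /// A minimal usage sketch (illustrative; assumes 'SM' is this SourceManager,
+  /// 'FileMgr' is its FileManager, and "input.c" is a made-up filename):
+  /// \code
+  ///   const FileEntry *FE = FileMgr.getFile("input.c");
+  ///   FileID MainFID = SM.createMainFileID(FE);
+  /// \endcode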
+ FileID createMainFileID(const FileEntry *SourceFile,
+ SrcMgr::CharacteristicKind Kind = SrcMgr::C_User) {
+ assert(MainFileID.isInvalid() && "MainFileID already set!");
+ MainFileID = createFileID(SourceFile, SourceLocation(), Kind);
+ return MainFileID;
+ }
+
+ /// \brief Set the file ID for the main source file.
+ void setMainFileID(FileID FID) {
+ assert(MainFileID.isInvalid() && "MainFileID already set!");
+ MainFileID = FID;
+ }
+
+ /// \brief Set the file ID for the precompiled preamble.
+ void setPreambleFileID(FileID Preamble) {
+ assert(PreambleFileID.isInvalid() && "PreambleFileID already set!");
+ PreambleFileID = Preamble;
+ }
+
+ /// \brief Get the file ID for the precompiled preamble if there is one.
+ FileID getPreambleFileID() const { return PreambleFileID; }
+
+ //===--------------------------------------------------------------------===//
+  // Methods to create new FileIDs and macro expansions.
+ //===--------------------------------------------------------------------===//
+
+ /// createFileID - Create a new FileID that represents the specified file
+ /// being #included from the specified IncludePosition. This translates NULL
+ /// into standard input.
+ FileID createFileID(const FileEntry *SourceFile, SourceLocation IncludePos,
+ SrcMgr::CharacteristicKind FileCharacter,
+ int LoadedID = 0, unsigned LoadedOffset = 0) {
+ const SrcMgr::ContentCache *IR = getOrCreateContentCache(SourceFile);
+ assert(IR && "getOrCreateContentCache() cannot return NULL");
+ return createFileID(IR, IncludePos, FileCharacter, LoadedID, LoadedOffset);
+ }
+
+ /// createFileIDForMemBuffer - Create a new FileID that represents the
+ /// specified memory buffer. This does no caching of the buffer and takes
+ /// ownership of the MemoryBuffer, so only pass a MemoryBuffer to this once.
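+  ///
+  /// A minimal sketch (illustrative; the buffer contents and name are made up):
+  /// \code
+  ///   const llvm::MemoryBuffer *Buf =
+  ///       llvm::MemoryBuffer::getMemBuffer("int x;", "<scratch>");
+  ///   FileID FID = SM.createFileIDForMemBuffer(Buf);
+  /// \endcode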
+ FileID createFileIDForMemBuffer(const llvm::MemoryBuffer *Buffer,
+ int LoadedID = 0, unsigned LoadedOffset = 0,
+ SourceLocation IncludeLoc = SourceLocation()) {
+ return createFileID(createMemBufferContentCache(Buffer), IncludeLoc,
+ SrcMgr::C_User, LoadedID, LoadedOffset);
+ }
+
+ /// createMacroArgExpansionLoc - Return a new SourceLocation that encodes the
+ /// fact that a token from SpellingLoc should actually be referenced from
+ /// ExpansionLoc, and that it represents the expansion of a macro argument
+ /// into the function-like macro body.
+ SourceLocation createMacroArgExpansionLoc(SourceLocation Loc,
+ SourceLocation ExpansionLoc,
+ unsigned TokLength);
+
+ /// createExpansionLoc - Return a new SourceLocation that encodes the fact
+ /// that a token from SpellingLoc should actually be referenced from
+ /// ExpansionLoc.
+ SourceLocation createExpansionLoc(SourceLocation Loc,
+ SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd,
+ unsigned TokLength,
+ int LoadedID = 0,
+ unsigned LoadedOffset = 0);
+
+ /// \brief Retrieve the memory buffer associated with the given file.
+ ///
+ /// \param Invalid If non-NULL, will be set \c true if an error
+ /// occurs while retrieving the memory buffer.
+ const llvm::MemoryBuffer *getMemoryBufferForFile(const FileEntry *File,
+ bool *Invalid = 0);
+
+ /// \brief Override the contents of the given source file by providing an
+ /// already-allocated buffer.
+ ///
+  /// \param SourceFile the source file whose contents will be overridden.
+ ///
+ /// \param Buffer the memory buffer whose contents will be used as the
+ /// data in the given source file.
+ ///
+ /// \param DoNotFree If true, then the buffer will not be freed when the
+ /// source manager is destroyed.
+ void overrideFileContents(const FileEntry *SourceFile,
+ const llvm::MemoryBuffer *Buffer,
+ bool DoNotFree = false);
+
+  /// \brief Override the given source file with another one.
+  ///
+  /// \param SourceFile the source file which will be overridden.
+ ///
+ /// \param NewFile the file whose contents will be used as the
+ /// data instead of the contents of the given source file.
+ void overrideFileContents(const FileEntry *SourceFile,
+ const FileEntry *NewFile);
+
+ //===--------------------------------------------------------------------===//
+ // FileID manipulation methods.
+ //===--------------------------------------------------------------------===//
+
+  /// getBuffer - Return the buffer for the specified FileID. If there is an
+  /// error opening this buffer the first time, this manufactures a temporary
+  /// buffer and, if \p Invalid is non-NULL, sets it to \c true.
+ const llvm::MemoryBuffer *getBuffer(FileID FID, SourceLocation Loc,
+ bool *Invalid = 0) const {
+ bool MyInvalid = false;
+ const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &MyInvalid);
+ if (MyInvalid || !Entry.isFile()) {
+ if (Invalid)
+ *Invalid = true;
+
+ return getFakeBufferForRecovery();
+ }
+
+ return Entry.getFile().getContentCache()->getBuffer(Diag, *this, Loc,
+ Invalid);
+ }
+
+ const llvm::MemoryBuffer *getBuffer(FileID FID, bool *Invalid = 0) const {
+ bool MyInvalid = false;
+ const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &MyInvalid);
+ if (MyInvalid || !Entry.isFile()) {
+ if (Invalid)
+ *Invalid = true;
+
+ return getFakeBufferForRecovery();
+ }
+
+ return Entry.getFile().getContentCache()->getBuffer(Diag, *this,
+ SourceLocation(),
+ Invalid);
+ }
+
+ /// getFileEntryForID - Returns the FileEntry record for the provided FileID.
+ const FileEntry *getFileEntryForID(FileID FID) const {
+ bool MyInvalid = false;
+ const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &MyInvalid);
+ if (MyInvalid || !Entry.isFile())
+ return 0;
+
+ const SrcMgr::ContentCache *Content = Entry.getFile().getContentCache();
+ if (!Content)
+ return 0;
+ return Content->OrigEntry;
+ }
+
+ /// Returns the FileEntry record for the provided SLocEntry.
+ const FileEntry *getFileEntryForSLocEntry(const SrcMgr::SLocEntry &sloc) const
+ {
+ const SrcMgr::ContentCache *Content = sloc.getFile().getContentCache();
+ if (!Content)
+ return 0;
+ return Content->OrigEntry;
+ }
+
+ /// getBufferData - Return a StringRef to the source buffer data for the
+ /// specified FileID.
+ ///
+ /// \param FID The file ID whose contents will be returned.
+ /// \param Invalid If non-NULL, will be set true if an error occurred.
+ StringRef getBufferData(FileID FID, bool *Invalid = 0) const;
+
+ /// \brief Get the number of FileIDs (files and macros) that were created
+ /// during preprocessing of \p FID, including it.
+ unsigned getNumCreatedFIDsForFileID(FileID FID) const {
+ bool Invalid = false;
+ const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
+ if (Invalid || !Entry.isFile())
+ return 0;
+
+ return Entry.getFile().NumCreatedFIDs;
+ }
+
+ /// \brief Set the number of FileIDs (files and macros) that were created
+ /// during preprocessing of \p FID, including it.
+ void setNumCreatedFIDsForFileID(FileID FID, unsigned NumFIDs) const {
+ bool Invalid = false;
+ const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
+ if (Invalid || !Entry.isFile())
+ return;
+
+ assert(Entry.getFile().NumCreatedFIDs == 0 && "Already set!");
+ const_cast<SrcMgr::FileInfo &>(Entry.getFile()).NumCreatedFIDs = NumFIDs;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // SourceLocation manipulation methods.
+ //===--------------------------------------------------------------------===//
+
+ /// getFileID - Return the FileID for a SourceLocation. This is a very
+ /// hot method that is used for all SourceManager queries that start with a
+ /// SourceLocation object. It is responsible for finding the entry in
+ /// SLocEntryTable which contains the specified location.
+ ///
+ FileID getFileID(SourceLocation SpellingLoc) const {
+ unsigned SLocOffset = SpellingLoc.getOffset();
+
+ // If our one-entry cache covers this offset, just return it.
+ if (isOffsetInFileID(LastFileIDLookup, SLocOffset))
+ return LastFileIDLookup;
+
+ return getFileIDSlow(SLocOffset);
+ }
+
+ /// getLocForStartOfFile - Return the source location corresponding to the
+ /// first byte of the specified file.
+ SourceLocation getLocForStartOfFile(FileID FID) const {
+ bool Invalid = false;
+ const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
+ if (Invalid || !Entry.isFile())
+ return SourceLocation();
+
+ unsigned FileOffset = Entry.getOffset();
+ return SourceLocation::getFileLoc(FileOffset);
+ }
+
+ /// \brief Return the source location corresponding to the last byte of the
+ /// specified file.
+ SourceLocation getLocForEndOfFile(FileID FID) const {
+ bool Invalid = false;
+ const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
+ if (Invalid || !Entry.isFile())
+ return SourceLocation();
+
+ unsigned FileOffset = Entry.getOffset();
+ return SourceLocation::getFileLoc(FileOffset + getFileIDSize(FID) - 1);
+ }
+
+  /// \brief Returns the include location if \p FID is a #include'd file;
+  /// otherwise it returns an invalid location.
+ SourceLocation getIncludeLoc(FileID FID) const {
+ bool Invalid = false;
+ const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
+ if (Invalid || !Entry.isFile())
+ return SourceLocation();
+
+ return Entry.getFile().getIncludeLoc();
+ }
+
+ /// getExpansionLoc - Given a SourceLocation object, return the expansion
+ /// location referenced by the ID.
+ SourceLocation getExpansionLoc(SourceLocation Loc) const {
+ // Handle the non-mapped case inline, defer to out of line code to handle
+ // expansions.
+ if (Loc.isFileID()) return Loc;
+ return getExpansionLocSlowCase(Loc);
+ }
+
+ /// \brief Given \p Loc, if it is a macro location return the expansion
+ /// location or the spelling location, depending on if it comes from a
+ /// macro argument or not.
+ SourceLocation getFileLoc(SourceLocation Loc) const {
+ if (Loc.isFileID()) return Loc;
+ return getFileLocSlowCase(Loc);
+ }
+
+ /// getImmediateExpansionRange - Loc is required to be an expansion location.
+ /// Return the start/end of the expansion information.
+ std::pair<SourceLocation,SourceLocation>
+ getImmediateExpansionRange(SourceLocation Loc) const;
+
+ /// getExpansionRange - Given a SourceLocation object, return the range of
+  /// tokens covered by the expansion in the ultimate file.
+ std::pair<SourceLocation,SourceLocation>
+ getExpansionRange(SourceLocation Loc) const;
+
+
+ /// getSpellingLoc - Given a SourceLocation object, return the spelling
+ /// location referenced by the ID. This is the place where the characters
+ /// that make up the lexed token can be found.
+ SourceLocation getSpellingLoc(SourceLocation Loc) const {
+ // Handle the non-mapped case inline, defer to out of line code to handle
+ // expansions.
+ if (Loc.isFileID()) return Loc;
+ return getSpellingLocSlowCase(Loc);
+ }
+
+ /// getImmediateSpellingLoc - Given a SourceLocation object, return the
+ /// spelling location referenced by the ID. This is the first level down
+ /// towards the place where the characters that make up the lexed token can be
+ /// found. This should not generally be used by clients.
+ SourceLocation getImmediateSpellingLoc(SourceLocation Loc) const;
+
+ /// getDecomposedLoc - Decompose the specified location into a raw FileID +
+ /// Offset pair. The first element is the FileID, the second is the
+ /// offset from the start of the buffer of the location.
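+  ///
+  /// For example (illustrative; assumes 'SM' is a SourceManager and 'Loc' is a
+  /// SourceLocation):
+  /// \code
+  ///   std::pair<FileID, unsigned> D = SM.getDecomposedLoc(Loc);
+  ///   FileID FID = D.first;        // the file containing Loc
+  ///   unsigned Offset = D.second;  // Loc's offset within that file
+  /// \endcode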
+ std::pair<FileID, unsigned> getDecomposedLoc(SourceLocation Loc) const {
+ FileID FID = getFileID(Loc);
+ bool Invalid = false;
+ const SrcMgr::SLocEntry &E = getSLocEntry(FID, &Invalid);
+ if (Invalid)
+ return std::make_pair(FileID(), 0);
+ return std::make_pair(FID, Loc.getOffset()-E.getOffset());
+ }
+
+ /// getDecomposedExpansionLoc - Decompose the specified location into a raw
+ /// FileID + Offset pair. If the location is an expansion record, walk
+ /// through it until we find the final location expanded.
+ std::pair<FileID, unsigned>
+ getDecomposedExpansionLoc(SourceLocation Loc) const {
+ FileID FID = getFileID(Loc);
+ bool Invalid = false;
+ const SrcMgr::SLocEntry *E = &getSLocEntry(FID, &Invalid);
+ if (Invalid)
+ return std::make_pair(FileID(), 0);
+
+ unsigned Offset = Loc.getOffset()-E->getOffset();
+ if (Loc.isFileID())
+ return std::make_pair(FID, Offset);
+
+ return getDecomposedExpansionLocSlowCase(E);
+ }
+
+ /// getDecomposedSpellingLoc - Decompose the specified location into a raw
+ /// FileID + Offset pair. If the location is an expansion record, walk
+ /// through it until we find its spelling record.
+ std::pair<FileID, unsigned>
+ getDecomposedSpellingLoc(SourceLocation Loc) const {
+ FileID FID = getFileID(Loc);
+ bool Invalid = false;
+ const SrcMgr::SLocEntry *E = &getSLocEntry(FID, &Invalid);
+ if (Invalid)
+ return std::make_pair(FileID(), 0);
+
+ unsigned Offset = Loc.getOffset()-E->getOffset();
+ if (Loc.isFileID())
+ return std::make_pair(FID, Offset);
+ return getDecomposedSpellingLocSlowCase(E, Offset);
+ }
+
+ /// getFileOffset - This method returns the offset from the start
+ /// of the file that the specified SourceLocation represents. This is not very
+ /// meaningful for a macro ID.
+ unsigned getFileOffset(SourceLocation SpellingLoc) const {
+ return getDecomposedLoc(SpellingLoc).second;
+ }
+
+ /// isMacroArgExpansion - This method tests whether the given source location
+ /// represents a macro argument's expansion into the function-like macro
+ /// definition. Such source locations only appear inside of the expansion
+ /// locations representing where a particular function-like macro was
+ /// expanded.
+ bool isMacroArgExpansion(SourceLocation Loc) const;
+
+ /// \brief Returns true if \p Loc is inside the [\p Start, +\p Length)
+ /// chunk of the source location address space.
+ /// If it's true and \p RelativeOffset is non-null, it will be set to the
+ /// relative offset of \p Loc inside the chunk.
+ bool isInSLocAddrSpace(SourceLocation Loc,
+ SourceLocation Start, unsigned Length,
+ unsigned *RelativeOffset = 0) const {
+ assert(((Start.getOffset() < NextLocalOffset &&
+ Start.getOffset()+Length <= NextLocalOffset) ||
+ (Start.getOffset() >= CurrentLoadedOffset &&
+ Start.getOffset()+Length < MaxLoadedOffset)) &&
+ "Chunk is not valid SLoc address space");
+ unsigned LocOffs = Loc.getOffset();
+ unsigned BeginOffs = Start.getOffset();
+ unsigned EndOffs = BeginOffs + Length;
+ if (LocOffs >= BeginOffs && LocOffs < EndOffs) {
+ if (RelativeOffset)
+ *RelativeOffset = LocOffs - BeginOffs;
+ return true;
+ }
+
+ return false;
+ }
+
+ /// \brief Return true if both \p LHS and \p RHS are in the local source
+ /// location address space or the loaded one. If it's true and \p
+ /// RelativeOffset is non-null, it will be set to the offset of \p RHS
+ /// relative to \p LHS.
+ bool isInSameSLocAddrSpace(SourceLocation LHS, SourceLocation RHS,
+ int *RelativeOffset) const {
+ unsigned LHSOffs = LHS.getOffset(), RHSOffs = RHS.getOffset();
+ bool LHSLoaded = LHSOffs >= CurrentLoadedOffset;
+ bool RHSLoaded = RHSOffs >= CurrentLoadedOffset;
+
+ if (LHSLoaded == RHSLoaded) {
+ if (RelativeOffset)
+ *RelativeOffset = RHSOffs - LHSOffs;
+ return true;
+ }
+
+ return false;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Queries about the code at a SourceLocation.
+ //===--------------------------------------------------------------------===//
+
+ /// getCharacterData - Return a pointer to the start of the specified location
+ /// in the appropriate spelling MemoryBuffer.
+ ///
+ /// \param Invalid If non-NULL, will be set \c true if an error occurs.
+ const char *getCharacterData(SourceLocation SL, bool *Invalid = 0) const;
+
+ /// getColumnNumber - Return the column # for the specified file position.
+ /// This is significantly cheaper to compute than the line number. This
+ /// returns zero if the column number isn't known. This may only be called
+ /// on a file sloc, so you must choose a spelling or expansion location
+ /// before calling this method.
+ unsigned getColumnNumber(FileID FID, unsigned FilePos,
+ bool *Invalid = 0) const;
+ unsigned getSpellingColumnNumber(SourceLocation Loc, bool *Invalid = 0) const;
+ unsigned getExpansionColumnNumber(SourceLocation Loc,
+ bool *Invalid = 0) const;
+ unsigned getPresumedColumnNumber(SourceLocation Loc, bool *Invalid = 0) const;
+
+
+ /// getLineNumber - Given a SourceLocation, return the spelling line number
+ /// for the position indicated. This requires building and caching a table of
+ /// line offsets for the MemoryBuffer, so this is not cheap: use only when
+ /// about to emit a diagnostic.
+ unsigned getLineNumber(FileID FID, unsigned FilePos, bool *Invalid = 0) const;
+ unsigned getSpellingLineNumber(SourceLocation Loc, bool *Invalid = 0) const;
+ unsigned getExpansionLineNumber(SourceLocation Loc, bool *Invalid = 0) const;
+ unsigned getPresumedLineNumber(SourceLocation Loc, bool *Invalid = 0) const;
+
+ /// Return the filename or buffer identifier of the buffer the location is in.
+ /// Note that this name does not respect #line directives. Use getPresumedLoc
+ /// for normal clients.
+ const char *getBufferName(SourceLocation Loc, bool *Invalid = 0) const;
+
+ /// getFileCharacteristic - return the file characteristic of the specified
+ /// source location, indicating whether this is a normal file, a system
+ /// header, or an "implicit extern C" system header.
+ ///
+ /// This state can be modified with flags on GNU linemarker directives like:
+ /// # 4 "foo.h" 3
+ /// which changes all source locations in the current file after that to be
+ /// considered to be from a system header.
+ SrcMgr::CharacteristicKind getFileCharacteristic(SourceLocation Loc) const;
+
+  /// getPresumedLoc - This method returns the "presumed" location of the
+  /// specified SourceLocation. A "presumed location" can be modified by #line
+  /// or GNU line marker directives. This provides a view of the data that a
+  /// user should see in diagnostics, for example.
+ ///
+ /// Note that a presumed location is always given as the expansion point of
+ /// an expansion location, not at the spelling location.
+ ///
+ /// \returns The presumed location of the specified SourceLocation. If the
+  /// presumed location cannot be calculated (e.g., because \p Loc is invalid
+ /// or the file containing \p Loc has changed on disk), returns an invalid
+ /// presumed location.
+ PresumedLoc getPresumedLoc(SourceLocation Loc) const;
+
+ /// isFromSameFile - Returns true if both SourceLocations correspond to
+ /// the same file.
+ bool isFromSameFile(SourceLocation Loc1, SourceLocation Loc2) const {
+ return getFileID(Loc1) == getFileID(Loc2);
+ }
+
+ /// isFromMainFile - Returns true if the file of provided SourceLocation is
+ /// the main file.
+ bool isFromMainFile(SourceLocation Loc) const {
+ return getFileID(Loc) == getMainFileID();
+ }
+
+  /// isInSystemHeader - Returns true if a SourceLocation is in a system header.
+ bool isInSystemHeader(SourceLocation Loc) const {
+ return getFileCharacteristic(Loc) != SrcMgr::C_User;
+ }
+
+  /// isInExternCSystemHeader - Returns true if a SourceLocation is in an
+  /// "extern C" system header.
+ bool isInExternCSystemHeader(SourceLocation Loc) const {
+ return getFileCharacteristic(Loc) == SrcMgr::C_ExternCSystem;
+ }
+
+ /// \brief Returns whether \p Loc is expanded from a macro in a system header.
+ bool isInSystemMacro(SourceLocation loc) {
+ return loc.isMacroID() && isInSystemHeader(getSpellingLoc(loc));
+ }
+
+  /// \brief The size of the SLocEntry that \p FID represents.
+ unsigned getFileIDSize(FileID FID) const;
+
+  /// \brief Given a specific FileID, returns true if \p Loc is inside that
+  /// FileID chunk, and sets the relative offset (the offset of \p Loc from the
+  /// beginning of the FileID) in \p RelativeOffset.
+ bool isInFileID(SourceLocation Loc, FileID FID,
+ unsigned *RelativeOffset = 0) const {
+ unsigned Offs = Loc.getOffset();
+ if (isOffsetInFileID(FID, Offs)) {
+ if (RelativeOffset)
+ *RelativeOffset = Offs - getSLocEntry(FID).getOffset();
+ return true;
+ }
+
+ return false;
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Line Table Manipulation Routines
+ //===--------------------------------------------------------------------===//
+
+ /// getLineTableFilenameID - Return the uniqued ID for the specified filename.
+ ///
+ unsigned getLineTableFilenameID(StringRef Str);
+
+ /// AddLineNote - Add a line note to the line table for the FileID and offset
+ /// specified by Loc. If FilenameID is -1, it is considered to be
+ /// unspecified.
+ void AddLineNote(SourceLocation Loc, unsigned LineNo, int FilenameID);
+ void AddLineNote(SourceLocation Loc, unsigned LineNo, int FilenameID,
+ bool IsFileEntry, bool IsFileExit,
+ bool IsSystemHeader, bool IsExternCHeader);
+
+ /// \brief Determine if the source manager has a line table.
+ bool hasLineTable() const { return LineTable != 0; }
+
+ /// \brief Retrieve the stored line table.
+ LineTableInfo &getLineTable();
+
+ //===--------------------------------------------------------------------===//
+ // Queries for performance analysis.
+ //===--------------------------------------------------------------------===//
+
+ /// Return the total amount of physical memory allocated by the
+ /// ContentCache allocator.
+ size_t getContentCacheSize() const {
+ return ContentCacheAlloc.getTotalMemory();
+ }
+
+ struct MemoryBufferSizes {
+ const size_t malloc_bytes;
+ const size_t mmap_bytes;
+
+ MemoryBufferSizes(size_t malloc_bytes, size_t mmap_bytes)
+ : malloc_bytes(malloc_bytes), mmap_bytes(mmap_bytes) {}
+ };
+
+ /// Return the amount of memory used by memory buffers, breaking down
+ /// by heap-backed versus mmap'ed memory.
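+  ///
+  /// A usage sketch (illustrative; assumes 'SM' is a SourceManager):
+  /// \code
+  ///   SourceManager::MemoryBufferSizes Sizes = SM.getMemoryBufferSizes();
+  ///   llvm::errs() << Sizes.malloc_bytes << " bytes malloc'd, "
+  ///                << Sizes.mmap_bytes << " bytes mmap'd\n";
+  /// \endcode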
+ MemoryBufferSizes getMemoryBufferSizes() const;
+
+ // Return the amount of memory used for various side tables and
+ // data structures in the SourceManager.
+ size_t getDataStructureSizes() const;
+
+ //===--------------------------------------------------------------------===//
+ // Other miscellaneous methods.
+ //===--------------------------------------------------------------------===//
+
+ /// \brief Get the source location for the given file:line:col triplet.
+ ///
+ /// If the source file is included multiple times, the source location will
+ /// be based upon the first inclusion.
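+  ///
+  /// A minimal sketch (illustrative; assumes 'SM' is a SourceManager and 'FE'
+  /// is a FileEntry it knows about):
+  /// \code
+  ///   SourceLocation Loc = SM.translateFileLineCol(FE, /*Line=*/10, /*Col=*/1);
+  /// \endcode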
+ SourceLocation translateFileLineCol(const FileEntry *SourceFile,
+ unsigned Line, unsigned Col) const;
+
+ /// \brief Get the FileID for the given file.
+ ///
+ /// If the source file is included multiple times, the FileID will be the
+ /// first inclusion.
+ FileID translateFile(const FileEntry *SourceFile) const;
+
+ /// \brief Get the source location in \p FID for the given line:col.
+ /// Returns null location if \p FID is not a file SLocEntry.
+ SourceLocation translateLineCol(FileID FID,
+ unsigned Line, unsigned Col) const;
+
+ /// \brief If \p Loc points inside a function macro argument, the returned
+ /// location will be the macro location in which the argument was expanded.
+ /// If a macro argument is used multiple times, the expanded location will
+ /// be at the first expansion of the argument.
+ /// e.g.
+ /// MY_MACRO(foo);
+ /// ^
+  /// Passing a file location pointing at 'foo' will yield the macro location
+  /// into which 'foo' was expanded.
+ SourceLocation getMacroArgExpandedLocation(SourceLocation Loc) const;
+
+ /// \brief Determines the order of 2 source locations in the translation unit.
+ ///
+ /// \returns true if LHS source location comes before RHS, false otherwise.
+ bool isBeforeInTranslationUnit(SourceLocation LHS, SourceLocation RHS) const;
+
+ /// \brief Comparison function class.
+ class LocBeforeThanCompare : public std::binary_function<SourceLocation,
+ SourceLocation, bool> {
+ SourceManager &SM;
+
+ public:
+ explicit LocBeforeThanCompare(SourceManager &SM) : SM(SM) { }
+
+ bool operator()(SourceLocation LHS, SourceLocation RHS) const {
+ return SM.isBeforeInTranslationUnit(LHS, RHS);
+ }
+ };
+
+ /// \brief Determines the order of 2 source locations in the "source location
+ /// address space".
+ bool isBeforeInSLocAddrSpace(SourceLocation LHS, SourceLocation RHS) const {
+ return isBeforeInSLocAddrSpace(LHS, RHS.getOffset());
+ }
+
+ /// \brief Determines the order of a source location and a source location
+ /// offset in the "source location address space".
+ ///
+  /// Note that we always consider source locations loaded from a PCH or
+  /// module (i.e., those with offsets >= CurrentLoadedOffset) to come before
+  /// local source locations.
+ bool isBeforeInSLocAddrSpace(SourceLocation LHS, unsigned RHS) const {
+ unsigned LHSOffset = LHS.getOffset();
+ bool LHSLoaded = LHSOffset >= CurrentLoadedOffset;
+ bool RHSLoaded = RHS >= CurrentLoadedOffset;
+ if (LHSLoaded == RHSLoaded)
+ return LHSOffset < RHS;
+
+ return LHSLoaded;
+ }
+
+ // Iterators over FileInfos.
+ typedef llvm::DenseMap<const FileEntry*, SrcMgr::ContentCache*>
+ ::const_iterator fileinfo_iterator;
+ fileinfo_iterator fileinfo_begin() const { return FileInfos.begin(); }
+ fileinfo_iterator fileinfo_end() const { return FileInfos.end(); }
+ bool hasFileInfo(const FileEntry *File) const {
+ return FileInfos.find(File) != FileInfos.end();
+ }
+
+ /// PrintStats - Print statistics to stderr.
+ ///
+ void PrintStats() const;
+
+ /// \brief Get the number of local SLocEntries we have.
+ unsigned local_sloc_entry_size() const { return LocalSLocEntryTable.size(); }
+
+ /// \brief Get a local SLocEntry. This is exposed for indexing.
+ const SrcMgr::SLocEntry &getLocalSLocEntry(unsigned Index,
+ bool *Invalid = 0) const {
+ assert(Index < LocalSLocEntryTable.size() && "Invalid index");
+ return LocalSLocEntryTable[Index];
+ }
+
+ /// \brief Get the number of loaded SLocEntries we have.
+ unsigned loaded_sloc_entry_size() const { return LoadedSLocEntryTable.size();}
+
+ /// \brief Get a loaded SLocEntry. This is exposed for indexing.
+ const SrcMgr::SLocEntry &getLoadedSLocEntry(unsigned Index,
+ bool *Invalid = 0) const {
+ assert(Index < LoadedSLocEntryTable.size() && "Invalid index");
+ if (SLocEntryLoaded[Index])
+ return LoadedSLocEntryTable[Index];
+ return loadSLocEntry(Index, Invalid);
+ }
+
+ const SrcMgr::SLocEntry &getSLocEntry(FileID FID, bool *Invalid = 0) const {
+ if (FID.ID == 0 || FID.ID == -1) {
+ if (Invalid) *Invalid = true;
+ return LocalSLocEntryTable[0];
+ }
+ return getSLocEntryByID(FID.ID);
+ }
+
+ unsigned getNextLocalOffset() const { return NextLocalOffset; }
+
+ void setExternalSLocEntrySource(ExternalSLocEntrySource *Source) {
+ assert(LoadedSLocEntryTable.empty() &&
+ "Invalidating existing loaded entries");
+ ExternalSLocEntries = Source;
+ }
+
+ /// \brief Allocate a number of loaded SLocEntries, which will be actually
+ /// loaded on demand from the external source.
+ ///
+  /// NumSLocEntries will be allocated, occupying a total of TotalSize space
+ /// in the global source view. The lowest ID and the base offset of the
+ /// entries will be returned.
+ std::pair<int, unsigned>
+ AllocateLoadedSLocEntries(unsigned NumSLocEntries, unsigned TotalSize);
+
+ /// \brief Returns true if \p Loc came from a PCH/Module.
+ bool isLoadedSourceLocation(SourceLocation Loc) const {
+ return Loc.getOffset() >= CurrentLoadedOffset;
+ }
+
+ /// \brief Returns true if \p Loc did not come from a PCH/Module.
+ bool isLocalSourceLocation(SourceLocation Loc) const {
+ return Loc.getOffset() < NextLocalOffset;
+ }
+
+ /// \brief Returns true if \p FID came from a PCH/Module.
+ bool isLoadedFileID(FileID FID) const {
+ assert(FID.ID != -1 && "Using FileID sentinel value");
+ return FID.ID < 0;
+ }
+
+ /// \brief Returns true if \p FID did not come from a PCH/Module.
+ bool isLocalFileID(FileID FID) const {
+ return !isLoadedFileID(FID);
+ }
+
+private:
+ const llvm::MemoryBuffer *getFakeBufferForRecovery() const;
+ const SrcMgr::ContentCache *getFakeContentCacheForRecovery() const;
+
+ const SrcMgr::SLocEntry &loadSLocEntry(unsigned Index, bool *Invalid) const;
+
+ /// \brief Get the entry with the given unwrapped FileID.
+ const SrcMgr::SLocEntry &getSLocEntryByID(int ID) const {
+ assert(ID != -1 && "Using FileID sentinel value");
+ if (ID < 0)
+ return getLoadedSLocEntryByID(ID);
+ return getLocalSLocEntry(static_cast<unsigned>(ID));
+ }
+
+ const SrcMgr::SLocEntry &getLoadedSLocEntryByID(int ID,
+ bool *Invalid = 0) const {
+ return getLoadedSLocEntry(static_cast<unsigned>(-ID - 2), Invalid);
+ }
+
+  /// createExpansionLocImpl - Implements the common elements of storing an
+ /// expansion info struct into the SLocEntry table and producing a source
+ /// location that refers to it.
+ SourceLocation createExpansionLocImpl(const SrcMgr::ExpansionInfo &Expansion,
+ unsigned TokLength,
+ int LoadedID = 0,
+ unsigned LoadedOffset = 0);
+
+ /// isOffsetInFileID - Return true if the specified FileID contains the
+ /// specified SourceLocation offset. This is a very hot method.
+ inline bool isOffsetInFileID(FileID FID, unsigned SLocOffset) const {
+ const SrcMgr::SLocEntry &Entry = getSLocEntry(FID);
+ // If the entry is after the offset, it can't contain it.
+ if (SLocOffset < Entry.getOffset()) return false;
+
+ // If this is the very last entry then it does.
+ if (FID.ID == -2)
+ return true;
+
+ // If it is the last local entry, then it does if the location is local.
+ if (static_cast<unsigned>(FID.ID+1) == LocalSLocEntryTable.size()) {
+ return SLocOffset < NextLocalOffset;
+ }
+
+    // Otherwise, the offset is in this entry only if it is before the start of
+    // the next entry. This works for both local and loaded entries.
+ return SLocOffset < getSLocEntry(FileID::get(FID.ID+1)).getOffset();
+ }
+
+ /// createFileID - Create a new fileID for the specified ContentCache and
+ /// include position. This works regardless of whether the ContentCache
+ /// corresponds to a file or some other input source.
+ FileID createFileID(const SrcMgr::ContentCache* File,
+ SourceLocation IncludePos,
+ SrcMgr::CharacteristicKind DirCharacter,
+ int LoadedID, unsigned LoadedOffset);
+
+ const SrcMgr::ContentCache *
+ getOrCreateContentCache(const FileEntry *SourceFile);
+
+ /// createMemBufferContentCache - Create a new ContentCache for the specified
+ /// memory buffer.
+ const SrcMgr::ContentCache*
+ createMemBufferContentCache(const llvm::MemoryBuffer *Buf);
+
+ FileID getFileIDSlow(unsigned SLocOffset) const;
+ FileID getFileIDLocal(unsigned SLocOffset) const;
+ FileID getFileIDLoaded(unsigned SLocOffset) const;
+
+ SourceLocation getExpansionLocSlowCase(SourceLocation Loc) const;
+ SourceLocation getSpellingLocSlowCase(SourceLocation Loc) const;
+ SourceLocation getFileLocSlowCase(SourceLocation Loc) const;
+
+ std::pair<FileID, unsigned>
+ getDecomposedExpansionLocSlowCase(const SrcMgr::SLocEntry *E) const;
+ std::pair<FileID, unsigned>
+ getDecomposedSpellingLocSlowCase(const SrcMgr::SLocEntry *E,
+ unsigned Offset) const;
+ void computeMacroArgsCache(MacroArgsMap *&MacroArgsCache, FileID FID) const;
+
+ friend class ASTReader;
+ friend class ASTWriter;
+};
+
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/SourceManagerInternals.h b/contrib/llvm/tools/clang/include/clang/Basic/SourceManagerInternals.h
new file mode 100644
index 0000000..1cb16b4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/SourceManagerInternals.h
@@ -0,0 +1,130 @@
+//===--- SourceManagerInternals.h - SourceManager Internals -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the implementation details of the SourceManager
+// class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SOURCEMANAGER_INTERNALS_H
+#define LLVM_CLANG_SOURCEMANAGER_INTERNALS_H
+
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringMap.h"
+#include <map>
+
+namespace clang {
+
+//===----------------------------------------------------------------------===//
+// Line Table Implementation
+//===----------------------------------------------------------------------===//
+
+struct LineEntry {
+ /// FileOffset - The offset in this file that the line entry occurs at.
+ unsigned FileOffset;
+
+ /// LineNo - The presumed line number of this line entry: #line 4.
+ unsigned LineNo;
+
+ /// FilenameID - The ID of the filename identified by this line entry:
+ /// #line 4 "foo.c". This is -1 if not specified.
+ int FilenameID;
+
+ /// FileKind - The kind of file (e.g., a system header) described by this
+ /// line entry.
+ SrcMgr::CharacteristicKind FileKind;
+
+ /// IncludeOffset - This is the offset of the virtual include stack location,
+ /// which is manipulated by GNU linemarker directives. If this is 0 then
+ /// there is no virtual #includer.
+ unsigned IncludeOffset;
+
+ static LineEntry get(unsigned Offs, unsigned Line, int Filename,
+ SrcMgr::CharacteristicKind FileKind,
+ unsigned IncludeOffset) {
+ LineEntry E;
+ E.FileOffset = Offs;
+ E.LineNo = Line;
+ E.FilenameID = Filename;
+ E.FileKind = FileKind;
+ E.IncludeOffset = IncludeOffset;
+ return E;
+ }
+};
+
+// needed for FindNearestLineEntry (upper_bound of LineEntry)
+inline bool operator<(const LineEntry &lhs, const LineEntry &rhs) {
+ // FIXME: should check the other field?
+ return lhs.FileOffset < rhs.FileOffset;
+}
+
+inline bool operator<(const LineEntry &E, unsigned Offset) {
+ return E.FileOffset < Offset;
+}
+
+inline bool operator<(unsigned Offset, const LineEntry &E) {
+ return Offset < E.FileOffset;
+}
+
+/// LineTableInfo - This class is used to hold and unique the data used to
+/// represent #line information.
+class LineTableInfo {
+ /// FilenameIDs - This map is used to assign unique IDs to filenames in
+ /// #line directives. This allows us to unique the filenames that
+ /// frequently reoccur and reference them with indices. FilenameIDs holds
+ /// the mapping from string -> ID, and FilenamesByID holds the mapping of ID
+ /// to string.
+ llvm::StringMap<unsigned, llvm::BumpPtrAllocator> FilenameIDs;
+ std::vector<llvm::StringMapEntry<unsigned>*> FilenamesByID;
+
+ /// LineEntries - This is a map from FileIDs to a list of line entries (sorted
+ /// by the offset at which they occur in the file).
+ std::map<int, std::vector<LineEntry> > LineEntries;
+public:
+ LineTableInfo() {
+ }
+
+ void clear() {
+ FilenameIDs.clear();
+ FilenamesByID.clear();
+ LineEntries.clear();
+ }
+
+ ~LineTableInfo() {}
+
+ unsigned getLineTableFilenameID(StringRef Str);
+ const char *getFilename(unsigned ID) const {
+ assert(ID < FilenamesByID.size() && "Invalid FilenameID");
+ return FilenamesByID[ID]->getKeyData();
+ }
+ unsigned getNumFilenames() const { return FilenamesByID.size(); }
+
+ void AddLineNote(int FID, unsigned Offset,
+ unsigned LineNo, int FilenameID);
+ void AddLineNote(int FID, unsigned Offset,
+ unsigned LineNo, int FilenameID,
+ unsigned EntryExit, SrcMgr::CharacteristicKind FileKind);
+
+
+ /// FindNearestLineEntry - Find the line entry in FID that is nearest to
+ /// Offset and precedes it. If there is no line entry before Offset in FID,
+ /// return null.
+ const LineEntry *FindNearestLineEntry(int FID, unsigned Offset);
+
+ // Low-level access
+ typedef std::map<int, std::vector<LineEntry> >::iterator iterator;
+ iterator begin() { return LineEntries.begin(); }
+ iterator end() { return LineEntries.end(); }
+
+ /// \brief Add a new line entry that has already been encoded into
+ /// the internal representation of the line table.
+ void AddEntry(int FID, const std::vector<LineEntry> &Entries);
+};
+
+} // end namespace clang
+
+#endif
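The three operator< overloads above exist so that FindNearestLineEntry can binary-search a FileID's sorted LineEntry vector (std::upper_bound needs to compare an offset against an entry) and then step back one element. Here is a small standalone sketch of that lookup against a simplified copy of the struct; findNearest is illustrative, not the actual implementation.

  #include <algorithm>
  #include <cassert>
  #include <vector>

  struct LineEntry { unsigned FileOffset; unsigned LineNo; };

  inline bool operator<(unsigned Offset, const LineEntry &E) {
    return Offset < E.FileOffset;
  }

  // Return the entry with the largest FileOffset <= Offset, or null if none.
  const LineEntry *findNearest(const std::vector<LineEntry> &Entries,
                               unsigned Offset) {
    auto It = std::upper_bound(Entries.begin(), Entries.end(), Offset);
    if (It == Entries.begin())
      return nullptr;          // every entry starts after Offset
    return &*(It - 1);         // step back to the nearest preceding entry
  }

  int main() {
    std::vector<LineEntry> Entries = {{10, 4}, {50, 20}, {90, 7}};
    assert(findNearest(Entries, 5) == nullptr);
    assert(findNearest(Entries, 60)->LineNo == 20);
    assert(findNearest(Entries, 90)->LineNo == 7);
  }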
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h b/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h
new file mode 100644
index 0000000..9e71827
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Specifiers.h
@@ -0,0 +1,173 @@
+//===--- Specifiers.h - Declaration and Type Specifiers ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines various enumerations that describe declaration and
+// type specifiers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_SPECIFIERS_H
+#define LLVM_CLANG_BASIC_SPECIFIERS_H
+
+namespace clang {
+ /// \brief Specifies the width of a type, e.g., short, long, or long long.
+ enum TypeSpecifierWidth {
+ TSW_unspecified,
+ TSW_short,
+ TSW_long,
+ TSW_longlong
+ };
+
+ /// \brief Specifies the signedness of a type, e.g., signed or unsigned.
+ enum TypeSpecifierSign {
+ TSS_unspecified,
+ TSS_signed,
+ TSS_unsigned
+ };
+
+ /// \brief Specifies the kind of type.
+ enum TypeSpecifierType {
+ TST_unspecified,
+ TST_void,
+ TST_char,
+ TST_wchar, // C++ wchar_t
+ TST_char16, // C++0x char16_t
+ TST_char32, // C++0x char32_t
+ TST_int,
+ TST_int128,
+ TST_half, // OpenCL half, ARM NEON __fp16
+ TST_float,
+ TST_double,
+ TST_bool, // _Bool
+ TST_decimal32, // _Decimal32
+ TST_decimal64, // _Decimal64
+ TST_decimal128, // _Decimal128
+ TST_enum,
+ TST_union,
+ TST_struct,
+ TST_class, // C++ class type
+ TST_typename, // Typedef, C++ class-name or enum name, etc.
+ TST_typeofType,
+ TST_typeofExpr,
+ TST_decltype, // C++0x decltype
+ TST_underlyingType, // __underlying_type for C++0x
+ TST_auto, // C++0x auto
+ TST_unknown_anytype, // __unknown_anytype extension
+ TST_atomic, // C11 _Atomic
+ TST_error // erroneous type
+ };
+
+ /// WrittenBuiltinSpecs - Structure that packs information about the
+ /// type specifiers that were written in a particular type specifier
+ /// sequence.
+ struct WrittenBuiltinSpecs {
+ /*DeclSpec::TST*/ unsigned Type : 5;
+ /*DeclSpec::TSS*/ unsigned Sign : 2;
+ /*DeclSpec::TSW*/ unsigned Width : 2;
+ bool ModeAttr : 1;
+ };
+
+ /// AccessSpecifier - A C++ access specifier (public, private,
+ /// protected), plus the special value "none" which means
+ /// different things in different contexts.
+ enum AccessSpecifier {
+ AS_public,
+ AS_protected,
+ AS_private,
+ AS_none
+ };
+
+ /// ExprValueKind - The categorization of expression values,
+ /// currently following the C++0x scheme.
+ enum ExprValueKind {
+ /// An r-value expression (a pr-value in the C++0x taxonomy)
+ /// produces a temporary value.
+ VK_RValue,
+
+ /// An l-value expression is a reference to an object with
+ /// independent storage.
+ VK_LValue,
+
+ /// An x-value expression is a reference to an object with
+ /// independent storage but which can be "moved", i.e.
+ /// efficiently cannibalized for its resources.
+ VK_XValue
+ };
+
+ /// A further classification of the kind of object referenced by an
+ /// l-value or x-value.
+ enum ExprObjectKind {
+ /// An ordinary object is located at an address in memory.
+ OK_Ordinary,
+
+ /// A bitfield object is a bitfield on a C or C++ record.
+ OK_BitField,
+
+ /// A vector component is an element or range of elements on a vector.
+ OK_VectorComponent,
+
+ /// An Objective C property is a logical field of an Objective-C
+ /// object which is read and written via Objective C method calls.
+ OK_ObjCProperty,
+
+ /// An Objective-C array/dictionary subscript expression, which reads or
+ /// writes the subscripted array/dictionary element via Objective-C
+ /// method calls.
+ OK_ObjCSubscript
+ };
+
+ /// \brief Describes the kind of template specialization that a
+ /// particular template specialization declaration represents.
+ enum TemplateSpecializationKind {
+ /// This template specialization was formed from a template-id but
+ /// has not yet been declared, defined, or instantiated.
+ TSK_Undeclared = 0,
+ /// This template specialization was implicitly instantiated from a
+ /// template. (C++ [temp.inst]).
+ TSK_ImplicitInstantiation,
+ /// This template specialization was declared or defined by an
+ /// explicit specialization (C++ [temp.expl.spec]) or partial
+ /// specialization (C++ [temp.class.spec]).
+ TSK_ExplicitSpecialization,
+ /// This template specialization was instantiated from a template
+ /// due to an explicit instantiation declaration request
+ /// (C++0x [temp.explicit]).
+ TSK_ExplicitInstantiationDeclaration,
+ /// This template specialization was instantiated from a template
+ /// due to an explicit instantiation definition request
+ /// (C++ [temp.explicit]).
+ TSK_ExplicitInstantiationDefinition
+ };
+
+ /// \brief Storage classes.
+ enum StorageClass {
+ // These are legal on both functions and variables.
+ SC_None,
+ SC_Extern,
+ SC_Static,
+ SC_PrivateExtern,
+
+ // These are only legal on variables.
+ SC_OpenCLWorkGroupLocal,
+ SC_Auto,
+ SC_Register
+ };
+
+ /// Checks whether the given storage class is legal for functions.
+ inline bool isLegalForFunction(StorageClass SC) {
+ return SC <= SC_PrivateExtern;
+ }
+
+ /// Checks whether the given storage class is legal for variables.
+ inline bool isLegalForVariable(StorageClass SC) {
+ return true;
+ }
+} // end namespace clang
+
+#endif // LLVM_CLANG_BASIC_SPECIFIERS_H
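isLegalForFunction above relies on the declaration order of StorageClass: the values that are legal on functions are grouped first, so a single comparison against SC_PrivateExtern is enough. The short sketch below exercises that invariant against a standalone copy of the enum, for illustration only; the real definition is the one in this header.

  #include <cassert>

  // Standalone copy of the enum layout from clang/Basic/Specifiers.h.
  enum StorageClass {
    SC_None, SC_Extern, SC_Static, SC_PrivateExtern, // legal on functions
    SC_OpenCLWorkGroupLocal, SC_Auto, SC_Register    // variables only
  };

  inline bool isLegalForFunction(StorageClass SC) {
    return SC <= SC_PrivateExtern; // relies on the grouping above
  }

  int main() {
    assert(isLegalForFunction(SC_Static));
    assert(!isLegalForFunction(SC_Register));
    // Reordering the enumerators would silently break this check.
  }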
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td b/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td
new file mode 100644
index 0000000..67d71e4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/StmtNodes.td
@@ -0,0 +1,169 @@
+class AttrSubject;
+
+class Stmt<bit abstract = 0> : AttrSubject {
+ bit Abstract = abstract;
+}
+
+class DStmt<Stmt base, bit abstract = 0> : Stmt<abstract> {
+ Stmt Base = base;
+}
+
+// Statements
+def NullStmt : Stmt;
+def CompoundStmt : Stmt;
+def LabelStmt : Stmt;
+def IfStmt : Stmt;
+def SwitchStmt : Stmt;
+def WhileStmt : Stmt;
+def DoStmt : Stmt;
+def ForStmt : Stmt;
+def GotoStmt : Stmt;
+def IndirectGotoStmt : Stmt;
+def ContinueStmt : Stmt;
+def BreakStmt : Stmt;
+def ReturnStmt : Stmt;
+def DeclStmt : Stmt;
+def SwitchCase : Stmt<1>;
+def CaseStmt : DStmt<SwitchCase>;
+def DefaultStmt : DStmt<SwitchCase>;
+
+// GNU Extensions
+def AsmStmt : Stmt;
+
+// Obj-C statements
+def ObjCAtTryStmt : Stmt;
+def ObjCAtCatchStmt : Stmt;
+def ObjCAtFinallyStmt : Stmt;
+def ObjCAtThrowStmt : Stmt;
+def ObjCAtSynchronizedStmt : Stmt;
+def ObjCForCollectionStmt : Stmt;
+def ObjCAutoreleasePoolStmt : Stmt;
+
+// C++ statements
+def CXXCatchStmt : Stmt;
+def CXXTryStmt : Stmt;
+def CXXForRangeStmt : Stmt;
+
+// Expressions
+def Expr : Stmt<1>;
+def PredefinedExpr : DStmt<Expr>;
+def DeclRefExpr : DStmt<Expr>;
+def IntegerLiteral : DStmt<Expr>;
+def FloatingLiteral : DStmt<Expr>;
+def ImaginaryLiteral : DStmt<Expr>;
+def StringLiteral : DStmt<Expr>;
+def CharacterLiteral : DStmt<Expr>;
+def ParenExpr : DStmt<Expr>;
+def UnaryOperator : DStmt<Expr>;
+def OffsetOfExpr : DStmt<Expr>;
+def UnaryExprOrTypeTraitExpr : DStmt<Expr>;
+def ArraySubscriptExpr : DStmt<Expr>;
+def CallExpr : DStmt<Expr>;
+def MemberExpr : DStmt<Expr>;
+def CastExpr : DStmt<Expr, 1>;
+def BinaryOperator : DStmt<Expr>;
+def CompoundAssignOperator : DStmt<BinaryOperator>;
+def AbstractConditionalOperator : DStmt<Expr, 1>;
+def ConditionalOperator : DStmt<AbstractConditionalOperator>;
+def BinaryConditionalOperator : DStmt<AbstractConditionalOperator>;
+def ImplicitCastExpr : DStmt<CastExpr>;
+def ExplicitCastExpr : DStmt<CastExpr, 1>;
+def CStyleCastExpr : DStmt<ExplicitCastExpr>;
+def CompoundLiteralExpr : DStmt<Expr>;
+def ExtVectorElementExpr : DStmt<Expr>;
+def InitListExpr : DStmt<Expr>;
+def DesignatedInitExpr : DStmt<Expr>;
+def ImplicitValueInitExpr : DStmt<Expr>;
+def ParenListExpr : DStmt<Expr>;
+def VAArgExpr : DStmt<Expr>;
+def GenericSelectionExpr : DStmt<Expr>;
+def PseudoObjectExpr : DStmt<Expr>;
+
+// Atomic expressions
+def AtomicExpr : DStmt<Expr>;
+
+// GNU Extensions.
+def AddrLabelExpr : DStmt<Expr>;
+def StmtExpr : DStmt<Expr>;
+def ChooseExpr : DStmt<Expr>;
+def GNUNullExpr : DStmt<Expr>;
+
+// C++ Expressions.
+def CXXOperatorCallExpr : DStmt<CallExpr>;
+def CXXMemberCallExpr : DStmt<CallExpr>;
+def CXXNamedCastExpr : DStmt<ExplicitCastExpr, 1>;
+def CXXStaticCastExpr : DStmt<CXXNamedCastExpr>;
+def CXXDynamicCastExpr : DStmt<CXXNamedCastExpr>;
+def CXXReinterpretCastExpr : DStmt<CXXNamedCastExpr>;
+def CXXConstCastExpr : DStmt<CXXNamedCastExpr>;
+def CXXFunctionalCastExpr : DStmt<ExplicitCastExpr>;
+def CXXTypeidExpr : DStmt<Expr>;
+def UserDefinedLiteral : DStmt<CallExpr>;
+def CXXBoolLiteralExpr : DStmt<Expr>;
+def CXXNullPtrLiteralExpr : DStmt<Expr>;
+def CXXThisExpr : DStmt<Expr>;
+def CXXThrowExpr : DStmt<Expr>;
+def CXXDefaultArgExpr : DStmt<Expr>;
+def CXXScalarValueInitExpr : DStmt<Expr>;
+def CXXNewExpr : DStmt<Expr>;
+def CXXDeleteExpr : DStmt<Expr>;
+def CXXPseudoDestructorExpr : DStmt<Expr>;
+def TypeTraitExpr : DStmt<Expr>;
+def UnaryTypeTraitExpr : DStmt<Expr>;
+def BinaryTypeTraitExpr : DStmt<Expr>;
+def ArrayTypeTraitExpr : DStmt<Expr>;
+def ExpressionTraitExpr : DStmt<Expr>;
+def DependentScopeDeclRefExpr : DStmt<Expr>;
+def CXXConstructExpr : DStmt<Expr>;
+def CXXBindTemporaryExpr : DStmt<Expr>;
+def ExprWithCleanups : DStmt<Expr>;
+def CXXTemporaryObjectExpr : DStmt<CXXConstructExpr>;
+def CXXUnresolvedConstructExpr : DStmt<Expr>;
+def CXXDependentScopeMemberExpr : DStmt<Expr>;
+def OverloadExpr : DStmt<Expr, 1>;
+def UnresolvedLookupExpr : DStmt<OverloadExpr>;
+def UnresolvedMemberExpr : DStmt<OverloadExpr>;
+def CXXNoexceptExpr : DStmt<Expr>;
+def PackExpansionExpr : DStmt<Expr>;
+def SizeOfPackExpr : DStmt<Expr>;
+def SubstNonTypeTemplateParmExpr : DStmt<Expr>;
+def SubstNonTypeTemplateParmPackExpr : DStmt<Expr>;
+def MaterializeTemporaryExpr : DStmt<Expr>;
+def LambdaExpr : DStmt<Expr>;
+
+// Obj-C Expressions.
+def ObjCStringLiteral : DStmt<Expr>;
+def ObjCNumericLiteral : DStmt<Expr>;
+def ObjCArrayLiteral : DStmt<Expr>;
+def ObjCDictionaryLiteral : DStmt<Expr>;
+def ObjCEncodeExpr : DStmt<Expr>;
+def ObjCMessageExpr : DStmt<Expr>;
+def ObjCSelectorExpr : DStmt<Expr>;
+def ObjCProtocolExpr : DStmt<Expr>;
+def ObjCIvarRefExpr : DStmt<Expr>;
+def ObjCPropertyRefExpr : DStmt<Expr>;
+def ObjCIsaExpr : DStmt<Expr>;
+def ObjCIndirectCopyRestoreExpr : DStmt<Expr>;
+def ObjCBoolLiteralExpr : DStmt<Expr>;
+def ObjCSubscriptRefExpr : DStmt<Expr>;
+
+// Obj-C ARC Expressions.
+def ObjCBridgedCastExpr : DStmt<ExplicitCastExpr>;
+
+// CUDA Expressions.
+def CUDAKernelCallExpr : DStmt<CallExpr>;
+
+// Clang Extensions.
+def ShuffleVectorExpr : DStmt<Expr>;
+def BlockExpr : DStmt<Expr>;
+def OpaqueValueExpr : DStmt<Expr>;
+
+// Microsoft Extensions.
+def CXXUuidofExpr : DStmt<Expr>;
+def SEHTryStmt : Stmt;
+def SEHExceptStmt : Stmt;
+def SEHFinallyStmt : Stmt;
+def MSDependentExistsStmt : Stmt;
+
+// OpenCL Extensions.
+def AsTypeExpr : DStmt<Expr>;
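This TableGen file only declares the statement and expression node hierarchy; build rules elsewhere turn it into a generated include that clients consume with X-macros. The sketch below shows the general pattern with a hand-written stand-in list rather than the real generated StmtNodes output, so the macro and node names are illustrative.

  #include <iostream>

  // Stand-in for the TableGen-generated node list; the real build emits a
  // .inc file from StmtNodes.td and callers #define a macro before including it.
  #define FAKE_STMT_NODES(STMT) \
    STMT(NullStmt)              \
    STMT(CompoundStmt)          \
    STMT(IfStmt)                \
    STMT(ReturnStmt)

  // Expansion 1: build an enum of node kinds.
  enum StmtKind {
  #define STMT(CLASS) SK_##CLASS,
    FAKE_STMT_NODES(STMT)
  #undef STMT
    SK_Count
  };

  // Expansion 2: build a parallel name table from the same list.
  static const char *const StmtNames[] = {
  #define STMT(CLASS) #CLASS,
    FAKE_STMT_NODES(STMT)
  #undef STMT
  };

  int main() {
    for (int I = 0; I != SK_Count; ++I)
      std::cout << I << ": " << StmtNames[I] << "\n";
  }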
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TargetBuiltins.h b/contrib/llvm/tools/clang/include/clang/Basic/TargetBuiltins.h
new file mode 100644
index 0000000..7c04bf7
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TargetBuiltins.h
@@ -0,0 +1,110 @@
+//===--- TargetBuiltins.h - Target specific builtin IDs -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_TARGET_BUILTINS_H
+#define LLVM_CLANG_BASIC_TARGET_BUILTINS_H
+
+#include "clang/Basic/Builtins.h"
+#undef PPC
+
+namespace clang {
+
+ /// ARM builtins
+ namespace ARM {
+ enum {
+ LastTIBuiltin = clang::Builtin::FirstTSBuiltin-1,
+#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#include "clang/Basic/BuiltinsARM.def"
+ LastTSBuiltin
+ };
+ }
+
+ /// PPC builtins
+ namespace PPC {
+ enum {
+ LastTIBuiltin = clang::Builtin::FirstTSBuiltin-1,
+#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#include "clang/Basic/BuiltinsPPC.def"
+ LastTSBuiltin
+ };
+ }
+
+ /// PTX builtins
+ namespace PTX {
+ enum {
+ LastTIBuiltin = clang::Builtin::FirstTSBuiltin-1,
+#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#include "clang/Basic/BuiltinsPTX.def"
+ LastTSBuiltin
+ };
+ }
+
+
+ /// X86 builtins
+ namespace X86 {
+ enum {
+ LastTIBuiltin = clang::Builtin::FirstTSBuiltin-1,
+#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#include "clang/Basic/BuiltinsX86.def"
+ LastTSBuiltin
+ };
+ }
+
+ /// NeonTypeFlags - Flags to identify the types for overloaded Neon
+ /// builtins. These must be kept in sync with the flags in
+ /// utils/TableGen/NeonEmitter.h.
+ class NeonTypeFlags {
+ enum {
+ EltTypeMask = 0xf,
+ UnsignedFlag = 0x10,
+ QuadFlag = 0x20
+ };
+ uint32_t Flags;
+
+ public:
+ enum EltType {
+ Int8,
+ Int16,
+ Int32,
+ Int64,
+ Poly8,
+ Poly16,
+ Float16,
+ Float32
+ };
+
+ NeonTypeFlags(unsigned F) : Flags(F) {}
+ NeonTypeFlags(EltType ET, bool IsUnsigned, bool IsQuad) : Flags(ET) {
+ if (IsUnsigned)
+ Flags |= UnsignedFlag;
+ if (IsQuad)
+ Flags |= QuadFlag;
+ }
+
+ EltType getEltType() const { return (EltType)(Flags & EltTypeMask); }
+ bool isPoly() const {
+ EltType ET = getEltType();
+ return ET == Poly8 || ET == Poly16;
+ }
+ bool isUnsigned() const { return (Flags & UnsignedFlag) != 0; }
+ bool isQuad() const { return (Flags & QuadFlag) != 0; }
+ };
+
+ /// Hexagon builtins
+ namespace Hexagon {
+ enum {
+ LastTIBuiltin = clang::Builtin::FirstTSBuiltin-1,
+#define BUILTIN(ID, TYPE, ATTRS) BI##ID,
+#include "clang/Basic/BuiltinsHexagon.def"
+ LastTSBuiltin
+ };
+ }
+} // end namespace clang.
+
+#endif
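Each per-target enum above uses the same X-macro trick: define BUILTIN to expand to an enumerator prefixed with BI, include the target's Builtins*.def, and let LastTSBuiltin mark one past the end. Below is a minimal standalone sketch of that pattern with an invented two-entry builtin list; the .def contents and the FirstTSBuiltin value are made up for illustration.

  #include <cassert>

  // Stand-in for a Builtins*.def file: one BUILTIN(ID, TYPE, ATTRS) per line.
  #define FAKE_BUILTINS(BUILTIN)            \
    BUILTIN(__builtin_demo_add, "iii", "n") \
    BUILTIN(__builtin_demo_mul, "iii", "n")

  enum {
    FirstTSBuiltin = 100,                // would come from Builtin::FirstTSBuiltin
    LastTIBuiltin = FirstTSBuiltin - 1,  // last target-independent builtin
  #define BUILTIN(ID, TYPE, ATTRS) BI##ID,
    FAKE_BUILTINS(BUILTIN)
  #undef BUILTIN
    LastTSBuiltin                        // one past the last target builtin
  };

  int main() {
    // Enumerators are assigned consecutively after LastTIBuiltin.
    assert(BI__builtin_demo_add == FirstTSBuiltin);
    assert(LastTSBuiltin == FirstTSBuiltin + 2);
  }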
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h b/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h
new file mode 100644
index 0000000..bbd376a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TargetInfo.h
@@ -0,0 +1,680 @@
+//===--- TargetInfo.h - Expose information about the target -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TargetInfo interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_TARGETINFO_H
+#define LLVM_CLANG_BASIC_TARGETINFO_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/DataTypes.h"
+#include "clang/Basic/AddressSpaces.h"
+#include "clang/Basic/VersionTuple.h"
+#include <cassert>
+#include <vector>
+#include <string>
+
+namespace llvm {
+struct fltSemantics;
+}
+
+namespace clang {
+class DiagnosticsEngine;
+class LangOptions;
+class MacroBuilder;
+class SourceLocation;
+class SourceManager;
+class TargetOptions;
+
+namespace Builtin { struct Info; }
+
+/// TargetCXXABI - The types of C++ ABIs for which we can generate code.
+enum TargetCXXABI {
+ /// The generic ("Itanium") C++ ABI, documented at:
+ /// http://www.codesourcery.com/public/cxx-abi/
+ CXXABI_Itanium,
+
+ /// The ARM C++ ABI, based largely on the Itanium ABI but with
+ /// significant differences.
+ /// http://infocenter.arm.com
+ /// /help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
+ CXXABI_ARM,
+
+ /// The Visual Studio ABI. Only scattered official documentation exists.
+ CXXABI_Microsoft
+};
+
+/// TargetInfo - This class exposes information about the current target.
+///
+class TargetInfo : public RefCountedBase<TargetInfo> {
+ llvm::Triple Triple;
+protected:
+ // Target values set by the ctor of the actual target implementation. Default
+ // values are specified by the TargetInfo constructor.
+ bool BigEndian;
+ bool TLSSupported;
+ bool NoAsmVariants; // True if {|} are normal characters.
+ unsigned char PointerWidth, PointerAlign;
+ unsigned char BoolWidth, BoolAlign;
+ unsigned char IntWidth, IntAlign;
+ unsigned char HalfWidth, HalfAlign;
+ unsigned char FloatWidth, FloatAlign;
+ unsigned char DoubleWidth, DoubleAlign;
+ unsigned char LongDoubleWidth, LongDoubleAlign;
+ unsigned char LargeArrayMinWidth, LargeArrayAlign;
+ unsigned char LongWidth, LongAlign;
+ unsigned char LongLongWidth, LongLongAlign;
+ unsigned char SuitableAlign;
+ unsigned char MaxAtomicPromoteWidth, MaxAtomicInlineWidth;
+ const char *DescriptionString;
+ const char *UserLabelPrefix;
+ const char *MCountName;
+ const llvm::fltSemantics *HalfFormat, *FloatFormat, *DoubleFormat,
+ *LongDoubleFormat;
+ unsigned char RegParmMax, SSERegParmMax;
+ TargetCXXABI CXXABI;
+ const LangAS::Map *AddrSpaceMap;
+
+ mutable StringRef PlatformName;
+ mutable VersionTuple PlatformMinVersion;
+
+ unsigned HasAlignMac68kSupport : 1;
+ unsigned RealTypeUsesObjCFPRet : 3;
+ unsigned ComplexLongDoubleUsesFP2Ret : 1;
+
+ // TargetInfo Constructor. Default initializes all fields.
+ TargetInfo(const std::string &T);
+
+public:
+ /// CreateTargetInfo - Construct a target for the given options.
+ ///
+ /// \param Opts - The options to use to initialize the target. The target may
+ /// modify the options to canonicalize the target feature information to match
+ /// what the backend expects.
+ static TargetInfo* CreateTargetInfo(DiagnosticsEngine &Diags,
+ TargetOptions &Opts);
+
+ virtual ~TargetInfo();
+
+ ///===---- Target Data Type Query Methods -------------------------------===//
+ enum IntType {
+ NoInt = 0,
+ SignedShort,
+ UnsignedShort,
+ SignedInt,
+ UnsignedInt,
+ SignedLong,
+ UnsignedLong,
+ SignedLongLong,
+ UnsignedLongLong
+ };
+
+ enum RealType {
+ Float = 0,
+ Double,
+ LongDouble
+ };
+
+protected:
+ IntType SizeType, IntMaxType, UIntMaxType, PtrDiffType, IntPtrType, WCharType,
+ WIntType, Char16Type, Char32Type, Int64Type, SigAtomicType;
+
+ /// Control whether the alignment of bit-field types is respected when laying
+ /// out structures. If true, then the alignment of the bit-field type will be
+ /// used to (a) impact the alignment of the containing structure, and (b)
+ /// ensure that the individual bit-field will not straddle an alignment
+ /// boundary.
+ unsigned UseBitFieldTypeAlignment : 1;
+
+ /// Control whether zero-length bitfields (e.g., int : 0;) force alignment of
+ /// the next bitfield. If the alignment of the zero-length bitfield is
+ /// greater than that of the member that follows it, that member will be
+ /// aligned as if it had the type of the zero-length bitfield.
+ unsigned UseZeroLengthBitfieldAlignment : 1;
+
+ /// If non-zero, specifies a fixed alignment value for bitfields that follow
+ /// zero length bitfield, regardless of the zero length bitfield type.
+ unsigned ZeroLengthBitfieldBoundary;
+
+public:
+ IntType getSizeType() const { return SizeType; }
+ IntType getIntMaxType() const { return IntMaxType; }
+ IntType getUIntMaxType() const { return UIntMaxType; }
+ IntType getPtrDiffType(unsigned AddrSpace) const {
+ return AddrSpace == 0 ? PtrDiffType : getPtrDiffTypeV(AddrSpace);
+ }
+ IntType getIntPtrType() const { return IntPtrType; }
+ IntType getWCharType() const { return WCharType; }
+ IntType getWIntType() const { return WIntType; }
+ IntType getChar16Type() const { return Char16Type; }
+ IntType getChar32Type() const { return Char32Type; }
+ IntType getInt64Type() const { return Int64Type; }
+ IntType getSigAtomicType() const { return SigAtomicType; }
+
+
+ /// getTypeWidth - Return the width (in bits) of the specified integer type
+ /// enum. For example, SignedInt -> getIntWidth().
+ unsigned getTypeWidth(IntType T) const;
+
+ /// getTypeAlign - Return the alignment (in bits) of the specified integer
+ /// type enum. For example, SignedInt -> getIntAlign().
+ unsigned getTypeAlign(IntType T) const;
+
+ /// isTypeSigned - Return whether an integer type is signed. Returns true if
+ /// the type is signed; false otherwise.
+ static bool isTypeSigned(IntType T);
+
+ /// getPointerWidth - Return the width of pointers on this target, for the
+ /// specified address space.
+ uint64_t getPointerWidth(unsigned AddrSpace) const {
+ return AddrSpace == 0 ? PointerWidth : getPointerWidthV(AddrSpace);
+ }
+ uint64_t getPointerAlign(unsigned AddrSpace) const {
+ return AddrSpace == 0 ? PointerAlign : getPointerAlignV(AddrSpace);
+ }
+
+ /// getBoolWidth/Align - Return the size of '_Bool' and C++ 'bool' for this
+ /// target, in bits.
+ unsigned getBoolWidth() const { return BoolWidth; }
+ unsigned getBoolAlign() const { return BoolAlign; }
+
+ unsigned getCharWidth() const { return 8; } // FIXME
+ unsigned getCharAlign() const { return 8; } // FIXME
+
+ /// getShortWidth/Align - Return the size of 'signed short' and
+ /// 'unsigned short' for this target, in bits.
+ unsigned getShortWidth() const { return 16; } // FIXME
+ unsigned getShortAlign() const { return 16; } // FIXME
+
+ /// getIntWidth/Align - Return the size of 'signed int' and 'unsigned int' for
+ /// this target, in bits.
+ unsigned getIntWidth() const { return IntWidth; }
+ unsigned getIntAlign() const { return IntAlign; }
+
+ /// getLongWidth/Align - Return the size of 'signed long' and 'unsigned long'
+ /// for this target, in bits.
+ unsigned getLongWidth() const { return LongWidth; }
+ unsigned getLongAlign() const { return LongAlign; }
+
+ /// getLongLongWidth/Align - Return the size of 'signed long long' and
+ /// 'unsigned long long' for this target, in bits.
+ unsigned getLongLongWidth() const { return LongLongWidth; }
+ unsigned getLongLongAlign() const { return LongLongAlign; }
+
+ /// getSuitableAlign - Return the alignment that is suitable for storing any
+ /// object with a fundamental alignment requirement.
+ unsigned getSuitableAlign() const { return SuitableAlign; }
+
+ /// getWCharWidth/Align - Return the size of 'wchar_t' for this target, in
+ /// bits.
+ unsigned getWCharWidth() const { return getTypeWidth(WCharType); }
+ unsigned getWCharAlign() const { return getTypeAlign(WCharType); }
+
+ /// getChar16Width/Align - Return the size of 'char16_t' for this target, in
+ /// bits.
+ unsigned getChar16Width() const { return getTypeWidth(Char16Type); }
+ unsigned getChar16Align() const { return getTypeAlign(Char16Type); }
+
+ /// getChar32Width/Align - Return the size of 'char32_t' for this target, in
+ /// bits.
+ unsigned getChar32Width() const { return getTypeWidth(Char32Type); }
+ unsigned getChar32Align() const { return getTypeAlign(Char32Type); }
+
+ /// getHalfWidth/Align/Format - Return the size/align/format of 'half'.
+ unsigned getHalfWidth() const { return HalfWidth; }
+ unsigned getHalfAlign() const { return HalfAlign; }
+ const llvm::fltSemantics &getHalfFormat() const { return *HalfFormat; }
+
+ /// getFloatWidth/Align/Format - Return the size/align/format of 'float'.
+ unsigned getFloatWidth() const { return FloatWidth; }
+ unsigned getFloatAlign() const { return FloatAlign; }
+ const llvm::fltSemantics &getFloatFormat() const { return *FloatFormat; }
+
+ /// getDoubleWidth/Align/Format - Return the size/align/format of 'double'.
+ unsigned getDoubleWidth() const { return DoubleWidth; }
+ unsigned getDoubleAlign() const { return DoubleAlign; }
+ const llvm::fltSemantics &getDoubleFormat() const { return *DoubleFormat; }
+
+ /// getLongDoubleWidth/Align/Format - Return the size/align/format of 'long
+ /// double'.
+ unsigned getLongDoubleWidth() const { return LongDoubleWidth; }
+ unsigned getLongDoubleAlign() const { return LongDoubleAlign; }
+ const llvm::fltSemantics &getLongDoubleFormat() const {
+ return *LongDoubleFormat;
+ }
+
+ /// getFloatEvalMethod - Return the value for the C99 FLT_EVAL_METHOD macro.
+ virtual unsigned getFloatEvalMethod() const { return 0; }
+
+ // getLargeArrayMinWidth/Align - Return the minimum array size that is
+ // 'large' and its alignment.
+ unsigned getLargeArrayMinWidth() const { return LargeArrayMinWidth; }
+ unsigned getLargeArrayAlign() const { return LargeArrayAlign; }
+
+ /// getMaxAtomicPromoteWidth - Return the maximum width lock-free atomic
+ /// operation which will ever be supported for the given target
+ unsigned getMaxAtomicPromoteWidth() const { return MaxAtomicPromoteWidth; }
+ /// getMaxAtomicInlineWidth - Return the maximum width lock-free atomic
+ /// operation which can be inlined given the supported features of the
+ /// given target.
+ unsigned getMaxAtomicInlineWidth() const { return MaxAtomicInlineWidth; }
+
+ /// getIntMaxTWidth - Return the size of intmax_t and uintmax_t for this
+ /// target, in bits.
+ unsigned getIntMaxTWidth() const {
+ return getTypeWidth(IntMaxType);
+ }
+
+ /// getRegisterWidth - Return the "preferred" register width on this target.
+ uint64_t getRegisterWidth() const {
+ // Currently we assume the register width on the target matches the pointer
+ // width; we can introduce a new variable for this if/when some target wants
+ // it.
+ return LongWidth;
+ }
+
+ /// getUserLabelPrefix - This returns the default value of the
+ /// __USER_LABEL_PREFIX__ macro, which is the prefix given to user symbols by
+ /// default. On most platforms this is "_", but it is "" on some, and "." on
+ /// others.
+ const char *getUserLabelPrefix() const {
+ return UserLabelPrefix;
+ }
+
+ /// MCountName - This returns the name of the mcount instrumentation function.
+ const char *getMCountName() const {
+ return MCountName;
+ }
+
+ /// useBitFieldTypeAlignment() - Check whether the alignment of bit-field
+ /// types is respected when laying out structures.
+ bool useBitFieldTypeAlignment() const {
+ return UseBitFieldTypeAlignment;
+ }
+
+ /// useZeroLengthBitfieldAlignment() - Check whether zero length bitfields
+ /// should force alignment of the next member.
+ bool useZeroLengthBitfieldAlignment() const {
+ return UseZeroLengthBitfieldAlignment;
+ }
+
+ /// getZeroLengthBitfieldBoundary() - Get the fixed alignment value in bits
+ /// for a member that follows a zero length bitfield.
+ unsigned getZeroLengthBitfieldBoundary() const {
+ return ZeroLengthBitfieldBoundary;
+ }
+
+ /// hasAlignMac68kSupport - Check whether this target supports '#pragma options
+ /// align=mac68k'.
+ bool hasAlignMac68kSupport() const {
+ return HasAlignMac68kSupport;
+ }
+
+ /// getTypeName - Return the user string for the specified integer type enum.
+ /// For example, SignedShort -> "short".
+ static const char *getTypeName(IntType T);
+
+ /// getTypeConstantSuffix - Return the constant suffix for the specified
+ /// integer type enum. For example, SignedLong -> "L".
+ static const char *getTypeConstantSuffix(IntType T);
+
+ /// \brief Check whether the given real type should use the "fpret" flavor of
+ /// Obj-C message passing on this target.
+ bool useObjCFPRetForRealType(RealType T) const {
+ return RealTypeUsesObjCFPRet & (1 << T);
+ }
+
+ /// \brief Check whether _Complex long double should use the "fp2ret" flavor
+ /// of Obj-C message passing on this target.
+ bool useObjCFP2RetForComplexLongDouble() const {
+ return ComplexLongDoubleUsesFP2Ret;
+ }
+
+ ///===---- Other target property query methods --------------------------===//
+
+ /// getTargetDefines - Appends the target-specific #define values for this
+ /// target set to the specified buffer.
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const = 0;
+
+
+ /// getTargetBuiltins - Return information about target-specific builtins for
+ /// the current primary target, and info about which builtins are non-portable
+ /// across the current set of primary and secondary targets.
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const = 0;
+
+ /// isCLZForZeroUndef - The __builtin_clz* and __builtin_ctz* built-in
+ /// functions are specified to have undefined results for zero inputs, but
+ /// on targets that support these operations in a way that provides
+ /// well-defined results for zero without loss of performance, it is a good
+ /// idea to avoid optimizing based on that undef behavior.
+ virtual bool isCLZForZeroUndef() const { return true; }
+
+ /// getVAListDeclaration - Return the declaration to use for
+ /// __builtin_va_list, which is target-specific.
+ virtual const char *getVAListDeclaration() const = 0;
+
+ /// isValidClobber - Returns whether the passed in string is
+ /// a valid clobber in an inline asm statement. This is used by
+ /// Sema.
+ bool isValidClobber(StringRef Name) const;
+
+ /// isValidGCCRegisterName - Returns whether the passed in string
+ /// is a valid register name according to GCC. This is used by Sema for
+ /// inline asm statements.
+ bool isValidGCCRegisterName(StringRef Name) const;
+
+ // getNormalizedGCCRegisterName - Returns the "normalized" GCC register name.
+ // For example, on x86 it will return "ax" when "eax" is passed in.
+ StringRef getNormalizedGCCRegisterName(StringRef Name) const;
+
+ struct ConstraintInfo {
+ enum {
+ CI_None = 0x00,
+ CI_AllowsMemory = 0x01,
+ CI_AllowsRegister = 0x02,
+ CI_ReadWrite = 0x04, // "+r" output constraint (read and write).
+ CI_HasMatchingInput = 0x08 // This output operand has a matching input.
+ };
+ unsigned Flags;
+ int TiedOperand;
+
+ std::string ConstraintStr; // constraint: "=rm"
+ std::string Name; // Operand name: [foo] with no []'s.
+ public:
+ ConstraintInfo(StringRef ConstraintStr, StringRef Name)
+ : Flags(0), TiedOperand(-1), ConstraintStr(ConstraintStr.str()),
+ Name(Name.str()) {}
+
+ const std::string &getConstraintStr() const { return ConstraintStr; }
+ const std::string &getName() const { return Name; }
+ bool isReadWrite() const { return (Flags & CI_ReadWrite) != 0; }
+ bool allowsRegister() const { return (Flags & CI_AllowsRegister) != 0; }
+ bool allowsMemory() const { return (Flags & CI_AllowsMemory) != 0; }
+
+ /// hasMatchingInput - Return true if this output operand has a matching
+ /// (tied) input operand.
+ bool hasMatchingInput() const { return (Flags & CI_HasMatchingInput) != 0; }
+
+ /// hasTiedOperand() - Return true if this input operand is a matching
+ /// constraint that ties it to an output operand. If this returns true,
+ /// then getTiedOperand will indicate which output operand this is tied to.
+ bool hasTiedOperand() const { return TiedOperand != -1; }
+ unsigned getTiedOperand() const {
+ assert(hasTiedOperand() && "Has no tied operand!");
+ return (unsigned)TiedOperand;
+ }
+
+ void setIsReadWrite() { Flags |= CI_ReadWrite; }
+ void setAllowsMemory() { Flags |= CI_AllowsMemory; }
+ void setAllowsRegister() { Flags |= CI_AllowsRegister; }
+ void setHasMatchingInput() { Flags |= CI_HasMatchingInput; }
+
+ /// setTiedOperand - Indicate that this is an input operand that is tied to
+ /// the specified output operand. Copy over the various constraint
+ /// information from the output.
+ void setTiedOperand(unsigned N, ConstraintInfo &Output) {
+ Output.setHasMatchingInput();
+ Flags = Output.Flags;
+ TiedOperand = N;
+ // Don't copy Name or constraint string.
+ }
+ };
+
+ // validateOutputConstraint, validateInputConstraint - Checks that
+ // a constraint is valid and provides information about it.
+ // FIXME: These should return a real error instead of just true/false.
+ bool validateOutputConstraint(ConstraintInfo &Info) const;
+ bool validateInputConstraint(ConstraintInfo *OutputConstraints,
+ unsigned NumOutputs,
+ ConstraintInfo &info) const;
+ bool resolveSymbolicName(const char *&Name,
+ ConstraintInfo *OutputConstraints,
+ unsigned NumOutputs, unsigned &Index) const;
+
+ // The Constraint parameter will be left pointing at the last character of
+ // the constraint. In practice, it won't be changed unless the
+ // constraint is longer than one character.
+ virtual std::string convertConstraint(const char *&Constraint) const {
+ // 'p' defaults to 'r', but can be overridden by targets.
+ if (*Constraint == 'p')
+ return std::string("r");
+ return std::string(1, *Constraint);
+ }
+
+ // Returns a string of target-specific clobbers, in LLVM format.
+ virtual const char *getClobbers() const = 0;
+
+
+ /// getTriple - Return the target triple of the primary target.
+ const llvm::Triple &getTriple() const {
+ return Triple;
+ }
+
+ const char *getTargetDescription() const {
+ return DescriptionString;
+ }
+
+ struct GCCRegAlias {
+ const char * const Aliases[5];
+ const char * const Register;
+ };
+
+ struct AddlRegName {
+ const char * const Names[5];
+ const unsigned RegNum;
+ };
+
+ /// hasProtectedVisibility - Does this target support "protected"
+ /// visibility?
+ ///
+ /// Any target with dynamic libraries will naturally support
+ /// something like "default" (meaning that the symbol is visible
+ /// outside this shared object) and "hidden" (meaning that it isn't)
+ /// visibilities, but "protected" is really an ELF-specific concept
+ /// with weird semantics designed around the convenience of dynamic
+ /// linker implementations. Which is not to suggest that there's
+ /// consistent target-independent semantics for "default" visibility
+ /// either; the entire thing is pretty badly mangled.
+ virtual bool hasProtectedVisibility() const { return true; }
+
+ virtual bool useGlobalsForAutomaticVariables() const { return false; }
+
+ /// getCFStringSection - Return the section to use for CFString
+ /// literals, or 0 if no special section is used.
+ virtual const char *getCFStringSection() const {
+ return "__DATA,__cfstring";
+ }
+
+ /// getNSStringSection - Return the section to use for NSString
+ /// literals, or 0 if no special section is used.
+ virtual const char *getNSStringSection() const {
+ return "__OBJC,__cstring_object,regular,no_dead_strip";
+ }
+
+ /// getNSStringNonFragileABISection - Return the section to use for
+ /// NSString literals, or 0 if no special section is used (NonFragile ABI).
+ virtual const char *getNSStringNonFragileABISection() const {
+ return "__DATA, __objc_stringobj, regular, no_dead_strip";
+ }
+
+ /// isValidSectionSpecifier - This is an optional hook that targets can
+ /// implement to perform semantic checking on attribute((section("foo")))
+ /// specifiers. In this case, "foo" is passed in to be checked. If the
+ /// section specifier is invalid, the backend should return a non-empty string
+ /// that indicates the problem.
+ ///
+ /// This hook is a simple quality of implementation feature to catch errors
+ /// and give good diagnostics in cases when the assembler or code generator
+ /// would otherwise reject the section specifier.
+ ///
+ virtual std::string isValidSectionSpecifier(StringRef SR) const {
+ return "";
+ }
+
+ /// setForcedLangOptions - Set forced language options.
+ /// Apply changes to the target information with respect to certain
+ /// language options which change the target configuration.
+ virtual void setForcedLangOptions(LangOptions &Opts);
+
+ /// getDefaultFeatures - Get the default set of target features for the CPU;
+ /// this should include all legal feature strings on the target.
+ virtual void getDefaultFeatures(llvm::StringMap<bool> &Features) const {
+ }
+
+ /// getABI - Get the ABI in use.
+ virtual const char *getABI() const {
+ return "";
+ }
+
+ /// getCXXABI - Get the C++ ABI in use.
+ virtual TargetCXXABI getCXXABI() const {
+ return CXXABI;
+ }
+
+ /// setCPU - Target the specific CPU.
+ ///
+ /// \return - False on error (invalid CPU name).
+ virtual bool setCPU(const std::string &Name) {
+ return false;
+ }
+
+ /// setABI - Use the specific ABI.
+ ///
+ /// \return - False on error (invalid ABI name).
+ virtual bool setABI(const std::string &Name) {
+ return false;
+ }
+
+ /// setCXXABI - Use this specific C++ ABI.
+ ///
+ /// \return - False on error (invalid C++ ABI name).
+ bool setCXXABI(const std::string &Name) {
+ static const TargetCXXABI Unknown = static_cast<TargetCXXABI>(-1);
+ TargetCXXABI ABI = llvm::StringSwitch<TargetCXXABI>(Name)
+ .Case("arm", CXXABI_ARM)
+ .Case("itanium", CXXABI_Itanium)
+ .Case("microsoft", CXXABI_Microsoft)
+ .Default(Unknown);
+ if (ABI == Unknown) return false;
+ return setCXXABI(ABI);
+ }
+
+ /// setCXXABI - Set the C++ ABI to be used by this implementation.
+ ///
+ /// \return - False on error (ABI not valid on this target)
+ virtual bool setCXXABI(TargetCXXABI ABI) {
+ CXXABI = ABI;
+ return true;
+ }
+
+ /// setFeatureEnabled - Enable or disable a specific target feature;
+ /// the feature name must be valid.
+ ///
+ /// \return - False on error (invalid feature name).
+ virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name,
+ bool Enabled) const {
+ return false;
+ }
+
+ /// HandleTargetFeatures - Perform initialization based on the user-configured
+ /// set of features (e.g., +sse4). The list is guaranteed to have at most one
+ /// entry per feature.
+ ///
+ /// The target may modify the features list, to change which options are
+ /// passed onwards to the backend.
+ virtual void HandleTargetFeatures(std::vector<std::string> &Features) {
+ }
+
+ /// \brief Determine whether the given target has the given feature.
+ virtual bool hasFeature(StringRef Feature) const {
+ return false;
+ }
+
+ // getRegParmMax - Returns the maximum number of args passed in registers.
+ unsigned getRegParmMax() const {
+ assert(RegParmMax < 7 && "RegParmMax value is larger than AST can handle");
+ return RegParmMax;
+ }
+
+ /// isTLSSupported - Whether the target supports thread-local storage.
+ bool isTLSSupported() const {
+ return TLSSupported;
+ }
+
+ /// hasNoAsmVariants - Return true if {|} are normal characters in the
+ /// asm string. If this returns false (the default), then {abc|xyz} is syntax
+ /// that says that when compiling for asm variant #0, "abc" should be
+ /// generated, but when compiling for asm variant #1, "xyz" should be
+ /// generated.
+ bool hasNoAsmVariants() const {
+ return NoAsmVariants;
+ }
+
+ /// getEHDataRegisterNumber - Return the register number that
+ /// __builtin_eh_return_regno would return with the specified argument.
+ virtual int getEHDataRegisterNumber(unsigned RegNo) const {
+ return -1;
+ }
+
+ /// getStaticInitSectionSpecifier - Return the section to use for C++ static
+ /// initialization functions.
+ virtual const char *getStaticInitSectionSpecifier() const {
+ return 0;
+ }
+
+ const LangAS::Map &getAddressSpaceMap() const {
+ return *AddrSpaceMap;
+ }
+
+ /// \brief Retrieve the name of the platform as it is used in the
+ /// availability attribute.
+ StringRef getPlatformName() const { return PlatformName; }
+
+ /// \brief Retrieve the minimum desired version of the platform, to
+ /// which the program should be compiled.
+ VersionTuple getPlatformMinVersion() const { return PlatformMinVersion; }
+
+ bool isBigEndian() const { return BigEndian; }
+
+protected:
+ virtual uint64_t getPointerWidthV(unsigned AddrSpace) const {
+ return PointerWidth;
+ }
+ virtual uint64_t getPointerAlignV(unsigned AddrSpace) const {
+ return PointerAlign;
+ }
+ virtual enum IntType getPtrDiffTypeV(unsigned AddrSpace) const {
+ return PtrDiffType;
+ }
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const = 0;
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const = 0;
+ virtual void getGCCAddlRegNames(const AddlRegName *&Addl,
+ unsigned &NumAddl) const {
+ Addl = 0;
+ NumAddl = 0;
+ }
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const= 0;
+};
+
+} // end namespace clang
+
+#endif
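A recurring idiom in TargetInfo is the non-virtual fast path with a virtual fallback: getPointerWidth, getPointerAlign, and getPtrDiffType answer the default address space inline from cached fields and only make a virtual call (getPointerWidthV and friends) for other address spaces. The standalone sketch below shows that idiom with illustrative class and target names, not the real class.

  #include <cassert>
  #include <cstdint>

  class TargetLayout {
    unsigned char PointerWidth = 64; // default address space, cached inline
  public:
    virtual ~TargetLayout() = default;

    // Hot, non-virtual entry point: the common case avoids a virtual call.
    uint64_t getPointerWidth(unsigned AddrSpace) const {
      return AddrSpace == 0 ? PointerWidth : getPointerWidthV(AddrSpace);
    }

  protected:
    // Targets with extra address spaces override this.
    virtual uint64_t getPointerWidthV(unsigned AddrSpace) const {
      return PointerWidth;
    }
  };

  // Hypothetical target where address space 1 uses 32-bit pointers.
  class DemoTarget : public TargetLayout {
  protected:
    uint64_t getPointerWidthV(unsigned AddrSpace) const override {
      return AddrSpace == 1 ? 32 : TargetLayout::getPointerWidthV(AddrSpace);
    }
  };

  int main() {
    DemoTarget T;
    assert(T.getPointerWidth(0) == 64);
    assert(T.getPointerWidth(1) == 32);
  }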
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h b/contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h
new file mode 100644
index 0000000..f3c206f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TargetOptions.h
@@ -0,0 +1,45 @@
+//===--- TargetOptions.h ----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_TARGETOPTIONS_H
+#define LLVM_CLANG_FRONTEND_TARGETOPTIONS_H
+
+#include <string>
+#include <vector>
+
+namespace clang {
+
+/// TargetOptions - Options for controlling the target.
+class TargetOptions {
+public:
+ /// If given, the name of the target triple to compile for. If not given, the
+ /// target will be selected to match the host.
+ std::string Triple;
+
+ /// If given, the name of the target CPU to generate code for.
+ std::string CPU;
+
+ /// If given, the name of the target ABI to use.
+ std::string ABI;
+
+ /// If given, the name of the target C++ ABI to use. If not given, defaults
+ /// to "itanium".
+ std::string CXXABI;
+
+ /// If given, the version string of the linker in use.
+ std::string LinkerVersion;
+
+ /// The list of target-specific features to enable or disable -- this should
+ /// be a list of strings starting with '+' or '-'.
+ std::vector<std::string> Features;
+};
+
+} // end namespace clang
+
+#endif
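The Features vector follows the '+'/'-' convention described above; a driver fills in these fields before constructing a target. Here is a hedged sketch using a standalone mirror of the class (since compiling against the real header needs the Clang tree); the triple and feature names are just examples.

  #include <iostream>
  #include <string>
  #include <vector>

  // Standalone mirror of clang::TargetOptions for illustration; the real
  // class is defined in clang/Basic/TargetOptions.h.
  struct TargetOptions {
    std::string Triple, CPU, ABI, CXXABI, LinkerVersion;
    std::vector<std::string> Features;
  };

  int main() {
    TargetOptions Opts;
    Opts.Triple = "x86_64-unknown-freebsd";  // example triple
    Opts.CPU = "core2";
    Opts.CXXABI = "itanium";                 // the documented default
    Opts.Features = {"+sse4.1", "-3dnow"};   // '+' enables, '-' disables

    for (const std::string &F : Opts.Features)
      std::cout << (F[0] == '+' ? "enable " : "disable ")
                << F.substr(1) << "\n";
  }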
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TemplateKinds.h b/contrib/llvm/tools/clang/include/clang/Basic/TemplateKinds.h
new file mode 100644
index 0000000..c6ea05b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TemplateKinds.h
@@ -0,0 +1,39 @@
+//===--- TemplateKinds.h - Enum values for C++ Template Kinds ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TemplateNameKind enum.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_TEMPLATEKINDS_H
+#define LLVM_CLANG_TEMPLATEKINDS_H
+
+namespace clang {
+
+/// \brief Specifies the kind of template name that an identifier refers to.
+enum TemplateNameKind {
+ /// The name does not refer to a template.
+ TNK_Non_template = 0,
+ /// The name refers to a function template or a set of overloaded
+ /// functions that includes at least one function template.
+ TNK_Function_template,
+ /// The name refers to a template whose specialization produces a
+ /// type. The template itself could be a class template, template
+ /// template parameter, or C++0x template alias.
+ TNK_Type_template,
+ /// The name refers to a dependent template name. Whether the
+ /// template name is assumed to refer to a type template or a
+ /// function template depends on the context in which the template
+ /// name occurs.
+ TNK_Dependent_template_name
+};
+
+}
+#endif
+
+
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def
new file mode 100644
index 0000000..2e4d34d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.def
@@ -0,0 +1,596 @@
+//===--- TokenKinds.def - C Family Token Kind Database ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TokenKind database. This includes normal tokens like
+// tok::ampamp (corresponding to the && token) as well as keywords for various
+// languages. Users of this file may optionally #define the TOK, KEYWORD,
+// ALIAS, or PPKEYWORD macros to make use of this file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TOK
+#define TOK(X)
+#endif
+#ifndef PUNCTUATOR
+#define PUNCTUATOR(X,Y) TOK(X)
+#endif
+#ifndef KEYWORD
+#define KEYWORD(X,Y) TOK(kw_ ## X)
+#endif
+#ifndef ALIAS
+#define ALIAS(X,Y,Z)
+#endif
+#ifndef PPKEYWORD
+#define PPKEYWORD(X)
+#endif
+#ifndef CXX_KEYWORD_OPERATOR
+#define CXX_KEYWORD_OPERATOR(X,Y)
+#endif
+#ifndef OBJC1_AT_KEYWORD
+#define OBJC1_AT_KEYWORD(X)
+#endif
+#ifndef OBJC2_AT_KEYWORD
+#define OBJC2_AT_KEYWORD(X)
+#endif
+#ifndef TESTING_KEYWORD
+#define TESTING_KEYWORD(X, L) KEYWORD(X, L)
+#endif
+#ifndef ANNOTATION
+#define ANNOTATION(X) TOK(annot_ ## X)
+#endif
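Because every macro above defaults to a no-op (or forwards to TOK, which defaults to a no-op), a client picks out just the rows it cares about by defining only the macros it needs before including this file. A hedged sketch of that consumption pattern follows; it assumes the clang include directory is on the include path, and building a spelling table this way is illustrative rather than what any particular Clang header does.

  #include <cstdio>

  // Build a list of all punctuator spellings by expanding only PUNCTUATOR;
  // every other row collapses to nothing via the defaults above.
  static const char *const PunctuatorSpellings[] = {
  #define PUNCTUATOR(Name, Spelling) Spelling,
  #include "clang/Basic/TokenKinds.def"
  };

  int main() {
    for (const char *S : PunctuatorSpellings)
      std::printf("%s\n", S);
  }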
+
+//===----------------------------------------------------------------------===//
+// Preprocessor keywords.
+//===----------------------------------------------------------------------===//
+
+// These have meaning after a '#' at the start of a line. These define enums in
+// the tok::pp_* namespace. Note that IdentifierInfo::getPPKeywordID must be
+// manually updated if something is added here.
+PPKEYWORD(not_keyword)
+
+// C99 6.10.1 - Conditional Inclusion.
+PPKEYWORD(if)
+PPKEYWORD(ifdef)
+PPKEYWORD(ifndef)
+PPKEYWORD(elif)
+PPKEYWORD(else)
+PPKEYWORD(endif)
+PPKEYWORD(defined)
+
+// C99 6.10.2 - Source File Inclusion.
+PPKEYWORD(include)
+PPKEYWORD(__include_macros)
+
+// C99 6.10.3 - Macro Replacement.
+PPKEYWORD(define)
+PPKEYWORD(undef)
+
+// C99 6.10.4 - Line Control.
+PPKEYWORD(line)
+
+// C99 6.10.5 - Error Directive.
+PPKEYWORD(error)
+
+// C99 6.10.6 - Pragma Directive.
+PPKEYWORD(pragma)
+
+// GNU Extensions.
+PPKEYWORD(import)
+PPKEYWORD(include_next)
+PPKEYWORD(warning)
+PPKEYWORD(ident)
+PPKEYWORD(sccs)
+PPKEYWORD(assert)
+PPKEYWORD(unassert)
+
+// Clang extensions
+PPKEYWORD(__public_macro)
+PPKEYWORD(__private_macro)
+
+//===----------------------------------------------------------------------===//
+// Language keywords.
+//===----------------------------------------------------------------------===//
+
+// These define members of the tok::* namespace.
+
+TOK(unknown) // Not a token.
+TOK(eof) // End of file.
+TOK(eod) // End of preprocessing directive (end of line inside a
+ // directive).
+TOK(code_completion) // Code completion marker
+TOK(cxx_defaultarg_end) // C++ default argument end marker
+
+// C99 6.4.9: Comments.
+TOK(comment) // Comment (only in -E -C[C] mode)
+
+// C99 6.4.2: Identifiers.
+TOK(identifier) // abcde123
+TOK(raw_identifier) // Used only in raw lexing mode.
+
+// C99 6.4.4.1: Integer Constants
+// C99 6.4.4.2: Floating Constants
+TOK(numeric_constant) // 0x123
+
+// C99 6.4.4: Character Constants
+TOK(char_constant) // 'a'
+TOK(wide_char_constant) // L'b'
+
+// C++0x Character Constants
+TOK(utf16_char_constant) // u'a'
+TOK(utf32_char_constant) // U'a'
+
+// C99 6.4.5: String Literals.
+TOK(string_literal) // "foo"
+TOK(wide_string_literal) // L"foo"
+TOK(angle_string_literal)// <foo>
+
+// C++0x String Literals.
+TOK(utf8_string_literal) // u8"foo"
+TOK(utf16_string_literal)// u"foo"
+TOK(utf32_string_literal)// U"foo"
+
+// C99 6.4.6: Punctuators.
+PUNCTUATOR(l_square, "[")
+PUNCTUATOR(r_square, "]")
+PUNCTUATOR(l_paren, "(")
+PUNCTUATOR(r_paren, ")")
+PUNCTUATOR(l_brace, "{")
+PUNCTUATOR(r_brace, "}")
+PUNCTUATOR(period, ".")
+PUNCTUATOR(ellipsis, "...")
+PUNCTUATOR(amp, "&")
+PUNCTUATOR(ampamp, "&&")
+PUNCTUATOR(ampequal, "&=")
+PUNCTUATOR(star, "*")
+PUNCTUATOR(starequal, "*=")
+PUNCTUATOR(plus, "+")
+PUNCTUATOR(plusplus, "++")
+PUNCTUATOR(plusequal, "+=")
+PUNCTUATOR(minus, "-")
+PUNCTUATOR(arrow, "->")
+PUNCTUATOR(minusminus, "--")
+PUNCTUATOR(minusequal, "-=")
+PUNCTUATOR(tilde, "~")
+PUNCTUATOR(exclaim, "!")
+PUNCTUATOR(exclaimequal, "!=")
+PUNCTUATOR(slash, "/")
+PUNCTUATOR(slashequal, "/=")
+PUNCTUATOR(percent, "%")
+PUNCTUATOR(percentequal, "%=")
+PUNCTUATOR(less, "<")
+PUNCTUATOR(lessless, "<<")
+PUNCTUATOR(lessequal, "<=")
+PUNCTUATOR(lesslessequal, "<<=")
+PUNCTUATOR(greater, ">")
+PUNCTUATOR(greatergreater, ">>")
+PUNCTUATOR(greaterequal, ">=")
+PUNCTUATOR(greatergreaterequal, ">>=")
+PUNCTUATOR(caret, "^")
+PUNCTUATOR(caretequal, "^=")
+PUNCTUATOR(pipe, "|")
+PUNCTUATOR(pipepipe, "||")
+PUNCTUATOR(pipeequal, "|=")
+PUNCTUATOR(question, "?")
+PUNCTUATOR(colon, ":")
+PUNCTUATOR(semi, ";")
+PUNCTUATOR(equal, "=")
+PUNCTUATOR(equalequal, "==")
+PUNCTUATOR(comma, ",")
+PUNCTUATOR(hash, "#")
+PUNCTUATOR(hashhash, "##")
+PUNCTUATOR(hashat, "#@")
+
+// C++ Support
+PUNCTUATOR(periodstar, ".*")
+PUNCTUATOR(arrowstar, "->*")
+PUNCTUATOR(coloncolon, "::")
+
+// Objective C support.
+PUNCTUATOR(at, "@")
+
+// CUDA support.
+PUNCTUATOR(lesslessless, "<<<")
+PUNCTUATOR(greatergreatergreater, ">>>")
+
+// C99 6.4.1: Keywords. These turn into kw_* tokens.
+// Flags allowed:
+// KEYALL - This is a keyword in all variants of C and C++, or it
+// is a keyword in the implementation namespace that should
+// always be treated as a keyword
+// KEYC99 - This is a keyword introduced to C in C99
+// KEYC11 - This is a keyword introduced to C in C11
+// KEYCXX - This is a C++ keyword, or a C++-specific keyword in the
+// implementation namespace
+// KEYNOCXX - This is a keyword in every non-C++ dialect.
+// KEYCXX0X - This is a C++ keyword introduced to C++ in C++0x
+// KEYGNU - This is a keyword if GNU extensions are enabled
+// KEYMS - This is a keyword if Microsoft extensions are enabled
+// KEYOPENCL - This is a keyword in OpenCL
+// KEYALTIVEC - This is a keyword in AltiVec
+// KEYBORLAND - This is a keyword if Borland extensions are enabled
+// BOOLSUPPORT - This is a keyword if 'bool' is a built-in type
+//
+KEYWORD(auto , KEYALL)
+KEYWORD(break , KEYALL)
+KEYWORD(case , KEYALL)
+KEYWORD(char , KEYALL)
+KEYWORD(const , KEYALL)
+KEYWORD(continue , KEYALL)
+KEYWORD(default , KEYALL)
+KEYWORD(do , KEYALL)
+KEYWORD(double , KEYALL)
+KEYWORD(else , KEYALL)
+KEYWORD(enum , KEYALL)
+KEYWORD(extern , KEYALL)
+KEYWORD(float , KEYALL)
+KEYWORD(for , KEYALL)
+KEYWORD(goto , KEYALL)
+KEYWORD(if , KEYALL)
+KEYWORD(inline , KEYC99|KEYCXX|KEYGNU)
+KEYWORD(int , KEYALL)
+KEYWORD(long , KEYALL)
+KEYWORD(register , KEYALL)
+KEYWORD(restrict , KEYC99)
+KEYWORD(return , KEYALL)
+KEYWORD(short , KEYALL)
+KEYWORD(signed , KEYALL)
+KEYWORD(sizeof , KEYALL)
+KEYWORD(static , KEYALL)
+KEYWORD(struct , KEYALL)
+KEYWORD(switch , KEYALL)
+KEYWORD(typedef , KEYALL)
+KEYWORD(union , KEYALL)
+KEYWORD(unsigned , KEYALL)
+KEYWORD(void , KEYALL)
+KEYWORD(volatile , KEYALL)
+KEYWORD(while , KEYALL)
+KEYWORD(_Alignas , KEYALL)
+KEYWORD(_Atomic , KEYALL)
+KEYWORD(_Bool , KEYNOCXX)
+KEYWORD(_Complex , KEYALL)
+KEYWORD(_Generic , KEYALL)
+KEYWORD(_Imaginary , KEYALL)
+KEYWORD(_Static_assert , KEYALL)
+KEYWORD(__func__ , KEYALL)
+KEYWORD(__objc_yes , KEYALL)
+KEYWORD(__objc_no , KEYALL)
+
+
+// C++ 2.11p1: Keywords.
+KEYWORD(asm , KEYCXX|KEYGNU)
+KEYWORD(bool , BOOLSUPPORT|KEYALTIVEC)
+KEYWORD(catch , KEYCXX)
+KEYWORD(class , KEYCXX)
+KEYWORD(const_cast , KEYCXX)
+KEYWORD(delete , KEYCXX)
+KEYWORD(dynamic_cast , KEYCXX)
+KEYWORD(explicit , KEYCXX)
+KEYWORD(export , KEYCXX)
+KEYWORD(false , BOOLSUPPORT|KEYALTIVEC)
+KEYWORD(friend , KEYCXX)
+KEYWORD(mutable , KEYCXX)
+KEYWORD(namespace , KEYCXX)
+KEYWORD(new , KEYCXX)
+KEYWORD(operator , KEYCXX)
+KEYWORD(private , KEYCXX|KEYOPENCL)
+KEYWORD(protected , KEYCXX)
+KEYWORD(public , KEYCXX)
+KEYWORD(reinterpret_cast , KEYCXX)
+KEYWORD(static_cast , KEYCXX)
+KEYWORD(template , KEYCXX)
+KEYWORD(this , KEYCXX)
+KEYWORD(throw , KEYCXX)
+KEYWORD(true , BOOLSUPPORT|KEYALTIVEC)
+KEYWORD(try , KEYCXX)
+KEYWORD(typename , KEYCXX)
+KEYWORD(typeid , KEYCXX)
+KEYWORD(using , KEYCXX)
+KEYWORD(virtual , KEYCXX)
+KEYWORD(wchar_t , KEYCXX)
+
+// C++ 2.5p2: Alternative Representations.
+CXX_KEYWORD_OPERATOR(and , ampamp)
+CXX_KEYWORD_OPERATOR(and_eq , ampequal)
+CXX_KEYWORD_OPERATOR(bitand , amp)
+CXX_KEYWORD_OPERATOR(bitor , pipe)
+CXX_KEYWORD_OPERATOR(compl , tilde)
+CXX_KEYWORD_OPERATOR(not , exclaim)
+CXX_KEYWORD_OPERATOR(not_eq , exclaimequal)
+CXX_KEYWORD_OPERATOR(or , pipepipe)
+CXX_KEYWORD_OPERATOR(or_eq , pipeequal)
+CXX_KEYWORD_OPERATOR(xor , caret)
+CXX_KEYWORD_OPERATOR(xor_eq , caretequal)
+
+// C++0x keywords
+KEYWORD(alignas , KEYCXX0X)
+KEYWORD(alignof , KEYCXX0X)
+KEYWORD(char16_t , KEYCXX0X)
+KEYWORD(char32_t , KEYCXX0X)
+KEYWORD(constexpr , KEYCXX0X)
+KEYWORD(decltype , KEYCXX0X)
+KEYWORD(noexcept , KEYCXX0X)
+KEYWORD(nullptr , KEYCXX0X)
+KEYWORD(static_assert , KEYCXX0X)
+KEYWORD(thread_local , KEYCXX0X)
+
+// GNU Extensions (in impl-reserved namespace)
+KEYWORD(_Decimal32 , KEYALL)
+KEYWORD(_Decimal64 , KEYALL)
+KEYWORD(_Decimal128 , KEYALL)
+KEYWORD(__null , KEYCXX)
+KEYWORD(__alignof , KEYALL)
+KEYWORD(__attribute , KEYALL)
+KEYWORD(__builtin_choose_expr , KEYALL)
+KEYWORD(__builtin_offsetof , KEYALL)
+KEYWORD(__builtin_types_compatible_p, KEYALL)
+KEYWORD(__builtin_va_arg , KEYALL)
+KEYWORD(__extension__ , KEYALL)
+KEYWORD(__imag , KEYALL)
+KEYWORD(__int128 , KEYALL)
+KEYWORD(__label__ , KEYALL)
+KEYWORD(__real , KEYALL)
+KEYWORD(__thread , KEYALL)
+KEYWORD(__FUNCTION__ , KEYALL)
+KEYWORD(__PRETTY_FUNCTION__ , KEYALL)
+
+// GNU Extensions (outside impl-reserved namespace)
+KEYWORD(typeof , KEYGNU)
+
+// GNU and MS Type Traits
+KEYWORD(__has_nothrow_assign , KEYCXX)
+KEYWORD(__has_nothrow_copy , KEYCXX)
+KEYWORD(__has_nothrow_constructor , KEYCXX)
+KEYWORD(__has_trivial_assign , KEYCXX)
+KEYWORD(__has_trivial_copy , KEYCXX)
+KEYWORD(__has_trivial_constructor , KEYCXX)
+KEYWORD(__has_trivial_destructor , KEYCXX)
+KEYWORD(__has_virtual_destructor , KEYCXX)
+KEYWORD(__is_abstract , KEYCXX)
+KEYWORD(__is_base_of , KEYCXX)
+KEYWORD(__is_class , KEYCXX)
+KEYWORD(__is_convertible_to , KEYCXX)
+KEYWORD(__is_empty , KEYCXX)
+KEYWORD(__is_enum , KEYCXX)
+KEYWORD(__is_final , KEYCXX)
+// Tentative name - there's no implementation of std::is_literal_type yet.
+KEYWORD(__is_literal , KEYCXX)
+// Name for GCC 4.6 compatibility - people have already written libraries using
+// this name unfortunately.
+KEYWORD(__is_literal_type , KEYCXX)
+KEYWORD(__is_pod , KEYCXX)
+KEYWORD(__is_polymorphic , KEYCXX)
+KEYWORD(__is_trivial , KEYCXX)
+KEYWORD(__is_union , KEYCXX)
+
+// Clang-only C++ Type Traits
+KEYWORD(__is_trivially_constructible, KEYCXX)
+KEYWORD(__is_trivially_copyable , KEYCXX)
+KEYWORD(__is_trivially_assignable , KEYCXX)
+KEYWORD(__underlying_type , KEYCXX)
+
+// Embarcadero Expression Traits
+KEYWORD(__is_lvalue_expr , KEYCXX)
+KEYWORD(__is_rvalue_expr , KEYCXX)
+
+// Embarcadero Unary Type Traits
+KEYWORD(__is_arithmetic , KEYCXX)
+KEYWORD(__is_floating_point , KEYCXX)
+KEYWORD(__is_integral , KEYCXX)
+KEYWORD(__is_complete_type , KEYCXX)
+KEYWORD(__is_void , KEYCXX)
+KEYWORD(__is_array , KEYCXX)
+KEYWORD(__is_function , KEYCXX)
+KEYWORD(__is_reference , KEYCXX)
+KEYWORD(__is_lvalue_reference , KEYCXX)
+KEYWORD(__is_rvalue_reference , KEYCXX)
+KEYWORD(__is_fundamental , KEYCXX)
+KEYWORD(__is_object , KEYCXX)
+KEYWORD(__is_scalar , KEYCXX)
+KEYWORD(__is_compound , KEYCXX)
+KEYWORD(__is_pointer , KEYCXX)
+KEYWORD(__is_member_object_pointer , KEYCXX)
+KEYWORD(__is_member_function_pointer, KEYCXX)
+KEYWORD(__is_member_pointer , KEYCXX)
+KEYWORD(__is_const , KEYCXX)
+KEYWORD(__is_volatile , KEYCXX)
+KEYWORD(__is_standard_layout , KEYCXX)
+KEYWORD(__is_signed , KEYCXX)
+KEYWORD(__is_unsigned , KEYCXX)
+
+// Embarcadero Binary Type Traits
+KEYWORD(__is_same , KEYCXX)
+KEYWORD(__is_convertible , KEYCXX)
+KEYWORD(__array_rank , KEYCXX)
+KEYWORD(__array_extent , KEYCXX)
+
+// Apple Extension.
+KEYWORD(__private_extern__ , KEYALL)
+KEYWORD(__module_private__ , KEYALL)
+
+// Microsoft Extension.
+KEYWORD(__declspec , KEYALL)
+KEYWORD(__cdecl , KEYALL)
+KEYWORD(__stdcall , KEYALL)
+KEYWORD(__fastcall , KEYALL)
+KEYWORD(__thiscall , KEYALL)
+KEYWORD(__forceinline , KEYALL)
+KEYWORD(__unaligned , KEYMS)
+
+// OpenCL-specific keywords
+KEYWORD(__kernel , KEYOPENCL)
+ALIAS("kernel", __kernel , KEYOPENCL)
+KEYWORD(vec_step , KEYOPENCL|KEYALTIVEC)
+KEYWORD(__private , KEYOPENCL)
+KEYWORD(__global , KEYOPENCL)
+KEYWORD(__local , KEYOPENCL)
+KEYWORD(__constant , KEYOPENCL)
+ALIAS("global", __global , KEYOPENCL)
+ALIAS("local", __local , KEYOPENCL)
+ALIAS("constant", __constant , KEYOPENCL)
+KEYWORD(__read_only , KEYOPENCL)
+KEYWORD(__write_only , KEYOPENCL)
+KEYWORD(__read_write , KEYOPENCL)
+ALIAS("read_only", __read_only , KEYOPENCL)
+ALIAS("write_only", __write_only , KEYOPENCL)
+ALIAS("read_write", __read_write , KEYOPENCL)
+KEYWORD(__builtin_astype , KEYOPENCL)
+
+// Borland Extensions.
+KEYWORD(__pascal , KEYALL)
+
+// Altivec Extension.
+KEYWORD(__vector , KEYALTIVEC)
+KEYWORD(__pixel , KEYALTIVEC)
+
+// ARM NEON extensions.
+ALIAS("__fp16", half , KEYALL)
+
+// OpenCL Extension.
+KEYWORD(half , KEYOPENCL)
+
+// Objective-C ARC keywords.
+KEYWORD(__bridge , KEYARC)
+KEYWORD(__bridge_transfer , KEYARC)
+KEYWORD(__bridge_retained , KEYARC)
+KEYWORD(__bridge_retain , KEYARC)
+
+// Alternate spellings for various tokens. These are GCC extensions available
+// in all languages, but they should not be disabled in strict conformance
+// mode.
+ALIAS("__alignof__" , __alignof , KEYALL)
+ALIAS("__asm" , asm , KEYALL)
+ALIAS("__asm__" , asm , KEYALL)
+ALIAS("__attribute__", __attribute, KEYALL)
+ALIAS("__complex" , _Complex , KEYALL)
+ALIAS("__complex__" , _Complex , KEYALL)
+ALIAS("__const" , const , KEYALL)
+ALIAS("__const__" , const , KEYALL)
+ALIAS("__decltype" , decltype , KEYCXX)
+ALIAS("__imag__" , __imag , KEYALL)
+ALIAS("__inline" , inline , KEYALL)
+ALIAS("__inline__" , inline , KEYALL)
+ALIAS("__nullptr" , nullptr , KEYCXX)
+ALIAS("__real__" , __real , KEYALL)
+ALIAS("__restrict" , restrict , KEYALL)
+ALIAS("__restrict__" , restrict , KEYALL)
+ALIAS("__signed" , signed , KEYALL)
+ALIAS("__signed__" , signed , KEYALL)
+ALIAS("__typeof" , typeof , KEYALL)
+ALIAS("__typeof__" , typeof , KEYALL)
+ALIAS("__volatile" , volatile , KEYALL)
+ALIAS("__volatile__" , volatile , KEYALL)
+
+// Microsoft extensions which should be disabled in strict conformance mode
+KEYWORD(__ptr64 , KEYMS)
+KEYWORD(__ptr32 , KEYMS)
+KEYWORD(__w64 , KEYMS)
+KEYWORD(__uuidof , KEYMS | KEYBORLAND)
+KEYWORD(__try , KEYMS | KEYBORLAND)
+KEYWORD(__finally , KEYMS | KEYBORLAND)
+KEYWORD(__leave , KEYMS | KEYBORLAND)
+KEYWORD(__int64 , KEYMS)
+KEYWORD(__if_exists , KEYMS)
+KEYWORD(__if_not_exists , KEYMS)
+ALIAS("__int8" , char , KEYMS)
+ALIAS("__int16" , short , KEYMS)
+ALIAS("__int32" , int , KEYMS)
+ALIAS("_asm" , asm , KEYMS)
+ALIAS("_cdecl" , __cdecl , KEYMS | KEYBORLAND)
+ALIAS("_fastcall" , __fastcall , KEYMS | KEYBORLAND)
+ALIAS("_stdcall" , __stdcall , KEYMS | KEYBORLAND)
+ALIAS("_thiscall" , __thiscall , KEYMS)
+ALIAS("_uuidof" , __uuidof , KEYMS | KEYBORLAND)
+ALIAS("_inline" , inline , KEYMS)
+ALIAS("_declspec" , __declspec , KEYMS)
+ALIAS("__interface" , struct , KEYMS)
+
+// Borland Extensions which should be disabled in strict conformance mode.
+ALIAS("_pascal" , __pascal , KEYBORLAND)
+
+// Clang Extensions.
+ALIAS("__char16_t" , char16_t , KEYCXX)
+ALIAS("__char32_t" , char32_t , KEYCXX)
+
+// Clang-specific keywords enabled only in testing.
+TESTING_KEYWORD(__unknown_anytype , KEYALL)
+
+
+//===----------------------------------------------------------------------===//
+// Objective-C @-preceded keywords.
+//===----------------------------------------------------------------------===//
+
+// These have meaning after an '@' in Objective-C mode. These define enums in
+// the tok::objc_* namespace.
+
+OBJC1_AT_KEYWORD(not_keyword)
+OBJC1_AT_KEYWORD(class)
+OBJC1_AT_KEYWORD(compatibility_alias)
+OBJC1_AT_KEYWORD(defs)
+OBJC1_AT_KEYWORD(encode)
+OBJC1_AT_KEYWORD(end)
+OBJC1_AT_KEYWORD(implementation)
+OBJC1_AT_KEYWORD(interface)
+OBJC1_AT_KEYWORD(private)
+OBJC1_AT_KEYWORD(protected)
+OBJC1_AT_KEYWORD(protocol)
+OBJC1_AT_KEYWORD(public)
+OBJC1_AT_KEYWORD(selector)
+OBJC1_AT_KEYWORD(throw)
+OBJC1_AT_KEYWORD(try)
+OBJC1_AT_KEYWORD(catch)
+OBJC1_AT_KEYWORD(finally)
+OBJC1_AT_KEYWORD(synchronized)
+OBJC1_AT_KEYWORD(autoreleasepool)
+
+OBJC2_AT_KEYWORD(property)
+OBJC2_AT_KEYWORD(package)
+OBJC2_AT_KEYWORD(required)
+OBJC2_AT_KEYWORD(optional)
+OBJC2_AT_KEYWORD(synthesize)
+OBJC2_AT_KEYWORD(dynamic)
+OBJC2_AT_KEYWORD(__experimental_modules_import)
+
+// TODO: What to do about context-sensitive keywords like:
+// bycopy/byref/in/inout/oneway/out?
+
+ANNOTATION(cxxscope) // annotation for a C++ scope spec, e.g. "::foo::bar::"
+ANNOTATION(typename) // annotation for a C typedef name, a C++ (possibly
+ // qualified) typename, e.g. "foo::MyClass", or
+ // template-id that names a type ("std::vector<int>")
+ANNOTATION(template_id) // annotation for a C++ template-id that names a
+ // function template specialization (not a type),
+ // e.g., "std::swap<int>"
+ANNOTATION(primary_expr) // annotation for a primary expression
+ANNOTATION(decltype) // annotation for a decltype expression,
+ // e.g., "decltype(foo.bar())"
+
+// Annotation for #pragma unused(...)
+// For each argument inside the parentheses the pragma handler will produce
+// one 'pragma_unused' annotation token followed by the argument token.
+ANNOTATION(pragma_unused)
+
+// Annotation for #pragma GCC visibility...
+// The lexer produces these so that they only take effect when the parser
+// handles them.
+ANNOTATION(pragma_vis)
+
+// Annotation for #pragma pack...
+// The lexer produces these so that they only take effect when the parser
+// handles them.
+ANNOTATION(pragma_pack)
+
+#undef ANNOTATION
+#undef TESTING_KEYWORD
+#undef OBJC2_AT_KEYWORD
+#undef OBJC1_AT_KEYWORD
+#undef CXX_KEYWORD_OPERATOR
+#undef PPKEYWORD
+#undef ALIAS
+#undef KEYWORD
+#undef PUNCTUATOR
+#undef TOK
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h
new file mode 100644
index 0000000..515390a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TokenKinds.h
@@ -0,0 +1,70 @@
+//===--- TokenKinds.h - Enum values for C Token Kinds -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TokenKind enum and support functions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOKENKINDS_H
+#define LLVM_CLANG_TOKENKINDS_H
+
+namespace clang {
+
+namespace tok {
+
+/// TokenKind - This provides a simple uniform namespace for tokens from all C
+/// languages.
+enum TokenKind {
+#define TOK(X) X,
+#include "clang/Basic/TokenKinds.def"
+ NUM_TOKENS
+};
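+// Note: TokenKinds.def is expected to expand its more specific macros
+// (KEYWORD, PUNCTUATOR, ANNOTATION, ...) in terms of TOK when they are not
+// otherwise defined, so, for example, the KEYWORD entry for 'if' should show
+// up here as the enumerator kw_if.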
+
+/// PPKeywordKind - This provides a namespace for preprocessor keywords which
+/// start with a '#' at the beginning of the line.
+enum PPKeywordKind {
+#define PPKEYWORD(X) pp_##X,
+#include "clang/Basic/TokenKinds.def"
+ NUM_PP_KEYWORDS
+};
+
+/// ObjCKeywordKind - This provides a namespace for Objective-C keywords which
+/// start with an '@'.
+enum ObjCKeywordKind {
+#define OBJC1_AT_KEYWORD(X) objc_##X,
+#define OBJC2_AT_KEYWORD(X) objc_##X,
+#include "clang/Basic/TokenKinds.def"
+ NUM_OBJC_KEYWORDS
+};
+
+/// OnOffSwitch - This defines the possible values of an on-off-switch
+/// (C99 6.10.6p2).
+enum OnOffSwitch {
+ OOS_ON, OOS_OFF, OOS_DEFAULT
+};
+
+/// \brief Determines the name of a token as used within the front end.
+///
+/// The name of a token will be an internal name (such as "l_square")
+/// and should not be used as part of diagnostic messages.
+const char *getTokenName(enum TokenKind Kind);
+
+/// \brief Determines the spelling of simple punctuation tokens like
+/// '!' or '%', and returns NULL for literal and annotation tokens.
+///
+/// This routine only retrieves the "simple" spelling of the token,
+/// and will not produce any alternative spellings (e.g., a
+/// digraph). For the actual spelling of a given Token, use
+/// Preprocessor::getSpelling().
+const char *getTokenSimpleSpelling(enum TokenKind Kind);
+
+} // end namespace tok
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h b/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h
new file mode 100644
index 0000000..721f44f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/TypeTraits.h
@@ -0,0 +1,95 @@
+//===--- TypeTraits.h - C++ Type Traits Support Enumerations ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines enumerations for the type traits support.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TYPETRAITS_H
+#define LLVM_CLANG_TYPETRAITS_H
+
+namespace clang {
+
+ /// UnaryTypeTrait - Names for the unary type traits.
+ enum UnaryTypeTrait {
+ UTT_HasNothrowAssign,
+ UTT_HasNothrowCopy,
+ UTT_HasNothrowConstructor,
+ UTT_HasTrivialAssign,
+ UTT_HasTrivialCopy,
+ UTT_HasTrivialDefaultConstructor,
+ UTT_HasTrivialDestructor,
+ UTT_HasVirtualDestructor,
+ UTT_IsAbstract,
+ UTT_IsArithmetic,
+ UTT_IsArray,
+ UTT_IsClass,
+ UTT_IsCompleteType,
+ UTT_IsCompound,
+ UTT_IsConst,
+ UTT_IsEmpty,
+ UTT_IsEnum,
+ UTT_IsFinal,
+ UTT_IsFloatingPoint,
+ UTT_IsFunction,
+ UTT_IsFundamental,
+ UTT_IsIntegral,
+ UTT_IsLiteral,
+ UTT_IsLvalueReference,
+ UTT_IsMemberFunctionPointer,
+ UTT_IsMemberObjectPointer,
+ UTT_IsMemberPointer,
+ UTT_IsObject,
+ UTT_IsPOD,
+ UTT_IsPointer,
+ UTT_IsPolymorphic,
+ UTT_IsReference,
+ UTT_IsRvalueReference,
+ UTT_IsScalar,
+ UTT_IsSigned,
+ UTT_IsStandardLayout,
+ UTT_IsTrivial,
+ UTT_IsTriviallyCopyable,
+ UTT_IsUnion,
+ UTT_IsUnsigned,
+ UTT_IsVoid,
+ UTT_IsVolatile
+ };
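+ // The unary type-trait keywords declared in TokenKinds.def (e.g. __is_pod,
+ // __is_enum) are expected to map onto the corresponding enumerators above
+ // (UTT_IsPOD, UTT_IsEnum, and so on).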
+
+ /// BinaryTypeTrait - Names for the binary type traits.
+ enum BinaryTypeTrait {
+ BTT_IsBaseOf,
+ BTT_IsConvertible,
+ BTT_IsConvertibleTo,
+ BTT_IsSame,
+ BTT_TypeCompatible,
+ BTT_IsTriviallyAssignable
+ };
+
+ /// ArrayTypeTrait - Names for the array type traits.
+ enum ArrayTypeTrait {
+ ATT_ArrayRank,
+ ATT_ArrayExtent
+ };
+
+ /// UnaryExprOrTypeTrait - Names for the "expression or type" traits.
+ enum UnaryExprOrTypeTrait {
+ UETT_SizeOf,
+ UETT_AlignOf,
+ UETT_VecStep
+ };
+
+ /// \brief Names for type traits that operate specifically on types.
+ enum TypeTrait {
+ TT_IsTriviallyConstructible
+ };
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Version.h b/contrib/llvm/tools/clang/include/clang/Basic/Version.h
new file mode 100644
index 0000000..f3f5b5a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Version.h
@@ -0,0 +1,78 @@
+//===- Version.h - Clang Version Number -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines version macros and version-related utility functions
+// for Clang.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_BASIC_VERSION_H
+#define LLVM_CLANG_BASIC_VERSION_H
+
+#include "llvm/ADT/StringRef.h"
+
+#include "clang/Basic/Version.inc"
+
+/// \brief Helper macro for CLANG_VERSION_STRING.
+#define CLANG_MAKE_VERSION_STRING2(X) #X
+
+#ifdef CLANG_VERSION_PATCHLEVEL
+/// \brief Helper macro for CLANG_VERSION_STRING.
+#define CLANG_MAKE_VERSION_STRING(X,Y,Z) CLANG_MAKE_VERSION_STRING2(X.Y.Z)
+
+/// \brief A string that describes the Clang version number, e.g.,
+/// "1.0".
+#define CLANG_VERSION_STRING \
+ CLANG_MAKE_VERSION_STRING(CLANG_VERSION_MAJOR,CLANG_VERSION_MINOR, \
+ CLANG_VERSION_PATCHLEVEL)
+#else
+/// \brief Helper macro for CLANG_VERSION_STRING.
+#define CLANG_MAKE_VERSION_STRING(X,Y) CLANG_MAKE_VERSION_STRING2(X.Y)
+
+/// \brief A string that describes the Clang version number, e.g.,
+/// "1.0".
+#define CLANG_VERSION_STRING \
+ CLANG_MAKE_VERSION_STRING(CLANG_VERSION_MAJOR,CLANG_VERSION_MINOR)
+#endif
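+
+// As an illustration: if CLANG_VERSION_MAJOR were 3 and CLANG_VERSION_MINOR
+// were 1 (with no patch level defined), CLANG_VERSION_STRING would stringize
+// to "3.1" via the helpers above.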
+
+namespace clang {
+ /// \brief Retrieves the repository path (e.g., Subversion path) that
+ /// identifies the particular Clang branch, tag, or trunk from which this
+ /// Clang was built.
+ std::string getClangRepositoryPath();
+
+ /// \brief Retrieves the repository path from which LLVM was built. Supports
+ /// LLVM residing in a separate repository from clang.
+ std::string getLLVMRepositoryPath();
+
+ /// \brief Retrieves the repository revision number (or identifier) from which
+ /// this Clang was built.
+ std::string getClangRevision();
+
+ /// \brief Retrieves the repository revision number (or identifier) from which
+ /// LLVM was built. If Clang and LLVM are in the same repository, this returns
+ /// the same string as getClangRevision.
+ std::string getLLVMRevision();
+
+ /// \brief Retrieves the full repository version that is an amalgamation of
+ /// the information in getClangRepositoryPath() and getClangRevision().
+ std::string getClangFullRepositoryVersion();
+
+ /// \brief Retrieves a string representing the complete clang version,
+ /// which includes the clang version number, the repository version,
+ /// and the vendor tag.
+ std::string getClangFullVersion();
+
+ /// \brief Retrieves a string representing the complete clang version suitable
+ /// for use in the CPP __VERSION__ macro, which includes the clang version
+ /// number, the repository version, and the vendor tag.
+ std::string getClangFullCPPVersion();
+}
+
+#endif // LLVM_CLANG_BASIC_VERSION_H
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/VersionTuple.h b/contrib/llvm/tools/clang/include/clang/Basic/VersionTuple.h
new file mode 100644
index 0000000..30ef6641
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/VersionTuple.h
@@ -0,0 +1,123 @@
+//===- VersionTuple.h - Version Number Handling -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines the VersionTuple class, which represents a version in
+// the form major[.minor[.subminor]].
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_BASIC_VERSIONTUPLE_H
+#define LLVM_CLANG_BASIC_VERSIONTUPLE_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/Optional.h"
+#include <string>
+
+namespace clang {
+
+/// \brief Represents a version number in the form major[.minor[.subminor]].
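+///
+/// For example, VersionTuple(10, 7) represents the version "10.7" and compares
+/// less than VersionTuple(10, 7, 1); components that are not provided are
+/// treated as zero for comparison purposes.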
+class VersionTuple {
+ unsigned Major;
+ unsigned Minor : 31;
+ unsigned Subminor : 31;
+ unsigned HasMinor : 1;
+ unsigned HasSubminor : 1;
+
+public:
+ VersionTuple()
+ : Major(0), Minor(0), Subminor(0), HasMinor(false), HasSubminor(false) { }
+
+ explicit VersionTuple(unsigned Major)
+ : Major(Major), Minor(0), Subminor(0), HasMinor(false), HasSubminor(false)
+ { }
+
+ explicit VersionTuple(unsigned Major, unsigned Minor)
+ : Major(Major), Minor(Minor), Subminor(0), HasMinor(true),
+ HasSubminor(false)
+ { }
+
+ explicit VersionTuple(unsigned Major, unsigned Minor, unsigned Subminor)
+ : Major(Major), Minor(Minor), Subminor(Subminor), HasMinor(true),
+ HasSubminor(true)
+ { }
+
+ /// \brief Determine whether this version information is empty
+ /// (i.e., all version components are zero).
+ bool empty() const { return Major == 0 && Minor == 0 && Subminor == 0; }
+
+ /// \brief Retrieve the major version number.
+ unsigned getMajor() const { return Major; }
+
+ /// \brief Retrieve the minor version number, if provided.
+ llvm::Optional<unsigned> getMinor() const {
+ if (!HasMinor)
+ return llvm::Optional<unsigned>();
+ return Minor;
+ }
+
+ /// \brief Retrieve the subminor version number, if provided.
+ llvm::Optional<unsigned> getSubminor() const {
+ if (!HasSubminor)
+ return llvm::Optional<unsigned>();
+ return Subminor;
+ }
+
+ /// \brief Determine if two version numbers are equivalent. If not
+ /// provided, minor and subminor version numbers are considered to be zero.
+ friend bool operator==(const VersionTuple& X, const VersionTuple &Y) {
+ return X.Major == Y.Major && X.Minor == Y.Minor && X.Subminor == Y.Subminor;
+ }
+
+ /// \brief Determine if two version numbers are not equivalent. If
+ /// not provided, minor and subminor version numbers are considered to be
+ /// zero.
+ friend bool operator!=(const VersionTuple &X, const VersionTuple &Y) {
+ return !(X == Y);
+ }
+
+ /// \brief Determine whether one version number precedes another. If not
+ /// provided, minor and subminor version numbers are considered to be zero.
+ friend bool operator<(const VersionTuple &X, const VersionTuple &Y) {
+ if (X.Major != Y.Major)
+ return X.Major < Y.Major;
+
+ if (X.Minor != Y.Minor)
+ return X.Minor < Y.Minor;
+
+ return X.Subminor < Y.Subminor;
+ }
+
+ /// \brief Determine whether one version number follows another. If not
+ /// provided, minor and subminor version numbers are considered to be zero.
+ friend bool operator>(const VersionTuple &X, const VersionTuple &Y) {
+ return Y < X;
+ }
+
+ /// \brief Determine whether one version number precedes or is
+ /// equivalent to another. If not provided, minor and subminor
+ /// version numbers are considered to be zero.
+ friend bool operator<=(const VersionTuple &X, const VersionTuple &Y) {
+ return !(Y < X);
+ }
+
+ /// \brief Determine whether one version number follows or is
+ /// equivalent to another. If not provided, minor and subminor
+ /// version numbers are considered to be zero.
+ friend bool operator>=(const VersionTuple &X, const VersionTuple &Y) {
+ return !(X < Y);
+ }
+
+ /// \brief Retrieve a string representation of the version number.
+ std::string getAsString() const;
+};
+
+/// \brief Print a version number.
+raw_ostream& operator<<(raw_ostream &Out, const VersionTuple &V);
+
+} // end namespace clang
+#endif // LLVM_CLANG_BASIC_VERSIONTUPLE_H
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/Visibility.h b/contrib/llvm/tools/clang/include/clang/Basic/Visibility.h
new file mode 100644
index 0000000..90e288a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/Visibility.h
@@ -0,0 +1,48 @@
+//===--- Visibility.h - Visibility enumeration and utilities ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Visibility enumeration and various utility
+// functions.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_BASIC_VISIBILITY_H
+#define LLVM_CLANG_BASIC_VISIBILITY_H
+
+namespace clang {
+
+/// \brief Describes the different kinds of visibility that a
+/// declaration may have. Visibility determines how a declaration
+/// interacts with the dynamic linker. It may also affect whether the
+/// symbol can be found by runtime symbol lookup APIs.
+///
+/// Visibility is not described in any language standard and
+/// (nonetheless) sometimes has odd behavior. Not all platforms
+/// support all visibility kinds.
+enum Visibility {
+ /// Objects with "hidden" visibility are not seen by the dynamic
+ /// linker.
+ HiddenVisibility,
+
+ /// Objects with "protected" visibility are seen by the dynamic
+ /// linker but always dynamically resolve to an object within this
+ /// shared object.
+ ProtectedVisibility,
+
+ /// Objects with "default" visibility are seen by the dynamic linker
+ /// and act like normal objects.
+ DefaultVisibility
+};
+
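+// Returns the more restrictive of the two visibilities. This relies on the
+// enumerators above being ordered from most restrictive (hidden) to least
+// restrictive (default).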
+inline Visibility minVisibility(Visibility L, Visibility R) {
+ return L < R ? L : R;
+}
+
+}
+
+#endif // LLVM_CLANG_BASIC_VISIBILITY_H
diff --git a/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td b/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td
new file mode 100644
index 0000000..71a0aa2
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Basic/arm_neon.td
@@ -0,0 +1,395 @@
+//===--- arm_neon.td - ARM NEON compiler interface ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the TableGen definitions from which the ARM NEON header
+// file will be generated. See ARM document DUI0348B.
+//
+//===----------------------------------------------------------------------===//
+
+class Op;
+
+def OP_NONE : Op;
+def OP_ADD : Op;
+def OP_ADDL : Op;
+def OP_ADDW : Op;
+def OP_SUB : Op;
+def OP_SUBL : Op;
+def OP_SUBW : Op;
+def OP_MUL : Op;
+def OP_MLA : Op;
+def OP_MLAL : Op;
+def OP_MLS : Op;
+def OP_MLSL : Op;
+def OP_MUL_N : Op;
+def OP_MLA_N : Op;
+def OP_MLS_N : Op;
+def OP_MLAL_N : Op;
+def OP_MLSL_N : Op;
+def OP_MUL_LN: Op;
+def OP_MULL_LN : Op;
+def OP_MLA_LN: Op;
+def OP_MLS_LN: Op;
+def OP_MLAL_LN : Op;
+def OP_MLSL_LN : Op;
+def OP_QDMULL_LN : Op;
+def OP_QDMLAL_LN : Op;
+def OP_QDMLSL_LN : Op;
+def OP_QDMULH_LN : Op;
+def OP_QRDMULH_LN : Op;
+def OP_EQ : Op;
+def OP_GE : Op;
+def OP_LE : Op;
+def OP_GT : Op;
+def OP_LT : Op;
+def OP_NEG : Op;
+def OP_NOT : Op;
+def OP_AND : Op;
+def OP_OR : Op;
+def OP_XOR : Op;
+def OP_ANDN : Op;
+def OP_ORN : Op;
+def OP_CAST : Op;
+def OP_HI : Op;
+def OP_LO : Op;
+def OP_CONC : Op;
+def OP_DUP : Op;
+def OP_DUP_LN: Op;
+def OP_SEL : Op;
+def OP_REV64 : Op;
+def OP_REV32 : Op;
+def OP_REV16 : Op;
+def OP_REINT : Op;
+def OP_ABDL : Op;
+def OP_ABA : Op;
+def OP_ABAL : Op;
+
+class Inst <string n, string p, string t, Op o> {
+ string Name = n;
+ string Prototype = p;
+ string Types = t;
+ Op Operand = o;
+ bit isShift = 0;
+ bit isVCVT_N = 0;
+}
+
+// Used to generate Builtins.def:
+// SInst: Instruction with signed/unsigned suffix (e.g., "s8", "u8", "p8")
+// IInst: Instruction with generic integer suffix (e.g., "i8")
+// WInst: Instruction with only bit size suffix (e.g., "8")
+class SInst<string n, string p, string t> : Inst<n, p, t, OP_NONE> {}
+class IInst<string n, string p, string t> : Inst<n, p, t, OP_NONE> {}
+class WInst<string n, string p, string t> : Inst<n, p, t, OP_NONE> {}
+
+// prototype: return (arg, arg, ...)
+// v: void
+// t: best-fit integer (int/poly args)
+// x: signed integer (int/float args)
+// u: unsigned integer (int/float args)
+// f: float (int args)
+// d: default
+// g: default, ignore 'Q' size modifier.
+// w: double width elements, same num elts
+// n: double width elements, half num elts
+// h: half width elements, double num elts
+// e: half width elements, double num elts, unsigned
+// i: constant int
+// l: constant uint64
+// s: scalar of element type
+// a: scalar of element type (splat to vector type)
+// k: default elt width, double num elts
+// #: array of default vectors
+// p: pointer type
+// c: const pointer type
+
+// sizes:
+// c: char
+// s: short
+// i: int
+// l: long
+// f: float
+// h: half-float
+
+// size modifiers:
+// U: unsigned
+// Q: 128b
+// P: polynomial
+
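+// Example reading of an entry below: VADDL has prototype "wdd" and types
+// "csiUcUsUi", i.e. a double-width result ('w') computed from two default
+// vectors ('d'); its 's' (short) instance corresponds roughly to
+//   int32x4_t vaddl_s16(int16x4_t a, int16x4_t b);
+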
+////////////////////////////////////////////////////////////////////////////////
+// E.3.1 Addition
+def VADD : Inst<"vadd", "ddd", "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_ADD>;
+def VADDL : Inst<"vaddl", "wdd", "csiUcUsUi", OP_ADDL>;
+def VADDW : Inst<"vaddw", "wwd", "csiUcUsUi", OP_ADDW>;
+def VHADD : SInst<"vhadd", "ddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VRHADD : SInst<"vrhadd", "ddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VQADD : SInst<"vqadd", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VADDHN : IInst<"vaddhn", "hkk", "silUsUiUl">;
+def VRADDHN : IInst<"vraddhn", "hkk", "silUsUiUl">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.2 Multiplication
+def VMUL : Inst<"vmul", "ddd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MUL>;
+def VMULP : SInst<"vmul", "ddd", "PcQPc">;
+def VMLA : Inst<"vmla", "dddd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLA>;
+def VMLAL : Inst<"vmlal", "wwdd", "csiUcUsUi", OP_MLAL>;
+def VMLS : Inst<"vmls", "dddd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_MLS>;
+def VMLSL : Inst<"vmlsl", "wwdd", "csiUcUsUi", OP_MLSL>;
+def VQDMULH : SInst<"vqdmulh", "ddd", "siQsQi">;
+def VQRDMULH : SInst<"vqrdmulh", "ddd", "siQsQi">;
+def VQDMLAL : SInst<"vqdmlal", "wwdd", "si">;
+def VQDMLSL : SInst<"vqdmlsl", "wwdd", "si">;
+def VMULL : SInst<"vmull", "wdd", "csiUcUsUiPc">;
+def VQDMULL : SInst<"vqdmull", "wdd", "si">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.3 Subtraction
+def VSUB : Inst<"vsub", "ddd", "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUl", OP_SUB>;
+def VSUBL : Inst<"vsubl", "wdd", "csiUcUsUi", OP_SUBL>;
+def VSUBW : Inst<"vsubw", "wwd", "csiUcUsUi", OP_SUBW>;
+def VQSUB : SInst<"vqsub", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VHSUB : SInst<"vhsub", "ddd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VSUBHN : IInst<"vsubhn", "hkk", "silUsUiUl">;
+def VRSUBHN : IInst<"vrsubhn", "hkk", "silUsUiUl">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.4 Comparison
+def VCEQ : Inst<"vceq", "udd", "csifUcUsUiPcQcQsQiQfQUcQUsQUiQPc", OP_EQ>;
+def VCGE : Inst<"vcge", "udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GE>;
+def VCLE : Inst<"vcle", "udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LE>;
+def VCGT : Inst<"vcgt", "udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_GT>;
+def VCLT : Inst<"vclt", "udd", "csifUcUsUiQcQsQiQfQUcQUsQUi", OP_LT>;
+def VCAGE : IInst<"vcage", "udd", "fQf">;
+def VCALE : IInst<"vcale", "udd", "fQf">;
+def VCAGT : IInst<"vcagt", "udd", "fQf">;
+def VCALT : IInst<"vcalt", "udd", "fQf">;
+def VTST : WInst<"vtst", "udd", "csiUcUsUiPcQcQsQiQUcQUsQUiQPc">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.5 Absolute Difference
+def VABD : SInst<"vabd", "ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
+def VABDL : Inst<"vabdl", "wdd", "csiUcUsUi", OP_ABDL>;
+def VABA : Inst<"vaba", "dddd", "csiUcUsUiQcQsQiQUcQUsQUi", OP_ABA>;
+def VABAL : Inst<"vabal", "wwdd", "csiUcUsUi", OP_ABAL>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.6 Max/Min
+def VMAX : SInst<"vmax", "ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
+def VMIN : SInst<"vmin", "ddd", "csiUcUsUifQcQsQiQUcQUsQUiQf">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.7 Pairwise Addition
+def VPADD : IInst<"vpadd", "ddd", "csiUcUsUif">;
+def VPADDL : SInst<"vpaddl", "nd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VPADAL : SInst<"vpadal", "nnd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.8-9 Folding Max/Min
+def VPMAX : SInst<"vpmax", "ddd", "csiUcUsUif">;
+def VPMIN : SInst<"vpmin", "ddd", "csiUcUsUif">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.10 Reciprocal/Sqrt
+def VRECPS : IInst<"vrecps", "ddd", "fQf">;
+def VRSQRTS : IInst<"vrsqrts", "ddd", "fQf">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.11 Shifts by signed variable
+def VSHL : SInst<"vshl", "ddx", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VQSHL : SInst<"vqshl", "ddx", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VRSHL : SInst<"vrshl", "ddx", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VQRSHL : SInst<"vqrshl", "ddx", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.12 Shifts by constant
+let isShift = 1 in {
+def VSHR_N : SInst<"vshr_n", "ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VSHL_N : IInst<"vshl_n", "ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VRSHR_N : SInst<"vrshr_n", "ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VSRA_N : SInst<"vsra_n", "dddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VRSRA_N : SInst<"vrsra_n", "dddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VQSHL_N : SInst<"vqshl_n", "ddi", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl">;
+def VQSHLU_N : SInst<"vqshlu_n", "udi", "csilQcQsQiQl">;
+def VSHRN_N : IInst<"vshrn_n", "hki", "silUsUiUl">;
+def VQSHRUN_N : SInst<"vqshrun_n", "eki", "sil">;
+def VQRSHRUN_N : SInst<"vqrshrun_n", "eki", "sil">;
+def VQSHRN_N : SInst<"vqshrn_n", "hki", "silUsUiUl">;
+def VRSHRN_N : IInst<"vrshrn_n", "hki", "silUsUiUl">;
+def VQRSHRN_N : SInst<"vqrshrn_n", "hki", "silUsUiUl">;
+def VSHLL_N : SInst<"vshll_n", "wdi", "csiUcUsUi">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.13 Shifts with insert
+def VSRI_N : WInst<"vsri_n", "dddi",
+ "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">;
+def VSLI_N : WInst<"vsli_n", "dddi",
+ "csilUcUsUiUlPcPsQcQsQiQlQUcQUsQUiQUlQPcQPs">;
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.14 Loads and stores of a single vector
+def VLD1 : WInst<"vld1", "dc",
+ "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD1_LANE : WInst<"vld1_lane", "dcdi",
+ "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD1_DUP : WInst<"vld1_dup", "dc",
+ "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST1 : WInst<"vst1", "vpd",
+ "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST1_LANE : WInst<"vst1_lane", "vpdi",
+ "QUcQUsQUiQUlQcQsQiQlQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.15 Loads and stores of an N-element structure
+def VLD2 : WInst<"vld2", "2c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD3 : WInst<"vld3", "3c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD4 : WInst<"vld4", "4c", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VLD2_DUP : WInst<"vld2_dup", "2c", "UcUsUiUlcsilhfPcPs">;
+def VLD3_DUP : WInst<"vld3_dup", "3c", "UcUsUiUlcsilhfPcPs">;
+def VLD4_DUP : WInst<"vld4_dup", "4c", "UcUsUiUlcsilhfPcPs">;
+def VLD2_LANE : WInst<"vld2_lane", "2c2i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VLD3_LANE : WInst<"vld3_lane", "3c3i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VLD4_LANE : WInst<"vld4_lane", "4c4i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VST2 : WInst<"vst2", "vp2", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST3 : WInst<"vst3", "vp3", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST4 : WInst<"vst4", "vp4", "QUcQUsQUiQcQsQiQhQfQPcQPsUcUsUiUlcsilhfPcPs">;
+def VST2_LANE : WInst<"vst2_lane", "vp2i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VST3_LANE : WInst<"vst3_lane", "vp3i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+def VST4_LANE : WInst<"vst4_lane", "vp4i", "QUsQUiQsQiQhQfQPsUcUsUicsihfPcPs">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.16 Extract lanes from a vector
+def VGET_LANE : IInst<"vget_lane", "sdi",
+ "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.17 Set lanes within a vector
+def VSET_LANE : IInst<"vset_lane", "dsdi",
+ "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.18 Initialize a vector from bit pattern
+def VCREATE: Inst<"vcreate", "dl", "csihfUcUsUiUlPcPsl", OP_CAST>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.19 Set all lanes to same value
+def VDUP_N : Inst<"vdup_n", "ds",
+ "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl", OP_DUP>;
+def VMOV_N : Inst<"vmov_n", "ds",
+ "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl", OP_DUP>;
+def VDUP_LANE : Inst<"vdup_lane", "dgi",
+ "UcUsUicsiPcPsfQUcQUsQUiQcQsQiQPcQPsQflUlQlQUl",OP_DUP_LN>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.20 Combining vectors
+def VCOMBINE : Inst<"vcombine", "kdd", "csilhfUcUsUiUlPcPs", OP_CONC>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.21 Splitting vectors
+def VGET_HIGH : Inst<"vget_high", "dk", "csilhfUcUsUiUlPcPs", OP_HI>;
+def VGET_LOW : Inst<"vget_low", "dk", "csilhfUcUsUiUlPcPs", OP_LO>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.22 Converting vectors
+def VCVT_S32 : SInst<"vcvt_s32", "xd", "fQf">;
+def VCVT_U32 : SInst<"vcvt_u32", "ud", "fQf">;
+def VCVT_F16 : SInst<"vcvt_f16", "hk", "f">;
+def VCVT_F32 : SInst<"vcvt_f32", "fd", "iUiQiQUi">;
+def VCVT_F32_F16 : SInst<"vcvt_f32_f16", "fd", "h">;
+let isVCVT_N = 1 in {
+def VCVT_N_S32 : SInst<"vcvt_n_s32", "xdi", "fQf">;
+def VCVT_N_U32 : SInst<"vcvt_n_u32", "udi", "fQf">;
+def VCVT_N_F32 : SInst<"vcvt_n_f32", "fdi", "iUiQiQUi">;
+}
+def VMOVN : IInst<"vmovn", "hk", "silUsUiUl">;
+def VMOVL : SInst<"vmovl", "wd", "csiUcUsUi">;
+def VQMOVN : SInst<"vqmovn", "hk", "silUsUiUl">;
+def VQMOVUN : SInst<"vqmovun", "ek", "sil">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.23-24 Table lookup, Extended table lookup
+def VTBL1 : WInst<"vtbl1", "ddt", "UccPc">;
+def VTBL2 : WInst<"vtbl2", "d2t", "UccPc">;
+def VTBL3 : WInst<"vtbl3", "d3t", "UccPc">;
+def VTBL4 : WInst<"vtbl4", "d4t", "UccPc">;
+def VTBX1 : WInst<"vtbx1", "dddt", "UccPc">;
+def VTBX2 : WInst<"vtbx2", "dd2t", "UccPc">;
+def VTBX3 : WInst<"vtbx3", "dd3t", "UccPc">;
+def VTBX4 : WInst<"vtbx4", "dd4t", "UccPc">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.25 Operations with a scalar value
+def VMLA_LANE : Inst<"vmla_lane", "dddgi", "siUsUifQsQiQUsQUiQf", OP_MLA_LN>;
+def VMLAL_LANE : Inst<"vmlal_lane", "wwddi", "siUsUi", OP_MLAL_LN>;
+def VQDMLAL_LANE : Inst<"vqdmlal_lane", "wwddi", "si", OP_QDMLAL_LN>;
+def VMLS_LANE : Inst<"vmls_lane", "dddgi", "siUsUifQsQiQUsQUiQf", OP_MLS_LN>;
+def VMLSL_LANE : Inst<"vmlsl_lane", "wwddi", "siUsUi", OP_MLSL_LN>;
+def VQDMLSL_LANE : Inst<"vqdmlsl_lane", "wwddi", "si", OP_QDMLSL_LN>;
+def VMUL_N : Inst<"vmul_n", "dds", "sifUsUiQsQiQfQUsQUi", OP_MUL_N>;
+def VMUL_LANE : Inst<"vmul_lane", "ddgi", "sifUsUiQsQiQfQUsQUi", OP_MUL_LN>;
+def VMULL_N : SInst<"vmull_n", "wda", "siUsUi">;
+def VMULL_LANE : Inst<"vmull_lane", "wddi", "siUsUi", OP_MULL_LN>;
+def VQDMULL_N : SInst<"vqdmull_n", "wda", "si">;
+def VQDMULL_LANE : Inst<"vqdmull_lane", "wddi", "si", OP_QDMULL_LN>;
+def VQDMULH_N : SInst<"vqdmulh_n", "dda", "siQsQi">;
+def VQDMULH_LANE : Inst<"vqdmulh_lane", "ddgi", "siQsQi", OP_QDMULH_LN>;
+def VQRDMULH_N : SInst<"vqrdmulh_n", "dda", "siQsQi">;
+def VQRDMULH_LANE : Inst<"vqrdmulh_lane", "ddgi", "siQsQi", OP_QRDMULH_LN>;
+def VMLA_N : Inst<"vmla_n", "ddda", "siUsUifQsQiQUsQUiQf", OP_MLA_N>;
+def VMLAL_N : Inst<"vmlal_n", "wwda", "siUsUi", OP_MLAL_N>;
+def VQDMLAL_N : SInst<"vqdmlal_n", "wwda", "si">;
+def VMLS_N : Inst<"vmls_n", "ddds", "siUsUifQsQiQUsQUiQf", OP_MLS_N>;
+def VMLSL_N : Inst<"vmlsl_n", "wwda", "siUsUi", OP_MLSL_N>;
+def VQDMLSL_N : SInst<"vqdmlsl_n", "wwda", "si">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.26 Vector Extract
+def VEXT : WInst<"vext", "dddi",
+ "cUcPcsUsPsiUilUlfQcQUcQPcQsQUsQPsQiQUiQlQUlQf">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.27 Reverse vector elements
+def VREV64 : Inst<"vrev64", "dd", "csiUcUsUiPcPsfQcQsQiQUcQUsQUiQPcQPsQf",
+ OP_REV64>;
+def VREV32 : Inst<"vrev32", "dd", "csUcUsPcPsQcQsQUcQUsQPcQPs", OP_REV32>;
+def VREV16 : Inst<"vrev16", "dd", "cUcPcQcQUcQPc", OP_REV16>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.28 Other single operand arithmetic
+def VABS : SInst<"vabs", "dd", "csifQcQsQiQf">;
+def VQABS : SInst<"vqabs", "dd", "csiQcQsQi">;
+def VNEG : Inst<"vneg", "dd", "csifQcQsQiQf", OP_NEG>;
+def VQNEG : SInst<"vqneg", "dd", "csiQcQsQi">;
+def VCLS : SInst<"vcls", "dd", "csiQcQsQi">;
+def VCLZ : IInst<"vclz", "dd", "csiUcUsUiQcQsQiQUcQUsQUi">;
+def VCNT : WInst<"vcnt", "dd", "UccPcQUcQcQPc">;
+def VRECPE : SInst<"vrecpe", "dd", "fUiQfQUi">;
+def VRSQRTE : SInst<"vrsqrte", "dd", "fUiQfQUi">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.29 Logical operations
+def VMVN : Inst<"vmvn", "dd", "csiUcUsUiPcQcQsQiQUcQUsQUiQPc", OP_NOT>;
+def VAND : Inst<"vand", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_AND>;
+def VORR : Inst<"vorr", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_OR>;
+def VEOR : Inst<"veor", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_XOR>;
+def VBIC : Inst<"vbic", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ANDN>;
+def VORN : Inst<"vorn", "ddd", "csilUcUsUiUlQcQsQiQlQUcQUsQUiQUl", OP_ORN>;
+def VBSL : Inst<"vbsl", "dudd",
+ "csilUcUsUiUlfPcPsQcQsQiQlQUcQUsQUiQUlQfQPcQPs", OP_SEL>;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.30 Transposition operations
+def VTRN : WInst<"vtrn", "2dd", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
+def VZIP : WInst<"vzip", "2dd", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
+def VUZP : WInst<"vuzp", "2dd", "csiUcUsUifPcPsQcQsQiQUcQUsQUiQfQPcQPs">;
+
+////////////////////////////////////////////////////////////////////////////////
+// E.3.31 Vector reinterpret cast operations
+def VREINTERPRET
+ : Inst<"vreinterpret", "dd",
+ "csilUcUsUiUlhfPcPsQcQsQiQlQUcQUsQUiQUlQhQfQPcQPs", OP_REINT>;
+
diff --git a/contrib/llvm/tools/clang/include/clang/CodeGen/BackendUtil.h b/contrib/llvm/tools/clang/include/clang/CodeGen/BackendUtil.h
new file mode 100644
index 0000000..135b6a9
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/CodeGen/BackendUtil.h
@@ -0,0 +1,40 @@
+//===--- BackendUtil.h - LLVM Backend Utilities -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_CODEGEN_BACKEND_UTIL_H
+#define LLVM_CLANG_CODEGEN_BACKEND_UTIL_H
+
+#include "clang/Basic/LLVM.h"
+
+namespace llvm {
+ class Module;
+}
+
+namespace clang {
+ class DiagnosticsEngine;
+ class CodeGenOptions;
+ class TargetOptions;
+ class LangOptions;
+
+ enum BackendAction {
+ Backend_EmitAssembly, ///< Emit native assembly files
+ Backend_EmitBC, ///< Emit LLVM bitcode files
+ Backend_EmitLL, ///< Emit human-readable LLVM assembly
+ Backend_EmitNothing, ///< Don't emit anything (benchmarking mode)
+ Backend_EmitMCNull, ///< Run CodeGen, but don't emit anything
+ Backend_EmitObj ///< Emit native object files
+ };
+
+ void EmitBackendOutput(DiagnosticsEngine &Diags, const CodeGenOptions &CGOpts,
+ const TargetOptions &TOpts, const LangOptions &LOpts,
+ llvm::Module *M,
+ BackendAction Action, raw_ostream *OS);
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h b/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h
new file mode 100644
index 0000000..7fa589f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/CodeGen/CodeGenAction.h
@@ -0,0 +1,103 @@
+//===--- CodeGenAction.h - LLVM Code Generation Frontend Action -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_CODEGEN_CODE_GEN_ACTION_H
+#define LLVM_CLANG_CODEGEN_CODE_GEN_ACTION_H
+
+#include "clang/Frontend/FrontendAction.h"
+#include "llvm/ADT/OwningPtr.h"
+
+namespace llvm {
+ class LLVMContext;
+ class Module;
+}
+
+namespace clang {
+class BackendConsumer;
+
+class CodeGenAction : public ASTFrontendAction {
+private:
+ unsigned Act;
+ OwningPtr<llvm::Module> TheModule;
+ llvm::Module *LinkModule;
+ llvm::LLVMContext *VMContext;
+ bool OwnsVMContext;
+
+protected:
+ /// Create a new code generation action. If the optional \arg _VMContext
+ /// parameter is supplied, the action uses it without taking ownership,
+ /// otherwise it creates a fresh LLVM context and takes ownership.
+ CodeGenAction(unsigned _Act, llvm::LLVMContext *_VMContext = 0);
+
+ virtual bool hasIRSupport() const;
+
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+
+ virtual void ExecuteAction();
+
+ virtual void EndSourceFileAction();
+
+public:
+ ~CodeGenAction();
+
+ /// setLinkModule - Set the link module to be used by this action. If a link
+ /// module is not provided, and CodeGenOptions::LinkBitcodeFile is non-empty,
+ /// the action will load it from the specified file.
+ void setLinkModule(llvm::Module *Mod) { LinkModule = Mod; }
+
+ /// takeModule - Take the generated LLVM module, for use after the action has
+ /// been run. The result may be null on failure.
+ llvm::Module *takeModule();
+
+ /// Take the LLVM context used by this action.
+ llvm::LLVMContext *takeLLVMContext();
+
+ BackendConsumer *BEConsumer;
+};
+
+class EmitAssemblyAction : public CodeGenAction {
+ virtual void anchor();
+public:
+ EmitAssemblyAction(llvm::LLVMContext *_VMContext = 0);
+};
+
+class EmitBCAction : public CodeGenAction {
+ virtual void anchor();
+public:
+ EmitBCAction(llvm::LLVMContext *_VMContext = 0);
+};
+
+class EmitLLVMAction : public CodeGenAction {
+ virtual void anchor();
+public:
+ EmitLLVMAction(llvm::LLVMContext *_VMContext = 0);
+};
+
+class EmitLLVMOnlyAction : public CodeGenAction {
+ virtual void anchor();
+public:
+ EmitLLVMOnlyAction(llvm::LLVMContext *_VMContext = 0);
+};
+
+class EmitCodeGenOnlyAction : public CodeGenAction {
+ virtual void anchor();
+public:
+ EmitCodeGenOnlyAction(llvm::LLVMContext *_VMContext = 0);
+};
+
+class EmitObjAction : public CodeGenAction {
+ virtual void anchor();
+public:
+ EmitObjAction(llvm::LLVMContext *_VMContext = 0);
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/CodeGen/ModuleBuilder.h b/contrib/llvm/tools/clang/include/clang/CodeGen/ModuleBuilder.h
new file mode 100644
index 0000000..ba9d1f9
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/CodeGen/ModuleBuilder.h
@@ -0,0 +1,46 @@
+//===--- CodeGen/ModuleBuilder.h - Build LLVM from ASTs ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ModuleBuilder interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_CODEGEN_MODULEBUILDER_H
+#define LLVM_CLANG_CODEGEN_MODULEBUILDER_H
+
+#include "clang/AST/ASTConsumer.h"
+#include <string>
+
+namespace llvm {
+ class LLVMContext;
+ class Module;
+}
+
+namespace clang {
+ class DiagnosticsEngine;
+ class LangOptions;
+ class CodeGenOptions;
+
+ class CodeGenerator : public ASTConsumer {
+ virtual void anchor();
+ public:
+ virtual llvm::Module* GetModule() = 0;
+ virtual llvm::Module* ReleaseModule() = 0;
+ };
+
+ /// CreateLLVMCodeGen - Create a CodeGenerator instance.
+ /// It is the responsibility of the caller to call delete on
+ /// the allocated CodeGenerator instance.
+ CodeGenerator *CreateLLVMCodeGen(DiagnosticsEngine &Diags,
+ const std::string &ModuleName,
+ const CodeGenOptions &CGO,
+ llvm::LLVMContext& C);
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Action.h b/contrib/llvm/tools/clang/include/clang/Driver/Action.h
new file mode 100644
index 0000000..6e317a0
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Action.h
@@ -0,0 +1,254 @@
+//===--- Action.h - Abstract compilation steps ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_ACTION_H_
+#define CLANG_DRIVER_ACTION_H_
+
+#include "clang/Driver/Types.h"
+#include "clang/Driver/Util.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+namespace driver {
+ class Arg;
+
+/// Action - Represent an abstract compilation step to perform.
+///
+/// An action represents an edge in the compilation graph; typically
+/// it is a job to transform an input using some tool.
+///
+/// The current driver is hard wired to expect actions which produce a
+/// single primary output, at least in terms of controlling the
+/// compilation. Actions can produce auxiliary files, but can only
+/// produce a single output to feed into subsequent actions.
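+///
+/// For example, compiling a single C source file to an executable typically
+/// produces a chain of the action kinds below: an InputAction feeding a
+/// PreprocessJobAction, then a CompileJobAction and an AssembleJobAction,
+/// with a LinkJobAction consuming the result.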
+class Action {
+public:
+ typedef ActionList::size_type size_type;
+ typedef ActionList::iterator iterator;
+ typedef ActionList::const_iterator const_iterator;
+
+ enum ActionClass {
+ InputClass = 0,
+ BindArchClass,
+ PreprocessJobClass,
+ PrecompileJobClass,
+ AnalyzeJobClass,
+ MigrateJobClass,
+ CompileJobClass,
+ AssembleJobClass,
+ LinkJobClass,
+ LipoJobClass,
+ DsymutilJobClass,
+ VerifyJobClass,
+
+ JobClassFirst=PreprocessJobClass,
+ JobClassLast=VerifyJobClass
+ };
+
+ static const char *getClassName(ActionClass AC);
+
+private:
+ ActionClass Kind;
+
+ /// The output type of this action.
+ types::ID Type;
+
+ ActionList Inputs;
+
+ unsigned OwnsInputs : 1;
+
+protected:
+ Action(ActionClass _Kind, types::ID _Type)
+ : Kind(_Kind), Type(_Type), OwnsInputs(true) {}
+ Action(ActionClass _Kind, Action *Input, types::ID _Type)
+ : Kind(_Kind), Type(_Type), Inputs(&Input, &Input + 1), OwnsInputs(true) {}
+ Action(ActionClass _Kind, const ActionList &_Inputs, types::ID _Type)
+ : Kind(_Kind), Type(_Type), Inputs(_Inputs), OwnsInputs(true) {}
+public:
+ virtual ~Action();
+
+ const char *getClassName() const { return Action::getClassName(getKind()); }
+
+ bool getOwnsInputs() { return OwnsInputs; }
+ void setOwnsInputs(bool Value) { OwnsInputs = Value; }
+
+ ActionClass getKind() const { return Kind; }
+ types::ID getType() const { return Type; }
+
+ ActionList &getInputs() { return Inputs; }
+ const ActionList &getInputs() const { return Inputs; }
+
+ size_type size() const { return Inputs.size(); }
+
+ iterator begin() { return Inputs.begin(); }
+ iterator end() { return Inputs.end(); }
+ const_iterator begin() const { return Inputs.begin(); }
+ const_iterator end() const { return Inputs.end(); }
+
+ static bool classof(const Action *) { return true; }
+};
+
+class InputAction : public Action {
+ virtual void anchor();
+ const Arg &Input;
+public:
+ InputAction(const Arg &_Input, types::ID _Type);
+
+ const Arg &getInputArg() const { return Input; }
+
+ static bool classof(const Action *A) {
+ return A->getKind() == InputClass;
+ }
+ static bool classof(const InputAction *) { return true; }
+};
+
+class BindArchAction : public Action {
+ virtual void anchor();
+ /// The architecture to bind, or 0 if the default architecture
+ /// should be bound.
+ const char *ArchName;
+
+public:
+ BindArchAction(Action *Input, const char *_ArchName);
+
+ const char *getArchName() const { return ArchName; }
+
+ static bool classof(const Action *A) {
+ return A->getKind() == BindArchClass;
+ }
+ static bool classof(const BindArchAction *) { return true; }
+};
+
+class JobAction : public Action {
+ virtual void anchor();
+protected:
+ JobAction(ActionClass Kind, Action *Input, types::ID Type);
+ JobAction(ActionClass Kind, const ActionList &Inputs, types::ID Type);
+
+public:
+ static bool classof(const Action *A) {
+ return (A->getKind() >= JobClassFirst &&
+ A->getKind() <= JobClassLast);
+ }
+ static bool classof(const JobAction *) { return true; }
+};
+
+class PreprocessJobAction : public JobAction {
+ virtual void anchor();
+public:
+ PreprocessJobAction(Action *Input, types::ID OutputType);
+
+ static bool classof(const Action *A) {
+ return A->getKind() == PreprocessJobClass;
+ }
+ static bool classof(const PreprocessJobAction *) { return true; }
+};
+
+class PrecompileJobAction : public JobAction {
+ virtual void anchor();
+public:
+ PrecompileJobAction(Action *Input, types::ID OutputType);
+
+ static bool classof(const Action *A) {
+ return A->getKind() == PrecompileJobClass;
+ }
+ static bool classof(const PrecompileJobAction *) { return true; }
+};
+
+class AnalyzeJobAction : public JobAction {
+ virtual void anchor();
+public:
+ AnalyzeJobAction(Action *Input, types::ID OutputType);
+
+ static bool classof(const Action *A) {
+ return A->getKind() == AnalyzeJobClass;
+ }
+ static bool classof(const AnalyzeJobAction *) { return true; }
+};
+
+class MigrateJobAction : public JobAction {
+ virtual void anchor();
+public:
+ MigrateJobAction(Action *Input, types::ID OutputType);
+
+ static bool classof(const Action *A) {
+ return A->getKind() == MigrateJobClass;
+ }
+ static bool classof(const MigrateJobAction *) { return true; }
+};
+
+class CompileJobAction : public JobAction {
+ virtual void anchor();
+public:
+ CompileJobAction(Action *Input, types::ID OutputType);
+
+ static bool classof(const Action *A) {
+ return A->getKind() == CompileJobClass;
+ }
+ static bool classof(const CompileJobAction *) { return true; }
+};
+
+class AssembleJobAction : public JobAction {
+ virtual void anchor();
+public:
+ AssembleJobAction(Action *Input, types::ID OutputType);
+
+ static bool classof(const Action *A) {
+ return A->getKind() == AssembleJobClass;
+ }
+ static bool classof(const AssembleJobAction *) { return true; }
+};
+
+class LinkJobAction : public JobAction {
+ virtual void anchor();
+public:
+ LinkJobAction(ActionList &Inputs, types::ID Type);
+
+ static bool classof(const Action *A) {
+ return A->getKind() == LinkJobClass;
+ }
+ static bool classof(const LinkJobAction *) { return true; }
+};
+
+class LipoJobAction : public JobAction {
+ virtual void anchor();
+public:
+ LipoJobAction(ActionList &Inputs, types::ID Type);
+
+ static bool classof(const Action *A) {
+ return A->getKind() == LipoJobClass;
+ }
+ static bool classof(const LipoJobAction *) { return true; }
+};
+
+class DsymutilJobAction : public JobAction {
+ virtual void anchor();
+public:
+ DsymutilJobAction(ActionList &Inputs, types::ID Type);
+
+ static bool classof(const Action *A) {
+ return A->getKind() == DsymutilJobClass;
+ }
+ static bool classof(const DsymutilJobAction *) { return true; }
+};
+
+class VerifyJobAction : public JobAction {
+ virtual void anchor();
+public:
+ VerifyJobAction(ActionList &Inputs, types::ID Type);
+ static bool classof(const Action *A) {
+ return A->getKind() == VerifyJobClass;
+ }
+ static bool classof(const VerifyJobAction *) { return true; }
+};
+
+} // end namespace driver
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Arg.h b/contrib/llvm/tools/clang/include/clang/Driver/Arg.h
new file mode 100644
index 0000000..e8625bb
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Arg.h
@@ -0,0 +1,122 @@
+//===--- Arg.h - Parsed Argument Classes ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_ARG_H_
+#define CLANG_DRIVER_ARG_H_
+
+#include "Util.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include <string>
+
+namespace clang {
+namespace driver {
+ class ArgList;
+ class Option;
+
+ /// Arg - A concrete instance of a particular driver option.
+ ///
+ /// The Arg class encodes just enough information to be able to
+ /// derive the argument values efficiently. In addition, Arg
+ /// instances have an intrusive double linked list which is used by
+ /// ArgList to provide efficient iteration over all instances of a
+ /// particular option.
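+ ///
+ /// For example, a command-line argument such as "-Ifoo" would typically be
+ /// represented as a single Arg for the -I option whose value list holds
+ /// "foo".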
+ class Arg {
+ Arg(const Arg &); // DO NOT IMPLEMENT
+ void operator=(const Arg &); // DO NOT IMPLEMENT
+
+ private:
+ /// The option this argument is an instance of.
+ const Option *Opt;
+
+ /// The argument this argument was derived from (during tool chain
+ /// argument translation), if any.
+ const Arg *BaseArg;
+
+ /// The index at which this argument appears in the containing
+ /// ArgList.
+ unsigned Index;
+
+ /// Was this argument used to effect compilation; used for generating
+ /// "argument unused" diagnostics.
+ mutable unsigned Claimed : 1;
+
+ /// Does this argument own its values.
+ mutable unsigned OwnsValues : 1;
+
+ /// The argument values, as C strings.
+ SmallVector<const char *, 2> Values;
+
+ public:
+ Arg(const Option *Opt, unsigned Index, const Arg *BaseArg = 0);
+ Arg(const Option *Opt, unsigned Index,
+ const char *Value0, const Arg *BaseArg = 0);
+ Arg(const Option *Opt, unsigned Index,
+ const char *Value0, const char *Value1, const Arg *BaseArg = 0);
+ ~Arg();
+
+ const Option &getOption() const { return *Opt; }
+ unsigned getIndex() const { return Index; }
+
+ /// getBaseArg - Return the base argument which generated this
+ /// arg; this is either the argument itself or the argument it was
+ /// derived from during tool chain specific argument translation.
+ const Arg &getBaseArg() const {
+ return BaseArg ? *BaseArg : *this;
+ }
+ void setBaseArg(const Arg *_BaseArg) {
+ BaseArg = _BaseArg;
+ }
+
+ bool getOwnsValues() const { return OwnsValues; }
+ void setOwnsValues(bool Value) const { OwnsValues = Value; }
+
+ bool isClaimed() const { return getBaseArg().Claimed; }
+
+ /// claim - Set the Arg claimed bit.
+ void claim() const { getBaseArg().Claimed = true; }
+
+ unsigned getNumValues() const { return Values.size(); }
+ const char *getValue(const ArgList &Args, unsigned N=0) const {
+ return Values[N];
+ }
+
+ SmallVectorImpl<const char*> &getValues() {
+ return Values;
+ }
+
+ bool containsValue(StringRef Value) const {
+ for (unsigned i = 0, e = getNumValues(); i != e; ++i)
+ if (Values[i] == Value)
+ return true;
+ return false;
+ }
+
+ /// render - Append the argument onto the given array as strings.
+ void render(const ArgList &Args, ArgStringList &Output) const;
+
+    /// renderAsInput - Append the argument, rendered as an input, onto
+    /// the given array as strings. The distinction is that some
+    /// options only render their values when rendered as an input
+ /// (e.g., Xlinker).
+ void renderAsInput(const ArgList &Args, ArgStringList &Output) const;
+
+ static bool classof(const Arg *) { return true; }
+
+ void dump() const;
+
+ /// getAsString - Return a formatted version of the argument and
+ /// its values, for debugging and diagnostics.
+ std::string getAsString(const ArgList &Args) const;
+ };
+
+} // end namespace driver
+} // end namespace clang
+
+#endif
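The claim machinery documented in Arg.h above is the key pattern here: isClaimed() and claim() always delegate to getBaseArg(), so using a tool-chain-translated argument also marks the original input argument as used, which is what suppresses "argument unused" diagnostics. Below is a minimal standalone C++ sketch of that delegation, not clang's actual API; MockArg and its members are illustrative stand-ins.

// Standalone sketch of the claim/base-arg delegation pattern (not the clang API).
#include <cassert>

class MockArg {
  const MockArg *BaseArg;   // argument this one was derived from, if any
  mutable bool Claimed;     // "was this argument used?" bit, shared via the base

public:
  explicit MockArg(const MockArg *Base = nullptr)
      : BaseArg(Base), Claimed(false) {}

  // Either the argument itself or the argument it was derived from.
  const MockArg &getBaseArg() const { return BaseArg ? *BaseArg : *this; }

  bool isClaimed() const { return getBaseArg().Claimed; }
  void claim() const { getBaseArg().Claimed = true; }
};

int main() {
  MockArg Input;               // original command-line argument
  MockArg Translated(&Input);  // tool-chain translated copy derived from it

  Translated.claim();          // using the translated arg...
  assert(Input.isClaimed());   // ...claims the original input arg too
  return 0;
}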
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h b/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h
new file mode 100644
index 0000000..3affb00
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/ArgList.h
@@ -0,0 +1,426 @@
+//===--- ArgList.h - Argument List Management -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_ARGLIST_H_
+#define CLANG_DRIVER_ARGLIST_H_
+
+#include "clang/Basic/LLVM.h"
+#include "clang/Driver/OptSpecifier.h"
+#include "clang/Driver/Util.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+
+#include <list>
+#include <string>
+#include <vector>
+
+namespace clang {
+ class DiagnosticsEngine;
+
+namespace driver {
+ class Arg;
+ class ArgList;
+ class Option;
+
+ /// arg_iterator - Iterates through arguments stored inside an ArgList.
+ class arg_iterator {
+ /// The current argument.
+ SmallVectorImpl<Arg*>::const_iterator Current;
+
+ /// The argument list we are iterating over.
+ const ArgList &Args;
+
+    /// Optional filters on the arguments which will be matched. Most clients
+ /// should never want to iterate over arguments without filters, so we won't
+ /// bother to factor this into two separate iterator implementations.
+ //
+ // FIXME: Make efficient; the idea is to provide efficient iteration over
+ // all arguments which match a particular id and then just provide an
+ // iterator combinator which takes multiple iterators which can be
+ // efficiently compared and returns them in order.
+ OptSpecifier Id0, Id1, Id2;
+
+ void SkipToNextArg();
+
+ public:
+ typedef Arg * const * value_type;
+ typedef Arg * const & reference;
+ typedef Arg * const * pointer;
+ typedef std::forward_iterator_tag iterator_category;
+ typedef std::ptrdiff_t difference_type;
+
+ arg_iterator(SmallVectorImpl<Arg*>::const_iterator it,
+ const ArgList &_Args, OptSpecifier _Id0 = 0U,
+ OptSpecifier _Id1 = 0U, OptSpecifier _Id2 = 0U)
+ : Current(it), Args(_Args), Id0(_Id0), Id1(_Id1), Id2(_Id2) {
+ SkipToNextArg();
+ }
+
+ operator const Arg*() { return *Current; }
+ reference operator*() const { return *Current; }
+ pointer operator->() const { return Current; }
+
+ arg_iterator &operator++() {
+ ++Current;
+ SkipToNextArg();
+ return *this;
+ }
+
+ arg_iterator operator++(int) {
+ arg_iterator tmp(*this);
+ ++(*this);
+ return tmp;
+ }
+
+ friend bool operator==(arg_iterator LHS, arg_iterator RHS) {
+ return LHS.Current == RHS.Current;
+ }
+ friend bool operator!=(arg_iterator LHS, arg_iterator RHS) {
+ return !(LHS == RHS);
+ }
+ };
+
+ /// ArgList - Ordered collection of driver arguments.
+ ///
+ /// The ArgList class manages a list of Arg instances as well as
+ /// auxiliary data and convenience methods to allow Tools to quickly
+ /// check for the presence of Arg instances for a particular Option
+ /// and to iterate over groups of arguments.
+ class ArgList {
+ private:
+ ArgList(const ArgList &); // DO NOT IMPLEMENT
+ void operator=(const ArgList &); // DO NOT IMPLEMENT
+
+ public:
+ typedef SmallVector<Arg*, 16> arglist_type;
+ typedef arglist_type::iterator iterator;
+ typedef arglist_type::const_iterator const_iterator;
+ typedef arglist_type::reverse_iterator reverse_iterator;
+ typedef arglist_type::const_reverse_iterator const_reverse_iterator;
+
+ private:
+ /// The internal list of arguments.
+ arglist_type Args;
+
+ protected:
+ ArgList();
+
+ public:
+ virtual ~ArgList();
+
+ /// @name Arg Access
+ /// @{
+
+ /// append - Append \arg A to the arg list.
+ void append(Arg *A);
+
+ arglist_type &getArgs() { return Args; }
+ const arglist_type &getArgs() const { return Args; }
+
+ unsigned size() const { return Args.size(); }
+
+ /// @}
+ /// @name Arg Iteration
+ /// @{
+
+ iterator begin() { return Args.begin(); }
+ iterator end() { return Args.end(); }
+
+ reverse_iterator rbegin() { return Args.rbegin(); }
+ reverse_iterator rend() { return Args.rend(); }
+
+ const_iterator begin() const { return Args.begin(); }
+ const_iterator end() const { return Args.end(); }
+
+ const_reverse_iterator rbegin() const { return Args.rbegin(); }
+ const_reverse_iterator rend() const { return Args.rend(); }
+
+ arg_iterator filtered_begin(OptSpecifier Id0 = 0U, OptSpecifier Id1 = 0U,
+ OptSpecifier Id2 = 0U) const {
+ return arg_iterator(Args.begin(), *this, Id0, Id1, Id2);
+ }
+ arg_iterator filtered_end() const {
+ return arg_iterator(Args.end(), *this);
+ }
+
+ /// @}
+ /// @name Arg Removal
+ /// @{
+
+ /// eraseArg - Remove any option matching \arg Id.
+ void eraseArg(OptSpecifier Id);
+
+ /// @}
+ /// @name Arg Access
+ /// @{
+
+ /// hasArg - Does the arg list contain any option matching \arg Id.
+ ///
+ /// \arg Claim Whether the argument should be claimed, if it exists.
+ bool hasArgNoClaim(OptSpecifier Id) const {
+ return getLastArgNoClaim(Id) != 0;
+ }
+ bool hasArg(OptSpecifier Id) const {
+ return getLastArg(Id) != 0;
+ }
+ bool hasArg(OptSpecifier Id0, OptSpecifier Id1) const {
+ return getLastArg(Id0, Id1) != 0;
+ }
+ bool hasArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2) const {
+ return getLastArg(Id0, Id1, Id2) != 0;
+ }
+
+ /// getLastArg - Return the last argument matching \arg Id, or null.
+ ///
+ /// \arg Claim Whether the argument should be claimed, if it exists.
+ Arg *getLastArgNoClaim(OptSpecifier Id) const;
+ Arg *getLastArg(OptSpecifier Id) const;
+ Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1) const;
+ Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2) const;
+ Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2,
+ OptSpecifier Id3) const;
+ Arg *getLastArg(OptSpecifier Id0, OptSpecifier Id1, OptSpecifier Id2,
+ OptSpecifier Id3, OptSpecifier Id4) const;
+
+ /// getArgString - Return the input argument string at \arg Index.
+ virtual const char *getArgString(unsigned Index) const = 0;
+
+ /// getNumInputArgStrings - Return the number of original argument strings,
+ /// which are guaranteed to be the first strings in the argument string
+ /// list.
+ virtual unsigned getNumInputArgStrings() const = 0;
+
+ /// @}
+ /// @name Argument Lookup Utilities
+ /// @{
+
+ /// getLastArgValue - Return the value of the last argument, or a default.
+ StringRef getLastArgValue(OptSpecifier Id,
+ StringRef Default = "") const;
+
+    /// getLastArgIntValue - Return the value of the last argument as an integer,
+ /// or a default. If Diags is non-null, emits an error if the argument
+ /// is given, but non-integral.
+ int getLastArgIntValue(OptSpecifier Id, int Default,
+ DiagnosticsEngine *Diags = 0) const;
+
+    /// getLastArgIntValue - Return the value of the last argument as an integer,
+ /// or a default. Emits an error if the argument is given, but non-integral.
+ int getLastArgIntValue(OptSpecifier Id, int Default,
+ DiagnosticsEngine &Diags) const {
+ return getLastArgIntValue(Id, Default, &Diags);
+ }
+
+ /// getAllArgValues - Get the values of all instances of the given argument
+ /// as strings.
+ std::vector<std::string> getAllArgValues(OptSpecifier Id) const;
+
+ /// @}
+ /// @name Translation Utilities
+ /// @{
+
+ /// hasFlag - Given an option \arg Pos and its negative form \arg
+ /// Neg, return true if the option is present, false if the
+ /// negation is present, and \arg Default if neither option is
+ /// given. If both the option and its negation are present, the
+ /// last one wins.
+ bool hasFlag(OptSpecifier Pos, OptSpecifier Neg, bool Default=true) const;
+
+    /// AddLastArg - Render only the last argument matching \arg Id0, if
+ /// present.
+ void AddLastArg(ArgStringList &Output, OptSpecifier Id0) const;
+
+ /// AddAllArgs - Render all arguments matching the given ids.
+ void AddAllArgs(ArgStringList &Output, OptSpecifier Id0,
+ OptSpecifier Id1 = 0U, OptSpecifier Id2 = 0U) const;
+
+ /// AddAllArgValues - Render the argument values of all arguments
+ /// matching the given ids.
+ void AddAllArgValues(ArgStringList &Output, OptSpecifier Id0,
+ OptSpecifier Id1 = 0U, OptSpecifier Id2 = 0U) const;
+
+ /// AddAllArgsTranslated - Render all the arguments matching the
+ /// given ids, but forced to separate args and using the provided
+ /// name instead of the first option value.
+ ///
+ /// \param Joined - If true, render the argument as joined with
+ /// the option specifier.
+ void AddAllArgsTranslated(ArgStringList &Output, OptSpecifier Id0,
+ const char *Translation,
+ bool Joined = false) const;
+
+ /// ClaimAllArgs - Claim all arguments which match the given
+ /// option id.
+ void ClaimAllArgs(OptSpecifier Id0) const;
+
+ /// ClaimAllArgs - Claim all arguments.
+ ///
+ void ClaimAllArgs() const;
+
+ /// @}
+ /// @name Arg Synthesis
+ /// @{
+
+ /// MakeArgString - Construct a constant string pointer whose
+ /// lifetime will match that of the ArgList.
+ virtual const char *MakeArgString(StringRef Str) const = 0;
+ const char *MakeArgString(const char *Str) const {
+ return MakeArgString(StringRef(Str));
+ }
+ const char *MakeArgString(std::string Str) const {
+ return MakeArgString(StringRef(Str));
+ }
+ const char *MakeArgString(const Twine &Str) const;
+
+ /// \brief Create an arg string for (\arg LHS + \arg RHS), reusing the
+ /// string at \arg Index if possible.
+ const char *GetOrMakeJoinedArgString(unsigned Index, StringRef LHS,
+ StringRef RHS) const;
+
+ /// @}
+ };
+
+ class InputArgList : public ArgList {
+ private:
+ /// List of argument strings used by the contained Args.
+ ///
+ /// This is mutable since we treat the ArgList as being the list
+ /// of Args, and allow routines to add new strings (to have a
+ /// convenient place to store the memory) via MakeIndex.
+ mutable ArgStringList ArgStrings;
+
+ /// Strings for synthesized arguments.
+ ///
+ /// This is mutable since we treat the ArgList as being the list
+ /// of Args, and allow routines to add new strings (to have a
+ /// convenient place to store the memory) via MakeIndex.
+ mutable std::list<std::string> SynthesizedStrings;
+
+ /// The number of original input argument strings.
+ unsigned NumInputArgStrings;
+
+ public:
+ InputArgList(const char* const *ArgBegin, const char* const *ArgEnd);
+ ~InputArgList();
+
+ virtual const char *getArgString(unsigned Index) const {
+ return ArgStrings[Index];
+ }
+
+ virtual unsigned getNumInputArgStrings() const {
+ return NumInputArgStrings;
+ }
+
+ /// @name Arg Synthesis
+ /// @{
+
+ public:
+ /// MakeIndex - Get an index for the given string(s).
+ unsigned MakeIndex(StringRef String0) const;
+ unsigned MakeIndex(StringRef String0, StringRef String1) const;
+
+ virtual const char *MakeArgString(StringRef Str) const;
+
+ /// @}
+ };
+
+ /// DerivedArgList - An ordered collection of driver arguments,
+ /// whose storage may be in another argument list.
+ class DerivedArgList : public ArgList {
+ const InputArgList &BaseArgs;
+
+ /// The list of arguments we synthesized.
+ mutable arglist_type SynthesizedArgs;
+
+ public:
+ /// Construct a new derived arg list from \arg BaseArgs.
+ DerivedArgList(const InputArgList &BaseArgs);
+ ~DerivedArgList();
+
+ virtual const char *getArgString(unsigned Index) const {
+ return BaseArgs.getArgString(Index);
+ }
+
+ virtual unsigned getNumInputArgStrings() const {
+ return BaseArgs.getNumInputArgStrings();
+ }
+
+ const InputArgList &getBaseArgs() const {
+ return BaseArgs;
+ }
+
+ /// @name Arg Synthesis
+ /// @{
+
+    /// AddSynthesizedArg - Add an argument to the list of synthesized arguments
+ /// (to be freed).
+ void AddSynthesizedArg(Arg *A) {
+ SynthesizedArgs.push_back(A);
+ }
+
+ virtual const char *MakeArgString(StringRef Str) const;
+
+ /// AddFlagArg - Construct a new FlagArg for the given option \arg Id and
+ /// append it to the argument list.
+ void AddFlagArg(const Arg *BaseArg, const Option *Opt) {
+ append(MakeFlagArg(BaseArg, Opt));
+ }
+
+ /// AddPositionalArg - Construct a new Positional arg for the given option
+ /// \arg Id, with the provided \arg Value and append it to the argument
+ /// list.
+ void AddPositionalArg(const Arg *BaseArg, const Option *Opt,
+ StringRef Value) {
+ append(MakePositionalArg(BaseArg, Opt, Value));
+ }
+
+
+    /// AddSeparateArg - Construct a new Separate arg for the given option
+ /// \arg Id, with the provided \arg Value and append it to the argument
+ /// list.
+ void AddSeparateArg(const Arg *BaseArg, const Option *Opt,
+ StringRef Value) {
+ append(MakeSeparateArg(BaseArg, Opt, Value));
+ }
+
+
+    /// AddJoinedArg - Construct a new Joined arg for the given option \arg
+ /// Id, with the provided \arg Value and append it to the argument list.
+ void AddJoinedArg(const Arg *BaseArg, const Option *Opt,
+ StringRef Value) {
+ append(MakeJoinedArg(BaseArg, Opt, Value));
+ }
+
+
+ /// MakeFlagArg - Construct a new FlagArg for the given option
+ /// \arg Id.
+ Arg *MakeFlagArg(const Arg *BaseArg, const Option *Opt) const;
+
+ /// MakePositionalArg - Construct a new Positional arg for the
+ /// given option \arg Id, with the provided \arg Value.
+ Arg *MakePositionalArg(const Arg *BaseArg, const Option *Opt,
+ StringRef Value) const;
+
+    /// MakeSeparateArg - Construct a new Separate arg for the
+ /// given option \arg Id, with the provided \arg Value.
+ Arg *MakeSeparateArg(const Arg *BaseArg, const Option *Opt,
+ StringRef Value) const;
+
+    /// MakeJoinedArg - Construct a new Joined arg for the
+ /// given option \arg Id, with the provided \arg Value.
+ Arg *MakeJoinedArg(const Arg *BaseArg, const Option *Opt,
+ StringRef Value) const;
+
+ /// @}
+ };
+
+} // end namespace driver
+} // end namespace clang
+
+#endif
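The hasFlag() documentation in ArgList.h above specifies last-one-wins semantics for an option and its negation. The following is a minimal standalone sketch of that rule only, not clang's ArgList implementation; hasFlag here is a toy re-implementation over plain strings.

// Standalone sketch of last-one-wins flag handling (not the clang API).
#include <cassert>
#include <string>
#include <vector>

static bool hasFlag(const std::vector<std::string> &Args,
                    const std::string &Pos, const std::string &Neg,
                    bool Default = true) {
  // Scan from the back: whichever of Pos/Neg appears last decides the result.
  for (auto It = Args.rbegin(); It != Args.rend(); ++It) {
    if (*It == Pos)
      return true;   // positive form seen last
    if (*It == Neg)
      return false;  // negative form seen last
  }
  return Default;    // neither option given
}

int main() {
  std::vector<std::string> Args = {"-fexceptions", "-fno-exceptions"};
  assert(!hasFlag(Args, "-fexceptions", "-fno-exceptions")); // last one wins
  assert(hasFlag({}, "-fexceptions", "-fno-exceptions"));    // falls back to Default
  return 0;
}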
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.h b/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.h
new file mode 100644
index 0000000..0508213
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.h
@@ -0,0 +1,32 @@
+//===--- CC1AsOptions.h - Clang Assembler Options Table ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_CC1ASOPTIONS_H
+#define CLANG_DRIVER_CC1ASOPTIONS_H
+
+namespace clang {
+namespace driver {
+ class OptTable;
+
+namespace cc1asoptions {
+ enum ID {
+ OPT_INVALID = 0, // This is not an option ID.
+#define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR) OPT_##ID,
+#include "clang/Driver/CC1AsOptions.inc"
+ LastOption
+#undef OPTION
+ };
+}
+
+ OptTable *createCC1AsOptTable();
+}
+}
+
+#endif
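CC1AsOptions.h builds its OPT_##ID enumeration by re-defining the OPTION macro and then including the TableGen-generated CC1AsOptions.inc; CC1Options.h below uses the same trick. Here is a self-contained sketch of that X-macro technique, with a hand-written TOY_OPTIONS list standing in for the generated .inc file (all names in it are hypothetical).

// Standalone sketch of the OPTION(...) X-macro pattern.
#include <cstdio>

// Stand-in for the generated .inc file: one OPTION entry per option.
#define TOY_OPTIONS            \
  OPTION(Triple, "-triple")    \
  OPTION(Output, "-o")         \
  OPTION(Help, "-help")

enum ToyOptID {
  TOY_OPT_INVALID = 0,  // not an option ID
#define OPTION(ID, NAME) TOY_OPT_##ID,
  TOY_OPTIONS
#undef OPTION
  TOY_LastOption
};

// The same list can be expanded again to build a parallel name table.
static const char *const ToyOptNames[] = {
  "<invalid>",
#define OPTION(ID, NAME) NAME,
  TOY_OPTIONS
#undef OPTION
};

int main() {
  std::printf("%u options, e.g. %s\n",
              static_cast<unsigned>(TOY_LastOption) - 1,
              ToyOptNames[TOY_OPT_Help]);
  return 0;
}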
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td b/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td
new file mode 100644
index 0000000..37ba602
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/CC1AsOptions.td
@@ -0,0 +1,91 @@
+//===--- CC1AsOptions.td - Options for clang -cc1as -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the options accepted by clang -cc1as.
+//
+//===----------------------------------------------------------------------===//
+
+// Include the common option parsing interfaces.
+include "OptParser.td"
+
+//===----------------------------------------------------------------------===//
+// Target Options
+//===----------------------------------------------------------------------===//
+
+def triple : Separate<"-triple">,
+ HelpText<"Specify target triple (e.g. x86_64-pc-linux-gnu)">;
+def target_cpu : Separate<"-target-cpu">,
+ HelpText<"Target a specific cpu type">;
+def target_feature : Separate<"-target-feature">,
+ HelpText<"Target specific attributes">;
+
+//===----------------------------------------------------------------------===//
+// Language Options
+//===----------------------------------------------------------------------===//
+
+def I : JoinedOrSeparate<"-I">, MetaVarName<"<directory>">,
+ HelpText<"Add directory to include search path">;
+def n : Flag<"-n">,
+ HelpText<"Don't automatically start assembly file with a text section">;
+def L : Flag<"-L">,
+ HelpText<"Save temporary labels in the symbol table. "
+ "Note this may change .s semantics, it should almost never be used "
+ "on compiler generated code!">;
+
+//===----------------------------------------------------------------------===//
+// Frontend Options
+//===----------------------------------------------------------------------===//
+
+def o : Separate<"-o">, MetaVarName<"<path>">, HelpText<"Specify output file">;
+
+def filetype : Separate<"-filetype">,
+ HelpText<"Specify the output file type ('asm', 'null', or 'obj')">;
+
+def help : Flag<"-help">,
+ HelpText<"Print this help text">;
+def _help : Flag<"--help">, Alias<help>;
+
+def version : Flag<"-version">,
+ HelpText<"Print the assembler version">;
+def _version : Flag<"--version">, Alias<version>;
+def v : Flag<"-v">, Alias<version>;
+
+// Generic forwarding to LLVM options. This should only be used for debugging
+// and experimental features.
+def mllvm : Separate<"-mllvm">,
+ HelpText<"Additional arguments to forward to LLVM's option processing">;
+
+//===----------------------------------------------------------------------===//
+// Transliterate Options
+//===----------------------------------------------------------------------===//
+
+def output_asm_variant : Separate<"-output-asm-variant">,
+ HelpText<"Select the asm variant index to use for output">;
+def show_encoding : Flag<"-show-encoding">,
+ HelpText<"Show instruction encoding information in transliterate mode">;
+def show_inst : Flag<"-show-inst">,
+ HelpText<"Show internal instruction representation in transliterate mode">;
+
+//===----------------------------------------------------------------------===//
+// Assemble Options
+//===----------------------------------------------------------------------===//
+
+def relax_all : Flag<"-relax-all">,
+ HelpText<"Relax all fixups (for performance testing)">;
+
+def no_exec_stack : Flag<"--noexecstack">,
+ HelpText<"Mark the file as not needing an executable stack">;
+
+def fatal_warnings : Flag<"--fatal-warnings">,
+ HelpText<"Consider warnings as errors">;
+
+def g : Flag<"-g">, HelpText<"Generate source level debug information">;
+
+def dwarf_debug_flags : Separate<"-dwarf-debug-flags">,
+ HelpText<"The string to embed in the Dwarf debug flags record.">;
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.h b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.h
new file mode 100644
index 0000000..4a8bbe5
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.h
@@ -0,0 +1,32 @@
+//===--- CC1Options.h - Clang CC1 Options Table -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_CC1OPTIONS_H
+#define CLANG_DRIVER_CC1OPTIONS_H
+
+namespace clang {
+namespace driver {
+ class OptTable;
+
+namespace cc1options {
+ enum ID {
+ OPT_INVALID = 0, // This is not an option ID.
+#define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR) OPT_##ID,
+#include "clang/Driver/CC1Options.inc"
+ LastOption
+#undef OPTION
+ };
+}
+
+ OptTable *createCC1OptTable();
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
new file mode 100644
index 0000000..88009ed
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/CC1Options.td
@@ -0,0 +1,847 @@
+//===--- CC1Options.td - Options for clang -cc1 ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the options accepted by clang -cc1.
+//
+//===----------------------------------------------------------------------===//
+
+// Include the common option parsing interfaces.
+include "OptParser.td"
+
+//===----------------------------------------------------------------------===//
+// Target Options
+//===----------------------------------------------------------------------===//
+
+def cxx_abi : Separate<"-cxx-abi">,
+ HelpText<"Target a particular C++ ABI type">;
+def target_abi : Separate<"-target-abi">,
+ HelpText<"Target a particular ABI type">;
+def target_cpu : Separate<"-target-cpu">,
+ HelpText<"Target a specific cpu type">;
+def target_feature : Separate<"-target-feature">,
+ HelpText<"Target specific attributes">;
+def target_linker_version : Separate<"-target-linker-version">,
+ HelpText<"Target linker version">;
+def triple : Separate<"-triple">,
+ HelpText<"Specify target triple (e.g. i686-apple-darwin9)">;
+def triple_EQ : Joined<"-triple=">, Alias<triple>;
+
+//===----------------------------------------------------------------------===//
+// Analyzer Options
+//===----------------------------------------------------------------------===//
+
+def analysis_UnoptimizedCFG : Flag<"-unoptimized-cfg">,
+ HelpText<"Generate unoptimized CFGs for all analyses">;
+def analysis_CFGAddImplicitDtors : Flag<"-cfg-add-implicit-dtors">,
+ HelpText<"Add C++ implicit destructors to CFGs for all analyses">;
+def analysis_CFGAddInitializers : Flag<"-cfg-add-initializers">,
+ HelpText<"Add C++ initializers to CFGs for all analyses">;
+
+def analyzer_store : Separate<"-analyzer-store">,
+ HelpText<"Source Code Analysis - Abstract Memory Store Models">;
+def analyzer_store_EQ : Joined<"-analyzer-store=">, Alias<analyzer_store>;
+
+def analyzer_constraints : Separate<"-analyzer-constraints">,
+ HelpText<"Source Code Analysis - Symbolic Constraint Engines">;
+def analyzer_constraints_EQ : Joined<"-analyzer-constraints=">,
+ Alias<analyzer_constraints>;
+
+def analyzer_output : Separate<"-analyzer-output">,
+ HelpText<"Source Code Analysis - Output Options">;
+def analyzer_output_EQ : Joined<"-analyzer-output=">,
+ Alias<analyzer_output>;
+
+def analyzer_purge : Separate<"-analyzer-purge">,
+ HelpText<"Source Code Analysis - Dead Symbol Removal Frequency">;
+def analyzer_purge_EQ : Joined<"-analyzer-purge=">, Alias<analyzer_purge>;
+
+def analyzer_opt_analyze_headers : Flag<"-analyzer-opt-analyze-headers">,
+ HelpText<"Force the static analyzer to analyze functions defined in header files">;
+def analyzer_opt_analyze_nested_blocks : Flag<"-analyzer-opt-analyze-nested-blocks">,
+ HelpText<"Analyze the definitions of blocks in addition to functions">;
+def analyzer_display_progress : Flag<"-analyzer-display-progress">,
+ HelpText<"Emit verbose output about the analyzer's progress">;
+def analyze_function : Separate<"-analyze-function">,
+ HelpText<"Run analysis on specific function">;
+def analyze_function_EQ : Joined<"-analyze-function=">, Alias<analyze_function>;
+def analyzer_eagerly_assume : Flag<"-analyzer-eagerly-assume">,
+ HelpText<"Eagerly assume the truth/falseness of some symbolic constraints">;
+def analyzer_no_eagerly_trim_egraph : Flag<"-analyzer-no-eagerly-trim-egraph">,
+ HelpText<"Don't eagerly remove uninteresting ExplodedNodes from the ExplodedGraph">;
+def trim_egraph : Flag<"-trim-egraph">,
+ HelpText<"Only show error-related paths in the analysis graph">;
+def analyzer_viz_egraph_graphviz : Flag<"-analyzer-viz-egraph-graphviz">,
+ HelpText<"Display exploded graph using GraphViz">;
+def analyzer_viz_egraph_ubigraph : Flag<"-analyzer-viz-egraph-ubigraph">,
+ HelpText<"Display exploded graph using Ubigraph">;
+
+def analyzer_inline_max_stack_depth : Separate<"-analyzer-inline-max-stack-depth">,
+ HelpText<"Bound on stack depth while inlining (4 by default)">;
+def analyzer_inline_max_stack_depth_EQ : Joined<"-analyzer-inline-max-stack-depth=">,
+ Alias<analyzer_inline_max_stack_depth>;
+
+def analyzer_inline_max_function_size : Separate<"-analyzer-inline-max-function-size">,
+ HelpText<"Bound on the number of basic blocks in an inlined function (200 by default)">;
+def analyzer_inline_max_function_size_EQ : Joined<"-analyzer-inline-max-function-size=">,
+ Alias<analyzer_inline_max_function_size>;
+
+def analyzer_ipa : Separate<"-analyzer-ipa">,
+ HelpText<"Specify the inter-procedural analysis mode">;
+def analyzer_ipa_EQ : Joined<"-analyzer-ipa=">, Alias<analyzer_ipa>;
+
+def analyzer_inlining_mode : Separate<"-analyzer-inlining-mode">,
+ HelpText<"Specify the function selection heuristic used during inlining">;
+def analyzer_inlining_mode_EQ : Joined<"-analyzer-inlining-mode=">, Alias<analyzer_inlining_mode>;
+
+def analyzer_disable_retry_exhausted : Flag<"-analyzer-disable-retry-exhausted">,
+ HelpText<"Do not re-analyze paths leading to exhausted nodes with a different strategy (may decrease code coverage)">;
+
+def analyzer_max_nodes : Separate<"-analyzer-max-nodes">,
+ HelpText<"The maximum number of nodes the analyzer can generate (150000 default, 0 = no limit)">;
+def analyzer_max_loop : Separate<"-analyzer-max-loop">,
+ HelpText<"The maximum number of times the analyzer will go through a loop">;
+def analyzer_stats : Flag<"-analyzer-stats">,
+ HelpText<"Print internal analyzer statistics.">;
+
+def analyzer_checker : Separate<"-analyzer-checker">,
+ HelpText<"Choose analyzer checkers to enable">;
+def analyzer_checker_EQ : Joined<"-analyzer-checker=">,
+ Alias<analyzer_checker>;
+
+def analyzer_disable_checker : Separate<"-analyzer-disable-checker">,
+ HelpText<"Choose analyzer checkers to disable">;
+def analyzer_disable_checker_EQ : Joined<"-analyzer-disable-checker=">,
+ Alias<analyzer_disable_checker>;
+
+def analyzer_checker_help : Flag<"-analyzer-checker-help">,
+ HelpText<"Display the list of analyzer checkers that are available">;
+
+//===----------------------------------------------------------------------===//
+// Migrator Options
+//===----------------------------------------------------------------------===//
+def migrator_no_nsalloc_error : Flag<"-no-ns-alloc-error">,
+ HelpText<"Do not error on use of NSAllocateCollectable/NSReallocateCollectable">;
+
+def migrator_no_finalize_removal : Flag<"-no-finalize-removal">,
+ HelpText<"Do not remove finalize method in gc mode">;
+
+//===----------------------------------------------------------------------===//
+// CodeGen Options
+//===----------------------------------------------------------------------===//
+
+def disable_llvm_optzns : Flag<"-disable-llvm-optzns">,
+ HelpText<"Don't run LLVM optimization passes">;
+def disable_llvm_verifier : Flag<"-disable-llvm-verifier">,
+ HelpText<"Don't run the LLVM IR verifier pass">;
+def disable_red_zone : Flag<"-disable-red-zone">,
+ HelpText<"Do not emit code that uses the red zone.">;
+def fdebug_compilation_dir : Separate<"-fdebug-compilation-dir">,
+ HelpText<"The compilation directory to embed in the debug info.">;
+def dwarf_debug_flags : Separate<"-dwarf-debug-flags">,
+ HelpText<"The string to embed in the Dwarf debug flags record.">;
+def faddress_sanitizer: Flag<"-faddress-sanitizer">,
+ HelpText<"Enable AddressSanitizer instrumentation (memory error detection)">;
+def fthread_sanitizer: Flag<"-fthread-sanitizer">,
+ HelpText<"Enable ThreadSanitizer instrumentation (race detection)">;
+def fforbid_guard_variables : Flag<"-fforbid-guard-variables">,
+ HelpText<"Emit an error if a C++ static local initializer would need a guard variable">;
+def g : Flag<"-g">, HelpText<"Generate source level debug information">;
+def fno_dwarf2_cfi_asm : Flag<"-fno-dwarf2-cfi-asm">,
+ HelpText<"Don't use the cfi directives">;
+def fno_dwarf_directory_asm : Flag<"-fno-dwarf-directory-asm">,
+ HelpText<"Don't separate directory and filename in .file directives">;
+def fcatch_undefined_behavior : Flag<"-fcatch-undefined-behavior">,
+ HelpText<"Generate runtime checks for undefined behavior.">;
+def flimit_debug_info : Flag<"-flimit-debug-info">,
+ HelpText<"Limit debug information produced to reduce size of debug binary">;
+def fno_common : Flag<"-fno-common">,
+ HelpText<"Compile common globals like normal definitions">;
+def no_implicit_float : Flag<"-no-implicit-float">,
+ HelpText<"Don't generate implicit floating point instructions (x86-only)">;
+def finstrument_functions : Flag<"-finstrument-functions">,
+ HelpText<"Generate calls to instrument function entry and exit">;
+def fno_limit_debug_info : Flag<"-fno-limit-debug-info">,
+ HelpText<"Do not limit debug information produced to reduce size of debug binary">;
+def fno_merge_all_constants : Flag<"-fno-merge-all-constants">,
+ HelpText<"Disallow merging of constants.">;
+def fno_threadsafe_statics : Flag<"-fno-threadsafe-statics">,
+ HelpText<"Do not emit code to make initialization of local statics thread safe">;
+def fdump_vtable_layouts : Flag<"-fdump-vtable-layouts">,
+ HelpText<"Dump the layouts of all vtables that will be emitted in a translation unit">;
+def ffunction_sections : Flag<"-ffunction-sections">,
+ HelpText<"Place each function in its own section (ELF Only)">;
+def fdata_sections : Flag<"-fdata-sections">,
+ HelpText<"Place each data in its own section (ELF Only)">;
+def fstrict_enums : Flag<"-fstrict-enums">,
+ HelpText<"Enable optimizations based on the strict definition of an enum's "
+ "value range.">;
+def ftrap_function_EQ : Joined<"-ftrap-function=">,
+ HelpText<"Issue call to specified function rather than a trap instruction">;
+def funroll_loops : Flag<"-funroll-loops">,
+ HelpText<"Turn on loop unroller">;
+def femit_coverage_notes : Flag<"-femit-coverage-notes">,
+ HelpText<"Emit a gcov coverage notes file when compiling.">;
+def femit_coverage_data: Flag<"-femit-coverage-data">,
+ HelpText<"Instrument the program to emit gcov coverage data when run.">;
+def coverage_file : Separate<"-coverage-file">,
+ HelpText<"Emit coverage data to this filename. The extension will be replaced.">;
+def coverage_file_EQ : Joined<"-coverage-file=">, Alias<coverage_file>;
+def fuse_register_sized_bitfield_access: Flag<"-fuse-register-sized-bitfield-access">,
+ HelpText<"Use register sized accesses to bit-fields, when possible.">;
+def relaxed_aliasing : Flag<"-relaxed-aliasing">,
+ HelpText<"Turn off Type Based Alias Analysis">;
+def masm_verbose : Flag<"-masm-verbose">,
+ HelpText<"Generate verbose assembly output">;
+def mcode_model : Separate<"-mcode-model">,
+ HelpText<"The code model to use">;
+def mdebug_pass : Separate<"-mdebug-pass">,
+ HelpText<"Enable additional debug output">;
+def mdisable_fp_elim : Flag<"-mdisable-fp-elim">,
+ HelpText<"Disable frame pointer elimination optimization">;
+def mdisable_tail_calls : Flag<"-mdisable-tail-calls">,
+ HelpText<"Disable tail call optimization, keeping the call stack accurate">;
+def menable_no_infinities : Flag<"-menable-no-infs">,
+ HelpText<"Allow optimization to assume there are no infinities.">;
+def menable_no_nans : Flag<"-menable-no-nans">,
+ HelpText<"Allow optimization to assume there are no NaNs.">;
+def menable_unsafe_fp_math : Flag<"-menable-unsafe-fp-math">,
+ HelpText<"Allow unsafe floating-point math optimizations which may decrease "
+ "precision">;
+def mfloat_abi : Separate<"-mfloat-abi">,
+ HelpText<"The float ABI to use">;
+def mno_global_merge : Flag<"-mno-global-merge">,
+ HelpText<"Disable merging of globals">;
+def mlimit_float_precision : Separate<"-mlimit-float-precision">,
+ HelpText<"Limit float precision to the given value">;
+def mno_exec_stack : Flag<"-mnoexecstack">,
+ HelpText<"Mark the file as not needing an executable stack">;
+def mno_zero_initialized_in_bss : Flag<"-mno-zero-initialized-in-bss">,
+ HelpText<"Do not put zero initialized data in the BSS">;
+def momit_leaf_frame_pointer : Flag<"-momit-leaf-frame-pointer">,
+ HelpText<"Omit frame pointer setup for leaf functions.">;
+def msoft_float : Flag<"-msoft-float">,
+ HelpText<"Use software floating point">;
+def backend_option : Separate<"-backend-option">,
+ HelpText<"Additional arguments to forward to LLVM backend (during code gen)">;
+def mregparm : Separate<"-mregparm">,
+ HelpText<"Limit the number of registers available for integer arguments">;
+def mrelax_all : Flag<"-mrelax-all">,
+ HelpText<"(integrated-as) Relax all machine instructions">;
+def msave_temp_labels : Flag<"-msave-temp-labels">,
+ HelpText<"(integrated-as) Save temporary labels">;
+def mrtd: Flag<"-mrtd">,
+ HelpText<"Make StdCall calling convention the default">;
+def mrelocation_model : Separate<"-mrelocation-model">,
+ HelpText<"The relocation model to use">;
+def munwind_tables : Flag<"-munwind-tables">,
+ HelpText<"Generate unwinding tables for all functions">;
+def mconstructor_aliases : Flag<"-mconstructor-aliases">,
+ HelpText<"Emit complete constructors and destructors as aliases when possible">;
+def mms_bitfields : Flag<"-mms-bitfields">,
+ HelpText<"Set the default structure layout to be compatible with the Microsoft compiler standard.">;
+def mstackrealign : Flag<"-mstackrealign">,
+ HelpText<"Force realign the stack at entry to every function.">;
+def mstack_alignment : Joined<"-mstack-alignment=">,
+ HelpText<"Set the stack alignment">;
+def mlink_bitcode_file : Separate<"-mlink-bitcode-file">,
+ HelpText<"Link the given bitcode file before performing optimizations.">;
+def O : Joined<"-O">, HelpText<"Optimization level">;
+def Os : Flag<"-Os">, HelpText<"Optimize for size">;
+def Oz : Flag<"-Oz">, HelpText<"Optimize for size, regardless of performance">;
+def pg : Flag<"-pg">, HelpText<"Enable mcount instrumentation">;
+
+//===----------------------------------------------------------------------===//
+// Dependency Output Options
+//===----------------------------------------------------------------------===//
+
+def dependency_file : Separate<"-dependency-file">,
+ HelpText<"Filename (or -) to write dependency output to">;
+def dependency_dot : Separate<"-dependency-dot">,
+ HelpText<"Filename to write DOT-formatted header dependencies to">;
+def sys_header_deps : Flag<"-sys-header-deps">,
+ HelpText<"Include system headers in dependency output">;
+def header_include_file : Separate<"-header-include-file">,
+ HelpText<"Filename (or -) to write header include output to">;
+def H : Flag<"-H">,
+ HelpText<"Show header includes and nesting depth">;
+def MQ : Separate<"-MQ">, HelpText<"Specify target to quote for dependency">;
+def MT : Separate<"-MT">, HelpText<"Specify target for dependency">;
+def MP : Flag<"-MP">,
+ HelpText<"Create phony target for each dependency (other than main file)">;
+def MG : Flag<"-MG">, HelpText<"Add missing headers to dependency list">;
+
+//===----------------------------------------------------------------------===//
+// Diagnostic Options
+//===----------------------------------------------------------------------===//
+
+def dump_build_information : Separate<"-dump-build-information">,
+ MetaVarName<"<filename>">,
+ HelpText<"output a dump of some build information to a file">;
+def diagnostic_log_file : Separate<"-diagnostic-log-file">,
+ HelpText<"Filename (or -) to log diagnostics to">;
+def diagnostic_serialized_file : Separate<"-serialize-diagnostic-file">,
+ MetaVarName<"<filename>">,
+ HelpText<"File for serializing diagnostics in a binary format">;
+def fno_show_column : Flag<"-fno-show-column">,
+ HelpText<"Do not include column number on diagnostics">;
+def fshow_column : Flag<"-fshow-column">,
+ HelpText<"Include column number on diagnostics">;
+def fno_show_source_location : Flag<"-fno-show-source-location">,
+ HelpText<"Do not include source location information with diagnostics">;
+def fshow_overloads_EQ : Joined<"-fshow-overloads=">,
+ HelpText<"Which overload candidates to show when overload resolution fails: "
+ "best|all; defaults to all">;
+def fno_caret_diagnostics : Flag<"-fno-caret-diagnostics">,
+ HelpText<"Do not include source line and caret with diagnostics">;
+def fno_diagnostics_fixit_info : Flag<"-fno-diagnostics-fixit-info">,
+ HelpText<"Do not include fixit information in diagnostics">;
+def fno_diagnostics_show_note_include_stack :
+ Flag<"-fno-diagnostics-show-note-include-stack">,
+ HelpText<"Display include stacks for diagnostic notes">;
+def w : Flag<"-w">, HelpText<"Suppress all warnings">;
+def pedantic : Flag<"-pedantic">;
+def pedantic_errors : Flag<"-pedantic-errors">;
+
+// This gets all -W options, including -Werror, -W[no-]system-headers, etc. The
+// driver has stripped off -Wa,foo etc. The driver has also translated -W to
+// -Wextra, so we don't need to worry about it.
+def W : Joined<"-W">;
+
+def fdiagnostics_print_source_range_info : Flag<"-fdiagnostics-print-source-range-info">,
+ HelpText<"Print source range spans in numeric form">;
+def fdiagnostics_parseable_fixits : Flag<"-fdiagnostics-parseable-fixits">,
+ HelpText<"Print fix-its in machine parseable form">;
+def fdiagnostics_show_option : Flag<"-fdiagnostics-show-option">,
+ HelpText<"Print option name with mappable diagnostics">;
+def fdiagnostics_format : Separate<"-fdiagnostics-format">,
+ HelpText<"Change diagnostic formatting to match IDE and command line tools">;
+def fdiagnostics_show_category : Separate<"-fdiagnostics-show-category">,
+ HelpText<"Print diagnostic category">;
+def fdiagnostics_show_note_include_stack :
+ Flag<"-fdiagnostics-show-note-include-stack">,
+ HelpText<"Display include stacks for diagnostic notes">;
+def ftabstop : Separate<"-ftabstop">, MetaVarName<"<N>">,
+ HelpText<"Set the tab stop distance.">;
+def ferror_limit : Separate<"-ferror-limit">, MetaVarName<"<N>">,
+ HelpText<"Set the maximum number of errors to emit before stopping (0 = no limit).">;
+def fmacro_backtrace_limit : Separate<"-fmacro-backtrace-limit">, MetaVarName<"<N>">,
+ HelpText<"Set the maximum number of entries to print in a macro expansion backtrace (0 = no limit).">;
+def ftemplate_backtrace_limit : Separate<"-ftemplate-backtrace-limit">, MetaVarName<"<N>">,
+ HelpText<"Set the maximum number of entries to print in a template instantiation backtrace (0 = no limit).">;
+def fconstexpr_backtrace_limit : Separate<"-fconstexpr-backtrace-limit">, MetaVarName<"<N>">,
+ HelpText<"Set the maximum number of entries to print in a constexpr evaluation backtrace (0 = no limit).">;
+def fmessage_length : Separate<"-fmessage-length">, MetaVarName<"<N>">,
+ HelpText<"Format message diagnostics so that they fit within N columns or fewer, when possible.">;
+def fcolor_diagnostics : Flag<"-fcolor-diagnostics">,
+ HelpText<"Use colors in diagnostics">;
+def Wno_rewrite_macros : Flag<"-Wno-rewrite-macros">,
+ HelpText<"Silence ObjC rewriting warnings">;
+def verify : Flag<"-verify">,
+ HelpText<"Verify emitted diagnostics and warnings">;
+
+//===----------------------------------------------------------------------===//
+// Frontend Options
+//===----------------------------------------------------------------------===//
+
+// This isn't normally used, it is just here so we can parse a
+// CompilerInvocation out of a driver-derived argument vector.
+def cc1 : Flag<"-cc1">;
+
+def ast_merge : Separate<"-ast-merge">,
+ MetaVarName<"<ast file>">,
+ HelpText<"Merge the given AST file into the translation unit being compiled.">;
+def code_completion_at : Separate<"-code-completion-at">,
+ MetaVarName<"<file>:<line>:<column>">,
+ HelpText<"Dump code-completion information at a location">;
+def remap_file : Separate<"-remap-file">,
+ MetaVarName<"<from>;<to>">,
+ HelpText<"Replace the contents of the <from> file with the contents of the <to> file">;
+def code_completion_at_EQ : Joined<"-code-completion-at=">,
+ Alias<code_completion_at>;
+def code_completion_macros : Flag<"-code-completion-macros">,
+ HelpText<"Include macros in code-completion results">;
+def code_completion_patterns : Flag<"-code-completion-patterns">,
+ HelpText<"Include code patterns in code-completion results">;
+def no_code_completion_globals : Flag<"-no-code-completion-globals">,
+ HelpText<"Do not include global declarations in code-completion results.">;
+def disable_free : Flag<"-disable-free">,
+ HelpText<"Disable freeing of memory on exit">;
+def help : Flag<"-help">,
+ HelpText<"Print this help text">;
+def _help : Flag<"--help">, Alias<help>;
+def x : Separate<"-x">, HelpText<"Input language type">;
+def o : Separate<"-o">, MetaVarName<"<path>">, HelpText<"Specify output file">;
+def load : Separate<"-load">, MetaVarName<"<dsopath>">,
+ HelpText<"Load the named plugin (dynamic shared object)">;
+def plugin : Separate<"-plugin">, MetaVarName<"<name>">,
+ HelpText<"Use the named plugin action instead of the default action (use \"help\" to list available options)">;
+def plugin_arg : JoinedAndSeparate<"-plugin-arg-">,
+ MetaVarName<"<name> <arg>">,
+ HelpText<"Pass <arg> to plugin <name>">;
+def add_plugin : Separate<"-add-plugin">, MetaVarName<"<name>">,
+ HelpText<"Use the named plugin action in addition to the default action">;
+def resource_dir : Separate<"-resource-dir">,
+ HelpText<"The directory which holds the compiler resource files">;
+def version : Flag<"-version">,
+ HelpText<"Print the compiler version">;
+def _version : Flag<"--version">, Alias<version>;
+
+def Action_Group : OptionGroup<"<action group>">;
+let Group = Action_Group in {
+
+def Eonly : Flag<"-Eonly">,
+ HelpText<"Just run preprocessor, no output (for timings)">;
+def E : Flag<"-E">,
+ HelpText<"Run preprocessor, emit preprocessed file">;
+def dump_raw_tokens : Flag<"-dump-raw-tokens">,
+ HelpText<"Lex file in raw mode and dump raw tokens">;
+def analyze : Flag<"-analyze">,
+ HelpText<"Run static analysis engine">;
+def dump_tokens : Flag<"-dump-tokens">,
+ HelpText<"Run preprocessor, dump internal rep of tokens">;
+def init_only : Flag<"-init-only">,
+ HelpText<"Only execute frontend initialization">;
+def fsyntax_only : Flag<"-fsyntax-only">,
+ HelpText<"Run parser and perform semantic analysis">;
+def fixit : Flag<"-fixit">,
+ HelpText<"Apply fix-it advice to the input source">;
+def fixit_EQ : Joined<"-fixit=">,
+ HelpText<"Apply fix-it advice creating a file with the given suffix">;
+def print_preamble : Flag<"-print-preamble">,
+ HelpText<"Print the \"preamble\" of a file, which is a candidate for implicit"
+ " precompiled headers.">;
+def emit_html : Flag<"-emit-html">,
+ HelpText<"Output input source as HTML">;
+def ast_print : Flag<"-ast-print">,
+ HelpText<"Build ASTs and then pretty-print them">;
+def ast_dump : Flag<"-ast-dump">,
+ HelpText<"Build ASTs and then debug dump them">;
+def ast_dump_xml : Flag<"-ast-dump-xml">,
+ HelpText<"Build ASTs and then debug dump them in a verbose XML format">;
+def ast_view : Flag<"-ast-view">,
+ HelpText<"Build ASTs and view them with GraphViz">;
+def print_decl_contexts : Flag<"-print-decl-contexts">,
+ HelpText<"Print DeclContexts and their Decls">;
+def pubnames_dump : Flag<"-pubnames-dump">,
+ HelpText<"Print all of the public (global) names in the source, e.g., the "
+ "names of all global declarations and macros">;
+def emit_module : Flag<"-emit-module">,
+ HelpText<"Generate pre-compiled module file from a module map">;
+def emit_pth : Flag<"-emit-pth">,
+ HelpText<"Generate pre-tokenized header file">;
+def emit_pch : Flag<"-emit-pch">,
+ HelpText<"Generate pre-compiled header file">;
+def S : Flag<"-S">,
+ HelpText<"Emit native assembly code">;
+def emit_llvm : Flag<"-emit-llvm">,
+ HelpText<"Build ASTs then convert to LLVM, emit .ll file">;
+def emit_llvm_bc : Flag<"-emit-llvm-bc">,
+ HelpText<"Build ASTs then convert to LLVM, emit .bc file">;
+def emit_llvm_only : Flag<"-emit-llvm-only">,
+ HelpText<"Build ASTs and convert to LLVM, discarding output">;
+def emit_codegen_only : Flag<"-emit-codegen-only">,
+ HelpText<"Generate machine code, but discard output">;
+def emit_obj : Flag<"-emit-obj">,
+ HelpText<"Emit native object files">;
+def rewrite_test : Flag<"-rewrite-test">,
+ HelpText<"Rewriter playground">;
+def rewrite_objc : Flag<"-rewrite-objc">,
+ HelpText<"Rewrite ObjC into C (code rewriter example)">;
+def rewrite_macros : Flag<"-rewrite-macros">,
+ HelpText<"Expand macros without full preprocessing">;
+def migrate : Flag<"-migrate">,
+ HelpText<"Migrate source code">;
+}
+
+def mt_migrate_directory : Separate<"-mt-migrate-directory">,
+ HelpText<"Directory for temporary files produced during ARC or ObjC migration">;
+def arcmt_check : Flag<"-arcmt-check">,
+ HelpText<"Check for ARC migration issues that need manual handling">;
+def arcmt_modify : Flag<"-arcmt-modify">,
+ HelpText<"Apply modifications to files to conform to ARC">;
+def arcmt_migrate : Flag<"-arcmt-migrate">,
+ HelpText<"Apply modifications and produces temporary files that conform to ARC">;
+def arcmt_migrate_report_output : Separate<"-arcmt-migrate-report-output">,
+ HelpText<"Output path for the plist report">;
+def arcmt_migrate_emit_arc_errors : Flag<"-arcmt-migrate-emit-errors">,
+ HelpText<"Emit ARC errors even if the migrator can fix them">;
+
+def objcmt_migrate_literals : Flag<"-objcmt-migrate-literals">,
+ HelpText<"Enable migration to modern ObjC literals">;
+def objcmt_migrate_subscripting : Flag<"-objcmt-migrate-subscripting">,
+ HelpText<"Enable migration to modern ObjC subscripting">;
+
+def working_directory : JoinedOrSeparate<"-working-directory">,
+ HelpText<"Resolve file paths relative to the specified directory">;
+def working_directory_EQ : Joined<"-working-directory=">,
+ Alias<working_directory>;
+
+def relocatable_pch : Flag<"-relocatable-pch">,
+ HelpText<"Whether to build a relocatable precompiled header">;
+def print_stats : Flag<"-print-stats">,
+ HelpText<"Print performance metrics and statistics">;
+def ftime_report : Flag<"-ftime-report">,
+ HelpText<"Print the amount of time each phase of compilation takes">;
+def fdump_record_layouts : Flag<"-fdump-record-layouts">,
+ HelpText<"Dump record layout information">;
+def fdump_record_layouts_simple : Flag<"-fdump-record-layouts-simple">,
+ HelpText<"Dump record layout information in a simple form used for testing">;
+def fix_what_you_can : Flag<"-fix-what-you-can">,
+ HelpText<"Apply fix-it advice even in the presence of unfixable errors">;
+def fix_only_warnings : Flag<"-fix-only-warnings">,
+ HelpText<"Apply fix-it advice only for warnings, not errors">;
+def fixit_recompile : Flag<"-fixit-recompile">,
+ HelpText<"Apply fix-it changes and recompile">;
+def fixit_to_temp : Flag<"-fixit-to-temporary">,
+ HelpText<"Apply fix-it changes to temporary files">;
+
+// Generic forwarding to LLVM options. This should only be used for debugging
+// and experimental features.
+def mllvm : Separate<"-mllvm">,
+ HelpText<"Additional arguments to forward to LLVM's option processing">;
+
+def foverride_record_layout_EQ : Joined<"-foverride-record-layout=">,
+ HelpText<"Override record layouts with those in the given file">;
+
+//===----------------------------------------------------------------------===//
+// Language Options
+//===----------------------------------------------------------------------===//
+
+def fno_builtin : Flag<"-fno-builtin">,
+ HelpText<"Disable implicit builtin knowledge of functions">;
+def faltivec : Flag<"-faltivec">,
+ HelpText<"Enable AltiVec vector initializer syntax">;
+def fno_access_control : Flag<"-fno-access-control">,
+ HelpText<"Disable C++ access control">;
+def fno_assume_sane_operator_new : Flag<"-fno-assume-sane-operator-new">,
+ HelpText<"Don't assume that C++'s global operator new can't alias any pointer">;
+def fgnu_keywords : Flag<"-fgnu-keywords">,
+ HelpText<"Allow GNU-extension keywords regardless of language standard">;
+def fgnu89_inline : Flag<"-fgnu89-inline">,
+ HelpText<"Use the gnu89 inline semantics">;
+def fno_inline : Flag<"-fno-inline">,
+ HelpText<"Disable use of the inline keyword">;
+def fno_inline_functions : Flag<"-fno-inline-functions">,
+ HelpText<"Disable automatic function inlining">;
+def fno_gnu_keywords : Flag<"-fno-gnu-keywords">,
+ HelpText<"Disallow GNU-extension keywords regardless of language standard">;
+def fdollars_in_identifiers : Flag<"-fdollars-in-identifiers">,
+ HelpText<"Allow '$' in identifiers">;
+def fno_dollars_in_identifiers : Flag<"-fno-dollars-in-identifiers">,
+ HelpText<"Disallow '$' in identifiers">;
+def femit_all_decls : Flag<"-femit-all-decls">,
+ HelpText<"Emit all declarations, even if unused">;
+def fblocks : Flag<"-fblocks">,
+ HelpText<"Enable the 'blocks' language feature">;
+def fblocks_runtime_optional : Flag<"-fblocks-runtime-optional">,
+ HelpText<"Weakly link in the blocks runtime">;
+def fheinous_gnu_extensions : Flag<"-fheinous-gnu-extensions">;
+def fexceptions : Flag<"-fexceptions">,
+ HelpText<"Enable support for exception handling">;
+def fobjc_exceptions : Flag<"-fobjc-exceptions">,
+ HelpText<"Enable Objective-C exceptions">;
+def fcxx_exceptions : Flag<"-fcxx-exceptions">,
+ HelpText<"Enable C++ exceptions">;
+def fsjlj_exceptions : Flag<"-fsjlj-exceptions">,
+ HelpText<"Use SjLj style exceptions">;
+def ffast_math : Flag<"-ffast-math">,
+ HelpText<"Enable the *frontend*'s 'fast-math' mode. This has no effect on "
+ "optimizations, but provides a preprocessor macro __FAST_MATH__ the "
+ "same as GCC's -ffast-math flag.">;
+def ffreestanding : Flag<"-ffreestanding">,
+ HelpText<"Assert that the compilation takes place in a freestanding environment">;
+def fformat_extensions : Flag<"-fformat-extensions">,
+ HelpText<"FreeBSD printf format extensions">;
+def fgnu_runtime : Flag<"-fgnu-runtime">,
+ HelpText<"Generate output compatible with the standard GNU Objective-C runtime">;
+def fhidden_weak_vtables : Flag<"-fhidden-weak-vtables">,
+ HelpText<"Generate weak vtables and RTTI with hidden visibility">;
+def std_EQ : Joined<"-std=">,
+ HelpText<"Language standard to compile for">;
+def stdlib_EQ : Joined<"-stdlib=">,
+ HelpText<"C++ standard library to use">;
+def fmath_errno : Flag<"-fmath-errno">,
+ HelpText<"Require math functions to indicate errors by setting errno">;
+def fms_extensions : Flag<"-fms-extensions">,
+ HelpText<"Accept some non-standard constructs supported by the Microsoft compiler">;
+def fms_compatibility : Flag<"-fms-compatibility">,
+ HelpText<"Enable Microsoft compatibility mode">;
+def fmsc_version : Joined<"-fmsc-version=">,
+ HelpText<"Version of the Microsoft C/C++ compiler to report in _MSC_VER (0 = don't define it (default))">;
+def fborland_extensions : Flag<"-fborland-extensions">,
+ HelpText<"Accept non-standard constructs supported by the Borland compiler">;
+def main_file_name : Separate<"-main-file-name">,
+ HelpText<"Main file name to use for debug info">;
+def fno_elide_constructors : Flag<"-fno-elide-constructors">,
+ HelpText<"Disable C++ copy constructor elision">;
+def fno_lax_vector_conversions : Flag<"-fno-lax-vector-conversions">,
+ HelpText<"Disallow implicit conversions between vectors with a different number of elements or different element types">;
+def fno_operator_names : Flag<"-fno-operator-names">,
+ HelpText<"Do not treat C++ operator name keywords as synonyms for operators">;
+def fno_signed_char : Flag<"-fno-signed-char">,
+ HelpText<"Char is unsigned">;
+def fno_spell_checking : Flag<"-fno-spell-checking">,
+ HelpText<"Disable spell-checking">;
+def fno_use_cxa_atexit : Flag<"-fno-use-cxa-atexit">,
+ HelpText<"Don't use __cxa_atexit for calling destructors">;
+def fconstant_string_class : Separate<"-fconstant-string-class">,
+ MetaVarName<"<class name>">,
+ HelpText<"Specify the class to use for constant Objective-C string objects.">;
+def fno_constant_cfstrings : Flag<"-fno-constant-cfstrings">,
+ HelpText<"Enable creation of CodeFoundation-type constant strings">;
+def fobjc_arc : Flag<"-fobjc-arc">,
+ HelpText<"Synthesize retain and release calls for Objective-C pointers">;
+def fobjc_arc_cxxlib_EQ : Joined<"-fobjc-arc-cxxlib=">,
+ HelpText<"Objective-C++ Automatic Reference Counting standard library kind">;
+def fobjc_arc_exceptions : Flag<"-fobjc-arc-exceptions">,
+ HelpText<"Use EH-safe code when synthesizing retains and releases in -fobjc-arc">;
+def fobjc_runtime_has_arc : Flag<"-fobjc-runtime-has-arc">,
+ HelpText<"The target Objective-C runtime provides ARC entrypoints">;
+def fobjc_runtime_has_weak : Flag<"-fobjc-runtime-has-weak">,
+ HelpText<"The target Objective-C runtime supports ARC weak operations">;
+def fobjc_runtime_has_terminate : Flag<"-fobjc-runtime-has-terminate">,
+ HelpText<"The target Objective-C runtime provides an objc_terminate entrypoint">;
+def fobjc_gc : Flag<"-fobjc-gc">,
+ HelpText<"Enable Objective-C garbage collection">;
+def fobjc_gc_only : Flag<"-fobjc-gc-only">,
+ HelpText<"Use GC exclusively for Objective-C related memory management">;
+def fapple_kext : Flag<"-fapple-kext">,
+ HelpText<"Use Apple's kernel extensions ABI">;
+def fobjc_dispatch_method_EQ : Joined<"-fobjc-dispatch-method=">,
+ HelpText<"Objective-C dispatch method to use">;
+def fobjc_default_synthesize_properties : Flag<"-fobjc-default-synthesize-properties">,
+ HelpText<"enable the default synthesis of Objective-C properties">;
+def print_ivar_layout : Flag<"-print-ivar-layout">,
+ HelpText<"Enable Objective-C Ivar layout bitmap print trace">;
+def fobjc_fragile_abi : Flag<"-fobjc-fragile-abi">,
+ HelpText<"Use Objective-C's fragile ABI">;
+def fno_objc_infer_related_result_type : Flag<
+ "-fno-objc-infer-related-result-type">,
+ HelpText<
+ "do not infer Objective-C related result type based on method family">;
+def ftrapv : Flag<"-ftrapv">,
+ HelpText<"Trap on integer overflow">;
+def ftrapv_handler : Separate<"-ftrapv-handler">,
+ MetaVarName<"<function name>">,
+ HelpText<"Specify the function to be called on overflow.">;
+def fwrapv : Flag<"-fwrapv">,
+ HelpText<"Treat signed integer overflow as two's complement">;
+def pic_level : Separate<"-pic-level">,
+ HelpText<"Value for __PIC__">;
+def pie_level : Separate<"-pie-level">,
+ HelpText<"Value for __PIE__">;
+def pthread : Flag<"-pthread">,
+ HelpText<"Support POSIX threads in generated code">;
+def fpack_struct : Separate<"-fpack-struct">,
+ HelpText<"Specify the default maximum struct packing alignment">;
+def fpascal_strings : Flag<"-fpascal-strings">,
+ HelpText<"Recognize and construct Pascal-style string literals">;
+def fno_rtti : Flag<"-fno-rtti">,
+ HelpText<"Disable generation of rtti information">;
+def fno_validate_pch : Flag<"-fno-validate-pch">,
+ HelpText<"Disable validation of precompiled headers">;
+def dump_deserialized_pch_decls : Flag<"-dump-deserialized-decls">,
+ HelpText<"Dump declarations that are deserialized from PCH, for testing">;
+def error_on_deserialized_pch_decl : Separate<"-error-on-deserialized-decl">,
+ HelpText<"Emit error if a specific declaration is deserialized from PCH, for testing">;
+def error_on_deserialized_pch_decl_EQ : Joined<"-error-on-deserialized-decl=">,
+ Alias<error_on_deserialized_pch_decl>;
+def fshort_wchar : Flag<"-fshort-wchar">,
+ HelpText<"Force wchar_t to be a short unsigned int">;
+def fshort_enums : Flag<"-fshort-enums">,
+ HelpText<"Allocate to an enum type only as many bytes as it needs for the declared range of possible values">;
+def static_define : Flag<"-static-define">,
+ HelpText<"Should __STATIC__ be defined">;
+def stack_protector : Separate<"-stack-protector">,
+ HelpText<"Enable stack protectors">;
+def fvisibility : Separate<"-fvisibility">,
+ HelpText<"Default symbol visibility">;
+def fvisibility_inlines_hidden : Flag<"-fvisibility-inlines-hidden">,
+ HelpText<"Give inline C++ member functions default visibility by default">;
+def ftemplate_depth : Separate<"-ftemplate-depth">,
+ HelpText<"Maximum depth of recursive template instantiation">;
+def fconstexpr_depth : Separate<"-fconstexpr-depth">,
+ HelpText<"Maximum depth of recursive constexpr function calls">;
+def Wlarge_by_value_copy : Separate<"-Wlarge-by-value-copy">,
+ HelpText<"Warn if a function definition returns or accepts an object larger "
+ "in bytes that a given value">;
+def Wlarge_by_value_copy_EQ : Joined<"-Wlarge-by-value-copy=">,
+ Alias<Wlarge_by_value_copy>;
+def trigraphs : Flag<"-trigraphs">,
+ HelpText<"Process trigraph sequences">;
+def fwritable_strings : Flag<"-fwritable-strings">,
+ HelpText<"Store string literals as writable data">;
+def fconst_strings : Flag<"-fconst-strings">,
+ HelpText<"Use a const qualified type for string literals in C and ObjC">;
+def fno_const_strings : Flag<"-fno-const-strings">,
+ HelpText<"Don't use a const qualified type for string literals in C and ObjC">;
+def fno_bitfield_type_align : Flag<"-fno-bitfield-type-align">,
+ HelpText<"Ignore bit-field types when aligning structures">;
+def traditional_cpp : Flag<"-traditional-cpp">,
+ HelpText<"Enable some traditional CPP emulation">;
+def ffake_address_space_map : Flag<"-ffake-address-space-map">,
+ HelpText<"Use a fake address space map; OpenCL testing purposes only">;
+def fdelayed_template_parsing : Flag<"-fdelayed-template-parsing">,
+ HelpText<"Parse templated function definitions at the end of the "
+ "translation unit ">;
+def funknown_anytype : Flag<"-funknown-anytype">,
+ HelpText<"Enable parser support for the __unknown_anytype type; for testing purposes only">;
+def fdebugger_support : Flag<"-fdebugger-support">,
+ HelpText<"Enable special debugger support behavior">;
+def fdebugger_cast_result_to_id : Flag<"-fdebugger-cast-result-to-id">,
+ HelpText<"Enable casting unknown expression results to id">;
+def fdebugger_objc_literal : Flag<"-fdebugger-objc-literal">,
+ HelpText<"Enable special debugger support for objective-C subscripting and literals">;
+def fdeprecated_macro : Flag<"-fdeprecated-macro">,
+ HelpText<"Defines the __DEPRECATED macro">;
+def fno_deprecated_macro : Flag<"-fno-deprecated-macro">,
+ HelpText<"Undefines the __DEPRECATED macro">;
+def fapple_pragma_pack : Flag<"-fapple-pragma-pack">,
+ HelpText<"Enable Apple gcc-compatible #pragma pack handling">;
+
+//===----------------------------------------------------------------------===//
+// Header Search Options
+//===----------------------------------------------------------------------===//
+
+def nostdsysteminc : Flag<"-nostdsysteminc">,
+ HelpText<"Disable standard system #include directories">;
+def nostdincxx : Flag<"-nostdinc++">,
+ HelpText<"Disable standard #include directories for the C++ standard library">;
+def nobuiltininc : Flag<"-nobuiltininc">,
+ HelpText<"Disable builtin #include directories">;
+def fmodule_cache_path : Separate<"-fmodule-cache-path">,
+ MetaVarName<"<directory>">,
+ HelpText<"Specify the module cache path">;
+def fmodule_name : Joined<"-fmodule-name=">,
+ MetaVarName<"<name>">,
+ HelpText<"Specify the name of the module to build">;
+def fdisable_module_hash : Flag<"-fdisable-module-hash">,
+ HelpText<"Disable the module hash">;
+def fmodules : Flag<"-fmodules">,
+ HelpText<"Enable the 'modules' language feature">;
+
+def F : JoinedOrSeparate<"-F">, MetaVarName<"<directory>">,
+ HelpText<"Add directory to framework include search path">;
+def I : JoinedOrSeparate<"-I">, MetaVarName<"<directory>">,
+ HelpText<"Add directory to include search path">;
+def idirafter : JoinedOrSeparate<"-idirafter">, MetaVarName<"<directory>">,
+ HelpText<"Add directory to AFTER include search path">;
+def index_header_map : Flag<"-index-header-map">,
+ HelpText<"Make the next included directory (-I or -F) an indexer header map">;
+def iquote : JoinedOrSeparate<"-iquote">, MetaVarName<"<directory>">,
+ HelpText<"Add directory to QUOTE include search path">;
+def c_isystem : JoinedOrSeparate<"-c-isystem">, MetaVarName<"<directory>">,
+ HelpText<"Add directory to the C SYSTEM include search path">;
+def cxx_isystem : JoinedOrSeparate<"-cxx-isystem">, MetaVarName<"<directory>">,
+ HelpText<"Add directory to the C++ SYSTEM include search path">;
+def objc_isystem : JoinedOrSeparate<"-objc-isystem">,
+ MetaVarName<"<directory>">,
+ HelpText<"Add directory to the ObjC SYSTEM include search path">;
+def objcxx_isystem : JoinedOrSeparate<"-objcxx-isystem">,
+ MetaVarName<"<directory>">,
+ HelpText<"Add directory to the ObjC++ SYSTEM include search path">;
+def iframework : JoinedOrSeparate<"-iframework">, MetaVarName<"<directory>">,
+ HelpText<"Add directory to SYSTEM framework search path">;
+def isystem : JoinedOrSeparate<"-isystem">, MetaVarName<"<directory>">,
+ HelpText<"Add directory to SYSTEM include search path">;
+def iwithsysroot : JoinedOrSeparate<"-iwithsysroot">, MetaVarName<"<directory>">,
+ HelpText<"Add directory to SYSTEM include search path; "
+ "absolute paths are relative to -isysroot">;
+def internal_isystem : JoinedOrSeparate<"-internal-isystem">,
+ MetaVarName<"<directory>">,
+ HelpText<"Add directory to the internal system include search path; these "
+ "are assumed to not be user-provided and are used to model system "
+ "and standard headers' paths.">;
+def internal_externc_isystem : JoinedOrSeparate<"-internal-externc-isystem">,
+ MetaVarName<"<directory>">,
+ HelpText<"Add directory to the internal system include search path with "
+ "implicit extern \"C\" semantics; these are assumed to not be "
+ "user-provided and are used to model system and standard headers' "
+ "paths.">;
+def iprefix : JoinedOrSeparate<"-iprefix">, MetaVarName<"<prefix>">,
+ HelpText<"Set the -iwithprefix/-iwithprefixbefore prefix">;
+def iwithprefix : JoinedOrSeparate<"-iwithprefix">, MetaVarName<"<dir>">,
+ HelpText<"Set directory to SYSTEM include search path with prefix">;
+def iwithprefixbefore : JoinedOrSeparate<"-iwithprefixbefore">,
+ MetaVarName<"<dir>">,
+ HelpText<"Set directory to include search path with prefix">;
+def isysroot : JoinedOrSeparate<"-isysroot">, MetaVarName<"<dir>">,
+ HelpText<"Set the system root directory (usually /)">;
+def v : Flag<"-v">, HelpText<"Enable verbose output">;
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Options
+//===----------------------------------------------------------------------===//
+
+def D : JoinedOrSeparate<"-D">, MetaVarName<"<macro>">,
+ HelpText<"Predefine the specified macro">;
+def include_ : JoinedOrSeparate<"-include">, MetaVarName<"<file>">, EnumName<"include">,
+ HelpText<"Include file before parsing">;
+def imacros : JoinedOrSeparate<"-imacros">, MetaVarName<"<file>">,
+ HelpText<"Include macros from file before parsing">;
+def include_pch : Separate<"-include-pch">, MetaVarName<"<file>">,
+ HelpText<"Include precompiled header file">;
+def include_pth : Separate<"-include-pth">, MetaVarName<"<file>">,
+ HelpText<"Include file before parsing">;
+def chain_include : Separate<"-chain-include">, MetaVarName<"<file>">,
+ HelpText<"Include and chain a header file after turning it into PCH">;
+def preamble_bytes_EQ : Joined<"-preamble-bytes=">,
+ HelpText<"Assume that the precompiled header is a precompiled preamble "
+ "covering the first N bytes of the main file">;
+def token_cache : Separate<"-token-cache">, MetaVarName<"<path>">,
+ HelpText<"Use specified token cache file">;
+def U : JoinedOrSeparate<"-U">, MetaVarName<"<macro>">,
+ HelpText<"Undefine the specified macro">;
+def undef : Flag<"-undef">, MetaVarName<"<macro>">,
+ HelpText<"undef all system defines">;
+def detailed_preprocessing_record : Flag<"-detailed-preprocessing-record">,
+ HelpText<"include a detailed record of preprocessing actions">;
+def mqdsp6_compat : Flag<"-mqdsp6-compat">,
+ HelpText<"Enable hexagon-qdsp6 backward compatibility">;
+
+//===----------------------------------------------------------------------===//
+// Preprocessed Output Options
+//===----------------------------------------------------------------------===//
+
+def P : Flag<"-P">,
+ HelpText<"Disable linemarker output in -E mode">;
+def C : Flag<"-C">,
+ HelpText<"Enable comment output in -E mode">;
+def CC : Flag<"-CC">,
+ HelpText<"Enable comment output in -E mode, even from macro expansions">;
+def dM : Flag<"-dM">,
+ HelpText<"Print macro definitions in -E mode instead of normal output">;
+def dD : Flag<"-dD">,
+ HelpText<"Print macro definitions in -E mode in addition to normal output">;
+
+//===----------------------------------------------------------------------===//
+// OpenCL Options
+//===----------------------------------------------------------------------===//
+
+def cl_opt_disable : Flag<"-cl-opt-disable">,
+ HelpText<"OpenCL only. This option disables all optimizations. The default is optimizations are enabled.">;
+def cl_single_precision_constant : Flag<"-cl-single-precision-constant">,
+ HelpText<"OpenCL only. Treat double precision floating-point constant as single precision constant.">;
+def cl_finite_math_only : Flag<"-cl-finite-math-only">,
+ HelpText<"OpenCL only. Allow floating-point optimizations that assume arguments and results are not NaNs or +-Inf.">;
+def cl_unsafe_math_optimizations : Flag<"-cl-unsafe-math-optimizations">,
+ HelpText<"OpenCL only. Allow unsafe floating-point optimizations. Also implies -cl-no-signed-zeros and -cl-mad-enable">;
+def cl_fast_relaxed_math : Flag<"-cl-fast-relaxed-math">,
+ HelpText<"OpenCL only. Sets -cl-finite-math-only and -cl-unsafe-math-optimizations, and defines __FAST_RELAXED_MATH__">;
+def cl_mad_enable : Flag<"-cl-mad-enable">,
+ HelpText<"OpenCL only. Enable less precise MAD instructions to be generated.">;
+def cl_std_EQ : Joined<"-cl-std=">,
+ HelpText<"OpenCL language standard to compile for">;
+
+//===----------------------------------------------------------------------===//
+// CUDA Options
+//===----------------------------------------------------------------------===//
+
+def fcuda_is_device : Flag<"-fcuda-is-device">,
+ HelpText<"Generate code for CUDA device">;
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h b/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h
new file mode 100644
index 0000000..fd88c3a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Compilation.h
@@ -0,0 +1,163 @@
+//===--- Compilation.h - Compilation Task Data Structure --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_COMPILATION_H_
+#define CLANG_DRIVER_COMPILATION_H_
+
+#include "clang/Driver/Job.h"
+#include "clang/Driver/Util.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Path.h"
+
+namespace clang {
+namespace driver {
+ class DerivedArgList;
+ class Driver;
+ class InputArgList;
+ class JobList;
+ class ToolChain;
+
+/// Compilation - A set of tasks to perform for a single driver
+/// invocation.
+class Compilation {
+ /// The driver we were created by.
+ const Driver &TheDriver;
+
+ /// The default tool chain.
+ const ToolChain &DefaultToolChain;
+
+ /// The original (untranslated) input argument list.
+ InputArgList *Args;
+
+ /// The driver translated arguments. Note that toolchains may perform their
+ /// own argument translation.
+ DerivedArgList *TranslatedArgs;
+
+ /// The list of actions.
+ ActionList Actions;
+
+ /// The root list of jobs.
+ JobList Jobs;
+
+ /// Cache of translated arguments for a particular tool chain and bound
+ /// architecture.
+ llvm::DenseMap<std::pair<const ToolChain*, const char*>,
+ DerivedArgList*> TCArgs;
+
+ /// Temporary files which should be removed on exit.
+ ArgStringList TempFiles;
+
+ /// Result files which should be removed on failure.
+ ArgStringList ResultFiles;
+
+ /// Result files which are generated correctly on failure, and which should
+ /// only be removed if we crash.
+ ArgStringList FailureResultFiles;
+
+ /// Redirection for stdout, stderr, etc.
+ const llvm::sys::Path **Redirects;
+
+public:
+ Compilation(const Driver &D, const ToolChain &DefaultToolChain,
+ InputArgList *Args, DerivedArgList *TranslatedArgs);
+ ~Compilation();
+
+ const Driver &getDriver() const { return TheDriver; }
+
+ const ToolChain &getDefaultToolChain() const { return DefaultToolChain; }
+
+ const InputArgList &getInputArgs() const { return *Args; }
+
+ const DerivedArgList &getArgs() const { return *TranslatedArgs; }
+
+ ActionList &getActions() { return Actions; }
+ const ActionList &getActions() const { return Actions; }
+
+ JobList &getJobs() { return Jobs; }
+ const JobList &getJobs() const { return Jobs; }
+
+ void addCommand(Command *C) { Jobs.addJob(C); }
+
+ const ArgStringList &getTempFiles() const { return TempFiles; }
+
+ const ArgStringList &getResultFiles() const { return ResultFiles; }
+
+ const ArgStringList &getFailureResultFiles() const {
+ return FailureResultFiles;
+ }
+
+ /// getArgsForToolChain - Return the derived argument list for the
+ /// tool chain \arg TC (or the default tool chain, if TC is not
+ /// specified).
+ ///
+ /// \param BoundArch - The bound architecture name, or 0.
+ const DerivedArgList &getArgsForToolChain(const ToolChain *TC,
+ const char *BoundArch);
+
+ /// addTempFile - Add a file to remove on exit, and returns its
+ /// argument.
+ const char *addTempFile(const char *Name) {
+ TempFiles.push_back(Name);
+ return Name;
+ }
+
+ /// addResultFile - Add a file to remove on failure, and returns its
+ /// argument.
+ const char *addResultFile(const char *Name) {
+ ResultFiles.push_back(Name);
+ return Name;
+ }
+
+ /// addFailureResultFile - Add a file to remove if we crash, and returns its
+ /// argument.
+ const char *addFailureResultFile(const char *Name) {
+ FailureResultFiles.push_back(Name);
+ return Name;
+ }
+
+ /// CleanupFileList - Remove the files in the given list.
+ ///
+ /// \param IssueErrors - Report failures as errors.
+ /// \return Whether all files were removed successfully.
+ bool CleanupFileList(const ArgStringList &Files,
+ bool IssueErrors=false) const;
+
+ /// PrintJob - Print one job in -### format.
+ ///
+ /// \param OS - The stream to print on.
+ /// \param J - The job to print.
+ /// \param Terminator - A string to print at the end of the line.
+ /// \param Quote - Should separate arguments be quoted.
+ void PrintJob(raw_ostream &OS, const Job &J,
+ const char *Terminator, bool Quote) const;
+
+ /// ExecuteCommand - Execute an actual command.
+ ///
+ /// \param FailingCommand - For non-zero results, this will be set to the
+ /// Command which failed, if any.
+ /// \return The result code of the subprocess.
+ int ExecuteCommand(const Command &C, const Command *&FailingCommand) const;
+
+ /// ExecuteJob - Execute a single job.
+ ///
+ /// \param FailingCommand - For non-zero results, this will be set to the
+ /// Command which failed.
+ /// \return The accumulated result code of the job.
+ int ExecuteJob(const Job &J, const Command *&FailingCommand) const;
+
+ /// initCompilationForDiagnostics - Remove stale state and suppress output
+ /// so compilation can be reexecuted to generate additional diagnostic
+ /// information (e.g., preprocessed source(s)).
+ void initCompilationForDiagnostics();
+};
+
+} // end namespace driver
+} // end namespace clang
+
+#endif
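
Compilation.h above keeps three file lists (temp files removed on exit, result files removed on failure, failure-result files removed only after a crash) and exposes addTempFile/addResultFile/CleanupFileList for the bookkeeping. Below is a minimal standalone sketch of that bookkeeping pattern, not the clang implementation; the FileBookkeeper name and the onExit helper are illustrative.

    // Standalone sketch of the temp/result-file bookkeeping pattern above.
    #include <cstdio>
    #include <string>
    #include <vector>

    class FileBookkeeper {
      std::vector<std::string> TempFiles;    // always removed on exit
      std::vector<std::string> ResultFiles;  // removed only on failure

    public:
      const std::string &addTempFile(const std::string &Name) {
        TempFiles.push_back(Name);
        return TempFiles.back();
      }
      const std::string &addResultFile(const std::string &Name) {
        ResultFiles.push_back(Name);
        return ResultFiles.back();
      }
      // Mirrors the CleanupFileList contract: returns whether every file
      // in the list was removed successfully.
      bool cleanupFileList(const std::vector<std::string> &Files) const {
        bool AllRemoved = true;
        for (size_t i = 0, e = Files.size(); i != e; ++i)
          if (std::remove(Files[i].c_str()) != 0)
            AllRemoved = false;
        return AllRemoved;
      }
      void onExit(bool Failed) {
        cleanupFileList(TempFiles);
        if (Failed)
          cleanupFileList(ResultFiles);
      }
    };
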
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Driver.h b/contrib/llvm/tools/clang/include/clang/Driver/Driver.h
new file mode 100644
index 0000000..0538334
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Driver.h
@@ -0,0 +1,418 @@
+//===--- Driver.h - Clang GCC Compatible Driver -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_DRIVER_H_
+#define CLANG_DRIVER_DRIVER_H_
+
+#include "clang/Basic/Diagnostic.h"
+
+#include "clang/Driver/Phases.h"
+#include "clang/Driver/Types.h"
+#include "clang/Driver/Util.h"
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Path.h" // FIXME: Kill when CompilationInfo
+ // lands.
+#include <list>
+#include <set>
+#include <string>
+
+namespace llvm {
+ template<typename T> class ArrayRef;
+}
+namespace clang {
+namespace driver {
+ class Action;
+ class Arg;
+ class ArgList;
+ class Command;
+ class Compilation;
+ class DerivedArgList;
+ class InputArgList;
+ class InputInfo;
+ class JobAction;
+ class OptTable;
+ class ToolChain;
+
+/// Driver - Encapsulate logic for constructing compilation processes
+/// from a set of gcc-driver-like command line arguments.
+class Driver {
+ OptTable *Opts;
+
+ DiagnosticsEngine &Diags;
+
+public:
+ // Diag - Forwarding function for diagnostics.
+ DiagnosticBuilder Diag(unsigned DiagID) const {
+ return Diags.Report(DiagID);
+ }
+
+ // FIXME: Privatize once interface is stable.
+public:
+ /// The name the driver was invoked as.
+ std::string Name;
+
+ /// The path the driver executable was in, as invoked from the
+ /// command line.
+ std::string Dir;
+
+ /// The original path to the clang executable.
+ std::string ClangExecutable;
+
+ /// The path to the installed clang directory, if any.
+ std::string InstalledDir;
+
+ /// The path to the compiler resource directory.
+ std::string ResourceDir;
+
+ /// A prefix directory used to emulate a limited subset of GCC's '-Bprefix'
+ /// functionality.
+ /// FIXME: This type of customization should be removed in favor of the
+ /// universal driver when it is ready.
+ typedef SmallVector<std::string, 4> prefix_list;
+ prefix_list PrefixDirs;
+
+ /// sysroot, if present
+ std::string SysRoot;
+
+ /// Whether the standard library is used.
+ bool UseStdLib;
+
+ /// Default target triple.
+ std::string DefaultTargetTriple;
+
+ /// Default name for linked images (e.g., "a.out").
+ std::string DefaultImageName;
+
+ /// Driver title to use with help.
+ std::string DriverTitle;
+
+ /// Information about the host which can be overridden by the user.
+ std::string HostBits, HostMachine, HostSystem, HostRelease;
+
+ /// The file to log CC_PRINT_OPTIONS output to, if enabled.
+ const char *CCPrintOptionsFilename;
+
+ /// The file to log CC_PRINT_HEADERS output to, if enabled.
+ const char *CCPrintHeadersFilename;
+
+ /// The file to log CC_LOG_DIAGNOSTICS output to, if enabled.
+ const char *CCLogDiagnosticsFilename;
+
+ /// A list of inputs and their types for the given arguments.
+ typedef SmallVector<std::pair<types::ID, const Arg*>, 16> InputList;
+
+ /// Whether the driver should follow g++ like behavior.
+ unsigned CCCIsCXX : 1;
+
+ /// Whether the driver is just the preprocessor.
+ unsigned CCCIsCPP : 1;
+
+ /// Echo commands while executing (in -v style).
+ unsigned CCCEcho : 1;
+
+ /// Only print tool bindings, don't build any jobs.
+ unsigned CCCPrintBindings : 1;
+
+ /// Set CC_PRINT_OPTIONS mode, which is like -v but logs the commands to
+ /// CCPrintOptionsFilename or to stderr.
+ unsigned CCPrintOptions : 1;
+
+ /// Set CC_PRINT_HEADERS mode, which causes the frontend to log header include
+ /// information to CCPrintHeadersFilename or to stderr.
+ unsigned CCPrintHeaders : 1;
+
+ /// Set CC_LOG_DIAGNOSTICS mode, which causes the frontend to log diagnostics
+ /// to CCLogDiagnosticsFilename or to stderr, in a stable machine readable
+ /// format.
+ unsigned CCLogDiagnostics : 1;
+
+ /// Whether the driver is generating diagnostics for debugging purposes.
+ unsigned CCGenDiagnostics : 1;
+
+private:
+ /// Name to use when invoking gcc/g++.
+ std::string CCCGenericGCCName;
+
+ /// Whether to check that input files exist when constructing compilation
+ /// jobs.
+ unsigned CheckInputsExist : 1;
+
+ /// Use the clang compiler where possible.
+ unsigned CCCUseClang : 1;
+
+ /// Use clang for handling C++ and Objective-C++ inputs.
+ unsigned CCCUseClangCXX : 1;
+
+ /// Use clang as a preprocessor (clang's preprocessor will still be
+ /// used where an integrated CPP would).
+ unsigned CCCUseClangCPP : 1;
+
+public:
+ /// Use lazy precompiled headers for PCH support.
+ unsigned CCCUsePCH : 1;
+
+private:
+ /// Only use clang for the given architectures (only used when
+ /// non-empty).
+ std::set<llvm::Triple::ArchType> CCCClangArchs;
+
+ /// Certain options suppress the 'no input files' warning.
+ bool SuppressMissingInputWarning : 1;
+
+ std::list<std::string> TempFiles;
+ std::list<std::string> ResultFiles;
+
+ /// \brief Cache of all the ToolChains in use by the driver.
+ ///
+ /// This maps from the string representation of a triple to a ToolChain
+ /// created targeting that triple. The driver owns all the ToolChain objects
+ /// stored in it, and will clean them up when torn down.
+ mutable llvm::StringMap<ToolChain *> ToolChains;
+
+private:
+ /// TranslateInputArgs - Create a new derived argument list from the input
+ /// arguments, after applying the standard argument translations.
+ DerivedArgList *TranslateInputArgs(const InputArgList &Args) const;
+
+ // getFinalPhase - Determine which compilation mode we are in and record
+ // which option we used to determine the final phase.
+ phases::ID getFinalPhase(const DerivedArgList &DAL, Arg **FinalPhaseArg = 0)
+ const;
+
+public:
+ Driver(StringRef _ClangExecutable,
+ StringRef _DefaultTargetTriple,
+ StringRef _DefaultImageName,
+ bool IsProduction,
+ DiagnosticsEngine &_Diags);
+ ~Driver();
+
+ /// @name Accessors
+ /// @{
+
+ /// Name to use when invoking gcc/g++.
+ const std::string &getCCCGenericGCCName() const { return CCCGenericGCCName; }
+
+
+ const OptTable &getOpts() const { return *Opts; }
+
+ const DiagnosticsEngine &getDiags() const { return Diags; }
+
+ bool getCheckInputsExist() const { return CheckInputsExist; }
+
+ void setCheckInputsExist(bool Value) { CheckInputsExist = Value; }
+
+ const std::string &getTitle() { return DriverTitle; }
+ void setTitle(std::string Value) { DriverTitle = Value; }
+
+ /// \brief Get the path to the main clang executable.
+ const char *getClangProgramPath() const {
+ return ClangExecutable.c_str();
+ }
+
+ /// \brief Get the path to where the clang executable was installed.
+ const char *getInstalledDir() const {
+ if (!InstalledDir.empty())
+ return InstalledDir.c_str();
+ return Dir.c_str();
+ }
+ void setInstalledDir(StringRef Value) {
+ InstalledDir = Value;
+ }
+
+ /// @}
+ /// @name Primary Functionality
+ /// @{
+
+ /// BuildCompilation - Construct a compilation object for a command
+ /// line argument vector.
+ ///
+ /// \return A compilation, or 0 if none was built for the given
+ /// argument vector. A null return value does not necessarily
+ /// indicate an error condition, the diagnostics should be queried
+ /// to determine if an error occurred.
+ Compilation *BuildCompilation(ArrayRef<const char *> Args);
+
+ /// @name Driver Steps
+ /// @{
+
+ /// ParseArgStrings - Parse the given list of strings into an
+ /// ArgList.
+ InputArgList *ParseArgStrings(ArrayRef<const char *> Args);
+
+ /// BuildInputs - Construct the list of inputs and their types from
+ /// the given arguments.
+ ///
+ /// \param TC - The default host tool chain.
+ /// \param Args - The input arguments.
+ /// \param Inputs - The list to store the resulting compilation
+ /// inputs onto.
+ void BuildInputs(const ToolChain &TC, const DerivedArgList &Args,
+ InputList &Inputs) const;
+
+ /// BuildActions - Construct the list of actions to perform for the
+ /// given arguments, which are only done for a single architecture.
+ ///
+ /// \param TC - The default host tool chain.
+ /// \param Args - The input arguments.
+ /// \param Actions - The list to store the resulting actions onto.
+ void BuildActions(const ToolChain &TC, const DerivedArgList &Args,
+ const InputList &Inputs, ActionList &Actions) const;
+
+ /// BuildUniversalActions - Construct the list of actions to perform
+ /// for the given arguments, which may require a universal build.
+ ///
+ /// \param TC - The default host tool chain.
+ /// \param Args - The input arguments.
+ /// \param Actions - The list to store the resulting actions onto.
+ void BuildUniversalActions(const ToolChain &TC, const DerivedArgList &Args,
+ const InputList &BAInputs,
+ ActionList &Actions) const;
+
+ /// BuildJobs - Bind actions to concrete tools and translate
+ /// arguments to form the list of jobs to run.
+ ///
+ /// \arg C - The compilation that is being built.
+ void BuildJobs(Compilation &C) const;
+
+ /// ExecuteCompilation - Execute the compilation according to the command line
+ /// arguments and return an appropriate exit code.
+ ///
+ /// This routine handles additional processing that must be done in addition
+ /// to just running the subprocesses, for example reporting errors, removing
+ /// temporary files, etc.
+ int ExecuteCompilation(const Compilation &C,
+ const Command *&FailingCommand) const;
+
+ /// generateCompilationDiagnostics - Generate diagnostic information
+ /// including preprocessed source file(s).
+ ///
+ void generateCompilationDiagnostics(Compilation &C,
+ const Command *FailingCommand);
+
+ /// @}
+ /// @name Helper Methods
+ /// @{
+
+ /// PrintActions - Print the list of actions.
+ void PrintActions(const Compilation &C) const;
+
+ /// PrintHelp - Print the help text.
+ ///
+ /// \param ShowHidden - Show hidden options.
+ void PrintHelp(bool ShowHidden) const;
+
+ /// PrintOptions - Print the list of arguments.
+ void PrintOptions(const ArgList &Args) const;
+
+ /// PrintVersion - Print the driver version.
+ void PrintVersion(const Compilation &C, raw_ostream &OS) const;
+
+ /// GetFilePath - Lookup \arg Name in the list of file search paths.
+ ///
+ /// \arg TC - The tool chain for additional information on
+ /// directories to search.
+ //
+ // FIXME: This should be in CompilationInfo.
+ std::string GetFilePath(const char *Name, const ToolChain &TC) const;
+
+ /// GetProgramPath - Lookup \arg Name in the list of program search
+ /// paths.
+ ///
+ /// \arg TC - The provided tool chain for additional information on
+ /// directories to search.
+ ///
+ /// \arg WantFile - False when searching for an executable file, otherwise
+ /// true. Defaults to false.
+ //
+ // FIXME: This should be in CompilationInfo.
+ std::string GetProgramPath(const char *Name, const ToolChain &TC,
+ bool WantFile = false) const;
+
+ /// HandleImmediateArgs - Handle any arguments which should be
+ /// treated before building actions or binding tools.
+ ///
+ /// \return Whether any compilation should be built for this
+ /// invocation.
+ bool HandleImmediateArgs(const Compilation &C);
+
+ /// ConstructPhaseAction - Construct the appropriate action to do for
+ /// \arg Phase on the \arg Input, taking into account arguments
+ /// like -fsyntax-only or --analyze.
+ Action *ConstructPhaseAction(const ArgList &Args, phases::ID Phase,
+ Action *Input) const;
+
+
+ /// BuildJobsForAction - Construct the jobs to perform for the
+ /// action \arg A.
+ void BuildJobsForAction(Compilation &C,
+ const Action *A,
+ const ToolChain *TC,
+ const char *BoundArch,
+ bool AtTopLevel,
+ const char *LinkingOutput,
+ InputInfo &Result) const;
+
+ /// GetNamedOutputPath - Return the name to use for the output of
+ /// the action \arg JA. The result is appended to the compilation's
+ /// list of temporary or result files, as appropriate.
+ ///
+ /// \param C - The compilation.
+ /// \param JA - The action of interest.
+ /// \param BaseInput - The original input file that this action was
+ /// triggered by.
+ /// \param AtTopLevel - Whether this is a "top-level" action.
+ const char *GetNamedOutputPath(Compilation &C,
+ const JobAction &JA,
+ const char *BaseInput,
+ bool AtTopLevel) const;
+
+ /// GetTemporaryPath - Return the pathname of a temporary file to use
+ /// as part of compilation; the file will have the given prefix and suffix.
+ ///
+ /// GCC goes to extra lengths here to be a bit more robust.
+ std::string GetTemporaryPath(StringRef Prefix, const char *Suffix) const;
+
+ /// ShouldUseClangCompiler - Should the clang compiler be used to
+ /// handle this action.
+ bool ShouldUseClangCompiler(const Compilation &C, const JobAction &JA,
+ const llvm::Triple &ArchName) const;
+
+ bool IsUsingLTO(const ArgList &Args) const;
+
+private:
+ /// \brief Retrieves a ToolChain for a particular target triple.
+ ///
+ /// Will cache ToolChains for the life of the driver object, and create them
+ /// on-demand.
+ const ToolChain &getToolChain(const ArgList &Args,
+ StringRef DarwinArchName = "") const;
+
+ /// @}
+
+public:
+ /// GetReleaseVersion - Parse (([0-9]+)(.([0-9]+)(.([0-9]+)?))?)? and
+ /// return the grouped values as integers. Numbers which are not
+ /// provided are set to 0.
+ ///
+ /// \return True if the entire string was parsed (9.2), or all
+ /// groups were parsed (10.3.5extrastuff). HadExtra is true if all
+ /// groups were parsed but extra characters remain at the end.
+ static bool GetReleaseVersion(const char *Str, unsigned &Major,
+ unsigned &Minor, unsigned &Micro,
+ bool &HadExtra);
+};
+
+} // end namespace driver
+} // end namespace clang
+
+#endif
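
Driver::GetReleaseVersion above documents a small version-string parser: up to three dot-separated numeric groups, missing groups defaulting to 0, and HadExtra set when all groups parse but trailing characters remain. The following is a standalone sketch that matches that documented contract; it is illustrative only and not clang's implementation.

    // Sketch of the documented GetReleaseVersion behavior.
    #include <cstdlib>

    static bool ParseReleaseVersion(const char *Str, unsigned &Major,
                                    unsigned &Minor, unsigned &Micro,
                                    bool &HadExtra) {
      Major = Minor = Micro = 0;
      HadExtra = false;

      char *End;
      Major = (unsigned)std::strtol(Str, &End, 10);
      if (Str == End) return false;      // no leading number
      if (*End == '\0') return true;     // "<major>"
      if (*End != '.') return false;

      Str = End + 1;
      Minor = (unsigned)std::strtol(Str, &End, 10);
      if (Str == End) return false;
      if (*End == '\0') return true;     // "<major>.<minor>"
      if (*End != '.') return false;

      Str = End + 1;
      Micro = (unsigned)std::strtol(Str, &End, 10);
      if (Str == End) return false;
      if (*End != '\0')
        HadExtra = true;                 // e.g. "10.3.5extrastuff"
      return true;
    }

For example, parsing "10.3.5extrastuff" would yield 10/3/5 with HadExtra set, while "9.2" yields 9/2/0 with HadExtra clear, matching the examples in the comment above.
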
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/DriverDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Driver/DriverDiagnostic.h
new file mode 100644
index 0000000..ea7b52f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/DriverDiagnostic.h
@@ -0,0 +1,28 @@
+//===--- DiagnosticDriver.h - Diagnostics for libdriver ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_DRIVERDIAGNOSTIC_H
+#define LLVM_CLANG_DRIVERDIAGNOSTIC_H
+
+#include "clang/Basic/Diagnostic.h"
+
+namespace clang {
+ namespace diag {
+ enum {
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
+#define DRIVERSTART
+#include "clang/Basic/DiagnosticDriverKinds.inc"
+#undef DIAG
+ NUM_BUILTIN_DRIVER_DIAGNOSTICS
+ };
+ } // end namespace diag
+} // end namespace clang
+
+#endif
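
DriverDiagnostic.h builds its diagnostic enum by defining a DIAG macro, including the TableGen-generated DiagnosticDriverKinds.inc, and undefining the macro again. Here is a self-contained illustration of that X-macro technique, using a hand-written list in place of the generated .inc file; the entry names are made up.

    // Local stand-in for a generated .inc file.
    #define MY_DIAGS(X) \
      X(err_no_input)   \
      X(warn_unused_arg)

    // Expand the list once to build the enum.
    enum MyDiagID {
    #define DIAG(ENUM) ENUM,
      MY_DIAGS(DIAG)
    #undef DIAG
      NUM_MY_DIAGS
    };

    // The same list can be expanded again to get matching name strings.
    static const char *const MyDiagNames[] = {
    #define DIAG(ENUM) #ENUM,
      MY_DIAGS(DIAG)
    #undef DIAG
    };
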
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Job.h b/contrib/llvm/tools/clang/include/clang/Driver/Job.h
new file mode 100644
index 0000000..c94886d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Job.h
@@ -0,0 +1,122 @@
+//===--- Job.h - Commands to Execute ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_JOB_H_
+#define CLANG_DRIVER_JOB_H_
+
+#include "clang/Driver/Util.h"
+#include "llvm/ADT/SmallVector.h"
+#include "clang/Basic/LLVM.h"
+
+namespace clang {
+namespace driver {
+class Command;
+class Tool;
+
+class Job {
+public:
+ enum JobClass {
+ CommandClass,
+ JobListClass
+ };
+
+private:
+ JobClass Kind;
+
+protected:
+ Job(JobClass _Kind) : Kind(_Kind) {}
+public:
+ virtual ~Job();
+
+ JobClass getKind() const { return Kind; }
+
+ /// addCommand - Append a command to the current job, which must be
+ /// a job list.
+ void addCommand(Command *C);
+
+ static bool classof(const Job *) { return true; }
+};
+
+ /// Command - An executable path/name and argument vector to
+ /// execute.
+class Command : public Job {
+ virtual void anchor();
+
+ /// Source - The action which caused the creation of this job.
+ const Action &Source;
+
+ /// Tool - The tool which caused the creation of this job.
+ const Tool &Creator;
+
+ /// The executable to run.
+ const char *Executable;
+
+ /// The list of program arguments (not including the implicit first
+ /// argument, which will be the executable).
+ ArgStringList Arguments;
+
+public:
+ Command(const Action &_Source, const Tool &_Creator, const char *_Executable,
+ const ArgStringList &_Arguments);
+
+ /// getSource - Return the Action which caused the creation of this job.
+ const Action &getSource() const { return Source; }
+
+ /// getCreator - Return the Tool which caused the creation of this job.
+ const Tool &getCreator() const { return Creator; }
+
+ const char *getExecutable() const { return Executable; }
+
+ const ArgStringList &getArguments() const { return Arguments; }
+
+ static bool classof(const Job *J) {
+ return J->getKind() == CommandClass;
+ }
+ static bool classof(const Command *) { return true; }
+};
+
+ /// JobList - A sequence of jobs to perform.
+class JobList : public Job {
+public:
+ typedef SmallVector<Job*, 4> list_type;
+ typedef list_type::size_type size_type;
+ typedef list_type::iterator iterator;
+ typedef list_type::const_iterator const_iterator;
+
+private:
+ list_type Jobs;
+
+public:
+ JobList();
+ virtual ~JobList();
+
+ /// Add a job to the list (taking ownership).
+ void addJob(Job *J) { Jobs.push_back(J); }
+
+ /// Clear the job list.
+ void clear();
+
+ const list_type &getJobs() const { return Jobs; }
+
+ size_type size() const { return Jobs.size(); }
+ iterator begin() { return Jobs.begin(); }
+ const_iterator begin() const { return Jobs.begin(); }
+ iterator end() { return Jobs.end(); }
+ const_iterator end() const { return Jobs.end(); }
+
+ static bool classof(const Job *J) {
+ return J->getKind() == JobListClass;
+ }
+ static bool classof(const JobList *) { return true; }
+};
+
+} // end namespace driver
+} // end namespace clang
+
+#endif
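
Job.h distinguishes Command and JobList with a JobClass kind and per-class classof predicates, the usual LLVM-style RTTI, so a job tree can be walked without C++ RTTI. Below is a minimal standalone sketch of the same pattern with illustrative names; it is not the clang classes, and ownership is deliberately left out.

    #include <iostream>
    #include <string>
    #include <vector>

    // Kind enum plus classof, mirroring Job/Command/JobList.
    class Node {
    public:
      enum NodeKind { CommandKind, ListKind };
      explicit Node(NodeKind K) : Kind(K) {}
      virtual ~Node() {}
      NodeKind getKind() const { return Kind; }
    private:
      NodeKind Kind;
    };

    class CommandNode : public Node {
      std::string Executable;
    public:
      explicit CommandNode(const std::string &Exe)
          : Node(CommandKind), Executable(Exe) {}
      const std::string &getExecutable() const { return Executable; }
      // classof is what llvm::isa/dyn_cast would consult.
      static bool classof(const Node *N) { return N->getKind() == CommandKind; }
    };

    class ListNode : public Node {
      std::vector<Node *> Children;  // not owned in this sketch
    public:
      ListNode() : Node(ListKind) {}
      void add(Node *N) { Children.push_back(N); }
      const std::vector<Node *> &getChildren() const { return Children; }
      static bool classof(const Node *N) { return N->getKind() == ListKind; }
    };

    // Walk a job tree by dispatching on the kind, the way the driver walks Jobs.
    static void printTree(const Node *N) {
      if (N->getKind() == Node::CommandKind) {
        std::cout << static_cast<const CommandNode *>(N)->getExecutable() << "\n";
      } else {
        const ListNode *L = static_cast<const ListNode *>(N);
        for (size_t i = 0, e = L->getChildren().size(); i != e; ++i)
          printTree(L->getChildren()[i]);
      }
    }
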
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/ObjCRuntime.h b/contrib/llvm/tools/clang/include/clang/Driver/ObjCRuntime.h
new file mode 100644
index 0000000..094873a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/ObjCRuntime.h
@@ -0,0 +1,49 @@
+//===--- ObjCRuntime.h - Objective C runtime features -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_OBJCRUNTIME_H_
+#define CLANG_DRIVER_OBJCRUNTIME_H_
+
+namespace clang {
+namespace driver {
+
+class ObjCRuntime {
+public:
+ enum Kind { GNU, NeXT };
+private:
+ unsigned RuntimeKind : 1;
+public:
+ void setKind(Kind k) { RuntimeKind = k; }
+ Kind getKind() const { return static_cast<Kind>(RuntimeKind); }
+
+ /// True if the runtime provides native ARC entrypoints. ARC may
+ /// still be usable without this if the tool-chain provides a
+ /// statically-linked runtime support library.
+ unsigned HasARC : 1;
+
+ /// True if the runtime supports ARC zeroing __weak.
+ unsigned HasWeak : 1;
+
+ /// \brief True if the runtime supports subscripting methods.
+ unsigned HasSubscripting : 1;
+
+ /// True if the runtime provides the following entrypoint:
+ /// void objc_terminate(void);
+ /// If available, this will be called instead of abort() when an
+ /// exception is thrown out of an EH cleanup.
+ unsigned HasTerminate : 1;
+
+ ObjCRuntime() : RuntimeKind(NeXT), HasARC(false), HasWeak(false),
+ HasSubscripting(false), HasTerminate(false) {}
+};
+
+}
+}
+
+#endif
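
ObjCRuntime packs runtime capabilities into single-bit fields that the driver can translate into the -fobjc-runtime-has-* cc1 flags defined earlier in this diff. The sketch below shows that translation step for the two flags visible above; the RuntimeFeatures struct and addObjCRuntimeArgs function are hypothetical names used only for illustration.

    #include <string>
    #include <vector>

    struct RuntimeFeatures {
      bool HasWeak;
      bool HasTerminate;
      RuntimeFeatures() : HasWeak(false), HasTerminate(false) {}
    };

    // Append the matching cc1 flags for the features the runtime supports.
    static void addObjCRuntimeArgs(const RuntimeFeatures &RT,
                                   std::vector<std::string> &CmdArgs) {
      if (RT.HasWeak)
        CmdArgs.push_back("-fobjc-runtime-has-weak");
      if (RT.HasTerminate)
        CmdArgs.push_back("-fobjc-runtime-has-terminate");
    }
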
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/OptParser.td b/contrib/llvm/tools/clang/include/clang/Driver/OptParser.td
new file mode 100644
index 0000000..25ecbc3
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/OptParser.td
@@ -0,0 +1,138 @@
+//===--- OptParser.td - Common Option Parsing Interfaces ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the common interfaces used by the option parsing TableGen
+// backend.
+//
+//===----------------------------------------------------------------------===//
+
+// Define the kinds of options.
+
+class OptionKind<string name, int precedence = 0, bit sentinel = 0> {
+ string Name = name;
+ // The kind precedence; kinds with lower precedence are matched first.
+ int Precedence = precedence;
+ // Indicate a sentinel option.
+ bit Sentinel = sentinel;
+}
+
+// An option group.
+def KIND_GROUP : OptionKind<"Group">;
+// The input option kind.
+def KIND_INPUT : OptionKind<"Input", 1, 1>;
+// The unknown option kind.
+def KIND_UNKNOWN : OptionKind<"Unknown", 2, 1>;
+// A flag with no values.
+def KIND_FLAG : OptionKind<"Flag">;
+// An option which prefixes its (single) value.
+def KIND_JOINED : OptionKind<"Joined", 1>;
+// An option which is followed by its value.
+def KIND_SEPARATE : OptionKind<"Separate">;
+// An option followed by its values, which are separated by commas.
+def KIND_COMMAJOINED : OptionKind<"CommaJoined">;
+// An option which takes multiple (separate) arguments.
+def KIND_MULTIARG : OptionKind<"MultiArg">;
+// An option which is either joined to its (non-empty) value, or followed by its
+// value.
+def KIND_JOINED_OR_SEPARATE : OptionKind<"JoinedOrSeparate">;
+// An option which is both joined to its (first) value, and followed by its
+// (second) value.
+def KIND_JOINED_AND_SEPARATE : OptionKind<"JoinedAndSeparate">;
+
+// Define the option flags.
+
+class OptionFlag {}
+
+// DriverOption - The option is a "driver" option, and should not be forwarded
+// to gcc.
+def DriverOption : OptionFlag;
+
+// LinkerInput - The option is a linker input.
+def LinkerInput : OptionFlag;
+
+// NoArgumentUnused - Don't report argument unused warnings for this option; this
+// is useful for options like -static or -dynamic which a user may always end up
+// passing, even if the platform defaults to (or only supports) that option.
+def NoArgumentUnused : OptionFlag;
+
+// RenderAsInput - The option should not render the name when rendered as an
+// input (i.e., the option is rendered as values).
+def RenderAsInput : OptionFlag;
+
+// RenderJoined - The option should be rendered joined, even if separate (only
+// sensible on single value separate options).
+def RenderJoined : OptionFlag;
+
+// RenderSeparate - The option should be rendered separately, even if joined
+// (only sensible on joined options).
+def RenderSeparate : OptionFlag;
+
+// Unsupported - The option is unsupported, and the driver will reject command
+// lines that use it.
+def Unsupported : OptionFlag;
+
+// HelpHidden - The option should not be displayed in --help, even if it has
+// help text. Clients *can* use this in conjunction with the OptTable::PrintHelp
+// arguments to implement hidden help groups.
+def HelpHidden : OptionFlag;
+
+// NoForward - The option should not be implicitly forwarded to other tools.
+def NoForward : OptionFlag;
+
+// Define the option group class.
+
+class OptionGroup<string name> {
+ string EnumName = ?; // Uses the def name if undefined.
+ string Name = name;
+ string HelpText = ?;
+ OptionGroup Group = ?;
+}
+
+// Define the option class.
+
+class Option<string name, OptionKind kind> {
+ string EnumName = ?; // Uses the def name if undefined.
+ string Name = name;
+ OptionKind Kind = kind;
+ // Used by MultiArg option kind.
+ int NumArgs = 0;
+ string HelpText = ?;
+ string MetaVarName = ?;
+ list<OptionFlag> Flags = [];
+ OptionGroup Group = ?;
+ Option Alias = ?;
+}
+
+// Helpers for defining options.
+
+class Flag<string name> : Option<name, KIND_FLAG>;
+class Joined<string name> : Option<name, KIND_JOINED>;
+class Separate<string name> : Option<name, KIND_SEPARATE>;
+class CommaJoined<string name> : Option<name, KIND_COMMAJOINED>;
+class MultiArg<string name, int numargs> : Option<name, KIND_MULTIARG> {
+ int NumArgs = numargs;
+}
+class JoinedOrSeparate<string name> : Option<name, KIND_JOINED_OR_SEPARATE>;
+class JoinedAndSeparate<string name> : Option<name, KIND_JOINED_AND_SEPARATE>;
+
+// Mix-ins for adding optional attributes.
+
+class Alias<Option alias> { Option Alias = alias; }
+class EnumName<string name> { string EnumName = name; }
+class Flags<list<OptionFlag> flags> { list<OptionFlag> Flags = flags; }
+class Group<OptionGroup group> { OptionGroup Group = group; }
+class HelpText<string text> { string HelpText = text; }
+class MetaVarName<string name> { string MetaVarName = name; }
+
+// Predefined options.
+
+// FIXME: Have generator validate that these appear in correct position (and
+// aren't duplicated).
+def INPUT : Option<"<input>", KIND_INPUT>, Flags<[DriverOption]>;
+def UNKNOWN : Option<"<unknown>", KIND_UNKNOWN>;
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/OptSpecifier.h b/contrib/llvm/tools/clang/include/clang/Driver/OptSpecifier.h
new file mode 100644
index 0000000..bb1cd17
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/OptSpecifier.h
@@ -0,0 +1,39 @@
+//===--- OptSpecifier.h - Option Specifiers ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_OPTSPECIFIER_H
+#define CLANG_DRIVER_OPTSPECIFIER_H
+
+namespace clang {
+namespace driver {
+ class Option;
+
+ /// OptSpecifier - Wrapper class for abstracting references to option IDs.
+ class OptSpecifier {
+ unsigned ID;
+
+ private:
+ explicit OptSpecifier(bool); // DO NOT IMPLEMENT
+
+ public:
+ OptSpecifier() : ID(0) {}
+ /*implicit*/ OptSpecifier(unsigned _ID) : ID(_ID) {}
+ /*implicit*/ OptSpecifier(const Option *Opt);
+
+ bool isValid() const { return ID != 0; }
+
+ unsigned getID() const { return ID; }
+
+ bool operator==(OptSpecifier Opt) const { return ID == Opt.getID(); }
+ bool operator!=(OptSpecifier Opt) const { return !(*this == Opt); }
+ };
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h b/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h
new file mode 100644
index 0000000..3af6f8f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/OptTable.h
@@ -0,0 +1,186 @@
+//===--- OptTable.h - Option Table ------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_OPTTABLE_H
+#define CLANG_DRIVER_OPTTABLE_H
+
+#include "clang/Basic/LLVM.h"
+#include "clang/Driver/OptSpecifier.h"
+
+namespace clang {
+namespace driver {
+namespace options {
+ enum DriverFlag {
+ DriverOption = (1 << 0),
+ HelpHidden = (1 << 1),
+ LinkerInput = (1 << 2),
+ NoArgumentUnused = (1 << 3),
+ NoForward = (1 << 4),
+ RenderAsInput = (1 << 5),
+ RenderJoined = (1 << 6),
+ RenderSeparate = (1 << 7),
+ Unsupported = (1 << 8)
+ };
+}
+
+ class Arg;
+ class ArgList;
+ class InputArgList;
+ class Option;
+
+ /// OptTable - Provide access to the Option info table.
+ ///
+ /// The OptTable class provides a layer of indirection which allows Option
+ /// instance to be created lazily. In the common case, only a few options will
+ /// be needed at runtime; the OptTable class maintains enough information to
+ /// parse command lines without instantiating Options, while letting other
+ /// parts of the driver still use Option instances where convenient.
+ class OptTable {
+ public:
+ /// Info - Entry for a single option instance in the option data table.
+ struct Info {
+ const char *Name;
+ const char *HelpText;
+ const char *MetaVar;
+ unsigned char Kind;
+ unsigned char Param;
+ unsigned short Flags;
+ unsigned short GroupID;
+ unsigned short AliasID;
+ };
+
+ private:
+ /// The static option information table.
+ const Info *OptionInfos;
+ unsigned NumOptionInfos;
+
+ /// The lazily constructed options table, indexed by option::ID - 1.
+ mutable Option **Options;
+
+ /// Prebound input option instance.
+ const Option *TheInputOption;
+
+ /// Prebound unknown option instance.
+ const Option *TheUnknownOption;
+
+ /// The index of the first option which can be parsed (i.e., is not a
+ /// special option like 'input' or 'unknown', and is not an option group).
+ unsigned FirstSearchableIndex;
+
+ private:
+ const Info &getInfo(OptSpecifier Opt) const {
+ unsigned id = Opt.getID();
+ assert(id > 0 && id - 1 < getNumOptions() && "Invalid Option ID.");
+ return OptionInfos[id - 1];
+ }
+
+ Option *CreateOption(unsigned id) const;
+
+ protected:
+ OptTable(const Info *_OptionInfos, unsigned _NumOptionInfos);
+ public:
+ ~OptTable();
+
+ /// getNumOptions - Return the total number of option classes.
+ unsigned getNumOptions() const { return NumOptionInfos; }
+
+ /// getOption - Get the given \arg id's Option instance, lazily creating it
+ /// if necessary.
+ ///
+ /// \return The option, or null for the INVALID option id.
+ const Option *getOption(OptSpecifier Opt) const {
+ unsigned id = Opt.getID();
+ if (id == 0)
+ return 0;
+
+ assert((unsigned) (id - 1) < getNumOptions() && "Invalid ID.");
+ Option *&Entry = Options[id - 1];
+ if (!Entry)
+ Entry = CreateOption(id);
+ return Entry;
+ }
+
+ /// getOptionName - Lookup the name of the given option.
+ const char *getOptionName(OptSpecifier id) const {
+ return getInfo(id).Name;
+ }
+
+ /// getOptionKind - Get the kind of the given option.
+ unsigned getOptionKind(OptSpecifier id) const {
+ return getInfo(id).Kind;
+ }
+
+ /// getOptionGroupID - Get the group id for the given option.
+ unsigned getOptionGroupID(OptSpecifier id) const {
+ return getInfo(id).GroupID;
+ }
+
+ /// isOptionHelpHidden - Should the help for the given option be hidden by
+ /// default.
+ bool isOptionHelpHidden(OptSpecifier id) const {
+ return getInfo(id).Flags & options::HelpHidden;
+ }
+
+ /// getOptionHelpText - Get the help text to use to describe this option.
+ const char *getOptionHelpText(OptSpecifier id) const {
+ return getInfo(id).HelpText;
+ }
+
+ /// getOptionMetaVar - Get the meta-variable name to use when describing
+ /// this options values in the help text.
+ const char *getOptionMetaVar(OptSpecifier id) const {
+ return getInfo(id).MetaVar;
+ }
+
+ /// ParseOneArg - Parse a single argument; returning the new argument and
+ /// updating Index.
+ ///
+ /// \param [in,out] Index - The current parsing position in the argument
+ /// string list; on return this will be the index of the next argument
+ /// string to parse.
+ ///
+ /// \return - The parsed argument, or 0 if the argument is missing values
+ /// (in which case Index still points at the conceptual next argument string
+ /// to parse).
+ Arg *ParseOneArg(const ArgList &Args, unsigned &Index) const;
+
+ /// ParseArgs - Parse a list of arguments into an InputArgList.
+ ///
+ /// The resulting InputArgList will reference the strings in [ArgBegin,
+ /// ArgEnd), and their lifetime should extend past that of the returned
+ /// InputArgList.
+ ///
+ /// The only error that can occur in this routine is if an argument is
+ /// missing values; in this case \arg MissingArgCount will be non-zero.
+ ///
+ /// \param ArgBegin - The beginning of the argument vector.
+ /// \param ArgEnd - The end of the argument vector.
+ /// \param MissingArgIndex - On error, the index of the option which could
+ /// not be parsed.
+ /// \param MissingArgCount - On error, the number of missing options.
+ /// \return - An InputArgList; on error this will contain all the options
+ /// which could be parsed.
+ InputArgList *ParseArgs(const char* const *ArgBegin,
+ const char* const *ArgEnd,
+ unsigned &MissingArgIndex,
+ unsigned &MissingArgCount) const;
+
+ /// PrintHelp - Render the help text for an option table.
+ ///
+ /// \param OS - The stream to write the help text to.
+ /// \param Name - The name to use in the usage line.
+ /// \param Title - The title to use in the usage line.
+ /// \param ShowHidden - Whether help-hidden arguments should be shown.
+ void PrintHelp(raw_ostream &OS, const char *Name,
+ const char *Title, bool ShowHidden = false) const;
+ };
+}
+}
+
+#endif
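
A usage sketch for the parsing entry point documented above: build the driver option table and hand an argument vector to OptTable::ParseArgs, checking MissingArgCount as described. This assumes the clang driver headers and library from this import; createDriverOptTable is declared in Options.h later in this diff, and InputArgList comes from clang/Driver/ArgList.h, which is not part of this excerpt.

    #include "clang/Driver/ArgList.h"
    #include "clang/Driver/OptTable.h"
    #include "clang/Driver/Options.h"
    #include "llvm/ADT/OwningPtr.h"

    using namespace clang::driver;

    // Parse a raw argument vector with the driver option table; returns the
    // InputArgList (owned by the caller) or 0 if an option was missing values.
    static InputArgList *parseDriverArgs(const char *const *ArgBegin,
                                         const char *const *ArgEnd) {
      llvm::OwningPtr<OptTable> Opts(createDriverOptTable());

      unsigned MissingArgIndex = 0, MissingArgCount = 0;
      InputArgList *Args =
          Opts->ParseArgs(ArgBegin, ArgEnd, MissingArgIndex, MissingArgCount);

      // Per the documented contract, a non-zero MissingArgCount means the
      // option at MissingArgIndex was given too few values.
      if (MissingArgCount) {
        delete Args;
        return 0;
      }
      return Args;
    }
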
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Option.h b/contrib/llvm/tools/clang/include/clang/Driver/Option.h
new file mode 100644
index 0000000..8243f6d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Option.h
@@ -0,0 +1,318 @@
+//===--- Option.h - Abstract Driver Options ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_OPTION_H_
+#define CLANG_DRIVER_OPTION_H_
+
+#include "clang/Driver/OptSpecifier.h"
+#include "llvm/ADT/StringRef.h"
+#include "clang/Basic/LLVM.h"
+
+namespace clang {
+namespace driver {
+ class Arg;
+ class ArgList;
+ class OptionGroup;
+
+ /// Option - Abstract representation for a single form of driver
+ /// argument.
+ ///
+ /// An Option class represents a form of option that the driver
+ /// takes, for example how many arguments the option has and how
+ /// they can be provided. Individual option instances store
+ /// additional information about what group the option is a member
+ /// of (if any), if the option is an alias, and a number of
+ /// flags. At runtime the driver parses the command line into
+ /// concrete Arg instances, each of which corresponds to a
+ /// particular Option instance.
+ class Option {
+ public:
+ enum OptionClass {
+ GroupClass = 0,
+ InputClass,
+ UnknownClass,
+ FlagClass,
+ JoinedClass,
+ SeparateClass,
+ CommaJoinedClass,
+ MultiArgClass,
+ JoinedOrSeparateClass,
+ JoinedAndSeparateClass
+ };
+
+ enum RenderStyleKind {
+ RenderCommaJoinedStyle,
+ RenderJoinedStyle,
+ RenderSeparateStyle,
+ RenderValuesStyle
+ };
+
+ private:
+ OptionClass Kind;
+
+ /// The option ID.
+ OptSpecifier ID;
+
+ /// The option name.
+ StringRef Name;
+
+ /// Group this option is a member of, if any.
+ const OptionGroup *Group;
+
+ /// Option that this is an alias for, if any.
+ const Option *Alias;
+
+ /// Unsupported options will be rejected.
+ bool Unsupported : 1;
+
+ /// Treat this option like a linker input?
+ bool LinkerInput : 1;
+
+ /// When rendering as an input, don't render the option.
+
+ // FIXME: We should ditch the render/renderAsInput distinction.
+ bool NoOptAsInput : 1;
+
+ /// The style to use when rendering arguments parsed by this option.
+ unsigned RenderStyle : 2;
+
+ /// This option is only consumed by the driver.
+ bool DriverOption : 1;
+
+ /// This option should not report argument unused errors.
+ bool NoArgumentUnused : 1;
+
+ /// This option should not be implicitly forwarded.
+ bool NoForward : 1;
+
+ protected:
+ Option(OptionClass Kind, OptSpecifier ID, const char *Name,
+ const OptionGroup *Group, const Option *Alias);
+ public:
+ virtual ~Option();
+
+ unsigned getID() const { return ID.getID(); }
+ OptionClass getKind() const { return Kind; }
+ StringRef getName() const { return Name; }
+ const OptionGroup *getGroup() const { return Group; }
+ const Option *getAlias() const { return Alias; }
+
+ bool isUnsupported() const { return Unsupported; }
+ void setUnsupported(bool Value) { Unsupported = Value; }
+
+ bool isLinkerInput() const { return LinkerInput; }
+ void setLinkerInput(bool Value) { LinkerInput = Value; }
+
+ bool hasNoOptAsInput() const { return NoOptAsInput; }
+ void setNoOptAsInput(bool Value) { NoOptAsInput = Value; }
+
+ RenderStyleKind getRenderStyle() const {
+ return RenderStyleKind(RenderStyle);
+ }
+ void setRenderStyle(RenderStyleKind Value) { RenderStyle = Value; }
+
+ bool isDriverOption() const { return DriverOption; }
+ void setDriverOption(bool Value) { DriverOption = Value; }
+
+ bool hasNoArgumentUnused() const { return NoArgumentUnused; }
+ void setNoArgumentUnused(bool Value) { NoArgumentUnused = Value; }
+
+ bool hasNoForward() const { return NoForward; }
+ void setNoForward(bool Value) { NoForward = Value; }
+
+ bool hasForwardToGCC() const {
+ return !NoForward && !DriverOption && !LinkerInput;
+ }
+
+ /// getUnaliasedOption - Return the final option this option
+ /// aliases (itself, if the option has no alias).
+ const Option *getUnaliasedOption() const {
+ if (Alias) return Alias->getUnaliasedOption();
+ return this;
+ }
+
+ /// getRenderName - Return the name to use when rendering this
+ /// option.
+ StringRef getRenderName() const {
+ return getUnaliasedOption()->getName();
+ }
+
+ /// matches - Predicate for whether this option is part of the
+ /// given option (which may be a group).
+ ///
+ /// Note that matches against options which are an alias should never be
+ /// done -- aliases do not participate in matching and so such a query will
+ /// always be false.
+ bool matches(OptSpecifier ID) const;
+
+ /// accept - Potentially accept the current argument, returning a
+ /// new Arg instance, or 0 if the option does not accept this
+ /// argument (or the argument is missing values).
+ ///
+ /// If the option accepts the current argument, accept() sets
+ /// Index to the position where argument parsing should resume
+ /// (even if the argument is missing values).
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const = 0;
+
+ void dump() const;
+
+ static bool classof(const Option *) { return true; }
+ };
+
+ /// OptionGroup - A set of options which can be handled uniformly
+ /// by the driver.
+ class OptionGroup : public Option {
+ public:
+ OptionGroup(OptSpecifier ID, const char *Name, const OptionGroup *Group);
+
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
+
+ static bool classof(const Option *O) {
+ return O->getKind() == Option::GroupClass;
+ }
+ static bool classof(const OptionGroup *) { return true; }
+ };
+
+ // Dummy option classes.
+
+ /// InputOption - Dummy option class for representing driver inputs.
+ class InputOption : public Option {
+ public:
+ InputOption(OptSpecifier ID);
+
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
+
+ static bool classof(const Option *O) {
+ return O->getKind() == Option::InputClass;
+ }
+ static bool classof(const InputOption *) { return true; }
+ };
+
+ /// UnknownOption - Dummy option class for representing unknown arguments.
+ class UnknownOption : public Option {
+ public:
+ UnknownOption(OptSpecifier ID);
+
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
+
+ static bool classof(const Option *O) {
+ return O->getKind() == Option::UnknownClass;
+ }
+ static bool classof(const UnknownOption *) { return true; }
+ };
+
+ // Normal options.
+
+ class FlagOption : public Option {
+ public:
+ FlagOption(OptSpecifier ID, const char *Name, const OptionGroup *Group,
+ const Option *Alias);
+
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
+
+ static bool classof(const Option *O) {
+ return O->getKind() == Option::FlagClass;
+ }
+ static bool classof(const FlagOption *) { return true; }
+ };
+
+ class JoinedOption : public Option {
+ public:
+ JoinedOption(OptSpecifier ID, const char *Name, const OptionGroup *Group,
+ const Option *Alias);
+
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
+
+ static bool classof(const Option *O) {
+ return O->getKind() == Option::JoinedClass;
+ }
+ static bool classof(const JoinedOption *) { return true; }
+ };
+
+ class SeparateOption : public Option {
+ public:
+ SeparateOption(OptSpecifier ID, const char *Name,
+ const OptionGroup *Group, const Option *Alias);
+
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
+
+ static bool classof(const Option *O) {
+ return O->getKind() == Option::SeparateClass;
+ }
+ static bool classof(const SeparateOption *) { return true; }
+ };
+
+ class CommaJoinedOption : public Option {
+ public:
+ CommaJoinedOption(OptSpecifier ID, const char *Name,
+ const OptionGroup *Group, const Option *Alias);
+
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
+
+ static bool classof(const Option *O) {
+ return O->getKind() == Option::CommaJoinedClass;
+ }
+ static bool classof(const CommaJoinedOption *) { return true; }
+ };
+
+ // FIXME: Fold MultiArgOption into SeparateOption?
+
+ /// MultiArgOption - An option which takes multiple arguments (these
+ /// are always separate arguments).
+ class MultiArgOption : public Option {
+ unsigned NumArgs;
+
+ public:
+ MultiArgOption(OptSpecifier ID, const char *Name, const OptionGroup *Group,
+ const Option *Alias, unsigned NumArgs);
+
+ unsigned getNumArgs() const { return NumArgs; }
+
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
+
+ static bool classof(const Option *O) {
+ return O->getKind() == Option::MultiArgClass;
+ }
+ static bool classof(const MultiArgOption *) { return true; }
+ };
+
+ /// JoinedOrSeparateOption - An option which either literally
+ /// prefixes its (non-empty) value, or is followed by a value.
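+ /// (For example, "-Ifoo" and "-I foo" are both accepted forms of "-I".)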
+ class JoinedOrSeparateOption : public Option {
+ public:
+ JoinedOrSeparateOption(OptSpecifier ID, const char *Name,
+ const OptionGroup *Group, const Option *Alias);
+
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
+
+ static bool classof(const Option *O) {
+ return O->getKind() == Option::JoinedOrSeparateClass;
+ }
+ static bool classof(const JoinedOrSeparateOption *) { return true; }
+ };
+
+ /// JoinedAndSeparateOption - An option which literally prefixes its
+ /// value and is followed by another value.
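+ /// (For example, "-Xarch_" below is used as "-Xarch_<arch> <following arg>".)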
+ class JoinedAndSeparateOption : public Option {
+ public:
+ JoinedAndSeparateOption(OptSpecifier ID, const char *Name,
+ const OptionGroup *Group, const Option *Alias);
+
+ virtual Arg *accept(const ArgList &Args, unsigned &Index) const;
+
+ static bool classof(const Option *O) {
+ return O->getKind() == Option::JoinedAndSeparateClass;
+ }
+ static bool classof(const JoinedAndSeparateOption *) { return true; }
+ };
+
+} // end namespace driver
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Options.h b/contrib/llvm/tools/clang/include/clang/Driver/Options.h
new file mode 100644
index 0000000..ac312cd
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Options.h
@@ -0,0 +1,32 @@
+//===--- Options.h - Option info & table ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_OPTIONS_H
+#define CLANG_DRIVER_OPTIONS_H
+
+namespace clang {
+namespace driver {
+ class OptTable;
+
+namespace options {
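+ // Each OPTION record in Options.inc (generated from Options.td) expands to
+ // one OPT_<ID> enumerator, e.g. OPT_o for the record named 'o' ("-o").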
+ enum ID {
+ OPT_INVALID = 0, // This is not an option ID.
+#define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR) OPT_##ID,
+#include "clang/Driver/Options.inc"
+ LastOption
+#undef OPTION
+ };
+}
+
+ OptTable *createDriverOptTable();
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Options.td b/contrib/llvm/tools/clang/include/clang/Driver/Options.td
new file mode 100644
index 0000000..0a29bb9
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Options.td
@@ -0,0 +1,968 @@
+//===--- Options.td - Options for clang ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the options accepted by clang.
+//
+//===----------------------------------------------------------------------===//
+
+// Include the common option parsing interfaces.
+include "OptParser.td"
+
+/////////
+// Groups
+
+// Meta-group which defines options that are only meaningful when compiling.
+def CompileOnly_Group : OptionGroup<"<CompileOnly group>">;
+
+def I_Group : OptionGroup<"<I group>">, Group<CompileOnly_Group>;
+def L_Group : OptionGroup<"<L group>">, Group<CompileOnly_Group>;
+def M_Group : OptionGroup<"<M group>">, Group<CompileOnly_Group>;
+def T_Group : OptionGroup<"<T group>">;
+def O_Group : OptionGroup<"<O group>">, Group<CompileOnly_Group>;
+def W_Group : OptionGroup<"<W group>">, Group<CompileOnly_Group>;
+def X_Group : OptionGroup<"<X group>">;
+def a_Group : OptionGroup<"<a group>">;
+def d_Group : OptionGroup<"<d group>">;
+def f_Group : OptionGroup<"<f group>">, Group<CompileOnly_Group>;
+def f_clang_Group : OptionGroup<"<f (clang-only) group>">, Group<CompileOnly_Group>;
+def g_Group : OptionGroup<"<g group>">;
+def i_Group : OptionGroup<"<i group>">, Group<CompileOnly_Group>;
+def clang_i_Group : OptionGroup<"<clang i group>">, Group<i_Group>;
+def m_Group : OptionGroup<"<m group>">, Group<CompileOnly_Group>;
+def m_x86_Features_Group : OptionGroup<"<m x86 features group>">, Group<m_Group>;
+def u_Group : OptionGroup<"<u group>">;
+
+def pedantic_Group : OptionGroup<"<pedantic group>">,
+ Group<CompileOnly_Group>;
+def reserved_lib_Group : OptionGroup<"<reserved libs group>">;
+
+// Temporary groups for clang options which we know we don't support,
+// but don't want to verbosely warn the user about.
+def clang_ignored_f_Group : OptionGroup<"<clang ignored f group>">,
+ Group<f_Group>;
+def clang_ignored_m_Group : OptionGroup<"<clang ignored m group>">,
+ Group<m_Group>;
+
+/////////
+// Options
+
+// The internal option ID must be a valid C++ identifier and results in a
+// clang::driver::options::OPT_XX enum constant for XX.
+//
+// We want to unambiguously be able to refer to options from the driver source
+// code; for this reason the option name is mangled into an ID. This mangling
+// isn't guaranteed to have an inverse, but for practical purposes it does.
+//
+// The mangling scheme is to ignore the leading '-', and perform the following
+// substitutions:
+// _ => __
+// - => _
+// # => _HASH
+// , => _COMMA
+// = => _EQ
+// C++ => CXX
+// . => _
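+//
+// For example (all of these are defined below in this file):
+//   -fno-builtin => fno_builtin (OPT_fno_builtin)
+//   -all_load    => all__load   (OPT_all__load)
+//   -std=        => std_EQ      (OPT_std_EQ)
+//   -Wl,         => Wl_COMMA    (OPT_Wl_COMMA)
+//   -msse4.1     => msse4_1     (OPT_msse4_1)
+//   -###         => _HASH_HASH_HASH (OPT__HASH_HASH_HASH)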
+
+// Developer Driver Options
+
+def ccc_Group : OptionGroup<"<clang internal options>">;
+def ccc_driver_Group : OptionGroup<"<clang driver internal options>">,
+ Group<ccc_Group>, HelpText<"DRIVER OPTIONS">;
+def ccc_debug_Group : OptionGroup<"<clang debug/development internal options>">,
+ Group<ccc_Group>, HelpText<"DEBUG/DEVELOPMENT OPTIONS">;
+
+class CCCDriverOpt : Group<ccc_driver_Group>, Flags<[DriverOption, HelpHidden]>;
+def ccc_cxx : Flag<"-ccc-cxx">, CCCDriverOpt,
+ HelpText<"Act as a C++ driver">;
+def ccc_echo : Flag<"-ccc-echo">, CCCDriverOpt,
+ HelpText<"Echo commands before running them">;
+def ccc_gcc_name : Separate<"-ccc-gcc-name">, CCCDriverOpt,
+ HelpText<"Name for native GCC compiler">,
+ MetaVarName<"<gcc-path>">;
+def ccc_clang_cxx : Flag<"-ccc-clang-cxx">, CCCDriverOpt,
+ HelpText<"Enable the clang compiler for C++">;
+def ccc_no_clang_cxx : Flag<"-ccc-no-clang-cxx">, CCCDriverOpt,
+ HelpText<"Disable the clang compiler for C++">;
+def ccc_no_clang : Flag<"-ccc-no-clang">, CCCDriverOpt,
+ HelpText<"Disable the clang compiler">;
+def ccc_no_clang_cpp : Flag<"-ccc-no-clang-cpp">, CCCDriverOpt,
+ HelpText<"Disable the clang preprocessor">;
+def ccc_clang_archs : Separate<"-ccc-clang-archs">, CCCDriverOpt,
+ HelpText<"Comma separated list of architectures to use the clang compiler for">,
+ MetaVarName<"<arch-list>">;
+def ccc_pch_is_pch : Flag<"-ccc-pch-is-pch">, CCCDriverOpt,
+ HelpText<"Use lazy PCH for precompiled headers">;
+def ccc_pch_is_pth : Flag<"-ccc-pch-is-pth">, CCCDriverOpt,
+ HelpText<"Use pretokenized headers for precompiled headers">;
+
+class CCCDebugOpt : Group<ccc_debug_Group>, Flags<[DriverOption, HelpHidden]>;
+def ccc_install_dir : Separate<"-ccc-install-dir">, CCCDebugOpt,
+ HelpText<"Simulate installation in the given directory">;
+def ccc_print_options : Flag<"-ccc-print-options">, CCCDebugOpt,
+ HelpText<"Dump parsed command line arguments">;
+def ccc_print_phases : Flag<"-ccc-print-phases">, CCCDebugOpt,
+ HelpText<"Dump list of actions to perform">;
+def ccc_print_bindings : Flag<"-ccc-print-bindings">, CCCDebugOpt,
+ HelpText<"Show bindings of tools to actions">;
+
+def ccc_arcmt_check : Flag<"-ccc-arcmt-check">, CCCDriverOpt,
+ HelpText<"Check for ARC migration issues that need manual handling">;
+def ccc_arcmt_modify : Flag<"-ccc-arcmt-modify">, CCCDriverOpt,
+ HelpText<"Apply modifications to files to conform to ARC">;
+def ccc_arrmt_check : Flag<"-ccc-arrmt-check">, Alias<ccc_arcmt_check>;
+def ccc_arrmt_modify : Flag<"-ccc-arrmt-modify">, Alias<ccc_arcmt_modify>;
+def ccc_arcmt_migrate : Separate<"-ccc-arcmt-migrate">, CCCDriverOpt,
+ HelpText<"Apply modifications and produce temporary files that conform to ARC">;
+def arcmt_migrate_report_output : Separate<"-arcmt-migrate-report-output">,
+ HelpText<"Output path for the plist report">;
+def arcmt_migrate_emit_arc_errors : Flag<"-arcmt-migrate-emit-errors">,
+ HelpText<"Emit ARC errors even if the migrator can fix them">;
+
+def _migrate : Flag<"--migrate">, Flags<[DriverOption]>,
+ HelpText<"Run the migrator">;
+def ccc_objcmt_migrate : Separate<"-ccc-objcmt-migrate">, CCCDriverOpt,
+ HelpText<"Apply modifications and produce temporary files to migrate to "
+ "modern ObjC syntax">;
+def objcmt_migrate_literals : Flag<"-objcmt-migrate-literals">,
+ HelpText<"Enable migration to modern ObjC literals">;
+def objcmt_migrate_subscripting : Flag<"-objcmt-migrate-subscripting">,
+ HelpText<"Enable migration to modern ObjC subscripting">;
+
+// Make sure all other -ccc- options are rejected.
+def ccc_ : Joined<"-ccc-">, Group<ccc_Group>, Flags<[Unsupported]>;
+
+// Standard Options
+
+def _HASH_HASH_HASH : Flag<"-###">, Flags<[DriverOption]>,
+ HelpText<"Print the commands to run for this compilation">;
+// The '--' option is here for the sake of compatibility with gcc, but is
+// ignored by the driver.
+def _DASH_DASH : Flag<"--">, Flags<[DriverOption]>;
+def A : JoinedOrSeparate<"-A">;
+def B : JoinedOrSeparate<"-B">;
+def CC : Flag<"-CC">;
+def C : Flag<"-C">;
+def D : JoinedOrSeparate<"-D">, Group<CompileOnly_Group>;
+def E : Flag<"-E">, Flags<[DriverOption]>,
+ HelpText<"Only run the preprocessor">;
+def F : JoinedOrSeparate<"-F">, Flags<[RenderJoined]>;
+def G : Separate<"-G">, Flags<[DriverOption]>;
+def H : Flag<"-H">;
+def I_ : Flag<"-I-">, Group<I_Group>;
+def I : JoinedOrSeparate<"-I">, Group<I_Group>;
+def L : JoinedOrSeparate<"-L">, Flags<[RenderJoined]>;
+def MD : Flag<"-MD">, Group<M_Group>;
+def MF : JoinedOrSeparate<"-MF">, Group<M_Group>;
+def MG : Flag<"-MG">, Group<M_Group>;
+def MMD : Flag<"-MMD">, Group<M_Group>;
+def MM : Flag<"-MM">, Group<M_Group>;
+def MP : Flag<"-MP">, Group<M_Group>;
+def MQ : JoinedOrSeparate<"-MQ">, Group<M_Group>;
+def MT : JoinedOrSeparate<"-MT">, Group<M_Group>;
+def Mach : Flag<"-Mach">;
+def M : Flag<"-M">, Group<M_Group>;
+def O0 : Joined<"-O0">, Group<O_Group>;
+def O4 : Joined<"-O4">, Group<O_Group>;
+def ObjCXX : Flag<"-ObjC++">, Flags<[DriverOption]>,
+ HelpText<"Treat source input files as Objective-C++ inputs">;
+def ObjC : Flag<"-ObjC">, Flags<[DriverOption]>,
+ HelpText<"Treat source input files as Objective-C inputs">;
+def O : Joined<"-O">, Group<O_Group>;
+def P : Flag<"-P">;
+def Qn : Flag<"-Qn">;
+def Qunused_arguments : Flag<"-Qunused-arguments">, Flags<[DriverOption]>,
+ HelpText<"Don't emit warning for unused driver arguments">;
+def Q : Flag<"-Q">;
+def R : Flag<"-R">;
+def S : Flag<"-S">, Flags<[DriverOption]>,
+ HelpText<"Only run preprocess and compilation steps">;
+def Tbss : JoinedOrSeparate<"-Tbss">, Group<T_Group>;
+def Tdata : JoinedOrSeparate<"-Tdata">, Group<T_Group>;
+def Ttext : JoinedOrSeparate<"-Ttext">, Group<T_Group>;
+def T : JoinedOrSeparate<"-T">, Group<T_Group>;
+def U : JoinedOrSeparate<"-U">, Group<CompileOnly_Group>;
+def V : JoinedOrSeparate<"-V">, Flags<[DriverOption, Unsupported]>;
+def Wa_COMMA : CommaJoined<"-Wa,">,
+ HelpText<"Pass the comma separated arguments in <arg> to the assembler">,
+ MetaVarName<"<arg>">;
+def Wall : Flag<"-Wall">, Group<W_Group>;
+def Wdeprecated : Flag<"-Wdeprecated">, Group<W_Group>;
+def Wno_deprecated : Flag<"-Wno-deprecated">, Group<W_Group>;
+def Wextra : Flag<"-Wextra">, Group<W_Group>;
+def Wl_COMMA : CommaJoined<"-Wl,">, Flags<[LinkerInput, RenderAsInput]>,
+ HelpText<"Pass the comma separated arguments in <arg> to the linker">,
+ MetaVarName<"<arg>">;
+def Wno_nonportable_cfstrings : Joined<"-Wno-nonportable-cfstrings">, Group<W_Group>;
+def Wnonportable_cfstrings : Joined<"-Wnonportable-cfstrings">, Group<W_Group>;
+def Wp_COMMA : CommaJoined<"-Wp,">,
+ HelpText<"Pass the comma separated arguments in <arg> to the preprocessor">,
+ MetaVarName<"<arg>">;
+def Wwrite_strings : Flag<"-Wwrite-strings">, Group<W_Group>;
+def Wno_write_strings : Flag<"-Wno-write-strings">, Group<W_Group>;
+def W_Joined : Joined<"-W">, Group<W_Group>;
+def Xanalyzer : Separate<"-Xanalyzer">,
+ HelpText<"Pass <arg> to the static analyzer">, MetaVarName<"<arg>">;
+def Xarch__ : JoinedAndSeparate<"-Xarch_">, Flags<[DriverOption]>;
+def Xassembler : Separate<"-Xassembler">,
+ HelpText<"Pass <arg> to the assembler">, MetaVarName<"<arg>">;
+def Xclang : Separate<"-Xclang">,
+ HelpText<"Pass <arg> to the clang compiler">, MetaVarName<"<arg>">,
+ Flags<[NoForward]>;
+def Xlinker : Separate<"-Xlinker">, Flags<[LinkerInput, RenderAsInput]>,
+ HelpText<"Pass <arg> to the linker">, MetaVarName<"<arg>">;
+def Xpreprocessor : Separate<"-Xpreprocessor">,
+ HelpText<"Pass <arg> to the preprocessor">, MetaVarName<"<arg>">;
+def X_Flag : Flag<"-X">;
+def X_Joined : Joined<"-X">;
+def Z_Flag : Flag<"-Z">;
+def Z_Joined : Joined<"-Z">;
+def all__load : Flag<"-all_load">;
+def allowable__client : Separate<"-allowable_client">;
+def ansi : Flag<"-ansi">, Group<a_Group>;
+def arch__errors__fatal : Flag<"-arch_errors_fatal">;
+def arch : Separate<"-arch">, Flags<[DriverOption]>;
+def arch__only : Separate<"-arch_only">;
+def a : Joined<"-a">, Group<a_Group>;
+def bind__at__load : Flag<"-bind_at_load">;
+def bundle__loader : Separate<"-bundle_loader">;
+def bundle : Flag<"-bundle">;
+def b : JoinedOrSeparate<"-b">, Flags<[Unsupported]>;
+def client__name : JoinedOrSeparate<"-client_name">;
+def combine : Flag<"-combine">, Flags<[DriverOption, Unsupported]>;
+def compatibility__version : JoinedOrSeparate<"-compatibility_version">;
+def coverage : Flag<"-coverage">;
+def cpp_precomp : Flag<"-cpp-precomp">, Group<clang_ignored_f_Group>;
+def current__version : JoinedOrSeparate<"-current_version">;
+def cxx_isystem : JoinedOrSeparate<"-cxx-isystem">, Group<clang_i_Group>;
+def c : Flag<"-c">, Flags<[DriverOption]>,
+ HelpText<"Only run preprocess, compile, and assemble steps">;
+def dA : Flag<"-dA">, Group<d_Group>;
+def dD : Flag<"-dD">, Group<d_Group>;
+def dM : Flag<"-dM">, Group<d_Group>;
+def dead__strip : Flag<"-dead_strip">;
+def dependency_file : Separate<"-dependency-file">;
+def dumpmachine : Flag<"-dumpmachine">;
+def dumpspecs : Flag<"-dumpspecs">, Flags<[Unsupported]>;
+def dumpversion : Flag<"-dumpversion">;
+def dylib__file : Separate<"-dylib_file">;
+def dylinker__install__name : JoinedOrSeparate<"-dylinker_install_name">;
+def dylinker : Flag<"-dylinker">;
+def dynamiclib : Flag<"-dynamiclib">;
+def dynamic : Flag<"-dynamic">, Flags<[NoArgumentUnused]>;
+def d_Flag : Flag<"-d">, Group<d_Group>;
+def d_Joined : Joined<"-d">, Group<d_Group>;
+def emit_ast : Flag<"-emit-ast">,
+ HelpText<"Emit Clang AST files for source inputs">;
+def emit_llvm : Flag<"-emit-llvm">,
+ HelpText<"Use the LLVM representation for assembler and object files">;
+def exported__symbols__list : Separate<"-exported_symbols_list">;
+def e : JoinedOrSeparate<"-e">;
+def fPIC : Flag<"-fPIC">, Group<f_Group>;
+def fno_PIC : Flag<"-fno-PIC">, Group<f_Group>;
+def fPIE : Flag<"-fPIE">, Group<f_Group>;
+def fno_PIE : Flag<"-fno-PIE">, Group<f_Group>;
+def faccess_control : Flag<"-faccess-control">, Group<f_Group>;
+def fallow_unsupported : Flag<"-fallow-unsupported">, Group<f_Group>;
+def faltivec : Flag<"-faltivec">, Group<f_Group>;
+def fapple_kext : Flag<"-fapple-kext">, Group<f_Group>;
+def fapple_pragma_pack : Flag<"-fapple-pragma-pack">, Group<f_Group>;
+def faddress_sanitizer : Flag<"-faddress-sanitizer">, Group<f_Group>;
+def fno_address_sanitizer : Flag<"-fno-address-sanitizer">, Group<f_Group>;
+def fthread_sanitizer : Flag<"-fthread-sanitizer">, Group<f_Group>;
+def fno_thread_sanitizer : Flag<"-fno-thread-sanitizer">, Group<f_Group>;
+def fasm : Flag<"-fasm">, Group<f_Group>;
+
+def fasm_blocks : Flag<"-fasm-blocks">, Group<f_Group>;
+def fno_asm_blocks : Flag<"-fno-asm-blocks">, Group<f_Group>;
+
+def fassume_sane_operator_new : Flag<"-fassume-sane-operator-new">, Group<f_Group>;
+def fastcp : Flag<"-fastcp">, Group<f_Group>;
+def fastf : Flag<"-fastf">, Group<f_Group>;
+def fast : Flag<"-fast">, Group<f_Group>;
+def fasynchronous_unwind_tables : Flag<"-fasynchronous-unwind-tables">, Group<f_Group>;
+def fblocks : Flag<"-fblocks">, Group<f_Group>;
+def fbootclasspath_EQ : Joined<"-fbootclasspath=">, Group<f_Group>;
+def fborland_extensions : Flag<"-fborland-extensions">, Group<f_Group>;
+def fbuiltin_strcat : Flag<"-fbuiltin-strcat">, Group<f_Group>;
+def fbuiltin_strcpy : Flag<"-fbuiltin-strcpy">, Group<f_Group>;
+def fbuiltin : Flag<"-fbuiltin">, Group<f_Group>;
+def fcaret_diagnostics : Flag<"-fcaret-diagnostics">, Group<f_Group>;
+def fcatch_undefined_behavior : Flag<"-fcatch-undefined-behavior">,
+ Group<f_Group>, HelpText<"Generate runtime checks for undefined behavior.">;
+def fclasspath_EQ : Joined<"-fclasspath=">, Group<f_Group>;
+def fcolor_diagnostics : Flag<"-fcolor-diagnostics">, Group<f_Group>;
+def fcommon : Flag<"-fcommon">, Group<f_Group>;
+def fcompile_resource_EQ : Joined<"-fcompile-resource=">, Group<f_Group>;
+def fconstant_cfstrings : Flag<"-fconstant-cfstrings">, Group<f_Group>;
+def fconstant_string_class_EQ : Joined<"-fconstant-string-class=">, Group<f_Group>;
+def fconstexpr_depth_EQ : Joined<"-fconstexpr-depth=">, Group<f_Group>;
+def fconstexpr_backtrace_limit_EQ : Joined<"-fconstexpr-backtrace-limit=">,
+ Group<f_Group>;
+def fno_crash_diagnostics : Flag<"-fno-crash-diagnostics">, Group<f_clang_Group>, Flags<[NoArgumentUnused]>;
+def fcreate_profile : Flag<"-fcreate-profile">, Group<f_Group>;
+def fcxx_exceptions: Flag<"-fcxx-exceptions">, Group<f_Group>;
+def fcxx_modules : Flag <"-fcxx-modules">, Group<f_Group>, Flags<[NoForward]>;
+def fdebug_pass_arguments : Flag<"-fdebug-pass-arguments">, Group<f_Group>;
+def fdebug_pass_structure : Flag<"-fdebug-pass-structure">, Group<f_Group>;
+def fdiagnostics_fixit_info : Flag<"-fdiagnostics-fixit-info">, Group<f_clang_Group>;
+def fdiagnostics_print_source_range_info : Flag<"-fdiagnostics-print-source-range-info">, Group<f_clang_Group>;
+def fdiagnostics_parseable_fixits : Flag<"-fdiagnostics-parseable-fixits">, Group<f_clang_Group>;
+def fdiagnostics_show_option : Flag<"-fdiagnostics-show-option">, Group<f_Group>;
+def fdiagnostics_show_note_include_stack : Flag<"-fdiagnostics-show-note-include-stack">, Group<f_Group>;
+def fdiagnostics_format_EQ : Joined<"-fdiagnostics-format=">, Group<f_clang_Group>;
+def fdiagnostics_show_category_EQ : Joined<"-fdiagnostics-show-category=">, Group<f_clang_Group>;
+def fdollars_in_identifiers : Flag<"-fdollars-in-identifiers">, Group<f_Group>;
+def fdwarf2_cfi_asm : Flag<"-fdwarf2-cfi-asm">, Group<f_Group>;
+def fno_dwarf2_cfi_asm : Flag<"-fno-dwarf2-cfi-asm">, Group<f_Group>;
+def fdwarf_directory_asm : Flag<"-fdwarf-directory-asm">, Group<f_Group>;
+def fno_dwarf_directory_asm : Flag<"-fno-dwarf-directory-asm">, Group<f_Group>;
+def felide_constructors : Flag<"-felide-constructors">, Group<f_Group>;
+def feliminate_unused_debug_symbols : Flag<"-feliminate-unused-debug-symbols">, Group<f_Group>;
+def femit_all_decls : Flag<"-femit-all-decls">, Group<f_Group>;
+def fencoding_EQ : Joined<"-fencoding=">, Group<f_Group>;
+def ferror_limit_EQ : Joined<"-ferror-limit=">, Group<f_Group>;
+def fexceptions : Flag<"-fexceptions">, Group<f_Group>;
+def fextdirs_EQ : Joined<"-fextdirs=">, Group<f_Group>;
+def fhosted : Flag<"-fhosted">, Group<f_Group>;
+def ffast_math : Flag<"-ffast-math">, Group<f_Group>;
+def fmath_errno : Flag<"-fmath-errno">, Group<f_Group>;
+def fno_math_errno : Flag<"-fno-math-errno">, Group<f_Group>;
+def fsignaling_math : Flag<"-fsignaling-math">, Group<f_Group>;
+def fno_signaling_math : Flag<"-fno-signaling-math">, Group<f_Group>;
+def funsafe_math_optimizations : Flag<"-funsafe-math-optimizations">,
+ Group<f_Group>;
+def fno_unsafe_math_optimizations : Flag<"-fno-unsafe-math-optimizations">,
+ Group<f_Group>;
+def fassociative_math : Flag<"-fassociative-math">, Group<f_Group>;
+def fno_associative_math : Flag<"-fno-associative-math">, Group<f_Group>;
+def freciprocal_math : Flag<"-freciprocal-math">, Group<f_Group>;
+def fno_reciprocal_math : Flag<"-fno-reciprocal-math">, Group<f_Group>;
+def ffinite_math_only : Flag<"-ffinite-math-only">, Group<f_Group>;
+def fno_finite_math_only : Flag<"-fno-finite-math-only">, Group<f_Group>;
+def fsigned_zeros : Flag<"-fsigned-zeros">, Group<f_Group>;
+def fno_signed_zeros : Flag<"-fno-signed-zeros">, Group<f_Group>;
+def fhonor_nans : Flag<"-fhonor-nans">, Group<f_Group>;
+def fno_honor_nans : Flag<"-fno-honor-nans">, Group<f_Group>;
+def fhonor_infinities : Flag<"-fhonor-infinities">, Group<f_Group>;
+def fno_honor_infinities : Flag<"-fno-honor-infinities">, Group<f_Group>;
+// Sic. This option was misspelled originally.
+def fhonor_infinites : Flag<"-fhonor-infinites">, Group<f_Group>,
+ Alias<fhonor_infinities>;
+def fno_honor_infinites : Flag<"-fno-honor-infinites">, Group<f_Group>,
+ Alias<fno_honor_infinities>;
+def ftrapping_math : Flag<"-ftrapping-math">, Group<f_Group>;
+def fno_trapping_math : Flag<"-fno-trapping-math">, Group<f_Group>;
+
+def ffor_scope : Flag<"-ffor-scope">, Group<f_Group>;
+def fno_for_scope : Flag<"-fno-for-scope">, Group<f_Group>;
+
+def ffreestanding : Flag<"-ffreestanding">, Group<f_Group>;
+def fformat_extensions: Flag<"-fformat-extensions">;
+def fgnu_keywords : Flag<"-fgnu-keywords">, Group<f_Group>;
+def fgnu89_inline : Flag<"-fgnu89-inline">, Group<f_Group>;
+def fno_gnu89_inline : Flag<"-fno-gnu89-inline">, Group<f_Group>;
+def fgnu_runtime : Flag<"-fgnu-runtime">, Group<f_Group>;
+def fheinous_gnu_extensions : Flag<"-fheinous-gnu-extensions">;
+def filelist : Separate<"-filelist">, Flags<[LinkerInput]>;
+def findirect_virtual_calls : Flag<"-findirect-virtual-calls">, Alias<fapple_kext>;
+def finline_functions : Flag<"-finline-functions">, Group<clang_ignored_f_Group>;
+def finline : Flag<"-finline">, Group<clang_ignored_f_Group>;
+def finstrument_functions : Flag<"-finstrument-functions">, Group<f_Group>;
+def fkeep_inline_functions : Flag<"-fkeep-inline-functions">, Group<clang_ignored_f_Group>;
+def flat__namespace : Flag<"-flat_namespace">;
+def flax_vector_conversions : Flag<"-flax-vector-conversions">, Group<f_Group>;
+def flimit_debug_info : Flag<"-flimit-debug-info">, Group<f_Group>,
+ HelpText<"Limit debug information produced to reduce size of debug binary">;
+def flimited_precision_EQ : Joined<"-flimited-precision=">, Group<f_Group>;
+def flto : Flag<"-flto">, Group<f_Group>;
+def fno_lto : Flag<"-fno-lto">, Group<f_Group>;
+def fmacro_backtrace_limit_EQ : Joined<"-fmacro-backtrace-limit=">,
+ Group<f_Group>;
+def fmerge_all_constants : Flag<"-fmerge-all-constants">, Group<f_Group>;
+def fmessage_length_EQ : Joined<"-fmessage-length=">, Group<f_Group>;
+def fms_extensions : Flag<"-fms-extensions">, Group<f_Group>;
+def fms_compatibility : Flag<"-fms-compatibility">, Group<f_Group>;
+def fmsc_version : Joined<"-fmsc-version=">, Group<f_Group>;
+def fdelayed_template_parsing : Flag<"-fdelayed-template-parsing">, Group<f_Group>;
+def fmodule_cache_path : Separate<"-fmodule-cache-path">, Group<i_Group>,
+ Flags<[NoForward]>;
+def fmodules : Flag <"-fmodules">, Group<f_Group>, Flags<[NoForward]>;
+
+def fmudflapth : Flag<"-fmudflapth">, Group<f_Group>;
+def fmudflap : Flag<"-fmudflap">, Group<f_Group>;
+def fnested_functions : Flag<"-fnested-functions">, Group<f_Group>;
+def fnext_runtime : Flag<"-fnext-runtime">, Group<f_Group>;
+def fno_access_control : Flag<"-fno-access-control">, Group<f_Group>;
+def fno_apple_pragma_pack : Flag<"-fno-apple-pragma-pack">, Group<f_Group>;
+def fno_asm : Flag<"-fno-asm">, Group<f_Group>;
+def fno_asynchronous_unwind_tables : Flag<"-fno-asynchronous-unwind-tables">, Group<f_Group>;
+def fno_assume_sane_operator_new : Flag<"-fno-assume-sane-operator-new">, Group<f_Group>;
+def fno_blocks : Flag<"-fno-blocks">, Group<f_Group>;
+def fno_borland_extensions : Flag<"-fno-borland-extensions">, Group<f_Group>;
+def fno_builtin_strcat : Flag<"-fno-builtin-strcat">, Group<f_Group>;
+def fno_builtin_strcpy : Flag<"-fno-builtin-strcpy">, Group<f_Group>;
+def fno_builtin : Flag<"-fno-builtin">, Group<f_Group>;
+def fno_caret_diagnostics : Flag<"-fno-caret-diagnostics">, Group<f_Group>;
+def fno_color_diagnostics : Flag<"-fno-color-diagnostics">, Group<f_Group>;
+def fno_common : Flag<"-fno-common">, Group<f_Group>;
+def fno_constant_cfstrings : Flag<"-fno-constant-cfstrings">, Group<f_Group>;
+def fno_cxx_exceptions: Flag<"-fno-cxx-exceptions">, Group<f_Group>;
+def fno_cxx_modules : Flag <"-fno-cxx-modules">, Group<f_Group>, Flags<[NoForward]>;
+def fno_diagnostics_fixit_info : Flag<"-fno-diagnostics-fixit-info">, Group<f_Group>;
+def fno_diagnostics_show_option : Flag<"-fno-diagnostics-show-option">, Group<f_Group>;
+def fno_diagnostics_show_note_include_stack : Flag<"-fno-diagnostics-show-note-include-stack">, Group<f_Group>;
+def fno_dollars_in_identifiers : Flag<"-fno-dollars-in-identifiers">, Group<f_Group>;
+def fno_elide_constructors : Flag<"-fno-elide-constructors">, Group<f_Group>;
+def fno_eliminate_unused_debug_symbols : Flag<"-fno-eliminate-unused-debug-symbols">, Group<f_Group>;
+def fno_exceptions : Flag<"-fno-exceptions">, Group<f_Group>;
+def fno_gnu_keywords : Flag<"-fno-gnu-keywords">, Group<f_Group>;
+def fno_inline_functions : Flag<"-fno-inline-functions">, Group<f_Group>;
+def fno_inline : Flag<"-fno-inline">, Group<f_Group>;
+def fno_keep_inline_functions : Flag<"-fno-keep-inline-functions">, Group<clang_ignored_f_Group>;
+def fno_lax_vector_conversions : Flag<"-fno-lax-vector-conversions">, Group<f_Group>;
+def fno_limit_debug_info : Flag<"-fno-limit-debug-info">, Group<f_Group>,
+ HelpText<"Do not limit debug information produced to reduce size of debug binary">;
+def fno_merge_all_constants : Flag<"-fno-merge-all-constants">, Group<f_Group>;
+def fno_modules : Flag <"-fno-modules">, Group<f_Group>, Flags<[NoForward]>;
+def fno_ms_extensions : Flag<"-fno-ms-extensions">, Group<f_Group>;
+def fno_ms_compatibility : Flag<"-fno-ms-compatibility">, Group<f_Group>;
+def fno_delayed_template_parsing : Flag<"-fno-delayed-template-parsing">, Group<f_Group>;
+def fno_objc_exceptions: Flag<"-fno-objc-exceptions">, Group<f_Group>;
+def fno_objc_legacy_dispatch : Flag<"-fno-objc-legacy-dispatch">, Group<f_Group>;
+def fno_omit_frame_pointer : Flag<"-fno-omit-frame-pointer">, Group<f_Group>;
+def fno_operator_names : Flag<"-fno-operator-names">, Group<f_Group>;
+def fno_pascal_strings : Flag<"-fno-pascal-strings">, Group<f_Group>;
+def fno_rtti : Flag<"-fno-rtti">, Group<f_Group>;
+def fno_short_enums : Flag<"-fno-short-enums">, Group<f_Group>;
+def fno_show_column : Flag<"-fno-show-column">, Group<f_Group>;
+def fno_show_source_location : Flag<"-fno-show-source-location">, Group<f_Group>;
+def fno_spell_checking : Flag<"-fno-spell-checking">, Group<f_Group>;
+def fno_stack_protector : Flag<"-fno-stack-protector">, Group<f_Group>;
+def fno_strict_aliasing : Flag<"-fno-strict-aliasing">, Group<f_Group>;
+def fno_strict_enums : Flag<"-fno-strict-enums">, Group<f_Group>;
+def fno_strict_overflow : Flag<"-fno-strict-overflow">, Group<f_Group>;
+def fno_threadsafe_statics : Flag<"-fno-threadsafe-statics">, Group<f_Group>;
+def fno_use_cxa_atexit : Flag<"-fno-use-cxa-atexit">, Group<f_Group>;
+def fno_unit_at_a_time : Flag<"-fno-unit-at-a-time">, Group<f_Group>;
+def fno_unwind_tables : Flag<"-fno-unwind-tables">, Group<f_Group>;
+def fno_verbose_asm : Flag<"-fno-verbose-asm">, Group<f_Group>;
+def fno_working_directory : Flag<"-fno-working-directory">, Group<f_Group>;
+def fno_wrapv : Flag<"-fno-wrapv">, Group<f_Group>;
+def fno_zero_initialized_in_bss : Flag<"-fno-zero-initialized-in-bss">, Group<f_Group>;
+def fobjc_arc : Flag<"-fobjc-arc">, Group<f_Group>;
+def fno_objc_arc : Flag<"-fno-objc-arc">, Group<f_Group>;
+def fobjc_arc_exceptions : Flag<"-fobjc-arc-exceptions">, Group<f_Group>;
+def fno_objc_arc_exceptions : Flag<"-fno-objc-arc-exceptions">, Group<f_Group>;
+def fobjc_atdefs : Flag<"-fobjc-atdefs">, Group<clang_ignored_f_Group>;
+def fobjc_call_cxx_cdtors : Flag<"-fobjc-call-cxx-cdtors">, Group<clang_ignored_f_Group>;
+def fobjc_exceptions: Flag<"-fobjc-exceptions">, Group<f_Group>;
+
+def fobjc_gc_only : Flag<"-fobjc-gc-only">, Group<f_Group>;
+def fobjc_gc : Flag<"-fobjc-gc">, Group<f_Group>;
+def fobjc_legacy_dispatch : Flag<"-fobjc-legacy-dispatch">, Group<f_Group>;
+def fobjc_new_property : Flag<"-fobjc-new-property">, Group<clang_ignored_f_Group>;
+def fobjc_infer_related_result_type : Flag<"-fobjc-infer-related-result-type">,
+ Group<f_Group>;
+def fno_objc_infer_related_result_type : Flag<
+ "-fno-objc-infer-related-result-type">, Group<f_Group>;
+def fobjc_link_runtime: Flag<"-fobjc-link-runtime">, Group<f_Group>;
+
+// Objective-C ABI options.
+def fobjc_abi_version_EQ : Joined<"-fobjc-abi-version=">, Group<f_Group>;
+def fobjc_nonfragile_abi_version_EQ : Joined<"-fobjc-nonfragile-abi-version=">, Group<f_Group>;
+def fobjc_nonfragile_abi : Flag<"-fobjc-nonfragile-abi">, Group<f_Group>;
+def fno_objc_nonfragile_abi : Flag<"-fno-objc-nonfragile-abi">, Group<f_Group>;
+
+def fobjc_sender_dependent_dispatch : Flag<"-fobjc-sender-dependent-dispatch">, Group<f_Group>;
+def fobjc : Flag<"-fobjc">, Group<f_Group>;
+def fomit_frame_pointer : Flag<"-fomit-frame-pointer">, Group<f_Group>;
+def fopenmp : Flag<"-fopenmp">, Group<f_Group>;
+def fno_optimize_sibling_calls : Flag<"-fno-optimize-sibling-calls">, Group<f_Group>;
+def foptimize_sibling_calls : Flag<"-foptimize-sibling-calls">, Group<f_Group>;
+def force__cpusubtype__ALL : Flag<"-force_cpusubtype_ALL">;
+def force__flat__namespace : Flag<"-force_flat_namespace">;
+def force__load : Separate<"-force_load">;
+def foutput_class_dir_EQ : Joined<"-foutput-class-dir=">, Group<f_Group>;
+def fpack_struct : Flag<"-fpack-struct">, Group<f_Group>;
+def fno_pack_struct : Flag<"-fno-pack-struct">, Group<f_Group>;
+def fpack_struct_EQ : Joined<"-fpack-struct=">, Group<f_Group>;
+def fpascal_strings : Flag<"-fpascal-strings">, Group<f_Group>;
+def fpch_preprocess : Flag<"-fpch-preprocess">, Group<f_Group>;
+def fpic : Flag<"-fpic">, Group<f_Group>;
+def fno_pic : Flag<"-fno-pic">, Group<f_Group>;
+def fpie : Flag<"-fpie">, Group<f_Group>;
+def fno_pie : Flag<"-fno-pie">, Group<f_Group>;
+def fprofile_arcs : Flag<"-fprofile-arcs">, Group<f_Group>;
+def fprofile_generate : Flag<"-fprofile-generate">, Group<f_Group>;
+def framework : Separate<"-framework">, Flags<[LinkerInput]>;
+def frandom_seed_EQ : Joined<"-frandom-seed=">, Group<clang_ignored_f_Group>;
+def frtti : Flag<"-frtti">, Group<f_Group>;
+def fsched_interblock : Flag<"-fsched-interblock">, Group<clang_ignored_f_Group>;
+def fshort_enums : Flag<"-fshort-enums">, Group<f_Group>;
+def freorder_blocks : Flag<"-freorder-blocks">, Group<clang_ignored_f_Group>;
+def fshort_wchar : Flag<"-fshort-wchar">, Group<f_Group>;
+def fshow_overloads_EQ : Joined<"-fshow-overloads=">, Group<f_Group>;
+def fshow_column : Flag<"-fshow-column">, Group<f_Group>;
+def fshow_source_location : Flag<"-fshow-source-location">, Group<f_Group>;
+def fspell_checking : Flag<"-fspell-checking">, Group<f_Group>;
+def fsigned_bitfields : Flag<"-fsigned-bitfields">, Group<f_Group>;
+def fsigned_char : Flag<"-fsigned-char">, Group<f_Group>;
+def fstack_protector_all : Flag<"-fstack-protector-all">, Group<f_Group>;
+def fstack_protector : Flag<"-fstack-protector">, Group<f_Group>;
+def fstrict_aliasing : Flag<"-fstrict-aliasing">, Group<f_Group>;
+def fstrict_enums : Flag<"-fstrict-enums">, Group<f_Group>;
+def fstrict_overflow : Flag<"-fstrict-overflow">, Group<f_Group>;
+def fsyntax_only : Flag<"-fsyntax-only">, Flags<[DriverOption]>;
+def ftabstop_EQ : Joined<"-ftabstop=">, Group<f_Group>;
+def ftemplate_depth_EQ : Joined<"-ftemplate-depth=">, Group<f_Group>;
+def ftemplate_depth_ : Joined<"-ftemplate-depth-">, Group<f_Group>;
+def ftemplate_backtrace_limit_EQ : Joined<"-ftemplate-backtrace-limit=">,
+ Group<f_Group>;
+def ftest_coverage : Flag<"-ftest-coverage">, Group<f_Group>;
+def Wlarge_by_value_copy_def : Flag<"-Wlarge-by-value-copy">;
+def Wlarge_by_value_copy_EQ : Joined<"-Wlarge-by-value-copy=">;
+
+// Just silence warnings about -Wlarger-than, -Wframe-larger-than for now.
+def Wlarger_than : Separate<"-Wlarger-than">, Group<clang_ignored_f_Group>;
+def Wlarger_than_EQ : Joined<"-Wlarger-than=">, Alias<Wlarger_than>;
+def Wlarger_than_ : Joined<"-Wlarger-than-">, Alias<Wlarger_than>;
+def Wframe_larger_than : Separate<"-Wframe-larger-than">, Group<clang_ignored_f_Group>;
+def Wframe_larger_than_EQ : Joined<"-Wframe-larger-than=">, Alias<Wframe_larger_than>;
+
+def fterminated_vtables : Flag<"-fterminated-vtables">, Alias<fapple_kext>;
+def fthreadsafe_statics : Flag<"-fthreadsafe-statics">, Group<f_Group>;
+def ftime_report : Flag<"-ftime-report">, Group<f_Group>;
+def ftrapv : Flag<"-ftrapv">, Group<f_Group>;
+def ftrapv_handler_EQ : Joined<"-ftrapv-handler=">, Group<f_Group>;
+def ftrap_function_EQ : Joined<"-ftrap-function=">, Group<f_Group>,
+ HelpText<"Issue call to specified function rather than a trap instruction">;
+def funit_at_a_time : Flag<"-funit-at-a-time">, Group<f_Group>;
+def funroll_loops : Flag<"-funroll-loops">, Group<f_Group>;
+def funsigned_bitfields : Flag<"-funsigned-bitfields">, Group<f_Group>;
+def funsigned_char : Flag<"-funsigned-char">, Group<f_Group>;
+def funwind_tables : Flag<"-funwind-tables">, Group<f_Group>;
+def fuse_cxa_atexit : Flag<"-fuse-cxa-atexit">, Group<f_Group>;
+def fverbose_asm : Flag<"-fverbose-asm">, Group<f_Group>;
+def fvisibility_EQ : Joined<"-fvisibility=">, Group<f_Group>;
+def fvisibility_inlines_hidden : Flag<"-fvisibility-inlines-hidden">, Group<f_Group>;
+def fwrapv : Flag<"-fwrapv">, Group<f_Group>;
+def fwritable_strings : Flag<"-fwritable-strings">, Group<f_Group>;
+def fzero_initialized_in_bss : Flag<"-fzero-initialized-in-bss">, Group<f_Group>;
+def ffunction_sections: Flag <"-ffunction-sections">, Group<f_Group>;
+def fdata_sections : Flag <"-fdata-sections">, Group<f_Group>;
+def f : Joined<"-f">, Group<f_Group>;
+def g0 : Flag<"-g0">, Group<g_Group>;
+def g2 : Flag<"-g2">, Group<g_Group>;
+def g3 : Flag<"-g3">, Group<g_Group>;
+def gdwarf2 : Flag<"-gdwarf-2">, Group<g_Group>;
+def gfull : Flag<"-gfull">, Group<g_Group>;
+def ggdb : Flag<"-ggdb">, Group<g_Group>;
+def gstabs : Flag<"-gstabs">, Group<g_Group>;
+def gstabsplus : Flag<"-gstabs+">, Group<g_Group>;
+def gstabs1 : Flag<"-gstabs1">, Group<g_Group>;
+def gstabs2 : Flag<"-gstabs2">, Group<g_Group>;
+def gused : Flag<"-gused">, Group<g_Group>;
+def g_Flag : Flag<"-g">, Group<g_Group>;
+def headerpad__max__install__names : Joined<"-headerpad_max_install_names">;
+def index_header_map : Flag<"-index-header-map">;
+def idirafter : JoinedOrSeparate<"-idirafter">, Group<clang_i_Group>;
+def iframework : Joined<"-iframework">, Group<clang_i_Group>;
+def imacros : JoinedOrSeparate<"-imacros">, Group<clang_i_Group>;
+def image__base : Separate<"-image_base">;
+def include_ : JoinedOrSeparate<"-include">, Group<clang_i_Group>, EnumName<"include">;
+def include_pch : Separate<"-include-pch">, Group<clang_i_Group>;
+def init : Separate<"-init">;
+def install__name : Separate<"-install_name">;
+def integrated_as : Flag<"-integrated-as">, Flags<[DriverOption]>;
+def iprefix : JoinedOrSeparate<"-iprefix">, Group<clang_i_Group>;
+def iquote : JoinedOrSeparate<"-iquote">, Group<clang_i_Group>;
+def isysroot : JoinedOrSeparate<"-isysroot">, Group<clang_i_Group>;
+def isystem : JoinedOrSeparate<"-isystem">, Group<clang_i_Group>;
+def iwithprefixbefore : JoinedOrSeparate<"-iwithprefixbefore">, Group<clang_i_Group>;
+def iwithprefix : JoinedOrSeparate<"-iwithprefix">, Group<clang_i_Group>;
+def iwithsysroot : JoinedOrSeparate<"-iwithsysroot">, Group<clang_i_Group>;
+def i : Joined<"-i">, Group<i_Group>;
+def keep__private__externs : Flag<"-keep_private_externs">;
+def l : JoinedOrSeparate<"-l">, Flags<[LinkerInput, RenderJoined]>;
+def lazy__framework : Separate<"-lazy_framework">, Flags<[LinkerInput]>;
+def lazy__library : Separate<"-lazy_library">, Flags<[LinkerInput]>;
+def m32 : Flag<"-m32">, Group<m_Group>, Flags<[DriverOption]>;
+def mqdsp6_compat : Flag<"-mqdsp6-compat">, Group<m_Group>, Flags<[DriverOption]>;
+def m3dnowa : Flag<"-m3dnowa">, Group<m_x86_Features_Group>;
+def m3dnow : Flag<"-m3dnow">, Group<m_x86_Features_Group>;
+def m64 : Flag<"-m64">, Group<m_Group>, Flags<[DriverOption]>;
+def mabi_EQ : Joined<"-mabi=">, Group<m_Group>;
+def march_EQ : Joined<"-march=">, Group<m_Group>;
+def mcmodel_EQ : Joined<"-mcmodel=">, Group<m_Group>;
+def mconstant_cfstrings : Flag<"-mconstant-cfstrings">, Group<clang_ignored_m_Group>;
+def mcpu_EQ : Joined<"-mcpu=">, Group<m_Group>;
+def mdynamic_no_pic : Joined<"-mdynamic-no-pic">, Group<m_Group>;
+def mfix_and_continue : Flag<"-mfix-and-continue">, Group<clang_ignored_m_Group>;
+def mfloat_abi_EQ : Joined<"-mfloat-abi=">, Group<m_Group>;
+def mfpmath_EQ : Joined<"-mfpmath=">, Group<m_Group>;
+def mfpu_EQ : Joined<"-mfpu=">, Group<m_Group>;
+def mglobal_merge : Flag<"-mglobal-merge">, Group<m_Group>;
+def mhard_float : Flag<"-mhard-float">, Group<m_Group>;
+def miphoneos_version_min_EQ : Joined<"-miphoneos-version-min=">, Group<m_Group>;
+def mios_version_min_EQ : Joined<"-mios-version-min=">, Alias<miphoneos_version_min_EQ>;
+def mios_simulator_version_min_EQ : Joined<"-mios-simulator-version-min=">, Group<m_Group>;
+def mkernel : Flag<"-mkernel">, Group<m_Group>;
+def mlinker_version_EQ : Joined<"-mlinker-version=">, Flags<[NoForward]>;
+def mllvm : Separate<"-mllvm">;
+def mmacosx_version_min_EQ : Joined<"-mmacosx-version-min=">, Group<m_Group>;
+def mms_bitfields : Flag<"-mms-bitfields">, Group<m_Group>;
+def mstackrealign : Flag<"-mstackrealign">, Group<m_Group>;
+def mstack_alignment : Joined<"-mstack-alignment=">, Group<m_Group>;
+def mmmx : Flag<"-mmmx">, Group<m_x86_Features_Group>;
+def mno_3dnowa : Flag<"-mno-3dnowa">, Group<m_x86_Features_Group>;
+def mno_3dnow : Flag<"-mno-3dnow">, Group<m_x86_Features_Group>;
+def mno_constant_cfstrings : Flag<"-mno-constant-cfstrings">, Group<m_Group>;
+def mno_global_merge : Flag<"-mno-global-merge">, Group<m_Group>;
+def mno_mmx : Flag<"-mno-mmx">, Group<m_x86_Features_Group>;
+def mno_pascal_strings : Flag<"-mno-pascal-strings">, Group<m_Group>;
+def mno_red_zone : Flag<"-mno-red-zone">, Group<m_Group>;
+def mno_relax_all : Flag<"-mno-relax-all">, Group<m_Group>;
+def mno_rtd: Flag<"-mno-rtd">, Group<m_Group>;
+def mno_soft_float : Flag<"-mno-soft-float">, Group<m_Group>;
+def mno_stackrealign : Flag<"-mno-stackrealign">, Group<m_Group>;
+def mno_sse2 : Flag<"-mno-sse2">, Group<m_x86_Features_Group>;
+def mno_sse3 : Flag<"-mno-sse3">, Group<m_x86_Features_Group>;
+def mno_sse4a : Flag<"-mno-sse4a">, Group<m_x86_Features_Group>;
+def mno_sse4 : Flag<"-mno-sse4">, Group<m_x86_Features_Group>;
+def mno_sse4_1 : Flag<"-mno-sse4.1">, Group<m_x86_Features_Group>;
+def mno_sse4_2 : Flag<"-mno-sse4.2">, Group<m_x86_Features_Group>;
+def mno_sse : Flag<"-mno-sse">, Group<m_x86_Features_Group>;
+def mno_ssse3 : Flag<"-mno-ssse3">, Group<m_x86_Features_Group>;
+def mno_aes : Flag<"-mno-aes">, Group<m_x86_Features_Group>;
+def mno_avx : Flag<"-mno-avx">, Group<m_x86_Features_Group>;
+def mno_avx2 : Flag<"-mno-avx2">, Group<m_x86_Features_Group>;
+def mno_lzcnt : Flag<"-mno-lzcnt">, Group<m_x86_Features_Group>;
+def mno_bmi : Flag<"-mno-bmi">, Group<m_x86_Features_Group>;
+def mno_bmi2 : Flag<"-mno-bmi2">, Group<m_x86_Features_Group>;
+def mno_popcnt : Flag<"-mno-popcnt">, Group<m_x86_Features_Group>;
+def mno_fma4 : Flag<"-mno-fma4">, Group<m_x86_Features_Group>;
+
+def mno_thumb : Flag<"-mno-thumb">, Group<m_Group>;
+def marm : Flag<"-marm">, Alias<mno_thumb>;
+
+def mno_warn_nonportable_cfstrings : Flag<"-mno-warn-nonportable-cfstrings">, Group<m_Group>;
+def mno_omit_leaf_frame_pointer : Flag<"-mno-omit-leaf-frame-pointer">, Group<f_Group>;
+def momit_leaf_frame_pointer : Flag<"-momit-leaf-frame-pointer">, Group<f_Group>;
+def mpascal_strings : Flag<"-mpascal-strings">, Group<m_Group>;
+def mred_zone : Flag<"-mred-zone">, Group<m_Group>;
+def mregparm_EQ : Joined<"-mregparm=">, Group<m_Group>;
+def mrelax_all : Flag<"-mrelax-all">, Group<m_Group>;
+def mrtd: Flag<"-mrtd">, Group<m_Group>;
+def msmall_data_threshold_EQ : Joined <"-msmall-data-threshold=">, Group<m_Group>;
+def msoft_float : Flag<"-msoft-float">, Group<m_Group>;
+def msse2 : Flag<"-msse2">, Group<m_x86_Features_Group>;
+def msse3 : Flag<"-msse3">, Group<m_x86_Features_Group>;
+def msse4a : Flag<"-msse4a">, Group<m_x86_Features_Group>;
+def msse4 : Flag<"-msse4">, Group<m_x86_Features_Group>;
+def msse4_1 : Flag<"-msse4.1">, Group<m_x86_Features_Group>;
+def msse4_2 : Flag<"-msse4.2">, Group<m_x86_Features_Group>;
+def msse : Flag<"-msse">, Group<m_x86_Features_Group>;
+def mssse3 : Flag<"-mssse3">, Group<m_x86_Features_Group>;
+def maes : Flag<"-maes">, Group<m_x86_Features_Group>;
+def mavx : Flag<"-mavx">, Group<m_x86_Features_Group>;
+def mavx2 : Flag<"-mavx2">, Group<m_x86_Features_Group>;
+def mlzcnt : Flag<"-mlzcnt">, Group<m_x86_Features_Group>;
+def mbmi : Flag<"-mbmi">, Group<m_x86_Features_Group>;
+def mbmi2 : Flag<"-mbmi2">, Group<m_x86_Features_Group>;
+def mpopcnt : Flag<"-mpopcnt">, Group<m_x86_Features_Group>;
+def mfma4 : Flag<"-mfma4">, Group<m_x86_Features_Group>;
+def mthumb : Flag<"-mthumb">, Group<m_Group>;
+def mtune_EQ : Joined<"-mtune=">, Group<m_Group>;
+def multi__module : Flag<"-multi_module">;
+def multiply__defined__unused : Separate<"-multiply_defined_unused">;
+def multiply__defined : Separate<"-multiply_defined">;
+def mwarn_nonportable_cfstrings : Flag<"-mwarn-nonportable-cfstrings">, Group<m_Group>;
+def m_Separate : Separate<"-m">, Group<m_Group>;
+def m_Joined : Joined<"-m">, Group<m_Group>;
+def no_canonical_prefixes : Flag<"-no-canonical-prefixes">, Flags<[HelpHidden]>,
+ HelpText<"Use relative instead of canonical paths">;
+def no_cpp_precomp : Flag<"-no-cpp-precomp">, Group<clang_ignored_f_Group>;
+def no_integrated_as : Flag<"-no-integrated-as">, Flags<[DriverOption]>;
+def no_integrated_cpp : Flag<"-no-integrated-cpp">, Flags<[DriverOption]>;
+def no__dead__strip__inits__and__terms : Flag<"-no_dead_strip_inits_and_terms">;
+def nobuiltininc : Flag<"-nobuiltininc">;
+def nodefaultlibs : Flag<"-nodefaultlibs">;
+def nofixprebinding : Flag<"-nofixprebinding">;
+def nolibc : Flag<"-nolibc">;
+def nomultidefs : Flag<"-nomultidefs">;
+def noprebind : Flag<"-noprebind">;
+def noseglinkedit : Flag<"-noseglinkedit">;
+def nostartfiles : Flag<"-nostartfiles">;
+def nostdinc : Flag<"-nostdinc">;
+def nostdlibinc : Flag<"-nostdlibinc">;
+def nostdincxx : Flag<"-nostdinc++">;
+def nostdlib : Flag<"-nostdlib">;
+def object : Flag<"-object">;
+def o : JoinedOrSeparate<"-o">, Flags<[DriverOption, RenderAsInput]>,
+ HelpText<"Write output to <file>">, MetaVarName<"<file>">;
+def pagezero__size : JoinedOrSeparate<"-pagezero_size">;
+def pass_exit_codes : Flag<"-pass-exit-codes">, Flags<[Unsupported]>;
+def pedantic_errors : Flag<"-pedantic-errors">, Group<pedantic_Group>;
+def pedantic : Flag<"-pedantic">, Group<pedantic_Group>;
+def pg : Flag<"-pg">;
+def pipe : Flag<"-pipe">,
+ HelpText<"Use pipes between commands, when possible">;
+def prebind__all__twolevel__modules : Flag<"-prebind_all_twolevel_modules">;
+def prebind : Flag<"-prebind">;
+def preload : Flag<"-preload">;
+def print_file_name_EQ : Joined<"-print-file-name=">,
+ HelpText<"Print the full library path of <file>">, MetaVarName<"<file>">;
+def print_ivar_layout : Flag<"-print-ivar-layout">;
+def print_libgcc_file_name : Flag<"-print-libgcc-file-name">,
+ HelpText<"Print the library path for \"libgcc.a\"">;
+def print_multi_directory : Flag<"-print-multi-directory">;
+def print_multi_lib : Flag<"-print-multi-lib">;
+def print_multi_os_directory : Flag<"-print-multi-os-directory">;
+def print_prog_name_EQ : Joined<"-print-prog-name=">,
+ HelpText<"Print the full program path of <name>">, MetaVarName<"<name>">;
+def print_search_dirs : Flag<"-print-search-dirs">,
+ HelpText<"Print the paths used for finding libraries and programs">;
+def private__bundle : Flag<"-private_bundle">;
+def pthreads : Flag<"-pthreads">;
+def pthread : Flag<"-pthread">;
+def p : Flag<"-p">;
+def pie : Flag<"-pie">;
+def read__only__relocs : Separate<"-read_only_relocs">;
+def remap : Flag<"-remap">;
+def rewrite_objc : Flag<"-rewrite-objc">, Flags<[DriverOption]>,
+ HelpText<"Rewrite Objective-C source to C++">;
+def rewrite_legacy_objc : Flag<"-rewrite-legacy-objc">, Flags<[DriverOption]>,
+ HelpText<"Rewrite Legacy Objective-C source to C++">;
+def rdynamic : Flag<"-rdynamic">;
+def rpath : Separate<"-rpath">, Flags<[LinkerInput]>;
+def rtlib_EQ : Joined<"-rtlib=">;
+def r : Flag<"-r">;
+def save_temps : Flag<"-save-temps">, Flags<[DriverOption]>,
+ HelpText<"Save intermediate compilation results">;
+def sectalign : MultiArg<"-sectalign", 3>;
+def sectcreate : MultiArg<"-sectcreate", 3>;
+def sectobjectsymbols : MultiArg<"-sectobjectsymbols", 2>;
+def sectorder : MultiArg<"-sectorder", 3>;
+def seg1addr : JoinedOrSeparate<"-seg1addr">;
+def seg__addr__table__filename : Separate<"-seg_addr_table_filename">;
+def seg__addr__table : Separate<"-seg_addr_table">;
+def segaddr : MultiArg<"-segaddr", 2>;
+def segcreate : MultiArg<"-segcreate", 3>;
+def seglinkedit : Flag<"-seglinkedit">;
+def segprot : MultiArg<"-segprot", 3>;
+def segs__read__only__addr : Separate<"-segs_read_only_addr">;
+def segs__read__write__addr : Separate<"-segs_read_write_addr">;
+def segs__read__ : Joined<"-segs_read_">;
+def shared_libgcc : Flag<"-shared-libgcc">;
+def shared : Flag<"-shared">;
+def single__module : Flag<"-single_module">;
+def specs_EQ : Joined<"-specs=">;
+def specs : Separate<"-specs">, Flags<[Unsupported]>;
+def static_libgcc : Flag<"-static-libgcc">;
+def static_libstdcxx : Flag<"-static-libstdc++">;
+def static : Flag<"-static">, Flags<[NoArgumentUnused]>;
+def std_default_EQ : Joined<"-std-default=">;
+def std_EQ : Joined<"-std=">, Group<L_Group>;
+def stdlib_EQ : Joined<"-stdlib=">;
+def sub__library : JoinedOrSeparate<"-sub_library">;
+def sub__umbrella : JoinedOrSeparate<"-sub_umbrella">;
+def s : Flag<"-s">;
+def target : Separate<"-target">, Flags<[DriverOption]>,
+ HelpText<"Generate code for the given target">;
+def gcc_toolchain : Separate<"-gcc-toolchain">, Flags<[DriverOption]>,
+ HelpText<"Use the gcc toolchain at the given directory">;
+// We should deprecate the use of -ccc-host-triple, and then remove it.
+def ccc_host_triple : Separate<"-ccc-host-triple">, Alias<target>;
+def time : Flag<"-time">,
+ HelpText<"Time individual commands">;
+def traditional_cpp : Flag<"-traditional-cpp">;
+def traditional : Flag<"-traditional">;
+def trigraphs : Flag<"-trigraphs">;
+def twolevel__namespace__hints : Flag<"-twolevel_namespace_hints">;
+def twolevel__namespace : Flag<"-twolevel_namespace">;
+def t : Flag<"-t">;
+def umbrella : Separate<"-umbrella">;
+def undefined : JoinedOrSeparate<"-undefined">, Group<u_Group>;
+def undef : Flag<"-undef">, Group<u_Group>;
+def unexported__symbols__list : Separate<"-unexported_symbols_list">;
+def u : JoinedOrSeparate<"-u">, Group<u_Group>;
+def use_gold_plugin : Flag<"-use-gold-plugin">;
+def v : Flag<"-v">,
+ HelpText<"Show commands to run and use verbose output">;
+def verify : Flag<"-verify">, Flags<[DriverOption]>,
+ HelpText<"Verify output using a verifier.">;
+def weak_l : Joined<"-weak-l">, Flags<[LinkerInput]>;
+def weak__framework : Separate<"-weak_framework">, Flags<[LinkerInput]>;
+def weak__library : Separate<"-weak_library">, Flags<[LinkerInput]>;
+def weak__reference__mismatches : Separate<"-weak_reference_mismatches">;
+def whatsloaded : Flag<"-whatsloaded">;
+def whyload : Flag<"-whyload">;
+def w : Flag<"-w">;
+def x : JoinedOrSeparate<"-x">, Flags<[DriverOption]>,
+ HelpText<"Treat subsequent input files as having type <language>">,
+ MetaVarName<"<language>">;
+def y : Joined<"-y">;
+
+def working_directory : Separate<"-working-directory">,
+ HelpText<"Resolve file paths relative to the specified directory">;
+def working_directory_EQ : Joined<"-working-directory=">,
+ Alias<working_directory>;
+
+// Double dash options, which are usually aliases for one of the preceding
+// options.
+
+def _CLASSPATH_EQ : Joined<"--CLASSPATH=">, Alias<fclasspath_EQ>;
+def _CLASSPATH : Separate<"--CLASSPATH">, Alias<fclasspath_EQ>;
+def _all_warnings : Flag<"--all-warnings">, Alias<Wall>;
+def _analyze_auto : Flag<"--analyze-auto">, Flags<[DriverOption]>;
+def _analyzer_no_default_checks : Flag<"--analyzer-no-default-checks">, Flags<[DriverOption]>;
+def _analyzer_output : JoinedOrSeparate<"--analyzer-output">, Flags<[DriverOption]>;
+def _analyze : Flag<"--analyze">, Flags<[DriverOption]>,
+ HelpText<"Run the static analyzer">;
+def _ansi : Flag<"--ansi">, Alias<ansi>;
+def _assemble : Flag<"--assemble">, Alias<S>;
+def _assert_EQ : Joined<"--assert=">, Alias<A>;
+def _assert : Separate<"--assert">, Alias<A>;
+def _bootclasspath_EQ : Joined<"--bootclasspath=">, Alias<fbootclasspath_EQ>;
+def _bootclasspath : Separate<"--bootclasspath">, Alias<fbootclasspath_EQ>;
+def _classpath_EQ : Joined<"--classpath=">, Alias<fclasspath_EQ>;
+def _classpath : Separate<"--classpath">, Alias<fclasspath_EQ>;
+def _combine : Flag<"--combine">, Alias<combine>;
+def _comments_in_macros : Flag<"--comments-in-macros">, Alias<CC>;
+def _comments : Flag<"--comments">, Alias<C>;
+def _compile : Flag<"--compile">, Alias<c>;
+def _constant_cfstrings : Flag<"--constant-cfstrings">;
+def _coverage : Flag<"--coverage">, Alias<coverage>;
+def _debug_EQ : Joined<"--debug=">, Alias<g_Flag>;
+def _debug : Flag<"--debug">, Alias<g_Flag>;
+def _define_macro_EQ : Joined<"--define-macro=">, Alias<D>;
+def _define_macro : Separate<"--define-macro">, Alias<D>;
+def _dependencies : Flag<"--dependencies">, Alias<M>;
+def _encoding_EQ : Joined<"--encoding=">, Alias<fencoding_EQ>;
+def _encoding : Separate<"--encoding">, Alias<fencoding_EQ>;
+def _entry : Flag<"--entry">, Alias<e>;
+def _extdirs_EQ : Joined<"--extdirs=">, Alias<fextdirs_EQ>;
+def _extdirs : Separate<"--extdirs">, Alias<fextdirs_EQ>;
+def _extra_warnings : Flag<"--extra-warnings">, Alias<W_Joined>;
+def _for_linker_EQ : Joined<"--for-linker=">, Alias<Xlinker>;
+def _for_linker : Separate<"--for-linker">, Alias<Xlinker>;
+def _force_link_EQ : Joined<"--force-link=">, Alias<u>;
+def _force_link : Separate<"--force-link">, Alias<u>;
+def _help_hidden : Flag<"--help-hidden">;
+def _help : Flag<"--help">,
+ HelpText<"Display available options">;
+def _imacros_EQ : Joined<"--imacros=">, Alias<imacros>;
+def _imacros : Separate<"--imacros">, Alias<imacros>;
+def _include_barrier : Flag<"--include-barrier">, Alias<I_>;
+def _include_directory_after_EQ : Joined<"--include-directory-after=">, Alias<idirafter>;
+def _include_directory_after : Separate<"--include-directory-after">, Alias<idirafter>;
+def _include_directory_EQ : Joined<"--include-directory=">, Alias<I>;
+def _include_directory : Separate<"--include-directory">, Alias<I>;
+def _include_prefix_EQ : Joined<"--include-prefix=">, Alias<iprefix>;
+def _include_prefix : Separate<"--include-prefix">, Alias<iprefix>;
+def _include_with_prefix_after_EQ : Joined<"--include-with-prefix-after=">, Alias<iwithprefix>;
+def _include_with_prefix_after : Separate<"--include-with-prefix-after">, Alias<iwithprefix>;
+def _include_with_prefix_before_EQ : Joined<"--include-with-prefix-before=">, Alias<iwithprefixbefore>;
+def _include_with_prefix_before : Separate<"--include-with-prefix-before">, Alias<iwithprefixbefore>;
+def _include_with_prefix_EQ : Joined<"--include-with-prefix=">, Alias<iwithprefix>;
+def _include_with_prefix : Separate<"--include-with-prefix">, Alias<iwithprefix>;
+def _include_EQ : Joined<"--include=">, Alias<include_>;
+def _include : Separate<"--include">, Alias<include_>;
+def _language_EQ : Joined<"--language=">, Alias<x>;
+def _language : Separate<"--language">, Alias<x>;
+def _library_directory_EQ : Joined<"--library-directory=">, Alias<L>;
+def _library_directory : Separate<"--library-directory">, Alias<L>;
+def _machine__EQ : Joined<"--machine-=">, Alias<m_Joined>;
+def _machine_ : Joined<"--machine-">, Alias<m_Joined>;
+def _machine_EQ : Joined<"--machine=">, Alias<m_Joined>;
+def _machine : Separate<"--machine">, Alias<m_Joined>;
+def _no_integrated_cpp : Flag<"--no-integrated-cpp">, Alias<no_integrated_cpp>;
+def _no_line_commands : Flag<"--no-line-commands">, Alias<P>;
+def _no_standard_includes : Flag<"--no-standard-includes">, Alias<nostdinc>;
+def _no_standard_libraries : Flag<"--no-standard-libraries">, Alias<nostdlib>;
+def _no_undefined : Flag<"--no-undefined">, Flags<[LinkerInput]>;
+def _no_warnings : Flag<"--no-warnings">, Alias<w>;
+def _optimize_EQ : Joined<"--optimize=">, Alias<O>;
+def _optimize : Flag<"--optimize">, Alias<O>;
+def _output_class_directory_EQ : Joined<"--output-class-directory=">, Alias<foutput_class_dir_EQ>;
+def _output_class_directory : Separate<"--output-class-directory">, Alias<foutput_class_dir_EQ>;
+def _output_EQ : Joined<"--output=">, Alias<o>;
+def _output : Separate<"--output">, Alias<o>;
+def _param : Separate<"--param">;
+def _param_EQ : Joined<"--param=">, Alias<_param>;
+def _pass_exit_codes : Flag<"--pass-exit-codes">, Alias<pass_exit_codes>;
+def _pedantic_errors : Flag<"--pedantic-errors">, Alias<pedantic_errors>;
+def _pedantic : Flag<"--pedantic">, Alias<pedantic>;
+def _pipe : Flag<"--pipe">, Alias<pipe>;
+def _prefix_EQ : Joined<"--prefix=">, Alias<B>;
+def _prefix : Separate<"--prefix">, Alias<B>;
+def _preprocess : Flag<"--preprocess">, Alias<E>;
+def _print_diagnostic_categories : Flag<"--print-diagnostic-categories">;
+def _print_file_name_EQ : Joined<"--print-file-name=">, Alias<print_file_name_EQ>;
+def _print_file_name : Separate<"--print-file-name">, Alias<print_file_name_EQ>;
+def _print_libgcc_file_name : Flag<"--print-libgcc-file-name">, Alias<print_libgcc_file_name>;
+def _print_missing_file_dependencies : Flag<"--print-missing-file-dependencies">, Alias<MG>;
+def _print_multi_directory : Flag<"--print-multi-directory">, Alias<print_multi_directory>;
+def _print_multi_lib : Flag<"--print-multi-lib">, Alias<print_multi_lib>;
+def _print_multi_os_directory : Flag<"--print-multi-os-directory">, Alias<print_multi_os_directory>;
+def _print_prog_name_EQ : Joined<"--print-prog-name=">, Alias<print_prog_name_EQ>;
+def _print_prog_name : Separate<"--print-prog-name">, Alias<print_prog_name_EQ>;
+def _print_search_dirs : Flag<"--print-search-dirs">, Alias<print_search_dirs>;
+def _profile_blocks : Flag<"--profile-blocks">, Alias<a>;
+def _profile : Flag<"--profile">, Alias<p>;
+def _relocatable_pch : Flag<"--relocatable-pch">,
+ HelpText<"Build a relocatable precompiled header">;
+def _resource_EQ : Joined<"--resource=">, Alias<fcompile_resource_EQ>;
+def _resource : Separate<"--resource">, Alias<fcompile_resource_EQ>;
+def _rtlib_EQ : Joined<"--rtlib=">, Alias<rtlib_EQ>;
+def _rtlib : Separate<"--rtlib">, Alias<rtlib_EQ>;
+def _save_temps : Flag<"--save-temps">, Alias<save_temps>;
+def _serialize_diags : Separate<"--serialize-diagnostics">, Flags<[DriverOption]>,
+ HelpText<"Serialize compiler diagnostics to a file">;
+def _shared : Flag<"--shared">, Alias<shared>;
+def _signed_char : Flag<"--signed-char">, Alias<fsigned_char>;
+def _specs_EQ : Joined<"--specs=">, Alias<specs_EQ>;
+def _specs : Separate<"--specs">, Alias<specs_EQ>;
+def _static : Flag<"--static">, Alias<static>;
+def _std_EQ : Joined<"--std=">, Alias<std_EQ>;
+def _std : Separate<"--std">, Alias<std_EQ>;
+def _stdlib_EQ : Joined<"--stdlib=">, Alias<stdlib_EQ>;
+def _stdlib : Separate<"--stdlib">, Alias<stdlib_EQ>;
+def _sysroot_EQ : Joined<"--sysroot=">;
+def _sysroot : Separate<"--sysroot">, Alias<_sysroot_EQ>;
+def _target_help : Flag<"--target-help">;
+def _trace_includes : Flag<"--trace-includes">, Alias<H>;
+def _traditional_cpp : Flag<"--traditional-cpp">, Alias<traditional_cpp>;
+def _traditional : Flag<"--traditional">, Alias<traditional>;
+def _trigraphs : Flag<"--trigraphs">, Alias<trigraphs>;
+def _undefine_macro_EQ : Joined<"--undefine-macro=">, Alias<U>;
+def _undefine_macro : Separate<"--undefine-macro">, Alias<U>;
+def _unsigned_char : Flag<"--unsigned-char">, Alias<funsigned_char>;
+def _user_dependencies : Flag<"--user-dependencies">, Alias<MM>;
+def _verbose : Flag<"--verbose">, Alias<v>;
+def _version : Flag<"--version">;
+def _warn__EQ : Joined<"--warn-=">, Alias<W_Joined>;
+def _warn_ : Joined<"--warn-">, Alias<W_Joined>;
+def _write_dependencies : Flag<"--write-dependencies">, Alias<MD>;
+def _write_user_dependencies : Flag<"--write-user-dependencies">, Alias<MMD>;
+def _ : Joined<"--">, Flags<[Unsupported]>;
+
+// Special internal option to handle -Xlinker --no-demangle.
+def Z_Xlinker__no_demangle : Flag<"-Z-Xlinker-no-demangle">,
+ Flags<[Unsupported, NoArgumentUnused]>;
+
+// Special internal option to allow forwarding arbitrary arguments to the linker.
+def Zlinker_input : Separate<"-Zlinker-input">,
+ Flags<[Unsupported, NoArgumentUnused]>;
+
+// Reserved library options.
+def Z_reserved_lib_stdcxx : Flag<"-Z-reserved-lib-stdc++">,
+ Flags<[LinkerInput, NoArgumentUnused, Unsupported]>, Group<reserved_lib_Group>;
+def Z_reserved_lib_cckext : Flag<"-Z-reserved-lib-cckext">,
+ Flags<[LinkerInput, NoArgumentUnused, Unsupported]>, Group<reserved_lib_Group>;
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Phases.h b/contrib/llvm/tools/clang/include/clang/Driver/Phases.h
new file mode 100644
index 0000000..a0c42ea
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Phases.h
@@ -0,0 +1,32 @@
+//===--- Phases.h - Transformations on Driver Types -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_PHASES_H_
+#define CLANG_DRIVER_PHASES_H_
+
+namespace clang {
+namespace driver {
+namespace phases {
+ /// ID - Ordered values for successive stages in the
+ /// compilation process which interact with user options.
+ enum ID {
+ Preprocess,
+ Precompile,
+ Compile,
+ Assemble,
+ Link
+ };
+
+ const char *getPhaseName(ID Id);
+
+} // end namespace phases
+} // end namespace driver
+} // end namespace clang
+
+#endif
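
A minimal usage sketch of the phase enumeration above (illustrative only; printAllPhases is a hypothetical helper, and getPhaseName is assumed to return a printable name for every ID, as its declaration suggests):

    // Hypothetical illustration, not part of this header.
    #include "clang/Driver/Phases.h"
    #include <cstdio>

    static void printAllPhases() {
      using namespace clang::driver;
      // The IDs are ordered, so iterating from Preprocess to Link visits
      // every compilation stage exactly once.
      for (unsigned i = phases::Preprocess; i <= phases::Link; ++i)
        std::printf("%u: %s\n", i, phases::getPhaseName(phases::ID(i)));
    }
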
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Tool.h b/contrib/llvm/tools/clang/include/clang/Driver/Tool.h
new file mode 100644
index 0000000..8822d7b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Tool.h
@@ -0,0 +1,75 @@
+//===--- Tool.h - Compilation Tools -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_TOOL_H_
+#define CLANG_DRIVER_TOOL_H_
+
+#include "clang/Basic/LLVM.h"
+
+namespace clang {
+namespace driver {
+ class ArgList;
+ class Compilation;
+ class InputInfo;
+ class Job;
+ class JobAction;
+ class ToolChain;
+
+ typedef SmallVector<InputInfo, 4> InputInfoList;
+
+/// Tool - Information on a specific compilation tool.
+class Tool {
+ /// The tool name (for debugging).
+ const char *Name;
+
+ /// The human readable name for the tool, for use in diagnostics.
+ const char *ShortName;
+
+ /// The tool chain this tool is a part of.
+ const ToolChain &TheToolChain;
+
+public:
+ Tool(const char *Name, const char *ShortName,
+ const ToolChain &TC);
+
+public:
+ virtual ~Tool();
+
+ const char *getName() const { return Name; }
+
+ const char *getShortName() const { return ShortName; }
+
+ const ToolChain &getToolChain() const { return TheToolChain; }
+
+ virtual bool hasIntegratedAssembler() const { return false; }
+ virtual bool hasIntegratedCPP() const = 0;
+ virtual bool isLinkJob() const { return false; }
+
+ /// \brief Does this tool have "good" standardized diagnostics, or should the
+ /// driver add an additional "command failed" diagnostic on failures?
+ virtual bool hasGoodDiagnostics() const { return false; }
+
+ /// ConstructJob - Construct jobs to perform the action \arg JA,
+ /// writing to \arg Output and with \arg Inputs.
+ ///
+ /// \param TCArgs - The argument list for this toolchain, with any
+ /// tool chain specific translations applied.
+ /// \param LinkingOutput - If this output will eventually feed the
+ /// linker, then this is the final output name of the linked image.
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const = 0;
+};
+
+} // end namespace driver
+} // end namespace clang
+
+#endif
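
Tool leaves hasIntegratedCPP and ConstructJob pure virtual, so every concrete tool must supply them. A hypothetical sketch of a minimal subclass, using only declarations visible in this header (the class name and comments are assumptions, not part of this patch):

    // Hypothetical sketch; not part of this header.
    #include "clang/Driver/Tool.h"

    namespace clang {
    namespace driver {

    class ExampleAssembler : public Tool {
    public:
      ExampleAssembler(const ToolChain &TC)
        : Tool("example::Assembler", "example assembler", TC) {}

      virtual bool hasIntegratedCPP() const { return false; }

      virtual void ConstructJob(Compilation &C, const JobAction &JA,
                                const InputInfo &Output,
                                const InputInfoList &Inputs,
                                const ArgList &TCArgs,
                                const char *LinkingOutput) const {
        // A real tool would build a command line here and append it to the
        // compilation's job list; those APIs live outside this header.
      }
    };

    } // end namespace driver
    } // end namespace clang
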
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h b/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h
new file mode 100644
index 0000000..c35cf67
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/ToolChain.h
@@ -0,0 +1,257 @@
+//===--- ToolChain.h - Collections of tools for one platform ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_TOOLCHAIN_H_
+#define CLANG_DRIVER_TOOLCHAIN_H_
+
+#include "clang/Driver/Util.h"
+#include "clang/Driver/Types.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Path.h"
+#include <string>
+
+namespace clang {
+namespace driver {
+ class ArgList;
+ class Compilation;
+ class DerivedArgList;
+ class Driver;
+ class InputArgList;
+ class JobAction;
+ class ObjCRuntime;
+ class Tool;
+
+/// ToolChain - Access to tools for a single platform.
+class ToolChain {
+public:
+ typedef SmallVector<std::string, 4> path_list;
+
+ enum CXXStdlibType {
+ CST_Libcxx,
+ CST_Libstdcxx
+ };
+
+ enum RuntimeLibType {
+ RLT_CompilerRT,
+ RLT_Libgcc
+ };
+
+private:
+ const Driver &D;
+ const llvm::Triple Triple;
+
+ /// The list of toolchain specific path prefixes to search for
+ /// files.
+ path_list FilePaths;
+
+ /// The list of toolchain specific path prefixes to search for
+ /// programs.
+ path_list ProgramPaths;
+
+protected:
+ ToolChain(const Driver &D, const llvm::Triple &T);
+
+ /// \name Utilities for implementing subclasses.
+ ///@{
+ static void addSystemInclude(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ const Twine &Path);
+ static void addExternCSystemInclude(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ const Twine &Path);
+ static void addSystemIncludes(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ ArrayRef<StringRef> Paths);
+ ///@}
+
+public:
+ virtual ~ToolChain();
+
+ // Accessors
+
+ const Driver &getDriver() const;
+ const llvm::Triple &getTriple() const { return Triple; }
+
+ llvm::Triple::ArchType getArch() const { return Triple.getArch(); }
+ StringRef getArchName() const { return Triple.getArchName(); }
+ StringRef getPlatform() const { return Triple.getVendorName(); }
+ StringRef getOS() const { return Triple.getOSName(); }
+
+ std::string getTripleString() const {
+ return Triple.getTriple();
+ }
+
+ path_list &getFilePaths() { return FilePaths; }
+ const path_list &getFilePaths() const { return FilePaths; }
+
+ path_list &getProgramPaths() { return ProgramPaths; }
+ const path_list &getProgramPaths() const { return ProgramPaths; }
+
+ // Tool access.
+
+ /// TranslateArgs - Create a new derived argument list for any argument
+ /// translations this ToolChain may wish to perform, or 0 if no tool chain
+ /// specific translations are needed.
+ ///
+ /// \param BoundArch - The bound architecture name, or 0.
+ virtual DerivedArgList *TranslateArgs(const DerivedArgList &Args,
+ const char *BoundArch) const {
+ return 0;
+ }
+
+ /// SelectTool - Choose a tool to use to handle the action \arg JA with the
+ /// given \arg Inputs.
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const = 0;
+
+ // Helper methods
+
+ std::string GetFilePath(const char *Name) const;
+ std::string GetProgramPath(const char *Name, bool WantFile = false) const;
+
+ // Platform defaults information
+
+ /// HasNativeLLVMSupport - Check whether the linker and related tools have
+ /// native LLVM support.
+ virtual bool HasNativeLLVMSupport() const;
+
+ /// LookupTypeForExtension - Return the default language type to use for the
+ /// given extension.
+ virtual types::ID LookupTypeForExtension(const char *Ext) const;
+
+ /// IsBlocksDefault - Does this tool chain enable -fblocks by default.
+ virtual bool IsBlocksDefault() const { return false; }
+
+ /// IsIntegratedAssemblerDefault - Does this tool chain enable -integrated-as
+ /// by default.
+ virtual bool IsIntegratedAssemblerDefault() const { return false; }
+
+ /// IsStrictAliasingDefault - Does this tool chain use -fstrict-aliasing by
+ /// default.
+ virtual bool IsStrictAliasingDefault() const { return true; }
+
+ /// IsObjCDefaultSynthPropertiesDefault - Does this tool chain enable
+ /// -fobjc-default-synthesize-properties by default.
+ virtual bool IsObjCDefaultSynthPropertiesDefault() const { return false; }
+
+ /// IsObjCNonFragileABIDefault - Does this tool chain set
+ /// -fobjc-nonfragile-abi by default.
+ virtual bool IsObjCNonFragileABIDefault() const { return false; }
+
+ /// IsObjCLegacyDispatchDefault - Does this tool chain set
+ /// -fobjc-legacy-dispatch by default (this is only used with the non-fragile
+ /// ABI).
+ virtual bool IsObjCLegacyDispatchDefault() const { return true; }
+
+ /// UseObjCMixedDispatch - When using non-legacy dispatch, should the
+ /// mixed dispatch method be used?
+ virtual bool UseObjCMixedDispatch() const { return false; }
+
+ /// GetDefaultStackProtectorLevel - Get the default stack protector level for
+ /// this tool chain (0=off, 1=on, 2=all).
+ virtual unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const {
+ return 0;
+ }
+
+ /// GetDefaultRuntimeLibType - Get the default runtime library variant to use.
+ virtual RuntimeLibType GetDefaultRuntimeLibType() const {
+ return ToolChain::RLT_Libgcc;
+ }
+
+ /// IsUnwindTablesDefault - Does this tool chain use -funwind-tables
+ /// by default.
+ virtual bool IsUnwindTablesDefault() const = 0;
+
+ /// GetDefaultRelocationModel - Return the LLVM name of the default
+ /// relocation model for this tool chain.
+ virtual const char *GetDefaultRelocationModel() const = 0;
+
+ /// GetForcedPicModel - Return the LLVM name of the forced PIC model
+ /// for this tool chain, or 0 if this tool chain does not force a
+ /// particular PIC mode.
+ virtual const char *GetForcedPicModel() const = 0;
+
+ /// SupportsProfiling - Does this tool chain support -pg.
+ virtual bool SupportsProfiling() const { return true; }
+
+ /// Does this tool chain support Objective-C garbage collection.
+ virtual bool SupportsObjCGC() const { return true; }
+
+ /// Does this tool chain support Objective-C ARC.
+ virtual bool SupportsObjCARC() const { return true; }
+
+ /// UseDwarfDebugFlags - Embed the compile options to clang into the Dwarf
+ /// compile unit information.
+ virtual bool UseDwarfDebugFlags() const { return false; }
+
+ /// UseSjLjExceptions - Does this tool chain use SjLj exceptions.
+ virtual bool UseSjLjExceptions() const { return false; }
+
+ /// ComputeLLVMTriple - Return the LLVM target triple to use, after taking
+ /// command line arguments into account.
+ virtual std::string ComputeLLVMTriple(const ArgList &Args,
+ types::ID InputType = types::TY_INVALID) const;
+
+ /// ComputeEffectiveClangTriple - Return the Clang triple to use for this
+ /// target, which may take into account the command line arguments. For
+ /// example, on Darwin the -mmacosx-version-min= command line argument (which
+ /// sets the deployment target) determines the version in the triple passed to
+ /// Clang.
+ virtual std::string ComputeEffectiveClangTriple(const ArgList &Args,
+ types::ID InputType = types::TY_INVALID) const;
+
+ /// configureObjCRuntime - Configure the known properties of the
+ /// Objective-C runtime for this platform.
+ ///
+ /// FIXME: this really belongs on some sort of DeploymentTarget abstraction
+ virtual void configureObjCRuntime(ObjCRuntime &runtime) const;
+
+ /// hasBlocksRuntime - Given that the user is compiling with
+ /// -fblocks, does this tool chain guarantee the existence of a
+ /// blocks runtime?
+ ///
+ /// FIXME: this really belongs on some sort of DeploymentTarget abstraction
+ virtual bool hasBlocksRuntime() const { return true; }
+
+ /// \brief Add the clang cc1 arguments for system include paths.
+ ///
+ /// This routine is responsible for adding the necessary cc1 arguments to
+ /// include headers from standard system header directories.
+ virtual void AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const;
+
+ /// GetRuntimeLibType - Determine the runtime library type to use with the
+ /// given compilation arguments.
+ virtual RuntimeLibType GetRuntimeLibType(const ArgList &Args) const;
+
+ /// GetCXXStdlibType - Determine the C++ standard library type to use with the
+ /// given compilation arguments.
+ virtual CXXStdlibType GetCXXStdlibType(const ArgList &Args) const;
+
+ /// AddClangCXXStdlibIncludeArgs - Add the clang -cc1 level arguments to set
+ /// the include paths to use for the given C++ standard library type.
+ virtual void AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const;
+
+ /// AddCXXStdlibLibArgs - Add the system specific linker arguments to use
+ /// for the given C++ standard library type.
+ virtual void AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const;
+
+ /// AddCCKextLibArgs - Add the system specific linker arguments to use
+ /// for kernel extensions (Darwin-specific).
+ virtual void AddCCKextLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const;
+};
+
+} // end namespace driver
+} // end namespace clang
+
+#endif
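
A hypothetical minimal ToolChain only needs to override the four pure-virtual members declared above; everything else falls back to the base-class defaults. The subclass name and return values below are illustrative assumptions, not part of this change:

    // Hypothetical sketch; not part of this header.
    #include "clang/Driver/ToolChain.h"

    namespace clang {
    namespace driver {

    class ExampleToolChain : public ToolChain {
    public:
      ExampleToolChain(const Driver &D, const llvm::Triple &T)
        : ToolChain(D, T) {}

      // Defined out of line in a real tool chain; it would hand back a
      // reference to a cached Tool appropriate for the given job action.
      virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
                               const ActionList &Inputs) const;

      virtual bool IsUnwindTablesDefault() const { return false; }
      virtual const char *GetDefaultRelocationModel() const { return "static"; }
      virtual const char *GetForcedPicModel() const { return 0; }
    };

    } // end namespace driver
    } // end namespace clang
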
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Types.def b/contrib/llvm/tools/clang/include/clang/Driver/Types.def
new file mode 100644
index 0000000..b107dfb
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Types.def
@@ -0,0 +1,93 @@
+//===--- Types.def - Driver Type info ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the driver type information. Users of this file
+// must define the TYPE macro to make use of this information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TYPE
+#error "Define TYPE prior to including this file!"
+#endif
+
+// TYPE(NAME, ID, PP_TYPE, TEMP_SUFFIX, FLAGS)
+
+// The first value is the type name as a string; for types which can
+// be user specified this should be the equivalent -x option.
+
+// The second value is the type id, which will result in a
+// clang::driver::types::TY_XX enum constant.
+
+// The third value is the id of the type for preprocessed inputs of
+// this type, or INVALID if this type is not preprocessed.
+
+// The fourth value is the suffix to use when creating temporary files
+// of this type, or null if unspecified.
+
+// The fifth value is a string containing option flags. Valid values:
+// a - The type should only be assembled.
+// p - The type should only be precompiled.
+// u - The type can be user specified (with -x).
+// A - The type's temporary suffix should be appended when generating
+// outputs of this type.
+
+
+// C family source language (with and without preprocessing).
+TYPE("cpp-output", PP_C, INVALID, "i", "u")
+TYPE("c", C, PP_C, 0, "u")
+TYPE("cl", CL, PP_C, 0, "u")
+TYPE("cuda", CUDA, PP_CXX, 0, "u")
+TYPE("objective-c-cpp-output", PP_ObjC, INVALID, "mi", "u")
+TYPE("objc-cpp-output", PP_ObjC_Alias, INVALID, "mi", "u")
+TYPE("objective-c", ObjC, PP_ObjC, 0, "u")
+TYPE("c++-cpp-output", PP_CXX, INVALID, "ii", "u")
+TYPE("c++", CXX, PP_CXX, 0, "u")
+TYPE("objective-c++-cpp-output", PP_ObjCXX, INVALID, "mii", "u")
+TYPE("objc++-cpp-output", PP_ObjCXX_Alias, INVALID, "mii", "u")
+TYPE("objective-c++", ObjCXX, PP_ObjCXX, 0, "u")
+
+// C family input files to precompile.
+TYPE("c-header-cpp-output", PP_CHeader, INVALID, "i", "p")
+TYPE("c-header", CHeader, PP_CHeader, 0, "pu")
+TYPE("cl-header", CLHeader, PP_CHeader, 0, "pu")
+TYPE("objective-c-header-cpp-output", PP_ObjCHeader, INVALID, "mi", "p")
+TYPE("objective-c-header", ObjCHeader, PP_ObjCHeader, 0, "pu")
+TYPE("c++-header-cpp-output", PP_CXXHeader, INVALID, "ii", "p")
+TYPE("c++-header", CXXHeader, PP_CXXHeader, 0, "pu")
+TYPE("objective-c++-header-cpp-output", PP_ObjCXXHeader, INVALID, "mii", "p")
+TYPE("objective-c++-header", ObjCXXHeader, PP_ObjCXXHeader, 0, "pu")
+
+// Other languages.
+TYPE("ada", Ada, INVALID, 0, "u")
+TYPE("assembler", PP_Asm, INVALID, "s", "au")
+TYPE("assembler-with-cpp", Asm, PP_Asm, 0, "au")
+TYPE("f95", PP_Fortran, INVALID, 0, "u")
+TYPE("f95-cpp-input", Fortran, PP_Fortran, 0, "u")
+TYPE("java", Java, INVALID, 0, "u")
+
+// LLVM IR/LTO types. We define separate types for IR and LTO because LTO
+// outputs should use the standard suffixes.
+TYPE("ir", LLVM_IR, INVALID, "ll", "u")
+TYPE("ir", LLVM_BC, INVALID, "bc", "u")
+TYPE("lto-ir", LTO_IR, INVALID, "s", "")
+TYPE("lto-bc", LTO_BC, INVALID, "o", "")
+
+// Misc.
+TYPE("ast", AST, INVALID, "ast", "u")
+TYPE("plist", Plist, INVALID, "plist", "")
+TYPE("rewritten-objc", RewrittenObjC,INVALID, "cpp", "")
+TYPE("rewritten-legacy-objc", RewrittenLegacyObjC,INVALID, "cpp", "")
+TYPE("remap", Remap, INVALID, "remap", "")
+TYPE("precompiled-header", PCH, INVALID, "gch", "A")
+TYPE("object", Object, INVALID, "o", "")
+TYPE("treelang", Treelang, INVALID, 0, "u")
+TYPE("image", Image, INVALID, "out", "")
+TYPE("dSYM", dSYM, INVALID, "dSYM", "A")
+TYPE("dependencies", Dependencies, INVALID, "d", "")
+TYPE("none", Nothing, INVALID, 0, "u")
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Types.h b/contrib/llvm/tools/clang/include/clang/Driver/Types.h
new file mode 100644
index 0000000..9187529
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Types.h
@@ -0,0 +1,96 @@
+//===--- Types.h - Input & Temporary Driver Types ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_TYPES_H_
+#define CLANG_DRIVER_TYPES_H_
+
+#include "clang/Driver/Phases.h"
+
+namespace clang {
+namespace driver {
+namespace types {
+ enum ID {
+ TY_INVALID,
+#define TYPE(NAME, ID, PP_TYPE, TEMP_SUFFIX, FLAGS) TY_##ID,
+#include "clang/Driver/Types.def"
+#undef TYPE
+ TY_LAST
+ };
+
+ /// getTypeName - Return the name of the type for \arg Id.
+ const char *getTypeName(ID Id);
+
+ /// getPreprocessedType - Get the ID of the type for this input when
+ /// it has been preprocessed, or INVALID if this input is not
+ /// preprocessed.
+ ID getPreprocessedType(ID Id);
+
+ /// getTypeTempSuffix - Return the suffix to use when creating a
+ /// temp file of this type, or null if unspecified.
+ const char *getTypeTempSuffix(ID Id);
+
+ /// onlyAssembleType - Should this type only be assembled.
+ bool onlyAssembleType(ID Id);
+
+ /// onlyPrecompileType - Should this type only be precompiled.
+ bool onlyPrecompileType(ID Id);
+
+ /// canTypeBeUserSpecified - Can this type be specified on the
+ /// command line (by the type name); this is used when forwarding
+ /// commands to gcc.
+ bool canTypeBeUserSpecified(ID Id);
+
+ /// appendSuffixForType - When generating outputs of this type,
+ /// should the suffix be appended (instead of replacing the existing
+ /// suffix).
+ bool appendSuffixForType(ID Id);
+
+ /// canLipoType - Is this type acceptable as the output of a
+ /// universal build (currently, just the Nothing, Image, and Object
+ /// types).
+ bool canLipoType(ID Id);
+
+ /// isAcceptedByClang - Can clang handle this input type.
+ bool isAcceptedByClang(ID Id);
+
+ /// isOnlyAcceptedByClang - Is clang the only compiler that can handle this
+ /// input type.
+ bool isOnlyAcceptedByClang(ID Id);
+
+ /// isCXX - Is this a "C++" input (C++ and Obj-C++ sources and headers).
+ bool isCXX(ID Id);
+
+ /// isObjC - Is this an "ObjC" input (Obj-C and Obj-C++ sources and headers).
+ bool isObjC(ID Id);
+
+ /// lookupTypeForExtension - Lookup the type to use for the file
+ /// extension \arg Ext.
+ ID lookupTypeForExtension(const char *Ext);
+
+ /// lookupTypeForTypeSpecifier - Lookup the type to use for a user-specified
+ /// type name.
+ ID lookupTypeForTypeSpecifier(const char *Name);
+
+ /// getNumCompilationPhases - Return the complete number of phases
+ /// to be done for this type.
+ unsigned getNumCompilationPhases(ID Id);
+
+ /// getCompilationPhase - Return the \arg N'th compilation phase to
+ /// be done for this type.
+ phases::ID getCompilationPhase(ID Id, unsigned N);
+
+ /// lookupCXXTypeForCType - Lookup the C++ input type that corresponds to a
+ /// given C type (used for clang++ emulation of g++ behaviour).
+ ID lookupCXXTypeForCType(ID Id);
+
+} // end namespace types
+} // end namespace driver
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Driver/Util.h b/contrib/llvm/tools/clang/include/clang/Driver/Util.h
new file mode 100644
index 0000000..65aef4b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Driver/Util.h
@@ -0,0 +1,28 @@
+//===--- Util.h - Common Driver Utilities -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_DRIVER_UTIL_H_
+#define CLANG_DRIVER_UTIL_H_
+
+#include "clang/Basic/LLVM.h"
+
+namespace clang {
+namespace driver {
+ class Action;
+
+ /// ArgStringList - Type used for constructing argv lists for subprocesses.
+ typedef SmallVector<const char*, 16> ArgStringList;
+
+ /// ActionList - Type used for lists of actions.
+ typedef SmallVector<Action*, 3> ActionList;
+
+} // end namespace driver
+} // end namespace clang
+
+#endif
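
A small illustrative sketch of ArgStringList as used for subprocess argv construction; the vector stores plain C strings and does not own them (the argument strings below are placeholders):

    // Illustrative only; not part of this header.
    #include "clang/Driver/Util.h"

    static void buildExampleArgv(clang::driver::ArgStringList &Args) {
      // The list does not copy or own its strings, so callers typically
      // push driver-owned or static, null-terminated strings.
      Args.push_back("-o");
      Args.push_back("a.out");
    }
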
diff --git a/contrib/llvm/tools/clang/include/clang/Edit/Commit.h b/contrib/llvm/tools/clang/include/clang/Edit/Commit.h
new file mode 100644
index 0000000..aaf6b18
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Edit/Commit.h
@@ -0,0 +1,140 @@
+//===----- Commit.h - A unit of edits ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EDIT_COMMIT_H
+#define LLVM_CLANG_EDIT_COMMIT_H
+
+#include "clang/Edit/FileOffset.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+ class LangOptions;
+ class PreprocessingRecord;
+
+namespace edit {
+ class EditedSource;
+
+class Commit {
+public:
+ enum EditKind {
+ Act_Insert,
+ Act_InsertFromRange,
+ Act_Remove
+ };
+
+ struct Edit {
+ EditKind Kind;
+ StringRef Text;
+ SourceLocation OrigLoc;
+ FileOffset Offset;
+ FileOffset InsertFromRangeOffs;
+ unsigned Length;
+ bool BeforePrev;
+
+ SourceLocation getFileLocation(SourceManager &SM) const;
+ CharSourceRange getFileRange(SourceManager &SM) const;
+ CharSourceRange getInsertFromRange(SourceManager &SM) const;
+ };
+
+private:
+ const SourceManager &SourceMgr;
+ const LangOptions &LangOpts;
+ const PreprocessingRecord *PPRec;
+ EditedSource *Editor;
+
+ bool IsCommitable;
+ SmallVector<Edit, 8> CachedEdits;
+
+public:
+ explicit Commit(EditedSource &Editor);
+ Commit(const SourceManager &SM, const LangOptions &LangOpts,
+ const PreprocessingRecord *PPRec = 0)
+ : SourceMgr(SM), LangOpts(LangOpts), PPRec(PPRec), Editor(0),
+ IsCommitable(true) { }
+
+ bool isCommitable() const { return IsCommitable; }
+
+ bool insert(SourceLocation loc, StringRef text, bool afterToken = false,
+ bool beforePreviousInsertions = false);
+ bool insertAfterToken(SourceLocation loc, StringRef text,
+ bool beforePreviousInsertions = false) {
+ return insert(loc, text, /*afterToken=*/true, beforePreviousInsertions);
+ }
+ bool insertBefore(SourceLocation loc, StringRef text) {
+ return insert(loc, text, /*afterToken=*/false,
+ /*beforePreviousInsertions=*/true);
+ }
+ bool insertFromRange(SourceLocation loc, CharSourceRange range,
+ bool afterToken = false,
+ bool beforePreviousInsertions = false);
+ bool insertWrap(StringRef before, CharSourceRange range, StringRef after);
+
+ bool remove(CharSourceRange range);
+
+ bool replace(CharSourceRange range, StringRef text);
+ bool replaceWithInner(CharSourceRange range, CharSourceRange innerRange);
+ bool replaceText(SourceLocation loc, StringRef text,
+ StringRef replacementText);
+
+ bool insertFromRange(SourceLocation loc, SourceRange TokenRange,
+ bool afterToken = false,
+ bool beforePreviousInsertions = false) {
+ return insertFromRange(loc, CharSourceRange::getTokenRange(TokenRange),
+ afterToken, beforePreviousInsertions);
+ }
+ bool insertWrap(StringRef before, SourceRange TokenRange, StringRef after) {
+ return insertWrap(before, CharSourceRange::getTokenRange(TokenRange), after);
+ }
+ bool remove(SourceRange TokenRange) {
+ return remove(CharSourceRange::getTokenRange(TokenRange));
+ }
+ bool replace(SourceRange TokenRange, StringRef text) {
+ return replace(CharSourceRange::getTokenRange(TokenRange), text);
+ }
+ bool replaceWithInner(SourceRange TokenRange, SourceRange TokenInnerRange) {
+ return replaceWithInner(CharSourceRange::getTokenRange(TokenRange),
+ CharSourceRange::getTokenRange(TokenInnerRange));
+ }
+
+ typedef SmallVector<Edit, 8>::const_iterator edit_iterator;
+ edit_iterator edit_begin() const { return CachedEdits.begin(); }
+ edit_iterator edit_end() const { return CachedEdits.end(); }
+
+private:
+ void addInsert(SourceLocation OrigLoc,
+ FileOffset Offs, StringRef text, bool beforePreviousInsertions);
+ void addInsertFromRange(SourceLocation OrigLoc, FileOffset Offs,
+ FileOffset RangeOffs, unsigned RangeLen,
+ bool beforePreviousInsertions);
+ void addRemove(SourceLocation OrigLoc, FileOffset Offs, unsigned Len);
+
+ bool canInsert(SourceLocation loc, FileOffset &Offset);
+ bool canInsertAfterToken(SourceLocation loc, FileOffset &Offset,
+ SourceLocation &AfterLoc);
+ bool canInsertInOffset(SourceLocation OrigLoc, FileOffset Offs);
+ bool canRemoveRange(CharSourceRange range, FileOffset &Offs, unsigned &Len);
+ bool canReplaceText(SourceLocation loc, StringRef text,
+ FileOffset &Offs, unsigned &Len);
+
+ void commitInsert(FileOffset offset, StringRef text,
+ bool beforePreviousInsertions);
+ void commitRemove(FileOffset offset, unsigned length);
+
+ bool isAtStartOfMacroExpansion(SourceLocation loc,
+ SourceLocation *MacroBegin = 0) const;
+ bool isAtEndOfMacroExpansion(SourceLocation loc,
+ SourceLocation *MacroEnd = 0) const;
+};
+
+}
+
+} // end namespace clang
+
+#endif
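
A hedged usage sketch of Commit: queue an insertion and a replacement, then check whether the batch is still commitable. The location and range arguments are assumed to come from an already-configured source manager; the helper name is hypothetical:

    // Hypothetical sketch; not part of this header.
    #include "clang/Edit/Commit.h"

    static bool queueEdits(clang::edit::Commit &C,
                           clang::SourceLocation Loc,
                           clang::CharSourceRange Range) {
      C.insertBefore(Loc, "/*inserted*/ ");  // goes before earlier pending inserts
      C.replace(Range, "newText");           // replace a character range
      // A commit that hit an invalid location or macro issue reports itself
      // as not commitable and should be discarded by the caller.
      return C.isCommitable();
    }
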
diff --git a/contrib/llvm/tools/clang/include/clang/Edit/EditedSource.h b/contrib/llvm/tools/clang/include/clang/Edit/EditedSource.h
new file mode 100644
index 0000000..c685753
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Edit/EditedSource.h
@@ -0,0 +1,87 @@
+//===----- EditedSource.h - Collection of source edits ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EDIT_EDITEDSOURCE_H
+#define LLVM_CLANG_EDIT_EDITEDSOURCE_H
+
+#include "clang/Edit/FileOffset.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include <map>
+
+namespace clang {
+ class LangOptions;
+ class PreprocessingRecord;
+
+namespace edit {
+ class Commit;
+ class EditsReceiver;
+
+class EditedSource {
+ const SourceManager &SourceMgr;
+ const LangOptions &LangOpts;
+ const PreprocessingRecord *PPRec;
+
+ struct FileEdit {
+ StringRef Text;
+ unsigned RemoveLen;
+
+ FileEdit() : RemoveLen(0) {}
+ };
+
+ typedef std::map<FileOffset, FileEdit> FileEditsTy;
+ FileEditsTy FileEdits;
+
+ llvm::DenseMap<unsigned, SourceLocation> ExpansionToArgMap;
+
+ llvm::BumpPtrAllocator StrAlloc;
+
+public:
+ EditedSource(const SourceManager &SM, const LangOptions &LangOpts,
+ const PreprocessingRecord *PPRec = 0)
+ : SourceMgr(SM), LangOpts(LangOpts), PPRec(PPRec),
+ StrAlloc(/*size=*/512) { }
+
+ const SourceManager &getSourceManager() const { return SourceMgr; }
+ const LangOptions &getLangOpts() const { return LangOpts; }
+ const PreprocessingRecord *getPreprocessingRecord() const { return PPRec; }
+
+ bool canInsertInOffset(SourceLocation OrigLoc, FileOffset Offs);
+
+ bool commit(const Commit &commit);
+
+ void applyRewrites(EditsReceiver &receiver);
+ void clearRewrites();
+
+ StringRef copyString(StringRef str) {
+ char *buf = StrAlloc.Allocate<char>(str.size());
+ std::memcpy(buf, str.data(), str.size());
+ return StringRef(buf, str.size());
+ }
+ StringRef copyString(const Twine &twine);
+
+private:
+ bool commitInsert(SourceLocation OrigLoc, FileOffset Offs, StringRef text,
+ bool beforePreviousInsertions);
+ bool commitInsertFromRange(SourceLocation OrigLoc, FileOffset Offs,
+ FileOffset InsertFromRangeOffs, unsigned Len,
+ bool beforePreviousInsertions);
+ void commitRemove(SourceLocation OrigLoc, FileOffset BeginOffs, unsigned Len);
+
+ StringRef getSourceText(FileOffset BeginOffs, FileOffset EndOffs,
+ bool &Invalid);
+ FileEditsTy::iterator getActionForOffset(FileOffset Offs);
+};
+
+}
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Edit/EditsReceiver.h b/contrib/llvm/tools/clang/include/clang/Edit/EditsReceiver.h
new file mode 100644
index 0000000..600ac28
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Edit/EditsReceiver.h
@@ -0,0 +1,35 @@
+//===----- EditsReceiver.h - Receiver of source edits -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EDIT_EDITSRECEIVER_H
+#define LLVM_CLANG_EDIT_EDITSRECEIVER_H
+
+#include "clang/Basic/LLVM.h"
+
+namespace clang {
+ class SourceLocation;
+ class CharSourceRange;
+
+namespace edit {
+
+class EditsReceiver {
+public:
+ virtual ~EditsReceiver() { }
+
+ virtual void insert(SourceLocation loc, StringRef text) = 0;
+ virtual void replace(CharSourceRange range, StringRef text) = 0;
+ /// \brief By default it calls replace with an empty string.
+ virtual void remove(CharSourceRange range);
+};
+
+}
+
+} // end namespace clang
+
+#endif
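
Taken together, Commit, EditedSource, and EditsReceiver form a small pipeline: edits are batched in a Commit, folded into an EditedSource, and finally streamed to a receiver. A hypothetical sketch of that flow using only the declarations above (the helper name is an assumption):

    // Hypothetical sketch; not part of these headers.
    #include "clang/Edit/Commit.h"
    #include "clang/Edit/EditedSource.h"
    #include "clang/Edit/EditsReceiver.h"

    static void applyCommit(clang::edit::EditedSource &Editor,
                            const clang::edit::Commit &C,
                            clang::edit::EditsReceiver &Receiver) {
      if (Editor.commit(C))             // false if the batch could not be folded in
        Editor.applyRewrites(Receiver); // receiver gets insert/replace callbacks
    }
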
diff --git a/contrib/llvm/tools/clang/include/clang/Edit/FileOffset.h b/contrib/llvm/tools/clang/include/clang/Edit/FileOffset.h
new file mode 100644
index 0000000..675ad18
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Edit/FileOffset.h
@@ -0,0 +1,65 @@
+//===----- FileOffset.h - Offset in a file ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EDIT_FILEOFFSET_H
+#define LLVM_CLANG_EDIT_FILEOFFSET_H
+
+#include "clang/Basic/SourceLocation.h"
+
+namespace clang {
+
+namespace edit {
+
+class FileOffset {
+ FileID FID;
+ unsigned Offs;
+public:
+ FileOffset() : Offs(0) { }
+ FileOffset(FileID fid, unsigned offs) : FID(fid), Offs(offs) { }
+
+ bool isInvalid() const { return FID.isInvalid(); }
+
+ FileID getFID() const { return FID; }
+ unsigned getOffset() const { return Offs; }
+
+ FileOffset getWithOffset(unsigned offset) const {
+ FileOffset NewOffs = *this;
+ NewOffs.Offs += offset;
+ return NewOffs;
+ }
+
+ friend bool operator==(FileOffset LHS, FileOffset RHS) {
+ return LHS.FID == RHS.FID && LHS.Offs == RHS.Offs;
+ }
+ friend bool operator!=(FileOffset LHS, FileOffset RHS) {
+ return !(LHS == RHS);
+ }
+ friend bool operator<(FileOffset LHS, FileOffset RHS) {
+ if (LHS.FID != RHS.FID)
+ return LHS.FID < RHS.FID;
+ return LHS.Offs < RHS.Offs;
+ }
+ friend bool operator>(FileOffset LHS, FileOffset RHS) {
+ if (LHS.FID != RHS.FID)
+ return LHS.FID > RHS.FID;
+ return LHS.Offs > RHS.Offs;
+ }
+ friend bool operator>=(FileOffset LHS, FileOffset RHS) {
+ return LHS > RHS || LHS == RHS;
+ }
+ friend bool operator<=(FileOffset LHS, FileOffset RHS) {
+ return LHS < RHS || LHS == RHS;
+ }
+};
+
+}
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Edit/Rewriters.h b/contrib/llvm/tools/clang/include/clang/Edit/Rewriters.h
new file mode 100644
index 0000000..aa7a5b2
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Edit/Rewriters.h
@@ -0,0 +1,33 @@
+//===--- Rewriters.h - Rewritings ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_EDIT_REWRITERS_H
+#define LLVM_CLANG_EDIT_REWRITERS_H
+
+namespace clang {
+ class ObjCMessageExpr;
+ class NSAPI;
+
+namespace edit {
+ class Commit;
+
+bool rewriteObjCRedundantCallWithLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
+
+bool rewriteToObjCLiteralSyntax(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
+
+bool rewriteToObjCSubscriptSyntax(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
+
+}
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h b/contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h
new file mode 100644
index 0000000..cef9509
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/ASTConsumers.h
@@ -0,0 +1,57 @@
+//===--- ASTConsumers.h - ASTConsumer implementations -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// AST Consumers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef DRIVER_ASTCONSUMERS_H
+#define DRIVER_ASTCONSUMERS_H
+
+#include "clang/Basic/LLVM.h"
+
+namespace llvm {
+ namespace sys { class Path; }
+}
+namespace clang {
+
+class ASTConsumer;
+class CodeGenOptions;
+class DiagnosticsEngine;
+class FileManager;
+class LangOptions;
+class Preprocessor;
+class TargetOptions;
+
+// AST pretty-printer: prints out the AST in a format that is close to the
+// original C code. The output is intended to be in a format such that
+// clang could re-parse the output back into the same AST, but the
+// implementation is still incomplete.
+ASTConsumer *CreateASTPrinter(raw_ostream *OS);
+
+// AST dumper: dumps the raw AST in human-readable form to stderr; this is
+// intended for debugging.
+ASTConsumer *CreateASTDumper();
+
+// AST XML-dumper: dumps out the AST to stderr in a very detailed XML
+// format; this is intended for particularly intense debugging.
+ASTConsumer *CreateASTDumperXML(raw_ostream &OS);
+
+// Graphical AST viewer: for each function definition, creates a graph of
+// the AST and displays it with the graph viewer "dotty". Also outputs
+// function declarations to stderr.
+ASTConsumer *CreateASTViewer();
+
+// DeclContext printer: prints out the DeclContext tree in human-readable form
+// to stderr; this is intended for debugging.
+ASTConsumer *CreateDeclContextPrinter();
+
+} // end clang namespace
+
+#endif
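
An illustrative one-liner showing how the pretty-printing consumer above might be created, sending output to stdout; wiring the consumer into the frontend is outside this header, and the helper name is hypothetical:

    // Illustrative only; not part of this header.
    #include "clang/Frontend/ASTConsumers.h"
    #include "llvm/Support/raw_ostream.h"

    static clang::ASTConsumer *makePrettyPrinter() {
      return clang::CreateASTPrinter(&llvm::outs());
    }
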
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h b/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h
new file mode 100644
index 0000000..5e4ecad
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/ASTUnit.h
@@ -0,0 +1,801 @@
+//===--- ASTUnit.h - ASTUnit utility ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// ASTUnit utility class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_ASTUNIT_H
+#define LLVM_CLANG_FRONTEND_ASTUNIT_H
+
+#include "clang/Index/ASTLocation.h"
+#include "clang/Serialization/ASTBitCodes.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/CodeCompleteConsumer.h"
+#include "clang/Lex/ModuleLoader.h"
+#include "clang/Lex/PreprocessingRecord.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemOptions.h"
+#include "clang-c/Index.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/Path.h"
+#include <map>
+#include <string>
+#include <vector>
+#include <cassert>
+#include <utility>
+#include <sys/types.h>
+
+namespace llvm {
+ class MemoryBuffer;
+}
+
+namespace clang {
+class ASTContext;
+class ASTReader;
+class CodeCompleteConsumer;
+class CompilerInvocation;
+class CompilerInstance;
+class Decl;
+class DiagnosticsEngine;
+class FileEntry;
+class FileManager;
+class HeaderSearch;
+class Preprocessor;
+class SourceManager;
+class TargetInfo;
+class ASTFrontendAction;
+
+using namespace idx;
+
+/// \brief Utility class for loading an ASTContext from an AST file.
+///
+class ASTUnit : public ModuleLoader {
+private:
+ IntrusiveRefCntPtr<LangOptions> LangOpts;
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diagnostics;
+ IntrusiveRefCntPtr<FileManager> FileMgr;
+ IntrusiveRefCntPtr<SourceManager> SourceMgr;
+ OwningPtr<HeaderSearch> HeaderInfo;
+ IntrusiveRefCntPtr<TargetInfo> Target;
+ IntrusiveRefCntPtr<Preprocessor> PP;
+ IntrusiveRefCntPtr<ASTContext> Ctx;
+ ASTReader *Reader;
+
+ FileSystemOptions FileSystemOpts;
+
+ /// \brief The AST consumer that received information about the translation
+ /// unit as it was parsed or loaded.
+ OwningPtr<ASTConsumer> Consumer;
+
+ /// \brief The semantic analysis object used to type-check the translation
+ /// unit.
+ OwningPtr<Sema> TheSema;
+
+ /// Optional owned invocation, just used to make the invocation used in
+ /// LoadFromCommandLine available.
+ IntrusiveRefCntPtr<CompilerInvocation> Invocation;
+
+ /// \brief The set of target features.
+ ///
+ /// FIXME: each time we reparse, we need to restore the set of target
+ /// features from this vector, because TargetInfo::CreateTargetInfo()
+ /// mangles the target options in place. Yuck!
+ std::vector<std::string> TargetFeatures;
+
+ // OnlyLocalDecls - when true, walking this AST should only visit declarations
+ // that come from the AST itself, not from included precompiled headers.
+ // FIXME: This is temporary; eventually, CIndex will always do this.
+ bool OnlyLocalDecls;
+
+ /// \brief Whether to capture any diagnostics produced.
+ bool CaptureDiagnostics;
+
+ /// \brief Track whether the main file was loaded from an AST or not.
+ bool MainFileIsAST;
+
+ /// \brief What kind of translation unit this AST represents.
+ TranslationUnitKind TUKind;
+
+ /// \brief Whether we should time each operation.
+ bool WantTiming;
+
+ /// \brief Whether the ASTUnit should delete the remapped buffers.
+ bool OwnsRemappedFileBuffers;
+
+ /// Track the top-level decls which appeared in an ASTUnit which was loaded
+ /// from a source file.
+ //
+ // FIXME: This is just an optimization hack to avoid deserializing large parts
+ // of a PCH file when using the Index library on an ASTUnit loaded from
+ // source. In the long term we should make the Index library use efficient and
+ // more scalable search mechanisms.
+ std::vector<Decl*> TopLevelDecls;
+
+ /// \brief Sorted (by file offset) vector of pairs of file offset/Decl.
+ typedef SmallVector<std::pair<unsigned, Decl *>, 64> LocDeclsTy;
+ typedef llvm::DenseMap<FileID, LocDeclsTy *> FileDeclsTy;
+
+ /// \brief Map from FileID to the file-level declarations that it contains.
+ /// The files and decls are only local (and non-preamble) ones.
+ FileDeclsTy FileDecls;
+
+ /// The name of the original source file used to generate this ASTUnit.
+ std::string OriginalSourceFile;
+
+ // Critical optimization when using clang_getCursor().
+ ASTLocation LastLoc;
+
+ /// \brief The set of diagnostics produced when creating the preamble.
+ SmallVector<StoredDiagnostic, 4> PreambleDiagnostics;
+
+ /// \brief The set of diagnostics produced when creating this
+ /// translation unit.
+ SmallVector<StoredDiagnostic, 4> StoredDiagnostics;
+
+ /// \brief The set of diagnostics produced when failing to parse, e.g. due
+ /// to failure to load the PCH.
+ SmallVector<StoredDiagnostic, 4> FailedParseDiagnostics;
+
+ /// \brief The number of stored diagnostics that come from the driver
+ /// itself.
+ ///
+ /// Diagnostics that come from the driver are retained from one parse to
+ /// the next.
+ unsigned NumStoredDiagnosticsFromDriver;
+
+ /// \brief Counter that determines when we want to try building a
+ /// precompiled preamble.
+ ///
+ /// If zero, we will never build a precompiled preamble. Otherwise,
+ /// it's treated as a counter that decrements each time we reparse
+ /// without the benefit of a precompiled preamble. When it hits 1,
+ /// we'll attempt to rebuild the precompiled header. This way, if
+ /// building the precompiled preamble fails, we won't try again for
+ /// some number of calls.
+ unsigned PreambleRebuildCounter;
+
+public:
+ class PreambleData {
+ const FileEntry *File;
+ std::vector<char> Buffer;
+ mutable unsigned NumLines;
+
+ public:
+ PreambleData() : File(0), NumLines(0) { }
+
+ void assign(const FileEntry *F, const char *begin, const char *end) {
+ File = F;
+ Buffer.assign(begin, end);
+ NumLines = 0;
+ }
+
+ void clear() { Buffer.clear(); File = 0; NumLines = 0; }
+
+ size_t size() const { return Buffer.size(); }
+ bool empty() const { return Buffer.empty(); }
+
+ const char *getBufferStart() const { return &Buffer[0]; }
+
+ unsigned getNumLines() const {
+ if (NumLines)
+ return NumLines;
+ countLines();
+ return NumLines;
+ }
+
+ SourceRange getSourceRange(const SourceManager &SM) const {
+ SourceLocation FileLoc = SM.getLocForStartOfFile(SM.getPreambleFileID());
+ return SourceRange(FileLoc, FileLoc.getLocWithOffset(size()-1));
+ }
+
+ private:
+ void countLines() const;
+ };
+
+ const PreambleData &getPreambleData() const {
+ return Preamble;
+ }
+
+private:
+
+ /// \brief The contents of the preamble that has been precompiled to
+ /// \c PreambleFile.
+ PreambleData Preamble;
+
+ /// \brief Whether the preamble ends at the start of a new line.
+ ///
+ /// Used to inform the lexer as to whether it's starting at the beginning of
+ /// a line after skipping the preamble.
+ bool PreambleEndsAtStartOfLine;
+
+ /// \brief The size of the source buffer that we've reserved for the main
+ /// file within the precompiled preamble.
+ unsigned PreambleReservedSize;
+
+ /// \brief Keeps track of the files that were used when computing the
+ /// preamble, with both their buffer size and their modification time.
+ ///
+ /// If any of the files have changed from one compile to the next,
+ /// the preamble must be thrown away.
+ llvm::StringMap<std::pair<off_t, time_t> > FilesInPreamble;
+
+ /// \brief When non-NULL, this is the buffer used to store the contents of
+ /// the main file when it has been padded for use with the precompiled
+ /// preamble.
+ llvm::MemoryBuffer *SavedMainFileBuffer;
+
+ /// \brief When non-NULL, this is the buffer used to store the
+ /// contents of the preamble when it has been padded to build the
+ /// precompiled preamble.
+ llvm::MemoryBuffer *PreambleBuffer;
+
+ /// \brief The number of warnings that occurred while parsing the preamble.
+ ///
+ /// This value will be used to restore the state of the \c DiagnosticsEngine
+ /// object when re-using the precompiled preamble. Note that only the
+ /// number of warnings matters, since we will not save the preamble
+ /// when any errors are present.
+ unsigned NumWarningsInPreamble;
+
+ /// \brief A list of the serialization ID numbers for each of the top-level
+ /// declarations parsed within the precompiled preamble.
+ std::vector<serialization::DeclID> TopLevelDeclsInPreamble;
+
+ /// \brief Whether we should be caching code-completion results.
+ bool ShouldCacheCodeCompletionResults;
+
+ /// \brief The language options used when we load an AST file.
+ LangOptions ASTFileLangOpts;
+
+ static void ConfigureDiags(IntrusiveRefCntPtr<DiagnosticsEngine> &Diags,
+ const char **ArgBegin, const char **ArgEnd,
+ ASTUnit &AST, bool CaptureDiagnostics);
+
+ void TranslateStoredDiagnostics(ASTReader *MMan, StringRef ModName,
+ SourceManager &SrcMan,
+ const SmallVectorImpl<StoredDiagnostic> &Diags,
+ SmallVectorImpl<StoredDiagnostic> &Out);
+
+ void clearFileLevelDecls();
+
+public:
+ /// \brief A cached code-completion result, which may be introduced in one of
+ /// many different contexts.
+ struct CachedCodeCompletionResult {
+ /// \brief The code-completion string corresponding to this completion
+ /// result.
+ CodeCompletionString *Completion;
+
+ /// \brief A bitmask that indicates which code-completion contexts should
+ /// contain this completion result.
+ ///
+ /// The bits in the bitmask correspond to the values of
+ /// CodeCompleteContext::Kind. To map from a completion context kind to a
+ /// bit, subtract one from the completion context kind and shift 1 by that
+ /// number of bits. Many completions can occur in several different
+ /// contexts.
+ unsigned ShowInContexts;
+
+ /// \brief The priority given to this code-completion result.
+ unsigned Priority;
+
+ /// \brief The libclang cursor kind corresponding to this code-completion
+ /// result.
+ CXCursorKind Kind;
+
+ /// \brief The availability of this code-completion result.
+ CXAvailabilityKind Availability;
+
+ /// \brief The simplified type class for a non-macro completion result.
+ SimplifiedTypeClass TypeClass;
+
+ /// \brief The type of a non-macro completion result, stored as a unique
+ /// integer used by the string map of cached completion types.
+ ///
+ /// This value will be zero if the type is not known, or a unique value
+ /// determined by the formatted type string. See \c CachedCompletionTypes
+ /// for more information.
+ unsigned Type;
+ };
+
+ /// \brief Retrieve the mapping from formatted type names to unique type
+ /// identifiers.
+ llvm::StringMap<unsigned> &getCachedCompletionTypes() {
+ return CachedCompletionTypes;
+ }
+
+ /// \brief Retrieve the allocator used to cache global code completions.
+ IntrusiveRefCntPtr<GlobalCodeCompletionAllocator>
+ getCachedCompletionAllocator() {
+ return CachedCompletionAllocator;
+ }
+
+ CodeCompletionTUInfo &getCodeCompletionTUInfo() {
+ if (!CCTUInfo)
+ CCTUInfo.reset(new CodeCompletionTUInfo(
+ new GlobalCodeCompletionAllocator));
+ return *CCTUInfo;
+ }
+
+private:
+ /// \brief Allocator used to store cached code completions.
+ IntrusiveRefCntPtr<GlobalCodeCompletionAllocator>
+ CachedCompletionAllocator;
+
+ OwningPtr<CodeCompletionTUInfo> CCTUInfo;
+
+ /// \brief The set of cached code-completion results.
+ std::vector<CachedCodeCompletionResult> CachedCompletionResults;
+
+ /// \brief A mapping from the formatted type name to a unique number for that
+ /// type, which is used for type equality comparisons.
+ llvm::StringMap<unsigned> CachedCompletionTypes;
+
+ /// \brief A string hash of the top-level declaration and macro definition
+ /// names processed the last time that we reparsed the file.
+ ///
+ /// This hash value is used to determine when we need to refresh the
+ /// global code-completion cache.
+ unsigned CompletionCacheTopLevelHashValue;
+
+ /// \brief A string hash of the top-level declaration and macro definition
+ /// names processed the last time that we reparsed the precompiled preamble.
+ ///
+ /// This hash value is used to determine when we need to refresh the
+ /// global code-completion cache after a rebuild of the precompiled preamble.
+ unsigned PreambleTopLevelHashValue;
+
+ /// \brief The current hash value for the top-level declaration and macro
+ /// definition names.
+ unsigned CurrentTopLevelHashValue;
+
+ /// \brief Bit used by CIndex to mark when a translation unit may be in an
+ /// inconsistent state, and is not safe to free.
+ unsigned UnsafeToFree : 1;
+
+ /// \brief Cache any "global" code-completion results, so that we can avoid
+ /// recomputing them with each completion.
+ void CacheCodeCompletionResults();
+
+ /// \brief Clear out and deallocate
+ void ClearCachedCompletionResults();
+
+ ASTUnit(const ASTUnit&); // DO NOT IMPLEMENT
+ ASTUnit &operator=(const ASTUnit &); // DO NOT IMPLEMENT
+
+ explicit ASTUnit(bool MainFileIsAST);
+
+ void CleanTemporaryFiles();
+ bool Parse(llvm::MemoryBuffer *OverrideMainBuffer);
+
+ std::pair<llvm::MemoryBuffer *, std::pair<unsigned, bool> >
+ ComputePreamble(CompilerInvocation &Invocation,
+ unsigned MaxLines, bool &CreatedBuffer);
+
+ llvm::MemoryBuffer *getMainBufferWithPrecompiledPreamble(
+ const CompilerInvocation &PreambleInvocationIn,
+ bool AllowRebuild = true,
+ unsigned MaxLines = 0);
+ void RealizeTopLevelDeclsFromPreamble();
+
+ /// \brief Transfers ownership of the objects (like SourceManager) from
+ /// \param CI to this ASTUnit.
+ void transferASTDataFromCompilerInstance(CompilerInstance &CI);
+
+ /// \brief Allows us to assert that ASTUnit is not being used concurrently,
+ /// which is not supported.
+ ///
+ /// Clients should create instances of the ConcurrencyCheck class whenever
+ /// using the ASTUnit in a way that isn't intended to be concurrent, which is
+ /// just about any usage.
+ /// Becomes a no-op in release mode; only useful for debug-mode checking.
+ class ConcurrencyState {
+ void *Mutex; // a llvm::sys::MutexImpl in debug;
+
+ public:
+ ConcurrencyState();
+ ~ConcurrencyState();
+
+ void start();
+ void finish();
+ };
+ ConcurrencyState ConcurrencyCheckValue;
+
+public:
+ class ConcurrencyCheck {
+ ASTUnit &Self;
+
+ public:
+ explicit ConcurrencyCheck(ASTUnit &Self)
+ : Self(Self)
+ {
+ Self.ConcurrencyCheckValue.start();
+ }
+ ~ConcurrencyCheck() {
+ Self.ConcurrencyCheckValue.finish();
+ }
+ };
+ friend class ConcurrencyCheck;
+
+ ~ASTUnit();
+
+ bool isMainFileAST() const { return MainFileIsAST; }
+
+ bool isUnsafeToFree() const { return UnsafeToFree; }
+ void setUnsafeToFree(bool Value) { UnsafeToFree = Value; }
+
+ const DiagnosticsEngine &getDiagnostics() const { return *Diagnostics; }
+ DiagnosticsEngine &getDiagnostics() { return *Diagnostics; }
+
+ const SourceManager &getSourceManager() const { return *SourceMgr; }
+ SourceManager &getSourceManager() { return *SourceMgr; }
+
+ const Preprocessor &getPreprocessor() const { return *PP; }
+ Preprocessor &getPreprocessor() { return *PP; }
+
+ const ASTContext &getASTContext() const { return *Ctx; }
+ ASTContext &getASTContext() { return *Ctx; }
+
+ void setASTContext(ASTContext *ctx) { Ctx = ctx; }
+ void setPreprocessor(Preprocessor *pp);
+
+ bool hasSema() const { return TheSema; }
+ Sema &getSema() const {
+ assert(TheSema && "ASTUnit does not have a Sema object!");
+ return *TheSema;
+ }
+
+ const FileManager &getFileManager() const { return *FileMgr; }
+ FileManager &getFileManager() { return *FileMgr; }
+
+ const FileSystemOptions &getFileSystemOpts() const { return FileSystemOpts; }
+
+ const std::string &getOriginalSourceFileName();
+
+ /// \brief Add a temporary file that the ASTUnit depends on.
+ ///
+ /// This file will be erased when the ASTUnit is destroyed.
+ void addTemporaryFile(const llvm::sys::Path &TempFile);
+
+ bool getOnlyLocalDecls() const { return OnlyLocalDecls; }
+
+ bool getOwnsRemappedFileBuffers() const { return OwnsRemappedFileBuffers; }
+ void setOwnsRemappedFileBuffers(bool val) { OwnsRemappedFileBuffers = val; }
+
+ void setLastASTLocation(ASTLocation ALoc) { LastLoc = ALoc; }
+ ASTLocation getLastASTLocation() const { return LastLoc; }
+
+
+ StringRef getMainFileName() const;
+
+ typedef std::vector<Decl *>::iterator top_level_iterator;
+
+ top_level_iterator top_level_begin() {
+ assert(!isMainFileAST() && "Invalid call for AST based ASTUnit!");
+ if (!TopLevelDeclsInPreamble.empty())
+ RealizeTopLevelDeclsFromPreamble();
+ return TopLevelDecls.begin();
+ }
+
+ top_level_iterator top_level_end() {
+ assert(!isMainFileAST() && "Invalid call for AST based ASTUnit!");
+ if (!TopLevelDeclsInPreamble.empty())
+ RealizeTopLevelDeclsFromPreamble();
+ return TopLevelDecls.end();
+ }
+
+ std::size_t top_level_size() const {
+ assert(!isMainFileAST() && "Invalid call for AST based ASTUnit!");
+ return TopLevelDeclsInPreamble.size() + TopLevelDecls.size();
+ }
+
+ bool top_level_empty() const {
+ assert(!isMainFileAST() && "Invalid call for AST based ASTUnit!");
+ return TopLevelDeclsInPreamble.empty() && TopLevelDecls.empty();
+ }
+
+ /// \brief Add a new top-level declaration.
+ void addTopLevelDecl(Decl *D) {
+ TopLevelDecls.push_back(D);
+ }
+
+ /// \brief Add a new local file-level declaration.
+ void addFileLevelDecl(Decl *D);
+
+ /// \brief Get the decls that are contained in a file in the Offset/Length
+ /// range. \arg Length can be 0 to indicate a point at \arg Offset instead of
+ /// a range.
+ void findFileRegionDecls(FileID File, unsigned Offset, unsigned Length,
+ SmallVectorImpl<Decl *> &Decls);
+
+ /// \brief Add a new top-level declaration, identified by its ID in
+ /// the precompiled preamble.
+ void addTopLevelDeclFromPreamble(serialization::DeclID D) {
+ TopLevelDeclsInPreamble.push_back(D);
+ }
+
+ /// \brief Retrieve a reference to the current top-level name hash value.
+ ///
+ /// Note: This is used internally by the top-level tracking action
+ unsigned &getCurrentTopLevelHashValue() { return CurrentTopLevelHashValue; }
+
+ /// \brief Get the source location for the given file:line:col triplet.
+ ///
+ /// The difference with SourceManager::getLocation is that this method checks
+ /// whether the requested location points inside the precompiled preamble
+ /// in which case the returned source location will be a "loaded" one.
+ SourceLocation getLocation(const FileEntry *File,
+ unsigned Line, unsigned Col) const;
+
+ /// \brief Get the source location for the given file:offset pair.
+ SourceLocation getLocation(const FileEntry *File, unsigned Offset) const;
+
+ /// \brief If \arg Loc is a loaded location from the preamble, returns
+ /// the corresponding local location of the main file, otherwise it returns
+ /// \arg Loc.
+ SourceLocation mapLocationFromPreamble(SourceLocation Loc);
+
+ /// \brief If \arg Loc is a local location of the main file but inside the
+ /// preamble chunk, returns the corresponding loaded location from the
+ /// preamble, otherwise it returns \arg Loc.
+ SourceLocation mapLocationToPreamble(SourceLocation Loc);
+
+ bool isInPreambleFileID(SourceLocation Loc);
+ bool isInMainFileID(SourceLocation Loc);
+ SourceLocation getStartOfMainFileID();
+ SourceLocation getEndOfPreambleFileID();
+
+ /// \brief \see mapLocationFromPreamble.
+ SourceRange mapRangeFromPreamble(SourceRange R) {
+ return SourceRange(mapLocationFromPreamble(R.getBegin()),
+ mapLocationFromPreamble(R.getEnd()));
+ }
+
+ /// \brief \see mapLocationToPreamble.
+ SourceRange mapRangeToPreamble(SourceRange R) {
+ return SourceRange(mapLocationToPreamble(R.getBegin()),
+ mapLocationToPreamble(R.getEnd()));
+ }
+
+ // Retrieve the diagnostics associated with this AST
+ typedef StoredDiagnostic *stored_diag_iterator;
+ typedef const StoredDiagnostic *stored_diag_const_iterator;
+ stored_diag_const_iterator stored_diag_begin() const {
+ return StoredDiagnostics.begin();
+ }
+ stored_diag_iterator stored_diag_begin() {
+ return StoredDiagnostics.begin();
+ }
+ stored_diag_const_iterator stored_diag_end() const {
+ return StoredDiagnostics.end();
+ }
+ stored_diag_iterator stored_diag_end() {
+ return StoredDiagnostics.end();
+ }
+ unsigned stored_diag_size() const { return StoredDiagnostics.size(); }
+
+ stored_diag_iterator stored_diag_afterDriver_begin() {
+ if (NumStoredDiagnosticsFromDriver > StoredDiagnostics.size())
+ NumStoredDiagnosticsFromDriver = 0;
+ return StoredDiagnostics.begin() + NumStoredDiagnosticsFromDriver;
+ }
+
+ typedef std::vector<CachedCodeCompletionResult>::iterator
+ cached_completion_iterator;
+
+ cached_completion_iterator cached_completion_begin() {
+ return CachedCompletionResults.begin();
+ }
+
+ cached_completion_iterator cached_completion_end() {
+ return CachedCompletionResults.end();
+ }
+
+ unsigned cached_completion_size() const {
+ return CachedCompletionResults.size();
+ }
+
+ llvm::MemoryBuffer *getBufferForFile(StringRef Filename,
+ std::string *ErrorStr = 0);
+
+ /// \brief Determine what kind of translation unit this AST represents.
+ TranslationUnitKind getTranslationUnitKind() const { return TUKind; }
+
+ typedef llvm::PointerUnion<const char *, const llvm::MemoryBuffer *>
+ FilenameOrMemBuf;
+ /// \brief A mapping from a file name to the memory buffer that stores the
+ /// remapped contents of that file.
+ typedef std::pair<std::string, FilenameOrMemBuf> RemappedFile;
+
+  /// \brief Create an ASTUnit. Takes ownership of the passed CompilerInvocation.
+ static ASTUnit *create(CompilerInvocation *CI,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ bool CaptureDiagnostics = false);
+
+  /// \brief Create an ASTUnit from an AST file.
+ ///
+ /// \param Filename - The AST file to load.
+ ///
+ /// \param Diags - The diagnostics engine to use for reporting errors; its
+ /// lifetime is expected to extend past that of the returned ASTUnit.
+ ///
+ /// \returns - The initialized ASTUnit or null if the AST failed to load.
+ static ASTUnit *LoadFromASTFile(const std::string &Filename,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ const FileSystemOptions &FileSystemOpts,
+ bool OnlyLocalDecls = false,
+ RemappedFile *RemappedFiles = 0,
+ unsigned NumRemappedFiles = 0,
+ bool CaptureDiagnostics = false,
+ bool AllowPCHWithCompilerErrors = false);
+
+private:
+ /// \brief Helper function for \c LoadFromCompilerInvocation() and
+ /// \c LoadFromCommandLine(), which loads an AST from a compiler invocation.
+ ///
+ /// \param PrecompilePreamble Whether to precompile the preamble of this
+ /// translation unit, to improve the performance of reparsing.
+ ///
+ /// \returns \c true if a catastrophic failure occurred (which means that the
+ /// \c ASTUnit itself is invalid), or \c false otherwise.
+ bool LoadFromCompilerInvocation(bool PrecompilePreamble);
+
+public:
+
+ /// \brief Create an ASTUnit from a source file, via a CompilerInvocation
+ /// object, by invoking the optionally provided ASTFrontendAction.
+ ///
+ /// \param CI - The compiler invocation to use; it must have exactly one input
+ /// source file. The ASTUnit takes ownership of the CompilerInvocation object.
+ ///
+ /// \param Diags - The diagnostics engine to use for reporting errors; its
+ /// lifetime is expected to extend past that of the returned ASTUnit.
+ ///
+  /// \param Action - The ASTFrontendAction to invoke. Its ownership is not
+  /// transferred.
+
+  /// \param Unit - Optionally, an already created ASTUnit. Its ownership is not
+  /// transferred.
+ ///
+  /// \param Persistent - If true, the returned ASTUnit will be complete;
+  /// if false, the caller is only interested in getting information through
+  /// the provided \see Action.
+ ///
+  /// \param ErrAST - If non-null and parsing failed without any AST to return
+  /// (e.g. because the PCH could not be loaded), this accepts the ASTUnit,
+  /// mainly to allow the caller to see the diagnostics.
+  /// It only receives an ASTUnit if a new one was created; if an already
+  /// created ASTUnit was passed in \param Unit, the caller can inspect that.
+ ///
+ static ASTUnit *LoadFromCompilerInvocationAction(CompilerInvocation *CI,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ ASTFrontendAction *Action = 0,
+ ASTUnit *Unit = 0,
+ bool Persistent = true,
+ StringRef ResourceFilesPath = StringRef(),
+ bool OnlyLocalDecls = false,
+ bool CaptureDiagnostics = false,
+ bool PrecompilePreamble = false,
+ bool CacheCodeCompletionResults = false,
+ OwningPtr<ASTUnit> *ErrAST = 0);
+
+ /// LoadFromCompilerInvocation - Create an ASTUnit from a source file, via a
+ /// CompilerInvocation object.
+ ///
+ /// \param CI - The compiler invocation to use; it must have exactly one input
+ /// source file. The ASTUnit takes ownership of the CompilerInvocation object.
+ ///
+ /// \param Diags - The diagnostics engine to use for reporting errors; its
+ /// lifetime is expected to extend past that of the returned ASTUnit.
+ //
+ // FIXME: Move OnlyLocalDecls, UseBumpAllocator to setters on the ASTUnit, we
+ // shouldn't need to specify them at construction time.
+ static ASTUnit *LoadFromCompilerInvocation(CompilerInvocation *CI,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ bool OnlyLocalDecls = false,
+ bool CaptureDiagnostics = false,
+ bool PrecompilePreamble = false,
+ TranslationUnitKind TUKind = TU_Complete,
+ bool CacheCodeCompletionResults = false);
+
+ /// LoadFromCommandLine - Create an ASTUnit from a vector of command line
+ /// arguments, which must specify exactly one source file.
+ ///
+ /// \param ArgBegin - The beginning of the argument vector.
+ ///
+ /// \param ArgEnd - The end of the argument vector.
+ ///
+ /// \param Diags - The diagnostics engine to use for reporting errors; its
+ /// lifetime is expected to extend past that of the returned ASTUnit.
+ ///
+ /// \param ResourceFilesPath - The path to the compiler resource files.
+ ///
+ /// \param ErrAST - If non-null and parsing failed without any AST to return
+ /// (e.g. because the PCH could not be loaded), this accepts the ASTUnit
+ /// mainly to allow the caller to see the diagnostics.
+ ///
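+  /// A minimal usage sketch (illustrative only, not from the original header;
+  /// Diags is a caller-created DiagnosticsEngine, and the input file and
+  /// resource path are placeholders):
+  ///
+  /// \code
+  ///   const char *Args[] = { "-fsyntax-only", "input.c" };
+  ///   OwningPtr<ASTUnit> AST(
+  ///       ASTUnit::LoadFromCommandLine(Args, Args + 2, Diags,
+  ///                                    "/path/to/clang/resources"));
+  ///   if (AST)
+  ///     unsigned NumDecls = AST->top_level_size();
+  /// \endcode
+  ///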
+ // FIXME: Move OnlyLocalDecls, UseBumpAllocator to setters on the ASTUnit, we
+ // shouldn't need to specify them at construction time.
+ static ASTUnit *LoadFromCommandLine(const char **ArgBegin,
+ const char **ArgEnd,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ StringRef ResourceFilesPath,
+ bool OnlyLocalDecls = false,
+ bool CaptureDiagnostics = false,
+ RemappedFile *RemappedFiles = 0,
+ unsigned NumRemappedFiles = 0,
+ bool RemappedFilesKeepOriginalName = true,
+ bool PrecompilePreamble = false,
+ TranslationUnitKind TUKind = TU_Complete,
+ bool CacheCodeCompletionResults = false,
+ bool AllowPCHWithCompilerErrors = false,
+ bool SkipFunctionBodies = false,
+ OwningPtr<ASTUnit> *ErrAST = 0);
+
+ /// \brief Reparse the source files using the same command-line options that
+ /// were originally used to produce this translation unit.
+ ///
+ /// \returns True if a failure occurred that causes the ASTUnit not to
+ /// contain any translation-unit information, false otherwise.
+ bool Reparse(RemappedFile *RemappedFiles = 0,
+ unsigned NumRemappedFiles = 0);
+
+ /// \brief Perform code completion at the given file, line, and
+ /// column within this translation unit.
+ ///
+ /// \param File The file in which code completion will occur.
+ ///
+ /// \param Line The line at which code completion will occur.
+ ///
+ /// \param Column The column at which code completion will occur.
+ ///
+ /// \param IncludeMacros Whether to include macros in the code-completion
+ /// results.
+ ///
+ /// \param IncludeCodePatterns Whether to include code patterns (such as a
+ /// for loop) in the code-completion results.
+ ///
+ /// FIXME: The Diag, LangOpts, SourceMgr, FileMgr, StoredDiagnostics, and
+ /// OwnedBuffers parameters are all disgusting hacks. They will go away.
+ void CodeComplete(StringRef File, unsigned Line, unsigned Column,
+ RemappedFile *RemappedFiles, unsigned NumRemappedFiles,
+ bool IncludeMacros, bool IncludeCodePatterns,
+ CodeCompleteConsumer &Consumer,
+ DiagnosticsEngine &Diag, LangOptions &LangOpts,
+ SourceManager &SourceMgr, FileManager &FileMgr,
+ SmallVectorImpl<StoredDiagnostic> &StoredDiagnostics,
+ SmallVectorImpl<const llvm::MemoryBuffer *> &OwnedBuffers);
+
+ /// \brief Save this translation unit to a file with the given name.
+ ///
+ /// \returns An indication of whether the save was successful or not.
+ CXSaveError Save(StringRef File);
+
+ /// \brief Serialize this translation unit with the given output stream.
+ ///
+ /// \returns True if an error occurred, false otherwise.
+ bool serialize(raw_ostream &OS);
+
+ virtual Module *loadModule(SourceLocation ImportLoc, ModuleIdPath Path,
+ Module::NameVisibilityKind Visibility,
+ bool IsInclusionDirective) {
+ // ASTUnit doesn't know how to load modules (not that this matters).
+ return 0;
+ }
+};
+
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def b/contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def
new file mode 100644
index 0000000..b5b9394
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/Analyses.def
@@ -0,0 +1,65 @@
+//===-- Analyses.def - Metadata about Static Analyses -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the set of static analyses used by AnalysisConsumer.
+//
+//===----------------------------------------------------------------------===//
+
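+// Clients consume this file via the X-macro pattern: define the macro(s) of
+// interest, include the file, and let each entry expand.  A minimal sketch of
+// that pattern (AnalyzerOptions.h in this same import is one such consumer):
+//
+//   #define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN) NAME##Model,
+//   enum AnalysisStores {
+//   #include "clang/Frontend/Analyses.def"
+//   NumStores
+//   };
+//
+// Any macro left undefined expands to nothing via the defaults below, and all
+// macros are #undef'd at the end of this file.
+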
+#ifndef ANALYSIS_STORE
+#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN)
+#endif
+
+ANALYSIS_STORE(RegionStore, "region", "Use region-based analyzer store", CreateRegionStoreManager)
+
+#ifndef ANALYSIS_CONSTRAINTS
+#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATFN)
+#endif
+
+ANALYSIS_CONSTRAINTS(BasicConstraints, "basic", "Use basic constraint tracking", CreateBasicConstraintManager)
+ANALYSIS_CONSTRAINTS(RangeConstraints, "range", "Use constraint tracking of concrete value ranges", CreateRangeConstraintManager)
+
+#ifndef ANALYSIS_DIAGNOSTICS
+#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN, AUTOCREATE)
+#endif
+
+ANALYSIS_DIAGNOSTICS(HTML, "html", "Output analysis results using HTML", createHTMLDiagnosticConsumer, false)
+ANALYSIS_DIAGNOSTICS(PLIST, "plist", "Output analysis results using Plists", createPlistDiagnosticConsumer, true)
+ANALYSIS_DIAGNOSTICS(PLIST_MULTI_FILE, "plist-multi-file", "Output analysis results using Plists (allowing for multi-file bugs)", createPlistMultiFileDiagnosticConsumer, true)
+ANALYSIS_DIAGNOSTICS(PLIST_HTML, "plist-html", "Output analysis results using HTML wrapped with Plists", createPlistHTMLDiagnosticConsumer, true)
+ANALYSIS_DIAGNOSTICS(TEXT, "text", "Text output of analysis results", createTextPathDiagnosticConsumer, true)
+
+#ifndef ANALYSIS_PURGE
+#define ANALYSIS_PURGE(NAME, CMDFLAG, DESC)
+#endif
+
+ANALYSIS_PURGE(PurgeStmt, "statement", "Purge symbols, bindings, and constraints before every statement")
+ANALYSIS_PURGE(PurgeBlock, "block", "Purge symbols, bindings, and constraints before every basic block")
+ANALYSIS_PURGE(PurgeNone, "none", "Do not purge symbols, bindings, or constraints")
+
+#ifndef ANALYSIS_IPA
+#define ANALYSIS_IPA(NAME, CMDFLAG, DESC)
+#endif
+
+ANALYSIS_IPA(None, "none", "Perform only intra-procedural analysis")
+ANALYSIS_IPA(Inlining, "inlining", "Experimental: Inline callees when their definitions are available")
+
+#ifndef ANALYSIS_INLINING_MODE
+#define ANALYSIS_INLINING_MODE(NAME, CMDFLAG, DESC)
+#endif
+
+ANALYSIS_INLINING_MODE(All, "all", "Analyze all functions in the order defined in the TU")
+ANALYSIS_INLINING_MODE(NoRedundancy, "noredundancy", "Do not analyze a function which has been previously inlined, use call graph to order")
+
+#undef ANALYSIS_STORE
+#undef ANALYSIS_CONSTRAINTS
+#undef ANALYSIS_DIAGNOSTICS
+#undef ANALYSIS_PURGE
+#undef ANALYSIS_INLINING_MODE
+#undef ANALYSIS_IPA
+
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h
new file mode 100644
index 0000000..847bfbd
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/AnalyzerOptions.h
@@ -0,0 +1,137 @@
+//===--- AnalyzerOptions.h - Analysis Engine Options ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header contains the structures necessary for a front-end to specify
+// various analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_ANALYZEROPTIONS_H
+#define LLVM_CLANG_FRONTEND_ANALYZEROPTIONS_H
+
+#include <string>
+#include <vector>
+
+namespace clang {
+class ASTConsumer;
+class DiagnosticsEngine;
+class Preprocessor;
+class LangOptions;
+
+/// Analysis - Set of available source code analyses.
+enum Analyses {
+#define ANALYSIS(NAME, CMDFLAG, DESC, SCOPE) NAME,
+#include "clang/Frontend/Analyses.def"
+NumAnalyses
+};
+
+/// AnalysisStores - Set of available analysis store models.
+enum AnalysisStores {
+#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN) NAME##Model,
+#include "clang/Frontend/Analyses.def"
+NumStores
+};
+
+/// AnalysisConstraints - Set of available constraint models.
+enum AnalysisConstraints {
+#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATFN) NAME##Model,
+#include "clang/Frontend/Analyses.def"
+NumConstraints
+};
+
+/// AnalysisDiagClients - Set of available diagnostic clients for rendering
+/// analysis results.
+enum AnalysisDiagClients {
+#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATFN, AUTOCREAT) PD_##NAME,
+#include "clang/Frontend/Analyses.def"
+NUM_ANALYSIS_DIAG_CLIENTS
+};
+
+/// AnalysisPurgeMode - Set of available strategies for dead symbol removal.
+enum AnalysisPurgeMode {
+#define ANALYSIS_PURGE(NAME, CMDFLAG, DESC) NAME,
+#include "clang/Frontend/Analyses.def"
+NumPurgeModes
+};
+
+/// AnalysisIPAMode - Set of inter-procedural modes.
+enum AnalysisIPAMode {
+#define ANALYSIS_IPA(NAME, CMDFLAG, DESC) NAME,
+#include "clang/Frontend/Analyses.def"
+NumIPAModes
+};
+
+/// AnalysisInliningMode - Set of inlining function selection heuristics.
+enum AnalysisInliningMode {
+#define ANALYSIS_INLINING_MODE(NAME, CMDFLAG, DESC) NAME,
+#include "clang/Frontend/Analyses.def"
+NumInliningModes
+};
+
+class AnalyzerOptions {
+public:
+ /// \brief Pair of checker name and enable/disable.
+ std::vector<std::pair<std::string, bool> > CheckersControlList;
+ AnalysisStores AnalysisStoreOpt;
+ AnalysisConstraints AnalysisConstraintsOpt;
+ AnalysisDiagClients AnalysisDiagOpt;
+ AnalysisPurgeMode AnalysisPurgeOpt;
+ AnalysisIPAMode IPAMode;
+ std::string AnalyzeSpecificFunction;
+ unsigned MaxNodes;
+ unsigned MaxLoop;
+ unsigned ShowCheckerHelp : 1;
+ unsigned AnalyzeAll : 1;
+ unsigned AnalyzerDisplayProgress : 1;
+ unsigned AnalyzeNestedBlocks : 1;
+ unsigned EagerlyAssume : 1;
+ unsigned TrimGraph : 1;
+ unsigned VisualizeEGDot : 1;
+ unsigned VisualizeEGUbi : 1;
+ unsigned UnoptimizedCFG : 1;
+ unsigned CFGAddImplicitDtors : 1;
+ unsigned CFGAddInitializers : 1;
+ unsigned EagerlyTrimEGraph : 1;
+ unsigned PrintStats : 1;
+ unsigned NoRetryExhausted : 1;
+ unsigned InlineMaxStackDepth;
+ unsigned InlineMaxFunctionSize;
+ AnalysisInliningMode InliningMode;
+
+public:
+ AnalyzerOptions() {
+ AnalysisStoreOpt = RegionStoreModel;
+ AnalysisConstraintsOpt = RangeConstraintsModel;
+ AnalysisDiagOpt = PD_HTML;
+ AnalysisPurgeOpt = PurgeStmt;
+ IPAMode = Inlining;
+ ShowCheckerHelp = 0;
+ AnalyzeAll = 0;
+ AnalyzerDisplayProgress = 0;
+ AnalyzeNestedBlocks = 0;
+ EagerlyAssume = 0;
+ TrimGraph = 0;
+ VisualizeEGDot = 0;
+ VisualizeEGUbi = 0;
+ UnoptimizedCFG = 0;
+ CFGAddImplicitDtors = 0;
+ CFGAddInitializers = 0;
+ EagerlyTrimEGraph = 0;
+ PrintStats = 0;
+ NoRetryExhausted = 0;
+ // Cap the stack depth at 4 calls (5 stack frames, base + 4 calls).
+ InlineMaxStackDepth = 5;
+ InlineMaxFunctionSize = 200;
+ InliningMode = NoRedundancy;
+ }
+};
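+
+// Illustrative sketch only (not part of the original header): a front end
+// that wants progress output and one extra checker enabled could populate the
+// options roughly as follows; the checker name is a placeholder.
+//
+//   AnalyzerOptions Opts;
+//   Opts.CheckersControlList.push_back(
+//       std::make_pair(std::string("core.DivideZero"), true));
+//   Opts.AnalyzeSpecificFunction = "main";
+//   Opts.AnalyzerDisplayProgress = 1;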
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/ChainedDiagnosticConsumer.h b/contrib/llvm/tools/clang/include/clang/Frontend/ChainedDiagnosticConsumer.h
new file mode 100644
index 0000000..ce2b242
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/ChainedDiagnosticConsumer.h
@@ -0,0 +1,73 @@
+//===- ChainedDiagnosticConsumer.h - Chain Diagnostic Clients ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_CHAINEDDIAGNOSTICCONSUMER_H
+#define LLVM_CLANG_FRONTEND_CHAINEDDIAGNOSTICCONSUMER_H
+
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/OwningPtr.h"
+
+namespace clang {
+class LangOptions;
+
+/// ChainedDiagnosticConsumer - Chain two diagnostic clients so that diagnostics
+/// go to the first client and then the second. The first diagnostic client
+/// should be the "primary" client, and will be used for computing whether the
+/// diagnostics should be included in counts.
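+///
+/// A minimal usage sketch (illustrative only; TextDiagnosticPrinter and
+/// TextDiagnosticBuffer are assumed from elsewhere in the Frontend library,
+/// and DiagOpts/Diags are caller-provided):
+///
+/// \code
+///   DiagnosticConsumer *Chained = new ChainedDiagnosticConsumer(
+///       new TextDiagnosticPrinter(llvm::errs(), DiagOpts),
+///       new TextDiagnosticBuffer());
+///   Diags.setClient(Chained); // Diags now owns both consumers via Chained.
+/// \endcode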
+class ChainedDiagnosticConsumer : public DiagnosticConsumer {
+ virtual void anchor();
+ OwningPtr<DiagnosticConsumer> Primary;
+ OwningPtr<DiagnosticConsumer> Secondary;
+
+public:
+ ChainedDiagnosticConsumer(DiagnosticConsumer *_Primary,
+ DiagnosticConsumer *_Secondary) {
+ Primary.reset(_Primary);
+ Secondary.reset(_Secondary);
+ }
+
+ virtual void BeginSourceFile(const LangOptions &LO,
+ const Preprocessor *PP) {
+ Primary->BeginSourceFile(LO, PP);
+ Secondary->BeginSourceFile(LO, PP);
+ }
+
+ virtual void EndSourceFile() {
+ Secondary->EndSourceFile();
+ Primary->EndSourceFile();
+ }
+
+ virtual void finish() {
+ Secondary->finish();
+ Primary->finish();
+ }
+
+ virtual bool IncludeInDiagnosticCounts() const {
+ return Primary->IncludeInDiagnosticCounts();
+ }
+
+ virtual void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info) {
+ // Default implementation (Warnings/errors count).
+ DiagnosticConsumer::HandleDiagnostic(DiagLevel, Info);
+
+ Primary->HandleDiagnostic(DiagLevel, Info);
+ Secondary->HandleDiagnostic(DiagLevel, Info);
+ }
+
+ DiagnosticConsumer *clone(DiagnosticsEngine &Diags) const {
+ return new ChainedDiagnosticConsumer(Primary->clone(Diags),
+ Secondary->clone(Diags));
+ }
+
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/ChainedIncludesSource.h b/contrib/llvm/tools/clang/include/clang/Frontend/ChainedIncludesSource.h
new file mode 100644
index 0000000..d7119e9
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/ChainedIncludesSource.h
@@ -0,0 +1,75 @@
+//===- ChainedIncludesSource.h - Chained PCHs in Memory ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ChainedIncludesSource class, which converts headers
+// to chained PCHs in memory, mainly used for testing.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_SERIALIZATION_CHAINEDINCLUDESSOURCE_H
+#define LLVM_CLANG_SERIALIZATION_CHAINEDINCLUDESSOURCE_H
+
+#include "clang/Sema/ExternalSemaSource.h"
+#include <vector>
+
+namespace clang {
+ class CompilerInstance;
+
+class ChainedIncludesSource : public ExternalSemaSource {
+public:
+ virtual ~ChainedIncludesSource();
+
+ static ChainedIncludesSource *create(CompilerInstance &CI);
+
+private:
+ ExternalSemaSource &getFinalReader() const { return *FinalReader; }
+
+ std::vector<CompilerInstance *> CIs;
+ OwningPtr<ExternalSemaSource> FinalReader;
+
+
+protected:
+
+//===----------------------------------------------------------------------===//
+// ExternalASTSource interface.
+//===----------------------------------------------------------------------===//
+
+ virtual Decl *GetExternalDecl(uint32_t ID);
+ virtual Selector GetExternalSelector(uint32_t ID);
+ virtual uint32_t GetNumExternalSelectors();
+ virtual Stmt *GetExternalDeclStmt(uint64_t Offset);
+ virtual CXXBaseSpecifier *GetExternalCXXBaseSpecifiers(uint64_t Offset);
+ virtual DeclContextLookupResult
+ FindExternalVisibleDeclsByName(const DeclContext *DC, DeclarationName Name);
+ virtual ExternalLoadResult FindExternalLexicalDecls(const DeclContext *DC,
+ bool (*isKindWeWant)(Decl::Kind),
+ SmallVectorImpl<Decl*> &Result);
+ virtual void CompleteType(TagDecl *Tag);
+ virtual void CompleteType(ObjCInterfaceDecl *Class);
+ virtual void StartedDeserializing();
+ virtual void FinishedDeserializing();
+ virtual void StartTranslationUnit(ASTConsumer *Consumer);
+ virtual void PrintStats();
+
+ /// Return the amount of memory used by memory buffers, breaking down
+ /// by heap-backed versus mmap'ed memory.
+ virtual void getMemoryBufferSizes(MemoryBufferSizes &sizes) const;
+
+//===----------------------------------------------------------------------===//
+// ExternalSemaSource interface.
+//===----------------------------------------------------------------------===//
+
+ virtual void InitializeSema(Sema &S);
+ virtual void ForgetSema();
+ virtual void ReadMethodPool(Selector Sel);
+ virtual bool LookupUnqualified(LookupResult &R, Scope *S);
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h
new file mode 100644
index 0000000..e844f88
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CodeGenOptions.h
@@ -0,0 +1,231 @@
+//===--- CodeGenOptions.h ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CodeGenOptions interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_CODEGENOPTIONS_H
+#define LLVM_CLANG_FRONTEND_CODEGENOPTIONS_H
+
+#include <string>
+#include <vector>
+
+namespace clang {
+
+/// CodeGenOptions - Track various options which control how the code
+/// is optimized and passed to the backend.
+class CodeGenOptions {
+public:
+ enum InliningMethod {
+ NoInlining, // Perform no inlining whatsoever.
+ NormalInlining, // Use the standard function inlining pass.
+ OnlyAlwaysInlining // Only run the always inlining pass.
+ };
+
+ enum ObjCDispatchMethodKind {
+ Legacy = 0,
+ NonLegacy = 1,
+ Mixed = 2
+ };
+
+ unsigned AsmVerbose : 1; /// -dA, -fverbose-asm.
+ unsigned ObjCAutoRefCountExceptions : 1; /// Whether ARC should be EH-safe.
+ unsigned CUDAIsDevice : 1; /// Set when compiling for CUDA device.
+ unsigned CXAAtExit : 1; /// Use __cxa_atexit for calling destructors.
+ unsigned CXXCtorDtorAliases: 1; /// Emit complete ctors/dtors as linker
+ /// aliases to base ctors when possible.
+ unsigned DataSections : 1; /// Set when -fdata-sections is enabled
+ unsigned DebugInfo : 1; /// Should generate debug info (-g).
+ unsigned LimitDebugInfo : 1; /// Limit generated debug info to reduce size.
+ unsigned DisableFPElim : 1; /// Set when -fomit-frame-pointer is enabled.
+ unsigned DisableLLVMOpts : 1; /// Don't run any optimizations, for use in
+ /// getting .bc files that correspond to the
+ /// internal state before optimizations are
+ /// done.
+ unsigned DisableRedZone : 1; /// Set when -mno-red-zone is enabled.
+ unsigned DisableTailCalls : 1; /// Do not emit tail calls.
+ unsigned EmitDeclMetadata : 1; /// Emit special metadata indicating what
+ /// Decl* various IR entities came from. Only
+ /// useful when running CodeGen as a
+ /// subroutine.
+ unsigned EmitGcovArcs : 1; /// Emit coverage data files, aka. GCDA.
+ unsigned EmitGcovNotes : 1; /// Emit coverage "notes" files, aka GCNO.
+ unsigned ForbidGuardVariables : 1; /// Issue errors if C++ guard variables
+ /// are required
+ unsigned FunctionSections : 1; /// Set when -ffunction-sections is enabled
+ unsigned HiddenWeakTemplateVTables : 1; /// Emit weak vtables and RTTI for
+ /// template classes with hidden visibility
+ unsigned HiddenWeakVTables : 1; /// Emit weak vtables, RTTI, and thunks with
+ /// hidden visibility.
+ unsigned InstrumentFunctions : 1; /// Set when -finstrument-functions is
+ /// enabled.
+ unsigned InstrumentForProfiling : 1; /// Set when -pg is enabled
+ unsigned LessPreciseFPMAD : 1; /// Enable less precise MAD instructions to be
+ /// generated.
+ unsigned MergeAllConstants : 1; /// Merge identical constants.
+ unsigned NoCommon : 1; /// Set when -fno-common or C++ is enabled.
+ unsigned NoDwarf2CFIAsm : 1; /// Set when -fno-dwarf2-cfi-asm is enabled.
+ unsigned NoDwarfDirectoryAsm : 1; /// Set when -fno-dwarf-directory-asm is
+ /// enabled.
+ unsigned NoExecStack : 1; /// Set when -Wa,--noexecstack is enabled.
+ unsigned NoGlobalMerge : 1; /// Set when -mno-global-merge is enabled.
+ unsigned NoImplicitFloat : 1; /// Set when -mno-implicit-float is enabled.
+ unsigned NoInfsFPMath : 1; /// Assume FP arguments, results not +-Inf.
+ unsigned NoInline : 1; /// Set when -fno-inline is enabled. Disables
+ /// use of the inline keyword.
+ unsigned NoNaNsFPMath : 1; /// Assume FP arguments, results not NaN.
+ unsigned NoZeroInitializedInBSS : 1; /// -fno-zero-initialized-in-bss
+ unsigned ObjCDispatchMethod : 2; /// Method of Objective-C dispatch to use.
+ unsigned ObjCRuntimeHasARC : 1; /// The target runtime supports ARC natively
+ unsigned ObjCRuntimeHasTerminate : 1; /// The ObjC runtime has objc_terminate
+ unsigned OmitLeafFramePointer : 1; /// Set when -momit-leaf-frame-pointer is
+ /// enabled.
+ unsigned OptimizationLevel : 3; /// The -O[0-4] option specified.
+ unsigned OptimizeSize : 2; /// If -Os (==1) or -Oz (==2) is specified.
+ unsigned RelaxAll : 1; /// Relax all machine code instructions.
+ unsigned RelaxedAliasing : 1; /// Set when -fno-strict-aliasing is enabled.
+ unsigned SaveTempLabels : 1; /// Save temporary labels.
+ unsigned SimplifyLibCalls : 1; /// Set when -fbuiltin is enabled.
+ unsigned SoftFloat : 1; /// -soft-float.
+ unsigned StrictEnums : 1; /// Optimize based on strict enum definition.
+ unsigned TimePasses : 1; /// Set when -ftime-report is enabled.
+ unsigned UnitAtATime : 1; /// Unused. For mirroring GCC optimization
+ /// selection.
+ unsigned UnrollLoops : 1; /// Control whether loops are unrolled.
+ unsigned UnsafeFPMath : 1; /// Allow unsafe floating point optzns.
+ unsigned UnwindTables : 1; /// Emit unwind tables.
+
+ /// Attempt to use register sized accesses to bit-fields in structures, when
+ /// possible.
+ unsigned UseRegisterSizedBitfieldAccess : 1;
+
+ unsigned VerifyModule : 1; /// Control whether the module should be run
+ /// through the LLVM Verifier.
+
+ unsigned StackRealignment : 1; /// Control whether to permit stack
+ /// realignment.
+ unsigned StackAlignment; /// Overrides default stack alignment,
+ /// if not 0.
+
+ /// The code model to use (-mcmodel).
+ std::string CodeModel;
+
+ /// The filename with path we use for coverage files. The extension will be
+ /// replaced.
+ std::string CoverageFile;
+
+ /// Enable additional debugging information.
+ std::string DebugPass;
+
+ /// The string to embed in debug information as the current working directory.
+ std::string DebugCompilationDir;
+
+ /// The string to embed in the debug information for the compile unit, if
+ /// non-empty.
+ std::string DwarfDebugFlags;
+
+ /// The ABI to use for passing floating point arguments.
+ std::string FloatABI;
+
+ /// The float precision limit to use, if non-empty.
+ std::string LimitFloatPrecision;
+
+ /// The name of the bitcode file to link before optzns.
+ std::string LinkBitcodeFile;
+
+ /// The kind of inlining to perform.
+ InliningMethod Inlining;
+
+ /// The user provided name for the "main file", if non-empty. This is useful
+ /// in situations where the input file name does not match the original input
+ /// file, for example with -save-temps.
+ std::string MainFileName;
+
+ /// The name of the relocation model to use.
+ std::string RelocationModel;
+
+ /// If not an empty string, trap intrinsics are lowered to calls to this
+ /// function instead of to trap instructions.
+ std::string TrapFuncName;
+
+ /// A list of command-line options to forward to the LLVM backend.
+ std::vector<std::string> BackendOptions;
+
+ /// The user specified number of registers to be used for integral arguments,
+ /// or 0 if unspecified.
+ unsigned NumRegisterParameters;
+
+public:
+ CodeGenOptions() {
+ AsmVerbose = 0;
+ CUDAIsDevice = 0;
+ CXAAtExit = 1;
+ CXXCtorDtorAliases = 0;
+ DataSections = 0;
+ DebugInfo = 0;
+ LimitDebugInfo = 0;
+ DisableFPElim = 0;
+ DisableLLVMOpts = 0;
+ DisableRedZone = 0;
+ DisableTailCalls = 0;
+ EmitDeclMetadata = 0;
+ EmitGcovArcs = 0;
+ EmitGcovNotes = 0;
+ ForbidGuardVariables = 0;
+ FunctionSections = 0;
+ HiddenWeakTemplateVTables = 0;
+ HiddenWeakVTables = 0;
+ InstrumentFunctions = 0;
+ InstrumentForProfiling = 0;
+ LessPreciseFPMAD = 0;
+ MergeAllConstants = 1;
+ NoCommon = 0;
+ NoDwarf2CFIAsm = 0;
+ NoImplicitFloat = 0;
+ NoInfsFPMath = 0;
+ NoInline = 0;
+ NoNaNsFPMath = 0;
+ NoZeroInitializedInBSS = 0;
+ NumRegisterParameters = 0;
+ ObjCAutoRefCountExceptions = 0;
+ ObjCDispatchMethod = Legacy;
+ ObjCRuntimeHasARC = 0;
+ ObjCRuntimeHasTerminate = 0;
+ OmitLeafFramePointer = 0;
+ OptimizationLevel = 0;
+ OptimizeSize = 0;
+ RelaxAll = 0;
+ RelaxedAliasing = 0;
+ SaveTempLabels = 0;
+ SimplifyLibCalls = 1;
+ SoftFloat = 0;
+ StrictEnums = 0;
+ TimePasses = 0;
+ UnitAtATime = 1;
+ UnrollLoops = 0;
+ UnsafeFPMath = 0;
+ UnwindTables = 0;
+ UseRegisterSizedBitfieldAccess = 0;
+ VerifyModule = 1;
+ StackRealignment = 0;
+ StackAlignment = 0;
+
+ Inlining = NoInlining;
+ RelocationModel = "pic";
+ }
+
+ ObjCDispatchMethodKind getObjCDispatchMethod() const {
+ return ObjCDispatchMethodKind(ObjCDispatchMethod);
+ }
+};
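+
+// Illustrative sketch only (not part of the original header): settings
+// roughly corresponding to "-O2 -g" with normal function inlining.
+//
+//   CodeGenOptions CGOpts;
+//   CGOpts.OptimizationLevel = 2;
+//   CGOpts.DebugInfo = 1;
+//   CGOpts.Inlining = CodeGenOptions::NormalInlining;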
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CommandLineSourceLoc.h b/contrib/llvm/tools/clang/include/clang/Frontend/CommandLineSourceLoc.h
new file mode 100644
index 0000000..c01f91d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CommandLineSourceLoc.h
@@ -0,0 +1,87 @@
+
+//===--- CommandLineSourceLoc.h - Parsing for source locations-*- C++ -*---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Command line parsing for source locations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_COMMANDLINESOURCELOC_H
+#define LLVM_CLANG_FRONTEND_COMMANDLINESOURCELOC_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+
+/// \brief A source location that has been parsed on the command line.
+struct ParsedSourceLocation {
+ std::string FileName;
+ unsigned Line;
+ unsigned Column;
+
+public:
+ /// Construct a parsed source location from a string; the Filename is empty on
+ /// error.
+ static ParsedSourceLocation FromString(StringRef Str) {
+ ParsedSourceLocation PSL;
+ std::pair<StringRef, StringRef> ColSplit = Str.rsplit(':');
+ std::pair<StringRef, StringRef> LineSplit =
+ ColSplit.first.rsplit(':');
+
+ // If both tail splits were valid integers, return success.
+ if (!ColSplit.second.getAsInteger(10, PSL.Column) &&
+ !LineSplit.second.getAsInteger(10, PSL.Line)) {
+ PSL.FileName = LineSplit.first;
+
+ // On the command-line, stdin may be specified via "-". Inside the
+ // compiler, stdin is called "<stdin>".
+ if (PSL.FileName == "-")
+ PSL.FileName = "<stdin>";
+ }
+
+ return PSL;
+ }
+};
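+
+// Illustrative sketch only (not part of the original header): parsing a
+// location string and the fields that result.
+//
+//   ParsedSourceLocation PSL =
+//       ParsedSourceLocation::FromString("foo.c:12:8");
+//   // PSL.FileName == "foo.c", PSL.Line == 12, PSL.Column == 8;
+//   // an unparsable string leaves FileName empty.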
+
+}
+
+namespace llvm {
+ namespace cl {
+ /// \brief Command-line option parser that parses source locations.
+ ///
+ /// Source locations are of the form filename:line:column.
+ template<>
+ class parser<clang::ParsedSourceLocation>
+ : public basic_parser<clang::ParsedSourceLocation> {
+ public:
+ inline bool parse(Option &O, StringRef ArgName, StringRef ArgValue,
+ clang::ParsedSourceLocation &Val);
+ };
+
+ bool
+ parser<clang::ParsedSourceLocation>::
+ parse(Option &O, StringRef ArgName, StringRef ArgValue,
+ clang::ParsedSourceLocation &Val) {
+ using namespace clang;
+
+ Val = ParsedSourceLocation::FromString(ArgValue);
+ if (Val.FileName.empty()) {
+ errs() << "error: "
+ << "source location must be of the form filename:line:column\n";
+ return true;
+ }
+
+ return false;
+ }
+ }
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h
new file mode 100644
index 0000000..1bb7695
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInstance.h
@@ -0,0 +1,664 @@
+//===-- CompilerInstance.h - Clang Compiler Instance ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_COMPILERINSTANCE_H_
+#define LLVM_CLANG_FRONTEND_COMPILERINSTANCE_H_
+
+#include "clang/Frontend/CompilerInvocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/ModuleLoader.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/OwningPtr.h"
+#include <cassert>
+#include <list>
+#include <string>
+#include <utility>
+
+namespace llvm {
+class raw_fd_ostream;
+class Timer;
+}
+
+namespace clang {
+class ASTContext;
+class ASTConsumer;
+class ASTReader;
+class CodeCompleteConsumer;
+class DiagnosticsEngine;
+class DiagnosticConsumer;
+class ExternalASTSource;
+class FileEntry;
+class FileManager;
+class FrontendAction;
+class Module;
+class Preprocessor;
+class Sema;
+class SourceManager;
+class TargetInfo;
+
+/// CompilerInstance - Helper class for managing a single instance of the Clang
+/// compiler.
+///
+/// The CompilerInstance serves two purposes:
+/// (1) It manages the various objects which are necessary to run the compiler,
+/// for example the preprocessor, the target information, and the AST
+/// context.
+/// (2) It provides utility routines for constructing and manipulating the
+/// common Clang objects.
+///
+/// The compiler instance generally owns the instances of all the objects that
+/// it manages. However, clients can still share objects by manually setting
+/// the object and retaking ownership prior to destroying the CompilerInstance.
+///
+/// The compiler instance is intended to simplify clients, but not to lock them
+/// in to the compiler instance for everything. When possible, utility functions
+/// come in two forms: a short form that reuses the CompilerInstance objects,
+/// and a long form that takes explicit instances of any required objects.
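+///
+/// A rough usage sketch (illustrative only; SyntaxOnlyAction is assumed from
+/// clang/Frontend/FrontendActions.h, and Diags, ArgC, ArgV, and the argument
+/// range are caller-provided):
+///
+/// \code
+///   CompilerInvocation *CI = new CompilerInvocation();
+///   CompilerInvocation::CreateFromArgs(*CI, ArgBegin, ArgEnd, Diags);
+///   CompilerInstance Clang;
+///   Clang.setInvocation(CI);          // The instance holds a reference to CI.
+///   Clang.createDiagnostics(ArgC, ArgV);
+///   SyntaxOnlyAction Act;
+///   bool Success = Clang.ExecuteAction(Act);
+/// \endcode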
+class CompilerInstance : public ModuleLoader {
+ /// The options used in this compiler instance.
+ IntrusiveRefCntPtr<CompilerInvocation> Invocation;
+
+ /// The diagnostics engine instance.
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diagnostics;
+
+ /// The target being compiled for.
+ IntrusiveRefCntPtr<TargetInfo> Target;
+
+ /// The file manager.
+ IntrusiveRefCntPtr<FileManager> FileMgr;
+
+ /// The source manager.
+ IntrusiveRefCntPtr<SourceManager> SourceMgr;
+
+ /// The preprocessor.
+ IntrusiveRefCntPtr<Preprocessor> PP;
+
+ /// The AST context.
+ IntrusiveRefCntPtr<ASTContext> Context;
+
+ /// The AST consumer.
+ OwningPtr<ASTConsumer> Consumer;
+
+ /// The code completion consumer.
+ OwningPtr<CodeCompleteConsumer> CompletionConsumer;
+
+ /// \brief The semantic analysis object.
+ OwningPtr<Sema> TheSema;
+
+ /// \brief The frontend timer
+ OwningPtr<llvm::Timer> FrontendTimer;
+
+ /// \brief Non-owning reference to the ASTReader, if one exists.
+ ASTReader *ModuleManager;
+
+  /// \brief The set of top-level modules that have already been loaded,
+  /// along with the module map.
+ llvm::DenseMap<const IdentifierInfo *, Module *> KnownModules;
+
+ /// \brief The location of the module-import keyword for the last module
+ /// import.
+ SourceLocation LastModuleImportLoc;
+
+ /// \brief The result of the last module import.
+ ///
+ Module *LastModuleImportResult;
+
+ /// \brief Holds information about the output file.
+ ///
+  /// If TempFilename is not empty, we must rename it to Filename at the end.
+  /// TempFilename may be empty and Filename non-empty if creating the temporary
+  /// failed.
+ struct OutputFile {
+ std::string Filename;
+ std::string TempFilename;
+ raw_ostream *OS;
+
+ OutputFile(const std::string &filename, const std::string &tempFilename,
+ raw_ostream *os)
+ : Filename(filename), TempFilename(tempFilename), OS(os) { }
+ };
+
+ /// The list of active output files.
+ std::list<OutputFile> OutputFiles;
+
+ void operator=(const CompilerInstance &); // DO NOT IMPLEMENT
+ CompilerInstance(const CompilerInstance&); // DO NOT IMPLEMENT
+public:
+ CompilerInstance();
+ ~CompilerInstance();
+
+ /// @name High-Level Operations
+ /// {
+
+ /// ExecuteAction - Execute the provided action against the compiler's
+ /// CompilerInvocation object.
+ ///
+ /// This function makes the following assumptions:
+ ///
+ /// - The invocation options should be initialized. This function does not
+ /// handle the '-help' or '-version' options, clients should handle those
+ /// directly.
+ ///
+ /// - The diagnostics engine should have already been created by the client.
+ ///
+ /// - No other CompilerInstance state should have been initialized (this is
+ /// an unchecked error).
+ ///
+ /// - Clients should have initialized any LLVM target features that may be
+ /// required.
+ ///
+ /// - Clients should eventually call llvm_shutdown() upon the completion of
+ /// this routine to ensure that any managed objects are properly destroyed.
+ ///
+ /// Note that this routine may write output to 'stderr'.
+ ///
+ /// \param Act - The action to execute.
+ /// \return - True on success.
+ //
+ // FIXME: This function should take the stream to write any debugging /
+ // verbose output to as an argument.
+ //
+ // FIXME: Eliminate the llvm_shutdown requirement, that should either be part
+ // of the context or else not CompilerInstance specific.
+ bool ExecuteAction(FrontendAction &Act);
+
+ /// }
+ /// @name Compiler Invocation and Options
+ /// {
+
+ bool hasInvocation() const { return Invocation != 0; }
+
+ CompilerInvocation &getInvocation() {
+ assert(Invocation && "Compiler instance has no invocation!");
+ return *Invocation;
+ }
+
+ /// setInvocation - Replace the current invocation.
+ void setInvocation(CompilerInvocation *Value);
+
+ /// }
+ /// @name Forwarding Methods
+ /// {
+
+ AnalyzerOptions &getAnalyzerOpts() {
+ return Invocation->getAnalyzerOpts();
+ }
+ const AnalyzerOptions &getAnalyzerOpts() const {
+ return Invocation->getAnalyzerOpts();
+ }
+
+ CodeGenOptions &getCodeGenOpts() {
+ return Invocation->getCodeGenOpts();
+ }
+ const CodeGenOptions &getCodeGenOpts() const {
+ return Invocation->getCodeGenOpts();
+ }
+
+ DependencyOutputOptions &getDependencyOutputOpts() {
+ return Invocation->getDependencyOutputOpts();
+ }
+ const DependencyOutputOptions &getDependencyOutputOpts() const {
+ return Invocation->getDependencyOutputOpts();
+ }
+
+ DiagnosticOptions &getDiagnosticOpts() {
+ return Invocation->getDiagnosticOpts();
+ }
+ const DiagnosticOptions &getDiagnosticOpts() const {
+ return Invocation->getDiagnosticOpts();
+ }
+
+ const FileSystemOptions &getFileSystemOpts() const {
+ return Invocation->getFileSystemOpts();
+ }
+
+ FrontendOptions &getFrontendOpts() {
+ return Invocation->getFrontendOpts();
+ }
+ const FrontendOptions &getFrontendOpts() const {
+ return Invocation->getFrontendOpts();
+ }
+
+ HeaderSearchOptions &getHeaderSearchOpts() {
+ return Invocation->getHeaderSearchOpts();
+ }
+ const HeaderSearchOptions &getHeaderSearchOpts() const {
+ return Invocation->getHeaderSearchOpts();
+ }
+
+ LangOptions &getLangOpts() {
+ return *Invocation->getLangOpts();
+ }
+ const LangOptions &getLangOpts() const {
+ return *Invocation->getLangOpts();
+ }
+
+ PreprocessorOptions &getPreprocessorOpts() {
+ return Invocation->getPreprocessorOpts();
+ }
+ const PreprocessorOptions &getPreprocessorOpts() const {
+ return Invocation->getPreprocessorOpts();
+ }
+
+ PreprocessorOutputOptions &getPreprocessorOutputOpts() {
+ return Invocation->getPreprocessorOutputOpts();
+ }
+ const PreprocessorOutputOptions &getPreprocessorOutputOpts() const {
+ return Invocation->getPreprocessorOutputOpts();
+ }
+
+ TargetOptions &getTargetOpts() {
+ return Invocation->getTargetOpts();
+ }
+ const TargetOptions &getTargetOpts() const {
+ return Invocation->getTargetOpts();
+ }
+
+ /// }
+ /// @name Diagnostics Engine
+ /// {
+
+ bool hasDiagnostics() const { return Diagnostics != 0; }
+
+ /// Get the current diagnostics engine.
+ DiagnosticsEngine &getDiagnostics() const {
+ assert(Diagnostics && "Compiler instance has no diagnostics!");
+ return *Diagnostics;
+ }
+
+ /// setDiagnostics - Replace the current diagnostics engine.
+ void setDiagnostics(DiagnosticsEngine *Value);
+
+ DiagnosticConsumer &getDiagnosticClient() const {
+ assert(Diagnostics && Diagnostics->getClient() &&
+ "Compiler instance has no diagnostic client!");
+ return *Diagnostics->getClient();
+ }
+
+ /// }
+ /// @name Target Info
+ /// {
+
+ bool hasTarget() const { return Target != 0; }
+
+ TargetInfo &getTarget() const {
+ assert(Target && "Compiler instance has no target!");
+ return *Target;
+ }
+
+  /// setTarget - Replace the current target.
+ void setTarget(TargetInfo *Value);
+
+ /// }
+ /// @name File Manager
+ /// {
+
+ bool hasFileManager() const { return FileMgr != 0; }
+
+ /// Return the current file manager to the caller.
+ FileManager &getFileManager() const {
+ assert(FileMgr && "Compiler instance has no file manager!");
+ return *FileMgr;
+ }
+
+ void resetAndLeakFileManager() {
+ FileMgr.resetWithoutRelease();
+ }
+
+ /// setFileManager - Replace the current file manager.
+ void setFileManager(FileManager *Value);
+
+ /// }
+ /// @name Source Manager
+ /// {
+
+ bool hasSourceManager() const { return SourceMgr != 0; }
+
+ /// Return the current source manager.
+ SourceManager &getSourceManager() const {
+ assert(SourceMgr && "Compiler instance has no source manager!");
+ return *SourceMgr;
+ }
+
+ void resetAndLeakSourceManager() {
+ SourceMgr.resetWithoutRelease();
+ }
+
+ /// setSourceManager - Replace the current source manager.
+ void setSourceManager(SourceManager *Value);
+
+ /// }
+ /// @name Preprocessor
+ /// {
+
+ bool hasPreprocessor() const { return PP != 0; }
+
+ /// Return the current preprocessor.
+ Preprocessor &getPreprocessor() const {
+ assert(PP && "Compiler instance has no preprocessor!");
+ return *PP;
+ }
+
+ void resetAndLeakPreprocessor() {
+ PP.resetWithoutRelease();
+ }
+
+ /// Replace the current preprocessor.
+ void setPreprocessor(Preprocessor *Value);
+
+ /// }
+ /// @name ASTContext
+ /// {
+
+ bool hasASTContext() const { return Context != 0; }
+
+ ASTContext &getASTContext() const {
+ assert(Context && "Compiler instance has no AST context!");
+ return *Context;
+ }
+
+ void resetAndLeakASTContext() {
+ Context.resetWithoutRelease();
+ }
+
+ /// setASTContext - Replace the current AST context.
+ void setASTContext(ASTContext *Value);
+
+ /// \brief Replace the current Sema; the compiler instance takes ownership
+ /// of S.
+ void setSema(Sema *S);
+
+ /// }
+ /// @name ASTConsumer
+ /// {
+
+ bool hasASTConsumer() const { return Consumer != 0; }
+
+ ASTConsumer &getASTConsumer() const {
+ assert(Consumer && "Compiler instance has no AST consumer!");
+ return *Consumer;
+ }
+
+ /// takeASTConsumer - Remove the current AST consumer and give ownership to
+ /// the caller.
+ ASTConsumer *takeASTConsumer() { return Consumer.take(); }
+
+ /// setASTConsumer - Replace the current AST consumer; the compiler instance
+ /// takes ownership of \arg Value.
+ void setASTConsumer(ASTConsumer *Value);
+
+ /// }
+ /// @name Semantic analysis
+ /// {
+ bool hasSema() const { return TheSema != 0; }
+
+ Sema &getSema() const {
+ assert(TheSema && "Compiler instance has no Sema object!");
+ return *TheSema;
+ }
+
+ Sema *takeSema() { return TheSema.take(); }
+
+ /// }
+ /// @name Module Management
+ /// {
+
+ ASTReader *getModuleManager() const { return ModuleManager; }
+
+ /// }
+ /// @name Code Completion
+ /// {
+
+ bool hasCodeCompletionConsumer() const { return CompletionConsumer != 0; }
+
+ CodeCompleteConsumer &getCodeCompletionConsumer() const {
+ assert(CompletionConsumer &&
+ "Compiler instance has no code completion consumer!");
+ return *CompletionConsumer;
+ }
+
+ /// takeCodeCompletionConsumer - Remove the current code completion consumer
+ /// and give ownership to the caller.
+ CodeCompleteConsumer *takeCodeCompletionConsumer() {
+ return CompletionConsumer.take();
+ }
+
+ /// setCodeCompletionConsumer - Replace the current code completion consumer;
+ /// the compiler instance takes ownership of \arg Value.
+ void setCodeCompletionConsumer(CodeCompleteConsumer *Value);
+
+ /// }
+ /// @name Frontend timer
+ /// {
+
+ bool hasFrontendTimer() const { return FrontendTimer != 0; }
+
+ llvm::Timer &getFrontendTimer() const {
+ assert(FrontendTimer && "Compiler instance has no frontend timer!");
+ return *FrontendTimer;
+ }
+
+ /// }
+ /// @name Output Files
+ /// {
+
+ /// addOutputFile - Add an output file onto the list of tracked output files.
+ ///
+ /// \param OutFile - The output file info.
+ void addOutputFile(const OutputFile &OutFile);
+
+ /// clearOutputFiles - Clear the output file list, destroying the contained
+ /// output streams.
+ ///
+ /// \param EraseFiles - If true, attempt to erase the files from disk.
+ void clearOutputFiles(bool EraseFiles);
+
+ /// }
+ /// @name Construction Utility Methods
+ /// {
+
+ /// Create the diagnostics engine using the invocation's diagnostic options
+ /// and replace any existing one with it.
+ ///
+ /// Note that this routine also replaces the diagnostic client,
+ /// allocating one if one is not provided.
+ ///
+ /// \param Client If non-NULL, a diagnostic client that will be
+ /// attached to (and, then, owned by) the DiagnosticsEngine inside this AST
+ /// unit.
+ ///
+ /// \param ShouldOwnClient If Client is non-NULL, specifies whether
+ /// the diagnostic object should take ownership of the client.
+ ///
+ /// \param ShouldCloneClient If Client is non-NULL, specifies whether that
+ /// client should be cloned.
+ void createDiagnostics(int Argc, const char* const *Argv,
+ DiagnosticConsumer *Client = 0,
+ bool ShouldOwnClient = true,
+ bool ShouldCloneClient = true);
+
+  /// Create a DiagnosticsEngine object with a TextDiagnosticPrinter.
+ ///
+ /// The \arg Argc and \arg Argv arguments are used only for logging purposes,
+ /// when the diagnostic options indicate that the compiler should output
+ /// logging information.
+ ///
+  /// If no diagnostic client is provided, this creates a
+  /// DiagnosticConsumer that is owned by the returned diagnostic
+  /// object; if used directly, the caller is responsible for
+  /// eventually releasing the returned DiagnosticsEngine's client.
+ ///
+ /// \param Opts - The diagnostic options; note that the created text
+ /// diagnostic object contains a reference to these options and its lifetime
+ /// must extend past that of the diagnostic engine.
+ ///
+ /// \param Client If non-NULL, a diagnostic client that will be
+ /// attached to (and, then, owned by) the returned DiagnosticsEngine
+ /// object.
+ ///
+ /// \param CodeGenOpts If non-NULL, the code gen options in use, which may be
+ /// used by some diagnostics printers (for logging purposes only).
+ ///
+ /// \return The new object on success, or null on failure.
+ static IntrusiveRefCntPtr<DiagnosticsEngine>
+ createDiagnostics(const DiagnosticOptions &Opts, int Argc,
+ const char* const *Argv,
+ DiagnosticConsumer *Client = 0,
+ bool ShouldOwnClient = true,
+ bool ShouldCloneClient = true,
+ const CodeGenOptions *CodeGenOpts = 0);
+
+ /// Create the file manager and replace any existing one with it.
+ void createFileManager();
+
+ /// Create the source manager and replace any existing one with it.
+ void createSourceManager(FileManager &FileMgr);
+
+ /// Create the preprocessor, using the invocation, file, and source managers,
+ /// and replace any existing one with it.
+ void createPreprocessor();
+
+ /// Create the AST context.
+ void createASTContext();
+
+ /// Create an external AST source to read a PCH file and attach it to the AST
+ /// context.
+ void createPCHExternalASTSource(StringRef Path,
+ bool DisablePCHValidation,
+ bool DisableStatCache,
+ bool AllowPCHWithCompilerErrors,
+ void *DeserializationListener);
+
+ /// Create an external AST source to read a PCH file.
+ ///
+ /// \return - The new object on success, or null on failure.
+ static ExternalASTSource *
+ createPCHExternalASTSource(StringRef Path, const std::string &Sysroot,
+ bool DisablePCHValidation,
+ bool DisableStatCache,
+ bool AllowPCHWithCompilerErrors,
+ Preprocessor &PP, ASTContext &Context,
+ void *DeserializationListener, bool Preamble);
+
+ /// Create a code completion consumer using the invocation; note that this
+ /// will cause the source manager to truncate the input source file at the
+ /// completion point.
+ void createCodeCompletionConsumer();
+
+ /// Create a code completion consumer to print code completion results, at
+ /// \arg Filename, \arg Line, and \arg Column, to the given output stream \arg
+ /// OS.
+ static CodeCompleteConsumer *
+ createCodeCompletionConsumer(Preprocessor &PP, const std::string &Filename,
+ unsigned Line, unsigned Column,
+ bool ShowMacros,
+ bool ShowCodePatterns, bool ShowGlobals,
+ raw_ostream &OS);
+
+ /// \brief Create the Sema object to be used for parsing.
+ void createSema(TranslationUnitKind TUKind,
+ CodeCompleteConsumer *CompletionConsumer);
+
+ /// Create the frontend timer and replace any existing one with it.
+ void createFrontendTimer();
+
+ /// Create the default output file (from the invocation's options) and add it
+ /// to the list of tracked output files.
+ ///
+ /// The files created by this function always use temporary files to write to
+ /// their result (that is, the data is written to a temporary file which will
+ /// atomically replace the target output on success).
+ ///
+ /// \return - Null on error.
+ llvm::raw_fd_ostream *
+ createDefaultOutputFile(bool Binary = true, StringRef BaseInput = "",
+ StringRef Extension = "");
+
+ /// Create a new output file and add it to the list of tracked output files,
+ /// optionally deriving the output path name.
+ ///
+ /// \return - Null on error.
+ llvm::raw_fd_ostream *
+ createOutputFile(StringRef OutputPath,
+ bool Binary = true, bool RemoveFileOnSignal = true,
+ StringRef BaseInput = "",
+ StringRef Extension = "",
+ bool UseTemporary = false,
+ bool CreateMissingDirectories = false);
+
+ /// Create a new output file, optionally deriving the output path name.
+ ///
+ /// If \arg OutputPath is empty, then createOutputFile will derive an output
+ /// path location as \arg BaseInput, with any suffix removed, and \arg
+ /// Extension appended. If OutputPath is not stdout and \arg UseTemporary
+ /// is true, createOutputFile will create a new temporary file that must be
+ /// renamed to OutputPath in the end.
+ ///
+ /// \param OutputPath - If given, the path to the output file.
+ /// \param Error [out] - On failure, the error message.
+ /// \param BaseInput - If \arg OutputPath is empty, the input path name to use
+ /// for deriving the output path.
+ /// \param Extension - The extension to use for derived output names.
+ /// \param Binary - The mode to open the file in.
+ /// \param RemoveFileOnSignal - Whether the file should be registered with
+ /// llvm::sys::RemoveFileOnSignal. Note that this is not safe for
+  /// multithreaded use, as the underlying signal mechanism is not reentrant.
+ /// \param UseTemporary - Create a new temporary file that must be renamed to
+ /// OutputPath in the end.
+ /// \param CreateMissingDirectories - When \arg UseTemporary is true, create
+ /// missing directories in the output path.
+ /// \param ResultPathName [out] - If given, the result path name will be
+ /// stored here on success.
+ /// \param TempPathName [out] - If given, the temporary file path name
+ /// will be stored here on success.
+ static llvm::raw_fd_ostream *
+ createOutputFile(StringRef OutputPath, std::string &Error,
+ bool Binary = true, bool RemoveFileOnSignal = true,
+ StringRef BaseInput = "",
+ StringRef Extension = "",
+ bool UseTemporary = false,
+ bool CreateMissingDirectories = false,
+ std::string *ResultPathName = 0,
+ std::string *TempPathName = 0);
+
+ /// }
+ /// @name Initialization Utility Methods
+ /// {
+
+ /// InitializeSourceManager - Initialize the source manager to set InputFile
+ /// as the main file.
+ ///
+ /// \return True on success.
+ bool InitializeSourceManager(StringRef InputFile,
+ SrcMgr::CharacteristicKind Kind = SrcMgr::C_User);
+
+ /// InitializeSourceManager - Initialize the source manager to set InputFile
+ /// as the main file.
+ ///
+ /// \return True on success.
+ static bool InitializeSourceManager(StringRef InputFile,
+ SrcMgr::CharacteristicKind Kind,
+ DiagnosticsEngine &Diags,
+ FileManager &FileMgr,
+ SourceManager &SourceMgr,
+ const FrontendOptions &Opts);
+
+ /// }
+
+ virtual Module *loadModule(SourceLocation ImportLoc, ModuleIdPath Path,
+ Module::NameVisibilityKind Visibility,
+ bool IsInclusionDirective);
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
new file mode 100644
index 0000000..0d2260a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/CompilerInvocation.h
@@ -0,0 +1,221 @@
+//===-- CompilerInvocation.h - Compiler Invocation Helper Data --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_COMPILERINVOCATION_H_
+#define LLVM_CLANG_FRONTEND_COMPILERINVOCATION_H_
+
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/TargetOptions.h"
+#include "clang/Basic/FileSystemOptions.h"
+#include "clang/Frontend/AnalyzerOptions.h"
+#include "clang/Frontend/MigratorOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Frontend/DependencyOutputOptions.h"
+#include "clang/Frontend/DiagnosticOptions.h"
+#include "clang/Frontend/FrontendOptions.h"
+#include "clang/Frontend/HeaderSearchOptions.h"
+#include "clang/Frontend/LangStandard.h"
+#include "clang/Frontend/PreprocessorOptions.h"
+#include "clang/Frontend/PreprocessorOutputOptions.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringMap.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+
+class CompilerInvocation;
+class DiagnosticsEngine;
+
+namespace driver {
+class ArgList;
+}
+
+/// ParseDiagnosticArgs - Fill out Opts based on the options given in Args.
+/// Args must have been created from the OptTable returned by
+/// createCC1OptTable(). When errors are encountered, return false and,
+/// if Diags is non-null, report the error(s).
+bool ParseDiagnosticArgs(DiagnosticOptions &Opts, driver::ArgList &Args,
+ DiagnosticsEngine *Diags = 0);
+
+class CompilerInvocationBase : public RefCountedBase<CompilerInvocation> {
+protected:
+ /// Options controlling the language variant.
+ IntrusiveRefCntPtr<LangOptions> LangOpts;
+public:
+ CompilerInvocationBase();
+
+ CompilerInvocationBase(const CompilerInvocationBase &X);
+
+ LangOptions *getLangOpts() { return LangOpts.getPtr(); }
+ const LangOptions *getLangOpts() const { return LangOpts.getPtr(); }
+};
+
+/// CompilerInvocation - Helper class for holding the data necessary to invoke
+/// the compiler.
+///
+/// This class is designed to represent an abstract "invocation" of the
+/// compiler, including data such as the include paths, the code generation
+/// options, the warning flags, and so on.
+class CompilerInvocation : public CompilerInvocationBase {
+ /// Options controlling the static analyzer.
+ AnalyzerOptions AnalyzerOpts;
+
+ MigratorOptions MigratorOpts;
+
+ /// Options controlling IRgen and the backend.
+ CodeGenOptions CodeGenOpts;
+
+ /// Options controlling dependency output.
+ DependencyOutputOptions DependencyOutputOpts;
+
+ /// Options controlling the diagnostic engine.
+ DiagnosticOptions DiagnosticOpts;
+
+ /// Options controlling file system operations.
+ FileSystemOptions FileSystemOpts;
+
+ /// Options controlling the frontend itself.
+ FrontendOptions FrontendOpts;
+
+ /// Options controlling the #include directive.
+ HeaderSearchOptions HeaderSearchOpts;
+
+ /// Options controlling the preprocessor (aside from #include handling).
+ PreprocessorOptions PreprocessorOpts;
+
+ /// Options controlling preprocessed output.
+ PreprocessorOutputOptions PreprocessorOutputOpts;
+
+ /// Options controlling the target.
+ TargetOptions TargetOpts;
+
+public:
+ CompilerInvocation() {}
+
+ /// @name Utility Methods
+ /// @{
+
+ /// CreateFromArgs - Create a compiler invocation from a list of input
+ /// options. Returns true on success.
+ ///
+ /// \param Res [out] - The resulting invocation.
+ /// \param ArgBegin - The first element in the argument vector.
+ /// \param ArgEnd - The last element in the argument vector.
+ /// \param Diags - The diagnostic engine to use for errors.
+ static bool CreateFromArgs(CompilerInvocation &Res,
+ const char* const *ArgBegin,
+ const char* const *ArgEnd,
+ DiagnosticsEngine &Diags);
+
+ /// GetResourcesPath - Get the directory where the compiler headers
+ /// reside, relative to the compiler binary (found by the passed in
+ /// arguments).
+ ///
+ /// \param Argv0 - The program path (from argv[0]), for finding the builtin
+ /// compiler path.
+ /// \param MainAddr - The address of main (or some other function in the main
+ /// executable), for finding the builtin compiler path.
+ static std::string GetResourcesPath(const char *Argv0, void *MainAddr);
+
+ /// toArgs - Convert the CompilerInvocation to a list of strings suitable for
+ /// passing to CreateFromArgs.
+ void toArgs(std::vector<std::string> &Res);
+
+ /// setLangDefaults - Set language defaults for the given input language and
+ /// language standard in this CompilerInvocation.
+ ///
+ /// \param IK - The input language.
+ /// \param LangStd - The input language standard.
+ void setLangDefaults(InputKind IK,
+ LangStandard::Kind LangStd = LangStandard::lang_unspecified) {
+ setLangDefaults(*getLangOpts(), IK, LangStd);
+ }
+
+ /// setLangDefaults - Set language defaults for the given input language and
+ /// language standard in the given LangOptions object.
+ ///
+ /// \param LangOpts - The LangOptions object to set up.
+ /// \param IK - The input language.
+ /// \param LangStd - The input language standard.
+ static void setLangDefaults(LangOptions &Opts, InputKind IK,
+ LangStandard::Kind LangStd = LangStandard::lang_unspecified);
+
+ /// \brief Retrieve a module hash string that is suitable for uniquely
+ /// identifying the conditions under which the module was built.
+ std::string getModuleHash() const;
+
+ /// @}
+ /// @name Option Subgroups
+ /// @{
+
+ AnalyzerOptions &getAnalyzerOpts() { return AnalyzerOpts; }
+ const AnalyzerOptions &getAnalyzerOpts() const {
+ return AnalyzerOpts;
+ }
+
+ MigratorOptions &getMigratorOpts() { return MigratorOpts; }
+ const MigratorOptions &getMigratorOpts() const {
+ return MigratorOpts;
+ }
+
+ CodeGenOptions &getCodeGenOpts() { return CodeGenOpts; }
+ const CodeGenOptions &getCodeGenOpts() const {
+ return CodeGenOpts;
+ }
+
+ DependencyOutputOptions &getDependencyOutputOpts() {
+ return DependencyOutputOpts;
+ }
+ const DependencyOutputOptions &getDependencyOutputOpts() const {
+ return DependencyOutputOpts;
+ }
+
+ DiagnosticOptions &getDiagnosticOpts() { return DiagnosticOpts; }
+ const DiagnosticOptions &getDiagnosticOpts() const { return DiagnosticOpts; }
+
+ FileSystemOptions &getFileSystemOpts() { return FileSystemOpts; }
+ const FileSystemOptions &getFileSystemOpts() const {
+ return FileSystemOpts;
+ }
+
+ HeaderSearchOptions &getHeaderSearchOpts() { return HeaderSearchOpts; }
+ const HeaderSearchOptions &getHeaderSearchOpts() const {
+ return HeaderSearchOpts;
+ }
+
+ FrontendOptions &getFrontendOpts() { return FrontendOpts; }
+ const FrontendOptions &getFrontendOpts() const {
+ return FrontendOpts;
+ }
+
+ PreprocessorOptions &getPreprocessorOpts() { return PreprocessorOpts; }
+ const PreprocessorOptions &getPreprocessorOpts() const {
+ return PreprocessorOpts;
+ }
+
+ PreprocessorOutputOptions &getPreprocessorOutputOpts() {
+ return PreprocessorOutputOpts;
+ }
+ const PreprocessorOutputOptions &getPreprocessorOutputOpts() const {
+ return PreprocessorOutputOpts;
+ }
+
+ TargetOptions &getTargetOpts() { return TargetOpts; }
+ const TargetOptions &getTargetOpts() const {
+ return TargetOpts;
+ }
+
+ /// @}
+};
+
+} // end namespace clang
+
+#endif
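As a usage illustration (not part of the imported header), CreateFromArgs is normally handed a cc1-style argument range plus a diagnostics engine, and the parsed options are then reachable through the accessors in the Option Subgroups section. Only declarations from this header are used; the argument values themselves are hypothetical.

#include "clang/Frontend/CompilerInvocation.h"

bool configureInvocation(clang::CompilerInvocation &Invocation,
                         const char *const *ArgBegin,
                         const char *const *ArgEnd,
                         clang::DiagnosticsEngine &Diags) {
  // Fills in every option subgroup; errors are reported through Diags and
  // make the call return false.
  if (!clang::CompilerInvocation::CreateFromArgs(Invocation, ArgBegin, ArgEnd,
                                                 Diags))
    return false;

  // Individual subgroups can still be adjusted afterwards.
  Invocation.getFrontendOpts().ShowStats = 1;
  Invocation.getHeaderSearchOpts().UseLibcxx = 1;
  return true;
}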
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/DependencyOutputOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/DependencyOutputOptions.h
new file mode 100644
index 0000000..83976c3
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/DependencyOutputOptions.h
@@ -0,0 +1,56 @@
+//===--- DependencyOutputOptions.h ------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_DEPENDENCYOUTPUTOPTIONS_H
+#define LLVM_CLANG_FRONTEND_DEPENDENCYOUTPUTOPTIONS_H
+
+#include <string>
+#include <vector>
+
+namespace clang {
+
+/// DependencyOutputOptions - Options for controlling the compiler dependency
+/// file generation.
+class DependencyOutputOptions {
+public:
+ unsigned IncludeSystemHeaders : 1; ///< Include system header dependencies.
+ unsigned ShowHeaderIncludes : 1; ///< Show header inclusions (-H).
+ unsigned UsePhonyTargets : 1; ///< Include phony targets for each
+ /// dependency, which can avoid some 'make'
+ /// problems.
+ unsigned AddMissingHeaderDeps : 1; ///< Add missing headers to dependency list
+
+ /// The file to write dependency output to.
+ std::string OutputFile;
+
+ /// The file to write header include output to. This is orthogonal to
+ /// ShowHeaderIncludes (-H) and will include headers mentioned in the
+ /// predefines buffer. If the output file is "-", output will be sent to
+ /// stderr.
+ std::string HeaderIncludeOutputFile;
+
+ /// A list of names to use as the targets in the dependency file; this list
+ /// must contain at least one entry.
+ std::vector<std::string> Targets;
+
+ /// \brief The file to write GraphViz-formatted header dependencies to.
+ std::string DOTOutputFile;
+
+public:
+ DependencyOutputOptions() {
+ IncludeSystemHeaders = 0;
+ ShowHeaderIncludes = 0;
+ UsePhonyTargets = 0;
+ AddMissingHeaderDeps = 0;
+ }
+};
+
+} // end namespace clang
+
+#endif
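A small illustration of how these fields fit together; the field names come from the class above, while the driver flags mentioned in the comments are only an informal analogy.

#include "clang/Frontend/DependencyOutputOptions.h"

clang::DependencyOutputOptions makeDepOpts() {
  clang::DependencyOutputOptions Opts;
  Opts.OutputFile = "main.d";        // where the makefile fragment is written
  Opts.Targets.push_back("main.o");  // the list must contain at least one entry
  Opts.UsePhonyTargets = 1;          // emit a phony rule per header
  Opts.IncludeSystemHeaders = 0;     // roughly -MMD rather than -MD
  return Opts;
}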
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h
new file mode 100644
index 0000000..1c6ba6a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticOptions.h
@@ -0,0 +1,108 @@
+//===--- DiagnosticOptions.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_DIAGNOSTICOPTIONS_H
+#define LLVM_CLANG_FRONTEND_DIAGNOSTICOPTIONS_H
+
+#include "clang/Basic/Diagnostic.h"
+
+#include <string>
+#include <vector>
+
+namespace clang {
+
+/// DiagnosticOptions - Options for controlling the compiler diagnostics
+/// engine.
+class DiagnosticOptions {
+public:
+ unsigned IgnoreWarnings : 1; /// -w
+ unsigned NoRewriteMacros : 1; /// -Wno-rewrite-macros
+ unsigned Pedantic : 1; /// -pedantic
+ unsigned PedanticErrors : 1; /// -pedantic-errors
+ unsigned ShowColumn : 1; /// Show column number on diagnostics.
+ unsigned ShowLocation : 1; /// Show source location information.
+ unsigned ShowCarets : 1; /// Show carets in diagnostics.
+ unsigned ShowFixits : 1; /// Show fixit information.
+ unsigned ShowSourceRanges : 1; /// Show source ranges in numeric form.
+ unsigned ShowParseableFixits : 1; /// Show machine parseable fix-its.
+ unsigned ShowOptionNames : 1; /// Show the option name for mappable
+ /// diagnostics.
+ unsigned ShowNoteIncludeStack : 1; /// Show include stacks for notes.
+ unsigned ShowCategories : 2; /// Show categories: 0 -> none, 1 -> Number,
+ /// 2 -> Full Name.
+
+ unsigned Format : 2; /// Format for diagnostics:
+ enum TextDiagnosticFormat { Clang, Msvc, Vi };
+
+ unsigned ShowColors : 1; /// Show diagnostics with ANSI color sequences.
+ unsigned ShowOverloads : 1; /// Overload candidates to show. Values from
+ /// DiagnosticsEngine::OverloadsShown
+ unsigned VerifyDiagnostics: 1; /// Check that diagnostics match the expected
+ /// diagnostics, indicated by markers in the
+ /// input source file.
+
+ unsigned ErrorLimit; /// Limit # errors emitted.
+ unsigned MacroBacktraceLimit; /// Limit depth of macro expansion backtrace.
+ unsigned TemplateBacktraceLimit; /// Limit depth of instantiation backtrace.
+ unsigned ConstexprBacktraceLimit; /// Limit depth of constexpr backtrace.
+
+ /// The distance between tab stops.
+ unsigned TabStop;
+ enum { DefaultTabStop = 8, MaxTabStop = 100,
+ DefaultMacroBacktraceLimit = 6,
+ DefaultTemplateBacktraceLimit = 10,
+ DefaultConstexprBacktraceLimit = 10 };
+
+ /// Column limit for formatting message diagnostics, or 0 if unused.
+ unsigned MessageLength;
+
+ /// If non-empty, a file to log extended build information to, for development
+ /// testing and analysis.
+ std::string DumpBuildInformation;
+
+ /// The file to log diagnostic output to.
+ std::string DiagnosticLogFile;
+
+ /// The file to serialize diagnostics to (non-appending).
+ std::string DiagnosticSerializationFile;
+
+ /// The list of -W... options used to alter the diagnostic mappings, with the
+ /// prefixes removed.
+ std::vector<std::string> Warnings;
+
+public:
+ DiagnosticOptions() {
+ IgnoreWarnings = 0;
+ TabStop = DefaultTabStop;
+ MessageLength = 0;
+ NoRewriteMacros = 0;
+ Pedantic = 0;
+ PedanticErrors = 0;
+ ShowCarets = 1;
+ ShowColors = 0;
+ ShowOverloads = DiagnosticsEngine::Ovl_All;
+ ShowColumn = 1;
+ ShowFixits = 1;
+ ShowLocation = 1;
+ ShowOptionNames = 0;
+ ShowCategories = 0;
+ Format = Clang;
+ ShowSourceRanges = 0;
+ ShowParseableFixits = 0;
+ VerifyDiagnostics = 0;
+ ErrorLimit = 0;
+ TemplateBacktraceLimit = DefaultTemplateBacktraceLimit;
+ MacroBacktraceLimit = DefaultMacroBacktraceLimit;
+ ConstexprBacktraceLimit = DefaultConstexprBacktraceLimit;
+ }
+};
+
+} // end namespace clang
+
+#endif
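For illustration, a DiagnosticOptions instance configured by hand. Only the fields and the nested TextDiagnosticFormat enum above are used; the mapping to driver flags noted in the comments is an assumption.

#include "clang/Frontend/DiagnosticOptions.h"

clang::DiagnosticOptions makeDiagOpts() {
  clang::DiagnosticOptions Opts;                  // constructor sets the defaults above
  Opts.ShowColors = 1;                            // ANSI color sequences
  Opts.ShowOptionNames = 1;                       // print mappable option names
  Opts.ErrorLimit = 20;                           // stop after 20 errors
  Opts.Format = clang::DiagnosticOptions::Clang;  // vs. Msvc or Vi
  Opts.Warnings.push_back("all");                 // roughly -Wall
  return Opts;
}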
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h
new file mode 100644
index 0000000..5ad88a8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/DiagnosticRenderer.h
@@ -0,0 +1,149 @@
+//===--- DiagnosticRenderer.h - Diagnostic Pretty-Printing ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a utility class that provides support for pretty-printing of
+// diagnostics. It is used to implement the different code paths which require
+// such functionality in a consistent way.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_DIAGNOSTIC_RENDERER_H_
+#define LLVM_CLANG_FRONTEND_DIAGNOSTIC_RENDERER_H_
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/PointerUnion.h"
+
+namespace clang {
+
+class DiagnosticOptions;
+class LangOptions;
+class SourceManager;
+
+typedef llvm::PointerUnion<const Diagnostic *,
+ const StoredDiagnostic *> DiagOrStoredDiag;
+
+/// \brief Class to encapsulate the logic for formatting a diagnostic message.
+/// Actual "printing" logic is implemented by subclasses.
+///
+/// This class provides an interface for building and emitting
+/// diagnostics, including all of the macro backtraces, caret diagnostics, FixIt
+/// Hints, and code snippets. In the presence of macros this involves
+/// a recursive process, synthesizing notes for each macro expansion.
+///
+/// A brief worklist:
+/// FIXME: Sink the recursive printing of template instantiations into this
+/// class.
+class DiagnosticRenderer {
+protected:
+ const SourceManager &SM;
+ const LangOptions &LangOpts;
+ const DiagnosticOptions &DiagOpts;
+
+ /// \brief The location of the previous diagnostic if known.
+ ///
+ /// This will be invalid in cases where there is no (known) previous
+ /// diagnostic location, or that location itself is invalid or comes from
+ /// a different source manager than SM.
+ SourceLocation LastLoc;
+
+ /// \brief The location of the last include whose stack was printed if known.
+ ///
+ /// Same restriction as \see LastLoc essentially, but tracking include stack
+ /// root locations rather than diagnostic locations.
+ SourceLocation LastIncludeLoc;
+
+ /// \brief The level of the last diagnostic emitted.
+ ///
+ /// The level of the last diagnostic emitted. Used to detect level changes
+ /// which change the amount of information displayed.
+ DiagnosticsEngine::Level LastLevel;
+
+ DiagnosticRenderer(const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const DiagnosticOptions &DiagOpts);
+
+ virtual ~DiagnosticRenderer();
+
+ virtual void emitDiagnosticMessage(SourceLocation Loc, PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ ArrayRef<CharSourceRange> Ranges,
+ DiagOrStoredDiag Info) = 0;
+
+ virtual void emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ ArrayRef<CharSourceRange> Ranges) = 0;
+
+ virtual void emitBasicNote(StringRef Message) = 0;
+
+ virtual void emitCodeContext(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange>& Ranges,
+ ArrayRef<FixItHint> Hints) = 0;
+
+ virtual void emitIncludeLocation(SourceLocation Loc, PresumedLoc PLoc) = 0;
+
+ virtual void beginDiagnostic(DiagOrStoredDiag D,
+ DiagnosticsEngine::Level Level) {}
+ virtual void endDiagnostic(DiagOrStoredDiag D,
+ DiagnosticsEngine::Level Level) {}
+
+
+private:
+ void emitIncludeStack(SourceLocation Loc, DiagnosticsEngine::Level Level);
+ void emitIncludeStackRecursively(SourceLocation Loc);
+ void emitMacroExpansionsAndCarets(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange>& Ranges,
+ ArrayRef<FixItHint> Hints,
+ unsigned &MacroDepth,
+ unsigned OnMacroInst = 0);
+public:
+ /// \brief Emit a diagnostic.
+ ///
+ /// This is the primary entry point for emitting diagnostic messages.
+ /// It handles formatting and rendering the message as well as any ancillary
+ /// information needed based on macros whose expansions impact the
+ /// diagnostic.
+ ///
+ /// \param Loc The location for this caret.
+ /// \param Level The level of the diagnostic to be emitted.
+ /// \param Message The diagnostic message to emit.
+ /// \param Ranges The underlined ranges for this code snippet.
+ /// \param FixItHints The FixIt hints active for this diagnostic.
+ void emitDiagnostic(SourceLocation Loc, DiagnosticsEngine::Level Level,
+ StringRef Message, ArrayRef<CharSourceRange> Ranges,
+ ArrayRef<FixItHint> FixItHints,
+ DiagOrStoredDiag D = (Diagnostic *)0);
+
+ void emitStoredDiagnostic(StoredDiagnostic &Diag);
+};
+
+/// Subclass of DiagnosticRenderer that turns all subdiagnostics into explicit
+/// notes. It is up to subclasses to further define the behavior.
+class DiagnosticNoteRenderer : public DiagnosticRenderer {
+public:
+ DiagnosticNoteRenderer(const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const DiagnosticOptions &DiagOpts)
+ : DiagnosticRenderer(SM, LangOpts, DiagOpts) {}
+
+ virtual ~DiagnosticNoteRenderer();
+
+ virtual void emitBasicNote(StringRef Message);
+
+ virtual void emitIncludeLocation(SourceLocation Loc,
+ PresumedLoc PLoc);
+
+ virtual void emitNote(SourceLocation Loc, StringRef Message) = 0;
+};
+} // end clang namespace
+#endif
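To make the shape of the interface concrete, here is a bare-bones DiagnosticNoteRenderer subclass (illustrative only, not from this import) that implements the remaining pure virtual hooks with one-line bodies; a real renderer does considerably more work in each of them.

#include "clang/Frontend/DiagnosticRenderer.h"
#include "llvm/Support/raw_ostream.h"

namespace {
class LineDumpRenderer : public clang::DiagnosticNoteRenderer {
public:
  LineDumpRenderer(const clang::SourceManager &SM,
                   const clang::LangOptions &LangOpts,
                   const clang::DiagnosticOptions &DiagOpts)
    : DiagnosticNoteRenderer(SM, LangOpts, DiagOpts) {}

protected:
  virtual void emitDiagnosticMessage(clang::SourceLocation Loc,
                                     clang::PresumedLoc PLoc,
                                     clang::DiagnosticsEngine::Level Level,
                                     llvm::StringRef Message,
                                     llvm::ArrayRef<clang::CharSourceRange> Ranges,
                                     clang::DiagOrStoredDiag Info) {
    // One line per diagnostic; macro expansion notes arrive here as well.
    if (PLoc.isValid())
      llvm::errs() << PLoc.getFilename() << ":" << PLoc.getLine() << ": ";
    llvm::errs() << Message << "\n";
  }
  virtual void emitDiagnosticLoc(clang::SourceLocation Loc,
                                 clang::PresumedLoc PLoc,
                                 clang::DiagnosticsEngine::Level Level,
                                 llvm::ArrayRef<clang::CharSourceRange> Ranges) {}
  virtual void emitCodeContext(clang::SourceLocation Loc,
                               clang::DiagnosticsEngine::Level Level,
                               llvm::SmallVectorImpl<clang::CharSourceRange> &Ranges,
                               llvm::ArrayRef<clang::FixItHint> Hints) {}
  virtual void emitNote(clang::SourceLocation Loc, llvm::StringRef Message) {
    llvm::errs() << "note: " << Message << "\n";
  }
};
} // end anonymous namespace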
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
new file mode 100644
index 0000000..6839028
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendAction.h
@@ -0,0 +1,277 @@
+//===-- FrontendAction.h - Generic Frontend Action Interface ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_FRONTENDACTION_H
+#define LLVM_CLANG_FRONTEND_FRONTENDACTION_H
+
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Frontend/FrontendOptions.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/OwningPtr.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+class ASTConsumer;
+class ASTMergeAction;
+class ASTUnit;
+class CompilerInstance;
+
+/// FrontendAction - Abstract base class for actions which can be performed by
+/// the frontend.
+class FrontendAction {
+ FrontendInputFile CurrentInput;
+ OwningPtr<ASTUnit> CurrentASTUnit;
+ CompilerInstance *Instance;
+ friend class ASTMergeAction;
+ friend class WrapperFrontendAction;
+
+private:
+ ASTConsumer* CreateWrappedASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+
+protected:
+ /// @name Implementation Action Interface
+ /// @{
+
+ /// CreateASTConsumer - Create the AST consumer object for this action, if
+ /// supported.
+ ///
+ /// This routine is called as part of \see BeginSourceFile(), which will
+ /// fail if the AST consumer cannot be created. This will not be called if the
+ /// action has indicated that it only uses the preprocessor.
+ ///
+ /// \param CI - The current compiler instance, provided as a convenience, \see
+ /// getCompilerInstance().
+ ///
+ /// \param InFile - The current input file, provided as a convenience, \see
+ /// getCurrentFile().
+ ///
+ /// \return The new AST consumer, or 0 on failure.
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) = 0;
+
+ /// \brief Callback before starting processing a single input, giving the
+ /// opportunity to modify the CompilerInvocation or do some other action
+ /// before BeginSourceFileAction is called.
+ ///
+ /// \return True on success; on failure \see BeginSourceFileAction() and
+ /// ExecuteAction() and EndSourceFileAction() will not be called.
+ virtual bool BeginInvocation(CompilerInstance &CI) { return true; }
+
+ /// BeginSourceFileAction - Callback at the start of processing a single
+ /// input.
+ ///
+ /// \return True on success; on failure \see ExecuteAction() and
+ /// EndSourceFileAction() will not be called.
+ virtual bool BeginSourceFileAction(CompilerInstance &CI,
+ StringRef Filename) {
+ return true;
+ }
+
+ /// ExecuteAction - Callback to run the program action, using the initialized
+ /// compiler instance.
+ ///
+ /// This routine is guaranteed to only be called between \see
+ /// BeginSourceFileAction() and \see EndSourceFileAction().
+ virtual void ExecuteAction() = 0;
+
+ /// EndSourceFileAction - Callback at the end of processing a single input;
+ /// this is guaranteed to only be called following a successful call to
+ /// BeginSourceFileAction (and BeginSourceFile).
+ virtual void EndSourceFileAction() {}
+
+ /// @}
+
+public:
+ FrontendAction();
+ virtual ~FrontendAction();
+
+ /// @name Compiler Instance Access
+ /// @{
+
+ CompilerInstance &getCompilerInstance() const {
+ assert(Instance && "Compiler instance not registered!");
+ return *Instance;
+ }
+
+ void setCompilerInstance(CompilerInstance *Value) { Instance = Value; }
+
+ /// @}
+ /// @name Current File Information
+ /// @{
+
+ bool isCurrentFileAST() const {
+ assert(!CurrentInput.File.empty() && "No current file!");
+ return CurrentASTUnit != 0;
+ }
+
+ const FrontendInputFile &getCurrentInput() const {
+ return CurrentInput;
+ }
+
+ const std::string &getCurrentFile() const {
+ assert(!CurrentInput.File.empty() && "No current file!");
+ return CurrentInput.File;
+ }
+
+ InputKind getCurrentFileKind() const {
+ assert(!CurrentInput.File.empty() && "No current file!");
+ return CurrentInput.Kind;
+ }
+
+ ASTUnit &getCurrentASTUnit() const {
+ assert(CurrentASTUnit && "No current AST unit!");
+ return *CurrentASTUnit;
+ }
+
+ ASTUnit *takeCurrentASTUnit() {
+ return CurrentASTUnit.take();
+ }
+
+ void setCurrentInput(const FrontendInputFile &CurrentInput, ASTUnit *AST = 0);
+
+ /// @}
+ /// @name Supported Modes
+ /// @{
+
+ /// usesPreprocessorOnly - Does this action only use the preprocessor? If so
+ /// no AST context will be created and this action will be invalid with AST
+ /// file inputs.
+ virtual bool usesPreprocessorOnly() const = 0;
+
+ /// \brief For AST-based actions, the kind of translation unit we're handling.
+ virtual TranslationUnitKind getTranslationUnitKind() { return TU_Complete; }
+
+ /// hasPCHSupport - Does this action support use with PCH?
+ virtual bool hasPCHSupport() const { return !usesPreprocessorOnly(); }
+
+ /// hasASTFileSupport - Does this action support use with AST files?
+ virtual bool hasASTFileSupport() const { return !usesPreprocessorOnly(); }
+
+ /// hasIRSupport - Does this action support use with IR files?
+ virtual bool hasIRSupport() const { return false; }
+
+ /// hasCodeCompletionSupport - Does this action support use with code
+ /// completion?
+ virtual bool hasCodeCompletionSupport() const { return false; }
+
+ /// @}
+ /// @name Public Action Interface
+ /// @{
+
+ /// BeginSourceFile - Prepare the action for processing the input file \arg
+ /// Input; this is run after the options and frontend have been
+ /// initialized, but prior to executing any per-file processing.
+ ///
+ /// \param CI - The compiler instance this action is being run from. The
+ /// action may store and use this object up until the matching EndSourceFile
+ /// action.
+ ///
+ /// \param Input - The input filename and kind. Some input kinds are handled
+ /// specially, for example AST inputs, since the AST file itself contains
+ /// several objects which would normally be owned by the
+ /// CompilerInstance. When processing AST input files, these objects should
+ /// generally not be initialized in the CompilerInstance -- they will
+ /// automatically be shared with the AST file in between \see
+ /// BeginSourceFile() and \see EndSourceFile().
+ ///
+ /// \return True on success; on failure the compilation of this file should
+ /// be aborted and neither Execute nor EndSourceFile should be called.
+ bool BeginSourceFile(CompilerInstance &CI, const FrontendInputFile &Input);
+
+ /// Execute - Set the source manager's main input file, and run the action.
+ void Execute();
+
+ /// EndSourceFile - Perform any per-file post processing, deallocate per-file
+ /// objects, and run statistics and output file cleanup code.
+ void EndSourceFile();
+
+ /// @}
+};
+
+/// ASTFrontendAction - Abstract base class to use for AST consumer based
+/// frontend actions.
+class ASTFrontendAction : public FrontendAction {
+protected:
+ /// ExecuteAction - Implement the ExecuteAction interface by running Sema on
+ /// the already initialized AST consumer.
+ ///
+ /// This will also take care of instantiating a code completion consumer if
+ /// the user requested it and the action supports it.
+ virtual void ExecuteAction();
+
+public:
+ virtual bool usesPreprocessorOnly() const { return false; }
+};
+
+class PluginASTAction : public ASTFrontendAction {
+ virtual void anchor();
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) = 0;
+
+public:
+ /// ParseArgs - Parse the given plugin command line arguments.
+ ///
+ /// \param CI - The compiler instance, for use in reporting diagnostics.
+ /// \return True if the parsing succeeded; otherwise the plugin will be
+ /// destroyed and no action run. The plugin is responsible for using the
+ /// CompilerInstance's Diagnostic object to report errors.
+ virtual bool ParseArgs(const CompilerInstance &CI,
+ const std::vector<std::string> &arg) = 0;
+};
+
+/// PreprocessorFrontendAction - Abstract base class to use for preprocessor
+/// based frontend actions.
+class PreprocessorFrontendAction : public FrontendAction {
+protected:
+ /// CreateASTConsumer - Provide a default implementation which aborts;
+ /// this method should never be called by FrontendAction clients.
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+
+public:
+ virtual bool usesPreprocessorOnly() const { return true; }
+};
+
+/// WrapperFrontendAction - A frontend action which simply wraps some other
+/// runtime specified frontend action. Deriving from this class allows an
+/// action to inject custom logic around some existing action's behavior. It
+/// implements every virtual method in the FrontendAction interface by
+/// forwarding to the wrapped action.
+class WrapperFrontendAction : public FrontendAction {
+ OwningPtr<FrontendAction> WrappedAction;
+
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+ virtual bool BeginInvocation(CompilerInstance &CI);
+ virtual bool BeginSourceFileAction(CompilerInstance &CI,
+ StringRef Filename);
+ virtual void ExecuteAction();
+ virtual void EndSourceFileAction();
+
+public:
+ /// Construct a WrapperFrontendAction from an existing action, taking
+ /// ownership of it.
+ WrapperFrontendAction(FrontendAction *WrappedAction);
+
+ virtual bool usesPreprocessorOnly() const;
+ virtual TranslationUnitKind getTranslationUnitKind();
+ virtual bool hasPCHSupport() const;
+ virtual bool hasASTFileSupport() const;
+ virtual bool hasIRSupport() const;
+ virtual bool hasCodeCompletionSupport() const;
+};
+
+} // end namespace clang
+
+#endif
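A skeletal ASTFrontendAction, for illustration (not part of this header): CreateASTConsumer is the only mandatory override, and the other two hooks are shown only to make the call order visible. The bare ASTConsumer it returns is assumed from the AST library and does nothing by default.

#include "clang/AST/ASTConsumer.h"
#include "clang/Frontend/FrontendAction.h"
#include "llvm/Support/raw_ostream.h"

namespace {
class NoopAction : public clang::ASTFrontendAction {
protected:
  virtual clang::ASTConsumer *CreateASTConsumer(clang::CompilerInstance &CI,
                                                llvm::StringRef InFile) {
    // A do-nothing consumer: the action just parses the file and runs
    // semantic analysis on it.
    return new clang::ASTConsumer();
  }

  virtual bool BeginSourceFileAction(clang::CompilerInstance &CI,
                                     llvm::StringRef Filename) {
    llvm::errs() << "about to process " << Filename << "\n";
    return true;  // false would skip ExecuteAction and EndSourceFileAction
  }

  virtual void EndSourceFileAction() {
    llvm::errs() << "finished " << getCurrentFile() << "\n";
  }
};
} // end anonymous namespace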
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h
new file mode 100644
index 0000000..8817c5a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendActions.h
@@ -0,0 +1,218 @@
+//===-- FrontendActions.h - Useful Frontend Actions -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_FRONTENDACTIONS_H
+#define LLVM_CLANG_FRONTEND_FRONTENDACTIONS_H
+
+#include "clang/Frontend/FrontendAction.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+
+class Module;
+
+//===----------------------------------------------------------------------===//
+// Custom Consumer Actions
+//===----------------------------------------------------------------------===//
+
+class InitOnlyAction : public FrontendAction {
+ virtual void ExecuteAction();
+
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+
+public:
+ // Don't claim to only use the preprocessor, we want to follow the AST path,
+ // but do nothing.
+ virtual bool usesPreprocessorOnly() const { return false; }
+};
+
+//===----------------------------------------------------------------------===//
+// AST Consumer Actions
+//===----------------------------------------------------------------------===//
+
+class ASTPrintAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+};
+
+class ASTDumpAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+};
+
+class ASTDumpXMLAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+};
+
+class ASTViewAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+};
+
+class DeclContextPrintAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+};
+
+class GeneratePCHAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+
+ virtual TranslationUnitKind getTranslationUnitKind() {
+ return TU_Prefix;
+ }
+
+ virtual bool hasASTFileSupport() const { return false; }
+
+public:
+ /// \brief Compute the AST consumer arguments that will be used to
+ /// create the PCHGenerator instance returned by CreateASTConsumer.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ static bool ComputeASTConsumerArguments(CompilerInstance &CI,
+ StringRef InFile,
+ std::string &Sysroot,
+ std::string &OutputFile,
+ raw_ostream *&OS);
+};
+
+class GenerateModuleAction : public ASTFrontendAction {
+ clang::Module *Module;
+
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+
+ virtual TranslationUnitKind getTranslationUnitKind() {
+ return TU_Module;
+ }
+
+ virtual bool hasASTFileSupport() const { return false; }
+
+public:
+ virtual bool BeginSourceFileAction(CompilerInstance &CI, StringRef Filename);
+
+ /// \brief Compute the AST consumer arguments that will be used to
+ /// create the PCHGenerator instance returned by CreateASTConsumer.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ static bool ComputeASTConsumerArguments(CompilerInstance &CI,
+ StringRef InFile,
+ std::string &Sysroot,
+ std::string &OutputFile,
+ raw_ostream *&OS);
+};
+
+class SyntaxOnlyAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+
+public:
+ virtual bool hasCodeCompletionSupport() const { return true; }
+};
+
+/**
+ * \brief Frontend action adaptor that merges ASTs together.
+ *
+ * This action takes an existing AST file and "merges" it into the AST
+ * context, producing a merged context. This action is an action
+ * adaptor, which forwards most of its calls to another action that
+ * will consume the merged context.
+ */
+class ASTMergeAction : public FrontendAction {
+ /// \brief The action that the merge action adapts.
+ FrontendAction *AdaptedAction;
+
+ /// \brief The set of AST files to merge.
+ std::vector<std::string> ASTFiles;
+
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+
+ virtual bool BeginSourceFileAction(CompilerInstance &CI,
+ StringRef Filename);
+
+ virtual void ExecuteAction();
+ virtual void EndSourceFileAction();
+
+public:
+ ASTMergeAction(FrontendAction *AdaptedAction, ArrayRef<std::string> ASTFiles);
+ virtual ~ASTMergeAction();
+
+ virtual bool usesPreprocessorOnly() const;
+ virtual TranslationUnitKind getTranslationUnitKind();
+ virtual bool hasPCHSupport() const;
+ virtual bool hasASTFileSupport() const;
+ virtual bool hasCodeCompletionSupport() const;
+};
+
+class PrintPreambleAction : public FrontendAction {
+protected:
+ void ExecuteAction();
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &, StringRef) {
+ return 0;
+ }
+
+ virtual bool usesPreprocessorOnly() const { return true; }
+};
+
+class PubnamesDumpAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+
+public:
+ virtual bool hasCodeCompletionSupport() const { return false; }
+};
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Actions
+//===----------------------------------------------------------------------===//
+
+class DumpRawTokensAction : public PreprocessorFrontendAction {
+protected:
+ void ExecuteAction();
+};
+
+class DumpTokensAction : public PreprocessorFrontendAction {
+protected:
+ void ExecuteAction();
+};
+
+class GeneratePTHAction : public PreprocessorFrontendAction {
+protected:
+ void ExecuteAction();
+};
+
+class PreprocessOnlyAction : public PreprocessorFrontendAction {
+protected:
+ void ExecuteAction();
+};
+
+class PrintPreprocessedAction : public PreprocessorFrontendAction {
+protected:
+ void ExecuteAction();
+
+ virtual bool hasPCHSupport() const { return true; }
+};
+
+} // end namespace clang
+
+#endif
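As an illustration of how these actions compose (not from the imported file): ASTMergeAction adapts another action, so a syntax-only check over previously built AST files can be expressed as below. Actually running the returned action through a CompilerInstance is assumed from the rest of the frontend API and is not shown here.

#include "clang/Frontend/FrontendActions.h"
#include "llvm/ADT/ArrayRef.h"
#include <string>

clang::FrontendAction *makeMergingSyntaxCheck(
    llvm::ArrayRef<std::string> ASTFiles) {
  // The merge action takes ownership of the adapted SyntaxOnlyAction and
  // forwards most virtual methods to it after merging the AST files.
  return new clang::ASTMergeAction(new clang::SyntaxOnlyAction(), ASTFiles);
}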
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendDiagnostic.h
new file mode 100644
index 0000000..0b05b74
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendDiagnostic.h
@@ -0,0 +1,28 @@
+//===--- FrontendDiagnostic.h - Diagnostics for frontend --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTENDDIAGNOSTIC_H
+#define LLVM_CLANG_FRONTENDDIAGNOSTIC_H
+
+#include "clang/Basic/Diagnostic.h"
+
+namespace clang {
+ namespace diag {
+ enum {
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
+#define FRONTENDSTART
+#include "clang/Basic/DiagnosticFrontendKinds.inc"
+#undef DIAG
+ NUM_BUILTIN_FRONTEND_DIAGNOSTICS
+ };
+ } // end namespace diag
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h
new file mode 100644
index 0000000..a051d7f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendOptions.h
@@ -0,0 +1,209 @@
+//===--- FrontendOptions.h --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_FRONTENDOPTIONS_H
+#define LLVM_CLANG_FRONTEND_FRONTENDOPTIONS_H
+
+#include "clang/Frontend/CommandLineSourceLoc.h"
+#include "llvm/ADT/StringRef.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+
+namespace frontend {
+ enum ActionKind {
+ ASTDump, ///< Parse ASTs and dump them.
+ ASTDumpXML, ///< Parse ASTs and dump them in XML.
+ ASTPrint, ///< Parse ASTs and print them.
+ ASTView, ///< Parse ASTs and view them in Graphviz.
+ DumpRawTokens, ///< Dump out raw tokens.
+ DumpTokens, ///< Dump out preprocessed tokens.
+ EmitAssembly, ///< Emit a .s file.
+ EmitBC, ///< Emit a .bc file.
+ EmitHTML, ///< Translate input source into HTML.
+ EmitLLVM, ///< Emit a .ll file.
+ EmitLLVMOnly, ///< Generate LLVM IR, but do not emit anything.
+ EmitCodeGenOnly, ///< Generate machine code, but don't emit anything.
+ EmitObj, ///< Emit a .o file.
+ FixIt, ///< Parse and apply any fixits to the source.
+ GenerateModule, ///< Generate pre-compiled module.
+ GeneratePCH, ///< Generate pre-compiled header.
+ GeneratePTH, ///< Generate pre-tokenized header.
+ InitOnly, ///< Only execute frontend initialization.
+ ParseSyntaxOnly, ///< Parse and perform semantic analysis.
+ PluginAction, ///< Run a plugin action, \see ActionName.
+ PrintDeclContext, ///< Print DeclContext and their Decls.
+ PrintPreamble, ///< Print the "preamble" of the input file
+ PrintPreprocessedInput, ///< -E mode.
+ PubnamesDump, ///< Print all of the "public" names in the source.
+ RewriteMacros, ///< Expand macros but not #includes.
+ RewriteObjC, ///< ObjC->C Rewriter.
+ RewriteTest, ///< Rewriter playground
+ RunAnalysis, ///< Run one or more source code analyses.
+ MigrateSource, ///< Run migrator.
+ RunPreprocessorOnly ///< Just lex, no output.
+ };
+}
+
+enum InputKind {
+ IK_None,
+ IK_Asm,
+ IK_C,
+ IK_CXX,
+ IK_ObjC,
+ IK_ObjCXX,
+ IK_PreprocessedC,
+ IK_PreprocessedCXX,
+ IK_PreprocessedObjC,
+ IK_PreprocessedObjCXX,
+ IK_OpenCL,
+ IK_CUDA,
+ IK_AST,
+ IK_LLVM_IR
+};
+
+
+/// \brief An input file for the front end.
+struct FrontendInputFile {
+ /// \brief The file name, or "-" to read from standard input.
+ std::string File;
+
+ /// \brief The kind of input, e.g., C source, AST file, LLVM IR.
+ InputKind Kind;
+
+ /// \brief Whether we're dealing with a 'system' input (vs. a 'user' input).
+ bool IsSystem;
+
+ FrontendInputFile() : Kind(IK_None) { }
+ FrontendInputFile(StringRef File, InputKind Kind, bool IsSystem = false)
+ : File(File.str()), Kind(Kind), IsSystem(IsSystem) { }
+};
+
+/// FrontendOptions - Options for controlling the behavior of the frontend.
+class FrontendOptions {
+public:
+ unsigned DisableFree : 1; ///< Disable memory freeing on exit.
+ unsigned RelocatablePCH : 1; ///< When generating PCH files,
+ /// instruct the AST writer to create
+ /// relocatable PCH files.
+ unsigned ShowHelp : 1; ///< Show the -help text.
+ unsigned ShowMacrosInCodeCompletion : 1; ///< Show macros in code completion
+ /// results.
+ unsigned ShowCodePatternsInCodeCompletion : 1; ///< Show code patterns in code
+ /// completion results.
+ unsigned ShowGlobalSymbolsInCodeCompletion : 1; ///< Show top-level decls in
+ /// code completion results.
+ unsigned ShowStats : 1; ///< Show frontend performance
+ /// metrics and statistics.
+ unsigned ShowTimers : 1; ///< Show timers for individual
+ /// actions.
+ unsigned ShowVersion : 1; ///< Show the -version text.
+ unsigned FixWhatYouCan : 1; ///< Apply fixes even if there are
+ /// unfixable errors.
+ unsigned FixOnlyWarnings : 1; ///< Apply fixes only for warnings.
+ unsigned FixAndRecompile : 1; ///< Apply fixes and recompile.
+ unsigned FixToTemporaries : 1; ///< Apply fixes to temporary files.
+ unsigned ARCMTMigrateEmitARCErrors : 1; /// Emit ARC errors even if the
+ /// migrator can fix them
+ unsigned SkipFunctionBodies : 1; ///< Skip over function bodies to
+ /// speed up parsing in cases you do
+ /// not need them (e.g. with code
+ /// completion).
+
+ enum {
+ ARCMT_None,
+ ARCMT_Check,
+ ARCMT_Modify,
+ ARCMT_Migrate
+ } ARCMTAction;
+
+ enum {
+ ObjCMT_None = 0,
+ /// \brief Enable migration to modern ObjC literals.
+ ObjCMT_Literals = 0x1,
+ /// \brief Enable migration to modern ObjC subscripting.
+ ObjCMT_Subscripting = 0x2
+ };
+ unsigned ObjCMTAction;
+
+ std::string MTMigrateDir;
+ std::string ARCMTMigrateReportOut;
+
+ /// The input files and their types.
+ std::vector<FrontendInputFile> Inputs;
+
+ /// The output file, if any.
+ std::string OutputFile;
+
+ /// If given, the new suffix for fix-it rewritten files.
+ std::string FixItSuffix;
+
+ /// If given, enable code completion at the provided location.
+ ParsedSourceLocation CodeCompletionAt;
+
+ /// The frontend action to perform.
+ frontend::ActionKind ProgramAction;
+
+ /// The name of the action to run when using a plugin action.
+ std::string ActionName;
+
+ /// Args to pass to the plugin
+ std::vector<std::string> PluginArgs;
+
+ /// The list of plugin actions to run in addition to the normal action.
+ std::vector<std::string> AddPluginActions;
+
+ /// Args to pass to the additional plugins
+ std::vector<std::vector<std::string> > AddPluginArgs;
+
+ /// The list of plugins to load.
+ std::vector<std::string> Plugins;
+
+ /// \brief The list of AST files to merge.
+ std::vector<std::string> ASTMergeFiles;
+
+ /// \brief A list of arguments to forward to LLVM's option processing; this
+ /// should only be used for debugging and experimental features.
+ std::vector<std::string> LLVMArgs;
+
+ /// \brief File name of the file that will provide record layouts
+ /// (in the format produced by -fdump-record-layouts).
+ std::string OverrideRecordLayoutsFile;
+
+public:
+ FrontendOptions() {
+ DisableFree = 0;
+ ProgramAction = frontend::ParseSyntaxOnly;
+ ActionName = "";
+ RelocatablePCH = 0;
+ ShowHelp = 0;
+ ShowMacrosInCodeCompletion = 0;
+ ShowCodePatternsInCodeCompletion = 0;
+ ShowGlobalSymbolsInCodeCompletion = 1;
+ ShowStats = 0;
+ ShowTimers = 0;
+ ShowVersion = 0;
+ ARCMTAction = ARCMT_None;
+ ARCMTMigrateEmitARCErrors = 0;
+ SkipFunctionBodies = 0;
+ ObjCMTAction = ObjCMT_None;
+ }
+
+ /// getInputKindForExtension - Return the appropriate input kind for a file
+ /// extension. For example, "c" would return IK_C.
+ ///
+ /// \return The input kind for the extension, or IK_None if the extension is
+ /// not recognized.
+ static InputKind getInputKindForExtension(StringRef Extension);
+};
+
+} // end namespace clang
+
+#endif
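An illustrative FrontendOptions setup; every field referenced is declared above, the file names are hypothetical, and how a CompilerInstance consumes the options is outside this header.

#include "clang/Frontend/FrontendOptions.h"

clang::FrontendOptions makeEmitObjOpts() {
  clang::FrontendOptions Opts;
  Opts.ProgramAction = clang::frontend::EmitObj;   // emit a .o file
  Opts.Inputs.push_back(clang::FrontendInputFile("a.cpp", clang::IK_CXX));
  Opts.Inputs.push_back(clang::FrontendInputFile("b.cpp", clang::IK_CXX));
  Opts.OutputFile = "a.o";
  Opts.FixAndRecompile = 1;                        // apply fix-its, then recompile

  // The static helper maps extensions to input kinds, e.g. "cpp" -> IK_CXX.
  clang::InputKind K = clang::FrontendOptions::getInputKindForExtension("cpp");
  (void)K;
  return Opts;
}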
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/FrontendPluginRegistry.h b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendPluginRegistry.h
new file mode 100644
index 0000000..ec925ad
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/FrontendPluginRegistry.h
@@ -0,0 +1,23 @@
+//===- FrontendPluginRegistry.h - Pluggable Frontend Action -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_PLUGINFRONTENDACTION_H
+#define LLVM_CLANG_FRONTEND_PLUGINFRONTENDACTION_H
+
+#include "clang/Frontend/FrontendAction.h"
+#include "llvm/Support/Registry.h"
+
+namespace clang {
+
+/// The frontend plugin registry.
+typedef llvm::Registry<PluginASTAction> FrontendPluginRegistry;
+
+} // end namespace clang
+
+#endif
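The typical consumer of this registry is an out-of-tree plugin: a static registration object adds a PluginASTAction subclass under the name that the -plugin option selects. The action class and plugin name below are hypothetical stand-ins used only to show the pattern; the no-op ASTConsumer is assumed from the AST library.

#include "clang/AST/ASTConsumer.h"
#include "clang/Frontend/FrontendPluginRegistry.h"

namespace {
class ExamplePluginAction : public clang::PluginASTAction {
protected:
  virtual clang::ASTConsumer *CreateASTConsumer(clang::CompilerInstance &CI,
                                                llvm::StringRef InFile) {
    return new clang::ASTConsumer();  // no-op consumer for the sketch
  }

public:
  virtual bool ParseArgs(const clang::CompilerInstance &CI,
                         const std::vector<std::string> &Args) {
    return true;  // accept any plugin arguments without checking them
  }
};
} // end anonymous namespace

// Makes the action selectable with -plugin example-plugin.
static clang::FrontendPluginRegistry::Add<ExamplePluginAction>
X("example-plugin", "illustrative plugin registration");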
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/HeaderSearchOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/HeaderSearchOptions.h
new file mode 100644
index 0000000..687f439
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/HeaderSearchOptions.h
@@ -0,0 +1,124 @@
+//===--- HeaderSearchOptions.h ----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_HEADERSEARCHOPTIONS_H
+#define LLVM_CLANG_FRONTEND_HEADERSEARCHOPTIONS_H
+
+#include "llvm/ADT/StringRef.h"
+#include <vector>
+
+namespace clang {
+
+namespace frontend {
+ /// IncludeDirGroup - Identifies the group an include entry belongs to, which
+ /// represents its relative position in the search list. A #include of a ""
+ /// path starts at the -iquote group, then searches the Angled group, then
+ /// searches the system group, etc.
+ enum IncludeDirGroup {
+ Quoted = 0, ///< '#include ""' paths, added by 'gcc -iquote'.
+ Angled, ///< Paths for '#include <>' added by '-I'.
+ IndexHeaderMap, ///< Like Angled, but marks header maps used when
+ /// building frameworks.
+ System, ///< Like Angled, but marks system directories.
+ CSystem, ///< Like System, but only used for C.
+ CXXSystem, ///< Like System, but only used for C++.
+ ObjCSystem, ///< Like System, but only used for ObjC.
+ ObjCXXSystem, ///< Like System, but only used for ObjC++.
+ After ///< Like System, but searched after the system directories.
+ };
+}
+
+/// HeaderSearchOptions - Helper class for storing options related to the
+/// initialization of the HeaderSearch object.
+class HeaderSearchOptions {
+public:
+ struct Entry {
+ std::string Path;
+ frontend::IncludeDirGroup Group;
+ unsigned IsUserSupplied : 1;
+ unsigned IsFramework : 1;
+
+ /// IgnoreSysRoot - This is false if an absolute path should be treated
+ /// relative to the sysroot, or true if it should always be the absolute
+ /// path.
+ unsigned IgnoreSysRoot : 1;
+
+ /// \brief True if this entry is an internal search path.
+ ///
+ /// This typically indicates that users didn't directly provide it, but
+ /// instead it was provided by a compatibility layer for a particular
+ /// system. This isn't redundant with IsUserSupplied (even though perhaps
+ /// it should be) because that is false for user provided '-iwithprefix'
+ /// header search entries.
+ unsigned IsInternal : 1;
+
+ /// \brief True if this entry's headers should be wrapped in extern "C".
+ unsigned ImplicitExternC : 1;
+
+ Entry(StringRef path, frontend::IncludeDirGroup group,
+ bool isUserSupplied, bool isFramework, bool ignoreSysRoot,
+ bool isInternal, bool implicitExternC)
+ : Path(path), Group(group), IsUserSupplied(isUserSupplied),
+ IsFramework(isFramework), IgnoreSysRoot(ignoreSysRoot),
+ IsInternal(isInternal), ImplicitExternC(implicitExternC) {}
+ };
+
+ /// If non-empty, the directory to use as a "virtual system root" for include
+ /// paths.
+ std::string Sysroot;
+
+ /// User specified include entries.
+ std::vector<Entry> UserEntries;
+
+ /// The directory which holds the compiler resource files (builtin includes,
+ /// etc.).
+ std::string ResourceDir;
+
+ /// \brief The directory used for the module cache.
+ std::string ModuleCachePath;
+
+ /// \brief Whether we should disable the use of the hash string within the
+ /// module cache.
+ ///
+ /// Note: Only used for testing!
+ unsigned DisableModuleHash : 1;
+
+ /// Include the compiler builtin includes.
+ unsigned UseBuiltinIncludes : 1;
+
+ /// Include the system standard include search directories.
+ unsigned UseStandardSystemIncludes : 1;
+
+ /// Include the system standard C++ library include search directories.
+ unsigned UseStandardCXXIncludes : 1;
+
+ /// Use libc++ instead of the default libstdc++.
+ unsigned UseLibcxx : 1;
+
+ /// Whether header search information should be output, as with -v.
+ unsigned Verbose : 1;
+
+public:
+ HeaderSearchOptions(StringRef _Sysroot = "/")
+ : Sysroot(_Sysroot), DisableModuleHash(0), UseBuiltinIncludes(true),
+ UseStandardSystemIncludes(true), UseStandardCXXIncludes(true),
+ UseLibcxx(false), Verbose(false) {}
+
+ /// AddPath - Add the \arg Path path to the specified \arg Group list.
+ void AddPath(StringRef Path, frontend::IncludeDirGroup Group,
+ bool IsUserSupplied, bool IsFramework, bool IgnoreSysRoot,
+ bool IsInternal = false, bool ImplicitExternC = false) {
+ UserEntries.push_back(Entry(Path, Group, IsUserSupplied, IsFramework,
+ IgnoreSysRoot, IsInternal, ImplicitExternC));
+ }
+};
+
+} // end namespace clang
+
+#endif
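Illustrative use of the Entry machinery through AddPath, roughly mirroring '-isysroot /sdk -Iinclude -isystem /opt/include -stdlib=libc++'; the flag mapping is an informal analogy and the paths are hypothetical.

#include "clang/Frontend/HeaderSearchOptions.h"

clang::HeaderSearchOptions makeSearchOpts() {
  clang::HeaderSearchOptions Opts("/sdk");   // virtual system root

  // A user -I directory: angled lookup, user supplied, not a framework,
  // not affected by the sysroot.
  Opts.AddPath("include", clang::frontend::Angled,
               /*IsUserSupplied=*/true, /*IsFramework=*/false,
               /*IgnoreSysRoot=*/true);

  // A system directory that should be resolved relative to the sysroot.
  Opts.AddPath("/opt/include", clang::frontend::System,
               /*IsUserSupplied=*/true, /*IsFramework=*/false,
               /*IgnoreSysRoot=*/false);

  Opts.UseLibcxx = 1;
  return Opts;
}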
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/LangStandard.h b/contrib/llvm/tools/clang/include/clang/Frontend/LangStandard.h
new file mode 100644
index 0000000..e6f4403
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/LangStandard.h
@@ -0,0 +1,92 @@
+//===--- LangStandard.h -----------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_LANGSTANDARD_H
+#define LLVM_CLANG_FRONTEND_LANGSTANDARD_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+
+namespace frontend {
+
+enum LangFeatures {
+ BCPLComment = (1 << 0),
+ C89 = (1 << 1),
+ C99 = (1 << 2),
+ C11 = (1 << 3),
+ CPlusPlus = (1 << 4),
+ CPlusPlus0x = (1 << 5),
+ Digraphs = (1 << 6),
+ GNUMode = (1 << 7),
+ HexFloat = (1 << 8),
+ ImplicitInt = (1 << 9)
+};
+
+}
+
+/// LangStandard - Information about the properties of a particular language
+/// standard.
+struct LangStandard {
+ enum Kind {
+#define LANGSTANDARD(id, name, desc, features) \
+ lang_##id,
+#include "clang/Frontend/LangStandards.def"
+ lang_unspecified
+ };
+
+ const char *ShortName;
+ const char *Description;
+ unsigned Flags;
+
+public:
+ /// getName - Get the name of this standard.
+ const char *getName() const { return ShortName; }
+
+ /// getDescription - Get the description of this standard.
+ const char *getDescription() const { return Description; }
+
+ /// hasBCPLComments - Language supports '//' comments.
+ bool hasBCPLComments() const { return Flags & frontend::BCPLComment; }
+
+ /// isC89 - Language is a superset of C89.
+ bool isC89() const { return Flags & frontend::C89; }
+
+ /// isC99 - Language is a superset of C99.
+ bool isC99() const { return Flags & frontend::C99; }
+
+ /// isC11 - Language is a superset of C11.
+ bool isC11() const { return Flags & frontend::C11; }
+
+ /// isCPlusPlus - Language is a C++ variant.
+ bool isCPlusPlus() const { return Flags & frontend::CPlusPlus; }
+
+ /// isCPlusPlus0x - Language is a C++0x variant.
+ bool isCPlusPlus0x() const { return Flags & frontend::CPlusPlus0x; }
+
+ /// hasDigraphs - Language supports digraphs.
+ bool hasDigraphs() const { return Flags & frontend::Digraphs; }
+
+ /// isGNUMode - Language includes GNU extensions.
+ bool isGNUMode() const { return Flags & frontend::GNUMode; }
+
+ /// hasHexFloats - Language supports hexadecimal float constants.
+ bool hasHexFloats() const { return Flags & frontend::HexFloat; }
+
+ /// hasImplicitInt - Language allows variables to be typed as int implicitly.
+ bool hasImplicitInt() const { return Flags & frontend::ImplicitInt; }
+
+ static const LangStandard &getLangStandardForKind(Kind K);
+ static const LangStandard *getLangStandardForName(StringRef Name);
+};
+
+} // end namespace clang
+
+#endif
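A short illustration of the lookup interface above; the standard name string is whatever -std= would accept.

#include "clang/Frontend/LangStandard.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/raw_ostream.h"

bool standardIsCPlusPlus11(llvm::StringRef Name) {
  const clang::LangStandard *Std =
      clang::LangStandard::getLangStandardForName(Name);  // e.g. "gnu++11"
  if (!Std)
    return false;                                         // unknown standard name
  llvm::errs() << Std->getName() << ": " << Std->getDescription() << "\n";
  return Std->isCPlusPlus0x();                            // C++11 is still "0x" here
}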
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def b/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def
new file mode 100644
index 0000000..4bcff4a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/LangStandards.def
@@ -0,0 +1,120 @@
+//===-- LangStandards.def - Language Standard Data --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LANGSTANDARD
+#error "LANGSTANDARD must be defined before including this file"
+#endif
+
+/// LANGSTANDARD(IDENT, NAME, DESC, FEATURES)
+///
+/// \param IDENT - The name of the standard as a C++ identifier.
+/// \param NAME - The name of the standard.
+/// \param DESC - A short description of the standard.
+/// \param FEATURES - The standard features as flags, these are enums from the
+/// clang::frontend namespace, which is assumed to be available.
+
+// C89-ish modes.
+LANGSTANDARD(c89, "c89",
+ "ISO C 1990",
+ C89 | ImplicitInt)
+LANGSTANDARD(c90, "c90",
+ "ISO C 1990",
+ C89 | ImplicitInt)
+LANGSTANDARD(iso9899_1990, "iso9899:1990",
+ "ISO C 1990",
+ C89 | ImplicitInt)
+
+LANGSTANDARD(c94, "iso9899:199409",
+ "ISO C 1990 with amendment 1",
+ C89 | Digraphs | ImplicitInt)
+
+LANGSTANDARD(gnu89, "gnu89",
+ "ISO C 1990 with GNU extensions",
+ BCPLComment | C89 | Digraphs | GNUMode | ImplicitInt)
+LANGSTANDARD(gnu90, "gnu90",
+ "ISO C 1990 with GNU extensions",
+ BCPLComment | C89 | Digraphs | GNUMode | ImplicitInt)
+
+// C99-ish modes
+LANGSTANDARD(c99, "c99",
+ "ISO C 1999",
+ BCPLComment | C99 | Digraphs | HexFloat)
+LANGSTANDARD(c9x, "c9x",
+ "ISO C 1999",
+ BCPLComment | C99 | Digraphs | HexFloat)
+LANGSTANDARD(iso9899_1999,
+ "iso9899:1999", "ISO C 1999",
+ BCPLComment | C99 | Digraphs | HexFloat)
+LANGSTANDARD(iso9899_199x,
+ "iso9899:199x", "ISO C 1999",
+ BCPLComment | C99 | Digraphs | HexFloat)
+
+LANGSTANDARD(gnu99, "gnu99",
+ "ISO C 1999 with GNU extensions",
+ BCPLComment | C99 | Digraphs | GNUMode | HexFloat)
+LANGSTANDARD(gnu9x, "gnu9x",
+ "ISO C 1999 with GNU extensions",
+ BCPLComment | C99 | Digraphs | GNUMode | HexFloat)
+
+// C11 modes
+LANGSTANDARD(c11, "c11",
+ "ISO C 2011",
+ BCPLComment | C99 | C11 | Digraphs | HexFloat)
+LANGSTANDARD(c1x, "c1x",
+ "ISO C 2011",
+ BCPLComment | C99 | C11 | Digraphs | HexFloat)
+LANGSTANDARD(iso9899_2011,
+ "iso9899:2011", "ISO C 2011",
+ BCPLComment | C99 | C11 | Digraphs | HexFloat)
+LANGSTANDARD(iso9899_201x,
+ "iso9899:2011", "ISO C 2011",
+ BCPLComment | C99 | C11 | Digraphs | HexFloat)
+
+LANGSTANDARD(gnu11, "gnu11",
+ "ISO C 2011 with GNU extensions",
+ BCPLComment | C99 | C11 | Digraphs | GNUMode | HexFloat)
+LANGSTANDARD(gnu1x, "gnu1x",
+ "ISO C 2011 with GNU extensions",
+ BCPLComment | C99 | C11 | Digraphs | GNUMode | HexFloat)
+
+// C++ modes
+LANGSTANDARD(cxx98, "c++98",
+ "ISO C++ 1998 with amendments",
+ BCPLComment | CPlusPlus | Digraphs)
+LANGSTANDARD(cxx03, "c++03",
+ "ISO C++ 1998 with amendments",
+ BCPLComment | CPlusPlus | Digraphs)
+LANGSTANDARD(gnucxx98, "gnu++98",
+ "ISO C++ 1998 with amendments and GNU extensions",
+ BCPLComment | CPlusPlus | Digraphs | GNUMode)
+
+LANGSTANDARD(cxx0x, "c++0x",
+ "ISO C++ 2011 with amendments",
+ BCPLComment | CPlusPlus | CPlusPlus0x | Digraphs)
+LANGSTANDARD(cxx11, "c++11",
+ "ISO C++ 2011 with amendments",
+ BCPLComment | CPlusPlus | CPlusPlus0x | Digraphs)
+LANGSTANDARD(gnucxx0x, "gnu++0x",
+ "ISO C++ 2011 with amendments and GNU extensions",
+ BCPLComment | CPlusPlus | CPlusPlus0x | Digraphs | GNUMode)
+LANGSTANDARD(gnucxx11, "gnu++11",
+ "ISO C++ 2011 with amendments and GNU extensions",
+ BCPLComment | CPlusPlus | CPlusPlus0x | Digraphs | GNUMode)
+
+// OpenCL
+LANGSTANDARD(opencl, "cl",
+ "OpenCL 1.0",
+ BCPLComment | C99 | Digraphs | HexFloat)
+
+// CUDA
+LANGSTANDARD(cuda, "cuda",
+ "NVIDIA CUDA(tm)",
+ BCPLComment | CPlusPlus | Digraphs)
+
+#undef LANGSTANDARD
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/LayoutOverrideSource.h b/contrib/llvm/tools/clang/include/clang/Frontend/LayoutOverrideSource.h
new file mode 100644
index 0000000..225efe6
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/LayoutOverrideSource.h
@@ -0,0 +1,61 @@
+//===--- LayoutOverrideSource.h --Override Record Layouts -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_LAYOUTOVERRIDESOURCE_H
+#define LLVM_CLANG_FRONTEND_LAYOUTOVERRIDESOURCE_H
+
+#include "clang/AST/ExternalASTSource.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace clang {
+ /// \brief An external AST source that overrides the layout of
+ /// a specified set of record types.
+ ///
+ /// This class is used only for testing the ability of external AST sources
+ /// to override the layout of record types. Its input is the output format
+ /// of the command-line argument -fdump-record-layouts.
+ class LayoutOverrideSource : public ExternalASTSource {
+ /// \brief The layout of a given record.
+ struct Layout {
+ /// \brief The size of the record.
+ uint64_t Size;
+
+ /// \brief The alignment of the record.
+ uint64_t Align;
+
+ /// \brief The offsets of the fields, in source order.
+ llvm::SmallVector<uint64_t, 8> FieldOffsets;
+ };
+
+ /// \brief The set of layouts that will be overridden.
+ llvm::StringMap<Layout> Layouts;
+
+ public:
+ /// \brief Create a new AST source that overrides the layout of some
+ /// set of record types.
+ ///
+ /// The file should contain the output of -fdump-record-layouts.
+ explicit LayoutOverrideSource(llvm::StringRef Filename);
+
+ /// \brief If this particular record type has an overridden layout,
+ /// return that layout.
+ virtual bool
+ layoutRecordType(const RecordDecl *Record,
+ uint64_t &Size, uint64_t &Alignment,
+ llvm::DenseMap<const FieldDecl *, uint64_t> &FieldOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &BaseOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &VirtualBaseOffsets);
+
+ /// \brief Dump the overridden layouts.
+ void dump();
+ };
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/LogDiagnosticPrinter.h b/contrib/llvm/tools/clang/include/clang/Frontend/LogDiagnosticPrinter.h
new file mode 100644
index 0000000..4de15f2
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/LogDiagnosticPrinter.h
@@ -0,0 +1,79 @@
+//===--- LogDiagnosticPrinter.h - Log Diagnostic Client ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_LOG_DIAGNOSTIC_PRINTER_H_
+#define LLVM_CLANG_FRONTEND_LOG_DIAGNOSTIC_PRINTER_H_
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+class DiagnosticOptions;
+class LangOptions;
+
+class LogDiagnosticPrinter : public DiagnosticConsumer {
+ struct DiagEntry {
+ /// The primary message line of the diagnostic.
+ std::string Message;
+
+ /// The source file name, if available.
+ std::string Filename;
+
+ /// The source file line number, if available.
+ unsigned Line;
+
+ /// The source file column number, if available.
+ unsigned Column;
+
+ /// The ID of the diagnostic.
+ unsigned DiagnosticID;
+
+ /// The level of the diagnostic.
+ DiagnosticsEngine::Level DiagnosticLevel;
+ };
+
+ raw_ostream &OS;
+ const LangOptions *LangOpts;
+ const DiagnosticOptions *DiagOpts;
+
+ SourceLocation LastWarningLoc;
+ FullSourceLoc LastLoc;
+ unsigned OwnsOutputStream : 1;
+
+ SmallVector<DiagEntry, 8> Entries;
+
+ std::string MainFilename;
+ std::string DwarfDebugFlags;
+
+public:
+ LogDiagnosticPrinter(raw_ostream &OS, const DiagnosticOptions &Diags,
+ bool OwnsOutputStream = false);
+ virtual ~LogDiagnosticPrinter();
+
+ void setDwarfDebugFlags(StringRef Value) {
+ DwarfDebugFlags = Value;
+ }
+
+ void BeginSourceFile(const LangOptions &LO, const Preprocessor *PP) {
+ LangOpts = &LO;
+ }
+
+ void EndSourceFile();
+
+ virtual void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info);
+
+ DiagnosticConsumer *clone(DiagnosticsEngine &Diags) const;
+};
+
+} // end namespace clang
+
+#endif
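
A short construction sketch, assuming DiagnosticOptions is provided by clang/Frontend/DiagnosticOptions.h in this tree and that "-g" stands in for the real debug-flags string:

    #include "clang/Frontend/DiagnosticOptions.h"   // assumed header location
    #include "clang/Frontend/LogDiagnosticPrinter.h"
    #include "llvm/Support/raw_ostream.h"

    // Build a consumer that appends log entries to stderr; the stream is not
    // owned, so it stays open after the printer is destroyed.
    clang::DiagnosticConsumer *makeLogPrinter(const clang::DiagnosticOptions &Opts) {
      clang::LogDiagnosticPrinter *Printer =
          new clang::LogDiagnosticPrinter(llvm::errs(), Opts,
                                          /*OwnsOutputStream=*/false);
      Printer->setDwarfDebugFlags("-g");
      return Printer;
    }
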
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/MigratorOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/MigratorOptions.h
new file mode 100644
index 0000000..f9554e4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/MigratorOptions.h
@@ -0,0 +1,31 @@
+//===--- MigratorOptions.h - MigratorOptions Options ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header contains the structures necessary for a front-end to specify
+// various migration analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_MIGRATOROPTIONS
+#define LLVM_CLANG_FRONTEND_MIGRATOROPTIONS
+
+namespace clang {
+
+class MigratorOptions {
+public:
+ unsigned NoNSAllocReallocError : 1;
+ unsigned NoFinalizeRemoval : 1;
+ MigratorOptions() {
+ NoNSAllocReallocError = 0;
+ NoFinalizeRemoval = 0;
+ }
+};
+
+}
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h b/contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h
new file mode 100644
index 0000000..ffa7b4a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/MultiplexConsumer.h
@@ -0,0 +1,64 @@
+//===-- MultiplexConsumer.h - AST Consumer for PCH Generation ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the MultiplexConsumer class, which can be used to
+// multiplex ASTConsumer and SemaConsumer messages to many consumers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_FRONTEND_MULTIPLEXCONSUMER_H
+#define CLANG_FRONTEND_MULTIPLEXCONSUMER_H
+
+#include "clang/Basic/LLVM.h"
+#include "clang/Sema/SemaConsumer.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/OwningPtr.h"
+#include <vector>
+
+namespace clang {
+
+class MultiplexASTMutationListener;
+class MultiplexASTDeserializationListener;
+
+// Has a list of ASTConsumers and calls each of them. Owns its children.
+class MultiplexConsumer : public SemaConsumer {
+public:
+ // Takes ownership of the pointers in C.
+ MultiplexConsumer(ArrayRef<ASTConsumer*> C);
+ ~MultiplexConsumer();
+
+ // ASTConsumer
+ virtual void Initialize(ASTContext &Context);
+ virtual void HandleCXXStaticMemberVarInstantiation(VarDecl *VD);
+ virtual bool HandleTopLevelDecl(DeclGroupRef D);
+ virtual void HandleInterestingDecl(DeclGroupRef D);
+ virtual void HandleTranslationUnit(ASTContext &Ctx);
+ virtual void HandleTagDeclDefinition(TagDecl *D);
+ virtual void HandleCXXImplicitFunctionInstantiation(FunctionDecl *D);
+ virtual void HandleTopLevelDeclInObjCContainer(DeclGroupRef D);
+ virtual void CompleteTentativeDefinition(VarDecl *D);
+ virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired);
+ virtual ASTMutationListener *GetASTMutationListener();
+ virtual ASTDeserializationListener *GetASTDeserializationListener();
+ virtual void PrintStats();
+
+ // SemaConsumer
+ virtual void InitializeSema(Sema &S);
+ virtual void ForgetSema();
+
+ static bool classof(const MultiplexConsumer *) { return true; }
+private:
+ std::vector<ASTConsumer*> Consumers; // Owns these.
+ OwningPtr<MultiplexASTMutationListener> MutationListener;
+ OwningPtr<MultiplexASTDeserializationListener> DeserializationListener;
+};
+
+} // end namespace clang
+
+#endif
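
A minimal sketch of fanning callbacks out to two consumers; per the comment above, the multiplexer takes ownership of both pointers:

    #include "clang/AST/ASTConsumer.h"
    #include "clang/Frontend/MultiplexConsumer.h"
    #include "llvm/ADT/SmallVector.h"

    // Forward every ASTConsumer/SemaConsumer callback to both A and B.
    // Ownership of A and B transfers to the returned consumer.
    clang::ASTConsumer *combineConsumers(clang::ASTConsumer *A,
                                         clang::ASTConsumer *B) {
      llvm::SmallVector<clang::ASTConsumer *, 2> Consumers;
      Consumers.push_back(A);
      Consumers.push_back(B);
      return new clang::MultiplexConsumer(Consumers);
    }
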
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOptions.h
new file mode 100644
index 0000000..d86a923
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOptions.h
@@ -0,0 +1,224 @@
+//===--- PreprocessorOptions.h ----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_PREPROCESSOROPTIONS_H_
+#define LLVM_CLANG_FRONTEND_PREPROCESSOROPTIONS_H_
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include <cassert>
+#include <string>
+#include <utility>
+#include <vector>
+#include <set>
+
+namespace llvm {
+ class MemoryBuffer;
+}
+
+namespace clang {
+
+class Preprocessor;
+class LangOptions;
+
+/// \brief Enumerate the kinds of standard library supported when using Objective-C++ ARC.
+enum ObjCXXARCStandardLibraryKind {
+ ARCXX_nolib,
+ /// \brief libc++
+ ARCXX_libcxx,
+ /// \brief libstdc++
+ ARCXX_libstdcxx
+};
+
+/// PreprocessorOptions - This class is used for passing the various options
+/// used in preprocessor initialization to InitializePreprocessor().
+class PreprocessorOptions {
+public:
+ std::vector<std::pair<std::string, bool/*isUndef*/> > Macros;
+ std::vector<std::string> Includes;
+ std::vector<std::string> MacroIncludes;
+
+ unsigned UsePredefines : 1; /// Initialize the preprocessor with the compiler
+ /// and target specific predefines.
+
+ unsigned DetailedRecord : 1; /// Whether we should maintain a detailed
+ /// record of all macro definitions and
+ /// expansions.
+ unsigned DetailedRecordConditionalDirectives : 1; /// Whether in the
+ /// preprocessing record we should also keep
+ /// track of locations of conditional directives
+ /// in non-system files.
+
+ /// The implicit PCH included at the start of the translation unit, or empty.
+ std::string ImplicitPCHInclude;
+
+ /// \brief Headers that will be converted to chained PCHs in memory.
+ std::vector<std::string> ChainedIncludes;
+
+ /// \brief When true, disables most of the normal validation performed on
+ /// precompiled headers.
+ bool DisablePCHValidation;
+
+ /// \brief When true, disables the use of the stat cache within a
+ /// precompiled header or AST file.
+ bool DisableStatCache;
+
+ /// \brief When true, a PCH with compiler errors will not be rejected.
+ bool AllowPCHWithCompilerErrors;
+
+ /// \brief Dump declarations that are deserialized from PCH, for testing.
+ bool DumpDeserializedPCHDecls;
+
+ /// \brief This is a set of names for decls that we do not want to be
+ /// deserialized, and we emit an error if they are; for testing purposes.
+ std::set<std::string> DeserializedPCHDeclsToErrorOn;
+
+ /// \brief If non-zero, the implicit PCH include is actually a precompiled
+ /// preamble that covers this number of bytes in the main source file.
+ ///
+ /// The boolean indicates whether the preamble ends at the start of a new
+ /// line.
+ std::pair<unsigned, bool> PrecompiledPreambleBytes;
+
+ /// The implicit PTH input included at the start of the translation unit, or
+ /// empty.
+ std::string ImplicitPTHInclude;
+
+ /// If given, a PTH cache file to use for speeding up header parsing.
+ std::string TokenCache;
+
+ /// \brief True if the SourceManager should report the original file name for
+ /// contents of files that were remapped to other files. Defaults to true.
+ bool RemappedFilesKeepOriginalName;
+
+ /// \brief The set of file remappings, which take existing files on
+ /// the system (the first part of each pair) and gives them the
+ /// contents of other files on the system (the second part of each
+ /// pair).
+ std::vector<std::pair<std::string, std::string> > RemappedFiles;
+
+ /// \brief The set of file-to-buffer remappings, which take existing files
+ /// on the system (the first part of each pair) and gives them the contents
+ /// of the specified memory buffer (the second part of each pair).
+ std::vector<std::pair<std::string, const llvm::MemoryBuffer *> >
+ RemappedFileBuffers;
+
+ /// \brief Whether the compiler instance should retain (i.e., not free)
+ /// the buffers associated with remapped files.
+ ///
+ /// This flag defaults to false; it can be set true only through direct
+ /// manipulation of the compiler invocation object, in cases where the
+ /// compiler invocation and its buffers will be reused.
+ bool RetainRemappedFileBuffers;
+
+ /// \brief The Objective-C++ ARC standard library that we should support,
+ /// by providing appropriate definitions to retrofit the standard library
+ /// with support for lifetime-qualified pointers.
+ ObjCXXARCStandardLibraryKind ObjCXXARCStandardLibrary;
+
+ /// \brief The path of modules being built, which is used to detect
+ /// cycles in the module dependency graph as modules are being built.
+ ///
+ /// There is no way to set this value from the command line. If we ever need
+ /// to do so (e.g., if on-demand module construction moves out-of-process),
+ /// we can add a cc1-level option to do so.
+ SmallVector<std::string, 2> ModuleBuildPath;
+
+ typedef std::vector<std::pair<std::string, std::string> >::iterator
+ remapped_file_iterator;
+ typedef std::vector<std::pair<std::string, std::string> >::const_iterator
+ const_remapped_file_iterator;
+ remapped_file_iterator remapped_file_begin() {
+ return RemappedFiles.begin();
+ }
+ const_remapped_file_iterator remapped_file_begin() const {
+ return RemappedFiles.begin();
+ }
+ remapped_file_iterator remapped_file_end() {
+ return RemappedFiles.end();
+ }
+ const_remapped_file_iterator remapped_file_end() const {
+ return RemappedFiles.end();
+ }
+
+ typedef std::vector<std::pair<std::string, const llvm::MemoryBuffer *> >::
+ iterator remapped_file_buffer_iterator;
+ typedef std::vector<std::pair<std::string, const llvm::MemoryBuffer *> >::
+ const_iterator const_remapped_file_buffer_iterator;
+ remapped_file_buffer_iterator remapped_file_buffer_begin() {
+ return RemappedFileBuffers.begin();
+ }
+ const_remapped_file_buffer_iterator remapped_file_buffer_begin() const {
+ return RemappedFileBuffers.begin();
+ }
+ remapped_file_buffer_iterator remapped_file_buffer_end() {
+ return RemappedFileBuffers.end();
+ }
+ const_remapped_file_buffer_iterator remapped_file_buffer_end() const {
+ return RemappedFileBuffers.end();
+ }
+
+public:
+ PreprocessorOptions() : UsePredefines(true), DetailedRecord(false),
+ DetailedRecordConditionalDirectives(false),
+ DisablePCHValidation(false), DisableStatCache(false),
+ AllowPCHWithCompilerErrors(false),
+ DumpDeserializedPCHDecls(false),
+ PrecompiledPreambleBytes(0, true),
+ RemappedFilesKeepOriginalName(true),
+ RetainRemappedFileBuffers(false),
+ ObjCXXARCStandardLibrary(ARCXX_nolib) { }
+
+ void addMacroDef(StringRef Name) {
+ Macros.push_back(std::make_pair(Name, false));
+ }
+ void addMacroUndef(StringRef Name) {
+ Macros.push_back(std::make_pair(Name, true));
+ }
+ void addRemappedFile(StringRef From, StringRef To) {
+ RemappedFiles.push_back(std::make_pair(From, To));
+ }
+
+ remapped_file_iterator eraseRemappedFile(remapped_file_iterator Remapped) {
+ return RemappedFiles.erase(Remapped);
+ }
+
+ void addRemappedFile(StringRef From, const llvm::MemoryBuffer * To) {
+ RemappedFileBuffers.push_back(std::make_pair(From, To));
+ }
+
+ remapped_file_buffer_iterator
+ eraseRemappedFile(remapped_file_buffer_iterator Remapped) {
+ return RemappedFileBuffers.erase(Remapped);
+ }
+
+ void clearRemappedFiles() {
+ RemappedFiles.clear();
+ RemappedFileBuffers.clear();
+ }
+
+ /// \brief Reset any options that are not considered when building a
+ /// module.
+ void resetNonModularOptions() {
+ Includes.clear();
+ MacroIncludes.clear();
+ ChainedIncludes.clear();
+ DumpDeserializedPCHDecls = false;
+ ImplicitPCHInclude.clear();
+ ImplicitPTHInclude.clear();
+ TokenCache.clear();
+ RetainRemappedFileBuffers = true;
+ PrecompiledPreambleBytes.first = 0;
+ PrecompiledPreambleBytes.second = 0;
+ }
+};
+
+} // end namespace clang
+
+#endif
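
A sketch of the typical IDE-style setup: predefine a macro and remap an on-disk path to an unsaved in-memory buffer (both names are examples, not part of this header):

    #include "clang/Frontend/PreprocessorOptions.h"
    #include "llvm/Support/MemoryBuffer.h"

    // Predefine MY_TOOL and substitute an in-memory buffer for "unsaved.c".
    // Unless RetainRemappedFileBuffers is set, the buffer is expected to be
    // freed when the compiler invocation is torn down.
    void setUpUnsavedFile(clang::PreprocessorOptions &PPOpts) {
      PPOpts.addMacroDef("MY_TOOL=1");
      llvm::MemoryBuffer *Buf =
          llvm::MemoryBuffer::getMemBufferCopy("int x;\n", "unsaved.c");
      PPOpts.addRemappedFile("unsaved.c", Buf);
    }
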
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOutputOptions.h b/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOutputOptions.h
new file mode 100644
index 0000000..1eda0d4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/PreprocessorOutputOptions.h
@@ -0,0 +1,37 @@
+//===--- PreprocessorOutputOptions.h ----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_PREPROCESSOROUTPUTOPTIONS_H
+#define LLVM_CLANG_FRONTEND_PREPROCESSOROUTPUTOPTIONS_H
+
+namespace clang {
+
+/// PreprocessorOutputOptions - Options for controlling the C preprocessor
+/// output (e.g., -E).
+class PreprocessorOutputOptions {
+public:
+ unsigned ShowCPP : 1; ///< Print normal preprocessed output.
+ unsigned ShowComments : 1; ///< Show comments.
+ unsigned ShowLineMarkers : 1; ///< Show #line markers.
+ unsigned ShowMacroComments : 1; ///< Show comments, even in macros.
+ unsigned ShowMacros : 1; ///< Print macro definitions.
+
+public:
+ PreprocessorOutputOptions() {
+ ShowCPP = 1;
+ ShowComments = 0;
+ ShowLineMarkers = 1;
+ ShowMacroComments = 0;
+ ShowMacros = 0;
+ }
+};
+
+} // end namespace clang
+
+#endif
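
For example, a tool that wants preprocessed output plus macro definitions (roughly the driver's -dD behavior) would configure the options like this:

    #include "clang/Frontend/PreprocessorOutputOptions.h"

    // Keep the default -E output but also print macro definitions.
    clang::PreprocessorOutputOptions makeDumpMacroOpts() {
      clang::PreprocessorOutputOptions Opts;
      Opts.ShowMacros = 1;
      return Opts;
    }
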
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h b/contrib/llvm/tools/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h
new file mode 100644
index 0000000..aa0695f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/SerializedDiagnosticPrinter.h
@@ -0,0 +1,62 @@
+//===--- SerializedDiagnosticPrinter.h - Serializer for diagnostics -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_SERIALIZE_DIAGNOSTIC_PRINTER_H_
+#define LLVM_CLANG_FRONTEND_SERIALIZE_DIAGNOSTIC_PRINTER_H_
+
+#include "llvm/Bitcode/BitstreamWriter.h"
+
+namespace llvm {
+class raw_ostream;
+}
+
+namespace clang {
+class DiagnosticConsumer;
+class DiagnosticsEngine;
+class DiagnosticOptions;
+
+namespace serialized_diags {
+
+enum BlockIDs {
+ /// \brief A top-level block which represents any metadata associated
+ /// with the diagnostics, including versioning of the format.
+ BLOCK_META = llvm::bitc::FIRST_APPLICATION_BLOCKID,
+
+ /// \brief This block acts as a container for all the information
+ /// for a specific diagnostic.
+ BLOCK_DIAG
+};
+
+enum RecordIDs {
+ RECORD_VERSION = 1,
+ RECORD_DIAG,
+ RECORD_SOURCE_RANGE,
+ RECORD_DIAG_FLAG,
+ RECORD_CATEGORY,
+ RECORD_FILENAME,
+ RECORD_FIXIT,
+ RECORD_FIRST = RECORD_VERSION,
+ RECORD_LAST = RECORD_FIXIT
+};
+
+/// \brief Returns a DiagnosticConsumer that serializes diagnostics to
+/// a bitcode file.
+///
+/// The created DiagnosticConsumer is designed for quick and lightweight
+/// transfer of diagnostics to the enclosing build system (e.g., an IDE).
+/// This allows wrapper tools for Clang to get diagnostics from Clang
+/// (via libclang) without needing to parse Clang's command line output.
+///
+DiagnosticConsumer *create(llvm::raw_ostream *OS,
+ const DiagnosticOptions &diags);
+
+} // end serialized_diags namespace
+} // end clang namespace
+
+#endif
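
A sketch of creating the serializing consumer, under two assumptions: DiagnosticOptions lives in clang/Frontend/DiagnosticOptions.h, and the returned consumer owns the stream passed to create(). The output path is only an example:

    #include "clang/Frontend/DiagnosticOptions.h"   // assumed header location
    #include "clang/Frontend/SerializedDiagnosticPrinter.h"
    #include "llvm/Support/raw_ostream.h"
    #include <string>

    // Serialize all diagnostics into a bitcode file that a build system or
    // IDE can later read back through libclang.
    clang::DiagnosticConsumer *makeSerializer(const clang::DiagnosticOptions &Opts) {
      std::string Error;
      llvm::raw_fd_ostream *OS =
          new llvm::raw_fd_ostream("out.dia", Error, llvm::raw_fd_ostream::F_Binary);
      if (!Error.empty()) {
        delete OS;
        return 0;
      }
      return clang::serialized_diags::create(OS, Opts);  // assumed to take ownership of OS
    }
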
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h
new file mode 100644
index 0000000..519d3b6
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnostic.h
@@ -0,0 +1,120 @@
+//===--- TextDiagnostic.h - Text Diagnostic Pretty-Printing -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a utility class that provides support for textual pretty-printing of
+// diagnostics. It is used to implement the different code paths which require
+// such functionality in a consistent way.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_TEXT_DIAGNOSTIC_H_
+#define LLVM_CLANG_FRONTEND_TEXT_DIAGNOSTIC_H_
+
+#include "clang/Frontend/DiagnosticRenderer.h"
+
+namespace clang {
+
+/// \brief Class to encapsulate the logic for formatting and printing a textual
+/// diagnostic message.
+///
+/// This class provides an interface for building and emitting a textual
+/// diagnostic, including all of the macro backtraces, caret diagnostics, FixIt
+/// Hints, and code snippets. In the presence of macros this involves
+/// a recursive process, synthesizing notes for each macro expansion.
+///
+/// The purpose of this class is to isolate the implementation of printing
+/// beautiful text diagnostics from any particular interfaces. The Clang
+/// DiagnosticClient is implemented through this class as is diagnostic
+/// printing coming out of libclang.
+class TextDiagnostic : public DiagnosticRenderer {
+ raw_ostream &OS;
+
+public:
+ TextDiagnostic(raw_ostream &OS,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const DiagnosticOptions &DiagOpts);
+
+ virtual ~TextDiagnostic();
+
+ /// \brief Print the diagnostic level to a raw_ostream.
+ ///
+ /// This is a static helper that handles colorizing the level and formatting
+ /// it into an arbitrary output stream. This is used internally by the
+ /// TextDiagnostic emission code, but it can also be used directly by
+ /// consumers that don't have a source manager or other state that the full
+ /// TextDiagnostic logic requires.
+ static void printDiagnosticLevel(raw_ostream &OS,
+ DiagnosticsEngine::Level Level,
+ bool ShowColors);
+
+ /// \brief Pretty-print a diagnostic message to a raw_ostream.
+ ///
+ /// This is a static helper to handle the line wrapping, colorizing, and
+ /// rendering of a diagnostic message to a particular ostream. It is
+ /// publicly visible so that clients which do not have sufficient state to
+ /// build a complete TextDiagnostic object can still get consistent
+ /// formatting of their diagnostic messages.
+ ///
+ /// \param OS Where the message is printed
+ /// \param Level Used to colorize the message
+ /// \param Message The text actually printed
+ /// \param CurrentColumn The starting column of the first line, accounting
+ /// for any prefix.
+ /// \param Columns The number of columns to use in line-wrapping, 0 disables
+ /// all line-wrapping.
+ /// \param ShowColors Enable colorizing of the message.
+ static void printDiagnosticMessage(raw_ostream &OS,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ unsigned CurrentColumn, unsigned Columns,
+ bool ShowColors);
+
+protected:
+ virtual void emitDiagnosticMessage(SourceLocation Loc,PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ ArrayRef<CharSourceRange> Ranges,
+ DiagOrStoredDiag D);
+
+ virtual void emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ ArrayRef<CharSourceRange> Ranges);
+
+ virtual void emitCodeContext(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange>& Ranges,
+ ArrayRef<FixItHint> Hints) {
+ emitSnippetAndCaret(Loc, Level, Ranges, Hints);
+ }
+
+ virtual void emitBasicNote(StringRef Message);
+
+ virtual void emitIncludeLocation(SourceLocation Loc, PresumedLoc PLoc);
+
+private:
+ void emitSnippetAndCaret(SourceLocation Loc, DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange>& Ranges,
+ ArrayRef<FixItHint> Hints);
+
+ void highlightRange(const CharSourceRange &R,
+ unsigned LineNo, FileID FID,
+ const std::string &SourceLine,
+ std::string &CaretLine);
+ std::string buildFixItInsertionLine(unsigned LineNo,
+ const char *LineStart,
+ const char *LineEnd,
+ ArrayRef<FixItHint> Hints);
+ void expandTabs(std::string &SourceLine, std::string &CaretLine);
+ void emitParseableFixits(ArrayRef<FixItHint> Hints);
+};
+
+} // end namespace clang
+
+#endif
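
The two static helpers can be used on their own, without a SourceManager or a TextDiagnostic instance; a minimal sketch:

    #include "clang/Basic/Diagnostic.h"
    #include "clang/Frontend/TextDiagnostic.h"
    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/raw_ostream.h"

    // Print "error: <message>" to stderr with the usual coloring and
    // 80-column wrapping.
    void reportToolError(llvm::StringRef Message) {
      llvm::raw_ostream &OS = llvm::errs();
      clang::TextDiagnostic::printDiagnosticLevel(
          OS, clang::DiagnosticsEngine::Error, /*ShowColors=*/true);
      clang::TextDiagnostic::printDiagnosticMessage(
          OS, clang::DiagnosticsEngine::Error, Message,
          /*CurrentColumn=*/0, /*Columns=*/80, /*ShowColors=*/true);
    }
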
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticBuffer.h b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticBuffer.h
new file mode 100644
index 0000000..6f1c0e8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticBuffer.h
@@ -0,0 +1,54 @@
+//===--- TextDiagnosticBuffer.h - Buffer Text Diagnostics -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a concrete diagnostic client, which buffers the diagnostic messages.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_TEXT_DIAGNOSTIC_BUFFER_H_
+#define LLVM_CLANG_FRONTEND_TEXT_DIAGNOSTIC_BUFFER_H_
+
+#include "clang/Basic/Diagnostic.h"
+#include <vector>
+
+namespace clang {
+
+class Preprocessor;
+class SourceManager;
+
+class TextDiagnosticBuffer : public DiagnosticConsumer {
+public:
+ typedef std::vector<std::pair<SourceLocation, std::string> > DiagList;
+ typedef DiagList::iterator iterator;
+ typedef DiagList::const_iterator const_iterator;
+private:
+ DiagList Errors, Warnings, Notes;
+public:
+ const_iterator err_begin() const { return Errors.begin(); }
+ const_iterator err_end() const { return Errors.end(); }
+
+ const_iterator warn_begin() const { return Warnings.begin(); }
+ const_iterator warn_end() const { return Warnings.end(); }
+
+ const_iterator note_begin() const { return Notes.begin(); }
+ const_iterator note_end() const { return Notes.end(); }
+
+ virtual void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info);
+
+ /// FlushDiagnostics - Flush the buffered diagnostics to a given
+ /// diagnostic engine.
+ void FlushDiagnostics(DiagnosticsEngine &Diags) const;
+
+ virtual DiagnosticConsumer *clone(DiagnosticsEngine &Diags) const;
+};
+
+} // end namespace clang
+
+#endif
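
A small sketch of inspecting the buffer after running a compilation with this consumer attached:

    #include "clang/Frontend/TextDiagnosticBuffer.h"
    #include "llvm/Support/raw_ostream.h"

    // Echo the buffered error text; warnings and notes have matching
    // warn_*/note_* iterators.
    void printBufferedErrors(const clang::TextDiagnosticBuffer &Buffer) {
      typedef clang::TextDiagnosticBuffer::const_iterator Iter;
      for (Iter I = Buffer.err_begin(), E = Buffer.err_end(); I != E; ++I)
        llvm::errs() << "error: " << I->second << "\n";
    }
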
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h
new file mode 100644
index 0000000..9b6ac24
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/TextDiagnosticPrinter.h
@@ -0,0 +1,59 @@
+//===--- TextDiagnosticPrinter.h - Text Diagnostic Client -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a concrete diagnostic client, which prints the diagnostics to
+// standard error.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_TEXT_DIAGNOSTIC_PRINTER_H_
+#define LLVM_CLANG_FRONTEND_TEXT_DIAGNOSTIC_PRINTER_H_
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/OwningPtr.h"
+
+namespace clang {
+class DiagnosticOptions;
+class LangOptions;
+class TextDiagnostic;
+
+class TextDiagnosticPrinter : public DiagnosticConsumer {
+ raw_ostream &OS;
+ const LangOptions *LangOpts;
+ const DiagnosticOptions *DiagOpts;
+ const SourceManager *SM;
+
+ /// \brief Handle to the currently active text diagnostic emitter.
+ OwningPtr<TextDiagnostic> TextDiag;
+
+ /// A string to prefix to error messages.
+ std::string Prefix;
+
+ unsigned OwnsOutputStream : 1;
+
+public:
+ TextDiagnosticPrinter(raw_ostream &os, const DiagnosticOptions &diags,
+ bool OwnsOutputStream = false);
+ virtual ~TextDiagnosticPrinter();
+
+ /// setPrefix - Set the diagnostic printer prefix string, which will be
+ /// printed at the start of any diagnostics. If empty, no prefix string is
+ /// used.
+ void setPrefix(std::string Value) { Prefix = Value; }
+
+ void BeginSourceFile(const LangOptions &LO, const Preprocessor *PP);
+ void EndSourceFile();
+ void HandleDiagnostic(DiagnosticsEngine::Level Level, const Diagnostic &Info);
+ DiagnosticConsumer *clone(DiagnosticsEngine &Diags) const;
+};
+
+} // end namespace clang
+
+#endif
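
A sketch of the common console setup, again assuming DiagnosticOptions comes from clang/Frontend/DiagnosticOptions.h; the prefix string is only an example:

    #include "clang/Frontend/DiagnosticOptions.h"   // assumed header location
    #include "clang/Frontend/TextDiagnosticPrinter.h"
    #include "llvm/Support/raw_ostream.h"

    // The stock console printer, with every diagnostic prefixed by a tool
    // name so its output can be told apart from the host build's.
    clang::DiagnosticConsumer *makeConsolePrinter(const clang::DiagnosticOptions &Opts) {
      clang::TextDiagnosticPrinter *Printer =
          new clang::TextDiagnosticPrinter(llvm::errs(), Opts);
      Printer->setPrefix("mytool");
      return Printer;
    }
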
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/Utils.h b/contrib/llvm/tools/clang/include/clang/Frontend/Utils.h
new file mode 100644
index 0000000..6b1fc63
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/Utils.h
@@ -0,0 +1,108 @@
+//===--- Utils.h - Misc utilities for the front-end -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header contains miscellaneous utilities for various front-end actions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_UTILS_H
+#define LLVM_CLANG_FRONTEND_UTILS_H
+
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+class raw_fd_ostream;
+class Triple;
+}
+
+namespace clang {
+class ASTConsumer;
+class CompilerInstance;
+class CompilerInvocation;
+class Decl;
+class DependencyOutputOptions;
+class DiagnosticsEngine;
+class DiagnosticOptions;
+class FileManager;
+class HeaderSearch;
+class HeaderSearchOptions;
+class IdentifierTable;
+class LangOptions;
+class Preprocessor;
+class PreprocessorOptions;
+class PreprocessorOutputOptions;
+class SourceManager;
+class Stmt;
+class TargetInfo;
+class FrontendOptions;
+
+/// Apply the header search options to the given HeaderSearch object.
+void ApplyHeaderSearchOptions(HeaderSearch &HS,
+ const HeaderSearchOptions &HSOpts,
+ const LangOptions &Lang,
+ const llvm::Triple &triple);
+
+/// InitializePreprocessor - Initialize the preprocessor getting it and the
+/// environment ready to process a single file.
+void InitializePreprocessor(Preprocessor &PP,
+ const PreprocessorOptions &PPOpts,
+ const HeaderSearchOptions &HSOpts,
+ const FrontendOptions &FEOpts);
+
+/// ProcessWarningOptions - Initialize the diagnostic client and process the
+/// warning options specified on the command line.
+void ProcessWarningOptions(DiagnosticsEngine &Diags,
+ const DiagnosticOptions &Opts);
+
+/// DoPrintPreprocessedInput - Implement -E mode.
+void DoPrintPreprocessedInput(Preprocessor &PP, raw_ostream* OS,
+ const PreprocessorOutputOptions &Opts);
+
+/// AttachDependencyFileGen - Create a dependency file generator, and attach
+/// it to the given preprocessor. This takes ownership of the output stream.
+void AttachDependencyFileGen(Preprocessor &PP,
+ const DependencyOutputOptions &Opts);
+
+/// AttachDependencyGraphGen - Create a dependency graph generator, and attach
+/// it to the given preprocessor.
+ void AttachDependencyGraphGen(Preprocessor &PP, StringRef OutputFile,
+ StringRef SysRoot);
+
+/// AttachHeaderIncludeGen - Create a header include list generator, and attach
+/// it to the given preprocessor.
+///
+/// \param ShowAllHeaders - If true, show all header information instead of just
+/// headers following the predefines buffer. This is useful for making sure
+/// includes mentioned on the command line are also reported, but differs from
+/// the default behavior used by -H.
+/// \param OutputPath - If non-empty, a path to write the header include
+/// information to, instead of writing to stderr.
+void AttachHeaderIncludeGen(Preprocessor &PP, bool ShowAllHeaders = false,
+ StringRef OutputPath = "",
+ bool ShowDepth = true);
+
+/// CacheTokens - Cache tokens for use with PCH. Note that this requires
+/// a seekable stream.
+void CacheTokens(Preprocessor &PP, llvm::raw_fd_ostream* OS);
+
+/// createInvocationFromCommandLine - Construct a compiler invocation object for
+/// a command line argument vector.
+///
+/// \return A CompilerInvocation, or 0 if none was built for the given
+/// argument vector.
+CompilerInvocation *
+createInvocationFromCommandLine(ArrayRef<const char *> Args,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags =
+ IntrusiveRefCntPtr<DiagnosticsEngine>());
+
+} // end namespace clang
+
+#endif
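
As an example of the last helper, a driver-style argument vector can be turned into an invocation like this; the arguments are hypothetical, and CompilerInvocation.h is assumed to live in clang/Frontend/:

    #include "clang/Frontend/CompilerInvocation.h"   // assumed header location
    #include "clang/Frontend/Utils.h"
    #include "llvm/ADT/ArrayRef.h"

    // Build a CompilerInvocation from command-line style arguments, letting
    // the helper create a default DiagnosticsEngine internally. Returns 0 on
    // failure, matching the contract documented above.
    clang::CompilerInvocation *makeInvocation() {
      const char *Args[] = { "-fsyntax-only", "input.c" };
      return clang::createInvocationFromCommandLine(
          llvm::ArrayRef<const char *>(Args, 2));
    }
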
diff --git a/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h b/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h
new file mode 100644
index 0000000..2fc6ccc
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Frontend/VerifyDiagnosticConsumer.h
@@ -0,0 +1,97 @@
+//===- VerifyDiagnosticConsumer.h - Verifying Diagnostic Client -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_VERIFYDIAGNOSTICSCLIENT_H
+#define LLVM_CLANG_FRONTEND_VERIFYDIAGNOSTICSCLIENT_H
+
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/OwningPtr.h"
+
+namespace clang {
+
+class DiagnosticsEngine;
+class TextDiagnosticBuffer;
+
+/// VerifyDiagnosticConsumer - Create a diagnostic client which will use
+/// markers in the input source to check that all the emitted diagnostics match
+/// those expected.
+///
+/// USING THE DIAGNOSTIC CHECKER:
+///
+/// Indicating that a line expects an error or a warning is simple. Put a
+/// comment on the line that has the diagnostic, use:
+///
+/// expected-{error,warning,note}
+///
+/// to tag if it's an expected error or warning, and place the expected text
+/// between {{ and }} markers. The full text doesn't have to be included, only
+/// enough to ensure that the correct diagnostic was emitted.
+///
+/// Here's an example:
+///
+/// int A = B; // expected-error {{use of undeclared identifier 'B'}}
+///
+/// You can place as many diagnostics on one line as you wish. To make the code
+/// more readable, you can use slash-newline to separate out the diagnostics.
+///
+/// The simple syntax above allows each specification to match exactly one
+/// error. You can use the extended syntax to customize this. The extended
+/// syntax is "expected-<type> <n> {{diag text}}", where <type> is one of
+/// "error", "warning" or "note", and <n> is a positive integer. This allows the
+/// diagnostic to appear as many times as specified. Example:
+///
+/// void f(); // expected-note 2 {{previous declaration is here}}
+///
+/// Regex matching mode may be selected by appending '-re' to type. Example:
+///
+/// expected-error-re
+///
+/// Examples matching error: "variable has incomplete type 'struct s'"
+///
+/// // expected-error {{variable has incomplete type 'struct s'}}
+/// // expected-error {{variable has incomplete type}}
+///
+/// // expected-error-re {{variable has incomplete type 'struct .'}}
+/// // expected-error-re {{variable has incomplete type 'struct .*'}}
+/// // expected-error-re {{variable has incomplete type 'struct (.*)'}}
+/// // expected-error-re {{variable has incomplete type 'struct[[:space:]](.*)'}}
+///
+class VerifyDiagnosticConsumer: public DiagnosticConsumer {
+public:
+ DiagnosticsEngine &Diags;
+ DiagnosticConsumer *PrimaryClient;
+ bool OwnsPrimaryClient;
+ OwningPtr<TextDiagnosticBuffer> Buffer;
+ Preprocessor *CurrentPreprocessor;
+
+private:
+ FileID FirstErrorFID; // FileID of first diagnostic
+ void CheckDiagnostics();
+
+public:
+ /// Create a new verifying diagnostic client, which will issue errors to \arg
+ /// the currently-attached diagnostic client when a diagnostic does not match
+ /// what is expected (as indicated in the source file).
+ VerifyDiagnosticConsumer(DiagnosticsEngine &Diags);
+ ~VerifyDiagnosticConsumer();
+
+ virtual void BeginSourceFile(const LangOptions &LangOpts,
+ const Preprocessor *PP);
+
+ virtual void EndSourceFile();
+
+ virtual void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info);
+
+ virtual DiagnosticConsumer *clone(DiagnosticsEngine &Diags) const;
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/FrontendTool/Utils.h b/contrib/llvm/tools/clang/include/clang/FrontendTool/Utils.h
new file mode 100644
index 0000000..031ee7d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/FrontendTool/Utils.h
@@ -0,0 +1,30 @@
+//===--- Utils.h - Misc utilities for the front-end -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header contains miscellaneous utilities for various front-end actions
+// which were split from Frontend to minimise Frontend's dependencies.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTENDTOOL_UTILS_H
+#define LLVM_CLANG_FRONTENDTOOL_UTILS_H
+
+namespace clang {
+
+class CompilerInstance;
+
+/// ExecuteCompilerInvocation - Execute the given actions described by the
+/// compiler invocation object in the given compiler instance.
+///
+/// \return - True on success.
+bool ExecuteCompilerInvocation(CompilerInstance *Clang);
+
+} // end namespace clang
+
+#endif
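
A one-line usage sketch, assuming a CompilerInstance (clang/Frontend/CompilerInstance.h) that has already been given an invocation and a diagnostics engine:

    #include "clang/Frontend/CompilerInstance.h"   // assumed header location
    #include "clang/FrontendTool/Utils.h"

    // Run whichever frontend action the invocation requests (-E,
    // -fsyntax-only, code generation, ...) on the configured instance.
    bool runConfiguredInvocation(clang::CompilerInstance &Clang) {
      return clang::ExecuteCompilerInvocation(&Clang);
    }
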
diff --git a/contrib/llvm/tools/clang/include/clang/Index/ASTLocation.h b/contrib/llvm/tools/clang/include/clang/Index/ASTLocation.h
new file mode 100644
index 0000000..45097cc
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Index/ASTLocation.h
@@ -0,0 +1,173 @@
+//===--- ASTLocation.h - A <Decl, Stmt> pair --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// ASTLocation is a Decl or a Stmt and its immediate Decl parent.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INDEX_ASTLOCATION_H
+#define LLVM_CLANG_INDEX_ASTLOCATION_H
+
+#include "clang/AST/TypeLoc.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+ class Decl;
+ class Stmt;
+ class NamedDecl;
+
+namespace idx {
+ class TranslationUnit;
+
+/// \brief Represents a Decl or a Stmt and its immediate Decl parent. It's
+/// immutable.
+///
+/// ASTLocation is intended to be used as a "pointer" into the AST. It is either
+/// just a Decl, or a Stmt and its Decl parent. Since a single Stmt is devoid
+/// of context, its parent Decl provides all the additional missing information
+/// like the declaration context, ASTContext, etc.
+///
+class ASTLocation {
+public:
+ enum NodeKind {
+ N_Decl, N_NamedRef, N_Stmt, N_Type
+ };
+
+ struct NamedRef {
+ NamedDecl *ND;
+ SourceLocation Loc;
+
+ NamedRef() : ND(0) { }
+ NamedRef(NamedDecl *nd, SourceLocation loc) : ND(nd), Loc(loc) { }
+ };
+
+private:
+ llvm::PointerIntPair<Decl *, 2, NodeKind> ParentDecl;
+
+ union {
+ Decl *D;
+ Stmt *Stm;
+ struct {
+ NamedDecl *ND;
+ unsigned RawLoc;
+ } NDRef;
+ struct {
+ void *TyPtr;
+ void *Data;
+ } Ty;
+ };
+
+public:
+ ASTLocation() { }
+
+ explicit ASTLocation(const Decl *d)
+ : ParentDecl(const_cast<Decl*>(d), N_Decl), D(const_cast<Decl*>(d)) { }
+
+ ASTLocation(const Decl *parentDecl, const Stmt *stm)
+ : ParentDecl(const_cast<Decl*>(parentDecl), N_Stmt),
+ Stm(const_cast<Stmt*>(stm)) {
+ if (!stm) ParentDecl.setPointer(0);
+ }
+
+ ASTLocation(const Decl *parentDecl, NamedDecl *ndRef, SourceLocation loc)
+ : ParentDecl(const_cast<Decl*>(parentDecl), N_NamedRef) {
+ if (ndRef) {
+ NDRef.ND = ndRef;
+ NDRef.RawLoc = loc.getRawEncoding();
+ } else
+ ParentDecl.setPointer(0);
+ }
+
+ ASTLocation(const Decl *parentDecl, TypeLoc tyLoc)
+ : ParentDecl(const_cast<Decl*>(parentDecl), N_Type) {
+ if (tyLoc) {
+ Ty.TyPtr = tyLoc.getType().getAsOpaquePtr();
+ Ty.Data = tyLoc.getOpaqueData();
+ } else
+ ParentDecl.setPointer(0);
+ }
+
+ bool isValid() const { return ParentDecl.getPointer() != 0; }
+ bool isInvalid() const { return !isValid(); }
+
+ NodeKind getKind() const {
+ assert(isValid());
+ return (NodeKind)ParentDecl.getInt();
+ }
+
+ Decl *getParentDecl() const { return ParentDecl.getPointer(); }
+
+ Decl *AsDecl() const {
+ assert(getKind() == N_Decl);
+ return D;
+ }
+ Stmt *AsStmt() const {
+ assert(getKind() == N_Stmt);
+ return Stm;
+ }
+ NamedRef AsNamedRef() const {
+ assert(getKind() == N_NamedRef);
+ return NamedRef(NDRef.ND, SourceLocation::getFromRawEncoding(NDRef.RawLoc));
+ }
+ TypeLoc AsTypeLoc() const {
+ assert(getKind() == N_Type);
+ return TypeLoc(QualType::getFromOpaquePtr(Ty.TyPtr), Ty.Data);
+ }
+
+ Decl *dyn_AsDecl() const { return isValid() && getKind() == N_Decl ? D : 0; }
+ Stmt *dyn_AsStmt() const {
+ return isValid() && getKind() == N_Stmt ? Stm : 0;
+ }
+ NamedRef dyn_AsNamedRef() const {
+ return getKind() == N_NamedRef ? AsNamedRef() : NamedRef();
+ }
+ TypeLoc dyn_AsTypeLoc() const {
+ return getKind() == N_Type ? AsTypeLoc() : TypeLoc();
+ }
+
+ bool isDecl() const { return isValid() && getKind() == N_Decl; }
+ bool isStmt() const { return isValid() && getKind() == N_Stmt; }
+ bool isNamedRef() const { return isValid() && getKind() == N_NamedRef; }
+ bool isType() const { return isValid() && getKind() == N_Type; }
+
+ /// \brief Returns the declaration that this ASTLocation references.
+ ///
+ /// If this points to a Decl, that Decl is returned.
+ /// If this points to an Expr that references a Decl, that Decl is returned,
+ /// otherwise it returns NULL.
+ Decl *getReferencedDecl();
+ const Decl *getReferencedDecl() const {
+ return const_cast<ASTLocation*>(this)->getReferencedDecl();
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+
+ void print(raw_ostream &OS) const;
+};
+
+/// \brief Like ASTLocation but also contains the TranslationUnit that the
+/// ASTLocation originated from.
+class TULocation : public ASTLocation {
+ TranslationUnit *TU;
+
+public:
+ TULocation(TranslationUnit *tu, ASTLocation astLoc)
+ : ASTLocation(astLoc), TU(tu) {
+ assert(tu && "Passed null translation unit");
+ }
+
+ TranslationUnit *getTU() const { return TU; }
+};
+
+} // namespace idx
+
+} // namespace clang
+
+#endif
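
A short sketch of the Stmt-plus-parent-Decl pairing described above:

    #include "clang/Index/ASTLocation.h"
    #include "llvm/Support/raw_ostream.h"

    // Pair a statement with the declaration it lives in, so later code can
    // recover the surrounding context, then print it if the pairing is valid.
    void describeStmt(clang::Decl *Parent, clang::Stmt *S) {
      clang::idx::ASTLocation Loc(Parent, S);
      if (Loc.isValid() && Loc.isStmt())
        Loc.print(llvm::errs());
    }
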
diff --git a/contrib/llvm/tools/clang/include/clang/Index/Analyzer.h b/contrib/llvm/tools/clang/include/clang/Index/Analyzer.h
new file mode 100644
index 0000000..f6b5465
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Index/Analyzer.h
@@ -0,0 +1,56 @@
+//===--- Analyzer.h - Analysis for indexing information ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Analyzer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INDEX_ANALYZER_H
+#define LLVM_CLANG_INDEX_ANALYZER_H
+
+namespace clang {
+ class Decl;
+ class ObjCMessageExpr;
+
+namespace idx {
+ class Program;
+ class IndexProvider;
+ class TULocationHandler;
+
+/// \brief Provides indexing information, like finding all references of an
+/// Entity across translation units.
+class Analyzer {
+ Program &Prog;
+ IndexProvider &Idxer;
+
+ Analyzer(const Analyzer&); // do not implement
+ Analyzer &operator=(const Analyzer &); // do not implement
+
+public:
+ explicit Analyzer(Program &prog, IndexProvider &idxer)
+ : Prog(prog), Idxer(idxer) { }
+
+ /// \brief Find all TULocations for declarations of the given Decl and pass
+ /// them to Handler.
+ void FindDeclarations(Decl *D, TULocationHandler &Handler);
+
+ /// \brief Find all TULocations for references of the given Decl and pass
+ /// them to Handler.
+ void FindReferences(Decl *D, TULocationHandler &Handler);
+
+ /// \brief Find methods that may respond to the given message and pass them
+ /// to Handler.
+ void FindObjCMethods(ObjCMessageExpr *MsgE, TULocationHandler &Handler);
+};
+
+} // namespace idx
+
+} // namespace clang
+
+#endif
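
A sketch of pairing the Analyzer with the Storing helper declared later in this import (clang/Index/Handlers.h):

    #include "clang/Index/ASTLocation.h"
    #include "clang/Index/Analyzer.h"
    #include "clang/Index/Handlers.h"

    // Collect every cross-TU reference to D into a vector-backed handler and
    // report how many were found; each stored element is a TULocation.
    unsigned countReferences(clang::idx::Analyzer &Analyzer, clang::Decl *D) {
      clang::idx::Storing<clang::idx::TULocationHandler> Refs;
      Analyzer.FindReferences(D, Refs);
      return Refs.end() - Refs.begin();
    }
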
diff --git a/contrib/llvm/tools/clang/include/clang/Index/DeclReferenceMap.h b/contrib/llvm/tools/clang/include/clang/Index/DeclReferenceMap.h
new file mode 100644
index 0000000..73f2fe5
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Index/DeclReferenceMap.h
@@ -0,0 +1,50 @@
+//===--- DeclReferenceMap.h - Map Decls to their references -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// DeclReferenceMap creates a mapping from Decls to the ASTLocations that
+// reference them.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INDEX_DECLREFERENCEMAP_H
+#define LLVM_CLANG_INDEX_DECLREFERENCEMAP_H
+
+#include "clang/Index/ASTLocation.h"
+#include "clang/Index/STLExtras.h"
+#include <map>
+
+namespace clang {
+ class ASTContext;
+ class NamedDecl;
+
+namespace idx {
+
+/// \brief Maps NamedDecls with the ASTLocations that reference them.
+///
+/// References are mapped and retrieved using the canonical decls.
+class DeclReferenceMap {
+public:
+ explicit DeclReferenceMap(ASTContext &Ctx);
+
+ typedef std::multimap<NamedDecl*, ASTLocation> MapTy;
+ typedef pair_value_iterator<MapTy::iterator> astlocation_iterator;
+
+ astlocation_iterator refs_begin(NamedDecl *D) const;
+ astlocation_iterator refs_end(NamedDecl *D) const;
+ bool refs_empty(NamedDecl *D) const;
+
+private:
+ mutable MapTy Map;
+};
+
+} // end idx namespace
+
+} // end clang namespace
+
+#endif
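
A small sketch of querying the map for one declaration:

    #include "clang/Index/DeclReferenceMap.h"

    // Count the ASTLocations recorded as referencing D in this ASTContext.
    unsigned countRefs(const clang::idx::DeclReferenceMap &Map,
                       clang::NamedDecl *D) {
      unsigned N = 0;
      for (clang::idx::DeclReferenceMap::astlocation_iterator
             I = Map.refs_begin(D), E = Map.refs_end(D); I != E; ++I)
        ++N;
      return N;
    }
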
diff --git a/contrib/llvm/tools/clang/include/clang/Index/Entity.h b/contrib/llvm/tools/clang/include/clang/Index/Entity.h
new file mode 100644
index 0000000..d104458
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Index/Entity.h
@@ -0,0 +1,149 @@
+//===--- Entity.h - Cross-translation-unit "token" for decls ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Entity is an ASTContext-independent way to refer to declarations that are
+// visible across translation units.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INDEX_ENTITY_H
+#define LLVM_CLANG_INDEX_ENTITY_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include <string>
+
+namespace clang {
+ class ASTContext;
+ class Decl;
+
+namespace idx {
+ class Program;
+ class EntityImpl;
+
+/// \brief An ASTContext-independent way to refer to declarations.
+///
+/// Entity is basically the link for declarations that are semantically the same
+/// in multiple ASTContexts. A client will convert a Decl into an Entity and
+/// later use that Entity to find the "same" Decl in another ASTContext.
+/// Declarations that are semantically the same and visible across translation
+/// units will be associated with the same Entity.
+///
+/// An Entity may also refer to declarations that cannot be visible across
+/// translation units, e.g. static functions with the same name in multiple
+/// translation units will be associated with different Entities.
+///
+/// Entities can be checked for equality but note that the same Program object
+/// should be used when getting Entities.
+///
+class Entity {
+ /// \brief Stores the Decl directly if it is not visible outside of its own
+ /// translation unit, otherwise it stores the associated EntityImpl.
+ llvm::PointerUnion<Decl *, EntityImpl *> Val;
+
+ explicit Entity(Decl *D);
+ explicit Entity(EntityImpl *impl) : Val(impl) { }
+ friend class EntityGetter;
+
+public:
+ Entity() { }
+
+ /// \brief Find the Decl that can be referred to by this entity.
+ Decl *getDecl(ASTContext &AST) const;
+
+ /// \brief If this Entity represents a declaration that is internal to its
+ /// translation unit, getInternalDecl() returns it.
+ Decl *getInternalDecl() const {
+ assert(isInternalToTU() && "This Entity is not internal!");
+ return Val.get<Decl *>();
+ }
+
+ /// \brief Get a printable name for debugging purposes.
+ std::string getPrintableName() const;
+
+ /// \brief Get an Entity associated with the given Decl.
+ /// \returns invalid Entity if an Entity cannot refer to this Decl.
+ static Entity get(Decl *D, Program &Prog);
+
+ /// \brief Get an Entity associated with a name in the global namespace.
+ static Entity get(StringRef Name, Program &Prog);
+
+ /// \brief true if the Entity is not visible outside the translation unit.
+ bool isInternalToTU() const {
+ assert(isValid() && "This Entity is not valid!");
+ return Val.is<Decl *>();
+ }
+
+ bool isValid() const { return !Val.isNull(); }
+ bool isInvalid() const { return !isValid(); }
+
+ void *getAsOpaquePtr() const { return Val.getOpaqueValue(); }
+ static Entity getFromOpaquePtr(void *Ptr) {
+ Entity Ent;
+ Ent.Val = llvm::PointerUnion<Decl *, EntityImpl *>::getFromOpaqueValue(Ptr);
+ return Ent;
+ }
+
+ friend bool operator==(const Entity &LHS, const Entity &RHS) {
+ return LHS.getAsOpaquePtr() == RHS.getAsOpaquePtr();
+ }
+
+ // For use in a std::map.
+ friend bool operator < (const Entity &LHS, const Entity &RHS) {
+ return LHS.getAsOpaquePtr() < RHS.getAsOpaquePtr();
+ }
+
+ // For use in DenseMap/DenseSet.
+ static Entity getEmptyMarker() {
+ Entity Ent;
+ Ent.Val =
+ llvm::PointerUnion<Decl *, EntityImpl *>::getFromOpaqueValue((void*)-1);
+ return Ent;
+ }
+ static Entity getTombstoneMarker() {
+ Entity Ent;
+ Ent.Val =
+ llvm::PointerUnion<Decl *, EntityImpl *>::getFromOpaqueValue((void*)-2);
+ return Ent;
+ }
+};
+
+} // namespace idx
+
+} // namespace clang
+
+namespace llvm {
+/// Define DenseMapInfo so that Entities can be used as keys in DenseMap and
+/// DenseSets.
+template<>
+struct DenseMapInfo<clang::idx::Entity> {
+ static inline clang::idx::Entity getEmptyKey() {
+ return clang::idx::Entity::getEmptyMarker();
+ }
+
+ static inline clang::idx::Entity getTombstoneKey() {
+ return clang::idx::Entity::getTombstoneMarker();
+ }
+
+ static unsigned getHashValue(clang::idx::Entity);
+
+ static inline bool
+ isEqual(clang::idx::Entity LHS, clang::idx::Entity RHS) {
+ return LHS == RHS;
+ }
+};
+
+template <>
+struct isPodLike<clang::idx::Entity> { static const bool value = true; };
+
+} // end namespace llvm
+
+#endif
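
A sketch of the round trip described above, assuming both ASTContexts were registered with the same Program (clang/Index/Program.h, elsewhere in this import):

    #include "clang/Index/Entity.h"
    #include "clang/Index/Program.h"

    // Convert a Decl from one translation unit into an Entity, then resolve
    // it to the corresponding Decl in another ASTContext. Returns 0 when the
    // declaration has no Entity or no counterpart in OtherCtx.
    clang::Decl *findCounterpart(clang::Decl *D, clang::idx::Program &Prog,
                                 clang::ASTContext &OtherCtx) {
      clang::idx::Entity Ent = clang::idx::Entity::get(D, Prog);
      if (Ent.isInvalid())
        return 0;
      return Ent.getDecl(OtherCtx);
    }
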
diff --git a/contrib/llvm/tools/clang/include/clang/Index/GlobalCallGraph.h b/contrib/llvm/tools/clang/include/clang/Index/GlobalCallGraph.h
new file mode 100644
index 0000000..7ba1cce
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Index/GlobalCallGraph.h
@@ -0,0 +1,149 @@
+//== GlobalCallGraph.h - Call graph building --------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CallGraph and CallGraphNode classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INDEX_CALLGRAPH
+#define LLVM_CLANG_INDEX_CALLGRAPH
+
+#include "clang/Index/ASTLocation.h"
+#include "clang/Index/Entity.h"
+#include "clang/Index/Program.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/STLExtras.h"
+#include <vector>
+#include <map>
+
+using namespace clang;
+
+namespace clang {
+namespace idx {
+
+class CallGraphNode {
+ Entity F;
+ typedef std::pair<ASTLocation, CallGraphNode*> CallRecord;
+ std::vector<CallRecord> CalledFunctions;
+
+public:
+ CallGraphNode(Entity f) : F(f) {}
+
+ typedef std::vector<CallRecord>::iterator iterator;
+ typedef std::vector<CallRecord>::const_iterator const_iterator;
+
+ iterator begin() { return CalledFunctions.begin(); }
+ iterator end() { return CalledFunctions.end(); }
+ const_iterator begin() const { return CalledFunctions.begin(); }
+ const_iterator end() const { return CalledFunctions.end(); }
+
+ void addCallee(ASTLocation L, CallGraphNode *Node) {
+ CalledFunctions.push_back(std::make_pair(L, Node));
+ }
+
+ bool hasCallee() const { return begin() != end(); }
+
+ std::string getName() const { return F.getPrintableName(); }
+
+ Decl *getDecl(ASTContext &Ctx) const { return F.getDecl(Ctx); }
+};
+
+class CallGraph {
+ /// Program manages all Entities.
+ Program &Prog;
+
+ typedef std::map<Entity, CallGraphNode *> FunctionMapTy;
+
+ /// FunctionMap owns all CallGraphNodes.
+ FunctionMapTy FunctionMap;
+
+ /// CallerCtx maps a caller to its ASTContext.
+ llvm::DenseMap<CallGraphNode *, ASTContext *> CallerCtx;
+
+ /// Root node is the 'main' function or 0.
+ CallGraphNode *Root;
+
+ /// ExternalCallingNode has edges to all external functions.
+ CallGraphNode *ExternalCallingNode;
+
+public:
+ CallGraph(Program &P);
+ ~CallGraph();
+
+ typedef FunctionMapTy::iterator iterator;
+ typedef FunctionMapTy::const_iterator const_iterator;
+
+ iterator begin() { return FunctionMap.begin(); }
+ iterator end() { return FunctionMap.end(); }
+ const_iterator begin() const { return FunctionMap.begin(); }
+ const_iterator end() const { return FunctionMap.end(); }
+
+ CallGraphNode *getRoot() { return Root; }
+
+ CallGraphNode *getExternalCallingNode() { return ExternalCallingNode; }
+
+ void addTU(ASTContext &AST);
+
+ Program &getProgram() { return Prog; }
+
+ CallGraphNode *getOrInsertFunction(idx::Entity F);
+
+ Decl *getDecl(CallGraphNode *Node);
+
+ void print(raw_ostream &os);
+ void dump();
+
+ void ViewCallGraph() const;
+};
+
+}} // end clang idx namespace
+
+namespace llvm {
+
+template <> struct GraphTraits<clang::idx::CallGraph> {
+ typedef clang::idx::CallGraph GraphType;
+ typedef clang::idx::CallGraphNode NodeType;
+
+ typedef std::pair<clang::idx::ASTLocation, NodeType*> CGNPairTy;
+ typedef std::pointer_to_unary_function<CGNPairTy, NodeType*> CGNDerefFun;
+
+ typedef mapped_iterator<NodeType::iterator, CGNDerefFun> ChildIteratorType;
+
+ static NodeType *getEntryNode(GraphType *CG) {
+ return CG->getExternalCallingNode();
+ }
+
+ static ChildIteratorType child_begin(NodeType *N) {
+ return map_iterator(N->begin(), CGNDerefFun(CGNDeref));
+ }
+ static ChildIteratorType child_end(NodeType *N) {
+ return map_iterator(N->end(), CGNDerefFun(CGNDeref));
+ }
+
+ typedef std::pair<clang::idx::Entity, NodeType*> PairTy;
+ typedef std::pointer_to_unary_function<PairTy, NodeType*> DerefFun;
+
+ typedef mapped_iterator<GraphType::const_iterator, DerefFun> nodes_iterator;
+
+ static nodes_iterator nodes_begin(const GraphType &CG) {
+ return map_iterator(CG.begin(), DerefFun(CGDeref));
+ }
+ static nodes_iterator nodes_end(const GraphType &CG) {
+ return map_iterator(CG.end(), DerefFun(CGDeref));
+ }
+
+ static NodeType *CGNDeref(CGNPairTy P) { return P.second; }
+
+ static NodeType *CGDeref(PairTy P) { return P.second; }
+};
+
+} // end llvm namespace
+
+#endif
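
A sketch of building the graph over two contexts; ViewCallGraph depends on LLVM's Graphviz support being available on the host:

    #include "clang/Index/GlobalCallGraph.h"

    // Merge the call edges of two translation units into one global graph
    // and hand the result to the Graphviz viewer.
    void viewCombinedCallGraph(clang::idx::Program &Prog,
                               clang::ASTContext &First,
                               clang::ASTContext &Second) {
      clang::idx::CallGraph CG(Prog);
      CG.addTU(First);
      CG.addTU(Second);
      CG.ViewCallGraph();
    }
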
diff --git a/contrib/llvm/tools/clang/include/clang/Index/GlobalSelector.h b/contrib/llvm/tools/clang/include/clang/Index/GlobalSelector.h
new file mode 100644
index 0000000..9cd83a8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Index/GlobalSelector.h
@@ -0,0 +1,100 @@
+//===--- GlobalSelector.h - Cross-translation-unit "token" for selectors --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// GlobalSelector is an ASTContext-independent way to refer to selectors.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INDEX_GLOBALSELECTOR_H
+#define LLVM_CLANG_INDEX_GLOBALSELECTOR_H
+
+#include "llvm/ADT/DenseMap.h"
+#include <string>
+
+namespace clang {
+ class ASTContext;
+ class Selector;
+
+namespace idx {
+ class Program;
+
+/// \brief An ASTContext-independent way to refer to selectors.
+class GlobalSelector {
+ void *Val;
+
+ explicit GlobalSelector(void *val) : Val(val) { }
+
+public:
+ GlobalSelector() : Val(0) { }
+
+ /// \brief Get the ASTContext-specific selector.
+ Selector getSelector(ASTContext &AST) const;
+
+ bool isValid() const { return Val != 0; }
+ bool isInvalid() const { return !isValid(); }
+
+ /// \brief Get a printable name for debugging purposes.
+ std::string getPrintableName() const;
+
+ /// \brief Get a GlobalSelector for the ASTContext-specific selector.
+ static GlobalSelector get(Selector Sel, Program &Prog);
+
+ void *getAsOpaquePtr() const { return Val; }
+
+ static GlobalSelector getFromOpaquePtr(void *Ptr) {
+ return GlobalSelector(Ptr);
+ }
+
+ friend bool operator==(const GlobalSelector &LHS, const GlobalSelector &RHS) {
+ return LHS.getAsOpaquePtr() == RHS.getAsOpaquePtr();
+ }
+
+ // For use in a std::map.
+ friend bool operator< (const GlobalSelector &LHS, const GlobalSelector &RHS) {
+ return LHS.getAsOpaquePtr() < RHS.getAsOpaquePtr();
+ }
+
+ // For use in DenseMap/DenseSet.
+ static GlobalSelector getEmptyMarker() { return GlobalSelector((void*)-1); }
+ static GlobalSelector getTombstoneMarker() {
+ return GlobalSelector((void*)-2);
+ }
+};
+
+} // namespace idx
+
+} // namespace clang
+
+namespace llvm {
+/// Define DenseMapInfo so that GlobalSelectors can be used as keys in DenseMap
+/// and DenseSets.
+template<>
+struct DenseMapInfo<clang::idx::GlobalSelector> {
+ static inline clang::idx::GlobalSelector getEmptyKey() {
+ return clang::idx::GlobalSelector::getEmptyMarker();
+ }
+
+ static inline clang::idx::GlobalSelector getTombstoneKey() {
+ return clang::idx::GlobalSelector::getTombstoneMarker();
+ }
+
+ static unsigned getHashValue(clang::idx::GlobalSelector);
+
+ static inline bool
+ isEqual(clang::idx::GlobalSelector LHS, clang::idx::GlobalSelector RHS) {
+ return LHS == RHS;
+ }
+};
+
+template <>
+struct isPodLike<clang::idx::GlobalSelector> { static const bool value = true;};
+
+} // end namespace llvm
+
+#endif
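
The DenseMapInfo specialization above is what allows GlobalSelector to serve directly as a DenseMap/DenseSet key. A minimal usage sketch (illustrative, not part of the patch), assuming a Program and a Selector obtained from some ASTContext are already available:

#include "clang/Index/GlobalSelector.h"
#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/DenseMap.h"

// Count how many times each selector has been seen across translation units.
void noteSelector(llvm::DenseMap<clang::idx::GlobalSelector, unsigned> &Counts,
                  clang::Selector Sel, clang::idx::Program &Prog) {
  // Intern the selector into an ASTContext-independent key.
  clang::idx::GlobalSelector GSel = clang::idx::GlobalSelector::get(Sel, Prog);
  ++Counts[GSel]; // hashed/compared via the DenseMapInfo specialization above
}
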
diff --git a/contrib/llvm/tools/clang/include/clang/Index/Handlers.h b/contrib/llvm/tools/clang/include/clang/Index/Handlers.h
new file mode 100644
index 0000000..1e017f8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Index/Handlers.h
@@ -0,0 +1,82 @@
+//===--- Handlers.h - Interfaces for receiving information ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Abstract interfaces for receiving information.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INDEX_HANDLERS_H
+#define LLVM_CLANG_INDEX_HANDLERS_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+
+namespace idx {
+ class Entity;
+ class TranslationUnit;
+ class TULocation;
+
+/// \brief Abstract interface for receiving Entities.
+class EntityHandler {
+public:
+ typedef Entity receiving_type;
+
+ virtual ~EntityHandler();
+ virtual void Handle(Entity Ent) = 0;
+};
+
+/// \brief Abstract interface for receiving TranslationUnits.
+class TranslationUnitHandler {
+public:
+ typedef TranslationUnit* receiving_type;
+
+ virtual ~TranslationUnitHandler();
+ virtual void Handle(TranslationUnit *TU) = 0;
+};
+
+/// \brief Abstract interface for receiving TULocations.
+class TULocationHandler {
+public:
+ typedef TULocation receiving_type;
+
+ virtual ~TULocationHandler();
+ virtual void Handle(TULocation TULoc) = 0;
+};
+
+/// \brief Helper for the Handler classes. Stores the objects into a vector.
+/// example:
+/// @code
+/// Storing<TranslationUnitHandler> TURes;
+/// IndexProvider.GetTranslationUnitsFor(Entity, TURes);
+/// for (Storing<TranslationUnitHandler>::iterator
+/// I = TURes.begin(), E = TURes.end(); I != E; ++I) { ....
+/// @endcode
+template <typename handler_type>
+class Storing : public handler_type {
+ typedef typename handler_type::receiving_type receiving_type;
+ typedef SmallVector<receiving_type, 8> StoreTy;
+ StoreTy Store;
+
+public:
+ virtual void Handle(receiving_type Obj) {
+ Store.push_back(Obj);
+ }
+
+ typedef typename StoreTy::const_iterator iterator;
+ iterator begin() const { return Store.begin(); }
+ iterator end() const { return Store.end(); }
+};
+
+} // namespace idx
+
+} // namespace clang
+
+#endif
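
A handler can also be implemented directly instead of going through the Storing<> helper; the sketch below (illustrative only, not part of the patch) just collects every Entity it is handed:

#include "clang/Index/Handlers.h"
#include "clang/Index/Entity.h"
#include "llvm/ADT/SmallVector.h"

namespace {
// Equivalent in spirit to Storing<EntityHandler>, written out explicitly.
class CollectingEntityHandler : public clang::idx::EntityHandler {
  llvm::SmallVector<clang::idx::Entity, 16> Entities;
public:
  virtual void Handle(clang::idx::Entity Ent) { Entities.push_back(Ent); }
  unsigned size() const { return Entities.size(); }
};
} // end anonymous namespace
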
diff --git a/contrib/llvm/tools/clang/include/clang/Index/IndexProvider.h b/contrib/llvm/tools/clang/include/clang/Index/IndexProvider.h
new file mode 100644
index 0000000..187dd93
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Index/IndexProvider.h
@@ -0,0 +1,38 @@
+//===--- IndexProvider.h - Maps information to translation units -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Maps information to TranslationUnits.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INDEX_INDEXPROVIDER_H
+#define LLVM_CLANG_INDEX_INDEXPROVIDER_H
+
+namespace clang {
+
+namespace idx {
+ class Entity;
+ class TranslationUnitHandler;
+ class GlobalSelector;
+
+/// \brief Maps information to TranslationUnits.
+class IndexProvider {
+public:
+ virtual ~IndexProvider();
+ virtual void GetTranslationUnitsFor(Entity Ent,
+ TranslationUnitHandler &Handler) = 0;
+ virtual void GetTranslationUnitsFor(GlobalSelector Sel,
+ TranslationUnitHandler &Handler) = 0;
+};
+
+} // namespace idx
+
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Index/Indexer.h b/contrib/llvm/tools/clang/include/clang/Index/Indexer.h
new file mode 100644
index 0000000..96c585d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Index/Indexer.h
@@ -0,0 +1,71 @@
+//===--- Indexer.h - IndexProvider implementation ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// IndexProvider implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INDEX_INDEXER_H
+#define LLVM_CLANG_INDEX_INDEXER_H
+
+#include "clang/Index/IndexProvider.h"
+#include "clang/Index/Entity.h"
+#include "clang/Index/GlobalSelector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/DenseMap.h"
+#include <map>
+
+namespace clang {
+ class ASTContext;
+ class FunctionDecl;
+
+namespace idx {
+ class Program;
+ class TranslationUnit;
+
+/// \brief Maps information to TranslationUnits.
+class Indexer : public IndexProvider {
+public:
+ typedef llvm::SmallPtrSet<TranslationUnit *, 4> TUSetTy;
+ typedef llvm::DenseMap<ASTContext *, TranslationUnit *> CtxTUMapTy;
+ typedef std::map<Entity, TUSetTy> MapTy;
+ typedef std::map<GlobalSelector, TUSetTy> SelMapTy;
+ typedef std::map<Entity, std::pair<FunctionDecl*,TranslationUnit*> > DefMapTy;
+
+ explicit Indexer(Program &prog) :
+ Prog(prog) { }
+
+ Program &getProgram() const { return Prog; }
+
+ /// \brief Find all Entities and map them to the given translation unit.
+ void IndexAST(TranslationUnit *TU);
+
+ virtual void GetTranslationUnitsFor(Entity Ent,
+ TranslationUnitHandler &Handler);
+ virtual void GetTranslationUnitsFor(GlobalSelector Sel,
+ TranslationUnitHandler &Handler);
+
+ std::pair<FunctionDecl*, TranslationUnit*> getDefinitionFor(Entity Ent);
+
+private:
+ Program &Prog;
+
+ MapTy Map;
+  // Map a function Entity to its definition.
+ DefMapTy DefMap;
+
+ CtxTUMapTy CtxTUMap;
+ SelMapTy SelMap;
+};
+
+} // namespace idx
+
+} // namespace clang
+
+#endif
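
The intended flow, sketched below (illustrative, not part of the patch) under the assumption that TU is some client-provided concrete TranslationUnit: a Program and an Indexer are created once, each parsed translation unit is registered with IndexAST(), and lookups then go through the IndexProvider interface, here using the Storing<> helper from Handlers.h.

#include "clang/Index/Program.h"
#include "clang/Index/Indexer.h"
#include "clang/Index/Handlers.h"

void indexAndQuery(clang::idx::TranslationUnit *TU, clang::idx::Entity Ent) {
  clang::idx::Program Prog;
  clang::idx::Indexer Idxer(Prog);

  Idxer.IndexAST(TU); // map every Entity of this translation unit

  // Which translation units know about Ent?
  clang::idx::Storing<clang::idx::TranslationUnitHandler> TUs;
  Idxer.GetTranslationUnitsFor(Ent, TUs);
  for (clang::idx::Storing<clang::idx::TranslationUnitHandler>::iterator
         I = TUs.begin(), E = TUs.end(); I != E; ++I)
    (void)*I; // each *I is a clang::idx::TranslationUnit*
}
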
diff --git a/contrib/llvm/tools/clang/include/clang/Index/Program.h b/contrib/llvm/tools/clang/include/clang/Index/Program.h
new file mode 100644
index 0000000..8039192
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Index/Program.h
@@ -0,0 +1,45 @@
+//===--- Program.h - Cross-translation unit information ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the idx::Program interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INDEX_PROGRAM_H
+#define LLVM_CLANG_INDEX_PROGRAM_H
+
+namespace clang {
+ class ASTContext;
+
+namespace idx {
+ class EntityHandler;
+
+/// \brief Top level object that owns and maintains information
+/// that is common across translation units.
+class Program {
+ void *Impl;
+
+ Program(const Program&); // do not implement
+ Program &operator=(const Program &); // do not implement
+ friend class Entity;
+ friend class GlobalSelector;
+
+public:
+ Program();
+ ~Program();
+
+ /// \brief Traverses the AST and passes all the entities to the Handler.
+ void FindEntities(ASTContext &Ctx, EntityHandler &Handler);
+};
+
+} // namespace idx
+
+} // namespace clang
+
+#endif
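
FindEntities is the traversal entry point: it walks an ASTContext and hands every discovered entity to an EntityHandler. A small sketch (illustrative, not part of the patch) using the Storing<> helper from Handlers.h:

#include "clang/Index/Program.h"
#include "clang/Index/Handlers.h"
#include "clang/Index/Entity.h"

unsigned countEntities(clang::ASTContext &Ctx) {
  clang::idx::Program Prog;
  clang::idx::Storing<clang::idx::EntityHandler> Ents;
  Prog.FindEntities(Ctx, Ents);            // traverse the AST, collect entities
  return unsigned(Ents.end() - Ents.begin());
}
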
diff --git a/contrib/llvm/tools/clang/include/clang/Index/STLExtras.h b/contrib/llvm/tools/clang/include/clang/Index/STLExtras.h
new file mode 100644
index 0000000..a3693c6
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Index/STLExtras.h
@@ -0,0 +1,63 @@
+//===--- STLExtras.h - Helper STL related templates -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Helper templates for using with the STL.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INDEX_STLEXTRAS_H
+#define LLVM_CLANG_INDEX_STLEXTRAS_H
+
+namespace clang {
+
+namespace idx {
+
+/// \brief Wraps an iterator whose value_type is a pair, and provides
+/// pair's second object as the value.
+template <typename iter_type>
+class pair_value_iterator {
+ iter_type I;
+
+public:
+ typedef typename iter_type::value_type::second_type value_type;
+ typedef value_type& reference;
+ typedef value_type* pointer;
+ typedef typename iter_type::iterator_category iterator_category;
+ typedef typename iter_type::difference_type difference_type;
+
+ pair_value_iterator() { }
+ pair_value_iterator(iter_type i) : I(i) { }
+
+ reference operator*() const { return I->second; }
+ pointer operator->() const { return &I->second; }
+
+ pair_value_iterator& operator++() {
+ ++I;
+ return *this;
+ }
+
+ pair_value_iterator operator++(int) {
+ pair_value_iterator tmp(*this);
+ ++(*this);
+ return tmp;
+ }
+
+ friend bool operator==(pair_value_iterator L, pair_value_iterator R) {
+ return L.I == R.I;
+ }
+ friend bool operator!=(pair_value_iterator L, pair_value_iterator R) {
+ return L.I != R.I;
+ }
+};
+
+} // end idx namespace
+
+} // end clang namespace
+
+#endif
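
Independent of the indexing classes, the adaptor can wrap any pair-valued iterator; a small sketch (illustrative only, not part of the patch) over a std::multimap:

#include "clang/Index/STLExtras.h"
#include <map>
#include <string>

typedef std::multimap<int, std::string> MapTy;
typedef clang::idx::pair_value_iterator<MapTy::iterator> value_iterator;

// Total length of all strings mapped to Key.
unsigned lengthsFor(MapTy &M, int Key) {
  std::pair<MapTy::iterator, MapTy::iterator> Range = M.equal_range(Key);
  unsigned Len = 0;
  for (value_iterator I(Range.first), E(Range.second); I != E; ++I)
    Len += I->size(); // operator-> yields the mapped std::string directly
  return Len;
}
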
diff --git a/contrib/llvm/tools/clang/include/clang/Index/SelectorMap.h b/contrib/llvm/tools/clang/include/clang/Index/SelectorMap.h
new file mode 100644
index 0000000..be01702
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Index/SelectorMap.h
@@ -0,0 +1,57 @@
+//===--- SelectorMap.h - Maps selectors to methods and messages -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// SelectorMap creates a mapping from selectors to ObjC method declarations
+// and ObjC message expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INDEX_SELECTORMAP_H
+#define LLVM_CLANG_INDEX_SELECTORMAP_H
+
+#include "clang/Index/ASTLocation.h"
+#include "clang/Index/STLExtras.h"
+#include "clang/Basic/IdentifierTable.h"
+#include <map>
+
+namespace clang {
+ class ASTContext;
+ class ObjCMethodDecl;
+
+namespace idx {
+
+/// \brief Maps selectors to the ObjC method declarations that define them and
+/// to the ASTLocations of the ObjC message expressions that reference them.
+class SelectorMap {
+public:
+ explicit SelectorMap(ASTContext &Ctx);
+
+ typedef std::multimap<Selector, ObjCMethodDecl *> SelMethMapTy;
+ typedef std::multimap<Selector, ASTLocation> SelRefMapTy;
+
+ typedef pair_value_iterator<SelMethMapTy::iterator> method_iterator;
+ typedef pair_value_iterator<SelRefMapTy::iterator> astlocation_iterator;
+
+ method_iterator methods_begin(Selector Sel) const;
+ method_iterator methods_end(Selector Sel) const;
+
+ astlocation_iterator refs_begin(Selector Sel) const;
+ astlocation_iterator refs_end(Selector Sel) const;
+
+private:
+ mutable SelMethMapTy SelMethMap;
+ mutable SelRefMapTy SelRefMap;
+};
+
+} // end idx namespace
+
+} // end clang namespace
+
+#endif
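
A sketch of how the iterators are meant to be used (illustrative, not part of the patch), assuming an ASTContext and a Selector are at hand; thanks to pair_value_iterator, dereferencing yields the ObjCMethodDecl* rather than the underlying multimap pair:

#include "clang/Index/SelectorMap.h"

unsigned countMethodsFor(clang::ASTContext &Ctx, clang::Selector Sel) {
  clang::idx::SelectorMap SelMap(Ctx); // builds the selector -> decl/ref maps
  unsigned N = 0;
  for (clang::idx::SelectorMap::method_iterator
         I = SelMap.methods_begin(Sel), E = SelMap.methods_end(Sel);
       I != E; ++I, ++N)
    (void)*I; // *I is a clang::ObjCMethodDecl*
  return N;
}
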
diff --git a/contrib/llvm/tools/clang/include/clang/Index/TranslationUnit.h b/contrib/llvm/tools/clang/include/clang/Index/TranslationUnit.h
new file mode 100644
index 0000000..ba5d48d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Index/TranslationUnit.h
@@ -0,0 +1,41 @@
+//===--- TranslationUnit.h - Interface for a translation unit ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Abstract interface for a translation unit.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INDEX_TRANSLATIONUNIT_H
+#define LLVM_CLANG_INDEX_TRANSLATIONUNIT_H
+
+namespace clang {
+ class ASTContext;
+ class DiagnosticsEngine;
+ class Preprocessor;
+
+namespace idx {
+ class DeclReferenceMap;
+ class SelectorMap;
+
+/// \brief Abstract interface for a translation unit.
+class TranslationUnit {
+public:
+ virtual ~TranslationUnit();
+ virtual ASTContext &getASTContext() = 0;
+ virtual Preprocessor &getPreprocessor() = 0;
+ virtual DiagnosticsEngine &getDiagnostic() = 0;
+ virtual DeclReferenceMap &getDeclReferenceMap() = 0;
+ virtual SelectorMap &getSelectorMap() = 0;
+};
+
+} // namespace idx
+
+} // namespace clang
+
+#endif
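
Clients of the indexing library supply the concrete TranslationUnit. A minimal adaptor sketch (all names illustrative, not part of the patch) that simply forwards to objects the client already owns:

#include "clang/Index/TranslationUnit.h"

class SimpleTranslationUnit : public clang::idx::TranslationUnit {
  clang::ASTContext &Ctx;
  clang::Preprocessor &PP;
  clang::DiagnosticsEngine &Diags;
  clang::idx::DeclReferenceMap &DeclRefs;
  clang::idx::SelectorMap &Sels;
public:
  SimpleTranslationUnit(clang::ASTContext &ctx, clang::Preprocessor &pp,
                        clang::DiagnosticsEngine &diags,
                        clang::idx::DeclReferenceMap &drm,
                        clang::idx::SelectorMap &sm)
    : Ctx(ctx), PP(pp), Diags(diags), DeclRefs(drm), Sels(sm) {}

  virtual clang::ASTContext &getASTContext() { return Ctx; }
  virtual clang::Preprocessor &getPreprocessor() { return PP; }
  virtual clang::DiagnosticsEngine &getDiagnostic() { return Diags; }
  virtual clang::idx::DeclReferenceMap &getDeclReferenceMap() { return DeclRefs; }
  virtual clang::idx::SelectorMap &getSelectorMap() { return Sels; }
};
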
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/CodeCompletionHandler.h b/contrib/llvm/tools/clang/include/clang/Lex/CodeCompletionHandler.h
new file mode 100644
index 0000000..d876776
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/CodeCompletionHandler.h
@@ -0,0 +1,71 @@
+//===--- CodeCompletionHandler.h - Preprocessor code completion -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CodeCompletionHandler interface, which provides
+// code-completion callbacks for the preprocessor.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_LEX_CODECOMPLETIONHANDLER_H
+#define LLVM_CLANG_LEX_CODECOMPLETIONHANDLER_H
+
+namespace clang {
+
+class IdentifierInfo;
+class MacroInfo;
+
+/// \brief Callback handler that receives notifications when performing code
+/// completion within the preprocessor.
+class CodeCompletionHandler {
+public:
+ virtual ~CodeCompletionHandler();
+
+ /// \brief Callback invoked when performing code completion for a preprocessor
+ /// directive.
+ ///
+ /// This callback will be invoked when the preprocessor processes a '#' at the
+ /// start of a line, followed by the code-completion token.
+ ///
+ /// \param InConditional Whether we're inside a preprocessor conditional
+ /// already.
+ virtual void CodeCompleteDirective(bool InConditional) { }
+
+ /// \brief Callback invoked when performing code completion within a block of
+ /// code that was excluded due to preprocessor conditionals.
+ virtual void CodeCompleteInConditionalExclusion() { }
+
+ /// \brief Callback invoked when performing code completion in a context
+ /// where the name of a macro is expected.
+ ///
+ /// \param IsDefinition Whether this is the definition of a macro, e.g.,
+ /// in a #define.
+ virtual void CodeCompleteMacroName(bool IsDefinition) { }
+
+ /// \brief Callback invoked when performing code completion in a preprocessor
+ /// expression, such as the condition of an #if or #elif directive.
+ virtual void CodeCompletePreprocessorExpression() { }
+
+ /// \brief Callback invoked when performing code completion inside a
+ /// function-like macro argument.
+ ///
+ /// There will be another callback invocation after the macro arguments are
+ /// parsed, so this callback should generally be used to note that the next
+ /// callback is invoked inside a macro argument.
+ virtual void CodeCompleteMacroArgument(IdentifierInfo *Macro,
+ MacroInfo *MacroInfo,
+ unsigned ArgumentIndex) { }
+
+ /// \brief Callback invoked when performing code completion in a part of the
+ /// file where we expect natural language, e.g., a comment, string, or
+ /// #error directive.
+ virtual void CodeCompleteNaturalLanguage() { }
+};
+
+}
+
+#endif // LLVM_CLANG_LEX_CODECOMPLETIONHANDLER_H
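
A handler only needs to override the callbacks it cares about, since every default is a no-op. A small illustrative subclass (not the handler Sema actually installs):

#include "clang/Lex/CodeCompletionHandler.h"

namespace {
// Records which preprocessor completion contexts were reached.
class LoggingCompletionHandler : public clang::CodeCompletionHandler {
public:
  bool SawDirective, SawMacroName;
  LoggingCompletionHandler() : SawDirective(false), SawMacroName(false) {}

  virtual void CodeCompleteDirective(bool InConditional) {
    SawDirective = true;  // completing right after a '#'
  }
  virtual void CodeCompleteMacroName(bool IsDefinition) {
    SawMacroName = true;  // completing e.g. the name in a #define or #ifdef
  }
  // All other callbacks keep their default no-op behavior.
};
} // end anonymous namespace
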
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/DirectoryLookup.h b/contrib/llvm/tools/clang/include/clang/Lex/DirectoryLookup.h
new file mode 100644
index 0000000..95f0d27
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/DirectoryLookup.h
@@ -0,0 +1,170 @@
+//===--- DirectoryLookup.h - Info for searching for headers -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DirectoryLookup interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LEX_DIRECTORYLOOKUP_H
+#define LLVM_CLANG_LEX_DIRECTORYLOOKUP_H
+
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceManager.h"
+
+namespace clang {
+class HeaderMap;
+class DirectoryEntry;
+class FileEntry;
+class HeaderSearch;
+class Module;
+
+/// DirectoryLookup - This class represents one entry in the search list that
+/// specifies the search order for directories in #include directives. It
+/// represents either a directory, a framework, or a headermap.
+///
+class DirectoryLookup {
+public:
+ enum LookupType_t {
+ LT_NormalDir,
+ LT_Framework,
+ LT_HeaderMap
+ };
+private:
+ union { // This union is discriminated by isHeaderMap.
+ /// Dir - This is the actual directory that we're referring to for a normal
+ /// directory or a framework.
+ const DirectoryEntry *Dir;
+
+ /// Map - This is the HeaderMap if this is a headermap lookup.
+ ///
+ const HeaderMap *Map;
+ } u;
+
+ /// DirCharacteristic - The type of directory this is: this is an instance of
+ /// SrcMgr::CharacteristicKind.
+ unsigned DirCharacteristic : 2;
+
+ /// UserSupplied - True if this is a user-supplied directory.
+ ///
+ bool UserSupplied : 1;
+
+ /// LookupType - This indicates whether this DirectoryLookup object is a
+ /// normal directory, a framework, or a headermap.
+ unsigned LookupType : 2;
+
+ /// \brief Whether this is a header map used when building a framework.
+ unsigned IsIndexHeaderMap : 1;
+
+public:
+ /// DirectoryLookup ctor - Note that this ctor *does not take ownership* of
+ /// 'dir'.
+ DirectoryLookup(const DirectoryEntry *dir, SrcMgr::CharacteristicKind DT,
+ bool isUser, bool isFramework)
+ : DirCharacteristic(DT), UserSupplied(isUser),
+ LookupType(isFramework ? LT_Framework : LT_NormalDir),
+ IsIndexHeaderMap(false) {
+ u.Dir = dir;
+ }
+
+ /// DirectoryLookup ctor - Note that this ctor *does not take ownership* of
+ /// 'map'.
+ DirectoryLookup(const HeaderMap *map, SrcMgr::CharacteristicKind DT,
+ bool isUser, bool isIndexHeaderMap)
+ : DirCharacteristic(DT), UserSupplied(isUser), LookupType(LT_HeaderMap),
+ IsIndexHeaderMap(isIndexHeaderMap) {
+ u.Map = map;
+ }
+
+ /// getLookupType - Return the kind of directory lookup that this is: either a
+ /// normal directory, a framework path, or a HeaderMap.
+ LookupType_t getLookupType() const { return (LookupType_t)LookupType; }
+
+ /// getName - Return the directory or filename corresponding to this lookup
+ /// object.
+ const char *getName() const;
+
+ /// getDir - Return the directory that this entry refers to.
+ ///
+ const DirectoryEntry *getDir() const { return isNormalDir() ? u.Dir : 0; }
+
+ /// getFrameworkDir - Return the directory that this framework refers to.
+ ///
+ const DirectoryEntry *getFrameworkDir() const {
+ return isFramework() ? u.Dir : 0;
+ }
+
+  /// getHeaderMap - Return the HeaderMap that this entry refers to, if any.
+ ///
+ const HeaderMap *getHeaderMap() const { return isHeaderMap() ? u.Map : 0; }
+
+ /// isNormalDir - Return true if this is a normal directory, not a header map.
+ bool isNormalDir() const { return getLookupType() == LT_NormalDir; }
+
+ /// isFramework - True if this is a framework directory.
+ ///
+ bool isFramework() const { return getLookupType() == LT_Framework; }
+
+ /// isHeaderMap - Return true if this is a header map, not a normal directory.
+ bool isHeaderMap() const { return getLookupType() == LT_HeaderMap; }
+
+  /// getDirCharacteristic - The type of directory this is, one of the
+  /// SrcMgr::CharacteristicKind values.
+ SrcMgr::CharacteristicKind getDirCharacteristic() const {
+ return (SrcMgr::CharacteristicKind)DirCharacteristic;
+ }
+
+ /// isUserSupplied - True if this is a user-supplied directory.
+ ///
+ bool isUserSupplied() const { return UserSupplied; }
+
+ /// \brief Whether this header map is building a framework or not.
+ bool isIndexHeaderMap() const {
+ return isHeaderMap() && IsIndexHeaderMap;
+ }
+
+ /// LookupFile - Lookup the specified file in this search path, returning it
+ /// if it exists or returning null if not.
+ ///
+ /// \param Filename The file to look up relative to the search paths.
+ ///
+ /// \param HS The header search instance to search with.
+ ///
+ /// \param SearchPath If not NULL, will be set to the search path relative
+ /// to which the file was found.
+ ///
+ /// \param RelativePath If not NULL, will be set to the path relative to
+ /// SearchPath at which the file was found. This only differs from the
+ /// Filename for framework includes.
+ ///
+ /// \param SuggestedModule If non-null, and the file found is semantically
+ /// part of a known module, this will be set to the module that should
+ /// be imported instead of preprocessing/parsing the file found.
+ ///
+ /// \param InUserSpecifiedSystemHeader [out] If the file is found, set to true
+ /// if the file is located in a framework that has been user-specified to be
+ /// treated as a system framework.
+ const FileEntry *LookupFile(StringRef Filename, HeaderSearch &HS,
+ SmallVectorImpl<char> *SearchPath,
+ SmallVectorImpl<char> *RelativePath,
+ Module **SuggestedModule,
+ bool &InUserSpecifiedSystemHeader) const;
+
+private:
+ const FileEntry *DoFrameworkLookup(
+ StringRef Filename, HeaderSearch &HS,
+ SmallVectorImpl<char> *SearchPath,
+ SmallVectorImpl<char> *RelativePath,
+ Module **SuggestedModule,
+ bool &InUserSpecifiedSystemHeader) const;
+
+};
+
+} // end namespace clang
+
+#endif
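
HeaderSearch's search list is a vector of these objects, one per -I/-F/header-map entry, discriminated by getLookupType(). A sketch of constructing two entries (illustrative, not part of the patch; assumes a DirectoryEntry and a HeaderMap are already available):

#include "clang/Lex/DirectoryLookup.h"
#include "clang/Lex/HeaderMap.h"
#include <vector>

void addEntries(std::vector<clang::DirectoryLookup> &SearchDirs,
                const clang::DirectoryEntry *IncludeDir,
                const clang::HeaderMap *HMap) {
  // A user-supplied normal directory (e.g. from -I).
  SearchDirs.push_back(clang::DirectoryLookup(IncludeDir, clang::SrcMgr::C_User,
                                              /*isUser=*/true,
                                              /*isFramework=*/false));
  // A header map whose contents are treated as system headers.
  SearchDirs.push_back(clang::DirectoryLookup(HMap, clang::SrcMgr::C_System,
                                              /*isUser=*/false,
                                              /*isIndexHeaderMap=*/false));
}
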
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h b/contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h
new file mode 100644
index 0000000..f172b5c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/ExternalPreprocessorSource.h
@@ -0,0 +1,40 @@
+//===- ExternalPreprocessorSource.h - Abstract Macro Interface --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ExternalPreprocessorSource interface, which enables
+// construction of macro definitions from some external source.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_LEX_EXTERNAL_PREPROCESSOR_SOURCE_H
+#define LLVM_CLANG_LEX_EXTERNAL_PREPROCESSOR_SOURCE_H
+
+namespace clang {
+
+/// \brief Abstract interface for external sources of preprocessor
+/// information.
+///
+/// This abstract class allows an external source (such as the \c ASTReader)
+/// to provide additional macro definitions.
+class ExternalPreprocessorSource {
+public:
+ virtual ~ExternalPreprocessorSource();
+
+ /// \brief Read the set of macros defined by this external macro source.
+ virtual void ReadDefinedMacros() = 0;
+
+ /// \brief Read the definition for the given macro.
+ virtual void LoadMacroDefinition(IdentifierInfo *II) = 0;
+
+ /// \brief Update an out-of-date identifier.
+ virtual void updateOutOfDateIdentifier(IdentifierInfo &II) = 0;
+};
+
+}
+
+#endif // LLVM_CLANG_LEX_EXTERNAL_PREPROCESSOR_SOURCE_H
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h b/contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h
new file mode 100644
index 0000000..08bc5b6
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/HeaderMap.h
@@ -0,0 +1,72 @@
+//===--- HeaderMap.h - A file that acts like dir of symlinks ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the HeaderMap interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LEX_HEADERMAP_H
+#define LLVM_CLANG_LEX_HEADERMAP_H
+
+#include "clang/Basic/LLVM.h"
+
+namespace llvm {
+ class MemoryBuffer;
+}
+namespace clang {
+ class FileEntry;
+ class FileManager;
+ struct HMapBucket;
+ struct HMapHeader;
+
+/// This class represents an Apple concept known as a 'header map'. To the
+/// #include file resolution process, it basically acts like a directory of
+/// symlinks to files. Its advantages are that it is dense and more efficient
+/// to create and process than a directory of symlinks.
+class HeaderMap {
+ HeaderMap(const HeaderMap&); // DO NOT IMPLEMENT
+ void operator=(const HeaderMap&); // DO NOT IMPLEMENT
+
+ const llvm::MemoryBuffer *FileBuffer;
+ bool NeedsBSwap;
+
+ HeaderMap(const llvm::MemoryBuffer *File, bool BSwap)
+ : FileBuffer(File), NeedsBSwap(BSwap) {
+ }
+public:
+ ~HeaderMap();
+
+ /// HeaderMap::Create - This attempts to load the specified file as a header
+ /// map. If it doesn't look like a HeaderMap, it gives up and returns null.
+ static const HeaderMap *Create(const FileEntry *FE, FileManager &FM);
+
+ /// LookupFile - Check to see if the specified relative filename is located in
+ /// this HeaderMap. If so, open it and return its FileEntry.
+ const FileEntry *LookupFile(StringRef Filename, FileManager &FM) const;
+
+ /// getFileName - Return the filename of the headermap.
+ const char *getFileName() const;
+
+ /// dump - Print the contents of this headermap to stderr.
+ void dump() const;
+
+private:
+ unsigned getEndianAdjustedWord(unsigned X) const;
+ const HMapHeader &getHeader() const;
+ HMapBucket getBucket(unsigned BucketNo) const;
+ const char *getString(unsigned StrTabIdx) const;
+};
+
+} // end namespace clang.
+
+#endif
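
The intended use, sketched below (illustrative, not part of the patch; "foo.hmap" and "Foo/Bar.h" are made-up names): open a candidate file through the FileManager, let Create() decide whether it really is a header map, then resolve a relative include name through it. HeaderSearch normally owns and caches these objects; this sketch leaks the map for brevity.

#include "clang/Lex/HeaderMap.h"
#include "clang/Basic/FileManager.h"

const clang::FileEntry *resolveThroughHeaderMap(clang::FileManager &FM) {
  const clang::FileEntry *HMapFile = FM.getFile("foo.hmap");
  if (!HMapFile)
    return 0;

  // Create() gives up and returns null if this is not a valid header map.
  const clang::HeaderMap *HMap = clang::HeaderMap::Create(HMapFile, FM);
  if (!HMap)
    return 0;

  // Ask the map where "Foo/Bar.h" really lives.
  return HMap->LookupFile("Foo/Bar.h", FM);
}
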
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h b/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h
new file mode 100644
index 0000000..5128ce6
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/HeaderSearch.h
@@ -0,0 +1,562 @@
+//===--- HeaderSearch.h - Resolve Header File Locations ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the HeaderSearch interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LEX_HEADERSEARCH_H
+#define LLVM_CLANG_LEX_HEADERSEARCH_H
+
+#include "clang/Lex/DirectoryLookup.h"
+#include "clang/Lex/ModuleMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/ADT/OwningPtr.h"
+#include <vector>
+
+namespace clang {
+
+class DiagnosticsEngine;
+class ExternalIdentifierLookup;
+class FileEntry;
+class FileManager;
+class IdentifierInfo;
+
+/// HeaderFileInfo - The preprocessor keeps track of this information for each
+/// file that is #included.
+struct HeaderFileInfo {
+ /// isImport - True if this is a #import'd or #pragma once file.
+ unsigned isImport : 1;
+
+  /// isPragmaOnce - True if this is a #pragma once file.
+ unsigned isPragmaOnce : 1;
+
+ /// DirInfo - Keep track of whether this is a system header, and if so,
+ /// whether it is C++ clean or not. This can be set by the include paths or
+ /// by #pragma gcc system_header. This is an instance of
+ /// SrcMgr::CharacteristicKind.
+ unsigned DirInfo : 2;
+
+ /// \brief Whether this header file info was supplied by an external source.
+ unsigned External : 1;
+
+ /// \brief Whether this structure is considered to already have been
+ /// "resolved", meaning that it was loaded from the external source.
+ unsigned Resolved : 1;
+
+ /// \brief Whether this is a header inside a framework that is currently
+ /// being built.
+ ///
+ /// When a framework is being built, the headers have not yet been placed
+ /// into the appropriate framework subdirectories, and therefore are
+ /// provided via a header map. This bit indicates when this is one of
+ /// those framework headers.
+ unsigned IndexHeaderMapHeader : 1;
+
+ /// NumIncludes - This is the number of times the file has been included
+ /// already.
+ unsigned short NumIncludes;
+
+ /// \brief The ID number of the controlling macro.
+ ///
+ /// This ID number will be non-zero when there is a controlling
+ /// macro whose IdentifierInfo may not yet have been loaded from
+ /// external storage.
+ unsigned ControllingMacroID;
+
+ /// ControllingMacro - If this file has a #ifndef XXX (or equivalent) guard
+ /// that protects the entire contents of the file, this is the identifier
+ /// for the macro that controls whether or not it has any effect.
+ ///
+ /// Note: Most clients should use getControllingMacro() to access
+ /// the controlling macro of this header, since
+ /// getControllingMacro() is able to load a controlling macro from
+ /// external storage.
+ const IdentifierInfo *ControllingMacro;
+
+ /// \brief If this header came from a framework include, this is the name
+ /// of the framework.
+ StringRef Framework;
+
+ HeaderFileInfo()
+ : isImport(false), isPragmaOnce(false), DirInfo(SrcMgr::C_User),
+ External(false), Resolved(false), IndexHeaderMapHeader(false),
+ NumIncludes(0), ControllingMacroID(0), ControllingMacro(0) {}
+
+ /// \brief Retrieve the controlling macro for this header file, if
+ /// any.
+ const IdentifierInfo *getControllingMacro(ExternalIdentifierLookup *External);
+
+ /// \brief Determine whether this is a non-default header file info, e.g.,
+ /// it corresponds to an actual header we've included or tried to include.
+ bool isNonDefault() const {
+ return isImport || isPragmaOnce || NumIncludes || ControllingMacro ||
+ ControllingMacroID;
+ }
+};
+
+/// \brief An external source of header file information, which may supply
+/// information about header files already included.
+class ExternalHeaderFileInfoSource {
+public:
+ virtual ~ExternalHeaderFileInfoSource();
+
+ /// \brief Retrieve the header file information for the given file entry.
+ ///
+ /// \returns Header file information for the given file entry, with the
+ /// \c External bit set. If the file entry is not known, return a
+ /// default-constructed \c HeaderFileInfo.
+ virtual HeaderFileInfo GetHeaderFileInfo(const FileEntry *FE) = 0;
+};
+
+/// HeaderSearch - This class encapsulates the information needed to find the
+/// file referenced by a #include or #include_next, (sub-)framework lookup, etc.
+class HeaderSearch {
+ /// This structure is used to record entries in our framework cache.
+ struct FrameworkCacheEntry {
+ /// The directory entry which should be used for the cached framework.
+ const DirectoryEntry *Directory;
+
+ /// Whether this framework has been "user-specified" to be treated as if it
+ /// were a system framework (even if it was found outside a system framework
+ /// directory).
+ bool IsUserSpecifiedSystemFramework;
+ };
+
+ FileManager &FileMgr;
+ DiagnosticsEngine &Diags;
+ /// #include search path information. Requests for #include "x" search the
+ /// directory of the #including file first, then each directory in SearchDirs
+ /// consecutively. Requests for <x> search the current dir first, then each
+ /// directory in SearchDirs, starting at AngledDirIdx, consecutively. If
+ /// NoCurDirSearch is true, then the check for the file in the current
+ /// directory is suppressed.
+ std::vector<DirectoryLookup> SearchDirs;
+ unsigned AngledDirIdx;
+ unsigned SystemDirIdx;
+ bool NoCurDirSearch;
+
+ /// \brief The path to the module cache.
+ std::string ModuleCachePath;
+
+ /// FileInfo - This contains all of the preprocessor-specific data about files
+ /// that are included. The vector is indexed by the FileEntry's UID.
+ ///
+ std::vector<HeaderFileInfo> FileInfo;
+
+  /// LookupFileCache - This keeps track of each lookup performed by
+ /// LookupFile. The first part of the value is the starting index in
+ /// SearchDirs that the cached search was performed from. If there is a hit
+ /// and this value doesn't match the current query, the cache has to be
+ /// ignored. The second value is the entry in SearchDirs that satisfied the
+ /// query.
+ llvm::StringMap<std::pair<unsigned, unsigned>, llvm::BumpPtrAllocator>
+ LookupFileCache;
+
+ /// FrameworkMap - This is a collection mapping a framework or subframework
+ /// name like "Carbon" to the Carbon.framework directory.
+ llvm::StringMap<FrameworkCacheEntry, llvm::BumpPtrAllocator> FrameworkMap;
+
+ /// IncludeAliases - maps include file names (including the quotes or
+ /// angle brackets) to other include file names. This is used to support the
+ /// include_alias pragma for Microsoft compatibility.
+ typedef llvm::StringMap<std::string, llvm::BumpPtrAllocator>
+ IncludeAliasMap;
+ OwningPtr<IncludeAliasMap> IncludeAliases;
+
+ /// HeaderMaps - This is a mapping from FileEntry -> HeaderMap, uniquing
+ /// headermaps. This vector owns the headermap.
+ std::vector<std::pair<const FileEntry*, const HeaderMap*> > HeaderMaps;
+
+ /// \brief The mapping between modules and headers.
+ ModuleMap ModMap;
+
+ /// \brief Describes whether a given directory has a module map in it.
+ llvm::DenseMap<const DirectoryEntry *, bool> DirectoryHasModuleMap;
+
+ /// \brief Uniqued set of framework names, which is used to track which
+ /// headers were included as framework headers.
+ llvm::StringSet<llvm::BumpPtrAllocator> FrameworkNames;
+
+ /// \brief Entity used to resolve the identifier IDs of controlling
+ /// macros into IdentifierInfo pointers, as needed.
+ ExternalIdentifierLookup *ExternalLookup;
+
+ /// \brief Entity used to look up stored header file information.
+ ExternalHeaderFileInfoSource *ExternalSource;
+
+ // Various statistics we track for performance analysis.
+ unsigned NumIncluded;
+ unsigned NumMultiIncludeFileOptzn;
+ unsigned NumFrameworkLookups, NumSubFrameworkLookups;
+
+ // HeaderSearch doesn't support default or copy construction.
+ explicit HeaderSearch();
+ explicit HeaderSearch(const HeaderSearch&);
+ void operator=(const HeaderSearch&);
+
+ friend class DirectoryLookup;
+
+public:
+ HeaderSearch(FileManager &FM, DiagnosticsEngine &Diags,
+ const LangOptions &LangOpts, const TargetInfo *Target);
+ ~HeaderSearch();
+
+ FileManager &getFileMgr() const { return FileMgr; }
+
+ /// SetSearchPaths - Interface for setting the file search paths.
+ ///
+ void SetSearchPaths(const std::vector<DirectoryLookup> &dirs,
+ unsigned angledDirIdx, unsigned systemDirIdx,
+ bool noCurDirSearch) {
+ assert(angledDirIdx <= systemDirIdx && systemDirIdx <= dirs.size() &&
+           "Directory indices are unordered");
+ SearchDirs = dirs;
+ AngledDirIdx = angledDirIdx;
+ SystemDirIdx = systemDirIdx;
+ NoCurDirSearch = noCurDirSearch;
+ //LookupFileCache.clear();
+ }
+
+ /// AddSearchPath - Add an additional search path.
+ void AddSearchPath(const DirectoryLookup &dir, bool isAngled) {
+ unsigned idx = isAngled ? SystemDirIdx : AngledDirIdx;
+ SearchDirs.insert(SearchDirs.begin() + idx, dir);
+ if (!isAngled)
+ AngledDirIdx++;
+ SystemDirIdx++;
+ }
+
+ /// HasIncludeAliasMap - Checks whether the map exists or not
+ bool HasIncludeAliasMap() const {
+ return IncludeAliases;
+ }
+
+ /// AddIncludeAlias - Map the source include name to the dest include name.
+ /// The Source should include the angle brackets or quotes, the dest
+ /// should not. This allows for distinction between <> and "" headers.
+ void AddIncludeAlias(StringRef Source, StringRef Dest) {
+ if (!IncludeAliases)
+ IncludeAliases.reset(new IncludeAliasMap);
+ (*IncludeAliases)[Source] = Dest;
+ }
+
+ /// MapHeaderToIncludeAlias - Maps one header file name to a different header
+ /// file name, for use with the include_alias pragma. Note that the source
+  /// file name should include the angle brackets or quotes. Returns an empty
+  /// StringRef if the header cannot be mapped.
+ StringRef MapHeaderToIncludeAlias(StringRef Source) {
+ assert(IncludeAliases && "Trying to map headers when there's no map");
+
+ // Do any filename replacements before anything else
+ IncludeAliasMap::const_iterator Iter = IncludeAliases->find(Source);
+ if (Iter != IncludeAliases->end())
+ return Iter->second;
+ return StringRef();
+ }
+
+ /// \brief Set the path to the module cache.
+ void setModuleCachePath(StringRef CachePath) {
+ ModuleCachePath = CachePath;
+ }
+
+ /// \brief Retrieve the path to the module cache.
+ StringRef getModuleCachePath() const { return ModuleCachePath; }
+
+ /// ClearFileInfo - Forget everything we know about headers so far.
+ void ClearFileInfo() {
+ FileInfo.clear();
+ }
+
+ void SetExternalLookup(ExternalIdentifierLookup *EIL) {
+ ExternalLookup = EIL;
+ }
+
+ ExternalIdentifierLookup *getExternalLookup() const {
+ return ExternalLookup;
+ }
+
+ /// \brief Set the external source of header information.
+ void SetExternalSource(ExternalHeaderFileInfoSource *ES) {
+ ExternalSource = ES;
+ }
+
+ /// \brief Set the target information for the header search, if not
+ /// already known.
+ void setTarget(const TargetInfo &Target);
+
+ /// LookupFile - Given a "foo" or <foo> reference, look up the indicated file,
+ /// return null on failure.
+ ///
+ /// \returns If successful, this returns 'UsedDir', the DirectoryLookup member
+ /// the file was found in, or null if not applicable.
+ ///
+ /// \param isAngled indicates whether the file reference is a <> reference.
+ ///
+ /// \param CurDir If non-null, the file was found in the specified directory
+ /// search location. This is used to implement #include_next.
+ ///
+ /// \param CurFileEnt If non-null, indicates where the #including file is, in
+ /// case a relative search is needed.
+ ///
+ /// \param SearchPath If non-null, will be set to the search path relative
+ /// to which the file was found. If the include path is absolute, SearchPath
+ /// will be set to an empty string.
+ ///
+ /// \param RelativePath If non-null, will be set to the path relative to
+ /// SearchPath at which the file was found. This only differs from the
+ /// Filename for framework includes.
+ ///
+ /// \param SuggestedModule If non-null, and the file found is semantically
+ /// part of a known module, this will be set to the module that should
+ /// be imported instead of preprocessing/parsing the file found.
+ const FileEntry *LookupFile(StringRef Filename, bool isAngled,
+ const DirectoryLookup *FromDir,
+ const DirectoryLookup *&CurDir,
+ const FileEntry *CurFileEnt,
+ SmallVectorImpl<char> *SearchPath,
+ SmallVectorImpl<char> *RelativePath,
+ Module **SuggestedModule,
+ bool SkipCache = false);
+
+ /// LookupSubframeworkHeader - Look up a subframework for the specified
+ /// #include file. For example, if #include'ing <HIToolbox/HIToolbox.h> from
+ /// within ".../Carbon.framework/Headers/Carbon.h", check to see if HIToolbox
+ /// is a subframework within Carbon.framework. If so, return the FileEntry
+ /// for the designated file, otherwise return null.
+ const FileEntry *LookupSubframeworkHeader(
+ StringRef Filename,
+ const FileEntry *RelativeFileEnt,
+ SmallVectorImpl<char> *SearchPath,
+ SmallVectorImpl<char> *RelativePath);
+
+ /// LookupFrameworkCache - Look up the specified framework name in our
+ /// framework cache, returning the DirectoryEntry it is in if we know,
+ /// otherwise, return null.
+ FrameworkCacheEntry &LookupFrameworkCache(StringRef FWName) {
+ return FrameworkMap.GetOrCreateValue(FWName).getValue();
+ }
+
+  /// ShouldEnterIncludeFile - Mark the specified file as a target of a
+ /// #include, #include_next, or #import directive. Return false if #including
+ /// the file will have no effect or true if we should include it.
+ bool ShouldEnterIncludeFile(const FileEntry *File, bool isImport);
+
+
+ /// getFileDirFlavor - Return whether the specified file is a normal header,
+ /// a system header, or a C++ friendly system header.
+ SrcMgr::CharacteristicKind getFileDirFlavor(const FileEntry *File) {
+ return (SrcMgr::CharacteristicKind)getFileInfo(File).DirInfo;
+ }
+
+ /// MarkFileIncludeOnce - Mark the specified file as a "once only" file, e.g.
+ /// due to #pragma once.
+ void MarkFileIncludeOnce(const FileEntry *File) {
+ HeaderFileInfo &FI = getFileInfo(File);
+ FI.isImport = true;
+ FI.isPragmaOnce = true;
+ }
+
+ /// MarkFileSystemHeader - Mark the specified file as a system header, e.g.
+ /// due to #pragma GCC system_header.
+ void MarkFileSystemHeader(const FileEntry *File) {
+ getFileInfo(File).DirInfo = SrcMgr::C_System;
+ }
+
+ /// IncrementIncludeCount - Increment the count for the number of times the
+ /// specified FileEntry has been entered.
+ void IncrementIncludeCount(const FileEntry *File) {
+ ++getFileInfo(File).NumIncludes;
+ }
+
+ /// SetFileControllingMacro - Mark the specified file as having a controlling
+ /// macro. This is used by the multiple-include optimization to eliminate
+ /// no-op #includes.
+ void SetFileControllingMacro(const FileEntry *File,
+ const IdentifierInfo *ControllingMacro) {
+ getFileInfo(File).ControllingMacro = ControllingMacro;
+ }
+
+ /// \brief Determine whether this file is intended to be safe from
+ /// multiple inclusions, e.g., it has #pragma once or a controlling
+ /// macro.
+ ///
+ /// This routine does not consider the effect of #import
+ bool isFileMultipleIncludeGuarded(const FileEntry *File);
+
+ /// CreateHeaderMap - This method returns a HeaderMap for the specified
+  /// FileEntry, uniquing them through the 'HeaderMaps' data structure.
+ const HeaderMap *CreateHeaderMap(const FileEntry *FE);
+
+ /// \brief Retrieve the name of the module file that should be used to
+ /// load the given module.
+ ///
+ /// \param Module The module whose module file name will be returned.
+ ///
+ /// \returns The name of the module file that corresponds to this module,
+ /// or an empty string if this module does not correspond to any module file.
+ std::string getModuleFileName(Module *Module);
+
+ /// \brief Retrieve the name of the module file that should be used to
+ /// load a module with the given name.
+ ///
+ /// \param Module The module whose module file name will be returned.
+ ///
+ /// \returns The name of the module file that corresponds to this module,
+ /// or an empty string if this module does not correspond to any module file.
+ std::string getModuleFileName(StringRef ModuleName);
+
+  /// \brief Look up a module: search for a module with the given name.
+ ///
+ /// \param ModuleName The name of the module we're looking for.
+ ///
+ /// \param AllowSearch Whether we are allowed to search in the various
+ /// search directories to produce a module definition. If not, this lookup
+ /// will only return an already-known module.
+ ///
+ /// \returns The module with the given name.
+ Module *lookupModule(StringRef ModuleName, bool AllowSearch = true);
+
+ void IncrementFrameworkLookupCount() { ++NumFrameworkLookups; }
+
+ /// \brief Determine whether there is a module map that may map the header
+ /// with the given file name to a (sub)module.
+ ///
+ /// \param Filename The name of the file.
+ ///
+ /// \param Root The "root" directory, at which we should stop looking for
+ /// module maps.
+ bool hasModuleMap(StringRef Filename, const DirectoryEntry *Root);
+
+ /// \brief Retrieve the module that corresponds to the given file, if any.
+ ///
+ /// \param File The header that we wish to map to a module.
+ Module *findModuleForHeader(const FileEntry *File);
+
+ /// \brief Read the contents of the given module map file.
+ ///
+ /// \param File The module map file.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool loadModuleMapFile(const FileEntry *File);
+
+ /// \brief Collect the set of all known, top-level modules.
+ ///
+ /// \param Modules Will be filled with the set of known, top-level modules.
+ void collectAllModules(llvm::SmallVectorImpl<Module *> &Modules);
+
+private:
+ /// \brief Retrieve a module with the given name, which may be part of the
+ /// given framework.
+ ///
+ /// \param Name The name of the module to retrieve.
+ ///
+ /// \param Dir The framework directory (e.g., ModuleName.framework).
+ ///
+ /// \param IsSystem Whether the framework directory is part of the system
+ /// frameworks.
+ ///
+ /// \returns The module, if found; otherwise, null.
+ Module *loadFrameworkModule(StringRef Name,
+ const DirectoryEntry *Dir,
+ bool IsSystem);
+
+public:
+ /// \brief Retrieve the module map.
+ ModuleMap &getModuleMap() { return ModMap; }
+
+ unsigned header_file_size() const { return FileInfo.size(); }
+
+ // Used by ASTReader.
+ void setHeaderFileInfoForUID(HeaderFileInfo HFI, unsigned UID);
+
+ /// getFileInfo - Return the HeaderFileInfo structure for the specified
+ /// FileEntry.
+ const HeaderFileInfo &getFileInfo(const FileEntry *FE) const {
+ return const_cast<HeaderSearch*>(this)->getFileInfo(FE);
+ }
+
+ // Used by external tools
+ typedef std::vector<DirectoryLookup>::const_iterator search_dir_iterator;
+ search_dir_iterator search_dir_begin() const { return SearchDirs.begin(); }
+ search_dir_iterator search_dir_end() const { return SearchDirs.end(); }
+ unsigned search_dir_size() const { return SearchDirs.size(); }
+
+ search_dir_iterator quoted_dir_begin() const {
+ return SearchDirs.begin();
+ }
+ search_dir_iterator quoted_dir_end() const {
+ return SearchDirs.begin() + AngledDirIdx;
+ }
+
+ search_dir_iterator angled_dir_begin() const {
+ return SearchDirs.begin() + AngledDirIdx;
+ }
+ search_dir_iterator angled_dir_end() const {
+ return SearchDirs.begin() + SystemDirIdx;
+ }
+
+ search_dir_iterator system_dir_begin() const {
+ return SearchDirs.begin() + SystemDirIdx;
+ }
+ search_dir_iterator system_dir_end() const { return SearchDirs.end(); }
+
+ /// \brief Retrieve a uniqued framework name.
+ StringRef getUniqueFrameworkName(StringRef Framework);
+
+ void PrintStats();
+
+ size_t getTotalMemory() const;
+
+ static std::string NormalizeDashIncludePath(StringRef File,
+ FileManager &FileMgr);
+
+private:
+ /// \brief Describes what happened when we tried to load a module map file.
+ enum LoadModuleMapResult {
+ /// \brief The module map file had already been loaded.
+ LMM_AlreadyLoaded,
+ /// \brief The module map file was loaded by this invocation.
+ LMM_NewlyLoaded,
+    /// \brief There was no directory with the given name.
+ LMM_NoDirectory,
+ /// \brief There was either no module map file or the module map file was
+ /// invalid.
+ LMM_InvalidModuleMap
+ };
+
+ /// \brief Try to load the module map file in the given directory.
+ ///
+ /// \param DirName The name of the directory where we will look for a module
+ /// map file.
+ ///
+ /// \returns The result of attempting to load the module map file from the
+ /// named directory.
+ LoadModuleMapResult loadModuleMapFile(StringRef DirName);
+
+ /// \brief Try to load the module map file in the given directory.
+ ///
+ /// \param Dir The directory where we will look for a module map file.
+ ///
+ /// \returns The result of attempting to load the module map file from the
+ /// named directory.
+ LoadModuleMapResult loadModuleMapFile(const DirectoryEntry *Dir);
+
+ /// getFileInfo - Return the HeaderFileInfo structure for the specified
+ /// FileEntry.
+ HeaderFileInfo &getFileInfo(const FileEntry *FE);
+};
+
+} // end namespace clang
+
+#endif
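
Two small usage sketches (illustrative, not part of the patch). The first walks the three contiguous slices of SearchDirs exposed by the quoted/angled/system iterator pairs; the second shows the include_alias convention that the source name keeps its quotes or brackets while the destination does not.

#include "clang/Lex/HeaderSearch.h"
#include "llvm/Support/raw_ostream.h"

void dumpSearchDirs(const clang::HeaderSearch &HS) {
  for (clang::HeaderSearch::search_dir_iterator
         I = HS.quoted_dir_begin(), E = HS.quoted_dir_end(); I != E; ++I)
    llvm::errs() << "quoted: " << I->getName() << "\n";
  for (clang::HeaderSearch::search_dir_iterator
         I = HS.angled_dir_begin(), E = HS.angled_dir_end(); I != E; ++I)
    llvm::errs() << "angled: " << I->getName() << "\n";
  for (clang::HeaderSearch::search_dir_iterator
         I = HS.system_dir_begin(), E = HS.system_dir_end(); I != E; ++I)
    llvm::errs() << "system: " << I->getName() << "\n";
}

void remapHeader(clang::HeaderSearch &HS) {
  HS.AddIncludeAlias("<foo.h>", "bar.h"); // illustrative names
  if (HS.HasIncludeAliasMap()) {
    clang::StringRef Real = HS.MapHeaderToIncludeAlias("<foo.h>");
    if (!Real.empty())
      llvm::errs() << "\"<foo.h>\" is remapped to " << Real << "\n";
  }
}
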
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/LexDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Lex/LexDiagnostic.h
new file mode 100644
index 0000000..41b9396
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/LexDiagnostic.h
@@ -0,0 +1,28 @@
+//===--- DiagnosticLex.h - Diagnostics for liblex ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_DIAGNOSTICLEX_H
+#define LLVM_CLANG_DIAGNOSTICLEX_H
+
+#include "clang/Basic/Diagnostic.h"
+
+namespace clang {
+ namespace diag {
+ enum {
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
+#define LEXSTART
+#include "clang/Basic/DiagnosticLexKinds.inc"
+#undef DIAG
+ NUM_BUILTIN_LEX_DIAGNOSTICS
+ };
+ } // end namespace diag
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h b/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
new file mode 100644
index 0000000..04bcead
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Lexer.h
@@ -0,0 +1,563 @@
+//===--- Lexer.h - C Language Family Lexer ----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Lexer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LEXER_H
+#define LLVM_CLANG_LEXER_H
+
+#include "clang/Lex/PreprocessorLexer.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/SmallVector.h"
+#include <string>
+#include <cassert>
+
+namespace clang {
+class DiagnosticsEngine;
+class SourceManager;
+class Preprocessor;
+class DiagnosticBuilder;
+
+/// ConflictMarkerKind - Kinds of conflict marker which the lexer might be
+/// recovering from.
+enum ConflictMarkerKind {
+ /// Not within a conflict marker.
+ CMK_None,
+ /// A normal or diff3 conflict marker, initiated by at least 7 <s,
+ /// separated by at least 7 =s or |s, and terminated by at least 7 >s.
+ CMK_Normal,
+ /// A Perforce-style conflict marker, initiated by 4 >s, separated by 4 =s,
+ /// and terminated by 4 <s.
+ CMK_Perforce
+};
+
+/// Lexer - This provides a simple interface that turns a text buffer into a
+/// stream of tokens. This provides no support for file reading or buffering,
+/// or buffering/seeking of tokens; only forward lexing is supported. It relies
+/// on the specified Preprocessor object to handle preprocessor directives, etc.
+class Lexer : public PreprocessorLexer {
+ virtual void anchor();
+
+ //===--------------------------------------------------------------------===//
+ // Constant configuration values for this lexer.
+ const char *BufferStart; // Start of the buffer.
+ const char *BufferEnd; // End of the buffer.
+ SourceLocation FileLoc; // Location for start of file.
+ LangOptions LangOpts; // LangOpts enabled by this language (cache).
+ bool Is_PragmaLexer; // True if lexer for _Pragma handling.
+
+ //===--------------------------------------------------------------------===//
+ // Context-specific lexing flags set by the preprocessor.
+ //
+
+ /// ExtendedTokenMode - The lexer can optionally keep comments and whitespace
+ /// and return them as tokens. This is used for -C and -CC modes, and
+ /// whitespace preservation can be useful for some clients that want to lex
+ /// the file in raw mode and get every character from the file.
+ ///
+ /// When this is set to 2 it returns comments and whitespace. When set to 1
+ /// it returns comments, when it is set to 0 it returns normal tokens only.
+ unsigned char ExtendedTokenMode;
+
+ //===--------------------------------------------------------------------===//
+ // Context that changes as the file is lexed.
+ // NOTE: any state that mutates when in raw mode must have save/restore code
+ // in Lexer::isNextPPTokenLParen.
+
+ // BufferPtr - Current pointer into the buffer. This is the next character
+ // to be lexed.
+ const char *BufferPtr;
+
+ // IsAtStartOfLine - True if the next lexed token should get the "start of
+ // line" flag set on it.
+ bool IsAtStartOfLine;
+
+ // CurrentConflictMarkerState - The kind of conflict marker we are handling.
+ ConflictMarkerKind CurrentConflictMarkerState;
+
+ Lexer(const Lexer&); // DO NOT IMPLEMENT
+ void operator=(const Lexer&); // DO NOT IMPLEMENT
+ friend class Preprocessor;
+
+ void InitLexer(const char *BufStart, const char *BufPtr, const char *BufEnd);
+public:
+
+ /// Lexer constructor - Create a new lexer object for the specified buffer
+ /// with the specified preprocessor managing the lexing process. This lexer
+ /// assumes that the associated file buffer and Preprocessor objects will
+ /// outlive it, so it doesn't take ownership of either of them.
+ Lexer(FileID FID, const llvm::MemoryBuffer *InputBuffer, Preprocessor &PP);
+
+ /// Lexer constructor - Create a new raw lexer object. This object is only
+  /// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text
+ /// range will outlive it, so it doesn't take ownership of it.
+ Lexer(SourceLocation FileLoc, const LangOptions &LangOpts,
+ const char *BufStart, const char *BufPtr, const char *BufEnd);
+
+ /// Lexer constructor - Create a new raw lexer object. This object is only
+  /// suitable for calls to 'LexFromRawLexer'. This lexer assumes that the text
+ /// range will outlive it, so it doesn't take ownership of it.
+ Lexer(FileID FID, const llvm::MemoryBuffer *InputBuffer,
+ const SourceManager &SM, const LangOptions &LangOpts);
+
+ /// Create_PragmaLexer: Lexer constructor - Create a new lexer object for
+ /// _Pragma expansion. This has a variety of magic semantics that this method
+ /// sets up. It returns a new'd Lexer that must be delete'd when done.
+ static Lexer *Create_PragmaLexer(SourceLocation SpellingLoc,
+ SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd,
+ unsigned TokLen, Preprocessor &PP);
+
+
+ /// getLangOpts - Return the language features currently enabled.
+ /// NOTE: this lexer modifies features as a file is parsed!
+ const LangOptions &getLangOpts() const { return LangOpts; }
+
+ /// getFileLoc - Return the File Location for the file we are lexing out of.
+ /// The physical location encodes the location where the characters come from,
+ /// the virtual location encodes where we should *claim* the characters came
+ /// from. Currently this is only used by _Pragma handling.
+ SourceLocation getFileLoc() const { return FileLoc; }
+
+  /// Lex - Return the next token in the file. If this is the end of file, it
+  /// returns the tok::eof token. This implicitly involves the preprocessor.
+ void Lex(Token &Result) {
+ // Start a new token.
+ Result.startToken();
+
+ // NOTE, any changes here should also change code after calls to
+ // Preprocessor::HandleDirective
+ if (IsAtStartOfLine) {
+ Result.setFlag(Token::StartOfLine);
+ IsAtStartOfLine = false;
+ }
+
+ // Get a token. Note that this may delete the current lexer if the end of
+ // file is reached.
+ LexTokenInternal(Result);
+ }
+
+ /// isPragmaLexer - Returns true if this Lexer is being used to lex a pragma.
+ bool isPragmaLexer() const { return Is_PragmaLexer; }
+
+ /// IndirectLex - An indirect call to 'Lex' that can be invoked via
+ /// the PreprocessorLexer interface.
+ void IndirectLex(Token &Result) { Lex(Result); }
+
+ /// LexFromRawLexer - Lex a token from a designated raw lexer (one with no
+  /// associated preprocessor object). Return true if the 'next character to
+ /// read' pointer points at the end of the lexer buffer, false otherwise.
+ bool LexFromRawLexer(Token &Result) {
+ assert(LexingRawMode && "Not already in raw mode!");
+ Lex(Result);
+ // Note that lexing to the end of the buffer doesn't implicitly delete the
+ // lexer when in raw mode.
+ return BufferPtr == BufferEnd;
+ }
+
+ /// isKeepWhitespaceMode - Return true if the lexer should return tokens for
+ /// every character in the file, including whitespace and comments. This
+ /// should only be used in raw mode, as the preprocessor is not prepared to
+ /// deal with the excess tokens.
+ bool isKeepWhitespaceMode() const {
+ return ExtendedTokenMode > 1;
+ }
+
+ /// SetKeepWhitespaceMode - This method lets clients enable or disable
+ /// whitespace retention mode.
+ void SetKeepWhitespaceMode(bool Val) {
+ assert((!Val || LexingRawMode) &&
+ "Can only enable whitespace retention in raw mode");
+ ExtendedTokenMode = Val ? 2 : 0;
+ }
+
+ /// inKeepCommentMode - Return true if the lexer should return comments as
+ /// tokens.
+ bool inKeepCommentMode() const {
+ return ExtendedTokenMode > 0;
+ }
+
+ /// SetCommentRetentionState - Change the comment retention state of the
+ /// lexer to the specified mode. This is really only useful when lexing in
+ /// raw mode, because otherwise the lexer needs to manage this.
+ void SetCommentRetentionState(bool Mode) {
+ assert(!isKeepWhitespaceMode() &&
+ "Can't play with comment retention state when retaining whitespace");
+ ExtendedTokenMode = Mode ? 1 : 0;
+ }
+
+ const char *getBufferStart() const { return BufferStart; }
+
+ /// ReadToEndOfLine - Read the rest of the current preprocessor line as an
+ /// uninterpreted string. This switches the lexer out of directive mode.
+ std::string ReadToEndOfLine();
+
+
+ /// Diag - Forwarding function for diagnostics. This translates a source
+ /// position in the current buffer into a SourceLocation object for rendering.
+ DiagnosticBuilder Diag(const char *Loc, unsigned DiagID) const;
+
+ /// getSourceLocation - Return a source location identifier for the specified
+ /// offset in the current file.
+ SourceLocation getSourceLocation(const char *Loc, unsigned TokLen = 1) const;
+
+ /// getSourceLocation - Return a source location for the next character in
+ /// the current file.
+ SourceLocation getSourceLocation() { return getSourceLocation(BufferPtr); }
+
+ /// \brief Return the current location in the buffer.
+ const char *getBufferLocation() const { return BufferPtr; }
+
+ /// Stringify - Convert the specified string into a C string by escaping '\'
+ /// and " characters. This does not add surrounding ""'s to the string.
+ /// If Charify is true, this escapes the ' character instead of ".
+ static std::string Stringify(const std::string &Str, bool Charify = false);
+
+ /// Stringify - Convert the specified string into a C string by escaping '\'
+ /// and " characters. This does not add surrounding ""'s to the string.
+ static void Stringify(SmallVectorImpl<char> &Str);
+
+
+ /// getSpelling - This method is used to get the spelling of a token into a
+ /// preallocated buffer, instead of as an std::string. The caller is required
+ /// to allocate enough space for the token, which is guaranteed to be at least
+ /// Tok.getLength() bytes long. The length of the actual result is returned.
+ ///
+ /// Note that this method may do two possible things: it may either fill in
+ /// the buffer specified with characters, or it may *change the input pointer*
+ /// to point to a constant buffer with the data already in it (avoiding a
+ /// copy). The caller is not allowed to modify the returned buffer pointer
+ /// if an internal buffer is returned.
+ static unsigned getSpelling(const Token &Tok, const char *&Buffer,
+ const SourceManager &SourceMgr,
+ const LangOptions &LangOpts,
+ bool *Invalid = 0);
+
+ /// getSpelling() - Return the 'spelling' of the Tok token. The spelling of a
+ /// token is the characters used to represent the token in the source file
+ /// after trigraph expansion and escaped-newline folding. In particular, this
+ /// wants to get the true, uncanonicalized spelling of things like digraphs,
+ /// UCNs, etc.
+ static std::string getSpelling(const Token &Tok,
+ const SourceManager &SourceMgr,
+ const LangOptions &LangOpts,
+ bool *Invalid = 0);
+
+ /// getSpelling - This method is used to get the spelling of the
+ /// token at the given source location. If, as is usually true, it
+ /// is not necessary to copy any data, then the returned string may
+ /// not point into the provided buffer.
+ ///
+ /// This method lexes at the expansion depth of the given
+ /// location and does not jump to the expansion or spelling
+ /// location.
+ static StringRef getSpelling(SourceLocation loc,
+ SmallVectorImpl<char> &buffer,
+ const SourceManager &SourceMgr,
+ const LangOptions &LangOpts,
+ bool *invalid = 0);
+
+ /// MeasureTokenLength - Relex the token at the specified location and return
+ /// its length in bytes in the input file. If the token needs cleaning (e.g.
+ /// includes a trigraph or an escaped newline) then this count includes bytes
+ /// that are part of that.
+ static unsigned MeasureTokenLength(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts);
+
+ /// \brief Given a location anywhere in a source buffer, find the location
+ /// that corresponds to the beginning of the token in which the original
+ /// source location lands.
+ ///
+ /// \param Loc The source location for which the start of the enclosing
+ /// token should be found.
+ static SourceLocation GetBeginningOfToken(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts);
+
+ /// AdvanceToTokenCharacter - If the current SourceLocation specifies a
+ /// location at the start of a token, return a new location that specifies a
+ /// character within the token. This handles trigraphs and escaped newlines.
+ static SourceLocation AdvanceToTokenCharacter(SourceLocation TokStart,
+ unsigned Character,
+ const SourceManager &SM,
+ const LangOptions &LangOpts);
+
+ /// \brief Computes the source location just past the end of the
+ /// token at this source location.
+ ///
+ /// This routine can be used to produce a source location that
+ /// points just past the end of the token referenced by \p Loc, and
+ /// is generally used when a diagnostic needs to point just after a
+ /// token where it expected something different than it received. If
+ /// the returned source location would not be meaningful (e.g., if
+ /// it points into a macro), this routine returns an invalid
+ /// source location.
+ ///
+ /// \param Offset an offset from the end of the token, where the source
+ /// location should refer to. The default offset (0) produces a source
+ /// location pointing just past the end of the token; an offset of 1 produces
+ /// a source location pointing to the last character in the token, etc.
+ static SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
+ const SourceManager &SM,
+ const LangOptions &LangOpts);
+
+ /// \brief Returns true if the given MacroID location points at the first
+ /// token of the macro expansion.
+ ///
+ /// \param MacroBegin If non-null and the function returns true, it is set to
+ /// the begin location of the macro.
+ static bool isAtStartOfMacroExpansion(SourceLocation loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ SourceLocation *MacroBegin = 0);
+
+ /// \brief Returns true if the given MacroID location points at the last
+ /// token of the macro expansion.
+ ///
+ /// \param MacroEnd If non-null and the function returns true, it is set to
+ /// the end location of the macro.
+ static bool isAtEndOfMacroExpansion(SourceLocation loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ SourceLocation *MacroEnd = 0);
+
+ /// \brief Accepts a range and returns a character range with file locations.
+ ///
+ /// Returns a null range if a part of the range resides inside a macro
+ /// expansion or the range does not reside on the same FileID.
+ static CharSourceRange makeFileCharRange(CharSourceRange Range,
+ const SourceManager &SM,
+ const LangOptions &LangOpts);
+
+ /// \brief Returns a string for the source that the range encompasses.
+ static StringRef getSourceText(CharSourceRange Range,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ bool *Invalid = 0);
+
+ /// \brief Retrieve the name of the immediate macro expansion.
+ ///
+ /// This routine starts from a source location, and finds the name of the macro
+ /// responsible for its immediate expansion. It looks through any intervening
+ /// macro argument expansions to compute this. It returns a StringRef which
+ /// refers to the SourceManager-owned buffer of the source where that macro
+ /// name is spelled. Thus, the result shouldn't out-live that SourceManager.
+ static StringRef getImmediateMacroName(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts);
+
+ /// \brief Compute the preamble of the given file.
+ ///
+ /// The preamble of a file contains the initial comments, include directives,
+ /// and other preprocessor directives that occur before the code in this
+ /// particular file actually begins. The preamble of the main source file is
+ /// a potential prefix header.
+ ///
+ /// \param Buffer The memory buffer containing the file's contents.
+ ///
+ /// \param MaxLines If non-zero, restrict the length of the preamble
+ /// to fewer than this number of lines.
+ ///
+ /// \returns The offset into the file where the preamble ends and the rest
+ /// of the file begins along with a boolean value indicating whether
+ /// the preamble ends at the beginning of a new line.
+ static std::pair<unsigned, bool>
+ ComputePreamble(const llvm::MemoryBuffer *Buffer, const LangOptions &LangOpts,
+ unsigned MaxLines = 0);
+
+ //===--------------------------------------------------------------------===//
+ // Internal implementation interfaces.
+private:
+
+ /// LexTokenInternal - Internal interface to lex a preprocessing token. Called
+ /// by Lex.
+ ///
+ void LexTokenInternal(Token &Result);
+
+ /// FormTokenWithChars - When we lex a token, we have identified a span
+ /// starting at BufferPtr, going to TokEnd that forms the token. This method
+ /// takes that range and assigns it to the token as its location and size. In
+ /// addition, since tokens cannot overlap, this also updates BufferPtr to be
+ /// TokEnd.
+ void FormTokenWithChars(Token &Result, const char *TokEnd,
+ tok::TokenKind Kind) {
+ unsigned TokLen = TokEnd-BufferPtr;
+ Result.setLength(TokLen);
+ Result.setLocation(getSourceLocation(BufferPtr, TokLen));
+ Result.setKind(Kind);
+ BufferPtr = TokEnd;
+ }
+
+ /// isNextPPTokenLParen - Return 1 if the next unexpanded token will return a
+ /// tok::l_paren token, 0 if it is something else and 2 if there are no more
+ /// tokens in the buffer controlled by this lexer.
+ unsigned isNextPPTokenLParen();
+
+ //===--------------------------------------------------------------------===//
+ // Lexer character reading interfaces.
+public:
+
+ // This lexer is built on two interfaces for reading characters, both of which
+ // automatically provide phase 1/2 translation. getAndAdvanceChar is used
+ // when we know that we will be reading a character from the input buffer and
+ // that this character will be part of the result token. This occurs in, e.g.,
+ // string processing, because we know we need to read until we find the
+ // closing '"' character.
+ //
+ // The second interface is the combination of getCharAndSize with
+ // ConsumeChar. getCharAndSize reads a phase 1/2 translated character,
+ // returning it and its size. If the lexer decides that this character is
+ // part of the current token, it calls ConsumeChar on it. This two stage
+ // approach allows us to emit diagnostics for characters (e.g. warnings about
+ // trigraphs), knowing that they only are emitted if the character is
+ // consumed.
+
+ /// isObviouslySimpleCharacter - Return true if the specified character is
+ /// obviously the same in translation phase 1 and translation phase 3. This
+ /// can return false for characters that end up being the same, but it will
+ /// never return true for something that needs to be mapped.
+ static bool isObviouslySimpleCharacter(char C) {
+ return C != '?' && C != '\\';
+ }
+
+ /// getAndAdvanceChar - Read a single 'character' from the specified buffer,
+ /// advance over it, and return it. This is tricky in several cases. Here we
+ /// just handle the trivial case and fall-back to the non-inlined
+ /// getCharAndSizeSlow method to handle the hard case.
+ inline char getAndAdvanceChar(const char *&Ptr, Token &Tok) {
+ // If this is not a trigraph and not a UCN or escaped newline, return
+ // quickly.
+ if (isObviouslySimpleCharacter(Ptr[0])) return *Ptr++;
+
+ unsigned Size = 0;
+ char C = getCharAndSizeSlow(Ptr, Size, &Tok);
+ Ptr += Size;
+ return C;
+ }
+
+private:
+ /// ConsumeChar - When a character (identified by getCharAndSize) is consumed
+ /// and added to a given token, check to see if there are diagnostics that
+ /// need to be emitted or flags that need to be set on the token. If so, do
+ /// it.
+ const char *ConsumeChar(const char *Ptr, unsigned Size, Token &Tok) {
+ // Normal case, we consumed exactly one character. Just return it.
+ if (Size == 1)
+ return Ptr+Size;
+
+ // Otherwise, re-lex the character with a current token, allowing
+ // diagnostics to be emitted and flags to be set.
+ Size = 0;
+ getCharAndSizeSlow(Ptr, Size, &Tok);
+ return Ptr+Size;
+ }
+
+ /// getCharAndSize - Peek a single 'character' from the specified buffer,
+ /// get its size, and return it. This is tricky in several cases. Here we
+ /// just handle the trivial case and fall-back to the non-inlined
+ /// getCharAndSizeSlow method to handle the hard case.
+ inline char getCharAndSize(const char *Ptr, unsigned &Size) {
+ // If this is not a trigraph and not a UCN or escaped newline, return
+ // quickly.
+ if (isObviouslySimpleCharacter(Ptr[0])) {
+ Size = 1;
+ return *Ptr;
+ }
+
+ Size = 0;
+ return getCharAndSizeSlow(Ptr, Size);
+ }
+
+ /// getCharAndSizeSlow - Handle the slow/uncommon case of the getCharAndSize
+ /// method.
+ char getCharAndSizeSlow(const char *Ptr, unsigned &Size, Token *Tok = 0);
+public:
+
+ /// getCharAndSizeNoWarn - Like the getCharAndSize method, but does not ever
+ /// emit a warning.
+ static inline char getCharAndSizeNoWarn(const char *Ptr, unsigned &Size,
+ const LangOptions &LangOpts) {
+ // If this is not a trigraph and not a UCN or escaped newline, return
+ // quickly.
+ if (isObviouslySimpleCharacter(Ptr[0])) {
+ Size = 1;
+ return *Ptr;
+ }
+
+ Size = 0;
+ return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
+ }
+
+ /// getEscapedNewLineSize - Return the size of the specified escaped newline,
+ /// or 0 if it is not an escaped newline. P[-1] is known to be a "\" on entry
+ /// to this function.
+ static unsigned getEscapedNewLineSize(const char *P);
+
+ /// SkipEscapedNewLines - If P points to an escaped newline (or a series of
+ /// them), skip over them and return the first non-escaped-newline found,
+ /// otherwise return P.
+ static const char *SkipEscapedNewLines(const char *P);
+
+ /// \brief Checks that the given token is the first token that occurs after
+ /// the given location (this excludes comments and whitespace). Returns the
+ /// location immediately after the specified token. If the token is not found
+ /// or the location is inside a macro, the returned source location will be
+ /// invalid.
+ static SourceLocation findLocationAfterToken(SourceLocation loc,
+ tok::TokenKind TKind,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ bool SkipTrailingWhitespaceAndNewLine);
+
+private:
+
+ /// getCharAndSizeSlowNoWarn - Same as getCharAndSizeSlow, but never emits a
+ /// diagnostic.
+ static char getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
+ const LangOptions &LangOpts);
+
+ //===--------------------------------------------------------------------===//
+ // Other lexer functions.
+
+ void SkipBytes(unsigned Bytes, bool StartOfLine);
+
+ const char *LexUDSuffix(Token &Result, const char *CurPtr);
+
+ // Helper functions to lex the remainder of a token of the specific type.
+ void LexIdentifier (Token &Result, const char *CurPtr);
+ void LexNumericConstant (Token &Result, const char *CurPtr);
+ void LexStringLiteral (Token &Result, const char *CurPtr,
+ tok::TokenKind Kind);
+ void LexRawStringLiteral (Token &Result, const char *CurPtr,
+ tok::TokenKind Kind);
+ void LexAngledStringLiteral(Token &Result, const char *CurPtr);
+ void LexCharConstant (Token &Result, const char *CurPtr,
+ tok::TokenKind Kind);
+ bool LexEndOfFile (Token &Result, const char *CurPtr);
+
+ bool SkipWhitespace (Token &Result, const char *CurPtr);
+ bool SkipBCPLComment (Token &Result, const char *CurPtr);
+ bool SkipBlockComment (Token &Result, const char *CurPtr);
+ bool SaveBCPLComment (Token &Result, const char *CurPtr);
+
+ bool IsStartOfConflictMarker(const char *CurPtr);
+ bool HandleEndOfConflictMarker(const char *CurPtr);
+
+ bool isCodeCompletionPoint(const char *CurPtr) const;
+ void cutOffLexing() { BufferPtr = BufferEnd; }
+};
+
+
+} // end namespace clang
+
+#endif
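
A minimal usage sketch of the raw-lexer constructor and LexFromRawLexer declared above, added here for illustration only (it is not part of the imported header); the buffer bounds and the default-constructed SourceLocation are placeholder assumptions.

  #include "clang/Basic/LangOptions.h"
  #include "clang/Lex/Lexer.h"
  #include "clang/Lex/Token.h"

  // Tokenize a raw character range with no Preprocessor attached.
  // Assumes [Begin, End) delimits a readable text buffer.
  static unsigned countRawTokens(const char *Begin, const char *End) {
    clang::LangOptions Opts;                      // default language options
    clang::Lexer RawLex(clang::SourceLocation(),  // no real file location
                        Opts, Begin, Begin, End);
    clang::Token Tok;
    unsigned Count = 0;
    do {
      RawLex.LexFromRawLexer(Tok);                // never touches a Preprocessor
      ++Count;
    } while (Tok.isNot(clang::tok::eof));
    return Count - 1;                             // do not count the eof token
  }
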
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h b/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h
new file mode 100644
index 0000000..7e7f82f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/LiteralSupport.h
@@ -0,0 +1,239 @@
+//===--- LiteralSupport.h ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the NumericLiteralParser, CharLiteralParser, and
+// StringLiteralParser interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LITERALSUPPORT_H
+#define CLANG_LITERALSUPPORT_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/DataTypes.h"
+#include "clang/Basic/TokenKinds.h"
+#include <cctype>
+
+namespace clang {
+
+class DiagnosticsEngine;
+class Preprocessor;
+class Token;
+class SourceLocation;
+class TargetInfo;
+class SourceManager;
+class LangOptions;
+
+/// NumericLiteralParser - This performs strict semantic analysis of the content
+/// of a pp-number, classifying it as either integer, floating, or erroneous; it
+/// determines the radix of the value and can convert it to a useful value.
+class NumericLiteralParser {
+ Preprocessor &PP; // needed for diagnostics
+
+ const char *const ThisTokBegin;
+ const char *const ThisTokEnd;
+ const char *DigitsBegin, *SuffixBegin; // markers
+ const char *s; // cursor
+
+ unsigned radix;
+
+ bool saw_exponent, saw_period, saw_ud_suffix;
+
+public:
+ NumericLiteralParser(const char *begin, const char *end,
+ SourceLocation Loc, Preprocessor &PP);
+ bool hadError;
+ bool isUnsigned;
+ bool isLong; // This is *not* set for long long.
+ bool isLongLong;
+ bool isFloat; // 1.0f
+ bool isImaginary; // 1.0i
+ bool isMicrosoftInteger; // Microsoft suffix extension i8, i16, i32, or i64.
+
+ bool isIntegerLiteral() const {
+ return !saw_period && !saw_exponent;
+ }
+ bool isFloatingLiteral() const {
+ return saw_period || saw_exponent;
+ }
+
+ bool hasUDSuffix() const {
+ return saw_ud_suffix;
+ }
+ StringRef getUDSuffix() const {
+ assert(saw_ud_suffix);
+ return StringRef(SuffixBegin, ThisTokEnd - SuffixBegin);
+ }
+ unsigned getUDSuffixOffset() const {
+ assert(saw_ud_suffix);
+ return SuffixBegin - ThisTokBegin;
+ }
+
+ unsigned getRadix() const { return radix; }
+
+ /// GetIntegerValue - Convert this numeric literal value to an APInt that
+ /// matches Val's input width. If there is an overflow (i.e., if the unsigned
+ /// value read is larger than the APInt's bits will hold), set Val to the low
+ /// bits of the result and return true. Otherwise, return false.
+ bool GetIntegerValue(llvm::APInt &Val);
+
+ /// GetFloatValue - Convert this numeric literal to a floating value, using
+ /// the specified APFloat fltSemantics (specifying float, double, etc).
+ /// The optional bool isExact (passed-by-reference) has its value
+ /// set to true if the returned APFloat can represent the number in the
+ /// literal exactly, and false otherwise.
+ llvm::APFloat::opStatus GetFloatValue(llvm::APFloat &Result);
+
+private:
+
+ void ParseNumberStartingWithZero(SourceLocation TokLoc);
+
+ /// SkipHexDigits - Read and skip over any hex digits, up to End.
+ /// Return a pointer to the first non-hex digit or End.
+ const char *SkipHexDigits(const char *ptr) {
+ while (ptr != ThisTokEnd && isxdigit(*ptr))
+ ptr++;
+ return ptr;
+ }
+
+ /// SkipOctalDigits - Read and skip over any octal digits, up to End.
+ /// Return a pointer to the first non-octal digit or End.
+ const char *SkipOctalDigits(const char *ptr) {
+ while (ptr != ThisTokEnd && ((*ptr >= '0') && (*ptr <= '7')))
+ ptr++;
+ return ptr;
+ }
+
+ /// SkipDigits - Read and skip over any digits, up to End.
+ /// Return a pointer to the first non-digit character or End.
+ const char *SkipDigits(const char *ptr) {
+ while (ptr != ThisTokEnd && isdigit(*ptr))
+ ptr++;
+ return ptr;
+ }
+
+ /// SkipBinaryDigits - Read and skip over any binary digits, up to End.
+ /// Return a pointer to the first non-binary digit or End.
+ const char *SkipBinaryDigits(const char *ptr) {
+ while (ptr != ThisTokEnd && (*ptr == '0' || *ptr == '1'))
+ ptr++;
+ return ptr;
+ }
+
+};
+
+/// CharLiteralParser - Perform interpretation and semantic analysis of a
+/// character literal.
+class CharLiteralParser {
+ uint64_t Value;
+ tok::TokenKind Kind;
+ bool IsMultiChar;
+ bool HadError;
+ SmallString<32> UDSuffixBuf;
+ unsigned UDSuffixOffset;
+public:
+ CharLiteralParser(const char *begin, const char *end,
+ SourceLocation Loc, Preprocessor &PP,
+ tok::TokenKind kind);
+
+ bool hadError() const { return HadError; }
+ bool isAscii() const { return Kind == tok::char_constant; }
+ bool isWide() const { return Kind == tok::wide_char_constant; }
+ bool isUTF16() const { return Kind == tok::utf16_char_constant; }
+ bool isUTF32() const { return Kind == tok::utf32_char_constant; }
+ bool isMultiChar() const { return IsMultiChar; }
+ uint64_t getValue() const { return Value; }
+ StringRef getUDSuffix() const { return UDSuffixBuf; }
+ unsigned getUDSuffixOffset() const {
+ assert(!UDSuffixBuf.empty() && "no ud-suffix");
+ return UDSuffixOffset;
+ }
+};
+
+/// StringLiteralParser - This decodes string escape characters and performs
+/// wide string analysis and Translation Phase #6 (concatenation of string
+/// literals) (C99 5.1.1.2p1).
+class StringLiteralParser {
+ const SourceManager &SM;
+ const LangOptions &Features;
+ const TargetInfo &Target;
+ DiagnosticsEngine *Diags;
+
+ unsigned MaxTokenLength;
+ unsigned SizeBound;
+ unsigned CharByteWidth;
+ tok::TokenKind Kind;
+ SmallString<512> ResultBuf;
+ char *ResultPtr; // cursor
+ SmallString<32> UDSuffixBuf;
+ unsigned UDSuffixToken;
+ unsigned UDSuffixOffset;
+public:
+ StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
+ Preprocessor &PP, bool Complain = true);
+ StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
+ const SourceManager &sm, const LangOptions &features,
+ const TargetInfo &target, DiagnosticsEngine *diags = 0)
+ : SM(sm), Features(features), Target(target), Diags(diags),
+ MaxTokenLength(0), SizeBound(0), CharByteWidth(0), Kind(tok::unknown),
+ ResultPtr(ResultBuf.data()), hadError(false), Pascal(false) {
+ init(StringToks, NumStringToks);
+ }
+
+
+ bool hadError;
+ bool Pascal;
+
+ StringRef GetString() const {
+ return StringRef(ResultBuf.data(), GetStringLength());
+ }
+ unsigned GetStringLength() const { return ResultPtr-ResultBuf.data(); }
+
+ unsigned GetNumStringChars() const {
+ return GetStringLength() / CharByteWidth;
+ }
+ /// getOffsetOfStringByte - This function returns the offset of the
+ /// specified byte of the string data represented by Token. This handles
+ /// advancing over escape sequences in the string.
+ ///
+ /// If the Diagnostics pointer is non-null, then this will do semantic
+ /// checking of the string literal and emit errors and warnings.
+ unsigned getOffsetOfStringByte(const Token &TheTok, unsigned ByteNo) const;
+
+ bool isAscii() const { return Kind == tok::string_literal; }
+ bool isWide() const { return Kind == tok::wide_string_literal; }
+ bool isUTF8() const { return Kind == tok::utf8_string_literal; }
+ bool isUTF16() const { return Kind == tok::utf16_string_literal; }
+ bool isUTF32() const { return Kind == tok::utf32_string_literal; }
+ bool isPascal() const { return Pascal; }
+
+ StringRef getUDSuffix() const { return UDSuffixBuf; }
+
+ /// Get the index of a token containing a ud-suffix.
+ unsigned getUDSuffixToken() const {
+ assert(!UDSuffixBuf.empty() && "no ud-suffix");
+ return UDSuffixToken;
+ }
+ /// Get the spelling offset of the first byte of the ud-suffix.
+ unsigned getUDSuffixOffset() const {
+ assert(!UDSuffixBuf.empty() && "no ud-suffix");
+ return UDSuffixOffset;
+ }
+
+private:
+ void init(const Token *StringToks, unsigned NumStringToks);
+ bool CopyStringFragment(StringRef Fragment);
+ bool DiagnoseBadString(const Token& Tok);
+};
+
+} // end namespace clang
+
+#endif
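
A hedged sketch of driving the NumericLiteralParser declared above; it is not part of the imported header and assumes a fully initialized Preprocessor and a token spelling range obtained elsewhere (both placeholders here).

  #include "clang/Basic/SourceLocation.h"
  #include "clang/Lex/LiteralSupport.h"
  #include "llvm/ADT/APInt.h"

  // Returns true if [TokBegin, TokEnd) spells a valid integer literal and
  // stores its value in Val (only the low bits are kept on overflow).
  static bool evalIntLiteral(const char *TokBegin, const char *TokEnd,
                             clang::SourceLocation Loc, clang::Preprocessor &PP,
                             llvm::APInt &Val) {
    clang::NumericLiteralParser Literal(TokBegin, TokEnd, Loc, PP);
    if (Literal.hadError || !Literal.isIntegerLiteral())
      return false;
    // GetIntegerValue returns true on overflow; the low bits are still stored.
    Literal.GetIntegerValue(Val);
    return true;
  }
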
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h b/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h
new file mode 100644
index 0000000..8775d39
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/MacroInfo.h
@@ -0,0 +1,305 @@
+//===--- MacroInfo.h - Information about #defined identifiers ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MacroInfo interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_MACROINFO_H
+#define LLVM_CLANG_MACROINFO_H
+
+#include "clang/Lex/Token.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Allocator.h"
+#include <cassert>
+
+namespace clang {
+ class Preprocessor;
+
+/// MacroInfo - Each identifier that is #define'd has an instance of this class
+/// associated with it, used to implement macro expansion.
+class MacroInfo {
+ //===--------------------------------------------------------------------===//
+ // State set when the macro is defined.
+
+ /// Location - This is the place the macro is defined.
+ SourceLocation Location;
+ /// EndLocation - The location of the last token in the macro.
+ SourceLocation EndLocation;
+
+ /// Arguments - The list of arguments for a function-like macro. This can be
+ /// empty, e.g., for "#define X()". In a C99-style variadic macro, this
+ /// includes the __VA_ARGS__ identifier on the list.
+ IdentifierInfo **ArgumentList;
+ unsigned NumArguments;
+
+ /// \brief The location at which this macro was either explicitly exported
+ /// from its module or marked as private.
+ ///
+ /// If invalid, this macro has not been explicitly given any visibility.
+ SourceLocation VisibilityLocation;
+
+ /// ReplacementTokens - This is the list of tokens that the macro is defined
+ /// to.
+ SmallVector<Token, 8> ReplacementTokens;
+
+ /// \brief Length in characters of the macro definition.
+ mutable unsigned DefinitionLength;
+ mutable bool IsDefinitionLengthCached : 1;
+
+ /// IsFunctionLike - True if this macro is a function-like macro, false if it
+ /// is an object-like macro.
+ bool IsFunctionLike : 1;
+
+ /// IsC99Varargs - True if this macro is of the form "#define X(...)" or
+ /// "#define X(Y,Z,...)". The __VA_ARGS__ token should be replaced with the
+ /// contents of "..." in an invocation.
+ bool IsC99Varargs : 1;
+
+ /// IsGNUVarargs - True if this macro is of the form "#define X(a...)". The
+ /// "a" identifier in the replacement list will be replaced with all arguments
+ /// of the macro starting with the specified one.
+ bool IsGNUVarargs : 1;
+
+ /// IsBuiltinMacro - True if this is a builtin macro, such as __LINE__, and if
+ /// it has not yet been redefined or undefined.
+ bool IsBuiltinMacro : 1;
+
+ /// IsFromAST - True if this macro was loaded from an AST file.
+ bool IsFromAST : 1;
+
+ /// \brief Whether this macro changed after it was loaded from an AST file.
+ bool ChangedAfterLoad : 1;
+
+private:
+ //===--------------------------------------------------------------------===//
+ // State that changes as the macro is used.
+
+ /// IsDisabled - True if we have started an expansion of this macro already.
+ /// This disables recursive expansion, which would be quite bad for things like
+ /// #define A A.
+ bool IsDisabled : 1;
+
+ /// IsUsed - True if this macro is either defined in the main file and has
+ /// been used, or if it is not defined in the main file. This is used to
+ /// emit -Wunused-macros diagnostics.
+ bool IsUsed : 1;
+
+ /// AllowRedefinitionsWithoutWarning - True if this macro can be redefined
+ /// without emitting a warning.
+ bool IsAllowRedefinitionsWithoutWarning : 1;
+
+ /// \brief Must warn if the macro is unused at the end of translation unit.
+ bool IsWarnIfUnused : 1;
+
+ /// \brief Whether the macro has public visibility (when described in a module).
+ bool IsPublic : 1;
+
+ ~MacroInfo() {
+ assert(ArgumentList == 0 && "Didn't call destroy before dtor!");
+ }
+
+public:
+ MacroInfo(SourceLocation DefLoc);
+ MacroInfo(const MacroInfo &MI, llvm::BumpPtrAllocator &PPAllocator);
+
+ /// FreeArgumentList - Free the argument list of the macro, restoring it to a
+ /// state where it can be reused for other devious purposes.
+ void FreeArgumentList() {
+ ArgumentList = 0;
+ NumArguments = 0;
+ }
+
+ /// Destroy - destroy this MacroInfo object.
+ void Destroy() {
+ FreeArgumentList();
+ this->~MacroInfo();
+ }
+
+ /// getDefinitionLoc - Return the location that the macro was defined at.
+ ///
+ SourceLocation getDefinitionLoc() const { return Location; }
+
+ /// setDefinitionEndLoc - Set the location of the last token in the macro.
+ ///
+ void setDefinitionEndLoc(SourceLocation EndLoc) { EndLocation = EndLoc; }
+ /// getDefinitionEndLoc - Return the location of the last token in the macro.
+ ///
+ SourceLocation getDefinitionEndLoc() const { return EndLocation; }
+
+ /// \brief Get length in characters of the macro definition.
+ unsigned getDefinitionLength(SourceManager &SM) const {
+ if (IsDefinitionLengthCached)
+ return DefinitionLength;
+ return getDefinitionLengthSlow(SM);
+ }
+
+ /// isIdenticalTo - Return true if the specified macro definition is equal to
+ /// this macro in spelling, arguments, and whitespace. This is used to emit
+ /// duplicate definition warnings. This implements the rules in C99 6.10.3.
+ bool isIdenticalTo(const MacroInfo &Other, Preprocessor &PP) const;
+
+ /// setIsBuiltinMacro - Set or clear the isBuiltinMacro flag.
+ ///
+ void setIsBuiltinMacro(bool Val = true) {
+ IsBuiltinMacro = Val;
+ }
+
+ /// setIsUsed - Set the value of the IsUsed flag.
+ ///
+ void setIsUsed(bool Val) {
+ IsUsed = Val;
+ }
+
+ /// setIsAllowRedefinitionsWithoutWarning - Set the value of the
+ /// IsAllowRedefinitionsWithoutWarning flag.
+ void setIsAllowRedefinitionsWithoutWarning(bool Val) {
+ IsAllowRedefinitionsWithoutWarning = Val;
+ }
+
+ /// \brief Set the value of the IsWarnIfUnused flag.
+ void setIsWarnIfUnused(bool val) {
+ IsWarnIfUnused = val;
+ }
+
+ /// setArgumentList - Set the specified list of identifiers as the argument
+ /// list for this macro.
+ void setArgumentList(IdentifierInfo* const *List, unsigned NumArgs,
+ llvm::BumpPtrAllocator &PPAllocator) {
+ assert(ArgumentList == 0 && NumArguments == 0 &&
+ "Argument list already set!");
+ if (NumArgs == 0) return;
+
+ NumArguments = NumArgs;
+ ArgumentList = PPAllocator.Allocate<IdentifierInfo*>(NumArgs);
+ for (unsigned i = 0; i != NumArgs; ++i)
+ ArgumentList[i] = List[i];
+ }
+
+ /// Arguments - The list of arguments for a function-like macro. This can be
+ /// empty, e.g., for "#define X()".
+ typedef IdentifierInfo* const *arg_iterator;
+ bool arg_empty() const { return NumArguments == 0; }
+ arg_iterator arg_begin() const { return ArgumentList; }
+ arg_iterator arg_end() const { return ArgumentList+NumArguments; }
+ unsigned getNumArgs() const { return NumArguments; }
+
+ /// getArgumentNum - Return the argument number of the specified identifier,
+ /// or -1 if the identifier is not a formal argument identifier.
+ int getArgumentNum(IdentifierInfo *Arg) const {
+ for (arg_iterator I = arg_begin(), E = arg_end(); I != E; ++I)
+ if (*I == Arg) return I-arg_begin();
+ return -1;
+ }
+
+ /// Function/Object-likeness. Keep track of whether this macro has formal
+ /// parameters.
+ void setIsFunctionLike() { IsFunctionLike = true; }
+ bool isFunctionLike() const { return IsFunctionLike; }
+ bool isObjectLike() const { return !IsFunctionLike; }
+
+ /// Varargs querying methods. This can only be set for function-like macros.
+ void setIsC99Varargs() { IsC99Varargs = true; }
+ void setIsGNUVarargs() { IsGNUVarargs = true; }
+ bool isC99Varargs() const { return IsC99Varargs; }
+ bool isGNUVarargs() const { return IsGNUVarargs; }
+ bool isVariadic() const { return IsC99Varargs | IsGNUVarargs; }
+
+ /// isBuiltinMacro - Return true if this macro is a builtin macro, such as
+ /// __LINE__, which requires processing before expansion.
+ bool isBuiltinMacro() const { return IsBuiltinMacro; }
+
+ /// isFromAST - Return true if this macro was loaded from an AST file.
+ bool isFromAST() const { return IsFromAST; }
+
+ /// setIsFromAST - Set whether this macro was loaded from an AST file.
+ void setIsFromAST(bool FromAST = true) { IsFromAST = FromAST; }
+
+ /// \brief Determine whether this macro has changed since it was loaded from
+ /// an AST file.
+ bool hasChangedAfterLoad() const { return ChangedAfterLoad; }
+
+ /// \brief Note whether this macro has changed after it was loaded from an
+ /// AST file.
+ void setChangedAfterLoad(bool CAL = true) { ChangedAfterLoad = CAL; }
+
+ /// isUsed - Return false if this macro is defined in the main file and has
+ /// not yet been used.
+ bool isUsed() const { return IsUsed; }
+
+ /// isAllowRedefinitionsWithoutWarning - Return true if this macro can be
+ /// redefined without warning.
+ bool isAllowRedefinitionsWithoutWarning() const {
+ return IsAllowRedefinitionsWithoutWarning;
+ }
+
+ /// \brief Return true if we should emit a warning if the macro is unused.
+ bool isWarnIfUnused() const {
+ return IsWarnIfUnused;
+ }
+
+ /// getNumTokens - Return the number of tokens that this macro expands to.
+ ///
+ unsigned getNumTokens() const {
+ return ReplacementTokens.size();
+ }
+
+ const Token &getReplacementToken(unsigned Tok) const {
+ assert(Tok < ReplacementTokens.size() && "Invalid token #");
+ return ReplacementTokens[Tok];
+ }
+
+ typedef SmallVector<Token, 8>::const_iterator tokens_iterator;
+ tokens_iterator tokens_begin() const { return ReplacementTokens.begin(); }
+ tokens_iterator tokens_end() const { return ReplacementTokens.end(); }
+ bool tokens_empty() const { return ReplacementTokens.empty(); }
+
+ /// AddTokenToBody - Add the specified token to the replacement text for the
+ /// macro.
+ void AddTokenToBody(const Token &Tok) {
+ assert(!IsDefinitionLengthCached &&
+ "Changing replacement tokens after definition length got calculated");
+ ReplacementTokens.push_back(Tok);
+ }
+
+ /// isEnabled - Return true if this macro is enabled: in other words, that we
+ /// are not currently in an expansion of this macro.
+ bool isEnabled() const { return !IsDisabled; }
+
+ void EnableMacro() {
+ assert(IsDisabled && "Cannot enable an already-enabled macro!");
+ IsDisabled = false;
+ }
+
+ void DisableMacro() {
+ assert(!IsDisabled && "Cannot disable an already-disabled macro!");
+ IsDisabled = true;
+ }
+
+ /// \brief Set the export location for this macro.
+ void setVisibility(bool Public, SourceLocation Loc) {
+ VisibilityLocation = Loc;
+ IsPublic = Public;
+ }
+
+ /// \brief Determine whether this macro is part of the public API of its
+ /// module.
+ bool isPublic() const { return IsPublic; }
+
+ /// \brief Determine the location where this macro was explicitly made
+ /// public or private within its module.
+ SourceLocation getVisibilityLocation() { return VisibilityLocation; }
+
+private:
+ unsigned getDefinitionLengthSlow(SourceManager &SM) const;
+};
+
+} // end namespace clang
+
+#endif
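
A small sketch, not part of the imported header, showing how the replacement-token iterators declared above are typically walked; the MacroInfo is assumed to have been obtained from the Preprocessor.

  #include "clang/Lex/MacroInfo.h"

  // Count how many tokens in the macro body are bare identifiers; for a
  // function-like macro these include references to its formal arguments.
  static unsigned countIdentifierTokens(const clang::MacroInfo &MI) {
    unsigned N = 0;
    for (clang::MacroInfo::tokens_iterator I = MI.tokens_begin(),
                                           E = MI.tokens_end();
         I != E; ++I)
      if (I->is(clang::tok::identifier))
        ++N;
    return N;
  }
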
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/ModuleLoader.h b/contrib/llvm/tools/clang/include/clang/Lex/ModuleLoader.h
new file mode 100644
index 0000000..36d03c0
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/ModuleLoader.h
@@ -0,0 +1,65 @@
+//===--- ModuleLoader.h - Module Loader Interface ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ModuleLoader interface, which is responsible for
+// loading named modules.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_LEX_MODULE_LOADER_H
+#define LLVM_CLANG_LEX_MODULE_LOADER_H
+
+#include "clang/Basic/Module.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/ArrayRef.h"
+
+namespace clang {
+
+class IdentifierInfo;
+
+/// \brief A sequence of identifier/location pairs used to describe a particular
+/// module or submodule, e.g., std.vector.
+typedef llvm::ArrayRef<std::pair<IdentifierInfo*, SourceLocation> >
+ ModuleIdPath;
+
+/// \brief Abstract interface for a module loader.
+///
+/// This abstract interface describes a module loader, which is responsible
+/// for resolving a module name (e.g., "std") to an actual module file, and
+/// then loading that module.
+class ModuleLoader {
+public:
+ virtual ~ModuleLoader();
+
+ /// \brief Attempt to load the given module.
+ ///
+ /// This routine attempts to load the module described by the given
+ /// parameters.
+ ///
+ /// \param ImportLoc The location of the 'import' keyword.
+ ///
+ /// \param Path The identifiers (and their locations) of the module
+ /// "path", e.g., "std.vector" would be split into "std" and "vector".
+ ///
+ /// \param Visibility The visibility provided for the names in the loaded
+ /// module.
+ ///
+ /// \param IsInclusionDirective Indicates that this module is being loaded
+ /// implicitly, due to the presence of an inclusion directive. Otherwise,
+ /// it is being loaded due to an import declaration.
+ ///
+ /// \returns If successful, returns the loaded module. Otherwise, returns
+ /// NULL to indicate that the module could not be loaded.
+ virtual Module *loadModule(SourceLocation ImportLoc, ModuleIdPath Path,
+ Module::NameVisibilityKind Visibility,
+ bool IsInclusionDirective) = 0;
+};
+
+}
+
+#endif
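
The interface above has a single pure virtual method. A hedged sketch of the smallest possible implementation follows (it is not part of the imported header, and the class name is an illustrative placeholder): a loader that simply reports failure.

  #include "clang/Lex/ModuleLoader.h"

  // Placeholder loader: every import fails, forcing callers onto their
  // textual-inclusion fallback paths.
  class NullModuleLoader : public clang::ModuleLoader {
  public:
    virtual clang::Module *loadModule(clang::SourceLocation ImportLoc,
                                      clang::ModuleIdPath Path,
                                      clang::Module::NameVisibilityKind Visibility,
                                      bool IsInclusionDirective) {
      return 0; // no module could be loaded
    }
  };
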
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h b/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h
new file mode 100644
index 0000000..4ebb1d4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/ModuleMap.h
@@ -0,0 +1,237 @@
+//===--- ModuleMap.h - Describe the layout of modules -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ModuleMap interface, which describes the layout of a
+// module as it relates to headers.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_CLANG_LEX_MODULEMAP_H
+#define LLVM_CLANG_LEX_MODULEMAP_H
+
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/Module.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringMap.h"
+#include <string>
+
+namespace clang {
+
+class DirectoryEntry;
+class FileEntry;
+class FileManager;
+class DiagnosticConsumer;
+class DiagnosticsEngine;
+class ModuleMapParser;
+
+class ModuleMap {
+ SourceManager *SourceMgr;
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags;
+ const LangOptions &LangOpts;
+ const TargetInfo *Target;
+
+ /// \brief The directory used for Clang-supplied, builtin include headers,
+ /// such as "stdint.h".
+ const DirectoryEntry *BuiltinIncludeDir;
+
+ /// \brief Language options used to parse the module map itself.
+ ///
+ /// These are always simple C language options.
+ LangOptions MMapLangOpts;
+
+ /// \brief The top-level modules that are known.
+ llvm::StringMap<Module *> Modules;
+
+ /// \brief Mapping from each header to the module that owns the contents of
+ /// that header.
+ llvm::DenseMap<const FileEntry *, Module *> Headers;
+
+ /// \brief Mapping from directories with umbrella headers to the module
+ /// that is generated from the umbrella header.
+ ///
+ /// This mapping is used to map headers that haven't explicitly been named
+ /// in the module map over to the module that includes them via its umbrella
+ /// header.
+ llvm::DenseMap<const DirectoryEntry *, Module *> UmbrellaDirs;
+
+ friend class ModuleMapParser;
+
+ /// \brief Resolve the given export declaration into an actual export
+ /// declaration.
+ ///
+ /// \param Mod The module in which we're resolving the export declaration.
+ ///
+ /// \param Unresolved The export declaration to resolve.
+ ///
+ /// \param Complain Whether this routine should complain about unresolvable
+ /// exports.
+ ///
+ /// \returns The resolved export declaration, which will have a NULL pointer
+ /// if the export could not be resolved.
+ Module::ExportDecl
+ resolveExport(Module *Mod, const Module::UnresolvedExportDecl &Unresolved,
+ bool Complain);
+
+public:
+ /// \brief Construct a new module map.
+ ///
+ /// \param FileMgr The file manager used to find module files and headers.
+ /// This file manager should be shared with the header-search mechanism, since
+ /// they will refer to the same headers.
+ ///
+ /// \param DC A diagnostic consumer that will be cloned for use in generating
+ /// diagnostics.
+ ///
+ /// \param LangOpts Language options for this translation unit.
+ ///
+ /// \param Target The target for this translation unit.
+ ModuleMap(FileManager &FileMgr, const DiagnosticConsumer &DC,
+ const LangOptions &LangOpts, const TargetInfo *Target);
+
+ /// \brief Destroy the module map.
+ ///
+ ~ModuleMap();
+
+ /// \brief Set the target information.
+ void setTarget(const TargetInfo &Target);
+
+ /// \brief Set the directory that contains Clang-supplied include
+ /// files, such as our stdarg.h or tgmath.h.
+ void setBuiltinIncludeDir(const DirectoryEntry *Dir) {
+ BuiltinIncludeDir = Dir;
+ }
+
+ /// \brief Retrieve the module that owns the given header file, if any.
+ ///
+ /// \param File The header file that is likely to be included.
+ ///
+ /// \returns The module that owns the given header file, or null to indicate
+ /// that no module owns this header file.
+ Module *findModuleForHeader(const FileEntry *File);
+
+ /// \brief Determine whether the given header is part of a module
+ /// marked 'unavailable'.
+ bool isHeaderInUnavailableModule(const FileEntry *Header);
+
+ /// \brief Retrieve a module with the given name.
+ ///
+ /// \param Name The name of the module to look up.
+ ///
+ /// \returns The named module, if known; otherwise, returns null.
+ Module *findModule(StringRef Name);
+
+ /// \brief Retrieve a module with the given name using lexical name lookup,
+ /// starting at the given context.
+ ///
+ /// \param Name The name of the module to look up.
+ ///
+ /// \param Context The module context, from which we will perform lexical
+ /// name lookup.
+ ///
+ /// \returns The named module, if known; otherwise, returns null.
+ Module *lookupModuleUnqualified(StringRef Name, Module *Context);
+
+ /// \brief Retrieve a module with the given name within the given context,
+ /// using direct (qualified) name lookup.
+ ///
+ /// \param Name The name of the module to look up.
+ ///
+ /// \param Context The module for which we will look for a submodule. If
+ /// null, we will look for a top-level module.
+ ///
+ /// \returns The named submodule, if known; otherwise, returns null.
+ Module *lookupModuleQualified(StringRef Name, Module *Context);
+
+ /// \brief Find a new module or submodule, or create it if it does not already
+ /// exist.
+ ///
+ /// \param Name The name of the module to find or create.
+ ///
+ /// \param Parent The module that will act as the parent of this submodule,
+ /// or NULL to indicate that this is a top-level module.
+ ///
+ /// \param IsFramework Whether this is a framework module.
+ ///
+ /// \param IsExplicit Whether this is an explicit submodule.
+ ///
+ /// \returns The found or newly-created module, along with a boolean value
+ /// that will be true if the module is newly-created.
+ std::pair<Module *, bool> findOrCreateModule(StringRef Name, Module *Parent,
+ bool IsFramework,
+ bool IsExplicit);
+
+ /// \brief Infer the contents of a framework module map from the given
+ /// framework directory.
+ Module *inferFrameworkModule(StringRef ModuleName,
+ const DirectoryEntry *FrameworkDir,
+ bool IsSystem, Module *Parent);
+
+ /// \brief Retrieve the module map file containing the definition of the given
+ /// module.
+ ///
+ /// \param Module The module whose module map file will be returned, if known.
+ ///
+ /// \returns The file entry for the module map file containing the given
+ /// module, or NULL if the module definition was inferred.
+ const FileEntry *getContainingModuleMapFile(Module *Module);
+
+ /// \brief Resolve all of the unresolved exports in the given module.
+ ///
+ /// \param Mod The module whose exports should be resolved.
+ ///
+ /// \param Complain Whether to emit diagnostics for failures.
+ ///
+ /// \returns true if any errors were encountered while resolving exports,
+ /// false otherwise.
+ bool resolveExports(Module *Mod, bool Complain);
+
+ /// \brief Infers the (sub)module based on the given source location and
+ /// source manager.
+ ///
+ /// \param Loc The location within the source that we are querying, along
+ /// with its source manager.
+ ///
+ /// \returns The module that owns this source location, or null if no
+ /// module owns this source location.
+ Module *inferModuleFromLocation(FullSourceLoc Loc);
+
+ /// \brief Sets the umbrella header of the given module to the given
+ /// header.
+ void setUmbrellaHeader(Module *Mod, const FileEntry *UmbrellaHeader);
+
+ /// \brief Sets the umbrella directory of the given module to the given
+ /// directory.
+ void setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir);
+
+ /// \brief Adds this header to the given module.
+ void addHeader(Module *Mod, const FileEntry *Header);
+
+ /// \brief Parse the given module map file, and record any modules we
+ /// encounter.
+ ///
+ /// \param File The file to be parsed.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool parseModuleMapFile(const FileEntry *File);
+
+ /// \brief Dump the contents of the module map, for debugging purposes.
+ void dump();
+
+ typedef llvm::StringMap<Module *>::const_iterator module_iterator;
+ module_iterator module_begin() const { return Modules.begin(); }
+ module_iterator module_end() const { return Modules.end(); }
+};
+
+}
+#endif
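
A brief sketch against the lookup methods declared above, not part of the imported header; it assumes a ModuleMap that has already parsed a module map file, and the function name and path components are placeholders.

  #include "clang/Lex/ModuleMap.h"
  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ADT/StringRef.h"

  // Resolve a dotted module path such as "std.vector" one component at a time,
  // using qualified lookup within the previously found (sub)module.
  static clang::Module *resolvePath(clang::ModuleMap &MMap,
                                    llvm::ArrayRef<llvm::StringRef> Components) {
    clang::Module *Context = 0;
    for (size_t I = 0, N = Components.size(); I != N; ++I) {
      Context = I == 0 ? MMap.findModule(Components[I])
                       : MMap.lookupModuleQualified(Components[I], Context);
      if (!Context)
        return 0; // some component was unknown
    }
    return Context;
  }
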
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/MultipleIncludeOpt.h b/contrib/llvm/tools/clang/include/clang/Lex/MultipleIncludeOpt.h
new file mode 100644
index 0000000..95b00df
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/MultipleIncludeOpt.h
@@ -0,0 +1,130 @@
+//===--- MultipleIncludeOpt.h - Header Multiple-Include Optzn ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MultipleIncludeOpt interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_MULTIPLEINCLUDEOPT_H
+#define LLVM_CLANG_MULTIPLEINCLUDEOPT_H
+
+namespace clang {
+class IdentifierInfo;
+
+/// MultipleIncludeOpt - This class implements the simple state machine that the
+/// Lexer class uses to detect files subject to the 'multiple-include'
+/// optimization. The public methods in this class are triggered by various
+/// events that occur when a file is lexed, and after the entire file is lexed,
+/// information about which macro (if any) controls the header is returned.
+class MultipleIncludeOpt {
+ /// ReadAnyTokens - This is set to false when a file is first opened and true
+ /// any time a token is returned to the client or a (non-multiple-include)
+ /// directive is parsed. When the final #endif is parsed this is reset back
+ /// to false, that way any tokens before the first #ifdef or after the last
+ /// #endif can be easily detected.
+ bool ReadAnyTokens;
+
+ /// DidMacroExpansion - This is set to true any time a macro is expanded while
+ /// this lexer is the current buffer. It is used to reject the optimization
+ /// when a macro expansion occurs in the #ifndef line itself, since the guard
+ /// condition could then evaluate differently on a subsequent inclusion.
+ bool DidMacroExpansion;
+
+ /// TheMacro - The controlling macro for a file, if valid.
+ ///
+ const IdentifierInfo *TheMacro;
+public:
+ MultipleIncludeOpt() {
+ ReadAnyTokens = false;
+ DidMacroExpansion = false;
+ TheMacro = 0;
+ }
+
+ /// Invalidate - Permanently mark this file as not being suitable for the
+ /// include-file optimization.
+ void Invalidate() {
+ // If we have read tokens but have no controlling macro, the state-machine
+ // below can never "accept".
+ ReadAnyTokens = true;
+ TheMacro = 0;
+ }
+
+ /// getHasReadAnyTokensVal - This is used for the #ifndef handshake at the
+ /// top of the file when reading preprocessor directives. Otherwise, reading
+ /// the "ifndef x" would count as reading tokens.
+ bool getHasReadAnyTokensVal() const { return ReadAnyTokens; }
+
+ // If a token is read, remember that we have seen a side-effect in this file.
+ void ReadToken() { ReadAnyTokens = true; }
+
+ /// ExpandedMacro - When a macro is expanded with this lexer as the current
+ /// buffer, this method is called to disable the MIOpt if needed.
+ void ExpandedMacro() { DidMacroExpansion = true; }
+
+ /// EnterTopLevelIFNDEF - When entering a top-level #ifndef directive (or the
+ /// "#if !defined" equivalent) without any preceding tokens, this method is
+ /// called.
+ ///
+ /// Note, we don't care about the input value of 'ReadAnyTokens'. The caller
+ /// ensures that this is only called if there are no tokens read before the
+ /// #ifndef. The caller is required to do this, because reading the #if line
+ /// obviously reads in tokens.
+ void EnterTopLevelIFNDEF(const IdentifierInfo *M) {
+ // If the macro is already set, this is after the top-level #endif.
+ if (TheMacro)
+ return Invalidate();
+
+ // If we have already expanded a macro by the end of the #ifndef line, then
+ // there is a macro expansion *in* the #ifndef line. This means that the
+ // condition could evaluate differently when subsequently #included. Reject
+ // this.
+ if (DidMacroExpansion)
+ return Invalidate();
+
+ // Remember that we're in the #if and that we have the macro.
+ ReadAnyTokens = true;
+ TheMacro = M;
+ }
+
+ /// EnterTopLevelConditional - This is invoked when a top level conditional
+ /// (except #ifndef) is found.
+ void EnterTopLevelConditional() {
+ /// If a conditional directive (except #ifndef) is found at the top level,
+ /// there is a chunk of the file not guarded by the controlling macro.
+ Invalidate();
+ }
+
+ /// ExitTopLevelConditional - This method is called when the lexer exits the
+ /// top-level conditional.
+ void ExitTopLevelConditional() {
+ // If we have a macro, that means the top of the file was ok. Set our state
+ // back to "not having read any tokens" so we can detect anything after the
+ // #endif.
+ if (!TheMacro) return Invalidate();
+
+ // At this point, we haven't "read any tokens" but we do have a controlling
+ // macro.
+ ReadAnyTokens = false;
+ }
+
+ /// GetControllingMacroAtEndOfFile - Once the entire file has been lexed, if
+ /// there is a controlling macro, return it.
+ const IdentifierInfo *GetControllingMacroAtEndOfFile() const {
+ // If we haven't read any tokens after the #endif, return the controlling
+ // macro if it's valid (if it isn't, it will be null).
+ if (!ReadAnyTokens)
+ return TheMacro;
+ return 0;
+ }
+};
+
+} // end namespace clang
+
+#endif
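
A hedged sketch, not part of the imported header, of the event sequence the lexer would feed this state machine for an ideally guarded header; GuardMacro stands in for the IdentifierInfo of the #ifndef'd macro.

  #include "clang/Lex/MultipleIncludeOpt.h"

  // Simulate lexing "#ifndef GUARD / #define GUARD / ...tokens... / #endif".
  static bool isIdeallyGuarded(const clang::IdentifierInfo *GuardMacro) {
    clang::MultipleIncludeOpt MIOpt;
    MIOpt.EnterTopLevelIFNDEF(GuardMacro); // top-level #ifndef GUARD
    MIOpt.ReadToken();                     // tokens inside the guard
    MIOpt.ExitTopLevelConditional();       // the final #endif
    // Non-null only if no tokens were read after the #endif.
    return MIOpt.GetControllingMacroAtEndOfFile() == GuardMacro;
  }
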
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h b/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h
new file mode 100644
index 0000000..33558c8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PPCallbacks.h
@@ -0,0 +1,385 @@
+//===--- PPCallbacks.h - Callbacks for Preprocessor actions -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PPCallbacks interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LEX_PPCALLBACKS_H
+#define LLVM_CLANG_LEX_PPCALLBACKS_H
+
+#include "clang/Lex/DirectoryLookup.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/DiagnosticIDs.h"
+#include "llvm/ADT/StringRef.h"
+#include <string>
+
+namespace clang {
+ class SourceLocation;
+ class Token;
+ class IdentifierInfo;
+ class MacroInfo;
+
+/// PPCallbacks - This interface provides a way to observe the actions of the
+/// preprocessor as it does its thing. Clients can define their hooks here to
+/// implement preprocessor level tools.
+class PPCallbacks {
+public:
+ virtual ~PPCallbacks();
+
+ enum FileChangeReason {
+ EnterFile, ExitFile, SystemHeaderPragma, RenameFile
+ };
+
+ /// FileChanged - This callback is invoked whenever a source file is
+ /// entered or exited. The SourceLocation indicates the new location, and
+ /// Reason indicates whether this is because we are entering a new
+ /// #include'd file (EnterFile) or exiting one because we ran off the end
+ /// (ExitFile).
+ ///
+ /// \param PrevFID the file that was exited if \arg Reason is ExitFile.
+ virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID = FileID()) {
+ }
+
+ /// FileSkipped - This callback is invoked whenever a source file is
+ /// skipped as the result of header guard optimization. ParentFile
+ /// is the file that #includes the skipped file. FilenameTok is the
+ /// token in ParentFile that indicates the skipped file.
+ virtual void FileSkipped(const FileEntry &ParentFile,
+ const Token &FilenameTok,
+ SrcMgr::CharacteristicKind FileType) {
+ }
+
+ /// FileNotFound - This callback is invoked whenever an inclusion directive
+ /// results in a file-not-found error.
+ ///
+ /// \param FileName The name of the file being included, as written in the
+ /// source code.
+ ///
+ /// \param RecoveryPath If this client indicates that it can recover from
+ /// this missing file, the client should set this as an additional header
+ /// search path.
+ ///
+ /// \returns true to indicate that the preprocessor should attempt to recover
+ /// by adding \p RecoveryPath as a header search path.
+ virtual bool FileNotFound(StringRef FileName,
+ SmallVectorImpl<char> &RecoveryPath) {
+ return false;
+ }
+
+ /// \brief This callback is invoked whenever an inclusion directive of
+ /// any kind (\c #include, \c #import, etc.) has been processed, regardless
+ /// of whether the inclusion will actually result in an inclusion.
+ ///
+ /// \param HashLoc The location of the '#' that starts the inclusion
+ /// directive.
+ ///
+ /// \param IncludeTok The token that indicates the kind of inclusion
+ /// directive, e.g., 'include' or 'import'.
+ ///
+ /// \param FileName The name of the file being included, as written in the
+ /// source code.
+ ///
+ /// \param IsAngled Whether the file name was enclosed in angle brackets;
+ /// otherwise, it was enclosed in quotes.
+ ///
+ /// \param File The actual file that may be included by this inclusion
+ /// directive.
+ ///
+ /// \param EndLoc The location of the last token within the inclusion
+ /// directive.
+ ///
+ /// \param SearchPath Contains the search path which was used to find the file
+ /// in the file system. If the file was found via an absolute include path,
+ /// SearchPath will be empty. For framework includes, the SearchPath and
+ /// RelativePath will be split up. For example, if an include of "Some/Some.h"
+ /// is found via the framework path
+ /// "path/to/Frameworks/Some.framework/Headers/Some.h", SearchPath will be
+ /// "path/to/Frameworks/Some.framework/Headers" and RelativePath will be
+ /// "Some.h".
+ ///
+ /// \param RelativePath The path relative to SearchPath, at which the include
+ /// file was found. This is equal to FileName except for framework includes.
+ virtual void InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ StringRef FileName,
+ bool IsAngled,
+ const FileEntry *File,
+ SourceLocation EndLoc,
+ StringRef SearchPath,
+ StringRef RelativePath) {
+ }
+
+ /// EndOfMainFile - This callback is invoked when the end of the main file is
+ /// reached; no subsequent callbacks will be made.
+ virtual void EndOfMainFile() {
+ }
+
+ /// Ident - This callback is invoked when a #ident or #sccs directive is read.
+ /// \param Loc The location of the directive.
+ /// \param str The text of the directive.
+ ///
+ virtual void Ident(SourceLocation Loc, const std::string &str) {
+ }
+
+ /// PragmaComment - This callback is invoked when a #pragma comment directive
+ /// is read.
+ ///
+ virtual void PragmaComment(SourceLocation Loc, const IdentifierInfo *Kind,
+ const std::string &Str) {
+ }
+
+ /// PragmaMessage - This callback is invoked when a #pragma message directive
+ /// is read.
+ /// \param Loc The location of the message directive.
+ /// \param Str The text of the message directive.
+ ///
+ virtual void PragmaMessage(SourceLocation Loc, StringRef Str) {
+ }
+
+ /// PragmaDiagnosticPush - This callback is invoked when a
+ /// #pragma gcc diagnostic push directive is read.
+ virtual void PragmaDiagnosticPush(SourceLocation Loc,
+ StringRef Namespace) {
+ }
+
+ /// PragmaDiagnosticPop - This callback is invoked when a
+ /// #pragma gcc diagnostic pop directive is read.
+ virtual void PragmaDiagnosticPop(SourceLocation Loc,
+ StringRef Namespace) {
+ }
+
+ /// PragmaDiagnostic - This callback is invoked when a
+ /// #pragma gcc diagnostic directive is read.
+ virtual void PragmaDiagnostic(SourceLocation Loc, StringRef Namespace,
+ diag::Mapping mapping, StringRef Str) {
+ }
+
+ /// MacroExpands - This is called by
+ /// Preprocessor::HandleMacroExpandedIdentifier when a macro invocation is
+ /// found.
+ virtual void MacroExpands(const Token &MacroNameTok, const MacroInfo* MI,
+ SourceRange Range) {
+ }
+
+ /// MacroDefined - This hook is called whenever a macro definition is seen.
+ virtual void MacroDefined(const Token &MacroNameTok, const MacroInfo *MI) {
+ }
+
+ /// MacroUndefined - This hook is called whenever a macro #undef is seen.
+ /// MI is released immediately following this callback.
+ virtual void MacroUndefined(const Token &MacroNameTok, const MacroInfo *MI) {
+ }
+
+ /// Defined - This hook is called whenever the 'defined' operator is seen.
+ virtual void Defined(const Token &MacroNameTok) {
+ }
+
+ /// SourceRangeSkipped - This hook is called when a source range is skipped.
+ /// \param Range The SourceRange that was skipped. The range begins at the
+ /// #if/#else directive and ends after the #endif/#else directive.
+ virtual void SourceRangeSkipped(SourceRange Range) {
+ }
+
+ /// If -- This hook is called whenever an #if is seen.
+ /// \param Loc the source location of the directive.
+ /// \param ConditionRange The SourceRange of the expression being tested.
+ // FIXME: better to pass in a list (or tree!) of Tokens.
+ virtual void If(SourceLocation Loc, SourceRange ConditionRange) {
+ }
+
+ /// Elif -- This hook is called whenever an #elif is seen.
+ /// \param Loc the source location of the directive.
+ /// \param ConditionRange The SourceRange of the expression being tested.
+ /// \param IfLoc the source location of the #if/#ifdef/#ifndef directive.
+ // FIXME: better to pass in a list (or tree!) of Tokens.
+ virtual void Elif(SourceLocation Loc, SourceRange ConditionRange,
+ SourceLocation IfLoc) {
+ }
+
+ /// Ifdef -- This hook is called whenever an #ifdef is seen.
+ /// \param Loc the source location of the directive.
+ /// \param MacroNameTok Information on the token being tested.
+ virtual void Ifdef(SourceLocation Loc, const Token &MacroNameTok) {
+ }
+
+ /// Ifndef -- This hook is called whenever an #ifndef is seen.
+ /// \param Loc the source location of the directive.
+ /// \param MacroNameTok Information on the token being tested.
+ virtual void Ifndef(SourceLocation Loc, const Token &MacroNameTok) {
+ }
+
+ /// Else -- This hook is called whenever an #else is seen.
+ /// \param Loc the source location of the directive.
+ /// \param IfLoc the source location of the #if/#ifdef/#ifndef directive.
+ virtual void Else(SourceLocation Loc, SourceLocation IfLoc) {
+ }
+
+ /// Endif -- This hook is called whenever an #endif is seen.
+ /// \param Loc the source location of the directive.
+ /// \param IfLoc the source location of the #if/#ifdef/#ifndef directive.
+ virtual void Endif(SourceLocation Loc, SourceLocation IfLoc) {
+ }
+};
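+
+// For illustration, a minimal client might subclass PPCallbacks to log every
+// inclusion directive it sees; the IncludeLogger name is hypothetical, and the
+// sketch assumes llvm::errs() is available from the usual LLVM support
+// headers. The resulting object would then be handed to the Preprocessor (see
+// its Callbacks member).
+//
+//   class IncludeLogger : public PPCallbacks {
+//   public:
+//     virtual void InclusionDirective(SourceLocation HashLoc,
+//                                     const Token &IncludeTok,
+//                                     StringRef FileName, bool IsAngled,
+//                                     const FileEntry *File,
+//                                     SourceLocation EndLoc,
+//                                     StringRef SearchPath,
+//                                     StringRef RelativePath) {
+//       llvm::errs() << "included: " << FileName << "\n";
+//     }
+//   };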
+
+/// PPChainedCallbacks - Simple wrapper class for chaining callbacks.
+class PPChainedCallbacks : public PPCallbacks {
+ virtual void anchor();
+ PPCallbacks *First, *Second;
+
+public:
+ PPChainedCallbacks(PPCallbacks *_First, PPCallbacks *_Second)
+ : First(_First), Second(_Second) {}
+ ~PPChainedCallbacks() {
+ delete Second;
+ delete First;
+ }
+
+ virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID) {
+ First->FileChanged(Loc, Reason, FileType, PrevFID);
+ Second->FileChanged(Loc, Reason, FileType, PrevFID);
+ }
+
+ virtual void FileSkipped(const FileEntry &ParentFile,
+ const Token &FilenameTok,
+ SrcMgr::CharacteristicKind FileType) {
+ First->FileSkipped(ParentFile, FilenameTok, FileType);
+ Second->FileSkipped(ParentFile, FilenameTok, FileType);
+ }
+
+ virtual bool FileNotFound(StringRef FileName,
+ SmallVectorImpl<char> &RecoveryPath) {
+ return First->FileNotFound(FileName, RecoveryPath) ||
+ Second->FileNotFound(FileName, RecoveryPath);
+ }
+
+ virtual void InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ StringRef FileName,
+ bool IsAngled,
+ const FileEntry *File,
+ SourceLocation EndLoc,
+ StringRef SearchPath,
+ StringRef RelativePath) {
+ First->InclusionDirective(HashLoc, IncludeTok, FileName, IsAngled, File,
+ EndLoc, SearchPath, RelativePath);
+ Second->InclusionDirective(HashLoc, IncludeTok, FileName, IsAngled, File,
+ EndLoc, SearchPath, RelativePath);
+ }
+
+ virtual void EndOfMainFile() {
+ First->EndOfMainFile();
+ Second->EndOfMainFile();
+ }
+
+ virtual void Ident(SourceLocation Loc, const std::string &str) {
+ First->Ident(Loc, str);
+ Second->Ident(Loc, str);
+ }
+
+ virtual void PragmaComment(SourceLocation Loc, const IdentifierInfo *Kind,
+ const std::string &Str) {
+ First->PragmaComment(Loc, Kind, Str);
+ Second->PragmaComment(Loc, Kind, Str);
+ }
+
+ virtual void PragmaMessage(SourceLocation Loc, StringRef Str) {
+ First->PragmaMessage(Loc, Str);
+ Second->PragmaMessage(Loc, Str);
+ }
+
+ virtual void PragmaDiagnosticPush(SourceLocation Loc,
+ StringRef Namespace) {
+ First->PragmaDiagnosticPush(Loc, Namespace);
+ Second->PragmaDiagnosticPush(Loc, Namespace);
+ }
+
+ virtual void PragmaDiagnosticPop(SourceLocation Loc,
+ StringRef Namespace) {
+ First->PragmaDiagnosticPop(Loc, Namespace);
+ Second->PragmaDiagnosticPop(Loc, Namespace);
+ }
+
+ virtual void PragmaDiagnostic(SourceLocation Loc, StringRef Namespace,
+ diag::Mapping mapping, StringRef Str) {
+ First->PragmaDiagnostic(Loc, Namespace, mapping, Str);
+ Second->PragmaDiagnostic(Loc, Namespace, mapping, Str);
+ }
+
+ virtual void MacroExpands(const Token &MacroNameTok, const MacroInfo* MI,
+ SourceRange Range) {
+ First->MacroExpands(MacroNameTok, MI, Range);
+ Second->MacroExpands(MacroNameTok, MI, Range);
+ }
+
+ virtual void MacroDefined(const Token &MacroNameTok, const MacroInfo *MI) {
+ First->MacroDefined(MacroNameTok, MI);
+ Second->MacroDefined(MacroNameTok, MI);
+ }
+
+ virtual void MacroUndefined(const Token &MacroNameTok, const MacroInfo *MI) {
+ First->MacroUndefined(MacroNameTok, MI);
+ Second->MacroUndefined(MacroNameTok, MI);
+ }
+
+ virtual void Defined(const Token &MacroNameTok) {
+ First->Defined(MacroNameTok);
+ Second->Defined(MacroNameTok);
+ }
+
+ virtual void SourceRangeSkipped(SourceRange Range) {
+ First->SourceRangeSkipped(Range);
+ Second->SourceRangeSkipped(Range);
+ }
+
+ /// If -- This hook is called whenever an #if is seen.
+ virtual void If(SourceLocation Loc, SourceRange ConditionRange) {
+ First->If(Loc, ConditionRange);
+ Second->If(Loc, ConditionRange);
+ }
+
+ /// Elif -- This hook is called whenever an #elif is seen.
+ virtual void Elif(SourceLocation Loc, SourceRange ConditionRange,
+ SourceLocation IfLoc) {
+ First->Elif(Loc, ConditionRange, IfLoc);
+ Second->Elif(Loc, ConditionRange, IfLoc);
+ }
+
+ /// Ifdef -- This hook is called whenever an #ifdef is seen.
+ virtual void Ifdef(SourceLocation Loc, const Token &MacroNameTok) {
+ First->Ifdef(Loc, MacroNameTok);
+ Second->Ifdef(Loc, MacroNameTok);
+ }
+
+ /// Ifndef -- This hook is called whenever an #ifndef is seen.
+ virtual void Ifndef(SourceLocation Loc, const Token &MacroNameTok) {
+ First->Ifndef(Loc, MacroNameTok);
+ Second->Ifndef(Loc, MacroNameTok);
+ }
+
+ /// Else -- This hook is called whenever an #else is seen.
+ virtual void Else(SourceLocation Loc, SourceLocation IfLoc) {
+ First->Else(Loc, IfLoc);
+ Second->Else(Loc, IfLoc);
+ }
+
+ /// Endif -- This hook is called whenever an #endif is seen.
+ virtual void Endif(SourceLocation Loc, SourceLocation IfLoc) {
+ First->Endif(Loc, IfLoc);
+ Second->Endif(Loc, IfLoc);
+ }
+};
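+
+// For illustration, two independently written callback objects could be
+// combined so that both observe the same preprocessing events; LoggerA and
+// LoggerB are hypothetical PPCallbacks subclasses. Note that
+// PPChainedCallbacks deletes both objects in its destructor, so it takes
+// ownership of them.
+//
+//   PPCallbacks *Both = new PPChainedCallbacks(new LoggerA(), new LoggerB());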
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PTHLexer.h b/contrib/llvm/tools/clang/include/clang/Lex/PTHLexer.h
new file mode 100644
index 0000000..f6a97a0
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PTHLexer.h
@@ -0,0 +1,105 @@
+//===--- PTHLexer.h - Lexer based on Pre-tokenized input --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PTHLexer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_PTHLEXER_H
+#define LLVM_CLANG_PTHLEXER_H
+
+#include "clang/Lex/PreprocessorLexer.h"
+
+namespace clang {
+
+class PTHManager;
+class PTHSpellingSearch;
+
+class PTHLexer : public PreprocessorLexer {
+ SourceLocation FileStartLoc;
+
+ /// TokBuf - Buffer from PTH file containing raw token data.
+ const unsigned char* TokBuf;
+
+ /// CurPtr - Pointer into current offset of the token buffer where
+ /// the next token will be read.
+ const unsigned char* CurPtr;
+
+ /// LastHashTokPtr - Pointer into TokBuf of the last processed '#'
+ /// token that appears at the start of a line.
+ const unsigned char* LastHashTokPtr;
+
+ /// PPCond - Pointer to a side table in the PTH file that provides a
+ /// concise summary of the preprocessor conditional block structure.
+ /// This is used to perform quick skipping of conditional blocks.
+ const unsigned char* PPCond;
+
+ /// CurPPCondPtr - Pointer inside PPCond that refers to the next entry
+ /// to process when doing quick skipping of preprocessor blocks.
+ const unsigned char* CurPPCondPtr;
+
+ PTHLexer(const PTHLexer&); // DO NOT IMPLEMENT
+ void operator=(const PTHLexer&); // DO NOT IMPLEMENT
+
+ /// ReadToken - Used by PTHLexer to read tokens from TokBuf.
+ void ReadToken(Token& T);
+
+ bool LexEndOfFile(Token &Result);
+
+ /// PTHMgr - The PTHManager object that created this PTHLexer.
+ PTHManager& PTHMgr;
+
+ Token EofToken;
+
+protected:
+ friend class PTHManager;
+
+ /// Create a PTHLexer for the specified token stream.
+ PTHLexer(Preprocessor& pp, FileID FID, const unsigned char *D,
+ const unsigned char* ppcond, PTHManager &PM);
+public:
+
+ ~PTHLexer() {}
+
+ /// Lex - Return the next token.
+ void Lex(Token &Tok);
+
+ void getEOF(Token &Tok);
+
+ /// DiscardToEndOfLine - Read the rest of the current preprocessor line as an
+ /// uninterpreted string. This switches the lexer out of directive mode.
+ void DiscardToEndOfLine();
+
+ /// isNextPPTokenLParen - Return 1 if the next unexpanded token is a
+ /// tok::l_paren token, 0 if it is something else and 2 if there are no more
+ /// tokens controlled by this lexer.
+ unsigned isNextPPTokenLParen() {
+ // isNextPPTokenLParen is not on the hot path, and all we care about is
+ // whether or not we are at a token with kind tok::eof or tok::l_paren.
+ // Just read the first byte from the current token pointer to determine
+ // its kind.
+ tok::TokenKind x = (tok::TokenKind)*CurPtr;
+ return x == tok::eof ? 2 : x == tok::l_paren;
+ }
+
+ /// IndirectLex - An indirect call to 'Lex' that can be invoked via
+ /// the PreprocessorLexer interface.
+ void IndirectLex(Token &Result) { Lex(Result); }
+
+ /// getSourceLocation - Return a source location for the token in
+ /// the current file.
+ SourceLocation getSourceLocation();
+
+ /// SkipBlock - Used by Preprocessor to skip the current conditional block.
+ bool SkipBlock();
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h b/contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h
new file mode 100644
index 0000000..25a4903
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PTHManager.h
@@ -0,0 +1,140 @@
+//===--- PTHManager.h - Manager object for PTH processing -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PTHManager interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_PTHMANAGER_H
+#define LLVM_CLANG_PTHMANAGER_H
+
+#include "clang/Lex/PTHLexer.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Allocator.h"
+#include <string>
+
+namespace llvm {
+ class MemoryBuffer;
+}
+
+namespace clang {
+
+class FileEntry;
+class PTHLexer;
+class DiagnosticsEngine;
+class FileSystemStatCache;
+
+class PTHManager : public IdentifierInfoLookup {
+ friend class PTHLexer;
+
+ /// The memory mapped PTH file.
+ const llvm::MemoryBuffer* Buf;
+
+ /// Alloc - Allocator used for IdentifierInfo objects.
+ llvm::BumpPtrAllocator Alloc;
+
+ /// PerIDCache - A lazily generated cache mapping from persistent identifiers to
+ /// IdentifierInfo*.
+ IdentifierInfo** PerIDCache;
+
+ /// FileLookup - Abstract data structure used for mapping between files
+ /// and token data in the PTH file.
+ void* FileLookup;
+
+ /// IdDataTable - Array representing the mapping from persistent IDs to the
+ /// data offset within the PTH file containing the information to
+ /// reconstitute an IdentifierInfo.
+ const unsigned char* const IdDataTable;
+
+ /// StringIdLookup - Abstract data structure mapping from strings to
+ /// persistent IDs. This is used by get().
+ void* StringIdLookup;
+
+ /// NumIds - The number of identifiers in the PTH file.
+ const unsigned NumIds;
+
+ /// PP - The Preprocessor object that will use this PTHManager to create
+ /// PTHLexer objects.
+ Preprocessor* PP;
+
+ /// SpellingBase - The base offset within the PTH memory buffer that
+ /// contains the cached spellings for literals.
+ const unsigned char* const SpellingBase;
+
+ /// OriginalSourceFile - A null-terminated C-string that specifies the name
+ /// of the file (if any) that was used to generate the PTH cache.
+ const char* OriginalSourceFile;
+
+ /// This constructor is intended to only be called by the static 'Create'
+ /// method.
+ PTHManager(const llvm::MemoryBuffer* buf, void* fileLookup,
+ const unsigned char* idDataTable, IdentifierInfo** perIDCache,
+ void* stringIdLookup, unsigned numIds,
+ const unsigned char* spellingBase, const char *originalSourceFile);
+
+ // Do not implement.
+ PTHManager();
+ void operator=(const PTHManager&);
+
+ /// getSpellingAtPTHOffset - Used by PTHLexer classes to get the cached
+ /// spelling for a token.
+ unsigned getSpellingAtPTHOffset(unsigned PTHOffset, const char*& Buffer);
+
+ /// GetIdentifierInfo - Used to reconstruct IdentifierInfo objects from the
+ /// PTH file.
+ inline IdentifierInfo* GetIdentifierInfo(unsigned PersistentID) {
+ // Check if the IdentifierInfo has already been resolved.
+ if (IdentifierInfo* II = PerIDCache[PersistentID])
+ return II;
+ return LazilyCreateIdentifierInfo(PersistentID);
+ }
+ IdentifierInfo* LazilyCreateIdentifierInfo(unsigned PersistentID);
+
+public:
+ // The current PTH version.
+ enum { Version = 9 };
+
+ ~PTHManager();
+
+ /// getOriginalSourceFile - Return the full path to the original header
+ /// file name that was used to generate the PTH cache.
+ const char* getOriginalSourceFile() const {
+ return OriginalSourceFile;
+ }
+
+ /// get - Return the identifier token info for the specified named identifier.
+ /// Unlike the version in IdentifierTable, this returns a pointer instead
+ /// of a reference. If the pointer is NULL then the IdentifierInfo cannot
+ /// be found.
+ IdentifierInfo *get(StringRef Name);
+
+ /// Create - This method creates PTHManager objects. The 'file' argument
+ /// is the name of the PTH file. This method returns NULL upon failure.
+ static PTHManager *Create(const std::string& file, DiagnosticsEngine &Diags);
+
+ void setPreprocessor(Preprocessor *pp) { PP = pp; }
+
+ /// CreateLexer - Return a PTHLexer that "lexes" the cached tokens for the
+ /// specified file. This method returns NULL if no cached tokens exist.
+ /// It is the responsibility of the caller to 'delete' the returned object.
+ PTHLexer *CreateLexer(FileID FID);
+
+ /// createStatCache - Returns a FileSystemStatCache object for use with
+ /// FileManager objects. These objects use the PTH data to speed up
+ /// calls to stat by memoizing their results from when the PTH file
+ /// was generated.
+ FileSystemStatCache *createStatCache();
+};
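+
+// For illustration, a driver that already has a DiagnosticsEngine (Diags) and
+// a Preprocessor (PP) might wire up a token cache roughly as follows;
+// "tokens.pth" is a hypothetical cache file name and error handling is
+// elided.
+//
+//   if (PTHManager *PTHMgr = PTHManager::Create("tokens.pth", Diags)) {
+//     PTHMgr->setPreprocessor(&PP);
+//     PP.setPTHManager(PTHMgr);
+//   }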
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Pragma.h b/contrib/llvm/tools/clang/include/clang/Lex/Pragma.h
new file mode 100644
index 0000000..4868811
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Pragma.h
@@ -0,0 +1,126 @@
+//===--- Pragma.h - Pragma registration and handling ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PragmaHandler and PragmaTable interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_PRAGMA_H
+#define LLVM_CLANG_PRAGMA_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include <cassert>
+
+namespace clang {
+ class Preprocessor;
+ class Token;
+ class IdentifierInfo;
+ class PragmaNamespace;
+
+ /**
+ * \brief Describes how the pragma was introduced, e.g., with #pragma,
+ * _Pragma, or __pragma.
+ */
+ enum PragmaIntroducerKind {
+ /**
+ * \brief The pragma was introduced via #pragma.
+ */
+ PIK_HashPragma,
+
+ /**
+ * \brief The pragma was introduced via the C99 _Pragma(string-literal).
+ */
+ PIK__Pragma,
+
+ /**
+ * \brief The pragma was introduced via the Microsoft
+ * __pragma(token-string).
+ */
+ PIK___pragma
+ };
+
+/// PragmaHandler - Instances of this interface are defined to handle the various
+/// pragmas that the language front-end uses. Each handler optionally has a
+/// name (e.g. "pack") and the HandlePragma method is invoked when a pragma with
+/// that identifier is found. If a handler does not match any of the declared
+/// pragmas the handler with a null identifier is invoked, if it exists.
+///
+/// Note that the PragmaNamespace class can be used to subdivide pragmas, e.g.
+/// we treat "#pragma STDC" and "#pragma GCC" as namespaces that contain other
+/// pragmas.
+class PragmaHandler {
+ std::string Name;
+public:
+ explicit PragmaHandler(StringRef name) : Name(name) {}
+ PragmaHandler() {}
+ virtual ~PragmaHandler();
+
+ StringRef getName() const { return Name; }
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken) = 0;
+
+ /// getIfNamespace - If this is a namespace, return it. This is equivalent to
+ /// using a dynamic_cast, but doesn't require RTTI.
+ virtual PragmaNamespace *getIfNamespace() { return 0; }
+};
+
+/// EmptyPragmaHandler - A pragma handler which takes no action, which can be
+/// used to ignore particular pragmas.
+class EmptyPragmaHandler : public PragmaHandler {
+public:
+ EmptyPragmaHandler();
+
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
+};
+
+/// PragmaNamespace - This PragmaHandler subdivides the namespace of pragmas,
+/// allowing hierarchical pragmas to be defined. Common examples of namespaces
+/// are "#pragma GCC", "#pragma STDC", and "#pragma omp", but any namespaces may
+/// be (potentially recursively) defined.
+class PragmaNamespace : public PragmaHandler {
+ /// Handlers - This is a map of the handlers in this namespace with their name
+ /// as key.
+ ///
+ llvm::StringMap<PragmaHandler*> Handlers;
+public:
+ explicit PragmaNamespace(StringRef Name) : PragmaHandler(Name) {}
+ virtual ~PragmaNamespace();
+
+ /// FindHandler - Check to see if there is already a handler for the
+ /// specified name. If not, return the handler for the null name if it
+ /// exists, otherwise return null. If IgnoreNull is true (the default) then
+ /// the null handler isn't returned on failure to match.
+ PragmaHandler *FindHandler(StringRef Name,
+ bool IgnoreNull = true) const;
+
+ /// AddPragma - Add a pragma to this namespace.
+ ///
+ void AddPragma(PragmaHandler *Handler);
+
+ /// RemovePragmaHandler - Remove the given handler from the
+ /// namespace.
+ void RemovePragmaHandler(PragmaHandler *Handler);
+
+ bool IsEmpty() {
+ return Handlers.empty();
+ }
+
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
+
+ virtual PragmaNamespace *getIfNamespace() { return this; }
+};
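+
+// For illustration, a handler for a hypothetical "#pragma my_tool mark"
+// directive could be written and registered under its own namespace like
+// this; MarkHandler and "my_tool" are invented names, and the handler body is
+// elided.
+//
+//   class MarkHandler : public PragmaHandler {
+//   public:
+//     MarkHandler() : PragmaHandler("mark") {}
+//     virtual void HandlePragma(Preprocessor &PP,
+//                               PragmaIntroducerKind Introducer,
+//                               Token &FirstToken) {
+//       // Consume and act on the pragma's tokens here.
+//     }
+//   };
+//
+//   PragmaNamespace *MyTool = new PragmaNamespace("my_tool");
+//   MyTool->AddPragma(new MarkHandler());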
+
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h
new file mode 100644
index 0000000..45e3a5d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessingRecord.h
@@ -0,0 +1,637 @@
+//===--- PreprocessingRecord.h - Record of Preprocessing --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PreprocessingRecord class, which maintains a record
+// of what occurred during preprocessing.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_LEX_PREPROCESSINGRECORD_H
+#define LLVM_CLANG_LEX_PREPROCESSINGRECORD_H
+
+#include "clang/Lex/PPCallbacks.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Compiler.h"
+#include <vector>
+
+namespace clang {
+ class IdentifierInfo;
+ class PreprocessingRecord;
+}
+
+/// \brief Allocates memory within a Clang preprocessing record.
+void* operator new(size_t bytes, clang::PreprocessingRecord& PR,
+ unsigned alignment = 8) throw();
+
+/// \brief Frees memory allocated in a Clang preprocessing record.
+void operator delete(void* ptr, clang::PreprocessingRecord& PR,
+ unsigned) throw();
+
+namespace clang {
+ class MacroDefinition;
+ class FileEntry;
+
+ /// \brief Base class that describes a preprocessed entity, which may be a
+ /// preprocessor directive or macro expansion.
+ class PreprocessedEntity {
+ public:
+ /// \brief The kind of preprocessed entity an object describes.
+ enum EntityKind {
+ /// \brief Indicates a problem trying to load the preprocessed entity.
+ InvalidKind,
+
+ /// \brief A macro expansion.
+ MacroExpansionKind,
+
+ /// \defgroup Preprocessing directives
+ /// @{
+
+ /// \brief A macro definition.
+ MacroDefinitionKind,
+
+ /// \brief An inclusion directive, such as \c #include, \c
+ /// #import, or \c #include_next.
+ InclusionDirectiveKind,
+
+ /// @}
+
+ FirstPreprocessingDirective = MacroDefinitionKind,
+ LastPreprocessingDirective = InclusionDirectiveKind
+ };
+
+ private:
+ /// \brief The kind of preprocessed entity that this object describes.
+ EntityKind Kind;
+
+ /// \brief The source range that covers this preprocessed entity.
+ SourceRange Range;
+
+ protected:
+ PreprocessedEntity(EntityKind Kind, SourceRange Range)
+ : Kind(Kind), Range(Range) { }
+
+ friend class PreprocessingRecord;
+
+ public:
+ /// \brief Retrieve the kind of preprocessed entity stored in this object.
+ EntityKind getKind() const { return Kind; }
+
+ /// \brief Retrieve the source range that covers this entire preprocessed
+ /// entity.
+ SourceRange getSourceRange() const LLVM_READONLY { return Range; }
+
+ /// \brief Returns true if there was a problem loading the preprocessed
+ /// entity.
+ bool isInvalid() const { return Kind == InvalidKind; }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const PreprocessedEntity *) { return true; }
+
+ // Only allow allocation of preprocessed entities using the allocator
+ // in PreprocessingRecord or by doing a placement new.
+ void* operator new(size_t bytes, PreprocessingRecord& PR,
+ unsigned alignment = 8) throw() {
+ return ::operator new(bytes, PR, alignment);
+ }
+
+ void* operator new(size_t bytes, void* mem) throw() {
+ return mem;
+ }
+
+ void operator delete(void* ptr, PreprocessingRecord& PR,
+ unsigned alignment) throw() {
+ return ::operator delete(ptr, PR, alignment);
+ }
+
+ void operator delete(void*, std::size_t) throw() { }
+ void operator delete(void*, void*) throw() { }
+
+ private:
+ // Make vanilla 'new' and 'delete' illegal for preprocessed entities.
+ void* operator new(size_t bytes) throw();
+ void operator delete(void* data) throw();
+ };
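+
+ // For illustration, entities are allocated with the placement form above so
+ // that their storage comes from the owning record's allocator; assuming
+ // PPRec is a PreprocessingRecord and II/Range are an IdentifierInfo* and a
+ // SourceRange already in scope, a definition entity might be created as:
+ //
+ //   MacroDefinition *MD = new (PPRec) MacroDefinition(II, Range);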
+
+ /// \brief Records the presence of a preprocessor directive.
+ class PreprocessingDirective : public PreprocessedEntity {
+ public:
+ PreprocessingDirective(EntityKind Kind, SourceRange Range)
+ : PreprocessedEntity(Kind, Range) { }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const PreprocessedEntity *PD) {
+ return PD->getKind() >= FirstPreprocessingDirective &&
+ PD->getKind() <= LastPreprocessingDirective;
+ }
+ static bool classof(const PreprocessingDirective *) { return true; }
+ };
+
+ /// \brief Record the location of a macro definition.
+ class MacroDefinition : public PreprocessingDirective {
+ /// \brief The name of the macro being defined.
+ const IdentifierInfo *Name;
+
+ public:
+ explicit MacroDefinition(const IdentifierInfo *Name, SourceRange Range)
+ : PreprocessingDirective(MacroDefinitionKind, Range), Name(Name) { }
+
+ /// \brief Retrieve the name of the macro being defined.
+ const IdentifierInfo *getName() const { return Name; }
+
+ /// \brief Retrieve the location of the macro name in the definition.
+ SourceLocation getLocation() const { return getSourceRange().getBegin(); }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const PreprocessedEntity *PE) {
+ return PE->getKind() == MacroDefinitionKind;
+ }
+ static bool classof(const MacroDefinition *) { return true; }
+ };
+
+ /// \brief Records the location of a macro expansion.
+ class MacroExpansion : public PreprocessedEntity {
+ /// \brief The definition of this macro or the name of the macro if it is
+ /// a builtin macro.
+ llvm::PointerUnion<IdentifierInfo *, MacroDefinition *> NameOrDef;
+
+ public:
+ MacroExpansion(IdentifierInfo *BuiltinName, SourceRange Range)
+ : PreprocessedEntity(MacroExpansionKind, Range),
+ NameOrDef(BuiltinName) { }
+
+ MacroExpansion(MacroDefinition *Definition, SourceRange Range)
+ : PreprocessedEntity(MacroExpansionKind, Range),
+ NameOrDef(Definition) { }
+
+ /// \brief True if it is a builtin macro.
+ bool isBuiltinMacro() const { return NameOrDef.is<IdentifierInfo *>(); }
+
+ /// \brief The name of the macro being expanded.
+ const IdentifierInfo *getName() const {
+ if (MacroDefinition *Def = getDefinition())
+ return Def->getName();
+ return NameOrDef.get<IdentifierInfo*>();
+ }
+
+ /// \brief The definition of the macro being expanded. May return null if
+ /// this is a builtin macro.
+ MacroDefinition *getDefinition() const {
+ return NameOrDef.dyn_cast<MacroDefinition *>();
+ }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const PreprocessedEntity *PE) {
+ return PE->getKind() == MacroExpansionKind;
+ }
+ static bool classof(const MacroExpansion *) { return true; }
+ };
+
+ /// \brief Record the location of an inclusion directive, such as an
+ /// \c #include or \c #import statement.
+ class InclusionDirective : public PreprocessingDirective {
+ public:
+ /// \brief The kind of inclusion directives known to the
+ /// preprocessor.
+ enum InclusionKind {
+ /// \brief An \c #include directive.
+ Include,
+ /// \brief An Objective-C \c #import directive.
+ Import,
+ /// \brief A GNU \c #include_next directive.
+ IncludeNext,
+ /// \brief A Clang \c #__include_macros directive.
+ IncludeMacros
+ };
+
+ private:
+ /// \brief The name of the file that was included, as written in
+ /// the source.
+ StringRef FileName;
+
+ /// \brief Whether the file name was in quotation marks; otherwise, it was
+ /// in angle brackets.
+ unsigned InQuotes : 1;
+
+ /// \brief The kind of inclusion directive we have.
+ ///
+ /// This is a value of type InclusionKind.
+ unsigned Kind : 2;
+
+ /// \brief The file that was included.
+ const FileEntry *File;
+
+ public:
+ InclusionDirective(PreprocessingRecord &PPRec,
+ InclusionKind Kind, StringRef FileName,
+ bool InQuotes, const FileEntry *File, SourceRange Range);
+
+ /// \brief Determine what kind of inclusion directive this is.
+ InclusionKind getKind() const { return static_cast<InclusionKind>(Kind); }
+
+ /// \brief Retrieve the included file name as it was written in the source.
+ StringRef getFileName() const { return FileName; }
+
+ /// \brief Determine whether the included file name was written in quotes;
+ /// otherwise, it was written in angle brackets.
+ bool wasInQuotes() const { return InQuotes; }
+
+ /// \brief Retrieve the file entry for the actual file that was included
+ /// by this directive.
+ const FileEntry *getFile() const { return File; }
+
+ // Implement isa/cast/dyncast/etc.
+ static bool classof(const PreprocessedEntity *PE) {
+ return PE->getKind() == InclusionDirectiveKind;
+ }
+ static bool classof(const InclusionDirective *) { return true; }
+ };
+
+ /// \brief An abstract class that should be subclassed by any external source
+ /// of preprocessing record entries.
+ class ExternalPreprocessingRecordSource {
+ public:
+ virtual ~ExternalPreprocessingRecordSource();
+
+ /// \brief Read a preallocated preprocessed entity from the external source.
+ ///
+ /// \returns null if an error occurred that prevented the preprocessed
+ /// entity from being loaded.
+ virtual PreprocessedEntity *ReadPreprocessedEntity(unsigned Index) = 0;
+
+ /// \brief Returns a pair of [Begin, End) indices of preallocated
+ /// preprocessed entities that \arg Range encompasses.
+ virtual std::pair<unsigned, unsigned>
+ findPreprocessedEntitiesInRange(SourceRange Range) = 0;
+
+ /// \brief Optionally returns whether the preallocated preprocessed
+ /// entity with index \arg Index came from file \arg FID.
+ virtual llvm::Optional<bool> isPreprocessedEntityInFileID(unsigned Index,
+ FileID FID) {
+ return llvm::Optional<bool>();
+ }
+ };
+
+ /// \brief A record of the steps taken while preprocessing a source file,
+ /// including the various preprocessing directives processed, macros
+ /// expanded, etc.
+ class PreprocessingRecord : public PPCallbacks {
+ SourceManager &SourceMgr;
+
+ /// \brief Allocator used to store preprocessing objects.
+ llvm::BumpPtrAllocator BumpAlloc;
+
+ /// \brief The set of preprocessed entities in this record, in the order
+ /// they were seen.
+ std::vector<PreprocessedEntity *> PreprocessedEntities;
+
+ /// \brief The set of preprocessed entities in this record that have been
+ /// loaded from external sources.
+ ///
+ /// The entries in this vector are loaded lazily from the external source,
+ /// and are referenced by the iterator using negative indices.
+ std::vector<PreprocessedEntity *> LoadedPreprocessedEntities;
+
+ bool RecordCondDirectives;
+ unsigned CondDirectiveNextIdx;
+ SmallVector<unsigned, 6> CondDirectiveStack;
+
+ class CondDirectiveLoc {
+ SourceLocation Loc;
+ unsigned Idx;
+
+ public:
+ CondDirectiveLoc(SourceLocation Loc, unsigned Idx) : Loc(Loc), Idx(Idx) {}
+
+ SourceLocation getLoc() const { return Loc; }
+ unsigned getIdx() const { return Idx; }
+
+ class Comp {
+ SourceManager &SM;
+ public:
+ explicit Comp(SourceManager &SM) : SM(SM) {}
+ bool operator()(const CondDirectiveLoc &LHS,
+ const CondDirectiveLoc &RHS) {
+ return SM.isBeforeInTranslationUnit(LHS.getLoc(), RHS.getLoc());
+ }
+ bool operator()(const CondDirectiveLoc &LHS, SourceLocation RHS) {
+ return SM.isBeforeInTranslationUnit(LHS.getLoc(), RHS);
+ }
+ bool operator()(SourceLocation LHS, const CondDirectiveLoc &RHS) {
+ return SM.isBeforeInTranslationUnit(LHS, RHS.getLoc());
+ }
+ };
+ };
+
+ typedef std::vector<CondDirectiveLoc> CondDirectiveLocsTy;
+ /// \brief The locations of conditional directives in source order.
+ CondDirectiveLocsTy CondDirectiveLocs;
+
+ void addCondDirectiveLoc(CondDirectiveLoc DirLoc);
+ unsigned findCondDirectiveIdx(SourceLocation Loc) const;
+
+ /// \brief Global (loaded or local) ID for a preprocessed entity.
+ /// Negative values are used to indicate preprocessed entities
+ /// loaded from the external source while non-negative values are used to
+ /// indicate preprocessed entities introduced by the current preprocessor.
+ /// If M is the number of loaded preprocessed entities, value -M
+ /// corresponds to element 0 in the loaded entities vector, position -M+1
+ /// corresponds to element 1 in the loaded entities vector, etc.
+ typedef int PPEntityID;
+
+ PPEntityID getPPEntityID(unsigned Index, bool isLoaded) const {
+ return isLoaded ? PPEntityID(Index) - LoadedPreprocessedEntities.size()
+ : Index;
+ }
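+
+ // For example (illustrative): with M = 3 loaded entities, loaded index 0
+ // maps to ID -3 and loaded index 2 maps to ID -1, while the first local
+ // entity maps to ID 0, giving the contiguous [-M, N) range described above.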
+
+ /// \brief Mapping from MacroInfo structures to their definitions.
+ llvm::DenseMap<const MacroInfo *, PPEntityID> MacroDefinitions;
+
+ /// \brief External source of preprocessed entities.
+ ExternalPreprocessingRecordSource *ExternalSource;
+
+ /// \brief Retrieve the preprocessed entity at the given ID.
+ PreprocessedEntity *getPreprocessedEntity(PPEntityID PPID);
+
+ /// \brief Retrieve the loaded preprocessed entity at the given index.
+ PreprocessedEntity *getLoadedPreprocessedEntity(unsigned Index);
+
+ /// \brief Determine the number of preprocessed entities that were
+ /// loaded (or can be loaded) from an external source.
+ unsigned getNumLoadedPreprocessedEntities() const {
+ return LoadedPreprocessedEntities.size();
+ }
+
+ /// \brief Returns a pair of [Begin, End) indices of local preprocessed
+ /// entities that \arg Range encompasses.
+ std::pair<unsigned, unsigned>
+ findLocalPreprocessedEntitiesInRange(SourceRange Range) const;
+ unsigned findBeginLocalPreprocessedEntity(SourceLocation Loc) const;
+ unsigned findEndLocalPreprocessedEntity(SourceLocation Loc) const;
+
+ /// \brief Allocate space for a new set of loaded preprocessed entities.
+ ///
+ /// \returns The index into the set of loaded preprocessed entities, which
+ /// corresponds to the first newly-allocated entity.
+ unsigned allocateLoadedEntities(unsigned NumEntities);
+
+ /// \brief Register a new macro definition.
+ void RegisterMacroDefinition(MacroInfo *Macro, PPEntityID PPID);
+
+ public:
+ /// \brief Construct a new preprocessing record.
+ PreprocessingRecord(SourceManager &SM, bool RecordConditionalDirectives);
+
+ /// \brief Allocate memory in the preprocessing record.
+ void *Allocate(unsigned Size, unsigned Align = 8) {
+ return BumpAlloc.Allocate(Size, Align);
+ }
+
+ /// \brief Deallocate memory in the preprocessing record.
+ void Deallocate(void *Ptr) { }
+
+ size_t getTotalMemory() const;
+
+ SourceManager &getSourceManager() const { return SourceMgr; }
+
+ // Iteration over the preprocessed entities.
+ class iterator {
+ PreprocessingRecord *Self;
+
+ /// \brief Position within the preprocessed entity sequence.
+ ///
+ /// In a complete iteration, the Position field walks the range [-M, N),
+ /// where negative values are used to indicate preprocessed entities
+ /// loaded from the external source while non-negative values are used to
+ /// indicate preprocessed entities introduced by the current preprocessor.
+ /// However, to provide iteration in source order (for, e.g., chained
+ /// precompiled headers), dereferencing the iterator flips the negative
+ /// values (corresponding to loaded entities), so that position -M
+ /// corresponds to element 0 in the loaded entities vector, position -M+1
+ /// corresponds to element 1 in the loaded entities vector, etc. This
+ /// gives us a reasonably efficient, source-order walk.
+ PPEntityID Position;
+
+ public:
+ typedef PreprocessedEntity *value_type;
+ typedef value_type& reference;
+ typedef value_type* pointer;
+ typedef std::random_access_iterator_tag iterator_category;
+ typedef int difference_type;
+
+ iterator() : Self(0), Position(0) { }
+
+ iterator(PreprocessingRecord *Self, PPEntityID Position)
+ : Self(Self), Position(Position) { }
+
+ value_type operator*() const {
+ return Self->getPreprocessedEntity(Position);
+ }
+
+ value_type operator[](difference_type D) {
+ return *(*this + D);
+ }
+
+ iterator &operator++() {
+ ++Position;
+ return *this;
+ }
+
+ iterator operator++(int) {
+ iterator Prev(*this);
+ ++Position;
+ return Prev;
+ }
+
+ iterator &operator--() {
+ --Position;
+ return *this;
+ }
+
+ iterator operator--(int) {
+ iterator Prev(*this);
+ --Position;
+ return Prev;
+ }
+
+ friend bool operator==(const iterator &X, const iterator &Y) {
+ return X.Position == Y.Position;
+ }
+
+ friend bool operator!=(const iterator &X, const iterator &Y) {
+ return X.Position != Y.Position;
+ }
+
+ friend bool operator<(const iterator &X, const iterator &Y) {
+ return X.Position < Y.Position;
+ }
+
+ friend bool operator>(const iterator &X, const iterator &Y) {
+ return X.Position > Y.Position;
+ }
+
+ friend bool operator<=(const iterator &X, const iterator &Y) {
+ return X.Position <= Y.Position;
+ }
+
+ friend bool operator>=(const iterator &X, const iterator &Y) {
+ return X.Position >= Y.Position;
+ }
+
+ friend iterator& operator+=(iterator &X, difference_type D) {
+ X.Position += D;
+ return X;
+ }
+
+ friend iterator& operator-=(iterator &X, difference_type D) {
+ X.Position -= D;
+ return X;
+ }
+
+ friend iterator operator+(iterator X, difference_type D) {
+ X.Position += D;
+ return X;
+ }
+
+ friend iterator operator+(difference_type D, iterator X) {
+ X.Position += D;
+ return X;
+ }
+
+ friend difference_type operator-(const iterator &X, const iterator &Y) {
+ return X.Position - Y.Position;
+ }
+
+ friend iterator operator-(iterator X, difference_type D) {
+ X.Position -= D;
+ return X;
+ }
+ friend class PreprocessingRecord;
+ };
+ friend class iterator;
+
+ /// \brief Begin iterator for all preprocessed entities.
+ iterator begin() {
+ return iterator(this, -(int)LoadedPreprocessedEntities.size());
+ }
+
+ /// \brief End iterator for all preprocessed entities.
+ iterator end() {
+ return iterator(this, PreprocessedEntities.size());
+ }
+
+ /// \brief Begin iterator for local, non-loaded, preprocessed entities.
+ iterator local_begin() {
+ return iterator(this, 0);
+ }
+
+ /// \brief End iterator for local, non-loaded, preprocessed entities.
+ iterator local_end() {
+ return iterator(this, PreprocessedEntities.size());
+ }
+
+ /// \brief Returns a pair of [Begin, End) iterators of preprocessed entities
+ /// that source range \arg R encompasses.
+ ///
+ /// \param R the range to look for preprocessed entities.
+ ///
+ std::pair<iterator, iterator> getPreprocessedEntitiesInRange(SourceRange R);
+
+ /// \brief Returns true if the preprocessed entity that the \arg PPEI iterator
+ /// points to is coming from the file \arg FID.
+ ///
+ /// Can be used to avoid implicit deserializations of preallocated
+ /// preprocessed entities if we only care about entities of a specific file
+ /// and not from files #included in the range given at
+ /// \see getPreprocessedEntitiesInRange.
+ bool isEntityInFileID(iterator PPEI, FileID FID);
+
+ /// \brief Add a new preprocessed entity to this record.
+ PPEntityID addPreprocessedEntity(PreprocessedEntity *Entity);
+
+ /// \brief Returns true if this PreprocessingRecord is keeping track of
+ /// conditional directives locations.
+ bool isRecordingConditionalDirectives() const {
+ return RecordCondDirectives;
+ }
+
+ /// \brief Returns true if the given range intersects with a conditional
+ /// directive. If an #if/#endif block is fully contained within the range,
+ /// this function will return false.
+ bool rangeIntersectsConditionalDirective(SourceRange Range) const;
+
+ /// \brief Returns true if the given locations are in different regions,
+ /// separated by conditional directive blocks.
+ bool areInDifferentConditionalDirectiveRegion(SourceLocation LHS,
+ SourceLocation RHS) const {
+ return findCondDirectiveIdx(LHS) != findCondDirectiveIdx(RHS);
+ }
+
+ /// \brief Set the external source for preprocessed entities.
+ void SetExternalSource(ExternalPreprocessingRecordSource &Source);
+
+ /// \brief Retrieve the external source for preprocessed entities.
+ ExternalPreprocessingRecordSource *getExternalSource() const {
+ return ExternalSource;
+ }
+
+ /// \brief Retrieve the macro definition that corresponds to the given
+ /// \c MacroInfo.
+ MacroDefinition *findMacroDefinition(const MacroInfo *MI);
+
+ private:
+ virtual void MacroExpands(const Token &Id, const MacroInfo* MI,
+ SourceRange Range);
+ virtual void MacroDefined(const Token &Id, const MacroInfo *MI);
+ virtual void MacroUndefined(const Token &Id, const MacroInfo *MI);
+ virtual void InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ StringRef FileName,
+ bool IsAngled,
+ const FileEntry *File,
+ SourceLocation EndLoc,
+ StringRef SearchPath,
+ StringRef RelativePath);
+ virtual void If(SourceLocation Loc, SourceRange ConditionRange);
+ virtual void Elif(SourceLocation Loc, SourceRange ConditionRange,
+ SourceLocation IfLoc);
+ virtual void Ifdef(SourceLocation Loc, const Token &MacroNameTok);
+ virtual void Ifndef(SourceLocation Loc, const Token &MacroNameTok);
+ virtual void Else(SourceLocation Loc, SourceLocation IfLoc);
+ virtual void Endif(SourceLocation Loc, SourceLocation IfLoc);
+
+ /// \brief Cached result of the last \see getPreprocessedEntitiesInRange
+ /// query.
+ struct {
+ SourceRange Range;
+ std::pair<PPEntityID, PPEntityID> Result;
+ } CachedRangeQuery;
+
+ std::pair<PPEntityID, PPEntityID>
+ getPreprocessedEntitiesInRangeSlow(SourceRange R);
+
+ friend class ASTReader;
+ friend class ASTWriter;
+ };
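+
+ // For illustration, a client holding a PreprocessingRecord &PPRec could walk
+ // every recorded entity in source order and inspect the inclusion
+ // directives; the sketch assumes the usual llvm/Support/Casting.h and
+ // raw_ostream facilities.
+ //
+ //   for (PreprocessingRecord::iterator I = PPRec.begin(), E = PPRec.end();
+ //        I != E; ++I) {
+ //     if (InclusionDirective *ID =
+ //             llvm::dyn_cast_or_null<InclusionDirective>(*I))
+ //       llvm::errs() << "include of " << ID->getFileName() << "\n";
+ //   }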
+} // end namespace clang
+
+inline void* operator new(size_t bytes, clang::PreprocessingRecord& PR,
+ unsigned alignment) throw() {
+ return PR.Allocate(bytes, alignment);
+}
+
+inline void operator delete(void* ptr, clang::PreprocessingRecord& PR,
+ unsigned) throw() {
+ PR.Deallocate(ptr);
+}
+
+#endif // LLVM_CLANG_LEX_PREPROCESSINGRECORD_H
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h b/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
new file mode 100644
index 0000000..055008f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Preprocessor.h
@@ -0,0 +1,1308 @@
+//===--- Preprocessor.h - C Language Family Preprocessor --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Preprocessor interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LEX_PREPROCESSOR_H
+#define LLVM_CLANG_LEX_PREPROCESSOR_H
+
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/PTHLexer.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/TokenLexer.h"
+#include "clang/Lex/PTHManager.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/Allocator.h"
+#include <vector>
+
+namespace llvm {
+ template<unsigned InternalLen> class SmallString;
+}
+
+namespace clang {
+
+class SourceManager;
+class ExternalPreprocessorSource;
+class FileManager;
+class FileEntry;
+class HeaderSearch;
+class PragmaNamespace;
+class PragmaHandler;
+class CommentHandler;
+class ScratchBuffer;
+class TargetInfo;
+class PPCallbacks;
+class CodeCompletionHandler;
+class DirectoryLookup;
+class PreprocessingRecord;
+class ModuleLoader;
+
+/// Preprocessor - This object engages in a tight little dance with the lexer to
+/// efficiently preprocess tokens. Lexers know only about tokens within a
+/// single source file, and don't know anything about preprocessor-level issues
+/// like the #include stack, token expansion, etc.
+///
+class Preprocessor : public RefCountedBase<Preprocessor> {
+ DiagnosticsEngine *Diags;
+ LangOptions &LangOpts;
+ const TargetInfo *Target;
+ FileManager &FileMgr;
+ SourceManager &SourceMgr;
+ ScratchBuffer *ScratchBuf;
+ HeaderSearch &HeaderInfo;
+ ModuleLoader &TheModuleLoader;
+
+ /// \brief External source of macros.
+ ExternalPreprocessorSource *ExternalSource;
+
+
+ /// PTH - An optional PTHManager object used for getting tokens from
+ /// a token cache rather than lexing the original source file.
+ OwningPtr<PTHManager> PTH;
+
+ /// BP - A BumpPtrAllocator object used to quickly allocate and release
+ /// objects internal to the Preprocessor.
+ llvm::BumpPtrAllocator BP;
+
+ /// Identifiers for builtin macros and other builtins.
+ IdentifierInfo *Ident__LINE__, *Ident__FILE__; // __LINE__, __FILE__
+ IdentifierInfo *Ident__DATE__, *Ident__TIME__; // __DATE__, __TIME__
+ IdentifierInfo *Ident__INCLUDE_LEVEL__; // __INCLUDE_LEVEL__
+ IdentifierInfo *Ident__BASE_FILE__; // __BASE_FILE__
+ IdentifierInfo *Ident__TIMESTAMP__; // __TIMESTAMP__
+ IdentifierInfo *Ident__COUNTER__; // __COUNTER__
+ IdentifierInfo *Ident_Pragma, *Ident__pragma; // _Pragma, __pragma
+ IdentifierInfo *Ident__VA_ARGS__; // __VA_ARGS__
+ IdentifierInfo *Ident__has_feature; // __has_feature
+ IdentifierInfo *Ident__has_extension; // __has_extension
+ IdentifierInfo *Ident__has_builtin; // __has_builtin
+ IdentifierInfo *Ident__has_attribute; // __has_attribute
+ IdentifierInfo *Ident__has_include; // __has_include
+ IdentifierInfo *Ident__has_include_next; // __has_include_next
+ IdentifierInfo *Ident__has_warning; // __has_warning
+
+ SourceLocation DATELoc, TIMELoc;
+ unsigned CounterValue; // Next __COUNTER__ value.
+
+ enum {
+ /// MaxIncludeStackDepth - Maximum depth of #includes.
+ MaxAllowedIncludeStackDepth = 200
+ };
+
+ // State that is set before the preprocessor begins.
+ bool KeepComments : 1;
+ bool KeepMacroComments : 1;
+ bool SuppressIncludeNotFoundError : 1;
+
+ // State that changes while the preprocessor runs:
+ bool InMacroArgs : 1; // True if parsing fn macro invocation args.
+
+ /// Whether the preprocessor owns the header search object.
+ bool OwnsHeaderSearch : 1;
+
+ /// DisableMacroExpansion - True if macro expansion is disabled.
+ bool DisableMacroExpansion : 1;
+
+ /// \brief Whether we have already loaded macros from the external source.
+ mutable bool ReadMacrosFromExternalSource : 1;
+
+ /// \brief True if we are pre-expanding macro arguments.
+ bool InMacroArgPreExpansion;
+
+ /// Identifiers - This is mapping/lookup information for all identifiers in
+ /// the program, including program keywords.
+ mutable IdentifierTable Identifiers;
+
+ /// Selectors - This table contains all the selectors in the program. Unlike
+ /// IdentifierTable above, this table *isn't* populated by the preprocessor.
+ /// It is declared/expanded here because its role/lifetime is conceptually
+ /// similar to the IdentifierTable. In addition, the current control flow
+ /// (in clang::ParseAST()) makes it convenient to put it here.
+ /// FIXME: Make sure the lifetime of Identifiers/Selectors *isn't* tied to
+ /// the lifetime of the preprocessor.
+ SelectorTable Selectors;
+
+ /// BuiltinInfo - Information about builtins.
+ Builtin::Context BuiltinInfo;
+
+ /// PragmaHandlers - This tracks all of the pragmas that the client registered
+ /// with this preprocessor.
+ PragmaNamespace *PragmaHandlers;
+
+ /// \brief Tracks all of the comment handlers that the client registered
+ /// with this preprocessor.
+ std::vector<CommentHandler *> CommentHandlers;
+
+ /// \brief True if we want to ignore the EOF token and continue later on
+ /// (thus avoiding tearing down the Lexer, etc.).
+ bool IncrementalProcessing;
+
+ /// \brief The code-completion handler.
+ CodeCompletionHandler *CodeComplete;
+
+ /// \brief The file that we're performing code-completion for, if any.
+ const FileEntry *CodeCompletionFile;
+
+ /// \brief The offset in file for the code-completion point.
+ unsigned CodeCompletionOffset;
+
+ /// \brief The location for the code-completion point. This gets instantiated
+ /// when the CodeCompletionFile gets #include'ed for preprocessing.
+ SourceLocation CodeCompletionLoc;
+
+ /// \brief The start location for the file of the code-completion point.
+ /// This gets instantiated when the CodeCompletionFile gets #include'ed
+ /// for preprocessing.
+ SourceLocation CodeCompletionFileLoc;
+
+ /// \brief The source location of the 'import' contextual keyword we just
+ /// lexed, if any.
+ SourceLocation ModuleImportLoc;
+
+ /// \brief The module import path that we're currently processing.
+ llvm::SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2>
+ ModuleImportPath;
+
+ /// \brief Whether the module import expects an identifier next. Otherwise,
+ /// it expects a '.' or ';'.
+ bool ModuleImportExpectsIdentifier;
+
+ /// \brief The source location of the currently-active
+ /// #pragma clang arc_cf_code_audited begin.
+ SourceLocation PragmaARCCFCodeAuditedLoc;
+
+ /// \brief True if we hit the code-completion point.
+ bool CodeCompletionReached;
+
+ /// \brief The number of bytes that we will initially skip when entering the
+ /// main file, which is used when loading a precompiled preamble, along
+ /// with a flag that indicates whether skipping this number of bytes will
+ /// place the lexer at the start of a line.
+ std::pair<unsigned, bool> SkipMainFilePreamble;
+
+ /// CurLexer - This is the current top of the stack that we're lexing from if
+ /// not expanding a macro and we are lexing directly from source code.
+ /// Only one of CurLexer, CurPTHLexer, or CurTokenLexer will be non-null.
+ OwningPtr<Lexer> CurLexer;
+
+ /// CurPTHLexer - This is the current top of stack that we're lexing from if
+ /// not expanding from a macro and we are lexing from a PTH cache.
+ /// Only one of CurLexer, CurPTHLexer, or CurTokenLexer will be non-null.
+ OwningPtr<PTHLexer> CurPTHLexer;
+
+ /// CurPPLexer - This is the current top of the stack that we're lexing from
+ /// if not expanding a macro. This is an alias for either CurLexer or
+ /// CurPTHLexer.
+ PreprocessorLexer *CurPPLexer;
+
+ /// CurDirLookup - The DirectoryLookup structure used to find the current
+ /// FileEntry, if CurLexer is non-null and if applicable. This allows us to
+ /// implement #include_next and find directory-specific properties.
+ const DirectoryLookup *CurDirLookup;
+
+ /// CurTokenLexer - This is the current macro we are expanding, if we are
+ /// expanding a macro. One of CurLexer and CurTokenLexer must be null.
+ OwningPtr<TokenLexer> CurTokenLexer;
+
+ /// \brief The kind of lexer we're currently working with.
+ enum CurLexerKind {
+ CLK_Lexer,
+ CLK_PTHLexer,
+ CLK_TokenLexer,
+ CLK_CachingLexer,
+ CLK_LexAfterModuleImport
+ } CurLexerKind;
+
+ /// IncludeMacroStack - This keeps track of the stack of files currently
+ /// #included, and macros currently being expanded from, not counting
+ /// CurLexer/CurTokenLexer.
+ struct IncludeStackInfo {
+ enum CurLexerKind CurLexerKind;
+ Lexer *TheLexer;
+ PTHLexer *ThePTHLexer;
+ PreprocessorLexer *ThePPLexer;
+ TokenLexer *TheTokenLexer;
+ const DirectoryLookup *TheDirLookup;
+
+ IncludeStackInfo(enum CurLexerKind K, Lexer *L, PTHLexer* P,
+ PreprocessorLexer* PPL,
+ TokenLexer* TL, const DirectoryLookup *D)
+ : CurLexerKind(K), TheLexer(L), ThePTHLexer(P), ThePPLexer(PPL),
+ TheTokenLexer(TL), TheDirLookup(D) {}
+ };
+ std::vector<IncludeStackInfo> IncludeMacroStack;
+
+ /// Callbacks - These are actions invoked when some preprocessor activity is
+ /// encountered (e.g. a file is #included, etc).
+ PPCallbacks *Callbacks;
+
+ /// Macros - For each IdentifierInfo with 'HasMacro' set, we keep a mapping
+ /// to the actual definition of the macro.
+ llvm::DenseMap<IdentifierInfo*, MacroInfo*> Macros;
+
+ /// \brief Macros that we want to warn about because they are not used at
+ /// the end of the translation unit; we store just their SourceLocations
+ /// instead of something like MacroInfo*. The benefit of this is that when we
+ /// are deserializing from PCH, we don't need to deserialize identifiers &
+ /// macros just so that we can report that they are unused; we just warn
+ /// using the SourceLocations of this set (that will be filled by the ASTReader).
+ /// We are using SmallPtrSet instead of a vector for faster removal.
+ typedef llvm::SmallPtrSet<SourceLocation, 32> WarnUnusedMacroLocsTy;
+ WarnUnusedMacroLocsTy WarnUnusedMacroLocs;
+
+ /// MacroArgCache - This is a "freelist" of MacroArg objects that can be
+ /// reused for quick allocation.
+ MacroArgs *MacroArgCache;
+ friend class MacroArgs;
+
+ /// PragmaPushMacroInfo - For each IdentifierInfo used in a #pragma
+ /// push_macro directive, we keep a MacroInfo stack used to restore
+ /// previous macro value.
+ llvm::DenseMap<IdentifierInfo*, std::vector<MacroInfo*> > PragmaPushMacroInfo;
+
+ // Various statistics we track for performance analysis.
+ unsigned NumDirectives, NumIncluded, NumDefined, NumUndefined, NumPragma;
+ unsigned NumIf, NumElse, NumEndif;
+ unsigned NumEnteredSourceFiles, MaxIncludeStackDepth;
+ unsigned NumMacroExpanded, NumFnMacroExpanded, NumBuiltinMacroExpanded;
+ unsigned NumFastMacroExpanded, NumTokenPaste, NumFastTokenPaste;
+ unsigned NumSkipped;
+
+ /// Predefines - This string contains the predefined macros that the
+ /// preprocessor should use, from the command line etc.
+ std::string Predefines;
+
+ /// TokenLexerCache - Cache macro expanders to reduce malloc traffic.
+ enum { TokenLexerCacheSize = 8 };
+ unsigned NumCachedTokenLexers;
+ TokenLexer *TokenLexerCache[TokenLexerCacheSize];
+
+ /// \brief Keeps macro expanded tokens for TokenLexers.
+ ///
+ /// Works like a stack; a TokenLexer adds the macro expanded tokens that it
+ /// is going to lex into the cache, and when it finishes the tokens are
+ /// removed from the end of the cache.
+ SmallVector<Token, 16> MacroExpandedTokens;
+ std::vector<std::pair<TokenLexer *, size_t> > MacroExpandingLexersStack;
+
+ /// \brief A record of the macro definitions and expansions that
+ /// occurred during preprocessing.
+ ///
+ /// This is an optional side structure that can be enabled with
+ /// \c createPreprocessingRecord() prior to preprocessing.
+ PreprocessingRecord *Record;
+
+private: // Cached tokens state.
+ typedef SmallVector<Token, 1> CachedTokensTy;
+
+ /// CachedTokens - Cached tokens are stored here when we do backtracking or
+ /// lookahead. They are "lexed" by the CachingLex() method.
+ CachedTokensTy CachedTokens;
+
+ /// CachedLexPos - The position of the cached token that CachingLex() should
+ /// "lex" next. If it points beyond the CachedTokens vector, it means that
+ /// a normal Lex() should be invoked.
+ CachedTokensTy::size_type CachedLexPos;
+
+ /// BacktrackPositions - Stack of backtrack positions, allowing nested
+ /// backtracks. The EnableBacktrackAtThisPos() method pushes a position to
+ /// indicate where CachedLexPos should be set when the BackTrack() method is
+ /// invoked (at which point the last position is popped).
+ std::vector<CachedTokensTy::size_type> BacktrackPositions;
+
+ struct MacroInfoChain {
+ MacroInfo MI;
+ MacroInfoChain *Next;
+ MacroInfoChain *Prev;
+ };
+
+ /// MacroInfos are managed as a chain for easy disposal. This is the head
+ /// of that list.
+ MacroInfoChain *MIChainHead;
+
+ /// MICache - A "freelist" of MacroInfo objects that can be reused for quick
+ /// allocation.
+ MacroInfoChain *MICache;
+
+ MacroInfo *getInfoForMacro(IdentifierInfo *II) const;
+
+public:
+ Preprocessor(DiagnosticsEngine &diags, LangOptions &opts,
+ const TargetInfo *target,
+ SourceManager &SM, HeaderSearch &Headers,
+ ModuleLoader &TheModuleLoader,
+ IdentifierInfoLookup *IILookup = 0,
+ bool OwnsHeaderSearch = false,
+ bool DelayInitialization = false,
+ bool IncrProcessing = false);
+
+ ~Preprocessor();
+
+ /// \brief Initialize the preprocessor, if the constructor did not already
+ /// perform the initialization.
+ ///
+ /// \param Target Information about the target.
+ void Initialize(const TargetInfo &Target);
+
+ DiagnosticsEngine &getDiagnostics() const { return *Diags; }
+ void setDiagnostics(DiagnosticsEngine &D) { Diags = &D; }
+
+ const LangOptions &getLangOpts() const { return LangOpts; }
+ const TargetInfo &getTargetInfo() const { return *Target; }
+ FileManager &getFileManager() const { return FileMgr; }
+ SourceManager &getSourceManager() const { return SourceMgr; }
+ HeaderSearch &getHeaderSearchInfo() const { return HeaderInfo; }
+
+ IdentifierTable &getIdentifierTable() { return Identifiers; }
+ SelectorTable &getSelectorTable() { return Selectors; }
+ Builtin::Context &getBuiltinInfo() { return BuiltinInfo; }
+ llvm::BumpPtrAllocator &getPreprocessorAllocator() { return BP; }
+
+ void setPTHManager(PTHManager* pm);
+
+ PTHManager *getPTHManager() { return PTH.get(); }
+
+ void setExternalSource(ExternalPreprocessorSource *Source) {
+ ExternalSource = Source;
+ }
+
+ ExternalPreprocessorSource *getExternalSource() const {
+ return ExternalSource;
+ }
+
+ /// \brief Retrieve the module loader associated with this preprocessor.
+ ModuleLoader &getModuleLoader() const { return TheModuleLoader; }
+
+ /// SetCommentRetentionState - Control whether or not the preprocessor retains
+ /// comments in output.
+ void SetCommentRetentionState(bool KeepComments, bool KeepMacroComments) {
+ this->KeepComments = KeepComments | KeepMacroComments;
+ this->KeepMacroComments = KeepMacroComments;
+ }
+
+ bool getCommentRetentionState() const { return KeepComments; }
+
+ void SetSuppressIncludeNotFoundError(bool Suppress) {
+ SuppressIncludeNotFoundError = Suppress;
+ }
+
+ bool GetSuppressIncludeNotFoundError() {
+ return SuppressIncludeNotFoundError;
+ }
+
+ /// isCurrentLexer - Return true if we are lexing directly from the specified
+ /// lexer.
+ bool isCurrentLexer(const PreprocessorLexer *L) const {
+ return CurPPLexer == L;
+ }
+
+ /// getCurrentLexer - Return the current lexer being lexed from. Note
+ /// that this ignores any potentially active macro expansions and _Pragma
+ /// expansions going on at the time.
+ PreprocessorLexer *getCurrentLexer() const { return CurPPLexer; }
+
+ /// getCurrentFileLexer - Return the current file lexer being lexed from.
+ /// Note that this ignores any potentially active macro expansions and _Pragma
+ /// expansions going on at the time.
+ PreprocessorLexer *getCurrentFileLexer() const;
+
+ /// getPPCallbacks/addPPCallbacks - Accessors for preprocessor callbacks.
+ /// Note that this class takes ownership of any PPCallbacks object given to
+ /// it.
+ PPCallbacks *getPPCallbacks() const { return Callbacks; }
+ void addPPCallbacks(PPCallbacks *C) {
+ if (Callbacks)
+ C = new PPChainedCallbacks(C, Callbacks);
+ Callbacks = C;
+ }
+
+ /// getMacroInfo - Given an identifier, return the MacroInfo it is #defined to
+ /// or null if it isn't #define'd.
+ MacroInfo *getMacroInfo(IdentifierInfo *II) const {
+ if (!II->hasMacroDefinition())
+ return 0;
+
+ return getInfoForMacro(II);
+ }
+
+ /// setMacroInfo - Specify a macro for this identifier.
+ ///
+ void setMacroInfo(IdentifierInfo *II, MacroInfo *MI,
+ bool LoadedFromAST = false);
+
+ /// macro_iterator/macro_begin/macro_end - This allows you to walk the current
+ /// state of the macro table. This visits every currently-defined macro.
+ typedef llvm::DenseMap<IdentifierInfo*,
+ MacroInfo*>::const_iterator macro_iterator;
+ macro_iterator macro_begin(bool IncludeExternalMacros = true) const;
+ macro_iterator macro_end(bool IncludeExternalMacros = true) const;
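+
+ // A minimal usage sketch (not part of the interface above): assuming a
+ // fully-initialized Preprocessor 'PP', the currently-defined macros can be
+ // walked like this:
+ //
+ //   for (Preprocessor::macro_iterator I = PP.macro_begin(),
+ //                                     E = PP.macro_end(); I != E; ++I) {
+ //     const IdentifierInfo *Name = I->first;
+ //     const MacroInfo *Info = I->second;
+ //     // ... e.g. inspect Name->getName() or Info->isFunctionLike() ...
+ //   }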
+
+ const std::string &getPredefines() const { return Predefines; }
+ /// setPredefines - Set the predefines for this Preprocessor. These
+ /// predefines are automatically injected when parsing the main file.
+ void setPredefines(const char *P) { Predefines = P; }
+ void setPredefines(const std::string &P) { Predefines = P; }
+
+ /// getIdentifierInfo - Return information about the specified preprocessor
+ /// identifier token. The identifier is looked up in the identifier table,
+ /// and an IdentifierInfo is created for it if one does not already exist.
+ IdentifierInfo *getIdentifierInfo(StringRef Name) const {
+ return &Identifiers.get(Name);
+ }
+
+ /// AddPragmaHandler - Add the specified pragma handler to the preprocessor.
+ /// If 'Namespace' is non-null, then it is a token required to exist on the
+ /// pragma line before the pragma string starts, e.g. "STDC" or "GCC".
+ void AddPragmaHandler(StringRef Namespace, PragmaHandler *Handler);
+ void AddPragmaHandler(PragmaHandler *Handler) {
+ AddPragmaHandler(StringRef(), Handler);
+ }
+
+ /// RemovePragmaHandler - Remove the specified pragma handler from
+ /// the preprocessor. If \arg Namespace is non-null, then it should
+ /// be the namespace that \arg Handler was added to. It is an error
+ /// to remove a handler that has not been registered.
+ void RemovePragmaHandler(StringRef Namespace, PragmaHandler *Handler);
+ void RemovePragmaHandler(PragmaHandler *Handler) {
+ RemovePragmaHandler(StringRef(), Handler);
+ }
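+
+ // Illustrative registration sketch; 'Handler' stands for a pointer to some
+ // PragmaHandler subclass defined elsewhere (hypothetical here):
+ //
+ //   PP.AddPragmaHandler("clang", Handler);    // handles "#pragma clang ..."
+ //   ...
+ //   PP.RemovePragmaHandler("clang", Handler); // must match the registration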
+
+ /// \brief Add the specified comment handler to the preprocessor.
+ void AddCommentHandler(CommentHandler *Handler);
+
+ /// \brief Remove the specified comment handler.
+ ///
+ /// It is an error to remove a handler that has not been registered.
+ void RemoveCommentHandler(CommentHandler *Handler);
+
+ /// \brief Set the code completion handler to the given object.
+ void setCodeCompletionHandler(CodeCompletionHandler &Handler) {
+ CodeComplete = &Handler;
+ }
+
+ /// \brief Retrieve the current code-completion handler.
+ CodeCompletionHandler *getCodeCompletionHandler() const {
+ return CodeComplete;
+ }
+
+ /// \brief Clear out the code completion handler.
+ void clearCodeCompletionHandler() {
+ CodeComplete = 0;
+ }
+
+ /// \brief Hook used by the lexer to invoke the "natural language" code
+ /// completion point.
+ void CodeCompleteNaturalLanguage();
+
+ /// \brief Retrieve the preprocessing record, or NULL if there is no
+ /// preprocessing record.
+ PreprocessingRecord *getPreprocessingRecord() const { return Record; }
+
+ /// \brief Create a new preprocessing record, which will keep track of
+ /// all macro expansions, macro definitions, etc.
+ void createPreprocessingRecord(bool RecordConditionalDirectives);
+
+ /// EnterMainSourceFile - Enter the specified FileID as the main source file,
+ /// which implicitly adds the builtin defines etc.
+ void EnterMainSourceFile();
+
+ /// EndSourceFile - Inform the preprocessor callbacks that processing is
+ /// complete.
+ void EndSourceFile();
+
+ /// EnterSourceFile - Add a source file to the top of the include stack and
+ /// start lexing tokens from it instead of the current buffer. On error, this
+ /// emits a diagnostic and does not enter the file.
+ void EnterSourceFile(FileID CurFileID, const DirectoryLookup *Dir,
+ SourceLocation Loc);
+
+ /// EnterMacro - Add a Macro to the top of the include stack and start lexing
+ /// tokens from it instead of the current buffer. Args specifies the
+ /// tokens input to a function-like macro.
+ ///
+ /// ILEnd specifies the location of the ')' for a function-like macro or the
+ /// identifier for an object-like macro.
+ void EnterMacro(Token &Identifier, SourceLocation ILEnd, MacroArgs *Args);
+
+ /// EnterTokenStream - Add a "macro" context to the top of the include stack,
+ /// which will cause the lexer to start returning the specified tokens.
+ ///
+ /// If DisableMacroExpansion is true, tokens lexed from the token stream will
+ /// not be subject to further macro expansion. Otherwise, these tokens will
+ /// be re-macro-expanded when/if expansion is enabled.
+ ///
+ /// If OwnsTokens is false, this method assumes that the specified stream of
+ /// tokens has a permanent owner somewhere, so they do not need to be copied.
+ /// If it is true, it assumes the array of tokens is allocated with new[] and
+ /// must be freed.
+ ///
+ void EnterTokenStream(const Token *Toks, unsigned NumToks,
+ bool DisableMacroExpansion, bool OwnsTokens);
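+
+ // Sketch of pushing back a previously-lexed token stream (names such as
+ // 'SavedTok' are hypothetical):
+ //
+ //   Token *Toks = new Token[1];
+ //   Toks[0] = SavedTok;
+ //   PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
+ //                       /*OwnsTokens=*/true); // PP will delete[] Toks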
+
+ /// RemoveTopOfLexerStack - Pop the current lexer/macro exp off the top of the
+ /// lexer stack. This should only be used in situations where the current
+ /// state of the top-of-stack lexer is known.
+ void RemoveTopOfLexerStack();
+
+ /// EnableBacktrackAtThisPos - From the point that this method is called, and
+ /// until CommitBacktrackedTokens() or Backtrack() is called, the Preprocessor
+ /// keeps track of the lexed tokens so that a subsequent Backtrack() call will
+ /// make the Preprocessor re-lex the same tokens.
+ ///
+ /// Nested backtracks are allowed, meaning that EnableBacktrackAtThisPos can
+ /// be called multiple times and CommitBacktrackedTokens/Backtrack calls will
+ /// be combined with the EnableBacktrackAtThisPos calls in reverse order.
+ ///
+ /// NOTE: *DO NOT* forget to call either CommitBacktrackedTokens or Backtrack
+ /// at some point after EnableBacktrackAtThisPos. If you don't, caching of
+ /// tokens will continue indefinitely.
+ ///
+ void EnableBacktrackAtThisPos();
+
+ /// CommitBacktrackedTokens - Disable the last EnableBacktrackAtThisPos call.
+ void CommitBacktrackedTokens();
+
+ /// Backtrack - Make Preprocessor re-lex the tokens that were lexed since
+ /// EnableBacktrackAtThisPos() was previously called.
+ void Backtrack();
+
+ /// isBacktrackEnabled - True if EnableBacktrackAtThisPos() was called and
+ /// caching of tokens is on.
+ bool isBacktrackEnabled() const { return !BacktrackPositions.empty(); }
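+
+ // Typical backtracking pattern (sketch only; 'LooksPromising' is a
+ // hypothetical client predicate):
+ //
+ //   PP.EnableBacktrackAtThisPos();
+ //   Token Tok;
+ //   PP.Lex(Tok);
+ //   if (LooksPromising(Tok))
+ //     PP.CommitBacktrackedTokens(); // keep the tokens just consumed
+ //   else
+ //     PP.Backtrack();               // the same tokens will be re-lexed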
+
+ /// Lex - To lex a token from the preprocessor, just pull a token from the
+ /// current lexer or macro object.
+ void Lex(Token &Result) {
+ switch (CurLexerKind) {
+ case CLK_Lexer: CurLexer->Lex(Result); break;
+ case CLK_PTHLexer: CurPTHLexer->Lex(Result); break;
+ case CLK_TokenLexer: CurTokenLexer->Lex(Result); break;
+ case CLK_CachingLexer: CachingLex(Result); break;
+ case CLK_LexAfterModuleImport: LexAfterModuleImport(Result); break;
+ }
+ }
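+
+ // The usual top-level driver loop looks roughly like this (sketch; assumes
+ // the main FileID has already been set up in the SourceManager):
+ //
+ //   PP.EnterMainSourceFile();
+ //   Token Tok;
+ //   do {
+ //     PP.Lex(Tok);
+ //     // ... hand Tok to the client ...
+ //   } while (Tok.isNot(tok::eof));
+ //   PP.EndSourceFile();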
+
+ void LexAfterModuleImport(Token &Result);
+
+ /// LexNonComment - Lex a token. If it's a comment, keep lexing until we get
+ /// something not a comment. This is useful in -E -C mode where comments
+ /// would foul up preprocessor directive handling.
+ void LexNonComment(Token &Result) {
+ do
+ Lex(Result);
+ while (Result.getKind() == tok::comment);
+ }
+
+ /// LexUnexpandedToken - This is just like Lex, but this disables macro
+ /// expansion of identifier tokens.
+ void LexUnexpandedToken(Token &Result) {
+ // Disable macro expansion.
+ bool OldVal = DisableMacroExpansion;
+ DisableMacroExpansion = true;
+ // Lex the token.
+ Lex(Result);
+
+ // Reenable it.
+ DisableMacroExpansion = OldVal;
+ }
+
+ /// LexUnexpandedNonComment - Like LexNonComment, but this disables macro
+ /// expansion of identifier tokens.
+ void LexUnexpandedNonComment(Token &Result) {
+ do
+ LexUnexpandedToken(Result);
+ while (Result.getKind() == tok::comment);
+ }
+
+ /// LookAhead - This peeks ahead N tokens and returns that token without
+ /// consuming any tokens. LookAhead(0) returns the next token that would be
+ /// returned by Lex(), LookAhead(1) returns the token after it, etc. This
+ /// returns normal tokens after phase 5. As such, it is equivalent to using
+ /// 'Lex', not 'LexUnexpandedToken'.
+ const Token &LookAhead(unsigned N) {
+ if (CachedLexPos + N < CachedTokens.size())
+ return CachedTokens[CachedLexPos+N];
+ else
+ return PeekAhead(N+1);
+ }
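+
+ // Peeking without consuming (sketch):
+ //
+ //   const Token &Next = PP.LookAhead(0); // the token Lex() would return next
+ //   if (Next.is(tok::l_paren)) {
+ //     // choose a parse strategy without having consumed anything yet
+ //   }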
+
+ /// RevertCachedTokens - When backtracking is enabled and tokens are cached,
+ /// this allows reverting a specific number of tokens.
+ /// Note that tokens can only be reverted up to the last backtrack position,
+ /// not beyond it.
+ void RevertCachedTokens(unsigned N) {
+ assert(isBacktrackEnabled() &&
+ "Should only be called when tokens are cached for backtracking");
+ assert(signed(CachedLexPos) - signed(N) >= signed(BacktrackPositions.back())
+ && "Should revert tokens up to the last backtrack position, not more");
+ assert(signed(CachedLexPos) - signed(N) >= 0 &&
+ "Corrupted backtrack positions ?");
+ CachedLexPos -= N;
+ }
+
+ /// EnterToken - Enters a token in the token stream to be lexed next. If
+ /// BackTrack() is called afterwards, the token will remain at the insertion
+ /// point.
+ void EnterToken(const Token &Tok) {
+ EnterCachingLexMode();
+ CachedTokens.insert(CachedTokens.begin()+CachedLexPos, Tok);
+ }
+
+ /// AnnotateCachedTokens - We notify the Preprocessor that if it is caching
+ /// tokens (because backtrack is enabled) it should replace the most recent
+ /// cached tokens with the given annotation token. This function has no effect
+ /// if backtracking is not enabled.
+ ///
+ /// Note that the use of this function is purely an optimization, so that the
+ /// cached tokens don't get re-parsed and re-resolved after a backtrack is
+ /// invoked.
+ void AnnotateCachedTokens(const Token &Tok) {
+ assert(Tok.isAnnotation() && "Expected annotation token");
+ if (CachedLexPos != 0 && isBacktrackEnabled())
+ AnnotatePreviousCachedTokens(Tok);
+ }
+
+ /// \brief Replace the last token with an annotation token.
+ ///
+ /// Like AnnotateCachedTokens(), this routine replaces an
+ /// already-parsed (and resolved) token with an annotation
+ /// token. However, this routine only replaces the last token with
+ /// the annotation token; it does not affect any other cached
+ /// tokens. This function has no effect if backtracking is not
+ /// enabled.
+ void ReplaceLastTokenWithAnnotation(const Token &Tok) {
+ assert(Tok.isAnnotation() && "Expected annotation token");
+ if (CachedLexPos != 0 && isBacktrackEnabled())
+ CachedTokens[CachedLexPos-1] = Tok;
+ }
+
+ /// \brief Recompute the current lexer kind based on the CurLexer/CurPTHLexer/
+ /// CurTokenLexer pointers.
+ void recomputeCurLexerKind();
+
+ /// \brief Returns true if incremental processing is enabled.
+ bool isIncrementalProcessingEnabled() const { return IncrementalProcessing; }
+
+ /// \brief Enables or disables incremental processing.
+ void enableIncrementalProcessing(bool value = true) {
+ IncrementalProcessing = value;
+ }
+
+ /// \brief Specify the point at which code-completion will be performed.
+ ///
+ /// \param File the file in which code completion should occur. If
+ /// this file is included multiple times, code-completion will
+ /// perform completion the first time it is included. If NULL, this
+ /// function clears out the code-completion point.
+ ///
+ /// \param Line the line at which code completion should occur
+ /// (1-based).
+ ///
+ /// \param Column the column at which code completion should occur
+ /// (1-based).
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool SetCodeCompletionPoint(const FileEntry *File,
+ unsigned Line, unsigned Column);
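+
+ // Sketch of arming a completion point ("input.c", line 10 and column 4 are
+ // illustrative values, and the file lookup is one possible way to obtain a
+ // FileEntry):
+ //
+ //   if (const FileEntry *File = PP.getFileManager().getFile("input.c"))
+ //     if (!PP.SetCodeCompletionPoint(File, /*Line=*/10, /*Column=*/4)) {
+ //       // completion fires when lexing reaches input.c:10:4
+ //     }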
+
+ /// \brief Determine if we are performing code completion.
+ bool isCodeCompletionEnabled() const { return CodeCompletionFile != 0; }
+
+ /// \brief Returns the location of the code-completion point.
+ /// Returns an invalid location if code-completion is not enabled or the file
+ /// containing the code-completion point has not been lexed yet.
+ SourceLocation getCodeCompletionLoc() const { return CodeCompletionLoc; }
+
+ /// \brief Returns the start location of the file of code-completion point.
+ /// Returns an invalid location if code-completion is not enabled or the file
+ /// containing the code-completion point has not been lexed yet.
+ SourceLocation getCodeCompletionFileLoc() const {
+ return CodeCompletionFileLoc;
+ }
+
+ /// \brief Returns true if code-completion is enabled and we have hit the
+ /// code-completion point.
+ bool isCodeCompletionReached() const { return CodeCompletionReached; }
+
+ /// \brief Note that we hit the code-completion point.
+ void setCodeCompletionReached() {
+ assert(isCodeCompletionEnabled() && "Code-completion not enabled!");
+ CodeCompletionReached = true;
+ // Silence any diagnostics that occur after we hit the code-completion.
+ getDiagnostics().setSuppressAllDiagnostics(true);
+ }
+
+ /// \brief The location of the currently-active #pragma clang
+ /// arc_cf_code_audited begin. Returns an invalid location if there
+ /// is no such pragma active.
+ SourceLocation getPragmaARCCFCodeAuditedLoc() const {
+ return PragmaARCCFCodeAuditedLoc;
+ }
+
+ /// \brief Set the location of the currently-active #pragma clang
+ /// arc_cf_code_audited begin. An invalid location ends the pragma.
+ void setPragmaARCCFCodeAuditedLoc(SourceLocation Loc) {
+ PragmaARCCFCodeAuditedLoc = Loc;
+ }
+
+ /// \brief Instruct the preprocessor to skip part of the main source file.
+ ///
+ /// \param Bytes The number of bytes in the preamble to skip.
+ ///
+ /// \param StartOfLine Whether skipping these bytes puts the lexer at the
+ /// start of a line.
+ void setSkipMainFilePreamble(unsigned Bytes, bool StartOfLine) {
+ SkipMainFilePreamble.first = Bytes;
+ SkipMainFilePreamble.second = StartOfLine;
+ }
+
+ /// Diag - Forwarding function for diagnostics. This emits a diagnostic at
+ /// the specified source location (or, for the Token overload, at the token's
+ /// location).
+ DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) const {
+ return Diags->Report(Loc, DiagID);
+ }
+
+ DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID) const {
+ return Diags->Report(Tok.getLocation(), DiagID);
+ }
+
+ /// getSpelling() - Return the 'spelling' of the token at the given
+ /// location; does not go up to the spelling location or down to the
+ /// expansion location.
+ ///
+ /// \param buffer A buffer which will be used only if the token requires
+ /// "cleaning", e.g. if it contains trigraphs or escaped newlines
+ /// \param invalid If non-null, will be set \c true if an error occurs.
+ StringRef getSpelling(SourceLocation loc,
+ SmallVectorImpl<char> &buffer,
+ bool *invalid = 0) const {
+ return Lexer::getSpelling(loc, buffer, SourceMgr, LangOpts, invalid);
+ }
+
+ /// getSpelling() - Return the 'spelling' of the Tok token. The spelling of a
+ /// token is the characters used to represent the token in the source file
+ /// after trigraph expansion and escaped-newline folding. In particular, this
+ /// wants to get the true, uncanonicalized, spelling of things like digraphs,
+ /// UCNs, etc.
+ ///
+ /// \param Invalid If non-null, will be set \c true if an error occurs.
+ std::string getSpelling(const Token &Tok, bool *Invalid = 0) const {
+ return Lexer::getSpelling(Tok, SourceMgr, LangOpts, Invalid);
+ }
+
+ /// getSpelling - This method is used to get the spelling of a token into a
+ /// preallocated buffer, instead of as an std::string. The caller is required
+ /// to allocate enough space for the token, which is guaranteed to be at least
+ /// Tok.getLength() bytes long. The length of the actual result is returned.
+ ///
+ /// Note that this method may do two possible things: it may either fill in
+ /// the buffer specified with characters, or it may *change the input pointer*
+ /// to point to a constant buffer with the data already in it (avoiding a
+ /// copy). The caller is not allowed to modify the returned buffer pointer
+ /// if an internal buffer is returned.
+ unsigned getSpelling(const Token &Tok, const char *&Buffer,
+ bool *Invalid = 0) const {
+ return Lexer::getSpelling(Tok, Buffer, SourceMgr, LangOpts, Invalid);
+ }
+
+ /// getSpelling - This method is used to get the spelling of a token into a
+ /// SmallVector. Note that the returned StringRef may not point to the
+ /// supplied buffer if a copy can be avoided.
+ StringRef getSpelling(const Token &Tok,
+ SmallVectorImpl<char> &Buffer,
+ bool *Invalid = 0) const;
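+
+ // Typical use of the buffer-based overload (sketch):
+ //
+ //   SmallString<64> SpellingBuf;
+ //   bool Invalid = false;
+ //   StringRef Spelling = PP.getSpelling(Tok, SpellingBuf, &Invalid);
+ //   // Spelling may point into SpellingBuf or directly into the source
+ //   // buffer; either way it is only valid while those buffers live.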
+
+ /// getSpellingOfSingleCharacterNumericConstant - Tok is a numeric constant
+ /// with length 1, return the character.
+ char getSpellingOfSingleCharacterNumericConstant(const Token &Tok,
+ bool *Invalid = 0) const {
+ assert(Tok.is(tok::numeric_constant) &&
+ Tok.getLength() == 1 && "Called on unsupported token");
+ assert(!Tok.needsCleaning() && "Token can't need cleaning with length 1");
+
+ // If the token is carrying a literal data pointer, just use it.
+ if (const char *D = Tok.getLiteralData())
+ return *D;
+
+ // Otherwise, fall back on getCharacterData, which is slower, but always
+ // works.
+ return *SourceMgr.getCharacterData(Tok.getLocation(), Invalid);
+ }
+
+ /// \brief Retrieve the name of the immediate macro expansion.
+ ///
+ /// This routine starts from a source location, and finds the name of the macro
+ /// responsible for its immediate expansion. It looks through any intervening
+ /// macro argument expansions to compute this. It returns a StringRef which
+ /// refers to the SourceManager-owned buffer of the source where that macro
+ /// name is spelled. Thus, the result shouldn't out-live the SourceManager.
+ StringRef getImmediateMacroName(SourceLocation Loc) {
+ return Lexer::getImmediateMacroName(Loc, SourceMgr, getLangOpts());
+ }
+
+ /// CreateString - Plop the specified string into a scratch buffer and set the
+ /// specified token's location and length to it. If specified, the source
+ /// location provides a location of the expansion point of the token.
+ void CreateString(const char *Buf, unsigned Len, Token &Tok,
+ SourceLocation ExpansionLocStart = SourceLocation(),
+ SourceLocation ExpansionLocEnd = SourceLocation());
+
+ /// \brief Computes the source location just past the end of the
+ /// token at this source location.
+ ///
+ /// This routine can be used to produce a source location that
+ /// points just past the end of the token referenced by \p Loc, and
+ /// is generally used when a diagnostic needs to point just after a
+ /// token where it expected something different from what it received. If
+ /// the returned source location would not be meaningful (e.g., if
+ /// it points into a macro), this routine returns an invalid
+ /// source location.
+ ///
+ /// \param Offset an offset from the end of the token, where the source
+ /// location should refer to. The default offset (0) produces a source
+ /// location pointing just past the end of the token; an offset of 1 produces
+ /// a source location pointing to the last character in the token, etc.
+ SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0) {
+ return Lexer::getLocForEndOfToken(Loc, Offset, SourceMgr, LangOpts);
+ }
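+
+ // A common diagnostics pattern (sketch; 'DiagID' stands for some diagnostic
+ // ID chosen by the caller):
+ //
+ //   SourceLocation After = PP.getLocForEndOfToken(Tok.getLocation());
+ //   if (After.isValid())
+ //     PP.Diag(After, DiagID); // point just past the offending token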
+
+ /// \brief Returns true if the given MacroID location points at the first
+ /// token of the macro expansion.
+ ///
+ /// \param MacroBegin If non-null and the function returns true, it is set to
+ /// the begin location of the macro.
+ bool isAtStartOfMacroExpansion(SourceLocation loc,
+ SourceLocation *MacroBegin = 0) const {
+ return Lexer::isAtStartOfMacroExpansion(loc, SourceMgr, LangOpts,
+ MacroBegin);
+ }
+
+ /// \brief Returns true if the given MacroID location points at the last
+ /// token of the macro expansion.
+ ///
+ /// \param MacroEnd If non-null and the function returns true, it is set to
+ /// the end location of the macro.
+ bool isAtEndOfMacroExpansion(SourceLocation loc,
+ SourceLocation *MacroEnd = 0) const {
+ return Lexer::isAtEndOfMacroExpansion(loc, SourceMgr, LangOpts, MacroEnd);
+ }
+
+ /// DumpToken - Print the token to stderr, used for debugging.
+ ///
+ void DumpToken(const Token &Tok, bool DumpFlags = false) const;
+ void DumpLocation(SourceLocation Loc) const;
+ void DumpMacro(const MacroInfo &MI) const;
+
+ /// AdvanceToTokenCharacter - Given a location that specifies the start of a
+ /// token, return a new location that specifies a character within the token.
+ SourceLocation AdvanceToTokenCharacter(SourceLocation TokStart,
+ unsigned Char) const {
+ return Lexer::AdvanceToTokenCharacter(TokStart, Char, SourceMgr, LangOpts);
+ }
+
+ /// IncrementPasteCounter - Increment the counters for the number of token
+ /// paste operations performed. If fast was specified, this is a 'fast paste'
+ /// case we handled.
+ ///
+ void IncrementPasteCounter(bool isFast) {
+ if (isFast)
+ ++NumFastTokenPaste;
+ else
+ ++NumTokenPaste;
+ }
+
+ void PrintStats();
+
+ size_t getTotalMemory() const;
+
+ /// HandleMicrosoftCommentPaste - When the macro expander pastes together a
+ /// comment (/##/) in Microsoft mode, this method handles updating the current
+ /// state, returning the token on the next source line.
+ void HandleMicrosoftCommentPaste(Token &Tok);
+
+ //===--------------------------------------------------------------------===//
+ // Preprocessor callback methods. These are invoked by a lexer as various
+ // directives and events are found.
+
+ /// LookUpIdentifierInfo - Given a tok::raw_identifier token, look up the
+ /// identifier information for the token and install it into the token,
+ /// updating the token kind accordingly.
+ IdentifierInfo *LookUpIdentifierInfo(Token &Identifier) const;
+
+private:
+ llvm::DenseMap<IdentifierInfo*,unsigned> PoisonReasons;
+
+public:
+
+ // SetPoisonReason - Call this function to indicate the reason for
+ // poisoning an identifier. If that identifier is accessed while
+ // poisoned, then this reason will be used instead of the default
+ // "poisoned" diagnostic.
+ void SetPoisonReason(IdentifierInfo *II, unsigned DiagID);
+
+ // HandlePoisonedIdentifier - Display reason for poisoned
+ // identifier.
+ void HandlePoisonedIdentifier(Token & Tok);
+
+ void MaybeHandlePoisonedIdentifier(Token & Identifier) {
+ if(IdentifierInfo * II = Identifier.getIdentifierInfo()) {
+ if(II->isPoisoned()) {
+ HandlePoisonedIdentifier(Identifier);
+ }
+ }
+ }
+
+private:
+ /// Identifiers used for SEH handling in Borland. These are only
+ /// allowed in particular circumstances.
+ // __except block
+ IdentifierInfo *Ident__exception_code,
+ *Ident___exception_code,
+ *Ident_GetExceptionCode;
+ // __except filter expression
+ IdentifierInfo *Ident__exception_info,
+ *Ident___exception_info,
+ *Ident_GetExceptionInfo;
+ // __finally
+ IdentifierInfo *Ident__abnormal_termination,
+ *Ident___abnormal_termination,
+ *Ident_AbnormalTermination;
+public:
+ void PoisonSEHIdentifiers(bool Poison = true); // Borland
+
+ /// HandleIdentifier - This callback is invoked when the lexer reads an
+ /// identifier and has filled in the token's IdentifierInfo member. This
+ /// callback potentially macro expands it or turns it into a named token (like
+ /// 'for').
+ void HandleIdentifier(Token &Identifier);
+
+
+ /// HandleEndOfFile - This callback is invoked when the lexer hits the end of
+ /// the current file. This either returns the EOF token and returns true, or
+ /// pops a level off the include stack and returns false, at which point the
+ /// client should call lex again.
+ bool HandleEndOfFile(Token &Result, bool isEndOfMacro = false);
+
+ /// HandleEndOfTokenLexer - This callback is invoked when the current
+ /// TokenLexer hits the end of its token stream.
+ bool HandleEndOfTokenLexer(Token &Result);
+
+ /// HandleDirective - This callback is invoked when the lexer sees a # token
+ /// at the start of a line. This consumes the directive, modifies the
+ /// lexer/preprocessor state, and advances the lexer(s) so that the next token
+ /// read is the correct one.
+ void HandleDirective(Token &Result);
+
+ /// CheckEndOfDirective - Ensure that the next token is a tok::eod token. If
+ /// not, emit a diagnostic and consume up until the eod. If EnableMacros is
+ /// true, then we consider macros that expand to zero tokens as being ok.
+ void CheckEndOfDirective(const char *Directive, bool EnableMacros = false);
+
+ /// DiscardUntilEndOfDirective - Read and discard all tokens remaining on the
+ /// current line until the tok::eod token is found.
+ void DiscardUntilEndOfDirective();
+
+ /// SawDateOrTime - This returns true if the preprocessor has seen a use of
+ /// __DATE__ or __TIME__ in the file so far.
+ bool SawDateOrTime() const {
+ return DATELoc != SourceLocation() || TIMELoc != SourceLocation();
+ }
+ unsigned getCounterValue() const { return CounterValue; }
+ void setCounterValue(unsigned V) { CounterValue = V; }
+
+ /// \brief Retrieves the module that we're currently building, if any.
+ Module *getCurrentModule();
+
+ /// AllocateMacroInfo - Allocate a new MacroInfo object with the provided
+ /// SourceLocation.
+ MacroInfo *AllocateMacroInfo(SourceLocation L);
+
+ /// CloneMacroInfo - Allocate a new MacroInfo object which is a clone of MI.
+ MacroInfo *CloneMacroInfo(const MacroInfo &MI);
+
+ /// GetIncludeFilenameSpelling - Turn the specified lexer token into a fully
+ /// checked and spelled filename, e.g. as an operand of #include. This returns
+ /// true if the input filename was in <>'s or false if it was in ""'s. The
+ /// caller is expected to provide a buffer that is large enough to hold the
+ /// spelling of the filename, but is also expected to handle the case when
+ /// this method decides to use a different buffer.
+ bool GetIncludeFilenameSpelling(SourceLocation Loc, StringRef &Filename);
+
+ /// LookupFile - Given a "foo" or <foo> reference, look up the indicated file,
+ /// returning null on failure. isAngled indicates whether the file reference is
+ /// for system #include's or not (i.e. using <> instead of "").
+ const FileEntry *LookupFile(StringRef Filename,
+ bool isAngled, const DirectoryLookup *FromDir,
+ const DirectoryLookup *&CurDir,
+ SmallVectorImpl<char> *SearchPath,
+ SmallVectorImpl<char> *RelativePath,
+ Module **SuggestedModule,
+ bool SkipCache = false);
+
+ /// GetCurDirLookup - The DirectoryLookup structure used to find the current
+ /// FileEntry, if CurLexer is non-null and if applicable. This allows us to
+ /// implement #include_next and find directory-specific properties.
+ const DirectoryLookup *GetCurDirLookup() { return CurDirLookup; }
+
+ /// isInPrimaryFile - Return true if we're in the top-level file, not in a
+ /// #include.
+ bool isInPrimaryFile() const;
+
+ /// ConcatenateIncludeName - Handle cases where the #include name is expanded
+ /// from a macro as multiple tokens, which need to be glued together. This
+ /// occurs for code like:
+ /// #define FOO <a/b.h>
+ /// #include FOO
+ /// because in this case, "<a/b.h>" is returned as 7 tokens, not one.
+ ///
+ /// This code concatenates and consumes tokens up to the '>' token. It
+ /// returns false if the > was found, otherwise it returns true if it finds
+ /// and consumes the EOD marker.
+ bool ConcatenateIncludeName(SmallString<128> &FilenameBuffer,
+ SourceLocation &End);
+
+ /// LexOnOffSwitch - Lex an on-off-switch (C99 6.10.6p2) and verify that it is
+ /// followed by EOD. Return true if the token is not a valid on-off-switch.
+ bool LexOnOffSwitch(tok::OnOffSwitch &OOS);
+
+private:
+
+ void PushIncludeMacroStack() {
+ IncludeMacroStack.push_back(IncludeStackInfo(CurLexerKind,
+ CurLexer.take(),
+ CurPTHLexer.take(),
+ CurPPLexer,
+ CurTokenLexer.take(),
+ CurDirLookup));
+ CurPPLexer = 0;
+ }
+
+ void PopIncludeMacroStack() {
+ CurLexer.reset(IncludeMacroStack.back().TheLexer);
+ CurPTHLexer.reset(IncludeMacroStack.back().ThePTHLexer);
+ CurPPLexer = IncludeMacroStack.back().ThePPLexer;
+ CurTokenLexer.reset(IncludeMacroStack.back().TheTokenLexer);
+ CurDirLookup = IncludeMacroStack.back().TheDirLookup;
+ CurLexerKind = IncludeMacroStack.back().CurLexerKind;
+ IncludeMacroStack.pop_back();
+ }
+
+ /// AllocateMacroInfo - Allocate a new MacroInfo object.
+ MacroInfo *AllocateMacroInfo();
+
+ /// ReleaseMacroInfo - Release the specified MacroInfo. This memory will
+ /// be reused for allocating new MacroInfo objects.
+ void ReleaseMacroInfo(MacroInfo* MI);
+
+ /// ReadMacroName - Lex and validate a macro name, which occurs after a
+ /// #define or #undef. This emits a diagnostic, sets the token kind to eod,
+ /// and discards the rest of the macro line if the macro name is invalid.
+ void ReadMacroName(Token &MacroNameTok, char isDefineUndef = 0);
+
+ /// ReadMacroDefinitionArgList - The ( starting an argument list of a macro
+ /// definition has just been read. Lex the rest of the arguments and the
+ /// closing ), updating MI with what we learn and saving in LastTok the
+ /// last token read.
+ /// Return true if an error occurs parsing the arg list.
+ bool ReadMacroDefinitionArgList(MacroInfo *MI, Token& LastTok);
+
+ /// SkipExcludedConditionalBlock - We just read a #if or related directive and
+ /// decided that the subsequent tokens are in the #if'd out portion of the
+ /// file. Lex the rest of the file, until we see an #endif. If
+ /// FoundNonSkipPortion is true, then we have already emitted code for part of
+ /// this #if directive, so #else/#elif blocks should never be entered. If
+ /// FoundElse is false, then #else directives are ok, if not, then we have
+ /// already seen one so a #else directive is a duplicate. When this returns,
+ /// the caller can lex the first valid token.
+ void SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
+ bool FoundNonSkipPortion, bool FoundElse,
+ SourceLocation ElseLoc = SourceLocation());
+
+ /// PTHSkipExcludedConditionalBlock - A fast PTH version of
+ /// SkipExcludedConditionalBlock.
+ void PTHSkipExcludedConditionalBlock();
+
+ /// EvaluateDirectiveExpression - Evaluate an integer constant expression that
+ /// may occur after a #if or #elif directive and return it as a bool. If the
+ /// expression is equivalent to "!defined(X)" return X in IfNDefMacro.
+ bool EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro);
+
+ /// RegisterBuiltinPragmas - Install the standard preprocessor pragmas:
+ /// #pragma GCC poison/system_header/dependency and #pragma once.
+ void RegisterBuiltinPragmas();
+
+ /// RegisterBuiltinMacros - Register builtin macros, such as __LINE__ with the
+ /// identifier table.
+ void RegisterBuiltinMacros();
+
+ /// HandleMacroExpandedIdentifier - If an identifier token is read that is to
+ /// be expanded as a macro, handle it and return the next token as 'Tok'. If
+ /// the macro should not be expanded return true, otherwise return false.
+ bool HandleMacroExpandedIdentifier(Token &Tok, MacroInfo *MI);
+
+ /// \brief Cache macro expanded tokens for TokenLexers.
+ ///
+ /// Works like a stack; a TokenLexer adds the macro-expanded tokens that it
+ /// is going to lex into the cache, and when it finishes, the tokens are
+ /// removed from the end of the cache.
+ Token *cacheMacroExpandedTokens(TokenLexer *tokLexer,
+ ArrayRef<Token> tokens);
+ void removeCachedMacroExpandedTokensOfLastLexer();
+ friend void TokenLexer::ExpandFunctionArguments();
+
+ /// isNextPPTokenLParen - Determine whether the next preprocessor token to be
+ /// lexed is a '('. If so, consume the token and return true, if not, this
+ /// method should have no observable side-effect on the lexed tokens.
+ bool isNextPPTokenLParen();
+
+ /// ReadFunctionLikeMacroArgs - After reading "MACRO(", this method is
+ /// invoked to read all of the formal arguments specified for the macro
+ /// invocation. This returns null on error.
+ MacroArgs *ReadFunctionLikeMacroArgs(Token &MacroName, MacroInfo *MI,
+ SourceLocation &ExpansionEnd);
+
+ /// ExpandBuiltinMacro - If an identifier token is read that is to be expanded
+ /// as a builtin macro, handle it and return the next token as 'Tok'.
+ void ExpandBuiltinMacro(Token &Tok);
+
+ /// Handle_Pragma - Read a _Pragma directive, slice it up, process it, then
+ /// return the first token after the directive. The _Pragma token has just
+ /// been read into 'Tok'.
+ void Handle_Pragma(Token &Tok);
+
+ /// HandleMicrosoft__pragma - Like Handle_Pragma except the pragma text
+ /// is not enclosed within a string literal.
+ void HandleMicrosoft__pragma(Token &Tok);
+
+ /// EnterSourceFileWithLexer - Add a lexer to the top of the include stack and
+ /// start lexing tokens from it instead of the current buffer.
+ void EnterSourceFileWithLexer(Lexer *TheLexer, const DirectoryLookup *Dir);
+
+ /// EnterSourceFileWithPTH - Add a lexer to the top of the include stack and
+ /// start getting tokens from it using the PTH cache.
+ void EnterSourceFileWithPTH(PTHLexer *PL, const DirectoryLookup *Dir);
+
+ /// IsFileLexer - Returns true if we are lexing from a file and not a
+ /// pragma or a macro.
+ static bool IsFileLexer(const Lexer* L, const PreprocessorLexer* P) {
+ return L ? !L->isPragmaLexer() : P != 0;
+ }
+
+ static bool IsFileLexer(const IncludeStackInfo& I) {
+ return IsFileLexer(I.TheLexer, I.ThePPLexer);
+ }
+
+ bool IsFileLexer() const {
+ return IsFileLexer(CurLexer.get(), CurPPLexer);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Caching stuff.
+ void CachingLex(Token &Result);
+ bool InCachingLexMode() const {
+ // If the Lexer pointers are 0 and IncludeMacroStack is empty, it means
+ // that we are past EOF, not that we are in CachingLex mode.
+ return CurPPLexer == 0 && CurTokenLexer == 0 && CurPTHLexer == 0 &&
+ !IncludeMacroStack.empty();
+ }
+ void EnterCachingLexMode();
+ void ExitCachingLexMode() {
+ if (InCachingLexMode())
+ RemoveTopOfLexerStack();
+ }
+ const Token &PeekAhead(unsigned N);
+ void AnnotatePreviousCachedTokens(const Token &Tok);
+
+ //===--------------------------------------------------------------------===//
+ /// Handle*Directive - implement the various preprocessor directives. These
+ /// should side-effect the current preprocessor object so that the next call
+ /// to Lex() will return the appropriate token next.
+ void HandleLineDirective(Token &Tok);
+ void HandleDigitDirective(Token &Tok);
+ void HandleUserDiagnosticDirective(Token &Tok, bool isWarning);
+ void HandleIdentSCCSDirective(Token &Tok);
+ void HandleMacroPublicDirective(Token &Tok);
+ void HandleMacroPrivateDirective(Token &Tok);
+
+ // File inclusion.
+ void HandleIncludeDirective(SourceLocation HashLoc,
+ Token &Tok,
+ const DirectoryLookup *LookupFrom = 0,
+ bool isImport = false);
+ void HandleIncludeNextDirective(SourceLocation HashLoc, Token &Tok);
+ void HandleIncludeMacrosDirective(SourceLocation HashLoc, Token &Tok);
+ void HandleImportDirective(SourceLocation HashLoc, Token &Tok);
+ void HandleMicrosoftImportDirective(Token &Tok);
+
+ // Macro handling.
+ void HandleDefineDirective(Token &Tok);
+ void HandleUndefDirective(Token &Tok);
+
+ // Conditional Inclusion.
+ void HandleIfdefDirective(Token &Tok, bool isIfndef,
+ bool ReadAnyTokensBeforeDirective);
+ void HandleIfDirective(Token &Tok, bool ReadAnyTokensBeforeDirective);
+ void HandleEndifDirective(Token &Tok);
+ void HandleElseDirective(Token &Tok);
+ void HandleElifDirective(Token &Tok);
+
+ // Pragmas.
+ void HandlePragmaDirective(unsigned Introducer);
+public:
+ void HandlePragmaOnce(Token &OnceTok);
+ void HandlePragmaMark();
+ void HandlePragmaPoison(Token &PoisonTok);
+ void HandlePragmaSystemHeader(Token &SysHeaderTok);
+ void HandlePragmaDependency(Token &DependencyTok);
+ void HandlePragmaComment(Token &CommentTok);
+ void HandlePragmaMessage(Token &MessageTok);
+ void HandlePragmaPushMacro(Token &Tok);
+ void HandlePragmaPopMacro(Token &Tok);
+ void HandlePragmaIncludeAlias(Token &Tok);
+ IdentifierInfo *ParsePragmaPushOrPopMacro(Token &Tok);
+
+ // Return true and store the first token only if any CommentHandler
+ // has inserted some tokens and getCommentRetentionState() is false.
+ bool HandleComment(Token &Token, SourceRange Comment);
+
+ /// \brief A macro is used, update information about macros that need unused
+ /// warnings.
+ void markMacroAsUsed(MacroInfo *MI);
+};
+
+/// \brief Abstract base class that describes a handler that will receive
+/// source ranges for each of the comments encountered in the source file.
+class CommentHandler {
+public:
+ virtual ~CommentHandler();
+
+ // The handler shall return true if it has pushed any tokens
+ // to be read using e.g. EnterToken or EnterTokenStream.
+ virtual bool HandleComment(Preprocessor &PP, SourceRange Comment) = 0;
+};
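+
+// A minimal concrete handler might look like the following sketch (the class
+// name is illustrative only):
+//
+//   class PrintingCommentHandler : public CommentHandler {
+//   public:
+//     virtual bool HandleComment(Preprocessor &PP, SourceRange Comment) {
+//       // Inspect Comment via PP.getSourceManager() here if desired.
+//       return false; // no tokens were pushed back into the stream
+//     }
+//   };
+//
+//   PrintingCommentHandler Handler;
+//   PP.AddCommentHandler(&Handler);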
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h
new file mode 100644
index 0000000..b551cd4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/PreprocessorLexer.h
@@ -0,0 +1,180 @@
+//===--- PreprocessorLexer.h - C Language Family Lexer ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PreprocessorLexer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_PreprocessorLexer_H
+#define LLVM_CLANG_PreprocessorLexer_H
+
+#include "clang/Lex/MultipleIncludeOpt.h"
+#include "clang/Lex/Token.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+
+class FileEntry;
+class Preprocessor;
+
+class PreprocessorLexer {
+ virtual void anchor();
+protected:
+ Preprocessor *PP; // Preprocessor object controlling lexing.
+
+ /// The SourceManager FileID corresponding to the file being lexed.
+ const FileID FID;
+
+ /// \brief Number of SLocEntries before lexing the file.
+ unsigned InitialNumSLocEntries;
+
+ //===--------------------------------------------------------------------===//
+ // Context-specific lexing flags set by the preprocessor.
+ //===--------------------------------------------------------------------===//
+
+ /// ParsingPreprocessorDirective - This is true when parsing #XXX. This turns
+ /// '\n' into a tok::eod token.
+ bool ParsingPreprocessorDirective;
+
+ /// ParsingFilename - True after #include: this turns <xx> into a
+ /// tok::angle_string_literal token.
+ bool ParsingFilename;
+
+ /// LexingRawMode - True if in raw mode. Raw mode disables interpretation of
+ /// tokens and is a far faster mode to lex in than non-raw mode. This flag:
+ /// 1. If EOF of the current lexer is found, the include stack isn't popped.
+ /// 2. Identifier information is not looked up for identifier tokens. As an
+ /// effect of this, implicit macro expansion is naturally disabled.
+ /// 3. "#" tokens at the start of a line are treated as normal tokens, not
+ /// implicitly transformed by the lexer.
+ /// 4. All diagnostic messages are disabled.
+ /// 5. No callbacks are made into the preprocessor.
+ ///
+ /// Note that in raw mode the PP pointer may be null.
+ bool LexingRawMode;
+
+ /// MIOpt - This is a state machine that detects the #ifndef-wrapping a file
+ /// idiom for the multiple-include optimization.
+ MultipleIncludeOpt MIOpt;
+
+ /// ConditionalStack - Information about the set of #if/#ifdef/#ifndef blocks
+ /// we are currently in.
+ SmallVector<PPConditionalInfo, 4> ConditionalStack;
+
+ PreprocessorLexer(const PreprocessorLexer&); // DO NOT IMPLEMENT
+ void operator=(const PreprocessorLexer&); // DO NOT IMPLEMENT
+ friend class Preprocessor;
+
+ PreprocessorLexer(Preprocessor *pp, FileID fid);
+
+ PreprocessorLexer()
+ : PP(0), InitialNumSLocEntries(0),
+ ParsingPreprocessorDirective(false),
+ ParsingFilename(false),
+ LexingRawMode(false) {}
+
+ virtual ~PreprocessorLexer() {}
+
+ virtual void IndirectLex(Token& Result) = 0;
+
+ /// getSourceLocation - Return the source location for the next observable
+ /// location.
+ virtual SourceLocation getSourceLocation() = 0;
+
+ //===--------------------------------------------------------------------===//
+ // #if directive handling.
+
+ /// pushConditionalLevel - When we enter a #if directive, this keeps track of
+ /// what we are currently in for diagnostic emission (e.g. #if with missing
+ /// #endif).
+ void pushConditionalLevel(SourceLocation DirectiveStart, bool WasSkipping,
+ bool FoundNonSkip, bool FoundElse) {
+ PPConditionalInfo CI;
+ CI.IfLoc = DirectiveStart;
+ CI.WasSkipping = WasSkipping;
+ CI.FoundNonSkip = FoundNonSkip;
+ CI.FoundElse = FoundElse;
+ ConditionalStack.push_back(CI);
+ }
+ void pushConditionalLevel(const PPConditionalInfo &CI) {
+ ConditionalStack.push_back(CI);
+ }
+
+ /// popConditionalLevel - Remove an entry off the top of the conditional
+ /// stack, returning information about it. If the conditional stack is empty,
+ /// this returns true and does not fill in the arguments.
+ bool popConditionalLevel(PPConditionalInfo &CI) {
+ if (ConditionalStack.empty()) return true;
+ CI = ConditionalStack.back();
+ ConditionalStack.pop_back();
+ return false;
+ }
+
+ /// peekConditionalLevel - Return the top of the conditional stack. This
+ /// requires that there be a conditional active.
+ PPConditionalInfo &peekConditionalLevel() {
+ assert(!ConditionalStack.empty() && "No conditionals active!");
+ return ConditionalStack.back();
+ }
+
+ unsigned getConditionalStackDepth() const { return ConditionalStack.size(); }
+
+public:
+
+ //===--------------------------------------------------------------------===//
+ // Misc. lexing methods.
+
+ /// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
+ /// (potentially) macro expand the filename. If the sequence parsed is not
+ /// lexically legal, emit a diagnostic and return a result EOD token.
+ void LexIncludeFilename(Token &Result);
+
+ /// setParsingPreprocessorDirective - Inform the lexer whether or not
+ /// we are currently lexing a preprocessor directive.
+ void setParsingPreprocessorDirective(bool f) {
+ ParsingPreprocessorDirective = f;
+ }
+
+ /// isLexingRawMode - Return true if this lexer is in raw mode.
+ bool isLexingRawMode() const { return LexingRawMode; }
+
+ /// getPP - Return the preprocessor object for this lexer.
+ Preprocessor *getPP() const { return PP; }
+
+ FileID getFileID() const {
+ assert(PP &&
+ "PreprocessorLexer::getFileID() should only be used with a Preprocessor");
+ return FID;
+ }
+
+ /// \brief Number of SLocEntries before lexing the file.
+ unsigned getInitialNumSLocEntries() const {
+ return InitialNumSLocEntries;
+ }
+
+ /// getFileEntry - Return the FileEntry corresponding to this FileID. Like
+ /// getFileID(), this only works for lexers with attached preprocessors.
+ const FileEntry *getFileEntry() const;
+
+ /// \brief Iterator that traverses the current stack of preprocessor
+ /// conditional directives (#if/#ifdef/#ifndef).
+ typedef SmallVectorImpl<PPConditionalInfo>::const_iterator
+ conditional_iterator;
+
+ conditional_iterator conditional_begin() const {
+ return ConditionalStack.begin();
+ }
+ conditional_iterator conditional_end() const {
+ return ConditionalStack.end();
+ }
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/ScratchBuffer.h b/contrib/llvm/tools/clang/include/clang/Lex/ScratchBuffer.h
new file mode 100644
index 0000000..f03515f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/ScratchBuffer.h
@@ -0,0 +1,45 @@
+//===--- ScratchBuffer.h - Scratch space for forming tokens -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ScratchBuffer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SCRATCHBUFFER_H
+#define LLVM_CLANG_SCRATCHBUFFER_H
+
+#include "clang/Basic/SourceLocation.h"
+
+namespace clang {
+ class SourceManager;
+
+/// ScratchBuffer - This class exposes a simple interface for the dynamic
+/// construction of tokens. This is used for builtin macros (e.g. __LINE__) as
+/// well as token pasting, etc.
+class ScratchBuffer {
+ SourceManager &SourceMgr;
+ char *CurBuffer;
+ SourceLocation BufferStartLoc;
+ unsigned BytesUsed;
+public:
+ ScratchBuffer(SourceManager &SM);
+
+ /// getToken - Splat the specified text into a temporary MemoryBuffer and
+ /// return a SourceLocation that refers to the token. The returned location
+ /// indicates the physical location of the token's text, and DestPtr is set
+ /// to point at the copy of the text in the scratch buffer.
+ SourceLocation getToken(const char *Buf, unsigned Len, const char *&DestPtr);
+
+private:
+ void AllocScratchBuffer(unsigned RequestLen);
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/Token.h b/contrib/llvm/tools/clang/include/clang/Lex/Token.h
new file mode 100644
index 0000000..a88f607
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/Token.h
@@ -0,0 +1,299 @@
+//===--- Token.h - Token interface ------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Token interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOKEN_H
+#define LLVM_CLANG_TOKEN_H
+
+#include "clang/Basic/TemplateKinds.h"
+#include "clang/Basic/TokenKinds.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/OperatorKinds.h"
+#include <cstdlib>
+
+namespace clang {
+
+class IdentifierInfo;
+
+/// Token - This structure provides full information about a lexed token.
+/// It is not intended to be space efficient; it is intended to return as much
+/// information as possible about each returned token. This is expected to be
+/// compressed into a smaller form if memory footprint is important.
+///
+/// The parser can create a special "annotation token" representing a stream of
+/// tokens that were parsed and semantically resolved, e.g.: "foo::MyClass<int>"
+/// can be represented by a single typename annotation token that carries
+/// information about the SourceRange of the tokens and the type object.
+class Token {
+ /// The location of the token.
+ SourceLocation Loc;
+
+ // Conceptually these next two fields could be in a union. However, this
+ // causes gcc 4.2 to pessimize LexTokenInternal, a very performance critical
+ // routine. Keeping as separate members with casts until a more beautiful fix
+ // presents itself.
+
+ /// UintData - This holds either the length of the token text (for a normal
+ /// token) or the end of the SourceRange (for an annotation token).
+ unsigned UintData;
+
+ /// PtrData - This is a union of four different pointer types, which depends
+ /// on what type of token this is:
+ /// Identifiers, keywords, etc:
+ /// This is an IdentifierInfo*, which contains the uniqued identifier
+ /// spelling.
+ /// Literals: isLiteral() returns true.
+ /// This is a pointer to the start of the token in a text buffer, which
+ /// may be dirty (have trigraphs / escaped newlines).
+ /// Annotations (resolved type names, C++ scopes, etc): isAnnotation().
+ /// This is a pointer to sema-specific data for the annotation token.
+ /// Other:
+ /// This is null.
+ void *PtrData;
+
+ /// Kind - The actual flavor of token this is.
+ ///
+ unsigned short Kind;
+
+ /// Flags - Bits we track about this token, members of the TokenFlags enum.
+ unsigned char Flags;
+public:
+
+ // Various flags set per token:
+ enum TokenFlags {
+ StartOfLine = 0x01, // At start of line or only after whitespace.
+ LeadingSpace = 0x02, // Whitespace exists before this token.
+ DisableExpand = 0x04, // This identifier may never be macro expanded.
+ NeedsCleaning = 0x08, // Contained an escaped newline or trigraph.
+ LeadingEmptyMacro = 0x10, // Empty macro exists before this token.
+ HasUDSuffix = 0x20 // This string or character literal has a ud-suffix.
+ };
+
+ tok::TokenKind getKind() const { return (tok::TokenKind)Kind; }
+ void setKind(tok::TokenKind K) { Kind = K; }
+
+ /// is/isNot - Predicates to check if this token is a specific kind, as in
+ /// "if (Tok.is(tok::l_brace)) {...}".
+ bool is(tok::TokenKind K) const { return Kind == (unsigned) K; }
+ bool isNot(tok::TokenKind K) const { return Kind != (unsigned) K; }
+
+ /// isAnyIdentifier - Return true if this is a raw identifier (when lexing
+ /// in raw mode) or a non-keyword identifier (when lexing in non-raw mode).
+ bool isAnyIdentifier() const {
+ return is(tok::identifier) || is(tok::raw_identifier);
+ }
+
+ /// isLiteral - Return true if this is a "literal", like a numeric
+ /// constant, string, etc.
+ bool isLiteral() const {
+ return is(tok::numeric_constant) || is(tok::char_constant) ||
+ is(tok::wide_char_constant) || is(tok::utf16_char_constant) ||
+ is(tok::utf32_char_constant) || is(tok::string_literal) ||
+ is(tok::wide_string_literal) || is(tok::utf8_string_literal) ||
+ is(tok::utf16_string_literal) || is(tok::utf32_string_literal) ||
+ is(tok::angle_string_literal);
+ }
+
+ bool isAnnotation() const {
+#define ANNOTATION(NAME) \
+ if (is(tok::annot_##NAME)) \
+ return true;
+#include "clang/Basic/TokenKinds.def"
+ return false;
+ }
+
+ /// getLocation - Return a SourceLocation identifying the start of this
+ /// token.
+ SourceLocation getLocation() const { return Loc; }
+ unsigned getLength() const {
+ assert(!isAnnotation() && "Annotation tokens have no length field");
+ return UintData;
+ }
+
+ void setLocation(SourceLocation L) { Loc = L; }
+ void setLength(unsigned Len) {
+ assert(!isAnnotation() && "Annotation tokens have no length field");
+ UintData = Len;
+ }
+
+ SourceLocation getAnnotationEndLoc() const {
+ assert(isAnnotation() && "Used AnnotEndLocID on non-annotation token");
+ return SourceLocation::getFromRawEncoding(UintData);
+ }
+ void setAnnotationEndLoc(SourceLocation L) {
+ assert(isAnnotation() && "Used AnnotEndLocID on non-annotation token");
+ UintData = L.getRawEncoding();
+ }
+
+ SourceLocation getLastLoc() const {
+ return isAnnotation() ? getAnnotationEndLoc() : getLocation();
+ }
+
+ /// getAnnotationRange - SourceRange of the group of tokens that this
+ /// annotation token represents.
+ SourceRange getAnnotationRange() const {
+ return SourceRange(getLocation(), getAnnotationEndLoc());
+ }
+ void setAnnotationRange(SourceRange R) {
+ setLocation(R.getBegin());
+ setAnnotationEndLoc(R.getEnd());
+ }
+
+ const char *getName() const {
+ return tok::getTokenName( (tok::TokenKind) Kind);
+ }
+
+ /// startToken - Reset all flags to cleared.
+ ///
+ void startToken() {
+ Kind = tok::unknown;
+ Flags = 0;
+ PtrData = 0;
+ UintData = 0;
+ Loc = SourceLocation();
+ }
+
+ IdentifierInfo *getIdentifierInfo() const {
+ assert(isNot(tok::raw_identifier) &&
+ "getIdentifierInfo() on a tok::raw_identifier token!");
+ assert(!isAnnotation() &&
+ "getIdentifierInfo() on an annotation token!");
+ if (isLiteral()) return 0;
+ return (IdentifierInfo*) PtrData;
+ }
+ void setIdentifierInfo(IdentifierInfo *II) {
+ PtrData = (void*) II;
+ }
+
+ /// getRawIdentifierData - For a raw identifier token (i.e., an identifier
+ /// lexed in raw mode), returns a pointer to the start of it in the text
+ /// buffer if known, null otherwise.
+ const char *getRawIdentifierData() const {
+ assert(is(tok::raw_identifier));
+ return reinterpret_cast<const char*>(PtrData);
+ }
+ void setRawIdentifierData(const char *Ptr) {
+ assert(is(tok::raw_identifier));
+ PtrData = const_cast<char*>(Ptr);
+ }
+
+ /// getLiteralData - For a literal token (numeric constant, string, etc), this
+ /// returns a pointer to the start of it in the text buffer if known, null
+ /// otherwise.
+ const char *getLiteralData() const {
+ assert(isLiteral() && "Cannot get literal data of non-literal");
+ return reinterpret_cast<const char*>(PtrData);
+ }
+ void setLiteralData(const char *Ptr) {
+ assert(isLiteral() && "Cannot set literal data of non-literal");
+ PtrData = const_cast<char*>(Ptr);
+ }
+
+ void *getAnnotationValue() const {
+ assert(isAnnotation() && "Used AnnotVal on non-annotation token");
+ return PtrData;
+ }
+ void setAnnotationValue(void *val) {
+ assert(isAnnotation() && "Used AnnotVal on non-annotation token");
+ PtrData = val;
+ }
+
+ /// setFlag - Set the specified flag.
+ void setFlag(TokenFlags Flag) {
+ Flags |= Flag;
+ }
+
+ /// clearFlag - Unset the specified flag.
+ void clearFlag(TokenFlags Flag) {
+ Flags &= ~Flag;
+ }
+
+  /// getFlags - Return the internal representation of the flags.
+  /// Only intended for low-level operations such as writing tokens to
+  /// disk.
+ unsigned getFlags() const {
+ return Flags;
+ }
+
+ /// setFlagValue - Set a flag to either true or false.
+ void setFlagValue(TokenFlags Flag, bool Val) {
+ if (Val)
+ setFlag(Flag);
+ else
+ clearFlag(Flag);
+ }
+
+ /// isAtStartOfLine - Return true if this token is at the start of a line.
+ ///
+ bool isAtStartOfLine() const { return (Flags & StartOfLine) ? true : false; }
+
+ /// hasLeadingSpace - Return true if this token has whitespace before it.
+ ///
+ bool hasLeadingSpace() const { return (Flags & LeadingSpace) ? true : false; }
+
+ /// isExpandDisabled - Return true if this identifier token should never
+ /// be expanded in the future, due to C99 6.10.3.4p2.
+ bool isExpandDisabled() const {
+ return (Flags & DisableExpand) ? true : false;
+ }
+
+ /// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
+ bool isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const;
+
+ /// getObjCKeywordID - Return the ObjC keyword kind.
+ tok::ObjCKeywordKind getObjCKeywordID() const;
+
+ /// needsCleaning - Return true if this token has trigraphs or escaped
+ /// newlines in it.
+ ///
+ bool needsCleaning() const { return (Flags & NeedsCleaning) ? true : false; }
+
+ /// \brief Return true if this token has an empty macro before it.
+ ///
+ bool hasLeadingEmptyMacro() const {
+ return (Flags & LeadingEmptyMacro) ? true : false;
+ }
+
+ /// \brief Return true if this token is a string or character literal which
+ /// has a ud-suffix.
+ bool hasUDSuffix() const { return (Flags & HasUDSuffix) ? true : false; }
+};
+
+/// PPConditionalInfo - Information about the conditional stack (#if directives)
+/// currently active.
+struct PPConditionalInfo {
+ /// IfLoc - Location where the conditional started.
+ ///
+ SourceLocation IfLoc;
+
+ /// WasSkipping - True if this was contained in a skipping directive, e.g.
+ /// in a "#if 0" block.
+ bool WasSkipping;
+
+ /// FoundNonSkip - True if we have emitted tokens already, and now we're in
+  /// an #else or #elif block. Only useful in Skipping blocks.
+ bool FoundNonSkip;
+
+ /// FoundElse - True if we've seen a #else in this block. If so,
+ /// #elif/#else directives are not allowed.
+ bool FoundElse;
+};
+
+} // end namespace clang
+
+namespace llvm {
+ template <>
+ struct isPodLike<clang::Token> { static const bool value = true; };
+} // end namespace llvm
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/TokenConcatenation.h b/contrib/llvm/tools/clang/include/clang/Lex/TokenConcatenation.h
new file mode 100644
index 0000000..551300f
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/TokenConcatenation.h
@@ -0,0 +1,72 @@
+//===--- TokenConcatenation.h - Token Concatenation Avoidance ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TokenConcatenation class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LEX_TOKEN_CONCATENATION_H
+#define CLANG_LEX_TOKEN_CONCATENATION_H
+
+#include "clang/Basic/TokenKinds.h"
+
+namespace clang {
+ class Preprocessor;
+ class Token;
+
+ /// TokenConcatenation class, which answers the question of
+ /// "Is it safe to emit two tokens without a whitespace between them, or
+ /// would that cause implicit concatenation of the tokens?"
+ ///
+  /// For example, emitting two identifiers "foo" and "bar" next to each
+ /// other would cause the lexer to produce one "foobar" token. Emitting "1"
+ /// and ")" next to each other is safe.
+ ///
+ class TokenConcatenation {
+ Preprocessor &PP;
+
+ enum AvoidConcatInfo {
+ /// By default, a token never needs to avoid concatenation. Most tokens
+ /// (e.g. ',', ')', etc) don't cause a problem when concatenated.
+ aci_never_avoid_concat = 0,
+
+ /// aci_custom_firstchar - AvoidConcat contains custom code to handle this
+ /// token's requirements, and it needs to know the first character of the
+ /// token.
+ aci_custom_firstchar = 1,
+
+ /// aci_custom - AvoidConcat contains custom code to handle this token's
+ /// requirements, but it doesn't need to know the first character of the
+ /// token.
+ aci_custom = 2,
+
+ /// aci_avoid_equal - Many tokens cannot be safely followed by an '='
+ /// character. For example, "<<" turns into "<<=" when followed by an =.
+ aci_avoid_equal = 4
+ };
+
+ /// TokenInfo - This array contains information for each token on what
+ /// action to take when avoiding concatenation of tokens in the AvoidConcat
+ /// method.
+ char TokenInfo[tok::NUM_TOKENS];
+ public:
+ TokenConcatenation(Preprocessor &PP);
+
+ bool AvoidConcat(const Token &PrevPrevTok,
+ const Token &PrevTok,
+ const Token &Tok) const;
+
+ private:
+ /// IsIdentifierStringPrefix - Return true if the spelling of the token
+ /// is literally 'L', 'u', 'U', or 'u8'.
+ bool IsIdentifierStringPrefix(const Token &Tok) const;
+ };
+ } // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Lex/TokenLexer.h b/contrib/llvm/tools/clang/include/clang/Lex/TokenLexer.h
new file mode 100644
index 0000000..1330ad5
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Lex/TokenLexer.h
@@ -0,0 +1,187 @@
+//===--- TokenLexer.h - Lex from a token buffer -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TokenLexer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOKENLEXER_H
+#define LLVM_CLANG_TOKENLEXER_H
+
+#include "clang/Basic/SourceLocation.h"
+
+namespace clang {
+ class MacroInfo;
+ class Preprocessor;
+ class Token;
+ class MacroArgs;
+
+/// TokenLexer - This implements a lexer that returns tokens from a macro body
+/// or token stream instead of lexing from a character buffer. This is used for
+/// macro expansion and _Pragma handling, for example.
+///
+class TokenLexer {
+ /// Macro - The macro we are expanding from. This is null if expanding a
+ /// token stream.
+ ///
+ MacroInfo *Macro;
+
+ /// ActualArgs - The actual arguments specified for a function-like macro, or
+ /// null. The TokenLexer owns the pointed-to object.
+ MacroArgs *ActualArgs;
+
+ /// PP - The current preprocessor object we are expanding for.
+ ///
+ Preprocessor &PP;
+
+ /// Tokens - This is the pointer to an array of tokens that the macro is
+ /// defined to, with arguments expanded for function-like macros. If this is
+ /// a token stream, these are the tokens we are returning. This points into
+ /// the macro definition we are lexing from, a cache buffer that is owned by
+ /// the preprocessor, or some other buffer that we may or may not own
+ /// (depending on OwnsTokens).
+ /// Note that if it points into Preprocessor's cache buffer, the Preprocessor
+ /// may update the pointer as needed.
+ const Token *Tokens;
+ friend class Preprocessor;
+
+ /// NumTokens - This is the length of the Tokens array.
+ ///
+ unsigned NumTokens;
+
+ /// CurToken - This is the next token that Lex will return.
+ ///
+ unsigned CurToken;
+
+ /// ExpandLocStart/End - The source location range where this macro was
+ /// expanded.
+ SourceLocation ExpandLocStart, ExpandLocEnd;
+
+ /// \brief Source location pointing at the source location entry chunk that
+ /// was reserved for the current macro expansion.
+ SourceLocation MacroExpansionStart;
+
+ /// \brief The offset of the macro expansion in the
+ /// "source location address space".
+ unsigned MacroStartSLocOffset;
+
+ /// \brief Location of the macro definition.
+ SourceLocation MacroDefStart;
+ /// \brief Length of the macro definition.
+ unsigned MacroDefLength;
+
+ /// Lexical information about the expansion point of the macro: the identifier
+ /// that the macro expanded from had these properties.
+ bool AtStartOfLine : 1;
+ bool HasLeadingSpace : 1;
+
+ /// OwnsTokens - This is true if this TokenLexer allocated the Tokens
+ /// array, and thus needs to free it when destroyed. For simple object-like
+ /// macros (for example) we just point into the token buffer of the macro
+  /// definition; we don't make a copy of it.
+ bool OwnsTokens : 1;
+
+ /// DisableMacroExpansion - This is true when tokens lexed from the TokenLexer
+ /// should not be subject to further macro expansion.
+ bool DisableMacroExpansion : 1;
+
+ TokenLexer(const TokenLexer&); // DO NOT IMPLEMENT
+ void operator=(const TokenLexer&); // DO NOT IMPLEMENT
+public:
+ /// Create a TokenLexer for the specified macro with the specified actual
+ /// arguments. Note that this ctor takes ownership of the ActualArgs pointer.
+ /// ILEnd specifies the location of the ')' for a function-like macro or the
+ /// identifier for an object-like macro.
+ TokenLexer(Token &Tok, SourceLocation ILEnd, MacroArgs *ActualArgs,
+ Preprocessor &pp)
+ : Macro(0), ActualArgs(0), PP(pp), OwnsTokens(false) {
+ Init(Tok, ILEnd, ActualArgs);
+ }
+
+ /// Init - Initialize this TokenLexer to expand from the specified macro
+  /// with the specified argument information. Note that this method takes
+  /// ownership of the ActualArgs pointer. ILEnd specifies the location of the
+ /// ')' for a function-like macro or the identifier for an object-like macro.
+ void Init(Token &Tok, SourceLocation ILEnd, MacroArgs *ActualArgs);
+
+ /// Create a TokenLexer for the specified token stream. If 'OwnsTokens' is
+ /// specified, this takes ownership of the tokens and delete[]'s them when
+ /// the token lexer is empty.
+ TokenLexer(const Token *TokArray, unsigned NumToks, bool DisableExpansion,
+ bool ownsTokens, Preprocessor &pp)
+ : Macro(0), ActualArgs(0), PP(pp), OwnsTokens(false) {
+ Init(TokArray, NumToks, DisableExpansion, ownsTokens);
+ }
+
+ /// Init - Initialize this TokenLexer with the specified token stream.
+  /// Ownership of the token array is determined by the OwnsTokens parameter.
+ ///
+ /// DisableExpansion is true when macro expansion of tokens lexed from this
+ /// stream should be disabled.
+ void Init(const Token *TokArray, unsigned NumToks,
+ bool DisableMacroExpansion, bool OwnsTokens);
+
+ ~TokenLexer() { destroy(); }
+
+ /// isNextTokenLParen - If the next token lexed will pop this macro off the
+ /// expansion stack, return 2. If the next unexpanded token is a '(', return
+ /// 1, otherwise return 0.
+ unsigned isNextTokenLParen() const;
+
+ /// Lex - Lex and return a token from this macro stream.
+ void Lex(Token &Tok);
+
+ /// isParsingPreprocessorDirective - Return true if we are in the middle of a
+ /// preprocessor directive.
+ bool isParsingPreprocessorDirective() const;
+
+private:
+ void destroy();
+
+ /// isAtEnd - Return true if the next lex call will pop this macro off the
+ /// include stack.
+ bool isAtEnd() const {
+ return CurToken == NumTokens;
+ }
+
+ /// PasteTokens - Tok is the LHS of a ## operator, and CurToken is the ##
+ /// operator. Read the ## and RHS, and paste the LHS/RHS together. If there
+  /// is another ## after it, chomp it iteratively. Return the result as
+ /// Tok. If this returns true, the caller should immediately return the
+ /// token.
+ bool PasteTokens(Token &Tok);
+
+ /// Expand the arguments of a function-like macro so that we can quickly
+ /// return preexpanded tokens from Tokens.
+ void ExpandFunctionArguments();
+
+  /// HandleMicrosoftCommentPaste - In Microsoft compatibility mode, /##/ pastes
+ /// together to form a comment that comments out everything in the current
+ /// macro, other active macros, and anything left on the current physical
+ /// source line of the expanded buffer. Handle this by returning the
+ /// first token on the next line.
+ void HandleMicrosoftCommentPaste(Token &Tok);
+
+ /// \brief If \arg loc is a FileID and points inside the current macro
+ /// definition, returns the appropriate source location pointing at the
+ /// macro expansion source location entry.
+ SourceLocation getExpansionLocForMacroDefLoc(SourceLocation loc) const;
+
+ /// \brief Creates SLocEntries and updates the locations of macro argument
+ /// tokens to their new expanded locations.
+ ///
+ /// \param ArgIdSpellLoc the location of the macro argument id inside the
+ /// macro definition.
+ void updateLocForMacroArgTokens(SourceLocation ArgIdSpellLoc,
+ Token *begin_tokens, Token *end_tokens);
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Parse/ParseAST.h b/contrib/llvm/tools/clang/include/clang/Parse/ParseAST.h
new file mode 100644
index 0000000..2405a0c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Parse/ParseAST.h
@@ -0,0 +1,49 @@
+//===--- ParseAST.h - Define the ParseAST method ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the clang::ParseAST method.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_PARSE_PARSEAST_H
+#define LLVM_CLANG_PARSE_PARSEAST_H
+
+#include "clang/Basic/LangOptions.h"
+
+namespace clang {
+ class Preprocessor;
+ class ASTConsumer;
+ class ASTContext;
+ class CodeCompleteConsumer;
+ class Sema;
+
+ /// \brief Parse the entire file specified, notifying the ASTConsumer as
+ /// the file is parsed.
+ ///
+ /// This operation inserts the parsed decls into the translation
+ /// unit held by Ctx.
+ ///
+ /// \param TUKind The kind of translation unit being parsed.
+ ///
+ /// \param CompletionConsumer If given, an object to consume code completion
+ /// results.
+ void ParseAST(Preprocessor &pp, ASTConsumer *C,
+ ASTContext &Ctx, bool PrintStats = false,
+ TranslationUnitKind TUKind = TU_Complete,
+ CodeCompleteConsumer *CompletionConsumer = 0,
+ bool SkipFunctionBodies = false);
+
+ /// \brief Parse the main file known to the preprocessor, producing an
+ /// abstract syntax tree.
+ void ParseAST(Sema &S, bool PrintStats = false,
+ bool SkipFunctionBodies = false);
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Parse/ParseDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Parse/ParseDiagnostic.h
new file mode 100644
index 0000000..0d47292
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Parse/ParseDiagnostic.h
@@ -0,0 +1,28 @@
+//===--- DiagnosticParse.h - Diagnostics for libparse -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_DIAGNOSTICPARSE_H
+#define LLVM_CLANG_DIAGNOSTICPARSE_H
+
+#include "clang/Basic/Diagnostic.h"
+
+namespace clang {
+ namespace diag {
+ enum {
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
+#define PARSESTART
+#include "clang/Basic/DiagnosticParseKinds.inc"
+#undef DIAG
+ NUM_BUILTIN_PARSE_DIAGNOSTICS
+ };
+ } // end namespace diag
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Parse/Parser.h b/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
new file mode 100644
index 0000000..dff8510
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Parse/Parser.h
@@ -0,0 +1,2224 @@
+//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Parser interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_PARSE_PARSER_H
+#define LLVM_CLANG_PARSE_PARSER_H
+
+#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/DelayedCleanupPool.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/CodeCompletionHandler.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/DeclSpec.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include <stack>
+
+namespace clang {
+ class PragmaHandler;
+ class Scope;
+ class DeclGroupRef;
+ class DiagnosticBuilder;
+ class Parser;
+ class PragmaUnusedHandler;
+ class ColonProtectionRAIIObject;
+ class InMessageExpressionRAIIObject;
+ class PoisonSEHIdentifiersRAIIObject;
+ class VersionTuple;
+
+/// PrettyStackTraceParserEntry - If a crash happens while the parser is active,
+/// an entry is printed for it.
+class PrettyStackTraceParserEntry : public llvm::PrettyStackTraceEntry {
+ const Parser &P;
+public:
+ PrettyStackTraceParserEntry(const Parser &p) : P(p) {}
+ virtual void print(raw_ostream &OS) const;
+};
+
+/// PrecedenceLevels - These are precedences for the binary/ternary
+/// operators in the C99 grammar. These have been named to relate
+/// to the C99 grammar productions. Low precedence numbers bind
+/// more weakly than high numbers.
+namespace prec {
+ enum Level {
+ Unknown = 0, // Not binary operator.
+ Comma = 1, // ,
+ Assignment = 2, // =, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=
+ Conditional = 3, // ?
+ LogicalOr = 4, // ||
+ LogicalAnd = 5, // &&
+ InclusiveOr = 6, // |
+ ExclusiveOr = 7, // ^
+ And = 8, // &
+ Equality = 9, // ==, !=
+ Relational = 10, // >=, <=, >, <
+ Shift = 11, // <<, >>
+ Additive = 12, // -, +
+ Multiplicative = 13, // *, /, %
+ PointerToMember = 14 // .*, ->*
+ };
+}
+
+/// Parser - This implements a parser for the C family of languages. After
+/// parsing units of the grammar, productions are invoked to handle whatever has
+/// been read.
+///
+class Parser : public CodeCompletionHandler {
+ friend class PragmaUnusedHandler;
+ friend class ColonProtectionRAIIObject;
+ friend class InMessageExpressionRAIIObject;
+ friend class PoisonSEHIdentifiersRAIIObject;
+ friend class ParenBraceBracketBalancer;
+
+ Preprocessor &PP;
+
+  /// Tok - The current token we are peeking at. All parsing methods assume
+ /// that this is valid.
+ Token Tok;
+
+ // PrevTokLocation - The location of the token we previously
+ // consumed. This token is used for diagnostics where we expected to
+ // see a token following another token (e.g., the ';' at the end of
+ // a statement).
+ SourceLocation PrevTokLocation;
+
+ unsigned short ParenCount, BracketCount, BraceCount;
+
+ /// Actions - These are the callbacks we invoke as we parse various constructs
+ /// in the file.
+ Sema &Actions;
+
+ DiagnosticsEngine &Diags;
+
+ /// ScopeCache - Cache scopes to reduce malloc traffic.
+ enum { ScopeCacheSize = 16 };
+ unsigned NumCachedScopes;
+ Scope *ScopeCache[ScopeCacheSize];
+
+ /// Identifiers used for SEH handling in Borland. These are only
+  /// allowed in particular circumstances.
+ // __except block
+ IdentifierInfo *Ident__exception_code,
+ *Ident___exception_code,
+ *Ident_GetExceptionCode;
+ // __except filter expression
+ IdentifierInfo *Ident__exception_info,
+ *Ident___exception_info,
+ *Ident_GetExceptionInfo;
+ // __finally
+ IdentifierInfo *Ident__abnormal_termination,
+ *Ident___abnormal_termination,
+ *Ident_AbnormalTermination;
+
+ /// Contextual keywords for Microsoft extensions.
+ IdentifierInfo *Ident__except;
+
+ /// Ident_super - IdentifierInfo for "super", to support fast
+ /// comparison.
+ IdentifierInfo *Ident_super;
+ /// Ident_vector and Ident_pixel - cached IdentifierInfo's for
+ /// "vector" and "pixel" fast comparison. Only present if
+ /// AltiVec enabled.
+ IdentifierInfo *Ident_vector;
+ IdentifierInfo *Ident_pixel;
+
+ /// Objective-C contextual keywords.
+ mutable IdentifierInfo *Ident_instancetype;
+
+ /// \brief Identifier for "introduced".
+ IdentifierInfo *Ident_introduced;
+
+ /// \brief Identifier for "deprecated".
+ IdentifierInfo *Ident_deprecated;
+
+ /// \brief Identifier for "obsoleted".
+ IdentifierInfo *Ident_obsoleted;
+
+ /// \brief Identifier for "unavailable".
+ IdentifierInfo *Ident_unavailable;
+
+ /// \brief Identifier for "message".
+ IdentifierInfo *Ident_message;
+
+ /// C++0x contextual keywords.
+ mutable IdentifierInfo *Ident_final;
+ mutable IdentifierInfo *Ident_override;
+
+ OwningPtr<PragmaHandler> AlignHandler;
+ OwningPtr<PragmaHandler> GCCVisibilityHandler;
+ OwningPtr<PragmaHandler> OptionsHandler;
+ OwningPtr<PragmaHandler> PackHandler;
+ OwningPtr<PragmaHandler> MSStructHandler;
+ OwningPtr<PragmaHandler> UnusedHandler;
+ OwningPtr<PragmaHandler> WeakHandler;
+ OwningPtr<PragmaHandler> RedefineExtnameHandler;
+ OwningPtr<PragmaHandler> FPContractHandler;
+ OwningPtr<PragmaHandler> OpenCLExtensionHandler;
+
+ /// Whether the '>' token acts as an operator or not. This will be
+ /// true except when we are parsing an expression within a C++
+ /// template argument list, where the '>' closes the template
+ /// argument list.
+ bool GreaterThanIsOperator;
+
+ /// ColonIsSacred - When this is false, we aggressively try to recover from
+ /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
+ /// safe in case statements and a few other things. This is managed by the
+ /// ColonProtectionRAIIObject RAII object.
+ bool ColonIsSacred;
+
+  /// \brief When true, we are directly inside an Objective-C message
+ /// send expression.
+ ///
+ /// This is managed by the \c InMessageExpressionRAIIObject class, and
+ /// should not be set directly.
+ bool InMessageExpression;
+
+ /// The "depth" of the template parameters currently being parsed.
+ unsigned TemplateParameterDepth;
+
+ /// Factory object for creating AttributeList objects.
+ AttributeFactory AttrFactory;
+
+ /// \brief Gathers and cleans up objects when parsing of a top-level
+ /// declaration is finished.
+ DelayedCleanupPool TopLevelDeclCleanupPool;
+
+ IdentifierInfo *getSEHExceptKeyword();
+
+ bool SkipFunctionBodies;
+
+public:
+ Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
+ ~Parser();
+
+ const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
+ const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
+ Preprocessor &getPreprocessor() const { return PP; }
+ Sema &getActions() const { return Actions; }
+
+ const Token &getCurToken() const { return Tok; }
+ Scope *getCurScope() const { return Actions.getCurScope(); }
+
+ Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
+
+ // Type forwarding. All of these are statically 'void*', but they may all be
+ // different actual classes based on the actions in place.
+ typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
+ typedef OpaquePtr<TemplateName> TemplateTy;
+
+ typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
+
+ typedef clang::ExprResult ExprResult;
+ typedef clang::StmtResult StmtResult;
+ typedef clang::BaseResult BaseResult;
+ typedef clang::MemInitResult MemInitResult;
+ typedef clang::TypeResult TypeResult;
+
+ typedef Expr *ExprArg;
+ typedef ASTMultiPtr<Stmt*> MultiStmtArg;
+ typedef Sema::FullExprArg FullExprArg;
+
+  /// Adorns an ExprResult with Actions to make it an ExprResult
+ ExprResult Owned(ExprResult res) {
+ return ExprResult(res);
+ }
+  /// Adorns a StmtResult with Actions to make it a StmtResult
+ StmtResult Owned(StmtResult res) {
+ return StmtResult(res);
+ }
+
+ ExprResult ExprError() { return ExprResult(true); }
+ StmtResult StmtError() { return StmtResult(true); }
+
+ ExprResult ExprError(const DiagnosticBuilder &) { return ExprError(); }
+ StmtResult StmtError(const DiagnosticBuilder &) { return StmtError(); }
+
+ ExprResult ExprEmpty() { return ExprResult(false); }
+
+ // Parsing methods.
+
+  /// ParseTranslationUnit - All-in-one method that initializes, parses, and
+ /// shuts down the parser.
+ void ParseTranslationUnit();
+
+ /// Initialize - Warm up the parser.
+ ///
+ void Initialize();
+
+ /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
+ /// the EOF was encountered.
+ bool ParseTopLevelDecl(DeclGroupPtrTy &Result);
+
+private:
+ //===--------------------------------------------------------------------===//
+ // Low-Level token peeking and consumption methods.
+ //
+
+ /// isTokenParen - Return true if the cur token is '(' or ')'.
+ bool isTokenParen() const {
+ return Tok.getKind() == tok::l_paren || Tok.getKind() == tok::r_paren;
+ }
+ /// isTokenBracket - Return true if the cur token is '[' or ']'.
+ bool isTokenBracket() const {
+ return Tok.getKind() == tok::l_square || Tok.getKind() == tok::r_square;
+ }
+ /// isTokenBrace - Return true if the cur token is '{' or '}'.
+ bool isTokenBrace() const {
+ return Tok.getKind() == tok::l_brace || Tok.getKind() == tok::r_brace;
+ }
+
+ /// isTokenStringLiteral - True if this token is a string-literal.
+ ///
+ bool isTokenStringLiteral() const {
+ return Tok.getKind() == tok::string_literal ||
+ Tok.getKind() == tok::wide_string_literal ||
+ Tok.getKind() == tok::utf8_string_literal ||
+ Tok.getKind() == tok::utf16_string_literal ||
+ Tok.getKind() == tok::utf32_string_literal;
+ }
+
+  /// \brief Returns true if the current token is '=' or is a common typo for
+  /// '='. In the typo case, a fix-it hint suggesting '=' is emitted.
+ bool isTokenEqualOrEqualTypo();
+
+ /// ConsumeToken - Consume the current 'peek token' and lex the next one.
+ /// This does not work with all kinds of tokens: strings and specific other
+ /// tokens must be consumed with custom methods below. This returns the
+ /// location of the consumed token.
+ SourceLocation ConsumeToken() {
+ assert(!isTokenStringLiteral() && !isTokenParen() && !isTokenBracket() &&
+ !isTokenBrace() &&
+ "Should consume special tokens with Consume*Token");
+
+ if (Tok.is(tok::code_completion))
+ return handleUnexpectedCodeCompletionToken();
+
+ PrevTokLocation = Tok.getLocation();
+ PP.Lex(Tok);
+ return PrevTokLocation;
+ }
+
+ /// ConsumeAnyToken - Dispatch to the right Consume* method based on the
+ /// current token type. This should only be used in cases where the type of
+ /// the token really isn't known, e.g. in error recovery.
+ SourceLocation ConsumeAnyToken() {
+ if (isTokenParen())
+ return ConsumeParen();
+ else if (isTokenBracket())
+ return ConsumeBracket();
+ else if (isTokenBrace())
+ return ConsumeBrace();
+ else if (isTokenStringLiteral())
+ return ConsumeStringToken();
+ else
+ return ConsumeToken();
+ }
+
+ /// ConsumeParen - This consume method keeps the paren count up-to-date.
+ ///
+ SourceLocation ConsumeParen() {
+ assert(isTokenParen() && "wrong consume method");
+ if (Tok.getKind() == tok::l_paren)
+ ++ParenCount;
+ else if (ParenCount)
+ --ParenCount; // Don't let unbalanced )'s drive the count negative.
+ PrevTokLocation = Tok.getLocation();
+ PP.Lex(Tok);
+ return PrevTokLocation;
+ }
+
+ /// ConsumeBracket - This consume method keeps the bracket count up-to-date.
+ ///
+ SourceLocation ConsumeBracket() {
+ assert(isTokenBracket() && "wrong consume method");
+ if (Tok.getKind() == tok::l_square)
+ ++BracketCount;
+ else if (BracketCount)
+ --BracketCount; // Don't let unbalanced ]'s drive the count negative.
+
+ PrevTokLocation = Tok.getLocation();
+ PP.Lex(Tok);
+ return PrevTokLocation;
+ }
+
+ /// ConsumeBrace - This consume method keeps the brace count up-to-date.
+ ///
+ SourceLocation ConsumeBrace() {
+ assert(isTokenBrace() && "wrong consume method");
+ if (Tok.getKind() == tok::l_brace)
+ ++BraceCount;
+ else if (BraceCount)
+ --BraceCount; // Don't let unbalanced }'s drive the count negative.
+
+ PrevTokLocation = Tok.getLocation();
+ PP.Lex(Tok);
+ return PrevTokLocation;
+ }
+
+ /// ConsumeStringToken - Consume the current 'peek token', lexing a new one
+  /// and returning the location of the consumed token. This method is
+  /// specific to strings, as it handles string literal concatenation, as per
+  /// C99 5.1.1.2, translation
+ /// phase #6.
+ SourceLocation ConsumeStringToken() {
+ assert(isTokenStringLiteral() &&
+ "Should only consume string literals with this method");
+ PrevTokLocation = Tok.getLocation();
+ PP.Lex(Tok);
+ return PrevTokLocation;
+ }
+
+ /// \brief Consume the current code-completion token.
+ ///
+ /// This routine should be called to consume the code-completion token once
+ /// a code-completion action has already been invoked.
+ SourceLocation ConsumeCodeCompletionToken() {
+ assert(Tok.is(tok::code_completion));
+ PrevTokLocation = Tok.getLocation();
+ PP.Lex(Tok);
+ return PrevTokLocation;
+ }
+
+  /// \brief When we are consuming a code-completion token without having
+  /// matched a specific position in the grammar, provide code-completion
+  /// results based on context.
+ ///
+ /// \returns the source location of the code-completion token.
+ SourceLocation handleUnexpectedCodeCompletionToken();
+
+ /// \brief Abruptly cut off parsing; mainly used when we have reached the
+ /// code-completion point.
+ void cutOffParsing() {
+ PP.setCodeCompletionReached();
+ // Cut off parsing by acting as if we reached the end-of-file.
+ Tok.setKind(tok::eof);
+ }
+
+ /// \brief Handle the annotation token produced for #pragma unused(...)
+ void HandlePragmaUnused();
+
+ /// \brief Handle the annotation token produced for
+ /// #pragma GCC visibility...
+ void HandlePragmaVisibility();
+
+ /// \brief Handle the annotation token produced for
+ /// #pragma pack...
+ void HandlePragmaPack();
+
+ /// GetLookAheadToken - This peeks ahead N tokens and returns that token
+ /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
+ /// returns the token after Tok, etc.
+ ///
+ /// Note that this differs from the Preprocessor's LookAhead method, because
+ /// the Parser always has one token lexed that the preprocessor doesn't.
+ ///
+ const Token &GetLookAheadToken(unsigned N) {
+ if (N == 0 || Tok.is(tok::eof)) return Tok;
+ return PP.LookAhead(N-1);
+ }
+
+ /// NextToken - This peeks ahead one token and returns it without
+ /// consuming it.
+ const Token &NextToken() {
+ return PP.LookAhead(0);
+ }
+
+ /// \brief RAII class that helps handle the parsing of an open/close delimiter
+ /// pair, such as braces { ... } or parentheses ( ... ).
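+  ///
+  /// A typical use (illustrative sketch only) when parsing a parenthesized
+  /// construct:
+  ///
+  ///   BalancedDelimiterTracker T(*this, tok::l_paren);
+  ///   if (T.consumeOpen()) return;   // not at '(' or nested too deeply
+  ///   // ... parse the contents ...
+  ///   T.consumeClose();              // consumes ')' or emits a diagnostic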
+ class BalancedDelimiterTracker {
+ Parser& P;
+ tok::TokenKind Kind, Close;
+ SourceLocation (Parser::*Consumer)();
+ SourceLocation LOpen, LClose;
+
+ unsigned short &getDepth() {
+ switch (Kind) {
+ case tok::l_brace: return P.BraceCount;
+ case tok::l_square: return P.BracketCount;
+ case tok::l_paren: return P.ParenCount;
+ default: llvm_unreachable("Wrong token kind");
+ }
+ }
+
+ enum { MaxDepth = 512 };
+
+ bool diagnoseOverflow();
+ bool diagnoseMissingClose();
+
+ public:
+ BalancedDelimiterTracker(Parser& p, tok::TokenKind k) : P(p), Kind(k) {
+ switch (Kind) {
+ default: llvm_unreachable("Unexpected balanced token");
+ case tok::l_brace:
+ Close = tok::r_brace;
+ Consumer = &Parser::ConsumeBrace;
+ break;
+ case tok::l_paren:
+ Close = tok::r_paren;
+ Consumer = &Parser::ConsumeParen;
+ break;
+
+ case tok::l_square:
+ Close = tok::r_square;
+ Consumer = &Parser::ConsumeBracket;
+ break;
+ }
+ }
+
+ SourceLocation getOpenLocation() const { return LOpen; }
+ SourceLocation getCloseLocation() const { return LClose; }
+ SourceRange getRange() const { return SourceRange(LOpen, LClose); }
+
+ bool consumeOpen() {
+ if (!P.Tok.is(Kind))
+ return true;
+
+ if (getDepth() < MaxDepth) {
+ LOpen = (P.*Consumer)();
+ return false;
+ }
+
+ return diagnoseOverflow();
+ }
+
+ bool expectAndConsume(unsigned DiagID,
+ const char *Msg = "",
+ tok::TokenKind SkipToTok = tok::unknown);
+ bool consumeClose() {
+ if (P.Tok.is(Close)) {
+ LClose = (P.*Consumer)();
+ return false;
+ }
+
+ return diagnoseMissingClose();
+ }
+ void skipToEnd();
+ };
+
+ /// getTypeAnnotation - Read a parsed type out of an annotation token.
+ static ParsedType getTypeAnnotation(Token &Tok) {
+ return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
+ }
+
+ static void setTypeAnnotation(Token &Tok, ParsedType T) {
+ Tok.setAnnotationValue(T.getAsOpaquePtr());
+ }
+
+ /// \brief Read an already-translated primary expression out of an annotation
+ /// token.
+ static ExprResult getExprAnnotation(Token &Tok) {
+ if (Tok.getAnnotationValue())
+ return ExprResult((Expr *)Tok.getAnnotationValue());
+
+ return ExprResult(true);
+ }
+
+ /// \brief Set the primary expression corresponding to the given annotation
+ /// token.
+ static void setExprAnnotation(Token &Tok, ExprResult ER) {
+ if (ER.isInvalid())
+ Tok.setAnnotationValue(0);
+ else
+ Tok.setAnnotationValue(ER.get());
+ }
+
+ // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to
+ // find a type name by attempting typo correction.
+ bool TryAnnotateTypeOrScopeToken(bool EnteringContext = false,
+ bool NeedType = false);
+ bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
+
+ /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
+ /// replacing them with the non-context-sensitive keywords. This returns
+ /// true if the token was replaced.
+ bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
+ const char *&PrevSpec, unsigned &DiagID,
+ bool &isInvalid) {
+ if (!getLangOpts().AltiVec ||
+ (Tok.getIdentifierInfo() != Ident_vector &&
+ Tok.getIdentifierInfo() != Ident_pixel))
+ return false;
+
+ return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
+ }
+
+ /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
+ /// identifier token, replacing it with the non-context-sensitive __vector.
+ /// This returns true if the token was replaced.
+ bool TryAltiVecVectorToken() {
+ if (!getLangOpts().AltiVec ||
+ Tok.getIdentifierInfo() != Ident_vector) return false;
+ return TryAltiVecVectorTokenOutOfLine();
+ }
+
+ bool TryAltiVecVectorTokenOutOfLine();
+ bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
+ const char *&PrevSpec, unsigned &DiagID,
+ bool &isInvalid);
+
+ /// \brief Get the TemplateIdAnnotation from the token and put it in the
+ /// cleanup pool so that it gets destroyed when parsing the current top level
+ /// declaration is finished.
+ TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
+
+ /// TentativeParsingAction - An object that is used as a kind of "tentative
+ /// parsing transaction". It gets instantiated to mark the token position and
+ /// after the token consumption is done, Commit() or Revert() is called to
+ /// either "commit the consumed tokens" or revert to the previously marked
+ /// token position. Example:
+ ///
+ /// TentativeParsingAction TPA(*this);
+ /// ConsumeToken();
+ /// ....
+ /// TPA.Revert();
+ ///
+ class TentativeParsingAction {
+ Parser &P;
+ Token PrevTok;
+ bool isActive;
+
+ public:
+ explicit TentativeParsingAction(Parser& p) : P(p) {
+ PrevTok = P.Tok;
+ P.PP.EnableBacktrackAtThisPos();
+ isActive = true;
+ }
+ void Commit() {
+ assert(isActive && "Parsing action was finished!");
+ P.PP.CommitBacktrackedTokens();
+ isActive = false;
+ }
+ void Revert() {
+ assert(isActive && "Parsing action was finished!");
+ P.PP.Backtrack();
+ P.Tok = PrevTok;
+ isActive = false;
+ }
+ ~TentativeParsingAction() {
+ assert(!isActive && "Forgot to call Commit or Revert!");
+ }
+ };
+
+ /// ObjCDeclContextSwitch - An object used to switch context from
+  /// an Objective-C decl context to its enclosing decl context and
+ /// back.
+ class ObjCDeclContextSwitch {
+ Parser &P;
+ Decl *DC;
+ public:
+ explicit ObjCDeclContextSwitch(Parser &p) : P(p),
+ DC(p.getObjCDeclContext()) {
+ if (DC)
+ P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
+ }
+ ~ObjCDeclContextSwitch() {
+ if (DC)
+ P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
+ }
+ };
+
+ /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
+ /// input. If so, it is consumed and false is returned.
+ ///
+ /// If the input is malformed, this emits the specified diagnostic. Next, if
+ /// SkipToTok is specified, it calls SkipUntil(SkipToTok). Finally, true is
+ /// returned.
+ bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag,
+ const char *DiagMsg = "",
+ tok::TokenKind SkipToTok = tok::unknown);
+
+ /// \brief The parser expects a semicolon and, if present, will consume it.
+ ///
+ /// If the next token is not a semicolon, this emits the specified diagnostic,
+ /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
+ /// to the semicolon, consumes that extra token.
+ bool ExpectAndConsumeSemi(unsigned DiagID);
+
+ //===--------------------------------------------------------------------===//
+ // Scope manipulation
+
+ /// ParseScope - Introduces a new scope for parsing. The kind of
+ /// scope is determined by ScopeFlags. Objects of this type should
+ /// be created on the stack to coincide with the position where the
+ /// parser enters the new scope, and this object's constructor will
+ /// create that new scope. Similarly, once the object is destroyed
+ /// the parser will exit the scope.
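+  ///
+  /// For example (illustrative sketch only):
+  ///
+  ///   ParseScope BodyScope(this, Scope::DeclScope);
+  ///   // ... parse declarations; they land in the new scope ...
+  ///   // the scope is exited automatically when BodyScope is destroyed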
+ class ParseScope {
+ Parser *Self;
+ ParseScope(const ParseScope&); // do not implement
+ ParseScope& operator=(const ParseScope&); // do not implement
+
+ public:
+ // ParseScope - Construct a new object to manage a scope in the
+ // parser Self where the new Scope is created with the flags
+ // ScopeFlags, but only when ManageScope is true (the default). If
+ // ManageScope is false, this object does nothing.
+ ParseScope(Parser *Self, unsigned ScopeFlags, bool ManageScope = true)
+ : Self(Self) {
+ if (ManageScope)
+ Self->EnterScope(ScopeFlags);
+ else
+ this->Self = 0;
+ }
+
+ // Exit - Exit the scope associated with this object now, rather
+ // than waiting until the object is destroyed.
+ void Exit() {
+ if (Self) {
+ Self->ExitScope();
+ Self = 0;
+ }
+ }
+
+ ~ParseScope() {
+ Exit();
+ }
+ };
+
+ /// EnterScope - Start a new scope.
+ void EnterScope(unsigned ScopeFlags);
+
+ /// ExitScope - Pop a scope off the scope stack.
+ void ExitScope();
+
+ /// \brief RAII object used to modify the scope flags for the current scope.
+ class ParseScopeFlags {
+ Scope *CurScope;
+ unsigned OldFlags;
+ ParseScopeFlags(const ParseScopeFlags &); // do not implement
+ void operator=(const ParseScopeFlags &); // do not implement
+
+ public:
+ ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
+ ~ParseScopeFlags();
+ };
+
+ //===--------------------------------------------------------------------===//
+ // Diagnostic Emission and Error recovery.
+
+public:
+ DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
+ DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
+
+private:
+ void SuggestParentheses(SourceLocation Loc, unsigned DK,
+ SourceRange ParenRange);
+ void CheckNestedObjCContexts(SourceLocation AtLoc);
+
+ /// SkipUntil - Read tokens until we get to the specified token, then consume
+ /// it (unless DontConsume is true). Because we cannot guarantee that the
+ /// token will ever occur, this skips to the next token, or to some likely
+ /// good stopping point. If StopAtSemi is true, skipping will stop at a ';'
+ /// character.
+ ///
+ /// If SkipUntil finds the specified token, it returns true, otherwise it
+ /// returns false.
+ bool SkipUntil(tok::TokenKind T, bool StopAtSemi = true,
+ bool DontConsume = false, bool StopAtCodeCompletion = false) {
+ return SkipUntil(llvm::makeArrayRef(T), StopAtSemi, DontConsume,
+ StopAtCodeCompletion);
+ }
+ bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, bool StopAtSemi = true,
+ bool DontConsume = false, bool StopAtCodeCompletion = false) {
+ tok::TokenKind TokArray[] = {T1, T2};
+ return SkipUntil(TokArray, StopAtSemi, DontConsume,StopAtCodeCompletion);
+ }
+ bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
+ bool StopAtSemi = true, bool DontConsume = false,
+ bool StopAtCodeCompletion = false) {
+ tok::TokenKind TokArray[] = {T1, T2, T3};
+ return SkipUntil(TokArray, StopAtSemi, DontConsume,StopAtCodeCompletion);
+ }
+ bool SkipUntil(ArrayRef<tok::TokenKind> Toks, bool StopAtSemi = true,
+ bool DontConsume = false, bool StopAtCodeCompletion = false);
+
+ /// SkipMalformedDecl - Read tokens until we get to some likely good stopping
+ /// point for skipping past a simple-declaration.
+ void SkipMalformedDecl();
+
+ //===--------------------------------------------------------------------===//
+ // Lexing and parsing of C++ inline methods.
+
+ struct ParsingClass;
+
+ /// [class.mem]p1: "... the class is regarded as complete within
+ /// - function bodies
+ /// - default arguments
+ /// - exception-specifications (TODO: C++0x)
+ /// - and brace-or-equal-initializers for non-static data members
+ /// (including such things in nested classes)."
+ /// LateParsedDeclarations build the tree of those elements so they can
+ /// be parsed after parsing the top-level class.
+ class LateParsedDeclaration {
+ public:
+ virtual ~LateParsedDeclaration();
+
+ virtual void ParseLexedMethodDeclarations();
+ virtual void ParseLexedMemberInitializers();
+ virtual void ParseLexedMethodDefs();
+ virtual void ParseLexedAttributes();
+ };
+
+ /// Inner node of the LateParsedDeclaration tree that parses
+ /// all its members recursively.
+ class LateParsedClass : public LateParsedDeclaration {
+ public:
+ LateParsedClass(Parser *P, ParsingClass *C);
+ virtual ~LateParsedClass();
+
+ virtual void ParseLexedMethodDeclarations();
+ virtual void ParseLexedMemberInitializers();
+ virtual void ParseLexedMethodDefs();
+ virtual void ParseLexedAttributes();
+
+ private:
+ Parser *Self;
+ ParsingClass *Class;
+ };
+
+ /// Contains the lexed tokens of an attribute with arguments that
+ /// may reference member variables and so need to be parsed at the
+ /// end of the class declaration after parsing all other member
+  /// declarations.
+ /// FIXME: Perhaps we should change the name of LateParsedDeclaration to
+ /// LateParsedTokens.
+ struct LateParsedAttribute : public LateParsedDeclaration {
+ Parser *Self;
+ CachedTokens Toks;
+ IdentifierInfo &AttrName;
+ SourceLocation AttrNameLoc;
+ SmallVector<Decl*, 2> Decls;
+
+ explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
+ SourceLocation Loc)
+ : Self(P), AttrName(Name), AttrNameLoc(Loc) {}
+
+ virtual void ParseLexedAttributes();
+
+ void addDecl(Decl *D) { Decls.push_back(D); }
+ };
+
+ /// A list of late parsed attributes. Used by ParseGNUAttributes.
+ typedef llvm::SmallVector<LateParsedAttribute*, 2> LateParsedAttrList;
+
+
+ /// Contains the lexed tokens of a member function definition
+ /// which needs to be parsed at the end of the class declaration
+ /// after parsing all other member declarations.
+ struct LexedMethod : public LateParsedDeclaration {
+ Parser *Self;
+ Decl *D;
+ CachedTokens Toks;
+
+ /// \brief Whether this member function had an associated template
+    /// scope. When true, D is a template declaration;
+    /// otherwise, it is a member function declaration.
+ bool TemplateScope;
+
+ explicit LexedMethod(Parser* P, Decl *MD)
+ : Self(P), D(MD), TemplateScope(false) {}
+
+ virtual void ParseLexedMethodDefs();
+ };
+
+ /// LateParsedDefaultArgument - Keeps track of a parameter that may
+ /// have a default argument that cannot be parsed yet because it
+ /// occurs within a member function declaration inside the class
+ /// (C++ [class.mem]p2).
+ struct LateParsedDefaultArgument {
+ explicit LateParsedDefaultArgument(Decl *P,
+ CachedTokens *Toks = 0)
+ : Param(P), Toks(Toks) { }
+
+ /// Param - The parameter declaration for this parameter.
+ Decl *Param;
+
+ /// Toks - The sequence of tokens that comprises the default
+ /// argument expression, not including the '=' or the terminating
+ /// ')' or ','. This will be NULL for parameters that have no
+ /// default argument.
+ CachedTokens *Toks;
+ };
+
+ /// LateParsedMethodDeclaration - A method declaration inside a class that
+ /// contains at least one entity whose parsing needs to be delayed
+  /// until the class itself is completely defined, such as a default
+ /// argument (C++ [class.mem]p2).
+ struct LateParsedMethodDeclaration : public LateParsedDeclaration {
+ explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
+ : Self(P), Method(M), TemplateScope(false) { }
+
+ virtual void ParseLexedMethodDeclarations();
+
+ Parser* Self;
+
+ /// Method - The method declaration.
+ Decl *Method;
+
+ /// \brief Whether this member function had an associated template
+    /// scope. When true, Method is a template declaration;
+    /// otherwise, it is a member function declaration.
+ bool TemplateScope;
+
+ /// DefaultArgs - Contains the parameters of the function and
+ /// their default arguments. At least one of the parameters will
+ /// have a default argument, but all of the parameters of the
+ /// method will be stored so that they can be reintroduced into
+ /// scope at the appropriate times.
+ SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
+ };
+
+ /// LateParsedMemberInitializer - An initializer for a non-static class data
+  /// member whose parsing must be delayed until the class is completely
+ /// defined (C++11 [class.mem]p2).
+ struct LateParsedMemberInitializer : public LateParsedDeclaration {
+ LateParsedMemberInitializer(Parser *P, Decl *FD)
+ : Self(P), Field(FD) { }
+
+ virtual void ParseLexedMemberInitializers();
+
+ Parser *Self;
+
+ /// Field - The field declaration.
+ Decl *Field;
+
+ /// CachedTokens - The sequence of tokens that comprises the initializer,
+ /// including any leading '='.
+ CachedTokens Toks;
+ };
+
+  /// LateParsedDeclarationsContainer - During parsing of a top-level
+  /// (non-nested) C++ class, method declarations that contain parts which
+  /// won't be parsed until after the definition is completed
+  /// (C++ [class.mem]p2), together with any attached inline definitions,
+  /// are stored here along with the tokens that will later be parsed to
+  /// create those entities.
+ typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
+
+ /// \brief Representation of a class that has been parsed, including
+ /// any member function declarations or definitions that need to be
+ /// parsed after the corresponding top-level class is complete.
+ struct ParsingClass {
+ ParsingClass(Decl *TagOrTemplate, bool TopLevelClass)
+ : TopLevelClass(TopLevelClass), TemplateScope(false),
+ TagOrTemplate(TagOrTemplate) { }
+
+ /// \brief Whether this is a "top-level" class, meaning that it is
+ /// not nested within another class.
+ bool TopLevelClass : 1;
+
+ /// \brief Whether this class had an associated template
+ /// scope. When true, TagOrTemplate is a template declaration;
+    /// otherwise, it is a tag declaration.
+ bool TemplateScope : 1;
+
+ /// \brief The class or class template whose definition we are parsing.
+ Decl *TagOrTemplate;
+
+ /// LateParsedDeclarations - Method declarations, inline definitions and
+ /// nested classes that contain pieces whose parsing will be delayed until
+ /// the top-level class is fully defined.
+ LateParsedDeclarationsContainer LateParsedDeclarations;
+ };
+
+ /// \brief The stack of classes that is currently being
+ /// parsed. Nested and local classes will be pushed onto this stack
+ /// when they are parsed, and removed afterward.
+ std::stack<ParsingClass *> ClassStack;
+
+ ParsingClass &getCurrentClass() {
+ assert(!ClassStack.empty() && "No lexed method stacks!");
+ return *ClassStack.top();
+ }
+
+ /// \brief RAII object used to inform the actions that we're
+ /// currently parsing a declaration. This is active when parsing a
+ /// variable's initializer, but not when parsing the body of a
+ /// class or function definition.
+ class ParsingDeclRAIIObject {
+ Sema &Actions;
+ Sema::ParsingDeclState State;
+ bool Popped;
+
+ public:
+ ParsingDeclRAIIObject(Parser &P) : Actions(P.Actions) {
+ push();
+ }
+
+ ParsingDeclRAIIObject(Parser &P, ParsingDeclRAIIObject *Other)
+ : Actions(P.Actions) {
+ if (Other) steal(*Other);
+ else push();
+ }
+
+ /// Creates a RAII object which steals the state from a different
+ /// object instead of pushing.
+ ParsingDeclRAIIObject(ParsingDeclRAIIObject &Other)
+ : Actions(Other.Actions) {
+ steal(Other);
+ }
+
+ ~ParsingDeclRAIIObject() {
+ abort();
+ }
+
+ /// Resets the RAII object for a new declaration.
+ void reset() {
+ abort();
+ push();
+ }
+
+ /// Signals that the context was completed without an appropriate
+ /// declaration being parsed.
+ void abort() {
+ pop(0);
+ }
+
+ void complete(Decl *D) {
+ assert(!Popped && "ParsingDeclaration has already been popped!");
+ pop(D);
+ }
+
+ private:
+ void steal(ParsingDeclRAIIObject &Other) {
+ State = Other.State;
+ Popped = Other.Popped;
+ Other.Popped = true;
+ }
+
+ void push() {
+ State = Actions.PushParsingDeclaration();
+ Popped = false;
+ }
+
+ void pop(Decl *D) {
+ if (!Popped) {
+ Actions.PopParsingDeclaration(State, D);
+ Popped = true;
+ }
+ }
+ };
+
+ /// A class for parsing a DeclSpec.
+ class ParsingDeclSpec : public DeclSpec {
+ ParsingDeclRAIIObject ParsingRAII;
+
+ public:
+ ParsingDeclSpec(Parser &P) : DeclSpec(P.AttrFactory), ParsingRAII(P) {}
+ ParsingDeclSpec(Parser &P, ParsingDeclRAIIObject *RAII)
+ : DeclSpec(P.AttrFactory), ParsingRAII(P, RAII) {}
+
+ void complete(Decl *D) {
+ ParsingRAII.complete(D);
+ }
+
+ void abort() {
+ ParsingRAII.abort();
+ }
+ };
+
+ /// A class for parsing a declarator.
+ class ParsingDeclarator : public Declarator {
+ ParsingDeclRAIIObject ParsingRAII;
+
+ public:
+ ParsingDeclarator(Parser &P, const ParsingDeclSpec &DS, TheContext C)
+ : Declarator(DS, C), ParsingRAII(P) {
+ }
+
+ const ParsingDeclSpec &getDeclSpec() const {
+ return static_cast<const ParsingDeclSpec&>(Declarator::getDeclSpec());
+ }
+
+ ParsingDeclSpec &getMutableDeclSpec() const {
+ return const_cast<ParsingDeclSpec&>(getDeclSpec());
+ }
+
+ void clear() {
+ Declarator::clear();
+ ParsingRAII.reset();
+ }
+
+ void complete(Decl *D) {
+ ParsingRAII.complete(D);
+ }
+ };
+
+  /// \brief RAII object used to push a class onto the stack of classes being
+  /// parsed and to pop it again when parsing of that definition completes.
+ class ParsingClassDefinition {
+ Parser &P;
+ bool Popped;
+ Sema::ParsingClassState State;
+
+ public:
+ ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass)
+ : P(P), Popped(false),
+ State(P.PushParsingClass(TagOrTemplate, TopLevelClass)) {
+ }
+
+    /// \brief Pop this class off the stack.
+ void Pop() {
+ assert(!Popped && "Nested class has already been popped");
+ Popped = true;
+ P.PopParsingClass(State);
+ }
+
+ ~ParsingClassDefinition() {
+ if (!Popped)
+ P.PopParsingClass(State);
+ }
+ };
+
+ /// \brief Contains information about any template-specific
+ /// information that has been parsed prior to parsing declaration
+ /// specifiers.
+ struct ParsedTemplateInfo {
+ ParsedTemplateInfo()
+ : Kind(NonTemplate), TemplateParams(0), TemplateLoc() { }
+
+ ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
+ bool isSpecialization,
+ bool lastParameterListWasEmpty = false)
+ : Kind(isSpecialization? ExplicitSpecialization : Template),
+ TemplateParams(TemplateParams),
+ LastParameterListWasEmpty(lastParameterListWasEmpty) { }
+
+ explicit ParsedTemplateInfo(SourceLocation ExternLoc,
+ SourceLocation TemplateLoc)
+ : Kind(ExplicitInstantiation), TemplateParams(0),
+ ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
+ LastParameterListWasEmpty(false){ }
+
+ /// \brief The kind of template we are parsing.
+ enum {
+ /// \brief We are not parsing a template at all.
+ NonTemplate = 0,
+ /// \brief We are parsing a template declaration.
+ Template,
+ /// \brief We are parsing an explicit specialization.
+ ExplicitSpecialization,
+ /// \brief We are parsing an explicit instantiation.
+ ExplicitInstantiation
+ } Kind;
+
+ /// \brief The template parameter lists, for template declarations
+ /// and explicit specializations.
+ TemplateParameterLists *TemplateParams;
+
+ /// \brief The location of the 'extern' keyword, if any, for an explicit
+    /// instantiation.
+ SourceLocation ExternLoc;
+
+ /// \brief The location of the 'template' keyword, for an explicit
+ /// instantiation.
+ SourceLocation TemplateLoc;
+
+ /// \brief Whether the last template parameter list was empty.
+ bool LastParameterListWasEmpty;
+
+ SourceRange getSourceRange() const LLVM_READONLY;
+ };
+
+ /// \brief Contains a late templated function.
+ /// Will be parsed at the end of the translation unit.
+ struct LateParsedTemplatedFunction {
+ explicit LateParsedTemplatedFunction(Decl *MD)
+ : D(MD) {}
+
+ CachedTokens Toks;
+
+ /// \brief The template function declaration to be late parsed.
+ Decl *D;
+ };
+
+ void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
+ void ParseLateTemplatedFuncDef(LateParsedTemplatedFunction &LMT);
+ typedef llvm::DenseMap<const FunctionDecl*, LateParsedTemplatedFunction*>
+ LateParsedTemplateMapT;
+ LateParsedTemplateMapT LateParsedTemplateMap;
+
+ static void LateTemplateParserCallback(void *P, const FunctionDecl *FD);
+ void LateTemplateParser(const FunctionDecl *FD);
+
+ Sema::ParsingClassState
+ PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass);
+ void DeallocateParsedClasses(ParsingClass *Class);
+ void PopParsingClass(Sema::ParsingClassState);
+
+ Decl *ParseCXXInlineMethodDef(AccessSpecifier AS, AttributeList *AccessAttrs,
+ ParsingDeclarator &D,
+ const ParsedTemplateInfo &TemplateInfo,
+ const VirtSpecifiers& VS,
+ FunctionDefinitionKind DefinitionKind,
+ ExprResult& Init);
+ void ParseCXXNonStaticMemberInitializer(Decl *VarD);
+ void ParseLexedAttributes(ParsingClass &Class);
+ void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
+ bool EnterScope, bool OnDefinition);
+ void ParseLexedAttribute(LateParsedAttribute &LA,
+ bool EnterScope, bool OnDefinition);
+ void ParseLexedMethodDeclarations(ParsingClass &Class);
+ void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
+ void ParseLexedMethodDefs(ParsingClass &Class);
+ void ParseLexedMethodDef(LexedMethod &LM);
+ void ParseLexedMemberInitializers(ParsingClass &Class);
+ void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
+ Decl *ParseLexedObjCMethodDefs(LexedMethod &LM);
+ bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
+ bool ConsumeAndStoreUntil(tok::TokenKind T1,
+ CachedTokens &Toks,
+ bool StopAtSemi = true,
+ bool ConsumeFinalToken = true) {
+ return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
+ }
+ bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
+ CachedTokens &Toks,
+ bool StopAtSemi = true,
+ bool ConsumeFinalToken = true);
+
+ //===--------------------------------------------------------------------===//
+ // C99 6.9: External Definitions.
+ struct ParsedAttributesWithRange : ParsedAttributes {
+ ParsedAttributesWithRange(AttributeFactory &factory)
+ : ParsedAttributes(factory) {}
+
+ SourceRange Range;
+ };
+
+ DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
+ ParsingDeclSpec *DS = 0);
+ bool isDeclarationAfterDeclarator();
+ bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
+ DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(ParsedAttributes &attrs,
+ AccessSpecifier AS = AS_none);
+ DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(ParsingDeclSpec &DS,
+ AccessSpecifier AS = AS_none);
+
+ Decl *ParseFunctionDefinition(ParsingDeclarator &D,
+ const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
+ LateParsedAttrList *LateParsedAttrs = 0);
+ void ParseKNRParamDeclarations(Declarator &D);
+ // EndLoc, if non-NULL, is filled with the location of the last token of
+ // the simple-asm.
+ ExprResult ParseSimpleAsm(SourceLocation *EndLoc = 0);
+ ExprResult ParseAsmStringLiteral();
+
+ // Objective-C External Declarations
+ DeclGroupPtrTy ParseObjCAtDirectives();
+ DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
+ Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
+ ParsedAttributes &prefixAttrs);
+ void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
+ tok::ObjCKeywordKind visibility,
+ SourceLocation atLoc);
+ bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
+ SmallVectorImpl<SourceLocation> &PLocs,
+ bool WarnOnDeclarations,
+ SourceLocation &LAngleLoc,
+ SourceLocation &EndProtoLoc);
+ bool ParseObjCProtocolQualifiers(DeclSpec &DS);
+ void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
+ Decl *CDecl);
+ DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
+ ParsedAttributes &prefixAttrs);
+
+ struct ObjCImplParsingDataRAII {
+ Parser &P;
+ Decl *Dcl;
+ typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
+ LateParsedObjCMethodContainer LateParsedObjCMethods;
+
+ ObjCImplParsingDataRAII(Parser &parser, Decl *D)
+ : P(parser), Dcl(D) {
+ P.CurParsedObjCImpl = this;
+ Finished = false;
+ }
+ ~ObjCImplParsingDataRAII();
+
+ void finish(SourceRange AtEnd);
+ bool isFinished() const { return Finished; }
+
+ private:
+ bool Finished;
+ };
+ ObjCImplParsingDataRAII *CurParsedObjCImpl;
+
+ DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc);
+ DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
+ Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
+ Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
+ Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
+
+ IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
+ // Definitions for Objective-C context-sensitive keyword recognition.
+ enum ObjCTypeQual {
+ objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
+ objc_NumQuals
+ };
+ IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
+
+ bool isTokIdentifier_in() const;
+
+ ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, Declarator::TheContext Ctx,
+ ParsedAttributes *ParamAttrs);
+ void ParseObjCMethodRequirement();
+ Decl *ParseObjCMethodPrototype(
+ tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
+ bool MethodDefinition = true);
+ Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
+ tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
+ bool MethodDefinition=true);
+ void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
+
+ Decl *ParseObjCMethodDefinition();
+
+ //===--------------------------------------------------------------------===//
+ // C99 6.5: Expressions.
+
+ /// TypeCastState - State whether an expression is or may be a type cast.
+ enum TypeCastState {
+ NotTypeCast = 0,
+ MaybeTypeCast,
+ IsTypeCast
+ };
+
+ ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
+ ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
+ // Expr that doesn't include commas.
+ ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
+
+ ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
+
+ ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
+
+ ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
+ prec::Level MinPrec);
+ ExprResult ParseCastExpression(bool isUnaryExpression,
+ bool isAddressOfOperand,
+ bool &NotCastExpr,
+ TypeCastState isTypeCast);
+ ExprResult ParseCastExpression(bool isUnaryExpression,
+ bool isAddressOfOperand = false,
+ TypeCastState isTypeCast = NotTypeCast);
+
+ /// Returns true if the next token would start a postfix-expression
+ /// suffix.
+ bool isPostfixExpressionSuffixStart() {
+ tok::TokenKind K = Tok.getKind();
+ return (K == tok::l_square || K == tok::l_paren ||
+ K == tok::period || K == tok::arrow ||
+ K == tok::plusplus || K == tok::minusminus);
+ }
+
+ ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
+ ExprResult ParseUnaryExprOrTypeTraitExpression();
+ ExprResult ParseBuiltinPrimaryExpression();
+
+ ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
+ bool &isCastExpr,
+ ParsedType &CastTy,
+ SourceRange &CastRange);
+
+ typedef SmallVector<Expr*, 20> ExprListTy;
+ typedef SmallVector<SourceLocation, 20> CommaLocsTy;
+
+ /// ParseExpressionList - Used for C/C++ (argument-)expression-list.
+ bool ParseExpressionList(SmallVectorImpl<Expr*> &Exprs,
+ SmallVectorImpl<SourceLocation> &CommaLocs,
+ void (Sema::*Completer)(Scope *S,
+ Expr *Data,
+ llvm::ArrayRef<Expr *> Args) = 0,
+ Expr *Data = 0);
+
+ /// ParenParseOption - Control what ParseParenExpression will parse.
+ enum ParenParseOption {
+ SimpleExpr, // Only parse '(' expression ')'
+ CompoundStmt, // Also allow '(' compound-statement ')'
+ CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
+ CastExpr // Also allow '(' type-name ')' <anything>
+ };
+ ExprResult ParseParenExpression(ParenParseOption &ExprType,
+ bool stopIfCastExpr,
+ bool isTypeCast,
+ ParsedType &CastTy,
+ SourceLocation &RParenLoc);
+
+ ExprResult ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
+ ParsedType &CastTy,
+ BalancedDelimiterTracker &Tracker);
+ ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc);
+
+ ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
+
+ ExprResult ParseGenericSelectionExpression();
+
+ ExprResult ParseObjCBoolLiteral();
+
+ //===--------------------------------------------------------------------===//
+ // C++ Expressions
+ ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
+
+ void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
+ bool EnteringContext, IdentifierInfo &II,
+ CXXScopeSpec &SS);
+
+ bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
+ ParsedType ObjectType,
+ bool EnteringContext,
+ bool *MayBePseudoDestructor = 0,
+ bool IsTypename = false);
+
+ //===--------------------------------------------------------------------===//
+ // C++0x 5.1.2: Lambda expressions
+
+ // [...] () -> type {...}
+ ExprResult ParseLambdaExpression();
+ ExprResult TryParseLambdaExpression();
+ llvm::Optional<unsigned> ParseLambdaIntroducer(LambdaIntroducer &Intro);
+ bool TryParseLambdaIntroducer(LambdaIntroducer &Intro);
+ ExprResult ParseLambdaExpressionAfterIntroducer(
+ LambdaIntroducer &Intro);
+
+ //===--------------------------------------------------------------------===//
+ // C++ 5.2p1: C++ Casts
+ ExprResult ParseCXXCasts();
+
+ //===--------------------------------------------------------------------===//
+ // C++ 5.2p1: C++ Type Identification
+ ExprResult ParseCXXTypeid();
+
+ //===--------------------------------------------------------------------===//
+ // C++ : Microsoft __uuidof Expression
+ ExprResult ParseCXXUuidof();
+
+ //===--------------------------------------------------------------------===//
+ // C++ 5.2.4: C++ Pseudo-Destructor Expressions
+ ExprResult ParseCXXPseudoDestructor(ExprArg Base, SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ CXXScopeSpec &SS,
+ ParsedType ObjectType);
+
+ //===--------------------------------------------------------------------===//
+ // C++ 9.3.2: C++ 'this' pointer
+ ExprResult ParseCXXThis();
+
+ //===--------------------------------------------------------------------===//
+ // C++ 15: C++ Throw Expression
+ ExprResult ParseThrowExpression();
+
+ ExceptionSpecificationType MaybeParseExceptionSpecification(
+ SourceRange &SpecificationRange,
+ SmallVectorImpl<ParsedType> &DynamicExceptions,
+ SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
+ ExprResult &NoexceptExpr);
+
+ // EndLoc is filled with the location of the last token of the specification.
+ ExceptionSpecificationType ParseDynamicExceptionSpecification(
+ SourceRange &SpecificationRange,
+ SmallVectorImpl<ParsedType> &Exceptions,
+ SmallVectorImpl<SourceRange> &Ranges);
+
+ //===--------------------------------------------------------------------===//
+ // C++0x 8: Function declaration trailing-return-type
+ TypeResult ParseTrailingReturnType(SourceRange &Range);
+
+ //===--------------------------------------------------------------------===//
+ // C++ 2.13.5: C++ Boolean Literals
+ ExprResult ParseCXXBoolLiteral();
+
+ //===--------------------------------------------------------------------===//
+ // C++ 5.2.3: Explicit type conversion (functional notation)
+ ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
+
+ bool isCXXSimpleTypeSpecifier() const;
+
+ /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
+ /// This should only be called when the current token is known to be part of
+ /// simple-type-specifier.
+ void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
+
+ bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
+
+ //===--------------------------------------------------------------------===//
+ // C++ 5.3.4 and 5.3.5: C++ new and delete
+ bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
+ Declarator &D);
+ void ParseDirectNewDeclarator(Declarator &D);
+ ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
+ ExprResult ParseCXXDeleteExpression(bool UseGlobal,
+ SourceLocation Start);
+
+ //===--------------------------------------------------------------------===//
+ // C++ if/switch/while condition expression.
+ bool ParseCXXCondition(ExprResult &ExprResult, Decl *&DeclResult,
+ SourceLocation Loc, bool ConvertToBoolean);
+
+ //===--------------------------------------------------------------------===//
+ // C++ types
+
+ //===--------------------------------------------------------------------===//
+ // C99 6.7.8: Initialization.
+
+ /// ParseInitializer
+ /// initializer: [C99 6.7.8]
+ /// assignment-expression
+ /// '{' ...
+ ExprResult ParseInitializer() {
+ if (Tok.isNot(tok::l_brace))
+ return ParseAssignmentExpression();
+ return ParseBraceInitializer();
+ }
+ bool MayBeDesignationStart();
+ ExprResult ParseBraceInitializer();
+ ExprResult ParseInitializerWithPotentialDesignator();
+
+ //===--------------------------------------------------------------------===//
+ // clang Expressions
+
+ ExprResult ParseBlockLiteralExpression(); // ^{...}
+
+ //===--------------------------------------------------------------------===//
+ // Objective-C Expressions
+ ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
+ ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
+ ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
+ ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
+ ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
+ ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
+ ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
+ ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
+ ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
+ ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
+ bool isSimpleObjCMessageExpression();
+ ExprResult ParseObjCMessageExpression();
+ ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
+ SourceLocation SuperLoc,
+ ParsedType ReceiverType,
+ ExprArg ReceiverExpr);
+ ExprResult ParseAssignmentExprWithObjCMessageExprStart(
+ SourceLocation LBracloc, SourceLocation SuperLoc,
+ ParsedType ReceiverType, ExprArg ReceiverExpr);
+ bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
+
+ //===--------------------------------------------------------------------===//
+ // C99 6.8: Statements and Blocks.
+
+ StmtResult ParseStatement(SourceLocation *TrailingElseLoc = NULL) {
+ StmtVector Stmts(Actions);
+ return ParseStatementOrDeclaration(Stmts, true, TrailingElseLoc);
+ }
+ StmtResult ParseStatementOrDeclaration(StmtVector& Stmts,
+ bool OnlyStatement,
+ SourceLocation *TrailingElseLoc = NULL);
+ StmtResult ParseExprStatement(ParsedAttributes &Attrs);
+ StmtResult ParseLabeledStatement(ParsedAttributes &Attr);
+ StmtResult ParseCaseStatement(ParsedAttributes &Attr,
+ bool MissingCase = false,
+ ExprResult Expr = ExprResult());
+ StmtResult ParseDefaultStatement(ParsedAttributes &Attr);
+ StmtResult ParseCompoundStatement(ParsedAttributes &Attr,
+ bool isStmtExpr = false);
+ StmtResult ParseCompoundStatement(ParsedAttributes &Attr,
+ bool isStmtExpr,
+ unsigned ScopeFlags);
+ StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
+ bool ParseParenExprOrCondition(ExprResult &ExprResult,
+ Decl *&DeclResult,
+ SourceLocation Loc,
+ bool ConvertToBoolean);
+ StmtResult ParseIfStatement(ParsedAttributes &Attr,
+ SourceLocation *TrailingElseLoc);
+ StmtResult ParseSwitchStatement(ParsedAttributes &Attr,
+ SourceLocation *TrailingElseLoc);
+ StmtResult ParseWhileStatement(ParsedAttributes &Attr,
+ SourceLocation *TrailingElseLoc);
+ StmtResult ParseDoStatement(ParsedAttributes &Attr);
+ StmtResult ParseForStatement(ParsedAttributes &Attr,
+ SourceLocation *TrailingElseLoc);
+ StmtResult ParseGotoStatement(ParsedAttributes &Attr);
+ StmtResult ParseContinueStatement(ParsedAttributes &Attr);
+ StmtResult ParseBreakStatement(ParsedAttributes &Attr);
+ StmtResult ParseReturnStatement(ParsedAttributes &Attr);
+ StmtResult ParseAsmStatement(bool &msAsm);
+ StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
+
+ /// \brief Describes the behavior to follow for an __if_exists
+ /// block.
+ enum IfExistsBehavior {
+ /// \brief Parse the block; this code is always used.
+ IEB_Parse,
+ /// \brief Skip the block entirely; this code is never used.
+ IEB_Skip,
+ /// \brief Parse the block as a dependent block, which may be used in
+ /// some template instantiations but not others.
+ IEB_Dependent
+ };
+
+ /// \brief Describes the condition of a Microsoft __if_exists or
+ /// __if_not_exists block.
+ struct IfExistsCondition {
+ /// \brief The location of the initial keyword.
+ SourceLocation KeywordLoc;
+ /// \brief Whether this is an __if_exists block (rather than an
+ /// __if_not_exists block).
+ bool IsIfExists;
+
+ /// \brief Nested-name-specifier preceding the name.
+ CXXScopeSpec SS;
+
+ /// \brief The name we're looking for.
+ UnqualifiedId Name;
+
+ /// \brief The behavior this __if_exists or __if_not_exists block
+ /// should follow.
+ IfExistsBehavior Behavior;
+ };
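+
+ // For reference, the Microsoft-specific construct being described has this
+ // shape ('N::foo' is only a placeholder name):
+ //
+ // \code
+ //   __if_exists (N::foo) {
+ //     // these tokens are parsed, skipped, or treated as dependent,
+ //     // according to IfExistsBehavior
+ //   }
+ // \endcode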
+
+ bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
+ void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
+ void ParseMicrosoftIfExistsExternalDeclaration();
+ void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
+ AccessSpecifier& CurAS);
+ bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
+ bool &InitExprsOk);
+ bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
+ SmallVectorImpl<Expr *> &Constraints,
+ SmallVectorImpl<Expr *> &Exprs);
+
+ //===--------------------------------------------------------------------===//
+ // C++ 6: Statements and Blocks
+
+ StmtResult ParseCXXTryBlock(ParsedAttributes &Attr);
+ StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc);
+ StmtResult ParseCXXCatchBlock();
+
+ //===--------------------------------------------------------------------===//
+ // MS: SEH Statements and Blocks
+
+ StmtResult ParseSEHTryBlock(ParsedAttributes &Attr);
+ StmtResult ParseSEHTryBlockCommon(SourceLocation Loc);
+ StmtResult ParseSEHExceptBlock(SourceLocation Loc);
+ StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
+
+ //===--------------------------------------------------------------------===//
+ // Objective-C Statements
+
+ StmtResult ParseObjCAtStatement(SourceLocation atLoc);
+ StmtResult ParseObjCTryStmt(SourceLocation atLoc);
+ StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
+ StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
+ StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
+
+
+ //===--------------------------------------------------------------------===//
+ // C99 6.7: Declarations.
+
+ /// A context for parsing declaration specifiers. TODO: flesh this
+ /// out; there are other significant restrictions on specifiers that
+ /// would best be implemented in the parser.
+ enum DeclSpecContext {
+ DSC_normal, // normal context
+ DSC_class, // class context, enables 'friend'
+ DSC_type_specifier, // C++ type-specifier-seq
+ DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
+ DSC_top_level // top-level/namespace declaration context
+ };
+
+ /// Information on a C++0x for-range-initializer found while parsing a
+ /// declaration which turns out to be a for-range-declaration.
+ struct ForRangeInit {
+ SourceLocation ColonLoc;
+ ExprResult RangeExpr;
+
+ bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
+ };
+
+ DeclGroupPtrTy ParseDeclaration(StmtVector &Stmts,
+ unsigned Context, SourceLocation &DeclEnd,
+ ParsedAttributesWithRange &attrs);
+ DeclGroupPtrTy ParseSimpleDeclaration(StmtVector &Stmts,
+ unsigned Context,
+ SourceLocation &DeclEnd,
+ ParsedAttributes &attrs,
+ bool RequireSemi,
+ ForRangeInit *FRI = 0);
+ bool MightBeDeclarator(unsigned Context);
+ DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, unsigned Context,
+ bool AllowFunctionDefinitions,
+ SourceLocation *DeclEnd = 0,
+ ForRangeInit *FRI = 0);
+ Decl *ParseDeclarationAfterDeclarator(Declarator &D,
+ const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
+ bool ParseAsmAttributesAfterDeclarator(Declarator &D);
+ Decl *ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
+ const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
+ Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
+ Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
+
+ /// \brief When in code-completion, skip parsing of the function/method body
+ /// unless the body contains the code-completion point.
+ ///
+ /// \returns true if the function body was skipped.
+ bool trySkippingFunctionBody();
+
+ bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
+ const ParsedTemplateInfo &TemplateInfo,
+ AccessSpecifier AS, DeclSpecContext DSC);
+ DeclSpecContext getDeclSpecContextFromDeclaratorContext(unsigned Context);
+ void ParseDeclarationSpecifiers(DeclSpec &DS,
+ const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
+ AccessSpecifier AS = AS_none,
+ DeclSpecContext DSC = DSC_normal,
+ LateParsedAttrList *LateAttrs = 0);
+
+ void ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS = AS_none,
+ DeclSpecContext DSC = DSC_normal);
+
+ void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
+ Declarator::TheContext Context);
+
+ void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
+ const ParsedTemplateInfo &TemplateInfo,
+ AccessSpecifier AS, DeclSpecContext DSC);
+ void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
+ void ParseStructUnionBody(SourceLocation StartLoc, unsigned TagType,
+ Decl *TagDecl);
+
+ struct FieldCallback {
+ virtual Decl *invoke(FieldDeclarator &Field) = 0;
+ virtual ~FieldCallback() {}
+
+ private:
+ virtual void _anchor();
+ };
+ struct ObjCPropertyCallback;
+
+ void ParseStructDeclaration(DeclSpec &DS, FieldCallback &Callback);
+
+ bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
+ bool isTypeSpecifierQualifier();
+ bool isTypeQualifier() const;
+
+ /// isKnownToBeTypeSpecifier - Return true if we know that the specified token
+ /// is definitely a type-specifier. Return false if it isn't part of a type
+ /// specifier or if we're not sure.
+ bool isKnownToBeTypeSpecifier(const Token &Tok) const;
+
+ /// isDeclarationStatement - Disambiguates between a declaration and an
+ /// expression statement, when parsing function bodies.
+ /// Returns true for declaration, false for expression.
+ bool isDeclarationStatement() {
+ if (getLangOpts().CPlusPlus)
+ return isCXXDeclarationStatement();
+ return isDeclarationSpecifier(true);
+ }
+
+ /// isForInitDeclaration - Disambiguates between a declaration and an
+ /// expression in the context of the C 'clause-1' or the C++
+ /// 'for-init-statement' part of a 'for' statement.
+ /// Returns true for declaration, false for expression.
+ bool isForInitDeclaration() {
+ if (getLangOpts().CPlusPlus)
+ return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
+ return isDeclarationSpecifier(true);
+ }
+
+ /// \brief Determine whether we are currently at the start of an Objective-C
+ /// class message that appears to be missing the open bracket '['.
+ bool isStartOfObjCClassMessageMissingOpenBracket();
+
+ /// \brief Starting with a scope specifier, identifier, or
+ /// template-id that refers to the current class, determine whether
+ /// this is a constructor declarator.
+ bool isConstructorDeclarator();
+
+ /// \brief Specifies the context in which type-id/expression
+ /// disambiguation will occur.
+ enum TentativeCXXTypeIdContext {
+ TypeIdInParens,
+ TypeIdAsTemplateArgument
+ };
+
+
+ /// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
+ /// whether the parens contain an expression or a type-id.
+ /// Returns true for a type-id and false for an expression.
+ bool isTypeIdInParens(bool &isAmbiguous) {
+ if (getLangOpts().CPlusPlus)
+ return isCXXTypeId(TypeIdInParens, isAmbiguous);
+ isAmbiguous = false;
+ return isTypeSpecifierQualifier();
+ }
+ bool isTypeIdInParens() {
+ bool isAmbiguous;
+ return isTypeIdInParens(isAmbiguous);
+ }
+
+ /// isCXXDeclarationStatement - C++-specialized function that disambiguates
+ /// between a declaration and an expression statement, when parsing function
+ /// bodies. Returns true for declaration, false for expression.
+ bool isCXXDeclarationStatement();
+
+ /// isCXXSimpleDeclaration - C++-specialized function that disambiguates
+ /// between a simple-declaration and an expression-statement.
+ /// If during the disambiguation process a parsing error is encountered,
+ /// the function returns true to let the declaration parsing code handle it.
+ /// Returns false if the statement is disambiguated as expression.
+ bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
+
+ /// isCXXFunctionDeclarator - Disambiguates between a function declarator and
+ /// a constructor-style initializer, when parsing declaration statements.
+ /// Returns true for function declarator and false for constructor-style
+ /// initializer. If 'warnIfAmbiguous' is true a warning will be emitted to
+ /// indicate that the parens were disambiguated as function declarator.
+ /// If during the disambiguation process a parsing error is encountered,
+ /// the function returns true to let the declaration parsing code handle it.
+ bool isCXXFunctionDeclarator(bool warnIfAmbiguous);
+
+ /// isCXXConditionDeclaration - Disambiguates between a declaration and an
+ /// expression for the condition of an if/switch/while/for statement.
+ /// If during the disambiguation process a parsing error is encountered,
+ /// the function returns true to let the declaration parsing code handle it.
+ bool isCXXConditionDeclaration();
+
+ bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
+ bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
+ bool isAmbiguous;
+ return isCXXTypeId(Context, isAmbiguous);
+ }
+
+ /// TPResult - Used as the result value for functions whose purpose is to
+ /// disambiguate C++ constructs by "tentatively parsing" them.
+ /// This is a class instead of a simple enum because the implicit enum-to-bool
+ /// conversions may cause subtle bugs.
+ class TPResult {
+ enum Result {
+ TPR_true,
+ TPR_false,
+ TPR_ambiguous,
+ TPR_error
+ };
+ Result Res;
+ TPResult(Result result) : Res(result) {}
+ public:
+ static TPResult True() { return TPR_true; }
+ static TPResult False() { return TPR_false; }
+ static TPResult Ambiguous() { return TPR_ambiguous; }
+ static TPResult Error() { return TPR_error; }
+
+ bool operator==(const TPResult &RHS) const { return Res == RHS.Res; }
+ bool operator!=(const TPResult &RHS) const { return Res != RHS.Res; }
+ };
+
+ /// \brief Based only on the given token kind, determine whether we know that
+ /// we're at the start of an expression or a type-specifier-seq (which may
+ /// be an expression, in C++).
+ ///
+ /// This routine does not attempt to resolve any of the tricky cases, e.g.,
+ /// those involving lookup of identifiers.
+ ///
+ /// \returns \c TPR_true if this token starts an expression, \c TPR_false if
+ /// this token starts a type-specifier-seq, or \c TPR_ambiguous if it cannot
+ /// tell.
+ TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
+
+ /// isCXXDeclarationSpecifier - Returns TPResult::True() if it is a
+ /// declaration specifier, TPResult::False() if it is not,
+ /// TPResult::Ambiguous() if it could be either a decl-specifier or a
+ /// function-style cast, and TPResult::Error() if a parsing error was
+ /// encountered. If it could be a braced C++11 function-style cast, returns
+ /// BracedCastResult.
+ /// Doesn't consume tokens.
+ TPResult
+ isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False());
+
+ // "Tentative parsing" functions, used for disambiguation. If a parsing error
+ // is encountered they will return TPResult::Error().
+ // Returning TPResult::True()/False() indicates that the ambiguity was
+ // resolved and tentative parsing may stop. TPResult::Ambiguous() indicates
+ // that more tentative parsing is necessary for disambiguation.
+ // They all consume tokens, so backtracking should be used after calling them.
+
+ TPResult TryParseDeclarationSpecifier();
+ TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
+ TPResult TryParseTypeofSpecifier();
+ TPResult TryParseProtocolQualifiers();
+ TPResult TryParseInitDeclaratorList();
+ TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier=true);
+ TPResult TryParseParameterDeclarationClause();
+ TPResult TryParseFunctionDeclarator();
+ TPResult TryParseBracketDeclarator();
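+
+ // A minimal sketch of the tentative-parsing pattern described above.
+ // TentativeParsingAction is assumed to be the parser's backtracking RAII
+ // helper declared elsewhere in this class; the branch bodies are placeholders.
+ //
+ // \code
+ //   TentativeParsingAction PA(*this);
+ //   TPResult TPR = TryParseSimpleDeclaration(/*AllowForRangeDecl=*/false);
+ //   PA.Revert();                  // always back up after tentative parsing
+ //   if (TPR == TPResult::True())
+ //     ; // commit to parsing a declaration
+ //   else if (TPR == TPResult::False())
+ //     ; // parse as an expression instead
+ // \endcode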
+
+ TypeResult ParseTypeName(SourceRange *Range = 0,
+ Declarator::TheContext Context
+ = Declarator::TypeNameContext,
+ AccessSpecifier AS = AS_none,
+ Decl **OwnedType = 0);
+ void ParseBlockId();
+
+ // Check for the start of a C++11 attribute-specifier-seq in a context where
+ // an attribute is not allowed.
+ bool CheckProhibitedCXX11Attribute() {
+ assert(Tok.is(tok::l_square));
+ if (!getLangOpts().CPlusPlus0x || NextToken().isNot(tok::l_square))
+ return false;
+ return DiagnoseProhibitedCXX11Attribute();
+ }
+ bool DiagnoseProhibitedCXX11Attribute();
+
+ void ProhibitAttributes(ParsedAttributesWithRange &attrs) {
+ if (!attrs.Range.isValid()) return;
+ DiagnoseProhibitedAttributes(attrs);
+ }
+ void DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs);
+
+ void MaybeParseGNUAttributes(Declarator &D,
+ LateParsedAttrList *LateAttrs = 0) {
+ if (Tok.is(tok::kw___attribute)) {
+ ParsedAttributes attrs(AttrFactory);
+ SourceLocation endLoc;
+ ParseGNUAttributes(attrs, &endLoc, LateAttrs);
+ D.takeAttributes(attrs, endLoc);
+ }
+ }
+ void MaybeParseGNUAttributes(ParsedAttributes &attrs,
+ SourceLocation *endLoc = 0,
+ LateParsedAttrList *LateAttrs = 0) {
+ if (Tok.is(tok::kw___attribute))
+ ParseGNUAttributes(attrs, endLoc, LateAttrs);
+ }
+ void ParseGNUAttributes(ParsedAttributes &attrs,
+ SourceLocation *endLoc = 0,
+ LateParsedAttrList *LateAttrs = 0);
+ void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
+ SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs,
+ SourceLocation *EndLoc);
+
+ void MaybeParseCXX0XAttributes(Declarator &D) {
+ if (getLangOpts().CPlusPlus0x && isCXX11AttributeSpecifier()) {
+ ParsedAttributesWithRange attrs(AttrFactory);
+ SourceLocation endLoc;
+ ParseCXX11Attributes(attrs, &endLoc);
+ D.takeAttributes(attrs, endLoc);
+ }
+ }
+ void MaybeParseCXX0XAttributes(ParsedAttributes &attrs,
+ SourceLocation *endLoc = 0) {
+ if (getLangOpts().CPlusPlus0x && isCXX11AttributeSpecifier()) {
+ ParsedAttributesWithRange attrsWithRange(AttrFactory);
+ ParseCXX11Attributes(attrsWithRange, endLoc);
+ attrs.takeAllFrom(attrsWithRange);
+ }
+ }
+ void MaybeParseCXX0XAttributes(ParsedAttributesWithRange &attrs,
+ SourceLocation *endLoc = 0,
+ bool OuterMightBeMessageSend = false) {
+ if (getLangOpts().CPlusPlus0x &&
+ isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
+ ParseCXX11Attributes(attrs, endLoc);
+ }
+
+ void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
+ SourceLocation *EndLoc = 0);
+ void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
+ SourceLocation *EndLoc = 0);
+ IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
+
+ void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
+ SourceLocation *endLoc = 0) {
+ if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
+ ParseMicrosoftAttributes(attrs, endLoc);
+ }
+ void ParseMicrosoftAttributes(ParsedAttributes &attrs,
+ SourceLocation *endLoc = 0);
+ void ParseMicrosoftDeclSpec(ParsedAttributes &attrs);
+ void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
+ void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
+ void ParseOpenCLAttributes(ParsedAttributes &attrs);
+ void ParseOpenCLQualifiers(DeclSpec &DS);
+
+ VersionTuple ParseVersionTuple(SourceRange &Range);
+ void ParseAvailabilityAttribute(IdentifierInfo &Availability,
+ SourceLocation AvailabilityLoc,
+ ParsedAttributes &attrs,
+ SourceLocation *endLoc);
+
+ bool IsThreadSafetyAttribute(llvm::StringRef AttrName);
+ void ParseThreadSafetyAttribute(IdentifierInfo &AttrName,
+ SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs,
+ SourceLocation *EndLoc);
+
+
+ void ParseTypeofSpecifier(DeclSpec &DS);
+ SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
+ void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
+ void ParseAtomicSpecifier(DeclSpec &DS);
+
+ ExprResult ParseAlignArgument(SourceLocation Start,
+ SourceLocation &EllipsisLoc);
+ void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
+ SourceLocation *endLoc = 0);
+
+ VirtSpecifiers::Specifier isCXX0XVirtSpecifier(const Token &Tok) const;
+ VirtSpecifiers::Specifier isCXX0XVirtSpecifier() const {
+ return isCXX0XVirtSpecifier(Tok);
+ }
+ void ParseOptionalCXX0XVirtSpecifierSeq(VirtSpecifiers &VS);
+
+ bool isCXX0XFinalKeyword() const;
+
+ /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
+ /// enter a new C++ declarator scope and exit it when the function is
+ /// finished.
+ class DeclaratorScopeObj {
+ Parser &P;
+ CXXScopeSpec &SS;
+ bool EnteredScope;
+ bool CreatedScope;
+ public:
+ DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
+ : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
+
+ void EnterDeclaratorScope() {
+ assert(!EnteredScope && "Already entered the scope!");
+ assert(SS.isSet() && "C++ scope was not set!");
+
+ CreatedScope = true;
+ P.EnterScope(0); // Not a decl scope.
+
+ if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
+ EnteredScope = true;
+ }
+
+ ~DeclaratorScopeObj() {
+ if (EnteredScope) {
+ assert(SS.isSet() && "C++ scope was cleared ?");
+ P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
+ }
+ if (CreatedScope)
+ P.ExitScope();
+ }
+ };
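+
+ // A minimal usage sketch of the RAII object above; 'D' is an assumed
+ // Declarator currently being parsed:
+ //
+ // \code
+ //   DeclaratorScopeObj DeclScopeObj(*this, D.getCXXScopeSpec());
+ //   if (D.getCXXScopeSpec().isSet())
+ //     DeclScopeObj.EnterDeclaratorScope();
+ //   // ... parse the rest of the declarator ...
+ //   // the destructor exits any scope that was entered or created
+ // \endcode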
+
+ /// ParseDeclarator - Parse and verify a newly-initialized declarator.
+ void ParseDeclarator(Declarator &D);
+ /// A function that parses a variant of direct-declarator.
+ typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
+ void ParseDeclaratorInternal(Declarator &D,
+ DirectDeclParseFunction DirectDeclParser);
+
+ void ParseTypeQualifierListOpt(DeclSpec &DS, bool GNUAttributesAllowed = true,
+ bool CXX0XAttributesAllowed = true);
+ void ParseDirectDeclarator(Declarator &D);
+ void ParseParenDeclarator(Declarator &D);
+ void ParseFunctionDeclarator(Declarator &D,
+ ParsedAttributes &attrs,
+ BalancedDelimiterTracker &Tracker,
+ bool RequiresArg = false);
+ bool isFunctionDeclaratorIdentifierList();
+ void ParseFunctionDeclaratorIdentifierList(
+ Declarator &D,
+ SmallVector<DeclaratorChunk::ParamInfo, 16> &ParamInfo);
+ void ParseParameterDeclarationClause(
+ Declarator &D,
+ ParsedAttributes &attrs,
+ SmallVector<DeclaratorChunk::ParamInfo, 16> &ParamInfo,
+ SourceLocation &EllipsisLoc);
+ void ParseBracketDeclarator(Declarator &D);
+
+ //===--------------------------------------------------------------------===//
+ // C++ 7: Declarations [dcl.dcl]
+
+ /// The kind of attribute specifier we have found.
+ enum CXX11AttributeKind {
+ /// This is not an attribute specifier.
+ CAK_NotAttributeSpecifier,
+ /// This should be treated as an attribute-specifier.
+ CAK_AttributeSpecifier,
+ /// The next tokens are '[[', but this is not an attribute-specifier. This
+ /// is ill-formed by C++11 [dcl.attr.grammar]p6.
+ CAK_InvalidAttributeSpecifier
+ };
+ CXX11AttributeKind
+ isCXX11AttributeSpecifier(bool Disambiguate = false,
+ bool OuterMightBeMessageSend = false);
+
+ Decl *ParseNamespace(unsigned Context, SourceLocation &DeclEnd,
+ SourceLocation InlineLoc = SourceLocation());
+ void ParseInnerNamespace(std::vector<SourceLocation>& IdentLoc,
+ std::vector<IdentifierInfo*>& Ident,
+ std::vector<SourceLocation>& NamespaceLoc,
+ unsigned int index, SourceLocation& InlineLoc,
+ ParsedAttributes& attrs,
+ BalancedDelimiterTracker &Tracker);
+ Decl *ParseLinkage(ParsingDeclSpec &DS, unsigned Context);
+ Decl *ParseUsingDirectiveOrDeclaration(unsigned Context,
+ const ParsedTemplateInfo &TemplateInfo,
+ SourceLocation &DeclEnd,
+ ParsedAttributesWithRange &attrs,
+ Decl **OwnedType = 0);
+ Decl *ParseUsingDirective(unsigned Context,
+ SourceLocation UsingLoc,
+ SourceLocation &DeclEnd,
+ ParsedAttributes &attrs);
+ Decl *ParseUsingDeclaration(unsigned Context,
+ const ParsedTemplateInfo &TemplateInfo,
+ SourceLocation UsingLoc,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS = AS_none,
+ Decl **OwnedType = 0);
+ Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
+ Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
+ SourceLocation AliasLoc, IdentifierInfo *Alias,
+ SourceLocation &DeclEnd);
+
+ //===--------------------------------------------------------------------===//
+ // C++ 9: classes [class] and C structs/unions.
+ void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
+ DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
+ AccessSpecifier AS, bool EnteringContext,
+ DeclSpecContext DSC);
+ void ParseCXXMemberSpecification(SourceLocation StartLoc, unsigned TagType,
+ Decl *TagDecl);
+ ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
+ SourceLocation &EqualLoc);
+ void ParseCXXClassMemberDeclaration(AccessSpecifier AS, AttributeList *Attr,
+ const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
+ ParsingDeclRAIIObject *DiagsFromTParams = 0);
+ void ParseConstructorInitializer(Decl *ConstructorDecl);
+ MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
+ void HandleMemberFunctionDefaultArgs(Declarator& DeclaratorInfo,
+ Decl *ThisDecl);
+
+ //===--------------------------------------------------------------------===//
+ // C++ 10: Derived classes [class.derived]
+ TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
+ SourceLocation &EndLocation);
+ void ParseBaseClause(Decl *ClassDecl);
+ BaseResult ParseBaseSpecifier(Decl *ClassDecl);
+ AccessSpecifier getAccessSpecifierIfPresent() const;
+
+ bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ bool EnteringContext,
+ ParsedType ObjectType,
+ UnqualifiedId &Id,
+ bool AssumeTemplateId);
+ bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
+ ParsedType ObjectType,
+ UnqualifiedId &Result);
+ bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
+ bool AllowDestructorName,
+ bool AllowConstructorName,
+ ParsedType ObjectType,
+ SourceLocation& TemplateKWLoc,
+ UnqualifiedId &Result);
+
+ //===--------------------------------------------------------------------===//
+ // C++ 14: Templates [temp]
+
+ // C++ 14.1: Template Parameters [temp.param]
+ Decl *ParseDeclarationStartingWithTemplate(unsigned Context,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS = AS_none,
+ AttributeList *AccessAttrs = 0);
+ Decl *ParseTemplateDeclarationOrSpecialization(unsigned Context,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS,
+ AttributeList *AccessAttrs);
+ Decl *ParseSingleDeclarationAfterTemplate(
+ unsigned Context,
+ const ParsedTemplateInfo &TemplateInfo,
+ ParsingDeclRAIIObject &DiagsFromParams,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS=AS_none,
+ AttributeList *AccessAttrs = 0);
+ bool ParseTemplateParameters(unsigned Depth,
+ SmallVectorImpl<Decl*> &TemplateParams,
+ SourceLocation &LAngleLoc,
+ SourceLocation &RAngleLoc);
+ bool ParseTemplateParameterList(unsigned Depth,
+ SmallVectorImpl<Decl*> &TemplateParams);
+ bool isStartOfTemplateTypeParameter();
+ Decl *ParseTemplateParameter(unsigned Depth, unsigned Position);
+ Decl *ParseTypeParameter(unsigned Depth, unsigned Position);
+ Decl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
+ Decl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
+ // C++ 14.3: Template arguments [temp.arg]
+ typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
+
+ bool ParseTemplateIdAfterTemplateName(TemplateTy Template,
+ SourceLocation TemplateNameLoc,
+ const CXXScopeSpec &SS,
+ bool ConsumeLastToken,
+ SourceLocation &LAngleLoc,
+ TemplateArgList &TemplateArgs,
+ SourceLocation &RAngleLoc);
+
+ bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ UnqualifiedId &TemplateName,
+ bool AllowTypeAnnotation = true);
+ void AnnotateTemplateIdTokenAsType();
+ bool IsTemplateArgumentList(unsigned Skip = 0);
+ bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
+ ParsedTemplateArgument ParseTemplateTemplateArgument();
+ ParsedTemplateArgument ParseTemplateArgument();
+ Decl *ParseExplicitInstantiation(unsigned Context,
+ SourceLocation ExternLoc,
+ SourceLocation TemplateLoc,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS = AS_none);
+
+ //===--------------------------------------------------------------------===//
+ // Modules
+ DeclGroupPtrTy ParseModuleImport(SourceLocation AtLoc);
+
+ //===--------------------------------------------------------------------===//
+ // GNU G++: Type Traits [Type-Traits.html in the GCC manual]
+ ExprResult ParseUnaryTypeTrait();
+ ExprResult ParseBinaryTypeTrait();
+ ExprResult ParseTypeTrait();
+
+ //===--------------------------------------------------------------------===//
+ // Embarcadero: Array and Expression Traits
+ ExprResult ParseArrayTypeTrait();
+ ExprResult ParseExpressionTrait();
+
+ //===--------------------------------------------------------------------===//
+ // Preprocessor code-completion pass-through
+ virtual void CodeCompleteDirective(bool InConditional);
+ virtual void CodeCompleteInConditionalExclusion();
+ virtual void CodeCompleteMacroName(bool IsDefinition);
+ virtual void CodeCompletePreprocessorExpression();
+ virtual void CodeCompleteMacroArgument(IdentifierInfo *Macro,
+ MacroInfo *MacroInfo,
+ unsigned ArgumentIndex);
+ virtual void CodeCompleteNaturalLanguage();
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h b/contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h
new file mode 100644
index 0000000..c9c92e3
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/ASTConsumers.h
@@ -0,0 +1,48 @@
+//===--- ASTConsumers.h - ASTConsumer implementations -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// AST Consumers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef REWRITE_ASTCONSUMERS_H
+#define REWRITE_ASTCONSUMERS_H
+
+#include "clang/Basic/LLVM.h"
+#include <string>
+
+namespace clang {
+
+class ASTConsumer;
+class DiagnosticsEngine;
+class LangOptions;
+class Preprocessor;
+
+// ObjC rewriter: attempts to rewrite ObjC constructs into pure C code.
+// This is considered experimental, and only works with Apple's ObjC runtime.
+ASTConsumer *CreateObjCRewriter(const std::string &InFile,
+ raw_ostream *OS,
+ DiagnosticsEngine &Diags,
+ const LangOptions &LOpts,
+ bool SilenceRewriteMacroWarning);
+ASTConsumer *CreateModernObjCRewriter(const std::string &InFile,
+ raw_ostream *OS,
+ DiagnosticsEngine &Diags,
+ const LangOptions &LOpts,
+ bool SilenceRewriteMacroWarning);
+
+/// CreateHTMLPrinter - Create an AST consumer which rewrites source code to
+/// HTML with syntax highlighting suitable for viewing in a web-browser.
+ASTConsumer *CreateHTMLPrinter(raw_ostream *OS, Preprocessor &PP,
+ bool SyntaxHighlight = true,
+ bool HighlightMacros = true);
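+
+// A minimal usage sketch; 'CI' is an assumed, already-initialized
+// CompilerInstance and 'OS' an output stream owned by the caller:
+//
+// \code
+//   ASTConsumer *Consumer = CreateHTMLPrinter(OS, CI.getPreprocessor());
+// \endcode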
+
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/DeltaTree.h b/contrib/llvm/tools/clang/include/clang/Rewrite/DeltaTree.h
new file mode 100644
index 0000000..f32906a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/DeltaTree.h
@@ -0,0 +1,48 @@
+//===--- DeltaTree.h - B-Tree for Rewrite Delta tracking --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DeltaTree class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_REWRITE_DELTATREE_H
+#define CLANG_REWRITE_DELTATREE_H
+
+namespace clang {
+
+ /// DeltaTree - a multiway search tree (BTree) structure with some fancy
+ /// features. B-Trees are generally more memory and cache efficient than
+ /// binary trees, because they store multiple keys/values in each node. This
+ /// implements a key/value mapping from index to delta, and allows fast lookup
+ /// on index. However, an added (important) bonus is that it can also
+ /// efficiently tell us the full accumulated delta for a specific file offset
+ /// as well, without traversing the whole tree.
+ class DeltaTree {
+ void *Root; // "DeltaTreeNode *"
+ void operator=(const DeltaTree&); // DO NOT IMPLEMENT
+ public:
+ DeltaTree();
+
+ // Note: Currently we only support copying when the RHS is empty.
+ DeltaTree(const DeltaTree &RHS);
+ ~DeltaTree();
+
+ /// getDeltaAt - Return the accumulated delta at the specified file offset.
+ /// This includes all insertions or deletions that occurred *before* the
+ /// specified file index.
+ int getDeltaAt(unsigned FileIndex) const;
+
+ /// AddDelta - When a change is made that shifts around the text buffer,
+ /// this method is used to record that info. It inserts a delta of 'Delta'
+ /// into the current DeltaTree at offset FileIndex.
+ void AddDelta(unsigned FileIndex, int Delta);
+ };
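+
+ // A minimal usage sketch (the offsets and deltas are illustrative):
+ //
+ // \code
+ //   DeltaTree DT;
+ //   DT.AddDelta(10, +4);        // 4 bytes inserted at file offset 10
+ //   DT.AddDelta(50, -2);        // 2 bytes deleted at file offset 50
+ //   int D1 = DT.getDeltaAt(40); // +4: only the first change is before 40
+ //   int D2 = DT.getDeltaAt(60); // +2: both changes are before 60
+ // \endcode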
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h b/contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h
new file mode 100644
index 0000000..44f0611
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/FixItRewriter.h
@@ -0,0 +1,130 @@
+//===--- FixItRewriter.h - Fix-It Rewriter Diagnostic Client ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a diagnostic client adaptor that performs rewrites as
+// suggested by code modification hints attached to diagnostics. It
+// then forwards any diagnostics to the adapted diagnostic client.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_REWRITE_FIX_IT_REWRITER_H
+#define LLVM_CLANG_REWRITE_FIX_IT_REWRITER_H
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/Edit/EditedSource.h"
+
+namespace clang {
+
+class SourceManager;
+class FileEntry;
+
+class FixItOptions {
+public:
+ FixItOptions() : FixWhatYouCan(false),
+ FixOnlyWarnings(false), Silent(false) { }
+
+ virtual ~FixItOptions();
+
+ /// \brief This file is about to be rewritten. Return the name of the file
+ /// that is okay to write to.
+ ///
+ /// \param fd out parameter for file descriptor. After the call it may be set
+ /// to an open file descriptor for the returned filename, or it will be -1
+ /// otherwise.
+ ///
+ virtual std::string RewriteFilename(const std::string &Filename, int &fd) = 0;
+
+ /// \brief Whether to abort fixing a file when not all errors could be fixed.
+ bool FixWhatYouCan;
+
+ /// \brief Whether to only fix warnings and not errors.
+ bool FixOnlyWarnings;
+
+ /// \brief If true, only pass the diagnostic to the actual diagnostic consumer
+ /// if it is an error or a fixit was applied as part of the diagnostic.
+ /// It basically silences warnings without accompanying fixits.
+ bool Silent;
+};
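+
+// A minimal sketch of a concrete FixItOptions that rewrites each file in
+// place (the class name is illustrative, not part of this header):
+//
+// \code
+//   class InPlaceFixItOptions : public FixItOptions {
+//   public:
+//     std::string RewriteFilename(const std::string &Filename, int &fd) {
+//       fd = -1;          // no file descriptor was opened here
+//       return Filename;  // write the fixed output over the original file
+//     }
+//   };
+// \endcode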
+
+class FixItRewriter : public DiagnosticConsumer {
+ /// \brief The diagnostics machinery.
+ DiagnosticsEngine &Diags;
+
+ edit::EditedSource Editor;
+
+ /// \brief The rewriter used to perform the various code
+ /// modifications.
+ Rewriter Rewrite;
+
+ /// \brief The diagnostic client that performs the actual formatting
+ /// of error messages.
+ DiagnosticConsumer *Client;
+ bool OwnsClient;
+
+ /// \brief Turn an input path into an output path. NULL implies overwriting
+ /// the original.
+ FixItOptions *FixItOpts;
+
+ /// \brief The number of rewriter failures.
+ unsigned NumFailures;
+
+ /// \brief Whether the previous diagnostic was not passed to the consumer.
+ bool PrevDiagSilenced;
+
+public:
+ typedef Rewriter::buffer_iterator iterator;
+
+ /// \brief Initialize a new fix-it rewriter.
+ FixItRewriter(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
+ const LangOptions &LangOpts, FixItOptions *FixItOpts);
+
+ /// \brief Destroy the fix-it rewriter.
+ ~FixItRewriter();
+
+ /// \brief Check whether there are modifications for a given file.
+ bool IsModified(FileID ID) const {
+ return Rewrite.getRewriteBufferFor(ID) != NULL;
+ }
+
+ // Iteration over files with changes.
+ iterator buffer_begin() { return Rewrite.buffer_begin(); }
+ iterator buffer_end() { return Rewrite.buffer_end(); }
+
+ /// \brief Write a single modified source file.
+ ///
+ /// \returns true if there was an error, false otherwise.
+ bool WriteFixedFile(FileID ID, raw_ostream &OS);
+
+ /// \brief Write the modified source files.
+ ///
+ /// \returns true if there was an error, false otherwise.
+ bool WriteFixedFiles(
+ std::vector<std::pair<std::string, std::string> > *RewrittenFiles = 0);
+
+ /// IncludeInDiagnosticCounts - This method (whose default implementation
+ /// returns true) indicates whether the diagnostics handled by this
+ /// DiagnosticConsumer should be included in the number of diagnostics
+ /// reported by DiagnosticsEngine.
+ virtual bool IncludeInDiagnosticCounts() const;
+
+ /// HandleDiagnostic - Handle this diagnostic, reporting it to the user or
+ /// capturing it to a log as needed.
+ virtual void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info);
+
+ /// \brief Emit a diagnostic via the adapted diagnostic client.
+ void Diag(SourceLocation Loc, unsigned DiagID);
+
+ DiagnosticConsumer *clone(DiagnosticsEngine &Diags) const;
+};
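+
+// A minimal usage sketch; 'CI' is an assumed, fully-configured
+// CompilerInstance and InPlaceFixItOptions the sketch shown earlier:
+//
+// \code
+//   InPlaceFixItOptions Opts;
+//   FixItRewriter Rewriter(CI.getDiagnostics(), CI.getSourceManager(),
+//                          CI.getLangOpts(), &Opts);
+//   // ... run the compilation; diagnostics and their fix-its are routed
+//   // through this consumer ...
+//   bool HadErrors = Rewriter.WriteFixedFiles(); // true if writing failed
+// \endcode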
+
+}
+
+#endif // LLVM_CLANG_REWRITE_FIX_IT_REWRITER_H
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h b/contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h
new file mode 100644
index 0000000..6e9ecac
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/FrontendActions.h
@@ -0,0 +1,78 @@
+//===-- FrontendActions.h - Useful Frontend Actions -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_REWRITE_FRONTENDACTIONS_H
+#define LLVM_CLANG_REWRITE_FRONTENDACTIONS_H
+
+#include "clang/Frontend/FrontendAction.h"
+
+namespace clang {
+class FixItRewriter;
+class FixItOptions;
+
+//===----------------------------------------------------------------------===//
+// AST Consumer Actions
+//===----------------------------------------------------------------------===//
+
+class HTMLPrintAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+};
+
+class FixItAction : public ASTFrontendAction {
+protected:
+ OwningPtr<FixItRewriter> Rewriter;
+ OwningPtr<FixItOptions> FixItOpts;
+
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+
+ virtual bool BeginSourceFileAction(CompilerInstance &CI,
+ StringRef Filename);
+
+ virtual void EndSourceFileAction();
+
+ virtual bool hasASTFileSupport() const { return false; }
+
+public:
+ FixItAction();
+ ~FixItAction();
+};
+
+/// \brief Emits changes to temporary files and uses them for the original
+/// frontend action.
+class FixItRecompile : public WrapperFrontendAction {
+public:
+ FixItRecompile(FrontendAction *WrappedAction)
+ : WrapperFrontendAction(WrappedAction) {}
+
+protected:
+ virtual bool BeginInvocation(CompilerInstance &CI);
+};
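+
+// A minimal sketch of wrapping another action; SyntaxOnlyAction is assumed to
+// be available from clang/Frontend/FrontendActions.h:
+//
+// \code
+//   FrontendAction *Act = new FixItRecompile(new SyntaxOnlyAction());
+// \endcode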
+
+class RewriteObjCAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+};
+
+class RewriteMacrosAction : public PreprocessorFrontendAction {
+protected:
+ void ExecuteAction();
+};
+
+class RewriteTestAction : public PreprocessorFrontendAction {
+protected:
+ void ExecuteAction();
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/HTMLRewrite.h b/contrib/llvm/tools/clang/include/clang/Rewrite/HTMLRewrite.h
new file mode 100644
index 0000000..88caf85
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/HTMLRewrite.h
@@ -0,0 +1,81 @@
+//==- HTMLRewrite.h - Translate source code into prettified HTML ---*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of functions used for translating source code
+// into beautified HTML.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_HTMLREWRITER_H
+#define LLVM_CLANG_HTMLREWRITER_H
+
+#include "clang/Basic/SourceLocation.h"
+#include <string>
+
+namespace clang {
+
+class Rewriter;
+class RewriteBuffer;
+class Preprocessor;
+
+namespace html {
+
+ /// HighlightRange - Highlight a range in the source code with the specified
+ /// start/end tags. B/E must be in the same file. This ensures that
+ /// start/end tags are placed at the start/end of each line if the range is
+ /// multiline.
+ void HighlightRange(Rewriter &R, SourceLocation B, SourceLocation E,
+ const char *StartTag, const char *EndTag);
+
+ /// HighlightRange - Highlight a range in the source code with the specified
+ /// start/end tags. The start/end of the range must be in the same file.
+ /// This ensures that start/end tags are placed at the start/end of each line
+ /// if the range is multiline.
+ inline void HighlightRange(Rewriter &R, SourceRange Range,
+ const char *StartTag, const char *EndTag) {
+ HighlightRange(R, Range.getBegin(), Range.getEnd(), StartTag, EndTag);
+ }
+
+ /// HighlightRange - This is the same as the above method, but takes
+ /// decomposed file locations.
+ void HighlightRange(RewriteBuffer &RB, unsigned B, unsigned E,
+ const char *BufferStart,
+ const char *StartTag, const char *EndTag);
+
+ /// EscapeText - HTMLize a specified file so that special characters are
+ /// translated and will not be interpreted as HTML tags.
+ void EscapeText(Rewriter& R, FileID FID,
+ bool EscapeSpaces = false, bool ReplaceTabs = false);
+
+ /// EscapeText - HTMLize the provided string so that special characters
+ /// in 's' are not interpreted as HTML tags. Unlike the version of
+ /// EscapeText that rewrites a file, this version by default replaces tabs
+ /// with spaces.
+ std::string EscapeText(const std::string& s,
+ bool EscapeSpaces = false, bool ReplaceTabs = false);
+
+ void AddLineNumbers(Rewriter& R, FileID FID);
+
+ void AddHeaderFooterInternalBuiltinCSS(Rewriter& R, FileID FID,
+ const char *title = NULL);
+
+ /// SyntaxHighlight - Relex the specified FileID and annotate the HTML with
+ /// information about keywords, comments, etc.
+ void SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP);
+
+ /// HighlightMacros - This uses the macro table state from the end of the
+ /// file, to reexpand macros and insert (into the HTML) information about the
+ /// macro expansions. This won't be perfect, but it will be
+ /// reasonably close.
+ void HighlightMacros(Rewriter &R, FileID FID, const Preprocessor &PP);
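+
+ // One plausible way a consumer might combine these helpers to emit a
+ // highlighted HTML view of a file; 'R', 'FID' and 'PP' are assumed to be
+ // set up by the caller, and the ordering is only a sketch:
+ //
+ // \code
+ //   html::SyntaxHighlight(R, FID, PP);
+ //   html::HighlightMacros(R, FID, PP);
+ //   html::EscapeText(R, FID, /*EscapeSpaces=*/false, /*ReplaceTabs=*/true);
+ //   html::AddLineNumbers(R, FID);
+ //   html::AddHeaderFooterInternalBuiltinCSS(R, FID, "example.c");
+ // \endcode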
+
+} // end html namespace
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/RewriteRope.h b/contrib/llvm/tools/clang/include/clang/Rewrite/RewriteRope.h
new file mode 100644
index 0000000..cb3f8a8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/RewriteRope.h
@@ -0,0 +1,231 @@
+//===--- RewriteRope.h - Rope specialized for rewriter ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RewriteRope class, which is a powerful string class.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_REWRITEROPE_H
+#define LLVM_CLANG_REWRITEROPE_H
+
+#include <cstring>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+
+namespace clang {
+ //===--------------------------------------------------------------------===//
+ // RopeRefCountString Class
+ //===--------------------------------------------------------------------===//
+
+ /// RopeRefCountString - This struct is allocated with 'new char[]' from the
+ /// heap, and represents a reference counted chunk of string data. When its
+ /// ref count drops to zero, it is delete[]'d. This is primarily managed
+ /// through the RopePiece class below.
+ struct RopeRefCountString {
+ unsigned RefCount;
+ char Data[1]; // Variable sized.
+
+ void addRef() {
+ if (this) ++RefCount;
+ }
+
+ void dropRef() {
+ if (this && --RefCount == 0)
+ delete [] (char*)this;
+ }
+ };
+
+ //===--------------------------------------------------------------------===//
+ // RopePiece Class
+ //===--------------------------------------------------------------------===//
+
+ /// RopePiece - This class represents a view into a RopeRefCountString object.
+ /// This allows references to string data to be efficiently chopped up and
+ /// moved around without having to push around the string data itself.
+ ///
+ /// For example, we could have a 1M RopePiece and want to insert something
+ /// into the middle of it. To do this, we split it into two RopePiece objects
+ /// that both refer to the same underlying RopeRefCountString (just with
+ /// different offsets) which is a nice constant time operation.
+ struct RopePiece {
+ RopeRefCountString *StrData;
+ unsigned StartOffs;
+ unsigned EndOffs;
+
+ RopePiece() : StrData(0), StartOffs(0), EndOffs(0) {}
+
+ RopePiece(RopeRefCountString *Str, unsigned Start, unsigned End)
+ : StrData(Str), StartOffs(Start), EndOffs(End) {
+ StrData->addRef();
+ }
+ RopePiece(const RopePiece &RP)
+ : StrData(RP.StrData), StartOffs(RP.StartOffs), EndOffs(RP.EndOffs) {
+ StrData->addRef();
+ }
+
+ ~RopePiece() {
+ StrData->dropRef();
+ }
+
+ void operator=(const RopePiece &RHS) {
+ if (StrData != RHS.StrData) {
+ StrData->dropRef();
+ StrData = RHS.StrData;
+ StrData->addRef();
+ }
+ StartOffs = RHS.StartOffs;
+ EndOffs = RHS.EndOffs;
+ }
+
+ const char &operator[](unsigned Offset) const {
+ return StrData->Data[Offset+StartOffs];
+ }
+ char &operator[](unsigned Offset) {
+ return StrData->Data[Offset+StartOffs];
+ }
+
+ unsigned size() const { return EndOffs-StartOffs; }
+ };
+
+ //===--------------------------------------------------------------------===//
+ // RopePieceBTreeIterator Class
+ //===--------------------------------------------------------------------===//
+
+ /// RopePieceBTreeIterator - This class provides read-only forward iteration
+ /// over bytes that are in a RopePieceBTree. This first iterates over bytes
+ /// in a RopePiece, then over the RopePieces in a RopePieceBTreeLeaf, and
+ /// finally over the RopePieceBTreeLeaf nodes in a RopePieceBTree.
+ class RopePieceBTreeIterator :
+ public std::iterator<std::forward_iterator_tag, const char, ptrdiff_t> {
+ /// CurNode - The current B+Tree node that we are inspecting.
+ const void /*RopePieceBTreeLeaf*/ *CurNode;
+ /// CurPiece - The current RopePiece in the B+Tree node that we're
+ /// inspecting.
+ const RopePiece *CurPiece;
+ /// CurChar - The current byte in the RopePiece we are pointing to.
+ unsigned CurChar;
+ public:
+ // begin iterator.
+ RopePieceBTreeIterator(const void /*RopePieceBTreeNode*/ *N);
+ // end iterator
+ RopePieceBTreeIterator() : CurNode(0), CurPiece(0), CurChar(0) {}
+
+ char operator*() const {
+ return (*CurPiece)[CurChar];
+ }
+
+ bool operator==(const RopePieceBTreeIterator &RHS) const {
+ return CurPiece == RHS.CurPiece && CurChar == RHS.CurChar;
+ }
+ bool operator!=(const RopePieceBTreeIterator &RHS) const {
+ return !operator==(RHS);
+ }
+
+ RopePieceBTreeIterator& operator++() { // Preincrement
+ if (CurChar+1 < CurPiece->size())
+ ++CurChar;
+ else
+ MoveToNextPiece();
+ return *this;
+ }
+ inline RopePieceBTreeIterator operator++(int) { // Postincrement
+ RopePieceBTreeIterator tmp = *this; ++*this; return tmp;
+ }
+ private:
+ void MoveToNextPiece();
+ };
+
+ //===--------------------------------------------------------------------===//
+ // RopePieceBTree Class
+ //===--------------------------------------------------------------------===//
+
+ class RopePieceBTree {
+ void /*RopePieceBTreeNode*/ *Root;
+ void operator=(const RopePieceBTree &); // DO NOT IMPLEMENT
+ public:
+ RopePieceBTree();
+ RopePieceBTree(const RopePieceBTree &RHS);
+ ~RopePieceBTree();
+
+ typedef RopePieceBTreeIterator iterator;
+ iterator begin() const { return iterator(Root); }
+ iterator end() const { return iterator(); }
+ unsigned size() const;
+ unsigned empty() const { return size() == 0; }
+
+ void clear();
+
+ void insert(unsigned Offset, const RopePiece &R);
+
+ void erase(unsigned Offset, unsigned NumBytes);
+ };
+
+ //===--------------------------------------------------------------------===//
+ // RewriteRope Class
+ //===--------------------------------------------------------------------===//
+
+/// RewriteRope - A powerful string class. This class supports extremely
+/// efficient insertions and deletions into the middle of it, even for
+/// ridiculously long strings.
+class RewriteRope {
+ RopePieceBTree Chunks;
+
+ /// We allocate space for string data out of a buffer of size AllocChunkSize.
+ /// These members keep track of the current allocation buffer and how much of
+ /// it has been used.
+ RopeRefCountString *AllocBuffer;
+ unsigned AllocOffs;
+ enum { AllocChunkSize = 4080 };
+
+public:
+ RewriteRope() : AllocBuffer(0), AllocOffs(AllocChunkSize) {}
+ RewriteRope(const RewriteRope &RHS)
+ : Chunks(RHS.Chunks), AllocBuffer(0), AllocOffs(AllocChunkSize) {
+ }
+
+ ~RewriteRope() {
+ // If we had an allocation buffer, drop our reference to it.
+ AllocBuffer->dropRef();
+ }
+
+ typedef RopePieceBTree::iterator iterator;
+ typedef RopePieceBTree::iterator const_iterator;
+ iterator begin() const { return Chunks.begin(); }
+ iterator end() const { return Chunks.end(); }
+ unsigned size() const { return Chunks.size(); }
+
+ void clear() {
+ Chunks.clear();
+ }
+
+ void assign(const char *Start, const char *End) {
+ clear();
+ if (Start != End)
+ Chunks.insert(0, MakeRopeString(Start, End));
+ }
+
+ void insert(unsigned Offset, const char *Start, const char *End) {
+ assert(Offset <= size() && "Invalid position to insert!");
+ if (Start == End) return;
+ Chunks.insert(Offset, MakeRopeString(Start, End));
+ }
+
+ void erase(unsigned Offset, unsigned NumBytes) {
+ assert(Offset+NumBytes <= size() && "Invalid region to erase!");
+ if (NumBytes == 0) return;
+ Chunks.erase(Offset, NumBytes);
+ }
+
+private:
+ RopePiece MakeRopeString(const char *Start, const char *End);
+};
+
+} // end namespace clang
+
+#endif
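
The comments above describe the internal piece-splitting trick; at the interface level, RewriteRope simply behaves like a byte buffer with cheap insertions and deletions in the middle. Below is a minimal standalone sketch (not part of the patch) that exercises only the members declared in this header; it assumes the clang Rewrite library is available to link against, and the strings and offsets are illustrative.

#include "clang/Rewrite/RewriteRope.h"
#include <cstdio>
#include <string>

int main() {
  clang::RewriteRope Rope;

  const char *Text = "hello world";
  Rope.assign(Text, Text + 11);     // rope now holds "hello world"

  const char *Ins = ", dear";
  Rope.insert(5, Ins, Ins + 6);     // "hello, dear world"
  Rope.erase(7, 5);                 // "hello, world"

  // Read the bytes back out through the forward iterator.
  std::string Result(Rope.begin(), Rope.end());
  std::printf("%s (%u bytes)\n", Result.c_str(), Rope.size());
  return 0;
}
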
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriter.h b/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriter.h
new file mode 100644
index 0000000..f1358a0
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriter.h
@@ -0,0 +1,288 @@
+//===--- Rewriter.h - Code rewriting interface ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Rewriter class, which is used for code
+// transformations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_REWRITER_H
+#define LLVM_CLANG_REWRITER_H
+
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Rewrite/DeltaTree.h"
+#include "clang/Rewrite/RewriteRope.h"
+#include "llvm/ADT/StringRef.h"
+#include <cstring>
+#include <map>
+#include <string>
+
+namespace clang {
+ class LangOptions;
+ class Rewriter;
+ class SourceManager;
+ class Stmt;
+
+/// RewriteBuffer - As code is rewritten, SourceBuffer's from the original
+/// input with modifications get a new RewriteBuffer associated with them. The
+/// RewriteBuffer captures the modified text itself as well as information used
+/// to map between SourceLocation's in the original input and offsets in the
+/// RewriteBuffer. For example, if text is inserted into the buffer, any
+/// locations after the insertion point have to be mapped.
+class RewriteBuffer {
+ friend class Rewriter;
+ /// Deltas - Keep track of all the deltas in the source code due to insertions
+ /// and deletions.
+ DeltaTree Deltas;
+
+ /// Buffer - This is the actual buffer itself. Note that using a vector or
+ /// string is a horribly inefficient way to do this, we should use a rope
+ /// instead.
+ typedef RewriteRope BufferTy;
+ BufferTy Buffer;
+public:
+ typedef BufferTy::const_iterator iterator;
+ iterator begin() const { return Buffer.begin(); }
+ iterator end() const { return Buffer.end(); }
+ unsigned size() const { return Buffer.size(); }
+
+ raw_ostream &write(raw_ostream &) const;
+
+ /// RemoveText - Remove the specified text.
+ void RemoveText(unsigned OrigOffset, unsigned Size,
+ bool removeLineIfEmpty = false);
+
+ /// InsertText - Insert some text at the specified point, where the offset in
+ /// the buffer is specified relative to the original SourceBuffer. The
+ /// text is inserted after the specified location.
+ ///
+ void InsertText(unsigned OrigOffset, StringRef Str,
+ bool InsertAfter = true);
+
+
+ /// InsertTextBefore - Insert some text before the specified point, where the
+ /// offset in the buffer is specified relative to the original
+ /// SourceBuffer. The text is inserted before the specified location. This
+ /// method is the same as InsertText with "InsertAfter == false".
+ void InsertTextBefore(unsigned OrigOffset, StringRef Str) {
+ InsertText(OrigOffset, Str, false);
+ }
+
+ /// InsertTextAfter - Insert some text at the specified point, where the
+ /// offset in the buffer is specified relative to the original SourceBuffer.
+ /// The text is inserted after the specified location.
+ void InsertTextAfter(unsigned OrigOffset, StringRef Str) {
+ InsertText(OrigOffset, Str);
+ }
+
+ /// ReplaceText - This method replaces a range of characters in the input
+ /// buffer with a new string. This is effectively a combined "remove/insert"
+ /// operation.
+ void ReplaceText(unsigned OrigOffset, unsigned OrigLength,
+ StringRef NewStr);
+
+private: // Methods only usable by Rewriter.
+
+ /// Initialize - Start this rewrite buffer out with a copy of the unmodified
+ /// input buffer.
+ void Initialize(const char *BufStart, const char *BufEnd) {
+ Buffer.assign(BufStart, BufEnd);
+ }
+
+ /// getMappedOffset - Given an offset into the original SourceBuffer that this
+ /// RewriteBuffer is based on, map it into the offset space of the
+ /// RewriteBuffer. If AfterInserts is true and if the OrigOffset indicates a
+ /// position where text is inserted, the location returned will be after any
+ /// inserted text at the position.
+ unsigned getMappedOffset(unsigned OrigOffset,
+ bool AfterInserts = false) const{
+ return Deltas.getDeltaAt(2*OrigOffset+AfterInserts)+OrigOffset;
+ }
+
+ /// AddInsertDelta - When an insertion is made at a position, this
+ /// method is used to record that information.
+ void AddInsertDelta(unsigned OrigOffset, int Change) {
+ return Deltas.AddDelta(2*OrigOffset, Change);
+ }
+
+ /// AddReplaceDelta - When a replacement/deletion is made at a position, this
+ /// method is used to record that information.
+ void AddReplaceDelta(unsigned OrigOffset, int Change) {
+ return Deltas.AddDelta(2*OrigOffset+1, Change);
+ }
+};
+
+
+/// Rewriter - This is the main interface to the rewrite buffers. Its primary
+/// job is to dispatch high-level requests to the low-level RewriteBuffers that
+/// are involved.
+class Rewriter {
+ SourceManager *SourceMgr;
+ const LangOptions *LangOpts;
+ std::map<FileID, RewriteBuffer> RewriteBuffers;
+public:
+ struct RewriteOptions {
+ /// \brief If true, treat previous inserts at the beginning of a given
+ /// source range as part of the range itself (true by default).
+ bool IncludeInsertsAtBeginOfRange;
+ /// \brief If true, treat previous inserts at the end of a given source
+ /// range as part of the range itself (true by default).
+ bool IncludeInsertsAtEndOfRange;
+ /// \brief If true and removing some text leaves a blank line, also remove
+ /// the empty line (false by default).
+ bool RemoveLineIfEmpty;
+
+ RewriteOptions()
+ : IncludeInsertsAtBeginOfRange(true),
+ IncludeInsertsAtEndOfRange(true),
+ RemoveLineIfEmpty(false) { }
+ };
+
+ typedef std::map<FileID, RewriteBuffer>::iterator buffer_iterator;
+
+ explicit Rewriter(SourceManager &SM, const LangOptions &LO)
+ : SourceMgr(&SM), LangOpts(&LO) {}
+ explicit Rewriter() : SourceMgr(0), LangOpts(0) {}
+
+ void setSourceMgr(SourceManager &SM, const LangOptions &LO) {
+ SourceMgr = &SM;
+ LangOpts = &LO;
+ }
+ SourceManager &getSourceMgr() const { return *SourceMgr; }
+ const LangOptions &getLangOpts() const { return *LangOpts; }
+
+ /// isRewritable - Return true if this location is a raw file location, which
+ /// is rewritable. Locations from macros, etc are not rewritable.
+ static bool isRewritable(SourceLocation Loc) {
+ return Loc.isFileID();
+ }
+
+ /// getRangeSize - Return the size in bytes of the specified range if they
+ /// are in the same file. If not, this returns -1.
+ int getRangeSize(SourceRange Range,
+ RewriteOptions opts = RewriteOptions()) const;
+ int getRangeSize(const CharSourceRange &Range,
+ RewriteOptions opts = RewriteOptions()) const;
+
+ /// getRewrittenText - Return the rewritten form of the text in the specified
+ /// range. If the start or end of the range was unrewritable or if they are
+ /// in different buffers, this returns an empty string.
+ ///
+ /// Note that this method is not particularly efficient.
+ ///
+ std::string getRewrittenText(SourceRange Range) const;
+
+ /// InsertText - Insert the specified string at the specified location in the
+ /// original buffer. This method returns true (and does nothing) if the input
+ /// location was not rewritable, false otherwise.
+ ///
+ /// \param indentNewLines if true new lines in the string are indented
+ /// using the indentation of the source line in position \arg Loc.
+ bool InsertText(SourceLocation Loc, StringRef Str,
+ bool InsertAfter = true, bool indentNewLines = false);
+
+ /// InsertTextAfter - Insert the specified string at the specified location in
+ /// the original buffer. This method returns true (and does nothing) if
+ /// the input location was not rewritable, false otherwise. Text is
+ /// inserted after any other text that has been previously inserted
+ /// at the same point (the default behavior for InsertText).
+ bool InsertTextAfter(SourceLocation Loc, StringRef Str) {
+ return InsertText(Loc, Str);
+ }
+
+ /// \brief Insert the specified string after the token in the
+ /// specified location.
+ bool InsertTextAfterToken(SourceLocation Loc, StringRef Str);
+
+ /// InsertTextBefore - Insert the specified string at the specified location
+ /// in the original buffer. This method returns true (and does nothing) if
+ /// the input location was not rewritable, false otherwise. Text is inserted
+ /// before any other text that has been previously inserted at the same
+ /// point.
+ bool InsertTextBefore(SourceLocation Loc, StringRef Str) {
+ return InsertText(Loc, Str, false);
+ }
+
+ /// RemoveText - Remove the specified text region.
+ bool RemoveText(SourceLocation Start, unsigned Length,
+ RewriteOptions opts = RewriteOptions());
+
+ /// \brief Remove the specified text region.
+ bool RemoveText(CharSourceRange range,
+ RewriteOptions opts = RewriteOptions()) {
+ return RemoveText(range.getBegin(), getRangeSize(range, opts), opts);
+ }
+
+ /// \brief Remove the specified text region.
+ bool RemoveText(SourceRange range, RewriteOptions opts = RewriteOptions()) {
+ return RemoveText(range.getBegin(), getRangeSize(range, opts), opts);
+ }
+
+ /// ReplaceText - This method replaces a range of characters in the input
+ /// buffer with a new string. This is effectively a combined "remove/insert"
+ /// operation.
+ bool ReplaceText(SourceLocation Start, unsigned OrigLength,
+ StringRef NewStr);
+
+ /// ReplaceText - This method replaces a range of characters in the input
+ /// buffer with a new string. This is effectively a combined "remove/insert"
+ /// operation.
+ bool ReplaceText(SourceRange range, StringRef NewStr) {
+ return ReplaceText(range.getBegin(), getRangeSize(range), NewStr);
+ }
+
+ /// ReplaceText - This method replaces a range of characters in the input
+ /// buffer with a new string. This is effectively a combined "remove/insert"
+ /// operation.
+ bool ReplaceText(SourceRange range, SourceRange replacementRange);
+
+ /// ReplaceStmt - This replaces a Stmt/Expr with another, using the pretty
+ /// printer to generate the replacement code. This returns true if the input
+ /// could not be rewritten, or false if successful.
+ bool ReplaceStmt(Stmt *From, Stmt *To);
+
+ /// \brief Increase indentation for the lines between the given source range.
+ /// To determine what the indentation should be, 'parentIndent' is used; it
+ /// should be a source location whose indentation is one level lower than
+ /// that of the given range.
+ bool IncreaseIndentation(CharSourceRange range, SourceLocation parentIndent);
+ bool IncreaseIndentation(SourceRange range, SourceLocation parentIndent) {
+ return IncreaseIndentation(CharSourceRange::getTokenRange(range),
+ parentIndent);
+ }
+
+ /// ConvertToString converts statement 'From' to a string using the
+ /// pretty printer.
+ std::string ConvertToString(Stmt *From);
+
+ /// getEditBuffer - This is like getRewriteBufferFor, but always returns a
+ /// buffer, and allows you to write on it directly. This is useful if you
+ /// want efficient low-level access to APIs for scribbling on one specific
+ /// FileID's buffer.
+ RewriteBuffer &getEditBuffer(FileID FID);
+
+ /// getRewriteBufferFor - Return the rewrite buffer for the specified FileID.
+ /// If no modification has been made to it, return null.
+ const RewriteBuffer *getRewriteBufferFor(FileID FID) const {
+ std::map<FileID, RewriteBuffer>::const_iterator I =
+ RewriteBuffers.find(FID);
+ return I == RewriteBuffers.end() ? 0 : &I->second;
+ }
+
+ // Iterators over rewrite buffers.
+ buffer_iterator buffer_begin() { return RewriteBuffers.begin(); }
+ buffer_iterator buffer_end() { return RewriteBuffers.end(); }
+
+private:
+ unsigned getLocationOffsetAndFileID(SourceLocation Loc, FileID &FID) const;
+};
+
+} // end namespace clang
+
+#endif
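
As a rough illustration of the intended call pattern (not part of the patch): a client constructs a Rewriter over an existing SourceManager/LangOptions, edits through it, and then pulls the modified text back out of the per-file RewriteBuffer. The helper name and the inserted banner below are illustrative; SourceManager::getLocForStartOfFile() is an existing accessor used here only to obtain a rewritable location.

#include "clang/Basic/SourceManager.h"
#include "clang/Rewrite/Rewriter.h"
#include "llvm/Support/raw_ostream.h"

void AnnotateFile(clang::SourceManager &SM, const clang::LangOptions &LO,
                  clang::FileID FID) {
  clang::Rewriter Rewrite(SM, LO);

  // Insert a banner at the very beginning of the file.
  clang::SourceLocation Start = SM.getLocForStartOfFile(FID);
  Rewrite.InsertTextBefore(Start, "/* annotated */\n");

  // getRewriteBufferFor() returns null if the buffer was never modified.
  if (const clang::RewriteBuffer *RB = Rewrite.getRewriteBufferFor(FID))
    RB->write(llvm::outs());
}
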
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriters.h b/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriters.h
new file mode 100644
index 0000000..203b9bc
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/Rewriters.h
@@ -0,0 +1,30 @@
+//===--- Rewriters.h - Rewriter implementations -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header contains miscellaneous utilities for various front-end actions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_REWRITE_REWRITERS_H
+#define LLVM_CLANG_REWRITE_REWRITERS_H
+
+#include "clang/Basic/LLVM.h"
+
+namespace clang {
+class Preprocessor;
+
+/// RewriteMacrosInInput - Implement -rewrite-macros mode.
+void RewriteMacrosInInput(Preprocessor &PP, raw_ostream *OS);
+
+/// DoRewriteTest - A simple test for the TokenRewriter class.
+void DoRewriteTest(Preprocessor &PP, raw_ostream *OS);
+
+} // end namespace clang
+
+#endif
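
For reference (not part of the patch), -rewrite-macros is driven by handing an already-configured Preprocessor and an output stream to the entry point declared above. A minimal sketch, with the wrapper name being illustrative:

#include "clang/Rewrite/Rewriters.h"
#include "llvm/Support/raw_ostream.h"

void RewriteMacrosToStdout(clang::Preprocessor &PP) {
  // Expand macros in the input and write the rewritten source to stdout.
  clang::RewriteMacrosInInput(PP, &llvm::outs());
}
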
diff --git a/contrib/llvm/tools/clang/include/clang/Rewrite/TokenRewriter.h b/contrib/llvm/tools/clang/include/clang/Rewrite/TokenRewriter.h
new file mode 100644
index 0000000..9ebd33a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Rewrite/TokenRewriter.h
@@ -0,0 +1,79 @@
+//===--- TokenRewriter.h - Token-based Rewriter -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TokenRewriter class, which is used for code
+// transformations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOKENREWRITER_H
+#define LLVM_CLANG_TOKENREWRITER_H
+
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/OwningPtr.h"
+#include <list>
+#include <map>
+
+namespace clang {
+ class Token;
+ class LangOptions;
+ class ScratchBuffer;
+
+ class TokenRewriter {
+ /// TokenList - This is the list of raw tokens that make up this file. Each
+ /// of these tokens has a unique SourceLocation, which is a FileID.
+ std::list<Token> TokenList;
+
+ /// TokenRefTy - This is the type used to refer to a token in the TokenList.
+ typedef std::list<Token>::iterator TokenRefTy;
+
+ /// TokenAtLoc - This map indicates which token exists at a specific
+ /// SourceLocation. Since each token has a unique SourceLocation, this is a
+ /// one to one map. The token can return its own location directly, to map
+ /// backwards.
+ std::map<SourceLocation, TokenRefTy> TokenAtLoc;
+
+ /// ScratchBuf - This is the buffer that we create scratch tokens from.
+ ///
+ OwningPtr<ScratchBuffer> ScratchBuf;
+
+ TokenRewriter(const TokenRewriter&); // DO NOT IMPLEMENT
+ void operator=(const TokenRewriter&); // DO NOT IMPLEMENT.
+ public:
+ /// TokenRewriter - This creates a TokenRewriter for the file with the
+ /// specified FileID.
+ TokenRewriter(FileID FID, SourceManager &SM, const LangOptions &LO);
+ ~TokenRewriter();
+
+ typedef std::list<Token>::const_iterator token_iterator;
+ token_iterator token_begin() const { return TokenList.begin(); }
+ token_iterator token_end() const { return TokenList.end(); }
+
+
+ token_iterator AddTokenBefore(token_iterator I, const char *Val);
+ token_iterator AddTokenAfter(token_iterator I, const char *Val) {
+ assert(I != token_end() && "Cannot insert after token_end()!");
+ return AddTokenBefore(++I, Val);
+ }
+
+ private:
+ /// RemapIterator - Convert from token_iterator (a const iterator) to
+ /// TokenRefTy (a non-const iterator).
+ TokenRefTy RemapIterator(token_iterator I);
+
+ /// AddToken - Add the specified token into the Rewriter before the other
+ /// position.
+ TokenRefTy AddToken(const Token &T, TokenRefTy Where);
+ };
+
+
+
+} // end namespace clang
+
+#endif
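
A minimal sketch (not part of the patch) of splicing a scratch token into a file's token stream through this interface. The setup objects (SourceManager, LangOptions, FileID) are assumed to come from elsewhere, and the wrapper name and inserted text are illustrative.

#include "clang/Lex/Token.h"
#include "clang/Rewrite/TokenRewriter.h"

void PrefixFirstToken(clang::FileID FID, clang::SourceManager &SM,
                      const clang::LangOptions &LO) {
  // Lexes the whole file into TokenList on construction.
  clang::TokenRewriter TR(FID, SM, LO);

  clang::TokenRewriter::token_iterator I = TR.token_begin();
  if (I == TR.token_end())
    return;                                // empty file: nothing to do

  // Lex "MARKER" into a scratch-buffer token and splice it in after *I.
  TR.AddTokenAfter(I, "MARKER");
}
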
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/AnalysisBasedWarnings.h b/contrib/llvm/tools/clang/include/clang/Sema/AnalysisBasedWarnings.h
new file mode 100644
index 0000000..eeac973
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/AnalysisBasedWarnings.h
@@ -0,0 +1,102 @@
+//=- AnalysisBasedWarnings.h - Sema warnings based on libAnalysis -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines AnalysisBasedWarnings, a worker object used by Sema
+// that issues warnings based on dataflow analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_ANALYSIS_WARNINGS_H
+#define LLVM_CLANG_SEMA_ANALYSIS_WARNINGS_H
+
+#include "llvm/ADT/DenseMap.h"
+
+namespace clang {
+
+class BlockExpr;
+class Decl;
+class FunctionDecl;
+class ObjCMethodDecl;
+class QualType;
+class Sema;
+namespace sema {
+ class FunctionScopeInfo;
+}
+
+namespace sema {
+
+class AnalysisBasedWarnings {
+public:
+ class Policy {
+ friend class AnalysisBasedWarnings;
+ // The warnings to run.
+ unsigned enableCheckFallThrough : 1;
+ unsigned enableCheckUnreachable : 1;
+ unsigned enableThreadSafetyAnalysis : 1;
+ public:
+ Policy();
+ void disableCheckFallThrough() { enableCheckFallThrough = 0; }
+ };
+
+private:
+ Sema &S;
+ Policy DefaultPolicy;
+
+ enum VisitFlag { NotVisited = 0, Visited = 1, Pending = 2 };
+ llvm::DenseMap<const FunctionDecl*, VisitFlag> VisitedFD;
+
+ /// \name Statistics
+ /// @{
+
+ /// \brief Number of function CFGs built and analyzed.
+ unsigned NumFunctionsAnalyzed;
+
+ /// \brief Number of functions for which the CFG could not be successfully
+ /// built.
+ unsigned NumFunctionsWithBadCFGs;
+
+ /// \brief Total number of blocks across all CFGs.
+ unsigned NumCFGBlocks;
+
+ /// \brief Largest number of CFG blocks for a single function analyzed.
+ unsigned MaxCFGBlocksPerFunction;
+
+ /// \brief Total number of CFGs with variables analyzed for uninitialized
+ /// uses.
+ unsigned NumUninitAnalysisFunctions;
+
+ /// \brief Total number of variables analyzed for uninitialized uses.
+ unsigned NumUninitAnalysisVariables;
+
+ /// \brief Max number of variables analyzed for uninitialized uses in a single
+ /// function.
+ unsigned MaxUninitAnalysisVariablesPerFunction;
+
+ /// \brief Total number of block visits during uninitialized use analysis.
+ unsigned NumUninitAnalysisBlockVisits;
+
+ /// \brief Max number of block visits during uninitialized use analysis of
+ /// a single function.
+ unsigned MaxUninitAnalysisBlockVisitsPerFunction;
+
+ /// @}
+
+public:
+ AnalysisBasedWarnings(Sema &s);
+
+ void IssueWarnings(Policy P, FunctionScopeInfo *fscope,
+ const Decl *D, const BlockExpr *blkExpr);
+
+ Policy getDefaultPolicy() { return DefaultPolicy; }
+
+ void PrintStats() const;
+};
+
+}} // end namespace clang::sema
+
+#endif
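
A minimal sketch (not part of the patch) of how a Sema client could adjust the default policy before running the analysis; the wrapper name is illustrative, and the Sema-side objects are assumed to be supplied by the caller.

#include "clang/Sema/AnalysisBasedWarnings.h"

void CheckBodyWithoutFallThrough(clang::sema::AnalysisBasedWarnings &Warnings,
                                 clang::sema::FunctionScopeInfo *FSI,
                                 const clang::Decl *D) {
  // Start from the default policy and switch off only the fall-through check.
  clang::sema::AnalysisBasedWarnings::Policy P = Warnings.getDefaultPolicy();
  P.disableCheckFallThrough();

  // No block literal involved in this sketch, so pass a null BlockExpr.
  Warnings.IssueWarnings(P, FSI, D, /*blkExpr=*/0);
}
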
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h b/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h
new file mode 100644
index 0000000..142f144
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/AttributeList.h
@@ -0,0 +1,555 @@
+//===--- AttributeList.h - Parsed attribute sets ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AttributeList class, which is used to collect
+// parsed attributes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_ATTRLIST_H
+#define LLVM_CLANG_SEMA_ATTRLIST_H
+
+#include "llvm/Support/Allocator.h"
+#include "llvm/ADT/SmallVector.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/VersionTuple.h"
+#include <cassert>
+
+namespace clang {
+ class ASTContext;
+ class IdentifierInfo;
+ class Expr;
+
+/// \brief Represents information about a change in availability for
+/// an entity, which is part of the encoding of the 'availability'
+/// attribute.
+struct AvailabilityChange {
+ /// \brief The location of the keyword indicating the kind of change.
+ SourceLocation KeywordLoc;
+
+ /// \brief The version number at which the change occurred.
+ VersionTuple Version;
+
+ /// \brief The source range covering the version number.
+ SourceRange VersionRange;
+
+ /// \brief Determine whether this availability change is valid.
+ bool isValid() const { return !Version.empty(); }
+};
+
+/// AttributeList - Represents GCC's __attribute__ declaration. There are
+/// four forms of this construct:
+///
+/// 1: __attribute__(( const )). ParmName/Args/NumArgs will all be unused.
+/// 2: __attribute__(( mode(byte) )). ParmName used, Args/NumArgs unused.
+/// 3: __attribute__(( format(printf, 1, 2) )). ParmName/Args/NumArgs all used.
+/// 4: __attribute__(( aligned(16) )). ParmName is unused, Args/NumArgs used.
+///
+class AttributeList { // TODO: This should really be called ParsedAttribute
+private:
+ IdentifierInfo *AttrName;
+ IdentifierInfo *ScopeName;
+ IdentifierInfo *ParmName;
+ SourceRange AttrRange;
+ SourceLocation ScopeLoc;
+ SourceLocation ParmLoc;
+
+ /// The number of expression arguments this attribute has.
+ /// The expressions themselves are stored after the object.
+ unsigned NumArgs : 16;
+
+ /// True if Microsoft style: declspec(foo).
+ unsigned DeclspecAttribute : 1;
+
+ /// True if C++0x-style: [[foo]].
+ unsigned CXX0XAttribute : 1;
+
+ /// True if already diagnosed as invalid.
+ mutable unsigned Invalid : 1;
+
+ /// True if this attribute was used as a type attribute.
+ mutable unsigned UsedAsTypeAttr : 1;
+
+ /// True if this has the extra information associated with an
+ /// availability attribute.
+ unsigned IsAvailability : 1;
+
+ unsigned AttrKind : 8;
+
+ /// \brief The location of the 'unavailable' keyword in an
+ /// availability attribute.
+ SourceLocation UnavailableLoc;
+
+ const Expr *MessageExpr;
+
+ /// The next attribute in the current position.
+ AttributeList *NextInPosition;
+
+ /// The next attribute allocated in the current Pool.
+ AttributeList *NextInPool;
+
+ Expr **getArgsBuffer() {
+ return reinterpret_cast<Expr**>(this+1);
+ }
+ Expr * const *getArgsBuffer() const {
+ return reinterpret_cast<Expr* const *>(this+1);
+ }
+
+ enum AvailabilitySlot {
+ IntroducedSlot, DeprecatedSlot, ObsoletedSlot
+ };
+
+ AvailabilityChange &getAvailabilitySlot(AvailabilitySlot index) {
+ return reinterpret_cast<AvailabilityChange*>(this+1)[index];
+ }
+ const AvailabilityChange &getAvailabilitySlot(AvailabilitySlot index) const {
+ return reinterpret_cast<const AvailabilityChange*>(this+1)[index];
+ }
+
+ AttributeList(const AttributeList &); // DO NOT IMPLEMENT
+ void operator=(const AttributeList &); // DO NOT IMPLEMENT
+ void operator delete(void *); // DO NOT IMPLEMENT
+ ~AttributeList(); // DO NOT IMPLEMENT
+
+ size_t allocated_size() const;
+
+ AttributeList(IdentifierInfo *attrName, SourceRange attrRange,
+ IdentifierInfo *scopeName, SourceLocation scopeLoc,
+ IdentifierInfo *parmName, SourceLocation parmLoc,
+ Expr **args, unsigned numArgs,
+ bool declspec, bool cxx0x)
+ : AttrName(attrName), ScopeName(scopeName), ParmName(parmName),
+ AttrRange(attrRange), ScopeLoc(scopeLoc), ParmLoc(parmLoc),
+ NumArgs(numArgs),
+ DeclspecAttribute(declspec), CXX0XAttribute(cxx0x), Invalid(false),
+ UsedAsTypeAttr(false), IsAvailability(false),
+ NextInPosition(0), NextInPool(0) {
+ if (numArgs) memcpy(getArgsBuffer(), args, numArgs * sizeof(Expr*));
+ AttrKind = getKind(getName());
+ }
+
+ AttributeList(IdentifierInfo *attrName, SourceRange attrRange,
+ IdentifierInfo *scopeName, SourceLocation scopeLoc,
+ IdentifierInfo *parmName, SourceLocation parmLoc,
+ const AvailabilityChange &introduced,
+ const AvailabilityChange &deprecated,
+ const AvailabilityChange &obsoleted,
+ SourceLocation unavailable,
+ const Expr *messageExpr,
+ bool declspec, bool cxx0x)
+ : AttrName(attrName), ScopeName(scopeName), ParmName(parmName),
+ AttrRange(attrRange), ScopeLoc(scopeLoc), ParmLoc(parmLoc),
+ NumArgs(0), DeclspecAttribute(declspec), CXX0XAttribute(cxx0x),
+ Invalid(false), UsedAsTypeAttr(false), IsAvailability(true),
+ UnavailableLoc(unavailable), MessageExpr(messageExpr),
+ NextInPosition(0), NextInPool(0) {
+ new (&getAvailabilitySlot(IntroducedSlot)) AvailabilityChange(introduced);
+ new (&getAvailabilitySlot(DeprecatedSlot)) AvailabilityChange(deprecated);
+ new (&getAvailabilitySlot(ObsoletedSlot)) AvailabilityChange(obsoleted);
+ AttrKind = getKind(getName());
+ }
+
+ friend class AttributePool;
+ friend class AttributeFactory;
+
+public:
+ enum Kind {
+ #define PARSED_ATTR(NAME) AT_##NAME,
+ #include "clang/Sema/AttrParsedAttrList.inc"
+ PARSED_ATTR(address_space)
+ PARSED_ATTR(base_check)
+ PARSED_ATTR(cf_returns_autoreleased)
+ PARSED_ATTR(ext_vector_type)
+ PARSED_ATTR(mode)
+ PARSED_ATTR(neon_polyvector_type)
+ PARSED_ATTR(neon_vector_type)
+ PARSED_ATTR(objc_gc)
+ PARSED_ATTR(objc_ownership)
+ PARSED_ATTR(opencl_image_access)
+ PARSED_ATTR(vector_size)
+ #undef PARSED_ATTR
+ IgnoredAttribute,
+ UnknownAttribute
+ };
+
+ IdentifierInfo *getName() const { return AttrName; }
+ SourceLocation getLoc() const { return AttrRange.getBegin(); }
+ SourceRange getRange() const { return AttrRange; }
+
+ bool hasScope() const { return ScopeName; }
+ IdentifierInfo *getScopeName() const { return ScopeName; }
+ SourceLocation getScopeLoc() const { return ScopeLoc; }
+
+ IdentifierInfo *getParameterName() const { return ParmName; }
+ SourceLocation getParameterLoc() const { return ParmLoc; }
+
+ bool isDeclspecAttribute() const { return DeclspecAttribute; }
+ bool isCXX0XAttribute() const { return CXX0XAttribute; }
+
+ bool isInvalid() const { return Invalid; }
+ void setInvalid(bool b = true) const { Invalid = b; }
+
+ bool isUsedAsTypeAttr() const { return UsedAsTypeAttr; }
+ void setUsedAsTypeAttr() { UsedAsTypeAttr = true; }
+
+ Kind getKind() const { return Kind(AttrKind); }
+ static Kind getKind(const IdentifierInfo *Name);
+
+ AttributeList *getNext() const { return NextInPosition; }
+ void setNext(AttributeList *N) { NextInPosition = N; }
+
+ /// getNumArgs - Return the number of actual arguments to this attribute.
+ unsigned getNumArgs() const { return NumArgs; }
+
+ /// hasParameterOrArguments - Return true if this attribute has a parameter,
+ /// or has a non empty argument expression list.
+ bool hasParameterOrArguments() const { return ParmName || NumArgs; }
+
+ /// getArg - Return the specified argument.
+ Expr *getArg(unsigned Arg) const {
+ assert(Arg < NumArgs && "Arg access out of range!");
+ return getArgsBuffer()[Arg];
+ }
+
+ class arg_iterator {
+ Expr * const *X;
+ unsigned Idx;
+ public:
+ arg_iterator(Expr * const *x, unsigned idx) : X(x), Idx(idx) {}
+
+ arg_iterator& operator++() {
+ ++Idx;
+ return *this;
+ }
+
+ bool operator==(const arg_iterator& I) const {
+ assert (X == I.X &&
+ "compared arg_iterators are for different argument lists");
+ return Idx == I.Idx;
+ }
+
+ bool operator!=(const arg_iterator& I) const {
+ return !operator==(I);
+ }
+
+ Expr* operator*() const {
+ return X[Idx];
+ }
+
+ unsigned getArgNum() const {
+ return Idx+1;
+ }
+ };
+
+ arg_iterator arg_begin() const {
+ return arg_iterator(getArgsBuffer(), 0);
+ }
+
+ arg_iterator arg_end() const {
+ return arg_iterator(getArgsBuffer(), NumArgs);
+ }
+
+ const AvailabilityChange &getAvailabilityIntroduced() const {
+ assert(getKind() == AT_availability && "Not an availability attribute");
+ return getAvailabilitySlot(IntroducedSlot);
+ }
+
+ const AvailabilityChange &getAvailabilityDeprecated() const {
+ assert(getKind() == AT_availability && "Not an availability attribute");
+ return getAvailabilitySlot(DeprecatedSlot);
+ }
+
+ const AvailabilityChange &getAvailabilityObsoleted() const {
+ assert(getKind() == AT_availability && "Not an availability attribute");
+ return getAvailabilitySlot(ObsoletedSlot);
+ }
+
+ SourceLocation getUnavailableLoc() const {
+ assert(getKind() == AT_availability && "Not an availability attribute");
+ return UnavailableLoc;
+ }
+
+ const Expr * getMessageExpr() const {
+ assert(getKind() == AT_availability && "Not an availability attribute");
+ return MessageExpr;
+ }
+};
+
+/// A factory, from which one makes pools, from which one creates
+/// individual attributes which are deallocated with the pool.
+///
+/// Note that it's tolerably cheap to create and destroy one of
+/// these as long as you don't actually allocate anything in it.
+class AttributeFactory {
+public:
+ enum {
+ /// The required allocation size of an availability attribute,
+ /// which we want to ensure is a multiple of sizeof(void*).
+ AvailabilityAllocSize =
+ sizeof(AttributeList)
+ + ((3 * sizeof(AvailabilityChange) + sizeof(void*) - 1)
+ / sizeof(void*) * sizeof(void*))
+ };
+
+private:
+ enum {
+ /// The number of free lists we want to be sure to support
+ /// inline. This is just enough that availability attributes
+ /// don't surpass it. It's actually very unlikely we'll see an
+ /// attribute that needs more than that; on x86-64 you'd need 10
+ /// expression arguments, and on i386 you'd need 19.
+ InlineFreeListsCapacity =
+ 1 + (AvailabilityAllocSize - sizeof(AttributeList)) / sizeof(void*)
+ };
+
+ llvm::BumpPtrAllocator Alloc;
+
+ /// Free lists. The index is determined by the following formula:
+ /// (size - sizeof(AttributeList)) / sizeof(void*)
+ SmallVector<AttributeList*, InlineFreeListsCapacity> FreeLists;
+
+ // The following are the private interface used by AttributePool.
+ friend class AttributePool;
+
+ /// Allocate an attribute of the given size.
+ void *allocate(size_t size);
+
+ /// Reclaim all the attributes in the given pool chain, which is
+ /// non-empty. Note that the current implementation is safe
+ /// against reclaiming things which were not actually allocated
+ /// with the allocator, although of course it's important to make
+ /// sure that their allocator lives at least as long as this one.
+ void reclaimPool(AttributeList *head);
+
+public:
+ AttributeFactory();
+ ~AttributeFactory();
+};
+
+class AttributePool {
+ AttributeFactory &Factory;
+ AttributeList *Head;
+
+ void *allocate(size_t size) {
+ return Factory.allocate(size);
+ }
+
+ AttributeList *add(AttributeList *attr) {
+ // We don't care about the order of the pool.
+ attr->NextInPool = Head;
+ Head = attr;
+ return attr;
+ }
+
+ void takePool(AttributeList *pool);
+
+public:
+ /// Create a new pool for a factory.
+ AttributePool(AttributeFactory &factory) : Factory(factory), Head(0) {}
+
+ /// Move the given pool's allocations to this pool.
+ AttributePool(AttributePool &pool) : Factory(pool.Factory), Head(pool.Head) {
+ pool.Head = 0;
+ }
+
+ AttributeFactory &getFactory() const { return Factory; }
+
+ void clear() {
+ if (Head) {
+ Factory.reclaimPool(Head);
+ Head = 0;
+ }
+ }
+
+ /// Take the given pool's allocations and add them to this pool.
+ void takeAllFrom(AttributePool &pool) {
+ if (pool.Head) {
+ takePool(pool.Head);
+ pool.Head = 0;
+ }
+ }
+
+ ~AttributePool() {
+ if (Head) Factory.reclaimPool(Head);
+ }
+
+ AttributeList *create(IdentifierInfo *attrName, SourceRange attrRange,
+ IdentifierInfo *scopeName, SourceLocation scopeLoc,
+ IdentifierInfo *parmName, SourceLocation parmLoc,
+ Expr **args, unsigned numArgs,
+ bool declspec = false, bool cxx0x = false) {
+ void *memory = allocate(sizeof(AttributeList)
+ + numArgs * sizeof(Expr*));
+ return add(new (memory) AttributeList(attrName, attrRange,
+ scopeName, scopeLoc,
+ parmName, parmLoc,
+ args, numArgs,
+ declspec, cxx0x));
+ }
+
+ AttributeList *create(IdentifierInfo *attrName, SourceRange attrRange,
+ IdentifierInfo *scopeName, SourceLocation scopeLoc,
+ IdentifierInfo *parmName, SourceLocation parmLoc,
+ const AvailabilityChange &introduced,
+ const AvailabilityChange &deprecated,
+ const AvailabilityChange &obsoleted,
+ SourceLocation unavailable,
+ const Expr *MessageExpr,
+ bool declspec = false, bool cxx0x = false) {
+ void *memory = allocate(AttributeFactory::AvailabilityAllocSize);
+ return add(new (memory) AttributeList(attrName, attrRange,
+ scopeName, scopeLoc,
+ parmName, parmLoc,
+ introduced, deprecated, obsoleted,
+ unavailable, MessageExpr,
+ declspec, cxx0x));
+ }
+
+ AttributeList *createIntegerAttribute(ASTContext &C, IdentifierInfo *Name,
+ SourceLocation TokLoc, int Arg);
+};
+
+/// addAttributeLists - Add two AttributeLists together.
+/// The right-hand list is appended to the left-hand list, if any, and a
+/// pointer to the joined list is returned.
+/// Note: the left-hand list is modified in place.
+inline AttributeList *addAttributeLists(AttributeList *Left,
+ AttributeList *Right) {
+ if (!Left)
+ return Right;
+
+ AttributeList *next = Left, *prev;
+ do {
+ prev = next;
+ next = next->getNext();
+ } while (next);
+ prev->setNext(Right);
+ return Left;
+}
+
+/// CXX0XAttributeList - A wrapper around a C++0x attribute list.
+/// Stores, in addition to the list proper, whether or not an actual list was
+/// present (as opposed to an empty list, which may be ill-formed in some
+/// places) and the source range of the list.
+struct CXX0XAttributeList {
+ AttributeList *AttrList;
+ SourceRange Range;
+ bool HasAttr;
+ CXX0XAttributeList (AttributeList *attrList, SourceRange range, bool hasAttr)
+ : AttrList(attrList), Range(range), HasAttr (hasAttr) {
+ }
+ CXX0XAttributeList ()
+ : AttrList(0), Range(), HasAttr(false) {
+ }
+};
+
+/// ParsedAttributes - A collection of parsed attributes. Currently
+/// we don't differentiate between the various attribute syntaxes,
+/// which is basically silly.
+///
+/// Right now this is a very lightweight container, but the expectation
+/// is that this will become significantly more serious.
+class ParsedAttributes {
+public:
+ ParsedAttributes(AttributeFactory &factory)
+ : pool(factory), list(0) {
+ }
+
+ ParsedAttributes(ParsedAttributes &attrs)
+ : pool(attrs.pool), list(attrs.list) {
+ attrs.list = 0;
+ }
+
+ AttributePool &getPool() const { return pool; }
+
+ bool empty() const { return list == 0; }
+
+ void add(AttributeList *newAttr) {
+ assert(newAttr);
+ assert(newAttr->getNext() == 0);
+ newAttr->setNext(list);
+ list = newAttr;
+ }
+
+ void addAll(AttributeList *newList) {
+ if (!newList) return;
+
+ AttributeList *lastInNewList = newList;
+ while (AttributeList *next = lastInNewList->getNext())
+ lastInNewList = next;
+
+ lastInNewList->setNext(list);
+ list = newList;
+ }
+
+ void set(AttributeList *newList) {
+ list = newList;
+ }
+
+ void takeAllFrom(ParsedAttributes &attrs) {
+ addAll(attrs.list);
+ attrs.list = 0;
+ pool.takeAllFrom(attrs.pool);
+ }
+
+ void clear() { list = 0; pool.clear(); }
+ AttributeList *getList() const { return list; }
+
+ /// Returns a reference to the attribute list. Try not to introduce
+ /// dependencies on this method; it may not be long-lived.
+ AttributeList *&getListRef() { return list; }
+
+
+ AttributeList *addNew(IdentifierInfo *attrName, SourceRange attrRange,
+ IdentifierInfo *scopeName, SourceLocation scopeLoc,
+ IdentifierInfo *parmName, SourceLocation parmLoc,
+ Expr **args, unsigned numArgs,
+ bool declspec = false, bool cxx0x = false) {
+ AttributeList *attr =
+ pool.create(attrName, attrRange, scopeName, scopeLoc, parmName, parmLoc,
+ args, numArgs, declspec, cxx0x);
+ add(attr);
+ return attr;
+ }
+
+ AttributeList *addNew(IdentifierInfo *attrName, SourceRange attrRange,
+ IdentifierInfo *scopeName, SourceLocation scopeLoc,
+ IdentifierInfo *parmName, SourceLocation parmLoc,
+ const AvailabilityChange &introduced,
+ const AvailabilityChange &deprecated,
+ const AvailabilityChange &obsoleted,
+ SourceLocation unavailable,
+ const Expr *MessageExpr,
+ bool declspec = false, bool cxx0x = false) {
+ AttributeList *attr =
+ pool.create(attrName, attrRange, scopeName, scopeLoc, parmName, parmLoc,
+ introduced, deprecated, obsoleted, unavailable,
+ MessageExpr,
+ declspec, cxx0x);
+ add(attr);
+ return attr;
+ }
+
+ AttributeList *addNewInteger(ASTContext &C, IdentifierInfo *name,
+ SourceLocation loc, int arg) {
+ AttributeList *attr =
+ pool.createIntegerAttribute(C, name, loc, arg);
+ add(attr);
+ return attr;
+ }
+
+
+private:
+ mutable AttributePool pool;
+ AttributeList *list;
+};
+
+} // end namespace clang
+
+#endif
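
A minimal sketch (not part of the patch) of building form 2 from the comment above, __attribute__((mode(byte))), through ParsedAttributes::addNew(). The wrapper name is illustrative, and the IdentifierInfo pointers and source locations are assumed to come from the parser.

#include "clang/Sema/AttributeList.h"

clang::AttributeList *
AddModeAttr(clang::ParsedAttributes &Attrs,
            clang::IdentifierInfo *ModeName,   // identifier for "mode"
            clang::IdentifierInfo *ByteName,   // identifier for "byte"
            clang::SourceRange AttrRange, clang::SourceLocation ParmLoc) {
  // Form 2 from the comment above: ParmName used, Args/NumArgs unused.
  return Attrs.addNew(ModeName, AttrRange,
                      /*scopeName=*/0, clang::SourceLocation(),
                      ByteName, ParmLoc,
                      /*args=*/0, /*numArgs=*/0);
}

The returned attribute is owned by the pool behind 'Attrs', so it stays valid only as long as that ParsedAttributes (or a pool that has taken its allocations) is alive.
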
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/CXXFieldCollector.h b/contrib/llvm/tools/clang/include/clang/Sema/CXXFieldCollector.h
new file mode 100644
index 0000000..6f3c0b4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/CXXFieldCollector.h
@@ -0,0 +1,79 @@
+//===- CXXFieldCollector.h - Utility class for C++ class semantic analysis ===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides CXXFieldCollector that is used during parsing & semantic
+// analysis of C++ classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_CXXFIELDCOLLECTOR_H
+#define LLVM_CLANG_SEMA_CXXFIELDCOLLECTOR_H
+
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+ class FieldDecl;
+
+/// CXXFieldCollector - Used to keep track of CXXFieldDecls during parsing of
+/// C++ classes.
+class CXXFieldCollector {
+ /// Fields - Contains all FieldDecls collected during parsing of a C++
+ /// class. When a nested class is entered, its fields are appended to the
+ /// fields of its parent class; when it is exited, its fields are removed.
+ SmallVector<FieldDecl*, 32> Fields;
+
+ /// FieldCount - Each entry represents the number of fields collected during
+ /// the parsing of a C++ class. When a nested class is entered, a new field
+ /// count is pushed; when it is exited, the field count is popped.
+ SmallVector<size_t, 4> FieldCount;
+
+ // Example:
+ //
+ // class C {
+ // int x,y;
+ // class NC {
+ // int q;
+ // // At this point, Fields contains [x,y,q] decls and FieldCount contains
+ // // [2,1].
+ // };
+ // int z;
+ // // At this point, Fields contains [x,y,z] decls and FieldCount contains
+ // // [3].
+ // };
+
+public:
+ /// StartClass - Called by Sema::ActOnStartCXXClassDef.
+ void StartClass() { FieldCount.push_back(0); }
+
+ /// Add - Called by Sema::ActOnCXXMemberDeclarator.
+ void Add(FieldDecl *D) {
+ Fields.push_back(D);
+ ++FieldCount.back();
+ }
+
+ /// getCurNumFields - The number of fields added to the currently parsed class.
+ size_t getCurNumFields() const {
+ assert(!FieldCount.empty() && "no currently-parsed class");
+ return FieldCount.back();
+ }
+
+ /// getCurFields - Pointer to array of fields added to the currently parsed
+ /// class.
+ FieldDecl **getCurFields() { return &*(Fields.end() - getCurNumFields()); }
+
+ /// FinishClass - Called by Sema::ActOnFinishCXXClassDef.
+ void FinishClass() {
+ Fields.resize(Fields.size() - getCurNumFields());
+ FieldCount.pop_back();
+ }
+};
+
+} // end namespace clang
+
+#endif
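
The call sequence for the nested-class example in the comment above looks like this (a sketch, not part of the patch; the wrapper name and FieldDecl parameters are illustrative):

#include "clang/Sema/CXXFieldCollector.h"

void CollectLikeTheExampleAbove(clang::CXXFieldCollector &Collector,
                                clang::FieldDecl *X, clang::FieldDecl *Y,
                                clang::FieldDecl *Q, clang::FieldDecl *Z) {
  Collector.StartClass();   // entering 'class C'
  Collector.Add(X);
  Collector.Add(Y);

  Collector.StartClass();   // entering nested 'class NC'
  Collector.Add(Q);         // Fields = [x,y,q], FieldCount = [2,1]
  Collector.FinishClass();  // leaving NC: q is dropped

  Collector.Add(Z);         // Fields = [x,y,z], FieldCount = [3]
  Collector.FinishClass();  // leaving C
}
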
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h b/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h
new file mode 100644
index 0000000..fe9bed5
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/CodeCompleteConsumer.h
@@ -0,0 +1,992 @@
+//===---- CodeCompleteConsumer.h - Code Completion Interface ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CodeCompleteConsumer class.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_SEMA_CODECOMPLETECONSUMER_H
+#define LLVM_CLANG_SEMA_CODECOMPLETECONSUMER_H
+
+#include "clang/AST/Type.h"
+#include "clang/AST/CanonicalType.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Allocator.h"
+#include "clang-c/Index.h"
+#include <string>
+
+namespace clang {
+
+class Decl;
+
+/// \brief Default priority values for code-completion results based
+/// on their kind.
+enum {
+ /// \brief Priority for the next initialization in a constructor initializer
+ /// list.
+ CCP_NextInitializer = 7,
+ /// \brief Priority for an enumeration constant inside a switch whose
+ /// condition is of the enumeration type.
+ CCP_EnumInCase = 7,
+ /// \brief Priority for a send-to-super completion.
+ CCP_SuperCompletion = 20,
+ /// \brief Priority for a declaration that is in the local scope.
+ CCP_LocalDeclaration = 34,
+ /// \brief Priority for a member declaration found from the current
+ /// method or member function.
+ CCP_MemberDeclaration = 35,
+ /// \brief Priority for a language keyword (that isn't any of the other
+ /// categories).
+ CCP_Keyword = 40,
+ /// \brief Priority for a code pattern.
+ CCP_CodePattern = 40,
+ /// \brief Priority for a non-type declaration.
+ CCP_Declaration = 50,
+ /// \brief Priority for a type.
+ CCP_Type = CCP_Declaration,
+ /// \brief Priority for a constant value (e.g., enumerator).
+ CCP_Constant = 65,
+ /// \brief Priority for a preprocessor macro.
+ CCP_Macro = 70,
+ /// \brief Priority for a nested-name-specifier.
+ CCP_NestedNameSpecifier = 75,
+ /// \brief Priority for a result that isn't likely to be what the user wants,
+ /// but is included for completeness.
+ CCP_Unlikely = 80,
+
+ /// \brief Priority for the Objective-C "_cmd" implicit parameter.
+ CCP_ObjC_cmd = CCP_Unlikely
+};
+
+/// \brief Priority value deltas that are added to code-completion results
+/// based on the context of the result.
+enum {
+ /// \brief The result is in a base class.
+ CCD_InBaseClass = 2,
+ /// \brief The result is a C++ non-static member function whose qualifiers
+ /// exactly match the object type on which the member function can be called.
+ CCD_ObjectQualifierMatch = -1,
+ /// \brief The selector of the given message exactly matches the selector
+ /// of the current method, which might imply that some kind of delegation
+ /// is occurring.
+ CCD_SelectorMatch = -3,
+
+ /// \brief Adjustment to the "bool" type in Objective-C, where the typedef
+ /// "BOOL" is preferred.
+ CCD_bool_in_ObjC = 1,
+
+ /// \brief Adjustment for KVC code pattern priorities when it doesn't look
+ /// like the expression is an Objective-C collection.
+ CCD_ProbablyNotObjCCollection = 15,
+
+ /// \brief An Objective-C method being used as a property.
+ CCD_MethodAsProperty = 2
+};
+
+/// \brief Priority value factors by which we will divide or multiply the
+/// priority of a code-completion result.
+enum {
+ /// \brief Divide by this factor when a code-completion result's type exactly
+ /// matches the type we expect.
+ CCF_ExactTypeMatch = 4,
+ /// \brief Divide by this factor when a code-completion result's type is
+ /// similar to the type we expect (e.g., both arithmetic types, both
+ /// Objective-C object pointer types).
+ CCF_SimilarTypeMatch = 2
+};
+
+/// \brief A simplified classification of types used when determining
+/// "similar" types for code completion.
+enum SimplifiedTypeClass {
+ STC_Arithmetic,
+ STC_Array,
+ STC_Block,
+ STC_Function,
+ STC_ObjectiveC,
+ STC_Other,
+ STC_Pointer,
+ STC_Record,
+ STC_Void
+};
+
+/// \brief Determine the simplified type class of the given canonical type.
+SimplifiedTypeClass getSimplifiedTypeClass(CanQualType T);
+
+/// \brief Determine the type that this declaration will have if it is used
+/// as a type or in an expression.
+QualType getDeclUsageType(ASTContext &C, NamedDecl *ND);
+
+/// \brief Determine the priority to be given to a macro code completion result
+/// with the given name.
+///
+/// \param MacroName The name of the macro.
+///
+/// \param LangOpts Options describing the current language dialect.
+///
+/// \param PreferredTypeIsPointer Whether the preferred type for the context
+/// of this macro is a pointer type.
+unsigned getMacroUsagePriority(StringRef MacroName,
+ const LangOptions &LangOpts,
+ bool PreferredTypeIsPointer = false);
+
+/// \brief Determine the libclang cursor kind associated with the given
+/// declaration.
+CXCursorKind getCursorKindForDecl(Decl *D);
+
+class FunctionDecl;
+class FunctionType;
+class FunctionTemplateDecl;
+class IdentifierInfo;
+class NamedDecl;
+class NestedNameSpecifier;
+class Sema;
+
+/// \brief The context in which code completion occurred, so that the
+/// code-completion consumer can process the results accordingly.
+class CodeCompletionContext {
+public:
+ enum Kind {
+ /// \brief An unspecified code-completion context.
+ CCC_Other,
+ /// \brief An unspecified code-completion context where we should also add
+ /// macro completions.
+ CCC_OtherWithMacros,
+ /// \brief Code completion occurred within a "top-level" completion context,
+ /// e.g., at namespace or global scope.
+ CCC_TopLevel,
+ /// \brief Code completion occurred within an Objective-C interface,
+ /// protocol, or category interface.
+ CCC_ObjCInterface,
+ /// \brief Code completion occurred within an Objective-C implementation
+ /// or category implementation.
+ CCC_ObjCImplementation,
+ /// \brief Code completion occurred within the instance variable list of
+ /// an Objective-C interface, implementation, or category implementation.
+ CCC_ObjCIvarList,
+ /// \brief Code completion occurred within a class, struct, or union.
+ CCC_ClassStructUnion,
+ /// \brief Code completion occurred where a statement (or declaration) is
+ /// expected in a function, method, or block.
+ CCC_Statement,
+ /// \brief Code completion occurred where an expression is expected.
+ CCC_Expression,
+ /// \brief Code completion occurred where an Objective-C message receiver
+ /// is expected.
+ CCC_ObjCMessageReceiver,
+ /// \brief Code completion occurred on the right-hand side of a member
+ /// access expression using the dot operator.
+ ///
+ /// The results of this completion are the members of the type being
+ /// accessed. The type itself is available via
+ /// \c CodeCompletionContext::getType().
+ CCC_DotMemberAccess,
+ /// \brief Code completion occurred on the right-hand side of a member
+ /// access expression using the arrow operator.
+ ///
+ /// The results of this completion are the members of the type being
+ /// accessed. The type itself is available via
+ /// \c CodeCompletionContext::getType().
+ CCC_ArrowMemberAccess,
+ /// \brief Code completion occurred on the right-hand side of an Objective-C
+ /// property access expression.
+ ///
+ /// The results of this completion are the members of the type being
+ /// accessed. The type itself is available via
+ /// \c CodeCompletionContext::getType().
+ CCC_ObjCPropertyAccess,
+ /// \brief Code completion occurred after the "enum" keyword, to indicate
+ /// an enumeration name.
+ CCC_EnumTag,
+ /// \brief Code completion occurred after the "union" keyword, to indicate
+ /// a union name.
+ CCC_UnionTag,
+ /// \brief Code completion occurred after the "struct" or "class" keyword,
+ /// to indicate a struct or class name.
+ CCC_ClassOrStructTag,
+ /// \brief Code completion occurred where a protocol name is expected.
+ CCC_ObjCProtocolName,
+ /// \brief Code completion occurred where a namespace or namespace alias
+ /// is expected.
+ CCC_Namespace,
+ /// \brief Code completion occurred where a type name is expected.
+ CCC_Type,
+ /// \brief Code completion occurred where a new name is expected.
+ CCC_Name,
+ /// \brief Code completion occurred where a new name is expected and a
+ /// qualified name is permissible.
+ CCC_PotentiallyQualifiedName,
+ /// \brief Code completion occurred where a macro is being defined.
+ CCC_MacroName,
+ /// \brief Code completion occurred where a macro name is expected
+ /// (without any arguments, in the case of a function-like macro).
+ CCC_MacroNameUse,
+ /// \brief Code completion occurred within a preprocessor expression.
+ CCC_PreprocessorExpression,
+ /// \brief Code completion occurred where a preprocessor directive is
+ /// expected.
+ CCC_PreprocessorDirective,
+ /// \brief Code completion occurred in a context where natural language is
+ /// expected, e.g., a comment or string literal.
+ ///
+ /// This context usually implies that no completions should be added,
+ /// unless they come from an appropriate natural-language dictionary.
+ CCC_NaturalLanguage,
+ /// \brief Code completion for a selector, as in an @selector expression.
+ CCC_SelectorName,
+ /// \brief Code completion within a type-qualifier list.
+ CCC_TypeQualifiers,
+ /// \brief Code completion in a parenthesized expression, which means that
+ /// we may also have types here in C and Objective-C (as well as in C++).
+ CCC_ParenthesizedExpression,
+ /// \brief Code completion where an Objective-C instance message is expected.
+ CCC_ObjCInstanceMessage,
+ /// \brief Code completion where an Objective-C class message is expected.
+ CCC_ObjCClassMessage,
+ /// \brief Code completion where the name of an Objective-C class is
+ /// expected.
+ CCC_ObjCInterfaceName,
+ /// \brief Code completion where an Objective-C category name is expected.
+ CCC_ObjCCategoryName,
+ /// \brief An unknown context, in which we are recovering from a parsing
+ /// error and don't know which completions we should give.
+ CCC_Recovery
+ };
+
+private:
+ enum Kind Kind;
+
+ /// \brief The type that we would prefer to see at this point (e.g., the type
+ /// of an initializer or function parameter).
+ QualType PreferredType;
+
+ /// \brief The type of the base object in a member access expression.
+ QualType BaseType;
+
+ /// \brief The identifiers for Objective-C selector parts.
+ IdentifierInfo **SelIdents;
+
+ /// \brief The number of Objective-C selector parts.
+ unsigned NumSelIdents;
+
+public:
+ /// \brief Construct a new code-completion context of the given kind.
+ CodeCompletionContext(enum Kind Kind) : Kind(Kind), SelIdents(NULL),
+ NumSelIdents(0) { }
+
+ /// \brief Construct a new code-completion context of the given kind.
+ CodeCompletionContext(enum Kind Kind, QualType T,
+ IdentifierInfo **SelIdents = NULL,
+ unsigned NumSelIdents = 0) : Kind(Kind),
+ SelIdents(SelIdents),
+ NumSelIdents(NumSelIdents) {
+ if (Kind == CCC_DotMemberAccess || Kind == CCC_ArrowMemberAccess ||
+ Kind == CCC_ObjCPropertyAccess || Kind == CCC_ObjCClassMessage ||
+ Kind == CCC_ObjCInstanceMessage)
+ BaseType = T;
+ else
+ PreferredType = T;
+ }
+
+ /// \brief Retrieve the kind of code-completion context.
+ enum Kind getKind() const { return Kind; }
+
+ /// \brief Retrieve the type that this expression would prefer to have, e.g.,
+ /// if the expression is a variable initializer or a function argument, the
+ /// type of the corresponding variable or function parameter.
+ QualType getPreferredType() const { return PreferredType; }
+
+ /// \brief Retrieve the type of the base object in a member-access
+ /// expression.
+ QualType getBaseType() const { return BaseType; }
+
+ /// \brief Retrieve the Objective-C selector identifiers.
+ IdentifierInfo **getSelIdents() const { return SelIdents; }
+
+ /// \brief Retrieve the number of Objective-C selector identifiers.
+ unsigned getNumSelIdents() const { return NumSelIdents; }
+
+ /// \brief Determines whether we want C++ constructors as results within this
+ /// context.
+ bool wantConstructorResults() const;
+};
+
+
+/// \brief A "string" used to describe how code completion can
+/// be performed for an entity.
+///
+/// A code completion string typically shows how a particular entity can be
+/// used. For example, the code completion string for a function would show
+/// the syntax to call it, including the parentheses, placeholders for the
+/// arguments, etc.
+class CodeCompletionString {
+public:
+ /// \brief The different kinds of "chunks" that can occur within a code
+ /// completion string.
+ enum ChunkKind {
+ /// \brief The piece of text that the user is expected to type to
+ /// match the code-completion string, typically a keyword or the name of a
+ /// declarator or macro.
+ CK_TypedText,
+ /// \brief A piece of text that should be placed in the buffer, e.g.,
+ /// parentheses or a comma in a function call.
+ CK_Text,
+ /// \brief A code completion string that is entirely optional. For example,
+ /// an optional code completion string that describes the default arguments
+ /// in a function call.
+ CK_Optional,
+ /// \brief A string that acts as a placeholder for, e.g., a function
+ /// call argument.
+ CK_Placeholder,
+ /// \brief A piece of text that describes something about the result but
+ /// should not be inserted into the buffer.
+ CK_Informative,
+ /// \brief A piece of text that describes the type of an entity or, for
+ /// functions and methods, the return type.
+ CK_ResultType,
+ /// \brief A piece of text that describes the parameter that corresponds
+ /// to the code-completion location within a function call, message send,
+ /// macro invocation, etc.
+ CK_CurrentParameter,
+ /// \brief A left parenthesis ('(').
+ CK_LeftParen,
+ /// \brief A right parenthesis (')').
+ CK_RightParen,
+ /// \brief A left bracket ('[').
+ CK_LeftBracket,
+ /// \brief A right bracket (']').
+ CK_RightBracket,
+ /// \brief A left brace ('{').
+ CK_LeftBrace,
+ /// \brief A right brace ('}').
+ CK_RightBrace,
+ /// \brief A left angle bracket ('<').
+ CK_LeftAngle,
+ /// \brief A right angle bracket ('>').
+ CK_RightAngle,
+ /// \brief A comma separator (',').
+ CK_Comma,
+ /// \brief A colon (':').
+ CK_Colon,
+ /// \brief A semicolon (';').
+ CK_SemiColon,
+ /// \brief An '=' sign.
+ CK_Equal,
+ /// \brief Horizontal whitespace (' ').
+ CK_HorizontalSpace,
+ /// \brief Vertical whitespace ('\n' or '\r\n', depending on the
+ /// platform).
+ CK_VerticalSpace
+ };
+
+ /// \brief One piece of the code completion string.
+ struct Chunk {
+ /// \brief The kind of data stored in this piece of the code completion
+ /// string.
+ ChunkKind Kind;
+
+ union {
+ /// \brief The text string associated with a CK_Text, CK_Placeholder,
+ /// CK_Informative, or CK_Comma chunk.
+ /// The string is owned by the chunk and will be deallocated
+ /// (with delete[]) when the chunk is destroyed.
+ const char *Text;
+
+ /// \brief The code completion string associated with a CK_Optional chunk.
+ /// The optional code completion string is owned by the chunk, and will
+ /// be deallocated (with delete) when the chunk is destroyed.
+ CodeCompletionString *Optional;
+ };
+
+ Chunk() : Kind(CK_Text), Text(0) { }
+
+ explicit Chunk(ChunkKind Kind, const char *Text = "");
+
+ /// \brief Create a new text chunk.
+ static Chunk CreateText(const char *Text);
+
+ /// \brief Create a new optional chunk.
+ static Chunk CreateOptional(CodeCompletionString *Optional);
+
+ /// \brief Create a new placeholder chunk.
+ static Chunk CreatePlaceholder(const char *Placeholder);
+
+ /// \brief Create a new informative chunk.
+ static Chunk CreateInformative(const char *Informative);
+
+ /// \brief Create a new result type chunk.
+ static Chunk CreateResultType(const char *ResultType);
+
+ /// \brief Create a new current-parameter chunk.
+ static Chunk CreateCurrentParameter(const char *CurrentParameter);
+ };
+
+private:
+ /// \brief The number of chunks stored in this string.
+ unsigned NumChunks : 16;
+
+ /// \brief The number of annotations for this code-completion result.
+ unsigned NumAnnotations : 16;
+
+ /// \brief The priority of this code-completion string.
+ unsigned Priority : 16;
+
+ /// \brief The availability of this code-completion result.
+ unsigned Availability : 2;
+
+ /// \brief The kind of the parent context.
+ unsigned ParentKind : 14;
+
+ /// \brief The name of the parent context.
+ StringRef ParentName;
+
+ CodeCompletionString(const CodeCompletionString &); // DO NOT IMPLEMENT
+ CodeCompletionString &operator=(const CodeCompletionString &); // DITTO
+
+ CodeCompletionString(const Chunk *Chunks, unsigned NumChunks,
+ unsigned Priority, CXAvailabilityKind Availability,
+ const char **Annotations, unsigned NumAnnotations,
+ CXCursorKind ParentKind, StringRef ParentName);
+ ~CodeCompletionString() { }
+
+ friend class CodeCompletionBuilder;
+ friend class CodeCompletionResult;
+
+public:
+ typedef const Chunk *iterator;
+ iterator begin() const { return reinterpret_cast<const Chunk *>(this + 1); }
+ iterator end() const { return begin() + NumChunks; }
+ bool empty() const { return NumChunks == 0; }
+ unsigned size() const { return NumChunks; }
+
+ const Chunk &operator[](unsigned I) const {
+ assert(I < size() && "Chunk index out-of-range");
+ return begin()[I];
+ }
+
+ /// \brief Returns the text in the TypedText chunk.
+ const char *getTypedText() const;
+
+ /// \brief Retrieve the priority of this code completion result.
+ unsigned getPriority() const { return Priority; }
+
+ /// \brief Retrieve the availability of this code completion result.
+ unsigned getAvailability() const { return Availability; }
+
+ /// \brief Retrieve the number of annotations for this code completion result.
+ unsigned getAnnotationCount() const;
+
+ /// \brief Retrieve the annotation string specified by \c AnnotationNr.
+ const char *getAnnotation(unsigned AnnotationNr) const;
+
+ /// \brief Retrieve parent context's cursor kind.
+ CXCursorKind getParentContextKind() const {
+ return (CXCursorKind)ParentKind;
+ }
+
+ /// \brief Retrieve the name of the parent context.
+ StringRef getParentContextName() const {
+ return ParentName;
+ }
+
+ /// \brief Retrieve a string representation of the code completion string,
+ /// which is mainly useful for debugging.
+ std::string getAsString() const;
+};
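+
+// As a rough sketch, the completion string for a function "void f(int x)"
+// would consist of a CK_ResultType chunk ("void"), a CK_TypedText chunk
+// ("f"), CK_LeftParen, a CK_Placeholder chunk ("int x"), and CK_RightParen.
+// Clients typically walk the chunks, or use getAsString() for debugging;
+// this assumes llvm::errs() is available via raw_ostream.h:
+//
+// \code
+//   void dumpCompletion(const CodeCompletionString &CCS) {
+//     for (CodeCompletionString::iterator C = CCS.begin(), CEnd = CCS.end();
+//          C != CEnd; ++C)
+//       if (C->Kind == CodeCompletionString::CK_TypedText)
+//         llvm::errs() << "typed text: " << C->Text << "\n";
+//     llvm::errs() << CCS.getAsString() << "\n";
+//   }
+// \endcode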
+
+/// \brief An allocator used specifically for the purpose of code completion.
+class CodeCompletionAllocator : public llvm::BumpPtrAllocator {
+public:
+ /// \brief Copy the given string into this allocator.
+ const char *CopyString(StringRef String);
+
+ /// \brief Copy the given string into this allocator.
+ const char *CopyString(Twine String);
+
+ /// \brief Copy the given string into this allocator.
+ const char *CopyString(const char *String) {
+ return CopyString(StringRef(String));
+ }
+
+ /// \brief Copy the given string into this allocator.
+ const char *CopyString(const std::string &String) {
+ return CopyString(StringRef(String));
+ }
+};
+
+/// \brief Allocator for a cached set of global code completions.
+class GlobalCodeCompletionAllocator
+ : public CodeCompletionAllocator,
+ public RefCountedBase<GlobalCodeCompletionAllocator>
+{
+
+};
+
+class CodeCompletionTUInfo {
+ llvm::DenseMap<DeclContext *, StringRef> ParentNames;
+ IntrusiveRefCntPtr<GlobalCodeCompletionAllocator> AllocatorRef;
+
+public:
+ explicit CodeCompletionTUInfo(
+ IntrusiveRefCntPtr<GlobalCodeCompletionAllocator> Allocator)
+ : AllocatorRef(Allocator) { }
+
+ IntrusiveRefCntPtr<GlobalCodeCompletionAllocator> getAllocatorRef() const {
+ return AllocatorRef;
+ }
+ CodeCompletionAllocator &getAllocator() const {
+ assert(AllocatorRef);
+ return *AllocatorRef;
+ }
+
+ StringRef getParentName(DeclContext *DC);
+};
+
+} // end namespace clang
+
+namespace llvm {
+ template <> struct isPodLike<clang::CodeCompletionString::Chunk> {
+ static const bool value = true;
+ };
+}
+
+namespace clang {
+
+/// \brief A builder class used to construct new code-completion strings.
+class CodeCompletionBuilder {
+public:
+ typedef CodeCompletionString::Chunk Chunk;
+
+private:
+ CodeCompletionAllocator &Allocator;
+ CodeCompletionTUInfo &CCTUInfo;
+ unsigned Priority;
+ CXAvailabilityKind Availability;
+ CXCursorKind ParentKind;
+ StringRef ParentName;
+
+ /// \brief The chunks stored in this string.
+ SmallVector<Chunk, 4> Chunks;
+
+ SmallVector<const char *, 2> Annotations;
+
+public:
+ CodeCompletionBuilder(CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo)
+ : Allocator(Allocator), CCTUInfo(CCTUInfo),
+ Priority(0), Availability(CXAvailability_Available),
+ ParentKind(CXCursor_NotImplemented) { }
+
+ CodeCompletionBuilder(CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo,
+ unsigned Priority, CXAvailabilityKind Availability)
+ : Allocator(Allocator), CCTUInfo(CCTUInfo),
+ Priority(Priority), Availability(Availability),
+ ParentKind(CXCursor_NotImplemented) { }
+
+ /// \brief Retrieve the allocator into which the code completion
+ /// strings should be allocated.
+ CodeCompletionAllocator &getAllocator() const { return Allocator; }
+
+ CodeCompletionTUInfo &getCodeCompletionTUInfo() const { return CCTUInfo; }
+
+ /// \brief Take the resulting completion string.
+ ///
+ /// This operation can only be performed once.
+ CodeCompletionString *TakeString();
+
+ /// \brief Add a new typed-text chunk.
+ void AddTypedTextChunk(const char *Text);
+
+ /// \brief Add a new text chunk.
+ void AddTextChunk(const char *Text);
+
+ /// \brief Add a new optional chunk.
+ void AddOptionalChunk(CodeCompletionString *Optional);
+
+ /// \brief Add a new placeholder chunk.
+ void AddPlaceholderChunk(const char *Placeholder);
+
+ /// \brief Add a new informative chunk.
+ void AddInformativeChunk(const char *Text);
+
+ /// \brief Add a new result-type chunk.
+ void AddResultTypeChunk(const char *ResultType);
+
+ /// \brief Add a new current-parameter chunk.
+ void AddCurrentParameterChunk(const char *CurrentParameter);
+
+ /// \brief Add a new chunk.
+ void AddChunk(CodeCompletionString::ChunkKind CK, const char *Text = "");
+
+ void AddAnnotation(const char *A) { Annotations.push_back(A); }
+
+ /// \brief Add the parent context information to this code completion.
+ void addParentContext(DeclContext *DC);
+
+ CXCursorKind getParentKind() const { return ParentKind; }
+ StringRef getParentName() const { return ParentName; }
+};
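+
+// A minimal sketch of building a completion string for a call to "f(int x)"
+// with CodeCompletionBuilder. The allocator and TU-info objects are assumed
+// to be supplied by the surrounding consumer; text is copied into the
+// allocator first so it lives at least as long as the resulting string.
+//
+// \code
+//   CodeCompletionString *buildCallString(CodeCompletionAllocator &Alloc,
+//                                         CodeCompletionTUInfo &TUInfo) {
+//     CodeCompletionBuilder Builder(Alloc, TUInfo);
+//     Builder.AddResultTypeChunk(Alloc.CopyString("void"));
+//     Builder.AddTypedTextChunk(Alloc.CopyString("f"));
+//     Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+//     Builder.AddPlaceholderChunk(Alloc.CopyString("int x"));
+//     Builder.AddChunk(CodeCompletionString::CK_RightParen);
+//     return Builder.TakeString();  // TakeString() may only be called once
+//   }
+// \endcode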
+
+/// \brief Captures a result of code completion.
+class CodeCompletionResult {
+public:
+ /// \brief Describes the kind of result generated.
+ enum ResultKind {
+ RK_Declaration = 0, ///< Refers to a declaration.
+ RK_Keyword,         ///< Refers to a keyword or symbol.
+ RK_Macro,           ///< Refers to a macro.
+ RK_Pattern          ///< Refers to a precomputed pattern.
+ };
+
+ /// \brief The kind of result stored here.
+ ResultKind Kind;
+
+ /// \brief When Kind == RK_Declaration or RK_Pattern, the declaration we are
+ /// referring to. In the latter case, the declaration might be NULL.
+ NamedDecl *Declaration;
+
+ union {
+ /// \brief When Kind == RK_Keyword, the string representing the keyword
+ /// or symbol's spelling.
+ const char *Keyword;
+
+ /// \brief When Kind == RK_Pattern, the code-completion string that
+ /// describes the completion text to insert.
+ CodeCompletionString *Pattern;
+
+ /// \brief When Kind == RK_Macro, the identifier that refers to a macro.
+ IdentifierInfo *Macro;
+ };
+
+ /// \brief The priority of this particular code-completion result.
+ unsigned Priority;
+
+ /// \brief The cursor kind that describes this result.
+ CXCursorKind CursorKind;
+
+ /// \brief The availability of this result.
+ CXAvailabilityKind Availability;
+
+ /// \brief Specifies which parameter (of a function, Objective-C method,
+ /// macro, etc.) we should start with when formatting the result.
+ unsigned StartParameter;
+
+ /// \brief Whether this result is hidden by another name.
+ bool Hidden : 1;
+
+ /// \brief Whether this result was found via lookup into a base class.
+ bool QualifierIsInformative : 1;
+
+ /// \brief Whether this declaration is the beginning of a
+ /// nested-name-specifier and, therefore, should be followed by '::'.
+ bool StartsNestedNameSpecifier : 1;
+
+ /// \brief Whether all parameters (of a function, Objective-C
+ /// method, etc.) should be considered "informative".
+ bool AllParametersAreInformative : 1;
+
+ /// \brief Whether we're completing a declaration of the given entity,
+ /// rather than a use of that entity.
+ bool DeclaringEntity : 1;
+
+ /// \brief If the result should have a nested-name-specifier, this is it.
+ /// When \c QualifierIsInformative, the nested-name-specifier is
+ /// informative rather than required.
+ NestedNameSpecifier *Qualifier;
+
+ /// \brief Build a result that refers to a declaration.
+ CodeCompletionResult(NamedDecl *Declaration,
+ NestedNameSpecifier *Qualifier = 0,
+ bool QualifierIsInformative = false,
+ bool Accessible = true)
+ : Kind(RK_Declaration), Declaration(Declaration),
+ Priority(getPriorityFromDecl(Declaration)),
+ Availability(CXAvailability_Available), StartParameter(0),
+ Hidden(false), QualifierIsInformative(QualifierIsInformative),
+ StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
+ DeclaringEntity(false), Qualifier(Qualifier) {
+ computeCursorKindAndAvailability(Accessible);
+ }
+
+ /// \brief Build a result that refers to a keyword or symbol.
+ CodeCompletionResult(const char *Keyword, unsigned Priority = CCP_Keyword)
+ : Kind(RK_Keyword), Declaration(0), Keyword(Keyword), Priority(Priority),
+ Availability(CXAvailability_Available),
+ StartParameter(0), Hidden(false), QualifierIsInformative(0),
+ StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
+ DeclaringEntity(false), Qualifier(0) {
+ computeCursorKindAndAvailability();
+ }
+
+ /// \brief Build a result that refers to a macro.
+ CodeCompletionResult(IdentifierInfo *Macro, unsigned Priority = CCP_Macro)
+ : Kind(RK_Macro), Declaration(0), Macro(Macro), Priority(Priority),
+ Availability(CXAvailability_Available), StartParameter(0),
+ Hidden(false), QualifierIsInformative(0),
+ StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
+ DeclaringEntity(false), Qualifier(0) {
+ computeCursorKindAndAvailability();
+ }
+
+ /// \brief Build a result that refers to a pattern.
+ CodeCompletionResult(CodeCompletionString *Pattern,
+ unsigned Priority = CCP_CodePattern,
+ CXCursorKind CursorKind = CXCursor_NotImplemented,
+ CXAvailabilityKind Availability = CXAvailability_Available,
+ NamedDecl *D = 0)
+ : Kind(RK_Pattern), Declaration(D), Pattern(Pattern), Priority(Priority),
+ CursorKind(CursorKind), Availability(Availability), StartParameter(0),
+ Hidden(false), QualifierIsInformative(0),
+ StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
+ DeclaringEntity(false), Qualifier(0)
+ {
+ }
+
+ /// \brief Build a result that refers to a pattern with an associated
+ /// declaration.
+ CodeCompletionResult(CodeCompletionString *Pattern, NamedDecl *D,
+ unsigned Priority)
+ : Kind(RK_Pattern), Declaration(D), Pattern(Pattern), Priority(Priority),
+ Availability(CXAvailability_Available), StartParameter(0),
+ Hidden(false), QualifierIsInformative(false),
+ StartsNestedNameSpecifier(false), AllParametersAreInformative(false),
+ DeclaringEntity(false), Qualifier(0) {
+ computeCursorKindAndAvailability();
+ }
+
+ /// \brief Retrieve the declaration stored in this result.
+ NamedDecl *getDeclaration() const {
+ assert(Kind == RK_Declaration && "Not a declaration result");
+ return Declaration;
+ }
+
+ /// \brief Retrieve the keyword stored in this result.
+ const char *getKeyword() const {
+ assert(Kind == RK_Keyword && "Not a keyword result");
+ return Keyword;
+ }
+
+ /// \brief Create a new code-completion string that describes how to insert
+ /// this result into a program.
+ ///
+ /// \param S The semantic analysis that created the result.
+ ///
+ /// \param Allocator The allocator that will be used to allocate the
+ /// string itself.
+ CodeCompletionString *CreateCodeCompletionString(Sema &S,
+ CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo);
+ CodeCompletionString *CreateCodeCompletionString(ASTContext &Ctx,
+ Preprocessor &PP,
+ CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo);
+
+ /// \brief Determine a base priority for the given declaration.
+ static unsigned getPriorityFromDecl(NamedDecl *ND);
+
+private:
+ void computeCursorKindAndAvailability(bool Accessible = true);
+};
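+
+// A small sketch of how results of the different kinds are typically created
+// and turned into strings. "ND" is some NamedDecl found during lookup, and
+// the Sema, allocator, and TU-info objects are assumed to be supplied by the
+// consumer.
+//
+// \code
+//   CodeCompletionResult DeclResult(ND);           // RK_Declaration
+//   CodeCompletionResult KeywordResult("return");  // RK_Keyword
+//
+//   CodeCompletionString *Str =
+//       DeclResult.CreateCodeCompletionString(S, Allocator, CCTUInfo);
+// \endcode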
+
+bool operator<(const CodeCompletionResult &X, const CodeCompletionResult &Y);
+
+inline bool operator>(const CodeCompletionResult &X,
+ const CodeCompletionResult &Y) {
+ return Y < X;
+}
+
+inline bool operator<=(const CodeCompletionResult &X,
+ const CodeCompletionResult &Y) {
+ return !(Y < X);
+}
+
+inline bool operator>=(const CodeCompletionResult &X,
+ const CodeCompletionResult &Y) {
+ return !(X < Y);
+}
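+
+// The relational operators above make a result array directly sortable with
+// the standard library, e.g. (a sketch, assuming <algorithm> is included):
+//
+// \code
+//   std::stable_sort(Results, Results + NumResults);
+// \endcode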
+
+
+raw_ostream &operator<<(raw_ostream &OS,
+ const CodeCompletionString &CCS);
+
+/// \brief Abstract interface for a consumer of code-completion
+/// information.
+class CodeCompleteConsumer {
+protected:
+ /// \brief Whether to include macros in the code-completion results.
+ bool IncludeMacros;
+
+ /// \brief Whether to include code patterns (such as for loops) within
+ /// the completion results.
+ bool IncludeCodePatterns;
+
+ /// \brief Whether to include global (top-level) declarations and names in
+ /// the completion results.
+ bool IncludeGlobals;
+
+ /// \brief Whether the output format for the code-completion consumer is
+ /// binary.
+ bool OutputIsBinary;
+
+public:
+ class OverloadCandidate {
+ public:
+ /// \brief Describes the type of overload candidate.
+ enum CandidateKind {
+ /// \brief The candidate is a function declaration.
+ CK_Function,
+ /// \brief The candidate is a function template.
+ CK_FunctionTemplate,
+ /// \brief The "candidate" is actually a variable, expression, or block
+ /// for which we only have a function prototype.
+ CK_FunctionType
+ };
+
+ private:
+ /// \brief The kind of overload candidate.
+ CandidateKind Kind;
+
+ union {
+ /// \brief The function overload candidate, available when
+ /// Kind == CK_Function.
+ FunctionDecl *Function;
+
+ /// \brief The function template overload candidate, available when
+ /// Kind == CK_FunctionTemplate.
+ FunctionTemplateDecl *FunctionTemplate;
+
+ /// \brief The function type that describes the entity being called,
+ /// when Kind == CK_FunctionType.
+ const FunctionType *Type;
+ };
+
+ public:
+ OverloadCandidate(FunctionDecl *Function)
+ : Kind(CK_Function), Function(Function) { }
+
+ OverloadCandidate(FunctionTemplateDecl *FunctionTemplateDecl)
+ : Kind(CK_FunctionTemplate), FunctionTemplate(FunctionTemplateDecl) { }
+
+ OverloadCandidate(const FunctionType *Type)
+ : Kind(CK_FunctionType), Type(Type) { }
+
+ /// \brief Determine the kind of overload candidate.
+ CandidateKind getKind() const { return Kind; }
+
+ /// \brief Retrieve the function overload candidate or the templated
+ /// function declaration for a function template.
+ FunctionDecl *getFunction() const;
+
+ /// \brief Retrieve the function template overload candidate.
+ FunctionTemplateDecl *getFunctionTemplate() const {
+ assert(getKind() == CK_FunctionTemplate && "Not a function template");
+ return FunctionTemplate;
+ }
+
+ /// \brief Retrieve the function type of the entity, regardless of how the
+ /// function is stored.
+ const FunctionType *getFunctionType() const;
+
+ /// \brief Create a new code-completion string that describes the function
+ /// signature of this overload candidate.
+ CodeCompletionString *CreateSignatureString(unsigned CurrentArg,
+ Sema &S,
+ CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo) const;
+ };
+
+ CodeCompleteConsumer() : IncludeMacros(false), IncludeCodePatterns(false),
+ IncludeGlobals(true), OutputIsBinary(false) { }
+
+ CodeCompleteConsumer(bool IncludeMacros, bool IncludeCodePatterns,
+ bool IncludeGlobals, bool OutputIsBinary)
+ : IncludeMacros(IncludeMacros), IncludeCodePatterns(IncludeCodePatterns),
+ IncludeGlobals(IncludeGlobals), OutputIsBinary(OutputIsBinary) { }
+
+ /// \brief Whether the code-completion consumer wants to see macros.
+ bool includeMacros() const { return IncludeMacros; }
+
+ /// \brief Whether the code-completion consumer wants to see code patterns.
+ bool includeCodePatterns() const { return IncludeCodePatterns; }
+
+ /// \brief Whether to include global (top-level) declaration results.
+ bool includeGlobals() const { return IncludeGlobals; }
+
+ /// \brief Determine whether the output of this consumer is binary.
+ bool isOutputBinary() const { return OutputIsBinary; }
+
+ /// \brief Deregisters and destroys this code-completion consumer.
+ virtual ~CodeCompleteConsumer();
+
+ /// \name Code-completion callbacks
+ //@{
+ /// \brief Process the finalized code-completion results.
+ virtual void ProcessCodeCompleteResults(Sema &S,
+ CodeCompletionContext Context,
+ CodeCompletionResult *Results,
+ unsigned NumResults) { }
+
+ /// \param S the semantic-analyzer object for which code-completion is being
+ /// done.
+ ///
+ /// \param CurrentArg the index of the current argument.
+ ///
+ /// \param Candidates an array of overload candidates.
+ ///
+ /// \param NumCandidates the number of overload candidates
+ virtual void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
+ OverloadCandidate *Candidates,
+ unsigned NumCandidates) { }
+ //@}
+
+ /// \brief Retrieve the allocator that will be used to allocate
+ /// code completion strings.
+ virtual CodeCompletionAllocator &getAllocator() = 0;
+
+ virtual CodeCompletionTUInfo &getCodeCompletionTUInfo() = 0;
+};
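+
+// A minimal sketch of a custom consumer. Only the pure-virtual allocator
+// accessors must be provided; the callbacks have empty default
+// implementations. The counting behaviour here is purely illustrative.
+//
+// \code
+//   class CountingCodeCompleteConsumer : public CodeCompleteConsumer {
+//     CodeCompletionTUInfo CCTUInfo;
+//     unsigned NumSeen;
+//
+//   public:
+//     CountingCodeCompleteConsumer()
+//       : CCTUInfo(new GlobalCodeCompletionAllocator), NumSeen(0) { }
+//
+//     virtual void ProcessCodeCompleteResults(Sema &S,
+//                                             CodeCompletionContext Context,
+//                                             CodeCompletionResult *Results,
+//                                             unsigned NumResults) {
+//       NumSeen += NumResults;
+//     }
+//
+//     virtual CodeCompletionAllocator &getAllocator() {
+//       return CCTUInfo.getAllocator();
+//     }
+//
+//     virtual CodeCompletionTUInfo &getCodeCompletionTUInfo() {
+//       return CCTUInfo;
+//     }
+//   };
+// \endcode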
+
+/// \brief A simple code-completion consumer that prints the results it
+/// receives in a simple format.
+class PrintingCodeCompleteConsumer : public CodeCompleteConsumer {
+ /// \brief The raw output stream.
+ raw_ostream &OS;
+
+ CodeCompletionTUInfo CCTUInfo;
+
+public:
+ /// \brief Create a new printing code-completion consumer that prints its
+ /// results to the given raw output stream.
+ PrintingCodeCompleteConsumer(bool IncludeMacros, bool IncludeCodePatterns,
+ bool IncludeGlobals,
+ raw_ostream &OS)
+ : CodeCompleteConsumer(IncludeMacros, IncludeCodePatterns, IncludeGlobals,
+ false), OS(OS),
+ CCTUInfo(new GlobalCodeCompletionAllocator) {}
+
+ /// \brief Prints the finalized code-completion results.
+ virtual void ProcessCodeCompleteResults(Sema &S,
+ CodeCompletionContext Context,
+ CodeCompletionResult *Results,
+ unsigned NumResults);
+
+ virtual void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
+ OverloadCandidate *Candidates,
+ unsigned NumCandidates);
+
+ virtual CodeCompletionAllocator &getAllocator() {
+ return CCTUInfo.getAllocator();
+ }
+
+ virtual CodeCompletionTUInfo &getCodeCompletionTUInfo() { return CCTUInfo; }
+};
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_SEMA_CODECOMPLETECONSUMER_H
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h b/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h
new file mode 100644
index 0000000..67fd393
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/DeclSpec.h
@@ -0,0 +1,1984 @@
+//===--- DeclSpec.h - Parsed declaration specifiers -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the classes used to store parsed information about
+// declaration-specifiers and declarators.
+//
+// static const int volatile x, *y, *(*(*z)[10])(const void *x);
+// ------------------------- - -- ---------------------------
+// declaration-specifiers \ | /
+// declarators
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_DECLSPEC_H
+#define LLVM_CLANG_SEMA_DECLSPEC_H
+
+#include "clang/Sema/AttributeList.h"
+#include "clang/Sema/Ownership.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/Lex/Token.h"
+#include "clang/Basic/ExceptionSpecificationType.h"
+#include "clang/Basic/Lambda.h"
+#include "clang/Basic/OperatorKinds.h"
+#include "clang/Basic/Specifiers.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/ErrorHandling.h"
+
+namespace clang {
+ class ASTContext;
+ class TypeLoc;
+ class LangOptions;
+ class DiagnosticsEngine;
+ class IdentifierInfo;
+ class NamespaceAliasDecl;
+ class NamespaceDecl;
+ class NestedNameSpecifier;
+ class NestedNameSpecifierLoc;
+ class ObjCDeclSpec;
+ class Preprocessor;
+ class Sema;
+ class Declarator;
+ struct TemplateIdAnnotation;
+
+/// CXXScopeSpec - Represents a C++ nested-name-specifier or a global scope
+/// specifier. These can be in 3 states:
+/// 1) Not present, identified by isEmpty()
+/// 2) Present, identified by isNotEmpty()
+ /// 2.a) Valid, identified by isValid()
+/// 2.b) Invalid, identified by isInvalid().
+///
+/// isSet() is deprecated because it mostly corresponded to "valid" but was
+/// often used as if it meant "present".
+///
+/// The actual scope is described by getScopeRep().
+class CXXScopeSpec {
+ SourceRange Range;
+ NestedNameSpecifierLocBuilder Builder;
+
+public:
+ const SourceRange &getRange() const { return Range; }
+ void setRange(const SourceRange &R) { Range = R; }
+ void setBeginLoc(SourceLocation Loc) { Range.setBegin(Loc); }
+ void setEndLoc(SourceLocation Loc) { Range.setEnd(Loc); }
+ SourceLocation getBeginLoc() const { return Range.getBegin(); }
+ SourceLocation getEndLoc() const { return Range.getEnd(); }
+
+ /// \brief Retrieve the representation of the nested-name-specifier.
+ NestedNameSpecifier *getScopeRep() const {
+ return Builder.getRepresentation();
+ }
+
+ /// \brief Extend the current nested-name-specifier by another
+ /// nested-name-specifier component of the form 'type::'.
+ ///
+ /// \param Context The AST context in which this nested-name-specifier
+ /// resides.
+ ///
+ /// \param TemplateKWLoc The location of the 'template' keyword, if present.
+ ///
+ /// \param TL The TypeLoc that describes the type preceding the '::'.
+ ///
+ /// \param ColonColonLoc The location of the trailing '::'.
+ void Extend(ASTContext &Context, SourceLocation TemplateKWLoc, TypeLoc TL,
+ SourceLocation ColonColonLoc);
+
+ /// \brief Extend the current nested-name-specifier by another
+ /// nested-name-specifier component of the form 'identifier::'.
+ ///
+ /// \param Context The AST context in which this nested-name-specifier
+ /// resides.
+ ///
+ /// \param Identifier The identifier.
+ ///
+ /// \param IdentifierLoc The location of the identifier.
+ ///
+ /// \param ColonColonLoc The location of the trailing '::'.
+ void Extend(ASTContext &Context, IdentifierInfo *Identifier,
+ SourceLocation IdentifierLoc, SourceLocation ColonColonLoc);
+
+ /// \brief Extend the current nested-name-specifier by another
+ /// nested-name-specifier component of the form 'namespace::'.
+ ///
+ /// \param Context The AST context in which this nested-name-specifier
+ /// resides.
+ ///
+ /// \param Namespace The namespace.
+ ///
+ /// \param NamespaceLoc The location of the namespace name.
+ ///
+ /// \param ColonColonLoc The location of the trailing '::'.
+ void Extend(ASTContext &Context, NamespaceDecl *Namespace,
+ SourceLocation NamespaceLoc, SourceLocation ColonColonLoc);
+
+ /// \brief Extend the current nested-name-specifier by another
+ /// nested-name-specifier component of the form 'namespace-alias::'.
+ ///
+ /// \param Context The AST context in which this nested-name-specifier
+ /// resides.
+ ///
+ /// \param Alias The namespace alias.
+ ///
+ /// \param AliasLoc The location of the namespace alias
+ /// name.
+ ///
+ /// \param ColonColonLoc The location of the trailing '::'.
+ void Extend(ASTContext &Context, NamespaceAliasDecl *Alias,
+ SourceLocation AliasLoc, SourceLocation ColonColonLoc);
+
+ /// \brief Turn this (empty) nested-name-specifier into the global
+ /// nested-name-specifier '::'.
+ void MakeGlobal(ASTContext &Context, SourceLocation ColonColonLoc);
+
+ /// \brief Make a new nested-name-specifier from incomplete source-location
+ /// information.
+ ///
+ /// FIXME: This routine should be used very, very rarely, in cases where we
+ /// need to synthesize a nested-name-specifier. Most code should instead use
+ /// \c Adopt() with a proper \c NestedNameSpecifierLoc.
+ void MakeTrivial(ASTContext &Context, NestedNameSpecifier *Qualifier,
+ SourceRange R);
+
+ /// \brief Adopt an existing nested-name-specifier (with source-range
+ /// information).
+ void Adopt(NestedNameSpecifierLoc Other);
+
+ /// \brief Retrieve a nested-name-specifier with location information, copied
+ /// into the given AST context.
+ ///
+ /// \param Context The context into which this nested-name-specifier will be
+ /// copied.
+ NestedNameSpecifierLoc getWithLocInContext(ASTContext &Context) const;
+
+ /// \brief Retrieve the location of the name in the last qualifier
+ /// in this nested name specifier. For example:
+ /// ::foo::bar<0>::
+ /// ^~~
+ SourceLocation getLastQualifierNameLoc() const;
+
+ /// No scope specifier.
+ bool isEmpty() const { return !Range.isValid(); }
+ /// A scope specifier is present, but may be valid or invalid.
+ bool isNotEmpty() const { return !isEmpty(); }
+
+ /// An error occurred during parsing of the scope specifier.
+ bool isInvalid() const { return isNotEmpty() && getScopeRep() == 0; }
+ /// A scope specifier is present, and it refers to a real scope.
+ bool isValid() const { return isNotEmpty() && getScopeRep() != 0; }
+
+ /// \brief Indicate that this nested-name-specifier is invalid.
+ void SetInvalid(SourceRange R) {
+ assert(R.isValid() && "Must have a valid source range");
+ if (Range.getBegin().isInvalid())
+ Range.setBegin(R.getBegin());
+ Range.setEnd(R.getEnd());
+ Builder.Clear();
+ }
+
+ /// Deprecated. Some call sites intend isNotEmpty() while others intend
+ /// isValid().
+ bool isSet() const { return getScopeRep() != 0; }
+
+ void clear() {
+ Range = SourceRange();
+ Builder.Clear();
+ }
+
+ /// \brief Retrieve the data associated with the source-location information.
+ char *location_data() const { return Builder.getBuffer().first; }
+
+ /// \brief Retrieve the size of the data associated with source-location
+ /// information.
+ unsigned location_size() const { return Builder.getBuffer().second; }
+};
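+
+// A short sketch of the three states described above, assuming "SS" was
+// filled in (or left untouched) by the parser:
+//
+// \code
+//   if (SS.isEmpty()) {
+//     // No nested-name-specifier was written at all.
+//   } else if (SS.isInvalid()) {
+//     // A specifier was written, but it did not resolve to a real scope.
+//   } else {
+//     NestedNameSpecifier *NNS = SS.getScopeRep();  // SS.isValid()
+//     (void)NNS;
+//   }
+// \endcode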
+
+/// DeclSpec - This class captures information about "declaration specifiers",
+/// which encompasses storage-class-specifiers, type-specifiers,
+/// type-qualifiers, and function-specifiers.
+class DeclSpec {
+public:
+ // storage-class-specifier
+ // Note: The order of these enumerators is important for diagnostics.
+ enum SCS {
+ SCS_unspecified = 0,
+ SCS_typedef,
+ SCS_extern,
+ SCS_static,
+ SCS_auto,
+ SCS_register,
+ SCS_private_extern,
+ SCS_mutable
+ };
+
+ // Import type specifier width enumeration and constants.
+ typedef TypeSpecifierWidth TSW;
+ static const TSW TSW_unspecified = clang::TSW_unspecified;
+ static const TSW TSW_short = clang::TSW_short;
+ static const TSW TSW_long = clang::TSW_long;
+ static const TSW TSW_longlong = clang::TSW_longlong;
+
+ enum TSC {
+ TSC_unspecified,
+ TSC_imaginary,
+ TSC_complex
+ };
+
+ // Import type specifier sign enumeration and constants.
+ typedef TypeSpecifierSign TSS;
+ static const TSS TSS_unspecified = clang::TSS_unspecified;
+ static const TSS TSS_signed = clang::TSS_signed;
+ static const TSS TSS_unsigned = clang::TSS_unsigned;
+
+ // Import type specifier type enumeration and constants.
+ typedef TypeSpecifierType TST;
+ static const TST TST_unspecified = clang::TST_unspecified;
+ static const TST TST_void = clang::TST_void;
+ static const TST TST_char = clang::TST_char;
+ static const TST TST_wchar = clang::TST_wchar;
+ static const TST TST_char16 = clang::TST_char16;
+ static const TST TST_char32 = clang::TST_char32;
+ static const TST TST_int = clang::TST_int;
+ static const TST TST_int128 = clang::TST_int128;
+ static const TST TST_half = clang::TST_half;
+ static const TST TST_float = clang::TST_float;
+ static const TST TST_double = clang::TST_double;
+ static const TST TST_bool = clang::TST_bool;
+ static const TST TST_decimal32 = clang::TST_decimal32;
+ static const TST TST_decimal64 = clang::TST_decimal64;
+ static const TST TST_decimal128 = clang::TST_decimal128;
+ static const TST TST_enum = clang::TST_enum;
+ static const TST TST_union = clang::TST_union;
+ static const TST TST_struct = clang::TST_struct;
+ static const TST TST_class = clang::TST_class;
+ static const TST TST_typename = clang::TST_typename;
+ static const TST TST_typeofType = clang::TST_typeofType;
+ static const TST TST_typeofExpr = clang::TST_typeofExpr;
+ static const TST TST_decltype = clang::TST_decltype;
+ static const TST TST_underlyingType = clang::TST_underlyingType;
+ static const TST TST_auto = clang::TST_auto;
+ static const TST TST_unknown_anytype = clang::TST_unknown_anytype;
+ static const TST TST_atomic = clang::TST_atomic;
+ static const TST TST_error = clang::TST_error;
+
+ // type-qualifiers
+ enum TQ { // NOTE: These flags must be kept in sync with Qualifiers::TQ.
+ TQ_unspecified = 0,
+ TQ_const = 1,
+ TQ_restrict = 2,
+ TQ_volatile = 4
+ };
+
+ /// ParsedSpecifiers - Flags to query which specifiers were applied. This is
+ /// returned by getParsedSpecifiers.
+ enum ParsedSpecifiers {
+ PQ_None = 0,
+ PQ_StorageClassSpecifier = 1,
+ PQ_TypeSpecifier = 2,
+ PQ_TypeQualifier = 4,
+ PQ_FunctionSpecifier = 8
+ };
+
+private:
+ // storage-class-specifier
+ /*SCS*/unsigned StorageClassSpec : 3;
+ unsigned SCS_thread_specified : 1;
+ unsigned SCS_extern_in_linkage_spec : 1;
+
+ // type-specifier
+ /*TSW*/unsigned TypeSpecWidth : 2;
+ /*TSC*/unsigned TypeSpecComplex : 2;
+ /*TSS*/unsigned TypeSpecSign : 2;
+ /*TST*/unsigned TypeSpecType : 5;
+ unsigned TypeAltiVecVector : 1;
+ unsigned TypeAltiVecPixel : 1;
+ unsigned TypeAltiVecBool : 1;
+ unsigned TypeSpecOwned : 1;
+
+ // type-qualifiers
+ unsigned TypeQualifiers : 3; // Bitwise OR of TQ.
+
+ // function-specifier
+ unsigned FS_inline_specified : 1;
+ unsigned FS_virtual_specified : 1;
+ unsigned FS_explicit_specified : 1;
+
+ // friend-specifier
+ unsigned Friend_specified : 1;
+
+ // constexpr-specifier
+ unsigned Constexpr_specified : 1;
+
+ /*SCS*/unsigned StorageClassSpecAsWritten : 3;
+
+ union {
+ UnionParsedType TypeRep;
+ Decl *DeclRep;
+ Expr *ExprRep;
+ };
+
+ // attributes.
+ ParsedAttributes Attrs;
+
+ // Scope specifier for the type spec, if applicable.
+ CXXScopeSpec TypeScope;
+
+ // List of protocol qualifiers for objective-c classes. Used for
+ // protocol-qualified interfaces "NSString<foo>" and protocol-qualified id
+ // "id<foo>".
+ Decl * const *ProtocolQualifiers;
+ unsigned NumProtocolQualifiers;
+ SourceLocation ProtocolLAngleLoc;
+ SourceLocation *ProtocolLocs;
+
+ // SourceLocation info. These are null if the item wasn't specified or if
+ // the setting was synthesized.
+ SourceRange Range;
+
+ SourceLocation StorageClassSpecLoc, SCS_threadLoc;
+ SourceLocation TSWLoc, TSCLoc, TSSLoc, TSTLoc, AltiVecLoc;
+ /// TSTNameLoc - If TypeSpecType is any of class, enum, struct, union,
+ /// typename, then this is the location of the named type (if present);
+ /// otherwise, it is the same as TSTLoc. Hence, the pair TSTLoc and
+ /// TSTNameLoc provides source range info for tag types.
+ SourceLocation TSTNameLoc;
+ SourceRange TypeofParensRange;
+ SourceLocation TQ_constLoc, TQ_restrictLoc, TQ_volatileLoc;
+ SourceLocation FS_inlineLoc, FS_virtualLoc, FS_explicitLoc;
+ SourceLocation FriendLoc, ModulePrivateLoc, ConstexprLoc;
+
+ WrittenBuiltinSpecs writtenBS;
+ void SaveWrittenBuiltinSpecs();
+ void SaveStorageSpecifierAsWritten();
+
+ ObjCDeclSpec *ObjCQualifiers;
+
+ static bool isTypeRep(TST T) {
+ return (T == TST_typename || T == TST_typeofType ||
+ T == TST_underlyingType || T == TST_atomic);
+ }
+ static bool isExprRep(TST T) {
+ return (T == TST_typeofExpr || T == TST_decltype);
+ }
+ static bool isDeclRep(TST T) {
+ return (T == TST_enum || T == TST_struct ||
+ T == TST_union || T == TST_class);
+ }
+
+ DeclSpec(const DeclSpec&); // DO NOT IMPLEMENT
+ void operator=(const DeclSpec&); // DO NOT IMPLEMENT
+public:
+
+ DeclSpec(AttributeFactory &attrFactory)
+ : StorageClassSpec(SCS_unspecified),
+ SCS_thread_specified(false),
+ SCS_extern_in_linkage_spec(false),
+ TypeSpecWidth(TSW_unspecified),
+ TypeSpecComplex(TSC_unspecified),
+ TypeSpecSign(TSS_unspecified),
+ TypeSpecType(TST_unspecified),
+ TypeAltiVecVector(false),
+ TypeAltiVecPixel(false),
+ TypeAltiVecBool(false),
+ TypeSpecOwned(false),
+ TypeQualifiers(TQ_unspecified),
+ FS_inline_specified(false),
+ FS_virtual_specified(false),
+ FS_explicit_specified(false),
+ Friend_specified(false),
+ Constexpr_specified(false),
+ StorageClassSpecAsWritten(SCS_unspecified),
+ Attrs(attrFactory),
+ ProtocolQualifiers(0),
+ NumProtocolQualifiers(0),
+ ProtocolLocs(0),
+ writtenBS(),
+ ObjCQualifiers(0) {
+ }
+ ~DeclSpec() {
+ delete [] ProtocolQualifiers;
+ delete [] ProtocolLocs;
+ }
+ // storage-class-specifier
+ SCS getStorageClassSpec() const { return (SCS)StorageClassSpec; }
+ bool isThreadSpecified() const { return SCS_thread_specified; }
+ bool isExternInLinkageSpec() const { return SCS_extern_in_linkage_spec; }
+ void setExternInLinkageSpec(bool Value) {
+ SCS_extern_in_linkage_spec = Value;
+ }
+
+ SourceLocation getStorageClassSpecLoc() const { return StorageClassSpecLoc; }
+ SourceLocation getThreadSpecLoc() const { return SCS_threadLoc; }
+
+ void ClearStorageClassSpecs() {
+ StorageClassSpec = DeclSpec::SCS_unspecified;
+ SCS_thread_specified = false;
+ SCS_extern_in_linkage_spec = false;
+ StorageClassSpecLoc = SourceLocation();
+ SCS_threadLoc = SourceLocation();
+ }
+
+ // type-specifier
+ TSW getTypeSpecWidth() const { return (TSW)TypeSpecWidth; }
+ TSC getTypeSpecComplex() const { return (TSC)TypeSpecComplex; }
+ TSS getTypeSpecSign() const { return (TSS)TypeSpecSign; }
+ TST getTypeSpecType() const { return (TST)TypeSpecType; }
+ bool isTypeAltiVecVector() const { return TypeAltiVecVector; }
+ bool isTypeAltiVecPixel() const { return TypeAltiVecPixel; }
+ bool isTypeAltiVecBool() const { return TypeAltiVecBool; }
+ bool isTypeSpecOwned() const { return TypeSpecOwned; }
+ ParsedType getRepAsType() const {
+ assert(isTypeRep((TST) TypeSpecType) && "DeclSpec does not store a type");
+ return TypeRep;
+ }
+ Decl *getRepAsDecl() const {
+ assert(isDeclRep((TST) TypeSpecType) && "DeclSpec does not store a decl");
+ return DeclRep;
+ }
+ Expr *getRepAsExpr() const {
+ assert(isExprRep((TST) TypeSpecType) && "DeclSpec does not store an expr");
+ return ExprRep;
+ }
+ CXXScopeSpec &getTypeSpecScope() { return TypeScope; }
+ const CXXScopeSpec &getTypeSpecScope() const { return TypeScope; }
+
+ const SourceRange &getSourceRange() const LLVM_READONLY { return Range; }
+ SourceLocation getLocStart() const LLVM_READONLY { return Range.getBegin(); }
+ SourceLocation getLocEnd() const LLVM_READONLY { return Range.getEnd(); }
+
+ SourceLocation getTypeSpecWidthLoc() const { return TSWLoc; }
+ SourceLocation getTypeSpecComplexLoc() const { return TSCLoc; }
+ SourceLocation getTypeSpecSignLoc() const { return TSSLoc; }
+ SourceLocation getTypeSpecTypeLoc() const { return TSTLoc; }
+ SourceLocation getAltiVecLoc() const { return AltiVecLoc; }
+
+ SourceLocation getTypeSpecTypeNameLoc() const {
+ assert(isDeclRep((TST) TypeSpecType) || TypeSpecType == TST_typename);
+ return TSTNameLoc;
+ }
+
+ SourceRange getTypeofParensRange() const { return TypeofParensRange; }
+ void setTypeofParensRange(SourceRange range) { TypeofParensRange = range; }
+
+ /// getSpecifierName - Turn a type-specifier-type into a string like "_Bool"
+ /// or "union".
+ static const char *getSpecifierName(DeclSpec::TST T);
+ static const char *getSpecifierName(DeclSpec::TQ Q);
+ static const char *getSpecifierName(DeclSpec::TSS S);
+ static const char *getSpecifierName(DeclSpec::TSC C);
+ static const char *getSpecifierName(DeclSpec::TSW W);
+ static const char *getSpecifierName(DeclSpec::SCS S);
+
+ // type-qualifiers
+
+ /// getTypeQualifiers - Return a set of TQs.
+ unsigned getTypeQualifiers() const { return TypeQualifiers; }
+ SourceLocation getConstSpecLoc() const { return TQ_constLoc; }
+ SourceLocation getRestrictSpecLoc() const { return TQ_restrictLoc; }
+ SourceLocation getVolatileSpecLoc() const { return TQ_volatileLoc; }
+
+ /// \brief Clear out all of the type qualifiers.
+ void ClearTypeQualifiers() {
+ TypeQualifiers = 0;
+ TQ_constLoc = SourceLocation();
+ TQ_restrictLoc = SourceLocation();
+ TQ_volatileLoc = SourceLocation();
+ }
+
+ // function-specifier
+ bool isInlineSpecified() const { return FS_inline_specified; }
+ SourceLocation getInlineSpecLoc() const { return FS_inlineLoc; }
+
+ bool isVirtualSpecified() const { return FS_virtual_specified; }
+ SourceLocation getVirtualSpecLoc() const { return FS_virtualLoc; }
+
+ bool isExplicitSpecified() const { return FS_explicit_specified; }
+ SourceLocation getExplicitSpecLoc() const { return FS_explicitLoc; }
+
+ void ClearFunctionSpecs() {
+ FS_inline_specified = false;
+ FS_inlineLoc = SourceLocation();
+ FS_virtual_specified = false;
+ FS_virtualLoc = SourceLocation();
+ FS_explicit_specified = false;
+ FS_explicitLoc = SourceLocation();
+ }
+
+ /// hasTypeSpecifier - Return true if any type-specifier has been found.
+ bool hasTypeSpecifier() const {
+ return getTypeSpecType() != DeclSpec::TST_unspecified ||
+ getTypeSpecWidth() != DeclSpec::TSW_unspecified ||
+ getTypeSpecComplex() != DeclSpec::TSC_unspecified ||
+ getTypeSpecSign() != DeclSpec::TSS_unspecified;
+ }
+
+ /// getParsedSpecifiers - Return a bitmask of which flavors of specifiers this
+ /// DeclSpec includes.
+ ///
+ unsigned getParsedSpecifiers() const;
+
+ SCS getStorageClassSpecAsWritten() const {
+ return (SCS)StorageClassSpecAsWritten;
+ }
+
+ /// isEmpty - Return true if this declaration specifier is completely empty:
+ /// no tokens were parsed in the production of it.
+ bool isEmpty() const {
+ return getParsedSpecifiers() == DeclSpec::PQ_None;
+ }
+
+ void SetRangeStart(SourceLocation Loc) { Range.setBegin(Loc); }
+ void SetRangeEnd(SourceLocation Loc) { Range.setEnd(Loc); }
+
+ /// These methods set the specified attribute of the DeclSpec and
+ /// return false if there was no error. If an error occurs (for
+ /// example, if we tried to set "auto" on a spec with "extern"
+ /// already set), they return true and set PrevSpec and DiagID
+ /// such that
+ /// Diag(Loc, DiagID) << PrevSpec;
+ /// will yield a useful result.
+ ///
+ /// TODO: use a more general approach that still allows these
+ /// diagnostics to be ignored when desired.
+ bool SetStorageClassSpec(Sema &S, SCS SC, SourceLocation Loc,
+ const char *&PrevSpec, unsigned &DiagID);
+ bool SetStorageClassSpecThread(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID);
+ bool SetTypeSpecWidth(TSW W, SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID);
+ bool SetTypeSpecComplex(TSC C, SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID);
+ bool SetTypeSpecSign(TSS S, SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID);
+ bool SetTypeSpecType(TST T, SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID);
+ bool SetTypeSpecType(TST T, SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID, ParsedType Rep);
+ bool SetTypeSpecType(TST T, SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID, Decl *Rep, bool Owned);
+ bool SetTypeSpecType(TST T, SourceLocation TagKwLoc,
+ SourceLocation TagNameLoc, const char *&PrevSpec,
+ unsigned &DiagID, ParsedType Rep);
+ bool SetTypeSpecType(TST T, SourceLocation TagKwLoc,
+ SourceLocation TagNameLoc, const char *&PrevSpec,
+ unsigned &DiagID, Decl *Rep, bool Owned);
+
+ bool SetTypeSpecType(TST T, SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID, Expr *Rep);
+ bool SetTypeAltiVecVector(bool isAltiVecVector, SourceLocation Loc,
+ const char *&PrevSpec, unsigned &DiagID);
+ bool SetTypeAltiVecPixel(bool isAltiVecPixel, SourceLocation Loc,
+ const char *&PrevSpec, unsigned &DiagID);
+ bool SetTypeSpecError();
+ void UpdateDeclRep(Decl *Rep) {
+ assert(isDeclRep((TST) TypeSpecType));
+ DeclRep = Rep;
+ }
+ void UpdateTypeRep(ParsedType Rep) {
+ assert(isTypeRep((TST) TypeSpecType));
+ TypeRep = Rep;
+ }
+ void UpdateExprRep(Expr *Rep) {
+ assert(isExprRep((TST) TypeSpecType));
+ ExprRep = Rep;
+ }
+
+ bool SetTypeQual(TQ T, SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID, const LangOptions &Lang);
+
+ bool SetFunctionSpecInline(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID);
+ bool SetFunctionSpecVirtual(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID);
+ bool SetFunctionSpecExplicit(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID);
+
+ bool SetFriendSpec(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID);
+ bool setModulePrivateSpec(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID);
+ bool SetConstexprSpec(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID);
+
+ bool isFriendSpecified() const { return Friend_specified; }
+ SourceLocation getFriendSpecLoc() const { return FriendLoc; }
+
+ bool isModulePrivateSpecified() const { return ModulePrivateLoc.isValid(); }
+ SourceLocation getModulePrivateSpecLoc() const { return ModulePrivateLoc; }
+
+ bool isConstexprSpecified() const { return Constexpr_specified; }
+ SourceLocation getConstexprSpecLoc() const { return ConstexprLoc; }
+
+ void ClearConstexprSpec() {
+ Constexpr_specified = false;
+ ConstexprLoc = SourceLocation();
+ }
+
+ AttributePool &getAttributePool() const {
+ return Attrs.getPool();
+ }
+
+ /// addAttributes - Concatenates two attribute lists.
+ /// The GCC attribute syntax allows for the following:
+ ///
+ /// short __attribute__(( unused, deprecated ))
+ /// int __attribute__(( may_alias, aligned(16) )) var;
+ ///
+ /// This declares 4 attributes using 2 lists. The following syntax is
+ /// also allowed and equivalent to the previous declaration.
+ ///
+ /// short __attribute__((unused)) __attribute__((deprecated))
+ /// int __attribute__((may_alias)) __attribute__((aligned(16))) var;
+ ///
+ void addAttributes(AttributeList *AL) {
+ Attrs.addAll(AL);
+ }
+ void setAttributes(AttributeList *AL) {
+ Attrs.set(AL);
+ }
+
+ bool hasAttributes() const { return !Attrs.empty(); }
+
+ ParsedAttributes &getAttributes() { return Attrs; }
+ const ParsedAttributes &getAttributes() const { return Attrs; }
+
+ /// takeAttributes - Return the current attribute list and remove it from
+ /// the DeclSpec so that the DeclSpec no longer owns the attributes.
+ ParsedAttributes takeAttributes() {
+ // The non-const "copy" constructor clears the operand automatically.
+ return Attrs;
+ }
+
+ void takeAttributesFrom(ParsedAttributes &attrs) {
+ Attrs.takeAllFrom(attrs);
+ }
+
+ typedef Decl * const *ProtocolQualifierListTy;
+ ProtocolQualifierListTy getProtocolQualifiers() const {
+ return ProtocolQualifiers;
+ }
+ SourceLocation *getProtocolLocs() const { return ProtocolLocs; }
+ unsigned getNumProtocolQualifiers() const {
+ return NumProtocolQualifiers;
+ }
+ SourceLocation getProtocolLAngleLoc() const { return ProtocolLAngleLoc; }
+ void setProtocolQualifiers(Decl * const *Protos, unsigned NP,
+ SourceLocation *ProtoLocs,
+ SourceLocation LAngleLoc);
+
+ /// Finish - This does final analysis of the declspec, issuing diagnostics for
+ /// things like "_Imaginary" (lacking an FP type). After calling this method,
+ /// the DeclSpec is guaranteed to be self-consistent, even if an error occurred.
+ void Finish(DiagnosticsEngine &D, Preprocessor &PP);
+
+ const WrittenBuiltinSpecs& getWrittenBuiltinSpecs() const {
+ return writtenBS;
+ }
+
+ ObjCDeclSpec *getObjCQualifiers() const { return ObjCQualifiers; }
+ void setObjCQualifiers(ObjCDeclSpec *quals) { ObjCQualifiers = quals; }
+
+ /// isMissingDeclaratorOk - This checks if this DeclSpec can stand alone,
+ /// without a Declarator. Only tag declspecs can stand alone.
+ bool isMissingDeclaratorOk();
+};
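+
+// A sketch of the error-reporting convention used by the Set* methods above:
+// on a conflict they return true and fill in PrevSpec/DiagID so the caller
+// can emit the diagnostic. "Diag" stands in for the caller's usual
+// diagnostic helper (e.g., in the parser).
+//
+// \code
+//   const char *PrevSpec = 0;
+//   unsigned DiagID = 0;
+//   if (DS.SetTypeSpecWidth(DeclSpec::TSW_long, Loc, PrevSpec, DiagID))
+//     Diag(Loc, DiagID) << PrevSpec;
+// \endcode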
+
+/// ObjCDeclSpec - This class captures information about
+/// "declaration specifiers" specific to objective-c
+class ObjCDeclSpec {
+public:
+ /// ObjCDeclQualifier - Qualifier used on types in method
+ /// declarations. Not all combinations are sensible. Parameters
+ /// can be one of { in, out, inout } with one of { bycopy, byref }.
+ /// Returns can either be { oneway } or not.
+ ///
+ /// This should be kept in sync with Decl::ObjCDeclQualifier.
+ enum ObjCDeclQualifier {
+ DQ_None = 0x0,
+ DQ_In = 0x1,
+ DQ_Inout = 0x2,
+ DQ_Out = 0x4,
+ DQ_Bycopy = 0x8,
+ DQ_Byref = 0x10,
+ DQ_Oneway = 0x20
+ };
+
+ /// PropertyAttributeKind - list of property attributes.
+ enum ObjCPropertyAttributeKind {
+ DQ_PR_noattr = 0x0,
+ DQ_PR_readonly = 0x01,
+ DQ_PR_getter = 0x02,
+ DQ_PR_assign = 0x04,
+ DQ_PR_readwrite = 0x08,
+ DQ_PR_retain = 0x10,
+ DQ_PR_copy = 0x20,
+ DQ_PR_nonatomic = 0x40,
+ DQ_PR_setter = 0x80,
+ DQ_PR_atomic = 0x100,
+ DQ_PR_weak = 0x200,
+ DQ_PR_strong = 0x400,
+ DQ_PR_unsafe_unretained = 0x800
+ };
+
+
+ ObjCDeclSpec()
+ : objcDeclQualifier(DQ_None), PropertyAttributes(DQ_PR_noattr),
+ GetterName(0), SetterName(0) { }
+ ObjCDeclQualifier getObjCDeclQualifier() const { return objcDeclQualifier; }
+ void setObjCDeclQualifier(ObjCDeclQualifier DQVal) {
+ objcDeclQualifier = (ObjCDeclQualifier) (objcDeclQualifier | DQVal);
+ }
+
+ ObjCPropertyAttributeKind getPropertyAttributes() const {
+ return ObjCPropertyAttributeKind(PropertyAttributes);
+ }
+ void setPropertyAttributes(ObjCPropertyAttributeKind PRVal) {
+ PropertyAttributes =
+ (ObjCPropertyAttributeKind)(PropertyAttributes | PRVal);
+ }
+
+ const IdentifierInfo *getGetterName() const { return GetterName; }
+ IdentifierInfo *getGetterName() { return GetterName; }
+ void setGetterName(IdentifierInfo *name) { GetterName = name; }
+
+ const IdentifierInfo *getSetterName() const { return SetterName; }
+ IdentifierInfo *getSetterName() { return SetterName; }
+ void setSetterName(IdentifierInfo *name) { SetterName = name; }
+
+private:
+ // FIXME: These two are unrelated and mutually exclusive. So perhaps
+ // we can put them in a union to reflect their mutual exclusiveness
+ // (space saving is negligible).
+ ObjCDeclQualifier objcDeclQualifier : 6;
+
+ // NOTE: VC++ treats enums as signed, avoid using ObjCPropertyAttributeKind
+ unsigned PropertyAttributes : 12;
+ IdentifierInfo *GetterName; // getter name, or NULL if no getter
+ IdentifierInfo *SetterName; // setter name, or NULL if no setter
+};
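+
+// The setters above accumulate bits rather than overwrite them, so several
+// qualifiers or property attributes can be combined; a brief sketch:
+//
+// \code
+//   ObjCDeclSpec OCDS;
+//   OCDS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_readonly);
+//   OCDS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nonatomic);
+//   // getPropertyAttributes() now reports both bits.
+// \endcode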
+
+/// \brief Represents a C++ unqualified-id that has been parsed.
+class UnqualifiedId {
+private:
+ const UnqualifiedId &operator=(const UnqualifiedId &); // DO NOT IMPLEMENT
+
+public:
+ /// \brief Describes the kind of unqualified-id parsed.
+ enum IdKind {
+ /// \brief An identifier.
+ IK_Identifier,
+ /// \brief An overloaded operator name, e.g., operator+.
+ IK_OperatorFunctionId,
+ /// \brief A conversion function name, e.g., operator int.
+ IK_ConversionFunctionId,
+ /// \brief A user-defined literal name, e.g., operator "" _i.
+ IK_LiteralOperatorId,
+ /// \brief A constructor name.
+ IK_ConstructorName,
+ /// \brief A constructor named via a template-id.
+ IK_ConstructorTemplateId,
+ /// \brief A destructor name.
+ IK_DestructorName,
+ /// \brief A template-id, e.g., f<int>.
+ IK_TemplateId,
+ /// \brief An implicit 'self' parameter
+ IK_ImplicitSelfParam
+ } Kind;
+
+ /// \brief Anonymous union that holds extra data associated with the
+ /// parsed unqualified-id.
+ union {
+ /// \brief When Kind == IK_Identifier, the parsed identifier, or when Kind
+ /// == IK_UserLiteralId, the identifier suffix.
+ IdentifierInfo *Identifier;
+
+ /// \brief When Kind == IK_OperatorFunctionId, the overloaded operator
+ /// that we parsed.
+ struct {
+ /// \brief The kind of overloaded operator.
+ OverloadedOperatorKind Operator;
+
+ /// \brief The source locations of the individual tokens that name
+ /// the operator, e.g., the "new", "[", and "]" tokens in
+ /// operator new [].
+ ///
+ /// Different operators have different numbers of tokens in their name,
+ /// up to three. Any remaining source locations in this array will be
+ /// set to an invalid value for operators with fewer than three tokens.
+ unsigned SymbolLocations[3];
+ } OperatorFunctionId;
+
+ /// \brief When Kind == IK_ConversionFunctionId, the type that the
+ /// conversion function names.
+ UnionParsedType ConversionFunctionId;
+
+ /// \brief When Kind == IK_ConstructorName, the class-name of the type
+ /// whose constructor is being referenced.
+ UnionParsedType ConstructorName;
+
+ /// \brief When Kind == IK_DestructorName, the type referred to by the
+ /// class-name.
+ UnionParsedType DestructorName;
+
+ /// \brief When Kind == IK_TemplateId or IK_ConstructorTemplateId,
+ /// the template-id annotation that contains the template name and
+ /// template arguments.
+ TemplateIdAnnotation *TemplateId;
+ };
+
+ /// \brief The location of the first token that describes this unqualified-id,
+ /// which will be the location of the identifier, "operator" keyword,
+ /// tilde (for a destructor), or the template name of a template-id.
+ SourceLocation StartLocation;
+
+ /// \brief The location of the last token that describes this unqualified-id.
+ SourceLocation EndLocation;
+
+ UnqualifiedId() : Kind(IK_Identifier), Identifier(0) { }
+
+ /// \brief Do not use this copy constructor. It is temporary, and only
+ /// exists because we are holding FieldDeclarators in a SmallVector when we
+ /// don't actually need them.
+ ///
+ /// FIXME: Kill this copy constructor.
+ UnqualifiedId(const UnqualifiedId &Other)
+ : Kind(IK_Identifier), Identifier(Other.Identifier),
+ StartLocation(Other.StartLocation), EndLocation(Other.EndLocation) {
+ assert(Other.Kind == IK_Identifier && "Cannot copy non-identifiers");
+ }
+
+ /// \brief Destroy this unqualified-id.
+ ~UnqualifiedId() { clear(); }
+
+ /// \brief Clear out this unqualified-id, setting it to default (invalid)
+ /// state.
+ void clear();
+
+ /// \brief Determine whether this unqualified-id refers to a valid name.
+ bool isValid() const { return StartLocation.isValid(); }
+
+ /// \brief Determine whether this unqualified-id refers to an invalid name.
+ bool isInvalid() const { return !isValid(); }
+
+ /// \brief Determine what kind of name we have.
+ IdKind getKind() const { return Kind; }
+ void setKind(IdKind kind) { Kind = kind; }
+
+ /// \brief Specify that this unqualified-id was parsed as an identifier.
+ ///
+ /// \param Id the parsed identifier.
+ /// \param IdLoc the location of the parsed identifier.
+ void setIdentifier(const IdentifierInfo *Id, SourceLocation IdLoc) {
+ Kind = IK_Identifier;
+ Identifier = const_cast<IdentifierInfo *>(Id);
+ StartLocation = EndLocation = IdLoc;
+ }
+
+ /// \brief Specify that this unqualified-id was parsed as an
+ /// operator-function-id.
+ ///
+ /// \param OperatorLoc the location of the 'operator' keyword.
+ ///
+ /// \param Op the overloaded operator.
+ ///
+ /// \param SymbolLocations the locations of the individual operator symbols
+ /// in the operator.
+ void setOperatorFunctionId(SourceLocation OperatorLoc,
+ OverloadedOperatorKind Op,
+ SourceLocation SymbolLocations[3]);
+
+ /// \brief Specify that this unqualified-id was parsed as a
+ /// conversion-function-id.
+ ///
+ /// \param OperatorLoc the location of the 'operator' keyword.
+ ///
+ /// \param Ty the type to which this conversion function is converting.
+ ///
+ /// \param EndLoc the location of the last token that makes up the type name.
+ void setConversionFunctionId(SourceLocation OperatorLoc,
+ ParsedType Ty,
+ SourceLocation EndLoc) {
+ Kind = IK_ConversionFunctionId;
+ StartLocation = OperatorLoc;
+ EndLocation = EndLoc;
+ ConversionFunctionId = Ty;
+ }
+
+ /// \brief Specify that this unqualified-id was parsed as a
+ /// literal-operator-id.
+ ///
+ /// \param Id the parsed identifier.
+ ///
+ /// \param OpLoc the location of the 'operator' keyword.
+ ///
+ /// \param IdLoc the location of the identifier.
+ void setLiteralOperatorId(const IdentifierInfo *Id, SourceLocation OpLoc,
+ SourceLocation IdLoc) {
+ Kind = IK_LiteralOperatorId;
+ Identifier = const_cast<IdentifierInfo *>(Id);
+ StartLocation = OpLoc;
+ EndLocation = IdLoc;
+ }
+
+ /// \brief Specify that this unqualified-id was parsed as a constructor name.
+ ///
+ /// \param ClassType the class type referred to by the constructor name.
+ ///
+ /// \param ClassNameLoc the location of the class name.
+ ///
+ /// \param EndLoc the location of the last token that makes up the type name.
+ void setConstructorName(ParsedType ClassType,
+ SourceLocation ClassNameLoc,
+ SourceLocation EndLoc) {
+ Kind = IK_ConstructorName;
+ StartLocation = ClassNameLoc;
+ EndLocation = EndLoc;
+ ConstructorName = ClassType;
+ }
+
+ /// \brief Specify that this unqualified-id was parsed as a
+ /// template-id that names a constructor.
+ ///
+ /// \param TemplateId the template-id annotation that describes the parsed
+ /// template-id. This UnqualifiedId instance will take ownership of the
+ /// \p TemplateId and will free it on destruction.
+ void setConstructorTemplateId(TemplateIdAnnotation *TemplateId);
+
+ /// \brief Specify that this unqualified-id was parsed as a destructor name.
+ ///
+ /// \param TildeLoc the location of the '~' that introduces the destructor
+ /// name.
+ ///
+ /// \param ClassType the name of the class referred to by the destructor name.
+ void setDestructorName(SourceLocation TildeLoc,
+ ParsedType ClassType,
+ SourceLocation EndLoc) {
+ Kind = IK_DestructorName;
+ StartLocation = TildeLoc;
+ EndLocation = EndLoc;
+ DestructorName = ClassType;
+ }
+
+ /// \brief Specify that this unqualified-id was parsed as a template-id.
+ ///
+ /// \param TemplateId the template-id annotation that describes the parsed
+ /// template-id. This UnqualifiedId instance will take ownership of the
+ /// \p TemplateId and will free it on destruction.
+ void setTemplateId(TemplateIdAnnotation *TemplateId);
+
+ /// \brief Return the source range that covers this unqualified-id.
+ SourceRange getSourceRange() const LLVM_READONLY {
+ return SourceRange(StartLocation, EndLocation);
+ }
+ SourceLocation getLocStart() const LLVM_READONLY { return StartLocation; }
+ SourceLocation getLocEnd() const LLVM_READONLY { return EndLocation; }
+};
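+
+// An illustrative sketch of how a caller might fill in an UnqualifiedId;
+// II, ClassTy and the various SourceLocations are placeholders assumed to
+// come from the surrounding parse:
+//
+//   UnqualifiedId Name;
+//   Name.setIdentifier(II, IdLoc);                      // identifier 'foo'
+//   assert(Name.getKind() == UnqualifiedId::IK_Identifier);
+//
+//   Name.clear();                                       // reuse the object
+//   Name.setDestructorName(TildeLoc, ClassTy, EndLoc);  // destructor '~Foo'
+//   SourceRange Covered = Name.getSourceRange();        // '~' through 'Foo'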
+
+/// CachedTokens - A set of tokens that has been cached for later
+/// parsing.
+typedef SmallVector<Token, 4> CachedTokens;
+
+/// DeclaratorChunk - One instance of this struct is used for each type in a
+/// declarator that is parsed.
+///
+/// This is intended to be a small value object.
+struct DeclaratorChunk {
+ enum {
+ Pointer, Reference, Array, Function, BlockPointer, MemberPointer, Paren
+ } Kind;
+
+ /// Loc - The place where this type was defined.
+ SourceLocation Loc;
+  /// EndLoc - If valid, the place where this chunk ends.
+ SourceLocation EndLoc;
+
+ struct TypeInfoCommon {
+ AttributeList *AttrList;
+ };
+
+ struct PointerTypeInfo : TypeInfoCommon {
+ /// The type qualifiers: const/volatile/restrict.
+ unsigned TypeQuals : 3;
+
+ /// The location of the const-qualifier, if any.
+ unsigned ConstQualLoc;
+
+ /// The location of the volatile-qualifier, if any.
+ unsigned VolatileQualLoc;
+
+ /// The location of the restrict-qualifier, if any.
+ unsigned RestrictQualLoc;
+
+ void destroy() {
+ }
+ };
+
+ struct ReferenceTypeInfo : TypeInfoCommon {
+ /// The type qualifier: restrict. [GNU] C++ extension
+ bool HasRestrict : 1;
+ /// True if this is an lvalue reference, false if it's an rvalue reference.
+ bool LValueRef : 1;
+ void destroy() {
+ }
+ };
+
+ struct ArrayTypeInfo : TypeInfoCommon {
+ /// The type qualifiers for the array: const/volatile/restrict.
+ unsigned TypeQuals : 3;
+
+ /// True if this dimension included the 'static' keyword.
+ bool hasStatic : 1;
+
+ /// True if this dimension was [*]. In this case, NumElts is null.
+ bool isStar : 1;
+
+ /// This is the size of the array, or null if [] or [*] was specified.
+ /// Since the parser is multi-purpose, and we don't want to impose a root
+ /// expression class on all clients, NumElts is untyped.
+ Expr *NumElts;
+
+ void destroy() {}
+ };
+
+ /// ParamInfo - An array of paraminfo objects is allocated whenever a function
+ /// declarator is parsed. There are two interesting styles of arguments here:
+ /// K&R-style identifier lists and parameter type lists. K&R-style identifier
+ /// lists will have information about the identifier, but no type information.
+ /// Parameter type lists will have type info (if the actions module provides
+ /// it), but may have null identifier info: e.g. for 'void foo(int X, int)'.
+ struct ParamInfo {
+ IdentifierInfo *Ident;
+ SourceLocation IdentLoc;
+ Decl *Param;
+
+ /// DefaultArgTokens - When the parameter's default argument
+ /// cannot be parsed immediately (because it occurs within the
+ /// declaration of a member function), it will be stored here as a
+ /// sequence of tokens to be parsed once the class definition is
+ /// complete. Non-NULL indicates that there is a default argument.
+ CachedTokens *DefaultArgTokens;
+
+ ParamInfo() {}
+ ParamInfo(IdentifierInfo *ident, SourceLocation iloc,
+ Decl *param,
+ CachedTokens *DefArgTokens = 0)
+ : Ident(ident), IdentLoc(iloc), Param(param),
+ DefaultArgTokens(DefArgTokens) {}
+ };
+
+ struct TypeAndRange {
+ ParsedType Ty;
+ SourceRange Range;
+ };
+
+ struct FunctionTypeInfo : TypeInfoCommon {
+ /// hasPrototype - This is true if the function had at least one typed
+ /// argument. If the function is () or (a,b,c), then it has no prototype,
+ /// and is treated as a K&R-style function.
+ unsigned hasPrototype : 1;
+
+ /// isVariadic - If this function has a prototype, and if that
+ /// proto ends with ',...)', this is true. When true, EllipsisLoc
+ /// contains the location of the ellipsis.
+ unsigned isVariadic : 1;
+
+ /// \brief Whether the ref-qualifier (if any) is an lvalue reference.
+ /// Otherwise, it's an rvalue reference.
+ unsigned RefQualifierIsLValueRef : 1;
+
+ /// The type qualifiers: const/volatile/restrict.
+ /// The qualifier bitmask values are the same as in QualType.
+ unsigned TypeQuals : 3;
+
+ /// ExceptionSpecType - An ExceptionSpecificationType value.
+ unsigned ExceptionSpecType : 3;
+
+ /// DeleteArgInfo - If this is true, we need to delete[] ArgInfo.
+ unsigned DeleteArgInfo : 1;
+
+ /// When isVariadic is true, the location of the ellipsis in the source.
+ unsigned EllipsisLoc;
+
+ /// NumArgs - This is the number of formal arguments provided for the
+ /// declarator.
+ unsigned NumArgs;
+
+ /// NumExceptions - This is the number of types in the dynamic-exception-
+ /// decl, if the function has one.
+ unsigned NumExceptions;
+
+ /// \brief The location of the ref-qualifier, if any.
+ ///
+ /// If this is an invalid location, there is no ref-qualifier.
+ unsigned RefQualifierLoc;
+
+ /// \brief The location of the const-qualifier, if any.
+ ///
+ /// If this is an invalid location, there is no const-qualifier.
+ unsigned ConstQualifierLoc;
+
+ /// \brief The location of the volatile-qualifier, if any.
+ ///
+ /// If this is an invalid location, there is no volatile-qualifier.
+ unsigned VolatileQualifierLoc;
+
+    /// \brief The location of the 'mutable' qualifier in a lambda-declarator,
+    /// if any.
+ unsigned MutableLoc;
+
+ /// \brief When ExceptionSpecType isn't EST_None or EST_Delayed, the
+ /// location of the keyword introducing the spec.
+ unsigned ExceptionSpecLoc;
+
+ /// ArgInfo - This is a pointer to a new[]'d array of ParamInfo objects that
+ /// describe the arguments for this function declarator. This is null if
+ /// there are no arguments specified.
+ ParamInfo *ArgInfo;
+
+ union {
+ /// \brief Pointer to a new[]'d array of TypeAndRange objects that
+ /// contain the types in the function's dynamic exception specification
+ /// and their locations, if there is one.
+ TypeAndRange *Exceptions;
+
+ /// \brief Pointer to the expression in the noexcept-specifier of this
+ /// function, if it has one.
+ Expr *NoexceptExpr;
+ };
+
+ /// TrailingReturnType - If this isn't null, it's the trailing return type
+ /// specified. This is actually a ParsedType, but stored as void* to
+ /// allow union storage.
+ void *TrailingReturnType;
+
+ /// freeArgs - reset the argument list to having zero arguments. This is
+ /// used in various places for error recovery.
+ void freeArgs() {
+ if (DeleteArgInfo) {
+ delete[] ArgInfo;
+ DeleteArgInfo = false;
+ }
+ NumArgs = 0;
+ }
+
+ void destroy() {
+ if (DeleteArgInfo)
+ delete[] ArgInfo;
+ if (getExceptionSpecType() == EST_Dynamic)
+ delete[] Exceptions;
+ }
+
+ /// isKNRPrototype - Return true if this is a K&R style identifier list,
+ /// like "void foo(a,b,c)". In a function definition, this will be followed
+ /// by the argument type definitions.
+ bool isKNRPrototype() const {
+ return !hasPrototype && NumArgs != 0;
+ }
+
+ SourceLocation getEllipsisLoc() const {
+ return SourceLocation::getFromRawEncoding(EllipsisLoc);
+ }
+ SourceLocation getExceptionSpecLoc() const {
+ return SourceLocation::getFromRawEncoding(ExceptionSpecLoc);
+ }
+
+ /// \brief Retrieve the location of the ref-qualifier, if any.
+ SourceLocation getRefQualifierLoc() const {
+ return SourceLocation::getFromRawEncoding(RefQualifierLoc);
+ }
+
+    /// \brief Retrieve the location of the const-qualifier, if any.
+ SourceLocation getConstQualifierLoc() const {
+ return SourceLocation::getFromRawEncoding(ConstQualifierLoc);
+ }
+
+    /// \brief Retrieve the location of the volatile-qualifier, if any.
+ SourceLocation getVolatileQualifierLoc() const {
+ return SourceLocation::getFromRawEncoding(VolatileQualifierLoc);
+ }
+
+ /// \brief Retrieve the location of the 'mutable' qualifier, if any.
+ SourceLocation getMutableLoc() const {
+ return SourceLocation::getFromRawEncoding(MutableLoc);
+ }
+
+ /// \brief Determine whether this function declaration contains a
+ /// ref-qualifier.
+ bool hasRefQualifier() const { return getRefQualifierLoc().isValid(); }
+
+ /// \brief Determine whether this lambda-declarator contains a 'mutable'
+ /// qualifier.
+ bool hasMutableQualifier() const { return getMutableLoc().isValid(); }
+
+ /// \brief Get the type of exception specification this function has.
+ ExceptionSpecificationType getExceptionSpecType() const {
+ return static_cast<ExceptionSpecificationType>(ExceptionSpecType);
+ }
+ };
+
+ struct BlockPointerTypeInfo : TypeInfoCommon {
+ /// For now, sema will catch these as invalid.
+ /// The type qualifiers: const/volatile/restrict.
+ unsigned TypeQuals : 3;
+
+ void destroy() {
+ }
+ };
+
+ struct MemberPointerTypeInfo : TypeInfoCommon {
+ /// The type qualifiers: const/volatile/restrict.
+ unsigned TypeQuals : 3;
+ // CXXScopeSpec has a constructor, so it can't be a direct member.
+ // So we need some pointer-aligned storage and a bit of trickery.
+ union {
+ void *Aligner;
+ char Mem[sizeof(CXXScopeSpec)];
+ } ScopeMem;
+ CXXScopeSpec &Scope() {
+ return *reinterpret_cast<CXXScopeSpec*>(ScopeMem.Mem);
+ }
+ const CXXScopeSpec &Scope() const {
+ return *reinterpret_cast<const CXXScopeSpec*>(ScopeMem.Mem);
+ }
+ void destroy() {
+ Scope().~CXXScopeSpec();
+ }
+ };
+
+ union {
+ TypeInfoCommon Common;
+ PointerTypeInfo Ptr;
+ ReferenceTypeInfo Ref;
+ ArrayTypeInfo Arr;
+ FunctionTypeInfo Fun;
+ BlockPointerTypeInfo Cls;
+ MemberPointerTypeInfo Mem;
+ };
+
+ void destroy() {
+ switch (Kind) {
+ case DeclaratorChunk::Function: return Fun.destroy();
+ case DeclaratorChunk::Pointer: return Ptr.destroy();
+ case DeclaratorChunk::BlockPointer: return Cls.destroy();
+ case DeclaratorChunk::Reference: return Ref.destroy();
+ case DeclaratorChunk::Array: return Arr.destroy();
+ case DeclaratorChunk::MemberPointer: return Mem.destroy();
+ case DeclaratorChunk::Paren: return;
+ }
+ }
+
+  /// getAttrs - If there are attributes applied to this declarator chunk,
+  /// return them.
+ const AttributeList *getAttrs() const {
+ return Common.AttrList;
+ }
+
+ AttributeList *&getAttrListRef() {
+ return Common.AttrList;
+ }
+
+ /// getPointer - Return a DeclaratorChunk for a pointer.
+ ///
+ static DeclaratorChunk getPointer(unsigned TypeQuals, SourceLocation Loc,
+ SourceLocation ConstQualLoc,
+ SourceLocation VolatileQualLoc,
+ SourceLocation RestrictQualLoc) {
+ DeclaratorChunk I;
+ I.Kind = Pointer;
+ I.Loc = Loc;
+ I.Ptr.TypeQuals = TypeQuals;
+ I.Ptr.ConstQualLoc = ConstQualLoc.getRawEncoding();
+ I.Ptr.VolatileQualLoc = VolatileQualLoc.getRawEncoding();
+ I.Ptr.RestrictQualLoc = RestrictQualLoc.getRawEncoding();
+ I.Ptr.AttrList = 0;
+ return I;
+ }
+
+ /// getReference - Return a DeclaratorChunk for a reference.
+ ///
+ static DeclaratorChunk getReference(unsigned TypeQuals, SourceLocation Loc,
+ bool lvalue) {
+ DeclaratorChunk I;
+ I.Kind = Reference;
+ I.Loc = Loc;
+ I.Ref.HasRestrict = (TypeQuals & DeclSpec::TQ_restrict) != 0;
+ I.Ref.LValueRef = lvalue;
+ I.Ref.AttrList = 0;
+ return I;
+ }
+
+ /// getArray - Return a DeclaratorChunk for an array.
+ ///
+ static DeclaratorChunk getArray(unsigned TypeQuals,
+ bool isStatic, bool isStar, Expr *NumElts,
+ SourceLocation LBLoc, SourceLocation RBLoc) {
+ DeclaratorChunk I;
+ I.Kind = Array;
+ I.Loc = LBLoc;
+ I.EndLoc = RBLoc;
+ I.Arr.AttrList = 0;
+ I.Arr.TypeQuals = TypeQuals;
+ I.Arr.hasStatic = isStatic;
+ I.Arr.isStar = isStar;
+ I.Arr.NumElts = NumElts;
+ return I;
+ }
+
+ /// DeclaratorChunk::getFunction - Return a DeclaratorChunk for a function.
+ /// "TheDeclarator" is the declarator that this will be added to.
+ static DeclaratorChunk getFunction(bool hasProto, bool isVariadic,
+ SourceLocation EllipsisLoc,
+ ParamInfo *ArgInfo, unsigned NumArgs,
+ unsigned TypeQuals,
+ bool RefQualifierIsLvalueRef,
+ SourceLocation RefQualifierLoc,
+ SourceLocation ConstQualifierLoc,
+ SourceLocation VolatileQualifierLoc,
+ SourceLocation MutableLoc,
+ ExceptionSpecificationType ESpecType,
+ SourceLocation ESpecLoc,
+ ParsedType *Exceptions,
+ SourceRange *ExceptionRanges,
+ unsigned NumExceptions,
+ Expr *NoexceptExpr,
+ SourceLocation LocalRangeBegin,
+ SourceLocation LocalRangeEnd,
+ Declarator &TheDeclarator,
+ ParsedType TrailingReturnType =
+ ParsedType());
+
+ /// getBlockPointer - Return a DeclaratorChunk for a block.
+ ///
+ static DeclaratorChunk getBlockPointer(unsigned TypeQuals,
+ SourceLocation Loc) {
+ DeclaratorChunk I;
+ I.Kind = BlockPointer;
+ I.Loc = Loc;
+ I.Cls.TypeQuals = TypeQuals;
+ I.Cls.AttrList = 0;
+ return I;
+ }
+
+ static DeclaratorChunk getMemberPointer(const CXXScopeSpec &SS,
+ unsigned TypeQuals,
+ SourceLocation Loc) {
+ DeclaratorChunk I;
+ I.Kind = MemberPointer;
+ I.Loc = Loc;
+ I.Mem.TypeQuals = TypeQuals;
+ I.Mem.AttrList = 0;
+ new (I.Mem.ScopeMem.Mem) CXXScopeSpec(SS);
+ return I;
+ }
+
+ /// getParen - Return a DeclaratorChunk for a paren.
+ ///
+ static DeclaratorChunk getParen(SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
+ DeclaratorChunk I;
+ I.Kind = Paren;
+ I.Loc = LParenLoc;
+ I.EndLoc = RParenLoc;
+ I.Common.AttrList = 0;
+ return I;
+ }
+
+};
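+
+// An illustrative sketch of building chunks with the factory functions above;
+// StarLoc, LBLoc, RBLoc and SizeExpr are placeholders assumed to come from
+// the surrounding parse. For a declarator such as 'int (*p)[10]' the parser
+// might create, innermost chunk first:
+//
+//   DeclaratorChunk PtrChunk =
+//       DeclaratorChunk::getPointer(/*TypeQuals=*/0, StarLoc,
+//                                   SourceLocation(), SourceLocation(),
+//                                   SourceLocation());
+//   DeclaratorChunk ArrChunk =
+//       DeclaratorChunk::getArray(/*TypeQuals=*/0, /*isStatic=*/false,
+//                                 /*isStar=*/false, SizeExpr, LBLoc, RBLoc);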
+
+/// \brief Describes the kind of function definition (if any) provided for
+/// a function.
+enum FunctionDefinitionKind {
+ FDK_Declaration,
+ FDK_Definition,
+ FDK_Defaulted,
+ FDK_Deleted
+};
+
+/// Declarator - Information about one declarator, including the parsed type
+/// information and the identifier. When the declarator is fully formed, this
+/// is turned into the appropriate Decl object.
+///
+/// Declarators come in two types: normal declarators and abstract declarators.
+/// Abstract declarators are used when parsing types, and don't have an
+/// identifier. Normal declarators do have identifiers.
+///
+/// Instances of this class should be transient objects that live on the
+/// stack, not objects that are allocated in large quantities on the heap.
+class Declarator {
+public:
+ enum TheContext {
+ FileContext, // File scope declaration.
+ PrototypeContext, // Within a function prototype.
+ ObjCResultContext, // An ObjC method result type.
+ ObjCParameterContext,// An ObjC method parameter type.
+ KNRTypeListContext, // K&R type definition list for formals.
+ TypeNameContext, // Abstract declarator for types.
+ MemberContext, // Struct/Union field.
+ BlockContext, // Declaration within a block in a function.
+ ForContext, // Declaration within first part of a for loop.
+ ConditionContext, // Condition declaration in a C++ if/switch/while/for.
+ TemplateParamContext,// Within a template parameter list.
+ CXXNewContext, // C++ new-expression.
+ CXXCatchContext, // C++ catch exception-declaration
+ ObjCCatchContext, // Objective-C catch exception-declaration
+ BlockLiteralContext, // Block literal declarator.
+ LambdaExprContext, // Lambda-expression declarator.
+ TrailingReturnContext, // C++11 trailing-type-specifier.
+ TemplateTypeArgContext, // Template type argument.
+ AliasDeclContext, // C++11 alias-declaration.
+ AliasTemplateContext // C++11 alias-declaration template.
+ };
+
+private:
+ const DeclSpec &DS;
+ CXXScopeSpec SS;
+ UnqualifiedId Name;
+ SourceRange Range;
+
+ /// Context - Where we are parsing this declarator.
+ ///
+ TheContext Context;
+
+ /// DeclTypeInfo - This holds each type that the declarator includes as it is
+ /// parsed. This is pushed from the identifier out, which means that element
+ /// #0 will be the most closely bound to the identifier, and
+ /// DeclTypeInfo.back() will be the least closely bound.
+ SmallVector<DeclaratorChunk, 8> DeclTypeInfo;
+
+ /// InvalidType - Set by Sema::GetTypeForDeclarator().
+ bool InvalidType : 1;
+
+ /// GroupingParens - Set by Parser::ParseParenDeclarator().
+ bool GroupingParens : 1;
+
+ /// FunctionDefinition - Is this Declarator for a function or member
+ /// definition and, if so, what kind?
+ ///
+ /// Actually a FunctionDefinitionKind.
+ unsigned FunctionDefinition : 2;
+
+  /// Redeclaration - True if this Declarator is a redeclaration.
+ bool Redeclaration : 1;
+
+ /// Attrs - Attributes.
+ ParsedAttributes Attrs;
+
+ /// AsmLabel - The asm label, if specified.
+ Expr *AsmLabel;
+
+ /// InlineParams - This is a local array used for the first function decl
+ /// chunk to avoid going to the heap for the common case when we have one
+ /// function chunk in the declarator.
+ DeclaratorChunk::ParamInfo InlineParams[16];
+ bool InlineParamsUsed;
+
+ /// Extension - true if the declaration is preceded by __extension__.
+ bool Extension : 1;
+
+ /// \brief If this is the second or subsequent declarator in this declaration,
+ /// the location of the comma before this declarator.
+ SourceLocation CommaLoc;
+
+ /// \brief If provided, the source location of the ellipsis used to describe
+ /// this declarator as a parameter pack.
+ SourceLocation EllipsisLoc;
+
+ friend struct DeclaratorChunk;
+
+public:
+ Declarator(const DeclSpec &ds, TheContext C)
+ : DS(ds), Range(ds.getSourceRange()), Context(C),
+ InvalidType(DS.getTypeSpecType() == DeclSpec::TST_error),
+ GroupingParens(false), FunctionDefinition(FDK_Declaration),
+ Redeclaration(false),
+ Attrs(ds.getAttributePool().getFactory()), AsmLabel(0),
+ InlineParamsUsed(false), Extension(false) {
+ }
+
+ ~Declarator() {
+ clear();
+ }
+
+ /// getDeclSpec - Return the declaration-specifier that this declarator was
+ /// declared with.
+ const DeclSpec &getDeclSpec() const { return DS; }
+
+ /// getMutableDeclSpec - Return a non-const version of the DeclSpec. This
+ /// should be used with extreme care: declspecs can often be shared between
+ /// multiple declarators, so mutating the DeclSpec affects all of the
+ /// Declarators. This should only be done when the declspec is known to not
+ /// be shared or when in error recovery etc.
+ DeclSpec &getMutableDeclSpec() { return const_cast<DeclSpec &>(DS); }
+
+ AttributePool &getAttributePool() const {
+ return Attrs.getPool();
+ }
+
+ /// getCXXScopeSpec - Return the C++ scope specifier (global scope or
+ /// nested-name-specifier) that is part of the declarator-id.
+ const CXXScopeSpec &getCXXScopeSpec() const { return SS; }
+ CXXScopeSpec &getCXXScopeSpec() { return SS; }
+
+ /// \brief Retrieve the name specified by this declarator.
+ UnqualifiedId &getName() { return Name; }
+
+ TheContext getContext() const { return Context; }
+
+ bool isPrototypeContext() const {
+ return (Context == PrototypeContext ||
+ Context == ObjCParameterContext ||
+ Context == ObjCResultContext);
+ }
+
+ /// getSourceRange - Get the source range that spans this declarator.
+ const SourceRange &getSourceRange() const LLVM_READONLY { return Range; }
+ SourceLocation getLocStart() const LLVM_READONLY { return Range.getBegin(); }
+ SourceLocation getLocEnd() const LLVM_READONLY { return Range.getEnd(); }
+
+ void SetSourceRange(SourceRange R) { Range = R; }
+ /// SetRangeBegin - Set the start of the source range to Loc, unless it's
+ /// invalid.
+ void SetRangeBegin(SourceLocation Loc) {
+ if (!Loc.isInvalid())
+ Range.setBegin(Loc);
+ }
+ /// SetRangeEnd - Set the end of the source range to Loc, unless it's invalid.
+ void SetRangeEnd(SourceLocation Loc) {
+ if (!Loc.isInvalid())
+ Range.setEnd(Loc);
+ }
+ /// ExtendWithDeclSpec - Extend the declarator source range to include the
+ /// given declspec, unless its location is invalid. Adopts the range start if
+ /// the current range start is invalid.
+ void ExtendWithDeclSpec(const DeclSpec &DS) {
+ const SourceRange &SR = DS.getSourceRange();
+ if (Range.getBegin().isInvalid())
+ Range.setBegin(SR.getBegin());
+ if (!SR.getEnd().isInvalid())
+ Range.setEnd(SR.getEnd());
+ }
+
+ /// clear - Reset the contents of this Declarator.
+ void clear() {
+ SS.clear();
+ Name.clear();
+ Range = DS.getSourceRange();
+
+ for (unsigned i = 0, e = DeclTypeInfo.size(); i != e; ++i)
+ DeclTypeInfo[i].destroy();
+ DeclTypeInfo.clear();
+ Attrs.clear();
+ AsmLabel = 0;
+ InlineParamsUsed = false;
+ CommaLoc = SourceLocation();
+ EllipsisLoc = SourceLocation();
+ }
+
+ /// mayOmitIdentifier - Return true if the identifier is either optional or
+ /// not allowed. This is true for typenames, prototypes, and template
+ /// parameter lists.
+ bool mayOmitIdentifier() const {
+ switch (Context) {
+ case FileContext:
+ case KNRTypeListContext:
+ case MemberContext:
+ case BlockContext:
+ case ForContext:
+ case ConditionContext:
+ return false;
+
+ case TypeNameContext:
+ case AliasDeclContext:
+ case AliasTemplateContext:
+ case PrototypeContext:
+ case ObjCParameterContext:
+ case ObjCResultContext:
+ case TemplateParamContext:
+ case CXXNewContext:
+ case CXXCatchContext:
+ case ObjCCatchContext:
+ case BlockLiteralContext:
+ case LambdaExprContext:
+ case TemplateTypeArgContext:
+ case TrailingReturnContext:
+ return true;
+ }
+ llvm_unreachable("unknown context kind!");
+ }
+
+ /// mayHaveIdentifier - Return true if the identifier is either optional or
+ /// required. This is true for normal declarators and prototypes, but not
+ /// typenames.
+ bool mayHaveIdentifier() const {
+ switch (Context) {
+ case FileContext:
+ case KNRTypeListContext:
+ case MemberContext:
+ case BlockContext:
+ case ForContext:
+ case ConditionContext:
+ case PrototypeContext:
+ case TemplateParamContext:
+ case CXXCatchContext:
+ case ObjCCatchContext:
+ return true;
+
+ case TypeNameContext:
+ case CXXNewContext:
+ case AliasDeclContext:
+ case AliasTemplateContext:
+ case ObjCParameterContext:
+ case ObjCResultContext:
+ case BlockLiteralContext:
+ case LambdaExprContext:
+ case TemplateTypeArgContext:
+ case TrailingReturnContext:
+ return false;
+ }
+ llvm_unreachable("unknown context kind!");
+ }
+
+ /// mayBeFollowedByCXXDirectInit - Return true if the declarator can be
+ /// followed by a C++ direct initializer, e.g. "int x(1);".
+ bool mayBeFollowedByCXXDirectInit() const {
+ if (hasGroupingParens()) return false;
+
+ if (getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
+ return false;
+
+ if (getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_extern &&
+ Context != FileContext)
+ return false;
+
+ // Special names can't have direct initializers.
+ if (Name.getKind() != UnqualifiedId::IK_Identifier)
+ return false;
+
+ switch (Context) {
+ case FileContext:
+ case BlockContext:
+ case ForContext:
+ return true;
+
+ case ConditionContext:
+ // This may not be followed by a direct initializer, but it can't be a
+ // function declaration either, and we'd prefer to perform a tentative
+ // parse in order to produce the right diagnostic.
+ return true;
+
+ case KNRTypeListContext:
+ case MemberContext:
+ case PrototypeContext:
+ case ObjCParameterContext:
+ case ObjCResultContext:
+ case TemplateParamContext:
+ case CXXCatchContext:
+ case ObjCCatchContext:
+ case TypeNameContext:
+ case CXXNewContext:
+ case AliasDeclContext:
+ case AliasTemplateContext:
+ case BlockLiteralContext:
+ case LambdaExprContext:
+ case TemplateTypeArgContext:
+ case TrailingReturnContext:
+ return false;
+ }
+ llvm_unreachable("unknown context kind!");
+ }
+
+  /// isPastIdentifier - Return true if we have parsed beyond the point where
+  /// the name would appear.
+ bool isPastIdentifier() const { return Name.isValid(); }
+
+ /// hasName - Whether this declarator has a name, which might be an
+ /// identifier (accessible via getIdentifier()) or some kind of
+ /// special C++ name (constructor, destructor, etc.).
+ bool hasName() const {
+ return Name.getKind() != UnqualifiedId::IK_Identifier || Name.Identifier;
+ }
+
+ IdentifierInfo *getIdentifier() const {
+ if (Name.getKind() == UnqualifiedId::IK_Identifier)
+ return Name.Identifier;
+
+ return 0;
+ }
+ SourceLocation getIdentifierLoc() const { return Name.StartLocation; }
+
+ /// \brief Set the name of this declarator to be the given identifier.
+ void SetIdentifier(IdentifierInfo *Id, SourceLocation IdLoc) {
+ Name.setIdentifier(Id, IdLoc);
+ }
+
+ /// AddTypeInfo - Add a chunk to this declarator. Also extend the range to
+ /// EndLoc, which should be the last token of the chunk.
+ void AddTypeInfo(const DeclaratorChunk &TI,
+ ParsedAttributes &attrs,
+ SourceLocation EndLoc) {
+ DeclTypeInfo.push_back(TI);
+ DeclTypeInfo.back().getAttrListRef() = attrs.getList();
+ getAttributePool().takeAllFrom(attrs.getPool());
+
+ if (!EndLoc.isInvalid())
+ SetRangeEnd(EndLoc);
+ }
+
+ /// AddInnermostTypeInfo - Add a new innermost chunk to this declarator.
+ void AddInnermostTypeInfo(const DeclaratorChunk &TI) {
+ DeclTypeInfo.insert(DeclTypeInfo.begin(), TI);
+ }
+
+ /// getNumTypeObjects() - Return the number of types applied to this
+ /// declarator.
+ unsigned getNumTypeObjects() const { return DeclTypeInfo.size(); }
+
+ /// Return the specified TypeInfo from this declarator. TypeInfo #0 is
+ /// closest to the identifier.
+ const DeclaratorChunk &getTypeObject(unsigned i) const {
+ assert(i < DeclTypeInfo.size() && "Invalid type chunk");
+ return DeclTypeInfo[i];
+ }
+ DeclaratorChunk &getTypeObject(unsigned i) {
+ assert(i < DeclTypeInfo.size() && "Invalid type chunk");
+ return DeclTypeInfo[i];
+ }
+
+ void DropFirstTypeObject()
+ {
+ assert(!DeclTypeInfo.empty() && "No type chunks to drop.");
+ DeclTypeInfo.front().destroy();
+ DeclTypeInfo.erase(DeclTypeInfo.begin());
+ }
+
+ /// isArrayOfUnknownBound - This method returns true if the declarator
+ /// is a declarator for an array of unknown bound (looking through
+ /// parentheses).
+ bool isArrayOfUnknownBound() const {
+ for (unsigned i = 0, i_end = DeclTypeInfo.size(); i < i_end; ++i) {
+ switch (DeclTypeInfo[i].Kind) {
+ case DeclaratorChunk::Paren:
+ continue;
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::MemberPointer:
+ return false;
+ case DeclaratorChunk::Array:
+ return !DeclTypeInfo[i].Arr.NumElts;
+ }
+ llvm_unreachable("Invalid type chunk");
+ }
+ return false;
+ }
+
+ /// isFunctionDeclarator - This method returns true if the declarator
+ /// is a function declarator (looking through parentheses).
+  /// If true is returned, the reference parameter \p idx is set to the index
+  /// of the function chunk.
+ bool isFunctionDeclarator(unsigned& idx) const {
+ for (unsigned i = 0, i_end = DeclTypeInfo.size(); i < i_end; ++i) {
+ switch (DeclTypeInfo[i].Kind) {
+ case DeclaratorChunk::Function:
+ idx = i;
+ return true;
+ case DeclaratorChunk::Paren:
+ continue;
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::MemberPointer:
+ return false;
+ }
+ llvm_unreachable("Invalid type chunk");
+ }
+ return false;
+ }
+
+ /// isFunctionDeclarator - Once this declarator is fully parsed and formed,
+  /// this method returns true if the declarator is a function declarator
+ /// (looking through parentheses).
+ bool isFunctionDeclarator() const {
+ unsigned index;
+ return isFunctionDeclarator(index);
+ }
+
+ /// getFunctionTypeInfo - Retrieves the function type info object
+ /// (looking through parentheses).
+ DeclaratorChunk::FunctionTypeInfo &getFunctionTypeInfo() {
+ assert(isFunctionDeclarator() && "Not a function declarator!");
+ unsigned index = 0;
+ isFunctionDeclarator(index);
+ return DeclTypeInfo[index].Fun;
+ }
+
+ /// getFunctionTypeInfo - Retrieves the function type info object
+ /// (looking through parentheses).
+ const DeclaratorChunk::FunctionTypeInfo &getFunctionTypeInfo() const {
+ return const_cast<Declarator*>(this)->getFunctionTypeInfo();
+ }
+
+ /// \brief Determine whether the declaration that will be produced from
+ /// this declaration will be a function.
+ ///
+ /// A declaration can declare a function even if the declarator itself
+ /// isn't a function declarator, if the type specifier refers to a function
+ /// type. This routine checks for both cases.
+ bool isDeclarationOfFunction() const;
+
+ /// takeAttributes - Takes attributes from the given parsed-attributes
+  /// set and adds them to this declarator.
+ ///
+ /// These examples both add 3 attributes to "var":
+ /// short int var __attribute__((aligned(16),common,deprecated));
+  ///   short int x, __attribute__((aligned(16))) var
+ /// __attribute__((common,deprecated));
+ ///
+ /// Also extends the range of the declarator.
+ void takeAttributes(ParsedAttributes &attrs, SourceLocation lastLoc) {
+ Attrs.takeAllFrom(attrs);
+
+ if (!lastLoc.isInvalid())
+ SetRangeEnd(lastLoc);
+ }
+
+ const AttributeList *getAttributes() const { return Attrs.getList(); }
+ AttributeList *getAttributes() { return Attrs.getList(); }
+
+ AttributeList *&getAttrListRef() { return Attrs.getListRef(); }
+
+ /// hasAttributes - do we contain any attributes?
+ bool hasAttributes() const {
+ if (getAttributes() || getDeclSpec().hasAttributes()) return true;
+ for (unsigned i = 0, e = getNumTypeObjects(); i != e; ++i)
+ if (getTypeObject(i).getAttrs())
+ return true;
+ return false;
+ }
+
+ void setAsmLabel(Expr *E) { AsmLabel = E; }
+ Expr *getAsmLabel() const { return AsmLabel; }
+
+ void setExtension(bool Val = true) { Extension = Val; }
+ bool getExtension() const { return Extension; }
+
+ void setInvalidType(bool Val = true) { InvalidType = Val; }
+ bool isInvalidType() const {
+ return InvalidType || DS.getTypeSpecType() == DeclSpec::TST_error;
+ }
+
+ void setGroupingParens(bool flag) { GroupingParens = flag; }
+ bool hasGroupingParens() const { return GroupingParens; }
+
+ bool isFirstDeclarator() const { return !CommaLoc.isValid(); }
+ SourceLocation getCommaLoc() const { return CommaLoc; }
+ void setCommaLoc(SourceLocation CL) { CommaLoc = CL; }
+
+ bool hasEllipsis() const { return EllipsisLoc.isValid(); }
+ SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
+ void setEllipsisLoc(SourceLocation EL) { EllipsisLoc = EL; }
+
+ void setFunctionDefinitionKind(FunctionDefinitionKind Val) {
+ FunctionDefinition = Val;
+ }
+
+ bool isFunctionDefinition() const {
+ return getFunctionDefinitionKind() != FDK_Declaration;
+ }
+
+ FunctionDefinitionKind getFunctionDefinitionKind() const {
+ return (FunctionDefinitionKind)FunctionDefinition;
+ }
+
+ void setRedeclaration(bool Val) { Redeclaration = Val; }
+ bool isRedeclaration() const { return Redeclaration; }
+};
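+
+// An illustrative sketch of a Declarator being populated; DS is a
+// fully-parsed DeclSpec, Attrs an empty ParsedAttributes set, and II, IdLoc
+// and StarLoc are placeholders assumed to come from the surrounding parse:
+//
+//   Declarator D(DS, Declarator::BlockContext);
+//   D.SetIdentifier(II, IdLoc);
+//   D.AddTypeInfo(DeclaratorChunk::getPointer(/*TypeQuals=*/0, StarLoc,
+//                                             SourceLocation(),
+//                                             SourceLocation(),
+//                                             SourceLocation()),
+//                 Attrs, SourceLocation());
+//   assert(D.hasName() && D.getNumTypeObjects() == 1);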
+
+/// FieldDeclarator - This little struct is used to capture information about
+/// structure field declarators, which is basically just a bitfield size.
+struct FieldDeclarator {
+ Declarator D;
+ Expr *BitfieldSize;
+ explicit FieldDeclarator(DeclSpec &DS) : D(DS, Declarator::MemberContext) {
+ BitfieldSize = 0;
+ }
+};
+
+/// VirtSpecifiers - Represents a C++0x virt-specifier-seq.
+class VirtSpecifiers {
+public:
+ enum Specifier {
+ VS_None = 0,
+ VS_Override = 1,
+ VS_Final = 2
+ };
+
+ VirtSpecifiers() : Specifiers(0) { }
+
+ bool SetSpecifier(Specifier VS, SourceLocation Loc,
+ const char *&PrevSpec);
+
+ bool isOverrideSpecified() const { return Specifiers & VS_Override; }
+ SourceLocation getOverrideLoc() const { return VS_overrideLoc; }
+
+ bool isFinalSpecified() const { return Specifiers & VS_Final; }
+ SourceLocation getFinalLoc() const { return VS_finalLoc; }
+
+ void clear() { Specifiers = 0; }
+
+ static const char *getSpecifierName(Specifier VS);
+
+ SourceLocation getLastLocation() const { return LastLocation; }
+
+private:
+ unsigned Specifiers;
+
+ SourceLocation VS_overrideLoc, VS_finalLoc;
+ SourceLocation LastLocation;
+};
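+
+// An illustrative sketch of recording a virt-specifier-seq; OverrideLoc is a
+// placeholder, and SetSpecifier is assumed to follow the usual Set*
+// convention of returning true (and filling PrevSpec) on a repeated
+// specifier:
+//
+//   VirtSpecifiers VS;
+//   const char *PrevSpec = 0;
+//   if (VS.SetSpecifier(VirtSpecifiers::VS_Override, OverrideLoc, PrevSpec))
+//     /* diagnose the duplicate, naming PrevSpec */;
+//   bool IsOverride = VS.isOverrideSpecified();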
+
+/// LambdaCapture - An individual capture in a lambda introducer.
+struct LambdaCapture {
+ LambdaCaptureKind Kind;
+ SourceLocation Loc;
+ IdentifierInfo* Id;
+ SourceLocation EllipsisLoc;
+
+ LambdaCapture(LambdaCaptureKind Kind, SourceLocation Loc,
+ IdentifierInfo* Id = 0,
+ SourceLocation EllipsisLoc = SourceLocation())
+ : Kind(Kind), Loc(Loc), Id(Id), EllipsisLoc(EllipsisLoc)
+ {}
+};
+
+/// LambdaIntroducer - Represents a complete lambda introducer.
+struct LambdaIntroducer {
+ SourceRange Range;
+ SourceLocation DefaultLoc;
+ LambdaCaptureDefault Default;
+ llvm::SmallVector<LambdaCapture, 4> Captures;
+
+ LambdaIntroducer()
+ : Default(LCD_None) {}
+
+ /// addCapture - Append a capture in a lambda introducer.
+ void addCapture(LambdaCaptureKind Kind,
+ SourceLocation Loc,
+ IdentifierInfo* Id = 0,
+ SourceLocation EllipsisLoc = SourceLocation()) {
+ Captures.push_back(LambdaCapture(Kind, Loc, Id, EllipsisLoc));
+ }
+
+};
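+
+// An illustrative sketch of building the introducer for '[&, x]'; AmpLoc,
+// XLoc and XIdent are placeholders, and LCD_ByRef/LCK_ByCopy are assumed to
+// be the corresponding LambdaCaptureDefault/LambdaCaptureKind enumerators
+// declared elsewhere:
+//
+//   LambdaIntroducer Intro;
+//   Intro.Default = LCD_ByRef;
+//   Intro.DefaultLoc = AmpLoc;
+//   Intro.addCapture(LCK_ByCopy, XLoc, XIdent);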
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h
new file mode 100644
index 0000000..3320cd8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/DelayedDiagnostic.h
@@ -0,0 +1,220 @@
+//===--- DelayedDiagnostic.h - Delayed declarator diagnostics ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DelayedDiagnostic class, which is used to
+// record diagnostics that are being conditionally produced during
+// declarator parsing. Certain kinds of diagnostics --- notably
+// deprecation and access control --- are suppressed based on
+// semantic properties of the parsed declaration that aren't known
+// until it is fully parsed.
+//
+// This file also defines AccessedEntity.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_DELAYED_DIAGNOSTIC_H
+#define LLVM_CLANG_SEMA_DELAYED_DIAGNOSTIC_H
+
+#include "clang/AST/DeclCXX.h"
+
+namespace clang {
+namespace sema {
+
+/// A declaration being accessed, together with information about how
+/// it was accessed.
+class AccessedEntity {
+public:
+ /// A member declaration found through lookup. The target is the
+ /// member.
+ enum MemberNonce { Member };
+
+ /// A hierarchy (base-to-derived or derived-to-base) conversion.
+ /// The target is the base class.
+ enum BaseNonce { Base };
+
+ bool isMemberAccess() const { return IsMember; }
+
+ AccessedEntity(ASTContext &Context,
+ MemberNonce _,
+ CXXRecordDecl *NamingClass,
+ DeclAccessPair FoundDecl,
+ QualType BaseObjectType)
+ : Access(FoundDecl.getAccess()), IsMember(true),
+ Target(FoundDecl.getDecl()), NamingClass(NamingClass),
+ BaseObjectType(BaseObjectType), Diag(0, Context.getDiagAllocator()) {
+ }
+
+ AccessedEntity(ASTContext &Context,
+ BaseNonce _,
+ CXXRecordDecl *BaseClass,
+ CXXRecordDecl *DerivedClass,
+ AccessSpecifier Access)
+ : Access(Access), IsMember(false),
+ Target(BaseClass),
+ NamingClass(DerivedClass),
+ Diag(0, Context.getDiagAllocator()) {
+ }
+
+ bool isQuiet() const { return Diag.getDiagID() == 0; }
+
+ AccessSpecifier getAccess() const { return AccessSpecifier(Access); }
+
+ // These apply to member decls...
+ NamedDecl *getTargetDecl() const { return Target; }
+ CXXRecordDecl *getNamingClass() const { return NamingClass; }
+
+ // ...and these apply to hierarchy conversions.
+ CXXRecordDecl *getBaseClass() const {
+ assert(!IsMember); return cast<CXXRecordDecl>(Target);
+ }
+ CXXRecordDecl *getDerivedClass() const { return NamingClass; }
+
+ /// Retrieves the base object type, important when accessing
+ /// an instance member.
+ QualType getBaseObjectType() const { return BaseObjectType; }
+
+ /// Sets a diagnostic to be performed. The diagnostic is given
+ /// four (additional) arguments:
+ /// %0 - 0 if the entity was private, 1 if protected
+ /// %1 - the DeclarationName of the entity
+ /// %2 - the TypeDecl type of the naming class
+ /// %3 - the TypeDecl type of the declaring class
+ void setDiag(const PartialDiagnostic &PDiag) {
+ assert(isQuiet() && "partial diagnostic already defined");
+ Diag = PDiag;
+ }
+ PartialDiagnostic &setDiag(unsigned DiagID) {
+ assert(isQuiet() && "partial diagnostic already defined");
+ assert(DiagID && "creating null diagnostic");
+ Diag.Reset(DiagID);
+ return Diag;
+ }
+ const PartialDiagnostic &getDiag() const {
+ return Diag;
+ }
+
+private:
+ unsigned Access : 2;
+ unsigned IsMember : 1;
+ NamedDecl *Target;
+ CXXRecordDecl *NamingClass;
+ QualType BaseObjectType;
+ PartialDiagnostic Diag;
+};
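+
+// An illustrative sketch of recording a member access; Context, NamingClass,
+// Found and BaseObjectType come from the caller, and SomeAccessDiagID stands
+// in for a real diagnostic ID that takes the four arguments described above:
+//
+//   AccessedEntity Entity(Context, AccessedEntity::Member, NamingClass,
+//                         Found, BaseObjectType);
+//   if (ShouldDiagnose)
+//     Entity.setDiag(SomeAccessDiagID);
+//   bool Silent = Entity.isQuiet();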
+
+/// A diagnostic message which has been conditionally emitted pending
+/// the complete parsing of the current declaration.
+class DelayedDiagnostic {
+public:
+ enum DDKind { Deprecation, Access, ForbiddenType };
+
+ unsigned char Kind; // actually a DDKind
+ bool Triggered;
+
+ SourceLocation Loc;
+
+ void Destroy();
+
+ static DelayedDiagnostic makeDeprecation(SourceLocation Loc,
+ const NamedDecl *D,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ StringRef Msg);
+
+ static DelayedDiagnostic makeAccess(SourceLocation Loc,
+ const AccessedEntity &Entity) {
+ DelayedDiagnostic DD;
+ DD.Kind = Access;
+ DD.Triggered = false;
+ DD.Loc = Loc;
+ new (&DD.getAccessData()) AccessedEntity(Entity);
+ return DD;
+ }
+
+ static DelayedDiagnostic makeForbiddenType(SourceLocation loc,
+ unsigned diagnostic,
+ QualType type,
+ unsigned argument) {
+ DelayedDiagnostic DD;
+ DD.Kind = ForbiddenType;
+ DD.Triggered = false;
+ DD.Loc = loc;
+ DD.ForbiddenTypeData.Diagnostic = diagnostic;
+ DD.ForbiddenTypeData.OperandType = type.getAsOpaquePtr();
+ DD.ForbiddenTypeData.Argument = argument;
+ return DD;
+ }
+
+ AccessedEntity &getAccessData() {
+ assert(Kind == Access && "Not an access diagnostic.");
+ return *reinterpret_cast<AccessedEntity*>(AccessData);
+ }
+ const AccessedEntity &getAccessData() const {
+ assert(Kind == Access && "Not an access diagnostic.");
+ return *reinterpret_cast<const AccessedEntity*>(AccessData);
+ }
+
+ const NamedDecl *getDeprecationDecl() const {
+ assert(Kind == Deprecation && "Not a deprecation diagnostic.");
+ return DeprecationData.Decl;
+ }
+
+ StringRef getDeprecationMessage() const {
+ assert(Kind == Deprecation && "Not a deprecation diagnostic.");
+ return StringRef(DeprecationData.Message,
+ DeprecationData.MessageLen);
+ }
+
+ /// The diagnostic ID to emit. Used like so:
+ /// Diag(diag.Loc, diag.getForbiddenTypeDiagnostic())
+ /// << diag.getForbiddenTypeOperand()
+ /// << diag.getForbiddenTypeArgument();
+ unsigned getForbiddenTypeDiagnostic() const {
+ assert(Kind == ForbiddenType && "not a forbidden-type diagnostic");
+ return ForbiddenTypeData.Diagnostic;
+ }
+
+ unsigned getForbiddenTypeArgument() const {
+ assert(Kind == ForbiddenType && "not a forbidden-type diagnostic");
+ return ForbiddenTypeData.Argument;
+ }
+
+ QualType getForbiddenTypeOperand() const {
+ assert(Kind == ForbiddenType && "not a forbidden-type diagnostic");
+ return QualType::getFromOpaquePtr(ForbiddenTypeData.OperandType);
+ }
+
+ const ObjCInterfaceDecl *getUnknownObjCClass() const {
+ return DeprecationData.UnknownObjCClass;
+ }
+
+private:
+ union {
+ /// Deprecation.
+ struct {
+ const NamedDecl *Decl;
+ const ObjCInterfaceDecl *UnknownObjCClass;
+ const char *Message;
+ size_t MessageLen;
+ } DeprecationData;
+
+ struct {
+ unsigned Diagnostic;
+ unsigned Argument;
+ void *OperandType;
+ } ForbiddenTypeData;
+
+ /// Access control.
+ char AccessData[sizeof(AccessedEntity)];
+ };
+};
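+
+// An illustrative sketch of delaying and later emitting a forbidden-type
+// diagnostic; S is the current Sema instance, and Loc, DiagID and T are
+// placeholders supplied by the caller:
+//
+//   DelayedDiagnostic DD =
+//       DelayedDiagnostic::makeForbiddenType(Loc, DiagID, T, /*argument=*/0);
+//   // ... once the surrounding declaration has been fully parsed:
+//   S.Diag(DD.Loc, DD.getForbiddenTypeDiagnostic())
+//       << DD.getForbiddenTypeOperand() << DD.getForbiddenTypeArgument();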
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Designator.h b/contrib/llvm/tools/clang/include/clang/Sema/Designator.h
new file mode 100644
index 0000000..fe01f4d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Designator.h
@@ -0,0 +1,218 @@
+//===--- Designator.h - Initialization Designator ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines interfaces used to represent designators (a la
+// C99 designated initializers) during parsing.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_DESIGNATOR_H
+#define LLVM_CLANG_SEMA_DESIGNATOR_H
+
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+
+class Expr;
+class IdentifierInfo;
+class Sema;
+
+/// Designator - A designator in a C99 designated initializer.
+///
+/// This class is a discriminated union which holds the various different
+/// sorts of designators possible. A Designation is an array of these.
+/// Examples of designators are:
+/// [8] .field [47] // C99 designation: 3 designators
+/// [8 ... 47] field: // GNU extensions: 2 designators
+/// These occur in initializers, e.g.:
+/// int a[10] = {2, 4, [8]=9, 10};
+///
+class Designator {
+public:
+ enum DesignatorKind {
+ FieldDesignator, ArrayDesignator, ArrayRangeDesignator
+ };
+private:
+ DesignatorKind Kind;
+
+ struct FieldDesignatorInfo {
+ const IdentifierInfo *II;
+ unsigned DotLoc;
+ unsigned NameLoc;
+ };
+ struct ArrayDesignatorInfo {
+ Expr *Index;
+ unsigned LBracketLoc;
+ mutable unsigned RBracketLoc;
+ };
+ struct ArrayRangeDesignatorInfo {
+ Expr *Start, *End;
+ unsigned LBracketLoc, EllipsisLoc;
+ mutable unsigned RBracketLoc;
+ };
+
+ union {
+ FieldDesignatorInfo FieldInfo;
+ ArrayDesignatorInfo ArrayInfo;
+ ArrayRangeDesignatorInfo ArrayRangeInfo;
+ };
+
+public:
+
+ DesignatorKind getKind() const { return Kind; }
+ bool isFieldDesignator() const { return Kind == FieldDesignator; }
+ bool isArrayDesignator() const { return Kind == ArrayDesignator; }
+ bool isArrayRangeDesignator() const { return Kind == ArrayRangeDesignator; }
+
+ const IdentifierInfo *getField() const {
+ assert(isFieldDesignator() && "Invalid accessor");
+ return FieldInfo.II;
+ }
+
+ SourceLocation getDotLoc() const {
+ assert(isFieldDesignator() && "Invalid accessor");
+ return SourceLocation::getFromRawEncoding(FieldInfo.DotLoc);
+ }
+
+ SourceLocation getFieldLoc() const {
+ assert(isFieldDesignator() && "Invalid accessor");
+ return SourceLocation::getFromRawEncoding(FieldInfo.NameLoc);
+ }
+
+ Expr *getArrayIndex() const {
+ assert(isArrayDesignator() && "Invalid accessor");
+ return ArrayInfo.Index;
+ }
+
+ Expr *getArrayRangeStart() const {
+ assert(isArrayRangeDesignator() && "Invalid accessor");
+ return ArrayRangeInfo.Start;
+ }
+ Expr *getArrayRangeEnd() const {
+ assert(isArrayRangeDesignator() && "Invalid accessor");
+ return ArrayRangeInfo.End;
+ }
+
+ SourceLocation getLBracketLoc() const {
+ assert((isArrayDesignator() || isArrayRangeDesignator()) &&
+ "Invalid accessor");
+ if (isArrayDesignator())
+ return SourceLocation::getFromRawEncoding(ArrayInfo.LBracketLoc);
+ else
+ return SourceLocation::getFromRawEncoding(ArrayRangeInfo.LBracketLoc);
+ }
+
+ SourceLocation getRBracketLoc() const {
+ assert((isArrayDesignator() || isArrayRangeDesignator()) &&
+ "Invalid accessor");
+ if (isArrayDesignator())
+ return SourceLocation::getFromRawEncoding(ArrayInfo.RBracketLoc);
+ else
+ return SourceLocation::getFromRawEncoding(ArrayRangeInfo.RBracketLoc);
+ }
+
+ SourceLocation getEllipsisLoc() const {
+ assert(isArrayRangeDesignator() && "Invalid accessor");
+ return SourceLocation::getFromRawEncoding(ArrayRangeInfo.EllipsisLoc);
+ }
+
+ static Designator getField(const IdentifierInfo *II, SourceLocation DotLoc,
+ SourceLocation NameLoc) {
+ Designator D;
+ D.Kind = FieldDesignator;
+ D.FieldInfo.II = II;
+ D.FieldInfo.DotLoc = DotLoc.getRawEncoding();
+ D.FieldInfo.NameLoc = NameLoc.getRawEncoding();
+ return D;
+ }
+
+ static Designator getArray(Expr *Index,
+ SourceLocation LBracketLoc) {
+ Designator D;
+ D.Kind = ArrayDesignator;
+ D.ArrayInfo.Index = Index;
+ D.ArrayInfo.LBracketLoc = LBracketLoc.getRawEncoding();
+ D.ArrayInfo.RBracketLoc = 0;
+ return D;
+ }
+
+ static Designator getArrayRange(Expr *Start,
+ Expr *End,
+ SourceLocation LBracketLoc,
+ SourceLocation EllipsisLoc) {
+ Designator D;
+ D.Kind = ArrayRangeDesignator;
+ D.ArrayRangeInfo.Start = Start;
+ D.ArrayRangeInfo.End = End;
+ D.ArrayRangeInfo.LBracketLoc = LBracketLoc.getRawEncoding();
+ D.ArrayRangeInfo.EllipsisLoc = EllipsisLoc.getRawEncoding();
+ D.ArrayRangeInfo.RBracketLoc = 0;
+ return D;
+ }
+
+ void setRBracketLoc(SourceLocation RBracketLoc) const {
+ assert((isArrayDesignator() || isArrayRangeDesignator()) &&
+ "Invalid accessor");
+ if (isArrayDesignator())
+ ArrayInfo.RBracketLoc = RBracketLoc.getRawEncoding();
+ else
+ ArrayRangeInfo.RBracketLoc = RBracketLoc.getRawEncoding();
+ }
+
+ /// ClearExprs - Null out any expression references, which prevents
+ /// them from being 'delete'd later.
+ void ClearExprs(Sema &Actions) {}
+
+ /// FreeExprs - Release any unclaimed memory for the expressions in
+ /// this designator.
+ void FreeExprs(Sema &Actions) {}
+};
+
+
+/// Designation - Represent a full designation, which is a sequence of
+/// designators. This class is mostly a helper for InitListDesignations.
+class Designation {
+ /// InitIndex - The index of the initializer expression this is for. For
+ /// example, if the initializer were "{ A, .foo=B, C }" a Designation would
+ /// exist with InitIndex=1, because element #1 has a designation.
+ unsigned InitIndex;
+
+ /// Designators - The actual designators for this initializer.
+ SmallVector<Designator, 2> Designators;
+
+ Designation(unsigned Idx) : InitIndex(Idx) {}
+public:
+ Designation() : InitIndex(4000) {}
+
+ /// AddDesignator - Add a designator to the end of this list.
+ void AddDesignator(Designator D) {
+ Designators.push_back(D);
+ }
+
+ bool empty() const { return Designators.empty(); }
+
+ unsigned getNumDesignators() const { return Designators.size(); }
+ const Designator &getDesignator(unsigned Idx) const {
+ assert(Idx < Designators.size());
+ return Designators[Idx];
+ }
+
+ /// ClearExprs - Null out any expression references, which prevents them from
+ /// being 'delete'd later.
+ void ClearExprs(Sema &Actions) {}
+
+ /// FreeExprs - Release any unclaimed memory for the expressions in this
+ /// designation.
+ void FreeExprs(Sema &Actions) {}
+};
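+
+// An illustrative sketch of building the designation '.foo[8]'; FooII,
+// IndexExpr and the SourceLocations are placeholders assumed to come from the
+// surrounding parse:
+//
+//   Designation Desig;
+//   Desig.AddDesignator(Designator::getField(FooII, DotLoc, NameLoc));
+//   Desig.AddDesignator(Designator::getArray(IndexExpr, LBracketLoc));
+//   Desig.getDesignator(1).setRBracketLoc(RBracketLoc);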
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h b/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h
new file mode 100644
index 0000000..785bf6a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/ExternalSemaSource.h
@@ -0,0 +1,183 @@
+//===--- ExternalSemaSource.h - External Sema Interface ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ExternalSemaSource interface.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_SEMA_EXTERNAL_SEMA_SOURCE_H
+#define LLVM_CLANG_SEMA_EXTERNAL_SEMA_SOURCE_H
+
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/Sema/Weak.h"
+#include <utility>
+
+namespace clang {
+
+class CXXConstructorDecl;
+class CXXRecordDecl;
+class DeclaratorDecl;
+class LookupResult;
+struct ObjCMethodList;
+class Scope;
+class Sema;
+class TypedefNameDecl;
+class ValueDecl;
+class VarDecl;
+
+/// \brief A simple structure that captures a vtable use for the purposes of
+/// the \c ExternalSemaSource.
+struct ExternalVTableUse {
+ CXXRecordDecl *Record;
+ SourceLocation Location;
+ bool DefinitionRequired;
+};
+
+/// \brief An abstract interface that should be implemented by
+/// external AST sources that also provide information for semantic
+/// analysis.
+class ExternalSemaSource : public ExternalASTSource {
+public:
+ ExternalSemaSource() {
+ ExternalASTSource::SemaSource = true;
+ }
+
+ ~ExternalSemaSource();
+
+ /// \brief Initialize the semantic source with the Sema instance
+ /// being used to perform semantic analysis on the abstract syntax
+ /// tree.
+ virtual void InitializeSema(Sema &S) {}
+
+ /// \brief Inform the semantic consumer that Sema is no longer available.
+ virtual void ForgetSema() {}
+
+ /// \brief Load the contents of the global method pool for a given
+ /// selector.
+ virtual void ReadMethodPool(Selector Sel);
+
+ /// \brief Load the set of namespaces that are known to the external source,
+ /// which will be used during typo correction.
+ virtual void ReadKnownNamespaces(
+ SmallVectorImpl<NamespaceDecl *> &Namespaces);
+
+ /// \brief Do last resort, unqualified lookup on a LookupResult that
+ /// Sema cannot find.
+ ///
+ /// \param R a LookupResult that is being recovered.
+ ///
+ /// \param S the Scope of the identifier occurrence.
+ ///
+ /// \return true to tell Sema to recover using the LookupResult.
+ virtual bool LookupUnqualified(LookupResult &R, Scope *S) { return false; }
+
+ /// \brief Read the set of tentative definitions known to the external Sema
+ /// source.
+ ///
+ /// The external source should append its own tentative definitions to the
+ /// given vector of tentative definitions. Note that this routine may be
+ /// invoked multiple times; the external source should take care not to
+ /// introduce the same declarations repeatedly.
+ virtual void ReadTentativeDefinitions(
+ SmallVectorImpl<VarDecl *> &TentativeDefs) {}
+
+ /// \brief Read the set of unused file-scope declarations known to the
+ /// external Sema source.
+ ///
+  /// The external source should append its own unused, file-scoped
+  /// declarations to the given vector of declarations. Note that this
+  /// routine may be
+ /// invoked multiple times; the external source should take care not to
+ /// introduce the same declarations repeatedly.
+ virtual void ReadUnusedFileScopedDecls(
+ SmallVectorImpl<const DeclaratorDecl *> &Decls) {}
+
+ /// \brief Read the set of delegating constructors known to the
+ /// external Sema source.
+ ///
+ /// The external source should append its own delegating constructors to the
+ /// given vector of declarations. Note that this routine may be
+ /// invoked multiple times; the external source should take care not to
+ /// introduce the same declarations repeatedly.
+ virtual void ReadDelegatingConstructors(
+ SmallVectorImpl<CXXConstructorDecl *> &Decls) {}
+
+ /// \brief Read the set of ext_vector type declarations known to the
+ /// external Sema source.
+ ///
+ /// The external source should append its own ext_vector type declarations to
+ /// the given vector of declarations. Note that this routine may be
+ /// invoked multiple times; the external source should take care not to
+ /// introduce the same declarations repeatedly.
+ virtual void ReadExtVectorDecls(SmallVectorImpl<TypedefNameDecl *> &Decls) {}
+
+ /// \brief Read the set of dynamic classes known to the external Sema source.
+ ///
+ /// The external source should append its own dynamic classes to
+ /// the given vector of declarations. Note that this routine may be
+ /// invoked multiple times; the external source should take care not to
+ /// introduce the same declarations repeatedly.
+ virtual void ReadDynamicClasses(SmallVectorImpl<CXXRecordDecl *> &Decls) {}
+
+ /// \brief Read the set of locally-scoped external declarations known to the
+ /// external Sema source.
+ ///
+ /// The external source should append its own locally-scoped external
+ /// declarations to the given vector of declarations. Note that this routine
+ /// may be invoked multiple times; the external source should take care not
+ /// to introduce the same declarations repeatedly.
+ virtual void ReadLocallyScopedExternalDecls(
+ SmallVectorImpl<NamedDecl *> &Decls) {}
+
+ /// \brief Read the set of referenced selectors known to the
+ /// external Sema source.
+ ///
+ /// The external source should append its own referenced selectors to the
+ /// given vector of selectors. Note that this routine
+ /// may be invoked multiple times; the external source should take care not
+ /// to introduce the same selectors repeatedly.
+ virtual void ReadReferencedSelectors(
+ SmallVectorImpl<std::pair<Selector, SourceLocation> > &Sels) {}
+
+ /// \brief Read the set of weak, undeclared identifiers known to the
+ /// external Sema source.
+ ///
+ /// The external source should append its own weak, undeclared identifiers to
+ /// the given vector. Note that this routine may be invoked multiple times;
+ /// the external source should take care not to introduce the same identifiers
+ /// repeatedly.
+ virtual void ReadWeakUndeclaredIdentifiers(
+ SmallVectorImpl<std::pair<IdentifierInfo *, WeakInfo> > &WI) {}
+
+ /// \brief Read the set of used vtables known to the external Sema source.
+ ///
+ /// The external source should append its own used vtables to the given
+ /// vector. Note that this routine may be invoked multiple times; the external
+ /// source should take care not to introduce the same vtables repeatedly.
+ virtual void ReadUsedVTables(SmallVectorImpl<ExternalVTableUse> &VTables) {}
+
+ /// \brief Read the set of pending instantiations known to the external
+ /// Sema source.
+ ///
+ /// The external source should append its own pending instantiations to the
+ /// given vector. Note that this routine may be invoked multiple times; the
+ /// external source should take care not to introduce the same instantiations
+ /// repeatedly.
+ virtual void ReadPendingInstantiations(
+ SmallVectorImpl<std::pair<ValueDecl *,
+ SourceLocation> > &Pending) {}
+
+ // isa/cast/dyn_cast support
+ static bool classof(const ExternalASTSource *Source) {
+ return Source->SemaSource;
+ }
+ static bool classof(const ExternalSemaSource *) { return true; }
+};
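+
+// An illustrative sketch of a client source; MySemaSource is a hypothetical
+// subclass (the members it would also have to implement from
+// ExternalASTSource are omitted here):
+//
+//   class MySemaSource : public ExternalSemaSource {
+//     Sema *TheSema;
+//   public:
+//     virtual void InitializeSema(Sema &S) { TheSema = &S; }
+//     virtual void ForgetSema() { TheSema = 0; }
+//     virtual void ReadKnownNamespaces(
+//                           SmallVectorImpl<NamespaceDecl *> &Namespaces) {
+//       // Append namespaces known to this source, taking care not to add
+//       // the same declaration twice on repeated calls.
+//     }
+//   };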
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/IdentifierResolver.h b/contrib/llvm/tools/clang/include/clang/Sema/IdentifierResolver.h
new file mode 100644
index 0000000..dff0134
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/IdentifierResolver.h
@@ -0,0 +1,221 @@
+//===- IdentifierResolver.h - Lexical Scope Name lookup ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the IdentifierResolver class, which is used for
+// lexically scoped lookup based on declaration names.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_SEMA_IDENTIFIERRESOLVER_H
+#define LLVM_CLANG_AST_SEMA_IDENTIFIERRESOLVER_H
+
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+
+class ASTContext;
+class Decl;
+class DeclContext;
+class DeclarationName;
+class ExternalPreprocessorSource;
+class NamedDecl;
+class Preprocessor;
+class Scope;
+
+/// IdentifierResolver - Keeps track of shadowed decls on enclosing
+/// scopes. It manages the shadowing chains of declaration names and
+/// implements efficient decl lookup based on a declaration name.
+class IdentifierResolver {
+
+ /// IdDeclInfo - Keeps track of information about decls associated
+ /// to a particular declaration name. IdDeclInfos are lazily
+ /// constructed and assigned to a declaration name the first time a
+ /// decl with that declaration name is shadowed in some scope.
+ class IdDeclInfo {
+ public:
+ typedef SmallVector<NamedDecl*, 2> DeclsTy;
+
+ inline DeclsTy::iterator decls_begin() { return Decls.begin(); }
+ inline DeclsTy::iterator decls_end() { return Decls.end(); }
+
+ void AddDecl(NamedDecl *D) { Decls.push_back(D); }
+
+ /// RemoveDecl - Remove the decl from the scope chain.
+ /// The decl must already be part of the decl chain.
+ void RemoveDecl(NamedDecl *D);
+
+ /// Replaces the Old declaration with the New declaration. If the
+ /// replacement is successful, returns true. If the old
+ /// declaration was not found, returns false.
+ bool ReplaceDecl(NamedDecl *Old, NamedDecl *New);
+
+ /// \brief Insert the given declaration at the given position in the list.
+ void InsertDecl(DeclsTy::iterator Pos, NamedDecl *D) {
+ Decls.insert(Pos, D);
+ }
+
+ private:
+ DeclsTy Decls;
+ };
+
+public:
+
+ /// iterator - Iterate over the decls of a specified declaration name.
+  /// Depending on how it was instantiated, it may or may not walk the
+  /// parent declaration contexts.
+ class iterator {
+ public:
+ typedef NamedDecl * value_type;
+ typedef NamedDecl * reference;
+ typedef NamedDecl * pointer;
+ typedef std::input_iterator_tag iterator_category;
+ typedef std::ptrdiff_t difference_type;
+
+ /// Ptr - There are 3 forms that 'Ptr' represents:
+ /// 1) A single NamedDecl. (Ptr & 0x1 == 0)
+    /// 2) An IdDeclInfo::DeclsTy::iterator that traverses only the decls of the
+    ///    same declaration context. (Ptr & 0x3 == 0x1)
+    /// 3) An IdDeclInfo::DeclsTy::iterator that traverses the decls of parent
+ /// declaration contexts too. (Ptr & 0x3 == 0x3)
+ uintptr_t Ptr;
+ typedef IdDeclInfo::DeclsTy::iterator BaseIter;
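+
+    /// For illustration, dereferencing decodes 'Ptr' according to these low
+    /// bits (a sketch of what operator* and getIterator() do, not additional
+    /// interface):
+    /// @code
+    ///   if ((Ptr & 0x1) == 0)                        // form 1
+    ///     NamedDecl *D = reinterpret_cast<NamedDecl*>(Ptr);
+    ///   else                                         // forms 2 and 3
+    ///     BaseIter I = reinterpret_cast<BaseIter>(Ptr & ~0x3);
+    /// @endcode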
+
+ /// A single NamedDecl. (Ptr & 0x1 == 0)
+ iterator(NamedDecl *D) {
+ Ptr = reinterpret_cast<uintptr_t>(D);
+ assert((Ptr & 0x1) == 0 && "Invalid Ptr!");
+ }
+    /// An IdDeclInfo::DeclsTy::iterator that may also walk the parent
+    /// declaration contexts, as encoded in the low bits of 'Ptr' (see above).
+ iterator(BaseIter I) {
+ Ptr = reinterpret_cast<uintptr_t>(I) | 0x1;
+ }
+
+ bool isIterator() const { return (Ptr & 0x1); }
+
+ BaseIter getIterator() const {
+ assert(isIterator() && "Ptr not an iterator!");
+ return reinterpret_cast<BaseIter>(Ptr & ~0x3);
+ }
+
+ friend class IdentifierResolver;
+
+ void incrementSlowCase();
+ public:
+ iterator() : Ptr(0) {}
+
+ NamedDecl *operator*() const {
+ if (isIterator())
+ return *getIterator();
+ else
+ return reinterpret_cast<NamedDecl*>(Ptr);
+ }
+
+ bool operator==(const iterator &RHS) const {
+ return Ptr == RHS.Ptr;
+ }
+ bool operator!=(const iterator &RHS) const {
+ return Ptr != RHS.Ptr;
+ }
+
+ // Preincrement.
+ iterator& operator++() {
+ if (!isIterator()) // common case.
+ Ptr = 0;
+ else
+ incrementSlowCase();
+ return *this;
+ }
+
+ uintptr_t getAsOpaqueValue() const { return Ptr; }
+
+ static iterator getFromOpaqueValue(uintptr_t P) {
+ iterator Result;
+ Result.Ptr = P;
+ return Result;
+ }
+ };
+
+ /// begin - Returns an iterator for decls with the name 'Name'.
+ iterator begin(DeclarationName Name);
+
+ /// end - Returns an iterator that has 'finished'.
+ iterator end() {
+ return iterator();
+ }
+
+ /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
+  /// if 'D' is in Scope 'S'; otherwise 'S' is ignored and isDeclInScope returns
+ /// true if 'D' belongs to the given declaration context.
+ ///
+ /// \param ExplicitInstantiationOrSpecialization When true, we are checking
+ /// whether the declaration is in scope for the purposes of explicit template
+ /// instantiation or specialization. The default is false.
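+  ///
+  /// A usage sketch (illustrative; 'IdRes', 'D', 'Ctx', 'Context' and 'S' are
+  /// placeholders assumed to come from the caller):
+  /// @code
+  ///   bool Redeclares = IdRes.isDeclInScope(D, Ctx, Context, S);
+  /// @endcode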
+ bool isDeclInScope(Decl *D, DeclContext *Ctx, ASTContext &Context,
+ Scope *S = 0,
+ bool ExplicitInstantiationOrSpecialization = false) const;
+
+ /// AddDecl - Link the decl to its shadowed decl chain.
+ void AddDecl(NamedDecl *D);
+
+ /// RemoveDecl - Unlink the decl from its shadowed decl chain.
+ /// The decl must already be part of the decl chain.
+ void RemoveDecl(NamedDecl *D);
+
+ /// Replace the decl Old with the new declaration New on its
+ /// identifier chain. Returns true if the old declaration was found
+ /// (and, therefore, replaced).
+ bool ReplaceDecl(NamedDecl *Old, NamedDecl *New);
+
+ /// \brief Insert the given declaration after the given iterator
+ /// position.
+ void InsertDeclAfter(iterator Pos, NamedDecl *D);
+
+ /// \brief Try to add the given declaration to the top level scope, if it
+ /// (or a redeclaration of it) hasn't already been added.
+ ///
+ /// \param D The externally-produced declaration to add.
+ ///
+ /// \param Name The name of the externally-produced declaration.
+ ///
+ /// \returns true if the declaration was added, false otherwise.
+ bool tryAddTopLevelDecl(NamedDecl *D, DeclarationName Name);
+
+ explicit IdentifierResolver(Preprocessor &PP);
+ ~IdentifierResolver();
+
+private:
+ const LangOptions &LangOpt;
+ Preprocessor &PP;
+
+ class IdDeclInfoMap;
+ IdDeclInfoMap *IdDeclInfos;
+
+ void updatingIdentifier(IdentifierInfo &II);
+ void readingIdentifier(IdentifierInfo &II);
+
+ /// FETokenInfo contains a Decl pointer if lower bit == 0.
+ static inline bool isDeclPtr(void *Ptr) {
+ return (reinterpret_cast<uintptr_t>(Ptr) & 0x1) == 0;
+ }
+
+  /// FETokenInfo contains an IdDeclInfo pointer if lower bit == 1.
+ static inline IdDeclInfo *toIdDeclInfo(void *Ptr) {
+ assert((reinterpret_cast<uintptr_t>(Ptr) & 0x1) == 1
+ && "Ptr not a IdDeclInfo* !");
+ return reinterpret_cast<IdDeclInfo*>(
+ reinterpret_cast<uintptr_t>(Ptr) & ~0x1
+ );
+ }
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h b/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h
new file mode 100644
index 0000000..4433843
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Initialization.h
@@ -0,0 +1,999 @@
+//===--- Initialization.h - Semantic Analysis for Initializers --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides supporting data types for initialization of objects.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_SEMA_INITIALIZATION_H
+#define LLVM_CLANG_SEMA_INITIALIZATION_H
+
+#include "clang/Sema/Ownership.h"
+#include "clang/Sema/Overload.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/UnresolvedSet.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/SmallVector.h"
+#include <cassert>
+
+namespace clang {
+
+class CXXBaseSpecifier;
+class DeclaratorDecl;
+class DeclaratorInfo;
+class FieldDecl;
+class FunctionDecl;
+class ParmVarDecl;
+class Sema;
+class TypeLoc;
+class VarDecl;
+
+/// \brief Describes an entity that is being initialized.
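+///
+/// An illustrative sketch of constructing entities via the static factory
+/// functions below ('Var', 'ReturnLoc' and 'T' are placeholders, not part of
+/// this header):
+/// @code
+///   InitializedEntity E1 = InitializedEntity::InitializeVariable(Var);
+///   InitializedEntity E2
+///     = InitializedEntity::InitializeResult(ReturnLoc, T, /*NRVO=*/false);
+///   InitializedEntity E3 = InitializedEntity::InitializeTemporary(T);
+/// @endcode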
+class InitializedEntity {
+public:
+ /// \brief Specifies the kind of entity being initialized.
+ enum EntityKind {
+ /// \brief The entity being initialized is a variable.
+ EK_Variable,
+ /// \brief The entity being initialized is a function parameter.
+ EK_Parameter,
+ /// \brief The entity being initialized is the result of a function call.
+ EK_Result,
+ /// \brief The entity being initialized is an exception object that
+ /// is being thrown.
+ EK_Exception,
+ /// \brief The entity being initialized is a non-static data member
+ /// subobject.
+ EK_Member,
+ /// \brief The entity being initialized is an element of an array.
+ EK_ArrayElement,
+ /// \brief The entity being initialized is an object (or array of
+ /// objects) allocated via new.
+ EK_New,
+ /// \brief The entity being initialized is a temporary object.
+ EK_Temporary,
+ /// \brief The entity being initialized is a base member subobject.
+ EK_Base,
+ /// \brief The initialization is being done by a delegating constructor.
+ EK_Delegating,
+    /// \brief The entity being initialized is an element of a vector.
+ EK_VectorElement,
+ /// \brief The entity being initialized is a field of block descriptor for
+ /// the copied-in c++ object.
+ EK_BlockElement,
+ /// \brief The entity being initialized is the real or imaginary part of a
+ /// complex number.
+ EK_ComplexElement,
+ /// \brief The entity being initialized is the field that captures a
+ /// variable in a lambda.
+ EK_LambdaCapture
+ };
+
+private:
+ /// \brief The kind of entity being initialized.
+ EntityKind Kind;
+
+ /// \brief If non-NULL, the parent entity in which this
+ /// initialization occurs.
+ const InitializedEntity *Parent;
+
+ /// \brief The type of the object or reference being initialized.
+ QualType Type;
+
+ union {
+ /// \brief When Kind == EK_Variable, or EK_Member, the VarDecl or
+ /// FieldDecl, respectively.
+ DeclaratorDecl *VariableOrMember;
+
+ /// \brief When Kind == EK_Parameter, the ParmVarDecl, with the
+ /// low bit indicating whether the parameter is "consumed".
+ uintptr_t Parameter;
+
+ /// \brief When Kind == EK_Temporary, the type source information for
+ /// the temporary.
+ TypeSourceInfo *TypeInfo;
+
+ struct {
+ /// \brief When Kind == EK_Result, EK_Exception, EK_New, the
+ /// location of the 'return', 'throw', or 'new' keyword,
+ /// respectively. When Kind == EK_Temporary, the location where
+ /// the temporary is being created.
+ unsigned Location;
+
+ /// \brief Whether the entity being initialized may end up using the
+ /// named return value optimization (NRVO).
+ bool NRVO;
+ } LocAndNRVO;
+
+ /// \brief When Kind == EK_Base, the base specifier that provides the
+ /// base class. The lower bit specifies whether the base is an inherited
+ /// virtual base.
+ uintptr_t Base;
+
+ /// \brief When Kind == EK_ArrayElement, EK_VectorElement, or
+ /// EK_ComplexElement, the index of the array or vector element being
+ /// initialized.
+ unsigned Index;
+
+ struct {
+ /// \brief The variable being captured by an EK_LambdaCapture.
+ VarDecl *Var;
+
+ /// \brief The source location at which the capture occurs.
+ unsigned Location;
+ } Capture;
+ };
+
+ InitializedEntity() { }
+
+ /// \brief Create the initialization entity for a variable.
+ InitializedEntity(VarDecl *Var)
+ : Kind(EK_Variable), Parent(0), Type(Var->getType()),
+ VariableOrMember(Var) { }
+
+ /// \brief Create the initialization entity for the result of a
+ /// function, throwing an object, performing an explicit cast, or
+ /// initializing a parameter for which there is no declaration.
+ InitializedEntity(EntityKind Kind, SourceLocation Loc, QualType Type,
+ bool NRVO = false)
+ : Kind(Kind), Parent(0), Type(Type)
+ {
+ LocAndNRVO.Location = Loc.getRawEncoding();
+ LocAndNRVO.NRVO = NRVO;
+ }
+
+ /// \brief Create the initialization entity for a member subobject.
+ InitializedEntity(FieldDecl *Member, const InitializedEntity *Parent)
+ : Kind(EK_Member), Parent(Parent), Type(Member->getType()),
+ VariableOrMember(Member) { }
+
+ /// \brief Create the initialization entity for an array element.
+ InitializedEntity(ASTContext &Context, unsigned Index,
+ const InitializedEntity &Parent);
+
+ /// \brief Create the initialization entity for a lambda capture.
+ InitializedEntity(VarDecl *Var, FieldDecl *Field, SourceLocation Loc)
+ : Kind(EK_LambdaCapture), Parent(0), Type(Field->getType())
+ {
+ Capture.Var = Var;
+ Capture.Location = Loc.getRawEncoding();
+ }
+
+public:
+ /// \brief Create the initialization entity for a variable.
+ static InitializedEntity InitializeVariable(VarDecl *Var) {
+ return InitializedEntity(Var);
+ }
+
+ /// \brief Create the initialization entity for a parameter.
+ static InitializedEntity InitializeParameter(ASTContext &Context,
+ ParmVarDecl *Parm) {
+ bool Consumed = (Context.getLangOpts().ObjCAutoRefCount &&
+ Parm->hasAttr<NSConsumedAttr>());
+
+ InitializedEntity Entity;
+ Entity.Kind = EK_Parameter;
+ Entity.Type = Context.getVariableArrayDecayedType(
+ Parm->getType().getUnqualifiedType());
+ Entity.Parent = 0;
+ Entity.Parameter
+ = (static_cast<uintptr_t>(Consumed) | reinterpret_cast<uintptr_t>(Parm));
+ return Entity;
+ }
+
+ /// \brief Create the initialization entity for a parameter that is
+ /// only known by its type.
+ static InitializedEntity InitializeParameter(ASTContext &Context,
+ QualType Type,
+ bool Consumed) {
+ InitializedEntity Entity;
+ Entity.Kind = EK_Parameter;
+ Entity.Type = Context.getVariableArrayDecayedType(Type);
+ Entity.Parent = 0;
+ Entity.Parameter = (Consumed);
+ return Entity;
+ }
+
+ /// \brief Create the initialization entity for the result of a function.
+ static InitializedEntity InitializeResult(SourceLocation ReturnLoc,
+ QualType Type, bool NRVO) {
+ return InitializedEntity(EK_Result, ReturnLoc, Type, NRVO);
+ }
+
+ static InitializedEntity InitializeBlock(SourceLocation BlockVarLoc,
+ QualType Type, bool NRVO) {
+ return InitializedEntity(EK_BlockElement, BlockVarLoc, Type, NRVO);
+ }
+
+ /// \brief Create the initialization entity for an exception object.
+ static InitializedEntity InitializeException(SourceLocation ThrowLoc,
+ QualType Type, bool NRVO) {
+ return InitializedEntity(EK_Exception, ThrowLoc, Type, NRVO);
+ }
+
+ /// \brief Create the initialization entity for an object allocated via new.
+ static InitializedEntity InitializeNew(SourceLocation NewLoc, QualType Type) {
+ return InitializedEntity(EK_New, NewLoc, Type);
+ }
+
+ /// \brief Create the initialization entity for a temporary.
+ static InitializedEntity InitializeTemporary(QualType Type) {
+ return InitializedEntity(EK_Temporary, SourceLocation(), Type);
+ }
+
+ /// \brief Create the initialization entity for a temporary.
+ static InitializedEntity InitializeTemporary(TypeSourceInfo *TypeInfo) {
+ InitializedEntity Result(EK_Temporary, SourceLocation(),
+ TypeInfo->getType());
+ Result.TypeInfo = TypeInfo;
+ return Result;
+ }
+
+ /// \brief Create the initialization entity for a base class subobject.
+ static InitializedEntity InitializeBase(ASTContext &Context,
+ CXXBaseSpecifier *Base,
+ bool IsInheritedVirtualBase);
+
+ /// \brief Create the initialization entity for a delegated constructor.
+ static InitializedEntity InitializeDelegation(QualType Type) {
+ return InitializedEntity(EK_Delegating, SourceLocation(), Type);
+ }
+
+ /// \brief Create the initialization entity for a member subobject.
+ static InitializedEntity InitializeMember(FieldDecl *Member,
+ const InitializedEntity *Parent = 0) {
+ return InitializedEntity(Member, Parent);
+ }
+
+ /// \brief Create the initialization entity for a member subobject.
+ static InitializedEntity InitializeMember(IndirectFieldDecl *Member,
+ const InitializedEntity *Parent = 0) {
+ return InitializedEntity(Member->getAnonField(), Parent);
+ }
+
+ /// \brief Create the initialization entity for an array element.
+ static InitializedEntity InitializeElement(ASTContext &Context,
+ unsigned Index,
+ const InitializedEntity &Parent) {
+ return InitializedEntity(Context, Index, Parent);
+ }
+
+ /// \brief Create the initialization entity for a lambda capture.
+ static InitializedEntity InitializeLambdaCapture(VarDecl *Var,
+ FieldDecl *Field,
+ SourceLocation Loc) {
+ return InitializedEntity(Var, Field, Loc);
+ }
+
+ /// \brief Determine the kind of initialization.
+ EntityKind getKind() const { return Kind; }
+
+ /// \brief Retrieve the parent of the entity being initialized, when
+ /// the initialization itself is occurring within the context of a
+ /// larger initialization.
+ const InitializedEntity *getParent() const { return Parent; }
+
+ /// \brief Retrieve type being initialized.
+ QualType getType() const { return Type; }
+
+ /// \brief Retrieve complete type-source information for the object being
+ /// constructed, if known.
+ TypeSourceInfo *getTypeSourceInfo() const {
+ if (Kind == EK_Temporary)
+ return TypeInfo;
+
+ return 0;
+ }
+
+ /// \brief Retrieve the name of the entity being initialized.
+ DeclarationName getName() const;
+
+ /// \brief Retrieve the variable, parameter, or field being
+ /// initialized.
+ DeclaratorDecl *getDecl() const;
+
+ /// \brief Determine whether this initialization allows the named return
+ /// value optimization, which also applies to thrown objects.
+ bool allowsNRVO() const;
+
+ /// \brief Determine whether this initialization consumes the
+ /// parameter.
+ bool isParameterConsumed() const {
+ assert(getKind() == EK_Parameter && "Not a parameter");
+ return (Parameter & 1);
+ }
+
+ /// \brief Retrieve the base specifier.
+ CXXBaseSpecifier *getBaseSpecifier() const {
+ assert(getKind() == EK_Base && "Not a base specifier");
+ return reinterpret_cast<CXXBaseSpecifier *>(Base & ~0x1);
+ }
+
+ /// \brief Return whether the base is an inherited virtual base.
+ bool isInheritedVirtualBase() const {
+ assert(getKind() == EK_Base && "Not a base specifier");
+ return Base & 0x1;
+ }
+
+ /// \brief Determine the location of the 'return' keyword when initializing
+ /// the result of a function call.
+ SourceLocation getReturnLoc() const {
+ assert(getKind() == EK_Result && "No 'return' location!");
+ return SourceLocation::getFromRawEncoding(LocAndNRVO.Location);
+ }
+
+ /// \brief Determine the location of the 'throw' keyword when initializing
+ /// an exception object.
+ SourceLocation getThrowLoc() const {
+ assert(getKind() == EK_Exception && "No 'throw' location!");
+ return SourceLocation::getFromRawEncoding(LocAndNRVO.Location);
+ }
+
+ /// \brief If this is already the initializer for an array or vector
+ /// element, sets the element index.
+ void setElementIndex(unsigned Index) {
+ assert(getKind() == EK_ArrayElement || getKind() == EK_VectorElement ||
+ getKind() == EK_ComplexElement);
+ this->Index = Index;
+ }
+
+ /// \brief Retrieve the variable for a captured variable in a lambda.
+ VarDecl *getCapturedVar() const {
+ assert(getKind() == EK_LambdaCapture && "Not a lambda capture!");
+ return Capture.Var;
+ }
+
+ /// \brief Determine the location of the capture when initializing
+ /// field from a captured variable in a lambda.
+ SourceLocation getCaptureLoc() const {
+ assert(getKind() == EK_LambdaCapture && "Not a lambda capture!");
+ return SourceLocation::getFromRawEncoding(Capture.Location);
+ }
+};
+
+/// \brief Describes the kind of initialization being performed, along with
+/// location information for tokens related to the initialization (equal sign,
+/// parentheses).
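+///
+/// For illustration, the static factory functions below cover the common
+/// cases (the source locations here are placeholders):
+/// @code
+///   InitializationKind Direct
+///     = InitializationKind::CreateDirect(InitLoc, LParenLoc, RParenLoc);
+///   InitializationKind Copy
+///     = InitializationKind::CreateCopy(InitLoc, EqualLoc);
+///   InitializationKind Value
+///     = InitializationKind::CreateValue(InitLoc, LParenLoc, RParenLoc);
+/// @endcode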
+class InitializationKind {
+public:
+ /// \brief The kind of initialization being performed.
+ enum InitKind {
+ IK_Direct, ///< Direct initialization
+ IK_DirectList, ///< Direct list-initialization
+ IK_Copy, ///< Copy initialization
+ IK_Default, ///< Default initialization
+ IK_Value ///< Value initialization
+ };
+
+private:
+ /// \brief The context of the initialization.
+ enum InitContext {
+ IC_Normal, ///< Normal context
+ IC_ExplicitConvs, ///< Normal context, but allows explicit conversion funcs
+ IC_Implicit, ///< Implicit context (value initialization)
+ IC_StaticCast, ///< Static cast context
+ IC_CStyleCast, ///< C-style cast context
+ IC_FunctionalCast ///< Functional cast context
+ };
+
+ /// \brief The kind of initialization being performed.
+ InitKind Kind : 8;
+
+ /// \brief The context of the initialization.
+ InitContext Context : 8;
+
+ /// \brief The source locations involved in the initialization.
+ SourceLocation Locations[3];
+
+ InitializationKind(InitKind Kind, InitContext Context, SourceLocation Loc1,
+ SourceLocation Loc2, SourceLocation Loc3)
+ : Kind(Kind), Context(Context)
+ {
+ Locations[0] = Loc1;
+ Locations[1] = Loc2;
+ Locations[2] = Loc3;
+ }
+
+public:
+ /// \brief Create a direct initialization.
+ static InitializationKind CreateDirect(SourceLocation InitLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
+ return InitializationKind(IK_Direct, IC_Normal,
+ InitLoc, LParenLoc, RParenLoc);
+ }
+
+ static InitializationKind CreateDirectList(SourceLocation InitLoc) {
+ return InitializationKind(IK_DirectList, IC_Normal,
+ InitLoc, InitLoc, InitLoc);
+ }
+
+ /// \brief Create a direct initialization due to a cast that isn't a C-style
+ /// or functional cast.
+ static InitializationKind CreateCast(SourceRange TypeRange) {
+ return InitializationKind(IK_Direct, IC_StaticCast, TypeRange.getBegin(),
+ TypeRange.getBegin(), TypeRange.getEnd());
+ }
+
+ /// \brief Create a direct initialization for a C-style cast.
+ static InitializationKind CreateCStyleCast(SourceLocation StartLoc,
+ SourceRange TypeRange,
+ bool InitList) {
+ // C++ cast syntax doesn't permit init lists, but C compound literals are
+ // exactly that.
+ return InitializationKind(InitList ? IK_DirectList : IK_Direct,
+ IC_CStyleCast, StartLoc, TypeRange.getBegin(),
+ TypeRange.getEnd());
+ }
+
+ /// \brief Create a direct initialization for a functional cast.
+ static InitializationKind CreateFunctionalCast(SourceRange TypeRange,
+ bool InitList) {
+ return InitializationKind(InitList ? IK_DirectList : IK_Direct,
+ IC_FunctionalCast, TypeRange.getBegin(),
+ TypeRange.getBegin(), TypeRange.getEnd());
+ }
+
+ /// \brief Create a copy initialization.
+ static InitializationKind CreateCopy(SourceLocation InitLoc,
+ SourceLocation EqualLoc,
+ bool AllowExplicitConvs = false) {
+ return InitializationKind(IK_Copy,
+ AllowExplicitConvs? IC_ExplicitConvs : IC_Normal,
+ InitLoc, EqualLoc, EqualLoc);
+ }
+
+ /// \brief Create a default initialization.
+ static InitializationKind CreateDefault(SourceLocation InitLoc) {
+ return InitializationKind(IK_Default, IC_Normal, InitLoc, InitLoc, InitLoc);
+ }
+
+ /// \brief Create a value initialization.
+ static InitializationKind CreateValue(SourceLocation InitLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc,
+ bool isImplicit = false) {
+ return InitializationKind(IK_Value, isImplicit ? IC_Implicit : IC_Normal,
+ InitLoc, LParenLoc, RParenLoc);
+ }
+
+ /// \brief Determine the initialization kind.
+ InitKind getKind() const {
+ return Kind;
+ }
+
+ /// \brief Determine whether this initialization is an explicit cast.
+ bool isExplicitCast() const {
+ return Context >= IC_StaticCast;
+ }
+
+ /// \brief Determine whether this initialization is a C-style cast.
+ bool isCStyleOrFunctionalCast() const {
+ return Context >= IC_CStyleCast;
+ }
+
+ /// \brief Determine whether this is a C-style cast.
+ bool isCStyleCast() const {
+ return Context == IC_CStyleCast;
+ }
+
+ /// \brief Determine whether this is a functional-style cast.
+ bool isFunctionalCast() const {
+ return Context == IC_FunctionalCast;
+ }
+
+ /// \brief Determine whether this initialization is an implicit
+ /// value-initialization, e.g., as occurs during aggregate
+ /// initialization.
+ bool isImplicitValueInit() const { return Context == IC_Implicit; }
+
+ /// \brief Retrieve the location at which initialization is occurring.
+ SourceLocation getLocation() const { return Locations[0]; }
+
+ /// \brief Retrieve the source range that covers the initialization.
+ SourceRange getRange() const {
+ return SourceRange(Locations[0], Locations[2]);
+ }
+
+ /// \brief Retrieve the location of the equal sign for copy initialization
+ /// (if present).
+ SourceLocation getEqualLoc() const {
+ assert(Kind == IK_Copy && "Only copy initialization has an '='");
+ return Locations[1];
+ }
+
+ bool isCopyInit() const { return Kind == IK_Copy; }
+
+ /// \brief Retrieve whether this initialization allows the use of explicit
+ /// constructors.
+ bool AllowExplicit() const { return !isCopyInit(); }
+
+ /// \brief Retrieve whether this initialization allows the use of explicit
+ /// conversion functions.
+ bool allowExplicitConversionFunctions() const {
+ return !isCopyInit() || Context == IC_ExplicitConvs;
+ }
+
+ /// \brief Retrieve the source range containing the locations of the open
+ /// and closing parentheses for value and direct initializations.
+ SourceRange getParenRange() const {
+ assert((Kind == IK_Direct || Kind == IK_Value) &&
+ "Only direct- and value-initialization have parentheses");
+ return SourceRange(Locations[1], Locations[2]);
+ }
+};
+
+/// \brief Describes the sequence of initializations required to initialize
+/// a given object or reference with a set of arguments.
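+///
+/// A sketch of the intended usage pattern, based on the interface declared
+/// below ('S' is the Sema object; 'Entity', 'Kind', 'Args' and 'NumArgs' are
+/// assumed to have been set up by the caller):
+/// @code
+///   InitializationSequence Seq(S, Entity, Kind, Args, NumArgs);
+///   if (Seq.Failed())
+///     Seq.Diagnose(S, Entity, Kind, Args, NumArgs);
+///   // otherwise Seq.Perform(...) produces the initialization expression.
+/// @endcode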
+class InitializationSequence {
+public:
+ /// \brief Describes the kind of initialization sequence computed.
+ enum SequenceKind {
+ /// \brief A failed initialization sequence. The failure kind tells what
+ /// happened.
+ FailedSequence = 0,
+
+ /// \brief A dependent initialization, which could not be
+ /// type-checked due to the presence of dependent types or
+ /// dependently-typed expressions.
+ DependentSequence,
+
+ /// \brief A normal sequence.
+ NormalSequence
+ };
+
+ /// \brief Describes the kind of a particular step in an initialization
+ /// sequence.
+ enum StepKind {
+ /// \brief Resolve the address of an overloaded function to a specific
+ /// function declaration.
+ SK_ResolveAddressOfOverloadedFunction,
+ /// \brief Perform a derived-to-base cast, producing an rvalue.
+ SK_CastDerivedToBaseRValue,
+ /// \brief Perform a derived-to-base cast, producing an xvalue.
+ SK_CastDerivedToBaseXValue,
+ /// \brief Perform a derived-to-base cast, producing an lvalue.
+ SK_CastDerivedToBaseLValue,
+ /// \brief Reference binding to an lvalue.
+ SK_BindReference,
+ /// \brief Reference binding to a temporary.
+ SK_BindReferenceToTemporary,
+ /// \brief An optional copy of a temporary object to another
+ /// temporary object, which is permitted (but not required) by
+ /// C++98/03 but not C++0x.
+ SK_ExtraneousCopyToTemporary,
+ /// \brief Perform a user-defined conversion, either via a conversion
+ /// function or via a constructor.
+ SK_UserConversion,
+ /// \brief Perform a qualification conversion, producing an rvalue.
+ SK_QualificationConversionRValue,
+ /// \brief Perform a qualification conversion, producing an xvalue.
+ SK_QualificationConversionXValue,
+ /// \brief Perform a qualification conversion, producing an lvalue.
+ SK_QualificationConversionLValue,
+ /// \brief Perform an implicit conversion sequence.
+ SK_ConversionSequence,
+ /// \brief Perform list-initialization without a constructor
+ SK_ListInitialization,
+ /// \brief Perform list-initialization with a constructor.
+ SK_ListConstructorCall,
+ /// \brief Unwrap the single-element initializer list for a reference.
+ SK_UnwrapInitList,
+ /// \brief Rewrap the single-element initializer list for a reference.
+ SK_RewrapInitList,
+ /// \brief Perform initialization via a constructor.
+ SK_ConstructorInitialization,
+ /// \brief Zero-initialize the object
+ SK_ZeroInitialization,
+ /// \brief C assignment
+ SK_CAssignment,
+ /// \brief Initialization by string
+ SK_StringInit,
+ /// \brief An initialization that "converts" an Objective-C object
+    /// (not a pointer to an object) to another Objective-C object type.
+ SK_ObjCObjectConversion,
+ /// \brief Array initialization (from an array rvalue).
+ /// This is a GNU C extension.
+ SK_ArrayInit,
+ /// \brief Array initialization from a parenthesized initializer list.
+ /// This is a GNU C++ extension.
+ SK_ParenthesizedArrayInit,
+ /// \brief Pass an object by indirect copy-and-restore.
+ SK_PassByIndirectCopyRestore,
+ /// \brief Pass an object by indirect restore.
+ SK_PassByIndirectRestore,
+ /// \brief Produce an Objective-C object pointer.
+ SK_ProduceObjCObject,
+ /// \brief Construct a std::initializer_list from an initializer list.
+ SK_StdInitializerList
+ };
+
+ /// \brief A single step in the initialization sequence.
+ class Step {
+ public:
+ /// \brief The kind of conversion or initialization step we are taking.
+ StepKind Kind;
+
+ // \brief The type that results from this initialization.
+ QualType Type;
+
+ union {
+      /// \brief When Kind == SK_ResolveAddressOfOverloadedFunction or Kind ==
+      /// SK_UserConversion, the function that the expression should be
+      /// resolved to or the conversion function to call, respectively.
+      /// When Kind == SK_ConstructorInitialization or SK_ListConstructorCall,
+ /// the constructor to be called.
+ ///
+ /// Always a FunctionDecl, plus a Boolean flag telling if it was
+ /// selected from an overloaded set having size greater than 1.
+ /// For conversion decls, the naming class is the source type.
+ /// For construct decls, the naming class is the target type.
+ struct {
+ bool HadMultipleCandidates;
+ FunctionDecl *Function;
+ DeclAccessPair FoundDecl;
+ } Function;
+
+ /// \brief When Kind = SK_ConversionSequence, the implicit conversion
+ /// sequence.
+ ImplicitConversionSequence *ICS;
+
+ /// \brief When Kind = SK_RewrapInitList, the syntactic form of the
+ /// wrapping list.
+ InitListExpr *WrappingSyntacticList;
+ };
+
+ void Destroy();
+ };
+
+private:
+ /// \brief The kind of initialization sequence computed.
+ enum SequenceKind SequenceKind;
+
+ /// \brief Steps taken by this initialization.
+ SmallVector<Step, 4> Steps;
+
+public:
+ /// \brief Describes why initialization failed.
+ enum FailureKind {
+ /// \brief Too many initializers provided for a reference.
+ FK_TooManyInitsForReference,
+ /// \brief Array must be initialized with an initializer list.
+ FK_ArrayNeedsInitList,
+ /// \brief Array must be initialized with an initializer list or a
+ /// string literal.
+ FK_ArrayNeedsInitListOrStringLiteral,
+ /// \brief Array type mismatch.
+ FK_ArrayTypeMismatch,
+ /// \brief Non-constant array initializer
+ FK_NonConstantArrayInit,
+ /// \brief Cannot resolve the address of an overloaded function.
+ FK_AddressOfOverloadFailed,
+ /// \brief Overloading due to reference initialization failed.
+ FK_ReferenceInitOverloadFailed,
+ /// \brief Non-const lvalue reference binding to a temporary.
+ FK_NonConstLValueReferenceBindingToTemporary,
+ /// \brief Non-const lvalue reference binding to an lvalue of unrelated
+ /// type.
+ FK_NonConstLValueReferenceBindingToUnrelated,
+ /// \brief Rvalue reference binding to an lvalue.
+ FK_RValueReferenceBindingToLValue,
+ /// \brief Reference binding drops qualifiers.
+ FK_ReferenceInitDropsQualifiers,
+ /// \brief Reference binding failed.
+ FK_ReferenceInitFailed,
+ /// \brief Implicit conversion failed.
+ FK_ConversionFailed,
+    /// \brief Implicit conversion from a property reference failed.
+ FK_ConversionFromPropertyFailed,
+ /// \brief Too many initializers for scalar
+ FK_TooManyInitsForScalar,
+ /// \brief Reference initialization from an initializer list
+ FK_ReferenceBindingToInitList,
+ /// \brief Initialization of some unused destination type with an
+ /// initializer list.
+ FK_InitListBadDestinationType,
+ /// \brief Overloading for a user-defined conversion failed.
+ FK_UserConversionOverloadFailed,
+ /// \brief Overloading for initialization by constructor failed.
+ FK_ConstructorOverloadFailed,
+ /// \brief Overloading for list-initialization by constructor failed.
+ FK_ListConstructorOverloadFailed,
+ /// \brief Default-initialization of a 'const' object.
+ FK_DefaultInitOfConst,
+ /// \brief Initialization of an incomplete type.
+ FK_Incomplete,
+ /// \brief Variable-length array must not have an initializer.
+ FK_VariableLengthArrayHasInitializer,
+ /// \brief List initialization failed at some point.
+ FK_ListInitializationFailed,
+ /// \brief Initializer has a placeholder type which cannot be
+ /// resolved by initialization.
+ FK_PlaceholderType,
+ /// \brief Failed to initialize a std::initializer_list because copy
+ /// construction of some element failed.
+ FK_InitListElementCopyFailure,
+ /// \brief List-copy-initialization chose an explicit constructor.
+ FK_ExplicitConstructor
+ };
+
+private:
+ /// \brief The reason why initialization failed.
+ FailureKind Failure;
+
+ /// \brief The failed result of overload resolution.
+ OverloadingResult FailedOverloadResult;
+
+ /// \brief The candidate set created when initialization failed.
+ OverloadCandidateSet FailedCandidateSet;
+
+ /// \brief The incomplete type that caused a failure.
+ QualType FailedIncompleteType;
+
+ /// \brief Prints a follow-up note that highlights the location of
+ /// the initialized entity, if it's remote.
+ void PrintInitLocationNote(Sema &S, const InitializedEntity &Entity);
+
+public:
+ /// \brief Try to perform initialization of the given entity, creating a
+ /// record of the steps required to perform the initialization.
+ ///
+  /// The generated initialization sequence will either contain the steps
+  /// required to perform the initialization or, if the initialization could
+  /// not be performed, enough information to diagnose the failure.
+ ///
+ /// \param S the semantic analysis object.
+ ///
+ /// \param Entity the entity being initialized.
+ ///
+ /// \param Kind the kind of initialization being performed.
+ ///
+ /// \param Args the argument(s) provided for initialization.
+ ///
+ /// \param NumArgs the number of arguments provided for initialization.
+ InitializationSequence(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr **Args,
+ unsigned NumArgs);
+
+ ~InitializationSequence();
+
+ /// \brief Perform the actual initialization of the given entity based on
+ /// the computed initialization sequence.
+ ///
+ /// \param S the semantic analysis object.
+ ///
+ /// \param Entity the entity being initialized.
+ ///
+ /// \param Kind the kind of initialization being performed.
+ ///
+ /// \param Args the argument(s) provided for initialization, ownership of
+ /// which is transferred into the routine.
+ ///
+ /// \param ResultType if non-NULL, will be set to the type of the
+ /// initialized object, which is the type of the declaration in most
+ /// cases. However, when the initialized object is a variable of
+ /// incomplete array type and the initializer is an initializer
+ /// list, this type will be set to the completed array type.
+ ///
+ /// \returns an expression that performs the actual object initialization, if
+ /// the initialization is well-formed. Otherwise, emits diagnostics
+ /// and returns an invalid expression.
+ ExprResult Perform(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ MultiExprArg Args,
+ QualType *ResultType = 0);
+
+  /// \brief Diagnose a potentially-invalid initialization sequence.
+ ///
+ /// \returns true if the initialization sequence was ill-formed,
+ /// false otherwise.
+ bool Diagnose(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr **Args, unsigned NumArgs);
+
+ /// \brief Determine the kind of initialization sequence computed.
+ enum SequenceKind getKind() const { return SequenceKind; }
+
+ /// \brief Set the kind of sequence computed.
+ void setSequenceKind(enum SequenceKind SK) { SequenceKind = SK; }
+
+ /// \brief Determine whether the initialization sequence is valid.
+ operator bool() const { return !Failed(); }
+
+ /// \brief Determine whether the initialization sequence is invalid.
+ bool Failed() const { return SequenceKind == FailedSequence; }
+
+ typedef SmallVector<Step, 4>::const_iterator step_iterator;
+ step_iterator step_begin() const { return Steps.begin(); }
+ step_iterator step_end() const { return Steps.end(); }
+
+ /// \brief Determine whether this initialization is a direct reference
+ /// binding (C++ [dcl.init.ref]).
+ bool isDirectReferenceBinding() const;
+
+ /// \brief Determine whether this initialization failed due to an ambiguity.
+ bool isAmbiguous() const;
+
+ /// \brief Determine whether this initialization is direct call to a
+ /// constructor.
+ bool isConstructorInitialization() const;
+
+ /// \brief Returns whether the last step in this initialization sequence is a
+ /// narrowing conversion, defined by C++0x [dcl.init.list]p7.
+ ///
+ /// If this function returns true, *isInitializerConstant will be set to
+ /// describe whether *Initializer was a constant expression. If
+ /// *isInitializerConstant is set to true, *ConstantValue will be set to the
+ /// evaluated value of *Initializer.
+ bool endsWithNarrowing(ASTContext &Ctx, const Expr *Initializer,
+ bool *isInitializerConstant,
+ APValue *ConstantValue) const;
+
+ /// \brief Add a new step in the initialization that resolves the address
+ /// of an overloaded function to a specific function declaration.
+ ///
+ /// \param Function the function to which the overloaded function reference
+ /// resolves.
+ void AddAddressOverloadResolutionStep(FunctionDecl *Function,
+ DeclAccessPair Found,
+ bool HadMultipleCandidates);
+
+ /// \brief Add a new step in the initialization that performs a derived-to-
+ /// base cast.
+ ///
+ /// \param BaseType the base type to which we will be casting.
+ ///
+ /// \param IsLValue true if the result of this cast will be treated as
+ /// an lvalue.
+ void AddDerivedToBaseCastStep(QualType BaseType,
+ ExprValueKind Category);
+
+ /// \brief Add a new step binding a reference to an object.
+ ///
+ /// \param BindingTemporary True if we are binding a reference to a temporary
+ /// object (thereby extending its lifetime); false if we are binding to an
+ /// lvalue or an lvalue treated as an rvalue.
+  ///
+  /// (The check for a completely unnecessary, but permitted, copy of the
+  /// temporary is handled separately; see AddExtraneousCopyToTemporary().)
+ void AddReferenceBindingStep(QualType T, bool BindingTemporary);
+
+ /// \brief Add a new step that makes an extraneous copy of the input
+ /// to a temporary of the same class type.
+ ///
+ /// This extraneous copy only occurs during reference binding in
+ /// C++98/03, where we are permitted (but not required) to introduce
+ /// an extra copy. At a bare minimum, we must check that we could
+ /// call the copy constructor, and produce a diagnostic if the copy
+ /// constructor is inaccessible or no copy constructor matches.
+  ///
+ /// \param T The type of the temporary being created.
+ void AddExtraneousCopyToTemporary(QualType T);
+
+ /// \brief Add a new step invoking a conversion function, which is either
+ /// a constructor or a conversion function.
+ void AddUserConversionStep(FunctionDecl *Function,
+ DeclAccessPair FoundDecl,
+ QualType T,
+ bool HadMultipleCandidates);
+
+ /// \brief Add a new step that performs a qualification conversion to the
+ /// given type.
+ void AddQualificationConversionStep(QualType Ty,
+ ExprValueKind Category);
+
+ /// \brief Add a new step that applies an implicit conversion sequence.
+ void AddConversionSequenceStep(const ImplicitConversionSequence &ICS,
+ QualType T);
+
+ /// \brief Add a list-initialization step.
+ void AddListInitializationStep(QualType T);
+
+ /// \brief Add a constructor-initialization step.
+ ///
+ /// \arg FromInitList The constructor call is syntactically an initializer
+ /// list.
+ /// \arg AsInitList The constructor is called as an init list constructor.
+ void AddConstructorInitializationStep(CXXConstructorDecl *Constructor,
+ AccessSpecifier Access,
+ QualType T,
+ bool HadMultipleCandidates,
+ bool FromInitList, bool AsInitList);
+
+ /// \brief Add a zero-initialization step.
+ void AddZeroInitializationStep(QualType T);
+
+ /// \brief Add a C assignment step.
+ //
+ // FIXME: It isn't clear whether this should ever be needed;
+ // ideally, we would handle everything needed in C in the common
+ // path. However, that isn't the case yet.
+ void AddCAssignmentStep(QualType T);
+
+ /// \brief Add a string init step.
+ void AddStringInitStep(QualType T);
+
+ /// \brief Add an Objective-C object conversion step, which is
+ /// always a no-op.
+ void AddObjCObjectConversionStep(QualType T);
+
+ /// \brief Add an array initialization step.
+ void AddArrayInitStep(QualType T);
+
+ /// \brief Add a parenthesized array initialization step.
+ void AddParenthesizedArrayInitStep(QualType T);
+
+ /// \brief Add a step to pass an object by indirect copy-restore.
+ void AddPassByIndirectCopyRestoreStep(QualType T, bool shouldCopy);
+
+ /// \brief Add a step to "produce" an Objective-C object (by
+ /// retaining it).
+ void AddProduceObjCObjectStep(QualType T);
+
+ /// \brief Add a step to construct a std::initializer_list object from an
+ /// initializer list.
+ void AddStdInitializerListConstructionStep(QualType T);
+
+ /// \brief Add steps to unwrap a initializer list for a reference around a
+ /// single element and rewrap it at the end.
+ void RewrapReferenceInitList(QualType T, InitListExpr *Syntactic);
+
+ /// \brief Note that this initialization sequence failed.
+ void SetFailed(FailureKind Failure) {
+ SequenceKind = FailedSequence;
+ this->Failure = Failure;
+ assert((Failure != FK_Incomplete || !FailedIncompleteType.isNull()) &&
+ "Incomplete type failure requires a type!");
+ }
+
+ /// \brief Note that this initialization sequence failed due to failed
+ /// overload resolution.
+ void SetOverloadFailure(FailureKind Failure, OverloadingResult Result);
+
+ /// \brief Retrieve a reference to the candidate set when overload
+ /// resolution fails.
+ OverloadCandidateSet &getFailedCandidateSet() {
+ return FailedCandidateSet;
+ }
+
+ /// \brief Get the overloading result, for when the initialization
+ /// sequence failed due to a bad overload.
+ OverloadingResult getFailedOverloadResult() const {
+ return FailedOverloadResult;
+ }
+
+ /// \brief Note that this initialization sequence failed due to an
+ /// incomplete type.
+ void setIncompleteTypeFailure(QualType IncompleteType) {
+ FailedIncompleteType = IncompleteType;
+ SetFailed(FK_Incomplete);
+ }
+
+ /// \brief Determine why initialization failed.
+ FailureKind getFailureKind() const {
+ assert(Failed() && "Not an initialization failure!");
+ return Failure;
+ }
+
+ /// \brief Dump a representation of this initialization sequence to
+ /// the given stream, for debugging purposes.
+ void dump(raw_ostream &OS) const;
+
+ /// \brief Dump a representation of this initialization sequence to
+ /// standard error, for debugging purposes.
+ void dump() const;
+};
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_SEMA_INITIALIZATION_H
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/LocInfoType.h b/contrib/llvm/tools/clang/include/clang/Sema/LocInfoType.h
new file mode 100644
index 0000000..93cb8cb
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/LocInfoType.h
@@ -0,0 +1,63 @@
+//===--- LocInfoType.h - Parsed Type with Location Information---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LocInfoType class, which holds a type and its
+// source-location information.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_SEMA_LOCINFOTYPE_H
+#define LLVM_CLANG_SEMA_LOCINFOTYPE_H
+
+#include "clang/AST/Type.h"
+
+namespace clang {
+
+class TypeSourceInfo;
+
+/// \brief Holds a QualType and a TypeSourceInfo* that came out of parsing a
+/// declarator.
+///
+/// LocInfoType is a "transient" type, only needed for passing to/from Parser
+/// and Sema, when we want to preserve type source info for a parsed type.
+/// It will not participate in the type system semantics in any way.
+class LocInfoType : public Type {
+ enum {
+ // The last number that can fit in Type's TC.
+ // Avoids conflict with an existing Type class.
+ LocInfo = Type::TypeLast + 1
+ };
+
+ TypeSourceInfo *DeclInfo;
+
+ LocInfoType(QualType ty, TypeSourceInfo *TInfo)
+ : Type((TypeClass)LocInfo, ty, ty->isDependentType(),
+ ty->isInstantiationDependentType(),
+ ty->isVariablyModifiedType(),
+ ty->containsUnexpandedParameterPack()),
+ DeclInfo(TInfo) {
+ assert(getTypeClass() == (TypeClass)LocInfo && "LocInfo didn't fit in TC?");
+ }
+ friend class Sema;
+
+public:
+ QualType getType() const { return getCanonicalTypeInternal(); }
+ TypeSourceInfo *getTypeSourceInfo() const { return DeclInfo; }
+
+ void getAsStringInternal(std::string &Str,
+ const PrintingPolicy &Policy) const;
+
+ static bool classof(const Type *T) {
+ return T->getTypeClass() == (TypeClass)LocInfo;
+ }
+ static bool classof(const LocInfoType *) { return true; }
+};
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_SEMA_LOCINFOTYPE_H
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Lookup.h b/contrib/llvm/tools/clang/include/clang/Sema/Lookup.h
new file mode 100644
index 0000000..fe5d262
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Lookup.h
@@ -0,0 +1,715 @@
+//===--- Lookup.h - Classes for name lookup ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LookupResult class, which is integral to
+// Sema's name-lookup subsystem.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_LOOKUP_H
+#define LLVM_CLANG_SEMA_LOOKUP_H
+
+#include "clang/Sema/Sema.h"
+#include "clang/AST/DeclCXX.h"
+
+namespace clang {
+
+/// @brief Represents the results of name lookup.
+///
+/// An instance of the LookupResult class captures the results of a
+/// single name lookup, which can return no result (nothing found),
+/// a single declaration, a set of overloaded functions, or an
+/// ambiguity. Use the getKind() method to determine which of these
+/// results occurred for a given lookup.
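+///
+/// A typical usage sketch (illustrative; 'SemaRef', 'Name', 'NameLoc' and the
+/// scope 'S' are assumed to come from the caller, which then runs one of
+/// Sema's lookup entry points):
+/// @code
+///   LookupResult R(SemaRef, Name, NameLoc, Sema::LookupOrdinaryName);
+///   SemaRef.LookupName(R, S);
+///   if (R.isSingleResult())
+///     NamedDecl *D = R.getFoundDecl();
+/// @endcode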
+class LookupResult {
+public:
+ enum LookupResultKind {
+ /// @brief No entity found met the criteria.
+ NotFound = 0,
+
+ /// @brief No entity found met the criteria within the current
+    /// instantiation, but there were dependent base classes of the
+ /// current instantiation that could not be searched.
+ NotFoundInCurrentInstantiation,
+
+ /// @brief Name lookup found a single declaration that met the
+ /// criteria. getFoundDecl() will return this declaration.
+ Found,
+
+ /// @brief Name lookup found a set of overloaded functions that
+ /// met the criteria.
+ FoundOverloaded,
+
+ /// @brief Name lookup found an unresolvable value declaration
+ /// and cannot yet complete. This only happens in C++ dependent
+ /// contexts with dependent using declarations.
+ FoundUnresolvedValue,
+
+ /// @brief Name lookup results in an ambiguity; use
+ /// getAmbiguityKind to figure out what kind of ambiguity
+ /// we have.
+ Ambiguous
+ };
+
+ enum AmbiguityKind {
+ /// Name lookup results in an ambiguity because multiple
+ /// entities that meet the lookup criteria were found in
+ /// subobjects of different types. For example:
+ /// @code
+    /// struct A { void f(int); };
+    /// struct B { void f(double); };
+ /// struct C : A, B { };
+ /// void test(C c) {
+ /// c.f(0); // error: A::f and B::f come from subobjects of different
+ /// // types. overload resolution is not performed.
+ /// }
+ /// @endcode
+ AmbiguousBaseSubobjectTypes,
+
+ /// Name lookup results in an ambiguity because multiple
+ /// nonstatic entities that meet the lookup criteria were found
+ /// in different subobjects of the same type. For example:
+ /// @code
+ /// struct A { int x; };
+ /// struct B : A { };
+ /// struct C : A { };
+ /// struct D : B, C { };
+ /// int test(D d) {
+ /// return d.x; // error: 'x' is found in two A subobjects (of B and C)
+ /// }
+ /// @endcode
+ AmbiguousBaseSubobjects,
+
+ /// Name lookup results in an ambiguity because multiple definitions
+    /// of an entity that meet the lookup criteria were found in different
+ /// declaration contexts.
+ /// @code
+ /// namespace A {
+ /// int i;
+ /// namespace B { int i; }
+ /// int test() {
+ /// using namespace B;
+    ///     return i; // error: 'i' is found in namespace A and A::B
+ /// }
+ /// }
+ /// @endcode
+ AmbiguousReference,
+
+ /// Name lookup results in an ambiguity because an entity with a
+ /// tag name was hidden by an entity with an ordinary name from
+ /// a different context.
+ /// @code
+ /// namespace A { struct Foo {}; }
+ /// namespace B { void Foo(); }
+ /// namespace C {
+ /// using namespace A;
+ /// using namespace B;
+ /// }
+ /// void test() {
+ /// C::Foo(); // error: tag 'A::Foo' is hidden by an object in a
+ /// // different namespace
+ /// }
+ /// @endcode
+ AmbiguousTagHiding
+ };
+
+ /// A little identifier for flagging temporary lookup results.
+ enum TemporaryToken {
+ Temporary
+ };
+
+ typedef UnresolvedSetImpl::iterator iterator;
+
+ LookupResult(Sema &SemaRef, const DeclarationNameInfo &NameInfo,
+ Sema::LookupNameKind LookupKind,
+ Sema::RedeclarationKind Redecl = Sema::NotForRedeclaration)
+ : ResultKind(NotFound),
+ Paths(0),
+ NamingClass(0),
+ SemaRef(SemaRef),
+ NameInfo(NameInfo),
+ LookupKind(LookupKind),
+ IDNS(0),
+ Redecl(Redecl != Sema::NotForRedeclaration),
+ HideTags(true),
+ Diagnose(Redecl == Sema::NotForRedeclaration)
+ {
+ configure();
+ }
+
+ // TODO: consider whether this constructor should be restricted to take
+  // as input a const IdentifierInfo* (instead of Name),
+ // forcing other cases towards the constructor taking a DNInfo.
+ LookupResult(Sema &SemaRef, DeclarationName Name,
+ SourceLocation NameLoc, Sema::LookupNameKind LookupKind,
+ Sema::RedeclarationKind Redecl = Sema::NotForRedeclaration)
+ : ResultKind(NotFound),
+ Paths(0),
+ NamingClass(0),
+ SemaRef(SemaRef),
+ NameInfo(Name, NameLoc),
+ LookupKind(LookupKind),
+ IDNS(0),
+ Redecl(Redecl != Sema::NotForRedeclaration),
+ HideTags(true),
+ Diagnose(Redecl == Sema::NotForRedeclaration)
+ {
+ configure();
+ }
+
+ /// Creates a temporary lookup result, initializing its core data
+ /// using the information from another result. Diagnostics are always
+ /// disabled.
+ LookupResult(TemporaryToken _, const LookupResult &Other)
+ : ResultKind(NotFound),
+ Paths(0),
+ NamingClass(0),
+ SemaRef(Other.SemaRef),
+ NameInfo(Other.NameInfo),
+ LookupKind(Other.LookupKind),
+ IDNS(Other.IDNS),
+ Redecl(Other.Redecl),
+ HideTags(Other.HideTags),
+ Diagnose(false)
+ {}
+
+ ~LookupResult() {
+ if (Diagnose) diagnose();
+ if (Paths) deletePaths(Paths);
+ }
+
+ /// Gets the name info to look up.
+ const DeclarationNameInfo &getLookupNameInfo() const {
+ return NameInfo;
+ }
+
+ /// \brief Sets the name info to look up.
+ void setLookupNameInfo(const DeclarationNameInfo &NameInfo) {
+ this->NameInfo = NameInfo;
+ }
+
+ /// Gets the name to look up.
+ DeclarationName getLookupName() const {
+ return NameInfo.getName();
+ }
+
+ /// \brief Sets the name to look up.
+ void setLookupName(DeclarationName Name) {
+ NameInfo.setName(Name);
+ }
+
+ /// Gets the kind of lookup to perform.
+ Sema::LookupNameKind getLookupKind() const {
+ return LookupKind;
+ }
+
+ /// True if this lookup is just looking for an existing declaration.
+ bool isForRedeclaration() const {
+ return Redecl;
+ }
+
+ /// \brief Determine whether this lookup is permitted to see hidden
+ /// declarations, such as those in modules that have not yet been imported.
+ bool isHiddenDeclarationVisible() const {
+ return Redecl || LookupKind == Sema::LookupTagName;
+ }
+
+ /// Sets whether tag declarations should be hidden by non-tag
+ /// declarations during resolution. The default is true.
+ void setHideTags(bool Hide) {
+ HideTags = Hide;
+ }
+
+ bool isAmbiguous() const {
+ return getResultKind() == Ambiguous;
+ }
+
+ /// Determines if this names a single result which is not an
+ /// unresolved value using decl. If so, it is safe to call
+ /// getFoundDecl().
+ bool isSingleResult() const {
+ return getResultKind() == Found;
+ }
+
+ /// Determines if the results are overloaded.
+ bool isOverloadedResult() const {
+ return getResultKind() == FoundOverloaded;
+ }
+
+ bool isUnresolvableResult() const {
+ return getResultKind() == FoundUnresolvedValue;
+ }
+
+ LookupResultKind getResultKind() const {
+ sanity();
+ return ResultKind;
+ }
+
+ AmbiguityKind getAmbiguityKind() const {
+ assert(isAmbiguous());
+ return Ambiguity;
+ }
+
+ const UnresolvedSetImpl &asUnresolvedSet() const {
+ return Decls;
+ }
+
+ iterator begin() const { return iterator(Decls.begin()); }
+ iterator end() const { return iterator(Decls.end()); }
+
+ /// \brief Return true if no decls were found
+ bool empty() const { return Decls.empty(); }
+
+ /// \brief Return the base paths structure that's associated with
+ /// these results, or null if none is.
+ CXXBasePaths *getBasePaths() const {
+ return Paths;
+ }
+
+ /// \brief Determine whether the given declaration is visible to the
+ /// program.
+ static bool isVisible(NamedDecl *D) {
+ // If this declaration is not hidden, it's visible.
+ if (!D->isHidden())
+ return true;
+
+ // FIXME: We should be allowed to refer to a module-private name from
+ // within the same module, e.g., during template instantiation.
+    // This requires us to know which module a particular declaration came from.
+ return false;
+ }
+
+ /// \brief Retrieve the accepted (re)declaration of the given declaration,
+ /// if there is one.
+ NamedDecl *getAcceptableDecl(NamedDecl *D) const {
+ if (!D->isInIdentifierNamespace(IDNS))
+ return 0;
+
+ if (isHiddenDeclarationVisible() || isVisible(D))
+ return D;
+
+ return getAcceptableDeclSlow(D);
+ }
+
+private:
+ NamedDecl *getAcceptableDeclSlow(NamedDecl *D) const;
+public:
+
+ /// \brief Returns the identifier namespace mask for this lookup.
+ unsigned getIdentifierNamespace() const {
+ return IDNS;
+ }
+
+ /// \brief Returns whether these results arose from performing a
+ /// lookup into a class.
+ bool isClassLookup() const {
+ return NamingClass != 0;
+ }
+
+ /// \brief Returns the 'naming class' for this lookup, i.e. the
+ /// class which was looked into to find these results.
+ ///
+ /// C++0x [class.access.base]p5:
+ /// The access to a member is affected by the class in which the
+ /// member is named. This naming class is the class in which the
+ /// member name was looked up and found. [Note: this class can be
+ /// explicit, e.g., when a qualified-id is used, or implicit,
+ /// e.g., when a class member access operator (5.2.5) is used
+ /// (including cases where an implicit "this->" is added). If both
+ /// a class member access operator and a qualified-id are used to
+ /// name the member (as in p->T::m), the class naming the member
+ /// is the class named by the nested-name-specifier of the
+ /// qualified-id (that is, T). -- end note ]
+ ///
+ /// This is set by the lookup routines when they find results in a class.
+ CXXRecordDecl *getNamingClass() const {
+ return NamingClass;
+ }
+
+ /// \brief Sets the 'naming class' for this lookup.
+ void setNamingClass(CXXRecordDecl *Record) {
+ NamingClass = Record;
+ }
+
+ /// \brief Returns the base object type associated with this lookup;
+ /// important for [class.protected]. Most lookups do not have an
+ /// associated base object.
+ QualType getBaseObjectType() const {
+ return BaseObjectType;
+ }
+
+ /// \brief Sets the base object type for this lookup.
+ void setBaseObjectType(QualType T) {
+ BaseObjectType = T;
+ }
+
+ /// \brief Add a declaration to these results with its natural access.
+ /// Does not test the acceptance criteria.
+ void addDecl(NamedDecl *D) {
+ addDecl(D, D->getAccess());
+ }
+
+ /// \brief Add a declaration to these results with the given access.
+ /// Does not test the acceptance criteria.
+ void addDecl(NamedDecl *D, AccessSpecifier AS) {
+ Decls.addDecl(D, AS);
+ ResultKind = Found;
+ }
+
+ /// \brief Add all the declarations from another set of lookup
+ /// results.
+ void addAllDecls(const LookupResult &Other) {
+ Decls.append(Other.Decls.begin(), Other.Decls.end());
+ ResultKind = Found;
+ }
+
+ /// \brief Determine whether no result was found because we could not
+ /// search into dependent base classes of the current instantiation.
+ bool wasNotFoundInCurrentInstantiation() const {
+ return ResultKind == NotFoundInCurrentInstantiation;
+ }
+
+ /// \brief Note that while no result was found in the current instantiation,
+ /// there were dependent base classes that could not be searched.
+ void setNotFoundInCurrentInstantiation() {
+ assert(ResultKind == NotFound && Decls.empty());
+ ResultKind = NotFoundInCurrentInstantiation;
+ }
+
+ /// \brief Resolves the result kind of the lookup, possibly hiding
+ /// decls.
+ ///
+ /// This should be called in any environment where lookup might
+ /// generate multiple lookup results.
+ void resolveKind();
+
+ /// \brief Re-resolves the result kind of the lookup after a set of
+ /// removals has been performed.
+ void resolveKindAfterFilter() {
+ if (Decls.empty()) {
+ if (ResultKind != NotFoundInCurrentInstantiation)
+ ResultKind = NotFound;
+
+ if (Paths) {
+ deletePaths(Paths);
+ Paths = 0;
+ }
+ } else {
+ AmbiguityKind SavedAK = Ambiguity;
+ ResultKind = Found;
+ resolveKind();
+
+ // If we didn't make the lookup unambiguous, restore the old
+ // ambiguity kind.
+ if (ResultKind == Ambiguous) {
+ Ambiguity = SavedAK;
+ } else if (Paths) {
+ deletePaths(Paths);
+ Paths = 0;
+ }
+ }
+ }
+
+ template <class DeclClass>
+ DeclClass *getAsSingle() const {
+ if (getResultKind() != Found) return 0;
+ return dyn_cast<DeclClass>(getFoundDecl());
+ }
+
+ /// \brief Fetch the unique decl found by this lookup. Asserts
+ /// that one was found.
+ ///
+ /// This is intended for users who have examined the result kind
+ /// and are certain that there is only one result.
+ NamedDecl *getFoundDecl() const {
+ assert(getResultKind() == Found
+ && "getFoundDecl called on non-unique result");
+ return (*begin())->getUnderlyingDecl();
+ }
+
+ /// Fetches a representative decl. Useful for lazy diagnostics.
+ NamedDecl *getRepresentativeDecl() const {
+ assert(!Decls.empty() && "cannot get representative of empty set");
+ return *begin();
+ }
+
+ /// \brief Asks if the result is a single tag decl.
+ bool isSingleTagDecl() const {
+ return getResultKind() == Found && isa<TagDecl>(getFoundDecl());
+ }
+
+ /// \brief Make these results show that the name was found in
+ /// base classes of different types.
+ ///
+ /// The given paths object is copied and invalidated.
+ void setAmbiguousBaseSubobjectTypes(CXXBasePaths &P);
+
+ /// \brief Make these results show that the name was found in
+ /// distinct base classes of the same type.
+ ///
+ /// The given paths object is copied and invalidated.
+ void setAmbiguousBaseSubobjects(CXXBasePaths &P);
+
+ /// \brief Make these results show that the name was found in
+ /// different contexts and a tag decl was hidden by an ordinary
+ /// decl in a different context.
+ void setAmbiguousQualifiedTagHiding() {
+ setAmbiguous(AmbiguousTagHiding);
+ }
+
+ /// \brief Clears out any current state.
+ void clear() {
+ ResultKind = NotFound;
+ Decls.clear();
+ if (Paths) deletePaths(Paths);
+ Paths = NULL;
+ NamingClass = 0;
+ }
+
+ /// \brief Clears out any current state and re-initializes for a
+ /// different kind of lookup.
+ void clear(Sema::LookupNameKind Kind) {
+ clear();
+ LookupKind = Kind;
+ configure();
+ }
+
+ /// \brief Change this lookup's redeclaration kind.
+ void setRedeclarationKind(Sema::RedeclarationKind RK) {
+ Redecl = RK;
+ configure();
+ }
+
+ void print(raw_ostream &);
+
+ /// Suppress the diagnostics that would normally fire because of this
+ /// lookup. This happens during (e.g.) redeclaration lookups.
+ void suppressDiagnostics() {
+ Diagnose = false;
+ }
+
+ /// Determines whether this lookup is suppressing diagnostics.
+ bool isSuppressingDiagnostics() const {
+ return !Diagnose;
+ }
+
+ /// Sets a 'context' source range.
+ void setContextRange(SourceRange SR) {
+ NameContextRange = SR;
+ }
+
+ /// Gets the source range of the context of this name; for C++
+ /// qualified lookups, this is the source range of the scope
+ /// specifier.
+ SourceRange getContextRange() const {
+ return NameContextRange;
+ }
+
+ /// Gets the location of the identifier. This isn't always defined:
+ /// sometimes we're doing lookups on synthesized names.
+ SourceLocation getNameLoc() const {
+ return NameInfo.getLoc();
+ }
+
+ /// \brief Get the Sema object that this lookup result is searching
+ /// with.
+ Sema &getSema() const { return SemaRef; }
+
+ /// A class for iterating through a result set and possibly
+ /// filtering out results. The results returned are possibly
+ /// sugared.
+ class Filter {
+ LookupResult &Results;
+ LookupResult::iterator I;
+ bool Changed;
+ bool CalledDone;
+
+ friend class LookupResult;
+ Filter(LookupResult &Results)
+ : Results(Results), I(Results.begin()), Changed(false), CalledDone(false)
+ {}
+
+ public:
+ ~Filter() {
+ assert(CalledDone &&
+ "LookupResult::Filter destroyed without done() call");
+ }
+
+ bool hasNext() const {
+ return I != Results.end();
+ }
+
+ NamedDecl *next() {
+ assert(I != Results.end() && "next() called on empty filter");
+ return *I++;
+ }
+
+ /// Restart the iteration.
+ void restart() {
+ I = Results.begin();
+ }
+
+ /// Erase the last element returned from this iterator.
+ void erase() {
+ Results.Decls.erase(--I);
+ Changed = true;
+ }
+
+ /// Replaces the current entry with the given one, preserving the
+ /// access bits.
+ void replace(NamedDecl *D) {
+ Results.Decls.replace(I-1, D);
+ Changed = true;
+ }
+
+ /// Replaces the current entry with the given one.
+ void replace(NamedDecl *D, AccessSpecifier AS) {
+ Results.Decls.replace(I-1, D, AS);
+ Changed = true;
+ }
+
+ void done() {
+ assert(!CalledDone && "done() called twice");
+ CalledDone = true;
+
+ if (Changed)
+ Results.resolveKindAfterFilter();
+ }
+ };
+
+ /// Create a filter for this result set.
+ Filter makeFilter() {
+ return Filter(*this);
+ }
+
+private:
+ void diagnose() {
+ if (isAmbiguous())
+ SemaRef.DiagnoseAmbiguousLookup(*this);
+ else if (isClassLookup() && SemaRef.getLangOpts().AccessControl)
+ SemaRef.CheckLookupAccess(*this);
+ }
+
+ void setAmbiguous(AmbiguityKind AK) {
+ ResultKind = Ambiguous;
+ Ambiguity = AK;
+ }
+
+ void addDeclsFromBasePaths(const CXXBasePaths &P);
+ void configure();
+
+ // Sanity checks.
+ void sanityImpl() const;
+
+ void sanity() const {
+#ifndef NDEBUG
+ sanityImpl();
+#endif
+ }
+
+ bool sanityCheckUnresolved() const {
+ for (iterator I = begin(), E = end(); I != E; ++I)
+ if (isa<UnresolvedUsingValueDecl>(*I))
+ return true;
+ return false;
+ }
+
+ static void deletePaths(CXXBasePaths *);
+
+ // Results.
+ LookupResultKind ResultKind;
+ AmbiguityKind Ambiguity; // ill-defined unless ambiguous
+ UnresolvedSet<8> Decls;
+ CXXBasePaths *Paths;
+ CXXRecordDecl *NamingClass;
+ QualType BaseObjectType;
+
+ // Parameters.
+ Sema &SemaRef;
+ DeclarationNameInfo NameInfo;
+ SourceRange NameContextRange;
+ Sema::LookupNameKind LookupKind;
+ unsigned IDNS; // set by configure()
+
+ bool Redecl;
+
+ /// \brief True if tag declarations should be hidden if non-tags
+  ///   are present.
+ bool HideTags;
+
+ bool Diagnose;
+};
+
+ /// \brief Consumes visible declarations found when searching for
+ /// all visible names within a given scope or context.
+ ///
+ /// This abstract class is meant to be subclassed by clients of \c
+ /// Sema::LookupVisibleDecls(), each of which should override the \c
+ /// FoundDecl() function to process declarations as they are found.
+ class VisibleDeclConsumer {
+ public:
+ /// \brief Destroys the visible declaration consumer.
+ virtual ~VisibleDeclConsumer();
+
+ /// \brief Invoked each time \p Sema::LookupVisibleDecls() finds a
+ /// declaration visible from the current scope or context.
+ ///
+ /// \param ND the declaration found.
+ ///
+ /// \param Hiding a declaration that hides the declaration \p ND,
+ /// or NULL if no such declaration exists.
+ ///
+ /// \param Ctx the original context from which the lookup started.
+ ///
+    /// \param InBaseClass whether this declaration was found in a base
+    /// class of the context we searched.
+ virtual void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
+ bool InBaseClass) = 0;
+ };
+
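
A hedged sketch of how a client might subclass this consumer; the NameCollector class below is purely illustrative and not part of this patch.

// Hypothetical sketch, not part of this diff: collect the names of every
// visible declaration that is not hidden by another declaration.
#include "clang/AST/Decl.h"
#include "clang/Sema/Lookup.h"
#include <string>
#include <vector>

class NameCollector : public clang::VisibleDeclConsumer {
  std::vector<std::string> Names;

public:
  virtual void FoundDecl(clang::NamedDecl *ND, clang::NamedDecl *Hiding,
                         clang::DeclContext *Ctx, bool InBaseClass) {
    if (Hiding)
      return;               // skip declarations hidden from this scope
    Names.push_back(ND->getNameAsString());
  }

  const std::vector<std::string> &names() const { return Names; }
};
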
+/// \brief A class for storing results from argument-dependent lookup.
+class ADLResult {
+private:
+ /// A map from canonical decls to the 'most recent' decl.
+ llvm::DenseMap<NamedDecl*, NamedDecl*> Decls;
+
+public:
+ /// Adds a new ADL candidate to this map.
+ void insert(NamedDecl *D);
+
+ /// Removes any data associated with a given decl.
+ void erase(NamedDecl *D) {
+ Decls.erase(cast<NamedDecl>(D->getCanonicalDecl()));
+ }
+
+ class iterator {
+ typedef llvm::DenseMap<NamedDecl*,NamedDecl*>::iterator inner_iterator;
+ inner_iterator iter;
+
+ friend class ADLResult;
+ iterator(const inner_iterator &iter) : iter(iter) {}
+ public:
+ iterator() {}
+
+ iterator &operator++() { ++iter; return *this; }
+ iterator operator++(int) { return iterator(iter++); }
+
+ NamedDecl *operator*() const { return iter->second; }
+
+ bool operator==(const iterator &other) const { return iter == other.iter; }
+ bool operator!=(const iterator &other) const { return iter != other.iter; }
+ };
+
+ iterator begin() { return iterator(Decls.begin()); }
+ iterator end() { return iterator(Decls.end()); }
+};
+
+}
+
+#endif
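
As a hedged sketch of the Filter protocol declared above (the helper function and the choice to drop FunctionDecls are illustrative only): every filter must end with done(), and erase() marks the result set so its kind is re-resolved.

// Hypothetical sketch, not part of this diff: remove every function
// declaration from a LookupResult using the Filter protocol.
#include "clang/AST/Decl.h"
#include "clang/Sema/Lookup.h"
#include "llvm/Support/Casting.h"

static void removeFunctions(clang::LookupResult &R) {
  clang::LookupResult::Filter F = R.makeFilter();
  while (F.hasNext()) {
    clang::NamedDecl *D = F.next();
    if (llvm::isa<clang::FunctionDecl>(D->getUnderlyingDecl()))
      F.erase();                 // drops the decl returned by next()
  }
  F.done();                      // required; the destructor asserts otherwise
}
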
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/ObjCMethodList.h b/contrib/llvm/tools/clang/include/clang/Sema/ObjCMethodList.h
new file mode 100644
index 0000000..225c137
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/ObjCMethodList.h
@@ -0,0 +1,38 @@
+//===--- ObjCMethodList.h - A singly linked list of methods -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ObjCMethodList, a singly-linked list of methods.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_OBJC_METHOD_LIST_H
+#define LLVM_CLANG_SEMA_OBJC_METHOD_LIST_H
+
+namespace clang {
+
+class ObjCMethodDecl;
+
+/// ObjCMethodList - a linked list of methods with different signatures.
+struct ObjCMethodList {
+ ObjCMethodDecl *Method;
+ ObjCMethodList *Next;
+
+ ObjCMethodList() {
+ Method = 0;
+ Next = 0;
+ }
+ ObjCMethodList(ObjCMethodDecl *M, ObjCMethodList *C) {
+ Method = M;
+ Next = C;
+ }
+};
+
+}
+
+#endif
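
Since the struct is just a hand-rolled singly linked list, iteration is a plain pointer walk; the small counting helper below is a hypothetical illustration, not part of the patch.

// Hypothetical sketch, not part of this diff: count the methods reachable
// from the head of an ObjCMethodList chain.
#include "clang/Sema/ObjCMethodList.h"

static unsigned countMethods(const clang::ObjCMethodList &Head) {
  unsigned N = 0;
  for (const clang::ObjCMethodList *L = &Head; L; L = L->Next)
    if (L->Method)      // the head entry may have a null Method slot
      ++N;
  return N;
}
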
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Overload.h b/contrib/llvm/tools/clang/include/clang/Sema/Overload.h
new file mode 100644
index 0000000..d334447
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Overload.h
@@ -0,0 +1,813 @@
+//===--- Overload.h - C++ Overloading ---------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the data structures and types used in C++
+// overload resolution.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_OVERLOAD_H
+#define LLVM_CLANG_SEMA_OVERLOAD_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/UnresolvedSet.h"
+#include "clang/Sema/SemaFixItUtils.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Allocator.h"
+
+namespace clang {
+ class ASTContext;
+ class CXXConstructorDecl;
+ class CXXConversionDecl;
+ class FunctionDecl;
+ class Sema;
+
+ /// OverloadingResult - Capture the result of performing overload
+ /// resolution.
+ enum OverloadingResult {
+ OR_Success, ///< Overload resolution succeeded.
+ OR_No_Viable_Function, ///< No viable function found.
+ OR_Ambiguous, ///< Ambiguous candidates found.
+ OR_Deleted ///< Succeeded, but refers to a deleted function.
+ };
+
+ enum OverloadCandidateDisplayKind {
+ /// Requests that all candidates be shown. Viable candidates will
+ /// be printed first.
+ OCD_AllCandidates,
+
+ /// Requests that only viable candidates be shown.
+ OCD_ViableCandidates
+ };
+
+ /// ImplicitConversionKind - The kind of implicit conversion used to
+ /// convert an argument to a parameter's type. The enumerator values
+  /// match Table 9 of C++ 13.3.3.1.1 and are listed such that
+ /// better conversion kinds have smaller values.
+ enum ImplicitConversionKind {
+ ICK_Identity = 0, ///< Identity conversion (no conversion)
+ ICK_Lvalue_To_Rvalue, ///< Lvalue-to-rvalue conversion (C++ 4.1)
+ ICK_Array_To_Pointer, ///< Array-to-pointer conversion (C++ 4.2)
+ ICK_Function_To_Pointer, ///< Function-to-pointer (C++ 4.3)
+ ICK_NoReturn_Adjustment, ///< Removal of noreturn from a type (Clang)
+ ICK_Qualification, ///< Qualification conversions (C++ 4.4)
+ ICK_Integral_Promotion, ///< Integral promotions (C++ 4.5)
+ ICK_Floating_Promotion, ///< Floating point promotions (C++ 4.6)
+ ICK_Complex_Promotion, ///< Complex promotions (Clang extension)
+ ICK_Integral_Conversion, ///< Integral conversions (C++ 4.7)
+ ICK_Floating_Conversion, ///< Floating point conversions (C++ 4.8)
+ ICK_Complex_Conversion, ///< Complex conversions (C99 6.3.1.6)
+ ICK_Floating_Integral, ///< Floating-integral conversions (C++ 4.9)
+ ICK_Pointer_Conversion, ///< Pointer conversions (C++ 4.10)
+ ICK_Pointer_Member, ///< Pointer-to-member conversions (C++ 4.11)
+ ICK_Boolean_Conversion, ///< Boolean conversions (C++ 4.12)
+ ICK_Compatible_Conversion, ///< Conversions between compatible types in C99
+ ICK_Derived_To_Base, ///< Derived-to-base (C++ [over.best.ics])
+ ICK_Vector_Conversion, ///< Vector conversions
+ ICK_Vector_Splat, ///< A vector splat from an arithmetic type
+ ICK_Complex_Real, ///< Complex-real conversions (C99 6.3.1.7)
+ ICK_Block_Pointer_Conversion, ///< Block Pointer conversions
+    ICK_TransparentUnionConversion, ///< Transparent Union Conversions
+ ICK_Writeback_Conversion, ///< Objective-C ARC writeback conversion
+ ICK_Num_Conversion_Kinds ///< The number of conversion kinds
+ };
+
+ /// ImplicitConversionCategory - The category of an implicit
+  /// conversion kind. The enumerator values match Table 9 of
+  /// C++ 13.3.3.1.1 and are listed such that better conversion
+ /// categories have smaller values.
+ enum ImplicitConversionCategory {
+ ICC_Identity = 0, ///< Identity
+ ICC_Lvalue_Transformation, ///< Lvalue transformation
+ ICC_Qualification_Adjustment, ///< Qualification adjustment
+ ICC_Promotion, ///< Promotion
+ ICC_Conversion ///< Conversion
+ };
+
+ ImplicitConversionCategory
+ GetConversionCategory(ImplicitConversionKind Kind);
+
+ /// ImplicitConversionRank - The rank of an implicit conversion
+  /// kind. The enumerator values match Table 9 of
+  /// C++ 13.3.3.1.1 and are listed such that better conversion ranks
+ /// have smaller values.
+ enum ImplicitConversionRank {
+ ICR_Exact_Match = 0, ///< Exact Match
+ ICR_Promotion, ///< Promotion
+ ICR_Conversion, ///< Conversion
+ ICR_Complex_Real_Conversion, ///< Complex <-> Real conversion
+ ICR_Writeback_Conversion ///< ObjC ARC writeback conversion
+ };
+
+ ImplicitConversionRank GetConversionRank(ImplicitConversionKind Kind);
+
+ /// NarrowingKind - The kind of narrowing conversion being performed by a
+ /// standard conversion sequence according to C++11 [dcl.init.list]p7.
+ enum NarrowingKind {
+ /// Not a narrowing conversion.
+ NK_Not_Narrowing,
+
+ /// A narrowing conversion by virtue of the source and destination types.
+ NK_Type_Narrowing,
+
+ /// A narrowing conversion, because a constant expression got narrowed.
+ NK_Constant_Narrowing,
+
+    /// A narrowing conversion, because a non-constant-expression variable
+    /// might have been narrowed.
+ NK_Variable_Narrowing
+ };
+
+ /// StandardConversionSequence - represents a standard conversion
+ /// sequence (C++ 13.3.3.1.1). A standard conversion sequence
+ /// contains between zero and three conversions. If a particular
+ /// conversion is not needed, it will be set to the identity conversion
+ /// (ICK_Identity). Note that the three conversions are
+ /// specified as separate members (rather than in an array) so that
+ /// we can keep the size of a standard conversion sequence to a
+ /// single word.
+ class StandardConversionSequence {
+ public:
+ /// First -- The first conversion can be an lvalue-to-rvalue
+ /// conversion, array-to-pointer conversion, or
+ /// function-to-pointer conversion.
+ ImplicitConversionKind First : 8;
+
+ /// Second - The second conversion can be an integral promotion,
+ /// floating point promotion, integral conversion, floating point
+ /// conversion, floating-integral conversion, pointer conversion,
+ /// pointer-to-member conversion, or boolean conversion.
+ ImplicitConversionKind Second : 8;
+
+ /// Third - The third conversion can be a qualification conversion.
+ ImplicitConversionKind Third : 8;
+
+ /// \brief Whether this is the deprecated conversion of a
+ /// string literal to a pointer to non-const character data
+ /// (C++ 4.2p2).
+ unsigned DeprecatedStringLiteralToCharPtr : 1;
+
+ /// \brief Whether the qualification conversion involves a change in the
+ /// Objective-C lifetime (for automatic reference counting).
+ unsigned QualificationIncludesObjCLifetime : 1;
+
+ /// IncompatibleObjC - Whether this is an Objective-C conversion
+ /// that we should warn about (if we actually use it).
+ unsigned IncompatibleObjC : 1;
+
+ /// ReferenceBinding - True when this is a reference binding
+ /// (C++ [over.ics.ref]).
+ unsigned ReferenceBinding : 1;
+
+ /// DirectBinding - True when this is a reference binding that is a
+ /// direct binding (C++ [dcl.init.ref]).
+ unsigned DirectBinding : 1;
+
+ /// \brief Whether this is an lvalue reference binding (otherwise, it's
+ /// an rvalue reference binding).
+ unsigned IsLvalueReference : 1;
+
+ /// \brief Whether we're binding to a function lvalue.
+ unsigned BindsToFunctionLvalue : 1;
+
+ /// \brief Whether we're binding to an rvalue.
+ unsigned BindsToRvalue : 1;
+
+ /// \brief Whether this binds an implicit object argument to a
+ /// non-static member function without a ref-qualifier.
+ unsigned BindsImplicitObjectArgumentWithoutRefQualifier : 1;
+
+ /// \brief Whether this binds a reference to an object with a different
+ /// Objective-C lifetime qualifier.
+ unsigned ObjCLifetimeConversionBinding : 1;
+
+ /// FromType - The type that this conversion is converting
+ /// from. This is an opaque pointer that can be translated into a
+ /// QualType.
+ void *FromTypePtr;
+
+ /// ToType - The types that this conversion is converting to in
+ /// each step. This is an opaque pointer that can be translated
+ /// into a QualType.
+ void *ToTypePtrs[3];
+
+ /// CopyConstructor - The copy constructor that is used to perform
+ /// this conversion, when the conversion is actually just the
+ /// initialization of an object via copy constructor. Such
+ /// conversions are either identity conversions or derived-to-base
+ /// conversions.
+ CXXConstructorDecl *CopyConstructor;
+
+ void setFromType(QualType T) { FromTypePtr = T.getAsOpaquePtr(); }
+ void setToType(unsigned Idx, QualType T) {
+ assert(Idx < 3 && "To type index is out of range");
+ ToTypePtrs[Idx] = T.getAsOpaquePtr();
+ }
+ void setAllToTypes(QualType T) {
+ ToTypePtrs[0] = T.getAsOpaquePtr();
+ ToTypePtrs[1] = ToTypePtrs[0];
+ ToTypePtrs[2] = ToTypePtrs[0];
+ }
+
+ QualType getFromType() const {
+ return QualType::getFromOpaquePtr(FromTypePtr);
+ }
+ QualType getToType(unsigned Idx) const {
+ assert(Idx < 3 && "To type index is out of range");
+ return QualType::getFromOpaquePtr(ToTypePtrs[Idx]);
+ }
+
+ void setAsIdentityConversion();
+
+ bool isIdentityConversion() const {
+ return Second == ICK_Identity && Third == ICK_Identity;
+ }
+
+ ImplicitConversionRank getRank() const;
+ NarrowingKind getNarrowingKind(ASTContext &Context, const Expr *Converted,
+ APValue &ConstantValue,
+ QualType &ConstantType) const;
+ bool isPointerConversionToBool() const;
+ bool isPointerConversionToVoidPointer(ASTContext& Context) const;
+ void DebugPrint() const;
+ };
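
To make the three-slot encoding concrete, here is a hedged sketch (the helper and the ASTContext reference are assumed for illustration, not part of the patch) of how an int-to-long conversion could be represented: identity in the first and third slots, an integral conversion in the second.

// Hypothetical sketch, not part of this diff: encode "int -> long" as a
// standard conversion sequence.
#include "clang/AST/ASTContext.h"
#include "clang/Sema/Overload.h"

static clang::StandardConversionSequence intToLong(clang::ASTContext &Ctx) {
  clang::StandardConversionSequence SCS;
  SCS.setAsIdentityConversion();                // all three slots -> ICK_Identity
  SCS.Second = clang::ICK_Integral_Conversion;  // the only non-trivial step
  SCS.setFromType(Ctx.IntTy);
  SCS.setAllToTypes(Ctx.LongTy);
  return SCS;
}
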
+
+ /// UserDefinedConversionSequence - Represents a user-defined
+ /// conversion sequence (C++ 13.3.3.1.2).
+ struct UserDefinedConversionSequence {
+ /// \brief Represents the standard conversion that occurs before
+ /// the actual user-defined conversion.
+ ///
+ /// C++11 13.3.3.1.2p1:
+ /// If the user-defined conversion is specified by a constructor
+ /// (12.3.1), the initial standard conversion sequence converts
+ /// the source type to the type required by the argument of the
+ /// constructor. If the user-defined conversion is specified by
+ /// a conversion function (12.3.2), the initial standard
+ /// conversion sequence converts the source type to the implicit
+ /// object parameter of the conversion function.
+ StandardConversionSequence Before;
+
+    /// EllipsisConversion - When this is true, it means the user-defined
+    /// conversion sequence starts with a ... (ellipsis) conversion instead of
+    /// a standard conversion. In this case, the 'Before' field must be ignored.
+    // FIXME: I'd much rather put this as the first field, but there seems to
+    // be a gcc code-gen bug which causes a crash in a test. Putting it here
+    // seems to work around the crash.
+ bool EllipsisConversion : 1;
+
+ /// HadMultipleCandidates - When this is true, it means that the
+ /// conversion function was resolved from an overloaded set having
+ /// size greater than 1.
+ bool HadMultipleCandidates : 1;
+
+ /// After - Represents the standard conversion that occurs after
+ /// the actual user-defined conversion.
+ StandardConversionSequence After;
+
+ /// ConversionFunction - The function that will perform the
+ /// user-defined conversion. Null if the conversion is an
+ /// aggregate initialization from an initializer list.
+ FunctionDecl* ConversionFunction;
+
+ /// \brief The declaration that we found via name lookup, which might be
+ /// the same as \c ConversionFunction or it might be a using declaration
+ /// that refers to \c ConversionFunction.
+ DeclAccessPair FoundConversionFunction;
+
+ void DebugPrint() const;
+ };
+
+ /// Represents an ambiguous user-defined conversion sequence.
+ struct AmbiguousConversionSequence {
+ typedef SmallVector<FunctionDecl*, 4> ConversionSet;
+
+ void *FromTypePtr;
+ void *ToTypePtr;
+ char Buffer[sizeof(ConversionSet)];
+
+ QualType getFromType() const {
+ return QualType::getFromOpaquePtr(FromTypePtr);
+ }
+ QualType getToType() const {
+ return QualType::getFromOpaquePtr(ToTypePtr);
+ }
+ void setFromType(QualType T) { FromTypePtr = T.getAsOpaquePtr(); }
+ void setToType(QualType T) { ToTypePtr = T.getAsOpaquePtr(); }
+
+ ConversionSet &conversions() {
+ return *reinterpret_cast<ConversionSet*>(Buffer);
+ }
+
+ const ConversionSet &conversions() const {
+ return *reinterpret_cast<const ConversionSet*>(Buffer);
+ }
+
+ void addConversion(FunctionDecl *D) {
+ conversions().push_back(D);
+ }
+
+ typedef ConversionSet::iterator iterator;
+ iterator begin() { return conversions().begin(); }
+ iterator end() { return conversions().end(); }
+
+ typedef ConversionSet::const_iterator const_iterator;
+ const_iterator begin() const { return conversions().begin(); }
+ const_iterator end() const { return conversions().end(); }
+
+ void construct();
+ void destruct();
+ void copyFrom(const AmbiguousConversionSequence &);
+ };
+
+ /// BadConversionSequence - Records information about an invalid
+ /// conversion sequence.
+ struct BadConversionSequence {
+ enum FailureKind {
+ no_conversion,
+ unrelated_class,
+ suppressed_user,
+ bad_qualifiers,
+ lvalue_ref_to_rvalue,
+ rvalue_ref_to_lvalue
+ };
+
+ // This can be null, e.g. for implicit object arguments.
+ Expr *FromExpr;
+
+ FailureKind Kind;
+
+ private:
+ // The type we're converting from (an opaque QualType).
+ void *FromTy;
+
+ // The type we're converting to (an opaque QualType).
+ void *ToTy;
+
+ public:
+ void init(FailureKind K, Expr *From, QualType To) {
+ init(K, From->getType(), To);
+ FromExpr = From;
+ }
+ void init(FailureKind K, QualType From, QualType To) {
+ Kind = K;
+ FromExpr = 0;
+ setFromType(From);
+ setToType(To);
+ }
+
+ QualType getFromType() const { return QualType::getFromOpaquePtr(FromTy); }
+ QualType getToType() const { return QualType::getFromOpaquePtr(ToTy); }
+
+ void setFromExpr(Expr *E) {
+ FromExpr = E;
+ setFromType(E->getType());
+ }
+ void setFromType(QualType T) { FromTy = T.getAsOpaquePtr(); }
+ void setToType(QualType T) { ToTy = T.getAsOpaquePtr(); }
+ };
+
+ /// ImplicitConversionSequence - Represents an implicit conversion
+ /// sequence, which may be a standard conversion sequence
+ /// (C++ 13.3.3.1.1), user-defined conversion sequence (C++ 13.3.3.1.2),
+ /// or an ellipsis conversion sequence (C++ 13.3.3.1.3).
+ class ImplicitConversionSequence {
+ public:
+ /// Kind - The kind of implicit conversion sequence. BadConversion
+ /// specifies that there is no conversion from the source type to
+ /// the target type. AmbiguousConversion represents the unique
+ /// ambiguous conversion (C++0x [over.best.ics]p10).
+ enum Kind {
+ StandardConversion = 0,
+ UserDefinedConversion,
+ AmbiguousConversion,
+ EllipsisConversion,
+ BadConversion
+ };
+
+ private:
+ enum {
+ Uninitialized = BadConversion + 1
+ };
+
+ /// ConversionKind - The kind of implicit conversion sequence.
+ unsigned ConversionKind : 30;
+
+ /// \brief Whether the argument is an initializer list.
+ bool ListInitializationSequence : 1;
+
+ /// \brief Whether the target is really a std::initializer_list, and the
+ /// sequence only represents the worst element conversion.
+ bool StdInitializerListElement : 1;
+
+ void setKind(Kind K) {
+ destruct();
+ ConversionKind = K;
+ }
+
+ void destruct() {
+ if (ConversionKind == AmbiguousConversion) Ambiguous.destruct();
+ }
+
+ public:
+ union {
+ /// When ConversionKind == StandardConversion, provides the
+ /// details of the standard conversion sequence.
+ StandardConversionSequence Standard;
+
+ /// When ConversionKind == UserDefinedConversion, provides the
+ /// details of the user-defined conversion sequence.
+ UserDefinedConversionSequence UserDefined;
+
+ /// When ConversionKind == AmbiguousConversion, provides the
+ /// details of the ambiguous conversion.
+ AmbiguousConversionSequence Ambiguous;
+
+ /// When ConversionKind == BadConversion, provides the details
+ /// of the bad conversion.
+ BadConversionSequence Bad;
+ };
+
+ ImplicitConversionSequence()
+ : ConversionKind(Uninitialized), ListInitializationSequence(false),
+ StdInitializerListElement(false)
+ {}
+ ~ImplicitConversionSequence() {
+ destruct();
+ }
+ ImplicitConversionSequence(const ImplicitConversionSequence &Other)
+ : ConversionKind(Other.ConversionKind),
+ ListInitializationSequence(Other.ListInitializationSequence),
+ StdInitializerListElement(Other.StdInitializerListElement)
+ {
+ switch (ConversionKind) {
+ case Uninitialized: break;
+ case StandardConversion: Standard = Other.Standard; break;
+ case UserDefinedConversion: UserDefined = Other.UserDefined; break;
+ case AmbiguousConversion: Ambiguous.copyFrom(Other.Ambiguous); break;
+ case EllipsisConversion: break;
+ case BadConversion: Bad = Other.Bad; break;
+ }
+ }
+
+ ImplicitConversionSequence &
+ operator=(const ImplicitConversionSequence &Other) {
+ destruct();
+ new (this) ImplicitConversionSequence(Other);
+ return *this;
+ }
+
+ Kind getKind() const {
+ assert(isInitialized() && "querying uninitialized conversion");
+ return Kind(ConversionKind);
+ }
+
+ /// \brief Return a ranking of the implicit conversion sequence
+ /// kind, where smaller ranks represent better conversion
+ /// sequences.
+ ///
+ /// In particular, this routine gives user-defined conversion
+ /// sequences and ambiguous conversion sequences the same rank,
+ /// per C++ [over.best.ics]p10.
+ unsigned getKindRank() const {
+ switch (getKind()) {
+ case StandardConversion:
+ return 0;
+
+ case UserDefinedConversion:
+ case AmbiguousConversion:
+ return 1;
+
+ case EllipsisConversion:
+ return 2;
+
+ case BadConversion:
+ return 3;
+ }
+
+ llvm_unreachable("Invalid ImplicitConversionSequence::Kind!");
+ }
+
+ bool isBad() const { return getKind() == BadConversion; }
+ bool isStandard() const { return getKind() == StandardConversion; }
+ bool isEllipsis() const { return getKind() == EllipsisConversion; }
+ bool isAmbiguous() const { return getKind() == AmbiguousConversion; }
+ bool isUserDefined() const { return getKind() == UserDefinedConversion; }
+ bool isFailure() const { return isBad() || isAmbiguous(); }
+
+ /// Determines whether this conversion sequence has been
+ /// initialized. Most operations should never need to query
+ /// uninitialized conversions and should assert as above.
+ bool isInitialized() const { return ConversionKind != Uninitialized; }
+
+ /// Sets this sequence as a bad conversion for an explicit argument.
+ void setBad(BadConversionSequence::FailureKind Failure,
+ Expr *FromExpr, QualType ToType) {
+ setKind(BadConversion);
+ Bad.init(Failure, FromExpr, ToType);
+ }
+
+ /// Sets this sequence as a bad conversion for an implicit argument.
+ void setBad(BadConversionSequence::FailureKind Failure,
+ QualType FromType, QualType ToType) {
+ setKind(BadConversion);
+ Bad.init(Failure, FromType, ToType);
+ }
+
+ void setStandard() { setKind(StandardConversion); }
+ void setEllipsis() { setKind(EllipsisConversion); }
+ void setUserDefined() { setKind(UserDefinedConversion); }
+ void setAmbiguous() {
+ if (ConversionKind == AmbiguousConversion) return;
+ ConversionKind = AmbiguousConversion;
+ Ambiguous.construct();
+ }
+
+ /// \brief Whether this sequence was created by the rules of
+ /// list-initialization sequences.
+ bool isListInitializationSequence() const {
+ return ListInitializationSequence;
+ }
+
+ void setListInitializationSequence() {
+ ListInitializationSequence = true;
+ }
+
+ /// \brief Whether the target is really a std::initializer_list, and the
+ /// sequence only represents the worst element conversion.
+ bool isStdInitializerListElement() const {
+ return StdInitializerListElement;
+ }
+
+ void setStdInitializerListElement(bool V = true) {
+ StdInitializerListElement = V;
+ }
+
+ // The result of a comparison between implicit conversion
+ // sequences. Use Sema::CompareImplicitConversionSequences to
+ // actually perform the comparison.
+ enum CompareKind {
+ Better = -1,
+ Indistinguishable = 0,
+ Worse = 1
+ };
+
+ void DiagnoseAmbiguousConversion(Sema &S,
+ SourceLocation CaretLoc,
+ const PartialDiagnostic &PDiag) const;
+
+ void DebugPrint() const;
+ };
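
A small hedged sketch of the failure path (the helper and its arguments are invented for illustration): a sequence marked with setBad() reports isBad() and counts as a failure.

// Hypothetical sketch, not part of this diff: mark a conversion sequence as
// failed and observe how that is reflected in its queries.
#include "clang/Sema/Overload.h"

static bool markNoConversion(clang::ImplicitConversionSequence &ICS,
                             clang::QualType From, clang::QualType To) {
  ICS.setBad(clang::BadConversionSequence::no_conversion, From, To);
  return ICS.isBad() && ICS.isFailure();  // bad sequences always count as failures
}
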
+
+ enum OverloadFailureKind {
+ ovl_fail_too_many_arguments,
+ ovl_fail_too_few_arguments,
+ ovl_fail_bad_conversion,
+ ovl_fail_bad_deduction,
+
+ /// This conversion candidate was not considered because it
+ /// duplicates the work of a trivial or derived-to-base
+ /// conversion.
+ ovl_fail_trivial_conversion,
+
+ /// This conversion candidate is not viable because its result
+ /// type is not implicitly convertible to the desired type.
+ ovl_fail_bad_final_conversion,
+
+ /// This conversion function template specialization candidate is not
+ /// viable because the final conversion was not an exact match.
+ ovl_fail_final_conversion_not_exact,
+
+ /// (CUDA) This candidate was not viable because the callee
+ /// was not accessible from the caller's target (i.e. host->device,
+ /// global->host, device->host).
+ ovl_fail_bad_target
+ };
+
+ /// OverloadCandidate - A single candidate in an overload set (C++ 13.3).
+ struct OverloadCandidate {
+ /// Function - The actual function that this candidate
+ /// represents. When NULL, this is a built-in candidate
+ /// (C++ [over.oper]) or a surrogate for a conversion to a
+ /// function pointer or reference (C++ [over.call.object]).
+ FunctionDecl *Function;
+
+ /// FoundDecl - The original declaration that was looked up /
+ /// invented / otherwise found, together with its access.
+ /// Might be a UsingShadowDecl or a FunctionTemplateDecl.
+ DeclAccessPair FoundDecl;
+
+ // BuiltinTypes - Provides the return and parameter types of a
+ // built-in overload candidate. Only valid when Function is NULL.
+ struct {
+ QualType ResultTy;
+ QualType ParamTypes[3];
+ } BuiltinTypes;
+
+ /// Surrogate - The conversion function for which this candidate
+ /// is a surrogate, but only if IsSurrogate is true.
+ CXXConversionDecl *Surrogate;
+
+ /// Conversions - The conversion sequences used to convert the
+ /// function arguments to the function parameters, the pointer points to a
+ /// fixed size array with NumConversions elements. The memory is owned by
+ /// the OverloadCandidateSet.
+ ImplicitConversionSequence *Conversions;
+
+ /// The FixIt hints which can be used to fix the Bad candidate.
+ ConversionFixItGenerator Fix;
+
+ /// NumConversions - The number of elements in the Conversions array.
+ unsigned NumConversions;
+
+ /// Viable - True to indicate that this overload candidate is viable.
+ bool Viable;
+
+ /// IsSurrogate - True to indicate that this candidate is a
+ /// surrogate for a conversion to a function pointer or reference
+ /// (C++ [over.call.object]).
+ bool IsSurrogate;
+
+ /// IgnoreObjectArgument - True to indicate that the first
+ /// argument's conversion, which for this function represents the
+ /// implicit object argument, should be ignored. This will be true
+ /// when the candidate is a static member function (where the
+ /// implicit object argument is just a placeholder) or a
+ /// non-static member function when the call doesn't have an
+ /// object argument.
+ bool IgnoreObjectArgument;
+
+ /// FailureKind - The reason why this candidate is not viable.
+ /// Actually an OverloadFailureKind.
+ unsigned char FailureKind;
+
+ /// \brief The number of call arguments that were explicitly provided,
+ /// to be used while performing partial ordering of function templates.
+ unsigned ExplicitCallArguments;
+
+ /// A structure used to record information about a failed
+ /// template argument deduction.
+ struct DeductionFailureInfo {
+ // A Sema::TemplateDeductionResult.
+ unsigned Result;
+
+ /// \brief Opaque pointer containing additional data about
+ /// this deduction failure.
+ void *Data;
+
+ /// \brief Retrieve the template parameter this deduction failure
+ /// refers to, if any.
+ TemplateParameter getTemplateParameter();
+
+ /// \brief Retrieve the template argument list associated with this
+ /// deduction failure, if any.
+ TemplateArgumentList *getTemplateArgumentList();
+
+ /// \brief Return the first template argument this deduction failure
+ /// refers to, if any.
+ const TemplateArgument *getFirstArg();
+
+ /// \brief Return the second template argument this deduction failure
+ /// refers to, if any.
+ const TemplateArgument *getSecondArg();
+
+ /// \brief Free any memory associated with this deduction failure.
+ void Destroy();
+ };
+
+ union {
+ DeductionFailureInfo DeductionFailure;
+
+ /// FinalConversion - For a conversion function (where Function is
+ /// a CXXConversionDecl), the standard conversion that occurs
+ /// after the call to the overload candidate to convert the result
+ /// of calling the conversion function to the required type.
+ StandardConversionSequence FinalConversion;
+ };
+
+ /// hasAmbiguousConversion - Returns whether this overload
+ /// candidate requires an ambiguous conversion or not.
+ bool hasAmbiguousConversion() const {
+ for (unsigned i = 0, e = NumConversions; i != e; ++i) {
+ if (!Conversions[i].isInitialized()) return false;
+ if (Conversions[i].isAmbiguous()) return true;
+ }
+ return false;
+ }
+
+ bool TryToFixBadConversion(unsigned Idx, Sema &S) {
+ bool CanFix = Fix.tryToFixConversion(
+ Conversions[Idx].Bad.FromExpr,
+ Conversions[Idx].Bad.getFromType(),
+ Conversions[Idx].Bad.getToType(), S);
+
+ // If at least one conversion fails, the candidate cannot be fixed.
+ if (!CanFix)
+ Fix.clear();
+
+ return CanFix;
+ }
+ };
+
+ /// OverloadCandidateSet - A set of overload candidates, used in C++
+ /// overload resolution (C++ 13.3).
+ class OverloadCandidateSet {
+ SmallVector<OverloadCandidate, 16> Candidates;
+ llvm::SmallPtrSet<Decl *, 16> Functions;
+
+ // Allocator for OverloadCandidate::Conversions. We store the first few
+ // elements inline to avoid allocation for small sets.
+ llvm::BumpPtrAllocator ConversionSequenceAllocator;
+
+ SourceLocation Loc;
+
+ unsigned NumInlineSequences;
+ char InlineSpace[16 * sizeof(ImplicitConversionSequence)];
+
+ OverloadCandidateSet(const OverloadCandidateSet &);
+ OverloadCandidateSet &operator=(const OverloadCandidateSet &);
+
+ public:
+ OverloadCandidateSet(SourceLocation Loc) : Loc(Loc), NumInlineSequences(0){}
+ ~OverloadCandidateSet() {
+ for (iterator i = begin(), e = end(); i != e; ++i)
+ for (unsigned ii = 0, ie = i->NumConversions; ii != ie; ++ii)
+ i->Conversions[ii].~ImplicitConversionSequence();
+ }
+
+ SourceLocation getLocation() const { return Loc; }
+
+    /// \brief Determine whether this overload candidate is new to the
+    /// overload set; the candidate's canonical declaration is recorded so
+    /// that later duplicates are rejected.
+ bool isNewCandidate(Decl *F) {
+ return Functions.insert(F->getCanonicalDecl());
+ }
+
+ /// \brief Clear out all of the candidates.
+ void clear();
+
+ typedef SmallVector<OverloadCandidate, 16>::iterator iterator;
+ iterator begin() { return Candidates.begin(); }
+ iterator end() { return Candidates.end(); }
+
+ size_t size() const { return Candidates.size(); }
+ bool empty() const { return Candidates.empty(); }
+
+ /// \brief Add a new candidate with NumConversions conversion sequence slots
+ /// to the overload set.
+ OverloadCandidate &addCandidate(unsigned NumConversions = 0) {
+ Candidates.push_back(OverloadCandidate());
+ OverloadCandidate &C = Candidates.back();
+
+ // Assign space from the inline array if there are enough free slots
+ // available.
+ if (NumConversions + NumInlineSequences <= 16) {
+ ImplicitConversionSequence *I =
+ (ImplicitConversionSequence*)InlineSpace;
+ C.Conversions = &I[NumInlineSequences];
+ NumInlineSequences += NumConversions;
+ } else {
+ // Otherwise get memory from the allocator.
+ C.Conversions = ConversionSequenceAllocator
+ .Allocate<ImplicitConversionSequence>(NumConversions);
+ }
+
+ // Construct the new objects.
+ for (unsigned i = 0; i != NumConversions; ++i)
+ new (&C.Conversions[i]) ImplicitConversionSequence();
+
+ C.NumConversions = NumConversions;
+ return C;
+ }
+
+ /// Find the best viable function on this overload set, if it exists.
+ OverloadingResult BestViableFunction(Sema &S, SourceLocation Loc,
+ OverloadCandidateSet::iterator& Best,
+ bool UserDefinedConversion = false);
+
+ void NoteCandidates(Sema &S,
+ OverloadCandidateDisplayKind OCD,
+ llvm::ArrayRef<Expr *> Args,
+ const char *Opc = 0,
+ SourceLocation Loc = SourceLocation());
+ };
+
+ bool isBetterOverloadCandidate(Sema &S,
+ const OverloadCandidate& Cand1,
+ const OverloadCandidate& Cand2,
+ SourceLocation Loc,
+ bool UserDefinedConversion = false);
+} // end namespace clang
+
+#endif // LLVM_CLANG_SEMA_OVERLOAD_H
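
As a hedged sketch of the typical driver loop over this set (the helper, and the decision to print all candidates on every failure, are assumptions for illustration; a real caller handles OR_Deleted with its own diagnostic):

// Hypothetical sketch, not part of this diff: resolve a candidate set and
// report the candidates when resolution fails.
#include "clang/Sema/Overload.h"
#include "llvm/ADT/ArrayRef.h"

static clang::FunctionDecl *resolveBest(clang::Sema &S,
                                        clang::OverloadCandidateSet &Candidates,
                                        llvm::ArrayRef<clang::Expr *> Args,
                                        clang::SourceLocation Loc) {
  clang::OverloadCandidateSet::iterator Best;
  switch (Candidates.BestViableFunction(S, Loc, Best)) {
  case clang::OR_Success:
    return Best->Function;   // null for built-in candidates
  case clang::OR_No_Viable_Function:
  case clang::OR_Ambiguous:
  case clang::OR_Deleted:
    Candidates.NoteCandidates(S, clang::OCD_AllCandidates, Args);
    return 0;
  }
  return 0;                  // not reached; keeps some compilers quiet
}
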
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Ownership.h b/contrib/llvm/tools/clang/include/clang/Sema/Ownership.h
new file mode 100644
index 0000000..fb9e368d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Ownership.h
@@ -0,0 +1,469 @@
+//===--- Ownership.h - Parser ownership helpers -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains classes for managing ownership of Stmt and Expr nodes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_OWNERSHIP_H
+#define LLVM_CLANG_SEMA_OWNERSHIP_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/PointerIntPair.h"
+
+//===----------------------------------------------------------------------===//
+// OpaquePtr
+//===----------------------------------------------------------------------===//
+
+namespace clang {
+ class Attr;
+ class CXXCtorInitializer;
+ class CXXBaseSpecifier;
+ class Decl;
+ class DeclGroupRef;
+ class Expr;
+ class NestedNameSpecifier;
+ class QualType;
+ class Sema;
+ class Stmt;
+ class TemplateName;
+ class TemplateParameterList;
+
+ /// OpaquePtr - This is a very simple POD type that wraps a pointer that the
+ /// Parser doesn't know about but that Sema or another client does. The UID
+ /// template argument is used to make sure that "Decl" pointers are not
+ /// compatible with "Type" pointers for example.
+ template <class PtrTy>
+ class OpaquePtr {
+ void *Ptr;
+ explicit OpaquePtr(void *Ptr) : Ptr(Ptr) {}
+
+ typedef llvm::PointerLikeTypeTraits<PtrTy> Traits;
+
+ public:
+ OpaquePtr() : Ptr(0) {}
+
+ static OpaquePtr make(PtrTy P) { OpaquePtr OP; OP.set(P); return OP; }
+
+ template <typename T> T* getAs() const {
+ return get();
+ }
+
+ template <typename T> T getAsVal() const {
+ return get();
+ }
+
+ PtrTy get() const {
+ return Traits::getFromVoidPointer(Ptr);
+ }
+
+ void set(PtrTy P) {
+ Ptr = Traits::getAsVoidPointer(P);
+ }
+
+ operator bool() const { return Ptr != 0; }
+
+ void *getAsOpaquePtr() const { return Ptr; }
+ static OpaquePtr getFromOpaquePtr(void *P) { return OpaquePtr(P); }
+ };
+
+ /// UnionOpaquePtr - A version of OpaquePtr suitable for membership
+ /// in a union.
+ template <class T> struct UnionOpaquePtr {
+ void *Ptr;
+
+ static UnionOpaquePtr make(OpaquePtr<T> P) {
+ UnionOpaquePtr OP = { P.getAsOpaquePtr() };
+ return OP;
+ }
+
+ OpaquePtr<T> get() const { return OpaquePtr<T>::getFromOpaquePtr(Ptr); }
+ operator OpaquePtr<T>() const { return get(); }
+
+ UnionOpaquePtr &operator=(OpaquePtr<T> P) {
+ Ptr = P.getAsOpaquePtr();
+ return *this;
+ }
+ };
+}
+
+namespace llvm {
+ template <class T>
+ class PointerLikeTypeTraits<clang::OpaquePtr<T> > {
+ public:
+ static inline void *getAsVoidPointer(clang::OpaquePtr<T> P) {
+ // FIXME: Doesn't work? return P.getAs< void >();
+ return P.getAsOpaquePtr();
+ }
+ static inline clang::OpaquePtr<T> getFromVoidPointer(void *P) {
+ return clang::OpaquePtr<T>::getFromOpaquePtr(P);
+ }
+ enum { NumLowBitsAvailable = 0 };
+ };
+
+ template <class T>
+ struct isPodLike<clang::OpaquePtr<T> > { static const bool value = true; };
+}
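
A hedged sketch of the wrap/unwrap round trip the parser relies on (the helper is illustrative; ParsedType, introduced later in this header, is simply OpaquePtr<QualType>):

// Hypothetical sketch, not part of this diff: round-trip a QualType through
// the opaque wrapper the parser stores without knowing the real type.
#include "clang/AST/Type.h"
#include "clang/Sema/Ownership.h"

static clang::QualType roundTrip(clang::QualType T) {
  clang::OpaquePtr<clang::QualType> P =
      clang::OpaquePtr<clang::QualType>::make(T);    // wrap
  void *Raw = P.getAsOpaquePtr();                    // what the parser carries
  return clang::OpaquePtr<clang::QualType>::getFromOpaquePtr(Raw).get();
}
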
+
+
+
+// -------------------------- About Move Emulation -------------------------- //
+// The smart pointer classes in this file attempt to emulate move semantics
+// as they appear in C++0x with rvalue references. Since C++03 doesn't have
+// rvalue references, some tricks are needed to get similar results.
+// Move semantics in C++0x have the following properties:
+// 1) "Moving" means transferring the value of an object to another object,
+// similar to copying, but without caring what happens to the old object.
+// In particular, this means that the new object can steal the old object's
+// resources instead of creating a copy.
+// 2) Since moving can modify the source object, it must either be explicitly
+// requested by the user, or the modifications must be unnoticeable.
+// 3) As such, C++0x moving is only allowed in three contexts:
+// * By explicitly using std::move() to request it.
+// * From a temporary object, since that object cannot be accessed
+// afterwards anyway, thus making the state unobservable.
+// * On function return, since the object is not observable afterwards.
+//
+// To sum up: moving from a named object should only be possible with an
+// explicit std::move(), or on function return. Moving from a temporary should
+// be implicitly done. Moving from a const object is forbidden.
+//
+// The emulation is not perfect, and has the following shortcomings:
+// * move() is not in namespace std.
+// * move() is required on function return.
+// * There are difficulties with implicit conversions.
+// * Microsoft's compiler must be given the /Za switch to successfully compile.
+//
+// -------------------------- Implementation -------------------------------- //
+// The move emulation relies on the peculiar reference binding semantics of
+// C++03: as a rule, a non-const reference may not bind to a temporary object,
+// except for the implicit object parameter in a member function call, which
+// can refer to a temporary even when not being const.
+// The moveable object has five important functions to facilitate moving:
+// * A private, unimplemented constructor taking a non-const reference to its
+// own class. This constructor serves a two-fold purpose.
+// - It prevents the creation of a copy constructor that takes a const
+// reference. Temporaries would be able to bind to the argument of such a
+// constructor, and that would be bad.
+// - Named objects will bind to the non-const reference, but since it's
+// private, this will fail to compile. This prevents implicit moving from
+// named objects.
+// There's also a copy assignment operator for the same purpose.
+// * An implicit, non-const conversion operator to a special mover type. This
+// type represents the rvalue reference of C++0x. Being a non-const member,
+// its implicit this parameter can bind to temporaries.
+// * A constructor that takes an object of this mover type. This constructor
+// performs the actual move operation. There is an equivalent assignment
+// operator.
+// There is also a free move() function that takes a non-const reference to
+// an object and returns a temporary. Internally, this function uses explicit
+// constructor calls to move the value from the referenced object to the return
+// value.
+//
+// There are now three possible scenarios of use.
+// * Copying from a const object. Constructor overload resolution will find the
+// non-const copy constructor, and the move constructor. The first is not
+// viable because the const object cannot be bound to the non-const reference.
+// The second fails because the conversion to the mover object is non-const.
+// Moving from a const object fails as intended.
+// * Copying from a named object. Constructor overload resolution will select
+// the non-const copy constructor, but fail as intended, because this
+// constructor is private.
+// * Copying from a temporary. Constructor overload resolution cannot select
+// the non-const copy constructor, because the temporary cannot be bound to
+// the non-const reference. It thus selects the move constructor. The
+// temporary can be bound to the implicit this parameter of the conversion
+// operator, because of the special binding rule. Construction succeeds.
+// Note that the Microsoft compiler, as an extension, allows binding
+// temporaries against non-const references. The compiler thus selects the
+// non-const copy constructor and fails, because the constructor is private.
+// Passing /Za (disable extensions) disables this behaviour.
+// The free move() function is used to move from a named object.
+//
+// Note that when passing an object of a different type (the classes below
+// have OwningResult and OwningPtr, which should be mixable), you get a problem.
+// Argument passing and function return use copy initialization rules. The
+// effect of this is that, when the source object is not already of the target
+// type, the compiler will first seek a way to convert the source object to the
+// target type, and only then attempt to copy the resulting object. This means
+// that when passing an OwningResult where an OwningPtr is expected, the
+// compiler will first seek a conversion from OwningResult to OwningPtr, then
+// copy the OwningPtr. The resulting conversion sequence is:
+// OwningResult object -> ResultMover -> OwningResult argument to
+// OwningPtr(OwningResult) -> OwningPtr -> PtrMover -> final OwningPtr
+// This conversion sequence is too complex to be allowed. Thus the special
+// move_* functions, which help the compiler out with some explicit
+// conversions.
+
+namespace clang {
+ // Basic
+ class DiagnosticBuilder;
+
+ // Determines whether the low bit of the result pointer for the
+ // given UID is always zero. If so, ActionResult will use that bit
+  // for its "invalid" flag.
+ template<class Ptr>
+ struct IsResultPtrLowBitFree {
+ static const bool value = false;
+ };
+
+ /// ActionResult - This structure is used while parsing/acting on
+ /// expressions, stmts, etc. It encapsulates both the object returned by
+ /// the action, plus a sense of whether or not it is valid.
+ /// When CompressInvalid is true, the "invalid" flag will be
+ /// stored in the low bit of the Val pointer.
+ template<class PtrTy,
+ bool CompressInvalid = IsResultPtrLowBitFree<PtrTy>::value>
+ class ActionResult {
+ PtrTy Val;
+ bool Invalid;
+
+ public:
+ ActionResult(bool Invalid = false)
+ : Val(PtrTy()), Invalid(Invalid) {}
+ ActionResult(PtrTy val) : Val(val), Invalid(false) {}
+ ActionResult(const DiagnosticBuilder &) : Val(PtrTy()), Invalid(true) {}
+
+ // These two overloads prevent void* -> bool conversions.
+ ActionResult(const void *);
+ ActionResult(volatile void *);
+
+ bool isInvalid() const { return Invalid; }
+ bool isUsable() const { return !Invalid && Val; }
+
+ PtrTy get() const { return Val; }
+ PtrTy release() const { return Val; }
+ PtrTy take() const { return Val; }
+ template <typename T> T *takeAs() { return static_cast<T*>(get()); }
+
+ void set(PtrTy V) { Val = V; }
+
+ const ActionResult &operator=(PtrTy RHS) {
+ Val = RHS;
+ Invalid = false;
+ return *this;
+ }
+ };
+
+ // This ActionResult partial specialization places the "invalid"
+ // flag into the low bit of the pointer.
+ template<typename PtrTy>
+ class ActionResult<PtrTy, true> {
+ // A pointer whose low bit is 1 if this result is invalid, 0
+ // otherwise.
+ uintptr_t PtrWithInvalid;
+ typedef llvm::PointerLikeTypeTraits<PtrTy> PtrTraits;
+ public:
+ ActionResult(bool Invalid = false)
+ : PtrWithInvalid(static_cast<uintptr_t>(Invalid)) { }
+
+ ActionResult(PtrTy V) {
+ void *VP = PtrTraits::getAsVoidPointer(V);
+ PtrWithInvalid = reinterpret_cast<uintptr_t>(VP);
+ assert((PtrWithInvalid & 0x01) == 0 && "Badly aligned pointer");
+ }
+ ActionResult(const DiagnosticBuilder &) : PtrWithInvalid(0x01) { }
+
+ // These two overloads prevent void* -> bool conversions.
+ ActionResult(const void *);
+ ActionResult(volatile void *);
+
+ bool isInvalid() const { return PtrWithInvalid & 0x01; }
+ bool isUsable() const { return PtrWithInvalid > 0x01; }
+
+ PtrTy get() const {
+ void *VP = reinterpret_cast<void *>(PtrWithInvalid & ~0x01);
+ return PtrTraits::getFromVoidPointer(VP);
+ }
+ PtrTy take() const { return get(); }
+ PtrTy release() const { return get(); }
+ template <typename T> T *takeAs() { return static_cast<T*>(get()); }
+
+ void set(PtrTy V) {
+ void *VP = PtrTraits::getAsVoidPointer(V);
+ PtrWithInvalid = reinterpret_cast<uintptr_t>(VP);
+ assert((PtrWithInvalid & 0x01) == 0 && "Badly aligned pointer");
+ }
+
+ const ActionResult &operator=(PtrTy RHS) {
+ void *VP = PtrTraits::getAsVoidPointer(RHS);
+ PtrWithInvalid = reinterpret_cast<uintptr_t>(VP);
+ assert((PtrWithInvalid & 0x01) == 0 && "Badly aligned pointer");
+ return *this;
+ }
+ };
+
+ /// ASTMultiPtr - A moveable smart pointer to multiple AST nodes. Only owns
+ /// the individual pointers, not the array holding them.
+ template <typename PtrTy> class ASTMultiPtr;
+
+ template <class PtrTy>
+ class ASTMultiPtr {
+ PtrTy *Nodes;
+ unsigned Count;
+
+ public:
+ // Normal copying implicitly defined
+ ASTMultiPtr() : Nodes(0), Count(0) {}
+ explicit ASTMultiPtr(Sema &) : Nodes(0), Count(0) {}
+ ASTMultiPtr(Sema &, PtrTy *nodes, unsigned count)
+ : Nodes(nodes), Count(count) {}
+ // Fake mover in Parse/AstGuard.h needs this:
+ ASTMultiPtr(PtrTy *nodes, unsigned count) : Nodes(nodes), Count(count) {}
+
+ /// Access to the raw pointers.
+ PtrTy *get() const { return Nodes; }
+
+ /// Access to the count.
+ unsigned size() const { return Count; }
+
+ PtrTy *release() {
+ return Nodes;
+ }
+ };
+
+ class ParsedTemplateArgument;
+
+ class ASTTemplateArgsPtr {
+ ParsedTemplateArgument *Args;
+ mutable unsigned Count;
+
+ public:
+ ASTTemplateArgsPtr(Sema &actions, ParsedTemplateArgument *args,
+ unsigned count) :
+ Args(args), Count(count) { }
+
+ // FIXME: Lame, not-fully-type-safe emulation of 'move semantics'.
+ ASTTemplateArgsPtr(ASTTemplateArgsPtr &Other) :
+ Args(Other.Args), Count(Other.Count) {
+ }
+
+ // FIXME: Lame, not-fully-type-safe emulation of 'move semantics'.
+ ASTTemplateArgsPtr& operator=(ASTTemplateArgsPtr &Other) {
+ Args = Other.Args;
+ Count = Other.Count;
+ return *this;
+ }
+
+ ParsedTemplateArgument *getArgs() const { return Args; }
+ unsigned size() const { return Count; }
+
+ void reset(ParsedTemplateArgument *args, unsigned count) {
+ Args = args;
+ Count = count;
+ }
+
+ const ParsedTemplateArgument &operator[](unsigned Arg) const;
+
+ ParsedTemplateArgument *release() const {
+ return Args;
+ }
+ };
+
+ /// \brief A small vector that owns a set of AST nodes.
+ template <class PtrTy, unsigned N = 8>
+ class ASTOwningVector : public SmallVector<PtrTy, N> {
+ ASTOwningVector(ASTOwningVector &); // do not implement
+ ASTOwningVector &operator=(ASTOwningVector &); // do not implement
+
+ public:
+ explicit ASTOwningVector(Sema &Actions)
+ { }
+
+ PtrTy *take() {
+ return &this->front();
+ }
+
+ template<typename T> T **takeAs() { return reinterpret_cast<T**>(take()); }
+ };
+
+ /// An opaque type for threading parsed type information through the
+ /// parser.
+ typedef OpaquePtr<QualType> ParsedType;
+ typedef UnionOpaquePtr<QualType> UnionParsedType;
+
+  /// A SmallVector of statements, with stack size 32 (as that is the only
+  /// size used).
+ typedef ASTOwningVector<Stmt*, 32> StmtVector;
+  /// A SmallVector of expressions, with stack size 12 (the maximum used).
+ typedef ASTOwningVector<Expr*, 12> ExprVector;
+ /// A SmallVector of types.
+ typedef ASTOwningVector<ParsedType, 12> TypeVector;
+
+ template <class T, unsigned N> inline
+ ASTMultiPtr<T> move_arg(ASTOwningVector<T, N> &vec) {
+ return ASTMultiPtr<T>(vec.take(), vec.size());
+ }
+
+ // These versions are hopefully no-ops.
+ template <class T, bool C>
+ inline ActionResult<T,C> move(ActionResult<T,C> &ptr) {
+ return ptr;
+ }
+
+ template <class T> inline
+ ASTMultiPtr<T>& move(ASTMultiPtr<T> &ptr) {
+ return ptr;
+ }
+
+ // We can re-use the low bit of expression, statement, base, and
+ // member-initializer pointers for the "invalid" flag of
+ // ActionResult.
+ template<> struct IsResultPtrLowBitFree<Expr*> {
+ static const bool value = true;
+ };
+ template<> struct IsResultPtrLowBitFree<Stmt*> {
+ static const bool value = true;
+ };
+ template<> struct IsResultPtrLowBitFree<CXXBaseSpecifier*> {
+ static const bool value = true;
+ };
+ template<> struct IsResultPtrLowBitFree<CXXCtorInitializer*> {
+ static const bool value = true;
+ };
+
+ typedef ActionResult<Expr*> ExprResult;
+ typedef ActionResult<Stmt*> StmtResult;
+ typedef ActionResult<ParsedType> TypeResult;
+ typedef ActionResult<CXXBaseSpecifier*> BaseResult;
+ typedef ActionResult<CXXCtorInitializer*> MemInitResult;
+
+ typedef ActionResult<Decl*> DeclResult;
+ typedef OpaquePtr<TemplateName> ParsedTemplateTy;
+
+ inline Expr *move(Expr *E) { return E; }
+ inline Stmt *move(Stmt *S) { return S; }
+
+ typedef ASTMultiPtr<Expr*> MultiExprArg;
+ typedef ASTMultiPtr<Stmt*> MultiStmtArg;
+ typedef ASTMultiPtr<ParsedType> MultiTypeArg;
+ typedef ASTMultiPtr<TemplateParameterList*> MultiTemplateParamsArg;
+
+ inline ExprResult ExprError() { return ExprResult(true); }
+ inline StmtResult StmtError() { return StmtResult(true); }
+
+ inline ExprResult ExprError(const DiagnosticBuilder&) { return ExprError(); }
+ inline StmtResult StmtError(const DiagnosticBuilder&) { return StmtError(); }
+
+ inline ExprResult ExprEmpty() { return ExprResult(false); }
+ inline StmtResult StmtEmpty() { return StmtResult(false); }
+
+ inline Expr *AssertSuccess(ExprResult R) {
+ assert(!R.isInvalid() && "operation was asserted to never fail!");
+ return R.get();
+ }
+
+ inline Stmt *AssertSuccess(StmtResult R) {
+ assert(!R.isInvalid() && "operation was asserted to never fail!");
+ return R.get();
+ }
+}
+
+#endif
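
A hedged sketch of the usual ActionResult idiom (the function itself is invented for illustration): failure is propagated with ExprError(), success hands the node back to the caller.

// Hypothetical sketch, not part of this diff: the common check-and-forward
// pattern used with ExprResult.
#include "clang/AST/Expr.h"
#include "clang/Sema/Ownership.h"

static clang::ExprResult forwardOperand(clang::ExprResult Operand) {
  if (Operand.isInvalid())
    return clang::ExprError();       // keep propagating the invalid bit
  clang::Expr *E = Operand.take();   // may be null if the result was empty
  // ... a real caller would build a new node from E here ...
  return E;
}
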
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h b/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h
new file mode 100644
index 0000000..3ff0459
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/ParsedTemplate.h
@@ -0,0 +1,215 @@
+//===--- ParsedTemplate.h - Template Parsing Data Types -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides data structures that store the parsed representation of
+// templates.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_SEMA_PARSEDTEMPLATE_H
+#define LLVM_CLANG_SEMA_PARSEDTEMPLATE_H
+
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Ownership.h"
+#include <cassert>
+
+namespace clang {
+ /// \brief Represents the parsed form of a C++ template argument.
+ class ParsedTemplateArgument {
+ public:
+ /// \brief Describes the kind of template argument that was parsed.
+ enum KindType {
+ /// \brief A template type parameter, stored as a type.
+ Type,
+ /// \brief A non-type template parameter, stored as an expression.
+ NonType,
+ /// \brief A template template argument, stored as a template name.
+ Template
+ };
+
+ /// \brief Build an empty template argument.
+ ///
+ /// This template argument is invalid.
+ ParsedTemplateArgument() : Kind(Type), Arg(0) { }
+
+ /// \brief Create a template type argument or non-type template argument.
+ ///
+ /// \param Arg the template type argument or non-type template argument.
+ /// \param Loc the location of the type.
+ ParsedTemplateArgument(KindType Kind, void *Arg, SourceLocation Loc)
+ : Kind(Kind), Arg(Arg), Loc(Loc) { }
+
+ /// \brief Create a template template argument.
+ ///
+ /// \param SS the C++ scope specifier that precedes the template name, if
+ /// any.
+ ///
+ /// \param Template the template to which this template template
+ /// argument refers.
+ ///
+ /// \param TemplateLoc the location of the template name.
+ ParsedTemplateArgument(const CXXScopeSpec &SS,
+ ParsedTemplateTy Template,
+ SourceLocation TemplateLoc)
+ : Kind(ParsedTemplateArgument::Template),
+ Arg(Template.getAsOpaquePtr()),
+ Loc(TemplateLoc), SS(SS), EllipsisLoc() { }
+
+ /// \brief Determine whether the given template argument is invalid.
+ bool isInvalid() const { return Arg == 0; }
+
+ /// \brief Determine what kind of template argument we have.
+ KindType getKind() const { return Kind; }
+
+ /// \brief Retrieve the template type argument's type.
+ ParsedType getAsType() const {
+ assert(Kind == Type && "Not a template type argument");
+ return ParsedType::getFromOpaquePtr(Arg);
+ }
+
+ /// \brief Retrieve the non-type template argument's expression.
+ Expr *getAsExpr() const {
+ assert(Kind == NonType && "Not a non-type template argument");
+ return static_cast<Expr*>(Arg);
+ }
+
+ /// \brief Retrieve the template template argument's template name.
+ ParsedTemplateTy getAsTemplate() const {
+ assert(Kind == Template && "Not a template template argument");
+ return ParsedTemplateTy::getFromOpaquePtr(Arg);
+ }
+
+ /// \brief Retrieve the location of the template argument.
+ SourceLocation getLocation() const { return Loc; }
+
+ /// \brief Retrieve the nested-name-specifier that precedes the template
+ /// name in a template template argument.
+ const CXXScopeSpec &getScopeSpec() const {
+ assert(Kind == Template &&
+ "Only template template arguments can have a scope specifier");
+ return SS;
+ }
+
+ /// \brief Retrieve the location of the ellipsis that makes a template
+ /// template argument into a pack expansion.
+ SourceLocation getEllipsisLoc() const {
+ assert(Kind == Template &&
+ "Only template template arguments can have an ellipsis");
+ return EllipsisLoc;
+ }
+
+ /// \brief Retrieve a pack expansion of the given template template
+ /// argument.
+ ///
+ /// \param EllipsisLoc The location of the ellipsis.
+ ParsedTemplateArgument getTemplatePackExpansion(
+ SourceLocation EllipsisLoc) const;
+
+ private:
+ KindType Kind;
+
+ /// \brief The actual template argument representation, which may be
+ /// the opaque pointer of a ParsedType (for a type), an Expr* (for an
+ /// expression), or the opaque pointer of a ParsedTemplateTy (for a
+ /// template).
+ void *Arg;
+
+ /// \brief The location of the template argument.
+ SourceLocation Loc;
+
+ /// \brief The nested-name-specifier that can accompany a template template
+ /// argument.
+ CXXScopeSpec SS;
+
+ /// \brief The ellipsis location that can accompany a template template
+ /// argument (turning it into a template template argument expansion).
+ SourceLocation EllipsisLoc;
+ };
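+
+  // For illustration (T and TypeLoc are assumed to come from the parser), a
+  // type argument is built from the opaque pointer of a ParsedType and then
+  // queried through the accessors above:
+  //
+  //   ParsedTemplateArgument Arg(ParsedTemplateArgument::Type,
+  //                              T.getAsOpaquePtr(), TypeLoc);
+  //   if (!Arg.isInvalid() && Arg.getKind() == ParsedTemplateArgument::Type)
+  //     ParsedType Parsed = Arg.getAsType();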
+
+ /// \brief Information about a template-id annotation
+ /// token.
+ ///
+ /// A template-id annotation token contains the template declaration,
+ /// template arguments, whether those template arguments were types,
+ /// expressions, or template names, and the source locations for important
+ /// tokens. All of the information about template arguments is allocated
+ /// directly after this structure.
+ struct TemplateIdAnnotation {
+ /// \brief The nested-name-specifier that precedes the template name.
+ CXXScopeSpec SS;
+
+ /// TemplateKWLoc - The location of the template keyword within the
+ /// source.
+ SourceLocation TemplateKWLoc;
+
+ /// TemplateNameLoc - The location of the template name within the
+ /// source.
+ SourceLocation TemplateNameLoc;
+
+ /// FIXME: Temporarily stores the name of a specialization
+ IdentifierInfo *Name;
+
+ /// FIXME: Temporarily stores the overloaded operator kind.
+ OverloadedOperatorKind Operator;
+
+ /// The declaration of the template corresponding to the
+ /// template-name.
+ ParsedTemplateTy Template;
+
+ /// The kind of template that Template refers to.
+ TemplateNameKind Kind;
+
+ /// The location of the '<' before the template argument
+ /// list.
+ SourceLocation LAngleLoc;
+
+ /// The location of the '>' after the template argument
+ /// list.
+ SourceLocation RAngleLoc;
+
+ /// NumArgs - The number of template arguments.
+ unsigned NumArgs;
+
+ /// \brief Retrieves a pointer to the template arguments
+ ParsedTemplateArgument *getTemplateArgs() {
+ return reinterpret_cast<ParsedTemplateArgument *>(this + 1);
+ }
+
+ static TemplateIdAnnotation* Allocate(unsigned NumArgs) {
+ TemplateIdAnnotation *TemplateId
+ = (TemplateIdAnnotation *)std::malloc(sizeof(TemplateIdAnnotation) +
+ sizeof(ParsedTemplateArgument) * NumArgs);
+ TemplateId->NumArgs = NumArgs;
+
+ // Default-construct nested-name-specifier.
+ new (&TemplateId->SS) CXXScopeSpec();
+
+ // Default-construct parsed template arguments.
+ ParsedTemplateArgument *TemplateArgs = TemplateId->getTemplateArgs();
+ for (unsigned I = 0; I != NumArgs; ++I)
+ new (TemplateArgs + I) ParsedTemplateArgument();
+
+ return TemplateId;
+ }
+
+ void Destroy() {
+ SS.~CXXScopeSpec();
+ free(this);
+ }
+ };
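+
+  // Allocation/teardown sketch (illustrative; NumArgs and ParsedArgs are
+  // assumed to come from the parser):
+  //
+  //   TemplateIdAnnotation *Id = TemplateIdAnnotation::Allocate(NumArgs);
+  //   Id->Kind = TNK_Type_template;
+  //   for (unsigned I = 0; I != NumArgs; ++I)
+  //     Id->getTemplateArgs()[I] = ParsedArgs[I];
+  //   // ... attach Id to the annotation token; when it is consumed:
+  //   Id->Destroy();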
+
+ /// Retrieves the range of the given template parameter lists.
+ SourceRange getTemplateParamsRange(TemplateParameterList const *const *Params,
+ unsigned NumParams);
+
+ inline const ParsedTemplateArgument &
+ ASTTemplateArgsPtr::operator[](unsigned Arg) const {
+ return Args[Arg];
+ }
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/PrettyDeclStackTrace.h b/contrib/llvm/tools/clang/include/clang/Sema/PrettyDeclStackTrace.h
new file mode 100644
index 0000000..aa55705
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/PrettyDeclStackTrace.h
@@ -0,0 +1,47 @@
+//===- PrettyDeclStackTrace.h - Stack trace for decl processing -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines an llvm::PrettyStackTraceEntry object for showing
+// that a particular declaration was being processed when a crash
+// occurred.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_PRETTY_DECL_STACK_TRACE_H
+#define LLVM_CLANG_SEMA_PRETTY_DECL_STACK_TRACE_H
+
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/Support/PrettyStackTrace.h"
+
+namespace clang {
+
+class Decl;
+class Sema;
+class SourceManager;
+
+/// PrettyDeclStackTraceEntry - If a crash occurs in the parser while
+/// parsing something related to a declaration, include that
+/// declaration in the stack trace.
+class PrettyDeclStackTraceEntry : public llvm::PrettyStackTraceEntry {
+ Sema &S;
+ Decl *TheDecl;
+ SourceLocation Loc;
+ const char *Message;
+
+public:
+ PrettyDeclStackTraceEntry(Sema &S, Decl *D, SourceLocation Loc,
+ const char *Msg)
+ : S(S), TheDecl(D), Loc(Loc), Message(Msg) {}
+
+ virtual void print(raw_ostream &OS) const;
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Scope.h b/contrib/llvm/tools/clang/include/clang/Sema/Scope.h
new file mode 100644
index 0000000..e9aa173
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Scope.h
@@ -0,0 +1,334 @@
+//===--- Scope.h - Scope interface ------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Scope interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SCOPE_H
+#define LLVM_CLANG_SEMA_SCOPE_H
+
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+
+class Decl;
+class UsingDirectiveDecl;
+
+/// Scope - A scope is a transient data structure that is used while parsing the
+/// program. It assists with resolving identifiers to the appropriate
+/// declaration.
+///
+class Scope {
+public:
+ /// ScopeFlags - These are bitfields that are or'd together when creating a
+ /// scope, which defines the sorts of things the scope contains.
+ enum ScopeFlags {
+ /// FnScope - This indicates that the scope corresponds to a function, which
+ /// means that labels are set here.
+ FnScope = 0x01,
+
+ /// BreakScope - This is a while, do, switch, for, etc. that can have break
+ /// statements embedded into it.
+ BreakScope = 0x02,
+
+ /// ContinueScope - This is a while, do, or for, which can have continue
+ /// statements embedded into it.
+ ContinueScope = 0x04,
+
+ /// DeclScope - This is a scope that can contain a declaration. Some scopes
+ /// just contain loop constructs but don't contain decls.
+ DeclScope = 0x08,
+
+ /// ControlScope - The controlling scope in a if/switch/while/for statement.
+ ControlScope = 0x10,
+
+ /// ClassScope - The scope of a struct/union/class definition.
+ ClassScope = 0x20,
+
+ /// BlockScope - This is a scope that corresponds to a block/closure object.
+ /// Blocks serve as top-level scopes for some objects like labels; they
+ /// also prevent things like break and continue. BlockScopes always have
+ /// the FnScope and DeclScope flags set as well.
+ BlockScope = 0x40,
+
+ /// TemplateParamScope - This is a scope that corresponds to the
+ /// template parameters of a C++ template. Template parameter
+ /// scope starts at the 'template' keyword and ends when the
+ /// template declaration ends.
+ TemplateParamScope = 0x80,
+
+ /// FunctionPrototypeScope - This is a scope that corresponds to the
+ /// parameters within a function prototype.
+ FunctionPrototypeScope = 0x100,
+
+ /// AtCatchScope - This is a scope that corresponds to the Objective-C
+ /// @catch statement.
+ AtCatchScope = 0x200,
+
+ /// ObjCMethodScope - This scope corresponds to an Objective-C method body.
+ /// It always has FnScope and DeclScope set as well.
+ ObjCMethodScope = 0x400,
+
+ /// SwitchScope - This is a scope that corresponds to a switch statement.
+ SwitchScope = 0x800,
+
+ /// ThisScope - This is the scope of a struct/union/class definition,
+ /// outside of any member function definition, where 'this' is nonetheless
+ /// usable.
+ ThisScope = 0x1000,
+
+ /// TryScope - This is the scope of a C++ try statement.
+ TryScope = 0x2000
+ };
+private:
+ /// The parent scope for this scope. This is null for the translation-unit
+ /// scope.
+ Scope *AnyParent;
+
+ /// Depth - This is the depth of this scope. The translation-unit scope has
+ /// depth 0.
+ unsigned short Depth;
+
+ /// Flags - This contains a set of ScopeFlags, which indicates how the scope
+ /// interrelates with other control flow statements.
+ unsigned short Flags;
+
+ /// PrototypeDepth - This is the number of function prototype scopes
+ /// enclosing this scope, including this scope.
+ unsigned short PrototypeDepth;
+
+ /// PrototypeIndex - This is the number of parameters currently
+ /// declared in this scope.
+ unsigned short PrototypeIndex;
+
+ /// FnParent - If this scope has a parent scope that is a function body, this
+ /// pointer is non-null and points to it. This is used for label processing.
+ Scope *FnParent;
+
+ /// BreakParent/ContinueParent - This is a direct link to the innermost
+ /// BreakScope/ContinueScope which contains the contents of this scope
+ /// for control flow purposes (and might be this scope itself), or null
+ /// if there is no such scope.
+ Scope *BreakParent, *ContinueParent;
+
+ /// BlockParent - This is a direct link to the immediately containing
+ /// BlockScope if this scope is not one, or null if there is none.
+ Scope *BlockParent;
+
+ /// TemplateParamParent - This is a direct link to the
+ /// immediately containing template parameter scope. In the
+ /// case of nested templates, template parameter scopes can have
+ /// other template parameter scopes as parents.
+ Scope *TemplateParamParent;
+
+ /// DeclsInScope - This keeps track of all declarations in this scope. When
+ /// the declaration is added to the scope, it is set as the current
+ /// declaration for the identifier in the IdentifierTable. When the scope is
+ /// popped, these declarations are removed from the IdentifierTable's notion
+ /// of current declaration. It is up to the current Action implementation to
+ /// implement these semantics.
+ typedef llvm::SmallPtrSet<Decl *, 32> DeclSetTy;
+ DeclSetTy DeclsInScope;
+
+ /// Entity - The entity with which this scope is associated. For
+ /// example, the entity of a class scope is the class itself, the
+ /// entity of a function scope is a function, etc. This field is
+ /// maintained by the Action implementation.
+ void *Entity;
+
+ typedef SmallVector<UsingDirectiveDecl *, 2> UsingDirectivesTy;
+ UsingDirectivesTy UsingDirectives;
+
+ /// \brief Used to determine if errors occurred in this scope.
+ DiagnosticErrorTrap ErrorTrap;
+
+public:
+ Scope(Scope *Parent, unsigned ScopeFlags, DiagnosticsEngine &Diag)
+ : ErrorTrap(Diag) {
+ Init(Parent, ScopeFlags);
+ }
+
+ /// getFlags - Return the flags for this scope.
+ ///
+ unsigned getFlags() const { return Flags; }
+ void setFlags(unsigned F) { Flags = F; }
+
+ /// isBlockScope - Return true if this scope corresponds to a closure.
+ bool isBlockScope() const { return Flags & BlockScope; }
+
+ /// getParent - Return the scope that this is nested in.
+ ///
+ const Scope *getParent() const { return AnyParent; }
+ Scope *getParent() { return AnyParent; }
+
+ /// getFnParent - Return the closest scope that is a function body.
+ ///
+ const Scope *getFnParent() const { return FnParent; }
+ Scope *getFnParent() { return FnParent; }
+
+ /// getContinueParent - Return the closest scope that a continue statement
+ /// would be affected by.
+ Scope *getContinueParent() {
+ return ContinueParent;
+ }
+
+ const Scope *getContinueParent() const {
+ return const_cast<Scope*>(this)->getContinueParent();
+ }
+
+ /// getBreakParent - Return the closest scope that a break statement
+ /// would be affected by.
+ Scope *getBreakParent() {
+ return BreakParent;
+ }
+ const Scope *getBreakParent() const {
+ return const_cast<Scope*>(this)->getBreakParent();
+ }
+
+ Scope *getBlockParent() { return BlockParent; }
+ const Scope *getBlockParent() const { return BlockParent; }
+
+ Scope *getTemplateParamParent() { return TemplateParamParent; }
+ const Scope *getTemplateParamParent() const { return TemplateParamParent; }
+
+ /// Returns the number of function prototype scopes in this scope
+ /// chain.
+ unsigned getFunctionPrototypeDepth() const {
+ return PrototypeDepth;
+ }
+
+ /// Return the number of parameters declared in this function
+ /// prototype, increasing it by one for the next call.
+ unsigned getNextFunctionPrototypeIndex() {
+ assert(isFunctionPrototypeScope());
+ return PrototypeIndex++;
+ }
+
+ typedef DeclSetTy::iterator decl_iterator;
+ decl_iterator decl_begin() const { return DeclsInScope.begin(); }
+ decl_iterator decl_end() const { return DeclsInScope.end(); }
+ bool decl_empty() const { return DeclsInScope.empty(); }
+
+ void AddDecl(Decl *D) {
+ DeclsInScope.insert(D);
+ }
+
+ void RemoveDecl(Decl *D) {
+ DeclsInScope.erase(D);
+ }
+
+ /// isDeclScope - Return true if this is the scope that the specified decl is
+ /// declared in.
+ bool isDeclScope(Decl *D) {
+ return DeclsInScope.count(D) != 0;
+ }
+
+ void* getEntity() const { return Entity; }
+ void setEntity(void *E) { Entity = E; }
+
+ bool hasErrorOccurred() const { return ErrorTrap.hasErrorOccurred(); }
+
+ /// isClassScope - Return true if this scope is a class/struct/union scope.
+ bool isClassScope() const {
+ return (getFlags() & Scope::ClassScope);
+ }
+
+ /// isInCXXInlineMethodScope - Return true if this scope is a C++ inline
+ /// method scope or is inside one.
+ bool isInCXXInlineMethodScope() const {
+ if (const Scope *FnS = getFnParent()) {
+ assert(FnS->getParent() && "TUScope not created?");
+ return FnS->getParent()->isClassScope();
+ }
+ return false;
+ }
+
+ /// isInObjcMethodScope - Return true if this scope is, or is contained in, an
+ /// Objective-C method body. Note that this method is not constant time.
+ bool isInObjcMethodScope() const {
+ for (const Scope *S = this; S; S = S->getParent()) {
+ // If this scope is an objc method scope, then we succeed.
+ if (S->getFlags() & ObjCMethodScope)
+ return true;
+ }
+ return false;
+ }
+
+ /// isTemplateParamScope - Return true if this scope is a C++
+ /// template parameter scope.
+ bool isTemplateParamScope() const {
+ return getFlags() & Scope::TemplateParamScope;
+ }
+
+ /// isFunctionPrototypeScope - Return true if this scope is a
+ /// function prototype scope.
+ bool isFunctionPrototypeScope() const {
+ return getFlags() & Scope::FunctionPrototypeScope;
+ }
+
+ /// isAtCatchScope - Return true if this scope is an Objective-C @catch scope.
+ bool isAtCatchScope() const {
+ return getFlags() & Scope::AtCatchScope;
+ }
+
+ /// isSwitchScope - Return true if this scope is a switch scope.
+ bool isSwitchScope() const {
+ for (const Scope *S = this; S; S = S->getParent()) {
+ if (S->getFlags() & Scope::SwitchScope)
+ return true;
+ else if (S->getFlags() & (Scope::FnScope | Scope::ClassScope |
+ Scope::BlockScope | Scope::TemplateParamScope |
+ Scope::FunctionPrototypeScope |
+ Scope::AtCatchScope | Scope::ObjCMethodScope))
+ return false;
+ }
+ return false;
+ }
+
+ /// \brief Determine whether this scope is a C++ 'try' block.
+ bool isTryScope() const { return getFlags() & Scope::TryScope; }
+
+ /// containedInPrototypeScope - Return true if this or a parent scope
+ /// is a FunctionPrototypeScope.
+ bool containedInPrototypeScope() const;
+
+ typedef UsingDirectivesTy::iterator udir_iterator;
+ typedef UsingDirectivesTy::const_iterator const_udir_iterator;
+
+ void PushUsingDirective(UsingDirectiveDecl *UDir) {
+ UsingDirectives.push_back(UDir);
+ }
+
+ udir_iterator using_directives_begin() {
+ return UsingDirectives.begin();
+ }
+
+ udir_iterator using_directives_end() {
+ return UsingDirectives.end();
+ }
+
+ const_udir_iterator using_directives_begin() const {
+ return UsingDirectives.begin();
+ }
+
+ const_udir_iterator using_directives_end() const {
+ return UsingDirectives.end();
+ }
+
+ /// Init - This is used by the parser to implement scope caching.
+ ///
+ void Init(Scope *parent, unsigned flags);
+};
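+
+// A typical parser-side sketch (illustrative; CurScope, Diags, and D are
+// assumed to be in scope): a new scope is created with or'd ScopeFlags and
+// then used to track declarations.
+//
+//   Scope BodyScope(CurScope, Scope::FnScope | Scope::DeclScope, Diags);
+//   BodyScope.AddDecl(D);
+//   assert(BodyScope.isDeclScope(D));
+//   assert(!BodyScope.isTemplateParamScope());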
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h b/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h
new file mode 100644
index 0000000..ceaf586
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/ScopeInfo.h
@@ -0,0 +1,380 @@
+//===--- ScopeInfo.h - Information about a semantic context -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines FunctionScopeInfo and BlockScopeInfo.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SCOPE_INFO_H
+#define LLVM_CLANG_SEMA_SCOPE_INFO_H
+
+#include "clang/AST/Type.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+
+class BlockDecl;
+class CXXMethodDecl;
+class IdentifierInfo;
+class LabelDecl;
+class ReturnStmt;
+class Scope;
+class SwitchStmt;
+class VarDecl;
+
+namespace sema {
+
+/// \brief Contains information about the compound statement currently being
+/// parsed.
+class CompoundScopeInfo {
+public:
+ CompoundScopeInfo()
+ : HasEmptyLoopBodies(false) { }
+
+ /// \brief Whether this compound statement contains `for' or `while' loops
+ /// with empty bodies.
+ bool HasEmptyLoopBodies;
+
+ void setHasEmptyLoopBodies() {
+ HasEmptyLoopBodies = true;
+ }
+};
+
+class PossiblyUnreachableDiag {
+public:
+ PartialDiagnostic PD;
+ SourceLocation Loc;
+ const Stmt *stmt;
+
+ PossiblyUnreachableDiag(const PartialDiagnostic &PD, SourceLocation Loc,
+ const Stmt *stmt)
+ : PD(PD), Loc(Loc), stmt(stmt) {}
+};
+
+/// \brief Retains information about a function, method, or block that is
+/// currently being parsed.
+class FunctionScopeInfo {
+protected:
+ enum ScopeKind {
+ SK_Function,
+ SK_Block,
+ SK_Lambda
+ };
+
+public:
+ /// \brief What kind of scope we are describing.
+ ///
+ ScopeKind Kind;
+
+ /// \brief Whether this function contains a VLA, @try, try, C++
+ /// initializer, or anything else that can't be jumped past.
+ bool HasBranchProtectedScope;
+
+ /// \brief Whether this function contains any switches or direct gotos.
+ bool HasBranchIntoScope;
+
+ /// \brief Whether this function contains any indirect gotos.
+ bool HasIndirectGoto;
+
+ /// \brief Used to determine if errors occurred in this function or block.
+ DiagnosticErrorTrap ErrorTrap;
+
+ /// SwitchStack - This is the current set of active switch statements in the
+ /// block.
+ SmallVector<SwitchStmt*, 8> SwitchStack;
+
+ /// \brief The list of return statements that occur within the function or
+ /// block, if there is any chance of applying the named return value
+ /// optimization.
+ SmallVector<ReturnStmt*, 4> Returns;
+
+ /// \brief The stack of currently active compound statement scopes in the
+ /// function.
+ SmallVector<CompoundScopeInfo, 4> CompoundScopes;
+
+ /// \brief A list of PartialDiagnostics created but delayed within the
+ /// current function scope. These diagnostics are vetted for reachability
+ /// prior to being emitted.
+ SmallVector<PossiblyUnreachableDiag, 4> PossiblyUnreachableDiags;
+
+ void setHasBranchIntoScope() {
+ HasBranchIntoScope = true;
+ }
+
+ void setHasBranchProtectedScope() {
+ HasBranchProtectedScope = true;
+ }
+
+ void setHasIndirectGoto() {
+ HasIndirectGoto = true;
+ }
+
+ bool NeedsScopeChecking() const {
+ return HasIndirectGoto ||
+ (HasBranchProtectedScope && HasBranchIntoScope);
+ }
+
+ FunctionScopeInfo(DiagnosticsEngine &Diag)
+ : Kind(SK_Function),
+ HasBranchProtectedScope(false),
+ HasBranchIntoScope(false),
+ HasIndirectGoto(false),
+ ErrorTrap(Diag) { }
+
+ virtual ~FunctionScopeInfo();
+
+ /// \brief Clear out the information in this function scope, making it
+ /// suitable for reuse.
+ void Clear();
+
+ static bool classof(const FunctionScopeInfo *FSI) { return true; }
+};
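+
+// Scope-checking sketch (illustrative; Diags is a DiagnosticsEngine): the
+// parser records interesting constructs as it sees them, and the jump-scope
+// checker only needs to run when NeedsScopeChecking() says it can matter.
+//
+//   FunctionScopeInfo FSI(Diags);
+//   FSI.setHasIndirectGoto();
+//   FSI.setHasBranchProtectedScope();
+//   assert(FSI.NeedsScopeChecking());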
+
+class CapturingScopeInfo : public FunctionScopeInfo {
+public:
+ enum ImplicitCaptureStyle {
+ ImpCap_None, ImpCap_LambdaByval, ImpCap_LambdaByref, ImpCap_Block
+ };
+
+ ImplicitCaptureStyle ImpCaptureStyle;
+
+ class Capture {
+ // There are two categories of capture: capturing 'this', and capturing
+ // local variables. There are three ways to capture a local variable:
+ // capture by copy in the C++11 sense, capture by reference
+ // in the C++11 sense, and __block capture. Lambdas explicitly specify
+ // capture by copy or capture by reference. For blocks, __block capture
+ // applies to variables with that annotation, variables of reference type
+ // are captured by reference, and other variables are captured by copy.
+ enum CaptureKind {
+ Cap_This, Cap_ByCopy, Cap_ByRef, Cap_Block
+ };
+
+ // The variable being captured (if we are not capturing 'this'),
+ // and misc bits describing the capture.
+ llvm::PointerIntPair<VarDecl*, 2, CaptureKind> VarAndKind;
+
+ // Expression to initialize a field of the given type, and whether this
+ // is a nested capture; the expression is only required if we are
+ // capturing ByVal and the variable's type has a non-trivial
+ // copy constructor.
+ llvm::PointerIntPair<Expr*, 1, bool> CopyExprAndNested;
+
+ /// \brief The source location at which the first capture occurred.
+ SourceLocation Loc;
+
+ /// \brief The location of the ellipsis that expands a parameter pack.
+ SourceLocation EllipsisLoc;
+
+ /// \brief The type as it was captured, which is in effect the type of the
+ /// non-static data member that would hold the capture.
+ QualType CaptureType;
+
+ public:
+ Capture(VarDecl *Var, bool block, bool byRef, bool isNested,
+ SourceLocation Loc, SourceLocation EllipsisLoc,
+ QualType CaptureType, Expr *Cpy)
+ : VarAndKind(Var, block ? Cap_Block : byRef ? Cap_ByRef : Cap_ByCopy),
+ CopyExprAndNested(Cpy, isNested), Loc(Loc), EllipsisLoc(EllipsisLoc),
+ CaptureType(CaptureType){}
+
+ enum IsThisCapture { ThisCapture };
+ Capture(IsThisCapture, bool isNested, SourceLocation Loc,
+ QualType CaptureType, Expr *Cpy)
+ : VarAndKind(0, Cap_This), CopyExprAndNested(Cpy, isNested), Loc(Loc),
+ EllipsisLoc(), CaptureType(CaptureType) { }
+
+ bool isThisCapture() const { return VarAndKind.getInt() == Cap_This; }
+ bool isVariableCapture() const { return !isThisCapture(); }
+ bool isCopyCapture() const { return VarAndKind.getInt() == Cap_ByCopy; }
+ bool isReferenceCapture() const { return VarAndKind.getInt() == Cap_ByRef; }
+ bool isBlockCapture() const { return VarAndKind.getInt() == Cap_Block; }
+ bool isNested() { return CopyExprAndNested.getInt(); }
+
+ VarDecl *getVariable() const {
+ return VarAndKind.getPointer();
+ }
+
+ /// \brief Retrieve the location at which this variable was captured.
+ SourceLocation getLocation() const { return Loc; }
+
+ /// \brief Retrieve the source location of the ellipsis, whose presence
+ /// indicates that the capture is a pack expansion.
+ SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
+
+ /// \brief Retrieve the capture type for this capture, which is effectively
+ /// the type of the non-static data member in the lambda/block structure
+ /// that would store this capture.
+ QualType getCaptureType() const { return CaptureType; }
+
+ Expr *getCopyExpr() const {
+ return CopyExprAndNested.getPointer();
+ }
+ };
+
+ CapturingScopeInfo(DiagnosticsEngine &Diag, ImplicitCaptureStyle Style)
+ : FunctionScopeInfo(Diag), ImpCaptureStyle(Style), CXXThisCaptureIndex(0),
+ HasImplicitReturnType(false)
+ {}
+
+ /// CaptureMap - A map of captured variables to (index+1) into Captures.
+ llvm::DenseMap<VarDecl*, unsigned> CaptureMap;
+
+ /// CXXThisCaptureIndex - The (index+1) of the capture of 'this';
+ /// zero if 'this' is not captured.
+ unsigned CXXThisCaptureIndex;
+
+ /// Captures - The captures.
+ SmallVector<Capture, 4> Captures;
+
+ /// \brief Whether the target type of return statements in this context
+ /// is deduced (e.g. a lambda or block with omitted return type).
+ bool HasImplicitReturnType;
+
+ /// ReturnType - The target type of return statements in this context,
+ /// or null if unknown.
+ QualType ReturnType;
+
+ void addCapture(VarDecl *Var, bool isBlock, bool isByref, bool isNested,
+ SourceLocation Loc, SourceLocation EllipsisLoc,
+ QualType CaptureType, Expr *Cpy) {
+ Captures.push_back(Capture(Var, isBlock, isByref, isNested, Loc,
+ EllipsisLoc, CaptureType, Cpy));
+ CaptureMap[Var] = Captures.size();
+ }
+
+ void addThisCapture(bool isNested, SourceLocation Loc, QualType CaptureType,
+ Expr *Cpy) {
+ Captures.push_back(Capture(Capture::ThisCapture, isNested, Loc, CaptureType,
+ Cpy));
+ CXXThisCaptureIndex = Captures.size();
+ }
+
+ /// \brief Determine whether the C++ 'this' is captured.
+ bool isCXXThisCaptured() const { return CXXThisCaptureIndex != 0; }
+
+ /// \brief Retrieve the capture of C++ 'this', if it has been captured.
+ Capture &getCXXThisCapture() {
+ assert(isCXXThisCaptured() && "this has not been captured");
+ return Captures[CXXThisCaptureIndex - 1];
+ }
+
+ /// \brief Determine whether the given variable has been captured.
+ bool isCaptured(VarDecl *Var) const {
+ return CaptureMap.count(Var);
+ }
+
+ /// \brief Retrieve the capture of the given variable, if it has been
+ /// captured already.
+ Capture &getCapture(VarDecl *Var) {
+ assert(isCaptured(Var) && "Variable has not been captured");
+ return Captures[CaptureMap[Var] - 1];
+ }
+
+ const Capture &getCapture(VarDecl *Var) const {
+ llvm::DenseMap<VarDecl*, unsigned>::const_iterator Known
+ = CaptureMap.find(Var);
+ assert(Known != CaptureMap.end() && "Variable has not been captured");
+ return Captures[Known->second - 1];
+ }
+
+ static bool classof(const FunctionScopeInfo *FSI) {
+ return FSI->Kind == SK_Block || FSI->Kind == SK_Lambda;
+ }
+ static bool classof(const CapturingScopeInfo *BSI) { return true; }
+};
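+
+// Capture bookkeeping sketch (illustrative; CSI, Var, and Loc are assumed to
+// be in scope): a by-reference capture is recorded once and then looked up
+// through the CaptureMap.
+//
+//   if (!CSI.isCaptured(Var))
+//     CSI.addCapture(Var, /*isBlock=*/false, /*isByref=*/true,
+//                    /*isNested=*/false, Loc, SourceLocation(),
+//                    Var->getType(), /*Cpy=*/0);
+//   CapturingScopeInfo::Capture &Cap = CSI.getCapture(Var);
+//   assert(Cap.isReferenceCapture());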
+
+/// \brief Retains information about a block that is currently being parsed.
+class BlockScopeInfo : public CapturingScopeInfo {
+public:
+ BlockDecl *TheDecl;
+
+ /// TheScope - This is the scope for the block itself, which contains
+ /// arguments etc.
+ Scope *TheScope;
+
+ /// FunctionType - The function type of the block, if one was given.
+ /// Its return type may be BuiltinType::Dependent.
+ QualType FunctionType;
+
+ BlockScopeInfo(DiagnosticsEngine &Diag, Scope *BlockScope, BlockDecl *Block)
+ : CapturingScopeInfo(Diag, ImpCap_Block), TheDecl(Block),
+ TheScope(BlockScope)
+ {
+ Kind = SK_Block;
+ }
+
+ virtual ~BlockScopeInfo();
+
+ static bool classof(const FunctionScopeInfo *FSI) {
+ return FSI->Kind == SK_Block;
+ }
+ static bool classof(const BlockScopeInfo *BSI) { return true; }
+};
+
+class LambdaScopeInfo : public CapturingScopeInfo {
+public:
+ /// \brief The class that describes the lambda.
+ CXXRecordDecl *Lambda;
+
+ /// \brief The call operator representing the lambda.
+ CXXMethodDecl *CallOperator;
+
+ /// \brief Source range covering the lambda introducer [...].
+ SourceRange IntroducerRange;
+
+ /// \brief The number of captures in the \c Captures list that are
+ /// explicit captures.
+ unsigned NumExplicitCaptures;
+
+ /// \brief Whether this is a mutable lambda.
+ bool Mutable;
+
+ /// \brief Whether the (empty) parameter list is explicit.
+ bool ExplicitParams;
+
+ /// \brief Whether any of the capture expressions requires cleanups.
+ bool ExprNeedsCleanups;
+
+ /// \brief Variables used to index into by-copy array captures.
+ llvm::SmallVector<VarDecl *, 4> ArrayIndexVars;
+
+ /// \brief Offsets into the ArrayIndexVars array at which each capture starts
+ /// its list of array index variables.
+ llvm::SmallVector<unsigned, 4> ArrayIndexStarts;
+
+ LambdaScopeInfo(DiagnosticsEngine &Diag, CXXRecordDecl *Lambda,
+ CXXMethodDecl *CallOperator)
+ : CapturingScopeInfo(Diag, ImpCap_None), Lambda(Lambda),
+ CallOperator(CallOperator), NumExplicitCaptures(0), Mutable(false),
+ ExprNeedsCleanups(false)
+ {
+ Kind = SK_Lambda;
+ }
+
+ virtual ~LambdaScopeInfo();
+
+ /// \brief Note when all explicit captures have been added.
+ void finishedExplicitCaptures() {
+ NumExplicitCaptures = Captures.size();
+ }
+
+ static bool classof(const FunctionScopeInfo *FSI) {
+ return FSI->Kind == SK_Lambda;
+ }
+ static bool classof(const LambdaScopeInfo *BSI) { return true; }
+
+};
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Sema.h b/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
new file mode 100644
index 0000000..31c410a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Sema.h
@@ -0,0 +1,6798 @@
+//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Sema class, which performs semantic analysis and
+// builds ASTs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SEMA_H
+#define LLVM_CLANG_SEMA_SEMA_H
+
+#include "clang/Sema/Ownership.h"
+#include "clang/Sema/AnalysisBasedWarnings.h"
+#include "clang/Sema/IdentifierResolver.h"
+#include "clang/Sema/ObjCMethodList.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/Sema/LocInfoType.h"
+#include "clang/Sema/TypoCorrection.h"
+#include "clang/Sema/Weak.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/NSAPI.h"
+#include "clang/Lex/ModuleLoader.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TemplateKinds.h"
+#include "clang/Basic/TypeTraits.h"
+#include "clang/Basic/ExpressionTraits.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include <deque>
+#include <string>
+
+namespace llvm {
+ class APSInt;
+ template <typename ValueT> struct DenseMapInfo;
+ template <typename ValueT, typename ValueInfoT> class DenseSet;
+ class SmallBitVector;
+}
+
+namespace clang {
+ class ADLResult;
+ class ASTConsumer;
+ class ASTContext;
+ class ASTMutationListener;
+ class ASTReader;
+ class ASTWriter;
+ class ArrayType;
+ class AttributeList;
+ class BlockDecl;
+ class CXXBasePath;
+ class CXXBasePaths;
+ class CXXBindTemporaryExpr;
+ typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
+ class CXXConstructorDecl;
+ class CXXConversionDecl;
+ class CXXDestructorDecl;
+ class CXXFieldCollector;
+ class CXXMemberCallExpr;
+ class CXXMethodDecl;
+ class CXXScopeSpec;
+ class CXXTemporary;
+ class CXXTryStmt;
+ class CallExpr;
+ class ClassTemplateDecl;
+ class ClassTemplatePartialSpecializationDecl;
+ class ClassTemplateSpecializationDecl;
+ class CodeCompleteConsumer;
+ class CodeCompletionAllocator;
+ class CodeCompletionTUInfo;
+ class CodeCompletionResult;
+ class Decl;
+ class DeclAccessPair;
+ class DeclContext;
+ class DeclRefExpr;
+ class DeclaratorDecl;
+ class DeducedTemplateArgument;
+ class DependentDiagnostic;
+ class DesignatedInitExpr;
+ class Designation;
+ class EnumConstantDecl;
+ class Expr;
+ class ExtVectorType;
+ class ExternalSemaSource;
+ class FormatAttr;
+ class FriendDecl;
+ class FunctionDecl;
+ class FunctionProtoType;
+ class FunctionTemplateDecl;
+ class ImplicitConversionSequence;
+ class InitListExpr;
+ class InitializationKind;
+ class InitializationSequence;
+ class InitializedEntity;
+ class IntegerLiteral;
+ class LabelStmt;
+ class LambdaExpr;
+ class LangOptions;
+ class LocalInstantiationScope;
+ class LookupResult;
+ class MacroInfo;
+ class MultiLevelTemplateArgumentList;
+ class NamedDecl;
+ class NonNullAttr;
+ class ObjCCategoryDecl;
+ class ObjCCategoryImplDecl;
+ class ObjCCompatibleAliasDecl;
+ class ObjCContainerDecl;
+ class ObjCImplDecl;
+ class ObjCImplementationDecl;
+ class ObjCInterfaceDecl;
+ class ObjCIvarDecl;
+ template <class T> class ObjCList;
+ class ObjCMessageExpr;
+ class ObjCMethodDecl;
+ class ObjCPropertyDecl;
+ class ObjCProtocolDecl;
+ class OverloadCandidateSet;
+ class OverloadExpr;
+ class ParenListExpr;
+ class ParmVarDecl;
+ class Preprocessor;
+ class PseudoDestructorTypeStorage;
+ class PseudoObjectExpr;
+ class QualType;
+ class StandardConversionSequence;
+ class Stmt;
+ class StringLiteral;
+ class SwitchStmt;
+ class TargetAttributesSema;
+ class TemplateArgument;
+ class TemplateArgumentList;
+ class TemplateArgumentLoc;
+ class TemplateDecl;
+ class TemplateParameterList;
+ class TemplatePartialOrderingContext;
+ class TemplateTemplateParmDecl;
+ class Token;
+ class TypeAliasDecl;
+ class TypedefDecl;
+ class TypedefNameDecl;
+ class TypeLoc;
+ class UnqualifiedId;
+ class UnresolvedLookupExpr;
+ class UnresolvedMemberExpr;
+ class UnresolvedSetImpl;
+ class UnresolvedSetIterator;
+ class UsingDecl;
+ class UsingShadowDecl;
+ class ValueDecl;
+ class VarDecl;
+ class VisibilityAttr;
+ class VisibleDeclConsumer;
+ class IndirectFieldDecl;
+
+namespace sema {
+ class AccessedEntity;
+ class BlockScopeInfo;
+ class CompoundScopeInfo;
+ class DelayedDiagnostic;
+ class FunctionScopeInfo;
+ class LambdaScopeInfo;
+ class PossiblyUnreachableDiag;
+ class TemplateDeductionInfo;
+}
+
+// FIXME: No way to easily map from TemplateTypeParmTypes to
+// TemplateTypeParmDecls, so we have this horrible PointerUnion.
+typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
+ SourceLocation> UnexpandedParameterPack;
+
+/// Sema - This implements semantic analysis and AST building for C.
+class Sema {
+ Sema(const Sema&); // DO NOT IMPLEMENT
+ void operator=(const Sema&); // DO NOT IMPLEMENT
+ mutable const TargetAttributesSema* TheTargetAttributesSema;
+public:
+ typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
+ typedef OpaquePtr<TemplateName> TemplateTy;
+ typedef OpaquePtr<QualType> TypeTy;
+
+ OpenCLOptions OpenCLFeatures;
+ FPOptions FPFeatures;
+
+ const LangOptions &LangOpts;
+ Preprocessor &PP;
+ ASTContext &Context;
+ ASTConsumer &Consumer;
+ DiagnosticsEngine &Diags;
+ SourceManager &SourceMgr;
+
+ /// \brief Flag indicating whether or not to collect detailed statistics.
+ bool CollectStats;
+
+ /// \brief Source of additional semantic information.
+ ExternalSemaSource *ExternalSource;
+
+ /// \brief Code-completion consumer.
+ CodeCompleteConsumer *CodeCompleter;
+
+ /// CurContext - This is the current declaration context of parsing.
+ DeclContext *CurContext;
+
+ /// \brief Generally null except when we temporarily switch decl contexts,
+ /// like in \see ActOnObjCTemporaryExitContainerContext.
+ DeclContext *OriginalLexicalContext;
+
+ /// VAListTagName - The declaration name corresponding to __va_list_tag.
+ /// This is used as part of a hack to omit that class from ADL results.
+ DeclarationName VAListTagName;
+
+ /// PackContext - Manages the stack for #pragma pack. An alignment
+ /// of 0 indicates default alignment.
+ void *PackContext; // Really a "PragmaPackStack*"
+
+ bool MSStructPragmaOn; // True when #pragma ms_struct on
+
+ /// VisContext - Manages the stack for #pragma GCC visibility.
+ void *VisContext; // Really a "PragmaVisStack*"
+
+ /// ExprNeedsCleanups - True if the current evaluation context
+ /// requires cleanups to be run at its conclusion.
+ bool ExprNeedsCleanups;
+
+ /// ExprCleanupObjects - This is the stack of objects requiring
+ /// cleanup that are created by the current full expression. The
+ /// element type here is ExprWithCleanups::Object.
+ SmallVector<BlockDecl*, 8> ExprCleanupObjects;
+
+ llvm::SmallPtrSet<Expr*, 8> MaybeODRUseExprs;
+
+ /// \brief Stack containing information about each of the nested
+ /// function, block, and method scopes that are currently active.
+ ///
+ /// This array is never empty. Clients should ignore the first
+ /// element, which is used to cache a single FunctionScopeInfo
+ /// that's used to parse every top-level function.
+ SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
+
+ typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
+ &ExternalSemaSource::ReadExtVectorDecls, 2, 2>
+ ExtVectorDeclsType;
+
+ /// ExtVectorDecls - This is a list of all the extended vector types. This allows
+ /// us to associate a raw vector type with one of the ext_vector type names.
+ /// This is only necessary for issuing pretty diagnostics.
+ ExtVectorDeclsType ExtVectorDecls;
+
+ /// \brief The set of types for which we have already complained about the
+ /// definitions being hidden.
+ ///
+ /// This set is used to suppress redundant diagnostics.
+ llvm::SmallPtrSet<NamedDecl *, 4> HiddenDefinitions;
+
+ /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
+ OwningPtr<CXXFieldCollector> FieldCollector;
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
+
+ /// PureVirtualClassDiagSet - a set of class declarations which we have
+ /// emitted a list of pure virtual functions. Used to prevent emitting the
+ /// same list more than once.
+ OwningPtr<RecordDeclSetTy> PureVirtualClassDiagSet;
+
+ /// ParsingInitForAutoVars - a set of declarations with auto types for which
+ /// we are currently parsing the initializer.
+ llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
+
+ /// \brief A mapping from external names to the most recent
+ /// locally-scoped external declaration with that name.
+ ///
+ /// This map contains external declarations introduced in local
+ /// scope, e.g.,
+ ///
+ /// \code
+ /// void f() {
+ /// void foo(int, int);
+ /// }
+ /// \endcode
+ ///
+ /// Here, the name "foo" will be associated with the declaration of
+ /// "foo" within f. This name is not visible outside of
+ /// "f". However, we still find it in two cases:
+ ///
+ /// - If we are declaring another external with the name "foo", we
+ /// can find "foo" as a previous declaration, so that the types
+ /// of this external declaration can be checked for
+ /// compatibility.
+ ///
+ /// - If we would implicitly declare "foo" (e.g., due to a call to
+ /// "foo" in C when no prototype or definition is visible), then
+ /// we find this declaration of "foo" and complain that it is
+ /// not visible.
+ llvm::DenseMap<DeclarationName, NamedDecl *> LocallyScopedExternalDecls;
+
+ /// \brief Look for a locally scoped external declaration by the given name.
+ llvm::DenseMap<DeclarationName, NamedDecl *>::iterator
+ findLocallyScopedExternalDecl(DeclarationName Name);
+
+ typedef LazyVector<VarDecl *, ExternalSemaSource,
+ &ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
+ TentativeDefinitionsType;
+
+ /// \brief All the tentative definitions encountered in the TU.
+ TentativeDefinitionsType TentativeDefinitions;
+
+ typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
+ &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
+ UnusedFileScopedDeclsType;
+
+ /// \brief The set of file scoped decls seen so far that have not been used
+ /// and must warn if not used. Only contains the first declaration.
+ UnusedFileScopedDeclsType UnusedFileScopedDecls;
+
+ typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
+ &ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
+ DelegatingCtorDeclsType;
+
+ /// \brief All the delegating constructors seen so far in the file, used for
+ /// cycle detection at the end of the TU.
+ DelegatingCtorDeclsType DelegatingCtorDecls;
+
+ /// \brief All the overriding destructors seen during a class definition
+ /// (there could be multiple due to nested classes) that had their exception
+ /// spec checks delayed, plus the overridden destructor.
+ SmallVector<std::pair<const CXXDestructorDecl*,
+ const CXXDestructorDecl*>, 2>
+ DelayedDestructorExceptionSpecChecks;
+
+ /// \brief Callback to the parser to parse templated functions when needed.
+ typedef void LateTemplateParserCB(void *P, const FunctionDecl *FD);
+ LateTemplateParserCB *LateTemplateParser;
+ void *OpaqueParser;
+
+ void SetLateTemplateParser(LateTemplateParserCB *LTP, void *P) {
+ LateTemplateParser = LTP;
+ OpaqueParser = P;
+ }
+
+ class DelayedDiagnostics;
+
+ class ParsingDeclState {
+ unsigned SavedStackSize;
+ friend class Sema::DelayedDiagnostics;
+ };
+
+ class ProcessingContextState {
+ unsigned SavedParsingDepth;
+ unsigned SavedActiveStackBase;
+ friend class Sema::DelayedDiagnostics;
+ };
+
+ /// A class which encapsulates the logic for delaying diagnostics
+ /// during parsing and other processing.
+ class DelayedDiagnostics {
+ /// \brief The stack of diagnostics that were delayed due to being
+ /// produced during the parsing of a declaration.
+ sema::DelayedDiagnostic *Stack;
+
+ /// \brief The number of objects on the delayed-diagnostics stack.
+ unsigned StackSize;
+
+ /// \brief The current capacity of the delayed-diagnostics stack.
+ unsigned StackCapacity;
+
+ /// \brief The index of the first "active" delayed diagnostic in
+ /// the stack. When parsing class definitions, we ignore active
+ /// delayed diagnostics from the surrounding context.
+ unsigned ActiveStackBase;
+
+ /// \brief The depth of the declarations we're currently parsing.
+ /// This gets saved and reset whenever we enter a class definition.
+ unsigned ParsingDepth;
+
+ public:
+ DelayedDiagnostics() : Stack(0), StackSize(0), StackCapacity(0),
+ ActiveStackBase(0), ParsingDepth(0) {}
+
+ ~DelayedDiagnostics() {
+ delete[] reinterpret_cast<char*>(Stack);
+ }
+
+ /// Adds a delayed diagnostic.
+ void add(const sema::DelayedDiagnostic &diag);
+
+ /// Determines whether diagnostics should be delayed.
+ bool shouldDelayDiagnostics() { return ParsingDepth > 0; }
+
+ /// Observe that we've started parsing a declaration. Access and
+ /// deprecation diagnostics will be delayed; when the declaration
+ /// is completed, all active delayed diagnostics will be evaluated
+ /// in its context, and then active diagnostics stack will be
+ /// popped down to the saved depth.
+ ParsingDeclState pushParsingDecl() {
+ ParsingDepth++;
+
+ ParsingDeclState state;
+ state.SavedStackSize = StackSize;
+ return state;
+ }
+
+ /// Observe that we've completed parsing a declaration.
+ static void popParsingDecl(Sema &S, ParsingDeclState state, Decl *decl);
+
+ /// Observe that we've started processing a different context, the
+ /// contents of which are semantically separate from the
+ /// declarations it may lexically appear in. This sets aside the
+ /// current stack of active diagnostics and starts afresh.
+ ProcessingContextState pushContext() {
+ assert(StackSize >= ActiveStackBase);
+
+ ProcessingContextState state;
+ state.SavedParsingDepth = ParsingDepth;
+ state.SavedActiveStackBase = ActiveStackBase;
+
+ ActiveStackBase = StackSize;
+ ParsingDepth = 0;
+
+ return state;
+ }
+
+ /// Observe that we've stopped processing a context. This
+ /// restores the previous stack of active diagnostics.
+ void popContext(ProcessingContextState state) {
+ assert(ActiveStackBase == StackSize);
+ assert(ParsingDepth == 0);
+ ActiveStackBase = state.SavedActiveStackBase;
+ ParsingDepth = state.SavedParsingDepth;
+ }
+ } DelayedDiagnostics;
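+
+  // Parser-side sketch (illustrative; S is the Sema instance and D the
+  // completed declaration):
+  //
+  //   Sema::ParsingDeclState State = S.DelayedDiagnostics.pushParsingDecl();
+  //   // ... parse the declaration; availability/deprecation diagnostics
+  //   // produced in the meantime are delayed ...
+  //   Sema::DelayedDiagnostics::popParsingDecl(S, State, D);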
+
+ /// A RAII object to temporarily push a declaration context.
+ class ContextRAII {
+ private:
+ Sema &S;
+ DeclContext *SavedContext;
+ ProcessingContextState SavedContextState;
+
+ public:
+ ContextRAII(Sema &S, DeclContext *ContextToPush)
+ : S(S), SavedContext(S.CurContext),
+ SavedContextState(S.DelayedDiagnostics.pushContext())
+ {
+ assert(ContextToPush && "pushing null context");
+ S.CurContext = ContextToPush;
+ }
+
+ void pop() {
+ if (!SavedContext) return;
+ S.CurContext = SavedContext;
+ S.DelayedDiagnostics.popContext(SavedContextState);
+ SavedContext = 0;
+ }
+
+ ~ContextRAII() {
+ pop();
+ }
+ };
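+
+  // Usage sketch (illustrative; SemaRef and NewDC are assumed to be in
+  // scope): enter a declaration context for the duration of a block and have
+  // the previous context restored automatically.
+  //
+  //   {
+  //     Sema::ContextRAII SavedContext(SemaRef, NewDC);
+  //     // ... act within NewDC ...
+  //   } // previous CurContext and delayed-diagnostic state restored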
+
+ /// WeakUndeclaredIdentifiers - Identifiers contained in a #pragma weak
+ /// before being declared. Rare; each may alias another identifier,
+ /// declared or undeclared.
+ llvm::DenseMap<IdentifierInfo*,WeakInfo> WeakUndeclaredIdentifiers;
+
+ /// ExtnameUndeclaredIdentifiers - Identifiers contained in
+ /// #pragma redefine_extname before being declared. Used in Solaris system headers
+ /// to define functions that occur in multiple standards to call the version
+ /// in the currently selected standard.
+ llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
+
+
+ /// \brief Load weak undeclared identifiers from the external source.
+ void LoadExternalWeakUndeclaredIdentifiers();
+
+ /// WeakTopLevelDecl - Translation-unit scoped declarations generated by
+ /// #pragma weak during processing of other Decls.
+ /// I couldn't figure out a clean way to generate these in-line, so
+ /// we store them here and handle separately -- which is a hack.
+ /// It would be best to refactor this.
+ SmallVector<Decl*,2> WeakTopLevelDecl;
+
+ IdentifierResolver IdResolver;
+
+ /// Translation Unit Scope - useful to Objective-C actions that need
+ /// to lookup file scope declarations in the "ordinary" C decl namespace.
+ /// For example, user-defined classes, built-in "id" type, etc.
+ Scope *TUScope;
+
+ /// \brief The C++ "std" namespace, where the standard library resides.
+ LazyDeclPtr StdNamespace;
+
+ /// \brief The C++ "std::bad_alloc" class, which is defined by the C++
+ /// standard library.
+ LazyDeclPtr StdBadAlloc;
+
+ /// \brief The C++ "std::initializer_list" template, which is defined in
+ /// <initializer_list>.
+ ClassTemplateDecl *StdInitializerList;
+
+ /// \brief The C++ "type_info" declaration, which is defined in <typeinfo>.
+ RecordDecl *CXXTypeInfoDecl;
+
+ /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files.
+ RecordDecl *MSVCGuidDecl;
+
+ /// \brief Caches identifiers/selectors for NSFoundation APIs.
+ llvm::OwningPtr<NSAPI> NSAPIObj;
+
+ /// \brief The declaration of the Objective-C NSNumber class.
+ ObjCInterfaceDecl *NSNumberDecl;
+
+ /// \brief The Objective-C NSNumber methods used to create NSNumber literals.
+ ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
+
+ /// \brief The declaration of the Objective-C NSArray class.
+ ObjCInterfaceDecl *NSArrayDecl;
+
+ /// \brief The declaration of the arrayWithObjects:count: method.
+ ObjCMethodDecl *ArrayWithObjectsMethod;
+
+ /// \brief The declaration of the Objective-C NSDictionary class.
+ ObjCInterfaceDecl *NSDictionaryDecl;
+
+ /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method.
+ ObjCMethodDecl *DictionaryWithObjectsMethod;
+
+ /// \brief id<NSCopying> type.
+ QualType QIDNSCopying;
+
+ /// A flag to remember whether the implicit forms of operator new and delete
+ /// have been declared.
+ bool GlobalNewDeleteDeclared;
+
+ /// A flag that is set when parsing a -dealloc method and no [super dealloc]
+ /// call was found yet.
+ bool ObjCShouldCallSuperDealloc;
+ /// A flag that is set when parsing a -finalize method and no [super finalize]
+ /// call was found yet.
+ bool ObjCShouldCallSuperFinalize;
+
+ /// \brief Describes how the expressions currently being parsed are
+ /// evaluated at run-time, if at all.
+ enum ExpressionEvaluationContext {
+ /// \brief The current expression and its subexpressions occur within an
+ /// unevaluated operand (C++11 [expr]p7), such as the subexpression of
+ /// \c sizeof, where the type of the expression may be significant but
+ /// no code will be generated to evaluate the value of the expression at
+ /// run time.
+ Unevaluated,
+
+ /// \brief The current context is "potentially evaluated" in C++11 terms,
+ /// but the expression is evaluated at compile-time (like the values of
+ /// cases in a switch statement).
+ ConstantEvaluated,
+
+ /// \brief The current expression is potentially evaluated at run time,
+ /// which means that code may be generated to evaluate the value of the
+ /// expression at run time.
+ PotentiallyEvaluated,
+
+ /// \brief The current expression is potentially evaluated, but any
+ /// declarations referenced inside that expression are only used if
+ /// in fact the current expression is used.
+ ///
+ /// This value is used when parsing default function arguments, for which
+ /// we would like to provide diagnostics (e.g., passing non-POD arguments
+ /// through varargs) but do not want to mark declarations as "referenced"
+ /// until the default argument is used.
+ PotentiallyEvaluatedIfUsed
+ };
+
+ /// \brief Data structure used to record current or nested
+ /// expression evaluation contexts.
+ struct ExpressionEvaluationContextRecord {
+ /// \brief The expression evaluation context.
+ ExpressionEvaluationContext Context;
+
+ /// \brief Whether the enclosing context needed a cleanup.
+ bool ParentNeedsCleanups;
+
+ /// \brief Whether we are in a decltype expression.
+ bool IsDecltype;
+
+ /// \brief The number of active cleanup objects when we entered
+ /// this expression evaluation context.
+ unsigned NumCleanupObjects;
+
+ llvm::SmallPtrSet<Expr*, 8> SavedMaybeODRUseExprs;
+
+ /// \brief The lambdas that are present within this context, if it
+ /// is indeed an unevaluated context.
+ llvm::SmallVector<LambdaExpr *, 2> Lambdas;
+
+ /// \brief The declaration that provides context for the lambda expression
+ /// if the normal declaration context does not suffice, e.g., in a
+ /// default function argument.
+ Decl *LambdaContextDecl;
+
+ /// \brief The context information used to mangle lambda expressions
+ /// within this context.
+ ///
+ /// This mangling information is allocated lazily, since most contexts
+ /// do not have lambda expressions.
+ LambdaMangleContext *LambdaMangle;
+
+ /// \brief If we are processing a decltype type, a set of call expressions
+ /// for which we have deferred checking the completeness of the return type.
+ llvm::SmallVector<CallExpr*, 8> DelayedDecltypeCalls;
+
+ /// \brief If we are processing a decltype type, a set of temporary binding
+ /// expressions for which we have deferred checking the destructor.
+ llvm::SmallVector<CXXBindTemporaryExpr*, 8> DelayedDecltypeBinds;
+
+ ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
+ unsigned NumCleanupObjects,
+ bool ParentNeedsCleanups,
+ Decl *LambdaContextDecl,
+ bool IsDecltype)
+ : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups),
+ IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects),
+ LambdaContextDecl(LambdaContextDecl), LambdaMangle() { }
+
+ ~ExpressionEvaluationContextRecord() {
+ delete LambdaMangle;
+ }
+
+ /// \brief Retrieve the mangling context for lambdas.
+ LambdaMangleContext &getLambdaMangleContext() {
+ assert(LambdaContextDecl && "Need to have a lambda context declaration");
+ if (!LambdaMangle)
+ LambdaMangle = new LambdaMangleContext;
+ return *LambdaMangle;
+ }
+ };
+
+ /// A stack of expression evaluation contexts.
+ SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
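+
+  // A record is pushed when entering a new evaluation context, capturing the
+  // current cleanup state; a sketch of what Sema-internal code might do:
+  //
+  //   ExprEvalContexts.push_back(
+  //       ExpressionEvaluationContextRecord(Unevaluated,
+  //                                         ExprCleanupObjects.size(),
+  //                                         ExprNeedsCleanups,
+  //                                         /*LambdaContextDecl=*/0,
+  //                                         /*IsDecltype=*/false));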
+
+ /// SpecialMemberOverloadResult - The overloading result for a special member
+ /// function.
+ ///
+ /// This is basically a wrapper around PointerIntPair. The lowest bit of the
+ /// integer is used to determine whether we have a parameter qualification
+ /// match, the second-lowest is whether we had success in resolving the
+ /// overload to a unique non-deleted function.
+ ///
+ /// The ConstParamMatch bit represents whether, when looking up a copy
+ /// constructor or assignment operator, we found a potential copy
+ /// constructor/assignment operator whose first parameter is const-qualified.
+ /// This is used for determining parameter types of other objects and is
+ /// utterly meaningless on other types of special members.
+ class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
+ public:
+ enum Kind {
+ NoMemberOrDeleted,
+ Ambiguous,
+ SuccessNonConst,
+ SuccessConst
+ };
+
+ private:
+ llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
+
+ public:
+ SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
+ : FastFoldingSetNode(ID)
+ {}
+
+ CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
+ void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
+
+ Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
+ void setKind(Kind K) { Pair.setInt(K); }
+
+ bool hasSuccess() const { return getKind() >= SuccessNonConst; }
+ bool hasConstParamMatch() const { return getKind() == SuccessConst; }
+ };
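+
+  // Interpretation sketch (illustrative; Result points at a cached entry):
+  //
+  //   if (Result->getKind() == SpecialMemberOverloadResult::Ambiguous)
+  //     ; // report the ambiguity
+  //   else if (Result->hasSuccess()) {
+  //     CXXMethodDecl *MD = Result->getMethod();
+  //     (void)MD;
+  //   }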
+
+ /// \brief A cache of special member function overload resolution results
+ /// for C++ records.
+ llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;
+
+ /// \brief The kind of translation unit we are processing.
+ ///
+ /// When we're processing a complete translation unit, Sema will perform
+ /// end-of-translation-unit semantic tasks (such as creating
+ /// initializers for tentative definitions in C) once parsing has
+ /// completed. Modules and precompiled headers perform different kinds of
+ /// checks.
+ TranslationUnitKind TUKind;
+
+ llvm::BumpPtrAllocator BumpAlloc;
+
+ /// \brief The number of SFINAE diagnostics that have been trapped.
+ unsigned NumSFINAEErrors;
+
+ typedef llvm::DenseMap<ParmVarDecl *, SmallVector<ParmVarDecl *, 1> >
+ UnparsedDefaultArgInstantiationsMap;
+
+ /// \brief A mapping from parameters with unparsed default arguments to the
+ /// set of instantiations of each parameter.
+ ///
+ /// This mapping is a temporary data structure used when parsing
+ /// nested class templates or nested classes of class templates,
+ /// where we might end up instantiating an inner class before the
+ /// default arguments of its methods have been parsed.
+ UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
+
+ // Contains the locations of the beginning of unparsed default
+ // argument locations.
+ llvm::DenseMap<ParmVarDecl *,SourceLocation> UnparsedDefaultArgLocs;
+
+ /// UndefinedInternals - all the used, undefined objects with
+ /// internal linkage in this translation unit.
+ llvm::DenseMap<NamedDecl*, SourceLocation> UndefinedInternals;
+
+ typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
+ typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
+
+ /// Method Pool - allows efficient lookup when typechecking messages to "id".
+ /// We need to maintain a list, since selectors can have differing signatures
+ /// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
+ /// of selectors are "overloaded").
+ GlobalMethodPool MethodPool;
+
+ /// Method selectors used in a @selector expression. Used for implementation
+ /// of -Wselector.
+ llvm::DenseMap<Selector, SourceLocation> ReferencedSelectors;
+
+ void ReadMethodPool(Selector Sel);
+
+ /// Private Helper predicate to check for 'self'.
+ bool isSelfExpr(Expr *RExpr);
+
+ /// \brief Cause the active diagnostic on the DiagnosticsEngine to be
+ /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
+ /// should not be used elsewhere.
+ void EmitCurrentDiagnostic(unsigned DiagID);
+
+public:
+ Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
+ TranslationUnitKind TUKind = TU_Complete,
+ CodeCompleteConsumer *CompletionConsumer = 0);
+ ~Sema();
+
+ /// \brief Perform initialization that occurs after the parser has been
+ /// initialized but before it parses anything.
+ void Initialize();
+
+ const LangOptions &getLangOpts() const { return LangOpts; }
+ OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
+ FPOptions &getFPOptions() { return FPFeatures; }
+
+ DiagnosticsEngine &getDiagnostics() const { return Diags; }
+ SourceManager &getSourceManager() const { return SourceMgr; }
+ const TargetAttributesSema &getTargetAttributesSema() const;
+ Preprocessor &getPreprocessor() const { return PP; }
+ ASTContext &getASTContext() const { return Context; }
+ ASTConsumer &getASTConsumer() const { return Consumer; }
+ ASTMutationListener *getASTMutationListener() const;
+
+ void PrintStats() const;
+
+ /// \brief Helper class that creates diagnostics with optional
+ /// template instantiation stacks.
+ ///
+ /// This class provides a wrapper around the basic DiagnosticBuilder
+ /// class that emits diagnostics. SemaDiagnosticBuilder is
+ /// responsible for emitting the diagnostic (as DiagnosticBuilder
+ /// does) and, if the diagnostic comes from inside a template
+ /// instantiation, printing the template instantiation stack as
+ /// well.
+ class SemaDiagnosticBuilder : public DiagnosticBuilder {
+ Sema &SemaRef;
+ unsigned DiagID;
+
+ public:
+ SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
+ : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
+
+ ~SemaDiagnosticBuilder() {
+ // If we aren't active, there is nothing to do.
+ if (!isActive()) return;
+
+ // Otherwise, we need to emit the diagnostic. First flush the underlying
+ // DiagnosticBuilder data, and clear the diagnostic builder itself so it
+ // won't emit the diagnostic in its own destructor.
+ //
+ // This seems wasteful, in that as written the DiagnosticBuilder dtor will
+ // do its own needless checks to see if the diagnostic needs to be
+ // emitted. However, because we take care to ensure that the builder
+ // objects never escape, a sufficiently smart compiler will be able to
+ // eliminate that code.
+ FlushCounts();
+ Clear();
+
+ // Dispatch to Sema to emit the diagnostic.
+ SemaRef.EmitCurrentDiagnostic(DiagID);
+ }
+ };
+
+ /// \brief Emit a diagnostic.
+ SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
+ DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
+ return SemaDiagnosticBuilder(DB, *this, DiagID);
+ }
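+ // Usage sketch: the usual pattern for emitting a diagnostic from inside
+ // Sema; 'diag::err_some_semantic_error' is a placeholder for a real ID from
+ // the diagnostic .td files, and the streamed arguments are illustrative.
+ //
+ //   Diag(D->getLocation(), diag::err_some_semantic_error)
+ //     << D->getDeclName() << SourceRange(StartLoc, EndLoc);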
+
+ /// \brief Emit a partial diagnostic.
+ SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
+
+ /// \brief Build a partial diagnostic.
+ PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
+
+ bool findMacroSpelling(SourceLocation &loc, StringRef name);
+
+ /// \brief Get a string to suggest for zero-initialization of a type.
+ const char *getFixItZeroInitializerForType(QualType T) const;
+
+ ExprResult Owned(Expr* E) { return E; }
+ ExprResult Owned(ExprResult R) { return R; }
+ StmtResult Owned(Stmt* S) { return S; }
+
+ void ActOnEndOfTranslationUnit();
+
+ void CheckDelegatingCtorCycles();
+
+ Scope *getScopeForContext(DeclContext *Ctx);
+
+ void PushFunctionScope();
+ void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
+ void PushLambdaScope(CXXRecordDecl *Lambda, CXXMethodDecl *CallOperator);
+ void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP =0,
+ const Decl *D = 0, const BlockExpr *blkExpr = 0);
+
+ sema::FunctionScopeInfo *getCurFunction() const {
+ return FunctionScopes.back();
+ }
+
+ void PushCompoundScope();
+ void PopCompoundScope();
+
+ sema::CompoundScopeInfo &getCurCompoundScope() const;
+
+ bool hasAnyUnrecoverableErrorsInThisFunction() const;
+
+ /// \brief Retrieve the current block, if any.
+ sema::BlockScopeInfo *getCurBlock();
+
+ /// \brief Retrieve the current lambda expression, if any.
+ sema::LambdaScopeInfo *getCurLambda();
+
+ /// WeakTopLevelDecls - access to #pragma weak-generated Decls
+ SmallVector<Decl*,2> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
+
+ //===--------------------------------------------------------------------===//
+ // Type Analysis / Processing: SemaType.cpp.
+ //
+
+ QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs);
+ QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVR) {
+ return BuildQualifiedType(T, Loc, Qualifiers::fromCVRMask(CVR));
+ }
+ QualType BuildPointerType(QualType T,
+ SourceLocation Loc, DeclarationName Entity);
+ QualType BuildReferenceType(QualType T, bool LValueRef,
+ SourceLocation Loc, DeclarationName Entity);
+ QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
+ Expr *ArraySize, unsigned Quals,
+ SourceRange Brackets, DeclarationName Entity);
+ QualType BuildExtVectorType(QualType T, Expr *ArraySize,
+ SourceLocation AttrLoc);
+ QualType BuildFunctionType(QualType T,
+ QualType *ParamTypes, unsigned NumParamTypes,
+ bool Variadic, bool HasTrailingReturn,
+ unsigned Quals, RefQualifierKind RefQualifier,
+ SourceLocation Loc, DeclarationName Entity,
+ FunctionType::ExtInfo Info);
+ QualType BuildMemberPointerType(QualType T, QualType Class,
+ SourceLocation Loc,
+ DeclarationName Entity);
+ QualType BuildBlockPointerType(QualType T,
+ SourceLocation Loc, DeclarationName Entity);
+ QualType BuildParenType(QualType T);
+ QualType BuildAtomicType(QualType T, SourceLocation Loc);
+
+ TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
+ TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
+ TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
+ TypeSourceInfo *ReturnTypeInfo);
+ /// \brief Package the given type and TSI into a ParsedType.
+ ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
+ DeclarationNameInfo GetNameForDeclarator(Declarator &D);
+ DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
+ static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = 0);
+ bool CheckSpecifiedExceptionType(QualType T, const SourceRange &Range);
+ bool CheckDistantExceptionSpec(QualType T);
+ bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
+ bool CheckEquivalentExceptionSpec(
+ const FunctionProtoType *Old, SourceLocation OldLoc,
+ const FunctionProtoType *New, SourceLocation NewLoc);
+ bool CheckEquivalentExceptionSpec(
+ const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
+ const FunctionProtoType *Old, SourceLocation OldLoc,
+ const FunctionProtoType *New, SourceLocation NewLoc,
+ bool *MissingExceptionSpecification = 0,
+ bool *MissingEmptyExceptionSpecification = 0,
+ bool AllowNoexceptAllMatchWithNoSpec = false,
+ bool IsOperatorNew = false);
+ bool CheckExceptionSpecSubset(
+ const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
+ const FunctionProtoType *Superset, SourceLocation SuperLoc,
+ const FunctionProtoType *Subset, SourceLocation SubLoc);
+ bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
+ const FunctionProtoType *Target, SourceLocation TargetLoc,
+ const FunctionProtoType *Source, SourceLocation SourceLoc);
+
+ TypeResult ActOnTypeName(Scope *S, Declarator &D);
+
+ /// \brief The parser has parsed the context-sensitive type 'instancetype'
+ /// in an Objective-C message declaration. Return the appropriate type.
+ ParsedType ActOnObjCInstanceType(SourceLocation Loc);
+
+ bool RequireCompleteType(SourceLocation Loc, QualType T,
+ const PartialDiagnostic &PD,
+ std::pair<SourceLocation, PartialDiagnostic> Note);
+ bool RequireCompleteType(SourceLocation Loc, QualType T,
+ const PartialDiagnostic &PD);
+ bool RequireCompleteType(SourceLocation Loc, QualType T,
+ unsigned DiagID);
+ bool RequireCompleteExprType(Expr *E, const PartialDiagnostic &PD,
+ std::pair<SourceLocation,
+ PartialDiagnostic> Note);
+
+ bool RequireLiteralType(SourceLocation Loc, QualType T,
+ const PartialDiagnostic &PD);
+
+ QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
+ const CXXScopeSpec &SS, QualType T);
+
+ QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
+ QualType BuildDecltypeType(Expr *E, SourceLocation Loc);
+ QualType BuildUnaryTransformType(QualType BaseType,
+ UnaryTransformType::UTTKind UKind,
+ SourceLocation Loc);
+
+ //===--------------------------------------------------------------------===//
+ // Symbol table / Decl tracking callbacks: SemaDecl.cpp.
+ //
+
+ /// List of decls defined in a function prototype. This contains EnumConstants
+ /// that incorrectly end up in translation unit scope because there is no
+ /// function to pin them on. ActOnFunctionDeclarator reads this list and patches
+ /// them into the FunctionDecl.
+ std::vector<NamedDecl*> DeclsInPrototypeScope;
+ /// Nonzero if we are currently parsing a function declarator. This is a counter
+ /// as opposed to a boolean so we can deal with nested function declarators
+ /// such as:
+ /// void f(void (*g)(), ...)
+ unsigned InFunctionDeclarator;
+
+ DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = 0);
+
+ void DiagnoseUseOfUnimplementedSelectors();
+
+ ParsedType getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
+ Scope *S, CXXScopeSpec *SS = 0,
+ bool isClassName = false,
+ bool HasTrailingDot = false,
+ ParsedType ObjectType = ParsedType(),
+ bool IsCtorOrDtorName = false,
+ bool WantNontrivialTypeSourceInfo = false,
+ IdentifierInfo **CorrectedII = 0);
+ TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
+ bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
+ bool DiagnoseUnknownTypeName(const IdentifierInfo &II,
+ SourceLocation IILoc,
+ Scope *S,
+ CXXScopeSpec *SS,
+ ParsedType &SuggestedType);
+
+ /// \brief Describes the result of the name lookup and resolution performed
+ /// by \c ClassifyName().
+ enum NameClassificationKind {
+ NC_Unknown,
+ NC_Error,
+ NC_Keyword,
+ NC_Type,
+ NC_Expression,
+ NC_NestedNameSpecifier,
+ NC_TypeTemplate,
+ NC_FunctionTemplate
+ };
+
+ class NameClassification {
+ NameClassificationKind Kind;
+ ExprResult Expr;
+ TemplateName Template;
+ ParsedType Type;
+ const IdentifierInfo *Keyword;
+
+ explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
+
+ public:
+ NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
+
+ NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
+
+ NameClassification(const IdentifierInfo *Keyword)
+ : Kind(NC_Keyword), Keyword(Keyword) { }
+
+ static NameClassification Error() {
+ return NameClassification(NC_Error);
+ }
+
+ static NameClassification Unknown() {
+ return NameClassification(NC_Unknown);
+ }
+
+ static NameClassification NestedNameSpecifier() {
+ return NameClassification(NC_NestedNameSpecifier);
+ }
+
+ static NameClassification TypeTemplate(TemplateName Name) {
+ NameClassification Result(NC_TypeTemplate);
+ Result.Template = Name;
+ return Result;
+ }
+
+ static NameClassification FunctionTemplate(TemplateName Name) {
+ NameClassification Result(NC_FunctionTemplate);
+ Result.Template = Name;
+ return Result;
+ }
+
+ NameClassificationKind getKind() const { return Kind; }
+
+ ParsedType getType() const {
+ assert(Kind == NC_Type);
+ return Type;
+ }
+
+ ExprResult getExpression() const {
+ assert(Kind == NC_Expression);
+ return Expr;
+ }
+
+ TemplateName getTemplateName() const {
+ assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate);
+ return Template;
+ }
+
+ TemplateNameKind getTemplateNameKind() const {
+ assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate);
+ return Kind == NC_TypeTemplate? TNK_Type_template : TNK_Function_template;
+ }
+};
+
+ /// \brief Perform name lookup on the given name, classifying it based on
+ /// the results of name lookup and the following token.
+ ///
+ /// This routine is used by the parser to resolve identifiers and help direct
+ /// parsing. When the identifier cannot be found, this routine will attempt
+ /// to correct the typo and classify based on the resulting name.
+ ///
+ /// \param S The scope in which we're performing name lookup.
+ ///
+ /// \param SS The nested-name-specifier that precedes the name.
+ ///
+ /// \param Name The identifier. If typo correction finds an alternative name,
+ /// this pointer parameter will be updated accordingly.
+ ///
+ /// \param NameLoc The location of the identifier.
+ ///
+ /// \param NextToken The token following the identifier. Used to help
+ /// disambiguate the name.
+ NameClassification ClassifyName(Scope *S,
+ CXXScopeSpec &SS,
+ IdentifierInfo *&Name,
+ SourceLocation NameLoc,
+ const Token &NextToken);
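+ // Usage sketch: how a caller (for instance the parser) might consume the
+ // classification; 'Actions' is an assumed Sema reference and 'S', 'SS',
+ // 'Name', 'NameLoc' and 'Next' are placeholders for the caller's state.
+ //
+ //   Sema::NameClassification Classification =
+ //       Actions.ClassifyName(S, SS, Name, NameLoc, Next);
+ //   switch (Classification.getKind()) {
+ //   case Sema::NC_Type:       /* use Classification.getType() */       break;
+ //   case Sema::NC_Expression: /* use Classification.getExpression() */ break;
+ //   case Sema::NC_TypeTemplate:
+ //   case Sema::NC_FunctionTemplate:
+ //     /* use Classification.getTemplateName() */                       break;
+ //   default:                                                           break;
+ //   }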
+
+ Decl *ActOnDeclarator(Scope *S, Declarator &D);
+
+ Decl *HandleDeclarator(Scope *S, Declarator &D,
+ MultiTemplateParamsArg TemplateParameterLists);
+ void RegisterLocallyScopedExternCDecl(NamedDecl *ND,
+ const LookupResult &Previous,
+ Scope *S);
+ bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
+ bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
+ DeclarationName Name,
+ SourceLocation Loc);
+ void DiagnoseFunctionSpecifiers(Declarator& D);
+ void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R);
+ void CheckShadow(Scope *S, VarDecl *D);
+ void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
+ void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
+ NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
+ TypeSourceInfo *TInfo,
+ LookupResult &Previous);
+ NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
+ LookupResult &Previous, bool &Redeclaration);
+ NamedDecl* ActOnVariableDeclarator(Scope* S, Declarator& D, DeclContext* DC,
+ TypeSourceInfo *TInfo,
+ LookupResult &Previous,
+ MultiTemplateParamsArg TemplateParamLists);
+ // Returns true if the variable declaration is a redeclaration
+ bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
+ void CheckCompleteVariableDeclaration(VarDecl *var);
+ void ActOnStartFunctionDeclarator();
+ void ActOnEndFunctionDeclarator();
+ NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
+ TypeSourceInfo *TInfo,
+ LookupResult &Previous,
+ MultiTemplateParamsArg TemplateParamLists,
+ bool &AddToScope);
+ bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
+
+ bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
+ bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
+
+ void DiagnoseHiddenVirtualMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
+ // Returns true if the function declaration is a redeclaration
+ bool CheckFunctionDeclaration(Scope *S,
+ FunctionDecl *NewFD, LookupResult &Previous,
+ bool IsExplicitSpecialization);
+ void CheckMain(FunctionDecl *FD, const DeclSpec &D);
+ Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
+ ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
+ SourceLocation Loc,
+ QualType T);
+ ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation NameLoc, IdentifierInfo *Name,
+ QualType T, TypeSourceInfo *TSInfo,
+ StorageClass SC, StorageClass SCAsWritten);
+ void ActOnParamDefaultArgument(Decl *param,
+ SourceLocation EqualLoc,
+ Expr *defarg);
+ void ActOnParamUnparsedDefaultArgument(Decl *param,
+ SourceLocation EqualLoc,
+ SourceLocation ArgLoc);
+ void ActOnParamDefaultArgumentError(Decl *param);
+ bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
+ SourceLocation EqualLoc);
+
+ void CheckSelfReference(Decl *OrigDecl, Expr *E);
+ void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
+ bool TypeMayContainAuto);
+ void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto);
+ void ActOnInitializerError(Decl *Dcl);
+ void ActOnCXXForRangeDecl(Decl *D);
+ void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
+ void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
+ void FinalizeDeclaration(Decl *D);
+ DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
+ Decl **Group,
+ unsigned NumDecls);
+ DeclGroupPtrTy BuildDeclaratorGroup(Decl **Group, unsigned NumDecls,
+ bool TypeMayContainAuto = true);
+ void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
+ SourceLocation LocAfterDecls);
+ void CheckForFunctionRedefinition(FunctionDecl *FD);
+ Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D);
+ Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D);
+ void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
+
+ void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
+ Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
+ Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
+
+ /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
+ /// attribute for which parsing is delayed.
+ void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
+
+ /// \brief Diagnose any unused parameters in the given sequence of
+ /// ParmVarDecl pointers.
+ void DiagnoseUnusedParameters(ParmVarDecl * const *Begin,
+ ParmVarDecl * const *End);
+
+ /// \brief Diagnose when the parameters or return value of a function or
+ /// Objective-C method definition are passed by value and are larger than a
+ /// specified threshold.
+ void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin,
+ ParmVarDecl * const *End,
+ QualType ReturnTy,
+ NamedDecl *D);
+
+ void DiagnoseInvalidJumps(Stmt *Body);
+ Decl *ActOnFileScopeAsmDecl(Expr *expr,
+ SourceLocation AsmLoc,
+ SourceLocation RParenLoc);
+
+ /// \brief The parser has processed a module import declaration.
+ ///
+ /// \param AtLoc The location of the '@' symbol, if any.
+ ///
+ /// \param ImportLoc The location of the 'import' keyword.
+ ///
+ /// \param Path The module access path.
+ DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc,
+ ModuleIdPath Path);
+
+ /// \brief Retrieve a suitable printing policy.
+ PrintingPolicy getPrintingPolicy() const {
+ return getPrintingPolicy(Context, PP);
+ }
+
+ /// \brief Retrieve a suitable printing policy.
+ static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
+ const Preprocessor &PP);
+
+ /// Scope actions.
+ void ActOnPopScope(SourceLocation Loc, Scope *S);
+ void ActOnTranslationUnitScope(Scope *S);
+
+ Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
+ DeclSpec &DS);
+ Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
+ DeclSpec &DS,
+ MultiTemplateParamsArg TemplateParams);
+
+ Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
+ AccessSpecifier AS,
+ RecordDecl *Record);
+
+ Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
+ RecordDecl *Record);
+
+ bool isAcceptableTagRedeclaration(const TagDecl *Previous,
+ TagTypeKind NewTag, bool isDefinition,
+ SourceLocation NewTagLoc,
+ const IdentifierInfo &Name);
+
+ enum TagUseKind {
+ TUK_Reference, // Reference to a tag: 'struct foo *X;'
+ TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
+ TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
+ TUK_Friend // Friend declaration: 'friend struct foo;'
+ };
+
+ Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
+ SourceLocation KWLoc, CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ AttributeList *Attr, AccessSpecifier AS,
+ SourceLocation ModulePrivateLoc,
+ MultiTemplateParamsArg TemplateParameterLists,
+ bool &OwnedDecl, bool &IsDependent,
+ SourceLocation ScopedEnumKWLoc,
+ bool ScopedEnumUsesClassTag, TypeResult UnderlyingType);
+
+ Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
+ unsigned TagSpec, SourceLocation TagLoc,
+ CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ AttributeList *Attr,
+ MultiTemplateParamsArg TempParamLists);
+
+ TypeResult ActOnDependentTag(Scope *S,
+ unsigned TagSpec,
+ TagUseKind TUK,
+ const CXXScopeSpec &SS,
+ IdentifierInfo *Name,
+ SourceLocation TagLoc,
+ SourceLocation NameLoc);
+
+ void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
+ IdentifierInfo *ClassName,
+ SmallVectorImpl<Decl *> &Decls);
+ Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
+ Declarator &D, Expr *BitfieldWidth);
+
+ FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
+ Declarator &D, Expr *BitfieldWidth, bool HasInit,
+ AccessSpecifier AS);
+
+ FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
+ TypeSourceInfo *TInfo,
+ RecordDecl *Record, SourceLocation Loc,
+ bool Mutable, Expr *BitfieldWidth, bool HasInit,
+ SourceLocation TSSL,
+ AccessSpecifier AS, NamedDecl *PrevDecl,
+ Declarator *D = 0);
+
+ enum CXXSpecialMember {
+ CXXDefaultConstructor,
+ CXXCopyConstructor,
+ CXXMoveConstructor,
+ CXXCopyAssignment,
+ CXXMoveAssignment,
+ CXXDestructor,
+ CXXInvalid
+ };
+ bool CheckNontrivialField(FieldDecl *FD);
+ void DiagnoseNontrivial(const RecordType* Record, CXXSpecialMember mem);
+ CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
+ void ActOnLastBitfield(SourceLocation DeclStart,
+ SmallVectorImpl<Decl *> &AllIvarDecls);
+ Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
+ Declarator &D, Expr *BitfieldWidth,
+ tok::ObjCKeywordKind visibility);
+
+ // This is used for both record definitions and ObjC interface declarations.
+ void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl,
+ llvm::ArrayRef<Decl *> Fields,
+ SourceLocation LBrac, SourceLocation RBrac,
+ AttributeList *AttrList);
+
+ /// ActOnTagStartDefinition - Invoked when we have entered the
+ /// scope of a tag's definition (e.g., for an enumeration, class,
+ /// struct, or union).
+ void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
+
+ Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
+
+ /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
+ /// C++ record definition's base-specifiers clause and are starting its
+ /// member declarations.
+ void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
+ SourceLocation FinalLoc,
+ SourceLocation LBraceLoc);
+
+ /// ActOnTagFinishDefinition - Invoked once we have finished parsing
+ /// the definition of a tag (enumeration, class, struct, or union).
+ void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
+ SourceLocation RBraceLoc);
+
+ void ActOnObjCContainerFinishDefinition();
+
+ /// \brief Invoked when we must temporarily exit the objective-c container
+ /// scope for parsing/looking-up C constructs.
+ ///
+ /// Must be followed by a call to \see ActOnObjCReenterContainerContext
+ void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
+ void ActOnObjCReenterContainerContext(DeclContext *DC);
+
+ /// ActOnTagDefinitionError - Invoked when there was an unrecoverable
+ /// error parsing the definition of a tag.
+ void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
+
+ EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
+ EnumConstantDecl *LastEnumConst,
+ SourceLocation IdLoc,
+ IdentifierInfo *Id,
+ Expr *val);
+ bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
+ bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
+ QualType EnumUnderlyingTy, const EnumDecl *Prev);
+
+ Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ AttributeList *Attrs,
+ SourceLocation EqualLoc, Expr *Val);
+ void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
+ SourceLocation RBraceLoc, Decl *EnumDecl,
+ Decl **Elements, unsigned NumElements,
+ Scope *S, AttributeList *Attr);
+
+ DeclContext *getContainingDC(DeclContext *DC);
+
+ /// Set the current declaration context until it gets popped.
+ void PushDeclContext(Scope *S, DeclContext *DC);
+ void PopDeclContext();
+
+ /// EnterDeclaratorContext - Used when we must look up names in the context
+ /// of a declarator's nested name specifier.
+ void EnterDeclaratorContext(Scope *S, DeclContext *DC);
+ void ExitDeclaratorContext(Scope *S);
+
+ /// Push the parameters of D, which must be a function, into scope.
+ void ActOnReenterFunctionContext(Scope* S, Decl* D);
+ void ActOnExitFunctionContext();
+
+ DeclContext *getFunctionLevelDeclContext();
+
+ /// getCurFunctionDecl - If inside of a function body, this returns a pointer
+ /// to the function decl for the function being parsed. If we're currently
+ /// in a 'block', this returns the containing context.
+ FunctionDecl *getCurFunctionDecl();
+
+ /// getCurMethodDecl - If inside of a method body, this returns a pointer to
+ /// the method decl for the method being parsed. If we're currently
+ /// in a 'block', this returns the containing context.
+ ObjCMethodDecl *getCurMethodDecl();
+
+ /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
+ /// or C function we're in, otherwise return null. If we're currently
+ /// in a 'block', this returns the containing context.
+ NamedDecl *getCurFunctionOrMethodDecl();
+
+ /// Add this decl to the scope shadowed decl chains.
+ void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
+
+ /// \brief Make the given externally-produced declaration visible at the
+ /// top level scope.
+ ///
+ /// \param D The externally-produced declaration to push.
+ ///
+ /// \param Name The name of the externally-produced declaration.
+ void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name);
+
+ /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
+ /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
+ /// true if 'D' belongs to the given declaration context.
+ ///
+ /// \param ExplicitInstantiationOrSpecialization When true, we are checking
+ /// whether the declaration is in scope for the purposes of explicit template
+ /// instantiation or specialization. The default is false.
+ bool isDeclInScope(NamedDecl *&D, DeclContext *Ctx, Scope *S = 0,
+ bool ExplicitInstantiationOrSpecialization = false);
+
+ /// Finds the scope corresponding to the given decl context, if it
+ /// happens to be an enclosing scope. Otherwise return NULL.
+ static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
+
+ /// Subroutines of ActOnDeclarator().
+ TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
+ TypeSourceInfo *TInfo);
+ bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
+ void mergeDeclAttributes(Decl *New, Decl *Old, bool MergeDeprecation = true);
+ void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls);
+ bool MergeFunctionDecl(FunctionDecl *New, Decl *Old, Scope *S);
+ bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
+ Scope *S);
+ void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
+ void MergeVarDecl(VarDecl *New, LookupResult &OldDecls);
+ void MergeVarDeclTypes(VarDecl *New, VarDecl *Old);
+ void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
+ bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
+
+ // AssignmentAction - This is used by all the assignment diagnostic functions
+ // to represent what is actually causing the operation
+ enum AssignmentAction {
+ AA_Assigning,
+ AA_Passing,
+ AA_Returning,
+ AA_Converting,
+ AA_Initializing,
+ AA_Sending,
+ AA_Casting
+ };
+
+ /// C++ Overloading.
+ enum OverloadKind {
+ /// This is a legitimate overload: the existing declarations are
+ /// functions or function templates with different signatures.
+ Ovl_Overload,
+
+ /// This is not an overload because the signature exactly matches
+ /// an existing declaration.
+ Ovl_Match,
+
+ /// This is not an overload because the lookup results contain a
+ /// non-function.
+ Ovl_NonFunction
+ };
+ OverloadKind CheckOverload(Scope *S,
+ FunctionDecl *New,
+ const LookupResult &OldDecls,
+ NamedDecl *&OldDecl,
+ bool IsForUsingDecl);
+ bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);
+
+ /// \brief Checks the availability of the function depending on the current
+ /// function context. Inside an unavailable function, unavailability is
+ /// ignored.
+ ///
+ /// \returns true if \arg FD is unavailable and the current context is inside
+ /// an available function, false otherwise.
+ bool isFunctionConsideredUnavailable(FunctionDecl *FD);
+
+ ImplicitConversionSequence
+ TryImplicitConversion(Expr *From, QualType ToType,
+ bool SuppressUserConversions,
+ bool AllowExplicit,
+ bool InOverloadResolution,
+ bool CStyle,
+ bool AllowObjCWritebackConversion);
+
+ bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
+ bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
+ bool IsComplexPromotion(QualType FromType, QualType ToType);
+ bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
+ bool InOverloadResolution,
+ QualType& ConvertedType, bool &IncompatibleObjC);
+ bool isObjCPointerConversion(QualType FromType, QualType ToType,
+ QualType& ConvertedType, bool &IncompatibleObjC);
+ bool isObjCWritebackConversion(QualType FromType, QualType ToType,
+ QualType &ConvertedType);
+ bool IsBlockPointerConversion(QualType FromType, QualType ToType,
+ QualType& ConvertedType);
+ bool FunctionArgTypesAreEqual(const FunctionProtoType *OldType,
+ const FunctionProtoType *NewType,
+ unsigned *ArgPos = 0);
+ void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
+ QualType FromType, QualType ToType);
+
+ CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
+ bool CheckPointerConversion(Expr *From, QualType ToType,
+ CastKind &Kind,
+ CXXCastPath& BasePath,
+ bool IgnoreBaseAccess);
+ bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
+ bool InOverloadResolution,
+ QualType &ConvertedType);
+ bool CheckMemberPointerConversion(Expr *From, QualType ToType,
+ CastKind &Kind,
+ CXXCastPath &BasePath,
+ bool IgnoreBaseAccess);
+ bool IsQualificationConversion(QualType FromType, QualType ToType,
+ bool CStyle, bool &ObjCLifetimeConversion);
+ bool IsNoReturnConversion(QualType FromType, QualType ToType,
+ QualType &ResultTy);
+ bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
+
+
+ ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
+ const VarDecl *NRVOCandidate,
+ QualType ResultType,
+ Expr *Value,
+ bool AllowNRVO = true);
+
+ bool CanPerformCopyInitialization(const InitializedEntity &Entity,
+ ExprResult Init);
+ ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
+ SourceLocation EqualLoc,
+ ExprResult Init,
+ bool TopLevelOfInitList = false,
+ bool AllowExplicit = false);
+ ExprResult PerformObjectArgumentInitialization(Expr *From,
+ NestedNameSpecifier *Qualifier,
+ NamedDecl *FoundDecl,
+ CXXMethodDecl *Method);
+
+ ExprResult PerformContextuallyConvertToBool(Expr *From);
+ ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
+
+ /// Contexts in which a converted constant expression is required.
+ enum CCEKind {
+ CCEK_CaseValue, ///< Expression in a case label.
+ CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
+ CCEK_TemplateArg ///< Value of a non-type template parameter.
+ };
+ ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
+ llvm::APSInt &Value, CCEKind CCE);
+
+ ExprResult
+ ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *FromE,
+ const PartialDiagnostic &NotIntDiag,
+ const PartialDiagnostic &IncompleteDiag,
+ const PartialDiagnostic &ExplicitConvDiag,
+ const PartialDiagnostic &ExplicitConvNote,
+ const PartialDiagnostic &AmbigDiag,
+ const PartialDiagnostic &AmbigNote,
+ const PartialDiagnostic &ConvDiag,
+ bool AllowScopedEnumerations);
+ enum ObjCSubscriptKind {
+ OS_Array,
+ OS_Dictionary,
+ OS_Error
+ };
+ ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
+
+ ExprResult PerformObjectMemberConversion(Expr *From,
+ NestedNameSpecifier *Qualifier,
+ NamedDecl *FoundDecl,
+ NamedDecl *Member);
+
+ // Members have to be NamespaceDecl* or TranslationUnitDecl*.
+ // TODO: make this a typesafe union.
+ typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet;
+ typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet;
+
+ void AddOverloadCandidate(FunctionDecl *Function,
+ DeclAccessPair FoundDecl,
+ llvm::ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions = false,
+ bool PartialOverloading = false,
+ bool AllowExplicit = false);
+ void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
+ llvm::ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions = false,
+ TemplateArgumentListInfo *ExplicitTemplateArgs = 0);
+ void AddMethodCandidate(DeclAccessPair FoundDecl,
+ QualType ObjectType,
+ Expr::Classification ObjectClassification,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversion = false);
+ void AddMethodCandidate(CXXMethodDecl *Method,
+ DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingContext, QualType ObjectType,
+ Expr::Classification ObjectClassification,
+ llvm::ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions = false);
+ void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
+ DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingContext,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ QualType ObjectType,
+ Expr::Classification ObjectClassification,
+ llvm::ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions = false);
+ void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
+ DeclAccessPair FoundDecl,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ llvm::ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions = false);
+ void AddConversionCandidate(CXXConversionDecl *Conversion,
+ DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingContext,
+ Expr *From, QualType ToType,
+ OverloadCandidateSet& CandidateSet);
+ void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
+ DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingContext,
+ Expr *From, QualType ToType,
+ OverloadCandidateSet &CandidateSet);
+ void AddSurrogateCandidate(CXXConversionDecl *Conversion,
+ DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingContext,
+ const FunctionProtoType *Proto,
+ Expr *Object, llvm::ArrayRef<Expr*> Args,
+ OverloadCandidateSet& CandidateSet);
+ void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
+ SourceLocation OpLoc,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ SourceRange OpRange = SourceRange());
+ void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ bool IsAssignmentOperator = false,
+ unsigned NumContextualBoolArguments = 0);
+ void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
+ SourceLocation OpLoc,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet);
+ void AddArgumentDependentLookupCandidates(DeclarationName Name,
+ bool Operator, SourceLocation Loc,
+ llvm::ArrayRef<Expr *> Args,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ OverloadCandidateSet& CandidateSet,
+ bool PartialOverloading = false,
+ bool StdNamespaceIsAssociated = false);
+
+ // Emit as a 'note' the specific overload candidate
+ void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType());
+
+ // Emit as a series of 'note's all the template and non-template candidates
+ // identified by the expression E.
+ void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType());
+
+ // [PossiblyAFunctionType] --> [Return]
+ // NonFunctionType --> NonFunctionType
+ // R (A) --> R(A)
+ // R (*)(A) --> R (A)
+ // R (&)(A) --> R (A)
+ // R (S::*)(A) --> R (A)
+ QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
+
+ FunctionDecl *
+ ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
+ QualType TargetType,
+ bool Complain,
+ DeclAccessPair &Found,
+ bool *pHadMultipleCandidates = 0);
+
+ FunctionDecl *ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
+ bool Complain = false,
+ DeclAccessPair* Found = 0);
+
+ bool ResolveAndFixSingleFunctionTemplateSpecialization(
+ ExprResult &SrcExpr,
+ bool DoFunctionPointerConverion = false,
+ bool Complain = false,
+ const SourceRange& OpRangeForComplaining = SourceRange(),
+ QualType DestTypeForComplaining = QualType(),
+ unsigned DiagIDForComplaining = 0);
+
+
+ Expr *FixOverloadedFunctionReference(Expr *E,
+ DeclAccessPair FoundDecl,
+ FunctionDecl *Fn);
+ ExprResult FixOverloadedFunctionReference(ExprResult,
+ DeclAccessPair FoundDecl,
+ FunctionDecl *Fn);
+
+ void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
+ llvm::ArrayRef<Expr *> Args,
+ OverloadCandidateSet &CandidateSet,
+ bool PartialOverloading = false);
+
+ ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
+ UnresolvedLookupExpr *ULE,
+ SourceLocation LParenLoc,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation RParenLoc,
+ Expr *ExecConfig,
+ bool AllowTypoCorrection=true);
+
+ ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
+ unsigned Opc,
+ const UnresolvedSetImpl &Fns,
+ Expr *input);
+
+ ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
+ unsigned Opc,
+ const UnresolvedSetImpl &Fns,
+ Expr *LHS, Expr *RHS);
+
+ ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
+ SourceLocation RLoc,
+ Expr *Base,Expr *Idx);
+
+ ExprResult
+ BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
+ SourceLocation LParenLoc, Expr **Args,
+ unsigned NumArgs, SourceLocation RParenLoc);
+ ExprResult
+ BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation RParenLoc);
+
+ ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
+ SourceLocation OpLoc);
+
+ /// CheckCallReturnType - Checks that a call expression's return type is
+ /// complete. Returns true on failure. The location passed in is the location
+ /// that best represents the call.
+ bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
+ CallExpr *CE, FunctionDecl *FD);
+
+ /// Helpers for dealing with blocks and functions.
+ bool CheckParmsForFunctionDef(ParmVarDecl **Param, ParmVarDecl **ParamEnd,
+ bool CheckParameterNames);
+ void CheckCXXDefaultArguments(FunctionDecl *FD);
+ void CheckExtraCXXDefaultArguments(Declarator &D);
+ Scope *getNonFieldDeclScope(Scope *S);
+
+ /// \name Name lookup
+ ///
+ /// These routines provide name lookup that is used during semantic
+ /// analysis to resolve the various kinds of names (identifiers,
+ /// overloaded operator names, constructor names, etc.) into zero or
+ /// more declarations within a particular scope. The major entry
+ /// points are LookupName, which performs unqualified name lookup,
+ /// and LookupQualifiedName, which performs qualified name lookup.
+ ///
+ /// All name lookup is performed based on some specific criteria,
+ /// which specify what names will be visible to name lookup and how
+ /// far name lookup should work. These criteria are important both
+ /// for capturing language semantics (certain lookups will ignore
+ /// certain names, for example) and for performance, since name
+ /// lookup is often a bottleneck in the compilation of C++. Name
+ /// lookup criteria is specified via the LookupCriteria enumeration.
+ ///
+ /// The results of name lookup can vary based on the kind of name
+ /// lookup performed, the current language, and the translation
+ /// unit. In C, for example, name lookup will either return nothing
+ /// (no entity found) or a single declaration. In C++, name lookup
+ /// can additionally refer to a set of overloaded functions or
+ /// result in an ambiguity. All of the possible results of name
+ /// lookup are captured by the LookupResult class, which provides
+ /// the ability to distinguish among them.
+ //@{
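+ // Usage sketch: unqualified lookup of an ordinary name from inside Sema.
+ // A LookupResult is constructed with the criteria and then filled in by
+ // LookupName; 'Name' and 'NameLoc' are placeholders.
+ //
+ //   LookupResult R(*this, Name, NameLoc, LookupOrdinaryName);
+ //   if (LookupName(R, S) && R.isSingleResult()) {
+ //     NamedDecl *ND = R.getFoundDecl();
+ //     // ... use ND ...
+ //   }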
+
+ /// @brief Describes the kind of name lookup to perform.
+ enum LookupNameKind {
+ /// Ordinary name lookup, which finds ordinary names (functions,
+ /// variables, typedefs, etc.) in C and most kinds of names
+ /// (functions, variables, members, types, etc.) in C++.
+ LookupOrdinaryName = 0,
+ /// Tag name lookup, which finds the names of enums, classes,
+ /// structs, and unions.
+ LookupTagName,
+ /// Label name lookup.
+ LookupLabel,
+ /// Member name lookup, which finds the names of
+ /// class/struct/union members.
+ LookupMemberName,
+ /// Lookup of an operator name (e.g., operator+) for use with
+ /// operator overloading. This lookup is similar to ordinary name
+ /// lookup, but will ignore any declarations that are class members.
+ LookupOperatorName,
+ /// Lookup of a name that precedes the '::' scope resolution
+ /// operator in C++. This lookup completely ignores operator, object,
+ /// function, and enumerator names (C++ [basic.lookup.qual]p1).
+ LookupNestedNameSpecifierName,
+ /// Look up a namespace name within a C++ using directive or
+ /// namespace alias definition, ignoring non-namespace names (C++
+ /// [basic.lookup.udir]p1).
+ LookupNamespaceName,
+ /// Look up all declarations in a scope with the given name,
+ /// including resolved using declarations. This is appropriate
+ /// for checking redeclarations for a using declaration.
+ LookupUsingDeclName,
+ /// Look up an ordinary name that is going to be redeclared as a
+ /// name with linkage. This lookup ignores any declarations that
+ /// are outside of the current scope unless they have linkage. See
+ /// C99 6.2.2p4-5 and C++ [basic.link]p6.
+ LookupRedeclarationWithLinkage,
+ /// Look up the name of an Objective-C protocol.
+ LookupObjCProtocolName,
+ /// Look up implicit 'self' parameter of an objective-c method.
+ LookupObjCImplicitSelfParam,
+ /// \brief Look up any declaration with any name.
+ LookupAnyName
+ };
+
+ /// \brief Specifies whether (or how) name lookup is being performed for a
+ /// redeclaration (vs. a reference).
+ enum RedeclarationKind {
+ /// \brief The lookup is a reference to this name that is not for the
+ /// purpose of redeclaring the name.
+ NotForRedeclaration = 0,
+ /// \brief The lookup results will be used for redeclaration of a name,
+ /// if an entity by that name already exists.
+ ForRedeclaration
+ };
+
+ /// \brief The possible outcomes of name lookup for a literal operator.
+ enum LiteralOperatorLookupResult {
+ /// \brief The lookup resulted in an error.
+ LOLR_Error,
+ /// \brief The lookup found a single 'cooked' literal operator, which
+ /// expects a normal literal to be built and passed to it.
+ LOLR_Cooked,
+ /// \brief The lookup found a single 'raw' literal operator, which expects
+ /// a string literal containing the spelling of the literal token.
+ LOLR_Raw,
+ /// \brief The lookup found an overload set of literal operator templates,
+ /// which expect the characters of the spelling of the literal token to be
+ /// passed as a non-type template argument pack.
+ LOLR_Template
+ };
+
+ SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D,
+ CXXSpecialMember SM,
+ bool ConstArg,
+ bool VolatileArg,
+ bool RValueThis,
+ bool ConstThis,
+ bool VolatileThis);
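+ // Usage sketch: asking, from inside Sema, whether a class has a usable copy
+ // constructor taking a const-qualified argument, then inspecting the cached
+ // result through the SpecialMemberOverloadResult accessors declared above;
+ // 'RD' is a placeholder CXXRecordDecl*.
+ //
+ //   SpecialMemberOverloadResult *SMOR =
+ //       LookupSpecialMember(RD, CXXCopyConstructor, /*ConstArg=*/true,
+ //                           /*VolatileArg=*/false, /*RValueThis=*/false,
+ //                           /*ConstThis=*/false, /*VolatileThis=*/false);
+ //   if (SMOR->hasSuccess()) {
+ //     CXXMethodDecl *CopyCtor = SMOR->getMethod();
+ //     // ... CopyCtor is the selected constructor ...
+ //   }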
+
+private:
+ bool CppLookupName(LookupResult &R, Scope *S);
+
+ // \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
+ //
+ // The boolean value will be true to indicate that the namespace was loaded
+ // from an AST/PCH file, or false otherwise.
+ llvm::DenseMap<NamespaceDecl*, bool> KnownNamespaces;
+
+ /// \brief Whether we have already loaded known namespaces from an external
+ /// source.
+ bool LoadedExternalKnownNamespaces;
+
+public:
+ /// \brief Look up a name, looking for a single declaration. Return
+ /// null if the results were absent, ambiguous, or overloaded.
+ ///
+ /// It is preferable to use the elaborated form and explicitly handle
+ /// ambiguity and overloading.
+ NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
+ SourceLocation Loc,
+ LookupNameKind NameKind,
+ RedeclarationKind Redecl
+ = NotForRedeclaration);
+ bool LookupName(LookupResult &R, Scope *S,
+ bool AllowBuiltinCreation = false);
+ bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
+ bool InUnqualifiedLookup = false);
+ bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
+ bool AllowBuiltinCreation = false,
+ bool EnteringContext = false);
+ ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
+ RedeclarationKind Redecl
+ = NotForRedeclaration);
+
+ void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
+ QualType T1, QualType T2,
+ UnresolvedSetImpl &Functions);
+
+ LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
+ SourceLocation GnuLabelLoc = SourceLocation());
+
+ DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
+ CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
+ CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
+ unsigned Quals,
+ bool *ConstParam = 0);
+ CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
+ bool RValueThis, unsigned ThisQuals,
+ bool *ConstParam = 0);
+ CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class);
+ CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, bool RValueThis,
+ unsigned ThisQuals);
+ CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
+
+ LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
+ ArrayRef<QualType> ArgTys,
+ bool AllowRawAndTemplate);
+
+ void ArgumentDependentLookup(DeclarationName Name, bool Operator,
+ SourceLocation Loc,
+ llvm::ArrayRef<Expr *> Args,
+ ADLResult &Functions,
+ bool StdNamespaceIsAssociated = false);
+
+ void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
+ VisibleDeclConsumer &Consumer,
+ bool IncludeGlobalScope = true);
+ void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
+ VisibleDeclConsumer &Consumer,
+ bool IncludeGlobalScope = true);
+
+ TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
+ Sema::LookupNameKind LookupKind,
+ Scope *S, CXXScopeSpec *SS,
+ CorrectionCandidateCallback &CCC,
+ DeclContext *MemberContext = 0,
+ bool EnteringContext = false,
+ const ObjCObjectPointerType *OPT = 0);
+
+ void FindAssociatedClassesAndNamespaces(llvm::ArrayRef<Expr *> Args,
+ AssociatedNamespaceSet &AssociatedNamespaces,
+ AssociatedClassSet &AssociatedClasses);
+
+ void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
+ bool ConsiderLinkage,
+ bool ExplicitInstantiationOrSpecialization);
+
+ bool DiagnoseAmbiguousLookup(LookupResult &Result);
+ //@}
+
+ ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
+ SourceLocation IdLoc,
+ bool TypoCorrection = false);
+ NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
+ Scope *S, bool ForRedeclaration,
+ SourceLocation Loc);
+ NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
+ Scope *S);
+ void AddKnownFunctionAttributes(FunctionDecl *FD);
+
+ // More parsing and symbol table subroutines.
+
+ // Decl attributes - this routine is the top level dispatcher.
+ void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD,
+ bool NonInheritable = true, bool Inheritable = true);
+ void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL,
+ bool NonInheritable = true, bool Inheritable = true);
+ bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
+ const AttributeList *AttrList);
+
+ void checkUnusedDeclAttributes(Declarator &D);
+
+ bool CheckRegparmAttr(const AttributeList &attr, unsigned &value);
+ bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC);
+ bool CheckNoReturnAttr(const AttributeList &attr);
+
+ void WarnUndefinedMethod(SourceLocation ImpLoc, ObjCMethodDecl *method,
+ bool &IncompleteImpl, unsigned DiagID);
+ void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
+ ObjCMethodDecl *MethodDecl,
+ bool IsProtocolMethodDecl);
+
+ void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
+ ObjCMethodDecl *Overridden,
+ bool IsProtocolMethodDecl);
+
+ /// WarnExactTypedMethods - This routine issues a warning if a method
+ /// implementation declaration exactly matches that of its declaration.
+ void WarnExactTypedMethods(ObjCMethodDecl *Method,
+ ObjCMethodDecl *MethodDecl,
+ bool IsProtocolMethodDecl);
+
+ bool isPropertyReadonly(ObjCPropertyDecl *PropertyDecl,
+ ObjCInterfaceDecl *IDecl);
+
+ typedef llvm::DenseSet<Selector, llvm::DenseMapInfo<Selector> > SelectorSet;
+ typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap;
+
+ /// CheckProtocolMethodDefs - This routine checks for unimplemented
+ /// methods declared in a protocol, and those referenced by it.
+ /// \param CDecl - Used for checking for methods which may have been
+ /// inherited.
+ void CheckProtocolMethodDefs(SourceLocation ImpLoc,
+ ObjCProtocolDecl *PDecl,
+ bool& IncompleteImpl,
+ const SelectorSet &InsMap,
+ const SelectorSet &ClsMap,
+ ObjCContainerDecl *CDecl);
+
+ /// CheckImplementationIvars - This routine checks if the instance variables
+ /// listed in the implementation match those listed in the interface.
+ void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
+ ObjCIvarDecl **Fields, unsigned nIvars,
+ SourceLocation Loc);
+
+ /// ImplMethodsVsClassMethods - This is the main routine to warn if any method
+ /// remains unimplemented in the class or category @implementation.
+ void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
+ ObjCContainerDecl* IDecl,
+ bool IncompleteImpl = false);
+
+ /// DiagnoseUnimplementedProperties - This routine warns on those properties
+ /// which must be implemented by this implementation.
+ void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
+ ObjCContainerDecl *CDecl,
+ const SelectorSet &InsMap);
+
+ /// DefaultSynthesizeProperties - This routine default-synthesizes all
+ /// properties which must be synthesized in the class's @implementation.
+ void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl,
+ ObjCInterfaceDecl *IDecl);
+ void DefaultSynthesizeProperties(Scope *S, Decl *D);
+
+ /// CollectImmediateProperties - This routine collects all properties in
+ /// the class and its conforming protocols, but not those in its super class.
+ void CollectImmediateProperties(ObjCContainerDecl *CDecl,
+ llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap,
+ llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& SuperPropMap);
+
+
+ /// LookupPropertyDecl - Looks up a property in the current class and all
+ /// its protocols.
+ ObjCPropertyDecl *LookupPropertyDecl(const ObjCContainerDecl *CDecl,
+ IdentifierInfo *II);
+
+ /// Called by ActOnProperty to handle @property declarations in
+ /// class extensions.
+ Decl *HandlePropertyInClassExtension(Scope *S,
+ SourceLocation AtLoc,
+ SourceLocation LParenLoc,
+ FieldDeclarator &FD,
+ Selector GetterSel,
+ Selector SetterSel,
+ const bool isAssign,
+ const bool isReadWrite,
+ const unsigned Attributes,
+ const unsigned AttributesAsWritten,
+ bool *isOverridingProperty,
+ TypeSourceInfo *T,
+ tok::ObjCKeywordKind MethodImplKind);
+
+ /// Called by ActOnProperty and HandlePropertyInClassExtension to
+ /// handle creating the ObjCPropertyDecl for a category or @interface.
+ ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
+ ObjCContainerDecl *CDecl,
+ SourceLocation AtLoc,
+ SourceLocation LParenLoc,
+ FieldDeclarator &FD,
+ Selector GetterSel,
+ Selector SetterSel,
+ const bool isAssign,
+ const bool isReadWrite,
+ const unsigned Attributes,
+ const unsigned AttributesAsWritten,
+ TypeSourceInfo *T,
+ tok::ObjCKeywordKind MethodImplKind,
+ DeclContext *lexicalDC = 0);
+
+ /// AtomicPropertySetterGetterRules - This routine enforces the rule (via
+ /// warning) when an atomic property has a user-declared setter or getter,
+ /// but not both.
+ void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
+ ObjCContainerDecl* IDecl);
+
+ void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
+
+ void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
+
+ enum MethodMatchStrategy {
+ MMS_loose,
+ MMS_strict
+ };
+
+ /// MatchTwoMethodDeclarations - Checks whether two methods' types match and
+ /// returns true or false accordingly.
+ bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
+ const ObjCMethodDecl *PrevMethod,
+ MethodMatchStrategy strategy = MMS_strict);
+
+ /// MatchAllMethodDeclarations - Check methods declared in an interface or
+ /// protocol against those declared in their implementations.
+ void MatchAllMethodDeclarations(const SelectorSet &InsMap,
+ const SelectorSet &ClsMap,
+ SelectorSet &InsMapSeen,
+ SelectorSet &ClsMapSeen,
+ ObjCImplDecl* IMPDecl,
+ ObjCContainerDecl* IDecl,
+ bool &IncompleteImpl,
+ bool ImmediateClass,
+ bool WarnCategoryMethodImpl=false);
+
+ /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
+ /// category match those implemented in its primary class, and warns each
+ /// time an exact match is found.
+ void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
+
+ /// \brief Add the given method to the list of globally-known methods.
+ void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
+
+private:
+ /// AddMethodToGlobalPool - Add an instance or factory method to the global
+ /// pool. See the description of AddInstanceMethodToGlobalPool.
+ void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
+
+ /// LookupMethodInGlobalPool - Returns the instance or factory method and
+ /// optionally warns if there are multiple signatures.
+ ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
+ bool receiverIdOrClass,
+ bool warn, bool instance);
+
+public:
+ /// AddInstanceMethodToGlobalPool - All instance methods in a translation
+ /// unit are added to a global pool. This allows us to efficiently associate
+ /// a selector with a method declaration for purposes of typechecking
+ /// messages sent to "id" (where the class of the object is unknown).
+ void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
+ AddMethodToGlobalPool(Method, impl, /*instance*/true);
+ }
+
+ /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
+ void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
+ AddMethodToGlobalPool(Method, impl, /*instance*/false);
+ }
+
+ /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
+ /// pool.
+ void AddAnyMethodToGlobalPool(Decl *D);
+
+ /// LookupInstanceMethodInGlobalPool - Returns the method and warns if
+ /// there are multiple signatures.
+ ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
+ bool receiverIdOrClass=false,
+ bool warn=true) {
+ return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
+ warn, /*instance*/true);
+ }
+
+ /// LookupFactoryMethodInGlobalPool - Returns the method and warns if
+ /// there are multiple signatures.
+ ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
+ bool receiverIdOrClass=false,
+ bool warn=true) {
+ return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
+ warn, /*instance*/false);
+ }
+
+ /// LookupImplementedMethodInGlobalPool - Returns the method which has an
+ /// implementation.
+ ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
+
+ /// CollectIvarsToConstructOrDestruct - Collect those ivars which require
+ /// initialization.
+ void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
+ SmallVectorImpl<ObjCIvarDecl*> &Ivars);
+
+ //===--------------------------------------------------------------------===//
+ // Statement Parsing Callbacks: SemaStmt.cpp.
+public:
+ class FullExprArg {
+ public:
+ FullExprArg(Sema &actions) : E(0) { }
+
+ // FIXME: The const_cast here is ugly. RValue references would make this
+ // much nicer (or we could duplicate a bunch of the move semantics
+ // emulation code from Ownership.h).
+ FullExprArg(const FullExprArg& Other) : E(Other.E) {}
+
+ ExprResult release() {
+ return move(E);
+ }
+
+ Expr *get() const { return E; }
+
+ Expr *operator->() {
+ return E;
+ }
+
+ private:
+ // FIXME: No need to make the entire Sema class a friend when it's just
+ // Sema::MakeFullExpr that needs access to the constructor below.
+ friend class Sema;
+
+ explicit FullExprArg(Expr *expr) : E(expr) {}
+
+ Expr *E;
+ };
+
+ FullExprArg MakeFullExpr(Expr *Arg) {
+ return FullExprArg(ActOnFinishFullExpr(Arg).release());
+ }
+
+ StmtResult ActOnExprStmt(FullExprArg Expr);
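+ // Usage sketch: turning an expression into a full-expression statement;
+ // 'E' is a placeholder Expr* built earlier by the caller.
+ //
+ //   StmtResult Res = ActOnExprStmt(MakeFullExpr(E));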
+
+ StmtResult ActOnNullStmt(SourceLocation SemiLoc,
+ bool HasLeadingEmptyMacro = false);
+
+ void ActOnStartOfCompoundStmt();
+ void ActOnFinishOfCompoundStmt();
+ StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
+ MultiStmtArg Elts,
+ bool isStmtExpr);
+
+ /// \brief A RAII object to enter scope of a compound statement.
+ class CompoundScopeRAII {
+ public:
+ CompoundScopeRAII(Sema &S): S(S) {
+ S.ActOnStartOfCompoundStmt();
+ }
+
+ ~CompoundScopeRAII() {
+ S.ActOnFinishOfCompoundStmt();
+ }
+
+ private:
+ Sema &S;
+ };
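+
+ /// Illustrative use of the RAII helper above (assumes a Sema 'S' in scope):
+ /// \code
+ ///   {
+ ///     Sema::CompoundScopeRAII CompoundScope(S); // ActOnStartOfCompoundStmt()
+ ///     // ... act on the statements of the compound statement ...
+ ///   }                                           // ActOnFinishOfCompoundStmt()
+ /// \endcode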
+
+ StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc);
+ void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
+ StmtResult ActOnForEachLValueExpr(Expr *E);
+ StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
+ SourceLocation DotDotDotLoc, Expr *RHSVal,
+ SourceLocation ColonLoc);
+ void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
+
+ StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
+ SourceLocation ColonLoc,
+ Stmt *SubStmt, Scope *CurScope);
+ StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
+ SourceLocation ColonLoc, Stmt *SubStmt);
+
+ StmtResult ActOnIfStmt(SourceLocation IfLoc,
+ FullExprArg CondVal, Decl *CondVar,
+ Stmt *ThenVal,
+ SourceLocation ElseLoc, Stmt *ElseVal);
+ StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
+ Expr *Cond,
+ Decl *CondVar);
+ StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
+ Stmt *Switch, Stmt *Body);
+ StmtResult ActOnWhileStmt(SourceLocation WhileLoc,
+ FullExprArg Cond,
+ Decl *CondVar, Stmt *Body);
+ StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
+ SourceLocation WhileLoc,
+ SourceLocation CondLParen, Expr *Cond,
+ SourceLocation CondRParen);
+
+ StmtResult ActOnForStmt(SourceLocation ForLoc,
+ SourceLocation LParenLoc,
+ Stmt *First, FullExprArg Second,
+ Decl *SecondVar,
+ FullExprArg Third,
+ SourceLocation RParenLoc,
+ Stmt *Body);
+ ExprResult ActOnObjCForCollectionOperand(SourceLocation forLoc,
+ Expr *collection);
+ StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
+ SourceLocation LParenLoc,
+ Stmt *First, Expr *Second,
+ SourceLocation RParenLoc, Stmt *Body);
+ StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc,
+ SourceLocation LParenLoc, Stmt *LoopVar,
+ SourceLocation ColonLoc, Expr *Collection,
+ SourceLocation RParenLoc);
+ StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
+ SourceLocation ColonLoc,
+ Stmt *RangeDecl, Stmt *BeginEndDecl,
+ Expr *Cond, Expr *Inc,
+ Stmt *LoopVarDecl,
+ SourceLocation RParenLoc);
+ StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
+
+ StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
+ SourceLocation LabelLoc,
+ LabelDecl *TheDecl);
+ StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
+ SourceLocation StarLoc,
+ Expr *DestExp);
+ StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
+ StmtResult ActOnBreakStmt(SourceLocation GotoLoc, Scope *CurScope);
+
+ const VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
+ bool AllowFunctionParameters);
+
+ StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
+ StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
+
+ StmtResult ActOnAsmStmt(SourceLocation AsmLoc,
+ bool IsSimple, bool IsVolatile,
+ unsigned NumOutputs, unsigned NumInputs,
+ IdentifierInfo **Names,
+ MultiExprArg Constraints,
+ MultiExprArg Exprs,
+ Expr *AsmString,
+ MultiExprArg Clobbers,
+ SourceLocation RParenLoc,
+ bool MSAsm = false);
+
+
+ VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ bool Invalid = false);
+
+ Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
+
+ StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
+ Decl *Parm, Stmt *Body);
+
+ StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
+
+ StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
+ MultiStmtArg Catch, Stmt *Finally);
+
+ StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
+ StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
+ Scope *CurScope);
+ ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
+ Expr *operand);
+ StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
+ Expr *SynchExpr,
+ Stmt *SynchBody);
+
+ StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
+
+ VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ IdentifierInfo *Id);
+
+ Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
+
+ StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
+ Decl *ExDecl, Stmt *HandlerBlock);
+ StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
+ MultiStmtArg Handlers);
+
+ StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
+ SourceLocation TryLoc,
+ Stmt *TryBlock,
+ Stmt *Handler);
+
+ StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
+ Expr *FilterExpr,
+ Stmt *Block);
+
+ StmtResult ActOnSEHFinallyBlock(SourceLocation Loc,
+ Stmt *Block);
+
+ void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
+
+ bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
+
+ /// \brief If it's a file scoped decl that must warn if not used, keep track
+ /// of it.
+ void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
+
+ /// DiagnoseUnusedExprResult - If the statement passed in is an expression
+ /// whose result is unused, warn.
+ void DiagnoseUnusedExprResult(const Stmt *S);
+ void DiagnoseUnusedDecl(const NamedDecl *ND);
+
+ /// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious
+ /// null statement as its body (\p Body) and that body is on the same line.
+ ///
+ /// This helps prevent bugs due to typos, such as:
+ /// if (condition);
+ /// do_stuff();
+ void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
+ const Stmt *Body,
+ unsigned DiagID);
+
+ /// Warn if a for/while loop statement \p S, which is followed by
+ /// \p PossibleBody, has a suspicious null statement as a body.
+ void DiagnoseEmptyLoopBody(const Stmt *S,
+ const Stmt *PossibleBody);
+
+ ParsingDeclState PushParsingDeclaration() {
+ return DelayedDiagnostics.pushParsingDecl();
+ }
+ void PopParsingDeclaration(ParsingDeclState state, Decl *decl) {
+ DelayedDiagnostics::popParsingDecl(*this, state, decl);
+ }
+
+ typedef ProcessingContextState ParsingClassState;
+ ParsingClassState PushParsingClass() {
+ return DelayedDiagnostics.pushContext();
+ }
+ void PopParsingClass(ParsingClassState state) {
+ DelayedDiagnostics.popContext(state);
+ }
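+
+ /// Illustrative sketch of the balanced push/pop protocol for delayed
+ /// diagnostics (assumes a Sema 'S' and the Decl 'D' being parsed):
+ /// \code
+ ///   Sema::ParsingDeclState State = S.PushParsingDeclaration();
+ ///   // ... parse the declaration; diagnostics may be delayed here ...
+ ///   S.PopParsingDeclaration(State, D);
+ /// \endcode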
+
+ void EmitDeprecationWarning(NamedDecl *D, StringRef Message,
+ SourceLocation Loc,
+ const ObjCInterfaceDecl *UnknownObjCClass=0);
+
+ void HandleDelayedDeprecationCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
+
+ bool makeUnavailableInSystemHeader(SourceLocation loc,
+ StringRef message);
+
+ //===--------------------------------------------------------------------===//
+ // Expression Parsing Callbacks: SemaExpr.cpp.
+
+ bool CanUseDecl(NamedDecl *D);
+ bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
+ const ObjCInterfaceDecl *UnknownObjCClass=0);
+ void NoteDeletedFunction(FunctionDecl *FD);
+ std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD);
+ bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
+ ObjCMethodDecl *Getter,
+ SourceLocation Loc);
+ void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
+ Expr **Args, unsigned NumArgs);
+
+ void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
+ Decl *LambdaContextDecl = 0,
+ bool IsDecltype = false);
+
+ void PopExpressionEvaluationContext();
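+
+ /// Illustrative pairing (assumes a Sema 'S' in scope; Unevaluated names an
+ /// enumerator of ExpressionEvaluationContext declared earlier in this class):
+ /// \code
+ ///   S.PushExpressionEvaluationContext(Sema::Unevaluated);
+ ///   // ... build expressions that must stay unevaluated ...
+ ///   S.PopExpressionEvaluationContext();
+ /// \endcode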
+
+ void DiscardCleanupsInEvaluationContext();
+
+ ExprResult TranformToPotentiallyEvaluated(Expr *E);
+ ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
+
+ ExprResult ActOnConstantExpression(ExprResult Res);
+
+ // Functions for marking a declaration referenced. These functions also
+ // contain the relevant logic for marking if a reference to a function or
+ // variable is an odr-use (in the C++11 sense). There are separate variants
+ // for expressions referring to a decl; these exist because odr-use marking
+ // needs to be delayed for some constant variables when we build one of the
+ // named expressions.
+ void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D);
+ void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func);
+ void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
+ void MarkDeclRefReferenced(DeclRefExpr *E);
+ void MarkMemberReferenced(MemberExpr *E);
+
+ void UpdateMarkingForLValueToRValue(Expr *E);
+ void CleanupVarDeclMarking();
+
+ enum TryCaptureKind {
+ TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
+ };
+
+ /// \brief Try to capture the given variable.
+ ///
+ /// \param Var The variable to capture.
+ ///
+ /// \param Loc The location at which the capture occurs.
+ ///
+ /// \param Kind The kind of capture, which may be implicit (for either a
+ /// block or a lambda), or explicit by-value or by-reference (for a lambda).
+ ///
+ /// \param EllipsisLoc The location of the ellipsis, if one is provided in
+ /// an explicit lambda capture.
+ ///
+ /// \param BuildAndDiagnose Whether we are actually supposed to add the
+ /// captures or diagnose errors. If false, this routine merely checks whether
+ /// the capture can occur without performing the capture itself or complaining
+ /// if the variable cannot be captured.
+ ///
+ /// \param CaptureType Will be set to the type of the field used to capture
+ /// this variable in the innermost block or lambda. Only valid when the
+ /// variable can be captured.
+ ///
+ /// \param DeclRefType Will be set to the type of a reference to the capture
+ /// from within the current scope. Only valid when the variable can be
+ /// captured.
+ ///
+ /// \returns true if an error occurred (i.e., the variable cannot be
+ /// captured) and false if the capture succeeded.
+ bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
+ SourceLocation EllipsisLoc, bool BuildAndDiagnose,
+ QualType &CaptureType,
+ QualType &DeclRefType);
+
+ /// \brief Try to capture the given variable.
+ bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
+ TryCaptureKind Kind = TryCapture_Implicit,
+ SourceLocation EllipsisLoc = SourceLocation());
+
+ /// \brief Given a variable, determine the type that a reference to that
+ /// variable will have in the given scope.
+ QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
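+
+ /// Illustrative sketch (assumes a Sema 'S', a VarDecl 'Var', and a
+ /// SourceLocation 'Loc' in scope):
+ /// \code
+ ///   QualType CaptureType, DeclRefType;
+ ///   // Query only: with BuildAndDiagnose = false nothing is captured or
+ ///   // diagnosed; the routine just reports whether the capture would work.
+ ///   bool Invalid = S.tryCaptureVariable(Var, Loc, Sema::TryCapture_Implicit,
+ ///                                       SourceLocation(),
+ ///                                       /*BuildAndDiagnose=*/false,
+ ///                                       CaptureType, DeclRefType);
+ ///   if (!Invalid)
+ ///     DeclRefType = S.getCapturedDeclRefType(Var, Loc);
+ /// \endcode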
+
+ void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
+ void MarkDeclarationsReferencedInExpr(Expr *E,
+ bool SkipLocalVariables = false);
+
+ /// \brief Try to recover by turning the given expression into a
+ /// call. Returns true if recovery was attempted or an error was
+ /// emitted; this may also leave the ExprResult invalid.
+ bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
+ bool ForceComplain = false,
+ bool (*IsPlausibleResult)(QualType) = 0);
+
+ /// \brief Figure out if an expression could be turned into a call.
+ bool isExprCallable(const Expr &E, QualType &ZeroArgCallReturnTy,
+ UnresolvedSetImpl &NonTemplateOverloads);
+
+ /// \brief Conditionally issue a diagnostic based on the current
+ /// evaluation context.
+ ///
+ /// \param Statement - If Statement is non-null, delay reporting the diagnostic until
+ /// the function body is parsed, and then do a basic reachability analysis to
+ /// determine if the statement is reachable. If it is unreachable, the
+ /// diagnostic will not be emitted.
+ bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
+ const PartialDiagnostic &PD);
+
+ // Primary Expressions.
+ SourceRange getExprRange(Expr *E) const;
+
+ ExprResult ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ UnqualifiedId &Id,
+ bool HasTrailingLParen, bool IsAddressOfOperand,
+ CorrectionCandidateCallback *CCC = 0);
+
+ void DecomposeUnqualifiedId(const UnqualifiedId &Id,
+ TemplateArgumentListInfo &Buffer,
+ DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *&TemplateArgs);
+
+ bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
+ CorrectionCandidateCallback &CCC,
+ TemplateArgumentListInfo *ExplicitTemplateArgs = 0,
+ llvm::ArrayRef<Expr *> Args = llvm::ArrayRef<Expr *>());
+
+ ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
+ IdentifierInfo *II,
+ bool AllowBuiltinCreation=false);
+
+ ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ bool isAddressOfOperand,
+ const TemplateArgumentListInfo *TemplateArgs);
+
+ ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
+ ExprValueKind VK,
+ SourceLocation Loc,
+ const CXXScopeSpec *SS = 0);
+ ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty,
+ ExprValueKind VK,
+ const DeclarationNameInfo &NameInfo,
+ const CXXScopeSpec *SS = 0);
+ ExprResult
+ BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
+ SourceLocation nameLoc,
+ IndirectFieldDecl *indirectField,
+ Expr *baseObjectExpr = 0,
+ SourceLocation opLoc = SourceLocation());
+ ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ LookupResult &R,
+ const TemplateArgumentListInfo *TemplateArgs);
+ ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ LookupResult &R,
+ const TemplateArgumentListInfo *TemplateArgs,
+ bool IsDefiniteInstance);
+ bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
+ const LookupResult &R,
+ bool HasTrailingLParen);
+
+ ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
+ const DeclarationNameInfo &NameInfo);
+ ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs);
+
+ ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
+ LookupResult &R,
+ bool NeedsADL);
+ ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
+ const DeclarationNameInfo &NameInfo,
+ NamedDecl *D);
+
+ ExprResult BuildLiteralOperatorCall(LookupResult &R,
+ DeclarationNameInfo &SuffixInfo,
+ ArrayRef<Expr*> Args,
+ SourceLocation LitEndLoc,
+ TemplateArgumentListInfo *ExplicitTemplateArgs = 0);
+
+ ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
+ ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
+ ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = 0);
+ ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = 0);
+ ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
+ ExprResult ActOnParenListExpr(SourceLocation L,
+ SourceLocation R,
+ MultiExprArg Val);
+
+ /// ActOnStringLiteral - The specified tokens were lexed as pasted string
+ /// fragments (e.g. "foo" "bar" L"baz").
+ ExprResult ActOnStringLiteral(const Token *StringToks, unsigned NumStringToks,
+ Scope *UDLScope = 0);
+
+ ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
+ SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ Expr *ControllingExpr,
+ MultiTypeArg ArgTypes,
+ MultiExprArg ArgExprs);
+ ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
+ SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ Expr *ControllingExpr,
+ TypeSourceInfo **Types,
+ Expr **Exprs,
+ unsigned NumAssocs);
+
+ // Binary/Unary Operators. 'Tok' is the token for the operator.
+ ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
+ Expr *InputExpr);
+ ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
+ UnaryOperatorKind Opc, Expr *Input);
+ ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
+ tok::TokenKind Op, Expr *Input);
+
+ ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
+ SourceLocation OpLoc,
+ UnaryExprOrTypeTrait ExprKind,
+ SourceRange R);
+ ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
+ UnaryExprOrTypeTrait ExprKind);
+ ExprResult
+ ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
+ UnaryExprOrTypeTrait ExprKind,
+ bool IsType, void *TyOrEx,
+ const SourceRange &ArgRange);
+
+ ExprResult CheckPlaceholderExpr(Expr *E);
+ bool CheckVecStepExpr(Expr *E);
+
+ bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
+ bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
+ SourceRange ExprRange,
+ UnaryExprOrTypeTrait ExprKind);
+ ExprResult ActOnSizeofParameterPackExpr(Scope *S,
+ SourceLocation OpLoc,
+ IdentifierInfo &Name,
+ SourceLocation NameLoc,
+ SourceLocation RParenLoc);
+ ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
+ tok::TokenKind Kind, Expr *Input);
+
+ ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
+ Expr *Idx, SourceLocation RLoc);
+ ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
+ Expr *Idx, SourceLocation RLoc);
+
+ ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
+ SourceLocation OpLoc, bool IsArrow,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierInScope,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs);
+
+ ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
+ SourceLocation OpLoc, bool IsArrow,
+ const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierInScope,
+ LookupResult &R,
+ const TemplateArgumentListInfo *TemplateArgs,
+ bool SuppressQualifierCheck = false);
+
+ ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
+ ExprResult LookupMemberExpr(LookupResult &R, ExprResult &Base,
+ bool &IsArrow, SourceLocation OpLoc,
+ CXXScopeSpec &SS,
+ Decl *ObjCImpDecl,
+ bool HasTemplateArgs);
+
+ bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
+ const CXXScopeSpec &SS,
+ const LookupResult &R);
+
+ ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
+ bool IsArrow, SourceLocation OpLoc,
+ const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierInScope,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs);
+
+ ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ UnqualifiedId &Member,
+ Decl *ObjCImpDecl,
+ bool HasTrailingLParen);
+
+ void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
+ bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
+ FunctionDecl *FDecl,
+ const FunctionProtoType *Proto,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation RParenLoc,
+ bool ExecConfig = false);
+ void CheckStaticArrayArgument(SourceLocation CallLoc,
+ ParmVarDecl *Param,
+ const Expr *ArgExpr);
+
+ /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
+ /// This provides the location of the left/right parens and a list of comma
+ /// locations.
+ ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
+ MultiExprArg ArgExprs, SourceLocation RParenLoc,
+ Expr *ExecConfig = 0, bool IsExecConfig = false);
+ ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
+ SourceLocation LParenLoc,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation RParenLoc,
+ Expr *Config = 0,
+ bool IsExecConfig = false);
+
+ ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
+ MultiExprArg ExecConfig,
+ SourceLocation GGGLoc);
+
+ ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
+ Declarator &D, ParsedType &Ty,
+ SourceLocation RParenLoc, Expr *CastExpr);
+ ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
+ TypeSourceInfo *Ty,
+ SourceLocation RParenLoc,
+ Expr *Op);
+ CastKind PrepareScalarCast(ExprResult &src, QualType destType);
+
+ /// \brief Build an AltiVec or OpenCL vector literal.
+ ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
+ SourceLocation RParenLoc, Expr *E,
+ TypeSourceInfo *TInfo);
+
+ ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
+
+ ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
+ ParsedType Ty,
+ SourceLocation RParenLoc,
+ Expr *InitExpr);
+
+ ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
+ TypeSourceInfo *TInfo,
+ SourceLocation RParenLoc,
+ Expr *LiteralExpr);
+
+ ExprResult ActOnInitList(SourceLocation LBraceLoc,
+ MultiExprArg InitArgList,
+ SourceLocation RBraceLoc);
+
+ ExprResult ActOnDesignatedInitializer(Designation &Desig,
+ SourceLocation Loc,
+ bool GNUSyntax,
+ ExprResult Init);
+
+ ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
+ tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
+ ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
+ BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
+ ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
+ Expr *LHSExpr, Expr *RHSExpr);
+
+ /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
+ /// in the case of the GNU conditional expr extension.
+ ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
+ SourceLocation ColonLoc,
+ Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
+
+ /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
+ ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
+ LabelDecl *TheDecl);
+
+ void ActOnStartStmtExpr();
+ ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
+ SourceLocation RPLoc); // "({..})"
+ void ActOnStmtExprError();
+
+ // __builtin_offsetof(type, identifier(.identifier|[expr])*)
+ struct OffsetOfComponent {
+ SourceLocation LocStart, LocEnd;
+ bool isBrackets; // true if [expr], false if .ident
+ union {
+ IdentifierInfo *IdentInfo;
+ Expr *E;
+ } U;
+ };
+
+ /// __builtin_offsetof(type, a.b[123][456].c)
+ ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
+ TypeSourceInfo *TInfo,
+ OffsetOfComponent *CompPtr,
+ unsigned NumComponents,
+ SourceLocation RParenLoc);
+ ExprResult ActOnBuiltinOffsetOf(Scope *S,
+ SourceLocation BuiltinLoc,
+ SourceLocation TypeLoc,
+ ParsedType ParsedArgTy,
+ OffsetOfComponent *CompPtr,
+ unsigned NumComponents,
+ SourceLocation RParenLoc);
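+
+ /// Illustrative sketch of how the written form maps onto components, for
+ /// __builtin_offsetof(T, a.b[1]) (assumes a Sema 'S'; IdentA, IdentB,
+ /// IndexExpr, BuiltinLoc, TInfo and RParenLoc are hypothetical locals;
+ /// the component source locations are omitted for brevity):
+ /// \code
+ ///   OffsetOfComponent Comps[3];
+ ///   Comps[0].isBrackets = false; Comps[0].U.IdentInfo = IdentA;  // .a
+ ///   Comps[1].isBrackets = false; Comps[1].U.IdentInfo = IdentB;  // .b
+ ///   Comps[2].isBrackets = true;  Comps[2].U.E = IndexExpr;       // [1]
+ ///   ExprResult Off = S.BuildBuiltinOffsetOf(BuiltinLoc, TInfo, Comps, 3,
+ ///                                           RParenLoc);
+ /// \endcode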
+
+ // __builtin_choose_expr(constExpr, expr1, expr2)
+ ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
+ Expr *CondExpr, Expr *LHSExpr,
+ Expr *RHSExpr, SourceLocation RPLoc);
+
+ // __builtin_va_arg(expr, type)
+ ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
+ SourceLocation RPLoc);
+ ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
+ TypeSourceInfo *TInfo, SourceLocation RPLoc);
+
+ // __null
+ ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
+
+ bool CheckCaseExpression(Expr *E);
+
+ /// \brief Describes the result of an "if-exists" condition check.
+ enum IfExistsResult {
+ /// \brief The symbol exists.
+ IER_Exists,
+
+ /// \brief The symbol does not exist.
+ IER_DoesNotExist,
+
+ /// \brief The name is a dependent name, so the results will differ
+ /// from one instantiation to the next.
+ IER_Dependent,
+
+ /// \brief An error occurred.
+ IER_Error
+ };
+
+ IfExistsResult
+ CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
+ const DeclarationNameInfo &TargetNameInfo);
+
+ IfExistsResult
+ CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
+ bool IsIfExists, CXXScopeSpec &SS,
+ UnqualifiedId &Name);
+
+ StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
+ bool IsIfExists,
+ NestedNameSpecifierLoc QualifierLoc,
+ DeclarationNameInfo NameInfo,
+ Stmt *Nested);
+ StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
+ bool IsIfExists,
+ CXXScopeSpec &SS, UnqualifiedId &Name,
+ Stmt *Nested);
+
+ //===------------------------- "Block" Extension ------------------------===//
+
+ /// ActOnBlockStart - This callback is invoked when a block literal is
+ /// started.
+ void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
+
+ /// ActOnBlockArguments - This callback allows processing of block arguments.
+ /// If there are no arguments, this is still invoked.
+ void ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope);
+
+ /// ActOnBlockError - If there is an error parsing a block, this callback
+ /// is invoked to pop the information about the block from the action impl.
+ void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
+
+ /// ActOnBlockStmtExpr - This is called when the body of a block statement
+ /// literal was successfully completed. ^(int x){...}
+ ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
+ Scope *CurScope);
+
+ //===---------------------------- OpenCL Features -----------------------===//
+
+ /// __builtin_astype(...)
+ ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
+ SourceLocation BuiltinLoc,
+ SourceLocation RParenLoc);
+
+ //===---------------------------- C++ Features --------------------------===//
+
+ // Act on C++ namespaces
+ Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
+ SourceLocation NamespaceLoc,
+ SourceLocation IdentLoc,
+ IdentifierInfo *Ident,
+ SourceLocation LBrace,
+ AttributeList *AttrList);
+ void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
+
+ NamespaceDecl *getStdNamespace() const;
+ NamespaceDecl *getOrCreateStdNamespace();
+
+ CXXRecordDecl *getStdBadAlloc() const;
+
+ /// \brief Tests whether Ty is an instance of std::initializer_list and, if
+ /// it is and Element is not NULL, assigns the element type to Element.
+ bool isStdInitializerList(QualType Ty, QualType *Element);
+
+ /// \brief Looks for the std::initializer_list template and instantiates it
+ /// with Element, or emits an error if it's not found.
+ ///
+ /// \returns The instantiated template, or null on error.
+ QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
+
+ /// \brief Determine whether Ctor is an initializer-list constructor, as
+ /// defined in [dcl.init.list]p2.
+ bool isInitListConstructor(const CXXConstructorDecl *Ctor);
+
+ Decl *ActOnUsingDirective(Scope *CurScope,
+ SourceLocation UsingLoc,
+ SourceLocation NamespcLoc,
+ CXXScopeSpec &SS,
+ SourceLocation IdentLoc,
+ IdentifierInfo *NamespcName,
+ AttributeList *AttrList);
+
+ void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
+
+ Decl *ActOnNamespaceAliasDef(Scope *CurScope,
+ SourceLocation NamespaceLoc,
+ SourceLocation AliasLoc,
+ IdentifierInfo *Alias,
+ CXXScopeSpec &SS,
+ SourceLocation IdentLoc,
+ IdentifierInfo *Ident);
+
+ void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
+ bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
+ const LookupResult &PreviousDecls);
+ UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
+ NamedDecl *Target);
+
+ bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
+ bool isTypeName,
+ const CXXScopeSpec &SS,
+ SourceLocation NameLoc,
+ const LookupResult &Previous);
+ bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
+ const CXXScopeSpec &SS,
+ SourceLocation NameLoc);
+
+ NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
+ SourceLocation UsingLoc,
+ CXXScopeSpec &SS,
+ const DeclarationNameInfo &NameInfo,
+ AttributeList *AttrList,
+ bool IsInstantiation,
+ bool IsTypeName,
+ SourceLocation TypenameLoc);
+
+ bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
+
+ Decl *ActOnUsingDeclaration(Scope *CurScope,
+ AccessSpecifier AS,
+ bool HasUsingKeyword,
+ SourceLocation UsingLoc,
+ CXXScopeSpec &SS,
+ UnqualifiedId &Name,
+ AttributeList *AttrList,
+ bool IsTypeName,
+ SourceLocation TypenameLoc);
+ Decl *ActOnAliasDeclaration(Scope *CurScope,
+ AccessSpecifier AS,
+ MultiTemplateParamsArg TemplateParams,
+ SourceLocation UsingLoc,
+ UnqualifiedId &Name,
+ TypeResult Type);
+
+ /// InitializeVarWithConstructor - Creates a CXXConstructExpr
+ /// and sets it as the initializer for the passed-in VarDecl.
+ bool InitializeVarWithConstructor(VarDecl *VD,
+ CXXConstructorDecl *Constructor,
+ MultiExprArg Exprs,
+ bool HadMultipleCandidates);
+
+ /// BuildCXXConstructExpr - Creates a complete call to a constructor,
+ /// including handling of its default argument expressions.
+ ///
+ /// \param ConstructKind - a CXXConstructExpr::ConstructionKind
+ ExprResult
+ BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
+ CXXConstructorDecl *Constructor, MultiExprArg Exprs,
+ bool HadMultipleCandidates, bool RequiresZeroInit,
+ unsigned ConstructKind, SourceRange ParenRange);
+
+ // FIXME: Can we remove this and have the above BuildCXXConstructExpr check
+ // whether the constructor can be elidable?
+ ExprResult
+ BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
+ CXXConstructorDecl *Constructor, bool Elidable,
+ MultiExprArg Exprs, bool HadMultipleCandidates,
+ bool RequiresZeroInit, unsigned ConstructKind,
+ SourceRange ParenRange);
+
+ /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
+ /// the default expr if needed.
+ ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
+ FunctionDecl *FD,
+ ParmVarDecl *Param);
+
+ /// FinalizeVarWithDestructor - Prepare for calling destructor on the
+ /// constructed variable.
+ void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
+
+ /// \brief Helper class that collects exception specifications for
+ /// implicitly-declared special member functions.
+ class ImplicitExceptionSpecification {
+ // Pointer to allow copying
+ ASTContext *Context;
+ // We order exception specifications thus:
+ // noexcept is the most restrictive, but is only used in C++0x.
+ // throw() comes next.
+ // Then a throw(collected exceptions)
+ // Finally no specification.
+ // throw(...) is used instead if any called function uses it.
+ //
+ // If this exception specification cannot be known yet (for instance,
+ // because this is the exception specification for a defaulted default
+ // constructor and we haven't finished parsing the deferred parts of the
+ // class yet), the C++0x standard does not specify how to behave. We
+ // record this as an 'unknown' exception specification, which overrules
+ // any other specification (even 'none', to keep this rule simple).
+ ExceptionSpecificationType ComputedEST;
+ llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
+ SmallVector<QualType, 4> Exceptions;
+
+ void ClearExceptions() {
+ ExceptionsSeen.clear();
+ Exceptions.clear();
+ }
+
+ public:
+ explicit ImplicitExceptionSpecification(ASTContext &Context)
+ : Context(&Context), ComputedEST(EST_BasicNoexcept) {
+ if (!Context.getLangOpts().CPlusPlus0x)
+ ComputedEST = EST_DynamicNone;
+ }
+
+ /// \brief Get the computed exception specification type.
+ ExceptionSpecificationType getExceptionSpecType() const {
+ assert(ComputedEST != EST_ComputedNoexcept &&
+ "noexcept(expr) should not be a possible result");
+ return ComputedEST;
+ }
+
+ /// \brief The number of exceptions in the exception specification.
+ unsigned size() const { return Exceptions.size(); }
+
+ /// \brief The set of exceptions in the exception specification.
+ const QualType *data() const { return Exceptions.data(); }
+
+ /// \brief Integrate another called method into the collected data.
+ void CalledDecl(CXXMethodDecl *Method);
+
+ /// \brief Integrate an invoked expression into the collected data.
+ void CalledExpr(Expr *E);
+
+ /// \brief Specify that the exception specification can't be determined yet.
+ void SetDelayed() {
+ ClearExceptions();
+ ComputedEST = EST_Delayed;
+ }
+
+ FunctionProtoType::ExtProtoInfo getEPI() const {
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExceptionSpecType = getExceptionSpecType();
+ EPI.NumExceptions = size();
+ EPI.Exceptions = data();
+ return EPI;
+ }
+ };
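+
+ /// Illustrative sketch of collecting an implicit exception specification
+ /// (assumes this runs inside a Sema member, so 'Context' is the ASTContext;
+ /// 'BaseCtor' and 'FieldCtor' are hypothetical CXXMethodDecls):
+ /// \code
+ ///   ImplicitExceptionSpecification ExceptSpec(Context);
+ ///   ExceptSpec.CalledDecl(BaseCtor);
+ ///   ExceptSpec.CalledDecl(FieldCtor);
+ ///   FunctionProtoType::ExtProtoInfo EPI = ExceptSpec.getEPI();
+ /// \endcode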
+
+ /// \brief Determine what sort of exception specification a defaulted
+ /// default constructor of a class will have.
+ ImplicitExceptionSpecification
+ ComputeDefaultedDefaultCtorExceptionSpec(CXXRecordDecl *ClassDecl);
+
+ /// \brief Determine what sort of exception specification a defaulted
+ /// copy constructor of a class will have, and whether the parameter
+ /// will be const.
+ std::pair<ImplicitExceptionSpecification, bool>
+ ComputeDefaultedCopyCtorExceptionSpecAndConst(CXXRecordDecl *ClassDecl);
+
+ /// \brief Determine what sort of exception specification a defaulted
+ /// copy assignment operator of a class will have, and whether the
+ /// parameter will be const.
+ std::pair<ImplicitExceptionSpecification, bool>
+ ComputeDefaultedCopyAssignmentExceptionSpecAndConst(CXXRecordDecl *ClassDecl);
+
+ /// \brief Determine what sort of exception specification a defaulted move
+ /// constructor of a class will have.
+ ImplicitExceptionSpecification
+ ComputeDefaultedMoveCtorExceptionSpec(CXXRecordDecl *ClassDecl);
+
+ /// \brief Determine what sort of exception specification a defaulted move
+ /// assignment operator of a class will have.
+ ImplicitExceptionSpecification
+ ComputeDefaultedMoveAssignmentExceptionSpec(CXXRecordDecl *ClassDecl);
+
+ /// \brief Determine what sort of exception specification a defaulted
+ /// destructor of a class will have.
+ ImplicitExceptionSpecification
+ ComputeDefaultedDtorExceptionSpec(CXXRecordDecl *ClassDecl);
+
+ /// \brief Determine if a special member function should have a deleted
+ /// definition when it is defaulted.
+ bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
+ bool Diagnose = false);
+
+ /// \brief Declare the implicit default constructor for the given class.
+ ///
+ /// \param ClassDecl The class declaration into which the implicit
+ /// default constructor will be added.
+ ///
+ /// \returns The implicitly-declared default constructor.
+ CXXConstructorDecl *DeclareImplicitDefaultConstructor(
+ CXXRecordDecl *ClassDecl);
+
+ /// DefineImplicitDefaultConstructor - Checks for feasibility of
+ /// defining this constructor as the default constructor.
+ void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
+ CXXConstructorDecl *Constructor);
+
+ /// \brief Declare the implicit destructor for the given class.
+ ///
+ /// \param ClassDecl The class declaration into which the implicit
+ /// destructor will be added.
+ ///
+ /// \returns The implicitly-declared destructor.
+ CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
+
+ /// DefineImplicitDestructor - Checks for feasibility of
+ /// defining this destructor as the default destructor.
+ void DefineImplicitDestructor(SourceLocation CurrentLocation,
+ CXXDestructorDecl *Destructor);
+
+ /// \brief Build an exception spec for destructors that don't have one.
+ ///
+ /// C++11 says that user-defined destructors with no exception spec get one
+ /// that looks as if the destructor was implicitly declared.
+ void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl,
+ CXXDestructorDecl *Destructor);
+
+ /// \brief Declare all inherited constructors for the given class.
+ ///
+ /// \param ClassDecl The class declaration into which the inherited
+ /// constructors will be added.
+ void DeclareInheritedConstructors(CXXRecordDecl *ClassDecl);
+
+ /// \brief Declare the implicit copy constructor for the given class.
+ ///
+ /// \param ClassDecl The class declaration into which the implicit
+ /// copy constructor will be added.
+ ///
+ /// \returns The implicitly-declared copy constructor.
+ CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
+
+ /// DefineImplicitCopyConstructor - Checks for feasibility of
+ /// defining this constructor as the copy constructor.
+ void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
+ CXXConstructorDecl *Constructor);
+
+ /// \brief Declare the implicit move constructor for the given class.
+ ///
+ /// \param ClassDecl The class declaration into which the implicit
+ /// move constructor will be added.
+ ///
+ /// \returns The implicitly-declared move constructor, or NULL if it wasn't
+ /// declared.
+ CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
+
+ /// DefineImplicitMoveConstructor - Checks for feasibility of
+ /// defining this constructor as the move constructor.
+ void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
+ CXXConstructorDecl *Constructor);
+
+ /// \brief Declare the implicit copy assignment operator for the given class.
+ ///
+ /// \param ClassDecl The class declaration into which the implicit
+ /// copy assignment operator will be added.
+ ///
+ /// \returns The implicitly-declared copy assignment operator.
+ CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
+
+ /// \brief Defines an implicitly-declared copy assignment operator.
+ void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
+ CXXMethodDecl *MethodDecl);
+
+ /// \brief Declare the implicit move assignment operator for the given class.
+ ///
+ /// \param ClassDecl The class declaration into which the implicit
+ /// move assignment operator will be added.
+ ///
+ /// \returns The implicitly-declared move assignment operator, or NULL if it
+ /// wasn't declared.
+ CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
+
+ /// \brief Defines an implicitly-declared move assignment operator.
+ void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
+ CXXMethodDecl *MethodDecl);
+
+ /// \brief Force the declaration of any implicitly-declared members of this
+ /// class.
+ void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
+
+ /// \brief Determine whether the given function is an implicitly-deleted
+ /// special member function.
+ bool isImplicitlyDeleted(FunctionDecl *FD);
+
+ /// MaybeBindToTemporary - If the passed-in expression has a record type with
+ /// a non-trivial destructor, this will return a CXXBindTemporaryExpr.
+ /// Otherwise it simply returns the passed-in expression.
+ ExprResult MaybeBindToTemporary(Expr *E);
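+
+ /// Illustrative sketch (assumes a Sema 'S' and an ExprResult 'CallRes' for a
+ /// call returning a class type):
+ /// \code
+ ///   ExprResult Bound = S.MaybeBindToTemporary(CallRes.take());
+ ///   // 'Bound' wraps a CXXBindTemporaryExpr only when the record type has a
+ ///   // non-trivial destructor; otherwise it is the original expression.
+ /// \endcode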
+
+ bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
+ MultiExprArg ArgsPtr,
+ SourceLocation Loc,
+ ASTOwningVector<Expr*> &ConvertedArgs,
+ bool AllowExplicit = false);
+
+ ParsedType getDestructorName(SourceLocation TildeLoc,
+ IdentifierInfo &II, SourceLocation NameLoc,
+ Scope *S, CXXScopeSpec &SS,
+ ParsedType ObjectType,
+ bool EnteringContext);
+
+ ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType);
+
+ // Checks that reinterpret casts don't have undefined behavior.
+ void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
+ bool IsDereference, SourceRange Range);
+
+ /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
+ ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
+ tok::TokenKind Kind,
+ SourceLocation LAngleBracketLoc,
+ Declarator &D,
+ SourceLocation RAngleBracketLoc,
+ SourceLocation LParenLoc,
+ Expr *E,
+ SourceLocation RParenLoc);
+
+ ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
+ tok::TokenKind Kind,
+ TypeSourceInfo *Ty,
+ Expr *E,
+ SourceRange AngleBrackets,
+ SourceRange Parens);
+
+ ExprResult BuildCXXTypeId(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ TypeSourceInfo *Operand,
+ SourceLocation RParenLoc);
+ ExprResult BuildCXXTypeId(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ Expr *Operand,
+ SourceLocation RParenLoc);
+
+ /// ActOnCXXTypeid - Parse typeid( something ).
+ ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
+ SourceLocation LParenLoc, bool isType,
+ void *TyOrExpr,
+ SourceLocation RParenLoc);
+
+ ExprResult BuildCXXUuidof(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ TypeSourceInfo *Operand,
+ SourceLocation RParenLoc);
+ ExprResult BuildCXXUuidof(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ Expr *Operand,
+ SourceLocation RParenLoc);
+
+ /// ActOnCXXUuidof - Parse __uuidof( something ).
+ ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
+ SourceLocation LParenLoc, bool isType,
+ void *TyOrExpr,
+ SourceLocation RParenLoc);
+
+
+ /// ActOnCXXThis - Parse 'this' pointer.
+ ExprResult ActOnCXXThis(SourceLocation loc);
+
+ /// \brief Try to retrieve the type of the 'this' pointer.
+ ///
+ /// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
+ QualType getCurrentThisType();
+
+ /// \brief Make sure the value of 'this' is actually available in the current
+ /// context, if it is a potentially evaluated context.
+ ///
+ /// \param Loc The location at which the capture of 'this' occurs.
+ ///
+ /// \param Explicit Whether 'this' is explicitly captured in a lambda
+ /// capture list.
+ void CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false);
+
+ /// ActOnCXXBoolLiteral - Parse {true,false} literals.
+ ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
+
+
+ /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
+ ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
+
+ /// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
+ ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
+
+ /// ActOnCXXThrow - Parse throw expressions.
+ ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
+ ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
+ bool IsThrownVarInScope);
+ ExprResult CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E,
+ bool IsThrownVarInScope);
+
+ /// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
+ /// Can be interpreted either as function-style casting ("int(x)")
+ /// or class type construction ("ClassType(x,y,z)")
+ /// or creation of a value-initialized type ("int()").
+ ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
+ SourceLocation LParenLoc,
+ MultiExprArg Exprs,
+ SourceLocation RParenLoc);
+
+ ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
+ SourceLocation LParenLoc,
+ MultiExprArg Exprs,
+ SourceLocation RParenLoc);
+
+ /// ActOnCXXNew - Parsed a C++ 'new' expression.
+ ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
+ SourceLocation PlacementLParen,
+ MultiExprArg PlacementArgs,
+ SourceLocation PlacementRParen,
+ SourceRange TypeIdParens, Declarator &D,
+ Expr *Initializer);
+ ExprResult BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
+ SourceLocation PlacementLParen,
+ MultiExprArg PlacementArgs,
+ SourceLocation PlacementRParen,
+ SourceRange TypeIdParens,
+ QualType AllocType,
+ TypeSourceInfo *AllocTypeInfo,
+ Expr *ArraySize,
+ SourceRange DirectInitRange,
+ Expr *Initializer,
+ bool TypeMayContainAuto = true);
+
+ bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
+ SourceRange R);
+ bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
+ bool UseGlobal, QualType AllocType, bool IsArray,
+ Expr **PlaceArgs, unsigned NumPlaceArgs,
+ FunctionDecl *&OperatorNew,
+ FunctionDecl *&OperatorDelete);
+ bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
+ DeclarationName Name, Expr** Args,
+ unsigned NumArgs, DeclContext *Ctx,
+ bool AllowMissing, FunctionDecl *&Operator,
+ bool Diagnose = true);
+ void DeclareGlobalNewDelete();
+ void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
+ QualType Argument,
+ bool addMallocAttr = false);
+
+ bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
+ DeclarationName Name, FunctionDecl* &Operator,
+ bool Diagnose = true);
+
+ /// ActOnCXXDelete - Parsed a C++ 'delete' expression
+ ExprResult ActOnCXXDelete(SourceLocation StartLoc,
+ bool UseGlobal, bool ArrayForm,
+ Expr *Operand);
+
+ DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
+ ExprResult CheckConditionVariable(VarDecl *ConditionVar,
+ SourceLocation StmtLoc,
+ bool ConvertToBoolean);
+
+ ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
+ Expr *Operand, SourceLocation RParen);
+ ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
+ SourceLocation RParen);
+
+ /// ActOnUnaryTypeTrait - Parsed one of the unary type trait support
+ /// pseudo-functions.
+ ExprResult ActOnUnaryTypeTrait(UnaryTypeTrait OTT,
+ SourceLocation KWLoc,
+ ParsedType Ty,
+ SourceLocation RParen);
+
+ ExprResult BuildUnaryTypeTrait(UnaryTypeTrait OTT,
+ SourceLocation KWLoc,
+ TypeSourceInfo *T,
+ SourceLocation RParen);
+
+ /// ActOnBinaryTypeTrait - Parsed one of the binary type trait support
+ /// pseudo-functions.
+ ExprResult ActOnBinaryTypeTrait(BinaryTypeTrait OTT,
+ SourceLocation KWLoc,
+ ParsedType LhsTy,
+ ParsedType RhsTy,
+ SourceLocation RParen);
+
+ ExprResult BuildBinaryTypeTrait(BinaryTypeTrait BTT,
+ SourceLocation KWLoc,
+ TypeSourceInfo *LhsT,
+ TypeSourceInfo *RhsT,
+ SourceLocation RParen);
+
+ /// \brief Parsed one of the type trait support pseudo-functions.
+ ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
+ ArrayRef<ParsedType> Args,
+ SourceLocation RParenLoc);
+ ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc);
+
+ /// ActOnArrayTypeTrait - Parsed one of the array type trait support
+ /// pseudo-functions.
+ ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
+ SourceLocation KWLoc,
+ ParsedType LhsTy,
+ Expr *DimExpr,
+ SourceLocation RParen);
+
+ ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
+ SourceLocation KWLoc,
+ TypeSourceInfo *TSInfo,
+ Expr *DimExpr,
+ SourceLocation RParen);
+
+ /// ActOnExpressionTrait - Parsed one of the expression trait support
+ /// pseudo-functions.
+ ExprResult ActOnExpressionTrait(ExpressionTrait OET,
+ SourceLocation KWLoc,
+ Expr *Queried,
+ SourceLocation RParen);
+
+ ExprResult BuildExpressionTrait(ExpressionTrait OET,
+ SourceLocation KWLoc,
+ Expr *Queried,
+ SourceLocation RParen);
+
+ ExprResult ActOnStartCXXMemberReference(Scope *S,
+ Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ ParsedType &ObjectType,
+ bool &MayBePseudoDestructor);
+
+ ExprResult DiagnoseDtorReference(SourceLocation NameLoc, Expr *MemExpr);
+
+ ExprResult BuildPseudoDestructorExpr(Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ const CXXScopeSpec &SS,
+ TypeSourceInfo *ScopeType,
+ SourceLocation CCLoc,
+ SourceLocation TildeLoc,
+ PseudoDestructorTypeStorage DestroyedType,
+ bool HasTrailingLParen);
+
+ ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ CXXScopeSpec &SS,
+ UnqualifiedId &FirstTypeName,
+ SourceLocation CCLoc,
+ SourceLocation TildeLoc,
+ UnqualifiedId &SecondTypeName,
+ bool HasTrailingLParen);
+
+ ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ SourceLocation TildeLoc,
+ const DeclSpec& DS,
+ bool HasTrailingLParen);
+
+ /// MaybeCreateExprWithCleanups - If the current full-expression
+ /// requires any cleanups, surround it with an ExprWithCleanups node.
+ /// Otherwise, just returns the passed-in expression.
+ Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
+ Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
+ ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
+
+ ExprResult ActOnFinishFullExpr(Expr *Expr);
+ StmtResult ActOnFinishFullStmt(Stmt *Stmt);
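+
+ /// Illustrative sketch (assumes a Sema 'S' and a fully built Expr 'E'):
+ /// \code
+ ///   ExprResult Full = S.ActOnFinishFullExpr(E);
+ ///   // Among other finalization, this ends up in MaybeCreateExprWithCleanups,
+ ///   // so 'Full' carries an ExprWithCleanups node only if cleanups are needed.
+ /// \endcode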
+
+ // Marks SS invalid if it represents an incomplete type.
+ bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
+
+ DeclContext *computeDeclContext(QualType T);
+ DeclContext *computeDeclContext(const CXXScopeSpec &SS,
+ bool EnteringContext = false);
+ bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
+ CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
+ bool isUnknownSpecialization(const CXXScopeSpec &SS);
+
+ /// \brief The parser has parsed a global nested-name-specifier '::'.
+ ///
+ /// \param S The scope in which this nested-name-specifier occurs.
+ ///
+ /// \param CCLoc The location of the '::'.
+ ///
+ /// \param SS The nested-name-specifier, which will be updated in-place
+ /// to reflect the parsed nested-name-specifier.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool ActOnCXXGlobalScopeSpecifier(Scope *S, SourceLocation CCLoc,
+ CXXScopeSpec &SS);
+
+ bool isAcceptableNestedNameSpecifier(NamedDecl *SD);
+ NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
+
+ bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
+ SourceLocation IdLoc,
+ IdentifierInfo &II,
+ ParsedType ObjectType);
+
+ bool BuildCXXNestedNameSpecifier(Scope *S,
+ IdentifierInfo &Identifier,
+ SourceLocation IdentifierLoc,
+ SourceLocation CCLoc,
+ QualType ObjectType,
+ bool EnteringContext,
+ CXXScopeSpec &SS,
+ NamedDecl *ScopeLookupResult,
+ bool ErrorRecoveryLookup);
+
+ /// \brief The parser has parsed a nested-name-specifier 'identifier::'.
+ ///
+ /// \param S The scope in which this nested-name-specifier occurs.
+ ///
+ /// \param Identifier The identifier preceding the '::'.
+ ///
+ /// \param IdentifierLoc The location of the identifier.
+ ///
+ /// \param CCLoc The location of the '::'.
+ ///
+ /// \param ObjectType The type of the object, if we're parsing
+ /// nested-name-specifier in a member access expression.
+ ///
+ /// \param EnteringContext Whether we're entering the context nominated by
+ /// this nested-name-specifier.
+ ///
+ /// \param SS The nested-name-specifier, which is both an input
+ /// parameter (the nested-name-specifier before this type) and an
+ /// output parameter (containing the full nested-name-specifier,
+ /// including this new type).
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool ActOnCXXNestedNameSpecifier(Scope *S,
+ IdentifierInfo &Identifier,
+ SourceLocation IdentifierLoc,
+ SourceLocation CCLoc,
+ ParsedType ObjectType,
+ bool EnteringContext,
+ CXXScopeSpec &SS);
+
+ ExprResult ActOnDecltypeExpression(Expr *E);
+
+ bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
+ const DeclSpec &DS,
+ SourceLocation ColonColonLoc);
+
+ bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
+ IdentifierInfo &Identifier,
+ SourceLocation IdentifierLoc,
+ SourceLocation ColonLoc,
+ ParsedType ObjectType,
+ bool EnteringContext);
+
+ /// \brief The parser has parsed a nested-name-specifier
+ /// 'template[opt] template-name < template-args >::'.
+ ///
+ /// \param S The scope in which this nested-name-specifier occurs.
+ ///
+ /// \param SS The nested-name-specifier, which is both an input
+ /// parameter (the nested-name-specifier before this type) and an
+ /// output parameter (containing the full nested-name-specifier,
+ /// including this new type).
+ ///
+ /// \param TemplateKWLoc the location of the 'template' keyword, if any.
+ /// \param TemplateName The template name.
+ /// \param TemplateNameLoc The location of the template name.
+ /// \param LAngleLoc The location of the opening angle bracket ('<').
+ /// \param TemplateArgs The template arguments.
+ /// \param RAngleLoc The location of the closing angle bracket ('>').
+ /// \param CCLoc The location of the '::'.
+ ///
+ /// \param EnteringContext Whether we're entering the context of the
+ /// nested-name-specifier.
+ ///
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool ActOnCXXNestedNameSpecifier(Scope *S,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ TemplateTy Template,
+ SourceLocation TemplateNameLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgs,
+ SourceLocation RAngleLoc,
+ SourceLocation CCLoc,
+ bool EnteringContext);
+
+ /// \brief Given a C++ nested-name-specifier, produce an annotation value
+ /// that the parser can use later to reconstruct the given
+ /// nested-name-specifier.
+ ///
+ /// \param SS A nested-name-specifier.
+ ///
+ /// \returns A pointer containing all of the information in the
+ /// nested-name-specifier \p SS.
+ void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
+
+ /// \brief Given an annotation pointer for a nested-name-specifier, restore
+ /// the nested-name-specifier structure.
+ ///
+ /// \param Annotation The annotation pointer, produced by
+ /// \c SaveNestedNameSpecifierAnnotation().
+ ///
+ /// \param AnnotationRange The source range corresponding to the annotation.
+ ///
+ /// \param SS The nested-name-specifier that will be updated with the contents
+ /// of the annotation pointer.
+ void RestoreNestedNameSpecifierAnnotation(void *Annotation,
+ SourceRange AnnotationRange,
+ CXXScopeSpec &SS);
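+
+ /// Illustrative round trip (assumes a Sema 'S' and a populated CXXScopeSpec
+ /// 'SS'):
+ /// \code
+ ///   void *Annot = S.SaveNestedNameSpecifierAnnotation(SS);
+ ///   // ... later, when the annotation token is consumed again ...
+ ///   CXXScopeSpec Restored;
+ ///   S.RestoreNestedNameSpecifierAnnotation(Annot, SS.getRange(), Restored);
+ /// \endcode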
+
+ bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
+
+ /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
+ /// scope or nested-name-specifier) is parsed, part of a declarator-id.
+ /// After this method is called, according to [C++ 3.4.3p3], names should be
+ /// looked up in the declarator-id's scope, until the declarator is parsed and
+ /// ActOnCXXExitDeclaratorScope is called.
+ /// The 'SS' should be a non-empty valid CXXScopeSpec.
+ bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
+
+ /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
+ /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
+ /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
+ /// Used to indicate that names should revert to being looked up in the
+ /// defining scope.
+ void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
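+
+ /// Illustrative pairing (assumes a Sema 'S', the parser's Scope 'CurScope',
+ /// and a valid CXXScopeSpec 'SS'):
+ /// \code
+ ///   bool EnteredScope = false;
+ ///   if (S.ShouldEnterDeclaratorScope(CurScope, SS)) {
+ ///     S.ActOnCXXEnterDeclaratorScope(CurScope, SS);
+ ///     EnteredScope = true;
+ ///   }
+ ///   // ... parse the rest of the declarator ...
+ ///   if (EnteredScope)
+ ///     S.ActOnCXXExitDeclaratorScope(CurScope, SS);
+ /// \endcode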
+
+ /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
+ /// initializer for the declaration 'Dcl'.
+ /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
+ /// static data member of class X, names should be looked up in the scope of
+ /// class X.
+ void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
+
+ /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
+ /// initializer for the declaration 'Dcl'.
+ void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
+
+ /// \brief Create a new lambda closure type.
+ CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
+ bool KnownDependent = false);
+
+ /// \brief Start the definition of a lambda expression.
+ CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
+ SourceRange IntroducerRange,
+ TypeSourceInfo *MethodType,
+ SourceLocation EndLoc,
+ llvm::ArrayRef<ParmVarDecl *> Params,
+ llvm::Optional<unsigned> ManglingNumber
+ = llvm::Optional<unsigned>(),
+ Decl *ContextDecl = 0);
+
+ /// \brief Introduce the scope for a lambda expression.
+ sema::LambdaScopeInfo *enterLambdaScope(CXXMethodDecl *CallOperator,
+ SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ bool ExplicitParams,
+ bool ExplicitResultType,
+ bool Mutable);
+
+ /// \brief Note that we have finished the explicit captures for the
+ /// given lambda.
+ void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
+
+ /// \brief Introduce the lambda parameters into scope.
+ void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope);
+
+ /// ActOnStartOfLambdaDefinition - This is called just before we start
+ /// parsing the body of a lambda; it analyzes the explicit captures and
+ /// arguments, and sets up various data-structures for the body of the
+ /// lambda.
+ void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
+ Declarator &ParamInfo, Scope *CurScope);
+
+ /// ActOnLambdaError - If there is an error parsing a lambda, this callback
+ /// is invoked to pop the information about the lambda.
+ void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
+ bool IsInstantiation = false);
+
+ /// ActOnLambdaExpr - This is called when the body of a lambda expression
+ /// was successfully completed.
+ ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
+ Scope *CurScope,
+ bool IsInstantiation = false);
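+
+ /// Illustrative call sequence while parsing a lambda (assumes a Sema 'S';
+ /// 'Intro', 'ParamInfo', 'CurScope', 'Body' and 'StartLoc' come from the
+ /// parser):
+ /// \code
+ ///   S.ActOnStartOfLambdaDefinition(Intro, ParamInfo, CurScope);
+ ///   // ... parse the compound-statement body into 'Body' ...
+ ///   ExprResult Lambda = S.ActOnLambdaExpr(StartLoc, Body, CurScope);
+ ///   // On a parse error, call S.ActOnLambdaError(StartLoc, CurScope) instead.
+ /// \endcode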
+
+ /// \brief Define the "body" of the conversion from a lambda object to a
+ /// function pointer.
+ ///
+ /// This routine doesn't actually define a sensible body; rather, it fills
+ /// in the initialization expression needed to copy the lambda object into
+ /// the block, and IR generation actually generates the real body of the
+ /// block pointer conversion.
+ void DefineImplicitLambdaToFunctionPointerConversion(
+ SourceLocation CurrentLoc, CXXConversionDecl *Conv);
+
+ /// \brief Define the "body" of the conversion from a lambda object to a
+ /// block pointer.
+ ///
+ /// This routine doesn't actually define a sensible body; rather, it fills
+ /// in the initialization expression needed to copy the lambda object into
+ /// the block, and IR generation actually generates the real body of the
+ /// block pointer conversion.
+ void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
+ CXXConversionDecl *Conv);
+
+ ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
+ SourceLocation ConvLocation,
+ CXXConversionDecl *Conv,
+ Expr *Src);
+
+ // ParseObjCStringLiteral - Parse Objective-C string literals.
+ ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
+ Expr **Strings,
+ unsigned NumStrings);
+
+ ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
+
+ /// BuildObjCNumericLiteral - Builds an ObjCNumericLiteral AST node for the
+ /// given numeric literal expression. The type of the expression will be
+ /// "NSNumber *" or "id" if NSNumber is unavailable.
+ ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
+ ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
+ bool Value);
+ ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
+
+ ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
+ Expr *IndexExpr,
+ ObjCMethodDecl *getterMethod,
+ ObjCMethodDecl *setterMethod);
+
+ ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
+ ObjCDictionaryElement *Elements,
+ unsigned NumElements);
+
+ ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
+ TypeSourceInfo *EncodedTypeInfo,
+ SourceLocation RParenLoc);
+ ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
+ CXXConversionDecl *Method,
+ bool HadMultipleCandidates);
+
+ ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
+ SourceLocation EncodeLoc,
+ SourceLocation LParenLoc,
+ ParsedType Ty,
+ SourceLocation RParenLoc);
+
+ // ParseObjCSelectorExpression - Build selector expression for @selector
+ ExprResult ParseObjCSelectorExpression(Selector Sel,
+ SourceLocation AtLoc,
+ SourceLocation SelLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc);
+
+ // ParseObjCProtocolExpression - Build protocol expression for @protocol
+ ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
+ SourceLocation AtLoc,
+ SourceLocation ProtoLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc);
+
+ //===--------------------------------------------------------------------===//
+ // C++ Declarations
+ //
+ Decl *ActOnStartLinkageSpecification(Scope *S,
+ SourceLocation ExternLoc,
+ SourceLocation LangLoc,
+ StringRef Lang,
+ SourceLocation LBraceLoc);
+ Decl *ActOnFinishLinkageSpecification(Scope *S,
+ Decl *LinkageSpec,
+ SourceLocation RBraceLoc);
+
+
+ //===--------------------------------------------------------------------===//
+ // C++ Classes
+ //
+ bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
+ const CXXScopeSpec *SS = 0);
+
+ bool ActOnAccessSpecifier(AccessSpecifier Access,
+ SourceLocation ASLoc,
+ SourceLocation ColonLoc,
+ AttributeList *Attrs = 0);
+
+ Decl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
+ Declarator &D,
+ MultiTemplateParamsArg TemplateParameterLists,
+ Expr *BitfieldWidth, const VirtSpecifiers &VS,
+ bool HasDeferredInit);
+ void ActOnCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc,
+ Expr *Init);
+
+ MemInitResult ActOnMemInitializer(Decl *ConstructorD,
+ Scope *S,
+ CXXScopeSpec &SS,
+ IdentifierInfo *MemberOrBase,
+ ParsedType TemplateTypeTy,
+ const DeclSpec &DS,
+ SourceLocation IdLoc,
+ SourceLocation LParenLoc,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation RParenLoc,
+ SourceLocation EllipsisLoc);
+
+ MemInitResult ActOnMemInitializer(Decl *ConstructorD,
+ Scope *S,
+ CXXScopeSpec &SS,
+ IdentifierInfo *MemberOrBase,
+ ParsedType TemplateTypeTy,
+ const DeclSpec &DS,
+ SourceLocation IdLoc,
+ Expr *InitList,
+ SourceLocation EllipsisLoc);
+
+ MemInitResult BuildMemInitializer(Decl *ConstructorD,
+ Scope *S,
+ CXXScopeSpec &SS,
+ IdentifierInfo *MemberOrBase,
+ ParsedType TemplateTypeTy,
+ const DeclSpec &DS,
+ SourceLocation IdLoc,
+ Expr *Init,
+ SourceLocation EllipsisLoc);
+
+ MemInitResult BuildMemberInitializer(ValueDecl *Member,
+ Expr *Init,
+ SourceLocation IdLoc);
+
+ MemInitResult BuildBaseInitializer(QualType BaseType,
+ TypeSourceInfo *BaseTInfo,
+ Expr *Init,
+ CXXRecordDecl *ClassDecl,
+ SourceLocation EllipsisLoc);
+
+ MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
+ Expr *Init,
+ CXXRecordDecl *ClassDecl);
+
+ bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
+ CXXCtorInitializer *Initializer);
+
+ bool SetCtorInitializers(CXXConstructorDecl *Constructor,
+ CXXCtorInitializer **Initializers,
+ unsigned NumInitializers, bool AnyErrors);
+
+ void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
+
+
+ /// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
+ /// mark all the non-trivial destructors of its members and bases as
+ /// referenced.
+ void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
+ CXXRecordDecl *Record);
+
+ /// \brief A class whose vtable has been used within this translation unit,
+ /// paired with the source location at which the first use occurred.
+ typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
+
+ /// \brief The list of vtables that are required but have not yet been
+ /// materialized.
+ SmallVector<VTableUse, 16> VTableUses;
+
+ /// \brief The set of classes whose vtables have been used within
+ /// this translation unit, and a bit that will be true if the vtable is
+ /// required to be emitted (otherwise, it should be emitted only if needed
+ /// by code generation).
+ llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
+
+ /// \brief Load any externally-stored vtable uses.
+ void LoadExternalVTableUses();
+
+ typedef LazyVector<CXXRecordDecl *, ExternalSemaSource,
+ &ExternalSemaSource::ReadDynamicClasses, 2, 2>
+ DynamicClassesType;
+
+ /// \brief A list of all of the dynamic classes in this translation
+ /// unit.
+ DynamicClassesType DynamicClasses;
+
+ /// \brief Note that the vtable for the given class was used at the
+ /// given location.
+ void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
+ bool DefinitionRequired = false);
+
+ /// MarkVirtualMembersReferenced - Will mark all virtual members of the
+ /// given CXXRecordDecl referenced.
+ void MarkVirtualMembersReferenced(SourceLocation Loc,
+ const CXXRecordDecl *RD);
+
+ /// \brief Define all of the vtables that have been used in this
+ /// translation unit and reference any virtual members used by those
+ /// vtables.
+ ///
+ /// \returns true if any work was done, false otherwise.
+ bool DefineUsedVTables();
+
+ void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
+
+ void ActOnMemInitializers(Decl *ConstructorDecl,
+ SourceLocation ColonLoc,
+ CXXCtorInitializer **MemInits,
+ unsigned NumMemInits,
+ bool AnyErrors);
+
+ void CheckCompletedCXXClass(CXXRecordDecl *Record);
+ void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
+ Decl *TagDecl,
+ SourceLocation LBrac,
+ SourceLocation RBrac,
+ AttributeList *AttrList);
+
+ void ActOnReenterTemplateScope(Scope *S, Decl *Template);
+ void ActOnReenterDeclaratorTemplateScope(Scope *S, DeclaratorDecl *D);
+ void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
+ void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
+ void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
+ void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
+ void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
+ void ActOnFinishDelayedMemberInitializers(Decl *Record);
+ void MarkAsLateParsedTemplate(FunctionDecl *FD, bool Flag = true);
+ bool IsInsideALocalClassWithinATemplateFunction();
+
+ Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
+ Expr *AssertExpr,
+ Expr *AssertMessageExpr,
+ SourceLocation RParenLoc);
+
+ FriendDecl *CheckFriendTypeDecl(SourceLocation Loc,
+ SourceLocation FriendLoc,
+ TypeSourceInfo *TSInfo);
+ Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
+ MultiTemplateParamsArg TemplateParams);
+ Decl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
+ MultiTemplateParamsArg TemplateParams);
+
+ QualType CheckConstructorDeclarator(Declarator &D, QualType R,
+ StorageClass& SC);
+ void CheckConstructor(CXXConstructorDecl *Constructor);
+ QualType CheckDestructorDeclarator(Declarator &D, QualType R,
+ StorageClass& SC);
+ bool CheckDestructor(CXXDestructorDecl *Destructor);
+ void CheckConversionDeclarator(Declarator &D, QualType &R,
+ StorageClass& SC);
+ Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
+
+ void CheckExplicitlyDefaultedMethods(CXXRecordDecl *Record);
+ void CheckExplicitlyDefaultedDefaultConstructor(CXXConstructorDecl *Ctor);
+ void CheckExplicitlyDefaultedCopyConstructor(CXXConstructorDecl *Ctor);
+ void CheckExplicitlyDefaultedCopyAssignment(CXXMethodDecl *Method);
+ void CheckExplicitlyDefaultedMoveConstructor(CXXConstructorDecl *Ctor);
+ void CheckExplicitlyDefaultedMoveAssignment(CXXMethodDecl *Method);
+ void CheckExplicitlyDefaultedDestructor(CXXDestructorDecl *Dtor);
+
+ //===--------------------------------------------------------------------===//
+ // C++ Derived Classes
+ //
+
+ /// CheckBaseSpecifier - Check a parsed base specifier and, if it is
+ /// well-formed, build a CXXBaseSpecifier for it.
+ CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
+ SourceRange SpecifierRange,
+ bool Virtual, AccessSpecifier Access,
+ TypeSourceInfo *TInfo,
+ SourceLocation EllipsisLoc);
+
+ BaseResult ActOnBaseSpecifier(Decl *classdecl,
+ SourceRange SpecifierRange,
+ bool Virtual, AccessSpecifier Access,
+ ParsedType basetype,
+ SourceLocation BaseLoc,
+ SourceLocation EllipsisLoc);
+
+ bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
+ unsigned NumBases);
+ void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases,
+ unsigned NumBases);
+
+ bool IsDerivedFrom(QualType Derived, QualType Base);
+ bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths);
+
+ // FIXME: I don't like this name.
+ void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
+
+ bool BasePathInvolvesVirtualBase(const CXXCastPath &BasePath);
+
+ bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
+ SourceLocation Loc, SourceRange Range,
+ CXXCastPath *BasePath = 0,
+ bool IgnoreAccess = false);
+ bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
+ unsigned InaccessibleBaseID,
+ unsigned AmbigiousBaseConvID,
+ SourceLocation Loc, SourceRange Range,
+ DeclarationName Name,
+ CXXCastPath *BasePath);
+
+ std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
+
+ /// CheckOverridingFunctionReturnType - Checks whether the return types are
+ /// covariant, according to C++ [class.virtual]p5.
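+ ///
+ /// For example (an illustrative case of a covariant return type):
+ /// \code
+ ///   struct B { virtual B *clone(); };
+ ///   struct D : B { virtual D *clone(); }; // OK: D* is covariant with B*
+ /// \endcode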
+ bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
+ const CXXMethodDecl *Old);
+
+ /// CheckOverridingFunctionExceptionSpec - Checks whether the exception
+ /// specification of the overriding function is a subset of the base
+ /// function's exception specification.
+ bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
+ const CXXMethodDecl *Old);
+
+ bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
+
+ /// CheckOverrideControl - Check C++0x override control semantics.
+ void CheckOverrideControl(const Decl *D);
+
+ /// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
+ /// function overrides a virtual member function marked 'final', according to
+ /// C++0x [class.virtual]p3.
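+ ///
+ /// For example (illustrative):
+ /// \code
+ ///   struct B { virtual void f() final; };
+ ///   struct D : B { virtual void f(); }; // error: overrides a 'final' function
+ /// \endcode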
+ bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
+ const CXXMethodDecl *Old);
+
+
+ //===--------------------------------------------------------------------===//
+ // C++ Access Control
+ //
+
+ enum AccessResult {
+ AR_accessible,
+ AR_inaccessible,
+ AR_dependent,
+ AR_delayed
+ };
+
+ bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
+ NamedDecl *PrevMemberDecl,
+ AccessSpecifier LexicalAS);
+
+ AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
+ DeclAccessPair FoundDecl);
+ AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
+ DeclAccessPair FoundDecl);
+ AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
+ SourceRange PlacementRange,
+ CXXRecordDecl *NamingClass,
+ DeclAccessPair FoundDecl,
+ bool Diagnose = true);
+ AccessResult CheckConstructorAccess(SourceLocation Loc,
+ CXXConstructorDecl *D,
+ const InitializedEntity &Entity,
+ AccessSpecifier Access,
+ bool IsCopyBindingRefToTemp = false);
+ AccessResult CheckConstructorAccess(SourceLocation Loc,
+ CXXConstructorDecl *D,
+ const InitializedEntity &Entity,
+ AccessSpecifier Access,
+ const PartialDiagnostic &PDiag);
+ AccessResult CheckDestructorAccess(SourceLocation Loc,
+ CXXDestructorDecl *Dtor,
+ const PartialDiagnostic &PDiag,
+ QualType objectType = QualType());
+ AccessResult CheckDirectMemberAccess(SourceLocation Loc,
+ NamedDecl *D,
+ const PartialDiagnostic &PDiag);
+ AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
+ Expr *ObjectExpr,
+ Expr *ArgExpr,
+ DeclAccessPair FoundDecl);
+ AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
+ DeclAccessPair FoundDecl);
+ AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
+ QualType Base, QualType Derived,
+ const CXXBasePath &Path,
+ unsigned DiagID,
+ bool ForceCheck = false,
+ bool ForceUnprivileged = false);
+ void CheckLookupAccess(const LookupResult &R);
+ bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
+ bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
+ AccessSpecifier access,
+ QualType objectType);
+
+ void HandleDependentAccessCheck(const DependentDiagnostic &DD,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+ void PerformDependentDiagnostics(const DeclContext *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+
+ void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
+
+ /// A flag to suppress access checking.
+ bool SuppressAccessChecking;
+
+ /// \brief When true, access checking violations are treated as SFINAE
+ /// failures rather than hard errors.
+ bool AccessCheckingSFINAE;
+
+ /// \brief RAII object used to temporarily suppress access checking.
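+ ///
+ /// A typical use (illustrative sketch; 'S' names some Sema instance):
+ /// \code
+ ///   SuppressAccessChecksRAII Suppress(S, /*Suppress=*/true);
+ ///   // ... perform checks without access-control diagnostics ...
+ ///   Suppress.done();
+ /// \endcode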
+ class SuppressAccessChecksRAII {
+ Sema &S;
+ bool SuppressingAccess;
+
+ public:
+ SuppressAccessChecksRAII(Sema &S, bool Suppress)
+ : S(S), SuppressingAccess(Suppress) {
+ if (Suppress) S.ActOnStartSuppressingAccessChecks();
+ }
+ ~SuppressAccessChecksRAII() {
+ done();
+ }
+ void done() {
+ if (!SuppressingAccess) return;
+ S.ActOnStopSuppressingAccessChecks();
+ SuppressingAccess = false;
+ }
+ };
+
+ void ActOnStartSuppressingAccessChecks();
+ void ActOnStopSuppressingAccessChecks();
+
+ enum AbstractDiagSelID {
+ AbstractNone = -1,
+ AbstractReturnType,
+ AbstractParamType,
+ AbstractVariableType,
+ AbstractFieldType,
+ AbstractArrayType
+ };
+
+ bool RequireNonAbstractType(SourceLocation Loc, QualType T,
+ const PartialDiagnostic &PD);
+ void DiagnoseAbstractType(const CXXRecordDecl *RD);
+
+ bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
+ AbstractDiagSelID SelID = AbstractNone);
+
+ //===--------------------------------------------------------------------===//
+ // C++ Overloaded Operators [C++ 13.5]
+ //
+
+ bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
+
+ bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
+
+ //===--------------------------------------------------------------------===//
+ // C++ Templates [C++ 14]
+ //
+ void FilterAcceptableTemplateNames(LookupResult &R,
+ bool AllowFunctionTemplates = true);
+ bool hasAnyAcceptableTemplateNames(LookupResult &R,
+ bool AllowFunctionTemplates = true);
+
+ void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
+ QualType ObjectType, bool EnteringContext,
+ bool &MemberOfUnknownSpecialization);
+
+ TemplateNameKind isTemplateName(Scope *S,
+ CXXScopeSpec &SS,
+ bool hasTemplateKeyword,
+ UnqualifiedId &Name,
+ ParsedType ObjectType,
+ bool EnteringContext,
+ TemplateTy &Template,
+ bool &MemberOfUnknownSpecialization);
+
+ bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
+ SourceLocation IILoc,
+ Scope *S,
+ const CXXScopeSpec *SS,
+ TemplateTy &SuggestedTemplate,
+ TemplateNameKind &SuggestedKind);
+
+ void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
+ TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
+
+ Decl *ActOnTypeParameter(Scope *S, bool Typename, bool Ellipsis,
+ SourceLocation EllipsisLoc,
+ SourceLocation KeyLoc,
+ IdentifierInfo *ParamName,
+ SourceLocation ParamNameLoc,
+ unsigned Depth, unsigned Position,
+ SourceLocation EqualLoc,
+ ParsedType DefaultArg);
+
+ QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
+ Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
+ unsigned Depth,
+ unsigned Position,
+ SourceLocation EqualLoc,
+ Expr *DefaultArg);
+ Decl *ActOnTemplateTemplateParameter(Scope *S,
+ SourceLocation TmpLoc,
+ TemplateParameterList *Params,
+ SourceLocation EllipsisLoc,
+ IdentifierInfo *ParamName,
+ SourceLocation ParamNameLoc,
+ unsigned Depth,
+ unsigned Position,
+ SourceLocation EqualLoc,
+ ParsedTemplateArgument DefaultArg);
+
+ TemplateParameterList *
+ ActOnTemplateParameterList(unsigned Depth,
+ SourceLocation ExportLoc,
+ SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ Decl **Params, unsigned NumParams,
+ SourceLocation RAngleLoc);
+
+ /// \brief The context in which we are checking a template parameter
+ /// list.
+ enum TemplateParamListContext {
+ TPC_ClassTemplate,
+ TPC_FunctionTemplate,
+ TPC_ClassTemplateMember,
+ TPC_FriendFunctionTemplate,
+ TPC_FriendFunctionTemplateDefinition,
+ TPC_TypeAliasTemplate
+ };
+
+ bool CheckTemplateParameterList(TemplateParameterList *NewParams,
+ TemplateParameterList *OldParams,
+ TemplateParamListContext TPC);
+ TemplateParameterList *
+ MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc,
+ SourceLocation DeclLoc,
+ const CXXScopeSpec &SS,
+ TemplateParameterList **ParamLists,
+ unsigned NumParamLists,
+ bool IsFriend,
+ bool &IsExplicitSpecialization,
+ bool &Invalid);
+
+ DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
+ SourceLocation KWLoc, CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ AttributeList *Attr,
+ TemplateParameterList *TemplateParams,
+ AccessSpecifier AS,
+ SourceLocation ModulePrivateLoc,
+ unsigned NumOuterTemplateParamLists,
+ TemplateParameterList **OuterTemplateParamLists);
+
+ void translateTemplateArguments(const ASTTemplateArgsPtr &In,
+ TemplateArgumentListInfo &Out);
+
+ void NoteAllFoundTemplates(TemplateName Name);
+
+ QualType CheckTemplateIdType(TemplateName Template,
+ SourceLocation TemplateLoc,
+ TemplateArgumentListInfo &TemplateArgs);
+
+ TypeResult
+ ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
+ TemplateTy Template, SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgs,
+ SourceLocation RAngleLoc,
+ bool IsCtorOrDtorName = false);
+
+ /// \brief Parsed an elaborated-type-specifier that refers to a template-id,
+ /// such as \c class T::template apply<U>.
+ ///
+ /// \param TUK The tag-use kind for this elaborated-type-specifier.
+ TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
+ TypeSpecifierType TagSpec,
+ SourceLocation TagLoc,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ TemplateTy TemplateD,
+ SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation RAngleLoc);
+
+
+ ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ LookupResult &R,
+ bool RequiresADL,
+ const TemplateArgumentListInfo *TemplateArgs);
+
+ ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs);
+
+ TemplateNameKind ActOnDependentTemplateName(Scope *S,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ UnqualifiedId &Name,
+ ParsedType ObjectType,
+ bool EnteringContext,
+ TemplateTy &Template);
+
+ DeclResult
+ ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK,
+ SourceLocation KWLoc,
+ SourceLocation ModulePrivateLoc,
+ CXXScopeSpec &SS,
+ TemplateTy Template,
+ SourceLocation TemplateNameLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgs,
+ SourceLocation RAngleLoc,
+ AttributeList *Attr,
+ MultiTemplateParamsArg TemplateParameterLists);
+
+ Decl *ActOnTemplateDeclarator(Scope *S,
+ MultiTemplateParamsArg TemplateParameterLists,
+ Declarator &D);
+
+ Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
+ MultiTemplateParamsArg TemplateParameterLists,
+ Declarator &D);
+
+ bool
+ CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
+ TemplateSpecializationKind NewTSK,
+ NamedDecl *PrevDecl,
+ TemplateSpecializationKind PrevTSK,
+ SourceLocation PrevPtOfInstantiation,
+ bool &SuppressNew);
+
+ bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
+ const TemplateArgumentListInfo &ExplicitTemplateArgs,
+ LookupResult &Previous);
+
+ bool CheckFunctionTemplateSpecialization(FunctionDecl *FD,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ LookupResult &Previous);
+ bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
+
+ DeclResult
+ ActOnExplicitInstantiation(Scope *S,
+ SourceLocation ExternLoc,
+ SourceLocation TemplateLoc,
+ unsigned TagSpec,
+ SourceLocation KWLoc,
+ const CXXScopeSpec &SS,
+ TemplateTy Template,
+ SourceLocation TemplateNameLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgs,
+ SourceLocation RAngleLoc,
+ AttributeList *Attr);
+
+ DeclResult
+ ActOnExplicitInstantiation(Scope *S,
+ SourceLocation ExternLoc,
+ SourceLocation TemplateLoc,
+ unsigned TagSpec,
+ SourceLocation KWLoc,
+ CXXScopeSpec &SS,
+ IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ AttributeList *Attr);
+
+ DeclResult ActOnExplicitInstantiation(Scope *S,
+ SourceLocation ExternLoc,
+ SourceLocation TemplateLoc,
+ Declarator &D);
+
+ TemplateArgumentLoc
+ SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
+ SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc,
+ Decl *Param,
+ SmallVectorImpl<TemplateArgument> &Converted);
+
+ /// \brief Specifies the context in which a particular template
+ /// argument is being checked.
+ enum CheckTemplateArgumentKind {
+ /// \brief The template argument was specified in the code or was
+ /// instantiated with some deduced template arguments.
+ CTAK_Specified,
+
+ /// \brief The template argument was deduced via template argument
+ /// deduction.
+ CTAK_Deduced,
+
+ /// \brief The template argument was deduced from an array bound
+ /// via template argument deduction.
+ CTAK_DeducedFromArrayBound
+ };
+
+ bool CheckTemplateArgument(NamedDecl *Param,
+ const TemplateArgumentLoc &Arg,
+ NamedDecl *Template,
+ SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc,
+ unsigned ArgumentPackIndex,
+ SmallVectorImpl<TemplateArgument> &Converted,
+ CheckTemplateArgumentKind CTAK = CTAK_Specified);
+
+ /// \brief Check that the given template arguments can be provided to
+ /// the given template, converting the arguments along the way.
+ ///
+ /// \param Template The template to which the template arguments are being
+ /// provided.
+ ///
+ /// \param TemplateLoc The location of the template name in the source.
+ ///
+ /// \param TemplateArgs The list of template arguments. If the template is
+ /// a template template parameter, this function may extend the set of
+ /// template arguments to also include substituted, defaulted template
+ /// arguments.
+ ///
+ /// \param PartialTemplateArgs True if the list of template arguments is
+ /// intentionally partial, e.g., because we're checking just the initial
+ /// set of template arguments.
+ ///
+ /// \param Converted Will receive the converted, canonicalized template
+ /// arguments.
+ ///
+ /// \param ExpansionIntoFixedList If non-NULL, will be set true to indicate
+ /// when the template arguments contain a pack expansion that is being
+ /// expanded into a fixed parameter list.
+ ///
+ /// \returns True if an error occurred, false otherwise.
+ bool CheckTemplateArgumentList(TemplateDecl *Template,
+ SourceLocation TemplateLoc,
+ TemplateArgumentListInfo &TemplateArgs,
+ bool PartialTemplateArgs,
+ SmallVectorImpl<TemplateArgument> &Converted,
+ bool *ExpansionIntoFixedList = 0);
+
+ bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
+ const TemplateArgumentLoc &Arg,
+ SmallVectorImpl<TemplateArgument> &Converted);
+
+ bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
+ TypeSourceInfo *Arg);
+ ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
+ QualType InstantiatedParamType, Expr *Arg,
+ TemplateArgument &Converted,
+ CheckTemplateArgumentKind CTAK = CTAK_Specified);
+ bool CheckTemplateArgument(TemplateTemplateParmDecl *Param,
+ const TemplateArgumentLoc &Arg);
+
+ ExprResult
+ BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
+ QualType ParamType,
+ SourceLocation Loc);
+ ExprResult
+ BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
+ SourceLocation Loc);
+
+ /// \brief Enumeration describing how template parameter lists are compared
+ /// for equality.
+ enum TemplateParameterListEqualKind {
+ /// \brief We are matching the template parameter lists of two templates
+ /// that might be redeclarations.
+ ///
+ /// \code
+ /// template<typename T> struct X;
+ /// template<typename T> struct X;
+ /// \endcode
+ TPL_TemplateMatch,
+
+ /// \brief We are matching the template parameter lists of two template
+ /// template parameters as part of matching the template parameter lists
+ /// of two templates that might be redeclarations.
+ ///
+ /// \code
+ /// template<template<int I> class TT> struct X;
+ /// template<template<int Value> class Other> struct X;
+ /// \endcode
+ TPL_TemplateTemplateParmMatch,
+
+ /// \brief We are matching the template parameter lists of a template
+ /// template argument against the template parameter lists of a template
+ /// template parameter.
+ ///
+ /// \code
+ /// template<template<int Value> class Metafun> struct X;
+ /// template<int Value> struct integer_c;
+ /// X<integer_c> xic;
+ /// \endcode
+ TPL_TemplateTemplateArgumentMatch
+ };
+
+ bool TemplateParameterListsAreEqual(TemplateParameterList *New,
+ TemplateParameterList *Old,
+ bool Complain,
+ TemplateParameterListEqualKind Kind,
+ SourceLocation TemplateArgLoc
+ = SourceLocation());
+
+ bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
+
+ /// \brief Called when the parser has parsed a C++ typename
+ /// specifier, e.g., "typename T::type".
+ ///
+ /// \param S The scope in which this typename type occurs.
+ /// \param TypenameLoc the location of the 'typename' keyword
+ /// \param SS the nested-name-specifier following the typename (e.g., 'T::').
+ /// \param II the identifier we're retrieving (e.g., 'type' in the example).
+ /// \param IdLoc the location of the identifier.
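+ ///
+ /// For example (illustrative):
+ /// \code
+ ///   template<typename T> void f() {
+ ///     typename T::type x; // 'typename T::type' is handled by this callback
+ ///   }
+ /// \endcode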
+ TypeResult
+ ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS, const IdentifierInfo &II,
+ SourceLocation IdLoc);
+
+ /// \brief Called when the parser has parsed a C++ typename
+ /// specifier that ends in a template-id, e.g.,
+ /// "typename MetaFun::template apply<T1, T2>".
+ ///
+ /// \param S The scope in which this typename type occurs.
+ /// \param TypenameLoc the location of the 'typename' keyword
+ /// \param SS the nested-name-specifier following the typename (e.g., 'T::').
+ /// \param TemplateLoc the location of the 'template' keyword, if any.
+ /// \param TemplateName The template name.
+ /// \param TemplateNameLoc The location of the template name.
+ /// \param LAngleLoc The location of the opening angle bracket ('<').
+ /// \param TemplateArgs The template arguments.
+ /// \param RAngleLoc The location of the closing angle bracket ('>').
+ TypeResult
+ ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS,
+ SourceLocation TemplateLoc,
+ TemplateTy Template,
+ SourceLocation TemplateNameLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgs,
+ SourceLocation RAngleLoc);
+
+ QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
+ SourceLocation KeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ const IdentifierInfo &II,
+ SourceLocation IILoc);
+
+ TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
+ SourceLocation Loc,
+ DeclarationName Name);
+ bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
+
+ ExprResult RebuildExprInCurrentInstantiation(Expr *E);
+ bool RebuildTemplateParamsInCurrentInstantiation(
+ TemplateParameterList *Params);
+
+ std::string
+ getTemplateArgumentBindingsText(const TemplateParameterList *Params,
+ const TemplateArgumentList &Args);
+
+ std::string
+ getTemplateArgumentBindingsText(const TemplateParameterList *Params,
+ const TemplateArgument *Args,
+ unsigned NumArgs);
+
+ //===--------------------------------------------------------------------===//
+ // C++ Variadic Templates (C++0x [temp.variadic])
+ //===--------------------------------------------------------------------===//
+
+ /// \brief The context in which an unexpanded parameter pack is
+ /// being diagnosed.
+ ///
+ /// Note that the values of this enumeration line up with the first
+ /// argument to the \c err_unexpanded_parameter_pack diagnostic.
+ enum UnexpandedParameterPackContext {
+ /// \brief An arbitrary expression.
+ UPPC_Expression = 0,
+
+ /// \brief The base type of a class type.
+ UPPC_BaseType,
+
+ /// \brief The type of an arbitrary declaration.
+ UPPC_DeclarationType,
+
+ /// \brief The type of a data member.
+ UPPC_DataMemberType,
+
+ /// \brief The size of a bit-field.
+ UPPC_BitFieldWidth,
+
+ /// \brief The expression in a static assertion.
+ UPPC_StaticAssertExpression,
+
+ /// \brief The fixed underlying type of an enumeration.
+ UPPC_FixedUnderlyingType,
+
+ /// \brief The enumerator value.
+ UPPC_EnumeratorValue,
+
+ /// \brief A using declaration.
+ UPPC_UsingDeclaration,
+
+ /// \brief A friend declaration.
+ UPPC_FriendDeclaration,
+
+ /// \brief A declaration qualifier.
+ UPPC_DeclarationQualifier,
+
+ /// \brief An initializer.
+ UPPC_Initializer,
+
+ /// \brief A default argument.
+ UPPC_DefaultArgument,
+
+ /// \brief The type of a non-type template parameter.
+ UPPC_NonTypeTemplateParameterType,
+
+ /// \brief The type of an exception.
+ UPPC_ExceptionType,
+
+ /// \brief Partial specialization.
+ UPPC_PartialSpecialization,
+
+ /// \brief Microsoft __if_exists.
+ UPPC_IfExists,
+
+ /// \brief Microsoft __if_not_exists.
+ UPPC_IfNotExists
+ };
+
+ /// \brief Diagnose unexpanded parameter packs.
+ ///
+ /// \param Loc The location at which we should emit the diagnostic.
+ ///
+ /// \param UPPC The context in which we are diagnosing unexpanded
+ /// parameter packs.
+ ///
+ /// \param Unexpanded the set of unexpanded parameter packs.
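+ ///
+ /// For example (illustrative), the second declaration below leaves 'Ts'
+ /// unexpanded and is diagnosed:
+ /// \code
+ ///   template<typename ...Ts> void f(Ts ...args); // OK: 'Ts' is expanded
+ ///   template<typename ...Ts> void g(Ts arg);     // error: unexpanded pack 'Ts'
+ /// \endcode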
+ void DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
+ UnexpandedParameterPackContext UPPC,
+ ArrayRef<UnexpandedParameterPack> Unexpanded);
+
+ /// \brief If the given type contains an unexpanded parameter pack,
+ /// diagnose the error.
+ ///
+ /// \param Loc The source location where a diagnostic should be emitted.
+ ///
+ /// \param T The type that is being checked for unexpanded parameter
+ /// packs.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
+ UnexpandedParameterPackContext UPPC);
+
+ /// \brief If the given expression contains an unexpanded parameter
+ /// pack, diagnose the error.
+ ///
+ /// \param E The expression that is being checked for unexpanded
+ /// parameter packs.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool DiagnoseUnexpandedParameterPack(Expr *E,
+ UnexpandedParameterPackContext UPPC = UPPC_Expression);
+
+ /// \brief If the given nested-name-specifier contains an unexpanded
+ /// parameter pack, diagnose the error.
+ ///
+ /// \param SS The nested-name-specifier that is being checked for
+ /// unexpanded parameter packs.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
+ UnexpandedParameterPackContext UPPC);
+
+ /// \brief If the given name contains an unexpanded parameter pack,
+ /// diagnose the error.
+ ///
+ /// \param NameInfo The name (with source location information) that
+ /// is being checked for unexpanded parameter packs.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
+ UnexpandedParameterPackContext UPPC);
+
+ /// \brief If the given template name contains an unexpanded parameter pack,
+ /// diagnose the error.
+ ///
+ /// \param Loc The location of the template name.
+ ///
+ /// \param Template The template name that is being checked for unexpanded
+ /// parameter packs.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
+ TemplateName Template,
+ UnexpandedParameterPackContext UPPC);
+
+ /// \brief If the given template argument contains an unexpanded parameter
+ /// pack, diagnose the error.
+ ///
+ /// \param Arg The template argument that is being checked for unexpanded
+ /// parameter packs.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
+ UnexpandedParameterPackContext UPPC);
+
+ /// \brief Collect the set of unexpanded parameter packs within the given
+ /// template argument.
+ ///
+ /// \param Arg The template argument that will be traversed to find
+ /// unexpanded parameter packs.
+ void collectUnexpandedParameterPacks(TemplateArgument Arg,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
+
+ /// \brief Collect the set of unexpanded parameter packs within the given
+ /// template argument.
+ ///
+ /// \param Arg The template argument that will be traversed to find
+ /// unexpanded parameter packs.
+ void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
+
+ /// \brief Collect the set of unexpanded parameter packs within the given
+ /// type.
+ ///
+ /// \param T The type that will be traversed to find
+ /// unexpanded parameter packs.
+ void collectUnexpandedParameterPacks(QualType T,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
+
+ /// \brief Collect the set of unexpanded parameter packs within the given
+ /// type.
+ ///
+ /// \param TL The type that will be traversed to find
+ /// unexpanded parameter packs.
+ void collectUnexpandedParameterPacks(TypeLoc TL,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
+
+ /// \brief Collect the set of unexpanded parameter packs within the given
+ /// nested-name-specifier.
+ ///
+ /// \param SS The nested-name-specifier that will be traversed to find
+ /// unexpanded parameter packs.
+ void collectUnexpandedParameterPacks(CXXScopeSpec &SS,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
+
+ /// \brief Collect the set of unexpanded parameter packs within the given
+ /// name.
+ ///
+ /// \param NameInfo The name that will be traversed to find
+ /// unexpanded parameter packs.
+ void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
+
+ /// \brief Invoked when parsing a template argument followed by an
+ /// ellipsis, which creates a pack expansion.
+ ///
+ /// \param Arg The template argument preceding the ellipsis, which
+ /// may already be invalid.
+ ///
+ /// \param EllipsisLoc The location of the ellipsis.
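+ ///
+ /// For example (illustrative), 'Ts...' below is such a pack expansion:
+ /// \code
+ ///   template<typename ...Ts> struct tuple;
+ ///   template<typename ...Ts> struct wrap { typedef tuple<Ts...> type; };
+ /// \endcode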
+ ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
+ SourceLocation EllipsisLoc);
+
+ /// \brief Invoked when parsing a type followed by an ellipsis, which
+ /// creates a pack expansion.
+ ///
+ /// \param Type The type preceding the ellipsis, which will become
+ /// the pattern of the pack expansion.
+ ///
+ /// \param EllipsisLoc The location of the ellipsis.
+ TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
+
+ /// \brief Construct a pack expansion type from the pattern of the pack
+ /// expansion.
+ TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
+ SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions);
+
+ /// \brief Construct a pack expansion type from the pattern of the pack
+ /// expansion.
+ QualType CheckPackExpansion(QualType Pattern,
+ SourceRange PatternRange,
+ SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions);
+
+ /// \brief Invoked when parsing an expression followed by an ellipsis, which
+ /// creates a pack expansion.
+ ///
+ /// \param Pattern The expression preceding the ellipsis, which will become
+ /// the pattern of the pack expansion.
+ ///
+ /// \param EllipsisLoc The location of the ellipsis.
+ ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
+
+ /// \brief Invoked when parsing an expression followed by an ellipsis, which
+ /// creates a pack expansion.
+ ///
+ /// \param Pattern The expression preceding the ellipsis, which will become
+ /// the pattern of the pack expansion.
+ ///
+ /// \param EllipsisLoc The location of the ellipsis.
+ ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions);
+
+ /// \brief Determine whether we could expand a pack expansion with the
+ /// given set of parameter packs into separate arguments by repeatedly
+ /// transforming the pattern.
+ ///
+ /// \param EllipsisLoc The location of the ellipsis that identifies the
+ /// pack expansion.
+ ///
+ /// \param PatternRange The source range that covers the entire pattern of
+ /// the pack expansion.
+ ///
+ /// \param Unexpanded The set of unexpanded parameter packs within the
+ /// pattern.
+ ///
+ /// \param NumUnexpanded The number of unexpanded parameter packs in
+ /// \p Unexpanded.
+ ///
+ /// \param ShouldExpand Will be set to \c true if the transformer should
+ /// expand the corresponding pack expansions into separate arguments. When
+ /// set, \c NumExpansions must also be set.
+ ///
+ /// \param RetainExpansion Whether the caller should add an unexpanded
+ /// pack expansion after all of the expanded arguments. This is used
+ /// when extending explicitly-specified template argument packs per
+ /// C++0x [temp.arg.explicit]p9.
+ ///
+ /// \param NumExpansions The number of separate arguments that will be in
+ /// the expanded form of the corresponding pack expansion. This is both an
+ /// input and an output parameter, which can be set by the caller if the
+ /// number of expansions is known a priori (e.g., due to a prior substitution)
+ /// and will be set by the callee when the number of expansions is known.
+ /// The callee must set this value when \c ShouldExpand is \c true; it may
+ /// set this value in other cases.
+ ///
+ /// \returns true if an error occurred (e.g., because the parameter packs
+ /// are to be instantiated with arguments of different lengths), false
+ /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
+ /// must be set.
+ bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
+ SourceRange PatternRange,
+ llvm::ArrayRef<UnexpandedParameterPack> Unexpanded,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool &ShouldExpand,
+ bool &RetainExpansion,
+ llvm::Optional<unsigned> &NumExpansions);
+
+ /// \brief Determine the number of arguments in the given pack expansion
+ /// type.
+ ///
+ /// This routine assumes that the pack expansion type can be
+ /// expanded and that the number of arguments in the expansion is
+ /// consistent across all of the unexpanded parameter packs in its pattern.
+ unsigned getNumArgumentsInExpansion(QualType T,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+
+ /// \brief Determine whether the given declarator contains any unexpanded
+ /// parameter packs.
+ ///
+ /// This routine is used by the parser to disambiguate function declarators
+ /// with an ellipsis prior to the ')', e.g.,
+ ///
+ /// \code
+ /// void f(T...);
+ /// \endcode
+ ///
+ /// To determine whether we have an (unnamed) function parameter pack or
+ /// a variadic function.
+ ///
+ /// \returns true if the declarator contains any unexpanded parameter packs,
+ /// false otherwise.
+ bool containsUnexpandedParameterPacks(Declarator &D);
+
+ //===--------------------------------------------------------------------===//
+ // C++ Template Argument Deduction (C++ [temp.deduct])
+ //===--------------------------------------------------------------------===//
+
+ /// \brief Describes the result of template argument deduction.
+ ///
+ /// The TemplateDeductionResult enumeration describes the result of
+ /// template argument deduction, as returned from
+ /// DeduceTemplateArguments(). The separate TemplateDeductionInfo
+ /// structure provides additional information about the results of
+ /// template argument deduction, e.g., the deduced template argument
+ /// list (if successful) or the specific template parameters or
+ /// deduced arguments that were involved in the failure.
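+ ///
+ /// A caller typically inspects the result, e.g. (illustrative sketch, given
+ /// an already-constructed sema::TemplateDeductionInfo 'Info' and the
+ /// caller's template, arguments, and output declaration):
+ /// \code
+ ///   if (DeduceTemplateArguments(FunctionTemplate, &ExplicitArgs, Args,
+ ///                               Specialization, Info) != TDK_Success)
+ ///     return true; // deduction failed; Info describes why
+ /// \endcode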
+ enum TemplateDeductionResult {
+ /// \brief Template argument deduction was successful.
+ TDK_Success = 0,
+ /// \brief Template argument deduction exceeded the maximum template
+ /// instantiation depth (which has already been diagnosed).
+ TDK_InstantiationDepth,
+ /// \brief Template argument deduction did not deduce a value
+ /// for every template parameter.
+ TDK_Incomplete,
+ /// \brief Template argument deduction produced inconsistent
+ /// deduced values for the given template parameter.
+ TDK_Inconsistent,
+ /// \brief Template argument deduction failed due to inconsistent
+ /// cv-qualifiers on a template parameter type that would
+ /// otherwise be deduced, e.g., we tried to deduce T in "const T"
+ /// but were given a non-const "X".
+ TDK_Underqualified,
+ /// \brief Substitution of the deduced template argument values
+ /// resulted in an error.
+ TDK_SubstitutionFailure,
+ /// \brief Substitution of the deduced template argument values
+ /// into a non-deduced context produced a type or value that does not
+ /// match the original template arguments provided.
+ TDK_NonDeducedMismatch,
+ /// \brief When performing template argument deduction for a function
+ /// template, there were too many call arguments.
+ TDK_TooManyArguments,
+ /// \brief When performing template argument deduction for a function
+ /// template, there were too few call arguments.
+ TDK_TooFewArguments,
+ /// \brief The explicitly-specified template arguments were not valid
+ /// template arguments for the given template.
+ TDK_InvalidExplicitArguments,
+ /// \brief The arguments included an overloaded function name that could
+ /// not be resolved to a suitable function.
+ TDK_FailedOverloadResolution
+ };
+
+ TemplateDeductionResult
+ DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
+ const TemplateArgumentList &TemplateArgs,
+ sema::TemplateDeductionInfo &Info);
+
+ TemplateDeductionResult
+ SubstituteExplicitTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
+ TemplateArgumentListInfo &ExplicitTemplateArgs,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ SmallVectorImpl<QualType> &ParamTypes,
+ QualType *FunctionType,
+ sema::TemplateDeductionInfo &Info);
+
+ /// \brief A function argument from which we performed template argument
+ /// deduction for a call.
+ struct OriginalCallArg {
+ OriginalCallArg(QualType OriginalParamType,
+ unsigned ArgIdx,
+ QualType OriginalArgType)
+ : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx),
+ OriginalArgType(OriginalArgType) { }
+
+ QualType OriginalParamType;
+ unsigned ArgIdx;
+ QualType OriginalArgType;
+ };
+
+ TemplateDeductionResult
+ FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ unsigned NumExplicitlySpecified,
+ FunctionDecl *&Specialization,
+ sema::TemplateDeductionInfo &Info,
+ SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = 0);
+
+ TemplateDeductionResult
+ DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ llvm::ArrayRef<Expr *> Args,
+ FunctionDecl *&Specialization,
+ sema::TemplateDeductionInfo &Info);
+
+ TemplateDeductionResult
+ DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ QualType ArgFunctionType,
+ FunctionDecl *&Specialization,
+ sema::TemplateDeductionInfo &Info);
+
+ TemplateDeductionResult
+ DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
+ QualType ToType,
+ CXXConversionDecl *&Specialization,
+ sema::TemplateDeductionInfo &Info);
+
+ TemplateDeductionResult
+ DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ FunctionDecl *&Specialization,
+ sema::TemplateDeductionInfo &Info);
+
+ /// \brief Result type of DeduceAutoType.
+ enum DeduceAutoResult {
+ DAR_Succeeded,
+ DAR_Failed,
+ DAR_FailedAlreadyDiagnosed
+ };
+
+ DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer,
+ TypeSourceInfo *&Result);
+ void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
+
+ FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
+ FunctionTemplateDecl *FT2,
+ SourceLocation Loc,
+ TemplatePartialOrderingContext TPOC,
+ unsigned NumCallArguments);
+ UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin,
+ UnresolvedSetIterator SEnd,
+ TemplatePartialOrderingContext TPOC,
+ unsigned NumCallArguments,
+ SourceLocation Loc,
+ const PartialDiagnostic &NoneDiag,
+ const PartialDiagnostic &AmbigDiag,
+ const PartialDiagnostic &CandidateDiag,
+ bool Complain = true,
+ QualType TargetType = QualType());
+
+ ClassTemplatePartialSpecializationDecl *
+ getMoreSpecializedPartialSpecialization(
+ ClassTemplatePartialSpecializationDecl *PS1,
+ ClassTemplatePartialSpecializationDecl *PS2,
+ SourceLocation Loc);
+
+ void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
+ bool OnlyDeduced,
+ unsigned Depth,
+ llvm::SmallBitVector &Used);
+ void MarkDeducedTemplateParameters(FunctionTemplateDecl *FunctionTemplate,
+ llvm::SmallBitVector &Deduced) {
+ return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
+ }
+ static void MarkDeducedTemplateParameters(ASTContext &Ctx,
+ FunctionTemplateDecl *FunctionTemplate,
+ llvm::SmallBitVector &Deduced);
+
+ //===--------------------------------------------------------------------===//
+ // C++ Template Instantiation
+ //
+
+ MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D,
+ const TemplateArgumentList *Innermost = 0,
+ bool RelativeToPrimary = false,
+ const FunctionDecl *Pattern = 0);
+
+ /// \brief A template instantiation that is currently in progress.
+ struct ActiveTemplateInstantiation {
+ /// \brief The kind of template instantiation we are performing
+ enum InstantiationKind {
+ /// We are instantiating a template declaration. The entity is
+ /// the declaration we're instantiating (e.g., a CXXRecordDecl).
+ TemplateInstantiation,
+
+ /// We are instantiating a default argument for a template
+ /// parameter. The Entity is the template, and
+ /// TemplateArgs/NumTemplateArgs provides the template
+ /// arguments as specified.
+ /// FIXME: Use a TemplateArgumentList
+ DefaultTemplateArgumentInstantiation,
+
+ /// We are instantiating a default argument for a function.
+ /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
+ /// provides the template arguments as specified.
+ DefaultFunctionArgumentInstantiation,
+
+ /// We are substituting explicit template arguments provided for
+ /// a function template. The entity is a FunctionTemplateDecl.
+ ExplicitTemplateArgumentSubstitution,
+
+ /// We are substituting template arguments determined as part of
+ /// template argument deduction for either a class template
+ /// partial specialization or a function template. The
+ /// Entity is either a ClassTemplatePartialSpecializationDecl or
+ /// a FunctionTemplateDecl.
+ DeducedTemplateArgumentSubstitution,
+
+ /// We are substituting prior template arguments into a new
+ /// template parameter. The template parameter itself is either a
+ /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
+ PriorTemplateArgumentSubstitution,
+
+ /// We are checking the validity of a default template argument that
+ /// has been used when naming a template-id.
+ DefaultTemplateArgumentChecking
+ } Kind;
+
+ /// \brief The point of instantiation within the source code.
+ SourceLocation PointOfInstantiation;
+
+ /// \brief The template (or partial specialization) in which we are
+ /// performing the instantiation, for substitutions of prior template
+ /// arguments.
+ NamedDecl *Template;
+
+ /// \brief The entity that is being instantiated.
+ uintptr_t Entity;
+
+ /// \brief The list of template arguments we are substituting, if they
+ /// are not part of the entity.
+ const TemplateArgument *TemplateArgs;
+
+ /// \brief The number of template arguments in TemplateArgs.
+ unsigned NumTemplateArgs;
+
+ /// \brief The template deduction info object associated with the
+ /// substitution or checking of explicit or deduced template arguments.
+ sema::TemplateDeductionInfo *DeductionInfo;
+
+ /// \brief The source range that covers the construct that caused
+ /// the instantiation, e.g., the template-id that causes a class
+ /// template instantiation.
+ SourceRange InstantiationRange;
+
+ ActiveTemplateInstantiation()
+ : Kind(TemplateInstantiation), Template(0), Entity(0), TemplateArgs(0),
+ NumTemplateArgs(0), DeductionInfo(0) {}
+
+ /// \brief Determines whether this template is an actual instantiation
+ /// that should be counted toward the maximum instantiation depth.
+ bool isInstantiationRecord() const;
+
+ friend bool operator==(const ActiveTemplateInstantiation &X,
+ const ActiveTemplateInstantiation &Y) {
+ if (X.Kind != Y.Kind)
+ return false;
+
+ if (X.Entity != Y.Entity)
+ return false;
+
+ switch (X.Kind) {
+ case TemplateInstantiation:
+ return true;
+
+ case PriorTemplateArgumentSubstitution:
+ case DefaultTemplateArgumentChecking:
+ if (X.Template != Y.Template)
+ return false;
+
+ // Fall through
+
+ case DefaultTemplateArgumentInstantiation:
+ case ExplicitTemplateArgumentSubstitution:
+ case DeducedTemplateArgumentSubstitution:
+ case DefaultFunctionArgumentInstantiation:
+ return X.TemplateArgs == Y.TemplateArgs;
+
+ }
+
+ llvm_unreachable("Invalid InstantiationKind!");
+ }
+
+ friend bool operator!=(const ActiveTemplateInstantiation &X,
+ const ActiveTemplateInstantiation &Y) {
+ return !(X == Y);
+ }
+ };
+
+ /// \brief List of active template instantiations.
+ ///
+ /// This vector is treated as a stack. As one template instantiation
+ /// requires another template instantiation, additional
+ /// instantiations are pushed onto the stack up to a
+ /// user-configurable limit LangOptions::InstantiationDepth.
+ SmallVector<ActiveTemplateInstantiation, 16>
+ ActiveTemplateInstantiations;
+
+ /// \brief Whether we are in a SFINAE context that is not associated with
+ /// template instantiation.
+ ///
+ /// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
+ /// of a template instantiation or template argument deduction.
+ bool InNonInstantiationSFINAEContext;
+
+ /// \brief The number of ActiveTemplateInstantiation entries in
+ /// \c ActiveTemplateInstantiations that are not actual instantiations and,
+ /// therefore, should not be counted as part of the instantiation depth.
+ unsigned NonInstantiationEntries;
+
+ /// \brief The last template from which a template instantiation
+ /// error or warning was produced.
+ ///
+ /// This value is used to suppress printing of redundant template
+ /// instantiation backtraces when there are multiple errors in the
+ /// same instantiation. FIXME: Does this belong in Sema? It's tough
+ /// to implement it anywhere else.
+ ActiveTemplateInstantiation LastTemplateInstantiationErrorContext;
+
+ /// \brief The current index into pack expansion arguments that will be
+ /// used for substitution of parameter packs.
+ ///
+ /// The pack expansion index will be -1 to indicate that parameter packs
+ /// should be instantiated as themselves. Otherwise, the index specifies
+ /// which argument within the parameter pack will be used for substitution.
+ int ArgumentPackSubstitutionIndex;
+
+ /// \brief RAII object used to change the argument pack substitution index
+ /// within a \c Sema object.
+ ///
+ /// See \c ArgumentPackSubstitutionIndex for more information.
+ class ArgumentPackSubstitutionIndexRAII {
+ Sema &Self;
+ int OldSubstitutionIndex;
+
+ public:
+ ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
+ : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
+ Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
+ }
+
+ ~ArgumentPackSubstitutionIndexRAII() {
+ Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
+ }
+ };
+
+ friend class ArgumentPackSubstitutionRAII;
+
+ /// \brief The stack of call expressions undergoing template instantiation.
+ ///
+ /// The top of this stack is used by a fixit instantiating unresolved
+ /// function calls to fix the AST to match the textual change it prints.
+ SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;
+
+ /// \brief For each declaration that involved template argument deduction, the
+ /// set of diagnostics that were suppressed during that template argument
+ /// deduction.
+ ///
+ /// FIXME: Serialize this structure to the AST file.
+ llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
+ SuppressedDiagnostics;
+
+ /// \brief A stack object to be created when performing template
+ /// instantiation.
+ ///
+ /// Construction of an object of type \c InstantiatingTemplate
+ /// pushes the current instantiation onto the stack of active
+ /// instantiations. If the size of this stack exceeds the maximum
+ /// number of recursive template instantiations, construction
+ /// produces an error and the object evaluates to true.
+ ///
+ /// Destruction of this object will pop the named instantiation off
+ /// the stack.
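+ ///
+ /// A typical use (illustrative sketch; PointOfInstantiation and Entity
+ /// stand for the caller's location and instantiated declaration):
+ /// \code
+ ///   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
+ ///   if (Inst) // too deep; an error has already been produced
+ ///     return true;
+ /// \endcode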
+ struct InstantiatingTemplate {
+ /// \brief Note that we are instantiating a class template,
+ /// function template, or a member thereof.
+ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ Decl *Entity,
+ SourceRange InstantiationRange = SourceRange());
+
+ /// \brief Note that we are instantiating a default argument in a
+ /// template-id.
+ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ TemplateDecl *Template,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ SourceRange InstantiationRange = SourceRange());
+
+ /// \brief Note that we are instantiating a default argument in a
+ /// template-id.
+ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ FunctionTemplateDecl *FunctionTemplate,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ ActiveTemplateInstantiation::InstantiationKind Kind,
+ sema::TemplateDeductionInfo &DeductionInfo,
+ SourceRange InstantiationRange = SourceRange());
+
+ /// \brief Note that we are instantiating as part of template
+ /// argument deduction for a class template partial
+ /// specialization.
+ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ ClassTemplatePartialSpecializationDecl *PartialSpec,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ sema::TemplateDeductionInfo &DeductionInfo,
+ SourceRange InstantiationRange = SourceRange());
+
+ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ ParmVarDecl *Param,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ SourceRange InstantiationRange = SourceRange());
+
+ /// \brief Note that we are substituting prior template arguments into a
+ /// non-type or template template parameter.
+ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ NamedDecl *Template,
+ NonTypeTemplateParmDecl *Param,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ SourceRange InstantiationRange);
+
+ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ NamedDecl *Template,
+ TemplateTemplateParmDecl *Param,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ SourceRange InstantiationRange);
+
+ /// \brief Note that we are checking the default template argument
+ /// against the template parameter for a given template-id.
+ InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ TemplateDecl *Template,
+ NamedDecl *Param,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ SourceRange InstantiationRange);
+
+
+ /// \brief Note that we have finished instantiating this template.
+ void Clear();
+
+ ~InstantiatingTemplate() { Clear(); }
+
+ /// \brief Determines whether we have exceeded the maximum
+ /// recursive template instantiations.
+ operator bool() const { return Invalid; }
+
+ private:
+ Sema &SemaRef;
+ bool Invalid;
+ bool SavedInNonInstantiationSFINAEContext;
+ bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
+ SourceRange InstantiationRange);
+
+ InstantiatingTemplate(const InstantiatingTemplate&); // not implemented
+
+ InstantiatingTemplate&
+ operator=(const InstantiatingTemplate&); // not implemented
+ };
+
+ void PrintInstantiationStack();
+
+ /// \brief Determines whether we are currently in a context where
+ /// template argument substitution failures are not considered
+ /// errors.
+ ///
+ /// \returns An empty \c llvm::Optional if we're not in a SFINAE context.
+ /// Otherwise, contains a pointer that, if non-NULL, contains the nearest
+ /// template-deduction context object, which can be used to capture
+ /// diagnostics that will be suppressed.
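+ ///
+ /// Callers typically check it along these lines (illustrative sketch only):
+ /// \code
+ ///   if (llvm::Optional<sema::TemplateDeductionInfo *> Info = isSFINAEContext()) {
+ ///     // We are in a SFINAE context; *Info, if non-NULL, points at the
+ ///     // nearest template-deduction context.
+ ///   }
+ /// \endcode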
+ llvm::Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
+
+ /// \brief RAII class used to determine whether SFINAE has
+ /// trapped any errors that occur during template argument
+ /// deduction.
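+ ///
+ /// Illustrative sketch of the intended pattern (names are placeholders):
+ /// \code
+ ///   SFINAETrap Trap(*this);
+ ///   // ... attempt the substitution or conversion that may fail ...
+ ///   if (Trap.hasErrorOccurred())
+ ///     return /* report a substitution failure to the caller */;
+ /// \endcode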
+ class SFINAETrap {
+ Sema &SemaRef;
+ unsigned PrevSFINAEErrors;
+ bool PrevInNonInstantiationSFINAEContext;
+ bool PrevAccessCheckingSFINAE;
+
+ public:
+ explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
+ : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
+ PrevInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext),
+ PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE)
+ {
+ if (!SemaRef.isSFINAEContext())
+ SemaRef.InNonInstantiationSFINAEContext = true;
+ SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
+ }
+
+ ~SFINAETrap() {
+ SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
+ SemaRef.InNonInstantiationSFINAEContext
+ = PrevInNonInstantiationSFINAEContext;
+ SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
+ }
+
+ /// \brief Determine whether any SFINAE errors have been trapped.
+ bool hasErrorOccurred() const {
+ return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
+ }
+ };
+
+ /// \brief The current instantiation scope used to store local
+ /// variables.
+ LocalInstantiationScope *CurrentInstantiationScope;
+
+ /// \brief The number of typos corrected by CorrectTypo.
+ unsigned TyposCorrected;
+
+ typedef llvm::DenseMap<IdentifierInfo *, TypoCorrection>
+ UnqualifiedTyposCorrectedMap;
+
+ /// \brief A cache containing the results of typo correction for unqualified
+ /// name lookup.
+ ///
+ /// Each entry maps an identifier to the TypoCorrection that was chosen for
+ /// it (which may be empty if no correction was found).
+ UnqualifiedTyposCorrectedMap UnqualifiedTyposCorrected;
+
+ /// \brief Worker object for performing CFG-based warnings.
+ sema::AnalysisBasedWarnings AnalysisWarnings;
+
+ /// \brief An entity for which implicit template instantiation is required.
+ ///
+ /// The source location associated with the declaration is the first place in
+ /// the source code where the declaration was "used". It is not necessarily
+ /// the point of instantiation (which will be either before or after the
+ /// namespace-scope declaration that triggered this implicit instantiation).
+ /// However, it is the location that diagnostics should generally refer to,
+ /// because users will need to know what code triggered the instantiation.
+ typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
+
+ /// \brief The queue of implicit template instantiations that are required
+ /// but have not yet been performed.
+ std::deque<PendingImplicitInstantiation> PendingInstantiations;
+
+ /// \brief The queue of implicit template instantiations that are required
+ /// and must be performed within the current local scope.
+ ///
+ /// This queue is only used for member functions of local classes in
+ /// templates, which must be instantiated in the same scope as their
+ /// enclosing function, so that they can reference function-local
+ /// types, static variables, enumerators, etc.
+ std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
+
+ void PerformPendingInstantiations(bool LocalOnly = false);
+
+ TypeSourceInfo *SubstType(TypeSourceInfo *T,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ SourceLocation Loc, DeclarationName Entity);
+
+ QualType SubstType(QualType T,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ SourceLocation Loc, DeclarationName Entity);
+
+ TypeSourceInfo *SubstType(TypeLoc TL,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ SourceLocation Loc, DeclarationName Entity);
+
+ TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ SourceLocation Loc,
+ DeclarationName Entity);
+ ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ int indexAdjustment,
+ llvm::Optional<unsigned> NumExpansions,
+ bool ExpectParameterPack);
+ bool SubstParmTypes(SourceLocation Loc,
+ ParmVarDecl **Params, unsigned NumParams,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ SmallVectorImpl<QualType> &ParamTypes,
+ SmallVectorImpl<ParmVarDecl *> *OutParams = 0);
+ ExprResult SubstExpr(Expr *E,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+
+ /// \brief Substitute the given template arguments into a list of
+ /// expressions, expanding pack expansions if required.
+ ///
+ /// \param Exprs The list of expressions to substitute into.
+ ///
+ /// \param NumExprs The number of expressions in \p Exprs.
+ ///
+ /// \param IsCall Whether this is some form of call, in which case
+ /// default arguments will be dropped.
+ ///
+ /// \param TemplateArgs The set of template arguments to substitute.
+ ///
+ /// \param Outputs Will receive all of the substituted arguments.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ SmallVectorImpl<Expr *> &Outputs);
+
+ StmtResult SubstStmt(Stmt *S,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+
+ Decl *SubstDecl(Decl *D, DeclContext *Owner,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+
+ ExprResult SubstInitializer(Expr *E,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool CXXDirectInit);
+
+ bool
+ SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
+ CXXRecordDecl *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+
+ bool
+ InstantiateClass(SourceLocation PointOfInstantiation,
+ CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK,
+ bool Complain = true);
+
+ bool InstantiateEnum(SourceLocation PointOfInstantiation,
+ EnumDecl *Instantiation, EnumDecl *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK);
+
+ struct LateInstantiatedAttribute {
+ const Attr *TmplAttr;
+ LocalInstantiationScope *Scope;
+ Decl *NewDecl;
+
+ LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
+ Decl *D)
+ : TmplAttr(A), Scope(S), NewDecl(D)
+ { }
+ };
+ typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
+
+ void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
+ const Decl *Pattern, Decl *Inst,
+ LateInstantiatedAttrVec *LateAttrs = 0,
+ LocalInstantiationScope *OuterMostScope = 0);
+
+ bool
+ InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
+ ClassTemplateSpecializationDecl *ClassTemplateSpec,
+ TemplateSpecializationKind TSK,
+ bool Complain = true);
+
+ void InstantiateClassMembers(SourceLocation PointOfInstantiation,
+ CXXRecordDecl *Instantiation,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK);
+
+ void InstantiateClassTemplateSpecializationMembers(
+ SourceLocation PointOfInstantiation,
+ ClassTemplateSpecializationDecl *ClassTemplateSpec,
+ TemplateSpecializationKind TSK);
+
+ NestedNameSpecifierLoc
+ SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+
+ DeclarationNameInfo
+ SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+ TemplateName
+ SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
+ SourceLocation Loc,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+ bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
+ TemplateArgumentListInfo &Result,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+
+ void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
+ FunctionDecl *Function,
+ bool Recursive = false,
+ bool DefinitionRequired = false);
+ void InstantiateStaticDataMemberDefinition(
+ SourceLocation PointOfInstantiation,
+ VarDecl *Var,
+ bool Recursive = false,
+ bool DefinitionRequired = false);
+
+ void InstantiateMemInitializers(CXXConstructorDecl *New,
+ const CXXConstructorDecl *Tmpl,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+
+ NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+ DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+
+ // Objective-C declarations.
+ enum ObjCContainerKind {
+ OCK_None = -1,
+ OCK_Interface = 0,
+ OCK_Protocol,
+ OCK_Category,
+ OCK_ClassExtension,
+ OCK_Implementation,
+ OCK_CategoryImplementation
+ };
+ ObjCContainerKind getObjCContainerKind() const;
+
+ Decl *ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassLoc,
+ IdentifierInfo *SuperName,
+ SourceLocation SuperLoc,
+ Decl * const *ProtoRefs,
+ unsigned NumProtoRefs,
+ const SourceLocation *ProtoLocs,
+ SourceLocation EndProtoLoc,
+ AttributeList *AttrList);
+
+ Decl *ActOnCompatiblityAlias(
+ SourceLocation AtCompatibilityAliasLoc,
+ IdentifierInfo *AliasName, SourceLocation AliasLocation,
+ IdentifierInfo *ClassName, SourceLocation ClassLocation);
+
+ bool CheckForwardProtocolDeclarationForCircularDependency(
+ IdentifierInfo *PName,
+ SourceLocation &PLoc, SourceLocation PrevLoc,
+ const ObjCList<ObjCProtocolDecl> &PList);
+
+ Decl *ActOnStartProtocolInterface(
+ SourceLocation AtProtoInterfaceLoc,
+ IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
+ Decl * const *ProtoRefNames, unsigned NumProtoRefs,
+ const SourceLocation *ProtoLocs,
+ SourceLocation EndProtoLoc,
+ AttributeList *AttrList);
+
+ Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassLoc,
+ IdentifierInfo *CategoryName,
+ SourceLocation CategoryLoc,
+ Decl * const *ProtoRefs,
+ unsigned NumProtoRefs,
+ const SourceLocation *ProtoLocs,
+ SourceLocation EndProtoLoc);
+
+ Decl *ActOnStartClassImplementation(
+ SourceLocation AtClassImplLoc,
+ IdentifierInfo *ClassName, SourceLocation ClassLoc,
+ IdentifierInfo *SuperClassname,
+ SourceLocation SuperClassLoc);
+
+ Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassLoc,
+ IdentifierInfo *CatName,
+ SourceLocation CatLoc);
+
+ DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
+ ArrayRef<Decl *> Decls);
+
+ DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
+ IdentifierInfo **IdentList,
+ SourceLocation *IdentLocs,
+ unsigned NumElts);
+
+ DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
+ const IdentifierLocPair *IdentList,
+ unsigned NumElts,
+ AttributeList *attrList);
+
+ void FindProtocolDeclaration(bool WarnOnDeclarations,
+ const IdentifierLocPair *ProtocolId,
+ unsigned NumProtocols,
+ SmallVectorImpl<Decl *> &Protocols);
+
+ /// Ensure attributes are consistent with type.
+ /// \param [in, out] Attributes The attributes to check; they will
+ /// be modified to be consistent with the property type.
+ void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
+ SourceLocation Loc,
+ unsigned &Attributes);
+
+ /// Process the specified property declaration and create decls for the
+ /// setters and getters as needed.
+ /// \param property The property declaration being processed
+ /// \param DC The semantic container for the property
+ /// \param redeclaredProperty Declaration for property if redeclared
+ /// in class extension.
+ /// \param lexicalDC Container for redeclaredProperty.
+ void ProcessPropertyDecl(ObjCPropertyDecl *property,
+ ObjCContainerDecl *DC,
+ ObjCPropertyDecl *redeclaredProperty = 0,
+ ObjCContainerDecl *lexicalDC = 0);
+
+ void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
+ ObjCPropertyDecl *SuperProperty,
+ const IdentifierInfo *Name);
+ void ComparePropertiesInBaseAndSuper(ObjCInterfaceDecl *IDecl);
+
+ void CompareMethodParamsInBaseAndSuper(Decl *IDecl,
+ ObjCMethodDecl *MethodDecl,
+ bool IsInstance);
+
+ void CompareProperties(Decl *CDecl, Decl *MergeProtocols);
+
+ void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
+ ObjCInterfaceDecl *ID);
+
+ void MatchOneProtocolPropertiesInClass(Decl *CDecl,
+ ObjCProtocolDecl *PDecl);
+
+ Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
+ Decl **allMethods = 0, unsigned allNum = 0,
+ Decl **allProperties = 0, unsigned pNum = 0,
+ DeclGroupPtrTy *allTUVars = 0, unsigned tuvNum = 0);
+
+ Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
+ SourceLocation LParenLoc,
+ FieldDeclarator &FD, ObjCDeclSpec &ODS,
+ Selector GetterSel, Selector SetterSel,
+ bool *OverridingProperty,
+ tok::ObjCKeywordKind MethodImplKind,
+ DeclContext *lexicalDC = 0);
+
+ Decl *ActOnPropertyImplDecl(Scope *S,
+ SourceLocation AtLoc,
+ SourceLocation PropertyLoc,
+ bool ImplKind,
+ IdentifierInfo *PropertyId,
+ IdentifierInfo *PropertyIvar,
+ SourceLocation PropertyIvarLoc);
+
+ enum ObjCSpecialMethodKind {
+ OSMK_None,
+ OSMK_Alloc,
+ OSMK_New,
+ OSMK_Copy,
+ OSMK_RetainingInit,
+ OSMK_NonRetainingInit
+ };
+
+ struct ObjCArgInfo {
+ IdentifierInfo *Name;
+ SourceLocation NameLoc;
+ // The Type is null if no type was specified, and the DeclSpec is invalid
+ // in this case.
+ ParsedType Type;
+ ObjCDeclSpec DeclSpec;
+
+ /// ArgAttrs - Attribute list for this argument.
+ AttributeList *ArgAttrs;
+ };
+
+ Decl *ActOnMethodDeclaration(
+ Scope *S,
+ SourceLocation BeginLoc, // location of the + or -.
+ SourceLocation EndLoc, // location of the ; or {.
+ tok::TokenKind MethodType,
+ ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
+ ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
+ // optional arguments. The number of types/arguments is obtained
+ // from Sel.getNumArgs().
+ ObjCArgInfo *ArgInfo,
+ DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
+ AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
+ bool isVariadic, bool MethodDefinition);
+
+ // Helper method for ActOnClassMethod/ActOnInstanceMethod.
+ // Will search "local" class/category implementations for a method decl.
+ // Will also search in class's root looking for instance method.
+ // Returns 0 if no method is found.
+ ObjCMethodDecl *LookupPrivateClassMethod(Selector Sel,
+ ObjCInterfaceDecl *CDecl);
+ ObjCMethodDecl *LookupPrivateInstanceMethod(Selector Sel,
+ ObjCInterfaceDecl *ClassDecl);
+ ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
+ const ObjCObjectPointerType *OPT,
+ bool IsInstance);
+ ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
+ bool IsInstance);
+
+ bool inferObjCARCLifetime(ValueDecl *decl);
+
+ ExprResult
+ HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
+ Expr *BaseExpr,
+ SourceLocation OpLoc,
+ DeclarationName MemberName,
+ SourceLocation MemberLoc,
+ SourceLocation SuperLoc, QualType SuperType,
+ bool Super);
+
+ ExprResult
+ ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
+ IdentifierInfo &propertyName,
+ SourceLocation receiverNameLoc,
+ SourceLocation propertyNameLoc);
+
+ ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
+
+ /// \brief Describes the kind of message expression indicated by a message
+ /// send that starts with an identifier.
+ enum ObjCMessageKind {
+ /// \brief The message is sent to 'super'.
+ ObjCSuperMessage,
+ /// \brief The message is an instance message.
+ ObjCInstanceMessage,
+ /// \brief The message is a class message, and the identifier is a type
+ /// name.
+ ObjCClassMessage
+ };
+
+ ObjCMessageKind getObjCMessageKind(Scope *S,
+ IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ bool IsSuper,
+ bool HasTrailingDot,
+ ParsedType &ReceiverType);
+
+ ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
+ Selector Sel,
+ SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg Args);
+
+ ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
+ QualType ReceiverType,
+ SourceLocation SuperLoc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg Args,
+ bool isImplicit = false);
+
+ ExprResult BuildClassMessageImplicit(QualType ReceiverType,
+ bool isSuperReceiver,
+ SourceLocation Loc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ MultiExprArg Args);
+
+ ExprResult ActOnClassMessage(Scope *S,
+ ParsedType Receiver,
+ Selector Sel,
+ SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg Args);
+
+ ExprResult BuildInstanceMessage(Expr *Receiver,
+ QualType ReceiverType,
+ SourceLocation SuperLoc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg Args,
+ bool isImplicit = false);
+
+ ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
+ QualType ReceiverType,
+ SourceLocation Loc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ MultiExprArg Args);
+
+ ExprResult ActOnInstanceMessage(Scope *S,
+ Expr *Receiver,
+ Selector Sel,
+ SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg Args);
+
+ ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
+ ObjCBridgeCastKind Kind,
+ SourceLocation BridgeKeywordLoc,
+ TypeSourceInfo *TSInfo,
+ Expr *SubExpr);
+
+ ExprResult ActOnObjCBridgedCast(Scope *S,
+ SourceLocation LParenLoc,
+ ObjCBridgeCastKind Kind,
+ SourceLocation BridgeKeywordLoc,
+ ParsedType Type,
+ SourceLocation RParenLoc,
+ Expr *SubExpr);
+
+ bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
+
+ /// \brief Check whether the given new method is a valid override of the
+ /// given overridden method, and set any properties that should be inherited.
+ void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
+ const ObjCMethodDecl *Overridden,
+ bool IsImplementation);
+
+ /// \brief Check whether the given method overrides any methods in its class,
+ /// calling \c CheckObjCMethodOverride for each overridden method.
+ bool CheckObjCMethodOverrides(ObjCMethodDecl *NewMethod, DeclContext *DC);
+
+ enum PragmaOptionsAlignKind {
+ POAK_Native, // #pragma options align=native
+ POAK_Natural, // #pragma options align=natural
+ POAK_Packed, // #pragma options align=packed
+ POAK_Power, // #pragma options align=power
+ POAK_Mac68k, // #pragma options align=mac68k
+ POAK_Reset // #pragma options align=reset
+ };
+
+ /// ActOnPragmaOptionsAlign - Called on well formed #pragma options align.
+ void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
+ SourceLocation PragmaLoc,
+ SourceLocation KindLoc);
+
+ enum PragmaPackKind {
+ PPK_Default, // #pragma pack([n])
+ PPK_Show, // #pragma pack(show), only supported by MSVC.
+ PPK_Push, // #pragma pack(push, [identifier], [n])
+ PPK_Pop // #pragma pack(pop, [identifier], [n])
+ };
+
+ enum PragmaMSStructKind {
+ PMSST_OFF, // #pragma ms_struct off
+ PMSST_ON // #pragma ms_struct on
+ };
+
+ /// ActOnPragmaPack - Called on well formed #pragma pack(...).
+ void ActOnPragmaPack(PragmaPackKind Kind,
+ IdentifierInfo *Name,
+ Expr *Alignment,
+ SourceLocation PragmaLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc);
+
+ /// ActOnPragmaMSStruct - Called on well formed #pragma ms_struct [on|off].
+ void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
+
+ /// ActOnPragmaUnused - Called on well-formed '#pragma unused'.
+ void ActOnPragmaUnused(const Token &Identifier,
+ Scope *curScope,
+ SourceLocation PragmaLoc);
+
+ /// ActOnPragmaVisibility - Called on well formed #pragma GCC visibility... .
+ void ActOnPragmaVisibility(const IdentifierInfo* VisType,
+ SourceLocation PragmaLoc);
+
+ NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
+ SourceLocation Loc);
+ void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
+
+ /// ActOnPragmaWeakID - Called on well formed #pragma weak ident.
+ void ActOnPragmaWeakID(IdentifierInfo* WeakName,
+ SourceLocation PragmaLoc,
+ SourceLocation WeakNameLoc);
+
+ /// ActOnPragmaRedefineExtname - Called on well formed
+ /// #pragma redefine_extname oldname newname.
+ void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
+ IdentifierInfo* AliasName,
+ SourceLocation PragmaLoc,
+ SourceLocation WeakNameLoc,
+ SourceLocation AliasNameLoc);
+
+ /// ActOnPragmaWeakAlias - Called on well formed #pragma weak ident = ident.
+ void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
+ IdentifierInfo* AliasName,
+ SourceLocation PragmaLoc,
+ SourceLocation WeakNameLoc,
+ SourceLocation AliasNameLoc);
+
+ /// ActOnPragmaFPContract - Called on well formed
+ /// #pragma {STDC,OPENCL} FP_CONTRACT
+ void ActOnPragmaFPContract(tok::OnOffSwitch OOS);
+
+ /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
+ /// the record decl, to handle '#pragma pack' and '#pragma options align'.
+ void AddAlignmentAttributesForRecord(RecordDecl *RD);
+
+ /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
+ void AddMsStructLayoutForRecord(RecordDecl *RD);
+
+ /// FreePackedContext - Deallocate and null out PackContext.
+ void FreePackedContext();
+
+ /// PushNamespaceVisibilityAttr - Note that we've entered a
+ /// namespace with a visibility attribute.
+ void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
+ SourceLocation Loc);
+
+ /// AddPushedVisibilityAttribute - If '#pragma GCC visibility' was used,
+ /// add an appropriate visibility attribute.
+ void AddPushedVisibilityAttribute(Decl *RD);
+
+ /// PopPragmaVisibility - Pop the top element of the visibility stack; used
+ /// for '#pragma GCC visibility' and visibility attributes on namespaces.
+ void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
+
+ /// FreeVisContext - Deallocate and null out VisContext.
+ void FreeVisContext();
+
+ /// AddCFAuditedAttribute - Check whether we're currently within
+ /// '#pragma clang arc_cf_code_audited' and, if so, consider adding
+ /// the appropriate attribute.
+ void AddCFAuditedAttribute(Decl *D);
+
+ /// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
+ void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E);
+ void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T);
+
+ /// \brief The kind of conversion being performed.
+ enum CheckedConversionKind {
+ /// \brief An implicit conversion.
+ CCK_ImplicitConversion,
+ /// \brief A C-style cast.
+ CCK_CStyleCast,
+ /// \brief A functional-style cast.
+ CCK_FunctionalCast,
+ /// \brief A cast other than a C-style cast.
+ CCK_OtherCast
+ };
+
+ /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
+ /// cast. If there is already an implicit cast, merge into the existing one.
+ /// The result's value kind is given by \p VK.
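+ ///
+ /// For instance (an illustrative sketch; 'E' is some already-built Expr*):
+ /// \code
+ ///   ExprResult Res = ImpCastExprToType(E, Context.IntTy, CK_IntegralCast);
+ /// \endcode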
+ ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
+ ExprValueKind VK = VK_RValue,
+ const CXXCastPath *BasePath = 0,
+ CheckedConversionKind CCK
+ = CCK_ImplicitConversion);
+
+ /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
+ /// to the conversion from scalar type ScalarTy to the Boolean type.
+ static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
+
+ /// IgnoredValueConversions - Given that an expression's result is
+ /// syntactically ignored, perform any conversions that are
+ /// required.
+ ExprResult IgnoredValueConversions(Expr *E);
+
+ // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
+ // functions and arrays to their respective pointers (C99 6.3.2.1).
+ ExprResult UsualUnaryConversions(Expr *E);
+
+ // DefaultFunctionArrayConversion - converts functions and arrays
+ // to their respective pointers (C99 6.3.2.1).
+ ExprResult DefaultFunctionArrayConversion(Expr *E);
+
+ // DefaultFunctionArrayLvalueConversion - converts functions and
+ // arrays to their respective pointers and performs the
+ // lvalue-to-rvalue conversion.
+ ExprResult DefaultFunctionArrayLvalueConversion(Expr *E);
+
+ // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
+ // the operand. This is DefaultFunctionArrayLvalueConversion,
+ // except that it assumes the operand isn't of function or array
+ // type.
+ ExprResult DefaultLvalueConversion(Expr *E);
+
+ // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
+ // do not have a prototype. Integer promotions are performed on each
+ // argument, and arguments that have type float are promoted to double.
+ ExprResult DefaultArgumentPromotion(Expr *E);
+
+ // Used for emitting the right warning by DefaultVariadicArgumentPromotion
+ enum VariadicCallType {
+ VariadicFunction,
+ VariadicBlock,
+ VariadicMethod,
+ VariadicConstructor,
+ VariadicDoesNotApply
+ };
+
+ /// GatherArgumentsForCall - Collects argument expressions for the various
+ /// forms of call prototypes.
+ bool GatherArgumentsForCall(SourceLocation CallLoc,
+ FunctionDecl *FDecl,
+ const FunctionProtoType *Proto,
+ unsigned FirstProtoArg,
+ Expr **Args, unsigned NumArgs,
+ SmallVector<Expr *, 8> &AllArgs,
+ VariadicCallType CallType = VariadicDoesNotApply,
+ bool AllowExplicit = false);
+
+ // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
+ // will warn if the resulting type is not a POD type.
+ ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
+ FunctionDecl *FDecl);
+
+ // UsualArithmeticConversions - performs the UsualUnaryConversions on its
+ // operands and then handles various conversions that are common to binary
+ // operators (C99 6.3.1.8). If both operands aren't arithmetic, this
+ // routine returns the first non-arithmetic type found. The client is
+ // responsible for emitting appropriate error diagnostics.
+ QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
+ bool IsCompAssign = false);
+
+ /// AssignConvertType - All of the 'assignment' semantic checks return this
+ /// enum to indicate whether the assignment was allowed. These checks are
+ /// done for simple assignments, as well as initialization, return from
+ /// function, argument passing, etc. The query is phrased in terms of a
+ /// source and destination type.
+ enum AssignConvertType {
+ /// Compatible - the types are compatible according to the standard.
+ Compatible,
+
+ /// PointerToInt - The assignment converts a pointer to an int, which we
+ /// accept as an extension.
+ PointerToInt,
+
+ /// IntToPointer - The assignment converts an int to a pointer, which we
+ /// accept as an extension.
+ IntToPointer,
+
+ /// FunctionVoidPointer - The assignment is between a function pointer and
+ /// void*, which the standard doesn't allow, but we accept as an extension.
+ FunctionVoidPointer,
+
+ /// IncompatiblePointer - The assignment is between two pointer types that
+ /// are not compatible, but we accept them as an extension.
+ IncompatiblePointer,
+
+ /// IncompatiblePointerSign - The assignment is between two pointer types
+ /// that point to integers which have a different sign, but are otherwise
+ /// identical. This is a subset of the above, but broken out because it's by
+ /// far the most common case of incompatible pointers.
+ IncompatiblePointerSign,
+
+ /// CompatiblePointerDiscardsQualifiers - The assignment discards
+ /// c/v/r qualifiers, which we accept as an extension.
+ CompatiblePointerDiscardsQualifiers,
+
+ /// IncompatiblePointerDiscardsQualifiers - The assignment
+ /// discards qualifiers that we don't permit to be discarded,
+ /// like address spaces.
+ IncompatiblePointerDiscardsQualifiers,
+
+ /// IncompatibleNestedPointerQualifiers - The assignment is between two
+ /// nested pointer types, and the qualifiers other than the first two
+ /// levels differ e.g. char ** -> const char **, but we accept them as an
+ /// extension.
+ IncompatibleNestedPointerQualifiers,
+
+ /// IncompatibleVectors - The assignment is between two vector types that
+ /// have the same size, which we accept as an extension.
+ IncompatibleVectors,
+
+ /// IntToBlockPointer - The assignment converts an int to a block
+ /// pointer. We disallow this.
+ IntToBlockPointer,
+
+ /// IncompatibleBlockPointer - The assignment is between two block
+ /// pointer types that are not compatible.
+ IncompatibleBlockPointer,
+
+ /// IncompatibleObjCQualifiedId - The assignment is between a qualified
+ /// id type and something else (that is incompatible with it). For example,
+ /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
+ IncompatibleObjCQualifiedId,
+
+ /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
+ /// object with __weak qualifier.
+ IncompatibleObjCWeakRef,
+
+ /// Incompatible - We reject this conversion outright; it is invalid to
+ /// represent it in the AST.
+ Incompatible
+ };
+
+ /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
+ /// assignment conversion type specified by ConvTy. This returns true if the
+ /// conversion was invalid or false if the conversion was accepted.
+ bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
+ SourceLocation Loc,
+ QualType DstType, QualType SrcType,
+ Expr *SrcExpr, AssignmentAction Action,
+ bool *Complained = 0);
+
+ /// CheckAssignmentConstraints - Perform type checking for assignment,
+ /// argument passing, variable initialization, and function return values.
+ /// C99 6.5.16.
+ AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
+ QualType LHSType,
+ QualType RHSType);
+
+ /// Check assignment constraints and prepare for a conversion of the
+ /// RHS to the LHS type.
+ AssignConvertType CheckAssignmentConstraints(QualType LHSType,
+ ExprResult &RHS,
+ CastKind &Kind);
+
+ // CheckSingleAssignmentConstraints - Currently used by
+ // CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking,
+ // this routine performs the default function/array conversions.
+ AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType,
+ ExprResult &RHS,
+ bool Diagnose = true);
+
+ // \brief If the lhs type is a transparent union, check whether we
+ // can initialize the transparent union with the given expression.
+ AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
+ ExprResult &RHS);
+
+ bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
+
+ bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
+
+ ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
+ AssignmentAction Action,
+ bool AllowExplicit = false);
+ ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
+ AssignmentAction Action,
+ bool AllowExplicit,
+ ImplicitConversionSequence& ICS);
+ ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
+ const ImplicitConversionSequence& ICS,
+ AssignmentAction Action,
+ CheckedConversionKind CCK
+ = CCK_ImplicitConversion);
+ ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
+ const StandardConversionSequence& SCS,
+ AssignmentAction Action,
+ CheckedConversionKind CCK);
+
+ /// The following "Check" methods will return a valid/converted QualType
+ /// or a null QualType (indicating an error diagnostic was issued).
+
+ /// type checking binary operators (subroutines of CreateBuiltinBinOp).
+ QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
+ ExprResult &RHS);
+ QualType CheckPointerToMemberOperands( // C++ 5.5
+ ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
+ SourceLocation OpLoc, bool isIndirect);
+ QualType CheckMultiplyDivideOperands( // C99 6.5.5
+ ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
+ bool IsDivide);
+ QualType CheckRemainderOperands( // C99 6.5.5
+ ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
+ bool IsCompAssign = false);
+ QualType CheckAdditionOperands( // C99 6.5.6
+ ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
+ QualType* CompLHSTy = 0);
+ QualType CheckSubtractionOperands( // C99 6.5.6
+ ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
+ QualType* CompLHSTy = 0);
+ QualType CheckShiftOperands( // C99 6.5.7
+ ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
+ bool IsCompAssign = false);
+ QualType CheckCompareOperands( // C99 6.5.8/9
+ ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc,
+ bool isRelational);
+ QualType CheckBitwiseOperands( // C99 6.5.[10...12]
+ ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
+ bool IsCompAssign = false);
+ QualType CheckLogicalOperands( // C99 6.5.[13,14]
+ ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc);
+ // CheckAssignmentOperands is used for both simple and compound assignment.
+ // For simple assignment, pass both expressions and a null converted type.
+ // For compound assignment, pass both expressions and the converted type.
+ QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
+ Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
+
+ ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
+ UnaryOperatorKind Opcode, Expr *Op);
+ ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
+ BinaryOperatorKind Opcode,
+ Expr *LHS, Expr *RHS);
+ ExprResult checkPseudoObjectRValue(Expr *E);
+ Expr *recreateSyntacticForm(PseudoObjectExpr *E);
+
+ QualType CheckConditionalOperands( // C99 6.5.15
+ ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
+ ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
+ QualType CXXCheckConditionalOperands( // C++ 5.16
+ ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
+ ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
+ QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
+ bool *NonStandardCompositeType = 0);
+ QualType FindCompositePointerType(SourceLocation Loc,
+ ExprResult &E1, ExprResult &E2,
+ bool *NonStandardCompositeType = 0) {
+ Expr *E1Tmp = E1.take(), *E2Tmp = E2.take();
+ QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp,
+ NonStandardCompositeType);
+ E1 = Owned(E1Tmp);
+ E2 = Owned(E2Tmp);
+ return Composite;
+ }
+
+ QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation QuestionLoc);
+
+ bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
+ SourceLocation QuestionLoc);
+
+ /// type checking for vector binary operators.
+ QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, bool IsCompAssign);
+ QualType GetSignedVectorType(QualType V);
+ QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, bool isRelational);
+ QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc);
+
+ /// type checking declaration initializers (C99 6.7.8)
+ bool CheckForConstantInitializer(Expr *e, QualType t);
+
+ // type checking C++ declaration initializers (C++ [dcl.init]).
+
+ /// ReferenceCompareResult - Expresses the result of comparing two
+ /// types (cv1 T1 and cv2 T2) to determine their compatibility for the
+ /// purposes of initialization by reference (C++ [dcl.init.ref]p4).
+ enum ReferenceCompareResult {
+ /// Ref_Incompatible - The two types are incompatible, so direct
+ /// reference binding is not possible.
+ Ref_Incompatible = 0,
+ /// Ref_Related - The two types are reference-related, which means
+ /// that their unqualified forms (T1 and T2) are either the same
+ /// or T1 is a base class of T2.
+ Ref_Related,
+ /// Ref_Compatible_With_Added_Qualification - The two types are
+ /// reference-compatible with added qualification, meaning that
+ /// they are reference-compatible and the qualifiers on T1 (cv1)
+ /// are greater than the qualifiers on T2 (cv2).
+ Ref_Compatible_With_Added_Qualification,
+ /// Ref_Compatible - The two types are reference-compatible and
+ /// have equivalent qualifiers (cv1 == cv2).
+ Ref_Compatible
+ };
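+
+ // Illustrative reading of the categories above (a sketch based purely on the
+ // definitions in the enum; 'i' is an 'int' lvalue):
+ //   int &a = i;        // same type, same qualifiers:   Ref_Compatible
+ //   const int &b = i;  // T1 adds qualifiers over T2:   Ref_Compatible_With_Added_Qualification
+ //   // Binding 'int &' to a 'const int' lvalue: the unqualified types match,
+ //   // so the types are reference-related (Ref_Related), but not compatible.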
+
+ ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
+ QualType T1, QualType T2,
+ bool &DerivedToBase,
+ bool &ObjCConversion,
+ bool &ObjCLifetimeConversion);
+
+ ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
+ Expr *CastExpr, CastKind &CastKind,
+ ExprValueKind &VK, CXXCastPath &Path);
+
+ /// \brief Force an expression with unknown-type to an expression of the
+ /// given type.
+ ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
+
+ // CheckVectorCast - check type constraints for vectors.
+ // Since vectors are an extension, there is no C standard reference for this.
+ // We allow casting between vectors and integer datatypes of the same size.
+ // returns true if the cast is invalid
+ bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
+ CastKind &Kind);
+
+ // CheckExtVectorCast - check type constraints for extended vectors.
+ // Since vectors are an extension, there is no C standard reference for this.
+ // We allow casting between vectors and integer datatypes of the same size,
+ // or vectors and the element type of that vector.
+ // returns the cast expr
+ ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
+ CastKind &Kind);
+
+ ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
+ SourceLocation LParenLoc,
+ Expr *CastExpr,
+ SourceLocation RParenLoc);
+
+ enum ARCConversionResult { ACR_okay, ACR_unbridged };
+
+ /// \brief Checks for invalid conversions and casts between
+ /// retainable pointers and other pointer kinds.
+ ARCConversionResult CheckObjCARCConversion(SourceRange castRange,
+ QualType castType, Expr *&op,
+ CheckedConversionKind CCK);
+
+ Expr *stripARCUnbridgedCast(Expr *e);
+ void diagnoseARCUnbridgedCast(Expr *e);
+
+ bool CheckObjCARCUnavailableWeakConversion(QualType castType,
+ QualType ExprType);
+
+ /// checkRetainCycles - Check whether an Objective-C message send
+ /// might create an obvious retain cycle.
+ void checkRetainCycles(ObjCMessageExpr *msg);
+ void checkRetainCycles(Expr *receiver, Expr *argument);
+
+ /// checkUnsafeAssigns - Check whether +1 expr is being assigned
+ /// to weak/__unsafe_unretained type.
+ bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
+
+ /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
+ /// to weak/__unsafe_unretained expression.
+ void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
+
+ /// CheckMessageArgumentTypes - Check types in an Obj-C message send.
+ /// \param Method - May be null.
+ /// \param [out] ReturnType - The return type of the send.
+ /// \return true iff there were any incompatible types.
+ bool CheckMessageArgumentTypes(QualType ReceiverType,
+ Expr **Args, unsigned NumArgs, Selector Sel,
+ ObjCMethodDecl *Method, bool isClassMessage,
+ bool isSuperMessage,
+ SourceLocation lbrac, SourceLocation rbrac,
+ QualType &ReturnType, ExprValueKind &VK);
+
+ /// \brief Determine the result of a message send expression based on
+ /// the type of the receiver, the method expected to receive the message,
+ /// and the form of the message send.
+ QualType getMessageSendResultType(QualType ReceiverType,
+ ObjCMethodDecl *Method,
+ bool isClassMessage, bool isSuperMessage);
+
+ /// \brief If the given expression involves a message send to a method
+ /// with a related result type, emit a note describing what happened.
+ void EmitRelatedResultTypeNote(const Expr *E);
+
+ /// CheckBooleanCondition - Diagnose problems involving the use of
+ /// the given expression as a boolean condition (e.g. in an if
+ /// statement). Also performs the standard function and array
+ /// decays, possibly changing the input variable.
+ ///
+ /// \param Loc - A location associated with the condition, e.g. the
+ /// 'if' keyword.
+ /// \return The converted condition, or an invalid result if there were any errors.
+ ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc);
+
+ ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc,
+ Expr *SubExpr);
+
+ /// DiagnoseAssignmentAsCondition - Given that an expression is
+ /// being used as a boolean condition, warn if it's an assignment.
+ void DiagnoseAssignmentAsCondition(Expr *E);
+
+ /// \brief Redundant parentheses over an equality comparison can indicate
+ /// that the user intended an assignment used as condition.
+ void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
+
+ /// CheckCXXBooleanCondition - Performs the conversion to bool; the result is
+ /// invalid if the conversion fails.
+ ExprResult CheckCXXBooleanCondition(Expr *CondExpr);
+
+ /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
+ /// the specified width and sign. If an overflow occurs, detect it and emit
+ /// the specified diagnostic.
+ void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
+ unsigned NewWidth, bool NewSign,
+ SourceLocation Loc, unsigned DiagID);
+
+ /// Checks that the Objective-C declaration is declared in the global scope.
+ /// Emits an error and marks the declaration as invalid if it's not declared
+ /// in the global scope.
+ bool CheckObjCDeclScope(Decl *D);
+
+ /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
+ /// and reports the appropriate diagnostics; the result is invalid on failure.
+ /// Can optionally return the value of the expression.
+ ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
+ PartialDiagnostic Diag,
+ bool AllowFold,
+ PartialDiagnostic FoldDiag);
+ ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
+ PartialDiagnostic Diag,
+ bool AllowFold = true) {
+ return VerifyIntegerConstantExpression(E, Result, Diag, AllowFold,
+ PDiag(0));
+ }
+ ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = 0);
+
+ /// VerifyBitField - verifies that a bit field expression is an ICE and has
+ /// the correct width, and that the field type is valid.
+ /// The result is invalid on failure.
+ /// Can optionally return whether the bit-field is of width 0.
+ ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
+ QualType FieldTy, Expr *BitWidth,
+ bool *ZeroWidth = 0);
+
+ enum CUDAFunctionTarget {
+ CFT_Device,
+ CFT_Global,
+ CFT_Host,
+ CFT_HostDevice
+ };
+
+ CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D);
+
+ bool CheckCUDATarget(CUDAFunctionTarget CallerTarget,
+ CUDAFunctionTarget CalleeTarget);
+
+ bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee) {
+ return CheckCUDATarget(IdentifyCUDATarget(Caller),
+ IdentifyCUDATarget(Callee));
+ }
+
+ /// \name Code completion
+ //@{
+ /// \brief Describes the context in which code completion occurs.
+ enum ParserCompletionContext {
+ /// \brief Code completion occurs at top-level or namespace context.
+ PCC_Namespace,
+ /// \brief Code completion occurs within a class, struct, or union.
+ PCC_Class,
+ /// \brief Code completion occurs within an Objective-C interface, protocol,
+ /// or category.
+ PCC_ObjCInterface,
+ /// \brief Code completion occurs within an Objective-C implementation or
+ /// category implementation
+ PCC_ObjCImplementation,
+ /// \brief Code completion occurs within the list of instance variables
+ /// in an Objective-C interface, protocol, category, or implementation.
+ PCC_ObjCInstanceVariableList,
+ /// \brief Code completion occurs following one or more template
+ /// headers.
+ PCC_Template,
+ /// \brief Code completion occurs following one or more template
+ /// headers within a class.
+ PCC_MemberTemplate,
+ /// \brief Code completion occurs within an expression.
+ PCC_Expression,
+ /// \brief Code completion occurs within a statement, which may
+ /// also be an expression or a declaration.
+ PCC_Statement,
+ /// \brief Code completion occurs at the beginning of the
+ /// initialization statement (or expression) in a for loop.
+ PCC_ForInit,
+ /// \brief Code completion occurs within the condition of an if,
+ /// while, switch, or for statement.
+ PCC_Condition,
+ /// \brief Code completion occurs within the body of a function on a
+ /// recovery path, where we do not have a specific handle on our position
+ /// in the grammar.
+ PCC_RecoveryInFunction,
+ /// \brief Code completion occurs where only a type is permitted.
+ PCC_Type,
+ /// \brief Code completion occurs in a parenthesized expression, which
+ /// might also be a type cast.
+ PCC_ParenthesizedExpression,
+ /// \brief Code completion occurs within a sequence of declaration
+ /// specifiers within a function, method, or block.
+ PCC_LocalDeclarationSpecifiers
+ };
+
+ void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
+ void CodeCompleteOrdinaryName(Scope *S,
+ ParserCompletionContext CompletionContext);
+ void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
+ bool AllowNonIdentifiers,
+ bool AllowNestedNameSpecifiers);
+
+ struct CodeCompleteExpressionData;
+ void CodeCompleteExpression(Scope *S,
+ const CodeCompleteExpressionData &Data);
+ void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
+ SourceLocation OpLoc,
+ bool IsArrow);
+ void CodeCompletePostfixExpression(Scope *S, ExprResult LHS);
+ void CodeCompleteTag(Scope *S, unsigned TagSpec);
+ void CodeCompleteTypeQualifiers(DeclSpec &DS);
+ void CodeCompleteCase(Scope *S);
+ void CodeCompleteCall(Scope *S, Expr *Fn, llvm::ArrayRef<Expr *> Args);
+ void CodeCompleteInitializer(Scope *S, Decl *D);
+ void CodeCompleteReturn(Scope *S);
+ void CodeCompleteAfterIf(Scope *S);
+ void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS);
+
+ void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
+ bool EnteringContext);
+ void CodeCompleteUsing(Scope *S);
+ void CodeCompleteUsingDirective(Scope *S);
+ void CodeCompleteNamespaceDecl(Scope *S);
+ void CodeCompleteNamespaceAliasDecl(Scope *S);
+ void CodeCompleteOperatorName(Scope *S);
+ void CodeCompleteConstructorInitializer(Decl *Constructor,
+ CXXCtorInitializer** Initializers,
+ unsigned NumInitializers);
+ void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
+ bool AfterAmpersand);
+
+ void CodeCompleteObjCAtDirective(Scope *S);
+ void CodeCompleteObjCAtVisibility(Scope *S);
+ void CodeCompleteObjCAtStatement(Scope *S);
+ void CodeCompleteObjCAtExpression(Scope *S);
+ void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
+ void CodeCompleteObjCPropertyGetter(Scope *S);
+ void CodeCompleteObjCPropertySetter(Scope *S);
+ void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
+ bool IsParameter);
+ void CodeCompleteObjCMessageReceiver(Scope *S);
+ void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents,
+ bool AtArgumentExpression);
+ void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents,
+ bool AtArgumentExpression,
+ bool IsSuper = false);
+ void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents,
+ bool AtArgumentExpression,
+ ObjCInterfaceDecl *Super = 0);
+ void CodeCompleteObjCForCollection(Scope *S,
+ DeclGroupPtrTy IterationVar);
+ void CodeCompleteObjCSelector(Scope *S,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents);
+ void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
+ unsigned NumProtocols);
+ void CodeCompleteObjCProtocolDecl(Scope *S);
+ void CodeCompleteObjCInterfaceDecl(Scope *S);
+ void CodeCompleteObjCSuperclass(Scope *S,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassNameLoc);
+ void CodeCompleteObjCImplementationDecl(Scope *S);
+ void CodeCompleteObjCInterfaceCategory(Scope *S,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassNameLoc);
+ void CodeCompleteObjCImplementationCategory(Scope *S,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassNameLoc);
+ void CodeCompleteObjCPropertyDefinition(Scope *S);
+ void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
+ IdentifierInfo *PropertyName);
+ void CodeCompleteObjCMethodDecl(Scope *S,
+ bool IsInstanceMethod,
+ ParsedType ReturnType);
+ void CodeCompleteObjCMethodDeclSelector(Scope *S,
+ bool IsInstanceMethod,
+ bool AtParameterName,
+ ParsedType ReturnType,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents);
+ void CodeCompletePreprocessorDirective(bool InConditional);
+ void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
+ void CodeCompletePreprocessorMacroName(bool IsDefinition);
+ void CodeCompletePreprocessorExpression();
+ void CodeCompletePreprocessorMacroArgument(Scope *S,
+ IdentifierInfo *Macro,
+ MacroInfo *MacroInfo,
+ unsigned Argument);
+ void CodeCompleteNaturalLanguage();
+ void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo,
+ SmallVectorImpl<CodeCompletionResult> &Results);
+ //@}
+
+ //===--------------------------------------------------------------------===//
+ // Extra semantic analysis beyond the C type system
+
+public:
+ SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
+ unsigned ByteNo) const;
+
+private:
+ void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
+ const ArraySubscriptExpr *ASE=0,
+ bool AllowOnePastEnd=true, bool IndexNegated=false);
+ void CheckArrayAccess(const Expr *E);
+ bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall);
+ bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
+ Expr **Args, unsigned NumArgs);
+ bool CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall);
+
+ bool CheckObjCString(Expr *Arg);
+
+ ExprResult CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+ bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
+
+ bool SemaBuiltinVAStart(CallExpr *TheCall);
+ bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
+ bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
+
+public:
+ // Used by C++ template instantiation.
+ ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
+
+private:
+ bool SemaBuiltinPrefetch(CallExpr *TheCall);
+ bool SemaBuiltinObjectSize(CallExpr *TheCall);
+ bool SemaBuiltinLongjmp(CallExpr *TheCall);
+ ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
+ ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
+ AtomicExpr::AtomicOp Op);
+ bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
+ llvm::APSInt &Result);
+
+ enum FormatStringType {
+ FST_Scanf,
+ FST_Printf,
+ FST_NSString,
+ FST_Strftime,
+ FST_Strfmon,
+ FST_Kprintf,
+ FST_Unknown
+ };
+ static FormatStringType GetFormatStringType(const FormatAttr *Format);
+ bool SemaCheckStringLiteral(const Expr *E, Expr **Args, unsigned NumArgs,
+ bool HasVAListArg, unsigned format_idx,
+ unsigned firstDataArg, FormatStringType Type,
+ bool inFunctionCall = true);
+
+ void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr,
+ Expr **Args, unsigned NumArgs, bool HasVAListArg,
+ unsigned format_idx, unsigned firstDataArg,
+ FormatStringType Type, bool inFunctionCall);
+
+ void CheckFormatArguments(const FormatAttr *Format, CallExpr *TheCall);
+ void CheckFormatArguments(const FormatAttr *Format, Expr **Args,
+ unsigned NumArgs, bool IsCXXMember,
+ SourceLocation Loc, SourceRange Range);
+ void CheckFormatArguments(Expr **Args, unsigned NumArgs,
+ bool HasVAListArg, unsigned format_idx,
+ unsigned firstDataArg, FormatStringType Type,
+ SourceLocation Loc, SourceRange range);
+
+ void CheckNonNullArguments(const NonNullAttr *NonNull,
+ const Expr * const *ExprArgs,
+ SourceLocation CallSiteLoc);
+
+ void CheckMemaccessArguments(const CallExpr *Call,
+ unsigned BId,
+ IdentifierInfo *FnName);
+
+ void CheckStrlcpycatArguments(const CallExpr *Call,
+ IdentifierInfo *FnName);
+
+ void CheckStrncatArguments(const CallExpr *Call,
+ IdentifierInfo *FnName);
+
+ void CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
+ SourceLocation ReturnLoc);
+ void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS);
+ void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
+
+ void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
+ Expr *Init);
+
+ /// \brief The parser's current scope.
+ ///
+ /// The parser maintains this state here.
+ Scope *CurScope;
+
+protected:
+ friend class Parser;
+ friend class InitializationSequence;
+ friend class ASTReader;
+ friend class ASTWriter;
+
+public:
+ /// \brief Retrieve the parser's current scope.
+ ///
+ /// This routine must only be used when it is certain that semantic analysis
+ /// and the parser are in precisely the same context, which is not the case
+ /// when, e.g., we are performing any kind of template instantiation.
+ /// Therefore, the only safe places to use this scope are in the parser
+ /// itself and in routines directly invoked from the parser and *never* from
+ /// template substitution or instantiation.
+ Scope *getCurScope() const { return CurScope; }
+
+ Decl *getObjCDeclContext() const;
+
+ DeclContext *getCurLexicalContext() const {
+ return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
+ }
+
+ AvailabilityResult getCurContextAvailability() const;
+};
+
+/// \brief RAII object that enters a new expression evaluation context.
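+///
+/// Typical usage is a scoped guard around parsing or building an expression
+/// in a different context; the previous context is restored when the guard
+/// is destroyed (illustrative sketch; 'Actions' is a Sema&):
+/// \code
+///   EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
+///   // ... parse or build the unevaluated operand ...
+/// \endcode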
+class EnterExpressionEvaluationContext {
+ Sema &Actions;
+
+public:
+ EnterExpressionEvaluationContext(Sema &Actions,
+ Sema::ExpressionEvaluationContext NewContext,
+ Decl *LambdaContextDecl = 0,
+ bool IsDecltype = false)
+ : Actions(Actions) {
+ Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
+ IsDecltype);
+ }
+
+ ~EnterExpressionEvaluationContext() {
+ Actions.PopExpressionEvaluationContext();
+ }
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/SemaConsumer.h b/contrib/llvm/tools/clang/include/clang/Sema/SemaConsumer.h
new file mode 100644
index 0000000..139cce8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/SemaConsumer.h
@@ -0,0 +1,49 @@
+//===--- SemaConsumer.h - Abstract interface for AST semantics --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the SemaConsumer class, a subclass of
+// ASTConsumer that is used by AST clients that also require
+// additional semantic analysis.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_SEMA_SEMACONSUMER_H
+#define LLVM_CLANG_SEMA_SEMACONSUMER_H
+
+#include "clang/AST/ASTConsumer.h"
+
+namespace clang {
+ class Sema;
+
+ /// \brief An abstract interface that should be implemented by
+ /// clients that read ASTs and then require further semantic
+ /// analysis of the entities in those ASTs.
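+ ///
+ /// A client typically stashes the Sema object for later use (illustrative
+ /// sketch; MyConsumer is a hypothetical client class):
+ /// \code
+ ///   class MyConsumer : public SemaConsumer {
+ ///     Sema *TheSema;
+ ///   public:
+ ///     virtual void InitializeSema(Sema &S) { TheSema = &S; }
+ ///     virtual void ForgetSema() { TheSema = 0; }
+ ///   };
+ /// \endcode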
+ class SemaConsumer : public ASTConsumer {
+ virtual void anchor();
+ public:
+ SemaConsumer() {
+ ASTConsumer::SemaConsumer = true;
+ }
+
+ /// \brief Initialize the semantic consumer with the Sema instance
+ /// being used to perform semantic analysis on the abstract syntax
+ /// tree.
+ virtual void InitializeSema(Sema &S) {}
+
+ /// \brief Inform the semantic consumer that Sema is no longer available.
+ virtual void ForgetSema() {}
+
+ // isa/cast/dyn_cast support
+ static bool classof(const ASTConsumer *Consumer) {
+ return Consumer->SemaConsumer;
+ }
+ static bool classof(const SemaConsumer *) { return true; }
+ };
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/SemaDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Sema/SemaDiagnostic.h
new file mode 100644
index 0000000..9605bf8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/SemaDiagnostic.h
@@ -0,0 +1,28 @@
+//===--- DiagnosticSema.h - Diagnostics for libsema -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_DIAGNOSTICSEMA_H
+#define LLVM_CLANG_DIAGNOSTICSEMA_H
+
+#include "clang/Basic/Diagnostic.h"
+
+namespace clang {
+ namespace diag {
+ enum {
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
+#define SEMASTART
+#include "clang/Basic/DiagnosticSemaKinds.inc"
+#undef DIAG
+ NUM_BUILTIN_SEMA_DIAGNOSTICS
+ };
+ } // end namespace diag
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/SemaFixItUtils.h b/contrib/llvm/tools/clang/include/clang/Sema/SemaFixItUtils.h
new file mode 100644
index 0000000..fffca67
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/SemaFixItUtils.h
@@ -0,0 +1,91 @@
+//===--- SemaFixItUtils.h - Sema FixIts -----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines helper classes for generation of Sema FixItHints.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_SEMA_FIXITUTILS_H
+#define LLVM_CLANG_SEMA_FIXITUTILS_H
+
+#include "clang/AST/Expr.h"
+
+namespace clang {
+
+enum OverloadFixItKind {
+ OFIK_Undefined = 0,
+ OFIK_Dereference,
+ OFIK_TakeAddress,
+ OFIK_RemoveDereference,
+ OFIK_RemoveTakeAddress
+};
+
+class Sema;
+
+/// This class facilitates the generation and storage of conversion FixIts.
+/// Hints for new conversions are added using the tryToFixConversion method.
+/// The default type conversion checker can be reset.
+struct ConversionFixItGenerator {
+ /// Performs a simple check to see if From type can be converted to To type.
+ static bool compareTypesSimple(CanQualType From,
+ CanQualType To,
+ Sema &S,
+ SourceLocation Loc,
+ ExprValueKind FromVK);
+
+ /// The list of Hints generated so far.
+ std::vector<FixItHint> Hints;
+
+ /// The number of Conversions fixed. This can be different from the size
+ /// of the Hints vector since we allow multiple FixIts per conversion.
+ unsigned NumConversionsFixed;
+
+ /// The type of fix applied. If multiple conversions are fixed, corresponds
+  /// to the kind of the very first conversion.
+ OverloadFixItKind Kind;
+
+ typedef bool (*TypeComparisonFuncTy) (const CanQualType FromTy,
+ const CanQualType ToTy,
+ Sema &S,
+ SourceLocation Loc,
+ ExprValueKind FromVK);
+ /// The type comparison function used to decide if expression FromExpr of
+ /// type FromTy can be converted to ToTy. For example, one could check if
+  /// an implicit conversion exists. Returns true if the conversion is possible.
+ TypeComparisonFuncTy CompareTypes;
+
+ ConversionFixItGenerator(TypeComparisonFuncTy Foo): NumConversionsFixed(0),
+ Kind(OFIK_Undefined),
+ CompareTypes(Foo) {}
+
+ ConversionFixItGenerator(): NumConversionsFixed(0),
+ Kind(OFIK_Undefined),
+ CompareTypes(compareTypesSimple) {}
+
+ /// Resets the default conversion checker method.
+ void setConversionChecker(TypeComparisonFuncTy Foo) {
+ CompareTypes = Foo;
+ }
+
+ /// If possible, generates and stores a fix for the given conversion.
+ bool tryToFixConversion(const Expr *FromExpr,
+ const QualType FromQTy, const QualType ToQTy,
+ Sema &S);
+
+ void clear() {
+ Hints.clear();
+ NumConversionsFixed = 0;
+ }
+
+ bool isNull() {
+ return (NumConversionsFixed == 0);
+ }
+};
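+
+// A minimal sketch of plugging in a custom checker via setConversionChecker;
+// the comparison function below is hypothetical and always rejects
+// (illustrative only):
+//
+//   static bool rejectAllConversions(const CanQualType, const CanQualType,
+//                                    Sema &, SourceLocation, ExprValueKind) {
+//     return false;
+//   }
+//
+//   ConversionFixItGenerator FixIts;             // uses compareTypesSimple
+//   FixIts.setConversionChecker(rejectAllConversions);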
+
+} // end namespace clang
+#endif // LLVM_CLANG_SEMA_FIXITUTILS_H
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/SemaInternal.h b/contrib/llvm/tools/clang/include/clang/Sema/SemaInternal.h
new file mode 100644
index 0000000..64b83e3
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/SemaInternal.h
@@ -0,0 +1,30 @@
+//===--- SemaInternal.h - Internal Sema Interfaces --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides common API and #includes for the internal
+// implementation of Sema.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SEMA_INTERNAL_H
+#define LLVM_CLANG_SEMA_SEMA_INTERNAL_H
+
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/AST/ASTContext.h"
+
+namespace clang {
+
+inline PartialDiagnostic Sema::PDiag(unsigned DiagID) {
+ return PartialDiagnostic(DiagID, Context.getDiagAllocator());
+}
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Template.h b/contrib/llvm/tools/clang/include/clang/Sema/Template.h
new file mode 100644
index 0000000..c16823a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Template.h
@@ -0,0 +1,491 @@
+//===------- SemaTemplate.h - C++ Templates ---------------------*- C++ -*-===/
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===/
+//
+// This file provides types used in the semantic analysis of C++ templates.
+//
+//===----------------------------------------------------------------------===/
+#ifndef LLVM_CLANG_SEMA_TEMPLATE_H
+#define LLVM_CLANG_SEMA_TEMPLATE_H
+
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclVisitor.h"
+#include "llvm/ADT/SmallVector.h"
+#include <cassert>
+#include <utility>
+
+namespace clang {
+ /// \brief Data structure that captures multiple levels of template argument
+ /// lists for use in template instantiation.
+ ///
+ /// Multiple levels of template arguments occur when instantiating the
+ /// definitions of member templates. For example:
+ ///
+ /// \code
+ /// template<typename T>
+ /// struct X {
+ /// template<T Value>
+ /// struct Y {
+ /// void f();
+ /// };
+ /// };
+ /// \endcode
+ ///
+ /// When instantiating X<int>::Y<17>::f, the multi-level template argument
+ /// list will contain a template argument list (int) at depth 0 and a
+ /// template argument list (17) at depth 1.
+ class MultiLevelTemplateArgumentList {
+ public:
+ typedef std::pair<const TemplateArgument *, unsigned> ArgList;
+
+ private:
+ /// \brief The template argument lists, stored from the innermost template
+ /// argument list (first) to the outermost template argument list (last).
+ SmallVector<ArgList, 4> TemplateArgumentLists;
+
+ public:
+ /// \brief Construct an empty set of template argument lists.
+ MultiLevelTemplateArgumentList() { }
+
+ /// \brief Construct a single-level template argument list.
+ explicit
+ MultiLevelTemplateArgumentList(const TemplateArgumentList &TemplateArgs) {
+ addOuterTemplateArguments(&TemplateArgs);
+ }
+
+ /// \brief Determine the number of levels in this template argument
+ /// list.
+ unsigned getNumLevels() const { return TemplateArgumentLists.size(); }
+
+ /// \brief Retrieve the template argument at a given depth and index.
+ const TemplateArgument &operator()(unsigned Depth, unsigned Index) const {
+ assert(Depth < TemplateArgumentLists.size());
+ assert(Index < TemplateArgumentLists[getNumLevels() - Depth - 1].second);
+ return TemplateArgumentLists[getNumLevels() - Depth - 1].first[Index];
+ }
+
+ /// \brief Determine whether there is a non-NULL template argument at the
+ /// given depth and index.
+ ///
+ /// There must exist a template argument list at the given depth.
+ bool hasTemplateArgument(unsigned Depth, unsigned Index) const {
+ assert(Depth < TemplateArgumentLists.size());
+
+ if (Index >= TemplateArgumentLists[getNumLevels() - Depth - 1].second)
+ return false;
+
+ return !(*this)(Depth, Index).isNull();
+ }
+
+ /// \brief Clear out a specific template argument.
+ void setArgument(unsigned Depth, unsigned Index,
+ TemplateArgument Arg) {
+ assert(Depth < TemplateArgumentLists.size());
+ assert(Index < TemplateArgumentLists[getNumLevels() - Depth - 1].second);
+ const_cast<TemplateArgument&>(
+ TemplateArgumentLists[getNumLevels() - Depth - 1].first[Index])
+ = Arg;
+ }
+
+ /// \brief Add a new outermost level to the multi-level template argument
+ /// list.
+ void addOuterTemplateArguments(const TemplateArgumentList *TemplateArgs) {
+ TemplateArgumentLists.push_back(ArgList(TemplateArgs->data(),
+ TemplateArgs->size()));
+ }
+
+  /// \brief Add a new outermost level to the multi-level template argument
+ /// list.
+ void addOuterTemplateArguments(const TemplateArgument *Args,
+ unsigned NumArgs) {
+ TemplateArgumentLists.push_back(ArgList(Args, NumArgs));
+ }
+
+ /// \brief Retrieve the innermost template argument list.
+ const ArgList &getInnermost() const {
+ return TemplateArgumentLists.front();
+ }
+ };
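+
+  // A worked sketch of the depth/index convention above, for the
+  // X<int>::Y<17>::f example (illustrative only; ArgsY and ArgsX stand for
+  // the TemplateArgument arrays of Y<17> and X<int> respectively):
+  //
+  //   MultiLevelTemplateArgumentList MLTAL;
+  //   MLTAL.addOuterTemplateArguments(ArgsY, 1); // innermost first: (17)
+  //   MLTAL.addOuterTemplateArguments(ArgsX, 1); // then outermost:  (int)
+  //   // MLTAL(0, 0) is 'int' (depth 0) and MLTAL(1, 0) is '17' (depth 1).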
+
+ /// \brief The context in which partial ordering of function templates occurs.
+ enum TPOC {
+ /// \brief Partial ordering of function templates for a function call.
+ TPOC_Call,
+ /// \brief Partial ordering of function templates for a call to a
+ /// conversion function.
+ TPOC_Conversion,
+ /// \brief Partial ordering of function templates in other contexts, e.g.,
+ /// taking the address of a function template or matching a function
+ /// template specialization to a function template.
+ TPOC_Other
+ };
+
+ // This is lame but unavoidable in a world without forward
+ // declarations of enums. The alternatives are to either pollute
+ // Sema.h (by including this file) or sacrifice type safety (by
+ // making Sema.h declare things as enums).
+ class TemplatePartialOrderingContext {
+ TPOC Value;
+ public:
+ TemplatePartialOrderingContext(TPOC Value) : Value(Value) {}
+ operator TPOC() const { return Value; }
+ };
+
+ /// \brief Captures a template argument whose value has been deduced
+ /// via c++ template argument deduction.
+ class DeducedTemplateArgument : public TemplateArgument {
+ /// \brief For a non-type template argument, whether the value was
+ /// deduced from an array bound.
+ bool DeducedFromArrayBound;
+
+ public:
+ DeducedTemplateArgument()
+ : TemplateArgument(), DeducedFromArrayBound(false) { }
+
+ DeducedTemplateArgument(const TemplateArgument &Arg,
+ bool DeducedFromArrayBound = false)
+ : TemplateArgument(Arg), DeducedFromArrayBound(DeducedFromArrayBound) { }
+
+ /// \brief Construct an integral non-type template argument that
+ /// has been deduced, possibly from an array bound.
+ DeducedTemplateArgument(const llvm::APSInt &Value,
+ QualType ValueType,
+ bool DeducedFromArrayBound)
+ : TemplateArgument(Value, ValueType),
+ DeducedFromArrayBound(DeducedFromArrayBound) { }
+
+ /// \brief For a non-type template argument, determine whether the
+ /// template argument was deduced from an array bound.
+ bool wasDeducedFromArrayBound() const { return DeducedFromArrayBound; }
+
+ /// \brief Specify whether the given non-type template argument
+ /// was deduced from an array bound.
+ void setDeducedFromArrayBound(bool Deduced) {
+ DeducedFromArrayBound = Deduced;
+ }
+ };
+
+ /// \brief A stack-allocated class that identifies which local
+ /// variable declaration instantiations are present in this scope.
+ ///
+ /// A new instance of this class type will be created whenever we
+ /// instantiate a new function declaration, which will have its own
+ /// set of parameter declarations.
+ class LocalInstantiationScope {
+ public:
+ /// \brief A set of declarations.
+ typedef SmallVector<Decl *, 4> DeclArgumentPack;
+
+ private:
+ /// \brief Reference to the semantic analysis that is performing
+ /// this template instantiation.
+ Sema &SemaRef;
+
+ typedef llvm::DenseMap<const Decl *,
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> >
+ LocalDeclsMap;
+
+ /// \brief A mapping from local declarations that occur
+ /// within a template to their instantiations.
+ ///
+ /// This mapping is used during instantiation to keep track of,
+ /// e.g., function parameter and variable declarations. For example,
+ /// given:
+ ///
+ /// \code
+ /// template<typename T> T add(T x, T y) { return x + y; }
+ /// \endcode
+ ///
+ /// when we instantiate add<int>, we will introduce a mapping from
+ /// the ParmVarDecl for 'x' that occurs in the template to the
+ /// instantiated ParmVarDecl for 'x'.
+ ///
+ /// For a parameter pack, the local instantiation scope may contain a
+ /// set of instantiated parameters. This is stored as a DeclArgumentPack
+ /// pointer.
+ LocalDeclsMap LocalDecls;
+
+ /// \brief The set of argument packs we've allocated.
+ SmallVector<DeclArgumentPack *, 1> ArgumentPacks;
+
+ /// \brief The outer scope, which contains local variable
+ /// definitions from some other instantiation (that may not be
+ /// relevant to this particular scope).
+ LocalInstantiationScope *Outer;
+
+ /// \brief Whether we have already exited this scope.
+ bool Exited;
+
+ /// \brief Whether to combine this scope with the outer scope, such that
+ /// lookup will search our outer scope.
+ bool CombineWithOuterScope;
+
+ /// \brief If non-NULL, the template parameter pack that has been
+ /// partially substituted per C++0x [temp.arg.explicit]p9.
+ NamedDecl *PartiallySubstitutedPack;
+
+ /// \brief If \c PartiallySubstitutedPack is non-null, the set of
+ /// explicitly-specified template arguments in that pack.
+ const TemplateArgument *ArgsInPartiallySubstitutedPack;
+
+ /// \brief If \c PartiallySubstitutedPack, the number of
+ /// explicitly-specified template arguments in
+ /// ArgsInPartiallySubstitutedPack.
+ unsigned NumArgsInPartiallySubstitutedPack;
+
+ // This class is non-copyable
+ LocalInstantiationScope(const LocalInstantiationScope &);
+ LocalInstantiationScope &operator=(const LocalInstantiationScope &);
+
+ public:
+ LocalInstantiationScope(Sema &SemaRef, bool CombineWithOuterScope = false)
+ : SemaRef(SemaRef), Outer(SemaRef.CurrentInstantiationScope),
+ Exited(false), CombineWithOuterScope(CombineWithOuterScope),
+ PartiallySubstitutedPack(0)
+ {
+ SemaRef.CurrentInstantiationScope = this;
+ }
+
+ ~LocalInstantiationScope() {
+ Exit();
+ }
+
+ const Sema &getSema() const { return SemaRef; }
+
+ /// \brief Exit this local instantiation scope early.
+ void Exit() {
+ if (Exited)
+ return;
+
+ for (unsigned I = 0, N = ArgumentPacks.size(); I != N; ++I)
+ delete ArgumentPacks[I];
+
+ SemaRef.CurrentInstantiationScope = Outer;
+ Exited = true;
+ }
+
+ /// \brief Clone this scope, and all outer scopes, down to the given
+ /// outermost scope.
+ LocalInstantiationScope *cloneScopes(LocalInstantiationScope *Outermost) {
+ if (this == Outermost) return this;
+ LocalInstantiationScope *newScope =
+ new LocalInstantiationScope(SemaRef, CombineWithOuterScope);
+
+ newScope->Outer = 0;
+ if (Outer)
+ newScope->Outer = Outer->cloneScopes(Outermost);
+
+ newScope->PartiallySubstitutedPack = PartiallySubstitutedPack;
+ newScope->ArgsInPartiallySubstitutedPack = ArgsInPartiallySubstitutedPack;
+ newScope->NumArgsInPartiallySubstitutedPack =
+ NumArgsInPartiallySubstitutedPack;
+
+ for (LocalDeclsMap::iterator I = LocalDecls.begin(), E = LocalDecls.end();
+ I != E; ++I) {
+ const Decl *D = I->first;
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> &Stored =
+ newScope->LocalDecls[D];
+ if (I->second.is<Decl *>()) {
+ Stored = I->second.get<Decl *>();
+ } else {
+ DeclArgumentPack *OldPack = I->second.get<DeclArgumentPack *>();
+ DeclArgumentPack *NewPack = new DeclArgumentPack(*OldPack);
+ Stored = NewPack;
+ newScope->ArgumentPacks.push_back(NewPack);
+ }
+ }
+ return newScope;
+ }
+
+    /// \brief Deletes the given scope, and all outer scopes, down to the
+ /// given outermost scope.
+ static void deleteScopes(LocalInstantiationScope *Scope,
+ LocalInstantiationScope *Outermost) {
+ while (Scope && Scope != Outermost) {
+ LocalInstantiationScope *Out = Scope->Outer;
+ delete Scope;
+ Scope = Out;
+ }
+ }
+
+ /// \brief Find the instantiation of the declaration D within the current
+ /// instantiation scope.
+ ///
+ /// \param D The declaration whose instantiation we are searching for.
+ ///
+ /// \returns A pointer to the declaration or argument pack of declarations
+    /// to which the declaration \c D is instantiated, if found. Otherwise,
+ /// returns NULL.
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> *
+ findInstantiationOf(const Decl *D);
+
+ void InstantiatedLocal(const Decl *D, Decl *Inst);
+ void InstantiatedLocalPackArg(const Decl *D, Decl *Inst);
+ void MakeInstantiatedLocalArgPack(const Decl *D);
+
+ /// \brief Note that the given parameter pack has been partially substituted
+ /// via explicit specification of template arguments
+ /// (C++0x [temp.arg.explicit]p9).
+ ///
+ /// \param Pack The parameter pack, which will always be a template
+ /// parameter pack.
+ ///
+ /// \param ExplicitArgs The explicitly-specified template arguments provided
+ /// for this parameter pack.
+ ///
+ /// \param NumExplicitArgs The number of explicitly-specified template
+ /// arguments provided for this parameter pack.
+ void SetPartiallySubstitutedPack(NamedDecl *Pack,
+ const TemplateArgument *ExplicitArgs,
+ unsigned NumExplicitArgs);
+
+    /// \brief Retrieve the partially-substituted template parameter pack.
+ ///
+ /// If there is no partially-substituted parameter pack, returns NULL.
+ NamedDecl *getPartiallySubstitutedPack(
+ const TemplateArgument **ExplicitArgs = 0,
+ unsigned *NumExplicitArgs = 0) const;
+ };
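+
+  // A minimal usage sketch (illustrative only; PatternParm and InstParm stand
+  // for a parameter declaration in the template pattern and its
+  // instantiation):
+  //
+  //   void instantiateBody(Sema &S, Decl *PatternParm, Decl *InstParm) {
+  //     LocalInstantiationScope Scope(S);
+  //     Scope.InstantiatedLocal(PatternParm, InstParm);
+  //     // Later lookups via findInstantiationOf(PatternParm) yield InstParm;
+  //     // the scope is popped automatically when 'Scope' is destroyed.
+  //   }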
+
+ class TemplateDeclInstantiator
+ : public DeclVisitor<TemplateDeclInstantiator, Decl *>
+ {
+ Sema &SemaRef;
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex;
+ DeclContext *Owner;
+ const MultiLevelTemplateArgumentList &TemplateArgs;
+ Sema::LateInstantiatedAttrVec* LateAttrs;
+ LocalInstantiationScope *StartingScope;
+
+ /// \brief A list of out-of-line class template partial
+ /// specializations that will need to be instantiated after the
+ /// enclosing class's instantiation is complete.
+ SmallVector<std::pair<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *>, 4>
+ OutOfLinePartialSpecs;
+
+ public:
+ TemplateDeclInstantiator(Sema &SemaRef, DeclContext *Owner,
+ const MultiLevelTemplateArgumentList &TemplateArgs)
+ : SemaRef(SemaRef), SubstIndex(SemaRef, -1), Owner(Owner),
+ TemplateArgs(TemplateArgs), LateAttrs(0), StartingScope(0) { }
+
+ // FIXME: Once we get closer to completion, replace these manually-written
+ // declarations with automatically-generated ones from
+ // clang/AST/DeclNodes.inc.
+ Decl *VisitTranslationUnitDecl(TranslationUnitDecl *D);
+ Decl *VisitLabelDecl(LabelDecl *D);
+ Decl *VisitNamespaceDecl(NamespaceDecl *D);
+ Decl *VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
+ Decl *VisitTypedefDecl(TypedefDecl *D);
+ Decl *VisitTypeAliasDecl(TypeAliasDecl *D);
+ Decl *VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D);
+ Decl *VisitVarDecl(VarDecl *D);
+ Decl *VisitAccessSpecDecl(AccessSpecDecl *D);
+ Decl *VisitFieldDecl(FieldDecl *D);
+ Decl *VisitIndirectFieldDecl(IndirectFieldDecl *D);
+ Decl *VisitStaticAssertDecl(StaticAssertDecl *D);
+ Decl *VisitEnumDecl(EnumDecl *D);
+ Decl *VisitEnumConstantDecl(EnumConstantDecl *D);
+ Decl *VisitFriendDecl(FriendDecl *D);
+ Decl *VisitFunctionDecl(FunctionDecl *D,
+ TemplateParameterList *TemplateParams = 0);
+ Decl *VisitCXXRecordDecl(CXXRecordDecl *D);
+ Decl *VisitCXXMethodDecl(CXXMethodDecl *D,
+ TemplateParameterList *TemplateParams = 0,
+ bool IsClassScopeSpecialization = false);
+ Decl *VisitCXXConstructorDecl(CXXConstructorDecl *D);
+ Decl *VisitCXXDestructorDecl(CXXDestructorDecl *D);
+ Decl *VisitCXXConversionDecl(CXXConversionDecl *D);
+ ParmVarDecl *VisitParmVarDecl(ParmVarDecl *D);
+ Decl *VisitClassTemplateDecl(ClassTemplateDecl *D);
+ Decl *VisitClassTemplatePartialSpecializationDecl(
+ ClassTemplatePartialSpecializationDecl *D);
+ Decl *VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
+ Decl *VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
+ Decl *VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
+ Decl *VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
+ Decl *VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
+ Decl *VisitUsingDecl(UsingDecl *D);
+ Decl *VisitUsingShadowDecl(UsingShadowDecl *D);
+ Decl *VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
+ Decl *VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
+ Decl *VisitClassScopeFunctionSpecializationDecl(
+ ClassScopeFunctionSpecializationDecl *D);
+
+ // Base case. FIXME: Remove once we can instantiate everything.
+ Decl *VisitDecl(Decl *D) {
+ unsigned DiagID = SemaRef.getDiagnostics().getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "cannot instantiate %0 yet");
+ SemaRef.Diag(D->getLocation(), DiagID)
+ << D->getDeclKindName();
+
+ return 0;
+ }
+
+ // Enable late instantiation of attributes. Late instantiated attributes
+ // will be stored in LA.
+ void enableLateAttributeInstantiation(Sema::LateInstantiatedAttrVec *LA) {
+ LateAttrs = LA;
+ StartingScope = SemaRef.CurrentInstantiationScope;
+ }
+
+ // Disable late instantiation of attributes.
+ void disableLateAttributeInstantiation() {
+ LateAttrs = 0;
+ StartingScope = 0;
+ }
+
+ LocalInstantiationScope *getStartingScope() const { return StartingScope; }
+
+ typedef
+ SmallVectorImpl<std::pair<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *> >
+ ::iterator
+ delayed_partial_spec_iterator;
+
+ /// \brief Return an iterator to the beginning of the set of
+ /// "delayed" partial specializations, which must be passed to
+ /// InstantiateClassTemplatePartialSpecialization once the class
+ /// definition has been completed.
+ delayed_partial_spec_iterator delayed_partial_spec_begin() {
+ return OutOfLinePartialSpecs.begin();
+ }
+
+ /// \brief Return an iterator to the end of the set of
+ /// "delayed" partial specializations, which must be passed to
+ /// InstantiateClassTemplatePartialSpecialization once the class
+ /// definition has been completed.
+ delayed_partial_spec_iterator delayed_partial_spec_end() {
+ return OutOfLinePartialSpecs.end();
+ }
+
+ // Helper functions for instantiating methods.
+ TypeSourceInfo *SubstFunctionType(FunctionDecl *D,
+ SmallVectorImpl<ParmVarDecl *> &Params);
+ bool InitFunctionInstantiation(FunctionDecl *New, FunctionDecl *Tmpl);
+ bool InitMethodInstantiation(CXXMethodDecl *New, CXXMethodDecl *Tmpl);
+
+ TemplateParameterList *
+ SubstTemplateParams(TemplateParameterList *List);
+
+ bool SubstQualifier(const DeclaratorDecl *OldDecl,
+ DeclaratorDecl *NewDecl);
+ bool SubstQualifier(const TagDecl *OldDecl,
+ TagDecl *NewDecl);
+
+ Decl *InstantiateTypedefNameDecl(TypedefNameDecl *D, bool IsTypeAlias);
+ ClassTemplatePartialSpecializationDecl *
+ InstantiateClassTemplatePartialSpecialization(
+ ClassTemplateDecl *ClassTemplate,
+ ClassTemplatePartialSpecializationDecl *PartialSpec);
+ void InstantiateEnumDefinition(EnumDecl *Enum, EnumDecl *Pattern);
+ };
+}
+
+#endif // LLVM_CLANG_SEMA_TEMPLATE_H
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h b/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h
new file mode 100644
index 0000000..100d56e
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/TemplateDeduction.h
@@ -0,0 +1,135 @@
+//===- TemplateDeduction.h - C++ template argument deduction ----*- C++ -*-===/
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===/
+//
+// This file provides types used with Sema's template argument deduction
+// routines.
+//
+//===----------------------------------------------------------------------===/
+#ifndef LLVM_CLANG_SEMA_TEMPLATE_DEDUCTION_H
+#define LLVM_CLANG_SEMA_TEMPLATE_DEDUCTION_H
+
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/AST/DeclTemplate.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+
+class ASTContext;
+class TemplateArgumentList;
+
+namespace sema {
+
+/// \brief Provides information about an attempted template argument
+/// deduction, whose success or failure was described by a
+/// TemplateDeductionResult value.
+class TemplateDeductionInfo {
+ /// \brief The context in which the template arguments are stored.
+ ASTContext &Context;
+
+ /// \brief The deduced template argument list.
+ ///
+ TemplateArgumentList *Deduced;
+
+ /// \brief The source location at which template argument
+ /// deduction is occurring.
+ SourceLocation Loc;
+
+ /// \brief Warnings (and follow-on notes) that were suppressed due to
+ /// SFINAE while performing template argument deduction.
+ SmallVector<PartialDiagnosticAt, 4> SuppressedDiagnostics;
+
+ // do not implement these
+ TemplateDeductionInfo(const TemplateDeductionInfo&);
+ TemplateDeductionInfo &operator=(const TemplateDeductionInfo&);
+
+public:
+ TemplateDeductionInfo(ASTContext &Context, SourceLocation Loc)
+ : Context(Context), Deduced(0), Loc(Loc) { }
+
+ ~TemplateDeductionInfo() {
+ // FIXME: if (Deduced) Deduced->Destroy(Context);
+ }
+
+  /// \brief Returns the location at which template argument deduction is
+  /// occurring.
+ SourceLocation getLocation() const {
+ return Loc;
+ }
+
+ /// \brief Take ownership of the deduced template argument list.
+ TemplateArgumentList *take() {
+ TemplateArgumentList *Result = Deduced;
+ Deduced = 0;
+ return Result;
+ }
+
+ /// \brief Provide a new template argument list that contains the
+ /// results of template argument deduction.
+ void reset(TemplateArgumentList *NewDeduced) {
+ // FIXME: if (Deduced) Deduced->Destroy(Context);
+ Deduced = NewDeduced;
+ }
+
+  /// \brief Add a new diagnostic to the set of suppressed diagnostics.
+ void addSuppressedDiagnostic(SourceLocation Loc,
+ const PartialDiagnostic &PD) {
+ SuppressedDiagnostics.push_back(std::make_pair(Loc, PD));
+ }
+
+ /// \brief Iterator over the set of suppressed diagnostics.
+ typedef SmallVectorImpl<PartialDiagnosticAt>::const_iterator
+ diag_iterator;
+
+ /// \brief Returns an iterator at the beginning of the sequence of suppressed
+ /// diagnostics.
+ diag_iterator diag_begin() const { return SuppressedDiagnostics.begin(); }
+
+ /// \brief Returns an iterator at the end of the sequence of suppressed
+ /// diagnostics.
+ diag_iterator diag_end() const { return SuppressedDiagnostics.end(); }
+
+ /// \brief The template parameter to which a template argument
+ /// deduction failure refers.
+ ///
+ /// Depending on the result of template argument deduction, this
+ /// template parameter may have different meanings:
+ ///
+ /// TDK_Incomplete: this is the first template parameter whose
+ /// corresponding template argument was not deduced.
+ ///
+ /// TDK_Inconsistent: this is the template parameter for which
+ /// two different template argument values were deduced.
+ TemplateParameter Param;
+
+ /// \brief The first template argument to which the template
+ /// argument deduction failure refers.
+ ///
+ /// Depending on the result of the template argument deduction,
+ /// this template argument may have different meanings:
+ ///
+ /// TDK_Inconsistent: this argument is the first value deduced
+ /// for the corresponding template parameter.
+ ///
+ /// TDK_SubstitutionFailure: this argument is the template
+ /// argument we were instantiating when we encountered an error.
+ ///
+ /// TDK_NonDeducedMismatch: this is the template argument
+ /// provided in the source code.
+ TemplateArgument FirstArg;
+
+ /// \brief The second template argument to which the template
+ /// argument deduction failure refers.
+ ///
+ /// FIXME: Finish documenting this.
+ TemplateArgument SecondArg;
+};
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/TypoCorrection.h b/contrib/llvm/tools/clang/include/clang/Sema/TypoCorrection.h
new file mode 100644
index 0000000..a8f6e11
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/TypoCorrection.h
@@ -0,0 +1,256 @@
+//===--- TypoCorrection.h - Class for typo correction results ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TypoCorrection class, which stores the results of
+// Sema's typo correction (Sema::CorrectTypo).
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_TYPOCORRECTION_H
+#define LLVM_CLANG_SEMA_TYPOCORRECTION_H
+
+#include "clang/AST/DeclCXX.h"
+#include "llvm/ADT/SmallVector.h"
+
+namespace clang {
+
+/// @brief Simple class containing the result of Sema::CorrectTypo
+class TypoCorrection {
+public:
+ // "Distance" for unusable corrections
+ static const unsigned InvalidDistance = ~0U;
+ // The largest distance still considered valid (larger edit distances are
+ // mapped to InvalidDistance by getEditDistance).
+ static const unsigned MaximumDistance = 10000U;
+
+ // Relative weightings of the "edit distance" components. The higher the
+ // weight, the more of a penalty to fitness the component will give (higher
+ // weights mean greater contribution to the total edit distance, with the
+ // best correction candidates having the lowest edit distance).
+ static const unsigned CharDistanceWeight = 100U;
+ static const unsigned QualifierDistanceWeight = 110U;
+ static const unsigned CallbackDistanceWeight = 150U;
+
+ TypoCorrection(const DeclarationName &Name, NamedDecl *NameDecl,
+ NestedNameSpecifier *NNS=0, unsigned CharDistance=0,
+ unsigned QualifierDistance=0)
+ : CorrectionName(Name), CorrectionNameSpec(NNS),
+ CharDistance(CharDistance), QualifierDistance(QualifierDistance),
+ CallbackDistance(0) {
+ if (NameDecl)
+ CorrectionDecls.push_back(NameDecl);
+ }
+
+ TypoCorrection(NamedDecl *Name, NestedNameSpecifier *NNS=0,
+ unsigned CharDistance=0)
+ : CorrectionName(Name->getDeclName()), CorrectionNameSpec(NNS),
+ CharDistance(CharDistance), QualifierDistance(0), CallbackDistance(0) {
+ if (Name)
+ CorrectionDecls.push_back(Name);
+ }
+
+ TypoCorrection(DeclarationName Name, NestedNameSpecifier *NNS=0,
+ unsigned CharDistance=0)
+ : CorrectionName(Name), CorrectionNameSpec(NNS),
+ CharDistance(CharDistance), QualifierDistance(0), CallbackDistance(0) {}
+
+ TypoCorrection()
+ : CorrectionNameSpec(0), CharDistance(0), QualifierDistance(0),
+ CallbackDistance(0) {}
+
+ /// \brief Gets the DeclarationName of the typo correction
+ DeclarationName getCorrection() const { return CorrectionName; }
+ IdentifierInfo* getCorrectionAsIdentifierInfo() const {
+ return CorrectionName.getAsIdentifierInfo();
+ }
+
+ /// \brief Gets the NestedNameSpecifier needed to use the typo correction
+ NestedNameSpecifier* getCorrectionSpecifier() const {
+ return CorrectionNameSpec;
+ }
+ void setCorrectionSpecifier(NestedNameSpecifier* NNS) {
+ CorrectionNameSpec = NNS;
+ }
+
+ void setQualifierDistance(unsigned ED) {
+ QualifierDistance = ED;
+ }
+
+ void setCallbackDistance(unsigned ED) {
+ CallbackDistance = ED;
+ }
+
+ // Convert the given weighted edit distance to a roughly equivalent number of
+ // single-character edits (typically for comparison to the length of the
+ // string being edited).
+ static unsigned NormalizeEditDistance(unsigned ED) {
+ if (ED > MaximumDistance)
+ return InvalidDistance;
+ return (ED + CharDistanceWeight / 2) / CharDistanceWeight;
+ }
+
+ /// \brief Gets the "edit distance" of the typo correction from the typo.
+ /// If Normalized is true, scale the distance down by the CharDistanceWeight
+ /// to return the edit distance in terms of single-character edits.
+ unsigned getEditDistance(bool Normalized = true) const {
+ if (CharDistance > MaximumDistance || QualifierDistance > MaximumDistance ||
+ CallbackDistance > MaximumDistance)
+ return InvalidDistance;
+ unsigned ED =
+ CharDistance * CharDistanceWeight +
+ QualifierDistance * QualifierDistanceWeight +
+ CallbackDistance * CallbackDistanceWeight;
+ if (ED > MaximumDistance)
+ return InvalidDistance;
+ // Half the CharDistanceWeight is added to ED to simulate rounding since
+ // integer division truncates the value (i.e. round-to-nearest-int instead
+ // of round-to-zero).
+ return Normalized ? NormalizeEditDistance(ED) : ED;
+ }
+
+ /// \brief Gets the pointer to the declaration of the typo correction
+ NamedDecl* getCorrectionDecl() const {
+ return hasCorrectionDecl() ? *(CorrectionDecls.begin()) : 0;
+ }
+ template <class DeclClass>
+ DeclClass *getCorrectionDeclAs() const {
+ return dyn_cast_or_null<DeclClass>(getCorrectionDecl());
+ }
+
+ /// \brief Clears the list of NamedDecls before adding the new one.
+ void setCorrectionDecl(NamedDecl *CDecl) {
+ CorrectionDecls.clear();
+ addCorrectionDecl(CDecl);
+ }
+
+ /// \brief Add the given NamedDecl to the list of NamedDecls that are the
+ /// declarations associated with the DeclarationName of this TypoCorrection
+ void addCorrectionDecl(NamedDecl *CDecl);
+
+ std::string getAsString(const LangOptions &LO) const;
+ std::string getQuoted(const LangOptions &LO) const {
+ return "'" + getAsString(LO) + "'";
+ }
+
+ /// \brief Returns whether this TypoCorrection has a non-empty DeclarationName
+ operator bool() const { return bool(CorrectionName); }
+
+ /// \brief Mark this TypoCorrection as being a keyword.
+  /// Since addCorrectionDecl and setCorrectionDecl don't allow NULL to be
+ /// added to the list of the correction's NamedDecl pointers, NULL is added
+ /// as the only element in the list to mark this TypoCorrection as a keyword.
+ void makeKeyword() {
+ CorrectionDecls.clear();
+ CorrectionDecls.push_back(0);
+ }
+
+ // Check if this TypoCorrection is a keyword by checking if the first
+ // item in CorrectionDecls is NULL.
+ bool isKeyword() const {
+ return !CorrectionDecls.empty() &&
+ CorrectionDecls.front() == 0;
+ }
+
+ // Check if this TypoCorrection is the given keyword.
+ template<std::size_t StrLen>
+ bool isKeyword(const char (&Str)[StrLen]) const {
+ return isKeyword() && getCorrectionAsIdentifierInfo()->isStr(Str);
+ }
+
+ // Returns true if the correction either is a keyword or has a known decl.
+ bool isResolved() const { return !CorrectionDecls.empty(); }
+
+ bool isOverloaded() const {
+ return CorrectionDecls.size() > 1;
+ }
+
+ typedef llvm::SmallVector<NamedDecl*, 1>::iterator decl_iterator;
+ decl_iterator begin() {
+ return isKeyword() ? CorrectionDecls.end() : CorrectionDecls.begin();
+ }
+ decl_iterator end() { return CorrectionDecls.end(); }
+ typedef llvm::SmallVector<NamedDecl*, 1>::const_iterator const_decl_iterator;
+ const_decl_iterator begin() const {
+ return isKeyword() ? CorrectionDecls.end() : CorrectionDecls.begin();
+ }
+ const_decl_iterator end() const { return CorrectionDecls.end(); }
+
+private:
+ bool hasCorrectionDecl() const {
+ return (!isKeyword() && !CorrectionDecls.empty());
+ }
+
+ // Results.
+ DeclarationName CorrectionName;
+ NestedNameSpecifier *CorrectionNameSpec;
+ llvm::SmallVector<NamedDecl*, 1> CorrectionDecls;
+ unsigned CharDistance;
+ unsigned QualifierDistance;
+ unsigned CallbackDistance;
+};
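+
+// A worked example of the weighting above (illustrative only): a candidate
+// two characters away that also requires a one-step qualifier change has
+//   ED = 2 * CharDistanceWeight + 1 * QualifierDistanceWeight
+//      = 2 * 100 + 1 * 110 = 310,
+// so getEditDistance(/*Normalized=*/true) returns (310 + 50) / 100 = 3,
+// i.e. roughly three single-character edits.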
+
+/// @brief Base class for callback objects used by Sema::CorrectTypo to check
+/// the validity of a potential typo correction.
+class CorrectionCandidateCallback {
+ public:
+ static const unsigned InvalidDistance = TypoCorrection::InvalidDistance;
+
+ CorrectionCandidateCallback()
+ : WantTypeSpecifiers(true), WantExpressionKeywords(true),
+ WantCXXNamedCasts(true), WantRemainingKeywords(true),
+ WantObjCSuper(false),
+ IsObjCIvarLookup(false) {}
+
+ virtual ~CorrectionCandidateCallback() {}
+
+ /// \brief Simple predicate used by the default RankCandidate to
+ /// determine whether to return an edit distance of 0 or InvalidDistance.
+  /// This can be overridden by validators that only need to determine if a
+  /// candidate is viable, without ranking potentially viable candidates.
+  /// Only ValidateCandidate or RankCandidate needs to be overridden by a
+  /// callback wishing to check the viability of correction candidates.
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ return true;
+ }
+
+ /// \brief Method used by Sema::CorrectTypo to assign an "edit distance" rank
+  /// to a candidate (where a lower value represents a better candidate), or
+  /// to return InvalidDistance if the candidate is not viable at all. For
+  /// validation callbacks that only need to determine if a candidate is
+  /// viable, the default RankCandidate returns either 0 or InvalidDistance
+  /// depending on whether ValidateCandidate returns true or false.
+ virtual unsigned RankCandidate(const TypoCorrection &candidate) {
+ return ValidateCandidate(candidate) ? 0 : InvalidDistance;
+ }
+
+ // Flags for context-dependent keywords.
+ // TODO: Expand these to apply to non-keywords or possibly remove them.
+ bool WantTypeSpecifiers;
+ bool WantExpressionKeywords;
+ bool WantCXXNamedCasts;
+ bool WantRemainingKeywords;
+ bool WantObjCSuper;
+ // Temporary hack for the one case where a CorrectTypoContext enum is used
+ // when looking up results.
+ bool IsObjCIvarLookup;
+};
+
+/// @brief Simple template class for restricting typo correction candidates
+/// to ones having a single Decl* of the given type.
+template <class C>
+class DeclFilterCCC : public CorrectionCandidateCallback {
+ public:
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ return candidate.getCorrectionDeclAs<C>();
+ }
+};
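+
+// A minimal sketch of restricting candidates to variables (illustrative only;
+// how the callback is handed to Sema::CorrectTypo is not shown here):
+//
+//   DeclFilterCCC<VarDecl> OnlyVars;
+//   bool Viable = OnlyVars.ValidateCandidate(SomeCorrection);
+//   // Viable is true only if SomeCorrection resolves to a VarDecl.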
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Sema/Weak.h b/contrib/llvm/tools/clang/include/clang/Sema/Weak.h
new file mode 100644
index 0000000..d36b970
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Sema/Weak.h
@@ -0,0 +1,46 @@
+//===-- Weak.h - Information about #pragma weak directives -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the WeakInfo class, which is used to store
+// information about the target of a #pragma weak directive.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_WEAK_H
+#define LLVM_CLANG_SEMA_WEAK_H
+
+#include "clang/Basic/SourceLocation.h"
+
+namespace clang {
+
+class IdentifierInfo;
+
+/// \brief Captures information about a #pragma weak directive.
+class WeakInfo {
+ IdentifierInfo *alias; // alias (optional)
+ SourceLocation loc; // for diagnostics
+ bool used; // identifier later declared?
+public:
+ WeakInfo()
+ : alias(0), loc(SourceLocation()), used(false) {}
+ WeakInfo(IdentifierInfo *Alias, SourceLocation Loc)
+ : alias(Alias), loc(Loc), used(false) {}
+ inline IdentifierInfo * getAlias() const { return alias; }
+ inline SourceLocation getLocation() const { return loc; }
+ void setUsed(bool Used=true) { used = Used; }
+ inline bool getUsed() { return used; }
+ bool operator==(WeakInfo RHS) const {
+ return alias == RHS.getAlias() && loc == RHS.getLocation();
+ }
+ bool operator!=(WeakInfo RHS) const { return !(*this == RHS); }
+};
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_SEMA_WEAK_H
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h
new file mode 100644
index 0000000..4591630
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTBitCodes.h
@@ -0,0 +1,1277 @@
+//===- ASTBitCodes.h - Enum values for the PCH bitcode format ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines Bitcode enum values for Clang serialized AST files.
+//
+// The enum values defined in this file should be considered permanent. If
+// new features are added, they should have values added at the end of the
+// respective lists.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_FRONTEND_PCHBITCODES_H
+#define LLVM_CLANG_FRONTEND_PCHBITCODES_H
+
+#include "clang/AST/Type.h"
+#include "llvm/Bitcode/BitCodes.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace clang {
+ namespace serialization {
+ /// \brief AST file major version number supported by this version of
+ /// Clang.
+ ///
+ /// Whenever the AST file format changes in a way that makes it
+ /// incompatible with previous versions (such that a reader
+ /// designed for the previous version could not support reading
+ /// the new version), this number should be increased.
+ ///
+ /// Version 4 of AST files also requires that the version control branch and
+ /// revision match exactly, since there is no backward compatibility of
+ /// AST files at this time.
+ const unsigned VERSION_MAJOR = 4;
+
+ /// \brief AST file minor version number supported by this version of
+ /// Clang.
+ ///
+ /// Whenever the AST format changes in a way that is still
+ /// compatible with previous versions (such that a reader designed
+ /// for the previous version could still support reading the new
+ /// version by ignoring new kinds of subblocks), this number
+ /// should be increased.
+ const unsigned VERSION_MINOR = 0;
+
+ /// \brief An ID number that refers to an identifier in an AST file.
+ ///
+ /// The ID numbers of identifiers are consecutive (in order of discovery)
+ /// and start at 1. 0 is reserved for NULL.
+ typedef uint32_t IdentifierID;
+
+ /// \brief An ID number that refers to a declaration in an AST file.
+ ///
+ /// The ID numbers of declarations are consecutive (in order of
+ /// discovery), with values below NUM_PREDEF_DECL_IDS being reserved.
+ /// At the start of a chain of precompiled headers, declaration ID 1 is
+ /// used for the translation unit declaration.
+ typedef uint32_t DeclID;
+
+ /// \brief a Decl::Kind/DeclID pair.
+ typedef std::pair<uint32_t, DeclID> KindDeclIDPair;
+
+ // FIXME: Turn these into classes so we can have some type safety when
+ // we go from local ID to global and vice-versa.
+ typedef DeclID LocalDeclID;
+ typedef DeclID GlobalDeclID;
+
+ /// \brief An ID number that refers to a type in an AST file.
+ ///
+ /// The ID of a type is partitioned into two parts: the lower
+ /// three bits are used to store the const/volatile/restrict
+ /// qualifiers (as with QualType) and the upper bits provide a
+ /// type index. The type index values are partitioned into two
+ /// sets. The values below NUM_PREDEF_TYPE_IDs are predefined type
+ /// IDs (based on the PREDEF_TYPE_*_ID constants), with 0 as a
+ /// placeholder for "no type". Values from NUM_PREDEF_TYPE_IDs are
+ /// other types that have serialized representations.
+ typedef uint32_t TypeID;
+
+ /// \brief A type index; the type ID with the qualifier bits removed.
+ class TypeIdx {
+ uint32_t Idx;
+ public:
+ TypeIdx() : Idx(0) { }
+ explicit TypeIdx(uint32_t index) : Idx(index) { }
+
+ uint32_t getIndex() const { return Idx; }
+ TypeID asTypeID(unsigned FastQuals) const {
+ if (Idx == uint32_t(-1))
+ return TypeID(-1);
+
+ return (Idx << Qualifiers::FastWidth) | FastQuals;
+ }
+ static TypeIdx fromTypeID(TypeID ID) {
+ if (ID == TypeID(-1))
+ return TypeIdx(-1);
+
+ return TypeIdx(ID >> Qualifiers::FastWidth);
+ }
+ };
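+
+    // A small round-trip sketch of the qualifier packing above (illustrative
+    // only; Quals is assumed to hold only fast qualifiers):
+    //
+    //   TypeIdx Idx(42);
+    //   TypeID ID = Idx.asTypeID(Quals); // (42 << Qualifiers::FastWidth) | Quals
+    //   assert(TypeIdx::fromTypeID(ID).getIndex() == 42);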
+
+ /// A structure for putting "fast"-unqualified QualTypes into a
+ /// DenseMap. This uses the standard pointer hash function.
+ struct UnsafeQualTypeDenseMapInfo {
+ static inline bool isEqual(QualType A, QualType B) { return A == B; }
+ static inline QualType getEmptyKey() {
+ return QualType::getFromOpaquePtr((void*) 1);
+ }
+ static inline QualType getTombstoneKey() {
+ return QualType::getFromOpaquePtr((void*) 2);
+ }
+ static inline unsigned getHashValue(QualType T) {
+ assert(!T.getLocalFastQualifiers() &&
+ "hash invalid for types with fast quals");
+ uintptr_t v = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
+ return (unsigned(v) >> 4) ^ (unsigned(v) >> 9);
+ }
+ };
+
+ /// \brief An ID number that refers to an identifier in an AST file.
+ typedef uint32_t IdentID;
+
+ /// \brief The number of predefined identifier IDs.
+ const unsigned int NUM_PREDEF_IDENT_IDS = 1;
+
+ /// \brief An ID number that refers to an ObjC selector in an AST file.
+ typedef uint32_t SelectorID;
+
+ /// \brief The number of predefined selector IDs.
+ const unsigned int NUM_PREDEF_SELECTOR_IDS = 1;
+
+ /// \brief An ID number that refers to a set of CXXBaseSpecifiers in an
+ /// AST file.
+ typedef uint32_t CXXBaseSpecifiersID;
+
+ /// \brief An ID number that refers to an entity in the detailed
+ /// preprocessing record.
+ typedef uint32_t PreprocessedEntityID;
+
+ /// \brief An ID number that refers to a submodule in a module file.
+ typedef uint32_t SubmoduleID;
+
+ /// \brief The number of predefined submodule IDs.
+ const unsigned int NUM_PREDEF_SUBMODULE_IDS = 1;
+
+ /// \brief Source range/offset of a preprocessed entity.
+ struct PPEntityOffset {
+ /// \brief Raw source location of beginning of range.
+ unsigned Begin;
+ /// \brief Raw source location of end of range.
+ unsigned End;
+ /// \brief Offset in the AST file.
+ uint32_t BitOffset;
+
+ PPEntityOffset(SourceRange R, uint32_t BitOffset)
+ : Begin(R.getBegin().getRawEncoding()),
+ End(R.getEnd().getRawEncoding()),
+ BitOffset(BitOffset) { }
+ };
+
+    /// \brief Source location and bit offset of a declaration.
+ struct DeclOffset {
+ /// \brief Raw source location.
+ unsigned Loc;
+ /// \brief Offset in the AST file.
+ uint32_t BitOffset;
+
+ DeclOffset() : Loc(0), BitOffset(0) { }
+ DeclOffset(SourceLocation Loc, uint32_t BitOffset)
+ : Loc(Loc.getRawEncoding()),
+ BitOffset(BitOffset) { }
+ void setLocation(SourceLocation L) {
+ Loc = L.getRawEncoding();
+ }
+ };
+
+ /// \brief The number of predefined preprocessed entity IDs.
+ const unsigned int NUM_PREDEF_PP_ENTITY_IDS = 1;
+
+ /// \brief Describes the various kinds of blocks that occur within
+ /// an AST file.
+ enum BlockIDs {
+      /// \brief The AST block, which acts as a container around the
+      /// full contents of the AST file.
+ AST_BLOCK_ID = llvm::bitc::FIRST_APPLICATION_BLOCKID,
+
+ /// \brief The block containing information about the source
+ /// manager.
+ SOURCE_MANAGER_BLOCK_ID,
+
+ /// \brief The block containing information about the
+ /// preprocessor.
+ PREPROCESSOR_BLOCK_ID,
+
+ /// \brief The block containing the definitions of all of the
+ /// types and decls used within the AST file.
+ DECLTYPES_BLOCK_ID,
+
+ /// \brief The block containing DECL_UPDATES records.
+ DECL_UPDATES_BLOCK_ID,
+
+ /// \brief The block containing the detailed preprocessing record.
+ PREPROCESSOR_DETAIL_BLOCK_ID,
+
+ /// \brief The block containing the submodule structure.
+ SUBMODULE_BLOCK_ID
+ };
+
+ /// \brief Record types that occur within the AST block itself.
+ enum ASTRecordTypes {
+ /// \brief Record code for the offsets of each type.
+ ///
+ /// The TYPE_OFFSET constant describes the record that occurs
+ /// within the AST block. The record itself is an array of offsets that
+ /// point into the declarations and types block (identified by
+ /// DECLTYPES_BLOCK_ID). The index into the array is based on the ID
+ /// of a type. For a given type ID @c T, the lower three bits of
+ /// @c T are its qualifiers (const, volatile, restrict), as in
+ /// the QualType class. The upper bits, after being shifted and
+ /// subtracting NUM_PREDEF_TYPE_IDS, are used to index into the
+ /// TYPE_OFFSET block to determine the offset of that type's
+ /// corresponding record within the DECLTYPES_BLOCK_ID block.
+ TYPE_OFFSET = 1,
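+      // For a concrete reading of the above: given a type ID T, the fast
+      // qualifiers are (T & 7), the type index is (T >> 3), and the record's
+      // offset is found at TYPE_OFFSET[(T >> 3) - NUM_PREDEF_TYPE_IDS] within
+      // the DECLTYPES_BLOCK_ID block.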
+
+ /// \brief Record code for the offsets of each decl.
+ ///
+ /// The DECL_OFFSET constant describes the record that occurs
+ /// within the block identified by DECL_OFFSETS_BLOCK_ID within
+ /// the AST block. The record itself is an array of offsets that
+ /// point into the declarations and types block (identified by
+ /// DECLTYPES_BLOCK_ID). The declaration ID is an index into this
+ /// record, after subtracting one to account for the use of
+ /// declaration ID 0 for a NULL declaration pointer. Index 0 is
+ /// reserved for the translation unit declaration.
+ DECL_OFFSET = 2,
+
+ /// \brief Record code for the language options table.
+ ///
+ /// The record with this code contains the contents of the
+ /// LangOptions structure. We serialize the entire contents of
+ /// the structure, and let the reader decide which options are
+ /// actually important to check.
+ LANGUAGE_OPTIONS = 3,
+
+ /// \brief AST file metadata, including the AST file version number
+ /// and the target triple used to build the AST file.
+ METADATA = 4,
+
+ /// \brief Record code for the table of offsets of each
+ /// identifier ID.
+ ///
+ /// The offset table contains offsets into the blob stored in
+ /// the IDENTIFIER_TABLE record. Each offset points to the
+ /// NULL-terminated string that corresponds to that identifier.
+ IDENTIFIER_OFFSET = 5,
+
+ /// \brief Record code for the identifier table.
+ ///
+ /// The identifier table is a simple blob that contains
+ /// NULL-terminated strings for all of the identifiers
+ /// referenced by the AST file. The IDENTIFIER_OFFSET table
+ /// contains the mapping from identifier IDs to the characters
+ /// in this blob. Note that the starting offsets of all of the
+ /// identifiers are odd, so that, when the identifier offset
+ /// table is loaded in, we can use the low bit to distinguish
+ /// between offsets (for unresolved identifier IDs) and
+ /// IdentifierInfo pointers (for already-resolved identifier
+ /// IDs).
+ IDENTIFIER_TABLE = 6,
+
+ /// \brief Record code for the array of external definitions.
+ ///
+ /// The AST file contains a list of all of the unnamed external
+ /// definitions present within the parsed headers, stored as an
+ /// array of declaration IDs. These external definitions will be
+ /// reported to the AST consumer after the AST file has been
+ /// read, since their presence can affect the semantics of the
+ /// program (e.g., for code generation).
+ EXTERNAL_DEFINITIONS = 7,
+
+ /// \brief Record code for the set of non-builtin, special
+ /// types.
+ ///
+ /// This record contains the type IDs for the various type nodes
+ /// that are constructed during semantic analysis (e.g.,
+ /// __builtin_va_list). The SPECIAL_TYPE_* constants provide
+ /// offsets into this record.
+ SPECIAL_TYPES = 8,
+
+ /// \brief Record code for the extra statistics we gather while
+ /// generating an AST file.
+ STATISTICS = 9,
+
+ /// \brief Record code for the array of tentative definitions.
+ TENTATIVE_DEFINITIONS = 10,
+
+ /// \brief Record code for the array of locally-scoped external
+ /// declarations.
+ LOCALLY_SCOPED_EXTERNAL_DECLS = 11,
+
+ /// \brief Record code for the table of offsets into the
+ /// Objective-C method pool.
+ SELECTOR_OFFSETS = 12,
+
+      /// \brief Record code for the Objective-C method pool.
+ METHOD_POOL = 13,
+
+ /// \brief The value of the next __COUNTER__ to dispense.
+ /// [PP_COUNTER_VALUE, Val]
+ PP_COUNTER_VALUE = 14,
+
+ /// \brief Record code for the table of offsets into the block
+ /// of source-location information.
+ SOURCE_LOCATION_OFFSETS = 15,
+
+ /// \brief Record code for the set of source location entries
+ /// that need to be preloaded by the AST reader.
+ ///
+ /// This set contains the source location entry for the
+ /// predefines buffer and for any file entries that need to be
+ /// preloaded.
+ SOURCE_LOCATION_PRELOADS = 16,
+
+ /// \brief Record code for the stat() cache.
+ STAT_CACHE = 17,
+
+ /// \brief Record code for the set of ext_vector type names.
+ EXT_VECTOR_DECLS = 18,
+
+ /// \brief Record code for the original file that was used to
+ /// generate the AST file.
+ ORIGINAL_FILE_NAME = 19,
+
+ /// \brief Record code for the file ID of the original file used to
+ /// generate the AST file.
+ ORIGINAL_FILE_ID = 20,
+
+ /// \brief Record code for the version control branch and revision
+ /// information of the compiler used to build this AST file.
+ VERSION_CONTROL_BRANCH_REVISION = 21,
+
+ /// \brief Record code for the array of unused file scoped decls.
+ UNUSED_FILESCOPED_DECLS = 22,
+
+ /// \brief Record code for the table of offsets to entries in the
+ /// preprocessing record.
+ PPD_ENTITIES_OFFSETS = 23,
+
+ /// \brief Record code for the array of VTable uses.
+ VTABLE_USES = 24,
+
+ /// \brief Record code for the array of dynamic classes.
+ DYNAMIC_CLASSES = 25,
+
+ /// \brief Record code for the list of other AST files imported by
+ /// this AST file.
+ IMPORTS = 26,
+
+ /// \brief Record code for referenced selector pool.
+ REFERENCED_SELECTOR_POOL = 27,
+
+ /// \brief Record code for an update to the TU's lexically contained
+ /// declarations.
+ TU_UPDATE_LEXICAL = 28,
+
+ /// \brief Record code for the array describing the locations (in the
+ /// LOCAL_REDECLARATIONS record) of the redeclaration chains, indexed by
+ /// the first known ID.
+ LOCAL_REDECLARATIONS_MAP = 29,
+
+ /// \brief Record code for declarations that Sema keeps references of.
+ SEMA_DECL_REFS = 30,
+
+ /// \brief Record code for weak undeclared identifiers.
+ WEAK_UNDECLARED_IDENTIFIERS = 31,
+
+ /// \brief Record code for pending implicit instantiations.
+ PENDING_IMPLICIT_INSTANTIATIONS = 32,
+
+ /// \brief Record code for a decl replacement block.
+ ///
+ /// If a declaration is modified after having been deserialized, and then
+ /// written to a dependent AST file, its ID and offset must be added to
+ /// the replacement block.
+ DECL_REPLACEMENTS = 33,
+
+ /// \brief Record code for an update to a decl context's lookup table.
+ ///
+ /// In practice, this should only be used for the TU and namespaces.
+ UPDATE_VISIBLE = 34,
+
+ /// \brief Record for offsets of DECL_UPDATES records for declarations
+ /// that were modified after being deserialized and need updates.
+ DECL_UPDATE_OFFSETS = 35,
+
+ /// \brief Record of updates for a declaration that was modified after
+ /// being deserialized.
+ DECL_UPDATES = 36,
+
+ /// \brief Record code for the table of offsets to CXXBaseSpecifier
+ /// sets.
+ CXX_BASE_SPECIFIER_OFFSETS = 37,
+
+ /// \brief Record code for #pragma diagnostic mappings.
+ DIAG_PRAGMA_MAPPINGS = 38,
+
+ /// \brief Record code for special CUDA declarations.
+ CUDA_SPECIAL_DECL_REFS = 39,
+
+ /// \brief Record code for header search information.
+ HEADER_SEARCH_TABLE = 40,
+
+ /// \brief The directory that the PCH was originally created in.
+ ORIGINAL_PCH_DIR = 41,
+
+ /// \brief Record code for floating point #pragma options.
+ FP_PRAGMA_OPTIONS = 42,
+
+ /// \brief Record code for enabled OpenCL extensions.
+ OPENCL_EXTENSIONS = 43,
+
+ /// \brief The list of delegating constructor declarations.
+ DELEGATING_CTORS = 44,
+
+ /// \brief Record code for the table of offsets into the block
+ /// of file source-location information.
+ FILE_SOURCE_LOCATION_OFFSETS = 45,
+
+ /// \brief Record code for the set of known namespaces, which are used
+ /// for typo correction.
+ KNOWN_NAMESPACES = 46,
+
+ /// \brief Record code for the remapping information used to relate
+      /// loaded modules to the various offsets and IDs (e.g., source location
+      /// offsets, declaration and type IDs) that are used in that module to
+ /// refer to other modules.
+ MODULE_OFFSET_MAP = 47,
+
+ /// \brief Record code for the source manager line table information,
+ /// which stores information about #line directives.
+ SOURCE_MANAGER_LINE_TABLE = 48,
+
+ /// \brief Record code for map of Objective-C class definition IDs to the
+ /// ObjC categories in a module that are attached to that class.
+ OBJC_CATEGORIES_MAP = 49,
+
+ /// \brief Record code for a file sorted array of DeclIDs in a module.
+ FILE_SORTED_DECLS = 50,
+
+ /// \brief Record code for an array of all of the (sub)modules that were
+ /// imported by the AST file.
+ IMPORTED_MODULES = 51,
+
+ /// \brief Record code for the set of merged declarations in an AST file.
+ MERGED_DECLARATIONS = 52,
+
+ /// \brief Record code for the array of redeclaration chains.
+ ///
+ /// This array can only be interpreted properly using the local
+ /// redeclarations map.
+ LOCAL_REDECLARATIONS = 53,
+
+ /// \brief Record code for the array of Objective-C categories (including
+ /// extensions).
+ ///
+ /// This array can only be interpreted properly using the Objective-C
+ /// categories map.
+ OBJC_CATEGORIES
+ };
+
+ /// \brief Record types used within a source manager block.
+ enum SourceManagerRecordTypes {
+ /// \brief Describes a source location entry (SLocEntry) for a
+ /// file.
+ SM_SLOC_FILE_ENTRY = 1,
+ /// \brief Describes a source location entry (SLocEntry) for a
+ /// buffer.
+ SM_SLOC_BUFFER_ENTRY = 2,
+ /// \brief Describes a blob that contains the data for a buffer
+ /// entry. This kind of record always directly follows a
+ /// SM_SLOC_BUFFER_ENTRY record or a SM_SLOC_FILE_ENTRY with an
+ /// overridden buffer.
+ SM_SLOC_BUFFER_BLOB = 3,
+ /// \brief Describes a source location entry (SLocEntry) for a
+ /// macro expansion.
+ SM_SLOC_EXPANSION_ENTRY = 4
+ };
+
+ /// \brief Record types used within a preprocessor block.
+ enum PreprocessorRecordTypes {
+ // The macros in the PP section are a PP_MACRO_* instance followed by a
+ // list of PP_TOKEN instances for each token in the definition.
+
+ /// \brief An object-like macro definition.
+ /// [PP_MACRO_OBJECT_LIKE, IdentInfoID, SLoc, IsUsed]
+ PP_MACRO_OBJECT_LIKE = 1,
+
+ /// \brief A function-like macro definition.
+      /// [PP_MACRO_FUNCTION_LIKE, <ObjectLikeStuff>, IsC99Varargs, IsGNUVarargs,
+ /// NumArgs, ArgIdentInfoID* ]
+ PP_MACRO_FUNCTION_LIKE = 2,
+
+ /// \brief Describes one token.
+ /// [PP_TOKEN, SLoc, Length, IdentInfoID, Kind, Flags]
+ PP_TOKEN = 3
+ };
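
As a reading aid (not part of the patch): the layout sketched in the comments above
can be mirrored by a plain in-memory structure, where each PP_MACRO_* record carries
the macro's identifier, location and usage flag and is followed by one PP_TOKEN
record per token of its definition. The struct and field names below are illustrative
assumptions, not the on-disk encoding, and only the object-like form is shown.

    #include <vector>

    struct RawPPToken {               // mirrors [PP_TOKEN, SLoc, Length,
      unsigned SLoc;                  //          IdentInfoID, Kind, Flags]
      unsigned Length;
      unsigned IdentInfoID;
      unsigned Kind;
      unsigned Flags;
    };

    struct RawObjectLikeMacro {       // mirrors [PP_MACRO_OBJECT_LIKE,
      unsigned IdentInfoID;           //          IdentInfoID, SLoc, IsUsed]
      unsigned SLoc;
      bool IsUsed;
      std::vector<RawPPToken> Tokens; // the PP_TOKEN records that follow
    };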
+
+ /// \brief Record types used within a preprocessor detail block.
+ enum PreprocessorDetailRecordTypes {
+ /// \brief Describes a macro expansion within the preprocessing record.
+ PPD_MACRO_EXPANSION = 0,
+
+ /// \brief Describes a macro definition within the preprocessing record.
+ PPD_MACRO_DEFINITION = 1,
+
+ /// \brief Describes an inclusion directive within the preprocessing
+ /// record.
+ PPD_INCLUSION_DIRECTIVE = 2
+ };
+
+ /// \brief Record types used within a submodule description block.
+ enum SubmoduleRecordTypes {
+ /// \brief Metadata for submodules as a whole.
+ SUBMODULE_METADATA = 0,
+ /// \brief Defines the major attributes of a submodule, including its
+ /// name and parent.
+ SUBMODULE_DEFINITION = 1,
+ /// \brief Specifies the umbrella header used to create this module,
+ /// if any.
+ SUBMODULE_UMBRELLA_HEADER = 2,
+ /// \brief Specifies a header that falls into this (sub)module.
+ SUBMODULE_HEADER = 3,
+ /// \brief Specifies an umbrella directory.
+ SUBMODULE_UMBRELLA_DIR = 4,
+ /// \brief Specifies the submodules that are imported by this
+ /// submodule.
+ SUBMODULE_IMPORTS = 5,
+ /// \brief Specifies the submodules that are re-exported from this
+ /// submodule.
+ SUBMODULE_EXPORTS = 6,
+ /// \brief Specifies a required feature.
+ SUBMODULE_REQUIRES = 7
+ };
+
+ /// \defgroup ASTAST AST file AST constants
+ ///
+ /// The constants in this group describe various components of the
+ /// abstract syntax tree within an AST file.
+ ///
+ /// @{
+
+ /// \brief Predefined type IDs.
+ ///
+ /// These type IDs correspond to predefined types in the AST
+ /// context, such as built-in types (int) and special place-holder
+ /// types (the <overload> and <dependent> type markers). Such
+ /// types are never actually serialized, since they will be built
+ /// by the AST context when it is created.
+ enum PredefinedTypeIDs {
+ /// \brief The NULL type.
+ PREDEF_TYPE_NULL_ID = 0,
+ /// \brief The void type.
+ PREDEF_TYPE_VOID_ID = 1,
+ /// \brief The 'bool' or '_Bool' type.
+ PREDEF_TYPE_BOOL_ID = 2,
+ /// \brief The 'char' type, when it is unsigned.
+ PREDEF_TYPE_CHAR_U_ID = 3,
+ /// \brief The 'unsigned char' type.
+ PREDEF_TYPE_UCHAR_ID = 4,
+ /// \brief The 'unsigned short' type.
+ PREDEF_TYPE_USHORT_ID = 5,
+ /// \brief The 'unsigned int' type.
+ PREDEF_TYPE_UINT_ID = 6,
+ /// \brief The 'unsigned long' type.
+ PREDEF_TYPE_ULONG_ID = 7,
+ /// \brief The 'unsigned long long' type.
+ PREDEF_TYPE_ULONGLONG_ID = 8,
+ /// \brief The 'char' type, when it is signed.
+ PREDEF_TYPE_CHAR_S_ID = 9,
+ /// \brief The 'signed char' type.
+ PREDEF_TYPE_SCHAR_ID = 10,
+ /// \brief The C++ 'wchar_t' type.
+ PREDEF_TYPE_WCHAR_ID = 11,
+ /// \brief The (signed) 'short' type.
+ PREDEF_TYPE_SHORT_ID = 12,
+ /// \brief The (signed) 'int' type.
+ PREDEF_TYPE_INT_ID = 13,
+ /// \brief The (signed) 'long' type.
+ PREDEF_TYPE_LONG_ID = 14,
+ /// \brief The (signed) 'long long' type.
+ PREDEF_TYPE_LONGLONG_ID = 15,
+ /// \brief The 'float' type.
+ PREDEF_TYPE_FLOAT_ID = 16,
+ /// \brief The 'double' type.
+ PREDEF_TYPE_DOUBLE_ID = 17,
+ /// \brief The 'long double' type.
+ PREDEF_TYPE_LONGDOUBLE_ID = 18,
+ /// \brief The placeholder type for overloaded function sets.
+ PREDEF_TYPE_OVERLOAD_ID = 19,
+ /// \brief The placeholder type for dependent types.
+ PREDEF_TYPE_DEPENDENT_ID = 20,
+ /// \brief The '__uint128_t' type.
+ PREDEF_TYPE_UINT128_ID = 21,
+ /// \brief The '__int128_t' type.
+ PREDEF_TYPE_INT128_ID = 22,
+ /// \brief The type of 'nullptr'.
+ PREDEF_TYPE_NULLPTR_ID = 23,
+ /// \brief The C++ 'char16_t' type.
+ PREDEF_TYPE_CHAR16_ID = 24,
+ /// \brief The C++ 'char32_t' type.
+ PREDEF_TYPE_CHAR32_ID = 25,
+ /// \brief The ObjC 'id' type.
+ PREDEF_TYPE_OBJC_ID = 26,
+ /// \brief The ObjC 'Class' type.
+ PREDEF_TYPE_OBJC_CLASS = 27,
+ /// \brief The ObjC 'SEL' type.
+ PREDEF_TYPE_OBJC_SEL = 28,
+ /// \brief The 'unknown any' placeholder type.
+ PREDEF_TYPE_UNKNOWN_ANY = 29,
+ /// \brief The placeholder type for bound member functions.
+ PREDEF_TYPE_BOUND_MEMBER = 30,
+ /// \brief The "auto" deduction type.
+ PREDEF_TYPE_AUTO_DEDUCT = 31,
+ /// \brief The "auto &&" deduction type.
+ PREDEF_TYPE_AUTO_RREF_DEDUCT = 32,
+ /// \brief The OpenCL 'half' / ARM NEON __fp16 type.
+ PREDEF_TYPE_HALF_ID = 33,
+ /// \brief ARC's unbridged-cast placeholder type.
+ PREDEF_TYPE_ARC_UNBRIDGED_CAST = 34,
+ /// \brief The pseudo-object placeholder type.
+ PREDEF_TYPE_PSEUDO_OBJECT = 35
+ };
+
+ /// \brief The number of predefined type IDs that are reserved for
+ /// the PREDEF_TYPE_* constants.
+ ///
+ /// Type IDs for non-predefined types will start at
+    /// NUM_PREDEF_TYPE_IDS.
+ const unsigned NUM_PREDEF_TYPE_IDS = 100;
+
+ /// \brief The number of allowed abbreviations in bits
+ const unsigned NUM_ALLOWED_ABBREVS_SIZE = 4;
+
+ /// \brief Record codes for each kind of type.
+ ///
+ /// These constants describe the type records that can occur within a
+ /// block identified by DECLTYPES_BLOCK_ID in the AST file. Each
+ /// constant describes a record for a specific type class in the
+ /// AST.
+ enum TypeCode {
+ /// \brief An ExtQualType record.
+ TYPE_EXT_QUAL = 1,
+ /// \brief A ComplexType record.
+ TYPE_COMPLEX = 3,
+ /// \brief A PointerType record.
+ TYPE_POINTER = 4,
+ /// \brief A BlockPointerType record.
+ TYPE_BLOCK_POINTER = 5,
+ /// \brief An LValueReferenceType record.
+ TYPE_LVALUE_REFERENCE = 6,
+ /// \brief An RValueReferenceType record.
+ TYPE_RVALUE_REFERENCE = 7,
+ /// \brief A MemberPointerType record.
+ TYPE_MEMBER_POINTER = 8,
+ /// \brief A ConstantArrayType record.
+ TYPE_CONSTANT_ARRAY = 9,
+ /// \brief An IncompleteArrayType record.
+ TYPE_INCOMPLETE_ARRAY = 10,
+ /// \brief A VariableArrayType record.
+ TYPE_VARIABLE_ARRAY = 11,
+ /// \brief A VectorType record.
+ TYPE_VECTOR = 12,
+ /// \brief An ExtVectorType record.
+ TYPE_EXT_VECTOR = 13,
+ /// \brief A FunctionNoProtoType record.
+ TYPE_FUNCTION_NO_PROTO = 14,
+ /// \brief A FunctionProtoType record.
+ TYPE_FUNCTION_PROTO = 15,
+ /// \brief A TypedefType record.
+ TYPE_TYPEDEF = 16,
+ /// \brief A TypeOfExprType record.
+ TYPE_TYPEOF_EXPR = 17,
+ /// \brief A TypeOfType record.
+ TYPE_TYPEOF = 18,
+ /// \brief A RecordType record.
+ TYPE_RECORD = 19,
+ /// \brief An EnumType record.
+ TYPE_ENUM = 20,
+ /// \brief An ObjCInterfaceType record.
+ TYPE_OBJC_INTERFACE = 21,
+ /// \brief An ObjCObjectPointerType record.
+ TYPE_OBJC_OBJECT_POINTER = 22,
+      /// \brief A DecltypeType record.
+ TYPE_DECLTYPE = 23,
+ /// \brief An ElaboratedType record.
+ TYPE_ELABORATED = 24,
+ /// \brief A SubstTemplateTypeParmType record.
+ TYPE_SUBST_TEMPLATE_TYPE_PARM = 25,
+ /// \brief An UnresolvedUsingType record.
+ TYPE_UNRESOLVED_USING = 26,
+ /// \brief An InjectedClassNameType record.
+ TYPE_INJECTED_CLASS_NAME = 27,
+ /// \brief An ObjCObjectType record.
+ TYPE_OBJC_OBJECT = 28,
+      /// \brief A TemplateTypeParmType record.
+ TYPE_TEMPLATE_TYPE_PARM = 29,
+      /// \brief A TemplateSpecializationType record.
+ TYPE_TEMPLATE_SPECIALIZATION = 30,
+ /// \brief A DependentNameType record.
+ TYPE_DEPENDENT_NAME = 31,
+ /// \brief A DependentTemplateSpecializationType record.
+ TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION = 32,
+ /// \brief A DependentSizedArrayType record.
+ TYPE_DEPENDENT_SIZED_ARRAY = 33,
+ /// \brief A ParenType record.
+ TYPE_PAREN = 34,
+ /// \brief A PackExpansionType record.
+ TYPE_PACK_EXPANSION = 35,
+ /// \brief An AttributedType record.
+ TYPE_ATTRIBUTED = 36,
+ /// \brief A SubstTemplateTypeParmPackType record.
+ TYPE_SUBST_TEMPLATE_TYPE_PARM_PACK = 37,
+      /// \brief An AutoType record.
+ TYPE_AUTO = 38,
+ /// \brief A UnaryTransformType record.
+ TYPE_UNARY_TRANSFORM = 39,
+ /// \brief An AtomicType record.
+ TYPE_ATOMIC = 40
+ };
+
+ /// \brief The type IDs for special types constructed by semantic
+ /// analysis.
+ ///
+ /// The constants in this enumeration are indices into the
+ /// SPECIAL_TYPES record.
+ enum SpecialTypeIDs {
+ /// \brief __builtin_va_list
+ SPECIAL_TYPE_BUILTIN_VA_LIST = 0,
+ /// \brief CFConstantString type
+ SPECIAL_TYPE_CF_CONSTANT_STRING = 1,
+ /// \brief C FILE typedef type
+ SPECIAL_TYPE_FILE = 2,
+ /// \brief C jmp_buf typedef type
+ SPECIAL_TYPE_JMP_BUF = 3,
+ /// \brief C sigjmp_buf typedef type
+ SPECIAL_TYPE_SIGJMP_BUF = 4,
+ /// \brief Objective-C "id" redefinition type
+ SPECIAL_TYPE_OBJC_ID_REDEFINITION = 5,
+ /// \brief Objective-C "Class" redefinition type
+ SPECIAL_TYPE_OBJC_CLASS_REDEFINITION = 6,
+ /// \brief Objective-C "SEL" redefinition type
+ SPECIAL_TYPE_OBJC_SEL_REDEFINITION = 7,
+ /// \brief C ucontext_t typedef type
+ SPECIAL_TYPE_UCONTEXT_T = 8
+ };
+
+ /// \brief The number of special type IDs.
+ const unsigned NumSpecialTypeIDs = 9;
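
A minimal sketch, not part of the patch, of how these indices are meant to be used:
once the SPECIAL_TYPES record has been read into an array of type IDs, each
SpecialTypeIDs constant names one slot of that array. The function name and the use
of std::vector are assumptions for illustration; returning 0 relies on
PREDEF_TYPE_NULL_ID being the NULL type ID.

    #include <cstdint>
    #include <vector>
    #include "clang/Serialization/ASTBitCodes.h"

    // Illustrative: fetch the type ID recorded for the C FILE typedef, if any.
    uint64_t getFileSpecialTypeID(const std::vector<uint64_t> &SpecialTypes) {
      if (SpecialTypes.size() <= clang::serialization::SPECIAL_TYPE_FILE)
        return 0; // PREDEF_TYPE_NULL_ID: the record has no entry for FILE
      return SpecialTypes[clang::serialization::SPECIAL_TYPE_FILE];
    }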
+
+ /// \brief Predefined declaration IDs.
+ ///
+ /// These declaration IDs correspond to predefined declarations in the AST
+ /// context, such as the NULL declaration ID. Such declarations are never
+ /// actually serialized, since they will be built by the AST context when
+ /// it is created.
+ enum PredefinedDeclIDs {
+ /// \brief The NULL declaration.
+ PREDEF_DECL_NULL_ID = 0,
+
+ /// \brief The translation unit.
+ PREDEF_DECL_TRANSLATION_UNIT_ID = 1,
+
+ /// \brief The Objective-C 'id' type.
+ PREDEF_DECL_OBJC_ID_ID = 2,
+
+ /// \brief The Objective-C 'SEL' type.
+ PREDEF_DECL_OBJC_SEL_ID = 3,
+
+ /// \brief The Objective-C 'Class' type.
+ PREDEF_DECL_OBJC_CLASS_ID = 4,
+
+ /// \brief The Objective-C 'Protocol' type.
+ PREDEF_DECL_OBJC_PROTOCOL_ID = 5,
+
+ /// \brief The signed 128-bit integer type.
+ PREDEF_DECL_INT_128_ID = 6,
+
+ /// \brief The unsigned 128-bit integer type.
+ PREDEF_DECL_UNSIGNED_INT_128_ID = 7,
+
+ /// \brief The internal 'instancetype' typedef.
+ PREDEF_DECL_OBJC_INSTANCETYPE_ID = 8
+ };
+
+ /// \brief The number of declaration IDs that are predefined.
+ ///
+ /// For more information about predefined declarations, see the
+ /// \c PredefinedDeclIDs type and the PREDEF_DECL_*_ID constants.
+ const unsigned int NUM_PREDEF_DECL_IDS = 9;
+
+ /// \brief Record codes for each kind of declaration.
+ ///
+ /// These constants describe the declaration records that can occur within
+ /// a declarations block (identified by DECLS_BLOCK_ID). Each
+ /// constant describes a record for a specific declaration class
+ /// in the AST.
+ enum DeclCode {
+ /// \brief A TypedefDecl record.
+ DECL_TYPEDEF = 51,
+ /// \brief A TypeAliasDecl record.
+ DECL_TYPEALIAS,
+ /// \brief An EnumDecl record.
+ DECL_ENUM,
+ /// \brief A RecordDecl record.
+ DECL_RECORD,
+ /// \brief An EnumConstantDecl record.
+ DECL_ENUM_CONSTANT,
+ /// \brief A FunctionDecl record.
+ DECL_FUNCTION,
+      /// \brief An ObjCMethodDecl record.
+      DECL_OBJC_METHOD,
+      /// \brief An ObjCInterfaceDecl record.
+      DECL_OBJC_INTERFACE,
+      /// \brief An ObjCProtocolDecl record.
+      DECL_OBJC_PROTOCOL,
+      /// \brief An ObjCIvarDecl record.
+      DECL_OBJC_IVAR,
+      /// \brief An ObjCAtDefsFieldDecl record.
+      DECL_OBJC_AT_DEFS_FIELD,
+      /// \brief An ObjCCategoryDecl record.
+      DECL_OBJC_CATEGORY,
+      /// \brief An ObjCCategoryImplDecl record.
+      DECL_OBJC_CATEGORY_IMPL,
+      /// \brief An ObjCImplementationDecl record.
+      DECL_OBJC_IMPLEMENTATION,
+      /// \brief An ObjCCompatibleAliasDecl record.
+      DECL_OBJC_COMPATIBLE_ALIAS,
+      /// \brief An ObjCPropertyDecl record.
+      DECL_OBJC_PROPERTY,
+      /// \brief An ObjCPropertyImplDecl record.
+      DECL_OBJC_PROPERTY_IMPL,
+ /// \brief A FieldDecl record.
+ DECL_FIELD,
+ /// \brief A VarDecl record.
+ DECL_VAR,
+ /// \brief An ImplicitParamDecl record.
+ DECL_IMPLICIT_PARAM,
+ /// \brief A ParmVarDecl record.
+ DECL_PARM_VAR,
+ /// \brief A FileScopeAsmDecl record.
+ DECL_FILE_SCOPE_ASM,
+ /// \brief A BlockDecl record.
+ DECL_BLOCK,
+ /// \brief A record that stores the set of declarations that are
+ /// lexically stored within a given DeclContext.
+ ///
+ /// The record itself is a blob that is an array of declaration IDs,
+ /// in the order in which those declarations were added to the
+ /// declaration context. This data is used when iterating over
+ /// the contents of a DeclContext, e.g., via
+ /// DeclContext::decls_begin()/DeclContext::decls_end().
+ DECL_CONTEXT_LEXICAL,
+ /// \brief A record that stores the set of declarations that are
+ /// visible from a given DeclContext.
+ ///
+ /// The record itself stores a set of mappings, each of which
+ /// associates a declaration name with one or more declaration
+ /// IDs. This data is used when performing qualified name lookup
+ /// into a DeclContext via DeclContext::lookup.
+ DECL_CONTEXT_VISIBLE,
+ /// \brief A LabelDecl record.
+ DECL_LABEL,
+ /// \brief A NamespaceDecl record.
+ DECL_NAMESPACE,
+ /// \brief A NamespaceAliasDecl record.
+ DECL_NAMESPACE_ALIAS,
+ /// \brief A UsingDecl record.
+ DECL_USING,
+ /// \brief A UsingShadowDecl record.
+ DECL_USING_SHADOW,
+      /// \brief A UsingDirectiveDecl record.
+ DECL_USING_DIRECTIVE,
+ /// \brief An UnresolvedUsingValueDecl record.
+ DECL_UNRESOLVED_USING_VALUE,
+ /// \brief An UnresolvedUsingTypenameDecl record.
+ DECL_UNRESOLVED_USING_TYPENAME,
+ /// \brief A LinkageSpecDecl record.
+ DECL_LINKAGE_SPEC,
+ /// \brief A CXXRecordDecl record.
+ DECL_CXX_RECORD,
+ /// \brief A CXXMethodDecl record.
+ DECL_CXX_METHOD,
+ /// \brief A CXXConstructorDecl record.
+ DECL_CXX_CONSTRUCTOR,
+ /// \brief A CXXDestructorDecl record.
+ DECL_CXX_DESTRUCTOR,
+ /// \brief A CXXConversionDecl record.
+ DECL_CXX_CONVERSION,
+ /// \brief An AccessSpecDecl record.
+ DECL_ACCESS_SPEC,
+
+ /// \brief A FriendDecl record.
+ DECL_FRIEND,
+ /// \brief A FriendTemplateDecl record.
+ DECL_FRIEND_TEMPLATE,
+ /// \brief A ClassTemplateDecl record.
+ DECL_CLASS_TEMPLATE,
+ /// \brief A ClassTemplateSpecializationDecl record.
+ DECL_CLASS_TEMPLATE_SPECIALIZATION,
+ /// \brief A ClassTemplatePartialSpecializationDecl record.
+ DECL_CLASS_TEMPLATE_PARTIAL_SPECIALIZATION,
+ /// \brief A FunctionTemplateDecl record.
+ DECL_FUNCTION_TEMPLATE,
+ /// \brief A TemplateTypeParmDecl record.
+ DECL_TEMPLATE_TYPE_PARM,
+ /// \brief A NonTypeTemplateParmDecl record.
+ DECL_NON_TYPE_TEMPLATE_PARM,
+ /// \brief A TemplateTemplateParmDecl record.
+ DECL_TEMPLATE_TEMPLATE_PARM,
+ /// \brief A TypeAliasTemplateDecl record.
+ DECL_TYPE_ALIAS_TEMPLATE,
+ /// \brief A StaticAssertDecl record.
+ DECL_STATIC_ASSERT,
+ /// \brief A record containing CXXBaseSpecifiers.
+ DECL_CXX_BASE_SPECIFIERS,
+      /// \brief An IndirectFieldDecl record.
+ DECL_INDIRECTFIELD,
+ /// \brief A NonTypeTemplateParmDecl record that stores an expanded
+ /// non-type template parameter pack.
+ DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK,
+      /// \brief A ClassScopeFunctionSpecializationDecl record, recording a
+      /// class-scope function specialization (Microsoft extension).
+ DECL_CLASS_SCOPE_FUNCTION_SPECIALIZATION,
+ /// \brief An ImportDecl recording a module import.
+ DECL_IMPORT
+ };
+
+ /// \brief Record codes for each kind of statement or expression.
+ ///
+    /// These constants describe the records for statements and
+    /// expressions. These records occur within the types and declarations
+    /// block, so their values begin at 100. Each constant
+ /// describes a record for a specific statement or expression class in the
+ /// AST.
+ enum StmtCode {
+ /// \brief A marker record that indicates that we are at the end
+ /// of an expression.
+ STMT_STOP = 100,
+ /// \brief A NULL expression.
+ STMT_NULL_PTR,
+ /// \brief A reference to a previously [de]serialized Stmt record.
+ STMT_REF_PTR,
+ /// \brief A NullStmt record.
+ STMT_NULL,
+ /// \brief A CompoundStmt record.
+ STMT_COMPOUND,
+ /// \brief A CaseStmt record.
+ STMT_CASE,
+ /// \brief A DefaultStmt record.
+ STMT_DEFAULT,
+ /// \brief A LabelStmt record.
+ STMT_LABEL,
+ /// \brief An IfStmt record.
+ STMT_IF,
+ /// \brief A SwitchStmt record.
+ STMT_SWITCH,
+ /// \brief A WhileStmt record.
+ STMT_WHILE,
+ /// \brief A DoStmt record.
+ STMT_DO,
+ /// \brief A ForStmt record.
+ STMT_FOR,
+ /// \brief A GotoStmt record.
+ STMT_GOTO,
+ /// \brief An IndirectGotoStmt record.
+ STMT_INDIRECT_GOTO,
+ /// \brief A ContinueStmt record.
+ STMT_CONTINUE,
+ /// \brief A BreakStmt record.
+ STMT_BREAK,
+ /// \brief A ReturnStmt record.
+ STMT_RETURN,
+ /// \brief A DeclStmt record.
+ STMT_DECL,
+ /// \brief An AsmStmt record.
+ STMT_ASM,
+ /// \brief A PredefinedExpr record.
+ EXPR_PREDEFINED,
+ /// \brief A DeclRefExpr record.
+ EXPR_DECL_REF,
+ /// \brief An IntegerLiteral record.
+ EXPR_INTEGER_LITERAL,
+ /// \brief A FloatingLiteral record.
+ EXPR_FLOATING_LITERAL,
+ /// \brief An ImaginaryLiteral record.
+ EXPR_IMAGINARY_LITERAL,
+ /// \brief A StringLiteral record.
+ EXPR_STRING_LITERAL,
+ /// \brief A CharacterLiteral record.
+ EXPR_CHARACTER_LITERAL,
+ /// \brief A ParenExpr record.
+ EXPR_PAREN,
+ /// \brief A ParenListExpr record.
+ EXPR_PAREN_LIST,
+ /// \brief A UnaryOperator record.
+ EXPR_UNARY_OPERATOR,
+ /// \brief An OffsetOfExpr record.
+ EXPR_OFFSETOF,
+      /// \brief A SizeOfAlignOfExpr record.
+ EXPR_SIZEOF_ALIGN_OF,
+ /// \brief An ArraySubscriptExpr record.
+ EXPR_ARRAY_SUBSCRIPT,
+ /// \brief A CallExpr record.
+ EXPR_CALL,
+ /// \brief A MemberExpr record.
+ EXPR_MEMBER,
+ /// \brief A BinaryOperator record.
+ EXPR_BINARY_OPERATOR,
+ /// \brief A CompoundAssignOperator record.
+ EXPR_COMPOUND_ASSIGN_OPERATOR,
+      /// \brief A ConditionalOperator record.
+ EXPR_CONDITIONAL_OPERATOR,
+ /// \brief An ImplicitCastExpr record.
+ EXPR_IMPLICIT_CAST,
+ /// \brief A CStyleCastExpr record.
+ EXPR_CSTYLE_CAST,
+ /// \brief A CompoundLiteralExpr record.
+ EXPR_COMPOUND_LITERAL,
+ /// \brief An ExtVectorElementExpr record.
+ EXPR_EXT_VECTOR_ELEMENT,
+ /// \brief An InitListExpr record.
+ EXPR_INIT_LIST,
+ /// \brief A DesignatedInitExpr record.
+ EXPR_DESIGNATED_INIT,
+ /// \brief An ImplicitValueInitExpr record.
+ EXPR_IMPLICIT_VALUE_INIT,
+ /// \brief A VAArgExpr record.
+ EXPR_VA_ARG,
+ /// \brief An AddrLabelExpr record.
+ EXPR_ADDR_LABEL,
+ /// \brief A StmtExpr record.
+ EXPR_STMT,
+ /// \brief A ChooseExpr record.
+ EXPR_CHOOSE,
+ /// \brief A GNUNullExpr record.
+ EXPR_GNU_NULL,
+ /// \brief A ShuffleVectorExpr record.
+ EXPR_SHUFFLE_VECTOR,
+      /// \brief A BlockExpr record.
+ EXPR_BLOCK,
+ /// \brief A GenericSelectionExpr record.
+ EXPR_GENERIC_SELECTION,
+ /// \brief A PseudoObjectExpr record.
+ EXPR_PSEUDO_OBJECT,
+ /// \brief An AtomicExpr record.
+ EXPR_ATOMIC,
+
+ // Objective-C
+
+ /// \brief An ObjCStringLiteral record.
+ EXPR_OBJC_STRING_LITERAL,
+
+      /// \brief An ObjCNumericLiteral record.
+      EXPR_OBJC_NUMERIC_LITERAL,
+      /// \brief An ObjCArrayLiteral record.
+      EXPR_OBJC_ARRAY_LITERAL,
+      /// \brief An ObjCDictionaryLiteral record.
+      EXPR_OBJC_DICTIONARY_LITERAL,
+
+ /// \brief An ObjCEncodeExpr record.
+ EXPR_OBJC_ENCODE,
+ /// \brief An ObjCSelectorExpr record.
+ EXPR_OBJC_SELECTOR_EXPR,
+ /// \brief An ObjCProtocolExpr record.
+ EXPR_OBJC_PROTOCOL_EXPR,
+ /// \brief An ObjCIvarRefExpr record.
+ EXPR_OBJC_IVAR_REF_EXPR,
+ /// \brief An ObjCPropertyRefExpr record.
+ EXPR_OBJC_PROPERTY_REF_EXPR,
+ /// \brief An ObjCSubscriptRefExpr record.
+ EXPR_OBJC_SUBSCRIPT_REF_EXPR,
+ /// \brief UNUSED
+ EXPR_OBJC_KVC_REF_EXPR,
+ /// \brief An ObjCMessageExpr record.
+ EXPR_OBJC_MESSAGE_EXPR,
+      /// \brief An ObjCIsaExpr record.
+ EXPR_OBJC_ISA,
+      /// \brief An ObjCIndirectCopyRestoreExpr record.
+ EXPR_OBJC_INDIRECT_COPY_RESTORE,
+
+ /// \brief An ObjCForCollectionStmt record.
+ STMT_OBJC_FOR_COLLECTION,
+ /// \brief An ObjCAtCatchStmt record.
+ STMT_OBJC_CATCH,
+ /// \brief An ObjCAtFinallyStmt record.
+ STMT_OBJC_FINALLY,
+ /// \brief An ObjCAtTryStmt record.
+ STMT_OBJC_AT_TRY,
+ /// \brief An ObjCAtSynchronizedStmt record.
+ STMT_OBJC_AT_SYNCHRONIZED,
+ /// \brief An ObjCAtThrowStmt record.
+ STMT_OBJC_AT_THROW,
+ /// \brief An ObjCAutoreleasePoolStmt record.
+ STMT_OBJC_AUTORELEASE_POOL,
+      /// \brief An ObjCBoolLiteralExpr record.
+ EXPR_OBJC_BOOL_LITERAL,
+
+ // C++
+
+ /// \brief A CXXCatchStmt record.
+ STMT_CXX_CATCH,
+ /// \brief A CXXTryStmt record.
+ STMT_CXX_TRY,
+ /// \brief A CXXForRangeStmt record.
+ STMT_CXX_FOR_RANGE,
+
+ /// \brief A CXXOperatorCallExpr record.
+ EXPR_CXX_OPERATOR_CALL,
+ /// \brief A CXXMemberCallExpr record.
+ EXPR_CXX_MEMBER_CALL,
+ /// \brief A CXXConstructExpr record.
+ EXPR_CXX_CONSTRUCT,
+ /// \brief A CXXTemporaryObjectExpr record.
+ EXPR_CXX_TEMPORARY_OBJECT,
+ /// \brief A CXXStaticCastExpr record.
+ EXPR_CXX_STATIC_CAST,
+ /// \brief A CXXDynamicCastExpr record.
+ EXPR_CXX_DYNAMIC_CAST,
+ /// \brief A CXXReinterpretCastExpr record.
+ EXPR_CXX_REINTERPRET_CAST,
+ /// \brief A CXXConstCastExpr record.
+ EXPR_CXX_CONST_CAST,
+ /// \brief A CXXFunctionalCastExpr record.
+ EXPR_CXX_FUNCTIONAL_CAST,
+ /// \brief A UserDefinedLiteral record.
+ EXPR_USER_DEFINED_LITERAL,
+ /// \brief A CXXBoolLiteralExpr record.
+ EXPR_CXX_BOOL_LITERAL,
+ EXPR_CXX_NULL_PTR_LITERAL, // CXXNullPtrLiteralExpr
+ EXPR_CXX_TYPEID_EXPR, // CXXTypeidExpr (of expr).
+ EXPR_CXX_TYPEID_TYPE, // CXXTypeidExpr (of type).
+ EXPR_CXX_THIS, // CXXThisExpr
+ EXPR_CXX_THROW, // CXXThrowExpr
+ EXPR_CXX_DEFAULT_ARG, // CXXDefaultArgExpr
+ EXPR_CXX_BIND_TEMPORARY, // CXXBindTemporaryExpr
+
+ EXPR_CXX_SCALAR_VALUE_INIT, // CXXScalarValueInitExpr
+ EXPR_CXX_NEW, // CXXNewExpr
+ EXPR_CXX_DELETE, // CXXDeleteExpr
+ EXPR_CXX_PSEUDO_DESTRUCTOR, // CXXPseudoDestructorExpr
+
+ EXPR_EXPR_WITH_CLEANUPS, // ExprWithCleanups
+
+ EXPR_CXX_DEPENDENT_SCOPE_MEMBER, // CXXDependentScopeMemberExpr
+ EXPR_CXX_DEPENDENT_SCOPE_DECL_REF, // DependentScopeDeclRefExpr
+ EXPR_CXX_UNRESOLVED_CONSTRUCT, // CXXUnresolvedConstructExpr
+ EXPR_CXX_UNRESOLVED_MEMBER, // UnresolvedMemberExpr
+ EXPR_CXX_UNRESOLVED_LOOKUP, // UnresolvedLookupExpr
+
+ EXPR_CXX_UNARY_TYPE_TRAIT, // UnaryTypeTraitExpr
+ EXPR_CXX_EXPRESSION_TRAIT, // ExpressionTraitExpr
+ EXPR_CXX_NOEXCEPT, // CXXNoexceptExpr
+
+ EXPR_OPAQUE_VALUE, // OpaqueValueExpr
+ EXPR_BINARY_CONDITIONAL_OPERATOR, // BinaryConditionalOperator
+ EXPR_BINARY_TYPE_TRAIT, // BinaryTypeTraitExpr
+ EXPR_TYPE_TRAIT, // TypeTraitExpr
+      EXPR_ARRAY_TYPE_TRAIT,        // ArrayTypeTraitExpr
+
+ EXPR_PACK_EXPANSION, // PackExpansionExpr
+ EXPR_SIZEOF_PACK, // SizeOfPackExpr
+ EXPR_SUBST_NON_TYPE_TEMPLATE_PARM, // SubstNonTypeTemplateParmExpr
+ EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK,// SubstNonTypeTemplateParmPackExpr
+ EXPR_MATERIALIZE_TEMPORARY, // MaterializeTemporaryExpr
+
+ // CUDA
+ EXPR_CUDA_KERNEL_CALL, // CUDAKernelCallExpr
+
+ // OpenCL
+ EXPR_ASTYPE, // AsTypeExpr
+
+ // Microsoft
+ EXPR_CXX_UUIDOF_EXPR, // CXXUuidofExpr (of expr).
+ EXPR_CXX_UUIDOF_TYPE, // CXXUuidofExpr (of type).
+ STMT_SEH_EXCEPT, // SEHExceptStmt
+ STMT_SEH_FINALLY, // SEHFinallyStmt
+ STMT_SEH_TRY, // SEHTryStmt
+
+ // ARC
+ EXPR_OBJC_BRIDGED_CAST, // ObjCBridgedCastExpr
+
+ STMT_MS_DEPENDENT_EXISTS, // MSDependentExistsStmt
+ EXPR_LAMBDA // LambdaExpr
+ };
+
+ /// \brief The kinds of designators that can occur in a
+ /// DesignatedInitExpr.
+ enum DesignatorTypes {
+ /// \brief Field designator where only the field name is known.
+ DESIG_FIELD_NAME = 0,
+ /// \brief Field designator where the field has been resolved to
+ /// a declaration.
+ DESIG_FIELD_DECL = 1,
+ /// \brief Array designator.
+ DESIG_ARRAY = 2,
+ /// \brief GNU array range designator.
+ DESIG_ARRAY_RANGE = 3
+ };
+
+ /// \brief The different kinds of data that can occur in a
+ /// CtorInitializer.
+ enum CtorInitializerType {
+ CTOR_INITIALIZER_BASE,
+ CTOR_INITIALIZER_DELEGATING,
+ CTOR_INITIALIZER_MEMBER,
+ CTOR_INITIALIZER_INDIRECT_MEMBER
+ };
+
+ /// \brief Describes the redeclarations of a declaration.
+ struct LocalRedeclarationsInfo {
+ DeclID FirstID; // The ID of the first declaration
+ unsigned Offset; // Offset into the array of redeclaration chains.
+
+ friend bool operator<(const LocalRedeclarationsInfo &X,
+ const LocalRedeclarationsInfo &Y) {
+ return X.FirstID < Y.FirstID;
+ }
+
+ friend bool operator>(const LocalRedeclarationsInfo &X,
+ const LocalRedeclarationsInfo &Y) {
+ return X.FirstID > Y.FirstID;
+ }
+
+ friend bool operator<=(const LocalRedeclarationsInfo &X,
+ const LocalRedeclarationsInfo &Y) {
+ return X.FirstID <= Y.FirstID;
+ }
+
+ friend bool operator>=(const LocalRedeclarationsInfo &X,
+ const LocalRedeclarationsInfo &Y) {
+ return X.FirstID >= Y.FirstID;
+ }
+ };
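
The ordering operators above exist so that an array of these entries can be kept
sorted by FirstID and searched with a binary search. A minimal sketch of such a
lookup, assuming the entries are already sorted; the helper name and the hit/miss
protocol are illustrative, not part of the patch.

    #include <algorithm>
    #include "clang/Serialization/ASTBitCodes.h"

    // Illustrative: find the redeclaration-chain offset recorded for FirstID == ID
    // in a FirstID-sorted array of LocalRedeclarationsInfo entries.
    static bool findRedeclChainOffset(
        const clang::serialization::LocalRedeclarationsInfo *Begin,
        const clang::serialization::LocalRedeclarationsInfo *End,
        clang::serialization::DeclID ID, unsigned &OffsetOut) {
      clang::serialization::LocalRedeclarationsInfo Key = { ID, 0 };
      const clang::serialization::LocalRedeclarationsInfo *I =
          std::lower_bound(Begin, End, Key); // uses the operator< defined above
      if (I == End || I->FirstID != ID)
        return false;                        // no chain starts at this ID
      OffsetOut = I->Offset;
      return true;
    }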
+
+ /// \brief Describes the categories of an Objective-C class.
+ struct ObjCCategoriesInfo {
+ DeclID DefinitionID; // The ID of the definition
+ unsigned Offset; // Offset into the array of category lists.
+
+ friend bool operator<(const ObjCCategoriesInfo &X,
+ const ObjCCategoriesInfo &Y) {
+ return X.DefinitionID < Y.DefinitionID;
+ }
+
+ friend bool operator>(const ObjCCategoriesInfo &X,
+ const ObjCCategoriesInfo &Y) {
+ return X.DefinitionID > Y.DefinitionID;
+ }
+
+ friend bool operator<=(const ObjCCategoriesInfo &X,
+ const ObjCCategoriesInfo &Y) {
+ return X.DefinitionID <= Y.DefinitionID;
+ }
+
+ friend bool operator>=(const ObjCCategoriesInfo &X,
+ const ObjCCategoriesInfo &Y) {
+ return X.DefinitionID >= Y.DefinitionID;
+ }
+ };
+
+ /// @}
+  } // end namespace serialization
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h
new file mode 100644
index 0000000..ab0d313
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTDeserializationListener.h
@@ -0,0 +1,60 @@
+//===- ASTDeserializationListener.h - Decl/Type PCH Read Events -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTDeserializationListener class, which is notified
+// by the ASTReader whenever a type or declaration is deserialized.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_AST_DESERIALIZATION_LISTENER_H
+#define LLVM_CLANG_FRONTEND_AST_DESERIALIZATION_LISTENER_H
+
+#include "clang/Serialization/ASTBitCodes.h"
+
+namespace clang {
+
+class Decl;
+class ASTReader;
+class QualType;
+class MacroDefinition;
+class Module;
+
+class ASTDeserializationListener {
+protected:
+ virtual ~ASTDeserializationListener();
+
+public:
+
+ /// \brief The ASTReader was initialized.
+ virtual void ReaderInitialized(ASTReader *Reader) { }
+
+ /// \brief An identifier was deserialized from the AST file.
+ virtual void IdentifierRead(serialization::IdentID ID,
+ IdentifierInfo *II) { }
+ /// \brief A type was deserialized from the AST file. The ID here has the
+ /// qualifier bits already removed, and T is guaranteed to be locally
+ /// unqualified.
+ virtual void TypeRead(serialization::TypeIdx Idx, QualType T) { }
+ /// \brief A decl was deserialized from the AST file.
+ virtual void DeclRead(serialization::DeclID ID, const Decl *D) { }
+ /// \brief A selector was read from the AST file.
+  virtual void SelectorRead(serialization::SelectorID ID, Selector Sel) { }
+ /// \brief A macro definition was read from the AST file.
+ virtual void MacroDefinitionRead(serialization::PreprocessedEntityID,
+ MacroDefinition *MD) { }
+ /// \brief A macro definition that had previously been deserialized
+ /// (and removed via IdentifierRead) has now been made visible.
+ virtual void MacroVisible(IdentifierInfo *II) { }
+ /// \brief A module definition was read from the AST file.
+ virtual void ModuleRead(serialization::SubmoduleID ID, Module *Mod) { }
+};
+
+}
+
+#endif
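
A minimal sketch, not part of the patch, of how this interface might be consumed: a
listener that merely counts the declarations and identifiers the ASTReader hands
back. The override signatures mirror the virtual methods declared above; the class
name and the counters are illustrative.

    #include "clang/Serialization/ASTDeserializationListener.h"

    namespace {
    class CountingDeserializationListener
        : public clang::ASTDeserializationListener {
      unsigned NumDeclsRead;       // declarations deserialized so far
      unsigned NumIdentifiersRead; // identifiers deserialized so far

    public:
      CountingDeserializationListener()
          : NumDeclsRead(0), NumIdentifiersRead(0) {}

      virtual void DeclRead(clang::serialization::DeclID ID,
                            const clang::Decl *D) {
        ++NumDeclsRead;
      }
      virtual void IdentifierRead(clang::serialization::IdentID ID,
                                  clang::IdentifierInfo *II) {
        ++NumIdentifiersRead;
      }

      unsigned declsRead() const { return NumDeclsRead; }
      unsigned identifiersRead() const { return NumIdentifiersRead; }
    };
    } // end anonymous namespace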
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h
new file mode 100644
index 0000000..9baaf4b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTReader.h
@@ -0,0 +1,1519 @@
+//===--- ASTReader.h - AST File Reader --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTReader class, which reads AST files.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_FRONTEND_AST_READER_H
+#define LLVM_CLANG_FRONTEND_AST_READER_H
+
+#include "clang/Serialization/ASTBitCodes.h"
+#include "clang/Serialization/ContinuousRangeMap.h"
+#include "clang/Serialization/Module.h"
+#include "clang/Serialization/ModuleManager.h"
+#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/Lex/ExternalPreprocessorSource.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/PreprocessingRecord.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemOptions.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Support/DataTypes.h"
+#include <deque>
+#include <map>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace llvm {
+ class MemoryBuffer;
+}
+
+namespace clang {
+
+class AddrLabelExpr;
+class ASTConsumer;
+class ASTContext;
+class ASTIdentifierIterator;
+class ASTUnit; // FIXME: Layering violation and egregious hack.
+class Attr;
+class Decl;
+class DeclContext;
+class NestedNameSpecifier;
+class CXXBaseSpecifier;
+class CXXConstructorDecl;
+class CXXCtorInitializer;
+class GotoStmt;
+class MacroDefinition;
+class NamedDecl;
+class OpaqueValueExpr;
+class Preprocessor;
+class Sema;
+class SwitchCase;
+class ASTDeserializationListener;
+class ASTWriter;
+class ASTReader;
+class ASTDeclReader;
+class ASTStmtReader;
+class TypeLocReader;
+struct HeaderFileInfo;
+class VersionTuple;
+
+struct PCHPredefinesBlock {
+ /// \brief The file ID for this predefines buffer in a PCH file.
+ FileID BufferID;
+
+ /// \brief This predefines buffer in a PCH file.
+ StringRef Data;
+};
+typedef SmallVector<PCHPredefinesBlock, 2> PCHPredefinesBlocks;
+
+/// \brief Abstract interface for callback invocations by the ASTReader.
+///
+/// While reading an AST file, the ASTReader will call the methods of the
+/// listener to pass on specific information. Some of the listener methods can
+/// return true to indicate to the ASTReader that the information (and
+/// consequently the AST file) is invalid.
+class ASTReaderListener {
+public:
+ virtual ~ASTReaderListener();
+
+ /// \brief Receives the language options.
+ ///
+ /// \returns true to indicate the options are invalid or false otherwise.
+ virtual bool ReadLanguageOptions(const LangOptions &LangOpts) {
+ return false;
+ }
+
+ /// \brief Receives the target triple.
+ ///
+ /// \returns true to indicate the target triple is invalid or false otherwise.
+ virtual bool ReadTargetTriple(StringRef Triple) {
+ return false;
+ }
+
+ /// \brief Receives the contents of the predefines buffer.
+ ///
+ /// \param Buffers Information about the predefines buffers.
+ ///
+ /// \param OriginalFileName The original file name for the AST file, which
+ /// will appear as an entry in the predefines buffer.
+ ///
+ /// \param SuggestedPredefines If necessary, additional definitions are added
+ /// here.
+ ///
+ /// \returns true to indicate the predefines are invalid or false otherwise.
+ virtual bool ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers,
+ StringRef OriginalFileName,
+ std::string &SuggestedPredefines,
+ FileManager &FileMgr) {
+ return false;
+ }
+
+ /// \brief Receives a HeaderFileInfo entry.
+ virtual void ReadHeaderFileInfo(const HeaderFileInfo &HFI, unsigned ID) {}
+
+ /// \brief Receives __COUNTER__ value.
+ virtual void ReadCounter(unsigned Value) {}
+};
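
A minimal sketch, not part of the patch, of the invalidation protocol documented
above: returning true from one of the Read* callbacks tells the ASTReader that the
AST file cannot be used. Here a listener rejects any AST file built for an unexpected
target triple; the class and member names are illustrative.

    #include <string>
    #include "clang/Serialization/ASTReader.h"
    #include "llvm/ADT/StringRef.h"

    class StrictTripleListener : public clang::ASTReaderListener {
      std::string ExpectedTriple;

    public:
      explicit StrictTripleListener(llvm::StringRef Expected)
          : ExpectedTriple(Expected.str()) {}

      virtual bool ReadTargetTriple(llvm::StringRef Triple) {
        // true => the AST file is considered invalid and rejected.
        return !Triple.equals(ExpectedTriple);
      }
    };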
+
+/// \brief ASTReaderListener implementation to validate the information of
+/// the PCH file against an initialized Preprocessor.
+class PCHValidator : public ASTReaderListener {
+ Preprocessor &PP;
+ ASTReader &Reader;
+
+ unsigned NumHeaderInfos;
+
+public:
+ PCHValidator(Preprocessor &PP, ASTReader &Reader)
+ : PP(PP), Reader(Reader), NumHeaderInfos(0) {}
+
+ virtual bool ReadLanguageOptions(const LangOptions &LangOpts);
+ virtual bool ReadTargetTriple(StringRef Triple);
+ virtual bool ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers,
+ StringRef OriginalFileName,
+ std::string &SuggestedPredefines,
+ FileManager &FileMgr);
+ virtual void ReadHeaderFileInfo(const HeaderFileInfo &HFI, unsigned ID);
+ virtual void ReadCounter(unsigned Value);
+
+private:
+ void Error(const char *Msg);
+};
+
+namespace serialization {
+
+class ReadMethodPoolVisitor;
+
+namespace reader {
+ class ASTIdentifierLookupTrait;
+}
+
+} // end namespace serialization
+
+/// \brief Reads a chain of AST files containing the contents of a translation
+/// unit.
+///
+/// The ASTReader class reads bitstreams (produced by the ASTWriter
+/// class) containing the serialized representation of a given
+/// abstract syntax tree and its supporting data structures. An
+/// instance of the ASTReader can be attached to an ASTContext object,
+/// which will provide access to the contents of the AST files.
+///
+/// The AST reader provides lazy de-serialization of declarations, as
+/// required when traversing the AST. Only those AST nodes that are
+/// actually required will be de-serialized.
+class ASTReader
+ : public ExternalPreprocessorSource,
+ public ExternalPreprocessingRecordSource,
+ public ExternalHeaderFileInfoSource,
+ public ExternalSemaSource,
+ public IdentifierInfoLookup,
+ public ExternalIdentifierLookup,
+ public ExternalSLocEntrySource
+{
+public:
+ enum ASTReadResult { Success, Failure, IgnorePCH };
+ /// \brief Types of AST files.
+ friend class PCHValidator;
+ friend class ASTDeclReader;
+ friend class ASTStmtReader;
+ friend class ASTIdentifierIterator;
+ friend class serialization::reader::ASTIdentifierLookupTrait;
+ friend class TypeLocReader;
+ friend class ASTWriter;
+ friend class ASTUnit; // ASTUnit needs to remap source locations.
+ friend class serialization::ReadMethodPoolVisitor;
+
+ typedef serialization::ModuleFile ModuleFile;
+ typedef serialization::ModuleKind ModuleKind;
+ typedef serialization::ModuleManager ModuleManager;
+
+ typedef ModuleManager::ModuleIterator ModuleIterator;
+ typedef ModuleManager::ModuleConstIterator ModuleConstIterator;
+ typedef ModuleManager::ModuleReverseIterator ModuleReverseIterator;
+
+private:
+ /// \brief The receiver of some callbacks invoked by ASTReader.
+ OwningPtr<ASTReaderListener> Listener;
+
+ /// \brief The receiver of deserialization events.
+ ASTDeserializationListener *DeserializationListener;
+
+ SourceManager &SourceMgr;
+ FileManager &FileMgr;
+ DiagnosticsEngine &Diags;
+
+ /// \brief The semantic analysis object that will be processing the
+ /// AST files and the translation unit that uses it.
+ Sema *SemaObj;
+
+ /// \brief The preprocessor that will be loading the source file.
+ Preprocessor &PP;
+
+ /// \brief The AST context into which we'll read the AST files.
+ ASTContext &Context;
+
+ /// \brief The AST consumer.
+ ASTConsumer *Consumer;
+
+  /// \brief The module manager, which manages modules and their dependencies.
+ ModuleManager ModuleMgr;
+
+ /// \brief A map of global bit offsets to the module that stores entities
+ /// at those bit offsets.
+ ContinuousRangeMap<uint64_t, ModuleFile*, 4> GlobalBitOffsetsMap;
+
+ /// \brief A map of negated SLocEntryIDs to the modules containing them.
+ ContinuousRangeMap<unsigned, ModuleFile*, 64> GlobalSLocEntryMap;
+
+ typedef ContinuousRangeMap<unsigned, ModuleFile*, 64> GlobalSLocOffsetMapType;
+
+ /// \brief A map of reversed (SourceManager::MaxLoadedOffset - SLocOffset)
+ /// SourceLocation offsets to the modules containing them.
+ GlobalSLocOffsetMapType GlobalSLocOffsetMap;
+
+ /// \brief Types that have already been loaded from the chain.
+ ///
+ /// When the pointer at index I is non-NULL, the type with
+ /// ID = (I + 1) << FastQual::Width has already been loaded
+ std::vector<QualType> TypesLoaded;
+
+ typedef ContinuousRangeMap<serialization::TypeID, ModuleFile *, 4>
+ GlobalTypeMapType;
+
+ /// \brief Mapping from global type IDs to the module in which the
+ /// type resides along with the offset that should be added to the
+ /// global type ID to produce a local ID.
+ GlobalTypeMapType GlobalTypeMap;
+
+ /// \brief Declarations that have already been loaded from the chain.
+ ///
+ /// When the pointer at index I is non-NULL, the declaration with ID
+ /// = I + 1 has already been loaded.
+ std::vector<Decl *> DeclsLoaded;
+
+ typedef ContinuousRangeMap<serialization::DeclID, ModuleFile *, 4>
+ GlobalDeclMapType;
+
+ /// \brief Mapping from global declaration IDs to the module in which the
+ /// declaration resides.
+ GlobalDeclMapType GlobalDeclMap;
+
+ typedef std::pair<ModuleFile *, uint64_t> FileOffset;
+ typedef SmallVector<FileOffset, 2> FileOffsetsTy;
+ typedef llvm::DenseMap<serialization::DeclID, FileOffsetsTy>
+ DeclUpdateOffsetsMap;
+
+ /// \brief Declarations that have modifications residing in a later file
+ /// in the chain.
+ DeclUpdateOffsetsMap DeclUpdateOffsets;
+
+ struct ReplacedDeclInfo {
+ ModuleFile *Mod;
+ uint64_t Offset;
+ unsigned RawLoc;
+
+ ReplacedDeclInfo() : Mod(0), Offset(0), RawLoc(0) {}
+ ReplacedDeclInfo(ModuleFile *Mod, uint64_t Offset, unsigned RawLoc)
+ : Mod(Mod), Offset(Offset), RawLoc(RawLoc) {}
+ };
+
+ typedef llvm::DenseMap<serialization::DeclID, ReplacedDeclInfo>
+ DeclReplacementMap;
+ /// \brief Declarations that have been replaced in a later file in the chain.
+ DeclReplacementMap ReplacedDecls;
+
+ struct FileDeclsInfo {
+ ModuleFile *Mod;
+ ArrayRef<serialization::LocalDeclID> Decls;
+
+ FileDeclsInfo() : Mod(0) {}
+ FileDeclsInfo(ModuleFile *Mod, ArrayRef<serialization::LocalDeclID> Decls)
+ : Mod(Mod), Decls(Decls) {}
+ };
+
+ /// \brief Map from a FileID to the file-level declarations that it contains.
+ llvm::DenseMap<FileID, FileDeclsInfo> FileDeclIDs;
+
+ // Updates for visible decls can occur for other contexts than just the
+ // TU, and when we read those update records, the actual context will not
+ // be available yet (unless it's the TU), so have this pending map using the
+ // ID as a key. It will be realized when the context is actually loaded.
+ typedef SmallVector<std::pair<void *, ModuleFile*>, 1> DeclContextVisibleUpdates;
+ typedef llvm::DenseMap<serialization::DeclID, DeclContextVisibleUpdates>
+ DeclContextVisibleUpdatesPending;
+
+ /// \brief Updates to the visible declarations of declaration contexts that
+ /// haven't been loaded yet.
+ DeclContextVisibleUpdatesPending PendingVisibleUpdates;
+
+ /// \brief The set of C++ or Objective-C classes that have forward
+ /// declarations that have not yet been linked to their definitions.
+ llvm::SmallPtrSet<Decl *, 4> PendingDefinitions;
+
+  /// \brief Read the records that describe the contents of DeclContexts.
+ bool ReadDeclContextStorage(ModuleFile &M,
+ llvm::BitstreamCursor &Cursor,
+ const std::pair<uint64_t, uint64_t> &Offsets,
+ serialization::DeclContextInfo &Info);
+
+ /// \brief A vector containing identifiers that have already been
+ /// loaded.
+ ///
+ /// If the pointer at index I is non-NULL, then it refers to the
+ /// IdentifierInfo for the identifier with ID=I+1 that has already
+ /// been loaded.
+ std::vector<IdentifierInfo *> IdentifiersLoaded;
+
+ typedef ContinuousRangeMap<serialization::IdentID, ModuleFile *, 4>
+ GlobalIdentifierMapType;
+
+  /// \brief Mapping from global identifier IDs to the module in which the
+ /// identifier resides along with the offset that should be added to the
+ /// global identifier ID to produce a local ID.
+ GlobalIdentifierMapType GlobalIdentifierMap;
+
+ /// \brief A vector containing submodules that have already been loaded.
+ ///
+  /// This vector is indexed by the submodule ID minus one. NULL submodule
+  /// entries indicate that the particular submodule ID has not yet been
+  /// loaded.
+ SmallVector<Module *, 2> SubmodulesLoaded;
+
+ typedef ContinuousRangeMap<serialization::SubmoduleID, ModuleFile *, 4>
+ GlobalSubmoduleMapType;
+
+ /// \brief Mapping from global submodule IDs to the module file in which the
+ /// submodule resides along with the offset that should be added to the
+ /// global submodule ID to produce a local ID.
+ GlobalSubmoduleMapType GlobalSubmoduleMap;
+
+ /// \brief A set of hidden declarations.
+ typedef llvm::SmallVector<llvm::PointerUnion<Decl *, IdentifierInfo *>, 2>
+ HiddenNames;
+
+ typedef llvm::DenseMap<Module *, HiddenNames> HiddenNamesMapType;
+
+ /// \brief A mapping from each of the hidden submodules to the deserialized
+ /// declarations in that submodule that could be made visible.
+ HiddenNamesMapType HiddenNamesMap;
+
+
+ /// \brief A module import or export that hasn't yet been resolved.
+ struct UnresolvedModuleImportExport {
+ /// \brief The file in which this module resides.
+ ModuleFile *File;
+
+ /// \brief The module that is importing or exporting.
+ Module *Mod;
+
+ /// \brief The local ID of the module that is being exported.
+ unsigned ID;
+
+ /// \brief Whether this is an import (vs. an export).
+ unsigned IsImport : 1;
+
+ /// \brief Whether this is a wildcard export.
+ unsigned IsWildcard : 1;
+ };
+
+ /// \brief The set of module imports and exports that still need to be
+ /// resolved.
+ llvm::SmallVector<UnresolvedModuleImportExport, 2>
+ UnresolvedModuleImportExports;
+
+ /// \brief A vector containing selectors that have already been loaded.
+ ///
+  /// This vector is indexed by the selector ID minus one. NULL selector
+  /// entries indicate that the particular selector ID has not yet
+  /// been loaded.
+ SmallVector<Selector, 16> SelectorsLoaded;
+
+ typedef ContinuousRangeMap<serialization::SelectorID, ModuleFile *, 4>
+ GlobalSelectorMapType;
+
+ /// \brief Mapping from global selector IDs to the module in which the
+ /// selector resides along with the offset that should be added to the
+ /// global selector ID to produce a local ID.
+ GlobalSelectorMapType GlobalSelectorMap;
+
+ /// \brief The generation number of the last time we loaded data from the
+ /// global method pool for this selector.
+ llvm::DenseMap<Selector, unsigned> SelectorGeneration;
+
+ /// \brief Mapping from identifiers that represent macros whose definitions
+ /// have not yet been deserialized to the global offset where the macro
+ /// record resides.
+ llvm::DenseMap<IdentifierInfo *, uint64_t> UnreadMacroRecordOffsets;
+
+ typedef ContinuousRangeMap<unsigned, ModuleFile *, 4>
+ GlobalPreprocessedEntityMapType;
+
+ /// \brief Mapping from global preprocessing entity IDs to the module in
+ /// which the preprocessed entity resides along with the offset that should be
+  /// added to the global preprocessing entity ID to produce a local ID.
+ GlobalPreprocessedEntityMapType GlobalPreprocessedEntityMap;
+
+ /// \name CodeGen-relevant special data
+ /// \brief Fields containing data that is relevant to CodeGen.
+ //@{
+
+ /// \brief The IDs of all declarations that fulfill the criteria of
+ /// "interesting" decls.
+ ///
+ /// This contains the data loaded from all EXTERNAL_DEFINITIONS blocks in the
+ /// chain. The referenced declarations are deserialized and passed to the
+ /// consumer eagerly.
+ SmallVector<uint64_t, 16> ExternalDefinitions;
+
+  /// \brief The IDs of all tentative definitions stored in the chain.
+ ///
+ /// Sema keeps track of all tentative definitions in a TU because it has to
+ /// complete them and pass them on to CodeGen. Thus, tentative definitions in
+ /// the PCH chain must be eagerly deserialized.
+ SmallVector<uint64_t, 16> TentativeDefinitions;
+
+ /// \brief The IDs of all CXXRecordDecls stored in the chain whose VTables are
+ /// used.
+ ///
+ /// CodeGen has to emit VTables for these records, so they have to be eagerly
+ /// deserialized.
+ SmallVector<uint64_t, 64> VTableUses;
+
+ /// \brief A snapshot of the pending instantiations in the chain.
+ ///
+ /// This record tracks the instantiations that Sema has to perform at the
+ /// end of the TU. It consists of a pair of values for every pending
+ /// instantiation where the first value is the ID of the decl and the second
+ /// is the instantiation location.
+ SmallVector<uint64_t, 64> PendingInstantiations;
+
+ //@}
+
+ /// \name DiagnosticsEngine-relevant special data
+ /// \brief Fields containing data that is used for generating diagnostics
+ //@{
+
+ /// \brief A snapshot of Sema's unused file-scoped variable tracking, for
+ /// generating warnings.
+ SmallVector<uint64_t, 16> UnusedFileScopedDecls;
+
+ /// \brief A list of all the delegating constructors we've seen, to diagnose
+ /// cycles.
+ SmallVector<uint64_t, 4> DelegatingCtorDecls;
+
+ /// \brief Method selectors used in a @selector expression. Used for
+ /// implementation of -Wselector.
+ SmallVector<uint64_t, 64> ReferencedSelectorsData;
+
+ /// \brief A snapshot of Sema's weak undeclared identifier tracking, for
+ /// generating warnings.
+ SmallVector<uint64_t, 64> WeakUndeclaredIdentifiers;
+
+ /// \brief The IDs of type aliases for ext_vectors that exist in the chain.
+ ///
+ /// Used by Sema for finding sugared names for ext_vectors in diagnostics.
+ SmallVector<uint64_t, 4> ExtVectorDecls;
+
+ //@}
+
+ /// \name Sema-relevant special data
+ /// \brief Fields containing data that is used for semantic analysis
+ //@{
+
+ /// \brief The IDs of all locally scoped external decls in the chain.
+ ///
+ /// Sema tracks these to validate that the types are consistent across all
+ /// local external declarations.
+ SmallVector<uint64_t, 16> LocallyScopedExternalDecls;
+
+ /// \brief The IDs of all dynamic class declarations in the chain.
+ ///
+ /// Sema tracks these because it checks for the key functions being defined
+ /// at the end of the TU, in which case it directs CodeGen to emit the VTable.
+ SmallVector<uint64_t, 16> DynamicClasses;
+
+ /// \brief The IDs of the declarations Sema stores directly.
+ ///
+ /// Sema tracks a few important decls, such as namespace std, directly.
+ SmallVector<uint64_t, 4> SemaDeclRefs;
+
+ /// \brief The IDs of the types ASTContext stores directly.
+ ///
+ /// The AST context tracks a few important types, such as va_list, directly.
+ SmallVector<uint64_t, 16> SpecialTypes;
+
+ /// \brief The IDs of CUDA-specific declarations ASTContext stores directly.
+ ///
+ /// The AST context tracks a few important decls, currently cudaConfigureCall,
+ /// directly.
+ SmallVector<uint64_t, 2> CUDASpecialDeclRefs;
+
+ /// \brief The floating point pragma option settings.
+ SmallVector<uint64_t, 1> FPPragmaOptions;
+
+ /// \brief The OpenCL extension settings.
+ SmallVector<uint64_t, 1> OpenCLExtensions;
+
+ /// \brief A list of the namespaces we've seen.
+ SmallVector<uint64_t, 4> KnownNamespaces;
+
+ /// \brief A list of modules that were imported by precompiled headers or
+ /// any other non-module AST file.
+ SmallVector<serialization::SubmoduleID, 2> ImportedModules;
+ //@}
+
+ /// \brief The original file name that was used to build the primary AST file,
+ /// which may have been modified for relocatable-pch support.
+ std::string OriginalFileName;
+
+ /// \brief The actual original file name that was used to build the primary
+ /// AST file.
+ std::string ActualOriginalFileName;
+
+ /// \brief The file ID for the original file that was used to build the
+ /// primary AST file.
+ FileID OriginalFileID;
+
+ /// \brief The directory that the PCH was originally created in. Used to
+ /// allow resolving headers even after headers+PCH was moved to a new path.
+ std::string OriginalDir;
+
+ /// \brief The directory that the PCH we are reading is stored in.
+ std::string CurrentDir;
+
+ /// \brief Whether this precompiled header is a relocatable PCH file.
+ bool RelocatablePCH;
+
+ /// \brief The system include root to be used when loading the
+ /// precompiled header.
+ std::string isysroot;
+
+ /// \brief Whether to disable the normal validation performed on precompiled
+ /// headers when they are loaded.
+ bool DisableValidation;
+
+ /// \brief Whether to disable the use of stat caches in AST files.
+ bool DisableStatCache;
+
+ /// \brief Whether to accept an AST file with compiler errors.
+ bool AllowASTWithCompilerErrors;
+
+ /// \brief The current "generation" of the module file import stack, which
+ /// indicates how many separate module file load operations have occurred.
+ unsigned CurrentGeneration;
+
+ /// \brief Mapping from switch-case IDs in the chain to switch-case statements
+ ///
+ /// Statements usually don't have IDs, but switch cases need them, so that the
+ /// switch statement can refer to them.
+ std::map<unsigned, SwitchCase *> SwitchCaseStmts;
+
+ /// \brief The number of stat() calls that hit/missed the stat
+ /// cache.
+ unsigned NumStatHits, NumStatMisses;
+
+ /// \brief The number of source location entries de-serialized from
+ /// the PCH file.
+ unsigned NumSLocEntriesRead;
+
+ /// \brief The number of source location entries in the chain.
+ unsigned TotalNumSLocEntries;
+
+ /// \brief The number of statements (and expressions) de-serialized
+ /// from the chain.
+ unsigned NumStatementsRead;
+
+ /// \brief The total number of statements (and expressions) stored
+ /// in the chain.
+ unsigned TotalNumStatements;
+
+ /// \brief The number of macros de-serialized from the chain.
+ unsigned NumMacrosRead;
+
+ /// \brief The total number of macros stored in the chain.
+ unsigned TotalNumMacros;
+
+ /// \brief The number of selectors that have been read.
+ unsigned NumSelectorsRead;
+
+ /// \brief The number of method pool entries that have been read.
+ unsigned NumMethodPoolEntriesRead;
+
+ /// \brief The number of times we have looked up a selector in the method
+ /// pool and not found anything interesting.
+ unsigned NumMethodPoolMisses;
+
+ /// \brief The total number of method pool entries in the selector table.
+ unsigned TotalNumMethodPoolEntries;
+
+ /// Number of lexical decl contexts read/total.
+ unsigned NumLexicalDeclContextsRead, TotalLexicalDeclContexts;
+
+ /// Number of visible decl contexts read/total.
+ unsigned NumVisibleDeclContextsRead, TotalVisibleDeclContexts;
+
+ /// Total size of modules, in bits, currently loaded
+ uint64_t TotalModulesSizeInBits;
+
+ /// \brief Number of Decl/types that are currently deserializing.
+ unsigned NumCurrentElementsDeserializing;
+
+ /// \brief Set true while we are in the process of passing deserialized
+  /// "interesting" decls to the consumer inside FinishedDeserializing().
+  /// This is used as a guard to avoid recursively repeating the process of
+  /// passing decls to the consumer.
+ bool PassingDeclsToConsumer;
+
+ /// Number of CXX base specifiers currently loaded
+ unsigned NumCXXBaseSpecifiersLoaded;
+
+ /// \brief An IdentifierInfo that has been loaded but whose top-level
+ /// declarations of the same name have not (yet) been loaded.
+ struct PendingIdentifierInfo {
+ IdentifierInfo *II;
+ SmallVector<uint32_t, 4> DeclIDs;
+ };
+
+ /// \brief The set of identifiers that were read while the AST reader was
+ /// (recursively) loading declarations.
+ ///
+ /// The declarations on the identifier chain for these identifiers will be
+ /// loaded once the recursive loading has completed.
+ std::deque<PendingIdentifierInfo> PendingIdentifierInfos;
+
+ /// \brief The generation number of each identifier, which keeps track of
+ /// the last time we loaded information about this identifier.
+ llvm::DenseMap<IdentifierInfo *, unsigned> IdentifierGeneration;
+
+ /// \brief Contains declarations and definitions that will be
+ /// "interesting" to the ASTConsumer, when we get that AST consumer.
+ ///
+ /// "Interesting" declarations are those that have data that may
+ /// need to be emitted, such as inline function definitions or
+ /// Objective-C protocols.
+ std::deque<Decl *> InterestingDecls;
+
+  /// \brief The set of redeclarable declarations that have been deserialized
+ /// since the last time the declaration chains were linked.
+ llvm::SmallPtrSet<Decl *, 16> RedeclsDeserialized;
+
+ /// \brief The list of redeclaration chains that still need to be
+ /// reconstructed.
+ ///
+ /// Each element is the global declaration ID of the first declaration in
+ /// the chain. Elements in this vector should be unique; use
+ /// PendingDeclChainsKnown to ensure uniqueness.
+ llvm::SmallVector<serialization::DeclID, 16> PendingDeclChains;
+
+ /// \brief Keeps track of the elements added to PendingDeclChains.
+ llvm::SmallSet<serialization::DeclID, 16> PendingDeclChainsKnown;
+
+ /// \brief The set of Objective-C categories that have been deserialized
+ /// since the last time the declaration chains were linked.
+ llvm::SmallPtrSet<ObjCCategoryDecl *, 16> CategoriesDeserialized;
+
+ /// \brief The set of Objective-C class definitions that have already been
+ /// loaded, for which we will need to check for categories whenever a new
+ /// module is loaded.
+ llvm::SmallVector<ObjCInterfaceDecl *, 16> ObjCClassesLoaded;
+
+ typedef llvm::DenseMap<Decl *, llvm::SmallVector<serialization::DeclID, 2> >
+ MergedDeclsMap;
+
+ /// \brief A mapping from canonical declarations to the set of additional
+ /// (global, previously-canonical) declaration IDs that have been merged with
+ /// that canonical declaration.
+ MergedDeclsMap MergedDecls;
+
+ typedef llvm::DenseMap<serialization::GlobalDeclID,
+ llvm::SmallVector<serialization::DeclID, 2> >
+ StoredMergedDeclsMap;
+
+ /// \brief A mapping from canonical declaration IDs to the set of additional
+ /// declaration IDs that have been merged with that canonical declaration.
+ ///
+ /// This is the deserialized representation of the entries in MergedDecls.
+ /// When we query entries in MergedDecls, they will be augmented with entries
+ /// from StoredMergedDecls.
+ StoredMergedDeclsMap StoredMergedDecls;
+
+ /// \brief Combine the stored merged declarations for the given canonical
+ /// declaration into the set of merged declarations.
+ ///
+ /// \returns An iterator into MergedDecls that corresponds to the position of
+ /// the given canonical declaration.
+ MergedDeclsMap::iterator
+ combineStoredMergedDecls(Decl *Canon, serialization::GlobalDeclID CanonID);
+
+  /// \brief Load and attach the previous declaration of the given Decl.
+ void loadAndAttachPreviousDecl(Decl *D, serialization::DeclID ID);
+
+ /// \brief When reading a Stmt tree, Stmt operands are placed in this stack.
+ SmallVector<Stmt *, 16> StmtStack;
+
+ /// \brief What kind of records we are reading.
+ enum ReadingKind {
+ Read_Decl, Read_Type, Read_Stmt
+ };
+
+ /// \brief What kind of records we are reading.
+ ReadingKind ReadingKind;
+
+ /// \brief RAII object to change the reading kind.
+ class ReadingKindTracker {
+ ASTReader &Reader;
+ enum ReadingKind PrevKind;
+
+ ReadingKindTracker(const ReadingKindTracker&); // do not implement
+ ReadingKindTracker &operator=(const ReadingKindTracker&);// do not implement
+
+ public:
+ ReadingKindTracker(enum ReadingKind newKind, ASTReader &reader)
+ : Reader(reader), PrevKind(Reader.ReadingKind) {
+ Reader.ReadingKind = newKind;
+ }
+
+ ~ReadingKindTracker() { Reader.ReadingKind = PrevKind; }
+ };
+
+ /// \brief All predefines buffers in the chain, to be treated as if
+ /// concatenated.
+ PCHPredefinesBlocks PCHPredefinesBuffers;
+
+ /// \brief Suggested contents of the predefines buffer, after this
+ /// PCH file has been processed.
+ ///
+ /// In most cases, this string will be empty, because the predefines
+ /// buffer computed to build the PCH file will be identical to the
+ /// predefines buffer computed from the command line. However, when
+ /// there are differences that the PCH reader can work around, this
+ /// predefines buffer may contain additional definitions.
+ std::string SuggestedPredefines;
+
+ /// \brief Reads a statement from the specified cursor.
+ Stmt *ReadStmtFromStream(ModuleFile &F);
+
+ /// \brief Get a FileEntry out of a filename stored in the PCH, making sure
+ /// we take into account all the necessary relocations.
+ const FileEntry *getFileEntry(StringRef filename);
+
+ void MaybeAddSystemRootToFilename(std::string &Filename);
+
+ ASTReadResult ReadASTCore(StringRef FileName, ModuleKind Type,
+ ModuleFile *ImportedBy);
+ ASTReadResult ReadASTBlock(ModuleFile &F);
+ bool CheckPredefinesBuffers();
+ bool ParseLineTable(ModuleFile &F, SmallVectorImpl<uint64_t> &Record);
+ ASTReadResult ReadSourceManagerBlock(ModuleFile &F);
+ ASTReadResult ReadSLocEntryRecord(int ID);
+ llvm::BitstreamCursor &SLocCursorForID(int ID);
+ SourceLocation getImportLocation(ModuleFile *F);
+ ASTReadResult ReadSubmoduleBlock(ModuleFile &F);
+ bool ParseLanguageOptions(const SmallVectorImpl<uint64_t> &Record);
+
+ struct RecordLocation {
+ RecordLocation(ModuleFile *M, uint64_t O)
+ : F(M), Offset(O) {}
+ ModuleFile *F;
+ uint64_t Offset;
+ };
+
+ QualType readTypeRecord(unsigned Index);
+ RecordLocation TypeCursorForIndex(unsigned Index);
+ void LoadedDecl(unsigned Index, Decl *D);
+ Decl *ReadDeclRecord(serialization::DeclID ID);
+ RecordLocation DeclCursorForID(serialization::DeclID ID,
+ unsigned &RawLocation);
+ void loadDeclUpdateRecords(serialization::DeclID ID, Decl *D);
+ void loadPendingDeclChain(serialization::GlobalDeclID ID);
+ void loadObjCCategories(serialization::GlobalDeclID ID, ObjCInterfaceDecl *D,
+ unsigned PreviousGeneration = 0);
+
+ RecordLocation getLocalBitOffset(uint64_t GlobalOffset);
+ uint64_t getGlobalBitOffset(ModuleFile &M, uint32_t LocalOffset);
+
+ /// \brief Returns the first preprocessed entity ID that ends after \arg BLoc.
+ serialization::PreprocessedEntityID
+ findBeginPreprocessedEntity(SourceLocation BLoc) const;
+
+ /// \brief Returns the first preprocessed entity ID that begins after \arg
+ /// ELoc.
+ serialization::PreprocessedEntityID
+ findEndPreprocessedEntity(SourceLocation ELoc) const;
+
+ /// \brief \arg SLocMapI points at a chunk of a module that contains no
+ /// preprocessed entities, or whose entities are not the ones we are looking
+ /// for. Find the next module that contains entities and return the ID of the
+ /// first entry.
+ serialization::PreprocessedEntityID
+ findNextPreprocessedEntity(
+ GlobalSLocOffsetMapType::const_iterator SLocMapI) const;
+
+ /// \brief Returns (ModuleFile, Local index) pair for \arg GlobalIndex of a
+ /// preprocessed entity.
+ std::pair<ModuleFile *, unsigned>
+ getModulePreprocessedEntity(unsigned GlobalIndex);
+
+ void PassInterestingDeclsToConsumer();
+ void PassInterestingDeclToConsumer(Decl *D);
+
+ void finishPendingActions();
+
+ /// \brief Produce an error diagnostic and return true.
+ ///
+ /// This routine should only be used for fatal errors that have to
+ /// do with non-routine failures (e.g., corrupted AST file).
+ void Error(StringRef Msg);
+ void Error(unsigned DiagID, StringRef Arg1 = StringRef(),
+ StringRef Arg2 = StringRef());
+
+ ASTReader(const ASTReader&); // do not implement
+ ASTReader &operator=(const ASTReader &); // do not implement
+public:
+ typedef SmallVector<uint64_t, 64> RecordData;
+
+ /// \brief Load the AST file and validate its contents against the given
+ /// Preprocessor.
+ ///
+ /// \param PP the preprocessor associated with the context in which this
+ /// precompiled header will be loaded.
+ ///
+ /// \param Context the AST context that this precompiled header will be
+ /// loaded into.
+ ///
+ /// \param isysroot If non-empty, the system include path specified by the
+ /// user. This is only used with relocatable PCH files. If empty,
+ /// a relocatable PCH file will use the default path "/".
+ ///
+ /// \param DisableValidation If true, the AST reader will suppress most
+ /// of its regular consistency checking, allowing the use of precompiled
+ /// headers that cannot be determined to be compatible.
+ ///
+ /// \param DisableStatCache If true, the AST reader will ignore the
+ /// stat cache in the AST files. This performance pessimization can
+ /// help when an AST file is being used in cases where the
+ /// underlying files in the file system may have changed, but
+ /// parsing should still continue.
+ ///
+ /// \param AllowASTWithCompilerErrors If true, the AST reader will accept an
+ /// AST file that was created from an AST with compiler errors;
+ /// otherwise it will reject it.
+ ASTReader(Preprocessor &PP, ASTContext &Context, StringRef isysroot = "",
+ bool DisableValidation = false, bool DisableStatCache = false,
+ bool AllowASTWithCompilerErrors = false);
+
+ ~ASTReader();
+
+ SourceManager &getSourceManager() const { return SourceMgr; }
+
+ /// \brief Load the AST file designated by the given file name.
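+ ///
+ /// Illustrative usage (a hedged sketch; \c PP and \c Context are assumed to
+ /// be the preprocessor and AST context being populated, and MK_PCH is the
+ /// module kind used for ordinary precompiled headers):
+ /// \code
+ ///   ASTReader Reader(PP, Context);
+ ///   if (Reader.ReadAST("prefix.pch", serialization::MK_PCH) !=
+ ///       ASTReader::Success) {
+ ///     // the AST file could not be loaded; diagnostics have been emitted
+ ///   }
+ /// \endcode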
+ ASTReadResult ReadAST(const std::string &FileName, ModuleKind Type);
+
+ /// \brief Checks that no file that is stored in PCH is out-of-sync with
+ /// the actual file in the file system.
+ ASTReadResult validateFileEntries(ModuleFile &M);
+
+ /// \brief Make the entities in the given module and any of its (non-explicit)
+ /// submodules visible to name lookup.
+ ///
+ /// \param Mod The module whose names should be made visible.
+ ///
+ /// \param Visibility The level of visibility to give the names in the module.
+ /// Visibility can only be increased over time.
+ void makeModuleVisible(Module *Mod,
+ Module::NameVisibilityKind NameVisibility);
+
+ /// \brief Make the names within this set of hidden names visible.
+ void makeNamesVisible(const HiddenNames &Names);
+
+ /// \brief Set the AST callbacks listener.
+ void setListener(ASTReaderListener *listener) {
+ Listener.reset(listener);
+ }
+
+ /// \brief Set the AST deserialization listener.
+ void setDeserializationListener(ASTDeserializationListener *Listener);
+
+ /// \brief Initializes the ASTContext
+ void InitializeContext();
+
+ /// \brief Add in-memory (virtual file) buffer.
+ void addInMemoryBuffer(StringRef &FileName, llvm::MemoryBuffer *Buffer) {
+ ModuleMgr.addInMemoryBuffer(FileName, Buffer);
+ }
+
+ /// \brief Finalizes the AST reader's state before writing an AST file to
+ /// disk.
+ ///
+ /// This operation may undo temporary state in the AST that should not be
+ /// emitted.
+ void finalizeForWriting();
+
+ /// \brief Retrieve the module manager.
+ ModuleManager &getModuleManager() { return ModuleMgr; }
+
+ /// \brief Retrieve the preprocessor.
+ Preprocessor &getPreprocessor() const { return PP; }
+
+ /// \brief Retrieve the name of the original source file.
+ const std::string &getOriginalSourceFile() { return OriginalFileName; }
+
+ /// \brief Retrieve the name of the original source file directly from
+ /// the AST file, without actually loading the AST file.
+ static std::string getOriginalSourceFile(const std::string &ASTFileName,
+ FileManager &FileMgr,
+ DiagnosticsEngine &Diags);
+
+ /// \brief Returns the suggested contents of the predefines buffer,
+ /// which contains a (typically-empty) subset of the predefines
+ /// built prior to including the precompiled header.
+ const std::string &getSuggestedPredefines() { return SuggestedPredefines; }
+
+ /// \brief Read a preallocated preprocessed entity from the external source.
+ ///
+ /// \returns null if an error occurred that prevented the preprocessed
+ /// entity from being loaded.
+ virtual PreprocessedEntity *ReadPreprocessedEntity(unsigned Index);
+
+ /// \brief Returns a pair of [Begin, End) indices of preallocated
+ /// preprocessed entities that \arg Range encompasses.
+ virtual std::pair<unsigned, unsigned>
+ findPreprocessedEntitiesInRange(SourceRange Range);
+
+ /// \brief Returns, if it can be determined, whether the preallocated
+ /// preprocessed entity with index \arg Index came from file \arg FID.
+ virtual llvm::Optional<bool> isPreprocessedEntityInFileID(unsigned Index,
+ FileID FID);
+
+ /// \brief Read the header file information for the given file entry.
+ virtual HeaderFileInfo GetHeaderFileInfo(const FileEntry *FE);
+
+ void ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag);
+
+ /// \brief Returns the number of source locations found in the chain.
+ unsigned getTotalNumSLocs() const {
+ return TotalNumSLocEntries;
+ }
+
+ /// \brief Returns the number of identifiers found in the chain.
+ unsigned getTotalNumIdentifiers() const {
+ return static_cast<unsigned>(IdentifiersLoaded.size());
+ }
+
+ /// \brief Returns the number of types found in the chain.
+ unsigned getTotalNumTypes() const {
+ return static_cast<unsigned>(TypesLoaded.size());
+ }
+
+ /// \brief Returns the number of declarations found in the chain.
+ unsigned getTotalNumDecls() const {
+ return static_cast<unsigned>(DeclsLoaded.size());
+ }
+
+ /// \brief Returns the number of submodules known.
+ unsigned getTotalNumSubmodules() const {
+ return static_cast<unsigned>(SubmodulesLoaded.size());
+ }
+
+ /// \brief Returns the number of selectors found in the chain.
+ unsigned getTotalNumSelectors() const {
+ return static_cast<unsigned>(SelectorsLoaded.size());
+ }
+
+ /// \brief Returns the number of preprocessed entities known to the AST
+ /// reader.
+ unsigned getTotalNumPreprocessedEntities() const {
+ unsigned Result = 0;
+ for (ModuleConstIterator I = ModuleMgr.begin(),
+ E = ModuleMgr.end(); I != E; ++I) {
+ Result += (*I)->NumPreprocessedEntities;
+ }
+
+ return Result;
+ }
+
+ /// \brief Returns the number of C++ base specifiers found in the chain.
+ unsigned getTotalNumCXXBaseSpecifiers() const {
+ return NumCXXBaseSpecifiersLoaded;
+ }
+
+ /// \brief Reads a TemplateArgumentLocInfo appropriate for the
+ /// given TemplateArgument kind.
+ TemplateArgumentLocInfo
+ GetTemplateArgumentLocInfo(ModuleFile &F, TemplateArgument::ArgKind Kind,
+ const RecordData &Record, unsigned &Idx);
+
+ /// \brief Reads a TemplateArgumentLoc.
+ TemplateArgumentLoc
+ ReadTemplateArgumentLoc(ModuleFile &F,
+ const RecordData &Record, unsigned &Idx);
+
+ /// \brief Reads a declarator info from the given record.
+ TypeSourceInfo *GetTypeSourceInfo(ModuleFile &F,
+ const RecordData &Record, unsigned &Idx);
+
+ /// \brief Resolve a type ID into a type, potentially building a new
+ /// type.
+ QualType GetType(serialization::TypeID ID);
+
+ /// \brief Resolve a local type ID within a given AST file into a type.
+ QualType getLocalType(ModuleFile &F, unsigned LocalID);
+
+ /// \brief Map a local type ID within a given AST file into a global type ID.
+ serialization::TypeID getGlobalTypeID(ModuleFile &F, unsigned LocalID) const;
+
+ /// \brief Read a type from the current position in the given record, which
+ /// was read from the given AST file.
+ QualType readType(ModuleFile &F, const RecordData &Record, unsigned &Idx) {
+ if (Idx >= Record.size())
+ return QualType();
+
+ return getLocalType(F, Record[Idx++]);
+ }
+
+ /// \brief Map from a local declaration ID within a given module to a
+ /// global declaration ID.
+ serialization::DeclID getGlobalDeclID(ModuleFile &F, unsigned LocalID) const;
+
+ /// \brief Returns true if global DeclID \arg ID originated from module
+ /// \arg M.
+ bool isDeclIDFromModule(serialization::GlobalDeclID ID, ModuleFile &M) const;
+
+ /// \brief Retrieve the module file that owns the given declaration, or NULL
+ /// if the declaration is not from a module file.
+ ModuleFile *getOwningModuleFile(Decl *D);
+
+ /// \brief Returns the source location for the decl \arg ID.
+ SourceLocation getSourceLocationForDeclID(serialization::GlobalDeclID ID);
+
+ /// \brief Resolve a declaration ID into a declaration, potentially
+ /// building a new declaration.
+ Decl *GetDecl(serialization::DeclID ID);
+ virtual Decl *GetExternalDecl(uint32_t ID);
+
+ /// \brief Reads a declaration with the given local ID in the given module.
+ Decl *GetLocalDecl(ModuleFile &F, uint32_t LocalID) {
+ return GetDecl(getGlobalDeclID(F, LocalID));
+ }
+
+ /// \brief Reads a declaration with the given local ID in the given module.
+ ///
+ /// \returns The requested declaration, casted to the given return type.
+ template<typename T>
+ T *GetLocalDeclAs(ModuleFile &F, uint32_t LocalID) {
+ return cast_or_null<T>(GetLocalDecl(F, LocalID));
+ }
+
+ /// \brief Map a global declaration ID into the declaration ID used to
+ /// refer to this declaration within the given module file.
+ ///
+ /// \returns the global ID of the given declaration as known in the given
+ /// module file.
+ serialization::DeclID
+ mapGlobalIDToModuleFileGlobalID(ModuleFile &M,
+ serialization::DeclID GlobalID);
+
+ /// \brief Reads a declaration ID from the given position in a record in the
+ /// given module.
+ ///
+ /// \returns The declaration ID read from the record, adjusted to a global ID.
+ serialization::DeclID ReadDeclID(ModuleFile &F, const RecordData &Record,
+ unsigned &Idx);
+
+ /// \brief Reads a declaration from the given position in a record in the
+ /// given module.
+ Decl *ReadDecl(ModuleFile &F, const RecordData &R, unsigned &I) {
+ return GetDecl(ReadDeclID(F, R, I));
+ }
+
+ /// \brief Reads a declaration from the given position in a record in the
+ /// given module.
+ ///
+ /// \returns The declaration read from this location, casted to the given
+ /// result type.
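+ ///
+ /// Illustrative usage (a sketch; \c F, \c Record and \c Idx stand for the
+ /// module file, record data and current index being read, and the choice of
+ /// VarDecl is purely for illustration):
+ /// \code
+ ///   VarDecl *VD = Reader.ReadDeclAs<VarDecl>(F, Record, Idx);
+ /// \endcode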
+ template<typename T>
+ T *ReadDeclAs(ModuleFile &F, const RecordData &R, unsigned &I) {
+ return cast_or_null<T>(GetDecl(ReadDeclID(F, R, I)));
+ }
+
+ /// \brief Read a CXXBaseSpecifiers ID from the given record and
+ /// return its global bit offset.
+ uint64_t readCXXBaseSpecifiers(ModuleFile &M, const RecordData &Record,
+ unsigned &Idx);
+
+ virtual CXXBaseSpecifier *GetExternalCXXBaseSpecifiers(uint64_t Offset);
+
+ /// \brief Resolve the offset of a statement into a statement.
+ ///
+ /// This operation will read a new statement from the external
+ /// source each time it is called, and is meant to be used via a
+ /// LazyOffsetPtr (which is used by Decls for the body of functions, etc).
+ virtual Stmt *GetExternalDeclStmt(uint64_t Offset);
+
+ /// ReadBlockAbbrevs - Enter a subblock of the specified BlockID with the
+ /// specified cursor. Read the abbreviations that are at the top of the block
+ /// and then leave the cursor pointing into the block.
+ bool ReadBlockAbbrevs(llvm::BitstreamCursor &Cursor, unsigned BlockID);
+
+ /// \brief Finds all the visible declarations with a given name.
+ /// The current implementation of this method just loads the entire
+ /// lookup table as unmaterialized references.
+ virtual DeclContext::lookup_result
+ FindExternalVisibleDeclsByName(const DeclContext *DC,
+ DeclarationName Name);
+
+ /// \brief Read all of the declarations lexically stored in a
+ /// declaration context.
+ ///
+ /// \param DC The declaration context whose declarations will be
+ /// read.
+ ///
+ /// \param Decls Vector that will contain the declarations loaded
+ /// from the external source. The caller is responsible for merging
+ /// these declarations with any declarations already stored in the
+ /// declaration context.
+ ///
+ /// \returns true if there was an error while reading the
+ /// declarations for this declaration context.
+ virtual ExternalLoadResult FindExternalLexicalDecls(const DeclContext *DC,
+ bool (*isKindWeWant)(Decl::Kind),
+ SmallVectorImpl<Decl*> &Decls);
+
+ /// \brief Get the decls that are contained in a file in the Offset/Length
+ /// range. \arg Length can be 0 to indicate a point at \arg Offset instead of
+ /// a range.
+ virtual void FindFileRegionDecls(FileID File, unsigned Offset,unsigned Length,
+ SmallVectorImpl<Decl *> &Decls);
+
+ /// \brief Notify the ASTReader that we started deserializing a decl or type;
+ /// until FinishedDeserializing is called, there may be decls that are still
+ /// being initialized. Must be paired with FinishedDeserializing.
+ virtual void StartedDeserializing() { ++NumCurrentElementsDeserializing; }
+
+ /// \brief Notify ASTReader that we finished the deserialization of
+ /// a decl or type. Must be paired with StartedDeserializing.
+ virtual void FinishedDeserializing();
+
+ /// \brief Function that will be invoked when we begin parsing a new
+ /// translation unit involving this external AST source.
+ ///
+ /// This function will provide all of the external definitions to
+ /// the ASTConsumer.
+ virtual void StartTranslationUnit(ASTConsumer *Consumer);
+
+ /// \brief Print some statistics about AST usage.
+ virtual void PrintStats();
+
+ /// \brief Dump information about the AST reader to standard error.
+ void dump();
+
+ /// Return the amount of memory used by memory buffers, breaking down
+ /// by heap-backed versus mmap'ed memory.
+ virtual void getMemoryBufferSizes(MemoryBufferSizes &sizes) const;
+
+ /// \brief Initialize the semantic source with the Sema instance
+ /// being used to perform semantic analysis on the abstract syntax
+ /// tree.
+ virtual void InitializeSema(Sema &S);
+
+ /// \brief Inform the semantic consumer that Sema is no longer available.
+ virtual void ForgetSema() { SemaObj = 0; }
+
+ /// \brief Retrieve the IdentifierInfo for the named identifier.
+ ///
+ /// This routine builds a new IdentifierInfo for the given identifier. If any
+ /// declarations with this name are visible from translation unit scope, their
+ /// declarations will be deserialized and introduced into the declaration
+ /// chain of the identifier.
+ virtual IdentifierInfo *get(const char *NameStart, const char *NameEnd);
+ IdentifierInfo *get(StringRef Name) {
+ return get(Name.begin(), Name.end());
+ }
+
+ /// \brief Retrieve an iterator into the set of all identifiers
+ /// in all loaded AST files.
+ virtual IdentifierIterator *getIdentifiers() const;
+
+ /// \brief Load the contents of the global method pool for a given
+ /// selector.
+ virtual void ReadMethodPool(Selector Sel);
+
+ /// \brief Load the set of namespaces that are known to the external source,
+ /// which will be used during typo correction.
+ virtual void ReadKnownNamespaces(
+ SmallVectorImpl<NamespaceDecl *> &Namespaces);
+
+ virtual void ReadTentativeDefinitions(
+ SmallVectorImpl<VarDecl *> &TentativeDefs);
+
+ virtual void ReadUnusedFileScopedDecls(
+ SmallVectorImpl<const DeclaratorDecl *> &Decls);
+
+ virtual void ReadDelegatingConstructors(
+ SmallVectorImpl<CXXConstructorDecl *> &Decls);
+
+ virtual void ReadExtVectorDecls(SmallVectorImpl<TypedefNameDecl *> &Decls);
+
+ virtual void ReadDynamicClasses(SmallVectorImpl<CXXRecordDecl *> &Decls);
+
+ virtual void ReadLocallyScopedExternalDecls(
+ SmallVectorImpl<NamedDecl *> &Decls);
+
+ virtual void ReadReferencedSelectors(
+ SmallVectorImpl<std::pair<Selector, SourceLocation> > &Sels);
+
+ virtual void ReadWeakUndeclaredIdentifiers(
+ SmallVectorImpl<std::pair<IdentifierInfo *, WeakInfo> > &WI);
+
+ virtual void ReadUsedVTables(SmallVectorImpl<ExternalVTableUse> &VTables);
+
+ virtual void ReadPendingInstantiations(
+ SmallVectorImpl<std::pair<ValueDecl *,
+ SourceLocation> > &Pending);
+
+ /// \brief Load a selector from disk, registering its ID if it exists.
+ void LoadSelector(Selector Sel);
+
+ void SetIdentifierInfo(unsigned ID, IdentifierInfo *II);
+ void SetGloballyVisibleDecls(IdentifierInfo *II,
+ const SmallVectorImpl<uint32_t> &DeclIDs,
+ bool Nonrecursive = false);
+
+ /// \brief Report a diagnostic.
+ DiagnosticBuilder Diag(unsigned DiagID);
+
+ /// \brief Report a diagnostic.
+ DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
+
+ IdentifierInfo *DecodeIdentifierInfo(serialization::IdentifierID ID);
+
+ IdentifierInfo *GetIdentifierInfo(ModuleFile &M, const RecordData &Record,
+ unsigned &Idx) {
+ return DecodeIdentifierInfo(getGlobalIdentifierID(M, Record[Idx++]));
+ }
+
+ virtual IdentifierInfo *GetIdentifier(serialization::IdentifierID ID) {
+ return DecodeIdentifierInfo(ID);
+ }
+
+ IdentifierInfo *getLocalIdentifier(ModuleFile &M, unsigned LocalID);
+
+ serialization::IdentifierID getGlobalIdentifierID(ModuleFile &M,
+ unsigned LocalID);
+
+ /// \brief Read the source location entry with index ID.
+ virtual bool ReadSLocEntry(int ID);
+
+ /// \brief Retrieve the global submodule ID given a module and its local ID
+ /// number.
+ serialization::SubmoduleID
+ getGlobalSubmoduleID(ModuleFile &M, unsigned LocalID);
+
+ /// \brief Retrieve the submodule that corresponds to a global submodule ID.
+ ///
+ Module *getSubmodule(serialization::SubmoduleID GlobalID);
+
+ /// \brief Retrieve a selector from the given module with its local ID
+ /// number.
+ Selector getLocalSelector(ModuleFile &M, unsigned LocalID);
+
+ Selector DecodeSelector(serialization::SelectorID Idx);
+
+ virtual Selector GetExternalSelector(serialization::SelectorID ID);
+ uint32_t GetNumExternalSelectors();
+
+ Selector ReadSelector(ModuleFile &M, const RecordData &Record, unsigned &Idx) {
+ return getLocalSelector(M, Record[Idx++]);
+ }
+
+ /// \brief Retrieve the global selector ID that corresponds to the given
+ /// local selector ID in a given module.
+ serialization::SelectorID getGlobalSelectorID(ModuleFile &F,
+ unsigned LocalID) const;
+
+ /// \brief Read a declaration name.
+ DeclarationName ReadDeclarationName(ModuleFile &F,
+ const RecordData &Record, unsigned &Idx);
+ void ReadDeclarationNameLoc(ModuleFile &F,
+ DeclarationNameLoc &DNLoc, DeclarationName Name,
+ const RecordData &Record, unsigned &Idx);
+ void ReadDeclarationNameInfo(ModuleFile &F, DeclarationNameInfo &NameInfo,
+ const RecordData &Record, unsigned &Idx);
+
+ void ReadQualifierInfo(ModuleFile &F, QualifierInfo &Info,
+ const RecordData &Record, unsigned &Idx);
+
+ NestedNameSpecifier *ReadNestedNameSpecifier(ModuleFile &F,
+ const RecordData &Record,
+ unsigned &Idx);
+
+ NestedNameSpecifierLoc ReadNestedNameSpecifierLoc(ModuleFile &F,
+ const RecordData &Record,
+ unsigned &Idx);
+
+ /// \brief Read a template name.
+ TemplateName ReadTemplateName(ModuleFile &F, const RecordData &Record,
+ unsigned &Idx);
+
+ /// \brief Read a template argument.
+ TemplateArgument ReadTemplateArgument(ModuleFile &F,
+ const RecordData &Record,unsigned &Idx);
+
+ /// \brief Read a template parameter list.
+ TemplateParameterList *ReadTemplateParameterList(ModuleFile &F,
+ const RecordData &Record,
+ unsigned &Idx);
+
+ /// \brief Read a template argument array.
+ void
+ ReadTemplateArgumentList(SmallVector<TemplateArgument, 8> &TemplArgs,
+ ModuleFile &F, const RecordData &Record,
+ unsigned &Idx);
+
+ /// \brief Read a UnresolvedSet structure.
+ void ReadUnresolvedSet(ModuleFile &F, UnresolvedSetImpl &Set,
+ const RecordData &Record, unsigned &Idx);
+
+ /// \brief Read a C++ base specifier.
+ CXXBaseSpecifier ReadCXXBaseSpecifier(ModuleFile &F,
+ const RecordData &Record,unsigned &Idx);
+
+ /// \brief Read a CXXCtorInitializer array.
+ std::pair<CXXCtorInitializer **, unsigned>
+ ReadCXXCtorInitializers(ModuleFile &F, const RecordData &Record,
+ unsigned &Idx);
+
+ /// \brief Read a source location from raw form.
+ SourceLocation ReadSourceLocation(ModuleFile &ModuleFile, unsigned Raw) const {
+ SourceLocation Loc = SourceLocation::getFromRawEncoding(Raw);
+ assert(ModuleFile.SLocRemap.find(Loc.getOffset()) != ModuleFile.SLocRemap.end() &&
+ "Cannot find offset to remap.");
+ int Remap = ModuleFile.SLocRemap.find(Loc.getOffset())->second;
+ return Loc.getLocWithOffset(Remap);
+ }
+
+ /// \brief Read a source location.
+ SourceLocation ReadSourceLocation(ModuleFile &ModuleFile,
+ const RecordData &Record, unsigned& Idx) {
+ return ReadSourceLocation(ModuleFile, Record[Idx++]);
+ }
+
+ /// \brief Read a source range.
+ SourceRange ReadSourceRange(ModuleFile &F,
+ const RecordData &Record, unsigned& Idx);
+
+ /// \brief Read an integral value
+ llvm::APInt ReadAPInt(const RecordData &Record, unsigned &Idx);
+
+ /// \brief Read a signed integral value
+ llvm::APSInt ReadAPSInt(const RecordData &Record, unsigned &Idx);
+
+ /// \brief Read a floating-point value
+ llvm::APFloat ReadAPFloat(const RecordData &Record, unsigned &Idx);
+
+ /// \brief Read a string
+ std::string ReadString(const RecordData &Record, unsigned &Idx);
+
+ /// \brief Read a version tuple.
+ VersionTuple ReadVersionTuple(const RecordData &Record, unsigned &Idx);
+
+ CXXTemporary *ReadCXXTemporary(ModuleFile &F, const RecordData &Record,
+ unsigned &Idx);
+
+ /// \brief Reads attributes from the current stream position.
+ void ReadAttributes(ModuleFile &F, AttrVec &Attrs,
+ const RecordData &Record, unsigned &Idx);
+
+ /// \brief Reads a statement.
+ Stmt *ReadStmt(ModuleFile &F);
+
+ /// \brief Reads an expression.
+ Expr *ReadExpr(ModuleFile &F);
+
+ /// \brief Reads a sub-statement operand during statement reading.
+ Stmt *ReadSubStmt() {
+ assert(ReadingKind == Read_Stmt &&
+ "Should be called only during statement reading!");
+ // Subexpressions are stored from last to first, so the next Stmt we need
+ // is at the back of the stack.
+ assert(!StmtStack.empty() && "Read too many sub statements!");
+ return StmtStack.pop_back_val();
+ }
+
+ /// \brief Reads a sub-expression operand during statement reading.
+ Expr *ReadSubExpr();
+
+ /// \brief Reads the macro record located at the given offset.
+ void ReadMacroRecord(ModuleFile &F, uint64_t Offset);
+
+ /// \brief Determine the global preprocessed entity ID that corresponds to
+ /// the given local ID within the given module.
+ serialization::PreprocessedEntityID
+ getGlobalPreprocessedEntityID(ModuleFile &M, unsigned LocalID) const;
+
+ /// \brief Note that the identifier is a macro whose record will be loaded
+ /// from the given AST file at the given (file-local) offset.
+ ///
+ /// \param II The name of the macro.
+ ///
+ /// \param F The module file from which the macro definition was deserialized.
+ ///
+ /// \param Offset The offset into the module file at which the macro
+ /// definition is located.
+ ///
+ /// \param Visible Whether the macro should be made visible.
+ void setIdentifierIsMacro(IdentifierInfo *II, ModuleFile &F,
+ uint64_t Offset, bool Visible);
+
+ /// \brief Read the set of macros defined by this external macro source.
+ virtual void ReadDefinedMacros();
+
+ /// \brief Read the macro definition for this identifier.
+ virtual void LoadMacroDefinition(IdentifierInfo *II);
+
+ /// \brief Update an out-of-date identifier.
+ virtual void updateOutOfDateIdentifier(IdentifierInfo &II);
+
+ /// \brief Note that this identifier is up-to-date.
+ void markIdentifierUpToDate(IdentifierInfo *II);
+
+ /// \brief Read the macro definition at the position given by an iterator
+ /// into the table of not-yet-read macro record offsets.
+ void LoadMacroDefinition(
+ llvm::DenseMap<IdentifierInfo *, uint64_t>::iterator Pos);
+
+ /// \brief Load all external visible decls in the given DeclContext.
+ void completeVisibleDeclsMap(DeclContext *DC);
+
+ /// \brief Retrieve the AST context that this AST reader supplements.
+ ASTContext &getContext() { return Context; }
+
+ /// \brief Contains declarations that were loaded before we have
+ /// access to a Sema object.
+ SmallVector<NamedDecl *, 16> PreloadedDecls;
+
+ /// \brief Retrieve the semantic analysis object used to analyze the
+ /// translation unit in which the precompiled header is being
+ /// imported.
+ Sema *getSema() { return SemaObj; }
+
+ /// \brief Retrieve the identifier table associated with the
+ /// preprocessor.
+ IdentifierTable &getIdentifierTable();
+
+ /// \brief Record that the given ID maps to the given switch-case
+ /// statement.
+ void RecordSwitchCaseID(SwitchCase *SC, unsigned ID);
+
+ /// \brief Retrieve the switch-case statement with the given ID.
+ SwitchCase *getSwitchCaseWithID(unsigned ID);
+
+ void ClearSwitchCaseIDs();
+};
+
+/// \brief Helper class that saves the current stream position and
+/// then restores it when destroyed.
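+///
+/// Illustrative usage (a sketch; \c Cursor and \c Offset are assumed to be a
+/// bitstream cursor and a bit offset chosen by the caller):
+/// \code
+///   {
+///     SavedStreamPosition SavedPosition(Cursor);
+///     Cursor.JumpToBit(Offset);
+///     // ... read a record at Offset ...
+///   } // the destructor jumps the cursor back to the saved position
+/// \endcode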
+struct SavedStreamPosition {
+ explicit SavedStreamPosition(llvm::BitstreamCursor &Cursor)
+ : Cursor(Cursor), Offset(Cursor.GetCurrentBitNo()) { }
+
+ ~SavedStreamPosition() {
+ Cursor.JumpToBit(Offset);
+ }
+
+private:
+ llvm::BitstreamCursor &Cursor;
+ uint64_t Offset;
+};
+
+inline void PCHValidator::Error(const char *Msg) {
+ Reader.Error(Msg);
+}
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h b/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h
new file mode 100644
index 0000000..4c62385
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ASTWriter.h
@@ -0,0 +1,737 @@
+//===--- ASTWriter.h - AST File Writer --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTWriter class, which writes an AST file
+// containing a serialized representation of a translation unit.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_FRONTEND_AST_WRITER_H
+#define LLVM_CLANG_FRONTEND_AST_WRITER_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/Serialization/ASTBitCodes.h"
+#include "clang/Serialization/ASTDeserializationListener.h"
+#include "clang/Sema/SemaConsumer.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+#include <map>
+#include <queue>
+#include <vector>
+
+namespace llvm {
+ class APFloat;
+ class APInt;
+ class BitstreamWriter;
+}
+
+namespace clang {
+
+class ASTContext;
+class NestedNameSpecifier;
+class CXXBaseSpecifier;
+class CXXCtorInitializer;
+class FPOptions;
+class HeaderSearch;
+class IdentifierResolver;
+class MacroDefinition;
+class MemorizeStatCalls;
+class OpaqueValueExpr;
+class OpenCLOptions;
+class ASTReader;
+class Module;
+class PreprocessedEntity;
+class PreprocessingRecord;
+class Preprocessor;
+class Sema;
+class SourceManager;
+class SwitchCase;
+class TargetInfo;
+class VersionTuple;
+
+namespace SrcMgr { class SLocEntry; }
+
+/// \brief Writes an AST file containing the contents of a translation unit.
+///
+/// The ASTWriter class produces a bitstream containing the serialized
+/// representation of a given abstract syntax tree and its supporting
+/// data structures. This bitstream can be de-serialized via an
+/// instance of the ASTReader class.
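+///
+/// Illustrative usage (a hedged sketch; \c SemaRef names the Sema object for
+/// the translation unit being written, and the output handling is left to the
+/// caller):
+/// \code
+///   llvm::SmallVector<char, 128> Buffer;
+///   llvm::BitstreamWriter Stream(Buffer);
+///   ASTWriter Writer(Stream);
+///   Writer.WriteAST(SemaRef, /*StatCalls=*/0, "prefix.pch",
+///                   /*WritingModule=*/0, /*isysroot=*/"");
+///   // Buffer now holds the serialized AST; write it to disk as needed.
+/// \endcode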
+class ASTWriter : public ASTDeserializationListener,
+ public ASTMutationListener {
+public:
+ typedef SmallVector<uint64_t, 64> RecordData;
+ typedef SmallVectorImpl<uint64_t> RecordDataImpl;
+
+ friend class ASTDeclWriter;
+private:
+ /// \brief Map that provides the ID numbers of each type within the
+ /// output stream, plus those deserialized from a chained PCH.
+ ///
+ /// The ID numbers of types are consecutive (in order of discovery)
+ /// and start at 1. 0 is reserved for NULL. When types are actually
+ /// stored in the stream, the ID number is shifted by 2 bits to
+ /// allow for the const/volatile qualifiers.
+ ///
+ /// Keys in the map never have const/volatile qualifiers.
+ typedef llvm::DenseMap<QualType, serialization::TypeIdx,
+ serialization::UnsafeQualTypeDenseMapInfo>
+ TypeIdxMap;
+
+ /// \brief The bitstream writer used to emit this precompiled header.
+ llvm::BitstreamWriter &Stream;
+
+ /// \brief The ASTContext we're writing.
+ ASTContext *Context;
+
+ /// \brief The preprocessor we're writing.
+ Preprocessor *PP;
+
+ /// \brief The reader of existing AST files, if we're chaining.
+ ASTReader *Chain;
+
+ /// \brief The module we're currently writing, if any.
+ Module *WritingModule;
+
+ /// \brief Indicates when the AST writing is actively performing
+ /// serialization, rather than just queueing updates.
+ bool WritingAST;
+
+ /// \brief Indicates that the AST contained compiler errors.
+ bool ASTHasCompilerErrors;
+
+ /// \brief Stores a declaration or a type to be written to the AST file.
+ class DeclOrType {
+ public:
+ DeclOrType(Decl *D) : Stored(D), IsType(false) { }
+ DeclOrType(QualType T) : Stored(T.getAsOpaquePtr()), IsType(true) { }
+
+ bool isType() const { return IsType; }
+ bool isDecl() const { return !IsType; }
+
+ QualType getType() const {
+ assert(isType() && "Not a type!");
+ return QualType::getFromOpaquePtr(Stored);
+ }
+
+ Decl *getDecl() const {
+ assert(isDecl() && "Not a decl!");
+ return static_cast<Decl *>(Stored);
+ }
+
+ private:
+ void *Stored;
+ bool IsType;
+ };
+
+ /// \brief The declarations and types to emit.
+ std::queue<DeclOrType> DeclTypesToEmit;
+
+ /// \brief The first ID number we can use for our own declarations.
+ serialization::DeclID FirstDeclID;
+
+ /// \brief The decl ID that will be assigned to the next new decl.
+ serialization::DeclID NextDeclID;
+
+ /// \brief Map that provides the ID numbers of each declaration within
+ /// the output stream, as well as those deserialized from a chained PCH.
+ ///
+ /// The ID numbers of declarations are consecutive (in order of
+ /// discovery) and start at 2. 1 is reserved for the translation
+ /// unit, while 0 is reserved for NULL.
+ llvm::DenseMap<const Decl *, serialization::DeclID> DeclIDs;
+
+ /// \brief Offset of each declaration in the bitstream, indexed by
+ /// the declaration's ID.
+ std::vector<serialization::DeclOffset> DeclOffsets;
+
+ /// \brief Sorted (by file offset) vector of pairs of file offset/DeclID.
+ typedef SmallVector<std::pair<unsigned, serialization::DeclID>, 64>
+ LocDeclIDsTy;
+ struct DeclIDInFileInfo {
+ LocDeclIDsTy DeclIDs;
+ /// \brief Set when the DeclIDs vectors from all files are joined; it
+ /// indicates the index that this particular vector has in the global one.
+ unsigned FirstDeclIndex;
+ };
+ typedef llvm::DenseMap<const SrcMgr::SLocEntry *,
+ DeclIDInFileInfo *> FileDeclIDsTy;
+
+ /// \brief Map from file SLocEntries to info about the file-level declarations
+ /// that it contains.
+ FileDeclIDsTy FileDeclIDs;
+
+ void associateDeclWithFile(const Decl *D, serialization::DeclID);
+
+ /// \brief The first ID number we can use for our own types.
+ serialization::TypeID FirstTypeID;
+
+ /// \brief The type ID that will be assigned to the next new type.
+ serialization::TypeID NextTypeID;
+
+ /// \brief Map that provides the ID numbers of each type within the
+ /// output stream, plus those deserialized from a chained PCH.
+ ///
+ /// The ID numbers of types are consecutive (in order of discovery)
+ /// and start at 1. 0 is reserved for NULL. When types are actually
+ /// stored in the stream, the ID number is shifted by 2 bits to
+ /// allow for the const/volatile qualifiers.
+ ///
+ /// Keys in the map never have const/volatile qualifiers.
+ TypeIdxMap TypeIdxs;
+
+ /// \brief Offset of each type in the bitstream, indexed by
+ /// the type's ID.
+ std::vector<uint32_t> TypeOffsets;
+
+ /// \brief The first ID number we can use for our own identifiers.
+ serialization::IdentID FirstIdentID;
+
+ /// \brief The identifier ID that will be assigned to the next new identifier.
+ serialization::IdentID NextIdentID;
+
+ /// \brief Map that provides the ID numbers of each identifier in
+ /// the output stream.
+ ///
+ /// The ID numbers for identifiers are consecutive (in order of
+ /// discovery), starting at 1. An ID of zero refers to a NULL
+ /// IdentifierInfo.
+ llvm::DenseMap<const IdentifierInfo *, serialization::IdentID> IdentifierIDs;
+
+ /// @name FlushStmt Caches
+ /// @{
+
+ /// \brief Set of parent Stmts for the currently serializing sub stmt.
+ llvm::DenseSet<Stmt *> ParentStmts;
+
+ /// \brief Offsets of sub stmts already serialized. The offset points
+ /// just after the stmt record.
+ llvm::DenseMap<Stmt *, uint64_t> SubStmtEntries;
+
+ /// @}
+
+ /// \brief Offsets of each of the identifier IDs into the identifier
+ /// table.
+ std::vector<uint32_t> IdentifierOffsets;
+
+ /// \brief The first ID number we can use for our own submodules.
+ serialization::SubmoduleID FirstSubmoduleID;
+
+ /// \brief The submodule ID that will be assigned to the next new submodule.
+ serialization::SubmoduleID NextSubmoduleID;
+
+ /// \brief The first ID number we can use for our own selectors.
+ serialization::SelectorID FirstSelectorID;
+
+ /// \brief The selector ID that will be assigned to the next new selector.
+ serialization::SelectorID NextSelectorID;
+
+ /// \brief Map that provides the ID numbers of each Selector.
+ llvm::DenseMap<Selector, serialization::SelectorID> SelectorIDs;
+
+ /// \brief Offset of each selector within the method pool/selector
+ /// table, indexed by the Selector ID (-1).
+ std::vector<uint32_t> SelectorOffsets;
+
+ /// \brief Offsets of each of the macro identifiers into the
+ /// bitstream.
+ ///
+ /// For each identifier that is associated with a macro, this map
+ /// provides the offset into the bitstream where that macro is
+ /// defined.
+ llvm::DenseMap<const IdentifierInfo *, uint64_t> MacroOffsets;
+
+ /// \brief The set of identifiers that had macro definitions at some point.
+ std::vector<const IdentifierInfo *> DeserializedMacroNames;
+
+ /// \brief Mapping from macro definitions (as they occur in the preprocessing
+ /// record) to the macro IDs.
+ llvm::DenseMap<const MacroDefinition *, serialization::PreprocessedEntityID>
+ MacroDefinitions;
+
+ typedef SmallVector<uint64_t, 2> UpdateRecord;
+ typedef llvm::DenseMap<const Decl *, UpdateRecord> DeclUpdateMap;
+ /// \brief Mapping from declarations that came from a chained PCH to the
+ /// record containing modifications to them.
+ DeclUpdateMap DeclUpdates;
+
+ typedef llvm::DenseMap<Decl *, Decl *> FirstLatestDeclMap;
+ /// \brief Map of first declarations from a chained PCH that point to the
+ /// most recent declarations in another PCH.
+ FirstLatestDeclMap FirstLatestDecls;
+
+ /// \brief Declarations encountered that might be external
+ /// definitions.
+ ///
+ /// We keep track of external definitions (as well as tentative
+ /// definitions) as we are emitting declarations to the AST
+ /// file. The AST file contains a separate record for these external
+ /// definitions, which are provided to the AST consumer by the AST
+ /// reader. This behavior is required to properly cope with,
+ /// e.g., tentative variable definitions that occur within
+ /// headers. The declarations themselves are stored as declaration
+ /// IDs, since they will be written out to an EXTERNAL_DEFINITIONS
+ /// record.
+ SmallVector<uint64_t, 16> ExternalDefinitions;
+
+ /// \brief DeclContexts that have received extensions since their serialized
+ /// form.
+ ///
+ /// For namespaces, when we're chaining and encountering a namespace, we check
+ /// if its primary namespace comes from the chain. If it does, we add the
+ /// primary to this set, so that we can write out lexical content updates for
+ /// it.
+ llvm::SmallPtrSet<const DeclContext *, 16> UpdatedDeclContexts;
+
+ typedef llvm::SmallPtrSet<const Decl *, 16> DeclsToRewriteTy;
+ /// \brief Decls that will be replaced in the current dependent AST file.
+ DeclsToRewriteTy DeclsToRewrite;
+
+ /// \brief The set of Objective-C classes that have categories we
+ /// should serialize.
+ llvm::SetVector<ObjCInterfaceDecl *> ObjCClassesWithCategories;
+
+ struct ReplacedDeclInfo {
+ serialization::DeclID ID;
+ uint64_t Offset;
+ unsigned Loc;
+
+ ReplacedDeclInfo() : ID(0), Offset(0), Loc(0) {}
+ ReplacedDeclInfo(serialization::DeclID ID, uint64_t Offset,
+ SourceLocation Loc)
+ : ID(ID), Offset(Offset), Loc(Loc.getRawEncoding()) {}
+ };
+
+ /// \brief Decls that have been replaced in the current dependent AST file.
+ ///
+ /// When a decl changes fundamentally after being deserialized (this shouldn't
+ /// happen, but the ObjC AST nodes are designed this way), it will be
+ /// serialized again. In this case, it is registered here, so that the reader
+ /// knows to read the updated version.
+ SmallVector<ReplacedDeclInfo, 16> ReplacedDecls;
+
+ /// \brief The set of declarations that may have redeclaration chains that
+ /// need to be serialized.
+ llvm::SetVector<Decl *, llvm::SmallVector<Decl *, 4>,
+ llvm::SmallPtrSet<Decl *, 4> > Redeclarations;
+
+ /// \brief Statements that we've encountered while serializing a
+ /// declaration or type.
+ SmallVector<Stmt *, 16> StmtsToEmit;
+
+ /// \brief The collection of statements to use for ASTWriter::AddStmt().
+ /// It points to StmtsToEmit unless it is overridden.
+ SmallVector<Stmt *, 16> *CollectedStmts;
+
+ /// \brief Mapping from SwitchCase statements to IDs.
+ std::map<SwitchCase *, unsigned> SwitchCaseIDs;
+
+ /// \brief The number of statements written to the AST file.
+ unsigned NumStatements;
+
+ /// \brief The number of macros written to the AST file.
+ unsigned NumMacros;
+
+ /// \brief The number of lexical declcontexts written to the AST
+ /// file.
+ unsigned NumLexicalDeclContexts;
+
+ /// \brief The number of visible declcontexts written to the AST
+ /// file.
+ unsigned NumVisibleDeclContexts;
+
+ /// \brief The offset of each CXXBaseSpecifier set within the AST.
+ SmallVector<uint32_t, 4> CXXBaseSpecifiersOffsets;
+
+ /// \brief The first ID number we can use for our own base specifiers.
+ serialization::CXXBaseSpecifiersID FirstCXXBaseSpecifiersID;
+
+ /// \brief The base specifiers ID that will be assigned to the next new
+ /// set of C++ base specifiers.
+ serialization::CXXBaseSpecifiersID NextCXXBaseSpecifiersID;
+
+ /// \brief A set of C++ base specifiers that is queued to be written into the
+ /// AST file.
+ struct QueuedCXXBaseSpecifiers {
+ QueuedCXXBaseSpecifiers() : ID(), Bases(), BasesEnd() { }
+
+ QueuedCXXBaseSpecifiers(serialization::CXXBaseSpecifiersID ID,
+ CXXBaseSpecifier const *Bases,
+ CXXBaseSpecifier const *BasesEnd)
+ : ID(ID), Bases(Bases), BasesEnd(BasesEnd) { }
+
+ serialization::CXXBaseSpecifiersID ID;
+ CXXBaseSpecifier const * Bases;
+ CXXBaseSpecifier const * BasesEnd;
+ };
+
+ /// \brief Queue of C++ base specifiers to be written to the AST file,
+ /// in the order they should be written.
+ SmallVector<QueuedCXXBaseSpecifiers, 2> CXXBaseSpecifiersToWrite;
+
+ /// \brief A mapping from each known submodule to its ID number, which will
+ /// be a positive integer.
+ llvm::DenseMap<Module *, unsigned> SubmoduleIDs;
+
+ /// \brief Retrieve or create a submodule ID for this module.
+ unsigned getSubmoduleID(Module *Mod);
+
+ /// \brief Write the given subexpression to the bitstream.
+ void WriteSubStmt(Stmt *S,
+ llvm::DenseMap<Stmt *, uint64_t> &SubStmtEntries,
+ llvm::DenseSet<Stmt *> &ParentStmts);
+
+ void WriteBlockInfoBlock();
+ void WriteMetadata(ASTContext &Context, StringRef isysroot,
+ const std::string &OutputFile);
+ void WriteLanguageOptions(const LangOptions &LangOpts);
+ void WriteStatCache(MemorizeStatCalls &StatCalls);
+ void WriteSourceManagerBlock(SourceManager &SourceMgr,
+ const Preprocessor &PP,
+ StringRef isysroot);
+ void WritePreprocessor(const Preprocessor &PP, bool IsModule);
+ void WriteHeaderSearch(const HeaderSearch &HS, StringRef isysroot);
+ void WritePreprocessorDetail(PreprocessingRecord &PPRec);
+ void WriteSubmodules(Module *WritingModule);
+
+ void WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag);
+ void WriteCXXBaseSpecifiersOffsets();
+ void WriteType(QualType T);
+ uint64_t WriteDeclContextLexicalBlock(ASTContext &Context, DeclContext *DC);
+ uint64_t WriteDeclContextVisibleBlock(ASTContext &Context, DeclContext *DC);
+ void WriteTypeDeclOffsets();
+ void WriteFileDeclIDsMap();
+ void WriteSelectors(Sema &SemaRef);
+ void WriteReferencedSelectorsPool(Sema &SemaRef);
+ void WriteIdentifierTable(Preprocessor &PP, IdentifierResolver &IdResolver,
+ bool IsModule);
+ void WriteAttributes(const AttrVec &Attrs, RecordDataImpl &Record);
+ void ResolveDeclUpdatesBlocks();
+ void WriteDeclUpdatesBlocks();
+ void WriteDeclReplacementsBlock();
+ void WriteDeclContextVisibleUpdate(const DeclContext *DC);
+ void WriteFPPragmaOptions(const FPOptions &Opts);
+ void WriteOpenCLExtensions(Sema &SemaRef);
+ void WriteObjCCategories();
+ void WriteRedeclarations();
+ void WriteMergedDecls();
+
+ unsigned DeclParmVarAbbrev;
+ unsigned DeclContextLexicalAbbrev;
+ unsigned DeclContextVisibleLookupAbbrev;
+ unsigned UpdateVisibleAbbrev;
+ unsigned DeclRefExprAbbrev;
+ unsigned CharacterLiteralAbbrev;
+ unsigned DeclRecordAbbrev;
+ unsigned IntegerLiteralAbbrev;
+ unsigned DeclTypedefAbbrev;
+ unsigned DeclVarAbbrev;
+ unsigned DeclFieldAbbrev;
+ unsigned DeclEnumAbbrev;
+ unsigned DeclObjCIvarAbbrev;
+
+ void WriteDeclsBlockAbbrevs();
+ void WriteDecl(ASTContext &Context, Decl *D);
+
+ void WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
+ StringRef isysroot, const std::string &OutputFile,
+ Module *WritingModule);
+
+public:
+ /// \brief Create a new precompiled header writer that outputs to
+ /// the given bitstream.
+ ASTWriter(llvm::BitstreamWriter &Stream);
+ ~ASTWriter();
+
+ /// \brief Write a precompiled header for the given semantic analysis.
+ ///
+ /// \param SemaRef a reference to the semantic analysis object that processed
+ /// the AST to be written into the precompiled header.
+ ///
+ /// \param StatCalls the object that cached all of the stat() calls made while
+ /// searching for source files and headers.
+ ///
+ /// \param WritingModule The module that we are writing. If null, we are
+ /// writing a precompiled header.
+ ///
+ /// \param isysroot if non-empty, write a relocatable file whose headers
+ /// are relative to the given system root.
+ void WriteAST(Sema &SemaRef, MemorizeStatCalls *StatCalls,
+ const std::string &OutputFile,
+ Module *WritingModule, StringRef isysroot,
+ bool hasErrors = false);
+
+ /// \brief Emit a source location.
+ void AddSourceLocation(SourceLocation Loc, RecordDataImpl &Record);
+
+ /// \brief Emit a source range.
+ void AddSourceRange(SourceRange Range, RecordDataImpl &Record);
+
+ /// \brief Emit an integral value.
+ void AddAPInt(const llvm::APInt &Value, RecordDataImpl &Record);
+
+ /// \brief Emit a signed integral value.
+ void AddAPSInt(const llvm::APSInt &Value, RecordDataImpl &Record);
+
+ /// \brief Emit a floating-point value.
+ void AddAPFloat(const llvm::APFloat &Value, RecordDataImpl &Record);
+
+ /// \brief Emit a reference to an identifier.
+ void AddIdentifierRef(const IdentifierInfo *II, RecordDataImpl &Record);
+
+ /// \brief Emit a Selector (which is a smart pointer reference).
+ void AddSelectorRef(Selector, RecordDataImpl &Record);
+
+ /// \brief Emit a CXXTemporary.
+ void AddCXXTemporary(const CXXTemporary *Temp, RecordDataImpl &Record);
+
+ /// \brief Emit a set of C++ base specifiers to the record.
+ void AddCXXBaseSpecifiersRef(CXXBaseSpecifier const *Bases,
+ CXXBaseSpecifier const *BasesEnd,
+ RecordDataImpl &Record);
+
+ /// \brief Get the unique number used to refer to the given selector.
+ serialization::SelectorID getSelectorRef(Selector Sel);
+
+ /// \brief Get the unique number used to refer to the given identifier.
+ serialization::IdentID getIdentifierRef(const IdentifierInfo *II);
+
+ /// \brief Retrieve the offset of the macro definition for the given
+ /// identifier.
+ ///
+ /// The identifier must refer to a macro.
+ uint64_t getMacroOffset(const IdentifierInfo *II) {
+ assert(MacroOffsets.find(II) != MacroOffsets.end() &&
+ "Identifier does not name a macro");
+ return MacroOffsets[II];
+ }
+
+ /// \brief Emit a reference to a type.
+ void AddTypeRef(QualType T, RecordDataImpl &Record);
+
+ /// \brief Force a type to be emitted and get its ID.
+ serialization::TypeID GetOrCreateTypeID(QualType T);
+
+ /// \brief Determine the type ID of an already-emitted type.
+ serialization::TypeID getTypeID(QualType T) const;
+
+ /// \brief Force a type to be emitted and get its index.
+ serialization::TypeIdx GetOrCreateTypeIdx(QualType T);
+
+ /// \brief Determine the type index of an already-emitted type.
+ serialization::TypeIdx getTypeIdx(QualType T) const;
+
+ /// \brief Emits a reference to a declarator info.
+ void AddTypeSourceInfo(TypeSourceInfo *TInfo, RecordDataImpl &Record);
+
+ /// \brief Emits a type with source-location information.
+ void AddTypeLoc(TypeLoc TL, RecordDataImpl &Record);
+
+ /// \brief Emits a template argument location info.
+ void AddTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
+ const TemplateArgumentLocInfo &Arg,
+ RecordDataImpl &Record);
+
+ /// \brief Emits a template argument location.
+ void AddTemplateArgumentLoc(const TemplateArgumentLoc &Arg,
+ RecordDataImpl &Record);
+
+ /// \brief Emit a reference to a declaration.
+ void AddDeclRef(const Decl *D, RecordDataImpl &Record);
+
+
+ /// \brief Force a declaration to be emitted and get its ID.
+ serialization::DeclID GetDeclRef(const Decl *D);
+
+ /// \brief Determine the declaration ID of an already-emitted
+ /// declaration.
+ serialization::DeclID getDeclID(const Decl *D);
+
+ /// \brief Emit a declaration name.
+ void AddDeclarationName(DeclarationName Name, RecordDataImpl &Record);
+ void AddDeclarationNameLoc(const DeclarationNameLoc &DNLoc,
+ DeclarationName Name, RecordDataImpl &Record);
+ void AddDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
+ RecordDataImpl &Record);
+
+ void AddQualifierInfo(const QualifierInfo &Info, RecordDataImpl &Record);
+
+ /// \brief Emit a nested name specifier.
+ void AddNestedNameSpecifier(NestedNameSpecifier *NNS, RecordDataImpl &Record);
+
+ /// \brief Emit a nested name specifier with source-location information.
+ void AddNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
+ RecordDataImpl &Record);
+
+ /// \brief Emit a template name.
+ void AddTemplateName(TemplateName Name, RecordDataImpl &Record);
+
+ /// \brief Emit a template argument.
+ void AddTemplateArgument(const TemplateArgument &Arg, RecordDataImpl &Record);
+
+ /// \brief Emit a template parameter list.
+ void AddTemplateParameterList(const TemplateParameterList *TemplateParams,
+ RecordDataImpl &Record);
+
+ /// \brief Emit a template argument list.
+ void AddTemplateArgumentList(const TemplateArgumentList *TemplateArgs,
+ RecordDataImpl &Record);
+
+ /// \brief Emit a UnresolvedSet structure.
+ void AddUnresolvedSet(const UnresolvedSetImpl &Set, RecordDataImpl &Record);
+
+ /// \brief Emit a C++ base specifier.
+ void AddCXXBaseSpecifier(const CXXBaseSpecifier &Base,
+ RecordDataImpl &Record);
+
+ /// \brief Emit a CXXCtorInitializer array.
+ void AddCXXCtorInitializers(
+ const CXXCtorInitializer * const *CtorInitializers,
+ unsigned NumCtorInitializers,
+ RecordDataImpl &Record);
+
+ void AddCXXDefinitionData(const CXXRecordDecl *D, RecordDataImpl &Record);
+
+ /// \brief Add a string to the given record.
+ void AddString(StringRef Str, RecordDataImpl &Record);
+
+ /// \brief Add a version tuple to the given record
+ void AddVersionTuple(const VersionTuple &Version, RecordDataImpl &Record);
+
+ /// \brief Mark a declaration context as needing an update.
+ void AddUpdatedDeclContext(const DeclContext *DC) {
+ UpdatedDeclContexts.insert(DC);
+ }
+
+ void RewriteDecl(const Decl *D) {
+ DeclsToRewrite.insert(D);
+ }
+
+ bool isRewritten(const Decl *D) const {
+ return DeclsToRewrite.count(D);
+ }
+
+ /// \brief Infer the submodule ID that contains an entity at the given
+ /// source location.
+ serialization::SubmoduleID inferSubmoduleIDFromLocation(SourceLocation Loc);
+
+ /// \brief Note that the identifier II occurs at the given offset
+ /// within the identifier table.
+ void SetIdentifierOffset(const IdentifierInfo *II, uint32_t Offset);
+
+ /// \brief Note that the selector Sel occurs at the given offset
+ /// within the method pool/selector table.
+ void SetSelectorOffset(Selector Sel, uint32_t Offset);
+
+ /// \brief Add the given statement or expression to the queue of
+ /// statements to emit.
+ ///
+ /// This routine should be used when emitting types and declarations
+ /// that have expressions as part of their formulation. Once the
+ /// type or declaration has been written, call FlushStmts() to write
+ /// the corresponding statements just after the type or
+ /// declaration.
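+ ///
+ /// Illustrative pattern (a sketch of the intended call sequence; the
+ /// declaration writer shown here is hypothetical):
+ /// \code
+ ///   Writer.AddStmt(VD->getInit()); // queue the initializer expression
+ ///   // ... add the remaining fields of the declaration record ...
+ ///   Writer.FlushStmts();           // emit the queued statements afterwards
+ /// \endcode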
+ void AddStmt(Stmt *S) {
+ CollectedStmts->push_back(S);
+ }
+
+ /// \brief Flush all of the statements and expressions that have
+ /// been added to the queue via AddStmt().
+ void FlushStmts();
+
+ /// \brief Flush all of the C++ base specifier sets that have been added
+ /// via \c AddCXXBaseSpecifiersRef().
+ void FlushCXXBaseSpecifiers();
+
+ /// \brief Record an ID for the given switch-case statement.
+ unsigned RecordSwitchCaseID(SwitchCase *S);
+
+ /// \brief Retrieve the ID for the given switch-case statement.
+ unsigned getSwitchCaseID(SwitchCase *S);
+
+ void ClearSwitchCaseIDs();
+
+ unsigned getDeclParmVarAbbrev() const { return DeclParmVarAbbrev; }
+ unsigned getDeclRefExprAbbrev() const { return DeclRefExprAbbrev; }
+ unsigned getCharacterLiteralAbbrev() const { return CharacterLiteralAbbrev; }
+ unsigned getDeclRecordAbbrev() const { return DeclRecordAbbrev; }
+ unsigned getIntegerLiteralAbbrev() const { return IntegerLiteralAbbrev; }
+ unsigned getDeclTypedefAbbrev() const { return DeclTypedefAbbrev; }
+ unsigned getDeclVarAbbrev() const { return DeclVarAbbrev; }
+ unsigned getDeclFieldAbbrev() const { return DeclFieldAbbrev; }
+ unsigned getDeclEnumAbbrev() const { return DeclEnumAbbrev; }
+ unsigned getDeclObjCIvarAbbrev() const { return DeclObjCIvarAbbrev; }
+
+ bool hasChain() const { return Chain; }
+
+ // ASTDeserializationListener implementation
+ void ReaderInitialized(ASTReader *Reader);
+ void IdentifierRead(serialization::IdentID ID, IdentifierInfo *II);
+ void TypeRead(serialization::TypeIdx Idx, QualType T);
+ void SelectorRead(serialization::SelectorID ID, Selector Sel);
+ void MacroDefinitionRead(serialization::PreprocessedEntityID ID,
+ MacroDefinition *MD);
+ void MacroVisible(IdentifierInfo *II);
+ void ModuleRead(serialization::SubmoduleID ID, Module *Mod);
+
+ // ASTMutationListener implementation.
+ virtual void CompletedTagDefinition(const TagDecl *D);
+ virtual void AddedVisibleDecl(const DeclContext *DC, const Decl *D);
+ virtual void AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D);
+ virtual void AddedCXXTemplateSpecialization(const ClassTemplateDecl *TD,
+ const ClassTemplateSpecializationDecl *D);
+ virtual void AddedCXXTemplateSpecialization(const FunctionTemplateDecl *TD,
+ const FunctionDecl *D);
+ virtual void CompletedImplicitDefinition(const FunctionDecl *D);
+ virtual void StaticDataMemberInstantiated(const VarDecl *D);
+ virtual void AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
+ const ObjCInterfaceDecl *IFD);
+ virtual void AddedObjCPropertyInClassExtension(const ObjCPropertyDecl *Prop,
+ const ObjCPropertyDecl *OrigProp,
+ const ObjCCategoryDecl *ClassExt);
+};
+
+/// \brief AST and semantic-analysis consumer that generates a
+/// precompiled header from the parsed source code.
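+///
+/// Illustrative usage (a hedged sketch; \c PP and \c OS are assumed to be the
+/// preprocessor and output stream provided by the frontend):
+/// \code
+///   ASTConsumer *Consumer =
+///     new PCHGenerator(PP, "prefix.pch", /*Module=*/0, /*isysroot=*/"", OS);
+/// \endcode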
+class PCHGenerator : public SemaConsumer {
+ const Preprocessor &PP;
+ std::string OutputFile;
+ clang::Module *Module;
+ std::string isysroot;
+ raw_ostream *Out;
+ Sema *SemaPtr;
+ MemorizeStatCalls *StatCalls; // owned by the FileManager
+ llvm::SmallVector<char, 128> Buffer;
+ llvm::BitstreamWriter Stream;
+ ASTWriter Writer;
+
+protected:
+ ASTWriter &getWriter() { return Writer; }
+ const ASTWriter &getWriter() const { return Writer; }
+
+public:
+ PCHGenerator(const Preprocessor &PP, StringRef OutputFile,
+ clang::Module *Module,
+ StringRef isysroot, raw_ostream *Out);
+ ~PCHGenerator();
+ virtual void InitializeSema(Sema &S) { SemaPtr = &S; }
+ virtual void HandleTranslationUnit(ASTContext &Ctx);
+ virtual ASTMutationListener *GetASTMutationListener();
+ virtual ASTDeserializationListener *GetASTDeserializationListener();
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ContinuousRangeMap.h b/contrib/llvm/tools/clang/include/clang/Serialization/ContinuousRangeMap.h
new file mode 100644
index 0000000..f368a80
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ContinuousRangeMap.h
@@ -0,0 +1,130 @@
+//===--- ContinuousRangeMap.h - Map with int range as key -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ContinuousRangeMap class, which is a highly
+// specialized container used by serialization.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SERIALIZATION_CONTINUOUS_RANGE_MAP_H
+#define LLVM_CLANG_SERIALIZATION_CONTINUOUS_RANGE_MAP_H
+
+#include "llvm/ADT/SmallVector.h"
+#include <algorithm>
+#include <utility>
+
+namespace clang {
+
+/// \brief A map from continuous integer ranges to some value, with a very
+/// specialized interface.
+///
+/// CRM maps from integer ranges to values. The ranges are continuous, i.e.
+/// where one ends, the next one begins. So if the map contains the stops I0-3,
+/// the first range is from I0 to I1, the second from I1 to I2, the third from
+/// I2 to I3 and the last from I3 to infinity.
+///
+/// Ranges must be inserted in order. Inserting a new stop I4 into the map will
+/// shrink the fourth range to I3 to I4 and add the new range I4 to inf.
+template <typename Int, typename V, unsigned InitialCapacity>
+class ContinuousRangeMap {
+public:
+ typedef std::pair<Int, V> value_type;
+ typedef value_type &reference;
+ typedef const value_type &const_reference;
+ typedef value_type *pointer;
+ typedef const value_type *const_pointer;
+
+private:
+ typedef SmallVector<value_type, InitialCapacity> Representation;
+ Representation Rep;
+
+ struct Compare {
+ bool operator ()(const_reference L, Int R) const {
+ return L.first < R;
+ }
+ bool operator ()(Int L, const_reference R) const {
+ return L < R.first;
+ }
+ bool operator ()(Int L, Int R) const {
+ return L < R;
+ }
+ bool operator ()(const_reference L, const_reference R) const {
+ return L.first < R.first;
+ }
+ };
+
+public:
+ void insert(const value_type &Val) {
+ if (!Rep.empty() && Rep.back() == Val)
+ return;
+
+ assert((Rep.empty() || Rep.back().first < Val.first) &&
+ "Must insert keys in order.");
+ Rep.push_back(Val);
+ }
+
+ void insertOrReplace(const value_type &Val) {
+ iterator I = std::lower_bound(Rep.begin(), Rep.end(), Val, Compare());
+ if (I != Rep.end() && I->first == Val.first) {
+ I->second = Val.second;
+ return;
+ }
+
+ Rep.insert(I, Val);
+ }
+
+ typedef typename Representation::iterator iterator;
+ typedef typename Representation::const_iterator const_iterator;
+
+ iterator begin() { return Rep.begin(); }
+ iterator end() { return Rep.end(); }
+ const_iterator begin() const { return Rep.begin(); }
+ const_iterator end() const { return Rep.end(); }
+
+ iterator find(Int K) {
+ iterator I = std::upper_bound(Rep.begin(), Rep.end(), K, Compare());
+ // I points to the first entry with a key > K, which is the range that
+ // follows the one containing K.
+ if (I == Rep.begin())
+ return Rep.end();
+ --I;
+ return I;
+ }
+ const_iterator find(Int K) const {
+ return const_cast<ContinuousRangeMap*>(this)->find(K);
+ }
+
+ reference back() { return Rep.back(); }
+ const_reference back() const { return Rep.back(); }
+
+ /// \brief An object that helps properly build a continuous range map
+ /// from a set of values.
+ class Builder {
+ ContinuousRangeMap &Self;
+
+ Builder(const Builder&); // DO NOT IMPLEMENT
+ Builder &operator=(const Builder&); // DO NOT IMPLEMENT
+
+ public:
+ explicit Builder(ContinuousRangeMap &Self) : Self(Self) { }
+
+ ~Builder() {
+ std::sort(Self.Rep.begin(), Self.Rep.end(), Compare());
+ }
+
+ void insert(const value_type &Val) {
+ Self.Rep.push_back(Val);
+ }
+ };
+ friend class Builder;
+};
+
+}
+
+#endif
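
To make the "stops" description in the class comment concrete, here is a minimal usage sketch; the keys and values are invented for illustration.

#include "clang/Serialization/ContinuousRangeMap.h"
#include <cassert>
#include <utility>

void continuousRangeMapExample() {
  clang::ContinuousRangeMap<unsigned, int, 4> Map;
  Map.insert(std::make_pair(0u, 10));    // stop 0:   [0, 100)   -> 10
  Map.insert(std::make_pair(100u, 20));  // stop 100: [100, 500) -> 20
  Map.insert(std::make_pair(500u, 30));  // stop 500: [500, inf) -> 30

  assert(Map.find(42)->second == 10);    // falls inside the first range
  assert(Map.find(100)->second == 20);   // a stop belongs to the range it opens
  assert(Map.find(9999)->second == 30);  // everything past the last stop
}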
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/Module.h b/contrib/llvm/tools/clang/include/clang/Serialization/Module.h
new file mode 100644
index 0000000..4c93c33
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/Module.h
@@ -0,0 +1,358 @@
+//===--- Module.h - Module description --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ModuleFile class, which describes a module that has
+// been loaded from an AST file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SERIALIZATION_MODULE_H
+#define LLVM_CLANG_SERIALIZATION_MODULE_H
+
+#include "clang/Serialization/ASTBitCodes.h"
+#include "clang/Serialization/ContinuousRangeMap.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Bitcode/BitstreamReader.h"
+#include <string>
+
+namespace clang {
+
+class DeclContext;
+class Module;
+
+namespace serialization {
+
+/// \brief Specifies the kind of module that has been loaded.
+enum ModuleKind {
+ MK_Module, ///< File is a module proper.
+ MK_PCH, ///< File is a PCH file treated as such.
+ MK_Preamble, ///< File is a PCH file treated as the preamble.
+ MK_MainFile ///< File is a PCH file treated as the actual main file.
+};
+
+/// \brief Information about the contents of a DeclContext.
+struct DeclContextInfo {
+ DeclContextInfo()
+ : NameLookupTableData(), LexicalDecls(), NumLexicalDecls() {}
+
+ void *NameLookupTableData; // an ASTDeclContextNameLookupTable.
+ const KindDeclIDPair *LexicalDecls;
+ unsigned NumLexicalDecls;
+};
+
+/// \brief Information about a module that has been loaded by the ASTReader.
+///
+/// Each instance of the ModuleFile class corresponds to a single AST file,
+/// which may be a precompiled header, precompiled preamble, a module, or an
+/// AST file of some sort loaded as the main file, all of which are specific
+/// formulations of the general notion of a "module". A module may depend on
+/// any number of other modules.
+class ModuleFile {
+public:
+ ModuleFile(ModuleKind Kind, unsigned Generation);
+ ~ModuleFile();
+
+ // === General information ===
+
+ /// \brief The type of this module.
+ ModuleKind Kind;
+
+ /// \brief The file name of the module file.
+ std::string FileName;
+
+ /// \brief Whether this module has been directly imported by the
+ /// user.
+ bool DirectlyImported;
+
+ /// \brief The generation of which this module file is a part.
+ unsigned Generation;
+
+ /// \brief The memory buffer that stores the data associated with
+ /// this AST file.
+ OwningPtr<llvm::MemoryBuffer> Buffer;
+
+ /// \brief The size of this file, in bits.
+ uint64_t SizeInBits;
+
+ /// \brief The global bit offset (or base) of this module
+ uint64_t GlobalBitOffset;
+
+ /// \brief The bitstream reader from which we'll read the AST file.
+ llvm::BitstreamReader StreamFile;
+
+ /// \brief The main bitstream cursor for the main block.
+ llvm::BitstreamCursor Stream;
+
+ /// \brief The source location where this module was first imported.
+ SourceLocation ImportLoc;
+
+ /// \brief The first source location in this module.
+ SourceLocation FirstLoc;
+
+ // === Source Locations ===
+
+ /// \brief Cursor used to read source location entries.
+ llvm::BitstreamCursor SLocEntryCursor;
+
+ /// \brief The number of source location entries in this AST file.
+ unsigned LocalNumSLocEntries;
+
+ /// \brief The base ID in the source manager's view of this module.
+ int SLocEntryBaseID;
+
+ /// \brief The base offset in the source manager's view of this module.
+ unsigned SLocEntryBaseOffset;
+
+ /// \brief Offsets for all of the source location entries in the
+ /// AST file.
+ const uint32_t *SLocEntryOffsets;
+
+ /// \brief SLocEntries that we're going to preload.
+ SmallVector<uint64_t, 4> PreloadSLocEntries;
+
+ /// \brief The number of source location file entries in this AST file.
+ unsigned LocalNumSLocFileEntries;
+
+ /// \brief Offsets for all of the source location file entries in the
+ /// AST file.
+ const uint32_t *SLocFileOffsets;
+
+ /// \brief Remapping table for source locations in this module.
+ ContinuousRangeMap<uint32_t, int, 2> SLocRemap;
+
+ // === Identifiers ===
+
+ /// \brief The number of identifiers in this AST file.
+ unsigned LocalNumIdentifiers;
+
+ /// \brief Offsets into the identifier table data.
+ ///
+ /// This array is indexed by the identifier ID (-1), and provides
+ /// the offset into IdentifierTableData where the string data is
+ /// stored.
+ const uint32_t *IdentifierOffsets;
+
+ /// \brief Base identifier ID for identifiers local to this module.
+ serialization::IdentID BaseIdentifierID;
+
+ /// \brief Remapping table for identifier IDs in this module.
+ ContinuousRangeMap<uint32_t, int, 2> IdentifierRemap;
+
+ /// \brief Actual data for the on-disk hash table of identifiers.
+ ///
+ /// This pointer points into a memory buffer, where the on-disk hash
+ /// table for identifiers actually lives.
+ const char *IdentifierTableData;
+
+ /// \brief A pointer to an on-disk hash table of opaque type
+ /// IdentifierHashTable.
+ void *IdentifierLookupTable;
+
+ // === Macros ===
+
+ /// \brief The cursor to the start of the preprocessor block, which stores
+ /// all of the macro definitions.
+ llvm::BitstreamCursor MacroCursor;
+
+ /// \brief The offset of the start of the set of defined macros.
+ uint64_t MacroStartOffset;
+
+ // === Detailed PreprocessingRecord ===
+
+ /// \brief The cursor to the start of the (optional) detailed preprocessing
+ /// record block.
+ llvm::BitstreamCursor PreprocessorDetailCursor;
+
+ /// \brief The offset of the start of the preprocessor detail cursor.
+ uint64_t PreprocessorDetailStartOffset;
+
+ /// \brief Base preprocessed entity ID for preprocessed entities local to
+ /// this module.
+ serialization::PreprocessedEntityID BasePreprocessedEntityID;
+
+ /// \brief Remapping table for preprocessed entity IDs in this module.
+ ContinuousRangeMap<uint32_t, int, 2> PreprocessedEntityRemap;
+
+ const PPEntityOffset *PreprocessedEntityOffsets;
+ unsigned NumPreprocessedEntities;
+
+ // === Header search information ===
+
+ /// \brief The number of local HeaderFileInfo structures.
+ unsigned LocalNumHeaderFileInfos;
+
+ /// \brief Actual data for the on-disk hash table of header file
+ /// information.
+ ///
+ /// This pointer points into a memory buffer, where the on-disk hash
+ /// table for header file information actually lives.
+ const char *HeaderFileInfoTableData;
+
+ /// \brief The on-disk hash table that contains information about each of
+ /// the header files.
+ void *HeaderFileInfoTable;
+
+ /// \brief Actual data for the list of framework names used in the header
+ /// search information.
+ const char *HeaderFileFrameworkStrings;
+
+ // === Submodule information ===
+ /// \brief The number of submodules in this module.
+ unsigned LocalNumSubmodules;
+
+ /// \brief Base submodule ID for submodules local to this module.
+ serialization::SubmoduleID BaseSubmoduleID;
+
+ /// \brief Remapping table for submodule IDs in this module.
+ ContinuousRangeMap<uint32_t, int, 2> SubmoduleRemap;
+
+ // === Selectors ===
+
+ /// \brief The number of selectors new to this file.
+ ///
+ /// This is the number of entries in SelectorOffsets.
+ unsigned LocalNumSelectors;
+
+ /// \brief Offsets into the selector lookup table's data array
+ /// where each selector resides.
+ const uint32_t *SelectorOffsets;
+
+ /// \brief Base selector ID for selectors local to this module.
+ serialization::SelectorID BaseSelectorID;
+
+ /// \brief Remapping table for selector IDs in this module.
+ ContinuousRangeMap<uint32_t, int, 2> SelectorRemap;
+
+ /// \brief A pointer to the character data that comprises the selector table
+ ///
+ /// The SelectorOffsets table refers into this memory.
+ const unsigned char *SelectorLookupTableData;
+
+ /// \brief A pointer to an on-disk hash table of opaque type
+ /// ASTSelectorLookupTable.
+ ///
+ /// This hash table provides the IDs of all selectors, and the associated
+ /// instance and factory methods.
+ void *SelectorLookupTable;
+
+ // === Declarations ===
+
+ /// DeclsCursor - This is a cursor to the start of the DECLS_BLOCK block. It
+ /// has read all the abbreviations at the start of the block and is ready to
+ /// jump around with these in context.
+ llvm::BitstreamCursor DeclsCursor;
+
+ /// \brief The number of declarations in this AST file.
+ unsigned LocalNumDecls;
+
+ /// \brief Offset of each declaration within the bitstream, indexed
+ /// by the declaration ID (-1).
+ const DeclOffset *DeclOffsets;
+
+ /// \brief Base declaration ID for declarations local to this module.
+ serialization::DeclID BaseDeclID;
+
+ /// \brief Remapping table for declaration IDs in this module.
+ ContinuousRangeMap<uint32_t, int, 2> DeclRemap;
+
+ /// \brief Mapping from the module files that this module file depends on
+ /// to the base declaration ID for that module as it is understood within this
+ /// module.
+ ///
+ /// This is effectively a reverse global-to-local mapping for declaration
+ /// IDs, so that we can interpret a true global ID (for this translation unit)
+ /// as a local ID (for this module file).
+ llvm::DenseMap<ModuleFile *, serialization::DeclID> GlobalToLocalDeclIDs;
+
+ /// \brief The number of C++ base specifier sets in this AST file.
+ unsigned LocalNumCXXBaseSpecifiers;
+
+ /// \brief Offset of each C++ base specifier set within the bitstream,
+ /// indexed by the C++ base specifier set ID (-1).
+ const uint32_t *CXXBaseSpecifiersOffsets;
+
+ typedef llvm::DenseMap<const DeclContext *, DeclContextInfo>
+ DeclContextInfosMap;
+
+ /// \brief Information about the lexical and visible declarations
+ /// for each DeclContext.
+ DeclContextInfosMap DeclContextInfos;
+
+ /// \brief Array of file-level DeclIDs sorted by file.
+ const serialization::DeclID *FileSortedDecls;
+
+ /// \brief Array of redeclaration chain location information within this
+ /// module file, sorted by the first declaration ID.
+ const serialization::LocalRedeclarationsInfo *RedeclarationsMap;
+
+ /// \brief The number of redeclaration info entries in RedeclarationsMap.
+ unsigned LocalNumRedeclarationsInMap;
+
+ /// \brief The redeclaration chains for declarations local to this
+ /// module file.
+ SmallVector<uint64_t, 1> RedeclarationChains;
+
+ /// \brief Array of category list location information within this
+ /// module file, sorted by the definition ID.
+ const serialization::ObjCCategoriesInfo *ObjCCategoriesMap;
+
+ /// \brief The number of redeclaration info entries in ObjCCategoriesMap.
+ unsigned LocalNumObjCCategoriesInMap;
+
+ /// \brief The Objective-C category lists for categories known to this
+ /// module.
+ SmallVector<uint64_t, 1> ObjCCategories;
+
+ // === Types ===
+
+ /// \brief The number of types in this AST file.
+ unsigned LocalNumTypes;
+
+ /// \brief Offset of each type within the bitstream, indexed by the
+ /// type ID, or the representation of a Type*.
+ const uint32_t *TypeOffsets;
+
+ /// \brief Base type ID for types local to this module as represented in
+ /// the global type ID space.
+ serialization::TypeID BaseTypeIndex;
+
+ /// \brief Remapping table for type IDs in this module.
+ ContinuousRangeMap<uint32_t, int, 2> TypeRemap;
+
+ // === Miscellaneous ===
+
+ /// \brief Diagnostic IDs and their mappings that the user changed.
+ SmallVector<uint64_t, 8> PragmaDiagMappings;
+
+ /// \brief The AST stat cache installed for this file, if any.
+ ///
+ /// The dynamic type of this stat cache is always ASTStatCache
+ void *StatCache;
+
+ /// \brief List of modules which depend on this module.
+ llvm::SetVector<ModuleFile *> ImportedBy;
+
+ /// \brief List of modules which this module depends on.
+ llvm::SetVector<ModuleFile *> Imports;
+
+ /// \brief Determine whether this module was directly imported at
+ /// any point during translation.
+ bool isDirectlyImported() const { return DirectlyImported; }
+
+ /// \brief Dump debugging output for this module.
+ void dump();
+};
+
+} // end namespace serialization
+
+} // end namespace clang
+
+#endif
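
The GlobalToLocalDeclIDs comment above describes a reverse global-to-local mapping; the small helpers below are hypothetical (they are not part of the header) and only show how that member and the Imports list can be consulted. The real translation logic lives in the ASTReader.

#include "clang/Serialization/Module.h"

// A result of 0 simply means "no mapping recorded for that dependency".
clang::serialization::DeclID
baseDeclIDFor(clang::serialization::ModuleFile &M,
              clang::serialization::ModuleFile *Dependency) {
  return M.GlobalToLocalDeclIDs.lookup(Dependency);
}

// Count how many of M's direct dependencies have a recorded base DeclID.
unsigned countMappedDependencies(clang::serialization::ModuleFile &M) {
  unsigned N = 0;
  for (llvm::SetVector<clang::serialization::ModuleFile *>::iterator
           I = M.Imports.begin(), E = M.Imports.end(); I != E; ++I)
    if (baseDeclIDFor(M, *I) != 0)
      ++N;
  return N;
}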
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/ModuleManager.h b/contrib/llvm/tools/clang/include/clang/Serialization/ModuleManager.h
new file mode 100644
index 0000000..6ff0640
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/ModuleManager.h
@@ -0,0 +1,158 @@
+//===--- ModuleManager.h - Module Manager -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ModuleManager class, which manages a set of loaded
+// modules for the ASTReader.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SERIALIZATION_MODULE_MANAGER_H
+#define LLVM_CLANG_SERIALIZATION_MODULE_MANAGER_H
+
+#include "clang/Serialization/Module.h"
+#include "clang/Basic/FileManager.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace clang {
+
+namespace serialization {
+
+/// \brief Manages the set of modules loaded by an AST reader.
+class ModuleManager {
+ /// \brief The chain of AST files. The first entry is the one named by the
+ /// user, the last one is the one that doesn't depend on anything further.
+ llvm::SmallVector<ModuleFile*, 2> Chain;
+
+ /// \brief All loaded modules, indexed by name.
+ llvm::DenseMap<const FileEntry *, ModuleFile *> Modules;
+
+ /// \brief FileManager that handles translating between filenames and
+ /// FileEntry *.
+ FileManager FileMgr;
+
+ /// \brief A lookup of in-memory (virtual file) buffers
+ llvm::DenseMap<const FileEntry *, llvm::MemoryBuffer *> InMemoryBuffers;
+
+public:
+ typedef SmallVector<ModuleFile*, 2>::iterator ModuleIterator;
+ typedef SmallVector<ModuleFile*, 2>::const_iterator ModuleConstIterator;
+ typedef SmallVector<ModuleFile*, 2>::reverse_iterator ModuleReverseIterator;
+ typedef std::pair<uint32_t, StringRef> ModuleOffset;
+
+ ModuleManager(const FileSystemOptions &FSO);
+ ~ModuleManager();
+
+ /// \brief Forward iterator to traverse all loaded modules. This is in
+ /// reverse source-order.
+ ModuleIterator begin() { return Chain.begin(); }
+ /// \brief Forward iterator end-point to traverse all loaded modules
+ ModuleIterator end() { return Chain.end(); }
+
+ /// \brief Const forward iterator to traverse all loaded modules. This is
+ /// in reverse source-order.
+ ModuleConstIterator begin() const { return Chain.begin(); }
+ /// \brief Const forward iterator end-point to traverse all loaded modules
+ ModuleConstIterator end() const { return Chain.end(); }
+
+ /// \brief Reverse iterator to traverse all loaded modules. This is in
+ /// source order.
+ ModuleReverseIterator rbegin() { return Chain.rbegin(); }
+ /// \brief Reverse iterator end-point to traverse all loaded modules.
+ ModuleReverseIterator rend() { return Chain.rend(); }
+
+ /// \brief Returns the primary module associated with the manager, that is,
+ /// the first module loaded.
+ ModuleFile &getPrimaryModule() { return *Chain[0]; }
+
+ /// \brief Returns the primary module associated with the manager, that is,
+ /// the first module loaded.
+ ModuleFile &getPrimaryModule() const { return *Chain[0]; }
+
+ /// \brief Returns the module associated with the given index
+ ModuleFile &operator[](unsigned Index) const { return *Chain[Index]; }
+
+ /// \brief Returns the module associated with the given name
+ ModuleFile *lookup(StringRef Name);
+
+ /// \brief Returns the in-memory (virtual file) buffer with the given name
+ llvm::MemoryBuffer *lookupBuffer(StringRef Name);
+
+ /// \brief Number of modules loaded
+ unsigned size() const { return Chain.size(); }
+ /// \brief Attempts to create a new module and add it to the list of known
+ /// modules.
+ ///
+ /// \param FileName The file name of the module to be loaded.
+ ///
+ /// \param Type The kind of module being loaded.
+ ///
+ /// \param ImportedBy The module that is importing this module, or NULL if
+ /// this module is imported directly by the user.
+ ///
+ /// \param Generation The generation in which this module was loaded.
+ ///
+ /// \param ErrorStr Will be set to a non-empty string if any errors occurred
+ /// while trying to load the module.
+ ///
+ /// \return A pointer to the module that corresponds to this file name,
+ /// and a boolean indicating whether the module was newly added.
+ std::pair<ModuleFile *, bool>
+ addModule(StringRef FileName, ModuleKind Type, ModuleFile *ImportedBy,
+ unsigned Generation, std::string &ErrorStr);
+
+ /// \brief Add an in-memory buffer to the list of known buffers.
+ void addInMemoryBuffer(StringRef FileName, llvm::MemoryBuffer *Buffer);
+
+ /// \brief Visit each of the modules.
+ ///
+ /// This routine visits each of the modules, starting with the
+ /// "root" modules that no other loaded modules depend on, and
+ /// proceeding to the leaf modules, visiting each module only once
+ /// during the traversal.
+ ///
+ /// This traversal is intended to support various "lookup"
+ /// operations that can find data in any of the loaded modules.
+ ///
+ /// \param Visitor A visitor function that will be invoked with each
+ /// module and the given user data pointer. The return value must be
+ /// convertible to bool; when false, the visitation continues to
+ /// modules that the current module depends on. When true, the
+ /// visitation skips any modules that the current module depends on.
+ ///
+ /// \param UserData User data associated with the visitor object, which
+ /// will be passed along to the visitor.
+ void visit(bool (*Visitor)(ModuleFile &M, void *UserData), void *UserData);
+
+ /// \brief Visit each of the modules with a depth-first traversal.
+ ///
+ /// This routine visits each of the modules known to the module
+ /// manager using a depth-first search, starting with the first
+ /// loaded module. The traversal invokes the callback both before
+ /// traversing the children (preorder traversal) and after
+ /// traversing the children (postorder traversal).
+ ///
+ /// \param Visitor A visitor function that will be invoked with each
+ /// module and given a \c Preorder flag that indicates whether we're
+ /// visiting the module before or after visiting its children. The
+ /// visitor may return true at any time to abort the depth-first
+ /// visitation.
+ ///
+ /// \param UserData User data associated with the visitor object,
+ /// which will be passed along to the visitor.
+ void visitDepthFirst(bool (*Visitor)(ModuleFile &M, bool Preorder,
+ void *UserData),
+ void *UserData);
+
+ /// \brief View the graphviz representation of the module graph.
+ void viewGraph();
+};
+
+} } // end namespace clang::serialization
+
+#endif
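
The documentation for visit() above describes a callback-plus-user-data traversal; the sketch below is a hypothetical lookup built on that interface (the header's own lookup() gives a direct way to do this, so the point here is only the visit() calling convention). findModuleFileByName and FindByNameState are illustrative names only.

#include "clang/Serialization/ModuleManager.h"

namespace {
// User data threaded through the void* parameter of visit().
struct FindByNameState {
  const char *Name;
  clang::serialization::ModuleFile *Found;
};

// Matches the bool(ModuleFile &, void *) signature expected by visit().
// Returning true once a match is found skips everything the matching
// module file depends on, per the comment on visit().
bool findByNameVisitor(clang::serialization::ModuleFile &M, void *UserData) {
  FindByNameState &S = *static_cast<FindByNameState *>(UserData);
  if (M.FileName == S.Name) {
    S.Found = &M;
    return true;
  }
  return false;
}
} // end anonymous namespace

clang::serialization::ModuleFile *
findModuleFileByName(clang::serialization::ModuleManager &Mgr,
                     const char *Name) {
  FindByNameState State = { Name, 0 };
  Mgr.visit(findByNameVisitor, &State);
  return State.Found;
}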
diff --git a/contrib/llvm/tools/clang/include/clang/Serialization/SerializationDiagnostic.h b/contrib/llvm/tools/clang/include/clang/Serialization/SerializationDiagnostic.h
new file mode 100644
index 0000000..e63f814
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Serialization/SerializationDiagnostic.h
@@ -0,0 +1,28 @@
+//===--- SerializationDiagnostic.h - Serialization Diagnostics -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SERIALIZATIONDIAGNOSTIC_H
+#define LLVM_CLANG_SERIALIZATIONDIAGNOSTIC_H
+
+#include "clang/Basic/Diagnostic.h"
+
+namespace clang {
+ namespace diag {
+ enum {
+#define DIAG(ENUM,FLAGS,DEFAULT_MAPPING,DESC,GROUP,\
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER,CATEGORY) ENUM,
+#define SERIALIZATIONSTART
+#include "clang/Basic/DiagnosticSerializationKinds.inc"
+#undef DIAG
+ NUM_BUILTIN_SERIALIZATION_DIAGNOSTICS
+ };
+ } // end namespace diag
+} // end namespace clang
+
+#endif
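
The enum above is built with the usual X-macro trick: the .inc file is a list of DIAG(...) entries, and defining DIAG to keep only the first argument turns each entry into an enumerator. A self-contained illustration with a hypothetical list (the real entries come from DiagnosticSerializationKinds.inc):

#include <cstdio>

// Hypothetical stand-in for the generated .inc file.
#define EXAMPLE_SERIALIZATION_DIAGS \
  DIAG(err_example_malformed_block, "malformed block record") \
  DIAG(err_example_version_mismatch, "AST version mismatch")

enum ExampleDiagIDs {
#define DIAG(ENUM, DESC) ENUM,
  EXAMPLE_SERIALIZATION_DIAGS
#undef DIAG
  NUM_EXAMPLE_DIAGNOSTICS
};

int main() {
  // A second expansion of the same list builds a parallel string table.
  static const char *const Descriptions[] = {
#define DIAG(ENUM, DESC) DESC,
    EXAMPLE_SERIALIZATION_DIAGS
#undef DIAG
  };
  std::printf("%u diagnostics, first: %s\n",
              unsigned(NUM_EXAMPLE_DIAGNOSTICS), Descriptions[0]);
  return 0;
}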
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td
new file mode 100644
index 0000000..11f1e5d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/CheckerBase.td
@@ -0,0 +1,39 @@
+//===--- CheckerBase.td - Checker TableGen classes ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TableGen core definitions for checkers
+//
+//===----------------------------------------------------------------------===//
+
+class CheckerGroup<string name> {
+ string GroupName = name;
+}
+class InGroup<CheckerGroup G> { CheckerGroup Group = G; }
+
+class Package<string name> {
+ string PackageName = name;
+ bit Hidden = 0;
+ Package ParentPackage;
+ CheckerGroup Group;
+}
+class InPackage<Package P> { Package ParentPackage = P; }
+
+// All checkers are an indirect subclass of this.
+class Checker<string name = ""> {
+ string CheckerName = name;
+ string DescFile;
+ string HelpText;
+ bit Hidden = 0;
+ Package ParentPackage;
+ CheckerGroup Group;
+}
+
+class DescFile<string filename> { string DescFile = filename; }
+class HelpText<string text> { string HelpText = text; }
+class Hidden { bit Hidden = 1; }
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/ClangCheckers.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/ClangCheckers.h
new file mode 100644
index 0000000..cf0a30a
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/ClangCheckers.h
@@ -0,0 +1,22 @@
+//===--- ClangCheckers.h - Provides builtin checkers ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_STATICANALYZER_CHECKERS_CLANGCHECKERS_H
+#define LLVM_CLANG_STATICANALYZER_CHECKERS_CLANGCHECKERS_H
+
+namespace clang {
+namespace ento {
+class CheckerRegistry;
+
+void registerBuiltinCheckers(CheckerRegistry &registry);
+
+} // end namespace ento
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/CommonBugCategories.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/CommonBugCategories.h
new file mode 100644
index 0000000..9d4251b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/CommonBugCategories.h
@@ -0,0 +1,24 @@
+//=--- CommonBugCategories.h - Provides common issue categories -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_STATIC_ANALYZER_CHECKER_CATEGORIES_H
+#define LLVM_CLANG_STATIC_ANALYZER_CHECKER_CATEGORIES_H
+
+// Common strings used for the "category" of many static analyzer issues.
+namespace clang {
+ namespace ento {
+ namespace categories {
+ extern const char *CoreFoundationObjectiveC;
+ extern const char *MemoryCoreFoundationObjectiveC;
+ extern const char *UnixAPI;
+ }
+ }
+}
+#endif
+
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/DereferenceChecker.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/DereferenceChecker.h
new file mode 100644
index 0000000..f9cce9c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/DereferenceChecker.h
@@ -0,0 +1,35 @@
+//== DereferenceChecker.h - Null dereference checker -----------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines NullDerefChecker and UndefDerefChecker, two builtin checks
+// in ExprEngine that check for null and undefined pointers at loads
+// and stores.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_DEREFCHECKER
+#define LLVM_CLANG_GR_DEREFCHECKER
+
+#include <utility>
+
+namespace clang {
+
+namespace ento {
+
+class ExprEngine;
+class ExplodedNode;
+
+std::pair<ExplodedNode * const *, ExplodedNode * const *>
+GetImplicitNullDereferences(ExprEngine &Eng);
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
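
GetImplicitNullDereferences() returns a begin/end pair of ExplodedNode pointers, i.e. a plain [first, second) range. A minimal hypothetical walk over that range (countImplicitNullDereferences is not part of the header):

#include "clang/StaticAnalyzer/Checkers/DereferenceChecker.h"

unsigned countImplicitNullDereferences(clang::ento::ExprEngine &Eng) {
  std::pair<clang::ento::ExplodedNode *const *,
            clang::ento::ExplodedNode *const *> Range =
      clang::ento::GetImplicitNullDereferences(Eng);
  unsigned Count = 0;
  for (clang::ento::ExplodedNode *const *I = Range.first;
       I != Range.second; ++I)
    ++Count;  // each *I is a node at which an implicit null deref occurred
  return Count;
}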
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/LocalCheckers.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/LocalCheckers.h
new file mode 100644
index 0000000..eee38e9
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Checkers/LocalCheckers.h
@@ -0,0 +1,28 @@
+//==- LocalCheckers.h - Intra-Procedural+Flow-Sensitive Checkers -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface to call a set of intra-procedural (local)
+// checkers that use flow/path-sensitive analyses to find bugs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_LOCALCHECKERS_H
+#define LLVM_CLANG_GR_LOCALCHECKERS_H
+
+namespace clang {
+namespace ento {
+
+class ExprEngine;
+
+void RegisterCallInliner(ExprEngine &Eng);
+
+} // end namespace ento
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
new file mode 100644
index 0000000..2b699a8
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporter.h
@@ -0,0 +1,453 @@
+//===--- BugReporter.h - Generate PathDiagnostics --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BugReporter, a utility class for generating
+// PathDiagnostics for analyses based on ProgramState.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_BUGREPORTER
+#define LLVM_CLANG_GR_BUGREPORTER
+
+#include "clang/Basic/SourceLocation.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ilist.h"
+#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/ImmutableSet.h"
+#include "llvm/ADT/DenseSet.h"
+
+namespace clang {
+
+class ASTContext;
+class DiagnosticsEngine;
+class Stmt;
+class ParentMap;
+
+namespace ento {
+
+class PathDiagnostic;
+class ExplodedNode;
+class ExplodedGraph;
+class BugReport;
+class BugReporter;
+class BugReporterContext;
+class ExprEngine;
+class BugType;
+
+//===----------------------------------------------------------------------===//
+// Interface for individual bug reports.
+//===----------------------------------------------------------------------===//
+
+/// This class provides an interface through which checkers can create
+/// individual bug reports.
+class BugReport : public llvm::ilist_node<BugReport> {
+public:
+ class NodeResolver {
+ virtual void anchor();
+ public:
+ virtual ~NodeResolver() {}
+ virtual const ExplodedNode*
+ getOriginalNode(const ExplodedNode *N) = 0;
+ };
+
+ typedef const SourceRange *ranges_iterator;
+ typedef SmallVector<BugReporterVisitor *, 8> VisitorList;
+ typedef VisitorList::iterator visitor_iterator;
+ typedef SmallVector<StringRef, 2> ExtraTextList;
+
+protected:
+ friend class BugReporter;
+ friend class BugReportEquivClass;
+
+ BugType& BT;
+ const Decl *DeclWithIssue;
+ std::string ShortDescription;
+ std::string Description;
+ PathDiagnosticLocation Location;
+ PathDiagnosticLocation UniqueingLocation;
+ const ExplodedNode *ErrorNode;
+ SmallVector<SourceRange, 4> Ranges;
+ ExtraTextList ExtraText;
+
+ typedef llvm::DenseSet<SymbolRef> Symbols;
+ typedef llvm::DenseSet<const MemRegion *> Regions;
+
+ /// A set of symbols that are registered with this report as being
+ /// "interesting", and thus used to help decide which diagnostics
+ /// to include when constructing the final path diagnostic.
+ Symbols interestingSymbols;
+
+ /// A set of regions that are registered with this report as being
+ /// "interesting", and thus used to help decide which diagnostics
+ /// to include when constructing the final path diagnostic.
+ Regions interestingRegions;
+
+ /// A set of custom visitors which generate "event" diagnostics at
+ /// interesting points in the path.
+ VisitorList Callbacks;
+
+ /// Used for ensuring the visitors are only added once.
+ llvm::FoldingSet<BugReporterVisitor> CallbacksSet;
+
+ /// Used for clients to tell if the report's configuration has changed
+ /// since the last time they checked.
+ unsigned ConfigurationChangeToken;
+
+public:
+ BugReport(BugType& bt, StringRef desc, const ExplodedNode *errornode)
+ : BT(bt), DeclWithIssue(0), Description(desc), ErrorNode(errornode),
+ ConfigurationChangeToken(0) {}
+
+ BugReport(BugType& bt, StringRef shortDesc, StringRef desc,
+ const ExplodedNode *errornode)
+ : BT(bt), DeclWithIssue(0), ShortDescription(shortDesc), Description(desc),
+ ErrorNode(errornode), ConfigurationChangeToken(0) {}
+
+ BugReport(BugType& bt, StringRef desc, PathDiagnosticLocation l)
+ : BT(bt), DeclWithIssue(0), Description(desc), Location(l), ErrorNode(0),
+ ConfigurationChangeToken(0) {}
+
+ /// \brief Create a BugReport with a custom uniqueing location.
+ ///
+ /// Reports that have the same location, description, bug type, and ranges
+ /// are uniqued - only one of the equivalent reports will be presented to
+ /// the user. This constructor allows resetting the location which should
+ /// be used for uniquing reports. For example, a memory leak checker could
+ /// set this to the allocation site, rather than the location where the bug
+ /// is reported.
+ BugReport(BugType& bt, StringRef desc, const ExplodedNode *errornode,
+ PathDiagnosticLocation LocationToUnique)
+ : BT(bt), DeclWithIssue(0), Description(desc),
+ UniqueingLocation(LocationToUnique),
+ ErrorNode(errornode), ConfigurationChangeToken(0) {}
+
+ virtual ~BugReport();
+
+ const BugType& getBugType() const { return BT; }
+ BugType& getBugType() { return BT; }
+
+ const ExplodedNode *getErrorNode() const { return ErrorNode; }
+
+ const StringRef getDescription() const { return Description; }
+
+ const StringRef getShortDescription() const {
+ return ShortDescription.empty() ? Description : ShortDescription;
+ }
+
+ void markInteresting(SymbolRef sym);
+ void markInteresting(const MemRegion *R);
+ void markInteresting(SVal V);
+
+ bool isInteresting(SymbolRef sym) const;
+ bool isInteresting(const MemRegion *R) const;
+ bool isInteresting(SVal V) const;
+
+ unsigned getConfigurationChangeToken() const {
+ return ConfigurationChangeToken;
+ }
+
+ /// Return the canonical declaration, be it a method or class, where
+ /// this issue semantically occurred.
+ const Decl *getDeclWithIssue() const;
+
+ /// Specifically set the Decl where an issue occurred. This isn't necessary
+ /// for BugReports that cover a path as it will be automatically inferred.
+ void setDeclWithIssue(const Decl *declWithIssue) {
+ DeclWithIssue = declWithIssue;
+ }
+
+ /// \brief This allows for the addition of metadata to the diagnostic.
+ ///
+ /// Currently, only the HTMLDiagnosticClient knows how to display it.
+ void addExtraText(StringRef S) {
+ ExtraText.push_back(S);
+ }
+
+ virtual const ExtraTextList &getExtraText() {
+ return ExtraText;
+ }
+
+ /// \brief Return the "definitive" location of the reported bug.
+ ///
+ /// While a bug can span an entire path, usually there is a specific
+ /// location that can be used to identify where the key issue occurred.
+ /// This location is used by clients rendering diagnostics.
+ virtual PathDiagnosticLocation getLocation(const SourceManager &SM) const;
+
+ const Stmt *getStmt() const;
+
+ /// \brief Add a range to a bug report.
+ ///
+ /// Ranges are used to highlight regions of interest in the source code.
+ /// They should be at the same source code line as the BugReport location.
+ /// By default, the source range of the statement corresponding to the error
+ /// node will be used; add a single invalid range to specify absence of
+ /// ranges.
+ void addRange(SourceRange R) {
+ assert((R.isValid() || Ranges.empty()) && "Invalid range can only be used "
+ "to specify that the report does not have a range.");
+ Ranges.push_back(R);
+ }
+
+ /// \brief Get the SourceRanges associated with the report.
+ virtual std::pair<ranges_iterator, ranges_iterator> getRanges();
+
+ /// \brief Add custom or predefined bug report visitors to this report.
+ ///
+ /// The visitors should be used when the default trace is not sufficient.
+ /// For example, they allow constructing a more elaborate trace.
+ /// \sa registerConditionVisitor(), registerTrackNullOrUndefValue(),
+ /// registerFindLastStore(), registerNilReceiverVisitor(), and
+ /// registerVarDeclsLastStore().
+ void addVisitor(BugReporterVisitor *visitor);
+
+ /// Iterators through the custom diagnostic visitors.
+ visitor_iterator visitor_begin() { return Callbacks.begin(); }
+ visitor_iterator visitor_end() { return Callbacks.end(); }
+
+ /// Profile to identify equivalent bug reports for error report coalescing.
+ /// Reports are uniqued to ensure that we do not emit multiple diagnostics
+ /// for each bug.
+ virtual void Profile(llvm::FoldingSetNodeID& hash) const;
+};
+
+} // end ento namespace
+} // end clang namespace
+
+namespace llvm {
+ template<> struct ilist_traits<clang::ento::BugReport>
+ : public ilist_default_traits<clang::ento::BugReport> {
+ clang::ento::BugReport *createSentinel() const {
+ return static_cast<clang::ento::BugReport *>(&Sentinel);
+ }
+ void destroySentinel(clang::ento::BugReport *) const {}
+
+ clang::ento::BugReport *provideInitialHead() const {
+ return createSentinel();
+ }
+ clang::ento::BugReport *ensureHead(clang::ento::BugReport *) const {
+ return createSentinel();
+ }
+ private:
+ mutable ilist_half_node<clang::ento::BugReport> Sentinel;
+ };
+}
+
+namespace clang {
+namespace ento {
+
+//===----------------------------------------------------------------------===//
+// BugTypes (collections of related reports).
+//===----------------------------------------------------------------------===//
+
+class BugReportEquivClass : public llvm::FoldingSetNode {
+ /// List of *owned* BugReport objects.
+ llvm::ilist<BugReport> Reports;
+
+ friend class BugReporter;
+ void AddReport(BugReport* R) { Reports.push_back(R); }
+public:
+ BugReportEquivClass(BugReport* R) { Reports.push_back(R); }
+ ~BugReportEquivClass();
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ assert(!Reports.empty());
+ Reports.front().Profile(ID);
+ }
+
+ typedef llvm::ilist<BugReport>::iterator iterator;
+ typedef llvm::ilist<BugReport>::const_iterator const_iterator;
+
+ iterator begin() { return Reports.begin(); }
+ iterator end() { return Reports.end(); }
+
+ const_iterator begin() const { return Reports.begin(); }
+ const_iterator end() const { return Reports.end(); }
+};
+
+//===----------------------------------------------------------------------===//
+// BugReporter and friends.
+//===----------------------------------------------------------------------===//
+
+class BugReporterData {
+public:
+ virtual ~BugReporterData();
+ virtual DiagnosticsEngine& getDiagnostic() = 0;
+ virtual PathDiagnosticConsumer* getPathDiagnosticConsumer() = 0;
+ virtual ASTContext &getASTContext() = 0;
+ virtual SourceManager& getSourceManager() = 0;
+};
+
+/// BugReporter is a utility class for generating PathDiagnostics for analysis.
+/// It collects the BugReports and BugTypes and knows how to generate
+/// and flush the corresponding diagnostics.
+class BugReporter {
+public:
+ enum Kind { BaseBRKind, GRBugReporterKind };
+
+private:
+ typedef llvm::ImmutableSet<BugType*> BugTypesTy;
+ BugTypesTy::Factory F;
+ BugTypesTy BugTypes;
+
+ const Kind kind;
+ BugReporterData& D;
+
+ /// Generate and flush the diagnostics for the given bug report.
+ void FlushReport(BugReportEquivClass& EQ);
+
+ /// The set of bug reports tracked by the BugReporter.
+ llvm::FoldingSet<BugReportEquivClass> EQClasses;
+ /// A vector of BugReports for tracking the allocated pointers and cleanup.
+ std::vector<BugReportEquivClass *> EQClassesVector;
+
+protected:
+ BugReporter(BugReporterData& d, Kind k) : BugTypes(F.getEmptySet()), kind(k),
+ D(d) {}
+
+public:
+ BugReporter(BugReporterData& d) : BugTypes(F.getEmptySet()), kind(BaseBRKind),
+ D(d) {}
+ virtual ~BugReporter();
+
+ /// \brief Generate and flush diagnostics for all bug reports.
+ void FlushReports();
+
+ Kind getKind() const { return kind; }
+
+ DiagnosticsEngine& getDiagnostic() {
+ return D.getDiagnostic();
+ }
+
+ PathDiagnosticConsumer* getPathDiagnosticConsumer() {
+ return D.getPathDiagnosticConsumer();
+ }
+
+ /// \brief Iterator over the set of BugTypes tracked by the BugReporter.
+ typedef BugTypesTy::iterator iterator;
+ iterator begin() { return BugTypes.begin(); }
+ iterator end() { return BugTypes.end(); }
+
+ /// \brief Iterator over the set of BugReports tracked by the BugReporter.
+ typedef llvm::FoldingSet<BugReportEquivClass>::iterator EQClasses_iterator;
+ EQClasses_iterator EQClasses_begin() { return EQClasses.begin(); }
+ EQClasses_iterator EQClasses_end() { return EQClasses.end(); }
+
+ ASTContext &getContext() { return D.getASTContext(); }
+
+ SourceManager& getSourceManager() { return D.getSourceManager(); }
+
+ virtual void GeneratePathDiagnostic(PathDiagnostic& pathDiagnostic,
+ SmallVectorImpl<BugReport *> &bugReports) {}
+
+ void Register(BugType *BT);
+
+ /// \brief Add the given report to the set of reports tracked by BugReporter.
+ ///
+ /// The reports are usually generated by the checkers. Further, they are
+ /// folded based on the profile value, which is done to coalesce similar
+ /// reports.
+ void EmitReport(BugReport *R);
+
+ void EmitBasicReport(const Decl *DeclWithIssue,
+ StringRef BugName, StringRef BugCategory,
+ StringRef BugStr, PathDiagnosticLocation Loc,
+ SourceRange* RangeBeg, unsigned NumRanges);
+
+ void EmitBasicReport(const Decl *DeclWithIssue,
+ StringRef BugName, StringRef BugCategory,
+ StringRef BugStr, PathDiagnosticLocation Loc) {
+ EmitBasicReport(DeclWithIssue, BugName, BugCategory, BugStr, Loc, 0, 0);
+ }
+
+ void EmitBasicReport(const Decl *DeclWithIssue,
+ StringRef BugName, StringRef Category,
+ StringRef BugStr, PathDiagnosticLocation Loc,
+ SourceRange R) {
+ EmitBasicReport(DeclWithIssue, BugName, Category, BugStr, Loc, &R, 1);
+ }
+
+ static bool classof(const BugReporter* R) { return true; }
+
+private:
+ llvm::StringMap<BugType *> StrBugTypes;
+
+ /// \brief Returns a BugType that is associated with the given name and
+ /// category.
+ BugType *getBugTypeForName(StringRef name, StringRef category);
+};
+
+// FIXME: Get rid of GRBugReporter. It's the wrong abstraction.
+class GRBugReporter : public BugReporter {
+ ExprEngine& Eng;
+public:
+ GRBugReporter(BugReporterData& d, ExprEngine& eng)
+ : BugReporter(d, GRBugReporterKind), Eng(eng) {}
+
+ virtual ~GRBugReporter();
+
+ /// getEngine - Return the analysis engine used to analyze a given
+ /// function or method.
+ ExprEngine &getEngine() { return Eng; }
+
+ /// getGraph - Get the exploded graph created by the analysis engine
+ /// for the analyzed method or function.
+ ExplodedGraph &getGraph();
+
+ /// getStateManager - Return the state manager used by the analysis
+ /// engine.
+ ProgramStateManager &getStateManager();
+
+ virtual void GeneratePathDiagnostic(PathDiagnostic &pathDiagnostic,
+ SmallVectorImpl<BugReport*> &bugReports);
+
+ /// classof - Used by isa<>, cast<>, and dyn_cast<>.
+ static bool classof(const BugReporter* R) {
+ return R->getKind() == GRBugReporterKind;
+ }
+};
+
+class BugReporterContext {
+ virtual void anchor();
+ GRBugReporter &BR;
+public:
+ BugReporterContext(GRBugReporter& br) : BR(br) {}
+
+ virtual ~BugReporterContext() {}
+
+ GRBugReporter& getBugReporter() { return BR; }
+
+ ExplodedGraph &getGraph() { return BR.getGraph(); }
+
+ ProgramStateManager& getStateManager() {
+ return BR.getStateManager();
+ }
+
+ SValBuilder& getSValBuilder() {
+ return getStateManager().getSValBuilder();
+ }
+
+ ASTContext &getASTContext() {
+ return BR.getContext();
+ }
+
+ SourceManager& getSourceManager() {
+ return BR.getSourceManager();
+ }
+
+ virtual BugReport::NodeResolver& getNodeResolver() = 0;
+};
+
+} // end GR namespace
+
+} // end clang namespace
+
+#endif
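
Two hypothetical helpers below show the two reporting paths offered by the interface above: a one-shot "basic" report with just a location and range, and a path-sensitive report that attaches an error node and an interesting symbol. Only calls declared in this header (and BugType.h) are used; the function names and message strings are made up.

#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"

using namespace clang;
using namespace clang::ento;

// One-shot report: no path, just a location and a highlighted range.
void reportSuspiciousCall(BugReporter &BR, const Decl *D,
                          PathDiagnosticLocation Loc, SourceRange R) {
  BR.EmitBasicReport(D, "Suspicious call", "Example category",
                     "This call looks suspicious (hypothetical message).",
                     Loc, R);
}

// Path-sensitive report: the error node lets BugReporter build a trace, and
// the interesting symbol guides which diagnostics end up in that trace.
void reportNullUse(BugReporter &BR, BugType &BT, const ExplodedNode *N,
                   SymbolRef Sym) {
  BugReport *Report = new BugReport(BT, "Use of a null value (hypothetical)", N);
  Report->markInteresting(Sym);
  // The report is now owned by the reporter's equivalence classes (see
  // BugReportEquivClass); equivalent reports are coalesced before flushing.
  BR.EmitReport(Report);
}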
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h
new file mode 100644
index 0000000..7e665ce
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h
@@ -0,0 +1,243 @@
+//===--- BugReporterVisitor.h - Generate PathDiagnostics -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares BugReporterVisitors, which are used to generate enhanced
+// diagnostic traces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_BUGREPORTERVISITOR
+#define LLVM_CLANG_GR_BUGREPORTERVISITOR
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/FoldingSet.h"
+
+namespace clang {
+
+namespace ento {
+
+class BugReport;
+class BugReporterContext;
+class ExplodedNode;
+class MemRegion;
+class PathDiagnosticPiece;
+
+/// \brief BugReporterVisitors are used to add custom diagnostics along a path.
+///
+/// Custom visitors should subclass the BugReporterVisitorImpl class for a
+/// default implementation of the clone() method.
+/// (Warning: if you have a deep subclass of BugReporterVisitorImpl, the
+/// default implementation of clone() will NOT do the right thing, and you
+/// will have to provide your own implementation.)
+class BugReporterVisitor : public llvm::FoldingSetNode {
+public:
+ virtual ~BugReporterVisitor();
+
+ /// \brief Returns a copy of this BugReporterVisitor.
+ ///
+ /// Custom BugReporterVisitors should not override this method directly.
+ /// Instead, they should inherit from BugReporterVisitorImpl and provide
+ /// a protected or public copy constructor.
+ ///
+ /// (Warning: if you have a deep subclass of BugReporterVisitorImpl, the
+ /// default implementation of clone() will NOT do the right thing, and you
+ /// will have to provide your own implementation.)
+ virtual BugReporterVisitor *clone() const = 0;
+
+ /// \brief Return a diagnostic piece which should be associated with the
+ /// given node.
+ ///
+ /// The last parameter can be used to register a new visitor with the given
+ /// BugReport while processing a node.
+ virtual PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) = 0;
+
+ /// \brief Provide custom definition for the final diagnostic piece on the
+ /// path - the piece that is displayed before the path is expanded.
+ ///
+ /// If this returns NULL, the default implementation will be used.
+ /// Also note that at most one visitor of a BugReport should generate a
+ /// non-NULL end of path diagnostic piece.
+ virtual PathDiagnosticPiece *getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *N,
+ BugReport &BR);
+
+ virtual void Profile(llvm::FoldingSetNodeID &ID) const = 0;
+
+ /// \brief Generates the default final diagnostic piece.
+ static PathDiagnosticPiece *getDefaultEndPath(BugReporterContext &BRC,
+ const ExplodedNode *N,
+ BugReport &BR);
+
+};
+
+/// This class provides a convenience implementation for clone() using the
+/// Curiously-Recurring Template Pattern. If you are implementing a custom
+/// BugReporterVisitor, subclass BugReporterVisitorImpl and provide a public
+/// or protected copy constructor.
+///
+/// (Warning: if you have a deep subclass of BugReporterVisitorImpl, the
+/// default implementation of clone() will NOT do the right thing, and you
+/// will have to provide your own implementation.)
+template <class DERIVED>
+class BugReporterVisitorImpl : public BugReporterVisitor {
+ virtual BugReporterVisitor *clone() const {
+ return new DERIVED(*static_cast<const DERIVED *>(this));
+ }
+};
+
+class FindLastStoreBRVisitor
+ : public BugReporterVisitorImpl<FindLastStoreBRVisitor>
+{
+ const MemRegion *R;
+ SVal V;
+ bool satisfied;
+ const ExplodedNode *StoreSite;
+
+public:
+ /// \brief Convenience method to create a visitor given only the MemRegion.
+ /// Returns NULL if the visitor cannot be created, for example when the
+ /// corresponding value is unknown.
+ static BugReporterVisitor *createVisitorObject(const ExplodedNode *N,
+ const MemRegion *R);
+
+ /// Creates a visitor for every VarDecl inside a Stmt and registers it with
+ /// the BugReport.
+ static void registerStatementVarDecls(BugReport &BR, const Stmt *S);
+
+ FindLastStoreBRVisitor(SVal v, const MemRegion *r)
+ : R(r), V(v), satisfied(false), StoreSite(0) {
+ assert (!V.isUnknown() && "Cannot track unknown value.");
+
+ // TODO: Does it make sense to allow undef values here?
+ // (If not, also see UndefCapturedBlockVarChecker)?
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const;
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR);
+};
+
+class TrackConstraintBRVisitor
+ : public BugReporterVisitorImpl<TrackConstraintBRVisitor>
+{
+ DefinedSVal Constraint;
+ const bool Assumption;
+ bool isSatisfied;
+
+public:
+ TrackConstraintBRVisitor(DefinedSVal constraint, bool assumption)
+ : Constraint(constraint), Assumption(assumption), isSatisfied(false) {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const;
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR);
+};
+
+class NilReceiverBRVisitor
+ : public BugReporterVisitorImpl<NilReceiverBRVisitor>
+{
+public:
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ static int x = 0;
+ ID.AddPointer(&x);
+ }
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR);
+};
+
+/// Visitor that tries to report interesting diagnostics from conditions.
+class ConditionBRVisitor : public BugReporterVisitorImpl<ConditionBRVisitor> {
+public:
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ static int x = 0;
+ ID.AddPointer(&x);
+ }
+
+ virtual PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *Prev,
+ BugReporterContext &BRC,
+ BugReport &BR);
+
+ PathDiagnosticPiece *VisitNodeImpl(const ExplodedNode *N,
+ const ExplodedNode *Prev,
+ BugReporterContext &BRC,
+ BugReport &BR);
+
+ PathDiagnosticPiece *VisitTerminator(const Stmt *Term,
+ const ExplodedNode *N,
+ const CFGBlock *srcBlk,
+ const CFGBlock *dstBlk,
+ BugReport &R,
+ BugReporterContext &BRC);
+
+ PathDiagnosticPiece *VisitTrueTest(const Expr *Cond,
+ bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &R,
+ const ExplodedNode *N);
+
+ PathDiagnosticPiece *VisitTrueTest(const Expr *Cond,
+ const DeclRefExpr *DR,
+ const bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &R,
+ const ExplodedNode *N);
+
+ PathDiagnosticPiece *VisitTrueTest(const Expr *Cond,
+ const BinaryOperator *BExpr,
+ const bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &R,
+ const ExplodedNode *N);
+
+ PathDiagnosticPiece *VisitConditionVariable(StringRef LhsString,
+ const Expr *CondVarExpr,
+ const bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &R,
+ const ExplodedNode *N);
+
+ bool patternMatch(const Expr *Ex,
+ llvm::raw_ostream &Out,
+ BugReporterContext &BRC,
+ BugReport &R,
+ const ExplodedNode *N,
+ llvm::Optional<bool> &prunable);
+};
+
+namespace bugreporter {
+
+BugReporterVisitor *getTrackNullOrUndefValueVisitor(const ExplodedNode *N,
+ const Stmt *S,
+ BugReport *R);
+
+const Stmt *GetDerefExpr(const ExplodedNode *N);
+const Stmt *GetDenomExpr(const ExplodedNode *N);
+const Stmt *GetCalleeExpr(const ExplodedNode *N);
+const Stmt *GetRetValExpr(const ExplodedNode *N);
+
+} // end namespace bugreporter
+} // end namespace ento
+} // end namespace clang
+
+
+#endif // LLVM_CLANG_GR_BUGREPORTERVISITOR
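
Following the CRTP guidance in the comments above, a minimal custom visitor looks like the sketch below. ExampleBRVisitor is hypothetical; the Profile() idiom (a static tag whose address identifies the visitor kind) mirrors the visitors declared in this header.

#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h"

namespace {
class ExampleBRVisitor
    : public clang::ento::BugReporterVisitorImpl<ExampleBRVisitor> {
public:
  void Profile(llvm::FoldingSetNodeID &ID) const {
    static int Tag = 0;       // unique address identifies this visitor kind
    ID.AddPointer(&Tag);
  }

  clang::ento::PathDiagnosticPiece *
  VisitNode(const clang::ento::ExplodedNode *N,
            const clang::ento::ExplodedNode *PrevN,
            clang::ento::BugReporterContext &BRC,
            clang::ento::BugReport &BR) {
    // Hypothetical body: return a PathDiagnosticPiece for nodes worth
    // annotating, or NULL (as here) to add nothing for this node.
    return 0;
  }
};
} // end anonymous namespace

// Registration happens on the report, as described by BugReport::addVisitor:
//   Report->addVisitor(new ExampleBRVisitor());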
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h
new file mode 100644
index 0000000..cb49122
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/BugType.h
@@ -0,0 +1,67 @@
+//===--- BugType.h - Bug Information Description ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BugType, a class representing a bug type.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_BUGTYPE
+#define LLVM_CLANG_ANALYSIS_BUGTYPE
+
+#include "llvm/ADT/FoldingSet.h"
+#include <string>
+
+namespace clang {
+
+namespace ento {
+
+class BugReporter;
+class ExplodedNode;
+class ExprEngine;
+
+class BugType {
+private:
+ const std::string Name;
+ const std::string Category;
+ bool SuppressonSink;
+public:
+ BugType(StringRef name, StringRef cat)
+ : Name(name), Category(cat), SuppressonSink(false) {}
+ virtual ~BugType();
+
+ // FIXME: Should these be made strings as well?
+ StringRef getName() const { return Name; }
+ StringRef getCategory() const { return Category; }
+
+ /// isSuppressOnSink - Returns true if bug reports associated with this bug
+ /// type should be suppressed if the end node of the report is post-dominated
+ /// by a sink node.
+ bool isSuppressOnSink() const { return SuppressonSink; }
+ void setSuppressOnSink(bool x) { SuppressonSink = x; }
+
+ virtual void FlushReports(BugReporter& BR);
+};
+
+class BuiltinBug : public BugType {
+ virtual void anchor();
+ const std::string desc;
+public:
+ BuiltinBug(const char *name, const char *description)
+ : BugType(name, "Logic error"), desc(description) {}
+
+ BuiltinBug(const char *name)
+ : BugType(name, "Logic error"), desc(name) {}
+
+ StringRef getDescription() const { return desc; }
+};
+
+} // end ento namespace
+
+} // end clang namespace
+#endif
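
A small hypothetical example of how a checker-owned bug type could be set up with the classes above; makeLeakBugType is illustrative only. BuiltinBug fills in the "Logic error" category, and setSuppressOnSink() is the knob documented on isSuppressOnSink().

#include "clang/Basic/LLVM.h"  // StringRef and friends used by BugType.h
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"

clang::ento::BugType *makeLeakBugType() {
  clang::ento::BuiltinBug *BT =
      new clang::ento::BuiltinBug("Example leak",
                                  "Hypothetical description of the leak");
  // Reports that end at a sink node (e.g. after a noreturn call) are often
  // noise for leak-style checks, so suppress them.
  BT->setSuppressOnSink(true);
  return BT;
}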
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h
new file mode 100644
index 0000000..5a8a1c7
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h
@@ -0,0 +1,679 @@
+//===--- PathDiagnostic.h - Path-Specific Diagnostic Handling ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PathDiagnostic-related interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_PATH_DIAGNOSTIC_H
+#define LLVM_CLANG_PATH_DIAGNOSTIC_H
+
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/Optional.h"
+#include <deque>
+#include <iterator>
+#include <string>
+#include <vector>
+
+namespace clang {
+
+class AnalysisDeclContext;
+class BinaryOperator;
+class CompoundStmt;
+class Decl;
+class LocationContext;
+class MemberExpr;
+class ParentMap;
+class ProgramPoint;
+class SourceManager;
+class Stmt;
+
+namespace ento {
+
+class ExplodedNode;
+class SymExpr;
+typedef const SymExpr* SymbolRef;
+
+//===----------------------------------------------------------------------===//
+// High-level interface for handlers of path-sensitive diagnostics.
+//===----------------------------------------------------------------------===//
+
+class PathDiagnostic;
+
+class PathDiagnosticConsumer {
+ virtual void anchor();
+public:
+ PathDiagnosticConsumer() : flushed(false) {}
+ virtual ~PathDiagnosticConsumer();
+
+ void FlushDiagnostics(SmallVectorImpl<std::string> *FilesMade);
+
+ virtual void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+ SmallVectorImpl<std::string> *FilesMade)
+ = 0;
+
+ virtual StringRef getName() const = 0;
+
+ void HandlePathDiagnostic(PathDiagnostic *D);
+
+ enum PathGenerationScheme { Minimal, Extensive };
+ virtual PathGenerationScheme getGenerationScheme() const { return Minimal; }
+ virtual bool supportsLogicalOpControlFlow() const { return false; }
+ virtual bool supportsAllBlockEdges() const { return false; }
+ virtual bool useVerboseDescription() const { return true; }
+
+ /// Return true if the PathDiagnosticConsumer supports individual
+ /// PathDiagnostics that span multiple files.
+ virtual bool supportsCrossFileDiagnostics() const { return false; }
+
+protected:
+ bool flushed;
+ llvm::FoldingSet<PathDiagnostic> Diags;
+};
+
+//===----------------------------------------------------------------------===//
+// Path-sensitive diagnostics.
+//===----------------------------------------------------------------------===//
+
+class PathDiagnosticRange : public SourceRange {
+public:
+ bool isPoint;
+
+ PathDiagnosticRange(const SourceRange &R, bool isP = false)
+ : SourceRange(R), isPoint(isP) {}
+
+ PathDiagnosticRange() : isPoint(false) {}
+};
+
+typedef llvm::PointerUnion<const LocationContext*, AnalysisDeclContext*>
+ LocationOrAnalysisDeclContext;
+
+class PathDiagnosticLocation {
+private:
+ enum Kind { RangeK, SingleLocK, StmtK, DeclK } K;
+ const Stmt *S;
+ const Decl *D;
+ const SourceManager *SM;
+ FullSourceLoc Loc;
+ PathDiagnosticRange Range;
+
+ PathDiagnosticLocation(SourceLocation L, const SourceManager &sm,
+ Kind kind)
+ : K(kind), S(0), D(0), SM(&sm),
+ Loc(genLocation(L)), Range(genRange()) {
+ assert(Loc.isValid());
+ assert(Range.isValid());
+ }
+
+ FullSourceLoc
+ genLocation(SourceLocation L = SourceLocation(),
+ LocationOrAnalysisDeclContext LAC = (AnalysisDeclContext*)0) const;
+
+ PathDiagnosticRange
+ genRange(LocationOrAnalysisDeclContext LAC = (AnalysisDeclContext*)0) const;
+
+public:
+ /// Create an invalid location.
+ PathDiagnosticLocation()
+ : K(SingleLocK), S(0), D(0), SM(0) {}
+
+ /// Create a location corresponding to the given statement.
+ PathDiagnosticLocation(const Stmt *s,
+ const SourceManager &sm,
+ LocationOrAnalysisDeclContext lac)
+ : K(StmtK), S(s), D(0), SM(&sm),
+ Loc(genLocation(SourceLocation(), lac)),
+ Range(genRange(lac)) {
+ assert(S);
+ assert(Loc.isValid());
+ assert(Range.isValid());
+ }
+
+ /// Create a location corresponding to the given declaration.
+ PathDiagnosticLocation(const Decl *d, const SourceManager &sm)
+ : K(DeclK), S(0), D(d), SM(&sm),
+ Loc(genLocation()), Range(genRange()) {
+ assert(D);
+ assert(Loc.isValid());
+ assert(Range.isValid());
+ }
+
+ /// Create a location corresponding to the given declaration.
+ static PathDiagnosticLocation create(const Decl *D,
+ const SourceManager &SM) {
+ return PathDiagnosticLocation(D, SM);
+ }
+
+ /// Create a location for the beginning of the declaration.
+ static PathDiagnosticLocation createBegin(const Decl *D,
+ const SourceManager &SM);
+
+ /// Create a location for the beginning of the statement.
+ static PathDiagnosticLocation createBegin(const Stmt *S,
+ const SourceManager &SM,
+ const LocationOrAnalysisDeclContext LAC);
+
+ /// Create the location for the operator of the binary expression.
+ /// Assumes the statement has a valid location.
+ static PathDiagnosticLocation createOperatorLoc(const BinaryOperator *BO,
+ const SourceManager &SM);
+
+ /// For member expressions, return the location of the '.' or '->'.
+ /// Assumes the statement has a valid location.
+ static PathDiagnosticLocation createMemberLoc(const MemberExpr *ME,
+ const SourceManager &SM);
+
+ /// Create a location for the beginning of the compound statement.
+ /// Assumes the statement has a valid location.
+ static PathDiagnosticLocation createBeginBrace(const CompoundStmt *CS,
+ const SourceManager &SM);
+
+ /// Create a location for the end of the compound statement.
+ /// Assumes the statement has a valid location.
+ static PathDiagnosticLocation createEndBrace(const CompoundStmt *CS,
+ const SourceManager &SM);
+
+ /// Create a location for the beginning of the enclosing declaration body.
+ /// Defaults to the beginning of the first statement in the declaration body.
+ static PathDiagnosticLocation createDeclBegin(const LocationContext *LC,
+ const SourceManager &SM);
+
+ /// Constructs a location for the end of the enclosing declaration body.
+ /// Defaults to the end of brace.
+ static PathDiagnosticLocation createDeclEnd(const LocationContext *LC,
+ const SourceManager &SM);
+
+ /// Create a location corresponding to the given valid ExplodedNode.
+ static PathDiagnosticLocation create(const ProgramPoint& P,
+ const SourceManager &SMng);
+
+ /// Create a location corresponding to the next valid ExplodedNode as end
+ /// of path location.
+ static PathDiagnosticLocation createEndOfPath(const ExplodedNode* N,
+ const SourceManager &SM);
+
+ /// Convert the given location into a single kind location.
+ static PathDiagnosticLocation createSingleLocation(
+ const PathDiagnosticLocation &PDL);
+
+ bool operator==(const PathDiagnosticLocation &X) const {
+ return K == X.K && Loc == X.Loc && Range == X.Range;
+ }
+
+ bool operator!=(const PathDiagnosticLocation &X) const {
+ return !(*this == X);
+ }
+
+ bool isValid() const {
+ return SM != 0;
+ }
+
+ FullSourceLoc asLocation() const {
+ return Loc;
+ }
+
+ PathDiagnosticRange asRange() const {
+ return Range;
+ }
+
+ const Stmt *asStmt() const { assert(isValid()); return S; }
+ const Decl *asDecl() const { assert(isValid()); return D; }
+
+ bool hasRange() const { return K == StmtK || K == RangeK || K == DeclK; }
+
+ void invalidate() {
+ *this = PathDiagnosticLocation();
+ }
+
+ void flatten();
+
+ const SourceManager& getManager() const { assert(isValid()); return *SM; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const;
+};
+
+class PathDiagnosticLocationPair {
+private:
+ PathDiagnosticLocation Start, End;
+public:
+ PathDiagnosticLocationPair(const PathDiagnosticLocation &start,
+ const PathDiagnosticLocation &end)
+ : Start(start), End(end) {}
+
+ const PathDiagnosticLocation &getStart() const { return Start; }
+ const PathDiagnosticLocation &getEnd() const { return End; }
+
+ void flatten() {
+ Start.flatten();
+ End.flatten();
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ Start.Profile(ID);
+ End.Profile(ID);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Path "pieces" for path-sensitive diagnostics.
+//===----------------------------------------------------------------------===//
+
+class PathDiagnosticPiece : public RefCountedBaseVPTR {
+public:
+ enum Kind { ControlFlow, Event, Macro, Call };
+ enum DisplayHint { Above, Below };
+
+private:
+ const std::string str;
+ const Kind kind;
+ const DisplayHint Hint;
+ std::vector<SourceRange> ranges;
+
+ // Do not implement:
+ PathDiagnosticPiece();
+ PathDiagnosticPiece(const PathDiagnosticPiece &P);
+ PathDiagnosticPiece& operator=(const PathDiagnosticPiece &P);
+
+protected:
+ PathDiagnosticPiece(StringRef s, Kind k, DisplayHint hint = Below);
+
+ PathDiagnosticPiece(Kind k, DisplayHint hint = Below);
+
+public:
+ virtual ~PathDiagnosticPiece();
+
+ const std::string& getString() const { return str; }
+
+ /// getDisplayHint - Return a hint indicating where the diagnostic should
+ /// be displayed by the PathDiagnosticConsumer.
+ DisplayHint getDisplayHint() const { return Hint; }
+
+ virtual PathDiagnosticLocation getLocation() const = 0;
+ virtual void flattenLocations() = 0;
+
+ Kind getKind() const { return kind; }
+
+ void addRange(SourceRange R) {
+ if (!R.isValid())
+ return;
+ ranges.push_back(R);
+ }
+
+ void addRange(SourceLocation B, SourceLocation E) {
+ if (!B.isValid() || !E.isValid())
+ return;
+ ranges.push_back(SourceRange(B,E));
+ }
+
+ typedef const SourceRange* range_iterator;
+
+ range_iterator ranges_begin() const {
+ return ranges.empty() ? NULL : &ranges[0];
+ }
+
+ range_iterator ranges_end() const {
+ return ranges_begin() + ranges.size();
+ }
+
+ static inline bool classof(const PathDiagnosticPiece *P) {
+ return true;
+ }
+
+ virtual void Profile(llvm::FoldingSetNodeID &ID) const;
+};
+
+
+class PathPieces :
+ public std::deque<IntrusiveRefCntPtr<PathDiagnosticPiece> > {
+public:
+ ~PathPieces();
+};
+
+class PathDiagnosticSpotPiece : public PathDiagnosticPiece {
+private:
+ PathDiagnosticLocation Pos;
+public:
+ PathDiagnosticSpotPiece(const PathDiagnosticLocation &pos,
+ StringRef s,
+ PathDiagnosticPiece::Kind k,
+ bool addPosRange = true)
+ : PathDiagnosticPiece(s, k), Pos(pos) {
+ assert(Pos.isValid() && Pos.asLocation().isValid() &&
+ "PathDiagnosticSpotPiece's must have a valid location.");
+ if (addPosRange && Pos.hasRange()) addRange(Pos.asRange());
+ }
+
+ PathDiagnosticLocation getLocation() const { return Pos; }
+ virtual void flattenLocations() { Pos.flatten(); }
+
+ virtual void Profile(llvm::FoldingSetNodeID &ID) const;
+};
+
+/// \brief Interface for classes constructing Stack hints.
+///
+/// If a PathDiagnosticEvent occurs in a different frame than the final
+/// diagnostic, the hints can be used to summarize the effect of the call.
+class StackHintGenerator {
+public:
+ virtual ~StackHintGenerator() = 0;
+
+ /// \brief Construct the Diagnostic message for the given ExplodedNode.
+ virtual std::string getMessage(const ExplodedNode *N) = 0;
+};
+
+/// \brief Constructs a Stack hint for the given symbol.
+///
+/// The class knows how to construct the stack hint message based on
+/// traversing the CallExpr associated with the call and checking if the given
+/// symbol is returned or is one of the arguments.
+/// The hint can be customized by redefining 'getMessageForX()' methods.
+class StackHintGeneratorForSymbol : public StackHintGenerator {
+private:
+ SymbolRef Sym;
+ std::string Msg;
+
+public:
+ StackHintGeneratorForSymbol(SymbolRef S, StringRef M) : Sym(S), Msg(M) {}
+ virtual ~StackHintGeneratorForSymbol() {}
+
+ /// \brief Search the call expression for the symbol Sym and dispatch the
+ /// 'getMessageForX()' methods to construct a specific message.
+ virtual std::string getMessage(const ExplodedNode *N);
+
+  /// Prints the ordinal form of the given integer;
+  /// only valid for ValNo > 0.
+ void printOrdinal(unsigned ValNo, llvm::raw_svector_ostream &Out);
+
+ /// Produces the message of the following form:
+ /// 'Msg via Nth parameter'
+ virtual std::string getMessageForArg(const Expr *ArgE, unsigned ArgIndex);
+ virtual std::string getMessageForReturn(const CallExpr *CallExpr) {
+ return Msg;
+ }
+ virtual std::string getMessageForSymbolNotFound() {
+ return Msg;
+ }
+};
+
+class PathDiagnosticEventPiece : public PathDiagnosticSpotPiece {
+ llvm::Optional<bool> IsPrunable;
+
+ /// If the event occurs in a different frame than the final diagnostic,
+ /// supply a message that will be used to construct an extra hint on the
+ /// returns from all the calls on the stack from this event to the final
+ /// diagnostic.
+ llvm::OwningPtr<StackHintGenerator> CallStackHint;
+
+public:
+ PathDiagnosticEventPiece(const PathDiagnosticLocation &pos,
+ StringRef s, bool addPosRange = true,
+ StackHintGenerator *stackHint = 0)
+ : PathDiagnosticSpotPiece(pos, s, Event, addPosRange),
+ CallStackHint(stackHint) {}
+
+ ~PathDiagnosticEventPiece();
+
+ /// Mark the diagnostic piece as being potentially prunable. This
+ /// flag may have been previously set, at which point it will not
+ /// be reset unless one specifies to do so.
+ void setPrunable(bool isPrunable, bool override = false) {
+ if (IsPrunable.hasValue() && !override)
+ return;
+ IsPrunable = isPrunable;
+ }
+
+ /// Return true if the diagnostic piece is prunable.
+ bool isPrunable() const {
+ return IsPrunable.hasValue() ? IsPrunable.getValue() : false;
+ }
+
+ bool hasCallStackHint() {
+ return (CallStackHint != 0);
+ }
+
+ /// Produce the hint for the given node. The node contains
+ /// information about the call for which the diagnostic can be generated.
+ std::string getCallStackMessage(const ExplodedNode *N) {
+ if (CallStackHint)
+ return CallStackHint->getMessage(N);
+ return "";
+ }
+
+ static inline bool classof(const PathDiagnosticPiece *P) {
+ return P->getKind() == Event;
+ }
+};
+
+class PathDiagnosticCallPiece : public PathDiagnosticPiece {
+ PathDiagnosticCallPiece(const Decl *callerD,
+ const PathDiagnosticLocation &callReturnPos)
+ : PathDiagnosticPiece(Call), Caller(callerD), Callee(0),
+ NoExit(false), callReturn(callReturnPos) {}
+
+ PathDiagnosticCallPiece(PathPieces &oldPath, const Decl *caller)
+ : PathDiagnosticPiece(Call), Caller(caller), Callee(0),
+ NoExit(true), path(oldPath) {}
+
+ const Decl *Caller;
+ const Decl *Callee;
+
+ // Flag signifying that this diagnostic has only call enter and no matching
+ // call exit.
+ bool NoExit;
+
+  // The custom string, which should appear after the call return diagnostic.
+ // TODO: Should we allow multiple diagnostics?
+ std::string CallStackMessage;
+
+public:
+ PathDiagnosticLocation callEnter;
+ PathDiagnosticLocation callEnterWithin;
+ PathDiagnosticLocation callReturn;
+ PathPieces path;
+
+ virtual ~PathDiagnosticCallPiece();
+
+ const Decl *getCaller() const { return Caller; }
+
+ const Decl *getCallee() const { return Callee; }
+ void setCallee(const CallEnter &CE, const SourceManager &SM);
+
+ bool hasCallStackMessage() { return !CallStackMessage.empty(); }
+ void setCallStackMessage(StringRef st) {
+ CallStackMessage = st;
+ }
+
+ virtual PathDiagnosticLocation getLocation() const {
+ return callEnter;
+ }
+
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> getCallEnterEvent() const;
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+ getCallEnterWithinCallerEvent() const;
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> getCallExitEvent() const;
+
+ virtual void flattenLocations() {
+ callEnter.flatten();
+ callReturn.flatten();
+ for (PathPieces::iterator I = path.begin(),
+ E = path.end(); I != E; ++I) (*I)->flattenLocations();
+ }
+
+ static PathDiagnosticCallPiece *construct(const ExplodedNode *N,
+ const CallExit &CE,
+ const SourceManager &SM);
+
+ static PathDiagnosticCallPiece *construct(PathPieces &pieces,
+ const Decl *caller);
+
+ virtual void Profile(llvm::FoldingSetNodeID &ID) const;
+
+ static inline bool classof(const PathDiagnosticPiece *P) {
+ return P->getKind() == Call;
+ }
+};
+
+class PathDiagnosticControlFlowPiece : public PathDiagnosticPiece {
+ std::vector<PathDiagnosticLocationPair> LPairs;
+public:
+ PathDiagnosticControlFlowPiece(const PathDiagnosticLocation &startPos,
+ const PathDiagnosticLocation &endPos,
+ StringRef s)
+ : PathDiagnosticPiece(s, ControlFlow) {
+ LPairs.push_back(PathDiagnosticLocationPair(startPos, endPos));
+ }
+
+ PathDiagnosticControlFlowPiece(const PathDiagnosticLocation &startPos,
+ const PathDiagnosticLocation &endPos)
+ : PathDiagnosticPiece(ControlFlow) {
+ LPairs.push_back(PathDiagnosticLocationPair(startPos, endPos));
+ }
+
+ ~PathDiagnosticControlFlowPiece();
+
+ PathDiagnosticLocation getStartLocation() const {
+ assert(!LPairs.empty() &&
+ "PathDiagnosticControlFlowPiece needs at least one location.");
+ return LPairs[0].getStart();
+ }
+
+ PathDiagnosticLocation getEndLocation() const {
+ assert(!LPairs.empty() &&
+ "PathDiagnosticControlFlowPiece needs at least one location.");
+ return LPairs[0].getEnd();
+ }
+
+ void push_back(const PathDiagnosticLocationPair &X) { LPairs.push_back(X); }
+
+ virtual PathDiagnosticLocation getLocation() const {
+ return getStartLocation();
+ }
+
+ typedef std::vector<PathDiagnosticLocationPair>::iterator iterator;
+ iterator begin() { return LPairs.begin(); }
+ iterator end() { return LPairs.end(); }
+
+ virtual void flattenLocations() {
+ for (iterator I=begin(), E=end(); I!=E; ++I) I->flatten();
+ }
+
+ typedef std::vector<PathDiagnosticLocationPair>::const_iterator
+ const_iterator;
+ const_iterator begin() const { return LPairs.begin(); }
+ const_iterator end() const { return LPairs.end(); }
+
+ static inline bool classof(const PathDiagnosticPiece *P) {
+ return P->getKind() == ControlFlow;
+ }
+
+ virtual void Profile(llvm::FoldingSetNodeID &ID) const;
+};
+
+class PathDiagnosticMacroPiece : public PathDiagnosticSpotPiece {
+public:
+ PathDiagnosticMacroPiece(const PathDiagnosticLocation &pos)
+ : PathDiagnosticSpotPiece(pos, "", Macro) {}
+
+ ~PathDiagnosticMacroPiece();
+
+ PathPieces subPieces;
+
+ bool containsEvent() const;
+
+ virtual void flattenLocations() {
+ PathDiagnosticSpotPiece::flattenLocations();
+ for (PathPieces::iterator I = subPieces.begin(),
+ E = subPieces.end(); I != E; ++I) (*I)->flattenLocations();
+ }
+
+ static inline bool classof(const PathDiagnosticPiece *P) {
+ return P->getKind() == Macro;
+ }
+
+ virtual void Profile(llvm::FoldingSetNodeID &ID) const;
+};
+
+/// PathDiagnostic - PathDiagnostic objects represent a single path-sensitive
+/// diagnostic. It represents an ordered collection of PathDiagnosticPieces,
+/// each of which represents a piece of the path.
+class PathDiagnostic : public llvm::FoldingSetNode {
+ const Decl *DeclWithIssue;
+ std::string BugType;
+ std::string Desc;
+ std::string Category;
+ std::deque<std::string> OtherDesc;
+ PathPieces pathImpl;
+ llvm::SmallVector<PathPieces *, 3> pathStack;
+
+ PathDiagnostic(); // Do not implement.
+public:
+ const PathPieces &path;
+
+ /// Return the path currently used by builders for constructing the
+ /// PathDiagnostic.
+ PathPieces &getActivePath() {
+ if (pathStack.empty())
+ return pathImpl;
+ return *pathStack.back();
+ }
+
+ /// Return a mutable version of 'path'.
+ PathPieces &getMutablePieces() {
+ return pathImpl;
+ }
+
+ /// Return the unrolled size of the path.
+ unsigned full_size();
+
+ void pushActivePath(PathPieces *p) { pathStack.push_back(p); }
+ void popActivePath() { if (!pathStack.empty()) pathStack.pop_back(); }
+
+ // PathDiagnostic();
+ PathDiagnostic(const Decl *DeclWithIssue,
+ StringRef bugtype,
+ StringRef desc,
+ StringRef category);
+
+ ~PathDiagnostic();
+
+ StringRef getDescription() const { return Desc; }
+ StringRef getBugType() const { return BugType; }
+ StringRef getCategory() const { return Category; }
+
+ /// Return the semantic context where an issue occurred. If the
+ /// issue occurs along a path, this represents the "central" area
+ /// where the bug manifests.
+ const Decl *getDeclWithIssue() const { return DeclWithIssue; }
+
+ typedef std::deque<std::string>::const_iterator meta_iterator;
+ meta_iterator meta_begin() const { return OtherDesc.begin(); }
+ meta_iterator meta_end() const { return OtherDesc.end(); }
+ void addMeta(StringRef s) { OtherDesc.push_back(s); }
+
+ PathDiagnosticLocation getLocation() const;
+
+ void flattenLocations() {
+ for (PathPieces::iterator I = pathImpl.begin(), E = pathImpl.end();
+ I != E; ++I) (*I)->flattenLocations();
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const;
+
+ void FullProfile(llvm::FoldingSetNodeID &ID) const;
+};
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
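A short sketch of how the location and piece classes above combine when a checker appends an event to a report path; S, SM, and LCtx are assumed to be supplied by surrounding analyzer code, and the message text is made up.

    #include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"

    using namespace clang;
    using namespace ento;

    // Build an event piece anchored at the beginning of a statement.
    static PathDiagnosticEventPiece *
    makeEventAt(const Stmt *S, const SourceManager &SM,
                const LocationContext *LCtx) {
      // createBegin derives both the source location and the highlight range
      // from the statement itself.
      PathDiagnosticLocation Pos =
          PathDiagnosticLocation::createBegin(S, SM, LCtx);
      // Pieces are reference counted (RefCountedBaseVPTR); callers normally
      // hand the result to an IntrusiveRefCntPtr or push it into PathPieces.
      return new PathDiagnosticEventPiece(Pos, "Value escapes here");
    }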
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h
new file mode 100644
index 0000000..76d8c15
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/Checker.h
@@ -0,0 +1,441 @@
+//===--- Checker.h - Registration mechanism for checkers --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines Checker, used to create and register checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SA_CORE_CHECKER
+#define LLVM_CLANG_SA_CORE_CHECKER
+
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/Support/Casting.h"
+
+namespace clang {
+namespace ento {
+ class BugReporter;
+
+namespace check {
+
+struct _VoidCheck {
+ static void _register(void *checker, CheckerManager &mgr) { }
+};
+
+template <typename DECL>
+class ASTDecl {
+ template <typename CHECKER>
+ static void _checkDecl(void *checker, const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) {
+ ((const CHECKER *)checker)->checkASTDecl(llvm::cast<DECL>(D), mgr, BR);
+ }
+
+ static bool _handlesDecl(const Decl *D) {
+ return llvm::isa<DECL>(D);
+ }
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForDecl(CheckerManager::CheckDeclFunc(checker,
+ _checkDecl<CHECKER>),
+ _handlesDecl);
+ }
+};
+
+class ASTCodeBody {
+ template <typename CHECKER>
+ static void _checkBody(void *checker, const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) {
+ ((const CHECKER *)checker)->checkASTCodeBody(D, mgr, BR);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForBody(CheckerManager::CheckDeclFunc(checker,
+ _checkBody<CHECKER>));
+ }
+};
+
+class EndOfTranslationUnit {
+ template <typename CHECKER>
+ static void _checkEndOfTranslationUnit(void *checker,
+ const TranslationUnitDecl *TU,
+ AnalysisManager& mgr,
+ BugReporter &BR) {
+ ((const CHECKER *)checker)->checkEndOfTranslationUnit(TU, mgr, BR);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr){
+ mgr._registerForEndOfTranslationUnit(
+ CheckerManager::CheckEndOfTranslationUnit(checker,
+ _checkEndOfTranslationUnit<CHECKER>));
+ }
+};
+
+template <typename STMT>
+class PreStmt {
+ template <typename CHECKER>
+ static void _checkStmt(void *checker, const Stmt *S, CheckerContext &C) {
+ ((const CHECKER *)checker)->checkPreStmt(llvm::cast<STMT>(S), C);
+ }
+
+ static bool _handlesStmt(const Stmt *S) {
+ return llvm::isa<STMT>(S);
+ }
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForPreStmt(CheckerManager::CheckStmtFunc(checker,
+ _checkStmt<CHECKER>),
+ _handlesStmt);
+ }
+};
+
+template <typename STMT>
+class PostStmt {
+ template <typename CHECKER>
+ static void _checkStmt(void *checker, const Stmt *S, CheckerContext &C) {
+ ((const CHECKER *)checker)->checkPostStmt(llvm::cast<STMT>(S), C);
+ }
+
+ static bool _handlesStmt(const Stmt *S) {
+ return llvm::isa<STMT>(S);
+ }
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForPostStmt(CheckerManager::CheckStmtFunc(checker,
+ _checkStmt<CHECKER>),
+ _handlesStmt);
+ }
+};
+
+class PreObjCMessage {
+ template <typename CHECKER>
+ static void _checkObjCMessage(void *checker, const ObjCMessage &msg,
+ CheckerContext &C) {
+ ((const CHECKER *)checker)->checkPreObjCMessage(msg, C);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForPreObjCMessage(
+ CheckerManager::CheckObjCMessageFunc(checker, _checkObjCMessage<CHECKER>));
+ }
+};
+
+class PostObjCMessage {
+ template <typename CHECKER>
+ static void _checkObjCMessage(void *checker, const ObjCMessage &msg,
+ CheckerContext &C) {
+ ((const CHECKER *)checker)->checkPostObjCMessage(msg, C);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForPostObjCMessage(
+ CheckerManager::CheckObjCMessageFunc(checker, _checkObjCMessage<CHECKER>));
+ }
+};
+
+class Location {
+ template <typename CHECKER>
+ static void _checkLocation(void *checker,
+ const SVal &location, bool isLoad, const Stmt *S,
+ CheckerContext &C) {
+ ((const CHECKER *)checker)->checkLocation(location, isLoad, S, C);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForLocation(
+ CheckerManager::CheckLocationFunc(checker, _checkLocation<CHECKER>));
+ }
+};
+
+class Bind {
+ template <typename CHECKER>
+ static void _checkBind(void *checker,
+ const SVal &location, const SVal &val, const Stmt *S,
+ CheckerContext &C) {
+ ((const CHECKER *)checker)->checkBind(location, val, S, C);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForBind(
+ CheckerManager::CheckBindFunc(checker, _checkBind<CHECKER>));
+ }
+};
+
+class EndAnalysis {
+ template <typename CHECKER>
+ static void _checkEndAnalysis(void *checker, ExplodedGraph &G,
+ BugReporter &BR, ExprEngine &Eng) {
+ ((const CHECKER *)checker)->checkEndAnalysis(G, BR, Eng);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForEndAnalysis(
+ CheckerManager::CheckEndAnalysisFunc(checker, _checkEndAnalysis<CHECKER>));
+ }
+};
+
+class EndPath {
+ template <typename CHECKER>
+ static void _checkEndPath(void *checker,
+ CheckerContext &C) {
+ ((const CHECKER *)checker)->checkEndPath(C);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForEndPath(
+ CheckerManager::CheckEndPathFunc(checker, _checkEndPath<CHECKER>));
+ }
+};
+
+class BranchCondition {
+ template <typename CHECKER>
+ static void _checkBranchCondition(void *checker, const Stmt *Condition,
+ CheckerContext & C) {
+ ((const CHECKER *)checker)->checkBranchCondition(Condition, C);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForBranchCondition(
+ CheckerManager::CheckBranchConditionFunc(checker,
+ _checkBranchCondition<CHECKER>));
+ }
+};
+
+class LiveSymbols {
+ template <typename CHECKER>
+ static void _checkLiveSymbols(void *checker, ProgramStateRef state,
+ SymbolReaper &SR) {
+ ((const CHECKER *)checker)->checkLiveSymbols(state, SR);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForLiveSymbols(
+ CheckerManager::CheckLiveSymbolsFunc(checker, _checkLiveSymbols<CHECKER>));
+ }
+};
+
+class DeadSymbols {
+ template <typename CHECKER>
+ static void _checkDeadSymbols(void *checker,
+ SymbolReaper &SR, CheckerContext &C) {
+ ((const CHECKER *)checker)->checkDeadSymbols(SR, C);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForDeadSymbols(
+ CheckerManager::CheckDeadSymbolsFunc(checker, _checkDeadSymbols<CHECKER>));
+ }
+};
+
+class RegionChanges {
+ template <typename CHECKER>
+ static ProgramStateRef
+ _checkRegionChanges(void *checker,
+ ProgramStateRef state,
+ const StoreManager::InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> Explicits,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) {
+ return ((const CHECKER *)checker)->checkRegionChanges(state, invalidated,
+ Explicits, Regions, Call);
+ }
+ template <typename CHECKER>
+ static bool _wantsRegionChangeUpdate(void *checker,
+ ProgramStateRef state) {
+ return ((const CHECKER *)checker)->wantsRegionChangeUpdate(state);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForRegionChanges(
+ CheckerManager::CheckRegionChangesFunc(checker,
+ _checkRegionChanges<CHECKER>),
+ CheckerManager::WantsRegionChangeUpdateFunc(checker,
+ _wantsRegionChangeUpdate<CHECKER>));
+ }
+};
+
+template <typename EVENT>
+class Event {
+ template <typename CHECKER>
+ static void _checkEvent(void *checker, const void *event) {
+ ((const CHECKER *)checker)->checkEvent(*(const EVENT *)event);
+ }
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerListenerForEvent<EVENT>(
+ CheckerManager::CheckEventFunc(checker, _checkEvent<CHECKER>));
+ }
+};
+
+} // end check namespace
+
+namespace eval {
+
+class Assume {
+ template <typename CHECKER>
+ static ProgramStateRef _evalAssume(void *checker,
+ ProgramStateRef state,
+ const SVal &cond,
+ bool assumption) {
+ return ((const CHECKER *)checker)->evalAssume(state, cond, assumption);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForEvalAssume(
+ CheckerManager::EvalAssumeFunc(checker, _evalAssume<CHECKER>));
+ }
+};
+
+class Call {
+ template <typename CHECKER>
+ static bool _evalCall(void *checker, const CallExpr *CE, CheckerContext &C) {
+ return ((const CHECKER *)checker)->evalCall(CE, C);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForEvalCall(
+ CheckerManager::EvalCallFunc(checker, _evalCall<CHECKER>));
+ }
+};
+
+class InlineCall {
+ template <typename CHECKER>
+ static bool _inlineCall(void *checker, const CallExpr *CE,
+ ExprEngine &Eng,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ return ((const CHECKER *)checker)->inlineCall(CE, Eng, Pred, Dst);
+ }
+
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerForInlineCall(
+ CheckerManager::InlineCallFunc(checker, _inlineCall<CHECKER>));
+ }
+};
+
+} // end eval namespace
+
+class CheckerBase : public ProgramPointTag {
+public:
+ StringRef getTagDescription() const;
+
+ /// See CheckerManager::runCheckersForPrintState.
+ virtual void printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const { }
+};
+
+template <typename CHECK1, typename CHECK2=check::_VoidCheck,
+ typename CHECK3=check::_VoidCheck, typename CHECK4=check::_VoidCheck,
+ typename CHECK5=check::_VoidCheck, typename CHECK6=check::_VoidCheck,
+ typename CHECK7=check::_VoidCheck, typename CHECK8=check::_VoidCheck,
+ typename CHECK9=check::_VoidCheck, typename CHECK10=check::_VoidCheck,
+ typename CHECK11=check::_VoidCheck,typename CHECK12=check::_VoidCheck,
+ typename CHECK13=check::_VoidCheck,typename CHECK14=check::_VoidCheck,
+ typename CHECK15=check::_VoidCheck,typename CHECK16=check::_VoidCheck,
+ typename CHECK17=check::_VoidCheck,typename CHECK18=check::_VoidCheck>
+class Checker;
+
+template <>
+class Checker<check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+ check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+ check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+ check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+ check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+ check::_VoidCheck, check::_VoidCheck, check::_VoidCheck>
+ : public CheckerBase
+{
+ virtual void anchor();
+public:
+ static void _register(void *checker, CheckerManager &mgr) { }
+};
+
+template <typename CHECK1, typename CHECK2, typename CHECK3, typename CHECK4,
+ typename CHECK5, typename CHECK6, typename CHECK7, typename CHECK8,
+ typename CHECK9, typename CHECK10,typename CHECK11,typename CHECK12,
+ typename CHECK13,typename CHECK14,typename CHECK15,typename CHECK16,
+ typename CHECK17,typename CHECK18>
+class Checker
+ : public CHECK1,
+ public Checker<CHECK2, CHECK3, CHECK4, CHECK5, CHECK6, CHECK7, CHECK8,
+ CHECK9, CHECK10,CHECK11,CHECK12,CHECK13,CHECK14,CHECK15,
+ CHECK16,CHECK17,CHECK18> {
+public:
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ CHECK1::_register(checker, mgr);
+ Checker<CHECK2, CHECK3, CHECK4, CHECK5, CHECK6, CHECK7, CHECK8,
+ CHECK9, CHECK10,CHECK11,CHECK12,CHECK13,CHECK14,CHECK15,
+ CHECK16,CHECK17,CHECK18>::_register(checker, mgr);
+ }
+};
+
+template <typename EVENT>
+class EventDispatcher {
+ CheckerManager *Mgr;
+public:
+ EventDispatcher() : Mgr(0) { }
+
+ template <typename CHECKER>
+ static void _register(CHECKER *checker, CheckerManager &mgr) {
+ mgr._registerDispatcherForEvent<EVENT>();
+ static_cast<EventDispatcher<EVENT> *>(checker)->Mgr = &mgr;
+ }
+
+ void dispatchEvent(const EVENT &event) const {
+ Mgr->_dispatchEvent(event);
+ }
+};
+
+/// \brief We dereferenced a location that may be null.
+struct ImplicitNullDerefEvent {
+ SVal Location;
+ bool IsLoad;
+ ExplodedNode *SinkNode;
+ BugReporter *BR;
+};
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
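To show how the check:: classes and the Checker<> template compose, here is the conventional checker skeleton of this era, kept as a sketch: MainCallChecker is a placeholder name (borrowed from the CheckerRegistry comments later in this change), and CheckerContext comes from a PathSensitive header that is not part of this hunk.

    #include "clang/StaticAnalyzer/Core/Checker.h"
    #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"

    using namespace clang;
    using namespace ento;

    namespace {
    // Deriving from Checker<check::PreStmt<CallExpr> > lets
    // check::PreStmt<CallExpr>::_register wire checkPreStmt into the
    // CheckerManager for every CallExpr about to be visited.
    class MainCallChecker : public Checker< check::PreStmt<CallExpr> > {
    public:
      void checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
        // A real checker would inspect CE here and emit reports through C.
      }
    };
    } // end anonymous namespace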
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
new file mode 100644
index 0000000..d215f99
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerManager.h
@@ -0,0 +1,594 @@
+//===--- CheckerManager.h - Static Analyzer Checker Manager -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the Static Analyzer Checker Manager.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SA_CORE_CHECKERMANAGER_H
+#define LLVM_CLANG_SA_CORE_CHECKERMANAGER_H
+
+#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include <vector>
+
+namespace clang {
+ class Decl;
+ class Stmt;
+ class CallExpr;
+
+namespace ento {
+ class CheckerBase;
+ class ExprEngine;
+ class AnalysisManager;
+ class BugReporter;
+ class CheckerContext;
+ class ObjCMessage;
+ class SVal;
+ class ExplodedNode;
+ class ExplodedNodeSet;
+ class ExplodedGraph;
+ class ProgramState;
+ class NodeBuilder;
+ struct NodeBuilderContext;
+ class MemRegion;
+ class SymbolReaper;
+
+class GraphExpander {
+public:
+ virtual ~GraphExpander();
+ virtual void expandGraph(ExplodedNodeSet &Dst, ExplodedNode *Pred) = 0;
+};
+
+template <typename T> class CheckerFn;
+
+template <typename RET, typename P1, typename P2, typename P3, typename P4,
+ typename P5>
+class CheckerFn<RET(P1, P2, P3, P4, P5)> {
+ typedef RET (*Func)(void *, P1, P2, P3, P4, P5);
+ Func Fn;
+public:
+ CheckerBase *Checker;
+ CheckerFn(CheckerBase *checker, Func fn) : Fn(fn), Checker(checker) { }
+ RET operator()(P1 p1, P2 p2, P3 p3, P4 p4, P5 p5) const {
+ return Fn(Checker, p1, p2, p3, p4, p5);
+ }
+};
+
+template <typename RET, typename P1, typename P2, typename P3, typename P4>
+class CheckerFn<RET(P1, P2, P3, P4)> {
+ typedef RET (*Func)(void *, P1, P2, P3, P4);
+ Func Fn;
+public:
+ CheckerBase *Checker;
+ CheckerFn(CheckerBase *checker, Func fn) : Fn(fn), Checker(checker) { }
+ RET operator()(P1 p1, P2 p2, P3 p3, P4 p4) const {
+ return Fn(Checker, p1, p2, p3, p4);
+ }
+};
+
+template <typename RET, typename P1, typename P2, typename P3>
+class CheckerFn<RET(P1, P2, P3)> {
+ typedef RET (*Func)(void *, P1, P2, P3);
+ Func Fn;
+public:
+ CheckerBase *Checker;
+ CheckerFn(CheckerBase *checker, Func fn) : Fn(fn), Checker(checker) { }
+ RET operator()(P1 p1, P2 p2, P3 p3) const { return Fn(Checker, p1, p2, p3); }
+};
+
+template <typename RET, typename P1, typename P2>
+class CheckerFn<RET(P1, P2)> {
+ typedef RET (*Func)(void *, P1, P2);
+ Func Fn;
+public:
+ CheckerBase *Checker;
+ CheckerFn(CheckerBase *checker, Func fn) : Fn(fn), Checker(checker) { }
+ RET operator()(P1 p1, P2 p2) const { return Fn(Checker, p1, p2); }
+};
+
+template <typename RET, typename P1>
+class CheckerFn<RET(P1)> {
+ typedef RET (*Func)(void *, P1);
+ Func Fn;
+public:
+ CheckerBase *Checker;
+ CheckerFn(CheckerBase *checker, Func fn) : Fn(fn), Checker(checker) { }
+ RET operator()(P1 p1) const { return Fn(Checker, p1); }
+};
+
+template <typename RET>
+class CheckerFn<RET()> {
+ typedef RET (*Func)(void *);
+ Func Fn;
+public:
+ CheckerBase *Checker;
+ CheckerFn(CheckerBase *checker, Func fn) : Fn(fn), Checker(checker) { }
+ RET operator()() const { return Fn(Checker); }
+};
+
+class CheckerManager {
+ const LangOptions LangOpts;
+
+public:
+ CheckerManager(const LangOptions &langOpts) : LangOpts(langOpts) { }
+ ~CheckerManager();
+
+ bool hasPathSensitiveCheckers() const;
+
+ void finishedCheckerRegistration();
+
+ const LangOptions &getLangOpts() const { return LangOpts; }
+
+ typedef CheckerBase *CheckerRef;
+ typedef const void *CheckerTag;
+ typedef CheckerFn<void ()> CheckerDtor;
+
+//===----------------------------------------------------------------------===//
+// registerChecker
+//===----------------------------------------------------------------------===//
+
+ /// \brief Used to register checkers.
+ ///
+ /// \returns a pointer to the checker object.
+ template <typename CHECKER>
+ CHECKER *registerChecker() {
+ CheckerTag tag = getTag<CHECKER>();
+ CheckerRef &ref = CheckerTags[tag];
+ if (ref)
+ return static_cast<CHECKER *>(ref); // already registered.
+
+ CHECKER *checker = new CHECKER();
+ CheckerDtors.push_back(CheckerDtor(checker, destruct<CHECKER>));
+ CHECKER::_register(checker, *this);
+ ref = checker;
+ return checker;
+ }
+
+//===----------------------------------------------------------------------===//
+// Functions for running checkers during AST traversal.
+//===----------------------------------------------------------------------===//
+
+ /// \brief Run checkers handling Decls.
+ void runCheckersOnASTDecl(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR);
+
+ /// \brief Run checkers handling Decls containing a Stmt body.
+ void runCheckersOnASTBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR);
+
+//===----------------------------------------------------------------------===//
+// Functions for running checkers for path-sensitive checking.
+//===----------------------------------------------------------------------===//
+
+ /// \brief Run checkers for pre-visiting Stmts.
+ ///
+ /// The notification is performed for every explored CFGElement, which does
+ /// not include the control flow statements such as IfStmt.
+ ///
+ /// \sa runCheckersForBranchCondition, runCheckersForPostStmt
+ void runCheckersForPreStmt(ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ const Stmt *S,
+ ExprEngine &Eng) {
+ runCheckersForStmt(/*isPreVisit=*/true, Dst, Src, S, Eng);
+ }
+
+ /// \brief Run checkers for post-visiting Stmts.
+ ///
+ /// The notification is performed for every explored CFGElement, which does
+ /// not include the control flow statements such as IfStmt.
+ ///
+ /// \sa runCheckersForBranchCondition, runCheckersForPreStmt
+ void runCheckersForPostStmt(ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ const Stmt *S,
+ ExprEngine &Eng,
+ bool wasInlined = false) {
+ runCheckersForStmt(/*isPreVisit=*/false, Dst, Src, S, Eng, wasInlined);
+ }
+
+ /// \brief Run checkers for visiting Stmts.
+ void runCheckersForStmt(bool isPreVisit,
+ ExplodedNodeSet &Dst, const ExplodedNodeSet &Src,
+ const Stmt *S, ExprEngine &Eng,
+ bool wasInlined = false);
+
+ /// \brief Run checkers for pre-visiting obj-c messages.
+ void runCheckersForPreObjCMessage(ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ const ObjCMessage &msg,
+ ExprEngine &Eng) {
+ runCheckersForObjCMessage(/*isPreVisit=*/true, Dst, Src, msg, Eng);
+ }
+
+ /// \brief Run checkers for post-visiting obj-c messages.
+ void runCheckersForPostObjCMessage(ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ const ObjCMessage &msg,
+ ExprEngine &Eng) {
+ runCheckersForObjCMessage(/*isPreVisit=*/false, Dst, Src, msg, Eng);
+ }
+
+ /// \brief Run checkers for visiting obj-c messages.
+ void runCheckersForObjCMessage(bool isPreVisit,
+ ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ const ObjCMessage &msg, ExprEngine &Eng);
+
+ /// \brief Run checkers for load/store of a location.
+ void runCheckersForLocation(ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ SVal location,
+ bool isLoad,
+ const Stmt *NodeEx,
+ const Stmt *BoundEx,
+ ExprEngine &Eng);
+
+ /// \brief Run checkers for binding of a value to a location.
+ void runCheckersForBind(ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ SVal location, SVal val,
+ const Stmt *S, ExprEngine &Eng,
+ ProgramPoint::Kind PointKind);
+
+ /// \brief Run checkers for end of analysis.
+ void runCheckersForEndAnalysis(ExplodedGraph &G, BugReporter &BR,
+ ExprEngine &Eng);
+
+ /// \brief Run checkers for end of path.
+ void runCheckersForEndPath(NodeBuilderContext &BC,
+ ExplodedNodeSet &Dst,
+ ExprEngine &Eng);
+
+ /// \brief Run checkers for branch condition.
+ void runCheckersForBranchCondition(const Stmt *condition,
+ ExplodedNodeSet &Dst, ExplodedNode *Pred,
+ ExprEngine &Eng);
+
+ /// \brief Run checkers for live symbols.
+ ///
+  /// Allows modifying the SymbolReaper object. For example, checkers can
+  /// explicitly register symbols of interest as live. These symbols will not
+  /// be marked dead and removed.
+ void runCheckersForLiveSymbols(ProgramStateRef state,
+ SymbolReaper &SymReaper);
+
+ /// \brief Run checkers for dead symbols.
+ ///
+ /// Notifies checkers when symbols become dead. For example, this allows
+ /// checkers to aggressively clean up/reduce the checker state and produce
+ /// precise diagnostics.
+ void runCheckersForDeadSymbols(ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ SymbolReaper &SymReaper, const Stmt *S,
+ ExprEngine &Eng);
+
+ /// \brief True if at least one checker wants to check region changes.
+ bool wantsRegionChangeUpdate(ProgramStateRef state);
+
+ /// \brief Run checkers for region changes.
+ ///
+ /// This corresponds to the check::RegionChanges callback.
+ /// \param state The current program state.
+ /// \param invalidated A set of all symbols potentially touched by the change.
+ /// \param ExplicitRegions The regions explicitly requested for invalidation.
+ /// For example, in the case of a function call, these would be arguments.
+ /// \param Regions The transitive closure of accessible regions,
+ /// i.e. all regions that may have been touched by this change.
+  /// \param Call The call expression wrapper if the regions are invalidated
+  /// by a call.
+ ProgramStateRef
+ runCheckersForRegionChanges(ProgramStateRef state,
+ const StoreManager::InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call);
+
+ /// \brief Run checkers for handling assumptions on symbolic values.
+ ProgramStateRef runCheckersForEvalAssume(ProgramStateRef state,
+ SVal Cond, bool Assumption);
+
+ /// \brief Run checkers for evaluating a call.
+ void runCheckersForEvalCall(ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ const CallExpr *CE, ExprEngine &Eng,
+ GraphExpander *defaultEval = 0);
+
+ /// \brief Run checkers for the entire Translation Unit.
+ void runCheckersOnEndOfTranslationUnit(const TranslationUnitDecl *TU,
+ AnalysisManager &mgr,
+ BugReporter &BR);
+
+ /// \brief Run checkers for debug-printing a ProgramState.
+ ///
+ /// Unlike most other callbacks, any checker can simply implement the virtual
+ /// method CheckerBase::printState if it has custom data to print.
+ /// \param Out The output stream
+ /// \param State The state being printed
+ /// \param NL The preferred representation of a newline.
+ /// \param Sep The preferred separator between different kinds of data.
+ void runCheckersForPrintState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep);
+
+//===----------------------------------------------------------------------===//
+// Internal registration functions for AST traversal.
+//===----------------------------------------------------------------------===//
+
+ // Functions used by the registration mechanism, checkers should not touch
+ // these directly.
+
+ typedef CheckerFn<void (const Decl *, AnalysisManager&, BugReporter &)>
+ CheckDeclFunc;
+
+ typedef bool (*HandlesDeclFunc)(const Decl *D);
+ void _registerForDecl(CheckDeclFunc checkfn, HandlesDeclFunc isForDeclFn);
+
+ void _registerForBody(CheckDeclFunc checkfn);
+
+//===----------------------------------------------------------------------===//
+// Internal registration functions for path-sensitive checking.
+//===----------------------------------------------------------------------===//
+
+ typedef CheckerFn<void (const Stmt *, CheckerContext &)> CheckStmtFunc;
+
+ typedef CheckerFn<void (const ObjCMessage &, CheckerContext &)>
+ CheckObjCMessageFunc;
+
+ typedef CheckerFn<void (const SVal &location, bool isLoad,
+ const Stmt *S,
+ CheckerContext &)>
+ CheckLocationFunc;
+
+ typedef CheckerFn<void (const SVal &location, const SVal &val,
+ const Stmt *S, CheckerContext &)>
+ CheckBindFunc;
+
+ typedef CheckerFn<void (ExplodedGraph &, BugReporter &, ExprEngine &)>
+ CheckEndAnalysisFunc;
+
+ typedef CheckerFn<void (CheckerContext &)>
+ CheckEndPathFunc;
+
+ typedef CheckerFn<void (const Stmt *, CheckerContext &)>
+ CheckBranchConditionFunc;
+
+ typedef CheckerFn<void (SymbolReaper &, CheckerContext &)>
+ CheckDeadSymbolsFunc;
+
+ typedef CheckerFn<void (ProgramStateRef,SymbolReaper &)> CheckLiveSymbolsFunc;
+
+ typedef CheckerFn<ProgramStateRef (ProgramStateRef,
+ const StoreManager::InvalidatedSymbols *symbols,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call)>
+ CheckRegionChangesFunc;
+
+ typedef CheckerFn<bool (ProgramStateRef)> WantsRegionChangeUpdateFunc;
+
+ typedef CheckerFn<ProgramStateRef (ProgramStateRef,
+ const SVal &cond, bool assumption)>
+ EvalAssumeFunc;
+
+ typedef CheckerFn<bool (const CallExpr *, CheckerContext &)>
+ EvalCallFunc;
+
+ typedef CheckerFn<bool (const CallExpr *, ExprEngine &Eng,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst)>
+ InlineCallFunc;
+
+ typedef CheckerFn<void (const TranslationUnitDecl *,
+ AnalysisManager&, BugReporter &)>
+ CheckEndOfTranslationUnit;
+
+ typedef bool (*HandlesStmtFunc)(const Stmt *D);
+ void _registerForPreStmt(CheckStmtFunc checkfn,
+ HandlesStmtFunc isForStmtFn);
+ void _registerForPostStmt(CheckStmtFunc checkfn,
+ HandlesStmtFunc isForStmtFn);
+
+ void _registerForPreObjCMessage(CheckObjCMessageFunc checkfn);
+ void _registerForPostObjCMessage(CheckObjCMessageFunc checkfn);
+
+ void _registerForLocation(CheckLocationFunc checkfn);
+
+ void _registerForBind(CheckBindFunc checkfn);
+
+ void _registerForEndAnalysis(CheckEndAnalysisFunc checkfn);
+
+ void _registerForEndPath(CheckEndPathFunc checkfn);
+
+ void _registerForBranchCondition(CheckBranchConditionFunc checkfn);
+
+ void _registerForLiveSymbols(CheckLiveSymbolsFunc checkfn);
+
+ void _registerForDeadSymbols(CheckDeadSymbolsFunc checkfn);
+
+ void _registerForRegionChanges(CheckRegionChangesFunc checkfn,
+ WantsRegionChangeUpdateFunc wantUpdateFn);
+
+ void _registerForEvalAssume(EvalAssumeFunc checkfn);
+
+ void _registerForEvalCall(EvalCallFunc checkfn);
+
+ void _registerForInlineCall(InlineCallFunc checkfn);
+
+ void _registerForEndOfTranslationUnit(CheckEndOfTranslationUnit checkfn);
+
+//===----------------------------------------------------------------------===//
+// Internal registration functions for events.
+//===----------------------------------------------------------------------===//
+
+ typedef void *EventTag;
+ typedef CheckerFn<void (const void *event)> CheckEventFunc;
+
+ template <typename EVENT>
+ void _registerListenerForEvent(CheckEventFunc checkfn) {
+ EventInfo &info = Events[getTag<EVENT>()];
+ info.Checkers.push_back(checkfn);
+ }
+
+ template <typename EVENT>
+ void _registerDispatcherForEvent() {
+ EventInfo &info = Events[getTag<EVENT>()];
+ info.HasDispatcher = true;
+ }
+
+ template <typename EVENT>
+ void _dispatchEvent(const EVENT &event) const {
+ EventsTy::const_iterator I = Events.find(getTag<EVENT>());
+ if (I == Events.end())
+ return;
+ const EventInfo &info = I->second;
+ for (unsigned i = 0, e = info.Checkers.size(); i != e; ++i)
+ info.Checkers[i](&event);
+ }
+
+//===----------------------------------------------------------------------===//
+// Implementation details.
+//===----------------------------------------------------------------------===//
+
+private:
+ template <typename CHECKER>
+ static void destruct(void *obj) { delete static_cast<CHECKER *>(obj); }
+
+ template <typename T>
+ static void *getTag() { static int tag; return &tag; }
+
+ llvm::DenseMap<CheckerTag, CheckerRef> CheckerTags;
+
+ std::vector<CheckerDtor> CheckerDtors;
+
+ struct DeclCheckerInfo {
+ CheckDeclFunc CheckFn;
+ HandlesDeclFunc IsForDeclFn;
+ };
+ std::vector<DeclCheckerInfo> DeclCheckers;
+
+ std::vector<CheckDeclFunc> BodyCheckers;
+
+ typedef SmallVector<CheckDeclFunc, 4> CachedDeclCheckers;
+ typedef llvm::DenseMap<unsigned, CachedDeclCheckers> CachedDeclCheckersMapTy;
+ CachedDeclCheckersMapTy CachedDeclCheckersMap;
+
+ struct StmtCheckerInfo {
+ CheckStmtFunc CheckFn;
+ HandlesStmtFunc IsForStmtFn;
+ bool IsPreVisit;
+ };
+ std::vector<StmtCheckerInfo> StmtCheckers;
+
+ struct CachedStmtCheckersKey {
+ unsigned StmtKind;
+ bool IsPreVisit;
+
+ CachedStmtCheckersKey() : StmtKind(0), IsPreVisit(0) { }
+ CachedStmtCheckersKey(unsigned stmtKind, bool isPreVisit)
+ : StmtKind(stmtKind), IsPreVisit(isPreVisit) { }
+
+ static CachedStmtCheckersKey getSentinel() {
+ return CachedStmtCheckersKey(~0U, 0);
+ }
+ unsigned getHashValue() const {
+ llvm::FoldingSetNodeID ID;
+ ID.AddInteger(StmtKind);
+ ID.AddBoolean(IsPreVisit);
+ return ID.ComputeHash();
+ }
+ bool operator==(const CachedStmtCheckersKey &RHS) const {
+ return StmtKind == RHS.StmtKind && IsPreVisit == RHS.IsPreVisit;
+ }
+ };
+ friend struct llvm::DenseMapInfo<CachedStmtCheckersKey>;
+
+ typedef SmallVector<CheckStmtFunc, 4> CachedStmtCheckers;
+ typedef llvm::DenseMap<CachedStmtCheckersKey, CachedStmtCheckers>
+ CachedStmtCheckersMapTy;
+ CachedStmtCheckersMapTy CachedStmtCheckersMap;
+
+ CachedStmtCheckers *getCachedStmtCheckersFor(const Stmt *S, bool isPreVisit);
+
+ std::vector<CheckObjCMessageFunc> PreObjCMessageCheckers;
+ std::vector<CheckObjCMessageFunc> PostObjCMessageCheckers;
+
+ std::vector<CheckLocationFunc> LocationCheckers;
+
+ std::vector<CheckBindFunc> BindCheckers;
+
+ std::vector<CheckEndAnalysisFunc> EndAnalysisCheckers;
+
+ std::vector<CheckEndPathFunc> EndPathCheckers;
+
+ std::vector<CheckBranchConditionFunc> BranchConditionCheckers;
+
+ std::vector<CheckLiveSymbolsFunc> LiveSymbolsCheckers;
+
+ std::vector<CheckDeadSymbolsFunc> DeadSymbolsCheckers;
+
+ struct RegionChangesCheckerInfo {
+ CheckRegionChangesFunc CheckFn;
+ WantsRegionChangeUpdateFunc WantUpdateFn;
+ };
+ std::vector<RegionChangesCheckerInfo> RegionChangesCheckers;
+
+ std::vector<EvalAssumeFunc> EvalAssumeCheckers;
+
+ std::vector<EvalCallFunc> EvalCallCheckers;
+
+ std::vector<InlineCallFunc> InlineCallCheckers;
+
+ std::vector<CheckEndOfTranslationUnit> EndOfTranslationUnitCheckers;
+
+ struct EventInfo {
+ SmallVector<CheckEventFunc, 4> Checkers;
+ bool HasDispatcher;
+ EventInfo() : HasDispatcher(false) { }
+ };
+
+ typedef llvm::DenseMap<EventTag, EventInfo> EventsTy;
+ EventsTy Events;
+};
+
+} // end ento namespace
+
+} // end clang namespace
+
+namespace llvm {
+ /// Define DenseMapInfo so that CachedStmtCheckersKey can be used as key
+ /// in DenseMap and DenseSets.
+ template <>
+ struct DenseMapInfo<clang::ento::CheckerManager::CachedStmtCheckersKey> {
+ static inline clang::ento::CheckerManager::CachedStmtCheckersKey
+ getEmptyKey() {
+ return clang::ento::CheckerManager::CachedStmtCheckersKey();
+ }
+ static inline clang::ento::CheckerManager::CachedStmtCheckersKey
+ getTombstoneKey() {
+ return clang::ento::CheckerManager::CachedStmtCheckersKey::getSentinel();
+ }
+
+ static unsigned
+ getHashValue(clang::ento::CheckerManager::CachedStmtCheckersKey S) {
+ return S.getHashValue();
+ }
+
+ static bool isEqual(clang::ento::CheckerManager::CachedStmtCheckersKey LHS,
+ clang::ento::CheckerManager::CachedStmtCheckersKey RHS) {
+ return LHS == RHS;
+ }
+ };
+} // end namespace llvm
+
+#endif
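A small sketch of the registration step the manager expects, reusing the hypothetical MainCallChecker from the previous sketch; registerChecker<T>() is the only API used and is declared in the header above.

    // A free function with the void(CheckerManager &) shape used by
    // checker registration code.
    static void registerMainCallChecker(clang::ento::CheckerManager &Mgr) {
      // Default-constructs the checker once, records a destructor for it,
      // and lets MainCallChecker::_register hook up its callbacks.
      Mgr.registerChecker<MainCallChecker>();
    }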
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerOptInfo.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerOptInfo.h
new file mode 100644
index 0000000..6ce5b3c
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerOptInfo.h
@@ -0,0 +1,43 @@
+//===--- CheckerOptInfo.h - Specifies which checkers to use -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_STATICANALYZER_CORE_CHECKEROPTINFO_H
+#define LLVM_CLANG_STATICANALYZER_CORE_CHECKEROPTINFO_H
+
+#include "clang/Basic/LLVM.h"
+
+namespace clang {
+namespace ento {
+
+/// Represents a request to include or exclude a checker or package from a
+/// specific analysis run.
+///
+/// \sa CheckerRegistry::initializeManager
+class CheckerOptInfo {
+ StringRef Name;
+ bool Enable;
+ bool Claimed;
+
+public:
+ CheckerOptInfo(StringRef name, bool enable)
+ : Name(name), Enable(enable), Claimed(false) { }
+
+ StringRef getName() const { return Name; }
+ bool isEnabled() const { return Enable; }
+ bool isDisabled() const { return !isEnabled(); }
+
+ bool isClaimed() const { return Claimed; }
+ bool isUnclaimed() const { return !isClaimed(); }
+ void claim() { Claimed = true; }
+};
+
+} // end namespace ento
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerRegistry.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerRegistry.h
new file mode 100644
index 0000000..1452d45
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/CheckerRegistry.h
@@ -0,0 +1,134 @@
+//===--- CheckerRegistry.h - Maintains all available checkers ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_STATICANALYZER_CORE_CHECKERREGISTRY_H
+#define LLVM_CLANG_STATICANALYZER_CORE_CHECKERREGISTRY_H
+
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/Basic/LLVM.h"
+#include <vector>
+
+// FIXME: move this information to an HTML file in docs/.
+// At the very least, a checker plugin is a dynamic library that exports
+// clang_analyzerAPIVersionString. This should be defined as follows:
+//
+// extern "C"
+// const char clang_analyzerAPIVersionString[] =
+// CLANG_ANALYZER_API_VERSION_STRING;
+//
+// This is used to check whether the current version of the analyzer is known to
+// be incompatible with a plugin. Plugins with incompatible version strings,
+// or without a version string at all, will not be loaded.
+//
+// To add a custom checker to the analyzer, the plugin must also define the
+// function clang_registerCheckers. For example:
+//
+// extern "C"
+// void clang_registerCheckers (CheckerRegistry &registry) {
+// registry.addChecker<MainCallChecker>("example.MainCallChecker",
+// "Disallows calls to functions called main");
+// }
+//
+// The first method argument is the full name of the checker, including its
+// enclosing package. By convention, the registered name of a checker is the
+// name of the associated class (the template argument).
+// The second method argument is a short human-readable description of the
+// checker.
+//
+// The clang_registerCheckers function may add any number of checkers to the
+// registry. If any checkers require additional initialization, use the three-
+// argument form of CheckerRegistry::addChecker.
+//
+// To load a checker plugin, specify the full path to the dynamic library as
+// the argument to the -load option in the cc1 frontend. You can then enable
+// your custom checker using the -analyzer-checker option:
+//
+// clang -cc1 -load </path/to/plugin.dylib> -analyze
+// -analyzer-checker=<example.MainCallChecker>
+//
+// For a complete working example, see examples/analyzer-plugin.
+
+
+namespace clang {
+namespace ento {
+
+#ifndef CLANG_ANALYZER_API_VERSION_STRING
+// FIXME: The Clang version string is not particularly granular;
+// the analyzer infrastructure can change a lot between releases.
+// Unfortunately, this string has to be statically embedded in each plugin,
+// so we can't just use the functions defined in Version.h.
+#include "clang/Basic/Version.h"
+#define CLANG_ANALYZER_API_VERSION_STRING CLANG_VERSION_STRING
+#endif
+
+class CheckerOptInfo;
+
+/// Manages a set of available checkers for running a static analysis.
+/// The checkers are organized into packages by full name, where including
+/// a package will recursively include all subpackages and checkers within it.
+/// For example, the checker "core.builtin.NoReturnFunctionChecker" will be
+/// included if initializeManager() is called with an option of "core",
+/// "core.builtin", or the full name "core.builtin.NoReturnFunctionChecker".
+class CheckerRegistry {
+public:
+ /// Initialization functions perform any necessary setup for a checker.
+ /// They should include a call to CheckerManager::registerChecker.
+ typedef void (*InitializationFunction)(CheckerManager &);
+ struct CheckerInfo {
+ InitializationFunction Initialize;
+ StringRef FullName;
+ StringRef Desc;
+
+ CheckerInfo(InitializationFunction fn, StringRef name, StringRef desc)
+ : Initialize(fn), FullName(name), Desc(desc) {}
+ };
+
+ typedef std::vector<CheckerInfo> CheckerInfoList;
+
+private:
+ template <typename T>
+ static void initializeManager(CheckerManager &mgr) {
+ mgr.registerChecker<T>();
+ }
+
+public:
+ /// Adds a checker to the registry. Use this non-templated overload when your
+ /// checker requires custom initialization.
+ void addChecker(InitializationFunction fn, StringRef fullName,
+ StringRef desc);
+
+ /// Adds a checker to the registry. Use this templated overload when your
+ /// checker does not require any custom initialization.
+ template <class T>
+ void addChecker(StringRef fullName, StringRef desc) {
+ // Avoid MSVC's Compiler Error C2276:
+ // http://msdn.microsoft.com/en-us/library/850cstw1(v=VS.80).aspx
+ addChecker(&CheckerRegistry::initializeManager<T>, fullName, desc);
+ }
+
+ /// Initializes a CheckerManager by calling the initialization functions for
+ /// all checkers specified by the given CheckerOptInfo list. The order of this
+ /// list is significant; later options can be used to reverse earlier ones.
+ /// This can be used to exclude certain checkers in an included package.
+ void initializeManager(CheckerManager &mgr,
+ SmallVectorImpl<CheckerOptInfo> &opts) const;
+
+ /// Prints the name and description of all checkers in this registry.
+ /// This output is not intended to be machine-parseable.
+  void printHelp(raw_ostream &out, size_t maxNameChars = 30) const;
+
+private:
+ mutable CheckerInfoList Checkers;
+ mutable llvm::StringMap<size_t> Packages;
+};
+
+} // end namespace ento
+} // end namespace clang
+
+#endif
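The comment block at the top of this header already sketches the plugin contract; a minimal end-to-end version of that sketch might look like the following (hedged: the checker body is a stub, and only the registration hooks mirror what the comment describes):

    #include "clang/StaticAnalyzer/Core/Checker.h"
    #include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
    #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"

    using namespace clang;
    using namespace ento;

    namespace {
    // Stub checker; a real one would inspect the call and report a bug.
    class MainCallChecker : public Checker<check::PreStmt<CallExpr> > {
    public:
      void checkPreStmt(const CallExpr *CE, CheckerContext &C) const {}
    };
    } // end anonymous namespace

    // Hooks the -load'ed plugin must export, as described above.
    extern "C" void clang_registerCheckers(CheckerRegistry &registry) {
      registry.addChecker<MainCallChecker>("example.MainCallChecker",
        "Disallows calls to functions called main");
    }

    extern "C" const char clang_analyzerAPIVersionString[] =
        CLANG_ANALYZER_API_VERSION_STRING;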
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h
new file mode 100644
index 0000000..65be3a4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h
@@ -0,0 +1,46 @@
+//===--- PathDiagnosticClients.h - Path Diagnostic Clients ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface to create different path diagnostic clients.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_PATH_DIAGNOSTIC_CLIENTS_H
+#define LLVM_CLANG_GR_PATH_DIAGNOSTIC_CLIENTS_H
+
+#include <string>
+
+namespace clang {
+
+class Preprocessor;
+
+namespace ento {
+
+class PathDiagnosticConsumer;
+
+PathDiagnosticConsumer*
+createHTMLDiagnosticConsumer(const std::string& prefix, const Preprocessor &PP);
+
+PathDiagnosticConsumer*
+createPlistDiagnosticConsumer(const std::string& prefix, const Preprocessor &PP,
+ PathDiagnosticConsumer *SubPD = 0);
+
+PathDiagnosticConsumer*
+createPlistMultiFileDiagnosticConsumer(const std::string& prefix,
+ const Preprocessor &PP);
+
+PathDiagnosticConsumer*
+createTextPathDiagnosticConsumer(const std::string& prefix,
+ const Preprocessor &PP);
+
+} // end GR namespace
+
+} // end clang namespace
+
+#endif
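A hedged sketch of how a driver could dispatch over the factory functions declared above; the OutputKind enum and makeConsumer helper are hypothetical, not part of this header:

    #include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"

    namespace {
    enum OutputKind { HTML, Plist, Text };  // hypothetical driver-side setting
    }

    static clang::ento::PathDiagnosticConsumer *
    makeConsumer(OutputKind K, const std::string &prefix,
                 const clang::Preprocessor &PP) {
      using namespace clang::ento;
      switch (K) {
      case HTML:  return createHTMLDiagnosticConsumer(prefix, PP);
      case Plist: return createPlistDiagnosticConsumer(prefix, PP);
      case Text:  return createTextPathDiagnosticConsumer(prefix, PP);
      }
      return 0;
    }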
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h
new file mode 100644
index 0000000..3cbecf7
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h
@@ -0,0 +1,220 @@
+//== AnalysisManager.h - Path sensitive analysis data manager ------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AnalysisManager class that manages the data and policy
+// for path sensitive analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_ANALYSISMANAGER_H
+#define LLVM_CLANG_GR_ANALYSISMANAGER_H
+
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Frontend/AnalyzerOptions.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+
+namespace clang {
+
+namespace idx {
+ class Indexer;
+ class TranslationUnit;
+}
+
+namespace ento {
+ class CheckerManager;
+
+class AnalysisManager : public BugReporterData {
+ virtual void anchor();
+ AnalysisDeclContextManager AnaCtxMgr;
+
+ ASTContext &Ctx;
+ DiagnosticsEngine &Diags;
+ const LangOptions &LangOpts;
+
+ OwningPtr<PathDiagnosticConsumer> PD;
+
+ // Configurable components creators.
+ StoreManagerCreator CreateStoreMgr;
+ ConstraintManagerCreator CreateConstraintMgr;
+
+ CheckerManager *CheckerMgr;
+
+ /// \brief Provide function definitions in other translation units. This is
+ /// NULL if we don't have multiple translation units. AnalysisManager does
+ /// not own the Indexer.
+ idx::Indexer *Idxer;
+
+ enum AnalysisScope { ScopeTU, ScopeDecl } AScope;
+
+ /// \brief The maximum number of exploded nodes the analyzer will generate.
+ unsigned MaxNodes;
+
+ /// \brief The maximum number of times the analyzer visits a block.
+ unsigned MaxVisit;
+
+ bool VisualizeEGDot;
+ bool VisualizeEGUbi;
+ AnalysisPurgeMode PurgeDead;
+
+  /// \brief The flag regulates whether we should eagerly assume evaluations of
+  /// conditionals, thus bifurcating the path.
+ ///
+ /// EagerlyAssume - A flag indicating how the engine should handle
+ /// expressions such as: 'x = (y != 0)'. When this flag is true then
+ /// the subexpression 'y != 0' will be eagerly assumed to be true or false,
+ /// thus evaluating it to the integers 0 or 1 respectively. The upside
+ /// is that this can increase analysis precision until we have a better way
+ /// to lazily evaluate such logic. The downside is that it eagerly
+ /// bifurcates paths.
+ bool EagerlyAssume;
+ bool TrimGraph;
+ bool EagerlyTrimEGraph;
+
+public:
+  /// \brief The inter-procedural analysis mode.
+ AnalysisIPAMode IPAMode;
+
+ // Settings for inlining tuning.
+ /// \brief The inlining stack depth limit.
+ unsigned InlineMaxStackDepth;
+ /// \brief The max number of basic blocks in a function being inlined.
+ unsigned InlineMaxFunctionSize;
+ /// \brief The mode of function selection used during inlining.
+ AnalysisInliningMode InliningMode;
+
+ /// \brief Do not re-analyze paths leading to exhausted nodes with a different
+ /// strategy. We get better code coverage when retry is enabled.
+ bool NoRetryExhausted;
+
+public:
+ AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
+ const LangOptions &lang, PathDiagnosticConsumer *pd,
+ StoreManagerCreator storemgr,
+ ConstraintManagerCreator constraintmgr,
+ CheckerManager *checkerMgr,
+ idx::Indexer *idxer,
+ unsigned maxnodes, unsigned maxvisit,
+ bool vizdot, bool vizubi, AnalysisPurgeMode purge,
+ bool eager, bool trim,
+ bool useUnoptimizedCFG,
+ bool addImplicitDtors, bool addInitializers,
+ bool eagerlyTrimEGraph,
+ AnalysisIPAMode ipa,
+ unsigned inlineMaxStack,
+ unsigned inlineMaxFunctionSize,
+ AnalysisInliningMode inliningMode,
+ bool NoRetry);
+
+ /// Construct a clone of the given AnalysisManager with the given ASTContext
+ /// and DiagnosticsEngine.
+ AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
+ AnalysisManager &ParentAM);
+
+ ~AnalysisManager() { FlushDiagnostics(); }
+
+ void ClearContexts() {
+ AnaCtxMgr.clear();
+ }
+
+ AnalysisDeclContextManager& getAnalysisDeclContextManager() {
+ return AnaCtxMgr;
+ }
+
+ StoreManagerCreator getStoreManagerCreator() {
+ return CreateStoreMgr;
+ }
+
+ ConstraintManagerCreator getConstraintManagerCreator() {
+ return CreateConstraintMgr;
+ }
+
+ CheckerManager *getCheckerManager() const { return CheckerMgr; }
+
+ idx::Indexer *getIndexer() const { return Idxer; }
+
+ virtual ASTContext &getASTContext() {
+ return Ctx;
+ }
+
+ virtual SourceManager &getSourceManager() {
+ return getASTContext().getSourceManager();
+ }
+
+ virtual DiagnosticsEngine &getDiagnostic() {
+ return Diags;
+ }
+
+ const LangOptions &getLangOpts() const {
+ return LangOpts;
+ }
+
+ virtual PathDiagnosticConsumer *getPathDiagnosticConsumer() {
+ return PD.get();
+ }
+
+ void FlushDiagnostics() {
+ if (PD.get())
+ PD->FlushDiagnostics(0);
+ }
+
+ unsigned getMaxNodes() const { return MaxNodes; }
+
+ unsigned getMaxVisit() const { return MaxVisit; }
+
+ bool shouldVisualizeGraphviz() const { return VisualizeEGDot; }
+
+ bool shouldVisualizeUbigraph() const { return VisualizeEGUbi; }
+
+ bool shouldVisualize() const {
+ return VisualizeEGDot || VisualizeEGUbi;
+ }
+
+ bool shouldEagerlyTrimExplodedGraph() const { return EagerlyTrimEGraph; }
+
+ bool shouldTrimGraph() const { return TrimGraph; }
+
+ AnalysisPurgeMode getPurgeMode() const { return PurgeDead; }
+
+ bool shouldEagerlyAssume() const { return EagerlyAssume; }
+
+ bool shouldInlineCall() const { return (IPAMode == Inlining); }
+
+ bool hasIndexer() const { return Idxer != 0; }
+
+ AnalysisDeclContext *getAnalysisDeclContextInAnotherTU(const Decl *D);
+
+ CFG *getCFG(Decl const *D) {
+ return AnaCtxMgr.getContext(D)->getCFG();
+ }
+
+ template <typename T>
+ T *getAnalysis(Decl const *D) {
+ return AnaCtxMgr.getContext(D)->getAnalysis<T>();
+ }
+
+ ParentMap &getParentMap(Decl const *D) {
+ return AnaCtxMgr.getContext(D)->getParentMap();
+ }
+
+ AnalysisDeclContext *getAnalysisDeclContext(const Decl *D) {
+ return AnaCtxMgr.getContext(D);
+ }
+
+ AnalysisDeclContext *getAnalysisDeclContext(const Decl *D, idx::TranslationUnit *TU) {
+ return AnaCtxMgr.getContext(D, TU);
+ }
+
+};
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
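To make the EagerlyAssume comment above concrete, this is the shape of analyzed source it is talking about; with the flag on, the analyzer bifurcates at the comparison instead of carrying '(y != 0)' around unevaluated (illustrative code, not part of the analyzer):

    // Code under analysis, not analyzer code.
    int flagOf(int y) {
      int x = (y != 0);  // EagerlyAssume: path 1 assumes y != 0, so x == 1;
                         //                path 2 assumes y == 0, so x == 0.
      return x;
    }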
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
new file mode 100644
index 0000000..9a699f9
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h
@@ -0,0 +1,199 @@
+//=== BasicValueFactory.h - Basic values for Path Sens analysis --*- C++ -*---//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BasicValueFactory, a class that manages the lifetime
+// of APSInt objects and symbolic constraints used by ExprEngine
+// and related classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_BASICVALUEFACTORY_H
+#define LLVM_CLANG_GR_BASICVALUEFACTORY_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+
+namespace clang {
+namespace ento {
+
+class CompoundValData : public llvm::FoldingSetNode {
+ QualType T;
+ llvm::ImmutableList<SVal> L;
+
+public:
+ CompoundValData(QualType t, llvm::ImmutableList<SVal> l)
+ : T(t), L(l) {}
+
+ typedef llvm::ImmutableList<SVal>::iterator iterator;
+ iterator begin() const { return L.begin(); }
+ iterator end() const { return L.end(); }
+
+ static void Profile(llvm::FoldingSetNodeID& ID, QualType T,
+ llvm::ImmutableList<SVal> L);
+
+ void Profile(llvm::FoldingSetNodeID& ID) { Profile(ID, T, L); }
+};
+
+class LazyCompoundValData : public llvm::FoldingSetNode {
+ StoreRef store;
+ const TypedValueRegion *region;
+public:
+ LazyCompoundValData(const StoreRef &st, const TypedValueRegion *r)
+ : store(st), region(r) {}
+
+ const void *getStore() const { return store.getStore(); }
+ const TypedValueRegion *getRegion() const { return region; }
+
+ static void Profile(llvm::FoldingSetNodeID& ID,
+ const StoreRef &store,
+ const TypedValueRegion *region);
+
+ void Profile(llvm::FoldingSetNodeID& ID) { Profile(ID, store, region); }
+};
+
+class BasicValueFactory {
+ typedef llvm::FoldingSet<llvm::FoldingSetNodeWrapper<llvm::APSInt> >
+ APSIntSetTy;
+
+ ASTContext &Ctx;
+ llvm::BumpPtrAllocator& BPAlloc;
+
+ APSIntSetTy APSIntSet;
+ void * PersistentSVals;
+ void * PersistentSValPairs;
+
+ llvm::ImmutableList<SVal>::Factory SValListFactory;
+ llvm::FoldingSet<CompoundValData> CompoundValDataSet;
+ llvm::FoldingSet<LazyCompoundValData> LazyCompoundValDataSet;
+
+public:
+ BasicValueFactory(ASTContext &ctx, llvm::BumpPtrAllocator& Alloc)
+ : Ctx(ctx), BPAlloc(Alloc), PersistentSVals(0), PersistentSValPairs(0),
+ SValListFactory(Alloc) {}
+
+ ~BasicValueFactory();
+
+ ASTContext &getContext() const { return Ctx; }
+
+ const llvm::APSInt& getValue(const llvm::APSInt& X);
+ const llvm::APSInt& getValue(const llvm::APInt& X, bool isUnsigned);
+ const llvm::APSInt& getValue(uint64_t X, unsigned BitWidth, bool isUnsigned);
+ const llvm::APSInt& getValue(uint64_t X, QualType T);
+
+ /// Convert - Create a new persistent APSInt with the same value as 'From'
+ /// but with the bitwidth and signedness of 'To'.
+ const llvm::APSInt &Convert(const llvm::APSInt& To,
+ const llvm::APSInt& From) {
+
+ if (To.isUnsigned() == From.isUnsigned() &&
+ To.getBitWidth() == From.getBitWidth())
+ return From;
+
+ return getValue(From.getSExtValue(), To.getBitWidth(), To.isUnsigned());
+ }
+
+ const llvm::APSInt &Convert(QualType T, const llvm::APSInt &From) {
+ assert(T->isIntegerType() || Loc::isLocType(T));
+ unsigned bitwidth = Ctx.getTypeSize(T);
+ bool isUnsigned
+ = T->isUnsignedIntegerOrEnumerationType() || Loc::isLocType(T);
+
+ if (isUnsigned == From.isUnsigned() && bitwidth == From.getBitWidth())
+ return From;
+
+ return getValue(From.getSExtValue(), bitwidth, isUnsigned);
+ }
+
+ const llvm::APSInt& getIntValue(uint64_t X, bool isUnsigned) {
+ QualType T = isUnsigned ? Ctx.UnsignedIntTy : Ctx.IntTy;
+ return getValue(X, T);
+ }
+
+ inline const llvm::APSInt& getMaxValue(const llvm::APSInt &v) {
+ return getValue(llvm::APSInt::getMaxValue(v.getBitWidth(), v.isUnsigned()));
+ }
+
+ inline const llvm::APSInt& getMinValue(const llvm::APSInt &v) {
+ return getValue(llvm::APSInt::getMinValue(v.getBitWidth(), v.isUnsigned()));
+ }
+
+ inline const llvm::APSInt& getMaxValue(QualType T) {
+ assert(T->isIntegerType() || Loc::isLocType(T));
+ bool isUnsigned
+ = T->isUnsignedIntegerOrEnumerationType() || Loc::isLocType(T);
+ return getValue(llvm::APSInt::getMaxValue(Ctx.getTypeSize(T), isUnsigned));
+ }
+
+ inline const llvm::APSInt& getMinValue(QualType T) {
+ assert(T->isIntegerType() || Loc::isLocType(T));
+ bool isUnsigned
+ = T->isUnsignedIntegerOrEnumerationType() || Loc::isLocType(T);
+ return getValue(llvm::APSInt::getMinValue(Ctx.getTypeSize(T), isUnsigned));
+ }
+
+ inline const llvm::APSInt& Add1(const llvm::APSInt& V) {
+ llvm::APSInt X = V;
+ ++X;
+ return getValue(X);
+ }
+
+ inline const llvm::APSInt& Sub1(const llvm::APSInt& V) {
+ llvm::APSInt X = V;
+ --X;
+ return getValue(X);
+ }
+
+ inline const llvm::APSInt& getZeroWithPtrWidth(bool isUnsigned = true) {
+ return getValue(0, Ctx.getTypeSize(Ctx.VoidPtrTy), isUnsigned);
+ }
+
+ inline const llvm::APSInt &getIntWithPtrWidth(uint64_t X, bool isUnsigned) {
+ return getValue(X, Ctx.getTypeSize(Ctx.VoidPtrTy), isUnsigned);
+ }
+
+ inline const llvm::APSInt& getTruthValue(bool b, QualType T) {
+ return getValue(b ? 1 : 0, Ctx.getTypeSize(T), false);
+ }
+
+ inline const llvm::APSInt& getTruthValue(bool b) {
+ return getTruthValue(b, Ctx.getLogicalOperationType());
+ }
+
+ const CompoundValData *getCompoundValData(QualType T,
+ llvm::ImmutableList<SVal> Vals);
+
+ const LazyCompoundValData *getLazyCompoundValData(const StoreRef &store,
+ const TypedValueRegion *region);
+
+ llvm::ImmutableList<SVal> getEmptySValList() {
+ return SValListFactory.getEmptyList();
+ }
+
+ llvm::ImmutableList<SVal> consVals(SVal X, llvm::ImmutableList<SVal> L) {
+ return SValListFactory.add(X, L);
+ }
+
+ const llvm::APSInt* evalAPSInt(BinaryOperator::Opcode Op,
+ const llvm::APSInt& V1,
+ const llvm::APSInt& V2);
+
+ const std::pair<SVal, uintptr_t>&
+ getPersistentSValWithData(const SVal& V, uintptr_t Data);
+
+ const std::pair<SVal, SVal>&
+ getPersistentSValPair(const SVal& V1, const SVal& V2);
+
+ const SVal* getPersistentSVal(SVal X);
+};
+
+} // end GR namespace
+
+} // end clang namespace
+
+#endif
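A brief usage sketch of the conversion helpers declared above, assuming an ASTContext and allocator are already available (variable names are placeholders):

    #include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"

    using namespace clang;
    using namespace ento;

    static void demo(ASTContext &Ctx, llvm::BumpPtrAllocator &Alloc) {
      BasicValueFactory BVF(Ctx, Alloc);

      // Intern a signed 'int' 1, then re-size/re-sign it to 'unsigned long'.
      const llvm::APSInt &One = BVF.getIntValue(1, /*isUnsigned=*/false);
      const llvm::APSInt &Widened = BVF.Convert(Ctx.UnsignedLongTy, One);

      // Canonical true/false values use the logical operation type.
      const llvm::APSInt &True = BVF.getTruthValue(true);
      (void)Widened; (void)True;
    }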
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h
new file mode 100644
index 0000000..2483a79
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h
@@ -0,0 +1,62 @@
+//==- BlockCounter.h - ADT for counting block visits ---------------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BlockCounter, an abstract data type used to count
+// the number of times a given block has been visited along a path
+// analyzed by CoreEngine.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_BLOCKCOUNTER
+#define LLVM_CLANG_GR_BLOCKCOUNTER
+
+namespace llvm {
+ class BumpPtrAllocator;
+}
+
+namespace clang {
+
+class StackFrameContext;
+
+namespace ento {
+
+/// \class BlockCounter
+/// \brief An abstract data type used to count the number of times a given
+/// block has been visited along a path analyzed by CoreEngine.
+class BlockCounter {
+ void *Data;
+
+ BlockCounter(void *D) : Data(D) {}
+
+public:
+ BlockCounter() : Data(0) {}
+
+ unsigned getNumVisited(const StackFrameContext *CallSite,
+ unsigned BlockID) const;
+
+ class Factory {
+ void *F;
+ public:
+ Factory(llvm::BumpPtrAllocator& Alloc);
+ ~Factory();
+
+ BlockCounter GetEmptyCounter();
+ BlockCounter IncrementCount(BlockCounter BC,
+ const StackFrameContext *CallSite,
+ unsigned BlockID);
+ };
+
+ friend class Factory;
+};
+
+} // end GR namespace
+
+} // end clang namespace
+
+#endif
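A minimal sketch of the Factory interface above; the stack frame and block ID are placeholders for values the engine would supply:

    #include "clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h"
    #include "llvm/Support/Allocator.h"

    using namespace clang;
    using namespace ento;

    static unsigned countOneVisit(llvm::BumpPtrAllocator &Alloc,
                                  const StackFrameContext *Frame,
                                  unsigned BlockID) {
      BlockCounter::Factory F(Alloc);
      BlockCounter BC = F.GetEmptyCounter();
      BC = F.IncrementCount(BC, Frame, BlockID);  // record a single visit
      return BC.getNumVisited(Frame, BlockID);    // == 1
    }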
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
new file mode 100644
index 0000000..b051d33
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h
@@ -0,0 +1,239 @@
+//== CheckerContext.h - Context info for path-sensitive checkers--*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckerContext that provides contextual info for
+// path-sensitive checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SA_CORE_PATHSENSITIVE_CHECKERCONTEXT
+#define LLVM_CLANG_SA_CORE_PATHSENSITIVE_CHECKERCONTEXT
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+namespace clang {
+namespace ento {
+
+class CheckerContext {
+ ExprEngine &Eng;
+  /// The current exploded (symbolic execution) graph node.
+ ExplodedNode *Pred;
+  /// The flag is true if the state of the execution has been modified
+  /// by the checker using this context; for example, a new transition has been
+  /// added or a bug report has been issued.
+ bool Changed;
+ /// The tagged location, which is used to generate all new nodes.
+ const ProgramPoint Location;
+ NodeBuilder &NB;
+
+public:
+ /// If we are post visiting a call, this flag will be set if the
+ /// call was inlined. In all other cases it will be false.
+ const bool wasInlined;
+
+ CheckerContext(NodeBuilder &builder,
+ ExprEngine &eng,
+ ExplodedNode *pred,
+ const ProgramPoint &loc,
+ bool wasInlined = false)
+ : Eng(eng),
+ Pred(pred),
+ Changed(false),
+ Location(loc),
+ NB(builder),
+ wasInlined(wasInlined) {
+ assert(Pred->getState() &&
+ "We should not call the checkers on an empty state.");
+ }
+
+ AnalysisManager &getAnalysisManager() {
+ return Eng.getAnalysisManager();
+ }
+
+ ConstraintManager &getConstraintManager() {
+ return Eng.getConstraintManager();
+ }
+
+ StoreManager &getStoreManager() {
+ return Eng.getStoreManager();
+ }
+
+ /// \brief Returns the previous node in the exploded graph, which includes
+ /// the state of the program before the checker ran. Note, checkers should
+ /// not retain the node in their state since the nodes might get invalidated.
+ ExplodedNode *getPredecessor() { return Pred; }
+ ProgramStateRef getState() const { return Pred->getState(); }
+
+  /// \brief Check if the checker changed the state of the execution; e.g.,
+  /// added a new transition or issued a bug report.
+ bool isDifferent() { return Changed; }
+
+ /// \brief Returns the number of times the current block has been visited
+ /// along the analyzed path.
+ unsigned getCurrentBlockCount() const {
+ return NB.getContext().getCurrentBlockCount();
+ }
+
+ ASTContext &getASTContext() {
+ return Eng.getContext();
+ }
+
+ const LangOptions &getLangOpts() const {
+ return Eng.getContext().getLangOpts();
+ }
+
+ const LocationContext *getLocationContext() const {
+ return Pred->getLocationContext();
+ }
+
+ BugReporter &getBugReporter() {
+ return Eng.getBugReporter();
+ }
+
+ SourceManager &getSourceManager() {
+ return getBugReporter().getSourceManager();
+ }
+
+ SValBuilder &getSValBuilder() {
+ return Eng.getSValBuilder();
+ }
+
+ SymbolManager &getSymbolManager() {
+ return getSValBuilder().getSymbolManager();
+ }
+
+ bool isObjCGCEnabled() const {
+ return Eng.isObjCGCEnabled();
+ }
+
+ ProgramStateManager &getStateManager() {
+ return Eng.getStateManager();
+ }
+
+ AnalysisDeclContext *getCurrentAnalysisDeclContext() const {
+ return Pred->getLocationContext()->getAnalysisDeclContext();
+ }
+
+ /// \brief If the given node corresponds to a PostStore program point, retrieve
+ /// the location region as it was uttered in the code.
+ ///
+ /// This utility can be useful for generating extensive diagnostics, for
+ /// example, for finding variables that the given symbol was assigned to.
+ static const MemRegion *getLocationRegionIfPostStore(const ExplodedNode *N) {
+ ProgramPoint L = N->getLocation();
+ if (const PostStore *PSL = dyn_cast<PostStore>(&L))
+ return reinterpret_cast<const MemRegion*>(PSL->getLocationValue());
+ return 0;
+ }
+
+ /// \brief Generates a new transition in the program state graph
+ /// (ExplodedGraph). Uses the default CheckerContext predecessor node.
+ ///
+ /// @param State The state of the generated node.
+ /// @param Tag The tag is used to uniquely identify the creation site. If no
+ /// tag is specified, a default tag, unique to the given checker,
+ /// will be used. Tags are used to prevent states generated at
+ /// different sites from caching out.
+ ExplodedNode *addTransition(ProgramStateRef State,
+ const ProgramPointTag *Tag = 0) {
+ return addTransitionImpl(State, false, 0, Tag);
+ }
+
+ /// \brief Generates a default transition (containing checker tag but no
+ /// checker state changes).
+ ExplodedNode *addTransition() {
+ return addTransition(getState());
+ }
+
+ /// \brief Generates a new transition with the given predecessor.
+ /// Allows checkers to generate a chain of nodes.
+ ///
+ /// @param State The state of the generated node.
+ /// @param Pred The transition will be generated from the specified Pred node
+ /// to the newly generated node.
+ /// @param Tag The tag to uniquely identify the creation site.
+ /// @param IsSink Mark the new node as sink, which will stop exploration of
+ /// the given path.
+ ExplodedNode *addTransition(ProgramStateRef State,
+ ExplodedNode *Pred,
+ const ProgramPointTag *Tag = 0,
+ bool IsSink = false) {
+ return addTransitionImpl(State, IsSink, Pred, Tag);
+ }
+
+  /// \brief Generate a sink node. Generating a sink stops exploration of the
+  /// given path.
+ ExplodedNode *generateSink(ProgramStateRef state = 0) {
+ return addTransitionImpl(state ? state : getState(), true);
+ }
+
+ /// \brief Emit the diagnostics report.
+ void EmitReport(BugReport *R) {
+ Changed = true;
+ Eng.getBugReporter().EmitReport(R);
+ }
+
+ /// \brief Get the declaration of the called function (path-sensitive).
+ const FunctionDecl *getCalleeDecl(const CallExpr *CE) const;
+
+ /// \brief Get the name of the called function (path-sensitive).
+ StringRef getCalleeName(const FunctionDecl *FunDecl) const;
+
+ /// \brief Get the name of the called function (path-sensitive).
+ StringRef getCalleeName(const CallExpr *CE) const {
+ const FunctionDecl *FunDecl = getCalleeDecl(CE);
+ return getCalleeName(FunDecl);
+ }
+
+  /// Given a function declaration and a name, checks whether this is a C
+  /// library function with the given name.
+ bool isCLibraryFunction(const FunctionDecl *FD, StringRef Name);
+ static bool isCLibraryFunction(const FunctionDecl *FD, StringRef Name,
+ ASTContext &Context);
+
+  /// \brief Depending on whether the location corresponds to a macro, return
+ /// either the macro name or the token spelling.
+ ///
+ /// This could be useful when checkers' logic depends on whether a function
+ /// is called with a given macro argument. For example:
+ /// s = socket(AF_INET,..)
+ /// If AF_INET is a macro, the result should be treated as a source of taint.
+ ///
+ /// \sa clang::Lexer::getSpelling(), clang::Lexer::getImmediateMacroName().
+ StringRef getMacroNameOrSpelling(SourceLocation &Loc);
+
+private:
+ ExplodedNode *addTransitionImpl(ProgramStateRef State,
+ bool MarkAsSink,
+ ExplodedNode *P = 0,
+ const ProgramPointTag *Tag = 0) {
+ if (!State || (State == Pred->getState() && !Tag && !MarkAsSink))
+ return Pred;
+
+ Changed = true;
+ ExplodedNode *node = NB.generateNode(Tag ? Location.withTag(Tag) : Location,
+ State,
+ P ? P : Pred, MarkAsSink);
+ return node;
+ }
+};
+
+/// \brief A helper class which wraps a boolean value set to false by default.
+struct DefaultBool {
+ bool Val;
+ DefaultBool() : Val(false) {}
+ operator bool() const { return Val; }
+ DefaultBool &operator=(bool b) { Val = b; return *this; }
+};
+
+} // end GR namespace
+
+} // end clang namespace
+
+#endif
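A hedged sketch of a path-sensitive checker built on the CheckerContext API above (the bug category, message, and the 'main' condition are illustrative, loosely following the example named in CheckerRegistry.h):

    #include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
    #include "clang/StaticAnalyzer/Core/Checker.h"
    #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"

    using namespace clang;
    using namespace ento;

    namespace {
    class ExampleChecker : public Checker<check::PreStmt<CallExpr> > {
      mutable llvm::OwningPtr<BugType> BT;
    public:
      void checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
        if (C.getCalleeName(CE) != "main")
          return;
        // Stop exploring this path and attach a report to the sink node.
        if (ExplodedNode *N = C.generateSink()) {
          if (!BT)
            BT.reset(new BugType("Call to main", "Example"));
          C.EmitReport(new BugReport(*BT, "Direct call to main", N));
        }
      }
    };
    } // end anonymous namespace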
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
new file mode 100644
index 0000000..12547e0
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h
@@ -0,0 +1,43 @@
+//== CheckerHelpers.h - Helper functions for checkers ------------*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckerVisitor.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_PATHSENSITIVE_CHECKERHELPERS
+#define LLVM_CLANG_GR_PATHSENSITIVE_CHECKERHELPERS
+
+#include "clang/AST/Stmt.h"
+
+namespace clang {
+
+namespace ento {
+
+bool containsMacro(const Stmt *S);
+bool containsEnum(const Stmt *S);
+bool containsStaticLocal(const Stmt *S);
+bool containsBuiltinOffsetOf(const Stmt *S);
+template <class T> bool containsStmt(const Stmt *S) {
+ if (isa<T>(S))
+ return true;
+
+ for (Stmt::const_child_range I = S->children(); I; ++I)
+ if (const Stmt *child = *I)
+ if (containsStmt<T>(child))
+ return true;
+
+ return false;
+}
+
+} // end GR namespace
+
+} // end clang namespace
+
+#endif
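Example use of the containsStmt template above, assuming the statement comes from a checker callback:

    #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"

    using namespace clang;
    using namespace ento;

    // True if S (or any sub-statement) is a goto, or if any part of S came
    // from a macro expansion.
    static bool mentionsGotoOrMacro(const Stmt *S) {
      return containsStmt<GotoStmt>(S) || containsMacro(S);
    }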
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h
new file mode 100644
index 0000000..631858d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h
@@ -0,0 +1,81 @@
+//== ConstraintManager.h - Constraints on symbolic values.-------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface to manage constraints on symbolic values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_CONSTRAINT_MANAGER_H
+#define LLVM_CLANG_GR_CONSTRAINT_MANAGER_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+
+namespace llvm {
+class APSInt;
+}
+
+namespace clang {
+namespace ento {
+
+class SubEngine;
+
+class ConstraintManager {
+public:
+ virtual ~ConstraintManager();
+ virtual ProgramStateRef assume(ProgramStateRef state,
+ DefinedSVal Cond,
+ bool Assumption) = 0;
+
+ std::pair<ProgramStateRef, ProgramStateRef >
+ assumeDual(ProgramStateRef state, DefinedSVal Cond)
+ {
+ std::pair<ProgramStateRef, ProgramStateRef > res =
+ std::make_pair(assume(state, Cond, true), assume(state, Cond, false));
+
+ assert(!(!res.first && !res.second) && "System is over constrained.");
+ return res;
+ }
+
+ virtual const llvm::APSInt* getSymVal(ProgramStateRef state,
+ SymbolRef sym) const = 0;
+
+ virtual bool isEqual(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt& V) const = 0;
+
+ virtual ProgramStateRef removeDeadBindings(ProgramStateRef state,
+ SymbolReaper& SymReaper) = 0;
+
+ virtual void print(ProgramStateRef state,
+ raw_ostream &Out,
+ const char* nl,
+ const char *sep) = 0;
+
+ virtual void EndPath(ProgramStateRef state) {}
+
+protected:
+ /// canReasonAbout - Not all ConstraintManagers can accurately reason about
+ /// all SVal values. This method returns true if the ConstraintManager can
+ /// reasonably handle a given SVal value. This is typically queried by
+ /// ExprEngine to determine if the value should be replaced with a
+ /// conjured symbolic value in order to recover some precision.
+ virtual bool canReasonAbout(SVal X) const = 0;
+};
+
+ConstraintManager* CreateBasicConstraintManager(ProgramStateManager& statemgr,
+ SubEngine &subengine);
+ConstraintManager* CreateRangeConstraintManager(ProgramStateManager& statemgr,
+ SubEngine &subengine);
+
+} // end GR namespace
+
+} // end clang namespace
+
+#endif
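A short sketch of the assumeDual contract described above: split a state on a condition and hand back both branches, either of which may be null if infeasible (the caller supplies the state and condition):

    #include "clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h"

    using namespace clang;
    using namespace ento;

    static void splitOnCondition(ConstraintManager &CM, ProgramStateRef state,
                                 DefinedSVal Cond, ProgramStateRef &onTrue,
                                 ProgramStateRef &onFalse) {
      // assumeDual asserts that at least one side remains feasible.
      std::pair<ProgramStateRef, ProgramStateRef> branches =
          CM.assumeDual(state, Cond);
      onTrue = branches.first;
      onFalse = branches.second;
    }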
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
new file mode 100644
index 0000000..59136fc
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h
@@ -0,0 +1,541 @@
+//==- CoreEngine.h - Path-Sensitive Dataflow Engine ----------------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a generic engine for intraprocedural, path-sensitive,
+// dataflow analysis via graph reachability.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_COREENGINE
+#define LLVM_CLANG_GR_COREENGINE
+
+#include "clang/AST/Expr.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/WorkList.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h"
+#include "llvm/ADT/OwningPtr.h"
+
+namespace clang {
+
+class ProgramPointTag;
+
+namespace ento {
+
+class NodeBuilder;
+
+//===----------------------------------------------------------------------===//
+/// CoreEngine - Implements the core logic of the graph-reachability
+/// analysis. It traverses the CFG and generates the ExplodedGraph.
+/// Program "states" are treated as opaque void pointers.
+/// The attached SubEngine (for example, ExprEngine) provides the matching
+/// component that knows the actual types for states. Note that this engine
+/// only dispatches to transfer functions at the statement and block level.
+/// The analyses themselves must implement any transfer function logic at the
+/// sub-expression level (if any).
+class CoreEngine {
+ friend struct NodeBuilderContext;
+ friend class NodeBuilder;
+ friend class ExprEngine;
+ friend class CommonNodeBuilder;
+ friend class IndirectGotoNodeBuilder;
+ friend class SwitchNodeBuilder;
+ friend class EndOfFunctionNodeBuilder;
+public:
+ typedef std::vector<std::pair<BlockEdge, const ExplodedNode*> >
+ BlocksExhausted;
+
+ typedef std::vector<std::pair<const CFGBlock*, const ExplodedNode*> >
+ BlocksAborted;
+
+private:
+
+ SubEngine& SubEng;
+
+ /// G - The simulation graph. Each node is a (location,state) pair.
+ OwningPtr<ExplodedGraph> G;
+
+ /// WList - A set of queued nodes that need to be processed by the
+ /// worklist algorithm. It is up to the implementation of WList to decide
+ /// the order that nodes are processed.
+ WorkList* WList;
+
+  /// BCounterFactory - A factory object for creating BlockCounter objects.
+  ///  These are used to record, for key nodes in the ExplodedGraph, the
+  ///  number of times different CFGBlocks have been visited along a path.
+ BlockCounter::Factory BCounterFactory;
+
+ /// The locations where we stopped doing work because we visited a location
+ /// too many times.
+ BlocksExhausted blocksExhausted;
+
+ /// The locations where we stopped because the engine aborted analysis,
+ /// usually because it could not reason about something.
+ BlocksAborted blocksAborted;
+
+ /// The functions which have been analyzed through inlining. This is owned by
+ /// AnalysisConsumer. It can be null.
+ SetOfConstDecls *AnalyzedCallees;
+
+ /// The information about functions shared by the whole translation unit.
+ /// (This data is owned by AnalysisConsumer.)
+ FunctionSummariesTy *FunctionSummaries;
+
+ void generateNode(const ProgramPoint &Loc,
+ ProgramStateRef State,
+ ExplodedNode *Pred);
+
+ void HandleBlockEdge(const BlockEdge &E, ExplodedNode *Pred);
+ void HandleBlockEntrance(const BlockEntrance &E, ExplodedNode *Pred);
+ void HandleBlockExit(const CFGBlock *B, ExplodedNode *Pred);
+ void HandlePostStmt(const CFGBlock *B, unsigned StmtIdx, ExplodedNode *Pred);
+
+ void HandleBranch(const Stmt *Cond, const Stmt *Term, const CFGBlock *B,
+ ExplodedNode *Pred);
+
+private:
+ CoreEngine(const CoreEngine&); // Do not implement.
+ CoreEngine& operator=(const CoreEngine&);
+
+ ExplodedNode *generateCallExitNode(ExplodedNode *N);
+
+public:
+ /// Construct a CoreEngine object to analyze the provided CFG using
+ /// a DFS exploration of the exploded graph.
+ CoreEngine(SubEngine& subengine, SetOfConstDecls *VisitedCallees,
+ FunctionSummariesTy *FS)
+ : SubEng(subengine), G(new ExplodedGraph()),
+ WList(WorkList::makeBFS()),
+ BCounterFactory(G->getAllocator()),
+ AnalyzedCallees(VisitedCallees),
+ FunctionSummaries(FS){}
+
+ ~CoreEngine() {
+ delete WList;
+ }
+
+ /// getGraph - Returns the exploded graph.
+ ExplodedGraph& getGraph() { return *G.get(); }
+
+ /// takeGraph - Returns the exploded graph. Ownership of the graph is
+ /// transferred to the caller.
+ ExplodedGraph* takeGraph() { return G.take(); }
+
+ /// ExecuteWorkList - Run the worklist algorithm for a maximum number of
+ /// steps. Returns true if there is still simulation state on the worklist.
+ bool ExecuteWorkList(const LocationContext *L, unsigned Steps,
+ ProgramStateRef InitState);
+ /// Returns true if there is still simulation state on the worklist.
+ bool ExecuteWorkListWithInitialState(const LocationContext *L,
+ unsigned Steps,
+ ProgramStateRef InitState,
+ ExplodedNodeSet &Dst);
+
+ /// Dispatch the work list item based on the given location information.
+ /// Use Pred parameter as the predecessor state.
+ void dispatchWorkItem(ExplodedNode* Pred, ProgramPoint Loc,
+ const WorkListUnit& WU);
+
+ // Functions for external checking of whether we have unfinished work
+ bool wasBlockAborted() const { return !blocksAborted.empty(); }
+ bool wasBlocksExhausted() const { return !blocksExhausted.empty(); }
+ bool hasWorkRemaining() const { return wasBlocksExhausted() ||
+ WList->hasWork() ||
+ wasBlockAborted(); }
+
+ /// Inform the CoreEngine that a basic block was aborted because
+ /// it could not be completely analyzed.
+ void addAbortedBlock(const ExplodedNode *node, const CFGBlock *block) {
+ blocksAborted.push_back(std::make_pair(block, node));
+ }
+
+ WorkList *getWorkList() const { return WList; }
+
+ BlocksExhausted::const_iterator blocks_exhausted_begin() const {
+ return blocksExhausted.begin();
+ }
+ BlocksExhausted::const_iterator blocks_exhausted_end() const {
+ return blocksExhausted.end();
+ }
+ BlocksAborted::const_iterator blocks_aborted_begin() const {
+ return blocksAborted.begin();
+ }
+ BlocksAborted::const_iterator blocks_aborted_end() const {
+ return blocksAborted.end();
+ }
+
+ /// \brief Enqueue the given set of nodes onto the work list.
+ void enqueue(ExplodedNodeSet &Set);
+
+ /// \brief Enqueue nodes that were created as a result of processing
+ /// a statement onto the work list.
+ void enqueue(ExplodedNodeSet &Set, const CFGBlock *Block, unsigned Idx);
+
+  /// \brief Enqueue the nodes corresponding to the end of a function onto the
+  /// end-of-path / work list.
+ void enqueueEndOfFunction(ExplodedNodeSet &Set);
+
+ /// \brief Enqueue a single node created as a result of statement processing.
+ void enqueueStmtNode(ExplodedNode *N, const CFGBlock *Block, unsigned Idx);
+};
+
+// TODO: Turn into a class.
+struct NodeBuilderContext {
+ CoreEngine &Eng;
+ const CFGBlock *Block;
+ ExplodedNode *Pred;
+ NodeBuilderContext(CoreEngine &E, const CFGBlock *B, ExplodedNode *N)
+ : Eng(E), Block(B), Pred(N) { assert(B); assert(!N->isSink()); }
+
+ ExplodedNode *getPred() const { return Pred; }
+
+ /// \brief Return the CFGBlock associated with this builder.
+ const CFGBlock *getBlock() const { return Block; }
+
+ /// \brief Returns the number of times the current basic block has been
+ /// visited on the exploded graph path.
+ unsigned getCurrentBlockCount() const {
+ return Eng.WList->getBlockCounter().getNumVisited(
+ Pred->getLocationContext()->getCurrentStackFrame(),
+ Block->getBlockID());
+ }
+};
+
+/// \class NodeBuilder
+/// \brief This is the simplest builder which generates nodes in the
+/// ExplodedGraph.
+///
+/// The main benefit of the builder is that it automatically tracks the
+/// frontier nodes (or destination set). This is the set of nodes which should
+/// be propagated to the next step / builder. They are the nodes which have been
+/// added to the builder (either as the input node set or as the newly
+/// constructed nodes) but did not have any outgoing transitions added.
+class NodeBuilder {
+ virtual void anchor();
+protected:
+ const NodeBuilderContext &C;
+
+ /// Specifies if the builder results have been finalized. For example, if it
+ /// is set to false, autotransitions are yet to be generated.
+ bool Finalized;
+ bool HasGeneratedNodes;
+ /// \brief The frontier set - a set of nodes which need to be propagated after
+ /// the builder dies.
+ ExplodedNodeSet &Frontier;
+
+  /// Checks if the results are ready.
+ virtual bool checkResults() {
+ if (!Finalized)
+ return false;
+ return true;
+ }
+
+ bool hasNoSinksInFrontier() {
+ for (iterator I = Frontier.begin(), E = Frontier.end(); I != E; ++I) {
+ if ((*I)->isSink())
+ return false;
+ }
+ return true;
+ }
+
+  /// Allow subclasses to finalize results before begin() is executed.
+ virtual void finalizeResults() {}
+
+ ExplodedNode *generateNodeImpl(const ProgramPoint &PP,
+ ProgramStateRef State,
+ ExplodedNode *Pred,
+ bool MarkAsSink = false);
+
+public:
+ NodeBuilder(ExplodedNode *SrcNode, ExplodedNodeSet &DstSet,
+ const NodeBuilderContext &Ctx, bool F = true)
+ : C(Ctx), Finalized(F), HasGeneratedNodes(false), Frontier(DstSet) {
+ Frontier.Add(SrcNode);
+ }
+
+ NodeBuilder(const ExplodedNodeSet &SrcSet, ExplodedNodeSet &DstSet,
+ const NodeBuilderContext &Ctx, bool F = true)
+ : C(Ctx), Finalized(F), HasGeneratedNodes(false), Frontier(DstSet) {
+ Frontier.insert(SrcSet);
+ assert(hasNoSinksInFrontier());
+ }
+
+ virtual ~NodeBuilder() {}
+
+ /// \brief Generates a node in the ExplodedGraph.
+ ///
+ /// When a node is marked as sink, the exploration from the node is stopped -
+ /// the node becomes the last node on the path.
+ ExplodedNode *generateNode(const ProgramPoint &PP,
+ ProgramStateRef State,
+ ExplodedNode *Pred,
+ bool MarkAsSink = false) {
+ return generateNodeImpl(PP, State, Pred, MarkAsSink);
+ }
+
+ const ExplodedNodeSet &getResults() {
+ finalizeResults();
+ assert(checkResults());
+ return Frontier;
+ }
+
+ typedef ExplodedNodeSet::iterator iterator;
+  /// \brief Iterators over the results frontier.
+ inline iterator begin() {
+ finalizeResults();
+ assert(checkResults());
+ return Frontier.begin();
+ }
+ inline iterator end() {
+ finalizeResults();
+ return Frontier.end();
+ }
+
+ const NodeBuilderContext &getContext() { return C; }
+ bool hasGeneratedNodes() { return HasGeneratedNodes; }
+
+ void takeNodes(const ExplodedNodeSet &S) {
+ for (ExplodedNodeSet::iterator I = S.begin(), E = S.end(); I != E; ++I )
+ Frontier.erase(*I);
+ }
+ void takeNodes(ExplodedNode *N) { Frontier.erase(N); }
+ void addNodes(const ExplodedNodeSet &S) { Frontier.insert(S); }
+ void addNodes(ExplodedNode *N) { Frontier.Add(N); }
+};
+
+/// \class NodeBuilderWithSinks
+/// \brief This node builder keeps track of the generated sink nodes.
+class NodeBuilderWithSinks: public NodeBuilder {
+ virtual void anchor();
+protected:
+ SmallVector<ExplodedNode*, 2> sinksGenerated;
+ ProgramPoint &Location;
+
+public:
+ NodeBuilderWithSinks(ExplodedNode *Pred, ExplodedNodeSet &DstSet,
+ const NodeBuilderContext &Ctx, ProgramPoint &L)
+ : NodeBuilder(Pred, DstSet, Ctx), Location(L) {}
+ ExplodedNode *generateNode(ProgramStateRef State,
+ ExplodedNode *Pred,
+ const ProgramPointTag *Tag = 0,
+ bool MarkAsSink = false) {
+ ProgramPoint LocalLoc = (Tag ? Location.withTag(Tag): Location);
+
+ ExplodedNode *N = generateNodeImpl(LocalLoc, State, Pred, MarkAsSink);
+ if (N && N->isSink())
+ sinksGenerated.push_back(N);
+ return N;
+ }
+
+ const SmallVectorImpl<ExplodedNode*> &getSinks() const {
+ return sinksGenerated;
+ }
+};
+
+/// \class StmtNodeBuilder
+/// \brief This builder class is useful for generating nodes that resulted from
+/// visiting a statement. The main difference from its parent NodeBuilder is
+/// that it creates a statement-specific ProgramPoint.
+class StmtNodeBuilder: public NodeBuilder {
+ NodeBuilder *EnclosingBldr;
+public:
+
+  /// \brief Constructs a StmtNodeBuilder. If the builder is going to process
+  /// nodes currently owned by another builder (with a larger scope), use the
+  /// Enclosing builder to transfer ownership.
+ StmtNodeBuilder(ExplodedNode *SrcNode, ExplodedNodeSet &DstSet,
+ const NodeBuilderContext &Ctx, NodeBuilder *Enclosing = 0)
+ : NodeBuilder(SrcNode, DstSet, Ctx), EnclosingBldr(Enclosing) {
+ if (EnclosingBldr)
+ EnclosingBldr->takeNodes(SrcNode);
+ }
+
+ StmtNodeBuilder(ExplodedNodeSet &SrcSet, ExplodedNodeSet &DstSet,
+ const NodeBuilderContext &Ctx, NodeBuilder *Enclosing = 0)
+ : NodeBuilder(SrcSet, DstSet, Ctx), EnclosingBldr(Enclosing) {
+ if (EnclosingBldr)
+ for (ExplodedNodeSet::iterator I = SrcSet.begin(),
+ E = SrcSet.end(); I != E; ++I )
+ EnclosingBldr->takeNodes(*I);
+ }
+
+ virtual ~StmtNodeBuilder();
+
+ ExplodedNode *generateNode(const Stmt *S,
+ ExplodedNode *Pred,
+ ProgramStateRef St,
+ bool MarkAsSink = false,
+ const ProgramPointTag *tag = 0,
+ ProgramPoint::Kind K = ProgramPoint::PostStmtKind){
+ const ProgramPoint &L = ProgramPoint::getProgramPoint(S, K,
+ Pred->getLocationContext(), tag);
+ return generateNodeImpl(L, St, Pred, MarkAsSink);
+ }
+
+ ExplodedNode *generateNode(const ProgramPoint &PP,
+ ExplodedNode *Pred,
+ ProgramStateRef State,
+ bool MarkAsSink = false) {
+ return generateNodeImpl(PP, State, Pred, MarkAsSink);
+ }
+};
+
+/// \brief BranchNodeBuilder is responsible for constructing the nodes
+/// corresponding to the two branches of the if statement - true and false.
+class BranchNodeBuilder: public NodeBuilder {
+ virtual void anchor();
+ const CFGBlock *DstT;
+ const CFGBlock *DstF;
+
+ bool InFeasibleTrue;
+ bool InFeasibleFalse;
+
+public:
+ BranchNodeBuilder(ExplodedNode *SrcNode, ExplodedNodeSet &DstSet,
+ const NodeBuilderContext &C,
+ const CFGBlock *dstT, const CFGBlock *dstF)
+ : NodeBuilder(SrcNode, DstSet, C), DstT(dstT), DstF(dstF),
+ InFeasibleTrue(!DstT), InFeasibleFalse(!DstF) {
+ // The branch node builder does not generate autotransitions.
+ // If there are no successors it means that both branches are infeasible.
+ takeNodes(SrcNode);
+ }
+
+ BranchNodeBuilder(const ExplodedNodeSet &SrcSet, ExplodedNodeSet &DstSet,
+ const NodeBuilderContext &C,
+ const CFGBlock *dstT, const CFGBlock *dstF)
+ : NodeBuilder(SrcSet, DstSet, C), DstT(dstT), DstF(dstF),
+ InFeasibleTrue(!DstT), InFeasibleFalse(!DstF) {
+ takeNodes(SrcSet);
+ }
+
+ ExplodedNode *generateNode(ProgramStateRef State, bool branch,
+ ExplodedNode *Pred);
+
+ const CFGBlock *getTargetBlock(bool branch) const {
+ return branch ? DstT : DstF;
+ }
+
+ void markInfeasible(bool branch) {
+ if (branch)
+ InFeasibleTrue = true;
+ else
+ InFeasibleFalse = true;
+ }
+
+ bool isFeasible(bool branch) {
+ return branch ? !InFeasibleTrue : !InFeasibleFalse;
+ }
+};
+
+class IndirectGotoNodeBuilder {
+ CoreEngine& Eng;
+ const CFGBlock *Src;
+ const CFGBlock &DispatchBlock;
+ const Expr *E;
+ ExplodedNode *Pred;
+
+public:
+ IndirectGotoNodeBuilder(ExplodedNode *pred, const CFGBlock *src,
+ const Expr *e, const CFGBlock *dispatch, CoreEngine* eng)
+ : Eng(*eng), Src(src), DispatchBlock(*dispatch), E(e), Pred(pred) {}
+
+ class iterator {
+ CFGBlock::const_succ_iterator I;
+
+ friend class IndirectGotoNodeBuilder;
+ iterator(CFGBlock::const_succ_iterator i) : I(i) {}
+ public:
+
+ iterator &operator++() { ++I; return *this; }
+ bool operator!=(const iterator &X) const { return I != X.I; }
+
+ const LabelDecl *getLabel() const {
+ return llvm::cast<LabelStmt>((*I)->getLabel())->getDecl();
+ }
+
+ const CFGBlock *getBlock() const {
+ return *I;
+ }
+ };
+
+ iterator begin() { return iterator(DispatchBlock.succ_begin()); }
+ iterator end() { return iterator(DispatchBlock.succ_end()); }
+
+ ExplodedNode *generateNode(const iterator &I,
+ ProgramStateRef State,
+ bool isSink = false);
+
+ const Expr *getTarget() const { return E; }
+
+ ProgramStateRef getState() const { return Pred->State; }
+
+ const LocationContext *getLocationContext() const {
+ return Pred->getLocationContext();
+ }
+};
+
+class SwitchNodeBuilder {
+ CoreEngine& Eng;
+ const CFGBlock *Src;
+ const Expr *Condition;
+ ExplodedNode *Pred;
+
+public:
+ SwitchNodeBuilder(ExplodedNode *pred, const CFGBlock *src,
+ const Expr *condition, CoreEngine* eng)
+ : Eng(*eng), Src(src), Condition(condition), Pred(pred) {}
+
+ class iterator {
+ CFGBlock::const_succ_reverse_iterator I;
+
+ friend class SwitchNodeBuilder;
+ iterator(CFGBlock::const_succ_reverse_iterator i) : I(i) {}
+
+ public:
+ iterator &operator++() { ++I; return *this; }
+ bool operator!=(const iterator &X) const { return I != X.I; }
+ bool operator==(const iterator &X) const { return I == X.I; }
+
+ const CaseStmt *getCase() const {
+ return llvm::cast<CaseStmt>((*I)->getLabel());
+ }
+
+ const CFGBlock *getBlock() const {
+ return *I;
+ }
+ };
+
+ iterator begin() { return iterator(Src->succ_rbegin()+1); }
+ iterator end() { return iterator(Src->succ_rend()); }
+
+ const SwitchStmt *getSwitch() const {
+ return llvm::cast<SwitchStmt>(Src->getTerminator());
+ }
+
+ ExplodedNode *generateCaseStmtNode(const iterator &I,
+ ProgramStateRef State);
+
+ ExplodedNode *generateDefaultCaseNode(ProgramStateRef State,
+ bool isSink = false);
+
+ const Expr *getCondition() const { return Condition; }
+
+ ProgramStateRef getState() const { return Pred->State; }
+
+ const LocationContext *getLocationContext() const {
+ return Pred->getLocationContext();
+ }
+};
+
+} // end ento namespace
+} // end clang namespace
+
+#endif
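A hedged sketch of driving the BranchNodeBuilder declared above when processing a two-way branch; the stateForBranch stub stands in for the ConstraintManager query a real engine would make:

    #include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"

    using namespace clang;
    using namespace ento;

    // Stub: a real engine would ask the ConstraintManager whether the branch
    // condition can be assumed; null means the branch is infeasible.
    static ProgramStateRef stateForBranch(ProgramStateRef St, bool Assumption) {
      return St;  // pretend both branches are feasible
    }

    static void processBranch(BranchNodeBuilder &Bldr, ExplodedNode *Pred) {
      ProgramStateRef St = Pred->getState();
      for (int i = 0; i < 2; ++i) {
        bool assumption = (i == 0);
        if (ProgramStateRef NewSt = stateForBranch(St, assumption))
          Bldr.generateNode(NewSt, assumption, Pred);  // feasible: add a node
        else
          Bldr.markInfeasible(assumption);             // prune this branch
      }
    }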
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Environment.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Environment.h
new file mode 100644
index 0000000..b80213e
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Environment.h
@@ -0,0 +1,139 @@
+//== Environment.h - Map from Stmt* to Locations/Values ---------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Environment and EnvironmentManager classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_ENVIRONMENT_H
+#define LLVM_CLANG_GR_ENVIRONMENT_H
+
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/ImmutableMap.h"
+
+namespace clang {
+
+class LiveVariables;
+
+namespace ento {
+
+class EnvironmentManager;
+class SValBuilder;
+
+/// An entry in the environment consists of a Stmt and a LocationContext.
+/// This allows the environment to manage context-sensitive bindings,
+/// which is essential for modeling recursive function analysis, among
+/// other things.
+class EnvironmentEntry : public std::pair<const Stmt*,
+ const StackFrameContext *> {
+public:
+ EnvironmentEntry(const Stmt *s, const LocationContext *L)
+ : std::pair<const Stmt*,
+ const StackFrameContext*>(s, L ? L->getCurrentStackFrame():0) {}
+
+ const Stmt *getStmt() const { return first; }
+ const LocationContext *getLocationContext() const { return second; }
+
+ /// Profile an EnvironmentEntry for inclusion in a FoldingSet.
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const EnvironmentEntry &E) {
+ ID.AddPointer(E.getStmt());
+ ID.AddPointer(E.getLocationContext());
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID, *this);
+ }
+};
+
+/// An immutable map from EnvironmentEntries to SVals.
+class Environment {
+private:
+ friend class EnvironmentManager;
+
+ // Type definitions.
+ typedef llvm::ImmutableMap<EnvironmentEntry, SVal> BindingsTy;
+
+ // Data.
+ BindingsTy ExprBindings;
+
+ Environment(BindingsTy eb)
+ : ExprBindings(eb) {}
+
+ SVal lookupExpr(const EnvironmentEntry &E) const;
+
+public:
+ typedef BindingsTy::iterator iterator;
+ iterator begin() const { return ExprBindings.begin(); }
+ iterator end() const { return ExprBindings.end(); }
+
+ /// Fetches the current binding of the expression in the
+ /// Environment.
+ SVal getSVal(const EnvironmentEntry &E,
+ SValBuilder &svalBuilder,
+ bool useOnlyDirectBindings = false) const;
+
+ /// Profile - Profile the contents of an Environment object for use
+ /// in a FoldingSet.
+ static void Profile(llvm::FoldingSetNodeID& ID, const Environment* env) {
+ env->ExprBindings.Profile(ID);
+ }
+
+ /// Profile - Used to profile the contents of this object for inclusion
+ /// in a FoldingSet.
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ Profile(ID, this);
+ }
+
+ bool operator==(const Environment& RHS) const {
+ return ExprBindings == RHS.ExprBindings;
+ }
+
+ void print(raw_ostream &Out, const char *NL, const char *Sep) const;
+
+private:
+ void printAux(raw_ostream &Out, bool printLocations,
+ const char *NL, const char *Sep) const;
+};
+
+class EnvironmentManager {
+private:
+ typedef Environment::BindingsTy::Factory FactoryTy;
+ FactoryTy F;
+
+public:
+ EnvironmentManager(llvm::BumpPtrAllocator& Allocator) : F(Allocator) {}
+ ~EnvironmentManager() {}
+
+ Environment getInitialEnvironment() {
+ return Environment(F.getEmptyMap());
+ }
+
+ /// Bind a symbolic value to the given environment entry.
+ Environment bindExpr(Environment Env, const EnvironmentEntry &E, SVal V,
+ bool Invalidate);
+
+ /// Bind the location 'location' and value 'V' to the specified
+ /// environment entry.
+ Environment bindExprAndLocation(Environment Env,
+ const EnvironmentEntry &E,
+ SVal location,
+ SVal V);
+
+ Environment removeDeadBindings(Environment Env,
+ SymbolReaper &SymReaper,
+ ProgramStateRef state);
+};
+
+} // end GR namespace
+
+} // end clang namespace
+
+#endif
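A small sketch of the manager interface above: start from the empty environment and bind a value to a (statement, location-context) entry (the statement, context, and value are assumed to come from the engine):

    #include "clang/StaticAnalyzer/Core/PathSensitive/Environment.h"

    using namespace clang;
    using namespace ento;

    static Environment bindOne(EnvironmentManager &Mgr, const Stmt *S,
                               const LocationContext *LCtx, SVal V) {
      Environment Env = Mgr.getInitialEnvironment();
      // Entries are keyed by (Stmt, stack frame) pairs; see EnvironmentEntry.
      return Mgr.bindExpr(Env, EnvironmentEntry(S, LCtx), V,
                          /*Invalidate=*/false);
    }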
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
new file mode 100644
index 0000000..46fbb88
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h
@@ -0,0 +1,480 @@
+//=-- ExplodedGraph.h - Local, Path-Sens. "Exploded Graph" -*- C++ -*-------==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the template classes ExplodedNode and ExplodedGraph,
+// which represent a path-sensitive, intra-procedural "exploded graph."
+// See "Precise interprocedural dataflow analysis via graph reachability"
+// by Reps, Horwitz, and Sagiv
+// (http://portal.acm.org/citation.cfm?id=199462) for the definition of an
+// exploded graph.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_EXPLODEDGRAPH
+#define LLVM_CLANG_GR_EXPLODEDGRAPH
+
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/AST/Decl.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/GraphTraits.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/Support/Casting.h"
+#include "clang/Analysis/Support/BumpVector.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include <vector>
+
+namespace clang {
+
+class CFG;
+
+namespace ento {
+
+class ExplodedGraph;
+
+//===----------------------------------------------------------------------===//
+// ExplodedGraph "implementation" classes. These classes are not typed to
+// contain a specific kind of state. Type-specialized versions are defined
+// on top of these classes.
+//===----------------------------------------------------------------------===//
+
+// ExplodedNode is not constified all over the engine because we need to add
+// successors to it at any time after creating it.
+
+class ExplodedNode : public llvm::FoldingSetNode {
+ friend class ExplodedGraph;
+ friend class CoreEngine;
+ friend class NodeBuilder;
+ friend class BranchNodeBuilder;
+ friend class IndirectGotoNodeBuilder;
+ friend class SwitchNodeBuilder;
+ friend class EndOfFunctionNodeBuilder;
+
+ class NodeGroup {
+ enum { Size1 = 0x0, SizeOther = 0x1, AuxFlag = 0x2, Mask = 0x3 };
+ uintptr_t P;
+
+ unsigned getKind() const {
+ return P & 0x1;
+ }
+
+ void *getPtr() const {
+ assert (!getFlag());
+ return reinterpret_cast<void*>(P & ~Mask);
+ }
+
+ ExplodedNode *getNode() const {
+ return reinterpret_cast<ExplodedNode*>(getPtr());
+ }
+
+ public:
+ NodeGroup() : P(0) {}
+
+ ExplodedNode **begin() const;
+
+ ExplodedNode **end() const;
+
+ unsigned size() const;
+
+ bool empty() const { return (P & ~Mask) == 0; }
+
+ void addNode(ExplodedNode *N, ExplodedGraph &G);
+
+ void replaceNode(ExplodedNode *node);
+
+ void setFlag() {
+ assert(P == 0);
+ P = AuxFlag;
+ }
+
+ bool getFlag() const {
+ return P & AuxFlag ? true : false;
+ }
+ };
+
+ /// Location - The program location (within a function body) associated
+ /// with this node.
+ const ProgramPoint Location;
+
+ /// State - The state associated with this node.
+ ProgramStateRef State;
+
+ /// Preds - The predecessors of this node.
+ NodeGroup Preds;
+
+ /// Succs - The successors of this node.
+ NodeGroup Succs;
+
+public:
+
+ explicit ExplodedNode(const ProgramPoint &loc, ProgramStateRef state,
+ bool IsSink)
+ : Location(loc), State(state) {
+ if (IsSink)
+ Succs.setFlag();
+ }
+
+ ~ExplodedNode() {}
+
+ /// getLocation - Returns the program location (ProgramPoint) associated
+ /// with this node.
+ ProgramPoint getLocation() const { return Location; }
+
+ const LocationContext *getLocationContext() const {
+ return getLocation().getLocationContext();
+ }
+
+ const Decl &getCodeDecl() const { return *getLocationContext()->getDecl(); }
+
+ CFG &getCFG() const { return *getLocationContext()->getCFG(); }
+
+ ParentMap &getParentMap() const {return getLocationContext()->getParentMap();}
+
+ template <typename T>
+ T &getAnalysis() const {
+ return *getLocationContext()->getAnalysis<T>();
+ }
+
+ ProgramStateRef getState() const { return State; }
+
+ template <typename T>
+ const T* getLocationAs() const { return llvm::dyn_cast<T>(&Location); }
+
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const ProgramPoint &Loc,
+ ProgramStateRef state,
+ bool IsSink) {
+ ID.Add(Loc);
+ ID.AddPointer(state.getPtr());
+ ID.AddBoolean(IsSink);
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ Profile(ID, getLocation(), getState(), isSink());
+ }
+
+ /// addPredecessor - Adds a predecessor to the current node, and
+ /// in tandem adds this node as a successor of the other node.
+ void addPredecessor(ExplodedNode *V, ExplodedGraph &G);
+
+ unsigned succ_size() const { return Succs.size(); }
+ unsigned pred_size() const { return Preds.size(); }
+ bool succ_empty() const { return Succs.empty(); }
+ bool pred_empty() const { return Preds.empty(); }
+
+ bool isSink() const { return Succs.getFlag(); }
+
+ bool hasSinglePred() const {
+ return (pred_size() == 1);
+ }
+
+ ExplodedNode *getFirstPred() {
+ return pred_empty() ? NULL : *(pred_begin());
+ }
+
+ const ExplodedNode *getFirstPred() const {
+ return const_cast<ExplodedNode*>(this)->getFirstPred();
+ }
+
+ // Iterators over successor and predecessor vertices.
+ typedef ExplodedNode** succ_iterator;
+ typedef const ExplodedNode* const * const_succ_iterator;
+ typedef ExplodedNode** pred_iterator;
+ typedef const ExplodedNode* const * const_pred_iterator;
+
+ pred_iterator pred_begin() { return Preds.begin(); }
+ pred_iterator pred_end() { return Preds.end(); }
+
+ const_pred_iterator pred_begin() const {
+ return const_cast<ExplodedNode*>(this)->pred_begin();
+ }
+ const_pred_iterator pred_end() const {
+ return const_cast<ExplodedNode*>(this)->pred_end();
+ }
+
+ succ_iterator succ_begin() { return Succs.begin(); }
+ succ_iterator succ_end() { return Succs.end(); }
+
+ const_succ_iterator succ_begin() const {
+ return const_cast<ExplodedNode*>(this)->succ_begin();
+ }
+ const_succ_iterator succ_end() const {
+ return const_cast<ExplodedNode*>(this)->succ_end();
+ }
+
+ // For debugging.
+
+public:
+
+ class Auditor {
+ public:
+ virtual ~Auditor();
+ virtual void AddEdge(ExplodedNode *Src, ExplodedNode *Dst) = 0;
+ };
+
+ static void SetAuditor(Auditor* A);
+
+private:
+ void replaceSuccessor(ExplodedNode *node) { Succs.replaceNode(node); }
+ void replacePredecessor(ExplodedNode *node) { Preds.replaceNode(node); }
+};
+
+// FIXME: Is this class necessary?
+class InterExplodedGraphMap {
+ virtual void anchor();
+ llvm::DenseMap<const ExplodedNode*, ExplodedNode*> M;
+ friend class ExplodedGraph;
+
+public:
+ ExplodedNode *getMappedNode(const ExplodedNode *N) const;
+
+ InterExplodedGraphMap() {}
+ virtual ~InterExplodedGraphMap() {}
+};
+
+class ExplodedGraph {
+protected:
+ friend class CoreEngine;
+
+ // Type definitions.
+ typedef std::vector<ExplodedNode *> NodeVector;
+
+ /// The roots of the simulation graph. Usually there will be only
+ /// one, but clients are free to establish multiple subgraphs within a single
+ /// simulation graph. Moreover, these subgraphs can often merge when paths from
+ /// different roots reach the same state at the same program location.
+ NodeVector Roots;
+
+ /// The nodes in the simulation graph which have been
+ /// specially marked as the endpoint of an abstract simulation path.
+ NodeVector EndNodes;
+
+ /// Nodes - The nodes in the graph.
+ llvm::FoldingSet<ExplodedNode> Nodes;
+
+ /// BVC - Allocator and context for allocating nodes and their predecessor
+ /// and successor groups.
+ BumpVectorContext BVC;
+
+ /// NumNodes - The number of nodes in the graph.
+ unsigned NumNodes;
+
+ /// A list of recently allocated nodes that can potentially be recycled.
+ NodeVector ChangedNodes;
+
+ /// A list of nodes that can be reused.
+ NodeVector FreeNodes;
+
+ /// A flag that indicates whether nodes should be recycled.
+ bool reclaimNodes;
+
+ /// Counter to determine when to reclaim nodes.
+ unsigned reclaimCounter;
+
+public:
+
+ /// \brief Retrieve the node associated with a (Location,State) pair,
+ /// where the 'Location' is a ProgramPoint in the CFG. If no node for
+ /// this pair exists, it is created. IsNew is set to true if
+ /// the node was freshly created.
+ ExplodedNode *getNode(const ProgramPoint &L, ProgramStateRef State,
+ bool IsSink = false,
+ bool* IsNew = 0);
+
+ ExplodedGraph* MakeEmptyGraph() const {
+ return new ExplodedGraph();
+ }
+
+ /// addRoot - Add an untyped node to the set of roots.
+ ExplodedNode *addRoot(ExplodedNode *V) {
+ Roots.push_back(V);
+ return V;
+ }
+
+ /// addEndOfPath - Add an untyped node to the set of EOP nodes.
+ ExplodedNode *addEndOfPath(ExplodedNode *V) {
+ EndNodes.push_back(V);
+ return V;
+ }
+
+ ExplodedGraph();
+
+ ~ExplodedGraph();
+
+ unsigned num_roots() const { return Roots.size(); }
+ unsigned num_eops() const { return EndNodes.size(); }
+
+ bool empty() const { return NumNodes == 0; }
+ unsigned size() const { return NumNodes; }
+
+ // Iterators.
+ typedef ExplodedNode NodeTy;
+ typedef llvm::FoldingSet<ExplodedNode> AllNodesTy;
+ typedef NodeVector::iterator roots_iterator;
+ typedef NodeVector::const_iterator const_roots_iterator;
+ typedef NodeVector::iterator eop_iterator;
+ typedef NodeVector::const_iterator const_eop_iterator;
+ typedef AllNodesTy::iterator node_iterator;
+ typedef AllNodesTy::const_iterator const_node_iterator;
+
+ node_iterator nodes_begin() { return Nodes.begin(); }
+
+ node_iterator nodes_end() { return Nodes.end(); }
+
+ const_node_iterator nodes_begin() const { return Nodes.begin(); }
+
+ const_node_iterator nodes_end() const { return Nodes.end(); }
+
+ roots_iterator roots_begin() { return Roots.begin(); }
+
+ roots_iterator roots_end() { return Roots.end(); }
+
+ const_roots_iterator roots_begin() const { return Roots.begin(); }
+
+ const_roots_iterator roots_end() const { return Roots.end(); }
+
+ eop_iterator eop_begin() { return EndNodes.begin(); }
+
+ eop_iterator eop_end() { return EndNodes.end(); }
+
+ const_eop_iterator eop_begin() const { return EndNodes.begin(); }
+
+ const_eop_iterator eop_end() const { return EndNodes.end(); }
+
+ llvm::BumpPtrAllocator & getAllocator() { return BVC.getAllocator(); }
+ BumpVectorContext &getNodeAllocator() { return BVC; }
+
+ typedef llvm::DenseMap<const ExplodedNode*, ExplodedNode*> NodeMap;
+
+ std::pair<ExplodedGraph*, InterExplodedGraphMap*>
+ Trim(const NodeTy* const* NBeg, const NodeTy* const* NEnd,
+ llvm::DenseMap<const void*, const void*> *InverseMap = 0) const;
+
+ ExplodedGraph* TrimInternal(const ExplodedNode* const * NBeg,
+ const ExplodedNode* const * NEnd,
+ InterExplodedGraphMap *M,
+ llvm::DenseMap<const void*, const void*> *InverseMap) const;
+
+ /// Enable tracking of recently allocated nodes for potential reclamation
+ /// when calling reclaimRecentlyAllocatedNodes().
+ void enableNodeReclamation() { reclaimNodes = true; }
+
+ /// Reclaim "uninteresting" nodes created since the last time this method
+ /// was called.
+ void reclaimRecentlyAllocatedNodes();
+
+private:
+ bool shouldCollect(const ExplodedNode *node);
+ void collectNode(ExplodedNode *node);
+};
+
+class ExplodedNodeSet {
+ typedef llvm::SmallPtrSet<ExplodedNode*,5> ImplTy;
+ ImplTy Impl;
+
+public:
+ ExplodedNodeSet(ExplodedNode *N) {
+ assert (N && !static_cast<ExplodedNode*>(N)->isSink());
+ Impl.insert(N);
+ }
+
+ ExplodedNodeSet() {}
+
+ inline void Add(ExplodedNode *N) {
+ if (N && !static_cast<ExplodedNode*>(N)->isSink()) Impl.insert(N);
+ }
+
+ typedef ImplTy::iterator iterator;
+ typedef ImplTy::const_iterator const_iterator;
+
+ unsigned size() const { return Impl.size(); }
+ bool empty() const { return Impl.empty(); }
+ bool erase(ExplodedNode *N) { return Impl.erase(N); }
+
+ void clear() { Impl.clear(); }
+ void insert(const ExplodedNodeSet &S) {
+ assert(&S != this);
+ if (empty())
+ Impl = S.Impl;
+ else
+ Impl.insert(S.begin(), S.end());
+ }
+
+ inline iterator begin() { return Impl.begin(); }
+ inline iterator end() { return Impl.end(); }
+
+ inline const_iterator begin() const { return Impl.begin(); }
+ inline const_iterator end() const { return Impl.end(); }
+};
+
+} // end ento namespace
+
+} // end clang namespace
+
+// GraphTraits
+
+namespace llvm {
+ template<> struct GraphTraits<clang::ento::ExplodedNode*> {
+ typedef clang::ento::ExplodedNode NodeType;
+ typedef NodeType::succ_iterator ChildIteratorType;
+ typedef llvm::df_iterator<NodeType*> nodes_iterator;
+
+ static inline NodeType* getEntryNode(NodeType* N) {
+ return N;
+ }
+
+ static inline ChildIteratorType child_begin(NodeType* N) {
+ return N->succ_begin();
+ }
+
+ static inline ChildIteratorType child_end(NodeType* N) {
+ return N->succ_end();
+ }
+
+ static inline nodes_iterator nodes_begin(NodeType* N) {
+ return df_begin(N);
+ }
+
+ static inline nodes_iterator nodes_end(NodeType* N) {
+ return df_end(N);
+ }
+ };
+
+ template<> struct GraphTraits<const clang::ento::ExplodedNode*> {
+ typedef const clang::ento::ExplodedNode NodeType;
+ typedef NodeType::const_succ_iterator ChildIteratorType;
+ typedef llvm::df_iterator<NodeType*> nodes_iterator;
+
+ static inline NodeType* getEntryNode(NodeType* N) {
+ return N;
+ }
+
+ static inline ChildIteratorType child_begin(NodeType* N) {
+ return N->succ_begin();
+ }
+
+ static inline ChildIteratorType child_end(NodeType* N) {
+ return N->succ_end();
+ }
+
+ static inline nodes_iterator nodes_begin(NodeType* N) {
+ return df_begin(N);
+ }
+
+ static inline nodes_iterator nodes_end(NodeType* N) {
+ return df_end(N);
+ }
+ };
+
+} // end llvm namespace
+
+#endif
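The GraphTraits specializations above are what let LLVM's generic graph utilities (df_iterator, graph writers, and so on) walk an exploded graph directly. A small sketch (an illustration, not part of the patch itself), using only names declared in this header:

    #include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
    #include "llvm/ADT/DepthFirstIterator.h"

    // Count the nodes reachable from Root by following successor edges; the
    // depth-first iterator picks up GraphTraits<const ExplodedNode*> above.
    unsigned countReachable(const clang::ento::ExplodedNode *Root) {
      unsigned Count = 0;
      for (llvm::df_iterator<const clang::ento::ExplodedNode *>
             I = llvm::df_begin(Root), E = llvm::df_end(Root);
           I != E; ++I)
        ++Count;
      return Count;
    }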
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
new file mode 100644
index 0000000..2a21a03
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h
@@ -0,0 +1,496 @@
+//===-- ExprEngine.h - Path-Sensitive Expression-Level Dataflow ---*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a meta-engine for path-sensitive dataflow analysis that
+// is built on CoreEngine, but provides the boilerplate to execute transfer
+// functions and build the ExplodedGraph at the expression level.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_EXPRENGINE
+#define LLVM_CLANG_GR_EXPRENGINE
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+
+namespace clang {
+
+class AnalysisDeclContextManager;
+class CXXCatchStmt;
+class CXXConstructExpr;
+class CXXDeleteExpr;
+class CXXNewExpr;
+class CXXTemporaryObjectExpr;
+class CXXThisExpr;
+class MaterializeTemporaryExpr;
+class ObjCAtSynchronizedStmt;
+class ObjCForCollectionStmt;
+
+namespace ento {
+
+class AnalysisManager;
+class CallOrObjCMessage;
+class ObjCMessage;
+
+class ExprEngine : public SubEngine {
+ AnalysisManager &AMgr;
+
+ AnalysisDeclContextManager &AnalysisDeclContexts;
+
+ CoreEngine Engine;
+
+ /// G - the simulation graph.
+ ExplodedGraph& G;
+
+ /// StateMgr - Object that manages the data for all created states.
+ ProgramStateManager StateMgr;
+
+ /// SymMgr - Object that manages the symbol information.
+ SymbolManager& SymMgr;
+
+ /// svalBuilder - SValBuilder object that creates SVals from expressions.
+ SValBuilder &svalBuilder;
+
+ /// EntryNode - The immediate predecessor node.
+ ExplodedNode *EntryNode;
+
+ /// CleanedState - The state for EntryNode "cleaned" of all dead
+ /// variables and symbols (as determined by a liveness analysis).
+ ProgramStateRef CleanedState;
+
+ /// currentStmt - The current block-level statement.
+ const Stmt *currentStmt;
+ unsigned int currentStmtIdx;
+ const NodeBuilderContext *currentBuilderContext;
+
+ /// Obj-C Class Identifiers.
+ IdentifierInfo* NSExceptionII;
+
+ /// Obj-C Selectors.
+ Selector* NSExceptionInstanceRaiseSelectors;
+ Selector RaiseSel;
+
+ /// Whether or not GC is enabled in this analysis.
+ bool ObjCGCEnabled;
+
+ /// The BugReporter associated with this engine. It is important that
+ /// this object be placed at the very end of member variables so that its
+ /// destructor is called before the rest of the ExprEngine is destroyed.
+ GRBugReporter BR;
+
+public:
+ ExprEngine(AnalysisManager &mgr, bool gcEnabled,
+ SetOfConstDecls *VisitedCallees,
+ FunctionSummariesTy *FS);
+
+ ~ExprEngine();
+
+ /// Returns true if there is still simulation state on the worklist.
+ bool ExecuteWorkList(const LocationContext *L, unsigned Steps = 150000) {
+ return Engine.ExecuteWorkList(L, Steps, 0);
+ }
+
+ /// Execute the work list with an initial state. Nodes that reach the exit
+ /// of the function are added into the Dst set, which represents the exit
+ /// state of the function call. Returns true if there is still simulation
+ /// state on the worklist.
+ bool ExecuteWorkListWithInitialState(const LocationContext *L, unsigned Steps,
+ ProgramStateRef InitState,
+ ExplodedNodeSet &Dst) {
+ return Engine.ExecuteWorkListWithInitialState(L, Steps, InitState, Dst);
+ }
+
+ /// getContext - Return the ASTContext associated with this analysis.
+ ASTContext &getContext() const { return AMgr.getASTContext(); }
+
+ virtual AnalysisManager &getAnalysisManager() { return AMgr; }
+
+ CheckerManager &getCheckerManager() const {
+ return *AMgr.getCheckerManager();
+ }
+
+ SValBuilder &getSValBuilder() { return svalBuilder; }
+
+ BugReporter& getBugReporter() { return BR; }
+
+ const NodeBuilderContext &getBuilderContext() {
+ assert(currentBuilderContext);
+ return *currentBuilderContext;
+ }
+
+ bool isObjCGCEnabled() { return ObjCGCEnabled; }
+
+ const Stmt *getStmt() const;
+
+ void GenerateAutoTransition(ExplodedNode *N);
+ void enqueueEndOfPath(ExplodedNodeSet &S);
+ void GenerateCallExitNode(ExplodedNode *N);
+
+ /// ViewGraph - Visualize the ExplodedGraph created by executing the
+ /// simulation.
+ void ViewGraph(bool trim = false);
+
+ void ViewGraph(ExplodedNode** Beg, ExplodedNode** End);
+
+ /// getInitialState - Return the initial state used for the root vertex
+ /// in the ExplodedGraph.
+ ProgramStateRef getInitialState(const LocationContext *InitLoc);
+
+ ExplodedGraph& getGraph() { return G; }
+ const ExplodedGraph& getGraph() const { return G; }
+
+ /// processCFGElement - Called by CoreEngine. Used to generate new successor
+ /// nodes by processing the 'effects' of a CFG element.
+ void processCFGElement(const CFGElement E, ExplodedNode *Pred,
+ unsigned StmtIdx, NodeBuilderContext *Ctx);
+
+ void ProcessStmt(const CFGStmt S, ExplodedNode *Pred);
+
+ void ProcessInitializer(const CFGInitializer I, ExplodedNode *Pred);
+
+ void ProcessImplicitDtor(const CFGImplicitDtor D, ExplodedNode *Pred);
+
+ void ProcessAutomaticObjDtor(const CFGAutomaticObjDtor D,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+ void ProcessBaseDtor(const CFGBaseDtor D,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+ void ProcessMemberDtor(const CFGMemberDtor D,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+ void ProcessTemporaryDtor(const CFGTemporaryDtor D,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
+ /// Called by CoreEngine when processing the entrance of a CFGBlock.
+ virtual void processCFGBlockEntrance(const BlockEdge &L,
+ NodeBuilderWithSinks &nodeBuilder);
+
+ /// processBranch - Called by CoreEngine. Used to generate successor
+ /// nodes by processing the 'effects' of a branch condition.
+ void processBranch(const Stmt *Condition, const Stmt *Term,
+ NodeBuilderContext& BuilderCtx,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst,
+ const CFGBlock *DstT,
+ const CFGBlock *DstF);
+
+ /// processIndirectGoto - Called by CoreEngine. Used to generate successor
+ /// nodes by processing the 'effects' of a computed goto jump.
+ void processIndirectGoto(IndirectGotoNodeBuilder& builder);
+
+ /// processSwitch - Called by CoreEngine. Used to generate successor
+ /// nodes by processing the 'effects' of a switch statement.
+ void processSwitch(SwitchNodeBuilder& builder);
+
+ /// processEndOfFunction - Called by CoreEngine. Used to generate end-of-path
+ /// nodes when the control reaches the end of a function.
+ void processEndOfFunction(NodeBuilderContext& BC);
+
+ /// Generate the entry node of the callee.
+ void processCallEnter(CallEnter CE, ExplodedNode *Pred);
+
+ /// Generate the first post callsite node.
+ void processCallExit(ExplodedNode *Pred);
+
+ /// Called by CoreEngine when the analysis worklist has terminated.
+ void processEndWorklist(bool hasWorkRemaining);
+
+ /// processAssume - Callback function invoked by the ConstraintManager when
+ /// making assumptions about state values.
+ ProgramStateRef processAssume(ProgramStateRef state, SVal cond, bool assumption);
+
+ /// wantsRegionChangeUpdate - Called by ProgramStateManager to determine if a
+ /// region change should trigger a processRegionChanges update.
+ bool wantsRegionChangeUpdate(ProgramStateRef state);
+
+ /// processRegionChanges - Called by ProgramStateManager whenever a change is made
+ /// to the store. Used to update checkers that track region values.
+ ProgramStateRef
+ processRegionChanges(ProgramStateRef state,
+ const StoreManager::InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call);
+
+ /// printState - Called by ProgramStateManager to print checker-specific data.
+ void printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep);
+
+ virtual ProgramStateManager& getStateManager() { return StateMgr; }
+
+ StoreManager& getStoreManager() { return StateMgr.getStoreManager(); }
+
+ ConstraintManager& getConstraintManager() {
+ return StateMgr.getConstraintManager();
+ }
+
+ // FIXME: Remove when we migrate over to just using SValBuilder.
+ BasicValueFactory& getBasicVals() {
+ return StateMgr.getBasicVals();
+ }
+ const BasicValueFactory& getBasicVals() const {
+ return StateMgr.getBasicVals();
+ }
+
+ // FIXME: Remove when we migrate over to just using ValueManager.
+ SymbolManager& getSymbolManager() { return SymMgr; }
+ const SymbolManager& getSymbolManager() const { return SymMgr; }
+
+ // Functions for external checking of whether we have unfinished work
+ bool wasBlocksExhausted() const { return Engine.wasBlocksExhausted(); }
+ bool hasEmptyWorkList() const { return !Engine.getWorkList()->hasWork(); }
+ bool hasWorkRemaining() const { return Engine.hasWorkRemaining(); }
+
+ const CoreEngine &getCoreEngine() const { return Engine; }
+
+public:
+ /// Visit - Transfer function logic for all statements. Dispatches to
+ /// other functions that handle specific kinds of statements.
+ void Visit(const Stmt *S, ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
+ /// VisitArraySubscriptExpr - Transfer function for array accesses.
+ void VisitLvalArraySubscriptExpr(const ArraySubscriptExpr *Ex,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ /// VisitAsmStmt - Transfer function logic for inline asm.
+ void VisitAsmStmt(const AsmStmt *A, ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
+ /// VisitBlockExpr - Transfer function logic for BlockExprs.
+ void VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ /// VisitBinaryOperator - Transfer function logic for binary operators.
+ void VisitBinaryOperator(const BinaryOperator* B, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+
+ /// VisitCall - Transfer function for function calls.
+ void VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ /// VisitCast - Transfer function logic for all casts (implicit and explicit).
+ void VisitCast(const CastExpr *CastE, const Expr *Ex, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ /// VisitCompoundLiteralExpr - Transfer function logic for compound literals.
+ void VisitCompoundLiteralExpr(const CompoundLiteralExpr *CL,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
+ /// Transfer function logic for DeclRefExprs and BlockDeclRefExprs.
+ void VisitCommonDeclRefExpr(const Expr *DR, const NamedDecl *D,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
+ /// VisitDeclStmt - Transfer function logic for DeclStmts.
+ void VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ /// VisitGuardedExpr - Transfer function logic for the '?:' operator and
+ /// __builtin_choose_expr.
+ void VisitGuardedExpr(const Expr *Ex, const Expr *L, const Expr *R,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
+ void VisitInitListExpr(const InitListExpr *E, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ /// VisitLogicalExpr - Transfer function logic for '&&', '||'
+ void VisitLogicalExpr(const BinaryOperator* B, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ /// VisitMemberExpr - Transfer function for member expressions.
+ void VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ /// Transfer function logic for ObjCAtSynchronizedStmts.
+ void VisitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt *S,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
+ /// Transfer function logic for computing the lvalue of an Objective-C ivar.
+ void VisitLvalObjCIvarRefExpr(const ObjCIvarRefExpr *DR, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ /// VisitObjCForCollectionStmt - Transfer function logic for
+ /// ObjCForCollectionStmt.
+ void VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
+ void VisitObjCMessage(const ObjCMessage &msg, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ /// VisitReturnStmt - Transfer function logic for return statements.
+ void VisitReturnStmt(const ReturnStmt *R, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ /// VisitOffsetOfExpr - Transfer function for offsetof.
+ void VisitOffsetOfExpr(const OffsetOfExpr *Ex, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ /// VisitUnaryExprOrTypeTraitExpr - Transfer function for sizeof and alignof.
+ void VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
+ /// VisitUnaryOperator - Transfer function logic for unary operators.
+ void VisitUnaryOperator(const UnaryOperator* B, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ /// Handle ++ and -- (both pre- and post-increment).
+ void VisitIncrementDecrementOperator(const UnaryOperator* U,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ void VisitCXXCatchStmt(const CXXCatchStmt *CS, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ void VisitCXXThisExpr(const CXXThisExpr *TE, ExplodedNode *Pred,
+ ExplodedNodeSet & Dst);
+
+ void VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *expr,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
+ void VisitCXXConstructExpr(const CXXConstructExpr *E, const MemRegion *Dest,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
+ void VisitCXXDestructor(const CXXDestructorDecl *DD,
+ const MemRegion *Dest, const Stmt *S,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst);
+
+ void VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ void VisitCXXDeleteExpr(const CXXDeleteExpr *CDE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ /// Create a C++ temporary object for an rvalue.
+ void CreateCXXTemporaryObject(const MaterializeTemporaryExpr *ME,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst);
+
+ /// Synthesize CXXThisRegion.
+ const CXXThisRegion *getCXXThisRegion(const CXXRecordDecl *RD,
+ const StackFrameContext *SFC);
+
+ const CXXThisRegion *getCXXThisRegion(const CXXMethodDecl *decl,
+ const StackFrameContext *frameCtx);
+
+ /// evalEagerlyAssume - Given the nodes in 'Src', eagerly assume symbolic
+ /// expressions of the form 'x != 0' and generate new nodes (stored in Dst)
+ /// with those assumptions.
+ void evalEagerlyAssume(ExplodedNodeSet &Dst, ExplodedNodeSet &Src,
+ const Expr *Ex);
+
+ std::pair<const ProgramPointTag *, const ProgramPointTag*>
+ getEagerlyAssumeTags();
+
+ SVal evalMinus(SVal X) {
+ return X.isValid() ? svalBuilder.evalMinus(cast<NonLoc>(X)) : X;
+ }
+
+ SVal evalComplement(SVal X) {
+ return X.isValid() ? svalBuilder.evalComplement(cast<NonLoc>(X)) : X;
+ }
+
+public:
+
+ SVal evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
+ NonLoc L, NonLoc R, QualType T) {
+ return svalBuilder.evalBinOpNN(state, op, L, R, T);
+ }
+
+ SVal evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
+ NonLoc L, SVal R, QualType T) {
+ return R.isValid() ? svalBuilder.evalBinOpNN(state, op, L, cast<NonLoc>(R), T) : R;
+ }
+
+ SVal evalBinOp(ProgramStateRef ST, BinaryOperator::Opcode Op,
+ SVal LHS, SVal RHS, QualType T) {
+ return svalBuilder.evalBinOp(ST, Op, LHS, RHS, T);
+ }
+
+protected:
+ void evalObjCMessage(StmtNodeBuilder &Bldr, const ObjCMessage &msg,
+ ExplodedNode *Pred, ProgramStateRef state,
+ bool GenSink);
+
+ ProgramStateRef invalidateArguments(ProgramStateRef State,
+ const CallOrObjCMessage &Call,
+ const LocationContext *LC);
+
+ ProgramStateRef MarkBranch(ProgramStateRef state,
+ const Stmt *Terminator,
+ const LocationContext *LCtx,
+ bool branchTaken);
+
+ /// evalBind - Handle the semantics of binding a value to a specific location.
+ /// This method is used by evalStore, VisitDeclStmt, and others.
+ void evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE, ExplodedNode *Pred,
+ SVal location, SVal Val, bool atDeclInit = false);
+
+public:
+ // FIXME: 'tag' should be removed, and a LocationContext should be used
+ // instead.
+ // FIXME: Comment on the meaning of the arguments, when 'St' may not
+ // be the same as Pred->state, and when 'location' may not be the
+ // same as state->getLValue(Ex).
+ /// Simulate a read of the result of Ex.
+ void evalLoad(ExplodedNodeSet &Dst,
+ const Expr *NodeEx, /* Eventually will be a CFGStmt */
+ const Expr *BoundExpr,
+ ExplodedNode *Pred,
+ ProgramStateRef St,
+ SVal location,
+ const ProgramPointTag *tag = 0,
+ QualType LoadTy = QualType());
+
+ // FIXME: 'tag' should be removed, and a LocationContext should be used
+ // instead.
+ void evalStore(ExplodedNodeSet &Dst, const Expr *AssignE, const Expr *StoreE,
+ ExplodedNode *Pred, ProgramStateRef St, SVal TargetLV, SVal Val,
+ const ProgramPointTag *tag = 0);
+private:
+ void evalLoadCommon(ExplodedNodeSet &Dst,
+ const Expr *NodeEx, /* Eventually will be a CFGStmt */
+ const Expr *BoundEx,
+ ExplodedNode *Pred,
+ ProgramStateRef St,
+ SVal location,
+ const ProgramPointTag *tag,
+ QualType LoadTy);
+
+ // FIXME: 'tag' should be removed, and a LocationContext should be used
+ // instead.
+ void evalLocation(ExplodedNodeSet &Dst,
+ const Stmt *NodeEx, /* This will eventually be a CFGStmt */
+ const Stmt *BoundEx,
+ ExplodedNode *Pred,
+ ProgramStateRef St, SVal location,
+ const ProgramPointTag *tag, bool isLoad);
+
+ bool shouldInlineDecl(const FunctionDecl *FD, ExplodedNode *Pred);
+ bool InlineCall(ExplodedNodeSet &Dst, const CallExpr *CE, ExplodedNode *Pred);
+
+ bool replayWithoutInlining(ExplodedNode *P, const LocationContext *CalleeLC);
+};
+
+/// Traits for storing the call processing policy inside GDM.
+/// The GDM stores the corresponding CallExpr pointer.
+struct ReplayWithoutInlining{};
+template <>
+struct ProgramStateTrait<ReplayWithoutInlining> :
+ public ProgramStatePartialTrait<void*> {
+ static void *GDMIndex() { static int index = 0; return &index; }
+};
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
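A sketch of how a driver loop might use the worklist entry points declared above (an illustration, not part of the patch itself; constructing the ExprEngine and obtaining the top-level LocationContext are left to the surrounding AnalysisManager machinery):

    #include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"

    using namespace clang;
    using namespace clang::ento;

    void simulate(ExprEngine &Eng, const LocationContext *TopFrame) {
      // Explore paths up to the default budget of 150000 worklist steps.
      Eng.ExecuteWorkList(TopFrame);

      // Check whether exploration stopped early, then inspect whatever part
      // of the exploded graph was built.
      if (Eng.hasWorkRemaining() || Eng.wasBlocksExhausted()) {
        const ExplodedGraph &G = Eng.getGraph();
        (void)G.size();  // number of ExplodedNodes created so far
      }
    }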
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h
new file mode 100644
index 0000000..42adff3
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h
@@ -0,0 +1,107 @@
+//== FunctionSummary.h - Stores summaries of functions. ------------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a summary of a function gathered/used by static analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_FUNCTIONSUMMARY_H
+#define LLVM_CLANG_GR_FUNCTIONSUMMARY_H
+
+#include "clang/AST/Decl.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/BitVector.h"
+
+namespace clang {
+namespace ento {
+typedef llvm::SmallPtrSet<Decl*, 24> SetOfDecls;
+typedef llvm::SmallPtrSet<const Decl*, 24> SetOfConstDecls;
+
+class FunctionSummariesTy {
+ struct FunctionSummary {
+ /// True if this function has reached a max block count while inlined from
+ /// at least one call site.
+ bool MayReachMaxBlockCount;
+
+ /// Total number of blocks in the function.
+ unsigned TotalBasicBlocks;
+
+ /// Marks the IDs of the basic blocks visited during the analysis.
+ llvm::BitVector VisitedBasicBlocks;
+
+ FunctionSummary() :
+ MayReachMaxBlockCount(false),
+ TotalBasicBlocks(0),
+ VisitedBasicBlocks(0) {}
+ };
+
+ typedef llvm::DenseMap<const Decl*, FunctionSummary*> MapTy;
+ MapTy Map;
+
+public:
+ ~FunctionSummariesTy();
+
+ MapTy::iterator findOrInsertSummary(const Decl *D) {
+ MapTy::iterator I = Map.find(D);
+ if (I != Map.end())
+ return I;
+ FunctionSummary *DS = new FunctionSummary();
+ I = Map.insert(std::pair<const Decl*, FunctionSummary*>(D, DS)).first;
+ assert(I != Map.end());
+ return I;
+ }
+
+ void markReachedMaxBlockCount(const Decl* D) {
+ MapTy::iterator I = findOrInsertSummary(D);
+ I->second->MayReachMaxBlockCount = true;
+ }
+
+ bool hasReachedMaxBlockCount(const Decl* D) {
+ MapTy::const_iterator I = Map.find(D);
+ if (I != Map.end())
+ return I->second->MayReachMaxBlockCount;
+ return false;
+ }
+
+ void markVisitedBasicBlock(unsigned ID, const Decl* D, unsigned TotalIDs) {
+ MapTy::iterator I = findOrInsertSummary(D);
+ llvm::BitVector &Blocks = I->second->VisitedBasicBlocks;
+ assert(ID < TotalIDs);
+ if (TotalIDs > Blocks.size()) {
+ Blocks.resize(TotalIDs);
+ I->second->TotalBasicBlocks = TotalIDs;
+ }
+ Blocks[ID] = true;
+ }
+
+ unsigned getNumVisitedBasicBlocks(const Decl* D) {
+ MapTy::const_iterator I = Map.find(D);
+ if (I != Map.end())
+ return I->second->VisitedBasicBlocks.count();
+ return 0;
+ }
+
+ /// Get the percentage of the function's basic blocks that have been visited.
+ unsigned getPercentBlocksReachable(const Decl *D) {
+ MapTy::const_iterator I = Map.find(D);
+ if (I != Map.end())
+ return ((I->second->VisitedBasicBlocks.count() * 100) /
+ I->second->TotalBasicBlocks);
+ return 0;
+ }
+
+ unsigned getTotalNumBasicBlocks();
+ unsigned getTotalNumVisitedBasicBlocks();
+
+};
+
+}} // end clang ento namespaces
+
+#endif
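A sketch exercising the FunctionSummariesTy bookkeeping above (an illustration, not part of the patch itself; the Decl pointer is assumed to come from the translation unit being analyzed):

    #include "clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h"

    using namespace clang;
    using namespace clang::ento;

    void recordCoverage(FunctionSummariesTy &Summaries, const Decl *D) {
      // Suppose the function's CFG has 4 basic blocks and the engine visited
      // blocks 0 and 2 while simulating it.
      Summaries.markVisitedBasicBlock(/*ID=*/0, D, /*TotalIDs=*/4);
      Summaries.markVisitedBasicBlock(/*ID=*/2, D, /*TotalIDs=*/4);

      unsigned Visited = Summaries.getNumVisitedBasicBlocks(D);   // 2
      unsigned Percent = Summaries.getPercentBlocksReachable(D);  // 50
      (void)Visited;
      (void)Percent;
    }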
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
new file mode 100644
index 0000000..87bc0df
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h
@@ -0,0 +1,1230 @@
+//== MemRegion.h - Abstract memory regions for static analysis --*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines MemRegion and its subclasses. MemRegion defines a
+// partially-typed abstraction of memory useful for path-sensitive dataflow
+// analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_MEMREGION_H
+#define LLVM_CLANG_GR_MEMREGION_H
+
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/FoldingSet.h"
+#include <string>
+
+namespace llvm {
+class BumpPtrAllocator;
+}
+
+namespace clang {
+
+class LocationContext;
+class StackFrameContext;
+
+namespace ento {
+
+class MemRegionManager;
+class MemSpaceRegion;
+class SValBuilder;
+class VarRegion;
+class CodeTextRegion;
+
+/// Represent a region's offset within the top level base region.
+class RegionOffset {
+ /// The base region.
+ const MemRegion *R;
+
+ /// The bit offset within the base region. It shouldn't be negative.
+ int64_t Offset;
+
+public:
+ RegionOffset(const MemRegion *r) : R(r), Offset(0) {}
+ RegionOffset(const MemRegion *r, int64_t off) : R(r), Offset(off) {}
+
+ const MemRegion *getRegion() const { return R; }
+ int64_t getOffset() const { return Offset; }
+};
+
+//===----------------------------------------------------------------------===//
+// Base region classes.
+//===----------------------------------------------------------------------===//
+
+/// MemRegion - The root abstract class for all memory regions.
+class MemRegion : public llvm::FoldingSetNode {
+ friend class MemRegionManager;
+public:
+ enum Kind {
+ // Memory spaces.
+ GenericMemSpaceRegionKind,
+ StackLocalsSpaceRegionKind,
+ StackArgumentsSpaceRegionKind,
+ HeapSpaceRegionKind,
+ UnknownSpaceRegionKind,
+ StaticGlobalSpaceRegionKind,
+ GlobalInternalSpaceRegionKind,
+ GlobalSystemSpaceRegionKind,
+ GlobalImmutableSpaceRegionKind,
+ BEG_NON_STATIC_GLOBAL_MEMSPACES = GlobalInternalSpaceRegionKind,
+ END_NON_STATIC_GLOBAL_MEMSPACES = GlobalImmutableSpaceRegionKind,
+ BEG_GLOBAL_MEMSPACES = StaticGlobalSpaceRegionKind,
+ END_GLOBAL_MEMSPACES = GlobalImmutableSpaceRegionKind,
+ BEG_MEMSPACES = GenericMemSpaceRegionKind,
+ END_MEMSPACES = GlobalImmutableSpaceRegionKind,
+ // Untyped regions.
+ SymbolicRegionKind,
+ AllocaRegionKind,
+ BlockDataRegionKind,
+ // Typed regions.
+ BEG_TYPED_REGIONS,
+ FunctionTextRegionKind = BEG_TYPED_REGIONS,
+ BlockTextRegionKind,
+ BEG_TYPED_VALUE_REGIONS,
+ CompoundLiteralRegionKind = BEG_TYPED_VALUE_REGIONS,
+ CXXThisRegionKind,
+ StringRegionKind,
+ ObjCStringRegionKind,
+ ElementRegionKind,
+ // Decl Regions.
+ BEG_DECL_REGIONS,
+ VarRegionKind = BEG_DECL_REGIONS,
+ FieldRegionKind,
+ ObjCIvarRegionKind,
+ END_DECL_REGIONS = ObjCIvarRegionKind,
+ CXXTempObjectRegionKind,
+ CXXBaseObjectRegionKind,
+ END_TYPED_VALUE_REGIONS = CXXBaseObjectRegionKind,
+ END_TYPED_REGIONS = CXXBaseObjectRegionKind
+ };
+
+private:
+ const Kind kind;
+
+protected:
+ MemRegion(Kind k) : kind(k) {}
+ virtual ~MemRegion();
+
+public:
+ ASTContext &getContext() const;
+
+ virtual void Profile(llvm::FoldingSetNodeID& ID) const = 0;
+
+ virtual MemRegionManager* getMemRegionManager() const = 0;
+
+ const MemSpaceRegion *getMemorySpace() const;
+
+ const MemRegion *getBaseRegion() const;
+
+ const MemRegion *StripCasts() const;
+
+ bool hasGlobalsOrParametersStorage() const;
+
+ bool hasStackStorage() const;
+
+ bool hasStackNonParametersStorage() const;
+
+ bool hasStackParametersStorage() const;
+
+ /// Compute the offset within the top level memory object.
+ RegionOffset getAsOffset() const;
+
+ /// \brief Get a string representation of a region for debug use.
+ std::string getString() const;
+
+ virtual void dumpToStream(raw_ostream &os) const;
+
+ void dump() const;
+
+ /// \brief Print the region for use in diagnostics.
+ virtual void dumpPretty(raw_ostream &os) const;
+
+ Kind getKind() const { return kind; }
+
+ template<typename RegionTy> const RegionTy* getAs() const;
+
+ virtual bool isBoundable() const { return false; }
+
+ static bool classof(const MemRegion*) { return true; }
+};
+
+/// MemSpaceRegion - A memory region that represents a "memory space";
+/// for example, the set of global variables, the stack frame, etc.
+class MemSpaceRegion : public MemRegion {
+protected:
+ friend class MemRegionManager;
+
+ MemRegionManager *Mgr;
+
+ MemSpaceRegion(MemRegionManager *mgr, Kind k = GenericMemSpaceRegionKind)
+ : MemRegion(k), Mgr(mgr) {
+ assert(classof(this));
+ }
+
+ MemRegionManager* getMemRegionManager() const { return Mgr; }
+
+public:
+ bool isBoundable() const { return false; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const;
+
+ static bool classof(const MemRegion *R) {
+ Kind k = R->getKind();
+ return k >= BEG_MEMSPACES && k <= END_MEMSPACES;
+ }
+};
+
+class GlobalsSpaceRegion : public MemSpaceRegion {
+ virtual void anchor();
+protected:
+ GlobalsSpaceRegion(MemRegionManager *mgr, Kind k)
+ : MemSpaceRegion(mgr, k) {}
+public:
+ static bool classof(const MemRegion *R) {
+ Kind k = R->getKind();
+ return k >= BEG_GLOBAL_MEMSPACES && k <= END_GLOBAL_MEMSPACES;
+ }
+};
+
+/// \class The region of the static variables within the current CodeTextRegion
+/// scope.
+/// Currently, only the static locals are placed there, so we know that these
+/// variables do not get invalidated by calls to other functions.
+class StaticGlobalSpaceRegion : public GlobalsSpaceRegion {
+ friend class MemRegionManager;
+
+ const CodeTextRegion *CR;
+
+ StaticGlobalSpaceRegion(MemRegionManager *mgr, const CodeTextRegion *cr)
+ : GlobalsSpaceRegion(mgr, StaticGlobalSpaceRegionKind), CR(cr) {}
+
+public:
+ void Profile(llvm::FoldingSetNodeID &ID) const;
+
+ void dumpToStream(raw_ostream &os) const;
+
+ const CodeTextRegion *getCodeRegion() const { return CR; }
+
+ static bool classof(const MemRegion *R) {
+ return R->getKind() == StaticGlobalSpaceRegionKind;
+ }
+};
+
+/// \class The region for all the non-static global variables.
+///
+/// This class is further split into subclasses for efficient implementation of
+/// invalidating a set of related global values as is done in
+/// RegionStoreManager::invalidateRegions (instead of finding all the dependent
+/// globals, we invalidate the whole parent region).
+class NonStaticGlobalSpaceRegion : public GlobalsSpaceRegion {
+ friend class MemRegionManager;
+
+protected:
+ NonStaticGlobalSpaceRegion(MemRegionManager *mgr, Kind k)
+ : GlobalsSpaceRegion(mgr, k) {}
+
+public:
+
+ void dumpToStream(raw_ostream &os) const;
+
+ static bool classof(const MemRegion *R) {
+ Kind k = R->getKind();
+ return k >= BEG_NON_STATIC_GLOBAL_MEMSPACES &&
+ k <= END_NON_STATIC_GLOBAL_MEMSPACES;
+ }
+};
+
+/// \class The region containing globals which are defined in system/external
+/// headers and are considered modifiable by system calls (e.g., errno).
+class GlobalSystemSpaceRegion : public NonStaticGlobalSpaceRegion {
+ friend class MemRegionManager;
+
+ GlobalSystemSpaceRegion(MemRegionManager *mgr)
+ : NonStaticGlobalSpaceRegion(mgr, GlobalSystemSpaceRegionKind) {}
+
+public:
+
+ void dumpToStream(raw_ostream &os) const;
+
+ static bool classof(const MemRegion *R) {
+ return R->getKind() == GlobalSystemSpaceRegionKind;
+ }
+};
+
+/// \class The region containing globals which are considered not to be modified,
+/// and not to point to data which could be modified, as a result of a function call
+/// (system or internal). For example, const global scalars are modeled as part of
+/// this region. This region also includes most system globals since they have
+/// low chance of being modified.
+class GlobalImmutableSpaceRegion : public NonStaticGlobalSpaceRegion {
+ friend class MemRegionManager;
+
+ GlobalImmutableSpaceRegion(MemRegionManager *mgr)
+ : NonStaticGlobalSpaceRegion(mgr, GlobalImmutableSpaceRegionKind) {}
+
+public:
+
+ void dumpToStream(raw_ostream &os) const;
+
+ static bool classof(const MemRegion *R) {
+ return R->getKind() == GlobalImmutableSpaceRegionKind;
+ }
+};
+
+/// \class The region containing globals which can be modified by calls to
+/// "internally" defined functions - (for now just) functions other then system
+/// calls.
+class GlobalInternalSpaceRegion : public NonStaticGlobalSpaceRegion {
+ friend class MemRegionManager;
+
+ GlobalInternalSpaceRegion(MemRegionManager *mgr)
+ : NonStaticGlobalSpaceRegion(mgr, GlobalInternalSpaceRegionKind) {}
+
+public:
+
+ void dumpToStream(raw_ostream &os) const;
+
+ static bool classof(const MemRegion *R) {
+ return R->getKind() == GlobalInternalSpaceRegionKind;
+ }
+};
+
+class HeapSpaceRegion : public MemSpaceRegion {
+ virtual void anchor();
+ friend class MemRegionManager;
+
+ HeapSpaceRegion(MemRegionManager *mgr)
+ : MemSpaceRegion(mgr, HeapSpaceRegionKind) {}
+public:
+ static bool classof(const MemRegion *R) {
+ return R->getKind() == HeapSpaceRegionKind;
+ }
+};
+
+class UnknownSpaceRegion : public MemSpaceRegion {
+ virtual void anchor();
+ friend class MemRegionManager;
+ UnknownSpaceRegion(MemRegionManager *mgr)
+ : MemSpaceRegion(mgr, UnknownSpaceRegionKind) {}
+public:
+ static bool classof(const MemRegion *R) {
+ return R->getKind() == UnknownSpaceRegionKind;
+ }
+};
+
+class StackSpaceRegion : public MemSpaceRegion {
+private:
+ const StackFrameContext *SFC;
+
+protected:
+ StackSpaceRegion(MemRegionManager *mgr, Kind k, const StackFrameContext *sfc)
+ : MemSpaceRegion(mgr, k), SFC(sfc) {
+ assert(classof(this));
+ }
+
+public:
+ const StackFrameContext *getStackFrame() const { return SFC; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const;
+
+ static bool classof(const MemRegion *R) {
+ Kind k = R->getKind();
+ return k >= StackLocalsSpaceRegionKind &&
+ k <= StackArgumentsSpaceRegionKind;
+ }
+};
+
+class StackLocalsSpaceRegion : public StackSpaceRegion {
+ virtual void anchor();
+ friend class MemRegionManager;
+ StackLocalsSpaceRegion(MemRegionManager *mgr, const StackFrameContext *sfc)
+ : StackSpaceRegion(mgr, StackLocalsSpaceRegionKind, sfc) {}
+public:
+ static bool classof(const MemRegion *R) {
+ return R->getKind() == StackLocalsSpaceRegionKind;
+ }
+};
+
+class StackArgumentsSpaceRegion : public StackSpaceRegion {
+private:
+ virtual void anchor();
+ friend class MemRegionManager;
+ StackArgumentsSpaceRegion(MemRegionManager *mgr, const StackFrameContext *sfc)
+ : StackSpaceRegion(mgr, StackArgumentsSpaceRegionKind, sfc) {}
+public:
+ static bool classof(const MemRegion *R) {
+ return R->getKind() == StackArgumentsSpaceRegionKind;
+ }
+};
+
+
+/// SubRegion - A region that subsets another larger region. Most regions
+/// are subclasses of SubRegion.
+class SubRegion : public MemRegion {
+private:
+ virtual void anchor();
+protected:
+ const MemRegion* superRegion;
+ SubRegion(const MemRegion* sReg, Kind k) : MemRegion(k), superRegion(sReg) {}
+public:
+ const MemRegion* getSuperRegion() const {
+ return superRegion;
+ }
+
+ /// getExtent - Returns the size of the region in bytes.
+ virtual DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const {
+ return UnknownVal();
+ }
+
+ MemRegionManager* getMemRegionManager() const;
+
+ bool isSubRegionOf(const MemRegion* R) const;
+
+ static bool classof(const MemRegion* R) {
+ return R->getKind() > END_MEMSPACES;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// MemRegion subclasses.
+//===----------------------------------------------------------------------===//
+
+/// AllocaRegion - A region that represents an untyped blob of bytes created
+/// by a call to 'alloca'.
+class AllocaRegion : public SubRegion {
+ friend class MemRegionManager;
+protected:
+ unsigned Cnt; // Block counter. Used to distinguish different pieces of
+ // memory allocated by alloca at the same call site.
+ const Expr *Ex;
+
+ AllocaRegion(const Expr *ex, unsigned cnt, const MemRegion *superRegion)
+ : SubRegion(superRegion, AllocaRegionKind), Cnt(cnt), Ex(ex) {}
+
+public:
+
+ const Expr *getExpr() const { return Ex; }
+
+ bool isBoundable() const { return true; }
+
+ DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const;
+
+ void Profile(llvm::FoldingSetNodeID& ID) const;
+
+ static void ProfileRegion(llvm::FoldingSetNodeID& ID, const Expr *Ex,
+ unsigned Cnt, const MemRegion *superRegion);
+
+ void dumpToStream(raw_ostream &os) const;
+
+ static bool classof(const MemRegion* R) {
+ return R->getKind() == AllocaRegionKind;
+ }
+};
+
+/// TypedRegion - An abstract class representing regions that are typed.
+class TypedRegion : public SubRegion {
+public:
+ virtual void anchor();
+protected:
+ TypedRegion(const MemRegion* sReg, Kind k) : SubRegion(sReg, k) {}
+
+public:
+ virtual QualType getLocationType() const = 0;
+
+ QualType getDesugaredLocationType(ASTContext &Context) const {
+ return getLocationType().getDesugaredType(Context);
+ }
+
+ bool isBoundable() const { return true; }
+
+ static bool classof(const MemRegion* R) {
+ unsigned k = R->getKind();
+ return k >= BEG_TYPED_REGIONS && k <= END_TYPED_REGIONS;
+ }
+};
+
+/// TypedValueRegion - An abstract class representing regions having a typed value.
+class TypedValueRegion : public TypedRegion {
+public:
+ virtual void anchor();
+protected:
+ TypedValueRegion(const MemRegion* sReg, Kind k) : TypedRegion(sReg, k) {}
+
+public:
+ virtual QualType getValueType() const = 0;
+
+ virtual QualType getLocationType() const {
+ // FIXME: We can possibly optimize this later to cache this value.
+ QualType T = getValueType();
+ ASTContext &ctx = getContext();
+ if (T->getAs<ObjCObjectType>())
+ return ctx.getObjCObjectPointerType(T);
+ return ctx.getPointerType(getValueType());
+ }
+
+ QualType getDesugaredValueType(ASTContext &Context) const {
+ QualType T = getValueType();
+ return T.getTypePtrOrNull() ? T.getDesugaredType(Context) : T;
+ }
+
+ static bool classof(const MemRegion* R) {
+ unsigned k = R->getKind();
+ return k >= BEG_TYPED_VALUE_REGIONS && k <= END_TYPED_VALUE_REGIONS;
+ }
+};
+
+
+class CodeTextRegion : public TypedRegion {
+public:
+ virtual void anchor();
+protected:
+ CodeTextRegion(const MemRegion *sreg, Kind k) : TypedRegion(sreg, k) {}
+public:
+ bool isBoundable() const { return false; }
+
+ static bool classof(const MemRegion* R) {
+ Kind k = R->getKind();
+ return k >= FunctionTextRegionKind && k <= BlockTextRegionKind;
+ }
+};
+
+/// FunctionTextRegion - A region that represents the code text of a function.
+class FunctionTextRegion : public CodeTextRegion {
+ const FunctionDecl *FD;
+public:
+ FunctionTextRegion(const FunctionDecl *fd, const MemRegion* sreg)
+ : CodeTextRegion(sreg, FunctionTextRegionKind), FD(fd) {}
+
+ QualType getLocationType() const {
+ return getContext().getPointerType(FD->getType());
+ }
+
+ const FunctionDecl *getDecl() const {
+ return FD;
+ }
+
+ virtual void dumpToStream(raw_ostream &os) const;
+
+ void Profile(llvm::FoldingSetNodeID& ID) const;
+
+ static void ProfileRegion(llvm::FoldingSetNodeID& ID, const FunctionDecl *FD,
+ const MemRegion*);
+
+ static bool classof(const MemRegion* R) {
+ return R->getKind() == FunctionTextRegionKind;
+ }
+};
+
+
+/// BlockTextRegion - A region that represents the code text of blocks (closures).
+/// Blocks are represented with two kinds of regions. BlockTextRegions
+/// represent the "code", while BlockDataRegions represent instances of blocks,
+/// which correspond to "code+data". The distinction is important, because
+/// like a closure a block captures the values of externally referenced
+/// variables.
+class BlockTextRegion : public CodeTextRegion {
+ friend class MemRegionManager;
+
+ const BlockDecl *BD;
+ AnalysisDeclContext *AC;
+ CanQualType locTy;
+
+ BlockTextRegion(const BlockDecl *bd, CanQualType lTy,
+ AnalysisDeclContext *ac, const MemRegion* sreg)
+ : CodeTextRegion(sreg, BlockTextRegionKind), BD(bd), AC(ac), locTy(lTy) {}
+
+public:
+ QualType getLocationType() const {
+ return locTy;
+ }
+
+ const BlockDecl *getDecl() const {
+ return BD;
+ }
+
+ AnalysisDeclContext *getAnalysisDeclContext() const { return AC; }
+
+ virtual void dumpToStream(raw_ostream &os) const;
+
+ void Profile(llvm::FoldingSetNodeID& ID) const;
+
+ static void ProfileRegion(llvm::FoldingSetNodeID& ID, const BlockDecl *BD,
+ CanQualType, const AnalysisDeclContext*,
+ const MemRegion*);
+
+ static bool classof(const MemRegion* R) {
+ return R->getKind() == BlockTextRegionKind;
+ }
+};
+
+/// BlockDataRegion - A region that represents a block instance.
+/// Blocks are represented with two kinds of regions. BlockTextRegions
+/// represent the "code", while BlockDataRegions represent instances of blocks,
+/// which correspond to "code+data". The distinction is important, because
+/// like a closure a block captures the values of externally referenced
+/// variables.
+class BlockDataRegion : public SubRegion {
+ friend class MemRegionManager;
+ const BlockTextRegion *BC;
+ const LocationContext *LC; // Can be null.
+ void *ReferencedVars;
+
+ BlockDataRegion(const BlockTextRegion *bc, const LocationContext *lc,
+ const MemRegion *sreg)
+ : SubRegion(sreg, BlockDataRegionKind), BC(bc), LC(lc), ReferencedVars(0) {}
+
+public:
+ const BlockTextRegion *getCodeRegion() const { return BC; }
+
+ const BlockDecl *getDecl() const { return BC->getDecl(); }
+
+ class referenced_vars_iterator {
+ const MemRegion * const *R;
+ public:
+ explicit referenced_vars_iterator(const MemRegion * const *r) : R(r) {}
+
+ operator const MemRegion * const *() const {
+ return R;
+ }
+
+ const VarRegion* operator*() const {
+ return cast<VarRegion>(*R);
+ }
+
+ bool operator==(const referenced_vars_iterator &I) const {
+ return I.R == R;
+ }
+ bool operator!=(const referenced_vars_iterator &I) const {
+ return I.R != R;
+ }
+ referenced_vars_iterator &operator++() {
+ ++R;
+ return *this;
+ }
+ };
+
+ referenced_vars_iterator referenced_vars_begin() const;
+ referenced_vars_iterator referenced_vars_end() const;
+
+ virtual void dumpToStream(raw_ostream &os) const;
+
+ void Profile(llvm::FoldingSetNodeID& ID) const;
+
+ static void ProfileRegion(llvm::FoldingSetNodeID&, const BlockTextRegion *,
+ const LocationContext *, const MemRegion *);
+
+ static bool classof(const MemRegion* R) {
+ return R->getKind() == BlockDataRegionKind;
+ }
+private:
+ void LazyInitializeReferencedVars();
+};
+
+/// SymbolicRegion - A special, "non-concrete" region. Unlike other region
+/// classes, SymbolicRegion represents a region that serves as an alias for
+/// a real region, a NULL pointer, etc. It essentially is used to
+/// map the concept of symbolic values into the domain of regions. Symbolic
+/// regions do not need to be typed.
+class SymbolicRegion : public SubRegion {
+protected:
+ const SymbolRef sym;
+
+public:
+ SymbolicRegion(const SymbolRef s, const MemRegion* sreg)
+ : SubRegion(sreg, SymbolicRegionKind), sym(s) {}
+
+ SymbolRef getSymbol() const {
+ return sym;
+ }
+
+ bool isBoundable() const { return true; }
+
+ DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const;
+
+ void Profile(llvm::FoldingSetNodeID& ID) const;
+
+ static void ProfileRegion(llvm::FoldingSetNodeID& ID,
+ SymbolRef sym,
+ const MemRegion* superRegion);
+
+ void dumpToStream(raw_ostream &os) const;
+
+ static bool classof(const MemRegion* R) {
+ return R->getKind() == SymbolicRegionKind;
+ }
+};
+
+/// StringRegion - Region associated with a StringLiteral.
+class StringRegion : public TypedValueRegion {
+ friend class MemRegionManager;
+ const StringLiteral* Str;
+protected:
+
+ StringRegion(const StringLiteral* str, const MemRegion* sreg)
+ : TypedValueRegion(sreg, StringRegionKind), Str(str) {}
+
+ static void ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const StringLiteral* Str,
+ const MemRegion* superRegion);
+
+public:
+
+ const StringLiteral* getStringLiteral() const { return Str; }
+
+ QualType getValueType() const {
+ return Str->getType();
+ }
+
+ DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const;
+
+ bool isBoundable() const { return false; }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ProfileRegion(ID, Str, superRegion);
+ }
+
+ void dumpToStream(raw_ostream &os) const;
+
+ static bool classof(const MemRegion* R) {
+ return R->getKind() == StringRegionKind;
+ }
+};
+
+/// The region associated with an ObjCStringLiteral.
+class ObjCStringRegion : public TypedValueRegion {
+ friend class MemRegionManager;
+ const ObjCStringLiteral* Str;
+protected:
+
+ ObjCStringRegion(const ObjCStringLiteral* str, const MemRegion* sreg)
+ : TypedValueRegion(sreg, ObjCStringRegionKind), Str(str) {}
+
+ static void ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const ObjCStringLiteral* Str,
+ const MemRegion* superRegion);
+
+public:
+
+ const ObjCStringLiteral* getObjCStringLiteral() const { return Str; }
+
+ QualType getValueType() const {
+ return Str->getType();
+ }
+
+ bool isBoundable() const { return false; }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ProfileRegion(ID, Str, superRegion);
+ }
+
+ void dumpToStream(raw_ostream &os) const;
+
+ static bool classof(const MemRegion* R) {
+ return R->getKind() == ObjCStringRegionKind;
+ }
+};
+
+/// CompoundLiteralRegion - A memory region representing a compound literal.
+/// Compound literals are essentially temporaries that are stack allocated
+/// or in the global constant pool.
+class CompoundLiteralRegion : public TypedValueRegion {
+private:
+ friend class MemRegionManager;
+ const CompoundLiteralExpr *CL;
+
+ CompoundLiteralRegion(const CompoundLiteralExpr *cl, const MemRegion* sReg)
+ : TypedValueRegion(sReg, CompoundLiteralRegionKind), CL(cl) {}
+
+ static void ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const CompoundLiteralExpr *CL,
+ const MemRegion* superRegion);
+public:
+ QualType getValueType() const {
+ return CL->getType();
+ }
+
+ bool isBoundable() const { return !CL->isFileScope(); }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const;
+
+ void dumpToStream(raw_ostream &os) const;
+
+ const CompoundLiteralExpr *getLiteralExpr() const { return CL; }
+
+ static bool classof(const MemRegion* R) {
+ return R->getKind() == CompoundLiteralRegionKind;
+ }
+};
+
+class DeclRegion : public TypedValueRegion {
+protected:
+ const Decl *D;
+
+ DeclRegion(const Decl *d, const MemRegion* sReg, Kind k)
+ : TypedValueRegion(sReg, k), D(d) {}
+
+ static void ProfileRegion(llvm::FoldingSetNodeID& ID, const Decl *D,
+ const MemRegion* superRegion, Kind k);
+
+public:
+ const Decl *getDecl() const { return D; }
+ void Profile(llvm::FoldingSetNodeID& ID) const;
+
+ DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const;
+
+ static bool classof(const MemRegion* R) {
+ unsigned k = R->getKind();
+ return k >= BEG_DECL_REGIONS && k <= END_DECL_REGIONS;
+ }
+};
+
+class VarRegion : public DeclRegion {
+ friend class MemRegionManager;
+
+ // Constructors and private methods.
+ VarRegion(const VarDecl *vd, const MemRegion* sReg)
+ : DeclRegion(vd, sReg, VarRegionKind) {}
+
+ static void ProfileRegion(llvm::FoldingSetNodeID& ID, const VarDecl *VD,
+ const MemRegion *superRegion) {
+ DeclRegion::ProfileRegion(ID, VD, superRegion, VarRegionKind);
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const;
+
+public:
+ const VarDecl *getDecl() const { return cast<VarDecl>(D); }
+
+ const StackFrameContext *getStackFrame() const;
+
+ QualType getValueType() const {
+ // FIXME: We can cache this if needed.
+ return getDecl()->getType();
+ }
+
+ void dumpToStream(raw_ostream &os) const;
+
+ static bool classof(const MemRegion* R) {
+ return R->getKind() == VarRegionKind;
+ }
+
+ void dumpPretty(raw_ostream &os) const;
+};
+
+/// CXXThisRegion - Represents the region for the implicit 'this' parameter
+/// in a call to a C++ method. This region doesn't represent the object
+/// referred to by 'this', but rather 'this' itself.
+class CXXThisRegion : public TypedValueRegion {
+ friend class MemRegionManager;
+ CXXThisRegion(const PointerType *thisPointerTy,
+ const MemRegion *sReg)
+ : TypedValueRegion(sReg, CXXThisRegionKind), ThisPointerTy(thisPointerTy) {}
+
+ static void ProfileRegion(llvm::FoldingSetNodeID &ID,
+ const PointerType *PT,
+ const MemRegion *sReg);
+
+ void Profile(llvm::FoldingSetNodeID &ID) const;
+
+public:
+ QualType getValueType() const {
+ return QualType(ThisPointerTy, 0);
+ }
+
+ void dumpToStream(raw_ostream &os) const;
+
+ static bool classof(const MemRegion* R) {
+ return R->getKind() == CXXThisRegionKind;
+ }
+
+private:
+ const PointerType *ThisPointerTy;
+};
+
+class FieldRegion : public DeclRegion {
+ friend class MemRegionManager;
+
+ FieldRegion(const FieldDecl *fd, const MemRegion* sReg)
+ : DeclRegion(fd, sReg, FieldRegionKind) {}
+
+public:
+ const FieldDecl *getDecl() const { return cast<FieldDecl>(D); }
+
+ QualType getValueType() const {
+ // FIXME: We can cache this if needed.
+ return getDecl()->getType();
+ }
+
+ DefinedOrUnknownSVal getExtent(SValBuilder &svalBuilder) const;
+
+ static void ProfileRegion(llvm::FoldingSetNodeID& ID, const FieldDecl *FD,
+ const MemRegion* superRegion) {
+ DeclRegion::ProfileRegion(ID, FD, superRegion, FieldRegionKind);
+ }
+
+ static bool classof(const MemRegion* R) {
+ return R->getKind() == FieldRegionKind;
+ }
+
+ void dumpToStream(raw_ostream &os) const;
+ void dumpPretty(raw_ostream &os) const;
+};
+
+class ObjCIvarRegion : public DeclRegion {
+
+ friend class MemRegionManager;
+
+ ObjCIvarRegion(const ObjCIvarDecl *ivd, const MemRegion* sReg);
+
+ static void ProfileRegion(llvm::FoldingSetNodeID& ID, const ObjCIvarDecl *ivd,
+ const MemRegion* superRegion);
+
+public:
+ const ObjCIvarDecl *getDecl() const;
+ QualType getValueType() const;
+
+ void dumpToStream(raw_ostream &os) const;
+
+ static bool classof(const MemRegion* R) {
+ return R->getKind() == ObjCIvarRegionKind;
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Auxiliary data classes for use with MemRegions.
+//===----------------------------------------------------------------------===//
+
+class ElementRegion;
+
+class RegionRawOffset {
+private:
+ friend class ElementRegion;
+
+ const MemRegion *Region;
+ CharUnits Offset;
+
+ RegionRawOffset(const MemRegion* reg, CharUnits offset = CharUnits::Zero())
+ : Region(reg), Offset(offset) {}
+
+public:
+ // FIXME: Eventually support symbolic offsets.
+ CharUnits getOffset() const { return Offset; }
+ const MemRegion *getRegion() const { return Region; }
+
+ void dumpToStream(raw_ostream &os) const;
+ void dump() const;
+};
+
+/// \brief ElementRegion is used to represent both array elements and casts.
+class ElementRegion : public TypedValueRegion {
+ friend class MemRegionManager;
+
+ QualType ElementType;
+ NonLoc Index;
+
+ ElementRegion(QualType elementType, NonLoc Idx, const MemRegion* sReg)
+ : TypedValueRegion(sReg, ElementRegionKind),
+ ElementType(elementType), Index(Idx) {
+ assert((!isa<nonloc::ConcreteInt>(&Idx) ||
+ cast<nonloc::ConcreteInt>(&Idx)->getValue().isSigned()) &&
+ "The index must be signed");
+ }
+
+ static void ProfileRegion(llvm::FoldingSetNodeID& ID, QualType elementType,
+ SVal Idx, const MemRegion* superRegion);
+
+public:
+
+ NonLoc getIndex() const { return Index; }
+
+ QualType getValueType() const {
+ return ElementType;
+ }
+
+ QualType getElementType() const {
+ return ElementType;
+ }
+ /// Compute the offset within the array. The array might also be a subobject.
+ RegionRawOffset getAsArrayOffset() const;
+
+ void dumpToStream(raw_ostream &os) const;
+
+ void Profile(llvm::FoldingSetNodeID& ID) const;
+
+ static bool classof(const MemRegion* R) {
+ return R->getKind() == ElementRegionKind;
+ }
+};
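+
+// Example (illustrative sketch): an ElementRegion models both an array access
+// such as `arr[i]` and the result of a cast such as `(char *)&x`, where the
+// cast is represented as element 0 of the target type layered over the
+// original region.  'MRMgr', 'Idx', 'SuperR', and 'Ctx' are assumed to come
+// from the surrounding analysis context (a MemRegionManager, an index value,
+// a super region, and the ASTContext).
+//
+//   const ElementRegion *ER =
+//       MRMgr.getElementRegion(Ctx.CharTy, Idx, SuperR, Ctx);
+//   QualType T = ER->getValueType();            // CharTy
+//   RegionRawOffset RO = ER->getAsArrayOffset();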
+
+// C++ temporary object associated with an expression.
+class CXXTempObjectRegion : public TypedValueRegion {
+ friend class MemRegionManager;
+
+ Expr const *Ex;
+
+ CXXTempObjectRegion(Expr const *E, MemRegion const *sReg)
+ : TypedValueRegion(sReg, CXXTempObjectRegionKind), Ex(E) {}
+
+ static void ProfileRegion(llvm::FoldingSetNodeID &ID,
+ Expr const *E, const MemRegion *sReg);
+
+public:
+ const Expr *getExpr() const { return Ex; }
+
+ QualType getValueType() const {
+ return Ex->getType();
+ }
+
+ void dumpToStream(raw_ostream &os) const;
+
+ void Profile(llvm::FoldingSetNodeID &ID) const;
+
+ static bool classof(const MemRegion* R) {
+ return R->getKind() == CXXTempObjectRegionKind;
+ }
+};
+
+// CXXBaseObjectRegion represents a base object within a C++ object. It is
+// identified by the base class declaration and the region of its parent object.
+class CXXBaseObjectRegion : public TypedValueRegion {
+ friend class MemRegionManager;
+
+ const CXXRecordDecl *decl;
+
+ CXXBaseObjectRegion(const CXXRecordDecl *d, const MemRegion *sReg)
+ : TypedValueRegion(sReg, CXXBaseObjectRegionKind), decl(d) {}
+
+ static void ProfileRegion(llvm::FoldingSetNodeID &ID,
+ const CXXRecordDecl *decl, const MemRegion *sReg);
+
+public:
+ const CXXRecordDecl *getDecl() const { return decl; }
+
+ QualType getValueType() const;
+
+ void dumpToStream(raw_ostream &os) const;
+
+ void Profile(llvm::FoldingSetNodeID &ID) const;
+
+ static bool classof(const MemRegion *region) {
+ return region->getKind() == CXXBaseObjectRegionKind;
+ }
+};
+
+template<typename RegionTy>
+const RegionTy* MemRegion::getAs() const {
+ if (const RegionTy* RT = dyn_cast<RegionTy>(this))
+ return RT;
+
+ return NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// MemRegionManager - Factory object for creating regions.
+//===----------------------------------------------------------------------===//
+
+class MemRegionManager {
+ ASTContext &C;
+ llvm::BumpPtrAllocator& A;
+ llvm::FoldingSet<MemRegion> Regions;
+
+ GlobalInternalSpaceRegion *InternalGlobals;
+ GlobalSystemSpaceRegion *SystemGlobals;
+ GlobalImmutableSpaceRegion *ImmutableGlobals;
+
+
+ llvm::DenseMap<const StackFrameContext *, StackLocalsSpaceRegion *>
+ StackLocalsSpaceRegions;
+ llvm::DenseMap<const StackFrameContext *, StackArgumentsSpaceRegion *>
+ StackArgumentsSpaceRegions;
+ llvm::DenseMap<const CodeTextRegion *, StaticGlobalSpaceRegion *>
+ StaticsGlobalSpaceRegions;
+
+ HeapSpaceRegion *heap;
+ UnknownSpaceRegion *unknown;
+ MemSpaceRegion *code;
+
+public:
+ MemRegionManager(ASTContext &c, llvm::BumpPtrAllocator& a)
+ : C(c), A(a), InternalGlobals(0), SystemGlobals(0), ImmutableGlobals(0),
+ heap(0), unknown(0), code(0) {}
+
+ ~MemRegionManager();
+
+ ASTContext &getContext() { return C; }
+
+ llvm::BumpPtrAllocator &getAllocator() { return A; }
+
+ /// getStackLocalsRegion - Retrieve the memory region associated with the
+ /// specified stack frame.
+ const StackLocalsSpaceRegion *
+ getStackLocalsRegion(const StackFrameContext *STC);
+
+ /// getStackArgumentsRegion - Retrieve the memory region associated with
+ /// function/method arguments of the specified stack frame.
+ const StackArgumentsSpaceRegion *
+ getStackArgumentsRegion(const StackFrameContext *STC);
+
+ /// getGlobalsRegion - Retrieve the memory region associated with
+ /// global variables.
+ const GlobalsSpaceRegion *getGlobalsRegion(
+ MemRegion::Kind K = MemRegion::GlobalInternalSpaceRegionKind,
+ const CodeTextRegion *R = 0);
+
+ /// getHeapRegion - Retrieve the memory region associated with the
+ /// generic "heap".
+ const HeapSpaceRegion *getHeapRegion();
+
+ /// getUnknownRegion - Retrieve the memory region associated with unknown
+ /// memory space.
+ const MemSpaceRegion *getUnknownRegion();
+
+ const MemSpaceRegion *getCodeRegion();
+
+ /// getAllocaRegion - Retrieve a region associated with a call to alloca().
+ const AllocaRegion *getAllocaRegion(const Expr *Ex, unsigned Cnt,
+ const LocationContext *LC);
+
+ /// getCompoundLiteralRegion - Retrieve the region associated with a
+ /// given CompoundLiteral.
+ const CompoundLiteralRegion*
+ getCompoundLiteralRegion(const CompoundLiteralExpr *CL,
+ const LocationContext *LC);
+
+ /// getCXXThisRegion - Retrieve the [artificial] region associated with the
+ /// parameter 'this'.
+ const CXXThisRegion *getCXXThisRegion(QualType thisPointerTy,
+ const LocationContext *LC);
+
+ /// getSymbolicRegion - Retrieve or create a "symbolic" memory region.
+ const SymbolicRegion* getSymbolicRegion(SymbolRef sym);
+
+ const StringRegion *getStringRegion(const StringLiteral* Str);
+
+ const ObjCStringRegion *getObjCStringRegion(const ObjCStringLiteral *Str);
+
+ /// getVarRegion - Retrieve or create the memory region associated with
+ /// a specified VarDecl and LocationContext.
+ const VarRegion* getVarRegion(const VarDecl *D, const LocationContext *LC);
+
+ /// getVarRegion - Retrieve or create the memory region associated with
+ /// a specified VarDecl and super region.
+ const VarRegion* getVarRegion(const VarDecl *D, const MemRegion *superR);
+
+ /// getElementRegion - Retrieve the memory region associated with the
+ /// associated element type, index, and super region.
+ const ElementRegion *getElementRegion(QualType elementType, NonLoc Idx,
+ const MemRegion *superRegion,
+ ASTContext &Ctx);
+
+ const ElementRegion *getElementRegionWithSuper(const ElementRegion *ER,
+ const MemRegion *superRegion) {
+ return getElementRegion(ER->getElementType(), ER->getIndex(),
+ superRegion, ER->getContext());
+ }
+
+ /// getFieldRegion - Retrieve or create the memory region associated with
+ /// a specified FieldDecl. 'superRegion' corresponds to the containing
+  ///  memory region (which typically represents the memory of a
+  ///  structure or class).
+ const FieldRegion *getFieldRegion(const FieldDecl *fd,
+ const MemRegion* superRegion);
+
+ const FieldRegion *getFieldRegionWithSuper(const FieldRegion *FR,
+ const MemRegion *superRegion) {
+ return getFieldRegion(FR->getDecl(), superRegion);
+ }
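+
+  // Example (illustrative sketch): for `struct S { int x; } s;`, the lvalue of
+  // `s.x` is a FieldRegion whose super region is the VarRegion for `s`.
+  // 'MRMgr', 'VD', 'FD', and 'LC' are assumed to be a MemRegionManager, the
+  // VarDecl for `s`, the FieldDecl for `x`, and the current LocationContext.
+  //
+  //   const VarRegion *VR = MRMgr.getVarRegion(VD, LC);
+  //   const FieldRegion *FR = MRMgr.getFieldRegion(FD, VR);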
+
+ /// getObjCIvarRegion - Retrieve or create the memory region associated with
+  ///   a specified Objective-C instance variable.  'superRegion' corresponds
+ /// to the containing region (which typically represents the Objective-C
+ /// object).
+ const ObjCIvarRegion *getObjCIvarRegion(const ObjCIvarDecl *ivd,
+ const MemRegion* superRegion);
+
+ const CXXTempObjectRegion *getCXXTempObjectRegion(Expr const *Ex,
+ LocationContext const *LC);
+
+ const CXXBaseObjectRegion *getCXXBaseObjectRegion(const CXXRecordDecl *decl,
+ const MemRegion *superRegion);
+
+ /// Create a CXXBaseObjectRegion with the same CXXRecordDecl but a different
+ /// super region.
+ const CXXBaseObjectRegion *
+ getCXXBaseObjectRegionWithSuper(const CXXBaseObjectRegion *baseReg,
+ const MemRegion *superRegion) {
+ return getCXXBaseObjectRegion(baseReg->getDecl(), superRegion);
+ }
+
+ const FunctionTextRegion *getFunctionTextRegion(const FunctionDecl *FD);
+ const BlockTextRegion *getBlockTextRegion(const BlockDecl *BD,
+ CanQualType locTy,
+ AnalysisDeclContext *AC);
+
+ /// getBlockDataRegion - Get the memory region associated with an instance
+ /// of a block. Unlike many other MemRegions, the LocationContext*
+ /// argument is allowed to be NULL for cases where we have no known
+ /// context.
+ const BlockDataRegion *getBlockDataRegion(const BlockTextRegion *bc,
+ const LocationContext *lc = NULL);
+
+private:
+ template <typename RegionTy, typename A1>
+ RegionTy* getRegion(const A1 a1);
+
+ template <typename RegionTy, typename A1>
+ RegionTy* getSubRegion(const A1 a1, const MemRegion* superRegion);
+
+ template <typename RegionTy, typename A1, typename A2>
+ RegionTy* getRegion(const A1 a1, const A2 a2);
+
+ template <typename RegionTy, typename A1, typename A2>
+ RegionTy* getSubRegion(const A1 a1, const A2 a2,
+ const MemRegion* superRegion);
+
+ template <typename RegionTy, typename A1, typename A2, typename A3>
+ RegionTy* getSubRegion(const A1 a1, const A2 a2, const A3 a3,
+ const MemRegion* superRegion);
+
+ template <typename REG>
+ const REG* LazyAllocate(REG*& region);
+
+ template <typename REG, typename ARG>
+ const REG* LazyAllocate(REG*& region, ARG a);
+};
+
+//===----------------------------------------------------------------------===//
+// Out-of-line member definitions.
+//===----------------------------------------------------------------------===//
+
+inline ASTContext &MemRegion::getContext() const {
+ return getMemRegionManager()->getContext();
+}
+
+} // end GR namespace
+
+} // end clang namespace
+
+//===----------------------------------------------------------------------===//
+// Pretty-printing regions.
+//===----------------------------------------------------------------------===//
+
+namespace llvm {
+static inline raw_ostream &operator<<(raw_ostream &os,
+ const clang::ento::MemRegion* R) {
+ R->dumpToStream(os);
+ return os;
+}
+} // end llvm namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h
new file mode 100644
index 0000000..d8aec09
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h
@@ -0,0 +1,293 @@
+//===- ObjCMessage.h - Wrapper for ObjC messages and dot syntax ---*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ObjCMessage which serves as a common wrapper for ObjC
+// message expressions or implicit messages for loading/storing ObjC properties.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_STATICANALYZER_PATHSENSITIVE_OBJCMESSAGE
+#define LLVM_CLANG_STATICANALYZER_PATHSENSITIVE_OBJCMESSAGE
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/PointerUnion.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+namespace ento {
+using llvm::StrInStrNoCase;
+
+/// \brief Represents both explicit ObjC message expressions and implicit
+/// messages that are sent for handling properties in dot syntax.
+class ObjCMessage {
+ const ObjCMessageExpr *Msg;
+ const ObjCPropertyRefExpr *PE;
+ const bool IsPropSetter;
+public:
+ ObjCMessage() : Msg(0), PE(0), IsPropSetter(false) {}
+
+ ObjCMessage(const ObjCMessageExpr *E, const ObjCPropertyRefExpr *pe = 0,
+ bool isSetter = false)
+ : Msg(E), PE(pe), IsPropSetter(isSetter) {
+ assert(E && "should not be initialized with null expression");
+ }
+
+ bool isValid() const { return Msg; }
+
+ bool isPureMessageExpr() const { return !PE; }
+
+ bool isPropertyGetter() const { return PE && !IsPropSetter; }
+
+ bool isPropertySetter() const {
+ return IsPropSetter;
+ }
+
+ const Expr *getMessageExpr() const {
+ return Msg;
+ }
+
+ QualType getType(ASTContext &ctx) const {
+ return Msg->getType();
+ }
+
+ QualType getResultType(ASTContext &ctx) const {
+ if (const ObjCMethodDecl *MD = Msg->getMethodDecl())
+ return MD->getResultType();
+ return getType(ctx);
+ }
+
+ ObjCMethodFamily getMethodFamily() const {
+ return Msg->getMethodFamily();
+ }
+
+ Selector getSelector() const {
+ return Msg->getSelector();
+ }
+
+ const Expr *getInstanceReceiver() const {
+ return Msg->getInstanceReceiver();
+ }
+
+ SVal getInstanceReceiverSVal(ProgramStateRef State,
+ const LocationContext *LC) const {
+ if (!isInstanceMessage())
+ return UndefinedVal();
+ if (const Expr *Ex = getInstanceReceiver())
+ return State->getSValAsScalarOrLoc(Ex, LC);
+
+ // An instance message with no expression means we are sending to super.
+ // In this case the object reference is the same as 'self'.
+ const ImplicitParamDecl *SelfDecl = LC->getSelfDecl();
+ assert(SelfDecl && "No message receiver Expr, but not in an ObjC method");
+ return State->getSVal(State->getRegion(SelfDecl, LC));
+ }
+
+ bool isInstanceMessage() const {
+ return Msg->isInstanceMessage();
+ }
+
+ const ObjCMethodDecl *getMethodDecl() const {
+ return Msg->getMethodDecl();
+ }
+
+ const ObjCInterfaceDecl *getReceiverInterface() const {
+ return Msg->getReceiverInterface();
+ }
+
+ SourceLocation getSuperLoc() const {
+ if (PE)
+ return PE->getReceiverLocation();
+ return Msg->getSuperLoc();
+ }
+
+ SourceRange getSourceRange() const LLVM_READONLY {
+ if (PE)
+ return PE->getSourceRange();
+ return Msg->getSourceRange();
+ }
+
+ unsigned getNumArgs() const {
+ return Msg->getNumArgs();
+ }
+
+ SVal getArgSVal(unsigned i,
+ const LocationContext *LCtx,
+ ProgramStateRef state) const {
+ assert(i < getNumArgs() && "Invalid index for argument");
+ return state->getSVal(Msg->getArg(i), LCtx);
+ }
+
+ QualType getArgType(unsigned i) const {
+ assert(i < getNumArgs() && "Invalid index for argument");
+ return Msg->getArg(i)->getType();
+ }
+
+ const Expr *getArgExpr(unsigned i) const {
+ assert(i < getNumArgs() && "Invalid index for argument");
+ return Msg->getArg(i);
+ }
+
+ SourceRange getArgSourceRange(unsigned i) const {
+ const Expr *argE = getArgExpr(i);
+ return argE->getSourceRange();
+ }
+
+ SourceRange getReceiverSourceRange() const {
+ if (PE) {
+ if (PE->isObjectReceiver())
+ return PE->getBase()->getSourceRange();
+ }
+ else {
+ return Msg->getReceiverRange();
+ }
+
+ // FIXME: This isn't a range.
+ return PE->getReceiverLocation();
+ }
+};
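+
+// Example (illustrative sketch): both an explicit send `[obj foo]` and a
+// property access `obj.prop` reach checkers wrapped in an ObjCMessage, which
+// can then be queried uniformly.  'ME', 'State', and 'LC' are assumed to come
+// from the surrounding checker callback.
+//
+//   ObjCMessage Msg(ME);
+//   if (Msg.isInstanceMessage()) {
+//     SVal Receiver = Msg.getInstanceReceiverSVal(State, LC);
+//     Selector Sel = Msg.getSelector();
+//   }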
+
+/// \brief Common wrapper for a call expression, ObjC message, or C++
+/// constructor, mainly to provide a common interface for their arguments.
+class CallOrObjCMessage {
+ llvm::PointerUnion<const CallExpr *, const CXXConstructExpr *> CallE;
+ ObjCMessage Msg;
+ ProgramStateRef State;
+ const LocationContext *LCtx;
+public:
+ CallOrObjCMessage(const CallExpr *callE, ProgramStateRef state,
+ const LocationContext *lctx)
+ : CallE(callE), State(state), LCtx(lctx) {}
+ CallOrObjCMessage(const CXXConstructExpr *consE, ProgramStateRef state,
+ const LocationContext *lctx)
+ : CallE(consE), State(state), LCtx(lctx) {}
+ CallOrObjCMessage(const ObjCMessage &msg, ProgramStateRef state,
+ const LocationContext *lctx)
+ : CallE((CallExpr *)0), Msg(msg), State(state), LCtx(lctx) {}
+
+ QualType getResultType(ASTContext &ctx) const;
+
+ bool isFunctionCall() const {
+ return CallE && CallE.is<const CallExpr *>();
+ }
+
+ bool isCXXConstructExpr() const {
+ return CallE && CallE.is<const CXXConstructExpr *>();
+ }
+
+ bool isObjCMessage() const {
+ return !CallE;
+ }
+
+ bool isCXXCall() const {
+ const CallExpr *ActualCallE = CallE.dyn_cast<const CallExpr *>();
+ return ActualCallE && isa<CXXMemberCallExpr>(ActualCallE);
+ }
+
+  /// Check if the callee is declared in a system header.
+ bool isInSystemHeader() const {
+ if (const Decl *FD = getDecl()) {
+ const SourceManager &SM =
+ State->getStateManager().getContext().getSourceManager();
+ return SM.isInSystemHeader(FD->getLocation());
+ }
+ return false;
+ }
+
+ const Expr *getOriginExpr() const {
+ if (!CallE)
+ return Msg.getMessageExpr();
+ if (const CXXConstructExpr *Ctor =
+ CallE.dyn_cast<const CXXConstructExpr *>())
+ return Ctor;
+ return CallE.get<const CallExpr *>();
+ }
+
+ SVal getFunctionCallee() const;
+ SVal getCXXCallee() const;
+ SVal getInstanceMessageReceiver(const LocationContext *LC) const;
+
+ /// Get the declaration of the function or method.
+ const Decl *getDecl() const;
+
+ unsigned getNumArgs() const {
+ if (!CallE)
+ return Msg.getNumArgs();
+ if (const CXXConstructExpr *Ctor =
+ CallE.dyn_cast<const CXXConstructExpr *>())
+ return Ctor->getNumArgs();
+ return CallE.get<const CallExpr *>()->getNumArgs();
+ }
+
+ SVal getArgSVal(unsigned i) const {
+ assert(i < getNumArgs());
+ if (!CallE)
+ return Msg.getArgSVal(i, LCtx, State);
+ return State->getSVal(getArg(i), LCtx);
+ }
+
+ const Expr *getArg(unsigned i) const {
+ assert(i < getNumArgs());
+ if (!CallE)
+ return Msg.getArgExpr(i);
+ if (const CXXConstructExpr *Ctor =
+ CallE.dyn_cast<const CXXConstructExpr *>())
+ return Ctor->getArg(i);
+ return CallE.get<const CallExpr *>()->getArg(i);
+ }
+
+ SourceRange getArgSourceRange(unsigned i) const {
+ assert(i < getNumArgs());
+ if (CallE)
+ return getArg(i)->getSourceRange();
+ return Msg.getArgSourceRange(i);
+ }
+
+ SourceRange getReceiverSourceRange() const {
+ assert(isObjCMessage());
+ return Msg.getReceiverSourceRange();
+ }
+
+ /// \brief Check if the name corresponds to a CoreFoundation or CoreGraphics
+ /// function that allows objects to escape.
+ ///
+ /// Many methods allow a tracked object to escape. For example:
+ ///
+ /// CFMutableDictionaryRef x = CFDictionaryCreateMutable(..., customDeallocator);
+ /// CFDictionaryAddValue(y, key, x);
+ ///
+ /// We handle this and similar cases with the following heuristic. If the
+ /// function name contains "InsertValue", "SetValue", "AddValue",
+ /// "AppendValue", or "SetAttribute", then we assume that arguments may
+ /// escape.
+ //
+ // TODO: To reduce false negatives here, we should track the container
+ // allocation site and check if a proper deallocator was set there.
+ static bool isCFCGAllowingEscape(StringRef FName) {
+ if (FName[0] == 'C' && (FName[1] == 'F' || FName[1] == 'G'))
+ if (StrInStrNoCase(FName, "InsertValue") != StringRef::npos||
+ StrInStrNoCase(FName, "AddValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "SetValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "WithData") != StringRef::npos ||
+ StrInStrNoCase(FName, "AppendValue") != StringRef::npos||
+ StrInStrNoCase(FName, "SetAttribute") != StringRef::npos) {
+ return true;
+ }
+ return false;
+ }
+};
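+
+// Example (illustrative sketch): the wrapper lets a checker walk call
+// arguments without caring whether the site is a CallExpr, a CXXConstructExpr,
+// or an ObjC message.  'CE', 'State', and 'LCtx' are assumed to come from the
+// surrounding checker callback.
+//
+//   CallOrObjCMessage Call(CE, State, LCtx);
+//   for (unsigned i = 0, e = Call.getNumArgs(); i != e; ++i) {
+//     SVal ArgVal = Call.getArgSVal(i);
+//     QualType ArgTy = Call.getArg(i)->getType();
+//   }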
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
new file mode 100644
index 0000000..360d648
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h
@@ -0,0 +1,796 @@
+//== ProgramState.h - Path-sensitive "State" for tracking values -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ProgramState, ProgramStateTrait, and ProgramStateManager.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_VALUESTATE_H
+#define LLVM_CLANG_GR_VALUESTATE_H
+
+#include "clang/Basic/LLVM.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Environment.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/TaintTag.h"
+#include "llvm/ADT/PointerIntPair.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableMap.h"
+
+namespace llvm {
+class APSInt;
+class BumpPtrAllocator;
+}
+
+namespace clang {
+class ASTContext;
+
+namespace ento {
+
+class CallOrObjCMessage;
+
+typedef ConstraintManager* (*ConstraintManagerCreator)(ProgramStateManager&,
+ SubEngine&);
+typedef StoreManager* (*StoreManagerCreator)(ProgramStateManager&);
+
+//===----------------------------------------------------------------------===//
+// ProgramStateTrait - Traits used by the Generic Data Map of a ProgramState.
+//===----------------------------------------------------------------------===//
+
+template <typename T> struct ProgramStatePartialTrait;
+
+template <typename T> struct ProgramStateTrait {
+ typedef typename T::data_type data_type;
+ static inline void *GDMIndex() { return &T::TagInt; }
+ static inline void *MakeVoidPtr(data_type D) { return (void*) D; }
+ static inline data_type MakeData(void *const* P) {
+ return P ? (data_type) *P : (data_type) 0;
+ }
+};
+
+/// \class ProgramState
+/// ProgramState - This class encapsulates:
+///
+/// 1. A mapping from expressions to values (Environment)
+/// 2. A mapping from locations to values (Store)
+/// 3. Constraints on symbolic values (GenericDataMap)
+///
+/// Together these represent the "abstract state" of a program.
+///
+/// ProgramState is intended to be used as a functional object; that is,
+/// once it is created and made "persistent" in a FoldingSet, its
+/// values will never change.
+class ProgramState : public llvm::FoldingSetNode {
+public:
+ typedef llvm::ImmutableSet<llvm::APSInt*> IntSetTy;
+ typedef llvm::ImmutableMap<void*, void*> GenericDataMap;
+
+private:
+ void operator=(const ProgramState& R) const; // Do not implement.
+
+ friend class ProgramStateManager;
+ friend class ExplodedGraph;
+ friend class ExplodedNode;
+
+ ProgramStateManager *stateMgr;
+ Environment Env; // Maps a Stmt to its current SVal.
+ Store store; // Maps a location to its current value.
+ GenericDataMap GDM; // Custom data stored by a client of this class.
+ unsigned refCount;
+
+ /// makeWithStore - Return a ProgramState with the same values as the current
+ /// state with the exception of using the specified Store.
+ ProgramStateRef makeWithStore(const StoreRef &store) const;
+
+ void setStore(const StoreRef &storeRef);
+
+public:
+ /// This ctor is used when creating the first ProgramState object.
+ ProgramState(ProgramStateManager *mgr, const Environment& env,
+ StoreRef st, GenericDataMap gdm);
+
+ /// Copy ctor - We must explicitly define this or else the "Next" ptr
+ /// in FoldingSetNode will also get copied.
+ ProgramState(const ProgramState &RHS);
+
+ ~ProgramState();
+
+ /// Return the ProgramStateManager associated with this state.
+ ProgramStateManager &getStateManager() const { return *stateMgr; }
+
+ /// getEnvironment - Return the environment associated with this state.
+ /// The environment is the mapping from expressions to values.
+ const Environment& getEnvironment() const { return Env; }
+
+ /// Return the store associated with this state. The store
+ /// is a mapping from locations to values.
+ Store getStore() const { return store; }
+
+
+ /// getGDM - Return the generic data map associated with this state.
+ GenericDataMap getGDM() const { return GDM; }
+
+ void setGDM(GenericDataMap gdm) { GDM = gdm; }
+
+ /// Profile - Profile the contents of a ProgramState object for use in a
+ /// FoldingSet. Two ProgramState objects are considered equal if they
+ /// have the same Environment, Store, and GenericDataMap.
+ static void Profile(llvm::FoldingSetNodeID& ID, const ProgramState *V) {
+ V->Env.Profile(ID);
+ ID.AddPointer(V->store);
+ V->GDM.Profile(ID);
+ }
+
+ /// Profile - Used to profile the contents of this object for inclusion
+ /// in a FoldingSet.
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ Profile(ID, this);
+ }
+
+ BasicValueFactory &getBasicVals() const;
+ SymbolManager &getSymbolManager() const;
+
+ //==---------------------------------------------------------------------==//
+ // Constraints on values.
+ //==---------------------------------------------------------------------==//
+ //
+ // Each ProgramState records constraints on symbolic values. These constraints
+ // are managed using the ConstraintManager associated with a ProgramStateManager.
+ // As constraints gradually accrue on symbolic values, added constraints
+ // may conflict and indicate that a state is infeasible (as no real values
+ // could satisfy all the constraints). This is the principal mechanism
+ // for modeling path-sensitivity in ExprEngine/ProgramState.
+ //
+ // Various "assume" methods form the interface for adding constraints to
+ // symbolic values. A call to 'assume' indicates an assumption being placed
+ // on one or symbolic values. 'assume' methods take the following inputs:
+ //
+ // (1) A ProgramState object representing the current state.
+ //
+ // (2) The assumed constraint (which is specific to a given "assume" method).
+ //
+ // (3) A binary value "Assumption" that indicates whether the constraint is
+ // assumed to be true or false.
+ //
+ // The output of "assume*" is a new ProgramState object with the added constraints.
+ // If no new state is feasible, NULL is returned.
+ //
+
+ ProgramStateRef assume(DefinedOrUnknownSVal cond, bool assumption) const;
+
+ /// This method assumes both "true" and "false" for 'cond', and
+ /// returns both corresponding states. It's shorthand for doing
+ /// 'assume' twice.
+ std::pair<ProgramStateRef , ProgramStateRef >
+ assume(DefinedOrUnknownSVal cond) const;
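+
+  // Example (illustrative sketch): a checker typically splits the current
+  // state on a branch condition and follows each feasible world separately.
+  // 'Cond' is assumed to be a DefinedOrUnknownSVal taken from the environment,
+  // and 'state' the current ProgramStateRef.
+  //
+  //   std::pair<ProgramStateRef, ProgramStateRef> P = state->assume(Cond);
+  //   ProgramStateRef StTrue = P.first, StFalse = P.second;
+  //   if (StTrue && !StFalse) {
+  //     // 'Cond' is known to be true along this path.
+  //   }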
+
+ ProgramStateRef assumeInBound(DefinedOrUnknownSVal idx,
+ DefinedOrUnknownSVal upperBound,
+ bool assumption,
+ QualType IndexType = QualType()) const;
+
+ /// Utility method for getting regions.
+ const VarRegion* getRegion(const VarDecl *D, const LocationContext *LC) const;
+
+ //==---------------------------------------------------------------------==//
+ // Binding and retrieving values to/from the environment and symbolic store.
+ //==---------------------------------------------------------------------==//
+
+  /// bindCompoundLiteral - Return the state that has the bindings currently
+ /// in this state plus the bindings for the CompoundLiteral.
+ ProgramStateRef bindCompoundLiteral(const CompoundLiteralExpr *CL,
+ const LocationContext *LC,
+ SVal V) const;
+
+ /// Create a new state by binding the value 'V' to the statement 'S' in the
+ /// state's environment.
+ ProgramStateRef BindExpr(const Stmt *S, const LocationContext *LCtx,
+ SVal V, bool Invalidate = true) const;
+
+  /// Create a new state by binding the value 'V' and location 'location' to the
+ /// statement 'S' in the state's environment.
+ ProgramStateRef bindExprAndLocation(const Stmt *S,
+ const LocationContext *LCtx,
+ SVal location, SVal V) const;
+
+ ProgramStateRef bindDecl(const VarRegion *VR, SVal V) const;
+
+ ProgramStateRef bindDeclWithNoInit(const VarRegion *VR) const;
+
+ ProgramStateRef bindLoc(Loc location, SVal V) const;
+
+ ProgramStateRef bindLoc(SVal location, SVal V) const;
+
+ ProgramStateRef bindDefault(SVal loc, SVal V) const;
+
+ ProgramStateRef unbindLoc(Loc LV) const;
+
+ /// invalidateRegions - Returns the state with bindings for the given regions
+  /// cleared from the store. The regions are passed in as an ArrayRef.
+  /// Optionally invalidates global regions as well.
+ ProgramStateRef invalidateRegions(ArrayRef<const MemRegion *> Regions,
+ const Expr *E, unsigned BlockCount,
+ const LocationContext *LCtx,
+ StoreManager::InvalidatedSymbols *IS = 0,
+ const CallOrObjCMessage *Call = 0) const;
+
+ /// enterStackFrame - Returns the state for entry to the given stack frame,
+ /// preserving the current state.
+ ProgramStateRef enterStackFrame(const LocationContext *callerCtx,
+ const StackFrameContext *calleeCtx) const;
+
+ /// Get the lvalue for a variable reference.
+ Loc getLValue(const VarDecl *D, const LocationContext *LC) const;
+
+ Loc getLValue(const CompoundLiteralExpr *literal,
+ const LocationContext *LC) const;
+
+ /// Get the lvalue for an ivar reference.
+ SVal getLValue(const ObjCIvarDecl *decl, SVal base) const;
+
+ /// Get the lvalue for a field reference.
+ SVal getLValue(const FieldDecl *decl, SVal Base) const;
+
+ /// Get the lvalue for an array index.
+ SVal getLValue(QualType ElementType, SVal Idx, SVal Base) const;
+
+ const llvm::APSInt *getSymVal(SymbolRef sym) const;
+
+ /// Returns the SVal bound to the statement 'S' in the state's environment.
+ SVal getSVal(const Stmt *S, const LocationContext *LCtx,
+ bool useOnlyDirectBindings = false) const;
+
+ SVal getSValAsScalarOrLoc(const Stmt *Ex, const LocationContext *LCtx) const;
+
+ /// \brief Return the value bound to the specified location.
+ /// Returns UnknownVal() if none found.
+ SVal getSVal(Loc LV, QualType T = QualType()) const;
+
+ /// Returns the "raw" SVal bound to LV before any value simplfication.
+ SVal getRawSVal(Loc LV, QualType T= QualType()) const;
+
+ /// \brief Return the value bound to the specified location.
+ /// Returns UnknownVal() if none found.
+ SVal getSVal(const MemRegion* R) const;
+
+ SVal getSValAsScalarOrLoc(const MemRegion *R) const;
+
+ /// \brief Visits the symbols reachable from the given SVal using the provided
+ /// SymbolVisitor.
+ ///
+ /// This is a convenience API. Consider using ScanReachableSymbols class
+ /// directly when making multiple scans on the same state with the same
+ /// visitor to avoid repeated initialization cost.
+ /// \sa ScanReachableSymbols
+ bool scanReachableSymbols(SVal val, SymbolVisitor& visitor) const;
+
+ /// \brief Visits the symbols reachable from the SVals in the given range
+ /// using the provided SymbolVisitor.
+ bool scanReachableSymbols(const SVal *I, const SVal *E,
+ SymbolVisitor &visitor) const;
+
+ /// \brief Visits the symbols reachable from the regions in the given
+ /// MemRegions range using the provided SymbolVisitor.
+ bool scanReachableSymbols(const MemRegion * const *I,
+ const MemRegion * const *E,
+ SymbolVisitor &visitor) const;
+
+ template <typename CB> CB scanReachableSymbols(SVal val) const;
+ template <typename CB> CB scanReachableSymbols(const SVal *beg,
+ const SVal *end) const;
+
+ template <typename CB> CB
+ scanReachableSymbols(const MemRegion * const *beg,
+ const MemRegion * const *end) const;
+
+ /// Create a new state in which the statement is marked as tainted.
+ ProgramStateRef addTaint(const Stmt *S, const LocationContext *LCtx,
+ TaintTagType Kind = TaintTagGeneric) const;
+
+ /// Create a new state in which the symbol is marked as tainted.
+ ProgramStateRef addTaint(SymbolRef S,
+ TaintTagType Kind = TaintTagGeneric) const;
+
+ /// Create a new state in which the region symbol is marked as tainted.
+ ProgramStateRef addTaint(const MemRegion *R,
+ TaintTagType Kind = TaintTagGeneric) const;
+
+ /// Check if the statement is tainted in the current state.
+ bool isTainted(const Stmt *S, const LocationContext *LCtx,
+ TaintTagType Kind = TaintTagGeneric) const;
+ bool isTainted(SVal V, TaintTagType Kind = TaintTagGeneric) const;
+ bool isTainted(SymbolRef Sym, TaintTagType Kind = TaintTagGeneric) const;
+ bool isTainted(const MemRegion *Reg, TaintTagType Kind=TaintTagGeneric) const;
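+
+  // Example (illustrative sketch): taint is propagated by deriving a new state
+  // rather than mutating the current one.  'Sym' is assumed to be a SymbolRef
+  // for an attacker-controlled value and 'state' the current ProgramStateRef.
+  //
+  //   ProgramStateRef NewState = state->addTaint(Sym);
+  //   if (NewState->isTainted(Sym)) {
+  //     // generate a report or keep tracking the symbol
+  //   }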
+
+ //==---------------------------------------------------------------------==//
+ // Accessing the Generic Data Map (GDM).
+ //==---------------------------------------------------------------------==//
+
+ void *const* FindGDM(void *K) const;
+
+ template<typename T>
+ ProgramStateRef add(typename ProgramStateTrait<T>::key_type K) const;
+
+ template <typename T>
+ typename ProgramStateTrait<T>::data_type
+ get() const {
+ return ProgramStateTrait<T>::MakeData(FindGDM(ProgramStateTrait<T>::GDMIndex()));
+ }
+
+ template<typename T>
+ typename ProgramStateTrait<T>::lookup_type
+ get(typename ProgramStateTrait<T>::key_type key) const {
+ void *const* d = FindGDM(ProgramStateTrait<T>::GDMIndex());
+ return ProgramStateTrait<T>::Lookup(ProgramStateTrait<T>::MakeData(d), key);
+ }
+
+ template <typename T>
+ typename ProgramStateTrait<T>::context_type get_context() const;
+
+
+ template<typename T>
+ ProgramStateRef remove(typename ProgramStateTrait<T>::key_type K) const;
+
+ template<typename T>
+ ProgramStateRef remove(typename ProgramStateTrait<T>::key_type K,
+ typename ProgramStateTrait<T>::context_type C) const;
+ template <typename T>
+ ProgramStateRef remove() const;
+
+ template<typename T>
+ ProgramStateRef set(typename ProgramStateTrait<T>::data_type D) const;
+
+ template<typename T>
+ ProgramStateRef set(typename ProgramStateTrait<T>::key_type K,
+ typename ProgramStateTrait<T>::value_type E) const;
+
+ template<typename T>
+ ProgramStateRef set(typename ProgramStateTrait<T>::key_type K,
+ typename ProgramStateTrait<T>::value_type E,
+ typename ProgramStateTrait<T>::context_type C) const;
+
+ template<typename T>
+ bool contains(typename ProgramStateTrait<T>::key_type key) const {
+ void *const* d = FindGDM(ProgramStateTrait<T>::GDMIndex());
+ return ProgramStateTrait<T>::Contains(ProgramStateTrait<T>::MakeData(d), key);
+ }
+
+ // Pretty-printing.
+ void print(raw_ostream &Out, const char *nl = "\n",
+ const char *sep = "") const;
+ void printDOT(raw_ostream &Out) const;
+ void printTaint(raw_ostream &Out, const char *nl = "\n",
+ const char *sep = "") const;
+
+ void dump() const;
+ void dumpTaint() const;
+
+private:
+ friend void ProgramStateRetain(const ProgramState *state);
+ friend void ProgramStateRelease(const ProgramState *state);
+
+ ProgramStateRef
+ invalidateRegionsImpl(ArrayRef<const MemRegion *> Regions,
+ const Expr *E, unsigned BlockCount,
+ const LocationContext *LCtx,
+ StoreManager::InvalidatedSymbols &IS,
+ const CallOrObjCMessage *Call) const;
+};
+
+//===----------------------------------------------------------------------===//
+// ProgramStateManager - Factory object for ProgramStates.
+//===----------------------------------------------------------------------===//
+
+class ProgramStateManager {
+ friend class ProgramState;
+ friend void ProgramStateRelease(const ProgramState *state);
+private:
+ /// Eng - The SubEngine that owns this state manager.
+ SubEngine *Eng; /* Can be null. */
+
+ EnvironmentManager EnvMgr;
+ OwningPtr<StoreManager> StoreMgr;
+ OwningPtr<ConstraintManager> ConstraintMgr;
+
+ ProgramState::GenericDataMap::Factory GDMFactory;
+
+ typedef llvm::DenseMap<void*,std::pair<void*,void (*)(void*)> > GDMContextsTy;
+ GDMContextsTy GDMContexts;
+
+ /// StateSet - FoldingSet containing all the states created for analyzing
+ /// a particular function. This is used to unique states.
+ llvm::FoldingSet<ProgramState> StateSet;
+
+ /// Object that manages the data for all created SVals.
+ OwningPtr<SValBuilder> svalBuilder;
+
+ /// A BumpPtrAllocator to allocate states.
+ llvm::BumpPtrAllocator &Alloc;
+
+ /// A vector of ProgramStates that we can reuse.
+ std::vector<ProgramState *> freeStates;
+
+public:
+ ProgramStateManager(ASTContext &Ctx,
+ StoreManagerCreator CreateStoreManager,
+ ConstraintManagerCreator CreateConstraintManager,
+ llvm::BumpPtrAllocator& alloc,
+ SubEngine &subeng)
+ : Eng(&subeng),
+ EnvMgr(alloc),
+ GDMFactory(alloc),
+ svalBuilder(createSimpleSValBuilder(alloc, Ctx, *this)),
+ Alloc(alloc) {
+ StoreMgr.reset((*CreateStoreManager)(*this));
+ ConstraintMgr.reset((*CreateConstraintManager)(*this, subeng));
+ }
+
+ ProgramStateManager(ASTContext &Ctx,
+ StoreManagerCreator CreateStoreManager,
+ ConstraintManager* ConstraintManagerPtr,
+ llvm::BumpPtrAllocator& alloc)
+ : Eng(0),
+ EnvMgr(alloc),
+ GDMFactory(alloc),
+ svalBuilder(createSimpleSValBuilder(alloc, Ctx, *this)),
+ Alloc(alloc) {
+ StoreMgr.reset((*CreateStoreManager)(*this));
+ ConstraintMgr.reset(ConstraintManagerPtr);
+ }
+
+ ~ProgramStateManager();
+
+ ProgramStateRef getInitialState(const LocationContext *InitLoc);
+
+ ASTContext &getContext() { return svalBuilder->getContext(); }
+ const ASTContext &getContext() const { return svalBuilder->getContext(); }
+
+ BasicValueFactory &getBasicVals() {
+ return svalBuilder->getBasicValueFactory();
+ }
+ const BasicValueFactory& getBasicVals() const {
+ return svalBuilder->getBasicValueFactory();
+ }
+
+ SValBuilder &getSValBuilder() {
+ return *svalBuilder;
+ }
+
+ SymbolManager &getSymbolManager() {
+ return svalBuilder->getSymbolManager();
+ }
+ const SymbolManager &getSymbolManager() const {
+ return svalBuilder->getSymbolManager();
+ }
+
+ llvm::BumpPtrAllocator& getAllocator() { return Alloc; }
+
+ MemRegionManager& getRegionManager() {
+ return svalBuilder->getRegionManager();
+ }
+ const MemRegionManager& getRegionManager() const {
+ return svalBuilder->getRegionManager();
+ }
+
+ StoreManager& getStoreManager() { return *StoreMgr; }
+ ConstraintManager& getConstraintManager() { return *ConstraintMgr; }
+ SubEngine* getOwningEngine() { return Eng; }
+
+ ProgramStateRef removeDeadBindings(ProgramStateRef St,
+ const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper);
+
+ /// Marshal a new state for the callee in another translation unit.
+ /// 'state' is owned by the caller's engine.
+ ProgramStateRef MarshalState(ProgramStateRef state, const StackFrameContext *L);
+
+public:
+
+ SVal ArrayToPointer(Loc Array) {
+ return StoreMgr->ArrayToPointer(Array);
+ }
+
+ // Methods that manipulate the GDM.
+ ProgramStateRef addGDM(ProgramStateRef St, void *Key, void *Data);
+ ProgramStateRef removeGDM(ProgramStateRef state, void *Key);
+
+ // Methods that query & manipulate the Store.
+
+ void iterBindings(ProgramStateRef state, StoreManager::BindingsHandler& F) {
+ StoreMgr->iterBindings(state->getStore(), F);
+ }
+
+ ProgramStateRef getPersistentState(ProgramState &Impl);
+ ProgramStateRef getPersistentStateWithGDM(ProgramStateRef FromState,
+ ProgramStateRef GDMState);
+
+ bool haveEqualEnvironments(ProgramStateRef S1, ProgramStateRef S2) {
+ return S1->Env == S2->Env;
+ }
+
+ bool haveEqualStores(ProgramStateRef S1, ProgramStateRef S2) {
+ return S1->store == S2->store;
+ }
+
+ //==---------------------------------------------------------------------==//
+ // Generic Data Map methods.
+ //==---------------------------------------------------------------------==//
+ //
+ // ProgramStateManager and ProgramState support a "generic data map" that allows
+ // different clients of ProgramState objects to embed arbitrary data within a
+ // ProgramState object. The generic data map is essentially an immutable map
+ // from a "tag" (that acts as the "key" for a client) and opaque values.
+ // Tags/keys and values are simply void* values. The typical way that clients
+ // generate unique tags are by taking the address of a static variable.
+ // Clients are responsible for ensuring that data values referred to by a
+ // the data pointer are immutable (and thus are essentially purely functional
+ // data).
+ //
+ // The templated methods below use the ProgramStateTrait<T> class
+ // to resolve keys into the GDM and to return data values to clients.
+ //
+
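+  // Example (illustrative sketch): assuming a client has registered a trait
+  // tag 'MyFlag' (hypothetical name) whose data type is bool, and 'StateMgr'
+  // and 'St' are the current manager and state, GDM access looks like:
+  //
+  //   ProgramStateRef St2 = StateMgr.set<MyFlag>(St, true);
+  //   bool Flag = St2->get<MyFlag>();
+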
+ // Trait based GDM dispatch.
+ template <typename T>
+ ProgramStateRef set(ProgramStateRef st, typename ProgramStateTrait<T>::data_type D) {
+ return addGDM(st, ProgramStateTrait<T>::GDMIndex(),
+ ProgramStateTrait<T>::MakeVoidPtr(D));
+ }
+
+ template<typename T>
+ ProgramStateRef set(ProgramStateRef st,
+ typename ProgramStateTrait<T>::key_type K,
+ typename ProgramStateTrait<T>::value_type V,
+ typename ProgramStateTrait<T>::context_type C) {
+
+ return addGDM(st, ProgramStateTrait<T>::GDMIndex(),
+ ProgramStateTrait<T>::MakeVoidPtr(ProgramStateTrait<T>::Set(st->get<T>(), K, V, C)));
+ }
+
+ template <typename T>
+ ProgramStateRef add(ProgramStateRef st,
+ typename ProgramStateTrait<T>::key_type K,
+ typename ProgramStateTrait<T>::context_type C) {
+ return addGDM(st, ProgramStateTrait<T>::GDMIndex(),
+ ProgramStateTrait<T>::MakeVoidPtr(ProgramStateTrait<T>::Add(st->get<T>(), K, C)));
+ }
+
+ template <typename T>
+ ProgramStateRef remove(ProgramStateRef st,
+ typename ProgramStateTrait<T>::key_type K,
+ typename ProgramStateTrait<T>::context_type C) {
+
+ return addGDM(st, ProgramStateTrait<T>::GDMIndex(),
+ ProgramStateTrait<T>::MakeVoidPtr(ProgramStateTrait<T>::Remove(st->get<T>(), K, C)));
+ }
+
+ template <typename T>
+ ProgramStateRef remove(ProgramStateRef st) {
+ return removeGDM(st, ProgramStateTrait<T>::GDMIndex());
+ }
+
+ void *FindGDMContext(void *index,
+ void *(*CreateContext)(llvm::BumpPtrAllocator&),
+ void (*DeleteContext)(void*));
+
+ template <typename T>
+ typename ProgramStateTrait<T>::context_type get_context() {
+ void *p = FindGDMContext(ProgramStateTrait<T>::GDMIndex(),
+ ProgramStateTrait<T>::CreateContext,
+ ProgramStateTrait<T>::DeleteContext);
+
+ return ProgramStateTrait<T>::MakeContext(p);
+ }
+
+ const llvm::APSInt* getSymVal(ProgramStateRef St, SymbolRef sym) {
+ return ConstraintMgr->getSymVal(St, sym);
+ }
+
+ void EndPath(ProgramStateRef St) {
+ ConstraintMgr->EndPath(St);
+ }
+};
+
+
+//===----------------------------------------------------------------------===//
+// Out-of-line method definitions for ProgramState.
+//===----------------------------------------------------------------------===//
+
+inline const VarRegion* ProgramState::getRegion(const VarDecl *D,
+ const LocationContext *LC) const
+{
+ return getStateManager().getRegionManager().getVarRegion(D, LC);
+}
+
+inline ProgramStateRef ProgramState::assume(DefinedOrUnknownSVal Cond,
+ bool Assumption) const {
+ if (Cond.isUnknown())
+ return this;
+
+ return getStateManager().ConstraintMgr->assume(this, cast<DefinedSVal>(Cond),
+ Assumption);
+}
+
+inline std::pair<ProgramStateRef , ProgramStateRef >
+ProgramState::assume(DefinedOrUnknownSVal Cond) const {
+ if (Cond.isUnknown())
+ return std::make_pair(this, this);
+
+ return getStateManager().ConstraintMgr->assumeDual(this,
+ cast<DefinedSVal>(Cond));
+}
+
+inline ProgramStateRef ProgramState::bindLoc(SVal LV, SVal V) const {
+ return !isa<Loc>(LV) ? this : bindLoc(cast<Loc>(LV), V);
+}
+
+inline Loc ProgramState::getLValue(const VarDecl *VD,
+ const LocationContext *LC) const {
+ return getStateManager().StoreMgr->getLValueVar(VD, LC);
+}
+
+inline Loc ProgramState::getLValue(const CompoundLiteralExpr *literal,
+ const LocationContext *LC) const {
+ return getStateManager().StoreMgr->getLValueCompoundLiteral(literal, LC);
+}
+
+inline SVal ProgramState::getLValue(const ObjCIvarDecl *D, SVal Base) const {
+ return getStateManager().StoreMgr->getLValueIvar(D, Base);
+}
+
+inline SVal ProgramState::getLValue(const FieldDecl *D, SVal Base) const {
+ return getStateManager().StoreMgr->getLValueField(D, Base);
+}
+
+inline SVal ProgramState::getLValue(QualType ElementType, SVal Idx,
+                                    SVal Base) const {
+ if (NonLoc *N = dyn_cast<NonLoc>(&Idx))
+ return getStateManager().StoreMgr->getLValueElement(ElementType, *N, Base);
+ return UnknownVal();
+}
+
+inline const llvm::APSInt *ProgramState::getSymVal(SymbolRef sym) const {
+ return getStateManager().getSymVal(this, sym);
+}
+
+inline SVal ProgramState::getSVal(const Stmt *Ex, const LocationContext *LCtx,
+                                  bool useOnlyDirectBindings) const {
+ return Env.getSVal(EnvironmentEntry(Ex, LCtx),
+ *getStateManager().svalBuilder,
+ useOnlyDirectBindings);
+}
+
+inline SVal
+ProgramState::getSValAsScalarOrLoc(const Stmt *S,
+ const LocationContext *LCtx) const {
+ if (const Expr *Ex = dyn_cast<Expr>(S)) {
+ QualType T = Ex->getType();
+ if (Ex->isLValue() || Loc::isLocType(T) || T->isIntegerType())
+ return getSVal(S, LCtx);
+ }
+
+ return UnknownVal();
+}
+
+inline SVal ProgramState::getRawSVal(Loc LV, QualType T) const {
+ return getStateManager().StoreMgr->getBinding(getStore(), LV, T);
+}
+
+inline SVal ProgramState::getSVal(const MemRegion* R) const {
+ return getStateManager().StoreMgr->getBinding(getStore(),
+ loc::MemRegionVal(R));
+}
+
+inline BasicValueFactory &ProgramState::getBasicVals() const {
+ return getStateManager().getBasicVals();
+}
+
+inline SymbolManager &ProgramState::getSymbolManager() const {
+ return getStateManager().getSymbolManager();
+}
+
+template<typename T>
+ProgramStateRef ProgramState::add(typename ProgramStateTrait<T>::key_type K) const {
+ return getStateManager().add<T>(this, K, get_context<T>());
+}
+
+template <typename T>
+typename ProgramStateTrait<T>::context_type ProgramState::get_context() const {
+ return getStateManager().get_context<T>();
+}
+
+template<typename T>
+ProgramStateRef ProgramState::remove(typename ProgramStateTrait<T>::key_type K) const {
+ return getStateManager().remove<T>(this, K, get_context<T>());
+}
+
+template<typename T>
+ProgramStateRef ProgramState::remove(typename ProgramStateTrait<T>::key_type K,
+ typename ProgramStateTrait<T>::context_type C) const {
+ return getStateManager().remove<T>(this, K, C);
+}
+
+template <typename T>
+ProgramStateRef ProgramState::remove() const {
+ return getStateManager().remove<T>(this);
+}
+
+template<typename T>
+ProgramStateRef ProgramState::set(typename ProgramStateTrait<T>::data_type D) const {
+ return getStateManager().set<T>(this, D);
+}
+
+template<typename T>
+ProgramStateRef ProgramState::set(typename ProgramStateTrait<T>::key_type K,
+ typename ProgramStateTrait<T>::value_type E) const {
+ return getStateManager().set<T>(this, K, E, get_context<T>());
+}
+
+template<typename T>
+ProgramStateRef ProgramState::set(typename ProgramStateTrait<T>::key_type K,
+ typename ProgramStateTrait<T>::value_type E,
+ typename ProgramStateTrait<T>::context_type C) const {
+ return getStateManager().set<T>(this, K, E, C);
+}
+
+template <typename CB>
+CB ProgramState::scanReachableSymbols(SVal val) const {
+ CB cb(this);
+ scanReachableSymbols(val, cb);
+ return cb;
+}
+
+template <typename CB>
+CB ProgramState::scanReachableSymbols(const SVal *beg, const SVal *end) const {
+ CB cb(this);
+ scanReachableSymbols(beg, end, cb);
+ return cb;
+}
+
+template <typename CB>
+CB ProgramState::scanReachableSymbols(const MemRegion * const *beg,
+ const MemRegion * const *end) const {
+ CB cb(this);
+ scanReachableSymbols(beg, end, cb);
+ return cb;
+}
+
+/// \class ScanReachableSymbols
+/// A utility class that allows one to visit the reachable symbols using a
+/// custom SymbolVisitor.
+class ScanReachableSymbols : public SubRegionMap::Visitor {
+ virtual void anchor();
+ typedef llvm::DenseMap<const void*, unsigned> VisitedItems;
+
+ VisitedItems visited;
+ ProgramStateRef state;
+ SymbolVisitor &visitor;
+ OwningPtr<SubRegionMap> SRM;
+public:
+
+ ScanReachableSymbols(ProgramStateRef st, SymbolVisitor& v)
+ : state(st), visitor(v) {}
+
+ bool scan(nonloc::CompoundVal val);
+ bool scan(SVal val);
+ bool scan(const MemRegion *R);
+ bool scan(const SymExpr *sym);
+
+ // From SubRegionMap::Visitor.
+ bool Visit(const MemRegion* Parent, const MemRegion* SubRegion) {
+ return scan(SubRegion);
+ }
+};
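+
+// Example (illustrative sketch): 'MyVisitor' is a hypothetical SymbolVisitor
+// subclass defined by a checker; the scanner feeds it every symbol reachable
+// from the value 'V' in 'State'.
+//
+//   MyVisitor Visitor;
+//   ScanReachableSymbols Scanner(State, Visitor);
+//   Scanner.scan(V);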
+
+} // end GR namespace
+
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h
new file mode 100644
index 0000000..1c7bedb
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h
@@ -0,0 +1,197 @@
+// ProgramStateTrait.h - Partial implementations of ProgramStateTrait -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines partial implementations of template specializations of
+// the class ProgramStateTrait<>. ProgramStateTrait<> is used by ProgramState
+// to implement set/get methods for manipulating a ProgramState's
+// generic data map.
+//
+//===----------------------------------------------------------------------===//
+
+
+#ifndef LLVM_CLANG_GR_PROGRAMSTATETRAIT_H
+#define LLVM_CLANG_GR_PROGRAMSTATETRAIT_H
+
+namespace llvm {
+ class BumpPtrAllocator;
+ template <typename K, typename D, typename I> class ImmutableMap;
+ template <typename K, typename I> class ImmutableSet;
+ template <typename T> class ImmutableList;
+ template <typename T> class ImmutableListImpl;
+}
+
+namespace clang {
+
+namespace ento {
+ template <typename T> struct ProgramStatePartialTrait;
+
+ // Partial-specialization for ImmutableMap.
+
+ template <typename Key, typename Data, typename Info>
+ struct ProgramStatePartialTrait< llvm::ImmutableMap<Key,Data,Info> > {
+ typedef llvm::ImmutableMap<Key,Data,Info> data_type;
+ typedef typename data_type::Factory& context_type;
+ typedef Key key_type;
+ typedef Data value_type;
+ typedef const value_type* lookup_type;
+
+ static inline data_type MakeData(void *const* p) {
+ return p ? data_type((typename data_type::TreeTy*) *p) : data_type(0);
+ }
+ static inline void *MakeVoidPtr(data_type B) {
+ return B.getRoot();
+ }
+ static lookup_type Lookup(data_type B, key_type K) {
+ return B.lookup(K);
+ }
+ static data_type Set(data_type B, key_type K, value_type E,context_type F){
+ return F.add(B, K, E);
+ }
+
+ static data_type Remove(data_type B, key_type K, context_type F) {
+ return F.remove(B, K);
+ }
+
+ static inline context_type MakeContext(void *p) {
+ return *((typename data_type::Factory*) p);
+ }
+
+ static void *CreateContext(llvm::BumpPtrAllocator& Alloc) {
+ return new typename data_type::Factory(Alloc);
+ }
+
+ static void DeleteContext(void *Ctx) {
+ delete (typename data_type::Factory*) Ctx;
+ }
+ };
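+
+  // Example (illustrative sketch): a checker usually registers map-valued
+  // state by specializing ProgramStateTrait on a private tag type and reusing
+  // the partial trait above.  The names 'RefCountMap' and 'RefCountMapTy' are
+  // hypothetical.
+  //
+  //   struct RefCountMap {};
+  //   typedef llvm::ImmutableMap<SymbolRef, unsigned> RefCountMapTy;
+  //   namespace clang { namespace ento {
+  //     template <> struct ProgramStateTrait<RefCountMap>
+  //       : public ProgramStatePartialTrait<RefCountMapTy> {
+  //       static void *GDMIndex() { static int Index; return &Index; }
+  //     };
+  //   }}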
+
+
+ // Partial-specialization for ImmutableSet.
+
+ template <typename Key, typename Info>
+ struct ProgramStatePartialTrait< llvm::ImmutableSet<Key,Info> > {
+ typedef llvm::ImmutableSet<Key,Info> data_type;
+ typedef typename data_type::Factory& context_type;
+ typedef Key key_type;
+
+ static inline data_type MakeData(void *const* p) {
+ return p ? data_type((typename data_type::TreeTy*) *p) : data_type(0);
+ }
+
+ static inline void *MakeVoidPtr(data_type B) {
+ return B.getRoot();
+ }
+
+ static data_type Add(data_type B, key_type K, context_type F) {
+ return F.add(B, K);
+ }
+
+ static data_type Remove(data_type B, key_type K, context_type F) {
+ return F.remove(B, K);
+ }
+
+ static bool Contains(data_type B, key_type K) {
+ return B.contains(K);
+ }
+
+ static inline context_type MakeContext(void *p) {
+ return *((typename data_type::Factory*) p);
+ }
+
+ static void *CreateContext(llvm::BumpPtrAllocator& Alloc) {
+ return new typename data_type::Factory(Alloc);
+ }
+
+ static void DeleteContext(void *Ctx) {
+ delete (typename data_type::Factory*) Ctx;
+ }
+ };
+
+ // Partial-specialization for ImmutableList.
+
+ template <typename T>
+ struct ProgramStatePartialTrait< llvm::ImmutableList<T> > {
+ typedef llvm::ImmutableList<T> data_type;
+ typedef T key_type;
+ typedef typename data_type::Factory& context_type;
+
+ static data_type Add(data_type L, key_type K, context_type F) {
+ return F.add(K, L);
+ }
+
+ static bool Contains(data_type L, key_type K) {
+ return L.contains(K);
+ }
+
+ static inline data_type MakeData(void *const* p) {
+ return p ? data_type((const llvm::ImmutableListImpl<T>*) *p)
+ : data_type(0);
+ }
+
+ static inline void *MakeVoidPtr(data_type D) {
+ return (void*) D.getInternalPointer();
+ }
+
+ static inline context_type MakeContext(void *p) {
+ return *((typename data_type::Factory*) p);
+ }
+
+ static void *CreateContext(llvm::BumpPtrAllocator& Alloc) {
+ return new typename data_type::Factory(Alloc);
+ }
+
+ static void DeleteContext(void *Ctx) {
+ delete (typename data_type::Factory*) Ctx;
+ }
+ };
+
+ // Partial specialization for bool.
+ template <> struct ProgramStatePartialTrait<bool> {
+ typedef bool data_type;
+
+ static inline data_type MakeData(void *const* p) {
+ return p ? (data_type) (uintptr_t) *p
+ : data_type();
+ }
+ static inline void *MakeVoidPtr(data_type d) {
+ return (void*) (uintptr_t) d;
+ }
+ };
+
+ // Partial specialization for unsigned.
+ template <> struct ProgramStatePartialTrait<unsigned> {
+ typedef unsigned data_type;
+
+ static inline data_type MakeData(void *const* p) {
+ return p ? (data_type) (uintptr_t) *p
+ : data_type();
+ }
+ static inline void *MakeVoidPtr(data_type d) {
+ return (void*) (uintptr_t) d;
+ }
+ };
+
+ // Partial specialization for void*.
+ template <> struct ProgramStatePartialTrait<void*> {
+ typedef void *data_type;
+
+ static inline data_type MakeData(void *const* p) {
+ return p ? *p
+ : data_type();
+ }
+ static inline void *MakeVoidPtr(data_type d) {
+ return d;
+ }
+ };
+
+} // end GR namespace
+
+} // end clang namespace
+
+#endif
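
Editor's note (not part of the patch): the partial traits above are what checker-defined program-state maps, sets and lists plug into. Below is a minimal sketch of the conventional checker-side idiom for registering an ImmutableMap in the generic data map. It assumes ProgramStateTrait and its GDMIndex() hook from ProgramStateTrait.h plus the templated set/get accessors on ProgramState; the MySymbolMap and MySymbolMapTy names are hypothetical.

#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
#include "llvm/ADT/ImmutableMap.h"

// Hypothetical checker-side state: a map from symbols to an unsigned flag.
struct MySymbolMap {};  // tag used to index into the generic data map (GDM)
typedef llvm::ImmutableMap<clang::ento::SymbolRef, unsigned> MySymbolMapTy;

namespace clang {
namespace ento {
template <>
struct ProgramStateTrait<MySymbolMap>
  : public ProgramStatePartialTrait<MySymbolMapTy> {
  // Each trait needs a unique GDM key; the address of a local static works.
  static void *GDMIndex() { static int Index; return &Index; }
};
} // end ento namespace
} // end clang namespace

// Usage inside a checker callback (State is a ProgramStateRef):
//   ProgramStateRef NewState = State->set<MySymbolMap>(Sym, 1U);
//   const unsigned *Flag     = NewState->get<MySymbolMap>(Sym);
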
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h
new file mode 100644
index 0000000..371f3c5
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h
@@ -0,0 +1,43 @@
+//== ProgramState_Fwd.h - Incomplete declarations of ProgramState -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_PROGRAMSTATE_FWD_H
+#define LLVM_CLANG_PROGRAMSTATE_FWD_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+
+namespace clang {
+namespace ento {
+ class ProgramState;
+ class ProgramStateManager;
+ void ProgramStateRetain(const ProgramState *state);
+ void ProgramStateRelease(const ProgramState *state);
+}
+}
+
+namespace llvm {
+ template <> struct IntrusiveRefCntPtrInfo<const clang::ento::ProgramState> {
+ static void retain(const clang::ento::ProgramState *state) {
+ clang::ento::ProgramStateRetain(state);
+ }
+ static void release(const clang::ento::ProgramState *state) {
+ clang::ento::ProgramStateRelease(state);
+ }
+ };
+}
+
+namespace clang {
+namespace ento {
+ typedef IntrusiveRefCntPtr<const ProgramState> ProgramStateRef;
+}
+}
+
+#endif
+
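
Editor's note (not part of the patch): the only purpose of this forward header is to let clients pass states around as ProgramStateRef without pulling in ProgramState.h. A minimal sketch; pickState is a hypothetical helper.

#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"

using clang::ento::ProgramStateRef;

// Copying or returning a ProgramStateRef goes through the
// IntrusiveRefCntPtrInfo hooks declared above, so ProgramStateRetain and
// ProgramStateRelease are called automatically; no manual counting is needed.
ProgramStateRef pickState(ProgramStateRef A, ProgramStateRef B, bool UseA) {
  return UseA ? A : B;
}
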
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
new file mode 100644
index 0000000..4ad36f9
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h
@@ -0,0 +1,320 @@
+// SValBuilder.h - Construction of SVals from evaluating expressions -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SValBuilder, a class that specifies the interface for
+// "symbolic evaluators" which construct an SVal from an expression.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_SVALBUILDER
+#define LLVM_CLANG_GR_SVALBUILDER
+
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+
+namespace clang {
+
+class CXXBoolLiteralExpr;
+
+namespace ento {
+
+class SValBuilder {
+ virtual void anchor();
+protected:
+ ASTContext &Context;
+
+ /// Manager of APSInt values.
+ BasicValueFactory BasicVals;
+
+ /// Manages the creation of symbols.
+ SymbolManager SymMgr;
+
+ /// Manages the creation of memory regions.
+ MemRegionManager MemMgr;
+
+ ProgramStateManager &StateMgr;
+
+ /// The scalar type to use for array indices.
+ const QualType ArrayIndexTy;
+
+ /// The width of the scalar type used for array indices.
+ const unsigned ArrayIndexWidth;
+
+ virtual SVal evalCastFromNonLoc(NonLoc val, QualType castTy) = 0;
+ virtual SVal evalCastFromLoc(Loc val, QualType castTy) = 0;
+
+public:
+ // FIXME: Make these protected again once RegionStoreManager correctly
+ // handles loads from different bound value types.
+ virtual SVal dispatchCast(SVal val, QualType castTy) = 0;
+
+public:
+ SValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context,
+ ProgramStateManager &stateMgr)
+ : Context(context), BasicVals(context, alloc),
+ SymMgr(context, BasicVals, alloc),
+ MemMgr(context, alloc),
+ StateMgr(stateMgr),
+ ArrayIndexTy(context.IntTy),
+ ArrayIndexWidth(context.getTypeSize(ArrayIndexTy)) {}
+
+ virtual ~SValBuilder() {}
+
+ bool haveSameType(const SymExpr *Sym1, const SymExpr *Sym2) {
+ return haveSameType(Sym1->getType(Context), Sym2->getType(Context));
+ }
+
+ bool haveSameType(QualType Ty1, QualType Ty2) {
+ // FIXME: Remove the second disjunct when we support symbolic
+ // truncation/extension.
+ return (Context.getCanonicalType(Ty1) == Context.getCanonicalType(Ty2) ||
+ (Ty1->isIntegerType() && Ty2->isIntegerType()));
+ }
+
+ SVal evalCast(SVal val, QualType castTy, QualType originalType);
+
+ virtual SVal evalMinus(NonLoc val) = 0;
+
+ virtual SVal evalComplement(NonLoc val) = 0;
+
+ /// Create a new value which represents a binary expression with two
+ /// non-location operands.
+ virtual SVal evalBinOpNN(ProgramStateRef state, BinaryOperator::Opcode op,
+ NonLoc lhs, NonLoc rhs, QualType resultTy) = 0;
+
+ /// Create a new value which represents a binary expression with two memory
+ /// location operands.
+ virtual SVal evalBinOpLL(ProgramStateRef state, BinaryOperator::Opcode op,
+ Loc lhs, Loc rhs, QualType resultTy) = 0;
+
+ /// Create a new value which represents a binary expression with a memory
+ /// location operand and a non-location operand. For example, this would be
+ /// used to evaluate a pointer arithmetic operation.
+ virtual SVal evalBinOpLN(ProgramStateRef state, BinaryOperator::Opcode op,
+ Loc lhs, NonLoc rhs, QualType resultTy) = 0;
+
+ /// Evaluates a given SVal. If the SVal has only one possible (integer) value,
+ /// that value is returned. Otherwise, returns NULL.
+ virtual const llvm::APSInt *getKnownValue(ProgramStateRef state, SVal val) = 0;
+
+ /// Constructs a value when the builder cannot fold the given binary
+ /// expression itself. Depending on the state, it either keeps the expression
+ /// symbolic or discards the history and returns an UnknownVal.
+ SVal makeGenericVal(ProgramStateRef state, BinaryOperator::Opcode op,
+ NonLoc lhs, NonLoc rhs, QualType resultTy);
+
+ SVal evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
+ SVal lhs, SVal rhs, QualType type);
+
+ DefinedOrUnknownSVal evalEQ(ProgramStateRef state, DefinedOrUnknownSVal lhs,
+ DefinedOrUnknownSVal rhs);
+
+ ASTContext &getContext() { return Context; }
+ const ASTContext &getContext() const { return Context; }
+
+ ProgramStateManager &getStateManager() { return StateMgr; }
+
+ QualType getConditionType() const {
+ return getContext().IntTy;
+ }
+
+ QualType getArrayIndexType() const {
+ return ArrayIndexTy;
+ }
+
+ BasicValueFactory &getBasicValueFactory() { return BasicVals; }
+ const BasicValueFactory &getBasicValueFactory() const { return BasicVals; }
+
+ SymbolManager &getSymbolManager() { return SymMgr; }
+ const SymbolManager &getSymbolManager() const { return SymMgr; }
+
+ MemRegionManager &getRegionManager() { return MemMgr; }
+ const MemRegionManager &getRegionManager() const { return MemMgr; }
+
+ // Forwarding methods to SymbolManager.
+
+ const SymbolConjured* getConjuredSymbol(const Stmt *stmt,
+ const LocationContext *LCtx,
+ QualType type,
+ unsigned visitCount,
+ const void *symbolTag = 0) {
+ return SymMgr.getConjuredSymbol(stmt, LCtx, type, visitCount, symbolTag);
+ }
+
+ const SymbolConjured* getConjuredSymbol(const Expr *expr,
+ const LocationContext *LCtx,
+ unsigned visitCount,
+ const void *symbolTag = 0) {
+ return SymMgr.getConjuredSymbol(expr, LCtx, visitCount, symbolTag);
+ }
+
+ /// Construct an SVal representing '0' for the specified type.
+ DefinedOrUnknownSVal makeZeroVal(QualType type);
+
+ /// Make a unique symbol for the value of the given region.
+ DefinedOrUnknownSVal getRegionValueSymbolVal(const TypedValueRegion *region);
+
+ /// \brief Create a new symbol with a unique 'name'.
+ ///
+ /// We resort to conjured symbols when we cannot construct a derived symbol.
+ /// The advantage of symbols derived/built from other symbols is that we
+ /// preserve the relation between related (or even equivalent) expressions, so
+ /// conjured symbols should be used sparingly.
+ DefinedOrUnknownSVal getConjuredSymbolVal(const void *symbolTag,
+ const Expr *expr,
+ const LocationContext *LCtx,
+ unsigned count);
+ DefinedOrUnknownSVal getConjuredSymbolVal(const void *symbolTag,
+ const Expr *expr,
+ const LocationContext *LCtx,
+ QualType type,
+ unsigned count);
+
+ DefinedOrUnknownSVal getConjuredSymbolVal(const Stmt *stmt,
+ const LocationContext *LCtx,
+ QualType type,
+ unsigned visitCount);
+
+ DefinedOrUnknownSVal getDerivedRegionValueSymbolVal(
+ SymbolRef parentSymbol, const TypedValueRegion *region);
+
+ DefinedSVal getMetadataSymbolVal(
+ const void *symbolTag, const MemRegion *region,
+ const Expr *expr, QualType type, unsigned count);
+
+ DefinedSVal getFunctionPointer(const FunctionDecl *func);
+
+ DefinedSVal getBlockPointer(const BlockDecl *block, CanQualType locTy,
+ const LocationContext *locContext);
+
+ NonLoc makeCompoundVal(QualType type, llvm::ImmutableList<SVal> vals) {
+ return nonloc::CompoundVal(BasicVals.getCompoundValData(type, vals));
+ }
+
+ NonLoc makeLazyCompoundVal(const StoreRef &store,
+ const TypedValueRegion *region) {
+ return nonloc::LazyCompoundVal(
+ BasicVals.getLazyCompoundValData(store, region));
+ }
+
+ NonLoc makeZeroArrayIndex() {
+ return nonloc::ConcreteInt(BasicVals.getValue(0, ArrayIndexTy));
+ }
+
+ NonLoc makeArrayIndex(uint64_t idx) {
+ return nonloc::ConcreteInt(BasicVals.getValue(idx, ArrayIndexTy));
+ }
+
+ SVal convertToArrayIndex(SVal val);
+
+ nonloc::ConcreteInt makeIntVal(const IntegerLiteral* integer) {
+ return nonloc::ConcreteInt(
+ BasicVals.getValue(integer->getValue(),
+ integer->getType()->isUnsignedIntegerOrEnumerationType()));
+ }
+
+ nonloc::ConcreteInt makeBoolVal(const ObjCBoolLiteralExpr *boolean) {
+ return makeTruthVal(boolean->getValue(), boolean->getType());
+ }
+
+ nonloc::ConcreteInt makeBoolVal(const CXXBoolLiteralExpr *boolean);
+
+ nonloc::ConcreteInt makeIntVal(const llvm::APSInt& integer) {
+ return nonloc::ConcreteInt(BasicVals.getValue(integer));
+ }
+
+ loc::ConcreteInt makeIntLocVal(const llvm::APSInt &integer) {
+ return loc::ConcreteInt(BasicVals.getValue(integer));
+ }
+
+ NonLoc makeIntVal(const llvm::APInt& integer, bool isUnsigned) {
+ return nonloc::ConcreteInt(BasicVals.getValue(integer, isUnsigned));
+ }
+
+ DefinedSVal makeIntVal(uint64_t integer, QualType type) {
+ if (Loc::isLocType(type))
+ return loc::ConcreteInt(BasicVals.getValue(integer, type));
+
+ return nonloc::ConcreteInt(BasicVals.getValue(integer, type));
+ }
+
+ NonLoc makeIntVal(uint64_t integer, bool isUnsigned) {
+ return nonloc::ConcreteInt(BasicVals.getIntValue(integer, isUnsigned));
+ }
+
+ NonLoc makeIntValWithPtrWidth(uint64_t integer, bool isUnsigned) {
+ return nonloc::ConcreteInt(
+ BasicVals.getIntWithPtrWidth(integer, isUnsigned));
+ }
+
+ NonLoc makeIntVal(uint64_t integer, unsigned bitWidth, bool isUnsigned) {
+ return nonloc::ConcreteInt(
+ BasicVals.getValue(integer, bitWidth, isUnsigned));
+ }
+
+ NonLoc makeLocAsInteger(Loc loc, unsigned bits) {
+ return nonloc::LocAsInteger(BasicVals.getPersistentSValWithData(loc, bits));
+ }
+
+ NonLoc makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
+ const llvm::APSInt& rhs, QualType type);
+
+ NonLoc makeNonLoc(const llvm::APSInt& rhs, BinaryOperator::Opcode op,
+ const SymExpr *lhs, QualType type);
+
+ NonLoc makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
+ const SymExpr *rhs, QualType type);
+
+ /// \brief Create a NonLoc value for cast.
+ NonLoc makeNonLoc(const SymExpr *operand, QualType fromTy, QualType toTy);
+
+ nonloc::ConcreteInt makeTruthVal(bool b, QualType type) {
+ return nonloc::ConcreteInt(BasicVals.getTruthValue(b, type));
+ }
+
+ nonloc::ConcreteInt makeTruthVal(bool b) {
+ return nonloc::ConcreteInt(BasicVals.getTruthValue(b));
+ }
+
+ Loc makeNull() {
+ return loc::ConcreteInt(BasicVals.getZeroWithPtrWidth());
+ }
+
+ Loc makeLoc(SymbolRef sym) {
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+ }
+
+ Loc makeLoc(const MemRegion* region) {
+ return loc::MemRegionVal(region);
+ }
+
+ Loc makeLoc(const AddrLabelExpr *expr) {
+ return loc::GotoLabel(expr->getLabel());
+ }
+
+ Loc makeLoc(const llvm::APSInt& integer) {
+ return loc::ConcreteInt(BasicVals.getValue(integer));
+ }
+
+};
+
+SValBuilder* createSimpleSValBuilder(llvm::BumpPtrAllocator &alloc,
+ ASTContext &context,
+ ProgramStateManager &stateMgr);
+
+} // end GR namespace
+
+} // end clang namespace
+
+#endif
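
Editor's note (not part of the patch): a minimal sketch of the typical client-side flow for the interface above: build a concrete operand with one of the make* helpers, then let evalBinOp fold the operation (it dispatches to the Loc/NonLoc-specific evaluators). addOne is a hypothetical helper and assumes the caller already has an SValBuilder and a ProgramStateRef, e.g. from a checker callback.

#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"

using namespace clang;
using namespace ento;

// Hypothetical helper: fold "V + 1" of type T in the given state.
SVal addOne(SValBuilder &SVB, ProgramStateRef State, SVal V, QualType T) {
  NonLoc One = SVB.makeIntVal(1, /*isUnsigned=*/false);
  // evalBinOp dispatches to evalBinOpNN/LL/LN depending on the operand
  // kinds and may return UnknownVal when it cannot reason about them.
  return SVB.evalBinOp(State, BO_Add, V, One, T);
}
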
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
new file mode 100644
index 0000000..ed01db2
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SVals.h
@@ -0,0 +1,517 @@
+//== SVals.h - Abstract Values for Static Analysis ---------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SVal, Loc, and NonLoc, classes that represent
+// abstract r-values for use with path-sensitive value tracking.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_RVALUE_H
+#define LLVM_CLANG_GR_RVALUE_H
+
+#include "clang/Basic/LLVM.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
+#include "llvm/ADT/ImmutableList.h"
+
+//==------------------------------------------------------------------------==//
+// Base SVal types.
+//==------------------------------------------------------------------------==//
+
+namespace clang {
+
+namespace ento {
+
+class CompoundValData;
+class LazyCompoundValData;
+class ProgramState;
+class BasicValueFactory;
+class MemRegion;
+class TypedRegion;
+class MemRegionManager;
+class ProgramStateManager;
+class SValBuilder;
+
+/// SVal - This represents a symbolic expression, which can be either
+/// an L-value or an R-value.
+///
+class SVal {
+public:
+ enum BaseKind {
+ // The enumerators must be representable using 2 bits.
+ UndefinedKind = 0, // for subclass UndefinedVal (an uninitialized value)
+ UnknownKind = 1, // for subclass UnknownVal (a void value)
+ LocKind = 2, // for subclass Loc (an L-value)
+ NonLocKind = 3 // for subclass NonLoc (an R-value that's not
+ // an L-value)
+ };
+ enum { BaseBits = 2, BaseMask = 0x3 };
+
+protected:
+ const void *Data;
+
+ /// The lowest 2 bits are a BaseKind (0 -- 3).
+ /// The higher bits are an unsigned "kind" value.
+ unsigned Kind;
+
+ explicit SVal(const void *d, bool isLoc, unsigned ValKind)
+ : Data(d), Kind((isLoc ? LocKind : NonLocKind) | (ValKind << BaseBits)) {}
+
+ explicit SVal(BaseKind k, const void *D = NULL)
+ : Data(D), Kind(k) {}
+
+public:
+ explicit SVal() : Data(0), Kind(0) {}
+ ~SVal() {}
+
+ /// BufferTy - A temporary buffer to hold a set of SVals.
+ typedef SmallVector<SVal,5> BufferTy;
+
+ inline unsigned getRawKind() const { return Kind; }
+ inline BaseKind getBaseKind() const { return (BaseKind) (Kind & BaseMask); }
+ inline unsigned getSubKind() const { return (Kind & ~BaseMask) >> BaseBits; }
+
+ // This method is required for using SVal in a FoldingSetNode. It
+ // extracts a unique signature for this SVal object.
+ inline void Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.AddInteger((unsigned) getRawKind());
+ ID.AddPointer(Data);
+ }
+
+ inline bool operator==(const SVal& R) const {
+ return getRawKind() == R.getRawKind() && Data == R.Data;
+ }
+
+ inline bool operator!=(const SVal& R) const {
+ return !(*this == R);
+ }
+
+ inline bool isUnknown() const {
+ return getRawKind() == UnknownKind;
+ }
+
+ inline bool isUndef() const {
+ return getRawKind() == UndefinedKind;
+ }
+
+ inline bool isUnknownOrUndef() const {
+ return getRawKind() <= UnknownKind;
+ }
+
+ inline bool isValid() const {
+ return getRawKind() > UnknownKind;
+ }
+
+ bool isConstant() const;
+
+ bool isConstant(int I) const;
+
+ bool isZeroConstant() const;
+
+ /// hasConjuredSymbol - If this SVal wraps a conjured symbol, return true.
+ bool hasConjuredSymbol() const;
+
+ /// getAsFunctionDecl - If this SVal is a MemRegionVal and wraps a
+ /// CodeTextRegion wrapping a FunctionDecl, return that FunctionDecl.
+ /// Otherwise return 0.
+ const FunctionDecl *getAsFunctionDecl() const;
+
+ /// If this SVal is a location (subclasses Loc) and
+ /// wraps a symbol, return that SymbolRef. Otherwise return 0.
+ SymbolRef getAsLocSymbol() const;
+
+ /// Get the symbol in the SVal or its base region.
+ SymbolRef getLocSymbolInBase() const;
+
+ /// If this SVal wraps a symbol return that SymbolRef.
+ /// Otherwise, return 0.
+ SymbolRef getAsSymbol() const;
+
+ /// getAsSymbolicExpression - If this SVal wraps a symbolic expression then
+ /// return that expression. Otherwise return NULL.
+ const SymExpr *getAsSymbolicExpression() const;
+
+ const SymExpr* getAsSymExpr() const;
+
+ const MemRegion *getAsRegion() const;
+
+ void dumpToStream(raw_ostream &OS) const;
+ void dump() const;
+
+ SymExpr::symbol_iterator symbol_begin() const {
+ const SymExpr *SE = getAsSymbolicExpression();
+ if (SE)
+ return SE->symbol_begin();
+ else
+ return SymExpr::symbol_iterator();
+ }
+
+ SymExpr::symbol_iterator symbol_end() const {
+ return SymExpr::symbol_end();
+ }
+
+ // Implement isa<T> support.
+ static inline bool classof(const SVal*) { return true; }
+};
+
+
+class UndefinedVal : public SVal {
+public:
+ UndefinedVal() : SVal(UndefinedKind) {}
+ UndefinedVal(const void *D) : SVal(UndefinedKind, D) {}
+
+ static inline bool classof(const SVal* V) {
+ return V->getBaseKind() == UndefinedKind;
+ }
+
+ const void *getData() const { return Data; }
+};
+
+class DefinedOrUnknownSVal : public SVal {
+private:
+ // Do not implement. We want calling these methods to be a compiler
+ // error since they are tautologically false.
+ bool isUndef() const;
+ bool isValid() const;
+
+protected:
+ explicit DefinedOrUnknownSVal(const void *d, bool isLoc, unsigned ValKind)
+ : SVal(d, isLoc, ValKind) {}
+
+ explicit DefinedOrUnknownSVal(BaseKind k, void *D = NULL)
+ : SVal(k, D) {}
+
+public:
+ // Implement isa<T> support.
+ static inline bool classof(const SVal *V) {
+ return !V->isUndef();
+ }
+};
+
+class UnknownVal : public DefinedOrUnknownSVal {
+public:
+ explicit UnknownVal() : DefinedOrUnknownSVal(UnknownKind) {}
+
+ static inline bool classof(const SVal *V) {
+ return V->getBaseKind() == UnknownKind;
+ }
+};
+
+class DefinedSVal : public DefinedOrUnknownSVal {
+private:
+ // Do not implement. We want calling these methods to be a compiler
+ // error since they are tautologically true/false.
+ bool isUnknown() const;
+ bool isUnknownOrUndef() const;
+ bool isValid() const;
+protected:
+ explicit DefinedSVal(const void *d, bool isLoc, unsigned ValKind)
+ : DefinedOrUnknownSVal(d, isLoc, ValKind) {}
+public:
+ // Implement isa<T> support.
+ static inline bool classof(const SVal *V) {
+ return !V->isUnknownOrUndef();
+ }
+};
+
+class NonLoc : public DefinedSVal {
+protected:
+ explicit NonLoc(unsigned SubKind, const void *d)
+ : DefinedSVal(d, false, SubKind) {}
+
+public:
+ void dumpToStream(raw_ostream &Out) const;
+
+ // Implement isa<T> support.
+ static inline bool classof(const SVal* V) {
+ return V->getBaseKind() == NonLocKind;
+ }
+};
+
+class Loc : public DefinedSVal {
+protected:
+ explicit Loc(unsigned SubKind, const void *D)
+ : DefinedSVal(const_cast<void*>(D), true, SubKind) {}
+
+public:
+ void dumpToStream(raw_ostream &Out) const;
+
+ Loc(const Loc& X) : DefinedSVal(X.Data, true, X.getSubKind()) {}
+
+ // Implement isa<T> support.
+ static inline bool classof(const SVal* V) {
+ return V->getBaseKind() == LocKind;
+ }
+
+ static inline bool isLocType(QualType T) {
+ return T->isAnyPointerType() || T->isBlockPointerType() ||
+ T->isReferenceType();
+ }
+};
+
+//==------------------------------------------------------------------------==//
+// Subclasses of NonLoc.
+//==------------------------------------------------------------------------==//
+
+namespace nonloc {
+
+enum Kind { ConcreteIntKind, SymbolValKind, SymExprValKind,
+ LocAsIntegerKind, CompoundValKind, LazyCompoundValKind };
+
+/// \brief Represents a symbolic expression.
+class SymbolVal : public NonLoc {
+public:
+ SymbolVal(SymbolRef sym) : NonLoc(SymbolValKind, sym) {}
+
+ SymbolRef getSymbol() const {
+ return (const SymExpr*) Data;
+ }
+
+ bool isExpression() {
+ return !isa<SymbolData>(getSymbol());
+ }
+
+ static inline bool classof(const SVal* V) {
+ return V->getBaseKind() == NonLocKind &&
+ V->getSubKind() == SymbolValKind;
+ }
+
+ static inline bool classof(const NonLoc* V) {
+ return V->getSubKind() == SymbolValKind;
+ }
+};
+
+/// \brief Value representing an integer constant.
+class ConcreteInt : public NonLoc {
+public:
+ explicit ConcreteInt(const llvm::APSInt& V) : NonLoc(ConcreteIntKind, &V) {}
+
+ const llvm::APSInt& getValue() const {
+ return *static_cast<const llvm::APSInt*>(Data);
+ }
+
+ // Transfer functions for binary/unary operations on ConcreteInts.
+ SVal evalBinOp(SValBuilder &svalBuilder, BinaryOperator::Opcode Op,
+ const ConcreteInt& R) const;
+
+ ConcreteInt evalComplement(SValBuilder &svalBuilder) const;
+
+ ConcreteInt evalMinus(SValBuilder &svalBuilder) const;
+
+ // Implement isa<T> support.
+ static inline bool classof(const SVal* V) {
+ return V->getBaseKind() == NonLocKind &&
+ V->getSubKind() == ConcreteIntKind;
+ }
+
+ static inline bool classof(const NonLoc* V) {
+ return V->getSubKind() == ConcreteIntKind;
+ }
+};
+
+class LocAsInteger : public NonLoc {
+ friend class ento::SValBuilder;
+
+ explicit LocAsInteger(const std::pair<SVal, uintptr_t>& data) :
+ NonLoc(LocAsIntegerKind, &data) {
+ assert (isa<Loc>(data.first));
+ }
+
+public:
+
+ Loc getLoc() const {
+ return cast<Loc>(((std::pair<SVal, uintptr_t>*) Data)->first);
+ }
+
+ const Loc& getPersistentLoc() const {
+ const SVal& V = ((std::pair<SVal, uintptr_t>*) Data)->first;
+ return cast<Loc>(V);
+ }
+
+ unsigned getNumBits() const {
+ return ((std::pair<SVal, unsigned>*) Data)->second;
+ }
+
+ // Implement isa<T> support.
+ static inline bool classof(const SVal* V) {
+ return V->getBaseKind() == NonLocKind &&
+ V->getSubKind() == LocAsIntegerKind;
+ }
+
+ static inline bool classof(const NonLoc* V) {
+ return V->getSubKind() == LocAsIntegerKind;
+ }
+};
+
+class CompoundVal : public NonLoc {
+ friend class ento::SValBuilder;
+
+ explicit CompoundVal(const CompoundValData* D) : NonLoc(CompoundValKind, D) {}
+
+public:
+ const CompoundValData* getValue() const {
+ return static_cast<const CompoundValData*>(Data);
+ }
+
+ typedef llvm::ImmutableList<SVal>::iterator iterator;
+ iterator begin() const;
+ iterator end() const;
+
+ static bool classof(const SVal* V) {
+ return V->getBaseKind() == NonLocKind && V->getSubKind() == CompoundValKind;
+ }
+
+ static bool classof(const NonLoc* V) {
+ return V->getSubKind() == CompoundValKind;
+ }
+};
+
+class LazyCompoundVal : public NonLoc {
+ friend class ento::SValBuilder;
+
+ explicit LazyCompoundVal(const LazyCompoundValData *D)
+ : NonLoc(LazyCompoundValKind, D) {}
+public:
+ const LazyCompoundValData *getCVData() const {
+ return static_cast<const LazyCompoundValData*>(Data);
+ }
+ const void *getStore() const;
+ const TypedRegion *getRegion() const;
+
+ static bool classof(const SVal *V) {
+ return V->getBaseKind() == NonLocKind &&
+ V->getSubKind() == LazyCompoundValKind;
+ }
+ static bool classof(const NonLoc *V) {
+ return V->getSubKind() == LazyCompoundValKind;
+ }
+};
+
+} // end namespace ento::nonloc
+
+//==------------------------------------------------------------------------==//
+// Subclasses of Loc.
+//==------------------------------------------------------------------------==//
+
+namespace loc {
+
+enum Kind { GotoLabelKind, MemRegionKind, ConcreteIntKind, ObjCPropRefKind };
+
+class GotoLabel : public Loc {
+public:
+ explicit GotoLabel(LabelDecl *Label) : Loc(GotoLabelKind, Label) {}
+
+ const LabelDecl *getLabel() const {
+ return static_cast<const LabelDecl*>(Data);
+ }
+
+ static inline bool classof(const SVal* V) {
+ return V->getBaseKind() == LocKind && V->getSubKind() == GotoLabelKind;
+ }
+
+ static inline bool classof(const Loc* V) {
+ return V->getSubKind() == GotoLabelKind;
+ }
+};
+
+
+class MemRegionVal : public Loc {
+public:
+ explicit MemRegionVal(const MemRegion* r) : Loc(MemRegionKind, r) {}
+
+ const MemRegion* getRegion() const {
+ return static_cast<const MemRegion*>(Data);
+ }
+
+ const MemRegion* stripCasts() const;
+
+ template <typename REGION>
+ const REGION* getRegionAs() const {
+ return llvm::dyn_cast<REGION>(getRegion());
+ }
+
+ inline bool operator==(const MemRegionVal& R) const {
+ return getRegion() == R.getRegion();
+ }
+
+ inline bool operator!=(const MemRegionVal& R) const {
+ return getRegion() != R.getRegion();
+ }
+
+ // Implement isa<T> support.
+ static inline bool classof(const SVal* V) {
+ return V->getBaseKind() == LocKind &&
+ V->getSubKind() == MemRegionKind;
+ }
+
+ static inline bool classof(const Loc* V) {
+ return V->getSubKind() == MemRegionKind;
+ }
+};
+
+class ConcreteInt : public Loc {
+public:
+ explicit ConcreteInt(const llvm::APSInt& V) : Loc(ConcreteIntKind, &V) {}
+
+ const llvm::APSInt& getValue() const {
+ return *static_cast<const llvm::APSInt*>(Data);
+ }
+
+ // Transfer functions for binary/unary operations on ConcreteInts.
+ SVal evalBinOp(BasicValueFactory& BasicVals, BinaryOperator::Opcode Op,
+ const ConcreteInt& R) const;
+
+ // Implement isa<T> support.
+ static inline bool classof(const SVal* V) {
+ return V->getBaseKind() == LocKind &&
+ V->getSubKind() == ConcreteIntKind;
+ }
+
+ static inline bool classof(const Loc* V) {
+ return V->getSubKind() == ConcreteIntKind;
+ }
+};
+
+/// \brief Pseudo-location SVal used by the ExprEngine to simulate a "load" or
+/// "store" of an ObjC property for the dot syntax.
+class ObjCPropRef : public Loc {
+public:
+ explicit ObjCPropRef(const ObjCPropertyRefExpr *E)
+ : Loc(ObjCPropRefKind, E) {}
+
+ const ObjCPropertyRefExpr *getPropRefExpr() const {
+ return static_cast<const ObjCPropertyRefExpr *>(Data);
+ }
+
+ // Implement isa<T> support.
+ static inline bool classof(const SVal* V) {
+ return V->getBaseKind() == LocKind &&
+ V->getSubKind() == ObjCPropRefKind;
+ }
+
+ static inline bool classof(const Loc* V) {
+ return V->getSubKind() == ObjCPropRefKind;
+ }
+};
+
+} // end ento::loc namespace
+} // end GR namespace
+
+} // end clang namespace
+
+namespace llvm {
+static inline raw_ostream &operator<<(raw_ostream &os,
+ clang::ento::SVal V) {
+ V.dumpToStream(os);
+ return os;
+}
+
+} // end llvm namespace
+
+#endif
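
Editor's note (not part of the patch): a short sketch of how the classof() hooks above are consumed -- ordinary llvm::isa/cast classification on an SVal. regionIfKnown is a hypothetical helper.

#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"

using namespace clang;
using namespace ento;

// Hypothetical helper: return the region a location value refers to, if any.
const MemRegion *regionIfKnown(SVal V) {
  if (V.isUnknownOrUndef())
    return 0;
  if (isa<loc::MemRegionVal>(V))
    return cast<loc::MemRegionVal>(V).getRegion();
  // SVal::getAsRegion() packages a similar check (and also looks through
  // nonloc::LocAsInteger), so most clients simply call that instead.
  return 0;
}
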
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
new file mode 100644
index 0000000..5315f4b
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/Store.h
@@ -0,0 +1,304 @@
+//== Store.h - Interface for maps from Locations to Values ------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the types Store and StoreManager.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_STORE_H
+#define LLVM_CLANG_GR_STORE_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/Optional.h"
+
+namespace clang {
+
+class Stmt;
+class Expr;
+class ObjCIvarDecl;
+class StackFrameContext;
+
+namespace ento {
+
+class CallOrObjCMessage;
+class ProgramState;
+class ProgramStateManager;
+class SubRegionMap;
+
+class StoreManager {
+protected:
+ SValBuilder &svalBuilder;
+ ProgramStateManager &StateMgr;
+
+ /// MRMgr - Manages region objects associated with this StoreManager.
+ MemRegionManager &MRMgr;
+ ASTContext &Ctx;
+
+ StoreManager(ProgramStateManager &stateMgr);
+
+public:
+ virtual ~StoreManager() {}
+
+ /// Return the value bound to the specified location in a given store.
+ /// \param[in] store The store to query.
+ /// \param[in] loc The symbolic memory location.
+ /// \param[in] T An optional type that provides a hint indicating the
+ /// expected type of the returned value. This is used if the value is
+ /// lazily computed.
+ /// \return The value bound to the location \c loc.
+ virtual SVal getBinding(Store store, Loc loc, QualType T = QualType()) = 0;
+
+ /// Return a store with the specified value bound to the given location.
+ /// \param[in] store The initial store.
+ /// \param[in] loc The symbolic memory location.
+ /// \param[in] val The value to bind to location \c loc.
+ /// \return A StoreRef that contains the same bindings as \c store, with
+ /// the addition of having the value specified by \c val bound to the
+ /// location given for \c loc.
+ virtual StoreRef Bind(Store store, Loc loc, SVal val) = 0;
+
+ virtual StoreRef BindDefault(Store store, const MemRegion *R, SVal V);
+ virtual StoreRef Remove(Store St, Loc L) = 0;
+
+ /// BindCompoundLiteral - Return a store that has the bindings currently
+ /// in 'store' plus a binding for the compound literal. 'cl' is the compound
+ /// literal expression, 'LC' is the location context in which it occurs, and
+ /// 'v' is the value to bind to the compound literal's region.
+ virtual StoreRef BindCompoundLiteral(Store store,
+ const CompoundLiteralExpr *cl,
+ const LocationContext *LC, SVal v) = 0;
+
+ /// getInitialStore - Returns the initial "empty" store representing the
+ /// value bindings upon entry to an analyzed function.
+ virtual StoreRef getInitialStore(const LocationContext *InitLoc) = 0;
+
+ /// getRegionManager - Returns the internal RegionManager object that is
+ /// used to query and manipulate MemRegion objects.
+ MemRegionManager& getRegionManager() { return MRMgr; }
+
+ /// getSubRegionMap - Returns an opaque map object that clients can query
+ /// to get the subregions of a given MemRegion object. It is the
+ /// caller's responsibility to 'delete' the returned map.
+ virtual SubRegionMap *getSubRegionMap(Store store) = 0;
+
+ virtual Loc getLValueVar(const VarDecl *VD, const LocationContext *LC) {
+ return svalBuilder.makeLoc(MRMgr.getVarRegion(VD, LC));
+ }
+
+ Loc getLValueCompoundLiteral(const CompoundLiteralExpr *CL,
+ const LocationContext *LC) {
+ return loc::MemRegionVal(MRMgr.getCompoundLiteralRegion(CL, LC));
+ }
+
+ virtual SVal getLValueIvar(const ObjCIvarDecl *decl, SVal base);
+
+ virtual SVal getLValueField(const FieldDecl *D, SVal Base) {
+ return getLValueFieldOrIvar(D, Base);
+ }
+
+ virtual SVal getLValueElement(QualType elementType, NonLoc offset, SVal Base);
+
+ // FIXME: This should soon be eliminated altogether; clients should deal with
+ // region extents directly.
+ virtual DefinedOrUnknownSVal getSizeInElements(ProgramStateRef state,
+ const MemRegion *region,
+ QualType EleTy) {
+ return UnknownVal();
+ }
+
+ /// ArrayToPointer - Used by ExprEngine::VisitCast to handle implicit
+ /// conversions between arrays and pointers.
+ virtual SVal ArrayToPointer(Loc Array) = 0;
+
+ /// Evaluates DerivedToBase casts.
+ virtual SVal evalDerivedToBase(SVal derived, QualType basePtrType) = 0;
+
+ /// \brief Evaluates a C++ dynamic_cast.
+ /// The callback may result in the following 3 scenarios:
+ /// - Successful cast (ex: derived is subclass of base).
+ /// - Failed cast (ex: derived is definitely not a subclass of base).
+ /// - We don't know (base is a symbolic region and we don't have
+ /// enough info to determine if the cast will succeed at run time).
+ /// The function returns an SVal representing the derived class; it is
+ /// valid only if the Failed flag is set to false.
+ virtual SVal evalDynamicCast(SVal base, QualType derivedPtrType,
+ bool &Failed) = 0;
+
+ class CastResult {
+ ProgramStateRef state;
+ const MemRegion *region;
+ public:
+ ProgramStateRef getState() const { return state; }
+ const MemRegion* getRegion() const { return region; }
+ CastResult(ProgramStateRef s, const MemRegion* r = 0) : state(s), region(r){}
+ };
+
+ const ElementRegion *GetElementZeroRegion(const MemRegion *R, QualType T);
+
+ /// castRegion - Used by ExprEngine::VisitCast to handle casts from
+ /// a MemRegion* to a specific location type. 'region' is the region being
+ /// cast and 'CastToTy' is the result type of the cast.
+ const MemRegion *castRegion(const MemRegion *region, QualType CastToTy);
+
+ virtual StoreRef removeDeadBindings(Store store, const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper) = 0;
+
+ virtual StoreRef BindDecl(Store store, const VarRegion *VR, SVal initVal) = 0;
+
+ virtual StoreRef BindDeclWithNoInit(Store store, const VarRegion *VR) = 0;
+
+ virtual bool includedInBindings(Store store,
+ const MemRegion *region) const = 0;
+
+ /// If the StoreManager supports it, increment the reference count of
+ /// the specified Store object.
+ virtual void incrementReferenceCount(Store store) {}
+
+ /// If the StoreManager supports it, decrement the reference count of
+ /// the specified Store object. If the reference count hits 0, the memory
+ /// associated with the object is recycled.
+ virtual void decrementReferenceCount(Store store) {}
+
+ typedef llvm::DenseSet<SymbolRef> InvalidatedSymbols;
+ typedef SmallVector<const MemRegion *, 8> InvalidatedRegions;
+
+ /// invalidateRegions - Clears out the specified regions from the store,
+ /// marking their values as unknown. Depending on the store, this may also
+ /// invalidate additional regions that may have changed based on accessing
+ /// the given regions. Optionally, invalidates non-static globals as well.
+ /// \param[in] store The initial store.
+ /// \param[in] Regions The regions to invalidate.
+ /// \param[in] E The current statement being evaluated. Used to conjure
+ /// symbols to mark the values of invalidated regions.
+ /// \param[in] Count The current block count. Used to conjure
+ /// symbols to mark the values of invalidated regions.
+ /// \param[in] LCtx The location context at the point of invalidation.
+ /// \param[in,out] IS A set to fill with any symbols that are no longer
+ /// accessible.
+ /// \param[in] Call The call expression which will be used to determine which
+ /// globals should get invalidated.
+ /// \param[in,out] Invalidated A vector to fill with any regions being
+ /// invalidated. This should include any regions explicitly invalidated
+ /// even if they do not currently have bindings. Pass \c NULL if this
+ /// information will not be used.
+ virtual StoreRef invalidateRegions(Store store,
+ ArrayRef<const MemRegion *> Regions,
+ const Expr *E, unsigned Count,
+ const LocationContext *LCtx,
+ InvalidatedSymbols &IS,
+ const CallOrObjCMessage *Call,
+ InvalidatedRegions *Invalidated) = 0;
+
+ /// enterStackFrame - Lets the StoreManager do something when the execution
+ /// engine is about to execute into a callee.
+ virtual StoreRef enterStackFrame(ProgramStateRef state,
+ const LocationContext *callerCtx,
+ const StackFrameContext *calleeCtx);
+
+ virtual void print(Store store, raw_ostream &Out,
+ const char* nl, const char *sep) = 0;
+
+ class BindingsHandler {
+ public:
+ virtual ~BindingsHandler();
+ virtual bool HandleBinding(StoreManager& SMgr, Store store,
+ const MemRegion *region, SVal val) = 0;
+ };
+
+ class FindUniqueBinding :
+ public BindingsHandler {
+ SymbolRef Sym;
+ const MemRegion* Binding;
+ bool First;
+
+ public:
+ FindUniqueBinding(SymbolRef sym) : Sym(sym), Binding(0), First(true) {}
+
+ bool HandleBinding(StoreManager& SMgr, Store store, const MemRegion* R,
+ SVal val);
+ operator bool() { return First && Binding; }
+ const MemRegion *getRegion() { return Binding; }
+ };
+
+ /// iterBindings - Iterate over the bindings in the Store.
+ virtual void iterBindings(Store store, BindingsHandler& f) = 0;
+
+protected:
+ const MemRegion *MakeElementRegion(const MemRegion *baseRegion,
+ QualType pointeeTy, uint64_t index = 0);
+
+ /// CastRetrievedVal - Used by subclasses of StoreManager to implement
+ /// implicit casts that arise from loads from regions that are reinterpreted
+ /// as another region.
+ SVal CastRetrievedVal(SVal val, const TypedValueRegion *region,
+ QualType castTy, bool performTestOnly = true);
+
+private:
+ SVal getLValueFieldOrIvar(const Decl *decl, SVal base);
+};
+
+
+inline StoreRef::StoreRef(Store store, StoreManager & smgr)
+ : store(store), mgr(smgr) {
+ if (store)
+ mgr.incrementReferenceCount(store);
+}
+
+inline StoreRef::StoreRef(const StoreRef &sr)
+ : store(sr.store), mgr(sr.mgr)
+{
+ if (store)
+ mgr.incrementReferenceCount(store);
+}
+
+inline StoreRef::~StoreRef() {
+ if (store)
+ mgr.decrementReferenceCount(store);
+}
+
+inline StoreRef &StoreRef::operator=(StoreRef const &newStore) {
+ assert(&newStore.mgr == &mgr);
+ if (store != newStore.store) {
+ mgr.incrementReferenceCount(newStore.store);
+ mgr.decrementReferenceCount(store);
+ store = newStore.getStore();
+ }
+ return *this;
+}
+
+// FIXME: Do we still need this?
+/// SubRegionMap - An abstract interface that represents a queryable map
+/// between MemRegion objects and their subregions.
+class SubRegionMap {
+ virtual void anchor();
+public:
+ virtual ~SubRegionMap() {}
+
+ class Visitor {
+ virtual void anchor();
+ public:
+ virtual ~Visitor() {}
+ virtual bool Visit(const MemRegion* Parent, const MemRegion* SubRegion) = 0;
+ };
+
+ virtual bool iterSubRegions(const MemRegion *region, Visitor& V) const = 0;
+};
+
+// FIXME: Do we need to pass ProgramStateManager anymore?
+StoreManager *CreateRegionStoreManager(ProgramStateManager& StMgr);
+StoreManager *CreateFieldsOnlyRegionStoreManager(ProgramStateManager& StMgr);
+
+} // end GR namespace
+
+} // end clang namespace
+
+#endif
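
Editor's note (not part of the patch): a small sketch of the BindingsHandler machinery above in use. FindUniqueBinding visits every binding in a store and reports a region only when exactly one region is bound to the symbol of interest (its matching logic lives in the implementation file, not shown here). uniqueRegionFor is a hypothetical helper that assumes a StoreManager and Store are already in hand.

#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"

using namespace clang;
using namespace ento;

// Hypothetical helper: find the single region bound to Sym, if unique.
const MemRegion *uniqueRegionFor(StoreManager &SM, Store S, SymbolRef Sym) {
  StoreManager::FindUniqueBinding Finder(Sym);
  SM.iterBindings(S, Finder);
  // operator bool() is true only when exactly one matching binding was seen.
  return Finder ? Finder.getRegion() : 0;
}
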
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h
new file mode 100644
index 0000000..d5ba003
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h
@@ -0,0 +1,51 @@
+//== StoreRef.h - Smart pointer for store objects ---------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the type StoreRef.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_STOREREF_H
+#define LLVM_CLANG_GR_STOREREF_H
+
+#include <cassert>
+
+namespace clang {
+namespace ento {
+
+/// Store - This opaque type encapsulates an immutable mapping from
+/// locations to values. At a high level, it represents the symbolic
+/// memory model. Different subclasses of StoreManager may choose
+/// different types to represent the locations and values.
+typedef const void *Store;
+
+class StoreManager;
+
+class StoreRef {
+ Store store;
+ StoreManager &mgr;
+public:
+ StoreRef(Store, StoreManager &);
+ StoreRef(const StoreRef &);
+ StoreRef &operator=(StoreRef const &);
+
+ bool operator==(const StoreRef &x) const {
+ assert(&mgr == &x.mgr);
+ return x.store == store;
+ }
+ bool operator!=(const StoreRef &x) const { return !operator==(x); }
+
+ ~StoreRef();
+
+ Store getStore() const { return store; }
+ const StoreManager &getStoreManager() const { return mgr; }
+};
+
+}}
+#endif
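
Editor's note (not part of the patch): StoreRef is a scoped handle around the opaque Store pointer; the inline constructor, copy constructor and destructor defined in Store.h forward to the manager's reference-counting hooks, which are no-ops for managers that do not reference count. A minimal sketch, with holdStore as a hypothetical function.

#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"  // inline StoreRef definitions

using namespace clang;
using namespace ento;

void holdStore(StoreManager &SM, Store S) {
  StoreRef Ref(S, SM);    // asks SM to increment the store's reference count
  StoreRef Copy = Ref;    // the copy bumps the count again
  (void)Copy.getStore();  // the raw Store is still available when needed
}                         // both destructors decrement the count here
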
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h
new file mode 100644
index 0000000..baf57d4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h
@@ -0,0 +1,130 @@
+//== SubEngine.h - Interface of the subengine of CoreEngine --------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface of a subengine of the CoreEngine.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_GR_SUBENGINE_H
+#define LLVM_CLANG_GR_SUBENGINE_H
+
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+
+namespace clang {
+
+class CFGBlock;
+class CFGElement;
+class LocationContext;
+class Stmt;
+
+namespace ento {
+
+struct NodeBuilderContext;
+class AnalysisManager;
+class ExplodedNodeSet;
+class ExplodedNode;
+class ProgramState;
+class ProgramStateManager;
+class BlockCounter;
+class BranchNodeBuilder;
+class IndirectGotoNodeBuilder;
+class SwitchNodeBuilder;
+class EndOfFunctionNodeBuilder;
+class NodeBuilderWithSinks;
+class MemRegion;
+
+class SubEngine {
+ virtual void anchor();
+public:
+ virtual ~SubEngine() {}
+
+ virtual ProgramStateRef getInitialState(const LocationContext *InitLoc) = 0;
+
+ virtual AnalysisManager &getAnalysisManager() = 0;
+
+ virtual ProgramStateManager &getStateManager() = 0;
+
+ /// Called by CoreEngine. Used to generate new successor
+ /// nodes by processing the 'effects' of a block-level statement.
+ virtual void processCFGElement(const CFGElement E, ExplodedNode* Pred,
+ unsigned StmtIdx, NodeBuilderContext *Ctx)=0;
+
+ /// Called by CoreEngine when it starts processing a CFGBlock. The
+ /// SubEngine is expected to populate the node builder with new nodes
+ /// representing updated analysis state, or to generate no nodes at all if
+ /// the state is unchanged.
+ virtual void processCFGBlockEntrance(const BlockEdge &L,
+ NodeBuilderWithSinks &nodeBuilder) = 0;
+
+ /// Called by CoreEngine. Used to generate successor
+ /// nodes by processing the 'effects' of a branch condition.
+ virtual void processBranch(const Stmt *Condition, const Stmt *Term,
+ NodeBuilderContext& BuilderCtx,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst,
+ const CFGBlock *DstT,
+ const CFGBlock *DstF) = 0;
+
+ /// Called by CoreEngine. Used to generate successor
+ /// nodes by processing the 'effects' of a computed goto jump.
+ virtual void processIndirectGoto(IndirectGotoNodeBuilder& builder) = 0;
+
+ /// Called by CoreEngine. Used to generate successor
+ /// nodes by processing the 'effects' of a switch statement.
+ virtual void processSwitch(SwitchNodeBuilder& builder) = 0;
+
+ /// Called by CoreEngine. Used to generate end-of-path
+ /// nodes when the control reaches the end of a function.
+ virtual void processEndOfFunction(NodeBuilderContext& BC) = 0;
+
+ // Generate the entry node of the callee.
+ virtual void processCallEnter(CallEnter CE, ExplodedNode *Pred) = 0;
+
+ // Generate the first post callsite node.
+ virtual void processCallExit(ExplodedNode *Pred) = 0;
+
+ /// Called by ConstraintManager. Used to call checker-specific
+ /// logic for handling assumptions on symbolic values.
+ virtual ProgramStateRef processAssume(ProgramStateRef state,
+ SVal cond, bool assumption) = 0;
+
+ /// wantsRegionChangeUpdate - Called by ProgramStateManager to determine if a
+ /// region change should trigger a processRegionChanges update.
+ virtual bool wantsRegionChangeUpdate(ProgramStateRef state) = 0;
+
+ /// processRegionChanges - Called by ProgramStateManager whenever a change is
+ /// made to the store. Used to update checkers that track region values.
+ virtual ProgramStateRef
+ processRegionChanges(ProgramStateRef state,
+ const StoreManager::InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) = 0;
+
+
+ inline ProgramStateRef
+ processRegionChange(ProgramStateRef state,
+ const MemRegion* MR) {
+ return processRegionChanges(state, 0, MR, MR, 0);
+ }
+
+ /// printState - Called by ProgramStateManager to print checker-specific data.
+ virtual void printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) = 0;
+
+ /// Called by CoreEngine when the analysis worklist is either empty or the
+ /// maximum number of analysis steps has been reached.
+ virtual void processEndWorklist(bool hasWorkRemaining) = 0;
+};
+
+} // end GR namespace
+
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SummaryManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SummaryManager.h
new file mode 100644
index 0000000..ed87851
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SummaryManager.h
@@ -0,0 +1,61 @@
+//== SummaryManager.h - Generic handling of function summaries --*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SummaryManager and related classes, which provide
+// a generic mechanism for managing function summaries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_SUMMARY
+#define LLVM_CLANG_GR_SUMMARY
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/Allocator.h"
+
+namespace clang {
+
+namespace ento {
+
+namespace summMgr {
+
+
+/* Key kinds:
+
+ - C functions
+ - C++ functions (name + parameter types)
+ - ObjC methods:
+ - Class, selector (class method)
+ - Class, selector (instance method)
+ - Category, selector (instance method)
+ - Protocol, selector (instance method)
+ - C++ methods
+ - Class, function name + parameter types + const
+ */
+
+class SummaryKey {
+
+};
+
+} // end namespace clang::summMgr
+
+class SummaryManagerImpl {
+
+};
+
+
+template <typename T>
+class SummaryManager : SummaryManagerImpl {
+
+};
+
+} // end GR namespace
+
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
new file mode 100644
index 0000000..c7de7ef
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h
@@ -0,0 +1,668 @@
+//== SymbolManager.h - Management of Symbolic Values ------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SymbolManager, a class that manages symbolic values
+// created for use by ExprEngine and related classes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_SYMMGR_H
+#define LLVM_CLANG_GR_SYMMGR_H
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/StoreRef.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace llvm {
+class BumpPtrAllocator;
+}
+
+namespace clang {
+ class ASTContext;
+ class StackFrameContext;
+
+namespace ento {
+ class BasicValueFactory;
+ class MemRegion;
+ class SubRegion;
+ class TypedValueRegion;
+ class VarRegion;
+
+/// \brief Symbolic value. These values are used to capture the symbolic
+/// execution of the program.
+class SymExpr : public llvm::FoldingSetNode {
+ virtual void anchor();
+public:
+ enum Kind { RegionValueKind, ConjuredKind, DerivedKind, ExtentKind,
+ MetadataKind,
+ BEGIN_SYMBOLS = RegionValueKind,
+ END_SYMBOLS = MetadataKind,
+ SymIntKind, IntSymKind, SymSymKind, CastSymbolKind };
+private:
+ Kind K;
+
+protected:
+ SymExpr(Kind k) : K(k) {}
+
+public:
+ virtual ~SymExpr() {}
+
+ Kind getKind() const { return K; }
+
+ virtual void dump() const;
+
+ virtual void dumpToStream(raw_ostream &os) const {}
+
+ virtual QualType getType(ASTContext&) const = 0;
+ virtual void Profile(llvm::FoldingSetNodeID& profile) = 0;
+
+ // Implement isa<T> support.
+ static inline bool classof(const SymExpr*) { return true; }
+
+ /// \brief Iterator over symbols that the current symbol depends on.
+ ///
+ /// For SymbolData, it's the symbol itself; for expressions, it's the
+ /// expression symbol and all the operands in it. Note, SymbolDerived is
+ /// treated as SymbolData - the iterator will NOT visit the parent region.
+ class symbol_iterator {
+ SmallVector<const SymExpr*, 5> itr;
+ void expand();
+ public:
+ symbol_iterator() {}
+ symbol_iterator(const SymExpr *SE);
+
+ symbol_iterator &operator++();
+ const SymExpr* operator*();
+
+ bool operator==(const symbol_iterator &X) const;
+ bool operator!=(const symbol_iterator &X) const;
+ };
+
+ symbol_iterator symbol_begin() const {
+ return symbol_iterator(this);
+ }
+ static symbol_iterator symbol_end() { return symbol_iterator(); }
+};
+
+typedef const SymExpr* SymbolRef;
+typedef llvm::SmallVector<SymbolRef, 2> SymbolRefSmallVectorTy;
+
+typedef unsigned SymbolID;
+/// \brief A symbol representing data which can be stored in a memory location
+/// (region).
+class SymbolData : public SymExpr {
+ virtual void anchor();
+ const SymbolID Sym;
+
+protected:
+ SymbolData(Kind k, SymbolID sym) : SymExpr(k), Sym(sym) {}
+
+public:
+ virtual ~SymbolData() {}
+
+ SymbolID getSymbolID() const { return Sym; }
+
+ // Implement isa<T> support.
+ static inline bool classof(const SymExpr *SE) {
+ Kind k = SE->getKind();
+ return k >= BEGIN_SYMBOLS && k <= END_SYMBOLS;
+ }
+};
+
+/// \brief A symbol representing the value stored at a MemRegion.
+class SymbolRegionValue : public SymbolData {
+ const TypedValueRegion *R;
+
+public:
+ SymbolRegionValue(SymbolID sym, const TypedValueRegion *r)
+ : SymbolData(RegionValueKind, sym), R(r) {}
+
+ const TypedValueRegion* getRegion() const { return R; }
+
+ static void Profile(llvm::FoldingSetNodeID& profile, const TypedValueRegion* R) {
+ profile.AddInteger((unsigned) RegionValueKind);
+ profile.AddPointer(R);
+ }
+
+ virtual void Profile(llvm::FoldingSetNodeID& profile) {
+ Profile(profile, R);
+ }
+
+ virtual void dumpToStream(raw_ostream &os) const;
+
+ QualType getType(ASTContext&) const;
+
+ // Implement isa<T> support.
+ static inline bool classof(const SymExpr *SE) {
+ return SE->getKind() == RegionValueKind;
+ }
+};
+
+/// A symbol representing the result of an expression in the case when we
+/// know nothing else about the value the expression produces.
+class SymbolConjured : public SymbolData {
+ const Stmt *S;
+ QualType T;
+ unsigned Count;
+ const LocationContext *LCtx;
+ const void *SymbolTag;
+
+public:
+ SymbolConjured(SymbolID sym, const Stmt *s, const LocationContext *lctx,
+ QualType t, unsigned count,
+ const void *symbolTag)
+ : SymbolData(ConjuredKind, sym), S(s), T(t), Count(count),
+ LCtx(lctx),
+ SymbolTag(symbolTag) {}
+
+ const Stmt *getStmt() const { return S; }
+ unsigned getCount() const { return Count; }
+ const void *getTag() const { return SymbolTag; }
+
+ QualType getType(ASTContext&) const;
+
+ virtual void dumpToStream(raw_ostream &os) const;
+
+ static void Profile(llvm::FoldingSetNodeID& profile, const Stmt *S,
+ QualType T, unsigned Count, const LocationContext *LCtx,
+ const void *SymbolTag) {
+ profile.AddInteger((unsigned) ConjuredKind);
+ profile.AddPointer(S);
+ profile.AddPointer(LCtx);
+ profile.Add(T);
+ profile.AddInteger(Count);
+ profile.AddPointer(SymbolTag);
+ }
+
+ virtual void Profile(llvm::FoldingSetNodeID& profile) {
+ Profile(profile, S, T, Count, LCtx, SymbolTag);
+ }
+
+ // Implement isa<T> support.
+ static inline bool classof(const SymExpr *SE) {
+ return SE->getKind() == ConjuredKind;
+ }
+};
+
+/// A symbol representing the value of a MemRegion whose parent region has
+/// symbolic value.
+class SymbolDerived : public SymbolData {
+ SymbolRef parentSymbol;
+ const TypedValueRegion *R;
+
+public:
+ SymbolDerived(SymbolID sym, SymbolRef parent, const TypedValueRegion *r)
+ : SymbolData(DerivedKind, sym), parentSymbol(parent), R(r) {}
+
+ SymbolRef getParentSymbol() const { return parentSymbol; }
+ const TypedValueRegion *getRegion() const { return R; }
+
+ QualType getType(ASTContext&) const;
+
+ virtual void dumpToStream(raw_ostream &os) const;
+
+ static void Profile(llvm::FoldingSetNodeID& profile, SymbolRef parent,
+ const TypedValueRegion *r) {
+ profile.AddInteger((unsigned) DerivedKind);
+ profile.AddPointer(r);
+ profile.AddPointer(parent);
+ }
+
+ virtual void Profile(llvm::FoldingSetNodeID& profile) {
+ Profile(profile, parentSymbol, R);
+ }
+
+ // Implement isa<T> support.
+ static inline bool classof(const SymExpr *SE) {
+ return SE->getKind() == DerivedKind;
+ }
+};
+
+/// SymbolExtent - Represents the extent (size in bytes) of a bounded region.
+/// Clients should not ask the SymbolManager for a region's extent. Always use
+/// SubRegion::getExtent instead -- the value returned may not be a symbol.
+class SymbolExtent : public SymbolData {
+ const SubRegion *R;
+
+public:
+ SymbolExtent(SymbolID sym, const SubRegion *r)
+ : SymbolData(ExtentKind, sym), R(r) {}
+
+ const SubRegion *getRegion() const { return R; }
+
+ QualType getType(ASTContext&) const;
+
+ virtual void dumpToStream(raw_ostream &os) const;
+
+ static void Profile(llvm::FoldingSetNodeID& profile, const SubRegion *R) {
+ profile.AddInteger((unsigned) ExtentKind);
+ profile.AddPointer(R);
+ }
+
+ virtual void Profile(llvm::FoldingSetNodeID& profile) {
+ Profile(profile, R);
+ }
+
+ // Implement isa<T> support.
+ static inline bool classof(const SymExpr *SE) {
+ return SE->getKind() == ExtentKind;
+ }
+};
+
+/// SymbolMetadata - Represents path-dependent metadata about a specific region.
+/// Metadata symbols remain live as long as they are marked as in use before
+/// dead-symbol sweeping AND their associated regions are still alive.
+/// Intended for use by checkers.
+class SymbolMetadata : public SymbolData {
+ const MemRegion* R;
+ const Stmt *S;
+ QualType T;
+ unsigned Count;
+ const void *Tag;
+public:
+ SymbolMetadata(SymbolID sym, const MemRegion* r, const Stmt *s, QualType t,
+ unsigned count, const void *tag)
+ : SymbolData(MetadataKind, sym), R(r), S(s), T(t), Count(count), Tag(tag) {}
+
+ const MemRegion *getRegion() const { return R; }
+ const Stmt *getStmt() const { return S; }
+ unsigned getCount() const { return Count; }
+ const void *getTag() const { return Tag; }
+
+ QualType getType(ASTContext&) const;
+
+ virtual void dumpToStream(raw_ostream &os) const;
+
+ static void Profile(llvm::FoldingSetNodeID& profile, const MemRegion *R,
+ const Stmt *S, QualType T, unsigned Count,
+ const void *Tag) {
+ profile.AddInteger((unsigned) MetadataKind);
+ profile.AddPointer(R);
+ profile.AddPointer(S);
+ profile.Add(T);
+ profile.AddInteger(Count);
+ profile.AddPointer(Tag);
+ }
+
+ virtual void Profile(llvm::FoldingSetNodeID& profile) {
+ Profile(profile, R, S, T, Count, Tag);
+ }
+
+ // Implement isa<T> support.
+ static inline bool classof(const SymExpr *SE) {
+ return SE->getKind() == MetadataKind;
+ }
+};
+
+/// \brief Represents a cast expression.
+class SymbolCast : public SymExpr {
+ const SymExpr *Operand;
+ /// Type of the operand.
+ QualType FromTy;
+ /// The type of the result.
+ QualType ToTy;
+
+public:
+ SymbolCast(const SymExpr *In, QualType From, QualType To) :
+ SymExpr(CastSymbolKind), Operand(In), FromTy(From), ToTy(To) { }
+
+ QualType getType(ASTContext &C) const { return ToTy; }
+
+ const SymExpr *getOperand() const { return Operand; }
+
+ virtual void dumpToStream(raw_ostream &os) const;
+
+ static void Profile(llvm::FoldingSetNodeID& ID,
+ const SymExpr *In, QualType From, QualType To) {
+ ID.AddInteger((unsigned) CastSymbolKind);
+ ID.AddPointer(In);
+ ID.Add(From);
+ ID.Add(To);
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) {
+ Profile(ID, Operand, FromTy, ToTy);
+ }
+
+ // Implement isa<T> support.
+ static inline bool classof(const SymExpr *SE) {
+ return SE->getKind() == CastSymbolKind;
+ }
+};
+
+/// SymIntExpr - Represents a symbolic expression like 'x' + 3.
+class SymIntExpr : public SymExpr {
+ const SymExpr *LHS;
+ BinaryOperator::Opcode Op;
+ const llvm::APSInt& RHS;
+ QualType T;
+
+public:
+ SymIntExpr(const SymExpr *lhs, BinaryOperator::Opcode op,
+ const llvm::APSInt& rhs, QualType t)
+ : SymExpr(SymIntKind), LHS(lhs), Op(op), RHS(rhs), T(t) {}
+
+ // FIXME: We probably need to make this out-of-line to avoid redundant
+ // generation of virtual functions.
+ QualType getType(ASTContext &C) const { return T; }
+
+ BinaryOperator::Opcode getOpcode() const { return Op; }
+
+ virtual void dumpToStream(raw_ostream &os) const;
+
+ const SymExpr *getLHS() const { return LHS; }
+ const llvm::APSInt &getRHS() const { return RHS; }
+
+ static void Profile(llvm::FoldingSetNodeID& ID, const SymExpr *lhs,
+ BinaryOperator::Opcode op, const llvm::APSInt& rhs,
+ QualType t) {
+ ID.AddInteger((unsigned) SymIntKind);
+ ID.AddPointer(lhs);
+ ID.AddInteger(op);
+ ID.AddPointer(&rhs);
+ ID.Add(t);
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) {
+ Profile(ID, LHS, Op, RHS, T);
+ }
+
+ // Implement isa<T> support.
+ static inline bool classof(const SymExpr *SE) {
+ return SE->getKind() == SymIntKind;
+ }
+};
+
+/// IntSymExpr - Represents a symbolic expression like 3 - 'x'.
+class IntSymExpr : public SymExpr {
+ const llvm::APSInt& LHS;
+ BinaryOperator::Opcode Op;
+ const SymExpr *RHS;
+ QualType T;
+
+public:
+ IntSymExpr(const llvm::APSInt& lhs, BinaryOperator::Opcode op,
+ const SymExpr *rhs, QualType t)
+ : SymExpr(IntSymKind), LHS(lhs), Op(op), RHS(rhs), T(t) {}
+
+ QualType getType(ASTContext &C) const { return T; }
+
+ BinaryOperator::Opcode getOpcode() const { return Op; }
+
+ virtual void dumpToStream(raw_ostream &os) const;
+
+ const SymExpr *getRHS() const { return RHS; }
+ const llvm::APSInt &getLHS() const { return LHS; }
+
+ static void Profile(llvm::FoldingSetNodeID& ID, const llvm::APSInt& lhs,
+ BinaryOperator::Opcode op, const SymExpr *rhs,
+ QualType t) {
+ ID.AddInteger((unsigned) IntSymKind);
+ ID.AddPointer(&lhs);
+ ID.AddInteger(op);
+ ID.AddPointer(rhs);
+ ID.Add(t);
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) {
+ Profile(ID, LHS, Op, RHS, T);
+ }
+
+ // Implement isa<T> support.
+ static inline bool classof(const SymExpr *SE) {
+ return SE->getKind() == IntSymKind;
+ }
+};
+
+/// SymSymExpr - Represents a symbolic expression like 'x' + 'y'.
+class SymSymExpr : public SymExpr {
+ const SymExpr *LHS;
+ BinaryOperator::Opcode Op;
+ const SymExpr *RHS;
+ QualType T;
+
+public:
+ SymSymExpr(const SymExpr *lhs, BinaryOperator::Opcode op, const SymExpr *rhs,
+ QualType t)
+ : SymExpr(SymSymKind), LHS(lhs), Op(op), RHS(rhs), T(t) {}
+
+ BinaryOperator::Opcode getOpcode() const { return Op; }
+ const SymExpr *getLHS() const { return LHS; }
+ const SymExpr *getRHS() const { return RHS; }
+
+ // FIXME: We probably need to make this out-of-line to avoid redundant
+ // generation of virtual functions.
+ QualType getType(ASTContext &C) const { return T; }
+
+ virtual void dumpToStream(raw_ostream &os) const;
+
+ static void Profile(llvm::FoldingSetNodeID& ID, const SymExpr *lhs,
+ BinaryOperator::Opcode op, const SymExpr *rhs, QualType t) {
+ ID.AddInteger((unsigned) SymSymKind);
+ ID.AddPointer(lhs);
+ ID.AddInteger(op);
+ ID.AddPointer(rhs);
+ ID.Add(t);
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) {
+ Profile(ID, LHS, Op, RHS, T);
+ }
+
+ // Implement isa<T> support.
+ static inline bool classof(const SymExpr *SE) {
+ return SE->getKind() == SymSymKind;
+ }
+};
+
+class SymbolManager {
+ typedef llvm::FoldingSet<SymExpr> DataSetTy;
+ typedef llvm::DenseMap<SymbolRef, SymbolRefSmallVectorTy*> SymbolDependTy;
+
+ DataSetTy DataSet;
+ /// Stores the extra dependencies between symbols: the data should be kept
+ /// alive as long as the key is live.
+ SymbolDependTy SymbolDependencies;
+ unsigned SymbolCounter;
+ llvm::BumpPtrAllocator& BPAlloc;
+ BasicValueFactory &BV;
+ ASTContext &Ctx;
+
+public:
+ SymbolManager(ASTContext &ctx, BasicValueFactory &bv,
+ llvm::BumpPtrAllocator& bpalloc)
+ : SymbolDependencies(16), SymbolCounter(0),
+ BPAlloc(bpalloc), BV(bv), Ctx(ctx) {}
+
+ ~SymbolManager();
+
+ static bool canSymbolicate(QualType T);
+
+ /// \brief Make a unique symbol for MemRegion R according to its kind.
+ const SymbolRegionValue* getRegionValueSymbol(const TypedValueRegion* R);
+
+ const SymbolConjured* getConjuredSymbol(const Stmt *E,
+ const LocationContext *LCtx,
+ QualType T,
+ unsigned VisitCount,
+ const void *SymbolTag = 0);
+
+ const SymbolConjured* getConjuredSymbol(const Expr *E,
+ const LocationContext *LCtx,
+ unsigned VisitCount,
+ const void *SymbolTag = 0) {
+ return getConjuredSymbol(E, LCtx, E->getType(),
+ VisitCount, SymbolTag);
+ }
+
+ const SymbolDerived *getDerivedSymbol(SymbolRef parentSymbol,
+ const TypedValueRegion *R);
+
+ const SymbolExtent *getExtentSymbol(const SubRegion *R);
+
+ /// \brief Creates a metadata symbol associated with a specific region.
+ ///
+ /// VisitCount can be used to differentiate regions corresponding to
+  /// different loop iterations, thus making the symbol path-dependent.
+ const SymbolMetadata* getMetadataSymbol(const MemRegion* R, const Stmt *S,
+ QualType T, unsigned VisitCount,
+ const void *SymbolTag = 0);
+
+ const SymbolCast* getCastSymbol(const SymExpr *Operand,
+ QualType From, QualType To);
+
+ const SymIntExpr *getSymIntExpr(const SymExpr *lhs, BinaryOperator::Opcode op,
+ const llvm::APSInt& rhs, QualType t);
+
+ const SymIntExpr *getSymIntExpr(const SymExpr &lhs, BinaryOperator::Opcode op,
+ const llvm::APSInt& rhs, QualType t) {
+ return getSymIntExpr(&lhs, op, rhs, t);
+ }
+
+ const IntSymExpr *getIntSymExpr(const llvm::APSInt& lhs,
+ BinaryOperator::Opcode op,
+ const SymExpr *rhs, QualType t);
+
+ const SymSymExpr *getSymSymExpr(const SymExpr *lhs, BinaryOperator::Opcode op,
+ const SymExpr *rhs, QualType t);
+
+ QualType getType(const SymExpr *SE) const {
+ return SE->getType(Ctx);
+ }
+
+ /// \brief Add artificial symbol dependency.
+ ///
+ /// The dependent symbol should stay alive as long as the primary is alive.
+ void addSymbolDependency(const SymbolRef Primary, const SymbolRef Dependent);
+
+ const SymbolRefSmallVectorTy *getDependentSymbols(const SymbolRef Primary);
+
+ ASTContext &getContext() { return Ctx; }
+ BasicValueFactory &getBasicVals() { return BV; }
+};
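+
+// Example (sketch): given an existing SymbolManager `SymMgr` and some symbol
+// `X` obtained earlier, a client could build the symbolic expression 'X + 3'
+// roughly as follows (checkers normally reach this functionality through
+// SValBuilder rather than using the SymbolManager directly):
+//
+//   ASTContext &Ctx = SymMgr.getContext();
+//   const llvm::APSInt &Three = SymMgr.getBasicVals().getValue(3, Ctx.IntTy);
+//   const SymIntExpr *XPlus3 =
+//       SymMgr.getSymIntExpr(X, BO_Add, Three, Ctx.IntTy);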
+
+class SymbolReaper {
+ enum SymbolStatus {
+ NotProcessed,
+ HaveMarkedDependents
+ };
+
+ typedef llvm::DenseSet<SymbolRef> SymbolSetTy;
+ typedef llvm::DenseMap<SymbolRef, SymbolStatus> SymbolMapTy;
+ typedef llvm::DenseSet<const MemRegion *> RegionSetTy;
+
+ SymbolMapTy TheLiving;
+ SymbolSetTy MetadataInUse;
+ SymbolSetTy TheDead;
+
+ RegionSetTy RegionRoots;
+
+ const LocationContext *LCtx;
+ const Stmt *Loc;
+ SymbolManager& SymMgr;
+ StoreRef reapedStore;
+ llvm::DenseMap<const MemRegion *, unsigned> includedRegionCache;
+
+public:
+ SymbolReaper(const LocationContext *ctx, const Stmt *s, SymbolManager& symmgr,
+ StoreManager &storeMgr)
+ : LCtx(ctx), Loc(s), SymMgr(symmgr), reapedStore(0, storeMgr) {}
+
+ ~SymbolReaper() {}
+
+ const LocationContext *getLocationContext() const { return LCtx; }
+ const Stmt *getCurrentStatement() const { return Loc; }
+
+ bool isLive(SymbolRef sym);
+ bool isLiveRegion(const MemRegion *region);
+ bool isLive(const Stmt *ExprVal, const LocationContext *LCtx) const;
+ bool isLive(const VarRegion *VR, bool includeStoreBindings = false) const;
+
+ /// \brief Unconditionally marks a symbol as live.
+ ///
+ /// This should never be
+ /// used by checkers, only by the state infrastructure such as the store and
+ /// environment. Checkers should instead use metadata symbols and markInUse.
+ void markLive(SymbolRef sym);
+
+ /// \brief Marks a symbol as important to a checker.
+ ///
+ /// For metadata symbols,
+ /// this will keep the symbol alive as long as its associated region is also
+ /// live. For other symbols, this has no effect; checkers are not permitted
+ /// to influence the life of other symbols. This should be used before any
+ /// symbol marking has occurred, i.e. in the MarkLiveSymbols callback.
+ void markInUse(SymbolRef sym);
+
+ /// \brief If a symbol is known to be live, marks the symbol as live.
+ ///
+ /// Otherwise, if the symbol cannot be proven live, it is marked as dead.
+ /// Returns true if the symbol is dead, false if live.
+ bool maybeDead(SymbolRef sym);
+
+ typedef SymbolSetTy::const_iterator dead_iterator;
+ dead_iterator dead_begin() const { return TheDead.begin(); }
+ dead_iterator dead_end() const { return TheDead.end(); }
+
+ bool hasDeadSymbols() const {
+ return !TheDead.empty();
+ }
+
+ typedef RegionSetTy::const_iterator region_iterator;
+ region_iterator region_begin() const { return RegionRoots.begin(); }
+ region_iterator region_end() const { return RegionRoots.end(); }
+
+ /// \brief Returns whether or not a symbol has been confirmed dead.
+ ///
+ /// This should only be called once all marking of dead symbols has completed.
+ /// (For checkers, this means only in the evalDeadSymbols callback.)
+ bool isDead(SymbolRef sym) const {
+ return TheDead.count(sym);
+ }
+
+ void markLive(const MemRegion *region);
+
+ /// \brief Set to the value of the symbolic store after
+ /// StoreManager::removeDeadBindings has been called.
+ void setReapedStore(StoreRef st) { reapedStore = st; }
+
+private:
+ /// Mark the symbols dependent on the input symbol as live.
+ void markDependentsLive(SymbolRef sym);
+};
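+
+// Example (sketch): a checker that owns a metadata symbol `MetaSym` bound to
+// a region might interact with the reaper roughly as follows, using the
+// callbacks described above for markInUse and isDead:
+//
+//   // While marking live symbols: keep the metadata alive with its region.
+//   SR.markInUse(MetaSym);
+//
+//   // Once dead-symbol marking has completed: clean up associated state.
+//   if (SR.isDead(MetaSym))
+//     /* remove the corresponding checker state */;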
+
+class SymbolVisitor {
+public:
+ /// \brief A visitor method invoked by ProgramStateManager::scanReachableSymbols.
+ ///
+  /// The method returns \c true if symbols should continue to be scanned and
+  /// \c false otherwise.
+ virtual bool VisitSymbol(SymbolRef sym) = 0;
+ virtual bool VisitMemRegion(const MemRegion *region) { return true; }
+ virtual ~SymbolVisitor();
+};
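+
+// Example (sketch): a minimal visitor that records every symbol it is shown;
+// an instance of such a class would typically be passed to the state's
+// reachable-symbol scan mentioned above.
+//
+//   class CollectSymbols : public SymbolVisitor {
+//     llvm::SmallPtrSet<SymbolRef, 8> &Syms;
+//   public:
+//     explicit CollectSymbols(llvm::SmallPtrSet<SymbolRef, 8> &syms)
+//       : Syms(syms) {}
+//     virtual bool VisitSymbol(SymbolRef sym) { Syms.insert(sym); return true; }
+//   };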
+
+} // end GR namespace
+
+} // end clang namespace
+
+namespace llvm {
+static inline raw_ostream &operator<<(raw_ostream &os,
+ const clang::ento::SymExpr *SE) {
+ SE->dumpToStream(os);
+ return os;
+}
+} // end llvm namespace
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h
new file mode 100644
index 0000000..53205d3
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h
@@ -0,0 +1,40 @@
+//== TaintManager.h - Managing taint --------------------------- -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides APIs for adding, removing, and querying symbol taint.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TAINTMANAGER_H
+#define LLVM_CLANG_TAINTMANAGER_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/TaintTag.h"
+
+namespace clang {
+namespace ento {
+
+/// The GDM component containing the tainted root symbols. We lazily infer the
+/// taint of the dependent symbols. Currently, this is a map from a symbol to
+/// its taint tag kind. TODO: Should support multiple tag kinds.
+struct TaintMap {};
+typedef llvm::ImmutableMap<SymbolRef, TaintTagType> TaintMapImpl;
+template<> struct ProgramStateTrait<TaintMap>
+ : public ProgramStatePartialTrait<TaintMapImpl> {
+ static void *GDMIndex() { static int index = 0; return &index; }
+};
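+
+// Example (sketch): with this trait in place, checkers can read and write the
+// map through the usual generic-data-map accessors on the program state
+// (assuming a state object `State` and a symbol `Sym`):
+//
+//   ProgramStateRef Tainted = State->set<TaintMap>(Sym, TaintTagGeneric);
+//   if (const TaintTagType *Tag = State->get<TaintMap>(Sym))
+//     /* Sym is a tainted root carrying tag *Tag */;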
+
+class TaintManager {
+
+ TaintManager() {}
+};
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintTag.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintTag.h
new file mode 100644
index 0000000..8ddc8b9d
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/TaintTag.h
@@ -0,0 +1,27 @@
+//== TaintTag.h - Taint tag kinds for the static analyzer ------*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines a set of taint tags. Several tags are used to differentiate kinds
+// of taint.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_TAINTTAG_H
+#define LLVM_CLANG_TAINTTAG_H
+
+namespace clang {
+namespace ento {
+
+/// The type of a taint tag. Distinct tag values are used to differentiate
+/// between different kinds of taint.
+typedef unsigned TaintTagType;
+static const TaintTagType TaintTagGeneric = 0;
+
+}}
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/WorkList.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/WorkList.h
new file mode 100644
index 0000000..51aa753
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Core/PathSensitive/WorkList.h
@@ -0,0 +1,102 @@
+//==- WorkList.h - Worklist class used by CoreEngine ---------------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines WorkList, a pure virtual class that represents an opaque
+// worklist used by CoreEngine to explore the reachability state space.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_WORKLIST
+#define LLVM_CLANG_GR_WORKLIST
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h"
+#include <cstddef>
+
+namespace clang {
+
+class CFGBlock;
+
+namespace ento {
+
+class ExplodedNode;
+class ExplodedNodeImpl;
+
+class WorkListUnit {
+ ExplodedNode *node;
+ BlockCounter counter;
+ const CFGBlock *block;
+ unsigned blockIdx; // This is the index of the next statement.
+
+public:
+ WorkListUnit(ExplodedNode *N, BlockCounter C,
+ const CFGBlock *B, unsigned idx)
+ : node(N),
+ counter(C),
+ block(B),
+ blockIdx(idx) {}
+
+ explicit WorkListUnit(ExplodedNode *N, BlockCounter C)
+ : node(N),
+ counter(C),
+ block(NULL),
+ blockIdx(0) {}
+
+ /// Returns the node associated with the worklist unit.
+ ExplodedNode *getNode() const { return node; }
+
+ /// Returns the block counter map associated with the worklist unit.
+ BlockCounter getBlockCounter() const { return counter; }
+
+  /// Returns the CFGBlock associated with the worklist unit.
+ const CFGBlock *getBlock() const { return block; }
+
+ /// Return the index within the CFGBlock for the worklist unit.
+ unsigned getIndex() const { return blockIdx; }
+};
+
+class WorkList {
+ BlockCounter CurrentCounter;
+public:
+ virtual ~WorkList();
+ virtual bool hasWork() const = 0;
+
+ virtual void enqueue(const WorkListUnit& U) = 0;
+
+ void enqueue(ExplodedNode *N, const CFGBlock *B, unsigned idx) {
+ enqueue(WorkListUnit(N, CurrentCounter, B, idx));
+ }
+
+ void enqueue(ExplodedNode *N) {
+ assert(N->getLocation().getKind() != ProgramPoint::PostStmtKind);
+ enqueue(WorkListUnit(N, CurrentCounter));
+ }
+
+ virtual WorkListUnit dequeue() = 0;
+
+ void setBlockCounter(BlockCounter C) { CurrentCounter = C; }
+ BlockCounter getBlockCounter() const { return CurrentCounter; }
+
+ class Visitor {
+ public:
+ Visitor() {}
+ virtual ~Visitor();
+ virtual bool visit(const WorkListUnit &U) = 0;
+ };
+ virtual bool visitItemsInWorkList(Visitor &V) = 0;
+
+ static WorkList *makeDFS();
+ static WorkList *makeBFS();
+ static WorkList *makeBFSBlockDFSContents();
+};
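+
+// Example (sketch): CoreEngine drives a worklist roughly like this, where
+// `Node`, `Block` and `BC` stand in for the engine's current exploded node,
+// CFG block and block counter:
+//
+//   llvm::OwningPtr<WorkList> WL(WorkList::makeDFS());
+//   WL->setBlockCounter(BC);
+//   WL->enqueue(Node, Block, 0);
+//   while (WL->hasWork()) {
+//     const WorkListUnit U = WL->dequeue();
+//     /* process U.getNode() at U.getBlock() / U.getIndex() */
+//   }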
+
+} // end GR namespace
+
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Frontend/CheckerRegistration.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Frontend/CheckerRegistration.h
new file mode 100644
index 0000000..492edd4
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Frontend/CheckerRegistration.h
@@ -0,0 +1,33 @@
+//===-- CheckerRegistration.h - Checker Registration Function ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SA_FRONTEND_CHECKERREGISTRATION_H
+#define LLVM_CLANG_SA_FRONTEND_CHECKERREGISTRATION_H
+
+#include "clang/Basic/LLVM.h"
+#include <string>
+
+namespace clang {
+ class AnalyzerOptions;
+ class LangOptions;
+ class DiagnosticsEngine;
+
+namespace ento {
+ class CheckerManager;
+
+CheckerManager *createCheckerManager(const AnalyzerOptions &opts,
+ const LangOptions &langOpts,
+ ArrayRef<std::string> plugins,
+ DiagnosticsEngine &diags);
+
+} // end ento namespace
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h
new file mode 100644
index 0000000..838ac92
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/StaticAnalyzer/Frontend/FrontendActions.h
@@ -0,0 +1,35 @@
+//===-- FrontendActions.h - Useful Frontend Actions -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_FRONTENDACTIONS_H
+#define LLVM_CLANG_GR_FRONTENDACTIONS_H
+
+#include "clang/Frontend/FrontendAction.h"
+
+namespace clang {
+
+namespace ento {
+
+//===----------------------------------------------------------------------===//
+// AST Consumer Actions
+//===----------------------------------------------------------------------===//
+
+class AnalysisAction : public ASTFrontendAction {
+protected:
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile);
+};
+
+void printCheckerHelp(raw_ostream &OS, ArrayRef<std::string> plugins);
+
+} // end GR namespace
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h b/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h
new file mode 100644
index 0000000..3430320
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/CompilationDatabase.h
@@ -0,0 +1,164 @@
+//===--- CompilationDatabase.h - --------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides an interface and multiple implementations for
+// CompilationDatabases.
+//
+// While C++ refactoring and analysis tools are not compilers, and thus
+// don't run as part of the build system, they need the exact information
+// of a build in order to be able to correctly understand the C++ code of
+// the project. This information is provided via the CompilationDatabase
+// interface.
+//
+// To create a CompilationDatabase from a build directory one can call
+// CompilationDatabase::loadFromDirectory(), which deduces the correct
+// compilation database from the root of the build tree.
+//
+// See the concrete subclasses of CompilationDatabase for currently supported
+// formats.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLING_COMPILATION_DATABASE_H
+#define LLVM_CLANG_TOOLING_COMPILATION_DATABASE_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <string>
+#include <vector>
+
+namespace llvm {
+class MemoryBuffer;
+} // end namespace llvm
+
+namespace clang {
+namespace tooling {
+
+/// \brief Specifies the working directory and command of a compilation.
+struct CompileCommand {
+ CompileCommand() {}
+ CompileCommand(StringRef Directory, ArrayRef<std::string> CommandLine)
+ : Directory(Directory), CommandLine(CommandLine) {}
+
+ /// \brief The working directory the command was executed from.
+ std::string Directory;
+
+ /// \brief The command line that was executed.
+ std::vector<std::string> CommandLine;
+};
+
+/// \brief Interface for compilation databases.
+///
+/// A compilation database allows the user to retrieve all compile command
+/// lines with which a specified file is compiled in a project.
+/// The retrieved compile command lines can be used to run clang tools over
+/// a subset of the files in a project.
+class CompilationDatabase {
+public:
+ virtual ~CompilationDatabase();
+
+ /// \brief Loads a compilation database from a build directory.
+ ///
+ /// Looks at the specified 'BuildDirectory' and creates a compilation database
+  /// that allows querying compile commands for source files in the
+ /// corresponding source tree.
+ ///
+ /// Returns NULL and sets ErrorMessage if we were not able to build up a
+ /// compilation database for the build directory.
+ ///
+ /// FIXME: Currently only supports JSON compilation databases, which
+ /// are named 'compile_commands.json' in the given directory. Extend this
+ /// for other build types (like ninja build files).
+ static CompilationDatabase *loadFromDirectory(StringRef BuildDirectory,
+ std::string &ErrorMessage);
+
+ /// \brief Returns all compile commands in which the specified file was
+ /// compiled.
+ ///
+  /// This includes compile commands that span multiple source files.
+ /// For example, consider a project with the following compilations:
+ /// $ clang++ -o test a.cc b.cc t.cc
+ /// $ clang++ -o production a.cc b.cc -DPRODUCTION
+ /// A compilation database representing the project would return both command
+ /// lines for a.cc and b.cc and only the first command line for t.cc.
+ virtual std::vector<CompileCommand> getCompileCommands(
+ StringRef FilePath) const = 0;
+};
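+
+// Example (sketch): loading a database from a build directory and querying
+// the commands for one file; `BuildDir` is assumed to contain a
+// compile_commands.json, and the error handling is only illustrative:
+//
+//   std::string ErrorMessage;
+//   llvm::OwningPtr<CompilationDatabase> Compilations(
+//       CompilationDatabase::loadFromDirectory(BuildDir, ErrorMessage));
+//   if (!Compilations)
+//     llvm::report_fatal_error(ErrorMessage);
+//   std::vector<CompileCommand> Commands =
+//       Compilations->getCompileCommands("/project/a.cc");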
+
+/// \brief A JSON based compilation database.
+///
+/// JSON compilation database files must contain a list of JSON objects which
+/// provide the command lines in the attributes 'directory', 'command' and
+/// 'file':
+/// [
+/// { "directory": "<working directory of the compile>",
+/// "command": "<compile command line>",
+/// "file": "<path to source file>"
+/// },
+/// ...
+/// ]
+/// Each object entry defines one compile action. The specified file is
+/// considered to be the main source file for the translation unit.
+///
+/// JSON compilation databases can for example be generated in CMake projects
+/// by setting the flag -DCMAKE_EXPORT_COMPILE_COMMANDS.
+class JSONCompilationDatabase : public CompilationDatabase {
+public:
+
+ /// \brief Loads a JSON compilation database from the specified file.
+ ///
+ /// Returns NULL and sets ErrorMessage if the database could not be
+ /// loaded from the given file.
+ static JSONCompilationDatabase *loadFromFile(StringRef FilePath,
+ std::string &ErrorMessage);
+
+ /// \brief Loads a JSON compilation database from a data buffer.
+ ///
+ /// Returns NULL and sets ErrorMessage if the database could not be loaded.
+ static JSONCompilationDatabase *loadFromBuffer(StringRef DatabaseString,
+ std::string &ErrorMessage);
+
+  /// \brief Returns all compile commands in which the specified file was
+ /// compiled.
+ ///
+ /// FIXME: Currently FilePath must be an absolute path inside the
+ /// source directory which does not have symlinks resolved.
+ virtual std::vector<CompileCommand> getCompileCommands(
+ StringRef FilePath) const;
+
+private:
+ /// \brief Constructs a JSON compilation database on a memory buffer.
+ JSONCompilationDatabase(llvm::MemoryBuffer *Database)
+ : Database(Database) {}
+
+ /// \brief Parses the database file and creates the index.
+ ///
+ /// Returns whether parsing succeeded. Sets ErrorMessage if parsing
+ /// failed.
+ bool parse(std::string &ErrorMessage);
+
+  // Tuple (directory, commandline) where 'commandline' is a JSON-escaped,
+  // bash-escaped command line.
+ typedef std::pair<StringRef, StringRef> CompileCommandRef;
+
+ // Maps file paths to the compile command lines for that file.
+ llvm::StringMap< std::vector<CompileCommandRef> > IndexByFile;
+
+ llvm::OwningPtr<llvm::MemoryBuffer> Database;
+};
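+
+// Example (sketch): building a database directly from an in-memory JSON
+// string, e.g. in a unit test:
+//
+//   std::string Err;
+//   llvm::OwningPtr<JSONCompilationDatabase> DB(
+//       JSONCompilationDatabase::loadFromBuffer(
+//           "[{\"directory\":\"/tmp\","
+//           " \"command\":\"clang++ -c a.cc\","
+//           " \"file\":\"a.cc\"}]", Err));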
+
+} // end namespace tooling
+} // end namespace clang
+
+#endif // LLVM_CLANG_TOOLING_COMPILATION_DATABASE_H
+
diff --git a/contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h b/contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h
new file mode 100644
index 0000000..868eae3
--- /dev/null
+++ b/contrib/llvm/tools/clang/include/clang/Tooling/Tooling.h
@@ -0,0 +1,213 @@
+//===--- Tooling.h - Framework for standalone Clang tools -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements functions to run clang tools standalone instead
+// of running them as plugins.
+//
+// A ClangTool is initialized with a CompilationDatabase and a set of files
+// to run over. The tool will then run a user-specified FrontendAction over
+// all TUs in which the given files are compiled.
+//
+// It is also possible to run a FrontendAction over a snippet of code by
+// calling runToolOnCode, which is useful for unit testing.
+//
+// Applications that need more fine-grained control over how to run
+// multiple FrontendActions over code can use ToolInvocation.
+//
+// Example tools:
+// - running clang -fsyntax-only over source code from an editor to get
+// fast syntax checks
+// - running match/replace tools over C++ code
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_TOOLING_TOOLING_H
+#define LLVM_CLANG_TOOLING_TOOLING_H
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/Twine.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Driver/Util.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+
+namespace driver {
+class Compilation;
+} // end namespace driver
+
+class CompilerInvocation;
+class SourceManager;
+class FrontendAction;
+
+namespace tooling {
+
+class CompilationDatabase;
+
+/// \brief Interface to generate clang::FrontendActions.
+class FrontendActionFactory {
+public:
+ virtual ~FrontendActionFactory();
+
+ /// \brief Returns a new clang::FrontendAction.
+ ///
+ /// The caller takes ownership of the returned action.
+ virtual clang::FrontendAction *create() = 0;
+};
+
+/// \brief Returns a new FrontendActionFactory for a given type.
+///
+/// T must extend clang::FrontendAction.
+///
+/// Example:
+/// FrontendActionFactory *Factory =
+/// newFrontendActionFactory<clang::SyntaxOnlyAction>();
+template <typename T>
+FrontendActionFactory *newFrontendActionFactory();
+
+/// \brief Returns a new FrontendActionFactory for any type that provides an
+/// implementation of newFrontendAction().
+///
+/// FactoryT must implement: FrontendAction *newFrontendAction().
+///
+/// Example:
+/// struct ProvidesFrontendActions {
+/// FrontendAction *newFrontendAction();
+/// } Factory;
+/// FrontendActionFactory *FactoryAdapter =
+/// newFrontendActionFactory(&Factory);
+template <typename FactoryT>
+FrontendActionFactory *newFrontendActionFactory(FactoryT *ActionFactory);
+
+/// \brief Runs (and deletes) the tool on 'Code' with the -fsyntax-only flag.
+///
+/// \param ToolAction The action to run over the code.
+/// \param Code C++ code.
+/// \param FileName The file name under which 'Code' will be mapped.
+///
+/// \return - True if 'ToolAction' was successfully executed.
+bool runToolOnCode(clang::FrontendAction *ToolAction, const Twine &Code,
+ const Twine &FileName = "input.cc");
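+
+// Example (sketch): checking a snippet for syntax errors, assuming
+// clang/Frontend/FrontendActions.h is available for SyntaxOnlyAction:
+//
+//   bool Ok = runToolOnCode(new clang::SyntaxOnlyAction, "class X {};");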
+
+/// \brief Utility to run a FrontendAction in a single clang invocation.
+class ToolInvocation {
+ public:
+ /// \brief Create a tool invocation.
+ ///
+ /// \param CommandLine The command line arguments to clang.
+ /// \param ToolAction The action to be executed. Class takes ownership.
+ /// \param Files The FileManager used for the execution. Class does not take
+ /// ownership.
+ ToolInvocation(ArrayRef<std::string> CommandLine, FrontendAction *ToolAction,
+ FileManager *Files);
+
+ /// \brief Map a virtual file to be used while running the tool.
+ ///
+ /// \param FilePath The path at which the content will be mapped.
+  /// \param Content A null-terminated buffer of the file's content.
+ void mapVirtualFile(StringRef FilePath, StringRef Content);
+
+ /// \brief Run the clang invocation.
+ ///
+ /// \returns True if there were no errors during execution.
+ bool run();
+
+ private:
+ void addFileMappingsTo(SourceManager &SourceManager);
+
+ bool runInvocation(const char *BinaryName,
+ clang::driver::Compilation *Compilation,
+ clang::CompilerInvocation *Invocation,
+ const clang::driver::ArgStringList &CC1Args,
+ clang::FrontendAction *ToolAction);
+
+ std::vector<std::string> CommandLine;
+ llvm::OwningPtr<FrontendAction> ToolAction;
+ FileManager *Files;
+ // Maps <file name> -> <file content>.
+ llvm::StringMap<StringRef> MappedFileContents;
+};
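+
+// Example (sketch): running a single -fsyntax-only invocation over a virtual
+// file; the first command-line element plays the role of argv[0], and
+// SyntaxOnlyAction is assumed to be available from the frontend headers:
+//
+//   std::vector<std::string> Args;
+//   Args.push_back("tool");
+//   Args.push_back("-fsyntax-only");
+//   Args.push_back("/virtual/input.cc");
+//   FileManager Files((FileSystemOptions()));
+//   ToolInvocation Invocation(Args, new clang::SyntaxOnlyAction, &Files);
+//   Invocation.mapVirtualFile("/virtual/input.cc", "int main() { return 0; }\n");
+//   bool Ok = Invocation.run();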
+
+/// \brief Utility to run a FrontendAction over a set of files.
+///
+/// This class is written to be usable for command line utilities.
+class ClangTool {
+ public:
+ /// \brief Constructs a clang tool to run over a list of files.
+ ///
+ /// \param Compilations The CompilationDatabase which contains the compile
+ /// command lines for the given source paths.
+  /// \param SourcePaths The source files to run over. If a source file is
+  /// not found in Compilations, it is skipped.
+ ClangTool(const CompilationDatabase &Compilations,
+ ArrayRef<std::string> SourcePaths);
+
+ /// \brief Map a virtual file to be used while running the tool.
+ ///
+ /// \param FilePath The path at which the content will be mapped.
+  /// \param Content A null-terminated buffer of the file's content.
+ void mapVirtualFile(StringRef FilePath, StringRef Content);
+
+ /// Runs a frontend action over all files specified in the command line.
+ ///
+ /// \param ActionFactory Factory generating the frontend actions. The function
+ /// takes ownership of this parameter. A new action is generated for every
+ /// processed translation unit.
+ int run(FrontendActionFactory *ActionFactory);
+
+ /// \brief Returns the file manager used in the tool.
+ ///
+ /// The file manager is shared between all translation units.
+ FileManager &getFiles() { return Files; }
+
+ private:
+ // We store command lines as pair (file name, command line).
+ typedef std::pair< std::string, std::vector<std::string> > CommandLine;
+ std::vector<CommandLine> CommandLines;
+
+ FileManager Files;
+ // Contains a list of pairs (<file name>, <file content>).
+ std::vector< std::pair<StringRef, StringRef> > MappedFileContents;
+};
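+
+// Example (sketch): a typical tool main() combines a compilation database
+// with ClangTool; `Compilations` and `Sources` are assumed to come from the
+// tool's own command-line handling:
+//
+//   ClangTool Tool(*Compilations, Sources);
+//   return Tool.run(newFrontendActionFactory<clang::SyntaxOnlyAction>());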
+
+template <typename T>
+FrontendActionFactory *newFrontendActionFactory() {
+ class SimpleFrontendActionFactory : public FrontendActionFactory {
+ public:
+ virtual clang::FrontendAction *create() { return new T; }
+ };
+
+ return new SimpleFrontendActionFactory;
+}
+
+template <typename FactoryT>
+FrontendActionFactory *newFrontendActionFactory(FactoryT *ActionFactory) {
+ class FrontendActionFactoryAdapter : public FrontendActionFactory {
+ public:
+ explicit FrontendActionFactoryAdapter(FactoryT *ActionFactory)
+ : ActionFactory(ActionFactory) {}
+
+ virtual clang::FrontendAction *create() {
+ return ActionFactory->newFrontendAction();
+ }
+
+ private:
+ FactoryT *ActionFactory;
+ };
+
+ return new FrontendActionFactoryAdapter(ActionFactory);
+}
+
+} // end namespace tooling
+} // end namespace clang
+
+#endif // LLVM_CLANG_TOOLING_TOOLING_H
+
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp
new file mode 100644
index 0000000..9354dc3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMT.cpp
@@ -0,0 +1,626 @@
+//===--- ARCMT.cpp - Migration to ARC mode --------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Internals.h"
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendAction.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Basic/DiagnosticCategories.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/ADT/Triple.h"
+using namespace clang;
+using namespace arcmt;
+
+bool CapturedDiagList::clearDiagnostic(ArrayRef<unsigned> IDs,
+ SourceRange range) {
+ if (range.isInvalid())
+ return false;
+
+ bool cleared = false;
+ ListTy::iterator I = List.begin();
+ while (I != List.end()) {
+ FullSourceLoc diagLoc = I->getLocation();
+ if ((IDs.empty() || // empty means clear all diagnostics in the range.
+ std::find(IDs.begin(), IDs.end(), I->getID()) != IDs.end()) &&
+ !diagLoc.isBeforeInTranslationUnitThan(range.getBegin()) &&
+ (diagLoc == range.getEnd() ||
+ diagLoc.isBeforeInTranslationUnitThan(range.getEnd()))) {
+ cleared = true;
+ ListTy::iterator eraseS = I++;
+ while (I != List.end() && I->getLevel() == DiagnosticsEngine::Note)
+ ++I;
+ // Clear the diagnostic and any notes following it.
+ List.erase(eraseS, I);
+ continue;
+ }
+
+ ++I;
+ }
+
+ return cleared;
+}
+
+bool CapturedDiagList::hasDiagnostic(ArrayRef<unsigned> IDs,
+ SourceRange range) const {
+ if (range.isInvalid())
+ return false;
+
+ ListTy::const_iterator I = List.begin();
+ while (I != List.end()) {
+ FullSourceLoc diagLoc = I->getLocation();
+ if ((IDs.empty() || // empty means any diagnostic in the range.
+ std::find(IDs.begin(), IDs.end(), I->getID()) != IDs.end()) &&
+ !diagLoc.isBeforeInTranslationUnitThan(range.getBegin()) &&
+ (diagLoc == range.getEnd() ||
+ diagLoc.isBeforeInTranslationUnitThan(range.getEnd()))) {
+ return true;
+ }
+
+ ++I;
+ }
+
+ return false;
+}
+
+void CapturedDiagList::reportDiagnostics(DiagnosticsEngine &Diags) const {
+ for (ListTy::const_iterator I = List.begin(), E = List.end(); I != E; ++I)
+ Diags.Report(*I);
+}
+
+bool CapturedDiagList::hasErrors() const {
+ for (ListTy::const_iterator I = List.begin(), E = List.end(); I != E; ++I)
+ if (I->getLevel() >= DiagnosticsEngine::Error)
+ return true;
+
+ return false;
+}
+
+namespace {
+
+class CaptureDiagnosticConsumer : public DiagnosticConsumer {
+ DiagnosticsEngine &Diags;
+ CapturedDiagList &CapturedDiags;
+public:
+ CaptureDiagnosticConsumer(DiagnosticsEngine &diags,
+ CapturedDiagList &capturedDiags)
+ : Diags(diags), CapturedDiags(capturedDiags) { }
+
+ virtual void HandleDiagnostic(DiagnosticsEngine::Level level,
+ const Diagnostic &Info) {
+ if (DiagnosticIDs::isARCDiagnostic(Info.getID()) ||
+ level >= DiagnosticsEngine::Error || level == DiagnosticsEngine::Note) {
+ CapturedDiags.push_back(StoredDiagnostic(level, Info));
+ return;
+ }
+
+ // Non-ARC warnings are ignored.
+ Diags.setLastDiagnosticIgnored();
+ }
+
+ DiagnosticConsumer *clone(DiagnosticsEngine &Diags) const {
+ // Just drop any diagnostics that come from cloned consumers; they'll
+ // have different source managers anyway.
+ return new IgnoringDiagConsumer();
+ }
+};
+
+} // end anonymous namespace
+
+static inline StringRef SimulatorVersionDefineName() {
+ return "__IPHONE_OS_VERSION_MIN_REQUIRED=";
+}
+
+/// \brief Parse the simulator version define:
+/// __IPHONE_OS_VERSION_MIN_REQUIRED=([0-9])([0-9][0-9])([0-9][0-9])
+/// and return the grouped values as integers, e.g.:
+///   __IPHONE_OS_VERSION_MIN_REQUIRED=40201
+/// will return Major=4, Minor=2, Micro=1.
+static bool GetVersionFromSimulatorDefine(StringRef define,
+ unsigned &Major, unsigned &Minor,
+ unsigned &Micro) {
+ assert(define.startswith(SimulatorVersionDefineName()));
+ StringRef name, version;
+ llvm::tie(name, version) = define.split('=');
+ if (version.empty())
+ return false;
+ std::string verstr = version.str();
+ char *end;
+ unsigned num = (unsigned) strtol(verstr.c_str(), &end, 10);
+ if (*end != '\0')
+ return false;
+ Major = num / 10000;
+ num = num % 10000;
+ Minor = num / 100;
+ Micro = num % 100;
+ return true;
+}
+
+static bool HasARCRuntime(CompilerInvocation &origCI) {
+ // This duplicates some functionality from Darwin::AddDeploymentTarget
+ // but this function is well defined, so keep it decoupled from the driver
+ // and avoid unrelated complications.
+
+ for (unsigned i = 0, e = origCI.getPreprocessorOpts().Macros.size();
+ i != e; ++i) {
+ StringRef define = origCI.getPreprocessorOpts().Macros[i].first;
+ bool isUndef = origCI.getPreprocessorOpts().Macros[i].second;
+ if (isUndef)
+ continue;
+ if (!define.startswith(SimulatorVersionDefineName()))
+ continue;
+ unsigned Major = 0, Minor = 0, Micro = 0;
+ if (GetVersionFromSimulatorDefine(define, Major, Minor, Micro) &&
+ Major < 10 && Minor < 100 && Micro < 100)
+ return Major >= 5;
+ }
+
+ llvm::Triple triple(origCI.getTargetOpts().Triple);
+
+ if (triple.getOS() == llvm::Triple::IOS)
+ return triple.getOSMajorVersion() >= 5;
+
+ if (triple.getOS() == llvm::Triple::Darwin)
+ return triple.getOSMajorVersion() >= 11;
+
+ if (triple.getOS() == llvm::Triple::MacOSX) {
+ unsigned Major, Minor, Micro;
+ triple.getOSVersion(Major, Minor, Micro);
+ return Major > 10 || (Major == 10 && Minor >= 7);
+ }
+
+ return false;
+}
+
+static CompilerInvocation *
+createInvocationForMigration(CompilerInvocation &origCI) {
+ OwningPtr<CompilerInvocation> CInvok;
+ CInvok.reset(new CompilerInvocation(origCI));
+ CInvok->getPreprocessorOpts().ImplicitPCHInclude = std::string();
+ CInvok->getPreprocessorOpts().ImplicitPTHInclude = std::string();
+ std::string define = getARCMTMacroName();
+ define += '=';
+ CInvok->getPreprocessorOpts().addMacroDef(define);
+ CInvok->getLangOpts()->ObjCAutoRefCount = true;
+ CInvok->getLangOpts()->setGC(LangOptions::NonGC);
+ CInvok->getDiagnosticOpts().ErrorLimit = 0;
+ CInvok->getDiagnosticOpts().Warnings.push_back(
+ "error=arc-unsafe-retained-assign");
+ CInvok->getLangOpts()->ObjCRuntimeHasWeak = HasARCRuntime(origCI);
+
+ return CInvok.take();
+}
+
+static void emitPremigrationErrors(const CapturedDiagList &arcDiags,
+ const DiagnosticOptions &diagOpts,
+ Preprocessor &PP) {
+ TextDiagnosticPrinter printer(llvm::errs(), diagOpts);
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, &printer, /*ShouldOwnClient=*/false));
+ Diags->setSourceManager(&PP.getSourceManager());
+
+ printer.BeginSourceFile(PP.getLangOpts(), &PP);
+ arcDiags.reportDiagnostics(*Diags);
+ printer.EndSourceFile();
+}
+
+//===----------------------------------------------------------------------===//
+// checkForManualIssues.
+//===----------------------------------------------------------------------===//
+
+bool arcmt::checkForManualIssues(CompilerInvocation &origCI,
+ const FrontendInputFile &Input,
+ DiagnosticConsumer *DiagClient,
+ bool emitPremigrationARCErrors,
+ StringRef plistOut) {
+ if (!origCI.getLangOpts()->ObjC1)
+ return false;
+
+ LangOptions::GCMode OrigGCMode = origCI.getLangOpts()->getGC();
+ bool NoNSAllocReallocError = origCI.getMigratorOpts().NoNSAllocReallocError;
+ bool NoFinalizeRemoval = origCI.getMigratorOpts().NoFinalizeRemoval;
+
+ std::vector<TransformFn> transforms = arcmt::getAllTransformations(OrigGCMode,
+ NoFinalizeRemoval);
+ assert(!transforms.empty());
+
+ OwningPtr<CompilerInvocation> CInvok;
+ CInvok.reset(createInvocationForMigration(origCI));
+ CInvok->getFrontendOpts().Inputs.clear();
+ CInvok->getFrontendOpts().Inputs.push_back(Input);
+
+ CapturedDiagList capturedDiags;
+
+ assert(DiagClient);
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+
+  // Filter all diagnostics.
+ CaptureDiagnosticConsumer errRec(*Diags, capturedDiags);
+ Diags->setClient(&errRec, /*ShouldOwnClient=*/false);
+
+ OwningPtr<ASTUnit> Unit(
+ ASTUnit::LoadFromCompilerInvocationAction(CInvok.take(), Diags));
+ if (!Unit)
+ return true;
+
+ // Don't filter diagnostics anymore.
+ Diags->setClient(DiagClient, /*ShouldOwnClient=*/false);
+
+ ASTContext &Ctx = Unit->getASTContext();
+
+ if (Diags->hasFatalErrorOccurred()) {
+ Diags->Reset();
+ DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor());
+ capturedDiags.reportDiagnostics(*Diags);
+ DiagClient->EndSourceFile();
+ return true;
+ }
+
+ if (emitPremigrationARCErrors)
+ emitPremigrationErrors(capturedDiags, origCI.getDiagnosticOpts(),
+ Unit->getPreprocessor());
+ if (!plistOut.empty()) {
+ SmallVector<StoredDiagnostic, 8> arcDiags;
+ for (CapturedDiagList::iterator
+ I = capturedDiags.begin(), E = capturedDiags.end(); I != E; ++I)
+ arcDiags.push_back(*I);
+ writeARCDiagsToPlist(plistOut, arcDiags,
+ Ctx.getSourceManager(), Ctx.getLangOpts());
+ }
+
+  // After parsing of the source files has ended, we want to reuse the
+ // diagnostics objects to emit further diagnostics.
+ // We call BeginSourceFile because DiagnosticConsumer requires that
+ // diagnostics with source range information are emitted only in between
+ // BeginSourceFile() and EndSourceFile().
+ DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor());
+
+ // No macros will be added since we are just checking and we won't modify
+ // source code.
+ std::vector<SourceLocation> ARCMTMacroLocs;
+
+ TransformActions testAct(*Diags, capturedDiags, Ctx, Unit->getPreprocessor());
+ MigrationPass pass(Ctx, OrigGCMode, Unit->getSema(), testAct, ARCMTMacroLocs);
+ pass.setNSAllocReallocError(NoNSAllocReallocError);
+ pass.setNoFinalizeRemoval(NoFinalizeRemoval);
+
+ for (unsigned i=0, e = transforms.size(); i != e; ++i)
+ transforms[i](pass);
+
+ capturedDiags.reportDiagnostics(*Diags);
+
+ DiagClient->EndSourceFile();
+
+ // If we are migrating code that gets the '-fobjc-arc' flag, make sure
+ // to remove it so that we don't get errors from normal compilation.
+ origCI.getLangOpts()->ObjCAutoRefCount = false;
+
+ return capturedDiags.hasErrors() || testAct.hasReportedErrors();
+}
+
+//===----------------------------------------------------------------------===//
+// applyTransformations.
+//===----------------------------------------------------------------------===//
+
+static bool applyTransforms(CompilerInvocation &origCI,
+ const FrontendInputFile &Input,
+ DiagnosticConsumer *DiagClient,
+ StringRef outputDir,
+ bool emitPremigrationARCErrors,
+ StringRef plistOut) {
+ if (!origCI.getLangOpts()->ObjC1)
+ return false;
+
+ LangOptions::GCMode OrigGCMode = origCI.getLangOpts()->getGC();
+
+ // Make sure checking is successful first.
+ CompilerInvocation CInvokForCheck(origCI);
+ if (arcmt::checkForManualIssues(CInvokForCheck, Input, DiagClient,
+ emitPremigrationARCErrors, plistOut))
+ return true;
+
+ CompilerInvocation CInvok(origCI);
+ CInvok.getFrontendOpts().Inputs.clear();
+ CInvok.getFrontendOpts().Inputs.push_back(Input);
+
+ MigrationProcess migration(CInvok, DiagClient, outputDir);
+ bool NoFinalizeRemoval = origCI.getMigratorOpts().NoFinalizeRemoval;
+
+ std::vector<TransformFn> transforms = arcmt::getAllTransformations(OrigGCMode,
+ NoFinalizeRemoval);
+ assert(!transforms.empty());
+
+ for (unsigned i=0, e = transforms.size(); i != e; ++i) {
+ bool err = migration.applyTransform(transforms[i]);
+ if (err) return true;
+ }
+
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+
+ if (outputDir.empty()) {
+ origCI.getLangOpts()->ObjCAutoRefCount = true;
+ return migration.getRemapper().overwriteOriginal(*Diags);
+ } else {
+ // If we are migrating code that gets the '-fobjc-arc' flag, make sure
+ // to remove it so that we don't get errors from normal compilation.
+ origCI.getLangOpts()->ObjCAutoRefCount = false;
+ return migration.getRemapper().flushToDisk(outputDir, *Diags);
+ }
+}
+
+bool arcmt::applyTransformations(CompilerInvocation &origCI,
+ const FrontendInputFile &Input,
+ DiagnosticConsumer *DiagClient) {
+ return applyTransforms(origCI, Input, DiagClient,
+ StringRef(), false, StringRef());
+}
+
+bool arcmt::migrateWithTemporaryFiles(CompilerInvocation &origCI,
+ const FrontendInputFile &Input,
+ DiagnosticConsumer *DiagClient,
+ StringRef outputDir,
+ bool emitPremigrationARCErrors,
+ StringRef plistOut) {
+ assert(!outputDir.empty() && "Expected output directory path");
+ return applyTransforms(origCI, Input, DiagClient,
+ outputDir, emitPremigrationARCErrors, plistOut);
+}
+
+bool arcmt::getFileRemappings(std::vector<std::pair<std::string,std::string> > &
+ remap,
+ StringRef outputDir,
+ DiagnosticConsumer *DiagClient) {
+ assert(!outputDir.empty());
+
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+
+ FileRemapper remapper;
+ bool err = remapper.initFromDisk(outputDir, *Diags,
+ /*ignoreIfFilesChanged=*/true);
+ if (err)
+ return true;
+
+ PreprocessorOptions PPOpts;
+ remapper.applyMappings(PPOpts);
+ remap = PPOpts.RemappedFiles;
+
+ return false;
+}
+
+bool arcmt::getFileRemappingsFromFileList(
+ std::vector<std::pair<std::string,std::string> > &remap,
+ ArrayRef<StringRef> remapFiles,
+ DiagnosticConsumer *DiagClient) {
+ bool hasErrorOccurred = false;
+ llvm::StringMap<bool> Uniquer;
+
+ llvm::IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ llvm::IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+
+ for (ArrayRef<StringRef>::iterator
+ I = remapFiles.begin(), E = remapFiles.end(); I != E; ++I) {
+ StringRef file = *I;
+
+ FileRemapper remapper;
+ bool err = remapper.initFromFile(file, *Diags,
+ /*ignoreIfFilesChanged=*/true);
+ hasErrorOccurred = hasErrorOccurred || err;
+ if (err)
+ continue;
+
+ PreprocessorOptions PPOpts;
+ remapper.applyMappings(PPOpts);
+ for (PreprocessorOptions::remapped_file_iterator
+ RI = PPOpts.remapped_file_begin(), RE = PPOpts.remapped_file_end();
+ RI != RE; ++RI) {
+ bool &inserted = Uniquer[RI->first];
+ if (inserted)
+ continue;
+ inserted = true;
+ remap.push_back(*RI);
+ }
+ }
+
+ return hasErrorOccurred;
+}
+
+//===----------------------------------------------------------------------===//
+// CollectTransformActions.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class ARCMTMacroTrackerPPCallbacks : public PPCallbacks {
+ std::vector<SourceLocation> &ARCMTMacroLocs;
+
+public:
+ ARCMTMacroTrackerPPCallbacks(std::vector<SourceLocation> &ARCMTMacroLocs)
+ : ARCMTMacroLocs(ARCMTMacroLocs) { }
+
+ virtual void MacroExpands(const Token &MacroNameTok, const MacroInfo *MI,
+ SourceRange Range) {
+ if (MacroNameTok.getIdentifierInfo()->getName() == getARCMTMacroName())
+ ARCMTMacroLocs.push_back(MacroNameTok.getLocation());
+ }
+};
+
+class ARCMTMacroTrackerAction : public ASTFrontendAction {
+ std::vector<SourceLocation> &ARCMTMacroLocs;
+
+public:
+ ARCMTMacroTrackerAction(std::vector<SourceLocation> &ARCMTMacroLocs)
+ : ARCMTMacroLocs(ARCMTMacroLocs) { }
+
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ CI.getPreprocessor().addPPCallbacks(
+ new ARCMTMacroTrackerPPCallbacks(ARCMTMacroLocs));
+ return new ASTConsumer();
+ }
+};
+
+class RewritesApplicator : public TransformActions::RewriteReceiver {
+ Rewriter &rewriter;
+ ASTContext &Ctx;
+ MigrationProcess::RewriteListener *Listener;
+
+public:
+ RewritesApplicator(Rewriter &rewriter, ASTContext &ctx,
+ MigrationProcess::RewriteListener *listener)
+ : rewriter(rewriter), Ctx(ctx), Listener(listener) {
+ if (Listener)
+ Listener->start(ctx);
+ }
+ ~RewritesApplicator() {
+ if (Listener)
+ Listener->finish();
+ }
+
+ virtual void insert(SourceLocation loc, StringRef text) {
+ bool err = rewriter.InsertText(loc, text, /*InsertAfter=*/true,
+ /*indentNewLines=*/true);
+ if (!err && Listener)
+ Listener->insert(loc, text);
+ }
+
+ virtual void remove(CharSourceRange range) {
+ Rewriter::RewriteOptions removeOpts;
+ removeOpts.IncludeInsertsAtBeginOfRange = false;
+ removeOpts.IncludeInsertsAtEndOfRange = false;
+ removeOpts.RemoveLineIfEmpty = true;
+
+ bool err = rewriter.RemoveText(range, removeOpts);
+ if (!err && Listener)
+ Listener->remove(range);
+ }
+
+ virtual void increaseIndentation(CharSourceRange range,
+ SourceLocation parentIndent) {
+ rewriter.IncreaseIndentation(range, parentIndent);
+ }
+};
+
+} // end anonymous namespace.
+
+/// \brief Anchor for VTable.
+MigrationProcess::RewriteListener::~RewriteListener() { }
+
+MigrationProcess::MigrationProcess(const CompilerInvocation &CI,
+ DiagnosticConsumer *diagClient,
+ StringRef outputDir)
+ : OrigCI(CI), DiagClient(diagClient) {
+ if (!outputDir.empty()) {
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+    Remapper.initFromDisk(outputDir, *Diags, /*ignoreIfFilesChanged=*/true);
+ }
+}
+
+bool MigrationProcess::applyTransform(TransformFn trans,
+ RewriteListener *listener) {
+ OwningPtr<CompilerInvocation> CInvok;
+ CInvok.reset(createInvocationForMigration(OrigCI));
+ CInvok->getDiagnosticOpts().IgnoreWarnings = true;
+
+ Remapper.applyMappings(CInvok->getPreprocessorOpts());
+
+ CapturedDiagList capturedDiags;
+ std::vector<SourceLocation> ARCMTMacroLocs;
+
+ assert(DiagClient);
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, DiagClient, /*ShouldOwnClient=*/false));
+
+  // Filter all diagnostics.
+ CaptureDiagnosticConsumer errRec(*Diags, capturedDiags);
+ Diags->setClient(&errRec, /*ShouldOwnClient=*/false);
+
+ OwningPtr<ARCMTMacroTrackerAction> ASTAction;
+ ASTAction.reset(new ARCMTMacroTrackerAction(ARCMTMacroLocs));
+
+ OwningPtr<ASTUnit> Unit(
+ ASTUnit::LoadFromCompilerInvocationAction(CInvok.take(), Diags,
+ ASTAction.get()));
+ if (!Unit)
+ return true;
+ Unit->setOwnsRemappedFileBuffers(false); // FileRemapper manages that.
+
+ // Don't filter diagnostics anymore.
+ Diags->setClient(DiagClient, /*ShouldOwnClient=*/false);
+
+ ASTContext &Ctx = Unit->getASTContext();
+
+ if (Diags->hasFatalErrorOccurred()) {
+ Diags->Reset();
+ DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor());
+ capturedDiags.reportDiagnostics(*Diags);
+ DiagClient->EndSourceFile();
+ return true;
+ }
+
+  // After parsing of the source files has ended, we want to reuse the
+ // diagnostics objects to emit further diagnostics.
+ // We call BeginSourceFile because DiagnosticConsumer requires that
+ // diagnostics with source range information are emitted only in between
+ // BeginSourceFile() and EndSourceFile().
+ DiagClient->BeginSourceFile(Ctx.getLangOpts(), &Unit->getPreprocessor());
+
+ Rewriter rewriter(Ctx.getSourceManager(), Ctx.getLangOpts());
+ TransformActions TA(*Diags, capturedDiags, Ctx, Unit->getPreprocessor());
+ MigrationPass pass(Ctx, OrigCI.getLangOpts()->getGC(),
+ Unit->getSema(), TA, ARCMTMacroLocs);
+
+ trans(pass);
+
+ {
+ RewritesApplicator applicator(rewriter, Ctx, listener);
+ TA.applyRewrites(applicator);
+ }
+
+ DiagClient->EndSourceFile();
+
+ if (DiagClient->getNumErrors())
+ return true;
+
+ for (Rewriter::buffer_iterator
+ I = rewriter.buffer_begin(), E = rewriter.buffer_end(); I != E; ++I) {
+ FileID FID = I->first;
+ RewriteBuffer &buf = I->second;
+ const FileEntry *file = Ctx.getSourceManager().getFileEntryForID(FID);
+ assert(file);
+ std::string newFname = file->getName();
+ newFname += "-trans";
+ SmallString<512> newText;
+ llvm::raw_svector_ostream vecOS(newText);
+ buf.write(vecOS);
+ vecOS.flush();
+ llvm::MemoryBuffer *memBuf = llvm::MemoryBuffer::getMemBufferCopy(
+ StringRef(newText.data(), newText.size()), newFname);
+ SmallString<64> filePath(file->getName());
+ Unit->getFileManager().FixupRelativePath(filePath);
+ Remapper.remap(filePath.str(), memBuf);
+ }
+
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMTActions.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMTActions.cpp
new file mode 100644
index 0000000..0ed36dd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/ARCMTActions.cpp
@@ -0,0 +1,60 @@
+//===--- ARCMTActions.cpp - ARC Migrate Tool Frontend Actions ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/ARCMigrate/ARCMTActions.h"
+#include "clang/ARCMigrate/ARCMT.h"
+#include "clang/Frontend/CompilerInstance.h"
+
+using namespace clang;
+using namespace arcmt;
+
+bool CheckAction::BeginInvocation(CompilerInstance &CI) {
+ if (arcmt::checkForManualIssues(CI.getInvocation(), getCurrentInput(),
+ CI.getDiagnostics().getClient()))
+ return false; // errors, stop the action.
+
+ // We only want to see warnings reported from arcmt::checkForManualIssues.
+ CI.getDiagnostics().setIgnoreAllWarnings(true);
+ return true;
+}
+
+CheckAction::CheckAction(FrontendAction *WrappedAction)
+ : WrapperFrontendAction(WrappedAction) {}
+
+bool ModifyAction::BeginInvocation(CompilerInstance &CI) {
+ return !arcmt::applyTransformations(CI.getInvocation(), getCurrentInput(),
+ CI.getDiagnostics().getClient());
+}
+
+ModifyAction::ModifyAction(FrontendAction *WrappedAction)
+ : WrapperFrontendAction(WrappedAction) {}
+
+bool MigrateAction::BeginInvocation(CompilerInstance &CI) {
+ if (arcmt::migrateWithTemporaryFiles(CI.getInvocation(),
+ getCurrentInput(),
+ CI.getDiagnostics().getClient(),
+ MigrateDir,
+ EmitPremigrationARCErros,
+ PlistOut))
+ return false; // errors, stop the action.
+
+ // We only want to see diagnostics emitted by migrateWithTemporaryFiles.
+ CI.getDiagnostics().setIgnoreAllWarnings(true);
+ return true;
+}
+
+MigrateAction::MigrateAction(FrontendAction *WrappedAction,
+ StringRef migrateDir,
+ StringRef plistOut,
+ bool emitPremigrationARCErrors)
+ : WrapperFrontendAction(WrappedAction), MigrateDir(migrateDir),
+ PlistOut(plistOut), EmitPremigrationARCErros(emitPremigrationARCErrors) {
+ if (MigrateDir.empty())
+ MigrateDir = "."; // user current directory if none is given.
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp
new file mode 100644
index 0000000..474ce7d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/FileRemapper.cpp
@@ -0,0 +1,293 @@
+//===--- FileRemapper.cpp - File Remapping Helper -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/ARCMigrate/FileRemapper.h"
+#include "clang/Frontend/PreprocessorOptions.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/raw_ostream.h"
+#include <fstream>
+
+using namespace clang;
+using namespace arcmt;
+
+FileRemapper::FileRemapper() {
+ FileMgr.reset(new FileManager(FileSystemOptions()));
+}
+
+FileRemapper::~FileRemapper() {
+ clear();
+}
+
+void FileRemapper::clear(StringRef outputDir) {
+ for (MappingsTy::iterator
+ I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I)
+ resetTarget(I->second);
+ FromToMappings.clear();
+ assert(ToFromMappings.empty());
+ if (!outputDir.empty()) {
+ std::string infoFile = getRemapInfoFile(outputDir);
+ bool existed;
+ llvm::sys::fs::remove(infoFile, existed);
+ }
+}
+
+std::string FileRemapper::getRemapInfoFile(StringRef outputDir) {
+ assert(!outputDir.empty());
+ llvm::sys::Path dir(outputDir);
+ llvm::sys::Path infoFile = dir;
+ infoFile.appendComponent("remap");
+ return infoFile.str();
+}
+
+bool FileRemapper::initFromDisk(StringRef outputDir, DiagnosticsEngine &Diag,
+ bool ignoreIfFilesChanged) {
+ std::string infoFile = getRemapInfoFile(outputDir);
+ return initFromFile(infoFile, Diag, ignoreIfFilesChanged);
+}
+
+bool FileRemapper::initFromFile(StringRef filePath, DiagnosticsEngine &Diag,
+ bool ignoreIfFilesChanged) {
+ assert(FromToMappings.empty() &&
+ "initFromDisk should be called before any remap calls");
+ std::string infoFile = filePath;
+ bool fileExists = false;
+ llvm::sys::fs::exists(infoFile, fileExists);
+ if (!fileExists)
+ return false;
+
+ std::vector<std::pair<const FileEntry *, const FileEntry *> > pairs;
+
+ OwningPtr<llvm::MemoryBuffer> fileBuf;
+ if (llvm::MemoryBuffer::getFile(infoFile.c_str(), fileBuf))
+ return report("Error opening file: " + infoFile, Diag);
+
+ SmallVector<StringRef, 64> lines;
+ fileBuf->getBuffer().split(lines, "\n");
+
+ for (unsigned idx = 0; idx+3 <= lines.size(); idx += 3) {
+ StringRef fromFilename = lines[idx];
+ unsigned long long timeModified;
+ lines[idx+1].getAsInteger(10, timeModified);
+ StringRef toFilename = lines[idx+2];
+
+ const FileEntry *origFE = FileMgr->getFile(fromFilename);
+ if (!origFE) {
+ if (ignoreIfFilesChanged)
+ continue;
+ return report("File does not exist: " + fromFilename, Diag);
+ }
+ const FileEntry *newFE = FileMgr->getFile(toFilename);
+ if (!newFE) {
+ if (ignoreIfFilesChanged)
+ continue;
+ return report("File does not exist: " + toFilename, Diag);
+ }
+
+ if ((uint64_t)origFE->getModificationTime() != timeModified) {
+ if (ignoreIfFilesChanged)
+ continue;
+ return report("File was modified: " + fromFilename, Diag);
+ }
+
+ pairs.push_back(std::make_pair(origFE, newFE));
+ }
+
+ for (unsigned i = 0, e = pairs.size(); i != e; ++i)
+ remap(pairs[i].first, pairs[i].second);
+
+ return false;
+}
+
+bool FileRemapper::flushToDisk(StringRef outputDir, DiagnosticsEngine &Diag) {
+ using namespace llvm::sys;
+
+ bool existed;
+ if (fs::create_directory(outputDir, existed) != llvm::errc::success)
+ return report("Could not create directory: " + outputDir, Diag);
+
+ std::string infoFile = getRemapInfoFile(outputDir);
+ return flushToFile(infoFile, Diag);
+}
+
+bool FileRemapper::flushToFile(StringRef outputPath, DiagnosticsEngine &Diag) {
+ using namespace llvm::sys;
+
+ std::string errMsg;
+ std::string infoFile = outputPath;
+ llvm::raw_fd_ostream infoOut(infoFile.c_str(), errMsg,
+ llvm::raw_fd_ostream::F_Binary);
+ if (!errMsg.empty())
+ return report(errMsg, Diag);
+
+ for (MappingsTy::iterator
+ I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
+
+ const FileEntry *origFE = I->first;
+ SmallString<200> origPath = StringRef(origFE->getName());
+ fs::make_absolute(origPath);
+ infoOut << origPath << '\n';
+ infoOut << (uint64_t)origFE->getModificationTime() << '\n';
+
+ if (const FileEntry *FE = I->second.dyn_cast<const FileEntry *>()) {
+ SmallString<200> newPath = StringRef(FE->getName());
+ fs::make_absolute(newPath);
+ infoOut << newPath << '\n';
+ } else {
+
+ SmallString<64> tempPath;
+ tempPath = path::filename(origFE->getName());
+ tempPath += "-%%%%%%%%";
+ tempPath += path::extension(origFE->getName());
+ int fd;
+ if (fs::unique_file(tempPath.str(), fd, tempPath) != llvm::errc::success)
+ return report("Could not create file: " + tempPath.str(), Diag);
+
+ llvm::raw_fd_ostream newOut(fd, /*shouldClose=*/true);
+ llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>();
+ newOut.write(mem->getBufferStart(), mem->getBufferSize());
+ newOut.close();
+
+ const FileEntry *newE = FileMgr->getFile(tempPath);
+ remap(origFE, newE);
+ infoOut << newE->getName() << '\n';
+ }
+ }
+
+ infoOut.close();
+ return false;
+}
+
+bool FileRemapper::overwriteOriginal(DiagnosticsEngine &Diag,
+ StringRef outputDir) {
+ using namespace llvm::sys;
+
+ for (MappingsTy::iterator
+ I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
+ const FileEntry *origFE = I->first;
+ if (const FileEntry *newFE = I->second.dyn_cast<const FileEntry *>()) {
+ if (fs::copy_file(newFE->getName(), origFE->getName(),
+ fs::copy_option::overwrite_if_exists) != llvm::errc::success)
+ return report(StringRef("Could not copy file '") + newFE->getName() +
+ "' to file '" + origFE->getName() + "'", Diag);
+ } else {
+
+ bool fileExists = false;
+ fs::exists(origFE->getName(), fileExists);
+ if (!fileExists)
+ return report(StringRef("File does not exist: ") + origFE->getName(),
+ Diag);
+
+ std::string errMsg;
+ llvm::raw_fd_ostream Out(origFE->getName(), errMsg,
+ llvm::raw_fd_ostream::F_Binary);
+ if (!errMsg.empty())
+ return report(errMsg, Diag);
+
+ llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>();
+ Out.write(mem->getBufferStart(), mem->getBufferSize());
+ Out.close();
+ }
+ }
+
+ clear(outputDir);
+ return false;
+}
+
+void FileRemapper::applyMappings(PreprocessorOptions &PPOpts) const {
+ for (MappingsTy::const_iterator
+ I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
+ if (const FileEntry *FE = I->second.dyn_cast<const FileEntry *>()) {
+ PPOpts.addRemappedFile(I->first->getName(), FE->getName());
+ } else {
+ llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>();
+ PPOpts.addRemappedFile(I->first->getName(), mem);
+ }
+ }
+
+ PPOpts.RetainRemappedFileBuffers = true;
+}
+
+void FileRemapper::transferMappingsAndClear(PreprocessorOptions &PPOpts) {
+ for (MappingsTy::iterator
+ I = FromToMappings.begin(), E = FromToMappings.end(); I != E; ++I) {
+ if (const FileEntry *FE = I->second.dyn_cast<const FileEntry *>()) {
+ PPOpts.addRemappedFile(I->first->getName(), FE->getName());
+ } else {
+ llvm::MemoryBuffer *mem = I->second.get<llvm::MemoryBuffer *>();
+ PPOpts.addRemappedFile(I->first->getName(), mem);
+ }
+ I->second = Target();
+ }
+
+ PPOpts.RetainRemappedFileBuffers = false;
+ clear();
+}
+
+void FileRemapper::remap(StringRef filePath, llvm::MemoryBuffer *memBuf) {
+ remap(getOriginalFile(filePath), memBuf);
+}
+
+void FileRemapper::remap(StringRef filePath, StringRef newPath) {
+ const FileEntry *file = getOriginalFile(filePath);
+ const FileEntry *newfile = FileMgr->getFile(newPath);
+ remap(file, newfile);
+}
+
+void FileRemapper::remap(const FileEntry *file, llvm::MemoryBuffer *memBuf) {
+ assert(file);
+ Target &targ = FromToMappings[file];
+ resetTarget(targ);
+ targ = memBuf;
+}
+
+void FileRemapper::remap(const FileEntry *file, const FileEntry *newfile) {
+ assert(file && newfile);
+ Target &targ = FromToMappings[file];
+ resetTarget(targ);
+ targ = newfile;
+ ToFromMappings[newfile] = file;
+}
+
+const FileEntry *FileRemapper::getOriginalFile(StringRef filePath) {
+ const FileEntry *file = FileMgr->getFile(filePath);
+ // If we are updating a file that overrode an original file,
+ // actually update the original file.
+ llvm::DenseMap<const FileEntry *, const FileEntry *>::iterator
+ I = ToFromMappings.find(file);
+ if (I != ToFromMappings.end()) {
+ file = I->second;
+ assert(FromToMappings.find(file) != FromToMappings.end() &&
+ "Original file not in mappings!");
+ }
+ return file;
+}
+
+void FileRemapper::resetTarget(Target &targ) {
+ if (!targ)
+ return;
+
+ if (llvm::MemoryBuffer *oldmem = targ.dyn_cast<llvm::MemoryBuffer *>()) {
+ delete oldmem;
+ } else {
+ const FileEntry *toFE = targ.get<const FileEntry *>();
+ ToFromMappings.erase(toFE);
+ }
+}
+
+bool FileRemapper::report(const Twine &err, DiagnosticsEngine &Diag) {
+ SmallString<128> buf;
+ unsigned ID = Diag.getDiagnosticIDs()->getCustomDiagID(DiagnosticIDs::Error,
+ err.toStringRef(buf));
+ Diag.Report(ID);
+ return true;
+}
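
As a usage sketch (not part of the diff above): the typical FileRemapper lifecycle is to load any previous mappings from the output directory, record new buffers for rewritten files, and flush the index back to disk (or overwrite the originals). A rough, illustrative example under those assumptions, with Diags standing in for a caller-provided DiagnosticsEngine:

    #include "clang/ARCMigrate/FileRemapper.h"
    #include "llvm/Support/MemoryBuffer.h"

    clang::arcmt::FileRemapper Remapper;
    Remapper.initFromDisk("/tmp/migrate-out", Diags, /*ignoreIfFilesChanged=*/true);

    // Associate an in-memory rewritten buffer with the original file.
    Remapper.remap("Foo.m",
                   llvm::MemoryBuffer::getMemBufferCopy("/* migrated text */",
                                                        "Foo.m"));

    // Persist the mappings; flushToDisk writes the "remap" index plus temp files.
    Remapper.flushToDisk("/tmp/migrate-out", Diags);
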
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h b/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h
new file mode 100644
index 0000000..59177c4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/Internals.h
@@ -0,0 +1,170 @@
+//===-- Internals.h - Implementation Details --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_ARCMIGRATE_INTERNALS_H
+#define LLVM_CLANG_LIB_ARCMIGRATE_INTERNALS_H
+
+#include "clang/ARCMigrate/ARCMT.h"
+#include "llvm/ADT/ArrayRef.h"
+
+namespace clang {
+ class Sema;
+ class Stmt;
+
+namespace arcmt {
+
+class CapturedDiagList {
+ typedef std::list<StoredDiagnostic> ListTy;
+ ListTy List;
+
+public:
+ void push_back(const StoredDiagnostic &diag) { List.push_back(diag); }
+
+ bool clearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range);
+ bool hasDiagnostic(ArrayRef<unsigned> IDs, SourceRange range) const;
+
+ void reportDiagnostics(DiagnosticsEngine &diags) const;
+
+ bool hasErrors() const;
+
+ typedef ListTy::const_iterator iterator;
+ iterator begin() const { return List.begin(); }
+ iterator end() const { return List.end(); }
+};
+
+void writeARCDiagsToPlist(const std::string &outPath,
+ ArrayRef<StoredDiagnostic> diags,
+ SourceManager &SM, const LangOptions &LangOpts);
+
+class TransformActions {
+ DiagnosticsEngine &Diags;
+ CapturedDiagList &CapturedDiags;
+ bool ReportedErrors;
+ void *Impl; // TransformActionsImpl.
+
+public:
+ TransformActions(DiagnosticsEngine &diag, CapturedDiagList &capturedDiags,
+ ASTContext &ctx, Preprocessor &PP);
+ ~TransformActions();
+
+ void startTransaction();
+ bool commitTransaction();
+ void abortTransaction();
+
+ void insert(SourceLocation loc, StringRef text);
+ void insertAfterToken(SourceLocation loc, StringRef text);
+ void remove(SourceRange range);
+ void removeStmt(Stmt *S);
+ void replace(SourceRange range, StringRef text);
+ void replace(SourceRange range, SourceRange replacementRange);
+ void replaceStmt(Stmt *S, StringRef text);
+ void replaceText(SourceLocation loc, StringRef text,
+ StringRef replacementText);
+ void increaseIndentation(SourceRange range,
+ SourceLocation parentIndent);
+
+ bool clearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range);
+ bool clearAllDiagnostics(SourceRange range) {
+ return clearDiagnostic(ArrayRef<unsigned>(), range);
+ }
+ bool clearDiagnostic(unsigned ID1, unsigned ID2, SourceRange range) {
+ unsigned IDs[] = { ID1, ID2 };
+ return clearDiagnostic(IDs, range);
+ }
+ bool clearDiagnostic(unsigned ID1, unsigned ID2, unsigned ID3,
+ SourceRange range) {
+ unsigned IDs[] = { ID1, ID2, ID3 };
+ return clearDiagnostic(IDs, range);
+ }
+
+ bool hasDiagnostic(unsigned ID, SourceRange range) {
+ return CapturedDiags.hasDiagnostic(ID, range);
+ }
+
+ bool hasDiagnostic(unsigned ID1, unsigned ID2, SourceRange range) {
+ unsigned IDs[] = { ID1, ID2 };
+ return CapturedDiags.hasDiagnostic(IDs, range);
+ }
+
+ void reportError(StringRef error, SourceLocation loc,
+ SourceRange range = SourceRange());
+ void reportWarning(StringRef warning, SourceLocation loc,
+ SourceRange range = SourceRange());
+ void reportNote(StringRef note, SourceLocation loc,
+ SourceRange range = SourceRange());
+
+ bool hasReportedErrors() const { return ReportedErrors; }
+
+ class RewriteReceiver {
+ public:
+ virtual ~RewriteReceiver();
+
+ virtual void insert(SourceLocation loc, StringRef text) = 0;
+ virtual void remove(CharSourceRange range) = 0;
+ virtual void increaseIndentation(CharSourceRange range,
+ SourceLocation parentIndent) = 0;
+ };
+
+ void applyRewrites(RewriteReceiver &receiver);
+};
+
+class Transaction {
+ TransformActions &TA;
+ bool Aborted;
+
+public:
+ Transaction(TransformActions &TA) : TA(TA), Aborted(false) {
+ TA.startTransaction();
+ }
+
+ ~Transaction() {
+ if (!isAborted())
+ TA.commitTransaction();
+ }
+
+ void abort() {
+ TA.abortTransaction();
+ Aborted = true;
+ }
+
+ bool isAborted() const { return Aborted; }
+};
+
+class MigrationPass {
+public:
+ ASTContext &Ctx;
+ LangOptions::GCMode OrigGCMode;
+ MigratorOptions MigOptions;
+ Sema &SemaRef;
+ TransformActions &TA;
+ std::vector<SourceLocation> &ARCMTMacroLocs;
+
+ MigrationPass(ASTContext &Ctx, LangOptions::GCMode OrigGCMode,
+ Sema &sema, TransformActions &TA,
+ std::vector<SourceLocation> &ARCMTMacroLocs)
+ : Ctx(Ctx), OrigGCMode(OrigGCMode), MigOptions(),
+ SemaRef(sema), TA(TA),
+ ARCMTMacroLocs(ARCMTMacroLocs) { }
+
+ bool isGCMigration() const { return OrigGCMode != LangOptions::NonGC; }
+ bool noNSAllocReallocError() const { return MigOptions.NoNSAllocReallocError; }
+ void setNSAllocReallocError(bool val) { MigOptions.NoNSAllocReallocError = val; }
+ bool noFinalizeRemoval() const { return MigOptions.NoFinalizeRemoval; }
+ void setNoFinalizeRemoval(bool val) { MigOptions.NoFinalizeRemoval = val; }
+};
+
+static inline StringRef getARCMTMacroName() {
+ return "__IMPL_ARCMT_REMOVED_EXPR__";
+}
+
+} // end namespace arcmt
+
+} // end namespace clang
+
+#endif
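
As a usage sketch (not part of the diff above): the Transaction class is the RAII guard every transform in this library uses; edits queued on TransformActions inside its lifetime are committed as a unit, or dropped if abort() is called. A minimal illustration of the pattern, assuming code inside lib/ARCMigrate with "Internals.h" included and a MigrationPass reference like the transforms below receive:

    void removeStmtAsUnit(clang::arcmt::MigrationPass &Pass, clang::Stmt *S) {
      clang::arcmt::Transaction Trans(Pass.TA);  // calls TA.startTransaction()
      Pass.TA.removeStmt(S);                     // edit is queued, not yet applied
      // ~Transaction calls TA.commitTransaction() unless Trans.abort() ran first.
    }
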
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp
new file mode 100644
index 0000000..e635274
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/ObjCMT.cpp
@@ -0,0 +1,226 @@
+//===--- ObjCMT.cpp - ObjC Migrate Tool -----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/ARCMigrate/ARCMTActions.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/MultiplexConsumer.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/NSAPI.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/Edit/Rewriters.h"
+#include "clang/Edit/EditedSource.h"
+#include "clang/Edit/Commit.h"
+#include "clang/Edit/EditsReceiver.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/FileManager.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace arcmt;
+
+namespace {
+
+class ObjCMigrateASTConsumer : public ASTConsumer {
+ void migrateDecl(Decl *D);
+
+public:
+ std::string MigrateDir;
+ bool MigrateLiterals;
+ bool MigrateSubscripting;
+ llvm::OwningPtr<NSAPI> NSAPIObj;
+ llvm::OwningPtr<edit::EditedSource> Editor;
+ FileRemapper &Remapper;
+ FileManager &FileMgr;
+ const PreprocessingRecord *PPRec;
+ bool IsOutputFile;
+
+ ObjCMigrateASTConsumer(StringRef migrateDir,
+ bool migrateLiterals,
+ bool migrateSubscripting,
+ FileRemapper &remapper,
+ FileManager &fileMgr,
+ const PreprocessingRecord *PPRec,
+ bool isOutputFile = false)
+ : MigrateDir(migrateDir),
+ MigrateLiterals(migrateLiterals),
+ MigrateSubscripting(migrateSubscripting),
+ Remapper(remapper), FileMgr(fileMgr), PPRec(PPRec),
+ IsOutputFile(isOutputFile) { }
+
+protected:
+ virtual void Initialize(ASTContext &Context) {
+ NSAPIObj.reset(new NSAPI(Context));
+ Editor.reset(new edit::EditedSource(Context.getSourceManager(),
+ Context.getLangOpts(),
+ PPRec));
+ }
+
+ virtual bool HandleTopLevelDecl(DeclGroupRef DG) {
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
+ migrateDecl(*I);
+ return true;
+ }
+ virtual void HandleInterestingDecl(DeclGroupRef DG) {
+ // Ignore decls from the PCH.
+ }
+ virtual void HandleTopLevelDeclInObjCContainer(DeclGroupRef DG) {
+ ObjCMigrateASTConsumer::HandleTopLevelDecl(DG);
+ }
+
+ virtual void HandleTranslationUnit(ASTContext &Ctx);
+};
+
+}
+
+ObjCMigrateAction::ObjCMigrateAction(FrontendAction *WrappedAction,
+ StringRef migrateDir,
+ bool migrateLiterals,
+ bool migrateSubscripting)
+ : WrapperFrontendAction(WrappedAction), MigrateDir(migrateDir),
+ MigrateLiterals(migrateLiterals), MigrateSubscripting(migrateSubscripting),
+ CompInst(0) {
+ if (MigrateDir.empty())
+ MigrateDir = "."; // use current directory if none is given.
+}
+
+ASTConsumer *ObjCMigrateAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ ASTConsumer *
+ WrappedConsumer = WrapperFrontendAction::CreateASTConsumer(CI, InFile);
+ ASTConsumer *MTConsumer = new ObjCMigrateASTConsumer(MigrateDir,
+ MigrateLiterals,
+ MigrateSubscripting,
+ Remapper,
+ CompInst->getFileManager(),
+ CompInst->getPreprocessor().getPreprocessingRecord());
+ ASTConsumer *Consumers[] = { MTConsumer, WrappedConsumer };
+ return new MultiplexConsumer(Consumers);
+}
+
+bool ObjCMigrateAction::BeginInvocation(CompilerInstance &CI) {
+ Remapper.initFromDisk(MigrateDir, CI.getDiagnostics(),
+ /*ignoreIfFilesChanged=*/true);
+ CompInst = &CI;
+ CI.getDiagnostics().setIgnoreAllWarnings(true);
+ CI.getPreprocessorOpts().DetailedRecord = true;
+ CI.getPreprocessorOpts().DetailedRecordConditionalDirectives = true;
+ return true;
+}
+
+namespace {
+class ObjCMigrator : public RecursiveASTVisitor<ObjCMigrator> {
+ ObjCMigrateASTConsumer &Consumer;
+
+public:
+ ObjCMigrator(ObjCMigrateASTConsumer &consumer) : Consumer(consumer) { }
+
+ bool shouldVisitTemplateInstantiations() const { return false; }
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ if (Consumer.MigrateLiterals) {
+ edit::Commit commit(*Consumer.Editor);
+ edit::rewriteToObjCLiteralSyntax(E, *Consumer.NSAPIObj, commit);
+ Consumer.Editor->commit(commit);
+ }
+
+ if (Consumer.MigrateSubscripting) {
+ edit::Commit commit(*Consumer.Editor);
+ edit::rewriteToObjCSubscriptSyntax(E, *Consumer.NSAPIObj, commit);
+ Consumer.Editor->commit(commit);
+ }
+
+ return true;
+ }
+
+ bool TraverseObjCMessageExpr(ObjCMessageExpr *E) {
+ // Do depth first; we want to rewrite the subexpressions first so that if
+ // we have to move expressions we will move them already rewritten.
+ for (Stmt::child_range range = E->children(); range; ++range)
+ if (!TraverseStmt(*range))
+ return false;
+
+ return WalkUpFromObjCMessageExpr(E);
+ }
+};
+}
+
+void ObjCMigrateASTConsumer::migrateDecl(Decl *D) {
+ if (!D)
+ return;
+ if (isa<ObjCMethodDecl>(D))
+ return; // Wait for the ObjC container declaration.
+
+ ObjCMigrator(*this).TraverseDecl(D);
+}
+
+namespace {
+
+class RewritesReceiver : public edit::EditsReceiver {
+ Rewriter &Rewrite;
+
+public:
+ RewritesReceiver(Rewriter &Rewrite) : Rewrite(Rewrite) { }
+
+ virtual void insert(SourceLocation loc, StringRef text) {
+ Rewrite.InsertText(loc, text);
+ }
+ virtual void replace(CharSourceRange range, StringRef text) {
+ Rewrite.ReplaceText(range.getBegin(), Rewrite.getRangeSize(range), text);
+ }
+};
+
+}
+
+void ObjCMigrateASTConsumer::HandleTranslationUnit(ASTContext &Ctx) {
+ Rewriter rewriter(Ctx.getSourceManager(), Ctx.getLangOpts());
+ RewritesReceiver Rec(rewriter);
+ Editor->applyRewrites(Rec);
+
+ for (Rewriter::buffer_iterator
+ I = rewriter.buffer_begin(), E = rewriter.buffer_end(); I != E; ++I) {
+ FileID FID = I->first;
+ RewriteBuffer &buf = I->second;
+ const FileEntry *file = Ctx.getSourceManager().getFileEntryForID(FID);
+ assert(file);
+ llvm::SmallString<512> newText;
+ llvm::raw_svector_ostream vecOS(newText);
+ buf.write(vecOS);
+ vecOS.flush();
+ llvm::MemoryBuffer *memBuf = llvm::MemoryBuffer::getMemBufferCopy(
+ StringRef(newText.data(), newText.size()), file->getName());
+ llvm::SmallString<64> filePath(file->getName());
+ FileMgr.FixupRelativePath(filePath);
+ Remapper.remap(filePath.str(), memBuf);
+ }
+
+ if (IsOutputFile) {
+ Remapper.flushToFile(MigrateDir, Ctx.getDiagnostics());
+ } else {
+ Remapper.flushToDisk(MigrateDir, Ctx.getDiagnostics());
+ }
+}
+
+bool MigrateSourceAction::BeginInvocation(CompilerInstance &CI) {
+ CI.getPreprocessorOpts().DetailedRecord = true;
+ CI.getPreprocessorOpts().DetailedRecordConditionalDirectives = true;
+ return true;
+}
+
+ASTConsumer *MigrateSourceAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return new ObjCMigrateASTConsumer(CI.getFrontendOpts().OutputFile,
+ /*MigrateLiterals=*/true,
+ /*MigrateSubscripting=*/true,
+ Remapper,
+ CI.getFileManager(),
+ CI.getPreprocessor().getPreprocessingRecord(),
+ /*isOutputFile=*/true);
+}
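
As a usage sketch (not part of the diff above): ObjCMigrateAction mirrors the ARC actions in that it wraps another FrontendAction and layers the literals/subscripting consumer on top. An illustrative construction, again assuming clang::SyntaxOnlyAction as the wrapped action and that the class is declared in clang::arcmt alongside the ARC actions:

    clang::FrontendAction *makeObjCMigrateAction() {
      return new clang::arcmt::ObjCMigrateAction(new clang::SyntaxOnlyAction(),
                                                 /*migrateDir=*/"",  // falls back to "."
                                                 /*migrateLiterals=*/true,
                                                 /*migrateSubscripting=*/true);
    }
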
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/PlistReporter.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/PlistReporter.cpp
new file mode 100644
index 0000000..d1bc90f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/PlistReporter.cpp
@@ -0,0 +1,195 @@
+//===--- PlistReporter.cpp - ARC Migrate Tool Plist Reporter ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Internals.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+using namespace clang;
+using namespace arcmt;
+
+// FIXME: This duplicates significant functionality from PlistDiagnostics.cpp,
+// it would be jolly good if there was a reusable PlistWriter or something.
+
+typedef llvm::DenseMap<FileID, unsigned> FIDMap;
+
+static void AddFID(FIDMap &FIDs, SmallVectorImpl<FileID> &V,
+ const SourceManager &SM, SourceLocation L) {
+
+ FileID FID = SM.getFileID(SM.getExpansionLoc(L));
+ FIDMap::iterator I = FIDs.find(FID);
+ if (I != FIDs.end()) return;
+ FIDs[FID] = V.size();
+ V.push_back(FID);
+}
+
+static unsigned GetFID(const FIDMap& FIDs, const SourceManager &SM,
+ SourceLocation L) {
+ FileID FID = SM.getFileID(SM.getExpansionLoc(L));
+ FIDMap::const_iterator I = FIDs.find(FID);
+ assert(I != FIDs.end());
+ return I->second;
+}
+
+static raw_ostream& Indent(raw_ostream& o, const unsigned indent) {
+ for (unsigned i = 0; i < indent; ++i) o << ' ';
+ return o;
+}
+
+static void EmitLocation(raw_ostream& o, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ SourceLocation L, const FIDMap &FM,
+ unsigned indent, bool extend = false) {
+
+ FullSourceLoc Loc(SM.getExpansionLoc(L), const_cast<SourceManager&>(SM));
+
+ // Add in the length of the token, so that we cover multi-char tokens.
+ unsigned offset =
+ extend ? Lexer::MeasureTokenLength(Loc, SM, LangOpts) - 1 : 0;
+
+ Indent(o, indent) << "<dict>\n";
+ Indent(o, indent) << " <key>line</key><integer>"
+ << Loc.getExpansionLineNumber() << "</integer>\n";
+ Indent(o, indent) << " <key>col</key><integer>"
+ << Loc.getExpansionColumnNumber() + offset << "</integer>\n";
+ Indent(o, indent) << " <key>file</key><integer>"
+ << GetFID(FM, SM, Loc) << "</integer>\n";
+ Indent(o, indent) << "</dict>\n";
+}
+
+static void EmitRange(raw_ostream& o, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ CharSourceRange R, const FIDMap &FM,
+ unsigned indent) {
+ Indent(o, indent) << "<array>\n";
+ EmitLocation(o, SM, LangOpts, R.getBegin(), FM, indent+1);
+ EmitLocation(o, SM, LangOpts, R.getEnd(), FM, indent+1, R.isTokenRange());
+ Indent(o, indent) << "</array>\n";
+}
+
+static raw_ostream& EmitString(raw_ostream& o,
+ StringRef s) {
+ o << "<string>";
+ for (StringRef::const_iterator I=s.begin(), E=s.end(); I!=E; ++I) {
+ char c = *I;
+ switch (c) {
+ default: o << c; break;
+ case '&': o << "&amp;"; break;
+ case '<': o << "&lt;"; break;
+ case '>': o << "&gt;"; break;
+ case '\'': o << "&apos;"; break;
+ case '\"': o << "&quot;"; break;
+ }
+ }
+ o << "</string>";
+ return o;
+}
+
+void arcmt::writeARCDiagsToPlist(const std::string &outPath,
+ ArrayRef<StoredDiagnostic> diags,
+ SourceManager &SM,
+ const LangOptions &LangOpts) {
+ DiagnosticIDs DiagIDs;
+
+ // Build up a set of FIDs that we use by scanning the locations and
+ // ranges of the diagnostics.
+ FIDMap FM;
+ SmallVector<FileID, 10> Fids;
+
+ for (ArrayRef<StoredDiagnostic>::iterator
+ I = diags.begin(), E = diags.end(); I != E; ++I) {
+ const StoredDiagnostic &D = *I;
+
+ AddFID(FM, Fids, SM, D.getLocation());
+
+ for (StoredDiagnostic::range_iterator
+ RI = D.range_begin(), RE = D.range_end(); RI != RE; ++RI) {
+ AddFID(FM, Fids, SM, RI->getBegin());
+ AddFID(FM, Fids, SM, RI->getEnd());
+ }
+ }
+
+ std::string errMsg;
+ llvm::raw_fd_ostream o(outPath.c_str(), errMsg);
+ if (!errMsg.empty()) {
+ llvm::errs() << "error: could not create file: " << outPath << '\n';
+ return;
+ }
+
+ // Write the plist header.
+ o << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+ "<!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\" "
+ "\"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n"
+ "<plist version=\"1.0\">\n";
+
+ // Write the root object: a <dict> containing...
+ // - "files", an <array> mapping from FIDs to file names
+ // - "diagnostics", an <array> containing the diagnostics
+ o << "<dict>\n"
+ " <key>files</key>\n"
+ " <array>\n";
+
+ for (SmallVectorImpl<FileID>::iterator I=Fids.begin(), E=Fids.end();
+ I!=E; ++I) {
+ o << " ";
+ EmitString(o, SM.getFileEntryForID(*I)->getName()) << '\n';
+ }
+
+ o << " </array>\n"
+ " <key>diagnostics</key>\n"
+ " <array>\n";
+
+ for (ArrayRef<StoredDiagnostic>::iterator
+ DI = diags.begin(), DE = diags.end(); DI != DE; ++DI) {
+
+ const StoredDiagnostic &D = *DI;
+
+ if (D.getLevel() == DiagnosticsEngine::Ignored)
+ continue;
+
+ o << " <dict>\n";
+
+ // Output the diagnostic.
+ o << " <key>description</key>";
+ EmitString(o, D.getMessage()) << '\n';
+ o << " <key>category</key>";
+ EmitString(o, DiagIDs.getCategoryNameFromID(
+ DiagIDs.getCategoryNumberForDiag(D.getID()))) << '\n';
+ o << " <key>type</key>";
+ if (D.getLevel() >= DiagnosticsEngine::Error)
+ EmitString(o, "error") << '\n';
+ else if (D.getLevel() == DiagnosticsEngine::Warning)
+ EmitString(o, "warning") << '\n';
+ else
+ EmitString(o, "note") << '\n';
+
+ // Output the location of the bug.
+ o << " <key>location</key>\n";
+ EmitLocation(o, SM, LangOpts, D.getLocation(), FM, 2);
+
+ // Output the ranges (if any).
+ StoredDiagnostic::range_iterator RI = D.range_begin(), RE = D.range_end();
+
+ if (RI != RE) {
+ o << " <key>ranges</key>\n";
+ o << " <array>\n";
+ for (; RI != RE; ++RI)
+ EmitRange(o, SM, LangOpts, *RI, FM, 4);
+ o << " </array>\n";
+ }
+
+ // Close up the entry.
+ o << " </dict>\n";
+ }
+
+ o << " </array>\n";
+
+ // Finish.
+ o << "</dict>\n</plist>";
+}
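
As a call-site sketch (not part of the diff above): the only entry point here is writeARCDiagsToPlist, which takes the captured diagnostics plus the SourceManager and LangOptions they were issued against. An illustrative example, assuming the diagnostics were collected into a CapturedDiagList named Captured and that Ctx is the ASTContext in use:

    std::vector<clang::StoredDiagnostic> diags(Captured.begin(), Captured.end());
    clang::arcmt::writeARCDiagsToPlist("migrate.plist", diags,
                                       Ctx.getSourceManager(), Ctx.getLangOpts());
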
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransAPIUses.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransAPIUses.cpp
new file mode 100644
index 0000000..aaa82d8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransAPIUses.cpp
@@ -0,0 +1,109 @@
+//===--- TransAPIUses.cpp - Transformations to ARC mode -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// checkAPIUses:
+//
+// Emits errors/fixes for API uses that are obsolete or not safe in ARC mode:
+//
+// - NSInvocation's [get/set]ReturnValue and [get/set]Argument are only safe
+// with __unsafe_unretained objects.
+// - Calling -zone gets replaced with 'nil'.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Sema/SemaDiagnostic.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class APIChecker : public RecursiveASTVisitor<APIChecker> {
+ MigrationPass &Pass;
+
+ Selector getReturnValueSel, setReturnValueSel;
+ Selector getArgumentSel, setArgumentSel;
+
+ Selector zoneSel;
+public:
+ APIChecker(MigrationPass &pass) : Pass(pass) {
+ SelectorTable &sels = Pass.Ctx.Selectors;
+ IdentifierTable &ids = Pass.Ctx.Idents;
+ getReturnValueSel = sels.getUnarySelector(&ids.get("getReturnValue"));
+ setReturnValueSel = sels.getUnarySelector(&ids.get("setReturnValue"));
+
+ IdentifierInfo *selIds[2];
+ selIds[0] = &ids.get("getArgument");
+ selIds[1] = &ids.get("atIndex");
+ getArgumentSel = sels.getSelector(2, selIds);
+ selIds[0] = &ids.get("setArgument");
+ setArgumentSel = sels.getSelector(2, selIds);
+
+ zoneSel = sels.getNullarySelector(&ids.get("zone"));
+ }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ // NSInvocation.
+ if (E->isInstanceMessage() &&
+ E->getReceiverInterface() &&
+ E->getReceiverInterface()->getName() == "NSInvocation") {
+ StringRef selName;
+ if (E->getSelector() == getReturnValueSel)
+ selName = "getReturnValue";
+ else if (E->getSelector() == setReturnValueSel)
+ selName = "setReturnValue";
+ else if (E->getSelector() == getArgumentSel)
+ selName = "getArgument";
+ else if (E->getSelector() == setArgumentSel)
+ selName = "setArgument";
+
+ if (selName.empty())
+ return true;
+
+ Expr *parm = E->getArg(0)->IgnoreParenCasts();
+ QualType pointee = parm->getType()->getPointeeType();
+ if (pointee.isNull())
+ return true;
+
+ if (pointee.getObjCLifetime() > Qualifiers::OCL_ExplicitNone) {
+ std::string err = "NSInvocation's ";
+ err += selName;
+ err += " is not safe to be used with an object with ownership other "
+ "than __unsafe_unretained";
+ Pass.TA.reportError(err, parm->getLocStart(), parm->getSourceRange());
+ }
+ return true;
+ }
+
+ // -zone.
+ if (E->isInstanceMessage() &&
+ E->getInstanceReceiver() &&
+ E->getSelector() == zoneSel &&
+ Pass.TA.hasDiagnostic(diag::err_unavailable,
+ diag::err_unavailable_message,
+ E->getInstanceReceiver()->getExprLoc())) {
+ // Calling -zone is meaningless in ARC, change it to nil.
+ Transaction Trans(Pass.TA);
+ Pass.TA.clearDiagnostic(diag::err_unavailable,
+ diag::err_unavailable_message,
+ E->getInstanceReceiver()->getExprLoc());
+ Pass.TA.replace(E->getSourceRange(), getNilString(Pass.Ctx));
+ }
+ return true;
+ }
+};
+
+} // anonymous namespace
+
+void trans::checkAPIUses(MigrationPass &pass) {
+ APIChecker(pass).TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp
new file mode 100644
index 0000000..cfa6da1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransARCAssign.cpp
@@ -0,0 +1,77 @@
+//===--- TransARCAssign.cpp - Transformations to ARC mode -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// makeAssignARCSafe:
+//
+// Add '__strong' where appropriate.
+//
+// for (id x in collection) {
+// x = 0;
+// }
+// ---->
+// for (__strong id x in collection) {
+// x = 0;
+// }
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Sema/SemaDiagnostic.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class ARCAssignChecker : public RecursiveASTVisitor<ARCAssignChecker> {
+ MigrationPass &Pass;
+ llvm::DenseSet<VarDecl *> ModifiedVars;
+
+public:
+ ARCAssignChecker(MigrationPass &pass) : Pass(pass) { }
+
+ bool VisitBinaryOperator(BinaryOperator *Exp) {
+ if (Exp->getType()->isDependentType())
+ return true;
+
+ Expr *E = Exp->getLHS();
+ SourceLocation OrigLoc = E->getExprLoc();
+ SourceLocation Loc = OrigLoc;
+ DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts());
+ if (declRef && isa<VarDecl>(declRef->getDecl())) {
+ ASTContext &Ctx = Pass.Ctx;
+ Expr::isModifiableLvalueResult IsLV = E->isModifiableLvalue(Ctx, &Loc);
+ if (IsLV != Expr::MLV_ConstQualified)
+ return true;
+ VarDecl *var = cast<VarDecl>(declRef->getDecl());
+ if (var->isARCPseudoStrong()) {
+ Transaction Trans(Pass.TA);
+ if (Pass.TA.clearDiagnostic(diag::err_typecheck_arr_assign_enumeration,
+ Exp->getOperatorLoc())) {
+ if (!ModifiedVars.count(var)) {
+ TypeLoc TLoc = var->getTypeSourceInfo()->getTypeLoc();
+ Pass.TA.insert(TLoc.getBeginLoc(), "__strong ");
+ ModifiedVars.insert(var);
+ }
+ }
+ }
+ }
+
+ return true;
+ }
+};
+
+} // anonymous namespace
+
+void trans::makeAssignARCSafe(MigrationPass &pass) {
+ ARCAssignChecker assignCheck(pass);
+ assignCheck.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp
new file mode 100644
index 0000000..8787724
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransAutoreleasePool.cpp
@@ -0,0 +1,434 @@
+//===--- TransAutoreleasePool.cpp - Transformations to ARC mode -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// rewriteAutoreleasePool:
+//
+// Uses of NSAutoreleasePool will be rewritten as an @autoreleasepool scope.
+//
+// NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+// ...
+// [pool release];
+// ---->
+// @autoreleasepool {
+// ...
+// }
+//
+// An NSAutoreleasePool will not be touched if:
+// - There is not a corresponding -release/-drain in the same scope
+// - Not all references to the NSAutoreleasePool variable can be removed
+// - There is a variable that is declared inside the intended @autoreleasepool
+// scope which is also used outside it.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include <map>
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class ReleaseCollector : public RecursiveASTVisitor<ReleaseCollector> {
+ Decl *Dcl;
+ SmallVectorImpl<ObjCMessageExpr *> &Releases;
+
+public:
+ ReleaseCollector(Decl *D, SmallVectorImpl<ObjCMessageExpr *> &releases)
+ : Dcl(D), Releases(releases) { }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ if (!E->isInstanceMessage())
+ return true;
+ if (E->getMethodFamily() != OMF_release)
+ return true;
+ Expr *instance = E->getInstanceReceiver()->IgnoreParenCasts();
+ if (DeclRefExpr *DE = dyn_cast<DeclRefExpr>(instance)) {
+ if (DE->getDecl() == Dcl)
+ Releases.push_back(E);
+ }
+ return true;
+ }
+};
+
+}
+
+namespace {
+
+class AutoreleasePoolRewriter
+ : public RecursiveASTVisitor<AutoreleasePoolRewriter> {
+public:
+ AutoreleasePoolRewriter(MigrationPass &pass)
+ : Body(0), Pass(pass) {
+ PoolII = &pass.Ctx.Idents.get("NSAutoreleasePool");
+ DrainSel = pass.Ctx.Selectors.getNullarySelector(
+ &pass.Ctx.Idents.get("drain"));
+ }
+
+ void transformBody(Stmt *body) {
+ Body = body;
+ TraverseStmt(body);
+ }
+
+ ~AutoreleasePoolRewriter() {
+ SmallVector<VarDecl *, 8> VarsToHandle;
+
+ for (std::map<VarDecl *, PoolVarInfo>::iterator
+ I = PoolVars.begin(), E = PoolVars.end(); I != E; ++I) {
+ VarDecl *var = I->first;
+ PoolVarInfo &info = I->second;
+
+ // Check that we can handle/rewrite all references of the pool.
+
+ clearRefsIn(info.Dcl, info.Refs);
+ for (SmallVectorImpl<PoolScope>::iterator
+ scpI = info.Scopes.begin(),
+ scpE = info.Scopes.end(); scpI != scpE; ++scpI) {
+ PoolScope &scope = *scpI;
+ clearRefsIn(*scope.Begin, info.Refs);
+ clearRefsIn(*scope.End, info.Refs);
+ clearRefsIn(scope.Releases.begin(), scope.Releases.end(), info.Refs);
+ }
+
+ // If even one reference is not handled, we will not do anything about that
+ // pool variable.
+ if (info.Refs.empty())
+ VarsToHandle.push_back(var);
+ }
+
+ for (unsigned i = 0, e = VarsToHandle.size(); i != e; ++i) {
+ PoolVarInfo &info = PoolVars[VarsToHandle[i]];
+
+ Transaction Trans(Pass.TA);
+
+ clearUnavailableDiags(info.Dcl);
+ Pass.TA.removeStmt(info.Dcl);
+
+ // Add "@autoreleasepool { }"
+ for (SmallVectorImpl<PoolScope>::iterator
+ scpI = info.Scopes.begin(),
+ scpE = info.Scopes.end(); scpI != scpE; ++scpI) {
+ PoolScope &scope = *scpI;
+ clearUnavailableDiags(*scope.Begin);
+ clearUnavailableDiags(*scope.End);
+ if (scope.IsFollowedBySimpleReturnStmt) {
+ // Include the return in the scope.
+ Pass.TA.replaceStmt(*scope.Begin, "@autoreleasepool {");
+ Pass.TA.removeStmt(*scope.End);
+ Stmt::child_iterator retI = scope.End;
+ ++retI;
+ SourceLocation afterSemi = findLocationAfterSemi((*retI)->getLocEnd(),
+ Pass.Ctx);
+ assert(afterSemi.isValid() &&
+ "Didn't we check before setting IsFollowedBySimpleReturnStmt "
+ "to true?");
+ Pass.TA.insertAfterToken(afterSemi, "\n}");
+ Pass.TA.increaseIndentation(
+ SourceRange(scope.getIndentedRange().getBegin(),
+ (*retI)->getLocEnd()),
+ scope.CompoundParent->getLocStart());
+ } else {
+ Pass.TA.replaceStmt(*scope.Begin, "@autoreleasepool {");
+ Pass.TA.replaceStmt(*scope.End, "}");
+ Pass.TA.increaseIndentation(scope.getIndentedRange(),
+ scope.CompoundParent->getLocStart());
+ }
+ }
+
+ // Remove rest of pool var references.
+ for (SmallVectorImpl<PoolScope>::iterator
+ scpI = info.Scopes.begin(),
+ scpE = info.Scopes.end(); scpI != scpE; ++scpI) {
+ PoolScope &scope = *scpI;
+ for (SmallVectorImpl<ObjCMessageExpr *>::iterator
+ relI = scope.Releases.begin(),
+ relE = scope.Releases.end(); relI != relE; ++relI) {
+ clearUnavailableDiags(*relI);
+ Pass.TA.removeStmt(*relI);
+ }
+ }
+ }
+ }
+
+ bool VisitCompoundStmt(CompoundStmt *S) {
+ SmallVector<PoolScope, 4> Scopes;
+
+ for (Stmt::child_iterator
+ I = S->body_begin(), E = S->body_end(); I != E; ++I) {
+ Stmt *child = getEssential(*I);
+ if (DeclStmt *DclS = dyn_cast<DeclStmt>(child)) {
+ if (DclS->isSingleDecl()) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(DclS->getSingleDecl())) {
+ if (isNSAutoreleasePool(VD->getType())) {
+ PoolVarInfo &info = PoolVars[VD];
+ info.Dcl = DclS;
+ collectRefs(VD, S, info.Refs);
+ // Does this statement follow the pattern:
+ // NSAutoreleasePool * pool = [NSAutoreleasePool new];
+ if (isPoolCreation(VD->getInit())) {
+ Scopes.push_back(PoolScope());
+ Scopes.back().PoolVar = VD;
+ Scopes.back().CompoundParent = S;
+ Scopes.back().Begin = I;
+ }
+ }
+ }
+ }
+ } else if (BinaryOperator *bop = dyn_cast<BinaryOperator>(child)) {
+ if (DeclRefExpr *dref = dyn_cast<DeclRefExpr>(bop->getLHS())) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(dref->getDecl())) {
+ // Does this statement follow the pattern:
+ // pool = [NSAutoreleasePool new];
+ if (isNSAutoreleasePool(VD->getType()) &&
+ isPoolCreation(bop->getRHS())) {
+ Scopes.push_back(PoolScope());
+ Scopes.back().PoolVar = VD;
+ Scopes.back().CompoundParent = S;
+ Scopes.back().Begin = I;
+ }
+ }
+ }
+ }
+
+ if (Scopes.empty())
+ continue;
+
+ if (isPoolDrain(Scopes.back().PoolVar, child)) {
+ PoolScope &scope = Scopes.back();
+ scope.End = I;
+ handlePoolScope(scope, S);
+ Scopes.pop_back();
+ }
+ }
+ return true;
+ }
+
+private:
+ void clearUnavailableDiags(Stmt *S) {
+ if (S)
+ Pass.TA.clearDiagnostic(diag::err_unavailable,
+ diag::err_unavailable_message,
+ S->getSourceRange());
+ }
+
+ struct PoolScope {
+ VarDecl *PoolVar;
+ CompoundStmt *CompoundParent;
+ Stmt::child_iterator Begin;
+ Stmt::child_iterator End;
+ bool IsFollowedBySimpleReturnStmt;
+ SmallVector<ObjCMessageExpr *, 4> Releases;
+
+ PoolScope() : PoolVar(0), CompoundParent(0), Begin(), End(),
+ IsFollowedBySimpleReturnStmt(false) { }
+
+ SourceRange getIndentedRange() const {
+ Stmt::child_iterator rangeS = Begin;
+ ++rangeS;
+ if (rangeS == End)
+ return SourceRange();
+ Stmt::child_iterator rangeE = Begin;
+ for (Stmt::child_iterator I = rangeS; I != End; ++I)
+ ++rangeE;
+ return SourceRange((*rangeS)->getLocStart(), (*rangeE)->getLocEnd());
+ }
+ };
+
+ class NameReferenceChecker : public RecursiveASTVisitor<NameReferenceChecker>{
+ ASTContext &Ctx;
+ SourceRange ScopeRange;
+ SourceLocation &referenceLoc, &declarationLoc;
+
+ public:
+ NameReferenceChecker(ASTContext &ctx, PoolScope &scope,
+ SourceLocation &referenceLoc,
+ SourceLocation &declarationLoc)
+ : Ctx(ctx), referenceLoc(referenceLoc),
+ declarationLoc(declarationLoc) {
+ ScopeRange = SourceRange((*scope.Begin)->getLocStart(),
+ (*scope.End)->getLocStart());
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ return checkRef(E->getLocation(), E->getDecl()->getLocation());
+ }
+
+ bool VisitTypedefTypeLoc(TypedefTypeLoc TL) {
+ return checkRef(TL.getBeginLoc(), TL.getTypedefNameDecl()->getLocation());
+ }
+
+ bool VisitTagTypeLoc(TagTypeLoc TL) {
+ return checkRef(TL.getBeginLoc(), TL.getDecl()->getLocation());
+ }
+
+ private:
+ bool checkRef(SourceLocation refLoc, SourceLocation declLoc) {
+ if (isInScope(declLoc)) {
+ referenceLoc = refLoc;
+ declarationLoc = declLoc;
+ return false;
+ }
+ return true;
+ }
+
+ bool isInScope(SourceLocation loc) {
+ if (loc.isInvalid())
+ return false;
+
+ SourceManager &SM = Ctx.getSourceManager();
+ if (SM.isBeforeInTranslationUnit(loc, ScopeRange.getBegin()))
+ return false;
+ return SM.isBeforeInTranslationUnit(loc, ScopeRange.getEnd());
+ }
+ };
+
+ void handlePoolScope(PoolScope &scope, CompoundStmt *compoundS) {
+ // Check that all names declared inside the scope are not used
+ // outside the scope.
+ {
+ bool nameUsedOutsideScope = false;
+ SourceLocation referenceLoc, declarationLoc;
+ Stmt::child_iterator SI = scope.End, SE = compoundS->body_end();
+ ++SI;
+ // Check if the autoreleasepool scope is followed by a simple return
+ // statement, in which case we will include the return in the scope.
+ if (SI != SE)
+ if (ReturnStmt *retS = dyn_cast<ReturnStmt>(*SI))
+ if ((retS->getRetValue() == 0 ||
+ isa<DeclRefExpr>(retS->getRetValue()->IgnoreParenCasts())) &&
+ findLocationAfterSemi(retS->getLocEnd(), Pass.Ctx).isValid()) {
+ scope.IsFollowedBySimpleReturnStmt = true;
+ ++SI; // the return will be included in scope, don't check it.
+ }
+
+ for (; SI != SE; ++SI) {
+ nameUsedOutsideScope = !NameReferenceChecker(Pass.Ctx, scope,
+ referenceLoc,
+ declarationLoc).TraverseStmt(*SI);
+ if (nameUsedOutsideScope)
+ break;
+ }
+
+ // If not all references were cleared it means some variables/typenames/etc
+ // declared inside the pool scope are used outside of it.
+ // We won't try to rewrite the pool.
+ if (nameUsedOutsideScope) {
+ Pass.TA.reportError("a name is referenced outside the "
+ "NSAutoreleasePool scope that it was declared in", referenceLoc);
+ Pass.TA.reportNote("name declared here", declarationLoc);
+ Pass.TA.reportNote("intended @autoreleasepool scope begins here",
+ (*scope.Begin)->getLocStart());
+ Pass.TA.reportNote("intended @autoreleasepool scope ends here",
+ (*scope.End)->getLocStart());
+ return;
+ }
+ }
+
+ // Collect all releases of the pool; they will be removed.
+ {
+ ReleaseCollector releaseColl(scope.PoolVar, scope.Releases);
+ Stmt::child_iterator I = scope.Begin;
+ ++I;
+ for (; I != scope.End; ++I)
+ releaseColl.TraverseStmt(*I);
+ }
+
+ PoolVars[scope.PoolVar].Scopes.push_back(scope);
+ }
+
+ bool isPoolCreation(Expr *E) {
+ if (!E) return false;
+ E = getEssential(E);
+ ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E);
+ if (!ME) return false;
+ if (ME->getMethodFamily() == OMF_new &&
+ ME->getReceiverKind() == ObjCMessageExpr::Class &&
+ isNSAutoreleasePool(ME->getReceiverInterface()))
+ return true;
+ if (ME->getReceiverKind() == ObjCMessageExpr::Instance &&
+ ME->getMethodFamily() == OMF_init) {
+ Expr *rec = getEssential(ME->getInstanceReceiver());
+ if (ObjCMessageExpr *recME = dyn_cast_or_null<ObjCMessageExpr>(rec)) {
+ if (recME->getMethodFamily() == OMF_alloc &&
+ recME->getReceiverKind() == ObjCMessageExpr::Class &&
+ isNSAutoreleasePool(recME->getReceiverInterface()))
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ bool isPoolDrain(VarDecl *poolVar, Stmt *S) {
+ if (!S) return false;
+ S = getEssential(S);
+ ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S);
+ if (!ME) return false;
+ if (ME->getReceiverKind() == ObjCMessageExpr::Instance) {
+ Expr *rec = getEssential(ME->getInstanceReceiver());
+ if (DeclRefExpr *dref = dyn_cast<DeclRefExpr>(rec))
+ if (dref->getDecl() == poolVar)
+ return ME->getMethodFamily() == OMF_release ||
+ ME->getSelector() == DrainSel;
+ }
+
+ return false;
+ }
+
+ bool isNSAutoreleasePool(ObjCInterfaceDecl *IDecl) {
+ return IDecl && IDecl->getIdentifier() == PoolII;
+ }
+
+ bool isNSAutoreleasePool(QualType Ty) {
+ QualType pointee = Ty->getPointeeType();
+ if (pointee.isNull())
+ return false;
+ if (const ObjCInterfaceType *interT = pointee->getAs<ObjCInterfaceType>())
+ return isNSAutoreleasePool(interT->getDecl());
+ return false;
+ }
+
+ static Expr *getEssential(Expr *E) {
+ return cast<Expr>(getEssential((Stmt*)E));
+ }
+ static Stmt *getEssential(Stmt *S) {
+ if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(S))
+ S = EWC->getSubExpr();
+ if (Expr *E = dyn_cast<Expr>(S))
+ S = E->IgnoreParenCasts();
+ return S;
+ }
+
+ Stmt *Body;
+ MigrationPass &Pass;
+
+ IdentifierInfo *PoolII;
+ Selector DrainSel;
+
+ struct PoolVarInfo {
+ DeclStmt *Dcl;
+ ExprSet Refs;
+ SmallVector<PoolScope, 2> Scopes;
+
+ PoolVarInfo() : Dcl(0) { }
+ };
+
+ std::map<VarDecl *, PoolVarInfo> PoolVars;
+};
+
+} // anonymous namespace
+
+void trans::rewriteAutoreleasePool(MigrationPass &pass) {
+ BodyTransform<AutoreleasePoolRewriter> trans(pass);
+ trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
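
As a usage sketch (not part of the diff above): each transform in this directory exposes a free function taking the MigrationPass, so a driver can simply chain them; the actual pass list is assembled elsewhere in the library. An illustrative example using the three entry points defined so far, assuming the trans namespace is nested in clang::arcmt as the using-directives above suggest:

    void runSelectedTransforms(clang::arcmt::MigrationPass &pass) {
      using namespace clang::arcmt;
      trans::checkAPIUses(pass);            // flag unsafe NSInvocation / -zone uses
      trans::makeAssignARCSafe(pass);       // add __strong to fast-enumeration vars
      trans::rewriteAutoreleasePool(pass);  // NSAutoreleasePool -> @autoreleasepool
    }
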
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp
new file mode 100644
index 0000000..3be8132
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransBlockObjCVariable.cpp
@@ -0,0 +1,150 @@
+//===--- TransBlockObjCVariable.cpp - Transformations to ARC mode ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// rewriteBlockObjCVariable:
+//
+// Adding __block to an obj-c variable could be either because the variable is
+// used for output storage or because the user wanted to break a retain cycle.
+// This transformation checks whether a reference to the variable (as opposed
+// to just its value) is actually needed by the block, i.e. whether it is
+// assigned to or has its address taken. If the reference is not needed it
+// assumes __block was added to break a cycle, so it removes '__block' and adds
+// __weak/__unsafe_unretained, e.g.
+//
+// __block Foo *x;
+// bar(^ { [x cake]; });
+// ---->
+// __weak Foo *x;
+// bar(^ { [x cake]; });
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Basic/SourceManager.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class RootBlockObjCVarRewriter :
+ public RecursiveASTVisitor<RootBlockObjCVarRewriter> {
+ MigrationPass &Pass;
+ llvm::DenseSet<VarDecl *> &VarsToChange;
+
+ class BlockVarChecker : public RecursiveASTVisitor<BlockVarChecker> {
+ VarDecl *Var;
+
+ typedef RecursiveASTVisitor<BlockVarChecker> base;
+ public:
+ BlockVarChecker(VarDecl *var) : Var(var) { }
+
+ bool TraverseImplicitCastExpr(ImplicitCastExpr *castE) {
+ if (DeclRefExpr *
+ ref = dyn_cast<DeclRefExpr>(castE->getSubExpr())) {
+ if (ref->getDecl() == Var) {
+ if (castE->getCastKind() == CK_LValueToRValue)
+ return true; // Using the value of the variable.
+ if (castE->getCastKind() == CK_NoOp && castE->isLValue() &&
+ Var->getASTContext().getLangOpts().CPlusPlus)
+ return true; // Binding to const C++ reference.
+ }
+ }
+
+ return base::TraverseImplicitCastExpr(castE);
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ if (E->getDecl() == Var)
+ return false; // The reference of the variable, and not just its value,
+ // is needed.
+ return true;
+ }
+ };
+
+public:
+ RootBlockObjCVarRewriter(MigrationPass &pass,
+ llvm::DenseSet<VarDecl *> &VarsToChange)
+ : Pass(pass), VarsToChange(VarsToChange) { }
+
+ bool VisitBlockDecl(BlockDecl *block) {
+ SmallVector<VarDecl *, 4> BlockVars;
+
+ for (BlockDecl::capture_iterator
+ I = block->capture_begin(), E = block->capture_end(); I != E; ++I) {
+ VarDecl *var = I->getVariable();
+ if (I->isByRef() &&
+ var->getType()->isObjCObjectPointerType() &&
+ isImplicitStrong(var->getType())) {
+ BlockVars.push_back(var);
+ }
+ }
+
+ for (unsigned i = 0, e = BlockVars.size(); i != e; ++i) {
+ VarDecl *var = BlockVars[i];
+
+ BlockVarChecker checker(var);
+ bool onlyValueOfVarIsNeeded = checker.TraverseStmt(block->getBody());
+ if (onlyValueOfVarIsNeeded)
+ VarsToChange.insert(var);
+ else
+ VarsToChange.erase(var);
+ }
+
+ return true;
+ }
+
+private:
+ bool isImplicitStrong(QualType ty) {
+ if (isa<AttributedType>(ty.getTypePtr()))
+ return false;
+ return ty.getLocalQualifiers().getObjCLifetime() == Qualifiers::OCL_Strong;
+ }
+};
+
+class BlockObjCVarRewriter : public RecursiveASTVisitor<BlockObjCVarRewriter> {
+ MigrationPass &Pass;
+ llvm::DenseSet<VarDecl *> &VarsToChange;
+
+public:
+ BlockObjCVarRewriter(MigrationPass &pass,
+ llvm::DenseSet<VarDecl *> &VarsToChange)
+ : Pass(pass), VarsToChange(VarsToChange) { }
+
+ bool TraverseBlockDecl(BlockDecl *block) {
+ RootBlockObjCVarRewriter(Pass, VarsToChange).TraverseDecl(block);
+ return true;
+ }
+};
+
+} // anonymous namespace
+
+void BlockObjCVariableTraverser::traverseBody(BodyContext &BodyCtx) {
+ MigrationPass &Pass = BodyCtx.getMigrationContext().Pass;
+ llvm::DenseSet<VarDecl *> VarsToChange;
+
+ BlockObjCVarRewriter trans(Pass, VarsToChange);
+ trans.TraverseStmt(BodyCtx.getTopStmt());
+
+ for (llvm::DenseSet<VarDecl *>::iterator
+ I = VarsToChange.begin(), E = VarsToChange.end(); I != E; ++I) {
+ VarDecl *var = *I;
+ BlocksAttr *attr = var->getAttr<BlocksAttr>();
+ if (!attr)
+ continue;
+ bool useWeak = canApplyWeak(Pass.Ctx, var->getType());
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+ Transaction Trans(Pass.TA);
+ Pass.TA.replaceText(SM.getExpansionLoc(attr->getLocation()),
+ "__block",
+ useWeak ? "__weak" : "__unsafe_unretained");
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
new file mode 100644
index 0000000..0fb7141
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransEmptyStatementsAndDealloc.cpp
@@ -0,0 +1,258 @@
+//===--- TransEmptyStatementsAndDealloc.cpp - Transformations to ARC mode --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// removeEmptyStatementsAndDeallocFinalize:
+//
+// Removes empty statements that are leftovers from previous transformations.
+// e.g. for
+//
+// [x retain];
+//
+// removeRetainReleaseDealloc will leave an empty ";" that this transformation
+// will remove.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/SourceManager.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+static bool isEmptyARCMTMacroStatement(NullStmt *S,
+ std::vector<SourceLocation> &MacroLocs,
+ ASTContext &Ctx) {
+ if (!S->hasLeadingEmptyMacro())
+ return false;
+
+ SourceLocation SemiLoc = S->getSemiLoc();
+ if (SemiLoc.isInvalid() || SemiLoc.isMacroID())
+ return false;
+
+ if (MacroLocs.empty())
+ return false;
+
+ SourceManager &SM = Ctx.getSourceManager();
+ std::vector<SourceLocation>::iterator
+ I = std::upper_bound(MacroLocs.begin(), MacroLocs.end(), SemiLoc,
+ SourceManager::LocBeforeThanCompare(SM));
+ --I;
+ SourceLocation
+ AfterMacroLoc = I->getLocWithOffset(getARCMTMacroName().size());
+ assert(AfterMacroLoc.isFileID());
+
+ if (AfterMacroLoc == SemiLoc)
+ return true;
+
+ int RelOffs = 0;
+ if (!SM.isInSameSLocAddrSpace(AfterMacroLoc, SemiLoc, &RelOffs))
+ return false;
+ if (RelOffs < 0)
+ return false;
+
+ // We make the reasonable assumption that a semicolon after 100 characters
+ // means that it is not the next token after our macro. If this assumption
+ // fails it is not critical, we will just fail to clear out, e.g., an empty
+ // 'if'.
+ if (RelOffs - getARCMTMacroName().size() > 100)
+ return false;
+
+ SourceLocation AfterMacroSemiLoc = findSemiAfterLocation(AfterMacroLoc, Ctx);
+ return AfterMacroSemiLoc == SemiLoc;
+}
+
+namespace {
+
+/// \brief Returns true if the statement became empty due to previous
+/// transformations.
+class EmptyChecker : public StmtVisitor<EmptyChecker, bool> {
+ ASTContext &Ctx;
+ std::vector<SourceLocation> &MacroLocs;
+
+public:
+ EmptyChecker(ASTContext &ctx, std::vector<SourceLocation> &macroLocs)
+ : Ctx(ctx), MacroLocs(macroLocs) { }
+
+ bool VisitNullStmt(NullStmt *S) {
+ return isEmptyARCMTMacroStatement(S, MacroLocs, Ctx);
+ }
+ bool VisitCompoundStmt(CompoundStmt *S) {
+ if (S->body_empty())
+ return false; // was already empty, not because of transformations.
+ for (CompoundStmt::body_iterator
+ I = S->body_begin(), E = S->body_end(); I != E; ++I)
+ if (!Visit(*I))
+ return false;
+ return true;
+ }
+ bool VisitIfStmt(IfStmt *S) {
+ if (S->getConditionVariable())
+ return false;
+ Expr *condE = S->getCond();
+ if (!condE)
+ return false;
+ if (hasSideEffects(condE, Ctx))
+ return false;
+ if (!S->getThen() || !Visit(S->getThen()))
+ return false;
+ if (S->getElse() && !Visit(S->getElse()))
+ return false;
+ return true;
+ }
+ bool VisitWhileStmt(WhileStmt *S) {
+ if (S->getConditionVariable())
+ return false;
+ Expr *condE = S->getCond();
+ if (!condE)
+ return false;
+ if (hasSideEffects(condE, Ctx))
+ return false;
+ if (!S->getBody())
+ return false;
+ return Visit(S->getBody());
+ }
+ bool VisitDoStmt(DoStmt *S) {
+ Expr *condE = S->getCond();
+ if (!condE)
+ return false;
+ if (hasSideEffects(condE, Ctx))
+ return false;
+ if (!S->getBody())
+ return false;
+ return Visit(S->getBody());
+ }
+ bool VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+ Expr *Exp = S->getCollection();
+ if (!Exp)
+ return false;
+ if (hasSideEffects(Exp, Ctx))
+ return false;
+ if (!S->getBody())
+ return false;
+ return Visit(S->getBody());
+ }
+ bool VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
+ if (!S->getSubStmt())
+ return false;
+ return Visit(S->getSubStmt());
+ }
+};
+
+class EmptyStatementsRemover :
+ public RecursiveASTVisitor<EmptyStatementsRemover> {
+ MigrationPass &Pass;
+
+public:
+ EmptyStatementsRemover(MigrationPass &pass) : Pass(pass) { }
+
+ bool TraverseStmtExpr(StmtExpr *E) {
+ CompoundStmt *S = E->getSubStmt();
+ for (CompoundStmt::body_iterator
+ I = S->body_begin(), E = S->body_end(); I != E; ++I) {
+ if (I != E - 1)
+ check(*I);
+ TraverseStmt(*I);
+ }
+ return true;
+ }
+
+ bool VisitCompoundStmt(CompoundStmt *S) {
+ for (CompoundStmt::body_iterator
+ I = S->body_begin(), E = S->body_end(); I != E; ++I)
+ check(*I);
+ return true;
+ }
+
+ ASTContext &getContext() { return Pass.Ctx; }
+
+private:
+ void check(Stmt *S) {
+ if (!S) return;
+ if (EmptyChecker(Pass.Ctx, Pass.ARCMTMacroLocs).Visit(S)) {
+ Transaction Trans(Pass.TA);
+ Pass.TA.removeStmt(S);
+ }
+ }
+};
+
+} // anonymous namespace
+
+static bool isBodyEmpty(CompoundStmt *body, ASTContext &Ctx,
+ std::vector<SourceLocation> &MacroLocs) {
+ for (CompoundStmt::body_iterator
+ I = body->body_begin(), E = body->body_end(); I != E; ++I)
+ if (!EmptyChecker(Ctx, MacroLocs).Visit(*I))
+ return false;
+
+ return true;
+}
+
+static void cleanupDeallocOrFinalize(MigrationPass &pass) {
+ ASTContext &Ctx = pass.Ctx;
+ TransformActions &TA = pass.TA;
+ DeclContext *DC = Ctx.getTranslationUnitDecl();
+ Selector FinalizeSel =
+ Ctx.Selectors.getNullarySelector(&pass.Ctx.Idents.get("finalize"));
+
+ typedef DeclContext::specific_decl_iterator<ObjCImplementationDecl>
+ impl_iterator;
+ for (impl_iterator I = impl_iterator(DC->decls_begin()),
+ E = impl_iterator(DC->decls_end()); I != E; ++I) {
+ ObjCMethodDecl *DeallocM = 0;
+ ObjCMethodDecl *FinalizeM = 0;
+ for (ObjCImplementationDecl::instmeth_iterator
+ MI = (*I)->instmeth_begin(),
+ ME = (*I)->instmeth_end(); MI != ME; ++MI) {
+ ObjCMethodDecl *MD = *MI;
+ if (!MD->hasBody())
+ continue;
+
+ if (MD->getMethodFamily() == OMF_dealloc) {
+ DeallocM = MD;
+ } else if (MD->isInstanceMethod() && MD->getSelector() == FinalizeSel) {
+ FinalizeM = MD;
+ }
+ }
+
+ if (DeallocM) {
+ if (isBodyEmpty(DeallocM->getCompoundBody(), Ctx, pass.ARCMTMacroLocs)) {
+ Transaction Trans(TA);
+ TA.remove(DeallocM->getSourceRange());
+ }
+
+ if (FinalizeM) {
+ Transaction Trans(TA);
+ TA.remove(FinalizeM->getSourceRange());
+ }
+
+ } else if (FinalizeM) {
+ if (isBodyEmpty(FinalizeM->getCompoundBody(), Ctx, pass.ARCMTMacroLocs)) {
+ Transaction Trans(TA);
+ TA.remove(FinalizeM->getSourceRange());
+ } else {
+ Transaction Trans(TA);
+ TA.replaceText(FinalizeM->getSelectorStartLoc(), "finalize", "dealloc");
+ }
+ }
+ }
+}
+
+void trans::removeEmptyStatementsAndDeallocFinalize(MigrationPass &pass) {
+ EmptyStatementsRemover(pass).TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+
+ cleanupDeallocOrFinalize(pass);
+
+ for (unsigned i = 0, e = pass.ARCMTMacroLocs.size(); i != e; ++i) {
+ Transaction Trans(pass.TA);
+ pass.TA.remove(pass.ARCMTMacroLocs[i]);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp
new file mode 100644
index 0000000..9f6066e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCAttrs.cpp
@@ -0,0 +1,358 @@
+//===--- TransGCAttrs.cpp - Transformations to ARC mode --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
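+//
+// Summary of the GC attribute migration implemented below:
+//
+// - Collects all the places where the GC ownership attributes __strong and
+//   __weak occur.
+// - Reports an error where GC-managed memory of a non-ObjC-retainable type
+//   would become unmanaged under ARC.
+// - For @property declarations written with these attributes, removes the
+//   attributes and, where needed, adds the corresponding ARC property
+//   attribute (strong, weak or unsafe_unretained).
+// - Rewrites __weak to __unsafe_unretained when ARC weak references are not
+//   available for the pointee class.
+//
+//===----------------------------------------------------------------------===//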
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/TinyPtrVector.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+/// \brief Collects all the places where GC attributes __strong/__weak occur.
+class GCAttrsCollector : public RecursiveASTVisitor<GCAttrsCollector> {
+ MigrationContext &MigrateCtx;
+ bool FullyMigratable;
+ std::vector<ObjCPropertyDecl *> &AllProps;
+
+ typedef RecursiveASTVisitor<GCAttrsCollector> base;
+public:
+ GCAttrsCollector(MigrationContext &ctx,
+ std::vector<ObjCPropertyDecl *> &AllProps)
+ : MigrateCtx(ctx), FullyMigratable(false),
+ AllProps(AllProps) { }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool VisitAttributedTypeLoc(AttributedTypeLoc TL) {
+ handleAttr(TL);
+ return true;
+ }
+
+ bool TraverseDecl(Decl *D) {
+ if (!D || D->isImplicit())
+ return true;
+
+ SaveAndRestore<bool> Save(FullyMigratable, isMigratable(D));
+
+ if (ObjCPropertyDecl *PropD = dyn_cast<ObjCPropertyDecl>(D)) {
+ lookForAttribute(PropD, PropD->getTypeSourceInfo());
+ AllProps.push_back(PropD);
+ } else if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) {
+ lookForAttribute(DD, DD->getTypeSourceInfo());
+ }
+ return base::TraverseDecl(D);
+ }
+
+ void lookForAttribute(Decl *D, TypeSourceInfo *TInfo) {
+ if (!TInfo)
+ return;
+ TypeLoc TL = TInfo->getTypeLoc();
+ while (TL) {
+ if (const QualifiedTypeLoc *QL = dyn_cast<QualifiedTypeLoc>(&TL)) {
+ TL = QL->getUnqualifiedLoc();
+ } else if (const AttributedTypeLoc *
+ Attr = dyn_cast<AttributedTypeLoc>(&TL)) {
+ if (handleAttr(*Attr, D))
+ break;
+ TL = Attr->getModifiedLoc();
+ } else if (const ArrayTypeLoc *Arr = dyn_cast<ArrayTypeLoc>(&TL)) {
+ TL = Arr->getElementLoc();
+ } else if (const PointerTypeLoc *PT = dyn_cast<PointerTypeLoc>(&TL)) {
+ TL = PT->getPointeeLoc();
+ } else if (const ReferenceTypeLoc *RT = dyn_cast<ReferenceTypeLoc>(&TL))
+ TL = RT->getPointeeLoc();
+ else
+ break;
+ }
+ }
+
+ bool handleAttr(AttributedTypeLoc TL, Decl *D = 0) {
+ if (TL.getAttrKind() != AttributedType::attr_objc_ownership)
+ return false;
+
+ SourceLocation Loc = TL.getAttrNameLoc();
+ unsigned RawLoc = Loc.getRawEncoding();
+ if (MigrateCtx.AttrSet.count(RawLoc))
+ return true;
+
+ ASTContext &Ctx = MigrateCtx.Pass.Ctx;
+ SourceManager &SM = Ctx.getSourceManager();
+ if (Loc.isMacroID())
+ Loc = SM.getImmediateExpansionRange(Loc).first;
+ SmallString<32> Buf;
+ bool Invalid = false;
+ StringRef Spell = Lexer::getSpelling(
+ SM.getSpellingLoc(TL.getAttrEnumOperandLoc()),
+ Buf, SM, Ctx.getLangOpts(), &Invalid);
+ if (Invalid)
+ return false;
+ MigrationContext::GCAttrOccurrence::AttrKind Kind;
+ if (Spell == "strong")
+ Kind = MigrationContext::GCAttrOccurrence::Strong;
+ else if (Spell == "weak")
+ Kind = MigrationContext::GCAttrOccurrence::Weak;
+ else
+ return false;
+
+ MigrateCtx.AttrSet.insert(RawLoc);
+ MigrateCtx.GCAttrs.push_back(MigrationContext::GCAttrOccurrence());
+ MigrationContext::GCAttrOccurrence &Attr = MigrateCtx.GCAttrs.back();
+
+ Attr.Kind = Kind;
+ Attr.Loc = Loc;
+ Attr.ModifiedType = TL.getModifiedLoc().getType();
+ Attr.Dcl = D;
+ Attr.FullyMigratable = FullyMigratable;
+ return true;
+ }
+
+ bool isMigratable(Decl *D) {
+ if (isa<TranslationUnitDecl>(D))
+ return false;
+
+ if (isInMainFile(D))
+ return true;
+
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ return FD->hasBody();
+
+ if (ObjCContainerDecl *ContD = dyn_cast<ObjCContainerDecl>(D))
+ return hasObjCImpl(ContD);
+
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ for (CXXRecordDecl::method_iterator
+ MI = RD->method_begin(), ME = RD->method_end(); MI != ME; ++MI) {
+ if ((*MI)->isOutOfLine())
+ return true;
+ }
+ return false;
+ }
+
+ return isMigratable(cast<Decl>(D->getDeclContext()));
+ }
+
+ static bool hasObjCImpl(Decl *D) {
+ if (!D)
+ return false;
+ if (ObjCContainerDecl *ContD = dyn_cast<ObjCContainerDecl>(D)) {
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(ContD))
+ return ID->getImplementation() != 0;
+ if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(ContD))
+ return CD->getImplementation() != 0;
+ if (isa<ObjCImplDecl>(ContD))
+ return true;
+ return false;
+ }
+ return false;
+ }
+
+ bool isInMainFile(Decl *D) {
+ if (!D)
+ return false;
+
+ for (Decl::redecl_iterator
+ I = D->redecls_begin(), E = D->redecls_end(); I != E; ++I)
+ if (!isInMainFile((*I)->getLocation()))
+ return false;
+
+ return true;
+ }
+
+ bool isInMainFile(SourceLocation Loc) {
+ if (Loc.isInvalid())
+ return false;
+
+ SourceManager &SM = MigrateCtx.Pass.Ctx.getSourceManager();
+ return SM.isInFileID(SM.getExpansionLoc(Loc), SM.getMainFileID());
+ }
+};
+
+} // anonymous namespace
+
+static void errorForGCAttrsOnNonObjC(MigrationContext &MigrateCtx) {
+ TransformActions &TA = MigrateCtx.Pass.TA;
+
+ for (unsigned i = 0, e = MigrateCtx.GCAttrs.size(); i != e; ++i) {
+ MigrationContext::GCAttrOccurrence &Attr = MigrateCtx.GCAttrs[i];
+ if (Attr.FullyMigratable && Attr.Dcl) {
+ if (Attr.ModifiedType.isNull())
+ continue;
+ if (!Attr.ModifiedType->isObjCRetainableType()) {
+ TA.reportError("GC managed memory will become unmanaged in ARC",
+ Attr.Loc);
+ }
+ }
+ }
+}
+
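+// Rewrites a GC __weak attribute to __unsafe_unretained when the pointee
+// class does not support ARC weak references, clearing the corresponding
+// ARC 'weak' diagnostics.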
+static void checkWeakGCAttrs(MigrationContext &MigrateCtx) {
+ TransformActions &TA = MigrateCtx.Pass.TA;
+
+ for (unsigned i = 0, e = MigrateCtx.GCAttrs.size(); i != e; ++i) {
+ MigrationContext::GCAttrOccurrence &Attr = MigrateCtx.GCAttrs[i];
+ if (Attr.Kind == MigrationContext::GCAttrOccurrence::Weak) {
+ if (Attr.ModifiedType.isNull() ||
+ !Attr.ModifiedType->isObjCRetainableType())
+ continue;
+ if (!canApplyWeak(MigrateCtx.Pass.Ctx, Attr.ModifiedType,
+ /*AllowOnUnknownClass=*/true)) {
+ Transaction Trans(TA);
+ if (!MigrateCtx.RemovedAttrSet.count(Attr.Loc.getRawEncoding()))
+ TA.replaceText(Attr.Loc, "__weak", "__unsafe_unretained");
+ TA.clearDiagnostic(diag::err_arc_weak_no_runtime,
+ diag::err_arc_unsupported_weak_class,
+ Attr.Loc);
+ }
+ }
+ }
+}
+
+typedef llvm::TinyPtrVector<ObjCPropertyDecl *> IndivPropsTy;
+
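+// Examines all properties declared in a single @property directive. If they
+// all carry GC ownership attributes of one kind (__strong or __weak), the
+// attributes are removed; when the containing class has no @implementation,
+// an equivalent ARC property attribute (strong, weak or unsafe_unretained)
+// is written at the @property, otherwise the 'weak' information is recorded
+// for the property rewriter.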
+static void checkAllAtProps(MigrationContext &MigrateCtx,
+ SourceLocation AtLoc,
+ IndivPropsTy &IndProps) {
+ if (IndProps.empty())
+ return;
+
+ for (IndivPropsTy::iterator
+ PI = IndProps.begin(), PE = IndProps.end(); PI != PE; ++PI) {
+ QualType T = (*PI)->getType();
+ if (T.isNull() || !T->isObjCRetainableType())
+ return;
+ }
+
+ SmallVector<std::pair<AttributedTypeLoc, ObjCPropertyDecl *>, 4> ATLs;
+ bool hasWeak = false, hasStrong = false;
+ ObjCPropertyDecl::PropertyAttributeKind
+ Attrs = ObjCPropertyDecl::OBJC_PR_noattr;
+ for (IndivPropsTy::iterator
+ PI = IndProps.begin(), PE = IndProps.end(); PI != PE; ++PI) {
+ ObjCPropertyDecl *PD = *PI;
+ Attrs = PD->getPropertyAttributesAsWritten();
+ TypeSourceInfo *TInfo = PD->getTypeSourceInfo();
+ if (!TInfo)
+ return;
+ TypeLoc TL = TInfo->getTypeLoc();
+ if (AttributedTypeLoc *ATL = dyn_cast<AttributedTypeLoc>(&TL)) {
+ ATLs.push_back(std::make_pair(*ATL, PD));
+ if (TInfo->getType().getObjCLifetime() == Qualifiers::OCL_Weak) {
+ hasWeak = true;
+ } else if (TInfo->getType().getObjCLifetime() == Qualifiers::OCL_Strong)
+ hasStrong = true;
+ else
+ return;
+ }
+ }
+ if (ATLs.empty())
+ return;
+ if (hasWeak && hasStrong)
+ return;
+
+ TransformActions &TA = MigrateCtx.Pass.TA;
+ Transaction Trans(TA);
+
+ if (GCAttrsCollector::hasObjCImpl(
+ cast<Decl>(IndProps.front()->getDeclContext()))) {
+ if (hasWeak)
+ MigrateCtx.AtPropsWeak.insert(AtLoc.getRawEncoding());
+
+ } else {
+ StringRef toAttr = "strong";
+ if (hasWeak) {
+ if (canApplyWeak(MigrateCtx.Pass.Ctx, IndProps.front()->getType(),
+                         /*AllowOnUnknownClass=*/true))
+ toAttr = "weak";
+ else
+ toAttr = "unsafe_unretained";
+ }
+ if (Attrs & ObjCPropertyDecl::OBJC_PR_assign)
+ MigrateCtx.rewritePropertyAttribute("assign", toAttr, AtLoc);
+ else
+ MigrateCtx.addPropertyAttribute(toAttr, AtLoc);
+ }
+
+ for (unsigned i = 0, e = ATLs.size(); i != e; ++i) {
+ SourceLocation Loc = ATLs[i].first.getAttrNameLoc();
+ if (Loc.isMacroID())
+ Loc = MigrateCtx.Pass.Ctx.getSourceManager()
+ .getImmediateExpansionRange(Loc).first;
+ TA.remove(Loc);
+ TA.clearDiagnostic(diag::err_objc_property_attr_mutually_exclusive, AtLoc);
+ TA.clearDiagnostic(diag::err_arc_inconsistent_property_ownership,
+ ATLs[i].second->getLocation());
+ MigrateCtx.RemovedAttrSet.insert(Loc.getRawEncoding());
+ }
+}
+
+static void checkAllProps(MigrationContext &MigrateCtx,
+ std::vector<ObjCPropertyDecl *> &AllProps) {
+ typedef llvm::TinyPtrVector<ObjCPropertyDecl *> IndivPropsTy;
+ llvm::DenseMap<unsigned, IndivPropsTy> AtProps;
+
+ for (unsigned i = 0, e = AllProps.size(); i != e; ++i) {
+ ObjCPropertyDecl *PD = AllProps[i];
+ if (PD->getPropertyAttributesAsWritten() &
+ (ObjCPropertyDecl::OBJC_PR_assign |
+ ObjCPropertyDecl::OBJC_PR_readonly)) {
+ SourceLocation AtLoc = PD->getAtLoc();
+ if (AtLoc.isInvalid())
+ continue;
+ unsigned RawAt = AtLoc.getRawEncoding();
+ AtProps[RawAt].push_back(PD);
+ }
+ }
+
+ for (llvm::DenseMap<unsigned, IndivPropsTy>::iterator
+ I = AtProps.begin(), E = AtProps.end(); I != E; ++I) {
+ SourceLocation AtLoc = SourceLocation::getFromRawEncoding(I->first);
+ IndivPropsTy &IndProps = I->second;
+ checkAllAtProps(MigrateCtx, AtLoc, IndProps);
+ }
+}
+
+void GCAttrsTraverser::traverseTU(MigrationContext &MigrateCtx) {
+ std::vector<ObjCPropertyDecl *> AllProps;
+ GCAttrsCollector(MigrateCtx, AllProps).TraverseDecl(
+ MigrateCtx.Pass.Ctx.getTranslationUnitDecl());
+
+ errorForGCAttrsOnNonObjC(MigrateCtx);
+ checkAllProps(MigrateCtx, AllProps);
+ checkWeakGCAttrs(MigrateCtx);
+}
+
+void MigrationContext::dumpGCAttrs() {
+ llvm::errs() << "\n################\n";
+ for (unsigned i = 0, e = GCAttrs.size(); i != e; ++i) {
+ GCAttrOccurrence &Attr = GCAttrs[i];
+ llvm::errs() << "KIND: "
+ << (Attr.Kind == GCAttrOccurrence::Strong ? "strong" : "weak");
+ llvm::errs() << "\nLOC: ";
+ Attr.Loc.dump(Pass.Ctx.getSourceManager());
+ llvm::errs() << "\nTYPE: ";
+ Attr.ModifiedType.dump();
+ if (Attr.Dcl) {
+ llvm::errs() << "DECL:\n";
+ Attr.Dcl->dump();
+ } else {
+ llvm::errs() << "DECL: NONE";
+ }
+ llvm::errs() << "\nMIGRATABLE: " << Attr.FullyMigratable;
+ llvm::errs() << "\n----------------\n";
+ }
+ llvm::errs() << "\n################\n";
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp
new file mode 100644
index 0000000..1be9020
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransGCCalls.cpp
@@ -0,0 +1,84 @@
+//===--- TransGCCalls.cpp - Transformations to ARC mode -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
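+//
+// Summary of the checks implemented below:
+//
+// - Calls returning a pointer to GC-managed, non-ObjC memory are reported,
+//   since that memory becomes unmanaged under ARC.
+// - NSMakeCollectable(X) calls are rewritten to CFBridgingRelease(X).
+// - CFMakeCollectable calls are reported as errors, since they would leak
+//   the object under ARC.
+//
+//===----------------------------------------------------------------------===//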
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Sema/SemaDiagnostic.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class GCCollectableCallsChecker :
+ public RecursiveASTVisitor<GCCollectableCallsChecker> {
+ MigrationContext &MigrateCtx;
+ ParentMap &PMap;
+ IdentifierInfo *NSMakeCollectableII;
+ IdentifierInfo *CFMakeCollectableII;
+
+public:
+ GCCollectableCallsChecker(MigrationContext &ctx, ParentMap &map)
+ : MigrateCtx(ctx), PMap(map) {
+ IdentifierTable &Ids = MigrateCtx.Pass.Ctx.Idents;
+ NSMakeCollectableII = &Ids.get("NSMakeCollectable");
+ CFMakeCollectableII = &Ids.get("CFMakeCollectable");
+ }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool VisitCallExpr(CallExpr *E) {
+ TransformActions &TA = MigrateCtx.Pass.TA;
+
+ if (MigrateCtx.isGCOwnedNonObjC(E->getType())) {
+ if (MigrateCtx.Pass.noNSAllocReallocError())
+ TA.reportWarning("call returns pointer to GC managed memory; "
+ "it will become unmanaged in ARC",
+ E->getLocStart(), E->getSourceRange());
+ else
+ TA.reportError("call returns pointer to GC managed memory; "
+ "it will become unmanaged in ARC",
+ E->getLocStart(), E->getSourceRange());
+ return true;
+ }
+
+ Expr *CEE = E->getCallee()->IgnoreParenImpCasts();
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) {
+ if (FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(DRE->getDecl())) {
+ if (!FD->getDeclContext()->getRedeclContext()->isFileContext())
+ return true;
+
+ if (FD->getIdentifier() == NSMakeCollectableII) {
+ Transaction Trans(TA);
+ TA.clearDiagnostic(diag::err_unavailable,
+ diag::err_unavailable_message,
+ diag::err_ovl_deleted_call, // ObjC++
+ DRE->getSourceRange());
+ TA.replace(DRE->getSourceRange(), "CFBridgingRelease");
+
+ } else if (FD->getIdentifier() == CFMakeCollectableII) {
+ TA.reportError("CFMakeCollectable will leak the object that it "
+ "receives in ARC", DRE->getLocation(),
+ DRE->getSourceRange());
+ }
+ }
+ }
+
+ return true;
+ }
+};
+
+} // anonymous namespace
+
+void GCCollectableCallsTraverser::traverseBody(BodyContext &BodyCtx) {
+ GCCollectableCallsChecker(BodyCtx.getMigrationContext(),
+ BodyCtx.getParentMap())
+ .TraverseStmt(BodyCtx.getTopStmt());
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp
new file mode 100644
index 0000000..cc85fe2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransProperties.cpp
@@ -0,0 +1,411 @@
+//===--- TransProperties.cpp - Transformations to ARC mode ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// rewriteProperties:
+//
+// - Adds strong/weak/unsafe_unretained ownership specifier to properties that
+// are missing one.
+// - Migrates properties from (retain) to (strong) and (assign) to
+// (unsafe_unretained/weak).
+//  - If a property is synthesized, adds the ownership specifier to the ivar
+// backing the property.
+//
+// @interface Foo : NSObject {
+// NSObject *x;
+// }
+// @property (assign) id x;
+// @end
+// ---->
+// @interface Foo : NSObject {
+// NSObject *__weak x;
+// }
+// @property (weak) id x;
+// @end
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include <map>
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class PropertiesRewriter {
+ MigrationContext &MigrateCtx;
+ MigrationPass &Pass;
+ ObjCImplementationDecl *CurImplD;
+
+ enum PropActionKind {
+ PropAction_None,
+ PropAction_RetainReplacedWithStrong,
+ PropAction_AssignRemoved,
+ PropAction_AssignRewritten,
+ PropAction_MaybeAddWeakOrUnsafe
+ };
+
+ struct PropData {
+ ObjCPropertyDecl *PropD;
+ ObjCIvarDecl *IvarD;
+ ObjCPropertyImplDecl *ImplD;
+
+ PropData(ObjCPropertyDecl *propD) : PropD(propD), IvarD(0), ImplD(0) { }
+ };
+
+ typedef SmallVector<PropData, 2> PropsTy;
+ typedef std::map<unsigned, PropsTy> AtPropDeclsTy;
+ AtPropDeclsTy AtProps;
+ llvm::DenseMap<IdentifierInfo *, PropActionKind> ActionOnProp;
+
+public:
+ explicit PropertiesRewriter(MigrationContext &MigrateCtx)
+ : MigrateCtx(MigrateCtx), Pass(MigrateCtx.Pass) { }
+
+ static void collectProperties(ObjCContainerDecl *D, AtPropDeclsTy &AtProps,
+ AtPropDeclsTy *PrevAtProps = 0) {
+ for (ObjCInterfaceDecl::prop_iterator
+ propI = D->prop_begin(),
+ propE = D->prop_end(); propI != propE; ++propI) {
+ if (propI->getAtLoc().isInvalid())
+ continue;
+ unsigned RawLoc = propI->getAtLoc().getRawEncoding();
+ if (PrevAtProps)
+ if (PrevAtProps->find(RawLoc) != PrevAtProps->end())
+ continue;
+ PropsTy &props = AtProps[RawLoc];
+ props.push_back(*propI);
+ }
+ }
+
+ void doTransform(ObjCImplementationDecl *D) {
+ CurImplD = D;
+ ObjCInterfaceDecl *iface = D->getClassInterface();
+ if (!iface)
+ return;
+
+ collectProperties(iface, AtProps);
+
+ typedef DeclContext::specific_decl_iterator<ObjCPropertyImplDecl>
+ prop_impl_iterator;
+ for (prop_impl_iterator
+ I = prop_impl_iterator(D->decls_begin()),
+ E = prop_impl_iterator(D->decls_end()); I != E; ++I) {
+ ObjCPropertyImplDecl *implD = *I;
+ if (implD->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
+ continue;
+ ObjCPropertyDecl *propD = implD->getPropertyDecl();
+ if (!propD || propD->isInvalidDecl())
+ continue;
+ ObjCIvarDecl *ivarD = implD->getPropertyIvarDecl();
+ if (!ivarD || ivarD->isInvalidDecl())
+ continue;
+ unsigned rawAtLoc = propD->getAtLoc().getRawEncoding();
+ AtPropDeclsTy::iterator findAtLoc = AtProps.find(rawAtLoc);
+ if (findAtLoc == AtProps.end())
+ continue;
+
+ PropsTy &props = findAtLoc->second;
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
+ if (I->PropD == propD) {
+ I->IvarD = ivarD;
+ I->ImplD = implD;
+ break;
+ }
+ }
+ }
+
+ for (AtPropDeclsTy::iterator
+ I = AtProps.begin(), E = AtProps.end(); I != E; ++I) {
+ SourceLocation atLoc = SourceLocation::getFromRawEncoding(I->first);
+ PropsTy &props = I->second;
+ if (!getPropertyType(props)->isObjCRetainableType())
+ continue;
+ if (hasIvarWithExplicitARCOwnership(props))
+ continue;
+
+ Transaction Trans(Pass.TA);
+ rewriteProperty(props, atLoc);
+ }
+
+ AtPropDeclsTy AtExtProps;
+ // Look through extensions.
+ for (ObjCCategoryDecl *Cat = iface->getCategoryList();
+ Cat; Cat = Cat->getNextClassCategory())
+ if (Cat->IsClassExtension())
+ collectProperties(Cat, AtExtProps, &AtProps);
+
+ for (AtPropDeclsTy::iterator
+ I = AtExtProps.begin(), E = AtExtProps.end(); I != E; ++I) {
+ SourceLocation atLoc = SourceLocation::getFromRawEncoding(I->first);
+ PropsTy &props = I->second;
+ Transaction Trans(Pass.TA);
+ doActionForExtensionProp(props, atLoc);
+ }
+ }
+
+private:
+ void doPropAction(PropActionKind kind,
+ PropsTy &props, SourceLocation atLoc,
+ bool markAction = true) {
+ if (markAction)
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
+ ActionOnProp[I->PropD->getIdentifier()] = kind;
+
+ switch (kind) {
+ case PropAction_None:
+ return;
+ case PropAction_RetainReplacedWithStrong: {
+ StringRef toAttr = "strong";
+ MigrateCtx.rewritePropertyAttribute("retain", toAttr, atLoc);
+ return;
+ }
+ case PropAction_AssignRemoved:
+ return removeAssignForDefaultStrong(props, atLoc);
+ case PropAction_AssignRewritten:
+ return rewriteAssign(props, atLoc);
+ case PropAction_MaybeAddWeakOrUnsafe:
+ return maybeAddWeakOrUnsafeUnretainedAttr(props, atLoc);
+ }
+ }
+
+ void doActionForExtensionProp(PropsTy &props, SourceLocation atLoc) {
+ llvm::DenseMap<IdentifierInfo *, PropActionKind>::iterator I;
+ I = ActionOnProp.find(props[0].PropD->getIdentifier());
+ if (I == ActionOnProp.end())
+ return;
+
+ doPropAction(I->second, props, atLoc, false);
+ }
+
+ void rewriteProperty(PropsTy &props, SourceLocation atLoc) {
+ ObjCPropertyDecl::PropertyAttributeKind propAttrs = getPropertyAttrs(props);
+
+ if (propAttrs & (ObjCPropertyDecl::OBJC_PR_copy |
+ ObjCPropertyDecl::OBJC_PR_unsafe_unretained |
+ ObjCPropertyDecl::OBJC_PR_strong |
+ ObjCPropertyDecl::OBJC_PR_weak))
+ return;
+
+ if (propAttrs & ObjCPropertyDecl::OBJC_PR_retain) {
+ // strong is the default.
+ return doPropAction(PropAction_RetainReplacedWithStrong, props, atLoc);
+ }
+
+ bool HasIvarAssignedAPlusOneObject = hasIvarAssignedAPlusOneObject(props);
+
+ if (propAttrs & ObjCPropertyDecl::OBJC_PR_assign) {
+ if (HasIvarAssignedAPlusOneObject)
+ return doPropAction(PropAction_AssignRemoved, props, atLoc);
+ return doPropAction(PropAction_AssignRewritten, props, atLoc);
+ }
+
+ if (HasIvarAssignedAPlusOneObject ||
+ (Pass.isGCMigration() && !hasGCWeak(props, atLoc)))
+ return; // 'strong' by default.
+
+ return doPropAction(PropAction_MaybeAddWeakOrUnsafe, props, atLoc);
+ }
+
+ void removeAssignForDefaultStrong(PropsTy &props,
+ SourceLocation atLoc) const {
+ removeAttribute("retain", atLoc);
+ if (!removeAttribute("assign", atLoc))
+ return;
+
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
+ if (I->ImplD)
+ Pass.TA.clearDiagnostic(diag::err_arc_assign_property_ownership,
+ I->ImplD->getLocation());
+ }
+ }
+
+ void rewriteAssign(PropsTy &props, SourceLocation atLoc) const {
+ bool canUseWeak = canApplyWeak(Pass.Ctx, getPropertyType(props),
+ /*AllowOnUnknownClass=*/Pass.isGCMigration());
+ const char *toWhich =
+ (Pass.isGCMigration() && !hasGCWeak(props, atLoc)) ? "strong" :
+ (canUseWeak ? "weak" : "unsafe_unretained");
+
+ bool rewroteAttr = rewriteAttribute("assign", toWhich, atLoc);
+ if (!rewroteAttr)
+ canUseWeak = false;
+
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
+ if (isUserDeclared(I->IvarD)) {
+ if (I->IvarD &&
+ I->IvarD->getType().getObjCLifetime() != Qualifiers::OCL_Weak) {
+ const char *toWhich =
+ (Pass.isGCMigration() && !hasGCWeak(props, atLoc)) ? "__strong " :
+ (canUseWeak ? "__weak " : "__unsafe_unretained ");
+ Pass.TA.insert(I->IvarD->getLocation(), toWhich);
+ }
+ }
+ if (I->ImplD)
+ Pass.TA.clearDiagnostic(diag::err_arc_assign_property_ownership,
+ I->ImplD->getLocation());
+ }
+ }
+
+ void maybeAddWeakOrUnsafeUnretainedAttr(PropsTy &props,
+ SourceLocation atLoc) const {
+ bool canUseWeak = canApplyWeak(Pass.Ctx, getPropertyType(props),
+ /*AllowOnUnknownClass=*/Pass.isGCMigration());
+
+ bool addedAttr = addAttribute(canUseWeak ? "weak" : "unsafe_unretained",
+ atLoc);
+ if (!addedAttr)
+ canUseWeak = false;
+
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
+ if (isUserDeclared(I->IvarD)) {
+ if (I->IvarD &&
+ I->IvarD->getType().getObjCLifetime() != Qualifiers::OCL_Weak)
+ Pass.TA.insert(I->IvarD->getLocation(),
+ canUseWeak ? "__weak " : "__unsafe_unretained ");
+ }
+ if (I->ImplD) {
+ Pass.TA.clearDiagnostic(diag::err_arc_assign_property_ownership,
+ I->ImplD->getLocation());
+ Pass.TA.clearDiagnostic(
+ diag::err_arc_objc_property_default_assign_on_object,
+ I->ImplD->getLocation());
+ }
+ }
+ }
+
+ bool removeAttribute(StringRef fromAttr, SourceLocation atLoc) const {
+ return MigrateCtx.removePropertyAttribute(fromAttr, atLoc);
+ }
+
+ bool rewriteAttribute(StringRef fromAttr, StringRef toAttr,
+ SourceLocation atLoc) const {
+ return MigrateCtx.rewritePropertyAttribute(fromAttr, toAttr, atLoc);
+ }
+
+ bool addAttribute(StringRef attr, SourceLocation atLoc) const {
+ return MigrateCtx.addPropertyAttribute(attr, atLoc);
+ }
+
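+  /// \brief Detects whether the given ivar is ever assigned the result of a
+  /// +1 (retained) expression; traversal returns false as soon as such an
+  /// assignment is found.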
+ class PlusOneAssign : public RecursiveASTVisitor<PlusOneAssign> {
+ ObjCIvarDecl *Ivar;
+ public:
+ PlusOneAssign(ObjCIvarDecl *D) : Ivar(D) {}
+
+ bool VisitBinAssign(BinaryOperator *E) {
+ Expr *lhs = E->getLHS()->IgnoreParenImpCasts();
+ if (ObjCIvarRefExpr *RE = dyn_cast<ObjCIvarRefExpr>(lhs)) {
+ if (RE->getDecl() != Ivar)
+ return true;
+
+ if (ObjCMessageExpr *
+ ME = dyn_cast<ObjCMessageExpr>(E->getRHS()->IgnoreParenCasts()))
+ if (ME->getMethodFamily() == OMF_retain)
+ return false;
+
+ ImplicitCastExpr *implCE = dyn_cast<ImplicitCastExpr>(E->getRHS());
+ while (implCE && implCE->getCastKind() == CK_BitCast)
+ implCE = dyn_cast<ImplicitCastExpr>(implCE->getSubExpr());
+
+ if (implCE && implCE->getCastKind() == CK_ARCConsumeObject)
+ return false;
+ }
+
+ return true;
+ }
+ };
+
+ bool hasIvarAssignedAPlusOneObject(PropsTy &props) const {
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
+ PlusOneAssign oneAssign(I->IvarD);
+ bool notFound = oneAssign.TraverseDecl(CurImplD);
+ if (!notFound)
+ return true;
+ }
+
+ return false;
+ }
+
+ bool hasIvarWithExplicitARCOwnership(PropsTy &props) const {
+ if (Pass.isGCMigration())
+ return false;
+
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I) {
+ if (isUserDeclared(I->IvarD)) {
+ if (isa<AttributedType>(I->IvarD->getType()))
+ return true;
+ if (I->IvarD->getType().getLocalQualifiers().getObjCLifetime()
+ != Qualifiers::OCL_Strong)
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ bool hasAllIvarsBacked(PropsTy &props) const {
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
+ if (!isUserDeclared(I->IvarD))
+ return false;
+
+ return true;
+ }
+
+  /// \brief Returns true if all declarations in the @property have GC __weak.
+ bool hasGCWeak(PropsTy &props, SourceLocation atLoc) const {
+ if (!Pass.isGCMigration())
+ return false;
+ if (props.empty())
+ return false;
+ return MigrateCtx.AtPropsWeak.count(atLoc.getRawEncoding());
+ }
+
+ bool isUserDeclared(ObjCIvarDecl *ivarD) const {
+ return ivarD && !ivarD->getSynthesize();
+ }
+
+ QualType getPropertyType(PropsTy &props) const {
+ assert(!props.empty());
+ QualType ty = props[0].PropD->getType().getUnqualifiedType();
+
+#ifndef NDEBUG
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
+ assert(ty == I->PropD->getType().getUnqualifiedType());
+#endif
+
+ return ty;
+ }
+
+ ObjCPropertyDecl::PropertyAttributeKind
+ getPropertyAttrs(PropsTy &props) const {
+ assert(!props.empty());
+ ObjCPropertyDecl::PropertyAttributeKind
+ attrs = props[0].PropD->getPropertyAttributesAsWritten();
+
+#ifndef NDEBUG
+ for (PropsTy::iterator I = props.begin(), E = props.end(); I != E; ++I)
+ assert(attrs == I->PropD->getPropertyAttributesAsWritten());
+#endif
+
+ return attrs;
+ }
+};
+
+} // anonymous namespace
+
+void PropertyRewriteTraverser::traverseObjCImplementation(
+ ObjCImplementationContext &ImplCtx) {
+ PropertiesRewriter(ImplCtx.getMigrationContext())
+ .doTransform(ImplCtx.getImplementationDecl());
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
new file mode 100644
index 0000000..11a6553
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransRetainReleaseDealloc.cpp
@@ -0,0 +1,303 @@
+//===--- TransRetainReleaseDealloc.cpp - Transformations to ARC mode ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// removeRetainReleaseDeallocFinalize:
+//
+// Removes retain/release/autorelease/dealloc/finalize messages.
+//
+// return [[foo retain] autorelease];
+// ---->
+// return foo;
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Basic/SourceManager.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class RetainReleaseDeallocRemover :
+ public RecursiveASTVisitor<RetainReleaseDeallocRemover> {
+ Stmt *Body;
+ MigrationPass &Pass;
+
+ ExprSet Removables;
+ OwningPtr<ParentMap> StmtMap;
+
+ Selector DelegateSel, FinalizeSel;
+
+public:
+ RetainReleaseDeallocRemover(MigrationPass &pass)
+ : Body(0), Pass(pass) {
+ DelegateSel =
+ Pass.Ctx.Selectors.getNullarySelector(&Pass.Ctx.Idents.get("delegate"));
+ FinalizeSel =
+ Pass.Ctx.Selectors.getNullarySelector(&Pass.Ctx.Idents.get("finalize"));
+ }
+
+ void transformBody(Stmt *body) {
+ Body = body;
+ collectRemovables(body, Removables);
+ StmtMap.reset(new ParentMap(body));
+ TraverseStmt(body);
+ }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ switch (E->getMethodFamily()) {
+ default:
+ if (E->isInstanceMessage() && E->getSelector() == FinalizeSel)
+ break;
+ return true;
+ case OMF_autorelease:
+ if (isRemovable(E)) {
+        // An unused autorelease is dangerous to remove: the receiver would
+        // likely be destroyed immediately, whereas previously it was kept
+        // alive by the autorelease pool. This is bad practice in general, so
+        // leave it and emit an error to force the user to restructure the
+        // code.
+ Pass.TA.reportError("it is not safe to remove an unused 'autorelease' "
+ "message; its receiver may be destroyed immediately",
+ E->getLocStart(), E->getSourceRange());
+ return true;
+ }
+      // Fall through.
+ case OMF_retain:
+ case OMF_release:
+ if (E->getReceiverKind() == ObjCMessageExpr::Instance)
+ if (Expr *rec = E->getInstanceReceiver()) {
+ rec = rec->IgnoreParenImpCasts();
+ if (rec->getType().getObjCLifetime() == Qualifiers::OCL_ExplicitNone &&
+ (E->getMethodFamily() != OMF_retain || isRemovable(E))) {
+ std::string err = "it is not safe to remove '";
+ err += E->getSelector().getAsString() + "' message on "
+ "an __unsafe_unretained type";
+ Pass.TA.reportError(err, rec->getLocStart());
+ return true;
+ }
+
+ if (isGlobalVar(rec) &&
+ (E->getMethodFamily() != OMF_retain || isRemovable(E))) {
+ std::string err = "it is not safe to remove '";
+ err += E->getSelector().getAsString() + "' message on "
+ "a global variable";
+ Pass.TA.reportError(err, rec->getLocStart());
+ return true;
+ }
+
+ if (E->getMethodFamily() == OMF_release && isDelegateMessage(rec)) {
+ Pass.TA.reportError("it is not safe to remove 'retain' "
+ "message on the result of a 'delegate' message; "
+ "the object that was passed to 'setDelegate:' may not be "
+ "properly retained", rec->getLocStart());
+ return true;
+ }
+ }
+ case OMF_dealloc:
+ break;
+ }
+
+ switch (E->getReceiverKind()) {
+ default:
+ return true;
+ case ObjCMessageExpr::SuperInstance: {
+ Transaction Trans(Pass.TA);
+ clearDiagnostics(E->getSuperLoc());
+ if (tryRemoving(E))
+ return true;
+ Pass.TA.replace(E->getSourceRange(), "self");
+ return true;
+ }
+ case ObjCMessageExpr::Instance:
+ break;
+ }
+
+ Expr *rec = E->getInstanceReceiver();
+ if (!rec) return true;
+
+ Transaction Trans(Pass.TA);
+ clearDiagnostics(rec->getExprLoc());
+
+ ObjCMessageExpr *Msg = E;
+ Expr *RecContainer = Msg;
+ SourceRange RecRange = rec->getSourceRange();
+ checkForGCDOrXPC(Msg, RecContainer, rec, RecRange);
+
+ if (Msg->getMethodFamily() == OMF_release &&
+ isRemovable(RecContainer) && isInAtFinally(RecContainer)) {
+ // Change the -release to "receiver = nil" in a finally to avoid a leak
+ // when an exception is thrown.
+ Pass.TA.replace(RecContainer->getSourceRange(), RecRange);
+ std::string str = " = ";
+ str += getNilString(Pass.Ctx);
+ Pass.TA.insertAfterToken(RecRange.getEnd(), str);
+ return true;
+ }
+
+ if (!hasSideEffects(rec, Pass.Ctx)) {
+ if (tryRemoving(RecContainer))
+ return true;
+ }
+ Pass.TA.replace(RecContainer->getSourceRange(), RecRange);
+
+ return true;
+ }
+
+private:
+  /// \brief Check if the retain/release is due to one of the GCD/XPC macros
+  /// that are defined as:
+ ///
+ /// #define dispatch_retain(object) ({ dispatch_object_t _o = (object); _dispatch_object_validate(_o); (void)[_o retain]; })
+ /// #define dispatch_release(object) ({ dispatch_object_t _o = (object); _dispatch_object_validate(_o); [_o release]; })
+ /// #define xpc_retain(object) ({ xpc_object_t _o = (object); _xpc_object_validate(_o); [_o retain]; })
+ /// #define xpc_release(object) ({ xpc_object_t _o = (object); _xpc_object_validate(_o); [_o release]; })
+ ///
+ /// and return the top container which is the StmtExpr and the macro argument
+ /// expression.
+ void checkForGCDOrXPC(ObjCMessageExpr *Msg, Expr *&RecContainer,
+ Expr *&Rec, SourceRange &RecRange) {
+ SourceLocation Loc = Msg->getExprLoc();
+ if (!Loc.isMacroID())
+ return;
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+ StringRef MacroName = Lexer::getImmediateMacroName(Loc, SM,
+ Pass.Ctx.getLangOpts());
+ bool isGCDOrXPC = llvm::StringSwitch<bool>(MacroName)
+ .Case("dispatch_retain", true)
+ .Case("dispatch_release", true)
+ .Case("xpc_retain", true)
+ .Case("xpc_release", true)
+ .Default(false);
+ if (!isGCDOrXPC)
+ return;
+
+ StmtExpr *StmtE = 0;
+ Stmt *S = Msg;
+ while (S) {
+ if (StmtExpr *SE = dyn_cast<StmtExpr>(S)) {
+ StmtE = SE;
+ break;
+ }
+ S = StmtMap->getParent(S);
+ }
+
+ if (!StmtE)
+ return;
+
+ Stmt::child_range StmtExprChild = StmtE->children();
+ if (!StmtExprChild)
+ return;
+ CompoundStmt *CompS = dyn_cast_or_null<CompoundStmt>(*StmtExprChild);
+ if (!CompS)
+ return;
+
+ Stmt::child_range CompStmtChild = CompS->children();
+ if (!CompStmtChild)
+ return;
+ DeclStmt *DeclS = dyn_cast_or_null<DeclStmt>(*CompStmtChild);
+ if (!DeclS)
+ return;
+ if (!DeclS->isSingleDecl())
+ return;
+ VarDecl *VD = dyn_cast_or_null<VarDecl>(DeclS->getSingleDecl());
+ if (!VD)
+ return;
+ Expr *Init = VD->getInit();
+ if (!Init)
+ return;
+
+ RecContainer = StmtE;
+ Rec = Init->IgnoreParenImpCasts();
+ if (ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Rec))
+ Rec = EWC->getSubExpr()->IgnoreParenImpCasts();
+ RecRange = Rec->getSourceRange();
+ if (SM.isMacroArgExpansion(RecRange.getBegin()))
+ RecRange.setBegin(SM.getImmediateSpellingLoc(RecRange.getBegin()));
+ if (SM.isMacroArgExpansion(RecRange.getEnd()))
+ RecRange.setEnd(SM.getImmediateSpellingLoc(RecRange.getEnd()));
+ }
+
+ void clearDiagnostics(SourceLocation loc) const {
+ Pass.TA.clearDiagnostic(diag::err_arc_illegal_explicit_message,
+ diag::err_unavailable,
+ diag::err_unavailable_message,
+ loc);
+ }
+
+ bool isDelegateMessage(Expr *E) const {
+ if (!E) return false;
+
+ E = E->IgnoreParenCasts();
+
+ // Also look through property-getter sugar.
+ if (PseudoObjectExpr *pseudoOp = dyn_cast<PseudoObjectExpr>(E))
+ E = pseudoOp->getResultExpr()->IgnoreImplicit();
+
+ if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E))
+ return (ME->isInstanceMessage() && ME->getSelector() == DelegateSel);
+
+ return false;
+ }
+
+ bool isInAtFinally(Expr *E) const {
+ assert(E);
+ Stmt *S = E;
+ while (S) {
+ if (isa<ObjCAtFinallyStmt>(S))
+ return true;
+ S = StmtMap->getParent(S);
+ }
+
+ return false;
+ }
+
+ bool isRemovable(Expr *E) const {
+ return Removables.count(E);
+ }
+
+ bool tryRemoving(Expr *E) const {
+ if (isRemovable(E)) {
+ Pass.TA.removeStmt(E);
+ return true;
+ }
+
+ Stmt *parent = StmtMap->getParent(E);
+
+ if (ImplicitCastExpr *castE = dyn_cast_or_null<ImplicitCastExpr>(parent))
+ return tryRemoving(castE);
+
+ if (ParenExpr *parenE = dyn_cast_or_null<ParenExpr>(parent))
+ return tryRemoving(parenE);
+
+ if (BinaryOperator *
+ bopE = dyn_cast_or_null<BinaryOperator>(parent)) {
+ if (bopE->getOpcode() == BO_Comma && bopE->getLHS() == E &&
+ isRemovable(bopE)) {
+ Pass.TA.replace(bopE->getSourceRange(), bopE->getRHS()->getSourceRange());
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+};
+
+} // anonymous namespace
+
+void trans::removeRetainReleaseDeallocFinalize(MigrationPass &pass) {
+ BodyTransform<RetainReleaseDeallocRemover> trans(pass);
+ trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp
new file mode 100644
index 0000000..48437c7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnbridgedCasts.cpp
@@ -0,0 +1,336 @@
+//===--- TransUnbridgedCasts.cpp - Transformations to ARC mode ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// rewriteUnbridgedCasts:
+//
+// A cast of a non-ObjC pointer to an ObjC one is checked. If the non-ObjC
+// pointer comes from a file-level variable, a __bridge cast is used to
+// convert it.
+// For the result of a function call that we know is +1/+0,
+// __bridge/__bridge_transfer is used.
+//
+// NSString *str = (NSString *)kUTTypePlainText;
+// str = b ? kUTTypeRTF : kUTTypePlainText;
+// NSString *_uuidString = (NSString *)CFUUIDCreateString(kCFAllocatorDefault,
+// _uuid);
+// ---->
+// NSString *str = (__bridge NSString *)kUTTypePlainText;
+// str = (__bridge NSString *)(b ? kUTTypeRTF : kUTTypePlainText);
+// NSString *_uuidString = (__bridge_transfer NSString *)
+// CFUUIDCreateString(kCFAllocatorDefault, _uuid);
+//
+// For a cast from an ObjC pointer to a C pointer, e.g. casting 'self',
+// __bridge is used.
+//
+// CFStringRef str = (CFStringRef)self;
+// ---->
+// CFStringRef str = (__bridge CFStringRef)self;
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class UnbridgedCastRewriter : public RecursiveASTVisitor<UnbridgedCastRewriter>{
+ MigrationPass &Pass;
+ IdentifierInfo *SelfII;
+ OwningPtr<ParentMap> StmtMap;
+
+public:
+ UnbridgedCastRewriter(MigrationPass &pass) : Pass(pass) {
+ SelfII = &Pass.Ctx.Idents.get("self");
+ }
+
+ void transformBody(Stmt *body) {
+ StmtMap.reset(new ParentMap(body));
+ TraverseStmt(body);
+ }
+
+ bool VisitCastExpr(CastExpr *E) {
+ if (E->getCastKind() != CK_CPointerToObjCPointerCast
+ && E->getCastKind() != CK_BitCast)
+ return true;
+
+ QualType castType = E->getType();
+ Expr *castExpr = E->getSubExpr();
+ QualType castExprType = castExpr->getType();
+
+ if (castType->isObjCObjectPointerType() &&
+ castExprType->isObjCObjectPointerType())
+ return true;
+ if (!castType->isObjCObjectPointerType() &&
+ !castExprType->isObjCObjectPointerType())
+ return true;
+
+ bool exprRetainable = castExprType->isObjCIndirectLifetimeType();
+ bool castRetainable = castType->isObjCIndirectLifetimeType();
+ if (exprRetainable == castRetainable) return true;
+
+ if (castExpr->isNullPointerConstant(Pass.Ctx,
+ Expr::NPC_ValueDependentIsNull))
+ return true;
+
+ SourceLocation loc = castExpr->getExprLoc();
+ if (loc.isValid() && Pass.Ctx.getSourceManager().isInSystemHeader(loc))
+ return true;
+
+ if (castType->isObjCObjectPointerType())
+ transformNonObjCToObjCCast(E);
+ else
+ transformObjCToNonObjCCast(E);
+
+ return true;
+ }
+
+private:
+ void transformNonObjCToObjCCast(CastExpr *E) {
+ if (!E) return;
+
+    // Global vars are assumed to be cast as unretained.
+ if (isGlobalVar(E))
+ if (E->getSubExpr()->getType()->isPointerType()) {
+ castToObjCObject(E, /*retained=*/false);
+ return;
+ }
+
+ // If the cast is directly over the result of a Core Foundation function
+ // try to figure out whether it should be cast as retained or unretained.
+ Expr *inner = E->IgnoreParenCasts();
+ if (CallExpr *callE = dyn_cast<CallExpr>(inner)) {
+ if (FunctionDecl *FD = callE->getDirectCallee()) {
+ if (FD->getAttr<CFReturnsRetainedAttr>()) {
+ castToObjCObject(E, /*retained=*/true);
+ return;
+ }
+ if (FD->getAttr<CFReturnsNotRetainedAttr>()) {
+ castToObjCObject(E, /*retained=*/false);
+ return;
+ }
+ if (FD->isGlobal() &&
+ FD->getIdentifier() &&
+ ento::cocoa::isRefType(E->getSubExpr()->getType(), "CF",
+ FD->getIdentifier()->getName())) {
+ StringRef fname = FD->getIdentifier()->getName();
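+          // Follow the Core Foundation naming conventions: functions whose
+          // names contain "Create" or "Copy" (or end in "Retain") return a
+          // +1 reference, while "Get" functions return a +0 reference.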
+ if (fname.endswith("Retain") ||
+ fname.find("Create") != StringRef::npos ||
+ fname.find("Copy") != StringRef::npos) {
+            // Do not migrate to a pair of bridge transfer casts that
+            // cancel each other out. Leave it unchanged so the error gets
+            // the user's attention instead.
+ if (FD->getName() == "CFRetain" &&
+ FD->getNumParams() == 1 &&
+ FD->getParent()->isTranslationUnit() &&
+ FD->getLinkage() == ExternalLinkage) {
+ Expr *Arg = callE->getArg(0);
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+ const Expr *sub = ICE->getSubExpr();
+ QualType T = sub->getType();
+ if (T->isObjCObjectPointerType())
+ return;
+ }
+ }
+ castToObjCObject(E, /*retained=*/true);
+ return;
+ }
+
+ if (fname.find("Get") != StringRef::npos) {
+ castToObjCObject(E, /*retained=*/false);
+ return;
+ }
+ }
+ }
+ }
+ }
+
+ void castToObjCObject(CastExpr *E, bool retained) {
+ rewriteToBridgedCast(E, retained ? OBC_BridgeTransfer : OBC_Bridge);
+ }
+
+ void rewriteToBridgedCast(CastExpr *E, ObjCBridgeCastKind Kind) {
+ Transaction Trans(Pass.TA);
+ rewriteToBridgedCast(E, Kind, Trans);
+ }
+
+ void rewriteToBridgedCast(CastExpr *E, ObjCBridgeCastKind Kind,
+ Transaction &Trans) {
+ TransformActions &TA = Pass.TA;
+
+ // We will remove the compiler diagnostic.
+ if (!TA.hasDiagnostic(diag::err_arc_mismatched_cast,
+ diag::err_arc_cast_requires_bridge,
+ E->getLocStart())) {
+ Trans.abort();
+ return;
+ }
+
+ StringRef bridge;
+ switch(Kind) {
+ case OBC_Bridge:
+ bridge = "__bridge "; break;
+ case OBC_BridgeTransfer:
+ bridge = "__bridge_transfer "; break;
+ case OBC_BridgeRetained:
+ bridge = "__bridge_retained "; break;
+ }
+
+ TA.clearDiagnostic(diag::err_arc_mismatched_cast,
+ diag::err_arc_cast_requires_bridge,
+ E->getLocStart());
+ if (CStyleCastExpr *CCE = dyn_cast<CStyleCastExpr>(E)) {
+ TA.insertAfterToken(CCE->getLParenLoc(), bridge);
+ } else {
+ SourceLocation insertLoc = E->getSubExpr()->getLocStart();
+ SmallString<128> newCast;
+ newCast += '(';
+ newCast += bridge;
+ newCast += E->getType().getAsString(Pass.Ctx.getPrintingPolicy());
+ newCast += ')';
+
+ if (isa<ParenExpr>(E->getSubExpr())) {
+ TA.insert(insertLoc, newCast.str());
+ } else {
+ newCast += '(';
+ TA.insert(insertLoc, newCast.str());
+ TA.insertAfterToken(E->getLocEnd(), ")");
+ }
+ }
+ }
+
+ void rewriteCastForCFRetain(CastExpr *castE, CallExpr *callE) {
+ Transaction Trans(Pass.TA);
+ Pass.TA.replace(callE->getSourceRange(), callE->getArg(0)->getSourceRange());
+ rewriteToBridgedCast(castE, OBC_BridgeRetained, Trans);
+ }
+
+ void transformObjCToNonObjCCast(CastExpr *E) {
+ if (isSelf(E->getSubExpr()))
+ return rewriteToBridgedCast(E, OBC_Bridge);
+
+ CallExpr *callE;
+ if (isPassedToCFRetain(E, callE))
+ return rewriteCastForCFRetain(E, callE);
+
+ ObjCMethodFamily family = getFamilyOfMessage(E->getSubExpr());
+ if (family == OMF_retain)
+ return rewriteToBridgedCast(E, OBC_BridgeRetained);
+
+ if (family == OMF_autorelease || family == OMF_release) {
+ std::string err = "it is not safe to cast to '";
+ err += E->getType().getAsString(Pass.Ctx.getPrintingPolicy());
+ err += "' the result of '";
+ err += family == OMF_autorelease ? "autorelease" : "release";
+ err += "' message; a __bridge cast may result in a pointer to a "
+ "destroyed object and a __bridge_retained may leak the object";
+ Pass.TA.reportError(err, E->getLocStart(),
+ E->getSubExpr()->getSourceRange());
+ Stmt *parent = E;
+ do {
+ parent = StmtMap->getParentIgnoreParenImpCasts(parent);
+ } while (parent && isa<ExprWithCleanups>(parent));
+
+ if (ReturnStmt *retS = dyn_cast_or_null<ReturnStmt>(parent)) {
+ std::string note = "remove the cast and change return type of function "
+ "to '";
+ note += E->getSubExpr()->getType().getAsString(Pass.Ctx.getPrintingPolicy());
+ note += "' to have the object automatically autoreleased";
+ Pass.TA.reportNote(note, retS->getLocStart());
+ }
+ }
+
+ Expr *subExpr = E->getSubExpr();
+
+ // Look through pseudo-object expressions.
+ if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(subExpr)) {
+ subExpr = pseudo->getResultExpr();
+ assert(subExpr && "no result for pseudo-object of non-void type?");
+ }
+
+ if (ImplicitCastExpr *implCE = dyn_cast<ImplicitCastExpr>(subExpr)) {
+ if (implCE->getCastKind() == CK_ARCConsumeObject)
+ return rewriteToBridgedCast(E, OBC_BridgeRetained);
+ if (implCE->getCastKind() == CK_ARCReclaimReturnedObject)
+ return rewriteToBridgedCast(E, OBC_Bridge);
+ }
+
+ bool isConsumed = false;
+ if (isPassedToCParamWithKnownOwnership(E, isConsumed))
+ return rewriteToBridgedCast(E, isConsumed ? OBC_BridgeRetained
+ : OBC_Bridge);
+ }
+
+ static ObjCMethodFamily getFamilyOfMessage(Expr *E) {
+ E = E->IgnoreParenCasts();
+ if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E))
+ return ME->getMethodFamily();
+
+ return OMF_None;
+ }
+
+ bool isPassedToCFRetain(Expr *E, CallExpr *&callE) const {
+ if ((callE = dyn_cast_or_null<CallExpr>(
+ StmtMap->getParentIgnoreParenImpCasts(E))))
+ if (FunctionDecl *
+ FD = dyn_cast_or_null<FunctionDecl>(callE->getCalleeDecl()))
+ if (FD->getName() == "CFRetain" && FD->getNumParams() == 1 &&
+ FD->getParent()->isTranslationUnit() &&
+ FD->getLinkage() == ExternalLinkage)
+ return true;
+
+ return false;
+ }
+
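+  /// \brief Returns true when the expression is passed as an argument whose
+  /// corresponding C function parameter carries the cf_consumed attribute;
+  /// 'isConsumed' is set to true in that case.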
+ bool isPassedToCParamWithKnownOwnership(Expr *E, bool &isConsumed) const {
+ if (CallExpr *callE = dyn_cast_or_null<CallExpr>(
+ StmtMap->getParentIgnoreParenImpCasts(E)))
+ if (FunctionDecl *
+ FD = dyn_cast_or_null<FunctionDecl>(callE->getCalleeDecl())) {
+ unsigned i = 0;
+ for (unsigned e = callE->getNumArgs(); i != e; ++i) {
+ Expr *arg = callE->getArg(i);
+ if (arg == E || arg->IgnoreParenImpCasts() == E)
+ break;
+ }
+ if (i < callE->getNumArgs()) {
+ ParmVarDecl *PD = FD->getParamDecl(i);
+ if (PD->getAttr<CFConsumedAttr>()) {
+ isConsumed = true;
+ return true;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ bool isSelf(Expr *E) const {
+ E = E->IgnoreParenLValueCasts();
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ if (ImplicitParamDecl *IPD = dyn_cast<ImplicitParamDecl>(DRE->getDecl()))
+ if (IPD->getIdentifier() == SelfII)
+ return true;
+
+ return false;
+ }
+};
+
+} // end anonymous namespace
+
+void trans::rewriteUnbridgedCasts(MigrationPass &pass) {
+ BodyTransform<UnbridgedCastRewriter> trans(pass);
+ trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp
new file mode 100644
index 0000000..60ed32a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransUnusedInitDelegate.cpp
@@ -0,0 +1,77 @@
+//===--- TransUnusedInitDelegate.cpp - Transformations to ARC mode --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// Transformations:
+//===----------------------------------------------------------------------===//
+//
+// rewriteUnusedInitDelegate:
+//
+// Rewrites an unused result of calling a delegate initialization so that the
+// result is assigned to 'self' and a failed initialization bails out, e.g.:
+//
+//  [self init];
+// ---->
+//  if (!(self = [self init])) return nil;
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Sema/SemaDiagnostic.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class UnusedInitRewriter : public RecursiveASTVisitor<UnusedInitRewriter> {
+ Stmt *Body;
+ MigrationPass &Pass;
+
+ ExprSet Removables;
+
+public:
+ UnusedInitRewriter(MigrationPass &pass)
+ : Body(0), Pass(pass) { }
+
+ void transformBody(Stmt *body) {
+ Body = body;
+ collectRemovables(body, Removables);
+ TraverseStmt(body);
+ }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *ME) {
+ if (ME->isDelegateInitCall() &&
+ isRemovable(ME) &&
+ Pass.TA.hasDiagnostic(diag::err_arc_unused_init_message,
+ ME->getExprLoc())) {
+ Transaction Trans(Pass.TA);
+ Pass.TA.clearDiagnostic(diag::err_arc_unused_init_message,
+ ME->getExprLoc());
+ SourceRange ExprRange = ME->getSourceRange();
+ Pass.TA.insert(ExprRange.getBegin(), "if (!(self = ");
+ std::string retStr = ")) return ";
+ retStr += getNilString(Pass.Ctx);
+ Pass.TA.insertAfterToken(ExprRange.getEnd(), retStr);
+ }
+ return true;
+ }
+
+private:
+ bool isRemovable(Expr *E) const {
+ return Removables.count(E);
+ }
+};
+
+} // anonymous namespace
+
+void trans::rewriteUnusedInitDelegate(MigrationPass &pass) {
+ BodyTransform<UnusedInitRewriter> trans(pass);
+ trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
new file mode 100644
index 0000000..d1f08aa
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransZeroOutPropsInDealloc.cpp
@@ -0,0 +1,228 @@
+//===--- TransZeroOutPropsInDealloc.cpp - Transformations to ARC mode -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// removeZeroOutPropsInDeallocFinalize:
+//
+// Removes zeroing out of "strong" @synthesized properties in a -dealloc
+// (or GC -finalize) method.
+//
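+// e.g. (illustrative; the property and ivar names below are made up):
+//
+//  - (void)dealloc {
+//    self.array = nil;  // removed: setter-based zero-out of a synthesized
+//                       // retain/copy/strong property
+//    _array = nil;      // removed: direct zero-out of the property's
+//                       // backing ivar
+//    [super dealloc];
+//  }
+//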
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+namespace {
+
+class ZeroOutInDeallocRemover :
+ public RecursiveASTVisitor<ZeroOutInDeallocRemover> {
+ typedef RecursiveASTVisitor<ZeroOutInDeallocRemover> base;
+
+ MigrationPass &Pass;
+
+ llvm::DenseMap<ObjCPropertyDecl*, ObjCPropertyImplDecl*> SynthesizedProperties;
+ ImplicitParamDecl *SelfD;
+ ExprSet Removables;
+ Selector FinalizeSel;
+
+public:
+ ZeroOutInDeallocRemover(MigrationPass &pass) : Pass(pass), SelfD(0) {
+ FinalizeSel =
+ Pass.Ctx.Selectors.getNullarySelector(&Pass.Ctx.Idents.get("finalize"));
+ }
+
+ bool VisitObjCMessageExpr(ObjCMessageExpr *ME) {
+ ASTContext &Ctx = Pass.Ctx;
+ TransformActions &TA = Pass.TA;
+
+ if (ME->getReceiverKind() != ObjCMessageExpr::Instance)
+ return true;
+ Expr *receiver = ME->getInstanceReceiver();
+ if (!receiver)
+ return true;
+
+ DeclRefExpr *refE = dyn_cast<DeclRefExpr>(receiver->IgnoreParenCasts());
+ if (!refE || refE->getDecl() != SelfD)
+ return true;
+
+ bool BackedBySynthesizeSetter = false;
+ for (llvm::DenseMap<ObjCPropertyDecl*, ObjCPropertyImplDecl*>::iterator
+ P = SynthesizedProperties.begin(),
+ E = SynthesizedProperties.end(); P != E; ++P) {
+ ObjCPropertyDecl *PropDecl = P->first;
+ if (PropDecl->getSetterName() == ME->getSelector()) {
+ BackedBySynthesizeSetter = true;
+ break;
+ }
+ }
+ if (!BackedBySynthesizeSetter)
+ return true;
+
+ // Remove the setter message if RHS is null
+ Transaction Trans(TA);
+ Expr *RHS = ME->getArg(0);
+ bool RHSIsNull =
+ RHS->isNullPointerConstant(Ctx,
+ Expr::NPC_ValueDependentIsNull);
+ if (RHSIsNull && isRemovable(ME))
+ TA.removeStmt(ME);
+
+ return true;
+ }
+
+ bool VisitPseudoObjectExpr(PseudoObjectExpr *POE) {
+ if (isZeroingPropIvar(POE) && isRemovable(POE)) {
+ Transaction Trans(Pass.TA);
+ Pass.TA.removeStmt(POE);
+ }
+
+ return true;
+ }
+
+ bool VisitBinaryOperator(BinaryOperator *BOE) {
+ if (isZeroingPropIvar(BOE) && isRemovable(BOE)) {
+ Transaction Trans(Pass.TA);
+ Pass.TA.removeStmt(BOE);
+ }
+
+ return true;
+ }
+
+ bool TraverseObjCMethodDecl(ObjCMethodDecl *D) {
+ if (D->getMethodFamily() != OMF_dealloc &&
+ !(D->isInstanceMethod() && D->getSelector() == FinalizeSel))
+ return true;
+ if (!D->hasBody())
+ return true;
+
+ ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(D->getDeclContext());
+ if (!IMD)
+ return true;
+
+ SelfD = D->getSelfDecl();
+ collectRemovables(D->getBody(), Removables);
+
+ // For a 'dealloc' method use, find all property implementations in
+ // this class implementation.
+ for (ObjCImplDecl::propimpl_iterator
+ I = IMD->propimpl_begin(), EI = IMD->propimpl_end(); I != EI; ++I) {
+ ObjCPropertyImplDecl *PID = *I;
+ if (PID->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCMethodDecl *setterM = PD->getSetterMethodDecl();
+ if (!(setterM && setterM->isDefined())) {
+ ObjCPropertyDecl::PropertyAttributeKind AttrKind =
+ PD->getPropertyAttributes();
+ if (AttrKind &
+ (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy |
+ ObjCPropertyDecl::OBJC_PR_strong))
+ SynthesizedProperties[PD] = PID;
+ }
+ }
+ }
+
+ // Now, remove all zeroing of ivars etc.
+ base::TraverseObjCMethodDecl(D);
+
+ // clear out for next method.
+ SynthesizedProperties.clear();
+ SelfD = 0;
+ Removables.clear();
+ return true;
+ }
+
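+  // Don't descend into blocks or functions nested inside the method; only
+  // zero-outs in the -dealloc/-finalize body itself are considered.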
+ bool TraverseFunctionDecl(FunctionDecl *D) { return true; }
+ bool TraverseBlockDecl(BlockDecl *block) { return true; }
+ bool TraverseBlockExpr(BlockExpr *block) { return true; }
+
+private:
+ bool isRemovable(Expr *E) const {
+ return Removables.count(E);
+ }
+
+ bool isZeroingPropIvar(Expr *E) {
+ E = E->IgnoreParens();
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E))
+ return isZeroingPropIvar(BO);
+ if (PseudoObjectExpr *PO = dyn_cast<PseudoObjectExpr>(E))
+ return isZeroingPropIvar(PO);
+ return false;
+ }
+
+ bool isZeroingPropIvar(BinaryOperator *BOE) {
+ if (BOE->getOpcode() == BO_Comma)
+ return isZeroingPropIvar(BOE->getLHS()) &&
+ isZeroingPropIvar(BOE->getRHS());
+
+ if (BOE->getOpcode() != BO_Assign)
+ return false;
+
+ Expr *LHS = BOE->getLHS();
+ if (ObjCIvarRefExpr *IV = dyn_cast<ObjCIvarRefExpr>(LHS)) {
+ ObjCIvarDecl *IVDecl = IV->getDecl();
+ if (!IVDecl->getType()->isObjCObjectPointerType())
+ return false;
+ bool IvarBacksPropertySynthesis = false;
+ for (llvm::DenseMap<ObjCPropertyDecl*, ObjCPropertyImplDecl*>::iterator
+ P = SynthesizedProperties.begin(),
+ E = SynthesizedProperties.end(); P != E; ++P) {
+ ObjCPropertyImplDecl *PropImpDecl = P->second;
+ if (PropImpDecl && PropImpDecl->getPropertyIvarDecl() == IVDecl) {
+ IvarBacksPropertySynthesis = true;
+ break;
+ }
+ }
+ if (!IvarBacksPropertySynthesis)
+ return false;
+ }
+ else
+ return false;
+
+ return isZero(BOE->getRHS());
+ }
+
+ bool isZeroingPropIvar(PseudoObjectExpr *PO) {
+ BinaryOperator *BO = dyn_cast<BinaryOperator>(PO->getSyntacticForm());
+ if (!BO) return false;
+ if (BO->getOpcode() != BO_Assign) return false;
+
+ ObjCPropertyRefExpr *PropRefExp =
+ dyn_cast<ObjCPropertyRefExpr>(BO->getLHS()->IgnoreParens());
+ if (!PropRefExp) return false;
+
+ // TODO: Using implicit property decl.
+ if (PropRefExp->isImplicitProperty())
+ return false;
+
+ if (ObjCPropertyDecl *PDecl = PropRefExp->getExplicitProperty()) {
+ if (!SynthesizedProperties.count(PDecl))
+ return false;
+ }
+
+ return isZero(cast<OpaqueValueExpr>(BO->getRHS())->getSourceExpr());
+ }
+
+ bool isZero(Expr *E) {
+ if (E->isNullPointerConstant(Pass.Ctx, Expr::NPC_ValueDependentIsNull))
+ return true;
+
+ return isZeroingPropIvar(E);
+ }
+};
+
+} // anonymous namespace
+
+void trans::removeZeroOutPropsInDeallocFinalize(MigrationPass &pass) {
+ ZeroOutInDeallocRemover trans(pass);
+ trans.TraverseDecl(pass.Ctx.getTranslationUnitDecl());
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp
new file mode 100644
index 0000000..0ecfeb5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/TransformActions.cpp
@@ -0,0 +1,731 @@
+//===--- TransformActions.cpp - Migration to ARC mode ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Internals.h"
+#include "clang/AST/Expr.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/DenseSet.h"
+#include <map>
+using namespace clang;
+using namespace arcmt;
+
+namespace {
+
+/// \brief Collects transformations and merges them before applying them
+/// with applyRewrites(). E.g. if the same source range
+/// is requested to be removed twice, only one rewriter remove will be invoked.
+/// Rewrites happen in "transactions"; if one rewrite in the transaction cannot
+/// be done (e.g. it resides in a macro) all rewrites in the transaction are
+/// aborted.
+/// FIXME: "Transactional" rewrites support should be baked in the Rewriter.
+class TransformActionsImpl {
+ CapturedDiagList &CapturedDiags;
+ ASTContext &Ctx;
+ Preprocessor &PP;
+
+ bool IsInTransaction;
+
+ enum ActionKind {
+ Act_Insert, Act_InsertAfterToken,
+ Act_Remove, Act_RemoveStmt,
+ Act_Replace, Act_ReplaceText,
+ Act_IncreaseIndentation,
+ Act_ClearDiagnostic
+ };
+
+ struct ActionData {
+ ActionKind Kind;
+ SourceLocation Loc;
+ SourceRange R1, R2;
+ StringRef Text1, Text2;
+ Stmt *S;
+ SmallVector<unsigned, 2> DiagIDs;
+ };
+
+ std::vector<ActionData> CachedActions;
+
+ enum RangeComparison {
+ Range_Before,
+ Range_After,
+ Range_Contains,
+ Range_Contained,
+ Range_ExtendsBegin,
+ Range_ExtendsEnd
+ };
+
+ /// \brief A range to remove. It is a character range.
+ struct CharRange {
+ FullSourceLoc Begin, End;
+
+ CharRange(CharSourceRange range, SourceManager &srcMgr, Preprocessor &PP) {
+ SourceLocation beginLoc = range.getBegin(), endLoc = range.getEnd();
+ assert(beginLoc.isValid() && endLoc.isValid());
+ if (range.isTokenRange()) {
+ Begin = FullSourceLoc(srcMgr.getExpansionLoc(beginLoc), srcMgr);
+ End = FullSourceLoc(getLocForEndOfToken(endLoc, srcMgr, PP), srcMgr);
+ } else {
+ Begin = FullSourceLoc(srcMgr.getExpansionLoc(beginLoc), srcMgr);
+ End = FullSourceLoc(srcMgr.getExpansionLoc(endLoc), srcMgr);
+ }
+ assert(Begin.isValid() && End.isValid());
+ }
+
+ RangeComparison compareWith(const CharRange &RHS) const {
+ if (End.isBeforeInTranslationUnitThan(RHS.Begin))
+ return Range_Before;
+ if (RHS.End.isBeforeInTranslationUnitThan(Begin))
+ return Range_After;
+ if (!Begin.isBeforeInTranslationUnitThan(RHS.Begin) &&
+ !RHS.End.isBeforeInTranslationUnitThan(End))
+ return Range_Contained;
+ if (Begin.isBeforeInTranslationUnitThan(RHS.Begin) &&
+ RHS.End.isBeforeInTranslationUnitThan(End))
+ return Range_Contains;
+ if (Begin.isBeforeInTranslationUnitThan(RHS.Begin))
+ return Range_ExtendsBegin;
+ else
+ return Range_ExtendsEnd;
+ }
+
+ static RangeComparison compare(SourceRange LHS, SourceRange RHS,
+ SourceManager &SrcMgr, Preprocessor &PP) {
+ return CharRange(CharSourceRange::getTokenRange(LHS), SrcMgr, PP)
+ .compareWith(CharRange(CharSourceRange::getTokenRange(RHS),
+ SrcMgr, PP));
+ }
+ };
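+
+  // Worked example with illustrative character offsets (not from the original
+  // source): if the receiver covers [10,20), compareWith(RHS) yields:
+  //   RHS = [25,30) -> Range_Before        (receiver ends before RHS begins)
+  //   RHS = [ 0, 5) -> Range_After         (receiver begins after RHS ends)
+  //   RHS = [ 5,25) -> Range_Contained     (receiver lies inside RHS)
+  //   RHS = [12,18) -> Range_Contains      (receiver strictly covers RHS)
+  //   RHS = [15,25) -> Range_ExtendsBegin  (overlap; receiver starts earlier)
+  //   RHS = [ 5,15) -> Range_ExtendsEnd    (overlap; receiver ends later)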
+
+ typedef SmallVector<StringRef, 2> TextsVec;
+ typedef std::map<FullSourceLoc, TextsVec, FullSourceLoc::BeforeThanCompare>
+ InsertsMap;
+ InsertsMap Inserts;
+ /// \brief A list of ranges to remove. They are always sorted and they never
+ /// intersect with each other.
+ std::list<CharRange> Removals;
+
+ llvm::DenseSet<Stmt *> StmtRemovals;
+
+ std::vector<std::pair<CharRange, SourceLocation> > IndentationRanges;
+
+ /// \brief Keeps text passed to transformation methods.
+ llvm::StringMap<bool> UniqueText;
+
+public:
+ TransformActionsImpl(CapturedDiagList &capturedDiags,
+ ASTContext &ctx, Preprocessor &PP)
+ : CapturedDiags(capturedDiags), Ctx(ctx), PP(PP), IsInTransaction(false) { }
+
+ ASTContext &getASTContext() { return Ctx; }
+
+ void startTransaction();
+ bool commitTransaction();
+ void abortTransaction();
+
+ bool isInTransaction() const { return IsInTransaction; }
+
+ void insert(SourceLocation loc, StringRef text);
+ void insertAfterToken(SourceLocation loc, StringRef text);
+ void remove(SourceRange range);
+ void removeStmt(Stmt *S);
+ void replace(SourceRange range, StringRef text);
+ void replace(SourceRange range, SourceRange replacementRange);
+ void replaceStmt(Stmt *S, StringRef text);
+ void replaceText(SourceLocation loc, StringRef text,
+ StringRef replacementText);
+ void increaseIndentation(SourceRange range,
+ SourceLocation parentIndent);
+
+ bool clearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range);
+
+ void applyRewrites(TransformActions::RewriteReceiver &receiver);
+
+private:
+ bool canInsert(SourceLocation loc);
+ bool canInsertAfterToken(SourceLocation loc);
+ bool canRemoveRange(SourceRange range);
+ bool canReplaceRange(SourceRange range, SourceRange replacementRange);
+ bool canReplaceText(SourceLocation loc, StringRef text);
+
+ void commitInsert(SourceLocation loc, StringRef text);
+ void commitInsertAfterToken(SourceLocation loc, StringRef text);
+ void commitRemove(SourceRange range);
+ void commitRemoveStmt(Stmt *S);
+ void commitReplace(SourceRange range, SourceRange replacementRange);
+ void commitReplaceText(SourceLocation loc, StringRef text,
+ StringRef replacementText);
+ void commitIncreaseIndentation(SourceRange range,SourceLocation parentIndent);
+ void commitClearDiagnostic(ArrayRef<unsigned> IDs, SourceRange range);
+
+ void addRemoval(CharSourceRange range);
+ void addInsertion(SourceLocation loc, StringRef text);
+
+ /// \brief Stores text passed to the transformation methods to keep the string
+ /// "alive". Since the vast majority of text will be the same, we also unique
+ /// the strings using a StringMap.
+ StringRef getUniqueText(StringRef text);
+
+ /// \brief Computes the source location just past the end of the token at
+ /// the given source location. If the location points at a macro, the whole
+ /// macro expansion is skipped.
+ static SourceLocation getLocForEndOfToken(SourceLocation loc,
+ SourceManager &SM,Preprocessor &PP);
+};
+
+} // anonymous namespace
+
+void TransformActionsImpl::startTransaction() {
+ assert(!IsInTransaction &&
+ "Cannot start a transaction in the middle of another one");
+ IsInTransaction = true;
+}
+
+bool TransformActionsImpl::commitTransaction() {
+ assert(IsInTransaction && "No transaction started");
+
+ if (CachedActions.empty()) {
+ IsInTransaction = false;
+ return false;
+ }
+
+  // Verify that all actions are possible; otherwise abort the whole transaction.
+ bool AllActionsPossible = true;
+ for (unsigned i = 0, e = CachedActions.size(); i != e; ++i) {
+ ActionData &act = CachedActions[i];
+ switch (act.Kind) {
+ case Act_Insert:
+ if (!canInsert(act.Loc))
+ AllActionsPossible = false;
+ break;
+ case Act_InsertAfterToken:
+ if (!canInsertAfterToken(act.Loc))
+ AllActionsPossible = false;
+ break;
+ case Act_Remove:
+ if (!canRemoveRange(act.R1))
+ AllActionsPossible = false;
+ break;
+ case Act_RemoveStmt:
+ assert(act.S);
+ if (!canRemoveRange(act.S->getSourceRange()))
+ AllActionsPossible = false;
+ break;
+ case Act_Replace:
+ if (!canReplaceRange(act.R1, act.R2))
+ AllActionsPossible = false;
+ break;
+ case Act_ReplaceText:
+ if (!canReplaceText(act.Loc, act.Text1))
+ AllActionsPossible = false;
+ break;
+ case Act_IncreaseIndentation:
+      // This is not important; we don't care if it fails.
+ break;
+ case Act_ClearDiagnostic:
+ // We are just checking source rewrites.
+ break;
+ }
+ if (!AllActionsPossible)
+ break;
+ }
+
+ if (!AllActionsPossible) {
+ abortTransaction();
+ return true;
+ }
+
+ for (unsigned i = 0, e = CachedActions.size(); i != e; ++i) {
+ ActionData &act = CachedActions[i];
+ switch (act.Kind) {
+ case Act_Insert:
+ commitInsert(act.Loc, act.Text1);
+ break;
+ case Act_InsertAfterToken:
+ commitInsertAfterToken(act.Loc, act.Text1);
+ break;
+ case Act_Remove:
+ commitRemove(act.R1);
+ break;
+ case Act_RemoveStmt:
+ commitRemoveStmt(act.S);
+ break;
+ case Act_Replace:
+ commitReplace(act.R1, act.R2);
+ break;
+ case Act_ReplaceText:
+ commitReplaceText(act.Loc, act.Text1, act.Text2);
+ break;
+ case Act_IncreaseIndentation:
+ commitIncreaseIndentation(act.R1, act.Loc);
+ break;
+ case Act_ClearDiagnostic:
+ commitClearDiagnostic(act.DiagIDs, act.R1);
+ break;
+ }
+ }
+
+ CachedActions.clear();
+ IsInTransaction = false;
+ return false;
+}
+
+void TransformActionsImpl::abortTransaction() {
+ assert(IsInTransaction && "No transaction started");
+ CachedActions.clear();
+ IsInTransaction = false;
+}
+
+void TransformActionsImpl::insert(SourceLocation loc, StringRef text) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ text = getUniqueText(text);
+ ActionData data;
+ data.Kind = Act_Insert;
+ data.Loc = loc;
+ data.Text1 = text;
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::insertAfterToken(SourceLocation loc, StringRef text) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ text = getUniqueText(text);
+ ActionData data;
+ data.Kind = Act_InsertAfterToken;
+ data.Loc = loc;
+ data.Text1 = text;
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::remove(SourceRange range) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ ActionData data;
+ data.Kind = Act_Remove;
+ data.R1 = range;
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::removeStmt(Stmt *S) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ ActionData data;
+ data.Kind = Act_RemoveStmt;
+ data.S = S->IgnoreImplicit(); // important for uniquing
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::replace(SourceRange range, StringRef text) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ text = getUniqueText(text);
+ remove(range);
+ insert(range.getBegin(), text);
+}
+
+void TransformActionsImpl::replace(SourceRange range,
+ SourceRange replacementRange) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ ActionData data;
+ data.Kind = Act_Replace;
+ data.R1 = range;
+ data.R2 = replacementRange;
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::replaceText(SourceLocation loc, StringRef text,
+ StringRef replacementText) {
+ text = getUniqueText(text);
+ replacementText = getUniqueText(replacementText);
+ ActionData data;
+ data.Kind = Act_ReplaceText;
+ data.Loc = loc;
+ data.Text1 = text;
+ data.Text2 = replacementText;
+ CachedActions.push_back(data);
+}
+
+void TransformActionsImpl::replaceStmt(Stmt *S, StringRef text) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ text = getUniqueText(text);
+ insert(S->getLocStart(), text);
+ removeStmt(S);
+}
+
+void TransformActionsImpl::increaseIndentation(SourceRange range,
+ SourceLocation parentIndent) {
+ if (range.isInvalid()) return;
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ ActionData data;
+ data.Kind = Act_IncreaseIndentation;
+ data.R1 = range;
+ data.Loc = parentIndent;
+ CachedActions.push_back(data);
+}
+
+bool TransformActionsImpl::clearDiagnostic(ArrayRef<unsigned> IDs,
+ SourceRange range) {
+ assert(IsInTransaction && "Actions only allowed during a transaction");
+ if (!CapturedDiags.hasDiagnostic(IDs, range))
+ return false;
+
+ ActionData data;
+ data.Kind = Act_ClearDiagnostic;
+ data.R1 = range;
+ data.DiagIDs.append(IDs.begin(), IDs.end());
+ CachedActions.push_back(data);
+ return true;
+}
+
+bool TransformActionsImpl::canInsert(SourceLocation loc) {
+ if (loc.isInvalid())
+ return false;
+
+ SourceManager &SM = Ctx.getSourceManager();
+ if (SM.isInSystemHeader(SM.getExpansionLoc(loc)))
+ return false;
+
+ if (loc.isFileID())
+ return true;
+ return PP.isAtStartOfMacroExpansion(loc);
+}
+
+bool TransformActionsImpl::canInsertAfterToken(SourceLocation loc) {
+ if (loc.isInvalid())
+ return false;
+
+ SourceManager &SM = Ctx.getSourceManager();
+ if (SM.isInSystemHeader(SM.getExpansionLoc(loc)))
+ return false;
+
+ if (loc.isFileID())
+ return true;
+ return PP.isAtEndOfMacroExpansion(loc);
+}
+
+bool TransformActionsImpl::canRemoveRange(SourceRange range) {
+ return canInsert(range.getBegin()) && canInsertAfterToken(range.getEnd());
+}
+
+bool TransformActionsImpl::canReplaceRange(SourceRange range,
+ SourceRange replacementRange) {
+ return canRemoveRange(range) && canRemoveRange(replacementRange);
+}
+
+bool TransformActionsImpl::canReplaceText(SourceLocation loc, StringRef text) {
+ if (!canInsert(loc))
+ return false;
+
+ SourceManager &SM = Ctx.getSourceManager();
+ loc = SM.getExpansionLoc(loc);
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp)
+ return false;
+
+ return file.substr(locInfo.second).startswith(text);
+}
+
+void TransformActionsImpl::commitInsert(SourceLocation loc, StringRef text) {
+ addInsertion(loc, text);
+}
+
+void TransformActionsImpl::commitInsertAfterToken(SourceLocation loc,
+ StringRef text) {
+ addInsertion(getLocForEndOfToken(loc, Ctx.getSourceManager(), PP), text);
+}
+
+void TransformActionsImpl::commitRemove(SourceRange range) {
+ addRemoval(CharSourceRange::getTokenRange(range));
+}
+
+void TransformActionsImpl::commitRemoveStmt(Stmt *S) {
+ assert(S);
+ if (StmtRemovals.count(S))
+ return; // already removed.
+
+ if (Expr *E = dyn_cast<Expr>(S)) {
+ commitRemove(E->getSourceRange());
+ commitInsert(E->getSourceRange().getBegin(), getARCMTMacroName());
+ } else
+ commitRemove(S->getSourceRange());
+
+ StmtRemovals.insert(S);
+}
+
+void TransformActionsImpl::commitReplace(SourceRange range,
+ SourceRange replacementRange) {
+ RangeComparison comp = CharRange::compare(replacementRange, range,
+ Ctx.getSourceManager(), PP);
+ assert(comp == Range_Contained);
+ if (comp != Range_Contained)
+ return; // Although we asserted, be extra safe for release build.
+ if (range.getBegin() != replacementRange.getBegin())
+ addRemoval(CharSourceRange::getCharRange(range.getBegin(),
+ replacementRange.getBegin()));
+ if (replacementRange.getEnd() != range.getEnd())
+ addRemoval(CharSourceRange::getTokenRange(
+ getLocForEndOfToken(replacementRange.getEnd(),
+ Ctx.getSourceManager(), PP),
+ range.getEnd()));
+}
+void TransformActionsImpl::commitReplaceText(SourceLocation loc,
+ StringRef text,
+ StringRef replacementText) {
+ SourceManager &SM = Ctx.getSourceManager();
+ loc = SM.getExpansionLoc(loc);
+ // canReplaceText already checked if loc points at text.
+ SourceLocation afterText = loc.getLocWithOffset(text.size());
+
+ addRemoval(CharSourceRange::getCharRange(loc, afterText));
+ commitInsert(loc, replacementText);
+}
+
+void TransformActionsImpl::commitIncreaseIndentation(SourceRange range,
+ SourceLocation parentIndent) {
+ SourceManager &SM = Ctx.getSourceManager();
+ IndentationRanges.push_back(
+ std::make_pair(CharRange(CharSourceRange::getTokenRange(range),
+ SM, PP),
+ SM.getExpansionLoc(parentIndent)));
+}
+
+void TransformActionsImpl::commitClearDiagnostic(ArrayRef<unsigned> IDs,
+ SourceRange range) {
+ CapturedDiags.clearDiagnostic(IDs, range);
+}
+
+void TransformActionsImpl::addInsertion(SourceLocation loc, StringRef text) {
+ SourceManager &SM = Ctx.getSourceManager();
+ loc = SM.getExpansionLoc(loc);
+ for (std::list<CharRange>::reverse_iterator
+ I = Removals.rbegin(), E = Removals.rend(); I != E; ++I) {
+ if (!SM.isBeforeInTranslationUnit(loc, I->End))
+ break;
+ if (I->Begin.isBeforeInTranslationUnitThan(loc))
+ return;
+ }
+
+ Inserts[FullSourceLoc(loc, SM)].push_back(text);
+}
+
+void TransformActionsImpl::addRemoval(CharSourceRange range) {
+ CharRange newRange(range, Ctx.getSourceManager(), PP);
+ if (newRange.Begin == newRange.End)
+ return;
+
+ Inserts.erase(Inserts.upper_bound(newRange.Begin),
+ Inserts.lower_bound(newRange.End));
+
+ std::list<CharRange>::iterator I = Removals.end();
+ while (I != Removals.begin()) {
+ std::list<CharRange>::iterator RI = I;
+ --RI;
+ RangeComparison comp = newRange.compareWith(*RI);
+ switch (comp) {
+ case Range_Before:
+ --I;
+ break;
+ case Range_After:
+ Removals.insert(I, newRange);
+ return;
+ case Range_Contained:
+ return;
+ case Range_Contains:
+ RI->End = newRange.End;
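+      // FALL-THROUGH: newRange covers RI entirely; fall through to erase RI.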
+ case Range_ExtendsBegin:
+ newRange.End = RI->End;
+ Removals.erase(RI);
+ break;
+ case Range_ExtendsEnd:
+ RI->End = newRange.End;
+ return;
+ }
+ }
+
+ Removals.insert(Removals.begin(), newRange);
+}
+
+void TransformActionsImpl::applyRewrites(
+ TransformActions::RewriteReceiver &receiver) {
+ for (InsertsMap::iterator I = Inserts.begin(), E = Inserts.end(); I!=E; ++I) {
+ SourceLocation loc = I->first;
+ for (TextsVec::iterator
+ TI = I->second.begin(), TE = I->second.end(); TI != TE; ++TI) {
+ receiver.insert(loc, *TI);
+ }
+ }
+
+ for (std::vector<std::pair<CharRange, SourceLocation> >::iterator
+ I = IndentationRanges.begin(), E = IndentationRanges.end(); I!=E; ++I) {
+ CharSourceRange range = CharSourceRange::getCharRange(I->first.Begin,
+ I->first.End);
+ receiver.increaseIndentation(range, I->second);
+ }
+
+ for (std::list<CharRange>::iterator
+ I = Removals.begin(), E = Removals.end(); I != E; ++I) {
+ CharSourceRange range = CharSourceRange::getCharRange(I->Begin, I->End);
+ receiver.remove(range);
+ }
+}
+
+/// \brief Stores text passed to the transformation methods to keep the string
+/// "alive". Since the vast majority of text will be the same, we also unique
+/// the strings using a StringMap.
+StringRef TransformActionsImpl::getUniqueText(StringRef text) {
+ llvm::StringMapEntry<bool> &entry = UniqueText.GetOrCreateValue(text);
+ return entry.getKey();
+}
+
+/// \brief Computes the source location just past the end of the token at
+/// the given source location. If the location points at a macro, the whole
+/// macro expansion is skipped.
+SourceLocation TransformActionsImpl::getLocForEndOfToken(SourceLocation loc,
+ SourceManager &SM,
+ Preprocessor &PP) {
+ if (loc.isMacroID())
+ loc = SM.getExpansionRange(loc).second;
+ return PP.getLocForEndOfToken(loc);
+}
+
+TransformActions::RewriteReceiver::~RewriteReceiver() { }
+
+TransformActions::TransformActions(DiagnosticsEngine &diag,
+ CapturedDiagList &capturedDiags,
+ ASTContext &ctx, Preprocessor &PP)
+ : Diags(diag), CapturedDiags(capturedDiags), ReportedErrors(false) {
+ Impl = new TransformActionsImpl(capturedDiags, ctx, PP);
+}
+
+TransformActions::~TransformActions() {
+ delete static_cast<TransformActionsImpl*>(Impl);
+}
+
+void TransformActions::startTransaction() {
+ static_cast<TransformActionsImpl*>(Impl)->startTransaction();
+}
+
+bool TransformActions::commitTransaction() {
+ return static_cast<TransformActionsImpl*>(Impl)->commitTransaction();
+}
+
+void TransformActions::abortTransaction() {
+ static_cast<TransformActionsImpl*>(Impl)->abortTransaction();
+}
+
+
+void TransformActions::insert(SourceLocation loc, StringRef text) {
+ static_cast<TransformActionsImpl*>(Impl)->insert(loc, text);
+}
+
+void TransformActions::insertAfterToken(SourceLocation loc,
+ StringRef text) {
+ static_cast<TransformActionsImpl*>(Impl)->insertAfterToken(loc, text);
+}
+
+void TransformActions::remove(SourceRange range) {
+ static_cast<TransformActionsImpl*>(Impl)->remove(range);
+}
+
+void TransformActions::removeStmt(Stmt *S) {
+ static_cast<TransformActionsImpl*>(Impl)->removeStmt(S);
+}
+
+void TransformActions::replace(SourceRange range, StringRef text) {
+ static_cast<TransformActionsImpl*>(Impl)->replace(range, text);
+}
+
+void TransformActions::replace(SourceRange range,
+ SourceRange replacementRange) {
+ static_cast<TransformActionsImpl*>(Impl)->replace(range, replacementRange);
+}
+
+void TransformActions::replaceStmt(Stmt *S, StringRef text) {
+ static_cast<TransformActionsImpl*>(Impl)->replaceStmt(S, text);
+}
+
+void TransformActions::replaceText(SourceLocation loc, StringRef text,
+ StringRef replacementText) {
+ static_cast<TransformActionsImpl*>(Impl)->replaceText(loc, text,
+ replacementText);
+}
+
+void TransformActions::increaseIndentation(SourceRange range,
+ SourceLocation parentIndent) {
+ static_cast<TransformActionsImpl*>(Impl)->increaseIndentation(range,
+ parentIndent);
+}
+
+bool TransformActions::clearDiagnostic(ArrayRef<unsigned> IDs,
+ SourceRange range) {
+ return static_cast<TransformActionsImpl*>(Impl)->clearDiagnostic(IDs, range);
+}
+
+void TransformActions::applyRewrites(RewriteReceiver &receiver) {
+ static_cast<TransformActionsImpl*>(Impl)->applyRewrites(receiver);
+}
+
+void TransformActions::reportError(StringRef error, SourceLocation loc,
+ SourceRange range) {
+ assert(!static_cast<TransformActionsImpl*>(Impl)->isInTransaction() &&
+ "Errors should be emitted out of a transaction");
+
+ SourceManager &SM = static_cast<TransformActionsImpl*>(Impl)->
+ getASTContext().getSourceManager();
+ if (SM.isInSystemHeader(SM.getExpansionLoc(loc)))
+ return;
+
+ // FIXME: Use a custom category name to distinguish rewriter errors.
+ std::string rewriteErr = "[rewriter] ";
+ rewriteErr += error;
+ unsigned diagID
+ = Diags.getDiagnosticIDs()->getCustomDiagID(DiagnosticIDs::Error,
+ rewriteErr);
+ Diags.Report(loc, diagID) << range;
+ ReportedErrors = true;
+}
+
+void TransformActions::reportWarning(StringRef warning, SourceLocation loc,
+ SourceRange range) {
+ assert(!static_cast<TransformActionsImpl*>(Impl)->isInTransaction() &&
+ "Warning should be emitted out of a transaction");
+
+ SourceManager &SM = static_cast<TransformActionsImpl*>(Impl)->
+ getASTContext().getSourceManager();
+ if (SM.isInSystemHeader(SM.getExpansionLoc(loc)))
+ return;
+
+ // FIXME: Use a custom category name to distinguish rewriter errors.
+ std::string rewriterWarn = "[rewriter] ";
+ rewriterWarn += warning;
+ unsigned diagID
+ = Diags.getDiagnosticIDs()->getCustomDiagID(DiagnosticIDs::Warning,
+ rewriterWarn);
+ Diags.Report(loc, diagID) << range;
+}
+
+void TransformActions::reportNote(StringRef note, SourceLocation loc,
+ SourceRange range) {
+ assert(!static_cast<TransformActionsImpl*>(Impl)->isInTransaction() &&
+         "Notes should be emitted out of a transaction");
+
+ SourceManager &SM = static_cast<TransformActionsImpl*>(Impl)->
+ getASTContext().getSourceManager();
+ if (SM.isInSystemHeader(SM.getExpansionLoc(loc)))
+ return;
+
+ // FIXME: Use a custom category name to distinguish rewriter errors.
+ std::string rewriteNote = "[rewriter] ";
+ rewriteNote += note;
+ unsigned diagID
+ = Diags.getDiagnosticIDs()->getCustomDiagID(DiagnosticIDs::Note,
+ rewriteNote);
+ Diags.Report(loc, diagID) << range;
+}
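
To make the transactional model described at the top of this file concrete, here is a minimal usage sketch of the public TransformActions API. It is illustrative only: the helper name is invented and the usual ARCMigrate includes and namespaces are assumed.

    // Hypothetical helper inside a migration transform (sketch only).
    static void removeStmtIfRewritable(TransformActions &TA, Stmt *S) {
      TA.startTransaction();
      TA.removeStmt(S);  // queued; nothing is rewritten yet
      // commitTransaction() verifies every queued edit can be applied and
      // returns true if the whole group had to be aborted instead.
      if (TA.commitTransaction()) {
        // e.g. S came from a macro expansion; the source is left untouched.
      }
    }
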
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp
new file mode 100644
index 0000000..d342d1a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.cpp
@@ -0,0 +1,542 @@
+//===--- Transforms.cpp - Transformations to ARC mode ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Transforms.h"
+#include "Internals.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/DenseSet.h"
+#include <map>
+
+using namespace clang;
+using namespace arcmt;
+using namespace trans;
+
+ASTTraverser::~ASTTraverser() { }
+
+//===----------------------------------------------------------------------===//
+// Helpers.
+//===----------------------------------------------------------------------===//
+
+bool trans::canApplyWeak(ASTContext &Ctx, QualType type,
+ bool AllowOnUnknownClass) {
+ if (!Ctx.getLangOpts().ObjCRuntimeHasWeak)
+ return false;
+
+ QualType T = type;
+ if (T.isNull())
+ return false;
+
+  // On iOS it is always safe to use 'weak'.
+ if (Ctx.getTargetInfo().getTriple().getOS() == llvm::Triple::IOS)
+ AllowOnUnknownClass = true;
+
+ while (const PointerType *ptr = T->getAs<PointerType>())
+ T = ptr->getPointeeType();
+ if (const ObjCObjectPointerType *ObjT = T->getAs<ObjCObjectPointerType>()) {
+ ObjCInterfaceDecl *Class = ObjT->getInterfaceDecl();
+ if (!AllowOnUnknownClass && (!Class || Class->getName() == "NSObject"))
+ return false; // id/NSObject is not safe for weak.
+ if (!AllowOnUnknownClass && !Class->hasDefinition())
+ return false; // forward classes are not verifiable, therefore not safe.
+    if (Class && Class->isArcWeakrefUnavailable())
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief 'Loc' is the end of a statement range. This returns the location
+/// immediately after the semicolon following the statement.
+/// If no semicolon is found or the location is inside a macro, the returned
+/// source location will be invalid.
+SourceLocation trans::findLocationAfterSemi(SourceLocation loc,
+ ASTContext &Ctx) {
+ SourceLocation SemiLoc = findSemiAfterLocation(loc, Ctx);
+ if (SemiLoc.isInvalid())
+ return SourceLocation();
+ return SemiLoc.getLocWithOffset(1);
+}
+
+/// \brief \arg Loc is the end of a statement range. This returns the location
+/// of the semicolon following the statement.
+/// If no semicolon is found or the location is inside a macro, the returned
+/// source location will be invalid.
+SourceLocation trans::findSemiAfterLocation(SourceLocation loc,
+ ASTContext &Ctx) {
+ SourceManager &SM = Ctx.getSourceManager();
+ if (loc.isMacroID()) {
+ if (!Lexer::isAtEndOfMacroExpansion(loc, SM, Ctx.getLangOpts(), &loc))
+ return SourceLocation();
+ }
+ loc = Lexer::getLocForEndOfToken(loc, /*Offset=*/0, SM, Ctx.getLangOpts());
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp)
+ return SourceLocation();
+
+ const char *tokenBegin = file.data() + locInfo.second;
+
+ // Lex from the start of the given location.
+ Lexer lexer(SM.getLocForStartOfFile(locInfo.first),
+ Ctx.getLangOpts(),
+ file.begin(), tokenBegin, file.end());
+ Token tok;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::semi))
+ return SourceLocation();
+
+ return tok.getLocation();
+}
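
As a usage illustration of the helpers above, a transform that wants to delete a whole statement together with its trailing semicolon could look like the following sketch. The helper name is hypothetical and not part of this patch.

    // Sketch only; assumes the caller already started a TransformActions
    // transaction and that the usual ARCMigrate context is in scope.
    static void removeStmtWithTrailingSemi(TransformActions &TA, Stmt *S,
                                           ASTContext &Ctx) {
      SourceLocation SemiLoc = trans::findSemiAfterLocation(S->getLocEnd(), Ctx);
      if (SemiLoc.isInvalid()) {
        TA.removeStmt(S);  // no ';' found, or it is inside a macro
        return;
      }
      TA.remove(SourceRange(S->getLocStart(), SemiLoc));  // statement plus ';'
    }
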
+
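+/// \brief Returns true if \p E has side effects. Sends of retain, release,
+/// autorelease, or dealloc to 'super', or to a receiver that itself has no
+/// side effects, are not counted.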
+bool trans::hasSideEffects(Expr *E, ASTContext &Ctx) {
+ if (!E || !E->HasSideEffects(Ctx))
+ return false;
+
+ E = E->IgnoreParenCasts();
+ ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E);
+ if (!ME)
+ return true;
+ switch (ME->getMethodFamily()) {
+ case OMF_autorelease:
+ case OMF_dealloc:
+ case OMF_release:
+ case OMF_retain:
+ switch (ME->getReceiverKind()) {
+ case ObjCMessageExpr::SuperInstance:
+ return false;
+ case ObjCMessageExpr::Instance:
+ return hasSideEffects(ME->getInstanceReceiver(), Ctx);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+bool trans::isGlobalVar(Expr *E) {
+ E = E->IgnoreParenCasts();
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ return DRE->getDecl()->getDeclContext()->isFileContext() &&
+ DRE->getDecl()->getLinkage() == ExternalLinkage;
+ if (ConditionalOperator *condOp = dyn_cast<ConditionalOperator>(E))
+ return isGlobalVar(condOp->getTrueExpr()) &&
+ isGlobalVar(condOp->getFalseExpr());
+
+ return false;
+}
+
+StringRef trans::getNilString(ASTContext &Ctx) {
+ if (Ctx.Idents.get("nil").hasMacroDefinition())
+ return "nil";
+ else
+ return "0";
+}
+
+namespace {
+
+class ReferenceClear : public RecursiveASTVisitor<ReferenceClear> {
+ ExprSet &Refs;
+public:
+ ReferenceClear(ExprSet &refs) : Refs(refs) { }
+ bool VisitDeclRefExpr(DeclRefExpr *E) { Refs.erase(E); return true; }
+};
+
+class ReferenceCollector : public RecursiveASTVisitor<ReferenceCollector> {
+ ValueDecl *Dcl;
+ ExprSet &Refs;
+
+public:
+ ReferenceCollector(ValueDecl *D, ExprSet &refs)
+ : Dcl(D), Refs(refs) { }
+
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ if (E->getDecl() == Dcl)
+ Refs.insert(E);
+ return true;
+ }
+};
+
+class RemovablesCollector : public RecursiveASTVisitor<RemovablesCollector> {
+ ExprSet &Removables;
+
+public:
+ RemovablesCollector(ExprSet &removables)
+ : Removables(removables) { }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool TraverseStmtExpr(StmtExpr *E) {
+ CompoundStmt *S = E->getSubStmt();
+ for (CompoundStmt::body_iterator
+ I = S->body_begin(), E = S->body_end(); I != E; ++I) {
+ if (I != E - 1)
+ mark(*I);
+ TraverseStmt(*I);
+ }
+ return true;
+ }
+
+ bool VisitCompoundStmt(CompoundStmt *S) {
+ for (CompoundStmt::body_iterator
+ I = S->body_begin(), E = S->body_end(); I != E; ++I)
+ mark(*I);
+ return true;
+ }
+
+ bool VisitIfStmt(IfStmt *S) {
+ mark(S->getThen());
+ mark(S->getElse());
+ return true;
+ }
+
+ bool VisitWhileStmt(WhileStmt *S) {
+ mark(S->getBody());
+ return true;
+ }
+
+ bool VisitDoStmt(DoStmt *S) {
+ mark(S->getBody());
+ return true;
+ }
+
+ bool VisitForStmt(ForStmt *S) {
+ mark(S->getInit());
+ mark(S->getInc());
+ mark(S->getBody());
+ return true;
+ }
+
+private:
+ void mark(Stmt *S) {
+ if (!S) return;
+
+ while (LabelStmt *Label = dyn_cast<LabelStmt>(S))
+ S = Label->getSubStmt();
+ S = S->IgnoreImplicit();
+ if (Expr *E = dyn_cast<Expr>(S))
+ Removables.insert(E);
+ }
+};
+
+} // end anonymous namespace
+
+void trans::clearRefsIn(Stmt *S, ExprSet &refs) {
+ ReferenceClear(refs).TraverseStmt(S);
+}
+
+void trans::collectRefs(ValueDecl *D, Stmt *S, ExprSet &refs) {
+ ReferenceCollector(D, refs).TraverseStmt(S);
+}
+
+void trans::collectRemovables(Stmt *S, ExprSet &exprs) {
+ RemovablesCollector(exprs).TraverseStmt(S);
+}
+
+//===----------------------------------------------------------------------===//
+// MigrationContext
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class ASTTransform : public RecursiveASTVisitor<ASTTransform> {
+ MigrationContext &MigrateCtx;
+ typedef RecursiveASTVisitor<ASTTransform> base;
+
+public:
+ ASTTransform(MigrationContext &MigrateCtx) : MigrateCtx(MigrateCtx) { }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ bool TraverseObjCImplementationDecl(ObjCImplementationDecl *D) {
+ ObjCImplementationContext ImplCtx(MigrateCtx, D);
+ for (MigrationContext::traverser_iterator
+ I = MigrateCtx.traversers_begin(),
+ E = MigrateCtx.traversers_end(); I != E; ++I)
+ (*I)->traverseObjCImplementation(ImplCtx);
+
+ return base::TraverseObjCImplementationDecl(D);
+ }
+
+ bool TraverseStmt(Stmt *rootS) {
+ if (!rootS)
+ return true;
+
+ BodyContext BodyCtx(MigrateCtx, rootS);
+ for (MigrationContext::traverser_iterator
+ I = MigrateCtx.traversers_begin(),
+ E = MigrateCtx.traversers_end(); I != E; ++I)
+ (*I)->traverseBody(BodyCtx);
+
+ return true;
+ }
+};
+
+} // end anonymous namespace
+
+MigrationContext::~MigrationContext() {
+ for (traverser_iterator
+ I = traversers_begin(), E = traversers_end(); I != E; ++I)
+ delete *I;
+}
+
+bool MigrationContext::isGCOwnedNonObjC(QualType T) {
+ while (!T.isNull()) {
+ if (const AttributedType *AttrT = T->getAs<AttributedType>()) {
+ if (AttrT->getAttrKind() == AttributedType::attr_objc_ownership)
+ return !AttrT->getModifiedType()->isObjCRetainableType();
+ }
+
+ if (T->isArrayType())
+ T = Pass.Ctx.getBaseElementType(T);
+ else if (const PointerType *PT = T->getAs<PointerType>())
+ T = PT->getPointeeType();
+ else if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ T = RT->getPointeeType();
+ else
+ break;
+ }
+
+ return false;
+}
+
+bool MigrationContext::rewritePropertyAttribute(StringRef fromAttr,
+ StringRef toAttr,
+ SourceLocation atLoc) {
+ if (atLoc.isMacroID())
+ return false;
+
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(atLoc);
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp)
+ return false;
+
+ const char *tokenBegin = file.data() + locInfo.second;
+
+ // Lex from the start of the given location.
+ Lexer lexer(SM.getLocForStartOfFile(locInfo.first),
+ Pass.Ctx.getLangOpts(),
+ file.begin(), tokenBegin, file.end());
+ Token tok;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::at)) return false;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::raw_identifier)) return false;
+ if (StringRef(tok.getRawIdentifierData(), tok.getLength())
+ != "property")
+ return false;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::l_paren)) return false;
+
+ Token BeforeTok = tok;
+ Token AfterTok;
+ AfterTok.startToken();
+ SourceLocation AttrLoc;
+
+ lexer.LexFromRawLexer(tok);
+ if (tok.is(tok::r_paren))
+ return false;
+
+ while (1) {
+ if (tok.isNot(tok::raw_identifier)) return false;
+ StringRef ident(tok.getRawIdentifierData(), tok.getLength());
+ if (ident == fromAttr) {
+ if (!toAttr.empty()) {
+ Pass.TA.replaceText(tok.getLocation(), fromAttr, toAttr);
+ return true;
+ }
+ // We want to remove the attribute.
+ AttrLoc = tok.getLocation();
+ }
+
+ do {
+ lexer.LexFromRawLexer(tok);
+ if (AttrLoc.isValid() && AfterTok.is(tok::unknown))
+ AfterTok = tok;
+ } while (tok.isNot(tok::comma) && tok.isNot(tok::r_paren));
+ if (tok.is(tok::r_paren))
+ break;
+ if (AttrLoc.isInvalid())
+ BeforeTok = tok;
+ lexer.LexFromRawLexer(tok);
+ }
+
+ if (toAttr.empty() && AttrLoc.isValid() && AfterTok.isNot(tok::unknown)) {
+ // We want to remove the attribute.
+ if (BeforeTok.is(tok::l_paren) && AfterTok.is(tok::r_paren)) {
+ Pass.TA.remove(SourceRange(BeforeTok.getLocation(),
+ AfterTok.getLocation()));
+ } else if (BeforeTok.is(tok::l_paren) && AfterTok.is(tok::comma)) {
+ Pass.TA.remove(SourceRange(AttrLoc, AfterTok.getLocation()));
+ } else {
+ Pass.TA.remove(SourceRange(BeforeTok.getLocation(), AttrLoc));
+ }
+
+ return true;
+ }
+
+ return false;
+}
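
For orientation, the effect of the raw-lexing rewrite above on a hypothetical property declaration (illustrative input, not taken from this patch):

    // rewritePropertyAttribute("retain", "strong", AtLoc) turns
    //   @property (nonatomic, retain) NSString *name;
    // into
    //   @property (nonatomic, strong) NSString *name;
    //
    // removePropertyAttribute("retain", AtLoc) applied to
    //   @property (retain) NSString *name;
    // removes the attribute together with the now-empty parentheses:
    //   @property  NSString *name;
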
+
+bool MigrationContext::addPropertyAttribute(StringRef attr,
+ SourceLocation atLoc) {
+ if (atLoc.isMacroID())
+ return false;
+
+ SourceManager &SM = Pass.Ctx.getSourceManager();
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(atLoc);
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp)
+ return false;
+
+ const char *tokenBegin = file.data() + locInfo.second;
+
+ // Lex from the start of the given location.
+ Lexer lexer(SM.getLocForStartOfFile(locInfo.first),
+ Pass.Ctx.getLangOpts(),
+ file.begin(), tokenBegin, file.end());
+ Token tok;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::at)) return false;
+ lexer.LexFromRawLexer(tok);
+ if (tok.isNot(tok::raw_identifier)) return false;
+ if (StringRef(tok.getRawIdentifierData(), tok.getLength())
+ != "property")
+ return false;
+ lexer.LexFromRawLexer(tok);
+
+ if (tok.isNot(tok::l_paren)) {
+ Pass.TA.insert(tok.getLocation(), std::string("(") + attr.str() + ") ");
+ return true;
+ }
+
+ lexer.LexFromRawLexer(tok);
+ if (tok.is(tok::r_paren)) {
+ Pass.TA.insert(tok.getLocation(), attr);
+ return true;
+ }
+
+ if (tok.isNot(tok::raw_identifier)) return false;
+
+ Pass.TA.insert(tok.getLocation(), std::string(attr) + ", ");
+ return true;
+}
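
Similarly, a hypothetical illustration of addPropertyAttribute (input invented for the example):

    // addPropertyAttribute("weak", AtLoc) turns
    //   @property (nonatomic) id delegate;
    // into
    //   @property (weak, nonatomic) id delegate;
    // and, when no attribute list exists,
    //   @property id delegate;
    // into
    //   @property (weak) id delegate;
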
+
+void MigrationContext::traverse(TranslationUnitDecl *TU) {
+ for (traverser_iterator
+ I = traversers_begin(), E = traversers_end(); I != E; ++I)
+ (*I)->traverseTU(*this);
+
+ ASTTransform(*this).TraverseDecl(TU);
+}
+
+static void GCRewriteFinalize(MigrationPass &pass) {
+ ASTContext &Ctx = pass.Ctx;
+ TransformActions &TA = pass.TA;
+ DeclContext *DC = Ctx.getTranslationUnitDecl();
+ Selector FinalizeSel =
+ Ctx.Selectors.getNullarySelector(&pass.Ctx.Idents.get("finalize"));
+
+ typedef DeclContext::specific_decl_iterator<ObjCImplementationDecl>
+ impl_iterator;
+ for (impl_iterator I = impl_iterator(DC->decls_begin()),
+ E = impl_iterator(DC->decls_end()); I != E; ++I) {
+ for (ObjCImplementationDecl::instmeth_iterator
+ MI = (*I)->instmeth_begin(),
+ ME = (*I)->instmeth_end(); MI != ME; ++MI) {
+ ObjCMethodDecl *MD = *MI;
+ if (!MD->hasBody())
+ continue;
+
+ if (MD->isInstanceMethod() && MD->getSelector() == FinalizeSel) {
+ ObjCMethodDecl *FinalizeM = MD;
+ Transaction Trans(TA);
+ TA.insert(FinalizeM->getSourceRange().getBegin(),
+ "#if !__has_feature(objc_arc)\n");
+ CharSourceRange::getTokenRange(FinalizeM->getSourceRange());
+ const SourceManager &SM = pass.Ctx.getSourceManager();
+ const LangOptions &LangOpts = pass.Ctx.getLangOpts();
+ bool Invalid;
+ std::string str = "\n#endif\n";
+ str += Lexer::getSourceText(
+ CharSourceRange::getTokenRange(FinalizeM->getSourceRange()),
+ SM, LangOpts, &Invalid);
+ TA.insertAfterToken(FinalizeM->getSourceRange().getEnd(), str);
+
+ break;
+ }
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// getAllTransformations.
+//===----------------------------------------------------------------------===//
+
+static void traverseAST(MigrationPass &pass) {
+ MigrationContext MigrateCtx(pass);
+
+ if (pass.isGCMigration()) {
+ MigrateCtx.addTraverser(new GCCollectableCallsTraverser);
+ MigrateCtx.addTraverser(new GCAttrsTraverser());
+ }
+ MigrateCtx.addTraverser(new PropertyRewriteTraverser());
+ MigrateCtx.addTraverser(new BlockObjCVariableTraverser());
+
+ MigrateCtx.traverse(pass.Ctx.getTranslationUnitDecl());
+}
+
+static void independentTransforms(MigrationPass &pass) {
+ rewriteAutoreleasePool(pass);
+ removeRetainReleaseDeallocFinalize(pass);
+ rewriteUnusedInitDelegate(pass);
+ removeZeroOutPropsInDeallocFinalize(pass);
+ makeAssignARCSafe(pass);
+ rewriteUnbridgedCasts(pass);
+ checkAPIUses(pass);
+ traverseAST(pass);
+}
+
+std::vector<TransformFn> arcmt::getAllTransformations(
+ LangOptions::GCMode OrigGCMode,
+ bool NoFinalizeRemoval) {
+ std::vector<TransformFn> transforms;
+
+ if (OrigGCMode == LangOptions::GCOnly && NoFinalizeRemoval)
+ transforms.push_back(GCRewriteFinalize);
+ transforms.push_back(independentTransforms);
+ // This depends on previous transformations removing various expressions.
+ transforms.push_back(removeEmptyStatementsAndDeallocFinalize);
+
+ return transforms;
+}
diff --git a/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h
new file mode 100644
index 0000000..445c3e5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/ARCMigrate/Transforms.h
@@ -0,0 +1,207 @@
+//===-- Transforms.h - Transformations to ARC mode --------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_ARCMIGRATE_TRANSFORMS_H
+#define LLVM_CLANG_LIB_ARCMIGRATE_TRANSFORMS_H
+
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/ParentMap.h"
+#include "llvm/ADT/DenseSet.h"
+
+namespace clang {
+ class Decl;
+ class Stmt;
+ class BlockDecl;
+ class ObjCMethodDecl;
+ class FunctionDecl;
+
+namespace arcmt {
+ class MigrationPass;
+
+namespace trans {
+
+ class MigrationContext;
+
+//===----------------------------------------------------------------------===//
+// Transformations.
+//===----------------------------------------------------------------------===//
+
+void rewriteAutoreleasePool(MigrationPass &pass);
+void rewriteUnbridgedCasts(MigrationPass &pass);
+void makeAssignARCSafe(MigrationPass &pass);
+void removeRetainReleaseDeallocFinalize(MigrationPass &pass);
+void removeZeroOutPropsInDeallocFinalize(MigrationPass &pass);
+void rewriteUnusedInitDelegate(MigrationPass &pass);
+void checkAPIUses(MigrationPass &pass);
+
+void removeEmptyStatementsAndDeallocFinalize(MigrationPass &pass);
+
+class BodyContext {
+ MigrationContext &MigrateCtx;
+ ParentMap PMap;
+ Stmt *TopStmt;
+
+public:
+ BodyContext(MigrationContext &MigrateCtx, Stmt *S)
+ : MigrateCtx(MigrateCtx), PMap(S), TopStmt(S) {}
+
+ MigrationContext &getMigrationContext() { return MigrateCtx; }
+ ParentMap &getParentMap() { return PMap; }
+ Stmt *getTopStmt() { return TopStmt; }
+};
+
+class ObjCImplementationContext {
+ MigrationContext &MigrateCtx;
+ ObjCImplementationDecl *ImpD;
+
+public:
+ ObjCImplementationContext(MigrationContext &MigrateCtx,
+ ObjCImplementationDecl *D)
+ : MigrateCtx(MigrateCtx), ImpD(D) {}
+
+ MigrationContext &getMigrationContext() { return MigrateCtx; }
+ ObjCImplementationDecl *getImplementationDecl() { return ImpD; }
+};
+
+class ASTTraverser {
+public:
+ virtual ~ASTTraverser();
+ virtual void traverseTU(MigrationContext &MigrateCtx) { }
+ virtual void traverseBody(BodyContext &BodyCtx) { }
+ virtual void traverseObjCImplementation(ObjCImplementationContext &ImplCtx) {}
+};
+
+class MigrationContext {
+ std::vector<ASTTraverser *> Traversers;
+
+public:
+ MigrationPass &Pass;
+
+ struct GCAttrOccurrence {
+ enum AttrKind { Weak, Strong } Kind;
+ SourceLocation Loc;
+ QualType ModifiedType;
+ Decl *Dcl;
+ /// \brief true if the attribute is owned, e.g. it is in a body and not just
+ /// in an interface.
+ bool FullyMigratable;
+ };
+ std::vector<GCAttrOccurrence> GCAttrs;
+ llvm::DenseSet<unsigned> AttrSet;
+ llvm::DenseSet<unsigned> RemovedAttrSet;
+
+  /// \brief Set of raw '@' locations for 'assign' property groups that
+  /// contain GC __weak.
+ llvm::DenseSet<unsigned> AtPropsWeak;
+
+ explicit MigrationContext(MigrationPass &pass) : Pass(pass) {}
+ ~MigrationContext();
+
+ typedef std::vector<ASTTraverser *>::iterator traverser_iterator;
+ traverser_iterator traversers_begin() { return Traversers.begin(); }
+ traverser_iterator traversers_end() { return Traversers.end(); }
+
+ void addTraverser(ASTTraverser *traverser) {
+ Traversers.push_back(traverser);
+ }
+
+ bool isGCOwnedNonObjC(QualType T);
+ bool removePropertyAttribute(StringRef fromAttr, SourceLocation atLoc) {
+ return rewritePropertyAttribute(fromAttr, StringRef(), atLoc);
+ }
+ bool rewritePropertyAttribute(StringRef fromAttr, StringRef toAttr,
+ SourceLocation atLoc);
+ bool addPropertyAttribute(StringRef attr, SourceLocation atLoc);
+
+ void traverse(TranslationUnitDecl *TU);
+
+ void dumpGCAttrs();
+};
+
+class PropertyRewriteTraverser : public ASTTraverser {
+public:
+ virtual void traverseObjCImplementation(ObjCImplementationContext &ImplCtx);
+};
+
+class BlockObjCVariableTraverser : public ASTTraverser {
+public:
+ virtual void traverseBody(BodyContext &BodyCtx);
+};
+
+// GC transformations
+
+class GCAttrsTraverser : public ASTTraverser {
+public:
+ virtual void traverseTU(MigrationContext &MigrateCtx);
+};
+
+class GCCollectableCallsTraverser : public ASTTraverser {
+public:
+ virtual void traverseBody(BodyContext &BodyCtx);
+};
+
+//===----------------------------------------------------------------------===//
+// Helpers.
+//===----------------------------------------------------------------------===//
+
+/// \brief Determine whether we can add weak to the given type.
+bool canApplyWeak(ASTContext &Ctx, QualType type,
+ bool AllowOnUnknownClass = false);
+
+/// \brief 'Loc' is the end of a statement range. This returns the location
+/// immediately after the semicolon following the statement.
+/// If no semicolon is found or the location is inside a macro, the returned
+/// source location will be invalid.
+SourceLocation findLocationAfterSemi(SourceLocation loc, ASTContext &Ctx);
+
+/// \brief \arg Loc is the end of a statement range. This returns the location
+/// of the semicolon following the statement.
+/// If no semicolon is found or the location is inside a macro, the returned
+/// source location will be invalid.
+SourceLocation findSemiAfterLocation(SourceLocation loc, ASTContext &Ctx);
+
+bool hasSideEffects(Expr *E, ASTContext &Ctx);
+bool isGlobalVar(Expr *E);
+/// \brief Returns "nil", or "0" if the 'nil' macro is not defined.
+StringRef getNilString(ASTContext &Ctx);
+
+template <typename BODY_TRANS>
+class BodyTransform : public RecursiveASTVisitor<BodyTransform<BODY_TRANS> > {
+ MigrationPass &Pass;
+
+public:
+ BodyTransform(MigrationPass &pass) : Pass(pass) { }
+
+ bool TraverseStmt(Stmt *rootS) {
+ if (rootS)
+ BODY_TRANS(Pass).transformBody(rootS);
+ return true;
+ }
+};
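
The BodyTransform adapter above expects its template argument to be constructible from a MigrationPass and to expose a transformBody(Stmt*) member. A minimal hypothetical example (names invented for illustration):

    class RemoveFooCalls {
      MigrationPass &Pass;
    public:
      RemoveFooCalls(MigrationPass &pass) : Pass(pass) { }
      void transformBody(Stmt *body) {
        // Walk 'body' and queue source edits through Pass.TA here.
      }
    };

    // Driven over every body in the translation unit:
    //   BodyTransform<RemoveFooCalls>(pass).TraverseDecl(
    //                                   pass.Ctx.getTranslationUnitDecl());
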
+
+typedef llvm::DenseSet<Expr *> ExprSet;
+
+void clearRefsIn(Stmt *S, ExprSet &refs);
+template <typename iterator>
+void clearRefsIn(iterator begin, iterator end, ExprSet &refs) {
+ for (; begin != end; ++begin)
+ clearRefsIn(*begin, refs);
+}
+
+void collectRefs(ValueDecl *D, Stmt *S, ExprSet &refs);
+
+void collectRemovables(Stmt *S, ExprSet &exprs);
+
+} // end namespace trans
+
+} // end namespace arcmt
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/AST/APValue.cpp b/contrib/llvm/tools/clang/lib/AST/APValue.cpp
new file mode 100644
index 0000000..a31b3c5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/APValue.cpp
@@ -0,0 +1,607 @@
+//===--- APValue.cpp - Union class for APFloat/APSInt/Complex -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the APValue class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+
+namespace {
+ struct LVBase {
+ llvm::PointerIntPair<APValue::LValueBase, 1, bool> BaseAndIsOnePastTheEnd;
+ CharUnits Offset;
+ unsigned PathLength;
+ unsigned CallIndex;
+ };
+}
+
+struct APValue::LV : LVBase {
+ static const unsigned InlinePathSpace =
+ (MaxSize - sizeof(LVBase)) / sizeof(LValuePathEntry);
+
+ /// Path - The sequence of base classes, fields and array indices to follow to
+ /// walk from Base to the subobject. When performing GCC-style folding, there
+ /// may not be such a path.
+ union {
+ LValuePathEntry Path[InlinePathSpace];
+ LValuePathEntry *PathPtr;
+ };
+
+ LV() { PathLength = (unsigned)-1; }
+ ~LV() { resizePath(0); }
+
+ void resizePath(unsigned Length) {
+ if (Length == PathLength)
+ return;
+ if (hasPathPtr())
+ delete [] PathPtr;
+ PathLength = Length;
+ if (hasPathPtr())
+ PathPtr = new LValuePathEntry[Length];
+ }
+
+ bool hasPath() const { return PathLength != (unsigned)-1; }
+ bool hasPathPtr() const { return hasPath() && PathLength > InlinePathSpace; }
+
+ LValuePathEntry *getPath() { return hasPathPtr() ? PathPtr : Path; }
+ const LValuePathEntry *getPath() const {
+ return hasPathPtr() ? PathPtr : Path;
+ }
+};
+
+namespace {
+ struct MemberPointerBase {
+ llvm::PointerIntPair<const ValueDecl*, 1, bool> MemberAndIsDerivedMember;
+ unsigned PathLength;
+ };
+}
+
+struct APValue::MemberPointerData : MemberPointerBase {
+ static const unsigned InlinePathSpace =
+ (MaxSize - sizeof(MemberPointerBase)) / sizeof(const CXXRecordDecl*);
+ typedef const CXXRecordDecl *PathElem;
+ union {
+ PathElem Path[InlinePathSpace];
+ PathElem *PathPtr;
+ };
+
+ MemberPointerData() { PathLength = 0; }
+ ~MemberPointerData() { resizePath(0); }
+
+ void resizePath(unsigned Length) {
+ if (Length == PathLength)
+ return;
+ if (hasPathPtr())
+ delete [] PathPtr;
+ PathLength = Length;
+ if (hasPathPtr())
+ PathPtr = new PathElem[Length];
+ }
+
+ bool hasPathPtr() const { return PathLength > InlinePathSpace; }
+
+ PathElem *getPath() { return hasPathPtr() ? PathPtr : Path; }
+ const PathElem *getPath() const {
+ return hasPathPtr() ? PathPtr : Path;
+ }
+};
+
+// FIXME: Reduce the malloc traffic here.
+
+APValue::Arr::Arr(unsigned NumElts, unsigned Size) :
+ Elts(new APValue[NumElts + (NumElts != Size ? 1 : 0)]),
+ NumElts(NumElts), ArrSize(Size) {}
+APValue::Arr::~Arr() { delete [] Elts; }
+
+APValue::StructData::StructData(unsigned NumBases, unsigned NumFields) :
+ Elts(new APValue[NumBases+NumFields]),
+ NumBases(NumBases), NumFields(NumFields) {}
+APValue::StructData::~StructData() {
+ delete [] Elts;
+}
+
+APValue::UnionData::UnionData() : Field(0), Value(new APValue) {}
+APValue::UnionData::~UnionData () {
+ delete Value;
+}
+
+APValue::APValue(const APValue &RHS) : Kind(Uninitialized) {
+ switch (RHS.getKind()) {
+ case Uninitialized:
+ break;
+ case Int:
+ MakeInt();
+ setInt(RHS.getInt());
+ break;
+ case Float:
+ MakeFloat();
+ setFloat(RHS.getFloat());
+ break;
+ case Vector:
+ MakeVector();
+ setVector(((const Vec *)(const char *)RHS.Data)->Elts,
+ RHS.getVectorLength());
+ break;
+ case ComplexInt:
+ MakeComplexInt();
+ setComplexInt(RHS.getComplexIntReal(), RHS.getComplexIntImag());
+ break;
+ case ComplexFloat:
+ MakeComplexFloat();
+ setComplexFloat(RHS.getComplexFloatReal(), RHS.getComplexFloatImag());
+ break;
+ case LValue:
+ MakeLValue();
+ if (RHS.hasLValuePath())
+ setLValue(RHS.getLValueBase(), RHS.getLValueOffset(), RHS.getLValuePath(),
+ RHS.isLValueOnePastTheEnd(), RHS.getLValueCallIndex());
+ else
+ setLValue(RHS.getLValueBase(), RHS.getLValueOffset(), NoLValuePath(),
+ RHS.getLValueCallIndex());
+ break;
+ case Array:
+ MakeArray(RHS.getArrayInitializedElts(), RHS.getArraySize());
+ for (unsigned I = 0, N = RHS.getArrayInitializedElts(); I != N; ++I)
+ getArrayInitializedElt(I) = RHS.getArrayInitializedElt(I);
+ if (RHS.hasArrayFiller())
+ getArrayFiller() = RHS.getArrayFiller();
+ break;
+ case Struct:
+ MakeStruct(RHS.getStructNumBases(), RHS.getStructNumFields());
+ for (unsigned I = 0, N = RHS.getStructNumBases(); I != N; ++I)
+ getStructBase(I) = RHS.getStructBase(I);
+ for (unsigned I = 0, N = RHS.getStructNumFields(); I != N; ++I)
+ getStructField(I) = RHS.getStructField(I);
+ break;
+ case Union:
+ MakeUnion();
+ setUnion(RHS.getUnionField(), RHS.getUnionValue());
+ break;
+ case MemberPointer:
+ MakeMemberPointer(RHS.getMemberPointerDecl(),
+ RHS.isMemberPointerToDerivedMember(),
+ RHS.getMemberPointerPath());
+ break;
+ case AddrLabelDiff:
+ MakeAddrLabelDiff();
+ setAddrLabelDiff(RHS.getAddrLabelDiffLHS(), RHS.getAddrLabelDiffRHS());
+ break;
+ }
+}
+
+void APValue::DestroyDataAndMakeUninit() {
+ if (Kind == Int)
+ ((APSInt*)(char*)Data)->~APSInt();
+ else if (Kind == Float)
+ ((APFloat*)(char*)Data)->~APFloat();
+ else if (Kind == Vector)
+ ((Vec*)(char*)Data)->~Vec();
+ else if (Kind == ComplexInt)
+ ((ComplexAPSInt*)(char*)Data)->~ComplexAPSInt();
+ else if (Kind == ComplexFloat)
+ ((ComplexAPFloat*)(char*)Data)->~ComplexAPFloat();
+ else if (Kind == LValue)
+ ((LV*)(char*)Data)->~LV();
+ else if (Kind == Array)
+ ((Arr*)(char*)Data)->~Arr();
+ else if (Kind == Struct)
+ ((StructData*)(char*)Data)->~StructData();
+ else if (Kind == Union)
+ ((UnionData*)(char*)Data)->~UnionData();
+ else if (Kind == MemberPointer)
+ ((MemberPointerData*)(char*)Data)->~MemberPointerData();
+ else if (Kind == AddrLabelDiff)
+ ((AddrLabelDiffData*)(char*)Data)->~AddrLabelDiffData();
+ Kind = Uninitialized;
+}
+
+void APValue::swap(APValue &RHS) {
+ std::swap(Kind, RHS.Kind);
+ char TmpData[MaxSize];
+ memcpy(TmpData, Data, MaxSize);
+ memcpy(Data, RHS.Data, MaxSize);
+ memcpy(RHS.Data, TmpData, MaxSize);
+}
+
+void APValue::dump() const {
+ dump(llvm::errs());
+ llvm::errs() << '\n';
+}
+
+static double GetApproxValue(const llvm::APFloat &F) {
+ llvm::APFloat V = F;
+ bool ignored;
+ V.convert(llvm::APFloat::IEEEdouble, llvm::APFloat::rmNearestTiesToEven,
+ &ignored);
+ return V.convertToDouble();
+}
+
+void APValue::dump(raw_ostream &OS) const {
+ switch (getKind()) {
+ case Uninitialized:
+ OS << "Uninitialized";
+ return;
+ case Int:
+ OS << "Int: " << getInt();
+ return;
+ case Float:
+ OS << "Float: " << GetApproxValue(getFloat());
+ return;
+ case Vector:
+ OS << "Vector: ";
+ getVectorElt(0).dump(OS);
+ for (unsigned i = 1; i != getVectorLength(); ++i) {
+ OS << ", ";
+ getVectorElt(i).dump(OS);
+ }
+ return;
+ case ComplexInt:
+ OS << "ComplexInt: " << getComplexIntReal() << ", " << getComplexIntImag();
+ return;
+ case ComplexFloat:
+ OS << "ComplexFloat: " << GetApproxValue(getComplexFloatReal())
+ << ", " << GetApproxValue(getComplexFloatImag());
+ return;
+ case LValue:
+ OS << "LValue: <todo>";
+ return;
+ case Array:
+ OS << "Array: ";
+ for (unsigned I = 0, N = getArrayInitializedElts(); I != N; ++I) {
+ getArrayInitializedElt(I).dump(OS);
+ if (I != getArraySize() - 1) OS << ", ";
+ }
+ if (hasArrayFiller()) {
+ OS << getArraySize() - getArrayInitializedElts() << " x ";
+ getArrayFiller().dump(OS);
+ }
+ return;
+ case Struct:
+ OS << "Struct ";
+ if (unsigned N = getStructNumBases()) {
+ OS << " bases: ";
+ getStructBase(0).dump(OS);
+ for (unsigned I = 1; I != N; ++I) {
+ OS << ", ";
+ getStructBase(I).dump(OS);
+ }
+ }
+ if (unsigned N = getStructNumFields()) {
+ OS << " fields: ";
+ getStructField(0).dump(OS);
+ for (unsigned I = 1; I != N; ++I) {
+ OS << ", ";
+ getStructField(I).dump(OS);
+ }
+ }
+ return;
+ case Union:
+ OS << "Union: ";
+ getUnionValue().dump(OS);
+ return;
+ case MemberPointer:
+ OS << "MemberPointer: <todo>";
+ return;
+ case AddrLabelDiff:
+ OS << "AddrLabelDiff: <todo>";
+ return;
+ }
+ llvm_unreachable("Unknown APValue kind!");
+}
+
+void APValue::printPretty(raw_ostream &Out, ASTContext &Ctx, QualType Ty) const{
+ switch (getKind()) {
+ case APValue::Uninitialized:
+ Out << "<uninitialized>";
+ return;
+ case APValue::Int:
+ if (Ty->isBooleanType())
+ Out << (getInt().getBoolValue() ? "true" : "false");
+ else
+ Out << getInt();
+ return;
+ case APValue::Float:
+ Out << GetApproxValue(getFloat());
+ return;
+ case APValue::Vector: {
+ Out << '{';
+ QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
+ getVectorElt(0).printPretty(Out, Ctx, ElemTy);
+ for (unsigned i = 1; i != getVectorLength(); ++i) {
+ Out << ", ";
+ getVectorElt(i).printPretty(Out, Ctx, ElemTy);
+ }
+ Out << '}';
+ return;
+ }
+ case APValue::ComplexInt:
+ Out << getComplexIntReal() << "+" << getComplexIntImag() << "i";
+ return;
+ case APValue::ComplexFloat:
+ Out << GetApproxValue(getComplexFloatReal()) << "+"
+ << GetApproxValue(getComplexFloatImag()) << "i";
+ return;
+ case APValue::LValue: {
+ LValueBase Base = getLValueBase();
+ if (!Base) {
+ Out << "0";
+ return;
+ }
+
+ bool IsReference = Ty->isReferenceType();
+ QualType InnerTy
+ = IsReference ? Ty.getNonReferenceType() : Ty->getPointeeType();
+
+ if (!hasLValuePath()) {
+ // No lvalue path: just print the offset.
+ CharUnits O = getLValueOffset();
+ CharUnits S = Ctx.getTypeSizeInChars(InnerTy);
+ if (!O.isZero()) {
+ if (IsReference)
+ Out << "*(";
+ if (O % S) {
+ Out << "(char*)";
+ S = CharUnits::One();
+ }
+ Out << '&';
+ } else if (!IsReference)
+ Out << '&';
+
+ if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>())
+ Out << *VD;
+ else
+ Base.get<const Expr*>()->printPretty(Out, Ctx, 0,
+ Ctx.getPrintingPolicy());
+ if (!O.isZero()) {
+ Out << " + " << (O / S);
+ if (IsReference)
+ Out << ')';
+ }
+ return;
+ }
+
+ // We have an lvalue path. Print it out nicely.
+ if (!IsReference)
+ Out << '&';
+ else if (isLValueOnePastTheEnd())
+ Out << "*(&";
+
+ QualType ElemTy;
+ if (const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>()) {
+ Out << *VD;
+ ElemTy = VD->getType();
+ } else {
+ const Expr *E = Base.get<const Expr*>();
+ E->printPretty(Out, Ctx, 0,Ctx.getPrintingPolicy());
+ ElemTy = E->getType();
+ }
+
+ ArrayRef<LValuePathEntry> Path = getLValuePath();
+ const CXXRecordDecl *CastToBase = 0;
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ if (ElemTy->getAs<RecordType>()) {
+ // The lvalue refers to a class type, so the next path entry is a base
+ // or member.
+ const Decl *BaseOrMember =
+ BaseOrMemberType::getFromOpaqueValue(Path[I].BaseOrMember).getPointer();
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(BaseOrMember)) {
+ CastToBase = RD;
+ ElemTy = Ctx.getRecordType(RD);
+ } else {
+ const ValueDecl *VD = cast<ValueDecl>(BaseOrMember);
+ Out << ".";
+ if (CastToBase)
+ Out << *CastToBase << "::";
+ Out << *VD;
+ ElemTy = VD->getType();
+ }
+ } else {
+ // The lvalue must refer to an array.
+ Out << '[' << Path[I].ArrayIndex << ']';
+ ElemTy = Ctx.getAsArrayType(ElemTy)->getElementType();
+ }
+ }
+
+ // Handle formatting of one-past-the-end lvalues.
+ if (isLValueOnePastTheEnd()) {
+ // FIXME: If CastToBase is non-0, we should prefix the output with
+ // "(CastToBase*)".
+ Out << " + 1";
+ if (IsReference)
+ Out << ')';
+ }
+ return;
+ }
+ case APValue::Array: {
+ const ArrayType *AT = Ctx.getAsArrayType(Ty);
+ QualType ElemTy = AT->getElementType();
+ Out << '{';
+ if (unsigned N = getArrayInitializedElts()) {
+ getArrayInitializedElt(0).printPretty(Out, Ctx, ElemTy);
+ for (unsigned I = 1; I != N; ++I) {
+ Out << ", ";
+ if (I == 10) {
+ // Avoid printing out the entire contents of large arrays.
+ Out << "...";
+ break;
+ }
+ getArrayInitializedElt(I).printPretty(Out, Ctx, ElemTy);
+ }
+ }
+ Out << '}';
+ return;
+ }
+ case APValue::Struct: {
+ Out << '{';
+ const RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
+ bool First = true;
+ if (unsigned N = getStructNumBases()) {
+ const CXXRecordDecl *CD = cast<CXXRecordDecl>(RD);
+ CXXRecordDecl::base_class_const_iterator BI = CD->bases_begin();
+ for (unsigned I = 0; I != N; ++I, ++BI) {
+ assert(BI != CD->bases_end());
+ if (!First)
+ Out << ", ";
+ getStructBase(I).printPretty(Out, Ctx, BI->getType());
+ First = false;
+ }
+ }
+ for (RecordDecl::field_iterator FI = RD->field_begin();
+ FI != RD->field_end(); ++FI) {
+      // Skip unnamed bit-fields before emitting the separator so they do not
+      // leave a stray ", " behind.
+      if ((*FI)->isUnnamedBitfield()) continue;
+      if (!First)
+        Out << ", ";
+ getStructField((*FI)->getFieldIndex()).
+ printPretty(Out, Ctx, (*FI)->getType());
+ First = false;
+ }
+ Out << '}';
+ return;
+ }
+ case APValue::Union:
+ Out << '{';
+ if (const FieldDecl *FD = getUnionField()) {
+ Out << "." << *FD << " = ";
+ getUnionValue().printPretty(Out, Ctx, FD->getType());
+ }
+ Out << '}';
+ return;
+ case APValue::MemberPointer:
+ // FIXME: This is not enough to unambiguously identify the member in a
+ // multiple-inheritance scenario.
+ if (const ValueDecl *VD = getMemberPointerDecl()) {
+ Out << '&' << *cast<CXXRecordDecl>(VD->getDeclContext()) << "::" << *VD;
+ return;
+ }
+ Out << "0";
+ return;
+ case APValue::AddrLabelDiff:
+ Out << "&&" << getAddrLabelDiffLHS()->getLabel()->getName();
+ Out << " - ";
+ Out << "&&" << getAddrLabelDiffRHS()->getLabel()->getName();
+ return;
+ }
+ llvm_unreachable("Unknown APValue kind!");
+}
+
+std::string APValue::getAsString(ASTContext &Ctx, QualType Ty) const {
+ std::string Result;
+ llvm::raw_string_ostream Out(Result);
+ printPretty(Out, Ctx, Ty);
+ Out.flush();
+ return Result;
+}
+
+const APValue::LValueBase APValue::getLValueBase() const {
+ assert(isLValue() && "Invalid accessor");
+ return ((const LV*)(const void*)Data)->BaseAndIsOnePastTheEnd.getPointer();
+}
+
+bool APValue::isLValueOnePastTheEnd() const {
+ assert(isLValue() && "Invalid accessor");
+ return ((const LV*)(const void*)Data)->BaseAndIsOnePastTheEnd.getInt();
+}
+
+CharUnits &APValue::getLValueOffset() {
+ assert(isLValue() && "Invalid accessor");
+ return ((LV*)(void*)Data)->Offset;
+}
+
+bool APValue::hasLValuePath() const {
+ assert(isLValue() && "Invalid accessor");
+ return ((const LV*)(const char*)Data)->hasPath();
+}
+
+ArrayRef<APValue::LValuePathEntry> APValue::getLValuePath() const {
+ assert(isLValue() && hasLValuePath() && "Invalid accessor");
+ const LV &LVal = *((const LV*)(const char*)Data);
+ return ArrayRef<LValuePathEntry>(LVal.getPath(), LVal.PathLength);
+}
+
+unsigned APValue::getLValueCallIndex() const {
+ assert(isLValue() && "Invalid accessor");
+ return ((const LV*)(const char*)Data)->CallIndex;
+}
+
+void APValue::setLValue(LValueBase B, const CharUnits &O, NoLValuePath,
+ unsigned CallIndex) {
+ assert(isLValue() && "Invalid accessor");
+ LV &LVal = *((LV*)(char*)Data);
+ LVal.BaseAndIsOnePastTheEnd.setPointer(B);
+ LVal.BaseAndIsOnePastTheEnd.setInt(false);
+ LVal.Offset = O;
+ LVal.CallIndex = CallIndex;
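+  // A path length of (unsigned)-1 marks an lvalue with no path (see hasPath()).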
+ LVal.resizePath((unsigned)-1);
+}
+
+void APValue::setLValue(LValueBase B, const CharUnits &O,
+ ArrayRef<LValuePathEntry> Path, bool IsOnePastTheEnd,
+ unsigned CallIndex) {
+ assert(isLValue() && "Invalid accessor");
+ LV &LVal = *((LV*)(char*)Data);
+ LVal.BaseAndIsOnePastTheEnd.setPointer(B);
+ LVal.BaseAndIsOnePastTheEnd.setInt(IsOnePastTheEnd);
+ LVal.Offset = O;
+ LVal.CallIndex = CallIndex;
+ LVal.resizePath(Path.size());
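+  // LValuePathEntry is a trivially copyable union, so memcpy into the freshly
+  // resized path storage is sufficient.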
+ memcpy(LVal.getPath(), Path.data(), Path.size() * sizeof(LValuePathEntry));
+}
+
+const ValueDecl *APValue::getMemberPointerDecl() const {
+ assert(isMemberPointer() && "Invalid accessor");
+ const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data);
+ return MPD.MemberAndIsDerivedMember.getPointer();
+}
+
+bool APValue::isMemberPointerToDerivedMember() const {
+ assert(isMemberPointer() && "Invalid accessor");
+ const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data);
+ return MPD.MemberAndIsDerivedMember.getInt();
+}
+
+ArrayRef<const CXXRecordDecl*> APValue::getMemberPointerPath() const {
+ assert(isMemberPointer() && "Invalid accessor");
+ const MemberPointerData &MPD = *((const MemberPointerData*)(const char*)Data);
+ return ArrayRef<const CXXRecordDecl*>(MPD.getPath(), MPD.PathLength);
+}
+
+void APValue::MakeLValue() {
+ assert(isUninit() && "Bad state change");
+ assert(sizeof(LV) <= MaxSize && "LV too big");
+ new ((void*)(char*)Data) LV();
+ Kind = LValue;
+}
+
+void APValue::MakeArray(unsigned InitElts, unsigned Size) {
+ assert(isUninit() && "Bad state change");
+ new ((void*)(char*)Data) Arr(InitElts, Size);
+ Kind = Array;
+}
+
+void APValue::MakeMemberPointer(const ValueDecl *Member, bool IsDerivedMember,
+ ArrayRef<const CXXRecordDecl*> Path) {
+ assert(isUninit() && "Bad state change");
+ MemberPointerData *MPD = new ((void*)(char*)Data) MemberPointerData;
+ Kind = MemberPointer;
+ MPD->MemberAndIsDerivedMember.setPointer(Member);
+ MPD->MemberAndIsDerivedMember.setInt(IsDerivedMember);
+ MPD->resizePath(Path.size());
+ memcpy(MPD->getPath(), Path.data(), Path.size()*sizeof(const CXXRecordDecl*));
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp b/contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp
new file mode 100644
index 0000000..1672bc8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ASTConsumer.cpp
@@ -0,0 +1,26 @@
+//===--- ASTConsumer.cpp - Abstract interface for reading ASTs --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTConsumer class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/DeclGroup.h"
+using namespace clang;
+
+bool ASTConsumer::HandleTopLevelDecl(DeclGroupRef D) {
+ return true;
+}
+
+void ASTConsumer::HandleInterestingDecl(DeclGroupRef D) {
+ HandleTopLevelDecl(D);
+}
+
+void ASTConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef D) {}
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
new file mode 100644
index 0000000..acf5e0b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ASTContext.cpp
@@ -0,0 +1,6768 @@
+//===--- ASTContext.cpp - Context to hold long-lived AST nodes ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ASTContext interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Mangle.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Capacity.h"
+#include "CXXABI.h"
+#include <map>
+
+using namespace clang;
+
+unsigned ASTContext::NumImplicitDefaultConstructors;
+unsigned ASTContext::NumImplicitDefaultConstructorsDeclared;
+unsigned ASTContext::NumImplicitCopyConstructors;
+unsigned ASTContext::NumImplicitCopyConstructorsDeclared;
+unsigned ASTContext::NumImplicitMoveConstructors;
+unsigned ASTContext::NumImplicitMoveConstructorsDeclared;
+unsigned ASTContext::NumImplicitCopyAssignmentOperators;
+unsigned ASTContext::NumImplicitCopyAssignmentOperatorsDeclared;
+unsigned ASTContext::NumImplicitMoveAssignmentOperators;
+unsigned ASTContext::NumImplicitMoveAssignmentOperatorsDeclared;
+unsigned ASTContext::NumImplicitDestructors;
+unsigned ASTContext::NumImplicitDestructorsDeclared;
+
+enum FloatingRank {
+ HalfRank, FloatRank, DoubleRank, LongDoubleRank
+};
+
+void
+ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
+ TemplateTemplateParmDecl *Parm) {
+ ID.AddInteger(Parm->getDepth());
+ ID.AddInteger(Parm->getPosition());
+ ID.AddBoolean(Parm->isParameterPack());
+
+ TemplateParameterList *Params = Parm->getTemplateParameters();
+ ID.AddInteger(Params->size());
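+  // Each parameter is profiled with a kind tag (0 = type, 1 = non-type,
+  // 2 = template template) followed by the data that distinguishes it.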
+ for (TemplateParameterList::const_iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
+ ID.AddInteger(0);
+ ID.AddBoolean(TTP->isParameterPack());
+ continue;
+ }
+
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ ID.AddInteger(1);
+ ID.AddBoolean(NTTP->isParameterPack());
+ ID.AddPointer(NTTP->getType().getCanonicalType().getAsOpaquePtr());
+ if (NTTP->isExpandedParameterPack()) {
+ ID.AddBoolean(true);
+ ID.AddInteger(NTTP->getNumExpansionTypes());
+ for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
+ QualType T = NTTP->getExpansionType(I);
+ ID.AddPointer(T.getCanonicalType().getAsOpaquePtr());
+ }
+ } else
+ ID.AddBoolean(false);
+ continue;
+ }
+
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
+ ID.AddInteger(2);
+ Profile(ID, TTP);
+ }
+}
+
+TemplateTemplateParmDecl *
+ASTContext::getCanonicalTemplateTemplateParmDecl(
+ TemplateTemplateParmDecl *TTP) const {
+ // Check if we already have a canonical template template parameter.
+ llvm::FoldingSetNodeID ID;
+ CanonicalTemplateTemplateParm::Profile(ID, TTP);
+ void *InsertPos = 0;
+ CanonicalTemplateTemplateParm *Canonical
+ = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
+ if (Canonical)
+ return Canonical->getParam();
+
+ // Build a canonical template parameter list.
+ TemplateParameterList *Params = TTP->getTemplateParameters();
+ SmallVector<NamedDecl *, 4> CanonParams;
+ CanonParams.reserve(Params->size());
+ for (TemplateParameterList::const_iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
+ CanonParams.push_back(
+ TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(),
+ TTP->getDepth(),
+ TTP->getIndex(), 0, false,
+ TTP->isParameterPack()));
+ else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ QualType T = getCanonicalType(NTTP->getType());
+ TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(T);
+ NonTypeTemplateParmDecl *Param;
+ if (NTTP->isExpandedParameterPack()) {
+ SmallVector<QualType, 2> ExpandedTypes;
+ SmallVector<TypeSourceInfo *, 2> ExpandedTInfos;
+ for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
+ ExpandedTypes.push_back(getCanonicalType(NTTP->getExpansionType(I)));
+ ExpandedTInfos.push_back(
+ getTrivialTypeSourceInfo(ExpandedTypes.back()));
+ }
+
+ Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(),
+ NTTP->getDepth(),
+ NTTP->getPosition(), 0,
+ T,
+ TInfo,
+ ExpandedTypes.data(),
+ ExpandedTypes.size(),
+ ExpandedTInfos.data());
+ } else {
+ Param = NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(),
+ NTTP->getDepth(),
+ NTTP->getPosition(), 0,
+ T,
+ NTTP->isParameterPack(),
+ TInfo);
+ }
+ CanonParams.push_back(Param);
+
+ } else
+ CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
+ cast<TemplateTemplateParmDecl>(*P)));
+ }
+
+ TemplateTemplateParmDecl *CanonTTP
+ = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(), TTP->getDepth(),
+ TTP->getPosition(),
+ TTP->isParameterPack(),
+ 0,
+ TemplateParameterList::Create(*this, SourceLocation(),
+ SourceLocation(),
+ CanonParams.data(),
+ CanonParams.size(),
+ SourceLocation()));
+
+ // Get the new insert position for the node we care about.
+ Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
+ assert(Canonical == 0 && "Shouldn't be in the map!");
+ (void)Canonical;
+
+ // Create the canonical template template parameter entry.
+ Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
+ CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
+ return CanonTTP;
+}
+
+CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
+ if (!LangOpts.CPlusPlus) return 0;
+
+ switch (T.getCXXABI()) {
+ case CXXABI_ARM:
+ return CreateARMCXXABI(*this);
+ case CXXABI_Itanium:
+ return CreateItaniumCXXABI(*this);
+ case CXXABI_Microsoft:
+ return CreateMicrosoftCXXABI(*this);
+ }
+ llvm_unreachable("Invalid CXXABI type!");
+}
+
+static const LangAS::Map *getAddressSpaceMap(const TargetInfo &T,
+ const LangOptions &LOpts) {
+ if (LOpts.FakeAddressSpaceMap) {
+ // The fake address space map must have a distinct entry for each
+ // language-specific address space.
+ static const unsigned FakeAddrSpaceMap[] = {
+ 1, // opencl_global
+ 2, // opencl_local
+ 3 // opencl_constant
+ };
+ return &FakeAddrSpaceMap;
+ } else {
+ return &T.getAddressSpaceMap();
+ }
+}
+
+ASTContext::ASTContext(LangOptions& LOpts, SourceManager &SM,
+ const TargetInfo *t,
+ IdentifierTable &idents, SelectorTable &sels,
+ Builtin::Context &builtins,
+ unsigned size_reserve,
+ bool DelayInitialization)
+ : FunctionProtoTypes(this_()),
+ TemplateSpecializationTypes(this_()),
+ DependentTemplateSpecializationTypes(this_()),
+ SubstTemplateTemplateParmPacks(this_()),
+ GlobalNestedNameSpecifier(0),
+ Int128Decl(0), UInt128Decl(0),
+ ObjCIdDecl(0), ObjCSelDecl(0), ObjCClassDecl(0), ObjCProtocolClassDecl(0),
+ CFConstantStringTypeDecl(0), ObjCInstanceTypeDecl(0),
+ FILEDecl(0),
+ jmp_bufDecl(0), sigjmp_bufDecl(0), ucontext_tDecl(0),
+ BlockDescriptorType(0), BlockDescriptorExtendedType(0),
+ cudaConfigureCallDecl(0),
+ NullTypeSourceInfo(QualType()),
+ FirstLocalImport(), LastLocalImport(),
+ SourceMgr(SM), LangOpts(LOpts),
+ AddrSpaceMap(0), Target(t), PrintingPolicy(LOpts),
+ Idents(idents), Selectors(sels),
+ BuiltinInfo(builtins),
+ DeclarationNames(*this),
+ ExternalSource(0), Listener(0),
+ LastSDM(0, 0),
+ UniqueBlockByRefTypeID(0)
+{
+ if (size_reserve > 0) Types.reserve(size_reserve);
+ TUDecl = TranslationUnitDecl::Create(*this);
+
+ if (!DelayInitialization) {
+ assert(t && "No target supplied for ASTContext initialization");
+ InitBuiltinTypes(*t);
+ }
+}
+
+ASTContext::~ASTContext() {
+ // Release the DenseMaps associated with DeclContext objects.
+ // FIXME: Is this the ideal solution?
+ ReleaseDeclContextMaps();
+
+ // Call all of the deallocation functions.
+ for (unsigned I = 0, N = Deallocations.size(); I != N; ++I)
+ Deallocations[I].first(Deallocations[I].second);
+
+ // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
+ // because they can contain DenseMaps.
+ for (llvm::DenseMap<const ObjCContainerDecl*,
+ const ASTRecordLayout*>::iterator
+ I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; )
+ // Increment in loop to prevent using deallocated memory.
+ if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
+ R->Destroy(*this);
+
+ for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
+ I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
+ // Increment in loop to prevent using deallocated memory.
+ if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
+ R->Destroy(*this);
+ }
+
+ for (llvm::DenseMap<const Decl*, AttrVec*>::iterator A = DeclAttrs.begin(),
+ AEnd = DeclAttrs.end();
+ A != AEnd; ++A)
+ A->second->~AttrVec();
+}
+
+void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
+ Deallocations.push_back(std::make_pair(Callback, Data));
+}
+
+void
+ASTContext::setExternalSource(OwningPtr<ExternalASTSource> &Source) {
+ ExternalSource.reset(Source.take());
+}
+
+void ASTContext::PrintStats() const {
+ llvm::errs() << "\n*** AST Context Stats:\n";
+ llvm::errs() << " " << Types.size() << " types total.\n";
+
+ unsigned counts[] = {
+#define TYPE(Name, Parent) 0,
+#define ABSTRACT_TYPE(Name, Parent)
+#include "clang/AST/TypeNodes.def"
+ 0 // Extra
+ };
+
+ for (unsigned i = 0, e = Types.size(); i != e; ++i) {
+ Type *T = Types[i];
+ counts[(unsigned)T->getTypeClass()]++;
+ }
+
+ unsigned Idx = 0;
+ unsigned TotalBytes = 0;
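+  // Expand TypeNodes.def a second time to print the per-class counts and to
+  // accumulate the total number of bytes used by type nodes.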
+#define TYPE(Name, Parent) \
+ if (counts[Idx]) \
+ llvm::errs() << " " << counts[Idx] << " " << #Name \
+ << " types\n"; \
+ TotalBytes += counts[Idx] * sizeof(Name##Type); \
+ ++Idx;
+#define ABSTRACT_TYPE(Name, Parent)
+#include "clang/AST/TypeNodes.def"
+
+ llvm::errs() << "Total bytes = " << TotalBytes << "\n";
+
+ // Implicit special member functions.
+ llvm::errs() << NumImplicitDefaultConstructorsDeclared << "/"
+ << NumImplicitDefaultConstructors
+ << " implicit default constructors created\n";
+ llvm::errs() << NumImplicitCopyConstructorsDeclared << "/"
+ << NumImplicitCopyConstructors
+ << " implicit copy constructors created\n";
+ if (getLangOpts().CPlusPlus)
+ llvm::errs() << NumImplicitMoveConstructorsDeclared << "/"
+ << NumImplicitMoveConstructors
+ << " implicit move constructors created\n";
+ llvm::errs() << NumImplicitCopyAssignmentOperatorsDeclared << "/"
+ << NumImplicitCopyAssignmentOperators
+ << " implicit copy assignment operators created\n";
+ if (getLangOpts().CPlusPlus)
+ llvm::errs() << NumImplicitMoveAssignmentOperatorsDeclared << "/"
+ << NumImplicitMoveAssignmentOperators
+ << " implicit move assignment operators created\n";
+ llvm::errs() << NumImplicitDestructorsDeclared << "/"
+ << NumImplicitDestructors
+ << " implicit destructors created\n";
+
+ if (ExternalSource.get()) {
+ llvm::errs() << "\n";
+ ExternalSource->PrintStats();
+ }
+
+ BumpAlloc.PrintStats();
+}
+
+TypedefDecl *ASTContext::getInt128Decl() const {
+ if (!Int128Decl) {
+ TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(Int128Ty);
+ Int128Decl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
+ getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(),
+ &Idents.get("__int128_t"),
+ TInfo);
+ }
+
+ return Int128Decl;
+}
+
+TypedefDecl *ASTContext::getUInt128Decl() const {
+ if (!UInt128Decl) {
+ TypeSourceInfo *TInfo = getTrivialTypeSourceInfo(UnsignedInt128Ty);
+ UInt128Decl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
+ getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(),
+ &Idents.get("__uint128_t"),
+ TInfo);
+ }
+
+ return UInt128Decl;
+}
+
+void ASTContext::InitBuiltinType(CanQualType &R, BuiltinType::Kind K) {
+ BuiltinType *Ty = new (*this, TypeAlignment) BuiltinType(K);
+ R = CanQualType::CreateUnsafe(QualType(Ty, 0));
+ Types.push_back(Ty);
+}
+
+void ASTContext::InitBuiltinTypes(const TargetInfo &Target) {
+ assert((!this->Target || this->Target == &Target) &&
+ "Incorrect target reinitialization");
+ assert(VoidTy.isNull() && "Context reinitialized?");
+
+ this->Target = &Target;
+
+ ABI.reset(createCXXABI(Target));
+ AddrSpaceMap = getAddressSpaceMap(Target, LangOpts);
+
+ // C99 6.2.5p19.
+ InitBuiltinType(VoidTy, BuiltinType::Void);
+
+ // C99 6.2.5p2.
+ InitBuiltinType(BoolTy, BuiltinType::Bool);
+ // C99 6.2.5p3.
+ if (LangOpts.CharIsSigned)
+ InitBuiltinType(CharTy, BuiltinType::Char_S);
+ else
+ InitBuiltinType(CharTy, BuiltinType::Char_U);
+ // C99 6.2.5p4.
+ InitBuiltinType(SignedCharTy, BuiltinType::SChar);
+ InitBuiltinType(ShortTy, BuiltinType::Short);
+ InitBuiltinType(IntTy, BuiltinType::Int);
+ InitBuiltinType(LongTy, BuiltinType::Long);
+ InitBuiltinType(LongLongTy, BuiltinType::LongLong);
+
+ // C99 6.2.5p6.
+ InitBuiltinType(UnsignedCharTy, BuiltinType::UChar);
+ InitBuiltinType(UnsignedShortTy, BuiltinType::UShort);
+ InitBuiltinType(UnsignedIntTy, BuiltinType::UInt);
+ InitBuiltinType(UnsignedLongTy, BuiltinType::ULong);
+ InitBuiltinType(UnsignedLongLongTy, BuiltinType::ULongLong);
+
+ // C99 6.2.5p10.
+ InitBuiltinType(FloatTy, BuiltinType::Float);
+ InitBuiltinType(DoubleTy, BuiltinType::Double);
+ InitBuiltinType(LongDoubleTy, BuiltinType::LongDouble);
+
+ // GNU extension, 128-bit integers.
+ InitBuiltinType(Int128Ty, BuiltinType::Int128);
+ InitBuiltinType(UnsignedInt128Ty, BuiltinType::UInt128);
+
+ if (LangOpts.CPlusPlus) { // C++ 3.9.1p5
+ if (TargetInfo::isTypeSigned(Target.getWCharType()))
+ InitBuiltinType(WCharTy, BuiltinType::WChar_S);
+ else // -fshort-wchar makes wchar_t be unsigned.
+ InitBuiltinType(WCharTy, BuiltinType::WChar_U);
+ } else // C99
+ WCharTy = getFromTargetType(Target.getWCharType());
+
+ if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
+ InitBuiltinType(Char16Ty, BuiltinType::Char16);
+ else // C99
+ Char16Ty = getFromTargetType(Target.getChar16Type());
+
+ if (LangOpts.CPlusPlus) // C++0x 3.9.1p5, extension for C++
+ InitBuiltinType(Char32Ty, BuiltinType::Char32);
+ else // C99
+ Char32Ty = getFromTargetType(Target.getChar32Type());
+
+ // Placeholder type for type-dependent expressions whose type is
+ // completely unknown. No code should ever check a type against
+ // DependentTy and users should never see it; however, it is here to
+ // help diagnose failures to properly check for type-dependent
+ // expressions.
+ InitBuiltinType(DependentTy, BuiltinType::Dependent);
+
+ // Placeholder type for functions.
+ InitBuiltinType(OverloadTy, BuiltinType::Overload);
+
+ // Placeholder type for bound members.
+ InitBuiltinType(BoundMemberTy, BuiltinType::BoundMember);
+
+ // Placeholder type for pseudo-objects.
+ InitBuiltinType(PseudoObjectTy, BuiltinType::PseudoObject);
+
+ // "any" type; useful for debugger-like clients.
+ InitBuiltinType(UnknownAnyTy, BuiltinType::UnknownAny);
+
+ // Placeholder type for unbridged ARC casts.
+ InitBuiltinType(ARCUnbridgedCastTy, BuiltinType::ARCUnbridgedCast);
+
+ // C99 6.2.5p11.
+ FloatComplexTy = getComplexType(FloatTy);
+ DoubleComplexTy = getComplexType(DoubleTy);
+ LongDoubleComplexTy = getComplexType(LongDoubleTy);
+
+ BuiltinVaListType = QualType();
+
+ // Builtin types for 'id', 'Class', and 'SEL'.
+ InitBuiltinType(ObjCBuiltinIdTy, BuiltinType::ObjCId);
+ InitBuiltinType(ObjCBuiltinClassTy, BuiltinType::ObjCClass);
+ InitBuiltinType(ObjCBuiltinSelTy, BuiltinType::ObjCSel);
+
+ // Builtin type for __objc_yes and __objc_no
+ ObjCBuiltinBoolTy = SignedCharTy;
+
+ ObjCConstantStringType = QualType();
+
+ // void * type
+ VoidPtrTy = getPointerType(VoidTy);
+
+ // nullptr type (C++0x 2.14.7)
+ InitBuiltinType(NullPtrTy, BuiltinType::NullPtr);
+
+ // half type (OpenCL 6.1.1.1) / ARM NEON __fp16
+ InitBuiltinType(HalfTy, BuiltinType::Half);
+}
+
+DiagnosticsEngine &ASTContext::getDiagnostics() const {
+ return SourceMgr.getDiagnostics();
+}
+
+AttrVec& ASTContext::getDeclAttrs(const Decl *D) {
+ AttrVec *&Result = DeclAttrs[D];
+ if (!Result) {
+ void *Mem = Allocate(sizeof(AttrVec));
+ Result = new (Mem) AttrVec;
+ }
+
+ return *Result;
+}
+
+/// \brief Erase the attributes corresponding to the given declaration.
+void ASTContext::eraseDeclAttrs(const Decl *D) {
+ llvm::DenseMap<const Decl*, AttrVec*>::iterator Pos = DeclAttrs.find(D);
+ if (Pos != DeclAttrs.end()) {
+ Pos->second->~AttrVec();
+ DeclAttrs.erase(Pos);
+ }
+}
+
+MemberSpecializationInfo *
+ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
+ assert(Var->isStaticDataMember() && "Not a static data member");
+ llvm::DenseMap<const VarDecl *, MemberSpecializationInfo *>::iterator Pos
+ = InstantiatedFromStaticDataMember.find(Var);
+ if (Pos == InstantiatedFromStaticDataMember.end())
+ return 0;
+
+ return Pos->second;
+}
+
+void
+ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
+ TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation) {
+ assert(Inst->isStaticDataMember() && "Not a static data member");
+ assert(Tmpl->isStaticDataMember() && "Not a static data member");
+ assert(!InstantiatedFromStaticDataMember[Inst] &&
+ "Already noted what static data member was instantiated from");
+ InstantiatedFromStaticDataMember[Inst]
+ = new (*this) MemberSpecializationInfo(Tmpl, TSK, PointOfInstantiation);
+}
+
+FunctionDecl *ASTContext::getClassScopeSpecializationPattern(
+ const FunctionDecl *FD){
+ assert(FD && "Specialization is 0");
+ llvm::DenseMap<const FunctionDecl*, FunctionDecl *>::const_iterator Pos
+ = ClassScopeSpecializationPattern.find(FD);
+ if (Pos == ClassScopeSpecializationPattern.end())
+ return 0;
+
+ return Pos->second;
+}
+
+void ASTContext::setClassScopeSpecializationPattern(FunctionDecl *FD,
+ FunctionDecl *Pattern) {
+ assert(FD && "Specialization is 0");
+ assert(Pattern && "Class scope specialization pattern is 0");
+ ClassScopeSpecializationPattern[FD] = Pattern;
+}
+
+NamedDecl *
+ASTContext::getInstantiatedFromUsingDecl(UsingDecl *UUD) {
+ llvm::DenseMap<UsingDecl *, NamedDecl *>::const_iterator Pos
+ = InstantiatedFromUsingDecl.find(UUD);
+ if (Pos == InstantiatedFromUsingDecl.end())
+ return 0;
+
+ return Pos->second;
+}
+
+void
+ASTContext::setInstantiatedFromUsingDecl(UsingDecl *Inst, NamedDecl *Pattern) {
+ assert((isa<UsingDecl>(Pattern) ||
+ isa<UnresolvedUsingValueDecl>(Pattern) ||
+ isa<UnresolvedUsingTypenameDecl>(Pattern)) &&
+ "pattern decl is not a using decl");
+ assert(!InstantiatedFromUsingDecl[Inst] && "pattern already exists");
+ InstantiatedFromUsingDecl[Inst] = Pattern;
+}
+
+UsingShadowDecl *
+ASTContext::getInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst) {
+ llvm::DenseMap<UsingShadowDecl*, UsingShadowDecl*>::const_iterator Pos
+ = InstantiatedFromUsingShadowDecl.find(Inst);
+ if (Pos == InstantiatedFromUsingShadowDecl.end())
+ return 0;
+
+ return Pos->second;
+}
+
+void
+ASTContext::setInstantiatedFromUsingShadowDecl(UsingShadowDecl *Inst,
+ UsingShadowDecl *Pattern) {
+ assert(!InstantiatedFromUsingShadowDecl[Inst] && "pattern already exists");
+ InstantiatedFromUsingShadowDecl[Inst] = Pattern;
+}
+
+FieldDecl *ASTContext::getInstantiatedFromUnnamedFieldDecl(FieldDecl *Field) {
+ llvm::DenseMap<FieldDecl *, FieldDecl *>::iterator Pos
+ = InstantiatedFromUnnamedFieldDecl.find(Field);
+ if (Pos == InstantiatedFromUnnamedFieldDecl.end())
+ return 0;
+
+ return Pos->second;
+}
+
+void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
+ FieldDecl *Tmpl) {
+ assert(!Inst->getDeclName() && "Instantiated field decl is not unnamed");
+ assert(!Tmpl->getDeclName() && "Template field decl is not unnamed");
+ assert(!InstantiatedFromUnnamedFieldDecl[Inst] &&
+ "Already noted what unnamed field was instantiated from");
+
+ InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
+}
+
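+// Predicates that classify a field relative to the immediately preceding field
+// (bit-field vs. non-bit-field, zero-width bit-fields).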
+bool ASTContext::ZeroBitfieldFollowsNonBitfield(const FieldDecl *FD,
+ const FieldDecl *LastFD) const {
+ return (FD->isBitField() && LastFD && !LastFD->isBitField() &&
+ FD->getBitWidthValue(*this) == 0);
+}
+
+bool ASTContext::ZeroBitfieldFollowsBitfield(const FieldDecl *FD,
+ const FieldDecl *LastFD) const {
+ return (FD->isBitField() && LastFD && LastFD->isBitField() &&
+ FD->getBitWidthValue(*this) == 0 &&
+ LastFD->getBitWidthValue(*this) != 0);
+}
+
+bool ASTContext::BitfieldFollowsBitfield(const FieldDecl *FD,
+ const FieldDecl *LastFD) const {
+ return (FD->isBitField() && LastFD && LastFD->isBitField() &&
+ FD->getBitWidthValue(*this) &&
+ LastFD->getBitWidthValue(*this));
+}
+
+bool ASTContext::NonBitfieldFollowsBitfield(const FieldDecl *FD,
+ const FieldDecl *LastFD) const {
+ return (!FD->isBitField() && LastFD && LastFD->isBitField() &&
+ LastFD->getBitWidthValue(*this));
+}
+
+bool ASTContext::BitfieldFollowsNonBitfield(const FieldDecl *FD,
+ const FieldDecl *LastFD) const {
+ return (FD->isBitField() && LastFD && !LastFD->isBitField() &&
+ FD->getBitWidthValue(*this));
+}
+
+ASTContext::overridden_cxx_method_iterator
+ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
+ llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
+ = OverriddenMethods.find(Method);
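+  // A missing entry means the method overrides nothing; returning a null
+  // iterator here pairs with overridden_methods_end to give an empty range.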
+ if (Pos == OverriddenMethods.end())
+ return 0;
+
+ return Pos->second.begin();
+}
+
+ASTContext::overridden_cxx_method_iterator
+ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
+ llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
+ = OverriddenMethods.find(Method);
+ if (Pos == OverriddenMethods.end())
+ return 0;
+
+ return Pos->second.end();
+}
+
+unsigned
+ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
+ llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
+ = OverriddenMethods.find(Method);
+ if (Pos == OverriddenMethods.end())
+ return 0;
+
+ return Pos->second.size();
+}
+
+void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
+ const CXXMethodDecl *Overridden) {
+ OverriddenMethods[Method].push_back(Overridden);
+}
+
+void ASTContext::addedLocalImportDecl(ImportDecl *Import) {
+ assert(!Import->NextLocalImport && "Import declaration already in the chain");
+ assert(!Import->isFromASTFile() && "Non-local import declaration");
+ if (!FirstLocalImport) {
+ FirstLocalImport = Import;
+ LastLocalImport = Import;
+ return;
+ }
+
+ LastLocalImport->NextLocalImport = Import;
+ LastLocalImport = Import;
+}
+
+//===----------------------------------------------------------------------===//
+// Type Sizing and Analysis
+//===----------------------------------------------------------------------===//
+
+/// getFloatTypeSemantics - Return the APFloat 'semantics' for the specified
+/// scalar floating point type.
+const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
+ const BuiltinType *BT = T->getAs<BuiltinType>();
+ assert(BT && "Not a floating point type!");
+ switch (BT->getKind()) {
+ default: llvm_unreachable("Not a floating point type!");
+ case BuiltinType::Half: return Target->getHalfFormat();
+ case BuiltinType::Float: return Target->getFloatFormat();
+ case BuiltinType::Double: return Target->getDoubleFormat();
+ case BuiltinType::LongDouble: return Target->getLongDoubleFormat();
+ }
+}
+
+/// getDeclAlign - Return a conservative estimate of the alignment of the
+/// specified decl. Note that bitfields do not have a valid alignment, so
+/// this method will assert on them.
+/// If @p RefAsPointee, references are treated like their underlying type
+/// (for alignof), else they're treated like pointers (for CodeGen).
+CharUnits ASTContext::getDeclAlign(const Decl *D, bool RefAsPointee) const {
+ unsigned Align = Target->getCharWidth();
+
+ bool UseAlignAttrOnly = false;
+ if (unsigned AlignFromAttr = D->getMaxAlignment()) {
+ Align = AlignFromAttr;
+
+ // __attribute__((aligned)) can increase or decrease alignment
+ // *except* on a struct or struct member, where it only increases
+ // alignment unless 'packed' is also specified.
+ //
+ // It is an error for alignas to decrease alignment, so we can
+ // ignore that possibility; Sema should diagnose it.
+ if (isa<FieldDecl>(D)) {
+ UseAlignAttrOnly = D->hasAttr<PackedAttr>() ||
+ cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
+ } else {
+ UseAlignAttrOnly = true;
+ }
+ }
+ else if (isa<FieldDecl>(D))
+ UseAlignAttrOnly =
+ D->hasAttr<PackedAttr>() ||
+ cast<FieldDecl>(D)->getParent()->hasAttr<PackedAttr>();
+
+ // If we're using the align attribute only, just ignore everything
+ // else about the declaration and its type.
+ if (UseAlignAttrOnly) {
+ // do nothing
+
+ } else if (const ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
+ QualType T = VD->getType();
+ if (const ReferenceType* RT = T->getAs<ReferenceType>()) {
+ if (RefAsPointee)
+ T = RT->getPointeeType();
+ else
+ T = getPointerType(RT->getPointeeType());
+ }
+ if (!T->isIncompleteType() && !T->isFunctionType()) {
+ // Adjust alignments of declarations with array type by the
+ // large-array alignment on the target.
+ unsigned MinWidth = Target->getLargeArrayMinWidth();
+ const ArrayType *arrayType;
+ if (MinWidth && (arrayType = getAsArrayType(T))) {
+ if (isa<VariableArrayType>(arrayType))
+ Align = std::max(Align, Target->getLargeArrayAlign());
+ else if (isa<ConstantArrayType>(arrayType) &&
+ MinWidth <= getTypeSize(cast<ConstantArrayType>(arrayType)))
+ Align = std::max(Align, Target->getLargeArrayAlign());
+
+ // Walk through any array types while we're at it.
+ T = getBaseElementType(arrayType);
+ }
+ Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
+ }
+
+ // Fields can be subject to extra alignment constraints, like if
+ // the field is packed, the struct is packed, or the struct has a
+    // max-field-alignment constraint (#pragma pack). So calculate
+ // the actual alignment of the field within the struct, and then
+ // (as we're expected to) constrain that by the alignment of the type.
+ if (const FieldDecl *field = dyn_cast<FieldDecl>(VD)) {
+ // So calculate the alignment of the field.
+ const ASTRecordLayout &layout = getASTRecordLayout(field->getParent());
+
+ // Start with the record's overall alignment.
+ unsigned fieldAlign = toBits(layout.getAlignment());
+
+ // Use the GCD of that and the offset within the record.
+ uint64_t offset = layout.getFieldOffset(field->getFieldIndex());
+ if (offset > 0) {
+ // Alignment is always a power of 2, so the GCD will be a power of 2,
+ // which means we get to do this crazy thing instead of Euclid's.
+ uint64_t lowBitOfOffset = offset & (~offset + 1);
+ if (lowBitOfOffset < fieldAlign)
+ fieldAlign = static_cast<unsigned>(lowBitOfOffset);
+ }
+
+ Align = std::min(Align, fieldAlign);
+ }
+ }
+
+ return toCharUnitsFromBits(Align);
+}
+
+std::pair<CharUnits, CharUnits>
+ASTContext::getTypeInfoInChars(const Type *T) const {
+ std::pair<uint64_t, unsigned> Info = getTypeInfo(T);
+ return std::make_pair(toCharUnitsFromBits(Info.first),
+ toCharUnitsFromBits(Info.second));
+}
+
+std::pair<CharUnits, CharUnits>
+ASTContext::getTypeInfoInChars(QualType T) const {
+ return getTypeInfoInChars(T.getTypePtr());
+}
+
+std::pair<uint64_t, unsigned> ASTContext::getTypeInfo(const Type *T) const {
+ TypeInfoMap::iterator it = MemoizedTypeInfo.find(T);
+ if (it != MemoizedTypeInfo.end())
+ return it->second;
+
+ std::pair<uint64_t, unsigned> Info = getTypeInfoImpl(T);
+ MemoizedTypeInfo.insert(std::make_pair(T, Info));
+ return Info;
+}
+
+/// getTypeInfoImpl - Return the size of the specified type, in bits. This
+/// method does not work on incomplete types.
+///
+/// FIXME: Pointers into different addr spaces could have different sizes and
+/// alignment requirements: getPointerInfo should take an AddrSpace, this
+/// should take a QualType, &c.
+std::pair<uint64_t, unsigned>
+ASTContext::getTypeInfoImpl(const Type *T) const {
+ uint64_t Width=0;
+ unsigned Align=8;
+ switch (T->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Should not see dependent types");
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ // GCC extension: alignof(function) = 32 bits
+ Width = 0;
+ Align = 32;
+ break;
+
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ Width = 0;
+ Align = getTypeAlign(cast<ArrayType>(T)->getElementType());
+ break;
+
+ case Type::ConstantArray: {
+ const ConstantArrayType *CAT = cast<ConstantArrayType>(T);
+
+ std::pair<uint64_t, unsigned> EltInfo = getTypeInfo(CAT->getElementType());
+ uint64_t Size = CAT->getSize().getZExtValue();
+ assert((Size == 0 || EltInfo.first <= (uint64_t)(-1)/Size) &&
+ "Overflow in array type bit size evaluation");
+ Width = EltInfo.first*Size;
+ Align = EltInfo.second;
+ Width = llvm::RoundUpToAlignment(Width, Align);
+ break;
+ }
+ case Type::ExtVector:
+ case Type::Vector: {
+ const VectorType *VT = cast<VectorType>(T);
+ std::pair<uint64_t, unsigned> EltInfo = getTypeInfo(VT->getElementType());
+ Width = EltInfo.first*VT->getNumElements();
+ Align = Width;
+ // If the alignment is not a power of 2, round up to the next power of 2.
+ // This happens for non-power-of-2 length vectors.
+ if (Align & (Align-1)) {
+ Align = llvm::NextPowerOf2(Align);
+ Width = llvm::RoundUpToAlignment(Width, Align);
+ }
+ break;
+ }
+
+ case Type::Builtin:
+ switch (cast<BuiltinType>(T)->getKind()) {
+ default: llvm_unreachable("Unknown builtin type!");
+ case BuiltinType::Void:
+ // GCC extension: alignof(void) = 8 bits.
+ Width = 0;
+ Align = 8;
+ break;
+
+ case BuiltinType::Bool:
+ Width = Target->getBoolWidth();
+ Align = Target->getBoolAlign();
+ break;
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::SChar:
+ Width = Target->getCharWidth();
+ Align = Target->getCharAlign();
+ break;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ Width = Target->getWCharWidth();
+ Align = Target->getWCharAlign();
+ break;
+ case BuiltinType::Char16:
+ Width = Target->getChar16Width();
+ Align = Target->getChar16Align();
+ break;
+ case BuiltinType::Char32:
+ Width = Target->getChar32Width();
+ Align = Target->getChar32Align();
+ break;
+ case BuiltinType::UShort:
+ case BuiltinType::Short:
+ Width = Target->getShortWidth();
+ Align = Target->getShortAlign();
+ break;
+ case BuiltinType::UInt:
+ case BuiltinType::Int:
+ Width = Target->getIntWidth();
+ Align = Target->getIntAlign();
+ break;
+ case BuiltinType::ULong:
+ case BuiltinType::Long:
+ Width = Target->getLongWidth();
+ Align = Target->getLongAlign();
+ break;
+ case BuiltinType::ULongLong:
+ case BuiltinType::LongLong:
+ Width = Target->getLongLongWidth();
+ Align = Target->getLongLongAlign();
+ break;
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ Width = 128;
+ Align = 128; // int128_t is 128-bit aligned on all targets.
+ break;
+ case BuiltinType::Half:
+ Width = Target->getHalfWidth();
+ Align = Target->getHalfAlign();
+ break;
+ case BuiltinType::Float:
+ Width = Target->getFloatWidth();
+ Align = Target->getFloatAlign();
+ break;
+ case BuiltinType::Double:
+ Width = Target->getDoubleWidth();
+ Align = Target->getDoubleAlign();
+ break;
+ case BuiltinType::LongDouble:
+ Width = Target->getLongDoubleWidth();
+ Align = Target->getLongDoubleAlign();
+ break;
+ case BuiltinType::NullPtr:
+ Width = Target->getPointerWidth(0); // C++ 3.9.1p11: sizeof(nullptr_t)
+ Align = Target->getPointerAlign(0); // == sizeof(void*)
+ break;
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ Width = Target->getPointerWidth(0);
+ Align = Target->getPointerAlign(0);
+ break;
+ }
+ break;
+ case Type::ObjCObjectPointer:
+ Width = Target->getPointerWidth(0);
+ Align = Target->getPointerAlign(0);
+ break;
+ case Type::BlockPointer: {
+ unsigned AS = getTargetAddressSpace(
+ cast<BlockPointerType>(T)->getPointeeType());
+ Width = Target->getPointerWidth(AS);
+ Align = Target->getPointerAlign(AS);
+ break;
+ }
+ case Type::LValueReference:
+ case Type::RValueReference: {
+    // alignof and sizeof should never enter this code path, so we go
+ // the pointer route.
+ unsigned AS = getTargetAddressSpace(
+ cast<ReferenceType>(T)->getPointeeType());
+ Width = Target->getPointerWidth(AS);
+ Align = Target->getPointerAlign(AS);
+ break;
+ }
+ case Type::Pointer: {
+ unsigned AS = getTargetAddressSpace(cast<PointerType>(T)->getPointeeType());
+ Width = Target->getPointerWidth(AS);
+ Align = Target->getPointerAlign(AS);
+ break;
+ }
+ case Type::MemberPointer: {
+ const MemberPointerType *MPT = cast<MemberPointerType>(T);
+ std::pair<uint64_t, unsigned> PtrDiffInfo =
+ getTypeInfo(getPointerDiffType());
+ Width = PtrDiffInfo.first * ABI->getMemberPointerSize(MPT);
+ Align = PtrDiffInfo.second;
+ break;
+ }
+ case Type::Complex: {
+ // Complex types have the same alignment as their elements, but twice the
+ // size.
+ std::pair<uint64_t, unsigned> EltInfo =
+ getTypeInfo(cast<ComplexType>(T)->getElementType());
+ Width = EltInfo.first*2;
+ Align = EltInfo.second;
+ break;
+ }
+ case Type::ObjCObject:
+ return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
+ case Type::ObjCInterface: {
+ const ObjCInterfaceType *ObjCI = cast<ObjCInterfaceType>(T);
+ const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
+ Width = toBits(Layout.getSize());
+ Align = toBits(Layout.getAlignment());
+ break;
+ }
+ case Type::Record:
+ case Type::Enum: {
+ const TagType *TT = cast<TagType>(T);
+
+ if (TT->getDecl()->isInvalidDecl()) {
+ Width = 8;
+ Align = 8;
+ break;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(TT))
+ return getTypeInfo(ET->getDecl()->getIntegerType());
+
+ const RecordType *RT = cast<RecordType>(TT);
+ const ASTRecordLayout &Layout = getASTRecordLayout(RT->getDecl());
+ Width = toBits(Layout.getSize());
+ Align = toBits(Layout.getAlignment());
+ break;
+ }
+
+ case Type::SubstTemplateTypeParm:
+ return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
+ getReplacementType().getTypePtr());
+
+ case Type::Auto: {
+ const AutoType *A = cast<AutoType>(T);
+ assert(A->isDeduced() && "Cannot request the size of a dependent type");
+ return getTypeInfo(A->getDeducedType().getTypePtr());
+ }
+
+ case Type::Paren:
+ return getTypeInfo(cast<ParenType>(T)->getInnerType().getTypePtr());
+
+ case Type::Typedef: {
+ const TypedefNameDecl *Typedef = cast<TypedefType>(T)->getDecl();
+ std::pair<uint64_t, unsigned> Info
+ = getTypeInfo(Typedef->getUnderlyingType().getTypePtr());
+ // If the typedef has an aligned attribute on it, it overrides any computed
+ // alignment we have. This violates the GCC documentation (which says that
+ // attribute(aligned) can only round up) but matches its implementation.
+ if (unsigned AttrAlign = Typedef->getMaxAlignment())
+ Align = AttrAlign;
+ else
+ Align = Info.second;
+ Width = Info.first;
+ break;
+ }
+
+ case Type::TypeOfExpr:
+ return getTypeInfo(cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType()
+ .getTypePtr());
+
+ case Type::TypeOf:
+ return getTypeInfo(cast<TypeOfType>(T)->getUnderlyingType().getTypePtr());
+
+ case Type::Decltype:
+ return getTypeInfo(cast<DecltypeType>(T)->getUnderlyingExpr()->getType()
+ .getTypePtr());
+
+ case Type::UnaryTransform:
+ return getTypeInfo(cast<UnaryTransformType>(T)->getUnderlyingType());
+
+ case Type::Elaborated:
+ return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
+
+ case Type::Attributed:
+ return getTypeInfo(
+ cast<AttributedType>(T)->getEquivalentType().getTypePtr());
+
+ case Type::TemplateSpecialization: {
+ assert(getCanonicalType(T) != T &&
+ "Cannot request the size of a dependent type");
+ const TemplateSpecializationType *TST = cast<TemplateSpecializationType>(T);
+ // A type alias template specialization may refer to a typedef with the
+ // aligned attribute on it.
+ if (TST->isTypeAlias())
+ return getTypeInfo(TST->getAliasedType().getTypePtr());
+ else
+ return getTypeInfo(getCanonicalType(T));
+ }
+
+ case Type::Atomic: {
+ std::pair<uint64_t, unsigned> Info
+ = getTypeInfo(cast<AtomicType>(T)->getValueType());
+ Width = Info.first;
+ Align = Info.second;
+ if (Width != 0 && Width <= Target->getMaxAtomicPromoteWidth() &&
+ llvm::isPowerOf2_64(Width)) {
+ // We can potentially perform lock-free atomic operations for this
+ // type; promote the alignment appropriately.
+ // FIXME: We could potentially promote the width here as well...
+ // is that worthwhile? (Non-struct atomic types generally have
+ // power-of-two size anyway, but structs might not. Requires a bit
+ // of implementation work to make sure we zero out the extra bits.)
+ Align = static_cast<unsigned>(Width);
+ }
+ }
+
+ }
+
+ assert(llvm::isPowerOf2_32(Align) && "Alignment must be power of 2");
+ return std::make_pair(Width, Align);
+}
+
+/// toCharUnitsFromBits - Convert a size in bits to a size in characters.
+CharUnits ASTContext::toCharUnitsFromBits(int64_t BitSize) const {
+ return CharUnits::fromQuantity(BitSize / getCharWidth());
+}
+
+/// toBits - Convert a size in characters to a size in bits.
+int64_t ASTContext::toBits(CharUnits CharSize) const {
+ return CharSize.getQuantity() * getCharWidth();
+}
+
+/// getTypeSizeInChars - Return the size of the specified type, in characters.
+/// This method does not work on incomplete types.
+CharUnits ASTContext::getTypeSizeInChars(QualType T) const {
+ return toCharUnitsFromBits(getTypeSize(T));
+}
+CharUnits ASTContext::getTypeSizeInChars(const Type *T) const {
+ return toCharUnitsFromBits(getTypeSize(T));
+}
+
+/// getTypeAlignInChars - Return the ABI-specified alignment of a type, in
+/// characters. This method does not work on incomplete types.
+CharUnits ASTContext::getTypeAlignInChars(QualType T) const {
+ return toCharUnitsFromBits(getTypeAlign(T));
+}
+CharUnits ASTContext::getTypeAlignInChars(const Type *T) const {
+ return toCharUnitsFromBits(getTypeAlign(T));
+}
+
+/// getPreferredTypeAlign - Return the "preferred" alignment of the specified
+/// type for the current target in bits. This can be different than the ABI
+/// alignment in cases where it is beneficial for performance to overalign
+/// a data type.
+unsigned ASTContext::getPreferredTypeAlign(const Type *T) const {
+ unsigned ABIAlign = getTypeAlign(T);
+
+ // Double and long long should be naturally aligned if possible.
+ if (const ComplexType* CT = T->getAs<ComplexType>())
+ T = CT->getElementType().getTypePtr();
+ if (T->isSpecificBuiltinType(BuiltinType::Double) ||
+ T->isSpecificBuiltinType(BuiltinType::LongLong) ||
+ T->isSpecificBuiltinType(BuiltinType::ULongLong))
+ return std::max(ABIAlign, (unsigned)getTypeSize(T));
+
+ return ABIAlign;
+}
+
+/// DeepCollectObjCIvars -
+/// This routine first collects all declared, but not synthesized, ivars of the
+/// superclasses and then collects all ivars, including those synthesized, of
+/// the current class. It is used when implementing the current class, where
+/// all ivars, declared and synthesized, are known.
+///
+void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
+ bool leafClass,
+ SmallVectorImpl<const ObjCIvarDecl*> &Ivars) const {
+ if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
+ DeepCollectObjCIvars(SuperClass, false, Ivars);
+ if (!leafClass) {
+ for (ObjCInterfaceDecl::ivar_iterator I = OI->ivar_begin(),
+ E = OI->ivar_end(); I != E; ++I)
+ Ivars.push_back(*I);
+ } else {
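+    // For the leaf class, walk all_declared_ivar_begin so that synthesized
+    // ivars are included as well.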
+ ObjCInterfaceDecl *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
+ for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
+ Iv= Iv->getNextIvar())
+ Ivars.push_back(Iv);
+ }
+}
+
+/// CollectInheritedProtocols - Collect all protocols in the current class and
+/// those inherited by it.
+void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
+ llvm::SmallPtrSet<ObjCProtocolDecl*, 8> &Protocols) {
+ if (const ObjCInterfaceDecl *OI = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
+ // We can use protocol_iterator here instead of
+ // all_referenced_protocol_iterator since we are walking all categories.
+ for (ObjCInterfaceDecl::all_protocol_iterator P = OI->all_referenced_protocol_begin(),
+ PE = OI->all_referenced_protocol_end(); P != PE; ++P) {
+ ObjCProtocolDecl *Proto = (*P);
+ Protocols.insert(Proto->getCanonicalDecl());
+ for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
+ PE = Proto->protocol_end(); P != PE; ++P) {
+ Protocols.insert((*P)->getCanonicalDecl());
+ CollectInheritedProtocols(*P, Protocols);
+ }
+ }
+
+ // Categories of this Interface.
+ for (const ObjCCategoryDecl *CDeclChain = OI->getCategoryList();
+ CDeclChain; CDeclChain = CDeclChain->getNextClassCategory())
+ CollectInheritedProtocols(CDeclChain, Protocols);
+ if (ObjCInterfaceDecl *SD = OI->getSuperClass())
+ while (SD) {
+ CollectInheritedProtocols(SD, Protocols);
+ SD = SD->getSuperClass();
+ }
+ } else if (const ObjCCategoryDecl *OC = dyn_cast<ObjCCategoryDecl>(CDecl)) {
+ for (ObjCCategoryDecl::protocol_iterator P = OC->protocol_begin(),
+ PE = OC->protocol_end(); P != PE; ++P) {
+ ObjCProtocolDecl *Proto = (*P);
+ Protocols.insert(Proto->getCanonicalDecl());
+ for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
+ PE = Proto->protocol_end(); P != PE; ++P)
+ CollectInheritedProtocols(*P, Protocols);
+ }
+ } else if (const ObjCProtocolDecl *OP = dyn_cast<ObjCProtocolDecl>(CDecl)) {
+ for (ObjCProtocolDecl::protocol_iterator P = OP->protocol_begin(),
+ PE = OP->protocol_end(); P != PE; ++P) {
+ ObjCProtocolDecl *Proto = (*P);
+ Protocols.insert(Proto->getCanonicalDecl());
+ for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
+ PE = Proto->protocol_end(); P != PE; ++P)
+ CollectInheritedProtocols(*P, Protocols);
+ }
+ }
+}
+
+unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) const {
+ unsigned count = 0;
+ // Count ivars declared in class extension.
+ for (const ObjCCategoryDecl *CDecl = OI->getFirstClassExtension(); CDecl;
+ CDecl = CDecl->getNextClassExtension())
+ count += CDecl->ivar_size();
+
+  // Count ivars defined in this class's implementation. This
+ // includes synthesized ivars.
+ if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
+ count += ImplDecl->ivar_size();
+
+ return count;
+}
+
+bool ASTContext::isSentinelNullExpr(const Expr *E) {
+ if (!E)
+ return false;
+
+ // nullptr_t is always treated as null.
+ if (E->getType()->isNullPtrType()) return true;
+
+ if (E->getType()->isAnyPointerType() &&
+ E->IgnoreParenCasts()->isNullPointerConstant(*this,
+ Expr::NPC_ValueDependentIsNull))
+ return true;
+
+ // Unfortunately, __null has type 'int'.
+ if (isa<GNUNullExpr>(E)) return true;
+
+ return false;
+}
+
+/// \brief Get the implementation of ObjCInterfaceDecl, or NULL if none exists.
+ObjCImplementationDecl *ASTContext::getObjCImplementation(ObjCInterfaceDecl *D) {
+ llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
+ I = ObjCImpls.find(D);
+ if (I != ObjCImpls.end())
+ return cast<ObjCImplementationDecl>(I->second);
+ return 0;
+}
+/// \brief Get the implementation of ObjCCategoryDecl, or NULL if none exists.
+ObjCCategoryImplDecl *ASTContext::getObjCImplementation(ObjCCategoryDecl *D) {
+ llvm::DenseMap<ObjCContainerDecl*, ObjCImplDecl*>::iterator
+ I = ObjCImpls.find(D);
+ if (I != ObjCImpls.end())
+ return cast<ObjCCategoryImplDecl>(I->second);
+ return 0;
+}
+
+/// \brief Set the implementation of ObjCInterfaceDecl.
+void ASTContext::setObjCImplementation(ObjCInterfaceDecl *IFaceD,
+ ObjCImplementationDecl *ImplD) {
+ assert(IFaceD && ImplD && "Passed null params");
+ ObjCImpls[IFaceD] = ImplD;
+}
+/// \brief Set the implementation of ObjCCategoryDecl.
+void ASTContext::setObjCImplementation(ObjCCategoryDecl *CatD,
+ ObjCCategoryImplDecl *ImplD) {
+ assert(CatD && ImplD && "Passed null params");
+ ObjCImpls[CatD] = ImplD;
+}
+
+ObjCInterfaceDecl *ASTContext::getObjContainingInterface(NamedDecl *ND) const {
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(ND->getDeclContext()))
+ return ID;
+ if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(ND->getDeclContext()))
+ return CD->getClassInterface();
+ if (ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(ND->getDeclContext()))
+ return IMD->getClassInterface();
+
+ return 0;
+}
+
+/// \brief Get the copy initialization expression of VarDecl, or NULL if
+/// none exists.
+Expr *ASTContext::getBlockVarCopyInits(const VarDecl*VD) {
+ assert(VD && "Passed null params");
+ assert(VD->hasAttr<BlocksAttr>() &&
+ "getBlockVarCopyInits - not __block var");
+ llvm::DenseMap<const VarDecl*, Expr*>::iterator
+ I = BlockVarCopyInits.find(VD);
+ return (I != BlockVarCopyInits.end()) ? cast<Expr>(I->second) : 0;
+}
+
+/// \brief Set the copy initialization expression of a block var decl.
+void ASTContext::setBlockVarCopyInits(VarDecl*VD, Expr* Init) {
+ assert(VD && Init && "Passed null params");
+ assert(VD->hasAttr<BlocksAttr>() &&
+ "setBlockVarCopyInits - not __block var");
+ BlockVarCopyInits[VD] = Init;
+}
+
+/// \brief Allocate an uninitialized TypeSourceInfo.
+///
+/// The caller should initialize the memory held by TypeSourceInfo using
+/// the TypeLoc wrappers.
+///
+/// \param T the type that will be the basis for type source info. This type
+/// should refer to how the declarator was written in source code, not to
+/// what type semantic analysis resolved the declarator to.
+TypeSourceInfo *ASTContext::CreateTypeSourceInfo(QualType T,
+ unsigned DataSize) const {
+ if (!DataSize)
+ DataSize = TypeLoc::getFullDataSizeForType(T);
+ else
+ assert(DataSize == TypeLoc::getFullDataSizeForType(T) &&
+ "incorrect data size provided to CreateTypeSourceInfo!");
+
+ TypeSourceInfo *TInfo =
+ (TypeSourceInfo*)BumpAlloc.Allocate(sizeof(TypeSourceInfo) + DataSize, 8);
+ new (TInfo) TypeSourceInfo(T);
+ return TInfo;
+}
+
+TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
+ SourceLocation L) const {
+ TypeSourceInfo *DI = CreateTypeSourceInfo(T);
+ DI->getTypeLoc().initialize(const_cast<ASTContext &>(*this), L);
+ return DI;
+}
+
+const ASTRecordLayout &
+ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) const {
+ return getObjCLayout(D, 0);
+}
+
+const ASTRecordLayout &
+ASTContext::getASTObjCImplementationLayout(
+ const ObjCImplementationDecl *D) const {
+ return getObjCLayout(D->getClassInterface(), D);
+}
+
+//===----------------------------------------------------------------------===//
+// Type creation/memoization methods
+//===----------------------------------------------------------------------===//
+
+QualType
+ASTContext::getExtQualType(const Type *baseType, Qualifiers quals) const {
+ unsigned fastQuals = quals.getFastQualifiers();
+ quals.removeFastQualifiers();
+
+ // Check if we've already instantiated this type.
+ llvm::FoldingSetNodeID ID;
+ ExtQuals::Profile(ID, baseType, quals);
+ void *insertPos = 0;
+ if (ExtQuals *eq = ExtQualNodes.FindNodeOrInsertPos(ID, insertPos)) {
+ assert(eq->getQualifiers() == quals);
+ return QualType(eq, fastQuals);
+ }
+
+ // If the base type is not canonical, make the appropriate canonical type.
+ QualType canon;
+ if (!baseType->isCanonicalUnqualified()) {
+ SplitQualType canonSplit = baseType->getCanonicalTypeInternal().split();
+ canonSplit.Quals.addConsistentQualifiers(quals);
+ canon = getExtQualType(canonSplit.Ty, canonSplit.Quals);
+
+ // Re-find the insert position.
+ (void) ExtQualNodes.FindNodeOrInsertPos(ID, insertPos);
+ }
+
+ ExtQuals *eq = new (*this, TypeAlignment) ExtQuals(baseType, canon, quals);
+ ExtQualNodes.InsertNode(eq, insertPos);
+ return QualType(eq, fastQuals);
+}
+
+QualType
+ASTContext::getAddrSpaceQualType(QualType T, unsigned AddressSpace) const {
+ QualType CanT = getCanonicalType(T);
+ if (CanT.getAddressSpace() == AddressSpace)
+ return T;
+
+ // If we are composing extended qualifiers together, merge together
+ // into one ExtQuals node.
+ QualifierCollector Quals;
+ const Type *TypeNode = Quals.strip(T);
+
+ // If this type already has an address space specified, it cannot get
+ // another one.
+ assert(!Quals.hasAddressSpace() &&
+ "Type cannot be in multiple addr spaces!");
+ Quals.addAddressSpace(AddressSpace);
+
+ return getExtQualType(TypeNode, Quals);
+}
+
+QualType ASTContext::getObjCGCQualType(QualType T,
+ Qualifiers::GC GCAttr) const {
+ QualType CanT = getCanonicalType(T);
+ if (CanT.getObjCGCAttr() == GCAttr)
+ return T;
+
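+  // For a pointer whose pointee is itself a pointer, push the GC qualifier down
+  // onto the pointee and rebuild the outer pointer type.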
+ if (const PointerType *ptr = T->getAs<PointerType>()) {
+ QualType Pointee = ptr->getPointeeType();
+ if (Pointee->isAnyPointerType()) {
+ QualType ResultType = getObjCGCQualType(Pointee, GCAttr);
+ return getPointerType(ResultType);
+ }
+ }
+
+ // If we are composing extended qualifiers together, merge together
+ // into one ExtQuals node.
+ QualifierCollector Quals;
+ const Type *TypeNode = Quals.strip(T);
+
+ // If this type already has an ObjCGC specified, it cannot get
+ // another one.
+ assert(!Quals.hasObjCGCAttr() &&
+ "Type cannot have multiple ObjCGCs!");
+ Quals.addObjCGCAttr(GCAttr);
+
+ return getExtQualType(TypeNode, Quals);
+}
+
+const FunctionType *ASTContext::adjustFunctionType(const FunctionType *T,
+ FunctionType::ExtInfo Info) {
+ if (T->getExtInfo() == Info)
+ return T;
+
+ QualType Result;
+ if (const FunctionNoProtoType *FNPT = dyn_cast<FunctionNoProtoType>(T)) {
+ Result = getFunctionNoProtoType(FNPT->getResultType(), Info);
+ } else {
+ const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.ExtInfo = Info;
+ Result = getFunctionType(FPT->getResultType(), FPT->arg_type_begin(),
+ FPT->getNumArgs(), EPI);
+ }
+
+ return cast<FunctionType>(Result.getTypePtr());
+}
+
+/// getComplexType - Return the uniqued reference to the type for a complex
+/// number with the specified element type.
+QualType ASTContext::getComplexType(QualType T) const {
+  // Unique complex types, to guarantee there is only one complex type of a
+  // particular structure.
+ llvm::FoldingSetNodeID ID;
+ ComplexType::Profile(ID, T);
+
+ void *InsertPos = 0;
+ if (ComplexType *CT = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(CT, 0);
+
+ // If the pointee type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!T.isCanonical()) {
+ Canonical = getComplexType(getCanonicalType(T));
+
+ // Get the new insert position for the node we care about.
+ ComplexType *NewIP = ComplexTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ ComplexType *New = new (*this, TypeAlignment) ComplexType(T, Canonical);
+ Types.push_back(New);
+ ComplexTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getPointerType - Return the uniqued reference to the type for a pointer to
+/// the specified type.
+QualType ASTContext::getPointerType(QualType T) const {
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ PointerType::Profile(ID, T);
+
+ void *InsertPos = 0;
+ if (PointerType *PT = PointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(PT, 0);
+
+ // If the pointee type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!T.isCanonical()) {
+ Canonical = getPointerType(getCanonicalType(T));
+
+ // Get the new insert position for the node we care about.
+ PointerType *NewIP = PointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ PointerType *New = new (*this, TypeAlignment) PointerType(T, Canonical);
+ Types.push_back(New);
+ PointerTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getBlockPointerType - Return the uniqued reference to the type for
+/// a pointer to the specified block.
+QualType ASTContext::getBlockPointerType(QualType T) const {
+ assert(T->isFunctionType() && "block of function types only");
+ // Unique pointers, to guarantee there is only one block of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ BlockPointerType::Profile(ID, T);
+
+ void *InsertPos = 0;
+ if (BlockPointerType *PT =
+ BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(PT, 0);
+
+ // If the block pointee type isn't canonical, this won't be a canonical
+ // type either so fill in the canonical type field.
+ QualType Canonical;
+ if (!T.isCanonical()) {
+ Canonical = getBlockPointerType(getCanonicalType(T));
+
+ // Get the new insert position for the node we care about.
+ BlockPointerType *NewIP =
+ BlockPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ BlockPointerType *New
+ = new (*this, TypeAlignment) BlockPointerType(T, Canonical);
+ Types.push_back(New);
+ BlockPointerTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getLValueReferenceType - Return the uniqued reference to the type for an
+/// lvalue reference to the specified type.
+QualType
+ASTContext::getLValueReferenceType(QualType T, bool SpelledAsLValue) const {
+ assert(getCanonicalType(T) != OverloadTy &&
+ "Unresolved overloaded function type");
+
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ ReferenceType::Profile(ID, T, SpelledAsLValue);
+
+ void *InsertPos = 0;
+ if (LValueReferenceType *RT =
+ LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(RT, 0);
+
+ const ReferenceType *InnerRef = T->getAs<ReferenceType>();
+
+ // If the referencee type isn't canonical, this won't be a canonical type
+ // either, so fill in the canonical type field.
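+  // The canonical form is always spelled as an lvalue reference and never
+  // wraps another reference type (references to references collapse).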
+ QualType Canonical;
+ if (!SpelledAsLValue || InnerRef || !T.isCanonical()) {
+ QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
+ Canonical = getLValueReferenceType(getCanonicalType(PointeeType));
+
+ // Get the new insert position for the node we care about.
+ LValueReferenceType *NewIP =
+ LValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+
+ LValueReferenceType *New
+ = new (*this, TypeAlignment) LValueReferenceType(T, Canonical,
+ SpelledAsLValue);
+ Types.push_back(New);
+ LValueReferenceTypes.InsertNode(New, InsertPos);
+
+ return QualType(New, 0);
+}
+
+/// getRValueReferenceType - Return the uniqued reference to the type for an
+/// rvalue reference to the specified type.
+QualType ASTContext::getRValueReferenceType(QualType T) const {
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ ReferenceType::Profile(ID, T, false);
+
+ void *InsertPos = 0;
+ if (RValueReferenceType *RT =
+ RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(RT, 0);
+
+ const ReferenceType *InnerRef = T->getAs<ReferenceType>();
+
+ // If the referencee type isn't canonical, this won't be a canonical type
+ // either, so fill in the canonical type field.
+ QualType Canonical;
+ if (InnerRef || !T.isCanonical()) {
+ QualType PointeeType = (InnerRef ? InnerRef->getPointeeType() : T);
+ Canonical = getRValueReferenceType(getCanonicalType(PointeeType));
+
+ // Get the new insert position for the node we care about.
+ RValueReferenceType *NewIP =
+ RValueReferenceTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+
+ RValueReferenceType *New
+ = new (*this, TypeAlignment) RValueReferenceType(T, Canonical);
+ Types.push_back(New);
+ RValueReferenceTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getMemberPointerType - Return the uniqued reference to the type for a
+/// member pointer to the specified type, in the specified class.
+QualType ASTContext::getMemberPointerType(QualType T, const Type *Cls) const {
+ // Unique pointers, to guarantee there is only one pointer of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ MemberPointerType::Profile(ID, T, Cls);
+
+ void *InsertPos = 0;
+ if (MemberPointerType *PT =
+ MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(PT, 0);
+
+ // If the pointee or class type isn't canonical, this won't be a canonical
+ // type either, so fill in the canonical type field.
+ QualType Canonical;
+ if (!T.isCanonical() || !Cls->isCanonicalUnqualified()) {
+ Canonical = getMemberPointerType(getCanonicalType(T),getCanonicalType(Cls));
+
+ // Get the new insert position for the node we care about.
+ MemberPointerType *NewIP =
+ MemberPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ MemberPointerType *New
+ = new (*this, TypeAlignment) MemberPointerType(T, Cls, Canonical);
+ Types.push_back(New);
+ MemberPointerTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getConstantArrayType - Return the unique reference to the type for an
+/// array of the specified element type.
+QualType ASTContext::getConstantArrayType(QualType EltTy,
+ const llvm::APInt &ArySizeIn,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned IndexTypeQuals) const {
+ assert((EltTy->isDependentType() ||
+ EltTy->isIncompleteType() || EltTy->isConstantSizeType()) &&
+ "Constant array of VLAs is illegal!");
+
+ // Convert the array size into a canonical width matching the pointer size for
+ // the target.
+ llvm::APInt ArySize(ArySizeIn);
+ ArySize =
+ ArySize.zextOrTrunc(Target->getPointerWidth(getTargetAddressSpace(EltTy)));
+
+ llvm::FoldingSetNodeID ID;
+ ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, IndexTypeQuals);
+
+ void *InsertPos = 0;
+ if (ConstantArrayType *ATP =
+ ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(ATP, 0);
+
+ // If the element type isn't canonical or has qualifiers, this won't
+ // be a canonical type either, so fill in the canonical type field.
+ QualType Canon;
+ if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
+ SplitQualType canonSplit = getCanonicalType(EltTy).split();
+ Canon = getConstantArrayType(QualType(canonSplit.Ty, 0), ArySize,
+ ASM, IndexTypeQuals);
+ Canon = getQualifiedType(Canon, canonSplit.Quals);
+
+ // Get the new insert position for the node we care about.
+ ConstantArrayType *NewIP =
+ ConstantArrayTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+
+  ConstantArrayType *New = new (*this, TypeAlignment)
+ ConstantArrayType(EltTy, Canon, ArySize, ASM, IndexTypeQuals);
+ ConstantArrayTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+/// getVariableArrayDecayedType - Turns the given type, which may be
+/// variably-modified, into the corresponding type with all the known
+/// sizes replaced with [*].
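+/// For example, 'int (*)[n][3]' becomes 'int (*)[*][3]'.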
+QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
+ // Vastly most common case.
+ if (!type->isVariablyModifiedType()) return type;
+
+ QualType result;
+
+ SplitQualType split = type.getSplitDesugaredType();
+ const Type *ty = split.Ty;
+ switch (ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("didn't desugar past all non-canonical types?");
+
+ // These types should never be variably-modified.
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::DependentSizedExtVector:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ case Type::Record:
+ case Type::Enum:
+ case Type::UnresolvedUsing:
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ case Type::UnaryTransform:
+ case Type::DependentName:
+ case Type::InjectedClassName:
+ case Type::TemplateSpecialization:
+ case Type::DependentTemplateSpecialization:
+ case Type::TemplateTypeParm:
+ case Type::SubstTemplateTypeParmPack:
+ case Type::Auto:
+ case Type::PackExpansion:
+ llvm_unreachable("type should never be variably-modified");
+
+ // These types can be variably-modified but should never need to
+ // further decay.
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ case Type::BlockPointer:
+ case Type::MemberPointer:
+ return type;
+
+ // These types can be variably-modified. All these modifications
+ // preserve structure except as noted by comments.
+ // TODO: if we ever care about optimizing VLAs, there are no-op
+ // optimizations available here.
+ case Type::Pointer:
+ result = getPointerType(getVariableArrayDecayedType(
+ cast<PointerType>(ty)->getPointeeType()));
+ break;
+
+ case Type::LValueReference: {
+ const LValueReferenceType *lv = cast<LValueReferenceType>(ty);
+ result = getLValueReferenceType(
+ getVariableArrayDecayedType(lv->getPointeeType()),
+ lv->isSpelledAsLValue());
+ break;
+ }
+
+ case Type::RValueReference: {
+ const RValueReferenceType *lv = cast<RValueReferenceType>(ty);
+ result = getRValueReferenceType(
+ getVariableArrayDecayedType(lv->getPointeeType()));
+ break;
+ }
+
+ case Type::Atomic: {
+ const AtomicType *at = cast<AtomicType>(ty);
+ result = getAtomicType(getVariableArrayDecayedType(at->getValueType()));
+ break;
+ }
+
+ case Type::ConstantArray: {
+ const ConstantArrayType *cat = cast<ConstantArrayType>(ty);
+ result = getConstantArrayType(
+ getVariableArrayDecayedType(cat->getElementType()),
+ cat->getSize(),
+ cat->getSizeModifier(),
+ cat->getIndexTypeCVRQualifiers());
+ break;
+ }
+
+ case Type::DependentSizedArray: {
+ const DependentSizedArrayType *dat = cast<DependentSizedArrayType>(ty);
+ result = getDependentSizedArrayType(
+ getVariableArrayDecayedType(dat->getElementType()),
+ dat->getSizeExpr(),
+ dat->getSizeModifier(),
+ dat->getIndexTypeCVRQualifiers(),
+ dat->getBracketsRange());
+ break;
+ }
+
+ // Turn incomplete types into [*] types.
+ case Type::IncompleteArray: {
+ const IncompleteArrayType *iat = cast<IncompleteArrayType>(ty);
+ result = getVariableArrayType(
+ getVariableArrayDecayedType(iat->getElementType()),
+ /*size*/ 0,
+ ArrayType::Normal,
+ iat->getIndexTypeCVRQualifiers(),
+ SourceRange());
+ break;
+ }
+
+ // Turn VLA types into [*] types.
+ case Type::VariableArray: {
+ const VariableArrayType *vat = cast<VariableArrayType>(ty);
+ result = getVariableArrayType(
+ getVariableArrayDecayedType(vat->getElementType()),
+ /*size*/ 0,
+ ArrayType::Star,
+ vat->getIndexTypeCVRQualifiers(),
+ vat->getBracketsRange());
+ break;
+ }
+ }
+
+ // Apply the top-level qualifiers from the original.
+ return getQualifiedType(result, split.Quals);
+}
+
+/// getVariableArrayType - Returns a non-unique reference to the type for a
+/// variable array of the specified element type.
+QualType ASTContext::getVariableArrayType(QualType EltTy,
+ Expr *NumElts,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned IndexTypeQuals,
+ SourceRange Brackets) const {
+ // Since we don't unique expressions, it isn't possible to unique VLA's
+ // that have an expression provided for their size.
+ QualType Canon;
+
+ // Be sure to pull qualifiers off the element type.
+ if (!EltTy.isCanonical() || EltTy.hasLocalQualifiers()) {
+ SplitQualType canonSplit = getCanonicalType(EltTy).split();
+ Canon = getVariableArrayType(QualType(canonSplit.Ty, 0), NumElts, ASM,
+ IndexTypeQuals, Brackets);
+ Canon = getQualifiedType(Canon, canonSplit.Quals);
+ }
+
+ VariableArrayType *New = new(*this, TypeAlignment)
+ VariableArrayType(EltTy, Canon, NumElts, ASM, IndexTypeQuals, Brackets);
+
+ VariableArrayTypes.push_back(New);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+/// getDependentSizedArrayType - Returns a non-unique reference to
+/// the type for a dependently-sized array of the specified element
+/// type.
+QualType ASTContext::getDependentSizedArrayType(QualType elementType,
+ Expr *numElements,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned elementTypeQuals,
+ SourceRange brackets) const {
+ assert((!numElements || numElements->isTypeDependent() ||
+ numElements->isValueDependent()) &&
+ "Size must be type- or value-dependent!");
+
+ // Dependently-sized array types that do not have a specified number
+ // of elements will have their sizes deduced from a dependent
+ // initializer. We do no canonicalization here at all, which is okay
+ // because they can't be used in most locations.
+ if (!numElements) {
+ DependentSizedArrayType *newType
+ = new (*this, TypeAlignment)
+ DependentSizedArrayType(*this, elementType, QualType(),
+ numElements, ASM, elementTypeQuals,
+ brackets);
+ Types.push_back(newType);
+ return QualType(newType, 0);
+ }
+
+ // Otherwise, we actually build a new type every time, but we
+ // also build a canonical type.
+
+ SplitQualType canonElementType = getCanonicalType(elementType).split();
+
+ void *insertPos = 0;
+ llvm::FoldingSetNodeID ID;
+ DependentSizedArrayType::Profile(ID, *this,
+ QualType(canonElementType.Ty, 0),
+ ASM, elementTypeQuals, numElements);
+
+ // Look for an existing type with these properties.
+ DependentSizedArrayType *canonTy =
+ DependentSizedArrayTypes.FindNodeOrInsertPos(ID, insertPos);
+
+ // If we don't have one, build one.
+ if (!canonTy) {
+ canonTy = new (*this, TypeAlignment)
+ DependentSizedArrayType(*this, QualType(canonElementType.Ty, 0),
+ QualType(), numElements, ASM, elementTypeQuals,
+ brackets);
+ DependentSizedArrayTypes.InsertNode(canonTy, insertPos);
+ Types.push_back(canonTy);
+ }
+
+ // Apply qualifiers from the element type to the array.
+ QualType canon = getQualifiedType(QualType(canonTy,0),
+ canonElementType.Quals);
+
+ // If we didn't need extra canonicalization for the element type,
+ // then just use that as our result.
+ if (QualType(canonElementType.Ty, 0) == elementType)
+ return canon;
+
+ // Otherwise, we need to build a type which follows the spelling
+ // of the element type.
+ DependentSizedArrayType *sugaredType
+ = new (*this, TypeAlignment)
+ DependentSizedArrayType(*this, elementType, canon, numElements,
+ ASM, elementTypeQuals, brackets);
+ Types.push_back(sugaredType);
+ return QualType(sugaredType, 0);
+}
+
+QualType ASTContext::getIncompleteArrayType(QualType elementType,
+ ArrayType::ArraySizeModifier ASM,
+ unsigned elementTypeQuals) const {
+ llvm::FoldingSetNodeID ID;
+ IncompleteArrayType::Profile(ID, elementType, ASM, elementTypeQuals);
+
+ void *insertPos = 0;
+ if (IncompleteArrayType *iat =
+ IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos))
+ return QualType(iat, 0);
+
+ // If the element type isn't canonical, this won't be a canonical type
+ // either, so fill in the canonical type field. We also have to pull
+ // qualifiers off the element type.
+ QualType canon;
+
+ if (!elementType.isCanonical() || elementType.hasLocalQualifiers()) {
+ SplitQualType canonSplit = getCanonicalType(elementType).split();
+ canon = getIncompleteArrayType(QualType(canonSplit.Ty, 0),
+ ASM, elementTypeQuals);
+ canon = getQualifiedType(canon, canonSplit.Quals);
+
+ // Get the new insert position for the node we care about.
+ IncompleteArrayType *existing =
+ IncompleteArrayTypes.FindNodeOrInsertPos(ID, insertPos);
+ assert(!existing && "Shouldn't be in the map!"); (void) existing;
+ }
+
+ IncompleteArrayType *newType = new (*this, TypeAlignment)
+ IncompleteArrayType(elementType, canon, ASM, elementTypeQuals);
+
+ IncompleteArrayTypes.InsertNode(newType, insertPos);
+ Types.push_back(newType);
+ return QualType(newType, 0);
+}
+
+/// getVectorType - Return the unique reference to a vector type of
+/// the specified element type and size. The element type must be a built-in
+/// type.
+QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
+ VectorType::VectorKind VecKind) const {
+ assert(vecType->isBuiltinType());
+
+ // Check if we've already instantiated a vector of this type.
+ llvm::FoldingSetNodeID ID;
+ VectorType::Profile(ID, vecType, NumElts, Type::Vector, VecKind);
+
+ void *InsertPos = 0;
+ if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(VTP, 0);
+
+ // If the element type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!vecType.isCanonical()) {
+ Canonical = getVectorType(getCanonicalType(vecType), NumElts, VecKind);
+
+ // Get the new insert position for the node we care about.
+ VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ VectorType *New = new (*this, TypeAlignment)
+ VectorType(vecType, NumElts, Canonical, VecKind);
+ VectorTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+/// getExtVectorType - Return the unique reference to an extended vector type of
+/// the specified element type and size. The element type must be a built-in
+/// or dependent type.
+QualType
+ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) const {
+ assert(vecType->isBuiltinType() || vecType->isDependentType());
+
+ // Check if we've already instantiated a vector of this type.
+ llvm::FoldingSetNodeID ID;
+ VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
+ VectorType::GenericVector);
+ void *InsertPos = 0;
+ if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(VTP, 0);
+
+ // If the element type isn't canonical, this won't be a canonical type either,
+ // so fill in the canonical type field.
+ QualType Canonical;
+ if (!vecType.isCanonical()) {
+ Canonical = getExtVectorType(getCanonicalType(vecType), NumElts);
+
+ // Get the new insert position for the node we care about.
+ VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ ExtVectorType *New = new (*this, TypeAlignment)
+ ExtVectorType(vecType, NumElts, Canonical);
+ VectorTypes.InsertNode(New, InsertPos);
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+QualType
+ASTContext::getDependentSizedExtVectorType(QualType vecType,
+ Expr *SizeExpr,
+ SourceLocation AttrLoc) const {
+ llvm::FoldingSetNodeID ID;
+ DependentSizedExtVectorType::Profile(ID, *this, getCanonicalType(vecType),
+ SizeExpr);
+
+ void *InsertPos = 0;
+ DependentSizedExtVectorType *Canon
+ = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
+ DependentSizedExtVectorType *New;
+ if (Canon) {
+ // We already have a canonical version of this array type; use it as
+ // the canonical type for a newly-built type.
+ New = new (*this, TypeAlignment)
+ DependentSizedExtVectorType(*this, vecType, QualType(Canon, 0),
+ SizeExpr, AttrLoc);
+ } else {
+ QualType CanonVecTy = getCanonicalType(vecType);
+ if (CanonVecTy == vecType) {
+ New = new (*this, TypeAlignment)
+ DependentSizedExtVectorType(*this, vecType, QualType(), SizeExpr,
+ AttrLoc);
+
+ DependentSizedExtVectorType *CanonCheck
+ = DependentSizedExtVectorTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CanonCheck && "Dependent-sized ext_vector canonical type broken");
+ (void)CanonCheck;
+ DependentSizedExtVectorTypes.InsertNode(New, InsertPos);
+ } else {
+ QualType Canon = getDependentSizedExtVectorType(CanonVecTy, SizeExpr,
+ SourceLocation());
+ New = new (*this, TypeAlignment)
+ DependentSizedExtVectorType(*this, vecType, Canon, SizeExpr, AttrLoc);
+ }
+ }
+
+ Types.push_back(New);
+ return QualType(New, 0);
+}
+
+/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
+///
+QualType
+ASTContext::getFunctionNoProtoType(QualType ResultTy,
+ const FunctionType::ExtInfo &Info) const {
+ const CallingConv DefaultCC = Info.getCC();
+ const CallingConv CallConv = (LangOpts.MRTD && DefaultCC == CC_Default) ?
+ CC_X86StdCall : DefaultCC;
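+  // Under -mrtd (LangOpts.MRTD), functions that would otherwise use the
+  // default calling convention are treated as x86 stdcall.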
+ // Unique functions, to guarantee there is only one function of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ FunctionNoProtoType::Profile(ID, ResultTy, Info);
+
+ void *InsertPos = 0;
+ if (FunctionNoProtoType *FT =
+ FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(FT, 0);
+
+ QualType Canonical;
+ if (!ResultTy.isCanonical() ||
+ getCanonicalCallConv(CallConv) != CallConv) {
+ Canonical =
+ getFunctionNoProtoType(getCanonicalType(ResultTy),
+ Info.withCallingConv(getCanonicalCallConv(CallConv)));
+
+ // Get the new insert position for the node we care about.
+ FunctionNoProtoType *NewIP =
+ FunctionNoProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+
+ FunctionProtoType::ExtInfo newInfo = Info.withCallingConv(CallConv);
+ FunctionNoProtoType *New = new (*this, TypeAlignment)
+ FunctionNoProtoType(ResultTy, Canonical, newInfo);
+ Types.push_back(New);
+ FunctionNoProtoTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getFunctionType - Return a normal function type with a typed argument
+/// list; EPI.Variadic indicates whether the argument list includes '...'.
+QualType
+ASTContext::getFunctionType(QualType ResultTy,
+ const QualType *ArgArray, unsigned NumArgs,
+ const FunctionProtoType::ExtProtoInfo &EPI) const {
+ // Unique functions, to guarantee there is only one function of a particular
+ // structure.
+ llvm::FoldingSetNodeID ID;
+ FunctionProtoType::Profile(ID, ResultTy, ArgArray, NumArgs, EPI, *this);
+
+ void *InsertPos = 0;
+ if (FunctionProtoType *FTP =
+ FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(FTP, 0);
+
+ // Determine whether the type being created is already canonical or not.
+ bool isCanonical =
+ EPI.ExceptionSpecType == EST_None && ResultTy.isCanonical() &&
+ !EPI.HasTrailingReturn;
+ for (unsigned i = 0; i != NumArgs && isCanonical; ++i)
+ if (!ArgArray[i].isCanonicalAsParam())
+ isCanonical = false;
+
+ const CallingConv DefaultCC = EPI.ExtInfo.getCC();
+ const CallingConv CallConv = (LangOpts.MRTD && DefaultCC == CC_Default) ?
+ CC_X86StdCall : DefaultCC;
+
+ // If this type isn't canonical, get the canonical version of it.
+ // The exception spec is not part of the canonical type.
+ QualType Canonical;
+ if (!isCanonical || getCanonicalCallConv(CallConv) != CallConv) {
+ SmallVector<QualType, 16> CanonicalArgs;
+ CanonicalArgs.reserve(NumArgs);
+ for (unsigned i = 0; i != NumArgs; ++i)
+ CanonicalArgs.push_back(getCanonicalParamType(ArgArray[i]));
+
+ FunctionProtoType::ExtProtoInfo CanonicalEPI = EPI;
+ CanonicalEPI.HasTrailingReturn = false;
+ CanonicalEPI.ExceptionSpecType = EST_None;
+ CanonicalEPI.NumExceptions = 0;
+ CanonicalEPI.ExtInfo
+ = CanonicalEPI.ExtInfo.withCallingConv(getCanonicalCallConv(CallConv));
+
+ Canonical = getFunctionType(getCanonicalType(ResultTy),
+ CanonicalArgs.data(), NumArgs,
+ CanonicalEPI);
+
+ // Get the new insert position for the node we care about.
+ FunctionProtoType *NewIP =
+ FunctionProtoTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+
+ // FunctionProtoType objects are allocated with extra bytes after
+ // them for three variable size arrays at the end:
+ // - parameter types
+ // - exception types
+ // - consumed-arguments flags
+ // Instead of the exception types, there could be a noexcept
+ // expression.
+ size_t Size = sizeof(FunctionProtoType) +
+ NumArgs * sizeof(QualType);
+ if (EPI.ExceptionSpecType == EST_Dynamic)
+ Size += EPI.NumExceptions * sizeof(QualType);
+ else if (EPI.ExceptionSpecType == EST_ComputedNoexcept) {
+ Size += sizeof(Expr*);
+ }
+ if (EPI.ConsumedArguments)
+ Size += NumArgs * sizeof(bool);
+
+ FunctionProtoType *FTP = (FunctionProtoType*) Allocate(Size, TypeAlignment);
+ FunctionProtoType::ExtProtoInfo newEPI = EPI;
+ newEPI.ExtInfo = EPI.ExtInfo.withCallingConv(CallConv);
+ new (FTP) FunctionProtoType(ResultTy, ArgArray, NumArgs, Canonical, newEPI);
+ Types.push_back(FTP);
+ FunctionProtoTypes.InsertNode(FTP, InsertPos);
+ return QualType(FTP, 0);
+}
+
+#ifndef NDEBUG
+static bool NeedsInjectedClassNameType(const RecordDecl *D) {
+ if (!isa<CXXRecordDecl>(D)) return false;
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(D);
+ if (isa<ClassTemplatePartialSpecializationDecl>(RD))
+ return true;
+ if (RD->getDescribedClassTemplate() &&
+ !isa<ClassTemplateSpecializationDecl>(RD))
+ return true;
+ return false;
+}
+#endif
+
+/// getInjectedClassNameType - Return the unique reference to the
+/// injected class name type for the specified templated declaration.
+QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
+ QualType TST) const {
+ assert(NeedsInjectedClassNameType(Decl));
+ if (Decl->TypeForDecl) {
+ assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
+ } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDecl()) {
+ assert(PrevDecl->TypeForDecl && "previous declaration has no type");
+ Decl->TypeForDecl = PrevDecl->TypeForDecl;
+ assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
+ } else {
+ Type *newType =
+ new (*this, TypeAlignment) InjectedClassNameType(Decl, TST);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ }
+ return QualType(Decl->TypeForDecl, 0);
+}
+
+/// getTypeDeclType - Return the unique reference to the type for the
+/// specified type declaration.
+QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) const {
+ assert(Decl && "Passed null for Decl param");
+ assert(!Decl->TypeForDecl && "TypeForDecl present in slow case");
+
+ if (const TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Decl))
+ return getTypedefType(Typedef);
+
+ assert(!isa<TemplateTypeParmDecl>(Decl) &&
+ "Template type parameter types are always available.");
+
+ if (const RecordDecl *Record = dyn_cast<RecordDecl>(Decl)) {
+ assert(!Record->getPreviousDecl() &&
+ "struct/union has previous declaration");
+ assert(!NeedsInjectedClassNameType(Record));
+ return getRecordType(Record);
+ } else if (const EnumDecl *Enum = dyn_cast<EnumDecl>(Decl)) {
+ assert(!Enum->getPreviousDecl() &&
+ "enum has previous declaration");
+ return getEnumType(Enum);
+ } else if (const UnresolvedUsingTypenameDecl *Using =
+ dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
+ Type *newType = new (*this, TypeAlignment) UnresolvedUsingType(Using);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ } else
+ llvm_unreachable("TypeDecl without a type?");
+
+ return QualType(Decl->TypeForDecl, 0);
+}
+
+/// getTypedefType - Return the unique reference to the type for the
+/// specified typedef name decl.
+QualType
+ASTContext::getTypedefType(const TypedefNameDecl *Decl,
+ QualType Canonical) const {
+ if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+
+ if (Canonical.isNull())
+ Canonical = getCanonicalType(Decl->getUnderlyingType());
+ TypedefType *newType = new(*this, TypeAlignment)
+ TypedefType(Type::Typedef, Decl, Canonical);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ return QualType(newType, 0);
+}
+
+QualType ASTContext::getRecordType(const RecordDecl *Decl) const {
+ if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+
+ if (const RecordDecl *PrevDecl = Decl->getPreviousDecl())
+ if (PrevDecl->TypeForDecl)
+ return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
+
+ RecordType *newType = new (*this, TypeAlignment) RecordType(Decl);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ return QualType(newType, 0);
+}
+
+QualType ASTContext::getEnumType(const EnumDecl *Decl) const {
+ if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+
+ if (const EnumDecl *PrevDecl = Decl->getPreviousDecl())
+ if (PrevDecl->TypeForDecl)
+ return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
+
+ EnumType *newType = new (*this, TypeAlignment) EnumType(Decl);
+ Decl->TypeForDecl = newType;
+ Types.push_back(newType);
+ return QualType(newType, 0);
+}
+
+QualType ASTContext::getAttributedType(AttributedType::Kind attrKind,
+ QualType modifiedType,
+ QualType equivalentType) {
+ llvm::FoldingSetNodeID id;
+ AttributedType::Profile(id, attrKind, modifiedType, equivalentType);
+
+ void *insertPos = 0;
+ AttributedType *type = AttributedTypes.FindNodeOrInsertPos(id, insertPos);
+ if (type) return QualType(type, 0);
+
+ QualType canon = getCanonicalType(equivalentType);
+ type = new (*this, TypeAlignment)
+ AttributedType(canon, attrKind, modifiedType, equivalentType);
+
+ Types.push_back(type);
+ AttributedTypes.InsertNode(type, insertPos);
+
+ return QualType(type, 0);
+}
+
+
+/// \brief Retrieve a substitution-result type.
+QualType
+ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
+ QualType Replacement) const {
+ assert(Replacement.isCanonical()
+ && "replacement types must always be canonical");
+
+ llvm::FoldingSetNodeID ID;
+ SubstTemplateTypeParmType::Profile(ID, Parm, Replacement);
+ void *InsertPos = 0;
+ SubstTemplateTypeParmType *SubstParm
+ = SubstTemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!SubstParm) {
+ SubstParm = new (*this, TypeAlignment)
+ SubstTemplateTypeParmType(Parm, Replacement);
+ Types.push_back(SubstParm);
+ SubstTemplateTypeParmTypes.InsertNode(SubstParm, InsertPos);
+ }
+
+ return QualType(SubstParm, 0);
+}
+
+/// \brief Retrieve the type that results from substituting the given
+/// argument pack for a template type parameter pack.
+QualType ASTContext::getSubstTemplateTypeParmPackType(
+ const TemplateTypeParmType *Parm,
+ const TemplateArgument &ArgPack) {
+#ifndef NDEBUG
+ for (TemplateArgument::pack_iterator P = ArgPack.pack_begin(),
+ PEnd = ArgPack.pack_end();
+ P != PEnd; ++P) {
+ assert(P->getKind() == TemplateArgument::Type &&"Pack contains a non-type");
+ assert(P->getAsType().isCanonical() && "Pack contains non-canonical type");
+ }
+#endif
+
+ llvm::FoldingSetNodeID ID;
+ SubstTemplateTypeParmPackType::Profile(ID, Parm, ArgPack);
+ void *InsertPos = 0;
+ if (SubstTemplateTypeParmPackType *SubstParm
+ = SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(SubstParm, 0);
+
+ QualType Canon;
+ if (!Parm->isCanonicalUnqualified()) {
+ Canon = getCanonicalType(QualType(Parm, 0));
+ Canon = getSubstTemplateTypeParmPackType(cast<TemplateTypeParmType>(Canon),
+ ArgPack);
+ SubstTemplateTypeParmPackTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ SubstTemplateTypeParmPackType *SubstParm
+ = new (*this, TypeAlignment) SubstTemplateTypeParmPackType(Parm, Canon,
+ ArgPack);
+ Types.push_back(SubstParm);
+  SubstTemplateTypeParmPackTypes.InsertNode(SubstParm, InsertPos);
+ return QualType(SubstParm, 0);
+}
+
+/// \brief Retrieve the template type parameter type for a template
+/// parameter or parameter pack with the given depth, index, and (optionally)
+/// name.
+QualType ASTContext::getTemplateTypeParmType(unsigned Depth, unsigned Index,
+ bool ParameterPack,
+ TemplateTypeParmDecl *TTPDecl) const {
+ llvm::FoldingSetNodeID ID;
+ TemplateTypeParmType::Profile(ID, Depth, Index, ParameterPack, TTPDecl);
+ void *InsertPos = 0;
+ TemplateTypeParmType *TypeParm
+ = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (TypeParm)
+ return QualType(TypeParm, 0);
+
+ if (TTPDecl) {
+ QualType Canon = getTemplateTypeParmType(Depth, Index, ParameterPack);
+ TypeParm = new (*this, TypeAlignment) TemplateTypeParmType(TTPDecl, Canon);
+
+ TemplateTypeParmType *TypeCheck
+ = TemplateTypeParmTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!TypeCheck && "Template type parameter canonical type broken");
+ (void)TypeCheck;
+ } else
+ TypeParm = new (*this, TypeAlignment)
+ TemplateTypeParmType(Depth, Index, ParameterPack);
+
+ Types.push_back(TypeParm);
+ TemplateTypeParmTypes.InsertNode(TypeParm, InsertPos);
+
+ return QualType(TypeParm, 0);
+}
+
+TypeSourceInfo *
+ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
+ SourceLocation NameLoc,
+ const TemplateArgumentListInfo &Args,
+ QualType Underlying) const {
+ assert(!Name.getAsDependentTemplateName() &&
+ "No dependent template names here!");
+ QualType TST = getTemplateSpecializationType(Name, Args, Underlying);
+
+ TypeSourceInfo *DI = CreateTypeSourceInfo(TST);
+ TemplateSpecializationTypeLoc TL
+ = cast<TemplateSpecializationTypeLoc>(DI->getTypeLoc());
+ TL.setTemplateKeywordLoc(SourceLocation());
+ TL.setTemplateNameLoc(NameLoc);
+ TL.setLAngleLoc(Args.getLAngleLoc());
+ TL.setRAngleLoc(Args.getRAngleLoc());
+ for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
+ TL.setArgLocInfo(i, Args[i].getLocInfo());
+ return DI;
+}
+
+QualType
+ASTContext::getTemplateSpecializationType(TemplateName Template,
+ const TemplateArgumentListInfo &Args,
+ QualType Underlying) const {
+ assert(!Template.getAsDependentTemplateName() &&
+ "No dependent template names here!");
+
+ unsigned NumArgs = Args.size();
+
+ SmallVector<TemplateArgument, 4> ArgVec;
+ ArgVec.reserve(NumArgs);
+ for (unsigned i = 0; i != NumArgs; ++i)
+ ArgVec.push_back(Args[i].getArgument());
+
+ return getTemplateSpecializationType(Template, ArgVec.data(), NumArgs,
+ Underlying);
+}
+
+#ifndef NDEBUG
+static bool hasAnyPackExpansions(const TemplateArgument *Args,
+ unsigned NumArgs) {
+ for (unsigned I = 0; I != NumArgs; ++I)
+ if (Args[I].isPackExpansion())
+ return true;
+
+  return false;
+}
+#endif
+
+QualType
+ASTContext::getTemplateSpecializationType(TemplateName Template,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ QualType Underlying) const {
+ assert(!Template.getAsDependentTemplateName() &&
+ "No dependent template names here!");
+ // Look through qualified template names.
+ if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+ Template = TemplateName(QTN->getTemplateDecl());
+
+ bool IsTypeAlias =
+ Template.getAsTemplateDecl() &&
+ isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
+ QualType CanonType;
+ if (!Underlying.isNull())
+ CanonType = getCanonicalType(Underlying);
+ else {
+ // We can get here with an alias template when the specialization contains
+ // a pack expansion that does not match up with a parameter pack.
+ assert((!IsTypeAlias || hasAnyPackExpansions(Args, NumArgs)) &&
+ "Caller must compute aliased type");
+ IsTypeAlias = false;
+ CanonType = getCanonicalTemplateSpecializationType(Template, Args,
+ NumArgs);
+ }
+
+ // Allocate the (non-canonical) template specialization type, but don't
+ // try to unique it: these types typically have location information that
+ // we don't unique and don't want to lose.
+ void *Mem = Allocate(sizeof(TemplateSpecializationType) +
+ sizeof(TemplateArgument) * NumArgs +
+ (IsTypeAlias? sizeof(QualType) : 0),
+ TypeAlignment);
+ TemplateSpecializationType *Spec
+ = new (Mem) TemplateSpecializationType(Template, Args, NumArgs, CanonType,
+ IsTypeAlias ? Underlying : QualType());
+
+ Types.push_back(Spec);
+ return QualType(Spec, 0);
+}
+
+QualType
+ASTContext::getCanonicalTemplateSpecializationType(TemplateName Template,
+ const TemplateArgument *Args,
+ unsigned NumArgs) const {
+ assert(!Template.getAsDependentTemplateName() &&
+ "No dependent template names here!");
+
+ // Look through qualified template names.
+ if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+ Template = TemplateName(QTN->getTemplateDecl());
+
+ // Build the canonical template specialization type.
+ TemplateName CanonTemplate = getCanonicalTemplateName(Template);
+ SmallVector<TemplateArgument, 4> CanonArgs;
+ CanonArgs.reserve(NumArgs);
+ for (unsigned I = 0; I != NumArgs; ++I)
+ CanonArgs.push_back(getCanonicalTemplateArgument(Args[I]));
+
+ // Determine whether this canonical template specialization type already
+ // exists.
+ llvm::FoldingSetNodeID ID;
+ TemplateSpecializationType::Profile(ID, CanonTemplate,
+ CanonArgs.data(), NumArgs, *this);
+
+ void *InsertPos = 0;
+ TemplateSpecializationType *Spec
+ = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!Spec) {
+ // Allocate a new canonical template specialization type.
+ void *Mem = Allocate((sizeof(TemplateSpecializationType) +
+ sizeof(TemplateArgument) * NumArgs),
+ TypeAlignment);
+ Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
+ CanonArgs.data(), NumArgs,
+ QualType(), QualType());
+ Types.push_back(Spec);
+ TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
+ }
+
+ assert(Spec->isDependentType() &&
+ "Non-dependent template-id type must have a canonical type");
+ return QualType(Spec, 0);
+}
+
+QualType
+ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ QualType NamedType) const {
+ llvm::FoldingSetNodeID ID;
+ ElaboratedType::Profile(ID, Keyword, NNS, NamedType);
+
+ void *InsertPos = 0;
+ ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ QualType Canon = NamedType;
+ if (!Canon.isCanonical()) {
+ Canon = getCanonicalType(NamedType);
+ ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CheckT && "Elaborated canonical type broken");
+ (void)CheckT;
+ }
+
+ T = new (*this) ElaboratedType(Keyword, NNS, NamedType, Canon);
+ Types.push_back(T);
+ ElaboratedTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+QualType
+ASTContext::getParenType(QualType InnerType) const {
+ llvm::FoldingSetNodeID ID;
+ ParenType::Profile(ID, InnerType);
+
+ void *InsertPos = 0;
+ ParenType *T = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ QualType Canon = InnerType;
+ if (!Canon.isCanonical()) {
+ Canon = getCanonicalType(InnerType);
+ ParenType *CheckT = ParenTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CheckT && "Paren canonical type broken");
+ (void)CheckT;
+ }
+
+ T = new (*this) ParenType(InnerType, Canon);
+ Types.push_back(T);
+ ParenTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ QualType Canon) const {
+ assert(NNS->isDependent() && "nested-name-specifier must be dependent");
+
+ if (Canon.isNull()) {
+ NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+ ElaboratedTypeKeyword CanonKeyword = Keyword;
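+    // The canonical form of a dependent name type is always spelled with the
+    // 'typename' keyword.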
+ if (Keyword == ETK_None)
+ CanonKeyword = ETK_Typename;
+
+ if (CanonNNS != NNS || CanonKeyword != Keyword)
+ Canon = getDependentNameType(CanonKeyword, CanonNNS, Name);
+ }
+
+ llvm::FoldingSetNodeID ID;
+ DependentNameType::Profile(ID, Keyword, NNS, Name);
+
+ void *InsertPos = 0;
+ DependentNameType *T
+ = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ T = new (*this) DependentNameType(Keyword, NNS, Name, Canon);
+ Types.push_back(T);
+ DependentNameTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+QualType
+ASTContext::getDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ const TemplateArgumentListInfo &Args) const {
+ // TODO: avoid this copy
+ SmallVector<TemplateArgument, 16> ArgCopy;
+ for (unsigned I = 0, E = Args.size(); I != E; ++I)
+ ArgCopy.push_back(Args[I].getArgument());
+ return getDependentTemplateSpecializationType(Keyword, NNS, Name,
+ ArgCopy.size(),
+ ArgCopy.data());
+}
+
+QualType
+ASTContext::getDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ unsigned NumArgs,
+ const TemplateArgument *Args) const {
+ assert((!NNS || NNS->isDependent()) &&
+ "nested-name-specifier must be dependent");
+
+ llvm::FoldingSetNodeID ID;
+ DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
+ Name, NumArgs, Args);
+
+ void *InsertPos = 0;
+ DependentTemplateSpecializationType *T
+ = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+
+ ElaboratedTypeKeyword CanonKeyword = Keyword;
+ if (Keyword == ETK_None) CanonKeyword = ETK_Typename;
+
+ bool AnyNonCanonArgs = false;
+ SmallVector<TemplateArgument, 16> CanonArgs(NumArgs);
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ CanonArgs[I] = getCanonicalTemplateArgument(Args[I]);
+ if (!CanonArgs[I].structurallyEquals(Args[I]))
+ AnyNonCanonArgs = true;
+ }
+
+ QualType Canon;
+ if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
+ Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
+ Name, NumArgs,
+ CanonArgs.data());
+
+ // Find the insert position again.
+ DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
+ sizeof(TemplateArgument) * NumArgs),
+ TypeAlignment);
+ T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
+ Name, NumArgs, Args, Canon);
+ Types.push_back(T);
+ DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+QualType ASTContext::getPackExpansionType(QualType Pattern,
+ llvm::Optional<unsigned> NumExpansions) {
+ llvm::FoldingSetNodeID ID;
+ PackExpansionType::Profile(ID, Pattern, NumExpansions);
+
+ assert(Pattern->containsUnexpandedParameterPack() &&
+ "Pack expansions must expand one or more parameter packs");
+ void *InsertPos = 0;
+ PackExpansionType *T
+ = PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (T)
+ return QualType(T, 0);
+
+ QualType Canon;
+ if (!Pattern.isCanonical()) {
+ Canon = getPackExpansionType(getCanonicalType(Pattern), NumExpansions);
+
+ // Find the insert position again.
+ PackExpansionTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ T = new (*this) PackExpansionType(Pattern, Canon, NumExpansions);
+ Types.push_back(T);
+ PackExpansionTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+/// CmpProtocolNames - Comparison predicate for sorting protocols
+/// alphabetically.
+static bool CmpProtocolNames(const ObjCProtocolDecl *LHS,
+ const ObjCProtocolDecl *RHS) {
+ return LHS->getDeclName() < RHS->getDeclName();
+}
+
+static bool areSortedAndUniqued(ObjCProtocolDecl * const *Protocols,
+ unsigned NumProtocols) {
+ if (NumProtocols == 0) return true;
+
+ if (Protocols[0]->getCanonicalDecl() != Protocols[0])
+ return false;
+
+ for (unsigned i = 1; i != NumProtocols; ++i)
+ if (!CmpProtocolNames(Protocols[i-1], Protocols[i]) ||
+ Protocols[i]->getCanonicalDecl() != Protocols[i])
+ return false;
+ return true;
+}
+
+static void SortAndUniqueProtocols(ObjCProtocolDecl **Protocols,
+ unsigned &NumProtocols) {
+ ObjCProtocolDecl **ProtocolsEnd = Protocols+NumProtocols;
+
+ // Sort protocols, keyed by name.
+ std::sort(Protocols, Protocols+NumProtocols, CmpProtocolNames);
+
+ // Canonicalize.
+ for (unsigned I = 0, N = NumProtocols; I != N; ++I)
+ Protocols[I] = Protocols[I]->getCanonicalDecl();
+
+ // Remove duplicates.
+ ProtocolsEnd = std::unique(Protocols, ProtocolsEnd);
+ NumProtocols = ProtocolsEnd-Protocols;
+}
+
+QualType ASTContext::getObjCObjectType(QualType BaseType,
+ ObjCProtocolDecl * const *Protocols,
+ unsigned NumProtocols) const {
+ // If the base type is an interface and there aren't any protocols
+ // to add, then the interface type will do just fine.
+ if (!NumProtocols && isa<ObjCInterfaceType>(BaseType))
+ return BaseType;
+
+ // Look in the folding set for an existing type.
+ llvm::FoldingSetNodeID ID;
+ ObjCObjectTypeImpl::Profile(ID, BaseType, Protocols, NumProtocols);
+ void *InsertPos = 0;
+ if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(QT, 0);
+
+ // Build the canonical type, which has the canonical base type and
+ // a sorted-and-uniqued list of protocols.
+ QualType Canonical;
+ bool ProtocolsSorted = areSortedAndUniqued(Protocols, NumProtocols);
+ if (!ProtocolsSorted || !BaseType.isCanonical()) {
+ if (!ProtocolsSorted) {
+ SmallVector<ObjCProtocolDecl*, 8> Sorted(Protocols,
+ Protocols + NumProtocols);
+ unsigned UniqueCount = NumProtocols;
+
+ SortAndUniqueProtocols(&Sorted[0], UniqueCount);
+ Canonical = getObjCObjectType(getCanonicalType(BaseType),
+ &Sorted[0], UniqueCount);
+ } else {
+ Canonical = getObjCObjectType(getCanonicalType(BaseType),
+ Protocols, NumProtocols);
+ }
+
+ // Regenerate InsertPos.
+ ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ unsigned Size = sizeof(ObjCObjectTypeImpl);
+ Size += NumProtocols * sizeof(ObjCProtocolDecl *);
+ void *Mem = Allocate(Size, TypeAlignment);
+ ObjCObjectTypeImpl *T =
+ new (Mem) ObjCObjectTypeImpl(Canonical, BaseType, Protocols, NumProtocols);
+
+ Types.push_back(T);
+ ObjCObjectTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
+}
+
+/// getObjCObjectPointerType - Return an ObjCObjectPointerType for
+/// the given object type.
+QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) const {
+ llvm::FoldingSetNodeID ID;
+ ObjCObjectPointerType::Profile(ID, ObjectT);
+
+ void *InsertPos = 0;
+ if (ObjCObjectPointerType *QT =
+ ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(QT, 0);
+
+ // Find the canonical object type.
+ QualType Canonical;
+ if (!ObjectT.isCanonical()) {
+ Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));
+
+ // Regenerate InsertPos.
+ ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ // No match.
+ void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
+ ObjCObjectPointerType *QType =
+ new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
+
+ Types.push_back(QType);
+ ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
+ return QualType(QType, 0);
+}
+
+/// getObjCInterfaceType - Return the unique reference to the type for the
+/// specified ObjC interface decl. The list of protocols is optional.
+QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
+ ObjCInterfaceDecl *PrevDecl) const {
+ if (Decl->TypeForDecl)
+ return QualType(Decl->TypeForDecl, 0);
+
+ if (PrevDecl) {
+ assert(PrevDecl->TypeForDecl && "previous decl has no TypeForDecl");
+ Decl->TypeForDecl = PrevDecl->TypeForDecl;
+ return QualType(PrevDecl->TypeForDecl, 0);
+ }
+
+ // Prefer the definition, if there is one.
+ if (const ObjCInterfaceDecl *Def = Decl->getDefinition())
+ Decl = Def;
+
+ void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
+ ObjCInterfaceType *T = new (Mem) ObjCInterfaceType(Decl);
+ Decl->TypeForDecl = T;
+ Types.push_back(T);
+ return QualType(T, 0);
+}
+
+/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
+/// TypeOfExprType ASTs (since expressions are never shared). For example,
+/// multiple declarations that refer to "typeof(x)" all contain different
+/// DeclRefExprs. This doesn't affect the type checker, since it operates
+/// on canonical types (which are always unique).
+QualType ASTContext::getTypeOfExprType(Expr *tofExpr) const {
+ TypeOfExprType *toe;
+ if (tofExpr->isTypeDependent()) {
+ llvm::FoldingSetNodeID ID;
+ DependentTypeOfExprType::Profile(ID, *this, tofExpr);
+
+ void *InsertPos = 0;
+ DependentTypeOfExprType *Canon
+ = DependentTypeOfExprTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (Canon) {
+ // We already have a "canonical" version of an identical, dependent
+ // typeof(expr) type. Use that as our canonical type.
+ toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr,
+ QualType((TypeOfExprType*)Canon, 0));
+ } else {
+ // Build a new, canonical typeof(expr) type.
+ Canon
+ = new (*this, TypeAlignment) DependentTypeOfExprType(*this, tofExpr);
+ DependentTypeOfExprTypes.InsertNode(Canon, InsertPos);
+ toe = Canon;
+ }
+ } else {
+ QualType Canonical = getCanonicalType(tofExpr->getType());
+ toe = new (*this, TypeAlignment) TypeOfExprType(tofExpr, Canonical);
+ }
+ Types.push_back(toe);
+ return QualType(toe, 0);
+}
+
+/// getTypeOfType - Unlike many "get<Type>" functions, we don't unique
+/// TypeOfType ASTs. The only motivation to unique these nodes would be
+/// memory savings. Since typeof(t) is fairly uncommon, space shouldn't be
+/// an issue. This doesn't affect the type checker, since it operates
+/// on canonical types (which are always unique).
+QualType ASTContext::getTypeOfType(QualType tofType) const {
+ QualType Canonical = getCanonicalType(tofType);
+ TypeOfType *tot = new (*this, TypeAlignment) TypeOfType(tofType, Canonical);
+ Types.push_back(tot);
+ return QualType(tot, 0);
+}
+
+
+/// getDecltypeType - Unlike many "get<Type>" functions, we don't unique
+/// DecltypeType ASTs. The only motivation to unique these nodes would be
+/// memory savings. Since decltype(t) is fairly uncommon, space shouldn't be
+/// an issue. This doesn't affect the type checker, since it operates
+/// on canonical types (which are always unique).
+QualType ASTContext::getDecltypeType(Expr *e, QualType UnderlyingType) const {
+ DecltypeType *dt;
+
+ // C++0x [temp.type]p2:
+ // If an expression e involves a template parameter, decltype(e) denotes a
+ // unique dependent type. Two such decltype-specifiers refer to the same
+ // type only if their expressions are equivalent (14.5.6.1).
+ if (e->isInstantiationDependent()) {
+ llvm::FoldingSetNodeID ID;
+ DependentDecltypeType::Profile(ID, *this, e);
+
+ void *InsertPos = 0;
+ DependentDecltypeType *Canon
+ = DependentDecltypeTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (Canon) {
+ // We already have a "canonical" version of an equivalent, dependent
+ // decltype type. Use that as our canonical type.
+ dt = new (*this, TypeAlignment) DecltypeType(e, DependentTy,
+ QualType((DecltypeType*)Canon, 0));
+ } else {
+      // Build a new, canonical decltype(expr) type.
+ Canon = new (*this, TypeAlignment) DependentDecltypeType(*this, e);
+ DependentDecltypeTypes.InsertNode(Canon, InsertPos);
+ dt = Canon;
+ }
+ } else {
+ dt = new (*this, TypeAlignment) DecltypeType(e, UnderlyingType,
+ getCanonicalType(UnderlyingType));
+ }
+ Types.push_back(dt);
+ return QualType(dt, 0);
+}
+
+/// getUnaryTransformType - We don't unique these, since the memory
+/// savings are minimal and these are rare.
+QualType ASTContext::getUnaryTransformType(QualType BaseType,
+ QualType UnderlyingType,
+ UnaryTransformType::UTTKind Kind)
+ const {
+ UnaryTransformType *Ty =
+ new (*this, TypeAlignment) UnaryTransformType (BaseType, UnderlyingType,
+ Kind,
+ UnderlyingType->isDependentType() ?
+ QualType() : getCanonicalType(UnderlyingType));
+ Types.push_back(Ty);
+ return QualType(Ty, 0);
+}
+
+/// getAutoType - We only unique auto types after they've been deduced.
+QualType ASTContext::getAutoType(QualType DeducedType) const {
+ void *InsertPos = 0;
+ if (!DeducedType.isNull()) {
+ // Look in the folding set for an existing type.
+ llvm::FoldingSetNodeID ID;
+ AutoType::Profile(ID, DeducedType);
+ if (AutoType *AT = AutoTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(AT, 0);
+ }
+
+ AutoType *AT = new (*this, TypeAlignment) AutoType(DeducedType);
+ Types.push_back(AT);
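+  // InsertPos is only non-null when DeducedType was non-null and we did the
+  // folding-set lookup above; undeduced 'auto' types are never uniqued.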
+ if (InsertPos)
+ AutoTypes.InsertNode(AT, InsertPos);
+ return QualType(AT, 0);
+}
+
+/// getAtomicType - Return the uniqued reference to the atomic type for
+/// the given value type.
+QualType ASTContext::getAtomicType(QualType T) const {
+  // Unique atomic types, to guarantee there is only one atomic type of a
+  // particular structure.
+ llvm::FoldingSetNodeID ID;
+ AtomicType::Profile(ID, T);
+
+ void *InsertPos = 0;
+ if (AtomicType *AT = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(AT, 0);
+
+ // If the atomic value type isn't canonical, this won't be a canonical type
+ // either, so fill in the canonical type field.
+ QualType Canonical;
+ if (!T.isCanonical()) {
+ Canonical = getAtomicType(getCanonicalType(T));
+
+ // Get the new insert position for the node we care about.
+ AtomicType *NewIP = AtomicTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(NewIP == 0 && "Shouldn't be in the map!"); (void)NewIP;
+ }
+ AtomicType *New = new (*this, TypeAlignment) AtomicType(T, Canonical);
+ Types.push_back(New);
+ AtomicTypes.InsertNode(New, InsertPos);
+ return QualType(New, 0);
+}
+
+/// getAutoDeductType - Get type pattern for deducing against 'auto'.
+QualType ASTContext::getAutoDeductType() const {
+ if (AutoDeductTy.isNull())
+ AutoDeductTy = getAutoType(QualType());
+ assert(!AutoDeductTy.isNull() && "can't build 'auto' pattern");
+ return AutoDeductTy;
+}
+
+/// getAutoRRefDeductType - Get type pattern for deducing against 'auto &&'.
+QualType ASTContext::getAutoRRefDeductType() const {
+ if (AutoRRefDeductTy.isNull())
+ AutoRRefDeductTy = getRValueReferenceType(getAutoDeductType());
+ assert(!AutoRRefDeductTy.isNull() && "can't build 'auto &&' pattern");
+ return AutoRRefDeductTy;
+}
+
+/// getTagDeclType - Return the unique reference to the type for the
+/// specified TagDecl (struct/union/class/enum) decl.
+QualType ASTContext::getTagDeclType(const TagDecl *Decl) const {
+  assert(Decl);
+ // FIXME: What is the design on getTagDeclType when it requires casting
+ // away const? mutable?
+ return getTypeDeclType(const_cast<TagDecl*>(Decl));
+}
+
+/// getSizeType - Return the unique type for "size_t" (C99 7.17), the result
+/// of the sizeof operator (C99 6.5.3.4p4). The value is target dependent and
+/// needs to agree with the definition in <stddef.h>.
+CanQualType ASTContext::getSizeType() const {
+ return getFromTargetType(Target->getSizeType());
+}
+
+/// getIntMaxType - Return the unique type for "intmax_t" (C99 7.18.1.5).
+CanQualType ASTContext::getIntMaxType() const {
+ return getFromTargetType(Target->getIntMaxType());
+}
+
+/// getUIntMaxType - Return the unique type for "uintmax_t" (C99 7.18.1.5).
+CanQualType ASTContext::getUIntMaxType() const {
+ return getFromTargetType(Target->getUIntMaxType());
+}
+
+/// getSignedWCharType - Return the type of "signed wchar_t".
+/// Used when in C++, as a GCC extension.
+QualType ASTContext::getSignedWCharType() const {
+ // FIXME: derive from "Target" ?
+ return WCharTy;
+}
+
+/// getUnsignedWCharType - Return the type of "unsigned wchar_t".
+/// Used when in C++, as a GCC extension.
+QualType ASTContext::getUnsignedWCharType() const {
+ // FIXME: derive from "Target" ?
+ return UnsignedIntTy;
+}
+
+/// getPointerDiffType - Return the unique type for "ptrdiff_t" (C99 7.17)
+/// defined in <stddef.h>. Pointer - pointer requires this (C99 6.5.6p9).
+QualType ASTContext::getPointerDiffType() const {
+ return getFromTargetType(Target->getPtrDiffType(0));
+}
+
+//===----------------------------------------------------------------------===//
+// Type Operators
+//===----------------------------------------------------------------------===//
+
+CanQualType ASTContext::getCanonicalParamType(QualType T) const {
+ // Push qualifiers into arrays, and then discard any remaining
+ // qualifiers.
+ T = getCanonicalType(T);
+ T = getVariableArrayDecayedType(T);
+ const Type *Ty = T.getTypePtr();
+ QualType Result;
+ if (isa<ArrayType>(Ty)) {
+ Result = getArrayDecayedType(QualType(Ty,0));
+ } else if (isa<FunctionType>(Ty)) {
+ Result = getPointerType(QualType(Ty, 0));
+ } else {
+ Result = QualType(Ty, 0);
+ }
+
+ return CanQualType::CreateUnsafe(Result);
+}
+
+QualType ASTContext::getUnqualifiedArrayType(QualType type,
+ Qualifiers &quals) {
+ SplitQualType splitType = type.getSplitUnqualifiedType();
+
+ // FIXME: getSplitUnqualifiedType() actually walks all the way to
+ // the unqualified desugared type and then drops it on the floor.
+ // We then have to strip that sugar back off with
+ // getUnqualifiedDesugaredType(), which is silly.
+ const ArrayType *AT =
+ dyn_cast<ArrayType>(splitType.Ty->getUnqualifiedDesugaredType());
+
+ // If we don't have an array, just use the results in splitType.
+ if (!AT) {
+ quals = splitType.Quals;
+ return QualType(splitType.Ty, 0);
+ }
+
+ // Otherwise, recurse on the array's element type.
+ QualType elementType = AT->getElementType();
+ QualType unqualElementType = getUnqualifiedArrayType(elementType, quals);
+
+ // If that didn't change the element type, AT has no qualifiers, so we
+ // can just use the results in splitType.
+ if (elementType == unqualElementType) {
+ assert(quals.empty()); // from the recursive call
+ quals = splitType.Quals;
+ return QualType(splitType.Ty, 0);
+ }
+
+ // Otherwise, add in the qualifiers from the outermost type, then
+ // build the type back up.
+ quals.addConsistentQualifiers(splitType.Quals);
+
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) {
+ return getConstantArrayType(unqualElementType, CAT->getSize(),
+ CAT->getSizeModifier(), 0);
+ }
+
+ if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) {
+ return getIncompleteArrayType(unqualElementType, IAT->getSizeModifier(), 0);
+ }
+
+ if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(AT)) {
+ return getVariableArrayType(unqualElementType,
+ VAT->getSizeExpr(),
+ VAT->getSizeModifier(),
+ VAT->getIndexTypeCVRQualifiers(),
+ VAT->getBracketsRange());
+ }
+
+ const DependentSizedArrayType *DSAT = cast<DependentSizedArrayType>(AT);
+ return getDependentSizedArrayType(unqualElementType, DSAT->getSizeExpr(),
+ DSAT->getSizeModifier(), 0,
+ SourceRange());
+}
+
+/// UnwrapSimilarPointerTypes - If T1 and T2 are pointer types that
+/// may be similar (C++ 4.4), replaces T1 and T2 with the type that
+/// they point to and return true. If T1 and T2 aren't pointer types
+/// or pointer-to-member types, or if they are not similar at this
+/// level, returns false and leaves T1 and T2 unchanged. Top-level
+/// qualifiers on T1 and T2 are ignored. This function will typically
+/// be called in a loop that successively "unwraps" pointer and
+/// pointer-to-member types to compare them at each level.
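+/// For example, given T1 = 'int **' and T2 = 'const int **', a single call
+/// strips one pointer level, leaving T1 = 'int *' and T2 = 'const int *'.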
+bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) {
+ const PointerType *T1PtrType = T1->getAs<PointerType>(),
+ *T2PtrType = T2->getAs<PointerType>();
+ if (T1PtrType && T2PtrType) {
+ T1 = T1PtrType->getPointeeType();
+ T2 = T2PtrType->getPointeeType();
+ return true;
+ }
+
+ const MemberPointerType *T1MPType = T1->getAs<MemberPointerType>(),
+ *T2MPType = T2->getAs<MemberPointerType>();
+ if (T1MPType && T2MPType &&
+ hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
+ QualType(T2MPType->getClass(), 0))) {
+ T1 = T1MPType->getPointeeType();
+ T2 = T2MPType->getPointeeType();
+ return true;
+ }
+
+ if (getLangOpts().ObjC1) {
+ const ObjCObjectPointerType *T1OPType = T1->getAs<ObjCObjectPointerType>(),
+ *T2OPType = T2->getAs<ObjCObjectPointerType>();
+ if (T1OPType && T2OPType) {
+ T1 = T1OPType->getPointeeType();
+ T2 = T2OPType->getPointeeType();
+ return true;
+ }
+ }
+
+ // FIXME: Block pointers, too?
+
+ return false;
+}
+
+DeclarationNameInfo
+ASTContext::getNameForTemplate(TemplateName Name,
+ SourceLocation NameLoc) const {
+ switch (Name.getKind()) {
+ case TemplateName::QualifiedTemplate:
+ case TemplateName::Template:
+ // DNInfo work in progress: CHECKME: what about DNLoc?
+ return DeclarationNameInfo(Name.getAsTemplateDecl()->getDeclName(),
+ NameLoc);
+
+ case TemplateName::OverloadedTemplate: {
+ OverloadedTemplateStorage *Storage = Name.getAsOverloadedTemplate();
+ // DNInfo work in progress: CHECKME: what about DNLoc?
+ return DeclarationNameInfo((*Storage->begin())->getDeclName(), NameLoc);
+ }
+
+ case TemplateName::DependentTemplate: {
+ DependentTemplateName *DTN = Name.getAsDependentTemplateName();
+ DeclarationName DName;
+ if (DTN->isIdentifier()) {
+ DName = DeclarationNames.getIdentifier(DTN->getIdentifier());
+ return DeclarationNameInfo(DName, NameLoc);
+ } else {
+ DName = DeclarationNames.getCXXOperatorName(DTN->getOperator());
+ // DNInfo work in progress: FIXME: source locations?
+ DeclarationNameLoc DNLoc;
+ DNLoc.CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding();
+ DNLoc.CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding();
+ return DeclarationNameInfo(DName, NameLoc, DNLoc);
+ }
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = Name.getAsSubstTemplateTemplateParm();
+ return DeclarationNameInfo(subst->getParameter()->getDeclName(),
+ NameLoc);
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ SubstTemplateTemplateParmPackStorage *subst
+ = Name.getAsSubstTemplateTemplateParmPack();
+ return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
+ NameLoc);
+ }
+ }
+
+ llvm_unreachable("bad template name kind!");
+}
+
+TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) const {
+ switch (Name.getKind()) {
+ case TemplateName::QualifiedTemplate:
+ case TemplateName::Template: {
+ TemplateDecl *Template = Name.getAsTemplateDecl();
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(Template))
+ Template = getCanonicalTemplateTemplateParmDecl(TTP);
+
+ // The canonical template name is the canonical template declaration.
+ return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
+ }
+
+ case TemplateName::OverloadedTemplate:
+ llvm_unreachable("cannot canonicalize overloaded template");
+
+ case TemplateName::DependentTemplate: {
+ DependentTemplateName *DTN = Name.getAsDependentTemplateName();
+ assert(DTN && "Non-dependent template names must refer to template decls.");
+ return DTN->CanonicalTemplateName;
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = Name.getAsSubstTemplateTemplateParm();
+ return getCanonicalTemplateName(subst->getReplacement());
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ SubstTemplateTemplateParmPackStorage *subst
+ = Name.getAsSubstTemplateTemplateParmPack();
+ TemplateTemplateParmDecl *canonParameter
+ = getCanonicalTemplateTemplateParmDecl(subst->getParameterPack());
+ TemplateArgument canonArgPack
+ = getCanonicalTemplateArgument(subst->getArgumentPack());
+ return getSubstTemplateTemplateParmPack(canonParameter, canonArgPack);
+ }
+ }
+
+ llvm_unreachable("bad template name!");
+}
+
+bool ASTContext::hasSameTemplateName(TemplateName X, TemplateName Y) {
+ X = getCanonicalTemplateName(X);
+ Y = getCanonicalTemplateName(Y);
+ return X.getAsVoidPointer() == Y.getAsVoidPointer();
+}
+
+TemplateArgument
+ASTContext::getCanonicalTemplateArgument(const TemplateArgument &Arg) const {
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ return Arg;
+
+ case TemplateArgument::Expression:
+ return Arg;
+
+ case TemplateArgument::Declaration: {
+ if (Decl *D = Arg.getAsDecl())
+ return TemplateArgument(D->getCanonicalDecl());
+ return TemplateArgument((Decl*)0);
+ }
+
+ case TemplateArgument::Template:
+ return TemplateArgument(getCanonicalTemplateName(Arg.getAsTemplate()));
+
+ case TemplateArgument::TemplateExpansion:
+ return TemplateArgument(getCanonicalTemplateName(
+ Arg.getAsTemplateOrTemplatePattern()),
+ Arg.getNumTemplateExpansions());
+
+ case TemplateArgument::Integral:
+ return TemplateArgument(*Arg.getAsIntegral(),
+ getCanonicalType(Arg.getIntegralType()));
+
+ case TemplateArgument::Type:
+ return TemplateArgument(getCanonicalType(Arg.getAsType()));
+
+ case TemplateArgument::Pack: {
+ if (Arg.pack_size() == 0)
+ return Arg;
+
+ TemplateArgument *CanonArgs
+ = new (*this) TemplateArgument[Arg.pack_size()];
+ unsigned Idx = 0;
+ for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
+ AEnd = Arg.pack_end();
+ A != AEnd; (void)++A, ++Idx)
+ CanonArgs[Idx] = getCanonicalTemplateArgument(*A);
+
+ return TemplateArgument(CanonArgs, Arg.pack_size());
+ }
+ }
+
+ // Silence GCC warning
+ llvm_unreachable("Unhandled template argument kind");
+}
+
+NestedNameSpecifier *
+ASTContext::getCanonicalNestedNameSpecifier(NestedNameSpecifier *NNS) const {
+ if (!NNS)
+ return 0;
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ // Canonicalize the prefix but keep the identifier the same.
+ return NestedNameSpecifier::Create(*this,
+ getCanonicalNestedNameSpecifier(NNS->getPrefix()),
+ NNS->getAsIdentifier());
+
+ case NestedNameSpecifier::Namespace:
+ // A namespace is canonical; build a nested-name-specifier with
+ // this namespace and no prefix.
+ return NestedNameSpecifier::Create(*this, 0,
+ NNS->getAsNamespace()->getOriginalNamespace());
+
+ case NestedNameSpecifier::NamespaceAlias:
+ // A namespace is canonical; build a nested-name-specifier with
+ // this namespace and no prefix.
+ return NestedNameSpecifier::Create(*this, 0,
+ NNS->getAsNamespaceAlias()->getNamespace()
+ ->getOriginalNamespace());
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ QualType T = getCanonicalType(QualType(NNS->getAsType(), 0));
+
+ // If we have some kind of dependent-named type (e.g., "typename T::type"),
+ // break it apart into its prefix and identifier, then reconstitute those
+ // as the canonical nested-name-specifier. This is required to canonicalize
+ // a dependent nested-name-specifier involving typedefs of dependent-name
+ // types, e.g.,
+ // typedef typename T::type T1;
+ // typedef typename T1::type T2;
+ if (const DependentNameType *DNT = T->getAs<DependentNameType>())
+ return NestedNameSpecifier::Create(*this, DNT->getQualifier(),
+ const_cast<IdentifierInfo *>(DNT->getIdentifier()));
+
+ // Otherwise, just canonicalize the type, and force it to be a TypeSpec.
+ // FIXME: Why are TypeSpec and TypeSpecWithTemplate distinct in the
+ // first place?
+ return NestedNameSpecifier::Create(*this, 0, false,
+ const_cast<Type*>(T.getTypePtr()));
+ }
+
+ case NestedNameSpecifier::Global:
+ // The global specifier is canonical and unique.
+ return NNS;
+ }
+
+ llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
+}
+
+
+const ArrayType *ASTContext::getAsArrayType(QualType T) const {
+ // Handle the non-qualified case efficiently.
+ if (!T.hasLocalQualifiers()) {
+ // Handle the common positive case fast.
+ if (const ArrayType *AT = dyn_cast<ArrayType>(T))
+ return AT;
+ }
+
+ // Handle the common negative case fast.
+ if (!isa<ArrayType>(T.getCanonicalType()))
+ return 0;
+
+ // Apply any qualifiers from the array type to the element type. This
+ // implements C99 6.7.3p8: "If the specification of an array type includes
+ // any type qualifiers, the element type is so qualified, not the array type."
+
+ // If we get here, we either have type qualifiers on the type, or we have
+ // sugar such as a typedef in the way. If we have type qualifiers on the type
+ // we must propagate them down into the element type.
+
+ SplitQualType split = T.getSplitDesugaredType();
+ Qualifiers qs = split.Quals;
+
+ // If we have a simple case, just return now.
+ const ArrayType *ATy = dyn_cast<ArrayType>(split.Ty);
+ if (ATy == 0 || qs.empty())
+ return ATy;
+
+ // Otherwise, we have an array and we have qualifiers on it. Push the
+ // qualifiers into the array element type and return a new array type.
+ QualType NewEltTy = getQualifiedType(ATy->getElementType(), qs);
+
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(ATy))
+ return cast<ArrayType>(getConstantArrayType(NewEltTy, CAT->getSize(),
+ CAT->getSizeModifier(),
+ CAT->getIndexTypeCVRQualifiers()));
+ if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(ATy))
+ return cast<ArrayType>(getIncompleteArrayType(NewEltTy,
+ IAT->getSizeModifier(),
+ IAT->getIndexTypeCVRQualifiers()));
+
+ if (const DependentSizedArrayType *DSAT
+ = dyn_cast<DependentSizedArrayType>(ATy))
+ return cast<ArrayType>(
+ getDependentSizedArrayType(NewEltTy,
+ DSAT->getSizeExpr(),
+ DSAT->getSizeModifier(),
+ DSAT->getIndexTypeCVRQualifiers(),
+ DSAT->getBracketsRange()));
+
+ const VariableArrayType *VAT = cast<VariableArrayType>(ATy);
+ return cast<ArrayType>(getVariableArrayType(NewEltTy,
+ VAT->getSizeExpr(),
+ VAT->getSizeModifier(),
+ VAT->getIndexTypeCVRQualifiers(),
+ VAT->getBracketsRange()));
+}
+
+QualType ASTContext::getAdjustedParameterType(QualType T) {
+ // C99 6.7.5.3p7:
+ // A declaration of a parameter as "array of type" shall be
+ // adjusted to "qualified pointer to type", where the type
+ // qualifiers (if any) are those specified within the [ and ] of
+ // the array type derivation.
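+ // For example, a parameter declared as 'int a[10]' is adjusted to 'int *'.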
+ if (T->isArrayType())
+ return getArrayDecayedType(T);
+
+ // C99 6.7.5.3p8:
+ // A declaration of a parameter as "function returning type"
+ // shall be adjusted to "pointer to function returning type", as
+ // in 6.3.2.1.
+ if (T->isFunctionType())
+ return getPointerType(T);
+
+ return T;
+}
+
+QualType ASTContext::getSignatureParameterType(QualType T) {
+ T = getVariableArrayDecayedType(T);
+ T = getAdjustedParameterType(T);
+ return T.getUnqualifiedType();
+}
+
+/// getArrayDecayedType - Return the properly qualified result of decaying the
+/// specified array type to a pointer. This operation is non-trivial when
+/// handling typedefs etc. The canonical type of "T" must be an array type;
+/// this returns a pointer to a properly qualified element of the array.
+///
+/// See C99 6.7.5.3p7 and C99 6.3.2.1p3.
+QualType ASTContext::getArrayDecayedType(QualType Ty) const {
+ // Get the element type with 'getAsArrayType' so that we don't lose any
+ // typedefs in the element type of the array. This also handles propagation
+ // of type qualifiers from the array type into the element type if present
+ // (C99 6.7.3p8).
+ const ArrayType *PrettyArrayType = getAsArrayType(Ty);
+ assert(PrettyArrayType && "Not an array type!");
+
+ QualType PtrTy = getPointerType(PrettyArrayType->getElementType());
+
+ // int x[restrict 4] -> int *restrict
+ return getQualifiedType(PtrTy, PrettyArrayType->getIndexTypeQualifiers());
+}
+
+QualType ASTContext::getBaseElementType(const ArrayType *array) const {
+ return getBaseElementType(array->getElementType());
+}
+
+QualType ASTContext::getBaseElementType(QualType type) const {
+ Qualifiers qs;
+ while (true) {
+ SplitQualType split = type.getSplitDesugaredType();
+ const ArrayType *array = split.Ty->getAsArrayTypeUnsafe();
+ if (!array) break;
+
+ type = array->getElementType();
+ qs.addConsistentQualifiers(split.Quals);
+ }
+
+ return getQualifiedType(type, qs);
+}
+
+/// getConstantArrayElementCount - Returns number of constant array elements.
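+/// For example, for 'int a[2][3]' this returns 6 (2 * 3).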
+uint64_t
+ASTContext::getConstantArrayElementCount(const ConstantArrayType *CA) const {
+ uint64_t ElementCount = 1;
+ do {
+ ElementCount *= CA->getSize().getZExtValue();
+ CA = dyn_cast<ConstantArrayType>(CA->getElementType());
+ } while (CA);
+ return ElementCount;
+}
+
+/// getFloatingRank - Return a relative rank for floating point types.
+/// This routine will assert if passed a built-in type that isn't a float.
+static FloatingRank getFloatingRank(QualType T) {
+ if (const ComplexType *CT = T->getAs<ComplexType>())
+ return getFloatingRank(CT->getElementType());
+
+ assert(T->getAs<BuiltinType>() && "getFloatingRank(): not a floating type");
+ switch (T->getAs<BuiltinType>()->getKind()) {
+ default: llvm_unreachable("getFloatingRank(): not a floating type");
+ case BuiltinType::Half: return HalfRank;
+ case BuiltinType::Float: return FloatRank;
+ case BuiltinType::Double: return DoubleRank;
+ case BuiltinType::LongDouble: return LongDoubleRank;
+ }
+}
+
+/// getFloatingTypeOfSizeWithinDomain - Returns a real floating
+/// point or a complex type (based on typeDomain/typeSize).
+/// 'typeDomain' is a real floating point or complex type.
+/// 'typeSize' is a real floating point or complex type.
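+/// For example, a Size of 'float' within a Domain of '_Complex double'
+/// yields '_Complex float'.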
+QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
+ QualType Domain) const {
+ FloatingRank EltRank = getFloatingRank(Size);
+ if (Domain->isComplexType()) {
+ switch (EltRank) {
+ case HalfRank: llvm_unreachable("Complex half is not supported");
+ case FloatRank: return FloatComplexTy;
+ case DoubleRank: return DoubleComplexTy;
+ case LongDoubleRank: return LongDoubleComplexTy;
+ }
+ }
+
+ assert(Domain->isRealFloatingType() && "Unknown domain!");
+ switch (EltRank) {
+ case HalfRank: llvm_unreachable("Half ranks are not valid here");
+ case FloatRank: return FloatTy;
+ case DoubleRank: return DoubleTy;
+ case LongDoubleRank: return LongDoubleTy;
+ }
+ llvm_unreachable("getFloatingRank(): illegal value for rank");
+}
+
+/// getFloatingTypeOrder - Compare the rank of the two specified floating
+/// point types, ignoring the domain of the type (i.e. 'double' ==
+/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
+/// LHS < RHS, return -1.
+int ASTContext::getFloatingTypeOrder(QualType LHS, QualType RHS) const {
+ FloatingRank LHSR = getFloatingRank(LHS);
+ FloatingRank RHSR = getFloatingRank(RHS);
+
+ if (LHSR == RHSR)
+ return 0;
+ if (LHSR > RHSR)
+ return 1;
+ return -1;
+}
+
+/// getIntegerRank - Return an integer conversion rank (C99 6.3.1.1p1). This
+/// routine will assert if passed a built-in type that isn't an integer or enum,
+/// or if it is not canonicalized.
+unsigned ASTContext::getIntegerRank(const Type *T) const {
+ assert(T->isCanonicalUnqualified() && "T should be canonicalized");
+
+ switch (cast<BuiltinType>(T)->getKind()) {
+ default: llvm_unreachable("getIntegerRank(): not a built-in integer");
+ case BuiltinType::Bool:
+ return 1 + (getIntWidth(BoolTy) << 3);
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ return 2 + (getIntWidth(CharTy) << 3);
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ return 3 + (getIntWidth(ShortTy) << 3);
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ return 4 + (getIntWidth(IntTy) << 3);
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ return 5 + (getIntWidth(LongTy) << 3);
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ return 6 + (getIntWidth(LongLongTy) << 3);
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ return 7 + (getIntWidth(Int128Ty) << 3);
+ }
+}
+
+/// \brief Whether this is a promotable bitfield reference according
+/// to C99 6.3.1.1p2, bullet 2 (and GCC extensions).
+///
+/// \returns the type this bit-field will promote to, or NULL if no
+/// promotion occurs.
+QualType ASTContext::isPromotableBitField(Expr *E) const {
+ if (E->isTypeDependent() || E->isValueDependent())
+ return QualType();
+
+ FieldDecl *Field = E->getBitField();
+ if (!Field)
+ return QualType();
+
+ QualType FT = Field->getType();
+
+ uint64_t BitWidth = Field->getBitWidthValue(*this);
+ uint64_t IntSize = getTypeSize(IntTy);
+ // GCC extension compatibility: if the bit-field size is less than or equal
+ // to the size of int, it gets promoted no matter what its type is.
+ // For instance, unsigned long bf : 4 gets promoted to signed int.
+ if (BitWidth < IntSize)
+ return IntTy;
+
+ if (BitWidth == IntSize)
+ return FT->isSignedIntegerType() ? IntTy : UnsignedIntTy;
+
+ // Types bigger than int are not subject to promotions, and therefore act
+ // like the base type.
+ // FIXME: This doesn't quite match what gcc does, but what gcc does here
+ // is ridiculous.
+ return QualType();
+}
+
+/// getPromotedIntegerType - Returns the type that Promotable will
+/// promote to: C99 6.3.1.1p2, assuming that Promotable is a promotable
+/// integer type.
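+/// For example, on a target where 'int' is 32 bits wide, 'char16_t' promotes
+/// to 'int', the first listed type able to represent all of its values.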
+QualType ASTContext::getPromotedIntegerType(QualType Promotable) const {
+ assert(!Promotable.isNull());
+ assert(Promotable->isPromotableIntegerType());
+ if (const EnumType *ET = Promotable->getAs<EnumType>())
+ return ET->getDecl()->getPromotionType();
+
+ if (const BuiltinType *BT = Promotable->getAs<BuiltinType>()) {
+ // C++ [conv.prom]: A prvalue of type char16_t, char32_t, or wchar_t
+ // (3.9.1) can be converted to a prvalue of the first of the following
+ // types that can represent all the values of its underlying type:
+ // int, unsigned int, long int, unsigned long int, long long int, or
+ // unsigned long long int [...]
+ // FIXME: Is there some better way to compute this?
+ if (BT->getKind() == BuiltinType::WChar_S ||
+ BT->getKind() == BuiltinType::WChar_U ||
+ BT->getKind() == BuiltinType::Char16 ||
+ BT->getKind() == BuiltinType::Char32) {
+ bool FromIsSigned = BT->getKind() == BuiltinType::WChar_S;
+ uint64_t FromSize = getTypeSize(BT);
+ QualType PromoteTypes[] = { IntTy, UnsignedIntTy, LongTy, UnsignedLongTy,
+ LongLongTy, UnsignedLongLongTy };
+ for (size_t Idx = 0; Idx < llvm::array_lengthof(PromoteTypes); ++Idx) {
+ uint64_t ToSize = getTypeSize(PromoteTypes[Idx]);
+ if (FromSize < ToSize ||
+ (FromSize == ToSize &&
+ FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType()))
+ return PromoteTypes[Idx];
+ }
+ llvm_unreachable("char type should fit into long long");
+ }
+ }
+
+ // At this point, we should have a signed or unsigned integer type.
+ if (Promotable->isSignedIntegerType())
+ return IntTy;
+ uint64_t PromotableSize = getTypeSize(Promotable);
+ uint64_t IntSize = getTypeSize(IntTy);
+ assert(Promotable->isUnsignedIntegerType() && PromotableSize <= IntSize);
+ return (PromotableSize != IntSize) ? IntTy : UnsignedIntTy;
+}
+
+/// \brief Recurses in pointer/array types until it finds an objc retainable
+/// type and returns its ownership.
+Qualifiers::ObjCLifetime ASTContext::getInnerObjCOwnership(QualType T) const {
+ while (!T.isNull()) {
+ if (T.getObjCLifetime() != Qualifiers::OCL_None)
+ return T.getObjCLifetime();
+ if (T->isArrayType())
+ T = getBaseElementType(T);
+ else if (const PointerType *PT = T->getAs<PointerType>())
+ T = PT->getPointeeType();
+ else if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ T = RT->getPointeeType();
+ else
+ break;
+ }
+
+ return Qualifiers::OCL_None;
+}
+
+/// getIntegerTypeOrder - Returns the highest ranked integer type:
+/// C99 6.3.1.8p1. If LHS > RHS, return 1. If LHS == RHS, return 0. If
+/// LHS < RHS, return -1.
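+/// For example, comparing 'int' against 'unsigned int' (equal rank, mixed
+/// signedness) returns -1, i.e. the unsigned type is ranked higher, matching
+/// the usual arithmetic conversions.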
+int ASTContext::getIntegerTypeOrder(QualType LHS, QualType RHS) const {
+ const Type *LHSC = getCanonicalType(LHS).getTypePtr();
+ const Type *RHSC = getCanonicalType(RHS).getTypePtr();
+ if (LHSC == RHSC) return 0;
+
+ bool LHSUnsigned = LHSC->isUnsignedIntegerType();
+ bool RHSUnsigned = RHSC->isUnsignedIntegerType();
+
+ unsigned LHSRank = getIntegerRank(LHSC);
+ unsigned RHSRank = getIntegerRank(RHSC);
+
+ if (LHSUnsigned == RHSUnsigned) { // Both signed or both unsigned.
+ if (LHSRank == RHSRank) return 0;
+ return LHSRank > RHSRank ? 1 : -1;
+ }
+
+ // Otherwise, the LHS is signed and the RHS is unsigned or vice versa.
+ if (LHSUnsigned) {
+ // If the unsigned [LHS] type is larger, return it.
+ if (LHSRank >= RHSRank)
+ return 1;
+
+ // If the signed type can represent all values of the unsigned type, it
+ // wins. Because we are dealing with 2's complement and types that are
+ // powers of two larger than each other, this is always safe.
+ return -1;
+ }
+
+ // If the unsigned [RHS] type is larger, return it.
+ if (RHSRank >= LHSRank)
+ return -1;
+
+ // If the signed type can represent all values of the unsigned type, it
+ // wins. Because we are dealing with 2's complement and types that are
+ // powers of two larger than each other, this is always safe.
+ return 1;
+}
+
+static RecordDecl *
+CreateRecordDecl(const ASTContext &Ctx, RecordDecl::TagKind TK,
+ DeclContext *DC, IdentifierInfo *Id) {
+ SourceLocation Loc;
+ if (Ctx.getLangOpts().CPlusPlus)
+ return CXXRecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
+ else
+ return RecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
+}
+
+// getCFConstantStringType - Return the type used for constant CFStrings.
+QualType ASTContext::getCFConstantStringType() const {
+ if (!CFConstantStringTypeDecl) {
+ CFConstantStringTypeDecl =
+ CreateRecordDecl(*this, TTK_Struct, TUDecl,
+ &Idents.get("NSConstantString"));
+ CFConstantStringTypeDecl->startDefinition();
+
+ QualType FieldTypes[4];
+
+ // const int *isa;
+ FieldTypes[0] = getPointerType(IntTy.withConst());
+ // int flags;
+ FieldTypes[1] = IntTy;
+ // const char *str;
+ FieldTypes[2] = getPointerType(CharTy.withConst());
+ // long length;
+ FieldTypes[3] = LongTy;
+
+ // Create fields
+ for (unsigned i = 0; i < 4; ++i) {
+ FieldDecl *Field = FieldDecl::Create(*this, CFConstantStringTypeDecl,
+ SourceLocation(),
+ SourceLocation(), 0,
+ FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ /*HasInit=*/false);
+ Field->setAccess(AS_public);
+ CFConstantStringTypeDecl->addDecl(Field);
+ }
+
+ CFConstantStringTypeDecl->completeDefinition();
+ }
+
+ return getTagDeclType(CFConstantStringTypeDecl);
+}
+
+void ASTContext::setCFConstantStringType(QualType T) {
+ const RecordType *Rec = T->getAs<RecordType>();
+ assert(Rec && "Invalid CFConstantStringType");
+ CFConstantStringTypeDecl = Rec->getDecl();
+}
+
+QualType ASTContext::getBlockDescriptorType() const {
+ if (BlockDescriptorType)
+ return getTagDeclType(BlockDescriptorType);
+
+ RecordDecl *T;
+ // FIXME: Needs the FlagAppleBlock bit.
+ T = CreateRecordDecl(*this, TTK_Struct, TUDecl,
+ &Idents.get("__block_descriptor"));
+ T->startDefinition();
+
+ QualType FieldTypes[] = {
+ UnsignedLongTy,
+ UnsignedLongTy,
+ };
+
+ const char *FieldNames[] = {
+ "reserved",
+ "Size"
+ };
+
+ for (size_t i = 0; i < 2; ++i) {
+ FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(),
+ SourceLocation(),
+ &Idents.get(FieldNames[i]),
+ FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ /*HasInit=*/false);
+ Field->setAccess(AS_public);
+ T->addDecl(Field);
+ }
+
+ T->completeDefinition();
+
+ BlockDescriptorType = T;
+
+ return getTagDeclType(BlockDescriptorType);
+}
+
+QualType ASTContext::getBlockDescriptorExtendedType() const {
+ if (BlockDescriptorExtendedType)
+ return getTagDeclType(BlockDescriptorExtendedType);
+
+ RecordDecl *T;
+ // FIXME: Needs the FlagAppleBlock bit.
+ T = CreateRecordDecl(*this, TTK_Struct, TUDecl,
+ &Idents.get("__block_descriptor_withcopydispose"));
+ T->startDefinition();
+
+ QualType FieldTypes[] = {
+ UnsignedLongTy,
+ UnsignedLongTy,
+ getPointerType(VoidPtrTy),
+ getPointerType(VoidPtrTy)
+ };
+
+ const char *FieldNames[] = {
+ "reserved",
+ "Size",
+ "CopyFuncPtr",
+ "DestroyFuncPtr"
+ };
+
+ for (size_t i = 0; i < 4; ++i) {
+ FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(),
+ SourceLocation(),
+ &Idents.get(FieldNames[i]),
+ FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ /*HasInit=*/false);
+ Field->setAccess(AS_public);
+ T->addDecl(Field);
+ }
+
+ T->completeDefinition();
+
+ BlockDescriptorExtendedType = T;
+
+ return getTagDeclType(BlockDescriptorExtendedType);
+}
+
+bool ASTContext::BlockRequiresCopying(QualType Ty) const {
+ if (Ty->isObjCRetainableType())
+ return true;
+ if (getLangOpts().CPlusPlus) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ return RD->hasConstCopyConstructor();
+
+ }
+ }
+ return false;
+}
+
+QualType
+ASTContext::BuildByRefType(StringRef DeclName, QualType Ty) const {
+ // type = struct __Block_byref_1_X {
+ // void *__isa;
+ // struct __Block_byref_1_X *__forwarding;
+ // unsigned int __flags;
+ // unsigned int __size;
+ // void *__copy_helper; // as needed
+ // void *__destroy_helper; // as needed
+ // int X;
+ // } *
+
+ bool HasCopyAndDispose = BlockRequiresCopying(Ty);
+
+ // FIXME: Move up
+ SmallString<36> Name;
+ llvm::raw_svector_ostream(Name) << "__Block_byref_" <<
+ ++UniqueBlockByRefTypeID << '_' << DeclName;
+ RecordDecl *T;
+ T = CreateRecordDecl(*this, TTK_Struct, TUDecl, &Idents.get(Name.str()));
+ T->startDefinition();
+ QualType Int32Ty = IntTy;
+ assert(getIntWidth(IntTy) == 32 && "non-32bit int not supported");
+ QualType FieldTypes[] = {
+ getPointerType(VoidPtrTy),
+ getPointerType(getTagDeclType(T)),
+ Int32Ty,
+ Int32Ty,
+ getPointerType(VoidPtrTy),
+ getPointerType(VoidPtrTy),
+ Ty
+ };
+
+ StringRef FieldNames[] = {
+ "__isa",
+ "__forwarding",
+ "__flags",
+ "__size",
+ "__copy_helper",
+ "__destroy_helper",
+ DeclName,
+ };
+
+ for (size_t i = 0; i < 7; ++i) {
+ if (!HasCopyAndDispose && i >= 4 && i <= 5)
+ continue;
+ FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(),
+ SourceLocation(),
+ &Idents.get(FieldNames[i]),
+ FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0, /*Mutable=*/false,
+ /*HasInit=*/false);
+ Field->setAccess(AS_public);
+ T->addDecl(Field);
+ }
+
+ T->completeDefinition();
+
+ return getPointerType(getTagDeclType(T));
+}
+
+TypedefDecl *ASTContext::getObjCInstanceTypeDecl() {
+ if (!ObjCInstanceTypeDecl)
+ ObjCInstanceTypeDecl = TypedefDecl::Create(*this,
+ getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(),
+ &Idents.get("instancetype"),
+ getTrivialTypeSourceInfo(getObjCIdType()));
+ return ObjCInstanceTypeDecl;
+}
+
+// This returns true if a type has been typedefed to BOOL:
+// typedef <type> BOOL;
+static bool isTypeTypedefedAsBOOL(QualType T) {
+ if (const TypedefType *TT = dyn_cast<TypedefType>(T))
+ if (IdentifierInfo *II = TT->getDecl()->getIdentifier())
+ return II->isStr("BOOL");
+
+ return false;
+}
+
+/// getObjCEncodingTypeSize returns size of type for objective-c encoding
+/// purpose.
+CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) const {
+ if (!type->isIncompleteArrayType() && type->isIncompleteType())
+ return CharUnits::Zero();
+
+ CharUnits sz = getTypeSizeInChars(type);
+
+ // Make all integer and enum types at least as large as an int
+ if (sz.isPositive() && type->isIntegralOrEnumerationType())
+ sz = std::max(sz, getTypeSizeInChars(IntTy));
+ // Treat arrays as pointers, since that's how they're passed in.
+ else if (type->isArrayType())
+ sz = getTypeSizeInChars(VoidPtrTy);
+ return sz;
+}
+
+static inline
+std::string charUnitsToString(const CharUnits &CU) {
+ return llvm::itostr(CU.getQuantity());
+}
+
+/// getObjCEncodingForBlock - Return the encoded type for this block
+/// declaration.
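+/// For example, on a target with 4-byte pointers, a block of type
+/// 'int (^)(float)' is encoded as "i8@?0f4": 'i' for the result, 8 for the
+/// total argument frame, the block literal ('@?') at offset 0, and a float
+/// argument at offset 4.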
+std::string ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr) const {
+ std::string S;
+
+ const BlockDecl *Decl = Expr->getBlockDecl();
+ QualType BlockTy =
+ Expr->getType()->getAs<BlockPointerType>()->getPointeeType();
+ // Encode result type.
+ getObjCEncodingForType(BlockTy->getAs<FunctionType>()->getResultType(), S);
+ // Compute size of all parameters.
+ // Start with computing size of a pointer in number of bytes.
+ // FIXME: There might (should) be a better way of doing this computation!
+ SourceLocation Loc;
+ CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
+ CharUnits ParmOffset = PtrSize;
+ for (BlockDecl::param_const_iterator PI = Decl->param_begin(),
+ E = Decl->param_end(); PI != E; ++PI) {
+ QualType PType = (*PI)->getType();
+ CharUnits sz = getObjCEncodingTypeSize(PType);
+ assert (sz.isPositive() && "BlockExpr - Incomplete param type");
+ ParmOffset += sz;
+ }
+ // Size of the argument frame
+ S += charUnitsToString(ParmOffset);
+ // Block pointer and offset.
+ S += "@?0";
+
+ // Argument types.
+ ParmOffset = PtrSize;
+ for (BlockDecl::param_const_iterator PI = Decl->param_begin(), E =
+ Decl->param_end(); PI != E; ++PI) {
+ ParmVarDecl *PVDecl = *PI;
+ QualType PType = PVDecl->getOriginalType();
+ if (const ArrayType *AT =
+ dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
+ // Use the array's original type only if it has a known number of
+ // elements.
+ if (!isa<ConstantArrayType>(AT))
+ PType = PVDecl->getType();
+ } else if (PType->isFunctionType())
+ PType = PVDecl->getType();
+ getObjCEncodingForType(PType, S);
+ S += charUnitsToString(ParmOffset);
+ ParmOffset += getObjCEncodingTypeSize(PType);
+ }
+
+ return S;
+}
+
+bool ASTContext::getObjCEncodingForFunctionDecl(const FunctionDecl *Decl,
+ std::string& S) {
+ // Encode result type.
+ getObjCEncodingForType(Decl->getResultType(), S);
+ CharUnits ParmOffset;
+ // Compute size of all parameters.
+ for (FunctionDecl::param_const_iterator PI = Decl->param_begin(),
+ E = Decl->param_end(); PI != E; ++PI) {
+ QualType PType = (*PI)->getType();
+ CharUnits sz = getObjCEncodingTypeSize(PType);
+ if (sz.isZero())
+ return true;
+
+ assert (sz.isPositive() &&
+ "getObjCEncodingForFunctionDecl - Incomplete param type");
+ ParmOffset += sz;
+ }
+ S += charUnitsToString(ParmOffset);
+ ParmOffset = CharUnits::Zero();
+
+ // Argument types.
+ for (FunctionDecl::param_const_iterator PI = Decl->param_begin(),
+ E = Decl->param_end(); PI != E; ++PI) {
+ ParmVarDecl *PVDecl = *PI;
+ QualType PType = PVDecl->getOriginalType();
+ if (const ArrayType *AT =
+ dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
+ // Use the array's original type only if it has a known number of
+ // elements.
+ if (!isa<ConstantArrayType>(AT))
+ PType = PVDecl->getType();
+ } else if (PType->isFunctionType())
+ PType = PVDecl->getType();
+ getObjCEncodingForType(PType, S);
+ S += charUnitsToString(ParmOffset);
+ ParmOffset += getObjCEncodingTypeSize(PType);
+ }
+
+ return false;
+}
+
+/// getObjCEncodingForMethodParameter - Return the encoded type for a single
+/// method parameter or return type. If Extended, include class names and
+/// block object types.
+void ASTContext::getObjCEncodingForMethodParameter(Decl::ObjCDeclQualifier QT,
+ QualType T, std::string& S,
+ bool Extended) const {
+ // Encode type qualifier, 'in', 'inout', etc. for the parameter.
+ getObjCEncodingForTypeQualifier(QT, S);
+ // Encode parameter type.
+ getObjCEncodingForTypeImpl(T, S, true, true, 0,
+ true /*OutermostType*/,
+ false /*EncodingProperty*/,
+ false /*StructField*/,
+ Extended /*EncodeBlockParameters*/,
+ Extended /*EncodeClassNames*/);
+}
+
+/// getObjCEncodingForMethodDecl - Return the encoded type for this method
+/// declaration.
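+/// For example, on a target with 4-byte pointers, '- (void)setValue:(int)v'
+/// is encoded as "v12@0:4i8": 'v' for the result, 12 for the argument frame,
+/// self ('@') at offset 0, _cmd (':') at offset 4, and the int argument at
+/// offset 8.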
+bool ASTContext::getObjCEncodingForMethodDecl(const ObjCMethodDecl *Decl,
+ std::string& S,
+ bool Extended) const {
+ // FIXME: This is not very efficient.
+ // Encode return type.
+ getObjCEncodingForMethodParameter(Decl->getObjCDeclQualifier(),
+ Decl->getResultType(), S, Extended);
+ // Compute size of all parameters.
+ // Start with computing size of a pointer in number of bytes.
+ // FIXME: There might (should) be a better way of doing this computation!
+ SourceLocation Loc;
+ CharUnits PtrSize = getTypeSizeInChars(VoidPtrTy);
+ // The first two arguments (self and _cmd) are pointers; account for
+ // their size.
+ CharUnits ParmOffset = 2 * PtrSize;
+ for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
+ E = Decl->sel_param_end(); PI != E; ++PI) {
+ QualType PType = (*PI)->getType();
+ CharUnits sz = getObjCEncodingTypeSize(PType);
+ if (sz.isZero())
+ return true;
+
+ assert (sz.isPositive() &&
+ "getObjCEncodingForMethodDecl - Incomplete param type");
+ ParmOffset += sz;
+ }
+ S += charUnitsToString(ParmOffset);
+ S += "@0:";
+ S += charUnitsToString(PtrSize);
+
+ // Argument types.
+ ParmOffset = 2 * PtrSize;
+ for (ObjCMethodDecl::param_const_iterator PI = Decl->param_begin(),
+ E = Decl->sel_param_end(); PI != E; ++PI) {
+ const ParmVarDecl *PVDecl = *PI;
+ QualType PType = PVDecl->getOriginalType();
+ if (const ArrayType *AT =
+ dyn_cast<ArrayType>(PType->getCanonicalTypeInternal())) {
+ // Use the array's original type only if it has a known number of
+ // elements.
+ if (!isa<ConstantArrayType>(AT))
+ PType = PVDecl->getType();
+ } else if (PType->isFunctionType())
+ PType = PVDecl->getType();
+ getObjCEncodingForMethodParameter(PVDecl->getObjCDeclQualifier(),
+ PType, S, Extended);
+ S += charUnitsToString(ParmOffset);
+ ParmOffset += getObjCEncodingTypeSize(PType);
+ }
+
+ return false;
+}
+
+/// getObjCEncodingForPropertyDecl - Return the encoded type for this
+/// property declaration. If non-NULL, Container must be either an
+/// ObjCCategoryImplDecl or ObjCImplementationDecl; it should only be
+/// NULL when getting encodings for protocol properties.
+/// Property attributes are stored as a comma-delimited C string. The simple
+/// attributes readonly and bycopy are encoded as single characters. The
+/// parametrized attributes, getter=name, setter=name, and ivar=name, are
+/// encoded as single characters, followed by an identifier. Property types
+/// are also encoded as a parametrized attribute. The characters used to encode
+/// these attributes are defined by the following enumeration:
+/// @code
+/// enum PropertyAttributes {
+/// kPropertyReadOnly = 'R', // property is read-only.
+/// kPropertyBycopy = 'C', // property is a copy of the value last assigned
+/// kPropertyByref = '&', // property is a reference to the value last assigned
+/// kPropertyDynamic = 'D', // property is dynamic
+/// kPropertyGetter = 'G', // followed by getter selector name
+/// kPropertySetter = 'S', // followed by setter selector name
+/// kPropertyInstanceVariable = 'V', // followed by instance variable name
+/// kPropertyType = 'T', // followed by old-style type encoding.
+/// kPropertyWeak = 'W', // 'weak' property
+/// kPropertyStrong = 'P', // property GC'able
+/// kPropertyNonAtomic = 'N' // property non-atomic
+/// };
+/// @endcode
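+/// For example, a synthesized '@property (nonatomic, retain) NSString *name'
+/// backed by an ivar named '_name' is encoded as: T@"NSString",&,N,V_name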
+void ASTContext::getObjCEncodingForPropertyDecl(const ObjCPropertyDecl *PD,
+ const Decl *Container,
+ std::string& S) const {
+ // Collect information from the property implementation decl(s).
+ bool Dynamic = false;
+ ObjCPropertyImplDecl *SynthesizePID = 0;
+
+ // FIXME: Duplicated code due to poor abstraction.
+ if (Container) {
+ if (const ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(Container)) {
+ for (ObjCCategoryImplDecl::propimpl_iterator
+ i = CID->propimpl_begin(), e = CID->propimpl_end();
+ i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+ if (PID->getPropertyDecl() == PD) {
+ if (PID->getPropertyImplementation()==ObjCPropertyImplDecl::Dynamic) {
+ Dynamic = true;
+ } else {
+ SynthesizePID = PID;
+ }
+ }
+ }
+ } else {
+ const ObjCImplementationDecl *OID=cast<ObjCImplementationDecl>(Container);
+ for (ObjCCategoryImplDecl::propimpl_iterator
+ i = OID->propimpl_begin(), e = OID->propimpl_end();
+ i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+ if (PID->getPropertyDecl() == PD) {
+ if (PID->getPropertyImplementation()==ObjCPropertyImplDecl::Dynamic) {
+ Dynamic = true;
+ } else {
+ SynthesizePID = PID;
+ }
+ }
+ }
+ }
+ }
+
+ // FIXME: This is not very efficient.
+ S = "T";
+
+ // Encode result type.
+ // GCC has some special rules regarding encoding of properties which
+ // closely resembles encoding of ivars.
+ getObjCEncodingForTypeImpl(PD->getType(), S, true, true, 0,
+ true /* outermost type */,
+ true /* encoding for property */);
+
+ if (PD->isReadOnly()) {
+ S += ",R";
+ } else {
+ switch (PD->getSetterKind()) {
+ case ObjCPropertyDecl::Assign: break;
+ case ObjCPropertyDecl::Copy: S += ",C"; break;
+ case ObjCPropertyDecl::Retain: S += ",&"; break;
+ case ObjCPropertyDecl::Weak: S += ",W"; break;
+ }
+ }
+
+ // It really isn't clear at all what this means, since properties
+ // are "dynamic by default".
+ if (Dynamic)
+ S += ",D";
+
+ if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ S += ",N";
+
+ if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
+ S += ",G";
+ S += PD->getGetterName().getAsString();
+ }
+
+ if (PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
+ S += ",S";
+ S += PD->getSetterName().getAsString();
+ }
+
+ if (SynthesizePID) {
+ const ObjCIvarDecl *OID = SynthesizePID->getPropertyIvarDecl();
+ S += ",V";
+ S += OID->getNameAsString();
+ }
+
+ // FIXME: OBJCGC: weak & strong
+}
+
+/// getLegacyIntegralTypeEncoding -
+/// Another legacy compatibility encoding: 32-bit longs are encoded as
+/// 'l' or 'L', but not always. For typedefs, we need to use
+/// 'i' or 'I' instead if encoding a struct field, or a pointer!
+///
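+/// For example, when 'long' is 32 bits wide, a struct field (or pointee)
+/// whose type is a typedef of 'unsigned long' is encoded as 'I' rather
+/// than 'L'.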
+void ASTContext::getLegacyIntegralTypeEncoding (QualType &PointeeTy) const {
+ if (isa<TypedefType>(PointeeTy.getTypePtr())) {
+ if (const BuiltinType *BT = PointeeTy->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::ULong && getIntWidth(PointeeTy) == 32)
+ PointeeTy = UnsignedIntTy;
+ else
+ if (BT->getKind() == BuiltinType::Long && getIntWidth(PointeeTy) == 32)
+ PointeeTy = IntTy;
+ }
+ }
+}
+
+void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
+ const FieldDecl *Field) const {
+ // We follow the behavior of gcc, expanding structures which are
+ // directly pointed to, and expanding embedded structures. Note that
+ // these rules are sufficient to prevent recursive encoding of the
+ // same type.
+ getObjCEncodingForTypeImpl(T, S, true, true, Field,
+ true /* outermost type */);
+}
+
+static char ObjCEncodingForPrimitiveKind(const ASTContext *C, QualType T) {
+ switch (T->getAs<BuiltinType>()->getKind()) {
+ default: llvm_unreachable("Unhandled builtin type kind");
+ case BuiltinType::Void: return 'v';
+ case BuiltinType::Bool: return 'B';
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar: return 'C';
+ case BuiltinType::UShort: return 'S';
+ case BuiltinType::UInt: return 'I';
+ case BuiltinType::ULong:
+ return C->getIntWidth(T) == 32 ? 'L' : 'Q';
+ case BuiltinType::UInt128: return 'T';
+ case BuiltinType::ULongLong: return 'Q';
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar: return 'c';
+ case BuiltinType::Short: return 's';
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Int: return 'i';
+ case BuiltinType::Long:
+ return C->getIntWidth(T) == 32 ? 'l' : 'q';
+ case BuiltinType::LongLong: return 'q';
+ case BuiltinType::Int128: return 't';
+ case BuiltinType::Float: return 'f';
+ case BuiltinType::Double: return 'd';
+ case BuiltinType::LongDouble: return 'D';
+ }
+}
+
+static char ObjCEncodingForEnumType(const ASTContext *C, const EnumType *ET) {
+ EnumDecl *Enum = ET->getDecl();
+
+ // The encoding of a non-fixed enum type is always 'i', regardless of size.
+ if (!Enum->isFixed())
+ return 'i';
+
+ // The encoding of a fixed enum type matches its fixed underlying type.
+ return ObjCEncodingForPrimitiveKind(C, Enum->getIntegerType());
+}
+
+static void EncodeBitField(const ASTContext *Ctx, std::string& S,
+ QualType T, const FieldDecl *FD) {
+ assert(FD->isBitField() && "not a bitfield - getObjCEncodingForTypeImpl");
+ S += 'b';
+ // The NeXT runtime encodes bit fields as b followed by the number of bits.
+ // The GNU runtime requires more information; bitfields are encoded as b,
+ // then the offset (in bits) of the first element, then the type of the
+ // bitfield, then the size in bits. For example, in this structure:
+ //
+ // struct
+ // {
+ // int integer;
+ // int flags:2;
+ // };
+ // On a 32-bit system, the encoding for flags would be b2 for the NeXT
+ // runtime, but b32i2 for the GNU runtime. The reason for this extra
+ // information is not especially sensible, but we're stuck with it for
+ // compatibility with GCC, although providing it breaks anything that
+ // actually uses runtime introspection and wants to work on both runtimes...
+ if (!Ctx->getLangOpts().NeXTRuntime) {
+ const RecordDecl *RD = FD->getParent();
+ const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD);
+ S += llvm::utostr(RL.getFieldOffset(FD->getFieldIndex()));
+ if (const EnumType *ET = T->getAs<EnumType>())
+ S += ObjCEncodingForEnumType(Ctx, ET);
+ else
+ S += ObjCEncodingForPrimitiveKind(Ctx, T);
+ }
+ S += llvm::utostr(FD->getBitWidthValue(*Ctx));
+}
+
+// FIXME: Use SmallString for accumulating string.
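+// For example, '@encode(struct Point { int x, y; })' yields "{Point=ii}" and
+// '@encode(int *)' yields "^i".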
+void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
+ bool ExpandPointedToStructures,
+ bool ExpandStructures,
+ const FieldDecl *FD,
+ bool OutermostType,
+ bool EncodingProperty,
+ bool StructField,
+ bool EncodeBlockParameters,
+ bool EncodeClassNames) const {
+ if (T->getAs<BuiltinType>()) {
+ if (FD && FD->isBitField())
+ return EncodeBitField(this, S, T, FD);
+ S += ObjCEncodingForPrimitiveKind(this, T);
+ return;
+ }
+
+ if (const ComplexType *CT = T->getAs<ComplexType>()) {
+ S += 'j';
+ getObjCEncodingForTypeImpl(CT->getElementType(), S, false, false, 0, false,
+ false);
+ return;
+ }
+
+ // Encoding for pointer or reference types.
+ QualType PointeeTy;
+ if (const PointerType *PT = T->getAs<PointerType>()) {
+ if (PT->isObjCSelType()) {
+ S += ':';
+ return;
+ }
+ PointeeTy = PT->getPointeeType();
+ }
+ else if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ PointeeTy = RT->getPointeeType();
+ if (!PointeeTy.isNull()) {
+ bool isReadOnly = false;
+ // For historical/compatibility reasons, the read-only qualifier of the
+ // pointee gets emitted _before_ the '^'. The read-only qualifier of
+ // the pointer itself gets ignored, _unless_ we are looking at a typedef!
+ // Also, do not emit the 'r' for anything but the outermost type!
+ if (isa<TypedefType>(T.getTypePtr())) {
+ if (OutermostType && T.isConstQualified()) {
+ isReadOnly = true;
+ S += 'r';
+ }
+ } else if (OutermostType) {
+ QualType P = PointeeTy;
+ while (P->getAs<PointerType>())
+ P = P->getAs<PointerType>()->getPointeeType();
+ if (P.isConstQualified()) {
+ isReadOnly = true;
+ S += 'r';
+ }
+ }
+ if (isReadOnly) {
+ // Another legacy compatibility encoding. Some ObjC qualifier and type
+ // combinations need to be rearranged.
+ // Rewrite "in const" from "nr" to "rn"
+ if (StringRef(S).endswith("nr"))
+ S.replace(S.end()-2, S.end(), "rn");
+ }
+
+ if (PointeeTy->isCharType()) {
+ // char pointer types should be encoded as '*' unless the pointee is a
+ // type that has been typedef'd to 'BOOL'.
+ if (!isTypeTypedefedAsBOOL(PointeeTy)) {
+ S += '*';
+ return;
+ }
+ } else if (const RecordType *RTy = PointeeTy->getAs<RecordType>()) {
+ // GCC binary compat: Need to convert "struct objc_class *" to "#".
+ if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_class")) {
+ S += '#';
+ return;
+ }
+ // GCC binary compat: Need to convert "struct objc_object *" to "@".
+ if (RTy->getDecl()->getIdentifier() == &Idents.get("objc_object")) {
+ S += '@';
+ return;
+ }
+ // fall through...
+ }
+ S += '^';
+ getLegacyIntegralTypeEncoding(PointeeTy);
+
+ getObjCEncodingForTypeImpl(PointeeTy, S, false, ExpandPointedToStructures,
+ NULL);
+ return;
+ }
+
+ if (const ArrayType *AT =
+ // Ignore type qualifiers etc.
+ dyn_cast<ArrayType>(T->getCanonicalTypeInternal())) {
+ if (isa<IncompleteArrayType>(AT) && !StructField) {
+ // Incomplete arrays are encoded as a pointer to the array element.
+ S += '^';
+
+ getObjCEncodingForTypeImpl(AT->getElementType(), S,
+ false, ExpandStructures, FD);
+ } else {
+ S += '[';
+
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) {
+ if (getTypeSize(CAT->getElementType()) == 0)
+ S += '0';
+ else
+ S += llvm::utostr(CAT->getSize().getZExtValue());
+ } else {
+ // Variable length arrays are encoded as a regular array with 0 elements.
+ assert((isa<VariableArrayType>(AT) || isa<IncompleteArrayType>(AT)) &&
+ "Unknown array type!");
+ S += '0';
+ }
+
+ getObjCEncodingForTypeImpl(AT->getElementType(), S,
+ false, ExpandStructures, FD);
+ S += ']';
+ }
+ return;
+ }
+
+ if (T->getAs<FunctionType>()) {
+ S += '?';
+ return;
+ }
+
+ if (const RecordType *RTy = T->getAs<RecordType>()) {
+ RecordDecl *RDecl = RTy->getDecl();
+ S += RDecl->isUnion() ? '(' : '{';
+ // Anonymous structures print as '?'
+ if (const IdentifierInfo *II = RDecl->getIdentifier()) {
+ S += II->getName();
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) {
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ std::string TemplateArgsStr
+ = TemplateSpecializationType::PrintTemplateArgumentList(
+ TemplateArgs.data(),
+ TemplateArgs.size(),
+ (*this).getPrintingPolicy());
+
+ S += TemplateArgsStr;
+ }
+ } else {
+ S += '?';
+ }
+ if (ExpandStructures) {
+ S += '=';
+ if (!RDecl->isUnion()) {
+ getObjCEncodingForStructureImpl(RDecl, S, FD);
+ } else {
+ for (RecordDecl::field_iterator Field = RDecl->field_begin(),
+ FieldEnd = RDecl->field_end();
+ Field != FieldEnd; ++Field) {
+ if (FD) {
+ S += '"';
+ S += Field->getNameAsString();
+ S += '"';
+ }
+
+ // Special case bit-fields.
+ if (Field->isBitField()) {
+ getObjCEncodingForTypeImpl(Field->getType(), S, false, true,
+ (*Field));
+ } else {
+ QualType qt = Field->getType();
+ getLegacyIntegralTypeEncoding(qt);
+ getObjCEncodingForTypeImpl(qt, S, false, true,
+ FD, /*OutermostType*/false,
+ /*EncodingProperty*/false,
+ /*StructField*/true);
+ }
+ }
+ }
+ }
+ S += RDecl->isUnion() ? ')' : '}';
+ return;
+ }
+
+ if (const EnumType *ET = T->getAs<EnumType>()) {
+ if (FD && FD->isBitField())
+ EncodeBitField(this, S, T, FD);
+ else
+ S += ObjCEncodingForEnumType(this, ET);
+ return;
+ }
+
+ if (const BlockPointerType *BT = T->getAs<BlockPointerType>()) {
+ S += "@?"; // Unlike a pointer-to-function, which is "^?".
+ if (EncodeBlockParameters) {
+ const FunctionType *FT = BT->getPointeeType()->getAs<FunctionType>();
+
+ S += '<';
+ // Block return type
+ getObjCEncodingForTypeImpl(FT->getResultType(), S,
+ ExpandPointedToStructures, ExpandStructures,
+ FD,
+ false /* OutermostType */,
+ EncodingProperty,
+ false /* StructField */,
+ EncodeBlockParameters,
+ EncodeClassNames);
+ // Block self
+ S += "@?";
+ // Block parameters
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
+ for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin(),
+ E = FPT->arg_type_end(); I && (I != E); ++I) {
+ getObjCEncodingForTypeImpl(*I, S,
+ ExpandPointedToStructures,
+ ExpandStructures,
+ FD,
+ false /* OutermostType */,
+ EncodingProperty,
+ false /* StructField */,
+ EncodeBlockParameters,
+ EncodeClassNames);
+ }
+ }
+ S += '>';
+ }
+ return;
+ }
+
+ // Ignore protocol qualifiers when mangling at this level.
+ if (const ObjCObjectType *OT = T->getAs<ObjCObjectType>())
+ T = OT->getBaseType();
+
+ if (const ObjCInterfaceType *OIT = T->getAs<ObjCInterfaceType>()) {
+ // @encode(class_name)
+ ObjCInterfaceDecl *OI = OIT->getDecl();
+ S += '{';
+ const IdentifierInfo *II = OI->getIdentifier();
+ S += II->getName();
+ S += '=';
+ SmallVector<const ObjCIvarDecl*, 32> Ivars;
+ DeepCollectObjCIvars(OI, true, Ivars);
+ for (unsigned i = 0, e = Ivars.size(); i != e; ++i) {
+ const FieldDecl *Field = cast<FieldDecl>(Ivars[i]);
+ if (Field->isBitField())
+ getObjCEncodingForTypeImpl(Field->getType(), S, false, true, Field);
+ else
+ getObjCEncodingForTypeImpl(Field->getType(), S, false, true, FD);
+ }
+ S += '}';
+ return;
+ }
+
+ if (const ObjCObjectPointerType *OPT = T->getAs<ObjCObjectPointerType>()) {
+ if (OPT->isObjCIdType()) {
+ S += '@';
+ return;
+ }
+
+ if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
+ // FIXME: Consider if we need to output qualifiers for 'Class<p>'.
+ // Since this is a binary compatibility issue, need to consult with runtime
+ // folks. Fortunately, this is a *very* obscure construct.
+ S += '#';
+ return;
+ }
+
+ if (OPT->isObjCQualifiedIdType()) {
+ getObjCEncodingForTypeImpl(getObjCIdType(), S,
+ ExpandPointedToStructures,
+ ExpandStructures, FD);
+ if (FD || EncodingProperty || EncodeClassNames) {
+ // Note that we do extended encoding of the protocol qualifier list
+ // only when doing ivar or property encoding.
+ S += '"';
+ for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(),
+ E = OPT->qual_end(); I != E; ++I) {
+ S += '<';
+ S += (*I)->getNameAsString();
+ S += '>';
+ }
+ S += '"';
+ }
+ return;
+ }
+
+ QualType PointeeTy = OPT->getPointeeType();
+ if (!EncodingProperty &&
+ isa<TypedefType>(PointeeTy.getTypePtr())) {
+ // Another historical/compatibility reason.
+ // We encode the underlying type which comes out as
+ // {...};
+ S += '^';
+ getObjCEncodingForTypeImpl(PointeeTy, S,
+ false, ExpandPointedToStructures,
+ NULL);
+ return;
+ }
+
+ S += '@';
+ if (OPT->getInterfaceDecl() &&
+ (FD || EncodingProperty || EncodeClassNames)) {
+ S += '"';
+ S += OPT->getInterfaceDecl()->getIdentifier()->getName();
+ for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(),
+ E = OPT->qual_end(); I != E; ++I) {
+ S += '<';
+ S += (*I)->getNameAsString();
+ S += '>';
+ }
+ S += '"';
+ }
+ return;
+ }
+
+ // gcc just blithely ignores member pointers.
+ // TODO: maybe there should be a mangling for these
+ if (T->getAs<MemberPointerType>())
+ return;
+
+ if (T->isVectorType()) {
+ // This matches gcc's encoding, even though technically it is
+ // insufficient.
+ // FIXME. We should do a better job than gcc.
+ return;
+ }
+
+ llvm_unreachable("@encode for type not implemented!");
+}
+
+void ASTContext::getObjCEncodingForStructureImpl(RecordDecl *RDecl,
+ std::string &S,
+ const FieldDecl *FD,
+ bool includeVBases) const {
+ assert(RDecl && "Expected non-null RecordDecl");
+ assert(!RDecl->isUnion() && "Should not be called for unions");
+ if (!RDecl->getDefinition())
+ return;
+
+ CXXRecordDecl *CXXRec = dyn_cast<CXXRecordDecl>(RDecl);
+ std::multimap<uint64_t, NamedDecl *> FieldOrBaseOffsets;
+ const ASTRecordLayout &layout = getASTRecordLayout(RDecl);
+
+ if (CXXRec) {
+ for (CXXRecordDecl::base_class_iterator
+ BI = CXXRec->bases_begin(),
+ BE = CXXRec->bases_end(); BI != BE; ++BI) {
+ if (!BI->isVirtual()) {
+ CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl();
+ if (base->isEmpty())
+ continue;
+ uint64_t offs = layout.getBaseClassOffsetInBits(base);
+ FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
+ std::make_pair(offs, base));
+ }
+ }
+ }
+
+ unsigned i = 0;
+ for (RecordDecl::field_iterator Field = RDecl->field_begin(),
+ FieldEnd = RDecl->field_end();
+ Field != FieldEnd; ++Field, ++i) {
+ uint64_t offs = layout.getFieldOffset(i);
+ FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
+ std::make_pair(offs, *Field));
+ }
+
+ if (CXXRec && includeVBases) {
+ for (CXXRecordDecl::base_class_iterator
+ BI = CXXRec->vbases_begin(),
+ BE = CXXRec->vbases_end(); BI != BE; ++BI) {
+ CXXRecordDecl *base = BI->getType()->getAsCXXRecordDecl();
+ if (base->isEmpty())
+ continue;
+ uint64_t offs = layout.getVBaseClassOffsetInBits(base);
+ if (FieldOrBaseOffsets.find(offs) == FieldOrBaseOffsets.end())
+ FieldOrBaseOffsets.insert(FieldOrBaseOffsets.end(),
+ std::make_pair(offs, base));
+ }
+ }
+
+ CharUnits size;
+ if (CXXRec) {
+ size = includeVBases ? layout.getSize() : layout.getNonVirtualSize();
+ } else {
+ size = layout.getSize();
+ }
+
+ uint64_t CurOffs = 0;
+ std::multimap<uint64_t, NamedDecl *>::iterator
+ CurLayObj = FieldOrBaseOffsets.begin();
+
+ if ((CurLayObj != FieldOrBaseOffsets.end() && CurLayObj->first != 0) ||
+ (CurLayObj == FieldOrBaseOffsets.end() &&
+ CXXRec && CXXRec->isDynamicClass())) {
+ assert(CXXRec && CXXRec->isDynamicClass() &&
+ "Offset 0 was empty but no VTable ?");
+ if (FD) {
+ S += "\"_vptr$";
+ std::string recname = CXXRec->getNameAsString();
+ if (recname.empty()) recname = "?";
+ S += recname;
+ S += '"';
+ }
+ S += "^^?";
+ CurOffs += getTypeSize(VoidPtrTy);
+ }
+
+ if (!RDecl->hasFlexibleArrayMember()) {
+ // Mark the end of the structure.
+ uint64_t offs = toBits(size);
+ FieldOrBaseOffsets.insert(FieldOrBaseOffsets.upper_bound(offs),
+ std::make_pair(offs, (NamedDecl*)0));
+ }
+
+ for (; CurLayObj != FieldOrBaseOffsets.end(); ++CurLayObj) {
+ assert(CurOffs <= CurLayObj->first);
+
+ if (CurOffs < CurLayObj->first) {
+ uint64_t padding = CurLayObj->first - CurOffs;
+ // FIXME: There doesn't seem to be a way to indicate in the encoding that
+ // packing/alignment of members is different than normal, in which case
+ // the encoding will be out-of-sync with the real layout.
+ // If the runtime switches to just consider the size of types without
+ // taking into account alignment, we could make padding explicit in the
+ // encoding (e.g. using arrays of chars). The encoding strings would be
+ // longer, though.
+ CurOffs += padding;
+ }
+
+ NamedDecl *dcl = CurLayObj->second;
+ if (dcl == 0)
+ break; // reached end of structure.
+
+ if (CXXRecordDecl *base = dyn_cast<CXXRecordDecl>(dcl)) {
+ // We expand the bases without their virtual bases since those are going
+ // in the initial structure. Note that this differs from gcc which
+ // expands virtual bases each time one is encountered in the hierarchy,
+ // making the encoding type bigger than it really is.
+ getObjCEncodingForStructureImpl(base, S, FD, /*includeVBases*/false);
+ assert(!base->isEmpty());
+ CurOffs += toBits(getASTRecordLayout(base).getNonVirtualSize());
+ } else {
+ FieldDecl *field = cast<FieldDecl>(dcl);
+ if (FD) {
+ S += '"';
+ S += field->getNameAsString();
+ S += '"';
+ }
+
+ if (field->isBitField()) {
+ EncodeBitField(this, S, field->getType(), field);
+ CurOffs += field->getBitWidthValue(*this);
+ } else {
+ QualType qt = field->getType();
+ getLegacyIntegralTypeEncoding(qt);
+ getObjCEncodingForTypeImpl(qt, S, false, true, FD,
+ /*OutermostType*/false,
+ /*EncodingProperty*/false,
+ /*StructField*/true);
+ CurOffs += getTypeSize(field->getType());
+ }
+ }
+ }
+}
+
+void ASTContext::getObjCEncodingForTypeQualifier(Decl::ObjCDeclQualifier QT,
+ std::string& S) const {
+ if (QT & Decl::OBJC_TQ_In)
+ S += 'n';
+ if (QT & Decl::OBJC_TQ_Inout)
+ S += 'N';
+ if (QT & Decl::OBJC_TQ_Out)
+ S += 'o';
+ if (QT & Decl::OBJC_TQ_Bycopy)
+ S += 'O';
+ if (QT & Decl::OBJC_TQ_Byref)
+ S += 'R';
+ if (QT & Decl::OBJC_TQ_Oneway)
+ S += 'V';
+}
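+
+// For example, given the mapping above, a parameter declared 'inout oneway'
+// contributes the qualifier string "NV", while a plain 'out' parameter
+// contributes "o".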
+
+void ASTContext::setBuiltinVaListType(QualType T) {
+ assert(BuiltinVaListType.isNull() && "__builtin_va_list type already set!");
+
+ BuiltinVaListType = T;
+}
+
+TypedefDecl *ASTContext::getObjCIdDecl() const {
+ if (!ObjCIdDecl) {
+ QualType T = getObjCObjectType(ObjCBuiltinIdTy, 0, 0);
+ T = getObjCObjectPointerType(T);
+ TypeSourceInfo *IdInfo = getTrivialTypeSourceInfo(T);
+ ObjCIdDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
+ getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Idents.get("id"), IdInfo);
+ }
+
+ return ObjCIdDecl;
+}
+
+TypedefDecl *ASTContext::getObjCSelDecl() const {
+ if (!ObjCSelDecl) {
+ QualType SelT = getPointerType(ObjCBuiltinSelTy);
+ TypeSourceInfo *SelInfo = getTrivialTypeSourceInfo(SelT);
+ ObjCSelDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
+ getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Idents.get("SEL"), SelInfo);
+ }
+ return ObjCSelDecl;
+}
+
+TypedefDecl *ASTContext::getObjCClassDecl() const {
+ if (!ObjCClassDecl) {
+ QualType T = getObjCObjectType(ObjCBuiltinClassTy, 0, 0);
+ T = getObjCObjectPointerType(T);
+ TypeSourceInfo *ClassInfo = getTrivialTypeSourceInfo(T);
+ ObjCClassDecl = TypedefDecl::Create(const_cast<ASTContext &>(*this),
+ getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Idents.get("Class"), ClassInfo);
+ }
+
+ return ObjCClassDecl;
+}
+
+ObjCInterfaceDecl *ASTContext::getObjCProtocolDecl() const {
+ if (!ObjCProtocolClassDecl) {
+ ObjCProtocolClassDecl
+ = ObjCInterfaceDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(),
+ &Idents.get("Protocol"),
+ /*PrevDecl=*/0,
+ SourceLocation(), true);
+ }
+
+ return ObjCProtocolClassDecl;
+}
+
+void ASTContext::setObjCConstantStringInterface(ObjCInterfaceDecl *Decl) {
+ assert(ObjCConstantStringType.isNull() &&
+ "'NSConstantString' type already set!");
+
+ ObjCConstantStringType = getObjCInterfaceType(Decl);
+}
+
+/// \brief Retrieve the template name that corresponds to a non-empty
+/// lookup.
+TemplateName
+ASTContext::getOverloadedTemplateName(UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) const {
+ unsigned size = End - Begin;
+ assert(size > 1 && "set is not overloaded!");
+
+ void *memory = Allocate(sizeof(OverloadedTemplateStorage) +
+ size * sizeof(FunctionTemplateDecl*));
+ OverloadedTemplateStorage *OT = new(memory) OverloadedTemplateStorage(size);
+
+ NamedDecl **Storage = OT->getStorage();
+ for (UnresolvedSetIterator I = Begin; I != End; ++I) {
+ NamedDecl *D = *I;
+ assert(isa<FunctionTemplateDecl>(D) ||
+ (isa<UsingShadowDecl>(D) &&
+ isa<FunctionTemplateDecl>(D->getUnderlyingDecl())));
+ *Storage++ = D;
+ }
+
+ return TemplateName(OT);
+}
+
+/// \brief Retrieve the template name that represents a qualified
+/// template name such as \c std::vector.
+TemplateName
+ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
+ bool TemplateKeyword,
+ TemplateDecl *Template) const {
+ assert(NNS && "Missing nested-name-specifier in qualified template name");
+
+ // FIXME: Canonicalization?
+ llvm::FoldingSetNodeID ID;
+ QualifiedTemplateName::Profile(ID, NNS, TemplateKeyword, Template);
+
+ void *InsertPos = 0;
+ QualifiedTemplateName *QTN =
+ QualifiedTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+ if (!QTN) {
+ QTN = new (*this,4) QualifiedTemplateName(NNS, TemplateKeyword, Template);
+ QualifiedTemplateNames.InsertNode(QTN, InsertPos);
+ }
+
+ return TemplateName(QTN);
+}
+
+/// \brief Retrieve the template name that represents a dependent
+/// template name such as \c MetaFun::template apply.
+TemplateName
+ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name) const {
+ assert((!NNS || NNS->isDependent()) &&
+ "Nested name specifier must be dependent");
+
+ llvm::FoldingSetNodeID ID;
+ DependentTemplateName::Profile(ID, NNS, Name);
+
+ void *InsertPos = 0;
+ DependentTemplateName *QTN =
+ DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (QTN)
+ return TemplateName(QTN);
+
+ NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+ if (CanonNNS == NNS) {
+ QTN = new (*this,4) DependentTemplateName(NNS, Name);
+ } else {
+ TemplateName Canon = getDependentTemplateName(CanonNNS, Name);
+ QTN = new (*this,4) DependentTemplateName(NNS, Name, Canon);
+ DependentTemplateName *CheckQTN =
+ DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CheckQTN && "Dependent type name canonicalization broken");
+ (void)CheckQTN;
+ }
+
+ DependentTemplateNames.InsertNode(QTN, InsertPos);
+ return TemplateName(QTN);
+}
+
+/// \brief Retrieve the template name that represents a dependent
+/// template name such as \c MetaFun::template operator+.
+TemplateName
+ASTContext::getDependentTemplateName(NestedNameSpecifier *NNS,
+ OverloadedOperatorKind Operator) const {
+ assert((!NNS || NNS->isDependent()) &&
+ "Nested name specifier must be dependent");
+
+ llvm::FoldingSetNodeID ID;
+ DependentTemplateName::Profile(ID, NNS, Operator);
+
+ void *InsertPos = 0;
+ DependentTemplateName *QTN
+ = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (QTN)
+ return TemplateName(QTN);
+
+ NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+ if (CanonNNS == NNS) {
+ QTN = new (*this,4) DependentTemplateName(NNS, Operator);
+ } else {
+ TemplateName Canon = getDependentTemplateName(CanonNNS, Operator);
+ QTN = new (*this,4) DependentTemplateName(NNS, Operator, Canon);
+
+ DependentTemplateName *CheckQTN
+ = DependentTemplateNames.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CheckQTN && "Dependent template name canonicalization broken");
+ (void)CheckQTN;
+ }
+
+ DependentTemplateNames.InsertNode(QTN, InsertPos);
+ return TemplateName(QTN);
+}
+
+TemplateName
+ASTContext::getSubstTemplateTemplateParm(TemplateTemplateParmDecl *param,
+ TemplateName replacement) const {
+ llvm::FoldingSetNodeID ID;
+ SubstTemplateTemplateParmStorage::Profile(ID, param, replacement);
+
+ void *insertPos = 0;
+ SubstTemplateTemplateParmStorage *subst
+ = SubstTemplateTemplateParms.FindNodeOrInsertPos(ID, insertPos);
+
+ if (!subst) {
+ subst = new (*this) SubstTemplateTemplateParmStorage(param, replacement);
+ SubstTemplateTemplateParms.InsertNode(subst, insertPos);
+ }
+
+ return TemplateName(subst);
+}
+
+TemplateName
+ASTContext::getSubstTemplateTemplateParmPack(TemplateTemplateParmDecl *Param,
+ const TemplateArgument &ArgPack) const {
+ ASTContext &Self = const_cast<ASTContext &>(*this);
+ llvm::FoldingSetNodeID ID;
+ SubstTemplateTemplateParmPackStorage::Profile(ID, Self, Param, ArgPack);
+
+ void *InsertPos = 0;
+ SubstTemplateTemplateParmPackStorage *Subst
+ = SubstTemplateTemplateParmPacks.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!Subst) {
+ Subst = new (*this) SubstTemplateTemplateParmPackStorage(Param,
+ ArgPack.pack_size(),
+ ArgPack.pack_begin());
+ SubstTemplateTemplateParmPacks.InsertNode(Subst, InsertPos);
+ }
+
+ return TemplateName(Subst);
+}
+
+/// getFromTargetType - Given one of the integer types provided by
+/// TargetInfo, produce the corresponding type. The unsigned @p Type
+/// is actually a value of type @c TargetInfo::IntType.
+CanQualType ASTContext::getFromTargetType(unsigned Type) const {
+ switch (Type) {
+ case TargetInfo::NoInt: return CanQualType();
+ case TargetInfo::SignedShort: return ShortTy;
+ case TargetInfo::UnsignedShort: return UnsignedShortTy;
+ case TargetInfo::SignedInt: return IntTy;
+ case TargetInfo::UnsignedInt: return UnsignedIntTy;
+ case TargetInfo::SignedLong: return LongTy;
+ case TargetInfo::UnsignedLong: return UnsignedLongTy;
+ case TargetInfo::SignedLongLong: return LongLongTy;
+ case TargetInfo::UnsignedLongLong: return UnsignedLongLongTy;
+ }
+
+ llvm_unreachable("Unhandled TargetInfo::IntType value");
+}
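+
+// For example, getFromTargetType(TargetInfo::UnsignedLong) yields
+// UnsignedLongTy, and getFromTargetType(TargetInfo::NoInt) yields a null
+// CanQualType.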
+
+//===----------------------------------------------------------------------===//
+// Type Predicates.
+//===----------------------------------------------------------------------===//
+
+/// getObjCGCAttrKind - Returns one of GCNone, Weak or Strong, the type's
+/// Objective-C garbage collection attribute.
+///
+Qualifiers::GC ASTContext::getObjCGCAttrKind(QualType Ty) const {
+ if (getLangOpts().getGC() == LangOptions::NonGC)
+ return Qualifiers::GCNone;
+
+ assert(getLangOpts().ObjC1);
+ Qualifiers::GC GCAttrs = Ty.getObjCGCAttr();
+
+  // Default behaviour under Objective-C's GC is for ObjC pointers
+  // (or pointers to them) to be treated as though they were declared
+  // as __strong.
+ if (GCAttrs == Qualifiers::GCNone) {
+ if (Ty->isObjCObjectPointerType() || Ty->isBlockPointerType())
+ return Qualifiers::Strong;
+ else if (Ty->isPointerType())
+ return getObjCGCAttrKind(Ty->getAs<PointerType>()->getPointeeType());
+ } else {
+ // It's not valid to set GC attributes on anything that isn't a
+ // pointer.
+#ifndef NDEBUG
+ QualType CT = Ty->getCanonicalTypeInternal();
+ while (const ArrayType *AT = dyn_cast<ArrayType>(CT))
+ CT = AT->getElementType();
+ assert(CT->isAnyPointerType() || CT->isBlockPointerType());
+#endif
+ }
+ return GCAttrs;
+}
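+
+// For example, under Objective-C GC an object or block pointer with no
+// explicit GC attribute is reported as Qualifiers::Strong by the code above,
+// while a C pointer defers to the GC attribute of its pointee type.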
+
+//===----------------------------------------------------------------------===//
+// Type Compatibility Testing
+//===----------------------------------------------------------------------===//
+
+/// areCompatVectorTypes - Return true if the two specified vector types are
+/// compatible.
+static bool areCompatVectorTypes(const VectorType *LHS,
+ const VectorType *RHS) {
+ assert(LHS->isCanonicalUnqualified() && RHS->isCanonicalUnqualified());
+ return LHS->getElementType() == RHS->getElementType() &&
+ LHS->getNumElements() == RHS->getNumElements();
+}
+
+bool ASTContext::areCompatibleVectorTypes(QualType FirstVec,
+ QualType SecondVec) {
+ assert(FirstVec->isVectorType() && "FirstVec should be a vector type");
+ assert(SecondVec->isVectorType() && "SecondVec should be a vector type");
+
+ if (hasSameUnqualifiedType(FirstVec, SecondVec))
+ return true;
+
+ // Treat Neon vector types and most AltiVec vector types as if they are the
+ // equivalent GCC vector types.
+ const VectorType *First = FirstVec->getAs<VectorType>();
+ const VectorType *Second = SecondVec->getAs<VectorType>();
+ if (First->getNumElements() == Second->getNumElements() &&
+ hasSameType(First->getElementType(), Second->getElementType()) &&
+ First->getVectorKind() != VectorType::AltiVecPixel &&
+ First->getVectorKind() != VectorType::AltiVecBool &&
+ Second->getVectorKind() != VectorType::AltiVecPixel &&
+ Second->getVectorKind() != VectorType::AltiVecBool)
+ return true;
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCQualifiedIdTypesAreCompatible - Compatibility testing for qualified id's.
+//===----------------------------------------------------------------------===//
+
+/// ProtocolCompatibleWithProtocol - return 'true' if 'lProto' is in the
+/// inheritance hierarchy of 'rProto'.
+bool
+ASTContext::ProtocolCompatibleWithProtocol(ObjCProtocolDecl *lProto,
+ ObjCProtocolDecl *rProto) const {
+ if (declaresSameEntity(lProto, rProto))
+ return true;
+ for (ObjCProtocolDecl::protocol_iterator PI = rProto->protocol_begin(),
+ E = rProto->protocol_end(); PI != E; ++PI)
+ if (ProtocolCompatibleWithProtocol(lProto, *PI))
+ return true;
+ return false;
+}
+
+/// QualifiedIdConformsQualifiedId - compare id<p,...> with id<p1,...>;
+/// return true if lhs's protocols conform to rhs's protocols, false
+/// otherwise.
+bool ASTContext::QualifiedIdConformsQualifiedId(QualType lhs, QualType rhs) {
+ if (lhs->isObjCQualifiedIdType() && rhs->isObjCQualifiedIdType())
+ return ObjCQualifiedIdTypesAreCompatible(lhs, rhs, false);
+ return false;
+}
+
+/// ObjCQualifiedClassTypesAreCompatible - compare Class<p,...> and
+/// Class<p1, ...>.
+bool ASTContext::ObjCQualifiedClassTypesAreCompatible(QualType lhs,
+ QualType rhs) {
+ const ObjCObjectPointerType *lhsQID = lhs->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
+ assert ((lhsQID && rhsOPT) && "ObjCQualifiedClassTypesAreCompatible");
+
+ for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(),
+ E = lhsQID->qual_end(); I != E; ++I) {
+ bool match = false;
+ ObjCProtocolDecl *lhsProto = *I;
+ for (ObjCObjectPointerType::qual_iterator J = rhsOPT->qual_begin(),
+ E = rhsOPT->qual_end(); J != E; ++J) {
+ ObjCProtocolDecl *rhsProto = *J;
+ if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto)) {
+ match = true;
+ break;
+ }
+ }
+ if (!match)
+ return false;
+ }
+ return true;
+}
+
+/// ObjCQualifiedIdTypesAreCompatible - We know that one of lhs/rhs is an
+/// ObjCQualifiedIDType.
+bool ASTContext::ObjCQualifiedIdTypesAreCompatible(QualType lhs, QualType rhs,
+ bool compare) {
+ // Allow id<P..> and an 'id' or void* type in all cases.
+ if (lhs->isVoidPointerType() ||
+ lhs->isObjCIdType() || lhs->isObjCClassType())
+ return true;
+ else if (rhs->isVoidPointerType() ||
+ rhs->isObjCIdType() || rhs->isObjCClassType())
+ return true;
+
+ if (const ObjCObjectPointerType *lhsQID = lhs->getAsObjCQualifiedIdType()) {
+ const ObjCObjectPointerType *rhsOPT = rhs->getAs<ObjCObjectPointerType>();
+
+ if (!rhsOPT) return false;
+
+ if (rhsOPT->qual_empty()) {
+      // If the RHS is an unqualified interface pointer "NSString*",
+ // make sure we check the class hierarchy.
+ if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) {
+ for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(),
+ E = lhsQID->qual_end(); I != E; ++I) {
+ // when comparing an id<P> on lhs with a static type on rhs,
+ // see if static class implements all of id's protocols, directly or
+ // through its super class and categories.
+ if (!rhsID->ClassImplementsProtocol(*I, true))
+ return false;
+ }
+ }
+ // If there are no qualifiers and no interface, we have an 'id'.
+ return true;
+ }
+ // Both the right and left sides have qualifiers.
+ for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(),
+ E = lhsQID->qual_end(); I != E; ++I) {
+ ObjCProtocolDecl *lhsProto = *I;
+ bool match = false;
+
+ // when comparing an id<P> on lhs with a static type on rhs,
+ // see if static class implements all of id's protocols, directly or
+ // through its super class and categories.
+ for (ObjCObjectPointerType::qual_iterator J = rhsOPT->qual_begin(),
+ E = rhsOPT->qual_end(); J != E; ++J) {
+ ObjCProtocolDecl *rhsProto = *J;
+ if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
+ (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
+ match = true;
+ break;
+ }
+ }
+ // If the RHS is a qualified interface pointer "NSString<P>*",
+ // make sure we check the class hierarchy.
+ if (ObjCInterfaceDecl *rhsID = rhsOPT->getInterfaceDecl()) {
+ for (ObjCObjectPointerType::qual_iterator I = lhsQID->qual_begin(),
+ E = lhsQID->qual_end(); I != E; ++I) {
+ // when comparing an id<P> on lhs with a static type on rhs,
+ // see if static class implements all of id's protocols, directly or
+ // through its super class and categories.
+ if (rhsID->ClassImplementsProtocol(*I, true)) {
+ match = true;
+ break;
+ }
+ }
+ }
+ if (!match)
+ return false;
+ }
+
+ return true;
+ }
+
+ const ObjCObjectPointerType *rhsQID = rhs->getAsObjCQualifiedIdType();
+ assert(rhsQID && "One of the LHS/RHS should be id<x>");
+
+ if (const ObjCObjectPointerType *lhsOPT =
+ lhs->getAsObjCInterfacePointerType()) {
+ // If both the right and left sides have qualifiers.
+ for (ObjCObjectPointerType::qual_iterator I = lhsOPT->qual_begin(),
+ E = lhsOPT->qual_end(); I != E; ++I) {
+ ObjCProtocolDecl *lhsProto = *I;
+ bool match = false;
+
+ // when comparing an id<P> on rhs with a static type on lhs,
+ // see if static class implements all of id's protocols, directly or
+ // through its super class and categories.
+      // First, lhs protocols in the qualifier list must be found, directly
+      // or indirectly, in rhs's qualifier list, or it is a mismatch.
+ for (ObjCObjectPointerType::qual_iterator J = rhsQID->qual_begin(),
+ E = rhsQID->qual_end(); J != E; ++J) {
+ ObjCProtocolDecl *rhsProto = *J;
+ if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
+ (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
+ match = true;
+ break;
+ }
+ }
+ if (!match)
+ return false;
+ }
+
+    // The static class's protocols, or its superclass or category protocols,
+    // must be found, directly or indirectly, in rhs's qualifier list, or it
+    // is a mismatch.
+ if (ObjCInterfaceDecl *lhsID = lhsOPT->getInterfaceDecl()) {
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
+ CollectInheritedProtocols(lhsID, LHSInheritedProtocols);
+      // This is rather dubious but matches gcc's behavior. If lhs has
+      // no type qualifier and its class has no static protocol(s),
+      // assume that it is a mismatch.
+ if (LHSInheritedProtocols.empty() && lhsOPT->qual_empty())
+ return false;
+ for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I =
+ LHSInheritedProtocols.begin(),
+ E = LHSInheritedProtocols.end(); I != E; ++I) {
+ bool match = false;
+ ObjCProtocolDecl *lhsProto = (*I);
+ for (ObjCObjectPointerType::qual_iterator J = rhsQID->qual_begin(),
+ E = rhsQID->qual_end(); J != E; ++J) {
+ ObjCProtocolDecl *rhsProto = *J;
+ if (ProtocolCompatibleWithProtocol(lhsProto, rhsProto) ||
+ (compare && ProtocolCompatibleWithProtocol(rhsProto, lhsProto))) {
+ match = true;
+ break;
+ }
+ }
+ if (!match)
+ return false;
+ }
+ }
+ return true;
+ }
+ return false;
+}
+
+/// canAssignObjCInterfaces - Return true if the two interface types are
+/// compatible for assignment from RHS to LHS. This handles validation of any
+/// protocol qualifiers on the LHS or RHS.
+///
+bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
+ const ObjCObjectPointerType *RHSOPT) {
+ const ObjCObjectType* LHS = LHSOPT->getObjectType();
+ const ObjCObjectType* RHS = RHSOPT->getObjectType();
+
+ // If either type represents the built-in 'id' or 'Class' types, return true.
+ if (LHS->isObjCUnqualifiedIdOrClass() ||
+ RHS->isObjCUnqualifiedIdOrClass())
+ return true;
+
+ if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId())
+ return ObjCQualifiedIdTypesAreCompatible(QualType(LHSOPT,0),
+ QualType(RHSOPT,0),
+ false);
+
+ if (LHS->isObjCQualifiedClass() && RHS->isObjCQualifiedClass())
+ return ObjCQualifiedClassTypesAreCompatible(QualType(LHSOPT,0),
+ QualType(RHSOPT,0));
+
+ // If we have 2 user-defined types, fall into that path.
+ if (LHS->getInterface() && RHS->getInterface())
+ return canAssignObjCInterfaces(LHS, RHS);
+
+ return false;
+}
+
+/// canAssignObjCInterfacesInBlockPointer - This routine is specifically written
+/// for providing type-safety for objective-c pointers used to pass/return
+/// arguments in block literals. When passing arguments, 'A*' where 'id' is
+/// expected is not OK, and 'Sub *' where 'Super *' is expected is not OK.
+/// For the return type, the opposite applies.
+bool ASTContext::canAssignObjCInterfacesInBlockPointer(
+ const ObjCObjectPointerType *LHSOPT,
+ const ObjCObjectPointerType *RHSOPT,
+ bool BlockReturnType) {
+ if (RHSOPT->isObjCBuiltinType() || LHSOPT->isObjCIdType())
+ return true;
+
+ if (LHSOPT->isObjCBuiltinType()) {
+ return RHSOPT->isObjCBuiltinType() || RHSOPT->isObjCQualifiedIdType();
+ }
+
+ if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType())
+ return ObjCQualifiedIdTypesAreCompatible(QualType(LHSOPT,0),
+ QualType(RHSOPT,0),
+ false);
+
+ const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
+ const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
+ if (LHS && RHS) { // We have 2 user-defined types.
+ if (LHS != RHS) {
+ if (LHS->getDecl()->isSuperClassOf(RHS->getDecl()))
+ return BlockReturnType;
+ if (RHS->getDecl()->isSuperClassOf(LHS->getDecl()))
+ return !BlockReturnType;
+ }
+ else
+ return true;
+ }
+ return false;
+}
+
+/// getIntersectionOfProtocols - This routine finds the intersection of the
+/// sets of protocols inherited from two distinct Objective-C pointer objects.
+/// It is used to build the composite qualifier list of the composite type of
+/// a conditional expression involving two Objective-C pointer objects.
+static
+void getIntersectionOfProtocols(ASTContext &Context,
+ const ObjCObjectPointerType *LHSOPT,
+ const ObjCObjectPointerType *RHSOPT,
+ SmallVectorImpl<ObjCProtocolDecl *> &IntersectionOfProtocols) {
+
+ const ObjCObjectType* LHS = LHSOPT->getObjectType();
+ const ObjCObjectType* RHS = RHSOPT->getObjectType();
+ assert(LHS->getInterface() && "LHS must have an interface base");
+ assert(RHS->getInterface() && "RHS must have an interface base");
+
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocolSet;
+ unsigned LHSNumProtocols = LHS->getNumProtocols();
+ if (LHSNumProtocols > 0)
+ InheritedProtocolSet.insert(LHS->qual_begin(), LHS->qual_end());
+ else {
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
+ Context.CollectInheritedProtocols(LHS->getInterface(),
+ LHSInheritedProtocols);
+ InheritedProtocolSet.insert(LHSInheritedProtocols.begin(),
+ LHSInheritedProtocols.end());
+ }
+
+ unsigned RHSNumProtocols = RHS->getNumProtocols();
+ if (RHSNumProtocols > 0) {
+ ObjCProtocolDecl **RHSProtocols =
+ const_cast<ObjCProtocolDecl **>(RHS->qual_begin());
+ for (unsigned i = 0; i < RHSNumProtocols; ++i)
+ if (InheritedProtocolSet.count(RHSProtocols[i]))
+ IntersectionOfProtocols.push_back(RHSProtocols[i]);
+ } else {
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSInheritedProtocols;
+ Context.CollectInheritedProtocols(RHS->getInterface(),
+ RHSInheritedProtocols);
+ for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I =
+ RHSInheritedProtocols.begin(),
+ E = RHSInheritedProtocols.end(); I != E; ++I)
+ if (InheritedProtocolSet.count((*I)))
+ IntersectionOfProtocols.push_back((*I));
+ }
+}
+
+/// areCommonBaseCompatible - Returns the common base class of the two classes
+/// if one is found. Note that this is an O(n^2) algorithm, but it is only
+/// called as the last type comparison in a ?-exp of ObjC pointer types before
+/// a warning is issued, so its invocation is extremely rare.
+QualType ASTContext::areCommonBaseCompatible(
+ const ObjCObjectPointerType *Lptr,
+ const ObjCObjectPointerType *Rptr) {
+ const ObjCObjectType *LHS = Lptr->getObjectType();
+ const ObjCObjectType *RHS = Rptr->getObjectType();
+ const ObjCInterfaceDecl* LDecl = LHS->getInterface();
+ const ObjCInterfaceDecl* RDecl = RHS->getInterface();
+ if (!LDecl || !RDecl || (declaresSameEntity(LDecl, RDecl)))
+ return QualType();
+
+ do {
+ LHS = cast<ObjCInterfaceType>(getObjCInterfaceType(LDecl));
+ if (canAssignObjCInterfaces(LHS, RHS)) {
+ SmallVector<ObjCProtocolDecl *, 8> Protocols;
+ getIntersectionOfProtocols(*this, Lptr, Rptr, Protocols);
+
+ QualType Result = QualType(LHS, 0);
+ if (!Protocols.empty())
+ Result = getObjCObjectType(Result, Protocols.data(), Protocols.size());
+ Result = getObjCObjectPointerType(Result);
+ return Result;
+ }
+ } while ((LDecl = LDecl->getSuperClass()));
+
+ return QualType();
+}
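+
+// Illustrative example (hypothetical class names): for 'DerivedA<P> *' and
+// 'DerivedB<P> *' that share a superclass 'Base', the loop above walks from
+// DerivedA up its superclass chain until canAssignObjCInterfaces succeeds,
+// so the composite type would be 'Base<P> *' (the protocol intersection
+// re-applied to the common base).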
+
+bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
+ const ObjCObjectType *RHS) {
+ assert(LHS->getInterface() && "LHS is not an interface type");
+ assert(RHS->getInterface() && "RHS is not an interface type");
+
+ // Verify that the base decls are compatible: the RHS must be a subclass of
+ // the LHS.
+ if (!LHS->getInterface()->isSuperClassOf(RHS->getInterface()))
+ return false;
+
+ // RHS must have a superset of the protocols in the LHS. If the LHS is not
+ // protocol qualified at all, then we are good.
+ if (LHS->getNumProtocols() == 0)
+ return true;
+
+ // Okay, we know the LHS has protocol qualifiers. If the RHS doesn't,
+ // more detailed analysis is required.
+ if (RHS->getNumProtocols() == 0) {
+    // The assignment is OK if LHS is a superclass of RHS *and* that
+    // superclass is assignment compatible with LHS; return false otherwise.
+ bool IsSuperClass =
+ LHS->getInterface()->isSuperClassOf(RHS->getInterface());
+ if (IsSuperClass) {
+      // OK if conversion of LHS to SuperClass results in narrowing of types;
+      // i.e., SuperClass may implement at least one of the protocols
+      // in LHS's protocol list. For example, SuperObj<P1> = lhs<P1,P2> is ok,
+      // but SuperObj<P1,P2,P3> = lhs<P1,P2> is not.
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 8> SuperClassInheritedProtocols;
+ CollectInheritedProtocols(RHS->getInterface(), SuperClassInheritedProtocols);
+ // If super class has no protocols, it is not a match.
+ if (SuperClassInheritedProtocols.empty())
+ return false;
+
+ for (ObjCObjectType::qual_iterator LHSPI = LHS->qual_begin(),
+ LHSPE = LHS->qual_end();
+ LHSPI != LHSPE; LHSPI++) {
+ bool SuperImplementsProtocol = false;
+ ObjCProtocolDecl *LHSProto = (*LHSPI);
+
+ for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I =
+ SuperClassInheritedProtocols.begin(),
+ E = SuperClassInheritedProtocols.end(); I != E; ++I) {
+ ObjCProtocolDecl *SuperClassProto = (*I);
+ if (SuperClassProto->lookupProtocolNamed(LHSProto->getIdentifier())) {
+ SuperImplementsProtocol = true;
+ break;
+ }
+ }
+ if (!SuperImplementsProtocol)
+ return false;
+ }
+ return true;
+ }
+ return false;
+ }
+
+ for (ObjCObjectType::qual_iterator LHSPI = LHS->qual_begin(),
+ LHSPE = LHS->qual_end();
+ LHSPI != LHSPE; LHSPI++) {
+ bool RHSImplementsProtocol = false;
+
+ // If the RHS doesn't implement the protocol on the left, the types
+ // are incompatible.
+ for (ObjCObjectType::qual_iterator RHSPI = RHS->qual_begin(),
+ RHSPE = RHS->qual_end();
+ RHSPI != RHSPE; RHSPI++) {
+ if ((*RHSPI)->lookupProtocolNamed((*LHSPI)->getIdentifier())) {
+ RHSImplementsProtocol = true;
+ break;
+ }
+ }
+ // FIXME: For better diagnostics, consider passing back the protocol name.
+ if (!RHSImplementsProtocol)
+ return false;
+ }
+ // The RHS implements all protocols listed on the LHS.
+ return true;
+}
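+
+// Illustrative example (hypothetical names): assigning a 'MyView<P1> *' value
+// to an 'NSObject<P1> *' variable is accepted by the rules above when MyView
+// descends from NSObject and adopts (or inherits) P1; it is rejected if the
+// LHS also lists a protocol that MyView's qualifier list does not implement.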
+
+bool ASTContext::areComparableObjCPointerTypes(QualType LHS, QualType RHS) {
+ // get the "pointed to" types
+ const ObjCObjectPointerType *LHSOPT = LHS->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *RHSOPT = RHS->getAs<ObjCObjectPointerType>();
+
+ if (!LHSOPT || !RHSOPT)
+ return false;
+
+ return canAssignObjCInterfaces(LHSOPT, RHSOPT) ||
+ canAssignObjCInterfaces(RHSOPT, LHSOPT);
+}
+
+bool ASTContext::canBindObjCObjectType(QualType To, QualType From) {
+ return canAssignObjCInterfaces(
+ getObjCObjectPointerType(To)->getAs<ObjCObjectPointerType>(),
+ getObjCObjectPointerType(From)->getAs<ObjCObjectPointerType>());
+}
+
+/// typesAreCompatible - C99 6.7.3p9: For two qualified types to be compatible,
+/// both shall have the identically qualified version of a compatible type.
+/// C99 6.2.7p1: Two types have compatible types if their types are the
+/// same. See 6.7.[2,3,5] for additional rules.
+bool ASTContext::typesAreCompatible(QualType LHS, QualType RHS,
+ bool CompareUnqualified) {
+ if (getLangOpts().CPlusPlus)
+ return hasSameType(LHS, RHS);
+
+ return !mergeTypes(LHS, RHS, false, CompareUnqualified).isNull();
+}
+
+bool ASTContext::propertyTypesAreCompatible(QualType LHS, QualType RHS) {
+ return typesAreCompatible(LHS, RHS);
+}
+
+bool ASTContext::typesAreBlockPointerCompatible(QualType LHS, QualType RHS) {
+ return !mergeTypes(LHS, RHS, true).isNull();
+}
+
+/// mergeTransparentUnionType - if T is a transparent union type and a member
+/// of T is compatible with SubType, return the merged type, else return
+/// QualType()
+QualType ASTContext::mergeTransparentUnionType(QualType T, QualType SubType,
+ bool OfBlockPointer,
+ bool Unqualified) {
+ if (const RecordType *UT = T->getAsUnionType()) {
+ RecordDecl *UD = UT->getDecl();
+ if (UD->hasAttr<TransparentUnionAttr>()) {
+ for (RecordDecl::field_iterator it = UD->field_begin(),
+ itend = UD->field_end(); it != itend; ++it) {
+ QualType ET = it->getType().getUnqualifiedType();
+ QualType MT = mergeTypes(ET, SubType, OfBlockPointer, Unqualified);
+ if (!MT.isNull())
+ return MT;
+ }
+ }
+ }
+
+ return QualType();
+}
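+
+// Illustrative example (hypothetical declaration): given
+//   union U { int *ip; float *fp; } __attribute__((transparent_union));
+// mergeTransparentUnionType(U, int*) succeeds because the 'ip' member is
+// compatible with 'int *', so an 'int *' argument may be passed where a 'U'
+// parameter is expected.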
+
+/// mergeFunctionArgumentTypes - merge two types which appear as function
+/// argument types
+QualType ASTContext::mergeFunctionArgumentTypes(QualType lhs, QualType rhs,
+ bool OfBlockPointer,
+ bool Unqualified) {
+ // GNU extension: two types are compatible if they appear as a function
+ // argument, one of the types is a transparent union type and the other
+ // type is compatible with a union member
+ QualType lmerge = mergeTransparentUnionType(lhs, rhs, OfBlockPointer,
+ Unqualified);
+ if (!lmerge.isNull())
+ return lmerge;
+
+ QualType rmerge = mergeTransparentUnionType(rhs, lhs, OfBlockPointer,
+ Unqualified);
+ if (!rmerge.isNull())
+ return rmerge;
+
+ return mergeTypes(lhs, rhs, OfBlockPointer, Unqualified);
+}
+
+QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
+ bool OfBlockPointer,
+ bool Unqualified) {
+ const FunctionType *lbase = lhs->getAs<FunctionType>();
+ const FunctionType *rbase = rhs->getAs<FunctionType>();
+ const FunctionProtoType *lproto = dyn_cast<FunctionProtoType>(lbase);
+ const FunctionProtoType *rproto = dyn_cast<FunctionProtoType>(rbase);
+ bool allLTypes = true;
+ bool allRTypes = true;
+
+ // Check return type
+ QualType retType;
+ if (OfBlockPointer) {
+ QualType RHS = rbase->getResultType();
+ QualType LHS = lbase->getResultType();
+ bool UnqualifiedResult = Unqualified;
+ if (!UnqualifiedResult)
+ UnqualifiedResult = (!RHS.hasQualifiers() && LHS.hasQualifiers());
+ retType = mergeTypes(LHS, RHS, true, UnqualifiedResult, true);
+ }
+ else
+ retType = mergeTypes(lbase->getResultType(), rbase->getResultType(), false,
+ Unqualified);
+ if (retType.isNull()) return QualType();
+
+ if (Unqualified)
+ retType = retType.getUnqualifiedType();
+
+ CanQualType LRetType = getCanonicalType(lbase->getResultType());
+ CanQualType RRetType = getCanonicalType(rbase->getResultType());
+ if (Unqualified) {
+ LRetType = LRetType.getUnqualifiedType();
+ RRetType = RRetType.getUnqualifiedType();
+ }
+
+ if (getCanonicalType(retType) != LRetType)
+ allLTypes = false;
+ if (getCanonicalType(retType) != RRetType)
+ allRTypes = false;
+
+ // FIXME: double check this
+ // FIXME: should we error if lbase->getRegParmAttr() != 0 &&
+ // rbase->getRegParmAttr() != 0 &&
+ // lbase->getRegParmAttr() != rbase->getRegParmAttr()?
+ FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
+ FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();
+
+ // Compatible functions must have compatible calling conventions
+ if (!isSameCallConv(lbaseInfo.getCC(), rbaseInfo.getCC()))
+ return QualType();
+
+ // Regparm is part of the calling convention.
+ if (lbaseInfo.getHasRegParm() != rbaseInfo.getHasRegParm())
+ return QualType();
+ if (lbaseInfo.getRegParm() != rbaseInfo.getRegParm())
+ return QualType();
+
+ if (lbaseInfo.getProducesResult() != rbaseInfo.getProducesResult())
+ return QualType();
+
+  // Function types which can return are preferred over those marked noreturn.
+ if (lbaseInfo.getNoReturn() && !rbaseInfo.getNoReturn())
+ allLTypes = false;
+ else if (!lbaseInfo.getNoReturn() && rbaseInfo.getNoReturn())
+ allRTypes = false;
+ // FIXME: some uses, e.g. conditional exprs, really want this to be 'both'.
+ bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
+
+ FunctionType::ExtInfo einfo = lbaseInfo.withNoReturn(NoReturn);
+
+ if (lproto && rproto) { // two C99 style function prototypes
+ assert(!lproto->hasExceptionSpec() && !rproto->hasExceptionSpec() &&
+ "C++ shouldn't be here");
+ unsigned lproto_nargs = lproto->getNumArgs();
+ unsigned rproto_nargs = rproto->getNumArgs();
+
+ // Compatible functions must have the same number of arguments
+ if (lproto_nargs != rproto_nargs)
+ return QualType();
+
+ // Variadic and non-variadic functions aren't compatible
+ if (lproto->isVariadic() != rproto->isVariadic())
+ return QualType();
+
+ if (lproto->getTypeQuals() != rproto->getTypeQuals())
+ return QualType();
+
+ if (LangOpts.ObjCAutoRefCount &&
+ !FunctionTypesMatchOnNSConsumedAttrs(rproto, lproto))
+ return QualType();
+
+ // Check argument compatibility
+ SmallVector<QualType, 10> types;
+ for (unsigned i = 0; i < lproto_nargs; i++) {
+ QualType largtype = lproto->getArgType(i).getUnqualifiedType();
+ QualType rargtype = rproto->getArgType(i).getUnqualifiedType();
+ QualType argtype = mergeFunctionArgumentTypes(largtype, rargtype,
+ OfBlockPointer,
+ Unqualified);
+ if (argtype.isNull()) return QualType();
+
+ if (Unqualified)
+ argtype = argtype.getUnqualifiedType();
+
+ types.push_back(argtype);
+ if (Unqualified) {
+ largtype = largtype.getUnqualifiedType();
+ rargtype = rargtype.getUnqualifiedType();
+ }
+
+ if (getCanonicalType(argtype) != getCanonicalType(largtype))
+ allLTypes = false;
+ if (getCanonicalType(argtype) != getCanonicalType(rargtype))
+ allRTypes = false;
+ }
+
+ if (allLTypes) return lhs;
+ if (allRTypes) return rhs;
+
+ FunctionProtoType::ExtProtoInfo EPI = lproto->getExtProtoInfo();
+ EPI.ExtInfo = einfo;
+ return getFunctionType(retType, types.begin(), types.size(), EPI);
+ }
+
+ if (lproto) allRTypes = false;
+ if (rproto) allLTypes = false;
+
+ const FunctionProtoType *proto = lproto ? lproto : rproto;
+ if (proto) {
+ assert(!proto->hasExceptionSpec() && "C++ shouldn't be here");
+ if (proto->isVariadic()) return QualType();
+ // Check that the types are compatible with the types that
+ // would result from default argument promotions (C99 6.7.5.3p15).
+ // The only types actually affected are promotable integer
+ // types and floats, which would be passed as a different
+ // type depending on whether the prototype is visible.
+ unsigned proto_nargs = proto->getNumArgs();
+ for (unsigned i = 0; i < proto_nargs; ++i) {
+ QualType argTy = proto->getArgType(i);
+
+ // Look at the promotion type of enum types, since that is the type used
+ // to pass enum values.
+ if (const EnumType *Enum = argTy->getAs<EnumType>())
+ argTy = Enum->getDecl()->getPromotionType();
+
+ if (argTy->isPromotableIntegerType() ||
+ getCanonicalType(argTy).getUnqualifiedType() == FloatTy)
+ return QualType();
+ }
+
+ if (allLTypes) return lhs;
+ if (allRTypes) return rhs;
+
+ FunctionProtoType::ExtProtoInfo EPI = proto->getExtProtoInfo();
+ EPI.ExtInfo = einfo;
+ return getFunctionType(retType, proto->arg_type_begin(),
+ proto->getNumArgs(), EPI);
+ }
+
+ if (allLTypes) return lhs;
+ if (allRTypes) return rhs;
+ return getFunctionNoProtoType(retType, einfo);
+}
+
+QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
+ bool OfBlockPointer,
+ bool Unqualified, bool BlockReturnType) {
+ // C++ [expr]: If an expression initially has the type "reference to T", the
+ // type is adjusted to "T" prior to any further analysis, the expression
+ // designates the object or function denoted by the reference, and the
+ // expression is an lvalue unless the reference is an rvalue reference and
+ // the expression is a function call (possibly inside parentheses).
+ assert(!LHS->getAs<ReferenceType>() && "LHS is a reference type?");
+ assert(!RHS->getAs<ReferenceType>() && "RHS is a reference type?");
+
+ if (Unqualified) {
+ LHS = LHS.getUnqualifiedType();
+ RHS = RHS.getUnqualifiedType();
+ }
+
+ QualType LHSCan = getCanonicalType(LHS),
+ RHSCan = getCanonicalType(RHS);
+
+ // If two types are identical, they are compatible.
+ if (LHSCan == RHSCan)
+ return LHS;
+
+ // If the qualifiers are different, the types aren't compatible... mostly.
+ Qualifiers LQuals = LHSCan.getLocalQualifiers();
+ Qualifiers RQuals = RHSCan.getLocalQualifiers();
+ if (LQuals != RQuals) {
+ // If any of these qualifiers are different, we have a type
+ // mismatch.
+ if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
+ LQuals.getAddressSpace() != RQuals.getAddressSpace() ||
+ LQuals.getObjCLifetime() != RQuals.getObjCLifetime())
+ return QualType();
+
+ // Exactly one GC qualifier difference is allowed: __strong is
+ // okay if the other type has no GC qualifier but is an Objective
+ // C object pointer (i.e. implicitly strong by default). We fix
+ // this by pretending that the unqualified type was actually
+ // qualified __strong.
+ Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
+ Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
+ assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
+
+ if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
+ return QualType();
+
+ if (GC_L == Qualifiers::Strong && RHSCan->isObjCObjectPointerType()) {
+ return mergeTypes(LHS, getObjCGCQualType(RHS, Qualifiers::Strong));
+ }
+ if (GC_R == Qualifiers::Strong && LHSCan->isObjCObjectPointerType()) {
+ return mergeTypes(getObjCGCQualType(LHS, Qualifiers::Strong), RHS);
+ }
+ return QualType();
+ }
+
+ // Okay, qualifiers are equal.
+
+ Type::TypeClass LHSClass = LHSCan->getTypeClass();
+ Type::TypeClass RHSClass = RHSCan->getTypeClass();
+
+ // We want to consider the two function types to be the same for these
+ // comparisons, just force one to the other.
+ if (LHSClass == Type::FunctionProto) LHSClass = Type::FunctionNoProto;
+ if (RHSClass == Type::FunctionProto) RHSClass = Type::FunctionNoProto;
+
+ // Same as above for arrays
+ if (LHSClass == Type::VariableArray || LHSClass == Type::IncompleteArray)
+ LHSClass = Type::ConstantArray;
+ if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray)
+ RHSClass = Type::ConstantArray;
+
+ // ObjCInterfaces are just specialized ObjCObjects.
+ if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject;
+ if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject;
+
+ // Canonicalize ExtVector -> Vector.
+ if (LHSClass == Type::ExtVector) LHSClass = Type::Vector;
+ if (RHSClass == Type::ExtVector) RHSClass = Type::Vector;
+
+ // If the canonical type classes don't match.
+ if (LHSClass != RHSClass) {
+ // C99 6.7.2.2p4: Each enumerated type shall be compatible with char,
+ // a signed integer type, or an unsigned integer type.
+ // Compatibility is based on the underlying type, not the promotion
+ // type.
+ if (const EnumType* ETy = LHS->getAs<EnumType>()) {
+ QualType TINT = ETy->getDecl()->getIntegerType();
+ if (!TINT.isNull() && hasSameType(TINT, RHSCan.getUnqualifiedType()))
+ return RHS;
+ }
+ if (const EnumType* ETy = RHS->getAs<EnumType>()) {
+ QualType TINT = ETy->getDecl()->getIntegerType();
+ if (!TINT.isNull() && hasSameType(TINT, LHSCan.getUnqualifiedType()))
+ return LHS;
+ }
+ // allow block pointer type to match an 'id' type.
+ if (OfBlockPointer && !BlockReturnType) {
+ if (LHS->isObjCIdType() && RHS->isBlockPointerType())
+ return LHS;
+ if (RHS->isObjCIdType() && LHS->isBlockPointerType())
+ return RHS;
+ }
+
+ return QualType();
+ }
+
+ // The canonical type classes match.
+ switch (LHSClass) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Non-canonical and dependent types shouldn't get here");
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::MemberPointer:
+ llvm_unreachable("C++ should never be in mergeTypes");
+
+ case Type::ObjCInterface:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::FunctionProto:
+ case Type::ExtVector:
+ llvm_unreachable("Types are eliminated above");
+
+ case Type::Pointer:
+ {
+ // Merge two pointer types, while trying to preserve typedef info
+ QualType LHSPointee = LHS->getAs<PointerType>()->getPointeeType();
+ QualType RHSPointee = RHS->getAs<PointerType>()->getPointeeType();
+ if (Unqualified) {
+ LHSPointee = LHSPointee.getUnqualifiedType();
+ RHSPointee = RHSPointee.getUnqualifiedType();
+ }
+ QualType ResultType = mergeTypes(LHSPointee, RHSPointee, false,
+ Unqualified);
+ if (ResultType.isNull()) return QualType();
+ if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType))
+ return LHS;
+ if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType))
+ return RHS;
+ return getPointerType(ResultType);
+ }
+ case Type::BlockPointer:
+ {
+ // Merge two block pointer types, while trying to preserve typedef info
+ QualType LHSPointee = LHS->getAs<BlockPointerType>()->getPointeeType();
+ QualType RHSPointee = RHS->getAs<BlockPointerType>()->getPointeeType();
+ if (Unqualified) {
+ LHSPointee = LHSPointee.getUnqualifiedType();
+ RHSPointee = RHSPointee.getUnqualifiedType();
+ }
+ QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer,
+ Unqualified);
+ if (ResultType.isNull()) return QualType();
+ if (getCanonicalType(LHSPointee) == getCanonicalType(ResultType))
+ return LHS;
+ if (getCanonicalType(RHSPointee) == getCanonicalType(ResultType))
+ return RHS;
+ return getBlockPointerType(ResultType);
+ }
+ case Type::Atomic:
+ {
+    // Merge two atomic types, while trying to preserve typedef info
+ QualType LHSValue = LHS->getAs<AtomicType>()->getValueType();
+ QualType RHSValue = RHS->getAs<AtomicType>()->getValueType();
+ if (Unqualified) {
+ LHSValue = LHSValue.getUnqualifiedType();
+ RHSValue = RHSValue.getUnqualifiedType();
+ }
+ QualType ResultType = mergeTypes(LHSValue, RHSValue, false,
+ Unqualified);
+ if (ResultType.isNull()) return QualType();
+ if (getCanonicalType(LHSValue) == getCanonicalType(ResultType))
+ return LHS;
+ if (getCanonicalType(RHSValue) == getCanonicalType(ResultType))
+ return RHS;
+ return getAtomicType(ResultType);
+ }
+ case Type::ConstantArray:
+ {
+ const ConstantArrayType* LCAT = getAsConstantArrayType(LHS);
+ const ConstantArrayType* RCAT = getAsConstantArrayType(RHS);
+ if (LCAT && RCAT && RCAT->getSize() != LCAT->getSize())
+ return QualType();
+
+ QualType LHSElem = getAsArrayType(LHS)->getElementType();
+ QualType RHSElem = getAsArrayType(RHS)->getElementType();
+ if (Unqualified) {
+ LHSElem = LHSElem.getUnqualifiedType();
+ RHSElem = RHSElem.getUnqualifiedType();
+ }
+
+ QualType ResultType = mergeTypes(LHSElem, RHSElem, false, Unqualified);
+ if (ResultType.isNull()) return QualType();
+ if (LCAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType))
+ return LHS;
+ if (RCAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType))
+ return RHS;
+ if (LCAT) return getConstantArrayType(ResultType, LCAT->getSize(),
+ ArrayType::ArraySizeModifier(), 0);
+ if (RCAT) return getConstantArrayType(ResultType, RCAT->getSize(),
+ ArrayType::ArraySizeModifier(), 0);
+ const VariableArrayType* LVAT = getAsVariableArrayType(LHS);
+ const VariableArrayType* RVAT = getAsVariableArrayType(RHS);
+ if (LVAT && getCanonicalType(LHSElem) == getCanonicalType(ResultType))
+ return LHS;
+ if (RVAT && getCanonicalType(RHSElem) == getCanonicalType(ResultType))
+ return RHS;
+ if (LVAT) {
+ // FIXME: This isn't correct! But tricky to implement because
+ // the array's size has to be the size of LHS, but the type
+ // has to be different.
+ return LHS;
+ }
+ if (RVAT) {
+ // FIXME: This isn't correct! But tricky to implement because
+ // the array's size has to be the size of RHS, but the type
+ // has to be different.
+ return RHS;
+ }
+ if (getCanonicalType(LHSElem) == getCanonicalType(ResultType)) return LHS;
+ if (getCanonicalType(RHSElem) == getCanonicalType(ResultType)) return RHS;
+ return getIncompleteArrayType(ResultType,
+ ArrayType::ArraySizeModifier(), 0);
+ }
+ case Type::FunctionNoProto:
+ return mergeFunctionTypes(LHS, RHS, OfBlockPointer, Unqualified);
+ case Type::Record:
+ case Type::Enum:
+ return QualType();
+ case Type::Builtin:
+ // Only exactly equal builtin types are compatible, which is tested above.
+ return QualType();
+ case Type::Complex:
+ // Distinct complex types are incompatible.
+ return QualType();
+ case Type::Vector:
+ // FIXME: The merged type should be an ExtVector!
+ if (areCompatVectorTypes(LHSCan->getAs<VectorType>(),
+ RHSCan->getAs<VectorType>()))
+ return LHS;
+ return QualType();
+ case Type::ObjCObject: {
+ // Check if the types are assignment compatible.
+ // FIXME: This should be type compatibility, e.g. whether
+ // "LHS x; RHS x;" at global scope is legal.
+ const ObjCObjectType* LHSIface = LHS->getAs<ObjCObjectType>();
+ const ObjCObjectType* RHSIface = RHS->getAs<ObjCObjectType>();
+ if (canAssignObjCInterfaces(LHSIface, RHSIface))
+ return LHS;
+
+ return QualType();
+ }
+ case Type::ObjCObjectPointer: {
+ if (OfBlockPointer) {
+ if (canAssignObjCInterfacesInBlockPointer(
+ LHS->getAs<ObjCObjectPointerType>(),
+ RHS->getAs<ObjCObjectPointerType>(),
+ BlockReturnType))
+ return LHS;
+ return QualType();
+ }
+ if (canAssignObjCInterfaces(LHS->getAs<ObjCObjectPointerType>(),
+ RHS->getAs<ObjCObjectPointerType>()))
+ return LHS;
+
+ return QualType();
+ }
+ }
+
+ llvm_unreachable("Invalid Type::Class!");
+}
+
+bool ASTContext::FunctionTypesMatchOnNSConsumedAttrs(
+ const FunctionProtoType *FromFunctionType,
+ const FunctionProtoType *ToFunctionType) {
+ if (FromFunctionType->hasAnyConsumedArgs() !=
+ ToFunctionType->hasAnyConsumedArgs())
+ return false;
+ FunctionProtoType::ExtProtoInfo FromEPI =
+ FromFunctionType->getExtProtoInfo();
+ FunctionProtoType::ExtProtoInfo ToEPI =
+ ToFunctionType->getExtProtoInfo();
+ if (FromEPI.ConsumedArguments && ToEPI.ConsumedArguments)
+ for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumArgs();
+ ArgIdx != NumArgs; ++ArgIdx) {
+ if (FromEPI.ConsumedArguments[ArgIdx] !=
+ ToEPI.ConsumedArguments[ArgIdx])
+ return false;
+ }
+ return true;
+}
+
+/// mergeObjCGCQualifiers - This routine merges the ObjC GC attributes of 'LHS'
+/// and 'RHS' and returns the merged version, including for function
+/// return types.
+QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
+ QualType LHSCan = getCanonicalType(LHS),
+ RHSCan = getCanonicalType(RHS);
+ // If two types are identical, they are compatible.
+ if (LHSCan == RHSCan)
+ return LHS;
+ if (RHSCan->isFunctionType()) {
+ if (!LHSCan->isFunctionType())
+ return QualType();
+ QualType OldReturnType =
+ cast<FunctionType>(RHSCan.getTypePtr())->getResultType();
+ QualType NewReturnType =
+ cast<FunctionType>(LHSCan.getTypePtr())->getResultType();
+ QualType ResReturnType =
+ mergeObjCGCQualifiers(NewReturnType, OldReturnType);
+ if (ResReturnType.isNull())
+ return QualType();
+ if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) {
+ // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo();
+ // In either case, use OldReturnType to build the new function type.
+ const FunctionType *F = LHS->getAs<FunctionType>();
+      if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(F)) {
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.ExtInfo = getFunctionExtInfo(LHS);
+ QualType ResultType
+ = getFunctionType(OldReturnType, FPT->arg_type_begin(),
+ FPT->getNumArgs(), EPI);
+ return ResultType;
+ }
+ }
+ return QualType();
+ }
+
+ // If the qualifiers are different, the types can still be merged.
+ Qualifiers LQuals = LHSCan.getLocalQualifiers();
+ Qualifiers RQuals = RHSCan.getLocalQualifiers();
+ if (LQuals != RQuals) {
+ // If any of these qualifiers are different, we have a type mismatch.
+ if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
+ LQuals.getAddressSpace() != RQuals.getAddressSpace())
+ return QualType();
+
+ // Exactly one GC qualifier difference is allowed: __strong is
+ // okay if the other type has no GC qualifier but is an Objective
+ // C object pointer (i.e. implicitly strong by default). We fix
+ // this by pretending that the unqualified type was actually
+ // qualified __strong.
+ Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
+ Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
+ assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
+
+ if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
+ return QualType();
+
+ if (GC_L == Qualifiers::Strong)
+ return LHS;
+ if (GC_R == Qualifiers::Strong)
+ return RHS;
+ return QualType();
+ }
+
+ if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
+ QualType LHSBaseQT = LHS->getAs<ObjCObjectPointerType>()->getPointeeType();
+ QualType RHSBaseQT = RHS->getAs<ObjCObjectPointerType>()->getPointeeType();
+ QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT);
+ if (ResQT == LHSBaseQT)
+ return LHS;
+ if (ResQT == RHSBaseQT)
+ return RHS;
+ }
+ return QualType();
+}
+
+//===----------------------------------------------------------------------===//
+// Integer Predicates
+//===----------------------------------------------------------------------===//
+
+unsigned ASTContext::getIntWidth(QualType T) const {
+ if (const EnumType *ET = dyn_cast<EnumType>(T))
+ T = ET->getDecl()->getIntegerType();
+ if (T->isBooleanType())
+ return 1;
+ // For builtin types, just use the standard type sizing method
+ return (unsigned)getTypeSize(T);
+}
+
+QualType ASTContext::getCorrespondingUnsignedType(QualType T) {
+ assert(T->hasSignedIntegerRepresentation() && "Unexpected type");
+
+ // Turn <4 x signed int> -> <4 x unsigned int>
+ if (const VectorType *VTy = T->getAs<VectorType>())
+ return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
+ VTy->getNumElements(), VTy->getVectorKind());
+
+ // For enums, we return the unsigned version of the base type.
+ if (const EnumType *ETy = T->getAs<EnumType>())
+ T = ETy->getDecl()->getIntegerType();
+
+ const BuiltinType *BTy = T->getAs<BuiltinType>();
+ assert(BTy && "Unexpected signed integer type");
+ switch (BTy->getKind()) {
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return UnsignedCharTy;
+ case BuiltinType::Short:
+ return UnsignedShortTy;
+ case BuiltinType::Int:
+ return UnsignedIntTy;
+ case BuiltinType::Long:
+ return UnsignedLongTy;
+ case BuiltinType::LongLong:
+ return UnsignedLongLongTy;
+ case BuiltinType::Int128:
+ return UnsignedInt128Ty;
+ default:
+ llvm_unreachable("Unexpected signed integer type");
+ }
+}
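+
+// For example, this maps 'int' to 'unsigned int', '<4 x int>' to
+// '<4 x unsigned int>', and an enum to the unsigned version of its underlying
+// integer type.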
+
+ASTMutationListener::~ASTMutationListener() { }
+
+
+//===----------------------------------------------------------------------===//
+// Builtin Type Computation
+//===----------------------------------------------------------------------===//
+
+/// DecodeTypeFromStr - This decodes one type descriptor from Str, advancing the
+/// pointer over the consumed characters. This returns the resultant type. If
+/// AllowTypeModifiers is false then modifiers like * are not parsed, just basic
+/// types. This allows "v2i*" to be parsed as a pointer to a v2i instead of
+/// a vector of "i*".
+///
+/// RequiresICE is filled in on return to indicate whether the value is required
+/// to be an Integer Constant Expression.
+static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
+ ASTContext::GetBuiltinTypeError &Error,
+ bool &RequiresICE,
+ bool AllowTypeModifiers) {
+ // Modifiers.
+ int HowLong = 0;
+ bool Signed = false, Unsigned = false;
+ RequiresICE = false;
+
+ // Read the prefixed modifiers first.
+ bool Done = false;
+ while (!Done) {
+ switch (*Str++) {
+ default: Done = true; --Str; break;
+ case 'I':
+ RequiresICE = true;
+ break;
+ case 'S':
+ assert(!Unsigned && "Can't use both 'S' and 'U' modifiers!");
+ assert(!Signed && "Can't use 'S' modifier multiple times!");
+ Signed = true;
+ break;
+ case 'U':
+ assert(!Signed && "Can't use both 'S' and 'U' modifiers!");
+ assert(!Unsigned && "Can't use 'S' modifier multiple times!");
+ Unsigned = true;
+ break;
+ case 'L':
+ assert(HowLong <= 2 && "Can't have LLLL modifier");
+ ++HowLong;
+ break;
+ }
+ }
+
+ QualType Type;
+
+ // Read the base type.
+ switch (*Str++) {
+ default: llvm_unreachable("Unknown builtin type letter!");
+ case 'v':
+ assert(HowLong == 0 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'v'!");
+ Type = Context.VoidTy;
+ break;
+ case 'f':
+ assert(HowLong == 0 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'f'!");
+ Type = Context.FloatTy;
+ break;
+ case 'd':
+ assert(HowLong < 2 && !Signed && !Unsigned &&
+ "Bad modifiers used with 'd'!");
+ if (HowLong)
+ Type = Context.LongDoubleTy;
+ else
+ Type = Context.DoubleTy;
+ break;
+ case 's':
+ assert(HowLong == 0 && "Bad modifiers used with 's'!");
+ if (Unsigned)
+ Type = Context.UnsignedShortTy;
+ else
+ Type = Context.ShortTy;
+ break;
+ case 'i':
+ if (HowLong == 3)
+ Type = Unsigned ? Context.UnsignedInt128Ty : Context.Int128Ty;
+ else if (HowLong == 2)
+ Type = Unsigned ? Context.UnsignedLongLongTy : Context.LongLongTy;
+ else if (HowLong == 1)
+ Type = Unsigned ? Context.UnsignedLongTy : Context.LongTy;
+ else
+ Type = Unsigned ? Context.UnsignedIntTy : Context.IntTy;
+ break;
+ case 'c':
+ assert(HowLong == 0 && "Bad modifiers used with 'c'!");
+ if (Signed)
+ Type = Context.SignedCharTy;
+ else if (Unsigned)
+ Type = Context.UnsignedCharTy;
+ else
+ Type = Context.CharTy;
+ break;
+ case 'b': // boolean
+ assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'b'!");
+ Type = Context.BoolTy;
+ break;
+ case 'z': // size_t.
+ assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'z'!");
+ Type = Context.getSizeType();
+ break;
+ case 'F':
+ Type = Context.getCFConstantStringType();
+ break;
+ case 'G':
+ Type = Context.getObjCIdType();
+ break;
+ case 'H':
+ Type = Context.getObjCSelType();
+ break;
+ case 'a':
+ Type = Context.getBuiltinVaListType();
+ assert(!Type.isNull() && "builtin va list type not initialized!");
+ break;
+ case 'A':
+ // This is a "reference" to a va_list; however, what exactly
+ // this means depends on how va_list is defined. There are two
+ // different kinds of va_list: ones passed by value, and ones
+ // passed by reference. An example of a by-value va_list is
+ // x86, where va_list is a char*. An example of by-ref va_list
+ // is x86-64, where va_list is a __va_list_tag[1]. For x86,
+ // we want this argument to be a char*&; for x86-64, we want
+ // it to be a __va_list_tag*.
+ Type = Context.getBuiltinVaListType();
+ assert(!Type.isNull() && "builtin va list type not initialized!");
+ if (Type->isArrayType())
+ Type = Context.getArrayDecayedType(Type);
+ else
+ Type = Context.getLValueReferenceType(Type);
+ break;
+ case 'V': {
+ char *End;
+ unsigned NumElements = strtoul(Str, &End, 10);
+ assert(End != Str && "Missing vector size");
+ Str = End;
+
+ QualType ElementType = DecodeTypeFromStr(Str, Context, Error,
+ RequiresICE, false);
+ assert(!RequiresICE && "Can't require vector ICE");
+
+ // TODO: No way to make AltiVec vectors in builtins yet.
+ Type = Context.getVectorType(ElementType, NumElements,
+ VectorType::GenericVector);
+ break;
+ }
+ case 'X': {
+ QualType ElementType = DecodeTypeFromStr(Str, Context, Error, RequiresICE,
+ false);
+ assert(!RequiresICE && "Can't require complex ICE");
+ Type = Context.getComplexType(ElementType);
+ break;
+ }
+ case 'Y' : {
+ Type = Context.getPointerDiffType();
+ break;
+ }
+ case 'P':
+ Type = Context.getFILEType();
+ if (Type.isNull()) {
+ Error = ASTContext::GE_Missing_stdio;
+ return QualType();
+ }
+ break;
+ case 'J':
+ if (Signed)
+ Type = Context.getsigjmp_bufType();
+ else
+ Type = Context.getjmp_bufType();
+
+ if (Type.isNull()) {
+ Error = ASTContext::GE_Missing_setjmp;
+ return QualType();
+ }
+ break;
+ case 'K':
+ assert(HowLong == 0 && !Signed && !Unsigned && "Bad modifiers for 'K'!");
+ Type = Context.getucontext_tType();
+
+ if (Type.isNull()) {
+ Error = ASTContext::GE_Missing_ucontext;
+ return QualType();
+ }
+ break;
+ }
+
+ // If there are modifiers and if we're allowed to parse them, go for it.
+ Done = !AllowTypeModifiers;
+ while (!Done) {
+ switch (char c = *Str++) {
+ default: Done = true; --Str; break;
+ case '*':
+ case '&': {
+ // Both pointers and references can have their pointee types
+ // qualified with an address space.
+ char *End;
+ unsigned AddrSpace = strtoul(Str, &End, 10);
+ if (End != Str && AddrSpace != 0) {
+ Type = Context.getAddrSpaceQualType(Type, AddrSpace);
+ Str = End;
+ }
+ if (c == '*')
+ Type = Context.getPointerType(Type);
+ else
+ Type = Context.getLValueReferenceType(Type);
+ break;
+ }
+ // FIXME: There's no way to have a built-in with an rvalue ref arg.
+ case 'C':
+ Type = Type.withConst();
+ break;
+ case 'D':
+ Type = Context.getVolatileType(Type);
+ break;
+ case 'R':
+ Type = Type.withRestrict();
+ break;
+ }
+ }
+
+ assert((!RequiresICE || Type->isIntegralOrEnumerationType()) &&
+ "Integer constant 'I' type must be an integer");
+
+ return Type;
+}
+
+/// GetBuiltinType - Return the type for the specified builtin.
+QualType ASTContext::GetBuiltinType(unsigned Id,
+ GetBuiltinTypeError &Error,
+ unsigned *IntegerConstantArgs) const {
+ const char *TypeStr = BuiltinInfo.GetTypeString(Id);
+
+ SmallVector<QualType, 8> ArgTypes;
+
+ bool RequiresICE = false;
+ Error = GE_None;
+ QualType ResType = DecodeTypeFromStr(TypeStr, *this, Error,
+ RequiresICE, true);
+ if (Error != GE_None)
+ return QualType();
+
+ assert(!RequiresICE && "Result of intrinsic cannot be required to be an ICE");
+
+ while (TypeStr[0] && TypeStr[0] != '.') {
+ QualType Ty = DecodeTypeFromStr(TypeStr, *this, Error, RequiresICE, true);
+ if (Error != GE_None)
+ return QualType();
+
+ // If this argument is required to be an IntegerConstantExpression and the
+ // caller cares, fill in the bitmask we return.
+ if (RequiresICE && IntegerConstantArgs)
+ *IntegerConstantArgs |= 1 << ArgTypes.size();
+
+ // Do array -> pointer decay. The builtin should use the decayed type.
+ if (Ty->isArrayType())
+ Ty = getArrayDecayedType(Ty);
+
+ ArgTypes.push_back(Ty);
+ }
+
+ assert((TypeStr[0] != '.' || TypeStr[1] == 0) &&
+ "'.' should only occur at end of builtin type list!");
+
+ FunctionType::ExtInfo EI;
+ if (BuiltinInfo.isNoReturn(Id)) EI = EI.withNoReturn(true);
+
+ bool Variadic = (TypeStr[0] == '.');
+
+ // We really shouldn't be making a no-proto type here, especially in C++.
+ if (ArgTypes.empty() && Variadic)
+ return getFunctionNoProtoType(ResType, EI);
+
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExtInfo = EI;
+ EPI.Variadic = Variadic;
+
+ return getFunctionType(ResType, ArgTypes.data(), ArgTypes.size(), EPI);
+}
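As an illustration of the decoding scheme implemented above, the standalone sketch below (hypothetical, not part of the imported file) mimics the same character-walking pattern on a toy subset of the codes handled by DecodeTypeFromStr ('c', 'b', 'z', plus the '*', 'C', 'D', 'R' modifier loop) and produces a textual description instead of a QualType.

#include <cassert>
#include <string>

// Illustrative only: decode a toy subset of the builtin signature codes
// handled above into a human-readable description.
static std::string DecodeToyTypeFromStr(const char *&Str) {
  std::string Type;
  switch (*Str++) {
  case 'c': Type = "char";   break;
  case 'b': Type = "bool";   break;
  case 'z': Type = "size_t"; break;
  default: assert(0 && "unhandled toy type code");
  }

  // Modifier loop, mirroring the '*' / 'C' / 'D' / 'R' handling above.
  bool Done = false;
  while (!Done) {
    switch (*Str++) {
    default:  Done = true; --Str;        break;
    case '*': Type += " *";              break;
    case 'C': Type = "const " + Type;    break;
    case 'D': Type = "volatile " + Type; break;
    case 'R': Type += " restrict";       break;
    }
  }
  return Type;
}

// Example: the fragment "cC*" decodes to "const char *", much as the real
// decoder would build the corresponding QualType.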
+
+GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) {
+ GVALinkage External = GVA_StrongExternal;
+
+ Linkage L = FD->getLinkage();
+ switch (L) {
+ case NoLinkage:
+ case InternalLinkage:
+ case UniqueExternalLinkage:
+ return GVA_Internal;
+
+ case ExternalLinkage:
+ switch (FD->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ External = GVA_StrongExternal;
+ break;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return GVA_ExplicitTemplateInstantiation;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ImplicitInstantiation:
+ External = GVA_TemplateInstantiation;
+ break;
+ }
+ }
+
+ if (!FD->isInlined())
+ return External;
+
+ if (!getLangOpts().CPlusPlus || FD->hasAttr<GNUInlineAttr>()) {
+ // GNU or C99 inline semantics. Determine whether this symbol should be
+ // externally visible.
+ if (FD->isInlineDefinitionExternallyVisible())
+ return External;
+
+ // C99 inline semantics, where the symbol is not externally visible.
+ return GVA_C99Inline;
+ }
+
+ // C++0x [temp.explicit]p9:
+ // [ Note: The intent is that an inline function that is the subject of
+ // an explicit instantiation declaration will still be implicitly
+ // instantiated when used so that the body can be considered for
+ // inlining, but that no out-of-line copy of the inline function would be
+ // generated in the translation unit. -- end note ]
+ if (FD->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration)
+ return GVA_C99Inline;
+
+ return GVA_CXXInline;
+}
+
+GVALinkage ASTContext::GetGVALinkageForVariable(const VarDecl *VD) {
+ // If this is a static data member, compute the kind of template
+ // specialization. Otherwise, this variable is not part of a
+ // template.
+ TemplateSpecializationKind TSK = TSK_Undeclared;
+ if (VD->isStaticDataMember())
+ TSK = VD->getTemplateSpecializationKind();
+
+ Linkage L = VD->getLinkage();
+ if (L == ExternalLinkage && getLangOpts().CPlusPlus &&
+ VD->getType()->getLinkage() == UniqueExternalLinkage)
+ L = UniqueExternalLinkage;
+
+ switch (L) {
+ case NoLinkage:
+ case InternalLinkage:
+ case UniqueExternalLinkage:
+ return GVA_Internal;
+
+ case ExternalLinkage:
+ switch (TSK) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ return GVA_StrongExternal;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ llvm_unreachable("Variable should not be instantiated");
+      // If this case were reachable, it would be treated like any other
+      // instantiation.
+
+ case TSK_ExplicitInstantiationDefinition:
+ return GVA_ExplicitTemplateInstantiation;
+
+ case TSK_ImplicitInstantiation:
+ return GVA_TemplateInstantiation;
+ }
+ }
+
+ llvm_unreachable("Invalid Linkage!");
+}
+
+bool ASTContext::DeclMustBeEmitted(const Decl *D) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (!VD->isFileVarDecl())
+ return false;
+ } else if (!isa<FunctionDecl>(D))
+ return false;
+
+ // Weak references don't produce any output by themselves.
+ if (D->hasAttr<WeakRefAttr>())
+ return false;
+
+ // Aliases and used decls are required.
+ if (D->hasAttr<AliasAttr>() || D->hasAttr<UsedAttr>())
+ return true;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // Forward declarations aren't required.
+ if (!FD->doesThisDeclarationHaveABody())
+ return FD->doesDeclarationForceExternallyVisibleDefinition();
+
+ // Constructors and destructors are required.
+ if (FD->hasAttr<ConstructorAttr>() || FD->hasAttr<DestructorAttr>())
+ return true;
+
+ // The key function for a class is required.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ const CXXRecordDecl *RD = MD->getParent();
+ if (MD->isOutOfLine() && RD->isDynamicClass()) {
+ const CXXMethodDecl *KeyFunc = getKeyFunction(RD);
+ if (KeyFunc && KeyFunc->getCanonicalDecl() == MD->getCanonicalDecl())
+ return true;
+ }
+ }
+
+ GVALinkage Linkage = GetGVALinkageForFunction(FD);
+
+ // static, static inline, always_inline, and extern inline functions can
+ // always be deferred. Normal inline functions can be deferred in C99/C++.
+ // Implicit template instantiations can also be deferred in C++.
+ if (Linkage == GVA_Internal || Linkage == GVA_C99Inline ||
+ Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation)
+ return false;
+ return true;
+ }
+
+ const VarDecl *VD = cast<VarDecl>(D);
+ assert(VD->isFileVarDecl() && "Expected file scoped var");
+
+ if (VD->isThisDeclarationADefinition() == VarDecl::DeclarationOnly)
+ return false;
+
+ // Structs that have non-trivial constructors or destructors are required.
+
+ // FIXME: Handle references.
+ // FIXME: Be more selective about which constructors we care about.
+ if (const RecordType *RT = VD->getType()->getAs<RecordType>()) {
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (RD->hasDefinition() && !(RD->hasTrivialDefaultConstructor() &&
+ RD->hasTrivialCopyConstructor() &&
+ RD->hasTrivialMoveConstructor() &&
+ RD->hasTrivialDestructor()))
+ return true;
+ }
+ }
+
+ GVALinkage L = GetGVALinkageForVariable(VD);
+ if (L == GVA_Internal || L == GVA_TemplateInstantiation) {
+ if (!(VD->getInit() && VD->getInit()->HasSideEffects(*this)))
+ return false;
+ }
+
+ return true;
+}
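As a concrete illustration of the rules above, the hypothetical file-scope declarations below are annotated with the answer DeclMustBeEmitted would give for each (C++ mode assumed; this sketch is not part of the imported file).

// Illustrative classification under DeclMustBeEmitted (C++ mode assumed).
extern int g_decl;                      // declaration only            -> false
int g_def = 42;                         // strong external definition  -> true
static int s_unused = 0;                // internal, initializer has
                                        // no side effects             -> false
__attribute__((used)) static int s_keep = 1;        // UsedAttr        -> true
inline int twice(int x) { return 2 * x; }           // GVA_CXXInline   -> false
__attribute__((constructor)) static void init() {}  // ConstructorAttr -> true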
+
+CallingConv ASTContext::getDefaultMethodCallConv() {
+ // Pass through to the C++ ABI object
+ return ABI->getDefaultMethodCallConv();
+}
+
+bool ASTContext::isNearlyEmpty(const CXXRecordDecl *RD) const {
+ // Pass through to the C++ ABI object
+ return ABI->isNearlyEmpty(RD);
+}
+
+MangleContext *ASTContext::createMangleContext() {
+ switch (Target->getCXXABI()) {
+ case CXXABI_ARM:
+ case CXXABI_Itanium:
+ return createItaniumMangleContext(*this, getDiagnostics());
+ case CXXABI_Microsoft:
+ return createMicrosoftMangleContext(*this, getDiagnostics());
+ }
+ llvm_unreachable("Unsupported ABI");
+}
+
+CXXABI::~CXXABI() {}
+
+size_t ASTContext::getSideTableAllocatedMemory() const {
+ return ASTRecordLayouts.getMemorySize()
+ + llvm::capacity_in_bytes(ObjCLayouts)
+ + llvm::capacity_in_bytes(KeyFunctions)
+ + llvm::capacity_in_bytes(ObjCImpls)
+ + llvm::capacity_in_bytes(BlockVarCopyInits)
+ + llvm::capacity_in_bytes(DeclAttrs)
+ + llvm::capacity_in_bytes(InstantiatedFromStaticDataMember)
+ + llvm::capacity_in_bytes(InstantiatedFromUsingDecl)
+ + llvm::capacity_in_bytes(InstantiatedFromUsingShadowDecl)
+ + llvm::capacity_in_bytes(InstantiatedFromUnnamedFieldDecl)
+ + llvm::capacity_in_bytes(OverriddenMethods)
+ + llvm::capacity_in_bytes(Types)
+ + llvm::capacity_in_bytes(VariableArrayTypes)
+ + llvm::capacity_in_bytes(ClassScopeSpecializationPattern);
+}
+
+unsigned ASTContext::getLambdaManglingNumber(CXXMethodDecl *CallOperator) {
+ CXXRecordDecl *Lambda = CallOperator->getParent();
+ return LambdaMangleContexts[Lambda->getDeclContext()]
+ .getManglingNumber(CallOperator);
+}
+
+
+void ASTContext::setParameterIndex(const ParmVarDecl *D, unsigned int index) {
+ ParamIndices[D] = index;
+}
+
+unsigned ASTContext::getParameterIndex(const ParmVarDecl *D) const {
+ ParameterIndexTable::const_iterator I = ParamIndices.find(D);
+  assert(I != ParamIndices.end() &&
+         "ParamIndices lacks entry set by ParmVarDecl");
+ return I->second;
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp b/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp
new file mode 100644
index 0000000..ca4fe26
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ASTDiagnostic.cpp
@@ -0,0 +1,331 @@
+//===--- ASTDiagnostic.cpp - Diagnostic Printing Hooks for AST Nodes ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a diagnostic formatting hook for AST elements.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTDiagnostic.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Type.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+// Returns a desugared version of the QualType, and marks ShouldAKA as true
+// whenever we remove significant sugar from the type.
+static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
+ QualifierCollector QC;
+
+ while (true) {
+ const Type *Ty = QC.strip(QT);
+
+ // Don't aka just because we saw an elaborated type...
+ if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(Ty)) {
+ QT = ET->desugar();
+ continue;
+ }
+ // ... or a paren type ...
+ if (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
+ QT = PT->desugar();
+ continue;
+ }
+ // ...or a substituted template type parameter ...
+ if (const SubstTemplateTypeParmType *ST =
+ dyn_cast<SubstTemplateTypeParmType>(Ty)) {
+ QT = ST->desugar();
+ continue;
+ }
+ // ...or an attributed type...
+ if (const AttributedType *AT = dyn_cast<AttributedType>(Ty)) {
+ QT = AT->desugar();
+ continue;
+ }
+ // ... or an auto type.
+ if (const AutoType *AT = dyn_cast<AutoType>(Ty)) {
+ if (!AT->isSugared())
+ break;
+ QT = AT->desugar();
+ continue;
+ }
+
+ // Don't desugar template specializations, unless it's an alias template.
+ if (const TemplateSpecializationType *TST
+ = dyn_cast<TemplateSpecializationType>(Ty))
+ if (!TST->isTypeAlias())
+ break;
+
+ // Don't desugar magic Objective-C types.
+ if (QualType(Ty,0) == Context.getObjCIdType() ||
+ QualType(Ty,0) == Context.getObjCClassType() ||
+ QualType(Ty,0) == Context.getObjCSelType() ||
+ QualType(Ty,0) == Context.getObjCProtoType())
+ break;
+
+ // Don't desugar va_list.
+ if (QualType(Ty,0) == Context.getBuiltinVaListType())
+ break;
+
+ // Otherwise, do a single-step desugar.
+ QualType Underlying;
+ bool IsSugar = false;
+ switch (Ty->getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Base)
+#define TYPE(Class, Base) \
+    case Type::Class: { \
+      const Class##Type *CTy = cast<Class##Type>(Ty); \
+      if (CTy->isSugared()) { \
+        IsSugar = true; \
+        Underlying = CTy->desugar(); \
+      } \
+      break; \
+    }
+#include "clang/AST/TypeNodes.def"
+ }
+
+ // If it wasn't sugared, we're done.
+ if (!IsSugar)
+ break;
+
+    // If the desugared type is a vector type, we don't want to expand it;
+    // it would turn into an attribute mess. People want their "vec4".
+ if (isa<VectorType>(Underlying))
+ break;
+
+ // Don't desugar through the primary typedef of an anonymous type.
+ if (const TagType *UTT = Underlying->getAs<TagType>())
+ if (const TypedefType *QTT = dyn_cast<TypedefType>(QT))
+ if (UTT->getDecl()->getTypedefNameForAnonDecl() == QTT->getDecl())
+ break;
+
+ // Record that we actually looked through an opaque type here.
+ ShouldAKA = true;
+ QT = Underlying;
+ }
+
+ // If we have a pointer-like type, desugar the pointee as well.
+ // FIXME: Handle other pointer-like types.
+ if (const PointerType *Ty = QT->getAs<PointerType>()) {
+ QT = Context.getPointerType(Desugar(Context, Ty->getPointeeType(),
+ ShouldAKA));
+ } else if (const LValueReferenceType *Ty = QT->getAs<LValueReferenceType>()) {
+ QT = Context.getLValueReferenceType(Desugar(Context, Ty->getPointeeType(),
+ ShouldAKA));
+ } else if (const RValueReferenceType *Ty = QT->getAs<RValueReferenceType>()) {
+ QT = Context.getRValueReferenceType(Desugar(Context, Ty->getPointeeType(),
+ ShouldAKA));
+ }
+
+ return QC.apply(Context, QT);
+}
+
+/// \brief Convert the given type to a string suitable for printing as part of
+/// a diagnostic.
+///
+/// There are four main criteria when determining whether we should have an
+/// a.k.a. clause when pretty-printing a type:
+///
+/// 1) Some types provide very minimal sugar that doesn't impede the
+/// user's understanding --- for example, elaborated type
+/// specifiers. If this is all the sugar we see, we don't want an
+/// a.k.a. clause.
+/// 2) Some types are technically sugared but are much more familiar
+/// when seen in their sugared form --- for example, va_list,
+/// vector types, and the magic Objective C types. We don't
+/// want to desugar these, even if we do produce an a.k.a. clause.
+/// 3) Some types may have already been desugared previously in this
+/// diagnostic; if so, producing another "aka" would just be clutter.
+/// 4) Two different types within the same diagnostic have the same output
+/// string. In this case, force an a.k.a. clause with the desugared type
+/// when doing so will provide additional information.
+///
+/// \param Context the context in which the type was allocated
+/// \param Ty the type to print
+/// \param QualTypeVals pointer values to QualTypes which are used in the
+/// diagnostic message
+static std::string
+ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
+ const DiagnosticsEngine::ArgumentValue *PrevArgs,
+ unsigned NumPrevArgs,
+ ArrayRef<intptr_t> QualTypeVals) {
+ // FIXME: Playing with std::string is really slow.
+ bool ForceAKA = false;
+ QualType CanTy = Ty.getCanonicalType();
+ std::string S = Ty.getAsString(Context.getPrintingPolicy());
+ std::string CanS = CanTy.getAsString(Context.getPrintingPolicy());
+
+ for (unsigned I = 0, E = QualTypeVals.size(); I != E; ++I) {
+ QualType CompareTy =
+ QualType::getFromOpaquePtr(reinterpret_cast<void*>(QualTypeVals[I]));
+ if (CompareTy.isNull())
+ continue;
+ if (CompareTy == Ty)
+ continue; // Same types
+ QualType CompareCanTy = CompareTy.getCanonicalType();
+ if (CompareCanTy == CanTy)
+ continue; // Same canonical types
+ std::string CompareS = CompareTy.getAsString(Context.getPrintingPolicy());
+ bool aka;
+ QualType CompareDesugar = Desugar(Context, CompareTy, aka);
+ std::string CompareDesugarStr =
+ CompareDesugar.getAsString(Context.getPrintingPolicy());
+ if (CompareS != S && CompareDesugarStr != S)
+ continue; // The type string is different than the comparison string
+ // and the desugared comparison string.
+ std::string CompareCanS =
+ CompareCanTy.getAsString(Context.getPrintingPolicy());
+
+ if (CompareCanS == CanS)
+ continue; // No new info from canonical type
+
+ ForceAKA = true;
+ break;
+ }
+
+ // Check to see if we already desugared this type in this
+ // diagnostic. If so, don't do it again.
+ bool Repeated = false;
+ for (unsigned i = 0; i != NumPrevArgs; ++i) {
+ // TODO: Handle ak_declcontext case.
+ if (PrevArgs[i].first == DiagnosticsEngine::ak_qualtype) {
+ void *Ptr = (void*)PrevArgs[i].second;
+ QualType PrevTy(QualType::getFromOpaquePtr(Ptr));
+ if (PrevTy == Ty) {
+ Repeated = true;
+ break;
+ }
+ }
+ }
+
+ // Consider producing an a.k.a. clause if removing all the direct
+ // sugar gives us something "significantly different".
+ if (!Repeated) {
+ bool ShouldAKA = false;
+ QualType DesugaredTy = Desugar(Context, Ty, ShouldAKA);
+ if (ShouldAKA || ForceAKA) {
+ if (DesugaredTy == Ty) {
+ DesugaredTy = Ty.getCanonicalType();
+ }
+ std::string akaStr = DesugaredTy.getAsString(Context.getPrintingPolicy());
+ if (akaStr != S) {
+ S = "'" + S + "' (aka '" + akaStr + "')";
+ return S;
+ }
+ }
+ }
+
+ S = "'" + S + "'";
+ return S;
+}
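For a sense of the strings this function produces (a hypothetical example, not part of the imported file): when stripping sugar yields something significantly different, the a.k.a. clause is appended; otherwise only the quoted spelling is returned.

// Hypothetical diagnostic arguments and the strings produced above:
typedef int *IntPtr;   // an argument of type IntPtr renders as
                       //   'IntPtr' (aka 'int *')
struct S {};           // an argument of type S carries no significant sugar
                       // and renders as plain 'S'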
+
+void clang::FormatASTNodeDiagnosticArgument(
+ DiagnosticsEngine::ArgumentKind Kind,
+ intptr_t Val,
+ const char *Modifier,
+ unsigned ModLen,
+ const char *Argument,
+ unsigned ArgLen,
+ const DiagnosticsEngine::ArgumentValue *PrevArgs,
+ unsigned NumPrevArgs,
+ SmallVectorImpl<char> &Output,
+ void *Cookie,
+ ArrayRef<intptr_t> QualTypeVals) {
+ ASTContext &Context = *static_cast<ASTContext*>(Cookie);
+
+ std::string S;
+ bool NeedQuotes = true;
+
+ switch (Kind) {
+ default: llvm_unreachable("unknown ArgumentKind");
+ case DiagnosticsEngine::ak_qualtype: {
+ assert(ModLen == 0 && ArgLen == 0 &&
+ "Invalid modifier for QualType argument");
+
+ QualType Ty(QualType::getFromOpaquePtr(reinterpret_cast<void*>(Val)));
+ S = ConvertTypeToDiagnosticString(Context, Ty, PrevArgs, NumPrevArgs,
+ QualTypeVals);
+ NeedQuotes = false;
+ break;
+ }
+ case DiagnosticsEngine::ak_declarationname: {
+ DeclarationName N = DeclarationName::getFromOpaqueInteger(Val);
+ S = N.getAsString();
+
+ if (ModLen == 9 && !memcmp(Modifier, "objcclass", 9) && ArgLen == 0)
+ S = '+' + S;
+ else if (ModLen == 12 && !memcmp(Modifier, "objcinstance", 12)
+ && ArgLen==0)
+ S = '-' + S;
+ else
+ assert(ModLen == 0 && ArgLen == 0 &&
+ "Invalid modifier for DeclarationName argument");
+ break;
+ }
+ case DiagnosticsEngine::ak_nameddecl: {
+ bool Qualified;
+ if (ModLen == 1 && Modifier[0] == 'q' && ArgLen == 0)
+ Qualified = true;
+ else {
+ assert(ModLen == 0 && ArgLen == 0 &&
+ "Invalid modifier for NamedDecl* argument");
+ Qualified = false;
+ }
+ const NamedDecl *ND = reinterpret_cast<const NamedDecl*>(Val);
+ ND->getNameForDiagnostic(S, Context.getPrintingPolicy(), Qualified);
+ break;
+ }
+ case DiagnosticsEngine::ak_nestednamespec: {
+ llvm::raw_string_ostream OS(S);
+ reinterpret_cast<NestedNameSpecifier*>(Val)->print(OS,
+ Context.getPrintingPolicy());
+ NeedQuotes = false;
+ break;
+ }
+ case DiagnosticsEngine::ak_declcontext: {
+ DeclContext *DC = reinterpret_cast<DeclContext *> (Val);
+ assert(DC && "Should never have a null declaration context");
+
+ if (DC->isTranslationUnit()) {
+ // FIXME: Get these strings from some localized place
+ if (Context.getLangOpts().CPlusPlus)
+ S = "the global namespace";
+ else
+ S = "the global scope";
+ } else if (TypeDecl *Type = dyn_cast<TypeDecl>(DC)) {
+ S = ConvertTypeToDiagnosticString(Context,
+ Context.getTypeDeclType(Type),
+ PrevArgs, NumPrevArgs, QualTypeVals);
+ } else {
+ // FIXME: Get these strings from some localized place
+ NamedDecl *ND = cast<NamedDecl>(DC);
+ if (isa<NamespaceDecl>(ND))
+ S += "namespace ";
+ else if (isa<ObjCMethodDecl>(ND))
+ S += "method ";
+ else if (isa<FunctionDecl>(ND))
+ S += "function ";
+
+ S += "'";
+ ND->getNameForDiagnostic(S, Context.getPrintingPolicy(), true);
+ S += "'";
+ }
+ NeedQuotes = false;
+ break;
+ }
+ }
+
+ if (NeedQuotes)
+ Output.push_back('\'');
+
+ Output.append(S.begin(), S.end());
+
+ if (NeedQuotes)
+ Output.push_back('\'');
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
new file mode 100644
index 0000000..3879907
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ASTImporter.cpp
@@ -0,0 +1,4676 @@
+//===--- ASTImporter.cpp - Importing ASTs from other Contexts ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTImporter class which imports AST nodes from one
+// context into another context.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTImporter.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeVisitor.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <deque>
+
+namespace clang {
+ class ASTNodeImporter : public TypeVisitor<ASTNodeImporter, QualType>,
+ public DeclVisitor<ASTNodeImporter, Decl *>,
+ public StmtVisitor<ASTNodeImporter, Stmt *> {
+ ASTImporter &Importer;
+
+ public:
+ explicit ASTNodeImporter(ASTImporter &Importer) : Importer(Importer) { }
+
+ using TypeVisitor<ASTNodeImporter, QualType>::Visit;
+ using DeclVisitor<ASTNodeImporter, Decl *>::Visit;
+ using StmtVisitor<ASTNodeImporter, Stmt *>::Visit;
+
+ // Importing types
+ QualType VisitType(const Type *T);
+ QualType VisitBuiltinType(const BuiltinType *T);
+ QualType VisitComplexType(const ComplexType *T);
+ QualType VisitPointerType(const PointerType *T);
+ QualType VisitBlockPointerType(const BlockPointerType *T);
+ QualType VisitLValueReferenceType(const LValueReferenceType *T);
+ QualType VisitRValueReferenceType(const RValueReferenceType *T);
+ QualType VisitMemberPointerType(const MemberPointerType *T);
+ QualType VisitConstantArrayType(const ConstantArrayType *T);
+ QualType VisitIncompleteArrayType(const IncompleteArrayType *T);
+ QualType VisitVariableArrayType(const VariableArrayType *T);
+ // FIXME: DependentSizedArrayType
+ // FIXME: DependentSizedExtVectorType
+ QualType VisitVectorType(const VectorType *T);
+ QualType VisitExtVectorType(const ExtVectorType *T);
+ QualType VisitFunctionNoProtoType(const FunctionNoProtoType *T);
+ QualType VisitFunctionProtoType(const FunctionProtoType *T);
+ // FIXME: UnresolvedUsingType
+ QualType VisitParenType(const ParenType *T);
+ QualType VisitTypedefType(const TypedefType *T);
+ QualType VisitTypeOfExprType(const TypeOfExprType *T);
+ // FIXME: DependentTypeOfExprType
+ QualType VisitTypeOfType(const TypeOfType *T);
+ QualType VisitDecltypeType(const DecltypeType *T);
+ QualType VisitUnaryTransformType(const UnaryTransformType *T);
+ QualType VisitAutoType(const AutoType *T);
+ // FIXME: DependentDecltypeType
+ QualType VisitRecordType(const RecordType *T);
+ QualType VisitEnumType(const EnumType *T);
+ // FIXME: TemplateTypeParmType
+ // FIXME: SubstTemplateTypeParmType
+ QualType VisitTemplateSpecializationType(const TemplateSpecializationType *T);
+ QualType VisitElaboratedType(const ElaboratedType *T);
+ // FIXME: DependentNameType
+ // FIXME: DependentTemplateSpecializationType
+ QualType VisitObjCInterfaceType(const ObjCInterfaceType *T);
+ QualType VisitObjCObjectType(const ObjCObjectType *T);
+ QualType VisitObjCObjectPointerType(const ObjCObjectPointerType *T);
+
+ // Importing declarations
+ bool ImportDeclParts(NamedDecl *D, DeclContext *&DC,
+ DeclContext *&LexicalDC, DeclarationName &Name,
+ SourceLocation &Loc);
+ void ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD = 0);
+ void ImportDeclarationNameLoc(const DeclarationNameInfo &From,
+ DeclarationNameInfo& To);
+ void ImportDeclContext(DeclContext *FromDC, bool ForceImport = false);
+
+ /// \brief What we should import from the definition.
+ enum ImportDefinitionKind {
+ /// \brief Import the default subset of the definition, which might be
+ /// nothing (if minimal import is set) or might be everything (if minimal
+ /// import is not set).
+ IDK_Default,
+ /// \brief Import everything.
+ IDK_Everything,
+ /// \brief Import only the bare bones needed to establish a valid
+ /// DeclContext.
+ IDK_Basic
+ };
+
+ bool shouldForceImportDeclContext(ImportDefinitionKind IDK) {
+ return IDK == IDK_Everything ||
+ (IDK == IDK_Default && !Importer.isMinimalImport());
+ }
+
+ bool ImportDefinition(RecordDecl *From, RecordDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ bool ImportDefinition(EnumDecl *From, EnumDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ bool ImportDefinition(ObjCInterfaceDecl *From, ObjCInterfaceDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ bool ImportDefinition(ObjCProtocolDecl *From, ObjCProtocolDecl *To,
+ ImportDefinitionKind Kind = IDK_Default);
+ TemplateParameterList *ImportTemplateParameterList(
+ TemplateParameterList *Params);
+ TemplateArgument ImportTemplateArgument(const TemplateArgument &From);
+ bool ImportTemplateArguments(const TemplateArgument *FromArgs,
+ unsigned NumFromArgs,
+ SmallVectorImpl<TemplateArgument> &ToArgs);
+ bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord);
+    bool IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToEnum);
+ bool IsStructuralMatch(ClassTemplateDecl *From, ClassTemplateDecl *To);
+ Decl *VisitDecl(Decl *D);
+ Decl *VisitTranslationUnitDecl(TranslationUnitDecl *D);
+ Decl *VisitNamespaceDecl(NamespaceDecl *D);
+ Decl *VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias);
+ Decl *VisitTypedefDecl(TypedefDecl *D);
+ Decl *VisitTypeAliasDecl(TypeAliasDecl *D);
+ Decl *VisitEnumDecl(EnumDecl *D);
+ Decl *VisitRecordDecl(RecordDecl *D);
+ Decl *VisitEnumConstantDecl(EnumConstantDecl *D);
+ Decl *VisitFunctionDecl(FunctionDecl *D);
+ Decl *VisitCXXMethodDecl(CXXMethodDecl *D);
+ Decl *VisitCXXConstructorDecl(CXXConstructorDecl *D);
+ Decl *VisitCXXDestructorDecl(CXXDestructorDecl *D);
+ Decl *VisitCXXConversionDecl(CXXConversionDecl *D);
+ Decl *VisitFieldDecl(FieldDecl *D);
+ Decl *VisitIndirectFieldDecl(IndirectFieldDecl *D);
+ Decl *VisitObjCIvarDecl(ObjCIvarDecl *D);
+ Decl *VisitVarDecl(VarDecl *D);
+ Decl *VisitImplicitParamDecl(ImplicitParamDecl *D);
+ Decl *VisitParmVarDecl(ParmVarDecl *D);
+ Decl *VisitObjCMethodDecl(ObjCMethodDecl *D);
+ Decl *VisitObjCCategoryDecl(ObjCCategoryDecl *D);
+ Decl *VisitObjCProtocolDecl(ObjCProtocolDecl *D);
+ Decl *VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
+ Decl *VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
+ Decl *VisitObjCImplementationDecl(ObjCImplementationDecl *D);
+ Decl *VisitObjCPropertyDecl(ObjCPropertyDecl *D);
+ Decl *VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
+ Decl *VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
+ Decl *VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
+ Decl *VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
+ Decl *VisitClassTemplateDecl(ClassTemplateDecl *D);
+ Decl *VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D);
+
+ // Importing statements
+ Stmt *VisitStmt(Stmt *S);
+
+ // Importing expressions
+ Expr *VisitExpr(Expr *E);
+ Expr *VisitDeclRefExpr(DeclRefExpr *E);
+ Expr *VisitIntegerLiteral(IntegerLiteral *E);
+ Expr *VisitCharacterLiteral(CharacterLiteral *E);
+ Expr *VisitParenExpr(ParenExpr *E);
+ Expr *VisitUnaryOperator(UnaryOperator *E);
+ Expr *VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E);
+ Expr *VisitBinaryOperator(BinaryOperator *E);
+ Expr *VisitCompoundAssignOperator(CompoundAssignOperator *E);
+ Expr *VisitImplicitCastExpr(ImplicitCastExpr *E);
+ Expr *VisitCStyleCastExpr(CStyleCastExpr *E);
+ };
+}
+using namespace clang;
+
+//----------------------------------------------------------------------------
+// Structural Equivalence
+//----------------------------------------------------------------------------
+
+namespace {
+ struct StructuralEquivalenceContext {
+ /// \brief AST contexts for which we are checking structural equivalence.
+ ASTContext &C1, &C2;
+
+ /// \brief The set of "tentative" equivalences between two canonical
+ /// declarations, mapping from a declaration in the first context to the
+ /// declaration in the second context that we believe to be equivalent.
+ llvm::DenseMap<Decl *, Decl *> TentativeEquivalences;
+
+ /// \brief Queue of declarations in the first context whose equivalence
+ /// with a declaration in the second context still needs to be verified.
+ std::deque<Decl *> DeclsToCheck;
+
+ /// \brief Declaration (from, to) pairs that are known not to be equivalent
+ /// (which we have already complained about).
+ llvm::DenseSet<std::pair<Decl *, Decl *> > &NonEquivalentDecls;
+
+ /// \brief Whether we're being strict about the spelling of types when
+ /// unifying two types.
+ bool StrictTypeSpelling;
+
+ StructuralEquivalenceContext(ASTContext &C1, ASTContext &C2,
+ llvm::DenseSet<std::pair<Decl *, Decl *> > &NonEquivalentDecls,
+ bool StrictTypeSpelling = false)
+ : C1(C1), C2(C2), NonEquivalentDecls(NonEquivalentDecls),
+ StrictTypeSpelling(StrictTypeSpelling) { }
+
+ /// \brief Determine whether the two declarations are structurally
+ /// equivalent.
+ bool IsStructurallyEquivalent(Decl *D1, Decl *D2);
+
+ /// \brief Determine whether the two types are structurally equivalent.
+ bool IsStructurallyEquivalent(QualType T1, QualType T2);
+
+ private:
+ /// \brief Finish checking all of the structural equivalences.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool Finish();
+
+ public:
+ DiagnosticBuilder Diag1(SourceLocation Loc, unsigned DiagID) {
+ return C1.getDiagnostics().Report(Loc, DiagID);
+ }
+
+ DiagnosticBuilder Diag2(SourceLocation Loc, unsigned DiagID) {
+ return C2.getDiagnostics().Report(Loc, DiagID);
+ }
+ };
+}
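The fields above implement a worklist scheme: a pair of declarations is first recorded optimistically in TentativeEquivalences and pushed onto DeclsToCheck, verified later, and remembered in NonEquivalentDecls on failure so a mismatch is only diagnosed once. The standalone sketch below shows the same pattern in miniature, using a hypothetical Node type rather than clang Decls; it is an illustration, not part of the imported file.

#include <cstddef>
#include <deque>
#include <map>
#include <set>
#include <utility>
#include <vector>

// Hypothetical stand-in for a declaration that references other nodes.
struct Node {
  int Kind;
  std::vector<Node *> Refs;
};

// Miniature version of the tentative-equivalence worklist used above.
struct ToyEquivalenceContext {
  std::map<Node *, Node *> TentativeEquivalences;
  std::deque<Node *> DeclsToCheck;
  std::set<std::pair<Node *, Node *> > NonEquivalentDecls;

  // Record a tentative pairing; fail if the pair is already known to be
  // non-equivalent or contradicts an earlier tentative pairing.
  bool NotePair(Node *N1, Node *N2) {
    if (NonEquivalentDecls.count(std::make_pair(N1, N2)))
      return false;
    std::pair<std::map<Node *, Node *>::iterator, bool> Ins =
        TentativeEquivalences.insert(std::make_pair(N1, N2));
    if (!Ins.second)
      return Ins.first->second == N2;
    DeclsToCheck.push_back(N1);   // verify this pair later
    return true;
  }

  // Queue the root pair, then drain the worklist, verifying each pair.
  bool IsEquivalent(Node *N1, Node *N2) {
    if (!NotePair(N1, N2))
      return false;
    while (!DeclsToCheck.empty()) {
      Node *A = DeclsToCheck.front();
      DeclsToCheck.pop_front();
      Node *B = TentativeEquivalences[A];
      if (A->Kind != B->Kind || A->Refs.size() != B->Refs.size()) {
        NonEquivalentDecls.insert(std::make_pair(A, B));
        return false;
      }
      for (size_t I = 0, N = A->Refs.size(); I != N; ++I)
        if (!NotePair(A->Refs[I], B->Refs[I]))
          return false;
    }
    return true;
  }
};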
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ QualType T1, QualType T2);
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ Decl *D1, Decl *D2);
+
+/// \brief Determine if two APInts have the same value, after zero-extending
+/// one of them (if needed!) to ensure that the bit-widths match.
+static bool IsSameValue(const llvm::APInt &I1, const llvm::APInt &I2) {
+ if (I1.getBitWidth() == I2.getBitWidth())
+ return I1 == I2;
+
+ if (I1.getBitWidth() > I2.getBitWidth())
+ return I1 == I2.zext(I1.getBitWidth());
+
+ return I1.zext(I2.getBitWidth()) == I2;
+}
+
+/// \brief Determine if two APSInts have the same value, zero- or sign-extending
+/// as needed.
+static bool IsSameValue(const llvm::APSInt &I1, const llvm::APSInt &I2) {
+ if (I1.getBitWidth() == I2.getBitWidth() && I1.isSigned() == I2.isSigned())
+ return I1 == I2;
+
+ // Check for a bit-width mismatch.
+ if (I1.getBitWidth() > I2.getBitWidth())
+ return IsSameValue(I1, I2.extend(I1.getBitWidth()));
+ else if (I2.getBitWidth() > I1.getBitWidth())
+ return IsSameValue(I1.extend(I2.getBitWidth()), I2);
+
+ // We have a signedness mismatch. Turn the signed value into an unsigned
+ // value.
+ if (I1.isSigned()) {
+ if (I1.isNegative())
+ return false;
+
+ return llvm::APSInt(I1, true) == I2;
+ }
+
+ if (I2.isNegative())
+ return false;
+
+ return I1 == llvm::APSInt(I2, true);
+}
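A couple of hypothetical checks of the comparison rules above; they assume placement in the same translation unit as the static IsSameValue helper, with llvm/ADT/APSInt.h available.

#include <cassert>
#include "llvm/ADT/APSInt.h"

// Illustrative sanity checks for the value comparison above.
static void CheckIsSameValueExamples() {
  // 8-bit unsigned 5 and 32-bit signed 5: equal after zero-extension.
  llvm::APSInt Narrow(llvm::APInt(8, 5), /*isUnsigned=*/true);
  llvm::APSInt Wide(llvm::APInt(32, 5), /*isUnsigned=*/false);
  assert(IsSameValue(Narrow, Wide));

  // Signed 8-bit 0xFF (that is, -1) versus unsigned 16-bit 0xFFFF: never
  // equal, because a negative signed value cannot match any unsigned value.
  llvm::APSInt MinusOne(llvm::APInt(8, 0xFF), /*isUnsigned=*/false);
  llvm::APSInt AllOnes(llvm::APInt(16, 0xFFFF), /*isUnsigned=*/true);
  assert(!IsSameValue(MinusOne, AllOnes));
}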
+
+/// \brief Determine structural equivalence of two expressions.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ Expr *E1, Expr *E2) {
+ if (!E1 || !E2)
+ return E1 == E2;
+
+ // FIXME: Actually perform a structural comparison!
+ return true;
+}
+
+/// \brief Determine whether two identifiers are equivalent.
+static bool IsStructurallyEquivalent(const IdentifierInfo *Name1,
+ const IdentifierInfo *Name2) {
+ if (!Name1 || !Name2)
+ return Name1 == Name2;
+
+ return Name1->getName() == Name2->getName();
+}
+
+/// \brief Determine whether two nested-name-specifiers are equivalent.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ NestedNameSpecifier *NNS1,
+ NestedNameSpecifier *NNS2) {
+ // FIXME: Implement!
+ return true;
+}
+
+/// \brief Determine whether two template arguments are equivalent.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const TemplateArgument &Arg1,
+ const TemplateArgument &Arg2) {
+ if (Arg1.getKind() != Arg2.getKind())
+ return false;
+
+ switch (Arg1.getKind()) {
+ case TemplateArgument::Null:
+ return true;
+
+ case TemplateArgument::Type:
+ return Context.IsStructurallyEquivalent(Arg1.getAsType(), Arg2.getAsType());
+
+ case TemplateArgument::Integral:
+ if (!Context.IsStructurallyEquivalent(Arg1.getIntegralType(),
+ Arg2.getIntegralType()))
+ return false;
+
+ return IsSameValue(*Arg1.getAsIntegral(), *Arg2.getAsIntegral());
+
+ case TemplateArgument::Declaration:
+ if (!Arg1.getAsDecl() || !Arg2.getAsDecl())
+ return !Arg1.getAsDecl() && !Arg2.getAsDecl();
+ return Context.IsStructurallyEquivalent(Arg1.getAsDecl(), Arg2.getAsDecl());
+
+ case TemplateArgument::Template:
+ return IsStructurallyEquivalent(Context,
+ Arg1.getAsTemplate(),
+ Arg2.getAsTemplate());
+
+ case TemplateArgument::TemplateExpansion:
+ return IsStructurallyEquivalent(Context,
+ Arg1.getAsTemplateOrTemplatePattern(),
+ Arg2.getAsTemplateOrTemplatePattern());
+
+ case TemplateArgument::Expression:
+ return IsStructurallyEquivalent(Context,
+ Arg1.getAsExpr(), Arg2.getAsExpr());
+
+ case TemplateArgument::Pack:
+ if (Arg1.pack_size() != Arg2.pack_size())
+ return false;
+
+ for (unsigned I = 0, N = Arg1.pack_size(); I != N; ++I)
+ if (!IsStructurallyEquivalent(Context,
+ Arg1.pack_begin()[I],
+ Arg2.pack_begin()[I]))
+ return false;
+
+ return true;
+ }
+
+ llvm_unreachable("Invalid template argument kind");
+}
+
+/// \brief Determine structural equivalence for the common part of array
+/// types.
+static bool IsArrayStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const ArrayType *Array1,
+ const ArrayType *Array2) {
+ if (!IsStructurallyEquivalent(Context,
+ Array1->getElementType(),
+ Array2->getElementType()))
+ return false;
+ if (Array1->getSizeModifier() != Array2->getSizeModifier())
+ return false;
+ if (Array1->getIndexTypeQualifiers() != Array2->getIndexTypeQualifiers())
+ return false;
+
+ return true;
+}
+
+/// \brief Determine structural equivalence of two types.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ QualType T1, QualType T2) {
+ if (T1.isNull() || T2.isNull())
+ return T1.isNull() && T2.isNull();
+
+ if (!Context.StrictTypeSpelling) {
+ // We aren't being strict about token-to-token equivalence of types,
+ // so map down to the canonical type.
+ T1 = Context.C1.getCanonicalType(T1);
+ T2 = Context.C2.getCanonicalType(T2);
+ }
+
+ if (T1.getQualifiers() != T2.getQualifiers())
+ return false;
+
+ Type::TypeClass TC = T1->getTypeClass();
+
+ if (T1->getTypeClass() != T2->getTypeClass()) {
+ // Compare function types with prototypes vs. without prototypes as if
+ // both did not have prototypes.
+ if (T1->getTypeClass() == Type::FunctionProto &&
+ T2->getTypeClass() == Type::FunctionNoProto)
+ TC = Type::FunctionNoProto;
+ else if (T1->getTypeClass() == Type::FunctionNoProto &&
+ T2->getTypeClass() == Type::FunctionProto)
+ TC = Type::FunctionNoProto;
+ else
+ return false;
+ }
+
+ switch (TC) {
+ case Type::Builtin:
+ // FIXME: Deal with Char_S/Char_U.
+ if (cast<BuiltinType>(T1)->getKind() != cast<BuiltinType>(T2)->getKind())
+ return false;
+ break;
+
+ case Type::Complex:
+ if (!IsStructurallyEquivalent(Context,
+ cast<ComplexType>(T1)->getElementType(),
+ cast<ComplexType>(T2)->getElementType()))
+ return false;
+ break;
+
+ case Type::Pointer:
+ if (!IsStructurallyEquivalent(Context,
+ cast<PointerType>(T1)->getPointeeType(),
+ cast<PointerType>(T2)->getPointeeType()))
+ return false;
+ break;
+
+ case Type::BlockPointer:
+ if (!IsStructurallyEquivalent(Context,
+ cast<BlockPointerType>(T1)->getPointeeType(),
+ cast<BlockPointerType>(T2)->getPointeeType()))
+ return false;
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference: {
+ const ReferenceType *Ref1 = cast<ReferenceType>(T1);
+ const ReferenceType *Ref2 = cast<ReferenceType>(T2);
+ if (Ref1->isSpelledAsLValue() != Ref2->isSpelledAsLValue())
+ return false;
+ if (Ref1->isInnerRef() != Ref2->isInnerRef())
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Ref1->getPointeeTypeAsWritten(),
+ Ref2->getPointeeTypeAsWritten()))
+ return false;
+ break;
+ }
+
+ case Type::MemberPointer: {
+ const MemberPointerType *MemPtr1 = cast<MemberPointerType>(T1);
+ const MemberPointerType *MemPtr2 = cast<MemberPointerType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ MemPtr1->getPointeeType(),
+ MemPtr2->getPointeeType()))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ QualType(MemPtr1->getClass(), 0),
+ QualType(MemPtr2->getClass(), 0)))
+ return false;
+ break;
+ }
+
+ case Type::ConstantArray: {
+ const ConstantArrayType *Array1 = cast<ConstantArrayType>(T1);
+ const ConstantArrayType *Array2 = cast<ConstantArrayType>(T2);
+ if (!IsSameValue(Array1->getSize(), Array2->getSize()))
+ return false;
+
+ if (!IsArrayStructurallyEquivalent(Context, Array1, Array2))
+ return false;
+ break;
+ }
+
+ case Type::IncompleteArray:
+ if (!IsArrayStructurallyEquivalent(Context,
+ cast<ArrayType>(T1),
+ cast<ArrayType>(T2)))
+ return false;
+ break;
+
+ case Type::VariableArray: {
+ const VariableArrayType *Array1 = cast<VariableArrayType>(T1);
+ const VariableArrayType *Array2 = cast<VariableArrayType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Array1->getSizeExpr(), Array2->getSizeExpr()))
+ return false;
+
+ if (!IsArrayStructurallyEquivalent(Context, Array1, Array2))
+ return false;
+
+ break;
+ }
+
+ case Type::DependentSizedArray: {
+ const DependentSizedArrayType *Array1 = cast<DependentSizedArrayType>(T1);
+ const DependentSizedArrayType *Array2 = cast<DependentSizedArrayType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Array1->getSizeExpr(), Array2->getSizeExpr()))
+ return false;
+
+ if (!IsArrayStructurallyEquivalent(Context, Array1, Array2))
+ return false;
+
+ break;
+ }
+
+ case Type::DependentSizedExtVector: {
+ const DependentSizedExtVectorType *Vec1
+ = cast<DependentSizedExtVectorType>(T1);
+ const DependentSizedExtVectorType *Vec2
+ = cast<DependentSizedExtVectorType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Vec1->getSizeExpr(), Vec2->getSizeExpr()))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Vec1->getElementType(),
+ Vec2->getElementType()))
+ return false;
+ break;
+ }
+
+ case Type::Vector:
+ case Type::ExtVector: {
+ const VectorType *Vec1 = cast<VectorType>(T1);
+ const VectorType *Vec2 = cast<VectorType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Vec1->getElementType(),
+ Vec2->getElementType()))
+ return false;
+ if (Vec1->getNumElements() != Vec2->getNumElements())
+ return false;
+ if (Vec1->getVectorKind() != Vec2->getVectorKind())
+ return false;
+ break;
+ }
+
+ case Type::FunctionProto: {
+ const FunctionProtoType *Proto1 = cast<FunctionProtoType>(T1);
+ const FunctionProtoType *Proto2 = cast<FunctionProtoType>(T2);
+ if (Proto1->getNumArgs() != Proto2->getNumArgs())
+ return false;
+ for (unsigned I = 0, N = Proto1->getNumArgs(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context,
+ Proto1->getArgType(I),
+ Proto2->getArgType(I)))
+ return false;
+ }
+ if (Proto1->isVariadic() != Proto2->isVariadic())
+ return false;
+ if (Proto1->getExceptionSpecType() != Proto2->getExceptionSpecType())
+ return false;
+ if (Proto1->getExceptionSpecType() == EST_Dynamic) {
+ if (Proto1->getNumExceptions() != Proto2->getNumExceptions())
+ return false;
+ for (unsigned I = 0, N = Proto1->getNumExceptions(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context,
+ Proto1->getExceptionType(I),
+ Proto2->getExceptionType(I)))
+ return false;
+ }
+ } else if (Proto1->getExceptionSpecType() == EST_ComputedNoexcept) {
+ if (!IsStructurallyEquivalent(Context,
+ Proto1->getNoexceptExpr(),
+ Proto2->getNoexceptExpr()))
+ return false;
+ }
+ if (Proto1->getTypeQuals() != Proto2->getTypeQuals())
+ return false;
+
+ // Fall through to check the bits common with FunctionNoProtoType.
+ }
+
+ case Type::FunctionNoProto: {
+ const FunctionType *Function1 = cast<FunctionType>(T1);
+ const FunctionType *Function2 = cast<FunctionType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Function1->getResultType(),
+ Function2->getResultType()))
+ return false;
+ if (Function1->getExtInfo() != Function2->getExtInfo())
+ return false;
+ break;
+ }
+
+ case Type::UnresolvedUsing:
+ if (!IsStructurallyEquivalent(Context,
+ cast<UnresolvedUsingType>(T1)->getDecl(),
+ cast<UnresolvedUsingType>(T2)->getDecl()))
+ return false;
+
+ break;
+
+ case Type::Attributed:
+ if (!IsStructurallyEquivalent(Context,
+ cast<AttributedType>(T1)->getModifiedType(),
+ cast<AttributedType>(T2)->getModifiedType()))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ cast<AttributedType>(T1)->getEquivalentType(),
+ cast<AttributedType>(T2)->getEquivalentType()))
+ return false;
+ break;
+
+ case Type::Paren:
+ if (!IsStructurallyEquivalent(Context,
+ cast<ParenType>(T1)->getInnerType(),
+ cast<ParenType>(T2)->getInnerType()))
+ return false;
+ break;
+
+ case Type::Typedef:
+ if (!IsStructurallyEquivalent(Context,
+ cast<TypedefType>(T1)->getDecl(),
+ cast<TypedefType>(T2)->getDecl()))
+ return false;
+ break;
+
+ case Type::TypeOfExpr:
+ if (!IsStructurallyEquivalent(Context,
+ cast<TypeOfExprType>(T1)->getUnderlyingExpr(),
+ cast<TypeOfExprType>(T2)->getUnderlyingExpr()))
+ return false;
+ break;
+
+ case Type::TypeOf:
+ if (!IsStructurallyEquivalent(Context,
+ cast<TypeOfType>(T1)->getUnderlyingType(),
+ cast<TypeOfType>(T2)->getUnderlyingType()))
+ return false;
+ break;
+
+ case Type::UnaryTransform:
+    if (!IsStructurallyEquivalent(Context,
+                                  cast<UnaryTransformType>(T1)->getUnderlyingType(),
+                                  cast<UnaryTransformType>(T2)->getUnderlyingType()))
+ return false;
+ break;
+
+ case Type::Decltype:
+ if (!IsStructurallyEquivalent(Context,
+ cast<DecltypeType>(T1)->getUnderlyingExpr(),
+ cast<DecltypeType>(T2)->getUnderlyingExpr()))
+ return false;
+ break;
+
+ case Type::Auto:
+ if (!IsStructurallyEquivalent(Context,
+ cast<AutoType>(T1)->getDeducedType(),
+ cast<AutoType>(T2)->getDeducedType()))
+ return false;
+ break;
+
+ case Type::Record:
+ case Type::Enum:
+ if (!IsStructurallyEquivalent(Context,
+ cast<TagType>(T1)->getDecl(),
+ cast<TagType>(T2)->getDecl()))
+ return false;
+ break;
+
+ case Type::TemplateTypeParm: {
+ const TemplateTypeParmType *Parm1 = cast<TemplateTypeParmType>(T1);
+ const TemplateTypeParmType *Parm2 = cast<TemplateTypeParmType>(T2);
+ if (Parm1->getDepth() != Parm2->getDepth())
+ return false;
+ if (Parm1->getIndex() != Parm2->getIndex())
+ return false;
+ if (Parm1->isParameterPack() != Parm2->isParameterPack())
+ return false;
+
+ // Names of template type parameters are never significant.
+ break;
+ }
+
+ case Type::SubstTemplateTypeParm: {
+ const SubstTemplateTypeParmType *Subst1
+ = cast<SubstTemplateTypeParmType>(T1);
+ const SubstTemplateTypeParmType *Subst2
+ = cast<SubstTemplateTypeParmType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ QualType(Subst1->getReplacedParameter(), 0),
+ QualType(Subst2->getReplacedParameter(), 0)))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Subst1->getReplacementType(),
+ Subst2->getReplacementType()))
+ return false;
+ break;
+ }
+
+ case Type::SubstTemplateTypeParmPack: {
+ const SubstTemplateTypeParmPackType *Subst1
+ = cast<SubstTemplateTypeParmPackType>(T1);
+ const SubstTemplateTypeParmPackType *Subst2
+ = cast<SubstTemplateTypeParmPackType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ QualType(Subst1->getReplacedParameter(), 0),
+ QualType(Subst2->getReplacedParameter(), 0)))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Subst1->getArgumentPack(),
+ Subst2->getArgumentPack()))
+ return false;
+ break;
+ }
+ case Type::TemplateSpecialization: {
+ const TemplateSpecializationType *Spec1
+ = cast<TemplateSpecializationType>(T1);
+ const TemplateSpecializationType *Spec2
+ = cast<TemplateSpecializationType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getTemplateName(),
+ Spec2->getTemplateName()))
+ return false;
+ if (Spec1->getNumArgs() != Spec2->getNumArgs())
+ return false;
+ for (unsigned I = 0, N = Spec1->getNumArgs(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getArg(I), Spec2->getArg(I)))
+ return false;
+ }
+ break;
+ }
+
+ case Type::Elaborated: {
+ const ElaboratedType *Elab1 = cast<ElaboratedType>(T1);
+ const ElaboratedType *Elab2 = cast<ElaboratedType>(T2);
+ // CHECKME: what if a keyword is ETK_None or ETK_typename ?
+ if (Elab1->getKeyword() != Elab2->getKeyword())
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Elab1->getQualifier(),
+ Elab2->getQualifier()))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ Elab1->getNamedType(),
+ Elab2->getNamedType()))
+ return false;
+ break;
+ }
+
+ case Type::InjectedClassName: {
+ const InjectedClassNameType *Inj1 = cast<InjectedClassNameType>(T1);
+ const InjectedClassNameType *Inj2 = cast<InjectedClassNameType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Inj1->getInjectedSpecializationType(),
+ Inj2->getInjectedSpecializationType()))
+ return false;
+ break;
+ }
+
+ case Type::DependentName: {
+ const DependentNameType *Typename1 = cast<DependentNameType>(T1);
+ const DependentNameType *Typename2 = cast<DependentNameType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Typename1->getQualifier(),
+ Typename2->getQualifier()))
+ return false;
+ if (!IsStructurallyEquivalent(Typename1->getIdentifier(),
+ Typename2->getIdentifier()))
+ return false;
+
+ break;
+ }
+
+ case Type::DependentTemplateSpecialization: {
+ const DependentTemplateSpecializationType *Spec1 =
+ cast<DependentTemplateSpecializationType>(T1);
+ const DependentTemplateSpecializationType *Spec2 =
+ cast<DependentTemplateSpecializationType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getQualifier(),
+ Spec2->getQualifier()))
+ return false;
+ if (!IsStructurallyEquivalent(Spec1->getIdentifier(),
+ Spec2->getIdentifier()))
+ return false;
+ if (Spec1->getNumArgs() != Spec2->getNumArgs())
+ return false;
+ for (unsigned I = 0, N = Spec1->getNumArgs(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getArg(I), Spec2->getArg(I)))
+ return false;
+ }
+ break;
+ }
+
+ case Type::PackExpansion:
+ if (!IsStructurallyEquivalent(Context,
+ cast<PackExpansionType>(T1)->getPattern(),
+ cast<PackExpansionType>(T2)->getPattern()))
+ return false;
+ break;
+
+ case Type::ObjCInterface: {
+ const ObjCInterfaceType *Iface1 = cast<ObjCInterfaceType>(T1);
+ const ObjCInterfaceType *Iface2 = cast<ObjCInterfaceType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Iface1->getDecl(), Iface2->getDecl()))
+ return false;
+ break;
+ }
+
+ case Type::ObjCObject: {
+ const ObjCObjectType *Obj1 = cast<ObjCObjectType>(T1);
+ const ObjCObjectType *Obj2 = cast<ObjCObjectType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Obj1->getBaseType(),
+ Obj2->getBaseType()))
+ return false;
+ if (Obj1->getNumProtocols() != Obj2->getNumProtocols())
+ return false;
+ for (unsigned I = 0, N = Obj1->getNumProtocols(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context,
+ Obj1->getProtocol(I),
+ Obj2->getProtocol(I)))
+ return false;
+ }
+ break;
+ }
+
+ case Type::ObjCObjectPointer: {
+ const ObjCObjectPointerType *Ptr1 = cast<ObjCObjectPointerType>(T1);
+ const ObjCObjectPointerType *Ptr2 = cast<ObjCObjectPointerType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Ptr1->getPointeeType(),
+ Ptr2->getPointeeType()))
+ return false;
+ break;
+ }
+
+ case Type::Atomic: {
+ if (!IsStructurallyEquivalent(Context,
+ cast<AtomicType>(T1)->getValueType(),
+ cast<AtomicType>(T2)->getValueType()))
+ return false;
+ break;
+ }
+
+ } // end switch
+
+ return true;
+}
+
+/// \brief Determine structural equivalence of two fields.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ FieldDecl *Field1, FieldDecl *Field2) {
+ RecordDecl *Owner2 = cast<RecordDecl>(Field2->getDeclContext());
+
+ if (!IsStructurallyEquivalent(Context,
+ Field1->getType(), Field2->getType())) {
+ Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(Owner2);
+ Context.Diag2(Field2->getLocation(), diag::note_odr_field)
+ << Field2->getDeclName() << Field2->getType();
+ Context.Diag1(Field1->getLocation(), diag::note_odr_field)
+ << Field1->getDeclName() << Field1->getType();
+ return false;
+ }
+
+ if (Field1->isBitField() != Field2->isBitField()) {
+ Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(Owner2);
+ if (Field1->isBitField()) {
+ Context.Diag1(Field1->getLocation(), diag::note_odr_bit_field)
+ << Field1->getDeclName() << Field1->getType()
+ << Field1->getBitWidthValue(Context.C1);
+ Context.Diag2(Field2->getLocation(), diag::note_odr_not_bit_field)
+ << Field2->getDeclName();
+ } else {
+ Context.Diag2(Field2->getLocation(), diag::note_odr_bit_field)
+ << Field2->getDeclName() << Field2->getType()
+ << Field2->getBitWidthValue(Context.C2);
+ Context.Diag1(Field1->getLocation(), diag::note_odr_not_bit_field)
+ << Field1->getDeclName();
+ }
+ return false;
+ }
+
+ if (Field1->isBitField()) {
+ // Make sure that the bit-fields are the same length.
+ unsigned Bits1 = Field1->getBitWidthValue(Context.C1);
+ unsigned Bits2 = Field2->getBitWidthValue(Context.C2);
+
+ if (Bits1 != Bits2) {
+ Context.Diag2(Owner2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(Owner2);
+ Context.Diag2(Field2->getLocation(), diag::note_odr_bit_field)
+ << Field2->getDeclName() << Field2->getType() << Bits2;
+ Context.Diag1(Field1->getLocation(), diag::note_odr_bit_field)
+ << Field1->getDeclName() << Field1->getType() << Bits1;
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/// \brief Determine structural equivalence of two records.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ RecordDecl *D1, RecordDecl *D2) {
+ if (D1->isUnion() != D2->isUnion()) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag1(D1->getLocation(), diag::note_odr_tag_kind_here)
+ << D1->getDeclName() << (unsigned)D1->getTagKind();
+ return false;
+ }
+
+ // If both declarations are class template specializations, we know
+ // the ODR applies, so check the template and template arguments.
+ ClassTemplateSpecializationDecl *Spec1
+ = dyn_cast<ClassTemplateSpecializationDecl>(D1);
+ ClassTemplateSpecializationDecl *Spec2
+ = dyn_cast<ClassTemplateSpecializationDecl>(D2);
+ if (Spec1 && Spec2) {
+ // Check that the specialized templates are the same.
+ if (!IsStructurallyEquivalent(Context, Spec1->getSpecializedTemplate(),
+ Spec2->getSpecializedTemplate()))
+ return false;
+
+ // Check that the template arguments are the same.
+ if (Spec1->getTemplateArgs().size() != Spec2->getTemplateArgs().size())
+ return false;
+
+ for (unsigned I = 0, N = Spec1->getTemplateArgs().size(); I != N; ++I)
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getTemplateArgs().get(I),
+ Spec2->getTemplateArgs().get(I)))
+ return false;
+ }
+ // If one is a class template specialization and the other is not, these
+ // structures are different.
+ else if (Spec1 || Spec2)
+ return false;
+
+ // Compare the definitions of these two records. If either or both are
+ // incomplete, we assume that they are equivalent.
+ D1 = D1->getDefinition();
+ D2 = D2->getDefinition();
+ if (!D1 || !D2)
+ return true;
+
+ if (CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(D1)) {
+ if (CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(D2)) {
+ if (D1CXX->getNumBases() != D2CXX->getNumBases()) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(D2->getLocation(), diag::note_odr_number_of_bases)
+ << D2CXX->getNumBases();
+ Context.Diag1(D1->getLocation(), diag::note_odr_number_of_bases)
+ << D1CXX->getNumBases();
+ return false;
+ }
+
+ // Check the base classes.
+ for (CXXRecordDecl::base_class_iterator Base1 = D1CXX->bases_begin(),
+ BaseEnd1 = D1CXX->bases_end(),
+ Base2 = D2CXX->bases_begin();
+ Base1 != BaseEnd1;
+ ++Base1, ++Base2) {
+ if (!IsStructurallyEquivalent(Context,
+ Base1->getType(), Base2->getType())) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(Base2->getLocStart(), diag::note_odr_base)
+ << Base2->getType()
+ << Base2->getSourceRange();
+ Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
+ << Base1->getType()
+ << Base1->getSourceRange();
+ return false;
+ }
+
+ // Check virtual vs. non-virtual inheritance mismatch.
+ if (Base1->isVirtual() != Base2->isVirtual()) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(Base2->getLocStart(),
+ diag::note_odr_virtual_base)
+ << Base2->isVirtual() << Base2->getSourceRange();
+ Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
+ << Base1->isVirtual()
+ << Base1->getSourceRange();
+ return false;
+ }
+ }
+ } else if (D1CXX->getNumBases() > 0) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ const CXXBaseSpecifier *Base1 = D1CXX->bases_begin();
+ Context.Diag1(Base1->getLocStart(), diag::note_odr_base)
+ << Base1->getType()
+ << Base1->getSourceRange();
+ Context.Diag2(D2->getLocation(), diag::note_odr_missing_base);
+ return false;
+ }
+ }
+
+ // Check the fields for consistency.
+ CXXRecordDecl::field_iterator Field2 = D2->field_begin(),
+ Field2End = D2->field_end();
+ for (CXXRecordDecl::field_iterator Field1 = D1->field_begin(),
+ Field1End = D1->field_end();
+ Field1 != Field1End;
+ ++Field1, ++Field2) {
+ if (Field2 == Field2End) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag1(Field1->getLocation(), diag::note_odr_field)
+ << Field1->getDeclName() << Field1->getType();
+ Context.Diag2(D2->getLocation(), diag::note_odr_missing_field);
+ return false;
+ }
+
+ if (!IsStructurallyEquivalent(Context, *Field1, *Field2))
+ return false;
+ }
+
+ if (Field2 != Field2End) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(Field2->getLocation(), diag::note_odr_field)
+ << Field2->getDeclName() << Field2->getType();
+ Context.Diag1(D1->getLocation(), diag::note_odr_missing_field);
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief Determine structural equivalence of two enums.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ EnumDecl *D1, EnumDecl *D2) {
+ EnumDecl::enumerator_iterator EC2 = D2->enumerator_begin(),
+ EC2End = D2->enumerator_end();
+ for (EnumDecl::enumerator_iterator EC1 = D1->enumerator_begin(),
+ EC1End = D1->enumerator_end();
+ EC1 != EC1End; ++EC1, ++EC2) {
+ if (EC2 == EC2End) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag1(EC1->getLocation(), diag::note_odr_enumerator)
+ << EC1->getDeclName()
+ << EC1->getInitVal().toString(10);
+ Context.Diag2(D2->getLocation(), diag::note_odr_missing_enumerator);
+ return false;
+ }
+
+ llvm::APSInt Val1 = EC1->getInitVal();
+ llvm::APSInt Val2 = EC2->getInitVal();
+ if (!IsSameValue(Val1, Val2) ||
+ !IsStructurallyEquivalent(EC1->getIdentifier(), EC2->getIdentifier())) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(EC2->getLocation(), diag::note_odr_enumerator)
+ << EC2->getDeclName()
+ << EC2->getInitVal().toString(10);
+ Context.Diag1(EC1->getLocation(), diag::note_odr_enumerator)
+ << EC1->getDeclName()
+ << EC1->getInitVal().toString(10);
+ return false;
+ }
+ }
+
+ if (EC2 != EC2End) {
+ Context.Diag2(D2->getLocation(), diag::warn_odr_tag_type_inconsistent)
+ << Context.C2.getTypeDeclType(D2);
+ Context.Diag2(EC2->getLocation(), diag::note_odr_enumerator)
+ << EC2->getDeclName()
+ << EC2->getInitVal().toString(10);
+ Context.Diag1(D1->getLocation(), diag::note_odr_missing_enumerator);
+ return false;
+ }
+
+ return true;
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ TemplateParameterList *Params1,
+ TemplateParameterList *Params2) {
+ if (Params1->size() != Params2->size()) {
+ Context.Diag2(Params2->getTemplateLoc(),
+ diag::err_odr_different_num_template_parameters)
+ << Params1->size() << Params2->size();
+ Context.Diag1(Params1->getTemplateLoc(),
+ diag::note_odr_template_parameter_list);
+ return false;
+ }
+
+ for (unsigned I = 0, N = Params1->size(); I != N; ++I) {
+ if (Params1->getParam(I)->getKind() != Params2->getParam(I)->getKind()) {
+ Context.Diag2(Params2->getParam(I)->getLocation(),
+ diag::err_odr_different_template_parameter_kind);
+ Context.Diag1(Params1->getParam(I)->getLocation(),
+ diag::note_odr_template_parameter_here);
+ return false;
+ }
+
+    if (!Context.IsStructurallyEquivalent(Params1->getParam(I),
+                                          Params2->getParam(I))) {
+      return false;
+    }
+ }
+
+ return true;
+}
+
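+/// \brief Determine structural equivalence of two template type parameters.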
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ TemplateTypeParmDecl *D1,
+ TemplateTypeParmDecl *D2) {
+ if (D1->isParameterPack() != D2->isParameterPack()) {
+ Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ << D2->isParameterPack();
+ Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
+ << D1->isParameterPack();
+ return false;
+ }
+
+ return true;
+}
+
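+/// \brief Determine structural equivalence of two non-type template
+/// parameters.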
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ NonTypeTemplateParmDecl *D1,
+ NonTypeTemplateParmDecl *D2) {
+ // FIXME: Enable once we have variadic templates.
+#if 0
+ if (D1->isParameterPack() != D2->isParameterPack()) {
+ Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ << D2->isParameterPack();
+ Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
+ << D1->isParameterPack();
+ return false;
+ }
+#endif
+
+ // Check types.
+ if (!Context.IsStructurallyEquivalent(D1->getType(), D2->getType())) {
+ Context.Diag2(D2->getLocation(),
+ diag::err_odr_non_type_parameter_type_inconsistent)
+ << D2->getType() << D1->getType();
+ Context.Diag1(D1->getLocation(), diag::note_odr_value_here)
+ << D1->getType();
+ return false;
+ }
+
+ return true;
+}
+
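+/// \brief Determine structural equivalence of two template template
+/// parameters.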
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ TemplateTemplateParmDecl *D1,
+ TemplateTemplateParmDecl *D2) {
+ // FIXME: Enable once we have variadic templates.
+#if 0
+ if (D1->isParameterPack() != D2->isParameterPack()) {
+ Context.Diag2(D2->getLocation(), diag::err_odr_parameter_pack_non_pack)
+ << D2->isParameterPack();
+ Context.Diag1(D1->getLocation(), diag::note_odr_parameter_pack_non_pack)
+ << D1->isParameterPack();
+ return false;
+ }
+#endif
+
+ // Check template parameter lists.
+ return IsStructurallyEquivalent(Context, D1->getTemplateParameters(),
+ D2->getTemplateParameters());
+}
+
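+/// \brief Determine structural equivalence of two class templates.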
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ ClassTemplateDecl *D1,
+ ClassTemplateDecl *D2) {
+ // Check template parameters.
+ if (!IsStructurallyEquivalent(Context,
+ D1->getTemplateParameters(),
+ D2->getTemplateParameters()))
+ return false;
+
+ // Check the templated declaration.
+ return Context.IsStructurallyEquivalent(D1->getTemplatedDecl(),
+ D2->getTemplatedDecl());
+}
+
+/// \brief Determine structural equivalence of two declarations.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ Decl *D1, Decl *D2) {
+ // FIXME: Check for known structural equivalences via a callback of some sort.
+
+ // Check whether we already know that these two declarations are not
+ // structurally equivalent.
+ if (Context.NonEquivalentDecls.count(std::make_pair(D1->getCanonicalDecl(),
+ D2->getCanonicalDecl())))
+ return false;
+
+ // Determine whether we've already produced a tentative equivalence for D1.
+ Decl *&EquivToD1 = Context.TentativeEquivalences[D1->getCanonicalDecl()];
+ if (EquivToD1)
+ return EquivToD1 == D2->getCanonicalDecl();
+
+ // Produce a tentative equivalence D1 <-> D2, which will be checked later.
+ EquivToD1 = D2->getCanonicalDecl();
+ Context.DeclsToCheck.push_back(D1->getCanonicalDecl());
+ return true;
+}
+
+bool StructuralEquivalenceContext::IsStructurallyEquivalent(Decl *D1,
+ Decl *D2) {
+ if (!::IsStructurallyEquivalent(*this, D1, D2))
+ return false;
+
+ return !Finish();
+}
+
+bool StructuralEquivalenceContext::IsStructurallyEquivalent(QualType T1,
+ QualType T2) {
+ if (!::IsStructurallyEquivalent(*this, T1, T2))
+ return false;
+
+ return !Finish();
+}
+
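+// Check all of the declarations that were tentatively deemed equivalent.
+// Returns true if any pair turns out not to be structurally equivalent.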
+bool StructuralEquivalenceContext::Finish() {
+ while (!DeclsToCheck.empty()) {
+ // Check the next declaration.
+ Decl *D1 = DeclsToCheck.front();
+ DeclsToCheck.pop_front();
+
+ Decl *D2 = TentativeEquivalences[D1];
+ assert(D2 && "Unrecorded tentative equivalence?");
+
+ bool Equivalent = true;
+
+ // FIXME: Switch on all declaration kinds. For now, we're just going to
+ // check the obvious ones.
+ if (RecordDecl *Record1 = dyn_cast<RecordDecl>(D1)) {
+ if (RecordDecl *Record2 = dyn_cast<RecordDecl>(D2)) {
+ // Check for equivalent structure names.
+ IdentifierInfo *Name1 = Record1->getIdentifier();
+ if (!Name1 && Record1->getTypedefNameForAnonDecl())
+ Name1 = Record1->getTypedefNameForAnonDecl()->getIdentifier();
+ IdentifierInfo *Name2 = Record2->getIdentifier();
+ if (!Name2 && Record2->getTypedefNameForAnonDecl())
+ Name2 = Record2->getTypedefNameForAnonDecl()->getIdentifier();
+ if (!::IsStructurallyEquivalent(Name1, Name2) ||
+ !::IsStructurallyEquivalent(*this, Record1, Record2))
+ Equivalent = false;
+ } else {
+ // Record/non-record mismatch.
+ Equivalent = false;
+ }
+ } else if (EnumDecl *Enum1 = dyn_cast<EnumDecl>(D1)) {
+ if (EnumDecl *Enum2 = dyn_cast<EnumDecl>(D2)) {
+ // Check for equivalent enum names.
+ IdentifierInfo *Name1 = Enum1->getIdentifier();
+ if (!Name1 && Enum1->getTypedefNameForAnonDecl())
+ Name1 = Enum1->getTypedefNameForAnonDecl()->getIdentifier();
+ IdentifierInfo *Name2 = Enum2->getIdentifier();
+ if (!Name2 && Enum2->getTypedefNameForAnonDecl())
+ Name2 = Enum2->getTypedefNameForAnonDecl()->getIdentifier();
+ if (!::IsStructurallyEquivalent(Name1, Name2) ||
+ !::IsStructurallyEquivalent(*this, Enum1, Enum2))
+ Equivalent = false;
+ } else {
+        // Enum/non-enum mismatch.
+ Equivalent = false;
+ }
+ } else if (TypedefNameDecl *Typedef1 = dyn_cast<TypedefNameDecl>(D1)) {
+ if (TypedefNameDecl *Typedef2 = dyn_cast<TypedefNameDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(Typedef1->getIdentifier(),
+ Typedef2->getIdentifier()) ||
+ !::IsStructurallyEquivalent(*this,
+ Typedef1->getUnderlyingType(),
+ Typedef2->getUnderlyingType()))
+ Equivalent = false;
+ } else {
+ // Typedef/non-typedef mismatch.
+ Equivalent = false;
+ }
+ } else if (ClassTemplateDecl *ClassTemplate1
+ = dyn_cast<ClassTemplateDecl>(D1)) {
+ if (ClassTemplateDecl *ClassTemplate2 = dyn_cast<ClassTemplateDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(ClassTemplate1->getIdentifier(),
+ ClassTemplate2->getIdentifier()) ||
+ !::IsStructurallyEquivalent(*this, ClassTemplate1, ClassTemplate2))
+ Equivalent = false;
+ } else {
+ // Class template/non-class-template mismatch.
+ Equivalent = false;
+ }
+ } else if (TemplateTypeParmDecl *TTP1= dyn_cast<TemplateTypeParmDecl>(D1)) {
+ if (TemplateTypeParmDecl *TTP2 = dyn_cast<TemplateTypeParmDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
+ Equivalent = false;
+ } else {
+ // Kind mismatch.
+ Equivalent = false;
+ }
+ } else if (NonTypeTemplateParmDecl *NTTP1
+ = dyn_cast<NonTypeTemplateParmDecl>(D1)) {
+ if (NonTypeTemplateParmDecl *NTTP2
+ = dyn_cast<NonTypeTemplateParmDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, NTTP1, NTTP2))
+ Equivalent = false;
+ } else {
+ // Kind mismatch.
+ Equivalent = false;
+ }
+ } else if (TemplateTemplateParmDecl *TTP1
+ = dyn_cast<TemplateTemplateParmDecl>(D1)) {
+ if (TemplateTemplateParmDecl *TTP2
+ = dyn_cast<TemplateTemplateParmDecl>(D2)) {
+ if (!::IsStructurallyEquivalent(*this, TTP1, TTP2))
+ Equivalent = false;
+ } else {
+ // Kind mismatch.
+ Equivalent = false;
+ }
+ }
+
+ if (!Equivalent) {
+ // Note that these two declarations are not equivalent (and we already
+ // know about it).
+ NonEquivalentDecls.insert(std::make_pair(D1->getCanonicalDecl(),
+ D2->getCanonicalDecl()));
+ return true;
+ }
+ // FIXME: Check other declaration kinds!
+ }
+
+ return false;
+}
+
+//----------------------------------------------------------------------------
+// Import Types
+//----------------------------------------------------------------------------
+
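+// Fallback for type nodes the importer does not yet know how to handle:
+// report the unsupported node and return a null type.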
+QualType ASTNodeImporter::VisitType(const Type *T) {
+ Importer.FromDiag(SourceLocation(), diag::err_unsupported_ast_node)
+ << T->getTypeClassName();
+ return QualType();
+}
+
+QualType ASTNodeImporter::VisitBuiltinType(const BuiltinType *T) {
+ switch (T->getKind()) {
+#define SHARED_SINGLETON_TYPE(Expansion)
+#define BUILTIN_TYPE(Id, SingletonId) \
+ case BuiltinType::Id: return Importer.getToContext().SingletonId;
+#include "clang/AST/BuiltinTypes.def"
+
+ // FIXME: for Char16, Char32, and NullPtr, make sure that the "to"
+ // context supports C++.
+
+ // FIXME: for ObjCId, ObjCClass, and ObjCSel, make sure that the "to"
+ // context supports ObjC.
+
+ case BuiltinType::Char_U:
+ // The context we're importing from has an unsigned 'char'. If we're
+ // importing into a context with a signed 'char', translate to
+ // 'unsigned char' instead.
+ if (Importer.getToContext().getLangOpts().CharIsSigned)
+ return Importer.getToContext().UnsignedCharTy;
+
+ return Importer.getToContext().CharTy;
+
+ case BuiltinType::Char_S:
+    // The context we're importing from has a signed 'char'. If we're
+    // importing into a context with an unsigned 'char', translate to
+    // 'signed char' instead.
+ if (!Importer.getToContext().getLangOpts().CharIsSigned)
+ return Importer.getToContext().SignedCharTy;
+
+ return Importer.getToContext().CharTy;
+
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ // FIXME: If not in C++, shall we translate to the C equivalent of
+ // wchar_t?
+ return Importer.getToContext().WCharTy;
+ }
+
+ llvm_unreachable("Invalid BuiltinType Kind!");
+}
+
+QualType ASTNodeImporter::VisitComplexType(const ComplexType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getComplexType(ToElementType);
+}
+
+QualType ASTNodeImporter::VisitPointerType(const PointerType *T) {
+ QualType ToPointeeType = Importer.Import(T->getPointeeType());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getPointerType(ToPointeeType);
+}
+
+QualType ASTNodeImporter::VisitBlockPointerType(const BlockPointerType *T) {
+ // FIXME: Check for blocks support in "to" context.
+ QualType ToPointeeType = Importer.Import(T->getPointeeType());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getBlockPointerType(ToPointeeType);
+}
+
+QualType
+ASTNodeImporter::VisitLValueReferenceType(const LValueReferenceType *T) {
+ // FIXME: Check for C++ support in "to" context.
+ QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getLValueReferenceType(ToPointeeType);
+}
+
+QualType
+ASTNodeImporter::VisitRValueReferenceType(const RValueReferenceType *T) {
+ // FIXME: Check for C++0x support in "to" context.
+ QualType ToPointeeType = Importer.Import(T->getPointeeTypeAsWritten());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getRValueReferenceType(ToPointeeType);
+}
+
+QualType ASTNodeImporter::VisitMemberPointerType(const MemberPointerType *T) {
+ // FIXME: Check for C++ support in "to" context.
+ QualType ToPointeeType = Importer.Import(T->getPointeeType());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ QualType ClassType = Importer.Import(QualType(T->getClass(), 0));
+ return Importer.getToContext().getMemberPointerType(ToPointeeType,
+ ClassType.getTypePtr());
+}
+
+QualType ASTNodeImporter::VisitConstantArrayType(const ConstantArrayType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getConstantArrayType(ToElementType,
+ T->getSize(),
+ T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers());
+}
+
+QualType
+ASTNodeImporter::VisitIncompleteArrayType(const IncompleteArrayType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getIncompleteArrayType(ToElementType,
+ T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers());
+}
+
+QualType ASTNodeImporter::VisitVariableArrayType(const VariableArrayType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ Expr *Size = Importer.Import(T->getSizeExpr());
+ if (!Size)
+ return QualType();
+
+ SourceRange Brackets = Importer.Import(T->getBracketsRange());
+ return Importer.getToContext().getVariableArrayType(ToElementType, Size,
+ T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers(),
+ Brackets);
+}
+
+QualType ASTNodeImporter::VisitVectorType(const VectorType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getVectorType(ToElementType,
+ T->getNumElements(),
+ T->getVectorKind());
+}
+
+QualType ASTNodeImporter::VisitExtVectorType(const ExtVectorType *T) {
+ QualType ToElementType = Importer.Import(T->getElementType());
+ if (ToElementType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getExtVectorType(ToElementType,
+ T->getNumElements());
+}
+
+QualType
+ASTNodeImporter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
+ // FIXME: What happens if we're importing a function without a prototype
+ // into C++? Should we make it variadic?
+ QualType ToResultType = Importer.Import(T->getResultType());
+ if (ToResultType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getFunctionNoProtoType(ToResultType,
+ T->getExtInfo());
+}
+
+QualType ASTNodeImporter::VisitFunctionProtoType(const FunctionProtoType *T) {
+ QualType ToResultType = Importer.Import(T->getResultType());
+ if (ToResultType.isNull())
+ return QualType();
+
+ // Import argument types
+ SmallVector<QualType, 4> ArgTypes;
+ for (FunctionProtoType::arg_type_iterator A = T->arg_type_begin(),
+ AEnd = T->arg_type_end();
+ A != AEnd; ++A) {
+ QualType ArgType = Importer.Import(*A);
+ if (ArgType.isNull())
+ return QualType();
+ ArgTypes.push_back(ArgType);
+ }
+
+ // Import exception types
+ SmallVector<QualType, 4> ExceptionTypes;
+ for (FunctionProtoType::exception_iterator E = T->exception_begin(),
+ EEnd = T->exception_end();
+ E != EEnd; ++E) {
+ QualType ExceptionType = Importer.Import(*E);
+ if (ExceptionType.isNull())
+ return QualType();
+ ExceptionTypes.push_back(ExceptionType);
+ }
+
+ FunctionProtoType::ExtProtoInfo EPI = T->getExtProtoInfo();
+ EPI.Exceptions = ExceptionTypes.data();
+
+ return Importer.getToContext().getFunctionType(ToResultType, ArgTypes.data(),
+ ArgTypes.size(), EPI);
+}
+
+QualType ASTNodeImporter::VisitParenType(const ParenType *T) {
+ QualType ToInnerType = Importer.Import(T->getInnerType());
+ if (ToInnerType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getParenType(ToInnerType);
+}
+
+QualType ASTNodeImporter::VisitTypedefType(const TypedefType *T) {
+ TypedefNameDecl *ToDecl
+ = dyn_cast_or_null<TypedefNameDecl>(Importer.Import(T->getDecl()));
+ if (!ToDecl)
+ return QualType();
+
+ return Importer.getToContext().getTypeDeclType(ToDecl);
+}
+
+QualType ASTNodeImporter::VisitTypeOfExprType(const TypeOfExprType *T) {
+ Expr *ToExpr = Importer.Import(T->getUnderlyingExpr());
+ if (!ToExpr)
+ return QualType();
+
+ return Importer.getToContext().getTypeOfExprType(ToExpr);
+}
+
+QualType ASTNodeImporter::VisitTypeOfType(const TypeOfType *T) {
+ QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType());
+ if (ToUnderlyingType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getTypeOfType(ToUnderlyingType);
+}
+
+QualType ASTNodeImporter::VisitDecltypeType(const DecltypeType *T) {
+ // FIXME: Make sure that the "to" context supports C++0x!
+ Expr *ToExpr = Importer.Import(T->getUnderlyingExpr());
+ if (!ToExpr)
+ return QualType();
+
+ QualType UnderlyingType = Importer.Import(T->getUnderlyingType());
+ if (UnderlyingType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getDecltypeType(ToExpr, UnderlyingType);
+}
+
+QualType ASTNodeImporter::VisitUnaryTransformType(const UnaryTransformType *T) {
+ QualType ToBaseType = Importer.Import(T->getBaseType());
+ QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType());
+ if (ToBaseType.isNull() || ToUnderlyingType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getUnaryTransformType(ToBaseType,
+ ToUnderlyingType,
+ T->getUTTKind());
+}
+
+QualType ASTNodeImporter::VisitAutoType(const AutoType *T) {
+ // FIXME: Make sure that the "to" context supports C++0x!
+ QualType FromDeduced = T->getDeducedType();
+ QualType ToDeduced;
+ if (!FromDeduced.isNull()) {
+ ToDeduced = Importer.Import(FromDeduced);
+ if (ToDeduced.isNull())
+ return QualType();
+ }
+
+ return Importer.getToContext().getAutoType(ToDeduced);
+}
+
+QualType ASTNodeImporter::VisitRecordType(const RecordType *T) {
+ RecordDecl *ToDecl
+ = dyn_cast_or_null<RecordDecl>(Importer.Import(T->getDecl()));
+ if (!ToDecl)
+ return QualType();
+
+ return Importer.getToContext().getTagDeclType(ToDecl);
+}
+
+QualType ASTNodeImporter::VisitEnumType(const EnumType *T) {
+ EnumDecl *ToDecl
+ = dyn_cast_or_null<EnumDecl>(Importer.Import(T->getDecl()));
+ if (!ToDecl)
+ return QualType();
+
+ return Importer.getToContext().getTagDeclType(ToDecl);
+}
+
+QualType ASTNodeImporter::VisitTemplateSpecializationType(
+ const TemplateSpecializationType *T) {
+ TemplateName ToTemplate = Importer.Import(T->getTemplateName());
+ if (ToTemplate.isNull())
+ return QualType();
+
+ SmallVector<TemplateArgument, 2> ToTemplateArgs;
+ if (ImportTemplateArguments(T->getArgs(), T->getNumArgs(), ToTemplateArgs))
+ return QualType();
+
+ QualType ToCanonType;
+ if (!QualType(T, 0).isCanonical()) {
+ QualType FromCanonType
+ = Importer.getFromContext().getCanonicalType(QualType(T, 0));
+    ToCanonType = Importer.Import(FromCanonType);
+ if (ToCanonType.isNull())
+ return QualType();
+ }
+ return Importer.getToContext().getTemplateSpecializationType(ToTemplate,
+ ToTemplateArgs.data(),
+ ToTemplateArgs.size(),
+ ToCanonType);
+}
+
+QualType ASTNodeImporter::VisitElaboratedType(const ElaboratedType *T) {
+ NestedNameSpecifier *ToQualifier = 0;
+ // Note: the qualifier in an ElaboratedType is optional.
+ if (T->getQualifier()) {
+ ToQualifier = Importer.Import(T->getQualifier());
+ if (!ToQualifier)
+ return QualType();
+ }
+
+ QualType ToNamedType = Importer.Import(T->getNamedType());
+ if (ToNamedType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getElaboratedType(T->getKeyword(),
+ ToQualifier, ToNamedType);
+}
+
+QualType ASTNodeImporter::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
+ ObjCInterfaceDecl *Class
+ = dyn_cast_or_null<ObjCInterfaceDecl>(Importer.Import(T->getDecl()));
+ if (!Class)
+ return QualType();
+
+ return Importer.getToContext().getObjCInterfaceType(Class);
+}
+
+QualType ASTNodeImporter::VisitObjCObjectType(const ObjCObjectType *T) {
+ QualType ToBaseType = Importer.Import(T->getBaseType());
+ if (ToBaseType.isNull())
+ return QualType();
+
+ SmallVector<ObjCProtocolDecl *, 4> Protocols;
+ for (ObjCObjectType::qual_iterator P = T->qual_begin(),
+ PEnd = T->qual_end();
+ P != PEnd; ++P) {
+ ObjCProtocolDecl *Protocol
+ = dyn_cast_or_null<ObjCProtocolDecl>(Importer.Import(*P));
+ if (!Protocol)
+ return QualType();
+ Protocols.push_back(Protocol);
+ }
+
+ return Importer.getToContext().getObjCObjectType(ToBaseType,
+ Protocols.data(),
+ Protocols.size());
+}
+
+QualType
+ASTNodeImporter::VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
+ QualType ToPointeeType = Importer.Import(T->getPointeeType());
+ if (ToPointeeType.isNull())
+ return QualType();
+
+ return Importer.getToContext().getObjCObjectPointerType(ToPointeeType);
+}
+
+//----------------------------------------------------------------------------
+// Import Declarations
+//----------------------------------------------------------------------------
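+// Import the declaration context, lexical context, name, and location of a
+// declaration. Returns true on failure.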
+bool ASTNodeImporter::ImportDeclParts(NamedDecl *D, DeclContext *&DC,
+ DeclContext *&LexicalDC,
+ DeclarationName &Name,
+ SourceLocation &Loc) {
+ // Import the context of this declaration.
+ DC = Importer.ImportContext(D->getDeclContext());
+ if (!DC)
+ return true;
+
+ LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return true;
+ }
+
+ // Import the name of this declaration.
+ Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return true;
+
+ // Import the location of this declaration.
+ Loc = Importer.Import(D->getLocation());
+ return false;
+}
+
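+// If the "from" record or enum has a definition that its already-imported
+// counterpart lacks, import that definition as well.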
+void ASTNodeImporter::ImportDefinitionIfNeeded(Decl *FromD, Decl *ToD) {
+ if (!FromD)
+ return;
+
+ if (!ToD) {
+ ToD = Importer.Import(FromD);
+ if (!ToD)
+ return;
+ }
+
+ if (RecordDecl *FromRecord = dyn_cast<RecordDecl>(FromD)) {
+ if (RecordDecl *ToRecord = cast_or_null<RecordDecl>(ToD)) {
+ if (FromRecord->getDefinition() && !ToRecord->getDefinition()) {
+ ImportDefinition(FromRecord, ToRecord);
+ }
+ }
+ return;
+ }
+
+ if (EnumDecl *FromEnum = dyn_cast<EnumDecl>(FromD)) {
+ if (EnumDecl *ToEnum = cast_or_null<EnumDecl>(ToD)) {
+ if (FromEnum->getDefinition() && !ToEnum->getDefinition()) {
+ ImportDefinition(FromEnum, ToEnum);
+ }
+ }
+ return;
+ }
+}
+
+void
+ASTNodeImporter::ImportDeclarationNameLoc(const DeclarationNameInfo &From,
+ DeclarationNameInfo& To) {
+ // NOTE: To.Name and To.Loc are already imported.
+ // We only have to import To.LocInfo.
+ switch (To.getName().getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXUsingDirective:
+ return;
+
+ case DeclarationName::CXXOperatorName: {
+ SourceRange Range = From.getCXXOperatorNameRange();
+ To.setCXXOperatorNameRange(Importer.Import(Range));
+ return;
+ }
+ case DeclarationName::CXXLiteralOperatorName: {
+ SourceLocation Loc = From.getCXXLiteralOperatorNameLoc();
+ To.setCXXLiteralOperatorNameLoc(Importer.Import(Loc));
+ return;
+ }
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName: {
+ TypeSourceInfo *FromTInfo = From.getNamedTypeInfo();
+ To.setNamedTypeInfo(Importer.Import(FromTInfo));
+ return;
+ }
+ }
+ llvm_unreachable("Unknown name kind.");
+}
+
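+// Import every declaration contained in the given context. Under a minimal
+// import, only the context itself is imported unless ForceImport is set.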
+void ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
+ if (Importer.isMinimalImport() && !ForceImport) {
+ Importer.ImportContext(FromDC);
+ return;
+ }
+
+ for (DeclContext::decl_iterator From = FromDC->decls_begin(),
+ FromEnd = FromDC->decls_end();
+ From != FromEnd;
+ ++From)
+ Importer.Import(*From);
+}
+
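+// Import the definition of a record. For C++ classes, this also copies the
+// definition data bits and imports the base-class specifiers.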
+bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
+ ImportDefinitionKind Kind) {
+ if (To->getDefinition() || To->isBeingDefined()) {
+ if (Kind == IDK_Everything)
+ ImportDeclContext(From, /*ForceImport=*/true);
+
+ return false;
+ }
+
+ To->startDefinition();
+
+ // Add base classes.
+ if (CXXRecordDecl *ToCXX = dyn_cast<CXXRecordDecl>(To)) {
+ CXXRecordDecl *FromCXX = cast<CXXRecordDecl>(From);
+
+ struct CXXRecordDecl::DefinitionData &ToData = ToCXX->data();
+ struct CXXRecordDecl::DefinitionData &FromData = FromCXX->data();
+ ToData.UserDeclaredConstructor = FromData.UserDeclaredConstructor;
+ ToData.UserDeclaredCopyConstructor = FromData.UserDeclaredCopyConstructor;
+ ToData.UserDeclaredMoveConstructor = FromData.UserDeclaredMoveConstructor;
+ ToData.UserDeclaredCopyAssignment = FromData.UserDeclaredCopyAssignment;
+ ToData.UserDeclaredMoveAssignment = FromData.UserDeclaredMoveAssignment;
+ ToData.UserDeclaredDestructor = FromData.UserDeclaredDestructor;
+ ToData.Aggregate = FromData.Aggregate;
+ ToData.PlainOldData = FromData.PlainOldData;
+ ToData.Empty = FromData.Empty;
+ ToData.Polymorphic = FromData.Polymorphic;
+ ToData.Abstract = FromData.Abstract;
+ ToData.IsStandardLayout = FromData.IsStandardLayout;
+ ToData.HasNoNonEmptyBases = FromData.HasNoNonEmptyBases;
+ ToData.HasPrivateFields = FromData.HasPrivateFields;
+ ToData.HasProtectedFields = FromData.HasProtectedFields;
+ ToData.HasPublicFields = FromData.HasPublicFields;
+ ToData.HasMutableFields = FromData.HasMutableFields;
+ ToData.HasOnlyCMembers = FromData.HasOnlyCMembers;
+ ToData.HasTrivialDefaultConstructor = FromData.HasTrivialDefaultConstructor;
+ ToData.HasConstexprNonCopyMoveConstructor
+ = FromData.HasConstexprNonCopyMoveConstructor;
+ ToData.DefaultedDefaultConstructorIsConstexpr
+ = FromData.DefaultedDefaultConstructorIsConstexpr;
+ ToData.DefaultedCopyConstructorIsConstexpr
+ = FromData.DefaultedCopyConstructorIsConstexpr;
+ ToData.DefaultedMoveConstructorIsConstexpr
+ = FromData.DefaultedMoveConstructorIsConstexpr;
+ ToData.HasConstexprDefaultConstructor
+ = FromData.HasConstexprDefaultConstructor;
+ ToData.HasConstexprCopyConstructor = FromData.HasConstexprCopyConstructor;
+ ToData.HasConstexprMoveConstructor = FromData.HasConstexprMoveConstructor;
+ ToData.HasTrivialCopyConstructor = FromData.HasTrivialCopyConstructor;
+ ToData.HasTrivialMoveConstructor = FromData.HasTrivialMoveConstructor;
+ ToData.HasTrivialCopyAssignment = FromData.HasTrivialCopyAssignment;
+ ToData.HasTrivialMoveAssignment = FromData.HasTrivialMoveAssignment;
+ ToData.HasTrivialDestructor = FromData.HasTrivialDestructor;
+ ToData.HasIrrelevantDestructor = FromData.HasIrrelevantDestructor;
+ ToData.HasNonLiteralTypeFieldsOrBases
+ = FromData.HasNonLiteralTypeFieldsOrBases;
+ // ComputedVisibleConversions not imported.
+ ToData.UserProvidedDefaultConstructor
+ = FromData.UserProvidedDefaultConstructor;
+ ToData.DeclaredDefaultConstructor = FromData.DeclaredDefaultConstructor;
+ ToData.DeclaredCopyConstructor = FromData.DeclaredCopyConstructor;
+ ToData.DeclaredMoveConstructor = FromData.DeclaredMoveConstructor;
+ ToData.DeclaredCopyAssignment = FromData.DeclaredCopyAssignment;
+ ToData.DeclaredMoveAssignment = FromData.DeclaredMoveAssignment;
+ ToData.DeclaredDestructor = FromData.DeclaredDestructor;
+ ToData.FailedImplicitMoveConstructor
+ = FromData.FailedImplicitMoveConstructor;
+ ToData.FailedImplicitMoveAssignment = FromData.FailedImplicitMoveAssignment;
+ ToData.IsLambda = FromData.IsLambda;
+
+ SmallVector<CXXBaseSpecifier *, 4> Bases;
+ for (CXXRecordDecl::base_class_iterator
+ Base1 = FromCXX->bases_begin(),
+ FromBaseEnd = FromCXX->bases_end();
+ Base1 != FromBaseEnd;
+ ++Base1) {
+ QualType T = Importer.Import(Base1->getType());
+ if (T.isNull())
+ return true;
+
+ SourceLocation EllipsisLoc;
+ if (Base1->isPackExpansion())
+ EllipsisLoc = Importer.Import(Base1->getEllipsisLoc());
+
+ // Ensure that we have a definition for the base.
+ ImportDefinitionIfNeeded(Base1->getType()->getAsCXXRecordDecl());
+
+ Bases.push_back(
+ new (Importer.getToContext())
+ CXXBaseSpecifier(Importer.Import(Base1->getSourceRange()),
+ Base1->isVirtual(),
+ Base1->isBaseOfClass(),
+ Base1->getAccessSpecifierAsWritten(),
+ Importer.Import(Base1->getTypeSourceInfo()),
+ EllipsisLoc));
+ }
+ if (!Bases.empty())
+ ToCXX->setBases(Bases.data(), Bases.size());
+ }
+
+ if (shouldForceImportDeclContext(Kind))
+ ImportDeclContext(From, /*ForceImport=*/true);
+
+ To->completeDefinition();
+ return false;
+}
+
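+// Import the definition of an enumeration, completing it with the promotion
+// type and the positive/negative bit counts from the source.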
+bool ASTNodeImporter::ImportDefinition(EnumDecl *From, EnumDecl *To,
+ ImportDefinitionKind Kind) {
+ if (To->getDefinition() || To->isBeingDefined()) {
+ if (Kind == IDK_Everything)
+ ImportDeclContext(From, /*ForceImport=*/true);
+ return false;
+ }
+
+ To->startDefinition();
+
+ QualType T = Importer.Import(Importer.getFromContext().getTypeDeclType(From));
+ if (T.isNull())
+ return true;
+
+ QualType ToPromotionType = Importer.Import(From->getPromotionType());
+ if (ToPromotionType.isNull())
+ return true;
+
+ if (shouldForceImportDeclContext(Kind))
+ ImportDeclContext(From, /*ForceImport=*/true);
+
+ // FIXME: we might need to merge the number of positive or negative bits
+ // if the enumerator lists don't match.
+ To->completeDefinition(T, ToPromotionType,
+ From->getNumPositiveBits(),
+ From->getNumNegativeBits());
+ return false;
+}
+
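+// Import each parameter of a template parameter list. Returns null if any
+// parameter fails to import.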
+TemplateParameterList *ASTNodeImporter::ImportTemplateParameterList(
+ TemplateParameterList *Params) {
+ SmallVector<NamedDecl *, 4> ToParams;
+ ToParams.reserve(Params->size());
+ for (TemplateParameterList::iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ Decl *To = Importer.Import(*P);
+ if (!To)
+ return 0;
+
+ ToParams.push_back(cast<NamedDecl>(To));
+ }
+
+ return TemplateParameterList::Create(Importer.getToContext(),
+ Importer.Import(Params->getTemplateLoc()),
+ Importer.Import(Params->getLAngleLoc()),
+ ToParams.data(), ToParams.size(),
+ Importer.Import(Params->getRAngleLoc()));
+}
+
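+// Import a single template argument, dispatching on its kind. A null
+// argument is returned on failure.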
+TemplateArgument
+ASTNodeImporter::ImportTemplateArgument(const TemplateArgument &From) {
+ switch (From.getKind()) {
+ case TemplateArgument::Null:
+ return TemplateArgument();
+
+ case TemplateArgument::Type: {
+ QualType ToType = Importer.Import(From.getAsType());
+ if (ToType.isNull())
+ return TemplateArgument();
+ return TemplateArgument(ToType);
+ }
+
+ case TemplateArgument::Integral: {
+ QualType ToType = Importer.Import(From.getIntegralType());
+ if (ToType.isNull())
+ return TemplateArgument();
+ return TemplateArgument(*From.getAsIntegral(), ToType);
+ }
+
+ case TemplateArgument::Declaration:
+ if (Decl *To = Importer.Import(From.getAsDecl()))
+ return TemplateArgument(To);
+ return TemplateArgument();
+
+ case TemplateArgument::Template: {
+ TemplateName ToTemplate = Importer.Import(From.getAsTemplate());
+ if (ToTemplate.isNull())
+ return TemplateArgument();
+
+ return TemplateArgument(ToTemplate);
+ }
+
+ case TemplateArgument::TemplateExpansion: {
+ TemplateName ToTemplate
+ = Importer.Import(From.getAsTemplateOrTemplatePattern());
+ if (ToTemplate.isNull())
+ return TemplateArgument();
+
+ return TemplateArgument(ToTemplate, From.getNumTemplateExpansions());
+ }
+
+ case TemplateArgument::Expression:
+ if (Expr *ToExpr = Importer.Import(From.getAsExpr()))
+ return TemplateArgument(ToExpr);
+ return TemplateArgument();
+
+ case TemplateArgument::Pack: {
+ SmallVector<TemplateArgument, 2> ToPack;
+ ToPack.reserve(From.pack_size());
+ if (ImportTemplateArguments(From.pack_begin(), From.pack_size(), ToPack))
+ return TemplateArgument();
+
+ TemplateArgument *ToArgs
+ = new (Importer.getToContext()) TemplateArgument[ToPack.size()];
+ std::copy(ToPack.begin(), ToPack.end(), ToArgs);
+ return TemplateArgument(ToArgs, ToPack.size());
+ }
+ }
+
+ llvm_unreachable("Invalid template argument kind");
+}
+
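+// Import a sequence of template arguments. Returns true if any non-null
+// argument fails to import.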
+bool ASTNodeImporter::ImportTemplateArguments(const TemplateArgument *FromArgs,
+ unsigned NumFromArgs,
+ SmallVectorImpl<TemplateArgument> &ToArgs) {
+ for (unsigned I = 0; I != NumFromArgs; ++I) {
+ TemplateArgument To = ImportTemplateArgument(FromArgs[I]);
+ if (To.isNull() && !FromArgs[I].isNull())
+ return true;
+
+ ToArgs.push_back(To);
+ }
+
+ return false;
+}
+
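+// Check whether a declaration from the "from" context is structurally
+// equivalent to a candidate declaration already in the "to" context.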
+bool ASTNodeImporter::IsStructuralMatch(RecordDecl *FromRecord,
+ RecordDecl *ToRecord) {
+ StructuralEquivalenceContext Ctx(Importer.getFromContext(),
+ Importer.getToContext(),
+ Importer.getNonEquivalentDecls());
+ return Ctx.IsStructurallyEquivalent(FromRecord, ToRecord);
+}
+
+bool ASTNodeImporter::IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToEnum) {
+ StructuralEquivalenceContext Ctx(Importer.getFromContext(),
+ Importer.getToContext(),
+ Importer.getNonEquivalentDecls());
+ return Ctx.IsStructurallyEquivalent(FromEnum, ToEnum);
+}
+
+bool ASTNodeImporter::IsStructuralMatch(ClassTemplateDecl *From,
+ ClassTemplateDecl *To) {
+ StructuralEquivalenceContext Ctx(Importer.getFromContext(),
+ Importer.getToContext(),
+ Importer.getNonEquivalentDecls());
+ return Ctx.IsStructurallyEquivalent(From, To);
+}
+
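+// Fallback for declaration kinds the importer does not yet handle: report
+// the unsupported node and return null.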
+Decl *ASTNodeImporter::VisitDecl(Decl *D) {
+ Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
+ << D->getDeclKindName();
+ return 0;
+}
+
+Decl *ASTNodeImporter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
+ TranslationUnitDecl *ToD =
+ Importer.getToContext().getTranslationUnitDecl();
+
+ Importer.Imported(D, ToD);
+
+ return ToD;
+}
+
+Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
+ // Import the major distinguishing characteristics of this namespace.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ NamespaceDecl *MergeWithNamespace = 0;
+ if (!Name) {
+ // This is an anonymous namespace. Adopt an existing anonymous
+ // namespace if we can.
+ // FIXME: Not testable.
+ if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(DC))
+ MergeWithNamespace = TU->getAnonymousNamespace();
+ else
+ MergeWithNamespace = cast<NamespaceDecl>(DC)->getAnonymousNamespace();
+ } else {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Namespace))
+ continue;
+
+ if (NamespaceDecl *FoundNS = dyn_cast<NamespaceDecl>(FoundDecls[I])) {
+ MergeWithNamespace = FoundNS;
+ ConflictingDecls.clear();
+ break;
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Namespace,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ }
+ }
+
+ // Create the "to" namespace, if needed.
+ NamespaceDecl *ToNamespace = MergeWithNamespace;
+ if (!ToNamespace) {
+ ToNamespace = NamespaceDecl::Create(Importer.getToContext(), DC,
+ D->isInline(),
+ Importer.Import(D->getLocStart()),
+ Loc, Name.getAsIdentifierInfo(),
+ /*PrevDecl=*/0);
+ ToNamespace->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToNamespace);
+
+ // If this is an anonymous namespace, register it as the anonymous
+ // namespace within its context.
+ if (!Name) {
+ if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(DC))
+ TU->setAnonymousNamespace(ToNamespace);
+ else
+ cast<NamespaceDecl>(DC)->setAnonymousNamespace(ToNamespace);
+ }
+ }
+ Importer.Imported(D, ToNamespace);
+
+ ImportDeclContext(D);
+
+ return ToNamespace;
+}
+
+Decl *ASTNodeImporter::VisitTypedefNameDecl(TypedefNameDecl *D, bool IsAlias) {
+ // Import the major distinguishing characteristics of this typedef.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // If this typedef is not in block scope, determine whether we've
+ // seen a typedef with the same name (that we can merge with) or any
+ // other entity by that name (which name lookup could conflict with).
+ if (!DC->isFunctionOrMethod()) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ unsigned IDNS = Decl::IDNS_Ordinary;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+ if (TypedefNameDecl *FoundTypedef =
+ dyn_cast<TypedefNameDecl>(FoundDecls[I])) {
+ if (Importer.IsStructurallyEquivalent(D->getUnderlyingType(),
+ FoundTypedef->getUnderlyingType()))
+ return Importer.Imported(D, FoundTypedef);
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ if (!Name)
+ return 0;
+ }
+ }
+
+  // Import the underlying type of this typedef.
+ QualType T = Importer.Import(D->getUnderlyingType());
+ if (T.isNull())
+ return 0;
+
+ // Create the new typedef node.
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ SourceLocation StartL = Importer.Import(D->getLocStart());
+ TypedefNameDecl *ToTypedef;
+ if (IsAlias)
+ ToTypedef = TypeAliasDecl::Create(Importer.getToContext(), DC,
+ StartL, Loc,
+ Name.getAsIdentifierInfo(),
+ TInfo);
+ else
+ ToTypedef = TypedefDecl::Create(Importer.getToContext(), DC,
+ StartL, Loc,
+ Name.getAsIdentifierInfo(),
+ TInfo);
+
+ ToTypedef->setAccess(D->getAccess());
+ ToTypedef->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToTypedef);
+ LexicalDC->addDeclInternal(ToTypedef);
+
+ return ToTypedef;
+}
+
+Decl *ASTNodeImporter::VisitTypedefDecl(TypedefDecl *D) {
+ return VisitTypedefNameDecl(D, /*IsAlias=*/false);
+}
+
+Decl *ASTNodeImporter::VisitTypeAliasDecl(TypeAliasDecl *D) {
+ return VisitTypedefNameDecl(D, /*IsAlias=*/true);
+}
+
+Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
+ // Import the major distinguishing characteristics of this enum.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Figure out what enum name we're looking for.
+ unsigned IDNS = Decl::IDNS_Tag;
+ DeclarationName SearchName = Name;
+ if (!SearchName && D->getTypedefNameForAnonDecl()) {
+ SearchName = Importer.Import(D->getTypedefNameForAnonDecl()->getDeclName());
+ IDNS = Decl::IDNS_Ordinary;
+ } else if (Importer.getToContext().getLangOpts().CPlusPlus)
+ IDNS |= Decl::IDNS_Ordinary;
+
+ // We may already have an enum of the same name; try to find and match it.
+ if (!DC->isFunctionOrMethod() && SearchName) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(SearchName, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+
+ Decl *Found = FoundDecls[I];
+ if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
+ if (const TagType *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
+ Found = Tag->getDecl();
+ }
+
+ if (EnumDecl *FoundEnum = dyn_cast<EnumDecl>(Found)) {
+ if (IsStructuralMatch(D, FoundEnum))
+ return Importer.Imported(D, FoundEnum);
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ }
+ }
+
+ // Create the enum declaration.
+ EnumDecl *D2 = EnumDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getLocStart()),
+ Loc, Name.getAsIdentifierInfo(), 0,
+ D->isScoped(), D->isScopedUsingClassTag(),
+ D->isFixed());
+ // Import the qualifier, if any.
+ D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ D2->setAccess(D->getAccess());
+ D2->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, D2);
+ LexicalDC->addDeclInternal(D2);
+
+ // Import the integer type.
+ QualType ToIntegerType = Importer.Import(D->getIntegerType());
+ if (ToIntegerType.isNull())
+ return 0;
+ D2->setIntegerType(ToIntegerType);
+
+ // Import the definition
+ if (D->isCompleteDefinition() && ImportDefinition(D, D2))
+ return 0;
+
+ return D2;
+}
+
+Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
+ // If this record has a definition in the translation unit we're coming from,
+ // but this particular declaration is not that definition, import the
+ // definition and map to that.
+ TagDecl *Definition = D->getDefinition();
+ if (Definition && Definition != D) {
+ Decl *ImportedDef = Importer.Import(Definition);
+ if (!ImportedDef)
+ return 0;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ // Import the major distinguishing characteristics of this record.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Figure out what structure name we're looking for.
+ unsigned IDNS = Decl::IDNS_Tag;
+ DeclarationName SearchName = Name;
+ if (!SearchName && D->getTypedefNameForAnonDecl()) {
+ SearchName = Importer.Import(D->getTypedefNameForAnonDecl()->getDeclName());
+ IDNS = Decl::IDNS_Ordinary;
+ } else if (Importer.getToContext().getLangOpts().CPlusPlus)
+ IDNS |= Decl::IDNS_Ordinary;
+
+ // We may already have a record of the same name; try to find and match it.
+ RecordDecl *AdoptDecl = 0;
+ if (!DC->isFunctionOrMethod() && SearchName) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(SearchName, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+
+ Decl *Found = FoundDecls[I];
+ if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Found)) {
+ if (const TagType *Tag = Typedef->getUnderlyingType()->getAs<TagType>())
+ Found = Tag->getDecl();
+ }
+
+ if (RecordDecl *FoundRecord = dyn_cast<RecordDecl>(Found)) {
+ if (RecordDecl *FoundDef = FoundRecord->getDefinition()) {
+ if (!D->isCompleteDefinition() || IsStructuralMatch(D, FoundDef)) {
+ // The record types structurally match, or the "from" translation
+ // unit only had a forward declaration anyway; call it the same
+ // function.
+ // FIXME: For C++, we should also merge methods here.
+ return Importer.Imported(D, FoundDef);
+ }
+ } else {
+ // We have a forward declaration of this type, so adopt that forward
+ // declaration rather than building a new one.
+ AdoptDecl = FoundRecord;
+ continue;
+ }
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ }
+ }
+
+ // Create the record declaration.
+ RecordDecl *D2 = AdoptDecl;
+ SourceLocation StartLoc = Importer.Import(D->getLocStart());
+ if (!D2) {
+ if (isa<CXXRecordDecl>(D)) {
+ CXXRecordDecl *D2CXX = CXXRecordDecl::Create(Importer.getToContext(),
+ D->getTagKind(),
+ DC, StartLoc, Loc,
+ Name.getAsIdentifierInfo());
+ D2 = D2CXX;
+ D2->setAccess(D->getAccess());
+ } else {
+ D2 = RecordDecl::Create(Importer.getToContext(), D->getTagKind(),
+ DC, StartLoc, Loc, Name.getAsIdentifierInfo());
+ }
+
+ D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ D2->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(D2);
+ }
+
+ Importer.Imported(D, D2);
+
+ if (D->isCompleteDefinition() && ImportDefinition(D, D2, IDK_Default))
+ return 0;
+
+ return D2;
+}
+
+Decl *ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
+ // Import the major distinguishing characteristics of this enumerator.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ // Determine whether there are any other declarations with the same name and
+ // in the same context.
+ if (!LexicalDC->isFunctionOrMethod()) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ unsigned IDNS = Decl::IDNS_Ordinary;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ if (!Name)
+ return 0;
+ }
+ }
+
+ Expr *Init = Importer.Import(D->getInitExpr());
+ if (D->getInitExpr() && !Init)
+ return 0;
+
+ EnumConstantDecl *ToEnumerator
+ = EnumConstantDecl::Create(Importer.getToContext(), cast<EnumDecl>(DC), Loc,
+ Name.getAsIdentifierInfo(), T,
+ Init, D->getInitVal());
+ ToEnumerator->setAccess(D->getAccess());
+ ToEnumerator->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToEnumerator);
+ LexicalDC->addDeclInternal(ToEnumerator);
+ return ToEnumerator;
+}
+
+Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
+ // Import the major distinguishing characteristics of this function.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Try to find a function in our own ("to") context with the same name, same
+ // type, and in the same context as the function we're importing.
+ if (!LexicalDC->isFunctionOrMethod()) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ unsigned IDNS = Decl::IDNS_Ordinary;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+
+ if (FunctionDecl *FoundFunction = dyn_cast<FunctionDecl>(FoundDecls[I])) {
+ if (isExternalLinkage(FoundFunction->getLinkage()) &&
+ isExternalLinkage(D->getLinkage())) {
+ if (Importer.IsStructurallyEquivalent(D->getType(),
+ FoundFunction->getType())) {
+ // FIXME: Actually try to merge the body and other attributes.
+ return Importer.Imported(D, FoundFunction);
+ }
+
+ // FIXME: Check for overloading more carefully, e.g., by boosting
+ // Sema::IsOverload out to the AST library.
+
+ // Function overloading is okay in C++.
+ if (Importer.getToContext().getLangOpts().CPlusPlus)
+ continue;
+
+ // Complain about inconsistent function types.
+ Importer.ToDiag(Loc, diag::err_odr_function_type_inconsistent)
+ << Name << D->getType() << FoundFunction->getType();
+ Importer.ToDiag(FoundFunction->getLocation(),
+ diag::note_odr_value_here)
+ << FoundFunction->getType();
+ }
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ if (!Name)
+ return 0;
+ }
+ }
+
+ DeclarationNameInfo NameInfo(Name, Loc);
+ // Import additional name location/type info.
+ ImportDeclarationNameLoc(D->getNameInfo(), NameInfo);
+
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ // Import the function parameters.
+ SmallVector<ParmVarDecl *, 8> Parameters;
+ for (FunctionDecl::param_iterator P = D->param_begin(), PEnd = D->param_end();
+ P != PEnd; ++P) {
+ ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(*P));
+ if (!ToP)
+ return 0;
+
+ Parameters.push_back(ToP);
+ }
+
+ // Create the imported function.
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ FunctionDecl *ToFunction = 0;
+ if (CXXConstructorDecl *FromConstructor = dyn_cast<CXXConstructorDecl>(D)) {
+ ToFunction = CXXConstructorDecl::Create(Importer.getToContext(),
+ cast<CXXRecordDecl>(DC),
+ D->getInnerLocStart(),
+ NameInfo, T, TInfo,
+ FromConstructor->isExplicit(),
+ D->isInlineSpecified(),
+ D->isImplicit(),
+ D->isConstexpr());
+ } else if (isa<CXXDestructorDecl>(D)) {
+ ToFunction = CXXDestructorDecl::Create(Importer.getToContext(),
+ cast<CXXRecordDecl>(DC),
+ D->getInnerLocStart(),
+ NameInfo, T, TInfo,
+ D->isInlineSpecified(),
+ D->isImplicit());
+ } else if (CXXConversionDecl *FromConversion
+ = dyn_cast<CXXConversionDecl>(D)) {
+ ToFunction = CXXConversionDecl::Create(Importer.getToContext(),
+ cast<CXXRecordDecl>(DC),
+ D->getInnerLocStart(),
+ NameInfo, T, TInfo,
+ D->isInlineSpecified(),
+ FromConversion->isExplicit(),
+ D->isConstexpr(),
+ Importer.Import(D->getLocEnd()));
+ } else if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ ToFunction = CXXMethodDecl::Create(Importer.getToContext(),
+ cast<CXXRecordDecl>(DC),
+ D->getInnerLocStart(),
+ NameInfo, T, TInfo,
+ Method->isStatic(),
+ Method->getStorageClassAsWritten(),
+ Method->isInlineSpecified(),
+ D->isConstexpr(),
+ Importer.Import(D->getLocEnd()));
+ } else {
+ ToFunction = FunctionDecl::Create(Importer.getToContext(), DC,
+ D->getInnerLocStart(),
+ NameInfo, T, TInfo, D->getStorageClass(),
+ D->getStorageClassAsWritten(),
+ D->isInlineSpecified(),
+ D->hasWrittenPrototype(),
+ D->isConstexpr());
+ }
+
+ // Import the qualifier, if any.
+ ToFunction->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ ToFunction->setAccess(D->getAccess());
+ ToFunction->setLexicalDeclContext(LexicalDC);
+ ToFunction->setVirtualAsWritten(D->isVirtualAsWritten());
+ ToFunction->setTrivial(D->isTrivial());
+ ToFunction->setPure(D->isPure());
+ Importer.Imported(D, ToFunction);
+
+ // Set the parameters.
+ for (unsigned I = 0, N = Parameters.size(); I != N; ++I) {
+ Parameters[I]->setOwningFunction(ToFunction);
+ ToFunction->addDeclInternal(Parameters[I]);
+ }
+ ToFunction->setParams(Parameters);
+
+ // FIXME: Other bits to merge?
+
+ // Add this function to the lexical context.
+ LexicalDC->addDeclInternal(ToFunction);
+
+ return ToFunction;
+}
+
+Decl *ASTNodeImporter::VisitCXXMethodDecl(CXXMethodDecl *D) {
+ return VisitFunctionDecl(D);
+}
+
+Decl *ASTNodeImporter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
+ return VisitCXXMethodDecl(D);
+}
+
+Decl *ASTNodeImporter::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
+ return VisitCXXMethodDecl(D);
+}
+
+Decl *ASTNodeImporter::VisitCXXConversionDecl(CXXConversionDecl *D) {
+ return VisitCXXMethodDecl(D);
+}
+
+Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
+ // Import the major distinguishing characteristics of a variable.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Determine whether we've already imported this field.
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (FieldDecl *FoundField = dyn_cast<FieldDecl>(FoundDecls[I])) {
+ if (Importer.IsStructurallyEquivalent(D->getType(),
+ FoundField->getType())) {
+ Importer.Imported(D, FoundField);
+ return FoundField;
+ }
+
+ Importer.ToDiag(Loc, diag::err_odr_field_type_inconsistent)
+ << Name << D->getType() << FoundField->getType();
+ Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
+ << FoundField->getType();
+ return 0;
+ }
+ }
+
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ Expr *BitWidth = Importer.Import(D->getBitWidth());
+ if (!BitWidth && D->getBitWidth())
+ return 0;
+
+ FieldDecl *ToField = FieldDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getInnerLocStart()),
+ Loc, Name.getAsIdentifierInfo(),
+ T, TInfo, BitWidth, D->isMutable(),
+ D->hasInClassInitializer());
+ ToField->setAccess(D->getAccess());
+ ToField->setLexicalDeclContext(LexicalDC);
+ if (ToField->hasInClassInitializer())
+ ToField->setInClassInitializer(D->getInClassInitializer());
+ Importer.Imported(D, ToField);
+ LexicalDC->addDeclInternal(ToField);
+ return ToField;
+}
+
+Decl *ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
+ // Import the major distinguishing characteristics of a variable.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Determine whether we've already imported this field.
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (IndirectFieldDecl *FoundField
+ = dyn_cast<IndirectFieldDecl>(FoundDecls[I])) {
+ if (Importer.IsStructurallyEquivalent(D->getType(),
+ FoundField->getType())) {
+ Importer.Imported(D, FoundField);
+ return FoundField;
+ }
+
+ Importer.ToDiag(Loc, diag::err_odr_field_type_inconsistent)
+ << Name << D->getType() << FoundField->getType();
+ Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
+ << FoundField->getType();
+ return 0;
+ }
+ }
+
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ NamedDecl **NamedChain =
+ new (Importer.getToContext())NamedDecl*[D->getChainingSize()];
+
+ unsigned i = 0;
+ for (IndirectFieldDecl::chain_iterator PI = D->chain_begin(),
+ PE = D->chain_end(); PI != PE; ++PI) {
+ Decl* D = Importer.Import(*PI);
+ if (!D)
+ return 0;
+ NamedChain[i++] = cast<NamedDecl>(D);
+ }
+
+ IndirectFieldDecl *ToIndirectField = IndirectFieldDecl::Create(
+ Importer.getToContext(), DC,
+ Loc, Name.getAsIdentifierInfo(), T,
+ NamedChain, D->getChainingSize());
+ ToIndirectField->setAccess(D->getAccess());
+ ToIndirectField->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToIndirectField);
+ LexicalDC->addDeclInternal(ToIndirectField);
+ return ToIndirectField;
+}
+
+Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
+ // Import the major distinguishing characteristics of an ivar.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Determine whether we've already imported this ivar
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (ObjCIvarDecl *FoundIvar = dyn_cast<ObjCIvarDecl>(FoundDecls[I])) {
+ if (Importer.IsStructurallyEquivalent(D->getType(),
+ FoundIvar->getType())) {
+ Importer.Imported(D, FoundIvar);
+ return FoundIvar;
+ }
+
+ Importer.ToDiag(Loc, diag::err_odr_ivar_type_inconsistent)
+ << Name << D->getType() << FoundIvar->getType();
+ Importer.ToDiag(FoundIvar->getLocation(), diag::note_odr_value_here)
+ << FoundIvar->getType();
+ return 0;
+ }
+ }
+
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ Expr *BitWidth = Importer.Import(D->getBitWidth());
+ if (!BitWidth && D->getBitWidth())
+ return 0;
+
+ ObjCIvarDecl *ToIvar = ObjCIvarDecl::Create(Importer.getToContext(),
+ cast<ObjCContainerDecl>(DC),
+ Importer.Import(D->getInnerLocStart()),
+ Loc, Name.getAsIdentifierInfo(),
+ T, TInfo, D->getAccessControl(),
+ BitWidth, D->getSynthesize());
+ ToIvar->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToIvar);
+ LexicalDC->addDeclInternal(ToIvar);
+  return ToIvar;
+}
+
+Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
+ // Import the major distinguishing characteristics of a variable.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Try to find a variable in our own ("to") context with the same name and
+ // in the same context as the variable we're importing.
+ if (D->isFileVarDecl()) {
+ VarDecl *MergeWithVar = 0;
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ unsigned IDNS = Decl::IDNS_Ordinary;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
+ continue;
+
+ if (VarDecl *FoundVar = dyn_cast<VarDecl>(FoundDecls[I])) {
+ // We have found a variable that we may need to merge with. Check it.
+ if (isExternalLinkage(FoundVar->getLinkage()) &&
+ isExternalLinkage(D->getLinkage())) {
+ if (Importer.IsStructurallyEquivalent(D->getType(),
+ FoundVar->getType())) {
+ MergeWithVar = FoundVar;
+ break;
+ }
+
+ const ArrayType *FoundArray
+ = Importer.getToContext().getAsArrayType(FoundVar->getType());
+ const ArrayType *TArray
+ = Importer.getToContext().getAsArrayType(D->getType());
+ if (FoundArray && TArray) {
+ if (isa<IncompleteArrayType>(FoundArray) &&
+ isa<ConstantArrayType>(TArray)) {
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ FoundVar->setType(T);
+ MergeWithVar = FoundVar;
+ break;
+ } else if (isa<IncompleteArrayType>(TArray) &&
+ isa<ConstantArrayType>(FoundArray)) {
+ MergeWithVar = FoundVar;
+ break;
+ }
+ }
+
+ Importer.ToDiag(Loc, diag::err_odr_variable_type_inconsistent)
+ << Name << D->getType() << FoundVar->getType();
+ Importer.ToDiag(FoundVar->getLocation(), diag::note_odr_value_here)
+ << FoundVar->getType();
+ }
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (MergeWithVar) {
+ // An equivalent variable with external linkage has been found. Link
+ // the two declarations, then merge them.
+ Importer.Imported(D, MergeWithVar);
+
+ if (VarDecl *DDef = D->getDefinition()) {
+ if (VarDecl *ExistingDef = MergeWithVar->getDefinition()) {
+ Importer.ToDiag(ExistingDef->getLocation(),
+ diag::err_odr_variable_multiple_def)
+ << Name;
+ Importer.FromDiag(DDef->getLocation(), diag::note_odr_defined_here);
+ } else {
+ Expr *Init = Importer.Import(DDef->getInit());
+ MergeWithVar->setInit(Init);
+ if (DDef->isInitKnownICE()) {
+ EvaluatedStmt *Eval = MergeWithVar->ensureEvaluatedStmt();
+ Eval->CheckedICE = true;
+ Eval->IsICE = DDef->isInitICE();
+ }
+ }
+ }
+
+ return MergeWithVar;
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, IDNS,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ if (!Name)
+ return 0;
+ }
+ }
+
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ // Create the imported variable.
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ VarDecl *ToVar = VarDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getInnerLocStart()),
+ Loc, Name.getAsIdentifierInfo(),
+ T, TInfo,
+ D->getStorageClass(),
+ D->getStorageClassAsWritten());
+ ToVar->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+ ToVar->setAccess(D->getAccess());
+ ToVar->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToVar);
+ LexicalDC->addDeclInternal(ToVar);
+
+ // Merge the initializer.
+ // FIXME: Can we really import any initializer? Alternatively, we could force
+ // ourselves to import every declaration of a variable and then only use
+ // getInit() here.
+ ToVar->setInit(Importer.Import(const_cast<Expr *>(D->getAnyInitializer())));
+
+ // FIXME: Other bits to merge?
+
+ return ToVar;
+}
+
+Decl *ASTNodeImporter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
+ // Parameters are created in the translation unit's context, then moved
+ // into the function declaration's context afterward.
+ DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();
+
+ // Import the name of this declaration.
+ DeclarationName Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return 0;
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ // Import the parameter's type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ // Create the imported parameter.
+ ImplicitParamDecl *ToParm
+ = ImplicitParamDecl::Create(Importer.getToContext(), DC,
+ Loc, Name.getAsIdentifierInfo(),
+ T);
+ return Importer.Imported(D, ToParm);
+}
+
+Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
+ // Parameters are created in the translation unit's context, then moved
+ // into the function declaration's context afterward.
+ DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();
+
+ // Import the name of this declaration.
+ DeclarationName Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return 0;
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ // Import the parameter's type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ // Create the imported parameter.
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ ParmVarDecl *ToParm = ParmVarDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getInnerLocStart()),
+ Loc, Name.getAsIdentifierInfo(),
+ T, TInfo, D->getStorageClass(),
+ D->getStorageClassAsWritten(),
+ /*FIXME: Default argument*/ 0);
+ ToParm->setHasInheritedDefaultArg(D->hasInheritedDefaultArg());
+ return Importer.Imported(D, ToParm);
+}
+
+Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
+ // Import the major distinguishing characteristics of a method.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (ObjCMethodDecl *FoundMethod = dyn_cast<ObjCMethodDecl>(FoundDecls[I])) {
+ if (FoundMethod->isInstanceMethod() != D->isInstanceMethod())
+ continue;
+
+ // Check return types.
+ if (!Importer.IsStructurallyEquivalent(D->getResultType(),
+ FoundMethod->getResultType())) {
+ Importer.ToDiag(Loc, diag::err_odr_objc_method_result_type_inconsistent)
+ << D->isInstanceMethod() << Name
+ << D->getResultType() << FoundMethod->getResultType();
+ Importer.ToDiag(FoundMethod->getLocation(),
+ diag::note_odr_objc_method_here)
+ << D->isInstanceMethod() << Name;
+ return 0;
+ }
+
+ // Check the number of parameters.
+ if (D->param_size() != FoundMethod->param_size()) {
+ Importer.ToDiag(Loc, diag::err_odr_objc_method_num_params_inconsistent)
+ << D->isInstanceMethod() << Name
+ << D->param_size() << FoundMethod->param_size();
+ Importer.ToDiag(FoundMethod->getLocation(),
+ diag::note_odr_objc_method_here)
+ << D->isInstanceMethod() << Name;
+ return 0;
+ }
+
+ // Check parameter types.
+ for (ObjCMethodDecl::param_iterator P = D->param_begin(),
+ PEnd = D->param_end(), FoundP = FoundMethod->param_begin();
+ P != PEnd; ++P, ++FoundP) {
+ if (!Importer.IsStructurallyEquivalent((*P)->getType(),
+ (*FoundP)->getType())) {
+ Importer.FromDiag((*P)->getLocation(),
+ diag::err_odr_objc_method_param_type_inconsistent)
+ << D->isInstanceMethod() << Name
+ << (*P)->getType() << (*FoundP)->getType();
+ Importer.ToDiag((*FoundP)->getLocation(), diag::note_odr_value_here)
+ << (*FoundP)->getType();
+ return 0;
+ }
+ }
+
+      // Check variadic/non-variadic.
+ if (D->isVariadic() != FoundMethod->isVariadic()) {
+ Importer.ToDiag(Loc, diag::err_odr_objc_method_variadic_inconsistent)
+ << D->isInstanceMethod() << Name;
+ Importer.ToDiag(FoundMethod->getLocation(),
+ diag::note_odr_objc_method_here)
+ << D->isInstanceMethod() << Name;
+ return 0;
+ }
+
+ // FIXME: Any other bits we need to merge?
+ return Importer.Imported(D, FoundMethod);
+ }
+ }
+
+ // Import the result type.
+ QualType ResultTy = Importer.Import(D->getResultType());
+ if (ResultTy.isNull())
+ return 0;
+
+ TypeSourceInfo *ResultTInfo = Importer.Import(D->getResultTypeSourceInfo());
+
+ ObjCMethodDecl *ToMethod
+ = ObjCMethodDecl::Create(Importer.getToContext(),
+ Loc,
+ Importer.Import(D->getLocEnd()),
+ Name.getObjCSelector(),
+ ResultTy, ResultTInfo, DC,
+ D->isInstanceMethod(),
+ D->isVariadic(),
+ D->isSynthesized(),
+ D->isImplicit(),
+ D->isDefined(),
+ D->getImplementationControl(),
+ D->hasRelatedResultType());
+
+ // FIXME: When we decide to merge method definitions, we'll need to
+ // deal with implicit parameters.
+
+ // Import the parameters
+ SmallVector<ParmVarDecl *, 5> ToParams;
+ for (ObjCMethodDecl::param_iterator FromP = D->param_begin(),
+ FromPEnd = D->param_end();
+ FromP != FromPEnd;
+ ++FromP) {
+ ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(*FromP));
+ if (!ToP)
+ return 0;
+
+ ToParams.push_back(ToP);
+ }
+
+ // Set the parameters.
+ for (unsigned I = 0, N = ToParams.size(); I != N; ++I) {
+ ToParams[I]->setOwningFunction(ToMethod);
+ ToMethod->addDeclInternal(ToParams[I]);
+ }
+ SmallVector<SourceLocation, 12> SelLocs;
+ D->getSelectorLocs(SelLocs);
+ ToMethod->setMethodParams(Importer.getToContext(), ToParams, SelLocs);
+
+ ToMethod->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToMethod);
+ LexicalDC->addDeclInternal(ToMethod);
+ return ToMethod;
+}
+
+Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
+ // Import the major distinguishing characteristics of a category.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ ObjCInterfaceDecl *ToInterface
+ = cast_or_null<ObjCInterfaceDecl>(Importer.Import(D->getClassInterface()));
+ if (!ToInterface)
+ return 0;
+
+ // Determine if we've already encountered this category.
+ ObjCCategoryDecl *MergeWithCategory
+ = ToInterface->FindCategoryDeclaration(Name.getAsIdentifierInfo());
+ ObjCCategoryDecl *ToCategory = MergeWithCategory;
+ if (!ToCategory) {
+ ToCategory = ObjCCategoryDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getAtStartLoc()),
+ Loc,
+ Importer.Import(D->getCategoryNameLoc()),
+ Name.getAsIdentifierInfo(),
+ ToInterface,
+ Importer.Import(D->getIvarLBraceLoc()),
+ Importer.Import(D->getIvarRBraceLoc()));
+ ToCategory->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToCategory);
+ Importer.Imported(D, ToCategory);
+
+ // Import protocols
+ SmallVector<ObjCProtocolDecl *, 4> Protocols;
+ SmallVector<SourceLocation, 4> ProtocolLocs;
+ ObjCCategoryDecl::protocol_loc_iterator FromProtoLoc
+ = D->protocol_loc_begin();
+ for (ObjCCategoryDecl::protocol_iterator FromProto = D->protocol_begin(),
+ FromProtoEnd = D->protocol_end();
+ FromProto != FromProtoEnd;
+ ++FromProto, ++FromProtoLoc) {
+ ObjCProtocolDecl *ToProto
+ = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+ if (!ToProto)
+ return 0;
+ Protocols.push_back(ToProto);
+ ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ }
+
+ // FIXME: If we're merging, make sure that the protocol list is the same.
+ ToCategory->setProtocolList(Protocols.data(), Protocols.size(),
+ ProtocolLocs.data(), Importer.getToContext());
+
+ } else {
+ Importer.Imported(D, ToCategory);
+ }
+
+ // Import all of the members of this category.
+ ImportDeclContext(D);
+
+ // If we have an implementation, import it as well.
+ if (D->getImplementation()) {
+ ObjCCategoryImplDecl *Impl
+ = cast_or_null<ObjCCategoryImplDecl>(
+ Importer.Import(D->getImplementation()));
+ if (!Impl)
+ return 0;
+
+ ToCategory->setImplementation(Impl);
+ }
+
+ return ToCategory;
+}
+
+bool ASTNodeImporter::ImportDefinition(ObjCProtocolDecl *From,
+ ObjCProtocolDecl *To,
+ ImportDefinitionKind Kind) {
+ if (To->getDefinition()) {
+ if (shouldForceImportDeclContext(Kind))
+ ImportDeclContext(From);
+ return false;
+ }
+
+ // Start the protocol definition
+ To->startDefinition();
+
+ // Import protocols
+ SmallVector<ObjCProtocolDecl *, 4> Protocols;
+ SmallVector<SourceLocation, 4> ProtocolLocs;
+ ObjCProtocolDecl::protocol_loc_iterator
+ FromProtoLoc = From->protocol_loc_begin();
+ for (ObjCProtocolDecl::protocol_iterator FromProto = From->protocol_begin(),
+ FromProtoEnd = From->protocol_end();
+ FromProto != FromProtoEnd;
+ ++FromProto, ++FromProtoLoc) {
+ ObjCProtocolDecl *ToProto
+ = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+ if (!ToProto)
+ return true;
+ Protocols.push_back(ToProto);
+ ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ }
+
+ // FIXME: If we're merging, make sure that the protocol list is the same.
+ To->setProtocolList(Protocols.data(), Protocols.size(),
+ ProtocolLocs.data(), Importer.getToContext());
+
+ if (shouldForceImportDeclContext(Kind)) {
+ // Import all of the members of this protocol.
+ ImportDeclContext(From, /*ForceImport=*/true);
+ }
+ return false;
+}
+
+Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
+ // If this protocol has a definition in the translation unit we're coming
+ // from, but this particular declaration is not that definition, import the
+ // definition and map to that.
+ ObjCProtocolDecl *Definition = D->getDefinition();
+ if (Definition && Definition != D) {
+ Decl *ImportedDef = Importer.Import(Definition);
+ if (!ImportedDef)
+ return 0;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ // Import the major distinguishing characteristics of a protocol.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ ObjCProtocolDecl *MergeWithProtocol = 0;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_ObjCProtocol))
+ continue;
+
+ if ((MergeWithProtocol = dyn_cast<ObjCProtocolDecl>(FoundDecls[I])))
+ break;
+ }
+
+ ObjCProtocolDecl *ToProto = MergeWithProtocol;
+ if (!ToProto) {
+ ToProto = ObjCProtocolDecl::Create(Importer.getToContext(), DC,
+ Name.getAsIdentifierInfo(), Loc,
+ Importer.Import(D->getAtStartLoc()),
+ /*PrevDecl=*/0);
+ ToProto->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToProto);
+ }
+
+ Importer.Imported(D, ToProto);
+
+ if (D->isThisDeclarationADefinition() && ImportDefinition(D, ToProto))
+ return 0;
+
+ return ToProto;
+}
+
+bool ASTNodeImporter::ImportDefinition(ObjCInterfaceDecl *From,
+ ObjCInterfaceDecl *To,
+ ImportDefinitionKind Kind) {
+ if (To->getDefinition()) {
+ // Check consistency of superclass.
+ ObjCInterfaceDecl *FromSuper = From->getSuperClass();
+ if (FromSuper) {
+ FromSuper = cast_or_null<ObjCInterfaceDecl>(Importer.Import(FromSuper));
+ if (!FromSuper)
+ return true;
+ }
+
+ ObjCInterfaceDecl *ToSuper = To->getSuperClass();
+ if ((bool)FromSuper != (bool)ToSuper ||
+ (FromSuper && !declaresSameEntity(FromSuper, ToSuper))) {
+ Importer.ToDiag(To->getLocation(),
+ diag::err_odr_objc_superclass_inconsistent)
+ << To->getDeclName();
+ if (ToSuper)
+ Importer.ToDiag(To->getSuperClassLoc(), diag::note_odr_objc_superclass)
+ << To->getSuperClass()->getDeclName();
+ else
+ Importer.ToDiag(To->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ if (From->getSuperClass())
+ Importer.FromDiag(From->getSuperClassLoc(),
+ diag::note_odr_objc_superclass)
+ << From->getSuperClass()->getDeclName();
+ else
+ Importer.FromDiag(From->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ }
+
+ if (shouldForceImportDeclContext(Kind))
+ ImportDeclContext(From);
+ return false;
+ }
+
+ // Start the definition.
+ To->startDefinition();
+
+ // If this class has a superclass, import it.
+ if (From->getSuperClass()) {
+ ObjCInterfaceDecl *Super = cast_or_null<ObjCInterfaceDecl>(
+ Importer.Import(From->getSuperClass()));
+ if (!Super)
+ return true;
+
+ To->setSuperClass(Super);
+ To->setSuperClassLoc(Importer.Import(From->getSuperClassLoc()));
+ }
+
+ // Import protocols
+ SmallVector<ObjCProtocolDecl *, 4> Protocols;
+ SmallVector<SourceLocation, 4> ProtocolLocs;
+ ObjCInterfaceDecl::protocol_loc_iterator
+ FromProtoLoc = From->protocol_loc_begin();
+
+ for (ObjCInterfaceDecl::protocol_iterator FromProto = From->protocol_begin(),
+ FromProtoEnd = From->protocol_end();
+ FromProto != FromProtoEnd;
+ ++FromProto, ++FromProtoLoc) {
+ ObjCProtocolDecl *ToProto
+ = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+ if (!ToProto)
+ return true;
+ Protocols.push_back(ToProto);
+ ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ }
+
+ // FIXME: If we're merging, make sure that the protocol list is the same.
+ To->setProtocolList(Protocols.data(), Protocols.size(),
+ ProtocolLocs.data(), Importer.getToContext());
+
+ // Import categories. When the categories themselves are imported, they'll
+ // hook themselves into this interface.
+ for (ObjCCategoryDecl *FromCat = From->getCategoryList(); FromCat;
+ FromCat = FromCat->getNextClassCategory())
+ Importer.Import(FromCat);
+
+ // If we have an @implementation, import it as well.
+ if (From->getImplementation()) {
+ ObjCImplementationDecl *Impl = cast_or_null<ObjCImplementationDecl>(
+ Importer.Import(From->getImplementation()));
+ if (!Impl)
+ return true;
+
+ To->setImplementation(Impl);
+ }
+
+ if (shouldForceImportDeclContext(Kind)) {
+ // Import all of the members of this class.
+ ImportDeclContext(From, /*ForceImport=*/true);
+ }
+ return false;
+}
+
+Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
+ // If this class has a definition in the translation unit we're coming from,
+ // but this particular declaration is not that definition, import the
+ // definition and map to that.
+ ObjCInterfaceDecl *Definition = D->getDefinition();
+ if (Definition && Definition != D) {
+ Decl *ImportedDef = Importer.Import(Definition);
+ if (!ImportedDef)
+ return 0;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ // Import the major distinguishing characteristics of an @interface.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Look for an existing interface with the same name.
+ ObjCInterfaceDecl *MergeWithIface = 0;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ continue;
+
+ if ((MergeWithIface = dyn_cast<ObjCInterfaceDecl>(FoundDecls[I])))
+ break;
+ }
+
+ // Create an interface declaration, if one does not already exist.
+ ObjCInterfaceDecl *ToIface = MergeWithIface;
+ if (!ToIface) {
+ ToIface = ObjCInterfaceDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getAtStartLoc()),
+ Name.getAsIdentifierInfo(),
+                                        /*PrevDecl=*/0, Loc,
+ D->isImplicitInterfaceDecl());
+ ToIface->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToIface);
+ }
+ Importer.Imported(D, ToIface);
+
+ if (D->isThisDeclarationADefinition() && ImportDefinition(D, ToIface))
+ return 0;
+
+ return ToIface;
+}
+
+Decl *ASTNodeImporter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
+ ObjCCategoryDecl *Category = cast_or_null<ObjCCategoryDecl>(
+ Importer.Import(D->getCategoryDecl()));
+ if (!Category)
+ return 0;
+
+ ObjCCategoryImplDecl *ToImpl = Category->getImplementation();
+ if (!ToImpl) {
+ DeclContext *DC = Importer.ImportContext(D->getDeclContext());
+ if (!DC)
+ return 0;
+
+ SourceLocation CategoryNameLoc = Importer.Import(D->getCategoryNameLoc());
+ ToImpl = ObjCCategoryImplDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getIdentifier()),
+ Category->getClassInterface(),
+ Importer.Import(D->getLocation()),
+ Importer.Import(D->getAtStartLoc()),
+ CategoryNameLoc);
+
+ DeclContext *LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return 0;
+
+ ToImpl->setLexicalDeclContext(LexicalDC);
+ }
+
+ LexicalDC->addDeclInternal(ToImpl);
+ Category->setImplementation(ToImpl);
+ }
+
+ Importer.Imported(D, ToImpl);
+ ImportDeclContext(D);
+ return ToImpl;
+}
+
+Decl *ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
+ // Find the corresponding interface.
+ ObjCInterfaceDecl *Iface = cast_or_null<ObjCInterfaceDecl>(
+ Importer.Import(D->getClassInterface()));
+ if (!Iface)
+ return 0;
+
+ // Import the superclass, if any.
+ ObjCInterfaceDecl *Super = 0;
+ if (D->getSuperClass()) {
+ Super = cast_or_null<ObjCInterfaceDecl>(
+ Importer.Import(D->getSuperClass()));
+ if (!Super)
+ return 0;
+ }
+
+ ObjCImplementationDecl *Impl = Iface->getImplementation();
+ if (!Impl) {
+ // We haven't imported an implementation yet. Create a new @implementation
+ // now.
+ Impl = ObjCImplementationDecl::Create(Importer.getToContext(),
+ Importer.ImportContext(D->getDeclContext()),
+ Iface, Super,
+ Importer.Import(D->getLocation()),
+ Importer.Import(D->getAtStartLoc()),
+ Importer.Import(D->getIvarLBraceLoc()),
+ Importer.Import(D->getIvarRBraceLoc()));
+
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ DeclContext *LexicalDC
+ = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return 0;
+ Impl->setLexicalDeclContext(LexicalDC);
+ }
+
+ // Associate the implementation with the class it implements.
+ Iface->setImplementation(Impl);
+ Importer.Imported(D, Iface->getImplementation());
+ } else {
+ Importer.Imported(D, Iface->getImplementation());
+
+ // Verify that the existing @implementation has the same superclass.
+ if ((Super && !Impl->getSuperClass()) ||
+ (!Super && Impl->getSuperClass()) ||
+ (Super && Impl->getSuperClass() &&
+ !declaresSameEntity(Super->getCanonicalDecl(), Impl->getSuperClass()))) {
+ Importer.ToDiag(Impl->getLocation(),
+ diag::err_odr_objc_superclass_inconsistent)
+ << Iface->getDeclName();
+ // FIXME: It would be nice to have the location of the superclass
+ // below.
+ if (Impl->getSuperClass())
+ Importer.ToDiag(Impl->getLocation(),
+ diag::note_odr_objc_superclass)
+ << Impl->getSuperClass()->getDeclName();
+ else
+ Importer.ToDiag(Impl->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ if (D->getSuperClass())
+ Importer.FromDiag(D->getLocation(),
+ diag::note_odr_objc_superclass)
+ << D->getSuperClass()->getDeclName();
+ else
+ Importer.FromDiag(D->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ return 0;
+ }
+ }
+
+ // Import all of the members of this @implementation.
+ ImportDeclContext(D);
+
+ return Impl;
+}
+
+Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
+ // Import the major distinguishing characteristics of an @property.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Check whether we have already imported this property.
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (ObjCPropertyDecl *FoundProp
+ = dyn_cast<ObjCPropertyDecl>(FoundDecls[I])) {
+ // Check property types.
+ if (!Importer.IsStructurallyEquivalent(D->getType(),
+ FoundProp->getType())) {
+ Importer.ToDiag(Loc, diag::err_odr_objc_property_type_inconsistent)
+ << Name << D->getType() << FoundProp->getType();
+ Importer.ToDiag(FoundProp->getLocation(), diag::note_odr_value_here)
+ << FoundProp->getType();
+ return 0;
+ }
+
+ // FIXME: Check property attributes, getters, setters, etc.?
+
+ // Consider these properties to be equivalent.
+ Importer.Imported(D, FoundProp);
+ return FoundProp;
+ }
+ }
+
+ // Import the type.
+ TypeSourceInfo *T = Importer.Import(D->getTypeSourceInfo());
+ if (!T)
+ return 0;
+
+ // Create the new property.
+ ObjCPropertyDecl *ToProperty
+ = ObjCPropertyDecl::Create(Importer.getToContext(), DC, Loc,
+ Name.getAsIdentifierInfo(),
+ Importer.Import(D->getAtLoc()),
+ Importer.Import(D->getLParenLoc()),
+ T,
+ D->getPropertyImplementation());
+ Importer.Imported(D, ToProperty);
+ ToProperty->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(ToProperty);
+
+ ToProperty->setPropertyAttributes(D->getPropertyAttributes());
+ ToProperty->setPropertyAttributesAsWritten(
+ D->getPropertyAttributesAsWritten());
+ ToProperty->setGetterName(Importer.Import(D->getGetterName()));
+ ToProperty->setSetterName(Importer.Import(D->getSetterName()));
+ ToProperty->setGetterMethodDecl(
+ cast_or_null<ObjCMethodDecl>(Importer.Import(D->getGetterMethodDecl())));
+ ToProperty->setSetterMethodDecl(
+ cast_or_null<ObjCMethodDecl>(Importer.Import(D->getSetterMethodDecl())));
+ ToProperty->setPropertyIvarDecl(
+ cast_or_null<ObjCIvarDecl>(Importer.Import(D->getPropertyIvarDecl())));
+ return ToProperty;
+}
+
+Decl *ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
+ ObjCPropertyDecl *Property = cast_or_null<ObjCPropertyDecl>(
+ Importer.Import(D->getPropertyDecl()));
+ if (!Property)
+ return 0;
+
+ DeclContext *DC = Importer.ImportContext(D->getDeclContext());
+ if (!DC)
+ return 0;
+
+ // Import the lexical declaration context.
+ DeclContext *LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return 0;
+ }
+
+ ObjCImplDecl *InImpl = dyn_cast<ObjCImplDecl>(LexicalDC);
+ if (!InImpl)
+ return 0;
+
+ // Import the ivar (for an @synthesize).
+ ObjCIvarDecl *Ivar = 0;
+ if (D->getPropertyIvarDecl()) {
+ Ivar = cast_or_null<ObjCIvarDecl>(
+ Importer.Import(D->getPropertyIvarDecl()));
+ if (!Ivar)
+ return 0;
+ }
+
+ ObjCPropertyImplDecl *ToImpl
+ = InImpl->FindPropertyImplDecl(Property->getIdentifier());
+ if (!ToImpl) {
+ ToImpl = ObjCPropertyImplDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getLocStart()),
+ Importer.Import(D->getLocation()),
+ Property,
+ D->getPropertyImplementation(),
+ Ivar,
+ Importer.Import(D->getPropertyIvarDeclLoc()));
+ ToImpl->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToImpl);
+ LexicalDC->addDeclInternal(ToImpl);
+ } else {
+ // Check that we have the same kind of property implementation (@synthesize
+ // vs. @dynamic).
+ if (D->getPropertyImplementation() != ToImpl->getPropertyImplementation()) {
+ Importer.ToDiag(ToImpl->getLocation(),
+ diag::err_odr_objc_property_impl_kind_inconsistent)
+ << Property->getDeclName()
+ << (ToImpl->getPropertyImplementation()
+ == ObjCPropertyImplDecl::Dynamic);
+ Importer.FromDiag(D->getLocation(),
+ diag::note_odr_objc_property_impl_kind)
+ << D->getPropertyDecl()->getDeclName()
+ << (D->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic);
+ return 0;
+ }
+
+    // For @synthesize, check that we have the same ivar.
+ if (D->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize &&
+ Ivar != ToImpl->getPropertyIvarDecl()) {
+ Importer.ToDiag(ToImpl->getPropertyIvarDeclLoc(),
+ diag::err_odr_objc_synthesize_ivar_inconsistent)
+ << Property->getDeclName()
+ << ToImpl->getPropertyIvarDecl()->getDeclName()
+ << Ivar->getDeclName();
+ Importer.FromDiag(D->getPropertyIvarDeclLoc(),
+ diag::note_odr_objc_synthesize_ivar_here)
+ << D->getPropertyIvarDecl()->getDeclName();
+ return 0;
+ }
+
+ // Merge the existing implementation with the new implementation.
+ Importer.Imported(D, ToImpl);
+ }
+
+ return ToImpl;
+}
+
+Decl *ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
+  // For template parameters, we adopt the translation unit as our
+  // declaration context. This context will be fixed when the actual template
+  // declaration is created.
+
+ // FIXME: Import default argument.
+ return TemplateTypeParmDecl::Create(Importer.getToContext(),
+ Importer.getToContext().getTranslationUnitDecl(),
+ Importer.Import(D->getLocStart()),
+ Importer.Import(D->getLocation()),
+ D->getDepth(),
+ D->getIndex(),
+ Importer.Import(D->getIdentifier()),
+ D->wasDeclaredWithTypename(),
+ D->isParameterPack());
+}
+
+Decl *
+ASTNodeImporter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
+ // Import the name of this declaration.
+ DeclarationName Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return 0;
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ // Import the type of this declaration.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ // Import type-source information.
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ if (D->getTypeSourceInfo() && !TInfo)
+ return 0;
+
+ // FIXME: Import default argument.
+
+ return NonTypeTemplateParmDecl::Create(Importer.getToContext(),
+ Importer.getToContext().getTranslationUnitDecl(),
+ Importer.Import(D->getInnerLocStart()),
+ Loc, D->getDepth(), D->getPosition(),
+ Name.getAsIdentifierInfo(),
+ T, D->isParameterPack(), TInfo);
+}
+
+Decl *
+ASTNodeImporter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
+ // Import the name of this declaration.
+ DeclarationName Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return 0;
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ // Import template parameters.
+ TemplateParameterList *TemplateParams
+ = ImportTemplateParameterList(D->getTemplateParameters());
+ if (!TemplateParams)
+ return 0;
+
+ // FIXME: Import default argument.
+
+ return TemplateTemplateParmDecl::Create(Importer.getToContext(),
+ Importer.getToContext().getTranslationUnitDecl(),
+ Loc, D->getDepth(), D->getPosition(),
+ D->isParameterPack(),
+ Name.getAsIdentifierInfo(),
+ TemplateParams);
+}
+
+Decl *ASTNodeImporter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
+ // If this record has a definition in the translation unit we're coming from,
+ // but this particular declaration is not that definition, import the
+ // definition and map to that.
+ CXXRecordDecl *Definition
+ = cast_or_null<CXXRecordDecl>(D->getTemplatedDecl()->getDefinition());
+ if (Definition && Definition != D->getTemplatedDecl()) {
+ Decl *ImportedDef
+ = Importer.Import(Definition->getDescribedClassTemplate());
+ if (!ImportedDef)
+ return 0;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ // Import the major distinguishing characteristics of this class template.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // We may already have a template of the same name; try to find and match it.
+ if (!DC->isFunctionOrMethod()) {
+ SmallVector<NamedDecl *, 4> ConflictingDecls;
+ llvm::SmallVector<NamedDecl *, 2> FoundDecls;
+ DC->localUncachedLookup(Name, FoundDecls);
+ for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
+ if (!FoundDecls[I]->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ continue;
+
+ Decl *Found = FoundDecls[I];
+ if (ClassTemplateDecl *FoundTemplate
+ = dyn_cast<ClassTemplateDecl>(Found)) {
+ if (IsStructuralMatch(D, FoundTemplate)) {
+ // The class templates structurally match; call it the same template.
+ // FIXME: We may be filling in a forward declaration here. Handle
+ // this case!
+ Importer.Imported(D->getTemplatedDecl(),
+ FoundTemplate->getTemplatedDecl());
+ return Importer.Imported(D, FoundTemplate);
+ }
+ }
+
+ ConflictingDecls.push_back(FoundDecls[I]);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Ordinary,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ }
+
+ if (!Name)
+ return 0;
+ }
+
+ CXXRecordDecl *DTemplated = D->getTemplatedDecl();
+
+ // Create the declaration that is being templated.
+ SourceLocation StartLoc = Importer.Import(DTemplated->getLocStart());
+ SourceLocation IdLoc = Importer.Import(DTemplated->getLocation());
+ CXXRecordDecl *D2Templated = CXXRecordDecl::Create(Importer.getToContext(),
+ DTemplated->getTagKind(),
+ DC, StartLoc, IdLoc,
+ Name.getAsIdentifierInfo());
+ D2Templated->setAccess(DTemplated->getAccess());
+ D2Templated->setQualifierInfo(Importer.Import(DTemplated->getQualifierLoc()));
+ D2Templated->setLexicalDeclContext(LexicalDC);
+
+ // Create the class template declaration itself.
+ TemplateParameterList *TemplateParams
+ = ImportTemplateParameterList(D->getTemplateParameters());
+ if (!TemplateParams)
+ return 0;
+
+ ClassTemplateDecl *D2 = ClassTemplateDecl::Create(Importer.getToContext(), DC,
+ Loc, Name, TemplateParams,
+ D2Templated,
+ /*PrevDecl=*/0);
+ D2Templated->setDescribedClassTemplate(D2);
+
+ D2->setAccess(D->getAccess());
+ D2->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(D2);
+
+ // Note the relationship between the class templates.
+ Importer.Imported(D, D2);
+ Importer.Imported(DTemplated, D2Templated);
+
+ if (DTemplated->isCompleteDefinition() &&
+ !D2Templated->isCompleteDefinition()) {
+ // FIXME: Import definition!
+ }
+
+ return D2;
+}
+
+Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D) {
+ // If this record has a definition in the translation unit we're coming from,
+ // but this particular declaration is not that definition, import the
+ // definition and map to that.
+ TagDecl *Definition = D->getDefinition();
+ if (Definition && Definition != D) {
+ Decl *ImportedDef = Importer.Import(Definition);
+ if (!ImportedDef)
+ return 0;
+
+ return Importer.Imported(D, ImportedDef);
+ }
+
+ ClassTemplateDecl *ClassTemplate
+ = cast_or_null<ClassTemplateDecl>(Importer.Import(
+ D->getSpecializedTemplate()));
+ if (!ClassTemplate)
+ return 0;
+
+ // Import the context of this declaration.
+ DeclContext *DC = ClassTemplate->getDeclContext();
+ if (!DC)
+ return 0;
+
+ DeclContext *LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return 0;
+ }
+
+ // Import the location of this declaration.
+ SourceLocation StartLoc = Importer.Import(D->getLocStart());
+ SourceLocation IdLoc = Importer.Import(D->getLocation());
+
+ // Import template arguments.
+ SmallVector<TemplateArgument, 2> TemplateArgs;
+ if (ImportTemplateArguments(D->getTemplateArgs().data(),
+ D->getTemplateArgs().size(),
+ TemplateArgs))
+ return 0;
+
+ // Try to find an existing specialization with these template arguments.
+ void *InsertPos = 0;
+ ClassTemplateSpecializationDecl *D2
+ = ClassTemplate->findSpecialization(TemplateArgs.data(),
+ TemplateArgs.size(), InsertPos);
+ if (D2) {
+ // We already have a class template specialization with these template
+ // arguments.
+
+ // FIXME: Check for specialization vs. instantiation errors.
+
+ if (RecordDecl *FoundDef = D2->getDefinition()) {
+ if (!D->isCompleteDefinition() || IsStructuralMatch(D, FoundDef)) {
+        // The record types structurally match, or the "from" translation
+        // unit only had a forward declaration anyway; treat them as the
+        // same type.
+ return Importer.Imported(D, FoundDef);
+ }
+ }
+ } else {
+ // Create a new specialization.
+ D2 = ClassTemplateSpecializationDecl::Create(Importer.getToContext(),
+ D->getTagKind(), DC,
+ StartLoc, IdLoc,
+ ClassTemplate,
+ TemplateArgs.data(),
+ TemplateArgs.size(),
+ /*PrevDecl=*/0);
+ D2->setSpecializationKind(D->getSpecializationKind());
+
+ // Add this specialization to the class template.
+ ClassTemplate->AddSpecialization(D2, InsertPos);
+
+ // Import the qualifier, if any.
+ D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
+
+ // Add the specialization to this context.
+ D2->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDeclInternal(D2);
+ }
+ Importer.Imported(D, D2);
+
+ if (D->isCompleteDefinition() && ImportDefinition(D, D2))
+ return 0;
+
+ return D2;
+}
+
+//----------------------------------------------------------------------------
+// Import Statements
+//----------------------------------------------------------------------------
+
+Stmt *ASTNodeImporter::VisitStmt(Stmt *S) {
+ Importer.FromDiag(S->getLocStart(), diag::err_unsupported_ast_node)
+ << S->getStmtClassName();
+ return 0;
+}
+
+//----------------------------------------------------------------------------
+// Import Expressions
+//----------------------------------------------------------------------------
+Expr *ASTNodeImporter::VisitExpr(Expr *E) {
+ Importer.FromDiag(E->getLocStart(), diag::err_unsupported_ast_node)
+ << E->getStmtClassName();
+ return 0;
+}
+
+Expr *ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
+ ValueDecl *ToD = cast_or_null<ValueDecl>(Importer.Import(E->getDecl()));
+ if (!ToD)
+ return 0;
+
+ NamedDecl *FoundD = 0;
+ if (E->getDecl() != E->getFoundDecl()) {
+ FoundD = cast_or_null<NamedDecl>(Importer.Import(E->getFoundDecl()));
+ if (!FoundD)
+ return 0;
+ }
+
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ DeclRefExpr *DRE = DeclRefExpr::Create(Importer.getToContext(),
+ Importer.Import(E->getQualifierLoc()),
+ Importer.Import(E->getTemplateKeywordLoc()),
+ ToD,
+ E->refersToEnclosingLocal(),
+ Importer.Import(E->getLocation()),
+ T, E->getValueKind(),
+ FoundD,
+ /*FIXME:TemplateArgs=*/0);
+ if (E->hadMultipleCandidates())
+ DRE->setHadMultipleCandidates(true);
+ return DRE;
+}
+
+Expr *ASTNodeImporter::VisitIntegerLiteral(IntegerLiteral *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ return IntegerLiteral::Create(Importer.getToContext(),
+ E->getValue(), T,
+ Importer.Import(E->getLocation()));
+}
+
+Expr *ASTNodeImporter::VisitCharacterLiteral(CharacterLiteral *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ return new (Importer.getToContext()) CharacterLiteral(E->getValue(),
+ E->getKind(), T,
+ Importer.Import(E->getLocation()));
+}
+
+Expr *ASTNodeImporter::VisitParenExpr(ParenExpr *E) {
+ Expr *SubExpr = Importer.Import(E->getSubExpr());
+ if (!SubExpr)
+ return 0;
+
+ return new (Importer.getToContext())
+ ParenExpr(Importer.Import(E->getLParen()),
+ Importer.Import(E->getRParen()),
+ SubExpr);
+}
+
+Expr *ASTNodeImporter::VisitUnaryOperator(UnaryOperator *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ Expr *SubExpr = Importer.Import(E->getSubExpr());
+ if (!SubExpr)
+ return 0;
+
+ return new (Importer.getToContext()) UnaryOperator(SubExpr, E->getOpcode(),
+ T, E->getValueKind(),
+ E->getObjectKind(),
+ Importer.Import(E->getOperatorLoc()));
+}
+
+Expr *ASTNodeImporter::VisitUnaryExprOrTypeTraitExpr(
+ UnaryExprOrTypeTraitExpr *E) {
+ QualType ResultType = Importer.Import(E->getType());
+
+ if (E->isArgumentType()) {
+ TypeSourceInfo *TInfo = Importer.Import(E->getArgumentTypeInfo());
+ if (!TInfo)
+ return 0;
+
+ return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(E->getKind(),
+ TInfo, ResultType,
+ Importer.Import(E->getOperatorLoc()),
+ Importer.Import(E->getRParenLoc()));
+ }
+
+ Expr *SubExpr = Importer.Import(E->getArgumentExpr());
+ if (!SubExpr)
+ return 0;
+
+ return new (Importer.getToContext()) UnaryExprOrTypeTraitExpr(E->getKind(),
+ SubExpr, ResultType,
+ Importer.Import(E->getOperatorLoc()),
+ Importer.Import(E->getRParenLoc()));
+}
+
+Expr *ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ Expr *LHS = Importer.Import(E->getLHS());
+ if (!LHS)
+ return 0;
+
+ Expr *RHS = Importer.Import(E->getRHS());
+ if (!RHS)
+ return 0;
+
+ return new (Importer.getToContext()) BinaryOperator(LHS, RHS, E->getOpcode(),
+ T, E->getValueKind(),
+ E->getObjectKind(),
+ Importer.Import(E->getOperatorLoc()));
+}
+
+Expr *ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ QualType CompLHSType = Importer.Import(E->getComputationLHSType());
+ if (CompLHSType.isNull())
+ return 0;
+
+ QualType CompResultType = Importer.Import(E->getComputationResultType());
+ if (CompResultType.isNull())
+ return 0;
+
+ Expr *LHS = Importer.Import(E->getLHS());
+ if (!LHS)
+ return 0;
+
+ Expr *RHS = Importer.Import(E->getRHS());
+ if (!RHS)
+ return 0;
+
+ return new (Importer.getToContext())
+ CompoundAssignOperator(LHS, RHS, E->getOpcode(),
+ T, E->getValueKind(),
+ E->getObjectKind(),
+ CompLHSType, CompResultType,
+ Importer.Import(E->getOperatorLoc()));
+}
+
+static bool ImportCastPath(CastExpr *E, CXXCastPath &Path) {
+ if (E->path_empty()) return false;
+
+ // TODO: import cast paths
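+  // Until that is implemented, report failure so callers refuse to import
+  // casts that rely on a non-trivial inheritance path.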
+ return true;
+}
+
+Expr *ASTNodeImporter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ Expr *SubExpr = Importer.Import(E->getSubExpr());
+ if (!SubExpr)
+ return 0;
+
+ CXXCastPath BasePath;
+ if (ImportCastPath(E, BasePath))
+ return 0;
+
+ return ImplicitCastExpr::Create(Importer.getToContext(), T, E->getCastKind(),
+ SubExpr, &BasePath, E->getValueKind());
+}
+
+Expr *ASTNodeImporter::VisitCStyleCastExpr(CStyleCastExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ Expr *SubExpr = Importer.Import(E->getSubExpr());
+ if (!SubExpr)
+ return 0;
+
+ TypeSourceInfo *TInfo = Importer.Import(E->getTypeInfoAsWritten());
+ if (!TInfo && E->getTypeInfoAsWritten())
+ return 0;
+
+ CXXCastPath BasePath;
+ if (ImportCastPath(E, BasePath))
+ return 0;
+
+ return CStyleCastExpr::Create(Importer.getToContext(), T,
+ E->getValueKind(), E->getCastKind(),
+ SubExpr, &BasePath, TInfo,
+ Importer.Import(E->getLParenLoc()),
+ Importer.Import(E->getRParenLoc()));
+}
+
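+// Illustrative usage sketch (a hedged example, not part of the upstream code):
+// construct an ASTImporter over destination and source ASTContext/FileManager
+// pairs, then call Import() on each declaration to copy. The names ToCtx,
+// ToFM, FromCtx, FromFM and FromDecl are hypothetical placeholders.
+//
+//   ASTImporter Importer(ToCtx, ToFM, FromCtx, FromFM,
+//                        /*MinimalImport=*/false);
+//   Decl *Copied = Importer.Import(FromDecl);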
+ASTImporter::ASTImporter(ASTContext &ToContext, FileManager &ToFileManager,
+ ASTContext &FromContext, FileManager &FromFileManager,
+ bool MinimalImport)
+ : ToContext(ToContext), FromContext(FromContext),
+ ToFileManager(ToFileManager), FromFileManager(FromFileManager),
+ Minimal(MinimalImport)
+{
+ ImportedDecls[FromContext.getTranslationUnitDecl()]
+ = ToContext.getTranslationUnitDecl();
+}
+
+ASTImporter::~ASTImporter() { }
+
+QualType ASTImporter::Import(QualType FromT) {
+ if (FromT.isNull())
+ return QualType();
+
+ const Type *fromTy = FromT.getTypePtr();
+
+ // Check whether we've already imported this type.
+ llvm::DenseMap<const Type *, const Type *>::iterator Pos
+ = ImportedTypes.find(fromTy);
+ if (Pos != ImportedTypes.end())
+ return ToContext.getQualifiedType(Pos->second, FromT.getLocalQualifiers());
+
+ // Import the type
+ ASTNodeImporter Importer(*this);
+ QualType ToT = Importer.Visit(fromTy);
+ if (ToT.isNull())
+ return ToT;
+
+ // Record the imported type.
+ ImportedTypes[fromTy] = ToT.getTypePtr();
+
+ return ToContext.getQualifiedType(ToT, FromT.getLocalQualifiers());
+}
+
+TypeSourceInfo *ASTImporter::Import(TypeSourceInfo *FromTSI) {
+ if (!FromTSI)
+ return FromTSI;
+
+ // FIXME: For now we just create a "trivial" type source info based
+ // on the type and a single location. Implement a real version of this.
+ QualType T = Import(FromTSI->getType());
+ if (T.isNull())
+ return 0;
+
+ return ToContext.getTrivialTypeSourceInfo(T,
+ FromTSI->getTypeLoc().getLocStart());
+}
+
+Decl *ASTImporter::Import(Decl *FromD) {
+ if (!FromD)
+ return 0;
+
+ ASTNodeImporter Importer(*this);
+
+ // Check whether we've already imported this declaration.
+ llvm::DenseMap<Decl *, Decl *>::iterator Pos = ImportedDecls.find(FromD);
+ if (Pos != ImportedDecls.end()) {
+ Decl *ToD = Pos->second;
+ Importer.ImportDefinitionIfNeeded(FromD, ToD);
+ return ToD;
+ }
+
+ // Import the type
+ Decl *ToD = Importer.Visit(FromD);
+ if (!ToD)
+ return 0;
+
+ // Record the imported declaration.
+ ImportedDecls[FromD] = ToD;
+
+ if (TagDecl *FromTag = dyn_cast<TagDecl>(FromD)) {
+ // Keep track of anonymous tags that have an associated typedef.
+ if (FromTag->getTypedefNameForAnonDecl())
+ AnonTagsWithPendingTypedefs.push_back(FromTag);
+ } else if (TypedefNameDecl *FromTypedef = dyn_cast<TypedefNameDecl>(FromD)) {
+ // When we've finished transforming a typedef, see whether it was the
+ // typedef for an anonymous tag.
+ for (SmallVector<TagDecl *, 4>::iterator
+ FromTag = AnonTagsWithPendingTypedefs.begin(),
+ FromTagEnd = AnonTagsWithPendingTypedefs.end();
+ FromTag != FromTagEnd; ++FromTag) {
+ if ((*FromTag)->getTypedefNameForAnonDecl() == FromTypedef) {
+ if (TagDecl *ToTag = cast_or_null<TagDecl>(Import(*FromTag))) {
+ // We found the typedef for an anonymous tag; link them.
+ ToTag->setTypedefNameForAnonDecl(cast<TypedefNameDecl>(ToD));
+ AnonTagsWithPendingTypedefs.erase(FromTag);
+ break;
+ }
+ }
+ }
+ }
+
+ return ToD;
+}
+
+DeclContext *ASTImporter::ImportContext(DeclContext *FromDC) {
+ if (!FromDC)
+ return FromDC;
+
+ DeclContext *ToDC = cast_or_null<DeclContext>(Import(cast<Decl>(FromDC)));
+ if (!ToDC)
+ return 0;
+
+ // When we're using a record/enum/Objective-C class/protocol as a context, we
+ // need it to have a definition.
+ if (RecordDecl *ToRecord = dyn_cast<RecordDecl>(ToDC)) {
+ RecordDecl *FromRecord = cast<RecordDecl>(FromDC);
+ if (ToRecord->isCompleteDefinition()) {
+ // Do nothing.
+ } else if (FromRecord->isCompleteDefinition()) {
+ ASTNodeImporter(*this).ImportDefinition(FromRecord, ToRecord,
+ ASTNodeImporter::IDK_Basic);
+ } else {
+ CompleteDecl(ToRecord);
+ }
+ } else if (EnumDecl *ToEnum = dyn_cast<EnumDecl>(ToDC)) {
+ EnumDecl *FromEnum = cast<EnumDecl>(FromDC);
+ if (ToEnum->isCompleteDefinition()) {
+ // Do nothing.
+ } else if (FromEnum->isCompleteDefinition()) {
+ ASTNodeImporter(*this).ImportDefinition(FromEnum, ToEnum,
+ ASTNodeImporter::IDK_Basic);
+ } else {
+ CompleteDecl(ToEnum);
+ }
+ } else if (ObjCInterfaceDecl *ToClass = dyn_cast<ObjCInterfaceDecl>(ToDC)) {
+ ObjCInterfaceDecl *FromClass = cast<ObjCInterfaceDecl>(FromDC);
+ if (ToClass->getDefinition()) {
+ // Do nothing.
+ } else if (ObjCInterfaceDecl *FromDef = FromClass->getDefinition()) {
+ ASTNodeImporter(*this).ImportDefinition(FromDef, ToClass,
+ ASTNodeImporter::IDK_Basic);
+ } else {
+ CompleteDecl(ToClass);
+ }
+ } else if (ObjCProtocolDecl *ToProto = dyn_cast<ObjCProtocolDecl>(ToDC)) {
+ ObjCProtocolDecl *FromProto = cast<ObjCProtocolDecl>(FromDC);
+ if (ToProto->getDefinition()) {
+ // Do nothing.
+ } else if (ObjCProtocolDecl *FromDef = FromProto->getDefinition()) {
+ ASTNodeImporter(*this).ImportDefinition(FromDef, ToProto,
+ ASTNodeImporter::IDK_Basic);
+ } else {
+ CompleteDecl(ToProto);
+ }
+ }
+
+ return ToDC;
+}
+
+Expr *ASTImporter::Import(Expr *FromE) {
+ if (!FromE)
+ return 0;
+
+ return cast_or_null<Expr>(Import(cast<Stmt>(FromE)));
+}
+
+Stmt *ASTImporter::Import(Stmt *FromS) {
+ if (!FromS)
+ return 0;
+
+ // Check whether we've already imported this declaration.
+ llvm::DenseMap<Stmt *, Stmt *>::iterator Pos = ImportedStmts.find(FromS);
+ if (Pos != ImportedStmts.end())
+ return Pos->second;
+
+ // Import the type
+ ASTNodeImporter Importer(*this);
+ Stmt *ToS = Importer.Visit(FromS);
+ if (!ToS)
+ return 0;
+
+ // Record the imported declaration.
+ ImportedStmts[FromS] = ToS;
+ return ToS;
+}
+
+NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) {
+ if (!FromNNS)
+ return 0;
+
+ NestedNameSpecifier *prefix = Import(FromNNS->getPrefix());
+
+ switch (FromNNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ if (IdentifierInfo *II = Import(FromNNS->getAsIdentifier())) {
+ return NestedNameSpecifier::Create(ToContext, prefix, II);
+ }
+ return 0;
+
+ case NestedNameSpecifier::Namespace:
+ if (NamespaceDecl *NS =
+ cast<NamespaceDecl>(Import(FromNNS->getAsNamespace()))) {
+ return NestedNameSpecifier::Create(ToContext, prefix, NS);
+ }
+ return 0;
+
+ case NestedNameSpecifier::NamespaceAlias:
+ if (NamespaceAliasDecl *NSAD =
+ cast<NamespaceAliasDecl>(Import(FromNNS->getAsNamespaceAlias()))) {
+ return NestedNameSpecifier::Create(ToContext, prefix, NSAD);
+ }
+ return 0;
+
+ case NestedNameSpecifier::Global:
+ return NestedNameSpecifier::GlobalSpecifier(ToContext);
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ QualType T = Import(QualType(FromNNS->getAsType(), 0u));
+ if (!T.isNull()) {
+ bool bTemplate = FromNNS->getKind() ==
+ NestedNameSpecifier::TypeSpecWithTemplate;
+ return NestedNameSpecifier::Create(ToContext, prefix,
+ bTemplate, T.getTypePtr());
+ }
+ }
+ return 0;
+ }
+
+ llvm_unreachable("Invalid nested name specifier kind");
+}
+
+NestedNameSpecifierLoc ASTImporter::Import(NestedNameSpecifierLoc FromNNS) {
+ // FIXME: Implement!
+ return NestedNameSpecifierLoc();
+}
+
+TemplateName ASTImporter::Import(TemplateName From) {
+ switch (From.getKind()) {
+ case TemplateName::Template:
+ if (TemplateDecl *ToTemplate
+ = cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl())))
+ return TemplateName(ToTemplate);
+
+ return TemplateName();
+
+ case TemplateName::OverloadedTemplate: {
+ OverloadedTemplateStorage *FromStorage = From.getAsOverloadedTemplate();
+ UnresolvedSet<2> ToTemplates;
+ for (OverloadedTemplateStorage::iterator I = FromStorage->begin(),
+ E = FromStorage->end();
+ I != E; ++I) {
+ if (NamedDecl *To = cast_or_null<NamedDecl>(Import(*I)))
+ ToTemplates.addDecl(To);
+ else
+ return TemplateName();
+ }
+ return ToContext.getOverloadedTemplateName(ToTemplates.begin(),
+ ToTemplates.end());
+ }
+
+ case TemplateName::QualifiedTemplate: {
+ QualifiedTemplateName *QTN = From.getAsQualifiedTemplateName();
+ NestedNameSpecifier *Qualifier = Import(QTN->getQualifier());
+ if (!Qualifier)
+ return TemplateName();
+
+ if (TemplateDecl *ToTemplate
+ = cast_or_null<TemplateDecl>(Import(From.getAsTemplateDecl())))
+ return ToContext.getQualifiedTemplateName(Qualifier,
+ QTN->hasTemplateKeyword(),
+ ToTemplate);
+
+ return TemplateName();
+ }
+
+ case TemplateName::DependentTemplate: {
+ DependentTemplateName *DTN = From.getAsDependentTemplateName();
+ NestedNameSpecifier *Qualifier = Import(DTN->getQualifier());
+ if (!Qualifier)
+ return TemplateName();
+
+ if (DTN->isIdentifier()) {
+ return ToContext.getDependentTemplateName(Qualifier,
+ Import(DTN->getIdentifier()));
+ }
+
+ return ToContext.getDependentTemplateName(Qualifier, DTN->getOperator());
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = From.getAsSubstTemplateTemplateParm();
+ TemplateTemplateParmDecl *param
+ = cast_or_null<TemplateTemplateParmDecl>(Import(subst->getParameter()));
+ if (!param)
+ return TemplateName();
+
+ TemplateName replacement = Import(subst->getReplacement());
+ if (replacement.isNull()) return TemplateName();
+
+ return ToContext.getSubstTemplateTemplateParm(param, replacement);
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ SubstTemplateTemplateParmPackStorage *SubstPack
+ = From.getAsSubstTemplateTemplateParmPack();
+ TemplateTemplateParmDecl *Param
+ = cast_or_null<TemplateTemplateParmDecl>(
+ Import(SubstPack->getParameterPack()));
+ if (!Param)
+ return TemplateName();
+
+ ASTNodeImporter Importer(*this);
+ TemplateArgument ArgPack
+ = Importer.ImportTemplateArgument(SubstPack->getArgumentPack());
+ if (ArgPack.isNull())
+ return TemplateName();
+
+ return ToContext.getSubstTemplateTemplateParmPack(Param, ArgPack);
+ }
+ }
+
+ llvm_unreachable("Invalid template name kind");
+}
+
+SourceLocation ASTImporter::Import(SourceLocation FromLoc) {
+ if (FromLoc.isInvalid())
+ return SourceLocation();
+
+ SourceManager &FromSM = FromContext.getSourceManager();
+
+ // For now, map everything down to its spelling location, so that we
+ // don't have to import macro expansions.
+ // FIXME: Import macro expansions!
+ FromLoc = FromSM.getSpellingLoc(FromLoc);
+ std::pair<FileID, unsigned> Decomposed = FromSM.getDecomposedLoc(FromLoc);
+ SourceManager &ToSM = ToContext.getSourceManager();
+ return ToSM.getLocForStartOfFile(Import(Decomposed.first))
+ .getLocWithOffset(Decomposed.second);
+}
+
+SourceRange ASTImporter::Import(SourceRange FromRange) {
+ return SourceRange(Import(FromRange.getBegin()), Import(FromRange.getEnd()));
+}
+
+FileID ASTImporter::Import(FileID FromID) {
+ llvm::DenseMap<FileID, FileID>::iterator Pos
+ = ImportedFileIDs.find(FromID);
+ if (Pos != ImportedFileIDs.end())
+ return Pos->second;
+
+ SourceManager &FromSM = FromContext.getSourceManager();
+ SourceManager &ToSM = ToContext.getSourceManager();
+ const SrcMgr::SLocEntry &FromSLoc = FromSM.getSLocEntry(FromID);
+ assert(FromSLoc.isFile() && "Cannot handle macro expansions yet");
+
+ // Include location of this file.
+ SourceLocation ToIncludeLoc = Import(FromSLoc.getFile().getIncludeLoc());
+
+  // Map the FileID into the "to" source manager.
+ FileID ToID;
+ const SrcMgr::ContentCache *Cache = FromSLoc.getFile().getContentCache();
+ if (Cache->OrigEntry) {
+ // FIXME: We probably want to use getVirtualFile(), so we don't hit the
+ // disk again
+ // FIXME: We definitely want to re-use the existing MemoryBuffer, rather
+ // than mmap the files several times.
+ const FileEntry *Entry = ToFileManager.getFile(Cache->OrigEntry->getName());
+ ToID = ToSM.createFileID(Entry, ToIncludeLoc,
+ FromSLoc.getFile().getFileCharacteristic());
+ } else {
+ // FIXME: We want to re-use the existing MemoryBuffer!
+ const llvm::MemoryBuffer *
+ FromBuf = Cache->getBuffer(FromContext.getDiagnostics(), FromSM);
+ llvm::MemoryBuffer *ToBuf
+ = llvm::MemoryBuffer::getMemBufferCopy(FromBuf->getBuffer(),
+ FromBuf->getBufferIdentifier());
+ ToID = ToSM.createFileIDForMemBuffer(ToBuf);
+ }
+
+ ImportedFileIDs[FromID] = ToID;
+ return ToID;
+}
+
+void ASTImporter::ImportDefinition(Decl *From) {
+ Decl *To = Import(From);
+ if (!To)
+ return;
+
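+  // If the declaration is a context whose "to" counterpart still lacks a
+  // definition, import the complete definition; otherwise force-import all of
+  // the context's members.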
+  if (DeclContext *FromDC = dyn_cast<DeclContext>(From)) {
+ ASTNodeImporter Importer(*this);
+
+ if (RecordDecl *ToRecord = dyn_cast<RecordDecl>(To)) {
+ if (!ToRecord->getDefinition()) {
+ Importer.ImportDefinition(cast<RecordDecl>(FromDC), ToRecord,
+ ASTNodeImporter::IDK_Everything);
+ return;
+ }
+ }
+
+ if (EnumDecl *ToEnum = dyn_cast<EnumDecl>(To)) {
+ if (!ToEnum->getDefinition()) {
+ Importer.ImportDefinition(cast<EnumDecl>(FromDC), ToEnum,
+ ASTNodeImporter::IDK_Everything);
+ return;
+ }
+ }
+
+ if (ObjCInterfaceDecl *ToIFace = dyn_cast<ObjCInterfaceDecl>(To)) {
+ if (!ToIFace->getDefinition()) {
+ Importer.ImportDefinition(cast<ObjCInterfaceDecl>(FromDC), ToIFace,
+ ASTNodeImporter::IDK_Everything);
+ return;
+ }
+ }
+
+ if (ObjCProtocolDecl *ToProto = dyn_cast<ObjCProtocolDecl>(To)) {
+ if (!ToProto->getDefinition()) {
+ Importer.ImportDefinition(cast<ObjCProtocolDecl>(FromDC), ToProto,
+ ASTNodeImporter::IDK_Everything);
+ return;
+ }
+ }
+
+ Importer.ImportDeclContext(FromDC, true);
+ }
+}
+
+DeclarationName ASTImporter::Import(DeclarationName FromName) {
+ if (!FromName)
+ return DeclarationName();
+
+ switch (FromName.getNameKind()) {
+ case DeclarationName::Identifier:
+ return Import(FromName.getAsIdentifierInfo());
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ return Import(FromName.getObjCSelector());
+
+ case DeclarationName::CXXConstructorName: {
+ QualType T = Import(FromName.getCXXNameType());
+ if (T.isNull())
+ return DeclarationName();
+
+ return ToContext.DeclarationNames.getCXXConstructorName(
+ ToContext.getCanonicalType(T));
+ }
+
+ case DeclarationName::CXXDestructorName: {
+ QualType T = Import(FromName.getCXXNameType());
+ if (T.isNull())
+ return DeclarationName();
+
+ return ToContext.DeclarationNames.getCXXDestructorName(
+ ToContext.getCanonicalType(T));
+ }
+
+ case DeclarationName::CXXConversionFunctionName: {
+ QualType T = Import(FromName.getCXXNameType());
+ if (T.isNull())
+ return DeclarationName();
+
+ return ToContext.DeclarationNames.getCXXConversionFunctionName(
+ ToContext.getCanonicalType(T));
+ }
+
+ case DeclarationName::CXXOperatorName:
+ return ToContext.DeclarationNames.getCXXOperatorName(
+ FromName.getCXXOverloadedOperator());
+
+ case DeclarationName::CXXLiteralOperatorName:
+ return ToContext.DeclarationNames.getCXXLiteralOperatorName(
+ Import(FromName.getCXXLiteralIdentifier()));
+
+ case DeclarationName::CXXUsingDirective:
+ // FIXME: STATICS!
+ return DeclarationName::getUsingDirectiveName();
+ }
+
+ llvm_unreachable("Invalid DeclarationName Kind!");
+}
+
+IdentifierInfo *ASTImporter::Import(const IdentifierInfo *FromId) {
+ if (!FromId)
+ return 0;
+
+ return &ToContext.Idents.get(FromId->getName());
+}
+
+Selector ASTImporter::Import(Selector FromSel) {
+ if (FromSel.isNull())
+ return Selector();
+
+ SmallVector<IdentifierInfo *, 4> Idents;
+ Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(0)));
+ for (unsigned I = 1, N = FromSel.getNumArgs(); I < N; ++I)
+ Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(I)));
+ return ToContext.Selectors.getSelector(FromSel.getNumArgs(), Idents.data());
+}
+
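+// Default conflict handling: keep the name unchanged, so a clashing
+// declaration is imported under the same name rather than being renamed or
+// rejected.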
+DeclarationName ASTImporter::HandleNameConflict(DeclarationName Name,
+ DeclContext *DC,
+ unsigned IDNS,
+ NamedDecl **Decls,
+ unsigned NumDecls) {
+ return Name;
+}
+
+DiagnosticBuilder ASTImporter::ToDiag(SourceLocation Loc, unsigned DiagID) {
+ return ToContext.getDiagnostics().Report(Loc, DiagID);
+}
+
+DiagnosticBuilder ASTImporter::FromDiag(SourceLocation Loc, unsigned DiagID) {
+ return FromContext.getDiagnostics().Report(Loc, DiagID);
+}
+
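+// Give a declaration that must act as a DeclContext a (possibly empty)
+// definition when its counterpart in the "from" context has none to import.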
+void ASTImporter::CompleteDecl(Decl *D) {
+  if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+    if (!ID->getDefinition())
+      ID->startDefinition();
+  } else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
+    if (!PD->getDefinition())
+      PD->startDefinition();
+  } else if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+    if (!TD->getDefinition() && !TD->isBeingDefined()) {
+      TD->startDefinition();
+      TD->setCompleteDefinition(true);
+    }
+  } else {
+    assert(0 && "CompleteDecl called on a Decl that can't be completed");
+  }
+}
+
+Decl *ASTImporter::Imported(Decl *From, Decl *To) {
+ ImportedDecls[From] = To;
+ return To;
+}
+
+bool ASTImporter::IsStructurallyEquivalent(QualType From, QualType To) {
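+  // Fast path: if this type has already been imported and the result is the
+  // same type as To, the two are trivially equivalent.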
+ llvm::DenseMap<const Type *, const Type *>::iterator Pos
+ = ImportedTypes.find(From.getTypePtr());
+ if (Pos != ImportedTypes.end() && ToContext.hasSameType(Import(From), To))
+ return true;
+
+ StructuralEquivalenceContext Ctx(FromContext, ToContext, NonEquivalentDecls);
+ return Ctx.IsStructurallyEquivalent(From, To);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp b/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp
new file mode 100644
index 0000000..cffcc65
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/AttrImpl.cpp
@@ -0,0 +1,26 @@
+//===--- AttrImpl.cpp - Classes for representing attributes -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains out-of-line virtual methods for Attr classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Attr.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/Expr.h"
+using namespace clang;
+
+Attr::~Attr() { }
+
+void InheritableAttr::anchor() { }
+
+void InheritableParamAttr::anchor() { }
+
+#include "clang/AST/AttrImpl.inc"
diff --git a/contrib/llvm/tools/clang/lib/AST/CXXABI.h b/contrib/llvm/tools/clang/lib/AST/CXXABI.h
new file mode 100644
index 0000000..943c43e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/CXXABI.h
@@ -0,0 +1,48 @@
+//===----- CXXABI.h - Interface to C++ ABIs ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for C++ AST support. Concrete
+// subclasses of this implement AST support for specific C++ ABIs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_AST_CXXABI_H
+#define LLVM_CLANG_AST_CXXABI_H
+
+#include "clang/AST/Type.h"
+
+namespace clang {
+
+class ASTContext;
+class MemberPointerType;
+
+/// Implements C++ ABI-specific semantic analysis functions.
+class CXXABI {
+public:
+ virtual ~CXXABI();
+
+ /// Returns the size of a member pointer in multiples of the target
+ /// pointer size.
+ virtual unsigned getMemberPointerSize(const MemberPointerType *MPT) const = 0;
+
+ /// Returns the default calling convention for C++ methods.
+ virtual CallingConv getDefaultMethodCallConv() const = 0;
+
+  /// Returns whether the given class is nearly empty, with just virtual
+  /// pointers and no data except possibly virtual bases.
+ virtual bool isNearlyEmpty(const CXXRecordDecl *RD) const = 0;
+};
+
+/// Creates an instance of a C++ ABI class.
+CXXABI *CreateARMCXXABI(ASTContext &Ctx);
+CXXABI *CreateItaniumCXXABI(ASTContext &Ctx);
+CXXABI *CreateMicrosoftCXXABI(ASTContext &Ctx);
+}
+
+#endif
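
As a minimal sketch of how this interface is consumed (the subclass below is hypothetical; it does not reproduce the Itanium, ARM, or Microsoft implementations created by the factory functions above), a concrete ABI class only needs to answer the three queries:

#include "CXXABI.h"
#include "clang/AST/DeclCXX.h"

namespace {
// Illustrative ABI: data member pointers take one pointer-sized slot, member
// function pointers take two, and methods default to the C calling convention.
class SketchCXXABI : public clang::CXXABI {
public:
  unsigned getMemberPointerSize(const clang::MemberPointerType *MPT) const {
    return MPT->isMemberFunctionPointer() ? 2 : 1;
  }

  clang::CallingConv getDefaultMethodCallConv() const {
    return clang::CC_C;
  }

  bool isNearlyEmpty(const clang::CXXRecordDecl *RD) const {
    // Treat dynamic classes with no data members as nearly empty.
    return RD->isDynamicClass() && RD->field_empty();
  }
};
} // end anonymous namespace
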
diff --git a/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
new file mode 100644
index 0000000..2186730
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/CXXInheritance.cpp
@@ -0,0 +1,718 @@
+//===------ CXXInheritance.cpp - C++ Inheritance ----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides routines that help analyzing C++ inheritance hierarchies.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/DeclCXX.h"
+#include <algorithm>
+#include <set>
+
+using namespace clang;
+
+/// \brief Computes the set of declarations referenced by these base
+/// paths.
+void CXXBasePaths::ComputeDeclsFound() {
+ assert(NumDeclsFound == 0 && !DeclsFound &&
+ "Already computed the set of declarations");
+
+ SmallVector<NamedDecl *, 8> Decls;
+ for (paths_iterator Path = begin(), PathEnd = end(); Path != PathEnd; ++Path)
+ Decls.push_back(*Path->Decls.first);
+
+ // Eliminate duplicated decls.
+ llvm::array_pod_sort(Decls.begin(), Decls.end());
+ Decls.erase(std::unique(Decls.begin(), Decls.end()), Decls.end());
+
+ NumDeclsFound = Decls.size();
+ DeclsFound = new NamedDecl * [NumDeclsFound];
+ std::copy(Decls.begin(), Decls.end(), DeclsFound);
+}
+
+CXXBasePaths::decl_iterator CXXBasePaths::found_decls_begin() {
+ if (NumDeclsFound == 0)
+ ComputeDeclsFound();
+ return DeclsFound;
+}
+
+CXXBasePaths::decl_iterator CXXBasePaths::found_decls_end() {
+ if (NumDeclsFound == 0)
+ ComputeDeclsFound();
+ return DeclsFound + NumDeclsFound;
+}
+
+/// isAmbiguous - Determines whether the set of paths provided is
+/// ambiguous, i.e., there are two or more paths that refer to
+/// different base class subobjects of the same type. BaseType must be
+/// an unqualified, canonical class type.
+bool CXXBasePaths::isAmbiguous(CanQualType BaseType) {
+ BaseType = BaseType.getUnqualifiedType();
+ std::pair<bool, unsigned>& Subobjects = ClassSubobjects[BaseType];
+ return Subobjects.second + (Subobjects.first? 1 : 0) > 1;
+}
+
+/// clear - Clear out all prior path information.
+void CXXBasePaths::clear() {
+ Paths.clear();
+ ClassSubobjects.clear();
+ ScratchPath.clear();
+ DetectedVirtual = 0;
+}
+
+/// @brief Swaps the contents of this CXXBasePaths structure with the
+/// contents of Other.
+void CXXBasePaths::swap(CXXBasePaths &Other) {
+ std::swap(Origin, Other.Origin);
+ Paths.swap(Other.Paths);
+ ClassSubobjects.swap(Other.ClassSubobjects);
+ std::swap(FindAmbiguities, Other.FindAmbiguities);
+ std::swap(RecordPaths, Other.RecordPaths);
+ std::swap(DetectVirtual, Other.DetectVirtual);
+ std::swap(DetectedVirtual, Other.DetectedVirtual);
+}
+
+bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base) const {
+ CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/false,
+ /*DetectVirtual=*/false);
+ return isDerivedFrom(Base, Paths);
+}
+
+bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base,
+ CXXBasePaths &Paths) const {
+ if (getCanonicalDecl() == Base->getCanonicalDecl())
+ return false;
+
+ Paths.setOrigin(const_cast<CXXRecordDecl*>(this));
+ return lookupInBases(&FindBaseClass,
+ const_cast<CXXRecordDecl*>(Base->getCanonicalDecl()),
+ Paths);
+}
+
+bool CXXRecordDecl::isVirtuallyDerivedFrom(CXXRecordDecl *Base) const {
+ if (!getNumVBases())
+ return false;
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/false,
+ /*DetectVirtual=*/false);
+
+ if (getCanonicalDecl() == Base->getCanonicalDecl())
+ return false;
+
+ Paths.setOrigin(const_cast<CXXRecordDecl*>(this));
+ return lookupInBases(&FindVirtualBaseClass, Base->getCanonicalDecl(), Paths);
+}
+
+static bool BaseIsNot(const CXXRecordDecl *Base, void *OpaqueTarget) {
+ // OpaqueTarget is a CXXRecordDecl*.
+ return Base->getCanonicalDecl() != (const CXXRecordDecl*) OpaqueTarget;
+}
+
+bool CXXRecordDecl::isProvablyNotDerivedFrom(const CXXRecordDecl *Base) const {
+ return forallBases(BaseIsNot, (void*) Base->getCanonicalDecl());
+}
+
+bool CXXRecordDecl::forallBases(ForallBasesCallback *BaseMatches,
+ void *OpaqueData,
+ bool AllowShortCircuit) const {
+ SmallVector<const CXXRecordDecl*, 8> Queue;
+
+ const CXXRecordDecl *Record = this;
+ bool AllMatches = true;
+ while (true) {
+ for (CXXRecordDecl::base_class_const_iterator
+ I = Record->bases_begin(), E = Record->bases_end(); I != E; ++I) {
+ const RecordType *Ty = I->getType()->getAs<RecordType>();
+ if (!Ty) {
+ if (AllowShortCircuit) return false;
+ AllMatches = false;
+ continue;
+ }
+
+ CXXRecordDecl *Base =
+ cast_or_null<CXXRecordDecl>(Ty->getDecl()->getDefinition());
+ if (!Base) {
+ if (AllowShortCircuit) return false;
+ AllMatches = false;
+ continue;
+ }
+
+ Queue.push_back(Base);
+ if (!BaseMatches(Base, OpaqueData)) {
+ if (AllowShortCircuit) return false;
+ AllMatches = false;
+ continue;
+ }
+ }
+
+ if (Queue.empty()) break;
+ Record = Queue.back(); // not actually a queue.
+ Queue.pop_back();
+ }
+
+ return AllMatches;
+}
+
+bool CXXBasePaths::lookupInBases(ASTContext &Context,
+ const CXXRecordDecl *Record,
+ CXXRecordDecl::BaseMatchesCallback *BaseMatches,
+ void *UserData) {
+ bool FoundPath = false;
+
+ // The access of the path down to this record.
+ AccessSpecifier AccessToHere = ScratchPath.Access;
+ bool IsFirstStep = ScratchPath.empty();
+
+ for (CXXRecordDecl::base_class_const_iterator BaseSpec = Record->bases_begin(),
+ BaseSpecEnd = Record->bases_end();
+ BaseSpec != BaseSpecEnd;
+ ++BaseSpec) {
+ // Find the record of the base class subobjects for this type.
+ QualType BaseType = Context.getCanonicalType(BaseSpec->getType())
+ .getUnqualifiedType();
+
+ // C++ [temp.dep]p3:
+ // In the definition of a class template or a member of a class template,
+ // if a base class of the class template depends on a template-parameter,
+ // the base class scope is not examined during unqualified name lookup
+ // either at the point of definition of the class template or member or
+    //   during an instantiation of the class template or member.
+ if (BaseType->isDependentType())
+ continue;
+
+ // Determine whether we need to visit this base class at all,
+ // updating the count of subobjects appropriately.
+ std::pair<bool, unsigned>& Subobjects = ClassSubobjects[BaseType];
+ bool VisitBase = true;
+ bool SetVirtual = false;
+ if (BaseSpec->isVirtual()) {
+ VisitBase = !Subobjects.first;
+ Subobjects.first = true;
+ if (isDetectingVirtual() && DetectedVirtual == 0) {
+ // If this is the first virtual we find, remember it. If it turns out
+ // there is no base path here, we'll reset it later.
+ DetectedVirtual = BaseType->getAs<RecordType>();
+ SetVirtual = true;
+ }
+ } else
+ ++Subobjects.second;
+
+ if (isRecordingPaths()) {
+ // Add this base specifier to the current path.
+ CXXBasePathElement Element;
+ Element.Base = &*BaseSpec;
+ Element.Class = Record;
+ if (BaseSpec->isVirtual())
+ Element.SubobjectNumber = 0;
+ else
+ Element.SubobjectNumber = Subobjects.second;
+ ScratchPath.push_back(Element);
+
+ // Calculate the "top-down" access to this base class.
+ // The spec actually describes this bottom-up, but top-down is
+ // equivalent because the definition works out as follows:
+ // 1. Write down the access along each step in the inheritance
+ // chain, followed by the access of the decl itself.
+ // For example, in
+ // class A { public: int foo; };
+ // class B : protected A {};
+ // class C : public B {};
+ // class D : private C {};
+ // we would write:
+ // private public protected public
+ // 2. If 'private' appears anywhere except far-left, access is denied.
+ // 3. Otherwise, overall access is determined by the most restrictive
+ // access in the sequence.
+ if (IsFirstStep)
+ ScratchPath.Access = BaseSpec->getAccessSpecifier();
+ else
+ ScratchPath.Access = CXXRecordDecl::MergeAccess(AccessToHere,
+ BaseSpec->getAccessSpecifier());
+ }
+
+ // Track whether there's a path involving this specific base.
+ bool FoundPathThroughBase = false;
+
+ if (BaseMatches(BaseSpec, ScratchPath, UserData)) {
+ // We've found a path that terminates at this base.
+ FoundPath = FoundPathThroughBase = true;
+ if (isRecordingPaths()) {
+ // We have a path. Make a copy of it before moving on.
+ Paths.push_back(ScratchPath);
+ } else if (!isFindingAmbiguities()) {
+ // We found a path and we don't care about ambiguities;
+ // return immediately.
+ return FoundPath;
+ }
+ } else if (VisitBase) {
+ CXXRecordDecl *BaseRecord
+ = cast<CXXRecordDecl>(BaseSpec->getType()->getAs<RecordType>()
+ ->getDecl());
+ if (lookupInBases(Context, BaseRecord, BaseMatches, UserData)) {
+ // C++ [class.member.lookup]p2:
+ // A member name f in one sub-object B hides a member name f in
+ // a sub-object A if A is a base class sub-object of B. Any
+ // declarations that are so hidden are eliminated from
+ // consideration.
+
+ // There is a path to a base class that meets the criteria. If we're
+ // not collecting paths or finding ambiguities, we're done.
+ FoundPath = FoundPathThroughBase = true;
+ if (!isFindingAmbiguities())
+ return FoundPath;
+ }
+ }
+
+ // Pop this base specifier off the current path (if we're
+ // collecting paths).
+ if (isRecordingPaths()) {
+ ScratchPath.pop_back();
+ }
+
+ // If we set a virtual earlier, and this isn't a path, forget it again.
+ if (SetVirtual && !FoundPathThroughBase) {
+ DetectedVirtual = 0;
+ }
+ }
+
+ // Reset the scratch path access.
+ ScratchPath.Access = AccessToHere;
+
+ return FoundPath;
+}
+
+bool CXXRecordDecl::lookupInBases(BaseMatchesCallback *BaseMatches,
+ void *UserData,
+ CXXBasePaths &Paths) const {
+ // If we didn't find anything, report that.
+ if (!Paths.lookupInBases(getASTContext(), this, BaseMatches, UserData))
+ return false;
+
+ // If we're not recording paths or we won't ever find ambiguities,
+ // we're done.
+ if (!Paths.isRecordingPaths() || !Paths.isFindingAmbiguities())
+ return true;
+
+ // C++ [class.member.lookup]p6:
+ // When virtual base classes are used, a hidden declaration can be
+ // reached along a path through the sub-object lattice that does
+ // not pass through the hiding declaration. This is not an
+ // ambiguity. The identical use with nonvirtual base classes is an
+ // ambiguity; in that case there is no unique instance of the name
+ // that hides all the others.
+ //
+ // FIXME: This is an O(N^2) algorithm, but DPG doesn't see an easy
+ // way to make it any faster.
+ for (CXXBasePaths::paths_iterator P = Paths.begin(), PEnd = Paths.end();
+ P != PEnd; /* increment in loop */) {
+ bool Hidden = false;
+
+ for (CXXBasePath::iterator PE = P->begin(), PEEnd = P->end();
+ PE != PEEnd && !Hidden; ++PE) {
+ if (PE->Base->isVirtual()) {
+ CXXRecordDecl *VBase = 0;
+ if (const RecordType *Record = PE->Base->getType()->getAs<RecordType>())
+ VBase = cast<CXXRecordDecl>(Record->getDecl());
+ if (!VBase)
+ break;
+
+ // The declaration(s) we found along this path were found in a
+ // subobject of a virtual base. Check whether this virtual
+ // base is a subobject of any other path; if so, then the
+        // declarations in this path are hidden by that path.
+ for (CXXBasePaths::paths_iterator HidingP = Paths.begin(),
+ HidingPEnd = Paths.end();
+ HidingP != HidingPEnd;
+ ++HidingP) {
+ CXXRecordDecl *HidingClass = 0;
+ if (const RecordType *Record
+ = HidingP->back().Base->getType()->getAs<RecordType>())
+ HidingClass = cast<CXXRecordDecl>(Record->getDecl());
+ if (!HidingClass)
+ break;
+
+ if (HidingClass->isVirtuallyDerivedFrom(VBase)) {
+ Hidden = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (Hidden)
+ P = Paths.Paths.erase(P);
+ else
+ ++P;
+ }
+
+ return true;
+}
+
+bool CXXRecordDecl::FindBaseClass(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ void *BaseRecord) {
+ assert(((Decl *)BaseRecord)->getCanonicalDecl() == BaseRecord &&
+ "User data for FindBaseClass is not canonical!");
+ return Specifier->getType()->getAs<RecordType>()->getDecl()
+ ->getCanonicalDecl() == BaseRecord;
+}
+
+bool CXXRecordDecl::FindVirtualBaseClass(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ void *BaseRecord) {
+ assert(((Decl *)BaseRecord)->getCanonicalDecl() == BaseRecord &&
+         "User data for FindVirtualBaseClass is not canonical!");
+ return Specifier->isVirtual() &&
+ Specifier->getType()->getAs<RecordType>()->getDecl()
+ ->getCanonicalDecl() == BaseRecord;
+}
+
+bool CXXRecordDecl::FindTagMember(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ void *Name) {
+ RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
+
+ DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
+ for (Path.Decls = BaseRecord->lookup(N);
+ Path.Decls.first != Path.Decls.second;
+ ++Path.Decls.first) {
+ if ((*Path.Decls.first)->isInIdentifierNamespace(IDNS_Tag))
+ return true;
+ }
+
+ return false;
+}
+
+bool CXXRecordDecl::FindOrdinaryMember(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ void *Name) {
+ RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
+
+ const unsigned IDNS = IDNS_Ordinary | IDNS_Tag | IDNS_Member;
+ DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
+ for (Path.Decls = BaseRecord->lookup(N);
+ Path.Decls.first != Path.Decls.second;
+ ++Path.Decls.first) {
+ if ((*Path.Decls.first)->isInIdentifierNamespace(IDNS))
+ return true;
+ }
+
+ return false;
+}
+
+bool CXXRecordDecl::
+FindNestedNameSpecifierMember(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ void *Name) {
+ RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
+
+ DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
+ for (Path.Decls = BaseRecord->lookup(N);
+ Path.Decls.first != Path.Decls.second;
+ ++Path.Decls.first) {
+ // FIXME: Refactor the "is it a nested-name-specifier?" check
+ if (isa<TypedefNameDecl>(*Path.Decls.first) ||
+ (*Path.Decls.first)->isInIdentifierNamespace(IDNS_Tag))
+ return true;
+ }
+
+ return false;
+}
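
The lookup predicates above are designed to be handed to CXXRecordDecl::lookupInBases. A minimal usage sketch follows (the helper name is hypothetical, and it assumes the headers already pulled in at the top of this file):

static clang::NamedDecl *
findOrdinaryMemberInBases(const clang::CXXRecordDecl *Class,
                          clang::DeclarationName Name) {
  clang::CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/true,
                            /*DetectVirtual=*/false);
  if (!Class->lookupInBases(&clang::CXXRecordDecl::FindOrdinaryMember,
                            Name.getAsOpaquePtr(), Paths))
    return 0;

  // Each recorded path carries its lookup results in Path.Decls; take the
  // first declaration found along the first path.
  return *Paths.front().Decls.first;
}
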
+
+void OverridingMethods::add(unsigned OverriddenSubobject,
+ UniqueVirtualMethod Overriding) {
+ SmallVector<UniqueVirtualMethod, 4> &SubobjectOverrides
+ = Overrides[OverriddenSubobject];
+ if (std::find(SubobjectOverrides.begin(), SubobjectOverrides.end(),
+ Overriding) == SubobjectOverrides.end())
+ SubobjectOverrides.push_back(Overriding);
+}
+
+void OverridingMethods::add(const OverridingMethods &Other) {
+ for (const_iterator I = Other.begin(), IE = Other.end(); I != IE; ++I) {
+ for (overriding_const_iterator M = I->second.begin(),
+ MEnd = I->second.end();
+ M != MEnd;
+ ++M)
+ add(I->first, *M);
+ }
+}
+
+void OverridingMethods::replaceAll(UniqueVirtualMethod Overriding) {
+ for (iterator I = begin(), IEnd = end(); I != IEnd; ++I) {
+ I->second.clear();
+ I->second.push_back(Overriding);
+ }
+}
+
+
+namespace {
+ class FinalOverriderCollector {
+ /// \brief The number of subobjects of a given class type that
+ /// occur within the class hierarchy.
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCount;
+
+ /// \brief Overriders for each virtual base subobject.
+ llvm::DenseMap<const CXXRecordDecl *, CXXFinalOverriderMap *> VirtualOverriders;
+
+ CXXFinalOverriderMap FinalOverriders;
+
+ public:
+ ~FinalOverriderCollector();
+
+ void Collect(const CXXRecordDecl *RD, bool VirtualBase,
+ const CXXRecordDecl *InVirtualSubobject,
+ CXXFinalOverriderMap &Overriders);
+ };
+}
+
+void FinalOverriderCollector::Collect(const CXXRecordDecl *RD,
+ bool VirtualBase,
+ const CXXRecordDecl *InVirtualSubobject,
+ CXXFinalOverriderMap &Overriders) {
+ unsigned SubobjectNumber = 0;
+ if (!VirtualBase)
+ SubobjectNumber
+ = ++SubobjectCount[cast<CXXRecordDecl>(RD->getCanonicalDecl())];
+
+ for (CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin(),
+ BaseEnd = RD->bases_end(); Base != BaseEnd; ++Base) {
+ if (const RecordType *RT = Base->getType()->getAs<RecordType>()) {
+ const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (!BaseDecl->isPolymorphic())
+ continue;
+
+ if (Overriders.empty() && !Base->isVirtual()) {
+ // There are no other overriders of virtual member functions,
+ // so let the base class fill in our overriders for us.
+ Collect(BaseDecl, false, InVirtualSubobject, Overriders);
+ continue;
+ }
+
+      // Collect all of the overriders from the base class subobject
+      // and merge them into the set of overriders for this class.
+ // For virtual base classes, populate or use the cached virtual
+ // overrides so that we do not walk the virtual base class (and
+ // its base classes) more than once.
+ CXXFinalOverriderMap ComputedBaseOverriders;
+ CXXFinalOverriderMap *BaseOverriders = &ComputedBaseOverriders;
+ if (Base->isVirtual()) {
+ CXXFinalOverriderMap *&MyVirtualOverriders = VirtualOverriders[BaseDecl];
+ if (!MyVirtualOverriders) {
+ MyVirtualOverriders = new CXXFinalOverriderMap;
+ Collect(BaseDecl, true, BaseDecl, *MyVirtualOverriders);
+ }
+
+ BaseOverriders = MyVirtualOverriders;
+ } else
+ Collect(BaseDecl, false, InVirtualSubobject, ComputedBaseOverriders);
+
+ // Merge the overriders from this base class into our own set of
+ // overriders.
+ for (CXXFinalOverriderMap::iterator OM = BaseOverriders->begin(),
+ OMEnd = BaseOverriders->end();
+ OM != OMEnd;
+ ++OM) {
+ const CXXMethodDecl *CanonOM
+ = cast<CXXMethodDecl>(OM->first->getCanonicalDecl());
+ Overriders[CanonOM].add(OM->second);
+ }
+ }
+ }
+
+ for (CXXRecordDecl::method_iterator M = RD->method_begin(),
+ MEnd = RD->method_end();
+ M != MEnd;
+ ++M) {
+ // We only care about virtual methods.
+ if (!M->isVirtual())
+ continue;
+
+ CXXMethodDecl *CanonM = cast<CXXMethodDecl>(M->getCanonicalDecl());
+
+ if (CanonM->begin_overridden_methods()
+ == CanonM->end_overridden_methods()) {
+ // This is a new virtual function that does not override any
+ // other virtual function. Add it to the map of virtual
+      // functions for which we are tracking overriders.
+
+ // C++ [class.virtual]p2:
+ // For convenience we say that any virtual function overrides itself.
+ Overriders[CanonM].add(SubobjectNumber,
+ UniqueVirtualMethod(CanonM, SubobjectNumber,
+ InVirtualSubobject));
+ continue;
+ }
+
+ // This virtual method overrides other virtual methods, so it does
+ // not add any new slots into the set of overriders. Instead, we
+ // replace entries in the set of overriders with the new
+ // overrider. To do so, we dig down to the original virtual
+ // functions using data recursion and update all of the methods it
+ // overrides.
+ typedef std::pair<CXXMethodDecl::method_iterator,
+ CXXMethodDecl::method_iterator> OverriddenMethods;
+ SmallVector<OverriddenMethods, 4> Stack;
+ Stack.push_back(std::make_pair(CanonM->begin_overridden_methods(),
+ CanonM->end_overridden_methods()));
+ while (!Stack.empty()) {
+ OverriddenMethods OverMethods = Stack.back();
+ Stack.pop_back();
+
+ for (; OverMethods.first != OverMethods.second; ++OverMethods.first) {
+ const CXXMethodDecl *CanonOM
+ = cast<CXXMethodDecl>((*OverMethods.first)->getCanonicalDecl());
+
+ // C++ [class.virtual]p2:
+ // A virtual member function C::vf of a class object S is
+ // a final overrider unless the most derived class (1.8)
+ // of which S is a base class subobject (if any) declares
+ // or inherits another member function that overrides vf.
+ //
+ // Treating this object like the most derived class, we
+ // replace any overrides from base classes with this
+ // overriding virtual function.
+ Overriders[CanonOM].replaceAll(
+ UniqueVirtualMethod(CanonM, SubobjectNumber,
+ InVirtualSubobject));
+
+ if (CanonOM->begin_overridden_methods()
+ == CanonOM->end_overridden_methods())
+ continue;
+
+ // Continue recursion to the methods that this virtual method
+ // overrides.
+ Stack.push_back(std::make_pair(CanonOM->begin_overridden_methods(),
+ CanonOM->end_overridden_methods()));
+ }
+ }
+
+ // C++ [class.virtual]p2:
+ // For convenience we say that any virtual function overrides itself.
+ Overriders[CanonM].add(SubobjectNumber,
+ UniqueVirtualMethod(CanonM, SubobjectNumber,
+ InVirtualSubobject));
+ }
+}
+
+FinalOverriderCollector::~FinalOverriderCollector() {
+ for (llvm::DenseMap<const CXXRecordDecl *, CXXFinalOverriderMap *>::iterator
+ VO = VirtualOverriders.begin(), VOEnd = VirtualOverriders.end();
+ VO != VOEnd;
+ ++VO)
+ delete VO->second;
+}
+
+void
+CXXRecordDecl::getFinalOverriders(CXXFinalOverriderMap &FinalOverriders) const {
+ FinalOverriderCollector Collector;
+ Collector.Collect(this, false, 0, FinalOverriders);
+
+ // Weed out any final overriders that come from virtual base class
+ // subobjects that were hidden by other subobjects along any path.
+ // This is the final-overrider variant of C++ [class.member.lookup]p10.
+ for (CXXFinalOverriderMap::iterator OM = FinalOverriders.begin(),
+ OMEnd = FinalOverriders.end();
+ OM != OMEnd;
+ ++OM) {
+ for (OverridingMethods::iterator SO = OM->second.begin(),
+ SOEnd = OM->second.end();
+ SO != SOEnd;
+ ++SO) {
+ SmallVector<UniqueVirtualMethod, 4> &Overriding = SO->second;
+ if (Overriding.size() < 2)
+ continue;
+
+ for (SmallVector<UniqueVirtualMethod, 4>::iterator
+ Pos = Overriding.begin(), PosEnd = Overriding.end();
+ Pos != PosEnd;
+ /* increment in loop */) {
+ if (!Pos->InVirtualSubobject) {
+ ++Pos;
+ continue;
+ }
+
+ // We have an overriding method in a virtual base class
+ // subobject (or non-virtual base class subobject thereof);
+          // determine whether there exists another overriding method
+ // in a base class subobject that hides the virtual base class
+ // subobject.
+ bool Hidden = false;
+ for (SmallVector<UniqueVirtualMethod, 4>::iterator
+ OP = Overriding.begin(), OPEnd = Overriding.end();
+ OP != OPEnd && !Hidden;
+ ++OP) {
+ if (Pos == OP)
+ continue;
+
+ if (OP->Method->getParent()->isVirtuallyDerivedFrom(
+ const_cast<CXXRecordDecl *>(Pos->InVirtualSubobject)))
+ Hidden = true;
+ }
+
+ if (Hidden) {
+ // The current overriding function is hidden by another
+ // overriding function; remove this one.
+ Pos = Overriding.erase(Pos);
+ PosEnd = Overriding.end();
+ } else {
+ ++Pos;
+ }
+ }
+ }
+ }
+}
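
A usage sketch for the final-overrider computation above (the function below is hypothetical and only illustrates the entry point; it additionally assumes llvm/Support/raw_ostream.h):

static void dumpFinalOverriders(const clang::CXXRecordDecl *RD) {
  clang::CXXFinalOverriderMap FinalOverriders;
  RD->getFinalOverriders(FinalOverriders);

  // Print each virtual method together with its final overrider(s), one line
  // per (method, subobject, overrider) triple.
  for (clang::CXXFinalOverriderMap::iterator M = FinalOverriders.begin(),
                                             MEnd = FinalOverriders.end();
       M != MEnd; ++M)
    for (clang::OverridingMethods::iterator SO = M->second.begin(),
                                            SOEnd = M->second.end();
         SO != SOEnd; ++SO)
      for (unsigned I = 0, N = SO->second.size(); I != N; ++I)
        llvm::errs() << M->first->getQualifiedNameAsString() << " -> "
                     << SO->second[I].Method->getQualifiedNameAsString()
                     << "\n";
}
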
+
+static void
+AddIndirectPrimaryBases(const CXXRecordDecl *RD, ASTContext &Context,
+ CXXIndirectPrimaryBaseSet& Bases) {
+ // If the record has a virtual primary base class, add it to our set.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ if (Layout.isPrimaryBaseVirtual())
+ Bases.insert(Layout.getPrimaryBase());
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ assert(!I->getType()->isDependentType() &&
+ "Cannot get indirect primary bases for class with dependent bases.");
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Only bases with virtual bases participate in computing the
+ // indirect primary virtual base classes.
+ if (BaseDecl->getNumVBases())
+ AddIndirectPrimaryBases(BaseDecl, Context, Bases);
+ }
+
+}
+
+void
+CXXRecordDecl::getIndirectPrimaryBases(CXXIndirectPrimaryBaseSet& Bases) const {
+ ASTContext &Context = getASTContext();
+
+ if (!getNumVBases())
+ return;
+
+ for (CXXRecordDecl::base_class_const_iterator I = bases_begin(),
+ E = bases_end(); I != E; ++I) {
+ assert(!I->getType()->isDependentType() &&
+ "Cannot get indirect primary bases for class with dependent bases.");
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Only bases with virtual bases participate in computing the
+ // indirect primary virtual base classes.
+ if (BaseDecl->getNumVBases())
+ AddIndirectPrimaryBases(BaseDecl, Context, Bases);
+ }
+}
+
diff --git a/contrib/llvm/tools/clang/lib/AST/Decl.cpp b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
new file mode 100644
index 0000000..399f2e4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/Decl.cpp
@@ -0,0 +1,3074 @@
+//===--- Decl.cpp - Declaration AST Node Implementation -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Decl subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/Module.h"
+#include "clang/Basic/Specifiers.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#include <algorithm>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// NamedDecl Implementation
+//===----------------------------------------------------------------------===//
+
+static llvm::Optional<Visibility> getVisibilityOf(const Decl *D) {
+ // If this declaration has an explicit visibility attribute, use it.
+ if (const VisibilityAttr *A = D->getAttr<VisibilityAttr>()) {
+ switch (A->getVisibility()) {
+ case VisibilityAttr::Default:
+ return DefaultVisibility;
+ case VisibilityAttr::Hidden:
+ return HiddenVisibility;
+ case VisibilityAttr::Protected:
+ return ProtectedVisibility;
+ }
+ }
+
+ // If we're on Mac OS X, an 'availability' for Mac OS X attribute
+ // implies visibility(default).
+ if (D->getASTContext().getTargetInfo().getTriple().isOSDarwin()) {
+ for (specific_attr_iterator<AvailabilityAttr>
+ A = D->specific_attr_begin<AvailabilityAttr>(),
+ AEnd = D->specific_attr_end<AvailabilityAttr>();
+ A != AEnd; ++A)
+ if ((*A)->getPlatform()->getName().equals("macosx"))
+ return DefaultVisibility;
+ }
+
+ return llvm::Optional<Visibility>();
+}
+
+typedef NamedDecl::LinkageInfo LinkageInfo;
+
+namespace {
+/// Flags controlling the computation of linkage and visibility.
+struct LVFlags {
+ bool ConsiderGlobalVisibility;
+ bool ConsiderVisibilityAttributes;
+ bool ConsiderTemplateParameterTypes;
+
+ LVFlags() : ConsiderGlobalVisibility(true),
+ ConsiderVisibilityAttributes(true),
+ ConsiderTemplateParameterTypes(true) {
+ }
+
+ /// \brief Returns a set of flags that is only useful for computing the
+ /// linkage, not the visibility, of a declaration.
+ static LVFlags CreateOnlyDeclLinkage() {
+ LVFlags F;
+ F.ConsiderGlobalVisibility = false;
+ F.ConsiderVisibilityAttributes = false;
+ F.ConsiderTemplateParameterTypes = false;
+ return F;
+ }
+
+  /// Returns a set of flags, otherwise based on these, which ignores
+  /// all sources of visibility except template arguments.
+ LVFlags onlyTemplateVisibility() const {
+ LVFlags F = *this;
+ F.ConsiderGlobalVisibility = false;
+ F.ConsiderVisibilityAttributes = false;
+ F.ConsiderTemplateParameterTypes = false;
+ return F;
+ }
+};
+} // end anonymous namespace
+
+static LinkageInfo getLVForType(QualType T) {
+ std::pair<Linkage,Visibility> P = T->getLinkageAndVisibility();
+ return LinkageInfo(P.first, P.second, T->isVisibilityExplicit());
+}
+
+/// \brief Get the most restrictive linkage for the types in the given
+/// template parameter list.
+static LinkageInfo
+getLVForTemplateParameterList(const TemplateParameterList *Params) {
+ LinkageInfo LV(ExternalLinkage, DefaultVisibility, false);
+ for (TemplateParameterList::const_iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ if (NTTP->isExpandedParameterPack()) {
+ for (unsigned I = 0, N = NTTP->getNumExpansionTypes(); I != N; ++I) {
+ QualType T = NTTP->getExpansionType(I);
+ if (!T->isDependentType())
+ LV.merge(getLVForType(T));
+ }
+ continue;
+ }
+
+ if (!NTTP->getType()->isDependentType()) {
+ LV.merge(getLVForType(NTTP->getType()));
+ continue;
+ }
+ }
+
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(*P)) {
+ LV.merge(getLVForTemplateParameterList(TTP->getTemplateParameters()));
+ }
+ }
+
+ return LV;
+}
+
+/// getLVForDecl - Get the linkage and visibility for the given declaration.
+static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags F);
+
+/// \brief Get the most restrictive linkage for the types and
+/// declarations in the given template argument list.
+static LinkageInfo getLVForTemplateArgumentList(const TemplateArgument *Args,
+ unsigned NumArgs,
+ LVFlags &F) {
+ LinkageInfo LV(ExternalLinkage, DefaultVisibility, false);
+
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ switch (Args[I].getKind()) {
+ case TemplateArgument::Null:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Expression:
+ break;
+
+ case TemplateArgument::Type:
+ LV.merge(getLVForType(Args[I].getAsType()));
+ break;
+
+ case TemplateArgument::Declaration:
+ // The decl can validly be null as the representation of nullptr
+ // arguments, valid only in C++0x.
+ if (Decl *D = Args[I].getAsDecl()) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ LV = merge(LV, getLVForDecl(ND, F));
+ }
+ break;
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ if (TemplateDecl *Template
+ = Args[I].getAsTemplateOrTemplatePattern().getAsTemplateDecl())
+ LV.merge(getLVForDecl(Template, F));
+ break;
+
+ case TemplateArgument::Pack:
+ LV.mergeWithMin(getLVForTemplateArgumentList(Args[I].pack_begin(),
+ Args[I].pack_size(),
+ F));
+ break;
+ }
+ }
+
+ return LV;
+}
+
+static LinkageInfo
+getLVForTemplateArgumentList(const TemplateArgumentList &TArgs,
+ LVFlags &F) {
+ return getLVForTemplateArgumentList(TArgs.data(), TArgs.size(), F);
+}
+
+static bool shouldConsiderTemplateLV(const FunctionDecl *fn,
+ const FunctionTemplateSpecializationInfo *spec) {
+ return !(spec->isExplicitSpecialization() &&
+ fn->hasAttr<VisibilityAttr>());
+}
+
+static bool shouldConsiderTemplateLV(const ClassTemplateSpecializationDecl *d) {
+ return !(d->isExplicitSpecialization() && d->hasAttr<VisibilityAttr>());
+}
+
+static LinkageInfo getLVForNamespaceScopeDecl(const NamedDecl *D, LVFlags F) {
+ assert(D->getDeclContext()->getRedeclContext()->isFileContext() &&
+ "Not a name having namespace scope");
+ ASTContext &Context = D->getASTContext();
+
+ // C++ [basic.link]p3:
+ // A name having namespace scope (3.3.6) has internal linkage if it
+ // is the name of
+ // - an object, reference, function or function template that is
+ // explicitly declared static; or,
+ // (This bullet corresponds to C99 6.2.2p3.)
+ if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ // Explicitly declared static.
+ if (Var->getStorageClass() == SC_Static)
+ return LinkageInfo::internal();
+
+ // - an object or reference that is explicitly declared const
+ // and neither explicitly declared extern nor previously
+ // declared to have external linkage; or
+ // (there is no equivalent in C99)
+ if (Context.getLangOpts().CPlusPlus &&
+ Var->getType().isConstant(Context) &&
+ Var->getStorageClass() != SC_Extern &&
+ Var->getStorageClass() != SC_PrivateExtern) {
+ bool FoundExtern = false;
+ for (const VarDecl *PrevVar = Var->getPreviousDecl();
+ PrevVar && !FoundExtern;
+ PrevVar = PrevVar->getPreviousDecl())
+ if (isExternalLinkage(PrevVar->getLinkage()))
+ FoundExtern = true;
+
+ if (!FoundExtern)
+ return LinkageInfo::internal();
+ }
+ if (Var->getStorageClass() == SC_None) {
+ const VarDecl *PrevVar = Var->getPreviousDecl();
+ for (; PrevVar; PrevVar = PrevVar->getPreviousDecl())
+ if (PrevVar->getStorageClass() == SC_PrivateExtern)
+ break;
+ if (PrevVar)
+ return PrevVar->getLinkageAndVisibility();
+ }
+ } else if (isa<FunctionDecl>(D) || isa<FunctionTemplateDecl>(D)) {
+ // C++ [temp]p4:
+ // A non-member function template can have internal linkage; any
+ // other template name shall have external linkage.
+ const FunctionDecl *Function = 0;
+ if (const FunctionTemplateDecl *FunTmpl
+ = dyn_cast<FunctionTemplateDecl>(D))
+ Function = FunTmpl->getTemplatedDecl();
+ else
+ Function = cast<FunctionDecl>(D);
+
+ // Explicitly declared static.
+ if (Function->getStorageClass() == SC_Static)
+ return LinkageInfo(InternalLinkage, DefaultVisibility, false);
+ } else if (const FieldDecl *Field = dyn_cast<FieldDecl>(D)) {
+ // - a data member of an anonymous union.
+ if (cast<RecordDecl>(Field->getDeclContext())->isAnonymousStructOrUnion())
+ return LinkageInfo::internal();
+ }
+
+ if (D->isInAnonymousNamespace()) {
+ const VarDecl *Var = dyn_cast<VarDecl>(D);
+ const FunctionDecl *Func = dyn_cast<FunctionDecl>(D);
+ if ((!Var || !Var->getDeclContext()->isExternCContext()) &&
+ (!Func || !Func->getDeclContext()->isExternCContext()))
+ return LinkageInfo::uniqueExternal();
+ }
+
+ // Set up the defaults.
+
+ // C99 6.2.2p5:
+ // If the declaration of an identifier for an object has file
+ // scope and no storage-class specifier, its linkage is
+ // external.
+ LinkageInfo LV;
+ LV.mergeVisibility(Context.getLangOpts().getVisibilityMode());
+
+ if (F.ConsiderVisibilityAttributes) {
+ if (llvm::Optional<Visibility> Vis = D->getExplicitVisibility()) {
+ LV.setVisibility(*Vis, true);
+ F.ConsiderGlobalVisibility = false;
+ } else {
+ // If we're declared in a namespace with a visibility attribute,
+ // use that namespace's visibility, but don't call it explicit.
+ for (const DeclContext *DC = D->getDeclContext();
+ !isa<TranslationUnitDecl>(DC);
+ DC = DC->getParent()) {
+ const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
+ if (!ND) continue;
+ if (llvm::Optional<Visibility> Vis = ND->getExplicitVisibility()) {
+ LV.setVisibility(*Vis, true);
+ F.ConsiderGlobalVisibility = false;
+ break;
+ }
+ }
+ }
+ }
+
+ // C++ [basic.link]p4:
+
+ // A name having namespace scope has external linkage if it is the
+ // name of
+ //
+ // - an object or reference, unless it has internal linkage; or
+ if (const VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ // GCC applies the following optimization to variables and static
+ // data members, but not to functions:
+ //
+ // Modify the variable's LV by the LV of its type unless this is
+ // C or extern "C". This follows from [basic.link]p9:
+ // A type without linkage shall not be used as the type of a
+ // variable or function with external linkage unless
+ // - the entity has C language linkage, or
+ // - the entity is declared within an unnamed namespace, or
+ // - the entity is not used or is defined in the same
+ // translation unit.
+ // and [basic.link]p10:
+ // ...the types specified by all declarations referring to a
+ // given variable or function shall be identical...
+ // C does not have an equivalent rule.
+ //
+ // Ignore this if we've got an explicit attribute; the user
+ // probably knows what they're doing.
+ //
+ // Note that we don't want to make the variable non-external
+ // because of this, but unique-external linkage suits us.
+ if (Context.getLangOpts().CPlusPlus &&
+ !Var->getDeclContext()->isExternCContext()) {
+ LinkageInfo TypeLV = getLVForType(Var->getType());
+ if (TypeLV.linkage() != ExternalLinkage)
+ return LinkageInfo::uniqueExternal();
+ LV.mergeVisibilityWithMin(TypeLV.visibility(),
+ TypeLV.visibilityExplicit());
+ }
+
+ if (Var->getStorageClass() == SC_PrivateExtern)
+ LV.setVisibility(HiddenVisibility, true);
+
+ if (!Context.getLangOpts().CPlusPlus &&
+ (Var->getStorageClass() == SC_Extern ||
+ Var->getStorageClass() == SC_PrivateExtern)) {
+
+ // C99 6.2.2p4:
+ // For an identifier declared with the storage-class specifier
+ // extern in a scope in which a prior declaration of that
+ // identifier is visible, if the prior declaration specifies
+ // internal or external linkage, the linkage of the identifier
+ // at the later declaration is the same as the linkage
+ // specified at the prior declaration. If no prior declaration
+ // is visible, or if the prior declaration specifies no
+ // linkage, then the identifier has external linkage.
+ if (const VarDecl *PrevVar = Var->getPreviousDecl()) {
+ LinkageInfo PrevLV = getLVForDecl(PrevVar, F);
+ if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
+ LV.mergeVisibility(PrevLV);
+ }
+ }
+
+ // - a function, unless it has internal linkage; or
+ } else if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ // In theory, we can modify the function's LV by the LV of its
+ // type unless it has C linkage (see comment above about variables
+ // for justification). In practice, GCC doesn't do this, so it's
+ // just too painful to make work.
+
+ if (Function->getStorageClass() == SC_PrivateExtern)
+ LV.setVisibility(HiddenVisibility, true);
+
+ // C99 6.2.2p5:
+ // If the declaration of an identifier for a function has no
+ // storage-class specifier, its linkage is determined exactly
+ // as if it were declared with the storage-class specifier
+ // extern.
+ if (!Context.getLangOpts().CPlusPlus &&
+ (Function->getStorageClass() == SC_Extern ||
+ Function->getStorageClass() == SC_PrivateExtern ||
+ Function->getStorageClass() == SC_None)) {
+ // C99 6.2.2p4:
+ // For an identifier declared with the storage-class specifier
+ // extern in a scope in which a prior declaration of that
+ // identifier is visible, if the prior declaration specifies
+ // internal or external linkage, the linkage of the identifier
+ // at the later declaration is the same as the linkage
+ // specified at the prior declaration. If no prior declaration
+ // is visible, or if the prior declaration specifies no
+ // linkage, then the identifier has external linkage.
+ if (const FunctionDecl *PrevFunc = Function->getPreviousDecl()) {
+ LinkageInfo PrevLV = getLVForDecl(PrevFunc, F);
+ if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
+ LV.mergeVisibility(PrevLV);
+ }
+ }
+
+ // In C++, then if the type of the function uses a type with
+ // unique-external linkage, it's not legally usable from outside
+ // this translation unit. However, we should use the C linkage
+ // rules instead for extern "C" declarations.
+ if (Context.getLangOpts().CPlusPlus &&
+ !Function->getDeclContext()->isExternCContext() &&
+ Function->getType()->getLinkage() == UniqueExternalLinkage)
+ return LinkageInfo::uniqueExternal();
+
+ // Consider LV from the template and the template arguments unless
+ // this is an explicit specialization with a visibility attribute.
+ if (FunctionTemplateSpecializationInfo *specInfo
+ = Function->getTemplateSpecializationInfo()) {
+ if (shouldConsiderTemplateLV(Function, specInfo)) {
+ LV.merge(getLVForDecl(specInfo->getTemplate(),
+ F.onlyTemplateVisibility()));
+ const TemplateArgumentList &templateArgs = *specInfo->TemplateArguments;
+ LV.mergeWithMin(getLVForTemplateArgumentList(templateArgs, F));
+ }
+ }
+
+ // - a named class (Clause 9), or an unnamed class defined in a
+ // typedef declaration in which the class has the typedef name
+ // for linkage purposes (7.1.3); or
+ // - a named enumeration (7.2), or an unnamed enumeration
+ // defined in a typedef declaration in which the enumeration
+ // has the typedef name for linkage purposes (7.1.3); or
+ } else if (const TagDecl *Tag = dyn_cast<TagDecl>(D)) {
+ // Unnamed tags have no linkage.
+ if (!Tag->getDeclName() && !Tag->getTypedefNameForAnonDecl())
+ return LinkageInfo::none();
+
+ // If this is a class template specialization, consider the
+ // linkage of the template and template arguments.
+ if (const ClassTemplateSpecializationDecl *spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Tag)) {
+ if (shouldConsiderTemplateLV(spec)) {
+ // From the template.
+ LV.merge(getLVForDecl(spec->getSpecializedTemplate(),
+ F.onlyTemplateVisibility()));
+
+ // The arguments at which the template was instantiated.
+ const TemplateArgumentList &TemplateArgs = spec->getTemplateArgs();
+ LV.mergeWithMin(getLVForTemplateArgumentList(TemplateArgs, F));
+ }
+ }
+
+ // Consider -fvisibility unless the type has C linkage.
+ if (F.ConsiderGlobalVisibility)
+ F.ConsiderGlobalVisibility =
+ (Context.getLangOpts().CPlusPlus &&
+ !Tag->getDeclContext()->isExternCContext());
+
+ // - an enumerator belonging to an enumeration with external linkage;
+ } else if (isa<EnumConstantDecl>(D)) {
+ LinkageInfo EnumLV = getLVForDecl(cast<NamedDecl>(D->getDeclContext()), F);
+ if (!isExternalLinkage(EnumLV.linkage()))
+ return LinkageInfo::none();
+ LV.merge(EnumLV);
+
+ // - a template, unless it is a function template that has
+ // internal linkage (Clause 14);
+ } else if (const TemplateDecl *temp = dyn_cast<TemplateDecl>(D)) {
+ if (F.ConsiderTemplateParameterTypes)
+ LV.merge(getLVForTemplateParameterList(temp->getTemplateParameters()));
+
+ // - a namespace (7.3), unless it is declared within an unnamed
+ // namespace.
+ } else if (isa<NamespaceDecl>(D) && !D->isInAnonymousNamespace()) {
+ return LV;
+
+ // By extension, we assign external linkage to Objective-C
+ // interfaces.
+ } else if (isa<ObjCInterfaceDecl>(D)) {
+    // Fall out of the if-chain; the default LV computed above applies.
+
+ // Everything not covered here has no linkage.
+ } else {
+ return LinkageInfo::none();
+ }
+
+ // If we ended up with non-external linkage, visibility should
+ // always be default.
+ if (LV.linkage() != ExternalLinkage)
+ return LinkageInfo(LV.linkage(), DefaultVisibility, false);
+
+ return LV;
+}
+
+static LinkageInfo getLVForClassMember(const NamedDecl *D, LVFlags F) {
+ // Only certain class members have linkage. Note that fields don't
+ // really have linkage, but it's convenient to say they do for the
+ // purposes of calculating linkage of pointer-to-data-member
+ // template arguments.
+ if (!(isa<CXXMethodDecl>(D) ||
+ isa<VarDecl>(D) ||
+ isa<FieldDecl>(D) ||
+ (isa<TagDecl>(D) &&
+ (D->getDeclName() || cast<TagDecl>(D)->getTypedefNameForAnonDecl()))))
+ return LinkageInfo::none();
+
+ LinkageInfo LV;
+ LV.mergeVisibility(D->getASTContext().getLangOpts().getVisibilityMode());
+
+ // The flags we're going to use to compute the class's visibility.
+ LVFlags ClassF = F;
+
+ // If we have an explicit visibility attribute, merge that in.
+ if (F.ConsiderVisibilityAttributes) {
+ if (llvm::Optional<Visibility> Vis = D->getExplicitVisibility()) {
+ LV.mergeVisibility(*Vis, true);
+
+ // Ignore global visibility later, but not this attribute.
+ F.ConsiderGlobalVisibility = false;
+
+ // Ignore both global visibility and attributes when computing our
+ // parent's visibility.
+ ClassF = F.onlyTemplateVisibility();
+ }
+ }
+
+ // Class members only have linkage if their class has external
+ // linkage.
+ LV.merge(getLVForDecl(cast<RecordDecl>(D->getDeclContext()), ClassF));
+ if (!isExternalLinkage(LV.linkage()))
+ return LinkageInfo::none();
+
+ // If the class already has unique-external linkage, we can't improve.
+ if (LV.linkage() == UniqueExternalLinkage)
+ return LinkageInfo::uniqueExternal();
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ // If the type of the function uses a type with unique-external
+ // linkage, it's not legally usable from outside this translation unit.
+ if (MD->getType()->getLinkage() == UniqueExternalLinkage)
+ return LinkageInfo::uniqueExternal();
+
+ TemplateSpecializationKind TSK = TSK_Undeclared;
+
+ // If this is a method template specialization, use the linkage for
+ // the template parameters and arguments.
+ if (FunctionTemplateSpecializationInfo *spec
+ = MD->getTemplateSpecializationInfo()) {
+ if (shouldConsiderTemplateLV(MD, spec)) {
+ LV.mergeWithMin(getLVForTemplateArgumentList(*spec->TemplateArguments,
+ F));
+ if (F.ConsiderTemplateParameterTypes)
+ LV.merge(getLVForTemplateParameterList(
+ spec->getTemplate()->getTemplateParameters()));
+ }
+
+ TSK = spec->getTemplateSpecializationKind();
+ } else if (MemberSpecializationInfo *MSI =
+ MD->getMemberSpecializationInfo()) {
+ TSK = MSI->getTemplateSpecializationKind();
+ }
+
+ // If we're paying attention to global visibility, apply
+ // -finline-visibility-hidden if this is an inline method.
+ //
+ // Note that ConsiderGlobalVisibility doesn't yet have information
+ // about whether containing classes have visibility attributes,
+ // and that's intentional.
+ if (TSK != TSK_ExplicitInstantiationDeclaration &&
+ TSK != TSK_ExplicitInstantiationDefinition &&
+ F.ConsiderGlobalVisibility &&
+ MD->getASTContext().getLangOpts().InlineVisibilityHidden) {
+ // InlineVisibilityHidden only applies to definitions, and
+ // isInlined() only gives meaningful answers on definitions
+ // anyway.
+ const FunctionDecl *Def = 0;
+ if (MD->hasBody(Def) && Def->isInlined())
+ LV.setVisibility(HiddenVisibility);
+ }
+
+ // Note that in contrast to basically every other situation, we
+ // *do* apply -fvisibility to method declarations.
+
+ } else if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (const ClassTemplateSpecializationDecl *spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(RD)) {
+ if (shouldConsiderTemplateLV(spec)) {
+ // Merge template argument/parameter information for member
+ // class template specializations.
+ LV.mergeWithMin(getLVForTemplateArgumentList(spec->getTemplateArgs(),
+ F));
+ if (F.ConsiderTemplateParameterTypes)
+ LV.merge(getLVForTemplateParameterList(
+ spec->getSpecializedTemplate()->getTemplateParameters()));
+ }
+ }
+
+ // Static data members.
+ } else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ // Modify the variable's linkage by its type, but ignore the
+ // type's visibility unless it's a definition.
+ LinkageInfo TypeLV = getLVForType(VD->getType());
+ if (TypeLV.linkage() != ExternalLinkage)
+ LV.mergeLinkage(UniqueExternalLinkage);
+ if (!LV.visibilityExplicit())
+ LV.mergeVisibility(TypeLV.visibility(), TypeLV.visibilityExplicit());
+ }
+
+ return LV;
+}
+
+static void clearLinkageForClass(const CXXRecordDecl *record) {
+ for (CXXRecordDecl::decl_iterator
+ i = record->decls_begin(), e = record->decls_end(); i != e; ++i) {
+ Decl *child = *i;
+ if (isa<NamedDecl>(child))
+ cast<NamedDecl>(child)->ClearLinkageCache();
+ }
+}
+
+void NamedDecl::anchor() { }
+
+void NamedDecl::ClearLinkageCache() {
+ // Note that we can't skip clearing the linkage of children just
+ // because the parent doesn't have cached linkage: we don't cache
+ // when computing linkage for parent contexts.
+
+ HasCachedLinkage = 0;
+
+ // If we're changing the linkage of a class, we need to reset the
+ // linkage of child declarations, too.
+ if (const CXXRecordDecl *record = dyn_cast<CXXRecordDecl>(this))
+ clearLinkageForClass(record);
+
+ if (ClassTemplateDecl *temp =
+ dyn_cast<ClassTemplateDecl>(const_cast<NamedDecl*>(this))) {
+ // Clear linkage for the template pattern.
+ CXXRecordDecl *record = temp->getTemplatedDecl();
+ record->HasCachedLinkage = 0;
+ clearLinkageForClass(record);
+
+ // We need to clear linkage for specializations, too.
+ for (ClassTemplateDecl::spec_iterator
+ i = temp->spec_begin(), e = temp->spec_end(); i != e; ++i)
+ i->ClearLinkageCache();
+ }
+
+ // Clear cached linkage for function template decls, too.
+ if (FunctionTemplateDecl *temp =
+ dyn_cast<FunctionTemplateDecl>(const_cast<NamedDecl*>(this))) {
+ temp->getTemplatedDecl()->ClearLinkageCache();
+ for (FunctionTemplateDecl::spec_iterator
+ i = temp->spec_begin(), e = temp->spec_end(); i != e; ++i)
+ i->ClearLinkageCache();
+ }
+
+}
+
+Linkage NamedDecl::getLinkage() const {
+ if (HasCachedLinkage) {
+ assert(Linkage(CachedLinkage) ==
+ getLVForDecl(this, LVFlags::CreateOnlyDeclLinkage()).linkage());
+ return Linkage(CachedLinkage);
+ }
+
+ CachedLinkage = getLVForDecl(this,
+ LVFlags::CreateOnlyDeclLinkage()).linkage();
+ HasCachedLinkage = 1;
+ return Linkage(CachedLinkage);
+}
+
+LinkageInfo NamedDecl::getLinkageAndVisibility() const {
+ LinkageInfo LI = getLVForDecl(this, LVFlags());
+ assert(!HasCachedLinkage || Linkage(CachedLinkage) == LI.linkage());
+ HasCachedLinkage = 1;
+ CachedLinkage = LI.linkage();
+ return LI;
+}
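
A small usage sketch for the caching entry points above (hypothetical helper; assumes llvm/Support/raw_ostream.h): clients normally go through getLinkage() or getLinkageAndVisibility() rather than the internal getLVForDecl machinery.

static void dumpLinkage(const clang::NamedDecl *ND) {
  clang::NamedDecl::LinkageInfo LI = ND->getLinkageAndVisibility();
  llvm::errs() << ND->getNameAsString()
               << ": linkage=" << unsigned(LI.linkage())
               << ", visibility=" << unsigned(LI.visibility())
               << (LI.visibilityExplicit() ? " (explicit)" : "")
               << "\n";
}
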
+
+llvm::Optional<Visibility> NamedDecl::getExplicitVisibility() const {
+ // Use the most recent declaration of a variable.
+ if (const VarDecl *var = dyn_cast<VarDecl>(this))
+ return getVisibilityOf(var->getMostRecentDecl());
+
+ // Use the most recent declaration of a function, and also handle
+ // function template specializations.
+ if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(this)) {
+ if (llvm::Optional<Visibility> V
+ = getVisibilityOf(fn->getMostRecentDecl()))
+ return V;
+
+ // If the function is a specialization of a template with an
+ // explicit visibility attribute, use that.
+ if (FunctionTemplateSpecializationInfo *templateInfo
+ = fn->getTemplateSpecializationInfo())
+ return getVisibilityOf(templateInfo->getTemplate()->getTemplatedDecl());
+
+ // If the function is a member of a specialization of a class template
+ // and the corresponding decl has explicit visibility, use that.
+ FunctionDecl *InstantiatedFrom = fn->getInstantiatedFromMemberFunction();
+ if (InstantiatedFrom)
+ return getVisibilityOf(InstantiatedFrom);
+
+ return llvm::Optional<Visibility>();
+ }
+
+ // Otherwise, just check the declaration itself first.
+ if (llvm::Optional<Visibility> V = getVisibilityOf(this))
+ return V;
+
+ // If there wasn't explicit visibility there, and this is a
+ // specialization of a class template, check for visibility
+ // on the pattern.
+ if (const ClassTemplateSpecializationDecl *spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(this))
+ return getVisibilityOf(spec->getSpecializedTemplate()->getTemplatedDecl());
+
+ // If this is a member class of a specialization of a class template
+ // and the corresponding decl has explicit visibility, use that.
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(this)) {
+ CXXRecordDecl *InstantiatedFrom = RD->getInstantiatedFromMemberClass();
+ if (InstantiatedFrom)
+ return getVisibilityOf(InstantiatedFrom);
+ }
+
+ return llvm::Optional<Visibility>();
+}
+
+static LinkageInfo getLVForDecl(const NamedDecl *D, LVFlags Flags) {
+ // Objective-C: treat all Objective-C declarations as having external
+ // linkage.
+ switch (D->getKind()) {
+ default:
+ break;
+ case Decl::ParmVar:
+ return LinkageInfo::none();
+ case Decl::TemplateTemplateParm: // count these as external
+ case Decl::NonTypeTemplateParm:
+ case Decl::ObjCAtDefsField:
+ case Decl::ObjCCategory:
+ case Decl::ObjCCategoryImpl:
+ case Decl::ObjCCompatibleAlias:
+ case Decl::ObjCImplementation:
+ case Decl::ObjCMethod:
+ case Decl::ObjCProperty:
+ case Decl::ObjCPropertyImpl:
+ case Decl::ObjCProtocol:
+ return LinkageInfo::external();
+
+ case Decl::CXXRecord: {
+ const CXXRecordDecl *Record = cast<CXXRecordDecl>(D);
+ if (Record->isLambda()) {
+ if (!Record->getLambdaManglingNumber()) {
+ // This lambda has no mangling number, so it's internal.
+ return LinkageInfo::internal();
+ }
+
+ // This lambda has its linkage/visibility determined by its owner.
+ const DeclContext *DC = D->getDeclContext()->getRedeclContext();
+ if (Decl *ContextDecl = Record->getLambdaContextDecl()) {
+ if (isa<ParmVarDecl>(ContextDecl))
+ DC = ContextDecl->getDeclContext()->getRedeclContext();
+ else
+ return getLVForDecl(cast<NamedDecl>(ContextDecl), Flags);
+ }
+
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(DC))
+ return getLVForDecl(ND, Flags);
+
+ return LinkageInfo::external();
+ }
+
+ break;
+ }
+ }
+
+ // Handle linkage for namespace-scope names.
+ if (D->getDeclContext()->getRedeclContext()->isFileContext())
+ return getLVForNamespaceScopeDecl(D, Flags);
+
+ // C++ [basic.link]p5:
+ // In addition, a member function, static data member, a named
+ // class or enumeration of class scope, or an unnamed class or
+ // enumeration defined in a class-scope typedef declaration such
+ // that the class or enumeration has the typedef name for linkage
+ // purposes (7.1.3), has external linkage if the name of the class
+ // has external linkage.
+ if (D->getDeclContext()->isRecord())
+ return getLVForClassMember(D, Flags);
+
+ // C++ [basic.link]p6:
+ // The name of a function declared in block scope and the name of
+ // an object declared by a block scope extern declaration have
+ // linkage. If there is a visible declaration of an entity with
+ // linkage having the same name and type, ignoring entities
+ // declared outside the innermost enclosing namespace scope, the
+ // block scope declaration declares that same entity and receives
+ // the linkage of the previous declaration. If there is more than
+ // one such matching entity, the program is ill-formed. Otherwise,
+ // if no matching entity is found, the block scope entity receives
+ // external linkage.
+ if (D->getLexicalDeclContext()->isFunctionOrMethod()) {
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ if (Function->isInAnonymousNamespace() &&
+ !Function->getDeclContext()->isExternCContext())
+ return LinkageInfo::uniqueExternal();
+
+ LinkageInfo LV;
+ if (Flags.ConsiderVisibilityAttributes) {
+ if (llvm::Optional<Visibility> Vis = Function->getExplicitVisibility())
+ LV.setVisibility(*Vis);
+ }
+
+ if (const FunctionDecl *Prev = Function->getPreviousDecl()) {
+ LinkageInfo PrevLV = getLVForDecl(Prev, Flags);
+ if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
+ LV.mergeVisibility(PrevLV);
+ }
+
+ return LV;
+ }
+
+ if (const VarDecl *Var = dyn_cast<VarDecl>(D))
+ if (Var->getStorageClass() == SC_Extern ||
+ Var->getStorageClass() == SC_PrivateExtern) {
+ if (Var->isInAnonymousNamespace() &&
+ !Var->getDeclContext()->isExternCContext())
+ return LinkageInfo::uniqueExternal();
+
+ LinkageInfo LV;
+ if (Var->getStorageClass() == SC_PrivateExtern)
+ LV.setVisibility(HiddenVisibility);
+ else if (Flags.ConsiderVisibilityAttributes) {
+ if (llvm::Optional<Visibility> Vis = Var->getExplicitVisibility())
+ LV.setVisibility(*Vis);
+ }
+
+ if (const VarDecl *Prev = Var->getPreviousDecl()) {
+ LinkageInfo PrevLV = getLVForDecl(Prev, Flags);
+ if (PrevLV.linkage()) LV.setLinkage(PrevLV.linkage());
+ LV.mergeVisibility(PrevLV);
+ }
+
+ return LV;
+ }
+ }
+
+ // C++ [basic.link]p6:
+ // Names not covered by these rules have no linkage.
+ return LinkageInfo::none();
+}
+
+std::string NamedDecl::getQualifiedNameAsString() const {
+ return getQualifiedNameAsString(getASTContext().getPrintingPolicy());
+}
+
+std::string NamedDecl::getQualifiedNameAsString(const PrintingPolicy &P) const {
+ const DeclContext *Ctx = getDeclContext();
+
+ if (Ctx->isFunctionOrMethod())
+ return getNameAsString();
+
+ typedef SmallVector<const DeclContext *, 8> ContextsTy;
+ ContextsTy Contexts;
+
+ // Collect contexts.
+ while (Ctx && isa<NamedDecl>(Ctx)) {
+ Contexts.push_back(Ctx);
+ Ctx = Ctx->getParent();
+  }
+
+ std::string QualName;
+ llvm::raw_string_ostream OS(QualName);
+
+ for (ContextsTy::reverse_iterator I = Contexts.rbegin(), E = Contexts.rend();
+ I != E; ++I) {
+ if (const ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(*I)) {
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ std::string TemplateArgsStr
+ = TemplateSpecializationType::PrintTemplateArgumentList(
+ TemplateArgs.data(),
+ TemplateArgs.size(),
+ P);
+ OS << Spec->getName() << TemplateArgsStr;
+ } else if (const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(*I)) {
+ if (ND->isAnonymousNamespace())
+ OS << "<anonymous namespace>";
+ else
+ OS << *ND;
+ } else if (const RecordDecl *RD = dyn_cast<RecordDecl>(*I)) {
+ if (!RD->getIdentifier())
+ OS << "<anonymous " << RD->getKindName() << '>';
+ else
+ OS << *RD;
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
+ const FunctionProtoType *FT = 0;
+ if (FD->hasWrittenPrototype())
+ FT = dyn_cast<FunctionProtoType>(FD->getType()->getAs<FunctionType>());
+
+ OS << *FD << '(';
+ if (FT) {
+ unsigned NumParams = FD->getNumParams();
+ for (unsigned i = 0; i < NumParams; ++i) {
+ if (i)
+ OS << ", ";
+ std::string Param;
+ FD->getParamDecl(i)->getType().getAsStringInternal(Param, P);
+ OS << Param;
+ }
+
+ if (FT->isVariadic()) {
+ if (NumParams > 0)
+ OS << ", ";
+ OS << "...";
+ }
+ }
+ OS << ')';
+ } else {
+ OS << *cast<NamedDecl>(*I);
+ }
+ OS << "::";
+ }
+
+ if (getDeclName())
+ OS << *this;
+ else
+ OS << "<anonymous>";
+
+ return OS.str();
+}
+
+bool NamedDecl::declarationReplaces(NamedDecl *OldD) const {
+ assert(getDeclName() == OldD->getDeclName() && "Declaration name mismatch");
+
+  // UsingDirectiveDecls are not really NamedDecls; they all share the same
+  // name. We want to keep both unless they nominate the same namespace.
+ if (getKind() == Decl::UsingDirective) {
+ return cast<UsingDirectiveDecl>(this)->getNominatedNamespace()
+ ->getOriginalNamespace() ==
+ cast<UsingDirectiveDecl>(OldD)->getNominatedNamespace()
+ ->getOriginalNamespace();
+ }
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this))
+ // For function declarations, we keep track of redeclarations.
+ return FD->getPreviousDecl() == OldD;
+
+ // For function templates, the underlying function declarations are linked.
+ if (const FunctionTemplateDecl *FunctionTemplate
+ = dyn_cast<FunctionTemplateDecl>(this))
+ if (const FunctionTemplateDecl *OldFunctionTemplate
+ = dyn_cast<FunctionTemplateDecl>(OldD))
+ return FunctionTemplate->getTemplatedDecl()
+ ->declarationReplaces(OldFunctionTemplate->getTemplatedDecl());
+
+ // For method declarations, we keep track of redeclarations.
+ if (isa<ObjCMethodDecl>(this))
+ return false;
+
+ if (isa<ObjCInterfaceDecl>(this) && isa<ObjCCompatibleAliasDecl>(OldD))
+ return true;
+
+ if (isa<UsingShadowDecl>(this) && isa<UsingShadowDecl>(OldD))
+ return cast<UsingShadowDecl>(this)->getTargetDecl() ==
+ cast<UsingShadowDecl>(OldD)->getTargetDecl();
+
+ if (isa<UsingDecl>(this) && isa<UsingDecl>(OldD)) {
+ ASTContext &Context = getASTContext();
+ return Context.getCanonicalNestedNameSpecifier(
+ cast<UsingDecl>(this)->getQualifier()) ==
+ Context.getCanonicalNestedNameSpecifier(
+ cast<UsingDecl>(OldD)->getQualifier());
+ }
+
+ // A typedef of an Objective-C class type can replace an Objective-C class
+ // declaration or definition, and vice versa.
+ if ((isa<TypedefNameDecl>(this) && isa<ObjCInterfaceDecl>(OldD)) ||
+ (isa<ObjCInterfaceDecl>(this) && isa<TypedefNameDecl>(OldD)))
+ return true;
+
+ // For non-function declarations, if the declarations are of the
+ // same kind then this must be a redeclaration, or semantic analysis
+ // would not have given us the new declaration.
+ return this->getKind() == OldD->getKind();
+}
+
+bool NamedDecl::hasLinkage() const {
+ return getLinkage() != NoLinkage;
+}
+
+NamedDecl *NamedDecl::getUnderlyingDeclImpl() {
+ NamedDecl *ND = this;
+ while (UsingShadowDecl *UD = dyn_cast<UsingShadowDecl>(ND))
+ ND = UD->getTargetDecl();
+
+ if (ObjCCompatibleAliasDecl *AD = dyn_cast<ObjCCompatibleAliasDecl>(ND))
+ return AD->getClassInterface();
+
+ return ND;
+}
+
+bool NamedDecl::isCXXInstanceMember() const {
+ if (!isCXXClassMember())
+ return false;
+
+ const NamedDecl *D = this;
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D))
+ return true;
+ if (isa<CXXMethodDecl>(D))
+ return cast<CXXMethodDecl>(D)->isInstance();
+ if (isa<FunctionTemplateDecl>(D))
+ return cast<CXXMethodDecl>(cast<FunctionTemplateDecl>(D)
+ ->getTemplatedDecl())->isInstance();
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// DeclaratorDecl Implementation
+//===----------------------------------------------------------------------===//
+
+template <typename DeclT>
+static SourceLocation getTemplateOrInnerLocStart(const DeclT *decl) {
+ if (decl->getNumTemplateParameterLists() > 0)
+ return decl->getTemplateParameterList(0)->getTemplateLoc();
+ else
+ return decl->getInnerLocStart();
+}
+
+SourceLocation DeclaratorDecl::getTypeSpecStartLoc() const {
+ TypeSourceInfo *TSI = getTypeSourceInfo();
+ if (TSI) return TSI->getTypeLoc().getBeginLoc();
+ return SourceLocation();
+}
+
+void DeclaratorDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) {
+ if (QualifierLoc) {
+ // Make sure the extended decl info is allocated.
+ if (!hasExtInfo()) {
+ // Save (non-extended) type source info pointer.
+ TypeSourceInfo *savedTInfo = DeclInfo.get<TypeSourceInfo*>();
+ // Allocate external info struct.
+ DeclInfo = new (getASTContext()) ExtInfo;
+ // Restore savedTInfo into (extended) decl info.
+ getExtInfo()->TInfo = savedTInfo;
+ }
+ // Set qualifier info.
+ getExtInfo()->QualifierLoc = QualifierLoc;
+ } else {
+ // Here Qualifier == 0, i.e., we are removing the qualifier (if any).
+ if (hasExtInfo()) {
+ if (getExtInfo()->NumTemplParamLists == 0) {
+ // Save type source info pointer.
+ TypeSourceInfo *savedTInfo = getExtInfo()->TInfo;
+ // Deallocate the extended decl info.
+ getASTContext().Deallocate(getExtInfo());
+ // Restore savedTInfo into (non-extended) decl info.
+ DeclInfo = savedTInfo;
+ }
+ else
+ getExtInfo()->QualifierLoc = QualifierLoc;
+ }
+ }
+}
+
+void
+DeclaratorDecl::setTemplateParameterListsInfo(ASTContext &Context,
+ unsigned NumTPLists,
+ TemplateParameterList **TPLists) {
+ assert(NumTPLists > 0);
+ // Make sure the extended decl info is allocated.
+ if (!hasExtInfo()) {
+ // Save (non-extended) type source info pointer.
+ TypeSourceInfo *savedTInfo = DeclInfo.get<TypeSourceInfo*>();
+ // Allocate external info struct.
+ DeclInfo = new (getASTContext()) ExtInfo;
+ // Restore savedTInfo into (extended) decl info.
+ getExtInfo()->TInfo = savedTInfo;
+ }
+ // Set the template parameter lists info.
+ getExtInfo()->setTemplateParameterListsInfo(Context, NumTPLists, TPLists);
+}
+
+SourceLocation DeclaratorDecl::getOuterLocStart() const {
+ return getTemplateOrInnerLocStart(this);
+}
+
+namespace {
+
+// Helper function: returns true if QT is or contains a type
+// having a postfix component.
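+// For example, array and function types such as `int[10]` or `void ()` are
+// written with tokens after the declared name, so the declarator's source
+// range must extend past the name's location.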
+bool typeIsPostfix(clang::QualType QT) {
+ while (true) {
+ const Type* T = QT.getTypePtr();
+ switch (T->getTypeClass()) {
+ default:
+ return false;
+ case Type::Pointer:
+ QT = cast<PointerType>(T)->getPointeeType();
+ break;
+ case Type::BlockPointer:
+ QT = cast<BlockPointerType>(T)->getPointeeType();
+ break;
+ case Type::MemberPointer:
+ QT = cast<MemberPointerType>(T)->getPointeeType();
+ break;
+ case Type::LValueReference:
+ case Type::RValueReference:
+ QT = cast<ReferenceType>(T)->getPointeeType();
+ break;
+ case Type::PackExpansion:
+ QT = cast<PackExpansionType>(T)->getPattern();
+ break;
+ case Type::Paren:
+ case Type::ConstantArray:
+ case Type::DependentSizedArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ return true;
+ }
+ }
+}
+
+} // namespace
+
+SourceRange DeclaratorDecl::getSourceRange() const {
+ SourceLocation RangeEnd = getLocation();
+ if (TypeSourceInfo *TInfo = getTypeSourceInfo()) {
+ if (typeIsPostfix(TInfo->getType()))
+ RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd();
+ }
+ return SourceRange(getOuterLocStart(), RangeEnd);
+}
+
+void
+QualifierInfo::setTemplateParameterListsInfo(ASTContext &Context,
+ unsigned NumTPLists,
+ TemplateParameterList **TPLists) {
+ assert((NumTPLists == 0 || TPLists != 0) &&
+ "Empty array of template parameters with positive size!");
+
+ // Free previous template parameters (if any).
+ if (NumTemplParamLists > 0) {
+ Context.Deallocate(TemplParamLists);
+ TemplParamLists = 0;
+ NumTemplParamLists = 0;
+ }
+ // Set info on matched template parameter lists (if any).
+ if (NumTPLists > 0) {
+ TemplParamLists = new (Context) TemplateParameterList*[NumTPLists];
+ NumTemplParamLists = NumTPLists;
+ for (unsigned i = NumTPLists; i-- > 0; )
+ TemplParamLists[i] = TPLists[i];
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// VarDecl Implementation
+//===----------------------------------------------------------------------===//
+
+const char *VarDecl::getStorageClassSpecifierString(StorageClass SC) {
+ switch (SC) {
+ case SC_None: break;
+ case SC_Auto: return "auto";
+ case SC_Extern: return "extern";
+ case SC_OpenCLWorkGroupLocal: return "<<work-group-local>>";
+ case SC_PrivateExtern: return "__private_extern__";
+ case SC_Register: return "register";
+ case SC_Static: return "static";
+ }
+
+ llvm_unreachable("Invalid storage class");
+}
+
+VarDecl *VarDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartL, SourceLocation IdL,
+ IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
+ StorageClass S, StorageClass SCAsWritten) {
+ return new (C) VarDecl(Var, DC, StartL, IdL, Id, T, TInfo, S, SCAsWritten);
+}
+
+VarDecl *VarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(VarDecl));
+ return new (Mem) VarDecl(Var, 0, SourceLocation(), SourceLocation(), 0,
+ QualType(), 0, SC_None, SC_None);
+}
+
+void VarDecl::setStorageClass(StorageClass SC) {
+ assert(isLegalForVariable(SC));
+ if (getStorageClass() != SC)
+ ClearLinkageCache();
+
+ VarDeclBits.SClass = SC;
+}
+
+SourceRange VarDecl::getSourceRange() const {
+ if (getInit())
+ return SourceRange(getOuterLocStart(), getInit()->getLocEnd());
+ return DeclaratorDecl::getSourceRange();
+}
+
+bool VarDecl::isExternC() const {
+ if (getLinkage() != ExternalLinkage)
+ return false;
+
+ const DeclContext *DC = getDeclContext();
+ if (DC->isRecord())
+ return false;
+
+ ASTContext &Context = getASTContext();
+ if (!Context.getLangOpts().CPlusPlus)
+ return true;
+ return DC->isExternCContext();
+}
+
+VarDecl *VarDecl::getCanonicalDecl() {
+ return getFirstDeclaration();
+}
+
+VarDecl::DefinitionKind
+VarDecl::isThisDeclarationADefinition(ASTContext &C) const {
+ // C++ [basic.def]p2:
+ // A declaration is a definition unless [...] it contains the 'extern'
+ // specifier or a linkage-specification and neither an initializer [...],
+ // it declares a static data member in a class declaration [...].
+ // C++ [temp.expl.spec]p15:
+ // An explicit specialization of a static data member of a template is a
+ // definition if the declaration includes an initializer; otherwise, it is
+ // a declaration.
+ if (isStaticDataMember()) {
+ if (isOutOfLine() && (hasInit() ||
+ getTemplateSpecializationKind() != TSK_ExplicitSpecialization))
+ return Definition;
+ else
+ return DeclarationOnly;
+ }
+ // C99 6.7p5:
+ // A definition of an identifier is a declaration for that identifier that
+ // [...] causes storage to be reserved for that object.
+ // Note: that applies for all non-file-scope objects.
+ // C99 6.9.2p1:
+ // If the declaration of an identifier for an object has file scope and an
+ // initializer, the declaration is an external definition for the identifier
+ if (hasInit())
+ return Definition;
+ // AST for 'extern "C" int foo;' is annotated with 'extern'.
+ if (hasExternalStorage())
+ return DeclarationOnly;
+
+ if (getStorageClassAsWritten() == SC_Extern ||
+ getStorageClassAsWritten() == SC_PrivateExtern) {
+ for (const VarDecl *PrevVar = getPreviousDecl();
+ PrevVar; PrevVar = PrevVar->getPreviousDecl()) {
+ if (PrevVar->getLinkage() == InternalLinkage && PrevVar->hasInit())
+ return DeclarationOnly;
+ }
+ }
+ // C99 6.9.2p2:
+ // A declaration of an object that has file scope without an initializer,
+ // and without a storage class specifier or the scs 'static', constitutes
+ // a tentative definition.
+ // No such thing in C++.
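+  // For example, a file-scope `int n;` with no initializer is a tentative
+  // definition in C but an ordinary definition in C++.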
+ if (!C.getLangOpts().CPlusPlus && isFileVarDecl())
+ return TentativeDefinition;
+
+ // What's left is (in C, block-scope) declarations without initializers or
+ // external storage. These are definitions.
+ return Definition;
+}
+
+VarDecl *VarDecl::getActingDefinition() {
+ DefinitionKind Kind = isThisDeclarationADefinition();
+ if (Kind != TentativeDefinition)
+ return 0;
+
+ VarDecl *LastTentative = 0;
+ VarDecl *First = getFirstDeclaration();
+ for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
+ I != E; ++I) {
+ Kind = (*I)->isThisDeclarationADefinition();
+ if (Kind == Definition)
+ return 0;
+ else if (Kind == TentativeDefinition)
+ LastTentative = *I;
+ }
+ return LastTentative;
+}
+
+bool VarDecl::isTentativeDefinitionNow() const {
+ DefinitionKind Kind = isThisDeclarationADefinition();
+ if (Kind != TentativeDefinition)
+ return false;
+
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+ if ((*I)->isThisDeclarationADefinition() == Definition)
+ return false;
+ }
+ return true;
+}
+
+VarDecl *VarDecl::getDefinition(ASTContext &C) {
+ VarDecl *First = getFirstDeclaration();
+ for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
+ I != E; ++I) {
+ if ((*I)->isThisDeclarationADefinition(C) == Definition)
+ return *I;
+ }
+ return 0;
+}
+
+VarDecl::DefinitionKind VarDecl::hasDefinition(ASTContext &C) const {
+ DefinitionKind Kind = DeclarationOnly;
+
+ const VarDecl *First = getFirstDeclaration();
+ for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
+ I != E; ++I) {
+ Kind = std::max(Kind, (*I)->isThisDeclarationADefinition(C));
+ if (Kind == Definition)
+ break;
+ }
+
+ return Kind;
+}
+
+const Expr *VarDecl::getAnyInitializer(const VarDecl *&D) const {
+ redecl_iterator I = redecls_begin(), E = redecls_end();
+ while (I != E && !I->getInit())
+ ++I;
+
+ if (I != E) {
+ D = *I;
+ return I->getInit();
+ }
+ return 0;
+}
+
+bool VarDecl::isOutOfLine() const {
+ if (Decl::isOutOfLine())
+ return true;
+
+ if (!isStaticDataMember())
+ return false;
+
+ // If this static data member was instantiated from a static data member of
+ // a class template, check whether that static data member was defined
+ // out-of-line.
+ if (VarDecl *VD = getInstantiatedFromStaticDataMember())
+ return VD->isOutOfLine();
+
+ return false;
+}
+
+VarDecl *VarDecl::getOutOfLineDefinition() {
+ if (!isStaticDataMember())
+ return 0;
+
+ for (VarDecl::redecl_iterator RD = redecls_begin(), RDEnd = redecls_end();
+ RD != RDEnd; ++RD) {
+ if (RD->getLexicalDeclContext()->isFileContext())
+ return *RD;
+ }
+
+ return 0;
+}
+
+void VarDecl::setInit(Expr *I) {
+ if (EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>()) {
+ Eval->~EvaluatedStmt();
+ getASTContext().Deallocate(Eval);
+ }
+
+ Init = I;
+}
+
+bool VarDecl::isUsableInConstantExpressions(ASTContext &C) const {
+ const LangOptions &Lang = C.getLangOpts();
+
+ if (!Lang.CPlusPlus)
+ return false;
+
+ // In C++11, any variable of reference type can be used in a constant
+ // expression if it is initialized by a constant expression.
+ if (Lang.CPlusPlus0x && getType()->isReferenceType())
+ return true;
+
+ // Only const objects can be used in constant expressions in C++. C++98 does
+ // not require the variable to be non-volatile, but we consider this to be a
+ // defect.
+ if (!getType().isConstQualified() || getType().isVolatileQualified())
+ return false;
+
+ // In C++, const, non-volatile variables of integral or enumeration types
+ // can be used in constant expressions.
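+  // For example, `const int N = 3;` may be used as an array bound, whereas a
+  // const variable of floating-point or class type may not (absent C++11
+  // constexpr).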
+ if (getType()->isIntegralOrEnumerationType())
+ return true;
+
+ // Additionally, in C++11, non-volatile constexpr variables can be used in
+ // constant expressions.
+ return Lang.CPlusPlus0x && isConstexpr();
+}
+
+/// Convert the initializer for this declaration to the elaborated EvaluatedStmt
+/// form, which contains extra information on the evaluated value of the
+/// initializer.
+EvaluatedStmt *VarDecl::ensureEvaluatedStmt() const {
+ EvaluatedStmt *Eval = Init.dyn_cast<EvaluatedStmt *>();
+ if (!Eval) {
+ Stmt *S = Init.get<Stmt *>();
+ Eval = new (getASTContext()) EvaluatedStmt;
+ Eval->Value = S;
+ Init = Eval;
+ }
+ return Eval;
+}
+
+APValue *VarDecl::evaluateValue() const {
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ return evaluateValue(Notes);
+}
+
+APValue *VarDecl::evaluateValue(
+ llvm::SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
+ EvaluatedStmt *Eval = ensureEvaluatedStmt();
+
+ // We only produce notes indicating why an initializer is non-constant the
+ // first time it is evaluated. FIXME: The notes won't always be emitted the
+  // first time we try evaluation, so they might not be produced at all.
+ if (Eval->WasEvaluated)
+ return Eval->Evaluated.isUninit() ? 0 : &Eval->Evaluated;
+
+ const Expr *Init = cast<Expr>(Eval->Value);
+ assert(!Init->isValueDependent());
+
+ if (Eval->IsEvaluating) {
+ // FIXME: Produce a diagnostic for self-initialization.
+ Eval->CheckedICE = true;
+ Eval->IsICE = false;
+ return 0;
+ }
+
+ Eval->IsEvaluating = true;
+
+ bool Result = Init->EvaluateAsInitializer(Eval->Evaluated, getASTContext(),
+ this, Notes);
+
+ // Ensure the result is an uninitialized APValue if evaluation fails.
+ if (!Result)
+ Eval->Evaluated = APValue();
+
+ Eval->IsEvaluating = false;
+ Eval->WasEvaluated = true;
+
+ // In C++11, we have determined whether the initializer was a constant
+ // expression as a side-effect.
+ if (getASTContext().getLangOpts().CPlusPlus0x && !Eval->CheckedICE) {
+ Eval->CheckedICE = true;
+ Eval->IsICE = Result && Notes.empty();
+ }
+
+ return Result ? &Eval->Evaluated : 0;
+}
+
+bool VarDecl::checkInitIsICE() const {
+ // Initializers of weak variables are never ICEs.
+ if (isWeak())
+ return false;
+
+ EvaluatedStmt *Eval = ensureEvaluatedStmt();
+ if (Eval->CheckedICE)
+ // We have already checked whether this subexpression is an
+ // integral constant expression.
+ return Eval->IsICE;
+
+ const Expr *Init = cast<Expr>(Eval->Value);
+ assert(!Init->isValueDependent());
+
+ // In C++11, evaluate the initializer to check whether it's a constant
+ // expression.
+ if (getASTContext().getLangOpts().CPlusPlus0x) {
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ evaluateValue(Notes);
+ return Eval->IsICE;
+ }
+
+ // It's an ICE whether or not the definition we found is
+ // out-of-line. See DR 721 and the discussion in Clang PR
+ // 6206 for details.
+
+ if (Eval->CheckingICE)
+ return false;
+ Eval->CheckingICE = true;
+
+ Eval->IsICE = Init->isIntegerConstantExpr(getASTContext());
+ Eval->CheckingICE = false;
+ Eval->CheckedICE = true;
+ return Eval->IsICE;
+}
+
+bool VarDecl::extendsLifetimeOfTemporary() const {
+  assert(getType()->isReferenceType() &&
+         "Non-references never extend lifetime");
+
+ const Expr *E = getInit();
+ if (!E)
+ return false;
+
+ if (const ExprWithCleanups *Cleanups = dyn_cast<ExprWithCleanups>(E))
+ E = Cleanups->getSubExpr();
+
+ return isa<MaterializeTemporaryExpr>(E);
+}
+
+VarDecl *VarDecl::getInstantiatedFromStaticDataMember() const {
+ if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
+ return cast<VarDecl>(MSI->getInstantiatedFrom());
+
+ return 0;
+}
+
+TemplateSpecializationKind VarDecl::getTemplateSpecializationKind() const {
+ if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
+ return MSI->getTemplateSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
+MemberSpecializationInfo *VarDecl::getMemberSpecializationInfo() const {
+ return getASTContext().getInstantiatedFromStaticDataMember(this);
+}
+
+void VarDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation) {
+ MemberSpecializationInfo *MSI = getMemberSpecializationInfo();
+ assert(MSI && "Not an instantiated static data member?");
+ MSI->setTemplateSpecializationKind(TSK);
+ if (TSK != TSK_ExplicitSpecialization &&
+ PointOfInstantiation.isValid() &&
+ MSI->getPointOfInstantiation().isInvalid())
+ MSI->setPointOfInstantiation(PointOfInstantiation);
+}
+
+//===----------------------------------------------------------------------===//
+// ParmVarDecl Implementation
+//===----------------------------------------------------------------------===//
+
+ParmVarDecl *ParmVarDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ QualType T, TypeSourceInfo *TInfo,
+ StorageClass S, StorageClass SCAsWritten,
+ Expr *DefArg) {
+ return new (C) ParmVarDecl(ParmVar, DC, StartLoc, IdLoc, Id, T, TInfo,
+ S, SCAsWritten, DefArg);
+}
+
+ParmVarDecl *ParmVarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ParmVarDecl));
+ return new (Mem) ParmVarDecl(ParmVar, 0, SourceLocation(), SourceLocation(),
+ 0, QualType(), 0, SC_None, SC_None, 0);
+}
+
+SourceRange ParmVarDecl::getSourceRange() const {
+ if (!hasInheritedDefaultArg()) {
+ SourceRange ArgRange = getDefaultArgRange();
+ if (ArgRange.isValid())
+ return SourceRange(getOuterLocStart(), ArgRange.getEnd());
+ }
+
+ return DeclaratorDecl::getSourceRange();
+}
+
+Expr *ParmVarDecl::getDefaultArg() {
+ assert(!hasUnparsedDefaultArg() && "Default argument is not yet parsed!");
+ assert(!hasUninstantiatedDefaultArg() &&
+ "Default argument is not yet instantiated!");
+
+ Expr *Arg = getInit();
+ if (ExprWithCleanups *E = dyn_cast_or_null<ExprWithCleanups>(Arg))
+ return E->getSubExpr();
+
+ return Arg;
+}
+
+SourceRange ParmVarDecl::getDefaultArgRange() const {
+ if (const Expr *E = getInit())
+ return E->getSourceRange();
+
+ if (hasUninstantiatedDefaultArg())
+ return getUninstantiatedDefaultArg()->getSourceRange();
+
+ return SourceRange();
+}
+
+bool ParmVarDecl::isParameterPack() const {
+ return isa<PackExpansionType>(getType());
+}
+
+void ParmVarDecl::setParameterIndexLarge(unsigned parameterIndex) {
+ getASTContext().setParameterIndex(this, parameterIndex);
+ ParmVarDeclBits.ParameterIndex = ParameterIndexSentinel;
+}
+
+unsigned ParmVarDecl::getParameterIndexLarge() const {
+ return getASTContext().getParameterIndex(this);
+}
+
+//===----------------------------------------------------------------------===//
+// FunctionDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void FunctionDecl::getNameForDiagnostic(std::string &S,
+ const PrintingPolicy &Policy,
+ bool Qualified) const {
+ NamedDecl::getNameForDiagnostic(S, Policy, Qualified);
+ const TemplateArgumentList *TemplateArgs = getTemplateSpecializationArgs();
+ if (TemplateArgs)
+    S += TemplateSpecializationType::PrintTemplateArgumentList(
+                                                          TemplateArgs->data(),
+                                                          TemplateArgs->size(),
+                                                          Policy);
+}
+
+bool FunctionDecl::isVariadic() const {
+ if (const FunctionProtoType *FT = getType()->getAs<FunctionProtoType>())
+ return FT->isVariadic();
+ return false;
+}
+
+bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const {
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+ if (I->Body || I->IsLateTemplateParsed) {
+ Definition = *I;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool FunctionDecl::hasTrivialBody() const {
+ Stmt *S = getBody();
+ if (!S) {
+ // Since we don't have a body for this function, we don't know if it's
+ // trivial or not.
+ return false;
+ }
+
+ if (isa<CompoundStmt>(S) && cast<CompoundStmt>(S)->body_empty())
+ return true;
+ return false;
+}
+
+bool FunctionDecl::isDefined(const FunctionDecl *&Definition) const {
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+ if (I->IsDeleted || I->IsDefaulted || I->Body || I->IsLateTemplateParsed) {
+ Definition = I->IsDeleted ? I->getCanonicalDecl() : *I;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+Stmt *FunctionDecl::getBody(const FunctionDecl *&Definition) const {
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+ if (I->Body) {
+ Definition = *I;
+ return I->Body.get(getASTContext().getExternalSource());
+ } else if (I->IsLateTemplateParsed) {
+ Definition = *I;
+ return 0;
+ }
+ }
+
+ return 0;
+}
+
+void FunctionDecl::setBody(Stmt *B) {
+ Body = B;
+ if (B)
+ EndRangeLoc = B->getLocEnd();
+}
+
+void FunctionDecl::setPure(bool P) {
+ IsPure = P;
+ if (P)
+ if (CXXRecordDecl *Parent = dyn_cast<CXXRecordDecl>(getDeclContext()))
+ Parent->markedVirtualFunctionPure();
+}
+
+bool FunctionDecl::isMain() const {
+ const TranslationUnitDecl *tunit =
+ dyn_cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext());
+ return tunit &&
+ !tunit->getASTContext().getLangOpts().Freestanding &&
+ getIdentifier() &&
+ getIdentifier()->isStr("main");
+}
+
+bool FunctionDecl::isReservedGlobalPlacementOperator() const {
+ assert(getDeclName().getNameKind() == DeclarationName::CXXOperatorName);
+ assert(getDeclName().getCXXOverloadedOperator() == OO_New ||
+ getDeclName().getCXXOverloadedOperator() == OO_Delete ||
+ getDeclName().getCXXOverloadedOperator() == OO_Array_New ||
+ getDeclName().getCXXOverloadedOperator() == OO_Array_Delete);
+
+ if (isa<CXXRecordDecl>(getDeclContext())) return false;
+ assert(getDeclContext()->getRedeclContext()->isTranslationUnit());
+
+ const FunctionProtoType *proto = getType()->castAs<FunctionProtoType>();
+ if (proto->getNumArgs() != 2 || proto->isVariadic()) return false;
+
+ ASTContext &Context =
+ cast<TranslationUnitDecl>(getDeclContext()->getRedeclContext())
+ ->getASTContext();
+
+ // The result type and first argument type are constant across all
+ // these operators. The second argument must be exactly void*.
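+  // For example, this accepts the reserved placement form
+  //   void *operator new(std::size_t, void *);
+  // but not a user-supplied overload whose second parameter is another type.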
+ return (proto->getArgType(1).getCanonicalType() == Context.VoidPtrTy);
+}
+
+bool FunctionDecl::isExternC() const {
+ if (getLinkage() != ExternalLinkage)
+ return false;
+
+ if (getAttr<OverloadableAttr>())
+ return false;
+
+ const DeclContext *DC = getDeclContext();
+ if (DC->isRecord())
+ return false;
+
+ ASTContext &Context = getASTContext();
+ if (!Context.getLangOpts().CPlusPlus)
+ return true;
+
+ return isMain() || DC->isExternCContext();
+}
+
+bool FunctionDecl::isGlobal() const {
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(this))
+ return Method->isStatic();
+
+ if (getStorageClass() == SC_Static)
+ return false;
+
+ for (const DeclContext *DC = getDeclContext();
+ DC->isNamespace();
+ DC = DC->getParent()) {
+ if (const NamespaceDecl *Namespace = cast<NamespaceDecl>(DC)) {
+ if (!Namespace->getDeclName())
+ return false;
+ break;
+ }
+ }
+
+ return true;
+}
+
+void
+FunctionDecl::setPreviousDeclaration(FunctionDecl *PrevDecl) {
+ redeclarable_base::setPreviousDeclaration(PrevDecl);
+
+ if (FunctionTemplateDecl *FunTmpl = getDescribedFunctionTemplate()) {
+ FunctionTemplateDecl *PrevFunTmpl
+ = PrevDecl? PrevDecl->getDescribedFunctionTemplate() : 0;
+ assert((!PrevDecl || PrevFunTmpl) && "Function/function template mismatch");
+ FunTmpl->setPreviousDeclaration(PrevFunTmpl);
+ }
+
+ if (PrevDecl && PrevDecl->IsInline)
+ IsInline = true;
+}
+
+const FunctionDecl *FunctionDecl::getCanonicalDecl() const {
+ return getFirstDeclaration();
+}
+
+FunctionDecl *FunctionDecl::getCanonicalDecl() {
+ return getFirstDeclaration();
+}
+
+void FunctionDecl::setStorageClass(StorageClass SC) {
+ assert(isLegalForFunction(SC));
+ if (getStorageClass() != SC)
+ ClearLinkageCache();
+
+ SClass = SC;
+}
+
+/// \brief Returns a value indicating whether this function
+/// corresponds to a builtin function.
+///
+/// The function corresponds to a built-in function if it is
+/// declared at translation-unit scope or within an extern "C" block and
+/// its name matches the name of a builtin. The returned value
+/// will be 0 for functions that do not correspond to a builtin, a
+/// value of type \c Builtin::ID if in the target-independent range
+/// \c [1,Builtin::First), or a target-specific builtin value.
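+///
+/// For example (illustrative), a file-scope C declaration of `printf` refers
+/// to the corresponding library builtin, whereas a function that merely
+/// shares the name but is declared `static` does not.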
+unsigned FunctionDecl::getBuiltinID() const {
+ if (!getIdentifier())
+ return 0;
+
+ unsigned BuiltinID = getIdentifier()->getBuiltinID();
+ if (!BuiltinID)
+ return 0;
+
+ ASTContext &Context = getASTContext();
+ if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ return BuiltinID;
+
+ // This function has the name of a known C library
+ // function. Determine whether it actually refers to the C library
+ // function or whether it just has the same name.
+
+ // If this is a static function, it's not a builtin.
+ if (getStorageClass() == SC_Static)
+ return 0;
+
+ // If this function is at translation-unit scope and we're not in
+ // C++, it refers to the C library function.
+ if (!Context.getLangOpts().CPlusPlus &&
+ getDeclContext()->isTranslationUnit())
+ return BuiltinID;
+
+ // If the function is in an extern "C" linkage specification and is
+ // not marked "overloadable", it's the real function.
+ if (isa<LinkageSpecDecl>(getDeclContext()) &&
+ cast<LinkageSpecDecl>(getDeclContext())->getLanguage()
+ == LinkageSpecDecl::lang_c &&
+ !getAttr<OverloadableAttr>())
+ return BuiltinID;
+
+ // Not a builtin
+ return 0;
+}
+
+
+/// getNumParams - Return the number of parameters this function must have
+/// based on its FunctionType. This is the length of the ParamInfo array
+/// after it has been created.
+unsigned FunctionDecl::getNumParams() const {
+ const FunctionType *FT = getType()->getAs<FunctionType>();
+ if (isa<FunctionNoProtoType>(FT))
+ return 0;
+ return cast<FunctionProtoType>(FT)->getNumArgs();
+}
+
+void FunctionDecl::setParams(ASTContext &C,
+ llvm::ArrayRef<ParmVarDecl *> NewParamInfo) {
+ assert(ParamInfo == 0 && "Already has param info!");
+ assert(NewParamInfo.size() == getNumParams() && "Parameter count mismatch!");
+
+ // Zero params -> null pointer.
+ if (!NewParamInfo.empty()) {
+ ParamInfo = new (C) ParmVarDecl*[NewParamInfo.size()];
+ std::copy(NewParamInfo.begin(), NewParamInfo.end(), ParamInfo);
+ }
+}
+
+void FunctionDecl::setDeclsInPrototypeScope(llvm::ArrayRef<NamedDecl *> NewDecls) {
+ assert(DeclsInPrototypeScope.empty() && "Already has prototype decls!");
+
+ if (!NewDecls.empty()) {
+ NamedDecl **A = new (getASTContext()) NamedDecl*[NewDecls.size()];
+ std::copy(NewDecls.begin(), NewDecls.end(), A);
+ DeclsInPrototypeScope = llvm::ArrayRef<NamedDecl*>(A, NewDecls.size());
+ }
+}
+
+/// getMinRequiredArguments - Returns the minimum number of arguments
+/// needed to call this function. This may be fewer than the number of
+/// function parameters, if some of the parameters have default
+/// arguments (in C++) or the last parameter is a parameter pack.
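+///
+/// For example (illustrative), `void f(int a, int b = 0, int c = 1)` has
+/// three parameters but only one required argument, and
+/// `template<class... Ts> void g(int a, Ts... ts)` likewise requires only one.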
+unsigned FunctionDecl::getMinRequiredArguments() const {
+ if (!getASTContext().getLangOpts().CPlusPlus)
+ return getNumParams();
+
+ unsigned NumRequiredArgs = getNumParams();
+
+ // If the last parameter is a parameter pack, we don't need an argument for
+ // it.
+ if (NumRequiredArgs > 0 &&
+ getParamDecl(NumRequiredArgs - 1)->isParameterPack())
+ --NumRequiredArgs;
+
+ // If this parameter has a default argument, we don't need an argument for
+ // it.
+ while (NumRequiredArgs > 0 &&
+ getParamDecl(NumRequiredArgs-1)->hasDefaultArg())
+ --NumRequiredArgs;
+
+ // We might have parameter packs before the end. These can't be deduced,
+ // but they can still handle multiple arguments.
+ unsigned ArgIdx = NumRequiredArgs;
+ while (ArgIdx > 0) {
+ if (getParamDecl(ArgIdx - 1)->isParameterPack())
+ NumRequiredArgs = ArgIdx;
+
+ --ArgIdx;
+ }
+
+ return NumRequiredArgs;
+}
+
+bool FunctionDecl::isInlined() const {
+ if (IsInline)
+ return true;
+
+ if (isa<CXXMethodDecl>(this)) {
+ if (!isOutOfLine() || getCanonicalDecl()->isInlineSpecified())
+ return true;
+ }
+
+ switch (getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ return false;
+
+ case TSK_ImplicitInstantiation:
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ExplicitInstantiationDefinition:
+ // Handle below.
+ break;
+ }
+
+ const FunctionDecl *PatternDecl = getTemplateInstantiationPattern();
+ bool HasPattern = false;
+ if (PatternDecl)
+ HasPattern = PatternDecl->hasBody(PatternDecl);
+
+ if (HasPattern && PatternDecl)
+ return PatternDecl->isInlined();
+
+ return false;
+}
+
+static bool RedeclForcesDefC99(const FunctionDecl *Redecl) {
+ // Only consider file-scope declarations in this test.
+ if (!Redecl->getLexicalDeclContext()->isTranslationUnit())
+ return false;
+
+ // Only consider explicit declarations; the presence of a builtin for a
+ // libcall shouldn't affect whether a definition is externally visible.
+ if (Redecl->isImplicit())
+ return false;
+
+ if (!Redecl->isInlineSpecified() || Redecl->getStorageClass() == SC_Extern)
+ return true; // Not an inline definition
+
+ return false;
+}
+
+/// \brief For a function declaration in C or C++, determine whether this
+/// declaration causes the definition to be externally visible.
+///
+/// Specifically, this determines if adding the current declaration to the set
+/// of redeclarations of the given functions causes
+/// isInlineDefinitionExternallyVisible to change from false to true.
+bool FunctionDecl::doesDeclarationForceExternallyVisibleDefinition() const {
+ assert(!doesThisDeclarationHaveABody() &&
+ "Must have a declaration without a body.");
+
+ ASTContext &Context = getASTContext();
+
+ if (Context.getLangOpts().GNUInline || hasAttr<GNUInlineAttr>()) {
+    // With GNU inlining, a declaration with 'inline' but not 'extern' forces
+    // an externally visible definition.
+ //
+ // FIXME: What happens if gnu_inline gets added on after the first
+ // declaration?
+ if (!isInlineSpecified() || getStorageClassAsWritten() == SC_Extern)
+ return false;
+
+ const FunctionDecl *Prev = this;
+ bool FoundBody = false;
+ while ((Prev = Prev->getPreviousDecl())) {
+ FoundBody |= Prev->Body;
+
+ if (Prev->Body) {
+ // If it's not the case that both 'inline' and 'extern' are
+ // specified on the definition, then it is always externally visible.
+ if (!Prev->isInlineSpecified() ||
+ Prev->getStorageClassAsWritten() != SC_Extern)
+ return false;
+ } else if (Prev->isInlineSpecified() &&
+ Prev->getStorageClassAsWritten() != SC_Extern) {
+ return false;
+ }
+ }
+ return FoundBody;
+ }
+
+ if (Context.getLangOpts().CPlusPlus)
+ return false;
+
+ // C99 6.7.4p6:
+ // [...] If all of the file scope declarations for a function in a
+ // translation unit include the inline function specifier without extern,
+ // then the definition in that translation unit is an inline definition.
+ if (isInlineSpecified() && getStorageClass() != SC_Extern)
+ return false;
+ const FunctionDecl *Prev = this;
+ bool FoundBody = false;
+ while ((Prev = Prev->getPreviousDecl())) {
+ FoundBody |= Prev->Body;
+ if (RedeclForcesDefC99(Prev))
+ return false;
+ }
+ return FoundBody;
+}
+
+/// \brief For an inline function definition in C or C++, determine whether the
+/// definition will be externally visible.
+///
+/// Inline function definitions are always available for inlining optimizations.
+/// However, depending on the language dialect, declaration specifiers, and
+/// attributes, the definition of an inline function may or may not be
+/// "externally" visible to other translation units in the program.
+///
+/// In C99, inline definitions are not externally visible by default. However,
+/// if even one of the global-scope declarations is marked "extern inline", the
+/// inline definition becomes externally visible (C99 6.7.4p6).
+///
+/// In GNU89 mode, or if the gnu_inline attribute is attached to the function
+/// definition, we use the GNU semantics for inline, which are nearly the
+/// opposite of C99 semantics. In particular, "inline" by itself will create
+/// an externally visible symbol, but "extern inline" will not create an
+/// externally visible symbol.
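+///
+/// For example (illustrative, C99): a translation unit containing only
+/// `inline int f(void) { return 0; }` has an inline definition of f that is
+/// not externally visible; adding a file-scope declaration without `inline`
+/// (or with `extern`) makes the definition externally visible. Under the GNU
+/// semantics the plain `inline` definition is the externally visible one,
+/// while `extern inline` is not.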
+bool FunctionDecl::isInlineDefinitionExternallyVisible() const {
+ assert(doesThisDeclarationHaveABody() && "Must have the function definition");
+ assert(isInlined() && "Function must be inline");
+ ASTContext &Context = getASTContext();
+
+ if (Context.getLangOpts().GNUInline || hasAttr<GNUInlineAttr>()) {
+ // Note: If you change the logic here, please change
+ // doesDeclarationForceExternallyVisibleDefinition as well.
+ //
+ // If it's not the case that both 'inline' and 'extern' are
+ // specified on the definition, then this inline definition is
+ // externally visible.
+ if (!(isInlineSpecified() && getStorageClassAsWritten() == SC_Extern))
+ return true;
+
+ // If any declaration is 'inline' but not 'extern', then this definition
+ // is externally visible.
+ for (redecl_iterator Redecl = redecls_begin(), RedeclEnd = redecls_end();
+ Redecl != RedeclEnd;
+ ++Redecl) {
+ if (Redecl->isInlineSpecified() &&
+ Redecl->getStorageClassAsWritten() != SC_Extern)
+ return true;
+ }
+
+ return false;
+ }
+
+ // C99 6.7.4p6:
+ // [...] If all of the file scope declarations for a function in a
+ // translation unit include the inline function specifier without extern,
+ // then the definition in that translation unit is an inline definition.
+ for (redecl_iterator Redecl = redecls_begin(), RedeclEnd = redecls_end();
+ Redecl != RedeclEnd;
+ ++Redecl) {
+ if (RedeclForcesDefC99(*Redecl))
+ return true;
+ }
+
+ // C99 6.7.4p6:
+ // An inline definition does not provide an external definition for the
+ // function, and does not forbid an external definition in another
+ // translation unit.
+ return false;
+}
+
+/// getOverloadedOperator - Which C++ overloaded operator this
+/// function represents, if any.
+OverloadedOperatorKind FunctionDecl::getOverloadedOperator() const {
+ if (getDeclName().getNameKind() == DeclarationName::CXXOperatorName)
+ return getDeclName().getCXXOverloadedOperator();
+ else
+ return OO_None;
+}
+
+/// getLiteralIdentifier - The literal suffix identifier this function
+/// represents, if any.
+const IdentifierInfo *FunctionDecl::getLiteralIdentifier() const {
+ if (getDeclName().getNameKind() == DeclarationName::CXXLiteralOperatorName)
+ return getDeclName().getCXXLiteralIdentifier();
+ else
+ return 0;
+}
+
+FunctionDecl::TemplatedKind FunctionDecl::getTemplatedKind() const {
+ if (TemplateOrSpecialization.isNull())
+ return TK_NonTemplate;
+ if (TemplateOrSpecialization.is<FunctionTemplateDecl *>())
+ return TK_FunctionTemplate;
+ if (TemplateOrSpecialization.is<MemberSpecializationInfo *>())
+ return TK_MemberSpecialization;
+ if (TemplateOrSpecialization.is<FunctionTemplateSpecializationInfo *>())
+ return TK_FunctionTemplateSpecialization;
+ if (TemplateOrSpecialization.is
+ <DependentFunctionTemplateSpecializationInfo*>())
+ return TK_DependentFunctionTemplateSpecialization;
+
+ llvm_unreachable("Did we miss a TemplateOrSpecialization type?");
+}
+
+FunctionDecl *FunctionDecl::getInstantiatedFromMemberFunction() const {
+ if (MemberSpecializationInfo *Info = getMemberSpecializationInfo())
+ return cast<FunctionDecl>(Info->getInstantiatedFrom());
+
+ return 0;
+}
+
+MemberSpecializationInfo *FunctionDecl::getMemberSpecializationInfo() const {
+ return TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>();
+}
+
+void
+FunctionDecl::setInstantiationOfMemberFunction(ASTContext &C,
+ FunctionDecl *FD,
+ TemplateSpecializationKind TSK) {
+ assert(TemplateOrSpecialization.isNull() &&
+ "Member function is already a specialization");
+ MemberSpecializationInfo *Info
+ = new (C) MemberSpecializationInfo(FD, TSK);
+ TemplateOrSpecialization = Info;
+}
+
+bool FunctionDecl::isImplicitlyInstantiable() const {
+ // If the function is invalid, it can't be implicitly instantiated.
+ if (isInvalidDecl())
+ return false;
+
+ switch (getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitInstantiationDefinition:
+ return false;
+
+ case TSK_ImplicitInstantiation:
+ return true;
+
+ // It is possible to instantiate TSK_ExplicitSpecialization kind
+ // if the FunctionDecl has a class scope specialization pattern.
+ case TSK_ExplicitSpecialization:
+ return getClassScopeSpecializationPattern() != 0;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ // Handled below.
+ break;
+ }
+
+ // Find the actual template from which we will instantiate.
+ const FunctionDecl *PatternDecl = getTemplateInstantiationPattern();
+ bool HasPattern = false;
+ if (PatternDecl)
+ HasPattern = PatternDecl->hasBody(PatternDecl);
+
+ // C++0x [temp.explicit]p9:
+ // Except for inline functions, other explicit instantiation declarations
+ // have the effect of suppressing the implicit instantiation of the entity
+ // to which they refer.
+ if (!HasPattern || !PatternDecl)
+ return true;
+
+ return PatternDecl->isInlined();
+}
+
+bool FunctionDecl::isTemplateInstantiation() const {
+ switch (getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ return false;
+ case TSK_ImplicitInstantiation:
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ExplicitInstantiationDefinition:
+ return true;
+ }
+ llvm_unreachable("All TSK values handled.");
+}
+
+FunctionDecl *FunctionDecl::getTemplateInstantiationPattern() const {
+ // Handle class scope explicit specialization special case.
+ if (getTemplateSpecializationKind() == TSK_ExplicitSpecialization)
+ return getClassScopeSpecializationPattern();
+
+ if (FunctionTemplateDecl *Primary = getPrimaryTemplate()) {
+ while (Primary->getInstantiatedFromMemberTemplate()) {
+ // If we have hit a point where the user provided a specialization of
+ // this template, we're done looking.
+ if (Primary->isMemberSpecialization())
+ break;
+
+ Primary = Primary->getInstantiatedFromMemberTemplate();
+ }
+
+ return Primary->getTemplatedDecl();
+ }
+
+ return getInstantiatedFromMemberFunction();
+}
+
+FunctionTemplateDecl *FunctionDecl::getPrimaryTemplate() const {
+ if (FunctionTemplateSpecializationInfo *Info
+ = TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo*>()) {
+ return Info->Template.getPointer();
+ }
+ return 0;
+}
+
+FunctionDecl *FunctionDecl::getClassScopeSpecializationPattern() const {
+ return getASTContext().getClassScopeSpecializationPattern(this);
+}
+
+const TemplateArgumentList *
+FunctionDecl::getTemplateSpecializationArgs() const {
+ if (FunctionTemplateSpecializationInfo *Info
+ = TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo*>()) {
+ return Info->TemplateArguments;
+ }
+ return 0;
+}
+
+const ASTTemplateArgumentListInfo *
+FunctionDecl::getTemplateSpecializationArgsAsWritten() const {
+ if (FunctionTemplateSpecializationInfo *Info
+ = TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo*>()) {
+ return Info->TemplateArgumentsAsWritten;
+ }
+ return 0;
+}
+
+void
+FunctionDecl::setFunctionTemplateSpecialization(ASTContext &C,
+ FunctionTemplateDecl *Template,
+ const TemplateArgumentList *TemplateArgs,
+ void *InsertPos,
+ TemplateSpecializationKind TSK,
+ const TemplateArgumentListInfo *TemplateArgsAsWritten,
+ SourceLocation PointOfInstantiation) {
+ assert(TSK != TSK_Undeclared &&
+ "Must specify the type of function template specialization");
+ FunctionTemplateSpecializationInfo *Info
+ = TemplateOrSpecialization.dyn_cast<FunctionTemplateSpecializationInfo*>();
+ if (!Info)
+ Info = FunctionTemplateSpecializationInfo::Create(C, this, Template, TSK,
+ TemplateArgs,
+ TemplateArgsAsWritten,
+ PointOfInstantiation);
+ TemplateOrSpecialization = Info;
+ Template->addSpecialization(Info, InsertPos);
+}
+
+void
+FunctionDecl::setDependentTemplateSpecialization(ASTContext &Context,
+ const UnresolvedSetImpl &Templates,
+ const TemplateArgumentListInfo &TemplateArgs) {
+ assert(TemplateOrSpecialization.isNull());
+ size_t Size = sizeof(DependentFunctionTemplateSpecializationInfo);
+ Size += Templates.size() * sizeof(FunctionTemplateDecl*);
+ Size += TemplateArgs.size() * sizeof(TemplateArgumentLoc);
+ void *Buffer = Context.Allocate(Size);
+ DependentFunctionTemplateSpecializationInfo *Info =
+ new (Buffer) DependentFunctionTemplateSpecializationInfo(Templates,
+ TemplateArgs);
+ TemplateOrSpecialization = Info;
+}
+
+DependentFunctionTemplateSpecializationInfo::
+DependentFunctionTemplateSpecializationInfo(const UnresolvedSetImpl &Ts,
+ const TemplateArgumentListInfo &TArgs)
+ : AngleLocs(TArgs.getLAngleLoc(), TArgs.getRAngleLoc()) {
+
+ d.NumTemplates = Ts.size();
+ d.NumArgs = TArgs.size();
+
+ FunctionTemplateDecl **TsArray =
+ const_cast<FunctionTemplateDecl**>(getTemplates());
+ for (unsigned I = 0, E = Ts.size(); I != E; ++I)
+ TsArray[I] = cast<FunctionTemplateDecl>(Ts[I]->getUnderlyingDecl());
+
+ TemplateArgumentLoc *ArgsArray =
+ const_cast<TemplateArgumentLoc*>(getTemplateArgs());
+ for (unsigned I = 0, E = TArgs.size(); I != E; ++I)
+ new (&ArgsArray[I]) TemplateArgumentLoc(TArgs[I]);
+}
+
+TemplateSpecializationKind FunctionDecl::getTemplateSpecializationKind() const {
+ // For a function template specialization, query the specialization
+ // information object.
+ FunctionTemplateSpecializationInfo *FTSInfo
+ = TemplateOrSpecialization.dyn_cast<FunctionTemplateSpecializationInfo*>();
+ if (FTSInfo)
+ return FTSInfo->getTemplateSpecializationKind();
+
+ MemberSpecializationInfo *MSInfo
+ = TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>();
+ if (MSInfo)
+ return MSInfo->getTemplateSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
+void
+FunctionDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation) {
+ if (FunctionTemplateSpecializationInfo *FTSInfo
+ = TemplateOrSpecialization.dyn_cast<
+ FunctionTemplateSpecializationInfo*>()) {
+ FTSInfo->setTemplateSpecializationKind(TSK);
+ if (TSK != TSK_ExplicitSpecialization &&
+ PointOfInstantiation.isValid() &&
+ FTSInfo->getPointOfInstantiation().isInvalid())
+ FTSInfo->setPointOfInstantiation(PointOfInstantiation);
+ } else if (MemberSpecializationInfo *MSInfo
+ = TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>()) {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ if (TSK != TSK_ExplicitSpecialization &&
+ PointOfInstantiation.isValid() &&
+ MSInfo->getPointOfInstantiation().isInvalid())
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+ } else
+ llvm_unreachable("Function cannot have a template specialization kind");
+}
+
+SourceLocation FunctionDecl::getPointOfInstantiation() const {
+ if (FunctionTemplateSpecializationInfo *FTSInfo
+ = TemplateOrSpecialization.dyn_cast<
+ FunctionTemplateSpecializationInfo*>())
+ return FTSInfo->getPointOfInstantiation();
+ else if (MemberSpecializationInfo *MSInfo
+ = TemplateOrSpecialization.dyn_cast<MemberSpecializationInfo*>())
+ return MSInfo->getPointOfInstantiation();
+
+ return SourceLocation();
+}
+
+bool FunctionDecl::isOutOfLine() const {
+ if (Decl::isOutOfLine())
+ return true;
+
+ // If this function was instantiated from a member function of a
+ // class template, check whether that member function was defined out-of-line.
+ if (FunctionDecl *FD = getInstantiatedFromMemberFunction()) {
+ const FunctionDecl *Definition;
+ if (FD->hasBody(Definition))
+ return Definition->isOutOfLine();
+ }
+
+ // If this function was instantiated from a function template,
+ // check whether that function template was defined out-of-line.
+ if (FunctionTemplateDecl *FunTmpl = getPrimaryTemplate()) {
+ const FunctionDecl *Definition;
+ if (FunTmpl->getTemplatedDecl()->hasBody(Definition))
+ return Definition->isOutOfLine();
+ }
+
+ return false;
+}
+
+SourceRange FunctionDecl::getSourceRange() const {
+ return SourceRange(getOuterLocStart(), EndRangeLoc);
+}
+
+unsigned FunctionDecl::getMemoryFunctionKind() const {
+ IdentifierInfo *FnInfo = getIdentifier();
+
+ if (!FnInfo)
+ return 0;
+
+ // Builtin handling.
+ switch (getBuiltinID()) {
+ case Builtin::BI__builtin_memset:
+ case Builtin::BI__builtin___memset_chk:
+ case Builtin::BImemset:
+ return Builtin::BImemset;
+
+ case Builtin::BI__builtin_memcpy:
+ case Builtin::BI__builtin___memcpy_chk:
+ case Builtin::BImemcpy:
+ return Builtin::BImemcpy;
+
+ case Builtin::BI__builtin_memmove:
+ case Builtin::BI__builtin___memmove_chk:
+ case Builtin::BImemmove:
+ return Builtin::BImemmove;
+
+ case Builtin::BIstrlcpy:
+ return Builtin::BIstrlcpy;
+ case Builtin::BIstrlcat:
+ return Builtin::BIstrlcat;
+
+ case Builtin::BI__builtin_memcmp:
+ case Builtin::BImemcmp:
+ return Builtin::BImemcmp;
+
+ case Builtin::BI__builtin_strncpy:
+ case Builtin::BI__builtin___strncpy_chk:
+ case Builtin::BIstrncpy:
+ return Builtin::BIstrncpy;
+
+ case Builtin::BI__builtin_strncmp:
+ case Builtin::BIstrncmp:
+ return Builtin::BIstrncmp;
+
+ case Builtin::BI__builtin_strncasecmp:
+ case Builtin::BIstrncasecmp:
+ return Builtin::BIstrncasecmp;
+
+ case Builtin::BI__builtin_strncat:
+ case Builtin::BI__builtin___strncat_chk:
+ case Builtin::BIstrncat:
+ return Builtin::BIstrncat;
+
+ case Builtin::BI__builtin_strndup:
+ case Builtin::BIstrndup:
+ return Builtin::BIstrndup;
+
+ case Builtin::BI__builtin_strlen:
+ case Builtin::BIstrlen:
+ return Builtin::BIstrlen;
+
+ default:
+ if (isExternC()) {
+ if (FnInfo->isStr("memset"))
+ return Builtin::BImemset;
+ else if (FnInfo->isStr("memcpy"))
+ return Builtin::BImemcpy;
+ else if (FnInfo->isStr("memmove"))
+ return Builtin::BImemmove;
+ else if (FnInfo->isStr("memcmp"))
+ return Builtin::BImemcmp;
+ else if (FnInfo->isStr("strncpy"))
+ return Builtin::BIstrncpy;
+ else if (FnInfo->isStr("strncmp"))
+ return Builtin::BIstrncmp;
+ else if (FnInfo->isStr("strncasecmp"))
+ return Builtin::BIstrncasecmp;
+ else if (FnInfo->isStr("strncat"))
+ return Builtin::BIstrncat;
+ else if (FnInfo->isStr("strndup"))
+ return Builtin::BIstrndup;
+ else if (FnInfo->isStr("strlen"))
+ return Builtin::BIstrlen;
+ }
+ break;
+ }
+ return 0;
+}
+
+//===----------------------------------------------------------------------===//
+// FieldDecl Implementation
+//===----------------------------------------------------------------------===//
+
+FieldDecl *FieldDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, QualType T,
+ TypeSourceInfo *TInfo, Expr *BW, bool Mutable,
+ bool HasInit) {
+ return new (C) FieldDecl(Decl::Field, DC, StartLoc, IdLoc, Id, T, TInfo,
+ BW, Mutable, HasInit);
+}
+
+FieldDecl *FieldDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FieldDecl));
+ return new (Mem) FieldDecl(Field, 0, SourceLocation(), SourceLocation(),
+ 0, QualType(), 0, 0, false, false);
+}
+
+bool FieldDecl::isAnonymousStructOrUnion() const {
+ if (!isImplicit() || getDeclName())
+ return false;
+
+ if (const RecordType *Record = getType()->getAs<RecordType>())
+ return Record->getDecl()->isAnonymousStructOrUnion();
+
+ return false;
+}
+
+unsigned FieldDecl::getBitWidthValue(const ASTContext &Ctx) const {
+ assert(isBitField() && "not a bitfield");
+ Expr *BitWidth = InitializerOrBitWidth.getPointer();
+ return BitWidth->EvaluateKnownConstInt(Ctx).getZExtValue();
+}
+
+unsigned FieldDecl::getFieldIndex() const {
+ if (CachedFieldIndex) return CachedFieldIndex - 1;
+
+ unsigned Index = 0;
+ const RecordDecl *RD = getParent();
+ const FieldDecl *LastFD = 0;
+ bool IsMsStruct = RD->hasAttr<MsStructAttr>();
+
+ for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I, ++Index) {
+ (*I)->CachedFieldIndex = Index + 1;
+
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are ignored.
+ if (getASTContext().ZeroBitfieldFollowsNonBitfield((*I), LastFD)) {
+ --Index;
+ continue;
+ }
+ LastFD = (*I);
+ }
+ }
+
+ assert(CachedFieldIndex && "failed to find field in parent");
+ return CachedFieldIndex - 1;
+}
+
+SourceRange FieldDecl::getSourceRange() const {
+ if (const Expr *E = InitializerOrBitWidth.getPointer())
+ return SourceRange(getInnerLocStart(), E->getLocEnd());
+ return DeclaratorDecl::getSourceRange();
+}
+
+void FieldDecl::setInClassInitializer(Expr *Init) {
+ assert(!InitializerOrBitWidth.getPointer() &&
+ "bit width or initializer already set");
+ InitializerOrBitWidth.setPointer(Init);
+ InitializerOrBitWidth.setInt(0);
+}
+
+//===----------------------------------------------------------------------===//
+// TagDecl Implementation
+//===----------------------------------------------------------------------===//
+
+SourceLocation TagDecl::getOuterLocStart() const {
+ return getTemplateOrInnerLocStart(this);
+}
+
+SourceRange TagDecl::getSourceRange() const {
+ SourceLocation E = RBraceLoc.isValid() ? RBraceLoc : getLocation();
+ return SourceRange(getOuterLocStart(), E);
+}
+
+TagDecl* TagDecl::getCanonicalDecl() {
+ return getFirstDeclaration();
+}
+
+void TagDecl::setTypedefNameForAnonDecl(TypedefNameDecl *TDD) {
+ TypedefNameDeclOrQualifier = TDD;
+ if (TypeForDecl)
+ const_cast<Type*>(TypeForDecl)->ClearLinkageCache();
+ ClearLinkageCache();
+}
+
+void TagDecl::startDefinition() {
+ IsBeingDefined = true;
+
+ if (isa<CXXRecordDecl>(this)) {
+ CXXRecordDecl *D = cast<CXXRecordDecl>(this);
+ struct CXXRecordDecl::DefinitionData *Data =
+ new (getASTContext()) struct CXXRecordDecl::DefinitionData(D);
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I)
+ cast<CXXRecordDecl>(*I)->DefinitionData = Data;
+ }
+}
+
+void TagDecl::completeDefinition() {
+ assert((!isa<CXXRecordDecl>(this) ||
+ cast<CXXRecordDecl>(this)->hasDefinition()) &&
+ "definition completed but not started");
+
+ IsCompleteDefinition = true;
+ IsBeingDefined = false;
+
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->CompletedTagDefinition(this);
+}
+
+TagDecl *TagDecl::getDefinition() const {
+ if (isCompleteDefinition())
+ return const_cast<TagDecl *>(this);
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(this))
+ return CXXRD->getDefinition();
+
+ for (redecl_iterator R = redecls_begin(), REnd = redecls_end();
+ R != REnd; ++R)
+ if (R->isCompleteDefinition())
+ return *R;
+
+ return 0;
+}
+
+void TagDecl::setQualifierInfo(NestedNameSpecifierLoc QualifierLoc) {
+ if (QualifierLoc) {
+ // Make sure the extended qualifier info is allocated.
+ if (!hasExtInfo())
+ TypedefNameDeclOrQualifier = new (getASTContext()) ExtInfo;
+ // Set qualifier info.
+ getExtInfo()->QualifierLoc = QualifierLoc;
+ } else {
+ // Here Qualifier == 0, i.e., we are removing the qualifier (if any).
+ if (hasExtInfo()) {
+ if (getExtInfo()->NumTemplParamLists == 0) {
+ getASTContext().Deallocate(getExtInfo());
+ TypedefNameDeclOrQualifier = (TypedefNameDecl*) 0;
+ }
+ else
+ getExtInfo()->QualifierLoc = QualifierLoc;
+ }
+ }
+}
+
+void TagDecl::setTemplateParameterListsInfo(ASTContext &Context,
+ unsigned NumTPLists,
+ TemplateParameterList **TPLists) {
+ assert(NumTPLists > 0);
+ // Make sure the extended decl info is allocated.
+ if (!hasExtInfo())
+ // Allocate external info struct.
+ TypedefNameDeclOrQualifier = new (getASTContext()) ExtInfo;
+ // Set the template parameter lists info.
+ getExtInfo()->setTemplateParameterListsInfo(Context, NumTPLists, TPLists);
+}
+
+//===----------------------------------------------------------------------===//
+// EnumDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void EnumDecl::anchor() { }
+
+EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id,
+ EnumDecl *PrevDecl, bool IsScoped,
+ bool IsScopedUsingClassTag, bool IsFixed) {
+ EnumDecl *Enum = new (C) EnumDecl(DC, StartLoc, IdLoc, Id, PrevDecl,
+ IsScoped, IsScopedUsingClassTag, IsFixed);
+ C.getTypeDeclType(Enum, PrevDecl);
+ return Enum;
+}
+
+EnumDecl *EnumDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(EnumDecl));
+ return new (Mem) EnumDecl(0, SourceLocation(), SourceLocation(), 0, 0,
+ false, false, false);
+}
+
+void EnumDecl::completeDefinition(QualType NewType,
+ QualType NewPromotionType,
+ unsigned NumPositiveBits,
+ unsigned NumNegativeBits) {
+ assert(!isCompleteDefinition() && "Cannot redefine enums!");
+ if (!IntegerType)
+ IntegerType = NewType.getTypePtr();
+ PromotionType = NewPromotionType;
+ setNumPositiveBits(NumPositiveBits);
+ setNumNegativeBits(NumNegativeBits);
+ TagDecl::completeDefinition();
+}
+
+TemplateSpecializationKind EnumDecl::getTemplateSpecializationKind() const {
+ if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
+ return MSI->getTemplateSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
+void EnumDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation) {
+ MemberSpecializationInfo *MSI = getMemberSpecializationInfo();
+ assert(MSI && "Not an instantiated member enumeration?");
+ MSI->setTemplateSpecializationKind(TSK);
+ if (TSK != TSK_ExplicitSpecialization &&
+ PointOfInstantiation.isValid() &&
+ MSI->getPointOfInstantiation().isInvalid())
+ MSI->setPointOfInstantiation(PointOfInstantiation);
+}
+
+EnumDecl *EnumDecl::getInstantiatedFromMemberEnum() const {
+ if (SpecializationInfo)
+ return cast<EnumDecl>(SpecializationInfo->getInstantiatedFrom());
+
+ return 0;
+}
+
+void EnumDecl::setInstantiationOfMemberEnum(ASTContext &C, EnumDecl *ED,
+ TemplateSpecializationKind TSK) {
+ assert(!SpecializationInfo && "Member enum is already a specialization");
+ SpecializationInfo = new (C) MemberSpecializationInfo(ED, TSK);
+}
+
+//===----------------------------------------------------------------------===//
+// RecordDecl Implementation
+//===----------------------------------------------------------------------===//
+
+RecordDecl::RecordDecl(Kind DK, TagKind TK, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, RecordDecl *PrevDecl)
+ : TagDecl(DK, TK, DC, IdLoc, Id, PrevDecl, StartLoc) {
+ HasFlexibleArrayMember = false;
+ AnonymousStructOrUnion = false;
+ HasObjectMember = false;
+ LoadedFieldsFromExternalStorage = false;
+ assert(classof(static_cast<Decl*>(this)) && "Invalid Kind!");
+}
+
+RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, RecordDecl* PrevDecl) {
+ RecordDecl* R = new (C) RecordDecl(Record, TK, DC, StartLoc, IdLoc, Id,
+ PrevDecl);
+ C.getTypeDeclType(R, PrevDecl);
+ return R;
+}
+
+RecordDecl *RecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(RecordDecl));
+ return new (Mem) RecordDecl(Record, TTK_Struct, 0, SourceLocation(),
+ SourceLocation(), 0, 0);
+}
+
+bool RecordDecl::isInjectedClassName() const {
+ return isImplicit() && getDeclName() && getDeclContext()->isRecord() &&
+ cast<RecordDecl>(getDeclContext())->getDeclName() == getDeclName();
+}
+
+RecordDecl::field_iterator RecordDecl::field_begin() const {
+ if (hasExternalLexicalStorage() && !LoadedFieldsFromExternalStorage)
+ LoadFieldsFromExternalStorage();
+
+ return field_iterator(decl_iterator(FirstDecl));
+}
+
+/// completeDefinition - Notes that the definition of this type is now
+/// complete.
+void RecordDecl::completeDefinition() {
+ assert(!isCompleteDefinition() && "Cannot redefine record!");
+ TagDecl::completeDefinition();
+}
+
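+/// LoadFieldsFromExternalStorage - Load just the fields of this record from
+/// the external AST source and build the declaration chain from them.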
+void RecordDecl::LoadFieldsFromExternalStorage() const {
+ ExternalASTSource *Source = getASTContext().getExternalSource();
+ assert(hasExternalLexicalStorage() && Source && "No external storage?");
+
+ // Notify that we have a RecordDecl doing some initialization.
+ ExternalASTSource::Deserializing TheFields(Source);
+
+ SmallVector<Decl*, 64> Decls;
+ LoadedFieldsFromExternalStorage = true;
+ switch (Source->FindExternalLexicalDeclsBy<FieldDecl>(this, Decls)) {
+ case ELR_Success:
+ break;
+
+ case ELR_AlreadyLoaded:
+ case ELR_Failure:
+ return;
+ }
+
+#ifndef NDEBUG
+ // Check that all decls we got were FieldDecls.
+ for (unsigned i=0, e=Decls.size(); i != e; ++i)
+ assert(isa<FieldDecl>(Decls[i]));
+#endif
+
+ if (Decls.empty())
+ return;
+
+ llvm::tie(FirstDecl, LastDecl) = BuildDeclChain(Decls,
+ /*FieldsAlreadyLoaded=*/false);
+}
+
+//===----------------------------------------------------------------------===//
+// BlockDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void BlockDecl::setParams(llvm::ArrayRef<ParmVarDecl *> NewParamInfo) {
+ assert(ParamInfo == 0 && "Already has param info!");
+
+ // Zero params -> null pointer.
+ if (!NewParamInfo.empty()) {
+ NumParams = NewParamInfo.size();
+ ParamInfo = new (getASTContext()) ParmVarDecl*[NewParamInfo.size()];
+ std::copy(NewParamInfo.begin(), NewParamInfo.end(), ParamInfo);
+ }
+}
+
+void BlockDecl::setCaptures(ASTContext &Context,
+ const Capture *begin,
+ const Capture *end,
+ bool capturesCXXThis) {
+ CapturesCXXThis = capturesCXXThis;
+
+ if (begin == end) {
+ NumCaptures = 0;
+ Captures = 0;
+ return;
+ }
+
+ NumCaptures = end - begin;
+
+ // Avoid new Capture[] because we don't want to provide a default
+ // constructor.
+ size_t allocationSize = NumCaptures * sizeof(Capture);
+ void *buffer = Context.Allocate(allocationSize, /*alignment*/sizeof(void*));
+ memcpy(buffer, begin, allocationSize);
+ Captures = static_cast<Capture*>(buffer);
+}
+
+bool BlockDecl::capturesVariable(const VarDecl *variable) const {
+ for (capture_const_iterator
+ i = capture_begin(), e = capture_end(); i != e; ++i)
+ // Only auto vars can be captured, so no redeclaration worries.
+ if (i->getVariable() == variable)
+ return true;
+
+ return false;
+}
+
+SourceRange BlockDecl::getSourceRange() const {
+ return SourceRange(getLocation(), Body? Body->getLocEnd() : getLocation());
+}
+
+//===----------------------------------------------------------------------===//
+// Other Decl Allocation/Deallocation Method Implementations
+//===----------------------------------------------------------------------===//
+
+void TranslationUnitDecl::anchor() { }
+
+TranslationUnitDecl *TranslationUnitDecl::Create(ASTContext &C) {
+ return new (C) TranslationUnitDecl(C);
+}
+
+void LabelDecl::anchor() { }
+
+LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation IdentL, IdentifierInfo *II) {
+ return new (C) LabelDecl(DC, IdentL, II, 0, IdentL);
+}
+
+LabelDecl *LabelDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation IdentL, IdentifierInfo *II,
+ SourceLocation GnuLabelL) {
+ assert(GnuLabelL != IdentL && "Use this only for GNU local labels");
+ return new (C) LabelDecl(DC, IdentL, II, 0, GnuLabelL);
+}
+
+LabelDecl *LabelDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(LabelDecl));
+ return new (Mem) LabelDecl(0, SourceLocation(), 0, 0, SourceLocation());
+}
+
+void ValueDecl::anchor() { }
+
+void ImplicitParamDecl::anchor() { }
+
+ImplicitParamDecl *ImplicitParamDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation IdLoc,
+ IdentifierInfo *Id,
+ QualType Type) {
+ return new (C) ImplicitParamDecl(DC, IdLoc, Id, Type);
+}
+
+ImplicitParamDecl *ImplicitParamDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ImplicitParamDecl));
+ return new (Mem) ImplicitParamDecl(0, SourceLocation(), 0, QualType());
+}
+
+FunctionDecl *FunctionDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ StorageClass SC, StorageClass SCAsWritten,
+ bool isInlineSpecified,
+ bool hasWrittenPrototype,
+ bool isConstexprSpecified) {
+ FunctionDecl *New = new (C) FunctionDecl(Function, DC, StartLoc, NameInfo,
+ T, TInfo, SC, SCAsWritten,
+ isInlineSpecified,
+ isConstexprSpecified);
+ New->HasWrittenPrototype = hasWrittenPrototype;
+ return New;
+}
+
+FunctionDecl *FunctionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FunctionDecl));
+ return new (Mem) FunctionDecl(Function, 0, SourceLocation(),
+ DeclarationNameInfo(), QualType(), 0,
+ SC_None, SC_None, false, false);
+}
+
+BlockDecl *BlockDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L) {
+ return new (C) BlockDecl(DC, L);
+}
+
+BlockDecl *BlockDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(BlockDecl));
+ return new (Mem) BlockDecl(0, SourceLocation());
+}
+
+EnumConstantDecl *EnumConstantDecl::Create(ASTContext &C, EnumDecl *CD,
+ SourceLocation L,
+ IdentifierInfo *Id, QualType T,
+ Expr *E, const llvm::APSInt &V) {
+ return new (C) EnumConstantDecl(CD, L, Id, T, E, V);
+}
+
+EnumConstantDecl *
+EnumConstantDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(EnumConstantDecl));
+ return new (Mem) EnumConstantDecl(0, SourceLocation(), 0, QualType(), 0,
+ llvm::APSInt());
+}
+
+void IndirectFieldDecl::anchor() { }
+
+IndirectFieldDecl *
+IndirectFieldDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
+ IdentifierInfo *Id, QualType T, NamedDecl **CH,
+ unsigned CHS) {
+ return new (C) IndirectFieldDecl(DC, L, Id, T, CH, CHS);
+}
+
+IndirectFieldDecl *IndirectFieldDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(IndirectFieldDecl));
+ return new (Mem) IndirectFieldDecl(0, SourceLocation(), DeclarationName(),
+ QualType(), 0, 0);
+}
+
+SourceRange EnumConstantDecl::getSourceRange() const {
+ SourceLocation End = getLocation();
+ if (Init)
+ End = Init->getLocEnd();
+ return SourceRange(getLocation(), End);
+}
+
+void TypeDecl::anchor() { }
+
+TypedefDecl *TypedefDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, TypeSourceInfo *TInfo) {
+ return new (C) TypedefDecl(DC, StartLoc, IdLoc, Id, TInfo);
+}
+
+void TypedefNameDecl::anchor() { }
+
+TypedefDecl *TypedefDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypedefDecl));
+ return new (Mem) TypedefDecl(0, SourceLocation(), SourceLocation(), 0, 0);
+}
+
+TypeAliasDecl *TypeAliasDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ TypeSourceInfo *TInfo) {
+ return new (C) TypeAliasDecl(DC, StartLoc, IdLoc, Id, TInfo);
+}
+
+TypeAliasDecl *TypeAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypeAliasDecl));
+ return new (Mem) TypeAliasDecl(0, SourceLocation(), SourceLocation(), 0, 0);
+}
+
+SourceRange TypedefDecl::getSourceRange() const {
+ SourceLocation RangeEnd = getLocation();
+ if (TypeSourceInfo *TInfo = getTypeSourceInfo()) {
+ if (typeIsPostfix(TInfo->getType()))
+ RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd();
+ }
+ return SourceRange(getLocStart(), RangeEnd);
+}
+
+SourceRange TypeAliasDecl::getSourceRange() const {
+ SourceLocation RangeEnd = getLocStart();
+ if (TypeSourceInfo *TInfo = getTypeSourceInfo())
+ RangeEnd = TInfo->getTypeLoc().getSourceRange().getEnd();
+ return SourceRange(getLocStart(), RangeEnd);
+}
+
+void FileScopeAsmDecl::anchor() { }
+
+FileScopeAsmDecl *FileScopeAsmDecl::Create(ASTContext &C, DeclContext *DC,
+ StringLiteral *Str,
+ SourceLocation AsmLoc,
+ SourceLocation RParenLoc) {
+ return new (C) FileScopeAsmDecl(DC, Str, AsmLoc, RParenLoc);
+}
+
+FileScopeAsmDecl *FileScopeAsmDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FileScopeAsmDecl));
+ return new (Mem) FileScopeAsmDecl(0, 0, SourceLocation(), SourceLocation());
+}
+
+//===----------------------------------------------------------------------===//
+// ImportDecl Implementation
+//===----------------------------------------------------------------------===//
+
+/// \brief Retrieve the number of module identifiers needed to name the given
+/// module.
+static unsigned getNumModuleIdentifiers(Module *Mod) {
+ unsigned Result = 1;
+ while (Mod->Parent) {
+ Mod = Mod->Parent;
+ ++Result;
+ }
+ return Result;
+}
+
+ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
+ Module *Imported,
+ ArrayRef<SourceLocation> IdentifierLocs)
+ : Decl(Import, DC, StartLoc), ImportedAndComplete(Imported, true),
+ NextLocalImport()
+{
+ assert(getNumModuleIdentifiers(Imported) == IdentifierLocs.size());
+ SourceLocation *StoredLocs = reinterpret_cast<SourceLocation *>(this + 1);
+ memcpy(StoredLocs, IdentifierLocs.data(),
+ IdentifierLocs.size() * sizeof(SourceLocation));
+}
+
+ImportDecl::ImportDecl(DeclContext *DC, SourceLocation StartLoc,
+ Module *Imported, SourceLocation EndLoc)
+ : Decl(Import, DC, StartLoc), ImportedAndComplete(Imported, false),
+ NextLocalImport()
+{
+ *reinterpret_cast<SourceLocation *>(this + 1) = EndLoc;
+}
+
+ImportDecl *ImportDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, Module *Imported,
+ ArrayRef<SourceLocation> IdentifierLocs) {
+ void *Mem = C.Allocate(sizeof(ImportDecl) +
+ IdentifierLocs.size() * sizeof(SourceLocation));
+ return new (Mem) ImportDecl(DC, StartLoc, Imported, IdentifierLocs);
+}
+
+ImportDecl *ImportDecl::CreateImplicit(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc,
+ Module *Imported,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(sizeof(ImportDecl) + sizeof(SourceLocation));
+ ImportDecl *Import = new (Mem) ImportDecl(DC, StartLoc, Imported, EndLoc);
+ Import->setImplicit();
+ return Import;
+}
+
+ImportDecl *ImportDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+ unsigned NumLocations) {
+ void *Mem = AllocateDeserializedDecl(C, ID,
+ (sizeof(ImportDecl) +
+ NumLocations * sizeof(SourceLocation)));
+ return new (Mem) ImportDecl(EmptyShell());
+}
+
+ArrayRef<SourceLocation> ImportDecl::getIdentifierLocs() const {
+ if (!ImportedAndComplete.getInt())
+ return ArrayRef<SourceLocation>();
+
+ const SourceLocation *StoredLocs
+ = reinterpret_cast<const SourceLocation *>(this + 1);
+ return ArrayRef<SourceLocation>(StoredLocs,
+ getNumModuleIdentifiers(getImportedModule()));
+}
+
+SourceRange ImportDecl::getSourceRange() const {
+ if (!ImportedAndComplete.getInt())
+ return SourceRange(getLocation(),
+ *reinterpret_cast<const SourceLocation *>(this + 1));
+
+ return SourceRange(getLocation(), getIdentifierLocs().back());
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
new file mode 100644
index 0000000..47a0d25
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DeclBase.cpp
@@ -0,0 +1,1441 @@
+//===--- DeclBase.cpp - Declaration AST Node Implementation ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Decl and DeclContext classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclContextInternals.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DependentDiagnostic.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Statistics
+//===----------------------------------------------------------------------===//
+
+#define DECL(DERIVED, BASE) static int n##DERIVED##s = 0;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+
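+/// AllocateDeserializedDecl - Allocate memory for a declaration that is being
+/// deserialized, reserving an 8-byte prefix that stores the owning module ID
+/// and the global declaration ID.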
+void *Decl::AllocateDeserializedDecl(const ASTContext &Context,
+ unsigned ID,
+ unsigned Size) {
+ // Allocate an extra 8 bytes worth of storage, which ensures that the
+ // resulting pointer will still be 8-byte aligned.
+ void *Start = Context.Allocate(Size + 8);
+ void *Result = (char*)Start + 8;
+
+ unsigned *PrefixPtr = (unsigned *)Result - 2;
+
+ // Zero out the first 4 bytes; this is used to store the owning module ID.
+ PrefixPtr[0] = 0;
+
+ // Store the global declaration ID in the second 4 bytes.
+ PrefixPtr[1] = ID;
+
+ return Result;
+}
+
+const char *Decl::getDeclKindName() const {
+ switch (DeclKind) {
+ default: llvm_unreachable("Declaration not in DeclNodes.inc!");
+#define DECL(DERIVED, BASE) case DERIVED: return #DERIVED;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ }
+}
+
+void Decl::setInvalidDecl(bool Invalid) {
+ InvalidDecl = Invalid;
+ if (Invalid && !isa<ParmVarDecl>(this)) {
+ // Defensive maneuver for ill-formed code: we're likely not to make it to
+ // a point where we set the access specifier, so default it to "public"
+ // to avoid triggering asserts elsewhere in the front end.
+ setAccess(AS_public);
+ }
+}
+
+const char *DeclContext::getDeclKindName() const {
+ switch (DeclKind) {
+ default: llvm_unreachable("Declaration context not in DeclNodes.inc!");
+#define DECL(DERIVED, BASE) case Decl::DERIVED: return #DERIVED;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ }
+}
+
+bool Decl::StatisticsEnabled = false;
+void Decl::EnableStatistics() {
+ StatisticsEnabled = true;
+}
+
+void Decl::PrintStats() {
+ llvm::errs() << "\n*** Decl Stats:\n";
+
+ int totalDecls = 0;
+#define DECL(DERIVED, BASE) totalDecls += n##DERIVED##s;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ llvm::errs() << " " << totalDecls << " decls total.\n";
+
+ int totalBytes = 0;
+#define DECL(DERIVED, BASE) \
+ if (n##DERIVED##s > 0) { \
+ totalBytes += (int)(n##DERIVED##s * sizeof(DERIVED##Decl)); \
+ llvm::errs() << " " << n##DERIVED##s << " " #DERIVED " decls, " \
+ << sizeof(DERIVED##Decl) << " each (" \
+ << n##DERIVED##s * sizeof(DERIVED##Decl) \
+ << " bytes)\n"; \
+ }
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+
+ llvm::errs() << "Total bytes = " << totalBytes << "\n";
+}
+
+void Decl::add(Kind k) {
+ switch (k) {
+#define DECL(DERIVED, BASE) case DERIVED: ++n##DERIVED##s; break;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ }
+}
+
+bool Decl::isTemplateParameterPack() const {
+ if (const TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(this))
+ return TTP->isParameterPack();
+ if (const NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(this))
+ return NTTP->isParameterPack();
+ if (const TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(this))
+ return TTP->isParameterPack();
+ return false;
+}
+
+bool Decl::isParameterPack() const {
+ if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(this))
+ return Parm->isParameterPack();
+
+ return isTemplateParameterPack();
+}
+
+bool Decl::isFunctionOrFunctionTemplate() const {
+ if (const UsingShadowDecl *UD = dyn_cast<UsingShadowDecl>(this))
+ return UD->getTargetDecl()->isFunctionOrFunctionTemplate();
+
+ return isa<FunctionDecl>(this) || isa<FunctionTemplateDecl>(this);
+}
+
+bool Decl::isTemplateDecl() const {
+ return isa<TemplateDecl>(this);
+}
+
+const DeclContext *Decl::getParentFunctionOrMethod() const {
+ for (const DeclContext *DC = getDeclContext();
+ DC && !DC->isTranslationUnit() && !DC->isNamespace();
+ DC = DC->getParent())
+ if (DC->isFunctionOrMethod())
+ return DC;
+
+ return 0;
+}
+
+
+//===----------------------------------------------------------------------===//
+// PrettyStackTraceDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void PrettyStackTraceDecl::print(raw_ostream &OS) const {
+ SourceLocation TheLoc = Loc;
+ if (TheLoc.isInvalid() && TheDecl)
+ TheLoc = TheDecl->getLocation();
+
+ if (TheLoc.isValid()) {
+ TheLoc.print(OS, SM);
+ OS << ": ";
+ }
+
+ OS << Message;
+
+ if (const NamedDecl *DN = dyn_cast_or_null<NamedDecl>(TheDecl))
+ OS << " '" << DN->getQualifiedNameAsString() << '\'';
+ OS << '\n';
+}
+
+//===----------------------------------------------------------------------===//
+// Decl Implementation
+//===----------------------------------------------------------------------===//
+
+// Out-of-line virtual method providing a home for Decl.
+Decl::~Decl() { }
+
+void Decl::setDeclContext(DeclContext *DC) {
+ DeclCtx = DC;
+}
+
+void Decl::setLexicalDeclContext(DeclContext *DC) {
+ if (DC == getLexicalDeclContext())
+ return;
+
+ if (isInSemaDC()) {
+ setDeclContextsImpl(getDeclContext(), DC, getASTContext());
+ } else {
+ getMultipleDC()->LexicalDC = DC;
+ }
+}
+
+void Decl::setDeclContextsImpl(DeclContext *SemaDC, DeclContext *LexicalDC,
+ ASTContext &Ctx) {
+ if (SemaDC == LexicalDC) {
+ DeclCtx = SemaDC;
+ } else {
+ Decl::MultipleDC *MDC = new (Ctx) Decl::MultipleDC();
+ MDC->SemanticDC = SemaDC;
+ MDC->LexicalDC = LexicalDC;
+ DeclCtx = MDC;
+ }
+}
+
+bool Decl::isInAnonymousNamespace() const {
+ const DeclContext *DC = getDeclContext();
+ do {
+ if (const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC))
+ if (ND->isAnonymousNamespace())
+ return true;
+ } while ((DC = DC->getParent()));
+
+ return false;
+}
+
+TranslationUnitDecl *Decl::getTranslationUnitDecl() {
+ if (TranslationUnitDecl *TUD = dyn_cast<TranslationUnitDecl>(this))
+ return TUD;
+
+ DeclContext *DC = getDeclContext();
+ assert(DC && "This decl is not contained in a translation unit!");
+
+ while (!DC->isTranslationUnit()) {
+ DC = DC->getParent();
+ assert(DC && "This decl is not contained in a translation unit!");
+ }
+
+ return cast<TranslationUnitDecl>(DC);
+}
+
+ASTContext &Decl::getASTContext() const {
+ return getTranslationUnitDecl()->getASTContext();
+}
+
+ASTMutationListener *Decl::getASTMutationListener() const {
+ return getASTContext().getASTMutationListener();
+}
+
+bool Decl::isUsed(bool CheckUsedAttr) const {
+ if (Used)
+ return true;
+
+ // Check for used attribute.
+ if (CheckUsedAttr && hasAttr<UsedAttr>())
+ return true;
+
+ // Check redeclarations for used attribute.
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+ if ((CheckUsedAttr && I->hasAttr<UsedAttr>()) || I->Used)
+ return true;
+ }
+
+ return false;
+}
+
+bool Decl::isReferenced() const {
+ if (Referenced)
+ return true;
+
+ // Check redeclarations.
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I)
+ if (I->Referenced)
+ return true;
+
+ return false;
+}
+
+/// \brief Determine the availability of the given declaration based on
+/// the target platform.
+///
+/// When it returns an availability result other than \c AR_Available,
+/// if the \p Message parameter is non-NULL, it will be set to a
+/// string describing why the entity is unavailable.
+///
+/// FIXME: Make these strings localizable, since they end up in
+/// diagnostics.
+static AvailabilityResult CheckAvailability(ASTContext &Context,
+ const AvailabilityAttr *A,
+ std::string *Message) {
+ StringRef TargetPlatform = Context.getTargetInfo().getPlatformName();
+ StringRef PrettyPlatformName
+ = AvailabilityAttr::getPrettyPlatformName(TargetPlatform);
+ if (PrettyPlatformName.empty())
+ PrettyPlatformName = TargetPlatform;
+
+ VersionTuple TargetMinVersion = Context.getTargetInfo().getPlatformMinVersion();
+ if (TargetMinVersion.empty())
+ return AR_Available;
+
+ // Match the platform name.
+ if (A->getPlatform()->getName() != TargetPlatform)
+ return AR_Available;
+
+ std::string HintMessage;
+ if (!A->getMessage().empty()) {
+ HintMessage = " - ";
+ HintMessage += A->getMessage();
+ }
+
+ // Make sure that this declaration has not been marked 'unavailable'.
+ if (A->getUnavailable()) {
+ if (Message) {
+ Message->clear();
+ llvm::raw_string_ostream Out(*Message);
+ Out << "not available on " << PrettyPlatformName
+ << HintMessage;
+ }
+
+ return AR_Unavailable;
+ }
+
+ // Make sure that this declaration has already been introduced.
+ if (!A->getIntroduced().empty() &&
+ TargetMinVersion < A->getIntroduced()) {
+ if (Message) {
+ Message->clear();
+ llvm::raw_string_ostream Out(*Message);
+ Out << "introduced in " << PrettyPlatformName << ' '
+ << A->getIntroduced() << HintMessage;
+ }
+
+ return AR_NotYetIntroduced;
+ }
+
+ // Make sure that this declaration hasn't been obsoleted.
+ if (!A->getObsoleted().empty() && TargetMinVersion >= A->getObsoleted()) {
+ if (Message) {
+ Message->clear();
+ llvm::raw_string_ostream Out(*Message);
+ Out << "obsoleted in " << PrettyPlatformName << ' '
+ << A->getObsoleted() << HintMessage;
+ }
+
+ return AR_Unavailable;
+ }
+
+ // Make sure that this declaration hasn't been deprecated.
+ if (!A->getDeprecated().empty() && TargetMinVersion >= A->getDeprecated()) {
+ if (Message) {
+ Message->clear();
+ llvm::raw_string_ostream Out(*Message);
+ Out << "first deprecated in " << PrettyPlatformName << ' '
+ << A->getDeprecated() << HintMessage;
+ }
+
+ return AR_Deprecated;
+ }
+
+ return AR_Available;
+}
+
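+/// getAvailability - Scan the deprecated, unavailable, and availability
+/// attributes on this declaration and return the most severe result, filling
+/// in the corresponding message when one is requested.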
+AvailabilityResult Decl::getAvailability(std::string *Message) const {
+ AvailabilityResult Result = AR_Available;
+ std::string ResultMessage;
+
+ for (attr_iterator A = attr_begin(), AEnd = attr_end(); A != AEnd; ++A) {
+ if (DeprecatedAttr *Deprecated = dyn_cast<DeprecatedAttr>(*A)) {
+ if (Result >= AR_Deprecated)
+ continue;
+
+ if (Message)
+ ResultMessage = Deprecated->getMessage();
+
+ Result = AR_Deprecated;
+ continue;
+ }
+
+ if (UnavailableAttr *Unavailable = dyn_cast<UnavailableAttr>(*A)) {
+ if (Message)
+ *Message = Unavailable->getMessage();
+ return AR_Unavailable;
+ }
+
+ if (AvailabilityAttr *Availability = dyn_cast<AvailabilityAttr>(*A)) {
+ AvailabilityResult AR = CheckAvailability(getASTContext(), Availability,
+ Message);
+
+ if (AR == AR_Unavailable)
+ return AR_Unavailable;
+
+ if (AR > Result) {
+ Result = AR;
+ if (Message)
+ ResultMessage.swap(*Message);
+ }
+ continue;
+ }
+ }
+
+ if (Message)
+ Message->swap(ResultMessage);
+ return Result;
+}
+
+bool Decl::canBeWeakImported(bool &IsDefinition) const {
+ IsDefinition = false;
+ if (const VarDecl *Var = dyn_cast<VarDecl>(this)) {
+ if (!Var->hasExternalStorage() || Var->getInit()) {
+ IsDefinition = true;
+ return false;
+ }
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this)) {
+ if (FD->hasBody()) {
+ IsDefinition = true;
+ return false;
+ }
+ } else if (isa<ObjCPropertyDecl>(this) || isa<ObjCMethodDecl>(this))
+ return false;
+ else if (!(getASTContext().getLangOpts().ObjCNonFragileABI &&
+ isa<ObjCInterfaceDecl>(this)))
+ return false;
+
+ return true;
+}
+
+bool Decl::isWeakImported() const {
+ bool IsDefinition;
+ if (!canBeWeakImported(IsDefinition))
+ return false;
+
+ for (attr_iterator A = attr_begin(), AEnd = attr_end(); A != AEnd; ++A) {
+ if (isa<WeakImportAttr>(*A))
+ return true;
+
+ if (AvailabilityAttr *Availability = dyn_cast<AvailabilityAttr>(*A)) {
+ if (CheckAvailability(getASTContext(), Availability, 0)
+ == AR_NotYetIntroduced)
+ return true;
+ }
+ }
+
+ return false;
+}
+
+unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
+ switch (DeclKind) {
+ case Function:
+ case CXXMethod:
+ case CXXConstructor:
+ case CXXDestructor:
+ case CXXConversion:
+ case EnumConstant:
+ case Var:
+ case ImplicitParam:
+ case ParmVar:
+ case NonTypeTemplateParm:
+ case ObjCMethod:
+ case ObjCProperty:
+ return IDNS_Ordinary;
+ case Label:
+ return IDNS_Label;
+ case IndirectField:
+ return IDNS_Ordinary | IDNS_Member;
+
+ case ObjCCompatibleAlias:
+ case ObjCInterface:
+ return IDNS_Ordinary | IDNS_Type;
+
+ case Typedef:
+ case TypeAlias:
+ case TypeAliasTemplate:
+ case UnresolvedUsingTypename:
+ case TemplateTypeParm:
+ return IDNS_Ordinary | IDNS_Type;
+
+ case UsingShadow:
+ return 0; // we'll actually overwrite this later
+
+ case UnresolvedUsingValue:
+ return IDNS_Ordinary | IDNS_Using;
+
+ case Using:
+ return IDNS_Using;
+
+ case ObjCProtocol:
+ return IDNS_ObjCProtocol;
+
+ case Field:
+ case ObjCAtDefsField:
+ case ObjCIvar:
+ return IDNS_Member;
+
+ case Record:
+ case CXXRecord:
+ case Enum:
+ return IDNS_Tag | IDNS_Type;
+
+ case Namespace:
+ case NamespaceAlias:
+ return IDNS_Namespace;
+
+ case FunctionTemplate:
+ return IDNS_Ordinary;
+
+ case ClassTemplate:
+ case TemplateTemplateParm:
+ return IDNS_Ordinary | IDNS_Tag | IDNS_Type;
+
+ // Never have names.
+ case Friend:
+ case FriendTemplate:
+ case AccessSpec:
+ case LinkageSpec:
+ case FileScopeAsm:
+ case StaticAssert:
+ case ObjCPropertyImpl:
+ case Block:
+ case TranslationUnit:
+
+ case UsingDirective:
+ case ClassTemplateSpecialization:
+ case ClassTemplatePartialSpecialization:
+ case ClassScopeFunctionSpecialization:
+ case ObjCImplementation:
+ case ObjCCategory:
+ case ObjCCategoryImpl:
+ case Import:
+ // Never looked up by name.
+ return 0;
+ }
+
+ llvm_unreachable("Invalid DeclKind!");
+}
+
+void Decl::setAttrsImpl(const AttrVec &attrs, ASTContext &Ctx) {
+ assert(!HasAttrs && "Decl already contains attrs.");
+
+ AttrVec &AttrBlank = Ctx.getDeclAttrs(this);
+ assert(AttrBlank.empty() && "HasAttrs was wrong?");
+
+ AttrBlank = attrs;
+ HasAttrs = true;
+}
+
+void Decl::dropAttrs() {
+ if (!HasAttrs) return;
+
+ HasAttrs = false;
+ getASTContext().eraseDeclAttrs(this);
+}
+
+const AttrVec &Decl::getAttrs() const {
+ assert(HasAttrs && "No attrs to get!");
+ return getASTContext().getDeclAttrs(this);
+}
+
+void Decl::swapAttrs(Decl *RHS) {
+ bool HasLHSAttr = this->HasAttrs;
+ bool HasRHSAttr = RHS->HasAttrs;
+
+ // Usually, neither decl has attrs, nothing to do.
+ if (!HasLHSAttr && !HasRHSAttr) return;
+
+ // If 'this' has no attrs, swap the other way.
+ if (!HasLHSAttr)
+ return RHS->swapAttrs(this);
+
+ ASTContext &Context = getASTContext();
+
+ // Handle the case when both decls have attrs.
+ if (HasRHSAttr) {
+ std::swap(Context.getDeclAttrs(this), Context.getDeclAttrs(RHS));
+ return;
+ }
+
+ // Otherwise, LHS has an attr and RHS doesn't.
+ Context.getDeclAttrs(RHS) = Context.getDeclAttrs(this);
+ Context.eraseDeclAttrs(this);
+ this->HasAttrs = false;
+ RHS->HasAttrs = true;
+}
+
+Decl *Decl::castFromDeclContext (const DeclContext *D) {
+ Decl::Kind DK = D->getDeclKind();
+ switch(DK) {
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT(NAME) \
+ case Decl::NAME: \
+ return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D));
+#define DECL_CONTEXT_BASE(NAME)
+#include "clang/AST/DeclNodes.inc"
+ default:
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT_BASE(NAME) \
+ if (DK >= first##NAME && DK <= last##NAME) \
+ return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D));
+#include "clang/AST/DeclNodes.inc"
+ llvm_unreachable("a decl that inherits DeclContext isn't handled");
+ }
+}
+
+DeclContext *Decl::castToDeclContext(const Decl *D) {
+ Decl::Kind DK = D->getKind();
+ switch(DK) {
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT(NAME) \
+ case Decl::NAME: \
+ return static_cast<NAME##Decl*>(const_cast<Decl*>(D));
+#define DECL_CONTEXT_BASE(NAME)
+#include "clang/AST/DeclNodes.inc"
+ default:
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT_BASE(NAME) \
+ if (DK >= first##NAME && DK <= last##NAME) \
+ return static_cast<NAME##Decl*>(const_cast<Decl*>(D));
+#include "clang/AST/DeclNodes.inc"
+ llvm_unreachable("a decl that inherits DeclContext isn't handled");
+ }
+}
+
+SourceLocation Decl::getBodyRBrace() const {
+ // Special handling of FunctionDecl to avoid de-serializing the body from PCH.
+ // FunctionDecl stores EndRangeLoc for this purpose.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this)) {
+ const FunctionDecl *Definition;
+ if (FD->hasBody(Definition))
+ return Definition->getSourceRange().getEnd();
+ return SourceLocation();
+ }
+
+ if (Stmt *Body = getBody())
+ return Body->getSourceRange().getEnd();
+
+ return SourceLocation();
+}
+
+void Decl::CheckAccessDeclContext() const {
+#ifndef NDEBUG
+ // Suppress this check if any of the following hold:
+ // 1. this is the translation unit (and thus has no parent)
+ // 2. this is a template parameter (and thus doesn't belong to its context)
+ // 3. this is a non-type template parameter
+ // 4. the context is not a record
+ // 5. it's invalid
+ // 6. it's a C++0x static_assert.
+ if (isa<TranslationUnitDecl>(this) ||
+ isa<TemplateTypeParmDecl>(this) ||
+ isa<NonTypeTemplateParmDecl>(this) ||
+ !isa<CXXRecordDecl>(getDeclContext()) ||
+ isInvalidDecl() ||
+ isa<StaticAssertDecl>(this) ||
+ // FIXME: a ParmVarDecl can have ClassTemplateSpecialization
+ // as DeclContext (?).
+ isa<ParmVarDecl>(this) ||
+ // FIXME: a ClassTemplateSpecialization or CXXRecordDecl can have
+ // AS_none as access specifier.
+ isa<CXXRecordDecl>(this) ||
+ isa<ClassScopeFunctionSpecializationDecl>(this))
+ return;
+
+ assert(Access != AS_none &&
+ "Access specifier is AS_none inside a record decl");
+#endif
+}
+
+DeclContext *Decl::getNonClosureContext() {
+ return getDeclContext()->getNonClosureAncestor();
+}
+
+DeclContext *DeclContext::getNonClosureAncestor() {
+ DeclContext *DC = this;
+
+ // This is basically "while (DC->isClosure()) DC = DC->getParent();"
+ // except that it's significantly more efficient to cast to a known
+ // decl type and call getDeclContext() than to call getParent().
+ while (isa<BlockDecl>(DC))
+ DC = cast<BlockDecl>(DC)->getDeclContext();
+
+ assert(!DC->isClosure());
+ return DC;
+}
+
+//===----------------------------------------------------------------------===//
+// DeclContext Implementation
+//===----------------------------------------------------------------------===//
+
+bool DeclContext::classof(const Decl *D) {
+ switch (D->getKind()) {
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT(NAME) case Decl::NAME:
+#define DECL_CONTEXT_BASE(NAME)
+#include "clang/AST/DeclNodes.inc"
+ return true;
+ default:
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT_BASE(NAME) \
+ if (D->getKind() >= Decl::first##NAME && \
+ D->getKind() <= Decl::last##NAME) \
+ return true;
+#include "clang/AST/DeclNodes.inc"
+ return false;
+ }
+}
+
+DeclContext::~DeclContext() { }
+
+/// \brief Find the parent context of this context that will be
+/// used for unqualified name lookup.
+///
+/// Generally, the parent lookup context is the semantic context. However, for
+/// a friend function the parent lookup context is the lexical context, which
+/// is the class in which the friend is declared.
+DeclContext *DeclContext::getLookupParent() {
+ // FIXME: Find a better way to identify friends
+ if (isa<FunctionDecl>(this))
+ if (getParent()->getRedeclContext()->isFileContext() &&
+ getLexicalParent()->getRedeclContext()->isRecord())
+ return getLexicalParent();
+
+ return getParent();
+}
+
+bool DeclContext::isInlineNamespace() const {
+ return isNamespace() &&
+ cast<NamespaceDecl>(this)->isInline();
+}
+
+bool DeclContext::isDependentContext() const {
+ if (isFileContext())
+ return false;
+
+ if (isa<ClassTemplatePartialSpecializationDecl>(this))
+ return true;
+
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this)) {
+ if (Record->getDescribedClassTemplate())
+ return true;
+
+ if (Record->isDependentLambda())
+ return true;
+ }
+
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(this)) {
+ if (Function->getDescribedFunctionTemplate())
+ return true;
+
+ // Friend function declarations are dependent if their *lexical*
+ // context is dependent.
+ if (cast<Decl>(this)->getFriendObjectKind())
+ return getLexicalParent()->isDependentContext();
+ }
+
+ return getParent() && getParent()->isDependentContext();
+}
+
+bool DeclContext::isTransparentContext() const {
+ if (DeclKind == Decl::Enum)
+ return !cast<EnumDecl>(this)->isScoped();
+ else if (DeclKind == Decl::LinkageSpec)
+ return true;
+
+ return false;
+}
+
+bool DeclContext::isExternCContext() const {
+ const DeclContext *DC = this;
+ while (DC->DeclKind != Decl::TranslationUnit) {
+ if (DC->DeclKind == Decl::LinkageSpec)
+ return cast<LinkageSpecDecl>(DC)->getLanguage()
+ == LinkageSpecDecl::lang_c;
+ DC = DC->getParent();
+ }
+ return false;
+}
+
+bool DeclContext::Encloses(const DeclContext *DC) const {
+ if (getPrimaryContext() != this)
+ return getPrimaryContext()->Encloses(DC);
+
+ for (; DC; DC = DC->getParent())
+ if (DC->getPrimaryContext() == this)
+ return true;
+ return false;
+}
+
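+/// getPrimaryContext - Return the context that should be used for lookups and
+/// insertions: the original namespace for namespaces, the definition for tags
+/// and Objective-C interfaces/protocols, or this context itself.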
+DeclContext *DeclContext::getPrimaryContext() {
+ switch (DeclKind) {
+ case Decl::TranslationUnit:
+ case Decl::LinkageSpec:
+ case Decl::Block:
+ // There is only one DeclContext for these entities.
+ return this;
+
+ case Decl::Namespace:
+ // The original namespace is our primary context.
+ return static_cast<NamespaceDecl*>(this)->getOriginalNamespace();
+
+ case Decl::ObjCMethod:
+ return this;
+
+ case Decl::ObjCInterface:
+ if (ObjCInterfaceDecl *Def = cast<ObjCInterfaceDecl>(this)->getDefinition())
+ return Def;
+
+ return this;
+
+ case Decl::ObjCProtocol:
+ if (ObjCProtocolDecl *Def = cast<ObjCProtocolDecl>(this)->getDefinition())
+ return Def;
+
+ return this;
+
+ case Decl::ObjCCategory:
+ return this;
+
+ case Decl::ObjCImplementation:
+ case Decl::ObjCCategoryImpl:
+ return this;
+
+ default:
+ if (DeclKind >= Decl::firstTag && DeclKind <= Decl::lastTag) {
+ // If this is a tag type that has a definition or is currently
+ // being defined, that definition is our primary context.
+ TagDecl *Tag = cast<TagDecl>(this);
+ assert(isa<TagType>(Tag->TypeForDecl) ||
+ isa<InjectedClassNameType>(Tag->TypeForDecl));
+
+ if (TagDecl *Def = Tag->getDefinition())
+ return Def;
+
+ if (!isa<InjectedClassNameType>(Tag->TypeForDecl)) {
+ const TagType *TagTy = cast<TagType>(Tag->TypeForDecl);
+ if (TagTy->isBeingDefined())
+ // FIXME: is it necessarily being defined in the decl
+ // that owns the type?
+ return TagTy->getDecl();
+ }
+
+ return Tag;
+ }
+
+ assert(DeclKind >= Decl::firstFunction && DeclKind <= Decl::lastFunction &&
+ "Unknown DeclContext kind");
+ return this;
+ }
+}
+
+void
+DeclContext::collectAllContexts(llvm::SmallVectorImpl<DeclContext *> &Contexts){
+ Contexts.clear();
+
+ if (DeclKind != Decl::Namespace) {
+ Contexts.push_back(this);
+ return;
+ }
+
+ NamespaceDecl *Self = static_cast<NamespaceDecl *>(this);
+ for (NamespaceDecl *N = Self->getMostRecentDecl(); N;
+ N = N->getPreviousDecl())
+ Contexts.push_back(N);
+
+ std::reverse(Contexts.begin(), Contexts.end());
+}
+
+std::pair<Decl *, Decl *>
+DeclContext::BuildDeclChain(ArrayRef<Decl*> Decls,
+ bool FieldsAlreadyLoaded) {
+ // Build up a chain of declarations via the Decl::NextInContextAndBits field.
+ Decl *FirstNewDecl = 0;
+ Decl *PrevDecl = 0;
+ for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
+ if (FieldsAlreadyLoaded && isa<FieldDecl>(Decls[I]))
+ continue;
+
+ Decl *D = Decls[I];
+ if (PrevDecl)
+ PrevDecl->NextInContextAndBits.setPointer(D);
+ else
+ FirstNewDecl = D;
+
+ PrevDecl = D;
+ }
+
+ return std::make_pair(FirstNewDecl, PrevDecl);
+}
+
+/// \brief Load the declarations within this lexical storage from an
+/// external source.
+void
+DeclContext::LoadLexicalDeclsFromExternalStorage() const {
+ ExternalASTSource *Source = getParentASTContext().getExternalSource();
+ assert(hasExternalLexicalStorage() && Source && "No external storage?");
+
+ // Notify that we have a DeclContext that is initializing.
+ ExternalASTSource::Deserializing ADeclContext(Source);
+
+ // Load the external declarations, if any.
+ SmallVector<Decl*, 64> Decls;
+ ExternalLexicalStorage = false;
+ switch (Source->FindExternalLexicalDecls(this, Decls)) {
+ case ELR_Success:
+ break;
+
+ case ELR_Failure:
+ case ELR_AlreadyLoaded:
+ return;
+ }
+
+ if (Decls.empty())
+ return;
+
+ // We may have already loaded just the fields of this record, in which case
+ // we need to ignore them.
+ bool FieldsAlreadyLoaded = false;
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(this))
+ FieldsAlreadyLoaded = RD->LoadedFieldsFromExternalStorage;
+
+ // Splice the newly-read declarations into the beginning of the list
+ // of declarations.
+ Decl *ExternalFirst, *ExternalLast;
+ llvm::tie(ExternalFirst, ExternalLast) = BuildDeclChain(Decls,
+ FieldsAlreadyLoaded);
+ ExternalLast->NextInContextAndBits.setPointer(FirstDecl);
+ FirstDecl = ExternalFirst;
+ if (!LastDecl)
+ LastDecl = ExternalLast;
+}
+
+DeclContext::lookup_result
+ExternalASTSource::SetNoExternalVisibleDeclsForName(const DeclContext *DC,
+ DeclarationName Name) {
+ ASTContext &Context = DC->getParentASTContext();
+ StoredDeclsMap *Map;
+ if (!(Map = DC->LookupPtr.getPointer()))
+ Map = DC->CreateStoredDeclsMap(Context);
+
+ StoredDeclsList &List = (*Map)[Name];
+ assert(List.isNull());
+ (void) List;
+
+ return DeclContext::lookup_result();
+}
+
+DeclContext::lookup_result
+ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC,
+ DeclarationName Name,
+ ArrayRef<NamedDecl*> Decls) {
+  ASTContext &Context = DC->getParentASTContext();
+
+ StoredDeclsMap *Map;
+ if (!(Map = DC->LookupPtr.getPointer()))
+ Map = DC->CreateStoredDeclsMap(Context);
+
+ StoredDeclsList &List = (*Map)[Name];
+ for (ArrayRef<NamedDecl*>::iterator
+ I = Decls.begin(), E = Decls.end(); I != E; ++I) {
+ if (List.isNull())
+ List.setOnlyValue(*I);
+ else
+ List.AddSubsequentDecl(*I);
+ }
+
+ return List.getLookupResult();
+}
+
+DeclContext::decl_iterator DeclContext::noload_decls_begin() const {
+ return decl_iterator(FirstDecl);
+}
+
+DeclContext::decl_iterator DeclContext::noload_decls_end() const {
+ return decl_iterator();
+}
+
+DeclContext::decl_iterator DeclContext::decls_begin() const {
+ if (hasExternalLexicalStorage())
+ LoadLexicalDeclsFromExternalStorage();
+
+ return decl_iterator(FirstDecl);
+}
+
+DeclContext::decl_iterator DeclContext::decls_end() const {
+ if (hasExternalLexicalStorage())
+ LoadLexicalDeclsFromExternalStorage();
+
+ return decl_iterator();
+}
+
+bool DeclContext::decls_empty() const {
+ if (hasExternalLexicalStorage())
+ LoadLexicalDeclsFromExternalStorage();
+
+ return !FirstDecl;
+}
+
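+/// removeDecl - Unlink D from this context's declaration chain and, if it is
+/// named, from the primary context's lookup table.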
+void DeclContext::removeDecl(Decl *D) {
+ assert(D->getLexicalDeclContext() == this &&
+ "decl being removed from non-lexical context");
+ assert((D->NextInContextAndBits.getPointer() || D == LastDecl) &&
+ "decl is not in decls list");
+
+ // Remove D from the decl chain. This is O(n) but hopefully rare.
+ if (D == FirstDecl) {
+ if (D == LastDecl)
+ FirstDecl = LastDecl = 0;
+ else
+ FirstDecl = D->NextInContextAndBits.getPointer();
+ } else {
+ for (Decl *I = FirstDecl; true; I = I->NextInContextAndBits.getPointer()) {
+ assert(I && "decl not found in linked list");
+ if (I->NextInContextAndBits.getPointer() == D) {
+ I->NextInContextAndBits.setPointer(D->NextInContextAndBits.getPointer());
+ if (D == LastDecl) LastDecl = I;
+ break;
+ }
+ }
+ }
+
+ // Mark that D is no longer in the decl chain.
+ D->NextInContextAndBits.setPointer(0);
+
+ // Remove D from the lookup table if necessary.
+ if (isa<NamedDecl>(D)) {
+ NamedDecl *ND = cast<NamedDecl>(D);
+
+ // Remove only decls that have a name
+ if (!ND->getDeclName()) return;
+
+ StoredDeclsMap *Map = getPrimaryContext()->LookupPtr.getPointer();
+ if (!Map) return;
+
+ StoredDeclsMap::iterator Pos = Map->find(ND->getDeclName());
+ assert(Pos != Map->end() && "no lookup entry for decl");
+ if (Pos->second.getAsVector() || Pos->second.getAsDecl() == ND)
+ Pos->second.remove(ND);
+ }
+}
+
+void DeclContext::addHiddenDecl(Decl *D) {
+ assert(D->getLexicalDeclContext() == this &&
+ "Decl inserted into wrong lexical context");
+ assert(!D->getNextDeclInContext() && D != LastDecl &&
+ "Decl already inserted into a DeclContext");
+
+ if (FirstDecl) {
+ LastDecl->NextInContextAndBits.setPointer(D);
+ LastDecl = D;
+ } else {
+ FirstDecl = LastDecl = D;
+ }
+
+ // Notify a C++ record declaration that we've added a member, so it can
+  // update its class-specific state.
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(this))
+ Record->addedMember(D);
+
+ // If this is a newly-created (not de-serialized) import declaration, wire
+ // it in to the list of local import declarations.
+ if (!D->isFromASTFile()) {
+ if (ImportDecl *Import = dyn_cast<ImportDecl>(D))
+ D->getASTContext().addedLocalImportDecl(Import);
+ }
+}
+
+void DeclContext::addDecl(Decl *D) {
+ addHiddenDecl(D);
+
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ ND->getDeclContext()->getPrimaryContext()->
+ makeDeclVisibleInContextWithFlags(ND, false, true);
+}
+
+void DeclContext::addDeclInternal(Decl *D) {
+ addHiddenDecl(D);
+
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ ND->getDeclContext()->getPrimaryContext()->
+ makeDeclVisibleInContextWithFlags(ND, true, true);
+}
+
+/// shouldBeHidden - Determine whether a declaration which was declared
+/// within its semantic context should be invisible to qualified name lookup.
+static bool shouldBeHidden(NamedDecl *D) {
+ // Skip unnamed declarations.
+ if (!D->getDeclName())
+ return true;
+
+ // Skip entities that can't be found by name lookup into a particular
+ // context.
+ if ((D->getIdentifierNamespace() == 0 && !isa<UsingDirectiveDecl>(D)) ||
+ D->isTemplateParameter())
+ return true;
+
+ // Skip template specializations.
+ // FIXME: This feels like a hack. Should DeclarationName support
+ // template-ids, or is there a better way to keep specializations
+ // from being visible?
+ if (isa<ClassTemplateSpecializationDecl>(D))
+ return true;
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isFunctionTemplateSpecialization())
+ return true;
+
+ return false;
+}
+
+/// buildLookup - Build the lookup data structure with all of the
+/// declarations in this DeclContext (and any other contexts linked
+/// to it or transparent contexts nested within it) and return it.
+StoredDeclsMap *DeclContext::buildLookup() {
+ assert(this == getPrimaryContext() && "buildLookup called on non-primary DC");
+
+ if (!LookupPtr.getInt())
+ return LookupPtr.getPointer();
+
+ llvm::SmallVector<DeclContext *, 2> Contexts;
+ collectAllContexts(Contexts);
+ for (unsigned I = 0, N = Contexts.size(); I != N; ++I)
+ buildLookupImpl(Contexts[I]);
+
+ // We no longer have any lazy decls.
+ LookupPtr.setInt(false);
+ return LookupPtr.getPointer();
+}
+
+/// buildLookupImpl - Build part of the lookup data structure for the
+/// declarations contained within DCtx, which will either be this
+/// DeclContext, a DeclContext linked to it, or a transparent context
+/// nested within it.
+void DeclContext::buildLookupImpl(DeclContext *DCtx) {
+ for (decl_iterator I = DCtx->decls_begin(), E = DCtx->decls_end();
+ I != E; ++I) {
+ Decl *D = *I;
+
+ // Insert this declaration into the lookup structure, but only if
+ // it's semantically within its decl context. Any other decls which
+ // should be found in this context are added eagerly.
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (ND->getDeclContext() == DCtx && !shouldBeHidden(ND))
+ makeDeclVisibleInContextImpl(ND, false);
+
+ // If this declaration is itself a transparent declaration context
+ // or inline namespace, add the members of this declaration of that
+ // context (recursively).
+ if (DeclContext *InnerCtx = dyn_cast<DeclContext>(D))
+ if (InnerCtx->isTransparentContext() || InnerCtx->isInlineNamespace())
+ buildLookupImpl(InnerCtx);
+ }
+}
+
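+/// lookup - Perform qualified name lookup for Name in this context, consulting
+/// the external AST source or lazily building the lookup table as needed.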
+DeclContext::lookup_result
+DeclContext::lookup(DeclarationName Name) {
+ assert(DeclKind != Decl::LinkageSpec &&
+ "Should not perform lookups into linkage specs!");
+
+ DeclContext *PrimaryContext = getPrimaryContext();
+ if (PrimaryContext != this)
+ return PrimaryContext->lookup(Name);
+
+ if (hasExternalVisibleStorage()) {
+ // If a PCH has a result for this name, and we have a local declaration, we
+ // will have imported the PCH result when adding the local declaration.
+ // FIXME: For modules, we could have had more declarations added by module
+    // imports since we saw the declaration of the local name.
+ if (StoredDeclsMap *Map = LookupPtr.getPointer()) {
+ StoredDeclsMap::iterator I = Map->find(Name);
+ if (I != Map->end())
+ return I->second.getLookupResult();
+ }
+
+ ExternalASTSource *Source = getParentASTContext().getExternalSource();
+ return Source->FindExternalVisibleDeclsByName(this, Name);
+ }
+
+ StoredDeclsMap *Map = LookupPtr.getPointer();
+ if (LookupPtr.getInt())
+ Map = buildLookup();
+
+ if (!Map)
+ return lookup_result(lookup_iterator(0), lookup_iterator(0));
+
+ StoredDeclsMap::iterator I = Map->find(Name);
+ if (I == Map->end())
+ return lookup_result(lookup_iterator(0), lookup_iterator(0));
+
+ return I->second.getLookupResult();
+}
+
+DeclContext::lookup_const_result
+DeclContext::lookup(DeclarationName Name) const {
+ return const_cast<DeclContext*>(this)->lookup(Name);
+}
+
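+/// localUncachedLookup - Find declarations with the given name in this
+/// context without loading any additional declarations from an external
+/// source.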
+void DeclContext::localUncachedLookup(DeclarationName Name,
+ llvm::SmallVectorImpl<NamedDecl *> &Results) {
+ Results.clear();
+
+ // If there's no external storage, just perform a normal lookup and copy
+ // the results.
+ if (!hasExternalVisibleStorage() && !hasExternalLexicalStorage()) {
+ lookup_result LookupResults = lookup(Name);
+ Results.insert(Results.end(), LookupResults.first, LookupResults.second);
+ return;
+ }
+
+ // If we have a lookup table, check there first. Maybe we'll get lucky.
+ if (StoredDeclsMap *Map = LookupPtr.getPointer()) {
+ StoredDeclsMap::iterator Pos = Map->find(Name);
+ if (Pos != Map->end()) {
+ Results.insert(Results.end(),
+ Pos->second.getLookupResult().first,
+ Pos->second.getLookupResult().second);
+ return;
+ }
+ }
+
+ // Slow case: grovel through the declarations in our chain looking for
+ // matches.
+ for (Decl *D = FirstDecl; D; D = D->getNextDeclInContext()) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (ND->getDeclName() == Name)
+ Results.push_back(ND);
+ }
+}
+
+DeclContext *DeclContext::getRedeclContext() {
+ DeclContext *Ctx = this;
+ // Skip through transparent contexts.
+ while (Ctx->isTransparentContext())
+ Ctx = Ctx->getParent();
+ return Ctx;
+}
+
+DeclContext *DeclContext::getEnclosingNamespaceContext() {
+ DeclContext *Ctx = this;
+ // Skip through non-namespace, non-translation-unit contexts.
+ while (!Ctx->isFileContext())
+ Ctx = Ctx->getParent();
+ return Ctx->getPrimaryContext();
+}
+
+bool DeclContext::InEnclosingNamespaceSetOf(const DeclContext *O) const {
+ // For non-file contexts, this is equivalent to Equals.
+ if (!isFileContext())
+ return O->Equals(this);
+
+ do {
+ if (O->Equals(this))
+ return true;
+
+ const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(O);
+ if (!NS || !NS->isInline())
+ break;
+ O = NS->getParent();
+ } while (O);
+
+ return false;
+}
+
+void DeclContext::makeDeclVisibleInContext(NamedDecl *D) {
+ DeclContext *PrimaryDC = this->getPrimaryContext();
+ DeclContext *DeclDC = D->getDeclContext()->getPrimaryContext();
+ // If the decl is being added outside of its semantic decl context, we
+ // need to ensure that we eagerly build the lookup information for it.
+ PrimaryDC->makeDeclVisibleInContextWithFlags(D, false, PrimaryDC == DeclDC);
+}
+
+void DeclContext::makeDeclVisibleInContextWithFlags(NamedDecl *D, bool Internal,
+ bool Recoverable) {
+ assert(this == getPrimaryContext() && "expected a primary DC");
+
+ // Skip declarations within functions.
+ // FIXME: We shouldn't need to build lookup tables for function declarations
+ // ever, and we can't do so correctly because we can't model the nesting of
+ // scopes which occurs within functions. We use "qualified" lookup into
+ // function declarations when handling friend declarations inside nested
+ // classes, and consequently accept the following invalid code:
+ //
+ // void f() { void g(); { int g; struct S { friend void g(); }; } }
+ if (isFunctionOrMethod() && !isa<FunctionDecl>(D))
+ return;
+
+ // Skip declarations which should be invisible to name lookup.
+ if (shouldBeHidden(D))
+ return;
+
+ // If we already have a lookup data structure, perform the insertion into
+ // it. If we might have externally-stored decls with this name, look them
+ // up and perform the insertion. If this decl was declared outside its
+ // semantic context, buildLookup won't add it, so add it now.
+ //
+ // FIXME: As a performance hack, don't add such decls into the translation
+ // unit unless we're in C++, since qualified lookup into the TU is never
+ // performed.
+ if (LookupPtr.getPointer() || hasExternalVisibleStorage() ||
+ ((!Recoverable || D->getDeclContext() != D->getLexicalDeclContext()) &&
+ (getParentASTContext().getLangOpts().CPlusPlus ||
+ !isTranslationUnit()))) {
+ // If we have lazily omitted any decls, they might have the same name as
+ // the decl which we are adding, so build a full lookup table before adding
+ // this decl.
+ buildLookup();
+ makeDeclVisibleInContextImpl(D, Internal);
+ } else {
+ LookupPtr.setInt(true);
+ }
+
+ // If we are a transparent context or inline namespace, insert into our
+ // parent context, too. This operation is recursive.
+ if (isTransparentContext() || isInlineNamespace())
+ getParent()->getPrimaryContext()->
+ makeDeclVisibleInContextWithFlags(D, Internal, Recoverable);
+
+ Decl *DCAsDecl = cast<Decl>(this);
+ // Notify that a decl was made visible unless we are a Tag being defined.
+ if (!(isa<TagDecl>(DCAsDecl) && cast<TagDecl>(DCAsDecl)->isBeingDefined()))
+ if (ASTMutationListener *L = DCAsDecl->getASTMutationListener())
+ L->AddedVisibleDecl(this, D);
+}
+
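+/// makeDeclVisibleInContextImpl - Insert D into this context's lookup table,
+/// first asking any external AST source for visible declarations with the
+/// same name, and replacing an existing entry when D redeclares it.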
+void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D, bool Internal) {
+ // Find or create the stored declaration map.
+ StoredDeclsMap *Map = LookupPtr.getPointer();
+ if (!Map) {
+ ASTContext *C = &getParentASTContext();
+ Map = CreateStoredDeclsMap(*C);
+ }
+
+ // If there is an external AST source, load any declarations it knows about
+ // with this declaration's name.
+  // If the lookup table already contains an entry for this name, we have
+  // already checked the external source.
+ if (!Internal)
+ if (ExternalASTSource *Source = getParentASTContext().getExternalSource())
+ if (hasExternalVisibleStorage() &&
+ Map->find(D->getDeclName()) == Map->end())
+ Source->FindExternalVisibleDeclsByName(this, D->getDeclName());
+
+ // Insert this declaration into the map.
+ StoredDeclsList &DeclNameEntries = (*Map)[D->getDeclName()];
+ if (DeclNameEntries.isNull()) {
+ DeclNameEntries.setOnlyValue(D);
+ return;
+ }
+
+ if (DeclNameEntries.HandleRedeclaration(D)) {
+ // This declaration has replaced an existing one for which
+ // declarationReplaces returns true.
+ return;
+ }
+
+ // Put this declaration into the appropriate slot.
+ DeclNameEntries.AddSubsequentDecl(D);
+}
+
+/// Returns iterator range [First, Last) of UsingDirectiveDecls stored within
+/// this context.
+DeclContext::udir_iterator_range
+DeclContext::getUsingDirectives() const {
+ // FIXME: Use something more efficient than normal lookup for using
+ // directives. In C++, using directives are looked up more than anything else.
+ lookup_const_result Result = lookup(UsingDirectiveDecl::getName());
+ return udir_iterator_range(reinterpret_cast<udir_iterator>(Result.first),
+ reinterpret_cast<udir_iterator>(Result.second));
+}
+
+//===----------------------------------------------------------------------===//
+// Creation and Destruction of StoredDeclsMaps. //
+//===----------------------------------------------------------------------===//
+
+StoredDeclsMap *DeclContext::CreateStoredDeclsMap(ASTContext &C) const {
+ assert(!LookupPtr.getPointer() && "context already has a decls map");
+ assert(getPrimaryContext() == this &&
+ "creating decls map on non-primary context");
+
+ StoredDeclsMap *M;
+ bool Dependent = isDependentContext();
+ if (Dependent)
+ M = new DependentStoredDeclsMap();
+ else
+ M = new StoredDeclsMap();
+ M->Previous = C.LastSDM;
+ C.LastSDM = llvm::PointerIntPair<StoredDeclsMap*,1>(M, Dependent);
+ LookupPtr.setPointer(M);
+ return M;
+}
+
+void ASTContext::ReleaseDeclContextMaps() {
+ // It's okay to delete DependentStoredDeclsMaps via a StoredDeclsMap
+ // pointer because the subclass doesn't add anything that needs to
+ // be deleted.
+ StoredDeclsMap::DestroyAll(LastSDM.getPointer(), LastSDM.getInt());
+}
+
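+/// DestroyAll - Walk the chain of maps threaded through the ASTContext and
+/// delete each one with its correct dynamic type.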
+void StoredDeclsMap::DestroyAll(StoredDeclsMap *Map, bool Dependent) {
+ while (Map) {
+ // Advance the iteration before we invalidate memory.
+ llvm::PointerIntPair<StoredDeclsMap*,1> Next = Map->Previous;
+
+ if (Dependent)
+ delete static_cast<DependentStoredDeclsMap*>(Map);
+ else
+ delete Map;
+
+ Map = Next.getPointer();
+ Dependent = Next.getInt();
+ }
+}
+
+DependentDiagnostic *DependentDiagnostic::Create(ASTContext &C,
+ DeclContext *Parent,
+ const PartialDiagnostic &PDiag) {
+ assert(Parent->isDependentContext()
+ && "cannot iterate dependent diagnostics of non-dependent context");
+ Parent = Parent->getPrimaryContext();
+ if (!Parent->LookupPtr.getPointer())
+ Parent->CreateStoredDeclsMap(C);
+
+ DependentStoredDeclsMap *Map
+ = static_cast<DependentStoredDeclsMap*>(Parent->LookupPtr.getPointer());
+
+ // Allocate the copy of the PartialDiagnostic via the ASTContext's
+ // BumpPtrAllocator, rather than the ASTContext itself.
+ PartialDiagnostic::Storage *DiagStorage = 0;
+ if (PDiag.hasStorage())
+ DiagStorage = new (C) PartialDiagnostic::Storage;
+
+ DependentDiagnostic *DD = new (C) DependentDiagnostic(PDiag, DiagStorage);
+
+ // TODO: Maybe we shouldn't reverse the order during insertion.
+ DD->NextDiagnostic = Map->FirstDiagnostic;
+ Map->FirstDiagnostic = DD;
+
+ return DD;
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
new file mode 100644
index 0000000..114322b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DeclCXX.cpp
@@ -0,0 +1,2029 @@
+//===--- DeclCXX.cpp - C++ Declaration AST Node Implementation ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the C++ related Decl classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Decl Allocation/Deallocation Method Implementations
+//===----------------------------------------------------------------------===//
+
+void AccessSpecDecl::anchor() { }
+
+AccessSpecDecl *AccessSpecDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(AccessSpecDecl));
+ return new (Mem) AccessSpecDecl(EmptyShell());
+}
+
+CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
+ : UserDeclaredConstructor(false), UserDeclaredCopyConstructor(false),
+ UserDeclaredMoveConstructor(false), UserDeclaredCopyAssignment(false),
+ UserDeclaredMoveAssignment(false), UserDeclaredDestructor(false),
+ Aggregate(true), PlainOldData(true), Empty(true), Polymorphic(false),
+ Abstract(false), IsStandardLayout(true), HasNoNonEmptyBases(true),
+ HasPrivateFields(false), HasProtectedFields(false), HasPublicFields(false),
+ HasMutableFields(false), HasOnlyCMembers(true),
+ HasTrivialDefaultConstructor(true),
+ HasConstexprNonCopyMoveConstructor(false),
+ DefaultedDefaultConstructorIsConstexpr(true),
+ DefaultedCopyConstructorIsConstexpr(true),
+ DefaultedMoveConstructorIsConstexpr(true),
+ HasConstexprDefaultConstructor(false), HasConstexprCopyConstructor(false),
+ HasConstexprMoveConstructor(false), HasTrivialCopyConstructor(true),
+ HasTrivialMoveConstructor(true), HasTrivialCopyAssignment(true),
+ HasTrivialMoveAssignment(true), HasTrivialDestructor(true),
+ HasIrrelevantDestructor(true),
+ HasNonLiteralTypeFieldsOrBases(false), ComputedVisibleConversions(false),
+ UserProvidedDefaultConstructor(false), DeclaredDefaultConstructor(false),
+ DeclaredCopyConstructor(false), DeclaredMoveConstructor(false),
+ DeclaredCopyAssignment(false), DeclaredMoveAssignment(false),
+ DeclaredDestructor(false), FailedImplicitMoveConstructor(false),
+ FailedImplicitMoveAssignment(false), IsLambda(false), NumBases(0),
+ NumVBases(0), Bases(), VBases(), Definition(D), FirstFriend(0) {
+}
+
+CXXRecordDecl::CXXRecordDecl(Kind K, TagKind TK, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, CXXRecordDecl *PrevDecl)
+ : RecordDecl(K, TK, DC, StartLoc, IdLoc, Id, PrevDecl),
+ DefinitionData(PrevDecl ? PrevDecl->DefinitionData : 0),
+ TemplateOrInstantiation() { }
+
+CXXRecordDecl *CXXRecordDecl::Create(const ASTContext &C, TagKind TK,
+ DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ CXXRecordDecl* PrevDecl,
+ bool DelayTypeCreation) {
+ CXXRecordDecl* R = new (C) CXXRecordDecl(CXXRecord, TK, DC, StartLoc, IdLoc,
+ Id, PrevDecl);
+
+ // FIXME: DelayTypeCreation seems like such a hack
+ if (!DelayTypeCreation)
+ C.getTypeDeclType(R, PrevDecl);
+ return R;
+}
+
+CXXRecordDecl *CXXRecordDecl::CreateLambda(const ASTContext &C, DeclContext *DC,
+ SourceLocation Loc, bool Dependent) {
+ CXXRecordDecl* R = new (C) CXXRecordDecl(CXXRecord, TTK_Class, DC, Loc, Loc,
+ 0, 0);
+ R->IsBeingDefined = true;
+ R->DefinitionData = new (C) struct LambdaDefinitionData(R, Dependent);
+ C.getTypeDeclType(R, /*PrevDecl=*/0);
+ return R;
+}
+
+CXXRecordDecl *
+CXXRecordDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXRecordDecl));
+ return new (Mem) CXXRecordDecl(CXXRecord, TTK_Struct, 0, SourceLocation(),
+ SourceLocation(), 0, 0);
+}
+
+void
+CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
+ unsigned NumBases) {
+ ASTContext &C = getASTContext();
+
+ if (!data().Bases.isOffset() && data().NumBases > 0)
+ C.Deallocate(data().getBases());
+
+ if (NumBases) {
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is [...] a class with [...] no base classes [...].
+ data().Aggregate = false;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class...
+ data().PlainOldData = false;
+ }
+
+ // The set of seen virtual base types.
+ llvm::SmallPtrSet<CanQualType, 8> SeenVBaseTypes;
+
+ // The virtual bases of this class.
+ SmallVector<const CXXBaseSpecifier *, 8> VBases;
+
+ data().Bases = new(C) CXXBaseSpecifier [NumBases];
+ data().NumBases = NumBases;
+ for (unsigned i = 0; i < NumBases; ++i) {
+ data().getBases()[i] = *Bases[i];
+ // Keep track of inherited vbases for this base class.
+ const CXXBaseSpecifier *Base = Bases[i];
+ QualType BaseType = Base->getType();
+ // Skip dependent types; we can't do any checking on them now.
+ if (BaseType->isDependentType())
+ continue;
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
+
+ // A class with a non-empty base class is not empty.
+ // FIXME: Standard ref?
+ if (!BaseClassDecl->isEmpty()) {
+ if (!data().Empty) {
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // [...]
+ // -- either has no non-static data members in the most derived
+ // class and at most one base class with non-static data members,
+ // or has no base classes with non-static data members, and
+ // If this is the second non-empty base, then neither of these two
+ // clauses can be true.
+ data().IsStandardLayout = false;
+ }
+
+ data().Empty = false;
+ data().HasNoNonEmptyBases = false;
+ }
+
+ // C++ [class.virtual]p1:
+ // A class that declares or inherits a virtual function is called a
+ // polymorphic class.
+ if (BaseClassDecl->isPolymorphic())
+ data().Polymorphic = true;
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that: [...]
+ // -- has no non-standard-layout base classes
+ if (!BaseClassDecl->isStandardLayout())
+ data().IsStandardLayout = false;
+
+ // Record if this base is the first non-literal field or base.
+ if (!hasNonLiteralTypeFieldsOrBases() && !BaseType->isLiteralType())
+ data().HasNonLiteralTypeFieldsOrBases = true;
+
+ // Now go through all virtual bases of this base and add them.
+ for (CXXRecordDecl::base_class_iterator VBase =
+ BaseClassDecl->vbases_begin(),
+ E = BaseClassDecl->vbases_end(); VBase != E; ++VBase) {
+ // Add this base if it's not already in the list.
+ if (SeenVBaseTypes.insert(C.getCanonicalType(VBase->getType())))
+ VBases.push_back(VBase);
+ }
+
+ if (Base->isVirtual()) {
+ // Add this base if it's not already in the list.
+ if (SeenVBaseTypes.insert(C.getCanonicalType(BaseType)))
+ VBases.push_back(Base);
+
+ // C++0x [meta.unary.prop] is_empty:
+ // T is a class type, but not a union type, with ... no virtual base
+ // classes
+ data().Empty = false;
+
+ // C++ [class.ctor]p5:
+ // A default constructor is trivial [...] if:
+ // -- its class has [...] no virtual bases
+ data().HasTrivialDefaultConstructor = false;
+
+ // C++0x [class.copy]p13:
+ // A copy/move constructor for class X is trivial if it is neither
+ // user-provided nor deleted and if
+ // -- class X has no virtual functions and no virtual base classes, and
+ data().HasTrivialCopyConstructor = false;
+ data().HasTrivialMoveConstructor = false;
+
+ // C++0x [class.copy]p27:
+ // A copy/move assignment operator for class X is trivial if it is
+ // neither user-provided nor deleted and if
+ // -- class X has no virtual functions and no virtual base classes, and
+ data().HasTrivialCopyAssignment = false;
+ data().HasTrivialMoveAssignment = false;
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that: [...]
+ // -- has [...] no virtual base classes
+ data().IsStandardLayout = false;
+
+ // C++11 [dcl.constexpr]p4:
+ // In the definition of a constexpr constructor [...]
+ // -- the class shall not have any virtual base classes
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+ data().DefaultedCopyConstructorIsConstexpr = false;
+ data().DefaultedMoveConstructorIsConstexpr = false;
+ } else {
+ // C++ [class.ctor]p5:
+ // A default constructor is trivial [...] if:
+ // -- all the direct base classes of its class have trivial default
+ // constructors.
+ if (!BaseClassDecl->hasTrivialDefaultConstructor())
+ data().HasTrivialDefaultConstructor = false;
+
+ // C++0x [class.copy]p13:
+ // A copy/move constructor for class X is trivial if [...]
+ // [...]
+ // -- the constructor selected to copy/move each direct base class
+ // subobject is trivial, and
+ // FIXME: C++0x: We need to only consider the selected constructor
+ // instead of all of them.
+ if (!BaseClassDecl->hasTrivialCopyConstructor())
+ data().HasTrivialCopyConstructor = false;
+ if (!BaseClassDecl->hasTrivialMoveConstructor())
+ data().HasTrivialMoveConstructor = false;
+
+ // C++0x [class.copy]p27:
+ // A copy/move assignment operator for class X is trivial if [...]
+ // [...]
+ // -- the assignment operator selected to copy/move each direct base
+ // class subobject is trivial, and
+ // FIXME: C++0x: We need to only consider the selected operator instead
+ // of all of them.
+ if (!BaseClassDecl->hasTrivialCopyAssignment())
+ data().HasTrivialCopyAssignment = false;
+ if (!BaseClassDecl->hasTrivialMoveAssignment())
+ data().HasTrivialMoveAssignment = false;
+
+ // C++11 [class.ctor]p6:
+ // If that user-written default constructor would satisfy the
+ // requirements of a constexpr constructor, the implicitly-defined
+ // default constructor is constexpr.
+ if (!BaseClassDecl->hasConstexprDefaultConstructor())
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+
+ // C++11 [class.copy]p13:
+ // If the implicitly-defined constructor would satisfy the requirements
+ // of a constexpr constructor, the implicitly-defined constructor is
+ // constexpr.
+ // C++11 [dcl.constexpr]p4:
+ // -- every constructor involved in initializing [...] base class
+ // sub-objects shall be a constexpr constructor
+ if (!BaseClassDecl->hasConstexprCopyConstructor())
+ data().DefaultedCopyConstructorIsConstexpr = false;
+ if (BaseClassDecl->hasDeclaredMoveConstructor() ||
+ BaseClassDecl->needsImplicitMoveConstructor())
+ // FIXME: If the implicit move constructor generated for the base class
+ // would be ill-formed, the implicit move constructor generated for the
+ // derived class calls the base class' copy constructor.
+ data().DefaultedMoveConstructorIsConstexpr &=
+ BaseClassDecl->hasConstexprMoveConstructor();
+ else if (!BaseClassDecl->hasConstexprCopyConstructor())
+ data().DefaultedMoveConstructorIsConstexpr = false;
+ }
+
+ // C++ [class.ctor]p3:
+ // A destructor is trivial if all the direct base classes of its class
+ // have trivial destructors.
+ if (!BaseClassDecl->hasTrivialDestructor())
+ data().HasTrivialDestructor = false;
+
+ if (!BaseClassDecl->hasIrrelevantDestructor())
+ data().HasIrrelevantDestructor = false;
+
+ // A class has an Objective-C object member if... or any of its bases
+ // has an Objective-C object member.
+ if (BaseClassDecl->hasObjectMember())
+ setHasObjectMember(true);
+
+ // Keep track of the presence of mutable fields.
+ if (BaseClassDecl->hasMutableFields())
+ data().HasMutableFields = true;
+ }
+
+ if (VBases.empty())
+ return;
+
+ // Create base specifier for any direct or indirect virtual bases.
+ data().VBases = new (C) CXXBaseSpecifier[VBases.size()];
+ data().NumVBases = VBases.size();
+ for (int I = 0, E = VBases.size(); I != E; ++I)
+ data().getVBases()[I] = *VBases[I];
+}
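+
+// For illustration only (hypothetical classes, not used in this file): a
+// direct virtual base clears several of the flags updated above.
+//
+//   struct B { };
+//   struct D : virtual B { };  // D is not empty, not standard-layout, and
+//                              // its default/copy/move constructors and
+//                              // copy/move assignments are not trivial.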
+
+/// Callback function for CXXRecordDecl::forallBases that acknowledges
+/// that it saw a base class.
+static bool SawBase(const CXXRecordDecl *, void *) {
+ return true;
+}
+
+bool CXXRecordDecl::hasAnyDependentBases() const {
+ if (!isDependentContext())
+ return false;
+
+ return !forallBases(SawBase, 0);
+}
+
+bool CXXRecordDecl::hasConstCopyConstructor() const {
+ return getCopyConstructor(Qualifiers::Const) != 0;
+}
+
+bool CXXRecordDecl::isTriviallyCopyable() const {
+ // C++0x [class]p5:
+ // A trivially copyable class is a class that:
+ // -- has no non-trivial copy constructors,
+ if (!hasTrivialCopyConstructor()) return false;
+ // -- has no non-trivial move constructors,
+ if (!hasTrivialMoveConstructor()) return false;
+ // -- has no non-trivial copy assignment operators,
+ if (!hasTrivialCopyAssignment()) return false;
+ // -- has no non-trivial move assignment operators, and
+ if (!hasTrivialMoveAssignment()) return false;
+ // -- has a trivial destructor.
+ if (!hasTrivialDestructor()) return false;
+
+ return true;
+}
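+
+// A minimal sketch of the rule above (hypothetical types):
+//
+//   struct Pod     { int x; double y; };  // trivially copyable
+//   struct HasDtor { ~HasDtor(); };       // user-provided destructor is
+//                                         // non-trivial -> not trivially
+//                                         // copyable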
+
+/// \brief Perform a simplistic form of overload resolution that only considers
+/// cv-qualifiers on a single parameter, and return the best overload candidate
+/// (if there is one).
+static CXXMethodDecl *
+GetBestOverloadCandidateSimple(
+ const SmallVectorImpl<std::pair<CXXMethodDecl *, Qualifiers> > &Cands) {
+ if (Cands.empty())
+ return 0;
+ if (Cands.size() == 1)
+ return Cands[0].first;
+
+ unsigned Best = 0, N = Cands.size();
+ for (unsigned I = 1; I != N; ++I)
+ if (Cands[Best].second.compatiblyIncludes(Cands[I].second))
+ Best = I;
+
+ for (unsigned I = 1; I != N; ++I)
+ if (Cands[Best].second.compatiblyIncludes(Cands[I].second))
+ return 0;
+
+ return Cands[Best].first;
+}
+
+CXXConstructorDecl *CXXRecordDecl::getCopyConstructor(unsigned TypeQuals) const{
+ ASTContext &Context = getASTContext();
+ QualType ClassType
+ = Context.getTypeDeclType(const_cast<CXXRecordDecl*>(this));
+ DeclarationName ConstructorName
+ = Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(ClassType));
+ unsigned FoundTQs;
+ SmallVector<std::pair<CXXMethodDecl *, Qualifiers>, 4> Found;
+ DeclContext::lookup_const_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd) = this->lookup(ConstructorName);
+ Con != ConEnd; ++Con) {
+ // C++ [class.copy]p2:
+ // A non-template constructor for class X is a copy constructor if [...]
+ if (isa<FunctionTemplateDecl>(*Con))
+ continue;
+
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
+ if (Constructor->isCopyConstructor(FoundTQs)) {
+ if (((TypeQuals & Qualifiers::Const) == (FoundTQs & Qualifiers::Const)) ||
+ (!(TypeQuals & Qualifiers::Const) && (FoundTQs & Qualifiers::Const)))
+ Found.push_back(std::make_pair(
+ const_cast<CXXConstructorDecl *>(Constructor),
+ Qualifiers::fromCVRMask(FoundTQs)));
+ }
+ }
+
+ return cast_or_null<CXXConstructorDecl>(
+ GetBestOverloadCandidateSimple(Found));
+}
+
+CXXConstructorDecl *CXXRecordDecl::getMoveConstructor() const {
+ for (ctor_iterator I = ctor_begin(), E = ctor_end(); I != E; ++I)
+ if (I->isMoveConstructor())
+ return *I;
+
+ return 0;
+}
+
+CXXMethodDecl *CXXRecordDecl::getCopyAssignmentOperator(bool ArgIsConst) const {
+ ASTContext &Context = getASTContext();
+ QualType Class = Context.getTypeDeclType(const_cast<CXXRecordDecl *>(this));
+ DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
+
+ SmallVector<std::pair<CXXMethodDecl *, Qualifiers>, 4> Found;
+ DeclContext::lookup_const_iterator Op, OpEnd;
+ for (llvm::tie(Op, OpEnd) = this->lookup(Name); Op != OpEnd; ++Op) {
+ // C++ [class.copy]p9:
+ // A user-declared copy assignment operator is a non-static non-template
+ // member function of class X with exactly one parameter of type X, X&,
+ // const X&, volatile X& or const volatile X&.
+ const CXXMethodDecl* Method = dyn_cast<CXXMethodDecl>(*Op);
+ if (!Method || Method->isStatic() || Method->getPrimaryTemplate())
+ continue;
+
+ const FunctionProtoType *FnType
+ = Method->getType()->getAs<FunctionProtoType>();
+ assert(FnType && "Overloaded operator has no prototype.");
+ // Don't assert on this; an invalid decl might have been left in the AST.
+ if (FnType->getNumArgs() != 1 || FnType->isVariadic())
+ continue;
+
+ QualType ArgType = FnType->getArgType(0);
+ Qualifiers Quals;
+ if (const LValueReferenceType *Ref = ArgType->getAs<LValueReferenceType>()) {
+ ArgType = Ref->getPointeeType();
+ // If we have a const argument and we have a reference to a non-const,
+ // this function does not match.
+ if (ArgIsConst && !ArgType.isConstQualified())
+ continue;
+
+ Quals = ArgType.getQualifiers();
+ } else {
+ // By-value copy-assignment operators are treated like const X&
+ // copy-assignment operators.
+ Quals = Qualifiers::fromCVRMask(Qualifiers::Const);
+ }
+
+ if (!Context.hasSameUnqualifiedType(ArgType, Class))
+ continue;
+
+ // Save this copy-assignment operator. It might be "the one".
+ Found.push_back(std::make_pair(const_cast<CXXMethodDecl *>(Method), Quals));
+ }
+
+ // Use a simplistic form of overload resolution to find the candidate.
+ return GetBestOverloadCandidateSimple(Found);
+}
+
+CXXMethodDecl *CXXRecordDecl::getMoveAssignmentOperator() const {
+ for (method_iterator I = method_begin(), E = method_end(); I != E; ++I)
+ if (I->isMoveAssignmentOperator())
+ return *I;
+
+ return 0;
+}
+
+void CXXRecordDecl::markedVirtualFunctionPure() {
+ // C++ [class.abstract]p2:
+ // A class is abstract if it has at least one pure virtual function.
+ data().Abstract = true;
+}
+
+void CXXRecordDecl::addedMember(Decl *D) {
+ if (!D->isImplicit() &&
+ !isa<FieldDecl>(D) &&
+ !isa<IndirectFieldDecl>(D) &&
+ (!isa<TagDecl>(D) || cast<TagDecl>(D)->getTagKind() == TTK_Class))
+ data().HasOnlyCMembers = false;
+
+ // Ignore friends and invalid declarations.
+ if (D->getFriendObjectKind() || D->isInvalidDecl())
+ return;
+
+ FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D);
+ if (FunTmpl)
+ D = FunTmpl->getTemplatedDecl();
+
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (Method->isVirtual()) {
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is an array or a class with [...] no virtual functions.
+ data().Aggregate = false;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class...
+ data().PlainOldData = false;
+
+ // Virtual functions make the class non-empty.
+ // FIXME: Standard ref?
+ data().Empty = false;
+
+ // C++ [class.virtual]p1:
+ // A class that declares or inherits a virtual function is called a
+ // polymorphic class.
+ data().Polymorphic = true;
+
+ // C++0x [class.ctor]p5
+ // A default constructor is trivial [...] if:
+ // -- its class has no virtual functions [...]
+ data().HasTrivialDefaultConstructor = false;
+
+ // C++0x [class.copy]p13:
+ // A copy/move constructor for class X is trivial if [...]
+ // -- class X has no virtual functions [...]
+ data().HasTrivialCopyConstructor = false;
+ data().HasTrivialMoveConstructor = false;
+
+ // C++0x [class.copy]p27:
+ // A copy/move assignment operator for class X is trivial if [...]
+ // -- class X has no virtual functions [...]
+ data().HasTrivialCopyAssignment = false;
+ data().HasTrivialMoveAssignment = false;
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that: [...]
+ // -- has no virtual functions
+ data().IsStandardLayout = false;
+ }
+ }
+
+ if (D->isImplicit()) {
+ // Notify that an implicit member was added after the definition
+ // was completed.
+ if (!isBeingDefined())
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->AddedCXXImplicitMember(data().Definition, D);
+
+ // If this is a special member function, note that it was added and then
+ // return early.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
+ if (Constructor->isDefaultConstructor()) {
+ data().DeclaredDefaultConstructor = true;
+ if (Constructor->isConstexpr()) {
+ data().HasConstexprDefaultConstructor = true;
+ data().HasConstexprNonCopyMoveConstructor = true;
+ }
+ } else if (Constructor->isCopyConstructor()) {
+ data().DeclaredCopyConstructor = true;
+ if (Constructor->isConstexpr())
+ data().HasConstexprCopyConstructor = true;
+ } else if (Constructor->isMoveConstructor()) {
+ data().DeclaredMoveConstructor = true;
+ if (Constructor->isConstexpr())
+ data().HasConstexprMoveConstructor = true;
+ } else
+ goto NotASpecialMember;
+ return;
+ } else if (isa<CXXDestructorDecl>(D)) {
+ data().DeclaredDestructor = true;
+ return;
+ } else if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (Method->isCopyAssignmentOperator())
+ data().DeclaredCopyAssignment = true;
+ else if (Method->isMoveAssignmentOperator())
+ data().DeclaredMoveAssignment = true;
+ else
+ goto NotASpecialMember;
+ return;
+ }
+
+NotASpecialMember:;
+ // Any other implicit declarations are handled like normal declarations.
+ }
+
+ // Handle (user-declared) constructors.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
+ // Note that we have a user-declared constructor.
+ data().UserDeclaredConstructor = true;
+
+ // Technically, "user-provided" is only defined for special member
+ // functions, but the intent of the standard is clearly that it should apply
+ // to all functions.
+ bool UserProvided = Constructor->isUserProvided();
+
+ if (Constructor->isDefaultConstructor()) {
+ data().DeclaredDefaultConstructor = true;
+ if (UserProvided) {
+ // C++0x [class.ctor]p5:
+ // A default constructor is trivial if it is not user-provided [...]
+ data().HasTrivialDefaultConstructor = false;
+ data().UserProvidedDefaultConstructor = true;
+ }
+ if (Constructor->isConstexpr()) {
+ data().HasConstexprDefaultConstructor = true;
+ data().HasConstexprNonCopyMoveConstructor = true;
+ }
+ }
+
+ // Note when we have a user-declared copy or move constructor, which will
+ // suppress the implicit declaration of those constructors.
+ if (!FunTmpl) {
+ if (Constructor->isCopyConstructor()) {
+ data().UserDeclaredCopyConstructor = true;
+ data().DeclaredCopyConstructor = true;
+
+ // C++0x [class.copy]p13:
+ // A copy/move constructor for class X is trivial if it is not
+ // user-provided [...]
+ if (UserProvided)
+ data().HasTrivialCopyConstructor = false;
+
+ if (Constructor->isConstexpr())
+ data().HasConstexprCopyConstructor = true;
+ } else if (Constructor->isMoveConstructor()) {
+ data().UserDeclaredMoveConstructor = true;
+ data().DeclaredMoveConstructor = true;
+
+ // C++0x [class.copy]p13:
+ // A copy/move constructor for class X is trivial if it is not
+ // user-provided [...]
+ if (UserProvided)
+ data().HasTrivialMoveConstructor = false;
+
+ if (Constructor->isConstexpr())
+ data().HasConstexprMoveConstructor = true;
+ }
+ }
+ if (Constructor->isConstexpr() && !Constructor->isCopyOrMoveConstructor()) {
+ // Record if we see any constexpr constructors which are neither copy
+ // nor move constructors.
+ data().HasConstexprNonCopyMoveConstructor = true;
+ }
+
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is an array or a class with no user-declared
+ // constructors [...].
+ // C++0x [dcl.init.aggr]p1:
+ // An aggregate is an array or a class with no user-provided
+ // constructors [...].
+ if (!getASTContext().getLangOpts().CPlusPlus0x || UserProvided)
+ data().Aggregate = false;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class [...]
+ // Since the POD bit is meant to be C++03 POD-ness, clear it even if the
+ // type is technically an aggregate in C++0x since it wouldn't be in 03.
+ data().PlainOldData = false;
+
+ return;
+ }
+
+ // Handle (user-declared) destructors.
+ if (CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D)) {
+ data().DeclaredDestructor = true;
+ data().UserDeclaredDestructor = true;
+ data().HasIrrelevantDestructor = false;
+
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class that has [...] no user-defined
+ // destructor.
+ // This bit is the C++03 POD bit, not the 0x one.
+ data().PlainOldData = false;
+
+ // C++11 [class.dtor]p5:
+ // A destructor is trivial if it is not user-provided and if
+ // -- the destructor is not virtual.
+ if (DD->isUserProvided() || DD->isVirtual()) {
+ data().HasTrivialDestructor = false;
+ // C++11 [dcl.constexpr]p1:
+ // The constexpr specifier shall be applied only to [...] the
+ // declaration of a static data member of a literal type.
+ // C++11 [basic.types]p10:
+ // A type is a literal type if it is [...] a class type that [...] has
+ // a trivial destructor.
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+ data().DefaultedCopyConstructorIsConstexpr = false;
+ data().DefaultedMoveConstructorIsConstexpr = false;
+ }
+
+ return;
+ }
+
+ // Handle (user-declared) member functions.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (Method->isCopyAssignmentOperator()) {
+ // C++ [class]p4:
+ // A POD-struct is an aggregate class that [...] has no user-defined
+ // copy assignment operator [...].
+ // This is the C++03 bit only.
+ data().PlainOldData = false;
+
+ // This is a copy assignment operator.
+
+ // Suppress the implicit declaration of a copy constructor.
+ data().UserDeclaredCopyAssignment = true;
+ data().DeclaredCopyAssignment = true;
+
+ // C++0x [class.copy]p27:
+ // A copy/move assignment operator for class X is trivial if it is
+ // neither user-provided nor deleted [...]
+ if (Method->isUserProvided())
+ data().HasTrivialCopyAssignment = false;
+
+ return;
+ }
+
+ if (Method->isMoveAssignmentOperator()) {
+ // This is an extension in C++03 mode, but we'll keep consistency by
+ // treating a move assignment operator as inducing non-POD-ness.
+ data().PlainOldData = false;
+
+ // This is a move assignment operator.
+ data().UserDeclaredMoveAssignment = true;
+ data().DeclaredMoveAssignment = true;
+
+ // C++0x [class.copy]p27:
+ // A copy/move assignment operator for class X is trivial if it is
+ // neither user-provided nor deleted [...]
+ if (Method->isUserProvided())
+ data().HasTrivialMoveAssignment = false;
+ }
+
+ // Keep the list of conversion functions up-to-date.
+ if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) {
+ // We don't record specializations.
+ if (Conversion->getPrimaryTemplate())
+ return;
+
+ // FIXME: We intentionally don't use the decl's access here because it
+ // hasn't been set yet. That's really just a misdesign in Sema.
+
+ if (FunTmpl) {
+ if (FunTmpl->getPreviousDecl())
+ data().Conversions.replace(FunTmpl->getPreviousDecl(),
+ FunTmpl);
+ else
+ data().Conversions.addDecl(FunTmpl);
+ } else {
+ if (Conversion->getPreviousDecl())
+ data().Conversions.replace(Conversion->getPreviousDecl(),
+ Conversion);
+ else
+ data().Conversions.addDecl(Conversion);
+ }
+ }
+
+ return;
+ }
+
+ // Handle non-static data members.
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(D)) {
+ // C++ [class.bit]p2:
+ // A declaration for a bit-field that omits the identifier declares an
+ // unnamed bit-field. Unnamed bit-fields are not members and cannot be
+ // initialized.
+ if (Field->isUnnamedBitfield())
+ return;
+
+ // C++ [dcl.init.aggr]p1:
+ // An aggregate is an array or a class (clause 9) with [...] no
+ // private or protected non-static data members (clause 11).
+ //
+ // A POD must be an aggregate.
+ if (D->getAccess() == AS_private || D->getAccess() == AS_protected) {
+ data().Aggregate = false;
+ data().PlainOldData = false;
+ }
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // [...]
+ // -- has the same access control for all non-static data members,
+ switch (D->getAccess()) {
+ case AS_private: data().HasPrivateFields = true; break;
+ case AS_protected: data().HasProtectedFields = true; break;
+ case AS_public: data().HasPublicFields = true; break;
+ case AS_none: llvm_unreachable("Invalid access specifier");
+ }
+ if ((data().HasPrivateFields + data().HasProtectedFields +
+ data().HasPublicFields) > 1)
+ data().IsStandardLayout = false;
+
+ // Keep track of the presence of mutable fields.
+ if (Field->isMutable())
+ data().HasMutableFields = true;
+
+ // C++0x [class]p9:
+ // A POD struct is a class that is both a trivial class and a
+ // standard-layout class, and has no non-static data members of type
+ // non-POD struct, non-POD union (or array of such types).
+ //
+ // Automatic Reference Counting: the presence of a member of Objective-C pointer type
+ // that does not explicitly have no lifetime makes the class a non-POD.
+ // However, we delay setting PlainOldData to false in this case so that
+ // Sema has a chance to diagnose cases where the same class would be
+ // non-POD with Automatic Reference Counting but a POD without it.
+ // In this case, the class will become a non-POD class when we complete
+ // the definition.
+ ASTContext &Context = getASTContext();
+ QualType T = Context.getBaseElementType(Field->getType());
+ if (T->isObjCRetainableType() || T.isObjCGCStrong()) {
+ if (!Context.getLangOpts().ObjCAutoRefCount ||
+ T.getObjCLifetime() != Qualifiers::OCL_ExplicitNone)
+ setHasObjectMember(true);
+ } else if (!T.isPODType(Context))
+ data().PlainOldData = false;
+
+ if (T->isReferenceType()) {
+ data().HasTrivialDefaultConstructor = false;
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // -- has no non-static data members of type [...] reference,
+ data().IsStandardLayout = false;
+ }
+
+ // Record if this field is the first non-literal or volatile field or base.
+ if (!T->isLiteralType() || T.isVolatileQualified())
+ data().HasNonLiteralTypeFieldsOrBases = true;
+
+ if (Field->hasInClassInitializer()) {
+ // C++0x [class]p5:
+ // A default constructor is trivial if [...] no non-static data member
+ // of its class has a brace-or-equal-initializer.
+ data().HasTrivialDefaultConstructor = false;
+
+ // C++0x [dcl.init.aggr]p1:
+ // An aggregate is a [...] class with [...] no
+ // brace-or-equal-initializers for non-static data members.
+ data().Aggregate = false;
+
+ // C++0x [class]p10:
+ // A POD struct is [...] a trivial class.
+ data().PlainOldData = false;
+ }
+
+ if (const RecordType *RecordTy = T->getAs<RecordType>()) {
+ CXXRecordDecl* FieldRec = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (FieldRec->getDefinition()) {
+ // C++0x [class.ctor]p5:
+ // A default constructor is trivial [...] if:
+ // -- for all the non-static data members of its class that are of
+ // class type (or array thereof), each such class has a trivial
+ // default constructor.
+ if (!FieldRec->hasTrivialDefaultConstructor())
+ data().HasTrivialDefaultConstructor = false;
+
+ // C++0x [class.copy]p13:
+ // A copy/move constructor for class X is trivial if [...]
+ // [...]
+ // -- for each non-static data member of X that is of class type (or
+ // an array thereof), the constructor selected to copy/move that
+ // member is trivial;
+ // FIXME: C++0x: We don't correctly model 'selected' constructors.
+ if (!FieldRec->hasTrivialCopyConstructor())
+ data().HasTrivialCopyConstructor = false;
+ if (!FieldRec->hasTrivialMoveConstructor())
+ data().HasTrivialMoveConstructor = false;
+
+ // C++0x [class.copy]p27:
+ // A copy/move assignment operator for class X is trivial if [...]
+ // [...]
+ // -- for each non-static data member of X that is of class type (or
+ // an array thereof), the assignment operator selected to
+ // copy/move that member is trivial;
+ // FIXME: C++0x: We don't correctly model 'selected' operators.
+ if (!FieldRec->hasTrivialCopyAssignment())
+ data().HasTrivialCopyAssignment = false;
+ if (!FieldRec->hasTrivialMoveAssignment())
+ data().HasTrivialMoveAssignment = false;
+
+ if (!FieldRec->hasTrivialDestructor())
+ data().HasTrivialDestructor = false;
+ if (!FieldRec->hasIrrelevantDestructor())
+ data().HasIrrelevantDestructor = false;
+ if (FieldRec->hasObjectMember())
+ setHasObjectMember(true);
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // -- has no non-static data members of type non-standard-layout
+ // class (or array of such types) [...]
+ if (!FieldRec->isStandardLayout())
+ data().IsStandardLayout = false;
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // [...]
+ // -- has no base classes of the same type as the first non-static
+ // data member.
+ // We don't want to expend bits in the state of the record decl
+ // tracking whether this is the first non-static data member so we
+ // cheat a bit and use some of the existing state: the empty bit.
+ // Virtual bases and virtual methods make a class non-empty, but they
+ // also make it non-standard-layout so we needn't check here.
+ // A non-empty base class may leave the class standard-layout, but not
+ // if we have arrived here, and have at least one non-static data
+ // member. If IsStandardLayout remains true, then the first non-static
+ // data member must come through here with Empty still true, and Empty
+ // will subsequently be set to false below.
+ if (data().IsStandardLayout && data().Empty) {
+ for (CXXRecordDecl::base_class_const_iterator BI = bases_begin(),
+ BE = bases_end();
+ BI != BE; ++BI) {
+ if (Context.hasSameUnqualifiedType(BI->getType(), T)) {
+ data().IsStandardLayout = false;
+ break;
+ }
+ }
+ }
+
+ // Keep track of the presence of mutable fields.
+ if (FieldRec->hasMutableFields())
+ data().HasMutableFields = true;
+
+ // C++11 [class.copy]p13:
+ // If the implicitly-defined constructor would satisfy the
+ // requirements of a constexpr constructor, the implicitly-defined
+ // constructor is constexpr.
+ // C++11 [dcl.constexpr]p4:
+ // -- every constructor involved in initializing non-static data
+ // members [...] shall be a constexpr constructor
+ if (!Field->hasInClassInitializer() &&
+ !FieldRec->hasConstexprDefaultConstructor())
+ // The standard requires any in-class initializer to be a constant
+ // expression. We consider this to be a defect.
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+
+ if (!FieldRec->hasConstexprCopyConstructor())
+ data().DefaultedCopyConstructorIsConstexpr = false;
+
+ if (FieldRec->hasDeclaredMoveConstructor() ||
+ FieldRec->needsImplicitMoveConstructor())
+ // FIXME: If the implicit move constructor generated for the member's
+ // class would be ill-formed, the implicit move constructor generated
+ // for this class calls the member's copy constructor.
+ data().DefaultedMoveConstructorIsConstexpr &=
+ FieldRec->hasConstexprMoveConstructor();
+ else if (!FieldRec->hasConstexprCopyConstructor())
+ data().DefaultedMoveConstructorIsConstexpr = false;
+ }
+ } else {
+ // Base element type of field is a non-class type.
+ if (!T->isLiteralType()) {
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+ data().DefaultedCopyConstructorIsConstexpr = false;
+ data().DefaultedMoveConstructorIsConstexpr = false;
+ } else if (!Field->hasInClassInitializer())
+ data().DefaultedDefaultConstructorIsConstexpr = false;
+ }
+
+ // C++0x [class]p7:
+ // A standard-layout class is a class that:
+ // [...]
+ // -- either has no non-static data members in the most derived
+ // class and at most one base class with non-static data members,
+ // or has no base classes with non-static data members, and
+ // At this point we know that we have a non-static data member, so the last
+ // clause holds.
+ if (!data().HasNoNonEmptyBases)
+ data().IsStandardLayout = false;
+
+ // If this is not a zero-length bit-field, then the class is not empty.
+ if (data().Empty) {
+ if (!Field->isBitField() ||
+ (!Field->getBitWidth()->isTypeDependent() &&
+ !Field->getBitWidth()->isValueDependent() &&
+ Field->getBitWidthValue(Context) != 0))
+ data().Empty = false;
+ }
+ }
+
+ // Handle using declarations of conversion functions.
+ if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(D))
+ if (Shadow->getDeclName().getNameKind()
+ == DeclarationName::CXXConversionFunctionName)
+ data().Conversions.addDecl(Shadow, Shadow->getAccess());
+}
+
+bool CXXRecordDecl::isCLike() const {
+ if (getTagKind() == TTK_Class || !TemplateOrInstantiation.isNull())
+ return false;
+ if (!hasDefinition())
+ return true;
+
+ return isPOD() && data().HasOnlyCMembers;
+}
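+
+// Rough illustration (hypothetical types): a plain C-style struct remains
+// "C-like", while any C++-only member disqualifies it.
+//
+//   struct Point  { int x, y; };            // C-like
+//   struct Widget { int x; void draw(); };  // member function -> not C-like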
+
+void CXXRecordDecl::getCaptureFields(
+ llvm::DenseMap<const VarDecl *, FieldDecl *> &Captures,
+ FieldDecl *&ThisCapture) const {
+ Captures.clear();
+ ThisCapture = 0;
+
+ LambdaDefinitionData &Lambda = getLambdaData();
+ RecordDecl::field_iterator Field = field_begin();
+ for (LambdaExpr::Capture *C = Lambda.Captures, *CEnd = C + Lambda.NumCaptures;
+ C != CEnd; ++C, ++Field) {
+ if (C->capturesThis()) {
+ ThisCapture = *Field;
+ continue;
+ }
+
+ Captures[C->getCapturedVar()] = *Field;
+ }
+}
+
+
+static CanQualType GetConversionType(ASTContext &Context, NamedDecl *Conv) {
+ QualType T;
+ if (isa<UsingShadowDecl>(Conv))
+ Conv = cast<UsingShadowDecl>(Conv)->getTargetDecl();
+ if (FunctionTemplateDecl *ConvTemp = dyn_cast<FunctionTemplateDecl>(Conv))
+ T = ConvTemp->getTemplatedDecl()->getResultType();
+ else
+ T = cast<CXXConversionDecl>(Conv)->getConversionType();
+ return Context.getCanonicalType(T);
+}
+
+/// Collect the visible conversions of a base class.
+///
+/// \param Base a base class of the class we're considering
+/// \param InVirtual whether this base class is a virtual base (or a base
+/// of a virtual base)
+/// \param Access the access along the inheritance path to this base
+/// \param ParentHiddenTypes the conversions provided by the inheritors
+/// of this base
+/// \param Output the set to which to add conversions from non-virtual bases
+/// \param VOutput the set to which to add conversions from virtual bases
+/// \param HiddenVBaseCs the set of conversions which were hidden in a
+/// virtual base along some inheritance path
+static void CollectVisibleConversions(ASTContext &Context,
+ CXXRecordDecl *Record,
+ bool InVirtual,
+ AccessSpecifier Access,
+ const llvm::SmallPtrSet<CanQualType, 8> &ParentHiddenTypes,
+ UnresolvedSetImpl &Output,
+ UnresolvedSetImpl &VOutput,
+ llvm::SmallPtrSet<NamedDecl*, 8> &HiddenVBaseCs) {
+ // The set of types which have conversions in this class or its
+ // subclasses. As an optimization, we don't copy the derived set
+ // unless it might change.
+ const llvm::SmallPtrSet<CanQualType, 8> *HiddenTypes = &ParentHiddenTypes;
+ llvm::SmallPtrSet<CanQualType, 8> HiddenTypesBuffer;
+
+ // Collect the direct conversions and figure out which conversions
+ // will be hidden in the subclasses.
+ UnresolvedSetImpl &Cs = *Record->getConversionFunctions();
+ if (!Cs.empty()) {
+ HiddenTypesBuffer = ParentHiddenTypes;
+ HiddenTypes = &HiddenTypesBuffer;
+
+ for (UnresolvedSetIterator I = Cs.begin(), E = Cs.end(); I != E; ++I) {
+ bool Hidden =
+ !HiddenTypesBuffer.insert(GetConversionType(Context, I.getDecl()));
+
+ // If this conversion is hidden and we're in a virtual base,
+ // remember that it's hidden along some inheritance path.
+ if (Hidden && InVirtual)
+ HiddenVBaseCs.insert(cast<NamedDecl>(I.getDecl()->getCanonicalDecl()));
+
+ // If this conversion isn't hidden, add it to the appropriate output.
+ else if (!Hidden) {
+ AccessSpecifier IAccess
+ = CXXRecordDecl::MergeAccess(Access, I.getAccess());
+
+ if (InVirtual)
+ VOutput.addDecl(I.getDecl(), IAccess);
+ else
+ Output.addDecl(I.getDecl(), IAccess);
+ }
+ }
+ }
+
+ // Collect information recursively from any base classes.
+ for (CXXRecordDecl::base_class_iterator
+ I = Record->bases_begin(), E = Record->bases_end(); I != E; ++I) {
+ const RecordType *RT = I->getType()->getAs<RecordType>();
+ if (!RT) continue;
+
+ AccessSpecifier BaseAccess
+ = CXXRecordDecl::MergeAccess(Access, I->getAccessSpecifier());
+ bool BaseInVirtual = InVirtual || I->isVirtual();
+
+ CXXRecordDecl *Base = cast<CXXRecordDecl>(RT->getDecl());
+ CollectVisibleConversions(Context, Base, BaseInVirtual, BaseAccess,
+ *HiddenTypes, Output, VOutput, HiddenVBaseCs);
+ }
+}
+
+/// Collect the visible conversions of a class.
+///
+/// This would be extremely straightforward if it weren't for virtual
+/// bases. It might be worth special-casing that, really.
+static void CollectVisibleConversions(ASTContext &Context,
+ CXXRecordDecl *Record,
+ UnresolvedSetImpl &Output) {
+ // The collection of all conversions in virtual bases that we've
+ // found. These will be added to the output as long as they don't
+ // appear in the hidden-conversions set.
+ UnresolvedSet<8> VBaseCs;
+
+ // The set of conversions in virtual bases that we've determined to
+ // be hidden.
+ llvm::SmallPtrSet<NamedDecl*, 8> HiddenVBaseCs;
+
+ // The set of types hidden by classes derived from this one.
+ llvm::SmallPtrSet<CanQualType, 8> HiddenTypes;
+
+ // Go ahead and collect the direct conversions and add them to the
+ // hidden-types set.
+ UnresolvedSetImpl &Cs = *Record->getConversionFunctions();
+ Output.append(Cs.begin(), Cs.end());
+ for (UnresolvedSetIterator I = Cs.begin(), E = Cs.end(); I != E; ++I)
+ HiddenTypes.insert(GetConversionType(Context, I.getDecl()));
+
+ // Recursively collect conversions from base classes.
+ for (CXXRecordDecl::base_class_iterator
+ I = Record->bases_begin(), E = Record->bases_end(); I != E; ++I) {
+ const RecordType *RT = I->getType()->getAs<RecordType>();
+ if (!RT) continue;
+
+ CollectVisibleConversions(Context, cast<CXXRecordDecl>(RT->getDecl()),
+ I->isVirtual(), I->getAccessSpecifier(),
+ HiddenTypes, Output, VBaseCs, HiddenVBaseCs);
+ }
+
+ // Add any unhidden conversions provided by virtual bases.
+ for (UnresolvedSetIterator I = VBaseCs.begin(), E = VBaseCs.end();
+ I != E; ++I) {
+ if (!HiddenVBaseCs.count(cast<NamedDecl>(I.getDecl()->getCanonicalDecl())))
+ Output.addDecl(I.getDecl(), I.getAccess());
+ }
+}
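+
+// Illustration of the hiding handled above (hypothetical classes): a
+// conversion redeclared in a derived class hides the base's conversion to
+// the same type, while other base conversions remain visible.
+//
+//   struct B { operator int(); operator long(); };
+//   struct D : B { operator int(); };  // B::operator int() is hidden;
+//                                      // B::operator long() stays visible.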
+
+/// getVisibleConversionFunctions - get all conversion functions visible
+/// in the current class, including conversion function templates.
+const UnresolvedSetImpl *CXXRecordDecl::getVisibleConversionFunctions() {
+ // If root class, all conversions are visible.
+ if (bases_begin() == bases_end())
+ return &data().Conversions;
+ // If visible conversion list is already evaluated, return it.
+ if (data().ComputedVisibleConversions)
+ return &data().VisibleConversions;
+ CollectVisibleConversions(getASTContext(), this, data().VisibleConversions);
+ data().ComputedVisibleConversions = true;
+ return &data().VisibleConversions;
+}
+
+void CXXRecordDecl::removeConversion(const NamedDecl *ConvDecl) {
+ // This operation is O(N) but extremely rare. Sema only uses it to
+ // remove UsingShadowDecls in a class that were followed by a direct
+ // declaration, e.g.:
+ // class A : B {
+ // using B::operator int;
+ // operator int();
+ // };
+ // This is uncommon by itself and even more uncommon in conjunction
+ // with sufficiently large numbers of directly-declared conversions
+ // that asymptotic behavior matters.
+
+ UnresolvedSetImpl &Convs = *getConversionFunctions();
+ for (unsigned I = 0, E = Convs.size(); I != E; ++I) {
+ if (Convs[I].getDecl() == ConvDecl) {
+ Convs.erase(I);
+ assert(std::find(Convs.begin(), Convs.end(), ConvDecl) == Convs.end()
+ && "conversion was found multiple times in unresolved set");
+ return;
+ }
+ }
+
+ llvm_unreachable("conversion not found in set!");
+}
+
+CXXRecordDecl *CXXRecordDecl::getInstantiatedFromMemberClass() const {
+ if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo())
+ return cast<CXXRecordDecl>(MSInfo->getInstantiatedFrom());
+
+ return 0;
+}
+
+MemberSpecializationInfo *CXXRecordDecl::getMemberSpecializationInfo() const {
+ return TemplateOrInstantiation.dyn_cast<MemberSpecializationInfo *>();
+}
+
+void
+CXXRecordDecl::setInstantiationOfMemberClass(CXXRecordDecl *RD,
+ TemplateSpecializationKind TSK) {
+ assert(TemplateOrInstantiation.isNull() &&
+ "Previous template or instantiation?");
+ assert(!isa<ClassTemplateSpecializationDecl>(this));
+ TemplateOrInstantiation
+ = new (getASTContext()) MemberSpecializationInfo(RD, TSK);
+}
+
+TemplateSpecializationKind CXXRecordDecl::getTemplateSpecializationKind() const{
+ if (const ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(this))
+ return Spec->getSpecializationKind();
+
+ if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo())
+ return MSInfo->getTemplateSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
+void
+CXXRecordDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK) {
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(this)) {
+ Spec->setSpecializationKind(TSK);
+ return;
+ }
+
+ if (MemberSpecializationInfo *MSInfo = getMemberSpecializationInfo()) {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ return;
+ }
+
+ llvm_unreachable("Not a class template or member class specialization");
+}
+
+CXXDestructorDecl *CXXRecordDecl::getDestructor() const {
+ ASTContext &Context = getASTContext();
+ QualType ClassType = Context.getTypeDeclType(this);
+
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXDestructorName(
+ Context.getCanonicalType(ClassType));
+
+ DeclContext::lookup_const_iterator I, E;
+ llvm::tie(I, E) = lookup(Name);
+ if (I == E)
+ return 0;
+
+ CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(*I);
+ return Dtor;
+}
+
+void CXXRecordDecl::completeDefinition() {
+ completeDefinition(0);
+}
+
+void CXXRecordDecl::completeDefinition(CXXFinalOverriderMap *FinalOverriders) {
+ RecordDecl::completeDefinition();
+
+ if (hasObjectMember() && getASTContext().getLangOpts().ObjCAutoRefCount) {
+ // Objective-C Automatic Reference Counting:
+ // If a class has a non-static data member of Objective-C pointer
+ // type (or array thereof), it is a non-POD type and its
+ // default constructor (if any), copy constructor, copy assignment
+ // operator, and destructor are non-trivial.
+ struct DefinitionData &Data = data();
+ Data.PlainOldData = false;
+ Data.HasTrivialDefaultConstructor = false;
+ Data.HasTrivialCopyConstructor = false;
+ Data.HasTrivialCopyAssignment = false;
+ Data.HasTrivialDestructor = false;
+ Data.HasIrrelevantDestructor = false;
+ }
+
+ // If the class may be abstract (but hasn't been marked as such), check for
+ // any pure final overriders.
+ if (mayBeAbstract()) {
+ CXXFinalOverriderMap MyFinalOverriders;
+ if (!FinalOverriders) {
+ getFinalOverriders(MyFinalOverriders);
+ FinalOverriders = &MyFinalOverriders;
+ }
+
+ bool Done = false;
+ for (CXXFinalOverriderMap::iterator M = FinalOverriders->begin(),
+ MEnd = FinalOverriders->end();
+ M != MEnd && !Done; ++M) {
+ for (OverridingMethods::iterator SO = M->second.begin(),
+ SOEnd = M->second.end();
+ SO != SOEnd && !Done; ++SO) {
+ assert(SO->second.size() > 0 &&
+ "All virtual functions have overriding virtual functions");
+
+ // C++ [class.abstract]p4:
+ // A class is abstract if it contains or inherits at least one
+ // pure virtual function for which the final overrider is pure
+ // virtual.
+ if (SO->second.front().Method->isPure()) {
+ data().Abstract = true;
+ Done = true;
+ break;
+ }
+ }
+ }
+ }
+
+ // Set access bits correctly on the directly-declared conversions.
+ for (UnresolvedSetIterator I = data().Conversions.begin(),
+ E = data().Conversions.end();
+ I != E; ++I)
+ data().Conversions.setAccess(I, (*I)->getAccess());
+}
+
+bool CXXRecordDecl::mayBeAbstract() const {
+ if (data().Abstract || isInvalidDecl() || !data().Polymorphic ||
+ isDependentContext())
+ return false;
+
+ for (CXXRecordDecl::base_class_const_iterator B = bases_begin(),
+ BEnd = bases_end();
+ B != BEnd; ++B) {
+ CXXRecordDecl *BaseDecl
+ = cast<CXXRecordDecl>(B->getType()->getAs<RecordType>()->getDecl());
+ if (BaseDecl->isAbstract())
+ return true;
+ }
+
+ return false;
+}
+
+void CXXMethodDecl::anchor() { }
+
+CXXMethodDecl *
+CXXMethodDecl::Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isStatic, StorageClass SCAsWritten, bool isInline,
+ bool isConstexpr, SourceLocation EndLocation) {
+ return new (C) CXXMethodDecl(CXXMethod, RD, StartLoc, NameInfo, T, TInfo,
+ isStatic, SCAsWritten, isInline, isConstexpr,
+ EndLocation);
+}
+
+CXXMethodDecl *CXXMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXMethodDecl));
+ return new (Mem) CXXMethodDecl(CXXMethod, 0, SourceLocation(),
+ DeclarationNameInfo(), QualType(),
+ 0, false, SC_None, false, false,
+ SourceLocation());
+}
+
+bool CXXMethodDecl::isUsualDeallocationFunction() const {
+ if (getOverloadedOperator() != OO_Delete &&
+ getOverloadedOperator() != OO_Array_Delete)
+ return false;
+
+ // C++ [basic.stc.dynamic.deallocation]p2:
+ // A template instance is never a usual deallocation function,
+ // regardless of its signature.
+ if (getPrimaryTemplate())
+ return false;
+
+ // C++ [basic.stc.dynamic.deallocation]p2:
+ // If a class T has a member deallocation function named operator delete
+ // with exactly one parameter, then that function is a usual (non-placement)
+ // deallocation function. [...]
+ if (getNumParams() == 1)
+ return true;
+
+ // C++ [basic.stc.dynamic.deallocation]p2:
+ // [...] If class T does not declare such an operator delete but does
+ // declare a member deallocation function named operator delete with
+ // exactly two parameters, the second of which has type std::size_t (18.1),
+ // then this function is a usual deallocation function.
+ ASTContext &Context = getASTContext();
+ if (getNumParams() != 2 ||
+ !Context.hasSameUnqualifiedType(getParamDecl(1)->getType(),
+ Context.getSizeType()))
+ return false;
+
+ // This function is a usual deallocation function if there are no
+ // single-parameter deallocation functions of the same kind.
+ for (DeclContext::lookup_const_result R = getDeclContext()->lookup(getDeclName());
+ R.first != R.second; ++R.first) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*R.first))
+ if (FD->getNumParams() == 1)
+ return false;
+ }
+
+ return true;
+}
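+
+// Illustration of [basic.stc.dynamic.deallocation]p2 as checked above
+// (hypothetical classes; assumes <cstddef> for std::size_t):
+//
+//   struct A { void operator delete(void*); };               // usual
+//   struct B { void operator delete(void*, std::size_t); };  // usual
+//   struct C {
+//     void operator delete(void*);               // usual
+//     void operator delete(void*, std::size_t);  // not usual: the
+//   };                                           // one-parameter form exists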
+
+bool CXXMethodDecl::isCopyAssignmentOperator() const {
+ // C++0x [class.copy]p17:
+ // A user-declared copy assignment operator X::operator= is a non-static
+ // non-template member function of class X with exactly one parameter of
+ // type X, X&, const X&, volatile X& or const volatile X&.
+ if (/*operator=*/getOverloadedOperator() != OO_Equal ||
+ /*non-static*/ isStatic() ||
+ /*non-template*/getPrimaryTemplate() || getDescribedFunctionTemplate())
+ return false;
+
+ QualType ParamType = getParamDecl(0)->getType();
+ if (const LValueReferenceType *Ref = ParamType->getAs<LValueReferenceType>())
+ ParamType = Ref->getPointeeType();
+
+ ASTContext &Context = getASTContext();
+ QualType ClassType
+ = Context.getCanonicalType(Context.getTypeDeclType(getParent()));
+ return Context.hasSameUnqualifiedType(ClassType, ParamType);
+}
+
+bool CXXMethodDecl::isMoveAssignmentOperator() const {
+ // C++0x [class.copy]p19:
+ // A user-declared move assignment operator X::operator= is a non-static
+ // non-template member function of class X with exactly one parameter of type
+ // X&&, const X&&, volatile X&&, or const volatile X&&.
+ if (getOverloadedOperator() != OO_Equal || isStatic() ||
+ getPrimaryTemplate() || getDescribedFunctionTemplate())
+ return false;
+
+ QualType ParamType = getParamDecl(0)->getType();
+ if (!isa<RValueReferenceType>(ParamType))
+ return false;
+ ParamType = ParamType->getPointeeType();
+
+ ASTContext &Context = getASTContext();
+ QualType ClassType
+ = Context.getCanonicalType(Context.getTypeDeclType(getParent()));
+ return Context.hasSameUnqualifiedType(ClassType, ParamType);
+}
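+
+// For illustration (hypothetical class X): the two predicates above classify
+// assignment operators purely by their single parameter's type.
+//
+//   struct X {
+//     X &operator=(const X &);  // copy assignment operator
+//     X &operator=(X &&);       // move assignment operator
+//     X &operator=(int);        // neither
+//   };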
+
+void CXXMethodDecl::addOverriddenMethod(const CXXMethodDecl *MD) {
+ assert(MD->isCanonicalDecl() && "Method is not canonical!");
+ assert(!MD->getParent()->isDependentContext() &&
+ "Can't add an overridden method to a class template!");
+ assert(MD->isVirtual() && "Method is not virtual!");
+
+ getASTContext().addOverriddenMethod(this, MD);
+}
+
+CXXMethodDecl::method_iterator CXXMethodDecl::begin_overridden_methods() const {
+ if (isa<CXXConstructorDecl>(this)) return 0;
+ return getASTContext().overridden_methods_begin(this);
+}
+
+CXXMethodDecl::method_iterator CXXMethodDecl::end_overridden_methods() const {
+ if (isa<CXXConstructorDecl>(this)) return 0;
+ return getASTContext().overridden_methods_end(this);
+}
+
+unsigned CXXMethodDecl::size_overridden_methods() const {
+ if (isa<CXXConstructorDecl>(this)) return 0;
+ return getASTContext().overridden_methods_size(this);
+}
+
+QualType CXXMethodDecl::getThisType(ASTContext &C) const {
+ // C++ 9.3.2p1: The type of this in a member function of a class X is X*.
+ // If the member function is declared const, the type of this is const X*,
+ // if the member function is declared volatile, the type of this is
+ // volatile X*, and if the member function is declared const volatile,
+ // the type of this is const volatile X*.
+
+ assert(isInstance() && "No 'this' for static methods!");
+
+ QualType ClassTy = C.getTypeDeclType(getParent());
+ ClassTy = C.getQualifiedType(ClassTy,
+ Qualifiers::fromCVRMask(getTypeQualifiers()));
+ return C.getPointerType(ClassTy);
+}
+
+bool CXXMethodDecl::hasInlineBody() const {
+ // If this function is a template instantiation, look at the template from
+ // which it was instantiated.
+ const FunctionDecl *CheckFn = getTemplateInstantiationPattern();
+ if (!CheckFn)
+ CheckFn = this;
+
+ const FunctionDecl *fn;
+ return CheckFn->hasBody(fn) && !fn->isOutOfLine();
+}
+
+bool CXXMethodDecl::isLambdaStaticInvoker() const {
+ return getParent()->isLambda() &&
+ getIdentifier() && getIdentifier()->getName() == "__invoke";
+}
+
+
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ TypeSourceInfo *TInfo, bool IsVirtual,
+ SourceLocation L, Expr *Init,
+ SourceLocation R,
+ SourceLocation EllipsisLoc)
+ : Initializee(TInfo), MemberOrEllipsisLocation(EllipsisLoc), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(IsVirtual),
+ IsWritten(false), SourceOrderOrNumArrayIndices(0)
+{
+}
+
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ FieldDecl *Member,
+ SourceLocation MemberLoc,
+ SourceLocation L, Expr *Init,
+ SourceLocation R)
+ : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
+ IsWritten(false), SourceOrderOrNumArrayIndices(0)
+{
+}
+
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ IndirectFieldDecl *Member,
+ SourceLocation MemberLoc,
+ SourceLocation L, Expr *Init,
+ SourceLocation R)
+ : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsDelegating(false), IsVirtual(false),
+ IsWritten(false), SourceOrderOrNumArrayIndices(0)
+{
+}
+
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ TypeSourceInfo *TInfo,
+ SourceLocation L, Expr *Init,
+ SourceLocation R)
+ : Initializee(TInfo), MemberOrEllipsisLocation(), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsDelegating(true), IsVirtual(false),
+ IsWritten(false), SourceOrderOrNumArrayIndices(0)
+{
+}
+
+CXXCtorInitializer::CXXCtorInitializer(ASTContext &Context,
+ FieldDecl *Member,
+ SourceLocation MemberLoc,
+ SourceLocation L, Expr *Init,
+ SourceLocation R,
+ VarDecl **Indices,
+ unsigned NumIndices)
+ : Initializee(Member), MemberOrEllipsisLocation(MemberLoc), Init(Init),
+ LParenLoc(L), RParenLoc(R), IsVirtual(false),
+ IsWritten(false), SourceOrderOrNumArrayIndices(NumIndices)
+{
+ VarDecl **MyIndices = reinterpret_cast<VarDecl **> (this + 1);
+ memcpy(MyIndices, Indices, NumIndices * sizeof(VarDecl *));
+}
+
+CXXCtorInitializer *CXXCtorInitializer::Create(ASTContext &Context,
+ FieldDecl *Member,
+ SourceLocation MemberLoc,
+ SourceLocation L, Expr *Init,
+ SourceLocation R,
+ VarDecl **Indices,
+ unsigned NumIndices) {
+ void *Mem = Context.Allocate(sizeof(CXXCtorInitializer) +
+ sizeof(VarDecl *) * NumIndices,
+ llvm::alignOf<CXXCtorInitializer>());
+ return new (Mem) CXXCtorInitializer(Context, Member, MemberLoc, L, Init, R,
+ Indices, NumIndices);
+}
+
+TypeLoc CXXCtorInitializer::getBaseClassLoc() const {
+ if (isBaseInitializer())
+ return Initializee.get<TypeSourceInfo*>()->getTypeLoc();
+ else
+ return TypeLoc();
+}
+
+const Type *CXXCtorInitializer::getBaseClass() const {
+ if (isBaseInitializer())
+ return Initializee.get<TypeSourceInfo*>()->getType().getTypePtr();
+ else
+ return 0;
+}
+
+SourceLocation CXXCtorInitializer::getSourceLocation() const {
+ if (isAnyMemberInitializer())
+ return getMemberLocation();
+
+ if (isInClassMemberInitializer())
+ return getAnyMember()->getLocation();
+
+ if (TypeSourceInfo *TSInfo = Initializee.get<TypeSourceInfo*>())
+ return TSInfo->getTypeLoc().getLocalSourceRange().getBegin();
+
+ return SourceLocation();
+}
+
+SourceRange CXXCtorInitializer::getSourceRange() const {
+ if (isInClassMemberInitializer()) {
+ FieldDecl *D = getAnyMember();
+ if (Expr *I = D->getInClassInitializer())
+ return I->getSourceRange();
+ return SourceRange();
+ }
+
+ return SourceRange(getSourceLocation(), getRParenLoc());
+}
+
+void CXXConstructorDecl::anchor() { }
+
+CXXConstructorDecl *
+CXXConstructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXConstructorDecl));
+ return new (Mem) CXXConstructorDecl(0, SourceLocation(),DeclarationNameInfo(),
+ QualType(), 0, false, false, false,false);
+}
+
+CXXConstructorDecl *
+CXXConstructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isExplicit, bool isInline,
+ bool isImplicitlyDeclared, bool isConstexpr) {
+ assert(NameInfo.getName().getNameKind()
+ == DeclarationName::CXXConstructorName &&
+ "Name must refer to a constructor");
+ return new (C) CXXConstructorDecl(RD, StartLoc, NameInfo, T, TInfo,
+ isExplicit, isInline, isImplicitlyDeclared,
+ isConstexpr);
+}
+
+CXXConstructorDecl *CXXConstructorDecl::getTargetConstructor() const {
+ assert(isDelegatingConstructor() && "Not a delegating constructor!");
+ Expr *E = (*init_begin())->getInit()->IgnoreImplicit();
+ if (CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(E))
+ return Construct->getConstructor();
+
+ return 0;
+}
+
+bool CXXConstructorDecl::isDefaultConstructor() const {
+ // C++ [class.ctor]p5:
+ // A default constructor for a class X is a constructor of class
+ // X that can be called without an argument.
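+ // For example, 'struct X { X(int i = 0); };' declares a default constructor,
+ // because 'X()' can be called without supplying an argument.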
+ return (getNumParams() == 0) ||
+ (getNumParams() > 0 && getParamDecl(0)->hasDefaultArg());
+}
+
+bool
+CXXConstructorDecl::isCopyConstructor(unsigned &TypeQuals) const {
+ return isCopyOrMoveConstructor(TypeQuals) &&
+ getParamDecl(0)->getType()->isLValueReferenceType();
+}
+
+bool CXXConstructorDecl::isMoveConstructor(unsigned &TypeQuals) const {
+ return isCopyOrMoveConstructor(TypeQuals) &&
+ getParamDecl(0)->getType()->isRValueReferenceType();
+}
+
+/// \brief Determine whether this is a copy or move constructor.
+bool CXXConstructorDecl::isCopyOrMoveConstructor(unsigned &TypeQuals) const {
+ // C++ [class.copy]p2:
+ // A non-template constructor for class X is a copy constructor
+ // if its first parameter is of type X&, const X&, volatile X& or
+ // const volatile X&, and either there are no other parameters
+ // or else all other parameters have default arguments (8.3.6).
+ // C++0x [class.copy]p3:
+ // A non-template constructor for class X is a move constructor if its
+ // first parameter is of type X&&, const X&&, volatile X&&, or
+ // const volatile X&&, and either there are no other parameters or else
+ // all other parameters have default arguments.
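+ // For example, in 'struct X { X(const X &); X(X &&); };' the first
+ // constructor is a copy constructor and the second is a move constructor.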
+ if ((getNumParams() < 1) ||
+ (getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()) ||
+ (getPrimaryTemplate() != 0) ||
+ (getDescribedFunctionTemplate() != 0))
+ return false;
+
+ const ParmVarDecl *Param = getParamDecl(0);
+
+ // Do we have a reference type?
+ const ReferenceType *ParamRefType = Param->getType()->getAs<ReferenceType>();
+ if (!ParamRefType)
+ return false;
+
+ // Is it a reference to our class type?
+ ASTContext &Context = getASTContext();
+
+ CanQualType PointeeType
+ = Context.getCanonicalType(ParamRefType->getPointeeType());
+ CanQualType ClassTy
+ = Context.getCanonicalType(Context.getTagDeclType(getParent()));
+ if (PointeeType.getUnqualifiedType() != ClassTy)
+ return false;
+
+ // FIXME: other qualifiers?
+
+ // We have a copy or move constructor.
+ TypeQuals = PointeeType.getCVRQualifiers();
+ return true;
+}
+
+bool CXXConstructorDecl::isConvertingConstructor(bool AllowExplicit) const {
+ // C++ [class.conv.ctor]p1:
+ // A constructor declared without the function-specifier explicit
+ // that can be called with a single parameter specifies a
+ // conversion from the type of its first parameter to the type of
+ // its class. Such a constructor is called a converting
+ // constructor.
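+ // For example, 'struct X { X(int); };' declares a converting constructor,
+ // which permits initializations such as 'X x = 42;'.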
+ if (isExplicit() && !AllowExplicit)
+ return false;
+
+ return (getNumParams() == 0 &&
+ getType()->getAs<FunctionProtoType>()->isVariadic()) ||
+ (getNumParams() == 1) ||
+ (getNumParams() > 1 && getParamDecl(1)->hasDefaultArg());
+}
+
+bool CXXConstructorDecl::isSpecializationCopyingObject() const {
+ if ((getNumParams() < 1) ||
+ (getNumParams() > 1 && !getParamDecl(1)->hasDefaultArg()) ||
+ (getPrimaryTemplate() == 0) ||
+ (getDescribedFunctionTemplate() != 0))
+ return false;
+
+ const ParmVarDecl *Param = getParamDecl(0);
+
+ ASTContext &Context = getASTContext();
+ CanQualType ParamType = Context.getCanonicalType(Param->getType());
+
+ // Is it the same as our class type?
+ CanQualType ClassTy
+ = Context.getCanonicalType(Context.getTagDeclType(getParent()));
+ if (ParamType.getUnqualifiedType() != ClassTy)
+ return false;
+
+ return true;
+}
+
+const CXXConstructorDecl *CXXConstructorDecl::getInheritedConstructor() const {
+ // Hack: we store the inherited constructor in the overridden method table
+ method_iterator It = getASTContext().overridden_methods_begin(this);
+ if (It == getASTContext().overridden_methods_end(this))
+ return 0;
+
+ return cast<CXXConstructorDecl>(*It);
+}
+
+void
+CXXConstructorDecl::setInheritedConstructor(const CXXConstructorDecl *BaseCtor){
+ // Hack: we store the inherited constructor in the overridden method table
+ assert(getASTContext().overridden_methods_size(this) == 0 &&
+ "Base ctor already set.");
+ getASTContext().addOverriddenMethod(this, BaseCtor);
+}
+
+void CXXDestructorDecl::anchor() { }
+
+CXXDestructorDecl *
+CXXDestructorDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXDestructorDecl));
+ return new (Mem) CXXDestructorDecl(0, SourceLocation(), DeclarationNameInfo(),
+ QualType(), 0, false, false);
+}
+
+CXXDestructorDecl *
+CXXDestructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isInline, bool isImplicitlyDeclared) {
+ assert(NameInfo.getName().getNameKind()
+ == DeclarationName::CXXDestructorName &&
+ "Name must refer to a destructor");
+ return new (C) CXXDestructorDecl(RD, StartLoc, NameInfo, T, TInfo, isInline,
+ isImplicitlyDeclared);
+}
+
+void CXXConversionDecl::anchor() { }
+
+CXXConversionDecl *
+CXXConversionDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(CXXConversionDecl));
+ return new (Mem) CXXConversionDecl(0, SourceLocation(), DeclarationNameInfo(),
+ QualType(), 0, false, false, false,
+ SourceLocation());
+}
+
+CXXConversionDecl *
+CXXConversionDecl::Create(ASTContext &C, CXXRecordDecl *RD,
+ SourceLocation StartLoc,
+ const DeclarationNameInfo &NameInfo,
+ QualType T, TypeSourceInfo *TInfo,
+ bool isInline, bool isExplicit,
+ bool isConstexpr, SourceLocation EndLocation) {
+ assert(NameInfo.getName().getNameKind()
+ == DeclarationName::CXXConversionFunctionName &&
+ "Name must refer to a conversion function");
+ return new (C) CXXConversionDecl(RD, StartLoc, NameInfo, T, TInfo,
+ isInline, isExplicit, isConstexpr,
+ EndLocation);
+}
+
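+// True for the implicit conversion function that converts a lambda closure
+// object to an Objective-C block pointer.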
+bool CXXConversionDecl::isLambdaToBlockPointerConversion() const {
+ return isImplicit() && getParent()->isLambda() &&
+ getConversionType()->isBlockPointerType();
+}
+
+void LinkageSpecDecl::anchor() { }
+
+LinkageSpecDecl *LinkageSpecDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation ExternLoc,
+ SourceLocation LangLoc,
+ LanguageIDs Lang,
+ SourceLocation RBraceLoc) {
+ return new (C) LinkageSpecDecl(DC, ExternLoc, LangLoc, Lang, RBraceLoc);
+}
+
+LinkageSpecDecl *LinkageSpecDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(LinkageSpecDecl));
+ return new (Mem) LinkageSpecDecl(0, SourceLocation(), SourceLocation(),
+ lang_c, SourceLocation());
+}
+
+void UsingDirectiveDecl::anchor() { }
+
+UsingDirectiveDecl *UsingDirectiveDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ SourceLocation NamespaceLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation IdentLoc,
+ NamedDecl *Used,
+ DeclContext *CommonAncestor) {
+ if (NamespaceDecl *NS = dyn_cast_or_null<NamespaceDecl>(Used))
+ Used = NS->getOriginalNamespace();
+ return new (C) UsingDirectiveDecl(DC, L, NamespaceLoc, QualifierLoc,
+ IdentLoc, Used, CommonAncestor);
+}
+
+UsingDirectiveDecl *
+UsingDirectiveDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingDirectiveDecl));
+ return new (Mem) UsingDirectiveDecl(0, SourceLocation(), SourceLocation(),
+ NestedNameSpecifierLoc(),
+ SourceLocation(), 0, 0);
+}
+
+NamespaceDecl *UsingDirectiveDecl::getNominatedNamespace() {
+ if (NamespaceAliasDecl *NA =
+ dyn_cast_or_null<NamespaceAliasDecl>(NominatedNamespace))
+ return NA->getNamespace();
+ return cast_or_null<NamespaceDecl>(NominatedNamespace);
+}
+
+void NamespaceDecl::anchor() { }
+
+NamespaceDecl::NamespaceDecl(DeclContext *DC, bool Inline,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ NamespaceDecl *PrevDecl)
+ : NamedDecl(Namespace, DC, IdLoc, Id), DeclContext(Namespace),
+ LocStart(StartLoc), RBraceLoc(), AnonOrFirstNamespaceAndInline(0, Inline)
+{
+ setPreviousDeclaration(PrevDecl);
+
+ if (PrevDecl)
+ AnonOrFirstNamespaceAndInline.setPointer(PrevDecl->getOriginalNamespace());
+}
+
+NamespaceDecl *NamespaceDecl::Create(ASTContext &C, DeclContext *DC,
+ bool Inline, SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ NamespaceDecl *PrevDecl) {
+ return new (C) NamespaceDecl(DC, Inline, StartLoc, IdLoc, Id, PrevDecl);
+}
+
+NamespaceDecl *NamespaceDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NamespaceDecl));
+ return new (Mem) NamespaceDecl(0, false, SourceLocation(), SourceLocation(),
+ 0, 0);
+}
+
+void NamespaceAliasDecl::anchor() { }
+
+NamespaceAliasDecl *NamespaceAliasDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation UsingLoc,
+ SourceLocation AliasLoc,
+ IdentifierInfo *Alias,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation IdentLoc,
+ NamedDecl *Namespace) {
+ if (NamespaceDecl *NS = dyn_cast_or_null<NamespaceDecl>(Namespace))
+ Namespace = NS->getOriginalNamespace();
+ return new (C) NamespaceAliasDecl(DC, UsingLoc, AliasLoc, Alias,
+ QualifierLoc, IdentLoc, Namespace);
+}
+
+NamespaceAliasDecl *
+NamespaceAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NamespaceAliasDecl));
+ return new (Mem) NamespaceAliasDecl(0, SourceLocation(), SourceLocation(), 0,
+ NestedNameSpecifierLoc(),
+ SourceLocation(), 0);
+}
+
+void UsingShadowDecl::anchor() { }
+
+UsingShadowDecl *
+UsingShadowDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingShadowDecl));
+ return new (Mem) UsingShadowDecl(0, SourceLocation(), 0, 0);
+}
+
+UsingDecl *UsingShadowDecl::getUsingDecl() const {
+ const UsingShadowDecl *Shadow = this;
+ while (const UsingShadowDecl *NextShadow =
+ dyn_cast<UsingShadowDecl>(Shadow->UsingOrNextShadow))
+ Shadow = NextShadow;
+ return cast<UsingDecl>(Shadow->UsingOrNextShadow);
+}
+
+void UsingDecl::anchor() { }
+
+void UsingDecl::addShadowDecl(UsingShadowDecl *S) {
+ assert(std::find(shadow_begin(), shadow_end(), S) == shadow_end() &&
+ "declaration already in set");
+ assert(S->getUsingDecl() == this);
+
+ if (FirstUsingShadow.getPointer())
+ S->UsingOrNextShadow = FirstUsingShadow.getPointer();
+ FirstUsingShadow.setPointer(S);
+}
+
+void UsingDecl::removeShadowDecl(UsingShadowDecl *S) {
+ assert(std::find(shadow_begin(), shadow_end(), S) != shadow_end() &&
+ "declaration not in set");
+ assert(S->getUsingDecl() == this);
+
+ // Remove S from the shadow decl chain. This is O(n) but hopefully rare.
+
+ if (FirstUsingShadow.getPointer() == S) {
+ FirstUsingShadow.setPointer(
+ dyn_cast<UsingShadowDecl>(S->UsingOrNextShadow));
+ S->UsingOrNextShadow = this;
+ return;
+ }
+
+ UsingShadowDecl *Prev = FirstUsingShadow.getPointer();
+ while (Prev->UsingOrNextShadow != S)
+ Prev = cast<UsingShadowDecl>(Prev->UsingOrNextShadow);
+ Prev->UsingOrNextShadow = S->UsingOrNextShadow;
+ S->UsingOrNextShadow = this;
+}
+
+UsingDecl *UsingDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation UL,
+ NestedNameSpecifierLoc QualifierLoc,
+ const DeclarationNameInfo &NameInfo,
+ bool IsTypeNameArg) {
+ return new (C) UsingDecl(DC, UL, QualifierLoc, NameInfo, IsTypeNameArg);
+}
+
+UsingDecl *UsingDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UsingDecl));
+ return new (Mem) UsingDecl(0, SourceLocation(), NestedNameSpecifierLoc(),
+ DeclarationNameInfo(), false);
+}
+
+void UnresolvedUsingValueDecl::anchor() { }
+
+UnresolvedUsingValueDecl *
+UnresolvedUsingValueDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation UsingLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ const DeclarationNameInfo &NameInfo) {
+ return new (C) UnresolvedUsingValueDecl(DC, C.DependentTy, UsingLoc,
+ QualifierLoc, NameInfo);
+}
+
+UnresolvedUsingValueDecl *
+UnresolvedUsingValueDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(UnresolvedUsingValueDecl));
+ return new (Mem) UnresolvedUsingValueDecl(0, QualType(), SourceLocation(),
+ NestedNameSpecifierLoc(),
+ DeclarationNameInfo());
+}
+
+void UnresolvedUsingTypenameDecl::anchor() { }
+
+UnresolvedUsingTypenameDecl *
+UnresolvedUsingTypenameDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation UsingLoc,
+ SourceLocation TypenameLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TargetNameLoc,
+ DeclarationName TargetName) {
+ return new (C) UnresolvedUsingTypenameDecl(DC, UsingLoc, TypenameLoc,
+ QualifierLoc, TargetNameLoc,
+ TargetName.getAsIdentifierInfo());
+}
+
+UnresolvedUsingTypenameDecl *
+UnresolvedUsingTypenameDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID,
+ sizeof(UnresolvedUsingTypenameDecl));
+ return new (Mem) UnresolvedUsingTypenameDecl(0, SourceLocation(),
+ SourceLocation(),
+ NestedNameSpecifierLoc(),
+ SourceLocation(),
+ 0);
+}
+
+void StaticAssertDecl::anchor() { }
+
+StaticAssertDecl *StaticAssertDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StaticAssertLoc,
+ Expr *AssertExpr,
+ StringLiteral *Message,
+ SourceLocation RParenLoc) {
+ return new (C) StaticAssertDecl(DC, StaticAssertLoc, AssertExpr, Message,
+ RParenLoc);
+}
+
+StaticAssertDecl *StaticAssertDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(StaticAssertDecl));
+ return new (Mem) StaticAssertDecl(0, SourceLocation(), 0, 0,SourceLocation());
+}
+
+static const char *getAccessName(AccessSpecifier AS) {
+ switch (AS) {
+ case AS_none:
+ llvm_unreachable("Invalid access specifier!");
+ case AS_public:
+ return "public";
+ case AS_private:
+ return "private";
+ case AS_protected:
+ return "protected";
+ }
+ llvm_unreachable("Invalid access specifier!");
+}
+
+const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
+ AccessSpecifier AS) {
+ return DB << getAccessName(AS);
+}
+
+const PartialDiagnostic &clang::operator<<(const PartialDiagnostic &DB,
+ AccessSpecifier AS) {
+ return DB << getAccessName(AS);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp b/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp
new file mode 100644
index 0000000..6e3bd8d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DeclFriend.cpp
@@ -0,0 +1,48 @@
+//===--- DeclFriend.cpp - C++ Friend Declaration AST Node Implementation --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the AST classes related to C++ friend
+// declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclTemplate.h"
+using namespace clang;
+
+void FriendDecl::anchor() { }
+
+FriendDecl *FriendDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ FriendUnion Friend,
+ SourceLocation FriendL) {
+#ifndef NDEBUG
+ if (Friend.is<NamedDecl*>()) {
+ NamedDecl *D = Friend.get<NamedDecl*>();
+ assert(isa<FunctionDecl>(D) ||
+ isa<CXXRecordDecl>(D) ||
+ isa<FunctionTemplateDecl>(D) ||
+ isa<ClassTemplateDecl>(D));
+
+ // As a temporary hack, we permit template instantiation to point
+ // to the original declaration when instantiating members.
+ assert(D->getFriendObjectKind() ||
+ (cast<CXXRecordDecl>(DC)->getTemplateSpecializationKind()));
+ }
+#endif
+
+ FriendDecl *FD = new (C) FriendDecl(DC, L, Friend, FriendL);
+ cast<CXXRecordDecl>(DC)->pushFriendDecl(FD);
+ return FD;
+}
+
+FriendDecl *FriendDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FriendDecl));
+ return new (Mem) FriendDecl(EmptyShell());
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclGroup.cpp b/contrib/llvm/tools/clang/lib/AST/DeclGroup.cpp
new file mode 100644
index 0000000..036acc2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DeclGroup.cpp
@@ -0,0 +1,32 @@
+//===--- DeclGroup.cpp - Classes for representing groups of Decls -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DeclGroup and DeclGroupRef classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclGroup.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/ASTContext.h"
+#include "llvm/Support/Allocator.h"
+using namespace clang;
+
+DeclGroup* DeclGroup::Create(ASTContext &C, Decl **Decls, unsigned NumDecls) {
+ assert(NumDecls > 1 && "Invalid DeclGroup");
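+ // The Decl* array is tail-allocated immediately after the DeclGroup object
+ // itself; the constructor copies the declarations into that storage.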
+ unsigned Size = sizeof(DeclGroup) + sizeof(Decl*) * NumDecls;
+ void* Mem = C.Allocate(Size, llvm::AlignOf<DeclGroup>::Alignment);
+ new (Mem) DeclGroup(NumDecls, Decls);
+ return static_cast<DeclGroup*>(Mem);
+}
+
+DeclGroup::DeclGroup(unsigned numdecls, Decl** decls) : NumDecls(numdecls) {
+ assert(numdecls > 0);
+ assert(decls);
+ memcpy(this+1, decls, numdecls * sizeof(*decls));
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
new file mode 100644
index 0000000..2370d3c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DeclObjC.cpp
@@ -0,0 +1,1326 @@
+//===--- DeclObjC.cpp - ObjC Declaration AST Node Implementation ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Objective-C related Decl classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "llvm/ADT/STLExtras.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// ObjCListBase
+//===----------------------------------------------------------------------===//
+
+void ObjCListBase::set(void *const* InList, unsigned Elts, ASTContext &Ctx) {
+ List = 0;
+ if (Elts == 0) return; // Setting to an empty list is a noop.
+
+
+ List = new (Ctx) void*[Elts];
+ NumElts = Elts;
+ memcpy(List, InList, sizeof(void*)*Elts);
+}
+
+void ObjCProtocolList::set(ObjCProtocolDecl* const* InList, unsigned Elts,
+ const SourceLocation *Locs, ASTContext &Ctx) {
+ if (Elts == 0)
+ return;
+
+ Locations = new (Ctx) SourceLocation[Elts];
+ memcpy(Locations, Locs, sizeof(SourceLocation) * Elts);
+ set(InList, Elts, Ctx);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCInterfaceDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCContainerDecl::anchor() { }
+
+/// getIvarDecl - This method looks up an ivar in this ObjCContainerDecl.
+///
+ObjCIvarDecl *
+ObjCContainerDecl::getIvarDecl(IdentifierInfo *Id) const {
+ lookup_const_iterator Ivar, IvarEnd;
+ for (llvm::tie(Ivar, IvarEnd) = lookup(Id); Ivar != IvarEnd; ++Ivar) {
+ if (ObjCIvarDecl *ivar = dyn_cast<ObjCIvarDecl>(*Ivar))
+ return ivar;
+ }
+ return 0;
+}
+
+// Get the local instance/class method declared in this container.
+ObjCMethodDecl *
+ObjCContainerDecl::getMethod(Selector Sel, bool isInstance) const {
+ // Since instance & class methods can have the same name, the loop below
+ // ensures we get the correct method.
+ //
+ // @interface Whatever
+ // - (int) class_method;
+ // + (float) class_method;
+ // @end
+ //
+ lookup_const_iterator Meth, MethEnd;
+ for (llvm::tie(Meth, MethEnd) = lookup(Sel); Meth != MethEnd; ++Meth) {
+ ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(*Meth);
+ if (MD && MD->isInstanceMethod() == isInstance)
+ return MD;
+ }
+ return 0;
+}
+
+ObjCPropertyDecl *
+ObjCPropertyDecl::findPropertyDecl(const DeclContext *DC,
+ IdentifierInfo *propertyID) {
+
+ DeclContext::lookup_const_iterator I, E;
+ llvm::tie(I, E) = DC->lookup(propertyID);
+ for ( ; I != E; ++I)
+ if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(*I))
+ return PD;
+
+ return 0;
+}
+
+/// FindPropertyDeclaration - Finds the declaration of the property whose name
+/// is given in 'PropertyId' and returns it. It returns 0 if not found.
+ObjCPropertyDecl *
+ObjCContainerDecl::FindPropertyDeclaration(IdentifierInfo *PropertyId) const {
+
+ if (ObjCPropertyDecl *PD =
+ ObjCPropertyDecl::findPropertyDecl(cast<DeclContext>(this), PropertyId))
+ return PD;
+
+ switch (getKind()) {
+ default:
+ break;
+ case Decl::ObjCProtocol: {
+ const ObjCProtocolDecl *PID = cast<ObjCProtocolDecl>(this);
+ for (ObjCProtocolDecl::protocol_iterator I = PID->protocol_begin(),
+ E = PID->protocol_end(); I != E; ++I)
+ if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId))
+ return P;
+ break;
+ }
+ case Decl::ObjCInterface: {
+ const ObjCInterfaceDecl *OID = cast<ObjCInterfaceDecl>(this);
+ // Look through categories.
+ for (ObjCCategoryDecl *Cat = OID->getCategoryList();
+ Cat; Cat = Cat->getNextClassCategory())
+ if (!Cat->IsClassExtension())
+ if (ObjCPropertyDecl *P = Cat->FindPropertyDeclaration(PropertyId))
+ return P;
+
+ // Look through protocols.
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ I = OID->all_referenced_protocol_begin(),
+ E = OID->all_referenced_protocol_end(); I != E; ++I)
+ if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId))
+ return P;
+
+ // Finally, check the super class.
+ if (const ObjCInterfaceDecl *superClass = OID->getSuperClass())
+ return superClass->FindPropertyDeclaration(PropertyId);
+ break;
+ }
+ case Decl::ObjCCategory: {
+ const ObjCCategoryDecl *OCD = cast<ObjCCategoryDecl>(this);
+ // Look through protocols.
+ if (!OCD->IsClassExtension())
+ for (ObjCCategoryDecl::protocol_iterator
+ I = OCD->protocol_begin(), E = OCD->protocol_end(); I != E; ++I)
+ if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId))
+ return P;
+
+ break;
+ }
+ }
+ return 0;
+}
+
+void ObjCInterfaceDecl::anchor() { }
+
+/// FindPropertyVisibleInPrimaryClass - Finds the declaration of the property
+/// with name 'PropertyId' in the primary class, including those in protocols
+/// (direct or indirect) used by the primary class.
+///
+ObjCPropertyDecl *
+ObjCInterfaceDecl::FindPropertyVisibleInPrimaryClass(
+ IdentifierInfo *PropertyId) const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ if (ObjCPropertyDecl *PD =
+ ObjCPropertyDecl::findPropertyDecl(cast<DeclContext>(this), PropertyId))
+ return PD;
+
+ // Look through protocols.
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ I = all_referenced_protocol_begin(),
+ E = all_referenced_protocol_end(); I != E; ++I)
+ if (ObjCPropertyDecl *P = (*I)->FindPropertyDeclaration(PropertyId))
+ return P;
+
+ return 0;
+}
+
+void ObjCInterfaceDecl::mergeClassExtensionProtocolList(
+ ObjCProtocolDecl *const* ExtList, unsigned ExtNum,
+ ASTContext &C)
+{
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ if (data().AllReferencedProtocols.empty() &&
+ data().ReferencedProtocols.empty()) {
+ data().AllReferencedProtocols.set(ExtList, ExtNum, C);
+ return;
+ }
+
+ // Check for duplicate protocol in class's protocol list.
+ // This is O(n*m), but it is extremely rare, and the number of protocols in
+ // a class or its extension is very small.
+ SmallVector<ObjCProtocolDecl*, 8> ProtocolRefs;
+ for (unsigned i = 0; i < ExtNum; i++) {
+ bool protocolExists = false;
+ ObjCProtocolDecl *ProtoInExtension = ExtList[i];
+ for (all_protocol_iterator
+ p = all_referenced_protocol_begin(),
+ e = all_referenced_protocol_end(); p != e; ++p) {
+ ObjCProtocolDecl *Proto = (*p);
+ if (C.ProtocolCompatibleWithProtocol(ProtoInExtension, Proto)) {
+ protocolExists = true;
+ break;
+ }
+ }
+ // Do we want to warn about a protocol in the extension that
+ // already exists in the class? Probably not.
+ if (!protocolExists)
+ ProtocolRefs.push_back(ProtoInExtension);
+ }
+
+ if (ProtocolRefs.empty())
+ return;
+
+ // Merge ProtocolRefs into class's protocol list;
+ for (all_protocol_iterator p = all_referenced_protocol_begin(),
+ e = all_referenced_protocol_end(); p != e; ++p) {
+ ProtocolRefs.push_back(*p);
+ }
+
+ data().AllReferencedProtocols.set(ProtocolRefs.data(), ProtocolRefs.size(),C);
+}
+
+void ObjCInterfaceDecl::allocateDefinitionData() {
+ assert(!hasDefinition() && "ObjC class already has a definition");
+ Data = new (getASTContext()) DefinitionData();
+ Data->Definition = this;
+
+ // Make the type point at the definition, now that we have one.
+ if (TypeForDecl)
+ cast<ObjCInterfaceType>(TypeForDecl)->Decl = this;
+}
+
+void ObjCInterfaceDecl::startDefinition() {
+ allocateDefinitionData();
+
+ // Update all of the declarations with a pointer to the definition.
+ for (redecl_iterator RD = redecls_begin(), RDEnd = redecls_end();
+ RD != RDEnd; ++RD) {
+ if (*RD != this)
+ RD->Data = Data;
+ }
+}
+
+/// getFirstClassExtension - Find first class extension of the given class.
+ObjCCategoryDecl* ObjCInterfaceDecl::getFirstClassExtension() const {
+ for (ObjCCategoryDecl *CDecl = getCategoryList(); CDecl;
+ CDecl = CDecl->getNextClassCategory())
+ if (CDecl->IsClassExtension())
+ return CDecl;
+ return 0;
+}
+
+/// getNextClassExtension - Find next class extension in list of categories.
+const ObjCCategoryDecl* ObjCCategoryDecl::getNextClassExtension() const {
+ for (const ObjCCategoryDecl *CDecl = getNextClassCategory(); CDecl;
+ CDecl = CDecl->getNextClassCategory())
+ if (CDecl->IsClassExtension())
+ return CDecl;
+ return 0;
+}
+
+ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(IdentifierInfo *ID,
+ ObjCInterfaceDecl *&clsDeclared) {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ ObjCInterfaceDecl* ClassDecl = this;
+ while (ClassDecl != NULL) {
+ if (ObjCIvarDecl *I = ClassDecl->getIvarDecl(ID)) {
+ clsDeclared = ClassDecl;
+ return I;
+ }
+ for (const ObjCCategoryDecl *CDecl = ClassDecl->getFirstClassExtension();
+ CDecl; CDecl = CDecl->getNextClassExtension()) {
+ if (ObjCIvarDecl *I = CDecl->getIvarDecl(ID)) {
+ clsDeclared = ClassDecl;
+ return I;
+ }
+ }
+
+ ClassDecl = ClassDecl->getSuperClass();
+ }
+ return NULL;
+}
+
+/// lookupInheritedClass - This method returns the ObjCInterfaceDecl * of the
+/// super class whose name is passed as argument. If it is not one of the super
+/// classes, it returns NULL.
+ObjCInterfaceDecl *ObjCInterfaceDecl::lookupInheritedClass(
+ const IdentifierInfo*ICName) {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ ObjCInterfaceDecl* ClassDecl = this;
+ while (ClassDecl != NULL) {
+ if (ClassDecl->getIdentifier() == ICName)
+ return ClassDecl;
+ ClassDecl = ClassDecl->getSuperClass();
+ }
+ return NULL;
+}
+
+/// lookupMethod - This method returns an instance/class method by looking in
+/// the class, its categories, and its super classes (using a linear search).
+ObjCMethodDecl *ObjCInterfaceDecl::lookupMethod(Selector Sel,
+ bool isInstance,
+ bool shallowCategoryLookup) const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ const ObjCInterfaceDecl* ClassDecl = this;
+ ObjCMethodDecl *MethodDecl = 0;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ while (ClassDecl != NULL) {
+ if ((MethodDecl = ClassDecl->getMethod(Sel, isInstance)))
+ return MethodDecl;
+
+ // Didn't find one yet - look through protocols.
+ for (ObjCInterfaceDecl::protocol_iterator I = ClassDecl->protocol_begin(),
+ E = ClassDecl->protocol_end();
+ I != E; ++I)
+ if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
+ return MethodDecl;
+
+ // Didn't find one yet - now look through categories.
+ ObjCCategoryDecl *CatDecl = ClassDecl->getCategoryList();
+ while (CatDecl) {
+ if ((MethodDecl = CatDecl->getMethod(Sel, isInstance)))
+ return MethodDecl;
+
+ if (!shallowCategoryLookup) {
+ // Didn't find one yet - look through protocols.
+ const ObjCList<ObjCProtocolDecl> &Protocols =
+ CatDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end(); I != E; ++I)
+ if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
+ return MethodDecl;
+ }
+ CatDecl = CatDecl->getNextClassCategory();
+ }
+
+ ClassDecl = ClassDecl->getSuperClass();
+ }
+ return NULL;
+}
+
+ObjCMethodDecl *ObjCInterfaceDecl::lookupPrivateMethod(
+ const Selector &Sel,
+ bool Instance) {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ ObjCMethodDecl *Method = 0;
+ if (ObjCImplementationDecl *ImpDecl = getImplementation())
+ Method = Instance ? ImpDecl->getInstanceMethod(Sel)
+ : ImpDecl->getClassMethod(Sel);
+
+ if (!Method && getSuperClass())
+ return getSuperClass()->lookupPrivateMethod(Sel, Instance);
+ return Method;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCMethodDecl
+//===----------------------------------------------------------------------===//
+
+ObjCMethodDecl *ObjCMethodDecl::Create(ASTContext &C,
+ SourceLocation beginLoc,
+ SourceLocation endLoc,
+ Selector SelInfo, QualType T,
+ TypeSourceInfo *ResultTInfo,
+ DeclContext *contextDecl,
+ bool isInstance,
+ bool isVariadic,
+ bool isSynthesized,
+ bool isImplicitlyDeclared,
+ bool isDefined,
+ ImplementationControl impControl,
+ bool HasRelatedResultType) {
+ return new (C) ObjCMethodDecl(beginLoc, endLoc,
+ SelInfo, T, ResultTInfo, contextDecl,
+ isInstance,
+ isVariadic, isSynthesized, isImplicitlyDeclared,
+ isDefined,
+ impControl,
+ HasRelatedResultType);
+}
+
+ObjCMethodDecl *ObjCMethodDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCMethodDecl));
+ return new (Mem) ObjCMethodDecl(SourceLocation(), SourceLocation(),
+ Selector(), QualType(), 0, 0);
+}
+
+void ObjCMethodDecl::setAsRedeclaration(const ObjCMethodDecl *PrevMethod) {
+ assert(PrevMethod);
+ getASTContext().setObjCMethodRedeclaration(PrevMethod, this);
+ IsRedeclaration = true;
+ PrevMethod->HasRedeclaration = true;
+}
+
+void ObjCMethodDecl::setParamsAndSelLocs(ASTContext &C,
+ ArrayRef<ParmVarDecl*> Params,
+ ArrayRef<SourceLocation> SelLocs) {
+ ParamsAndSelLocs = 0;
+ NumParams = Params.size();
+ if (Params.empty() && SelLocs.empty())
+ return;
+
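+ // Allocate one block holding the parameter array followed by the selector
+ // location array; getParams() and getStoredSelLocs() point into it.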
+ unsigned Size = sizeof(ParmVarDecl *) * NumParams +
+ sizeof(SourceLocation) * SelLocs.size();
+ ParamsAndSelLocs = C.Allocate(Size);
+ std::copy(Params.begin(), Params.end(), getParams());
+ std::copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs());
+}
+
+void ObjCMethodDecl::getSelectorLocs(
+ SmallVectorImpl<SourceLocation> &SelLocs) const {
+ for (unsigned i = 0, e = getNumSelectorLocs(); i != e; ++i)
+ SelLocs.push_back(getSelectorLoc(i));
+}
+
+void ObjCMethodDecl::setMethodParams(ASTContext &C,
+ ArrayRef<ParmVarDecl*> Params,
+ ArrayRef<SourceLocation> SelLocs) {
+ assert((!SelLocs.empty() || isImplicit()) &&
+ "No selector locs for non-implicit method");
+ if (isImplicit())
+ return setParamsAndSelLocs(C, Params, ArrayRef<SourceLocation>());
+
+ SelLocsKind = hasStandardSelectorLocs(getSelector(), SelLocs, Params, EndLoc);
+ if (SelLocsKind != SelLoc_NonStandard)
+ return setParamsAndSelLocs(C, Params, ArrayRef<SourceLocation>());
+
+ setParamsAndSelLocs(C, Params, SelLocs);
+}
+
+/// \brief A definition will return its interface declaration.
+/// An interface declaration will return its definition.
+/// Otherwise it will return itself.
+ObjCMethodDecl *ObjCMethodDecl::getNextRedeclaration() {
+ ASTContext &Ctx = getASTContext();
+ ObjCMethodDecl *Redecl = 0;
+ if (HasRedeclaration)
+ Redecl = const_cast<ObjCMethodDecl*>(Ctx.getObjCMethodRedeclaration(this));
+ if (Redecl)
+ return Redecl;
+
+ Decl *CtxD = cast<Decl>(getDeclContext());
+
+ if (ObjCInterfaceDecl *IFD = dyn_cast<ObjCInterfaceDecl>(CtxD)) {
+ if (ObjCImplementationDecl *ImplD = Ctx.getObjCImplementation(IFD))
+ Redecl = ImplD->getMethod(getSelector(), isInstanceMethod());
+
+ } else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(CtxD)) {
+ if (ObjCCategoryImplDecl *ImplD = Ctx.getObjCImplementation(CD))
+ Redecl = ImplD->getMethod(getSelector(), isInstanceMethod());
+
+ } else if (ObjCImplementationDecl *ImplD =
+ dyn_cast<ObjCImplementationDecl>(CtxD)) {
+ if (ObjCInterfaceDecl *IFD = ImplD->getClassInterface())
+ Redecl = IFD->getMethod(getSelector(), isInstanceMethod());
+
+ } else if (ObjCCategoryImplDecl *CImplD =
+ dyn_cast<ObjCCategoryImplDecl>(CtxD)) {
+ if (ObjCCategoryDecl *CatD = CImplD->getCategoryDecl())
+ Redecl = CatD->getMethod(getSelector(), isInstanceMethod());
+ }
+
+ if (!Redecl && isRedeclaration()) {
+ // This is the last redeclaration, go back to the first method.
+ return cast<ObjCContainerDecl>(CtxD)->getMethod(getSelector(),
+ isInstanceMethod());
+ }
+
+ return Redecl ? Redecl : this;
+}
+
+ObjCMethodDecl *ObjCMethodDecl::getCanonicalDecl() {
+ Decl *CtxD = cast<Decl>(getDeclContext());
+
+ if (ObjCImplementationDecl *ImplD = dyn_cast<ObjCImplementationDecl>(CtxD)) {
+ if (ObjCInterfaceDecl *IFD = ImplD->getClassInterface())
+ if (ObjCMethodDecl *MD = IFD->getMethod(getSelector(),
+ isInstanceMethod()))
+ return MD;
+
+ } else if (ObjCCategoryImplDecl *CImplD =
+ dyn_cast<ObjCCategoryImplDecl>(CtxD)) {
+ if (ObjCCategoryDecl *CatD = CImplD->getCategoryDecl())
+ if (ObjCMethodDecl *MD = CatD->getMethod(getSelector(),
+ isInstanceMethod()))
+ return MD;
+ }
+
+ if (isRedeclaration())
+ return cast<ObjCContainerDecl>(CtxD)->getMethod(getSelector(),
+ isInstanceMethod());
+
+ return this;
+}
+
+ObjCMethodFamily ObjCMethodDecl::getMethodFamily() const {
+ ObjCMethodFamily family = static_cast<ObjCMethodFamily>(Family);
+ if (family != static_cast<unsigned>(InvalidObjCMethodFamily))
+ return family;
+
+ // Check for an explicit attribute.
+ if (const ObjCMethodFamilyAttr *attr = getAttr<ObjCMethodFamilyAttr>()) {
+ // The unfortunate necessity of mapping between enums here is due
+ // to the attributes framework.
+ switch (attr->getFamily()) {
+ case ObjCMethodFamilyAttr::OMF_None: family = OMF_None; break;
+ case ObjCMethodFamilyAttr::OMF_alloc: family = OMF_alloc; break;
+ case ObjCMethodFamilyAttr::OMF_copy: family = OMF_copy; break;
+ case ObjCMethodFamilyAttr::OMF_init: family = OMF_init; break;
+ case ObjCMethodFamilyAttr::OMF_mutableCopy: family = OMF_mutableCopy; break;
+ case ObjCMethodFamilyAttr::OMF_new: family = OMF_new; break;
+ }
+ Family = static_cast<unsigned>(family);
+ return family;
+ }
+
+ family = getSelector().getMethodFamily();
+ switch (family) {
+ case OMF_None: break;
+
+ // init only has a conventional meaning for an instance method, and
+ // it has to return an object.
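+ // For example, the instance method '- (id)init' is in the init family,
+ // whereas a class method with the same selector is not treated as init here.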
+ case OMF_init:
+ if (!isInstanceMethod() || !getResultType()->isObjCObjectPointerType())
+ family = OMF_None;
+ break;
+
+ // alloc/copy/new have a conventional meaning for both class and
+ // instance methods, but they require an object return.
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ case OMF_new:
+ if (!getResultType()->isObjCObjectPointerType())
+ family = OMF_None;
+ break;
+
+ // These selectors have a conventional meaning only for instance methods.
+ case OMF_dealloc:
+ case OMF_finalize:
+ case OMF_retain:
+ case OMF_release:
+ case OMF_autorelease:
+ case OMF_retainCount:
+ case OMF_self:
+ if (!isInstanceMethod())
+ family = OMF_None;
+ break;
+
+ case OMF_performSelector:
+ if (!isInstanceMethod() ||
+ !getResultType()->isObjCIdType())
+ family = OMF_None;
+ else {
+ unsigned noParams = param_size();
+ if (noParams < 1 || noParams > 3)
+ family = OMF_None;
+ else {
+ ObjCMethodDecl::arg_type_iterator it = arg_type_begin();
+ QualType ArgT = (*it);
+ if (!ArgT->isObjCSelType()) {
+ family = OMF_None;
+ break;
+ }
+ while (--noParams) {
+ it++;
+ ArgT = (*it);
+ if (!ArgT->isObjCIdType()) {
+ family = OMF_None;
+ break;
+ }
+ }
+ }
+ }
+ break;
+
+ }
+
+ // Cache the result.
+ Family = static_cast<unsigned>(family);
+ return family;
+}
+
+void ObjCMethodDecl::createImplicitParams(ASTContext &Context,
+ const ObjCInterfaceDecl *OID) {
+ QualType selfTy;
+ if (isInstanceMethod()) {
+ // There may be no interface context due to error in declaration
+ // of the interface (which has been reported). Recover gracefully.
+ if (OID) {
+ selfTy = Context.getObjCInterfaceType(OID);
+ selfTy = Context.getObjCObjectPointerType(selfTy);
+ } else {
+ selfTy = Context.getObjCIdType();
+ }
+ } else // we have a factory method.
+ selfTy = Context.getObjCClassType();
+
+ bool selfIsPseudoStrong = false;
+ bool selfIsConsumed = false;
+
+ if (Context.getLangOpts().ObjCAutoRefCount) {
+ if (isInstanceMethod()) {
+ selfIsConsumed = hasAttr<NSConsumesSelfAttr>();
+
+ // 'self' is always __strong. It's actually pseudo-strong except
+ // in init methods (or methods labeled ns_consumes_self), though.
+ Qualifiers qs;
+ qs.setObjCLifetime(Qualifiers::OCL_Strong);
+ selfTy = Context.getQualifiedType(selfTy, qs);
+
+ // In addition, 'self' is const unless this is an init method.
+ if (getMethodFamily() != OMF_init && !selfIsConsumed) {
+ selfTy = selfTy.withConst();
+ selfIsPseudoStrong = true;
+ }
+ }
+ else {
+ assert(isClassMethod());
+ // 'self' is always const in class methods.
+ selfTy = selfTy.withConst();
+ selfIsPseudoStrong = true;
+ }
+ }
+
+ ImplicitParamDecl *self
+ = ImplicitParamDecl::Create(Context, this, SourceLocation(),
+ &Context.Idents.get("self"), selfTy);
+ setSelfDecl(self);
+
+ if (selfIsConsumed)
+ self->addAttr(new (Context) NSConsumedAttr(SourceLocation(), Context));
+
+ if (selfIsPseudoStrong)
+ self->setARCPseudoStrong(true);
+
+ setCmdDecl(ImplicitParamDecl::Create(Context, this, SourceLocation(),
+ &Context.Idents.get("_cmd"),
+ Context.getObjCSelType()));
+}
+
+ObjCInterfaceDecl *ObjCMethodDecl::getClassInterface() {
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(getDeclContext()))
+ return ID;
+ if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(getDeclContext()))
+ return CD->getClassInterface();
+ if (ObjCImplDecl *IMD = dyn_cast<ObjCImplDecl>(getDeclContext()))
+ return IMD->getClassInterface();
+
+ assert(!isa<ObjCProtocolDecl>(getDeclContext()) && "It's a protocol method");
+ llvm_unreachable("unknown method context");
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCInterfaceDecl
+//===----------------------------------------------------------------------===//
+
+ObjCInterfaceDecl *ObjCInterfaceDecl::Create(const ASTContext &C,
+ DeclContext *DC,
+ SourceLocation atLoc,
+ IdentifierInfo *Id,
+ ObjCInterfaceDecl *PrevDecl,
+ SourceLocation ClassLoc,
+ bool isInternal){
+ ObjCInterfaceDecl *Result = new (C) ObjCInterfaceDecl(DC, atLoc, Id, ClassLoc,
+ PrevDecl, isInternal);
+ C.getObjCInterfaceType(Result, PrevDecl);
+ return Result;
+}
+
+ObjCInterfaceDecl *ObjCInterfaceDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCInterfaceDecl));
+ return new (Mem) ObjCInterfaceDecl(0, SourceLocation(), 0, SourceLocation(),
+ 0, false);
+}
+
+ObjCInterfaceDecl::
+ObjCInterfaceDecl(DeclContext *DC, SourceLocation atLoc, IdentifierInfo *Id,
+ SourceLocation CLoc, ObjCInterfaceDecl *PrevDecl,
+ bool isInternal)
+ : ObjCContainerDecl(ObjCInterface, DC, Id, CLoc, atLoc),
+ TypeForDecl(0), Data()
+{
+ setPreviousDeclaration(PrevDecl);
+
+ // Copy the 'data' pointer over.
+ if (PrevDecl)
+ Data = PrevDecl->Data;
+
+ setImplicit(isInternal);
+}
+
+void ObjCInterfaceDecl::LoadExternalDefinition() const {
+ assert(data().ExternallyCompleted && "Class is not externally completed");
+ data().ExternallyCompleted = false;
+ getASTContext().getExternalSource()->CompleteType(
+ const_cast<ObjCInterfaceDecl *>(this));
+}
+
+void ObjCInterfaceDecl::setExternallyCompleted() {
+ assert(getASTContext().getExternalSource() &&
+ "Class can't be externally completed without an external source");
+ assert(hasDefinition() &&
+ "Forward declarations can't be externally completed");
+ data().ExternallyCompleted = true;
+}
+
+ObjCImplementationDecl *ObjCInterfaceDecl::getImplementation() const {
+ if (const ObjCInterfaceDecl *Def = getDefinition()) {
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ return getASTContext().getObjCImplementation(
+ const_cast<ObjCInterfaceDecl*>(Def));
+ }
+
+ // FIXME: Should make sure no callers ever do this.
+ return 0;
+}
+
+void ObjCInterfaceDecl::setImplementation(ObjCImplementationDecl *ImplD) {
+ getASTContext().setObjCImplementation(getDefinition(), ImplD);
+}
+
+/// all_declared_ivar_begin - return first ivar declared in this class,
+/// its extensions and its implementation. Lazily build the list on first
+/// access.
+ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().IvarList)
+ return data().IvarList;
+
+ ObjCIvarDecl *curIvar = 0;
+ if (!ivar_empty()) {
+ ObjCInterfaceDecl::ivar_iterator I = ivar_begin(), E = ivar_end();
+ data().IvarList = (*I); ++I;
+ for (curIvar = data().IvarList; I != E; curIvar = *I, ++I)
+ curIvar->setNextIvar(*I);
+ }
+
+ for (const ObjCCategoryDecl *CDecl = getFirstClassExtension(); CDecl;
+ CDecl = CDecl->getNextClassExtension()) {
+ if (!CDecl->ivar_empty()) {
+ ObjCCategoryDecl::ivar_iterator I = CDecl->ivar_begin(),
+ E = CDecl->ivar_end();
+ if (!data().IvarList) {
+ data().IvarList = (*I); ++I;
+ curIvar = data().IvarList;
+ }
+ for ( ;I != E; curIvar = *I, ++I)
+ curIvar->setNextIvar(*I);
+ }
+ }
+
+ if (ObjCImplementationDecl *ImplDecl = getImplementation()) {
+ if (!ImplDecl->ivar_empty()) {
+ ObjCImplementationDecl::ivar_iterator I = ImplDecl->ivar_begin(),
+ E = ImplDecl->ivar_end();
+ if (!data().IvarList) {
+ data().IvarList = (*I); ++I;
+ curIvar = data().IvarList;
+ }
+ for ( ;I != E; curIvar = *I, ++I)
+ curIvar->setNextIvar(*I);
+ }
+ }
+ return data().IvarList;
+}
+
+/// FindCategoryDeclaration - Finds the category declaration with the name
+/// 'CategoryId' in this class's list of categories and returns it. If the
+/// category is not found, it returns 0.
+///
+ObjCCategoryDecl *
+ObjCInterfaceDecl::FindCategoryDeclaration(IdentifierInfo *CategoryId) const {
+ // FIXME: Should make sure no callers ever do this.
+ if (!hasDefinition())
+ return 0;
+
+ if (data().ExternallyCompleted)
+ LoadExternalDefinition();
+
+ for (ObjCCategoryDecl *Category = getCategoryList();
+ Category; Category = Category->getNextClassCategory())
+ if (Category->getIdentifier() == CategoryId)
+ return Category;
+ return 0;
+}
+
+ObjCMethodDecl *
+ObjCInterfaceDecl::getCategoryInstanceMethod(Selector Sel) const {
+ for (ObjCCategoryDecl *Category = getCategoryList();
+ Category; Category = Category->getNextClassCategory())
+ if (ObjCCategoryImplDecl *Impl = Category->getImplementation())
+ if (ObjCMethodDecl *MD = Impl->getInstanceMethod(Sel))
+ return MD;
+ return 0;
+}
+
+ObjCMethodDecl *ObjCInterfaceDecl::getCategoryClassMethod(Selector Sel) const {
+ for (ObjCCategoryDecl *Category = getCategoryList();
+ Category; Category = Category->getNextClassCategory())
+ if (ObjCCategoryImplDecl *Impl = Category->getImplementation())
+ if (ObjCMethodDecl *MD = Impl->getClassMethod(Sel))
+ return MD;
+ return 0;
+}
+
+/// ClassImplementsProtocol - Checks whether the 'lProto' protocol has been
+/// implemented in the IDecl class, its super class, or its categories (if
+/// lookupCategory is true).
+bool ObjCInterfaceDecl::ClassImplementsProtocol(ObjCProtocolDecl *lProto,
+ bool lookupCategory,
+ bool RHSIsQualifiedID) {
+ if (!hasDefinition())
+ return false;
+
+ ObjCInterfaceDecl *IDecl = this;
+ // 1st, look up the class.
+ for (ObjCInterfaceDecl::protocol_iterator
+ PI = IDecl->protocol_begin(), E = IDecl->protocol_end(); PI != E; ++PI){
+ if (getASTContext().ProtocolCompatibleWithProtocol(lProto, *PI))
+ return true;
+ // This is dubious and is added to be compatible with gcc. gcc also allows
+ // assigning a protocol-qualified 'id' type to an LHS object when a protocol
+ // in the qualified LHS is in the list of protocols of the rhs 'id' object.
+ // This, IMO, should be a bug.
+ // FIXME: Treat this as an extension, and flag this as an error when GCC
+ // extensions are not enabled.
+ if (RHSIsQualifiedID &&
+ getASTContext().ProtocolCompatibleWithProtocol(*PI, lProto))
+ return true;
+ }
+
+ // 2nd, look up the category.
+ if (lookupCategory)
+ for (ObjCCategoryDecl *CDecl = IDecl->getCategoryList(); CDecl;
+ CDecl = CDecl->getNextClassCategory()) {
+ for (ObjCCategoryDecl::protocol_iterator PI = CDecl->protocol_begin(),
+ E = CDecl->protocol_end(); PI != E; ++PI)
+ if (getASTContext().ProtocolCompatibleWithProtocol(lProto, *PI))
+ return true;
+ }
+
+ // 3rd, look up the super class(es).
+ if (IDecl->getSuperClass())
+ return
+ IDecl->getSuperClass()->ClassImplementsProtocol(lProto, lookupCategory,
+ RHSIsQualifiedID);
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCIvarDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCIvarDecl::anchor() { }
+
+ObjCIvarDecl *ObjCIvarDecl::Create(ASTContext &C, ObjCContainerDecl *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ QualType T, TypeSourceInfo *TInfo,
+ AccessControl ac, Expr *BW,
+ bool synthesized) {
+ if (DC) {
+ // Ivars can only appear in interfaces, implementations (via synthesized
+ // properties), and class extensions (via direct declaration, or synthesized
+ // properties).
+ //
+ // FIXME: This should really be asserting this:
+ // (isa<ObjCCategoryDecl>(DC) &&
+ // cast<ObjCCategoryDecl>(DC)->IsClassExtension()))
+ // but unfortunately we sometimes place ivars into non-class extension
+ // categories on error. This breaks an AST invariant, and should not be
+ // fixed.
+ assert((isa<ObjCInterfaceDecl>(DC) || isa<ObjCImplementationDecl>(DC) ||
+ isa<ObjCCategoryDecl>(DC)) &&
+ "Invalid ivar decl context!");
+ // Once a new ivar is created in any of class/class-extension/implementation
+ // decl contexts, the previously built IvarList must be rebuilt.
+ ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(DC);
+ if (!ID) {
+ if (ObjCImplementationDecl *IM = dyn_cast<ObjCImplementationDecl>(DC)) {
+ ID = IM->getClassInterface();
+ if (BW)
+ IM->setHasSynthBitfield(true);
+ } else {
+ ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(DC);
+ ID = CD->getClassInterface();
+ if (BW)
+ CD->setHasSynthBitfield(true);
+ }
+ }
+ ID->setIvarList(0);
+ }
+
+ return new (C) ObjCIvarDecl(DC, StartLoc, IdLoc, Id, T, TInfo,
+ ac, BW, synthesized);
+}
+
+ObjCIvarDecl *ObjCIvarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCIvarDecl));
+ return new (Mem) ObjCIvarDecl(0, SourceLocation(), SourceLocation(), 0,
+ QualType(), 0, ObjCIvarDecl::None, 0, false);
+}
+
+const ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() const {
+ const ObjCContainerDecl *DC = cast<ObjCContainerDecl>(getDeclContext());
+
+ switch (DC->getKind()) {
+ default:
+ case ObjCCategoryImpl:
+ case ObjCProtocol:
+ llvm_unreachable("invalid ivar container!");
+
+ // Ivars can only appear in class extension categories.
+ case ObjCCategory: {
+ const ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(DC);
+ assert(CD->IsClassExtension() && "invalid container for ivar!");
+ return CD->getClassInterface();
+ }
+
+ case ObjCImplementation:
+ return cast<ObjCImplementationDecl>(DC)->getClassInterface();
+
+ case ObjCInterface:
+ return cast<ObjCInterfaceDecl>(DC);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCAtDefsFieldDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCAtDefsFieldDecl::anchor() { }
+
+ObjCAtDefsFieldDecl
+*ObjCAtDefsFieldDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ IdentifierInfo *Id, QualType T, Expr *BW) {
+ return new (C) ObjCAtDefsFieldDecl(DC, StartLoc, IdLoc, Id, T, BW);
+}
+
+ObjCAtDefsFieldDecl *ObjCAtDefsFieldDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCAtDefsFieldDecl));
+ return new (Mem) ObjCAtDefsFieldDecl(0, SourceLocation(), SourceLocation(),
+ 0, QualType(), 0);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCProtocolDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCProtocolDecl::anchor() { }
+
+ObjCProtocolDecl::ObjCProtocolDecl(DeclContext *DC, IdentifierInfo *Id,
+ SourceLocation nameLoc,
+ SourceLocation atStartLoc,
+ ObjCProtocolDecl *PrevDecl)
+ : ObjCContainerDecl(ObjCProtocol, DC, Id, nameLoc, atStartLoc), Data()
+{
+ setPreviousDeclaration(PrevDecl);
+ if (PrevDecl)
+ Data = PrevDecl->Data;
+}
+
+ObjCProtocolDecl *ObjCProtocolDecl::Create(ASTContext &C, DeclContext *DC,
+ IdentifierInfo *Id,
+ SourceLocation nameLoc,
+ SourceLocation atStartLoc,
+ ObjCProtocolDecl *PrevDecl) {
+ ObjCProtocolDecl *Result
+ = new (C) ObjCProtocolDecl(DC, Id, nameLoc, atStartLoc, PrevDecl);
+
+ return Result;
+}
+
+ObjCProtocolDecl *ObjCProtocolDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCProtocolDecl));
+ return new (Mem) ObjCProtocolDecl(0, 0, SourceLocation(), SourceLocation(),
+ 0);
+}
+
+ObjCProtocolDecl *ObjCProtocolDecl::lookupProtocolNamed(IdentifierInfo *Name) {
+ ObjCProtocolDecl *PDecl = this;
+
+ if (Name == getIdentifier())
+ return PDecl;
+
+ for (protocol_iterator I = protocol_begin(), E = protocol_end(); I != E; ++I)
+ if ((PDecl = (*I)->lookupProtocolNamed(Name)))
+ return PDecl;
+
+ return NULL;
+}
+
+// lookupMethod - Look up an instance/class method in the protocol and the
+// protocols it inherits.
+ObjCMethodDecl *ObjCProtocolDecl::lookupMethod(Selector Sel,
+ bool isInstance) const {
+ ObjCMethodDecl *MethodDecl = NULL;
+
+ if ((MethodDecl = getMethod(Sel, isInstance)))
+ return MethodDecl;
+
+ for (protocol_iterator I = protocol_begin(), E = protocol_end(); I != E; ++I)
+ if ((MethodDecl = (*I)->lookupMethod(Sel, isInstance)))
+ return MethodDecl;
+ return NULL;
+}
+
+void ObjCProtocolDecl::allocateDefinitionData() {
+ assert(!Data && "Protocol already has a definition!");
+ Data = new (getASTContext()) DefinitionData;
+ Data->Definition = this;
+}
+
+void ObjCProtocolDecl::startDefinition() {
+ allocateDefinitionData();
+
+ // Update all of the declarations with a pointer to the definition.
+ for (redecl_iterator RD = redecls_begin(), RDEnd = redecls_end();
+ RD != RDEnd; ++RD)
+ RD->Data = this->Data;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCCategoryDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCCategoryDecl::anchor() { }
+
+ObjCCategoryDecl *ObjCCategoryDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation AtLoc,
+ SourceLocation ClassNameLoc,
+ SourceLocation CategoryNameLoc,
+ IdentifierInfo *Id,
+ ObjCInterfaceDecl *IDecl,
+ SourceLocation IvarLBraceLoc,
+ SourceLocation IvarRBraceLoc) {
+ ObjCCategoryDecl *CatDecl = new (C) ObjCCategoryDecl(DC, AtLoc, ClassNameLoc,
+ CategoryNameLoc, Id,
+ IDecl,
+ IvarLBraceLoc, IvarRBraceLoc);
+ if (IDecl) {
+ // Link this category into its class's category list.
+ CatDecl->NextClassCategory = IDecl->getCategoryList();
+ if (IDecl->hasDefinition()) {
+ IDecl->setCategoryList(CatDecl);
+ if (ASTMutationListener *L = C.getASTMutationListener())
+ L->AddedObjCCategoryToInterface(CatDecl, IDecl);
+ }
+ }
+
+ return CatDecl;
+}
+
+ObjCCategoryDecl *ObjCCategoryDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCategoryDecl));
+ return new (Mem) ObjCCategoryDecl(0, SourceLocation(), SourceLocation(),
+ SourceLocation(), 0, 0);
+}
+
+ObjCCategoryImplDecl *ObjCCategoryDecl::getImplementation() const {
+ return getASTContext().getObjCImplementation(
+ const_cast<ObjCCategoryDecl*>(this));
+}
+
+void ObjCCategoryDecl::setImplementation(ObjCCategoryImplDecl *ImplD) {
+ getASTContext().setObjCImplementation(this, ImplD);
+}
+
+
+//===----------------------------------------------------------------------===//
+// ObjCCategoryImplDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCCategoryImplDecl::anchor() { }
+
+ObjCCategoryImplDecl *
+ObjCCategoryImplDecl::Create(ASTContext &C, DeclContext *DC,
+ IdentifierInfo *Id,
+ ObjCInterfaceDecl *ClassInterface,
+ SourceLocation nameLoc,
+ SourceLocation atStartLoc,
+ SourceLocation CategoryNameLoc) {
+ if (ClassInterface && ClassInterface->hasDefinition())
+ ClassInterface = ClassInterface->getDefinition();
+ return new (C) ObjCCategoryImplDecl(DC, Id, ClassInterface,
+ nameLoc, atStartLoc, CategoryNameLoc);
+}
+
+ObjCCategoryImplDecl *ObjCCategoryImplDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCategoryImplDecl));
+ return new (Mem) ObjCCategoryImplDecl(0, 0, 0, SourceLocation(),
+ SourceLocation(), SourceLocation());
+}
+
+ObjCCategoryDecl *ObjCCategoryImplDecl::getCategoryDecl() const {
+ // The class interface might be NULL if we are working with invalid code.
+ if (const ObjCInterfaceDecl *ID = getClassInterface())
+ return ID->FindCategoryDeclaration(getIdentifier());
+ return 0;
+}
+
+
+void ObjCImplDecl::anchor() { }
+
+void ObjCImplDecl::addPropertyImplementation(ObjCPropertyImplDecl *property) {
+ // FIXME: The context should be correct before we get here.
+ property->setLexicalDeclContext(this);
+ addDecl(property);
+}
+
+void ObjCImplDecl::setClassInterface(ObjCInterfaceDecl *IFace) {
+ ASTContext &Ctx = getASTContext();
+
+ if (ObjCImplementationDecl *ImplD
+ = dyn_cast_or_null<ObjCImplementationDecl>(this)) {
+ if (IFace)
+ Ctx.setObjCImplementation(IFace, ImplD);
+
+ } else if (ObjCCategoryImplDecl *ImplD =
+ dyn_cast_or_null<ObjCCategoryImplDecl>(this)) {
+    // As above, the interface may be null for invalid code.
+    if (IFace)
+      if (ObjCCategoryDecl *CD =
+              IFace->FindCategoryDeclaration(getIdentifier()))
+        Ctx.setObjCImplementation(CD, ImplD);
+ }
+
+ ClassInterface = IFace;
+}
+
+/// FindPropertyImplIvarDecl - This method looks up the ivar in the list of
+/// properties implemented in this category @implementation block and returns
+/// the implemented property that uses it.
+///
+ObjCPropertyImplDecl *ObjCImplDecl::
+FindPropertyImplIvarDecl(IdentifierInfo *ivarId) const {
+ for (propimpl_iterator i = propimpl_begin(), e = propimpl_end(); i != e; ++i){
+ ObjCPropertyImplDecl *PID = *i;
+ if (PID->getPropertyIvarDecl() &&
+ PID->getPropertyIvarDecl()->getIdentifier() == ivarId)
+ return PID;
+ }
+ return 0;
+}
+
+/// FindPropertyImplDecl - This method looks up a previously added
+/// ObjCPropertyImplDecl in the list of properties @synthesize'd or @dynamic
+/// in this category @implementation block.
+///
+ObjCPropertyImplDecl *ObjCImplDecl::
+FindPropertyImplDecl(IdentifierInfo *Id) const {
+ for (propimpl_iterator i = propimpl_begin(), e = propimpl_end(); i != e; ++i){
+ ObjCPropertyImplDecl *PID = *i;
+ if (PID->getPropertyDecl()->getIdentifier() == Id)
+ return PID;
+ }
+ return 0;
+}
+
+raw_ostream &clang::operator<<(raw_ostream &OS,
+ const ObjCCategoryImplDecl &CID) {
+ OS << CID.getName();
+ return OS;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCImplementationDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCImplementationDecl::anchor() { }
+
+ObjCImplementationDecl *
+ObjCImplementationDecl::Create(ASTContext &C, DeclContext *DC,
+ ObjCInterfaceDecl *ClassInterface,
+ ObjCInterfaceDecl *SuperDecl,
+ SourceLocation nameLoc,
+ SourceLocation atStartLoc,
+ SourceLocation IvarLBraceLoc,
+ SourceLocation IvarRBraceLoc) {
+ if (ClassInterface && ClassInterface->hasDefinition())
+ ClassInterface = ClassInterface->getDefinition();
+ return new (C) ObjCImplementationDecl(DC, ClassInterface, SuperDecl,
+ nameLoc, atStartLoc,
+ IvarLBraceLoc, IvarRBraceLoc);
+}
+
+ObjCImplementationDecl *
+ObjCImplementationDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCImplementationDecl));
+ return new (Mem) ObjCImplementationDecl(0, 0, 0, SourceLocation(),
+ SourceLocation());
+}
+
+void ObjCImplementationDecl::setIvarInitializers(ASTContext &C,
+ CXXCtorInitializer ** initializers,
+ unsigned numInitializers) {
+ if (numInitializers > 0) {
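+    // Copy the initializer pointers into storage allocated from the
+    // ASTContext so they live as long as the AST itself.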
+ NumIvarInitializers = numInitializers;
+ CXXCtorInitializer **ivarInitializers =
+ new (C) CXXCtorInitializer*[NumIvarInitializers];
+ memcpy(ivarInitializers, initializers,
+ numInitializers * sizeof(CXXCtorInitializer*));
+ IvarInitializers = ivarInitializers;
+ }
+}
+
+raw_ostream &clang::operator<<(raw_ostream &OS,
+ const ObjCImplementationDecl &ID) {
+ OS << ID.getName();
+ return OS;
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCCompatibleAliasDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCCompatibleAliasDecl::anchor() { }
+
+ObjCCompatibleAliasDecl *
+ObjCCompatibleAliasDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ IdentifierInfo *Id,
+ ObjCInterfaceDecl* AliasedClass) {
+ return new (C) ObjCCompatibleAliasDecl(DC, L, Id, AliasedClass);
+}
+
+ObjCCompatibleAliasDecl *
+ObjCCompatibleAliasDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCCompatibleAliasDecl));
+ return new (Mem) ObjCCompatibleAliasDecl(0, SourceLocation(), 0, 0);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCPropertyDecl
+//===----------------------------------------------------------------------===//
+
+void ObjCPropertyDecl::anchor() { }
+
+ObjCPropertyDecl *ObjCPropertyDecl::Create(ASTContext &C, DeclContext *DC,
+ SourceLocation L,
+ IdentifierInfo *Id,
+ SourceLocation AtLoc,
+ SourceLocation LParenLoc,
+ TypeSourceInfo *T,
+ PropertyControl propControl) {
+ return new (C) ObjCPropertyDecl(DC, L, Id, AtLoc, LParenLoc, T);
+}
+
+ObjCPropertyDecl *ObjCPropertyDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void * Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCPropertyDecl));
+ return new (Mem) ObjCPropertyDecl(0, SourceLocation(), 0, SourceLocation(),
+ SourceLocation(),
+ 0);
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCPropertyImplDecl
+//===----------------------------------------------------------------------===//
+
+ObjCPropertyImplDecl *ObjCPropertyImplDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation atLoc,
+ SourceLocation L,
+ ObjCPropertyDecl *property,
+ Kind PK,
+ ObjCIvarDecl *ivar,
+ SourceLocation ivarLoc) {
+ return new (C) ObjCPropertyImplDecl(DC, atLoc, L, property, PK, ivar,
+ ivarLoc);
+}
+
+ObjCPropertyImplDecl *ObjCPropertyImplDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ObjCPropertyImplDecl));
+ return new (Mem) ObjCPropertyImplDecl(0, SourceLocation(), SourceLocation(),
+ 0, Dynamic, 0, SourceLocation());
+}
+
+SourceRange ObjCPropertyImplDecl::getSourceRange() const {
+ SourceLocation EndLoc = getLocation();
+ if (IvarLoc.isValid())
+ EndLoc = IvarLoc;
+
+ return SourceRange(AtLoc, EndLoc);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
new file mode 100644
index 0000000..74e1c1b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DeclPrinter.cpp
@@ -0,0 +1,1072 @@
+//===--- DeclPrinter.cpp - Printing implementation for Decl ASTs ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Decl::print and Decl::dump methods, which
+// pretty-print the AST back out to C/Objective-C/C++/Objective-C++ code.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/Basic/Module.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace {
+ class DeclPrinter : public DeclVisitor<DeclPrinter> {
+ raw_ostream &Out;
+ ASTContext &Context;
+ PrintingPolicy Policy;
+ unsigned Indentation;
+ bool PrintInstantiation;
+
+ raw_ostream& Indent() { return Indent(Indentation); }
+ raw_ostream& Indent(unsigned Indentation);
+ void ProcessDeclGroup(SmallVectorImpl<Decl*>& Decls);
+
+ void Print(AccessSpecifier AS);
+
+ public:
+ DeclPrinter(raw_ostream &Out, ASTContext &Context,
+ const PrintingPolicy &Policy,
+ unsigned Indentation = 0,
+ bool PrintInstantiation = false)
+ : Out(Out), Context(Context), Policy(Policy), Indentation(Indentation),
+ PrintInstantiation(PrintInstantiation) { }
+
+ void VisitDeclContext(DeclContext *DC, bool Indent = true);
+
+ void VisitTranslationUnitDecl(TranslationUnitDecl *D);
+ void VisitTypedefDecl(TypedefDecl *D);
+ void VisitTypeAliasDecl(TypeAliasDecl *D);
+ void VisitEnumDecl(EnumDecl *D);
+ void VisitRecordDecl(RecordDecl *D);
+ void VisitEnumConstantDecl(EnumConstantDecl *D);
+ void VisitFunctionDecl(FunctionDecl *D);
+ void VisitFieldDecl(FieldDecl *D);
+ void VisitVarDecl(VarDecl *D);
+ void VisitLabelDecl(LabelDecl *D);
+ void VisitParmVarDecl(ParmVarDecl *D);
+ void VisitFileScopeAsmDecl(FileScopeAsmDecl *D);
+ void VisitImportDecl(ImportDecl *D);
+ void VisitStaticAssertDecl(StaticAssertDecl *D);
+ void VisitNamespaceDecl(NamespaceDecl *D);
+ void VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
+ void VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
+ void VisitCXXRecordDecl(CXXRecordDecl *D);
+ void VisitLinkageSpecDecl(LinkageSpecDecl *D);
+ void VisitTemplateDecl(const TemplateDecl *D);
+ void VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
+ void VisitClassTemplateDecl(ClassTemplateDecl *D);
+ void VisitObjCMethodDecl(ObjCMethodDecl *D);
+ void VisitObjCImplementationDecl(ObjCImplementationDecl *D);
+ void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
+ void VisitObjCProtocolDecl(ObjCProtocolDecl *D);
+ void VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
+ void VisitObjCCategoryDecl(ObjCCategoryDecl *D);
+ void VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D);
+ void VisitObjCPropertyDecl(ObjCPropertyDecl *D);
+ void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
+ void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
+ void VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
+ void VisitUsingDecl(UsingDecl *D);
+ void VisitUsingShadowDecl(UsingShadowDecl *D);
+
+ void PrintTemplateParameters(const TemplateParameterList *Params,
+ const TemplateArgumentList *Args);
+ void prettyPrintAttributes(Decl *D);
+ };
+}
+
+void Decl::print(raw_ostream &Out, unsigned Indentation,
+ bool PrintInstantiation) const {
+ print(Out, getASTContext().getPrintingPolicy(), Indentation, PrintInstantiation);
+}
+
+void Decl::print(raw_ostream &Out, const PrintingPolicy &Policy,
+ unsigned Indentation, bool PrintInstantiation) const {
+ DeclPrinter Printer(Out, getASTContext(), Policy, Indentation, PrintInstantiation);
+ Printer.Visit(const_cast<Decl*>(this));
+}
+
+static QualType GetBaseType(QualType T) {
+ // FIXME: This should be on the Type class!
+ QualType BaseType = T;
+ while (!BaseType->isSpecifierType()) {
+ if (isa<TypedefType>(BaseType))
+ break;
+ else if (const PointerType* PTy = BaseType->getAs<PointerType>())
+ BaseType = PTy->getPointeeType();
+ else if (const ArrayType* ATy = dyn_cast<ArrayType>(BaseType))
+ BaseType = ATy->getElementType();
+ else if (const FunctionType* FTy = BaseType->getAs<FunctionType>())
+ BaseType = FTy->getResultType();
+ else if (const VectorType *VTy = BaseType->getAs<VectorType>())
+ BaseType = VTy->getElementType();
+ else
+ llvm_unreachable("Unknown declarator!");
+ }
+ return BaseType;
+}
+
+static QualType getDeclType(Decl* D) {
+ if (TypedefNameDecl* TDD = dyn_cast<TypedefNameDecl>(D))
+ return TDD->getUnderlyingType();
+ if (ValueDecl* VD = dyn_cast<ValueDecl>(D))
+ return VD->getType();
+ return QualType();
+}
+
+void Decl::printGroup(Decl** Begin, unsigned NumDecls,
+ raw_ostream &Out, const PrintingPolicy &Policy,
+ unsigned Indentation) {
+ if (NumDecls == 1) {
+ (*Begin)->print(Out, Policy, Indentation);
+ return;
+ }
+
+ Decl** End = Begin + NumDecls;
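+  // If the group begins with a tag declaration (e.g. the unnamed struct in
+  // "struct { int x; } a, b;"), print the tag definition once up front and
+  // suppress it when printing the remaining declarators.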
+ TagDecl* TD = dyn_cast<TagDecl>(*Begin);
+ if (TD)
+ ++Begin;
+
+ PrintingPolicy SubPolicy(Policy);
+ if (TD && TD->isCompleteDefinition()) {
+ TD->print(Out, Policy, Indentation);
+ Out << " ";
+ SubPolicy.SuppressTag = true;
+ }
+
+ bool isFirst = true;
+ for ( ; Begin != End; ++Begin) {
+ if (isFirst) {
+ SubPolicy.SuppressSpecifiers = false;
+ isFirst = false;
+ } else {
+      Out << ", ";
+ SubPolicy.SuppressSpecifiers = true;
+ }
+
+ (*Begin)->print(Out, SubPolicy, Indentation);
+ }
+}
+
+void DeclContext::dumpDeclContext() const {
+ // Get the translation unit
+ const DeclContext *DC = this;
+ while (!DC->isTranslationUnit())
+ DC = DC->getParent();
+
+ ASTContext &Ctx = cast<TranslationUnitDecl>(DC)->getASTContext();
+ DeclPrinter Printer(llvm::errs(), Ctx, Ctx.getPrintingPolicy(), 0);
+ Printer.VisitDeclContext(const_cast<DeclContext *>(this), /*Indent=*/false);
+}
+
+void Decl::dump() const {
+ print(llvm::errs());
+}
+
+raw_ostream& DeclPrinter::Indent(unsigned Indentation) {
+ for (unsigned i = 0; i != Indentation; ++i)
+ Out << " ";
+ return Out;
+}
+
+void DeclPrinter::prettyPrintAttributes(Decl *D) {
+ if (D->hasAttrs()) {
+ AttrVec &Attrs = D->getAttrs();
+ for (AttrVec::const_iterator i=Attrs.begin(), e=Attrs.end(); i!=e; ++i) {
+ Attr *A = *i;
+ A->printPretty(Out, Context);
+ }
+ }
+}
+
+void DeclPrinter::ProcessDeclGroup(SmallVectorImpl<Decl*>& Decls) {
+ this->Indent();
+ Decl::printGroup(Decls.data(), Decls.size(), Out, Policy, Indentation);
+ Out << ";\n";
+ Decls.clear();
+}
+
+void DeclPrinter::Print(AccessSpecifier AS) {
+ switch(AS) {
+ case AS_none: llvm_unreachable("No access specifier!");
+ case AS_public: Out << "public"; break;
+ case AS_protected: Out << "protected"; break;
+ case AS_private: Out << "private"; break;
+ }
+}
+
+//----------------------------------------------------------------------------
+// Common C declarations
+//----------------------------------------------------------------------------
+
+void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
+ if (Indent)
+ Indentation += Policy.Indentation;
+
+ SmallVector<Decl*, 2> Decls;
+ for (DeclContext::decl_iterator D = DC->decls_begin(), DEnd = DC->decls_end();
+ D != DEnd; ++D) {
+
+ // Don't print ObjCIvarDecls, as they are printed when visiting the
+ // containing ObjCInterfaceDecl.
+ if (isa<ObjCIvarDecl>(*D))
+ continue;
+
+ if (!Policy.Dump) {
+ // Skip over implicit declarations in pretty-printing mode.
+ if (D->isImplicit()) continue;
+ // FIXME: Ugly hack so we don't pretty-print the builtin declaration
+ // of __builtin_va_list or __[u]int128_t. There should be some other way
+ // to check that.
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(*D)) {
+ if (IdentifierInfo *II = ND->getIdentifier()) {
+ if (II->isStr("__builtin_va_list") ||
+ II->isStr("__int128_t") || II->isStr("__uint128_t"))
+ continue;
+ }
+ }
+ }
+
+    // The next bit of code handles declarations like "struct {int x;} a,b"; we're
+ // forced to merge the declarations because there's no other way to
+ // refer to the struct in question. This limited merging is safe without
+ // a bunch of other checks because it only merges declarations directly
+ // referring to the tag, not typedefs.
+ //
+ // Check whether the current declaration should be grouped with a previous
+ // unnamed struct.
+ QualType CurDeclType = getDeclType(*D);
+ if (!Decls.empty() && !CurDeclType.isNull()) {
+ QualType BaseType = GetBaseType(CurDeclType);
+ if (!BaseType.isNull() && isa<TagType>(BaseType) &&
+ cast<TagType>(BaseType)->getDecl() == Decls[0]) {
+ Decls.push_back(*D);
+ continue;
+ }
+ }
+
+ // If we have a merged group waiting to be handled, handle it now.
+ if (!Decls.empty())
+ ProcessDeclGroup(Decls);
+
+ // If the current declaration is an unnamed tag type, save it
+ // so we can merge it with the subsequent declaration(s) using it.
+ if (isa<TagDecl>(*D) && !cast<TagDecl>(*D)->getIdentifier()) {
+ Decls.push_back(*D);
+ continue;
+ }
+
+ if (isa<AccessSpecDecl>(*D)) {
+ Indentation -= Policy.Indentation;
+ this->Indent();
+ Print(D->getAccess());
+ Out << ":\n";
+ Indentation += Policy.Indentation;
+ continue;
+ }
+
+ this->Indent();
+ Visit(*D);
+
+    // FIXME: Need to be able to tell the DeclPrinter when a declaration
+    // provides its own terminator (e.g. a braced body), instead of
+    // enumerating those cases here.
+ const char *Terminator = 0;
+ if (isa<FunctionDecl>(*D) &&
+ cast<FunctionDecl>(*D)->isThisDeclarationADefinition())
+ Terminator = 0;
+ else if (isa<ObjCMethodDecl>(*D) && cast<ObjCMethodDecl>(*D)->getBody())
+ Terminator = 0;
+ else if (isa<NamespaceDecl>(*D) || isa<LinkageSpecDecl>(*D) ||
+ isa<ObjCImplementationDecl>(*D) ||
+ isa<ObjCInterfaceDecl>(*D) ||
+ isa<ObjCProtocolDecl>(*D) ||
+ isa<ObjCCategoryImplDecl>(*D) ||
+ isa<ObjCCategoryDecl>(*D))
+ Terminator = 0;
+ else if (isa<EnumConstantDecl>(*D)) {
+ DeclContext::decl_iterator Next = D;
+ ++Next;
+ if (Next != DEnd)
+ Terminator = ",";
+ } else
+ Terminator = ";";
+
+ if (Terminator)
+ Out << Terminator;
+ Out << "\n";
+ }
+
+ if (!Decls.empty())
+ ProcessDeclGroup(Decls);
+
+ if (Indent)
+ Indentation -= Policy.Indentation;
+}
+
+void DeclPrinter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
+ VisitDeclContext(D, false);
+}
+
+void DeclPrinter::VisitTypedefDecl(TypedefDecl *D) {
+ std::string S = D->getNameAsString();
+ D->getUnderlyingType().getAsStringInternal(S, Policy);
+ if (!Policy.SuppressSpecifiers) {
+ Out << "typedef ";
+
+ if (D->isModulePrivate())
+ Out << "__module_private__ ";
+ }
+ Out << S;
+ prettyPrintAttributes(D);
+}
+
+void DeclPrinter::VisitTypeAliasDecl(TypeAliasDecl *D) {
+ Out << "using " << *D << " = " << D->getUnderlyingType().getAsString(Policy);
+}
+
+void DeclPrinter::VisitEnumDecl(EnumDecl *D) {
+ if (!Policy.SuppressSpecifiers && D->isModulePrivate())
+ Out << "__module_private__ ";
+ Out << "enum ";
+ if (D->isScoped()) {
+ if (D->isScopedUsingClassTag())
+ Out << "class ";
+ else
+ Out << "struct ";
+ }
+ Out << *D;
+
+ if (D->isFixed()) {
+ std::string Underlying;
+ D->getIntegerType().getAsStringInternal(Underlying, Policy);
+ Out << " : " << Underlying;
+ }
+
+ if (D->isCompleteDefinition()) {
+ Out << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+ }
+ prettyPrintAttributes(D);
+}
+
+void DeclPrinter::VisitRecordDecl(RecordDecl *D) {
+ if (!Policy.SuppressSpecifiers && D->isModulePrivate())
+ Out << "__module_private__ ";
+ Out << D->getKindName();
+ if (D->getIdentifier())
+ Out << ' ' << *D;
+
+ if (D->isCompleteDefinition()) {
+ Out << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+ }
+}
+
+void DeclPrinter::VisitEnumConstantDecl(EnumConstantDecl *D) {
+ Out << *D;
+ if (Expr *Init = D->getInitExpr()) {
+ Out << " = ";
+ Init->printPretty(Out, Context, 0, Policy, Indentation);
+ }
+}
+
+void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
+ if (!Policy.SuppressSpecifiers) {
+ switch (D->getStorageClassAsWritten()) {
+ case SC_None: break;
+ case SC_Extern: Out << "extern "; break;
+ case SC_Static: Out << "static "; break;
+ case SC_PrivateExtern: Out << "__private_extern__ "; break;
+ case SC_Auto: case SC_Register: case SC_OpenCLWorkGroupLocal:
+ llvm_unreachable("invalid for functions");
+ }
+
+ if (D->isInlineSpecified()) Out << "inline ";
+ if (D->isVirtualAsWritten()) Out << "virtual ";
+ if (D->isModulePrivate()) Out << "__module_private__ ";
+ }
+
+ PrintingPolicy SubPolicy(Policy);
+ SubPolicy.SuppressSpecifiers = false;
+ std::string Proto = D->getNameInfo().getAsString();
+
+ QualType Ty = D->getType();
+ while (const ParenType *PT = dyn_cast<ParenType>(Ty)) {
+ Proto = '(' + Proto + ')';
+ Ty = PT->getInnerType();
+ }
+
+ if (isa<FunctionType>(Ty)) {
+ const FunctionType *AFT = Ty->getAs<FunctionType>();
+ const FunctionProtoType *FT = 0;
+ if (D->hasWrittenPrototype())
+ FT = dyn_cast<FunctionProtoType>(AFT);
+
+ Proto += "(";
+ if (FT) {
+ llvm::raw_string_ostream POut(Proto);
+ DeclPrinter ParamPrinter(POut, Context, SubPolicy, Indentation);
+ for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
+ if (i) POut << ", ";
+ ParamPrinter.VisitParmVarDecl(D->getParamDecl(i));
+ }
+
+ if (FT->isVariadic()) {
+ if (D->getNumParams()) POut << ", ";
+ POut << "...";
+ }
+ } else if (D->doesThisDeclarationHaveABody() && !D->hasPrototype()) {
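+      // Unprototyped (K&R-style) definition: print just the parameter names
+      // here; their full declarations are printed before the body below.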
+ for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
+ if (i)
+ Proto += ", ";
+ Proto += D->getParamDecl(i)->getNameAsString();
+ }
+ }
+
+ Proto += ")";
+
+ if (FT && FT->getTypeQuals()) {
+ unsigned TypeQuals = FT->getTypeQuals();
+ if (TypeQuals & Qualifiers::Const)
+ Proto += " const";
+ if (TypeQuals & Qualifiers::Volatile)
+ Proto += " volatile";
+ if (TypeQuals & Qualifiers::Restrict)
+ Proto += " restrict";
+ }
+
+ if (FT && FT->hasDynamicExceptionSpec()) {
+ Proto += " throw(";
+ if (FT->getExceptionSpecType() == EST_MSAny)
+ Proto += "...";
+ else
+ for (unsigned I = 0, N = FT->getNumExceptions(); I != N; ++I) {
+ if (I)
+ Proto += ", ";
+
+ std::string ExceptionType;
+ FT->getExceptionType(I).getAsStringInternal(ExceptionType, SubPolicy);
+ Proto += ExceptionType;
+ }
+ Proto += ")";
+ } else if (FT && isNoexceptExceptionSpec(FT->getExceptionSpecType())) {
+ Proto += " noexcept";
+ if (FT->getExceptionSpecType() == EST_ComputedNoexcept) {
+ Proto += "(";
+ llvm::raw_string_ostream EOut(Proto);
+ FT->getNoexceptExpr()->printPretty(EOut, Context, 0, SubPolicy,
+ Indentation);
+        // EOut streams directly into Proto, so flushing is sufficient;
+        // appending EOut.str() as well would duplicate the accumulated text.
+        EOut.flush();
+        Proto += ")";
+ }
+ }
+
+ if (CXXConstructorDecl *CDecl = dyn_cast<CXXConstructorDecl>(D)) {
+ bool HasInitializerList = false;
+ for (CXXConstructorDecl::init_const_iterator B = CDecl->init_begin(),
+ E = CDecl->init_end();
+ B != E; ++B) {
+ CXXCtorInitializer * BMInitializer = (*B);
+ if (BMInitializer->isInClassMemberInitializer())
+ continue;
+
+ if (!HasInitializerList) {
+ Proto += " : ";
+ Out << Proto;
+ Proto.clear();
+ HasInitializerList = true;
+ } else
+ Out << ", ";
+
+ if (BMInitializer->isAnyMemberInitializer()) {
+ FieldDecl *FD = BMInitializer->getAnyMember();
+ Out << *FD;
+ } else {
+ Out << QualType(BMInitializer->getBaseClass(), 0).getAsString(Policy);
+ }
+
+ Out << "(";
+ if (!BMInitializer->getInit()) {
+ // Nothing to print
+ } else {
+ Expr *Init = BMInitializer->getInit();
+ if (ExprWithCleanups *Tmp = dyn_cast<ExprWithCleanups>(Init))
+ Init = Tmp->getSubExpr();
+
+ Init = Init->IgnoreParens();
+
+ Expr *SimpleInit = 0;
+ Expr **Args = 0;
+ unsigned NumArgs = 0;
+ if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
+ Args = ParenList->getExprs();
+ NumArgs = ParenList->getNumExprs();
+ } else if (CXXConstructExpr *Construct
+ = dyn_cast<CXXConstructExpr>(Init)) {
+ Args = Construct->getArgs();
+ NumArgs = Construct->getNumArgs();
+ } else
+ SimpleInit = Init;
+
+ if (SimpleInit)
+ SimpleInit->printPretty(Out, Context, 0, Policy, Indentation);
+ else {
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ if (isa<CXXDefaultArgExpr>(Args[I]))
+ break;
+
+ if (I)
+ Out << ", ";
+ Args[I]->printPretty(Out, Context, 0, Policy, Indentation);
+ }
+ }
+ }
+ Out << ")";
+ }
+ }
+ else
+ AFT->getResultType().getAsStringInternal(Proto, Policy);
+ } else {
+ Ty.getAsStringInternal(Proto, Policy);
+ }
+
+ Out << Proto;
+ prettyPrintAttributes(D);
+
+ if (D->isPure())
+ Out << " = 0";
+ else if (D->isDeletedAsWritten())
+ Out << " = delete";
+ else if (D->doesThisDeclarationHaveABody()) {
+ if (!D->hasPrototype() && D->getNumParams()) {
+ // This is a K&R function definition, so we need to print the
+ // parameters.
+ Out << '\n';
+ DeclPrinter ParamPrinter(Out, Context, SubPolicy, Indentation);
+ Indentation += Policy.Indentation;
+ for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
+ Indent();
+ ParamPrinter.VisitParmVarDecl(D->getParamDecl(i));
+ Out << ";\n";
+ }
+ Indentation -= Policy.Indentation;
+ } else
+ Out << ' ';
+
+ D->getBody()->printPretty(Out, Context, 0, SubPolicy, Indentation);
+ Out << '\n';
+ }
+}
+
+void DeclPrinter::VisitFieldDecl(FieldDecl *D) {
+ if (!Policy.SuppressSpecifiers && D->isMutable())
+ Out << "mutable ";
+ if (!Policy.SuppressSpecifiers && D->isModulePrivate())
+ Out << "__module_private__ ";
+
+ std::string Name = D->getNameAsString();
+ D->getType().getAsStringInternal(Name, Policy);
+ Out << Name;
+
+ if (D->isBitField()) {
+ Out << " : ";
+ D->getBitWidth()->printPretty(Out, Context, 0, Policy, Indentation);
+ }
+
+ Expr *Init = D->getInClassInitializer();
+ if (!Policy.SuppressInitializers && Init) {
+ Out << " = ";
+ Init->printPretty(Out, Context, 0, Policy, Indentation);
+ }
+ prettyPrintAttributes(D);
+}
+
+void DeclPrinter::VisitLabelDecl(LabelDecl *D) {
+ Out << *D << ":";
+}
+
+
+void DeclPrinter::VisitVarDecl(VarDecl *D) {
+ StorageClass SCAsWritten = D->getStorageClassAsWritten();
+ if (!Policy.SuppressSpecifiers && SCAsWritten != SC_None)
+ Out << VarDecl::getStorageClassSpecifierString(SCAsWritten) << " ";
+
+ if (!Policy.SuppressSpecifiers && D->isThreadSpecified())
+ Out << "__thread ";
+ if (!Policy.SuppressSpecifiers && D->isModulePrivate())
+ Out << "__module_private__ ";
+
+ std::string Name = D->getNameAsString();
+ QualType T = D->getType();
+ if (ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(D))
+ T = Parm->getOriginalType();
+ T.getAsStringInternal(Name, Policy);
+ Out << Name;
+ Expr *Init = D->getInit();
+ if (!Policy.SuppressInitializers && Init) {
+ bool ImplicitInit = false;
+ if (CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
+ ImplicitInit = D->getInitStyle() == VarDecl::CallInit &&
+ Construct->getNumArgs() == 0 && !Construct->isListInitialization();
+ if (!ImplicitInit) {
+ if (D->getInitStyle() == VarDecl::CallInit)
+ Out << "(";
+ else if (D->getInitStyle() == VarDecl::CInit) {
+ Out << " = ";
+ }
+ Init->printPretty(Out, Context, 0, Policy, Indentation);
+ if (D->getInitStyle() == VarDecl::CallInit)
+ Out << ")";
+ }
+ }
+ prettyPrintAttributes(D);
+}
+
+void DeclPrinter::VisitParmVarDecl(ParmVarDecl *D) {
+ VisitVarDecl(D);
+}
+
+void DeclPrinter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) {
+ Out << "__asm (";
+ D->getAsmString()->printPretty(Out, Context, 0, Policy, Indentation);
+ Out << ")";
+}
+
+void DeclPrinter::VisitImportDecl(ImportDecl *D) {
+ Out << "@__experimental_modules_import " << D->getImportedModule()->getFullModuleName()
+ << ";\n";
+}
+
+void DeclPrinter::VisitStaticAssertDecl(StaticAssertDecl *D) {
+ Out << "static_assert(";
+ D->getAssertExpr()->printPretty(Out, Context, 0, Policy, Indentation);
+ Out << ", ";
+ D->getMessage()->printPretty(Out, Context, 0, Policy, Indentation);
+ Out << ")";
+}
+
+//----------------------------------------------------------------------------
+// C++ declarations
+//----------------------------------------------------------------------------
+void DeclPrinter::VisitNamespaceDecl(NamespaceDecl *D) {
+ Out << "namespace " << *D << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+}
+
+void DeclPrinter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
+ Out << "using namespace ";
+ if (D->getQualifier())
+ D->getQualifier()->print(Out, Policy);
+ Out << *D->getNominatedNamespaceAsWritten();
+}
+
+void DeclPrinter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
+ Out << "namespace " << *D << " = ";
+ if (D->getQualifier())
+ D->getQualifier()->print(Out, Policy);
+ Out << *D->getAliasedNamespace();
+}
+
+void DeclPrinter::VisitCXXRecordDecl(CXXRecordDecl *D) {
+ if (!Policy.SuppressSpecifiers && D->isModulePrivate())
+ Out << "__module_private__ ";
+ Out << D->getKindName();
+ if (D->getIdentifier())
+ Out << ' ' << *D;
+
+ if (D->isCompleteDefinition()) {
+ // Print the base classes
+ if (D->getNumBases()) {
+ Out << " : ";
+ for (CXXRecordDecl::base_class_iterator Base = D->bases_begin(),
+ BaseEnd = D->bases_end(); Base != BaseEnd; ++Base) {
+ if (Base != D->bases_begin())
+ Out << ", ";
+
+ if (Base->isVirtual())
+ Out << "virtual ";
+
+ AccessSpecifier AS = Base->getAccessSpecifierAsWritten();
+ if (AS != AS_none)
+ Print(AS);
+ Out << " " << Base->getType().getAsString(Policy);
+
+ if (Base->isPackExpansion())
+ Out << "...";
+ }
+ }
+
+ // Print the class definition
+ // FIXME: Doesn't print access specifiers, e.g., "public:"
+ Out << " {\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+ }
+}
+
+void DeclPrinter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
+ const char *l;
+ if (D->getLanguage() == LinkageSpecDecl::lang_c)
+ l = "C";
+ else {
+ assert(D->getLanguage() == LinkageSpecDecl::lang_cxx &&
+ "unknown language in linkage specification");
+ l = "C++";
+ }
+
+ Out << "extern \"" << l << "\" ";
+ if (D->hasBraces()) {
+ Out << "{\n";
+ VisitDeclContext(D);
+ Indent() << "}";
+ } else
+ Visit(*D->decls_begin());
+}
+
+void DeclPrinter::PrintTemplateParameters(
+ const TemplateParameterList *Params, const TemplateArgumentList *Args = 0) {
+ assert(Params);
+ assert(!Args || Params->size() == Args->size());
+
+ Out << "template <";
+
+ for (unsigned i = 0, e = Params->size(); i != e; ++i) {
+ if (i != 0)
+ Out << ", ";
+
+ const Decl *Param = Params->getParam(i);
+ if (const TemplateTypeParmDecl *TTP =
+ dyn_cast<TemplateTypeParmDecl>(Param)) {
+
+ if (TTP->wasDeclaredWithTypename())
+ Out << "typename ";
+ else
+ Out << "class ";
+
+ if (TTP->isParameterPack())
+ Out << "... ";
+
+ Out << *TTP;
+
+ if (Args) {
+ Out << " = ";
+ Args->get(i).print(Policy, Out);
+ } else if (TTP->hasDefaultArgument()) {
+ Out << " = ";
+ Out << TTP->getDefaultArgument().getAsString(Policy);
+      }
+ } else if (const NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ Out << NTTP->getType().getAsString(Policy);
+
+ if (NTTP->isParameterPack() && !isa<PackExpansionType>(NTTP->getType()))
+ Out << "...";
+
+ if (IdentifierInfo *Name = NTTP->getIdentifier()) {
+ Out << ' ';
+ Out << Name->getName();
+ }
+
+ if (Args) {
+ Out << " = ";
+ Args->get(i).print(Policy, Out);
+ } else if (NTTP->hasDefaultArgument()) {
+ Out << " = ";
+ NTTP->getDefaultArgument()->printPretty(Out, Context, 0, Policy,
+ Indentation);
+ }
+ } else if (const TemplateTemplateParmDecl *TTPD =
+ dyn_cast<TemplateTemplateParmDecl>(Param)) {
+ VisitTemplateDecl(TTPD);
+ // FIXME: print the default argument, if present.
+ }
+ }
+
+ Out << "> ";
+}
+
+void DeclPrinter::VisitTemplateDecl(const TemplateDecl *D) {
+ PrintTemplateParameters(D->getTemplateParameters());
+
+ if (const TemplateTemplateParmDecl *TTP =
+ dyn_cast<TemplateTemplateParmDecl>(D)) {
+ Out << "class ";
+ if (TTP->isParameterPack())
+ Out << "...";
+ Out << D->getName();
+ } else {
+ Visit(D->getTemplatedDecl());
+ }
+}
+
+void DeclPrinter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
+ if (PrintInstantiation) {
+ TemplateParameterList *Params = D->getTemplateParameters();
+ for (FunctionTemplateDecl::spec_iterator I = D->spec_begin(), E = D->spec_end();
+ I != E; ++I) {
+ PrintTemplateParameters(Params, (*I)->getTemplateSpecializationArgs());
+ Visit(*I);
+ }
+ }
+
+ return VisitRedeclarableTemplateDecl(D);
+}
+
+void DeclPrinter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
+ if (PrintInstantiation) {
+ TemplateParameterList *Params = D->getTemplateParameters();
+ for (ClassTemplateDecl::spec_iterator I = D->spec_begin(), E = D->spec_end();
+ I != E; ++I) {
+ PrintTemplateParameters(Params, &(*I)->getTemplateArgs());
+ Visit(*I);
+ Out << '\n';
+ }
+ }
+
+ return VisitRedeclarableTemplateDecl(D);
+}
+
+//----------------------------------------------------------------------------
+// Objective-C declarations
+//----------------------------------------------------------------------------
+
+void DeclPrinter::VisitObjCMethodDecl(ObjCMethodDecl *OMD) {
+ if (OMD->isInstanceMethod())
+ Out << "- ";
+ else
+ Out << "+ ";
+ if (!OMD->getResultType().isNull())
+ Out << '(' << OMD->getResultType().getAsString(Policy) << ")";
+
+ std::string name = OMD->getSelector().getAsString();
+ std::string::size_type pos, lastPos = 0;
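+  // Print the selector piece by piece: each keyword fragment of the selector
+  // is followed by the corresponding parameter's type and name.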
+ for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
+ E = OMD->param_end(); PI != E; ++PI) {
+ // FIXME: selector is missing here!
+ pos = name.find_first_of(':', lastPos);
+ Out << " " << name.substr(lastPos, pos - lastPos);
+ Out << ":(" << (*PI)->getType().getAsString(Policy) << ')' << **PI;
+ lastPos = pos + 1;
+ }
+
+ if (OMD->param_begin() == OMD->param_end())
+ Out << " " << name;
+
+ if (OMD->isVariadic())
+ Out << ", ...";
+
+ if (OMD->getBody()) {
+ Out << ' ';
+ OMD->getBody()->printPretty(Out, Context, 0, Policy);
+ Out << '\n';
+ }
+}
+
+void DeclPrinter::VisitObjCImplementationDecl(ObjCImplementationDecl *OID) {
+ std::string I = OID->getNameAsString();
+ ObjCInterfaceDecl *SID = OID->getSuperClass();
+
+ if (SID)
+ Out << "@implementation " << I << " : " << *SID;
+ else
+ Out << "@implementation " << I;
+ Out << "\n";
+ VisitDeclContext(OID, false);
+ Out << "@end";
+}
+
+void DeclPrinter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *OID) {
+ std::string I = OID->getNameAsString();
+ ObjCInterfaceDecl *SID = OID->getSuperClass();
+
+ if (!OID->isThisDeclarationADefinition()) {
+ Out << "@class " << I << ";";
+ return;
+ }
+
+ if (SID)
+ Out << "@interface " << I << " : " << *SID;
+ else
+ Out << "@interface " << I;
+
+ // Protocols?
+ const ObjCList<ObjCProtocolDecl> &Protocols = OID->getReferencedProtocols();
+ if (!Protocols.empty()) {
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end(); I != E; ++I)
+ Out << (I == Protocols.begin() ? '<' : ',') << **I;
+ }
+
+ if (!Protocols.empty())
+ Out << "> ";
+
+ if (OID->ivar_size() > 0) {
+ Out << "{\n";
+ Indentation += Policy.Indentation;
+ for (ObjCInterfaceDecl::ivar_iterator I = OID->ivar_begin(),
+ E = OID->ivar_end(); I != E; ++I) {
+ Indent() << (*I)->getType().getAsString(Policy) << ' ' << **I << ";\n";
+ }
+ Indentation -= Policy.Indentation;
+ Out << "}\n";
+ }
+
+ VisitDeclContext(OID, false);
+ Out << "@end";
+ // FIXME: implement the rest...
+}
+
+void DeclPrinter::VisitObjCProtocolDecl(ObjCProtocolDecl *PID) {
+ if (!PID->isThisDeclarationADefinition()) {
+ Out << "@protocol " << PID->getIdentifier() << ";\n";
+ return;
+ }
+
+ Out << "@protocol " << *PID << '\n';
+ VisitDeclContext(PID, false);
+ Out << "@end";
+}
+
+void DeclPrinter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *PID) {
+ Out << "@implementation " << *PID->getClassInterface() << '(' << *PID <<")\n";
+
+ VisitDeclContext(PID, false);
+ Out << "@end";
+ // FIXME: implement the rest...
+}
+
+void DeclPrinter::VisitObjCCategoryDecl(ObjCCategoryDecl *PID) {
+ Out << "@interface " << *PID->getClassInterface() << '(' << *PID << ")\n";
+ VisitDeclContext(PID, false);
+ Out << "@end";
+
+ // FIXME: implement the rest...
+}
+
+void DeclPrinter::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *AID) {
+ Out << "@compatibility_alias " << *AID
+ << ' ' << *AID->getClassInterface() << ";\n";
+}
+
+/// PrintObjCPropertyDecl - print a property declaration.
+///
+void DeclPrinter::VisitObjCPropertyDecl(ObjCPropertyDecl *PDecl) {
+ if (PDecl->getPropertyImplementation() == ObjCPropertyDecl::Required)
+ Out << "@required\n";
+ else if (PDecl->getPropertyImplementation() == ObjCPropertyDecl::Optional)
+ Out << "@optional\n";
+
+ Out << "@property";
+ if (PDecl->getPropertyAttributes() != ObjCPropertyDecl::OBJC_PR_noattr) {
+ bool first = true;
+ Out << " (";
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_readonly) {
+ Out << (first ? ' ' : ',') << "readonly";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_getter) {
+ Out << (first ? ' ' : ',') << "getter = "
+ << PDecl->getGetterName().getAsString();
+ first = false;
+ }
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_setter) {
+ Out << (first ? ' ' : ',') << "setter = "
+ << PDecl->getSetterName().getAsString();
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_assign) {
+ Out << (first ? ' ' : ',') << "assign";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_readwrite) {
+ Out << (first ? ' ' : ',') << "readwrite";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_retain) {
+ Out << (first ? ' ' : ',') << "retain";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_strong) {
+ Out << (first ? ' ' : ',') << "strong";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_copy) {
+ Out << (first ? ' ' : ',') << "copy";
+ first = false;
+ }
+
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_nonatomic) {
+ Out << (first ? ' ' : ',') << "nonatomic";
+ first = false;
+ }
+ if (PDecl->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_atomic) {
+ Out << (first ? ' ' : ',') << "atomic";
+ first = false;
+ }
+
+ (void) first; // Silence dead store warning due to idiomatic code.
+ Out << " )";
+ }
+ Out << ' ' << PDecl->getType().getAsString(Policy) << ' ' << *PDecl;
+}
+
+void DeclPrinter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *PID) {
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize)
+ Out << "@synthesize ";
+ else
+ Out << "@dynamic ";
+ Out << *PID->getPropertyDecl();
+ if (PID->getPropertyIvarDecl())
+ Out << '=' << *PID->getPropertyIvarDecl();
+}
+
+void DeclPrinter::VisitUsingDecl(UsingDecl *D) {
+ Out << "using ";
+ D->getQualifier()->print(Out, Policy);
+ Out << *D;
+}
+
+void
+DeclPrinter::VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D) {
+ Out << "using typename ";
+ D->getQualifier()->print(Out, Policy);
+ Out << D->getDeclName();
+}
+
+void DeclPrinter::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
+ Out << "using ";
+ D->getQualifier()->print(Out, Policy);
+ Out << D->getDeclName();
+}
+
+void DeclPrinter::VisitUsingShadowDecl(UsingShadowDecl *D) {
+ // ignore
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
new file mode 100644
index 0000000..4590195
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DeclTemplate.cpp
@@ -0,0 +1,872 @@
+//===--- DeclTemplate.cpp - Template Declaration AST Node Implementation --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the C++ related Decl classes for templates.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/STLExtras.h"
+#include <memory>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// TemplateParameterList Implementation
+//===----------------------------------------------------------------------===//
+
+TemplateParameterList::TemplateParameterList(SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ NamedDecl **Params, unsigned NumParams,
+ SourceLocation RAngleLoc)
+ : TemplateLoc(TemplateLoc), LAngleLoc(LAngleLoc), RAngleLoc(RAngleLoc),
+ NumParams(NumParams) {
+ for (unsigned Idx = 0; Idx < NumParams; ++Idx)
+ begin()[Idx] = Params[Idx];
+}
+
+TemplateParameterList *
+TemplateParameterList::Create(const ASTContext &C, SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc, NamedDecl **Params,
+ unsigned NumParams, SourceLocation RAngleLoc) {
+ unsigned Size = sizeof(TemplateParameterList)
+ + sizeof(NamedDecl *) * NumParams;
+ unsigned Align = llvm::AlignOf<TemplateParameterList>::Alignment;
+ void *Mem = C.Allocate(Size, Align);
+ return new (Mem) TemplateParameterList(TemplateLoc, LAngleLoc, Params,
+ NumParams, RAngleLoc);
+}
+
+unsigned TemplateParameterList::getMinRequiredArguments() const {
+ unsigned NumRequiredArgs = 0;
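+  // Count parameters until the first one that may be omitted: a parameter
+  // pack (which can absorb any remaining arguments) or a parameter with a
+  // default argument. An expanded pack still contributes one required
+  // argument per expansion.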
+ for (iterator P = const_cast<TemplateParameterList *>(this)->begin(),
+ PEnd = const_cast<TemplateParameterList *>(this)->end();
+ P != PEnd; ++P) {
+ if ((*P)->isTemplateParameterPack()) {
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P))
+ if (NTTP->isExpandedParameterPack()) {
+ NumRequiredArgs += NTTP->getNumExpansionTypes();
+ continue;
+ }
+
+ break;
+ }
+
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
+ if (TTP->hasDefaultArgument())
+ break;
+ } else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ if (NTTP->hasDefaultArgument())
+ break;
+ } else if (cast<TemplateTemplateParmDecl>(*P)->hasDefaultArgument())
+ break;
+
+ ++NumRequiredArgs;
+ }
+
+ return NumRequiredArgs;
+}
+
+unsigned TemplateParameterList::getDepth() const {
+ if (size() == 0)
+ return 0;
+
+ const NamedDecl *FirstParm = getParam(0);
+ if (const TemplateTypeParmDecl *TTP
+ = dyn_cast<TemplateTypeParmDecl>(FirstParm))
+ return TTP->getDepth();
+ else if (const NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(FirstParm))
+ return NTTP->getDepth();
+ else
+ return cast<TemplateTemplateParmDecl>(FirstParm)->getDepth();
+}
+
+static void AdoptTemplateParameterList(TemplateParameterList *Params,
+ DeclContext *Owner) {
+ for (TemplateParameterList::iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ (*P)->setDeclContext(Owner);
+
+ if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(*P))
+ AdoptTemplateParameterList(TTP->getTemplateParameters(), Owner);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// RedeclarableTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+RedeclarableTemplateDecl::CommonBase *RedeclarableTemplateDecl::getCommonPtr() {
+ if (!Common) {
+ // Walk the previous-declaration chain until we either find a declaration
+ // with a common pointer or we run out of previous declarations.
+ llvm::SmallVector<RedeclarableTemplateDecl *, 2> PrevDecls;
+ for (RedeclarableTemplateDecl *Prev = getPreviousDecl(); Prev;
+ Prev = Prev->getPreviousDecl()) {
+ if (Prev->Common) {
+ Common = Prev->Common;
+ break;
+ }
+
+ PrevDecls.push_back(Prev);
+ }
+
+ // If we never found a common pointer, allocate one now.
+ if (!Common) {
+ // FIXME: If any of the declarations is from an AST file, we probably
+ // need an update record to add the common data.
+
+ Common = newCommon(getASTContext());
+ }
+
+ // Update any previous declarations we saw with the common pointer.
+ for (unsigned I = 0, N = PrevDecls.size(); I != N; ++I)
+ PrevDecls[I]->Common = Common;
+ }
+
+ return Common;
+}
+
+template <class EntryType>
+typename RedeclarableTemplateDecl::SpecEntryTraits<EntryType>::DeclType*
+RedeclarableTemplateDecl::findSpecializationImpl(
+ llvm::FoldingSet<EntryType> &Specs,
+ const TemplateArgument *Args, unsigned NumArgs,
+ void *&InsertPos) {
+ typedef SpecEntryTraits<EntryType> SETraits;
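+  // Profile the argument list and look it up in the folding set of
+  // specializations; InsertPos is set so the caller can insert a new
+  // specialization if the lookup fails.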
+ llvm::FoldingSetNodeID ID;
+ EntryType::Profile(ID,Args,NumArgs, getASTContext());
+ EntryType *Entry = Specs.FindNodeOrInsertPos(ID, InsertPos);
+ return Entry ? SETraits::getMostRecentDecl(Entry) : 0;
+}
+
+/// \brief Generate the injected template arguments for the given template
+/// parameter list, e.g., for the injected-class-name of a class template.
+static void GenerateInjectedTemplateArgs(ASTContext &Context,
+ TemplateParameterList *Params,
+ TemplateArgument *Args) {
+ for (TemplateParameterList::iterator Param = Params->begin(),
+ ParamEnd = Params->end();
+ Param != ParamEnd; ++Param) {
+ TemplateArgument Arg;
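+    // Build an argument that refers back to the parameter itself: a type for
+    // type parameters, a reference expression for non-type parameters, and a
+    // template name for template template parameters; packs are wrapped in
+    // pack expansions.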
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*Param)) {
+ QualType ArgType = Context.getTypeDeclType(TTP);
+ if (TTP->isParameterPack())
+ ArgType = Context.getPackExpansionType(ArgType,
+ llvm::Optional<unsigned>());
+
+ Arg = TemplateArgument(ArgType);
+ } else if (NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
+ Expr *E = new (Context) DeclRefExpr(NTTP, /*enclosing*/ false,
+ NTTP->getType().getNonLValueExprType(Context),
+ Expr::getValueKindForType(NTTP->getType()),
+ NTTP->getLocation());
+
+ if (NTTP->isParameterPack())
+ E = new (Context) PackExpansionExpr(Context.DependentTy, E,
+ NTTP->getLocation(),
+ llvm::Optional<unsigned>());
+ Arg = TemplateArgument(E);
+ } else {
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*Param);
+ if (TTP->isParameterPack())
+ Arg = TemplateArgument(TemplateName(TTP), llvm::Optional<unsigned>());
+ else
+ Arg = TemplateArgument(TemplateName(TTP));
+ }
+
+ if ((*Param)->isTemplateParameterPack())
+ Arg = TemplateArgument::CreatePackCopy(Context, &Arg, 1);
+
+ *Args++ = Arg;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// FunctionTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void FunctionTemplateDecl::DeallocateCommon(void *Ptr) {
+ static_cast<Common *>(Ptr)->~Common();
+}
+
+FunctionTemplateDecl *FunctionTemplateDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation L,
+ DeclarationName Name,
+ TemplateParameterList *Params,
+ NamedDecl *Decl) {
+ AdoptTemplateParameterList(Params, cast<DeclContext>(Decl));
+ return new (C) FunctionTemplateDecl(DC, L, Name, Params, Decl);
+}
+
+FunctionTemplateDecl *FunctionTemplateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FunctionTemplateDecl));
+ return new (Mem) FunctionTemplateDecl(0, SourceLocation(), DeclarationName(),
+ 0, 0);
+}
+
+RedeclarableTemplateDecl::CommonBase *
+FunctionTemplateDecl::newCommon(ASTContext &C) {
+ Common *CommonPtr = new (C) Common;
+ C.AddDeallocation(DeallocateCommon, CommonPtr);
+ return CommonPtr;
+}
+
+FunctionDecl *
+FunctionTemplateDecl::findSpecialization(const TemplateArgument *Args,
+ unsigned NumArgs, void *&InsertPos) {
+ return findSpecializationImpl(getSpecializations(), Args, NumArgs, InsertPos);
+}
+
+void FunctionTemplateDecl::addSpecialization(
+ FunctionTemplateSpecializationInfo *Info, void *InsertPos) {
+ if (InsertPos)
+ getSpecializations().InsertNode(Info, InsertPos);
+ else
+ getSpecializations().GetOrInsertNode(Info);
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->AddedCXXTemplateSpecialization(this, Info->Function);
+}
+
+std::pair<const TemplateArgument *, unsigned>
+FunctionTemplateDecl::getInjectedTemplateArgs() {
+ TemplateParameterList *Params = getTemplateParameters();
+ Common *CommonPtr = getCommonPtr();
+ if (!CommonPtr->InjectedArgs) {
+ CommonPtr->InjectedArgs
+ = new (getASTContext()) TemplateArgument [Params->size()];
+ GenerateInjectedTemplateArgs(getASTContext(), Params,
+ CommonPtr->InjectedArgs);
+ }
+
+ return std::make_pair(CommonPtr->InjectedArgs, Params->size());
+}
+
+//===----------------------------------------------------------------------===//
+// ClassTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void ClassTemplateDecl::DeallocateCommon(void *Ptr) {
+ static_cast<Common *>(Ptr)->~Common();
+}
+
+ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation L,
+ DeclarationName Name,
+ TemplateParameterList *Params,
+ NamedDecl *Decl,
+ ClassTemplateDecl *PrevDecl) {
+ AdoptTemplateParameterList(Params, cast<DeclContext>(Decl));
+ ClassTemplateDecl *New = new (C) ClassTemplateDecl(DC, L, Name, Params, Decl);
+ New->setPreviousDeclaration(PrevDecl);
+ return New;
+}
+
+ClassTemplateDecl *ClassTemplateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(ClassTemplateDecl));
+ return new (Mem) ClassTemplateDecl(EmptyShell());
+}
+
+void ClassTemplateDecl::LoadLazySpecializations() {
+ Common *CommonPtr = getCommonPtr();
+ if (CommonPtr->LazySpecializations) {
+ ASTContext &Context = getASTContext();
+ uint32_t *Specs = CommonPtr->LazySpecializations;
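+    // The first entry holds the number of lazy specializations; the rest are
+    // declaration IDs to pull in from the external AST source.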
+ CommonPtr->LazySpecializations = 0;
+ for (uint32_t I = 0, N = *Specs++; I != N; ++I)
+ (void)Context.getExternalSource()->GetExternalDecl(Specs[I]);
+ }
+}
+
+llvm::FoldingSet<ClassTemplateSpecializationDecl> &
+ClassTemplateDecl::getSpecializations() {
+ LoadLazySpecializations();
+ return getCommonPtr()->Specializations;
+}
+
+llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> &
+ClassTemplateDecl::getPartialSpecializations() {
+ LoadLazySpecializations();
+ return getCommonPtr()->PartialSpecializations;
+}
+
+RedeclarableTemplateDecl::CommonBase *
+ClassTemplateDecl::newCommon(ASTContext &C) {
+ Common *CommonPtr = new (C) Common;
+ C.AddDeallocation(DeallocateCommon, CommonPtr);
+ return CommonPtr;
+}
+
+ClassTemplateSpecializationDecl *
+ClassTemplateDecl::findSpecialization(const TemplateArgument *Args,
+ unsigned NumArgs, void *&InsertPos) {
+ return findSpecializationImpl(getSpecializations(), Args, NumArgs, InsertPos);
+}
+
+void ClassTemplateDecl::AddSpecialization(ClassTemplateSpecializationDecl *D,
+ void *InsertPos) {
+ if (InsertPos)
+ getSpecializations().InsertNode(D, InsertPos);
+ else {
+ ClassTemplateSpecializationDecl *Existing
+ = getSpecializations().GetOrInsertNode(D);
+ (void)Existing;
+ assert(Existing->isCanonicalDecl() && "Non-canonical specialization?");
+ }
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->AddedCXXTemplateSpecialization(this, D);
+}
+
+ClassTemplatePartialSpecializationDecl *
+ClassTemplateDecl::findPartialSpecialization(const TemplateArgument *Args,
+ unsigned NumArgs,
+ void *&InsertPos) {
+ return findSpecializationImpl(getPartialSpecializations(), Args, NumArgs,
+ InsertPos);
+}
+
+void ClassTemplateDecl::AddPartialSpecialization(
+ ClassTemplatePartialSpecializationDecl *D,
+ void *InsertPos) {
+ if (InsertPos)
+ getPartialSpecializations().InsertNode(D, InsertPos);
+ else {
+ ClassTemplatePartialSpecializationDecl *Existing
+ = getPartialSpecializations().GetOrInsertNode(D);
+ (void)Existing;
+ assert(Existing->isCanonicalDecl() && "Non-canonical specialization?");
+ }
+
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->AddedCXXTemplateSpecialization(this, D);
+}
+
+void ClassTemplateDecl::getPartialSpecializations(
+ SmallVectorImpl<ClassTemplatePartialSpecializationDecl *> &PS) {
+ llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> &PartialSpecs
+ = getPartialSpecializations();
+ PS.clear();
+ PS.resize(PartialSpecs.size());
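+  // Index each partial specialization by its sequence number so the result
+  // has a stable order independent of folding-set iteration order.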
+ for (llvm::FoldingSet<ClassTemplatePartialSpecializationDecl>::iterator
+ P = PartialSpecs.begin(), PEnd = PartialSpecs.end();
+ P != PEnd; ++P) {
+ assert(!PS[P->getSequenceNumber()]);
+ PS[P->getSequenceNumber()] = P->getMostRecentDecl();
+ }
+}
+
+ClassTemplatePartialSpecializationDecl *
+ClassTemplateDecl::findPartialSpecialization(QualType T) {
+ ASTContext &Context = getASTContext();
+ typedef llvm::FoldingSet<ClassTemplatePartialSpecializationDecl>::iterator
+ partial_spec_iterator;
+ for (partial_spec_iterator P = getPartialSpecializations().begin(),
+ PEnd = getPartialSpecializations().end();
+ P != PEnd; ++P) {
+ if (Context.hasSameType(P->getInjectedSpecializationType(), T))
+ return P->getMostRecentDecl();
+ }
+
+ return 0;
+}
+
+ClassTemplatePartialSpecializationDecl *
+ClassTemplateDecl::findPartialSpecInstantiatedFromMember(
+ ClassTemplatePartialSpecializationDecl *D) {
+ Decl *DCanon = D->getCanonicalDecl();
+ for (llvm::FoldingSet<ClassTemplatePartialSpecializationDecl>::iterator
+ P = getPartialSpecializations().begin(),
+ PEnd = getPartialSpecializations().end();
+ P != PEnd; ++P) {
+ if (P->getInstantiatedFromMember()->getCanonicalDecl() == DCanon)
+ return P->getMostRecentDecl();
+ }
+
+ return 0;
+}
+
+QualType
+ClassTemplateDecl::getInjectedClassNameSpecialization() {
+ Common *CommonPtr = getCommonPtr();
+ if (!CommonPtr->InjectedClassNameType.isNull())
+ return CommonPtr->InjectedClassNameType;
+
+ // C++0x [temp.dep.type]p2:
+ // The template argument list of a primary template is a template argument
+ // list in which the nth template argument has the value of the nth template
+ // parameter of the class template. If the nth template parameter is a
+ // template parameter pack (14.5.3), the nth template argument is a pack
+ // expansion (14.5.3) whose pattern is the name of the template parameter
+ // pack.
+ ASTContext &Context = getASTContext();
+ TemplateParameterList *Params = getTemplateParameters();
+ SmallVector<TemplateArgument, 16> TemplateArgs;
+ TemplateArgs.resize(Params->size());
+ GenerateInjectedTemplateArgs(getASTContext(), Params, TemplateArgs.data());
+ CommonPtr->InjectedClassNameType
+ = Context.getTemplateSpecializationType(TemplateName(this),
+ &TemplateArgs[0],
+ TemplateArgs.size());
+ return CommonPtr->InjectedClassNameType;
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateTypeParm Allocation/Deallocation Method Implementations
+//===----------------------------------------------------------------------===//
+
+TemplateTypeParmDecl *
+TemplateTypeParmDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation KeyLoc, SourceLocation NameLoc,
+ unsigned D, unsigned P, IdentifierInfo *Id,
+ bool Typename, bool ParameterPack) {
+ TemplateTypeParmDecl *TTPDecl =
+ new (C) TemplateTypeParmDecl(DC, KeyLoc, NameLoc, Id, Typename);
+ QualType TTPType = C.getTemplateTypeParmType(D, P, ParameterPack, TTPDecl);
+ TTPDecl->TypeForDecl = TTPType.getTypePtr();
+ return TTPDecl;
+}
+
+TemplateTypeParmDecl *
+TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TemplateTypeParmDecl));
+ return new (Mem) TemplateTypeParmDecl(0, SourceLocation(), SourceLocation(),
+ 0, false);
+}
+
+SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const {
+ return hasDefaultArgument()
+ ? DefaultArgument->getTypeLoc().getBeginLoc()
+ : SourceLocation();
+}
+
+SourceRange TemplateTypeParmDecl::getSourceRange() const {
+ if (hasDefaultArgument() && !defaultArgumentWasInherited())
+ return SourceRange(getLocStart(),
+ DefaultArgument->getTypeLoc().getEndLoc());
+ else
+ return TypeDecl::getSourceRange();
+}
+
+unsigned TemplateTypeParmDecl::getDepth() const {
+ return TypeForDecl->getAs<TemplateTypeParmType>()->getDepth();
+}
+
+unsigned TemplateTypeParmDecl::getIndex() const {
+ return TypeForDecl->getAs<TemplateTypeParmType>()->getIndex();
+}
+
+bool TemplateTypeParmDecl::isParameterPack() const {
+ return TypeForDecl->getAs<TemplateTypeParmType>()->isParameterPack();
+}
+
+//===----------------------------------------------------------------------===//
+// NonTypeTemplateParmDecl Method Implementations
+//===----------------------------------------------------------------------===//
+
+NonTypeTemplateParmDecl::NonTypeTemplateParmDecl(DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ unsigned D, unsigned P,
+ IdentifierInfo *Id,
+ QualType T,
+ TypeSourceInfo *TInfo,
+ const QualType *ExpandedTypes,
+ unsigned NumExpandedTypes,
+ TypeSourceInfo **ExpandedTInfos)
+ : DeclaratorDecl(NonTypeTemplateParm, DC, IdLoc, Id, T, TInfo, StartLoc),
+ TemplateParmPosition(D, P), DefaultArgumentAndInherited(0, false),
+ ParameterPack(true), ExpandedParameterPack(true),
+ NumExpandedTypes(NumExpandedTypes)
+{
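+  // The expanded types and their TypeSourceInfos live in trailing storage
+  // allocated immediately after this object (see the matching Create below).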
+ if (ExpandedTypes && ExpandedTInfos) {
+ void **TypesAndInfos = reinterpret_cast<void **>(this + 1);
+ for (unsigned I = 0; I != NumExpandedTypes; ++I) {
+ TypesAndInfos[2*I] = ExpandedTypes[I].getAsOpaquePtr();
+ TypesAndInfos[2*I + 1] = ExpandedTInfos[I];
+ }
+ }
+}
+
+NonTypeTemplateParmDecl *
+NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ unsigned D, unsigned P, IdentifierInfo *Id,
+ QualType T, bool ParameterPack,
+ TypeSourceInfo *TInfo) {
+ return new (C) NonTypeTemplateParmDecl(DC, StartLoc, IdLoc, D, P, Id,
+ T, ParameterPack, TInfo);
+}
+
+NonTypeTemplateParmDecl *
+NonTypeTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ unsigned D, unsigned P,
+ IdentifierInfo *Id, QualType T,
+ TypeSourceInfo *TInfo,
+ const QualType *ExpandedTypes,
+ unsigned NumExpandedTypes,
+ TypeSourceInfo **ExpandedTInfos) {
+ unsigned Size = sizeof(NonTypeTemplateParmDecl)
+ + NumExpandedTypes * 2 * sizeof(void*);
+ void *Mem = C.Allocate(Size);
+ return new (Mem) NonTypeTemplateParmDecl(DC, StartLoc, IdLoc,
+ D, P, Id, T, TInfo,
+ ExpandedTypes, NumExpandedTypes,
+ ExpandedTInfos);
+}
+
+NonTypeTemplateParmDecl *
+NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(NonTypeTemplateParmDecl));
+ return new (Mem) NonTypeTemplateParmDecl(0, SourceLocation(),
+ SourceLocation(), 0, 0, 0,
+ QualType(), false, 0);
+}
+
+NonTypeTemplateParmDecl *
+NonTypeTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID,
+ unsigned NumExpandedTypes) {
+ unsigned Size = sizeof(NonTypeTemplateParmDecl)
+ + NumExpandedTypes * 2 * sizeof(void*);
+
+ void *Mem = AllocateDeserializedDecl(C, ID, Size);
+ return new (Mem) NonTypeTemplateParmDecl(0, SourceLocation(),
+ SourceLocation(), 0, 0, 0,
+ QualType(), 0, 0, NumExpandedTypes,
+ 0);
+}
+
+SourceRange NonTypeTemplateParmDecl::getSourceRange() const {
+ if (hasDefaultArgument() && !defaultArgumentWasInherited())
+ return SourceRange(getOuterLocStart(),
+ getDefaultArgument()->getSourceRange().getEnd());
+ return DeclaratorDecl::getSourceRange();
+}
+
+SourceLocation NonTypeTemplateParmDecl::getDefaultArgumentLoc() const {
+ return hasDefaultArgument()
+ ? getDefaultArgument()->getSourceRange().getBegin()
+ : SourceLocation();
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateTemplateParmDecl Method Implementations
+//===----------------------------------------------------------------------===//
+
+void TemplateTemplateParmDecl::anchor() { }
+
+TemplateTemplateParmDecl *
+TemplateTemplateParmDecl::Create(const ASTContext &C, DeclContext *DC,
+ SourceLocation L, unsigned D, unsigned P,
+ bool ParameterPack, IdentifierInfo *Id,
+ TemplateParameterList *Params) {
+ return new (C) TemplateTemplateParmDecl(DC, L, D, P, ParameterPack, Id,
+ Params);
+}
+
+TemplateTemplateParmDecl *
+TemplateTemplateParmDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TemplateTemplateParmDecl));
+ return new (Mem) TemplateTemplateParmDecl(0, SourceLocation(), 0, 0, false,
+ 0, 0);
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateArgumentList Implementation
+//===----------------------------------------------------------------------===//
+TemplateArgumentList *
+TemplateArgumentList::CreateCopy(ASTContext &Context,
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
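+  // Copy the arguments into trailing storage allocated directly after the
+  // TemplateArgumentList object itself.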
+ std::size_t Size = sizeof(TemplateArgumentList)
+ + NumArgs * sizeof(TemplateArgument);
+ void *Mem = Context.Allocate(Size);
+ TemplateArgument *StoredArgs
+ = reinterpret_cast<TemplateArgument *>(
+ static_cast<TemplateArgumentList *>(Mem) + 1);
+ std::uninitialized_copy(Args, Args + NumArgs, StoredArgs);
+ return new (Mem) TemplateArgumentList(StoredArgs, NumArgs, true);
+}
+
+FunctionTemplateSpecializationInfo *
+FunctionTemplateSpecializationInfo::Create(ASTContext &C, FunctionDecl *FD,
+ FunctionTemplateDecl *Template,
+ TemplateSpecializationKind TSK,
+ const TemplateArgumentList *TemplateArgs,
+ const TemplateArgumentListInfo *TemplateArgsAsWritten,
+ SourceLocation POI) {
+ const ASTTemplateArgumentListInfo *ArgsAsWritten = 0;
+ if (TemplateArgsAsWritten)
+ ArgsAsWritten = ASTTemplateArgumentListInfo::Create(C,
+ *TemplateArgsAsWritten);
+
+ return new (C) FunctionTemplateSpecializationInfo(FD, Template, TSK,
+ TemplateArgs,
+ ArgsAsWritten,
+ POI);
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void TemplateDecl::anchor() { }
+
+//===----------------------------------------------------------------------===//
+// ClassTemplateSpecializationDecl Implementation
+//===----------------------------------------------------------------------===//
+ClassTemplateSpecializationDecl::
+ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK, TagKind TK,
+ DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ ClassTemplateDecl *SpecializedTemplate,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ ClassTemplateSpecializationDecl *PrevDecl)
+ : CXXRecordDecl(DK, TK, DC, StartLoc, IdLoc,
+ SpecializedTemplate->getIdentifier(),
+ PrevDecl),
+ SpecializedTemplate(SpecializedTemplate),
+ ExplicitInfo(0),
+ TemplateArgs(TemplateArgumentList::CreateCopy(Context, Args, NumArgs)),
+ SpecializationKind(TSK_Undeclared) {
+}
+
+ClassTemplateSpecializationDecl::ClassTemplateSpecializationDecl(Kind DK)
+ : CXXRecordDecl(DK, TTK_Struct, 0, SourceLocation(), SourceLocation(), 0, 0),
+ ExplicitInfo(0),
+ SpecializationKind(TSK_Undeclared) {
+}
+
+ClassTemplateSpecializationDecl *
+ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK,
+ DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ ClassTemplateDecl *SpecializedTemplate,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ ClassTemplateSpecializationDecl *PrevDecl) {
+ ClassTemplateSpecializationDecl *Result
+ = new (Context)ClassTemplateSpecializationDecl(Context,
+ ClassTemplateSpecialization,
+ TK, DC, StartLoc, IdLoc,
+ SpecializedTemplate,
+ Args, NumArgs,
+ PrevDecl);
+ Context.getTypeDeclType(Result, PrevDecl);
+ return Result;
+}
+
+ClassTemplateSpecializationDecl *
+ClassTemplateSpecializationDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID,
+ sizeof(ClassTemplateSpecializationDecl));
+ return new (Mem) ClassTemplateSpecializationDecl(ClassTemplateSpecialization);
+}
+
+void
+ClassTemplateSpecializationDecl::getNameForDiagnostic(std::string &S,
+ const PrintingPolicy &Policy,
+ bool Qualified) const {
+ NamedDecl::getNameForDiagnostic(S, Policy, Qualified);
+
+ const TemplateArgumentList &TemplateArgs = getTemplateArgs();
+ S += TemplateSpecializationType::PrintTemplateArgumentList(
+ TemplateArgs.data(),
+ TemplateArgs.size(),
+ Policy);
+}
+
+ClassTemplateDecl *
+ClassTemplateSpecializationDecl::getSpecializedTemplate() const {
+ if (SpecializedPartialSpecialization *PartialSpec
+ = SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization*>())
+ return PartialSpec->PartialSpecialization->getSpecializedTemplate();
+ return SpecializedTemplate.get<ClassTemplateDecl*>();
+}
+
+SourceRange
+ClassTemplateSpecializationDecl::getSourceRange() const {
+ if (ExplicitInfo) {
+ SourceLocation Begin = getExternLoc();
+ if (Begin.isInvalid())
+ Begin = getTemplateKeywordLoc();
+ SourceLocation End = getRBraceLoc();
+ if (End.isInvalid())
+ End = getTypeAsWritten()->getTypeLoc().getEndLoc();
+ return SourceRange(Begin, End);
+  } else {
+ // No explicit info available.
+ llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *>
+ inst_from = getInstantiatedFrom();
+ if (inst_from.isNull())
+ return getSpecializedTemplate()->getSourceRange();
+ if (ClassTemplateDecl *ctd = inst_from.dyn_cast<ClassTemplateDecl*>())
+ return ctd->getSourceRange();
+ return inst_from.get<ClassTemplatePartialSpecializationDecl*>()
+ ->getSourceRange();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// ClassTemplatePartialSpecializationDecl Implementation
+//===----------------------------------------------------------------------===//
+void ClassTemplatePartialSpecializationDecl::anchor() { }
+
+ClassTemplatePartialSpecializationDecl::
+ClassTemplatePartialSpecializationDecl(ASTContext &Context, TagKind TK,
+ DeclContext *DC,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ TemplateParameterList *Params,
+ ClassTemplateDecl *SpecializedTemplate,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ TemplateArgumentLoc *ArgInfos,
+ unsigned NumArgInfos,
+ ClassTemplatePartialSpecializationDecl *PrevDecl,
+ unsigned SequenceNumber)
+ : ClassTemplateSpecializationDecl(Context,
+ ClassTemplatePartialSpecialization,
+ TK, DC, StartLoc, IdLoc,
+ SpecializedTemplate,
+ Args, NumArgs, PrevDecl),
+ TemplateParams(Params), ArgsAsWritten(ArgInfos),
+ NumArgsAsWritten(NumArgInfos), SequenceNumber(SequenceNumber),
+ InstantiatedFromMember(0, false)
+{
+ AdoptTemplateParameterList(Params, this);
+}
+
+ClassTemplatePartialSpecializationDecl *
+ClassTemplatePartialSpecializationDecl::
+Create(ASTContext &Context, TagKind TK, DeclContext *DC,
+ SourceLocation StartLoc, SourceLocation IdLoc,
+ TemplateParameterList *Params,
+ ClassTemplateDecl *SpecializedTemplate,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ const TemplateArgumentListInfo &ArgInfos,
+ QualType CanonInjectedType,
+ ClassTemplatePartialSpecializationDecl *PrevDecl,
+ unsigned SequenceNumber) {
+ unsigned N = ArgInfos.size();
+ TemplateArgumentLoc *ClonedArgs = new (Context) TemplateArgumentLoc[N];
+ for (unsigned I = 0; I != N; ++I)
+ ClonedArgs[I] = ArgInfos[I];
+
+ ClassTemplatePartialSpecializationDecl *Result
+ = new (Context)ClassTemplatePartialSpecializationDecl(Context, TK, DC,
+ StartLoc, IdLoc,
+ Params,
+ SpecializedTemplate,
+ Args, NumArgs,
+ ClonedArgs, N,
+ PrevDecl,
+ SequenceNumber);
+ Result->setSpecializationKind(TSK_ExplicitSpecialization);
+
+ Context.getInjectedClassNameType(Result, CanonInjectedType);
+ return Result;
+}
+
+ClassTemplatePartialSpecializationDecl *
+ClassTemplatePartialSpecializationDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID,
+ sizeof(ClassTemplatePartialSpecializationDecl));
+ return new (Mem) ClassTemplatePartialSpecializationDecl();
+}
+
+//===----------------------------------------------------------------------===//
+// FriendTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void FriendTemplateDecl::anchor() { }
+
+FriendTemplateDecl *FriendTemplateDecl::Create(ASTContext &Context,
+ DeclContext *DC,
+ SourceLocation L,
+ unsigned NParams,
+ TemplateParameterList **Params,
+ FriendUnion Friend,
+ SourceLocation FLoc) {
+ FriendTemplateDecl *Result
+ = new (Context) FriendTemplateDecl(DC, L, NParams, Params, Friend, FLoc);
+ return Result;
+}
+
+FriendTemplateDecl *FriendTemplateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(FriendTemplateDecl));
+ return new (Mem) FriendTemplateDecl(EmptyShell());
+}
+
+//===----------------------------------------------------------------------===//
+// TypeAliasTemplateDecl Implementation
+//===----------------------------------------------------------------------===//
+
+TypeAliasTemplateDecl *TypeAliasTemplateDecl::Create(ASTContext &C,
+ DeclContext *DC,
+ SourceLocation L,
+ DeclarationName Name,
+ TemplateParameterList *Params,
+ NamedDecl *Decl) {
+ AdoptTemplateParameterList(Params, DC);
+ return new (C) TypeAliasTemplateDecl(DC, L, Name, Params, Decl);
+}
+
+TypeAliasTemplateDecl *TypeAliasTemplateDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID, sizeof(TypeAliasTemplateDecl));
+ return new (Mem) TypeAliasTemplateDecl(0, SourceLocation(), DeclarationName(),
+ 0, 0);
+}
+
+void TypeAliasTemplateDecl::DeallocateCommon(void *Ptr) {
+ static_cast<Common *>(Ptr)->~Common();
+}
+RedeclarableTemplateDecl::CommonBase *
+TypeAliasTemplateDecl::newCommon(ASTContext &C) {
+ Common *CommonPtr = new (C) Common;
+ C.AddDeallocation(DeallocateCommon, CommonPtr);
+ return CommonPtr;
+}
+
+//===----------------------------------------------------------------------===//
+// ClassScopeFunctionSpecializationDecl Implementation
+//===----------------------------------------------------------------------===//
+
+void ClassScopeFunctionSpecializationDecl::anchor() { }
+
+ClassScopeFunctionSpecializationDecl *
+ClassScopeFunctionSpecializationDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ void *Mem = AllocateDeserializedDecl(C, ID,
+ sizeof(ClassScopeFunctionSpecializationDecl));
+ return new (Mem) ClassScopeFunctionSpecializationDecl(0, SourceLocation(), 0);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp b/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp
new file mode 100644
index 0000000..64924ad
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DeclarationName.cpp
@@ -0,0 +1,627 @@
+//===-- DeclarationName.cpp - Declaration names implementation --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the DeclarationName and DeclarationNameTable
+// classes.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclarationName.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/TypeOrdering.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace clang {
+/// CXXSpecialName - Records the type associated with one of the
+/// "special" kinds of declaration names in C++, e.g., constructors,
+/// destructors, and conversion functions.
+class CXXSpecialName
+ : public DeclarationNameExtra, public llvm::FoldingSetNode {
+public:
+ /// Type - The type associated with this declaration name.
+ QualType Type;
+
+ /// FETokenInfo - Extra information associated with this declaration
+ /// name that can be used by the front end.
+ void *FETokenInfo;
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ ID.AddInteger(ExtraKindOrNumArgs);
+ ID.AddPointer(Type.getAsOpaquePtr());
+ }
+};
+
+/// CXXOperatorIdName - Contains extra information for the name of an
+/// overloaded operator in C++, such as "operator+".
+class CXXOperatorIdName : public DeclarationNameExtra {
+public:
+ /// FETokenInfo - Extra information associated with this operator
+ /// name that can be used by the front end.
+ void *FETokenInfo;
+};
+
+/// CXXLiteralOperatorName - Contains the actual identifier that makes up the
+/// name.
+///
+/// This identifier is stored here rather than directly in DeclarationName so as
+/// to allow Objective-C selectors, which are about a million times more common,
+/// to consume minimal memory.
+class CXXLiteralOperatorIdName
+ : public DeclarationNameExtra, public llvm::FoldingSetNode {
+public:
+ IdentifierInfo *ID;
+
+ /// FETokenInfo - Extra information associated with this operator
+ /// name that can be used by the front end.
+ void *FETokenInfo;
+
+ void Profile(llvm::FoldingSetNodeID &FSID) {
+ FSID.AddPointer(ID);
+ }
+};
+
+static int compareInt(unsigned A, unsigned B) {
+ return (A < B ? -1 : (A > B ? 1 : 0));
+}
+
+int DeclarationName::compare(DeclarationName LHS, DeclarationName RHS) {
+ if (LHS.getNameKind() != RHS.getNameKind())
+ return (LHS.getNameKind() < RHS.getNameKind() ? -1 : 1);
+
+ switch (LHS.getNameKind()) {
+ case DeclarationName::Identifier: {
+ IdentifierInfo *LII = LHS.getAsIdentifierInfo();
+ IdentifierInfo *RII = RHS.getAsIdentifierInfo();
+ if (!LII) return RII ? -1 : 0;
+ if (!RII) return 1;
+
+ return LII->getName().compare(RII->getName());
+ }
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector: {
+ Selector LHSSelector = LHS.getObjCSelector();
+ Selector RHSSelector = RHS.getObjCSelector();
+ unsigned LN = LHSSelector.getNumArgs(), RN = RHSSelector.getNumArgs();
+ for (unsigned I = 0, N = std::min(LN, RN); I != N; ++I) {
+ switch (LHSSelector.getNameForSlot(I).compare(
+ RHSSelector.getNameForSlot(I))) {
+      case -1: return -1;
+      case 1: return 1;
+ default: break;
+ }
+ }
+
+ return compareInt(LN, RN);
+ }
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (QualTypeOrdering()(LHS.getCXXNameType(), RHS.getCXXNameType()))
+ return -1;
+ if (QualTypeOrdering()(RHS.getCXXNameType(), LHS.getCXXNameType()))
+ return 1;
+ return 0;
+
+ case DeclarationName::CXXOperatorName:
+ return compareInt(LHS.getCXXOverloadedOperator(),
+ RHS.getCXXOverloadedOperator());
+
+ case DeclarationName::CXXLiteralOperatorName:
+ return LHS.getCXXLiteralIdentifier()->getName().compare(
+ RHS.getCXXLiteralIdentifier()->getName());
+
+ case DeclarationName::CXXUsingDirective:
+ return 0;
+ }
+
+ llvm_unreachable("Invalid DeclarationName Kind!");
+}
+
+} // end namespace clang
+
+DeclarationName::DeclarationName(Selector Sel) {
+ if (!Sel.getAsOpaquePtr()) {
+ Ptr = 0;
+ return;
+ }
+
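+  // Zero- and one-argument selectors are stored as a tagged IdentifierInfo
+  // pointer for their single keyword; multi-argument selectors are stored as
+  // a tagged MultiKeywordSelector, whose argument count doubles as the
+  // DeclarationNameExtra kind (see getNameKind()).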
+ switch (Sel.getNumArgs()) {
+ case 0:
+ Ptr = reinterpret_cast<uintptr_t>(Sel.getAsIdentifierInfo());
+ assert((Ptr & PtrMask) == 0 && "Improperly aligned IdentifierInfo");
+ Ptr |= StoredObjCZeroArgSelector;
+ break;
+
+ case 1:
+ Ptr = reinterpret_cast<uintptr_t>(Sel.getAsIdentifierInfo());
+ assert((Ptr & PtrMask) == 0 && "Improperly aligned IdentifierInfo");
+ Ptr |= StoredObjCOneArgSelector;
+ break;
+
+ default:
+ Ptr = Sel.InfoPtr & ~Selector::ArgFlags;
+ assert((Ptr & PtrMask) == 0 && "Improperly aligned MultiKeywordSelector");
+ Ptr |= StoredDeclarationNameExtra;
+ break;
+ }
+}
+
+DeclarationName::NameKind DeclarationName::getNameKind() const {
+ switch (getStoredNameKind()) {
+ case StoredIdentifier: return Identifier;
+ case StoredObjCZeroArgSelector: return ObjCZeroArgSelector;
+ case StoredObjCOneArgSelector: return ObjCOneArgSelector;
+
+ case StoredDeclarationNameExtra:
+ switch (getExtra()->ExtraKindOrNumArgs) {
+ case DeclarationNameExtra::CXXConstructor:
+ return CXXConstructorName;
+
+ case DeclarationNameExtra::CXXDestructor:
+ return CXXDestructorName;
+
+ case DeclarationNameExtra::CXXConversionFunction:
+ return CXXConversionFunctionName;
+
+ case DeclarationNameExtra::CXXLiteralOperator:
+ return CXXLiteralOperatorName;
+
+ case DeclarationNameExtra::CXXUsingDirective:
+ return CXXUsingDirective;
+
+ default:
+ // Check if we have one of the CXXOperator* enumeration values.
+ if (getExtra()->ExtraKindOrNumArgs <
+ DeclarationNameExtra::CXXUsingDirective)
+ return CXXOperatorName;
+
+ return ObjCMultiArgSelector;
+ }
+ }
+
+ // Can't actually get here.
+ llvm_unreachable("This should be unreachable!");
+}
+
+bool DeclarationName::isDependentName() const {
+ QualType T = getCXXNameType();
+ return !T.isNull() && T->isDependentType();
+}
+
+std::string DeclarationName::getAsString() const {
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ printName(OS);
+ return OS.str();
+}
+
+void DeclarationName::printName(raw_ostream &OS) const {
+ switch (getNameKind()) {
+ case Identifier:
+ if (const IdentifierInfo *II = getAsIdentifierInfo())
+ OS << II->getName();
+ return;
+
+ case ObjCZeroArgSelector:
+ case ObjCOneArgSelector:
+ case ObjCMultiArgSelector:
+ OS << getObjCSelector().getAsString();
+ return;
+
+ case CXXConstructorName: {
+ QualType ClassType = getCXXNameType();
+ if (const RecordType *ClassRec = ClassType->getAs<RecordType>())
+ OS << *ClassRec->getDecl();
+ else
+ OS << ClassType.getAsString();
+ return;
+ }
+
+ case CXXDestructorName: {
+ OS << '~';
+ QualType Type = getCXXNameType();
+ if (const RecordType *Rec = Type->getAs<RecordType>())
+ OS << *Rec->getDecl();
+ else
+ OS << Type.getAsString();
+ return;
+ }
+
+ case CXXOperatorName: {
+ static const char* const OperatorNames[NUM_OVERLOADED_OPERATORS] = {
+ 0,
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ Spelling,
+#include "clang/Basic/OperatorKinds.def"
+ };
+ const char *OpName = OperatorNames[getCXXOverloadedOperator()];
+ assert(OpName && "not an overloaded operator");
+
+ OS << "operator";
+ if (OpName[0] >= 'a' && OpName[0] <= 'z')
+ OS << ' ';
+ OS << OpName;
+ return;
+ }
+
+ case CXXLiteralOperatorName:
+ OS << "operator \"\" " << getCXXLiteralIdentifier()->getName();
+ return;
+
+ case CXXConversionFunctionName: {
+ OS << "operator ";
+ QualType Type = getCXXNameType();
+ if (const RecordType *Rec = Type->getAs<RecordType>())
+ OS << *Rec->getDecl();
+ else
+ OS << Type.getAsString();
+ return;
+ }
+ case CXXUsingDirective:
+ OS << "<using-directive>";
+ return;
+ }
+
+ llvm_unreachable("Unexpected declaration name kind");
+}
+
+QualType DeclarationName::getCXXNameType() const {
+ if (CXXSpecialName *CXXName = getAsCXXSpecialName())
+ return CXXName->Type;
+ else
+ return QualType();
+}
+
+OverloadedOperatorKind DeclarationName::getCXXOverloadedOperator() const {
+ if (CXXOperatorIdName *CXXOp = getAsCXXOperatorIdName()) {
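+    // DeclarationNameTable's constructor lays out the operator entries with
+    // ExtraKindOrNumArgs == CXXConversionFunction + Op, so subtracting the
+    // base recovers the OverloadedOperatorKind.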
+ unsigned value
+ = CXXOp->ExtraKindOrNumArgs - DeclarationNameExtra::CXXConversionFunction;
+ return static_cast<OverloadedOperatorKind>(value);
+ } else {
+ return OO_None;
+ }
+}
+
+IdentifierInfo *DeclarationName::getCXXLiteralIdentifier() const {
+ if (CXXLiteralOperatorIdName *CXXLit = getAsCXXLiteralOperatorIdName())
+ return CXXLit->ID;
+ else
+ return 0;
+}
+
+Selector DeclarationName::getObjCSelector() const {
+ switch (getNameKind()) {
+ case ObjCZeroArgSelector:
+ return Selector(reinterpret_cast<IdentifierInfo *>(Ptr & ~PtrMask), 0);
+
+ case ObjCOneArgSelector:
+ return Selector(reinterpret_cast<IdentifierInfo *>(Ptr & ~PtrMask), 1);
+
+ case ObjCMultiArgSelector:
+ return Selector(reinterpret_cast<MultiKeywordSelector *>(Ptr & ~PtrMask));
+
+ default:
+ break;
+ }
+
+ return Selector();
+}
+
+void *DeclarationName::getFETokenInfoAsVoid() const {
+ switch (getNameKind()) {
+ case Identifier:
+ return getAsIdentifierInfo()->getFETokenInfo<void>();
+
+ case CXXConstructorName:
+ case CXXDestructorName:
+ case CXXConversionFunctionName:
+ return getAsCXXSpecialName()->FETokenInfo;
+
+ case CXXOperatorName:
+ return getAsCXXOperatorIdName()->FETokenInfo;
+
+ case CXXLiteralOperatorName:
+ return getAsCXXLiteralOperatorIdName()->FETokenInfo;
+
+ default:
+ llvm_unreachable("Declaration name has no FETokenInfo");
+ }
+}
+
+void DeclarationName::setFETokenInfo(void *T) {
+ switch (getNameKind()) {
+ case Identifier:
+ getAsIdentifierInfo()->setFETokenInfo(T);
+ break;
+
+ case CXXConstructorName:
+ case CXXDestructorName:
+ case CXXConversionFunctionName:
+ getAsCXXSpecialName()->FETokenInfo = T;
+ break;
+
+ case CXXOperatorName:
+ getAsCXXOperatorIdName()->FETokenInfo = T;
+ break;
+
+ case CXXLiteralOperatorName:
+ getAsCXXLiteralOperatorIdName()->FETokenInfo = T;
+ break;
+
+ default:
+ llvm_unreachable("Declaration name has no FETokenInfo");
+ }
+}
+
+DeclarationName DeclarationName::getUsingDirectiveName() {
+ // Single instance of DeclarationNameExtra for using-directive
+ static const DeclarationNameExtra UDirExtra =
+ { DeclarationNameExtra::CXXUsingDirective };
+
+ uintptr_t Ptr = reinterpret_cast<uintptr_t>(&UDirExtra);
+ Ptr |= StoredDeclarationNameExtra;
+
+ return DeclarationName(Ptr);
+}
+
+void DeclarationName::dump() const {
+ printName(llvm::errs());
+ llvm::errs() << '\n';
+}
+
+DeclarationNameTable::DeclarationNameTable(const ASTContext &C) : Ctx(C) {
+ CXXSpecialNamesImpl = new llvm::FoldingSet<CXXSpecialName>;
+ CXXLiteralOperatorNames = new llvm::FoldingSet<CXXLiteralOperatorIdName>;
+
+ // Initialize the overloaded operator names.
+ CXXOperatorNames = new (Ctx) CXXOperatorIdName[NUM_OVERLOADED_OPERATORS];
+ for (unsigned Op = 0; Op < NUM_OVERLOADED_OPERATORS; ++Op) {
+ CXXOperatorNames[Op].ExtraKindOrNumArgs
+ = Op + DeclarationNameExtra::CXXConversionFunction;
+ CXXOperatorNames[Op].FETokenInfo = 0;
+ }
+}
+
+DeclarationNameTable::~DeclarationNameTable() {
+ llvm::FoldingSet<CXXSpecialName> *SpecialNames =
+ static_cast<llvm::FoldingSet<CXXSpecialName>*>(CXXSpecialNamesImpl);
+ llvm::FoldingSet<CXXLiteralOperatorIdName> *LiteralNames
+ = static_cast<llvm::FoldingSet<CXXLiteralOperatorIdName>*>
+ (CXXLiteralOperatorNames);
+
+ delete SpecialNames;
+ delete LiteralNames;
+}
+
+DeclarationName
+DeclarationNameTable::getCXXSpecialName(DeclarationName::NameKind Kind,
+ CanQualType Ty) {
+ assert(Kind >= DeclarationName::CXXConstructorName &&
+ Kind <= DeclarationName::CXXConversionFunctionName &&
+ "Kind must be a C++ special name kind");
+ llvm::FoldingSet<CXXSpecialName> *SpecialNames
+ = static_cast<llvm::FoldingSet<CXXSpecialName>*>(CXXSpecialNamesImpl);
+
+ DeclarationNameExtra::ExtraKind EKind;
+ switch (Kind) {
+ case DeclarationName::CXXConstructorName:
+ EKind = DeclarationNameExtra::CXXConstructor;
+    assert(!Ty.hasQualifiers() && "Constructor type must be unqualified");
+ break;
+ case DeclarationName::CXXDestructorName:
+ EKind = DeclarationNameExtra::CXXDestructor;
+ assert(!Ty.hasQualifiers() && "Destructor type must be unqualified");
+ break;
+ case DeclarationName::CXXConversionFunctionName:
+ EKind = DeclarationNameExtra::CXXConversionFunction;
+ break;
+ default:
+ return DeclarationName();
+ }
+
+  // Unique the special name via the folding set, so exactly one
+  // CXXSpecialName exists per (kind, type) pair.
+ llvm::FoldingSetNodeID ID;
+ ID.AddInteger(EKind);
+ ID.AddPointer(Ty.getAsOpaquePtr());
+
+ void *InsertPos = 0;
+ if (CXXSpecialName *Name = SpecialNames->FindNodeOrInsertPos(ID, InsertPos))
+ return DeclarationName(Name);
+
+ CXXSpecialName *SpecialName = new (Ctx) CXXSpecialName;
+ SpecialName->ExtraKindOrNumArgs = EKind;
+ SpecialName->Type = Ty;
+ SpecialName->FETokenInfo = 0;
+
+ SpecialNames->InsertNode(SpecialName, InsertPos);
+ return DeclarationName(SpecialName);
+}
+
+DeclarationName
+DeclarationNameTable::getCXXOperatorName(OverloadedOperatorKind Op) {
+ return DeclarationName(&CXXOperatorNames[(unsigned)Op]);
+}
+
+DeclarationName
+DeclarationNameTable::getCXXLiteralOperatorName(IdentifierInfo *II) {
+ llvm::FoldingSet<CXXLiteralOperatorIdName> *LiteralNames
+ = static_cast<llvm::FoldingSet<CXXLiteralOperatorIdName>*>
+ (CXXLiteralOperatorNames);
+
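+  // Unique literal operator names on their identifier via the folding set.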
+ llvm::FoldingSetNodeID ID;
+ ID.AddPointer(II);
+
+ void *InsertPos = 0;
+ if (CXXLiteralOperatorIdName *Name =
+ LiteralNames->FindNodeOrInsertPos(ID, InsertPos))
+    return DeclarationName(Name);
+
+ CXXLiteralOperatorIdName *LiteralName = new (Ctx) CXXLiteralOperatorIdName;
+ LiteralName->ExtraKindOrNumArgs = DeclarationNameExtra::CXXLiteralOperator;
+ LiteralName->ID = II;
+ LiteralName->FETokenInfo = 0;
+
+ LiteralNames->InsertNode(LiteralName, InsertPos);
+ return DeclarationName(LiteralName);
+}
+
+unsigned
+llvm::DenseMapInfo<clang::DeclarationName>::
+getHashValue(clang::DeclarationName N) {
+ return DenseMapInfo<void*>::getHashValue(N.getAsOpaquePtr());
+}
+
+DeclarationNameLoc::DeclarationNameLoc(DeclarationName Name) {
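+  // Default-initialize whichever location fields this name kind carries;
+  // callers fill in the real source locations later, once they are known.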
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ break;
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ NamedType.TInfo = 0;
+ break;
+ case DeclarationName::CXXOperatorName:
+ CXXOperatorName.BeginOpNameLoc = SourceLocation().getRawEncoding();
+ CXXOperatorName.EndOpNameLoc = SourceLocation().getRawEncoding();
+ break;
+ case DeclarationName::CXXLiteralOperatorName:
+ CXXLiteralOperatorName.OpNameLoc = SourceLocation().getRawEncoding();
+ break;
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ // FIXME: ?
+ break;
+ case DeclarationName::CXXUsingDirective:
+ break;
+ }
+}
+
+bool DeclarationNameInfo::containsUnexpandedParameterPack() const {
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ return false;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo)
+ return TInfo->getType()->containsUnexpandedParameterPack();
+
+ return Name.getCXXNameType()->containsUnexpandedParameterPack();
+ }
+ llvm_unreachable("All name kinds handled.");
+}
+
+bool DeclarationNameInfo::isInstantiationDependent() const {
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ return false;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo)
+ return TInfo->getType()->isInstantiationDependentType();
+
+ return Name.getCXXNameType()->isInstantiationDependentType();
+ }
+ llvm_unreachable("All name kinds handled.");
+}
+
+std::string DeclarationNameInfo::getAsString() const {
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ printName(OS);
+ return OS.str();
+}
+
+void DeclarationNameInfo::printName(raw_ostream &OS) const {
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ Name.printName(OS);
+ return;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo) {
+ if (Name.getNameKind() == DeclarationName::CXXDestructorName)
+ OS << '~';
+ else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName)
+ OS << "operator ";
+ OS << TInfo->getType().getAsString();
+ }
+ else
+ Name.printName(OS);
+ return;
+ }
+ llvm_unreachable("Unexpected declaration name kind");
+}
+
+SourceLocation DeclarationNameInfo::getEndLoc() const {
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ return NameLoc;
+
+ case DeclarationName::CXXOperatorName: {
+ unsigned raw = LocInfo.CXXOperatorName.EndOpNameLoc;
+ return SourceLocation::getFromRawEncoding(raw);
+ }
+
+ case DeclarationName::CXXLiteralOperatorName: {
+ unsigned raw = LocInfo.CXXLiteralOperatorName.OpNameLoc;
+ return SourceLocation::getFromRawEncoding(raw);
+ }
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ if (TypeSourceInfo *TInfo = LocInfo.NamedType.TInfo)
+ return TInfo->getTypeLoc().getEndLoc();
+ else
+ return NameLoc;
+
+ // DNInfo work in progress: FIXME.
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXUsingDirective:
+ return NameLoc;
+ }
+ llvm_unreachable("Unexpected declaration name kind");
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp b/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp
new file mode 100644
index 0000000..4c7cd8a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/DumpXML.cpp
@@ -0,0 +1,1040 @@
+//===--- DumpXML.cpp - Detailed XML dumping ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Decl::dumpXML() method, a debugging tool to
+// print a detailed graph of an AST in an unspecified XML format.
+//
+// There is no guarantee of stability for this format.
+//
+//===----------------------------------------------------------------------===//
+
+// Only pay the code-size cost of this facility in assertions-enabled builds.
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/TemplateName.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/TypeLocVisitor.h"
+#include "clang/AST/TypeVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+
+#ifndef NDEBUG
+
+namespace {
+
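+// State of the XML node currently being emitted: still accepting attributes,
+// attributes finished but the opening tag not yet closed (so it can still be
+// emitted as a self-closing tag), or already emitting children.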
+enum NodeState {
+ NS_Attrs, NS_LazyChildren, NS_Children
+};
+
+struct Node {
+ StringRef Name;
+ NodeState State;
+ Node(StringRef name) : Name(name), State(NS_Attrs) {}
+
+ bool isDoneWithAttrs() const { return State != NS_Attrs; }
+};
+
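+// CRTP visitor over the Decl hierarchy.  For each declaration, dispatch()
+// walks the class hierarchy from Decl down to the most-derived class three
+// times: first to emit attributes, then (after completeAttrs()) to emit the
+// node's own children, and finally to visit the declaration as a lexical
+// context.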
+template <class Impl> struct XMLDeclVisitor {
+#define DISPATCH(NAME, CLASS) \
+ static_cast<Impl*>(this)->NAME(static_cast<CLASS*>(D))
+
+ void dispatch(Decl *D) {
+ switch (D->getKind()) {
+#define DECL(DERIVED, BASE) \
+ case Decl::DERIVED: \
+ DISPATCH(dispatch##DERIVED##DeclAttrs, DERIVED##Decl); \
+ static_cast<Impl*>(this)->completeAttrs(); \
+ DISPATCH(dispatch##DERIVED##DeclChildren, DERIVED##Decl); \
+ DISPATCH(dispatch##DERIVED##DeclAsContext, DERIVED##Decl); \
+ break;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
+ }
+ }
+
+#define DECL(DERIVED, BASE) \
+ void dispatch##DERIVED##DeclAttrs(DERIVED##Decl *D) { \
+ DISPATCH(dispatch##BASE##Attrs, BASE); \
+ DISPATCH(visit##DERIVED##DeclAttrs, DERIVED##Decl); \
+ } \
+ void visit##DERIVED##DeclAttrs(DERIVED##Decl *D) {} \
+ void dispatch##DERIVED##DeclChildren(DERIVED##Decl *D) { \
+ DISPATCH(dispatch##BASE##Children, BASE); \
+ DISPATCH(visit##DERIVED##DeclChildren, DERIVED##Decl); \
+ } \
+ void visit##DERIVED##DeclChildren(DERIVED##Decl *D) {} \
+ void dispatch##DERIVED##DeclAsContext(DERIVED##Decl *D) { \
+ DISPATCH(dispatch##BASE##AsContext, BASE); \
+ DISPATCH(visit##DERIVED##DeclAsContext, DERIVED##Decl); \
+ } \
+ void visit##DERIVED##DeclAsContext(DERIVED##Decl *D) {}
+#include "clang/AST/DeclNodes.inc"
+
+ void dispatchDeclAttrs(Decl *D) {
+ DISPATCH(visitDeclAttrs, Decl);
+ }
+ void visitDeclAttrs(Decl *D) {}
+
+ void dispatchDeclChildren(Decl *D) {
+ DISPATCH(visitDeclChildren, Decl);
+ }
+ void visitDeclChildren(Decl *D) {}
+
+ void dispatchDeclAsContext(Decl *D) {
+ DISPATCH(visitDeclAsContext, Decl);
+ }
+ void visitDeclAsContext(Decl *D) {}
+
+#undef DISPATCH
+};
+
+template <class Impl> struct XMLTypeVisitor {
+#define DISPATCH(NAME, CLASS) \
+ static_cast<Impl*>(this)->NAME(static_cast<CLASS*>(T))
+
+ void dispatch(Type *T) {
+ switch (T->getTypeClass()) {
+#define TYPE(DERIVED, BASE) \
+ case Type::DERIVED: \
+ DISPATCH(dispatch##DERIVED##TypeAttrs, DERIVED##Type); \
+ static_cast<Impl*>(this)->completeAttrs(); \
+ DISPATCH(dispatch##DERIVED##TypeChildren, DERIVED##Type); \
+ break;
+#define ABSTRACT_TYPE(DERIVED, BASE)
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+
+#define TYPE(DERIVED, BASE) \
+ void dispatch##DERIVED##TypeAttrs(DERIVED##Type *T) { \
+ DISPATCH(dispatch##BASE##Attrs, BASE); \
+ DISPATCH(visit##DERIVED##TypeAttrs, DERIVED##Type); \
+ } \
+ void visit##DERIVED##TypeAttrs(DERIVED##Type *T) {} \
+ void dispatch##DERIVED##TypeChildren(DERIVED##Type *T) { \
+ DISPATCH(dispatch##BASE##Children, BASE); \
+ DISPATCH(visit##DERIVED##TypeChildren, DERIVED##Type); \
+ } \
+ void visit##DERIVED##TypeChildren(DERIVED##Type *T) {}
+#include "clang/AST/TypeNodes.def"
+
+ void dispatchTypeAttrs(Type *T) {
+ DISPATCH(visitTypeAttrs, Type);
+ }
+ void visitTypeAttrs(Type *T) {}
+
+ void dispatchTypeChildren(Type *T) {
+ DISPATCH(visitTypeChildren, Type);
+ }
+ void visitTypeChildren(Type *T) {}
+
+#undef DISPATCH
+};
+
+static StringRef getTypeKindName(Type *T) {
+ switch (T->getTypeClass()) {
+#define TYPE(DERIVED, BASE) case Type::DERIVED: return #DERIVED "Type";
+#define ABSTRACT_TYPE(DERIVED, BASE)
+#include "clang/AST/TypeNodes.def"
+ }
+
+ llvm_unreachable("unknown type kind!");
+}
+
+struct XMLDumper : public XMLDeclVisitor<XMLDumper>,
+ public XMLTypeVisitor<XMLDumper> {
+ raw_ostream &out;
+ ASTContext &Context;
+ SmallVector<Node, 16> Stack;
+ unsigned Indent;
+ explicit XMLDumper(raw_ostream &OS, ASTContext &context)
+ : out(OS), Context(context), Indent(0) {}
+
+ void indent() {
+ for (unsigned I = Indent; I; --I)
+ out << ' ';
+ }
+
+ /// Push a new node on the stack.
+ void push(StringRef name) {
+ if (!Stack.empty()) {
+ assert(Stack.back().isDoneWithAttrs());
+ if (Stack.back().State == NS_LazyChildren) {
+ Stack.back().State = NS_Children;
+ out << ">\n";
+ }
+ Indent++;
+ indent();
+ }
+ Stack.push_back(Node(name));
+ out << '<' << name;
+ }
+
+ /// Set the given attribute to the given value.
+ void set(StringRef attr, StringRef value) {
+ assert(!Stack.empty() && !Stack.back().isDoneWithAttrs());
+ out << ' ' << attr << '=' << '"' << value << '"'; // TODO: quotation
+ }
+
+ /// Finish attributes.
+ void completeAttrs() {
+ assert(!Stack.empty() && !Stack.back().isDoneWithAttrs());
+ Stack.back().State = NS_LazyChildren;
+ }
+
+ /// Pop a node.
+ void pop() {
+ assert(!Stack.empty() && Stack.back().isDoneWithAttrs());
+ if (Stack.back().State == NS_LazyChildren) {
+ out << "/>\n";
+ } else {
+ indent();
+ out << "</" << Stack.back().Name << ">\n";
+ }
+ if (Stack.size() > 1) Indent--;
+ Stack.pop_back();
+ }
+
+ //---- General utilities -------------------------------------------//
+
+ void setPointer(StringRef prop, const void *p) {
+ SmallString<10> buffer;
+ llvm::raw_svector_ostream os(buffer);
+ os << p;
+ os.flush();
+ set(prop, buffer);
+ }
+
+ void setPointer(void *p) {
+ setPointer("ptr", p);
+ }
+
+ void setInteger(StringRef prop, const llvm::APSInt &v) {
+ set(prop, v.toString(10));
+ }
+
+ void setInteger(StringRef prop, unsigned n) {
+ SmallString<10> buffer;
+ llvm::raw_svector_ostream os(buffer);
+ os << n;
+ os.flush();
+ set(prop, buffer);
+ }
+
+ void setFlag(StringRef prop, bool flag) {
+ if (flag) set(prop, "true");
+ }
+
+ void setName(DeclarationName Name) {
+ if (!Name)
+ return set("name", "");
+
+ // Common case.
+ if (Name.isIdentifier())
+ return set("name", Name.getAsIdentifierInfo()->getName());
+
+ set("name", Name.getAsString());
+ }
+
+ class TemporaryContainer {
+ XMLDumper &Dumper;
+ public:
+ TemporaryContainer(XMLDumper &dumper, StringRef name)
+ : Dumper(dumper) {
+ Dumper.push(name);
+ Dumper.completeAttrs();
+ }
+
+ ~TemporaryContainer() {
+ Dumper.pop();
+ }
+ };
+
+ void visitTemplateParameters(TemplateParameterList *L) {
+ push("template_parameters");
+ completeAttrs();
+ for (TemplateParameterList::iterator
+ I = L->begin(), E = L->end(); I != E; ++I)
+ dispatch(*I);
+ pop();
+ }
+
+ void visitTemplateArguments(const TemplateArgumentList &L) {
+ push("template_arguments");
+ completeAttrs();
+ for (unsigned I = 0, E = L.size(); I != E; ++I)
+ dispatch(L[I]);
+ pop();
+ }
+
+ /// Visits a reference to the given declaration.
+ void visitDeclRef(Decl *D) {
+ push(D->getDeclKindName());
+ setPointer("ref", D);
+ completeAttrs();
+ pop();
+ }
+ void visitDeclRef(StringRef Name, Decl *D) {
+ TemporaryContainer C(*this, Name);
+ if (D) visitDeclRef(D);
+ }
+
+ void dispatch(const TemplateArgument &A) {
+ switch (A.getKind()) {
+ case TemplateArgument::Null: {
+ TemporaryContainer C(*this, "null");
+ break;
+ }
+ case TemplateArgument::Type: {
+ dispatch(A.getAsType());
+ break;
+ }
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ // FIXME: Implement!
+ break;
+
+ case TemplateArgument::Declaration: {
+ if (Decl *D = A.getAsDecl())
+ visitDeclRef(D);
+ break;
+ }
+ case TemplateArgument::Integral: {
+ push("integer");
+ setInteger("value", *A.getAsIntegral());
+ completeAttrs();
+ pop();
+ break;
+ }
+ case TemplateArgument::Expression: {
+ dispatch(A.getAsExpr());
+ break;
+ }
+ case TemplateArgument::Pack: {
+ for (TemplateArgument::pack_iterator P = A.pack_begin(),
+ PEnd = A.pack_end();
+ P != PEnd; ++P)
+ dispatch(*P);
+ break;
+ }
+ }
+ }
+
+ void dispatch(const TemplateArgumentLoc &A) {
+ dispatch(A.getArgument());
+ }
+
+ //---- Declarations ------------------------------------------------//
+ // Calls are made in this order:
+ // # Enter a new node.
+ // push("FieldDecl")
+ //
+ // # In this phase, attributes are set on the node.
+ // visitDeclAttrs(D)
+ // visitNamedDeclAttrs(D)
+ // ...
+ // visitFieldDeclAttrs(D)
+ //
+ // # No more attributes after this point.
+ // completeAttrs()
+ //
+ // # Create "header" child nodes, i.e. those which logically
+ // # belong to the declaration itself.
+ // visitDeclChildren(D)
+ // visitNamedDeclChildren(D)
+ // ...
+ // visitFieldDeclChildren(D)
+ //
+ // # Create nodes for the lexical children.
+ // visitDeclAsContext(D)
+ // visitNamedDeclAsContext(D)
+ // ...
+ // visitFieldDeclAsContext(D)
+ //
+ // # Finish the node.
+ // pop();
+ void dispatch(Decl *D) {
+ push(D->getDeclKindName());
+ XMLDeclVisitor<XMLDumper>::dispatch(D);
+ pop();
+ }
+ void visitDeclAttrs(Decl *D) {
+ setPointer(D);
+ }
+
+ /// Visit all the lexical decls in the given context.
+ void visitDeclContext(DeclContext *DC) {
+ for (DeclContext::decl_iterator
+ I = DC->decls_begin(), E = DC->decls_end(); I != E; ++I)
+ dispatch(*I);
+
+ // FIXME: point out visible declarations not in lexical context?
+ }
+
+ /// Set the "access" attribute on the current node according to the
+ /// given specifier.
+ void setAccess(AccessSpecifier AS) {
+ switch (AS) {
+ case AS_public: return set("access", "public");
+ case AS_protected: return set("access", "protected");
+ case AS_private: return set("access", "private");
+ case AS_none: llvm_unreachable("explicit forbidden access");
+ }
+ }
+
+ template <class T> void visitRedeclarableAttrs(T *D) {
+ if (T *Prev = D->getPreviousDecl())
+ setPointer("previous", Prev);
+ }
+
+
+ // TranslationUnitDecl
+ void visitTranslationUnitDeclAsContext(TranslationUnitDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // LinkageSpecDecl
+ void visitLinkageSpecDeclAttrs(LinkageSpecDecl *D) {
+ StringRef lang = "";
+ switch (D->getLanguage()) {
+ case LinkageSpecDecl::lang_c: lang = "C"; break;
+ case LinkageSpecDecl::lang_cxx: lang = "C++"; break;
+ }
+ set("lang", lang);
+ }
+ void visitLinkageSpecDeclAsContext(LinkageSpecDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // NamespaceDecl
+ void visitNamespaceDeclAttrs(NamespaceDecl *D) {
+ setFlag("inline", D->isInline());
+ if (!D->isOriginalNamespace())
+ setPointer("original", D->getOriginalNamespace());
+ }
+ void visitNamespaceDeclAsContext(NamespaceDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // NamedDecl
+ void visitNamedDeclAttrs(NamedDecl *D) {
+ setName(D->getDeclName());
+ }
+
+ // ValueDecl
+ void visitValueDeclChildren(ValueDecl *D) {
+ dispatch(D->getType());
+ }
+
+ // DeclaratorDecl
+ void visitDeclaratorDeclChildren(DeclaratorDecl *D) {
+ //dispatch(D->getTypeSourceInfo()->getTypeLoc());
+ }
+
+ // VarDecl
+ void visitVarDeclAttrs(VarDecl *D) {
+ visitRedeclarableAttrs(D);
+ if (D->getStorageClass() != SC_None)
+ set("storage",
+ VarDecl::getStorageClassSpecifierString(D->getStorageClass()));
+ StringRef initStyle = "";
+ switch (D->getInitStyle()) {
+ case VarDecl::CInit: initStyle = "c"; break;
+ case VarDecl::CallInit: initStyle = "call"; break;
+ case VarDecl::ListInit: initStyle = "list"; break;
+ }
+ set("initstyle", initStyle);
+ setFlag("nrvo", D->isNRVOVariable());
+ // TODO: instantiation, etc.
+ }
+ void visitVarDeclChildren(VarDecl *D) {
+ if (D->hasInit()) dispatch(D->getInit());
+ }
+
+ // ParmVarDecl?
+
+ // FunctionDecl
+ void visitFunctionDeclAttrs(FunctionDecl *D) {
+ visitRedeclarableAttrs(D);
+ setFlag("pure", D->isPure());
+ setFlag("trivial", D->isTrivial());
+ setFlag("returnzero", D->hasImplicitReturnZero());
+ setFlag("prototype", D->hasWrittenPrototype());
+ setFlag("deleted", D->isDeletedAsWritten());
+ if (D->getStorageClass() != SC_None)
+ set("storage",
+ VarDecl::getStorageClassSpecifierString(D->getStorageClass()));
+ setFlag("inline", D->isInlineSpecified());
+ if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>())
+ set("asmlabel", ALA->getLabel());
+ // TODO: instantiation, etc.
+ }
+ void visitFunctionDeclChildren(FunctionDecl *D) {
+ for (FunctionDecl::param_iterator
+ I = D->param_begin(), E = D->param_end(); I != E; ++I)
+ dispatch(*I);
+    llvm::ArrayRef<NamedDecl*> PrototypeDecls = D->getDeclsInPrototypeScope();
+    for (llvm::ArrayRef<NamedDecl*>::iterator
+           I = PrototypeDecls.begin(), E = PrototypeDecls.end();
+         I != E; ++I)
+ dispatch(*I);
+ if (D->doesThisDeclarationHaveABody())
+ dispatch(D->getBody());
+ }
+
+ // CXXMethodDecl ?
+ // CXXConstructorDecl ?
+ // CXXDestructorDecl ?
+ // CXXConversionDecl ?
+
+ void dispatch(CXXCtorInitializer *Init) {
+ // TODO
+ }
+
+ // FieldDecl
+ void visitFieldDeclAttrs(FieldDecl *D) {
+ setFlag("mutable", D->isMutable());
+ }
+ void visitFieldDeclChildren(FieldDecl *D) {
+ if (D->isBitField()) {
+ TemporaryContainer C(*this, "bitwidth");
+ dispatch(D->getBitWidth());
+ }
+ // TODO: C++0x member initializer
+ }
+
+ // EnumConstantDecl
+ void visitEnumConstantDeclChildren(EnumConstantDecl *D) {
+ // value in any case?
+ if (D->getInitExpr()) dispatch(D->getInitExpr());
+ }
+
+ // IndirectFieldDecl
+ void visitIndirectFieldDeclChildren(IndirectFieldDecl *D) {
+ for (IndirectFieldDecl::chain_iterator
+ I = D->chain_begin(), E = D->chain_end(); I != E; ++I) {
+ NamedDecl *VD = const_cast<NamedDecl*>(*I);
+ push(isa<VarDecl>(VD) ? "variable" : "field");
+ setPointer("ptr", VD);
+ completeAttrs();
+ pop();
+ }
+ }
+
+ // TypeDecl
+ void visitTypeDeclAttrs(TypeDecl *D) {
+ setPointer("typeptr", D->getTypeForDecl());
+ }
+
+ // TypedefDecl
+ void visitTypedefDeclAttrs(TypedefDecl *D) {
+ visitRedeclarableAttrs<TypedefNameDecl>(D);
+ }
+ void visitTypedefDeclChildren(TypedefDecl *D) {
+ dispatch(D->getTypeSourceInfo()->getTypeLoc());
+ }
+
+ // TypeAliasDecl
+ void visitTypeAliasDeclAttrs(TypeAliasDecl *D) {
+ visitRedeclarableAttrs<TypedefNameDecl>(D);
+ }
+ void visitTypeAliasDeclChildren(TypeAliasDecl *D) {
+ dispatch(D->getTypeSourceInfo()->getTypeLoc());
+ }
+
+ // TagDecl
+ void visitTagDeclAttrs(TagDecl *D) {
+ visitRedeclarableAttrs(D);
+ }
+ void visitTagDeclAsContext(TagDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // EnumDecl
+ void visitEnumDeclAttrs(EnumDecl *D) {
+ setFlag("scoped", D->isScoped());
+ setFlag("fixed", D->isFixed());
+ }
+ void visitEnumDeclChildren(EnumDecl *D) {
+ {
+ TemporaryContainer C(*this, "promotion_type");
+ dispatch(D->getPromotionType());
+ }
+ {
+ TemporaryContainer C(*this, "integer_type");
+ dispatch(D->getIntegerType());
+ }
+ }
+
+ // RecordDecl ?
+
+ void visitCXXRecordDeclChildren(CXXRecordDecl *D) {
+ if (!D->isThisDeclarationADefinition()) return;
+
+ for (CXXRecordDecl::base_class_iterator
+ I = D->bases_begin(), E = D->bases_end(); I != E; ++I) {
+ push("base");
+ setAccess(I->getAccessSpecifier());
+ completeAttrs();
+ dispatch(I->getTypeSourceInfo()->getTypeLoc());
+ pop();
+ }
+ }
+
+ // ClassTemplateSpecializationDecl ?
+
+ // FileScopeAsmDecl ?
+
+ // BlockDecl
+ void visitBlockDeclAttrs(BlockDecl *D) {
+ setFlag("variadic", D->isVariadic());
+ }
+ void visitBlockDeclChildren(BlockDecl *D) {
+ for (FunctionDecl::param_iterator
+ I = D->param_begin(), E = D->param_end(); I != E; ++I)
+ dispatch(*I);
+ dispatch(D->getBody());
+ }
+
+ // AccessSpecDecl
+ void visitAccessSpecDeclAttrs(AccessSpecDecl *D) {
+ setAccess(D->getAccess());
+ }
+
+ // TemplateDecl
+ void visitTemplateDeclChildren(TemplateDecl *D) {
+ visitTemplateParameters(D->getTemplateParameters());
+ if (D->getTemplatedDecl())
+ dispatch(D->getTemplatedDecl());
+ }
+
+ // FunctionTemplateDecl
+ void visitFunctionTemplateDeclAttrs(FunctionTemplateDecl *D) {
+ visitRedeclarableAttrs(D);
+ }
+ void visitFunctionTemplateDeclChildren(FunctionTemplateDecl *D) {
+ // Mention all the specializations which don't have explicit
+ // declarations elsewhere.
+ for (FunctionTemplateDecl::spec_iterator
+ I = D->spec_begin(), E = D->spec_end(); I != E; ++I) {
+ FunctionTemplateSpecializationInfo *Info
+ = I->getTemplateSpecializationInfo();
+
+ bool Unknown = false;
+ switch (Info->getTemplateSpecializationKind()) {
+ case TSK_ImplicitInstantiation: Unknown = false; break;
+ case TSK_Undeclared: Unknown = true; break;
+
+ // These will be covered at their respective sites.
+ case TSK_ExplicitSpecialization: continue;
+ case TSK_ExplicitInstantiationDeclaration: continue;
+ case TSK_ExplicitInstantiationDefinition: continue;
+ }
+
+ TemporaryContainer C(*this,
+ Unknown ? "uninstantiated" : "instantiation");
+ visitTemplateArguments(*Info->TemplateArguments);
+ dispatch(Info->Function);
+ }
+ }
+
+  // ClassTemplateDecl
+ void visitClassTemplateDeclAttrs(ClassTemplateDecl *D) {
+ visitRedeclarableAttrs(D);
+ }
+ void visitClassTemplateDeclChildren(ClassTemplateDecl *D) {
+ // Mention all the specializations which don't have explicit
+ // declarations elsewhere.
+ for (ClassTemplateDecl::spec_iterator
+ I = D->spec_begin(), E = D->spec_end(); I != E; ++I) {
+
+ bool Unknown = false;
+ switch (I->getTemplateSpecializationKind()) {
+ case TSK_ImplicitInstantiation: Unknown = false; break;
+ case TSK_Undeclared: Unknown = true; break;
+
+ // These will be covered at their respective sites.
+ case TSK_ExplicitSpecialization: continue;
+ case TSK_ExplicitInstantiationDeclaration: continue;
+ case TSK_ExplicitInstantiationDefinition: continue;
+ }
+
+ TemporaryContainer C(*this,
+ Unknown ? "uninstantiated" : "instantiation");
+ visitTemplateArguments(I->getTemplateArgs());
+ dispatch(*I);
+ }
+ }
+
+ // TemplateTypeParmDecl
+ void visitTemplateTypeParmDeclAttrs(TemplateTypeParmDecl *D) {
+ setInteger("depth", D->getDepth());
+ setInteger("index", D->getIndex());
+ }
+ void visitTemplateTypeParmDeclChildren(TemplateTypeParmDecl *D) {
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
+ dispatch(D->getDefaultArgumentInfo()->getTypeLoc());
+ // parameter pack?
+ }
+
+ // NonTypeTemplateParmDecl
+ void visitNonTypeTemplateParmDeclAttrs(NonTypeTemplateParmDecl *D) {
+ setInteger("depth", D->getDepth());
+ setInteger("index", D->getIndex());
+ }
+ void visitNonTypeTemplateParmDeclChildren(NonTypeTemplateParmDecl *D) {
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
+ dispatch(D->getDefaultArgument());
+ // parameter pack?
+ }
+
+ // TemplateTemplateParmDecl
+ void visitTemplateTemplateParmDeclAttrs(TemplateTemplateParmDecl *D) {
+ setInteger("depth", D->getDepth());
+ setInteger("index", D->getIndex());
+ }
+ void visitTemplateTemplateParmDeclChildren(TemplateTemplateParmDecl *D) {
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
+ dispatch(D->getDefaultArgument());
+ // parameter pack?
+ }
+
+ // FriendDecl
+ void visitFriendDeclChildren(FriendDecl *D) {
+ if (TypeSourceInfo *T = D->getFriendType())
+ dispatch(T->getTypeLoc());
+ else
+ dispatch(D->getFriendDecl());
+ }
+
+ // UsingDirectiveDecl ?
+ // UsingDecl ?
+ // UsingShadowDecl ?
+ // NamespaceAliasDecl ?
+ // UnresolvedUsingValueDecl ?
+ // UnresolvedUsingTypenameDecl ?
+ // StaticAssertDecl ?
+
+ // ObjCImplDecl
+ void visitObjCImplDeclChildren(ObjCImplDecl *D) {
+ visitDeclRef(D->getClassInterface());
+ }
+ void visitObjCImplDeclAsContext(ObjCImplDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // ObjCInterfaceDecl
+ void visitCategoryList(ObjCCategoryDecl *D) {
+ if (!D) return;
+
+ TemporaryContainer C(*this, "categories");
+ for (; D; D = D->getNextClassCategory())
+ visitDeclRef(D);
+ }
+ void visitObjCInterfaceDeclAttrs(ObjCInterfaceDecl *D) {
+ setPointer("typeptr", D->getTypeForDecl());
+ setFlag("forward_decl", !D->isThisDeclarationADefinition());
+ setFlag("implicit_interface", D->isImplicitInterfaceDecl());
+ }
+ void visitObjCInterfaceDeclChildren(ObjCInterfaceDecl *D) {
+ visitDeclRef("super", D->getSuperClass());
+ visitDeclRef("implementation", D->getImplementation());
+ if (D->protocol_begin() != D->protocol_end()) {
+ TemporaryContainer C(*this, "protocols");
+ for (ObjCInterfaceDecl::protocol_iterator
+ I = D->protocol_begin(), E = D->protocol_end(); I != E; ++I)
+ visitDeclRef(*I);
+ }
+ visitCategoryList(D->getCategoryList());
+ }
+ void visitObjCInterfaceDeclAsContext(ObjCInterfaceDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // ObjCCategoryDecl
+ void visitObjCCategoryDeclAttrs(ObjCCategoryDecl *D) {
+ setFlag("extension", D->IsClassExtension());
+ setFlag("synth_bitfield", D->hasSynthBitfield());
+ }
+ void visitObjCCategoryDeclChildren(ObjCCategoryDecl *D) {
+ visitDeclRef("interface", D->getClassInterface());
+ visitDeclRef("implementation", D->getImplementation());
+ if (D->protocol_begin() != D->protocol_end()) {
+ TemporaryContainer C(*this, "protocols");
+ for (ObjCCategoryDecl::protocol_iterator
+ I = D->protocol_begin(), E = D->protocol_end(); I != E; ++I)
+ visitDeclRef(*I);
+ }
+ }
+ void visitObjCCategoryDeclAsContext(ObjCCategoryDecl *D) {
+ visitDeclContext(D);
+ }
+
+ // ObjCCategoryImplDecl
+ void visitObjCCategoryImplDeclAttrs(ObjCCategoryImplDecl *D) {
+ set("identifier", D->getName());
+ }
+ void visitObjCCategoryImplDeclChildren(ObjCCategoryImplDecl *D) {
+ visitDeclRef(D->getCategoryDecl());
+ }
+
+ // ObjCImplementationDecl
+ void visitObjCImplementationDeclAttrs(ObjCImplementationDecl *D) {
+ setFlag("synth_bitfield", D->hasSynthBitfield());
+ set("identifier", D->getName());
+ }
+ void visitObjCImplementationDeclChildren(ObjCImplementationDecl *D) {
+ visitDeclRef("super", D->getSuperClass());
+ if (D->init_begin() != D->init_end()) {
+ TemporaryContainer C(*this, "initializers");
+ for (ObjCImplementationDecl::init_iterator
+ I = D->init_begin(), E = D->init_end(); I != E; ++I)
+ dispatch(*I);
+ }
+ }
+
+ // ObjCProtocolDecl
+ void visitObjCProtocolDeclChildren(ObjCProtocolDecl *D) {
+ if (!D->isThisDeclarationADefinition())
+ return;
+
+ if (D->protocol_begin() != D->protocol_end()) {
+ TemporaryContainer C(*this, "protocols");
+ for (ObjCInterfaceDecl::protocol_iterator
+ I = D->protocol_begin(), E = D->protocol_end(); I != E; ++I)
+ visitDeclRef(*I);
+ }
+ }
+ void visitObjCProtocolDeclAsContext(ObjCProtocolDecl *D) {
+ if (!D->isThisDeclarationADefinition())
+ return;
+
+ visitDeclContext(D);
+ }
+
+ // ObjCMethodDecl
+ void visitObjCMethodDeclAttrs(ObjCMethodDecl *D) {
+ // decl qualifier?
+ // implementation control?
+
+ setFlag("instance", D->isInstanceMethod());
+ setFlag("variadic", D->isVariadic());
+ setFlag("synthesized", D->isSynthesized());
+ setFlag("defined", D->isDefined());
+ setFlag("related_result_type", D->hasRelatedResultType());
+ }
+ void visitObjCMethodDeclChildren(ObjCMethodDecl *D) {
+ dispatch(D->getResultType());
+ for (ObjCMethodDecl::param_iterator
+ I = D->param_begin(), E = D->param_end(); I != E; ++I)
+ dispatch(*I);
+ if (D->isThisDeclarationADefinition())
+ dispatch(D->getBody());
+ }
+
+ // ObjCIvarDecl
+ void setAccessControl(StringRef prop, ObjCIvarDecl::AccessControl AC) {
+ switch (AC) {
+ case ObjCIvarDecl::None: return set(prop, "none");
+ case ObjCIvarDecl::Private: return set(prop, "private");
+ case ObjCIvarDecl::Protected: return set(prop, "protected");
+ case ObjCIvarDecl::Public: return set(prop, "public");
+ case ObjCIvarDecl::Package: return set(prop, "package");
+ }
+ }
+ void visitObjCIvarDeclAttrs(ObjCIvarDecl *D) {
+ setFlag("synthesize", D->getSynthesize());
+ setAccessControl("access", D->getAccessControl());
+ }
+
+ // ObjCCompatibleAliasDecl
+ void visitObjCCompatibleAliasDeclChildren(ObjCCompatibleAliasDecl *D) {
+ visitDeclRef(D->getClassInterface());
+ }
+
+ // FIXME: ObjCPropertyDecl
+ // FIXME: ObjCPropertyImplDecl
+
+ //---- Types -----------------------------------------------------//
+ void dispatch(TypeLoc TL) {
+ dispatch(TL.getType()); // for now
+ }
+
+ void dispatch(QualType T) {
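+    // Types with local qualifiers are wrapped in a "QualType" node that
+    // carries the qualifiers; the unqualified type is dispatched beneath it.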
+ if (T.hasLocalQualifiers()) {
+ push("QualType");
+ Qualifiers Qs = T.getLocalQualifiers();
+ setFlag("const", Qs.hasConst());
+ setFlag("volatile", Qs.hasVolatile());
+ setFlag("restrict", Qs.hasRestrict());
+ if (Qs.hasAddressSpace()) setInteger("addrspace", Qs.getAddressSpace());
+ if (Qs.hasObjCGCAttr()) {
+ switch (Qs.getObjCGCAttr()) {
+ case Qualifiers::Weak: set("gc", "weak"); break;
+ case Qualifiers::Strong: set("gc", "strong"); break;
+ case Qualifiers::GCNone: llvm_unreachable("explicit none");
+ }
+ }
+
+ completeAttrs();
+ dispatch(QualType(T.getTypePtr(), 0));
+ pop();
+ return;
+ }
+
+ Type *Ty = const_cast<Type*>(T.getTypePtr());
+ push(getTypeKindName(Ty));
+    XMLTypeVisitor<XMLDumper>::dispatch(Ty);
+ pop();
+ }
+
+ void setCallingConv(CallingConv CC) {
+ switch (CC) {
+ case CC_Default: return;
+ case CC_C: return set("cc", "cdecl");
+ case CC_X86FastCall: return set("cc", "x86_fastcall");
+ case CC_X86StdCall: return set("cc", "x86_stdcall");
+ case CC_X86ThisCall: return set("cc", "x86_thiscall");
+ case CC_X86Pascal: return set("cc", "x86_pascal");
+ case CC_AAPCS: return set("cc", "aapcs");
+ case CC_AAPCS_VFP: return set("cc", "aapcs_vfp");
+ }
+ }
+
+ void visitTypeAttrs(Type *D) {
+ setPointer(D);
+ setFlag("dependent", D->isDependentType());
+ setFlag("variably_modified", D->isVariablyModifiedType());
+
+ setPointer("canonical", D->getCanonicalTypeInternal().getAsOpaquePtr());
+ }
+
+ void visitPointerTypeChildren(PointerType *T) {
+ dispatch(T->getPointeeType());
+ }
+ void visitReferenceTypeChildren(ReferenceType *T) {
+ dispatch(T->getPointeeType());
+ }
+ void visitObjCObjectPointerTypeChildren(ObjCObjectPointerType *T) {
+ dispatch(T->getPointeeType());
+ }
+ void visitBlockPointerTypeChildren(BlockPointerType *T) {
+ dispatch(T->getPointeeType());
+ }
+
+ // Types that just wrap declarations.
+ void visitTagTypeChildren(TagType *T) {
+ visitDeclRef(T->getDecl());
+ }
+ void visitTypedefTypeChildren(TypedefType *T) {
+ visitDeclRef(T->getDecl());
+ }
+ void visitObjCInterfaceTypeChildren(ObjCInterfaceType *T) {
+ visitDeclRef(T->getDecl());
+ }
+ void visitUnresolvedUsingTypeChildren(UnresolvedUsingType *T) {
+ visitDeclRef(T->getDecl());
+ }
+ void visitInjectedClassNameTypeChildren(InjectedClassNameType *T) {
+ visitDeclRef(T->getDecl());
+ }
+
+ void visitFunctionTypeAttrs(FunctionType *T) {
+ setFlag("noreturn", T->getNoReturnAttr());
+ setCallingConv(T->getCallConv());
+ if (T->getHasRegParm()) setInteger("regparm", T->getRegParmType());
+ }
+ void visitFunctionTypeChildren(FunctionType *T) {
+ dispatch(T->getResultType());
+ }
+
+ void visitFunctionProtoTypeAttrs(FunctionProtoType *T) {
+ setFlag("const", T->getTypeQuals() & Qualifiers::Const);
+ setFlag("volatile", T->getTypeQuals() & Qualifiers::Volatile);
+ setFlag("restrict", T->getTypeQuals() & Qualifiers::Restrict);
+ }
+ void visitFunctionProtoTypeChildren(FunctionProtoType *T) {
+ push("parameters");
+ setFlag("variadic", T->isVariadic());
+ completeAttrs();
+ for (FunctionProtoType::arg_type_iterator
+ I = T->arg_type_begin(), E = T->arg_type_end(); I != E; ++I)
+ dispatch(*I);
+ pop();
+
+ if (T->hasDynamicExceptionSpec()) {
+ push("exception_specifiers");
+ setFlag("any", T->getExceptionSpecType() == EST_MSAny);
+ completeAttrs();
+ for (FunctionProtoType::exception_iterator
+ I = T->exception_begin(), E = T->exception_end(); I != E; ++I)
+ dispatch(*I);
+ pop();
+ }
+ // FIXME: noexcept specifier
+ }
+
+ void visitTemplateSpecializationTypeChildren(TemplateSpecializationType *T) {
+ if (const RecordType *RT = T->getAs<RecordType>())
+ visitDeclRef(RT->getDecl());
+
+ // TODO: TemplateName
+
+ push("template_arguments");
+ completeAttrs();
+ for (unsigned I = 0, E = T->getNumArgs(); I != E; ++I)
+ dispatch(T->getArg(I));
+ pop();
+ }
+
+ //---- Statements ------------------------------------------------//
+ void dispatch(Stmt *S) {
+ // FIXME: this is not really XML at all
+ push("Stmt");
+ out << ">\n";
+ Stack.back().State = NS_Children; // explicitly become non-lazy
+ S->dump(out, Context.getSourceManager());
+ out << '\n';
+ pop();
+ }
+};
+}
+
+void Decl::dumpXML() const {
+ dumpXML(llvm::errs());
+}
+
+void Decl::dumpXML(raw_ostream &out) const {
+ XMLDumper(out, getASTContext()).dispatch(const_cast<Decl*>(this));
+}
+
+#else /* ifndef NDEBUG */
+
+void Decl::dumpXML() const {}
+void Decl::dumpXML(raw_ostream &out) const {}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/AST/Expr.cpp b/contrib/llvm/tools/clang/lib/AST/Expr.cpp
new file mode 100644
index 0000000..868109e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/Expr.cpp
@@ -0,0 +1,3903 @@
+//===--- Expr.cpp - Expression AST Node Implementation --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Expr class and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstring>
+using namespace clang;
+
+/// isKnownToHaveBooleanValue - Return true if this is an integer expression
+/// that is known to return 0 or 1. This happens for _Bool/bool expressions
+/// but also int expressions which are produced by things like comparisons in
+/// C.
+bool Expr::isKnownToHaveBooleanValue() const {
+ const Expr *E = IgnoreParens();
+
+ // If this value has _Bool type, it is obvious 0/1.
+ if (E->getType()->isBooleanType()) return true;
+ // If this is a non-scalar-integer type, we don't care enough to try.
+ if (!E->getType()->isIntegralOrEnumerationType()) return false;
+
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ switch (UO->getOpcode()) {
+ case UO_Plus:
+ return UO->getSubExpr()->isKnownToHaveBooleanValue();
+ default:
+ return false;
+ }
+ }
+
+ // Only look through implicit casts. If the user writes
+ // '(int) (a && b)' treat it as an arbitrary int.
+ if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E))
+ return CE->getSubExpr()->isKnownToHaveBooleanValue();
+
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ switch (BO->getOpcode()) {
+ default: return false;
+ case BO_LT: // Relational operators.
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ: // Equality operators.
+ case BO_NE:
+ case BO_LAnd: // AND operator.
+ case BO_LOr: // Logical OR operator.
+ return true;
+
+ case BO_And: // Bitwise AND operator.
+ case BO_Xor: // Bitwise XOR operator.
+ case BO_Or: // Bitwise OR operator.
+ // Handle things like (x==2)|(y==12).
+ return BO->getLHS()->isKnownToHaveBooleanValue() &&
+ BO->getRHS()->isKnownToHaveBooleanValue();
+
+ case BO_Comma:
+ case BO_Assign:
+ return BO->getRHS()->isKnownToHaveBooleanValue();
+ }
+ }
+
+ if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E))
+ return CO->getTrueExpr()->isKnownToHaveBooleanValue() &&
+ CO->getFalseExpr()->isKnownToHaveBooleanValue();
+
+ return false;
+}
+
+// Amusing macro metaprogramming hack: check whether a class provides
+// a more specific implementation of getExprLoc().
+//
+// See also Stmt.cpp:{getLocStart(),getLocEnd()}.
+namespace {
+ /// This implementation is used when a class provides a custom
+ /// implementation of getExprLoc.
+ template <class E, class T>
+ SourceLocation getExprLocImpl(const Expr *expr,
+ SourceLocation (T::*v)() const) {
+ return static_cast<const E*>(expr)->getExprLoc();
+ }
+
+ /// This implementation is used when a class doesn't provide
+ /// a custom implementation of getExprLoc. Overload resolution
+ /// should pick it over the implementation above because it's
+ /// more specialized according to function template partial ordering.
+ template <class E>
+ SourceLocation getExprLocImpl(const Expr *expr,
+ SourceLocation (Expr::*v)() const) {
+ return static_cast<const E*>(expr)->getLocStart();
+ }
+}
+
+SourceLocation Expr::getExprLoc() const {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
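+  // The remaining cases are generated from StmtNodes.inc: non-expression
+  // statements are unreachable here, and each expression class forwards to
+  // its most specific getExprLoc implementation via getExprLocImpl.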
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: llvm_unreachable(#type " is not an Expr"); break;
+#define EXPR(type, base) \
+ case Stmt::type##Class: return getExprLocImpl<type>(this, &type::getExprLoc);
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind");
+}
+
+//===----------------------------------------------------------------------===//
+// Primary Expressions.
+//===----------------------------------------------------------------------===//
+
+/// \brief Compute the type-, value-, and instantiation-dependence of a
+/// declaration reference based on the declaration being referenced.
+static void computeDeclRefDependence(ASTContext &Ctx, NamedDecl *D, QualType T,
+ bool &TypeDependent,
+ bool &ValueDependent,
+ bool &InstantiationDependent) {
+ TypeDependent = false;
+ ValueDependent = false;
+ InstantiationDependent = false;
+
+ // (TD) C++ [temp.dep.expr]p3:
+ // An id-expression is type-dependent if it contains:
+ //
+ // and
+ //
+ // (VD) C++ [temp.dep.constexpr]p2:
+ // An identifier is value-dependent if it is:
+
+ // (TD) - an identifier that was declared with dependent type
+ // (VD) - a name declared with a dependent type,
+ if (T->isDependentType()) {
+ TypeDependent = true;
+ ValueDependent = true;
+ InstantiationDependent = true;
+ return;
+ } else if (T->isInstantiationDependentType()) {
+ InstantiationDependent = true;
+ }
+
+ // (TD) - a conversion-function-id that specifies a dependent type
+ if (D->getDeclName().getNameKind()
+ == DeclarationName::CXXConversionFunctionName) {
+ QualType T = D->getDeclName().getCXXNameType();
+ if (T->isDependentType()) {
+ TypeDependent = true;
+ ValueDependent = true;
+ InstantiationDependent = true;
+ return;
+ }
+
+ if (T->isInstantiationDependentType())
+ InstantiationDependent = true;
+ }
+
+ // (VD) - the name of a non-type template parameter,
+ if (isa<NonTypeTemplateParmDecl>(D)) {
+ ValueDependent = true;
+ InstantiationDependent = true;
+ return;
+ }
+
+ // (VD) - a constant with integral or enumeration type and is
+ // initialized with an expression that is value-dependent.
+ // (VD) - a constant with literal type and is initialized with an
+ // expression that is value-dependent [C++11].
+ // (VD) - FIXME: Missing from the standard:
+ // - an entity with reference type and is initialized with an
+ // expression that is value-dependent [C++11]
+ if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ if ((Ctx.getLangOpts().CPlusPlus0x ?
+ Var->getType()->isLiteralType() :
+ Var->getType()->isIntegralOrEnumerationType()) &&
+ (Var->getType().getCVRQualifiers() == Qualifiers::Const ||
+ Var->getType()->isReferenceType())) {
+ if (const Expr *Init = Var->getAnyInitializer())
+ if (Init->isValueDependent()) {
+ ValueDependent = true;
+ InstantiationDependent = true;
+ }
+ }
+
+ // (VD) - FIXME: Missing from the standard:
+ // - a member function or a static data member of the current
+ // instantiation
+ if (Var->isStaticDataMember() &&
+ Var->getDeclContext()->isDependentContext()) {
+ ValueDependent = true;
+ InstantiationDependent = true;
+ }
+
+ return;
+ }
+
+ // (VD) - FIXME: Missing from the standard:
+ // - a member function or a static data member of the current
+ // instantiation
+ if (isa<CXXMethodDecl>(D) && D->getDeclContext()->isDependentContext()) {
+ ValueDependent = true;
+ InstantiationDependent = true;
+ }
+}
+
+void DeclRefExpr::computeDependence(ASTContext &Ctx) {
+ bool TypeDependent = false;
+ bool ValueDependent = false;
+ bool InstantiationDependent = false;
+ computeDeclRefDependence(Ctx, getDecl(), getType(), TypeDependent,
+ ValueDependent, InstantiationDependent);
+
+ // (TD) C++ [temp.dep.expr]p3:
+ // An id-expression is type-dependent if it contains:
+ //
+ // and
+ //
+ // (VD) C++ [temp.dep.constexpr]p2:
+ // An identifier is value-dependent if it is:
+ if (!TypeDependent && !ValueDependent &&
+ hasExplicitTemplateArgs() &&
+ TemplateSpecializationType::anyDependentTemplateArguments(
+ getTemplateArgs(),
+ getNumTemplateArgs(),
+ InstantiationDependent)) {
+ TypeDependent = true;
+ ValueDependent = true;
+ InstantiationDependent = true;
+ }
+
+ ExprBits.TypeDependent = TypeDependent;
+ ExprBits.ValueDependent = ValueDependent;
+ ExprBits.InstantiationDependent = InstantiationDependent;
+
+ // Is the declaration a parameter pack?
+ if (getDecl()->isParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+}
+
+DeclRefExpr::DeclRefExpr(ASTContext &Ctx,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *D, bool RefersToEnclosingLocal,
+ const DeclarationNameInfo &NameInfo,
+ NamedDecl *FoundD,
+ const TemplateArgumentListInfo *TemplateArgs,
+ QualType T, ExprValueKind VK)
+ : Expr(DeclRefExprClass, T, VK, OK_Ordinary, false, false, false, false),
+ D(D), Loc(NameInfo.getLoc()), DNLoc(NameInfo.getInfo()) {
+ DeclRefExprBits.HasQualifier = QualifierLoc ? 1 : 0;
+ if (QualifierLoc)
+ getInternalQualifierLoc() = QualifierLoc;
+ DeclRefExprBits.HasFoundDecl = FoundD ? 1 : 0;
+ if (FoundD)
+ getInternalFoundDecl() = FoundD;
+ DeclRefExprBits.HasTemplateKWAndArgsInfo
+ = (TemplateArgs || TemplateKWLoc.isValid()) ? 1 : 0;
+ DeclRefExprBits.RefersToEnclosingLocal = RefersToEnclosingLocal;
+ if (TemplateArgs) {
+ bool Dependent = false;
+ bool InstantiationDependent = false;
+ bool ContainsUnexpandedParameterPack = false;
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs,
+ Dependent,
+ InstantiationDependent,
+ ContainsUnexpandedParameterPack);
+ if (InstantiationDependent)
+ setInstantiationDependent(true);
+ } else if (TemplateKWLoc.isValid()) {
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
+ }
+ DeclRefExprBits.HadMultipleCandidates = 0;
+
+ computeDependence(Ctx);
+}
+
+DeclRefExpr *DeclRefExpr::Create(ASTContext &Context,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *D,
+ bool RefersToEnclosingLocal,
+ SourceLocation NameLoc,
+ QualType T,
+ ExprValueKind VK,
+ NamedDecl *FoundD,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ return Create(Context, QualifierLoc, TemplateKWLoc, D,
+ RefersToEnclosingLocal,
+ DeclarationNameInfo(D->getDeclName(), NameLoc),
+ T, VK, FoundD, TemplateArgs);
+}
+
+DeclRefExpr *DeclRefExpr::Create(ASTContext &Context,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *D,
+ bool RefersToEnclosingLocal,
+ const DeclarationNameInfo &NameInfo,
+ QualType T,
+ ExprValueKind VK,
+ NamedDecl *FoundD,
+ const TemplateArgumentListInfo *TemplateArgs) {
+  // Filter out cases where the found Decl is the same as the value referenced.
+ if (D == FoundD)
+ FoundD = 0;
+
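+  // The qualifier, found declaration, and any template arguments are stored
+  // immediately after the DeclRefExpr object when present, so size the
+  // allocation to include them.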
+ std::size_t Size = sizeof(DeclRefExpr);
+ if (QualifierLoc != 0)
+ Size += sizeof(NestedNameSpecifierLoc);
+ if (FoundD)
+ Size += sizeof(NamedDecl *);
+ if (TemplateArgs)
+ Size += ASTTemplateKWAndArgsInfo::sizeFor(TemplateArgs->size());
+ else if (TemplateKWLoc.isValid())
+ Size += ASTTemplateKWAndArgsInfo::sizeFor(0);
+
+ void *Mem = Context.Allocate(Size, llvm::alignOf<DeclRefExpr>());
+ return new (Mem) DeclRefExpr(Context, QualifierLoc, TemplateKWLoc, D,
+ RefersToEnclosingLocal,
+ NameInfo, FoundD, TemplateArgs, T, VK);
+}
+
+DeclRefExpr *DeclRefExpr::CreateEmpty(ASTContext &Context,
+ bool HasQualifier,
+ bool HasFoundDecl,
+ bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs) {
+ std::size_t Size = sizeof(DeclRefExpr);
+ if (HasQualifier)
+ Size += sizeof(NestedNameSpecifierLoc);
+ if (HasFoundDecl)
+ Size += sizeof(NamedDecl *);
+ if (HasTemplateKWAndArgsInfo)
+ Size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
+
+ void *Mem = Context.Allocate(Size, llvm::alignOf<DeclRefExpr>());
+ return new (Mem) DeclRefExpr(EmptyShell());
+}
+
+SourceRange DeclRefExpr::getSourceRange() const {
+ SourceRange R = getNameInfo().getSourceRange();
+ if (hasQualifier())
+ R.setBegin(getQualifierLoc().getBeginLoc());
+ if (hasExplicitTemplateArgs())
+ R.setEnd(getRAngleLoc());
+ return R;
+}
+SourceLocation DeclRefExpr::getLocStart() const {
+ if (hasQualifier())
+ return getQualifierLoc().getBeginLoc();
+ return getNameInfo().getLocStart();
+}
+SourceLocation DeclRefExpr::getLocEnd() const {
+ if (hasExplicitTemplateArgs())
+ return getRAngleLoc();
+ return getNameInfo().getLocEnd();
+}
+
+// FIXME: Maybe this should use DeclPrinter with a special "print predefined
+// expr" policy instead.
+std::string PredefinedExpr::ComputeName(IdentType IT, const Decl *CurrentDecl) {
+ ASTContext &Context = CurrentDecl->getASTContext();
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurrentDecl)) {
+ if (IT != PrettyFunction && IT != PrettyFunctionNoVirtual)
+ return FD->getNameAsString();
+
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (MD->isVirtual() && IT != PrettyFunctionNoVirtual)
+ Out << "virtual ";
+ if (MD->isStatic())
+ Out << "static ";
+ }
+
+ PrintingPolicy Policy(Context.getLangOpts());
+ std::string Proto = FD->getQualifiedNameAsString(Policy);
+ llvm::raw_string_ostream POut(Proto);
+
+ const FunctionDecl *Decl = FD;
+ if (const FunctionDecl* Pattern = FD->getTemplateInstantiationPattern())
+ Decl = Pattern;
+ const FunctionType *AFT = Decl->getType()->getAs<FunctionType>();
+ const FunctionProtoType *FT = 0;
+ if (FD->hasWrittenPrototype())
+ FT = dyn_cast<FunctionProtoType>(AFT);
+
+ POut << "(";
+ if (FT) {
+ for (unsigned i = 0, e = Decl->getNumParams(); i != e; ++i) {
+ if (i) POut << ", ";
+ std::string Param;
+ Decl->getParamDecl(i)->getType().getAsStringInternal(Param, Policy);
+ POut << Param;
+ }
+
+ if (FT->isVariadic()) {
+ if (FD->getNumParams()) POut << ", ";
+ POut << "...";
+ }
+ }
+ POut << ")";
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ Qualifiers ThisQuals = Qualifiers::fromCVRMask(MD->getTypeQualifiers());
+ if (ThisQuals.hasConst())
+ POut << " const";
+ if (ThisQuals.hasVolatile())
+ POut << " volatile";
+ RefQualifierKind Ref = MD->getRefQualifier();
+ if (Ref == RQ_LValue)
+ POut << " &";
+ else if (Ref == RQ_RValue)
+ POut << " &&";
+ }
+
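+    // Collect the enclosing class template specializations so that their
+    // template arguments can be printed after the signature.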
+ typedef SmallVector<const ClassTemplateSpecializationDecl *, 8> SpecsTy;
+ SpecsTy Specs;
+ const DeclContext *Ctx = FD->getDeclContext();
+ while (Ctx && isa<NamedDecl>(Ctx)) {
+ const ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Ctx);
+ if (Spec && !Spec->isExplicitSpecialization())
+ Specs.push_back(Spec);
+ Ctx = Ctx->getParent();
+ }
+
+ std::string TemplateParams;
+ llvm::raw_string_ostream TOut(TemplateParams);
+ for (SpecsTy::reverse_iterator I = Specs.rbegin(), E = Specs.rend();
+ I != E; ++I) {
+ const TemplateParameterList *Params
+ = (*I)->getSpecializedTemplate()->getTemplateParameters();
+ const TemplateArgumentList &Args = (*I)->getTemplateArgs();
+ assert(Params->size() == Args.size());
+ for (unsigned i = 0, numParams = Params->size(); i != numParams; ++i) {
+ StringRef Param = Params->getParam(i)->getName();
+ if (Param.empty()) continue;
+ TOut << Param << " = ";
+ Args.get(i).print(Policy, TOut);
+ TOut << ", ";
+ }
+ }
+
+ FunctionTemplateSpecializationInfo *FSI
+ = FD->getTemplateSpecializationInfo();
+ if (FSI && !FSI->isExplicitSpecialization()) {
+ const TemplateParameterList* Params
+ = FSI->getTemplate()->getTemplateParameters();
+ const TemplateArgumentList* Args = FSI->TemplateArguments;
+ assert(Params->size() == Args->size());
+ for (unsigned i = 0, e = Params->size(); i != e; ++i) {
+ StringRef Param = Params->getParam(i)->getName();
+ if (Param.empty()) continue;
+ TOut << Param << " = ";
+ Args->get(i).print(Policy, TOut);
+ TOut << ", ";
+ }
+ }
+
+ TOut.flush();
+ if (!TemplateParams.empty()) {
+ // remove the trailing comma and space
+ TemplateParams.resize(TemplateParams.size() - 2);
+ POut << " [" << TemplateParams << "]";
+ }
+
+ POut.flush();
+
+ if (!isa<CXXConstructorDecl>(FD) && !isa<CXXDestructorDecl>(FD))
+ AFT->getResultType().getAsStringInternal(Proto, Policy);
+
+ Out << Proto;
+
+ Out.flush();
+ return Name.str().str();
+ }
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CurrentDecl)) {
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ Out << (MD->isInstanceMethod() ? '-' : '+');
+ Out << '[';
+
+ // For incorrect code, there might not be an ObjCInterfaceDecl. Do
+ // a null check to avoid a crash.
+ if (const ObjCInterfaceDecl *ID = MD->getClassInterface())
+ Out << *ID;
+
+ if (const ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(MD->getDeclContext()))
+ Out << '(' << *CID << ')';
+
+ Out << ' ';
+ Out << MD->getSelector().getAsString();
+ Out << ']';
+
+ Out.flush();
+ return Name.str().str();
+ }
+ if (isa<TranslationUnitDecl>(CurrentDecl) && IT == PrettyFunction) {
+ // __PRETTY_FUNCTION__ -> "top level", the others produce an empty string.
+ return "top level";
+ }
+ return "";
+}
+
+void APNumericStorage::setIntValue(ASTContext &C, const llvm::APInt &Val) {
+ if (hasAllocation())
+ C.Deallocate(pVal);
+
+ BitWidth = Val.getBitWidth();
+ unsigned NumWords = Val.getNumWords();
+ const uint64_t* Words = Val.getRawData();
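+  // Single-word values are stored inline in VAL; wider values are copied into
+  // an array allocated from the ASTContext.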
+ if (NumWords > 1) {
+ pVal = new (C) uint64_t[NumWords];
+ std::copy(Words, Words + NumWords, pVal);
+ } else if (NumWords == 1)
+ VAL = Words[0];
+ else
+ VAL = 0;
+}
+
+IntegerLiteral *
+IntegerLiteral::Create(ASTContext &C, const llvm::APInt &V,
+ QualType type, SourceLocation l) {
+ return new (C) IntegerLiteral(C, V, type, l);
+}
+
+IntegerLiteral *
+IntegerLiteral::Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) IntegerLiteral(Empty);
+}
+
+FloatingLiteral *
+FloatingLiteral::Create(ASTContext &C, const llvm::APFloat &V,
+ bool isexact, QualType Type, SourceLocation L) {
+ return new (C) FloatingLiteral(C, V, isexact, Type, L);
+}
+
+FloatingLiteral *
+FloatingLiteral::Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) FloatingLiteral(C, Empty);
+}
+
+/// getValueAsApproximateDouble - This returns the value as an inaccurate
+/// double. Note that this may cause loss of precision, but is useful for
+/// debugging dumps, etc.
+double FloatingLiteral::getValueAsApproximateDouble() const {
+ llvm::APFloat V = getValue();
+ bool ignored;
+ V.convert(llvm::APFloat::IEEEdouble, llvm::APFloat::rmNearestTiesToEven,
+ &ignored);
+ return V.convertToDouble();
+}
+
+int StringLiteral::mapCharByteWidth(TargetInfo const &target,StringKind k) {
+ int CharByteWidth = 0;
+ switch(k) {
+ case Ascii:
+ case UTF8:
+ CharByteWidth = target.getCharWidth();
+ break;
+ case Wide:
+ CharByteWidth = target.getWCharWidth();
+ break;
+ case UTF16:
+ CharByteWidth = target.getChar16Width();
+ break;
+ case UTF32:
+ CharByteWidth = target.getChar32Width();
+ break;
+ }
+ assert((CharByteWidth & 7) == 0 && "Assumes character size is byte multiple");
+ CharByteWidth /= 8;
+ assert((CharByteWidth==1 || CharByteWidth==2 || CharByteWidth==4)
+ && "character byte widths supported are 1, 2, and 4 only");
+ return CharByteWidth;
+}
+
+StringLiteral *StringLiteral::Create(ASTContext &C, StringRef Str,
+ StringKind Kind, bool Pascal, QualType Ty,
+ const SourceLocation *Loc,
+ unsigned NumStrs) {
+ // Allocate enough space for the StringLiteral plus an array of locations for
+ // any concatenated string tokens.
+ void *Mem = C.Allocate(sizeof(StringLiteral)+
+ sizeof(SourceLocation)*(NumStrs-1),
+ llvm::alignOf<StringLiteral>());
+ StringLiteral *SL = new (Mem) StringLiteral(Ty);
+
+ // OPTIMIZE: could allocate this appended to the StringLiteral.
+ SL->setString(C,Str,Kind,Pascal);
+
+ SL->TokLocs[0] = Loc[0];
+ SL->NumConcatenated = NumStrs;
+
+ if (NumStrs != 1)
+ memcpy(&SL->TokLocs[1], Loc+1, sizeof(SourceLocation)*(NumStrs-1));
+ return SL;
+}
+
+StringLiteral *StringLiteral::CreateEmpty(ASTContext &C, unsigned NumStrs) {
+ void *Mem = C.Allocate(sizeof(StringLiteral)+
+ sizeof(SourceLocation)*(NumStrs-1),
+ llvm::alignOf<StringLiteral>());
+ StringLiteral *SL = new (Mem) StringLiteral(QualType());
+ SL->CharByteWidth = 0;
+ SL->Length = 0;
+ SL->NumConcatenated = NumStrs;
+ return SL;
+}
+
+void StringLiteral::setString(ASTContext &C, StringRef Str,
+ StringKind Kind, bool IsPascal) {
+  // FIXME: we assume that the string data comes from a target that uses the
+  // same code unit size and endianness for the type of string.
+ this->Kind = Kind;
+ this->IsPascal = IsPascal;
+
+ CharByteWidth = mapCharByteWidth(C.getTargetInfo(),Kind);
+ assert((Str.size()%CharByteWidth == 0)
+ && "size of data must be multiple of CharByteWidth");
+ Length = Str.size()/CharByteWidth;
+
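+  // Copy the string into ASTContext-allocated storage of the matching
+  // code-unit width.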
+ switch(CharByteWidth) {
+ case 1: {
+ char *AStrData = new (C) char[Length];
+ std::memcpy(AStrData,Str.data(),Str.size());
+ StrData.asChar = AStrData;
+ break;
+ }
+ case 2: {
+ uint16_t *AStrData = new (C) uint16_t[Length];
+ std::memcpy(AStrData,Str.data(),Str.size());
+ StrData.asUInt16 = AStrData;
+ break;
+ }
+ case 4: {
+ uint32_t *AStrData = new (C) uint32_t[Length];
+ std::memcpy(AStrData,Str.data(),Str.size());
+ StrData.asUInt32 = AStrData;
+ break;
+ }
+ default:
+ assert(false && "unsupported CharByteWidth");
+ }
+}
+
+/// getLocationOfByte - Return a source location that points to the specified
+/// byte of this string literal.
+///
+/// Strings are amazingly complex. They can be formed from multiple tokens and
+/// can have escape sequences in them in addition to the usual trigraph and
+/// escaped newline business. This routine handles this complexity.
+///
+SourceLocation StringLiteral::
+getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
+ const LangOptions &Features, const TargetInfo &Target) const {
+ assert(Kind == StringLiteral::Ascii && "This only works for ASCII strings");
+
+ // Loop over all of the tokens in this string until we find the one that
+ // contains the byte we're looking for.
+ unsigned TokNo = 0;
+ while (1) {
+ assert(TokNo < getNumConcatenated() && "Invalid byte number!");
+ SourceLocation StrTokLoc = getStrTokenLoc(TokNo);
+
+ // Get the spelling of the string so that we can get the data that makes up
+ // the string literal, not the identifier for the macro it is potentially
+ // expanded through.
+ SourceLocation StrTokSpellingLoc = SM.getSpellingLoc(StrTokLoc);
+
+ // Re-lex the token to get its length and original spelling.
+ std::pair<FileID, unsigned> LocInfo =SM.getDecomposedLoc(StrTokSpellingLoc);
+ bool Invalid = false;
+ StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
+ if (Invalid)
+ return StrTokSpellingLoc;
+
+ const char *StrData = Buffer.data()+LocInfo.second;
+
+ // Create a langops struct and enable trigraphs. This is sufficient for
+ // relexing tokens.
+ LangOptions LangOpts;
+ LangOpts.Trigraphs = true;
+
+ // Create a lexer starting at the beginning of this token.
+ Lexer TheLexer(StrTokSpellingLoc, Features, Buffer.begin(), StrData,
+ Buffer.end());
+ Token TheTok;
+ TheLexer.LexFromRawLexer(TheTok);
+
+ // Use the StringLiteralParser to compute the length of the string in bytes.
+ StringLiteralParser SLP(&TheTok, 1, SM, Features, Target);
+ unsigned TokNumBytes = SLP.GetStringLength();
+
+ // If the byte is in this token, return the location of the byte.
+ if (ByteNo < TokNumBytes ||
+ (ByteNo == TokNumBytes && TokNo == getNumConcatenated() - 1)) {
+ unsigned Offset = SLP.getOffsetOfStringByte(TheTok, ByteNo);
+
+ // Now that we know the offset of the token in the spelling, use the
+ // preprocessor to get the offset in the original source.
+ return Lexer::AdvanceToTokenCharacter(StrTokLoc, Offset, SM, Features);
+ }
+
+ // Move to the next string token.
+ ++TokNo;
+ ByteNo -= TokNumBytes;
+ }
+}
+
+
+
+/// getOpcodeStr - Turn an Opcode enum value into the string it
+/// corresponds to, e.g. "__extension__" or "[pre]++".
+const char *UnaryOperator::getOpcodeStr(Opcode Op) {
+ switch (Op) {
+ case UO_PostInc: return "++";
+ case UO_PostDec: return "--";
+ case UO_PreInc: return "++";
+ case UO_PreDec: return "--";
+ case UO_AddrOf: return "&";
+ case UO_Deref: return "*";
+ case UO_Plus: return "+";
+ case UO_Minus: return "-";
+ case UO_Not: return "~";
+ case UO_LNot: return "!";
+ case UO_Real: return "__real";
+ case UO_Imag: return "__imag";
+ case UO_Extension: return "__extension__";
+ }
+ llvm_unreachable("Unknown unary operator");
+}
+
+UnaryOperatorKind
+UnaryOperator::getOverloadedOpcode(OverloadedOperatorKind OO, bool Postfix) {
+ switch (OO) {
+ default: llvm_unreachable("No unary operator for overloaded function");
+ case OO_PlusPlus: return Postfix ? UO_PostInc : UO_PreInc;
+ case OO_MinusMinus: return Postfix ? UO_PostDec : UO_PreDec;
+ case OO_Amp: return UO_AddrOf;
+ case OO_Star: return UO_Deref;
+ case OO_Plus: return UO_Plus;
+ case OO_Minus: return UO_Minus;
+ case OO_Tilde: return UO_Not;
+ case OO_Exclaim: return UO_LNot;
+ }
+}
+
+OverloadedOperatorKind UnaryOperator::getOverloadedOperator(Opcode Opc) {
+ switch (Opc) {
+ case UO_PostInc: case UO_PreInc: return OO_PlusPlus;
+ case UO_PostDec: case UO_PreDec: return OO_MinusMinus;
+ case UO_AddrOf: return OO_Amp;
+ case UO_Deref: return OO_Star;
+ case UO_Plus: return OO_Plus;
+ case UO_Minus: return OO_Minus;
+ case UO_Not: return OO_Tilde;
+ case UO_LNot: return OO_Exclaim;
+ default: return OO_None;
+ }
+}
+
+
+//===----------------------------------------------------------------------===//
+// Postfix Operators.
+//===----------------------------------------------------------------------===//
+
+CallExpr::CallExpr(ASTContext& C, StmtClass SC, Expr *fn, unsigned NumPreArgs,
+ Expr **args, unsigned numargs, QualType t, ExprValueKind VK,
+ SourceLocation rparenloc)
+ : Expr(SC, t, VK, OK_Ordinary,
+ fn->isTypeDependent(),
+ fn->isValueDependent(),
+ fn->isInstantiationDependent(),
+ fn->containsUnexpandedParameterPack()),
+ NumArgs(numargs) {
+
+ SubExprs = new (C) Stmt*[numargs+PREARGS_START+NumPreArgs];
+ SubExprs[FN] = fn;
+ for (unsigned i = 0; i != numargs; ++i) {
+ if (args[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i+PREARGS_START+NumPreArgs] = args[i];
+ }
+
+ CallExprBits.NumPreArgs = NumPreArgs;
+ RParenLoc = rparenloc;
+}
+
+CallExpr::CallExpr(ASTContext& C, Expr *fn, Expr **args, unsigned numargs,
+ QualType t, ExprValueKind VK, SourceLocation rparenloc)
+ : Expr(CallExprClass, t, VK, OK_Ordinary,
+ fn->isTypeDependent(),
+ fn->isValueDependent(),
+ fn->isInstantiationDependent(),
+ fn->containsUnexpandedParameterPack()),
+ NumArgs(numargs) {
+
+ SubExprs = new (C) Stmt*[numargs+PREARGS_START];
+ SubExprs[FN] = fn;
+ for (unsigned i = 0; i != numargs; ++i) {
+ if (args[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i+PREARGS_START] = args[i];
+ }
+
+ CallExprBits.NumPreArgs = 0;
+ RParenLoc = rparenloc;
+}
+
+CallExpr::CallExpr(ASTContext &C, StmtClass SC, EmptyShell Empty)
+ : Expr(SC, Empty), SubExprs(0), NumArgs(0) {
+ // FIXME: Why do we allocate this?
+ SubExprs = new (C) Stmt*[PREARGS_START];
+ CallExprBits.NumPreArgs = 0;
+}
+
+CallExpr::CallExpr(ASTContext &C, StmtClass SC, unsigned NumPreArgs,
+ EmptyShell Empty)
+ : Expr(SC, Empty), SubExprs(0), NumArgs(0) {
+ // FIXME: Why do we allocate this?
+ SubExprs = new (C) Stmt*[PREARGS_START+NumPreArgs];
+ CallExprBits.NumPreArgs = NumPreArgs;
+}
+
+Decl *CallExpr::getCalleeDecl() {
+ Expr *CEE = getCallee()->IgnoreParenImpCasts();
+
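+  // Look through substituted non-type template parameters to the expression
+  // that replaced them.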
+ while (SubstNonTypeTemplateParmExpr *NTTP
+ = dyn_cast<SubstNonTypeTemplateParmExpr>(CEE)) {
+ CEE = NTTP->getReplacement()->IgnoreParenCasts();
+ }
+
+ // If we're calling a dereference, look at the pointer instead.
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CEE)) {
+ if (BO->isPtrMemOp())
+ CEE = BO->getRHS()->IgnoreParenCasts();
+ } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(CEE)) {
+ if (UO->getOpcode() == UO_Deref)
+ CEE = UO->getSubExpr()->IgnoreParenCasts();
+ }
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE))
+ return DRE->getDecl();
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(CEE))
+ return ME->getMemberDecl();
+
+ return 0;
+}
+
+FunctionDecl *CallExpr::getDirectCallee() {
+ return dyn_cast_or_null<FunctionDecl>(getCalleeDecl());
+}
+
+/// setNumArgs - This changes the number of arguments present in this call.
+/// Any orphaned expressions are deleted by this, and any new operands are set
+/// to null.
+void CallExpr::setNumArgs(ASTContext& C, unsigned NumArgs) {
+ // No change, just return.
+ if (NumArgs == getNumArgs()) return;
+
+  // If shrinking # arguments, just delete the extras and forget them.
+ if (NumArgs < getNumArgs()) {
+ this->NumArgs = NumArgs;
+ return;
+ }
+
+  // Otherwise, we are growing the # arguments.  Allocate a new, bigger
+  // argument array.
+ unsigned NumPreArgs = getNumPreArgs();
+ Stmt **NewSubExprs = new (C) Stmt*[NumArgs+PREARGS_START+NumPreArgs];
+ // Copy over args.
+ for (unsigned i = 0; i != getNumArgs()+PREARGS_START+NumPreArgs; ++i)
+ NewSubExprs[i] = SubExprs[i];
+ // Null out new args.
+ for (unsigned i = getNumArgs()+PREARGS_START+NumPreArgs;
+ i != NumArgs+PREARGS_START+NumPreArgs; ++i)
+ NewSubExprs[i] = 0;
+
+ if (SubExprs) C.Deallocate(SubExprs);
+ SubExprs = NewSubExprs;
+ this->NumArgs = NumArgs;
+}
+
+/// isBuiltinCall - If this is a call to a builtin, return the builtin ID. If
+/// not, return 0.
+unsigned CallExpr::isBuiltinCall() const {
+ // All simple function calls (e.g. func()) are implicitly cast to pointer to
+  // function. As a result, we try to obtain the DeclRefExpr from the
+ // ImplicitCastExpr.
+ const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(getCallee());
+ if (!ICE) // FIXME: deal with more complex calls (e.g. (func)(), (*func)()).
+ return 0;
+
+ const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr());
+ if (!DRE)
+ return 0;
+
+ const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(DRE->getDecl());
+ if (!FDecl)
+ return 0;
+
+ if (!FDecl->getIdentifier())
+ return 0;
+
+ return FDecl->getBuiltinID();
+}
+
+QualType CallExpr::getCallReturnType() const {
+ QualType CalleeType = getCallee()->getType();
+ if (const PointerType *FnTypePtr = CalleeType->getAs<PointerType>())
+ CalleeType = FnTypePtr->getPointeeType();
+ else if (const BlockPointerType *BPT = CalleeType->getAs<BlockPointerType>())
+ CalleeType = BPT->getPointeeType();
+ else if (CalleeType->isSpecificPlaceholderType(BuiltinType::BoundMember))
+ // This should never be overloaded and so should never return null.
+ CalleeType = Expr::findBoundMemberType(getCallee());
+
+ const FunctionType *FnType = CalleeType->castAs<FunctionType>();
+ return FnType->getResultType();
+}
+
+SourceRange CallExpr::getSourceRange() const {
+ if (isa<CXXOperatorCallExpr>(this))
+ return cast<CXXOperatorCallExpr>(this)->getSourceRange();
+
+ SourceLocation begin = getCallee()->getLocStart();
+ if (begin.isInvalid() && getNumArgs() > 0)
+ begin = getArg(0)->getLocStart();
+ SourceLocation end = getRParenLoc();
+ if (end.isInvalid() && getNumArgs() > 0)
+ end = getArg(getNumArgs() - 1)->getLocEnd();
+ return SourceRange(begin, end);
+}
+SourceLocation CallExpr::getLocStart() const {
+ if (isa<CXXOperatorCallExpr>(this))
+ return cast<CXXOperatorCallExpr>(this)->getSourceRange().getBegin();
+
+ SourceLocation begin = getCallee()->getLocStart();
+ if (begin.isInvalid() && getNumArgs() > 0)
+ begin = getArg(0)->getLocStart();
+ return begin;
+}
+SourceLocation CallExpr::getLocEnd() const {
+ if (isa<CXXOperatorCallExpr>(this))
+ return cast<CXXOperatorCallExpr>(this)->getSourceRange().getEnd();
+
+ SourceLocation end = getRParenLoc();
+ if (end.isInvalid() && getNumArgs() > 0)
+ end = getArg(getNumArgs() - 1)->getLocEnd();
+ return end;
+}
+
+OffsetOfExpr *OffsetOfExpr::Create(ASTContext &C, QualType type,
+ SourceLocation OperatorLoc,
+ TypeSourceInfo *tsi,
+ OffsetOfNode* compsPtr, unsigned numComps,
+ Expr** exprsPtr, unsigned numExprs,
+ SourceLocation RParenLoc) {
+ void *Mem = C.Allocate(sizeof(OffsetOfExpr) +
+ sizeof(OffsetOfNode) * numComps +
+ sizeof(Expr*) * numExprs);
+
+ return new (Mem) OffsetOfExpr(C, type, OperatorLoc, tsi, compsPtr, numComps,
+ exprsPtr, numExprs, RParenLoc);
+}
+
+OffsetOfExpr *OffsetOfExpr::CreateEmpty(ASTContext &C,
+ unsigned numComps, unsigned numExprs) {
+ void *Mem = C.Allocate(sizeof(OffsetOfExpr) +
+ sizeof(OffsetOfNode) * numComps +
+ sizeof(Expr*) * numExprs);
+ return new (Mem) OffsetOfExpr(numComps, numExprs);
+}
+
+OffsetOfExpr::OffsetOfExpr(ASTContext &C, QualType type,
+ SourceLocation OperatorLoc, TypeSourceInfo *tsi,
+ OffsetOfNode* compsPtr, unsigned numComps,
+ Expr** exprsPtr, unsigned numExprs,
+ SourceLocation RParenLoc)
+ : Expr(OffsetOfExprClass, type, VK_RValue, OK_Ordinary,
+ /*TypeDependent=*/false,
+ /*ValueDependent=*/tsi->getType()->isDependentType(),
+ tsi->getType()->isInstantiationDependentType(),
+ tsi->getType()->containsUnexpandedParameterPack()),
+ OperatorLoc(OperatorLoc), RParenLoc(RParenLoc), TSInfo(tsi),
+ NumComps(numComps), NumExprs(numExprs)
+{
+ for(unsigned i = 0; i < numComps; ++i) {
+ setComponent(i, compsPtr[i]);
+ }
+
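+  // The expression is value-dependent if any index expression is type- or
+  // value-dependent, and it propagates unexpanded parameter packs.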
+ for(unsigned i = 0; i < numExprs; ++i) {
+ if (exprsPtr[i]->isTypeDependent() || exprsPtr[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (exprsPtr[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ setIndexExpr(i, exprsPtr[i]);
+ }
+}
+
+IdentifierInfo *OffsetOfExpr::OffsetOfNode::getFieldName() const {
+ assert(getKind() == Field || getKind() == Identifier);
+ if (getKind() == Field)
+ return getField()->getIdentifier();
+
+ return reinterpret_cast<IdentifierInfo *> (Data & ~(uintptr_t)Mask);
+}
+
+MemberExpr *MemberExpr::Create(ASTContext &C, Expr *base, bool isarrow,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *memberdecl,
+ DeclAccessPair founddecl,
+ DeclarationNameInfo nameinfo,
+ const TemplateArgumentListInfo *targs,
+ QualType ty,
+ ExprValueKind vk,
+ ExprObjectKind ok) {
+ std::size_t Size = sizeof(MemberExpr);
+
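+  // A qualifier or a distinct found declaration is stored in an optional
+  // MemberNameQualifier record allocated after the MemberExpr itself.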
+ bool hasQualOrFound = (QualifierLoc ||
+ founddecl.getDecl() != memberdecl ||
+ founddecl.getAccess() != memberdecl->getAccess());
+ if (hasQualOrFound)
+ Size += sizeof(MemberNameQualifier);
+
+ if (targs)
+ Size += ASTTemplateKWAndArgsInfo::sizeFor(targs->size());
+ else if (TemplateKWLoc.isValid())
+ Size += ASTTemplateKWAndArgsInfo::sizeFor(0);
+
+ void *Mem = C.Allocate(Size, llvm::alignOf<MemberExpr>());
+ MemberExpr *E = new (Mem) MemberExpr(base, isarrow, memberdecl, nameinfo,
+ ty, vk, ok);
+
+ if (hasQualOrFound) {
+ // FIXME: Wrong. We should be looking at the member declaration we found.
+ if (QualifierLoc && QualifierLoc.getNestedNameSpecifier()->isDependent()) {
+ E->setValueDependent(true);
+ E->setTypeDependent(true);
+ E->setInstantiationDependent(true);
+ }
+ else if (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())
+ E->setInstantiationDependent(true);
+
+ E->HasQualifierOrFoundDecl = true;
+
+ MemberNameQualifier *NQ = E->getMemberQualifier();
+ NQ->QualifierLoc = QualifierLoc;
+ NQ->FoundDecl = founddecl;
+ }
+
+ E->HasTemplateKWAndArgsInfo = (targs || TemplateKWLoc.isValid());
+
+ if (targs) {
+ bool Dependent = false;
+ bool InstantiationDependent = false;
+ bool ContainsUnexpandedParameterPack = false;
+ E->getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *targs,
+ Dependent,
+ InstantiationDependent,
+ ContainsUnexpandedParameterPack);
+ if (InstantiationDependent)
+ E->setInstantiationDependent(true);
+ } else if (TemplateKWLoc.isValid()) {
+ E->getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
+ }
+
+ return E;
+}
+
+SourceRange MemberExpr::getSourceRange() const {
+ return SourceRange(getLocStart(), getLocEnd());
+}
+SourceLocation MemberExpr::getLocStart() const {
+ if (isImplicitAccess()) {
+ if (hasQualifier())
+ return getQualifierLoc().getBeginLoc();
+ return MemberLoc;
+ }
+
+ // FIXME: We don't want this to happen. Rather, we should be able to
+ // detect all kinds of implicit accesses more cleanly.
+ SourceLocation BaseStartLoc = getBase()->getLocStart();
+ if (BaseStartLoc.isValid())
+ return BaseStartLoc;
+ return MemberLoc;
+}
+SourceLocation MemberExpr::getLocEnd() const {
+ if (hasExplicitTemplateArgs())
+ return getRAngleLoc();
+ return getMemberNameInfo().getEndLoc();
+}
+
+void CastExpr::CheckCastConsistency() const {
+ switch (getCastKind()) {
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_BaseToDerived:
+ case CK_BaseToDerivedMemberPointer:
+ assert(!path_empty() && "Cast kind should have a base path!");
+ break;
+
+ case CK_CPointerToObjCPointerCast:
+ assert(getType()->isObjCObjectPointerType());
+ assert(getSubExpr()->getType()->isPointerType());
+ goto CheckNoBasePath;
+
+ case CK_BlockPointerToObjCPointerCast:
+ assert(getType()->isObjCObjectPointerType());
+ assert(getSubExpr()->getType()->isBlockPointerType());
+ goto CheckNoBasePath;
+
+ case CK_ReinterpretMemberPointer:
+ assert(getType()->isMemberPointerType());
+ assert(getSubExpr()->getType()->isMemberPointerType());
+ goto CheckNoBasePath;
+
+ case CK_BitCast:
+ // Arbitrary casts to C pointer types count as bitcasts.
+ // Otherwise, we should only have block and ObjC pointer casts
+ // here if they stay within the type kind.
+ if (!getType()->isPointerType()) {
+ assert(getType()->isObjCObjectPointerType() ==
+ getSubExpr()->getType()->isObjCObjectPointerType());
+ assert(getType()->isBlockPointerType() ==
+ getSubExpr()->getType()->isBlockPointerType());
+ }
+ goto CheckNoBasePath;
+
+ case CK_AnyPointerToBlockPointerCast:
+ assert(getType()->isBlockPointerType());
+ assert(getSubExpr()->getType()->isAnyPointerType() &&
+ !getSubExpr()->getType()->isBlockPointerType());
+ goto CheckNoBasePath;
+
+ case CK_CopyAndAutoreleaseBlockObject:
+ assert(getType()->isBlockPointerType());
+ assert(getSubExpr()->getType()->isBlockPointerType());
+ goto CheckNoBasePath;
+
+ // These should not have an inheritance path.
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToMemberPointer:
+ case CK_NullToPointer:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ assert(!getType()->isBooleanType() && "unheralded conversion to bool");
+ goto CheckNoBasePath;
+
+ case CK_Dependent:
+ case CK_LValueToRValue:
+ case CK_NoOp:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_PointerToBoolean:
+ case CK_IntegralToBoolean:
+ case CK_FloatingToBoolean:
+ case CK_MemberPointerToBoolean:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToBoolean:
+ case CK_LValueBitCast: // -> bool&
+ case CK_UserDefinedConversion: // operator bool()
+ CheckNoBasePath:
+ assert(path_empty() && "Cast kind should not have a base path!");
+ break;
+ }
+}
+
+const char *CastExpr::getCastKindName() const {
+ switch (getCastKind()) {
+ case CK_Dependent:
+ return "Dependent";
+ case CK_BitCast:
+ return "BitCast";
+ case CK_LValueBitCast:
+ return "LValueBitCast";
+ case CK_LValueToRValue:
+ return "LValueToRValue";
+ case CK_NoOp:
+ return "NoOp";
+ case CK_BaseToDerived:
+ return "BaseToDerived";
+ case CK_DerivedToBase:
+ return "DerivedToBase";
+ case CK_UncheckedDerivedToBase:
+ return "UncheckedDerivedToBase";
+ case CK_Dynamic:
+ return "Dynamic";
+ case CK_ToUnion:
+ return "ToUnion";
+ case CK_ArrayToPointerDecay:
+ return "ArrayToPointerDecay";
+ case CK_FunctionToPointerDecay:
+ return "FunctionToPointerDecay";
+ case CK_NullToMemberPointer:
+ return "NullToMemberPointer";
+ case CK_NullToPointer:
+ return "NullToPointer";
+ case CK_BaseToDerivedMemberPointer:
+ return "BaseToDerivedMemberPointer";
+ case CK_DerivedToBaseMemberPointer:
+ return "DerivedToBaseMemberPointer";
+ case CK_ReinterpretMemberPointer:
+ return "ReinterpretMemberPointer";
+ case CK_UserDefinedConversion:
+ return "UserDefinedConversion";
+ case CK_ConstructorConversion:
+ return "ConstructorConversion";
+ case CK_IntegralToPointer:
+ return "IntegralToPointer";
+ case CK_PointerToIntegral:
+ return "PointerToIntegral";
+ case CK_PointerToBoolean:
+ return "PointerToBoolean";
+ case CK_ToVoid:
+ return "ToVoid";
+ case CK_VectorSplat:
+ return "VectorSplat";
+ case CK_IntegralCast:
+ return "IntegralCast";
+ case CK_IntegralToBoolean:
+ return "IntegralToBoolean";
+ case CK_IntegralToFloating:
+ return "IntegralToFloating";
+ case CK_FloatingToIntegral:
+ return "FloatingToIntegral";
+ case CK_FloatingCast:
+ return "FloatingCast";
+ case CK_FloatingToBoolean:
+ return "FloatingToBoolean";
+ case CK_MemberPointerToBoolean:
+ return "MemberPointerToBoolean";
+ case CK_CPointerToObjCPointerCast:
+ return "CPointerToObjCPointerCast";
+ case CK_BlockPointerToObjCPointerCast:
+ return "BlockPointerToObjCPointerCast";
+ case CK_AnyPointerToBlockPointerCast:
+ return "AnyPointerToBlockPointerCast";
+ case CK_ObjCObjectLValueCast:
+ return "ObjCObjectLValueCast";
+ case CK_FloatingRealToComplex:
+ return "FloatingRealToComplex";
+ case CK_FloatingComplexToReal:
+ return "FloatingComplexToReal";
+ case CK_FloatingComplexToBoolean:
+ return "FloatingComplexToBoolean";
+ case CK_FloatingComplexCast:
+ return "FloatingComplexCast";
+ case CK_FloatingComplexToIntegralComplex:
+ return "FloatingComplexToIntegralComplex";
+ case CK_IntegralRealToComplex:
+ return "IntegralRealToComplex";
+ case CK_IntegralComplexToReal:
+ return "IntegralComplexToReal";
+ case CK_IntegralComplexToBoolean:
+ return "IntegralComplexToBoolean";
+ case CK_IntegralComplexCast:
+ return "IntegralComplexCast";
+ case CK_IntegralComplexToFloatingComplex:
+ return "IntegralComplexToFloatingComplex";
+ case CK_ARCConsumeObject:
+ return "ARCConsumeObject";
+ case CK_ARCProduceObject:
+ return "ARCProduceObject";
+ case CK_ARCReclaimReturnedObject:
+ return "ARCReclaimReturnedObject";
+ case CK_ARCExtendBlockObject:
+    return "ARCExtendBlockObject";
+ case CK_AtomicToNonAtomic:
+ return "AtomicToNonAtomic";
+ case CK_NonAtomicToAtomic:
+ return "NonAtomicToAtomic";
+ case CK_CopyAndAutoreleaseBlockObject:
+ return "CopyAndAutoreleaseBlockObject";
+ }
+
+ llvm_unreachable("Unhandled cast kind!");
+}
+
+Expr *CastExpr::getSubExprAsWritten() {
+ Expr *SubExpr = 0;
+ CastExpr *E = this;
+ do {
+ SubExpr = E->getSubExpr();
+
+ // Skip through reference binding to temporary.
+ if (MaterializeTemporaryExpr *Materialize
+ = dyn_cast<MaterializeTemporaryExpr>(SubExpr))
+ SubExpr = Materialize->GetTemporaryExpr();
+
+ // Skip any temporary bindings; they're implicit.
+ if (CXXBindTemporaryExpr *Binder = dyn_cast<CXXBindTemporaryExpr>(SubExpr))
+ SubExpr = Binder->getSubExpr();
+
+ // Conversions by constructor and conversion functions have a
+ // subexpression describing the call; strip it off.
+ if (E->getCastKind() == CK_ConstructorConversion)
+ SubExpr = cast<CXXConstructExpr>(SubExpr)->getArg(0);
+ else if (E->getCastKind() == CK_UserDefinedConversion)
+ SubExpr = cast<CXXMemberCallExpr>(SubExpr)->getImplicitObjectArgument();
+
+ // If the subexpression we're left with is an implicit cast, look
+ // through that, too.
+ } while ((E = dyn_cast<ImplicitCastExpr>(SubExpr)));
+
+ return SubExpr;
+}
+
+CXXBaseSpecifier **CastExpr::path_buffer() {
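+  // The base-path array is allocated directly after the concrete cast node
+  // (see the cast expressions' Create methods), so compute its address from
+  // 'this'.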
+ switch (getStmtClass()) {
+#define ABSTRACT_STMT(x)
+#define CASTEXPR(Type, Base) \
+ case Stmt::Type##Class: \
+ return reinterpret_cast<CXXBaseSpecifier**>(static_cast<Type*>(this)+1);
+#define STMT(Type, Base)
+#include "clang/AST/StmtNodes.inc"
+ default:
+ llvm_unreachable("non-cast expressions not possible here");
+ }
+}
+
+void CastExpr::setCastPath(const CXXCastPath &Path) {
+ assert(Path.size() == path_size());
+ memcpy(path_buffer(), Path.data(), Path.size() * sizeof(CXXBaseSpecifier*));
+}
+
+ImplicitCastExpr *ImplicitCastExpr::Create(ASTContext &C, QualType T,
+ CastKind Kind, Expr *Operand,
+ const CXXCastPath *BasePath,
+ ExprValueKind VK) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer =
+ C.Allocate(sizeof(ImplicitCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ ImplicitCastExpr *E =
+ new (Buffer) ImplicitCastExpr(T, Kind, Operand, PathSize, VK);
+ if (PathSize) E->setCastPath(*BasePath);
+ return E;
+}
+
+ImplicitCastExpr *ImplicitCastExpr::CreateEmpty(ASTContext &C,
+ unsigned PathSize) {
+ void *Buffer =
+ C.Allocate(sizeof(ImplicitCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ return new (Buffer) ImplicitCastExpr(EmptyShell(), PathSize);
+}
+
+
+CStyleCastExpr *CStyleCastExpr::Create(ASTContext &C, QualType T,
+ ExprValueKind VK, CastKind K, Expr *Op,
+ const CXXCastPath *BasePath,
+ TypeSourceInfo *WrittenTy,
+ SourceLocation L, SourceLocation R) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer =
+ C.Allocate(sizeof(CStyleCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ CStyleCastExpr *E =
+ new (Buffer) CStyleCastExpr(T, VK, K, Op, PathSize, WrittenTy, L, R);
+ if (PathSize) E->setCastPath(*BasePath);
+ return E;
+}
+
+CStyleCastExpr *CStyleCastExpr::CreateEmpty(ASTContext &C, unsigned PathSize) {
+ void *Buffer =
+ C.Allocate(sizeof(CStyleCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ return new (Buffer) CStyleCastExpr(EmptyShell(), PathSize);
+}
+
+/// getOpcodeStr - Turn an Opcode enum value into the punctuation string it
+/// corresponds to, e.g. "<<=".
+const char *BinaryOperator::getOpcodeStr(Opcode Op) {
+ switch (Op) {
+ case BO_PtrMemD: return ".*";
+ case BO_PtrMemI: return "->*";
+ case BO_Mul: return "*";
+ case BO_Div: return "/";
+ case BO_Rem: return "%";
+ case BO_Add: return "+";
+ case BO_Sub: return "-";
+ case BO_Shl: return "<<";
+ case BO_Shr: return ">>";
+ case BO_LT: return "<";
+ case BO_GT: return ">";
+ case BO_LE: return "<=";
+ case BO_GE: return ">=";
+ case BO_EQ: return "==";
+ case BO_NE: return "!=";
+ case BO_And: return "&";
+ case BO_Xor: return "^";
+ case BO_Or: return "|";
+ case BO_LAnd: return "&&";
+ case BO_LOr: return "||";
+ case BO_Assign: return "=";
+ case BO_MulAssign: return "*=";
+ case BO_DivAssign: return "/=";
+ case BO_RemAssign: return "%=";
+ case BO_AddAssign: return "+=";
+ case BO_SubAssign: return "-=";
+ case BO_ShlAssign: return "<<=";
+ case BO_ShrAssign: return ">>=";
+ case BO_AndAssign: return "&=";
+ case BO_XorAssign: return "^=";
+ case BO_OrAssign: return "|=";
+ case BO_Comma: return ",";
+ }
+
+ llvm_unreachable("Invalid OpCode!");
+}
+
+BinaryOperatorKind
+BinaryOperator::getOverloadedOpcode(OverloadedOperatorKind OO) {
+ switch (OO) {
+ default: llvm_unreachable("Not an overloadable binary operator");
+ case OO_Plus: return BO_Add;
+ case OO_Minus: return BO_Sub;
+ case OO_Star: return BO_Mul;
+ case OO_Slash: return BO_Div;
+ case OO_Percent: return BO_Rem;
+ case OO_Caret: return BO_Xor;
+ case OO_Amp: return BO_And;
+ case OO_Pipe: return BO_Or;
+ case OO_Equal: return BO_Assign;
+ case OO_Less: return BO_LT;
+ case OO_Greater: return BO_GT;
+ case OO_PlusEqual: return BO_AddAssign;
+ case OO_MinusEqual: return BO_SubAssign;
+ case OO_StarEqual: return BO_MulAssign;
+ case OO_SlashEqual: return BO_DivAssign;
+ case OO_PercentEqual: return BO_RemAssign;
+ case OO_CaretEqual: return BO_XorAssign;
+ case OO_AmpEqual: return BO_AndAssign;
+ case OO_PipeEqual: return BO_OrAssign;
+ case OO_LessLess: return BO_Shl;
+ case OO_GreaterGreater: return BO_Shr;
+ case OO_LessLessEqual: return BO_ShlAssign;
+ case OO_GreaterGreaterEqual: return BO_ShrAssign;
+ case OO_EqualEqual: return BO_EQ;
+ case OO_ExclaimEqual: return BO_NE;
+ case OO_LessEqual: return BO_LE;
+ case OO_GreaterEqual: return BO_GE;
+ case OO_AmpAmp: return BO_LAnd;
+ case OO_PipePipe: return BO_LOr;
+ case OO_Comma: return BO_Comma;
+ case OO_ArrowStar: return BO_PtrMemI;
+ }
+}
+
+OverloadedOperatorKind BinaryOperator::getOverloadedOperator(Opcode Opc) {
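+  // Indexed by BinaryOperatorKind; the entries must be kept in the same order
+  // as the Opcode enumeration.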
+ static const OverloadedOperatorKind OverOps[] = {
+ /* .* Cannot be overloaded */OO_None, OO_ArrowStar,
+ OO_Star, OO_Slash, OO_Percent,
+ OO_Plus, OO_Minus,
+ OO_LessLess, OO_GreaterGreater,
+ OO_Less, OO_Greater, OO_LessEqual, OO_GreaterEqual,
+ OO_EqualEqual, OO_ExclaimEqual,
+ OO_Amp,
+ OO_Caret,
+ OO_Pipe,
+ OO_AmpAmp,
+ OO_PipePipe,
+ OO_Equal, OO_StarEqual,
+ OO_SlashEqual, OO_PercentEqual,
+ OO_PlusEqual, OO_MinusEqual,
+ OO_LessLessEqual, OO_GreaterGreaterEqual,
+ OO_AmpEqual, OO_CaretEqual,
+ OO_PipeEqual,
+ OO_Comma
+ };
+ return OverOps[Opc];
+}
+
+InitListExpr::InitListExpr(ASTContext &C, SourceLocation lbraceloc,
+ Expr **initExprs, unsigned numInits,
+ SourceLocation rbraceloc)
+ : Expr(InitListExprClass, QualType(), VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ InitExprs(C, numInits),
+ LBraceLoc(lbraceloc), RBraceLoc(rbraceloc), SyntacticForm(0)
+{
+ sawArrayRangeDesignator(false);
+ setInitializesStdInitializerList(false);
+ for (unsigned I = 0; I != numInits; ++I) {
+ if (initExprs[I]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (initExprs[I]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (initExprs[I]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (initExprs[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+ }
+
+ InitExprs.insert(C, InitExprs.end(), initExprs, initExprs+numInits);
+}
+
+void InitListExpr::reserveInits(ASTContext &C, unsigned NumInits) {
+ if (NumInits > InitExprs.size())
+ InitExprs.reserve(C, NumInits);
+}
+
+void InitListExpr::resizeInits(ASTContext &C, unsigned NumInits) {
+ InitExprs.resize(C, NumInits, 0);
+}
+
+Expr *InitListExpr::updateInit(ASTContext &C, unsigned Init, Expr *expr) {
+ if (Init >= InitExprs.size()) {
+ InitExprs.insert(C, InitExprs.end(), Init - InitExprs.size() + 1, 0);
+ InitExprs.back() = expr;
+ return 0;
+ }
+
+ Expr *Result = cast_or_null<Expr>(InitExprs[Init]);
+ InitExprs[Init] = expr;
+ return Result;
+}
+
+void InitListExpr::setArrayFiller(Expr *filler) {
+ assert(!hasArrayFiller() && "Filler already set!");
+ ArrayFillerOrUnionFieldInit = filler;
+ // Fill out any "holes" in the array due to designated initializers.
+ Expr **inits = getInits();
+ for (unsigned i = 0, e = getNumInits(); i != e; ++i)
+ if (inits[i] == 0)
+ inits[i] = filler;
+}
+
+SourceRange InitListExpr::getSourceRange() const {
+ if (SyntacticForm)
+ return SyntacticForm->getSourceRange();
+ SourceLocation Beg = LBraceLoc, End = RBraceLoc;
+ if (Beg.isInvalid()) {
+ // Find the first non-null initializer.
+ for (InitExprsTy::const_iterator I = InitExprs.begin(),
+ E = InitExprs.end();
+ I != E; ++I) {
+ if (Stmt *S = *I) {
+ Beg = S->getLocStart();
+ break;
+ }
+ }
+ }
+ if (End.isInvalid()) {
+ // Find the first non-null initializer from the end.
+ for (InitExprsTy::const_reverse_iterator I = InitExprs.rbegin(),
+ E = InitExprs.rend();
+ I != E; ++I) {
+ if (Stmt *S = *I) {
+ End = S->getSourceRange().getEnd();
+ break;
+ }
+ }
+ }
+ return SourceRange(Beg, End);
+}
+
+/// getFunctionType - Return the underlying function type for this block.
+///
+const FunctionProtoType *BlockExpr::getFunctionType() const {
+ // The block pointer is never sugared, but the function type might be.
+ return cast<BlockPointerType>(getType())
+ ->getPointeeType()->castAs<FunctionProtoType>();
+}
+
+SourceLocation BlockExpr::getCaretLocation() const {
+ return TheBlock->getCaretLocation();
+}
+const Stmt *BlockExpr::getBody() const {
+ return TheBlock->getBody();
+}
+Stmt *BlockExpr::getBody() {
+ return TheBlock->getBody();
+}
+
+
+//===----------------------------------------------------------------------===//
+// Generic Expression Routines
+//===----------------------------------------------------------------------===//
+
+/// isUnusedResultAWarning - Return true if this immediate expression should
+/// be warned about if the result is unused. If so, fill in Loc and Ranges
+/// with location to warn on and the source range[s] to report with the
+/// warning.
+bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
+ SourceRange &R2, ASTContext &Ctx) const {
+ // Don't warn if the expr is type dependent. The type could end up
+ // instantiating to void.
+ if (isTypeDependent())
+ return false;
+
+ switch (getStmtClass()) {
+ default:
+ if (getType()->isVoidType())
+ return false;
+ Loc = getExprLoc();
+ R1 = getSourceRange();
+ return true;
+ case ParenExprClass:
+ return cast<ParenExpr>(this)->getSubExpr()->
+ isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ case GenericSelectionExprClass:
+ return cast<GenericSelectionExpr>(this)->getResultExpr()->
+ isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ case UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(this);
+
+ switch (UO->getOpcode()) {
+ default: break;
+ case UO_PostInc:
+ case UO_PostDec:
+ case UO_PreInc:
+ case UO_PreDec: // ++/--
+ return false; // Not a warning.
+ case UO_Deref:
+ // Dereferencing a volatile pointer is a side-effect.
+ if (Ctx.getCanonicalType(getType()).isVolatileQualified())
+ return false;
+ break;
+ case UO_Real:
+ case UO_Imag:
+ // accessing a piece of a volatile complex is a side-effect.
+ if (Ctx.getCanonicalType(UO->getSubExpr()->getType())
+ .isVolatileQualified())
+ return false;
+ break;
+ case UO_Extension:
+ return UO->getSubExpr()->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ }
+ Loc = UO->getOperatorLoc();
+ R1 = UO->getSubExpr()->getSourceRange();
+ return true;
+ }
+ case BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(this);
+ switch (BO->getOpcode()) {
+ default:
+ break;
+ // Consider the RHS of comma for side effects. LHS was checked by
+ // Sema::CheckCommaOperands.
+ case BO_Comma:
+ // ((foo = <blah>), 0) is an idiom for hiding the result (and
+ // lvalue-ness) of an assignment written in a macro.
+ if (IntegerLiteral *IE =
+ dyn_cast<IntegerLiteral>(BO->getRHS()->IgnoreParens()))
+ if (IE->getValue() == 0)
+ return false;
+ return BO->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ // Consider '||', '&&' to have side effects if the LHS or RHS does.
+ case BO_LAnd:
+ case BO_LOr:
+ if (!BO->getLHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx) ||
+ !BO->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx))
+ return false;
+ break;
+ }
+ if (BO->isAssignmentOp())
+ return false;
+ Loc = BO->getOperatorLoc();
+ R1 = BO->getLHS()->getSourceRange();
+ R2 = BO->getRHS()->getSourceRange();
+ return true;
+ }
+ case CompoundAssignOperatorClass:
+ case VAArgExprClass:
+ case AtomicExprClass:
+ return false;
+
+ case ConditionalOperatorClass: {
+    // If only one of the LHS or RHS would warn, the operator might be used
+    // for control flow. Only warn if both the LHS and RHS are warnings.
+ const ConditionalOperator *Exp = cast<ConditionalOperator>(this);
+ if (!Exp->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx))
+ return false;
+ if (!Exp->getLHS())
+ return true;
+ return Exp->getLHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ }
+
+ case MemberExprClass:
+    // If the accessed member is volatile-qualified, accessing it is a
+    // side effect.
+ if (Ctx.getCanonicalType(getType()).isVolatileQualified())
+ return false;
+ Loc = cast<MemberExpr>(this)->getMemberLoc();
+ R1 = SourceRange(Loc, Loc);
+ R2 = cast<MemberExpr>(this)->getBase()->getSourceRange();
+ return true;
+
+ case ArraySubscriptExprClass:
+    // If the accessed element is volatile-qualified, accessing it is a
+    // side effect.
+ if (Ctx.getCanonicalType(getType()).isVolatileQualified())
+ return false;
+ Loc = cast<ArraySubscriptExpr>(this)->getRBracketLoc();
+ R1 = cast<ArraySubscriptExpr>(this)->getLHS()->getSourceRange();
+ R2 = cast<ArraySubscriptExpr>(this)->getRHS()->getSourceRange();
+ return true;
+
+ case CXXOperatorCallExprClass: {
+    // We warn about operator== and operator!= even for user-defined operator
+    // overloads, as there is no reasonable way to define these such that they
+    // have non-trivial, desirable side effects. See the -Wunused-comparison
+    // warning: these operators are commonly typo'ed, and so warning on them
+    // provides additional value as well. If this list is updated,
+    // DiagnoseUnusedComparison should be as well.
+ const CXXOperatorCallExpr *Op = cast<CXXOperatorCallExpr>(this);
+ if (Op->getOperator() == OO_EqualEqual ||
+ Op->getOperator() == OO_ExclaimEqual) {
+ Loc = Op->getOperatorLoc();
+ R1 = Op->getSourceRange();
+ return true;
+ }
+
+ // Fallthrough for generic call handling.
+ }
+ case CallExprClass:
+ case CXXMemberCallExprClass:
+ case UserDefinedLiteralClass: {
+ // If this is a direct call, get the callee.
+ const CallExpr *CE = cast<CallExpr>(this);
+ if (const Decl *FD = CE->getCalleeDecl()) {
+ // If the callee has attribute pure, const, or warn_unused_result, warn
+ // about it. void foo() { strlen("bar"); } should warn.
+ //
+ // Note: If new cases are added here, DiagnoseUnusedExprResult should be
+ // updated to match for QoI.
+ if (FD->getAttr<WarnUnusedResultAttr>() ||
+ FD->getAttr<PureAttr>() || FD->getAttr<ConstAttr>()) {
+ Loc = CE->getCallee()->getLocStart();
+ R1 = CE->getCallee()->getSourceRange();
+
+ if (unsigned NumArgs = CE->getNumArgs())
+ R2 = SourceRange(CE->getArg(0)->getLocStart(),
+ CE->getArg(NumArgs-1)->getLocEnd());
+ return true;
+ }
+ }
+ return false;
+ }
+
+ case CXXTemporaryObjectExprClass:
+ case CXXConstructExprClass:
+ return false;
+
+ case ObjCMessageExprClass: {
+ const ObjCMessageExpr *ME = cast<ObjCMessageExpr>(this);
+ if (Ctx.getLangOpts().ObjCAutoRefCount &&
+ ME->isInstanceMessage() &&
+ !ME->getType()->isVoidType() &&
+ ME->getSelector().getIdentifierInfoForSlot(0) &&
+ ME->getSelector().getIdentifierInfoForSlot(0)
+ ->getName().startswith("init")) {
+ Loc = getExprLoc();
+ R1 = ME->getSourceRange();
+ return true;
+ }
+
+ const ObjCMethodDecl *MD = ME->getMethodDecl();
+ if (MD && MD->getAttr<WarnUnusedResultAttr>()) {
+ Loc = getExprLoc();
+ return true;
+ }
+ return false;
+ }
+
+ case ObjCPropertyRefExprClass:
+ Loc = getExprLoc();
+ R1 = getSourceRange();
+ return true;
+
+ case PseudoObjectExprClass: {
+ const PseudoObjectExpr *PO = cast<PseudoObjectExpr>(this);
+
+ // Only complain about things that have the form of a getter.
+ if (isa<UnaryOperator>(PO->getSyntacticForm()) ||
+ isa<BinaryOperator>(PO->getSyntacticForm()))
+ return false;
+
+ Loc = getExprLoc();
+ R1 = getSourceRange();
+ return true;
+ }
+
+ case StmtExprClass: {
+    // Statement exprs don't logically have side effects themselves, but are
+    // sometimes used in macros in ways that give them a type that is unused.
+    // For example, ({ blah; foo(); }) will end up with a type if foo has a
+    // type. However, if the result of the stmt expr is dead, we don't want to
+    // emit a warning.
+ const CompoundStmt *CS = cast<StmtExpr>(this)->getSubStmt();
+ if (!CS->body_empty()) {
+ if (const Expr *E = dyn_cast<Expr>(CS->body_back()))
+ return E->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ if (const LabelStmt *Label = dyn_cast<LabelStmt>(CS->body_back()))
+ if (const Expr *E = dyn_cast<Expr>(Label->getSubStmt()))
+ return E->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ }
+
+ if (getType()->isVoidType())
+ return false;
+ Loc = cast<StmtExpr>(this)->getLParenLoc();
+ R1 = getSourceRange();
+ return true;
+ }
+ case CStyleCastExprClass:
+ // If this is an explicit cast to void, allow it. People do this when they
+ // think they know what they're doing :).
+ if (getType()->isVoidType())
+ return false;
+ Loc = cast<CStyleCastExpr>(this)->getLParenLoc();
+ R1 = cast<CStyleCastExpr>(this)->getSubExpr()->getSourceRange();
+ return true;
+ case CXXFunctionalCastExprClass: {
+ if (getType()->isVoidType())
+ return false;
+ const CastExpr *CE = cast<CastExpr>(this);
+
+ // If this is a cast to void or a constructor conversion, check the operand.
+ // Otherwise, the result of the cast is unused.
+ if (CE->getCastKind() == CK_ToVoid ||
+ CE->getCastKind() == CK_ConstructorConversion)
+ return (cast<CastExpr>(this)->getSubExpr()
+ ->isUnusedResultAWarning(Loc, R1, R2, Ctx));
+ Loc = cast<CXXFunctionalCastExpr>(this)->getTypeBeginLoc();
+ R1 = cast<CXXFunctionalCastExpr>(this)->getSubExpr()->getSourceRange();
+ return true;
+ }
+
+ case ImplicitCastExprClass:
+ // Check the operand, since implicit casts are inserted by Sema
+ return (cast<ImplicitCastExpr>(this)
+ ->getSubExpr()->isUnusedResultAWarning(Loc, R1, R2, Ctx));
+
+ case CXXDefaultArgExprClass:
+ return (cast<CXXDefaultArgExpr>(this)
+ ->getExpr()->isUnusedResultAWarning(Loc, R1, R2, Ctx));
+
+ case CXXNewExprClass:
+ // FIXME: In theory, there might be new expressions that don't have side
+ // effects (e.g. a placement new with an uninitialized POD).
+ case CXXDeleteExprClass:
+ return false;
+ case CXXBindTemporaryExprClass:
+ return (cast<CXXBindTemporaryExpr>(this)
+ ->getSubExpr()->isUnusedResultAWarning(Loc, R1, R2, Ctx));
+ case ExprWithCleanupsClass:
+ return (cast<ExprWithCleanups>(this)
+ ->getSubExpr()->isUnusedResultAWarning(Loc, R1, R2, Ctx));
+ }
+}
+
+/// isOBJCGCCandidate - Check if an expression is ObjC GC'able.
+/// Returns true if it is; false otherwise.
+bool Expr::isOBJCGCCandidate(ASTContext &Ctx) const {
+ const Expr *E = IgnoreParens();
+ switch (E->getStmtClass()) {
+ default:
+ return false;
+ case ObjCIvarRefExprClass:
+ return true;
+ case Expr::UnaryOperatorClass:
+ return cast<UnaryOperator>(E)->getSubExpr()->isOBJCGCCandidate(Ctx);
+ case ImplicitCastExprClass:
+ return cast<ImplicitCastExpr>(E)->getSubExpr()->isOBJCGCCandidate(Ctx);
+ case MaterializeTemporaryExprClass:
+ return cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr()
+ ->isOBJCGCCandidate(Ctx);
+ case CStyleCastExprClass:
+ return cast<CStyleCastExpr>(E)->getSubExpr()->isOBJCGCCandidate(Ctx);
+ case DeclRefExprClass: {
+ const Decl *D = cast<DeclRefExpr>(E)->getDecl();
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->hasGlobalStorage())
+ return true;
+ QualType T = VD->getType();
+      // A variable of pointer type is always a gc'able candidate,
+      // unless it is __weak.
+ return T->isPointerType() &&
+ (Ctx.getObjCGCAttrKind(T) != Qualifiers::Weak);
+ }
+ return false;
+ }
+ case MemberExprClass: {
+ const MemberExpr *M = cast<MemberExpr>(E);
+ return M->getBase()->isOBJCGCCandidate(Ctx);
+ }
+ case ArraySubscriptExprClass:
+ return cast<ArraySubscriptExpr>(E)->getBase()->isOBJCGCCandidate(Ctx);
+ }
+}
+
+bool Expr::isBoundMemberFunction(ASTContext &Ctx) const {
+ if (isTypeDependent())
+ return false;
+ return ClassifyLValue(Ctx) == Expr::LV_MemberFunction;
+}
+
+QualType Expr::findBoundMemberType(const Expr *expr) {
+ assert(expr->hasPlaceholderType(BuiltinType::BoundMember));
+
+ // Bound member expressions are always one of these possibilities:
+ // x->m x.m x->*y x.*y
+ // (possibly parenthesized)
+
+ expr = expr->IgnoreParens();
+ if (const MemberExpr *mem = dyn_cast<MemberExpr>(expr)) {
+ assert(isa<CXXMethodDecl>(mem->getMemberDecl()));
+ return mem->getMemberDecl()->getType();
+ }
+
+ if (const BinaryOperator *op = dyn_cast<BinaryOperator>(expr)) {
+ QualType type = op->getRHS()->getType()->castAs<MemberPointerType>()
+ ->getPointeeType();
+ assert(type->isFunctionType());
+ return type;
+ }
+
+ assert(isa<UnresolvedMemberExpr>(expr));
+ return QualType();
+}
+
+static Expr::CanThrowResult MergeCanThrow(Expr::CanThrowResult CT1,
+ Expr::CanThrowResult CT2) {
+ // CanThrowResult constants are ordered so that the maximum is the correct
+ // merge result.
+ return CT1 > CT2 ? CT1 : CT2;
+}
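+// For example, merging CT_Cannot with CT_Dependent yields CT_Dependent, and
+// merging anything with CT_Can yields CT_Can, given the CT_Cannot <
+// CT_Dependent < CT_Can ordering the comment above relies on.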
+
+static Expr::CanThrowResult CanSubExprsThrow(ASTContext &C, const Expr *CE) {
+ Expr *E = const_cast<Expr*>(CE);
+ Expr::CanThrowResult R = Expr::CT_Cannot;
+ for (Expr::child_range I = E->children(); I && R != Expr::CT_Can; ++I) {
+ R = MergeCanThrow(R, cast<Expr>(*I)->CanThrow(C));
+ }
+ return R;
+}
+
+static Expr::CanThrowResult CanCalleeThrow(ASTContext &Ctx, const Expr *E,
+ const Decl *D,
+ bool NullThrows = true) {
+ if (!D)
+ return NullThrows ? Expr::CT_Can : Expr::CT_Cannot;
+
+ // See if we can get a function type from the decl somehow.
+ const ValueDecl *VD = dyn_cast<ValueDecl>(D);
+ if (!VD) // If we have no clue what we're calling, assume the worst.
+ return Expr::CT_Can;
+
+ // As an extension, we assume that __attribute__((nothrow)) functions don't
+ // throw.
+ if (isa<FunctionDecl>(D) && D->hasAttr<NoThrowAttr>())
+ return Expr::CT_Cannot;
+
+ QualType T = VD->getType();
+ const FunctionProtoType *FT;
+ if ((FT = T->getAs<FunctionProtoType>())) {
+ } else if (const PointerType *PT = T->getAs<PointerType>())
+ FT = PT->getPointeeType()->getAs<FunctionProtoType>();
+ else if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ FT = RT->getPointeeType()->getAs<FunctionProtoType>();
+ else if (const MemberPointerType *MT = T->getAs<MemberPointerType>())
+ FT = MT->getPointeeType()->getAs<FunctionProtoType>();
+ else if (const BlockPointerType *BT = T->getAs<BlockPointerType>())
+ FT = BT->getPointeeType()->getAs<FunctionProtoType>();
+
+ if (!FT)
+ return Expr::CT_Can;
+
+ if (FT->getExceptionSpecType() == EST_Delayed) {
+ assert(isa<CXXConstructorDecl>(D) &&
+ "only constructor exception specs can be unknown");
+ Ctx.getDiagnostics().Report(E->getLocStart(),
+ diag::err_exception_spec_unknown)
+ << E->getSourceRange();
+ return Expr::CT_Can;
+ }
+
+ return FT->isNothrow(Ctx) ? Expr::CT_Cannot : Expr::CT_Can;
+}
+
+static Expr::CanThrowResult CanDynamicCastThrow(const CXXDynamicCastExpr *DC) {
+ if (DC->isTypeDependent())
+ return Expr::CT_Dependent;
+
+ if (!DC->getTypeAsWritten()->isReferenceType())
+ return Expr::CT_Cannot;
+
+ if (DC->getSubExpr()->isTypeDependent())
+ return Expr::CT_Dependent;
+
+ return DC->getCastKind() == clang::CK_Dynamic? Expr::CT_Can : Expr::CT_Cannot;
+}
+
+static Expr::CanThrowResult CanTypeidThrow(ASTContext &C,
+ const CXXTypeidExpr *DC) {
+ if (DC->isTypeOperand())
+ return Expr::CT_Cannot;
+
+ Expr *Op = DC->getExprOperand();
+ if (Op->isTypeDependent())
+ return Expr::CT_Dependent;
+
+ const RecordType *RT = Op->getType()->getAs<RecordType>();
+ if (!RT)
+ return Expr::CT_Cannot;
+
+ if (!cast<CXXRecordDecl>(RT->getDecl())->isPolymorphic())
+ return Expr::CT_Cannot;
+
+ if (Op->Classify(C).isPRValue())
+ return Expr::CT_Cannot;
+
+ return Expr::CT_Can;
+}
+
+Expr::CanThrowResult Expr::CanThrow(ASTContext &C) const {
+ // C++ [expr.unary.noexcept]p3:
+ // [Can throw] if in a potentially-evaluated context the expression would
+ // contain:
+ switch (getStmtClass()) {
+ case CXXThrowExprClass:
+ // - a potentially evaluated throw-expression
+ return CT_Can;
+
+ case CXXDynamicCastExprClass: {
+ // - a potentially evaluated dynamic_cast expression dynamic_cast<T>(v),
+ // where T is a reference type, that requires a run-time check
+ CanThrowResult CT = CanDynamicCastThrow(cast<CXXDynamicCastExpr>(this));
+ if (CT == CT_Can)
+ return CT;
+ return MergeCanThrow(CT, CanSubExprsThrow(C, this));
+ }
+
+ case CXXTypeidExprClass:
+ // - a potentially evaluated typeid expression applied to a glvalue
+ // expression whose type is a polymorphic class type
+ return CanTypeidThrow(C, cast<CXXTypeidExpr>(this));
+
+ // - a potentially evaluated call to a function, member function, function
+ // pointer, or member function pointer that does not have a non-throwing
+ // exception-specification
+ case CallExprClass:
+ case CXXMemberCallExprClass:
+ case CXXOperatorCallExprClass:
+ case UserDefinedLiteralClass: {
+ const CallExpr *CE = cast<CallExpr>(this);
+ CanThrowResult CT;
+ if (isTypeDependent())
+ CT = CT_Dependent;
+ else if (isa<CXXPseudoDestructorExpr>(CE->getCallee()->IgnoreParens()))
+ CT = CT_Cannot;
+ else
+ CT = CanCalleeThrow(C, this, CE->getCalleeDecl());
+ if (CT == CT_Can)
+ return CT;
+ return MergeCanThrow(CT, CanSubExprsThrow(C, this));
+ }
+
+ case CXXConstructExprClass:
+ case CXXTemporaryObjectExprClass: {
+ CanThrowResult CT = CanCalleeThrow(C, this,
+ cast<CXXConstructExpr>(this)->getConstructor());
+ if (CT == CT_Can)
+ return CT;
+ return MergeCanThrow(CT, CanSubExprsThrow(C, this));
+ }
+
+ case LambdaExprClass: {
+ const LambdaExpr *Lambda = cast<LambdaExpr>(this);
+ CanThrowResult CT = Expr::CT_Cannot;
+ for (LambdaExpr::capture_init_iterator Cap = Lambda->capture_init_begin(),
+ CapEnd = Lambda->capture_init_end();
+ Cap != CapEnd; ++Cap)
+ CT = MergeCanThrow(CT, (*Cap)->CanThrow(C));
+ return CT;
+ }
+
+ case CXXNewExprClass: {
+ CanThrowResult CT;
+ if (isTypeDependent())
+ CT = CT_Dependent;
+ else
+ CT = CanCalleeThrow(C, this, cast<CXXNewExpr>(this)->getOperatorNew());
+ if (CT == CT_Can)
+ return CT;
+ return MergeCanThrow(CT, CanSubExprsThrow(C, this));
+ }
+
+ case CXXDeleteExprClass: {
+ CanThrowResult CT;
+ QualType DTy = cast<CXXDeleteExpr>(this)->getDestroyedType();
+ if (DTy.isNull() || DTy->isDependentType()) {
+ CT = CT_Dependent;
+ } else {
+ CT = CanCalleeThrow(C, this,
+ cast<CXXDeleteExpr>(this)->getOperatorDelete());
+ if (const RecordType *RT = DTy->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ CT = MergeCanThrow(CT, CanCalleeThrow(C, this, RD->getDestructor()));
+ }
+ if (CT == CT_Can)
+ return CT;
+ }
+ return MergeCanThrow(CT, CanSubExprsThrow(C, this));
+ }
+
+ case CXXBindTemporaryExprClass: {
+ // The bound temporary has to be destroyed again, which might throw.
+ CanThrowResult CT = CanCalleeThrow(C, this,
+ cast<CXXBindTemporaryExpr>(this)->getTemporary()->getDestructor());
+ if (CT == CT_Can)
+ return CT;
+ return MergeCanThrow(CT, CanSubExprsThrow(C, this));
+ }
+
+ // ObjC message sends are like function calls, but never have exception
+ // specs.
+ case ObjCMessageExprClass:
+ case ObjCPropertyRefExprClass:
+ case ObjCSubscriptRefExprClass:
+ return CT_Can;
+
+ // All the ObjC literals that are implemented as calls are
+ // potentially throwing unless we decide to close off that
+ // possibility.
+ case ObjCArrayLiteralClass:
+ case ObjCDictionaryLiteralClass:
+ case ObjCNumericLiteralClass:
+ return CT_Can;
+
+ // Many other things have subexpressions, so we have to test those.
+ // Some are simple:
+ case ConditionalOperatorClass:
+ case CompoundLiteralExprClass:
+ case CXXConstCastExprClass:
+ case CXXDefaultArgExprClass:
+ case CXXReinterpretCastExprClass:
+ case DesignatedInitExprClass:
+ case ExprWithCleanupsClass:
+ case ExtVectorElementExprClass:
+ case InitListExprClass:
+ case MemberExprClass:
+ case ObjCIsaExprClass:
+ case ObjCIvarRefExprClass:
+ case ParenExprClass:
+ case ParenListExprClass:
+ case ShuffleVectorExprClass:
+ case VAArgExprClass:
+ return CanSubExprsThrow(C, this);
+
+ // Some might be dependent for other reasons.
+ case ArraySubscriptExprClass:
+ case BinaryOperatorClass:
+ case CompoundAssignOperatorClass:
+ case CStyleCastExprClass:
+ case CXXStaticCastExprClass:
+ case CXXFunctionalCastExprClass:
+ case ImplicitCastExprClass:
+ case MaterializeTemporaryExprClass:
+ case UnaryOperatorClass: {
+ CanThrowResult CT = isTypeDependent() ? CT_Dependent : CT_Cannot;
+ return MergeCanThrow(CT, CanSubExprsThrow(C, this));
+ }
+
+ // FIXME: We should handle StmtExpr, but that opens a MASSIVE can of worms.
+ case StmtExprClass:
+ return CT_Can;
+
+ case ChooseExprClass:
+ if (isTypeDependent() || isValueDependent())
+ return CT_Dependent;
+ return cast<ChooseExpr>(this)->getChosenSubExpr(C)->CanThrow(C);
+
+ case GenericSelectionExprClass:
+ if (cast<GenericSelectionExpr>(this)->isResultDependent())
+ return CT_Dependent;
+ return cast<GenericSelectionExpr>(this)->getResultExpr()->CanThrow(C);
+
+ // Some expressions are always dependent.
+ case CXXDependentScopeMemberExprClass:
+ case CXXUnresolvedConstructExprClass:
+ case DependentScopeDeclRefExprClass:
+ return CT_Dependent;
+
+ case AtomicExprClass:
+ case AsTypeExprClass:
+ case BinaryConditionalOperatorClass:
+ case BlockExprClass:
+ case CUDAKernelCallExprClass:
+ case DeclRefExprClass:
+ case ObjCBridgedCastExprClass:
+ case ObjCIndirectCopyRestoreExprClass:
+ case ObjCProtocolExprClass:
+ case ObjCSelectorExprClass:
+ case OffsetOfExprClass:
+ case PackExpansionExprClass:
+ case PseudoObjectExprClass:
+ case SubstNonTypeTemplateParmExprClass:
+ case SubstNonTypeTemplateParmPackExprClass:
+ case UnaryExprOrTypeTraitExprClass:
+ case UnresolvedLookupExprClass:
+ case UnresolvedMemberExprClass:
+ // FIXME: Can any of the above throw? If so, when?
+ return CT_Cannot;
+
+ case AddrLabelExprClass:
+ case ArrayTypeTraitExprClass:
+ case BinaryTypeTraitExprClass:
+ case TypeTraitExprClass:
+ case CXXBoolLiteralExprClass:
+ case CXXNoexceptExprClass:
+ case CXXNullPtrLiteralExprClass:
+ case CXXPseudoDestructorExprClass:
+ case CXXScalarValueInitExprClass:
+ case CXXThisExprClass:
+ case CXXUuidofExprClass:
+ case CharacterLiteralClass:
+ case ExpressionTraitExprClass:
+ case FloatingLiteralClass:
+ case GNUNullExprClass:
+ case ImaginaryLiteralClass:
+ case ImplicitValueInitExprClass:
+ case IntegerLiteralClass:
+ case ObjCEncodeExprClass:
+ case ObjCStringLiteralClass:
+ case ObjCBoolLiteralExprClass:
+ case OpaqueValueExprClass:
+ case PredefinedExprClass:
+ case SizeOfPackExprClass:
+ case StringLiteralClass:
+ case UnaryTypeTraitExprClass:
+ // These expressions can never throw.
+ return CT_Cannot;
+
+#define STMT(CLASS, PARENT) case CLASS##Class:
+#define STMT_RANGE(Base, First, Last)
+#define LAST_STMT_RANGE(BASE, FIRST, LAST)
+#define EXPR(CLASS, PARENT)
+#define ABSTRACT_STMT(STMT)
+#include "clang/AST/StmtNodes.inc"
+ case NoStmtClass:
+ llvm_unreachable("Invalid class for expression");
+ }
+ llvm_unreachable("Bogus StmtClass");
+}
+
+Expr* Expr::IgnoreParens() {
+ Expr* E = this;
+ while (true) {
+ if (ParenExpr* P = dyn_cast<ParenExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+ if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
+ if (P->getOpcode() == UO_Extension) {
+ E = P->getSubExpr();
+ continue;
+ }
+ }
+ if (GenericSelectionExpr* P = dyn_cast<GenericSelectionExpr>(E)) {
+ if (!P->isResultDependent()) {
+ E = P->getResultExpr();
+ continue;
+ }
+ }
+ return E;
+ }
+}
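+// For example, IgnoreParens() reduces "((__extension__ (x)))" to "x", but it
+// leaves casts in place; the variants below strip progressively more sugar.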
+
+/// IgnoreParenCasts - Ignore parentheses and casts. Strip off any ParenExpr
+/// or CastExprs or ImplicitCastExprs, returning their operand.
+Expr *Expr::IgnoreParenCasts() {
+ Expr *E = this;
+ while (true) {
+ if (ParenExpr* P = dyn_cast<ParenExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+ if (CastExpr *P = dyn_cast<CastExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+ if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
+ if (P->getOpcode() == UO_Extension) {
+ E = P->getSubExpr();
+ continue;
+ }
+ }
+ if (GenericSelectionExpr* P = dyn_cast<GenericSelectionExpr>(E)) {
+ if (!P->isResultDependent()) {
+ E = P->getResultExpr();
+ continue;
+ }
+ }
+ if (MaterializeTemporaryExpr *Materialize
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = Materialize->GetTemporaryExpr();
+ continue;
+ }
+ if (SubstNonTypeTemplateParmExpr *NTTP
+ = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
+ E = NTTP->getReplacement();
+ continue;
+ }
+ return E;
+ }
+}
+
+/// IgnoreParenLValueCasts - Ignore parentheses and lvalue-to-rvalue
+/// casts. This is intended purely as a temporary workaround for code
+/// that hasn't yet been rewritten to do the right thing about those
+/// casts, and may disappear along with the last internal use.
+Expr *Expr::IgnoreParenLValueCasts() {
+ Expr *E = this;
+ while (true) {
+ if (ParenExpr *P = dyn_cast<ParenExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ } else if (CastExpr *P = dyn_cast<CastExpr>(E)) {
+ if (P->getCastKind() == CK_LValueToRValue) {
+ E = P->getSubExpr();
+ continue;
+ }
+ } else if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
+ if (P->getOpcode() == UO_Extension) {
+ E = P->getSubExpr();
+ continue;
+ }
+ } else if (GenericSelectionExpr* P = dyn_cast<GenericSelectionExpr>(E)) {
+ if (!P->isResultDependent()) {
+ E = P->getResultExpr();
+ continue;
+ }
+ } else if (MaterializeTemporaryExpr *Materialize
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = Materialize->GetTemporaryExpr();
+ continue;
+ } else if (SubstNonTypeTemplateParmExpr *NTTP
+ = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
+ E = NTTP->getReplacement();
+ continue;
+ }
+ break;
+ }
+ return E;
+}
+
+Expr *Expr::IgnoreParenImpCasts() {
+ Expr *E = this;
+ while (true) {
+ if (ParenExpr *P = dyn_cast<ParenExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+ if (ImplicitCastExpr *P = dyn_cast<ImplicitCastExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+ if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
+ if (P->getOpcode() == UO_Extension) {
+ E = P->getSubExpr();
+ continue;
+ }
+ }
+ if (GenericSelectionExpr* P = dyn_cast<GenericSelectionExpr>(E)) {
+ if (!P->isResultDependent()) {
+ E = P->getResultExpr();
+ continue;
+ }
+ }
+ if (MaterializeTemporaryExpr *Materialize
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = Materialize->GetTemporaryExpr();
+ continue;
+ }
+ if (SubstNonTypeTemplateParmExpr *NTTP
+ = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
+ E = NTTP->getReplacement();
+ continue;
+ }
+ return E;
+ }
+}
+
+Expr *Expr::IgnoreConversionOperator() {
+ if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(this)) {
+ if (MCE->getMethodDecl() && isa<CXXConversionDecl>(MCE->getMethodDecl()))
+ return MCE->getImplicitObjectArgument();
+ }
+ return this;
+}
+
+/// IgnoreParenNoopCasts - Ignore parentheses and casts that do not change the
+/// value (including ptr->int casts of the same size). Strip off any
+/// ParenExpr or CastExprs, returning their operand.
+Expr *Expr::IgnoreParenNoopCasts(ASTContext &Ctx) {
+ Expr *E = this;
+ while (true) {
+ if (ParenExpr *P = dyn_cast<ParenExpr>(E)) {
+ E = P->getSubExpr();
+ continue;
+ }
+
+ if (CastExpr *P = dyn_cast<CastExpr>(E)) {
+      // We ignore integer <-> integer casts of the same width, ptr <-> ptr
+      // casts, and ptr <-> int casts of the same width. We also ignore all
+      // identity casts.
+ Expr *SE = P->getSubExpr();
+
+ if (Ctx.hasSameUnqualifiedType(E->getType(), SE->getType())) {
+ E = SE;
+ continue;
+ }
+
+ if ((E->getType()->isPointerType() ||
+ E->getType()->isIntegralType(Ctx)) &&
+ (SE->getType()->isPointerType() ||
+ SE->getType()->isIntegralType(Ctx)) &&
+ Ctx.getTypeSize(E->getType()) == Ctx.getTypeSize(SE->getType())) {
+ E = SE;
+ continue;
+ }
+ }
+
+ if (UnaryOperator* P = dyn_cast<UnaryOperator>(E)) {
+ if (P->getOpcode() == UO_Extension) {
+ E = P->getSubExpr();
+ continue;
+ }
+ }
+
+ if (GenericSelectionExpr* P = dyn_cast<GenericSelectionExpr>(E)) {
+ if (!P->isResultDependent()) {
+ E = P->getResultExpr();
+ continue;
+ }
+ }
+
+ if (SubstNonTypeTemplateParmExpr *NTTP
+ = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
+ E = NTTP->getReplacement();
+ continue;
+ }
+
+ return E;
+ }
+}
+
+bool Expr::isDefaultArgument() const {
+ const Expr *E = this;
+ if (const MaterializeTemporaryExpr *M = dyn_cast<MaterializeTemporaryExpr>(E))
+ E = M->GetTemporaryExpr();
+
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
+ E = ICE->getSubExprAsWritten();
+
+ return isa<CXXDefaultArgExpr>(E);
+}
+
+/// \brief Skip over any no-op casts and any temporary-binding
+/// expressions.
+static const Expr *skipTemporaryBindingsNoOpCastsAndParens(const Expr *E) {
+ if (const MaterializeTemporaryExpr *M = dyn_cast<MaterializeTemporaryExpr>(E))
+ E = M->GetTemporaryExpr();
+
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() == CK_NoOp)
+ E = ICE->getSubExpr();
+ else
+ break;
+ }
+
+ while (const CXXBindTemporaryExpr *BE = dyn_cast<CXXBindTemporaryExpr>(E))
+ E = BE->getSubExpr();
+
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() == CK_NoOp)
+ E = ICE->getSubExpr();
+ else
+ break;
+ }
+
+ return E->IgnoreParens();
+}
+
+/// isTemporaryObject - Determines if this expression produces a
+/// temporary of the given class type.
+bool Expr::isTemporaryObject(ASTContext &C, const CXXRecordDecl *TempTy) const {
+ if (!C.hasSameUnqualifiedType(getType(), C.getTypeDeclType(TempTy)))
+ return false;
+
+ const Expr *E = skipTemporaryBindingsNoOpCastsAndParens(this);
+
+ // Temporaries are by definition pr-values of class type.
+ if (!E->Classify(C).isPRValue()) {
+    // In this context, a property reference is a message send and is a pr-value.
+ if (!isa<ObjCPropertyRefExpr>(E))
+ return false;
+ }
+
+ // Black-list a few cases which yield pr-values of class type that don't
+ // refer to temporaries of that type:
+
+ // - implicit derived-to-base conversions
+ if (isa<ImplicitCastExpr>(E)) {
+ switch (cast<ImplicitCastExpr>(E)->getCastKind()) {
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ return false;
+ default:
+ break;
+ }
+ }
+
+ // - member expressions (all)
+ if (isa<MemberExpr>(E))
+ return false;
+
+ // - opaque values (all)
+ if (isa<OpaqueValueExpr>(E))
+ return false;
+
+ return true;
+}
+
+bool Expr::isImplicitCXXThis() const {
+ const Expr *E = this;
+
+ // Strip away parentheses and casts we don't care about.
+ while (true) {
+ if (const ParenExpr *Paren = dyn_cast<ParenExpr>(E)) {
+ E = Paren->getSubExpr();
+ continue;
+ }
+
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() == CK_NoOp ||
+ ICE->getCastKind() == CK_LValueToRValue ||
+ ICE->getCastKind() == CK_DerivedToBase ||
+ ICE->getCastKind() == CK_UncheckedDerivedToBase) {
+ E = ICE->getSubExpr();
+ continue;
+ }
+ }
+
+ if (const UnaryOperator* UnOp = dyn_cast<UnaryOperator>(E)) {
+ if (UnOp->getOpcode() == UO_Extension) {
+ E = UnOp->getSubExpr();
+ continue;
+ }
+ }
+
+ if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ E = M->GetTemporaryExpr();
+ continue;
+ }
+
+ break;
+ }
+
+ if (const CXXThisExpr *This = dyn_cast<CXXThisExpr>(E))
+ return This->isImplicit();
+
+ return false;
+}
+
+/// hasAnyTypeDependentArguments - Determines if any of the expressions
+/// in Exprs is type-dependent.
+bool Expr::hasAnyTypeDependentArguments(llvm::ArrayRef<Expr *> Exprs) {
+ for (unsigned I = 0; I < Exprs.size(); ++I)
+ if (Exprs[I]->isTypeDependent())
+ return true;
+
+ return false;
+}
+
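+/// isConstantInitializer - Determine (approximately) whether this expression
+/// can be used as a compile-time constant initializer; for example, an
+/// initializer list of literals qualifies, while one containing a call to an
+/// ordinary (non-constant-evaluable) function does not.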
+bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef) const {
+  // This function is attempting to determine whether an expression is an
+  // initializer which can be evaluated at compile-time. isEvaluatable handles
+  // most of the cases, but it can't deal with some initializer-specific
+  // expressions, and it can't deal with aggregates; we deal with those here,
+  // and fall back to isEvaluatable for the other cases.
+
+ // If we ever capture reference-binding directly in the AST, we can
+ // kill the second parameter.
+
+ if (IsForRef) {
+ EvalResult Result;
+ return EvaluateAsLValue(Result, Ctx) && !Result.HasSideEffects;
+ }
+
+ switch (getStmtClass()) {
+ default: break;
+ case IntegerLiteralClass:
+ case FloatingLiteralClass:
+ case StringLiteralClass:
+ case ObjCStringLiteralClass:
+ case ObjCEncodeExprClass:
+ return true;
+ case CXXTemporaryObjectExprClass:
+ case CXXConstructExprClass: {
+ const CXXConstructExpr *CE = cast<CXXConstructExpr>(this);
+
+ // Only if it's
+ if (CE->getConstructor()->isTrivial()) {
+ // 1) an application of the trivial default constructor or
+ if (!CE->getNumArgs()) return true;
+
+ // 2) an elidable trivial copy construction of an operand which is
+ // itself a constant initializer. Note that we consider the
+ // operand on its own, *not* as a reference binding.
+ if (CE->isElidable() &&
+ CE->getArg(0)->isConstantInitializer(Ctx, false))
+ return true;
+ }
+
+ // 3) a foldable constexpr constructor.
+ break;
+ }
+ case CompoundLiteralExprClass: {
+ // This handles gcc's extension that allows global initializers like
+ // "struct x {int x;} x = (struct x) {};".
+ // FIXME: This accepts other cases it shouldn't!
+ const Expr *Exp = cast<CompoundLiteralExpr>(this)->getInitializer();
+ return Exp->isConstantInitializer(Ctx, false);
+ }
+ case InitListExprClass: {
+ // FIXME: This doesn't deal with fields with reference types correctly.
+ // FIXME: This incorrectly allows pointers cast to integers to be assigned
+ // to bitfields.
+ const InitListExpr *Exp = cast<InitListExpr>(this);
+ unsigned numInits = Exp->getNumInits();
+ for (unsigned i = 0; i < numInits; i++) {
+ if (!Exp->getInit(i)->isConstantInitializer(Ctx, false))
+ return false;
+ }
+ return true;
+ }
+ case ImplicitValueInitExprClass:
+ return true;
+ case ParenExprClass:
+ return cast<ParenExpr>(this)->getSubExpr()
+ ->isConstantInitializer(Ctx, IsForRef);
+ case GenericSelectionExprClass:
+ if (cast<GenericSelectionExpr>(this)->isResultDependent())
+ return false;
+ return cast<GenericSelectionExpr>(this)->getResultExpr()
+ ->isConstantInitializer(Ctx, IsForRef);
+ case ChooseExprClass:
+ return cast<ChooseExpr>(this)->getChosenSubExpr(Ctx)
+ ->isConstantInitializer(Ctx, IsForRef);
+ case UnaryOperatorClass: {
+ const UnaryOperator* Exp = cast<UnaryOperator>(this);
+ if (Exp->getOpcode() == UO_Extension)
+ return Exp->getSubExpr()->isConstantInitializer(Ctx, false);
+ break;
+ }
+ case CXXFunctionalCastExprClass:
+ case CXXStaticCastExprClass:
+ case ImplicitCastExprClass:
+ case CStyleCastExprClass: {
+ const CastExpr *CE = cast<CastExpr>(this);
+
+ // If we're promoting an integer to an _Atomic type then this is constant
+ // if the integer is constant. We also need to check the converse in case
+ // someone does something like:
+ //
+ // int a = (_Atomic(int))42;
+ //
+ // I doubt anyone would write code like this directly, but it's quite
+ // possible as the result of macro expansions.
+ if (CE->getCastKind() == CK_NonAtomicToAtomic ||
+ CE->getCastKind() == CK_AtomicToNonAtomic)
+ return CE->getSubExpr()->isConstantInitializer(Ctx, false);
+
+ // Handle bitcasts of vector constants.
+ if (getType()->isVectorType() && CE->getCastKind() == CK_BitCast)
+ return CE->getSubExpr()->isConstantInitializer(Ctx, false);
+
+ // Handle misc casts we want to ignore.
+ // FIXME: Is it really safe to ignore all these?
+ if (CE->getCastKind() == CK_NoOp ||
+ CE->getCastKind() == CK_LValueToRValue ||
+ CE->getCastKind() == CK_ToUnion ||
+ CE->getCastKind() == CK_ConstructorConversion)
+ return CE->getSubExpr()->isConstantInitializer(Ctx, false);
+
+ break;
+ }
+ case MaterializeTemporaryExprClass:
+ return cast<MaterializeTemporaryExpr>(this)->GetTemporaryExpr()
+ ->isConstantInitializer(Ctx, false);
+ }
+ return isEvaluatable(Ctx);
+}
+
+namespace {
+ /// \brief Look for a call to a non-trivial function within an expression.
+ class NonTrivialCallFinder : public EvaluatedExprVisitor<NonTrivialCallFinder>
+ {
+ typedef EvaluatedExprVisitor<NonTrivialCallFinder> Inherited;
+
+ bool NonTrivial;
+
+ public:
+ explicit NonTrivialCallFinder(ASTContext &Context)
+ : Inherited(Context), NonTrivial(false) { }
+
+ bool hasNonTrivialCall() const { return NonTrivial; }
+
+ void VisitCallExpr(CallExpr *E) {
+ if (CXXMethodDecl *Method
+ = dyn_cast_or_null<CXXMethodDecl>(E->getCalleeDecl())) {
+ if (Method->isTrivial()) {
+ // Recurse to children of the call.
+ Inherited::VisitStmt(E);
+ return;
+ }
+ }
+
+ NonTrivial = true;
+ }
+
+ void VisitCXXConstructExpr(CXXConstructExpr *E) {
+ if (E->getConstructor()->isTrivial()) {
+ // Recurse to children of the call.
+ Inherited::VisitStmt(E);
+ return;
+ }
+
+ NonTrivial = true;
+ }
+
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ if (E->getTemporary()->getDestructor()->isTrivial()) {
+ Inherited::VisitStmt(E);
+ return;
+ }
+
+ NonTrivial = true;
+ }
+ };
+}
+
+bool Expr::hasNonTrivialCall(ASTContext &Ctx) {
+ NonTrivialCallFinder Finder(Ctx);
+ Finder.Visit(this);
+ return Finder.hasNonTrivialCall();
+}
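+// For example, constructing a temporary with a non-trivial constructor or
+// binding one with a non-trivial destructor is flagged by the visitor above,
+// while trivial special-member calls merely recurse into their arguments.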
+
+/// isNullPointerConstant - C99 6.3.2.3p3 - Return whether this is a null
+/// pointer constant or not, as well as the specific kind of constant detected.
+/// Null pointer constants can be integer constant expressions with the
+/// value zero, casts of zero to void*, nullptr (C++0X), or __null
+/// (a GNU extension).
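+/// For example, "0" and (in C) "(void*)0" yield NPCK_ZeroInteger, "__null"
+/// yields NPCK_GNUNull, "nullptr" yields NPCK_CXX0X_nullptr, and a nonzero
+/// integer yields NPCK_NotNull.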
+Expr::NullPointerConstantKind
+Expr::isNullPointerConstant(ASTContext &Ctx,
+ NullPointerConstantValueDependence NPC) const {
+ if (isValueDependent()) {
+ switch (NPC) {
+ case NPC_NeverValueDependent:
+ llvm_unreachable("Unexpected value dependent expression!");
+ case NPC_ValueDependentIsNull:
+ if (isTypeDependent() || getType()->isIntegralType(Ctx))
+ return NPCK_ZeroInteger;
+ else
+ return NPCK_NotNull;
+
+ case NPC_ValueDependentIsNotNull:
+ return NPCK_NotNull;
+ }
+ }
+
+ // Strip off a cast to void*, if it exists. Except in C++.
+ if (const ExplicitCastExpr *CE = dyn_cast<ExplicitCastExpr>(this)) {
+ if (!Ctx.getLangOpts().CPlusPlus) {
+ // Check that it is a cast to void*.
+ if (const PointerType *PT = CE->getType()->getAs<PointerType>()) {
+ QualType Pointee = PT->getPointeeType();
+ if (!Pointee.hasQualifiers() &&
+ Pointee->isVoidType() && // to void*
+ CE->getSubExpr()->getType()->isIntegerType()) // from int.
+ return CE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
+ }
+ }
+ } else if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(this)) {
+ // Ignore the ImplicitCastExpr type entirely.
+ return ICE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (const ParenExpr *PE = dyn_cast<ParenExpr>(this)) {
+ // Accept ((void*)0) as a null pointer constant, as many other
+ // implementations do.
+ return PE->getSubExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (const GenericSelectionExpr *GE =
+ dyn_cast<GenericSelectionExpr>(this)) {
+ return GE->getResultExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (const CXXDefaultArgExpr *DefaultArg
+ = dyn_cast<CXXDefaultArgExpr>(this)) {
+ // See through default argument expressions
+ return DefaultArg->getExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (isa<GNUNullExpr>(this)) {
+ // The GNU __null extension is always a null pointer constant.
+ return NPCK_GNUNull;
+ } else if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(this)) {
+ return M->GetTemporaryExpr()->isNullPointerConstant(Ctx, NPC);
+ } else if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(this)) {
+ if (const Expr *Source = OVE->getSourceExpr())
+ return Source->isNullPointerConstant(Ctx, NPC);
+ }
+
+ // C++0x nullptr_t is always a null pointer constant.
+ if (getType()->isNullPtrType())
+ return NPCK_CXX0X_nullptr;
+
+ if (const RecordType *UT = getType()->getAsUnionType())
+ if (UT && UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(this)){
+ const Expr *InitExpr = CLE->getInitializer();
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(InitExpr))
+ return ILE->getInit(0)->isNullPointerConstant(Ctx, NPC);
+ }
+ // This expression must be an integer type.
+ if (!getType()->isIntegerType() ||
+ (Ctx.getLangOpts().CPlusPlus && getType()->isEnumeralType()))
+ return NPCK_NotNull;
+
+ // If we have an integer constant expression, we need to *evaluate* it and
+ // test for the value 0. Don't use the C++11 constant expression semantics
+ // for this, for now; once the dust settles on core issue 903, we might only
+ // allow a literal 0 here in C++11 mode.
+ if (Ctx.getLangOpts().CPlusPlus0x) {
+ if (!isCXX98IntegralConstantExpr(Ctx))
+ return NPCK_NotNull;
+ } else {
+ if (!isIntegerConstantExpr(Ctx))
+ return NPCK_NotNull;
+ }
+
+ return (EvaluateKnownConstInt(Ctx) == 0) ? NPCK_ZeroInteger : NPCK_NotNull;
+}
+
+/// \brief If this expression is an l-value for an Objective C
+/// property, find the underlying property reference expression.
+const ObjCPropertyRefExpr *Expr::getObjCProperty() const {
+ const Expr *E = this;
+ while (true) {
+ assert((E->getValueKind() == VK_LValue &&
+ E->getObjectKind() == OK_ObjCProperty) &&
+ "expression is not a property reference");
+ E = E->IgnoreParenCasts();
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_Comma) {
+ E = BO->getRHS();
+ continue;
+ }
+ }
+
+ break;
+ }
+
+ return cast<ObjCPropertyRefExpr>(E);
+}
+
+FieldDecl *Expr::getBitField() {
+ Expr *E = this->IgnoreParens();
+
+ while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() == CK_LValueToRValue ||
+ (ICE->getValueKind() != VK_RValue && ICE->getCastKind() == CK_NoOp))
+ E = ICE->getSubExpr()->IgnoreParens();
+ else
+ break;
+ }
+
+ if (MemberExpr *MemRef = dyn_cast<MemberExpr>(E))
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(MemRef->getMemberDecl()))
+ if (Field->isBitField())
+ return Field;
+
+ if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E))
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(DeclRef->getDecl()))
+ if (Field->isBitField())
+ return Field;
+
+ if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(E)) {
+ if (BinOp->isAssignmentOp() && BinOp->getLHS())
+ return BinOp->getLHS()->getBitField();
+
+ if (BinOp->getOpcode() == BO_Comma && BinOp->getRHS())
+ return BinOp->getRHS()->getBitField();
+ }
+
+ return 0;
+}
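+// For example, given "s.bits = 1" where 'bits' is declared as a bit-field,
+// getBitField() on the assignment returns the FieldDecl for 'bits'.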
+
+bool Expr::refersToVectorElement() const {
+ const Expr *E = this->IgnoreParens();
+
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getValueKind() != VK_RValue &&
+ ICE->getCastKind() == CK_NoOp)
+ E = ICE->getSubExpr()->IgnoreParens();
+ else
+ break;
+ }
+
+ if (const ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(E))
+ return ASE->getBase()->getType()->isVectorType();
+
+ if (isa<ExtVectorElementExpr>(E))
+ return true;
+
+ return false;
+}
+
+/// isArrow - Return true if the base expression is a pointer to a vector;
+/// return false if the base expression is a vector.
+bool ExtVectorElementExpr::isArrow() const {
+ return getBase()->getType()->isPointerType();
+}
+
+unsigned ExtVectorElementExpr::getNumElements() const {
+ if (const VectorType *VT = getType()->getAs<VectorType>())
+ return VT->getNumElements();
+ return 1;
+}
+
+/// containsDuplicateElements - Return true if any element access is repeated.
+bool ExtVectorElementExpr::containsDuplicateElements() const {
+ // FIXME: Refactor this code to an accessor on the AST node which returns the
+ // "type" of component access, and share with code below and in Sema.
+ StringRef Comp = Accessor->getName();
+
+ // Halving swizzles do not contain duplicate elements.
+ if (Comp == "hi" || Comp == "lo" || Comp == "even" || Comp == "odd")
+ return false;
+
+ // Advance past s-char prefix on hex swizzles.
+ if (Comp[0] == 's' || Comp[0] == 'S')
+ Comp = Comp.substr(1);
+
+ for (unsigned i = 0, e = Comp.size(); i != e; ++i)
+ if (Comp.substr(i + 1).find(Comp[i]) != StringRef::npos)
+ return true;
+
+ return false;
+}
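+// For example, the swizzle "v.xxy" contains duplicate elements, "v.xyzw"
+// does not, and halving swizzles such as "v.hi" never do.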
+
+/// getEncodedElementAccess - We encode the fields as an llvm ConstantArray.
+void ExtVectorElementExpr::getEncodedElementAccess(
+ SmallVectorImpl<unsigned> &Elts) const {
+ StringRef Comp = Accessor->getName();
+ if (Comp[0] == 's' || Comp[0] == 'S')
+ Comp = Comp.substr(1);
+
+ bool isHi = Comp == "hi";
+ bool isLo = Comp == "lo";
+ bool isEven = Comp == "even";
+ bool isOdd = Comp == "odd";
+
+ for (unsigned i = 0, e = getNumElements(); i != e; ++i) {
+ uint64_t Index;
+
+ if (isHi)
+ Index = e + i;
+ else if (isLo)
+ Index = i;
+ else if (isEven)
+ Index = 2 * i;
+ else if (isOdd)
+ Index = 2 * i + 1;
+ else
+ Index = ExtVectorType::getAccessorIdx(Comp[i]);
+
+ Elts.push_back(Index);
+ }
+}
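+// For example, on a 4-element vector v, "v.hi" encodes indices {2, 3},
+// "v.even" encodes {0, 2}, and "v.xy" encodes {0, 1}.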
+
+ObjCMessageExpr::ObjCMessageExpr(QualType T,
+ ExprValueKind VK,
+ SourceLocation LBracLoc,
+ SourceLocation SuperLoc,
+ bool IsInstanceSuper,
+ QualType SuperType,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit)
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary,
+ /*TypeDependent=*/false, /*ValueDependent=*/false,
+ /*InstantiationDependent=*/false,
+ /*ContainsUnexpandedParameterPack=*/false),
+ SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
+ : Sel.getAsOpaquePtr())),
+ Kind(IsInstanceSuper? SuperInstance : SuperClass),
+ HasMethod(Method != 0), IsDelegateInitCall(false), IsImplicit(isImplicit),
+ SuperLoc(SuperLoc), LBracLoc(LBracLoc), RBracLoc(RBracLoc)
+{
+ initArgsAndSelLocs(Args, SelLocs, SelLocsK);
+ setReceiverPointer(SuperType.getAsOpaquePtr());
+}
+
+ObjCMessageExpr::ObjCMessageExpr(QualType T,
+ ExprValueKind VK,
+ SourceLocation LBracLoc,
+ TypeSourceInfo *Receiver,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit)
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, T->isDependentType(),
+ T->isDependentType(), T->isInstantiationDependentType(),
+ T->containsUnexpandedParameterPack()),
+ SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
+ : Sel.getAsOpaquePtr())),
+ Kind(Class),
+ HasMethod(Method != 0), IsDelegateInitCall(false), IsImplicit(isImplicit),
+ LBracLoc(LBracLoc), RBracLoc(RBracLoc)
+{
+ initArgsAndSelLocs(Args, SelLocs, SelLocsK);
+ setReceiverPointer(Receiver);
+}
+
+ObjCMessageExpr::ObjCMessageExpr(QualType T,
+ ExprValueKind VK,
+ SourceLocation LBracLoc,
+ Expr *Receiver,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit)
+ : Expr(ObjCMessageExprClass, T, VK, OK_Ordinary, Receiver->isTypeDependent(),
+ Receiver->isTypeDependent(),
+ Receiver->isInstantiationDependent(),
+ Receiver->containsUnexpandedParameterPack()),
+ SelectorOrMethod(reinterpret_cast<uintptr_t>(Method? Method
+ : Sel.getAsOpaquePtr())),
+ Kind(Instance),
+ HasMethod(Method != 0), IsDelegateInitCall(false), IsImplicit(isImplicit),
+ LBracLoc(LBracLoc), RBracLoc(RBracLoc)
+{
+ initArgsAndSelLocs(Args, SelLocs, SelLocsK);
+ setReceiverPointer(Receiver);
+}
+
+void ObjCMessageExpr::initArgsAndSelLocs(ArrayRef<Expr *> Args,
+ ArrayRef<SourceLocation> SelLocs,
+ SelectorLocationsKind SelLocsK) {
+ setNumArgs(Args.size());
+ Expr **MyArgs = getArgs();
+ for (unsigned I = 0; I != Args.size(); ++I) {
+ if (Args[I]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (Args[I]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (Args[I]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (Args[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ MyArgs[I] = Args[I];
+ }
+
+ SelLocsKind = SelLocsK;
+ if (!isImplicit()) {
+ if (SelLocsK == SelLoc_NonStandard)
+ std::copy(SelLocs.begin(), SelLocs.end(), getStoredSelLocs());
+ }
+}
+
+ObjCMessageExpr *ObjCMessageExpr::Create(ASTContext &Context, QualType T,
+ ExprValueKind VK,
+ SourceLocation LBracLoc,
+ SourceLocation SuperLoc,
+ bool IsInstanceSuper,
+ QualType SuperType,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit) {
+ assert((!SelLocs.empty() || isImplicit) &&
+ "No selector locs for non-implicit message");
+ ObjCMessageExpr *Mem;
+ SelectorLocationsKind SelLocsK = SelectorLocationsKind();
+ if (isImplicit)
+ Mem = alloc(Context, Args.size(), 0);
+ else
+ Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
+ return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, SuperLoc, IsInstanceSuper,
+ SuperType, Sel, SelLocs, SelLocsK,
+ Method, Args, RBracLoc, isImplicit);
+}
+
+ObjCMessageExpr *ObjCMessageExpr::Create(ASTContext &Context, QualType T,
+ ExprValueKind VK,
+ SourceLocation LBracLoc,
+ TypeSourceInfo *Receiver,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit) {
+ assert((!SelLocs.empty() || isImplicit) &&
+ "No selector locs for non-implicit message");
+ ObjCMessageExpr *Mem;
+ SelectorLocationsKind SelLocsK = SelectorLocationsKind();
+ if (isImplicit)
+ Mem = alloc(Context, Args.size(), 0);
+ else
+ Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
+ return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel,
+ SelLocs, SelLocsK, Method, Args, RBracLoc,
+ isImplicit);
+}
+
+ObjCMessageExpr *ObjCMessageExpr::Create(ASTContext &Context, QualType T,
+ ExprValueKind VK,
+ SourceLocation LBracLoc,
+ Expr *Receiver,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ObjCMethodDecl *Method,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBracLoc,
+ bool isImplicit) {
+ assert((!SelLocs.empty() || isImplicit) &&
+ "No selector locs for non-implicit message");
+ ObjCMessageExpr *Mem;
+ SelectorLocationsKind SelLocsK = SelectorLocationsKind();
+ if (isImplicit)
+ Mem = alloc(Context, Args.size(), 0);
+ else
+ Mem = alloc(Context, Args, RBracLoc, SelLocs, Sel, SelLocsK);
+ return new (Mem) ObjCMessageExpr(T, VK, LBracLoc, Receiver, Sel,
+ SelLocs, SelLocsK, Method, Args, RBracLoc,
+ isImplicit);
+}
+
+ObjCMessageExpr *ObjCMessageExpr::CreateEmpty(ASTContext &Context,
+ unsigned NumArgs,
+ unsigned NumStoredSelLocs) {
+ ObjCMessageExpr *Mem = alloc(Context, NumArgs, NumStoredSelLocs);
+ return new (Mem) ObjCMessageExpr(EmptyShell(), NumArgs);
+}
+
+ObjCMessageExpr *ObjCMessageExpr::alloc(ASTContext &C,
+ ArrayRef<Expr *> Args,
+ SourceLocation RBraceLoc,
+ ArrayRef<SourceLocation> SelLocs,
+ Selector Sel,
+ SelectorLocationsKind &SelLocsK) {
+ SelLocsK = hasStandardSelectorLocs(Sel, SelLocs, Args, RBraceLoc);
+ unsigned NumStoredSelLocs = (SelLocsK == SelLoc_NonStandard) ? SelLocs.size()
+ : 0;
+ return alloc(C, Args.size(), NumStoredSelLocs);
+}
+
+ObjCMessageExpr *ObjCMessageExpr::alloc(ASTContext &C,
+ unsigned NumArgs,
+ unsigned NumStoredSelLocs) {
+ unsigned Size = sizeof(ObjCMessageExpr) + sizeof(void *) +
+ NumArgs * sizeof(Expr *) + NumStoredSelLocs * sizeof(SourceLocation);
+ return (ObjCMessageExpr *)C.Allocate(Size,
+ llvm::AlignOf<ObjCMessageExpr>::Alignment);
+}
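+// The single allocation above reserves space for the ObjCMessageExpr itself
+// plus one receiver pointer slot, NumArgs Expr* pointers, and
+// NumStoredSelLocs SourceLocations.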
+
+void ObjCMessageExpr::getSelectorLocs(
+ SmallVectorImpl<SourceLocation> &SelLocs) const {
+ for (unsigned i = 0, e = getNumSelectorLocs(); i != e; ++i)
+ SelLocs.push_back(getSelectorLoc(i));
+}
+
+SourceRange ObjCMessageExpr::getReceiverRange() const {
+ switch (getReceiverKind()) {
+ case Instance:
+ return getInstanceReceiver()->getSourceRange();
+
+ case Class:
+ return getClassReceiverTypeInfo()->getTypeLoc().getSourceRange();
+
+ case SuperInstance:
+ case SuperClass:
+ return getSuperLoc();
+ }
+
+ llvm_unreachable("Invalid ReceiverKind!");
+}
+
+Selector ObjCMessageExpr::getSelector() const {
+ if (HasMethod)
+ return reinterpret_cast<const ObjCMethodDecl *>(SelectorOrMethod)
+ ->getSelector();
+ return Selector(SelectorOrMethod);
+}
+
+ObjCInterfaceDecl *ObjCMessageExpr::getReceiverInterface() const {
+ switch (getReceiverKind()) {
+ case Instance:
+ if (const ObjCObjectPointerType *Ptr
+ = getInstanceReceiver()->getType()->getAs<ObjCObjectPointerType>())
+ return Ptr->getInterfaceDecl();
+ break;
+
+ case Class:
+ if (const ObjCObjectType *Ty
+ = getClassReceiver()->getAs<ObjCObjectType>())
+ return Ty->getInterface();
+ break;
+
+ case SuperInstance:
+ if (const ObjCObjectPointerType *Ptr
+ = getSuperType()->getAs<ObjCObjectPointerType>())
+ return Ptr->getInterfaceDecl();
+ break;
+
+ case SuperClass:
+ if (const ObjCObjectType *Iface
+ = getSuperType()->getAs<ObjCObjectType>())
+ return Iface->getInterface();
+ break;
+ }
+
+ return 0;
+}
+
+StringRef ObjCBridgedCastExpr::getBridgeKindName() const {
+ switch (getBridgeKind()) {
+ case OBC_Bridge:
+ return "__bridge";
+ case OBC_BridgeTransfer:
+ return "__bridge_transfer";
+ case OBC_BridgeRetained:
+ return "__bridge_retained";
+ }
+
+ llvm_unreachable("Invalid BridgeKind!");
+}
+
+bool ChooseExpr::isConditionTrue(const ASTContext &C) const {
+ return getCond()->EvaluateKnownConstInt(C) != 0;
+}
+
+ShuffleVectorExpr::ShuffleVectorExpr(ASTContext &C, Expr **args, unsigned nexpr,
+ QualType Type, SourceLocation BLoc,
+ SourceLocation RP)
+ : Expr(ShuffleVectorExprClass, Type, VK_RValue, OK_Ordinary,
+ Type->isDependentType(), Type->isDependentType(),
+ Type->isInstantiationDependentType(),
+ Type->containsUnexpandedParameterPack()),
+ BuiltinLoc(BLoc), RParenLoc(RP), NumExprs(nexpr)
+{
+ SubExprs = new (C) Stmt*[nexpr];
+ for (unsigned i = 0; i < nexpr; i++) {
+ if (args[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i] = args[i];
+ }
+}
+
+void ShuffleVectorExpr::setExprs(ASTContext &C, Expr ** Exprs,
+ unsigned NumExprs) {
+ if (SubExprs) C.Deallocate(SubExprs);
+
+ SubExprs = new (C) Stmt* [NumExprs];
+ this->NumExprs = NumExprs;
+ memcpy(SubExprs, Exprs, sizeof(Expr *) * NumExprs);
+}
+
+GenericSelectionExpr::GenericSelectionExpr(ASTContext &Context,
+ SourceLocation GenericLoc, Expr *ControllingExpr,
+ TypeSourceInfo **AssocTypes, Expr **AssocExprs,
+ unsigned NumAssocs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ bool ContainsUnexpandedParameterPack,
+ unsigned ResultIndex)
+ : Expr(GenericSelectionExprClass,
+ AssocExprs[ResultIndex]->getType(),
+ AssocExprs[ResultIndex]->getValueKind(),
+ AssocExprs[ResultIndex]->getObjectKind(),
+ AssocExprs[ResultIndex]->isTypeDependent(),
+ AssocExprs[ResultIndex]->isValueDependent(),
+ AssocExprs[ResultIndex]->isInstantiationDependent(),
+ ContainsUnexpandedParameterPack),
+ AssocTypes(new (Context) TypeSourceInfo*[NumAssocs]),
+ SubExprs(new (Context) Stmt*[END_EXPR+NumAssocs]), NumAssocs(NumAssocs),
+ ResultIndex(ResultIndex), GenericLoc(GenericLoc), DefaultLoc(DefaultLoc),
+ RParenLoc(RParenLoc) {
+ SubExprs[CONTROLLING] = ControllingExpr;
+ std::copy(AssocTypes, AssocTypes+NumAssocs, this->AssocTypes);
+ std::copy(AssocExprs, AssocExprs+NumAssocs, SubExprs+END_EXPR);
+}
+
+GenericSelectionExpr::GenericSelectionExpr(ASTContext &Context,
+ SourceLocation GenericLoc, Expr *ControllingExpr,
+ TypeSourceInfo **AssocTypes, Expr **AssocExprs,
+ unsigned NumAssocs, SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ bool ContainsUnexpandedParameterPack)
+ : Expr(GenericSelectionExprClass,
+ Context.DependentTy,
+ VK_RValue,
+ OK_Ordinary,
+ /*isTypeDependent=*/true,
+ /*isValueDependent=*/true,
+ /*isInstantiationDependent=*/true,
+ ContainsUnexpandedParameterPack),
+ AssocTypes(new (Context) TypeSourceInfo*[NumAssocs]),
+ SubExprs(new (Context) Stmt*[END_EXPR+NumAssocs]), NumAssocs(NumAssocs),
+ ResultIndex(-1U), GenericLoc(GenericLoc), DefaultLoc(DefaultLoc),
+ RParenLoc(RParenLoc) {
+ SubExprs[CONTROLLING] = ControllingExpr;
+ std::copy(AssocTypes, AssocTypes+NumAssocs, this->AssocTypes);
+ std::copy(AssocExprs, AssocExprs+NumAssocs, SubExprs+END_EXPR);
+}
+
+//===----------------------------------------------------------------------===//
+// DesignatedInitExpr
+//===----------------------------------------------------------------------===//
+
+IdentifierInfo *DesignatedInitExpr::Designator::getFieldName() const {
+ assert(Kind == FieldDesignator && "Only valid on a field designator");
+ if (Field.NameOrField & 0x01)
+ return reinterpret_cast<IdentifierInfo *>(Field.NameOrField&~0x01);
+ else
+ return getField()->getIdentifier();
+}
+
+DesignatedInitExpr::DesignatedInitExpr(ASTContext &C, QualType Ty,
+ unsigned NumDesignators,
+ const Designator *Designators,
+ SourceLocation EqualOrColonLoc,
+ bool GNUSyntax,
+ Expr **IndexExprs,
+ unsigned NumIndexExprs,
+ Expr *Init)
+ : Expr(DesignatedInitExprClass, Ty,
+ Init->getValueKind(), Init->getObjectKind(),
+ Init->isTypeDependent(), Init->isValueDependent(),
+ Init->isInstantiationDependent(),
+ Init->containsUnexpandedParameterPack()),
+ EqualOrColonLoc(EqualOrColonLoc), GNUSyntax(GNUSyntax),
+ NumDesignators(NumDesignators), NumSubExprs(NumIndexExprs + 1) {
+ this->Designators = new (C) Designator[NumDesignators];
+
+ // Record the initializer itself.
+ child_range Child = children();
+ *Child++ = Init;
+
+ // Copy the designators and their subexpressions, computing
+ // value-dependence along the way.
+ unsigned IndexIdx = 0;
+ for (unsigned I = 0; I != NumDesignators; ++I) {
+ this->Designators[I] = Designators[I];
+
+ if (this->Designators[I].isArrayDesignator()) {
+ // Compute type- and value-dependence.
+ Expr *Index = IndexExprs[IndexIdx];
+ if (Index->isTypeDependent() || Index->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (Index->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ // Propagate unexpanded parameter packs.
+ if (Index->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ // Copy the index expressions into permanent storage.
+ *Child++ = IndexExprs[IndexIdx++];
+ } else if (this->Designators[I].isArrayRangeDesignator()) {
+ // Compute type- and value-dependence.
+ Expr *Start = IndexExprs[IndexIdx];
+ Expr *End = IndexExprs[IndexIdx + 1];
+ if (Start->isTypeDependent() || Start->isValueDependent() ||
+ End->isTypeDependent() || End->isValueDependent()) {
+ ExprBits.ValueDependent = true;
+ ExprBits.InstantiationDependent = true;
+ } else if (Start->isInstantiationDependent() ||
+ End->isInstantiationDependent()) {
+ ExprBits.InstantiationDependent = true;
+ }
+
+ // Propagate unexpanded parameter packs.
+ if (Start->containsUnexpandedParameterPack() ||
+ End->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ // Copy the start/end expressions into permanent storage.
+ *Child++ = IndexExprs[IndexIdx++];
+ *Child++ = IndexExprs[IndexIdx++];
+ }
+ }
+
+ assert(IndexIdx == NumIndexExprs && "Wrong number of index expressions");
+}
+
+DesignatedInitExpr *
+DesignatedInitExpr::Create(ASTContext &C, Designator *Designators,
+ unsigned NumDesignators,
+ Expr **IndexExprs, unsigned NumIndexExprs,
+ SourceLocation ColonOrEqualLoc,
+ bool UsesColonSyntax, Expr *Init) {
+ void *Mem = C.Allocate(sizeof(DesignatedInitExpr) +
+ sizeof(Stmt *) * (NumIndexExprs + 1), 8);
+ return new (Mem) DesignatedInitExpr(C, C.VoidTy, NumDesignators, Designators,
+ ColonOrEqualLoc, UsesColonSyntax,
+ IndexExprs, NumIndexExprs, Init);
+}
+
+DesignatedInitExpr *DesignatedInitExpr::CreateEmpty(ASTContext &C,
+ unsigned NumIndexExprs) {
+ void *Mem = C.Allocate(sizeof(DesignatedInitExpr) +
+ sizeof(Stmt *) * (NumIndexExprs + 1), 8);
+ return new (Mem) DesignatedInitExpr(NumIndexExprs + 1);
+}
+
+void DesignatedInitExpr::setDesignators(ASTContext &C,
+ const Designator *Desigs,
+ unsigned NumDesigs) {
+ Designators = new (C) Designator[NumDesigs];
+ NumDesignators = NumDesigs;
+ for (unsigned I = 0; I != NumDesigs; ++I)
+ Designators[I] = Desigs[I];
+}
+
+SourceRange DesignatedInitExpr::getDesignatorsSourceRange() const {
+ DesignatedInitExpr *DIE = const_cast<DesignatedInitExpr*>(this);
+ if (size() == 1)
+ return DIE->getDesignator(0)->getSourceRange();
+ return SourceRange(DIE->getDesignator(0)->getStartLocation(),
+ DIE->getDesignator(size()-1)->getEndLocation());
+}
+
+SourceRange DesignatedInitExpr::getSourceRange() const {
+ SourceLocation StartLoc;
+ Designator &First =
+ *const_cast<DesignatedInitExpr*>(this)->designators_begin();
+ if (First.isFieldDesignator()) {
+ if (GNUSyntax)
+ StartLoc = SourceLocation::getFromRawEncoding(First.Field.FieldLoc);
+ else
+ StartLoc = SourceLocation::getFromRawEncoding(First.Field.DotLoc);
+ } else
+ StartLoc =
+ SourceLocation::getFromRawEncoding(First.ArrayOrRange.LBracketLoc);
+ return SourceRange(StartLoc, getInit()->getSourceRange().getEnd());
+}
+
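+// The subexpressions live in the trailing Stmt* array that follows the
+// DesignatedInitExpr object itself; slot 0 holds the initializer, so the index
+// expressions recorded in the designators start at offset 1.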
+Expr *DesignatedInitExpr::getArrayIndex(const Designator& D) {
+ assert(D.Kind == Designator::ArrayDesignator && "Requires array designator");
+ char* Ptr = static_cast<char*>(static_cast<void *>(this));
+ Ptr += sizeof(DesignatedInitExpr);
+ Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
+ return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
+}
+
+Expr *DesignatedInitExpr::getArrayRangeStart(const Designator& D) {
+ assert(D.Kind == Designator::ArrayRangeDesignator &&
+ "Requires array range designator");
+ char* Ptr = static_cast<char*>(static_cast<void *>(this));
+ Ptr += sizeof(DesignatedInitExpr);
+ Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
+ return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 1));
+}
+
+Expr *DesignatedInitExpr::getArrayRangeEnd(const Designator& D) {
+ assert(D.Kind == Designator::ArrayRangeDesignator &&
+ "Requires array range designator");
+ char* Ptr = static_cast<char*>(static_cast<void *>(this));
+ Ptr += sizeof(DesignatedInitExpr);
+ Stmt **SubExprs = reinterpret_cast<Stmt**>(reinterpret_cast<void**>(Ptr));
+ return cast<Expr>(*(SubExprs + D.ArrayOrRange.Index + 2));
+}
+
+/// \brief Replaces the designator at index @p Idx with the series
+/// of designators in [First, Last).
+void DesignatedInitExpr::ExpandDesignator(ASTContext &C, unsigned Idx,
+ const Designator *First,
+ const Designator *Last) {
+ unsigned NumNewDesignators = Last - First;
+ if (NumNewDesignators == 0) {
+ std::copy_backward(Designators + Idx + 1,
+ Designators + NumDesignators,
+ Designators + Idx);
+ --NumNewDesignators;
+ return;
+ } else if (NumNewDesignators == 1) {
+ Designators[Idx] = *First;
+ return;
+ }
+
+ Designator *NewDesignators
+ = new (C) Designator[NumDesignators - 1 + NumNewDesignators];
+ std::copy(Designators, Designators + Idx, NewDesignators);
+ std::copy(First, Last, NewDesignators + Idx);
+ std::copy(Designators + Idx + 1, Designators + NumDesignators,
+ NewDesignators + Idx + NumNewDesignators);
+ Designators = NewDesignators;
+ NumDesignators = NumDesignators - 1 + NumNewDesignators;
+}
+
+ParenListExpr::ParenListExpr(ASTContext& C, SourceLocation lparenloc,
+ Expr **exprs, unsigned nexprs,
+ SourceLocation rparenloc)
+ : Expr(ParenListExprClass, QualType(), VK_RValue, OK_Ordinary,
+ false, false, false, false),
+ NumExprs(nexprs), LParenLoc(lparenloc), RParenLoc(rparenloc) {
+ Exprs = new (C) Stmt*[nexprs];
+ for (unsigned i = 0; i != nexprs; ++i) {
+ if (exprs[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (exprs[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (exprs[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (exprs[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ Exprs[i] = exprs[i];
+ }
+}
+
+const OpaqueValueExpr *OpaqueValueExpr::findInCopyConstruct(const Expr *e) {
+ if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(e))
+ e = ewc->getSubExpr();
+ if (const MaterializeTemporaryExpr *m = dyn_cast<MaterializeTemporaryExpr>(e))
+ e = m->GetTemporaryExpr();
+ e = cast<CXXConstructExpr>(e)->getArg(0);
+ while (const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(e))
+ e = ice->getSubExpr();
+ return cast<OpaqueValueExpr>(e);
+}
+
+PseudoObjectExpr *PseudoObjectExpr::Create(ASTContext &Context, EmptyShell sh,
+ unsigned numSemanticExprs) {
+ void *buffer = Context.Allocate(sizeof(PseudoObjectExpr) +
+ (1 + numSemanticExprs) * sizeof(Expr*),
+ llvm::alignOf<PseudoObjectExpr>());
+ return new(buffer) PseudoObjectExpr(sh, numSemanticExprs);
+}
+
+PseudoObjectExpr::PseudoObjectExpr(EmptyShell shell, unsigned numSemanticExprs)
+ : Expr(PseudoObjectExprClass, shell) {
+ PseudoObjectExprBits.NumSubExprs = numSemanticExprs + 1;
+}
+
+PseudoObjectExpr *PseudoObjectExpr::Create(ASTContext &C, Expr *syntax,
+ ArrayRef<Expr*> semantics,
+ unsigned resultIndex) {
+ assert(syntax && "no syntactic expression!");
+ assert(semantics.size() && "no semantic expressions!");
+
+ QualType type;
+ ExprValueKind VK;
+ if (resultIndex == NoResult) {
+ type = C.VoidTy;
+ VK = VK_RValue;
+ } else {
+ assert(resultIndex < semantics.size());
+ type = semantics[resultIndex]->getType();
+ VK = semantics[resultIndex]->getValueKind();
+ assert(semantics[resultIndex]->getObjectKind() == OK_Ordinary);
+ }
+
+ void *buffer = C.Allocate(sizeof(PseudoObjectExpr) +
+ (1 + semantics.size()) * sizeof(Expr*),
+ llvm::alignOf<PseudoObjectExpr>());
+ return new(buffer) PseudoObjectExpr(type, VK, syntax, semantics,
+ resultIndex);
+}
+
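+// Trailing storage layout: slot 0 holds the syntactic form, followed by the
+// semantic expressions; the result index is stored biased by one (note the
+// resultIndex + 1 below).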
+PseudoObjectExpr::PseudoObjectExpr(QualType type, ExprValueKind VK,
+ Expr *syntax, ArrayRef<Expr*> semantics,
+ unsigned resultIndex)
+ : Expr(PseudoObjectExprClass, type, VK, OK_Ordinary,
+ /*filled in at end of ctor*/ false, false, false, false) {
+ PseudoObjectExprBits.NumSubExprs = semantics.size() + 1;
+ PseudoObjectExprBits.ResultIndex = resultIndex + 1;
+
+ for (unsigned i = 0, e = semantics.size() + 1; i != e; ++i) {
+ Expr *E = (i == 0 ? syntax : semantics[i-1]);
+ getSubExprsBuffer()[i] = E;
+
+ if (E->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (E->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (E->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (E->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ if (isa<OpaqueValueExpr>(E))
+ assert(cast<OpaqueValueExpr>(E)->getSourceExpr() != 0 &&
+ "opaque-value semantic expressions for pseudo-object "
+ "operations must have sources");
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// ExprIterator.
+//===----------------------------------------------------------------------===//
+
+Expr* ExprIterator::operator[](size_t idx) { return cast<Expr>(I[idx]); }
+Expr* ExprIterator::operator*() const { return cast<Expr>(*I); }
+Expr* ExprIterator::operator->() const { return cast<Expr>(*I); }
+const Expr* ConstExprIterator::operator[](size_t idx) const {
+ return cast<Expr>(I[idx]);
+}
+const Expr* ConstExprIterator::operator*() const { return cast<Expr>(*I); }
+const Expr* ConstExprIterator::operator->() const { return cast<Expr>(*I); }
+
+//===----------------------------------------------------------------------===//
+// Child Iterators for iterating over subexpressions/substatements
+//===----------------------------------------------------------------------===//
+
+// UnaryExprOrTypeTraitExpr
+Stmt::child_range UnaryExprOrTypeTraitExpr::children() {
+  // If the operand is a type and that type is a variable-length array type
+  // (and not a typedef of one), the size expression of the VLA needs to be
+  // treated as an executable expression.
+ // Why isn't this weirdness documented better in StmtIterator?
+ if (isArgumentType()) {
+ if (const VariableArrayType* T = dyn_cast<VariableArrayType>(
+ getArgumentType().getTypePtr()))
+ return child_range(child_iterator(T), child_iterator());
+ return child_range();
+ }
+ return child_range(&Argument.Ex, &Argument.Ex + 1);
+}
+
+// ObjCMessageExpr
+Stmt::child_range ObjCMessageExpr::children() {
+ Stmt **begin;
+ if (getReceiverKind() == Instance)
+ begin = reinterpret_cast<Stmt **>(this + 1);
+ else
+ begin = reinterpret_cast<Stmt **>(getArgs());
+ return child_range(begin,
+ reinterpret_cast<Stmt **>(getArgs() + getNumArgs()));
+}
+
+ObjCArrayLiteral::ObjCArrayLiteral(llvm::ArrayRef<Expr *> Elements,
+ QualType T, ObjCMethodDecl *Method,
+ SourceRange SR)
+ : Expr(ObjCArrayLiteralClass, T, VK_RValue, OK_Ordinary,
+ false, false, false, false),
+ NumElements(Elements.size()), Range(SR), ArrayWithObjectsMethod(Method)
+{
+ Expr **SaveElements = getElements();
+ for (unsigned I = 0, N = Elements.size(); I != N; ++I) {
+ if (Elements[I]->isTypeDependent() || Elements[I]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (Elements[I]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (Elements[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SaveElements[I] = Elements[I];
+ }
+}
+
+ObjCArrayLiteral *ObjCArrayLiteral::Create(ASTContext &C,
+ llvm::ArrayRef<Expr *> Elements,
+ QualType T, ObjCMethodDecl * Method,
+ SourceRange SR) {
+ void *Mem = C.Allocate(sizeof(ObjCArrayLiteral)
+ + Elements.size() * sizeof(Expr *));
+ return new (Mem) ObjCArrayLiteral(Elements, T, Method, SR);
+}
+
+ObjCArrayLiteral *ObjCArrayLiteral::CreateEmpty(ASTContext &C,
+ unsigned NumElements) {
+
+ void *Mem = C.Allocate(sizeof(ObjCArrayLiteral)
+ + NumElements * sizeof(Expr *));
+ return new (Mem) ObjCArrayLiteral(EmptyShell(), NumElements);
+}
+
+ObjCDictionaryLiteral::ObjCDictionaryLiteral(
+ ArrayRef<ObjCDictionaryElement> VK,
+ bool HasPackExpansions,
+ QualType T, ObjCMethodDecl *method,
+ SourceRange SR)
+ : Expr(ObjCDictionaryLiteralClass, T, VK_RValue, OK_Ordinary, false, false,
+ false, false),
+ NumElements(VK.size()), HasPackExpansions(HasPackExpansions), Range(SR),
+ DictWithObjectsMethod(method)
+{
+ KeyValuePair *KeyValues = getKeyValues();
+ ExpansionData *Expansions = getExpansionData();
+ for (unsigned I = 0; I < NumElements; I++) {
+ if (VK[I].Key->isTypeDependent() || VK[I].Key->isValueDependent() ||
+ VK[I].Value->isTypeDependent() || VK[I].Value->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (VK[I].Key->isInstantiationDependent() ||
+ VK[I].Value->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (VK[I].EllipsisLoc.isInvalid() &&
+ (VK[I].Key->containsUnexpandedParameterPack() ||
+ VK[I].Value->containsUnexpandedParameterPack()))
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ KeyValues[I].Key = VK[I].Key;
+ KeyValues[I].Value = VK[I].Value;
+ if (Expansions) {
+ Expansions[I].EllipsisLoc = VK[I].EllipsisLoc;
+ if (VK[I].NumExpansions)
+ Expansions[I].NumExpansionsPlusOne = *VK[I].NumExpansions + 1;
+ else
+ Expansions[I].NumExpansionsPlusOne = 0;
+ }
+ }
+}
+
+ObjCDictionaryLiteral *
+ObjCDictionaryLiteral::Create(ASTContext &C,
+ ArrayRef<ObjCDictionaryElement> VK,
+ bool HasPackExpansions,
+ QualType T, ObjCMethodDecl *method,
+ SourceRange SR) {
+ unsigned ExpansionsSize = 0;
+ if (HasPackExpansions)
+ ExpansionsSize = sizeof(ExpansionData) * VK.size();
+
+ void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) +
+ sizeof(KeyValuePair) * VK.size() + ExpansionsSize);
+ return new (Mem) ObjCDictionaryLiteral(VK, HasPackExpansions, T, method, SR);
+}
+
+ObjCDictionaryLiteral *
+ObjCDictionaryLiteral::CreateEmpty(ASTContext &C, unsigned NumElements,
+ bool HasPackExpansions) {
+ unsigned ExpansionsSize = 0;
+ if (HasPackExpansions)
+ ExpansionsSize = sizeof(ExpansionData) * NumElements;
+ void *Mem = C.Allocate(sizeof(ObjCDictionaryLiteral) +
+ sizeof(KeyValuePair) * NumElements + ExpansionsSize);
+ return new (Mem) ObjCDictionaryLiteral(EmptyShell(), NumElements,
+ HasPackExpansions);
+}
+
+ObjCSubscriptRefExpr *ObjCSubscriptRefExpr::Create(ASTContext &C,
+ Expr *base,
+ Expr *key, QualType T,
+ ObjCMethodDecl *getMethod,
+ ObjCMethodDecl *setMethod,
+ SourceLocation RB) {
+ void *Mem = C.Allocate(sizeof(ObjCSubscriptRefExpr));
+ return new (Mem) ObjCSubscriptRefExpr(base, key, T, VK_LValue,
+ OK_ObjCSubscript,
+ getMethod, setMethod, RB);
+}
+
+AtomicExpr::AtomicExpr(SourceLocation BLoc, Expr **args, unsigned nexpr,
+ QualType t, AtomicOp op, SourceLocation RP)
+ : Expr(AtomicExprClass, t, VK_RValue, OK_Ordinary,
+ false, false, false, false),
+ NumSubExprs(nexpr), BuiltinLoc(BLoc), RParenLoc(RP), Op(op)
+{
+ assert(nexpr == getNumSubExprs(op) && "wrong number of subexpressions");
+ for (unsigned i = 0; i < nexpr; i++) {
+ if (args[i]->isTypeDependent())
+ ExprBits.TypeDependent = true;
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i] = args[i];
+ }
+}
+
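+// Number of subexpressions recorded for each atomic builtin, ranging from two
+// for the simple loads up to six for __atomic_compare_exchange(_n).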
+unsigned AtomicExpr::getNumSubExprs(AtomicOp Op) {
+ switch (Op) {
+ case AO__c11_atomic_init:
+ case AO__c11_atomic_load:
+ case AO__atomic_load_n:
+ return 2;
+
+ case AO__c11_atomic_store:
+ case AO__c11_atomic_exchange:
+ case AO__atomic_load:
+ case AO__atomic_store:
+ case AO__atomic_store_n:
+ case AO__atomic_exchange_n:
+ case AO__c11_atomic_fetch_add:
+ case AO__c11_atomic_fetch_sub:
+ case AO__c11_atomic_fetch_and:
+ case AO__c11_atomic_fetch_or:
+ case AO__c11_atomic_fetch_xor:
+ case AO__atomic_fetch_add:
+ case AO__atomic_fetch_sub:
+ case AO__atomic_fetch_and:
+ case AO__atomic_fetch_or:
+ case AO__atomic_fetch_xor:
+ case AO__atomic_fetch_nand:
+ case AO__atomic_add_fetch:
+ case AO__atomic_sub_fetch:
+ case AO__atomic_and_fetch:
+ case AO__atomic_or_fetch:
+ case AO__atomic_xor_fetch:
+ case AO__atomic_nand_fetch:
+ return 3;
+
+ case AO__atomic_exchange:
+ return 4;
+
+ case AO__c11_atomic_compare_exchange_strong:
+ case AO__c11_atomic_compare_exchange_weak:
+ return 5;
+
+ case AO__atomic_compare_exchange:
+ case AO__atomic_compare_exchange_n:
+ return 6;
+ }
+ llvm_unreachable("unknown atomic op");
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
new file mode 100644
index 0000000..8cf519c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ExprCXX.cpp
@@ -0,0 +1,1335 @@
+//===--- ExprCXX.cpp - (C++) Expression AST Node Implementation -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the subclasses of the Expr class declared in ExprCXX.h.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TypeLoc.h"
+using namespace clang;
+
+
+//===----------------------------------------------------------------------===//
+// Child Iterators for iterating over subexpressions/substatements
+//===----------------------------------------------------------------------===//
+
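+// For typeid(type), the operand is stored as a TypeSourceInfo; strip any
+// reference and top-level cv-qualifiers, which typeid ignores.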
+QualType CXXTypeidExpr::getTypeOperand() const {
+ assert(isTypeOperand() && "Cannot call getTypeOperand for typeid(expr)");
+ return Operand.get<TypeSourceInfo *>()->getType().getNonReferenceType()
+ .getUnqualifiedType();
+}
+
+QualType CXXUuidofExpr::getTypeOperand() const {
+ assert(isTypeOperand() && "Cannot call getTypeOperand for __uuidof(expr)");
+ return Operand.get<TypeSourceInfo *>()->getType().getNonReferenceType()
+ .getUnqualifiedType();
+}
+
+// CXXScalarValueInitExpr
+SourceRange CXXScalarValueInitExpr::getSourceRange() const {
+ SourceLocation Start = RParenLoc;
+ if (TypeInfo)
+ Start = TypeInfo->getTypeLoc().getBeginLoc();
+ return SourceRange(Start, RParenLoc);
+}
+
+// CXXNewExpr
+CXXNewExpr::CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
+ FunctionDecl *operatorDelete,
+ bool usualArrayDeleteWantsSize,
+ Expr **placementArgs, unsigned numPlaceArgs,
+ SourceRange typeIdParens, Expr *arraySize,
+ InitializationStyle initializationStyle,
+ Expr *initializer, QualType ty,
+ TypeSourceInfo *allocatedTypeInfo,
+ SourceLocation startLoc, SourceRange directInitRange)
+ : Expr(CXXNewExprClass, ty, VK_RValue, OK_Ordinary,
+ ty->isDependentType(), ty->isDependentType(),
+ ty->isInstantiationDependentType(),
+ ty->containsUnexpandedParameterPack()),
+ SubExprs(0), OperatorNew(operatorNew), OperatorDelete(operatorDelete),
+ AllocatedTypeInfo(allocatedTypeInfo), TypeIdParens(typeIdParens),
+ StartLoc(startLoc), DirectInitRange(directInitRange),
+ GlobalNew(globalNew), UsualArrayDeleteWantsSize(usualArrayDeleteWantsSize) {
+ assert((initializer != 0 || initializationStyle == NoInit) &&
+ "Only NoInit can have no initializer.");
+ StoredInitializationStyle = initializer ? initializationStyle + 1 : 0;
+ AllocateArgsArray(C, arraySize != 0, numPlaceArgs, initializer != 0);
+ unsigned i = 0;
+ if (Array) {
+ if (arraySize->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+
+ if (arraySize->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i++] = arraySize;
+ }
+
+ if (initializer) {
+ if (initializer->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+
+ if (initializer->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i++] = initializer;
+ }
+
+ for (unsigned j = 0; j < NumPlacementArgs; ++j) {
+ if (placementArgs[j]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (placementArgs[j]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ SubExprs[i++] = placementArgs[j];
+ }
+}
+
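+// Allocate the SubExprs array. The layout matches the constructor above: the
+// optional array size, then the optional initializer, then the placement
+// arguments.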
+void CXXNewExpr::AllocateArgsArray(ASTContext &C, bool isArray,
+ unsigned numPlaceArgs, bool hasInitializer){
+ assert(SubExprs == 0 && "SubExprs already allocated");
+ Array = isArray;
+ NumPlacementArgs = numPlaceArgs;
+
+ unsigned TotalSize = Array + hasInitializer + NumPlacementArgs;
+ SubExprs = new (C) Stmt*[TotalSize];
+}
+
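+// A new-expression only needs a null check when the selected operator new is
+// declared non-throwing, since that is the only case in which it may return
+// null rather than throwing.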
+bool CXXNewExpr::shouldNullCheckAllocation(ASTContext &Ctx) const {
+ return getOperatorNew()->getType()->
+ castAs<FunctionProtoType>()->isNothrow(Ctx);
+}
+
+SourceLocation CXXNewExpr::getEndLoc() const {
+ switch (getInitializationStyle()) {
+ case NoInit:
+ return AllocatedTypeInfo->getTypeLoc().getEndLoc();
+ case CallInit:
+ return DirectInitRange.getEnd();
+ case ListInit:
+ return getInitializer()->getSourceRange().getEnd();
+ }
+ llvm_unreachable("bogus initialization style");
+}
+
+// CXXDeleteExpr
+QualType CXXDeleteExpr::getDestroyedType() const {
+ const Expr *Arg = getArgument();
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+ if (ICE->getCastKind() != CK_UserDefinedConversion &&
+ ICE->getType()->isVoidPointerType())
+ Arg = ICE->getSubExpr();
+ else
+ break;
+ }
+ // The type-to-delete may not be a pointer if it's a dependent type.
+ const QualType ArgType = Arg->getType();
+
+ if (ArgType->isDependentType() && !ArgType->isPointerType())
+ return QualType();
+
+ return ArgType->getAs<PointerType>()->getPointeeType();
+}
+
+// CXXPseudoDestructorExpr
+PseudoDestructorTypeStorage::PseudoDestructorTypeStorage(TypeSourceInfo *Info)
+ : Type(Info)
+{
+ Location = Info->getTypeLoc().getLocalSourceRange().getBegin();
+}
+
+CXXPseudoDestructorExpr::CXXPseudoDestructorExpr(ASTContext &Context,
+ Expr *Base, bool isArrow, SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc, TypeSourceInfo *ScopeType,
+ SourceLocation ColonColonLoc, SourceLocation TildeLoc,
+ PseudoDestructorTypeStorage DestroyedType)
+ : Expr(CXXPseudoDestructorExprClass,
+ Context.getPointerType(Context.getFunctionType(Context.VoidTy, 0, 0,
+ FunctionProtoType::ExtProtoInfo())),
+ VK_RValue, OK_Ordinary,
+ /*isTypeDependent=*/(Base->isTypeDependent() ||
+ (DestroyedType.getTypeSourceInfo() &&
+ DestroyedType.getTypeSourceInfo()->getType()->isDependentType())),
+ /*isValueDependent=*/Base->isValueDependent(),
+ (Base->isInstantiationDependent() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent()) ||
+ (ScopeType &&
+ ScopeType->getType()->isInstantiationDependentType()) ||
+ (DestroyedType.getTypeSourceInfo() &&
+ DestroyedType.getTypeSourceInfo()->getType()
+ ->isInstantiationDependentType())),
+ // ContainsUnexpandedParameterPack
+ (Base->containsUnexpandedParameterPack() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()) ||
+ (ScopeType &&
+ ScopeType->getType()->containsUnexpandedParameterPack()) ||
+ (DestroyedType.getTypeSourceInfo() &&
+ DestroyedType.getTypeSourceInfo()->getType()
+ ->containsUnexpandedParameterPack()))),
+ Base(static_cast<Stmt *>(Base)), IsArrow(isArrow),
+ OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
+ ScopeType(ScopeType), ColonColonLoc(ColonColonLoc), TildeLoc(TildeLoc),
+ DestroyedType(DestroyedType) { }
+
+QualType CXXPseudoDestructorExpr::getDestroyedType() const {
+ if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo())
+ return TInfo->getType();
+
+ return QualType();
+}
+
+SourceRange CXXPseudoDestructorExpr::getSourceRange() const {
+ SourceLocation End = DestroyedType.getLocation();
+ if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo())
+ End = TInfo->getTypeLoc().getLocalSourceRange().getEnd();
+ return SourceRange(Base->getLocStart(), End);
+}
+
+// UnresolvedLookupExpr
+UnresolvedLookupExpr *
+UnresolvedLookupExpr::Create(ASTContext &C,
+ CXXRecordDecl *NamingClass,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ bool ADL,
+ const TemplateArgumentListInfo *Args,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End)
+{
+ assert(Args || TemplateKWLoc.isValid());
+ unsigned num_args = Args ? Args->size() : 0;
+ void *Mem = C.Allocate(sizeof(UnresolvedLookupExpr) +
+ ASTTemplateKWAndArgsInfo::sizeFor(num_args));
+ return new (Mem) UnresolvedLookupExpr(C, NamingClass, QualifierLoc,
+ TemplateKWLoc, NameInfo,
+ ADL, /*Overload*/ true, Args,
+ Begin, End, /*StdIsAssociated=*/false);
+}
+
+UnresolvedLookupExpr *
+UnresolvedLookupExpr::CreateEmpty(ASTContext &C,
+ bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs) {
+ std::size_t size = sizeof(UnresolvedLookupExpr);
+ if (HasTemplateKWAndArgsInfo)
+ size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
+
+ void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedLookupExpr>());
+ UnresolvedLookupExpr *E = new (Mem) UnresolvedLookupExpr(EmptyShell());
+ E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
+ return E;
+}
+
+OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End,
+ bool KnownDependent,
+ bool KnownInstantiationDependent,
+ bool KnownContainsUnexpandedParameterPack)
+ : Expr(K, C.OverloadTy, VK_LValue, OK_Ordinary, KnownDependent,
+ KnownDependent,
+ (KnownInstantiationDependent ||
+ NameInfo.isInstantiationDependent() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())),
+ (KnownContainsUnexpandedParameterPack ||
+ NameInfo.containsUnexpandedParameterPack() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()))),
+ NameInfo(NameInfo), QualifierLoc(QualifierLoc),
+ Results(0), NumResults(End - Begin),
+ HasTemplateKWAndArgsInfo(TemplateArgs != 0 || TemplateKWLoc.isValid())
+{
+ NumResults = End - Begin;
+ if (NumResults) {
+ // Determine whether this expression is type-dependent.
+ for (UnresolvedSetImpl::const_iterator I = Begin; I != End; ++I) {
+ if ((*I)->getDeclContext()->isDependentContext() ||
+ isa<UnresolvedUsingValueDecl>(*I)) {
+ ExprBits.TypeDependent = true;
+ ExprBits.ValueDependent = true;
+ }
+ }
+
+ Results = static_cast<DeclAccessPair *>(
+ C.Allocate(sizeof(DeclAccessPair) * NumResults,
+ llvm::alignOf<DeclAccessPair>()));
+ memcpy(Results, &*Begin.getIterator(),
+ NumResults * sizeof(DeclAccessPair));
+ }
+
+ // If we have explicit template arguments, check for dependent
+ // template arguments and whether they contain any unexpanded pack
+ // expansions.
+ if (TemplateArgs) {
+ bool Dependent = false;
+ bool InstantiationDependent = false;
+ bool ContainsUnexpandedParameterPack = false;
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs,
+ Dependent,
+ InstantiationDependent,
+ ContainsUnexpandedParameterPack);
+
+ if (Dependent) {
+ ExprBits.TypeDependent = true;
+ ExprBits.ValueDependent = true;
+ }
+ if (InstantiationDependent)
+ ExprBits.InstantiationDependent = true;
+ if (ContainsUnexpandedParameterPack)
+ ExprBits.ContainsUnexpandedParameterPack = true;
+ } else if (TemplateKWLoc.isValid()) {
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
+ }
+
+ if (isTypeDependent())
+ setType(C.DependentTy);
+}
+
+void OverloadExpr::initializeResults(ASTContext &C,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) {
+ assert(Results == 0 && "Results already initialized!");
+ NumResults = End - Begin;
+ if (NumResults) {
+ Results = static_cast<DeclAccessPair *>(
+                C.Allocate(sizeof(DeclAccessPair) * NumResults,
+                           llvm::alignOf<DeclAccessPair>()));
+ memcpy(Results, &*Begin.getIterator(),
+ NumResults * sizeof(DeclAccessPair));
+ }
+}
+
+CXXRecordDecl *OverloadExpr::getNamingClass() const {
+ if (isa<UnresolvedLookupExpr>(this))
+ return cast<UnresolvedLookupExpr>(this)->getNamingClass();
+ else
+ return cast<UnresolvedMemberExpr>(this)->getNamingClass();
+}
+
+// DependentScopeDeclRefExpr
+DependentScopeDeclRefExpr::DependentScopeDeclRefExpr(QualType T,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *Args)
+ : Expr(DependentScopeDeclRefExprClass, T, VK_LValue, OK_Ordinary,
+ true, true,
+ (NameInfo.isInstantiationDependent() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->isInstantiationDependent())),
+ (NameInfo.containsUnexpandedParameterPack() ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()))),
+ QualifierLoc(QualifierLoc), NameInfo(NameInfo),
+ HasTemplateKWAndArgsInfo(Args != 0 || TemplateKWLoc.isValid())
+{
+ if (Args) {
+ bool Dependent = true;
+ bool InstantiationDependent = true;
+ bool ContainsUnexpandedParameterPack
+ = ExprBits.ContainsUnexpandedParameterPack;
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *Args,
+ Dependent,
+ InstantiationDependent,
+ ContainsUnexpandedParameterPack);
+ ExprBits.ContainsUnexpandedParameterPack = ContainsUnexpandedParameterPack;
+ } else if (TemplateKWLoc.isValid()) {
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
+ }
+}
+
+DependentScopeDeclRefExpr *
+DependentScopeDeclRefExpr::Create(ASTContext &C,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *Args) {
+ std::size_t size = sizeof(DependentScopeDeclRefExpr);
+ if (Args)
+ size += ASTTemplateKWAndArgsInfo::sizeFor(Args->size());
+ else if (TemplateKWLoc.isValid())
+ size += ASTTemplateKWAndArgsInfo::sizeFor(0);
+ void *Mem = C.Allocate(size);
+ return new (Mem) DependentScopeDeclRefExpr(C.DependentTy, QualifierLoc,
+ TemplateKWLoc, NameInfo, Args);
+}
+
+DependentScopeDeclRefExpr *
+DependentScopeDeclRefExpr::CreateEmpty(ASTContext &C,
+ bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs) {
+ std::size_t size = sizeof(DependentScopeDeclRefExpr);
+ if (HasTemplateKWAndArgsInfo)
+ size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
+ void *Mem = C.Allocate(size);
+ DependentScopeDeclRefExpr *E
+ = new (Mem) DependentScopeDeclRefExpr(QualType(), NestedNameSpecifierLoc(),
+ SourceLocation(),
+ DeclarationNameInfo(), 0);
+ E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
+ return E;
+}
+
+SourceRange CXXConstructExpr::getSourceRange() const {
+ if (isa<CXXTemporaryObjectExpr>(this))
+ return cast<CXXTemporaryObjectExpr>(this)->getSourceRange();
+
+ if (ParenRange.isValid())
+ return SourceRange(Loc, ParenRange.getEnd());
+
+ SourceLocation End = Loc;
+ for (unsigned I = getNumArgs(); I > 0; --I) {
+ const Expr *Arg = getArg(I-1);
+ if (!Arg->isDefaultArgument()) {
+ SourceLocation NewEnd = Arg->getLocEnd();
+ if (NewEnd.isValid()) {
+ End = NewEnd;
+ break;
+ }
+ }
+ }
+
+ return SourceRange(Loc, End);
+}
+
+SourceRange CXXOperatorCallExpr::getSourceRange() const {
+ OverloadedOperatorKind Kind = getOperator();
+ if (Kind == OO_PlusPlus || Kind == OO_MinusMinus) {
+ if (getNumArgs() == 1)
+ // Prefix operator
+ return SourceRange(getOperatorLoc(),
+ getArg(0)->getSourceRange().getEnd());
+ else
+ // Postfix operator
+ return SourceRange(getArg(0)->getSourceRange().getBegin(),
+ getOperatorLoc());
+ } else if (Kind == OO_Arrow) {
+ return getArg(0)->getSourceRange();
+ } else if (Kind == OO_Call) {
+ return SourceRange(getArg(0)->getSourceRange().getBegin(), getRParenLoc());
+ } else if (Kind == OO_Subscript) {
+ return SourceRange(getArg(0)->getSourceRange().getBegin(), getRParenLoc());
+ } else if (getNumArgs() == 1) {
+ return SourceRange(getOperatorLoc(), getArg(0)->getSourceRange().getEnd());
+ } else if (getNumArgs() == 2) {
+ return SourceRange(getArg(0)->getSourceRange().getBegin(),
+ getArg(1)->getSourceRange().getEnd());
+ } else {
+ return SourceRange();
+ }
+}
+
+Expr *CXXMemberCallExpr::getImplicitObjectArgument() const {
+ if (const MemberExpr *MemExpr =
+ dyn_cast<MemberExpr>(getCallee()->IgnoreParens()))
+ return MemExpr->getBase();
+
+ // FIXME: Will eventually need to cope with member pointers.
+ return 0;
+}
+
+CXXMethodDecl *CXXMemberCallExpr::getMethodDecl() const {
+ if (const MemberExpr *MemExpr =
+ dyn_cast<MemberExpr>(getCallee()->IgnoreParens()))
+ return cast<CXXMethodDecl>(MemExpr->getMemberDecl());
+
+ // FIXME: Will eventually need to cope with member pointers.
+ return 0;
+}
+
+
+CXXRecordDecl *CXXMemberCallExpr::getRecordDecl() {
+ Expr* ThisArg = getImplicitObjectArgument();
+ if (!ThisArg)
+ return 0;
+
+ if (ThisArg->getType()->isAnyPointerType())
+ return ThisArg->getType()->getPointeeType()->getAsCXXRecordDecl();
+
+ return ThisArg->getType()->getAsCXXRecordDecl();
+}
+
+
+//===----------------------------------------------------------------------===//
+// Named casts
+//===----------------------------------------------------------------------===//
+
+/// getCastName - Get the name of the C++ cast being used, e.g.,
+/// "static_cast", "dynamic_cast", "reinterpret_cast", or
+/// "const_cast". The returned pointer must not be freed.
+const char *CXXNamedCastExpr::getCastName() const {
+ switch (getStmtClass()) {
+ case CXXStaticCastExprClass: return "static_cast";
+ case CXXDynamicCastExprClass: return "dynamic_cast";
+ case CXXReinterpretCastExprClass: return "reinterpret_cast";
+ case CXXConstCastExprClass: return "const_cast";
+ default: return "<invalid cast>";
+ }
+}
+
+CXXStaticCastExpr *CXXStaticCastExpr::Create(ASTContext &C, QualType T,
+ ExprValueKind VK,
+ CastKind K, Expr *Op,
+ const CXXCastPath *BasePath,
+ TypeSourceInfo *WrittenTy,
+ SourceLocation L,
+ SourceLocation RParenLoc) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer = C.Allocate(sizeof(CXXStaticCastExpr)
+ + PathSize * sizeof(CXXBaseSpecifier*));
+ CXXStaticCastExpr *E =
+ new (Buffer) CXXStaticCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
+ RParenLoc);
+ if (PathSize) E->setCastPath(*BasePath);
+ return E;
+}
+
+CXXStaticCastExpr *CXXStaticCastExpr::CreateEmpty(ASTContext &C,
+ unsigned PathSize) {
+ void *Buffer =
+ C.Allocate(sizeof(CXXStaticCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ return new (Buffer) CXXStaticCastExpr(EmptyShell(), PathSize);
+}
+
+CXXDynamicCastExpr *CXXDynamicCastExpr::Create(ASTContext &C, QualType T,
+ ExprValueKind VK,
+ CastKind K, Expr *Op,
+ const CXXCastPath *BasePath,
+ TypeSourceInfo *WrittenTy,
+ SourceLocation L,
+ SourceLocation RParenLoc) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer = C.Allocate(sizeof(CXXDynamicCastExpr)
+ + PathSize * sizeof(CXXBaseSpecifier*));
+ CXXDynamicCastExpr *E =
+ new (Buffer) CXXDynamicCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
+ RParenLoc);
+ if (PathSize) E->setCastPath(*BasePath);
+ return E;
+}
+
+CXXDynamicCastExpr *CXXDynamicCastExpr::CreateEmpty(ASTContext &C,
+ unsigned PathSize) {
+ void *Buffer =
+ C.Allocate(sizeof(CXXDynamicCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ return new (Buffer) CXXDynamicCastExpr(EmptyShell(), PathSize);
+}
+
+/// isAlwaysNull - Return whether the result of the dynamic_cast is proven
+/// to always be null. For example:
+///
+/// struct A { };
+/// struct B final : A { };
+/// struct C { };
+///
+/// C *f(B* b) { return dynamic_cast<C*>(b); }
+bool CXXDynamicCastExpr::isAlwaysNull() const
+{
+ QualType SrcType = getSubExpr()->getType();
+ QualType DestType = getType();
+
+ if (const PointerType *SrcPTy = SrcType->getAs<PointerType>()) {
+ SrcType = SrcPTy->getPointeeType();
+ DestType = DestType->castAs<PointerType>()->getPointeeType();
+ }
+
+ const CXXRecordDecl *SrcRD =
+ cast<CXXRecordDecl>(SrcType->castAs<RecordType>()->getDecl());
+
+ if (!SrcRD->hasAttr<FinalAttr>())
+ return false;
+
+ const CXXRecordDecl *DestRD =
+ cast<CXXRecordDecl>(DestType->castAs<RecordType>()->getDecl());
+
+ return !DestRD->isDerivedFrom(SrcRD);
+}
+
+CXXReinterpretCastExpr *
+CXXReinterpretCastExpr::Create(ASTContext &C, QualType T, ExprValueKind VK,
+ CastKind K, Expr *Op,
+ const CXXCastPath *BasePath,
+ TypeSourceInfo *WrittenTy, SourceLocation L,
+ SourceLocation RParenLoc) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer =
+ C.Allocate(sizeof(CXXReinterpretCastExpr) + PathSize * sizeof(CXXBaseSpecifier*));
+ CXXReinterpretCastExpr *E =
+ new (Buffer) CXXReinterpretCastExpr(T, VK, K, Op, PathSize, WrittenTy, L,
+ RParenLoc);
+ if (PathSize) E->setCastPath(*BasePath);
+ return E;
+}
+
+CXXReinterpretCastExpr *
+CXXReinterpretCastExpr::CreateEmpty(ASTContext &C, unsigned PathSize) {
+ void *Buffer = C.Allocate(sizeof(CXXReinterpretCastExpr)
+ + PathSize * sizeof(CXXBaseSpecifier*));
+ return new (Buffer) CXXReinterpretCastExpr(EmptyShell(), PathSize);
+}
+
+CXXConstCastExpr *CXXConstCastExpr::Create(ASTContext &C, QualType T,
+ ExprValueKind VK, Expr *Op,
+ TypeSourceInfo *WrittenTy,
+ SourceLocation L,
+ SourceLocation RParenLoc) {
+ return new (C) CXXConstCastExpr(T, VK, Op, WrittenTy, L, RParenLoc);
+}
+
+CXXConstCastExpr *CXXConstCastExpr::CreateEmpty(ASTContext &C) {
+ return new (C) CXXConstCastExpr(EmptyShell());
+}
+
+CXXFunctionalCastExpr *
+CXXFunctionalCastExpr::Create(ASTContext &C, QualType T, ExprValueKind VK,
+ TypeSourceInfo *Written, SourceLocation L,
+ CastKind K, Expr *Op, const CXXCastPath *BasePath,
+ SourceLocation R) {
+ unsigned PathSize = (BasePath ? BasePath->size() : 0);
+ void *Buffer = C.Allocate(sizeof(CXXFunctionalCastExpr)
+ + PathSize * sizeof(CXXBaseSpecifier*));
+ CXXFunctionalCastExpr *E =
+ new (Buffer) CXXFunctionalCastExpr(T, VK, Written, L, K, Op, PathSize, R);
+ if (PathSize) E->setCastPath(*BasePath);
+ return E;
+}
+
+CXXFunctionalCastExpr *
+CXXFunctionalCastExpr::CreateEmpty(ASTContext &C, unsigned PathSize) {
+ void *Buffer = C.Allocate(sizeof(CXXFunctionalCastExpr)
+ + PathSize * sizeof(CXXBaseSpecifier*));
+ return new (Buffer) CXXFunctionalCastExpr(EmptyShell(), PathSize);
+}
+
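+// Classify the literal operator from its call: zero arguments means the
+// template form, two arguments the string form, and a single argument is
+// distinguished by the parameter type (raw, character, integer, or floating).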
+UserDefinedLiteral::LiteralOperatorKind
+UserDefinedLiteral::getLiteralOperatorKind() const {
+ if (getNumArgs() == 0)
+ return LOK_Template;
+ if (getNumArgs() == 2)
+ return LOK_String;
+
+ assert(getNumArgs() == 1 && "unexpected #args in literal operator call");
+ QualType ParamTy =
+ cast<FunctionDecl>(getCalleeDecl())->getParamDecl(0)->getType();
+ if (ParamTy->isPointerType())
+ return LOK_Raw;
+ if (ParamTy->isAnyCharacterType())
+ return LOK_Character;
+ if (ParamTy->isIntegerType())
+ return LOK_Integer;
+ if (ParamTy->isFloatingType())
+ return LOK_Floating;
+
+ llvm_unreachable("unknown kind of literal operator");
+}
+
+Expr *UserDefinedLiteral::getCookedLiteral() {
+#ifndef NDEBUG
+ LiteralOperatorKind LOK = getLiteralOperatorKind();
+ assert(LOK != LOK_Template && LOK != LOK_Raw && "not a cooked literal");
+#endif
+ return getArg(0);
+}
+
+const IdentifierInfo *UserDefinedLiteral::getUDSuffix() const {
+ return cast<FunctionDecl>(getCalleeDecl())->getLiteralIdentifier();
+}
+
+CXXDefaultArgExpr *
+CXXDefaultArgExpr::Create(ASTContext &C, SourceLocation Loc,
+ ParmVarDecl *Param, Expr *SubExpr) {
+ void *Mem = C.Allocate(sizeof(CXXDefaultArgExpr) + sizeof(Stmt *));
+ return new (Mem) CXXDefaultArgExpr(CXXDefaultArgExprClass, Loc, Param,
+ SubExpr);
+}
+
+CXXTemporary *CXXTemporary::Create(ASTContext &C,
+ const CXXDestructorDecl *Destructor) {
+ return new (C) CXXTemporary(Destructor);
+}
+
+CXXBindTemporaryExpr *CXXBindTemporaryExpr::Create(ASTContext &C,
+ CXXTemporary *Temp,
+ Expr* SubExpr) {
+ assert((SubExpr->getType()->isRecordType() ||
+ SubExpr->getType()->isArrayType()) &&
+ "Expression bound to a temporary must have record or array type!");
+
+ return new (C) CXXBindTemporaryExpr(Temp, SubExpr);
+}
+
+CXXTemporaryObjectExpr::CXXTemporaryObjectExpr(ASTContext &C,
+ CXXConstructorDecl *Cons,
+ TypeSourceInfo *Type,
+ Expr **Args,
+ unsigned NumArgs,
+ SourceRange parenRange,
+ bool HadMultipleCandidates,
+ bool ZeroInitialization)
+ : CXXConstructExpr(C, CXXTemporaryObjectExprClass,
+ Type->getType().getNonReferenceType(),
+ Type->getTypeLoc().getBeginLoc(),
+ Cons, false, Args, NumArgs,
+ HadMultipleCandidates, /*FIXME*/false, ZeroInitialization,
+ CXXConstructExpr::CK_Complete, parenRange),
+ Type(Type) {
+}
+
+SourceRange CXXTemporaryObjectExpr::getSourceRange() const {
+ return SourceRange(Type->getTypeLoc().getBeginLoc(),
+ getParenRange().getEnd());
+}
+
+CXXConstructExpr *CXXConstructExpr::Create(ASTContext &C, QualType T,
+ SourceLocation Loc,
+ CXXConstructorDecl *D, bool Elidable,
+ Expr **Args, unsigned NumArgs,
+ bool HadMultipleCandidates,
+ bool ListInitialization,
+ bool ZeroInitialization,
+ ConstructionKind ConstructKind,
+ SourceRange ParenRange) {
+ return new (C) CXXConstructExpr(C, CXXConstructExprClass, T, Loc, D,
+ Elidable, Args, NumArgs,
+ HadMultipleCandidates, ListInitialization,
+ ZeroInitialization, ConstructKind,
+ ParenRange);
+}
+
+CXXConstructExpr::CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T,
+ SourceLocation Loc,
+ CXXConstructorDecl *D, bool elidable,
+ Expr **args, unsigned numargs,
+ bool HadMultipleCandidates,
+ bool ListInitialization,
+ bool ZeroInitialization,
+ ConstructionKind ConstructKind,
+ SourceRange ParenRange)
+ : Expr(SC, T, VK_RValue, OK_Ordinary,
+ T->isDependentType(), T->isDependentType(),
+ T->isInstantiationDependentType(),
+ T->containsUnexpandedParameterPack()),
+ Constructor(D), Loc(Loc), ParenRange(ParenRange), NumArgs(numargs),
+ Elidable(elidable), HadMultipleCandidates(HadMultipleCandidates),
+ ListInitialization(ListInitialization),
+ ZeroInitialization(ZeroInitialization),
+ ConstructKind(ConstructKind), Args(0)
+{
+ if (NumArgs) {
+ Args = new (C) Stmt*[NumArgs];
+
+ for (unsigned i = 0; i != NumArgs; ++i) {
+ assert(args[i] && "NULL argument in CXXConstructExpr");
+
+ if (args[i]->isValueDependent())
+ ExprBits.ValueDependent = true;
+ if (args[i]->isInstantiationDependent())
+ ExprBits.InstantiationDependent = true;
+ if (args[i]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ Args[i] = args[i];
+ }
+ }
+}
+
+LambdaExpr::Capture::Capture(SourceLocation Loc, bool Implicit,
+ LambdaCaptureKind Kind, VarDecl *Var,
+ SourceLocation EllipsisLoc)
+ : VarAndBits(Var, 0), Loc(Loc), EllipsisLoc(EllipsisLoc)
+{
+ unsigned Bits = 0;
+ if (Implicit)
+ Bits |= Capture_Implicit;
+
+ switch (Kind) {
+ case LCK_This:
+ assert(Var == 0 && "'this' capture cannot have a variable!");
+ break;
+
+ case LCK_ByCopy:
+ Bits |= Capture_ByCopy;
+ // Fall through
+ case LCK_ByRef:
+ assert(Var && "capture must have a variable!");
+ break;
+ }
+ VarAndBits.setInt(Bits);
+}
+
+LambdaCaptureKind LambdaExpr::Capture::getCaptureKind() const {
+ if (capturesThis())
+ return LCK_This;
+
+ return (VarAndBits.getInt() & Capture_ByCopy)? LCK_ByCopy : LCK_ByRef;
+}
+
+LambdaExpr::LambdaExpr(QualType T,
+ SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ ArrayRef<Capture> Captures,
+ bool ExplicitParams,
+ bool ExplicitResultType,
+ ArrayRef<Expr *> CaptureInits,
+ ArrayRef<VarDecl *> ArrayIndexVars,
+ ArrayRef<unsigned> ArrayIndexStarts,
+ SourceLocation ClosingBrace)
+ : Expr(LambdaExprClass, T, VK_RValue, OK_Ordinary,
+ T->isDependentType(), T->isDependentType(), T->isDependentType(),
+ /*ContainsUnexpandedParameterPack=*/false),
+ IntroducerRange(IntroducerRange),
+ NumCaptures(Captures.size()),
+ CaptureDefault(CaptureDefault),
+ ExplicitParams(ExplicitParams),
+ ExplicitResultType(ExplicitResultType),
+ ClosingBrace(ClosingBrace)
+{
+ assert(CaptureInits.size() == Captures.size() && "Wrong number of arguments");
+ CXXRecordDecl *Class = getLambdaClass();
+ CXXRecordDecl::LambdaDefinitionData &Data = Class->getLambdaData();
+
+ // FIXME: Propagate "has unexpanded parameter pack" bit.
+
+ // Copy captures.
+ ASTContext &Context = Class->getASTContext();
+ Data.NumCaptures = NumCaptures;
+ Data.NumExplicitCaptures = 0;
+ Data.Captures = (Capture *)Context.Allocate(sizeof(Capture) * NumCaptures);
+ Capture *ToCapture = Data.Captures;
+ for (unsigned I = 0, N = Captures.size(); I != N; ++I) {
+ if (Captures[I].isExplicit())
+ ++Data.NumExplicitCaptures;
+
+ *ToCapture++ = Captures[I];
+ }
+
+ // Copy initialization expressions for the non-static data members.
+ Stmt **Stored = getStoredStmts();
+ for (unsigned I = 0, N = CaptureInits.size(); I != N; ++I)
+ *Stored++ = CaptureInits[I];
+
+ // Copy the body of the lambda.
+ *Stored++ = getCallOperator()->getBody();
+
+ // Copy the array index variables, if any.
+ HasArrayIndexVars = !ArrayIndexVars.empty();
+ if (HasArrayIndexVars) {
+ assert(ArrayIndexStarts.size() == NumCaptures);
+ memcpy(getArrayIndexVars(), ArrayIndexVars.data(),
+ sizeof(VarDecl *) * ArrayIndexVars.size());
+ memcpy(getArrayIndexStarts(), ArrayIndexStarts.data(),
+ sizeof(unsigned) * Captures.size());
+ getArrayIndexStarts()[Captures.size()] = ArrayIndexVars.size();
+ }
+}
+
+LambdaExpr *LambdaExpr::Create(ASTContext &Context,
+ CXXRecordDecl *Class,
+ SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ ArrayRef<Capture> Captures,
+ bool ExplicitParams,
+ bool ExplicitResultType,
+ ArrayRef<Expr *> CaptureInits,
+ ArrayRef<VarDecl *> ArrayIndexVars,
+ ArrayRef<unsigned> ArrayIndexStarts,
+ SourceLocation ClosingBrace) {
+ // Determine the type of the expression (i.e., the type of the
+ // function object we're creating).
+ QualType T = Context.getTypeDeclType(Class);
+
+ unsigned Size = sizeof(LambdaExpr) + sizeof(Stmt *) * (Captures.size() + 1);
+ if (!ArrayIndexVars.empty())
+ Size += sizeof(VarDecl *) * ArrayIndexVars.size()
+ + sizeof(unsigned) * (Captures.size() + 1);
+ void *Mem = Context.Allocate(Size);
+ return new (Mem) LambdaExpr(T, IntroducerRange, CaptureDefault,
+ Captures, ExplicitParams, ExplicitResultType,
+ CaptureInits, ArrayIndexVars, ArrayIndexStarts,
+ ClosingBrace);
+}
+
+LambdaExpr *LambdaExpr::CreateDeserialized(ASTContext &C, unsigned NumCaptures,
+ unsigned NumArrayIndexVars) {
+ unsigned Size = sizeof(LambdaExpr) + sizeof(Stmt *) * (NumCaptures + 1);
+ if (NumArrayIndexVars)
+ Size += sizeof(VarDecl) * NumArrayIndexVars
+ + sizeof(unsigned) * (NumCaptures + 1);
+ void *Mem = C.Allocate(Size);
+ return new (Mem) LambdaExpr(EmptyShell(), NumCaptures, NumArrayIndexVars > 0);
+}
+
+LambdaExpr::capture_iterator LambdaExpr::capture_begin() const {
+ return getLambdaClass()->getLambdaData().Captures;
+}
+
+LambdaExpr::capture_iterator LambdaExpr::capture_end() const {
+ return capture_begin() + NumCaptures;
+}
+
+LambdaExpr::capture_iterator LambdaExpr::explicit_capture_begin() const {
+ return capture_begin();
+}
+
+LambdaExpr::capture_iterator LambdaExpr::explicit_capture_end() const {
+ struct CXXRecordDecl::LambdaDefinitionData &Data
+ = getLambdaClass()->getLambdaData();
+ return Data.Captures + Data.NumExplicitCaptures;
+}
+
+LambdaExpr::capture_iterator LambdaExpr::implicit_capture_begin() const {
+ return explicit_capture_end();
+}
+
+LambdaExpr::capture_iterator LambdaExpr::implicit_capture_end() const {
+ return capture_end();
+}
+
+ArrayRef<VarDecl *>
+LambdaExpr::getCaptureInitIndexVars(capture_init_iterator Iter) const {
+ assert(HasArrayIndexVars && "No array index-var data?");
+
+ unsigned Index = Iter - capture_init_begin();
+ assert(Index < getLambdaClass()->getLambdaData().NumCaptures &&
+ "Capture index out-of-range");
+ VarDecl **IndexVars = getArrayIndexVars();
+ unsigned *IndexStarts = getArrayIndexStarts();
+ return ArrayRef<VarDecl *>(IndexVars + IndexStarts[Index],
+ IndexVars + IndexStarts[Index + 1]);
+}
+
+CXXRecordDecl *LambdaExpr::getLambdaClass() const {
+ return getType()->getAsCXXRecordDecl();
+}
+
+CXXMethodDecl *LambdaExpr::getCallOperator() const {
+ CXXRecordDecl *Record = getLambdaClass();
+ DeclarationName Name
+ = Record->getASTContext().DeclarationNames.getCXXOperatorName(OO_Call);
+ DeclContext::lookup_result Calls = Record->lookup(Name);
+ assert(Calls.first != Calls.second && "Missing lambda call operator!");
+ CXXMethodDecl *Result = cast<CXXMethodDecl>(*Calls.first++);
+  assert(Calls.first == Calls.second && "More than one lambda call operator?");
+ return Result;
+}
+
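+// The body is stored in the last slot of the trailing statement array; it is
+// filled in lazily from the call operator the first time it is requested.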
+CompoundStmt *LambdaExpr::getBody() const {
+ if (!getStoredStmts()[NumCaptures])
+ getStoredStmts()[NumCaptures] = getCallOperator()->getBody();
+
+ return reinterpret_cast<CompoundStmt *>(getStoredStmts()[NumCaptures]);
+}
+
+bool LambdaExpr::isMutable() const {
+ return (getCallOperator()->getTypeQualifiers() & Qualifiers::Const) == 0;
+}
+
+ExprWithCleanups::ExprWithCleanups(Expr *subexpr,
+ ArrayRef<CleanupObject> objects)
+ : Expr(ExprWithCleanupsClass, subexpr->getType(),
+ subexpr->getValueKind(), subexpr->getObjectKind(),
+ subexpr->isTypeDependent(), subexpr->isValueDependent(),
+ subexpr->isInstantiationDependent(),
+ subexpr->containsUnexpandedParameterPack()),
+ SubExpr(subexpr) {
+ ExprWithCleanupsBits.NumObjects = objects.size();
+ for (unsigned i = 0, e = objects.size(); i != e; ++i)
+ getObjectsBuffer()[i] = objects[i];
+}
+
+ExprWithCleanups *ExprWithCleanups::Create(ASTContext &C, Expr *subexpr,
+ ArrayRef<CleanupObject> objects) {
+ size_t size = sizeof(ExprWithCleanups)
+ + objects.size() * sizeof(CleanupObject);
+ void *buffer = C.Allocate(size, llvm::alignOf<ExprWithCleanups>());
+ return new (buffer) ExprWithCleanups(subexpr, objects);
+}
+
+ExprWithCleanups::ExprWithCleanups(EmptyShell empty, unsigned numObjects)
+ : Expr(ExprWithCleanupsClass, empty) {
+ ExprWithCleanupsBits.NumObjects = numObjects;
+}
+
+ExprWithCleanups *ExprWithCleanups::Create(ASTContext &C, EmptyShell empty,
+ unsigned numObjects) {
+ size_t size = sizeof(ExprWithCleanups) + numObjects * sizeof(CleanupObject);
+ void *buffer = C.Allocate(size, llvm::alignOf<ExprWithCleanups>());
+ return new (buffer) ExprWithCleanups(empty, numObjects);
+}
+
+CXXUnresolvedConstructExpr::CXXUnresolvedConstructExpr(TypeSourceInfo *Type,
+ SourceLocation LParenLoc,
+ Expr **Args,
+ unsigned NumArgs,
+ SourceLocation RParenLoc)
+ : Expr(CXXUnresolvedConstructExprClass,
+ Type->getType().getNonReferenceType(),
+ (Type->getType()->isLValueReferenceType() ? VK_LValue
+ :Type->getType()->isRValueReferenceType()? VK_XValue
+ :VK_RValue),
+ OK_Ordinary,
+ Type->getType()->isDependentType(), true, true,
+ Type->getType()->containsUnexpandedParameterPack()),
+ Type(Type),
+ LParenLoc(LParenLoc),
+ RParenLoc(RParenLoc),
+ NumArgs(NumArgs) {
+ Stmt **StoredArgs = reinterpret_cast<Stmt **>(this + 1);
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ if (Args[I]->containsUnexpandedParameterPack())
+ ExprBits.ContainsUnexpandedParameterPack = true;
+
+ StoredArgs[I] = Args[I];
+ }
+}
+
+CXXUnresolvedConstructExpr *
+CXXUnresolvedConstructExpr::Create(ASTContext &C,
+ TypeSourceInfo *Type,
+ SourceLocation LParenLoc,
+ Expr **Args,
+ unsigned NumArgs,
+ SourceLocation RParenLoc) {
+ void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) +
+ sizeof(Expr *) * NumArgs);
+ return new (Mem) CXXUnresolvedConstructExpr(Type, LParenLoc,
+ Args, NumArgs, RParenLoc);
+}
+
+CXXUnresolvedConstructExpr *
+CXXUnresolvedConstructExpr::CreateEmpty(ASTContext &C, unsigned NumArgs) {
+ Stmt::EmptyShell Empty;
+ void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) +
+ sizeof(Expr *) * NumArgs);
+ return new (Mem) CXXUnresolvedConstructExpr(Empty, NumArgs);
+}
+
+SourceRange CXXUnresolvedConstructExpr::getSourceRange() const {
+ return SourceRange(Type->getTypeLoc().getBeginLoc(), RParenLoc);
+}
+
+CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(ASTContext &C,
+ Expr *Base, QualType BaseType,
+ bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierFoundInScope,
+ DeclarationNameInfo MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs)
+ : Expr(CXXDependentScopeMemberExprClass, C.DependentTy,
+ VK_LValue, OK_Ordinary, true, true, true,
+ ((Base && Base->containsUnexpandedParameterPack()) ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()
+ ->containsUnexpandedParameterPack()) ||
+ MemberNameInfo.containsUnexpandedParameterPack())),
+ Base(Base), BaseType(BaseType), IsArrow(IsArrow),
+ HasTemplateKWAndArgsInfo(TemplateArgs != 0 || TemplateKWLoc.isValid()),
+ OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
+ FirstQualifierFoundInScope(FirstQualifierFoundInScope),
+ MemberNameInfo(MemberNameInfo) {
+ if (TemplateArgs) {
+ bool Dependent = true;
+ bool InstantiationDependent = true;
+ bool ContainsUnexpandedParameterPack = false;
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc, *TemplateArgs,
+ Dependent,
+ InstantiationDependent,
+ ContainsUnexpandedParameterPack);
+ if (ContainsUnexpandedParameterPack)
+ ExprBits.ContainsUnexpandedParameterPack = true;
+ } else if (TemplateKWLoc.isValid()) {
+ getTemplateKWAndArgsInfo()->initializeFrom(TemplateKWLoc);
+ }
+}
+
+CXXDependentScopeMemberExpr::CXXDependentScopeMemberExpr(ASTContext &C,
+ Expr *Base, QualType BaseType,
+ bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ NamedDecl *FirstQualifierFoundInScope,
+ DeclarationNameInfo MemberNameInfo)
+ : Expr(CXXDependentScopeMemberExprClass, C.DependentTy,
+ VK_LValue, OK_Ordinary, true, true, true,
+ ((Base && Base->containsUnexpandedParameterPack()) ||
+ (QualifierLoc &&
+ QualifierLoc.getNestedNameSpecifier()->
+ containsUnexpandedParameterPack()) ||
+ MemberNameInfo.containsUnexpandedParameterPack())),
+ Base(Base), BaseType(BaseType), IsArrow(IsArrow),
+ HasTemplateKWAndArgsInfo(false),
+ OperatorLoc(OperatorLoc), QualifierLoc(QualifierLoc),
+ FirstQualifierFoundInScope(FirstQualifierFoundInScope),
+ MemberNameInfo(MemberNameInfo) { }
+
+CXXDependentScopeMemberExpr *
+CXXDependentScopeMemberExpr::Create(ASTContext &C,
+ Expr *Base, QualType BaseType, bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierFoundInScope,
+ DeclarationNameInfo MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ if (!TemplateArgs && !TemplateKWLoc.isValid())
+ return new (C) CXXDependentScopeMemberExpr(C, Base, BaseType,
+ IsArrow, OperatorLoc,
+ QualifierLoc,
+ FirstQualifierFoundInScope,
+ MemberNameInfo);
+
+ unsigned NumTemplateArgs = TemplateArgs ? TemplateArgs->size() : 0;
+ std::size_t size = sizeof(CXXDependentScopeMemberExpr)
+ + ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
+
+ void *Mem = C.Allocate(size, llvm::alignOf<CXXDependentScopeMemberExpr>());
+ return new (Mem) CXXDependentScopeMemberExpr(C, Base, BaseType,
+ IsArrow, OperatorLoc,
+ QualifierLoc,
+ TemplateKWLoc,
+ FirstQualifierFoundInScope,
+ MemberNameInfo, TemplateArgs);
+}
+
+CXXDependentScopeMemberExpr *
+CXXDependentScopeMemberExpr::CreateEmpty(ASTContext &C,
+ bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs) {
+ if (!HasTemplateKWAndArgsInfo)
+ return new (C) CXXDependentScopeMemberExpr(C, 0, QualType(),
+ 0, SourceLocation(),
+ NestedNameSpecifierLoc(), 0,
+ DeclarationNameInfo());
+
+ std::size_t size = sizeof(CXXDependentScopeMemberExpr) +
+ ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
+ void *Mem = C.Allocate(size, llvm::alignOf<CXXDependentScopeMemberExpr>());
+ CXXDependentScopeMemberExpr *E
+ = new (Mem) CXXDependentScopeMemberExpr(C, 0, QualType(),
+ 0, SourceLocation(),
+ NestedNameSpecifierLoc(),
+ SourceLocation(), 0,
+ DeclarationNameInfo(), 0);
+ E->HasTemplateKWAndArgsInfo = true;
+ return E;
+}
+
+bool CXXDependentScopeMemberExpr::isImplicitAccess() const {
+ if (Base == 0)
+ return true;
+
+ return cast<Expr>(Base)->isImplicitCXXThis();
+}
+
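+// Returns true if every declaration in [begin, end) is a non-static member
+// function (or a function template thereof), looking through using-shadow
+// declarations; an UnresolvedUsingValueDecl disqualifies the set.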
+static bool hasOnlyNonStaticMemberFunctions(UnresolvedSetIterator begin,
+ UnresolvedSetIterator end) {
+ do {
+ NamedDecl *decl = *begin;
+ if (isa<UnresolvedUsingValueDecl>(decl))
+ return false;
+ if (isa<UsingShadowDecl>(decl))
+ decl = cast<UsingShadowDecl>(decl)->getUnderlyingDecl();
+
+ // Unresolved member expressions should only contain methods and
+ // method templates.
+ assert(isa<CXXMethodDecl>(decl) || isa<FunctionTemplateDecl>(decl));
+
+ if (isa<FunctionTemplateDecl>(decl))
+ decl = cast<FunctionTemplateDecl>(decl)->getTemplatedDecl();
+ if (cast<CXXMethodDecl>(decl)->isStatic())
+ return false;
+ } while (++begin != end);
+
+ return true;
+}
+
+UnresolvedMemberExpr::UnresolvedMemberExpr(ASTContext &C,
+ bool HasUnresolvedUsing,
+ Expr *Base, QualType BaseType,
+ bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End)
+ : OverloadExpr(UnresolvedMemberExprClass, C, QualifierLoc, TemplateKWLoc,
+ MemberNameInfo, TemplateArgs, Begin, End,
+ // Dependent
+ ((Base && Base->isTypeDependent()) ||
+ BaseType->isDependentType()),
+ ((Base && Base->isInstantiationDependent()) ||
+ BaseType->isInstantiationDependentType()),
+ // Contains unexpanded parameter pack
+ ((Base && Base->containsUnexpandedParameterPack()) ||
+ BaseType->containsUnexpandedParameterPack())),
+ IsArrow(IsArrow), HasUnresolvedUsing(HasUnresolvedUsing),
+ Base(Base), BaseType(BaseType), OperatorLoc(OperatorLoc) {
+
+  // Check whether all of the members are non-static member functions; if so,
+  // give this expression bound-member type instead of overload type.
+ if (hasOnlyNonStaticMemberFunctions(Begin, End))
+ setType(C.BoundMemberTy);
+}
+
+bool UnresolvedMemberExpr::isImplicitAccess() const {
+ if (Base == 0)
+ return true;
+
+ return cast<Expr>(Base)->isImplicitCXXThis();
+}
+
+UnresolvedMemberExpr *
+UnresolvedMemberExpr::Create(ASTContext &C,
+ bool HasUnresolvedUsing,
+ Expr *Base, QualType BaseType, bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) {
+ std::size_t size = sizeof(UnresolvedMemberExpr);
+ if (TemplateArgs)
+ size += ASTTemplateKWAndArgsInfo::sizeFor(TemplateArgs->size());
+ else if (TemplateKWLoc.isValid())
+ size += ASTTemplateKWAndArgsInfo::sizeFor(0);
+
+ void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedMemberExpr>());
+ return new (Mem) UnresolvedMemberExpr(C,
+ HasUnresolvedUsing, Base, BaseType,
+ IsArrow, OperatorLoc, QualifierLoc, TemplateKWLoc,
+ MemberNameInfo, TemplateArgs, Begin, End);
+}
+
+UnresolvedMemberExpr *
+UnresolvedMemberExpr::CreateEmpty(ASTContext &C, bool HasTemplateKWAndArgsInfo,
+ unsigned NumTemplateArgs) {
+ std::size_t size = sizeof(UnresolvedMemberExpr);
+ if (HasTemplateKWAndArgsInfo)
+ size += ASTTemplateKWAndArgsInfo::sizeFor(NumTemplateArgs);
+
+ void *Mem = C.Allocate(size, llvm::alignOf<UnresolvedMemberExpr>());
+ UnresolvedMemberExpr *E = new (Mem) UnresolvedMemberExpr(EmptyShell());
+ E->HasTemplateKWAndArgsInfo = HasTemplateKWAndArgsInfo;
+ return E;
+}
+
+CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() const {
+ // Unlike for UnresolvedLookupExpr, it is very easy to re-derive this.
+
+ // If there was a nested name specifier, it names the naming class.
+ // It can't be dependent: after all, we were actually able to do the
+ // lookup.
+ CXXRecordDecl *Record = 0;
+ if (getQualifier()) {
+ const Type *T = getQualifier()->getAsType();
+ assert(T && "qualifier in member expression does not name type");
+ Record = T->getAsCXXRecordDecl();
+ assert(Record && "qualifier in member expression does not name record");
+ }
+ // Otherwise the naming class must have been the base class.
+ else {
+ QualType BaseType = getBaseType().getNonReferenceType();
+ if (isArrow()) {
+ const PointerType *PT = BaseType->getAs<PointerType>();
+ assert(PT && "base of arrow member access is not pointer");
+ BaseType = PT->getPointeeType();
+ }
+
+ Record = BaseType->getAsCXXRecordDecl();
+ assert(Record && "base of member expression does not name record");
+ }
+
+ return Record;
+}
+
+SubstNonTypeTemplateParmPackExpr::
+SubstNonTypeTemplateParmPackExpr(QualType T,
+ NonTypeTemplateParmDecl *Param,
+ SourceLocation NameLoc,
+ const TemplateArgument &ArgPack)
+ : Expr(SubstNonTypeTemplateParmPackExprClass, T, VK_RValue, OK_Ordinary,
+ true, true, true, true),
+ Param(Param), Arguments(ArgPack.pack_begin()),
+ NumArguments(ArgPack.pack_size()), NameLoc(NameLoc) { }
+
+TemplateArgument SubstNonTypeTemplateParmPackExpr::getArgumentPack() const {
+ return TemplateArgument(Arguments, NumArguments);
+}
+
+TypeTraitExpr::TypeTraitExpr(QualType T, SourceLocation Loc, TypeTrait Kind,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc,
+ bool Value)
+ : Expr(TypeTraitExprClass, T, VK_RValue, OK_Ordinary,
+ /*TypeDependent=*/false,
+ /*ValueDependent=*/false,
+ /*InstantiationDependent=*/false,
+ /*ContainsUnexpandedParameterPack=*/false),
+ Loc(Loc), RParenLoc(RParenLoc)
+{
+ TypeTraitExprBits.Kind = Kind;
+ TypeTraitExprBits.Value = Value;
+ TypeTraitExprBits.NumArgs = Args.size();
+
+ TypeSourceInfo **ToArgs = getTypeSourceInfos();
+
+ for (unsigned I = 0, N = Args.size(); I != N; ++I) {
+ if (Args[I]->getType()->isDependentType())
+ setValueDependent(true);
+ if (Args[I]->getType()->isInstantiationDependentType())
+ setInstantiationDependent(true);
+ if (Args[I]->getType()->containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack(true);
+
+ ToArgs[I] = Args[I];
+ }
+}
+
+TypeTraitExpr *TypeTraitExpr::Create(ASTContext &C, QualType T,
+ SourceLocation Loc,
+ TypeTrait Kind,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc,
+ bool Value) {
+ unsigned Size = sizeof(TypeTraitExpr) + sizeof(TypeSourceInfo*) * Args.size();
+ void *Mem = C.Allocate(Size);
+ return new (Mem) TypeTraitExpr(T, Loc, Kind, Args, RParenLoc, Value);
+}
+
+TypeTraitExpr *TypeTraitExpr::CreateDeserialized(ASTContext &C,
+ unsigned NumArgs) {
+ unsigned Size = sizeof(TypeTraitExpr) + sizeof(TypeSourceInfo*) * NumArgs;
+ void *Mem = C.Allocate(Size);
+ return new (Mem) TypeTraitExpr(EmptyShell());
+}
+
+void ArrayTypeTraitExpr::anchor() { }
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
new file mode 100644
index 0000000..b091e19
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ExprClassification.cpp
@@ -0,0 +1,644 @@
+//===--- ExprClassification.cpp - Expression Classification ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements Expr::classify.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/ErrorHandling.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+using namespace clang;
+
+typedef Expr::Classification Cl;
+
+static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E);
+static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D);
+static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T);
+static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E);
+static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E);
+static Cl::Kinds ClassifyConditional(ASTContext &Ctx,
+ const Expr *trueExpr,
+ const Expr *falseExpr);
+static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
+ Cl::Kinds Kind, SourceLocation &Loc);
+
+static Cl::Kinds ClassifyExprValueKind(const LangOptions &Lang,
+ const Expr *E,
+ ExprValueKind Kind) {
+ switch (Kind) {
+ case VK_RValue:
+ return Lang.CPlusPlus && E->getType()->isRecordType() ?
+ Cl::CL_ClassTemporary : Cl::CL_PRValue;
+ case VK_LValue:
+ return Cl::CL_LValue;
+ case VK_XValue:
+ return Cl::CL_XValue;
+ }
+ llvm_unreachable("Invalid value category of implicit cast.");
+}
+
+Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const {
+ assert(!TR->isReferenceType() && "Expressions can't have reference type.");
+
+ Cl::Kinds kind = ClassifyInternal(Ctx, this);
+ // C99 6.3.2.1: An lvalue is an expression with an object type or an
+ // incomplete type other than void.
+ if (!Ctx.getLangOpts().CPlusPlus) {
+ // Thus, no functions.
+ if (TR->isFunctionType() || TR == Ctx.OverloadTy)
+ kind = Cl::CL_Function;
+ // No void either, but qualified void is OK because it is "other than void".
+ // Void "lvalues" are classified as addressable void values, which are void
+ // expressions whose address can be taken.
+ else if (TR->isVoidType() && !TR.hasQualifiers())
+ kind = (kind == Cl::CL_LValue ? Cl::CL_AddressableVoid : Cl::CL_Void);
+ }
+
+  // In asserts builds, check that the computed classification agrees with the
+  // expression's stored value kind.
+ switch (kind) {
+ case Cl::CL_LValue: assert(getValueKind() == VK_LValue); break;
+ case Cl::CL_XValue: assert(getValueKind() == VK_XValue); break;
+ case Cl::CL_Function:
+ case Cl::CL_Void:
+ case Cl::CL_AddressableVoid:
+ case Cl::CL_DuplicateVectorComponents:
+ case Cl::CL_MemberFunction:
+ case Cl::CL_SubObjCPropertySetting:
+ case Cl::CL_ClassTemporary:
+ case Cl::CL_ObjCMessageRValue:
+ case Cl::CL_PRValue: assert(getValueKind() == VK_RValue); break;
+ }
+
+ Cl::ModifiableType modifiable = Cl::CM_Untested;
+ if (Loc)
+ modifiable = IsModifiable(Ctx, this, kind, *Loc);
+ return Classification(kind, modifiable);
+}
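An editorial illustration of the C-only adjustments made above (assumed declarations, not part of the patch):

    void f(void);
    void *q;
    const void *p;
    // f        -> CL_Function        (functions are not lvalues in C)
    // *q       -> CL_AddressableVoid (a void "lvalue" whose address may be taken)
    // (void)0  -> CL_Void            (a plain void rvalue)
    // *p       -> CL_LValue          (qualified void counts as "other than void")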
+
+static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
+ // This function takes the first stab at classifying expressions.
+ const LangOptions &Lang = Ctx.getLangOpts();
+
+ switch (E->getStmtClass()) {
+ case Stmt::NoStmtClass:
+#define ABSTRACT_STMT(Kind)
+#define STMT(Kind, Base) case Expr::Kind##Class:
+#define EXPR(Kind, Base)
+#include "clang/AST/StmtNodes.inc"
+ llvm_unreachable("cannot classify a statement");
+
+ // First come the expressions that are always lvalues, unconditionally.
+ case Expr::ObjCIsaExprClass:
+ // C++ [expr.prim.general]p1: A string literal is an lvalue.
+ case Expr::StringLiteralClass:
+ // @encode is equivalent to its string
+ case Expr::ObjCEncodeExprClass:
+ // __func__ and friends are too.
+ case Expr::PredefinedExprClass:
+ // Property references are lvalues
+ case Expr::ObjCSubscriptRefExprClass:
+ case Expr::ObjCPropertyRefExprClass:
+ // C++ [expr.typeid]p1: The result of a typeid expression is an lvalue of...
+ case Expr::CXXTypeidExprClass:
+ // Unresolved lookups get classified as lvalues.
+ // FIXME: Is this wise? Should they get their own kind?
+ case Expr::UnresolvedLookupExprClass:
+ case Expr::UnresolvedMemberExprClass:
+ case Expr::CXXDependentScopeMemberExprClass:
+ case Expr::DependentScopeDeclRefExprClass:
+ // ObjC instance variables are lvalues
+ // FIXME: ObjC++0x might have different rules
+ case Expr::ObjCIvarRefExprClass:
+ return Cl::CL_LValue;
+
+ // C99 6.5.2.5p5 says that compound literals are lvalues.
+ // In C++, they're class temporaries.
+ case Expr::CompoundLiteralExprClass:
+ return Ctx.getLangOpts().CPlusPlus? Cl::CL_ClassTemporary
+ : Cl::CL_LValue;
+
+ // Expressions that are prvalues.
+ case Expr::CXXBoolLiteralExprClass:
+ case Expr::CXXPseudoDestructorExprClass:
+ case Expr::UnaryExprOrTypeTraitExprClass:
+ case Expr::CXXNewExprClass:
+ case Expr::CXXThisExprClass:
+ case Expr::CXXNullPtrLiteralExprClass:
+ case Expr::ImaginaryLiteralClass:
+ case Expr::GNUNullExprClass:
+ case Expr::OffsetOfExprClass:
+ case Expr::CXXThrowExprClass:
+ case Expr::ShuffleVectorExprClass:
+ case Expr::IntegerLiteralClass:
+ case Expr::CharacterLiteralClass:
+ case Expr::AddrLabelExprClass:
+ case Expr::CXXDeleteExprClass:
+ case Expr::ImplicitValueInitExprClass:
+ case Expr::BlockExprClass:
+ case Expr::FloatingLiteralClass:
+ case Expr::CXXNoexceptExprClass:
+ case Expr::CXXScalarValueInitExprClass:
+ case Expr::UnaryTypeTraitExprClass:
+ case Expr::BinaryTypeTraitExprClass:
+ case Expr::TypeTraitExprClass:
+ case Expr::ArrayTypeTraitExprClass:
+ case Expr::ExpressionTraitExprClass:
+ case Expr::ObjCSelectorExprClass:
+ case Expr::ObjCProtocolExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCNumericLiteralClass:
+ case Expr::ObjCArrayLiteralClass:
+ case Expr::ObjCDictionaryLiteralClass:
+ case Expr::ObjCBoolLiteralExprClass:
+ case Expr::ParenListExprClass:
+ case Expr::SizeOfPackExprClass:
+ case Expr::SubstNonTypeTemplateParmPackExprClass:
+ case Expr::AsTypeExprClass:
+ case Expr::ObjCIndirectCopyRestoreExprClass:
+ case Expr::AtomicExprClass:
+ return Cl::CL_PRValue;
+
+ // Next come the complicated cases.
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ return ClassifyInternal(Ctx,
+ cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
+
+ // C++ [expr.sub]p1: The result is an lvalue of type "T".
+ // However, subscripting vector types is more like member access.
+ case Expr::ArraySubscriptExprClass:
+ if (cast<ArraySubscriptExpr>(E)->getBase()->getType()->isVectorType())
+ return ClassifyInternal(Ctx, cast<ArraySubscriptExpr>(E)->getBase());
+ return Cl::CL_LValue;
+
+ // C++ [expr.prim.general]p3: The result is an lvalue if the entity is a
+ // function or variable and a prvalue otherwise.
+ case Expr::DeclRefExprClass:
+ if (E->getType() == Ctx.UnknownAnyTy)
+ return isa<FunctionDecl>(cast<DeclRefExpr>(E)->getDecl())
+ ? Cl::CL_PRValue : Cl::CL_LValue;
+ return ClassifyDecl(Ctx, cast<DeclRefExpr>(E)->getDecl());
+
+ // Member access is complex.
+ case Expr::MemberExprClass:
+ return ClassifyMemberExpr(Ctx, cast<MemberExpr>(E));
+
+ case Expr::UnaryOperatorClass:
+ switch (cast<UnaryOperator>(E)->getOpcode()) {
+ // C++ [expr.unary.op]p1: The unary * operator performs indirection:
+ // [...] the result is an lvalue referring to the object or function
+ // to which the expression points.
+ case UO_Deref:
+ return Cl::CL_LValue;
+
+ // GNU extensions, simply look through them.
+ case UO_Extension:
+ return ClassifyInternal(Ctx, cast<UnaryOperator>(E)->getSubExpr());
+
+ // Treat _Real and _Imag basically as if they were member
+ // expressions: l-value only if the operand is a true l-value.
+ case UO_Real:
+ case UO_Imag: {
+ const Expr *Op = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
+ Cl::Kinds K = ClassifyInternal(Ctx, Op);
+ if (K != Cl::CL_LValue) return K;
+
+ if (isa<ObjCPropertyRefExpr>(Op))
+ return Cl::CL_SubObjCPropertySetting;
+ return Cl::CL_LValue;
+ }
+
+ // C++ [expr.pre.incr]p1: The result is the updated operand; it is an
+ // lvalue, [...]
+ // Not so in C.
+ case UO_PreInc:
+ case UO_PreDec:
+ return Lang.CPlusPlus ? Cl::CL_LValue : Cl::CL_PRValue;
+
+ default:
+ return Cl::CL_PRValue;
+ }
+
+ case Expr::OpaqueValueExprClass:
+ return ClassifyExprValueKind(Lang, E, E->getValueKind());
+
+ // Pseudo-object expressions can produce l-values with reference magic.
+ case Expr::PseudoObjectExprClass:
+ return ClassifyExprValueKind(Lang, E,
+ cast<PseudoObjectExpr>(E)->getValueKind());
+
+ // Implicit casts are lvalues if they're lvalue casts. Other than that, we
+ // only specifically record class temporaries.
+ case Expr::ImplicitCastExprClass:
+ return ClassifyExprValueKind(Lang, E, E->getValueKind());
+
+ // C++ [expr.prim.general]p4: The presence of parentheses does not affect
+ // whether the expression is an lvalue.
+ case Expr::ParenExprClass:
+ return ClassifyInternal(Ctx, cast<ParenExpr>(E)->getSubExpr());
+
+ // C11 6.5.1.1p4: [A generic selection] is an lvalue, a function designator,
+ // or a void expression if its result expression is, respectively, an
+ // lvalue, a function designator, or a void expression.
+ case Expr::GenericSelectionExprClass:
+ if (cast<GenericSelectionExpr>(E)->isResultDependent())
+ return Cl::CL_PRValue;
+ return ClassifyInternal(Ctx,cast<GenericSelectionExpr>(E)->getResultExpr());
+
+ case Expr::BinaryOperatorClass:
+ case Expr::CompoundAssignOperatorClass:
+ // C doesn't have any binary expressions that are lvalues.
+ if (Lang.CPlusPlus)
+ return ClassifyBinaryOp(Ctx, cast<BinaryOperator>(E));
+ return Cl::CL_PRValue;
+
+ case Expr::CallExprClass:
+ case Expr::CXXOperatorCallExprClass:
+ case Expr::CXXMemberCallExprClass:
+ case Expr::UserDefinedLiteralClass:
+ case Expr::CUDAKernelCallExprClass:
+ return ClassifyUnnamed(Ctx, cast<CallExpr>(E)->getCallReturnType());
+
+ // __builtin_choose_expr is equivalent to the chosen expression.
+ case Expr::ChooseExprClass:
+ return ClassifyInternal(Ctx, cast<ChooseExpr>(E)->getChosenSubExpr(Ctx));
+
+ // Extended vector element access is an lvalue unless there are duplicates
+ // in the shuffle expression.
+ case Expr::ExtVectorElementExprClass:
+ return cast<ExtVectorElementExpr>(E)->containsDuplicateElements() ?
+ Cl::CL_DuplicateVectorComponents : Cl::CL_LValue;
+
+ // Simply look at the actual default argument.
+ case Expr::CXXDefaultArgExprClass:
+ return ClassifyInternal(Ctx, cast<CXXDefaultArgExpr>(E)->getExpr());
+
+ // Same idea for temporary binding.
+ case Expr::CXXBindTemporaryExprClass:
+ return ClassifyInternal(Ctx, cast<CXXBindTemporaryExpr>(E)->getSubExpr());
+
+ // And the cleanups guard.
+ case Expr::ExprWithCleanupsClass:
+ return ClassifyInternal(Ctx, cast<ExprWithCleanups>(E)->getSubExpr());
+
+ // Casts depend completely on the target type. All casts work the same.
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ case Expr::ObjCBridgedCastExprClass:
+ // Only in C++ can casts be interesting at all.
+ if (!Lang.CPlusPlus) return Cl::CL_PRValue;
+ return ClassifyUnnamed(Ctx, cast<ExplicitCastExpr>(E)->getTypeAsWritten());
+
+ case Expr::CXXUnresolvedConstructExprClass:
+ return ClassifyUnnamed(Ctx,
+ cast<CXXUnresolvedConstructExpr>(E)->getTypeAsWritten());
+
+ case Expr::BinaryConditionalOperatorClass: {
+ if (!Lang.CPlusPlus) return Cl::CL_PRValue;
+ const BinaryConditionalOperator *co = cast<BinaryConditionalOperator>(E);
+ return ClassifyConditional(Ctx, co->getTrueExpr(), co->getFalseExpr());
+ }
+
+ case Expr::ConditionalOperatorClass: {
+ // Once again, only C++ is interesting.
+ if (!Lang.CPlusPlus) return Cl::CL_PRValue;
+ const ConditionalOperator *co = cast<ConditionalOperator>(E);
+ return ClassifyConditional(Ctx, co->getTrueExpr(), co->getFalseExpr());
+ }
+
+ // ObjC message sends are effectively function calls, if the target function
+ // is known.
+ case Expr::ObjCMessageExprClass:
+ if (const ObjCMethodDecl *Method =
+ cast<ObjCMessageExpr>(E)->getMethodDecl()) {
+ Cl::Kinds kind = ClassifyUnnamed(Ctx, Method->getResultType());
+ return (kind == Cl::CL_PRValue) ? Cl::CL_ObjCMessageRValue : kind;
+ }
+ return Cl::CL_PRValue;
+
+ // Some C++ expressions are always class temporaries.
+ case Expr::CXXConstructExprClass:
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::LambdaExprClass:
+ return Cl::CL_ClassTemporary;
+
+ case Expr::VAArgExprClass:
+ return ClassifyUnnamed(Ctx, E->getType());
+
+ case Expr::DesignatedInitExprClass:
+ return ClassifyInternal(Ctx, cast<DesignatedInitExpr>(E)->getInit());
+
+ case Expr::StmtExprClass: {
+ const CompoundStmt *S = cast<StmtExpr>(E)->getSubStmt();
+ if (const Expr *LastExpr = dyn_cast_or_null<Expr>(S->body_back()))
+ return ClassifyUnnamed(Ctx, LastExpr->getType());
+ return Cl::CL_PRValue;
+ }
+
+ case Expr::CXXUuidofExprClass:
+ return Cl::CL_LValue;
+
+ case Expr::PackExpansionExprClass:
+ return ClassifyInternal(Ctx, cast<PackExpansionExpr>(E)->getPattern());
+
+ case Expr::MaterializeTemporaryExprClass:
+ return cast<MaterializeTemporaryExpr>(E)->isBoundToLvalueReference()
+ ? Cl::CL_LValue
+ : Cl::CL_XValue;
+
+ case Expr::InitListExprClass:
+ // An init list can be an lvalue if it is bound to a reference and
+ // contains only one element. In that case, we look at that element
+ // for an exact classification. Init list creation takes care of the
+ // value kind for us, so we only need to fine-tune.
+ if (E->isRValue())
+ return ClassifyExprValueKind(Lang, E, E->getValueKind());
+ assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
+ "Only 1-element init lists can be glvalues.");
+ return ClassifyInternal(Ctx, cast<InitListExpr>(E)->getInit(0));
+ }
+
+ llvm_unreachable("unhandled expression kind in classification");
+}
+
+/// ClassifyDecl - Return the classification of an expression referencing the
+/// given declaration.
+static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) {
+ // C++ [expr.prim.general]p6: The result is an lvalue if the entity is a
+ // function, variable, or data member and a prvalue otherwise.
+ // In C, functions are not lvalues.
+ // In addition, NonTypeTemplateParmDecl derives from VarDecl but isn't an
+ // lvalue unless it's a reference type (C++ [temp.param]p6), so we need to
+ // special-case this.
+
+ if (isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance())
+ return Cl::CL_MemberFunction;
+
+ bool islvalue;
+ if (const NonTypeTemplateParmDecl *NTTParm =
+ dyn_cast<NonTypeTemplateParmDecl>(D))
+ islvalue = NTTParm->getType()->isReferenceType();
+ else
+ islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) ||
+ isa<IndirectFieldDecl>(D) ||
+ (Ctx.getLangOpts().CPlusPlus &&
+ (isa<FunctionDecl>(D) || isa<FunctionTemplateDecl>(D)));
+
+ return islvalue ? Cl::CL_LValue : Cl::CL_PRValue;
+}
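An editorial sketch of how a few representative declarations classify when named by a DeclRefExpr, following the rules above (assumed declarations):

    int x;                       // x -> CL_LValue  (variable)
    void h();                    // h -> CL_LValue in C++; in C it is later
                                 //      turned into CL_Function by ClassifyImpl
    enum E { e };                // e -> CL_PRValue (enumerator)
    template <int &R> int a() { return R; } // R -> CL_LValue  (reference NTTP)
    template <int N>  int b() { return N; } // N -> CL_PRValue (non-reference NTTP)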
+
+/// ClassifyUnnamed - Return the classification of an expression yielding an
+/// unnamed value of the given type. This applies in particular to function
+/// calls and casts.
+static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T) {
+ // In C, function calls are always rvalues.
+ if (!Ctx.getLangOpts().CPlusPlus) return Cl::CL_PRValue;
+
+ // C++ [expr.call]p10: A function call is an lvalue if the result type is an
+ // lvalue reference type or an rvalue reference to function type, an xvalue
+ // if the result type is an rvalue reference to object type, and a prvalue
+ // otherwise.
+ if (T->isLValueReferenceType())
+ return Cl::CL_LValue;
+ const RValueReferenceType *RV = T->getAs<RValueReferenceType>();
+ if (!RV) // Could still be a class temporary, though.
+ return T->isRecordType() ? Cl::CL_ClassTemporary : Cl::CL_PRValue;
+
+ return RV->getPointeeType()->isFunctionType() ? Cl::CL_LValue : Cl::CL_XValue;
+}
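An editorial illustration of [expr.call]p10 as implemented above (assumed declarations; in C every call is simply a prvalue):

    typedef void Fn();
    int   f();   // f() -> CL_PRValue        (non-reference, non-class result)
    int & g();   // g() -> CL_LValue         (lvalue reference result)
    int &&h();   // h() -> CL_XValue         (rvalue reference to object type)
    struct S;
    S     m();   // m() -> CL_ClassTemporary (class prvalue)
    Fn && k();   // k() -> CL_LValue         (rvalue reference to function type)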
+
+static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E) {
+ if (E->getType() == Ctx.UnknownAnyTy)
+ return (isa<FunctionDecl>(E->getMemberDecl())
+ ? Cl::CL_PRValue : Cl::CL_LValue);
+
+ // Handle C first, it's easier.
+ if (!Ctx.getLangOpts().CPlusPlus) {
+ // C99 6.5.2.3p3
+ // For dot access, the expression is an lvalue if the first part is. For
+ // arrow access, it always is an lvalue.
+ if (E->isArrow())
+ return Cl::CL_LValue;
+ // ObjC property accesses are not lvalues, but get special treatment.
+ Expr *Base = E->getBase()->IgnoreParens();
+ if (isa<ObjCPropertyRefExpr>(Base))
+ return Cl::CL_SubObjCPropertySetting;
+ return ClassifyInternal(Ctx, Base);
+ }
+
+ NamedDecl *Member = E->getMemberDecl();
+ // C++ [expr.ref]p3: E1->E2 is converted to the equivalent form (*(E1)).E2.
+ // C++ [expr.ref]p4: If E2 is declared to have type "reference to T", then
+ // E1.E2 is an lvalue.
+ if (ValueDecl *Value = dyn_cast<ValueDecl>(Member))
+ if (Value->getType()->isReferenceType())
+ return Cl::CL_LValue;
+
+ // Otherwise, one of the following rules applies.
+ // -- If E2 is a static member [...] then E1.E2 is an lvalue.
+ if (isa<VarDecl>(Member) && Member->getDeclContext()->isRecord())
+ return Cl::CL_LValue;
+
+ // -- If E2 is a non-static data member [...]. If E1 is an lvalue, then
+ // E1.E2 is an lvalue; if E1 is an xvalue, then E1.E2 is an xvalue;
+ // otherwise, it is a prvalue.
+ if (isa<FieldDecl>(Member)) {
+ // *E1 is an lvalue
+ if (E->isArrow())
+ return Cl::CL_LValue;
+ Expr *Base = E->getBase()->IgnoreParenImpCasts();
+ if (isa<ObjCPropertyRefExpr>(Base))
+ return Cl::CL_SubObjCPropertySetting;
+ return ClassifyInternal(Ctx, E->getBase());
+ }
+
+ // -- If E2 is a [...] member function, [...]
+ // -- If it refers to a static member function [...], then E1.E2 is an
+ // lvalue; [...]
+ // -- Otherwise [...] E1.E2 is a prvalue.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member))
+ return Method->isStatic() ? Cl::CL_LValue : Cl::CL_MemberFunction;
+
+ // -- If E2 is a member enumerator [...], the expression E1.E2 is a prvalue.
+ // So is everything else we haven't handled yet.
+ return Cl::CL_PRValue;
+}
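A short editorial example of the C++ member-access rules handled above (assumed declarations):

    struct A {
      int        n;
      static int s;
      void       mf();
    };
    extern A a;
    // a.n  -> CL_LValue         (lvalue base, non-static data member)
    // a.s  -> CL_LValue         (static data members are always lvalues)
    // a.mf -> CL_MemberFunction (reference to a bound non-static member function)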
+
+static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E) {
+ assert(Ctx.getLangOpts().CPlusPlus &&
+ "This is only relevant for C++.");
+ // C++ [expr.ass]p1: All [...] return an lvalue referring to the left operand.
+ // Except we override this for writes to ObjC properties.
+ if (E->isAssignmentOp())
+ return (E->getLHS()->getObjectKind() == OK_ObjCProperty
+ ? Cl::CL_PRValue : Cl::CL_LValue);
+
+ // C++ [expr.comma]p1: the result is of the same value category as its right
+ // operand, [...].
+ if (E->getOpcode() == BO_Comma)
+ return ClassifyInternal(Ctx, E->getRHS());
+
+ // C++ [expr.mptr.oper]p6: The result of a .* expression whose second operand
+ // is a pointer to a data member is of the same value category as its first
+ // operand.
+ if (E->getOpcode() == BO_PtrMemD)
+ return (E->getType()->isFunctionType() ||
+ E->hasPlaceholderType(BuiltinType::BoundMember))
+ ? Cl::CL_MemberFunction
+ : ClassifyInternal(Ctx, E->getLHS());
+
+ // C++ [expr.mptr.oper]p6: The result of an ->* expression is an lvalue if its
+ // second operand is a pointer to data member and a prvalue otherwise.
+ if (E->getOpcode() == BO_PtrMemI)
+ return (E->getType()->isFunctionType() ||
+ E->hasPlaceholderType(BuiltinType::BoundMember))
+ ? Cl::CL_MemberFunction
+ : Cl::CL_LValue;
+
+ // All other binary operations are prvalues.
+ return Cl::CL_PRValue;
+}
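An editorial sketch of the C++ binary-operator cases above (assumed declarations):

    struct S { int d; void f(); };
    extern S s;
    int  S::*pd = &S::d;
    void (S::*pf)() = &S::f;
    int a, b;
    // a = b      -> CL_LValue ([expr.ass]p1; in C this would be a prvalue)
    // (a, b)     -> whatever 'b' classifies as (here, CL_LValue)
    // s.*pd      -> same category as 's' (here, CL_LValue)
    // s.*pf      -> CL_MemberFunction (bound member function)
    // (&s)->*pd  -> CL_LValue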
+
+static Cl::Kinds ClassifyConditional(ASTContext &Ctx, const Expr *True,
+ const Expr *False) {
+ assert(Ctx.getLangOpts().CPlusPlus &&
+ "This is only relevant for C++.");
+
+ // C++ [expr.cond]p2
+ // If either the second or the third operand has type (cv) void, [...]
+ // the result [...] is a prvalue.
+ if (True->getType()->isVoidType() || False->getType()->isVoidType())
+ return Cl::CL_PRValue;
+
+ // Note that at this point, we have already performed all conversions
+ // according to [expr.cond]p3.
+ // C++ [expr.cond]p4: If the second and third operands are glvalues of the
+ // same value category [...], the result is of that [...] value category.
+ // C++ [expr.cond]p5: Otherwise, the result is a prvalue.
+ Cl::Kinds LCl = ClassifyInternal(Ctx, True),
+ RCl = ClassifyInternal(Ctx, False);
+ return LCl == RCl ? LCl : Cl::CL_PRValue;
+}
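An editorial illustration of the conditional-operator rules above (assumed declarations):

    extern bool c; extern int x, y; int &&rr();
    // c ? x : y       -> CL_LValue  (both operands are lvalues)
    // c ? x : 1       -> CL_PRValue (after conversions both operands are prvalues)
    // c ? rr() : rr() -> CL_XValue  (both operands are xvalues)
    // c ? throw 0 : x -> CL_PRValue (the void-typed operand is caught up front)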
+
+static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
+ Cl::Kinds Kind, SourceLocation &Loc) {
+ // As a general rule, we only care about lvalues. But there are some rvalues
+ // for which we want to generate special results.
+ if (Kind == Cl::CL_PRValue) {
+ // For the sake of better diagnostics, we want to specifically recognize
+ // use of the GCC cast-as-lvalue extension.
+ if (const ExplicitCastExpr *CE =
+ dyn_cast<ExplicitCastExpr>(E->IgnoreParens())) {
+ if (CE->getSubExpr()->IgnoreParenImpCasts()->isLValue()) {
+ Loc = CE->getExprLoc();
+ return Cl::CM_LValueCast;
+ }
+ }
+ }
+ if (Kind != Cl::CL_LValue)
+ return Cl::CM_RValue;
+
+ // This is the lvalue case.
+ // Functions are lvalues in C++, but not modifiable. (C++ [basic.lval]p6)
+ if (Ctx.getLangOpts().CPlusPlus && E->getType()->isFunctionType())
+ return Cl::CM_Function;
+
+ // Assignment to a property in ObjC is an implicit setter access. But a
+ // setter might not exist.
+ if (const ObjCPropertyRefExpr *Expr = dyn_cast<ObjCPropertyRefExpr>(E)) {
+ if (Expr->isImplicitProperty() && Expr->getImplicitPropertySetter() == 0)
+ return Cl::CM_NoSetterProperty;
+ }
+
+ CanQualType CT = Ctx.getCanonicalType(E->getType());
+ // Const stuff is obviously not modifiable.
+ if (CT.isConstQualified())
+ return Cl::CM_ConstQualified;
+
+ // Arrays are not modifiable, only their elements are.
+ if (CT->isArrayType())
+ return Cl::CM_ArrayType;
+ // Incomplete types are not modifiable.
+ if (CT->isIncompleteType())
+ return Cl::CM_IncompleteType;
+
+ // Records with any const fields (recursively) are not modifiable.
+ if (const RecordType *R = CT->getAs<RecordType>()) {
+ assert((E->getObjectKind() == OK_ObjCProperty ||
+ !Ctx.getLangOpts().CPlusPlus) &&
+ "C++ struct assignment should be resolved by the "
+ "copy assignment operator.");
+ if (R->hasConstFields())
+ return Cl::CM_ConstQualified;
+ }
+
+ return Cl::CM_Modifiable;
+}
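A few editorial examples of the modifiability results computed above (assumed declarations):

    const int ci = 0;      // ci  -> CM_ConstQualified
    int arr[4];            // arr -> CM_ArrayType (elements such as arr[0] are fine)
    extern struct Inc inc; // inc -> CM_IncompleteType
    void f();              // f   -> CM_Function (C++ only; an lvalue, not modifiable)
    int n;                 // n   -> CM_Modifiable
    // In C, '(int)n' is a prvalue; because its operand is an lvalue, the cast is
    // recognized specially (CM_LValueCast) so the diagnostic can mention GCC's
    // old cast-as-lvalue extension rather than a generic "not an lvalue" error.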
+
+Expr::LValueClassification Expr::ClassifyLValue(ASTContext &Ctx) const {
+ Classification VC = Classify(Ctx);
+ switch (VC.getKind()) {
+ case Cl::CL_LValue: return LV_Valid;
+ case Cl::CL_XValue: return LV_InvalidExpression;
+ case Cl::CL_Function: return LV_NotObjectType;
+ case Cl::CL_Void: return LV_InvalidExpression;
+ case Cl::CL_AddressableVoid: return LV_IncompleteVoidType;
+ case Cl::CL_DuplicateVectorComponents: return LV_DuplicateVectorComponents;
+ case Cl::CL_MemberFunction: return LV_MemberFunction;
+ case Cl::CL_SubObjCPropertySetting: return LV_SubObjCPropertySetting;
+ case Cl::CL_ClassTemporary: return LV_ClassTemporary;
+ case Cl::CL_ObjCMessageRValue: return LV_InvalidMessageExpression;
+ case Cl::CL_PRValue: return LV_InvalidExpression;
+ }
+ llvm_unreachable("Unhandled kind");
+}
+
+Expr::isModifiableLvalueResult
+Expr::isModifiableLvalue(ASTContext &Ctx, SourceLocation *Loc) const {
+ SourceLocation dummy;
+ Classification VC = ClassifyModifiable(Ctx, Loc ? *Loc : dummy);
+ switch (VC.getKind()) {
+ case Cl::CL_LValue: break;
+ case Cl::CL_XValue: return MLV_InvalidExpression;
+ case Cl::CL_Function: return MLV_NotObjectType;
+ case Cl::CL_Void: return MLV_InvalidExpression;
+ case Cl::CL_AddressableVoid: return MLV_IncompleteVoidType;
+ case Cl::CL_DuplicateVectorComponents: return MLV_DuplicateVectorComponents;
+ case Cl::CL_MemberFunction: return MLV_MemberFunction;
+ case Cl::CL_SubObjCPropertySetting: return MLV_SubObjCPropertySetting;
+ case Cl::CL_ClassTemporary: return MLV_ClassTemporary;
+ case Cl::CL_ObjCMessageRValue: return MLV_InvalidMessageExpression;
+ case Cl::CL_PRValue:
+ return VC.getModifiable() == Cl::CM_LValueCast ?
+ MLV_LValueCast : MLV_InvalidExpression;
+ }
+ assert(VC.getKind() == Cl::CL_LValue && "Unhandled kind");
+ switch (VC.getModifiable()) {
+ case Cl::CM_Untested: llvm_unreachable("Did not test modifiability");
+ case Cl::CM_Modifiable: return MLV_Valid;
+ case Cl::CM_RValue: llvm_unreachable("CM_RValue and CL_LValue don't match");
+ case Cl::CM_Function: return MLV_NotObjectType;
+ case Cl::CM_LValueCast:
+ llvm_unreachable("CM_LValueCast and CL_LValue don't match");
+ case Cl::CM_NoSetterProperty: return MLV_NoSetterProperty;
+ case Cl::CM_ConstQualified: return MLV_ConstQualified;
+ case Cl::CM_ArrayType: return MLV_ArrayType;
+ case Cl::CM_IncompleteType: return MLV_IncompleteType;
+ }
+ llvm_unreachable("Unhandled modifiable type");
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
new file mode 100644
index 0000000..01c9fe7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ExprConstant.cpp
@@ -0,0 +1,6894 @@
+//===--- ExprConstant.cpp - Expression Constant Evaluator -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Expr constant evaluator.
+//
+// Constant expression evaluation produces four main results:
+//
+// * A success/failure flag indicating whether constant folding was successful.
+// This is the 'bool' return value used by most of the code in this file. A
+// 'false' return value indicates that constant folding has failed, and any
+// appropriate diagnostic has already been produced.
+//
+// * An evaluated result, valid only if constant folding has not failed.
+//
+// * A flag indicating if evaluation encountered (unevaluated) side-effects.
+// These arise in cases such as (sideEffect(), 0) and (sideEffect() || 1),
+// where it is possible to determine the evaluated result regardless.
+//
+// * A set of notes indicating why the evaluation was not a constant expression
+// (under the C++11 rules only, at the moment), or, if folding failed too,
+// why the expression could not be folded.
+//
+// If we are checking for a potential constant expression, failure to constant
+// fold a potential constant sub-expression will be indicated by a 'false'
+// return value (the expression could not be folded) and no diagnostic (the
+// expression is not necessarily non-constant).
+//
+//===----------------------------------------------------------------------===//
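An editorial illustration (not part of the patch) of the four outcomes described in the header comment above, reusing its own (sideEffect(), 0) style of example:

    1 + 2 * 3            - folds successfully to 7; no side-effects, no notes
    (sideEffect(), 0)    - folds to 0, but the side-effects flag is set because
                           sideEffect() was not actually evaluated
    x + 1  (x not const) - folding fails; the notes explain why the expression
                           is not a constant expression
    when merely *checking* a potential constant expression, such a failure
    yields a 'false' result with no diagnostic at all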
+
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/SmallString.h"
+#include <cstring>
+#include <functional>
+
+using namespace clang;
+using llvm::APSInt;
+using llvm::APFloat;
+
+static bool IsGlobalLValue(APValue::LValueBase B);
+
+namespace {
+ struct LValue;
+ struct CallStackFrame;
+ struct EvalInfo;
+
+ static QualType getType(APValue::LValueBase B) {
+ if (!B) return QualType();
+ if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>())
+ return D->getType();
+ return B.get<const Expr*>()->getType();
+ }
+
+ /// Get an LValue path entry, which is known to not be an array index, as a
+ /// field or base class.
+ static
+ APValue::BaseOrMemberType getAsBaseOrMember(APValue::LValuePathEntry E) {
+ APValue::BaseOrMemberType Value;
+ Value.setFromOpaqueValue(E.BaseOrMember);
+ return Value;
+ }
+
+ /// Get an LValue path entry, which is known to not be an array index, as a
+ /// field declaration.
+ static const FieldDecl *getAsField(APValue::LValuePathEntry E) {
+ return dyn_cast<FieldDecl>(getAsBaseOrMember(E).getPointer());
+ }
+ /// Get an LValue path entry, which is known to not be an array index, as a
+ /// base class declaration.
+ static const CXXRecordDecl *getAsBaseClass(APValue::LValuePathEntry E) {
+ return dyn_cast<CXXRecordDecl>(getAsBaseOrMember(E).getPointer());
+ }
+ /// Determine whether this LValue path entry for a base class names a virtual
+ /// base class.
+ static bool isVirtualBaseClass(APValue::LValuePathEntry E) {
+ return getAsBaseOrMember(E).getInt();
+ }
+
+ /// Find the path length and type of the most-derived subobject in the given
+ /// path, and find the size of the containing array, if any.
+ static
+ unsigned findMostDerivedSubobject(ASTContext &Ctx, QualType Base,
+ ArrayRef<APValue::LValuePathEntry> Path,
+ uint64_t &ArraySize, QualType &Type) {
+ unsigned MostDerivedLength = 0;
+ Type = Base;
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ if (Type->isArrayType()) {
+ const ConstantArrayType *CAT =
+ cast<ConstantArrayType>(Ctx.getAsArrayType(Type));
+ Type = CAT->getElementType();
+ ArraySize = CAT->getSize().getZExtValue();
+ MostDerivedLength = I + 1;
+ } else if (Type->isAnyComplexType()) {
+ const ComplexType *CT = Type->castAs<ComplexType>();
+ Type = CT->getElementType();
+ ArraySize = 2;
+ MostDerivedLength = I + 1;
+ } else if (const FieldDecl *FD = getAsField(Path[I])) {
+ Type = FD->getType();
+ ArraySize = 0;
+ MostDerivedLength = I + 1;
+ } else {
+ // Path[I] describes a base class.
+ ArraySize = 0;
+ }
+ }
+ return MostDerivedLength;
+ }
+
+ // The order of this enum is important for diagnostics.
+ enum CheckSubobjectKind {
+ CSK_Base, CSK_Derived, CSK_Field, CSK_ArrayToPointer, CSK_ArrayIndex,
+ CSK_This, CSK_Real, CSK_Imag
+ };
+
+ /// A path from a glvalue to a subobject of that glvalue.
+ struct SubobjectDesignator {
+ /// True if the subobject was named in a manner not supported by C++11. Such
+ /// lvalues can still be folded, but they are not core constant expressions
+ /// and we cannot perform lvalue-to-rvalue conversions on them.
+ bool Invalid : 1;
+
+ /// Is this a pointer one past the end of an object?
+ bool IsOnePastTheEnd : 1;
+
+ /// The length of the path to the most-derived object of which this is a
+ /// subobject.
+ unsigned MostDerivedPathLength : 30;
+
+ /// The size of the array of which the most-derived object is an element, or
+ /// 0 if the most-derived object is not an array element.
+ uint64_t MostDerivedArraySize;
+
+ /// The type of the most derived object referred to by this address.
+ QualType MostDerivedType;
+
+ typedef APValue::LValuePathEntry PathEntry;
+
+ /// The entries on the path from the glvalue to the designated subobject.
+ SmallVector<PathEntry, 8> Entries;
+
+ SubobjectDesignator() : Invalid(true) {}
+
+ explicit SubobjectDesignator(QualType T)
+ : Invalid(false), IsOnePastTheEnd(false), MostDerivedPathLength(0),
+ MostDerivedArraySize(0), MostDerivedType(T) {}
+
+ SubobjectDesignator(ASTContext &Ctx, const APValue &V)
+ : Invalid(!V.isLValue() || !V.hasLValuePath()), IsOnePastTheEnd(false),
+ MostDerivedPathLength(0), MostDerivedArraySize(0) {
+ if (!Invalid) {
+ IsOnePastTheEnd = V.isLValueOnePastTheEnd();
+ ArrayRef<PathEntry> VEntries = V.getLValuePath();
+ Entries.insert(Entries.end(), VEntries.begin(), VEntries.end());
+ if (V.getLValueBase())
+ MostDerivedPathLength =
+ findMostDerivedSubobject(Ctx, getType(V.getLValueBase()),
+ V.getLValuePath(), MostDerivedArraySize,
+ MostDerivedType);
+ }
+ }
+
+ void setInvalid() {
+ Invalid = true;
+ Entries.clear();
+ }
+
+ /// Determine whether this is a one-past-the-end pointer.
+ bool isOnePastTheEnd() const {
+ if (IsOnePastTheEnd)
+ return true;
+ if (MostDerivedArraySize &&
+ Entries[MostDerivedPathLength - 1].ArrayIndex == MostDerivedArraySize)
+ return true;
+ return false;
+ }
+
+ /// Check that this refers to a valid subobject.
+ bool isValidSubobject() const {
+ if (Invalid)
+ return false;
+ return !isOnePastTheEnd();
+ }
+ /// Check that this refers to a valid subobject, and if not, produce a
+ /// relevant diagnostic and set the designator as invalid.
+ bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK);
+
+ /// Update this designator to refer to the first element within this array.
+ void addArrayUnchecked(const ConstantArrayType *CAT) {
+ PathEntry Entry;
+ Entry.ArrayIndex = 0;
+ Entries.push_back(Entry);
+
+ // This is a most-derived object.
+ MostDerivedType = CAT->getElementType();
+ MostDerivedArraySize = CAT->getSize().getZExtValue();
+ MostDerivedPathLength = Entries.size();
+ }
+ /// Update this designator to refer to the given base or member of this
+ /// object.
+ void addDeclUnchecked(const Decl *D, bool Virtual = false) {
+ PathEntry Entry;
+ APValue::BaseOrMemberType Value(D, Virtual);
+ Entry.BaseOrMember = Value.getOpaqueValue();
+ Entries.push_back(Entry);
+
+ // If this isn't a base class, it's a new most-derived object.
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
+ MostDerivedType = FD->getType();
+ MostDerivedArraySize = 0;
+ MostDerivedPathLength = Entries.size();
+ }
+ }
+ /// Update this designator to refer to the given complex component.
+ void addComplexUnchecked(QualType EltTy, bool Imag) {
+ PathEntry Entry;
+ Entry.ArrayIndex = Imag;
+ Entries.push_back(Entry);
+
+ // This is technically a most-derived object, though in practice this
+ // is unlikely to matter.
+ MostDerivedType = EltTy;
+ MostDerivedArraySize = 2;
+ MostDerivedPathLength = Entries.size();
+ }
+ void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E, uint64_t N);
+ /// Add N to the address of this subobject.
+ void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
+ if (Invalid) return;
+ if (MostDerivedPathLength == Entries.size() && MostDerivedArraySize) {
+ Entries.back().ArrayIndex += N;
+ if (Entries.back().ArrayIndex > MostDerivedArraySize) {
+ diagnosePointerArithmetic(Info, E, Entries.back().ArrayIndex);
+ setInvalid();
+ }
+ return;
+ }
+ // [expr.add]p4: For the purposes of these operators, a pointer to a
+ // nonarray object behaves the same as a pointer to the first element of
+ // an array of length one with the type of the object as its element type.
+ if (IsOnePastTheEnd && N == (uint64_t)-1)
+ IsOnePastTheEnd = false;
+ else if (!IsOnePastTheEnd && N == 1)
+ IsOnePastTheEnd = true;
+ else if (N != 0) {
+ diagnosePointerArithmetic(Info, E, uint64_t(IsOnePastTheEnd) + N);
+ setInvalid();
+ }
+ }
+ };
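A small editorial sketch (assumed declarations) of what a SubobjectDesignator ends up recording for a typical glvalue:

    struct Complex { int re, im; };
    struct G { Complex a[4]; };
    extern G g;
    // For the lvalue 'g.a[2].re', Entries holds, in order: the field 'a', the
    // array index 2, and the field 're'. MostDerivedType is 'int',
    // MostDerivedArraySize is 0, and adjustIndex() is what keeps any later
    // pointer arithmetic within the bounds of the enclosing array.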
+
+ /// A stack frame in the constexpr call stack.
+ struct CallStackFrame {
+ EvalInfo &Info;
+
+ /// Parent - The caller of this stack frame.
+ CallStackFrame *Caller;
+
+ /// CallLoc - The location of the call expression for this call.
+ SourceLocation CallLoc;
+
+ /// Callee - The function which was called.
+ const FunctionDecl *Callee;
+
+ /// Index - The call index of this call.
+ unsigned Index;
+
+ /// This - The binding for the this pointer in this call, if any.
+ const LValue *This;
+
+    /// Arguments - Parameter bindings for this function call, indexed by
+    /// parameters' function scope indices.
+ const APValue *Arguments;
+
+ typedef llvm::DenseMap<const Expr*, APValue> MapTy;
+ typedef MapTy::const_iterator temp_iterator;
+ /// Temporaries - Temporary lvalues materialized within this stack frame.
+ MapTy Temporaries;
+
+ CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
+ const FunctionDecl *Callee, const LValue *This,
+ const APValue *Arguments);
+ ~CallStackFrame();
+ };
+
+ /// A partial diagnostic which we might know in advance that we are not going
+ /// to emit.
+ class OptionalDiagnostic {
+ PartialDiagnostic *Diag;
+
+ public:
+ explicit OptionalDiagnostic(PartialDiagnostic *Diag = 0) : Diag(Diag) {}
+
+ template<typename T>
+ OptionalDiagnostic &operator<<(const T &v) {
+ if (Diag)
+ *Diag << v;
+ return *this;
+ }
+
+ OptionalDiagnostic &operator<<(const APSInt &I) {
+ if (Diag) {
+ llvm::SmallVector<char, 32> Buffer;
+ I.toString(Buffer);
+ *Diag << StringRef(Buffer.data(), Buffer.size());
+ }
+ return *this;
+ }
+
+ OptionalDiagnostic &operator<<(const APFloat &F) {
+ if (Diag) {
+ llvm::SmallVector<char, 32> Buffer;
+ F.toString(Buffer);
+ *Diag << StringRef(Buffer.data(), Buffer.size());
+ }
+ return *this;
+ }
+ };
+
+ /// EvalInfo - This is a private struct used by the evaluator to capture
+ /// information about a subexpression as it is folded. It retains information
+ /// about the AST context, but also maintains information about the folded
+ /// expression.
+ ///
+ /// If an expression could be evaluated, it is still possible it is not a C
+ /// "integer constant expression" or constant expression. If not, this struct
+ /// captures information about how and why not.
+ ///
+ /// One bit of information passed *into* the request for constant folding
+ /// indicates whether the subexpression is "evaluated" or not according to C
+ /// rules. For example, the RHS of (0 && foo()) is not evaluated. We can
+ /// evaluate the expression regardless of what the RHS is, but C only allows
+ /// certain things in certain situations.
+ struct EvalInfo {
+ ASTContext &Ctx;
+
+ /// EvalStatus - Contains information about the evaluation.
+ Expr::EvalStatus &EvalStatus;
+
+ /// CurrentCall - The top of the constexpr call stack.
+ CallStackFrame *CurrentCall;
+
+ /// CallStackDepth - The number of calls in the call stack right now.
+ unsigned CallStackDepth;
+
+ /// NextCallIndex - The next call index to assign.
+ unsigned NextCallIndex;
+
+ typedef llvm::DenseMap<const OpaqueValueExpr*, APValue> MapTy;
+ /// OpaqueValues - Values used as the common expression in a
+ /// BinaryConditionalOperator.
+ MapTy OpaqueValues;
+
+ /// BottomFrame - The frame in which evaluation started. This must be
+ /// initialized after CurrentCall and CallStackDepth.
+ CallStackFrame BottomFrame;
+
+ /// EvaluatingDecl - This is the declaration whose initializer is being
+ /// evaluated, if any.
+ const VarDecl *EvaluatingDecl;
+
+ /// EvaluatingDeclValue - This is the value being constructed for the
+ /// declaration whose initializer is being evaluated, if any.
+ APValue *EvaluatingDeclValue;
+
+ /// HasActiveDiagnostic - Was the previous diagnostic stored? If so, further
+ /// notes attached to it will also be stored, otherwise they will not be.
+ bool HasActiveDiagnostic;
+
+ /// CheckingPotentialConstantExpression - Are we checking whether the
+ /// expression is a potential constant expression? If so, some diagnostics
+ /// are suppressed.
+ bool CheckingPotentialConstantExpression;
+
+ EvalInfo(const ASTContext &C, Expr::EvalStatus &S)
+ : Ctx(const_cast<ASTContext&>(C)), EvalStatus(S), CurrentCall(0),
+ CallStackDepth(0), NextCallIndex(1),
+ BottomFrame(*this, SourceLocation(), 0, 0, 0),
+ EvaluatingDecl(0), EvaluatingDeclValue(0), HasActiveDiagnostic(false),
+ CheckingPotentialConstantExpression(false) {}
+
+ const APValue *getOpaqueValue(const OpaqueValueExpr *e) const {
+ MapTy::const_iterator i = OpaqueValues.find(e);
+ if (i == OpaqueValues.end()) return 0;
+ return &i->second;
+ }
+
+ void setEvaluatingDecl(const VarDecl *VD, APValue &Value) {
+ EvaluatingDecl = VD;
+ EvaluatingDeclValue = &Value;
+ }
+
+ const LangOptions &getLangOpts() const { return Ctx.getLangOpts(); }
+
+ bool CheckCallLimit(SourceLocation Loc) {
+ // Don't perform any constexpr calls (other than the call we're checking)
+ // when checking a potential constant expression.
+ if (CheckingPotentialConstantExpression && CallStackDepth > 1)
+ return false;
+ if (NextCallIndex == 0) {
+ // NextCallIndex has wrapped around.
+ Diag(Loc, diag::note_constexpr_call_limit_exceeded);
+ return false;
+ }
+ if (CallStackDepth <= getLangOpts().ConstexprCallDepth)
+ return true;
+ Diag(Loc, diag::note_constexpr_depth_limit_exceeded)
+ << getLangOpts().ConstexprCallDepth;
+ return false;
+ }
+
+ CallStackFrame *getCallFrame(unsigned CallIndex) {
+ assert(CallIndex && "no call index in getCallFrame");
+ // We will eventually hit BottomFrame, which has Index 1, so Frame can't
+ // be null in this loop.
+ CallStackFrame *Frame = CurrentCall;
+ while (Frame->Index > CallIndex)
+ Frame = Frame->Caller;
+ return (Frame->Index == CallIndex) ? Frame : 0;
+ }
+
+ private:
+ /// Add a diagnostic to the diagnostics list.
+ PartialDiagnostic &addDiag(SourceLocation Loc, diag::kind DiagId) {
+ PartialDiagnostic PD(DiagId, Ctx.getDiagAllocator());
+ EvalStatus.Diag->push_back(std::make_pair(Loc, PD));
+ return EvalStatus.Diag->back().second;
+ }
+
+ /// Add notes containing a call stack to the current point of evaluation.
+ void addCallStack(unsigned Limit);
+
+ public:
+ /// Diagnose that the evaluation cannot be folded.
+ OptionalDiagnostic Diag(SourceLocation Loc, diag::kind DiagId
+ = diag::note_invalid_subexpr_in_const_expr,
+ unsigned ExtraNotes = 0) {
+ // If we have a prior diagnostic, it will be noting that the expression
+ // isn't a constant expression. This diagnostic is more important.
+ // FIXME: We might want to show both diagnostics to the user.
+ if (EvalStatus.Diag) {
+ unsigned CallStackNotes = CallStackDepth - 1;
+ unsigned Limit = Ctx.getDiagnostics().getConstexprBacktraceLimit();
+ if (Limit)
+ CallStackNotes = std::min(CallStackNotes, Limit + 1);
+ if (CheckingPotentialConstantExpression)
+ CallStackNotes = 0;
+
+ HasActiveDiagnostic = true;
+ EvalStatus.Diag->clear();
+ EvalStatus.Diag->reserve(1 + ExtraNotes + CallStackNotes);
+ addDiag(Loc, DiagId);
+ if (!CheckingPotentialConstantExpression)
+ addCallStack(Limit);
+ return OptionalDiagnostic(&(*EvalStatus.Diag)[0].second);
+ }
+ HasActiveDiagnostic = false;
+ return OptionalDiagnostic();
+ }
+
+ OptionalDiagnostic Diag(const Expr *E, diag::kind DiagId
+ = diag::note_invalid_subexpr_in_const_expr,
+ unsigned ExtraNotes = 0) {
+ if (EvalStatus.Diag)
+ return Diag(E->getExprLoc(), DiagId, ExtraNotes);
+ HasActiveDiagnostic = false;
+ return OptionalDiagnostic();
+ }
+
+ /// Diagnose that the evaluation does not produce a C++11 core constant
+ /// expression.
+ template<typename LocArg>
+ OptionalDiagnostic CCEDiag(LocArg Loc, diag::kind DiagId
+ = diag::note_invalid_subexpr_in_const_expr,
+ unsigned ExtraNotes = 0) {
+ // Don't override a previous diagnostic.
+ if (!EvalStatus.Diag || !EvalStatus.Diag->empty()) {
+ HasActiveDiagnostic = false;
+ return OptionalDiagnostic();
+ }
+ return Diag(Loc, DiagId, ExtraNotes);
+ }
+
+ /// Add a note to a prior diagnostic.
+ OptionalDiagnostic Note(SourceLocation Loc, diag::kind DiagId) {
+ if (!HasActiveDiagnostic)
+ return OptionalDiagnostic();
+ return OptionalDiagnostic(&addDiag(Loc, DiagId));
+ }
+
+ /// Add a stack of notes to a prior diagnostic.
+ void addNotes(ArrayRef<PartialDiagnosticAt> Diags) {
+ if (HasActiveDiagnostic) {
+ EvalStatus.Diag->insert(EvalStatus.Diag->end(),
+ Diags.begin(), Diags.end());
+ }
+ }
+
+ /// Should we continue evaluation as much as possible after encountering a
+ /// construct which can't be folded?
+ bool keepEvaluatingAfterFailure() {
+ return CheckingPotentialConstantExpression &&
+ EvalStatus.Diag && EvalStatus.Diag->empty();
+ }
+ };
+
+ /// Object used to treat all foldable expressions as constant expressions.
+ struct FoldConstant {
+ bool Enabled;
+
+ explicit FoldConstant(EvalInfo &Info)
+ : Enabled(Info.EvalStatus.Diag && Info.EvalStatus.Diag->empty() &&
+ !Info.EvalStatus.HasSideEffects) {
+ }
+ // Treat the value we've computed since this object was created as constant.
+ void Fold(EvalInfo &Info) {
+ if (Enabled && !Info.EvalStatus.Diag->empty() &&
+ !Info.EvalStatus.HasSideEffects)
+ Info.EvalStatus.Diag->clear();
+ }
+ };
+
+ /// RAII object used to suppress diagnostics and side-effects from a
+ /// speculative evaluation.
+ class SpeculativeEvaluationRAII {
+ EvalInfo &Info;
+ Expr::EvalStatus Old;
+
+ public:
+ SpeculativeEvaluationRAII(EvalInfo &Info,
+ llvm::SmallVectorImpl<PartialDiagnosticAt>
+ *NewDiag = 0)
+ : Info(Info), Old(Info.EvalStatus) {
+ Info.EvalStatus.Diag = NewDiag;
+ }
+ ~SpeculativeEvaluationRAII() {
+ Info.EvalStatus = Old;
+ }
+ };
+}
+
+bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
+ CheckSubobjectKind CSK) {
+ if (Invalid)
+ return false;
+ if (isOnePastTheEnd()) {
+ Info.CCEDiag(E, diag::note_constexpr_past_end_subobject)
+ << CSK;
+ setInvalid();
+ return false;
+ }
+ return true;
+}
+
+void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
+ const Expr *E, uint64_t N) {
+ if (MostDerivedPathLength == Entries.size() && MostDerivedArraySize)
+ Info.CCEDiag(E, diag::note_constexpr_array_index)
+ << static_cast<int>(N) << /*array*/ 0
+ << static_cast<unsigned>(MostDerivedArraySize);
+ else
+ Info.CCEDiag(E, diag::note_constexpr_array_index)
+ << static_cast<int>(N) << /*non-array*/ 1;
+ setInvalid();
+}
+
+CallStackFrame::CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
+ const FunctionDecl *Callee, const LValue *This,
+ const APValue *Arguments)
+ : Info(Info), Caller(Info.CurrentCall), CallLoc(CallLoc), Callee(Callee),
+ Index(Info.NextCallIndex++), This(This), Arguments(Arguments) {
+ Info.CurrentCall = this;
+ ++Info.CallStackDepth;
+}
+
+CallStackFrame::~CallStackFrame() {
+ assert(Info.CurrentCall == this && "calls retired out of order");
+ --Info.CallStackDepth;
+ Info.CurrentCall = Caller;
+}
+
+/// Produce a string describing the given constexpr call.
+static void describeCall(CallStackFrame *Frame, llvm::raw_ostream &Out) {
+ unsigned ArgIndex = 0;
+ bool IsMemberCall = isa<CXXMethodDecl>(Frame->Callee) &&
+ !isa<CXXConstructorDecl>(Frame->Callee) &&
+ cast<CXXMethodDecl>(Frame->Callee)->isInstance();
+
+ if (!IsMemberCall)
+ Out << *Frame->Callee << '(';
+
+ for (FunctionDecl::param_const_iterator I = Frame->Callee->param_begin(),
+ E = Frame->Callee->param_end(); I != E; ++I, ++ArgIndex) {
+ if (ArgIndex > (unsigned)IsMemberCall)
+ Out << ", ";
+
+ const ParmVarDecl *Param = *I;
+ const APValue &Arg = Frame->Arguments[ArgIndex];
+ Arg.printPretty(Out, Frame->Info.Ctx, Param->getType());
+
+ if (ArgIndex == 0 && IsMemberCall)
+ Out << "->" << *Frame->Callee << '(';
+ }
+
+ Out << ')';
+}
+
+void EvalInfo::addCallStack(unsigned Limit) {
+ // Determine which calls to skip, if any.
+ unsigned ActiveCalls = CallStackDepth - 1;
+ unsigned SkipStart = ActiveCalls, SkipEnd = SkipStart;
+ if (Limit && Limit < ActiveCalls) {
+ SkipStart = Limit / 2 + Limit % 2;
+ SkipEnd = ActiveCalls - Limit / 2;
+ }
+
+ // Walk the call stack and add the diagnostics.
+ unsigned CallIdx = 0;
+ for (CallStackFrame *Frame = CurrentCall; Frame != &BottomFrame;
+ Frame = Frame->Caller, ++CallIdx) {
+ // Skip this call?
+ if (CallIdx >= SkipStart && CallIdx < SkipEnd) {
+ if (CallIdx == SkipStart) {
+ // Note that we're skipping calls.
+ addDiag(Frame->CallLoc, diag::note_constexpr_calls_suppressed)
+ << unsigned(ActiveCalls - Limit);
+ }
+ continue;
+ }
+
+ llvm::SmallVector<char, 128> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ describeCall(Frame, Out);
+ addDiag(Frame->CallLoc, diag::note_constexpr_call_here) << Out.str();
+ }
+}
+
+namespace {
+ struct ComplexValue {
+ private:
+ bool IsInt;
+
+ public:
+ APSInt IntReal, IntImag;
+ APFloat FloatReal, FloatImag;
+
+ ComplexValue() : FloatReal(APFloat::Bogus), FloatImag(APFloat::Bogus) {}
+
+ void makeComplexFloat() { IsInt = false; }
+ bool isComplexFloat() const { return !IsInt; }
+ APFloat &getComplexFloatReal() { return FloatReal; }
+ APFloat &getComplexFloatImag() { return FloatImag; }
+
+ void makeComplexInt() { IsInt = true; }
+ bool isComplexInt() const { return IsInt; }
+ APSInt &getComplexIntReal() { return IntReal; }
+ APSInt &getComplexIntImag() { return IntImag; }
+
+ void moveInto(APValue &v) const {
+ if (isComplexFloat())
+ v = APValue(FloatReal, FloatImag);
+ else
+ v = APValue(IntReal, IntImag);
+ }
+ void setFrom(const APValue &v) {
+ assert(v.isComplexFloat() || v.isComplexInt());
+ if (v.isComplexFloat()) {
+ makeComplexFloat();
+ FloatReal = v.getComplexFloatReal();
+ FloatImag = v.getComplexFloatImag();
+ } else {
+ makeComplexInt();
+ IntReal = v.getComplexIntReal();
+ IntImag = v.getComplexIntImag();
+ }
+ }
+ };
+
+ struct LValue {
+ APValue::LValueBase Base;
+ CharUnits Offset;
+ unsigned CallIndex;
+ SubobjectDesignator Designator;
+
+ const APValue::LValueBase getLValueBase() const { return Base; }
+ CharUnits &getLValueOffset() { return Offset; }
+ const CharUnits &getLValueOffset() const { return Offset; }
+ unsigned getLValueCallIndex() const { return CallIndex; }
+ SubobjectDesignator &getLValueDesignator() { return Designator; }
+ const SubobjectDesignator &getLValueDesignator() const { return Designator;}
+
+ void moveInto(APValue &V) const {
+ if (Designator.Invalid)
+ V = APValue(Base, Offset, APValue::NoLValuePath(), CallIndex);
+ else
+ V = APValue(Base, Offset, Designator.Entries,
+ Designator.IsOnePastTheEnd, CallIndex);
+ }
+ void setFrom(ASTContext &Ctx, const APValue &V) {
+ assert(V.isLValue());
+ Base = V.getLValueBase();
+ Offset = V.getLValueOffset();
+ CallIndex = V.getLValueCallIndex();
+ Designator = SubobjectDesignator(Ctx, V);
+ }
+
+ void set(APValue::LValueBase B, unsigned I = 0) {
+ Base = B;
+ Offset = CharUnits::Zero();
+ CallIndex = I;
+ Designator = SubobjectDesignator(getType(B));
+ }
+
+ // Check that this LValue is not based on a null pointer. If it is, produce
+ // a diagnostic and mark the designator as invalid.
+ bool checkNullPointer(EvalInfo &Info, const Expr *E,
+ CheckSubobjectKind CSK) {
+ if (Designator.Invalid)
+ return false;
+ if (!Base) {
+ Info.CCEDiag(E, diag::note_constexpr_null_subobject)
+ << CSK;
+ Designator.setInvalid();
+ return false;
+ }
+ return true;
+ }
+
+ // Check this LValue refers to an object. If not, set the designator to be
+ // invalid and emit a diagnostic.
+ bool checkSubobject(EvalInfo &Info, const Expr *E, CheckSubobjectKind CSK) {
+ // Outside C++11, do not build a designator referring to a subobject of
+ // any object: we won't use such a designator for anything.
+ if (!Info.getLangOpts().CPlusPlus0x)
+ Designator.setInvalid();
+ return checkNullPointer(Info, E, CSK) &&
+ Designator.checkSubobject(Info, E, CSK);
+ }
+
+ void addDecl(EvalInfo &Info, const Expr *E,
+ const Decl *D, bool Virtual = false) {
+ if (checkSubobject(Info, E, isa<FieldDecl>(D) ? CSK_Field : CSK_Base))
+ Designator.addDeclUnchecked(D, Virtual);
+ }
+ void addArray(EvalInfo &Info, const Expr *E, const ConstantArrayType *CAT) {
+ if (checkSubobject(Info, E, CSK_ArrayToPointer))
+ Designator.addArrayUnchecked(CAT);
+ }
+ void addComplex(EvalInfo &Info, const Expr *E, QualType EltTy, bool Imag) {
+ if (checkSubobject(Info, E, Imag ? CSK_Imag : CSK_Real))
+ Designator.addComplexUnchecked(EltTy, Imag);
+ }
+ void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
+ if (checkNullPointer(Info, E, CSK_ArrayIndex))
+ Designator.adjustIndex(Info, E, N);
+ }
+ };
+
+ struct MemberPtr {
+ MemberPtr() {}
+ explicit MemberPtr(const ValueDecl *Decl) :
+ DeclAndIsDerivedMember(Decl, false), Path() {}
+
+ /// The member or (direct or indirect) field referred to by this member
+ /// pointer, or 0 if this is a null member pointer.
+ const ValueDecl *getDecl() const {
+ return DeclAndIsDerivedMember.getPointer();
+ }
+ /// Is this actually a member of some type derived from the relevant class?
+ bool isDerivedMember() const {
+ return DeclAndIsDerivedMember.getInt();
+ }
+ /// Get the class which the declaration actually lives in.
+ const CXXRecordDecl *getContainingRecord() const {
+ return cast<CXXRecordDecl>(
+ DeclAndIsDerivedMember.getPointer()->getDeclContext());
+ }
+
+ void moveInto(APValue &V) const {
+ V = APValue(getDecl(), isDerivedMember(), Path);
+ }
+ void setFrom(const APValue &V) {
+ assert(V.isMemberPointer());
+ DeclAndIsDerivedMember.setPointer(V.getMemberPointerDecl());
+ DeclAndIsDerivedMember.setInt(V.isMemberPointerToDerivedMember());
+ Path.clear();
+ ArrayRef<const CXXRecordDecl*> P = V.getMemberPointerPath();
+ Path.insert(Path.end(), P.begin(), P.end());
+ }
+
+ /// DeclAndIsDerivedMember - The member declaration, and a flag indicating
+ /// whether the member is a member of some class derived from the class type
+ /// of the member pointer.
+ llvm::PointerIntPair<const ValueDecl*, 1, bool> DeclAndIsDerivedMember;
+ /// Path - The path of base/derived classes from the member declaration's
+ /// class (exclusive) to the class type of the member pointer (inclusive).
+ SmallVector<const CXXRecordDecl*, 4> Path;
+
+ /// Perform a cast towards the class of the Decl (either up or down the
+ /// hierarchy).
+ bool castBack(const CXXRecordDecl *Class) {
+ assert(!Path.empty());
+ const CXXRecordDecl *Expected;
+ if (Path.size() >= 2)
+ Expected = Path[Path.size() - 2];
+ else
+ Expected = getContainingRecord();
+ if (Expected->getCanonicalDecl() != Class->getCanonicalDecl()) {
+ // C++11 [expr.static.cast]p12: In a conversion from (D::*) to (B::*),
+ // if B does not contain the original member and is not a base or
+ // derived class of the class containing the original member, the result
+ // of the cast is undefined.
+ // C++11 [conv.mem]p2 does not cover this case for a cast from (B::*) to
+ // (D::*). We consider that to be a language defect.
+ return false;
+ }
+ Path.pop_back();
+ return true;
+ }
+ /// Perform a base-to-derived member pointer cast.
+ bool castToDerived(const CXXRecordDecl *Derived) {
+ if (!getDecl())
+ return true;
+ if (!isDerivedMember()) {
+ Path.push_back(Derived);
+ return true;
+ }
+ if (!castBack(Derived))
+ return false;
+ if (Path.empty())
+ DeclAndIsDerivedMember.setInt(false);
+ return true;
+ }
+ /// Perform a derived-to-base member pointer cast.
+ bool castToBase(const CXXRecordDecl *Base) {
+ if (!getDecl())
+ return true;
+ if (Path.empty())
+ DeclAndIsDerivedMember.setInt(true);
+ if (isDerivedMember()) {
+ Path.push_back(Base);
+ return true;
+ }
+ return castBack(Base);
+ }
+ };
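+ // For illustration only (hypothetical types B and D): the kind of
+ // source-level member-pointer conversions these helpers model:
+ //
+ //   struct B { int m; };
+ //   struct D : B {};
+ //   constexpr int D::*pd = &B::m;                     // B::* -> D::*,
+ //                                                     // Path becomes {D}
+ //   constexpr int B::*pb = static_cast<int B::*>(pd); // D::* -> B::*,
+ //                                                     // castBack pops D
+ //
+ // Casting towards a class that the stored path does not lead through makes
+ // castBack fail, and such a cast is not folded.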
+
+ /// Compare two member pointers, which are assumed to be of the same type.
+ static bool operator==(const MemberPtr &LHS, const MemberPtr &RHS) {
+ if (!LHS.getDecl() || !RHS.getDecl())
+ return !LHS.getDecl() && !RHS.getDecl();
+ if (LHS.getDecl()->getCanonicalDecl() != RHS.getDecl()->getCanonicalDecl())
+ return false;
+ return LHS.Path == RHS.Path;
+ }
+
+ /// Kinds of constant expression checking, for diagnostics.
+ enum CheckConstantExpressionKind {
+ CCEK_Constant, ///< A normal constant.
+ CCEK_ReturnValue, ///< A constexpr function return value.
+ CCEK_MemberInit ///< A constexpr constructor mem-initializer.
+ };
+}
+
+static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E);
+static bool EvaluateInPlace(APValue &Result, EvalInfo &Info,
+ const LValue &This, const Expr *E,
+ CheckConstantExpressionKind CCEK = CCEK_Constant,
+ bool AllowNonLiteralTypes = false);
+static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info);
+static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info);
+static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
+ EvalInfo &Info);
+static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info);
+static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info);
+static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
+ EvalInfo &Info);
+static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info);
+static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info);
+
+//===----------------------------------------------------------------------===//
+// Misc utilities
+//===----------------------------------------------------------------------===//
+
+/// Should this call expression be treated as a string literal?
+static bool IsStringLiteralCall(const CallExpr *E) {
+ unsigned Builtin = E->isBuiltinCall();
+ return (Builtin == Builtin::BI__builtin___CFStringMakeConstantString ||
+ Builtin == Builtin::BI__builtin___NSStringMakeConstantString);
+}
+
+static bool IsGlobalLValue(APValue::LValueBase B) {
+ // C++11 [expr.const]p3 An address constant expression is a prvalue core
+ // constant expression of pointer type that evaluates to...
+
+ // ... a null pointer value, or a prvalue core constant expression of type
+ // std::nullptr_t.
+ if (!B) return true;
+
+ if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
+ // ... the address of an object with static storage duration,
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ return VD->hasGlobalStorage();
+ // ... the address of a function,
+ return isa<FunctionDecl>(D);
+ }
+
+ const Expr *E = B.get<const Expr*>();
+ switch (E->getStmtClass()) {
+ default:
+ return false;
+ case Expr::CompoundLiteralExprClass: {
+ const CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
+ return CLE->isFileScope() && CLE->isLValue();
+ }
+ // A string literal has static storage duration.
+ case Expr::StringLiteralClass:
+ case Expr::PredefinedExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCEncodeExprClass:
+ case Expr::CXXTypeidExprClass:
+ return true;
+ case Expr::CallExprClass:
+ return IsStringLiteralCall(cast<CallExpr>(E));
+ // For GCC compatibility, &&label has static storage duration.
+ case Expr::AddrLabelExprClass:
+ return true;
+ // A Block literal expression may be used as the initialization value for
+ // Block variables at global or local static scope.
+ case Expr::BlockExprClass:
+ return !cast<BlockExpr>(E)->getBlockDecl()->hasCaptures();
+ case Expr::ImplicitValueInitExprClass:
+ // FIXME:
+ // We can never form an lvalue with an implicit value initialization as its
+ // base through expression evaluation, so these only appear in one case: the
+ // implicit variable declaration we invent when checking whether a constexpr
+ // constructor can produce a constant expression. We must assume that such
+ // an expression might be a global lvalue.
+ return true;
+ }
+}
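+ // For illustration only (hypothetical declarations): bases accepted here as
+ // global lvalues versus ones that are not:
+ //
+ //   static int g;
+ //   constexpr int *p1 = &g;            // OK: object with static storage
+ //   constexpr const char *p2 = "str";  // OK: string literal
+ //   void f() { int l; constexpr int *p3 = &l; }  // rejected: automatic object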
+
+static void NoteLValueLocation(EvalInfo &Info, APValue::LValueBase Base) {
+ assert(Base && "no location for a null lvalue");
+ const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
+ if (VD)
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ else
+ Info.Note(Base.dyn_cast<const Expr*>()->getExprLoc(),
+ diag::note_constexpr_temporary_here);
+}
+
+/// Check that this reference or pointer core constant expression is a valid
+/// value for an address or reference constant expression. Return true if we
+/// can fold this expression, whether or not it's a constant expression.
+static bool CheckLValueConstantExpression(EvalInfo &Info, SourceLocation Loc,
+ QualType Type, const LValue &LVal) {
+ bool IsReferenceType = Type->isReferenceType();
+
+ APValue::LValueBase Base = LVal.getLValueBase();
+ const SubobjectDesignator &Designator = LVal.getLValueDesignator();
+
+ // Check that the object is a global. Note that the fake 'this' object we
+ // manufacture when checking potential constant expressions is conservatively
+ // assumed to be global here.
+ if (!IsGlobalLValue(Base)) {
+ if (Info.getLangOpts().CPlusPlus0x) {
+ const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
+ Info.Diag(Loc, diag::note_constexpr_non_global, 1)
+ << IsReferenceType << !Designator.Entries.empty()
+ << !!VD << VD;
+ NoteLValueLocation(Info, Base);
+ } else {
+ Info.Diag(Loc);
+ }
+ // Don't allow references to temporaries to escape.
+ return false;
+ }
+ assert((Info.CheckingPotentialConstantExpression ||
+ LVal.getLValueCallIndex() == 0) &&
+ "have call index for global lvalue");
+
+ // Allow address constant expressions to be past-the-end pointers. This is
+ // an extension: the standard requires them to point to an object.
+ if (!IsReferenceType)
+ return true;
+
+ // A reference constant expression must refer to an object.
+ if (!Base) {
+ // FIXME: diagnostic
+ Info.CCEDiag(Loc);
+ return true;
+ }
+
+ // Does this refer to one past the end of some object?
+ if (Designator.isOnePastTheEnd()) {
+ const ValueDecl *VD = Base.dyn_cast<const ValueDecl*>();
+ Info.Diag(Loc, diag::note_constexpr_past_end, 1)
+ << !Designator.Entries.empty() << !!VD << VD;
+ NoteLValueLocation(Info, Base);
+ }
+
+ return true;
+}
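+ // For illustration only (hypothetical array): a past-the-end pointer is
+ // accepted as an address constant (the extension noted above), but a
+ // past-the-end reference is diagnosed with note_constexpr_past_end:
+ //
+ //   constexpr int arr[3] = {};
+ //   constexpr const int *p = arr + 3;  // accepted
+ //   constexpr const int &r = arr[3];   // diagnosed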
+
+/// Check that this core constant expression is of literal type, and if not,
+/// produce an appropriate diagnostic.
+static bool CheckLiteralType(EvalInfo &Info, const Expr *E) {
+ if (!E->isRValue() || E->getType()->isLiteralType())
+ return true;
+
+ // Prvalue constant expressions must be of literal types.
+ if (Info.getLangOpts().CPlusPlus0x)
+ Info.Diag(E, diag::note_constexpr_nonliteral)
+ << E->getType();
+ else
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+}
+
+/// Check that this core constant expression value is a valid value for a
+/// constant expression. If not, report an appropriate diagnostic. Does not
+/// check that the expression is of literal type.
+static bool CheckConstantExpression(EvalInfo &Info, SourceLocation DiagLoc,
+ QualType Type, const APValue &Value) {
+ // Core issue 1454: For a literal constant expression of array or class type,
+ // each subobject of its value shall have been initialized by a constant
+ // expression.
+ if (Value.isArray()) {
+ QualType EltTy = Type->castAsArrayTypeUnsafe()->getElementType();
+ for (unsigned I = 0, N = Value.getArrayInitializedElts(); I != N; ++I) {
+ if (!CheckConstantExpression(Info, DiagLoc, EltTy,
+ Value.getArrayInitializedElt(I)))
+ return false;
+ }
+ if (!Value.hasArrayFiller())
+ return true;
+ return CheckConstantExpression(Info, DiagLoc, EltTy,
+ Value.getArrayFiller());
+ }
+ if (Value.isUnion() && Value.getUnionField()) {
+ return CheckConstantExpression(Info, DiagLoc,
+ Value.getUnionField()->getType(),
+ Value.getUnionValue());
+ }
+ if (Value.isStruct()) {
+ RecordDecl *RD = Type->castAs<RecordType>()->getDecl();
+ if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
+ unsigned BaseIndex = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(),
+ End = CD->bases_end(); I != End; ++I, ++BaseIndex) {
+ if (!CheckConstantExpression(Info, DiagLoc, I->getType(),
+ Value.getStructBase(BaseIndex)))
+ return false;
+ }
+ }
+ for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I) {
+ if (!CheckConstantExpression(Info, DiagLoc, (*I)->getType(),
+ Value.getStructField((*I)->getFieldIndex())))
+ return false;
+ }
+ }
+
+ if (Value.isLValue()) {
+ LValue LVal;
+ LVal.setFrom(Info.Ctx, Value);
+ return CheckLValueConstantExpression(Info, DiagLoc, Type, LVal);
+ }
+
+ // Everything else is fine.
+ return true;
+}
+
+const ValueDecl *GetLValueBaseDecl(const LValue &LVal) {
+ return LVal.Base.dyn_cast<const ValueDecl*>();
+}
+
+static bool IsLiteralLValue(const LValue &Value) {
+ return Value.Base.dyn_cast<const Expr*>() && !Value.CallIndex;
+}
+
+static bool IsWeakLValue(const LValue &Value) {
+ const ValueDecl *Decl = GetLValueBaseDecl(Value);
+ return Decl && Decl->isWeak();
+}
+
+static bool EvalPointerValueAsBool(const APValue &Value, bool &Result) {
+ // A null base expression indicates a null pointer. These are always
+ // evaluatable, and they are false unless the offset is nonzero.
+ if (!Value.getLValueBase()) {
+ Result = !Value.getLValueOffset().isZero();
+ return true;
+ }
+
+ // We have a non-null base. These are generally known to be true, but if it's
+ // a weak declaration it can be null at runtime.
+ Result = true;
+ const ValueDecl *Decl = Value.getLValueBase().dyn_cast<const ValueDecl*>();
+ return !Decl || !Decl->isWeak();
+}
+
+static bool HandleConversionToBool(const APValue &Val, bool &Result) {
+ switch (Val.getKind()) {
+ case APValue::Uninitialized:
+ return false;
+ case APValue::Int:
+ Result = Val.getInt().getBoolValue();
+ return true;
+ case APValue::Float:
+ Result = !Val.getFloat().isZero();
+ return true;
+ case APValue::ComplexInt:
+ Result = Val.getComplexIntReal().getBoolValue() ||
+ Val.getComplexIntImag().getBoolValue();
+ return true;
+ case APValue::ComplexFloat:
+ Result = !Val.getComplexFloatReal().isZero() ||
+ !Val.getComplexFloatImag().isZero();
+ return true;
+ case APValue::LValue:
+ return EvalPointerValueAsBool(Val, Result);
+ case APValue::MemberPointer:
+ Result = Val.getMemberPointerDecl();
+ return true;
+ case APValue::Vector:
+ case APValue::Array:
+ case APValue::Struct:
+ case APValue::Union:
+ case APValue::AddrLabelDiff:
+ return false;
+ }
+
+ llvm_unreachable("unknown APValue kind");
+}
+
+static bool EvaluateAsBooleanCondition(const Expr *E, bool &Result,
+ EvalInfo &Info) {
+ assert(E->isRValue() && "missing lvalue-to-rvalue conv in bool condition");
+ APValue Val;
+ if (!Evaluate(Val, Info, E))
+ return false;
+ return HandleConversionToBool(Val, Result);
+}
+
+template<typename T>
+static bool HandleOverflow(EvalInfo &Info, const Expr *E,
+ const T &SrcValue, QualType DestType) {
+ Info.Diag(E, diag::note_constexpr_overflow)
+ << SrcValue << DestType;
+ return false;
+}
+
+static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
+ QualType SrcType, const APFloat &Value,
+ QualType DestType, APSInt &Result) {
+ unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
+ // Determine whether we are converting to unsigned or signed.
+ bool DestSigned = DestType->isSignedIntegerOrEnumerationType();
+
+ Result = APSInt(DestWidth, !DestSigned);
+ bool ignored;
+ if (Value.convertToInteger(Result, llvm::APFloat::rmTowardZero, &ignored)
+ & APFloat::opInvalidOp)
+ return HandleOverflow(Info, E, Value, DestType);
+ return true;
+}
+
+static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E,
+ QualType SrcType, QualType DestType,
+ APFloat &Result) {
+ APFloat Value = Result;
+ bool ignored;
+ if (Result.convert(Info.Ctx.getFloatTypeSemantics(DestType),
+ APFloat::rmNearestTiesToEven, &ignored)
+ & APFloat::opOverflow)
+ return HandleOverflow(Info, E, Value, DestType);
+ return true;
+}
+
+static APSInt HandleIntToIntCast(EvalInfo &Info, const Expr *E,
+ QualType DestType, QualType SrcType,
+ APSInt &Value) {
+ unsigned DestWidth = Info.Ctx.getIntWidth(DestType);
+ APSInt Result = Value;
+ // Figure out if this is a truncate, extend or noop cast.
+ // If the input is signed, do a sign extend, noop, or truncate.
+ Result = Result.extOrTrunc(DestWidth);
+ Result.setIsUnsigned(DestType->isUnsignedIntegerOrEnumerationType());
+ return Result;
+}
+
+static bool HandleIntToFloatCast(EvalInfo &Info, const Expr *E,
+ QualType SrcType, const APSInt &Value,
+ QualType DestType, APFloat &Result) {
+ Result = APFloat(Info.Ctx.getFloatTypeSemantics(DestType), 1);
+ if (Result.convertFromAPInt(Value, Value.isSigned(),
+ APFloat::rmNearestTiesToEven)
+ & APFloat::opOverflow)
+ return HandleOverflow(Info, E, Value, DestType);
+ return true;
+}
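+ // For illustration only (hypothetical values): conversions whose destination
+ // cannot represent the source value go through HandleOverflow, e.g.
+ //
+ //   constexpr int i = (int)1e40;   // float-to-int out of range: overflow note
+ //   constexpr char c = char(300);  // int-to-int: just truncated to the
+ //                                  // destination width, still folds
+ //
+ // while in-range conversions fold to the converted value.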
+
+static bool EvalAndBitcastToAPInt(EvalInfo &Info, const Expr *E,
+ llvm::APInt &Res) {
+ APValue SVal;
+ if (!Evaluate(SVal, Info, E))
+ return false;
+ if (SVal.isInt()) {
+ Res = SVal.getInt();
+ return true;
+ }
+ if (SVal.isFloat()) {
+ Res = SVal.getFloat().bitcastToAPInt();
+ return true;
+ }
+ if (SVal.isVector()) {
+ QualType VecTy = E->getType();
+ unsigned VecSize = Info.Ctx.getTypeSize(VecTy);
+ QualType EltTy = VecTy->castAs<VectorType>()->getElementType();
+ unsigned EltSize = Info.Ctx.getTypeSize(EltTy);
+ bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
+ Res = llvm::APInt::getNullValue(VecSize);
+ for (unsigned i = 0; i < SVal.getVectorLength(); i++) {
+ APValue &Elt = SVal.getVectorElt(i);
+ llvm::APInt EltAsInt;
+ if (Elt.isInt()) {
+ EltAsInt = Elt.getInt();
+ } else if (Elt.isFloat()) {
+ EltAsInt = Elt.getFloat().bitcastToAPInt();
+ } else {
+ // Don't try to handle vectors of anything other than int or float
+ // (not sure if it's possible to hit this case).
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ unsigned BaseEltSize = EltAsInt.getBitWidth();
+ if (BigEndian)
+ Res |= EltAsInt.zextOrTrunc(VecSize).rotr(i*EltSize+BaseEltSize);
+ else
+ Res |= EltAsInt.zextOrTrunc(VecSize).rotl(i*EltSize);
+ }
+ return true;
+ }
+ // Give up if the input isn't an int, float, or vector. For example, we
+ // reject "(v4i16)(intptr_t)&a".
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+}
+
+/// Cast an lvalue referring to a base subobject to a derived class, by
+/// truncating the lvalue's path to the given length.
+static bool CastToDerivedClass(EvalInfo &Info, const Expr *E, LValue &Result,
+ const RecordDecl *TruncatedType,
+ unsigned TruncatedElements) {
+ SubobjectDesignator &D = Result.Designator;
+
+ // Check we actually point to a derived class object.
+ if (TruncatedElements == D.Entries.size())
+ return true;
+ assert(TruncatedElements >= D.MostDerivedPathLength &&
+ "not casting to a derived class");
+ if (!Result.checkSubobject(Info, E, CSK_Derived))
+ return false;
+
+ // Truncate the path to the subobject, and remove any derived-to-base offsets.
+ const RecordDecl *RD = TruncatedType;
+ for (unsigned I = TruncatedElements, N = D.Entries.size(); I != N; ++I) {
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+ const CXXRecordDecl *Base = getAsBaseClass(D.Entries[I]);
+ if (isVirtualBaseClass(D.Entries[I]))
+ Result.Offset -= Layout.getVBaseClassOffset(Base);
+ else
+ Result.Offset -= Layout.getBaseClassOffset(Base);
+ RD = Base;
+ }
+ D.Entries.resize(TruncatedElements);
+ return true;
+}
+
+static void HandleLValueDirectBase(EvalInfo &Info, const Expr *E, LValue &Obj,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base,
+ const ASTRecordLayout *RL = 0) {
+ if (!RL) RL = &Info.Ctx.getASTRecordLayout(Derived);
+ Obj.getLValueOffset() += RL->getBaseClassOffset(Base);
+ Obj.addDecl(Info, E, Base, /*Virtual*/ false);
+}
+
+static bool HandleLValueBase(EvalInfo &Info, const Expr *E, LValue &Obj,
+ const CXXRecordDecl *DerivedDecl,
+ const CXXBaseSpecifier *Base) {
+ const CXXRecordDecl *BaseDecl = Base->getType()->getAsCXXRecordDecl();
+
+ if (!Base->isVirtual()) {
+ HandleLValueDirectBase(Info, E, Obj, DerivedDecl, BaseDecl);
+ return true;
+ }
+
+ SubobjectDesignator &D = Obj.Designator;
+ if (D.Invalid)
+ return false;
+
+ // Extract most-derived object and corresponding type.
+ DerivedDecl = D.MostDerivedType->getAsCXXRecordDecl();
+ if (!CastToDerivedClass(Info, E, Obj, DerivedDecl, D.MostDerivedPathLength))
+ return false;
+
+ // Find the virtual base class.
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(DerivedDecl);
+ Obj.getLValueOffset() += Layout.getVBaseClassOffset(BaseDecl);
+ Obj.addDecl(Info, E, BaseDecl, /*Virtual*/ true);
+ return true;
+}
+
+/// Update LVal to refer to the given field, which must be a member of the type
+/// currently described by LVal.
+static void HandleLValueMember(EvalInfo &Info, const Expr *E, LValue &LVal,
+ const FieldDecl *FD,
+ const ASTRecordLayout *RL = 0) {
+ if (!RL)
+ RL = &Info.Ctx.getASTRecordLayout(FD->getParent());
+
+ unsigned I = FD->getFieldIndex();
+ LVal.Offset += Info.Ctx.toCharUnitsFromBits(RL->getFieldOffset(I));
+ LVal.addDecl(Info, E, FD);
+}
+
+/// Update LVal to refer to the given indirect field.
+static void HandleLValueIndirectMember(EvalInfo &Info, const Expr *E,
+ LValue &LVal,
+ const IndirectFieldDecl *IFD) {
+ for (IndirectFieldDecl::chain_iterator C = IFD->chain_begin(),
+ CE = IFD->chain_end(); C != CE; ++C)
+ HandleLValueMember(Info, E, LVal, cast<FieldDecl>(*C));
+}
+
+/// Get the size of the given type in char units.
+static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc,
+ QualType Type, CharUnits &Size) {
+ // sizeof(void), __alignof__(void), sizeof(function) = 1 as a gcc
+ // extension.
+ if (Type->isVoidType() || Type->isFunctionType()) {
+ Size = CharUnits::One();
+ return true;
+ }
+
+ if (!Type->isConstantSizeType()) {
+ // sizeof(vla) is not a constant expression: C99 6.5.3.4p2.
+ // FIXME: Better diagnostic.
+ Info.Diag(Loc);
+ return false;
+ }
+
+ Size = Info.Ctx.getTypeSizeInChars(Type);
+ return true;
+}
+
+/// Update a pointer value to model pointer arithmetic.
+/// \param Info - Information about the ongoing evaluation.
+/// \param E - The expression being evaluated, for diagnostic purposes.
+/// \param LVal - The pointer value to be updated.
+/// \param EltTy - The pointee type represented by LVal.
+/// \param Adjustment - The adjustment, in objects of type EltTy, to add.
+static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
+ LValue &LVal, QualType EltTy,
+ int64_t Adjustment) {
+ CharUnits SizeOfPointee;
+ if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfPointee))
+ return false;
+
+ // Compute the new offset in the appropriate width.
+ LVal.Offset += Adjustment * SizeOfPointee;
+ LVal.adjustIndex(Info, E, Adjustment);
+ return true;
+}
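+ // For illustration only: given a hypothetical 'constexpr int a[4] = {0,1,2,3};',
+ // evaluating 'a + 2' both advances Offset by 2 * sizeof(int) and adjusts the
+ // designator's array index from 0 to 2, so a later read of *(a + 2) resolves
+ // to the subobject a[2].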
+
+/// Update an lvalue to refer to a component of a complex number.
+/// \param Info - Information about the ongoing evaluation.
+/// \param LVal - The lvalue to be updated.
+/// \param EltTy - The complex number's component type.
+/// \param Imag - False for the real component, true for the imaginary.
+static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E,
+ LValue &LVal, QualType EltTy,
+ bool Imag) {
+ if (Imag) {
+ CharUnits SizeOfComponent;
+ if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfComponent))
+ return false;
+ LVal.Offset += SizeOfComponent;
+ }
+ LVal.addComplex(Info, E, EltTy, Imag);
+ return true;
+}
+
+/// Try to evaluate the initializer for a variable declaration.
+static bool EvaluateVarDeclInit(EvalInfo &Info, const Expr *E,
+ const VarDecl *VD,
+ CallStackFrame *Frame, APValue &Result) {
+ // If this is a parameter to an active constexpr function call, perform
+ // argument substitution.
+ if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD)) {
+ // Assume arguments of a potential constant expression are unknown
+ // constant expressions.
+ if (Info.CheckingPotentialConstantExpression)
+ return false;
+ if (!Frame || !Frame->Arguments) {
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ Result = Frame->Arguments[PVD->getFunctionScopeIndex()];
+ return true;
+ }
+
+ // Dig out the initializer, and use the declaration to which it's attached.
+ const Expr *Init = VD->getAnyInitializer(VD);
+ if (!Init || Init->isValueDependent()) {
+ // If we're checking a potential constant expression, the variable could be
+ // initialized later.
+ if (!Info.CheckingPotentialConstantExpression)
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
+ // If we're currently evaluating the initializer of this declaration, use that
+ // in-flight value.
+ if (Info.EvaluatingDecl == VD) {
+ Result = *Info.EvaluatingDeclValue;
+ return !Result.isUninit();
+ }
+
+ // Never evaluate the initializer of a weak variable. We can't be sure that
+ // this is the definition which will be used.
+ if (VD->isWeak()) {
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
+ // Check that we can fold the initializer. In C++, we will have already done
+ // this in the cases where it matters for conformance.
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ if (!VD->evaluateValue(Notes)) {
+ Info.Diag(E, diag::note_constexpr_var_init_non_constant,
+ Notes.size() + 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ Info.addNotes(Notes);
+ return false;
+ } else if (!VD->checkInitIsICE()) {
+ Info.CCEDiag(E, diag::note_constexpr_var_init_non_constant,
+ Notes.size() + 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ Info.addNotes(Notes);
+ }
+
+ Result = *VD->getEvaluatedValue();
+ return true;
+}
+
+static bool IsConstNonVolatile(QualType T) {
+ Qualifiers Quals = T.getQualifiers();
+ return Quals.hasConst() && !Quals.hasVolatile();
+}
+
+/// Get the base index of the given base class within an APValue representing
+/// the given derived class.
+static unsigned getBaseIndex(const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base) {
+ Base = Base->getCanonicalDecl();
+ unsigned Index = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = Derived->bases_begin(),
+ E = Derived->bases_end(); I != E; ++I, ++Index) {
+ if (I->getType()->getAsCXXRecordDecl()->getCanonicalDecl() == Base)
+ return Index;
+ }
+
+ llvm_unreachable("base class missing from derived class's bases list");
+}
+
+/// Extract the value of a character from a string literal.
+static APSInt ExtractStringLiteralCharacter(EvalInfo &Info, const Expr *Lit,
+ uint64_t Index) {
+ // FIXME: Support PredefinedExpr, ObjCEncodeExpr, MakeStringConstant
+ const StringLiteral *S = dyn_cast<StringLiteral>(Lit);
+ assert(S && "unexpected string literal expression kind");
+
+ APSInt Value(S->getCharByteWidth() * Info.Ctx.getCharWidth(),
+ Lit->getType()->getArrayElementTypeNoTypeQual()->isUnsignedIntegerType());
+ if (Index < S->getLength())
+ Value = S->getCodeUnit(Index);
+ return Value;
+}
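+ // For illustration only (hypothetical code): element reads from a string
+ // literal go through this path, e.g.
+ //
+ //   constexpr char c = "clang"[2];  // folds to 'a'
+ //   constexpr char n = "clang"[5];  // within the array but past getLength():
+ //                                   // yields the implicit '\0' (Value stays 0)
+ //
+ // Indices beyond the array bound are rejected earlier by the designator
+ // checks in ExtractSubobject.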
+
+/// Extract the designated sub-object of an rvalue.
+static bool ExtractSubobject(EvalInfo &Info, const Expr *E,
+ APValue &Obj, QualType ObjType,
+ const SubobjectDesignator &Sub, QualType SubType) {
+ if (Sub.Invalid)
+ // A diagnostic will have already been produced.
+ return false;
+ if (Sub.isOnePastTheEnd()) {
+ Info.Diag(E, Info.getLangOpts().CPlusPlus0x ?
+ (unsigned)diag::note_constexpr_read_past_end :
+ (unsigned)diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ if (Sub.Entries.empty())
+ return true;
+ if (Info.CheckingPotentialConstantExpression && Obj.isUninit())
+ // This object might be initialized later.
+ return false;
+
+ APValue *O = &Obj;
+ // Walk the designator's path to find the subobject.
+ for (unsigned I = 0, N = Sub.Entries.size(); I != N; ++I) {
+ if (ObjType->isArrayType()) {
+ // Next subobject is an array element.
+ const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(ObjType);
+ assert(CAT && "vla in literal type?");
+ uint64_t Index = Sub.Entries[I].ArrayIndex;
+ if (CAT->getSize().ule(Index)) {
+ // Note, it should not be possible to form a pointer with a valid
+ // designator which points more than one past the end of the array.
+ Info.Diag(E, Info.getLangOpts().CPlusPlus0x ?
+ (unsigned)diag::note_constexpr_read_past_end :
+ (unsigned)diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ // An array object is represented as either an Array APValue or as an
+ // LValue which refers to a string literal.
+ if (O->isLValue()) {
+ assert(I == N - 1 && "extracting subobject of character?");
+ assert(!O->hasLValuePath() || O->getLValuePath().empty());
+ Obj = APValue(ExtractStringLiteralCharacter(
+ Info, O->getLValueBase().get<const Expr*>(), Index));
+ return true;
+ } else if (O->getArrayInitializedElts() > Index)
+ O = &O->getArrayInitializedElt(Index);
+ else
+ O = &O->getArrayFiller();
+ ObjType = CAT->getElementType();
+ } else if (ObjType->isAnyComplexType()) {
+ // Next subobject is a complex number.
+ uint64_t Index = Sub.Entries[I].ArrayIndex;
+ if (Index > 1) {
+ Info.Diag(E, Info.getLangOpts().CPlusPlus0x ?
+ (unsigned)diag::note_constexpr_read_past_end :
+ (unsigned)diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ assert(I == N - 1 && "extracting subobject of scalar?");
+ if (O->isComplexInt()) {
+ Obj = APValue(Index ? O->getComplexIntImag()
+ : O->getComplexIntReal());
+ } else {
+ assert(O->isComplexFloat());
+ Obj = APValue(Index ? O->getComplexFloatImag()
+ : O->getComplexFloatReal());
+ }
+ return true;
+ } else if (const FieldDecl *Field = getAsField(Sub.Entries[I])) {
+ if (Field->isMutable()) {
+ Info.Diag(E, diag::note_constexpr_ltor_mutable, 1)
+ << Field;
+ Info.Note(Field->getLocation(), diag::note_declared_at);
+ return false;
+ }
+
+ // Next subobject is a class, struct or union field.
+ RecordDecl *RD = ObjType->castAs<RecordType>()->getDecl();
+ if (RD->isUnion()) {
+ const FieldDecl *UnionField = O->getUnionField();
+ if (!UnionField ||
+ UnionField->getCanonicalDecl() != Field->getCanonicalDecl()) {
+ Info.Diag(E, diag::note_constexpr_read_inactive_union_member)
+ << Field << !UnionField << UnionField;
+ return false;
+ }
+ O = &O->getUnionValue();
+ } else
+ O = &O->getStructField(Field->getFieldIndex());
+ ObjType = Field->getType();
+
+ if (ObjType.isVolatileQualified()) {
+ if (Info.getLangOpts().CPlusPlus) {
+ // FIXME: Include a description of the path to the volatile subobject.
+ Info.Diag(E, diag::note_constexpr_ltor_volatile_obj, 1)
+ << 2 << Field;
+ Info.Note(Field->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ }
+ return false;
+ }
+ } else {
+ // Next subobject is a base class.
+ const CXXRecordDecl *Derived = ObjType->getAsCXXRecordDecl();
+ const CXXRecordDecl *Base = getAsBaseClass(Sub.Entries[I]);
+ O = &O->getStructBase(getBaseIndex(Derived, Base));
+ ObjType = Info.Ctx.getRecordType(Base);
+ }
+
+ if (O->isUninit()) {
+ if (!Info.CheckingPotentialConstantExpression)
+ Info.Diag(E, diag::note_constexpr_read_uninit);
+ return false;
+ }
+ }
+
+ // This may look super-stupid, but it serves an important purpose: if we just
+ // swapped Obj and *O, we'd create an object which had itself as a subobject.
+ // To avoid the leak, we ensure that Tmp ends up owning the original complete
+ // object, which is destroyed by Tmp's destructor.
+ APValue Tmp;
+ O->swap(Tmp);
+ Obj.swap(Tmp);
+ return true;
+}
+
+/// Find the position where two subobject designators diverge, or equivalently
+/// the length of the common initial subsequence.
+static unsigned FindDesignatorMismatch(QualType ObjType,
+ const SubobjectDesignator &A,
+ const SubobjectDesignator &B,
+ bool &WasArrayIndex) {
+ unsigned I = 0, N = std::min(A.Entries.size(), B.Entries.size());
+ for (/**/; I != N; ++I) {
+ if (!ObjType.isNull() &&
+ (ObjType->isArrayType() || ObjType->isAnyComplexType())) {
+ // Next subobject is an array element.
+ if (A.Entries[I].ArrayIndex != B.Entries[I].ArrayIndex) {
+ WasArrayIndex = true;
+ return I;
+ }
+ if (ObjType->isAnyComplexType())
+ ObjType = ObjType->castAs<ComplexType>()->getElementType();
+ else
+ ObjType = ObjType->castAsArrayTypeUnsafe()->getElementType();
+ } else {
+ if (A.Entries[I].BaseOrMember != B.Entries[I].BaseOrMember) {
+ WasArrayIndex = false;
+ return I;
+ }
+ if (const FieldDecl *FD = getAsField(A.Entries[I]))
+ // Next subobject is a field.
+ ObjType = FD->getType();
+ else
+ // Next subobject is a base class.
+ ObjType = QualType();
+ }
+ }
+ WasArrayIndex = false;
+ return I;
+}
+
+/// Determine whether the given subobject designators refer to elements of the
+/// same array object.
+static bool AreElementsOfSameArray(QualType ObjType,
+ const SubobjectDesignator &A,
+ const SubobjectDesignator &B) {
+ if (A.Entries.size() != B.Entries.size())
+ return false;
+
+ bool IsArray = A.MostDerivedArraySize != 0;
+ if (IsArray && A.MostDerivedPathLength != A.Entries.size())
+ // A is a subobject of the array element.
+ return false;
+
+ // If A (and B) designates an array element, the last entry will be the array
+ // index. That doesn't have to match. Otherwise, we're in the 'implicit array
+ // of length 1' case, and the entire path must match.
+ bool WasArrayIndex;
+ unsigned CommonLength = FindDesignatorMismatch(ObjType, A, B, WasArrayIndex);
+ return CommonLength >= A.Entries.size() - IsArray;
+}
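+ // For illustration only (hypothetical 2D array): checks like this one are
+ // what distinguish, e.g.
+ //
+ //   constexpr int m[2][3] = {};
+ //   constexpr bool ok  = &m[0][2] - &m[0][0] == 2;  // same row: folds
+ //   constexpr bool bad = &m[1][0] - &m[0][0] == 3;  // different rows: not a
+ //                                                   // constant expression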
+
+/// HandleLValueToRValueConversion - Perform an lvalue-to-rvalue conversion on
+/// the given lvalue. This can also be used for 'lvalue-to-lvalue' conversions
+/// for looking up the glvalue referred to by an entity of reference type.
+///
+/// \param Info - Information about the ongoing evaluation.
+/// \param Conv - The expression for which we are performing the conversion.
+/// Used for diagnostics.
+/// \param Type - The type we expect this conversion to produce, before
+ /// stripping cv-qualifiers in the case of a non-class type.
+/// \param LVal - The glvalue on which we are attempting to perform this action.
+/// \param RVal - The produced value will be placed here.
+static bool HandleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv,
+ QualType Type,
+ const LValue &LVal, APValue &RVal) {
+ if (LVal.Designator.Invalid)
+ // A diagnostic will have already been produced.
+ return false;
+
+ const Expr *Base = LVal.Base.dyn_cast<const Expr*>();
+
+ if (!LVal.Base) {
+ // FIXME: Indirection through a null pointer deserves a specific diagnostic.
+ Info.Diag(Conv, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
+ CallStackFrame *Frame = 0;
+ if (LVal.CallIndex) {
+ Frame = Info.getCallFrame(LVal.CallIndex);
+ if (!Frame) {
+ Info.Diag(Conv, diag::note_constexpr_lifetime_ended, 1) << !Base;
+ NoteLValueLocation(Info, LVal.Base);
+ return false;
+ }
+ }
+
+ // C++11 DR1311: An lvalue-to-rvalue conversion on a volatile-qualified type
+ // is not a constant expression (even if the object is non-volatile). We also
+ // apply this rule to C++98, in order to conform to the expected 'volatile'
+ // semantics.
+ if (Type.isVolatileQualified()) {
+ if (Info.getLangOpts().CPlusPlus)
+ Info.Diag(Conv, diag::note_constexpr_ltor_volatile_type) << Type;
+ else
+ Info.Diag(Conv);
+ return false;
+ }
+
+ if (const ValueDecl *D = LVal.Base.dyn_cast<const ValueDecl*>()) {
+ // In C++98, const, non-volatile integers initialized with ICEs are ICEs.
+ // In C++11, constexpr, non-volatile variables initialized with constant
+ // expressions are constant expressions too. Inside constexpr functions,
+ // parameters are constant expressions even if they're non-const.
+ // In C, such things can also be folded, although they are not ICEs.
+ const VarDecl *VD = dyn_cast<VarDecl>(D);
+ if (VD) {
+ if (const VarDecl *VDef = VD->getDefinition(Info.Ctx))
+ VD = VDef;
+ }
+ if (!VD || VD->isInvalidDecl()) {
+ Info.Diag(Conv);
+ return false;
+ }
+
+ // DR1313: If the object is volatile-qualified but the glvalue was not,
+ // behavior is undefined so the result is not a constant expression.
+ QualType VT = VD->getType();
+ if (VT.isVolatileQualified()) {
+ if (Info.getLangOpts().CPlusPlus) {
+ Info.Diag(Conv, diag::note_constexpr_ltor_volatile_obj, 1) << 1 << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(Conv);
+ }
+ return false;
+ }
+
+ if (!isa<ParmVarDecl>(VD)) {
+ if (VD->isConstexpr()) {
+ // OK, we can read this variable.
+ } else if (VT->isIntegralOrEnumerationType()) {
+ if (!VT.isConstQualified()) {
+ if (Info.getLangOpts().CPlusPlus) {
+ Info.Diag(Conv, diag::note_constexpr_ltor_non_const_int, 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(Conv);
+ }
+ return false;
+ }
+ } else if (VT->isFloatingType() && VT.isConstQualified()) {
+ // We support folding of const floating-point types, in order to make
+ // static const data members of such types (supported as an extension)
+ // more useful.
+ if (Info.getLangOpts().CPlusPlus0x) {
+ Info.CCEDiag(Conv, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.CCEDiag(Conv);
+ }
+ } else {
+ // FIXME: Allow folding of values of any literal type in all languages.
+ if (Info.getLangOpts().CPlusPlus0x) {
+ Info.Diag(Conv, diag::note_constexpr_ltor_non_constexpr, 1) << VD;
+ Info.Note(VD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(Conv);
+ }
+ return false;
+ }
+ }
+
+ if (!EvaluateVarDeclInit(Info, Conv, VD, Frame, RVal))
+ return false;
+
+ if (isa<ParmVarDecl>(VD) || !VD->getAnyInitializer()->isLValue())
+ return ExtractSubobject(Info, Conv, RVal, VT, LVal.Designator, Type);
+
+ // The declaration was initialized by an lvalue, with no lvalue-to-rvalue
+ // conversion. This happens when the declaration and the lvalue should be
+ // considered synonymous, for instance when initializing an array of char
+ // from a string literal. Continue as if the initializer lvalue was the
+ // value we were originally given.
+ assert(RVal.getLValueOffset().isZero() &&
+ "offset for lvalue init of non-reference");
+ Base = RVal.getLValueBase().get<const Expr*>();
+
+ if (unsigned CallIndex = RVal.getLValueCallIndex()) {
+ Frame = Info.getCallFrame(CallIndex);
+ if (!Frame) {
+ Info.Diag(Conv, diag::note_constexpr_lifetime_ended, 1) << !Base;
+ NoteLValueLocation(Info, RVal.getLValueBase());
+ return false;
+ }
+ } else {
+ Frame = 0;
+ }
+ }
+
+ // Volatile temporary objects cannot be read in constant expressions.
+ if (Base->getType().isVolatileQualified()) {
+ if (Info.getLangOpts().CPlusPlus) {
+ Info.Diag(Conv, diag::note_constexpr_ltor_volatile_obj, 1) << 0;
+ Info.Note(Base->getExprLoc(), diag::note_constexpr_temporary_here);
+ } else {
+ Info.Diag(Conv);
+ }
+ return false;
+ }
+
+ if (Frame) {
+ // If this is a temporary expression with a nontrivial initializer, grab the
+ // value from the relevant stack frame.
+ RVal = Frame->Temporaries[Base];
+ } else if (const CompoundLiteralExpr *CLE
+ = dyn_cast<CompoundLiteralExpr>(Base)) {
+ // In C99, a CompoundLiteralExpr is an lvalue, and we defer evaluating the
+ // initializer until now for such expressions. Such an expression can't be
+ // an ICE in C, so this only matters for constant folding.
+ assert(!Info.getLangOpts().CPlusPlus && "lvalue compound literal in c++?");
+ if (!Evaluate(RVal, Info, CLE->getInitializer()))
+ return false;
+ } else if (isa<StringLiteral>(Base)) {
+ // We represent a string literal array as an lvalue pointing at the
+ // corresponding expression, rather than building an array of chars.
+ // FIXME: Support PredefinedExpr, ObjCEncodeExpr, MakeStringConstant
+ RVal = APValue(Base, CharUnits::Zero(), APValue::NoLValuePath(), 0);
+ } else {
+ Info.Diag(Conv, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
+ return ExtractSubobject(Info, Conv, RVal, Base->getType(), LVal.Designator,
+ Type);
+}
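+ // For illustration only (hypothetical declarations) of the read rules above,
+ // in C++11:
+ //
+ //   constexpr int a = 1;  // readable: constexpr
+ //   const int b = 2;      // readable: const, non-volatile integer with a
+ //                         // constant initializer
+ //   int c = 3;            // not readable in a constant expression
+ //
+ //   constexpr int x = a + b;  // OK
+ //   constexpr int y = c;      // error: read of non-const variable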
+
+/// Build an lvalue for the object argument of a member function call.
+static bool EvaluateObjectArgument(EvalInfo &Info, const Expr *Object,
+ LValue &This) {
+ if (Object->getType()->isPointerType())
+ return EvaluatePointer(Object, This, Info);
+
+ if (Object->isGLValue())
+ return EvaluateLValue(Object, This, Info);
+
+ if (Object->getType()->isLiteralType())
+ return EvaluateTemporary(Object, This, Info);
+
+ return false;
+}
+
+/// HandleMemberPointerAccess - Evaluate a member access operation and build an
+/// lvalue referring to the result.
+///
+/// \param Info - Information about the ongoing evaluation.
+/// \param BO - The member pointer access operation.
+/// \param LV - Filled in with a reference to the resulting object.
+/// \param IncludeMember - Specifies whether the member itself is included in
+/// the resulting LValue subobject designator. This is not possible when
+/// creating a bound member function.
+/// \return The field or method declaration to which the member pointer refers,
+/// or 0 if evaluation fails.
+static const ValueDecl *HandleMemberPointerAccess(EvalInfo &Info,
+ const BinaryOperator *BO,
+ LValue &LV,
+ bool IncludeMember = true) {
+ assert(BO->getOpcode() == BO_PtrMemD || BO->getOpcode() == BO_PtrMemI);
+
+ bool EvalObjOK = EvaluateObjectArgument(Info, BO->getLHS(), LV);
+ if (!EvalObjOK && !Info.keepEvaluatingAfterFailure())
+ return 0;
+
+ MemberPtr MemPtr;
+ if (!EvaluateMemberPointer(BO->getRHS(), MemPtr, Info))
+ return 0;
+
+ // C++11 [expr.mptr.oper]p6: If the second operand is the null pointer to
+ // member value, the behavior is undefined.
+ if (!MemPtr.getDecl())
+ return 0;
+
+ if (!EvalObjOK)
+ return 0;
+
+ if (MemPtr.isDerivedMember()) {
+ // This is a member of some derived class. Truncate LV appropriately.
+ // The end of the derived-to-base path for the base object must match the
+ // derived-to-base path for the member pointer.
+ if (LV.Designator.MostDerivedPathLength + MemPtr.Path.size() >
+ LV.Designator.Entries.size())
+ return 0;
+ unsigned PathLengthToMember =
+ LV.Designator.Entries.size() - MemPtr.Path.size();
+ for (unsigned I = 0, N = MemPtr.Path.size(); I != N; ++I) {
+ const CXXRecordDecl *LVDecl = getAsBaseClass(
+ LV.Designator.Entries[PathLengthToMember + I]);
+ const CXXRecordDecl *MPDecl = MemPtr.Path[I];
+ if (LVDecl->getCanonicalDecl() != MPDecl->getCanonicalDecl())
+ return 0;
+ }
+
+ // Truncate the lvalue to the appropriate derived class.
+ if (!CastToDerivedClass(Info, BO, LV, MemPtr.getContainingRecord(),
+ PathLengthToMember))
+ return 0;
+ } else if (!MemPtr.Path.empty()) {
+ // Extend the LValue path with the member pointer's path.
+ LV.Designator.Entries.reserve(LV.Designator.Entries.size() +
+ MemPtr.Path.size() + IncludeMember);
+
+ // Walk down to the appropriate base class.
+ QualType LVType = BO->getLHS()->getType();
+ if (const PointerType *PT = LVType->getAs<PointerType>())
+ LVType = PT->getPointeeType();
+ const CXXRecordDecl *RD = LVType->getAsCXXRecordDecl();
+ assert(RD && "member pointer access on non-class-type expression");
+ // The first class in the path is that of the lvalue.
+ for (unsigned I = 1, N = MemPtr.Path.size(); I != N; ++I) {
+ const CXXRecordDecl *Base = MemPtr.Path[N - I - 1];
+ HandleLValueDirectBase(Info, BO, LV, RD, Base);
+ RD = Base;
+ }
+ // Finally cast to the class containing the member.
+ HandleLValueDirectBase(Info, BO, LV, RD, MemPtr.getContainingRecord());
+ }
+
+ // Add the member. Note that we cannot build bound member functions here.
+ if (IncludeMember) {
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(MemPtr.getDecl()))
+ HandleLValueMember(Info, BO, LV, FD);
+ else if (const IndirectFieldDecl *IFD =
+ dyn_cast<IndirectFieldDecl>(MemPtr.getDecl()))
+ HandleLValueIndirectMember(Info, BO, LV, IFD);
+ else
+ llvm_unreachable("can't construct reference to bound member function");
+ }
+
+ return MemPtr.getDecl();
+}
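+ // For illustration only (hypothetical types): this is what folds the .* and
+ // ->* operators, e.g.
+ //
+ //   struct S { int x, y; };
+ //   constexpr S s = {1, 2};
+ //   constexpr int S::*pm = &S::y;
+ //   constexpr int v = s.*pm;  // folds to 2
+ //
+ // A null member pointer operand is undefined behavior; the function returns 0.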
+
+/// HandleBaseToDerivedCast - Apply the given base-to-derived cast operation on
+/// the provided lvalue, which currently refers to the base object.
+static bool HandleBaseToDerivedCast(EvalInfo &Info, const CastExpr *E,
+ LValue &Result) {
+ SubobjectDesignator &D = Result.Designator;
+ if (D.Invalid || !Result.checkNullPointer(Info, E, CSK_Derived))
+ return false;
+
+ QualType TargetQT = E->getType();
+ if (const PointerType *PT = TargetQT->getAs<PointerType>())
+ TargetQT = PT->getPointeeType();
+
+ // Check that this cast lands within the final derived-to-base subobject path.
+ if (D.MostDerivedPathLength + E->path_size() > D.Entries.size()) {
+ Info.CCEDiag(E, diag::note_constexpr_invalid_downcast)
+ << D.MostDerivedType << TargetQT;
+ return false;
+ }
+
+ // Check the type of the final cast. We don't need to check the path,
+ // since a cast can only be formed if the path is unique.
+ unsigned NewEntriesSize = D.Entries.size() - E->path_size();
+ const CXXRecordDecl *TargetType = TargetQT->getAsCXXRecordDecl();
+ const CXXRecordDecl *FinalType;
+ if (NewEntriesSize == D.MostDerivedPathLength)
+ FinalType = D.MostDerivedType->getAsCXXRecordDecl();
+ else
+ FinalType = getAsBaseClass(D.Entries[NewEntriesSize - 1]);
+ if (FinalType->getCanonicalDecl() != TargetType->getCanonicalDecl()) {
+ Info.CCEDiag(E, diag::note_constexpr_invalid_downcast)
+ << D.MostDerivedType << TargetQT;
+ return false;
+ }
+
+ // Truncate the lvalue to the appropriate derived class.
+ return CastToDerivedClass(Info, E, Result, TargetType, NewEntriesSize);
+}
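+ // For illustration only (hypothetical types): a base-to-derived cast is only
+ // folded when the lvalue really is a base subobject of such a derived object:
+ //
+ //   struct B { int b; constexpr B() : b(1) {} };
+ //   struct D : B { int d; constexpr D() : d(2) {} };
+ //   constexpr D d{};
+ //   constexpr const B &rb = d;
+ //   constexpr int n = static_cast<const D&>(rb).d;  // folds to 2
+ //
+ // Downcasting an lvalue that does not designate a D is diagnosed with
+ // note_constexpr_invalid_downcast.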
+
+namespace {
+enum EvalStmtResult {
+ /// Evaluation failed.
+ ESR_Failed,
+ /// Hit a 'return' statement.
+ ESR_Returned,
+ /// Evaluation succeeded.
+ ESR_Succeeded
+};
+}
+
+// Evaluate a statement.
+static EvalStmtResult EvaluateStmt(APValue &Result, EvalInfo &Info,
+ const Stmt *S) {
+ switch (S->getStmtClass()) {
+ default:
+ return ESR_Failed;
+
+ case Stmt::NullStmtClass:
+ case Stmt::DeclStmtClass:
+ return ESR_Succeeded;
+
+ case Stmt::ReturnStmtClass: {
+ const Expr *RetExpr = cast<ReturnStmt>(S)->getRetValue();
+ if (!Evaluate(Result, Info, RetExpr))
+ return ESR_Failed;
+ return ESR_Returned;
+ }
+
+ case Stmt::CompoundStmtClass: {
+ const CompoundStmt *CS = cast<CompoundStmt>(S);
+ for (CompoundStmt::const_body_iterator BI = CS->body_begin(),
+ BE = CS->body_end(); BI != BE; ++BI) {
+ EvalStmtResult ESR = EvaluateStmt(Result, Info, *BI);
+ if (ESR != ESR_Succeeded)
+ return ESR;
+ }
+ return ESR_Succeeded;
+ }
+ }
+}
+
+/// CheckTrivialDefaultConstructor - Check whether a constructor is a trivial
+/// default constructor. If so, we'll fold it whether or not it's marked as
+/// constexpr. If it is marked as constexpr, we will never implicitly define it,
+/// so we need special handling.
+static bool CheckTrivialDefaultConstructor(EvalInfo &Info, SourceLocation Loc,
+ const CXXConstructorDecl *CD,
+ bool IsValueInitialization) {
+ if (!CD->isTrivial() || !CD->isDefaultConstructor())
+ return false;
+
+ // Value-initialization does not call a trivial default constructor, so such a
+ // call is a core constant expression whether or not the constructor is
+ // constexpr.
+ if (!CD->isConstexpr() && !IsValueInitialization) {
+ if (Info.getLangOpts().CPlusPlus0x) {
+ // FIXME: If DiagDecl is an implicitly-declared special member function,
+ // we should be much more explicit about why it's not constexpr.
+ Info.CCEDiag(Loc, diag::note_constexpr_invalid_function, 1)
+ << /*IsConstexpr*/0 << /*IsConstructor*/1 << CD;
+ Info.Note(CD->getLocation(), diag::note_declared_at);
+ } else {
+ Info.CCEDiag(Loc, diag::note_invalid_subexpr_in_const_expr);
+ }
+ }
+ return true;
+}
+
+/// CheckConstexprFunction - Check that a function can be called in a constant
+/// expression.
+static bool CheckConstexprFunction(EvalInfo &Info, SourceLocation CallLoc,
+ const FunctionDecl *Declaration,
+ const FunctionDecl *Definition) {
+ // Potential constant expressions can contain calls to declared, but not yet
+ // defined, constexpr functions.
+ if (Info.CheckingPotentialConstantExpression && !Definition &&
+ Declaration->isConstexpr())
+ return false;
+
+ // Can we evaluate this function call?
+ if (Definition && Definition->isConstexpr() && !Definition->isInvalidDecl())
+ return true;
+
+ if (Info.getLangOpts().CPlusPlus0x) {
+ const FunctionDecl *DiagDecl = Definition ? Definition : Declaration;
+ // FIXME: If DiagDecl is an implicitly-declared special member function, we
+ // should be much more explicit about why it's not constexpr.
+ Info.Diag(CallLoc, diag::note_constexpr_invalid_function, 1)
+ << DiagDecl->isConstexpr() << isa<CXXConstructorDecl>(DiagDecl)
+ << DiagDecl;
+ Info.Note(DiagDecl->getLocation(), diag::note_declared_at);
+ } else {
+ Info.Diag(CallLoc, diag::note_invalid_subexpr_in_const_expr);
+ }
+ return false;
+}
+
+namespace {
+typedef SmallVector<APValue, 8> ArgVector;
+}
+
+/// EvaluateArgs - Evaluate the arguments to a function call.
+static bool EvaluateArgs(ArrayRef<const Expr*> Args, ArgVector &ArgValues,
+ EvalInfo &Info) {
+ bool Success = true;
+ for (ArrayRef<const Expr*>::iterator I = Args.begin(), E = Args.end();
+ I != E; ++I) {
+ if (!Evaluate(ArgValues[I - Args.begin()], Info, *I)) {
+ // If we're checking for a potential constant expression, evaluate all
+ // initializers even if some of them fail.
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ }
+ return Success;
+}
+
+/// Evaluate a function call.
+static bool HandleFunctionCall(SourceLocation CallLoc,
+ const FunctionDecl *Callee, const LValue *This,
+ ArrayRef<const Expr*> Args, const Stmt *Body,
+ EvalInfo &Info, APValue &Result) {
+ ArgVector ArgValues(Args.size());
+ if (!EvaluateArgs(Args, ArgValues, Info))
+ return false;
+
+ if (!Info.CheckCallLimit(CallLoc))
+ return false;
+
+ CallStackFrame Frame(Info, CallLoc, Callee, This, ArgValues.data());
+ return EvaluateStmt(Result, Info, Body) == ESR_Returned;
+}
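+ // For illustration only (hypothetical function): a call such as
+ //
+ //   constexpr int sq(int x) { return x * x; }
+ //   constexpr int n = sq(3);  // folds to 9
+ //
+ // evaluates the arguments into ArgValues, pushes a CallStackFrame, and then
+ // evaluates the body, expecting it to reach a return statement.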
+
+/// Evaluate a constructor call.
+static bool HandleConstructorCall(SourceLocation CallLoc, const LValue &This,
+ ArrayRef<const Expr*> Args,
+ const CXXConstructorDecl *Definition,
+ EvalInfo &Info, APValue &Result) {
+ ArgVector ArgValues(Args.size());
+ if (!EvaluateArgs(Args, ArgValues, Info))
+ return false;
+
+ if (!Info.CheckCallLimit(CallLoc))
+ return false;
+
+ const CXXRecordDecl *RD = Definition->getParent();
+ if (RD->getNumVBases()) {
+ Info.Diag(CallLoc, diag::note_constexpr_virtual_base) << RD;
+ return false;
+ }
+
+ CallStackFrame Frame(Info, CallLoc, Definition, &This, ArgValues.data());
+
+ // If it's a delegating constructor, just delegate.
+ if (Definition->isDelegatingConstructor()) {
+ CXXConstructorDecl::init_const_iterator I = Definition->init_begin();
+ return EvaluateInPlace(Result, Info, This, (*I)->getInit());
+ }
+
+ // For a trivial copy or move constructor, perform an APValue copy. This is
+ // essential for unions, where the operations performed by the constructor
+ // cannot be represented by ctor-initializers.
+ if (Definition->isDefaulted() &&
+ ((Definition->isCopyConstructor() && Definition->isTrivial()) ||
+ (Definition->isMoveConstructor() && Definition->isTrivial()))) {
+ LValue RHS;
+ RHS.setFrom(Info.Ctx, ArgValues[0]);
+ return HandleLValueToRValueConversion(Info, Args[0], Args[0]->getType(),
+ RHS, Result);
+ }
+
+ // Reserve space for the struct members.
+ if (!RD->isUnion() && Result.isUninit())
+ Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
+ std::distance(RD->field_begin(), RD->field_end()));
+
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+ bool Success = true;
+ unsigned BasesSeen = 0;
+#ifndef NDEBUG
+ CXXRecordDecl::base_class_const_iterator BaseIt = RD->bases_begin();
+#endif
+ for (CXXConstructorDecl::init_const_iterator I = Definition->init_begin(),
+ E = Definition->init_end(); I != E; ++I) {
+ LValue Subobject = This;
+ APValue *Value = &Result;
+
+ // Determine the subobject to initialize.
+ if ((*I)->isBaseInitializer()) {
+ QualType BaseType((*I)->getBaseClass(), 0);
+#ifndef NDEBUG
+ // Non-virtual base classes are initialized in the order in which they appear
+ // in the class definition. We have already checked for virtual base classes.
+ assert(!BaseIt->isVirtual() && "virtual base for literal type");
+ assert(Info.Ctx.hasSameType(BaseIt->getType(), BaseType) &&
+ "base class initializers not in expected order");
+ ++BaseIt;
+#endif
+ HandleLValueDirectBase(Info, (*I)->getInit(), Subobject, RD,
+ BaseType->getAsCXXRecordDecl(), &Layout);
+ Value = &Result.getStructBase(BasesSeen++);
+ } else if (FieldDecl *FD = (*I)->getMember()) {
+ HandleLValueMember(Info, (*I)->getInit(), Subobject, FD, &Layout);
+ if (RD->isUnion()) {
+ Result = APValue(FD);
+ Value = &Result.getUnionValue();
+ } else {
+ Value = &Result.getStructField(FD->getFieldIndex());
+ }
+ } else if (IndirectFieldDecl *IFD = (*I)->getIndirectMember()) {
+ // Walk the indirect field decl's chain to find the object to initialize,
+ // and make sure we've initialized every step along it.
+ for (IndirectFieldDecl::chain_iterator C = IFD->chain_begin(),
+ CE = IFD->chain_end();
+ C != CE; ++C) {
+ FieldDecl *FD = cast<FieldDecl>(*C);
+ CXXRecordDecl *CD = cast<CXXRecordDecl>(FD->getParent());
+ // Switch the union field if it differs. This happens if we had
+ // preceding zero-initialization, and we're now initializing a union
+ // subobject other than the first.
+ // FIXME: In this case, the values of the other subobjects are
+ // specified, since zero-initialization sets all padding bits to zero.
+ if (Value->isUninit() ||
+ (Value->isUnion() && Value->getUnionField() != FD)) {
+ if (CD->isUnion())
+ *Value = APValue(FD);
+ else
+ *Value = APValue(APValue::UninitStruct(), CD->getNumBases(),
+ std::distance(CD->field_begin(), CD->field_end()));
+ }
+ HandleLValueMember(Info, (*I)->getInit(), Subobject, FD);
+ if (CD->isUnion())
+ Value = &Value->getUnionValue();
+ else
+ Value = &Value->getStructField(FD->getFieldIndex());
+ }
+ } else {
+ llvm_unreachable("unknown base initializer kind");
+ }
+
+ if (!EvaluateInPlace(*Value, Info, Subobject, (*I)->getInit(),
+ (*I)->isBaseInitializer()
+ ? CCEK_Constant : CCEK_MemberInit)) {
+ // If we're checking for a potential constant expression, evaluate all
+ // initializers even if some of them fail.
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ }
+
+ return Success;
+}
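+ // For illustration only (hypothetical class): a constexpr constructor call
+ // such as
+ //
+ //   struct P {
+ //     int x, y;
+ //     constexpr P(int v) : x(v), y(v + 1) {}
+ //     constexpr P() : P(0) {}   // delegating: handled above by evaluating
+ //   };                          // the target constructor's initializer
+ //   constexpr P p(2);           // evaluates to {2, 3}
+ //
+ // walks the ctor-initializers in order, building each base or member APValue
+ // in place within Result.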
+
+namespace {
+class HasSideEffect
+ : public ConstStmtVisitor<HasSideEffect, bool> {
+ const ASTContext &Ctx;
+public:
+
+ HasSideEffect(const ASTContext &C) : Ctx(C) {}
+
+ // Unhandled nodes conservatively default to having side effects.
+ bool VisitStmt(const Stmt *S) {
+ return true;
+ }
+
+ bool VisitParenExpr(const ParenExpr *E) { return Visit(E->getSubExpr()); }
+ bool VisitGenericSelectionExpr(const GenericSelectionExpr *E) {
+ return Visit(E->getResultExpr());
+ }
+ bool VisitDeclRefExpr(const DeclRefExpr *E) {
+ if (Ctx.getCanonicalType(E->getType()).isVolatileQualified())
+ return true;
+ return false;
+ }
+ bool VisitObjCIvarRefExpr(const ObjCIvarRefExpr *E) {
+ if (Ctx.getCanonicalType(E->getType()).isVolatileQualified())
+ return true;
+ return false;
+ }
+
+ // We don't want to evaluate BlockExprs multiple times, as they generate
+ // a ton of code.
+ bool VisitBlockExpr(const BlockExpr *E) { return true; }
+ bool VisitPredefinedExpr(const PredefinedExpr *E) { return false; }
+ bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E)
+ { return Visit(E->getInitializer()); }
+ bool VisitMemberExpr(const MemberExpr *E) { return Visit(E->getBase()); }
+ bool VisitIntegerLiteral(const IntegerLiteral *E) { return false; }
+ bool VisitFloatingLiteral(const FloatingLiteral *E) { return false; }
+ bool VisitStringLiteral(const StringLiteral *E) { return false; }
+ bool VisitCharacterLiteral(const CharacterLiteral *E) { return false; }
+ bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E)
+ { return false; }
+ bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E)
+ { return Visit(E->getLHS()) || Visit(E->getRHS()); }
+ bool VisitChooseExpr(const ChooseExpr *E)
+ { return Visit(E->getChosenSubExpr(Ctx)); }
+ bool VisitCastExpr(const CastExpr *E) { return Visit(E->getSubExpr()); }
+ bool VisitBinAssign(const BinaryOperator *E) { return true; }
+ bool VisitCompoundAssignOperator(const BinaryOperator *E) { return true; }
+ bool VisitBinaryOperator(const BinaryOperator *E)
+ { return Visit(E->getLHS()) || Visit(E->getRHS()); }
+ bool VisitUnaryPreInc(const UnaryOperator *E) { return true; }
+ bool VisitUnaryPostInc(const UnaryOperator *E) { return true; }
+ bool VisitUnaryPreDec(const UnaryOperator *E) { return true; }
+ bool VisitUnaryPostDec(const UnaryOperator *E) { return true; }
+ bool VisitUnaryDeref(const UnaryOperator *E) {
+ if (Ctx.getCanonicalType(E->getType()).isVolatileQualified())
+ return true;
+ return Visit(E->getSubExpr());
+ }
+ bool VisitUnaryOperator(const UnaryOperator *E) { return Visit(E->getSubExpr()); }
+
+ // Has side effects if any element does.
+ bool VisitInitListExpr(const InitListExpr *E) {
+ for (unsigned i = 0, e = E->getNumInits(); i != e; ++i)
+ if (Visit(E->getInit(i))) return true;
+ if (const Expr *filler = E->getArrayFiller())
+ return Visit(filler);
+ return false;
+ }
+
+ bool VisitSizeOfPackExpr(const SizeOfPackExpr *) { return false; }
+};
+
+class OpaqueValueEvaluation {
+ EvalInfo &info;
+ OpaqueValueExpr *opaqueValue;
+
+public:
+ OpaqueValueEvaluation(EvalInfo &info, OpaqueValueExpr *opaqueValue,
+ Expr *value)
+ : info(info), opaqueValue(opaqueValue) {
+
+ // If evaluation fails, fail immediately.
+ if (!Evaluate(info.OpaqueValues[opaqueValue], info, value)) {
+ this->opaqueValue = 0;
+ return;
+ }
+ }
+
+ bool hasError() const { return opaqueValue == 0; }
+
+ ~OpaqueValueEvaluation() {
+ // FIXME: For a recursive constexpr call, an outer stack frame might have
+ // been using this opaque value too, and will now have to re-evaluate the
+ // source expression.
+ if (opaqueValue) info.OpaqueValues.erase(opaqueValue);
+ }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Generic Evaluation
+//===----------------------------------------------------------------------===//
+namespace {
+
+// FIXME: RetTy is always bool. Remove it.
+template <class Derived, typename RetTy=bool>
+class ExprEvaluatorBase
+ : public ConstStmtVisitor<Derived, RetTy> {
+private:
+ RetTy DerivedSuccess(const APValue &V, const Expr *E) {
+ return static_cast<Derived*>(this)->Success(V, E);
+ }
+ RetTy DerivedZeroInitialization(const Expr *E) {
+ return static_cast<Derived*>(this)->ZeroInitialization(E);
+ }
+
+ // Check whether a conditional operator with a non-constant condition is a
+ // potential constant expression. If neither arm is a potential constant
+ // expression, then the conditional operator is not either.
+ template<typename ConditionalOperator>
+ void CheckPotentialConstantConditional(const ConditionalOperator *E) {
+ assert(Info.CheckingPotentialConstantExpression);
+
+ // Speculatively evaluate both arms.
+ {
+ llvm::SmallVector<PartialDiagnosticAt, 8> Diag;
+ SpeculativeEvaluationRAII Speculate(Info, &Diag);
+
+ StmtVisitorTy::Visit(E->getFalseExpr());
+ if (Diag.empty())
+ return;
+
+ Diag.clear();
+ StmtVisitorTy::Visit(E->getTrueExpr());
+ if (Diag.empty())
+ return;
+ }
+
+ Error(E, diag::note_constexpr_conditional_never_const);
+ }
+
+
+ template<typename ConditionalOperator>
+ bool HandleConditionalOperator(const ConditionalOperator *E) {
+ bool BoolResult;
+ if (!EvaluateAsBooleanCondition(E->getCond(), BoolResult, Info)) {
+ if (Info.CheckingPotentialConstantExpression)
+ CheckPotentialConstantConditional(E);
+ return false;
+ }
+
+ Expr *EvalExpr = BoolResult ? E->getTrueExpr() : E->getFalseExpr();
+ return StmtVisitorTy::Visit(EvalExpr);
+ }
+
+protected:
+ EvalInfo &Info;
+ typedef ConstStmtVisitor<Derived, RetTy> StmtVisitorTy;
+ typedef ExprEvaluatorBase ExprEvaluatorBaseTy;
+
+ OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
+ return Info.CCEDiag(E, D);
+ }
+
+ RetTy ZeroInitialization(const Expr *E) { return Error(E); }
+
+public:
+ ExprEvaluatorBase(EvalInfo &Info) : Info(Info) {}
+
+ EvalInfo &getEvalInfo() { return Info; }
+
+ /// Report an evaluation error. This should only be called when an error is
+ /// first discovered. When propagating an error, just return false.
+ bool Error(const Expr *E, diag::kind D) {
+ Info.Diag(E, D);
+ return false;
+ }
+ bool Error(const Expr *E) {
+ return Error(E, diag::note_invalid_subexpr_in_const_expr);
+ }
+
+ RetTy VisitStmt(const Stmt *) {
+ llvm_unreachable("Expression evaluator should not be called on stmts");
+ }
+ RetTy VisitExpr(const Expr *E) {
+ return Error(E);
+ }
+
+ RetTy VisitParenExpr(const ParenExpr *E)
+ { return StmtVisitorTy::Visit(E->getSubExpr()); }
+ RetTy VisitUnaryExtension(const UnaryOperator *E)
+ { return StmtVisitorTy::Visit(E->getSubExpr()); }
+ RetTy VisitUnaryPlus(const UnaryOperator *E)
+ { return StmtVisitorTy::Visit(E->getSubExpr()); }
+ RetTy VisitChooseExpr(const ChooseExpr *E)
+ { return StmtVisitorTy::Visit(E->getChosenSubExpr(Info.Ctx)); }
+ RetTy VisitGenericSelectionExpr(const GenericSelectionExpr *E)
+ { return StmtVisitorTy::Visit(E->getResultExpr()); }
+ RetTy VisitSubstNonTypeTemplateParmExpr(const SubstNonTypeTemplateParmExpr *E)
+ { return StmtVisitorTy::Visit(E->getReplacement()); }
+ RetTy VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *E)
+ { return StmtVisitorTy::Visit(E->getExpr()); }
+ // We cannot create any objects for which cleanups are required, so there is
+ // nothing to do here; all cleanups must come from unevaluated subexpressions.
+ RetTy VisitExprWithCleanups(const ExprWithCleanups *E)
+ { return StmtVisitorTy::Visit(E->getSubExpr()); }
+
+ RetTy VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *E) {
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 0;
+ return static_cast<Derived*>(this)->VisitCastExpr(E);
+ }
+ RetTy VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *E) {
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 1;
+ return static_cast<Derived*>(this)->VisitCastExpr(E);
+ }
+
+ RetTy VisitBinaryOperator(const BinaryOperator *E) {
+ switch (E->getOpcode()) {
+ default:
+ return Error(E);
+
+ case BO_Comma:
+ VisitIgnoredValue(E->getLHS());
+ return StmtVisitorTy::Visit(E->getRHS());
+
+ case BO_PtrMemD:
+ case BO_PtrMemI: {
+ LValue Obj;
+ if (!HandleMemberPointerAccess(Info, E, Obj))
+ return false;
+ APValue Result;
+ if (!HandleLValueToRValueConversion(Info, E, E->getType(), Obj, Result))
+ return false;
+ return DerivedSuccess(Result, E);
+ }
+ }
+ }
+
+ RetTy VisitBinaryConditionalOperator(const BinaryConditionalOperator *E) {
+ // Cache the value of the common expression.
+ OpaqueValueEvaluation opaque(Info, E->getOpaqueValue(), E->getCommon());
+ if (opaque.hasError())
+ return false;
+
+ return HandleConditionalOperator(E);
+ }
+
+ RetTy VisitConditionalOperator(const ConditionalOperator *E) {
+ bool IsBcpCall = false;
+ // If the condition (ignoring parens) is a __builtin_constant_p call,
+ // the result is a constant expression if it can be folded without
+ // side-effects. This is an important GNU extension. See GCC PR38377
+ // for discussion.
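+    // For instance (illustrative, not exhaustive): in
+    //   __builtin_constant_p(n) ? n : 0
+    // with a non-constant 'n', the condition folds to 0 and the whole
+    // conditional can still fold to the constant 0.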
+ if (const CallExpr *CallCE =
+ dyn_cast<CallExpr>(E->getCond()->IgnoreParenCasts()))
+ if (CallCE->isBuiltinCall() == Builtin::BI__builtin_constant_p)
+ IsBcpCall = true;
+
+ // Always assume __builtin_constant_p(...) ? ... : ... is a potential
+ // constant expression; we can't check whether it's potentially foldable.
+ if (Info.CheckingPotentialConstantExpression && IsBcpCall)
+ return false;
+
+ FoldConstant Fold(Info);
+
+ if (!HandleConditionalOperator(E))
+ return false;
+
+ if (IsBcpCall)
+ Fold.Fold(Info);
+
+ return true;
+ }
+
+ RetTy VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
+ const APValue *Value = Info.getOpaqueValue(E);
+ if (!Value) {
+ const Expr *Source = E->getSourceExpr();
+ if (!Source)
+ return Error(E);
+ if (Source == E) { // sanity checking.
+ assert(0 && "OpaqueValueExpr recursively refers to itself");
+ return Error(E);
+ }
+ return StmtVisitorTy::Visit(Source);
+ }
+ return DerivedSuccess(*Value, E);
+ }
+
+ RetTy VisitCallExpr(const CallExpr *E) {
+ const Expr *Callee = E->getCallee()->IgnoreParens();
+ QualType CalleeType = Callee->getType();
+
+ const FunctionDecl *FD = 0;
+ LValue *This = 0, ThisVal;
+ llvm::ArrayRef<const Expr*> Args(E->getArgs(), E->getNumArgs());
+ bool HasQualifier = false;
+
+ // Extract function decl and 'this' pointer from the callee.
+ if (CalleeType->isSpecificBuiltinType(BuiltinType::BoundMember)) {
+ const ValueDecl *Member = 0;
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(Callee)) {
+ // Explicit bound member calls, such as x.f() or p->g();
+ if (!EvaluateObjectArgument(Info, ME->getBase(), ThisVal))
+ return false;
+ Member = ME->getMemberDecl();
+ This = &ThisVal;
+ HasQualifier = ME->hasQualifier();
+ } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(Callee)) {
+ // Indirect bound member calls ('.*' or '->*').
+ Member = HandleMemberPointerAccess(Info, BE, ThisVal, false);
+ if (!Member) return false;
+ This = &ThisVal;
+ } else
+ return Error(Callee);
+
+ FD = dyn_cast<FunctionDecl>(Member);
+ if (!FD)
+ return Error(Callee);
+ } else if (CalleeType->isFunctionPointerType()) {
+ LValue Call;
+ if (!EvaluatePointer(Callee, Call, Info))
+ return false;
+
+ if (!Call.getLValueOffset().isZero())
+ return Error(Callee);
+ FD = dyn_cast_or_null<FunctionDecl>(
+ Call.getLValueBase().dyn_cast<const ValueDecl*>());
+ if (!FD)
+ return Error(Callee);
+
+ // Overloaded operator calls to member functions are represented as normal
+ // calls with '*this' as the first argument.
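+      // For example (illustrative), a member 'operator+' invoked as 'a + b'
+      // is represented with the argument list (a, b); 'a' becomes the object
+      // argument below and the remaining arguments are passed to the call.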
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ if (MD && !MD->isStatic()) {
+ // FIXME: When selecting an implicit conversion for an overloaded
+ // operator delete, we sometimes try to evaluate calls to conversion
+ // operators without a 'this' parameter!
+ if (Args.empty())
+ return Error(E);
+
+ if (!EvaluateObjectArgument(Info, Args[0], ThisVal))
+ return false;
+ This = &ThisVal;
+ Args = Args.slice(1);
+ }
+
+ // Don't call function pointers which have been cast to some other type.
+ if (!Info.Ctx.hasSameType(CalleeType->getPointeeType(), FD->getType()))
+ return Error(E);
+ } else
+ return Error(E);
+
+ if (This && !This->checkSubobject(Info, E, CSK_This))
+ return false;
+
+ // DR1358 allows virtual constexpr functions in some cases. Don't allow
+ // calls to such functions in constant expressions.
+ if (This && !HasQualifier &&
+ isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isVirtual())
+ return Error(E, diag::note_constexpr_virtual_call);
+
+ const FunctionDecl *Definition = 0;
+ Stmt *Body = FD->getBody(Definition);
+ APValue Result;
+
+ if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition) ||
+ !HandleFunctionCall(E->getExprLoc(), Definition, This, Args, Body,
+ Info, Result))
+ return false;
+
+ return DerivedSuccess(Result, E);
+ }
+
+ RetTy VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
+ return StmtVisitorTy::Visit(E->getInitializer());
+ }
+ RetTy VisitInitListExpr(const InitListExpr *E) {
+ if (E->getNumInits() == 0)
+ return DerivedZeroInitialization(E);
+ if (E->getNumInits() == 1)
+ return StmtVisitorTy::Visit(E->getInit(0));
+ return Error(E);
+ }
+ RetTy VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
+ return DerivedZeroInitialization(E);
+ }
+ RetTy VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
+ return DerivedZeroInitialization(E);
+ }
+ RetTy VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
+ return DerivedZeroInitialization(E);
+ }
+
+ /// A member expression where the object is a prvalue is itself a prvalue.
+ RetTy VisitMemberExpr(const MemberExpr *E) {
+ assert(!E->isArrow() && "missing call to bound member function?");
+
+ APValue Val;
+ if (!Evaluate(Val, Info, E->getBase()))
+ return false;
+
+ QualType BaseTy = E->getBase()->getType();
+
+ const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
+ if (!FD) return Error(E);
+ assert(!FD->getType()->isReferenceType() && "prvalue reference?");
+ assert(BaseTy->getAs<RecordType>()->getDecl()->getCanonicalDecl() ==
+ FD->getParent()->getCanonicalDecl() && "record / field mismatch");
+
+ SubobjectDesignator Designator(BaseTy);
+ Designator.addDeclUnchecked(FD);
+
+ return ExtractSubobject(Info, E, Val, BaseTy, Designator, E->getType()) &&
+ DerivedSuccess(Val, E);
+ }
+
+ RetTy VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ break;
+
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
+ return StmtVisitorTy::Visit(E->getSubExpr());
+
+ case CK_LValueToRValue: {
+ LValue LVal;
+ if (!EvaluateLValue(E->getSubExpr(), LVal, Info))
+ return false;
+ APValue RVal;
+ // Note, we use the subexpression's type in order to retain cv-qualifiers.
+ if (!HandleLValueToRValueConversion(Info, E, E->getSubExpr()->getType(),
+ LVal, RVal))
+ return false;
+ return DerivedSuccess(RVal, E);
+ }
+ }
+
+ return Error(E);
+ }
+
+ /// Visit a value which is evaluated, but whose value is ignored.
+ void VisitIgnoredValue(const Expr *E) {
+ APValue Scratch;
+ if (!Evaluate(Scratch, Info, E))
+ Info.EvalStatus.HasSideEffects = true;
+ }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Common base class for lvalue and temporary evaluation.
+//===----------------------------------------------------------------------===//
+namespace {
+template<class Derived>
+class LValueExprEvaluatorBase
+ : public ExprEvaluatorBase<Derived, bool> {
+protected:
+ LValue &Result;
+ typedef LValueExprEvaluatorBase LValueExprEvaluatorBaseTy;
+ typedef ExprEvaluatorBase<Derived, bool> ExprEvaluatorBaseTy;
+
+ bool Success(APValue::LValueBase B) {
+ Result.set(B);
+ return true;
+ }
+
+public:
+ LValueExprEvaluatorBase(EvalInfo &Info, LValue &Result) :
+ ExprEvaluatorBaseTy(Info), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ Result.setFrom(this->Info.Ctx, V);
+ return true;
+ }
+
+ bool VisitMemberExpr(const MemberExpr *E) {
+ // Handle non-static data members.
+ QualType BaseTy;
+ if (E->isArrow()) {
+ if (!EvaluatePointer(E->getBase(), Result, this->Info))
+ return false;
+ BaseTy = E->getBase()->getType()->getAs<PointerType>()->getPointeeType();
+ } else if (E->getBase()->isRValue()) {
+ assert(E->getBase()->getType()->isRecordType());
+ if (!EvaluateTemporary(E->getBase(), Result, this->Info))
+ return false;
+ BaseTy = E->getBase()->getType();
+ } else {
+ if (!this->Visit(E->getBase()))
+ return false;
+ BaseTy = E->getBase()->getType();
+ }
+
+ const ValueDecl *MD = E->getMemberDecl();
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl())) {
+ assert(BaseTy->getAs<RecordType>()->getDecl()->getCanonicalDecl() ==
+ FD->getParent()->getCanonicalDecl() && "record / field mismatch");
+ (void)BaseTy;
+ HandleLValueMember(this->Info, E, Result, FD);
+ } else if (const IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(MD)) {
+ HandleLValueIndirectMember(this->Info, E, Result, IFD);
+ } else
+ return this->Error(E);
+
+ if (MD->getType()->isReferenceType()) {
+ APValue RefValue;
+ if (!HandleLValueToRValueConversion(this->Info, E, MD->getType(), Result,
+ RefValue))
+ return false;
+ return Success(RefValue, E);
+ }
+ return true;
+ }
+
+ bool VisitBinaryOperator(const BinaryOperator *E) {
+ switch (E->getOpcode()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ return HandleMemberPointerAccess(this->Info, E, Result);
+ }
+ }
+
+ bool VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase: {
+ if (!this->Visit(E->getSubExpr()))
+ return false;
+
+ // Now figure out the necessary offset to add to the base LV to get from
+ // the derived class to the base class.
+ QualType Type = E->getSubExpr()->getType();
+
+ for (CastExpr::path_const_iterator PathI = E->path_begin(),
+ PathE = E->path_end(); PathI != PathE; ++PathI) {
+ if (!HandleLValueBase(this->Info, E, Result, Type->getAsCXXRecordDecl(),
+ *PathI))
+ return false;
+ Type = (*PathI)->getType();
+ }
+
+ return true;
+ }
+ }
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// LValue Evaluation
+//
+// This is used for evaluating lvalues (in C and C++), xvalues (in C++11),
+// function designators (in C), decl references to void objects (in C), and
+// temporaries (if building with -Wno-address-of-temporary).
+//
+// LValue evaluation produces values comprising a base expression of one of the
+// following types:
+// - Declarations
+// * VarDecl
+// * FunctionDecl
+// - Literals
+// * CompoundLiteralExpr in C
+// * StringLiteral
+// * CXXTypeidExpr
+// * PredefinedExpr
+// * ObjCStringLiteralExpr
+// * ObjCEncodeExpr
+// * AddrLabelExpr
+// * BlockExpr
+// * CallExpr for a MakeStringConstant builtin
+// - Locals and temporaries
+// * Any Expr, with a CallIndex indicating the function in which the temporary
+// was evaluated.
+// plus an offset in bytes.
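+//
+// For example (illustrative): given 'extern int arr[3];', the lvalue
+// 'arr[1]' is represented with the VarDecl for 'arr' as its base, an offset
+// of one int (sizeof(int) bytes), and a designator recording index 1.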
+//===----------------------------------------------------------------------===//
+namespace {
+class LValueExprEvaluator
+ : public LValueExprEvaluatorBase<LValueExprEvaluator> {
+public:
+ LValueExprEvaluator(EvalInfo &Info, LValue &Result) :
+ LValueExprEvaluatorBaseTy(Info, Result) {}
+
+ bool VisitVarDecl(const Expr *E, const VarDecl *VD);
+
+ bool VisitDeclRefExpr(const DeclRefExpr *E);
+ bool VisitPredefinedExpr(const PredefinedExpr *E) { return Success(E); }
+ bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
+ bool VisitCompoundLiteralExpr(const CompoundLiteralExpr *E);
+ bool VisitMemberExpr(const MemberExpr *E);
+ bool VisitStringLiteral(const StringLiteral *E) { return Success(E); }
+ bool VisitObjCEncodeExpr(const ObjCEncodeExpr *E) { return Success(E); }
+ bool VisitCXXTypeidExpr(const CXXTypeidExpr *E);
+ bool VisitArraySubscriptExpr(const ArraySubscriptExpr *E);
+ bool VisitUnaryDeref(const UnaryOperator *E);
+ bool VisitUnaryReal(const UnaryOperator *E);
+ bool VisitUnaryImag(const UnaryOperator *E);
+
+ bool VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return LValueExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_LValueBitCast:
+ this->CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+ if (!Visit(E->getSubExpr()))
+ return false;
+ Result.Designator.setInvalid();
+ return true;
+
+ case CK_BaseToDerived:
+ if (!Visit(E->getSubExpr()))
+ return false;
+ return HandleBaseToDerivedCast(Info, E, Result);
+ }
+ }
+};
+} // end anonymous namespace
+
+/// Evaluate an expression as an lvalue. This can be legitimately called on
+/// expressions which are not glvalues, in a few cases:
+/// * function designators in C,
+/// * "extern void" objects,
+/// * temporaries, if building with -Wno-address-of-temporary.
+static bool EvaluateLValue(const Expr* E, LValue& Result, EvalInfo &Info) {
+ assert((E->isGLValue() || E->getType()->isFunctionType() ||
+ E->getType()->isVoidType() || isa<CXXTemporaryObjectExpr>(E)) &&
+ "can't evaluate expression as an lvalue");
+ return LValueExprEvaluator(Info, Result).Visit(E);
+}
+
+bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(E->getDecl()))
+ return Success(FD);
+ if (const VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
+ return VisitVarDecl(E, VD);
+ return Error(E);
+}
+
+bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
+ if (!VD->getType()->isReferenceType()) {
+ if (isa<ParmVarDecl>(VD)) {
+ Result.set(VD, Info.CurrentCall->Index);
+ return true;
+ }
+ return Success(VD);
+ }
+
+ APValue V;
+ if (!EvaluateVarDeclInit(Info, E, VD, Info.CurrentCall, V))
+ return false;
+ return Success(V, E);
+}
+
+bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *E) {
+ if (E->GetTemporaryExpr()->isRValue()) {
+ if (E->getType()->isRecordType())
+ return EvaluateTemporary(E->GetTemporaryExpr(), Result, Info);
+
+ Result.set(E, Info.CurrentCall->Index);
+ return EvaluateInPlace(Info.CurrentCall->Temporaries[E], Info,
+ Result, E->GetTemporaryExpr());
+ }
+
+ // Materialization of an lvalue temporary occurs when we need to force a copy
+ // (for instance, if it's a bitfield).
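+  // For example (illustrative), 'const int &r = s.bf;' for a bit-field
+  // member 'bf' must copy the value into a temporary, since a reference
+  // cannot bind directly to a bit-field.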
+ // FIXME: The AST should contain an lvalue-to-rvalue node for such cases.
+ if (!Visit(E->GetTemporaryExpr()))
+ return false;
+ if (!HandleLValueToRValueConversion(Info, E, E->getType(), Result,
+ Info.CurrentCall->Temporaries[E]))
+ return false;
+ Result.set(E, Info.CurrentCall->Index);
+ return true;
+}
+
+bool
+LValueExprEvaluator::VisitCompoundLiteralExpr(const CompoundLiteralExpr *E) {
+ assert(!Info.getLangOpts().CPlusPlus && "lvalue compound literal in c++?");
+ // Defer visiting the literal until the lvalue-to-rvalue conversion. We can
+ // only see this when folding in C, so there's no standard to follow here.
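+  // For example (illustrative), folding the C compound literal '(int){ 42 }'
+  // used as an lvalue.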
+ return Success(E);
+}
+
+bool LValueExprEvaluator::VisitCXXTypeidExpr(const CXXTypeidExpr *E) {
+ if (E->isTypeOperand())
+ return Success(E);
+ CXXRecordDecl *RD = E->getExprOperand()->getType()->getAsCXXRecordDecl();
+ if (RD && RD->isPolymorphic()) {
+ Info.Diag(E, diag::note_constexpr_typeid_polymorphic)
+ << E->getExprOperand()->getType()
+ << E->getExprOperand()->getSourceRange();
+ return false;
+ }
+ return Success(E);
+}
+
+bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) {
+ // Handle static data members.
+ if (const VarDecl *VD = dyn_cast<VarDecl>(E->getMemberDecl())) {
+ VisitIgnoredValue(E->getBase());
+ return VisitVarDecl(E, VD);
+ }
+
+ // Handle static member functions.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(E->getMemberDecl())) {
+ if (MD->isStatic()) {
+ VisitIgnoredValue(E->getBase());
+ return Success(MD);
+ }
+ }
+
+ // Handle non-static data members.
+ return LValueExprEvaluatorBaseTy::VisitMemberExpr(E);
+}
+
+bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
+ // FIXME: Deal with vectors as array subscript bases.
+ if (E->getBase()->getType()->isVectorType())
+ return Error(E);
+
+ if (!EvaluatePointer(E->getBase(), Result, Info))
+ return false;
+
+ APSInt Index;
+ if (!EvaluateInteger(E->getIdx(), Index, Info))
+ return false;
+ int64_t IndexValue
+ = Index.isSigned() ? Index.getSExtValue()
+ : static_cast<int64_t>(Index.getZExtValue());
+
+ return HandleLValueArrayAdjustment(Info, E, Result, E->getType(), IndexValue);
+}
+
+bool LValueExprEvaluator::VisitUnaryDeref(const UnaryOperator *E) {
+ return EvaluatePointer(E->getSubExpr(), Result, Info);
+}
+
+bool LValueExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
+ if (!Visit(E->getSubExpr()))
+ return false;
+ // __real is a no-op on scalar lvalues.
+ if (E->getSubExpr()->getType()->isAnyComplexType())
+ HandleLValueComplexElement(Info, E, Result, E->getType(), false);
+ return true;
+}
+
+bool LValueExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
+ assert(E->getSubExpr()->getType()->isAnyComplexType() &&
+ "lvalue __imag__ on scalar?");
+ if (!Visit(E->getSubExpr()))
+ return false;
+ HandleLValueComplexElement(Info, E, Result, E->getType(), true);
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Pointer Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class PointerExprEvaluator
+ : public ExprEvaluatorBase<PointerExprEvaluator, bool> {
+ LValue &Result;
+
+ bool Success(const Expr *E) {
+ Result.set(E);
+ return true;
+ }
+public:
+
+ PointerExprEvaluator(EvalInfo &info, LValue &Result)
+ : ExprEvaluatorBaseTy(info), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ Result.setFrom(Info.Ctx, V);
+ return true;
+ }
+ bool ZeroInitialization(const Expr *E) {
+ return Success((Expr*)0);
+ }
+
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitCastExpr(const CastExpr* E);
+ bool VisitUnaryAddrOf(const UnaryOperator *E);
+ bool VisitObjCStringLiteral(const ObjCStringLiteral *E)
+ { return Success(E); }
+ bool VisitObjCNumericLiteral(const ObjCNumericLiteral *E)
+ { return Success(E); }
+ bool VisitAddrLabelExpr(const AddrLabelExpr *E)
+ { return Success(E); }
+ bool VisitCallExpr(const CallExpr *E);
+ bool VisitBlockExpr(const BlockExpr *E) {
+ if (!E->getBlockDecl()->hasCaptures())
+ return Success(E);
+ return Error(E);
+ }
+ bool VisitCXXThisExpr(const CXXThisExpr *E) {
+ if (!Info.CurrentCall->This)
+ return Error(E);
+ Result = *Info.CurrentCall->This;
+ return true;
+ }
+
+ // FIXME: Missing: @protocol, @selector
+};
+} // end anonymous namespace
+
+static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->hasPointerRepresentation());
+ return PointerExprEvaluator(Info, Result).Visit(E);
+}
+
+bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->getOpcode() != BO_Add &&
+ E->getOpcode() != BO_Sub)
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+
+ const Expr *PExp = E->getLHS();
+ const Expr *IExp = E->getRHS();
+ if (IExp->getType()->isPointerType())
+ std::swap(PExp, IExp);
+
+ bool EvalPtrOK = EvaluatePointer(PExp, Result, Info);
+ if (!EvalPtrOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+
+ llvm::APSInt Offset;
+ if (!EvaluateInteger(IExp, Offset, Info) || !EvalPtrOK)
+ return false;
+ int64_t AdditionalOffset
+ = Offset.isSigned() ? Offset.getSExtValue()
+ : static_cast<int64_t>(Offset.getZExtValue());
+ if (E->getOpcode() == BO_Sub)
+ AdditionalOffset = -AdditionalOffset;
+
+ QualType Pointee = PExp->getType()->getAs<PointerType>()->getPointeeType();
+ return HandleLValueArrayAdjustment(Info, E, Result, Pointee,
+ AdditionalOffset);
+}
+
+bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
+ return EvaluateLValue(E->getSubExpr(), Result, Info);
+}
+
+bool PointerExprEvaluator::VisitCastExpr(const CastExpr* E) {
+ const Expr* SubExpr = E->getSubExpr();
+
+ switch (E->getCastKind()) {
+ default:
+ break;
+
+ case CK_BitCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ if (!Visit(SubExpr))
+ return false;
+ // Bitcasts to cv void* are static_casts, not reinterpret_casts, so are
+ // permitted in constant expressions in C++11. Bitcasts from cv void* are
+ // also static_casts, but we disallow them as a resolution to DR1312.
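+    // For example (illustrative), '(void*)&i' remains a usable constant
+    // pointer value, while casting back, as in '(int*)(void*)&i', is
+    // diagnosed via the CCEDiag below and invalidates the designator.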
+ if (!E->getType()->isVoidPointerType()) {
+ Result.Designator.setInvalid();
+ if (SubExpr->getType()->isVoidPointerType())
+ CCEDiag(E, diag::note_constexpr_invalid_cast)
+ << 3 << SubExpr->getType();
+ else
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+ }
+ return true;
+
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase: {
+ if (!EvaluatePointer(E->getSubExpr(), Result, Info))
+ return false;
+ if (!Result.Base && Result.Offset.isZero())
+ return true;
+
+ // Now figure out the necessary offset to add to the base LV to get from
+ // the derived class to the base class.
+ QualType Type =
+ E->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
+
+ for (CastExpr::path_const_iterator PathI = E->path_begin(),
+ PathE = E->path_end(); PathI != PathE; ++PathI) {
+ if (!HandleLValueBase(Info, E, Result, Type->getAsCXXRecordDecl(),
+ *PathI))
+ return false;
+ Type = (*PathI)->getType();
+ }
+
+ return true;
+ }
+
+ case CK_BaseToDerived:
+ if (!Visit(E->getSubExpr()))
+ return false;
+ if (!Result.Base && Result.Offset.isZero())
+ return true;
+ return HandleBaseToDerivedCast(Info, E, Result);
+
+ case CK_NullToPointer:
+ VisitIgnoredValue(E->getSubExpr());
+ return ZeroInitialization(E);
+
+ case CK_IntegralToPointer: {
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+
+ APValue Value;
+ if (!EvaluateIntegerOrLValue(SubExpr, Value, Info))
+ break;
+
+ if (Value.isInt()) {
+ unsigned Size = Info.Ctx.getTypeSize(E->getType());
+ uint64_t N = Value.getInt().extOrTrunc(Size).getZExtValue();
+ Result.Base = (Expr*)0;
+ Result.Offset = CharUnits::fromQuantity(N);
+ Result.CallIndex = 0;
+ Result.Designator.setInvalid();
+ return true;
+ } else {
+ // Cast is of an lvalue, no need to change value.
+ Result.setFrom(Info.Ctx, Value);
+ return true;
+ }
+ }
+ case CK_ArrayToPointerDecay:
+ if (SubExpr->isGLValue()) {
+ if (!EvaluateLValue(SubExpr, Result, Info))
+ return false;
+ } else {
+ Result.set(SubExpr, Info.CurrentCall->Index);
+ if (!EvaluateInPlace(Info.CurrentCall->Temporaries[SubExpr],
+ Info, Result, SubExpr))
+ return false;
+ }
+ // The result is a pointer to the first element of the array.
+ if (const ConstantArrayType *CAT
+ = Info.Ctx.getAsConstantArrayType(SubExpr->getType()))
+ Result.addArray(Info, E, CAT);
+ else
+ Result.Designator.setInvalid();
+ return true;
+
+ case CK_FunctionToPointerDecay:
+ return EvaluateLValue(SubExpr, Result, Info);
+ }
+
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+}
+
+bool PointerExprEvaluator::VisitCallExpr(const CallExpr *E) {
+ if (IsStringLiteralCall(E))
+ return Success(E);
+
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Member Pointer Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class MemberPointerExprEvaluator
+ : public ExprEvaluatorBase<MemberPointerExprEvaluator, bool> {
+ MemberPtr &Result;
+
+ bool Success(const ValueDecl *D) {
+ Result = MemberPtr(D);
+ return true;
+ }
+public:
+
+ MemberPointerExprEvaluator(EvalInfo &Info, MemberPtr &Result)
+ : ExprEvaluatorBaseTy(Info), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ Result.setFrom(V);
+ return true;
+ }
+ bool ZeroInitialization(const Expr *E) {
+ return Success((const ValueDecl*)0);
+ }
+
+ bool VisitCastExpr(const CastExpr *E);
+ bool VisitUnaryAddrOf(const UnaryOperator *E);
+};
+} // end anonymous namespace
+
+static bool EvaluateMemberPointer(const Expr *E, MemberPtr &Result,
+ EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isMemberPointerType());
+ return MemberPointerExprEvaluator(Info, Result).Visit(E);
+}
+
+bool MemberPointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_NullToMemberPointer:
+ VisitIgnoredValue(E->getSubExpr());
+ return ZeroInitialization(E);
+
+ case CK_BaseToDerivedMemberPointer: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+ if (E->path_empty())
+ return true;
+ // Base-to-derived member pointer casts store the path in derived-to-base
+ // order, so iterate backwards. The CXXBaseSpecifier also provides us with
+ // the wrong end of the derived->base arc, so stagger the path by one class.
+ typedef std::reverse_iterator<CastExpr::path_const_iterator> ReverseIter;
+ for (ReverseIter PathI(E->path_end() - 1), PathE(E->path_begin());
+ PathI != PathE; ++PathI) {
+ assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
+ const CXXRecordDecl *Derived = (*PathI)->getType()->getAsCXXRecordDecl();
+ if (!Result.castToDerived(Derived))
+ return Error(E);
+ }
+ const Type *FinalTy = E->getType()->castAs<MemberPointerType>()->getClass();
+ if (!Result.castToDerived(FinalTy->getAsCXXRecordDecl()))
+ return Error(E);
+ return true;
+ }
+
+ case CK_DerivedToBaseMemberPointer:
+ if (!Visit(E->getSubExpr()))
+ return false;
+ for (CastExpr::path_const_iterator PathI = E->path_begin(),
+ PathE = E->path_end(); PathI != PathE; ++PathI) {
+ assert(!(*PathI)->isVirtual() && "memptr cast through vbase");
+ const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
+ if (!Result.castToBase(Base))
+ return Error(E);
+ }
+ return true;
+ }
+}
+
+bool MemberPointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
+ // C++11 [expr.unary.op]p3 has very strict rules on how the address of a
+ // member can be formed.
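+  // For example (illustrative), '&C::m' forms a pointer to member, while the
+  // parenthesized '&(C::m)' does not, so the operand here is always a
+  // DeclRefExpr naming the member.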
+ return Success(cast<DeclRefExpr>(E->getSubExpr())->getDecl());
+}
+
+//===----------------------------------------------------------------------===//
+// Record Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class RecordExprEvaluator
+ : public ExprEvaluatorBase<RecordExprEvaluator, bool> {
+ const LValue &This;
+ APValue &Result;
+ public:
+
+ RecordExprEvaluator(EvalInfo &info, const LValue &This, APValue &Result)
+ : ExprEvaluatorBaseTy(info), This(This), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ Result = V;
+ return true;
+ }
+ bool ZeroInitialization(const Expr *E);
+
+ bool VisitCastExpr(const CastExpr *E);
+ bool VisitInitListExpr(const InitListExpr *E);
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E);
+ };
+} // end anonymous namespace
+
+/// Perform zero-initialization on an object of non-union class type.
+/// C++11 [dcl.init]p5:
+/// To zero-initialize an object or reference of type T means:
+/// [...]
+/// -- if T is a (possibly cv-qualified) non-union class type,
+/// each non-static data member and each base-class subobject is
+/// zero-initialized
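+///
+/// For example (illustrative): zero-initializing an object of type
+///   struct D : B { double d; char *p; };
+/// recurses into the B base subobject and produces zero values for each of
+/// its fields as well as for 'd' and 'p'.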
+static bool HandleClassZeroInitialization(EvalInfo &Info, const Expr *E,
+ const RecordDecl *RD,
+ const LValue &This, APValue &Result) {
+ assert(!RD->isUnion() && "Expected non-union class type");
+ const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
+ Result = APValue(APValue::UninitStruct(), CD ? CD->getNumBases() : 0,
+ std::distance(RD->field_begin(), RD->field_end()));
+
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+ if (CD) {
+ unsigned Index = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = CD->bases_begin(),
+ End = CD->bases_end(); I != End; ++I, ++Index) {
+ const CXXRecordDecl *Base = I->getType()->getAsCXXRecordDecl();
+ LValue Subobject = This;
+ HandleLValueDirectBase(Info, E, Subobject, CD, Base, &Layout);
+ if (!HandleClassZeroInitialization(Info, E, Base, Subobject,
+ Result.getStructBase(Index)))
+ return false;
+ }
+ }
+
+ for (RecordDecl::field_iterator I = RD->field_begin(), End = RD->field_end();
+ I != End; ++I) {
+ // -- if T is a reference type, no initialization is performed.
+ if ((*I)->getType()->isReferenceType())
+ continue;
+
+ LValue Subobject = This;
+ HandleLValueMember(Info, E, Subobject, *I, &Layout);
+
+ ImplicitValueInitExpr VIE((*I)->getType());
+ if (!EvaluateInPlace(
+ Result.getStructField((*I)->getFieldIndex()), Info, Subobject, &VIE))
+ return false;
+ }
+
+ return true;
+}
+
+bool RecordExprEvaluator::ZeroInitialization(const Expr *E) {
+ const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
+ if (RD->isUnion()) {
+ // C++11 [dcl.init]p5: If T is a (possibly cv-qualified) union type, the
+ // object's first non-static named data member is zero-initialized
+ RecordDecl::field_iterator I = RD->field_begin();
+ if (I == RD->field_end()) {
+ Result = APValue((const FieldDecl*)0);
+ return true;
+ }
+
+ LValue Subobject = This;
+ HandleLValueMember(Info, E, Subobject, *I);
+ Result = APValue(*I);
+ ImplicitValueInitExpr VIE((*I)->getType());
+ return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, &VIE);
+ }
+
+ if (isa<CXXRecordDecl>(RD) && cast<CXXRecordDecl>(RD)->getNumVBases()) {
+ Info.Diag(E, diag::note_constexpr_virtual_base) << RD;
+ return false;
+ }
+
+ return HandleClassZeroInitialization(Info, E, RD, This, Result);
+}
+
+bool RecordExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_ConstructorConversion:
+ return Visit(E->getSubExpr());
+
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase: {
+ APValue DerivedObject;
+ if (!Evaluate(DerivedObject, Info, E->getSubExpr()))
+ return false;
+ if (!DerivedObject.isStruct())
+ return Error(E->getSubExpr());
+
+ // Derived-to-base rvalue conversion: just slice off the derived part.
+ APValue *Value = &DerivedObject;
+ const CXXRecordDecl *RD = E->getSubExpr()->getType()->getAsCXXRecordDecl();
+ for (CastExpr::path_const_iterator PathI = E->path_begin(),
+ PathE = E->path_end(); PathI != PathE; ++PathI) {
+ assert(!(*PathI)->isVirtual() && "record rvalue with virtual base");
+ const CXXRecordDecl *Base = (*PathI)->getType()->getAsCXXRecordDecl();
+ Value = &Value->getStructBase(getBaseIndex(RD, Base));
+ RD = Base;
+ }
+ Result = *Value;
+ return true;
+ }
+ }
+}
+
+bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
+ // Cannot constant-evaluate std::initializer_list inits.
+ if (E->initializesStdInitializerList())
+ return false;
+
+ const RecordDecl *RD = E->getType()->castAs<RecordType>()->getDecl();
+ const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(RD);
+
+ if (RD->isUnion()) {
+ const FieldDecl *Field = E->getInitializedFieldInUnion();
+ Result = APValue(Field);
+ if (!Field)
+ return true;
+
+ // If the initializer list for a union does not contain any elements, the
+ // first element of the union is value-initialized.
+ ImplicitValueInitExpr VIE(Field->getType());
+ const Expr *InitExpr = E->getNumInits() ? E->getInit(0) : &VIE;
+
+ LValue Subobject = This;
+ HandleLValueMember(Info, InitExpr, Subobject, Field, &Layout);
+ return EvaluateInPlace(Result.getUnionValue(), Info, Subobject, InitExpr);
+ }
+
+ assert((!isa<CXXRecordDecl>(RD) || !cast<CXXRecordDecl>(RD)->getNumBases()) &&
+ "initializer list for class with base classes");
+ Result = APValue(APValue::UninitStruct(), 0,
+ std::distance(RD->field_begin(), RD->field_end()));
+ unsigned ElementNo = 0;
+ bool Success = true;
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end(); Field != FieldEnd; ++Field) {
+ // Anonymous bit-fields are not considered members of the class for
+ // purposes of aggregate initialization.
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ LValue Subobject = This;
+
+ bool HaveInit = ElementNo < E->getNumInits();
+
+ // FIXME: Diagnostics here should point to the end of the initializer
+ // list, not the start.
+ HandleLValueMember(Info, HaveInit ? E->getInit(ElementNo) : E, Subobject,
+ *Field, &Layout);
+
+ // Perform an implicit value-initialization for members beyond the end of
+ // the initializer list.
+ ImplicitValueInitExpr VIE(HaveInit ? Info.Ctx.IntTy : Field->getType());
+
+ if (!EvaluateInPlace(
+ Result.getStructField((*Field)->getFieldIndex()),
+ Info, Subobject, HaveInit ? E->getInit(ElementNo++) : &VIE)) {
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ }
+
+ return Success;
+}
+
+bool RecordExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ const CXXConstructorDecl *FD = E->getConstructor();
+ bool ZeroInit = E->requiresZeroInitialization();
+ if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) {
+ // If we've already performed zero-initialization, we're already done.
+ if (!Result.isUninit())
+ return true;
+
+ if (ZeroInit)
+ return ZeroInitialization(E);
+
+ const CXXRecordDecl *RD = FD->getParent();
+ if (RD->isUnion())
+ Result = APValue((FieldDecl*)0);
+ else
+ Result = APValue(APValue::UninitStruct(), RD->getNumBases(),
+ std::distance(RD->field_begin(), RD->field_end()));
+ return true;
+ }
+
+ const FunctionDecl *Definition = 0;
+ FD->getBody(Definition);
+
+ if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition))
+ return false;
+
+ // Avoid materializing a temporary for an elidable copy/move constructor.
+ if (E->isElidable() && !ZeroInit)
+ if (const MaterializeTemporaryExpr *ME
+ = dyn_cast<MaterializeTemporaryExpr>(E->getArg(0)))
+ return Visit(ME->GetTemporaryExpr());
+
+ if (ZeroInit && !ZeroInitialization(E))
+ return false;
+
+ llvm::ArrayRef<const Expr*> Args(E->getArgs(), E->getNumArgs());
+ return HandleConstructorCall(E->getExprLoc(), This, Args,
+ cast<CXXConstructorDecl>(Definition), Info,
+ Result);
+}
+
+static bool EvaluateRecord(const Expr *E, const LValue &This,
+ APValue &Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isRecordType() &&
+ "can't evaluate expression as a record rvalue");
+ return RecordExprEvaluator(Info, This, Result).Visit(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Temporary Evaluation
+//
+// Temporaries are represented in the AST as rvalues, but generally behave like
+// lvalues. The full-object of which the temporary is a subobject is implicitly
+// materialized so that a reference can bind to it.
+//===----------------------------------------------------------------------===//
+namespace {
+class TemporaryExprEvaluator
+ : public LValueExprEvaluatorBase<TemporaryExprEvaluator> {
+public:
+ TemporaryExprEvaluator(EvalInfo &Info, LValue &Result) :
+ LValueExprEvaluatorBaseTy(Info, Result) {}
+
+ /// Visit an expression which constructs the value of this temporary.
+ bool VisitConstructExpr(const Expr *E) {
+ Result.set(E, Info.CurrentCall->Index);
+ return EvaluateInPlace(Info.CurrentCall->Temporaries[E], Info, Result, E);
+ }
+
+ bool VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return LValueExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_ConstructorConversion:
+ return VisitConstructExpr(E->getSubExpr());
+ }
+ }
+ bool VisitInitListExpr(const InitListExpr *E) {
+ return VisitConstructExpr(E);
+ }
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ return VisitConstructExpr(E);
+ }
+ bool VisitCallExpr(const CallExpr *E) {
+ return VisitConstructExpr(E);
+ }
+};
+} // end anonymous namespace
+
+/// Evaluate an expression of record type as a temporary.
+static bool EvaluateTemporary(const Expr *E, LValue &Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isRecordType());
+ return TemporaryExprEvaluator(Info, Result).Visit(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Vector Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class VectorExprEvaluator
+ : public ExprEvaluatorBase<VectorExprEvaluator, bool> {
+ APValue &Result;
+ public:
+
+ VectorExprEvaluator(EvalInfo &info, APValue &Result)
+ : ExprEvaluatorBaseTy(info), Result(Result) {}
+
+ bool Success(const ArrayRef<APValue> &V, const Expr *E) {
+ assert(V.size() == E->getType()->castAs<VectorType>()->getNumElements());
+ // FIXME: remove this APValue copy.
+ Result = APValue(V.data(), V.size());
+ return true;
+ }
+ bool Success(const APValue &V, const Expr *E) {
+ assert(V.isVector());
+ Result = V;
+ return true;
+ }
+ bool ZeroInitialization(const Expr *E);
+
+ bool VisitUnaryReal(const UnaryOperator *E)
+ { return Visit(E->getSubExpr()); }
+ bool VisitCastExpr(const CastExpr* E);
+ bool VisitInitListExpr(const InitListExpr *E);
+ bool VisitUnaryImag(const UnaryOperator *E);
+ // FIXME: Missing: unary -, unary ~, binary add/sub/mul/div,
+ // binary comparisons, binary and/or/xor,
+ // shufflevector, ExtVectorElementExpr
+ };
+} // end anonymous namespace
+
+static bool EvaluateVector(const Expr* E, APValue& Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isVectorType() &&"not a vector rvalue");
+ return VectorExprEvaluator(Info, Result).Visit(E);
+}
+
+bool VectorExprEvaluator::VisitCastExpr(const CastExpr* E) {
+ const VectorType *VTy = E->getType()->castAs<VectorType>();
+ unsigned NElts = VTy->getNumElements();
+
+ const Expr *SE = E->getSubExpr();
+ QualType SETy = SE->getType();
+
+ switch (E->getCastKind()) {
+ case CK_VectorSplat: {
+ APValue Val = APValue();
+ if (SETy->isIntegerType()) {
+ APSInt IntResult;
+ if (!EvaluateInteger(SE, IntResult, Info))
+ return false;
+ Val = APValue(IntResult);
+ } else if (SETy->isRealFloatingType()) {
+ APFloat F(0.0);
+ if (!EvaluateFloat(SE, F, Info))
+ return false;
+ Val = APValue(F);
+ } else {
+ return Error(E);
+ }
+
+ // Splat and create vector APValue.
+ SmallVector<APValue, 4> Elts(NElts, Val);
+ return Success(Elts, E);
+ }
+ case CK_BitCast: {
+ // Evaluate the operand into an APInt we can extract from.
+ llvm::APInt SValInt;
+ if (!EvalAndBitcastToAPInt(Info, SE, SValInt))
+ return false;
+ // Extract the elements
+ QualType EltTy = VTy->getElementType();
+ unsigned EltSize = Info.Ctx.getTypeSize(EltTy);
+ bool BigEndian = Info.Ctx.getTargetInfo().isBigEndian();
+ SmallVector<APValue, 4> Elts;
+ if (EltTy->isRealFloatingType()) {
+ const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(EltTy);
+ bool isIEESem = &Sem != &APFloat::PPCDoubleDouble;
+ unsigned FloatEltSize = EltSize;
+ if (&Sem == &APFloat::x87DoubleExtended)
+ FloatEltSize = 80;
+ for (unsigned i = 0; i < NElts; i++) {
+ llvm::APInt Elt;
+ if (BigEndian)
+ Elt = SValInt.rotl(i*EltSize+FloatEltSize).trunc(FloatEltSize);
+ else
+ Elt = SValInt.rotr(i*EltSize).trunc(FloatEltSize);
+ Elts.push_back(APValue(APFloat(Elt, isIEESem)));
+ }
+ } else if (EltTy->isIntegerType()) {
+ for (unsigned i = 0; i < NElts; i++) {
+ llvm::APInt Elt;
+ if (BigEndian)
+ Elt = SValInt.rotl(i*EltSize+EltSize).zextOrTrunc(EltSize);
+ else
+ Elt = SValInt.rotr(i*EltSize).zextOrTrunc(EltSize);
+ Elts.push_back(APValue(APSInt(Elt, EltTy->isSignedIntegerType())));
+ }
+ } else {
+ return Error(E);
+ }
+ return Success(Elts, E);
+ }
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+ }
+}
+
+bool
+VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
+ const VectorType *VT = E->getType()->castAs<VectorType>();
+ unsigned NumInits = E->getNumInits();
+ unsigned NumElements = VT->getNumElements();
+
+ QualType EltTy = VT->getElementType();
+ SmallVector<APValue, 4> Elements;
+
+ // The number of initializers can be less than the number of
+ // vector elements. For OpenCL, this can be due to nested vector
+ // initialization. For GCC compatibility, missing trailing elements
+ // should be initialized with zeroes.
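+  // For example (GCC vector extension, illustrative):
+  //   typedef int v4 __attribute__((vector_size(16)));
+  //   v4 x = {1, 2};   // elements 2 and 3 are zero-initialized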
+ unsigned CountInits = 0, CountElts = 0;
+ while (CountElts < NumElements) {
+ // Handle nested vector initialization.
+ if (CountInits < NumInits
+ && E->getInit(CountInits)->getType()->isExtVectorType()) {
+ APValue v;
+ if (!EvaluateVector(E->getInit(CountInits), v, Info))
+ return Error(E);
+ unsigned vlen = v.getVectorLength();
+ for (unsigned j = 0; j < vlen; j++)
+ Elements.push_back(v.getVectorElt(j));
+ CountElts += vlen;
+ } else if (EltTy->isIntegerType()) {
+ llvm::APSInt sInt(32);
+ if (CountInits < NumInits) {
+ if (!EvaluateInteger(E->getInit(CountInits), sInt, Info))
+ return false;
+ } else // trailing integer zero.
+ sInt = Info.Ctx.MakeIntValue(0, EltTy);
+ Elements.push_back(APValue(sInt));
+ CountElts++;
+ } else {
+ llvm::APFloat f(0.0);
+ if (CountInits < NumInits) {
+ if (!EvaluateFloat(E->getInit(CountInits), f, Info))
+ return false;
+ } else // trailing float zero.
+ f = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy));
+ Elements.push_back(APValue(f));
+ CountElts++;
+ }
+ CountInits++;
+ }
+ return Success(Elements, E);
+}
+
+bool
+VectorExprEvaluator::ZeroInitialization(const Expr *E) {
+ const VectorType *VT = E->getType()->getAs<VectorType>();
+ QualType EltTy = VT->getElementType();
+ APValue ZeroElement;
+ if (EltTy->isIntegerType())
+ ZeroElement = APValue(Info.Ctx.MakeIntValue(0, EltTy));
+ else
+ ZeroElement =
+ APValue(APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy)));
+
+ SmallVector<APValue, 4> Elements(VT->getNumElements(), ZeroElement);
+ return Success(Elements, E);
+}
+
+bool VectorExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
+ VisitIgnoredValue(E->getSubExpr());
+ return ZeroInitialization(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Array Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class ArrayExprEvaluator
+ : public ExprEvaluatorBase<ArrayExprEvaluator, bool> {
+ const LValue &This;
+ APValue &Result;
+ public:
+
+ ArrayExprEvaluator(EvalInfo &Info, const LValue &This, APValue &Result)
+ : ExprEvaluatorBaseTy(Info), This(This), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *E) {
+ assert((V.isArray() || V.isLValue()) &&
+ "expected array or string literal");
+ Result = V;
+ return true;
+ }
+
+ bool ZeroInitialization(const Expr *E) {
+ const ConstantArrayType *CAT =
+ Info.Ctx.getAsConstantArrayType(E->getType());
+ if (!CAT)
+ return Error(E);
+
+ Result = APValue(APValue::UninitArray(), 0,
+ CAT->getSize().getZExtValue());
+ if (!Result.hasArrayFiller()) return true;
+
+ // Zero-initialize all elements.
+ LValue Subobject = This;
+ Subobject.addArray(Info, E, CAT);
+ ImplicitValueInitExpr VIE(CAT->getElementType());
+ return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE);
+ }
+
+ bool VisitInitListExpr(const InitListExpr *E);
+ bool VisitCXXConstructExpr(const CXXConstructExpr *E);
+ };
+} // end anonymous namespace
+
+static bool EvaluateArray(const Expr *E, const LValue &This,
+ APValue &Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isArrayType() && "not an array rvalue");
+ return ArrayExprEvaluator(Info, This, Result).Visit(E);
+}
+
+bool ArrayExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
+ const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(E->getType());
+ if (!CAT)
+ return Error(E);
+
+ // C++11 [dcl.init.string]p1: A char array [...] can be initialized by [...]
+ // an appropriately-typed string literal enclosed in braces.
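+  // For example (illustrative), 'char x[4] = { "abc" };'.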
+ if (E->getNumInits() == 1 && E->getInit(0)->isGLValue() &&
+ Info.Ctx.hasSameUnqualifiedType(E->getType(), E->getInit(0)->getType())) {
+ LValue LV;
+ if (!EvaluateLValue(E->getInit(0), LV, Info))
+ return false;
+ APValue Val;
+ LV.moveInto(Val);
+ return Success(Val, E);
+ }
+
+ bool Success = true;
+
+ Result = APValue(APValue::UninitArray(), E->getNumInits(),
+ CAT->getSize().getZExtValue());
+ LValue Subobject = This;
+ Subobject.addArray(Info, E, CAT);
+ unsigned Index = 0;
+ for (InitListExpr::const_iterator I = E->begin(), End = E->end();
+ I != End; ++I, ++Index) {
+ if (!EvaluateInPlace(Result.getArrayInitializedElt(Index),
+ Info, Subobject, cast<Expr>(*I)) ||
+ !HandleLValueArrayAdjustment(Info, cast<Expr>(*I), Subobject,
+ CAT->getElementType(), 1)) {
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ }
+
+ if (!Result.hasArrayFiller()) return Success;
+ assert(E->hasArrayFiller() && "no array filler for incomplete init list");
+ // FIXME: The Subobject here isn't necessarily right. This rarely matters,
+ // but sometimes does:
+ // struct S { constexpr S() : p(&p) {} void *p; };
+ // S s[10] = {};
+ return EvaluateInPlace(Result.getArrayFiller(), Info,
+ Subobject, E->getArrayFiller()) && Success;
+}
+
+bool ArrayExprEvaluator::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ const ConstantArrayType *CAT = Info.Ctx.getAsConstantArrayType(E->getType());
+ if (!CAT)
+ return Error(E);
+
+ bool HadZeroInit = !Result.isUninit();
+ if (!HadZeroInit)
+ Result = APValue(APValue::UninitArray(), 0, CAT->getSize().getZExtValue());
+ if (!Result.hasArrayFiller())
+ return true;
+
+ const CXXConstructorDecl *FD = E->getConstructor();
+
+ bool ZeroInit = E->requiresZeroInitialization();
+ if (CheckTrivialDefaultConstructor(Info, E->getExprLoc(), FD, ZeroInit)) {
+ if (HadZeroInit)
+ return true;
+
+ if (ZeroInit) {
+ LValue Subobject = This;
+ Subobject.addArray(Info, E, CAT);
+ ImplicitValueInitExpr VIE(CAT->getElementType());
+ return EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE);
+ }
+
+ const CXXRecordDecl *RD = FD->getParent();
+ if (RD->isUnion())
+ Result.getArrayFiller() = APValue((FieldDecl*)0);
+ else
+ Result.getArrayFiller() =
+ APValue(APValue::UninitStruct(), RD->getNumBases(),
+ std::distance(RD->field_begin(), RD->field_end()));
+ return true;
+ }
+
+ const FunctionDecl *Definition = 0;
+ FD->getBody(Definition);
+
+ if (!CheckConstexprFunction(Info, E->getExprLoc(), FD, Definition))
+ return false;
+
+ // FIXME: The Subobject here isn't necessarily right. This rarely matters,
+ // but sometimes does:
+ // struct S { constexpr S() : p(&p) {} void *p; };
+ // S s[10];
+ LValue Subobject = This;
+ Subobject.addArray(Info, E, CAT);
+
+ if (ZeroInit && !HadZeroInit) {
+ ImplicitValueInitExpr VIE(CAT->getElementType());
+ if (!EvaluateInPlace(Result.getArrayFiller(), Info, Subobject, &VIE))
+ return false;
+ }
+
+ llvm::ArrayRef<const Expr*> Args(E->getArgs(), E->getNumArgs());
+ return HandleConstructorCall(E->getExprLoc(), Subobject, Args,
+ cast<CXXConstructorDecl>(Definition),
+ Info, Result.getArrayFiller());
+}
+
+//===----------------------------------------------------------------------===//
+// Integer Evaluation
+//
+// As a GNU extension, we support casting pointers to sufficiently-wide integer
+// types and back in constant folding. Integer values are thus represented
+// either as an integer-valued APValue, or as an lvalue-valued APValue.
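+// For example (illustrative, and only as far as the folder supports it),
+// '(long)&x + 4' can fold to an lvalue-valued APValue whose base is the
+// declaration of 'x' and whose offset is 4 bytes, much as if it were a char*.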
+//===----------------------------------------------------------------------===//
+
+namespace {
+class IntExprEvaluator
+ : public ExprEvaluatorBase<IntExprEvaluator, bool> {
+ APValue &Result;
+public:
+ IntExprEvaluator(EvalInfo &info, APValue &result)
+ : ExprEvaluatorBaseTy(info), Result(result) {}
+
+ bool Success(const llvm::APSInt &SI, const Expr *E, APValue &Result) {
+ assert(E->getType()->isIntegralOrEnumerationType() &&
+ "Invalid evaluation result.");
+ assert(SI.isSigned() == E->getType()->isSignedIntegerOrEnumerationType() &&
+ "Invalid evaluation result.");
+ assert(SI.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
+ "Invalid evaluation result.");
+ Result = APValue(SI);
+ return true;
+ }
+ bool Success(const llvm::APSInt &SI, const Expr *E) {
+ return Success(SI, E, Result);
+ }
+
+ bool Success(const llvm::APInt &I, const Expr *E, APValue &Result) {
+ assert(E->getType()->isIntegralOrEnumerationType() &&
+ "Invalid evaluation result.");
+ assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
+ "Invalid evaluation result.");
+ Result = APValue(APSInt(I));
+ Result.getInt().setIsUnsigned(
+ E->getType()->isUnsignedIntegerOrEnumerationType());
+ return true;
+ }
+ bool Success(const llvm::APInt &I, const Expr *E) {
+ return Success(I, E, Result);
+ }
+
+ bool Success(uint64_t Value, const Expr *E, APValue &Result) {
+ assert(E->getType()->isIntegralOrEnumerationType() &&
+ "Invalid evaluation result.");
+ Result = APValue(Info.Ctx.MakeIntValue(Value, E->getType()));
+ return true;
+ }
+ bool Success(uint64_t Value, const Expr *E) {
+ return Success(Value, E, Result);
+ }
+
+ bool Success(CharUnits Size, const Expr *E) {
+ return Success(Size.getQuantity(), E);
+ }
+
+ bool Success(const APValue &V, const Expr *E) {
+ if (V.isLValue() || V.isAddrLabelDiff()) {
+ Result = V;
+ return true;
+ }
+ return Success(V.getInt(), E);
+ }
+
+ bool ZeroInitialization(const Expr *E) { return Success(0, E); }
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ bool VisitIntegerLiteral(const IntegerLiteral *E) {
+ return Success(E->getValue(), E);
+ }
+ bool VisitCharacterLiteral(const CharacterLiteral *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool CheckReferencedDecl(const Expr *E, const Decl *D);
+ bool VisitDeclRefExpr(const DeclRefExpr *E) {
+ if (CheckReferencedDecl(E, E->getDecl()))
+ return true;
+
+ return ExprEvaluatorBaseTy::VisitDeclRefExpr(E);
+ }
+ bool VisitMemberExpr(const MemberExpr *E) {
+ if (CheckReferencedDecl(E, E->getMemberDecl())) {
+ VisitIgnoredValue(E->getBase());
+ return true;
+ }
+
+ return ExprEvaluatorBaseTy::VisitMemberExpr(E);
+ }
+
+ bool VisitCallExpr(const CallExpr *E);
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitOffsetOfExpr(const OffsetOfExpr *E);
+ bool VisitUnaryOperator(const UnaryOperator *E);
+
+ bool VisitCastExpr(const CastExpr* E);
+ bool VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
+
+ bool VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ // Note, GNU defines __null as an integer, not a pointer.
+ bool VisitGNUNullExpr(const GNUNullExpr *E) {
+ return ZeroInitialization(E);
+ }
+
+ bool VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitTypeTraitExpr(const TypeTraitExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
+ return Success(E->getValue(), E);
+ }
+
+ bool VisitUnaryReal(const UnaryOperator *E);
+ bool VisitUnaryImag(const UnaryOperator *E);
+
+ bool VisitCXXNoexceptExpr(const CXXNoexceptExpr *E);
+ bool VisitSizeOfPackExpr(const SizeOfPackExpr *E);
+
+private:
+ CharUnits GetAlignOfExpr(const Expr *E);
+ CharUnits GetAlignOfType(QualType T);
+ static QualType GetObjectType(APValue::LValueBase B);
+ bool TryEvaluateBuiltinObjectSize(const CallExpr *E);
+ // FIXME: Missing: array subscript of vector, member of vector
+};
+} // end anonymous namespace
+
+/// EvaluateIntegerOrLValue - Evaluate an rvalue integral-typed expression, and
+/// produce either the integer value or a pointer.
+///
+/// GCC has a heinous extension which folds casts between pointer types and
+/// pointer-sized integral types. We support this by allowing the evaluation of
+/// an integer rvalue to produce a pointer (represented as an lvalue) instead.
+/// Some simple arithmetic on such values is supported (they are treated much
+/// like char*).
+static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
+ EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isIntegralOrEnumerationType());
+ return IntExprEvaluator(Info, Result).Visit(E);
+}
+
+static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info) {
+ APValue Val;
+ if (!EvaluateIntegerOrLValue(E, Val, Info))
+ return false;
+ if (!Val.isInt()) {
+ // FIXME: It would be better to produce the diagnostic for casting
+ // a pointer to an integer.
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+ Result = Val.getInt();
+ return true;
+}
+
+/// Check whether the given declaration can be directly converted to an integral
+/// rvalue. If not, no diagnostic is produced; there are other things we can
+/// try.
+bool IntExprEvaluator::CheckReferencedDecl(const Expr* E, const Decl* D) {
+ // Enums are integer constant exprs.
+ if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D)) {
+    // Check for signedness/width mismatches between E's type and ECD's value.
+ bool SameSign = (ECD->getInitVal().isSigned()
+ == E->getType()->isSignedIntegerOrEnumerationType());
+ bool SameWidth = (ECD->getInitVal().getBitWidth()
+ == Info.Ctx.getIntWidth(E->getType()));
+ if (SameSign && SameWidth)
+ return Success(ECD->getInitVal(), E);
+ else {
+ // Get rid of mismatch (otherwise Success assertions will fail)
+ // by computing a new value matching the type of E.
+ llvm::APSInt Val = ECD->getInitVal();
+ if (!SameSign)
+ Val.setIsSigned(!ECD->getInitVal().isSigned());
+ if (!SameWidth)
+ Val = Val.extOrTrunc(Info.Ctx.getIntWidth(E->getType()));
+ return Success(Val, E);
+ }
+ }
+ return false;
+}
+
+/// EvaluateBuiltinClassifyType - Evaluate __builtin_classify_type the same way
+/// as GCC.
+static int EvaluateBuiltinClassifyType(const CallExpr *E) {
+ // The following enum mimics the values returned by GCC.
+  // FIXME: Does GCC distinguish between lvalue and rvalue references here?
+ enum gcc_type_class {
+ no_type_class = -1,
+ void_type_class, integer_type_class, char_type_class,
+ enumeral_type_class, boolean_type_class,
+ pointer_type_class, reference_type_class, offset_type_class,
+ real_type_class, complex_type_class,
+ function_type_class, method_type_class,
+ record_type_class, union_type_class,
+ array_type_class, string_type_class,
+ lang_type_class
+ };
+
+  // If no argument was supplied, default to "no_type_class". This isn't
+  // ideal, but it is what gcc does.
+ if (E->getNumArgs() == 0)
+ return no_type_class;
+
+ QualType ArgTy = E->getArg(0)->getType();
+ if (ArgTy->isVoidType())
+ return void_type_class;
+ else if (ArgTy->isEnumeralType())
+ return enumeral_type_class;
+ else if (ArgTy->isBooleanType())
+ return boolean_type_class;
+ else if (ArgTy->isCharType())
+ return string_type_class; // gcc doesn't appear to use char_type_class
+ else if (ArgTy->isIntegerType())
+ return integer_type_class;
+ else if (ArgTy->isPointerType())
+ return pointer_type_class;
+ else if (ArgTy->isReferenceType())
+ return reference_type_class;
+ else if (ArgTy->isRealType())
+ return real_type_class;
+ else if (ArgTy->isComplexType())
+ return complex_type_class;
+ else if (ArgTy->isFunctionType())
+ return function_type_class;
+ else if (ArgTy->isStructureOrClassType())
+ return record_type_class;
+ else if (ArgTy->isUnionType())
+ return union_type_class;
+ else if (ArgTy->isArrayType())
+ return array_type_class;
+ else // FIXME: offset_type_class, method_type_class, & lang_type_class?
+ llvm_unreachable("CallExpr::isBuiltinClassifyType(): unimplemented type");
+}
+
+/// EvaluateBuiltinConstantPForLValue - Determine the result of
+/// __builtin_constant_p when applied to the given lvalue.
+///
+/// An lvalue is only "constant" if it is a pointer or reference to the first
+/// character of a string literal.
+template<typename LValue>
+static bool EvaluateBuiltinConstantPForLValue(const LValue &LV) {
+ const Expr *E = LV.getLValueBase().template dyn_cast<const Expr*>();
+ return E && isa<StringLiteral>(E) && LV.getLValueOffset().isZero();
+}
+
+/// EvaluateBuiltinConstantP - Evaluate __builtin_constant_p as similarly to
+/// GCC as we can manage.
+static bool EvaluateBuiltinConstantP(ASTContext &Ctx, const Expr *Arg) {
+ QualType ArgType = Arg->getType();
+
+ // __builtin_constant_p always has one operand. The rules which gcc follows
+ // are not precisely documented, but are as follows:
+ //
+ // - If the operand is of integral, floating, complex or enumeration type,
+ // and can be folded to a known value of that type, it returns 1.
+ // - If the operand can be folded to a pointer to the first character
+ // of a string literal (or such a pointer cast to an integral type), it
+ // returns 1.
+ //
+ // Otherwise, it returns 0.
+ //
+ // FIXME: GCC also intends to return 1 for literals of aggregate types, but
+ // its support for this does not currently work.
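+ //
+ // For example, under these rules __builtin_constant_p(3 * 4) and
+ // __builtin_constant_p("foo") both fold to 1, while applying it to an
+ // ordinary (non-constant) function parameter folds to 0.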
+ if (ArgType->isIntegralOrEnumerationType()) {
+ Expr::EvalResult Result;
+ if (!Arg->EvaluateAsRValue(Result, Ctx) || Result.HasSideEffects)
+ return false;
+
+ APValue &V = Result.Val;
+ if (V.getKind() == APValue::Int)
+ return true;
+
+ return EvaluateBuiltinConstantPForLValue(V);
+ } else if (ArgType->isFloatingType() || ArgType->isAnyComplexType()) {
+ return Arg->isEvaluatable(Ctx);
+ } else if (ArgType->isPointerType() || Arg->isGLValue()) {
+ LValue LV;
+ Expr::EvalStatus Status;
+ EvalInfo Info(Ctx, Status);
+ if ((Arg->isGLValue() ? EvaluateLValue(Arg, LV, Info)
+ : EvaluatePointer(Arg, LV, Info)) &&
+ !Status.HasSideEffects)
+ return EvaluateBuiltinConstantPForLValue(LV);
+ }
+
+ // Anything else isn't considered to be sufficiently constant.
+ return false;
+}
+
+/// Retrieves the "underlying object type" of the given expression,
+/// as used by __builtin_object_size.
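+///
+/// For example, for 'char buf[10]' the base of '&buf[0]' is the VarDecl for
+/// 'buf', so the underlying object type is char[10].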
+QualType IntExprEvaluator::GetObjectType(APValue::LValueBase B) {
+ if (const ValueDecl *D = B.dyn_cast<const ValueDecl*>()) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ return VD->getType();
+ } else if (const Expr *E = B.get<const Expr*>()) {
+ if (isa<CompoundLiteralExpr>(E))
+ return E->getType();
+ }
+
+ return QualType();
+}
+
+bool IntExprEvaluator::TryEvaluateBuiltinObjectSize(const CallExpr *E) {
+ // TODO: Perhaps we should let LLVM lower this?
+ LValue Base;
+ if (!EvaluatePointer(E->getArg(0), Base, Info))
+ return false;
+
+ // If we can prove the base is null, lower to zero now.
+ if (!Base.getLValueBase()) return Success(0, E);
+
+ QualType T = GetObjectType(Base.getLValueBase());
+ if (T.isNull() ||
+ T->isIncompleteType() ||
+ T->isFunctionType() ||
+ T->isVariablyModifiedType() ||
+ T->isDependentType())
+ return Error(E);
+
+ CharUnits Size = Info.Ctx.getTypeSizeInChars(T);
+ CharUnits Offset = Base.getLValueOffset();
+
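+ // For example, for 'char buf[10]', __builtin_object_size(buf + 4, 0) reaches
+ // this point with Size == 10 and Offset == 4, and so folds to 6.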
+ if (!Offset.isNegative() && Offset <= Size)
+ Size -= Offset;
+ else
+ Size = CharUnits::Zero();
+ return Success(Size, E);
+}
+
+bool IntExprEvaluator::VisitCallExpr(const CallExpr *E) {
+ switch (unsigned BuiltinOp = E->isBuiltinCall()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+
+ case Builtin::BI__builtin_object_size: {
+ if (TryEvaluateBuiltinObjectSize(E))
+ return true;
+
+ // If evaluating the argument has side-effects, we can't determine the
+ // size of the object, so lower it to unknown now.
+ if (E->getArg(0)->HasSideEffects(Info.Ctx)) {
+ if (E->getArg(1)->EvaluateKnownConstInt(Info.Ctx).getZExtValue() <= 1)
+ return Success(-1ULL, E);
+ return Success(0, E);
+ }
+
+ return Error(E);
+ }
+
+ case Builtin::BI__builtin_classify_type:
+ return Success(EvaluateBuiltinClassifyType(E), E);
+
+ case Builtin::BI__builtin_constant_p:
+ return Success(EvaluateBuiltinConstantP(Info.Ctx, E->getArg(0)), E);
+
+ case Builtin::BI__builtin_eh_return_data_regno: {
+ int Operand = E->getArg(0)->EvaluateKnownConstInt(Info.Ctx).getZExtValue();
+ Operand = Info.Ctx.getTargetInfo().getEHDataRegisterNumber(Operand);
+ return Success(Operand, E);
+ }
+
+ case Builtin::BI__builtin_expect:
+ return Visit(E->getArg(0));
+
+ case Builtin::BIstrlen:
+ // A call to strlen is not a constant expression.
+ if (Info.getLangOpts().CPlusPlus0x)
+ Info.CCEDiag(E, diag::note_constexpr_invalid_function)
+ << /*isConstexpr*/0 << /*isConstructor*/0 << "'strlen'";
+ else
+ Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ // Fall through.
+ case Builtin::BI__builtin_strlen:
+ // As an extension, we support strlen() and __builtin_strlen() as constant
+ // expressions when the argument is a string literal.
+ if (const StringLiteral *S
+ = dyn_cast<StringLiteral>(E->getArg(0)->IgnoreParenImpCasts())) {
+ // The string literal may have embedded null characters. Find the first
+ // one and truncate there.
+ StringRef Str = S->getString();
+ StringRef::size_type Pos = Str.find(0);
+ if (Pos != StringRef::npos)
+ Str = Str.substr(0, Pos);
+
+ return Success(Str.size(), E);
+ }
+
+ return Error(E);
+
+ case Builtin::BI__atomic_always_lock_free:
+ case Builtin::BI__atomic_is_lock_free:
+ case Builtin::BI__c11_atomic_is_lock_free: {
+ APSInt SizeVal;
+ if (!EvaluateInteger(E->getArg(0), SizeVal, Info))
+ return false;
+
+ // For __atomic_is_lock_free(sizeof(_Atomic(T))), if the size is a power
+ // of two less than the maximum inline atomic width, we know it is
+ // lock-free. If the size isn't a power of two, or greater than the
+ // maximum alignment where we promote atomics, we know it is not lock-free
+ // (at least not in the sense of atomic_is_lock_free). Otherwise,
+ // the answer can only be determined at runtime; for example, 16-byte
+ // atomics have lock-free implementations on some, but not all,
+ // x86-64 processors.
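+ //
+ // For example, on a target whose maximum inline atomic width is 8 bytes,
+ // __atomic_is_lock_free(4, 0) folds to 1 here, while a 16-byte query is not
+ // folded at all (and __atomic_always_lock_free(16, 0) folds to 0).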
+
+ // Check power-of-two.
+ CharUnits Size = CharUnits::fromQuantity(SizeVal.getZExtValue());
+ if (Size.isPowerOfTwo()) {
+ // Check against inlining width.
+ unsigned InlineWidthBits =
+ Info.Ctx.getTargetInfo().getMaxAtomicInlineWidth();
+ if (Size <= Info.Ctx.toCharUnitsFromBits(InlineWidthBits)) {
+ if (BuiltinOp == Builtin::BI__c11_atomic_is_lock_free ||
+ Size == CharUnits::One() ||
+ E->getArg(1)->isNullPointerConstant(Info.Ctx,
+ Expr::NPC_NeverValueDependent))
+ // OK, we will inline appropriately-aligned operations of this size,
+ // and _Atomic(T) is appropriately-aligned.
+ return Success(1, E);
+
+ QualType PointeeType = E->getArg(1)->IgnoreImpCasts()->getType()->
+ castAs<PointerType>()->getPointeeType();
+ if (!PointeeType->isIncompleteType() &&
+ Info.Ctx.getTypeAlignInChars(PointeeType) >= Size) {
+ // OK, we will inline operations on this object.
+ return Success(1, E);
+ }
+ }
+ }
+
+ return BuiltinOp == Builtin::BI__atomic_always_lock_free ?
+ Success(0, E) : Error(E);
+ }
+ }
+}
+
+static bool HasSameBase(const LValue &A, const LValue &B) {
+ if (!A.getLValueBase())
+ return !B.getLValueBase();
+ if (!B.getLValueBase())
+ return false;
+
+ if (A.getLValueBase().getOpaqueValue() !=
+ B.getLValueBase().getOpaqueValue()) {
+ const Decl *ADecl = GetLValueBaseDecl(A);
+ if (!ADecl)
+ return false;
+ const Decl *BDecl = GetLValueBaseDecl(B);
+ if (!BDecl || ADecl->getCanonicalDecl() != BDecl->getCanonicalDecl())
+ return false;
+ }
+
+ return IsGlobalLValue(A.getLValueBase()) ||
+ A.getLValueCallIndex() == B.getLValueCallIndex();
+}
+
+/// Perform the given integer operation, which is known to need at most BitWidth
+/// bits, and check for overflow in the original type (if that type was not an
+/// unsigned type).
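+///
+/// For example, for 32-bit signed addition the caller passes BitWidth == 33;
+/// INT_MAX + 1 then produces a 33-bit value that changes when truncated back
+/// to 32 bits, so HandleOverflow is invoked.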
+template<typename Operation>
+static APSInt CheckedIntArithmetic(EvalInfo &Info, const Expr *E,
+ const APSInt &LHS, const APSInt &RHS,
+ unsigned BitWidth, Operation Op) {
+ if (LHS.isUnsigned())
+ return Op(LHS, RHS);
+
+ APSInt Value(Op(LHS.extend(BitWidth), RHS.extend(BitWidth)), false);
+ APSInt Result = Value.trunc(LHS.getBitWidth());
+ if (Result.extend(BitWidth) != Value)
+ HandleOverflow(Info, E, Value, E->getType());
+ return Result;
+}
+
+namespace {
+
+/// \brief Data recursive integer evaluator of certain binary operators.
+///
+/// We use a data recursive algorithm for binary operators so that we are able
+/// to handle extreme cases of chained binary operators without causing stack
+/// overflow.
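+///
+/// For example, an expression such as "1 + 1 + 1 + ... + 1" with tens of
+/// thousands of terms would overflow the C++ call stack if evaluated by plain
+/// recursion; here it only grows the explicit Queue.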
+class DataRecursiveIntBinOpEvaluator {
+ struct EvalResult {
+ APValue Val;
+ bool Failed;
+
+ EvalResult() : Failed(false) { }
+
+ void swap(EvalResult &RHS) {
+ Val.swap(RHS.Val);
+ Failed = RHS.Failed;
+ RHS.Failed = false;
+ }
+ };
+
+ struct Job {
+ const Expr *E;
+ EvalResult LHSResult; // meaningful only for binary operator expressions.
+ enum { AnyExprKind, BinOpKind, BinOpVisitedLHSKind } Kind;
+
+ Job() : StoredInfo(0) { }
+ void startSpeculativeEval(EvalInfo &Info) {
+ OldEvalStatus = Info.EvalStatus;
+ Info.EvalStatus.Diag = 0;
+ StoredInfo = &Info;
+ }
+ ~Job() {
+ if (StoredInfo) {
+ StoredInfo->EvalStatus = OldEvalStatus;
+ }
+ }
+ private:
+ EvalInfo *StoredInfo; // non-null if status changed.
+ Expr::EvalStatus OldEvalStatus;
+ };
+
+ SmallVector<Job, 16> Queue;
+
+ IntExprEvaluator &IntEval;
+ EvalInfo &Info;
+ APValue &FinalResult;
+
+public:
+ DataRecursiveIntBinOpEvaluator(IntExprEvaluator &IntEval, APValue &Result)
+ : IntEval(IntEval), Info(IntEval.getEvalInfo()), FinalResult(Result) { }
+
+ /// \brief True if \param E is a binary operator that we are going to handle
+ /// data recursively.
+ /// We handle binary operators that are comma, logical, or that have operands
+ /// with integral or enumeration type.
+ static bool shouldEnqueue(const BinaryOperator *E) {
+ return E->getOpcode() == BO_Comma ||
+ E->isLogicalOp() ||
+ (E->getLHS()->getType()->isIntegralOrEnumerationType() &&
+ E->getRHS()->getType()->isIntegralOrEnumerationType());
+ }
+
+ bool Traverse(const BinaryOperator *E) {
+ enqueue(E);
+ EvalResult PrevResult;
+ while (!Queue.empty())
+ process(PrevResult);
+
+ if (PrevResult.Failed) return false;
+
+ FinalResult.swap(PrevResult.Val);
+ return true;
+ }
+
+private:
+ bool Success(uint64_t Value, const Expr *E, APValue &Result) {
+ return IntEval.Success(Value, E, Result);
+ }
+ bool Success(const APSInt &Value, const Expr *E, APValue &Result) {
+ return IntEval.Success(Value, E, Result);
+ }
+ bool Error(const Expr *E) {
+ return IntEval.Error(E);
+ }
+ bool Error(const Expr *E, diag::kind D) {
+ return IntEval.Error(E, D);
+ }
+
+ OptionalDiagnostic CCEDiag(const Expr *E, diag::kind D) {
+ return Info.CCEDiag(E, D);
+ }
+
+ /// \brief Returns true if visiting the RHS is necessary, false otherwise.
+ bool VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
+ bool &SuppressRHSDiags);
+
+ bool VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
+ const BinaryOperator *E, APValue &Result);
+
+ void EvaluateExpr(const Expr *E, EvalResult &Result) {
+ Result.Failed = !Evaluate(Result.Val, Info, E);
+ if (Result.Failed)
+ Result.Val = APValue();
+ }
+
+ void process(EvalResult &Result);
+
+ void enqueue(const Expr *E) {
+ E = E->IgnoreParens();
+ Queue.resize(Queue.size()+1);
+ Queue.back().E = E;
+ Queue.back().Kind = Job::AnyExprKind;
+ }
+};
+
+}
+
+bool DataRecursiveIntBinOpEvaluator::
+ VisitBinOpLHSOnly(EvalResult &LHSResult, const BinaryOperator *E,
+ bool &SuppressRHSDiags) {
+ if (E->getOpcode() == BO_Comma) {
+ // Ignore LHS but note if we could not evaluate it.
+ if (LHSResult.Failed)
+ Info.EvalStatus.HasSideEffects = true;
+ return true;
+ }
+
+ if (E->isLogicalOp()) {
+ bool lhsResult;
+ if (HandleConversionToBool(LHSResult.Val, lhsResult)) {
+ // We were able to evaluate the LHS, see if we can get away with not
+ // evaluating the RHS: 0 && X -> 0, 1 || X -> 1
+ if (lhsResult == (E->getOpcode() == BO_LOr)) {
+ Success(lhsResult, E, LHSResult.Val);
+ return false; // Ignore RHS
+ }
+ } else {
+ // Since we weren't able to evaluate the left hand side, it
+ // must have had side effects.
+ Info.EvalStatus.HasSideEffects = true;
+
+ // We can't evaluate the LHS; however, sometimes the result
+ // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
+ // Don't ignore RHS and suppress diagnostics from this arm.
+ SuppressRHSDiags = true;
+ }
+
+ return true;
+ }
+
+ assert(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
+ E->getRHS()->getType()->isIntegralOrEnumerationType());
+
+ if (LHSResult.Failed && !Info.keepEvaluatingAfterFailure())
+ return false; // Ignore RHS;
+
+ return true;
+}
+
+bool DataRecursiveIntBinOpEvaluator::
+ VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
+ const BinaryOperator *E, APValue &Result) {
+ if (E->getOpcode() == BO_Comma) {
+ if (RHSResult.Failed)
+ return false;
+ Result = RHSResult.Val;
+ return true;
+ }
+
+ if (E->isLogicalOp()) {
+ bool lhsResult, rhsResult;
+ bool LHSIsOK = HandleConversionToBool(LHSResult.Val, lhsResult);
+ bool RHSIsOK = HandleConversionToBool(RHSResult.Val, rhsResult);
+
+ if (LHSIsOK) {
+ if (RHSIsOK) {
+ if (E->getOpcode() == BO_LOr)
+ return Success(lhsResult || rhsResult, E, Result);
+ else
+ return Success(lhsResult && rhsResult, E, Result);
+ }
+ } else {
+ if (RHSIsOK) {
+ // We can't evaluate the LHS; however, sometimes the result
+ // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
+ if (rhsResult == (E->getOpcode() == BO_LOr))
+ return Success(rhsResult, E, Result);
+ }
+ }
+
+ return false;
+ }
+
+ assert(E->getLHS()->getType()->isIntegralOrEnumerationType() &&
+ E->getRHS()->getType()->isIntegralOrEnumerationType());
+
+ if (LHSResult.Failed || RHSResult.Failed)
+ return false;
+
+ const APValue &LHSVal = LHSResult.Val;
+ const APValue &RHSVal = RHSResult.Val;
+
+ // Handle cases like (unsigned long)&a + 4.
+ if (E->isAdditiveOp() && LHSVal.isLValue() && RHSVal.isInt()) {
+ Result = LHSVal;
+ CharUnits AdditionalOffset = CharUnits::fromQuantity(
+ RHSVal.getInt().getZExtValue());
+ if (E->getOpcode() == BO_Add)
+ Result.getLValueOffset() += AdditionalOffset;
+ else
+ Result.getLValueOffset() -= AdditionalOffset;
+ return true;
+ }
+
+ // Handle cases like 4 + (unsigned long)&a
+ if (E->getOpcode() == BO_Add &&
+ RHSVal.isLValue() && LHSVal.isInt()) {
+ Result = RHSVal;
+ Result.getLValueOffset() += CharUnits::fromQuantity(
+ LHSVal.getInt().getZExtValue());
+ return true;
+ }
+
+ if (E->getOpcode() == BO_Sub && LHSVal.isLValue() && RHSVal.isLValue()) {
+ // Handle (intptr_t)&&A - (intptr_t)&&B.
+ if (!LHSVal.getLValueOffset().isZero() ||
+ !RHSVal.getLValueOffset().isZero())
+ return false;
+ const Expr *LHSExpr = LHSVal.getLValueBase().dyn_cast<const Expr*>();
+ const Expr *RHSExpr = RHSVal.getLValueBase().dyn_cast<const Expr*>();
+ if (!LHSExpr || !RHSExpr)
+ return false;
+ const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr);
+ const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr);
+ if (!LHSAddrExpr || !RHSAddrExpr)
+ return false;
+ // Make sure both labels come from the same function.
+ if (LHSAddrExpr->getLabel()->getDeclContext() !=
+ RHSAddrExpr->getLabel()->getDeclContext())
+ return false;
+ Result = APValue(LHSAddrExpr, RHSAddrExpr);
+ return true;
+ }
+
+ // All the following cases expect both operands to be an integer
+ if (!LHSVal.isInt() || !RHSVal.isInt())
+ return Error(E);
+
+ const APSInt &LHS = LHSVal.getInt();
+ APSInt RHS = RHSVal.getInt();
+
+ switch (E->getOpcode()) {
+ default:
+ return Error(E);
+ case BO_Mul:
+ return Success(CheckedIntArithmetic(Info, E, LHS, RHS,
+ LHS.getBitWidth() * 2,
+ std::multiplies<APSInt>()), E,
+ Result);
+ case BO_Add:
+ return Success(CheckedIntArithmetic(Info, E, LHS, RHS,
+ LHS.getBitWidth() + 1,
+ std::plus<APSInt>()), E, Result);
+ case BO_Sub:
+ return Success(CheckedIntArithmetic(Info, E, LHS, RHS,
+ LHS.getBitWidth() + 1,
+ std::minus<APSInt>()), E, Result);
+ case BO_And: return Success(LHS & RHS, E, Result);
+ case BO_Xor: return Success(LHS ^ RHS, E, Result);
+ case BO_Or: return Success(LHS | RHS, E, Result);
+ case BO_Div:
+ case BO_Rem:
+ if (RHS == 0)
+ return Error(E, diag::note_expr_divide_by_zero);
+ // Check for overflow case: INT_MIN / -1 or INT_MIN % -1. The latter is
+ // not actually undefined behavior in C++11 due to a language defect.
+ if (RHS.isNegative() && RHS.isAllOnesValue() &&
+ LHS.isSigned() && LHS.isMinSignedValue())
+ HandleOverflow(Info, E, -LHS.extend(LHS.getBitWidth() + 1), E->getType());
+ return Success(E->getOpcode() == BO_Rem ? LHS % RHS : LHS / RHS, E,
+ Result);
+ case BO_Shl: {
+ // During constant-folding, a negative shift is an opposite shift. Such
+ // a shift is not a constant expression.
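+ //
+ // For example, (1 << -3) is folded here as if it were written (1 >> 3),
+ // after the note below is emitted.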
+ if (RHS.isSigned() && RHS.isNegative()) {
+ CCEDiag(E, diag::note_constexpr_negative_shift) << RHS;
+ RHS = -RHS;
+ goto shift_right;
+ }
+
+ shift_left:
+ // C++11 [expr.shift]p1: Shift width must be less than the bit width of
+ // the shifted type.
+ unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1);
+ if (SA != RHS) {
+ CCEDiag(E, diag::note_constexpr_large_shift)
+ << RHS << E->getType() << LHS.getBitWidth();
+ } else if (LHS.isSigned()) {
+ // C++11 [expr.shift]p2: A signed left shift must have a non-negative
+ // operand, and must not overflow the corresponding unsigned type.
+ if (LHS.isNegative())
+ CCEDiag(E, diag::note_constexpr_lshift_of_negative) << LHS;
+ else if (LHS.countLeadingZeros() < SA)
+ CCEDiag(E, diag::note_constexpr_lshift_discards);
+ }
+
+ return Success(LHS << SA, E, Result);
+ }
+ case BO_Shr: {
+ // During constant-folding, a negative shift is an opposite shift. Such a
+ // shift is not a constant expression.
+ if (RHS.isSigned() && RHS.isNegative()) {
+ CCEDiag(E, diag::note_constexpr_negative_shift) << RHS;
+ RHS = -RHS;
+ goto shift_left;
+ }
+
+ shift_right:
+ // C++11 [expr.shift]p1: Shift width must be less than the bit width of the
+ // shifted type.
+ unsigned SA = (unsigned) RHS.getLimitedValue(LHS.getBitWidth()-1);
+ if (SA != RHS)
+ CCEDiag(E, diag::note_constexpr_large_shift)
+ << RHS << E->getType() << LHS.getBitWidth();
+
+ return Success(LHS >> SA, E, Result);
+ }
+
+ case BO_LT: return Success(LHS < RHS, E, Result);
+ case BO_GT: return Success(LHS > RHS, E, Result);
+ case BO_LE: return Success(LHS <= RHS, E, Result);
+ case BO_GE: return Success(LHS >= RHS, E, Result);
+ case BO_EQ: return Success(LHS == RHS, E, Result);
+ case BO_NE: return Success(LHS != RHS, E, Result);
+ }
+}
+
+void DataRecursiveIntBinOpEvaluator::process(EvalResult &Result) {
+ Job &job = Queue.back();
+
+ switch (job.Kind) {
+ case Job::AnyExprKind: {
+ if (const BinaryOperator *Bop = dyn_cast<BinaryOperator>(job.E)) {
+ if (shouldEnqueue(Bop)) {
+ job.Kind = Job::BinOpKind;
+ enqueue(Bop->getLHS());
+ return;
+ }
+ }
+
+ EvaluateExpr(job.E, Result);
+ Queue.pop_back();
+ return;
+ }
+
+ case Job::BinOpKind: {
+ const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
+ bool SuppressRHSDiags = false;
+ if (!VisitBinOpLHSOnly(Result, Bop, SuppressRHSDiags)) {
+ Queue.pop_back();
+ return;
+ }
+ if (SuppressRHSDiags)
+ job.startSpeculativeEval(Info);
+ job.LHSResult.swap(Result);
+ job.Kind = Job::BinOpVisitedLHSKind;
+ enqueue(Bop->getRHS());
+ return;
+ }
+
+ case Job::BinOpVisitedLHSKind: {
+ const BinaryOperator *Bop = cast<BinaryOperator>(job.E);
+ EvalResult RHS;
+ RHS.swap(Result);
+ Result.Failed = !VisitBinOp(job.LHSResult, RHS, Bop, Result.Val);
+ Queue.pop_back();
+ return;
+ }
+ }
+
+ llvm_unreachable("Invalid Job::Kind!");
+}
+
+bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->isAssignmentOp())
+ return Error(E);
+
+ if (DataRecursiveIntBinOpEvaluator::shouldEnqueue(E))
+ return DataRecursiveIntBinOpEvaluator(*this, Result).Traverse(E);
+
+ QualType LHSTy = E->getLHS()->getType();
+ QualType RHSTy = E->getRHS()->getType();
+
+ if (LHSTy->isAnyComplexType()) {
+ assert(RHSTy->isAnyComplexType() && "Invalid comparison");
+ ComplexValue LHS, RHS;
+
+ bool LHSOK = EvaluateComplex(E->getLHS(), LHS, Info);
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+
+ if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK)
+ return false;
+
+ if (LHS.isComplexFloat()) {
+ APFloat::cmpResult CR_r =
+ LHS.getComplexFloatReal().compare(RHS.getComplexFloatReal());
+ APFloat::cmpResult CR_i =
+ LHS.getComplexFloatImag().compare(RHS.getComplexFloatImag());
+
+ if (E->getOpcode() == BO_EQ)
+ return Success((CR_r == APFloat::cmpEqual &&
+ CR_i == APFloat::cmpEqual), E);
+ else {
+ assert(E->getOpcode() == BO_NE &&
+ "Invalid complex comparison.");
+ return Success(((CR_r == APFloat::cmpGreaterThan ||
+ CR_r == APFloat::cmpLessThan ||
+ CR_r == APFloat::cmpUnordered) ||
+ (CR_i == APFloat::cmpGreaterThan ||
+ CR_i == APFloat::cmpLessThan ||
+ CR_i == APFloat::cmpUnordered)), E);
+ }
+ } else {
+ if (E->getOpcode() == BO_EQ)
+ return Success((LHS.getComplexIntReal() == RHS.getComplexIntReal() &&
+ LHS.getComplexIntImag() == RHS.getComplexIntImag()), E);
+ else {
+ assert(E->getOpcode() == BO_NE &&
+ "Invalid compex comparison.");
+ return Success((LHS.getComplexIntReal() != RHS.getComplexIntReal() ||
+ LHS.getComplexIntImag() != RHS.getComplexIntImag()), E);
+ }
+ }
+ }
+
+ if (LHSTy->isRealFloatingType() &&
+ RHSTy->isRealFloatingType()) {
+ APFloat RHS(0.0), LHS(0.0);
+
+ bool LHSOK = EvaluateFloat(E->getRHS(), RHS, Info);
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+
+ if (!EvaluateFloat(E->getLHS(), LHS, Info) || !LHSOK)
+ return false;
+
+ APFloat::cmpResult CR = LHS.compare(RHS);
+
+ switch (E->getOpcode()) {
+ default:
+ llvm_unreachable("Invalid binary operator!");
+ case BO_LT:
+ return Success(CR == APFloat::cmpLessThan, E);
+ case BO_GT:
+ return Success(CR == APFloat::cmpGreaterThan, E);
+ case BO_LE:
+ return Success(CR == APFloat::cmpLessThan || CR == APFloat::cmpEqual, E);
+ case BO_GE:
+ return Success(CR == APFloat::cmpGreaterThan || CR == APFloat::cmpEqual,
+ E);
+ case BO_EQ:
+ return Success(CR == APFloat::cmpEqual, E);
+ case BO_NE:
+ return Success(CR == APFloat::cmpGreaterThan
+ || CR == APFloat::cmpLessThan
+ || CR == APFloat::cmpUnordered, E);
+ }
+ }
+
+ if (LHSTy->isPointerType() && RHSTy->isPointerType()) {
+ if (E->getOpcode() == BO_Sub || E->isComparisonOp()) {
+ LValue LHSValue, RHSValue;
+
+ bool LHSOK = EvaluatePointer(E->getLHS(), LHSValue, Info);
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+
+ if (!EvaluatePointer(E->getRHS(), RHSValue, Info) || !LHSOK)
+ return false;
+
+ // Reject differing bases from the normal codepath; we special-case
+ // comparisons to null.
+ if (!HasSameBase(LHSValue, RHSValue)) {
+ if (E->getOpcode() == BO_Sub) {
+ // Handle &&A - &&B.
+ if (!LHSValue.Offset.isZero() || !RHSValue.Offset.isZero())
+ return false;
+ const Expr *LHSExpr = LHSValue.Base.dyn_cast<const Expr*>();
+ const Expr *RHSExpr = RHSValue.Base.dyn_cast<const Expr*>();
+ if (!LHSExpr || !RHSExpr)
+ return false;
+ const AddrLabelExpr *LHSAddrExpr = dyn_cast<AddrLabelExpr>(LHSExpr);
+ const AddrLabelExpr *RHSAddrExpr = dyn_cast<AddrLabelExpr>(RHSExpr);
+ if (!LHSAddrExpr || !RHSAddrExpr)
+ return false;
+ // Make sure both labels come from the same function.
+ if (LHSAddrExpr->getLabel()->getDeclContext() !=
+ RHSAddrExpr->getLabel()->getDeclContext())
+ return false;
+ Result = APValue(LHSAddrExpr, RHSAddrExpr);
+ return true;
+ }
+ // Inequalities and subtractions between unrelated pointers have
+ // unspecified or undefined behavior.
+ if (!E->isEqualityOp())
+ return Error(E);
+ // A constant address may compare equal to the address of a symbol.
+ // The one exception is that the address of an object cannot compare equal
+ // to a null pointer constant.
+ if ((!LHSValue.Base && !LHSValue.Offset.isZero()) ||
+ (!RHSValue.Base && !RHSValue.Offset.isZero()))
+ return Error(E);
+ // It's implementation-defined whether distinct literals will have
+ // distinct addresses. In clang, the result of such a comparison is
+ // unspecified, so it is not a constant expression. However, we do know
+ // that the address of a literal will be non-null.
+ if ((IsLiteralLValue(LHSValue) || IsLiteralLValue(RHSValue)) &&
+ LHSValue.Base && RHSValue.Base)
+ return Error(E);
+ // We can't tell whether weak symbols will end up pointing to the same
+ // object.
+ if (IsWeakLValue(LHSValue) || IsWeakLValue(RHSValue))
+ return Error(E);
+ // Pointers with different bases cannot represent the same object.
+ // (Note that clang defaults to -fmerge-all-constants, which can
+ // lead to inconsistent results for comparisons involving the address
+ // of a constant; this generally doesn't matter in practice.)
+ return Success(E->getOpcode() == BO_NE, E);
+ }
+
+ const CharUnits &LHSOffset = LHSValue.getLValueOffset();
+ const CharUnits &RHSOffset = RHSValue.getLValueOffset();
+
+ SubobjectDesignator &LHSDesignator = LHSValue.getLValueDesignator();
+ SubobjectDesignator &RHSDesignator = RHSValue.getLValueDesignator();
+
+ if (E->getOpcode() == BO_Sub) {
+ // C++11 [expr.add]p6:
+ // Unless both pointers point to elements of the same array object, or
+ // one past the last element of the array object, the behavior is
+ // undefined.
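+ //
+ // For example, for 'int a[4]', &a[3] - &a[1] folds to 2 below, while
+ // subtracting pointers into different array members of the same complete
+ // object is diagnosed by the check that follows.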
+ if (!LHSDesignator.Invalid && !RHSDesignator.Invalid &&
+ !AreElementsOfSameArray(getType(LHSValue.Base),
+ LHSDesignator, RHSDesignator))
+ CCEDiag(E, diag::note_constexpr_pointer_subtraction_not_same_array);
+
+ QualType Type = E->getLHS()->getType();
+ QualType ElementType = Type->getAs<PointerType>()->getPointeeType();
+
+ CharUnits ElementSize;
+ if (!HandleSizeof(Info, E->getExprLoc(), ElementType, ElementSize))
+ return false;
+
+ // FIXME: LLVM and GCC both compute LHSOffset - RHSOffset at runtime,
+ // and produce incorrect results when it overflows. Such behavior
+ // appears to be non-conforming, but is common, so perhaps we should
+ // assume the standard intended for such cases to be undefined behavior
+ // and check for them.
+
+ // Compute (LHSOffset - RHSOffset) / Size carefully, checking for
+ // overflow in the final conversion to ptrdiff_t.
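+ // (65 bits is enough to hold the difference of two signed 64-bit offsets
+ // without wrapping.)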
+ APSInt LHS(
+ llvm::APInt(65, (int64_t)LHSOffset.getQuantity(), true), false);
+ APSInt RHS(
+ llvm::APInt(65, (int64_t)RHSOffset.getQuantity(), true), false);
+ APSInt ElemSize(
+ llvm::APInt(65, (int64_t)ElementSize.getQuantity(), true), false);
+ APSInt TrueResult = (LHS - RHS) / ElemSize;
+ APSInt Result = TrueResult.trunc(Info.Ctx.getIntWidth(E->getType()));
+
+ if (Result.extend(65) != TrueResult)
+ HandleOverflow(Info, E, TrueResult, E->getType());
+ return Success(Result, E);
+ }
+
+ // C++11 [expr.rel]p3:
+ // Pointers to void (after pointer conversions) can be compared, with a
+ // result defined as follows: If both pointers represent the same
+ // address or are both the null pointer value, the result is true if the
+ // operator is <= or >= and false otherwise; otherwise the result is
+ // unspecified.
+ // We interpret this as applying to pointers to *cv* void.
+ if (LHSTy->isVoidPointerType() && LHSOffset != RHSOffset &&
+ E->isRelationalOp())
+ CCEDiag(E, diag::note_constexpr_void_comparison);
+
+ // C++11 [expr.rel]p2:
+ // - If two pointers point to non-static data members of the same object,
+ // or to subobjects or array elements of such members, recursively, the
+ // pointer to the later declared member compares greater provided the
+ // two members have the same access control and provided their class is
+ // not a union.
+ // [...]
+ // - Otherwise pointer comparisons are unspecified.
+ if (!LHSDesignator.Invalid && !RHSDesignator.Invalid &&
+ E->isRelationalOp()) {
+ bool WasArrayIndex;
+ unsigned Mismatch =
+ FindDesignatorMismatch(getType(LHSValue.Base), LHSDesignator,
+ RHSDesignator, WasArrayIndex);
+ // At the point where the designators diverge, the comparison has a
+ // specified value if:
+ // - we are comparing array indices
+ // - we are comparing fields of a union, or fields with the same access
+ // Otherwise, the result is unspecified and thus the comparison is not a
+ // constant expression.
+ if (!WasArrayIndex && Mismatch < LHSDesignator.Entries.size() &&
+ Mismatch < RHSDesignator.Entries.size()) {
+ const FieldDecl *LF = getAsField(LHSDesignator.Entries[Mismatch]);
+ const FieldDecl *RF = getAsField(RHSDesignator.Entries[Mismatch]);
+ if (!LF && !RF)
+ CCEDiag(E, diag::note_constexpr_pointer_comparison_base_classes);
+ else if (!LF)
+ CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
+ << getAsBaseClass(LHSDesignator.Entries[Mismatch])
+ << RF->getParent() << RF;
+ else if (!RF)
+ CCEDiag(E, diag::note_constexpr_pointer_comparison_base_field)
+ << getAsBaseClass(RHSDesignator.Entries[Mismatch])
+ << LF->getParent() << LF;
+ else if (!LF->getParent()->isUnion() &&
+ LF->getAccess() != RF->getAccess())
+ CCEDiag(E, diag::note_constexpr_pointer_comparison_differing_access)
+ << LF << LF->getAccess() << RF << RF->getAccess()
+ << LF->getParent();
+ }
+ }
+
+ switch (E->getOpcode()) {
+ default: llvm_unreachable("missing comparison operator");
+ case BO_LT: return Success(LHSOffset < RHSOffset, E);
+ case BO_GT: return Success(LHSOffset > RHSOffset, E);
+ case BO_LE: return Success(LHSOffset <= RHSOffset, E);
+ case BO_GE: return Success(LHSOffset >= RHSOffset, E);
+ case BO_EQ: return Success(LHSOffset == RHSOffset, E);
+ case BO_NE: return Success(LHSOffset != RHSOffset, E);
+ }
+ }
+ }
+
+ if (LHSTy->isMemberPointerType()) {
+ assert(E->isEqualityOp() && "unexpected member pointer operation");
+ assert(RHSTy->isMemberPointerType() && "invalid comparison");
+
+ MemberPtr LHSValue, RHSValue;
+
+ bool LHSOK = EvaluateMemberPointer(E->getLHS(), LHSValue, Info);
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+
+ if (!EvaluateMemberPointer(E->getRHS(), RHSValue, Info) || !LHSOK)
+ return false;
+
+ // C++11 [expr.eq]p2:
+ // If both operands are null, they compare equal. Otherwise if only one is
+ // null, they compare unequal.
+ if (!LHSValue.getDecl() || !RHSValue.getDecl()) {
+ bool Equal = !LHSValue.getDecl() && !RHSValue.getDecl();
+ return Success(E->getOpcode() == BO_EQ ? Equal : !Equal, E);
+ }
+
+ // Otherwise if either is a pointer to a virtual member function, the
+ // result is unspecified.
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(LHSValue.getDecl()))
+ if (MD->isVirtual())
+ CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(RHSValue.getDecl()))
+ if (MD->isVirtual())
+ CCEDiag(E, diag::note_constexpr_compare_virtual_mem_ptr) << MD;
+
+ // Otherwise they compare equal if and only if they would refer to the
+ // same member of the same most derived object or the same subobject if
+ // they were dereferenced with a hypothetical object of the associated
+ // class type.
+ bool Equal = LHSValue == RHSValue;
+ return Success(E->getOpcode() == BO_EQ ? Equal : !Equal, E);
+ }
+
+ if (LHSTy->isNullPtrType()) {
+ assert(E->isComparisonOp() && "unexpected nullptr operation");
+ assert(RHSTy->isNullPtrType() && "missing pointer conversion");
+ // C++11 [expr.rel]p4, [expr.eq]p3: If two operands of type std::nullptr_t
+ // are compared, the result is true if the operator is <=, >= or ==, and
+ // false otherwise.
+ BinaryOperator::Opcode Opcode = E->getOpcode();
+ return Success(Opcode == BO_EQ || Opcode == BO_LE || Opcode == BO_GE, E);
+ }
+
+ assert((!LHSTy->isIntegralOrEnumerationType() ||
+ !RHSTy->isIntegralOrEnumerationType()) &&
+ "DataRecursiveIntBinOpEvaluator should have handled integral types");
+ // We can't continue from here for non-integral types.
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+}
+
+CharUnits IntExprEvaluator::GetAlignOfType(QualType T) {
+ // C++ [expr.alignof]p3: "When alignof is applied to a reference type, the
+ // result shall be the alignment of the referenced type."
+ if (const ReferenceType *Ref = T->getAs<ReferenceType>())
+ T = Ref->getPointeeType();
+
+ // __alignof is defined to return the preferred alignment.
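+ // For example, on some 32-bit x86 ABIs the preferred alignment of 'double'
+ // is 8 bytes even though its minimum ABI alignment is only 4, so __alignof
+ // reports 8 there.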
+ return Info.Ctx.toCharUnitsFromBits(
+ Info.Ctx.getPreferredTypeAlign(T.getTypePtr()));
+}
+
+CharUnits IntExprEvaluator::GetAlignOfExpr(const Expr *E) {
+ E = E->IgnoreParens();
+
+ // alignof decl is always accepted, even if it doesn't make sense: we default
+ // to 1 in those cases.
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ return Info.Ctx.getDeclAlign(DRE->getDecl(),
+ /*RefAsPointee*/true);
+
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(E))
+ return Info.Ctx.getDeclAlign(ME->getMemberDecl(),
+ /*RefAsPointee*/true);
+
+ return GetAlignOfType(E->getType());
+}
+
+
+/// VisitUnaryExprOrTypeTraitExpr - Evaluate a sizeof, alignof or vec_step
+/// expression, producing a result of the expression's type.
+bool IntExprEvaluator::VisitUnaryExprOrTypeTraitExpr(
+ const UnaryExprOrTypeTraitExpr *E) {
+ switch(E->getKind()) {
+ case UETT_AlignOf: {
+ if (E->isArgumentType())
+ return Success(GetAlignOfType(E->getArgumentType()), E);
+ else
+ return Success(GetAlignOfExpr(E->getArgumentExpr()), E);
+ }
+
+ case UETT_VecStep: {
+ QualType Ty = E->getTypeOfArgument();
+
+ if (Ty->isVectorType()) {
+ unsigned n = Ty->getAs<VectorType>()->getNumElements();
+
+ // The vec_step built-in functions that take a 3-component
+ // vector return 4. (OpenCL 1.1 spec 6.11.12)
+ if (n == 3)
+ n = 4;
+
+ return Success(n, E);
+ } else
+ return Success(1, E);
+ }
+
+ case UETT_SizeOf: {
+ QualType SrcTy = E->getTypeOfArgument();
+ // C++ [expr.sizeof]p2: "When applied to a reference or a reference type,
+ // the result is the size of the referenced type."
+ if (const ReferenceType *Ref = SrcTy->getAs<ReferenceType>())
+ SrcTy = Ref->getPointeeType();
+
+ CharUnits Sizeof;
+ if (!HandleSizeof(Info, E->getExprLoc(), SrcTy, Sizeof))
+ return false;
+ return Success(Sizeof, E);
+ }
+ }
+
+ llvm_unreachable("unknown expr/type trait");
+}
+
+bool IntExprEvaluator::VisitOffsetOfExpr(const OffsetOfExpr *OOE) {
+ CharUnits Result;
+ unsigned n = OOE->getNumComponents();
+ if (n == 0)
+ return Error(OOE);
+ QualType CurrentType = OOE->getTypeSourceInfo()->getType();
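+ // For example, __builtin_offsetof(S, m.arr[3]) is walked as a Field node for
+ // 'm', a Field node for 'arr', and an Array node for the index, each adding
+ // its contribution to Result.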
+ for (unsigned i = 0; i != n; ++i) {
+ OffsetOfExpr::OffsetOfNode ON = OOE->getComponent(i);
+ switch (ON.getKind()) {
+ case OffsetOfExpr::OffsetOfNode::Array: {
+ const Expr *Idx = OOE->getIndexExpr(ON.getArrayExprIndex());
+ APSInt IdxResult;
+ if (!EvaluateInteger(Idx, IdxResult, Info))
+ return false;
+ const ArrayType *AT = Info.Ctx.getAsArrayType(CurrentType);
+ if (!AT)
+ return Error(OOE);
+ CurrentType = AT->getElementType();
+ CharUnits ElementSize = Info.Ctx.getTypeSizeInChars(CurrentType);
+ Result += IdxResult.getSExtValue() * ElementSize;
+ break;
+ }
+
+ case OffsetOfExpr::OffsetOfNode::Field: {
+ FieldDecl *MemberDecl = ON.getField();
+ const RecordType *RT = CurrentType->getAs<RecordType>();
+ if (!RT)
+ return Error(OOE);
+ RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
+ unsigned i = MemberDecl->getFieldIndex();
+ assert(i < RL.getFieldCount() && "offsetof field in wrong type");
+ Result += Info.Ctx.toCharUnitsFromBits(RL.getFieldOffset(i));
+ CurrentType = MemberDecl->getType().getNonReferenceType();
+ break;
+ }
+
+ case OffsetOfExpr::OffsetOfNode::Identifier:
+ llvm_unreachable("dependent __builtin_offsetof");
+
+ case OffsetOfExpr::OffsetOfNode::Base: {
+ CXXBaseSpecifier *BaseSpec = ON.getBase();
+ if (BaseSpec->isVirtual())
+ return Error(OOE);
+
+ // Find the layout of the class whose base we are looking into.
+ const RecordType *RT = CurrentType->getAs<RecordType>();
+ if (!RT)
+ return Error(OOE);
+ RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &RL = Info.Ctx.getASTRecordLayout(RD);
+
+ // Find the base class itself.
+ CurrentType = BaseSpec->getType();
+ const RecordType *BaseRT = CurrentType->getAs<RecordType>();
+ if (!BaseRT)
+ return Error(OOE);
+
+ // Add the offset to the base.
+ Result += RL.getBaseClassOffset(cast<CXXRecordDecl>(BaseRT->getDecl()));
+ break;
+ }
+ }
+ }
+ return Success(Result, OOE);
+}
+
+bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
+ switch (E->getOpcode()) {
+ default:
+ // Address, indirect, pre/post inc/dec, etc. are not valid constant exprs.
+ // See C99 6.6p3.
+ return Error(E);
+ case UO_Extension:
+ // FIXME: Should extension allow i-c-e extension expressions in its scope?
+ // If so, we could clear the diagnostic ID.
+ return Visit(E->getSubExpr());
+ case UO_Plus:
+ // The result is just the value.
+ return Visit(E->getSubExpr());
+ case UO_Minus: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+ if (!Result.isInt()) return Error(E);
+ const APSInt &Value = Result.getInt();
+ if (Value.isSigned() && Value.isMinSignedValue())
+ HandleOverflow(Info, E, -Value.extend(Value.getBitWidth() + 1),
+ E->getType());
+ return Success(-Value, E);
+ }
+ case UO_Not: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+ if (!Result.isInt()) return Error(E);
+ return Success(~Result.getInt(), E);
+ }
+ case UO_LNot: {
+ bool bres;
+ if (!EvaluateAsBooleanCondition(E->getSubExpr(), bres, Info))
+ return false;
+ return Success(!bres, E);
+ }
+ }
+}
+
+/// HandleCast - This is used to evaluate implicit or explicit casts where the
+/// result type is integer.
+bool IntExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ const Expr *SubExpr = E->getSubExpr();
+ QualType DestType = E->getType();
+ QualType SrcType = SubExpr->getType();
+
+ switch (E->getCastKind()) {
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_ReinterpretMemberPointer:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralToFloating:
+ case CK_FloatingCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ llvm_unreachable("invalid cast kind for integral value");
+
+ case CK_BitCast:
+ case CK_Dependent:
+ case CK_LValueBitCast:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ return Error(E);
+
+ case CK_UserDefinedConversion:
+ case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_MemberPointerToBoolean:
+ case CK_PointerToBoolean:
+ case CK_IntegralToBoolean:
+ case CK_FloatingToBoolean:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToBoolean: {
+ bool BoolResult;
+ if (!EvaluateAsBooleanCondition(SubExpr, BoolResult, Info))
+ return false;
+ return Success(BoolResult, E);
+ }
+
+ case CK_IntegralCast: {
+ if (!Visit(SubExpr))
+ return false;
+
+ if (!Result.isInt()) {
+ // Allow casts of address-of-label differences if they are no-ops
+ // or narrowing. (The narrowing case isn't actually guaranteed to
+ // be constant-evaluatable except in some narrow cases which are hard
+ // to detect here. We let it through on the assumption the user knows
+ // what they are doing.)
+ if (Result.isAddrLabelDiff())
+ return Info.Ctx.getTypeSize(DestType) <= Info.Ctx.getTypeSize(SrcType);
+ // Only allow casts of lvalues if they are lossless.
+ return Info.Ctx.getTypeSize(DestType) == Info.Ctx.getTypeSize(SrcType);
+ }
+
+ return Success(HandleIntToIntCast(Info, E, DestType, SrcType,
+ Result.getInt()), E);
+ }
+
+ case CK_PointerToIntegral: {
+ CCEDiag(E, diag::note_constexpr_invalid_cast) << 2;
+
+ LValue LV;
+ if (!EvaluatePointer(SubExpr, LV, Info))
+ return false;
+
+ if (LV.getLValueBase()) {
+ // Only allow based lvalue casts if they are lossless.
+ // FIXME: Allow a larger integer size than the pointer size, and allow
+ // narrowing back down to pointer width in subsequent integral casts.
+ // FIXME: Check integer type's active bits, not its type size.
+ if (Info.Ctx.getTypeSize(DestType) != Info.Ctx.getTypeSize(SrcType))
+ return Error(E);
+
+ LV.Designator.setInvalid();
+ LV.moveInto(Result);
+ return true;
+ }
+
+ APSInt AsInt = Info.Ctx.MakeIntValue(LV.getLValueOffset().getQuantity(),
+ SrcType);
+ return Success(HandleIntToIntCast(Info, E, DestType, SrcType, AsInt), E);
+ }
+
+ case CK_IntegralComplexToReal: {
+ ComplexValue C;
+ if (!EvaluateComplex(SubExpr, C, Info))
+ return false;
+ return Success(C.getComplexIntReal(), E);
+ }
+
+ case CK_FloatingToIntegral: {
+ APFloat F(0.0);
+ if (!EvaluateFloat(SubExpr, F, Info))
+ return false;
+
+ APSInt Value;
+ if (!HandleFloatToIntCast(Info, E, SrcType, F, DestType, Value))
+ return false;
+ return Success(Value, E);
+ }
+ }
+
+ llvm_unreachable("unknown cast resulting in integral value");
+}
+
+bool IntExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
+ if (E->getSubExpr()->getType()->isAnyComplexType()) {
+ ComplexValue LV;
+ if (!EvaluateComplex(E->getSubExpr(), LV, Info))
+ return false;
+ if (!LV.isComplexInt())
+ return Error(E);
+ return Success(LV.getComplexIntReal(), E);
+ }
+
+ return Visit(E->getSubExpr());
+}
+
+bool IntExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
+ if (E->getSubExpr()->getType()->isComplexIntegerType()) {
+ ComplexValue LV;
+ if (!EvaluateComplex(E->getSubExpr(), LV, Info))
+ return false;
+ if (!LV.isComplexInt())
+ return Error(E);
+ return Success(LV.getComplexIntImag(), E);
+ }
+
+ VisitIgnoredValue(E->getSubExpr());
+ return Success(0, E);
+}
+
+bool IntExprEvaluator::VisitSizeOfPackExpr(const SizeOfPackExpr *E) {
+ return Success(E->getPackLength(), E);
+}
+
+bool IntExprEvaluator::VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
+ return Success(E->getValue(), E);
+}
+
+//===----------------------------------------------------------------------===//
+// Float Evaluation
+//===----------------------------------------------------------------------===//
+
+namespace {
+class FloatExprEvaluator
+ : public ExprEvaluatorBase<FloatExprEvaluator, bool> {
+ APFloat &Result;
+public:
+ FloatExprEvaluator(EvalInfo &info, APFloat &result)
+ : ExprEvaluatorBaseTy(info), Result(result) {}
+
+ bool Success(const APValue &V, const Expr *e) {
+ Result = V.getFloat();
+ return true;
+ }
+
+ bool ZeroInitialization(const Expr *E) {
+ Result = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(E->getType()));
+ return true;
+ }
+
+ bool VisitCallExpr(const CallExpr *E);
+
+ bool VisitUnaryOperator(const UnaryOperator *E);
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitFloatingLiteral(const FloatingLiteral *E);
+ bool VisitCastExpr(const CastExpr *E);
+
+ bool VisitUnaryReal(const UnaryOperator *E);
+ bool VisitUnaryImag(const UnaryOperator *E);
+
+ // FIXME: Missing: array subscript of vector, member of vector
+};
+} // end anonymous namespace
+
+static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isRealFloatingType());
+ return FloatExprEvaluator(Info, Result).Visit(E);
+}
+
+static bool TryEvaluateBuiltinNaN(const ASTContext &Context,
+ QualType ResultTy,
+ const Expr *Arg,
+ bool SNaN,
+ llvm::APFloat &Result) {
+ const StringLiteral *S = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
+ if (!S) return false;
+
+ const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(ResultTy);
+
+ llvm::APInt fill;
+
+ // Treat empty strings as if they were zero.
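+ // For example, __builtin_nan("") and __builtin_nan("0") both request an
+ // all-zero payload, while __builtin_nan("2") parses 2 into 'fill'.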
+ if (S->getString().empty())
+ fill = llvm::APInt(32, 0);
+ else if (S->getString().getAsInteger(0, fill))
+ return false;
+
+ if (SNaN)
+ Result = llvm::APFloat::getSNaN(Sem, false, &fill);
+ else
+ Result = llvm::APFloat::getQNaN(Sem, false, &fill);
+ return true;
+}
+
+bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
+ switch (E->isBuiltinCall()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+
+ case Builtin::BI__builtin_huge_val:
+ case Builtin::BI__builtin_huge_valf:
+ case Builtin::BI__builtin_huge_vall:
+ case Builtin::BI__builtin_inf:
+ case Builtin::BI__builtin_inff:
+ case Builtin::BI__builtin_infl: {
+ const llvm::fltSemantics &Sem =
+ Info.Ctx.getFloatTypeSemantics(E->getType());
+ Result = llvm::APFloat::getInf(Sem);
+ return true;
+ }
+
+ case Builtin::BI__builtin_nans:
+ case Builtin::BI__builtin_nansf:
+ case Builtin::BI__builtin_nansl:
+ if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
+ true, Result))
+ return Error(E);
+ return true;
+
+ case Builtin::BI__builtin_nan:
+ case Builtin::BI__builtin_nanf:
+ case Builtin::BI__builtin_nanl:
+ // If this is __builtin_nan(), turn this into a NaN; otherwise we
+ // can't constant fold it.
+ if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
+ false, Result))
+ return Error(E);
+ return true;
+
+ case Builtin::BI__builtin_fabs:
+ case Builtin::BI__builtin_fabsf:
+ case Builtin::BI__builtin_fabsl:
+ if (!EvaluateFloat(E->getArg(0), Result, Info))
+ return false;
+
+ if (Result.isNegative())
+ Result.changeSign();
+ return true;
+
+ case Builtin::BI__builtin_copysign:
+ case Builtin::BI__builtin_copysignf:
+ case Builtin::BI__builtin_copysignl: {
+ APFloat RHS(0.);
+ if (!EvaluateFloat(E->getArg(0), Result, Info) ||
+ !EvaluateFloat(E->getArg(1), RHS, Info))
+ return false;
+ Result.copySign(RHS);
+ return true;
+ }
+ }
+}
+
+bool FloatExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
+ if (E->getSubExpr()->getType()->isAnyComplexType()) {
+ ComplexValue CV;
+ if (!EvaluateComplex(E->getSubExpr(), CV, Info))
+ return false;
+ Result = CV.FloatReal;
+ return true;
+ }
+
+ return Visit(E->getSubExpr());
+}
+
+bool FloatExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
+ if (E->getSubExpr()->getType()->isAnyComplexType()) {
+ ComplexValue CV;
+ if (!EvaluateComplex(E->getSubExpr(), CV, Info))
+ return false;
+ Result = CV.FloatImag;
+ return true;
+ }
+
+ VisitIgnoredValue(E->getSubExpr());
+ const llvm::fltSemantics &Sem = Info.Ctx.getFloatTypeSemantics(E->getType());
+ Result = llvm::APFloat::getZero(Sem);
+ return true;
+}
+
+bool FloatExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
+ switch (E->getOpcode()) {
+ default: return Error(E);
+ case UO_Plus:
+ return EvaluateFloat(E->getSubExpr(), Result, Info);
+ case UO_Minus:
+ if (!EvaluateFloat(E->getSubExpr(), Result, Info))
+ return false;
+ Result.changeSign();
+ return true;
+ }
+}
+
+bool FloatExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+
+ APFloat RHS(0.0);
+ bool LHSOK = EvaluateFloat(E->getLHS(), Result, Info);
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+ if (!EvaluateFloat(E->getRHS(), RHS, Info) || !LHSOK)
+ return false;
+
+ switch (E->getOpcode()) {
+ default: return Error(E);
+ case BO_Mul:
+ Result.multiply(RHS, APFloat::rmNearestTiesToEven);
+ break;
+ case BO_Add:
+ Result.add(RHS, APFloat::rmNearestTiesToEven);
+ break;
+ case BO_Sub:
+ Result.subtract(RHS, APFloat::rmNearestTiesToEven);
+ break;
+ case BO_Div:
+ Result.divide(RHS, APFloat::rmNearestTiesToEven);
+ break;
+ }
+
+ if (Result.isInfinity() || Result.isNaN())
+ CCEDiag(E, diag::note_constexpr_float_arithmetic) << Result.isNaN();
+ return true;
+}
+
+bool FloatExprEvaluator::VisitFloatingLiteral(const FloatingLiteral *E) {
+ Result = E->getValue();
+ return true;
+}
+
+bool FloatExprEvaluator::VisitCastExpr(const CastExpr *E) {
+ const Expr* SubExpr = E->getSubExpr();
+
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_IntegralToFloating: {
+ APSInt IntResult;
+ return EvaluateInteger(SubExpr, IntResult, Info) &&
+ HandleIntToFloatCast(Info, E, SubExpr->getType(), IntResult,
+ E->getType(), Result);
+ }
+
+ case CK_FloatingCast: {
+ if (!Visit(SubExpr))
+ return false;
+ return HandleFloatToFloatCast(Info, E, SubExpr->getType(), E->getType(),
+ Result);
+ }
+
+ case CK_FloatingComplexToReal: {
+ ComplexValue V;
+ if (!EvaluateComplex(SubExpr, V, Info))
+ return false;
+ Result = V.getComplexFloatReal();
+ return true;
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Complex Evaluation (for float and integer)
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ComplexExprEvaluator
+ : public ExprEvaluatorBase<ComplexExprEvaluator, bool> {
+ ComplexValue &Result;
+
+public:
+ ComplexExprEvaluator(EvalInfo &info, ComplexValue &Result)
+ : ExprEvaluatorBaseTy(info), Result(Result) {}
+
+ bool Success(const APValue &V, const Expr *e) {
+ Result.setFrom(V);
+ return true;
+ }
+
+ bool ZeroInitialization(const Expr *E);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ bool VisitImaginaryLiteral(const ImaginaryLiteral *E);
+ bool VisitCastExpr(const CastExpr *E);
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitUnaryOperator(const UnaryOperator *E);
+ bool VisitInitListExpr(const InitListExpr *E);
+};
+} // end anonymous namespace
+
+static bool EvaluateComplex(const Expr *E, ComplexValue &Result,
+ EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isAnyComplexType());
+ return ComplexExprEvaluator(Info, Result).Visit(E);
+}
+
+bool ComplexExprEvaluator::ZeroInitialization(const Expr *E) {
+ QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
+ if (ElemTy->isRealFloatingType()) {
+ Result.makeComplexFloat();
+ APFloat Zero = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(ElemTy));
+ Result.FloatReal = Zero;
+ Result.FloatImag = Zero;
+ } else {
+ Result.makeComplexInt();
+ APSInt Zero = Info.Ctx.MakeIntValue(0, ElemTy);
+ Result.IntReal = Zero;
+ Result.IntImag = Zero;
+ }
+ return true;
+}
+
+bool ComplexExprEvaluator::VisitImaginaryLiteral(const ImaginaryLiteral *E) {
+ const Expr* SubExpr = E->getSubExpr();
+
+ if (SubExpr->getType()->isRealFloatingType()) {
+ Result.makeComplexFloat();
+ APFloat &Imag = Result.FloatImag;
+ if (!EvaluateFloat(SubExpr, Imag, Info))
+ return false;
+
+ Result.FloatReal = APFloat(Imag.getSemantics());
+ return true;
+ } else {
+ assert(SubExpr->getType()->isIntegerType() &&
+ "Unexpected imaginary literal.");
+
+ Result.makeComplexInt();
+ APSInt &Imag = Result.IntImag;
+ if (!EvaluateInteger(SubExpr, Imag, Info))
+ return false;
+
+ Result.IntReal = APSInt(Imag.getBitWidth(), !Imag.isSigned());
+ return true;
+ }
+}
+
+bool ComplexExprEvaluator::VisitCastExpr(const CastExpr *E) {
+
+ switch (E->getCastKind()) {
+ case CK_BitCast:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ llvm_unreachable("invalid cast kind for complex value");
+
+ case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+
+ case CK_Dependent:
+ case CK_LValueBitCast:
+ case CK_UserDefinedConversion:
+ return Error(E);
+
+ case CK_FloatingRealToComplex: {
+ APFloat &Real = Result.FloatReal;
+ if (!EvaluateFloat(E->getSubExpr(), Real, Info))
+ return false;
+
+ Result.makeComplexFloat();
+ Result.FloatImag = APFloat(Real.getSemantics());
+ return true;
+ }
+
+ case CK_FloatingComplexCast: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ QualType To = E->getType()->getAs<ComplexType>()->getElementType();
+ QualType From
+ = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
+
+ return HandleFloatToFloatCast(Info, E, From, To, Result.FloatReal) &&
+ HandleFloatToFloatCast(Info, E, From, To, Result.FloatImag);
+ }
+
+ case CK_FloatingComplexToIntegralComplex: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ QualType To = E->getType()->getAs<ComplexType>()->getElementType();
+ QualType From
+ = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
+ Result.makeComplexInt();
+ return HandleFloatToIntCast(Info, E, From, Result.FloatReal,
+ To, Result.IntReal) &&
+ HandleFloatToIntCast(Info, E, From, Result.FloatImag,
+ To, Result.IntImag);
+ }
+
+ case CK_IntegralRealToComplex: {
+ APSInt &Real = Result.IntReal;
+ if (!EvaluateInteger(E->getSubExpr(), Real, Info))
+ return false;
+
+ Result.makeComplexInt();
+ Result.IntImag = APSInt(Real.getBitWidth(), !Real.isSigned());
+ return true;
+ }
+
+ case CK_IntegralComplexCast: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ QualType To = E->getType()->getAs<ComplexType>()->getElementType();
+ QualType From
+ = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
+
+ Result.IntReal = HandleIntToIntCast(Info, E, To, From, Result.IntReal);
+ Result.IntImag = HandleIntToIntCast(Info, E, To, From, Result.IntImag);
+ return true;
+ }
+
+ case CK_IntegralComplexToFloatingComplex: {
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ QualType To = E->getType()->getAs<ComplexType>()->getElementType();
+ QualType From
+ = E->getSubExpr()->getType()->getAs<ComplexType>()->getElementType();
+ Result.makeComplexFloat();
+ return HandleIntToFloatCast(Info, E, From, Result.IntReal,
+ To, Result.FloatReal) &&
+ HandleIntToFloatCast(Info, E, From, Result.IntImag,
+ To, Result.FloatImag);
+ }
+ }
+
+ llvm_unreachable("unknown cast resulting in complex value");
+}
+
+bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->isPtrMemOp() || E->isAssignmentOp() || E->getOpcode() == BO_Comma)
+ return ExprEvaluatorBaseTy::VisitBinaryOperator(E);
+
+ bool LHSOK = Visit(E->getLHS());
+ if (!LHSOK && !Info.keepEvaluatingAfterFailure())
+ return false;
+
+ ComplexValue RHS;
+ if (!EvaluateComplex(E->getRHS(), RHS, Info) || !LHSOK)
+ return false;
+
+ assert(Result.isComplexFloat() == RHS.isComplexFloat() &&
+ "Invalid operands to binary operator.");
+ switch (E->getOpcode()) {
+ default: return Error(E);
+ case BO_Add:
+ if (Result.isComplexFloat()) {
+ Result.getComplexFloatReal().add(RHS.getComplexFloatReal(),
+ APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatImag().add(RHS.getComplexFloatImag(),
+ APFloat::rmNearestTiesToEven);
+ } else {
+ Result.getComplexIntReal() += RHS.getComplexIntReal();
+ Result.getComplexIntImag() += RHS.getComplexIntImag();
+ }
+ break;
+ case BO_Sub:
+ if (Result.isComplexFloat()) {
+ Result.getComplexFloatReal().subtract(RHS.getComplexFloatReal(),
+ APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatImag().subtract(RHS.getComplexFloatImag(),
+ APFloat::rmNearestTiesToEven);
+ } else {
+ Result.getComplexIntReal() -= RHS.getComplexIntReal();
+ Result.getComplexIntImag() -= RHS.getComplexIntImag();
+ }
+ break;
+ case BO_Mul:
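+ // (a + bi) * (c + di) = (ac - bd) + (ad + bc)i; the floating-point path
+ // below computes each term with explicit APFloat temporaries.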
+ if (Result.isComplexFloat()) {
+ ComplexValue LHS = Result;
+ APFloat &LHS_r = LHS.getComplexFloatReal();
+ APFloat &LHS_i = LHS.getComplexFloatImag();
+ APFloat &RHS_r = RHS.getComplexFloatReal();
+ APFloat &RHS_i = RHS.getComplexFloatImag();
+
+ APFloat Tmp = LHS_r;
+ Tmp.multiply(RHS_r, APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatReal() = Tmp;
+ Tmp = LHS_i;
+ Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatReal().subtract(Tmp, APFloat::rmNearestTiesToEven);
+
+ Tmp = LHS_r;
+ Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatImag() = Tmp;
+ Tmp = LHS_i;
+ Tmp.multiply(RHS_r, APFloat::rmNearestTiesToEven);
+ Result.getComplexFloatImag().add(Tmp, APFloat::rmNearestTiesToEven);
+ } else {
+ ComplexValue LHS = Result;
+ Result.getComplexIntReal() =
+ (LHS.getComplexIntReal() * RHS.getComplexIntReal() -
+ LHS.getComplexIntImag() * RHS.getComplexIntImag());
+ Result.getComplexIntImag() =
+ (LHS.getComplexIntReal() * RHS.getComplexIntImag() +
+ LHS.getComplexIntImag() * RHS.getComplexIntReal());
+ }
+ break;
+ case BO_Div:
+ if (Result.isComplexFloat()) {
+ ComplexValue LHS = Result;
+ APFloat &LHS_r = LHS.getComplexFloatReal();
+ APFloat &LHS_i = LHS.getComplexFloatImag();
+ APFloat &RHS_r = RHS.getComplexFloatReal();
+ APFloat &RHS_i = RHS.getComplexFloatImag();
+ APFloat &Res_r = Result.getComplexFloatReal();
+ APFloat &Res_i = Result.getComplexFloatImag();
+
+ APFloat Den = RHS_r;
+ Den.multiply(RHS_r, APFloat::rmNearestTiesToEven);
+ APFloat Tmp = RHS_i;
+ Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
+ Den.add(Tmp, APFloat::rmNearestTiesToEven);
+
+ Res_r = LHS_r;
+ Res_r.multiply(RHS_r, APFloat::rmNearestTiesToEven);
+ Tmp = LHS_i;
+ Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
+ Res_r.add(Tmp, APFloat::rmNearestTiesToEven);
+ Res_r.divide(Den, APFloat::rmNearestTiesToEven);
+
+ Res_i = LHS_i;
+ Res_i.multiply(RHS_r, APFloat::rmNearestTiesToEven);
+ Tmp = LHS_r;
+ Tmp.multiply(RHS_i, APFloat::rmNearestTiesToEven);
+ Res_i.subtract(Tmp, APFloat::rmNearestTiesToEven);
+ Res_i.divide(Den, APFloat::rmNearestTiesToEven);
+ } else {
+ if (RHS.getComplexIntReal() == 0 && RHS.getComplexIntImag() == 0)
+ return Error(E, diag::note_expr_divide_by_zero);
+
+ ComplexValue LHS = Result;
+ APSInt Den = RHS.getComplexIntReal() * RHS.getComplexIntReal() +
+ RHS.getComplexIntImag() * RHS.getComplexIntImag();
+ Result.getComplexIntReal() =
+ (LHS.getComplexIntReal() * RHS.getComplexIntReal() +
+ LHS.getComplexIntImag() * RHS.getComplexIntImag()) / Den;
+ Result.getComplexIntImag() =
+ (LHS.getComplexIntImag() * RHS.getComplexIntReal() -
+ LHS.getComplexIntReal() * RHS.getComplexIntImag()) / Den;
+ }
+ break;
+ }
+
+ return true;
+}
+
+bool ComplexExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
+ // Get the operand value into 'Result'.
+ if (!Visit(E->getSubExpr()))
+ return false;
+
+ switch (E->getOpcode()) {
+ default:
+ return Error(E);
+ case UO_Extension:
+ return true;
+ case UO_Plus:
+ // The result is always just the subexpr.
+ return true;
+ case UO_Minus:
+ if (Result.isComplexFloat()) {
+ Result.getComplexFloatReal().changeSign();
+ Result.getComplexFloatImag().changeSign();
+ }
+ else {
+ Result.getComplexIntReal() = -Result.getComplexIntReal();
+ Result.getComplexIntImag() = -Result.getComplexIntImag();
+ }
+ return true;
+ case UO_Not:
+ if (Result.isComplexFloat())
+ Result.getComplexFloatImag().changeSign();
+ else
+ Result.getComplexIntImag() = -Result.getComplexIntImag();
+ return true;
+ }
+}
+
+bool ComplexExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
+ if (E->getNumInits() == 2) {
+ if (E->getType()->isComplexType()) {
+ Result.makeComplexFloat();
+ if (!EvaluateFloat(E->getInit(0), Result.FloatReal, Info))
+ return false;
+ if (!EvaluateFloat(E->getInit(1), Result.FloatImag, Info))
+ return false;
+ } else {
+ Result.makeComplexInt();
+ if (!EvaluateInteger(E->getInit(0), Result.IntReal, Info))
+ return false;
+ if (!EvaluateInteger(E->getInit(1), Result.IntImag, Info))
+ return false;
+ }
+ return true;
+ }
+ return ExprEvaluatorBaseTy::VisitInitListExpr(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Void expression evaluation, primarily for a cast to void on the LHS of a
+// comma operator
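+//
+// For example (illustrative only), in C++11 the initializer
+//   constexpr int n = ((void)0, 42);
+// requires the evaluator to walk the discarded '(void)0' operand, which is
+// what the evaluator below handles.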
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VoidExprEvaluator
+ : public ExprEvaluatorBase<VoidExprEvaluator, bool> {
+public:
+ VoidExprEvaluator(EvalInfo &Info) : ExprEvaluatorBaseTy(Info) {}
+
+ bool Success(const APValue &V, const Expr *e) { return true; }
+
+ bool VisitCastExpr(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ return ExprEvaluatorBaseTy::VisitCastExpr(E);
+ case CK_ToVoid:
+ VisitIgnoredValue(E->getSubExpr());
+ return true;
+ }
+ }
+};
+} // end anonymous namespace
+
+static bool EvaluateVoid(const Expr *E, EvalInfo &Info) {
+ assert(E->isRValue() && E->getType()->isVoidType());
+ return VoidExprEvaluator(Info).Visit(E);
+}
+
+//===----------------------------------------------------------------------===//
+// Top level Expr::EvaluateAsRValue method.
+//===----------------------------------------------------------------------===//
+
+static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
+ // In C, function designators are not lvalues, but we evaluate them as if they
+ // are.
+ if (E->isGLValue() || E->getType()->isFunctionType()) {
+ LValue LV;
+ if (!EvaluateLValue(E, LV, Info))
+ return false;
+ LV.moveInto(Result);
+ } else if (E->getType()->isVectorType()) {
+ if (!EvaluateVector(E, Result, Info))
+ return false;
+ } else if (E->getType()->isIntegralOrEnumerationType()) {
+ if (!IntExprEvaluator(Info, Result).Visit(E))
+ return false;
+ } else if (E->getType()->hasPointerRepresentation()) {
+ LValue LV;
+ if (!EvaluatePointer(E, LV, Info))
+ return false;
+ LV.moveInto(Result);
+ } else if (E->getType()->isRealFloatingType()) {
+ llvm::APFloat F(0.0);
+ if (!EvaluateFloat(E, F, Info))
+ return false;
+ Result = APValue(F);
+ } else if (E->getType()->isAnyComplexType()) {
+ ComplexValue C;
+ if (!EvaluateComplex(E, C, Info))
+ return false;
+ C.moveInto(Result);
+ } else if (E->getType()->isMemberPointerType()) {
+ MemberPtr P;
+ if (!EvaluateMemberPointer(E, P, Info))
+ return false;
+ P.moveInto(Result);
+ return true;
+ } else if (E->getType()->isArrayType()) {
+ LValue LV;
+ LV.set(E, Info.CurrentCall->Index);
+ if (!EvaluateArray(E, LV, Info.CurrentCall->Temporaries[E], Info))
+ return false;
+ Result = Info.CurrentCall->Temporaries[E];
+ } else if (E->getType()->isRecordType()) {
+ LValue LV;
+ LV.set(E, Info.CurrentCall->Index);
+ if (!EvaluateRecord(E, LV, Info.CurrentCall->Temporaries[E], Info))
+ return false;
+ Result = Info.CurrentCall->Temporaries[E];
+ } else if (E->getType()->isVoidType()) {
+ if (Info.getLangOpts().CPlusPlus0x)
+ Info.CCEDiag(E, diag::note_constexpr_nonliteral)
+ << E->getType();
+ else
+ Info.CCEDiag(E, diag::note_invalid_subexpr_in_const_expr);
+ if (!EvaluateVoid(E, Info))
+ return false;
+ } else if (Info.getLangOpts().CPlusPlus0x) {
+ Info.Diag(E, diag::note_constexpr_nonliteral) << E->getType();
+ return false;
+ } else {
+ Info.Diag(E, diag::note_invalid_subexpr_in_const_expr);
+ return false;
+ }
+
+ return true;
+}
+
+/// EvaluateInPlace - Evaluate an expression in-place in an APValue. In some
+/// cases, the in-place evaluation is essential, since later initializers for
+/// an object can indirectly refer to subobjects which were initialized earlier.
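+///
+/// For example (a sketch): given
+///   struct S { int a, b; constexpr S() : a(1), b(a + 1) {} };
+/// the initializer for 'b' reads the value of 'a' that was just stored into
+/// the same object, which only works if the result is built up in place.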
+static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This,
+ const Expr *E, CheckConstantExpressionKind CCEK,
+ bool AllowNonLiteralTypes) {
+ if (!AllowNonLiteralTypes && !CheckLiteralType(Info, E))
+ return false;
+
+ if (E->isRValue()) {
+ // Evaluate arrays and record types in-place, so that later initializers can
+ // refer to earlier-initialized members of the object.
+ if (E->getType()->isArrayType())
+ return EvaluateArray(E, This, Result, Info);
+ else if (E->getType()->isRecordType())
+ return EvaluateRecord(E, This, Result, Info);
+ }
+
+ // For any other type, in-place evaluation is unimportant.
+ return Evaluate(Result, Info, E);
+}
+
+/// EvaluateAsRValue - Try to evaluate this expression, performing an implicit
+/// lvalue-to-rvalue cast if it is an lvalue.
+static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result) {
+ if (!CheckLiteralType(Info, E))
+ return false;
+
+ if (!::Evaluate(Result, Info, E))
+ return false;
+
+ if (E->isGLValue()) {
+ LValue LV;
+ LV.setFrom(Info.Ctx, Result);
+ if (!HandleLValueToRValueConversion(Info, E, E->getType(), LV, Result))
+ return false;
+ }
+
+ // Check this core constant expression is a constant expression.
+ return CheckConstantExpression(Info, E->getExprLoc(), E->getType(), Result);
+}
+
+/// EvaluateAsRValue - Return true if this is a constant which we can fold using
+/// any crazy technique (that has nothing to do with language standards) that
+/// we want to. If this function returns true, it returns the folded constant
+/// in Result. If this expression is a glvalue, an lvalue-to-rvalue conversion
+/// will be applied to the result.
+bool Expr::EvaluateAsRValue(EvalResult &Result, const ASTContext &Ctx) const {
+ // Fast-path evaluations of integer literals, since we sometimes see files
+ // containing vast quantities of these.
+ if (const IntegerLiteral *L = dyn_cast<IntegerLiteral>(this)) {
+ Result.Val = APValue(APSInt(L->getValue(),
+ L->getType()->isUnsignedIntegerType()));
+ return true;
+ }
+
+ // FIXME: Evaluating values of large array and record types can cause
+ // performance problems. Only do so in C++11 for now.
+ if (isRValue() && (getType()->isArrayType() || getType()->isRecordType()) &&
+ !Ctx.getLangOpts().CPlusPlus0x)
+ return false;
+
+ EvalInfo Info(Ctx, Result);
+ return ::EvaluateAsRValue(Info, this, Result.Val);
+}
+
+bool Expr::EvaluateAsBooleanCondition(bool &Result,
+ const ASTContext &Ctx) const {
+ EvalResult Scratch;
+ return EvaluateAsRValue(Scratch, Ctx) &&
+ HandleConversionToBool(Scratch.Val, Result);
+}
+
+bool Expr::EvaluateAsInt(APSInt &Result, const ASTContext &Ctx,
+ SideEffectsKind AllowSideEffects) const {
+ if (!getType()->isIntegralOrEnumerationType())
+ return false;
+
+ EvalResult ExprResult;
+ if (!EvaluateAsRValue(ExprResult, Ctx) || !ExprResult.Val.isInt() ||
+ (!AllowSideEffects && ExprResult.HasSideEffects))
+ return false;
+
+ Result = ExprResult.Val.getInt();
+ return true;
+}
+
+bool Expr::EvaluateAsLValue(EvalResult &Result, const ASTContext &Ctx) const {
+ EvalInfo Info(Ctx, Result);
+
+ LValue LV;
+ if (!EvaluateLValue(this, LV, Info) || Result.HasSideEffects ||
+ !CheckLValueConstantExpression(Info, getExprLoc(),
+ Ctx.getLValueReferenceType(getType()), LV))
+ return false;
+
+ LV.moveInto(Result.Val);
+ return true;
+}
+
+bool Expr::EvaluateAsInitializer(APValue &Value, const ASTContext &Ctx,
+ const VarDecl *VD,
+ llvm::SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
+ // FIXME: Evaluating initializers for large array and record types can cause
+ // performance problems. Only do so in C++11 for now.
+ if (isRValue() && (getType()->isArrayType() || getType()->isRecordType()) &&
+ !Ctx.getLangOpts().CPlusPlus0x)
+ return false;
+
+ Expr::EvalStatus EStatus;
+ EStatus.Diag = &Notes;
+
+ EvalInfo InitInfo(Ctx, EStatus);
+ InitInfo.setEvaluatingDecl(VD, Value);
+
+ LValue LVal;
+ LVal.set(VD);
+
+ // C++11 [basic.start.init]p2:
+ // Variables with static storage duration or thread storage duration shall be
+ // zero-initialized before any other initialization takes place.
+ // This behavior is not present in C.
+ if (Ctx.getLangOpts().CPlusPlus && !VD->hasLocalStorage() &&
+ !VD->getType()->isReferenceType()) {
+ ImplicitValueInitExpr VIE(VD->getType());
+ if (!EvaluateInPlace(Value, InitInfo, LVal, &VIE, CCEK_Constant,
+ /*AllowNonLiteralTypes=*/true))
+ return false;
+ }
+
+ if (!EvaluateInPlace(Value, InitInfo, LVal, this, CCEK_Constant,
+ /*AllowNonLiteralTypes=*/true) ||
+ EStatus.HasSideEffects)
+ return false;
+
+ return CheckConstantExpression(InitInfo, VD->getLocation(), VD->getType(),
+ Value);
+}
+
+/// isEvaluatable - Call EvaluateAsRValue to see if this expression can be
+/// constant folded, but discard the result.
+bool Expr::isEvaluatable(const ASTContext &Ctx) const {
+ EvalResult Result;
+ return EvaluateAsRValue(Result, Ctx) && !Result.HasSideEffects;
+}
+
+bool Expr::HasSideEffects(const ASTContext &Ctx) const {
+ return HasSideEffect(Ctx).Visit(this);
+}
+
+APSInt Expr::EvaluateKnownConstInt(const ASTContext &Ctx) const {
+ EvalResult EvalResult;
+ bool Result = EvaluateAsRValue(EvalResult, Ctx);
+ (void)Result;
+ assert(Result && "Could not evaluate expression");
+ assert(EvalResult.Val.isInt() && "Expression did not evaluate to integer");
+
+ return EvalResult.Val.getInt();
+}
+
+bool Expr::EvalResult::isGlobalLValue() const {
+  assert(Val.isLValue());
+  return IsGlobalLValue(Val.getLValueBase());
+}
+
+/// isIntegerConstantExpr - This recursive routine tests whether an expression
+/// is an integer constant expression.
+
+/// FIXME: Pass up a reason why! Invalid operation in i-c-e, division by zero,
+/// comma, etc.
+///
+/// FIXME: Handle offsetof. Two things to do: Handle GCC's __builtin_offsetof
+/// to support gcc 4.0+ and handle the idiom GCC recognizes with a null pointer
+/// cast+dereference.
+
+// CheckICE - This function does the fundamental ICE checking: the returned
+// ICEDiag contains a Val of 0, 1, or 2, and a possibly null SourceLocation.
+// Note that to reduce code duplication, this helper does no evaluation
+// itself; the caller checks whether the expression is evaluatable, and
+// in the rare cases where CheckICE actually cares about the evaluated
+// value, it calls into Evaluate.
+//
+// Meanings of Val:
+// 0: This expression is an ICE.
+// 1: This expression is not an ICE, but if it isn't evaluated, it's
+// a legal subexpression for an ICE. This return value is used to handle
+// the comma operator in C99 mode.
+// 2: This expression is not an ICE, and is not a legal subexpression for one.
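+//
+// Illustrative examples of how these codes apply (derived from the rules
+// below):
+//   sizeof(int) + 1          -> 0 (an ICE)
+//   (0, 1)                   -> 1 in C99 (legal only while unevaluated)
+//   g() for a non-builtin g  -> 2 (never a legal ICE subexpression)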
+
+namespace {
+
+struct ICEDiag {
+ unsigned Val;
+ SourceLocation Loc;
+
+ public:
+ ICEDiag(unsigned v, SourceLocation l) : Val(v), Loc(l) {}
+ ICEDiag() : Val(0) {}
+};
+
+}
+
+static ICEDiag NoDiag() { return ICEDiag(); }
+
+static ICEDiag CheckEvalInICE(const Expr* E, ASTContext &Ctx) {
+ Expr::EvalResult EVResult;
+ if (!E->EvaluateAsRValue(EVResult, Ctx) || EVResult.HasSideEffects ||
+ !EVResult.Val.isInt()) {
+ return ICEDiag(2, E->getLocStart());
+ }
+ return NoDiag();
+}
+
+static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
+ assert(!E->isValueDependent() && "Should not see value dependent exprs!");
+ if (!E->getType()->isIntegralOrEnumerationType()) {
+ return ICEDiag(2, E->getLocStart());
+ }
+
+ switch (E->getStmtClass()) {
+#define ABSTRACT_STMT(Node)
+#define STMT(Node, Base) case Expr::Node##Class:
+#define EXPR(Node, Base)
+#include "clang/AST/StmtNodes.inc"
+ case Expr::PredefinedExprClass:
+ case Expr::FloatingLiteralClass:
+ case Expr::ImaginaryLiteralClass:
+ case Expr::StringLiteralClass:
+ case Expr::ArraySubscriptExprClass:
+ case Expr::MemberExprClass:
+ case Expr::CompoundAssignOperatorClass:
+ case Expr::CompoundLiteralExprClass:
+ case Expr::ExtVectorElementExprClass:
+ case Expr::DesignatedInitExprClass:
+ case Expr::ImplicitValueInitExprClass:
+ case Expr::ParenListExprClass:
+ case Expr::VAArgExprClass:
+ case Expr::AddrLabelExprClass:
+ case Expr::StmtExprClass:
+ case Expr::CXXMemberCallExprClass:
+ case Expr::CUDAKernelCallExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXTypeidExprClass:
+ case Expr::CXXUuidofExprClass:
+ case Expr::CXXNullPtrLiteralExprClass:
+ case Expr::UserDefinedLiteralClass:
+ case Expr::CXXThisExprClass:
+ case Expr::CXXThrowExprClass:
+ case Expr::CXXNewExprClass:
+ case Expr::CXXDeleteExprClass:
+ case Expr::CXXPseudoDestructorExprClass:
+ case Expr::UnresolvedLookupExprClass:
+ case Expr::DependentScopeDeclRefExprClass:
+ case Expr::CXXConstructExprClass:
+ case Expr::CXXBindTemporaryExprClass:
+ case Expr::ExprWithCleanupsClass:
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXUnresolvedConstructExprClass:
+ case Expr::CXXDependentScopeMemberExprClass:
+ case Expr::UnresolvedMemberExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCNumericLiteralClass:
+ case Expr::ObjCArrayLiteralClass:
+ case Expr::ObjCDictionaryLiteralClass:
+ case Expr::ObjCEncodeExprClass:
+ case Expr::ObjCMessageExprClass:
+ case Expr::ObjCSelectorExprClass:
+ case Expr::ObjCProtocolExprClass:
+ case Expr::ObjCIvarRefExprClass:
+ case Expr::ObjCPropertyRefExprClass:
+ case Expr::ObjCSubscriptRefExprClass:
+ case Expr::ObjCIsaExprClass:
+ case Expr::ShuffleVectorExprClass:
+ case Expr::BlockExprClass:
+ case Expr::NoStmtClass:
+ case Expr::OpaqueValueExprClass:
+ case Expr::PackExpansionExprClass:
+ case Expr::SubstNonTypeTemplateParmPackExprClass:
+ case Expr::AsTypeExprClass:
+ case Expr::ObjCIndirectCopyRestoreExprClass:
+ case Expr::MaterializeTemporaryExprClass:
+ case Expr::PseudoObjectExprClass:
+ case Expr::AtomicExprClass:
+ case Expr::InitListExprClass:
+ case Expr::LambdaExprClass:
+ return ICEDiag(2, E->getLocStart());
+
+ case Expr::SizeOfPackExprClass:
+ case Expr::GNUNullExprClass:
+ // GCC considers the GNU __null value to be an integral constant expression.
+ return NoDiag();
+
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ return
+ CheckICE(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(), Ctx);
+
+ case Expr::ParenExprClass:
+ return CheckICE(cast<ParenExpr>(E)->getSubExpr(), Ctx);
+ case Expr::GenericSelectionExprClass:
+ return CheckICE(cast<GenericSelectionExpr>(E)->getResultExpr(), Ctx);
+ case Expr::IntegerLiteralClass:
+ case Expr::CharacterLiteralClass:
+ case Expr::ObjCBoolLiteralExprClass:
+ case Expr::CXXBoolLiteralExprClass:
+ case Expr::CXXScalarValueInitExprClass:
+ case Expr::UnaryTypeTraitExprClass:
+ case Expr::BinaryTypeTraitExprClass:
+ case Expr::TypeTraitExprClass:
+ case Expr::ArrayTypeTraitExprClass:
+ case Expr::ExpressionTraitExprClass:
+ case Expr::CXXNoexceptExprClass:
+ return NoDiag();
+ case Expr::CallExprClass:
+ case Expr::CXXOperatorCallExprClass: {
+ // C99 6.6/3 allows function calls within unevaluated subexpressions of
+ // constant expressions, but they can never be ICEs because an ICE cannot
+ // contain an operand of (pointer to) function type.
+ const CallExpr *CE = cast<CallExpr>(E);
+ if (CE->isBuiltinCall())
+ return CheckEvalInICE(E, Ctx);
+ return ICEDiag(2, E->getLocStart());
+ }
+ case Expr::DeclRefExprClass: {
+ if (isa<EnumConstantDecl>(cast<DeclRefExpr>(E)->getDecl()))
+ return NoDiag();
+ const ValueDecl *D = dyn_cast<ValueDecl>(cast<DeclRefExpr>(E)->getDecl());
+ if (Ctx.getLangOpts().CPlusPlus &&
+ D && IsConstNonVolatile(D->getType())) {
+ // Parameter variables are never constants. Without this check,
+ // getAnyInitializer() can find a default argument, which leads
+ // to chaos.
+ if (isa<ParmVarDecl>(D))
+ return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
+
+ // C++ 7.1.5.1p2
+ // A variable of non-volatile const-qualified integral or enumeration
+ // type initialized by an ICE can be used in ICEs.
+ if (const VarDecl *Dcl = dyn_cast<VarDecl>(D)) {
+ if (!Dcl->getType()->isIntegralOrEnumerationType())
+ return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
+
+ const VarDecl *VD;
+ // Look for a declaration of this variable that has an initializer, and
+ // check whether it is an ICE.
+ if (Dcl->getAnyInitializer(VD) && VD->checkInitIsICE())
+ return NoDiag();
+ else
+ return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
+ }
+ }
+ return ICEDiag(2, E->getLocStart());
+ }
+ case Expr::UnaryOperatorClass: {
+ const UnaryOperator *Exp = cast<UnaryOperator>(E);
+ switch (Exp->getOpcode()) {
+ case UO_PostInc:
+ case UO_PostDec:
+ case UO_PreInc:
+ case UO_PreDec:
+ case UO_AddrOf:
+ case UO_Deref:
+ // C99 6.6/3 allows increment and decrement within unevaluated
+ // subexpressions of constant expressions, but they can never be ICEs
+ // because an ICE cannot contain an lvalue operand.
+ return ICEDiag(2, E->getLocStart());
+ case UO_Extension:
+ case UO_LNot:
+ case UO_Plus:
+ case UO_Minus:
+ case UO_Not:
+ case UO_Real:
+ case UO_Imag:
+ return CheckICE(Exp->getSubExpr(), Ctx);
+ }
+
+ // OffsetOf falls through here.
+ }
+ case Expr::OffsetOfExprClass: {
+ // Note that per C99, offsetof must be an ICE. And AFAIK, using
+ // EvaluateAsRValue matches the proposed gcc behavior for cases like
+ // "offsetof(struct s{int x[4];}, x[1.0])". This doesn't affect
+ // compliance: we should warn earlier for offsetof expressions with
+ // array subscripts that aren't ICEs, and if the array subscripts
+ // are ICEs, the value of the offsetof must be an integer constant.
+ return CheckEvalInICE(E, Ctx);
+ }
+ case Expr::UnaryExprOrTypeTraitExprClass: {
+ const UnaryExprOrTypeTraitExpr *Exp = cast<UnaryExprOrTypeTraitExpr>(E);
+ if ((Exp->getKind() == UETT_SizeOf) &&
+ Exp->getTypeOfArgument()->isVariableArrayType())
+ return ICEDiag(2, E->getLocStart());
+ return NoDiag();
+ }
+ case Expr::BinaryOperatorClass: {
+ const BinaryOperator *Exp = cast<BinaryOperator>(E);
+ switch (Exp->getOpcode()) {
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ case BO_Assign:
+ case BO_MulAssign:
+ case BO_DivAssign:
+ case BO_RemAssign:
+ case BO_AddAssign:
+ case BO_SubAssign:
+ case BO_ShlAssign:
+ case BO_ShrAssign:
+ case BO_AndAssign:
+ case BO_XorAssign:
+ case BO_OrAssign:
+ // C99 6.6/3 allows assignments within unevaluated subexpressions of
+ // constant expressions, but they can never be ICEs because an ICE cannot
+ // contain an lvalue operand.
+ return ICEDiag(2, E->getLocStart());
+
+ case BO_Mul:
+ case BO_Div:
+ case BO_Rem:
+ case BO_Add:
+ case BO_Sub:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ:
+ case BO_NE:
+ case BO_And:
+ case BO_Xor:
+ case BO_Or:
+ case BO_Comma: {
+ ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
+ ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
+ if (Exp->getOpcode() == BO_Div ||
+ Exp->getOpcode() == BO_Rem) {
+ // EvaluateAsRValue gives an error for undefined Div/Rem, so make sure
+ // we don't evaluate one.
+ if (LHSResult.Val == 0 && RHSResult.Val == 0) {
+ llvm::APSInt REval = Exp->getRHS()->EvaluateKnownConstInt(Ctx);
+ if (REval == 0)
+ return ICEDiag(1, E->getLocStart());
+ if (REval.isSigned() && REval.isAllOnesValue()) {
+ llvm::APSInt LEval = Exp->getLHS()->EvaluateKnownConstInt(Ctx);
+ if (LEval.isMinSignedValue())
+ return ICEDiag(1, E->getLocStart());
+ }
+ }
+ }
+ if (Exp->getOpcode() == BO_Comma) {
+ if (Ctx.getLangOpts().C99) {
+ // C99 6.6p3 introduces a strange edge case: comma can be in an ICE
+ // if it isn't evaluated.
+ if (LHSResult.Val == 0 && RHSResult.Val == 0)
+ return ICEDiag(1, E->getLocStart());
+ } else {
+ // In both C89 and C++, commas in ICEs are illegal.
+ return ICEDiag(2, E->getLocStart());
+ }
+ }
+ if (LHSResult.Val >= RHSResult.Val)
+ return LHSResult;
+ return RHSResult;
+ }
+ case BO_LAnd:
+ case BO_LOr: {
+ ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
+ ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
+ if (LHSResult.Val == 0 && RHSResult.Val == 1) {
+ // Rare case where the RHS has a comma "side-effect"; we need
+ // to actually check the condition to see whether the side
+ // with the comma is evaluated.
+ if ((Exp->getOpcode() == BO_LAnd) !=
+ (Exp->getLHS()->EvaluateKnownConstInt(Ctx) == 0))
+ return RHSResult;
+ return NoDiag();
+ }
+
+ if (LHSResult.Val >= RHSResult.Val)
+ return LHSResult;
+ return RHSResult;
+ }
+ }
+ }
+ case Expr::ImplicitCastExprClass:
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ case Expr::ObjCBridgedCastExprClass: {
+ const Expr *SubExpr = cast<CastExpr>(E)->getSubExpr();
+ if (isa<ExplicitCastExpr>(E)) {
+ if (const FloatingLiteral *FL
+ = dyn_cast<FloatingLiteral>(SubExpr->IgnoreParenImpCasts())) {
+ unsigned DestWidth = Ctx.getIntWidth(E->getType());
+ bool DestSigned = E->getType()->isSignedIntegerOrEnumerationType();
+ APSInt IgnoredVal(DestWidth, !DestSigned);
+ bool Ignored;
+ // If the value does not fit in the destination type, the behavior is
+ // undefined, so we are not required to treat it as a constant
+ // expression.
+ if (FL->getValue().convertToInteger(IgnoredVal,
+ llvm::APFloat::rmTowardZero,
+ &Ignored) & APFloat::opInvalidOp)
+ return ICEDiag(2, E->getLocStart());
+ return NoDiag();
+ }
+ }
+ switch (cast<CastExpr>(E)->getCastKind()) {
+ case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ case CK_IntegralToBoolean:
+ case CK_IntegralCast:
+ return CheckICE(SubExpr, Ctx);
+ default:
+ return ICEDiag(2, E->getLocStart());
+ }
+ }
+ case Expr::BinaryConditionalOperatorClass: {
+ const BinaryConditionalOperator *Exp = cast<BinaryConditionalOperator>(E);
+ ICEDiag CommonResult = CheckICE(Exp->getCommon(), Ctx);
+ if (CommonResult.Val == 2) return CommonResult;
+ ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
+ if (FalseResult.Val == 2) return FalseResult;
+ if (CommonResult.Val == 1) return CommonResult;
+ if (FalseResult.Val == 1 &&
+ Exp->getCommon()->EvaluateKnownConstInt(Ctx) == 0) return NoDiag();
+ return FalseResult;
+ }
+ case Expr::ConditionalOperatorClass: {
+ const ConditionalOperator *Exp = cast<ConditionalOperator>(E);
+ // If the condition (ignoring parens) is a __builtin_constant_p call,
+ // then only the true side is actually considered in an integer constant
+ // expression, and it is fully evaluated. This is an important GNU
+ // extension. See GCC PR38377 for discussion.
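+    // For example, '__builtin_constant_p(1) ? 42 : f()' is accepted as an ICE
+    // with value 42 here, even though the call to f() on the untaken branch
+    // would otherwise disqualify the expression.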
+ if (const CallExpr *CallCE
+ = dyn_cast<CallExpr>(Exp->getCond()->IgnoreParenCasts()))
+ if (CallCE->isBuiltinCall() == Builtin::BI__builtin_constant_p)
+ return CheckEvalInICE(E, Ctx);
+ ICEDiag CondResult = CheckICE(Exp->getCond(), Ctx);
+ if (CondResult.Val == 2)
+ return CondResult;
+
+ ICEDiag TrueResult = CheckICE(Exp->getTrueExpr(), Ctx);
+ ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
+
+ if (TrueResult.Val == 2)
+ return TrueResult;
+ if (FalseResult.Val == 2)
+ return FalseResult;
+ if (CondResult.Val == 1)
+ return CondResult;
+ if (TrueResult.Val == 0 && FalseResult.Val == 0)
+ return NoDiag();
+  // Rare case where the diagnostics depend on which side is evaluated.
+ // Note that if we get here, CondResult is 0, and at least one of
+ // TrueResult and FalseResult is non-zero.
+ if (Exp->getCond()->EvaluateKnownConstInt(Ctx) == 0) {
+ return FalseResult;
+ }
+ return TrueResult;
+ }
+ case Expr::CXXDefaultArgExprClass:
+ return CheckICE(cast<CXXDefaultArgExpr>(E)->getExpr(), Ctx);
+ case Expr::ChooseExprClass: {
+ return CheckICE(cast<ChooseExpr>(E)->getChosenSubExpr(Ctx), Ctx);
+ }
+ }
+
+ llvm_unreachable("Invalid StmtClass!");
+}
+
+/// Evaluate an expression as a C++11 integral constant expression.
+static bool EvaluateCPlusPlus11IntegralConstantExpr(ASTContext &Ctx,
+ const Expr *E,
+ llvm::APSInt *Value,
+ SourceLocation *Loc) {
+ if (!E->getType()->isIntegralOrEnumerationType()) {
+ if (Loc) *Loc = E->getExprLoc();
+ return false;
+ }
+
+ APValue Result;
+ if (!E->isCXX11ConstantExpr(Ctx, &Result, Loc))
+ return false;
+
+ assert(Result.isInt() && "pointer cast to int is not an ICE");
+ if (Value) *Value = Result.getInt();
+ return true;
+}
+
+bool Expr::isIntegerConstantExpr(ASTContext &Ctx, SourceLocation *Loc) const {
+ if (Ctx.getLangOpts().CPlusPlus0x)
+ return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, 0, Loc);
+
+ ICEDiag d = CheckICE(this, Ctx);
+ if (d.Val != 0) {
+ if (Loc) *Loc = d.Loc;
+ return false;
+ }
+ return true;
+}
+
+bool Expr::isIntegerConstantExpr(llvm::APSInt &Value, ASTContext &Ctx,
+ SourceLocation *Loc, bool isEvaluated) const {
+ if (Ctx.getLangOpts().CPlusPlus0x)
+ return EvaluateCPlusPlus11IntegralConstantExpr(Ctx, this, &Value, Loc);
+
+ if (!isIntegerConstantExpr(Ctx, Loc))
+ return false;
+ if (!EvaluateAsInt(Value, Ctx))
+ llvm_unreachable("ICE cannot be evaluated!");
+ return true;
+}
+
+bool Expr::isCXX98IntegralConstantExpr(ASTContext &Ctx) const {
+ return CheckICE(this, Ctx).Val == 0;
+}
+
+bool Expr::isCXX11ConstantExpr(ASTContext &Ctx, APValue *Result,
+ SourceLocation *Loc) const {
+ // We support this checking in C++98 mode in order to diagnose compatibility
+ // issues.
+ assert(Ctx.getLangOpts().CPlusPlus);
+
+ // Build evaluation settings.
+ Expr::EvalStatus Status;
+ llvm::SmallVector<PartialDiagnosticAt, 8> Diags;
+ Status.Diag = &Diags;
+ EvalInfo Info(Ctx, Status);
+
+ APValue Scratch;
+ bool IsConstExpr = ::EvaluateAsRValue(Info, this, Result ? *Result : Scratch);
+
+ if (!Diags.empty()) {
+ IsConstExpr = false;
+ if (Loc) *Loc = Diags[0].first;
+ } else if (!IsConstExpr) {
+ // FIXME: This shouldn't happen.
+ if (Loc) *Loc = getExprLoc();
+ }
+
+ return IsConstExpr;
+}
+
+bool Expr::isPotentialConstantExpr(const FunctionDecl *FD,
+ llvm::SmallVectorImpl<
+ PartialDiagnosticAt> &Diags) {
+ // FIXME: It would be useful to check constexpr function templates, but at the
+ // moment the constant expression evaluator cannot cope with the non-rigorous
+ // ASTs which we build for dependent expressions.
+ if (FD->isDependentContext())
+ return true;
+
+ Expr::EvalStatus Status;
+ Status.Diag = &Diags;
+
+ EvalInfo Info(FD->getASTContext(), Status);
+ Info.CheckingPotentialConstantExpression = true;
+
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ const CXXRecordDecl *RD = MD ? MD->getParent()->getCanonicalDecl() : 0;
+
+ // FIXME: Fabricate an arbitrary expression on the stack and pretend that it
+ // is a temporary being used as the 'this' pointer.
+ LValue This;
+ ImplicitValueInitExpr VIE(RD ? Info.Ctx.getRecordType(RD) : Info.Ctx.IntTy);
+ This.set(&VIE, Info.CurrentCall->Index);
+
+ ArrayRef<const Expr*> Args;
+
+ SourceLocation Loc = FD->getLocation();
+
+ APValue Scratch;
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
+ HandleConstructorCall(Loc, This, Args, CD, Info, Scratch);
+ else
+ HandleFunctionCall(Loc, FD, (MD && MD->isInstance()) ? &This : 0,
+ Args, FD->getBody(), Info, Scratch);
+
+ return Diags.empty();
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ExternalASTSource.cpp b/contrib/llvm/tools/clang/lib/AST/ExternalASTSource.cpp
new file mode 100644
index 0000000..fd616db
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ExternalASTSource.cpp
@@ -0,0 +1,59 @@
+//===- ExternalASTSource.cpp - Abstract External AST Interface --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides the default implementation of the ExternalASTSource
+// interface, which enables construction of AST nodes from some external
+// source.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/DeclarationName.h"
+
+using namespace clang;
+
+ExternalASTSource::~ExternalASTSource() { }
+
+void ExternalASTSource::PrintStats() { }
+
+Decl *ExternalASTSource::GetExternalDecl(uint32_t ID) {
+ return 0;
+}
+
+Selector ExternalASTSource::GetExternalSelector(uint32_t ID) {
+ return Selector();
+}
+
+uint32_t ExternalASTSource::GetNumExternalSelectors() {
+ return 0;
+}
+
+Stmt *ExternalASTSource::GetExternalDeclStmt(uint64_t Offset) {
+ return 0;
+}
+
+CXXBaseSpecifier *
+ExternalASTSource::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
+ return 0;
+}
+
+DeclContextLookupResult
+ExternalASTSource::FindExternalVisibleDeclsByName(const DeclContext *DC,
+ DeclarationName Name) {
+ return DeclContext::lookup_result();
+}
+
+ExternalLoadResult
+ExternalASTSource::FindExternalLexicalDecls(const DeclContext *DC,
+ bool (*isKindWeWant)(Decl::Kind),
+ SmallVectorImpl<Decl*> &Result) {
+ return ELR_AlreadyLoaded;
+}
+
+void ExternalASTSource::getMemoryBufferSizes(MemoryBufferSizes &sizes) const { }
diff --git a/contrib/llvm/tools/clang/lib/AST/InheritViz.cpp b/contrib/llvm/tools/clang/lib/AST/InheritViz.cpp
new file mode 100644
index 0000000..b70520f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/InheritViz.cpp
@@ -0,0 +1,168 @@
+//===- InheritViz.cpp - Graphviz visualization for inheritance --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements CXXRecordDecl::viewInheritance, which
+// generates a GraphViz DOT file that depicts the class inheritance
+// diagram and then calls Graphviz/dot+gv on it.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/TypeOrdering.h"
+#include "llvm/Support/GraphWriter.h"
+#include "llvm/Support/raw_ostream.h"
+#include <map>
+
+using namespace llvm;
+
+namespace clang {
+
+/// InheritanceHierarchyWriter - Helper class that writes out a
+/// GraphViz file that diagrams the inheritance hierarchy starting at
+/// a given C++ class type. Note that we do not use LLVM's
+/// GraphWriter, because the interface does not permit us to properly
+/// differentiate between uses of types as virtual bases
+/// vs. non-virtual bases.
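+///
+/// The emitted output looks roughly like this (illustrative only):
+///   digraph "D" {
+///     Class_0x12345_0 [ shape="box", label="D" ];
+///     ...
+///   }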
+class InheritanceHierarchyWriter {
+ ASTContext& Context;
+ raw_ostream &Out;
+ std::map<QualType, int, QualTypeOrdering> DirectBaseCount;
+ std::set<QualType, QualTypeOrdering> KnownVirtualBases;
+
+public:
+ InheritanceHierarchyWriter(ASTContext& Context, raw_ostream& Out)
+ : Context(Context), Out(Out) { }
+
+ void WriteGraph(QualType Type) {
+ Out << "digraph \"" << DOT::EscapeString(Type.getAsString()) << "\" {\n";
+ WriteNode(Type, false);
+ Out << "}\n";
+ }
+
+protected:
+ /// WriteNode - Write out the description of node in the inheritance
+ /// diagram, which may be a base class or it may be the root node.
+ void WriteNode(QualType Type, bool FromVirtual);
+
+ /// WriteNodeReference - Write out a reference to the given node,
+ /// using a unique identifier for each direct base and for the
+ /// (only) virtual base.
+ raw_ostream& WriteNodeReference(QualType Type, bool FromVirtual);
+};
+
+void InheritanceHierarchyWriter::WriteNode(QualType Type, bool FromVirtual) {
+ QualType CanonType = Context.getCanonicalType(Type);
+
+ if (FromVirtual) {
+ if (KnownVirtualBases.find(CanonType) != KnownVirtualBases.end())
+ return;
+
+ // We haven't seen this virtual base before, so display it and
+ // its bases.
+ KnownVirtualBases.insert(CanonType);
+ }
+
+ // Declare the node itself.
+ Out << " ";
+ WriteNodeReference(Type, FromVirtual);
+
+ // Give the node a label based on the name of the class.
+ std::string TypeName = Type.getAsString();
+ Out << " [ shape=\"box\", label=\"" << DOT::EscapeString(TypeName);
+
+ // If the name of the class was a typedef or something different
+ // from the "real" class name, show the real class name in
+ // parentheses so we don't confuse ourselves.
+ if (TypeName != CanonType.getAsString()) {
+ Out << "\\n(" << CanonType.getAsString() << ")";
+ }
+
+ // Finished describing the node.
+ Out << " \"];\n";
+
+ // Display the base classes.
+ const CXXRecordDecl *Decl
+ = static_cast<const CXXRecordDecl *>(Type->getAs<RecordType>()->getDecl());
+ for (CXXRecordDecl::base_class_const_iterator Base = Decl->bases_begin();
+ Base != Decl->bases_end(); ++Base) {
+ QualType CanonBaseType = Context.getCanonicalType(Base->getType());
+
+ // If this is not virtual inheritance, bump the direct base
+ // count for the type.
+ if (!Base->isVirtual())
+ ++DirectBaseCount[CanonBaseType];
+
+ // Write out the node (if we need to).
+ WriteNode(Base->getType(), Base->isVirtual());
+
+ // Write out the edge.
+ Out << " ";
+ WriteNodeReference(Type, FromVirtual);
+ Out << " -> ";
+ WriteNodeReference(Base->getType(), Base->isVirtual());
+
+ // Write out edge attributes to show the kind of inheritance.
+ if (Base->isVirtual()) {
+ Out << " [ style=\"dashed\" ]";
+ }
+ Out << ";";
+ }
+}
+
+/// WriteNodeReference - Write out a reference to the given node,
+/// using a unique identifier for each direct base and for the
+/// (only) virtual base.
+raw_ostream&
+InheritanceHierarchyWriter::WriteNodeReference(QualType Type,
+ bool FromVirtual) {
+ QualType CanonType = Context.getCanonicalType(Type);
+
+ Out << "Class_" << CanonType.getAsOpaquePtr();
+ if (!FromVirtual)
+ Out << "_" << DirectBaseCount[CanonType];
+ return Out;
+}
+
+/// viewInheritance - Display the inheritance hierarchy of this C++
+/// class using GraphViz.
+void CXXRecordDecl::viewInheritance(ASTContext& Context) const {
+ QualType Self = Context.getTypeDeclType(const_cast<CXXRecordDecl *>(this));
+ std::string ErrMsg;
+ sys::Path Filename = sys::Path::GetTemporaryDirectory(&ErrMsg);
+ if (Filename.isEmpty()) {
+ llvm::errs() << "Error: " << ErrMsg << "\n";
+ return;
+ }
+ Filename.appendComponent(Self.getAsString() + ".dot");
+ if (Filename.makeUnique(true,&ErrMsg)) {
+ llvm::errs() << "Error: " << ErrMsg << "\n";
+ return;
+ }
+
+ llvm::errs() << "Writing '" << Filename.c_str() << "'... ";
+
+ llvm::raw_fd_ostream O(Filename.c_str(), ErrMsg);
+
+ if (ErrMsg.empty()) {
+ InheritanceHierarchyWriter Writer(Context, O);
+ Writer.WriteGraph(Self);
+ llvm::errs() << " done. \n";
+
+ O.close();
+
+ // Display the graph
+ DisplayGraph(Filename);
+ } else {
+ llvm::errs() << "error opening file for writing!\n";
+ }
+}
+
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp b/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp
new file mode 100644
index 0000000..0027dbf
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ItaniumCXXABI.cpp
@@ -0,0 +1,73 @@
+//===------- ItaniumCXXABI.cpp - AST support for the Itanium C++ ABI ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ AST support targeting the Itanium C++ ABI, which is
+// documented at:
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+// http://www.codesourcery.com/public/cxx-abi/abi-eh.html
+//
+// It also supports the closely-related ARM C++ ABI, documented at:
+// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
+//
+//===----------------------------------------------------------------------===//
+
+#include "CXXABI.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/TargetInfo.h"
+
+using namespace clang;
+
+namespace {
+class ItaniumCXXABI : public CXXABI {
+protected:
+ ASTContext &Context;
+public:
+ ItaniumCXXABI(ASTContext &Ctx) : Context(Ctx) { }
+
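+  // Under the Itanium ABI a pointer to member function is a {pointer,
+  // adjustment} pair (two pointer-sized words), while a pointer to data
+  // member is a single offset, hence the 2 vs. 1 below.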
+ unsigned getMemberPointerSize(const MemberPointerType *MPT) const {
+ QualType Pointee = MPT->getPointeeType();
+ if (Pointee->isFunctionType()) return 2;
+ return 1;
+ }
+
+ CallingConv getDefaultMethodCallConv() const {
+ return CC_C;
+ }
+
+ // We cheat and just check that the class has a vtable pointer, and that it's
+ // only big enough to have a vtable pointer and nothing more (or less).
+ bool isNearlyEmpty(const CXXRecordDecl *RD) const {
+
+ // Check that the class has a vtable pointer.
+ if (!RD->isDynamicClass())
+ return false;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ CharUnits PointerSize =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ return Layout.getNonVirtualSize() == PointerSize;
+ }
+};
+
+class ARMCXXABI : public ItaniumCXXABI {
+public:
+ ARMCXXABI(ASTContext &Ctx) : ItaniumCXXABI(Ctx) { }
+};
+}
+
+CXXABI *clang::CreateItaniumCXXABI(ASTContext &Ctx) {
+ return new ItaniumCXXABI(Ctx);
+}
+
+CXXABI *clang::CreateARMCXXABI(ASTContext &Ctx) {
+ return new ARMCXXABI(Ctx);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
new file mode 100644
index 0000000..d7b6354
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ItaniumMangle.cpp
@@ -0,0 +1,3576 @@
+//===--- ItaniumMangle.cpp - Itanium C++ Name Mangling ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements C++ name mangling according to the Itanium C++ ABI,
+// which is used in GCC 3.2 and newer (and many compilers that are
+// ABI-compatible with GCC):
+//
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/ABI.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#define MANGLE_CHECKER 0
+
+#if MANGLE_CHECKER
+#include <cxxabi.h>
+#endif
+
+using namespace clang;
+
+namespace {
+
+/// \brief Retrieve the declaration context that should be used when mangling
+/// the given declaration.
+static const DeclContext *getEffectiveDeclContext(const Decl *D) {
+ // The ABI assumes that lambda closure types that occur within
+ // default arguments live in the context of the function. However, due to
+ // the way in which Clang parses and creates function declarations, this is
+ // not the case: the lambda closure type ends up living in the context
+ // where the function itself resides, because the function declaration itself
+ // had not yet been created. Fix the context here.
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (RD->isLambda())
+ if (ParmVarDecl *ContextParam
+ = dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl()))
+ return ContextParam->getDeclContext();
+ }
+
+ return D->getDeclContext();
+}
+
+static const DeclContext *getEffectiveParentContext(const DeclContext *DC) {
+ return getEffectiveDeclContext(cast<Decl>(DC));
+}
+
+static const CXXRecordDecl *GetLocalClassDecl(const NamedDecl *ND) {
+ const DeclContext *DC = dyn_cast<DeclContext>(ND);
+ if (!DC)
+ DC = getEffectiveDeclContext(ND);
+ while (!DC->isNamespace() && !DC->isTranslationUnit()) {
+ const DeclContext *Parent = getEffectiveDeclContext(cast<Decl>(DC));
+ if (isa<FunctionDecl>(Parent))
+ return dyn_cast<CXXRecordDecl>(DC);
+ DC = Parent;
+ }
+ return 0;
+}
+
+static const FunctionDecl *getStructor(const FunctionDecl *fn) {
+ if (const FunctionTemplateDecl *ftd = fn->getPrimaryTemplate())
+ return ftd->getTemplatedDecl();
+
+ return fn;
+}
+
+static const NamedDecl *getStructor(const NamedDecl *decl) {
+ const FunctionDecl *fn = dyn_cast_or_null<FunctionDecl>(decl);
+ return (fn ? getStructor(fn) : decl);
+}
+
+static const unsigned UnknownArity = ~0U;
+
+class ItaniumMangleContext : public MangleContext {
+ llvm::DenseMap<const TagDecl *, uint64_t> AnonStructIds;
+ unsigned Discriminator;
+ llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;
+
+public:
+ explicit ItaniumMangleContext(ASTContext &Context,
+ DiagnosticsEngine &Diags)
+ : MangleContext(Context, Diags) { }
+
+ uint64_t getAnonymousStructId(const TagDecl *TD) {
+ std::pair<llvm::DenseMap<const TagDecl *,
+ uint64_t>::iterator, bool> Result =
+ AnonStructIds.insert(std::make_pair(TD, AnonStructIds.size()));
+ return Result.first->second;
+ }
+
+ void startNewFunction() {
+ MangleContext::startNewFunction();
+ mangleInitDiscriminator();
+ }
+
+ /// @name Mangler Entry Points
+ /// @{
+
+ bool shouldMangleDeclName(const NamedDecl *D);
+ void mangleName(const NamedDecl *D, raw_ostream &);
+ void mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ raw_ostream &);
+ void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ raw_ostream &);
+ void mangleReferenceTemporary(const VarDecl *D,
+ raw_ostream &);
+ void mangleCXXVTable(const CXXRecordDecl *RD,
+ raw_ostream &);
+ void mangleCXXVTT(const CXXRecordDecl *RD,
+ raw_ostream &);
+ void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
+ const CXXRecordDecl *Type,
+ raw_ostream &);
+ void mangleCXXRTTI(QualType T, raw_ostream &);
+ void mangleCXXRTTIName(QualType T, raw_ostream &);
+ void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ raw_ostream &);
+ void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ raw_ostream &);
+
+ void mangleItaniumGuardVariable(const VarDecl *D, raw_ostream &);
+
+ void mangleInitDiscriminator() {
+ Discriminator = 0;
+ }
+
+ bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
+ // Lambda closure types with external linkage (indicated by a
+ // non-zero lambda mangling number) have their own numbering scheme, so
+ // they do not need a discriminator.
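+    // For example, the second of two block-scope statics sharing a name in
+    // the same function gets discriminator value 0 (mangled as a trailing
+    // "_0"); the first such entity gets no discriminator.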
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(ND))
+ if (RD->isLambda() && RD->getLambdaManglingNumber() > 0)
+ return false;
+
+ unsigned &discriminator = Uniquifier[ND];
+ if (!discriminator)
+ discriminator = ++Discriminator;
+ if (discriminator == 1)
+ return false;
+ disc = discriminator-2;
+ return true;
+ }
+ /// @}
+};
+
+/// CXXNameMangler - Manage the mangling of a single name.
+class CXXNameMangler {
+ ItaniumMangleContext &Context;
+ raw_ostream &Out;
+
+ /// The "structor" is the top-level declaration being mangled, if
+ /// that's not a template specialization; otherwise it's the pattern
+ /// for that specialization.
+ const NamedDecl *Structor;
+ unsigned StructorType;
+
+  /// SeqID - The next substitution sequence number.
+ unsigned SeqID;
+
+ class FunctionTypeDepthState {
+ unsigned Bits;
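+    // Bit 0 records whether we are inside the result type of the innermost
+    // function type; the remaining bits hold the nesting depth (push() adds
+    // 2, getDepth() shifts right by one).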
+
+ enum { InResultTypeMask = 1 };
+
+ public:
+ FunctionTypeDepthState() : Bits(0) {}
+
+ /// The number of function types we're inside.
+ unsigned getDepth() const {
+ return Bits >> 1;
+ }
+
+ /// True if we're in the return type of the innermost function type.
+ bool isInResultType() const {
+ return Bits & InResultTypeMask;
+ }
+
+ FunctionTypeDepthState push() {
+ FunctionTypeDepthState tmp = *this;
+ Bits = (Bits & ~InResultTypeMask) + 2;
+ return tmp;
+ }
+
+ void enterResultType() {
+ Bits |= InResultTypeMask;
+ }
+
+ void leaveResultType() {
+ Bits &= ~InResultTypeMask;
+ }
+
+ void pop(FunctionTypeDepthState saved) {
+ assert(getDepth() == saved.getDepth() + 1);
+ Bits = saved.Bits;
+ }
+
+ } FunctionTypeDepth;
+
+ llvm::DenseMap<uintptr_t, unsigned> Substitutions;
+
+ ASTContext &getASTContext() const { return Context.getASTContext(); }
+
+public:
+ CXXNameMangler(ItaniumMangleContext &C, raw_ostream &Out_,
+ const NamedDecl *D = 0)
+ : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(0),
+ SeqID(0) {
+ // These can't be mangled without a ctor type or dtor type.
+ assert(!D || (!isa<CXXDestructorDecl>(D) &&
+ !isa<CXXConstructorDecl>(D)));
+ }
+ CXXNameMangler(ItaniumMangleContext &C, raw_ostream &Out_,
+ const CXXConstructorDecl *D, CXXCtorType Type)
+ : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
+ SeqID(0) { }
+ CXXNameMangler(ItaniumMangleContext &C, raw_ostream &Out_,
+ const CXXDestructorDecl *D, CXXDtorType Type)
+ : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
+ SeqID(0) { }
+
+#if MANGLE_CHECKER
+ ~CXXNameMangler() {
+ if (Out.str()[0] == '\01')
+ return;
+
+ int status = 0;
+ char *result = abi::__cxa_demangle(Out.str().str().c_str(), 0, 0, &status);
+ assert(status == 0 && "Could not demangle mangled name!");
+ free(result);
+ }
+#endif
+ raw_ostream &getStream() { return Out; }
+
+ void mangle(const NamedDecl *D, StringRef Prefix = "_Z");
+ void mangleCallOffset(int64_t NonVirtual, int64_t Virtual);
+ void mangleNumber(const llvm::APSInt &I);
+ void mangleNumber(int64_t Number);
+ void mangleFloat(const llvm::APFloat &F);
+ void mangleFunctionEncoding(const FunctionDecl *FD);
+ void mangleName(const NamedDecl *ND);
+ void mangleType(QualType T);
+ void mangleNameOrStandardSubstitution(const NamedDecl *ND);
+
+private:
+ bool mangleSubstitution(const NamedDecl *ND);
+ bool mangleSubstitution(QualType T);
+ bool mangleSubstitution(TemplateName Template);
+ bool mangleSubstitution(uintptr_t Ptr);
+
+ void mangleExistingSubstitution(QualType type);
+ void mangleExistingSubstitution(TemplateName name);
+
+ bool mangleStandardSubstitution(const NamedDecl *ND);
+
+ void addSubstitution(const NamedDecl *ND) {
+ ND = cast<NamedDecl>(ND->getCanonicalDecl());
+
+ addSubstitution(reinterpret_cast<uintptr_t>(ND));
+ }
+ void addSubstitution(QualType T);
+ void addSubstitution(TemplateName Template);
+ void addSubstitution(uintptr_t Ptr);
+
+ void mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
+ NamedDecl *firstQualifierLookup,
+ bool recursive = false);
+ void mangleUnresolvedName(NestedNameSpecifier *qualifier,
+ NamedDecl *firstQualifierLookup,
+ DeclarationName name,
+ unsigned KnownArity = UnknownArity);
+
+ void mangleName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void mangleUnqualifiedName(const NamedDecl *ND) {
+ mangleUnqualifiedName(ND, ND->getDeclName(), UnknownArity);
+ }
+ void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name,
+ unsigned KnownArity);
+ void mangleUnscopedName(const NamedDecl *ND);
+ void mangleUnscopedTemplateName(const TemplateDecl *ND);
+ void mangleUnscopedTemplateName(TemplateName);
+ void mangleSourceName(const IdentifierInfo *II);
+ void mangleLocalName(const NamedDecl *ND);
+ void mangleLambda(const CXXRecordDecl *Lambda);
+ void mangleNestedName(const NamedDecl *ND, const DeclContext *DC,
+ bool NoFunction=false);
+ void mangleNestedName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void manglePrefix(NestedNameSpecifier *qualifier);
+ void manglePrefix(const DeclContext *DC, bool NoFunction=false);
+ void manglePrefix(QualType type);
+ void mangleTemplatePrefix(const TemplateDecl *ND);
+ void mangleTemplatePrefix(TemplateName Template);
+ void mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity);
+ void mangleQualifiers(Qualifiers Quals);
+ void mangleRefQualifier(RefQualifierKind RefQualifier);
+
+ void mangleObjCMethodName(const ObjCMethodDecl *MD);
+
+ // Declare manglers for every type class.
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
+#include "clang/AST/TypeNodes.def"
+
+ void mangleType(const TagType*);
+ void mangleType(TemplateName);
+ void mangleBareFunctionType(const FunctionType *T,
+ bool MangleReturnType);
+ void mangleNeonVectorType(const VectorType *T);
+
+ void mangleIntegerLiteral(QualType T, const llvm::APSInt &Value);
+ void mangleMemberExpr(const Expr *base, bool isArrow,
+ NestedNameSpecifier *qualifier,
+ NamedDecl *firstQualifierLookup,
+ DeclarationName name,
+ unsigned knownArity);
+ void mangleExpression(const Expr *E, unsigned Arity = UnknownArity);
+ void mangleCXXCtorType(CXXCtorType T);
+ void mangleCXXDtorType(CXXDtorType T);
+
+ void mangleTemplateArgs(const ASTTemplateArgumentListInfo &TemplateArgs);
+ void mangleTemplateArgs(TemplateName Template,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs);
+ void mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgumentList &AL);
+ void mangleTemplateArg(const NamedDecl *P, TemplateArgument A);
+ void mangleUnresolvedTemplateArgs(const TemplateArgument *args,
+ unsigned numArgs);
+
+ void mangleTemplateParameter(unsigned Index);
+
+ void mangleFunctionParam(const ParmVarDecl *parm);
+};
+
+}
+
+static bool isInCLinkageSpecification(const Decl *D) {
+ D = D->getCanonicalDecl();
+ for (const DeclContext *DC = getEffectiveDeclContext(D);
+ !DC->isTranslationUnit(); DC = getEffectiveParentContext(DC)) {
+ if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
+ return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
+ }
+
+ return false;
+}
+
+bool ItaniumMangleContext::shouldMangleDeclName(const NamedDecl *D) {
+ // In C, functions with no attributes never need to be mangled. Fastpath them.
+ if (!getASTContext().getLangOpts().CPlusPlus && !D->hasAttrs())
+ return false;
+
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (D->hasAttr<AsmLabelAttr>())
+ return true;
+
+  // Clang's "overloadable" attribute extension to C/C++ always implies name
+  // mangling, as does being a C++ member function or a function whose name is
+  // not a simple identifier.
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (FD && (FD->hasAttr<OverloadableAttr>() || isa<CXXMethodDecl>(FD) ||
+ !FD->getDeclName().isIdentifier()))
+ return true;
+
+ // Otherwise, no mangling is done outside C++ mode.
+ if (!getASTContext().getLangOpts().CPlusPlus)
+ return false;
+
+ // Variables at global scope with non-internal linkage are not mangled
+ if (!FD) {
+ const DeclContext *DC = getEffectiveDeclContext(D);
+ // Check for extern variable declared locally.
+ if (DC->isFunctionOrMethod() && D->hasLinkage())
+ while (!DC->isNamespace() && !DC->isTranslationUnit())
+ DC = getEffectiveParentContext(DC);
+ if (DC->isTranslationUnit() && D->getLinkage() != InternalLinkage)
+ return false;
+ }
+
+ // Class members are always mangled.
+ if (getEffectiveDeclContext(D)->isRecord())
+ return true;
+
+ // C functions and "main" are not mangled.
+ if ((FD && FD->isMain()) || isInCLinkageSpecification(D))
+ return false;
+
+ return true;
+}
+
+void CXXNameMangler::mangle(const NamedDecl *D, StringRef Prefix) {
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
+ // If we have an asm name, then we use it as the mangling.
+
+ // Adding the prefix can cause problems when one file has a "foo" and
+ // another has a "\01foo". That is known to happen on ELF with the
+ // tricks normally used for producing aliases (PR9177). Fortunately the
+ // llvm mangler on ELF is a nop, so we can just avoid adding the \01
+ // marker. We also avoid adding the marker if this is an alias for an
+ // LLVM intrinsic.
+ StringRef UserLabelPrefix =
+ getASTContext().getTargetInfo().getUserLabelPrefix();
+ if (!UserLabelPrefix.empty() && !ALA->getLabel().startswith("llvm."))
+ Out << '\01'; // LLVM IR Marker for __asm("foo")
+
+ Out << ALA->getLabel();
+ return;
+ }
+
+ // <mangled-name> ::= _Z <encoding>
+ // ::= <data name>
+ // ::= <special-name>
+ Out << Prefix;
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ mangleFunctionEncoding(FD);
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ mangleName(VD);
+ else
+ mangleName(cast<FieldDecl>(D));
+}
+
+void CXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
+ // <encoding> ::= <function name> <bare-function-type>
+ mangleName(FD);
+
+ // Don't mangle in the type if this isn't a decl we should typically mangle.
+ if (!Context.shouldMangleDeclName(FD))
+ return;
+
+ // Whether the mangling of a function type includes the return type depends on
+ // the context and the nature of the function. The rules for deciding whether
+ // the return type is included are:
+ //
+ // 1. Template functions (names or types) have return types encoded, with
+ // the exceptions listed below.
+ // 2. Function types not appearing as part of a function name mangling,
+ // e.g. parameters, pointer types, etc., have return type encoded, with the
+ // exceptions listed below.
+ // 3. Non-template function names do not have return types encoded.
+ //
+ // The exceptions mentioned in (1) and (2) above, for which the return type is
+ // never included, are
+ // 1. Constructors.
+ // 2. Destructors.
+ // 3. Conversion operator functions, e.g. operator int.
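+  //
+  // For example (manglings shown for illustration only): a plain 'int f(int)'
+  // mangles as _Z1fi with no return type, whereas the instantiation
+  // 'int f<int>(int)' of 'template<typename T> T f(T)' mangles as
+  // _Z1fIiET_S0_, which encodes the return type T.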
+ bool MangleReturnType = false;
+ if (FunctionTemplateDecl *PrimaryTemplate = FD->getPrimaryTemplate()) {
+ if (!(isa<CXXConstructorDecl>(FD) || isa<CXXDestructorDecl>(FD) ||
+ isa<CXXConversionDecl>(FD)))
+ MangleReturnType = true;
+
+ // Mangle the type of the primary template.
+ FD = PrimaryTemplate->getTemplatedDecl();
+ }
+
+ mangleBareFunctionType(FD->getType()->getAs<FunctionType>(),
+ MangleReturnType);
+}
+
+static const DeclContext *IgnoreLinkageSpecDecls(const DeclContext *DC) {
+ while (isa<LinkageSpecDecl>(DC)) {
+ DC = getEffectiveParentContext(DC);
+ }
+
+ return DC;
+}
+
+/// isStd - Return whether a given namespace is the 'std' namespace.
+static bool isStd(const NamespaceDecl *NS) {
+ if (!IgnoreLinkageSpecDecls(getEffectiveParentContext(NS))
+ ->isTranslationUnit())
+ return false;
+
+ const IdentifierInfo *II = NS->getOriginalNamespace()->getIdentifier();
+ return II && II->isStr("std");
+}
+
+// isStdNamespace - Return whether a given decl context is a toplevel 'std'
+// namespace.
+static bool isStdNamespace(const DeclContext *DC) {
+ if (!DC->isNamespace())
+ return false;
+
+ return isStd(cast<NamespaceDecl>(DC));
+}
+
+static const TemplateDecl *
+isTemplate(const NamedDecl *ND, const TemplateArgumentList *&TemplateArgs) {
+ // Check if we have a function template.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)){
+ if (const TemplateDecl *TD = FD->getPrimaryTemplate()) {
+ TemplateArgs = FD->getTemplateSpecializationArgs();
+ return TD;
+ }
+ }
+
+ // Check if we have a class template.
+ if (const ClassTemplateSpecializationDecl *Spec =
+ dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
+ TemplateArgs = &Spec->getTemplateArgs();
+ return Spec->getSpecializedTemplate();
+ }
+
+ return 0;
+}
+
+static bool isLambda(const NamedDecl *ND) {
+ const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(ND);
+ if (!Record)
+ return false;
+
+ return Record->isLambda();
+}
+
+void CXXNameMangler::mangleName(const NamedDecl *ND) {
+ // <name> ::= <nested-name>
+ // ::= <unscoped-name>
+ // ::= <unscoped-template-name> <template-args>
+ // ::= <local-name>
+ //
+ const DeclContext *DC = getEffectiveDeclContext(ND);
+
+ // If this is an extern variable declared locally, the relevant DeclContext
+ // is that of the containing namespace, or the translation unit.
+ // FIXME: This is a hack; extern variables declared locally should have
+ // a proper semantic declaration context!
+ if (isa<FunctionDecl>(DC) && ND->hasLinkage() && !isLambda(ND))
+ while (!DC->isNamespace() && !DC->isTranslationUnit())
+ DC = getEffectiveParentContext(DC);
+ else if (GetLocalClassDecl(ND)) {
+ mangleLocalName(ND);
+ return;
+ }
+
+ DC = IgnoreLinkageSpecDecls(DC);
+
+ if (DC->isTranslationUnit() || isStdNamespace(DC)) {
+ // Check if we have a template.
+ const TemplateArgumentList *TemplateArgs = 0;
+ if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ mangleUnscopedTemplateName(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
+ return;
+ }
+
+ mangleUnscopedName(ND);
+ return;
+ }
+
+ if (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)) {
+ mangleLocalName(ND);
+ return;
+ }
+
+ mangleNestedName(ND, DC);
+}
+void CXXNameMangler::mangleName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ const DeclContext *DC = IgnoreLinkageSpecDecls(getEffectiveDeclContext(TD));
+
+ if (DC->isTranslationUnit() || isStdNamespace(DC)) {
+ mangleUnscopedTemplateName(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, TemplateArgs, NumTemplateArgs);
+ } else {
+ mangleNestedName(TD, TemplateArgs, NumTemplateArgs);
+ }
+}
+
+void CXXNameMangler::mangleUnscopedName(const NamedDecl *ND) {
+ // <unscoped-name> ::= <unqualified-name>
+ // ::= St <unqualified-name> # ::std::
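+  //
+  // For example, '::std::terminate' is mangled with the 'St' prefix, giving
+  // '_ZSt9terminatev'.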
+
+ if (isStdNamespace(IgnoreLinkageSpecDecls(getEffectiveDeclContext(ND))))
+ Out << "St";
+
+ mangleUnqualifiedName(ND);
+}
+
+void CXXNameMangler::mangleUnscopedTemplateName(const TemplateDecl *ND) {
+ // <unscoped-template-name> ::= <unscoped-name>
+ // ::= <substitution>
+ if (mangleSubstitution(ND))
+ return;
+
+ // <template-template-param> ::= <template-param>
+ if (const TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(ND)) {
+ mangleTemplateParameter(TTP->getIndex());
+ return;
+ }
+
+ mangleUnscopedName(ND->getTemplatedDecl());
+ addSubstitution(ND);
+}
+
+void CXXNameMangler::mangleUnscopedTemplateName(TemplateName Template) {
+ // <unscoped-template-name> ::= <unscoped-name>
+ // ::= <substitution>
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleUnscopedTemplateName(TD);
+
+ if (mangleSubstitution(Template))
+ return;
+
+ DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
+ assert(Dependent && "Not a dependent template name?");
+ if (const IdentifierInfo *Id = Dependent->getIdentifier())
+ mangleSourceName(Id);
+ else
+ mangleOperatorName(Dependent->getOperator(), UnknownArity);
+
+ addSubstitution(Template);
+}
+
+void CXXNameMangler::mangleFloat(const llvm::APFloat &f) {
+ // ABI:
+ // Floating-point literals are encoded using a fixed-length
+ // lowercase hexadecimal string corresponding to the internal
+ // representation (IEEE on Itanium), high-order bytes first,
+ // without leading zeroes. For example: "Lf bf800000 E" is -1.0f
+ // on Itanium.
+ // The 'without leading zeroes' thing seems to be an editorial
+ // mistake; see the discussion on cxx-abi-dev beginning on
+ // 2012-01-16.
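+  // For example, the double 1.0 (bits 0x3ff0000000000000) is emitted here
+  // as the sixteen hex digits "3ff0000000000000".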
+
+  // Our requirements here are just barely weird enough to justify
+ // using a custom algorithm instead of post-processing APInt::toString().
+
+ llvm::APInt valueBits = f.bitcastToAPInt();
+ unsigned numCharacters = (valueBits.getBitWidth() + 3) / 4;
+ assert(numCharacters != 0);
+
+ // Allocate a buffer of the right number of characters.
+ llvm::SmallVector<char, 20> buffer;
+ buffer.set_size(numCharacters);
+
+ // Fill the buffer left-to-right.
+ for (unsigned stringIndex = 0; stringIndex != numCharacters; ++stringIndex) {
+ // The bit-index of the next hex digit.
+ unsigned digitBitIndex = 4 * (numCharacters - stringIndex - 1);
+
+    // Project out 4 bits starting at 'digitBitIndex'.
+ llvm::integerPart hexDigit
+ = valueBits.getRawData()[digitBitIndex / llvm::integerPartWidth];
+ hexDigit >>= (digitBitIndex % llvm::integerPartWidth);
+ hexDigit &= 0xF;
+
+ // Map that over to a lowercase hex digit.
+ static const char charForHex[16] = {
+ '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f'
+ };
+ buffer[stringIndex] = charForHex[hexDigit];
+ }
+
+ Out.write(buffer.data(), numCharacters);
+}
+
+void CXXNameMangler::mangleNumber(const llvm::APSInt &Value) {
+ if (Value.isSigned() && Value.isNegative()) {
+ Out << 'n';
+ Value.abs().print(Out, true);
+ } else
+ Value.print(Out, Value.isSigned());
+}
+
+void CXXNameMangler::mangleNumber(int64_t Number) {
+ // <number> ::= [n] <non-negative decimal integer>
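+  // For example, 42 is emitted as '42' and -42 as 'n42'.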
+ if (Number < 0) {
+ Out << 'n';
+ Number = -Number;
+ }
+
+ Out << Number;
+}
+
+void CXXNameMangler::mangleCallOffset(int64_t NonVirtual, int64_t Virtual) {
+ // <call-offset> ::= h <nv-offset> _
+ // ::= v <v-offset> _
+ // <nv-offset> ::= <offset number> # non-virtual base override
+ // <v-offset> ::= <offset number> _ <virtual offset number>
+ // # virtual base override, with vcall offset
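+  //
+  // For example, a non-virtual this-adjustment of -16 is emitted as 'hn16_',
+  // and a virtual adjustment of (-8, -24) as 'vn8_n24_'.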
+ if (!Virtual) {
+ Out << 'h';
+ mangleNumber(NonVirtual);
+ Out << '_';
+ return;
+ }
+
+ Out << 'v';
+ mangleNumber(NonVirtual);
+ Out << '_';
+ mangleNumber(Virtual);
+ Out << '_';
+}
+
+void CXXNameMangler::manglePrefix(QualType type) {
+ if (const TemplateSpecializationType *TST =
+ type->getAs<TemplateSpecializationType>()) {
+ if (!mangleSubstitution(QualType(TST, 0))) {
+ mangleTemplatePrefix(TST->getTemplateName());
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(TST->getTemplateName(), TST->getArgs(),
+ TST->getNumArgs());
+ addSubstitution(QualType(TST, 0));
+ }
+ } else if (const DependentTemplateSpecializationType *DTST
+ = type->getAs<DependentTemplateSpecializationType>()) {
+ TemplateName Template
+ = getASTContext().getDependentTemplateName(DTST->getQualifier(),
+ DTST->getIdentifier());
+ mangleTemplatePrefix(Template);
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(Template, DTST->getArgs(), DTST->getNumArgs());
+ } else {
+ // We use the QualType mangle type variant here because it handles
+ // substitutions.
+ mangleType(type);
+ }
+}
+
+/// Mangle everything prior to the base-unresolved-name in an unresolved-name.
+///
+/// \param firstQualifierLookup - the entity found by unqualified lookup
+/// for the first name in the qualifier, if this is for a member expression
+/// \param recursive - true if this is being called recursively,
+/// i.e. if there is more prefix "to the right".
+void CXXNameMangler::mangleUnresolvedPrefix(NestedNameSpecifier *qualifier,
+ NamedDecl *firstQualifierLookup,
+ bool recursive) {
+
+ // x, ::x
+ // <unresolved-name> ::= [gs] <base-unresolved-name>
+
+ // T::x / decltype(p)::x
+ // <unresolved-name> ::= sr <unresolved-type> <base-unresolved-name>
+
+ // T::N::x /decltype(p)::N::x
+ // <unresolved-name> ::= srN <unresolved-type> <unresolved-qualifier-level>+ E
+ // <base-unresolved-name>
+
+ // A::x, N::y, A<T>::z; "gs" means leading "::"
+ // <unresolved-name> ::= [gs] sr <unresolved-qualifier-level>+ E
+ // <base-unresolved-name>
+
+ switch (qualifier->getKind()) {
+ case NestedNameSpecifier::Global:
+ Out << "gs";
+
+ // We want an 'sr' unless this is the entire NNS.
+ if (recursive)
+ Out << "sr";
+
+ // We never want an 'E' here.
+ return;
+
+ case NestedNameSpecifier::Namespace:
+ if (qualifier->getPrefix())
+ mangleUnresolvedPrefix(qualifier->getPrefix(), firstQualifierLookup,
+ /*recursive*/ true);
+ else
+ Out << "sr";
+ mangleSourceName(qualifier->getAsNamespace()->getIdentifier());
+ break;
+ case NestedNameSpecifier::NamespaceAlias:
+ if (qualifier->getPrefix())
+ mangleUnresolvedPrefix(qualifier->getPrefix(), firstQualifierLookup,
+ /*recursive*/ true);
+ else
+ Out << "sr";
+ mangleSourceName(qualifier->getAsNamespaceAlias()->getIdentifier());
+ break;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ const Type *type = qualifier->getAsType();
+
+ // We only want to use an unresolved-type encoding if this is one of:
+ // - a decltype
+ // - a template type parameter
+ // - a template template parameter with arguments
+ // In all of these cases, we should have no prefix.
+ if (qualifier->getPrefix()) {
+ mangleUnresolvedPrefix(qualifier->getPrefix(), firstQualifierLookup,
+ /*recursive*/ true);
+ } else {
+ // Otherwise, all the cases want this.
+ Out << "sr";
+ }
+
+ // Only certain other types are valid as prefixes; enumerate them.
+ switch (type->getTypeClass()) {
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Pointer:
+ case Type::BlockPointer:
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::MemberPointer:
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ case Type::DependentSizedExtVector:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ case Type::Enum:
+ case Type::Paren:
+ case Type::Elaborated:
+ case Type::Attributed:
+ case Type::Auto:
+ case Type::PackExpansion:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ case Type::Atomic:
+ llvm_unreachable("type is illegal as a nested name specifier");
+
+ case Type::SubstTemplateTypeParmPack:
+ // FIXME: not clear how to mangle this!
+ // template <class T...> class A {
+ // template <class U...> void foo(decltype(T::foo(U())) x...);
+ // };
+ Out << "_SUBSTPACK_";
+ break;
+
+ // <unresolved-type> ::= <template-param>
+ // ::= <decltype>
+ // ::= <template-template-param> <template-args>
+ // (this last is not official yet)
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ case Type::TemplateTypeParm:
+ case Type::UnaryTransform:
+ case Type::SubstTemplateTypeParm:
+ unresolvedType:
+ assert(!qualifier->getPrefix());
+
+ // We only get here recursively if we're followed by identifiers.
+ if (recursive) Out << 'N';
+
+ // This seems to do everything we want. It's not really
+ // sanctioned for a substituted template parameter, though.
+ mangleType(QualType(type, 0));
+
+ // We never want to print 'E' directly after an unresolved-type,
+ // so we return directly.
+ return;
+
+ case Type::Typedef:
+ mangleSourceName(cast<TypedefType>(type)->getDecl()->getIdentifier());
+ break;
+
+ case Type::UnresolvedUsing:
+ mangleSourceName(cast<UnresolvedUsingType>(type)->getDecl()
+ ->getIdentifier());
+ break;
+
+ case Type::Record:
+ mangleSourceName(cast<RecordType>(type)->getDecl()->getIdentifier());
+ break;
+
+ case Type::TemplateSpecialization: {
+ const TemplateSpecializationType *tst
+ = cast<TemplateSpecializationType>(type);
+ TemplateName name = tst->getTemplateName();
+ switch (name.getKind()) {
+ case TemplateName::Template:
+ case TemplateName::QualifiedTemplate: {
+ TemplateDecl *temp = name.getAsTemplateDecl();
+
+ // If the base is a template template parameter, this is an
+ // unresolved type.
+ assert(temp && "no template for template specialization type");
+ if (isa<TemplateTemplateParmDecl>(temp)) goto unresolvedType;
+
+ mangleSourceName(temp->getIdentifier());
+ break;
+ }
+
+ case TemplateName::OverloadedTemplate:
+ case TemplateName::DependentTemplate:
+ llvm_unreachable("invalid base for a template specialization type");
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = name.getAsSubstTemplateTemplateParm();
+ mangleExistingSubstitution(subst->getReplacement());
+ break;
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ // FIXME: not clear how to mangle this!
+ // template <template <class U> class T...> class A {
+ // template <class U...> void foo(decltype(T<U>::foo) x...);
+ // };
+ Out << "_SUBSTPACK_";
+ break;
+ }
+ }
+
+ mangleUnresolvedTemplateArgs(tst->getArgs(), tst->getNumArgs());
+ break;
+ }
+
+ case Type::InjectedClassName:
+ mangleSourceName(cast<InjectedClassNameType>(type)->getDecl()
+ ->getIdentifier());
+ break;
+
+ case Type::DependentName:
+ mangleSourceName(cast<DependentNameType>(type)->getIdentifier());
+ break;
+
+ case Type::DependentTemplateSpecialization: {
+ const DependentTemplateSpecializationType *tst
+ = cast<DependentTemplateSpecializationType>(type);
+ mangleSourceName(tst->getIdentifier());
+ mangleUnresolvedTemplateArgs(tst->getArgs(), tst->getNumArgs());
+ break;
+ }
+ }
+ break;
+ }
+
+ case NestedNameSpecifier::Identifier:
+ // Member expressions can have these without prefixes.
+ if (qualifier->getPrefix()) {
+ mangleUnresolvedPrefix(qualifier->getPrefix(), firstQualifierLookup,
+ /*recursive*/ true);
+ } else if (firstQualifierLookup) {
+
+ // Try to make a proper qualifier out of the lookup result, and
+ // then just recurse on that.
+ NestedNameSpecifier *newQualifier;
+ if (TypeDecl *typeDecl = dyn_cast<TypeDecl>(firstQualifierLookup)) {
+ QualType type = getASTContext().getTypeDeclType(typeDecl);
+
+ // Pretend we had a different nested name specifier.
+ newQualifier = NestedNameSpecifier::Create(getASTContext(),
+ /*prefix*/ 0,
+ /*template*/ false,
+ type.getTypePtr());
+ } else if (NamespaceDecl *nspace =
+ dyn_cast<NamespaceDecl>(firstQualifierLookup)) {
+ newQualifier = NestedNameSpecifier::Create(getASTContext(),
+ /*prefix*/ 0,
+ nspace);
+ } else if (NamespaceAliasDecl *alias =
+ dyn_cast<NamespaceAliasDecl>(firstQualifierLookup)) {
+ newQualifier = NestedNameSpecifier::Create(getASTContext(),
+ /*prefix*/ 0,
+ alias);
+ } else {
+ // No sensible mangling to do here.
+ newQualifier = 0;
+ }
+
+ if (newQualifier)
+ return mangleUnresolvedPrefix(newQualifier, /*lookup*/ 0, recursive);
+
+ } else {
+ Out << "sr";
+ }
+
+ mangleSourceName(qualifier->getAsIdentifier());
+ break;
+ }
+
+ // If this was the innermost part of the NNS, and we fell out to
+ // here, append an 'E'.
+ if (!recursive)
+ Out << 'E';
+}
+
+/// Mangle an unresolved-name, which is generally used for names which
+/// weren't resolved to specific entities.
+void CXXNameMangler::mangleUnresolvedName(NestedNameSpecifier *qualifier,
+ NamedDecl *firstQualifierLookup,
+ DeclarationName name,
+ unsigned knownArity) {
+ if (qualifier) mangleUnresolvedPrefix(qualifier, firstQualifierLookup);
+ mangleUnqualifiedName(0, name, knownArity);
+}
+
+static const FieldDecl *FindFirstNamedDataMember(const RecordDecl *RD) {
+ assert(RD->isAnonymousStructOrUnion() &&
+ "Expected anonymous struct or union!");
+
+ for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I) {
+ const FieldDecl *FD = *I;
+
+ if (FD->getIdentifier())
+ return FD;
+
+ if (const RecordType *RT = FD->getType()->getAs<RecordType>()) {
+ if (const FieldDecl *NamedDataMember =
+ FindFirstNamedDataMember(RT->getDecl()))
+ return NamedDataMember;
+ }
+ }
+
+ // We didn't find a named data member.
+ return 0;
+}
+
+void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
+ DeclarationName Name,
+ unsigned KnownArity) {
+ // <unqualified-name> ::= <operator-name>
+ // ::= <ctor-dtor-name>
+ // ::= <source-name>
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier: {
+ if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
+ // We must avoid conflicts between internally- and externally-
+ // linked variable and function declaration names in the same TU:
+ // void test() { extern void foo(); }
+ // static void foo();
+ // This naming convention is the same as that followed by GCC,
+ // though it shouldn't actually matter.
+ if (ND && ND->getLinkage() == InternalLinkage &&
+ getEffectiveDeclContext(ND)->isFileContext())
+ Out << 'L';
+
+ mangleSourceName(II);
+ break;
+ }
+
+ // Otherwise, an anonymous entity. We must have a declaration.
+ assert(ND && "mangling empty name without declaration");
+
+ if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+ if (NS->isAnonymousNamespace()) {
+ // This is how gcc mangles these names.
+ Out << "12_GLOBAL__N_1";
+ break;
+ }
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+ // We must have an anonymous union or struct declaration.
+ const RecordDecl *RD =
+ cast<RecordDecl>(VD->getType()->getAs<RecordType>()->getDecl());
+
+ // Itanium C++ ABI 5.1.2:
+ //
+ // For the purposes of mangling, the name of an anonymous union is
+ // considered to be the name of the first named data member found by a
+ // pre-order, depth-first, declaration-order walk of the data members of
+ // the anonymous union. If there is no such data member (i.e., if all of
+ // the data members in the union are unnamed), then there is no way for
+ // a program to refer to the anonymous union, and there is therefore no
+ // need to mangle its name.
+ const FieldDecl *FD = FindFirstNamedDataMember(RD);
+
+ // It's actually possible for various reasons for us to get here
+ // with an empty anonymous struct / union. Fortunately, it
+ // doesn't really matter what name we generate.
+ if (!FD) break;
+ assert(FD->getIdentifier() && "Data member name isn't an identifier!");
+
+ mangleSourceName(FD->getIdentifier());
+ break;
+ }
+
+ // We must have an anonymous struct.
+ const TagDecl *TD = cast<TagDecl>(ND);
+ if (const TypedefNameDecl *D = TD->getTypedefNameForAnonDecl()) {
+ assert(TD->getDeclContext() == D->getDeclContext() &&
+ "Typedef should not be in another decl context!");
+ assert(D->getDeclName().getAsIdentifierInfo() &&
+ "Typedef was not named!");
+ mangleSourceName(D->getDeclName().getAsIdentifierInfo());
+ break;
+ }
+
+ // <unnamed-type-name> ::= <closure-type-name>
+ //
+ // <closure-type-name> ::= Ul <lambda-sig> E [ <nonnegative number> ] _
+ // <lambda-sig> ::= <parameter-type>+ # Parameter types or 'v' for 'void'.
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(TD)) {
+ if (Record->isLambda() && Record->getLambdaManglingNumber()) {
+ mangleLambda(Record);
+ break;
+ }
+ }
+
+ // Get a unique id for the anonymous struct.
+ uint64_t AnonStructId = Context.getAnonymousStructId(TD);
+
+ // Mangle it as a source name in the form
+ // [n] $_<id>
+ // where n is the length of the string.
+ SmallString<8> Str;
+ Str += "$_";
+ Str += llvm::utostr(AnonStructId);
+
+ Out << Str.size();
+ Out << Str.str();
+ break;
+ }
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ llvm_unreachable("Can't mangle Objective-C selector names here!");
+
+ case DeclarationName::CXXConstructorName:
+ if (ND == Structor)
+ // If the named decl is the C++ constructor we're mangling, use the type
+ // we were given.
+ mangleCXXCtorType(static_cast<CXXCtorType>(StructorType));
+ else
+ // Otherwise, use the complete constructor name. This is relevant if a
+ // class with a constructor is declared within a constructor.
+ mangleCXXCtorType(Ctor_Complete);
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ if (ND == Structor)
+ // If the named decl is the C++ destructor we're mangling, use the type we
+ // were given.
+ mangleCXXDtorType(static_cast<CXXDtorType>(StructorType));
+ else
+ // Otherwise, use the complete destructor name. This is relevant if a
+ // class with a destructor is declared within a destructor.
+ mangleCXXDtorType(Dtor_Complete);
+ break;
+
+ case DeclarationName::CXXConversionFunctionName:
+ // <operator-name> ::= cv <type> # (cast)
+ Out << "cv";
+ mangleType(Name.getCXXNameType());
+ break;
+
+ case DeclarationName::CXXOperatorName: {
+ unsigned Arity;
+ if (ND) {
+ Arity = cast<FunctionDecl>(ND)->getNumParams();
+
+ // If we have a C++ member function, we need to include the 'this' pointer.
+ // FIXME: This does not make sense for operators that are static, but their
+ // names stay the same regardless of the arity (operator new for instance).
+ if (isa<CXXMethodDecl>(ND))
+ Arity++;
+ } else
+ Arity = KnownArity;
+
+ mangleOperatorName(Name.getCXXOverloadedOperator(), Arity);
+ break;
+ }
+
+ case DeclarationName::CXXLiteralOperatorName:
+ // FIXME: This mangling is not yet official.
+ Out << "li";
+ mangleSourceName(Name.getCXXLiteralIdentifier());
+ break;
+
+ case DeclarationName::CXXUsingDirective:
+ llvm_unreachable("Can't mangle a using directive name!");
+ }
+}
+
+void CXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
+ // <source-name> ::= <positive length number> <identifier>
+ // <number> ::= [n] <non-negative decimal integer>
+ // <identifier> ::= <unqualified source code identifier>
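+  // For example, the identifier 'vector' is emitted as '6vector'.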
+ Out << II->getLength() << II->getName();
+}
+
+void CXXNameMangler::mangleNestedName(const NamedDecl *ND,
+ const DeclContext *DC,
+ bool NoFunction) {
+ // <nested-name>
+ // ::= N [<CV-qualifiers>] [<ref-qualifier>] <prefix> <unqualified-name> E
+ // ::= N [<CV-qualifiers>] [<ref-qualifier>] <template-prefix>
+ // <template-args> E
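+  //
+  // For example, a member function 'void A::foo()' yields the nested-name
+  // 'N1A3fooE' within the full mangling '_ZN1A3fooEv'.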
+
+ Out << 'N';
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(ND)) {
+ mangleQualifiers(Qualifiers::fromCVRMask(Method->getTypeQualifiers()));
+ mangleRefQualifier(Method->getRefQualifier());
+ }
+
+ // Check if we have a template.
+ const TemplateArgumentList *TemplateArgs = 0;
+ if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ mangleTemplatePrefix(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
+  } else {
+ manglePrefix(DC, NoFunction);
+ mangleUnqualifiedName(ND);
+ }
+
+ Out << 'E';
+}
+void CXXNameMangler::mangleNestedName(const TemplateDecl *TD,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ // <nested-name> ::= N [<CV-qualifiers>] <template-prefix> <template-args> E
+
+ Out << 'N';
+
+ mangleTemplatePrefix(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, TemplateArgs, NumTemplateArgs);
+
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleLocalName(const NamedDecl *ND) {
+ // <local-name> := Z <function encoding> E <entity name> [<discriminator>]
+ // := Z <function encoding> E s [<discriminator>]
+ // <local-name> := Z <function encoding> E d [ <parameter number> ]
+ // _ <entity name>
+ // <discriminator> := _ <non-negative number>
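+  //
+  // For example, a function-local static 'x' inside 'void foo()' is mangled
+  // as '_ZZ3foovE1x': Z, the encoding of foo(), E, then the name of x.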
+ const DeclContext *DC = getEffectiveDeclContext(ND);
+ if (isa<ObjCMethodDecl>(DC) && isa<FunctionDecl>(ND)) {
+    // Don't add Objective-C method name mangling to locally declared
+    // functions.
+ mangleUnqualifiedName(ND);
+ return;
+ }
+
+ Out << 'Z';
+
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(DC)) {
+ mangleObjCMethodName(MD);
+ } else if (const CXXRecordDecl *RD = GetLocalClassDecl(ND)) {
+ mangleFunctionEncoding(cast<FunctionDecl>(getEffectiveDeclContext(RD)));
+ Out << 'E';
+
+ // The parameter number is omitted for the last parameter, 0 for the
+ // second-to-last parameter, 1 for the third-to-last parameter, etc. The
+ // <entity name> will of course contain a <closure-type-name>: Its
+ // numbering will be local to the particular argument in which it appears
+ // -- other default arguments do not affect its encoding.
+ bool SkipDiscriminator = false;
+ if (RD->isLambda()) {
+ if (const ParmVarDecl *Parm
+ = dyn_cast_or_null<ParmVarDecl>(RD->getLambdaContextDecl())) {
+ if (const FunctionDecl *Func
+ = dyn_cast<FunctionDecl>(Parm->getDeclContext())) {
+ Out << 'd';
+ unsigned Num = Func->getNumParams() - Parm->getFunctionScopeIndex();
+ if (Num > 1)
+ mangleNumber(Num - 2);
+ Out << '_';
+ SkipDiscriminator = true;
+ }
+ }
+ }
+
+ // Mangle the name relative to the closest enclosing function.
+ if (ND == RD) // equality ok because RD derived from ND above
+ mangleUnqualifiedName(ND);
+ else
+ mangleNestedName(ND, DC, true /*NoFunction*/);
+
+ if (!SkipDiscriminator) {
+ unsigned disc;
+ if (Context.getNextDiscriminator(RD, disc)) {
+ if (disc < 10)
+ Out << '_' << disc;
+ else
+ Out << "__" << disc << '_';
+ }
+ }
+
+ return;
+  } else
+ mangleFunctionEncoding(cast<FunctionDecl>(DC));
+
+ Out << 'E';
+ mangleUnqualifiedName(ND);
+}
+
+void CXXNameMangler::mangleLambda(const CXXRecordDecl *Lambda) {
+ // If the context of a closure type is an initializer for a class member
+ // (static or nonstatic), it is encoded in a qualified name with a final
+ // <prefix> of the form:
+ //
+ // <data-member-prefix> := <member source-name> M
+ //
+ // Technically, the data-member-prefix is part of the <prefix>. However,
+ // since a closure type will always be mangled with a prefix, it's easier
+ // to emit that last part of the prefix here.
+ if (Decl *Context = Lambda->getLambdaContextDecl()) {
+ if ((isa<VarDecl>(Context) || isa<FieldDecl>(Context)) &&
+ Context->getDeclContext()->isRecord()) {
+ if (const IdentifierInfo *Name
+ = cast<NamedDecl>(Context)->getIdentifier()) {
+ mangleSourceName(Name);
+ Out << 'M';
+ }
+ }
+ }
+
+ Out << "Ul";
+ DeclarationName Name
+ = getASTContext().DeclarationNames.getCXXOperatorName(OO_Call);
+ const FunctionProtoType *Proto
+ = cast<CXXMethodDecl>(*Lambda->lookup(Name).first)->getType()->
+ getAs<FunctionProtoType>();
+ mangleBareFunctionType(Proto, /*MangleReturnType=*/false);
+ Out << "E";
+
+ // The number is omitted for the first closure type with a given
+ // <lambda-sig> in a given context; it is n-2 for the nth closure type
+ // (in lexical order) with that same <lambda-sig> and context.
+ //
+ // The AST keeps track of the number for us.
+ unsigned Number = Lambda->getLambdaManglingNumber();
+ assert(Number > 0 && "Lambda should be mangled as an unnamed class");
+ if (Number > 1)
+ mangleNumber(Number - 2);
+ Out << '_';
+}
+
+void CXXNameMangler::manglePrefix(NestedNameSpecifier *qualifier) {
+ switch (qualifier->getKind()) {
+ case NestedNameSpecifier::Global:
+ // nothing
+ return;
+
+ case NestedNameSpecifier::Namespace:
+ mangleName(qualifier->getAsNamespace());
+ return;
+
+ case NestedNameSpecifier::NamespaceAlias:
+ mangleName(qualifier->getAsNamespaceAlias()->getNamespace());
+ return;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ manglePrefix(QualType(qualifier->getAsType(), 0));
+ return;
+
+ case NestedNameSpecifier::Identifier:
+ // Member expressions can have these without prefixes, but that
+ // should end up in mangleUnresolvedPrefix instead.
+ assert(qualifier->getPrefix());
+ manglePrefix(qualifier->getPrefix());
+
+ mangleSourceName(qualifier->getAsIdentifier());
+ return;
+ }
+
+ llvm_unreachable("unexpected nested name specifier");
+}
+
+void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
+ // <prefix> ::= <prefix> <unqualified-name>
+ // ::= <template-prefix> <template-args>
+ // ::= <template-param>
+ // ::= # empty
+ // ::= <substitution>
+
+ DC = IgnoreLinkageSpecDecls(DC);
+
+ if (DC->isTranslationUnit())
+ return;
+
+ if (const BlockDecl *Block = dyn_cast<BlockDecl>(DC)) {
+ manglePrefix(getEffectiveParentContext(DC), NoFunction);
+ SmallString<64> Name;
+ llvm::raw_svector_ostream NameStream(Name);
+ Context.mangleBlock(Block, NameStream);
+ NameStream.flush();
+ Out << Name.size() << Name;
+ return;
+ }
+
+ const NamedDecl *ND = cast<NamedDecl>(DC);
+ if (mangleSubstitution(ND))
+ return;
+
+ // Check if we have a template.
+ const TemplateArgumentList *TemplateArgs = 0;
+ if (const TemplateDecl *TD = isTemplate(ND, TemplateArgs)) {
+ mangleTemplatePrefix(TD);
+ TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
+ mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
+  } else if (NoFunction && (isa<FunctionDecl>(ND) || isa<ObjCMethodDecl>(ND)))
+ return;
+ else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND))
+ mangleObjCMethodName(Method);
+ else {
+ manglePrefix(getEffectiveDeclContext(ND), NoFunction);
+ mangleUnqualifiedName(ND);
+ }
+
+ addSubstitution(ND);
+}
+
+void CXXNameMangler::mangleTemplatePrefix(TemplateName Template) {
+ // <template-prefix> ::= <prefix> <template unqualified-name>
+ // ::= <template-param>
+ // ::= <substitution>
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleTemplatePrefix(TD);
+
+ if (QualifiedTemplateName *Qualified = Template.getAsQualifiedTemplateName())
+ manglePrefix(Qualified->getQualifier());
+
+ if (OverloadedTemplateStorage *Overloaded
+ = Template.getAsOverloadedTemplate()) {
+ mangleUnqualifiedName(0, (*Overloaded->begin())->getDeclName(),
+ UnknownArity);
+ return;
+ }
+
+ DependentTemplateName *Dependent = Template.getAsDependentTemplateName();
+ assert(Dependent && "Unknown template name kind?");
+ manglePrefix(Dependent->getQualifier());
+ mangleUnscopedTemplateName(Template);
+}
+
+void CXXNameMangler::mangleTemplatePrefix(const TemplateDecl *ND) {
+ // <template-prefix> ::= <prefix> <template unqualified-name>
+ // ::= <template-param>
+ // ::= <substitution>
+ // <template-template-param> ::= <template-param>
+ // <substitution>
+
+ if (mangleSubstitution(ND))
+ return;
+
+ // <template-template-param> ::= <template-param>
+ if (const TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(ND)) {
+ mangleTemplateParameter(TTP->getIndex());
+ return;
+ }
+
+ manglePrefix(getEffectiveDeclContext(ND));
+ mangleUnqualifiedName(ND->getTemplatedDecl());
+ addSubstitution(ND);
+}
+
+/// Mangles a template name under the production <type>. Required for
+/// template template arguments.
+/// <type> ::= <class-enum-type>
+/// ::= <template-param>
+/// ::= <substitution>
+void CXXNameMangler::mangleType(TemplateName TN) {
+ if (mangleSubstitution(TN))
+ return;
+
+ TemplateDecl *TD = 0;
+
+ switch (TN.getKind()) {
+ case TemplateName::QualifiedTemplate:
+ TD = TN.getAsQualifiedTemplateName()->getTemplateDecl();
+ goto HaveDecl;
+
+ case TemplateName::Template:
+ TD = TN.getAsTemplateDecl();
+ goto HaveDecl;
+
+ HaveDecl:
+ if (isa<TemplateTemplateParmDecl>(TD))
+ mangleTemplateParameter(cast<TemplateTemplateParmDecl>(TD)->getIndex());
+ else
+ mangleName(TD);
+ break;
+
+ case TemplateName::OverloadedTemplate:
+ llvm_unreachable("can't mangle an overloaded template name as a <type>");
+
+ case TemplateName::DependentTemplate: {
+ const DependentTemplateName *Dependent = TN.getAsDependentTemplateName();
+ assert(Dependent->isIdentifier());
+
+ // <class-enum-type> ::= <name>
+ // <name> ::= <nested-name>
+ mangleUnresolvedPrefix(Dependent->getQualifier(), 0);
+ mangleSourceName(Dependent->getIdentifier());
+ break;
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ // Substituted template parameters are mangled as the substituted
+ // template. This will check for the substitution twice, which is
+ // fine, but we have to return early so that we don't try to *add*
+ // the substitution twice.
+ SubstTemplateTemplateParmStorage *subst
+ = TN.getAsSubstTemplateTemplateParm();
+ mangleType(subst->getReplacement());
+ return;
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ // FIXME: not clear how to mangle this!
+ // template <template <class> class T...> class A {
+ // template <template <class> class U...> void foo(B<T,U> x...);
+ // };
+ Out << "_SUBSTPACK_";
+ break;
+ }
+ }
+
+ addSubstitution(TN);
+}
+
+void
+CXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO, unsigned Arity) {
+ switch (OO) {
+ // <operator-name> ::= nw # new
+ case OO_New: Out << "nw"; break;
+ // ::= na # new[]
+ case OO_Array_New: Out << "na"; break;
+ // ::= dl # delete
+ case OO_Delete: Out << "dl"; break;
+ // ::= da # delete[]
+ case OO_Array_Delete: Out << "da"; break;
+ // ::= ps # + (unary)
+ // ::= pl # + (binary or unknown)
+ case OO_Plus:
+ Out << (Arity == 1? "ps" : "pl"); break;
+ // ::= ng # - (unary)
+ // ::= mi # - (binary or unknown)
+ case OO_Minus:
+ Out << (Arity == 1? "ng" : "mi"); break;
+ // ::= ad # & (unary)
+ // ::= an # & (binary or unknown)
+ case OO_Amp:
+ Out << (Arity == 1? "ad" : "an"); break;
+ // ::= de # * (unary)
+ // ::= ml # * (binary or unknown)
+ case OO_Star:
+ // Use binary when unknown.
+ Out << (Arity == 1? "de" : "ml"); break;
+ // ::= co # ~
+ case OO_Tilde: Out << "co"; break;
+ // ::= dv # /
+ case OO_Slash: Out << "dv"; break;
+ // ::= rm # %
+ case OO_Percent: Out << "rm"; break;
+ // ::= or # |
+ case OO_Pipe: Out << "or"; break;
+ // ::= eo # ^
+ case OO_Caret: Out << "eo"; break;
+ // ::= aS # =
+ case OO_Equal: Out << "aS"; break;
+ // ::= pL # +=
+ case OO_PlusEqual: Out << "pL"; break;
+ // ::= mI # -=
+ case OO_MinusEqual: Out << "mI"; break;
+ // ::= mL # *=
+ case OO_StarEqual: Out << "mL"; break;
+ // ::= dV # /=
+ case OO_SlashEqual: Out << "dV"; break;
+ // ::= rM # %=
+ case OO_PercentEqual: Out << "rM"; break;
+ // ::= aN # &=
+ case OO_AmpEqual: Out << "aN"; break;
+ // ::= oR # |=
+ case OO_PipeEqual: Out << "oR"; break;
+ // ::= eO # ^=
+ case OO_CaretEqual: Out << "eO"; break;
+ // ::= ls # <<
+ case OO_LessLess: Out << "ls"; break;
+ // ::= rs # >>
+ case OO_GreaterGreater: Out << "rs"; break;
+ // ::= lS # <<=
+ case OO_LessLessEqual: Out << "lS"; break;
+ // ::= rS # >>=
+ case OO_GreaterGreaterEqual: Out << "rS"; break;
+ // ::= eq # ==
+ case OO_EqualEqual: Out << "eq"; break;
+ // ::= ne # !=
+ case OO_ExclaimEqual: Out << "ne"; break;
+ // ::= lt # <
+ case OO_Less: Out << "lt"; break;
+ // ::= gt # >
+ case OO_Greater: Out << "gt"; break;
+ // ::= le # <=
+ case OO_LessEqual: Out << "le"; break;
+ // ::= ge # >=
+ case OO_GreaterEqual: Out << "ge"; break;
+ // ::= nt # !
+ case OO_Exclaim: Out << "nt"; break;
+ // ::= aa # &&
+ case OO_AmpAmp: Out << "aa"; break;
+ // ::= oo # ||
+ case OO_PipePipe: Out << "oo"; break;
+ // ::= pp # ++
+ case OO_PlusPlus: Out << "pp"; break;
+ // ::= mm # --
+ case OO_MinusMinus: Out << "mm"; break;
+ // ::= cm # ,
+ case OO_Comma: Out << "cm"; break;
+ // ::= pm # ->*
+ case OO_ArrowStar: Out << "pm"; break;
+ // ::= pt # ->
+ case OO_Arrow: Out << "pt"; break;
+ // ::= cl # ()
+ case OO_Call: Out << "cl"; break;
+ // ::= ix # []
+ case OO_Subscript: Out << "ix"; break;
+
+ // ::= qu # ?
+ // The conditional operator can't be overloaded, but we still handle it when
+ // mangling expressions.
+ case OO_Conditional: Out << "qu"; break;
+
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ llvm_unreachable("Not an overloaded operator");
+ }
+}
+
+void CXXNameMangler::mangleQualifiers(Qualifiers Quals) {
+ // <CV-qualifiers> ::= [r] [V] [K] # restrict (C99), volatile, const
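+  //
+  // For example, a 'const volatile int' is mangled as 'VKi'.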
+ if (Quals.hasRestrict())
+ Out << 'r';
+ if (Quals.hasVolatile())
+ Out << 'V';
+ if (Quals.hasConst())
+ Out << 'K';
+
+ if (Quals.hasAddressSpace()) {
+ // Extension:
+ //
+ // <type> ::= U <address-space-number>
+ //
+ // where <address-space-number> is a source name consisting of 'AS'
+ // followed by the address space <number>.
+ SmallString<64> ASString;
+ ASString = "AS" + llvm::utostr_32(Quals.getAddressSpace());
+ Out << 'U' << ASString.size() << ASString;
+ }
+
+ StringRef LifetimeName;
+ switch (Quals.getObjCLifetime()) {
+ // Objective-C ARC Extension:
+ //
+ // <type> ::= U "__strong"
+ // <type> ::= U "__weak"
+ // <type> ::= U "__autoreleasing"
+ case Qualifiers::OCL_None:
+ break;
+
+ case Qualifiers::OCL_Weak:
+ LifetimeName = "__weak";
+ break;
+
+ case Qualifiers::OCL_Strong:
+ LifetimeName = "__strong";
+ break;
+
+ case Qualifiers::OCL_Autoreleasing:
+ LifetimeName = "__autoreleasing";
+ break;
+
+ case Qualifiers::OCL_ExplicitNone:
+ // The __unsafe_unretained qualifier is *not* mangled, so that
+ // __unsafe_unretained types in ARC produce the same manglings as the
+ // equivalent (but, naturally, unqualified) types in non-ARC, providing
+ // better ABI compatibility.
+ //
+ // It's safe to do this because unqualified 'id' won't show up
+ // in any type signatures that need to be mangled.
+ break;
+ }
+ if (!LifetimeName.empty())
+ Out << 'U' << LifetimeName.size() << LifetimeName;
+}
+
+void CXXNameMangler::mangleRefQualifier(RefQualifierKind RefQualifier) {
+ // <ref-qualifier> ::= R # lvalue reference
+ // ::= O # rvalue-reference
+ // Proposal to Itanium C++ ABI list on 1/26/11
+ switch (RefQualifier) {
+ case RQ_None:
+ break;
+
+ case RQ_LValue:
+ Out << 'R';
+ break;
+
+ case RQ_RValue:
+ Out << 'O';
+ break;
+ }
+}
+
+void CXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
+ Context.mangleObjCMethodName(MD, Out);
+}
+
+void CXXNameMangler::mangleType(QualType T) {
+ // If our type is instantiation-dependent but not dependent, we mangle
+ // it as it was written in the source, removing any top-level sugar.
+ // Otherwise, use the canonical type.
+ //
+ // FIXME: This is an approximation of the instantiation-dependent name
+ // mangling rules, since we should really be using the type as written and
+ // augmented via semantic analysis (i.e., with implicit conversions and
+ // default template arguments) for any instantiation-dependent type.
+ // Unfortunately, that requires several changes to our AST:
+ // - Instantiation-dependent TemplateSpecializationTypes will need to be
+ // uniqued, so that we can handle substitutions properly
+ // - Default template arguments will need to be represented in the
+ // TemplateSpecializationType, since they need to be mangled even though
+ // they aren't written.
+ // - Conversions on non-type template arguments need to be expressed, since
+ // they can affect the mangling of sizeof/alignof.
+ if (!T->isInstantiationDependentType() || T->isDependentType())
+ T = T.getCanonicalType();
+ else {
+ // Desugar any types that are purely sugar.
+ do {
+ // Don't desugar through template specialization types that aren't
+ // type aliases. We need to mangle the template arguments as written.
+ if (const TemplateSpecializationType *TST
+ = dyn_cast<TemplateSpecializationType>(T))
+ if (!TST->isTypeAlias())
+ break;
+
+ QualType Desugared
+ = T.getSingleStepDesugaredType(Context.getASTContext());
+ if (Desugared == T)
+ break;
+
+ T = Desugared;
+ } while (true);
+ }
+ SplitQualType split = T.split();
+ Qualifiers quals = split.Quals;
+ const Type *ty = split.Ty;
+
+ bool isSubstitutable = quals || !isa<BuiltinType>(T);
+ if (isSubstitutable && mangleSubstitution(T))
+ return;
+
+ // If we're mangling a qualified array type, push the qualifiers to
+ // the element type.
+ if (quals && isa<ArrayType>(T)) {
+ ty = Context.getASTContext().getAsArrayType(T);
+ quals = Qualifiers();
+
+ // Note that we don't update T: we want to add the
+ // substitution at the original type.
+ }
+
+ if (quals) {
+ mangleQualifiers(quals);
+ // Recurse: even if the qualified type isn't yet substitutable,
+ // the unqualified type might be.
+ mangleType(QualType(ty, 0));
+ } else {
+ switch (ty->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT) \
+ case Type::CLASS: \
+ llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
+ return;
+#define TYPE(CLASS, PARENT) \
+ case Type::CLASS: \
+ mangleType(static_cast<const CLASS##Type*>(ty)); \
+ break;
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+
+ // Add the substitution.
+ if (isSubstitutable)
+ addSubstitution(T);
+}
+
+void CXXNameMangler::mangleNameOrStandardSubstitution(const NamedDecl *ND) {
+ if (!mangleStandardSubstitution(ND))
+ mangleName(ND);
+}
+
+void CXXNameMangler::mangleType(const BuiltinType *T) {
+ // <type> ::= <builtin-type>
+ // <builtin-type> ::= v # void
+ // ::= w # wchar_t
+ // ::= b # bool
+ // ::= c # char
+ // ::= a # signed char
+ // ::= h # unsigned char
+ // ::= s # short
+ // ::= t # unsigned short
+ // ::= i # int
+ // ::= j # unsigned int
+ // ::= l # long
+ // ::= m # unsigned long
+ // ::= x # long long, __int64
+ // ::= y # unsigned long long, __int64
+ // ::= n # __int128
+ // UNSUPPORTED: ::= o # unsigned __int128
+ // ::= f # float
+ // ::= d # double
+ // ::= e # long double, __float80
+ // UNSUPPORTED: ::= g # __float128
+ // UNSUPPORTED: ::= Dd # IEEE 754r decimal floating point (64 bits)
+ // UNSUPPORTED: ::= De # IEEE 754r decimal floating point (128 bits)
+ // UNSUPPORTED: ::= Df # IEEE 754r decimal floating point (32 bits)
+ // ::= Dh # IEEE 754r half-precision floating point (16 bits)
+ // ::= Di # char32_t
+ // ::= Ds # char16_t
+ // ::= Dn # std::nullptr_t (i.e., decltype(nullptr))
+ // ::= u <source-name> # vendor extended type
+ switch (T->getKind()) {
+ case BuiltinType::Void: Out << 'v'; break;
+ case BuiltinType::Bool: Out << 'b'; break;
+ case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'c'; break;
+ case BuiltinType::UChar: Out << 'h'; break;
+ case BuiltinType::UShort: Out << 't'; break;
+ case BuiltinType::UInt: Out << 'j'; break;
+ case BuiltinType::ULong: Out << 'm'; break;
+ case BuiltinType::ULongLong: Out << 'y'; break;
+ case BuiltinType::UInt128: Out << 'o'; break;
+ case BuiltinType::SChar: Out << 'a'; break;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U: Out << 'w'; break;
+ case BuiltinType::Char16: Out << "Ds"; break;
+ case BuiltinType::Char32: Out << "Di"; break;
+ case BuiltinType::Short: Out << 's'; break;
+ case BuiltinType::Int: Out << 'i'; break;
+ case BuiltinType::Long: Out << 'l'; break;
+ case BuiltinType::LongLong: Out << 'x'; break;
+ case BuiltinType::Int128: Out << 'n'; break;
+ case BuiltinType::Half: Out << "Dh"; break;
+ case BuiltinType::Float: Out << 'f'; break;
+ case BuiltinType::Double: Out << 'd'; break;
+ case BuiltinType::LongDouble: Out << 'e'; break;
+ case BuiltinType::NullPtr: Out << "Dn"; break;
+
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ case BuiltinType::Dependent:
+ llvm_unreachable("mangling a placeholder type");
+ case BuiltinType::ObjCId: Out << "11objc_object"; break;
+ case BuiltinType::ObjCClass: Out << "10objc_class"; break;
+ case BuiltinType::ObjCSel: Out << "13objc_selector"; break;
+ }
+}
+
+// <type> ::= <function-type>
+// <function-type> ::= F [Y] <bare-function-type> E
+void CXXNameMangler::mangleType(const FunctionProtoType *T) {
+ Out << 'F';
+ // FIXME: We don't have enough information in the AST to produce the 'Y'
+ // encoding for extern "C" function types.
+ mangleBareFunctionType(T, /*MangleReturnType=*/true);
+ Out << 'E';
+}
+void CXXNameMangler::mangleType(const FunctionNoProtoType *T) {
+ llvm_unreachable("Can't mangle K&R function prototypes");
+}
+void CXXNameMangler::mangleBareFunctionType(const FunctionType *T,
+ bool MangleReturnType) {
+ // We should never be mangling something without a prototype.
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+
+ // Record that we're in a function type. See mangleFunctionParam
+ // for details on what we're trying to achieve here.
+ FunctionTypeDepthState saved = FunctionTypeDepth.push();
+
+ // <bare-function-type> ::= <signature type>+
+ if (MangleReturnType) {
+ FunctionTypeDepth.enterResultType();
+ mangleType(Proto->getResultType());
+ FunctionTypeDepth.leaveResultType();
+ }
+
+ if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
+ // <builtin-type> ::= v # void
+ Out << 'v';
+
+ FunctionTypeDepth.pop(saved);
+ return;
+ }
+
+ for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
+ ArgEnd = Proto->arg_type_end();
+ Arg != ArgEnd; ++Arg)
+ mangleType(Context.getASTContext().getSignatureParameterType(*Arg));
+
+ FunctionTypeDepth.pop(saved);
+
+ // <builtin-type> ::= z # ellipsis
+ if (Proto->isVariadic())
+ Out << 'z';
+}
+
+// <type> ::= <class-enum-type>
+// <class-enum-type> ::= <name>
+void CXXNameMangler::mangleType(const UnresolvedUsingType *T) {
+ mangleName(T->getDecl());
+}
+
+// <type> ::= <class-enum-type>
+// <class-enum-type> ::= <name>
+void CXXNameMangler::mangleType(const EnumType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void CXXNameMangler::mangleType(const RecordType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void CXXNameMangler::mangleType(const TagType *T) {
+ mangleName(T->getDecl());
+}
+
+// <type> ::= <array-type>
+// <array-type> ::= A <positive dimension number> _ <element type>
+// ::= A [<dimension expression>] _ <element type>
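+// For example, 'int[10]' is mangled as 'A10_i'.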
+void CXXNameMangler::mangleType(const ConstantArrayType *T) {
+ Out << 'A' << T->getSize() << '_';
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const VariableArrayType *T) {
+ Out << 'A';
+  // Decayed VLA types (with no size expression) are simply skipped here.
+ if (T->getSizeExpr())
+ mangleExpression(T->getSizeExpr());
+ Out << '_';
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const DependentSizedArrayType *T) {
+ Out << 'A';
+ mangleExpression(T->getSizeExpr());
+ Out << '_';
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const IncompleteArrayType *T) {
+ Out << "A_";
+ mangleType(T->getElementType());
+}
+
+// <type> ::= <pointer-to-member-type>
+// <pointer-to-member-type> ::= M <class type> <member type>
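+// For example, the data member pointer type 'int A::*' is mangled as 'M1Ai'.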
+void CXXNameMangler::mangleType(const MemberPointerType *T) {
+ Out << 'M';
+ mangleType(QualType(T->getClass(), 0));
+ QualType PointeeType = T->getPointeeType();
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
+ mangleQualifiers(Qualifiers::fromCVRMask(FPT->getTypeQuals()));
+ mangleRefQualifier(FPT->getRefQualifier());
+ mangleType(FPT);
+
+ // Itanium C++ ABI 5.1.8:
+ //
+ // The type of a non-static member function is considered to be different,
+ // for the purposes of substitution, from the type of a namespace-scope or
+ // static member function whose type appears similar. The types of two
+ // non-static member functions are considered to be different, for the
+ // purposes of substitution, if the functions are members of different
+ // classes. In other words, for the purposes of substitution, the class of
+ // which the function is a member is considered part of the type of
+ // function.
+
+ // We increment the SeqID here to emulate adding an entry to the
+ // substitution table. We can't actually add it because we don't want this
+ // particular function type to be substituted.
+ ++SeqID;
+ } else
+ mangleType(PointeeType);
+}
+
+// <type> ::= <template-param>
+void CXXNameMangler::mangleType(const TemplateTypeParmType *T) {
+ mangleTemplateParameter(T->getIndex());
+}
+
+// <type> ::= <template-param>
+void CXXNameMangler::mangleType(const SubstTemplateTypeParmPackType *T) {
+ // FIXME: not clear how to mangle this!
+ // template <class T...> class A {
+ // template <class U...> void foo(T(*)(U) x...);
+ // };
+ Out << "_SUBSTPACK_";
+}
+
+// <type> ::= P <type> # pointer-to
+void CXXNameMangler::mangleType(const PointerType *T) {
+ Out << 'P';
+ mangleType(T->getPointeeType());
+}
+void CXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
+ Out << 'P';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= R <type> # reference-to
+void CXXNameMangler::mangleType(const LValueReferenceType *T) {
+ Out << 'R';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= O <type> # rvalue reference-to (C++0x)
+void CXXNameMangler::mangleType(const RValueReferenceType *T) {
+ Out << 'O';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= C <type> # complex pair (C 2000)
+void CXXNameMangler::mangleType(const ComplexType *T) {
+ Out << 'C';
+ mangleType(T->getElementType());
+}
+
+// ARM's ABI for Neon vector types specifies that they should be mangled as
+// if they are structs (to match ARM's initial implementation). The
+// vector type must be one of the special types predefined by ARM.
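+// For example, the ARM Neon type 'int32x4_t' (4 x int, 128 bits) is mangled
+// as '17__simd128_int32_t'.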
+void CXXNameMangler::mangleNeonVectorType(const VectorType *T) {
+ QualType EltType = T->getElementType();
+ assert(EltType->isBuiltinType() && "Neon vector element not a BuiltinType");
+ const char *EltName = 0;
+ if (T->getVectorKind() == VectorType::NeonPolyVector) {
+ switch (cast<BuiltinType>(EltType)->getKind()) {
+ case BuiltinType::SChar: EltName = "poly8_t"; break;
+ case BuiltinType::Short: EltName = "poly16_t"; break;
+ default: llvm_unreachable("unexpected Neon polynomial vector element type");
+ }
+ } else {
+ switch (cast<BuiltinType>(EltType)->getKind()) {
+ case BuiltinType::SChar: EltName = "int8_t"; break;
+ case BuiltinType::UChar: EltName = "uint8_t"; break;
+ case BuiltinType::Short: EltName = "int16_t"; break;
+ case BuiltinType::UShort: EltName = "uint16_t"; break;
+ case BuiltinType::Int: EltName = "int32_t"; break;
+ case BuiltinType::UInt: EltName = "uint32_t"; break;
+ case BuiltinType::LongLong: EltName = "int64_t"; break;
+ case BuiltinType::ULongLong: EltName = "uint64_t"; break;
+ case BuiltinType::Float: EltName = "float32_t"; break;
+ default: llvm_unreachable("unexpected Neon vector element type");
+ }
+ }
+ const char *BaseName = 0;
+ unsigned BitSize = (T->getNumElements() *
+ getASTContext().getTypeSize(EltType));
+ if (BitSize == 64)
+ BaseName = "__simd64_";
+ else {
+ assert(BitSize == 128 && "Neon vector type not 64 or 128 bits");
+ BaseName = "__simd128_";
+ }
+ Out << strlen(BaseName) + strlen(EltName);
+ Out << BaseName << EltName;
+}
+
+// GNU extension: vector types
+// <type> ::= <vector-type>
+// <vector-type> ::= Dv <positive dimension number> _
+// <extended element type>
+// ::= Dv [<dimension expression>] _ <element type>
+// <extended element type> ::= <element type>
+// ::= p # AltiVec vector pixel
+void CXXNameMangler::mangleType(const VectorType *T) {
+ if ((T->getVectorKind() == VectorType::NeonVector ||
+ T->getVectorKind() == VectorType::NeonPolyVector)) {
+ mangleNeonVectorType(T);
+ return;
+ }
+ Out << "Dv" << T->getNumElements() << '_';
+ if (T->getVectorKind() == VectorType::AltiVecPixel)
+ Out << 'p';
+ else if (T->getVectorKind() == VectorType::AltiVecBool)
+ Out << 'b';
+ else
+ mangleType(T->getElementType());
+}
+void CXXNameMangler::mangleType(const ExtVectorType *T) {
+ mangleType(static_cast<const VectorType*>(T));
+}
+void CXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
+ Out << "Dv";
+ mangleExpression(T->getSizeExpr());
+ Out << '_';
+ mangleType(T->getElementType());
+}
+
+void CXXNameMangler::mangleType(const PackExpansionType *T) {
+ // <type> ::= Dp <type> # pack expansion (C++0x)
+ Out << "Dp";
+ mangleType(T->getPattern());
+}
+
+void CXXNameMangler::mangleType(const ObjCInterfaceType *T) {
+ mangleSourceName(T->getDecl()->getIdentifier());
+}
+
+void CXXNameMangler::mangleType(const ObjCObjectType *T) {
+ // We don't allow overloading by different protocol qualification,
+ // so mangling them isn't necessary.
+ mangleType(T->getBaseType());
+}
+
+void CXXNameMangler::mangleType(const BlockPointerType *T) {
+ Out << "U13block_pointer";
+ mangleType(T->getPointeeType());
+}
+
+void CXXNameMangler::mangleType(const InjectedClassNameType *T) {
+ // Mangle injected class name types as if the user had written the
+ // specialization out fully. It may not actually be possible to see
+ // this mangling, though.
+ mangleType(T->getInjectedSpecializationType());
+}
+
+void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
+ if (TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl()) {
+ mangleName(TD, T->getArgs(), T->getNumArgs());
+ } else {
+ if (mangleSubstitution(QualType(T, 0)))
+ return;
+
+ mangleTemplatePrefix(T->getTemplateName());
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(T->getTemplateName(), T->getArgs(), T->getNumArgs());
+ addSubstitution(QualType(T, 0));
+ }
+}
+
+void CXXNameMangler::mangleType(const DependentNameType *T) {
+ // Typename types are always nested
+ Out << 'N';
+ manglePrefix(T->getQualifier());
+ mangleSourceName(T->getIdentifier());
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleType(const DependentTemplateSpecializationType *T) {
+ // Dependently-scoped template types are nested if they have a prefix.
+ Out << 'N';
+
+ // TODO: avoid making this TemplateName.
+ TemplateName Prefix =
+ getASTContext().getDependentTemplateName(T->getQualifier(),
+ T->getIdentifier());
+ mangleTemplatePrefix(Prefix);
+
+ // FIXME: GCC does not appear to mangle the template arguments when
+ // the template in question is a dependent template name. Should we
+ // emulate that badness?
+ mangleTemplateArgs(Prefix, T->getArgs(), T->getNumArgs());
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleType(const TypeOfType *T) {
+ // FIXME: this is pretty unsatisfactory, but there isn't an obvious
+ // "extension with parameters" mangling.
+ Out << "u6typeof";
+}
+
+void CXXNameMangler::mangleType(const TypeOfExprType *T) {
+ // FIXME: this is pretty unsatisfactory, but there isn't an obvious
+ // "extension with parameters" mangling.
+ Out << "u6typeof";
+}
+
+void CXXNameMangler::mangleType(const DecltypeType *T) {
+ Expr *E = T->getUnderlyingExpr();
+
+ // type ::= Dt <expression> E # decltype of an id-expression
+ // # or class member access
+ // ::= DT <expression> E # decltype of an expression
+
+ // This purports to be an exhaustive list of id-expressions and
+ // class member accesses. Note that we do not ignore parentheses;
+ // parentheses change the semantics of decltype for these
+ // expressions (and cause the mangler to use the other form).
+ if (isa<DeclRefExpr>(E) ||
+ isa<MemberExpr>(E) ||
+ isa<UnresolvedLookupExpr>(E) ||
+ isa<DependentScopeDeclRefExpr>(E) ||
+ isa<CXXDependentScopeMemberExpr>(E) ||
+ isa<UnresolvedMemberExpr>(E))
+ Out << "Dt";
+ else
+ Out << "DT";
+ mangleExpression(E);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleType(const UnaryTransformType *T) {
+ // If this is dependent, we need to record that. If not, we simply
+ // mangle it as the underlying type since they are equivalent.
+ if (T->isDependentType()) {
+ Out << 'U';
+
+ switch (T->getUTTKind()) {
+ case UnaryTransformType::EnumUnderlyingType:
+ Out << "3eut";
+ break;
+ }
+ }
+
+ mangleType(T->getUnderlyingType());
+}
+
+void CXXNameMangler::mangleType(const AutoType *T) {
+ QualType D = T->getDeducedType();
+ // <builtin-type> ::= Da # dependent auto
+ if (D.isNull())
+ Out << "Da";
+ else
+ mangleType(D);
+}
+
+void CXXNameMangler::mangleType(const AtomicType *T) {
+ // <type> ::= U <source-name> <type> # vendor extended type qualifier
+ // (Until there's a standardized mangling...)
+ Out << "U7_Atomic";
+ mangleType(T->getValueType());
+}
+
+void CXXNameMangler::mangleIntegerLiteral(QualType T,
+ const llvm::APSInt &Value) {
+ // <expr-primary> ::= L <type> <value number> E # integer literal
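+  // For example, the int literal 3 becomes 'Li3E' and 'true' becomes 'Lb1E'.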
+ Out << 'L';
+
+ mangleType(T);
+ if (T->isBooleanType()) {
+ // Boolean values are encoded as 0/1.
+ Out << (Value.getBoolValue() ? '1' : '0');
+ } else {
+ mangleNumber(Value);
+ }
+ Out << 'E';
+}
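+// Example (illustrative): an int literal 5 is emitted as "Li5E" and the bool
+// literal true as "Lb1E", following the <expr-primary> production above.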
+
+/// Mangles a member expression. Implicit accesses are not handled,
+/// but that should be okay, because you shouldn't be able to
+/// make an implicit access in a function template declaration.
+void CXXNameMangler::mangleMemberExpr(const Expr *base,
+ bool isArrow,
+ NestedNameSpecifier *qualifier,
+ NamedDecl *firstQualifierLookup,
+ DeclarationName member,
+ unsigned arity) {
+ // <expression> ::= dt <expression> <unresolved-name>
+ // ::= pt <expression> <unresolved-name>
+ Out << (isArrow ? "pt" : "dt");
+ mangleExpression(base);
+ mangleUnresolvedName(qualifier, firstQualifierLookup, member, arity);
+}
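+// Example (sketch): in a dependent context "obj.foo" is emitted as
+// "dt <expression for obj> 3foo" and "obj->foo" as "pt <expression for obj> 3foo".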
+
+/// Look at the callee of the given call expression and determine if
+/// it's a parenthesized id-expression which would have triggered ADL
+/// otherwise.
+static bool isParenthesizedADLCallee(const CallExpr *call) {
+ const Expr *callee = call->getCallee();
+ const Expr *fn = callee->IgnoreParens();
+
+ // Must be parenthesized. IgnoreParens() skips __extension__ nodes,
+ // too, but for those to appear in the callee, it would have to be
+ // parenthesized.
+ if (callee == fn) return false;
+
+ // Must be an unresolved lookup.
+ const UnresolvedLookupExpr *lookup = dyn_cast<UnresolvedLookupExpr>(fn);
+ if (!lookup) return false;
+
+ assert(!lookup->requiresADL());
+
+ // Must be an unqualified lookup.
+ if (lookup->getQualifier()) return false;
+
+ // Must not have found a class member. Note that if one is a class
+ // member, they're all class members.
+ if (lookup->getNumDecls() > 0 &&
+ (*lookup->decls_begin())->isCXXClassMember())
+ return false;
+
+ // Otherwise, ADL would have been triggered.
+ return true;
+}
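+// Example (illustrative): in a template, a call written "(g)(x)" has a
+// parenthesized, unqualified UnresolvedLookupExpr callee, so ADL is
+// suppressed and mangleExpression() below uses the "cp" form rather than "cl".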
+
+void CXXNameMangler::mangleExpression(const Expr *E, unsigned Arity) {
+ // <expression> ::= <unary operator-name> <expression>
+ // ::= <binary operator-name> <expression> <expression>
+ // ::= <trinary operator-name> <expression> <expression> <expression>
+  //              ::= cv <type> <expression>      # conversion with one argument
+ // ::= cv <type> _ <expression>* E # conversion with a different number of arguments
+ // ::= st <type> # sizeof (a type)
+ // ::= at <type> # alignof (a type)
+ // ::= <template-param>
+ // ::= <function-param>
+ // ::= sr <type> <unqualified-name> # dependent name
+ // ::= sr <type> <unqualified-name> <template-args> # dependent template-id
+ // ::= ds <expression> <expression> # expr.*expr
+ // ::= sZ <template-param> # size of a parameter pack
+ // ::= sZ <function-param> # size of a function parameter pack
+ // ::= <expr-primary>
+ // <expr-primary> ::= L <type> <value number> E # integer literal
+  //                ::= L <type> <value float> E    # floating literal
+ // ::= L <mangled-name> E # external name
+ QualType ImplicitlyConvertedToType;
+
+recurse:
+ switch (E->getStmtClass()) {
+ case Expr::NoStmtClass:
+#define ABSTRACT_STMT(Type)
+#define EXPR(Type, Base)
+#define STMT(Type, Base) \
+ case Expr::Type##Class:
+#include "clang/AST/StmtNodes.inc"
+ // fallthrough
+
+ // These all can only appear in local or variable-initialization
+ // contexts and so should never appear in a mangling.
+ case Expr::AddrLabelExprClass:
+ case Expr::CXXThisExprClass:
+ case Expr::DesignatedInitExprClass:
+ case Expr::ImplicitValueInitExprClass:
+ case Expr::ParenListExprClass:
+ case Expr::LambdaExprClass:
+ llvm_unreachable("unexpected statement kind");
+
+ // FIXME: invent manglings for all these.
+ case Expr::BlockExprClass:
+ case Expr::CXXPseudoDestructorExprClass:
+ case Expr::ChooseExprClass:
+ case Expr::CompoundLiteralExprClass:
+ case Expr::ExtVectorElementExprClass:
+ case Expr::GenericSelectionExprClass:
+ case Expr::ObjCEncodeExprClass:
+ case Expr::ObjCIsaExprClass:
+ case Expr::ObjCIvarRefExprClass:
+ case Expr::ObjCMessageExprClass:
+ case Expr::ObjCPropertyRefExprClass:
+ case Expr::ObjCProtocolExprClass:
+ case Expr::ObjCSelectorExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCNumericLiteralClass:
+ case Expr::ObjCArrayLiteralClass:
+ case Expr::ObjCDictionaryLiteralClass:
+ case Expr::ObjCSubscriptRefExprClass:
+ case Expr::ObjCIndirectCopyRestoreExprClass:
+ case Expr::OffsetOfExprClass:
+ case Expr::PredefinedExprClass:
+ case Expr::ShuffleVectorExprClass:
+ case Expr::StmtExprClass:
+ case Expr::UnaryTypeTraitExprClass:
+ case Expr::BinaryTypeTraitExprClass:
+ case Expr::TypeTraitExprClass:
+ case Expr::ArrayTypeTraitExprClass:
+ case Expr::ExpressionTraitExprClass:
+ case Expr::VAArgExprClass:
+ case Expr::CXXUuidofExprClass:
+ case Expr::CXXNoexceptExprClass:
+ case Expr::CUDAKernelCallExprClass:
+ case Expr::AsTypeExprClass:
+ case Expr::PseudoObjectExprClass:
+ case Expr::AtomicExprClass:
+ {
+ // As bad as this diagnostic is, it's better than crashing.
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot yet mangle expression type %0");
+ Diags.Report(E->getExprLoc(), DiagID)
+ << E->getStmtClassName() << E->getSourceRange();
+ break;
+ }
+
+ // Even gcc-4.5 doesn't mangle this.
+ case Expr::BinaryConditionalOperatorClass: {
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID =
+ Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "?: operator with omitted middle operand cannot be mangled");
+ Diags.Report(E->getExprLoc(), DiagID)
+ << E->getStmtClassName() << E->getSourceRange();
+ break;
+ }
+
+ // These are used for internal purposes and cannot be meaningfully mangled.
+ case Expr::OpaqueValueExprClass:
+ llvm_unreachable("cannot mangle opaque value; mangling wrong thing?");
+
+ case Expr::InitListExprClass: {
+ // Proposal by Jason Merrill, 2012-01-03
+ Out << "il";
+ const InitListExpr *InitList = cast<InitListExpr>(E);
+ for (unsigned i = 0, e = InitList->getNumInits(); i != e; ++i)
+ mangleExpression(InitList->getInit(i));
+ Out << "E";
+ break;
+ }
+
+ case Expr::CXXDefaultArgExprClass:
+ mangleExpression(cast<CXXDefaultArgExpr>(E)->getExpr(), Arity);
+ break;
+
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ mangleExpression(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement(),
+ Arity);
+ break;
+
+ case Expr::UserDefinedLiteralClass:
+ // We follow g++'s approach of mangling a UDL as a call to the literal
+ // operator.
+ case Expr::CXXMemberCallExprClass: // fallthrough
+ case Expr::CallExprClass: {
+ const CallExpr *CE = cast<CallExpr>(E);
+
+ // <expression> ::= cp <simple-id> <expression>* E
+ // We use this mangling only when the call would use ADL except
+ // for being parenthesized. Per discussion with David
+    // Vandevoorde, 2011.04.25.
+ if (isParenthesizedADLCallee(CE)) {
+ Out << "cp";
+ // The callee here is a parenthesized UnresolvedLookupExpr with
+ // no qualifier and should always get mangled as a <simple-id>
+ // anyway.
+
+ // <expression> ::= cl <expression>* E
+ } else {
+ Out << "cl";
+ }
+
+ mangleExpression(CE->getCallee(), CE->getNumArgs());
+ for (unsigned I = 0, N = CE->getNumArgs(); I != N; ++I)
+ mangleExpression(CE->getArg(I));
+ Out << 'E';
+ break;
+ }
+
+ case Expr::CXXNewExprClass: {
+ const CXXNewExpr *New = cast<CXXNewExpr>(E);
+ if (New->isGlobalNew()) Out << "gs";
+ Out << (New->isArray() ? "na" : "nw");
+ for (CXXNewExpr::const_arg_iterator I = New->placement_arg_begin(),
+ E = New->placement_arg_end(); I != E; ++I)
+ mangleExpression(*I);
+ Out << '_';
+ mangleType(New->getAllocatedType());
+ if (New->hasInitializer()) {
+ // Proposal by Jason Merrill, 2012-01-03
+ if (New->getInitializationStyle() == CXXNewExpr::ListInit)
+ Out << "il";
+ else
+ Out << "pi";
+ const Expr *Init = New->getInitializer();
+ if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init)) {
+ // Directly inline the initializers.
+ for (CXXConstructExpr::const_arg_iterator I = CCE->arg_begin(),
+ E = CCE->arg_end();
+ I != E; ++I)
+ mangleExpression(*I);
+ } else if (const ParenListExpr *PLE = dyn_cast<ParenListExpr>(Init)) {
+ for (unsigned i = 0, e = PLE->getNumExprs(); i != e; ++i)
+ mangleExpression(PLE->getExpr(i));
+ } else if (New->getInitializationStyle() == CXXNewExpr::ListInit &&
+ isa<InitListExpr>(Init)) {
+ // Only take InitListExprs apart for list-initialization.
+ const InitListExpr *InitList = cast<InitListExpr>(Init);
+ for (unsigned i = 0, e = InitList->getNumInits(); i != e; ++i)
+ mangleExpression(InitList->getInit(i));
+ } else
+ mangleExpression(Init);
+ }
+ Out << 'E';
+ break;
+ }
+
+ case Expr::MemberExprClass: {
+ const MemberExpr *ME = cast<MemberExpr>(E);
+ mangleMemberExpr(ME->getBase(), ME->isArrow(),
+ ME->getQualifier(), 0, ME->getMemberDecl()->getDeclName(),
+ Arity);
+ break;
+ }
+
+ case Expr::UnresolvedMemberExprClass: {
+ const UnresolvedMemberExpr *ME = cast<UnresolvedMemberExpr>(E);
+ mangleMemberExpr(ME->getBase(), ME->isArrow(),
+ ME->getQualifier(), 0, ME->getMemberName(),
+ Arity);
+ if (ME->hasExplicitTemplateArgs())
+ mangleTemplateArgs(ME->getExplicitTemplateArgs());
+ break;
+ }
+
+ case Expr::CXXDependentScopeMemberExprClass: {
+ const CXXDependentScopeMemberExpr *ME
+ = cast<CXXDependentScopeMemberExpr>(E);
+ mangleMemberExpr(ME->getBase(), ME->isArrow(),
+ ME->getQualifier(), ME->getFirstQualifierFoundInScope(),
+ ME->getMember(), Arity);
+ if (ME->hasExplicitTemplateArgs())
+ mangleTemplateArgs(ME->getExplicitTemplateArgs());
+ break;
+ }
+
+ case Expr::UnresolvedLookupExprClass: {
+ const UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(E);
+ mangleUnresolvedName(ULE->getQualifier(), 0, ULE->getName(), Arity);
+
+ // All the <unresolved-name> productions end in a
+ // base-unresolved-name, where <template-args> are just tacked
+ // onto the end.
+ if (ULE->hasExplicitTemplateArgs())
+ mangleTemplateArgs(ULE->getExplicitTemplateArgs());
+ break;
+ }
+
+ case Expr::CXXUnresolvedConstructExprClass: {
+ const CXXUnresolvedConstructExpr *CE = cast<CXXUnresolvedConstructExpr>(E);
+ unsigned N = CE->arg_size();
+
+ Out << "cv";
+ mangleType(CE->getType());
+ if (N != 1) Out << '_';
+ for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
+ if (N != 1) Out << 'E';
+ break;
+ }
+
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXConstructExprClass: {
+ const CXXConstructExpr *CE = cast<CXXConstructExpr>(E);
+ unsigned N = CE->getNumArgs();
+
+ // Proposal by Jason Merrill, 2012-01-03
+ if (CE->isListInitialization())
+ Out << "tl";
+ else
+ Out << "cv";
+ mangleType(CE->getType());
+ if (N != 1) Out << '_';
+ for (unsigned I = 0; I != N; ++I) mangleExpression(CE->getArg(I));
+ if (N != 1) Out << 'E';
+ break;
+ }
+
+ case Expr::CXXScalarValueInitExprClass:
+    Out << "cv";
+    mangleType(E->getType());
+    Out << "_E";
+ break;
+
+ case Expr::UnaryExprOrTypeTraitExprClass: {
+ const UnaryExprOrTypeTraitExpr *SAE = cast<UnaryExprOrTypeTraitExpr>(E);
+
+ if (!SAE->isInstantiationDependent()) {
+ // Itanium C++ ABI:
+ // If the operand of a sizeof or alignof operator is not
+ // instantiation-dependent it is encoded as an integer literal
+ // reflecting the result of the operator.
+ //
+ // If the result of the operator is implicitly converted to a known
+ // integer type, that type is used for the literal; otherwise, the type
+ // of std::size_t or std::ptrdiff_t is used.
+ QualType T = (ImplicitlyConvertedToType.isNull() ||
+ !ImplicitlyConvertedToType->isIntegerType())? SAE->getType()
+ : ImplicitlyConvertedToType;
+ llvm::APSInt V = SAE->EvaluateKnownConstInt(Context.getASTContext());
+ mangleIntegerLiteral(T, V);
+ break;
+ }
+
+ switch(SAE->getKind()) {
+ case UETT_SizeOf:
+ Out << 's';
+ break;
+ case UETT_AlignOf:
+ Out << 'a';
+ break;
+ case UETT_VecStep:
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot yet mangle vec_step expression");
+ Diags.Report(DiagID);
+ return;
+ }
+ if (SAE->isArgumentType()) {
+ Out << 't';
+ mangleType(SAE->getArgumentType());
+ } else {
+ Out << 'z';
+ mangleExpression(SAE->getArgumentExpr());
+ }
+ break;
+ }
+
+ case Expr::CXXThrowExprClass: {
+ const CXXThrowExpr *TE = cast<CXXThrowExpr>(E);
+
+    // Proposal from David Vandevoorde, 2010.06.30
+ if (TE->getSubExpr()) {
+ Out << "tw";
+ mangleExpression(TE->getSubExpr());
+ } else {
+ Out << "tr";
+ }
+ break;
+ }
+
+ case Expr::CXXTypeidExprClass: {
+ const CXXTypeidExpr *TIE = cast<CXXTypeidExpr>(E);
+
+    // Proposal from David Vandevoorde, 2010.06.30
+ if (TIE->isTypeOperand()) {
+ Out << "ti";
+ mangleType(TIE->getTypeOperand());
+ } else {
+ Out << "te";
+ mangleExpression(TIE->getExprOperand());
+ }
+ break;
+ }
+
+ case Expr::CXXDeleteExprClass: {
+ const CXXDeleteExpr *DE = cast<CXXDeleteExpr>(E);
+
+    // Proposal from David Vandevoorde, 2010.06.30
+ if (DE->isGlobalDelete()) Out << "gs";
+ Out << (DE->isArrayForm() ? "da" : "dl");
+ mangleExpression(DE->getArgument());
+ break;
+ }
+
+ case Expr::UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(E);
+ mangleOperatorName(UnaryOperator::getOverloadedOperator(UO->getOpcode()),
+ /*Arity=*/1);
+ mangleExpression(UO->getSubExpr());
+ break;
+ }
+
+ case Expr::ArraySubscriptExprClass: {
+ const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(E);
+
+ // Array subscript is treated as a syntactically weird form of
+ // binary operator.
+ Out << "ix";
+ mangleExpression(AE->getLHS());
+ mangleExpression(AE->getRHS());
+ break;
+ }
+
+ case Expr::CompoundAssignOperatorClass: // fallthrough
+ case Expr::BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(E);
+ if (BO->getOpcode() == BO_PtrMemD)
+ Out << "ds";
+ else
+ mangleOperatorName(BinaryOperator::getOverloadedOperator(BO->getOpcode()),
+ /*Arity=*/2);
+ mangleExpression(BO->getLHS());
+ mangleExpression(BO->getRHS());
+ break;
+ }
+
+ case Expr::ConditionalOperatorClass: {
+ const ConditionalOperator *CO = cast<ConditionalOperator>(E);
+ mangleOperatorName(OO_Conditional, /*Arity=*/3);
+ mangleExpression(CO->getCond());
+ mangleExpression(CO->getLHS(), Arity);
+ mangleExpression(CO->getRHS(), Arity);
+ break;
+ }
+
+ case Expr::ImplicitCastExprClass: {
+ ImplicitlyConvertedToType = E->getType();
+ E = cast<ImplicitCastExpr>(E)->getSubExpr();
+ goto recurse;
+ }
+
+ case Expr::ObjCBridgedCastExprClass: {
+ // Mangle ownership casts as a vendor extended operator __bridge,
+ // __bridge_transfer, or __bridge_retain.
+ StringRef Kind = cast<ObjCBridgedCastExpr>(E)->getBridgeKindName();
+ Out << "v1U" << Kind.size() << Kind;
+ }
+ // Fall through to mangle the cast itself.
+
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ case Expr::CXXFunctionalCastExprClass: {
+ const ExplicitCastExpr *ECE = cast<ExplicitCastExpr>(E);
+ Out << "cv";
+ mangleType(ECE->getType());
+ mangleExpression(ECE->getSubExpr());
+ break;
+ }
+
+ case Expr::CXXOperatorCallExprClass: {
+ const CXXOperatorCallExpr *CE = cast<CXXOperatorCallExpr>(E);
+ unsigned NumArgs = CE->getNumArgs();
+ mangleOperatorName(CE->getOperator(), /*Arity=*/NumArgs);
+ // Mangle the arguments.
+ for (unsigned i = 0; i != NumArgs; ++i)
+ mangleExpression(CE->getArg(i));
+ break;
+ }
+
+ case Expr::ParenExprClass:
+ mangleExpression(cast<ParenExpr>(E)->getSubExpr(), Arity);
+ break;
+
+ case Expr::DeclRefExprClass: {
+ const NamedDecl *D = cast<DeclRefExpr>(E)->getDecl();
+
+ switch (D->getKind()) {
+ default:
+ // <expr-primary> ::= L <mangled-name> E # external name
+ Out << 'L';
+ mangle(D, "_Z");
+ Out << 'E';
+ break;
+
+ case Decl::ParmVar:
+ mangleFunctionParam(cast<ParmVarDecl>(D));
+ break;
+
+ case Decl::EnumConstant: {
+ const EnumConstantDecl *ED = cast<EnumConstantDecl>(D);
+ mangleIntegerLiteral(ED->getType(), ED->getInitVal());
+ break;
+ }
+
+ case Decl::NonTypeTemplateParm: {
+ const NonTypeTemplateParmDecl *PD = cast<NonTypeTemplateParmDecl>(D);
+ mangleTemplateParameter(PD->getIndex());
+ break;
+ }
+
+ }
+
+ break;
+ }
+
+ case Expr::SubstNonTypeTemplateParmPackExprClass:
+ // FIXME: not clear how to mangle this!
+ // template <unsigned N...> class A {
+ // template <class U...> void foo(U (&x)[N]...);
+ // };
+ Out << "_SUBSTPACK_";
+ break;
+
+ case Expr::DependentScopeDeclRefExprClass: {
+ const DependentScopeDeclRefExpr *DRE = cast<DependentScopeDeclRefExpr>(E);
+ mangleUnresolvedName(DRE->getQualifier(), 0, DRE->getDeclName(), Arity);
+
+ // All the <unresolved-name> productions end in a
+ // base-unresolved-name, where <template-args> are just tacked
+ // onto the end.
+ if (DRE->hasExplicitTemplateArgs())
+ mangleTemplateArgs(DRE->getExplicitTemplateArgs());
+ break;
+ }
+
+ case Expr::CXXBindTemporaryExprClass:
+ mangleExpression(cast<CXXBindTemporaryExpr>(E)->getSubExpr());
+ break;
+
+ case Expr::ExprWithCleanupsClass:
+ mangleExpression(cast<ExprWithCleanups>(E)->getSubExpr(), Arity);
+ break;
+
+ case Expr::FloatingLiteralClass: {
+ const FloatingLiteral *FL = cast<FloatingLiteral>(E);
+ Out << 'L';
+ mangleType(FL->getType());
+ mangleFloat(FL->getValue());
+ Out << 'E';
+ break;
+ }
+
+ case Expr::CharacterLiteralClass:
+ Out << 'L';
+ mangleType(E->getType());
+ Out << cast<CharacterLiteral>(E)->getValue();
+ Out << 'E';
+ break;
+
+  // FIXME: __objc_yes/__objc_no are mangled the same as true/false.
+ case Expr::ObjCBoolLiteralExprClass:
+ Out << "Lb";
+ Out << (cast<ObjCBoolLiteralExpr>(E)->getValue() ? '1' : '0');
+ Out << 'E';
+ break;
+
+ case Expr::CXXBoolLiteralExprClass:
+ Out << "Lb";
+ Out << (cast<CXXBoolLiteralExpr>(E)->getValue() ? '1' : '0');
+ Out << 'E';
+ break;
+
+ case Expr::IntegerLiteralClass: {
+ llvm::APSInt Value(cast<IntegerLiteral>(E)->getValue());
+ if (E->getType()->isSignedIntegerType())
+ Value.setIsSigned(true);
+ mangleIntegerLiteral(E->getType(), Value);
+ break;
+ }
+
+ case Expr::ImaginaryLiteralClass: {
+ const ImaginaryLiteral *IE = cast<ImaginaryLiteral>(E);
+ // Mangle as if a complex literal.
+ // Proposal from David Vandevoorde, 2010.06.30.
+ Out << 'L';
+ mangleType(E->getType());
+ if (const FloatingLiteral *Imag =
+ dyn_cast<FloatingLiteral>(IE->getSubExpr())) {
+ // Mangle a floating-point zero of the appropriate type.
+ mangleFloat(llvm::APFloat(Imag->getValue().getSemantics()));
+ Out << '_';
+ mangleFloat(Imag->getValue());
+ } else {
+ Out << "0_";
+ llvm::APSInt Value(cast<IntegerLiteral>(IE->getSubExpr())->getValue());
+ if (IE->getSubExpr()->getType()->isSignedIntegerType())
+ Value.setIsSigned(true);
+ mangleNumber(Value);
+ }
+ Out << 'E';
+ break;
+ }
+
+ case Expr::StringLiteralClass: {
+    // Revised proposal from David Vandevoorde, 2010.07.15.
+ Out << 'L';
+ assert(isa<ConstantArrayType>(E->getType()));
+ mangleType(E->getType());
+ Out << 'E';
+ break;
+ }
+
+ case Expr::GNUNullExprClass:
+ // FIXME: should this really be mangled the same as nullptr?
+ // fallthrough
+
+ case Expr::CXXNullPtrLiteralExprClass: {
+    // Proposal from David Vandevoorde, 2010.06.30, as
+ // modified by ABI list discussion.
+ Out << "LDnE";
+ break;
+ }
+
+ case Expr::PackExpansionExprClass:
+ Out << "sp";
+ mangleExpression(cast<PackExpansionExpr>(E)->getPattern());
+ break;
+
+ case Expr::SizeOfPackExprClass: {
+ Out << "sZ";
+ const NamedDecl *Pack = cast<SizeOfPackExpr>(E)->getPack();
+ if (const TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Pack))
+ mangleTemplateParameter(TTP->getIndex());
+ else if (const NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Pack))
+ mangleTemplateParameter(NTTP->getIndex());
+ else if (const TemplateTemplateParmDecl *TempTP
+ = dyn_cast<TemplateTemplateParmDecl>(Pack))
+ mangleTemplateParameter(TempTP->getIndex());
+ else
+ mangleFunctionParam(cast<ParmVarDecl>(Pack));
+ break;
+ }
+
+ case Expr::MaterializeTemporaryExprClass: {
+ mangleExpression(cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr());
+ break;
+ }
+ }
+}
+
+/// Mangle an expression which refers to a parameter variable.
+///
+/// <expression> ::= <function-param>
+/// <function-param> ::= fp <top-level CV-qualifiers> _ # L == 0, I == 0
+/// <function-param> ::= fp <top-level CV-qualifiers>
+/// <parameter-2 non-negative number> _ # L == 0, I > 0
+/// <function-param> ::= fL <L-1 non-negative number>
+/// p <top-level CV-qualifiers> _ # L > 0, I == 0
+/// <function-param> ::= fL <L-1 non-negative number>
+/// p <top-level CV-qualifiers>
+/// <I-1 non-negative number> _ # L > 0, I > 0
+///
+/// L is the nesting depth of the parameter, defined as 1 if the
+/// parameter comes from the innermost function prototype scope
+/// enclosing the current context, 2 if from the next enclosing
+/// function prototype scope, and so on, with one special case: if
+/// we've processed the full parameter clause for the innermost
+/// function type, then L is one less. This definition conveniently
+/// makes it irrelevant whether a function's result type was written
+/// trailing or leading, but is otherwise overly complicated; the
+/// numbering was first designed without considering references to
+/// parameter in locations other than return types, and then the
+/// mangling had to be generalized without changing the existing
+/// manglings.
+///
+/// I is the zero-based index of the parameter within its parameter
+/// declaration clause. Note that the original ABI document describes
+/// this using 1-based ordinals.
+void CXXNameMangler::mangleFunctionParam(const ParmVarDecl *parm) {
+ unsigned parmDepth = parm->getFunctionScopeDepth();
+ unsigned parmIndex = parm->getFunctionScopeIndex();
+
+ // Compute 'L'.
+ // parmDepth does not include the declaring function prototype.
+ // FunctionTypeDepth does account for that.
+ assert(parmDepth < FunctionTypeDepth.getDepth());
+ unsigned nestingDepth = FunctionTypeDepth.getDepth() - parmDepth;
+ if (FunctionTypeDepth.isInResultType())
+ nestingDepth--;
+
+ if (nestingDepth == 0) {
+ Out << "fp";
+ } else {
+ Out << "fL" << (nestingDepth - 1) << 'p';
+ }
+
+ // Top-level qualifiers. We don't have to worry about arrays here,
+ // because parameters declared as arrays should already have been
+  // transformed to have pointer type. FIXME: apparently these don't
+ // get mangled if used as an rvalue of a known non-class type?
+ assert(!parm->getType()->isArrayType()
+ && "parameter's type is still an array type?");
+ mangleQualifiers(parm->getType().getQualifiers());
+
+ // Parameter index.
+ if (parmIndex != 0) {
+ Out << (parmIndex - 1);
+ }
+ Out << '_';
+}
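+// Worked example (sketch, following the numbering described above): in
+//   template <class T> auto f(T a, T b) -> decltype(b);
+// the reference to 'b' sits in the result type of the innermost prototype,
+// so L == 0 and I == 1, giving "fp0_"; a reference to 'a' there would give
+// "fp_".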
+
+void CXXNameMangler::mangleCXXCtorType(CXXCtorType T) {
+ // <ctor-dtor-name> ::= C1 # complete object constructor
+ // ::= C2 # base object constructor
+ // ::= C3 # complete object allocating constructor
+ //
+ switch (T) {
+ case Ctor_Complete:
+ Out << "C1";
+ break;
+ case Ctor_Base:
+ Out << "C2";
+ break;
+ case Ctor_CompleteAllocating:
+ Out << "C3";
+ break;
+ }
+}
+
+void CXXNameMangler::mangleCXXDtorType(CXXDtorType T) {
+ // <ctor-dtor-name> ::= D0 # deleting destructor
+ // ::= D1 # complete object destructor
+ // ::= D2 # base object destructor
+ //
+ switch (T) {
+ case Dtor_Deleting:
+ Out << "D0";
+ break;
+ case Dtor_Complete:
+ Out << "D1";
+ break;
+ case Dtor_Base:
+ Out << "D2";
+ break;
+ }
+}
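+// Example (illustrative): for a class Foo, the complete-object constructor is
+// "_ZN3FooC1Ev" and the base-object constructor "_ZN3FooC2Ev"; destructors
+// use D1/D2 in the same positions, with D0 for the deleting destructor.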
+
+void CXXNameMangler::mangleTemplateArgs(
+ const ASTTemplateArgumentListInfo &TemplateArgs) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0, e = TemplateArgs.NumTemplateArgs; i != e; ++i)
+ mangleTemplateArg(0, TemplateArgs.getTemplateArgs()[i].getArgument());
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleTemplateArgs(TemplateName Template,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleTemplateArgs(*TD->getTemplateParameters(), TemplateArgs,
+ NumTemplateArgs);
+
+ mangleUnresolvedTemplateArgs(TemplateArgs, NumTemplateArgs);
+}
+
+void CXXNameMangler::mangleUnresolvedTemplateArgs(const TemplateArgument *args,
+ unsigned numArgs) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0; i != numArgs; ++i)
+ mangleTemplateArg(0, args[i]);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgumentList &AL) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0, e = AL.size(); i != e; ++i)
+ mangleTemplateArg(PL.getParam(i), AL[i]);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleTemplateArgs(const TemplateParameterList &PL,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs) {
+ // <template-args> ::= I <template-arg>+ E
+ Out << 'I';
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ mangleTemplateArg(PL.getParam(i), TemplateArgs[i]);
+ Out << 'E';
+}
+
+void CXXNameMangler::mangleTemplateArg(const NamedDecl *P,
+ TemplateArgument A) {
+ // <template-arg> ::= <type> # type or template
+ // ::= X <expression> E # expression
+ // ::= <expr-primary> # simple expressions
+ // ::= J <template-arg>* E # argument pack
+ // ::= sp <expression> # pack expansion of (C++0x)
+ if (!A.isInstantiationDependent() || A.isDependent())
+ A = Context.getASTContext().getCanonicalTemplateArgument(A);
+
+ switch (A.getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Cannot mangle NULL template argument");
+
+ case TemplateArgument::Type:
+ mangleType(A.getAsType());
+ break;
+ case TemplateArgument::Template:
+ // This is mangled as <type>.
+ mangleType(A.getAsTemplate());
+ break;
+ case TemplateArgument::TemplateExpansion:
+ // <type> ::= Dp <type> # pack expansion (C++0x)
+ Out << "Dp";
+ mangleType(A.getAsTemplateOrTemplatePattern());
+ break;
+ case TemplateArgument::Expression: {
+ // It's possible to end up with a DeclRefExpr here in certain
+ // dependent cases, in which case we should mangle as a
+ // declaration.
+ const Expr *E = A.getAsExpr()->IgnoreParens();
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ const ValueDecl *D = DRE->getDecl();
+ if (isa<VarDecl>(D) || isa<FunctionDecl>(D)) {
+ Out << "L";
+ mangle(D, "_Z");
+ Out << 'E';
+ break;
+ }
+ }
+
+ Out << 'X';
+ mangleExpression(E);
+ Out << 'E';
+ break;
+ }
+ case TemplateArgument::Integral:
+ mangleIntegerLiteral(A.getIntegralType(), *A.getAsIntegral());
+ break;
+ case TemplateArgument::Declaration: {
+ assert(P && "Missing template parameter for declaration argument");
+ // <expr-primary> ::= L <mangled-name> E # external name
+ // <expr-primary> ::= L <type> 0 E
+    // Clang produces ASTs where pointer-to-member-function expressions
+    // and pointer-to-function expressions are represented as a declaration,
+    // not an expression; we compensate for that here to produce the correct
+    // mangling.
+ const NonTypeTemplateParmDecl *Parameter = cast<NonTypeTemplateParmDecl>(P);
+
+ // Handle NULL pointer arguments.
+ if (!A.getAsDecl()) {
+ Out << "L";
+ mangleType(Parameter->getType());
+ Out << "0E";
+ break;
+ }
+
+ NamedDecl *D = cast<NamedDecl>(A.getAsDecl());
+ bool compensateMangling = !Parameter->getType()->isReferenceType();
+ if (compensateMangling) {
+ Out << 'X';
+ mangleOperatorName(OO_Amp, 1);
+ }
+
+ Out << 'L';
+ // References to external entities use the mangled name; if the name would
+    // not normally be mangled, then mangle it as unqualified.
+ //
+ // FIXME: The ABI specifies that external names here should have _Z, but
+ // gcc leaves this off.
+ if (compensateMangling)
+ mangle(D, "_Z");
+ else
+ mangle(D, "Z");
+ Out << 'E';
+
+ if (compensateMangling)
+ Out << 'E';
+
+ break;
+ }
+
+ case TemplateArgument::Pack: {
+ // Note: proposal by Mike Herrick on 12/20/10
+ Out << 'J';
+ for (TemplateArgument::pack_iterator PA = A.pack_begin(),
+ PAEnd = A.pack_end();
+ PA != PAEnd; ++PA)
+ mangleTemplateArg(P, *PA);
+ Out << 'E';
+ }
+ }
+}
+
+void CXXNameMangler::mangleTemplateParameter(unsigned Index) {
+ // <template-param> ::= T_ # first template parameter
+ // ::= T <parameter-2 non-negative number> _
+ if (Index == 0)
+ Out << "T_";
+ else
+ Out << 'T' << (Index - 1) << '_';
+}
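+// Example (illustrative): in "template <class T, int N> struct A", T mangles
+// as "T_" and N as "T0_"; the specialization A<int, 3> yields the
+// <template-args> "IiLi3EE".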
+
+void CXXNameMangler::mangleExistingSubstitution(QualType type) {
+ bool result = mangleSubstitution(type);
+ assert(result && "no existing substitution for type");
+ (void) result;
+}
+
+void CXXNameMangler::mangleExistingSubstitution(TemplateName tname) {
+ bool result = mangleSubstitution(tname);
+ assert(result && "no existing substitution for template name");
+ (void) result;
+}
+
+// <substitution> ::= S <seq-id> _
+// ::= S_
+bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) {
+ // Try one of the standard substitutions first.
+ if (mangleStandardSubstitution(ND))
+ return true;
+
+ ND = cast<NamedDecl>(ND->getCanonicalDecl());
+ return mangleSubstitution(reinterpret_cast<uintptr_t>(ND));
+}
+
+/// \brief Determine whether the given type has any qualifiers that are
+/// relevant for substitutions.
+static bool hasMangledSubstitutionQualifiers(QualType T) {
+ Qualifiers Qs = T.getQualifiers();
+ return Qs.getCVRQualifiers() || Qs.hasAddressSpace();
+}
+
+bool CXXNameMangler::mangleSubstitution(QualType T) {
+ if (!hasMangledSubstitutionQualifiers(T)) {
+ if (const RecordType *RT = T->getAs<RecordType>())
+ return mangleSubstitution(RT->getDecl());
+ }
+
+ uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
+
+ return mangleSubstitution(TypePtr);
+}
+
+bool CXXNameMangler::mangleSubstitution(TemplateName Template) {
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return mangleSubstitution(TD);
+
+ Template = Context.getASTContext().getCanonicalTemplateName(Template);
+ return mangleSubstitution(
+ reinterpret_cast<uintptr_t>(Template.getAsVoidPointer()));
+}
+
+bool CXXNameMangler::mangleSubstitution(uintptr_t Ptr) {
+ llvm::DenseMap<uintptr_t, unsigned>::iterator I = Substitutions.find(Ptr);
+ if (I == Substitutions.end())
+ return false;
+
+ unsigned SeqID = I->second;
+ if (SeqID == 0)
+ Out << "S_";
+ else {
+ SeqID--;
+
+ // <seq-id> is encoded in base-36, using digits and upper case letters.
+ char Buffer[10];
+ char *BufferPtr = llvm::array_endof(Buffer);
+
+ if (SeqID == 0) *--BufferPtr = '0';
+
+ while (SeqID) {
+ assert(BufferPtr > Buffer && "Buffer overflow!");
+
+ char c = static_cast<char>(SeqID % 36);
+
+ *--BufferPtr = (c < 10 ? '0' + c : 'A' + c - 10);
+ SeqID /= 36;
+ }
+
+ Out << 'S'
+ << StringRef(BufferPtr, llvm::array_endof(Buffer)-BufferPtr)
+ << '_';
+ }
+
+ return true;
+}
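+// Example (derived from the encoding above): substitutions are emitted as
+// "S_", then "S0_", "S1_", ..., "S9_", "SA_", ..., "SZ_", "S10_", i.e. a
+// base-36 <seq-id> offset by one.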
+
+static bool isCharType(QualType T) {
+ if (T.isNull())
+ return false;
+
+ return T->isSpecificBuiltinType(BuiltinType::Char_S) ||
+ T->isSpecificBuiltinType(BuiltinType::Char_U);
+}
+
+/// isCharSpecialization - Returns whether a given type is a template
+/// specialization of a given name with a single argument of type char.
+static bool isCharSpecialization(QualType T, const char *Name) {
+ if (T.isNull())
+ return false;
+
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ const ClassTemplateSpecializationDecl *SD =
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+ if (!SD)
+ return false;
+
+ if (!isStdNamespace(getEffectiveDeclContext(SD)))
+ return false;
+
+ const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+ if (TemplateArgs.size() != 1)
+ return false;
+
+ if (!isCharType(TemplateArgs[0].getAsType()))
+ return false;
+
+ return SD->getIdentifier()->getName() == Name;
+}
+
+template <std::size_t StrLen>
+static bool isStreamCharSpecialization(const ClassTemplateSpecializationDecl*SD,
+ const char (&Str)[StrLen]) {
+ if (!SD->getIdentifier()->isStr(Str))
+ return false;
+
+ const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+ if (TemplateArgs.size() != 2)
+ return false;
+
+ if (!isCharType(TemplateArgs[0].getAsType()))
+ return false;
+
+ if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
+ return false;
+
+ return true;
+}
+
+bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
+ // <substitution> ::= St # ::std::
+ if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+ if (isStd(NS)) {
+ Out << "St";
+ return true;
+ }
+ }
+
+ if (const ClassTemplateDecl *TD = dyn_cast<ClassTemplateDecl>(ND)) {
+ if (!isStdNamespace(getEffectiveDeclContext(TD)))
+ return false;
+
+ // <substitution> ::= Sa # ::std::allocator
+ if (TD->getIdentifier()->isStr("allocator")) {
+ Out << "Sa";
+ return true;
+ }
+
+    // <substitution> ::= Sb # ::std::basic_string
+ if (TD->getIdentifier()->isStr("basic_string")) {
+ Out << "Sb";
+ return true;
+ }
+ }
+
+ if (const ClassTemplateSpecializationDecl *SD =
+ dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
+ if (!isStdNamespace(getEffectiveDeclContext(SD)))
+ return false;
+
+ // <substitution> ::= Ss # ::std::basic_string<char,
+ // ::std::char_traits<char>,
+ // ::std::allocator<char> >
+ if (SD->getIdentifier()->isStr("basic_string")) {
+ const TemplateArgumentList &TemplateArgs = SD->getTemplateArgs();
+
+ if (TemplateArgs.size() != 3)
+ return false;
+
+ if (!isCharType(TemplateArgs[0].getAsType()))
+ return false;
+
+ if (!isCharSpecialization(TemplateArgs[1].getAsType(), "char_traits"))
+ return false;
+
+ if (!isCharSpecialization(TemplateArgs[2].getAsType(), "allocator"))
+ return false;
+
+ Out << "Ss";
+ return true;
+ }
+
+ // <substitution> ::= Si # ::std::basic_istream<char,
+ // ::std::char_traits<char> >
+ if (isStreamCharSpecialization(SD, "basic_istream")) {
+ Out << "Si";
+ return true;
+ }
+
+ // <substitution> ::= So # ::std::basic_ostream<char,
+ // ::std::char_traits<char> >
+ if (isStreamCharSpecialization(SD, "basic_ostream")) {
+ Out << "So";
+ return true;
+ }
+
+ // <substitution> ::= Sd # ::std::basic_iostream<char,
+ // ::std::char_traits<char> >
+ if (isStreamCharSpecialization(SD, "basic_iostream")) {
+ Out << "Sd";
+ return true;
+ }
+ }
+ return false;
+}
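+// Example (illustrative): ::std::string, i.e. ::std::basic_string<char,
+// ::std::char_traits<char>, ::std::allocator<char> >, is abbreviated "Ss",
+// and ::std::basic_ostream<char, ::std::char_traits<char> > becomes "So".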
+
+void CXXNameMangler::addSubstitution(QualType T) {
+ if (!hasMangledSubstitutionQualifiers(T)) {
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ addSubstitution(RT->getDecl());
+ return;
+ }
+ }
+
+ uintptr_t TypePtr = reinterpret_cast<uintptr_t>(T.getAsOpaquePtr());
+ addSubstitution(TypePtr);
+}
+
+void CXXNameMangler::addSubstitution(TemplateName Template) {
+ if (TemplateDecl *TD = Template.getAsTemplateDecl())
+ return addSubstitution(TD);
+
+ Template = Context.getASTContext().getCanonicalTemplateName(Template);
+ addSubstitution(reinterpret_cast<uintptr_t>(Template.getAsVoidPointer()));
+}
+
+void CXXNameMangler::addSubstitution(uintptr_t Ptr) {
+ assert(!Substitutions.count(Ptr) && "Substitution already exists!");
+ Substitutions[Ptr] = SeqID++;
+}
+
+//
+
+/// \brief Mangles the name of the declaration D and emits that name to the
+/// given output stream.
+///
+/// Only variable and function declarations may be passed in; constructors and
+/// destructors are handled by mangleCXXCtor and mangleCXXDtor instead, as the
+/// asserts below enforce.
+void ItaniumMangleContext::mangleName(const NamedDecl *D,
+ raw_ostream &Out) {
+ assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
+ "Invalid mangleName() call, argument is not a variable or function!");
+ assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
+ "Invalid mangleName() call on 'structor decl!");
+
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ getASTContext().getSourceManager(),
+ "Mangling declaration");
+
+ CXXNameMangler Mangler(*this, Out, D);
+ return Mangler.mangle(D);
+}
+
+void ItaniumMangleContext::mangleCXXCtor(const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ raw_ostream &Out) {
+ CXXNameMangler Mangler(*this, Out, D, Type);
+ Mangler.mangle(D);
+}
+
+void ItaniumMangleContext::mangleCXXDtor(const CXXDestructorDecl *D,
+ CXXDtorType Type,
+ raw_ostream &Out) {
+ CXXNameMangler Mangler(*this, Out, D, Type);
+ Mangler.mangle(D);
+}
+
+void ItaniumMangleContext::mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ raw_ostream &Out) {
+ // <special-name> ::= T <call-offset> <base encoding>
+ // # base is the nominal target function of thunk
+ // <special-name> ::= Tc <call-offset> <call-offset> <base encoding>
+ // # base is the nominal target function of thunk
+ // # first call-offset is 'this' adjustment
+ // # second call-offset is result adjustment
+
+ assert(!isa<CXXDestructorDecl>(MD) &&
+ "Use mangleCXXDtor for destructor decls!");
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZT";
+ if (!Thunk.Return.isEmpty())
+ Mangler.getStream() << 'c';
+
+ // Mangle the 'this' pointer adjustment.
+ Mangler.mangleCallOffset(Thunk.This.NonVirtual, Thunk.This.VCallOffsetOffset);
+
+ // Mangle the return pointer adjustment if there is one.
+ if (!Thunk.Return.isEmpty())
+ Mangler.mangleCallOffset(Thunk.Return.NonVirtual,
+ Thunk.Return.VBaseOffsetOffset);
+
+ Mangler.mangleFunctionEncoding(MD);
+}
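+// Example (sketch, not compiler-verified): a non-virtual thunk to B::f() that
+// adjusts 'this' by -8 bytes is emitted roughly as "_ZThn8_N1B1fEv"; covariant
+// thunks use the "Tc" form with a second <call-offset> for the return value.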
+
+void
+ItaniumMangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ raw_ostream &Out) {
+ // <special-name> ::= T <call-offset> <base encoding>
+ // # base is the nominal target function of thunk
+ CXXNameMangler Mangler(*this, Out, DD, Type);
+ Mangler.getStream() << "_ZT";
+
+ // Mangle the 'this' pointer adjustment.
+ Mangler.mangleCallOffset(ThisAdjustment.NonVirtual,
+ ThisAdjustment.VCallOffsetOffset);
+
+ Mangler.mangleFunctionEncoding(DD);
+}
+
+/// mangleItaniumGuardVariable - Emits the mangled name of the guard variable
+/// for the given VarDecl to the output stream.
+void ItaniumMangleContext::mangleItaniumGuardVariable(const VarDecl *D,
+ raw_ostream &Out) {
+ // <special-name> ::= GV <object name> # Guard variable for one-time
+ // # initialization
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZGV";
+ Mangler.mangleName(D);
+}
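+// Example (illustrative): the guard for "static T x;" inside "void f()" is
+// mangled "_ZGVZ1fvE1x".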
+
+void ItaniumMangleContext::mangleReferenceTemporary(const VarDecl *D,
+ raw_ostream &Out) {
+ // We match the GCC mangling here.
+ // <special-name> ::= GR <object name>
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZGR";
+ Mangler.mangleName(D);
+}
+
+void ItaniumMangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
+ raw_ostream &Out) {
+ // <special-name> ::= TV <type> # virtual table
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZTV";
+ Mangler.mangleNameOrStandardSubstitution(RD);
+}
+
+void ItaniumMangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
+ raw_ostream &Out) {
+ // <special-name> ::= TT <type> # VTT structure
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZTT";
+ Mangler.mangleNameOrStandardSubstitution(RD);
+}
+
+void ItaniumMangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD,
+ int64_t Offset,
+ const CXXRecordDecl *Type,
+ raw_ostream &Out) {
+ // <special-name> ::= TC <type> <offset number> _ <base type>
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZTC";
+ Mangler.mangleNameOrStandardSubstitution(RD);
+ Mangler.getStream() << Offset;
+ Mangler.getStream() << '_';
+ Mangler.mangleNameOrStandardSubstitution(Type);
+}
+
+void ItaniumMangleContext::mangleCXXRTTI(QualType Ty,
+ raw_ostream &Out) {
+ // <special-name> ::= TI <type> # typeinfo structure
+ assert(!Ty.hasQualifiers() && "RTTI info cannot have top-level qualifiers");
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZTI";
+ Mangler.mangleType(Ty);
+}
+
+void ItaniumMangleContext::mangleCXXRTTIName(QualType Ty,
+ raw_ostream &Out) {
+ // <special-name> ::= TS <type> # typeinfo name (null terminated byte string)
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZTS";
+ Mangler.mangleType(Ty);
+}
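+// Example (illustrative): for a class Foo, the virtual table is "_ZTV3Foo",
+// the VTT "_ZTT3Foo", the typeinfo structure "_ZTI3Foo", and the typeinfo
+// name "_ZTS3Foo".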
+
+MangleContext *clang::createItaniumMangleContext(ASTContext &Context,
+ DiagnosticsEngine &Diags) {
+ return new ItaniumMangleContext(Context, Diags);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/LambdaMangleContext.cpp b/contrib/llvm/tools/clang/lib/AST/LambdaMangleContext.cpp
new file mode 100644
index 0000000..f5272a7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/LambdaMangleContext.cpp
@@ -0,0 +1,30 @@
+//===--- LambdaMangleContext.cpp - Context for mangling lambdas -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LambdaMangleContext class, which keeps track of
+// the Itanium C++ ABI mangling numbers for lambda expressions.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/LambdaMangleContext.h"
+#include "clang/AST/DeclCXX.h"
+
+using namespace clang;
+
+unsigned LambdaMangleContext::getManglingNumber(CXXMethodDecl *CallOperator) {
+ const FunctionProtoType *Proto
+ = CallOperator->getType()->getAs<FunctionProtoType>();
+ ASTContext &Context = CallOperator->getASTContext();
+
+ QualType Key = Context.getFunctionType(Context.VoidTy,
+ Proto->arg_type_begin(),
+ Proto->getNumArgs(),
+ FunctionProtoType::ExtProtoInfo());
+ Key = Context.getCanonicalType(Key);
+ return ++ManglingNumbers[Key->castAs<FunctionProtoType>()];
+}
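+// Note (sketch of the behaviour above): the key is the call operator's
+// parameter list with a void result, so two lambdas in the same context
+// taking (int) share one counter and get numbers 1 and 2, while a lambda
+// taking (double) starts its own count.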
diff --git a/contrib/llvm/tools/clang/lib/AST/Mangle.cpp b/contrib/llvm/tools/clang/lib/AST/Mangle.cpp
new file mode 100644
index 0000000..73c9f57
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/Mangle.cpp
@@ -0,0 +1,142 @@
+//===--- Mangle.cpp - Mangle C++ Names --------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements generic name mangling support for blocks and Objective-C.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/ABI.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#define MANGLE_CHECKER 0
+
+#if MANGLE_CHECKER
+#include <cxxabi.h>
+#endif
+
+using namespace clang;
+
+// FIXME: For blocks we currently mimic GCC's mangling scheme, which leaves
+// much to be desired. Come up with a better mangling scheme.
+
+namespace {
+
+static void mangleFunctionBlock(MangleContext &Context,
+ StringRef Outer,
+ const BlockDecl *BD,
+ raw_ostream &Out) {
+ Out << "__" << Outer << "_block_invoke_" << Context.getBlockId(BD, true);
+}
+
+static void checkMangleDC(const DeclContext *DC, const BlockDecl *BD) {
+#ifndef NDEBUG
+ const DeclContext *ExpectedDC = BD->getDeclContext();
+ while (isa<BlockDecl>(ExpectedDC) || isa<EnumDecl>(ExpectedDC))
+ ExpectedDC = ExpectedDC->getParent();
+ // In-class initializers for non-static data members are lexically defined
+ // within the class, but are mangled as if they were specified as constructor
+ // member initializers.
+ if (isa<CXXRecordDecl>(ExpectedDC) && DC != ExpectedDC)
+ DC = DC->getParent();
+ assert(DC == ExpectedDC && "Given decl context did not match expected!");
+#endif
+}
+
+}
+
+void MangleContext::anchor() { }
+
+void MangleContext::mangleGlobalBlock(const BlockDecl *BD,
+ raw_ostream &Out) {
+ Out << "__block_global_" << getBlockId(BD, false);
+}
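+// Example (derived from the helpers above): a block at global scope is named
+// "__block_global_N", and a block inside a function "foo" becomes
+// "__foo_block_invoke_N", where N is the id returned by getBlockId().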
+
+void MangleContext::mangleCtorBlock(const CXXConstructorDecl *CD,
+ CXXCtorType CT, const BlockDecl *BD,
+ raw_ostream &ResStream) {
+ checkMangleDC(CD, BD);
+ SmallString<64> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ mangleCXXCtor(CD, CT, Out);
+ Out.flush();
+ mangleFunctionBlock(*this, Buffer, BD, ResStream);
+}
+
+void MangleContext::mangleDtorBlock(const CXXDestructorDecl *DD,
+ CXXDtorType DT, const BlockDecl *BD,
+ raw_ostream &ResStream) {
+ checkMangleDC(DD, BD);
+ SmallString<64> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ mangleCXXDtor(DD, DT, Out);
+ Out.flush();
+ mangleFunctionBlock(*this, Buffer, BD, ResStream);
+}
+
+void MangleContext::mangleBlock(const DeclContext *DC, const BlockDecl *BD,
+ raw_ostream &Out) {
+ assert(!isa<CXXConstructorDecl>(DC) && !isa<CXXDestructorDecl>(DC));
+ checkMangleDC(DC, BD);
+
+ SmallString<64> Buffer;
+ llvm::raw_svector_ostream Stream(Buffer);
+ if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC)) {
+ mangleObjCMethodName(Method, Stream);
+ } else {
+ const NamedDecl *ND = cast<NamedDecl>(DC);
+ if (IdentifierInfo *II = ND->getIdentifier())
+ Stream << II->getName();
+ else {
+ // FIXME: We were doing a mangleUnqualifiedName() before, but that's
+ // a private member of a class that will soon itself be private to the
+ // Itanium C++ ABI object. What should we do now? Right now, I'm just
+ // calling the mangleName() method on the MangleContext; is there a
+ // better way?
+ mangleName(ND, Stream);
+ }
+ }
+ Stream.flush();
+ mangleFunctionBlock(*this, Buffer, BD, Out);
+}
+
+void MangleContext::mangleObjCMethodName(const ObjCMethodDecl *MD,
+ raw_ostream &Out) {
+ SmallString<64> Name;
+ llvm::raw_svector_ostream OS(Name);
+
+ const ObjCContainerDecl *CD =
+ dyn_cast<ObjCContainerDecl>(MD->getDeclContext());
+ assert (CD && "Missing container decl in GetNameForMethod");
+ OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
+ if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
+ OS << '(' << *CID << ')';
+ OS << ' ' << MD->getSelector().getAsString() << ']';
+
+ Out << OS.str().size() << OS.str();
+}
+
+void MangleContext::mangleBlock(const BlockDecl *BD,
+ raw_ostream &Out) {
+ const DeclContext *DC = BD->getDeclContext();
+ while (isa<BlockDecl>(DC) || isa<EnumDecl>(DC))
+ DC = DC->getParent();
+ if (DC->isFunctionOrMethod())
+ mangleBlock(DC, BD, Out);
+ else
+ mangleGlobalBlock(BD, Out);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp b/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp
new file mode 100644
index 0000000..f33d6fe
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/MicrosoftCXXABI.cpp
@@ -0,0 +1,71 @@
+//===------- MicrosoftCXXABI.cpp - AST support for the Microsoft C++ ABI --===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ AST support targeting the Microsoft Visual C++
+// ABI.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CXXABI.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Type.h"
+#include "clang/Basic/TargetInfo.h"
+
+using namespace clang;
+
+namespace {
+class MicrosoftCXXABI : public CXXABI {
+ ASTContext &Context;
+public:
+ MicrosoftCXXABI(ASTContext &Ctx) : Context(Ctx) { }
+
+ unsigned getMemberPointerSize(const MemberPointerType *MPT) const;
+
+ CallingConv getDefaultMethodCallConv() const {
+ if (Context.getTargetInfo().getTriple().getArch() == llvm::Triple::x86)
+ return CC_X86ThisCall;
+ else
+ return CC_C;
+ }
+
+ bool isNearlyEmpty(const CXXRecordDecl *RD) const {
+ // FIXME: Audit the corners
+ if (!RD->isDynamicClass())
+ return false;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // In the Microsoft ABI, classes can have one or two vtable pointers.
+ CharUnits PointerSize =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ return Layout.getNonVirtualSize() == PointerSize ||
+ Layout.getNonVirtualSize() == PointerSize * 2;
+ }
+};
+}
+
+unsigned MicrosoftCXXABI::getMemberPointerSize(const MemberPointerType *MPT) const {
+ QualType Pointee = MPT->getPointeeType();
+ CXXRecordDecl *RD = MPT->getClass()->getAsCXXRecordDecl();
+ if (RD->getNumVBases() > 0) {
+ if (Pointee->isFunctionType())
+ return 3;
+ else
+ return 2;
+ } else if (RD->getNumBases() > 1 && Pointee->isFunctionType())
+ return 2;
+ return 1;
+}
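+// Example (derived from the logic above): for a pointer to member function
+// the routine reports 3 when the class has virtual bases and 2 when it merely
+// has multiple (non-virtual) bases; a pointer to data member of a class with
+// virtual bases reports 2; every remaining case reports 1.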
+
+CXXABI *clang::CreateMicrosoftCXXABI(ASTContext &Ctx) {
+ return new MicrosoftCXXABI(Ctx);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp b/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp
new file mode 100644
index 0000000..ba9856a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/MicrosoftMangle.cpp
@@ -0,0 +1,1191 @@
+//===--- MicrosoftMangle.cpp - Microsoft Visual C++ Name Mangling ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ name mangling targeting the Microsoft Visual C++ ABI.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Mangle.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/ABI.h"
+
+using namespace clang;
+
+namespace {
+
+/// MicrosoftCXXNameMangler - Manage the mangling of a single name for the
+/// Microsoft Visual C++ ABI.
+class MicrosoftCXXNameMangler {
+ MangleContext &Context;
+ raw_ostream &Out;
+
+ ASTContext &getASTContext() const { return Context.getASTContext(); }
+
+public:
+ MicrosoftCXXNameMangler(MangleContext &C, raw_ostream &Out_)
+ : Context(C), Out(Out_) { }
+
+ void mangle(const NamedDecl *D, StringRef Prefix = "?");
+ void mangleName(const NamedDecl *ND);
+ void mangleFunctionEncoding(const FunctionDecl *FD);
+ void mangleVariableEncoding(const VarDecl *VD);
+ void mangleNumber(int64_t Number);
+ void mangleType(QualType T);
+
+private:
+ void mangleUnqualifiedName(const NamedDecl *ND) {
+ mangleUnqualifiedName(ND, ND->getDeclName());
+ }
+ void mangleUnqualifiedName(const NamedDecl *ND, DeclarationName Name);
+ void mangleSourceName(const IdentifierInfo *II);
+ void manglePostfix(const DeclContext *DC, bool NoFunction=false);
+ void mangleOperatorName(OverloadedOperatorKind OO);
+ void mangleQualifiers(Qualifiers Quals, bool IsMember);
+
+ void mangleObjCMethodName(const ObjCMethodDecl *MD);
+
+ // Declare manglers for every type class.
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) void mangleType(const CLASS##Type *T);
+#include "clang/AST/TypeNodes.def"
+
+ void mangleType(const TagType*);
+ void mangleType(const FunctionType *T, const FunctionDecl *D,
+ bool IsStructor, bool IsInstMethod);
+ void mangleType(const ArrayType *T, bool IsGlobal);
+ void mangleExtraDimensions(QualType T);
+ void mangleFunctionClass(const FunctionDecl *FD);
+ void mangleCallingConvention(const FunctionType *T, bool IsInstMethod = false);
+ void mangleThrowSpecification(const FunctionProtoType *T);
+
+};
+
+/// MicrosoftMangleContext - Overrides the default MangleContext for the
+/// Microsoft Visual C++ ABI.
+class MicrosoftMangleContext : public MangleContext {
+public:
+ MicrosoftMangleContext(ASTContext &Context,
+ DiagnosticsEngine &Diags) : MangleContext(Context, Diags) { }
+ virtual bool shouldMangleDeclName(const NamedDecl *D);
+ virtual void mangleName(const NamedDecl *D, raw_ostream &Out);
+ virtual void mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ raw_ostream &);
+ virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ raw_ostream &);
+ virtual void mangleCXXVTable(const CXXRecordDecl *RD,
+ raw_ostream &);
+ virtual void mangleCXXVTT(const CXXRecordDecl *RD,
+ raw_ostream &);
+ virtual void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
+ const CXXRecordDecl *Type,
+ raw_ostream &);
+ virtual void mangleCXXRTTI(QualType T, raw_ostream &);
+ virtual void mangleCXXRTTIName(QualType T, raw_ostream &);
+ virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ raw_ostream &);
+ virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ raw_ostream &);
+ virtual void mangleReferenceTemporary(const clang::VarDecl *,
+ raw_ostream &);
+};
+
+}
+
+static bool isInCLinkageSpecification(const Decl *D) {
+ D = D->getCanonicalDecl();
+ for (const DeclContext *DC = D->getDeclContext();
+ !DC->isTranslationUnit(); DC = DC->getParent()) {
+ if (const LinkageSpecDecl *Linkage = dyn_cast<LinkageSpecDecl>(DC))
+ return Linkage->getLanguage() == LinkageSpecDecl::lang_c;
+ }
+
+ return false;
+}
+
+bool MicrosoftMangleContext::shouldMangleDeclName(const NamedDecl *D) {
+ // In C, functions with no attributes never need to be mangled. Fastpath them.
+ if (!getASTContext().getLangOpts().CPlusPlus && !D->hasAttrs())
+ return false;
+
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (D->hasAttr<AsmLabelAttr>())
+ return true;
+
+ // Clang's "overloadable" attribute extension to C/C++ implies name mangling
+  // (always), as does being a C++ member function or having a name that is
+  // not a simple identifier.
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (FD && (FD->hasAttr<OverloadableAttr>() || isa<CXXMethodDecl>(FD) ||
+ !FD->getDeclName().isIdentifier()))
+ return true;
+
+ // Otherwise, no mangling is done outside C++ mode.
+ if (!getASTContext().getLangOpts().CPlusPlus)
+ return false;
+
+ // Variables at global scope with internal linkage are not mangled.
+ if (!FD) {
+ const DeclContext *DC = D->getDeclContext();
+ if (DC->isTranslationUnit() && D->getLinkage() == InternalLinkage)
+ return false;
+ }
+
+ // C functions and "main" are not mangled.
+ if ((FD && FD->isMain()) || isInCLinkageSpecification(D))
+ return false;
+
+ return true;
+}
+
+void MicrosoftCXXNameMangler::mangle(const NamedDecl *D,
+ StringRef Prefix) {
+ // MSVC doesn't mangle C++ names the same way it mangles extern "C" names.
+ // Therefore it's really important that we don't decorate the
+  // name with leading underscores or leading/trailing at signs. So, emit an
+ // asm marker at the start so we get the name right.
+ Out << '\01'; // LLVM IR Marker for __asm("foo")
+
+ // Any decl can be declared with __asm("foo") on it, and this takes precedence
+ // over all other naming in the .o file.
+ if (const AsmLabelAttr *ALA = D->getAttr<AsmLabelAttr>()) {
+ // If we have an asm name, then we use it as the mangling.
+ Out << ALA->getLabel();
+ return;
+ }
+
+ // <mangled-name> ::= ? <name> <type-encoding>
+ Out << Prefix;
+ mangleName(D);
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ mangleFunctionEncoding(FD);
+ else if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ mangleVariableEncoding(VD);
+ // TODO: Fields? Can MSVC even mangle them?
+}
+
+void MicrosoftCXXNameMangler::mangleFunctionEncoding(const FunctionDecl *FD) {
+ // <type-encoding> ::= <function-class> <function-type>
+
+ // Don't mangle in the type if this isn't a decl we should typically mangle.
+ if (!Context.shouldMangleDeclName(FD))
+ return;
+
+ // We should never ever see a FunctionNoProtoType at this point.
+ // We don't even know how to mangle their types anyway :).
+ const FunctionProtoType *FT = cast<FunctionProtoType>(FD->getType());
+
+ bool InStructor = false, InInstMethod = false;
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
+ if (MD) {
+ if (MD->isInstance())
+ InInstMethod = true;
+ if (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD))
+ InStructor = true;
+ }
+
+ // First, the function class.
+ mangleFunctionClass(FD);
+
+ mangleType(FT, FD, InStructor, InInstMethod);
+}
+
+void MicrosoftCXXNameMangler::mangleVariableEncoding(const VarDecl *VD) {
+ // <type-encoding> ::= <storage-class> <variable-type>
+ // <storage-class> ::= 0 # private static member
+ // ::= 1 # protected static member
+ // ::= 2 # public static member
+ // ::= 3 # global
+ // ::= 4 # static local
+
+ // The first character in the encoding (after the name) is the storage class.
+ if (VD->isStaticDataMember()) {
+ // If it's a static member, it also encodes the access level.
+ switch (VD->getAccess()) {
+ default:
+ case AS_private: Out << '0'; break;
+ case AS_protected: Out << '1'; break;
+ case AS_public: Out << '2'; break;
+ }
+ }
+ else if (!VD->isStaticLocal())
+ Out << '3';
+ else
+ Out << '4';
+ // Now mangle the type.
+ // <variable-type> ::= <type> <cvr-qualifiers>
+ // ::= <type> A # pointers, references, arrays
+ // Pointers and references are odd. The type of 'int * const foo;' gets
+ // mangled as 'QAHA' instead of 'PAHB', for example.
+ QualType Ty = VD->getType();
+ if (Ty->isPointerType() || Ty->isReferenceType()) {
+ mangleType(Ty);
+ Out << 'A';
+ } else if (Ty->isArrayType()) {
+ // Global arrays are funny, too.
+ mangleType(cast<ArrayType>(Ty.getTypePtr()), true);
+ Out << 'A';
+ } else {
+ mangleType(Ty.getLocalUnqualifiedType());
+ mangleQualifiers(Ty.getLocalQualifiers(), false);
+ }
+}
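+// For example, a global 'int x;' encodes as "?x@@3HA" ('3' for global, 'H'
+// for int, 'A' for no cv-qualifiers), and the 'int *const foo;' case noted
+// above encodes as "?foo@@3QAHA".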
+
+void MicrosoftCXXNameMangler::mangleName(const NamedDecl *ND) {
+ // <name> ::= <unscoped-name> {[<named-scope>]+ | [<nested-name>]}? @
+ const DeclContext *DC = ND->getDeclContext();
+
+ // Always start with the unqualified name.
+ mangleUnqualifiedName(ND);
+
+ // If this is an extern variable declared locally, the relevant DeclContext
+ // is that of the containing namespace, or the translation unit.
+ if (isa<FunctionDecl>(DC) && ND->hasLinkage())
+ while (!DC->isNamespace() && !DC->isTranslationUnit())
+ DC = DC->getParent();
+
+ manglePostfix(DC);
+
+ // Terminate the whole name with an '@'.
+ Out << '@';
+}
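+// For example, a function 'f' declared in namespace 'N' gets the name part
+// "f@N@@": the unqualified name "f@", the enclosing scope "N@", and the
+// terminating '@'.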
+
+void MicrosoftCXXNameMangler::mangleNumber(int64_t Number) {
+ // <number> ::= [?] <decimal digit> # <= 9
+ // ::= [?] <hex digit>+ @ # > 9; A = 0, B = 1, etc...
+ if (Number < 0) {
+ Out << '?';
+ Number = -Number;
+ }
+ if (Number >= 1 && Number <= 10) {
+ Out << Number-1;
+ } else {
+ // We have to build up the encoding in reverse order, so it will come
+ // out right when we write it out.
+ char Encoding[16];
+ char *EndPtr = Encoding+sizeof(Encoding);
+ char *CurPtr = EndPtr;
+ while (Number) {
+ *--CurPtr = 'A' + (Number % 16);
+ Number /= 16;
+ }
+ Out.write(CurPtr, EndPtr-CurPtr);
+ Out << '@';
+ }
+}
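+// For example, 5 is emitted as the single digit '4' (values 1-10 map to
+// '0'-'9'), while 20 is emitted in base 16 as "BE@" ('B' = 1, 'E' = 4,
+// terminated by '@').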
+
+void
+MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
+ DeclarationName Name) {
+ // <unqualified-name> ::= <operator-name>
+ // ::= <ctor-dtor-name>
+ // ::= <source-name>
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier: {
+ if (const IdentifierInfo *II = Name.getAsIdentifierInfo()) {
+ mangleSourceName(II);
+ break;
+ }
+
+ // Otherwise, an anonymous entity. We must have a declaration.
+ assert(ND && "mangling empty name without declaration");
+
+ if (const NamespaceDecl *NS = dyn_cast<NamespaceDecl>(ND)) {
+ if (NS->isAnonymousNamespace()) {
+ Out << "?A";
+ break;
+ }
+ }
+
+ // We must have an anonymous struct.
+ const TagDecl *TD = cast<TagDecl>(ND);
+ if (const TypedefNameDecl *D = TD->getTypedefNameForAnonDecl()) {
+ assert(TD->getDeclContext() == D->getDeclContext() &&
+ "Typedef should not be in another decl context!");
+ assert(D->getDeclName().getAsIdentifierInfo() &&
+ "Typedef was not named!");
+ mangleSourceName(D->getDeclName().getAsIdentifierInfo());
+ break;
+ }
+
+ // When VC encounters an anonymous type with no tag and no typedef,
+ // it literally emits '<unnamed-tag>'.
+ Out << "<unnamed-tag>";
+ break;
+ }
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ llvm_unreachable("Can't mangle Objective-C selector names here!");
+
+ case DeclarationName::CXXConstructorName:
+ Out << "?0";
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ Out << "?1";
+ break;
+
+ case DeclarationName::CXXConversionFunctionName:
+ // <operator-name> ::= ?B # (cast)
+ // The target type is encoded as the return type.
+ Out << "?B";
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ mangleOperatorName(Name.getCXXOverloadedOperator());
+ break;
+
+ case DeclarationName::CXXLiteralOperatorName:
+ // FIXME: Was this added in VS2010? Does MS even know how to mangle this?
+ llvm_unreachable("Don't know how to mangle literal operators yet!");
+
+ case DeclarationName::CXXUsingDirective:
+ llvm_unreachable("Can't mangle a using directive name!");
+ }
+}
+
+void MicrosoftCXXNameMangler::manglePostfix(const DeclContext *DC,
+ bool NoFunction) {
+ // <postfix> ::= <unqualified-name> [<postfix>]
+ // ::= <template-postfix> <template-args> [<postfix>]
+ // ::= <template-param>
+ // ::= <substitution> [<postfix>]
+
+ if (!DC) return;
+
+ while (isa<LinkageSpecDecl>(DC))
+ DC = DC->getParent();
+
+ if (DC->isTranslationUnit())
+ return;
+
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
+ Context.mangleBlock(BD, Out);
+ Out << '@';
+ return manglePostfix(DC->getParent(), NoFunction);
+ }
+
+ if (NoFunction && (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)))
+ return;
+ else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
+ mangleObjCMethodName(Method);
+ else {
+ mangleUnqualifiedName(cast<NamedDecl>(DC));
+ manglePostfix(DC->getParent(), NoFunction);
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleOperatorName(OverloadedOperatorKind OO) {
+ switch (OO) {
+ // ?0 # constructor
+ // ?1 # destructor
+ // <operator-name> ::= ?2 # new
+ case OO_New: Out << "?2"; break;
+ // <operator-name> ::= ?3 # delete
+ case OO_Delete: Out << "?3"; break;
+ // <operator-name> ::= ?4 # =
+ case OO_Equal: Out << "?4"; break;
+ // <operator-name> ::= ?5 # >>
+ case OO_GreaterGreater: Out << "?5"; break;
+ // <operator-name> ::= ?6 # <<
+ case OO_LessLess: Out << "?6"; break;
+ // <operator-name> ::= ?7 # !
+ case OO_Exclaim: Out << "?7"; break;
+ // <operator-name> ::= ?8 # ==
+ case OO_EqualEqual: Out << "?8"; break;
+ // <operator-name> ::= ?9 # !=
+ case OO_ExclaimEqual: Out << "?9"; break;
+ // <operator-name> ::= ?A # []
+ case OO_Subscript: Out << "?A"; break;
+ // ?B # conversion
+ // <operator-name> ::= ?C # ->
+ case OO_Arrow: Out << "?C"; break;
+ // <operator-name> ::= ?D # *
+ case OO_Star: Out << "?D"; break;
+ // <operator-name> ::= ?E # ++
+ case OO_PlusPlus: Out << "?E"; break;
+ // <operator-name> ::= ?F # --
+ case OO_MinusMinus: Out << "?F"; break;
+ // <operator-name> ::= ?G # -
+ case OO_Minus: Out << "?G"; break;
+ // <operator-name> ::= ?H # +
+ case OO_Plus: Out << "?H"; break;
+ // <operator-name> ::= ?I # &
+ case OO_Amp: Out << "?I"; break;
+ // <operator-name> ::= ?J # ->*
+ case OO_ArrowStar: Out << "?J"; break;
+ // <operator-name> ::= ?K # /
+ case OO_Slash: Out << "?K"; break;
+ // <operator-name> ::= ?L # %
+ case OO_Percent: Out << "?L"; break;
+ // <operator-name> ::= ?M # <
+ case OO_Less: Out << "?M"; break;
+ // <operator-name> ::= ?N # <=
+ case OO_LessEqual: Out << "?N"; break;
+ // <operator-name> ::= ?O # >
+ case OO_Greater: Out << "?O"; break;
+ // <operator-name> ::= ?P # >=
+ case OO_GreaterEqual: Out << "?P"; break;
+ // <operator-name> ::= ?Q # ,
+ case OO_Comma: Out << "?Q"; break;
+ // <operator-name> ::= ?R # ()
+ case OO_Call: Out << "?R"; break;
+ // <operator-name> ::= ?S # ~
+ case OO_Tilde: Out << "?S"; break;
+ // <operator-name> ::= ?T # ^
+ case OO_Caret: Out << "?T"; break;
+ // <operator-name> ::= ?U # |
+ case OO_Pipe: Out << "?U"; break;
+ // <operator-name> ::= ?V # &&
+ case OO_AmpAmp: Out << "?V"; break;
+ // <operator-name> ::= ?W # ||
+ case OO_PipePipe: Out << "?W"; break;
+ // <operator-name> ::= ?X # *=
+ case OO_StarEqual: Out << "?X"; break;
+ // <operator-name> ::= ?Y # +=
+ case OO_PlusEqual: Out << "?Y"; break;
+ // <operator-name> ::= ?Z # -=
+ case OO_MinusEqual: Out << "?Z"; break;
+ // <operator-name> ::= ?_0 # /=
+ case OO_SlashEqual: Out << "?_0"; break;
+ // <operator-name> ::= ?_1 # %=
+ case OO_PercentEqual: Out << "?_1"; break;
+ // <operator-name> ::= ?_2 # >>=
+ case OO_GreaterGreaterEqual: Out << "?_2"; break;
+ // <operator-name> ::= ?_3 # <<=
+ case OO_LessLessEqual: Out << "?_3"; break;
+ // <operator-name> ::= ?_4 # &=
+ case OO_AmpEqual: Out << "?_4"; break;
+ // <operator-name> ::= ?_5 # |=
+ case OO_PipeEqual: Out << "?_5"; break;
+ // <operator-name> ::= ?_6 # ^=
+ case OO_CaretEqual: Out << "?_6"; break;
+ // ?_7 # vftable
+ // ?_8 # vbtable
+ // ?_9 # vcall
+ // ?_A # typeof
+ // ?_B # local static guard
+ // ?_C # string
+ // ?_D # vbase destructor
+ // ?_E # vector deleting destructor
+ // ?_F # default constructor closure
+ // ?_G # scalar deleting destructor
+ // ?_H # vector constructor iterator
+ // ?_I # vector destructor iterator
+ // ?_J # vector vbase constructor iterator
+ // ?_K # virtual displacement map
+ // ?_L # eh vector constructor iterator
+ // ?_M # eh vector destructor iterator
+ // ?_N # eh vector vbase constructor iterator
+ // ?_O # copy constructor closure
+ // ?_P<name> # udt returning <name>
+ // ?_Q # <unknown>
+ // ?_R0 # RTTI Type Descriptor
+ // ?_R1 # RTTI Base Class Descriptor at (a,b,c,d)
+ // ?_R2 # RTTI Base Class Array
+ // ?_R3 # RTTI Class Hierarchy Descriptor
+ // ?_R4 # RTTI Complete Object Locator
+ // ?_S # local vftable
+ // ?_T # local vftable constructor closure
+ // <operator-name> ::= ?_U # new[]
+ case OO_Array_New: Out << "?_U"; break;
+ // <operator-name> ::= ?_V # delete[]
+ case OO_Array_Delete: Out << "?_V"; break;
+
+ case OO_Conditional:
+ llvm_unreachable("Don't know how to mangle ?:");
+
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ llvm_unreachable("Not an overloaded operator");
+ }
+}
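+// For example, 'operator==' of a class A yields the unqualified name "?8",
+// so once the leading '?' prefix and the enclosing scope are added, the
+// symbol for it begins with "??8A@@".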
+
+void MicrosoftCXXNameMangler::mangleSourceName(const IdentifierInfo *II) {
+ // <source name> ::= <identifier> @
+ Out << II->getName() << '@';
+}
+
+void MicrosoftCXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
+ Context.mangleObjCMethodName(MD, Out);
+}
+
+void MicrosoftCXXNameMangler::mangleQualifiers(Qualifiers Quals,
+ bool IsMember) {
+ // <cvr-qualifiers> ::= [E] [F] [I] <base-cvr-qualifiers>
+ // 'E' means __ptr64 (32-bit only); 'F' means __unaligned (32/64-bit only);
+ // 'I' means __restrict (32/64-bit).
+ // Note that the MSVC __restrict keyword isn't the same as the C99 restrict
+ // keyword!
+ // <base-cvr-qualifiers> ::= A # near
+ // ::= B # near const
+ // ::= C # near volatile
+ // ::= D # near const volatile
+ // ::= E # far (16-bit)
+ // ::= F # far const (16-bit)
+ // ::= G # far volatile (16-bit)
+ // ::= H # far const volatile (16-bit)
+ // ::= I # huge (16-bit)
+ // ::= J # huge const (16-bit)
+ // ::= K # huge volatile (16-bit)
+ // ::= L # huge const volatile (16-bit)
+ // ::= M <basis> # based
+ // ::= N <basis> # based const
+ // ::= O <basis> # based volatile
+ // ::= P <basis> # based const volatile
+ // ::= Q # near member
+ // ::= R # near const member
+ // ::= S # near volatile member
+ // ::= T # near const volatile member
+ // ::= U # far member (16-bit)
+ // ::= V # far const member (16-bit)
+ // ::= W # far volatile member (16-bit)
+ // ::= X # far const volatile member (16-bit)
+ // ::= Y # huge member (16-bit)
+ // ::= Z # huge const member (16-bit)
+ // ::= 0 # huge volatile member (16-bit)
+ // ::= 1 # huge const volatile member (16-bit)
+ // ::= 2 <basis> # based member
+ // ::= 3 <basis> # based const member
+ // ::= 4 <basis> # based volatile member
+ // ::= 5 <basis> # based const volatile member
+ // ::= 6 # near function (pointers only)
+ // ::= 7 # far function (pointers only)
+ // ::= 8 # near method (pointers only)
+ // ::= 9 # far method (pointers only)
+ // ::= _A <basis> # based function (pointers only)
+ // ::= _B <basis> # based function (far?) (pointers only)
+ // ::= _C <basis> # based method (pointers only)
+ // ::= _D <basis> # based method (far?) (pointers only)
+ // ::= _E # block (Clang)
+ // <basis> ::= 0 # __based(void)
+ // ::= 1 # __based(segment)?
+ // ::= 2 <name> # __based(name)
+ // ::= 3 # ?
+ // ::= 4 # ?
+ // ::= 5 # not really based
+ if (!IsMember) {
+ if (!Quals.hasVolatile()) {
+ if (!Quals.hasConst())
+ Out << 'A';
+ else
+ Out << 'B';
+ } else {
+ if (!Quals.hasConst())
+ Out << 'C';
+ else
+ Out << 'D';
+ }
+ } else {
+ if (!Quals.hasVolatile()) {
+ if (!Quals.hasConst())
+ Out << 'Q';
+ else
+ Out << 'R';
+ } else {
+ if (!Quals.hasConst())
+ Out << 'S';
+ else
+ Out << 'T';
+ }
+ }
+
+ // FIXME: For now, just drop all extension qualifiers on the floor.
+}
+
+void MicrosoftCXXNameMangler::mangleType(QualType T) {
+ // Only operate on the canonical type!
+ T = getASTContext().getCanonicalType(T);
+
+ Qualifiers Quals = T.getLocalQualifiers();
+ if (Quals) {
+ // We have to mangle these now, while we still have enough information.
+ // <pointer-cvr-qualifiers> ::= P # pointer
+ // ::= Q # const pointer
+ // ::= R # volatile pointer
+ // ::= S # const volatile pointer
+ if (T->isAnyPointerType() || T->isMemberPointerType() ||
+ T->isBlockPointerType()) {
+ if (!Quals.hasVolatile())
+ Out << 'Q';
+ else {
+ if (!Quals.hasConst())
+ Out << 'R';
+ else
+ Out << 'S';
+ }
+ } else
+ // Just emit qualifiers like normal.
+ // NB: When we mangle a pointer/reference type, and the pointee
+ // type has no qualifiers, the lack of qualifier gets mangled
+ // in there.
+ mangleQualifiers(Quals, false);
+ } else if (T->isAnyPointerType() || T->isMemberPointerType() ||
+ T->isBlockPointerType()) {
+ Out << 'P';
+ }
+ switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define NON_CANONICAL_TYPE(CLASS, PARENT) \
+case Type::CLASS: \
+llvm_unreachable("can't mangle non-canonical type " #CLASS "Type"); \
+return;
+#define TYPE(CLASS, PARENT) \
+case Type::CLASS: \
+mangleType(static_cast<const CLASS##Type*>(T.getTypePtr())); \
+break;
+#include "clang/AST/TypeNodes.def"
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T) {
+ // <type> ::= <builtin-type>
+ // <builtin-type> ::= X # void
+ // ::= C # signed char
+ // ::= D # char
+ // ::= E # unsigned char
+ // ::= F # short
+ // ::= G # unsigned short (or wchar_t if it's not a builtin)
+ // ::= H # int
+ // ::= I # unsigned int
+ // ::= J # long
+ // ::= K # unsigned long
+ // L # <none>
+ // ::= M # float
+ // ::= N # double
+ // ::= O # long double (__float80 is mangled differently)
+ // ::= _J # long long, __int64
+ // ::= _K # unsigned long long, __int64
+ // ::= _L # __int128
+ // ::= _M # unsigned __int128
+ // ::= _N # bool
+ // _O # <array in parameter>
+ // ::= _T # __float80 (Intel)
+ // ::= _W # wchar_t
+ // ::= _Z # __float80 (Digital Mars)
+ switch (T->getKind()) {
+ case BuiltinType::Void: Out << 'X'; break;
+ case BuiltinType::SChar: Out << 'C'; break;
+ case BuiltinType::Char_U: case BuiltinType::Char_S: Out << 'D'; break;
+ case BuiltinType::UChar: Out << 'E'; break;
+ case BuiltinType::Short: Out << 'F'; break;
+ case BuiltinType::UShort: Out << 'G'; break;
+ case BuiltinType::Int: Out << 'H'; break;
+ case BuiltinType::UInt: Out << 'I'; break;
+ case BuiltinType::Long: Out << 'J'; break;
+ case BuiltinType::ULong: Out << 'K'; break;
+ case BuiltinType::Float: Out << 'M'; break;
+ case BuiltinType::Double: Out << 'N'; break;
+ // TODO: Determine size and mangle accordingly
+ case BuiltinType::LongDouble: Out << 'O'; break;
+ case BuiltinType::LongLong: Out << "_J"; break;
+ case BuiltinType::ULongLong: Out << "_K"; break;
+ case BuiltinType::Int128: Out << "_L"; break;
+ case BuiltinType::UInt128: Out << "_M"; break;
+ case BuiltinType::Bool: Out << "_N"; break;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U: Out << "_W"; break;
+
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ case BuiltinType::Dependent:
+ llvm_unreachable("placeholder types shouldn't get to name mangling");
+
+ case BuiltinType::ObjCId: Out << "PAUobjc_object@@"; break;
+ case BuiltinType::ObjCClass: Out << "PAUobjc_class@@"; break;
+ case BuiltinType::ObjCSel: Out << "PAUobjc_selector@@"; break;
+
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::Half:
+ case BuiltinType::NullPtr:
+ assert(0 && "Don't know how to mangle this type yet");
+ }
+}
+
+// <type> ::= <function-type>
+void MicrosoftCXXNameMangler::mangleType(const FunctionProtoType *T) {
+ // Structors only appear in decls, so at this point we know it's not a
+ // structor type.
+ // I'll probably have mangleType(MemberPointerType) call the mangleType()
+ // method directly.
+ mangleType(T, NULL, false, false);
+}
+void MicrosoftCXXNameMangler::mangleType(const FunctionNoProtoType *T) {
+ llvm_unreachable("Can't mangle K&R function prototypes");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const FunctionType *T,
+ const FunctionDecl *D,
+ bool IsStructor,
+ bool IsInstMethod) {
+ // <function-type> ::= <this-cvr-qualifiers> <calling-convention>
+ // <return-type> <argument-list> <throw-spec>
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+
+ // If this is a C++ instance method, mangle the CVR qualifiers for the
+ // this pointer.
+ if (IsInstMethod)
+ mangleQualifiers(Qualifiers::fromCVRMask(Proto->getTypeQuals()), false);
+
+ mangleCallingConvention(T, IsInstMethod);
+
+ // <return-type> ::= <type>
+ // ::= @ # structors (they have no declared return type)
+ if (IsStructor)
+ Out << '@';
+ else
+ mangleType(Proto->getResultType());
+
+ // <argument-list> ::= X # void
+ // ::= <type>+ @
+ // ::= <type>* Z # varargs
+ if (Proto->getNumArgs() == 0 && !Proto->isVariadic()) {
+ Out << 'X';
+ } else {
+ if (D) {
+ // If we got a decl, use the "types-as-written" to make sure arrays
+ // get mangled right.
+ for (FunctionDecl::param_const_iterator Parm = D->param_begin(),
+ ParmEnd = D->param_end();
+ Parm != ParmEnd; ++Parm)
+ mangleType((*Parm)->getTypeSourceInfo()->getType());
+ } else {
+ for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
+ ArgEnd = Proto->arg_type_end();
+ Arg != ArgEnd; ++Arg)
+ mangleType(*Arg);
+ }
+ // <builtin-type> ::= Z # ellipsis
+ if (Proto->isVariadic())
+ Out << 'Z';
+ else
+ Out << '@';
+ }
+
+ mangleThrowSpecification(Proto);
+}
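+// For example, a global 'int f(double, ...)' encodes as "?f@@YAHNZZ": 'Y'
+// (global), 'A' (__cdecl), 'H' (int return), 'N' (double), 'Z' for the
+// ellipsis, and a final 'Z' for the throw specification.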
+
+void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
+ // <function-class> ::= A # private: near
+ // ::= B # private: far
+ // ::= C # private: static near
+ // ::= D # private: static far
+ // ::= E # private: virtual near
+ // ::= F # private: virtual far
+ // ::= G # private: thunk near
+ // ::= H # private: thunk far
+ // ::= I # protected: near
+ // ::= J # protected: far
+ // ::= K # protected: static near
+ // ::= L # protected: static far
+ // ::= M # protected: virtual near
+ // ::= N # protected: virtual far
+ // ::= O # protected: thunk near
+ // ::= P # protected: thunk far
+ // ::= Q # public: near
+ // ::= R # public: far
+ // ::= S # public: static near
+ // ::= T # public: static far
+ // ::= U # public: virtual near
+ // ::= V # public: virtual far
+ // ::= W # public: thunk near
+ // ::= X # public: thunk far
+ // ::= Y # global near
+ // ::= Z # global far
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ switch (MD->getAccess()) {
+ default:
+ case AS_private:
+ if (MD->isStatic())
+ Out << 'C';
+ else if (MD->isVirtual())
+ Out << 'E';
+ else
+ Out << 'A';
+ break;
+ case AS_protected:
+ if (MD->isStatic())
+ Out << 'K';
+ else if (MD->isVirtual())
+ Out << 'M';
+ else
+ Out << 'I';
+ break;
+ case AS_public:
+ if (MD->isStatic())
+ Out << 'S';
+ else if (MD->isVirtual())
+ Out << 'U';
+ else
+ Out << 'Q';
+ }
+ } else
+ Out << 'Y';
+}
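+// For example (on x86, where the default method calling convention is
+// __thiscall): a public non-static, non-virtual instance method gets 'Q'
+// here, then 'A' for an unqualified 'this' and 'E' for __thiscall from the
+// function-type mangling below, producing the familiar "QAE" sequence.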
+void MicrosoftCXXNameMangler::mangleCallingConvention(const FunctionType *T,
+ bool IsInstMethod) {
+ // <calling-convention> ::= A # __cdecl
+ // ::= B # __export __cdecl
+ // ::= C # __pascal
+ // ::= D # __export __pascal
+ // ::= E # __thiscall
+ // ::= F # __export __thiscall
+ // ::= G # __stdcall
+ // ::= H # __export __stdcall
+ // ::= I # __fastcall
+ // ::= J # __export __fastcall
+ // The 'export' calling conventions are from a bygone era
+ // (*cough*Win16*cough*) when functions were declared for export with
+ // that keyword. (It didn't actually export them, it just made them so
+ // that they could be in a DLL and somebody from another module could call
+ // them.)
+ CallingConv CC = T->getCallConv();
+ if (CC == CC_Default)
+ CC = IsInstMethod ? getASTContext().getDefaultMethodCallConv() : CC_C;
+ switch (CC) {
+ default:
+ llvm_unreachable("Unsupported CC for mangling");
+ case CC_Default:
+ case CC_C: Out << 'A'; break;
+ case CC_X86Pascal: Out << 'C'; break;
+ case CC_X86ThisCall: Out << 'E'; break;
+ case CC_X86StdCall: Out << 'G'; break;
+ case CC_X86FastCall: Out << 'I'; break;
+ }
+}
+void MicrosoftCXXNameMangler::mangleThrowSpecification(
+ const FunctionProtoType *FT) {
+ // <throw-spec> ::= Z # throw(...) (default)
+ // ::= @ # throw() or __declspec/__attribute__((nothrow))
+ // ::= <type>+
+ // NOTE: Since the Microsoft compiler ignores throw specifications, they are
+ // all actually mangled as 'Z'. (They're ignored because their associated
+ // functionality isn't implemented, and probably never will be.)
+ Out << 'Z';
+}
+
+void MicrosoftCXXNameMangler::mangleType(const UnresolvedUsingType *T) {
+ llvm_unreachable("Don't know how to mangle UnresolvedUsingTypes yet!");
+}
+
+// <type> ::= <union-type> | <struct-type> | <class-type> | <enum-type>
+// <union-type> ::= T <name>
+// <struct-type> ::= U <name>
+// <class-type> ::= V <name>
+// <enum-type> ::= W <size> <name>
+void MicrosoftCXXNameMangler::mangleType(const EnumType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void MicrosoftCXXNameMangler::mangleType(const RecordType *T) {
+ mangleType(static_cast<const TagType*>(T));
+}
+void MicrosoftCXXNameMangler::mangleType(const TagType *T) {
+ switch (T->getDecl()->getTagKind()) {
+ case TTK_Union:
+ Out << 'T';
+ break;
+ case TTK_Struct:
+ Out << 'U';
+ break;
+ case TTK_Class:
+ Out << 'V';
+ break;
+ case TTK_Enum:
+ Out << 'W';
+ Out << getASTContext().getTypeSizeInChars(
+ cast<EnumDecl>(T->getDecl())->getIntegerType()).getQuantity();
+ break;
+ }
+ mangleName(T->getDecl());
+}
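+// For example, 'struct S' encodes as "US@@", 'class C' as "VC@@", and an
+// enum 'E' with an int-sized underlying type as "W4E@@".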
+
+// <type> ::= <array-type>
+// <array-type> ::= P <cvr-qualifiers> [Y <dimension-count> <dimension>+]
+// <element-type> # as global
+// ::= Q <cvr-qualifiers> [Y <dimension-count> <dimension>+]
+// <element-type> # as param
+// It's supposed to be the other way around, but for some strange reason, it
+// isn't. Today this behavior is retained for the sole purpose of backwards
+// compatibility.
+void MicrosoftCXXNameMangler::mangleType(const ArrayType *T, bool IsGlobal) {
+ // This isn't a recursive mangling, so now we have to do it all in this
+ // one call.
+ if (IsGlobal)
+ Out << 'P';
+ else
+ Out << 'Q';
+ mangleExtraDimensions(T->getElementType());
+}
+void MicrosoftCXXNameMangler::mangleType(const ConstantArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const VariableArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleType(const IncompleteArrayType *T) {
+ mangleType(static_cast<const ArrayType *>(T), false);
+}
+void MicrosoftCXXNameMangler::mangleExtraDimensions(QualType ElementTy) {
+ SmallVector<llvm::APInt, 3> Dimensions;
+ for (;;) {
+ if (ElementTy->isConstantArrayType()) {
+ const ConstantArrayType *CAT =
+ static_cast<const ConstantArrayType *>(ElementTy.getTypePtr());
+ Dimensions.push_back(CAT->getSize());
+ ElementTy = CAT->getElementType();
+ } else if (ElementTy->isVariableArrayType()) {
+ llvm_unreachable("Don't know how to mangle VLAs!");
+ } else if (ElementTy->isDependentSizedArrayType()) {
+ // The dependent expression has to be folded into a constant (TODO).
+ llvm_unreachable("Don't know how to mangle dependent-sized arrays!");
+ } else if (ElementTy->isIncompleteArrayType()) continue;
+ else break;
+ }
+ mangleQualifiers(ElementTy.getQualifiers(), false);
+ // If there are any additional dimensions, mangle them now.
+ if (Dimensions.size() > 0) {
+ Out << 'Y';
+ // <dimension-count> ::= <number> # number of extra dimensions
+ mangleNumber(Dimensions.size());
+ for (unsigned Dim = 0; Dim < Dimensions.size(); ++Dim) {
+ mangleNumber(Dimensions[Dim].getLimitedValue());
+ }
+ }
+ mangleType(ElementTy.getLocalUnqualifiedType());
+}
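+// For example, a global 'int a[10][20]' encodes as "?a@@3PAY0BE@HA": 'P' for
+// the array-as-global case above, 'A' for the unqualified element, "Y0" for
+// one extra dimension, "BE@" for the inner bound 20, 'H' for int, and a
+// trailing 'A' from mangleVariableEncoding.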
+
+// <type> ::= <pointer-to-member-type>
+// <pointer-to-member-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers>
+// <class name> <type>
+void MicrosoftCXXNameMangler::mangleType(const MemberPointerType *T) {
+ QualType PointeeType = T->getPointeeType();
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(PointeeType)) {
+ Out << '8';
+ mangleName(cast<RecordType>(T->getClass())->getDecl());
+ mangleType(FPT, NULL, false, true);
+ } else {
+ mangleQualifiers(PointeeType.getQualifiers(), true);
+ mangleName(cast<RecordType>(T->getClass())->getDecl());
+ mangleType(PointeeType.getLocalUnqualifiedType());
+ }
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TemplateTypeParmType *T) {
+ llvm_unreachable("Don't know how to mangle TemplateTypeParmTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(
+ const SubstTemplateTypeParmPackType *T) {
+ llvm_unreachable(
+ "Don't know how to mangle SubstTemplateTypeParmPackTypes yet!");
+}
+
+// <type> ::= <pointer-type>
+// <pointer-type> ::= <pointer-cvr-qualifiers> <cvr-qualifiers> <type>
+void MicrosoftCXXNameMangler::mangleType(const PointerType *T) {
+ QualType PointeeTy = T->getPointeeType();
+ if (PointeeTy->isArrayType()) {
+ // Pointers to arrays are mangled like arrays.
+ mangleExtraDimensions(T->getPointeeType());
+ } else if (PointeeTy->isFunctionType()) {
+ // Function pointers are special.
+ Out << '6';
+ mangleType(static_cast<const FunctionType *>(PointeeTy.getTypePtr()),
+ NULL, false, false);
+ } else {
+ if (!PointeeTy.hasQualifiers())
+ // Lack of qualifiers is mangled as 'A'.
+ Out << 'A';
+ mangleType(PointeeTy);
+ }
+}
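+// For example, the pointer mangling above encodes an 'int *' parameter as
+// "PAH" and a 'const char *' parameter as "PBD", the 'B' carrying the
+// pointee's const qualifier.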
+void MicrosoftCXXNameMangler::mangleType(const ObjCObjectPointerType *T) {
+ // Object pointers never have qualifiers.
+ Out << 'A';
+ mangleType(T->getPointeeType());
+}
+
+// <type> ::= <reference-type>
+// <reference-type> ::= A <cvr-qualifiers> <type>
+void MicrosoftCXXNameMangler::mangleType(const LValueReferenceType *T) {
+ Out << 'A';
+ QualType PointeeTy = T->getPointeeType();
+ if (!PointeeTy.hasQualifiers())
+ // Lack of qualifiers is mangled as 'A'.
+ Out << 'A';
+ mangleType(PointeeTy);
+}
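+// For example, an 'int &' parameter encodes as "AAH" and a 'const int &'
+// parameter as "ABH".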
+
+void MicrosoftCXXNameMangler::mangleType(const RValueReferenceType *T) {
+ llvm_unreachable("Don't know how to mangle RValueReferenceTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ComplexType *T) {
+ llvm_unreachable("Don't know how to mangle ComplexTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const VectorType *T) {
+ llvm_unreachable("Don't know how to mangle VectorTypes yet!");
+}
+void MicrosoftCXXNameMangler::mangleType(const ExtVectorType *T) {
+ llvm_unreachable("Don't know how to mangle ExtVectorTypes yet!");
+}
+void MicrosoftCXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
+ llvm_unreachable(
+ "Don't know how to mangle DependentSizedExtVectorTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ObjCInterfaceType *T) {
+ // ObjC interfaces have structs underlying them.
+ Out << 'U';
+ mangleName(T->getDecl());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const ObjCObjectType *T) {
+ // We don't allow overloading by different protocol qualification,
+ // so mangling them isn't necessary.
+ mangleType(T->getBaseType());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const BlockPointerType *T) {
+ Out << "_E";
+ mangleType(T->getPointeeType());
+}
+
+void MicrosoftCXXNameMangler::mangleType(const InjectedClassNameType *T) {
+ llvm_unreachable("Don't know how to mangle InjectedClassNameTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TemplateSpecializationType *T) {
+ llvm_unreachable("Don't know how to mangle TemplateSpecializationTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DependentNameType *T) {
+ llvm_unreachable("Don't know how to mangle DependentNameTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(
+ const DependentTemplateSpecializationType *T) {
+ llvm_unreachable(
+ "Don't know how to mangle DependentTemplateSpecializationTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const PackExpansionType *T) {
+ llvm_unreachable("Don't know how to mangle PackExpansionTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TypeOfType *T) {
+ llvm_unreachable("Don't know how to mangle TypeOfTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const TypeOfExprType *T) {
+ llvm_unreachable("Don't know how to mangle TypeOfExprTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const DecltypeType *T) {
+ llvm_unreachable("Don't know how to mangle DecltypeTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const UnaryTransformType *T) {
+ llvm_unreachable("Don't know how to mangle UnaryTransformationTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const AutoType *T) {
+ llvm_unreachable("Don't know how to mangle AutoTypes yet!");
+}
+
+void MicrosoftCXXNameMangler::mangleType(const AtomicType *T) {
+ llvm_unreachable("Don't know how to mangle AtomicTypes yet!");
+}
+
+void MicrosoftMangleContext::mangleName(const NamedDecl *D,
+ raw_ostream &Out) {
+ assert((isa<FunctionDecl>(D) || isa<VarDecl>(D)) &&
+ "Invalid mangleName() call, argument is not a variable or function!");
+ assert(!isa<CXXConstructorDecl>(D) && !isa<CXXDestructorDecl>(D) &&
+ "Invalid mangleName() call on 'structor decl!");
+
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ getASTContext().getSourceManager(),
+ "Mangling declaration");
+
+ MicrosoftCXXNameMangler Mangler(*this, Out);
+ return Mangler.mangle(D);
+}
+void MicrosoftMangleContext::mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
+ raw_ostream &) {
+ llvm_unreachable("Can't yet mangle thunks!");
+}
+void MicrosoftMangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const ThisAdjustment &,
+ raw_ostream &) {
+ llvm_unreachable("Can't yet mangle destructor thunks!");
+}
+void MicrosoftMangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
+ raw_ostream &) {
+ llvm_unreachable("Can't yet mangle virtual tables!");
+}
+void MicrosoftMangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
+ raw_ostream &) {
+ llvm_unreachable("The MS C++ ABI does not have virtual table tables!");
+}
+void MicrosoftMangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD,
+ int64_t Offset,
+ const CXXRecordDecl *Type,
+ raw_ostream &) {
+ llvm_unreachable("The MS C++ ABI does not have constructor vtables!");
+}
+void MicrosoftMangleContext::mangleCXXRTTI(QualType T,
+ raw_ostream &) {
+ llvm_unreachable("Can't yet mangle RTTI!");
+}
+void MicrosoftMangleContext::mangleCXXRTTIName(QualType T,
+ raw_ostream &) {
+ llvm_unreachable("Can't yet mangle RTTI names!");
+}
+void MicrosoftMangleContext::mangleCXXCtor(const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ raw_ostream & Out) {
+ MicrosoftCXXNameMangler mangler(*this, Out);
+ mangler.mangle(D);
+}
+void MicrosoftMangleContext::mangleCXXDtor(const CXXDestructorDecl *D,
+ CXXDtorType Type,
+ raw_ostream & Out) {
+ MicrosoftCXXNameMangler mangler(*this, Out);
+ mangler.mangle(D);
+}
+void MicrosoftMangleContext::mangleReferenceTemporary(const clang::VarDecl *,
+ raw_ostream &) {
+ llvm_unreachable("Can't yet mangle reference temporaries!");
+}
+
+MangleContext *clang::createMicrosoftMangleContext(ASTContext &Context,
+ DiagnosticsEngine &Diags) {
+ return new MicrosoftMangleContext(Context, Diags);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp b/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp
new file mode 100644
index 0000000..f5ea2c5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/NSAPI.cpp
@@ -0,0 +1,312 @@
+//===--- NSAPI.cpp - NSFoundation APIs ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/NSAPI.h"
+#include "clang/AST/ASTContext.h"
+
+using namespace clang;
+
+NSAPI::NSAPI(ASTContext &ctx)
+ : Ctx(ctx), ClassIds() {
+}
+
+IdentifierInfo *NSAPI::getNSClassId(NSClassIdKindKind K) const {
+ static const char *ClassName[NumClassIds] = {
+ "NSObject",
+ "NSString",
+ "NSArray",
+ "NSMutableArray",
+ "NSDictionary",
+ "NSMutableDictionary",
+ "NSNumber"
+ };
+
+ if (!ClassIds[K])
+ return (ClassIds[K] = &Ctx.Idents.get(ClassName[K]));
+
+ return ClassIds[K];
+}
+
+Selector NSAPI::getNSStringSelector(NSStringMethodKind MK) const {
+ if (NSStringSelectors[MK].isNull()) {
+ Selector Sel;
+ switch (MK) {
+ case NSStr_stringWithString:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("stringWithString"));
+ break;
+ case NSStr_initWithString:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithString"));
+ break;
+ }
+ return (NSStringSelectors[MK] = Sel);
+ }
+
+ return NSStringSelectors[MK];
+}
+
+Selector NSAPI::getNSArraySelector(NSArrayMethodKind MK) const {
+ if (NSArraySelectors[MK].isNull()) {
+ Selector Sel;
+ switch (MK) {
+ case NSArr_array:
+ Sel = Ctx.Selectors.getNullarySelector(&Ctx.Idents.get("array"));
+ break;
+ case NSArr_arrayWithArray:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithArray"));
+ break;
+ case NSArr_arrayWithObject:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithObject"));
+ break;
+ case NSArr_arrayWithObjects:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("arrayWithObjects"));
+ break;
+ case NSArr_arrayWithObjectsCount: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("arrayWithObjects"),
+ &Ctx.Idents.get("count")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSArr_initWithArray:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithArray"));
+ break;
+ case NSArr_initWithObjects:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("initWithObjects"));
+ break;
+ case NSArr_objectAtIndex:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectAtIndex"));
+ break;
+ case NSMutableArr_replaceObjectAtIndex: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("replaceObjectAtIndex"),
+ &Ctx.Idents.get("withObject")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ }
+ return (NSArraySelectors[MK] = Sel);
+ }
+
+ return NSArraySelectors[MK];
+}
+
+llvm::Optional<NSAPI::NSArrayMethodKind>
+NSAPI::getNSArrayMethodKind(Selector Sel) {
+ for (unsigned i = 0; i != NumNSArrayMethods; ++i) {
+ NSArrayMethodKind MK = NSArrayMethodKind(i);
+ if (Sel == getNSArraySelector(MK))
+ return MK;
+ }
+
+ return llvm::Optional<NSArrayMethodKind>();
+}
+
+Selector NSAPI::getNSDictionarySelector(
+ NSDictionaryMethodKind MK) const {
+ if (NSDictionarySelectors[MK].isNull()) {
+ Selector Sel;
+ switch (MK) {
+ case NSDict_dictionary:
+ Sel = Ctx.Selectors.getNullarySelector(&Ctx.Idents.get("dictionary"));
+ break;
+ case NSDict_dictionaryWithDictionary:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("dictionaryWithDictionary"));
+ break;
+ case NSDict_dictionaryWithObjectForKey: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("dictionaryWithObject"),
+ &Ctx.Idents.get("forKey")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSDict_dictionaryWithObjectsForKeys: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("dictionaryWithObjects"),
+ &Ctx.Idents.get("forKeys")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ case NSDict_dictionaryWithObjectsForKeysCount: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("dictionaryWithObjects"),
+ &Ctx.Idents.get("forKeys"),
+ &Ctx.Idents.get("count")
+ };
+ Sel = Ctx.Selectors.getSelector(3, KeyIdents);
+ break;
+ }
+ case NSDict_dictionaryWithObjectsAndKeys:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("dictionaryWithObjectsAndKeys"));
+ break;
+ case NSDict_initWithDictionary:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("initWithDictionary"));
+ break;
+ case NSDict_initWithObjectsAndKeys:
+ Sel = Ctx.Selectors.getUnarySelector(
+ &Ctx.Idents.get("initWithObjectsAndKeys"));
+ break;
+ case NSDict_objectForKey:
+ Sel = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get("objectForKey"));
+ break;
+ case NSMutableDict_setObjectForKey: {
+ IdentifierInfo *KeyIdents[] = {
+ &Ctx.Idents.get("setObject"),
+ &Ctx.Idents.get("forKey")
+ };
+ Sel = Ctx.Selectors.getSelector(2, KeyIdents);
+ break;
+ }
+ }
+ return (NSDictionarySelectors[MK] = Sel);
+ }
+
+ return NSDictionarySelectors[MK];
+}
+
+llvm::Optional<NSAPI::NSDictionaryMethodKind>
+NSAPI::getNSDictionaryMethodKind(Selector Sel) {
+ for (unsigned i = 0; i != NumNSDictionaryMethods; ++i) {
+ NSDictionaryMethodKind MK = NSDictionaryMethodKind(i);
+ if (Sel == getNSDictionarySelector(MK))
+ return MK;
+ }
+
+ return llvm::Optional<NSDictionaryMethodKind>();
+}
+
+Selector NSAPI::getNSNumberLiteralSelector(NSNumberLiteralMethodKind MK,
+ bool Instance) const {
+ static const char *ClassSelectorName[NumNSNumberLiteralMethods] = {
+ "numberWithChar",
+ "numberWithUnsignedChar",
+ "numberWithShort",
+ "numberWithUnsignedShort",
+ "numberWithInt",
+ "numberWithUnsignedInt",
+ "numberWithLong",
+ "numberWithUnsignedLong",
+ "numberWithLongLong",
+ "numberWithUnsignedLongLong",
+ "numberWithFloat",
+ "numberWithDouble",
+ "numberWithBool",
+ "numberWithInteger",
+ "numberWithUnsignedInteger"
+ };
+ static const char *InstanceSelectorName[NumNSNumberLiteralMethods] = {
+ "initWithChar",
+ "initWithUnsignedChar",
+ "initWithShort",
+ "initWithUnsignedShort",
+ "initWithInt",
+ "initWithUnsignedInt",
+ "initWithLong",
+ "initWithUnsignedLong",
+ "initWithLongLong",
+ "initWithUnsignedLongLong",
+ "initWithFloat",
+ "initWithDouble",
+ "initWithBool",
+ "initWithInteger",
+ "initWithUnsignedInteger"
+ };
+
+ Selector *Sels;
+ const char **Names;
+ if (Instance) {
+ Sels = NSNumberInstanceSelectors;
+ Names = InstanceSelectorName;
+ } else {
+ Sels = NSNumberClassSelectors;
+ Names = ClassSelectorName;
+ }
+
+ if (Sels[MK].isNull())
+ Sels[MK] = Ctx.Selectors.getUnarySelector(&Ctx.Idents.get(Names[MK]));
+ return Sels[MK];
+}
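+// For example, getNSNumberLiteralSelector(NSAPI::NSNumberWithInt, false)
+// yields the selector for the class method 'numberWithInt:', while passing
+// true for Instance yields 'initWithInt:'; each selector is built once and
+// then cached in the corresponding table.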
+
+llvm::Optional<NSAPI::NSNumberLiteralMethodKind>
+NSAPI::getNSNumberLiteralMethodKind(Selector Sel) const {
+ for (unsigned i = 0; i != NumNSNumberLiteralMethods; ++i) {
+ NSNumberLiteralMethodKind MK = NSNumberLiteralMethodKind(i);
+ if (isNSNumberLiteralSelector(MK, Sel))
+ return MK;
+ }
+
+ return llvm::Optional<NSNumberLiteralMethodKind>();
+}
+
+llvm::Optional<NSAPI::NSNumberLiteralMethodKind>
+NSAPI::getNSNumberFactoryMethodKind(QualType T) {
+ const BuiltinType *BT = T->getAs<BuiltinType>();
+ if (!BT)
+ return llvm::Optional<NSAPI::NSNumberLiteralMethodKind>();
+
+ switch (BT->getKind()) {
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return NSAPI::NSNumberWithChar;
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ return NSAPI::NSNumberWithUnsignedChar;
+ case BuiltinType::Short:
+ return NSAPI::NSNumberWithShort;
+ case BuiltinType::UShort:
+ return NSAPI::NSNumberWithUnsignedShort;
+ case BuiltinType::Int:
+ return NSAPI::NSNumberWithInt;
+ case BuiltinType::UInt:
+ return NSAPI::NSNumberWithUnsignedInt;
+ case BuiltinType::Long:
+ return NSAPI::NSNumberWithLong;
+ case BuiltinType::ULong:
+ return NSAPI::NSNumberWithUnsignedLong;
+ case BuiltinType::LongLong:
+ return NSAPI::NSNumberWithLongLong;
+ case BuiltinType::ULongLong:
+ return NSAPI::NSNumberWithUnsignedLongLong;
+ case BuiltinType::Float:
+ return NSAPI::NSNumberWithFloat;
+ case BuiltinType::Double:
+ return NSAPI::NSNumberWithDouble;
+ case BuiltinType::Bool:
+ return NSAPI::NSNumberWithBool;
+
+ case BuiltinType::Void:
+ case BuiltinType::WChar_U:
+ case BuiltinType::WChar_S:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::Int128:
+ case BuiltinType::LongDouble:
+ case BuiltinType::UInt128:
+ case BuiltinType::NullPtr:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCSel:
+ case BuiltinType::BoundMember:
+ case BuiltinType::Dependent:
+ case BuiltinType::Overload:
+ case BuiltinType::UnknownAny:
+ case BuiltinType::ARCUnbridgedCast:
+ case BuiltinType::Half:
+ case BuiltinType::PseudoObject:
+ break;
+ }
+
+ return llvm::Optional<NSAPI::NSNumberLiteralMethodKind>();
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp b/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp
new file mode 100644
index 0000000..dbf267b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/NestedNameSpecifier.cpp
@@ -0,0 +1,633 @@
+//===--- NestedNameSpecifier.cpp - C++ nested name specifiers -----*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the NestedNameSpecifier class, which represents
+// a C++ nested-name-specifier.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+
+using namespace clang;
+
+NestedNameSpecifier *
+NestedNameSpecifier::FindOrInsert(const ASTContext &Context,
+ const NestedNameSpecifier &Mockup) {
+ llvm::FoldingSetNodeID ID;
+ Mockup.Profile(ID);
+
+ void *InsertPos = 0;
+ NestedNameSpecifier *NNS
+ = Context.NestedNameSpecifiers.FindNodeOrInsertPos(ID, InsertPos);
+ if (!NNS) {
+ NNS = new (Context, 4) NestedNameSpecifier(Mockup);
+ Context.NestedNameSpecifiers.InsertNode(NNS, InsertPos);
+ }
+
+ return NNS;
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix, IdentifierInfo *II) {
+ assert(II && "Identifier cannot be NULL");
+ assert((!Prefix || Prefix->isDependent()) && "Prefix must be dependent");
+
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(Prefix);
+ Mockup.Prefix.setInt(StoredIdentifier);
+ Mockup.Specifier = II;
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix, NamespaceDecl *NS) {
+ assert(NS && "Namespace cannot be NULL");
+ assert((!Prefix ||
+ (Prefix->getAsType() == 0 && Prefix->getAsIdentifier() == 0)) &&
+ "Broken nested name specifier");
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(Prefix);
+ Mockup.Prefix.setInt(StoredNamespaceOrAlias);
+ Mockup.Specifier = NS;
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix,
+ NamespaceAliasDecl *Alias) {
+ assert(Alias && "Namespace alias cannot be NULL");
+ assert((!Prefix ||
+ (Prefix->getAsType() == 0 && Prefix->getAsIdentifier() == 0)) &&
+ "Broken nested name specifier");
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(Prefix);
+ Mockup.Prefix.setInt(StoredNamespaceOrAlias);
+ Mockup.Specifier = Alias;
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(const ASTContext &Context,
+ NestedNameSpecifier *Prefix,
+ bool Template, const Type *T) {
+ assert(T && "Type cannot be NULL");
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(Prefix);
+ Mockup.Prefix.setInt(Template? StoredTypeSpecWithTemplate : StoredTypeSpec);
+ Mockup.Specifier = const_cast<Type*>(T);
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::Create(const ASTContext &Context, IdentifierInfo *II) {
+ assert(II && "Identifier cannot be NULL");
+ NestedNameSpecifier Mockup;
+ Mockup.Prefix.setPointer(0);
+ Mockup.Prefix.setInt(StoredIdentifier);
+ Mockup.Specifier = II;
+ return FindOrInsert(Context, Mockup);
+}
+
+NestedNameSpecifier *
+NestedNameSpecifier::GlobalSpecifier(const ASTContext &Context) {
+ if (!Context.GlobalNestedNameSpecifier)
+ Context.GlobalNestedNameSpecifier = new (Context, 4) NestedNameSpecifier();
+ return Context.GlobalNestedNameSpecifier;
+}
+
+NestedNameSpecifier::SpecifierKind NestedNameSpecifier::getKind() const {
+ if (Specifier == 0)
+ return Global;
+
+ switch (Prefix.getInt()) {
+ case StoredIdentifier:
+ return Identifier;
+
+ case StoredNamespaceOrAlias:
+ return isa<NamespaceDecl>(static_cast<NamedDecl *>(Specifier))? Namespace
+ : NamespaceAlias;
+
+ case StoredTypeSpec:
+ return TypeSpec;
+
+ case StoredTypeSpecWithTemplate:
+ return TypeSpecWithTemplate;
+ }
+
+ llvm_unreachable("Invalid NNS Kind!");
+}
+
+/// \brief Retrieve the namespace stored in this nested name
+/// specifier.
+NamespaceDecl *NestedNameSpecifier::getAsNamespace() const {
+ if (Prefix.getInt() == StoredNamespaceOrAlias)
+ return dyn_cast<NamespaceDecl>(static_cast<NamedDecl *>(Specifier));
+
+ return 0;
+}
+
+/// \brief Retrieve the namespace alias stored in this nested name
+/// specifier.
+NamespaceAliasDecl *NestedNameSpecifier::getAsNamespaceAlias() const {
+ if (Prefix.getInt() == StoredNamespaceOrAlias)
+ return dyn_cast<NamespaceAliasDecl>(static_cast<NamedDecl *>(Specifier));
+
+ return 0;
+}
+
+
+/// \brief Whether this nested name specifier refers to a dependent
+/// type or not.
+bool NestedNameSpecifier::isDependent() const {
+ switch (getKind()) {
+ case Identifier:
+ // Identifier specifiers always represent dependent types
+ return true;
+
+ case Namespace:
+ case NamespaceAlias:
+ case Global:
+ return false;
+
+ case TypeSpec:
+ case TypeSpecWithTemplate:
+ return getAsType()->isDependentType();
+ }
+
+ llvm_unreachable("Invalid NNS Kind!");
+}
+
+/// \brief Whether this nested name specifier is instantiation-dependent,
+/// i.e., whether it involves a template parameter in some way.
+bool NestedNameSpecifier::isInstantiationDependent() const {
+ switch (getKind()) {
+ case Identifier:
+ // Identifier specifiers always represent dependent types
+ return true;
+
+ case Namespace:
+ case NamespaceAlias:
+ case Global:
+ return false;
+
+ case TypeSpec:
+ case TypeSpecWithTemplate:
+ return getAsType()->isInstantiationDependentType();
+ }
+
+ llvm_unreachable("Invalid NNS Kind!");
+}
+
+bool NestedNameSpecifier::containsUnexpandedParameterPack() const {
+ switch (getKind()) {
+ case Identifier:
+ return getPrefix() && getPrefix()->containsUnexpandedParameterPack();
+
+ case Namespace:
+ case NamespaceAlias:
+ case Global:
+ return false;
+
+ case TypeSpec:
+ case TypeSpecWithTemplate:
+ return getAsType()->containsUnexpandedParameterPack();
+ }
+
+ llvm_unreachable("Invalid NNS Kind!");
+}
+
+/// \brief Print this nested name specifier to the given output
+/// stream.
+void
+NestedNameSpecifier::print(raw_ostream &OS,
+ const PrintingPolicy &Policy) const {
+ if (getPrefix())
+ getPrefix()->print(OS, Policy);
+
+ switch (getKind()) {
+ case Identifier:
+ OS << getAsIdentifier()->getName();
+ break;
+
+ case Namespace:
+ if (getAsNamespace()->isAnonymousNamespace())
+ return;
+
+ OS << getAsNamespace()->getName();
+ break;
+
+ case NamespaceAlias:
+ OS << getAsNamespaceAlias()->getName();
+ break;
+
+ case Global:
+ break;
+
+ case TypeSpecWithTemplate:
+ OS << "template ";
+ // Fall through to print the type.
+
+ case TypeSpec: {
+ std::string TypeStr;
+ const Type *T = getAsType();
+
+ PrintingPolicy InnerPolicy(Policy);
+ InnerPolicy.SuppressScope = true;
+
+ // Nested-name-specifiers are intended to contain minimally-qualified
+ // types. An actual ElaboratedType will not occur, since we'll store
+ // just the type that is referred to in the nested-name-specifier (e.g.,
+ // a TypedefType, TagType, etc.). However, when we are dealing with
+ // dependent template-id types (e.g., Outer<T>::template Inner<U>),
+ // the type requires its own nested-name-specifier for uniqueness, so we
+ // suppress that nested-name-specifier during printing.
+ assert(!isa<ElaboratedType>(T) &&
+ "Elaborated type in nested-name-specifier");
+ if (const TemplateSpecializationType *SpecType
+ = dyn_cast<TemplateSpecializationType>(T)) {
+ // Print the template name without its corresponding
+ // nested-name-specifier.
+ SpecType->getTemplateName().print(OS, InnerPolicy, true);
+
+ // Print the template argument list.
+ TypeStr = TemplateSpecializationType::PrintTemplateArgumentList(
+ SpecType->getArgs(),
+ SpecType->getNumArgs(),
+ InnerPolicy);
+ } else {
+ // Print the type normally
+ TypeStr = QualType(T, 0).getAsString(InnerPolicy);
+ }
+ OS << TypeStr;
+ break;
+ }
+ }
+
+ OS << "::";
+}
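+// For example, a specifier naming namespace 'std' followed by the class
+// 'vector<int>' prints as "std::vector<int>::", while the global specifier
+// prints just "::".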
+
+void NestedNameSpecifier::dump(const LangOptions &LO) {
+ print(llvm::errs(), PrintingPolicy(LO));
+}
+
+unsigned
+NestedNameSpecifierLoc::getLocalDataLength(NestedNameSpecifier *Qualifier) {
+ assert(Qualifier && "Expected a non-NULL qualifier");
+
+ // Location of the trailing '::'.
+ unsigned Length = sizeof(unsigned);
+
+ switch (Qualifier->getKind()) {
+ case NestedNameSpecifier::Global:
+ // Nothing more to add.
+ break;
+
+ case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ // The location of the identifier or namespace name.
+ Length += sizeof(unsigned);
+ break;
+
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ case NestedNameSpecifier::TypeSpec:
+ // The "void*" that points at the TypeLoc data.
+ // Note: the 'template' keyword is part of the TypeLoc.
+ Length += sizeof(void *);
+ break;
+ }
+
+ return Length;
+}
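+// For example, on a target where 'unsigned' is 4 bytes and pointers are 8
+// bytes, a Global specifier stores 4 bytes of location data, an Identifier,
+// Namespace, or NamespaceAlias specifier stores 8, and a TypeSpec specifier
+// stores 12 (the '::' location plus the pointer to the TypeLoc data).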
+
+unsigned
+NestedNameSpecifierLoc::getDataLength(NestedNameSpecifier *Qualifier) {
+ unsigned Length = 0;
+ for (; Qualifier; Qualifier = Qualifier->getPrefix())
+ Length += getLocalDataLength(Qualifier);
+ return Length;
+}
+
+namespace {
+ /// \brief Load a (possibly unaligned) source location from a given address
+ /// and offset.
+ SourceLocation LoadSourceLocation(void *Data, unsigned Offset) {
+ unsigned Raw;
+ memcpy(&Raw, static_cast<char *>(Data) + Offset, sizeof(unsigned));
+ return SourceLocation::getFromRawEncoding(Raw);
+ }
+
+ /// \brief Load a (possibly unaligned) pointer from a given address and
+ /// offset.
+ void *LoadPointer(void *Data, unsigned Offset) {
+ void *Result;
+ memcpy(&Result, static_cast<char *>(Data) + Offset, sizeof(void*));
+ return Result;
+ }
+}
+
+SourceRange NestedNameSpecifierLoc::getSourceRange() const {
+ if (!Qualifier)
+ return SourceRange();
+
+ NestedNameSpecifierLoc First = *this;
+ while (NestedNameSpecifierLoc Prefix = First.getPrefix())
+ First = Prefix;
+
+ return SourceRange(First.getLocalSourceRange().getBegin(),
+ getLocalSourceRange().getEnd());
+}
+
+SourceRange NestedNameSpecifierLoc::getLocalSourceRange() const {
+ if (!Qualifier)
+ return SourceRange();
+
+ unsigned Offset = getDataLength(Qualifier->getPrefix());
+ switch (Qualifier->getKind()) {
+ case NestedNameSpecifier::Global:
+ return LoadSourceLocation(Data, Offset);
+
+ case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ return SourceRange(LoadSourceLocation(Data, Offset),
+ LoadSourceLocation(Data, Offset + sizeof(unsigned)));
+
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ case NestedNameSpecifier::TypeSpec: {
+ // The "void*" that points at the TypeLoc data.
+ // Note: the 'template' keyword is part of the TypeLoc.
+ void *TypeData = LoadPointer(Data, Offset);
+ TypeLoc TL(Qualifier->getAsType(), TypeData);
+ return SourceRange(TL.getBeginLoc(),
+ LoadSourceLocation(Data, Offset + sizeof(void*)));
+ }
+ }
+
+ llvm_unreachable("Invalid NNS Kind!");
+}
+
+TypeLoc NestedNameSpecifierLoc::getTypeLoc() const {
+ assert((Qualifier->getKind() == NestedNameSpecifier::TypeSpec ||
+ Qualifier->getKind() == NestedNameSpecifier::TypeSpecWithTemplate) &&
+ "Nested-name-specifier location is not a type");
+
+ // The "void*" that points at the TypeLoc data.
+ unsigned Offset = getDataLength(Qualifier->getPrefix());
+ void *TypeData = LoadPointer(Data, Offset);
+ return TypeLoc(Qualifier->getAsType(), TypeData);
+}
+
+namespace {
+ void Append(char *Start, char *End, char *&Buffer, unsigned &BufferSize,
+ unsigned &BufferCapacity) {
+ if (BufferSize + (End - Start) > BufferCapacity) {
+ // Reallocate the buffer.
+ unsigned NewCapacity
+ = std::max((unsigned)(BufferCapacity? BufferCapacity * 2
+ : sizeof(void*) * 2),
+ (unsigned)(BufferSize + (End - Start)));
+ char *NewBuffer = static_cast<char *>(malloc(NewCapacity));
+ memcpy(NewBuffer, Buffer, BufferSize);
+
+ if (BufferCapacity)
+ free(Buffer);
+ Buffer = NewBuffer;
+ BufferCapacity = NewCapacity;
+ }
+
+ memcpy(Buffer + BufferSize, Start, End - Start);
+ BufferSize += End-Start;
+ }
+
+ /// \brief Save a source location to the given buffer.
+ void SaveSourceLocation(SourceLocation Loc, char *&Buffer,
+ unsigned &BufferSize, unsigned &BufferCapacity) {
+ unsigned Raw = Loc.getRawEncoding();
+ Append(reinterpret_cast<char *>(&Raw),
+ reinterpret_cast<char *>(&Raw) + sizeof(unsigned),
+ Buffer, BufferSize, BufferCapacity);
+ }
+
+ /// \brief Save a pointer to the given buffer.
+ void SavePointer(void *Ptr, char *&Buffer, unsigned &BufferSize,
+ unsigned &BufferCapacity) {
+ Append(reinterpret_cast<char *>(&Ptr),
+ reinterpret_cast<char *>(&Ptr) + sizeof(void *),
+ Buffer, BufferSize, BufferCapacity);
+ }
+}
+
+NestedNameSpecifierLocBuilder::
+NestedNameSpecifierLocBuilder(const NestedNameSpecifierLocBuilder &Other)
+ : Representation(Other.Representation), Buffer(0),
+ BufferSize(0), BufferCapacity(0)
+{
+ if (!Other.Buffer)
+ return;
+
+ if (Other.BufferCapacity == 0) {
+ // Shallow copy is okay.
+ Buffer = Other.Buffer;
+ BufferSize = Other.BufferSize;
+ return;
+ }
+
+ // Deep copy
+ BufferSize = Other.BufferSize;
+ BufferCapacity = Other.BufferSize;
+ Buffer = static_cast<char *>(malloc(BufferCapacity));
+ memcpy(Buffer, Other.Buffer, BufferSize);
+}
+
+NestedNameSpecifierLocBuilder &
+NestedNameSpecifierLocBuilder::
+operator=(const NestedNameSpecifierLocBuilder &Other) {
+ Representation = Other.Representation;
+
+ if (Buffer && Other.Buffer && BufferCapacity >= Other.BufferSize) {
+ // Re-use our storage.
+ BufferSize = Other.BufferSize;
+ memcpy(Buffer, Other.Buffer, BufferSize);
+ return *this;
+ }
+
+ // Free our storage, if we have any.
+ if (BufferCapacity) {
+ free(Buffer);
+ BufferCapacity = 0;
+ }
+
+ if (!Other.Buffer) {
+ // Empty.
+ Buffer = 0;
+ BufferSize = 0;
+ return *this;
+ }
+
+ if (Other.BufferCapacity == 0) {
+ // Shallow copy is okay.
+ Buffer = Other.Buffer;
+ BufferSize = Other.BufferSize;
+ return *this;
+ }
+
+ // Deep copy.
+ BufferSize = Other.BufferSize;
+ BufferCapacity = BufferSize;
+ Buffer = static_cast<char *>(malloc(BufferSize));
+ memcpy(Buffer, Other.Buffer, BufferSize);
+ return *this;
+}
+
+void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
+ SourceLocation TemplateKWLoc,
+ TypeLoc TL,
+ SourceLocation ColonColonLoc) {
+ Representation = NestedNameSpecifier::Create(Context, Representation,
+ TemplateKWLoc.isValid(),
+ TL.getTypePtr());
+
+ // Push source-location info into the buffer.
+ SavePointer(TL.getOpaqueData(), Buffer, BufferSize, BufferCapacity);
+ SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
+}
+
+void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
+ IdentifierInfo *Identifier,
+ SourceLocation IdentifierLoc,
+ SourceLocation ColonColonLoc) {
+ Representation = NestedNameSpecifier::Create(Context, Representation,
+ Identifier);
+
+ // Push source-location info into the buffer.
+ SaveSourceLocation(IdentifierLoc, Buffer, BufferSize, BufferCapacity);
+ SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
+}
+
+void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
+ NamespaceDecl *Namespace,
+ SourceLocation NamespaceLoc,
+ SourceLocation ColonColonLoc) {
+ Representation = NestedNameSpecifier::Create(Context, Representation,
+ Namespace);
+
+ // Push source-location info into the buffer.
+ SaveSourceLocation(NamespaceLoc, Buffer, BufferSize, BufferCapacity);
+ SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
+}
+
+void NestedNameSpecifierLocBuilder::Extend(ASTContext &Context,
+ NamespaceAliasDecl *Alias,
+ SourceLocation AliasLoc,
+ SourceLocation ColonColonLoc) {
+ Representation = NestedNameSpecifier::Create(Context, Representation, Alias);
+
+ // Push source-location info into the buffer.
+ SaveSourceLocation(AliasLoc, Buffer, BufferSize, BufferCapacity);
+ SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
+}
+
+void NestedNameSpecifierLocBuilder::MakeGlobal(ASTContext &Context,
+ SourceLocation ColonColonLoc) {
+ assert(!Representation && "Already have a nested-name-specifier!?");
+ Representation = NestedNameSpecifier::GlobalSpecifier(Context);
+
+ // Push source-location info into the buffer.
+ SaveSourceLocation(ColonColonLoc, Buffer, BufferSize, BufferCapacity);
+}
+
+void NestedNameSpecifierLocBuilder::MakeTrivial(ASTContext &Context,
+ NestedNameSpecifier *Qualifier,
+ SourceRange R) {
+ Representation = Qualifier;
+
+ // Construct bogus (but well-formed) source information for the
+ // nested-name-specifier.
+ BufferSize = 0;
+ SmallVector<NestedNameSpecifier *, 4> Stack;
+ for (NestedNameSpecifier *NNS = Qualifier; NNS; NNS = NNS->getPrefix())
+ Stack.push_back(NNS);
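+ // getPrefix() walks from the full specifier back to its first qualifier,
+ // so pop the stack to write location data for the first qualifier first,
+ // matching the order in which Extend() appends it.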
+ while (!Stack.empty()) {
+ NestedNameSpecifier *NNS = Stack.back();
+ Stack.pop_back();
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ SaveSourceLocation(R.getBegin(), Buffer, BufferSize, BufferCapacity);
+ break;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ TypeSourceInfo *TSInfo
+ = Context.getTrivialTypeSourceInfo(QualType(NNS->getAsType(), 0),
+ R.getBegin());
+ SavePointer(TSInfo->getTypeLoc().getOpaqueData(), Buffer, BufferSize,
+ BufferCapacity);
+ break;
+ }
+
+ case NestedNameSpecifier::Global:
+ break;
+ }
+
+ // Save the location of the '::'.
+ SaveSourceLocation(Stack.empty()? R.getEnd() : R.getBegin(),
+ Buffer, BufferSize, BufferCapacity);
+ }
+}
+
+void NestedNameSpecifierLocBuilder::Adopt(NestedNameSpecifierLoc Other) {
+ if (BufferCapacity)
+ free(Buffer);
+
+ if (!Other) {
+ Representation = 0;
+ BufferSize = 0;
+ return;
+ }
+
+ // Rather than copying the data (which is wasteful), "adopt" the
+ // pointer (which points into the ASTContext) but set the capacity to zero to
+ // indicate that we don't own it.
+ Representation = Other.getNestedNameSpecifier();
+ Buffer = static_cast<char *>(Other.getOpaqueData());
+ BufferSize = Other.getDataLength();
+ BufferCapacity = 0;
+}
+
+NestedNameSpecifierLoc
+NestedNameSpecifierLocBuilder::getWithLocInContext(ASTContext &Context) const {
+ if (!Representation)
+ return NestedNameSpecifierLoc();
+
+ // If we adopted our data pointer from elsewhere in the AST context, there's
+ // no need to copy the memory.
+ if (BufferCapacity == 0)
+ return NestedNameSpecifierLoc(Representation, Buffer);
+
+ // FIXME: After copying the source-location information, should we free
+ // our (temporary) buffer and adopt the ASTContext-allocated memory?
+ // Doing so would optimize repeated calls to getWithLocInContext().
+ void *Mem = Context.Allocate(BufferSize, llvm::alignOf<void *>());
+ memcpy(Mem, Buffer, BufferSize);
+ return NestedNameSpecifierLoc(Representation, Mem);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp b/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp
new file mode 100644
index 0000000..64016d9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/ParentMap.cpp
@@ -0,0 +1,130 @@
+//===--- ParentMap.cpp - Mappings from Stmts to their Parents ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ParentMap class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "llvm/ADT/DenseMap.h"
+
+using namespace clang;
+
+typedef llvm::DenseMap<Stmt*, Stmt*> MapTy;
+
+static void BuildParentMap(MapTy& M, Stmt* S) {
+ for (Stmt::child_range I = S->children(); I; ++I)
+ if (*I) {
+ M[*I] = S;
+ BuildParentMap(M, *I);
+ }
+
+ // Also include the source expr tree of an OpaqueValueExpr in the map.
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(S))
+ BuildParentMap(M, OVE->getSourceExpr());
+}
+
+ParentMap::ParentMap(Stmt* S) : Impl(0) {
+ if (S) {
+ MapTy *M = new MapTy();
+ BuildParentMap(*M, S);
+ Impl = M;
+ }
+}
+
+ParentMap::~ParentMap() {
+ delete (MapTy*) Impl;
+}
+
+void ParentMap::addStmt(Stmt* S) {
+ if (S) {
+ BuildParentMap(*(MapTy*) Impl, S);
+ }
+}
+
+Stmt* ParentMap::getParent(Stmt* S) const {
+ MapTy* M = (MapTy*) Impl;
+ MapTy::iterator I = M->find(S);
+ return I == M->end() ? 0 : I->second;
+}
+
+Stmt *ParentMap::getParentIgnoreParens(Stmt *S) const {
+ do { S = getParent(S); } while (S && isa<ParenExpr>(S));
+ return S;
+}
+
+Stmt *ParentMap::getParentIgnoreParenCasts(Stmt *S) const {
+ do {
+ S = getParent(S);
+ }
+ while (S && (isa<ParenExpr>(S) || isa<CastExpr>(S)));
+
+ return S;
+}
+
+Stmt *ParentMap::getParentIgnoreParenImpCasts(Stmt *S) const {
+ do {
+ S = getParent(S);
+ } while (S && isa<Expr>(S) && cast<Expr>(S)->IgnoreParenImpCasts() != S);
+
+ return S;
+}
+
+Stmt *ParentMap::getOuterParenParent(Stmt *S) const {
+ Stmt *Paren = 0;
+ while (isa<ParenExpr>(S)) {
+ Paren = S;
+ S = getParent(S);
+ }
+ return Paren;
+}
+
+bool ParentMap::isConsumedExpr(Expr* E) const {
+ Stmt *P = getParent(E);
+ Stmt *DirectChild = E;
+
+ // Ignore parents that are parentheses or casts.
+ while (P && (isa<ParenExpr>(P) || isa<CastExpr>(P))) {
+ DirectChild = P;
+ P = getParent(P);
+ }
+
+ if (!P)
+ return false;
+
+ switch (P->getStmtClass()) {
+ default:
+ return isa<Expr>(P);
+ case Stmt::DeclStmtClass:
+ return true;
+ case Stmt::BinaryOperatorClass: {
+ BinaryOperator *BE = cast<BinaryOperator>(P);
+ // If it is a comma, only the right side is consumed.
+ // If it isn't a comma, both sides are consumed.
+ return BE->getOpcode() != BO_Comma || DirectChild == BE->getRHS();
+ }
+ case Stmt::ForStmtClass:
+ return DirectChild == cast<ForStmt>(P)->getCond();
+ case Stmt::WhileStmtClass:
+ return DirectChild == cast<WhileStmt>(P)->getCond();
+ case Stmt::DoStmtClass:
+ return DirectChild == cast<DoStmt>(P)->getCond();
+ case Stmt::IfStmtClass:
+ return DirectChild == cast<IfStmt>(P)->getCond();
+ case Stmt::IndirectGotoStmtClass:
+ return DirectChild == cast<IndirectGotoStmt>(P)->getTarget();
+ case Stmt::SwitchStmtClass:
+ return DirectChild == cast<SwitchStmt>(P)->getCond();
+ case Stmt::ReturnStmtClass:
+ return true;
+ }
+}
+
diff --git a/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp b/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp
new file mode 100644
index 0000000..0114eba
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/RecordLayout.cpp
@@ -0,0 +1,89 @@
+//===-- RecordLayout.cpp - Layout information for a struct/union -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the RecordLayout interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/TargetInfo.h"
+
+using namespace clang;
+
+void ASTRecordLayout::Destroy(ASTContext &Ctx) {
+ if (FieldOffsets)
+ Ctx.Deallocate(FieldOffsets);
+ if (CXXInfo) {
+ CXXInfo->~CXXRecordLayoutInfo();
+ Ctx.Deallocate(CXXInfo);
+ }
+ this->~ASTRecordLayout();
+ Ctx.Deallocate(this);
+}
+
+ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx, CharUnits size,
+ CharUnits alignment, CharUnits datasize,
+ const uint64_t *fieldoffsets,
+ unsigned fieldcount)
+ : Size(size), DataSize(datasize), FieldOffsets(0), Alignment(alignment),
+ FieldCount(fieldcount), CXXInfo(0) {
+ if (FieldCount > 0) {
+ FieldOffsets = new (Ctx) uint64_t[FieldCount];
+ memcpy(FieldOffsets, fieldoffsets, FieldCount * sizeof(*FieldOffsets));
+ }
+}
+
+// Constructor for C++ records.
+ASTRecordLayout::ASTRecordLayout(const ASTContext &Ctx,
+ CharUnits size, CharUnits alignment,
+ CharUnits vfptroffset, CharUnits vbptroffset,
+ CharUnits datasize,
+ const uint64_t *fieldoffsets,
+ unsigned fieldcount,
+ CharUnits nonvirtualsize,
+ CharUnits nonvirtualalign,
+ CharUnits SizeOfLargestEmptySubobject,
+ const CXXRecordDecl *PrimaryBase,
+ bool IsPrimaryBaseVirtual,
+ const BaseOffsetsMapTy& BaseOffsets,
+ const BaseOffsetsMapTy& VBaseOffsets)
+ : Size(size), DataSize(datasize), FieldOffsets(0), Alignment(alignment),
+ FieldCount(fieldcount), CXXInfo(new (Ctx) CXXRecordLayoutInfo)
+{
+ if (FieldCount > 0) {
+ FieldOffsets = new (Ctx) uint64_t[FieldCount];
+ memcpy(FieldOffsets, fieldoffsets, FieldCount * sizeof(*FieldOffsets));
+ }
+
+ CXXInfo->PrimaryBase.setPointer(PrimaryBase);
+ CXXInfo->PrimaryBase.setInt(IsPrimaryBaseVirtual);
+ CXXInfo->NonVirtualSize = nonvirtualsize;
+ CXXInfo->NonVirtualAlign = nonvirtualalign;
+ CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject;
+ CXXInfo->BaseOffsets = BaseOffsets;
+ CXXInfo->VBaseOffsets = VBaseOffsets;
+ CXXInfo->VFPtrOffset = vfptroffset;
+ CXXInfo->VBPtrOffset = vbptroffset;
+
+#ifndef NDEBUG
+ if (const CXXRecordDecl *PrimaryBase = getPrimaryBase()) {
+ if (isPrimaryBaseVirtual()) {
+ // The Microsoft ABI doesn't have primary virtual bases.
+ if (Ctx.getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
+ assert(getVBaseClassOffset(PrimaryBase).isZero() &&
+ "Primary virtual base must be at offset 0!");
+ }
+ } else {
+ assert(getBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ "Primary base must be at offset 0!");
+ }
+ }
+#endif
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
new file mode 100644
index 0000000..c2d9294
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -0,0 +1,2488 @@
+//=== RecordLayoutBuilder.cpp - Helper class for building record layouts ---==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Attr.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "llvm/Support/Format.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/CrashRecoveryContext.h"
+
+using namespace clang;
+
+namespace {
+
+/// BaseSubobjectInfo - Represents a single base subobject in a complete class.
+/// For a class hierarchy like
+///
+/// class A { };
+/// class B : A { };
+/// class C : A, B { };
+///
+/// The BaseSubobjectInfo graph for C will have three BaseSubobjectInfo
+/// instances, one for B and two for A.
+///
+/// If a base is virtual, it will only have one BaseSubobjectInfo allocated.
+struct BaseSubobjectInfo {
+ /// Class - The class for this base info.
+ const CXXRecordDecl *Class;
+
+ /// IsVirtual - Whether the BaseInfo represents a virtual base or not.
+ bool IsVirtual;
+
+ /// Bases - Information about the base subobjects.
+ SmallVector<BaseSubobjectInfo*, 4> Bases;
+
+ /// PrimaryVirtualBaseInfo - Holds the base info for the primary virtual base
+ /// of this base info (if one exists).
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo;
+
+ // FIXME: Document.
+ const BaseSubobjectInfo *Derived;
+};
+
+/// EmptySubobjectMap - Keeps track of which empty subobjects exist at different
+/// offsets while laying out a C++ class.
+class EmptySubobjectMap {
+ const ASTContext &Context;
+ uint64_t CharWidth;
+
+ /// Class - The class whose empty entries we're keeping track of.
+ const CXXRecordDecl *Class;
+
+ /// EmptyClassOffsets - A map from offsets to empty record decls.
+ typedef SmallVector<const CXXRecordDecl *, 1> ClassVectorTy;
+ typedef llvm::DenseMap<CharUnits, ClassVectorTy> EmptyClassOffsetsMapTy;
+ EmptyClassOffsetsMapTy EmptyClassOffsets;
+
+ /// MaxEmptyClassOffset - The highest offset known to contain an empty
+ /// base subobject.
+ CharUnits MaxEmptyClassOffset;
+
+ /// ComputeEmptySubobjectSizes - Compute the size of the largest base or
+ /// member subobject that is empty.
+ void ComputeEmptySubobjectSizes();
+
+ void AddSubobjectAtOffset(const CXXRecordDecl *RD, CharUnits Offset);
+
+ void UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
+ CharUnits Offset, bool PlacingEmptyBase);
+
+ void UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ CharUnits Offset);
+ void UpdateEmptyFieldSubobjects(const FieldDecl *FD, CharUnits Offset);
+
+ /// AnyEmptySubobjectsBeyondOffset - Returns whether there are any empty
+ /// subobjects beyond the given offset.
+ bool AnyEmptySubobjectsBeyondOffset(CharUnits Offset) const {
+ return Offset <= MaxEmptyClassOffset;
+ }
+
+ CharUnits
+ getFieldOffset(const ASTRecordLayout &Layout, unsigned FieldNo) const {
+ uint64_t FieldOffset = Layout.getFieldOffset(FieldNo);
+ assert(FieldOffset % CharWidth == 0 &&
+ "Field offset not at char boundary!");
+
+ return Context.toCharUnitsFromBits(FieldOffset);
+ }
+
+protected:
+ bool CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
+ CharUnits Offset) const;
+
+ bool CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
+ CharUnits Offset);
+
+ bool CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ CharUnits Offset) const;
+ bool CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
+ CharUnits Offset) const;
+
+public:
+ /// This holds the size of the largest empty subobject (either a base
+ /// or a member). Will be zero if the record being built doesn't contain
+ /// any empty classes.
+ CharUnits SizeOfLargestEmptySubobject;
+
+ EmptySubobjectMap(const ASTContext &Context, const CXXRecordDecl *Class)
+ : Context(Context), CharWidth(Context.getCharWidth()), Class(Class) {
+ ComputeEmptySubobjectSizes();
+ }
+
+ /// CanPlaceBaseAtOffset - Return whether the given base class can be placed
+ /// at the given offset.
+ /// Returns false if placing the record will result in two components
+ /// (direct or indirect) of the same type having the same offset.
+ bool CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info,
+ CharUnits Offset);
+
+ /// CanPlaceFieldAtOffset - Return whether a field can be placed at the given
+ /// offset.
+ bool CanPlaceFieldAtOffset(const FieldDecl *FD, CharUnits Offset);
+};
+
+void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
+ // Check the bases.
+ for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(),
+ E = Class->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits EmptySize;
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
+ if (BaseDecl->isEmpty()) {
+ // If the class decl is empty, get its size.
+ EmptySize = Layout.getSize();
+ } else {
+ // Otherwise, we get the largest empty subobject for the decl.
+ EmptySize = Layout.getSizeOfLargestEmptySubobject();
+ }
+
+ if (EmptySize > SizeOfLargestEmptySubobject)
+ SizeOfLargestEmptySubobject = EmptySize;
+ }
+
+ // Check the fields.
+ for (CXXRecordDecl::field_iterator I = Class->field_begin(),
+ E = Class->field_end(); I != E; ++I) {
+ const FieldDecl *FD = *I;
+
+ const RecordType *RT =
+ Context.getBaseElementType(FD->getType())->getAs<RecordType>();
+
+ // We only care about record types.
+ if (!RT)
+ continue;
+
+ CharUnits EmptySize;
+ const CXXRecordDecl *MemberDecl = cast<CXXRecordDecl>(RT->getDecl());
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(MemberDecl);
+ if (MemberDecl->isEmpty()) {
+ // If the class decl is empty, get its size.
+ EmptySize = Layout.getSize();
+ } else {
+ // Otherwise, we get the largest empty subobject for the decl.
+ EmptySize = Layout.getSizeOfLargestEmptySubobject();
+ }
+
+ if (EmptySize > SizeOfLargestEmptySubobject)
+ SizeOfLargestEmptySubobject = EmptySize;
+ }
+}
+
+bool
+EmptySubobjectMap::CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
+ CharUnits Offset) const {
+ // We only need to check empty bases.
+ if (!RD->isEmpty())
+ return true;
+
+ EmptyClassOffsetsMapTy::const_iterator I = EmptyClassOffsets.find(Offset);
+ if (I == EmptyClassOffsets.end())
+ return true;
+
+ const ClassVectorTy& Classes = I->second;
+ if (std::find(Classes.begin(), Classes.end(), RD) == Classes.end())
+ return true;
+
+ // There is already an empty class of the same type at this offset.
+ return false;
+}
+
+void EmptySubobjectMap::AddSubobjectAtOffset(const CXXRecordDecl *RD,
+ CharUnits Offset) {
+ // We only care about empty bases.
+ if (!RD->isEmpty())
+ return;
+
+ // If we have empty structures inside a union, we can assign them all
+ // the same offset. Just avoid pushing the same class twice into the list.
+ ClassVectorTy& Classes = EmptyClassOffsets[Offset];
+ if (std::find(Classes.begin(), Classes.end(), RD) != Classes.end())
+ return;
+
+ Classes.push_back(RD);
+
+ // Update the empty class offset.
+ if (Offset > MaxEmptyClassOffset)
+ MaxEmptyClassOffset = Offset;
+}
+
+bool
+EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
+ CharUnits Offset) {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(Offset))
+ return true;
+
+ if (!CanPlaceSubobjectAtOffset(Info->Class, Offset))
+ return false;
+
+ // Traverse all non-virtual bases.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
+ for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) {
+ BaseSubobjectInfo* Base = Info->Bases[I];
+ if (Base->IsVirtual)
+ continue;
+
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
+
+ if (!CanPlaceBaseSubobjectAtOffset(Base, BaseOffset))
+ return false;
+ }
+
+ if (Info->PrimaryVirtualBaseInfo) {
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo = Info->PrimaryVirtualBaseInfo;
+
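+ // Only recurse into the primary virtual base if this base is the one
+ // that claimed it; otherwise that virtual base is laid out elsewhere.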
+ if (Info == PrimaryVirtualBaseInfo->Derived) {
+ if (!CanPlaceBaseSubobjectAtOffset(PrimaryVirtualBaseInfo, Offset))
+ return false;
+ }
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
+ E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
+ const FieldDecl *FD = *I;
+ if (FD->isBitField())
+ continue;
+
+ CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
+ if (!CanPlaceFieldSubobjectAtOffset(FD, FieldOffset))
+ return false;
+ }
+
+ return true;
+}
+
+void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
+ CharUnits Offset,
+ bool PlacingEmptyBase) {
+ if (!PlacingEmptyBase && Offset >= SizeOfLargestEmptySubobject) {
+ // We know that the only empty subobjects that can conflict with empty
+ // subobjects of non-empty bases are empty bases that can be placed at
+ // offset zero. Because of this, we only need to keep track of empty base
+ // subobjects with offsets less than the size of the largest empty
+ // subobject for our class.
+ return;
+ }
+
+ AddSubobjectAtOffset(Info->Class, Offset);
+
+ // Traverse all non-virtual bases.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
+ for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) {
+ BaseSubobjectInfo* Base = Info->Bases[I];
+ if (Base->IsVirtual)
+ continue;
+
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
+ UpdateEmptyBaseSubobjects(Base, BaseOffset, PlacingEmptyBase);
+ }
+
+ if (Info->PrimaryVirtualBaseInfo) {
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo = Info->PrimaryVirtualBaseInfo;
+
+ if (Info == PrimaryVirtualBaseInfo->Derived)
+ UpdateEmptyBaseSubobjects(PrimaryVirtualBaseInfo, Offset,
+ PlacingEmptyBase);
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
+ E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
+ const FieldDecl *FD = *I;
+ if (FD->isBitField())
+ continue;
+
+ CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
+ UpdateEmptyFieldSubobjects(FD, FieldOffset);
+ }
+}
+
+bool EmptySubobjectMap::CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info,
+ CharUnits Offset) {
+ // If we know this class doesn't have any empty subobjects we don't need to
+ // bother checking.
+ if (SizeOfLargestEmptySubobject.isZero())
+ return true;
+
+ if (!CanPlaceBaseSubobjectAtOffset(Info, Offset))
+ return false;
+
+ // We are able to place the base at this offset. Make sure to update the
+ // empty base subobject map.
+ UpdateEmptyBaseSubobjects(Info, Offset, Info->Class->isEmpty());
+ return true;
+}
+
+bool
+EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ CharUnits Offset) const {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(Offset))
+ return true;
+
+ if (!CanPlaceSubobjectAtOffset(RD, Offset))
+ return false;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Traverse all non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
+ if (!CanPlaceFieldSubobjectAtOffset(BaseDecl, Class, BaseOffset))
+ return false;
+ }
+
+ if (RD == Class) {
+ // This is the most derived class, traverse virtual bases as well.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *VBaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
+ if (!CanPlaceFieldSubobjectAtOffset(VBaseDecl, Class, VBaseOffset))
+ return false;
+ }
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I, ++FieldNo) {
+ const FieldDecl *FD = *I;
+ if (FD->isBitField())
+ continue;
+
+ CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
+
+ if (!CanPlaceFieldSubobjectAtOffset(FD, FieldOffset))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
+ CharUnits Offset) const {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(Offset))
+ return true;
+
+ QualType T = FD->getType();
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ return CanPlaceFieldSubobjectAtOffset(RD, RD, Offset);
+ }
+
+ // If we have an array type we need to look at every element.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
+ QualType ElemTy = Context.getBaseElementType(AT);
+ const RecordType *RT = ElemTy->getAs<RecordType>();
+ if (!RT)
+ return true;
+
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ uint64_t NumElements = Context.getConstantArrayElementCount(AT);
+ CharUnits ElementOffset = Offset;
+ for (uint64_t I = 0; I != NumElements; ++I) {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(ElementOffset))
+ return true;
+
+ if (!CanPlaceFieldSubobjectAtOffset(RD, RD, ElementOffset))
+ return false;
+
+ ElementOffset += Layout.getSize();
+ }
+ }
+
+ return true;
+}
+
+bool
+EmptySubobjectMap::CanPlaceFieldAtOffset(const FieldDecl *FD,
+ CharUnits Offset) {
+ if (!CanPlaceFieldSubobjectAtOffset(FD, Offset))
+ return false;
+
+ // We are able to place the member variable at this offset.
+ // Make sure to update the empty base subobject map.
+ UpdateEmptyFieldSubobjects(FD, Offset);
+ return true;
+}
+
+void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ CharUnits Offset) {
+ // We know that the only empty subobjects that can conflict with empty
+ // field subobjects are subobjects of empty bases that can be placed at offset
+ // zero. Because of this, we only need to keep track of empty field
+ // subobjects with offsets less than the size of the largest empty
+ // subobject for our class.
+ if (Offset >= SizeOfLargestEmptySubobject)
+ return;
+
+ AddSubobjectAtOffset(RD, Offset);
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Traverse all non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
+ UpdateEmptyFieldSubobjects(BaseDecl, Class, BaseOffset);
+ }
+
+ if (RD == Class) {
+ // This is the most derived class, traverse virtual bases as well.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *VBaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
+ UpdateEmptyFieldSubobjects(VBaseDecl, Class, VBaseOffset);
+ }
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I, ++FieldNo) {
+ const FieldDecl *FD = *I;
+ if (FD->isBitField())
+ continue;
+
+ CharUnits FieldOffset = Offset + getFieldOffset(Layout, FieldNo);
+
+ UpdateEmptyFieldSubobjects(FD, FieldOffset);
+ }
+}
+
+void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const FieldDecl *FD,
+ CharUnits Offset) {
+ QualType T = FD->getType();
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ UpdateEmptyFieldSubobjects(RD, RD, Offset);
+ return;
+ }
+
+ // If we have an array type we need to update every element.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
+ QualType ElemTy = Context.getBaseElementType(AT);
+ const RecordType *RT = ElemTy->getAs<RecordType>();
+ if (!RT)
+ return;
+
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ uint64_t NumElements = Context.getConstantArrayElementCount(AT);
+ CharUnits ElementOffset = Offset;
+
+ for (uint64_t I = 0; I != NumElements; ++I) {
+ // We know that the only empty subobjects that can conflict with empty
+ // field subobjects are subobjects of empty bases that can be placed at
+ // offset zero. Because of this, we only need to keep track of empty field
+ // subobjects with offsets less than the size of the largest empty
+ // subobject for our class.
+ if (ElementOffset >= SizeOfLargestEmptySubobject)
+ return;
+
+ UpdateEmptyFieldSubobjects(RD, RD, ElementOffset);
+ ElementOffset += Layout.getSize();
+ }
+ }
+}
+
+class RecordLayoutBuilder {
+protected:
+ // FIXME: Remove this and make the appropriate fields public.
+ friend class clang::ASTContext;
+
+ const ASTContext &Context;
+
+ EmptySubobjectMap *EmptySubobjects;
+
+ /// Size - The current size of the record layout.
+ uint64_t Size;
+
+ /// Alignment - The current alignment of the record layout.
+ CharUnits Alignment;
+
+ /// \brief The alignment if attribute packed is not used.
+ CharUnits UnpackedAlignment;
+
+ SmallVector<uint64_t, 16> FieldOffsets;
+
+ /// \brief Whether the external AST source has provided a layout for this
+ /// record.
+ unsigned ExternalLayout : 1;
+
+ /// \brief Whether we need to infer alignment, even when we have an
+ /// externally-provided layout.
+ unsigned InferAlignment : 1;
+
+ /// Packed - Whether the record is packed or not.
+ unsigned Packed : 1;
+
+ unsigned IsUnion : 1;
+
+ unsigned IsMac68kAlign : 1;
+
+ unsigned IsMsStruct : 1;
+
+ /// UnfilledBitsInLastByte - If the last field laid out was a bitfield,
+ /// this contains the number of bits in the last byte that can be used for
+ /// an adjacent bitfield if necessary.
+ unsigned char UnfilledBitsInLastByte;
+
+ /// MaxFieldAlignment - The maximum allowed field alignment. This is set by
+ /// #pragma pack.
+ CharUnits MaxFieldAlignment;
+
+ /// DataSize - The data size of the record being laid out.
+ uint64_t DataSize;
+
+ CharUnits NonVirtualSize;
+ CharUnits NonVirtualAlignment;
+
+ FieldDecl *ZeroLengthBitfield;
+
+ /// PrimaryBase - the primary base class (if one exists) of the class
+ /// we're laying out.
+ const CXXRecordDecl *PrimaryBase;
+
+ /// PrimaryBaseIsVirtual - Whether the primary base of the class we're laying
+ /// out is virtual.
+ bool PrimaryBaseIsVirtual;
+
+ /// VFPtrOffset - Virtual function table offset. Only for MS layout.
+ CharUnits VFPtrOffset;
+
+ /// VBPtrOffset - Virtual base table offset. Only for MS layout.
+ CharUnits VBPtrOffset;
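+ // Both VFPtrOffset and VBPtrOffset use CharUnits::fromQuantity(-1) as the
+ // 'not present' sentinel.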
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits> BaseOffsetsMapTy;
+
+ /// Bases - base classes and their offsets in the record.
+ BaseOffsetsMapTy Bases;
+
+ /// VBases - virtual base classes and their offsets in the record.
+ BaseOffsetsMapTy VBases;
+
+ /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
+ /// primary base classes for some other direct or indirect base class.
+ CXXIndirectPrimaryBaseSet IndirectPrimaryBases;
+
+ /// FirstNearlyEmptyVBase - The first nearly empty virtual base class in
+ /// inheritance graph order. Used for determining the primary base class.
+ const CXXRecordDecl *FirstNearlyEmptyVBase;
+
+ /// VisitedVirtualBases - A set of all the visited virtual bases, used to
+ /// avoid visiting virtual bases more than once.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
+
+ /// \brief Externally-provided size.
+ uint64_t ExternalSize;
+
+ /// \brief Externally-provided alignment.
+ uint64_t ExternalAlign;
+
+ /// \brief Externally-provided field offsets.
+ llvm::DenseMap<const FieldDecl *, uint64_t> ExternalFieldOffsets;
+
+ /// \brief Externally-provided direct, non-virtual base offsets.
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> ExternalBaseOffsets;
+
+ /// \brief Externally-provided virtual base offsets.
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> ExternalVirtualBaseOffsets;
+
+ RecordLayoutBuilder(const ASTContext &Context,
+ EmptySubobjectMap *EmptySubobjects)
+ : Context(Context), EmptySubobjects(EmptySubobjects), Size(0),
+ Alignment(CharUnits::One()), UnpackedAlignment(CharUnits::One()),
+ ExternalLayout(false), InferAlignment(false),
+ Packed(false), IsUnion(false), IsMac68kAlign(false), IsMsStruct(false),
+ UnfilledBitsInLastByte(0), MaxFieldAlignment(CharUnits::Zero()),
+ DataSize(0), NonVirtualSize(CharUnits::Zero()),
+ NonVirtualAlignment(CharUnits::One()),
+ ZeroLengthBitfield(0), PrimaryBase(0),
+ PrimaryBaseIsVirtual(false),
+ VFPtrOffset(CharUnits::fromQuantity(-1)),
+ VBPtrOffset(CharUnits::fromQuantity(-1)),
+ FirstNearlyEmptyVBase(0) { }
+
+ /// Reset this RecordLayoutBuilder to a fresh state, using the given
+ /// alignment as the initial alignment. This is used for the
+ /// correct layout of vb-table pointers in MSVC.
+ void resetWithTargetAlignment(CharUnits TargetAlignment) {
+ const ASTContext &Context = this->Context;
+ EmptySubobjectMap *EmptySubobjects = this->EmptySubobjects;
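+ // Copy these out first; members cannot be read once *this is destroyed.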
+ this->~RecordLayoutBuilder();
+ new (this) RecordLayoutBuilder(Context, EmptySubobjects);
+ Alignment = UnpackedAlignment = TargetAlignment;
+ }
+
+ void Layout(const RecordDecl *D);
+ void Layout(const CXXRecordDecl *D);
+ void Layout(const ObjCInterfaceDecl *D);
+
+ void LayoutFields(const RecordDecl *D);
+ void LayoutField(const FieldDecl *D);
+ void LayoutWideBitField(uint64_t FieldSize, uint64_t TypeSize,
+ bool FieldPacked, const FieldDecl *D);
+ void LayoutBitField(const FieldDecl *D);
+
+ bool isMicrosoftCXXABI() const {
+ return Context.getTargetInfo().getCXXABI() == CXXABI_Microsoft;
+ }
+
+ void MSLayoutVirtualBases(const CXXRecordDecl *RD);
+
+ /// BaseSubobjectInfoAllocator - Allocator for BaseSubobjectInfo objects.
+ llvm::SpecificBumpPtrAllocator<BaseSubobjectInfo> BaseSubobjectInfoAllocator;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, BaseSubobjectInfo *>
+ BaseSubobjectInfoMapTy;
+
+ /// VirtualBaseInfo - Map from all the (direct or indirect) virtual bases
+ /// of the class we're laying out to their base subobject info.
+ BaseSubobjectInfoMapTy VirtualBaseInfo;
+
+ /// NonVirtualBaseInfo - Map from all the direct non-virtual bases of the
+ /// class we're laying out to their base subobject info.
+ BaseSubobjectInfoMapTy NonVirtualBaseInfo;
+
+ /// ComputeBaseSubobjectInfo - Compute the base subobject information for the
+ /// bases of the given class.
+ void ComputeBaseSubobjectInfo(const CXXRecordDecl *RD);
+
+ /// ComputeBaseSubobjectInfo - Compute the base subobject information for a
+ /// single class and all of its base classes.
+ BaseSubobjectInfo *ComputeBaseSubobjectInfo(const CXXRecordDecl *RD,
+ bool IsVirtual,
+ BaseSubobjectInfo *Derived);
+
+ /// DeterminePrimaryBase - Determine the primary base of the given class.
+ void DeterminePrimaryBase(const CXXRecordDecl *RD);
+
+ void SelectPrimaryVBase(const CXXRecordDecl *RD);
+
+ void EnsureVTablePointerAlignment(CharUnits UnpackedBaseAlign);
+
+ /// LayoutNonVirtualBases - Determines the primary base class (if any) and
+ /// lays it out. Will then proceed to lay out all non-virtual base classes.
+ void LayoutNonVirtualBases(const CXXRecordDecl *RD);
+
+ /// LayoutNonVirtualBase - Lays out a single non-virtual base.
+ void LayoutNonVirtualBase(const BaseSubobjectInfo *Base);
+
+ void AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
+ CharUnits Offset);
+
+ bool needsVFTable(const CXXRecordDecl *RD) const;
+ bool hasNewVirtualFunction(const CXXRecordDecl *RD) const;
+ bool isPossiblePrimaryBase(const CXXRecordDecl *Base) const;
+
+ /// LayoutVirtualBases - Lays out all the virtual bases.
+ void LayoutVirtualBases(const CXXRecordDecl *RD,
+ const CXXRecordDecl *MostDerivedClass);
+
+ /// LayoutVirtualBase - Lays out a single virtual base.
+ void LayoutVirtualBase(const BaseSubobjectInfo *Base);
+
+ /// LayoutBase - Will lay out a base and return the offset where it was
+ /// placed, in chars.
+ CharUnits LayoutBase(const BaseSubobjectInfo *Base);
+
+ /// InitializeLayout - Initialize record layout for the given record decl.
+ void InitializeLayout(const Decl *D);
+
+ /// FinishLayout - Finalize record layout. Adjust record size based on the
+ /// alignment.
+ void FinishLayout(const NamedDecl *D);
+
+ void UpdateAlignment(CharUnits NewAlignment, CharUnits UnpackedNewAlignment);
+ void UpdateAlignment(CharUnits NewAlignment) {
+ UpdateAlignment(NewAlignment, NewAlignment);
+ }
+
+ /// \brief Retrieve the externally-supplied field offset for the given
+ /// field.
+ ///
+ /// \param Field The field whose offset is being queried.
+ /// \param ComputedOffset The offset that we've computed for this field.
+ uint64_t updateExternalFieldOffset(const FieldDecl *Field,
+ uint64_t ComputedOffset);
+
+ void CheckFieldPadding(uint64_t Offset, uint64_t UnpaddedOffset,
+ uint64_t UnpackedOffset, unsigned UnpackedAlign,
+ bool isPacked, const FieldDecl *D);
+
+ DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
+
+ CharUnits getSize() const {
+ assert(Size % Context.getCharWidth() == 0);
+ return Context.toCharUnitsFromBits(Size);
+ }
+ uint64_t getSizeInBits() const { return Size; }
+
+ void setSize(CharUnits NewSize) { Size = Context.toBits(NewSize); }
+ void setSize(uint64_t NewSize) { Size = NewSize; }
+
+ CharUnits getAligment() const { return Alignment; }
+
+ CharUnits getDataSize() const {
+ assert(DataSize % Context.getCharWidth() == 0);
+ return Context.toCharUnitsFromBits(DataSize);
+ }
+ uint64_t getDataSizeInBits() const { return DataSize; }
+
+ void setDataSize(CharUnits NewSize) { DataSize = Context.toBits(NewSize); }
+ void setDataSize(uint64_t NewSize) { DataSize = NewSize; }
+
+ RecordLayoutBuilder(const RecordLayoutBuilder&); // DO NOT IMPLEMENT
+ void operator=(const RecordLayoutBuilder&); // DO NOT IMPLEMENT
+public:
+ static const CXXMethodDecl *ComputeKeyFunction(const CXXRecordDecl *RD);
+};
+} // end anonymous namespace
+
+void
+RecordLayoutBuilder::SelectPrimaryVBase(const CXXRecordDecl *RD) {
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ assert(!I->getType()->isDependentType() &&
+ "Cannot layout class with dependent bases.");
+
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Check if this is a nearly empty virtual base.
+ if (I->isVirtual() && Context.isNearlyEmpty(Base)) {
+ // If it's not an indirect primary base, then we've found our primary
+ // base.
+ if (!IndirectPrimaryBases.count(Base)) {
+ PrimaryBase = Base;
+ PrimaryBaseIsVirtual = true;
+ return;
+ }
+
+ // Is this the first nearly empty virtual base?
+ if (!FirstNearlyEmptyVBase)
+ FirstNearlyEmptyVBase = Base;
+ }
+
+ SelectPrimaryVBase(Base);
+ if (PrimaryBase)
+ return;
+ }
+}
+
+/// DeterminePrimaryBase - Determine the primary base of the given class.
+void RecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
+ // If the class isn't dynamic, it won't have a primary base.
+ if (!RD->isDynamicClass())
+ return;
+
+ // Compute all the primary virtual bases for all of our direct and
+ // indirect bases, and record all their primary virtual base classes.
+ RD->getIndirectPrimaryBases(IndirectPrimaryBases);
+
+ // If the record has a dynamic base class, attempt to choose a primary base
+ // class. It is the first (in direct base class order) non-virtual dynamic
+ // base class, if one exists.
+ for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
+ e = RD->bases_end(); i != e; ++i) {
+ // Ignore virtual bases.
+ if (i->isVirtual())
+ continue;
+
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+
+ if (isPossiblePrimaryBase(Base)) {
+ // We found it.
+ PrimaryBase = Base;
+ PrimaryBaseIsVirtual = false;
+ return;
+ }
+ }
+
+ // The Microsoft ABI doesn't have primary virtual bases.
+ if (isMicrosoftCXXABI()) {
+ assert(!PrimaryBase && "Should not get here with a primary base!");
+ return;
+ }
+
+ // Under the Itanium ABI, if there is no non-virtual primary base class,
+ // try to compute the primary virtual base. The primary virtual base is
+ // the first nearly empty virtual base that is not an indirect primary
+ // virtual base class, if one exists.
+ if (RD->getNumVBases() != 0) {
+ SelectPrimaryVBase(RD);
+ if (PrimaryBase)
+ return;
+ }
+
+ // Otherwise, it is the first indirect primary base class, if one exists.
+ if (FirstNearlyEmptyVBase) {
+ PrimaryBase = FirstNearlyEmptyVBase;
+ PrimaryBaseIsVirtual = true;
+ return;
+ }
+
+ assert(!PrimaryBase && "Should not get here with a primary base!");
+}
+
+BaseSubobjectInfo *
+RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD,
+ bool IsVirtual,
+ BaseSubobjectInfo *Derived) {
+ BaseSubobjectInfo *Info;
+
+ if (IsVirtual) {
+ // Check if we already have info about this virtual base.
+ BaseSubobjectInfo *&InfoSlot = VirtualBaseInfo[RD];
+ if (InfoSlot) {
+ assert(InfoSlot->Class == RD && "Wrong class for virtual base info!");
+ return InfoSlot;
+ }
+
+ // We don't have one yet, so create it.
+ InfoSlot = new (BaseSubobjectInfoAllocator.Allocate()) BaseSubobjectInfo;
+ Info = InfoSlot;
+ } else {
+ Info = new (BaseSubobjectInfoAllocator.Allocate()) BaseSubobjectInfo;
+ }
+
+ Info->Class = RD;
+ Info->IsVirtual = IsVirtual;
+ Info->Derived = 0;
+ Info->PrimaryVirtualBaseInfo = 0;
+
+ const CXXRecordDecl *PrimaryVirtualBase = 0;
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo = 0;
+
+ // Check if this base has a primary virtual base.
+ if (RD->getNumVBases()) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ if (Layout.isPrimaryBaseVirtual()) {
+ // This base does have a primary virtual base.
+ PrimaryVirtualBase = Layout.getPrimaryBase();
+ assert(PrimaryVirtualBase && "Didn't have a primary virtual base!");
+
+ // Now check if we have base subobject info about this primary base.
+ PrimaryVirtualBaseInfo = VirtualBaseInfo.lookup(PrimaryVirtualBase);
+
+ if (PrimaryVirtualBaseInfo) {
+ if (PrimaryVirtualBaseInfo->Derived) {
+ // We did have info about this primary base, and it turns out that it
+ // has already been claimed as a primary virtual base for another
+ // base.
+ PrimaryVirtualBase = 0;
+ } else {
+ // We can claim this base as our primary base.
+ Info->PrimaryVirtualBaseInfo = PrimaryVirtualBaseInfo;
+ PrimaryVirtualBaseInfo->Derived = Info;
+ }
+ }
+ }
+ }
+
+ // Now go through all direct bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ bool IsVirtual = I->isVirtual();
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ Info->Bases.push_back(ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, Info));
+ }
+
+ if (PrimaryVirtualBase && !PrimaryVirtualBaseInfo) {
+ // Traversing the bases must have created the base info for our primary
+ // virtual base.
+ PrimaryVirtualBaseInfo = VirtualBaseInfo.lookup(PrimaryVirtualBase);
+ assert(PrimaryVirtualBaseInfo &&
+ "Did not create a primary virtual base!");
+
+ // Claim the primary virtual base as our primary virtual base.
+ Info->PrimaryVirtualBaseInfo = PrimaryVirtualBaseInfo;
+ PrimaryVirtualBaseInfo->Derived = Info;
+ }
+
+ return Info;
+}
+
+void RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD) {
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ bool IsVirtual = I->isVirtual();
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Compute the base subobject info for this base.
+ BaseSubobjectInfo *Info = ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, 0);
+
+ if (IsVirtual) {
+ // ComputeBaseSubobjectInfo has already added this base for us.
+ assert(VirtualBaseInfo.count(BaseDecl) &&
+ "Did not add virtual base!");
+ } else {
+ // Add the base info to the map of non-virtual bases.
+ assert(!NonVirtualBaseInfo.count(BaseDecl) &&
+ "Non-virtual base already exists!");
+ NonVirtualBaseInfo.insert(std::make_pair(BaseDecl, Info));
+ }
+ }
+}
+
+void
+RecordLayoutBuilder::EnsureVTablePointerAlignment(CharUnits UnpackedBaseAlign) {
+ CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;
+
+ // The maximum field alignment overrides base align.
+ if (!MaxFieldAlignment.isZero()) {
+ BaseAlign = std::min(BaseAlign, MaxFieldAlignment);
+ UnpackedBaseAlign = std::min(UnpackedBaseAlign, MaxFieldAlignment);
+ }
+
+ // Round up the current record size to pointer alignment.
+ setSize(getSize().RoundUpToAlignment(BaseAlign));
+ setDataSize(getSize());
+
+ // Update the alignment.
+ UpdateAlignment(BaseAlign, UnpackedBaseAlign);
+}
+
+void
+RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
+ // Then, determine the primary base class.
+ DeterminePrimaryBase(RD);
+
+ // Compute base subobject info.
+ ComputeBaseSubobjectInfo(RD);
+
+ // If we have a primary base class, lay it out.
+ if (PrimaryBase) {
+ if (PrimaryBaseIsVirtual) {
+ // If the primary virtual base was a primary virtual base of some other
+ // base class we'll have to steal it.
+ BaseSubobjectInfo *PrimaryBaseInfo = VirtualBaseInfo.lookup(PrimaryBase);
+ PrimaryBaseInfo->Derived = 0;
+
+ // We have a virtual primary base, insert it as an indirect primary base.
+ IndirectPrimaryBases.insert(PrimaryBase);
+
+ assert(!VisitedVirtualBases.count(PrimaryBase) &&
+ "vbase already visited!");
+ VisitedVirtualBases.insert(PrimaryBase);
+
+ LayoutVirtualBase(PrimaryBaseInfo);
+ } else {
+ BaseSubobjectInfo *PrimaryBaseInfo =
+ NonVirtualBaseInfo.lookup(PrimaryBase);
+ assert(PrimaryBaseInfo &&
+ "Did not find base info for non-virtual primary base!");
+
+ LayoutNonVirtualBase(PrimaryBaseInfo);
+ }
+
+ // If this class needs a vtable/vf-table and didn't get one from a
+ // primary base, add it in now.
+ } else if (needsVFTable(RD)) {
+ assert(DataSize == 0 && "Vtable pointer must be at offset zero!");
+ CharUnits PtrWidth =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ CharUnits PtrAlign =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0));
+ EnsureVTablePointerAlignment(PtrAlign);
+ if (isMicrosoftCXXABI())
+ VFPtrOffset = getSize();
+ setSize(getSize() + PtrWidth);
+ setDataSize(getSize());
+ }
+
+ bool HasDirectVirtualBases = false;
+ bool HasNonVirtualBaseWithVBTable = false;
+
+ // Now lay out the non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+
+ // Ignore virtual bases, but remember that we saw one.
+ if (I->isVirtual()) {
+ HasDirectVirtualBases = true;
+ continue;
+ }
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+
+ // Remember if this base has virtual bases itself.
+ if (BaseDecl->getNumVBases())
+ HasNonVirtualBaseWithVBTable = true;
+
+ // Skip the primary base, because we've already laid it out. The
+ // !PrimaryBaseIsVirtual check is required because we might have a
+ // non-virtual base of the same type as a primary virtual base.
+ if (BaseDecl == PrimaryBase && !PrimaryBaseIsVirtual)
+ continue;
+
+ // Lay out the base.
+ BaseSubobjectInfo *BaseInfo = NonVirtualBaseInfo.lookup(BaseDecl);
+ assert(BaseInfo && "Did not find base info for non-virtual base!");
+
+ LayoutNonVirtualBase(BaseInfo);
+ }
+
+ // In the MS ABI, add the vb-table pointer if we need one, which is
+ // whenever we have a virtual base and we can't re-use a vb-table
+ // pointer from a non-virtual base.
+ if (isMicrosoftCXXABI() &&
+ HasDirectVirtualBases && !HasNonVirtualBaseWithVBTable) {
+ CharUnits PtrWidth =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ CharUnits PtrAlign =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(0));
+
+ // MSVC potentially over-aligns the vb-table pointer by giving it
+ // the max alignment of all the non-virtual objects in the class.
+ // This is completely unnecessary, but we're not here to pass
+ // judgment.
+ //
+ // Note that we've only laid out the non-virtual bases, so on the
+ // first pass Alignment won't be set correctly here, but if the
+ // vb-table doesn't end up aligned correctly we'll come through
+ // and redo the layout from scratch with the right alignment.
+ //
+ // TODO: Instead of doing this, just lay out the fields as if the
+ // vb-table were at offset zero, then retroactively bump the field
+ // offsets up.
+ PtrAlign = std::max(PtrAlign, Alignment);
+
+ EnsureVTablePointerAlignment(PtrAlign);
+ VBPtrOffset = getSize();
+ setSize(getSize() + PtrWidth);
+ setDataSize(getSize());
+ }
+}
+
+void RecordLayoutBuilder::LayoutNonVirtualBase(const BaseSubobjectInfo *Base) {
+ // Layout the base.
+ CharUnits Offset = LayoutBase(Base);
+
+ // Add its base class offset.
+ assert(!Bases.count(Base->Class) && "base offset already exists!");
+ Bases.insert(std::make_pair(Base->Class, Offset));
+
+ AddPrimaryVirtualBaseOffsets(Base, Offset);
+}
+
+void
+RecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
+ CharUnits Offset) {
+ // This base isn't interesting, it has no virtual bases.
+ if (!Info->Class->getNumVBases())
+ return;
+
+ // First, check if we have a virtual primary base to add offsets for.
+ if (Info->PrimaryVirtualBaseInfo) {
+ assert(Info->PrimaryVirtualBaseInfo->IsVirtual &&
+ "Primary virtual base is not virtual!");
+ if (Info->PrimaryVirtualBaseInfo->Derived == Info) {
+ // Add the offset.
+ assert(!VBases.count(Info->PrimaryVirtualBaseInfo->Class) &&
+ "primary vbase offset already exists!");
+ VBases.insert(std::make_pair(Info->PrimaryVirtualBaseInfo->Class,
+ Offset));
+
+ // Traverse the primary virtual base.
+ AddPrimaryVirtualBaseOffsets(Info->PrimaryVirtualBaseInfo, Offset);
+ }
+ }
+
+ // Now go through all direct non-virtual bases.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
+ for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) {
+ const BaseSubobjectInfo *Base = Info->Bases[I];
+ if (Base->IsVirtual)
+ continue;
+
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
+ AddPrimaryVirtualBaseOffsets(Base, BaseOffset);
+ }
+}
+
+/// needsVFTable - Return true if this class needs a vtable or vf-table
+/// when laid out as a base class. These are treated the same because
+/// they're both always laid out at offset zero.
+///
+/// This function assumes that the class has no primary base.
+bool RecordLayoutBuilder::needsVFTable(const CXXRecordDecl *RD) const {
+ assert(!PrimaryBase);
+
+ // In the Itanium ABI, every dynamic class needs a vtable: even if
+ // this class has no virtual functions as a base class (i.e. it's
+ // non-polymorphic or only has virtual functions from virtual
+ // bases), it still needs a vtable to locate its virtual bases.
+ if (!isMicrosoftCXXABI())
+ return RD->isDynamicClass();
+
+ // In the MS ABI, we need a vfptr if the class has virtual functions
+ // other than those declared by its virtual bases. The AST doesn't
+ // tell us that directly, and checking manually for virtual
+ // functions that aren't overrides is expensive, but there are
+ // some important shortcuts:
+
+ // - Non-polymorphic classes have no virtual functions at all.
+ if (!RD->isPolymorphic()) return false;
+
+ // - Polymorphic classes with no virtual bases must either declare
+ // virtual functions directly or inherit them, but in the latter
+ // case we would have a primary base.
+ if (RD->getNumVBases() == 0) return true;
+
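+ // Otherwise, we need a vf-table only if this class itself declares a
+ // virtual function that is not an override.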
+ return hasNewVirtualFunction(RD);
+}
+
+/// hasNewVirtualFunction - Does the given polymorphic class declare a
+/// virtual function that does not override a method from any of its
+/// base classes?
+bool
+RecordLayoutBuilder::hasNewVirtualFunction(const CXXRecordDecl *RD) const {
+ assert(RD->isPolymorphic());
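+ // A polymorphic class with no bases at all must declare its own virtual
+ // functions.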
+ if (!RD->getNumBases())
+ return true;
+
+ for (CXXRecordDecl::method_iterator method = RD->method_begin();
+ method != RD->method_end();
+ ++method) {
+ if (method->isVirtual() && !method->size_overridden_methods()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/// isPossiblePrimaryBase - Is the given base class an acceptable
+/// primary base class?
+bool
+RecordLayoutBuilder::isPossiblePrimaryBase(const CXXRecordDecl *Base) const {
+ // In the Itanium ABI, a class can be a primary base class if it has
+ // a vtable for any reason.
+ if (!isMicrosoftCXXABI())
+ return Base->isDynamicClass();
+
+ // In the MS ABI, a class can only be a primary base class if it
+ // provides a vf-table at a static offset. That means it has to be
+ // non-virtual base. The existence of a separate vb-table means
+ // that it's possible to get virtual functions only from a virtual
+ // base, which we have to guard against.
+
+ // First off, it has to have virtual functions.
+ if (!Base->isPolymorphic()) return false;
+
+ // If it has no virtual bases, then everything is at a static offset.
+ if (!Base->getNumVBases()) return true;
+
+ // Okay, just ask the base class's layout.
+ return (Context.getASTRecordLayout(Base).getVFPtrOffset()
+ != CharUnits::fromQuantity(-1));
+}
+
+void
+RecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
+ const CXXRecordDecl *MostDerivedClass) {
+ const CXXRecordDecl *PrimaryBase;
+ bool PrimaryBaseIsVirtual;
+
+ if (MostDerivedClass == RD) {
+ PrimaryBase = this->PrimaryBase;
+ PrimaryBaseIsVirtual = this->PrimaryBaseIsVirtual;
+ } else {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ PrimaryBase = Layout.getPrimaryBase();
+ PrimaryBaseIsVirtual = Layout.isPrimaryBaseVirtual();
+ }
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ assert(!I->getType()->isDependentType() &&
+ "Cannot layout class with dependent bases.");
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+
+ if (I->isVirtual()) {
+ if (PrimaryBase != BaseDecl || !PrimaryBaseIsVirtual) {
+ bool IndirectPrimaryBase = IndirectPrimaryBases.count(BaseDecl);
+
+ // Only lay out the virtual base if it's not an indirect primary base.
+ if (!IndirectPrimaryBase) {
+ // Only visit virtual bases once.
+ if (!VisitedVirtualBases.insert(BaseDecl))
+ continue;
+
+ const BaseSubobjectInfo *BaseInfo = VirtualBaseInfo.lookup(BaseDecl);
+ assert(BaseInfo && "Did not find virtual base info!");
+ LayoutVirtualBase(BaseInfo);
+ }
+ }
+ }
+
+ if (!BaseDecl->getNumVBases()) {
+ // This base isn't interesting since it doesn't have any virtual bases.
+ continue;
+ }
+
+ LayoutVirtualBases(BaseDecl, MostDerivedClass);
+ }
+}
+
+void RecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD) {
+
+ if (!RD->getNumVBases())
+ return;
+
+ // This is substantially simplified because there are no virtual
+ // primary bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl = I->getType()->getAsCXXRecordDecl();
+ const BaseSubobjectInfo *BaseInfo = VirtualBaseInfo.lookup(BaseDecl);
+ assert(BaseInfo && "Did not find virtual base info!");
+
+ LayoutVirtualBase(BaseInfo);
+ }
+}
+
+void RecordLayoutBuilder::LayoutVirtualBase(const BaseSubobjectInfo *Base) {
+ assert(!Base->Derived && "Trying to lay out a primary virtual base!");
+
+ // Layout the base.
+ CharUnits Offset = LayoutBase(Base);
+
+ // Add its base class offset.
+ assert(!VBases.count(Base->Class) && "vbase offset already exists!");
+ VBases.insert(std::make_pair(Base->Class, Offset));
+
+ AddPrimaryVirtualBaseOffsets(Base, Offset);
+}
+
+CharUnits RecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base->Class);
+
+ CharUnits Offset;
+
+ // Query the external layout to see if it provides an offset.
+ bool HasExternalLayout = false;
+ if (ExternalLayout) {
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits>::iterator Known;
+ if (Base->IsVirtual) {
+ Known = ExternalVirtualBaseOffsets.find(Base->Class);
+ if (Known != ExternalVirtualBaseOffsets.end()) {
+ Offset = Known->second;
+ HasExternalLayout = true;
+ }
+ } else {
+ Known = ExternalBaseOffsets.find(Base->Class);
+ if (Known != ExternalBaseOffsets.end()) {
+ Offset = Known->second;
+ HasExternalLayout = true;
+ }
+ }
+ }
+
+ // If we have an empty base class, try to place it at offset 0.
+ if (Base->Class->isEmpty() &&
+ (!HasExternalLayout || Offset == CharUnits::Zero()) &&
+ EmptySubobjects->CanPlaceBaseAtOffset(Base, CharUnits::Zero())) {
+ setSize(std::max(getSize(), Layout.getSize()));
+
+ return CharUnits::Zero();
+ }
+
+ CharUnits UnpackedBaseAlign = Layout.getNonVirtualAlign();
+ CharUnits BaseAlign = (Packed) ? CharUnits::One() : UnpackedBaseAlign;
+
+ // The maximum field alignment overrides base align.
+ if (!MaxFieldAlignment.isZero()) {
+ BaseAlign = std::min(BaseAlign, MaxFieldAlignment);
+ UnpackedBaseAlign = std::min(UnpackedBaseAlign, MaxFieldAlignment);
+ }
+
+ if (!HasExternalLayout) {
+ // Round up the current record size to the base's alignment boundary.
+ Offset = getDataSize().RoundUpToAlignment(BaseAlign);
+
+ // Try to place the base.
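+ // If an empty subobject of the same type already lives at this offset,
+ // bump to the next aligned offset and retry.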
+ while (!EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset))
+ Offset += BaseAlign;
+ } else {
+ bool Allowed = EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset);
+ (void)Allowed;
+ assert(Allowed && "Base subobject externally placed at overlapping offset");
+ }
+
+ if (!Base->Class->isEmpty()) {
+ // Update the data size.
+ setDataSize(Offset + Layout.getNonVirtualSize());
+
+ setSize(std::max(getSize(), getDataSize()));
+ } else
+ setSize(std::max(getSize(), Offset + Layout.getSize()));
+
+ // Remember max struct/class alignment.
+ UpdateAlignment(BaseAlign, UnpackedBaseAlign);
+
+ return Offset;
+}
+
+void RecordLayoutBuilder::InitializeLayout(const Decl *D) {
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(D))
+ IsUnion = RD->isUnion();
+
+ Packed = D->hasAttr<PackedAttr>();
+
+ IsMsStruct = D->hasAttr<MsStructAttr>();
+
+ // Honor the default struct packing maximum alignment flag.
+ if (unsigned DefaultMaxFieldAlignment = Context.getLangOpts().PackStruct) {
+ MaxFieldAlignment = CharUnits::fromQuantity(DefaultMaxFieldAlignment);
+ }
+
+ // mac68k alignment supersedes maximum field alignment and attribute aligned,
+ // and forces all structures to have 2-byte alignment. The IBM docs on it
+ // allude to additional (more complicated) semantics, especially with regard
+ // to bit-fields, but gcc appears not to follow that.
+ if (D->hasAttr<AlignMac68kAttr>()) {
+ IsMac68kAlign = true;
+ MaxFieldAlignment = CharUnits::fromQuantity(2);
+ Alignment = CharUnits::fromQuantity(2);
+ } else {
+ if (const MaxFieldAlignmentAttr *MFAA = D->getAttr<MaxFieldAlignmentAttr>())
+ MaxFieldAlignment = Context.toCharUnitsFromBits(MFAA->getAlignment());
+
+ if (unsigned MaxAlign = D->getMaxAlignment())
+ UpdateAlignment(Context.toCharUnitsFromBits(MaxAlign));
+ }
+
+ // If there is an external AST source, ask it for the various offsets.
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(D))
+ if (ExternalASTSource *External = Context.getExternalSource()) {
+ ExternalLayout = External->layoutRecordType(RD,
+ ExternalSize,
+ ExternalAlign,
+ ExternalFieldOffsets,
+ ExternalBaseOffsets,
+ ExternalVirtualBaseOffsets);
+
+ // Update based on external alignment.
+ if (ExternalLayout) {
+ if (ExternalAlign > 0) {
+ Alignment = Context.toCharUnitsFromBits(ExternalAlign);
+ UnpackedAlignment = Alignment;
+ } else {
+ // The external source didn't have alignment information; infer it.
+ InferAlignment = true;
+ }
+ }
+ }
+}
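+
+// Illustrative example (not part of the original source) of the packing knobs
+// initialized above; offsets assume a target where int is 4 bytes:
+//
+//   #pragma pack(2)                 // handled via the MaxFieldAlignmentAttr path
+//   struct A { char c; int i; };    // 'i' is placed at offset 2 instead of 4
+//   #pragma pack()
+//
+//   struct __attribute__((aligned(8))) B { char c; };
+//   // getMaxAlignment() is 64 bits here, so the record alignment is raised to 8.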
+
+void RecordLayoutBuilder::Layout(const RecordDecl *D) {
+ InitializeLayout(D);
+ LayoutFields(D);
+
+ // Finally, round the size of the total struct up to the alignment of the
+ // struct itself.
+ FinishLayout(D);
+}
+
+void RecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
+ InitializeLayout(RD);
+
+ // Lay out the vtable and the non-virtual bases.
+ LayoutNonVirtualBases(RD);
+
+ LayoutFields(RD);
+
+ NonVirtualSize = Context.toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(getSizeInBits(),
+ Context.getTargetInfo().getCharAlign()));
+ NonVirtualAlignment = Alignment;
+
+ if (isMicrosoftCXXABI() &&
+ NonVirtualSize != NonVirtualSize.RoundUpToAlignment(Alignment)) {
+ CharUnits AlignMember =
+ NonVirtualSize.RoundUpToAlignment(Alignment) - NonVirtualSize;
+
+ setSize(getSize() + AlignMember);
+ setDataSize(getSize());
+
+ NonVirtualSize = Context.toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(getSizeInBits(),
+ Context.getTargetInfo().getCharAlign()));
+
+ MSLayoutVirtualBases(RD);
+
+ } else {
+ // Lay out the virtual bases and add the primary virtual base offsets.
+ LayoutVirtualBases(RD, RD);
+ }
+
+ // Finally, round the size of the total struct up to the alignment
+ // of the struct itself.
+ FinishLayout(RD);
+
+#ifndef NDEBUG
+ // Check that we have base offsets for all bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ assert(Bases.count(BaseDecl) && "Did not find base offset!");
+ }
+
+ // And all virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ assert(VBases.count(BaseDecl) && "Did not find base offset!");
+ }
+#endif
+}
+
+void RecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D) {
+ if (ObjCInterfaceDecl *SD = D->getSuperClass()) {
+ const ASTRecordLayout &SL = Context.getASTObjCInterfaceLayout(SD);
+
+ UpdateAlignment(SL.getAlignment());
+
+ // We start laying out ivars not at the end of the superclass
+ // structure, but at the next byte following the last field.
+ setSize(SL.getDataSize());
+ setDataSize(getSize());
+ }
+
+ InitializeLayout(D);
+ // Layout each ivar sequentially.
+ for (const ObjCIvarDecl *IVD = D->all_declared_ivar_begin(); IVD;
+ IVD = IVD->getNextIvar())
+ LayoutField(IVD);
+
+ // Finally, round the size of the total struct up to the alignment of the
+ // struct itself.
+ FinishLayout(D);
+}
+
+void RecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
+ // Layout each field, for now, just sequentially, respecting alignment. In
+ // the future, this will need to be tweakable by targets.
+ const FieldDecl *LastFD = 0;
+ ZeroLengthBitfield = 0;
+ unsigned RemainingInAlignment = 0;
+ for (RecordDecl::field_iterator Field = D->field_begin(),
+ FieldEnd = D->field_end(); Field != FieldEnd; ++Field) {
+ if (IsMsStruct) {
+ FieldDecl *FD = (*Field);
+ if (Context.ZeroBitfieldFollowsBitfield(FD, LastFD))
+ ZeroLengthBitfield = FD;
+ // Zero-length bitfields following non-bitfield members are
+ // ignored:
+ else if (Context.ZeroBitfieldFollowsNonBitfield(FD, LastFD))
+ continue;
+      // FIXME: Streamline these conditions into a single check.
+ else if (Context.BitfieldFollowsBitfield(FD, LastFD) ||
+ Context.BitfieldFollowsNonBitfield(FD, LastFD) ||
+ Context.NonBitfieldFollowsBitfield(FD, LastFD)) {
+ // 1) Adjacent bit fields are packed into the same 1-, 2-, or
+ // 4-byte allocation unit if the integral types are the same
+ // size and if the next bit field fits into the current
+ // allocation unit without crossing the boundary imposed by the
+ // common alignment requirements of the bit fields.
+ // 2) Establish a new alignment for a bitfield following
+ // a non-bitfield if size of their types differ.
+ // 3) Establish a new alignment for a non-bitfield following
+ // a bitfield if size of their types differ.
+ std::pair<uint64_t, unsigned> FieldInfo =
+ Context.getTypeInfo(FD->getType());
+ uint64_t TypeSize = FieldInfo.first;
+ unsigned FieldAlign = FieldInfo.second;
+ // This check is needed for 'long long' in -m32 mode.
+ if (TypeSize > FieldAlign &&
+ (Context.hasSameType(FD->getType(),
+ Context.UnsignedLongLongTy)
+ ||Context.hasSameType(FD->getType(),
+ Context.LongLongTy)))
+ FieldAlign = TypeSize;
+ FieldInfo = Context.getTypeInfo(LastFD->getType());
+ uint64_t TypeSizeLastFD = FieldInfo.first;
+ unsigned FieldAlignLastFD = FieldInfo.second;
+ // This check is needed for 'long long' in -m32 mode.
+ if (TypeSizeLastFD > FieldAlignLastFD &&
+ (Context.hasSameType(LastFD->getType(),
+ Context.UnsignedLongLongTy)
+ || Context.hasSameType(LastFD->getType(),
+ Context.LongLongTy)))
+ FieldAlignLastFD = TypeSizeLastFD;
+
+ if (TypeSizeLastFD != TypeSize) {
+ if (RemainingInAlignment &&
+ LastFD && LastFD->isBitField() &&
+ LastFD->getBitWidthValue(Context)) {
+ // If previous field was a bitfield with some remaining unfilled
+ // bits, pad the field so current field starts on its type boundary.
+ uint64_t FieldOffset =
+ getDataSizeInBits() - UnfilledBitsInLastByte;
+ uint64_t NewSizeInBits = RemainingInAlignment + FieldOffset;
+ setDataSize(llvm::RoundUpToAlignment(NewSizeInBits,
+ Context.getTargetInfo().getCharAlign()));
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+ RemainingInAlignment = 0;
+ }
+
+ uint64_t UnpaddedFieldOffset =
+ getDataSizeInBits() - UnfilledBitsInLastByte;
+ FieldAlign = std::max(FieldAlign, FieldAlignLastFD);
+
+ // The maximum field alignment overrides the aligned attribute.
+ if (!MaxFieldAlignment.isZero()) {
+ unsigned MaxFieldAlignmentInBits =
+ Context.toBits(MaxFieldAlignment);
+ FieldAlign = std::min(FieldAlign, MaxFieldAlignmentInBits);
+ }
+
+ uint64_t NewSizeInBits =
+ llvm::RoundUpToAlignment(UnpaddedFieldOffset, FieldAlign);
+ setDataSize(llvm::RoundUpToAlignment(NewSizeInBits,
+ Context.getTargetInfo().getCharAlign()));
+ UnfilledBitsInLastByte = getDataSizeInBits() - NewSizeInBits;
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+ }
+ if (FD->isBitField()) {
+ uint64_t FieldSize = FD->getBitWidthValue(Context);
+ assert (FieldSize > 0 && "LayoutFields - ms_struct layout");
+ if (RemainingInAlignment < FieldSize)
+ RemainingInAlignment = TypeSize - FieldSize;
+ else
+ RemainingInAlignment -= FieldSize;
+ }
+ }
+ else if (FD->isBitField()) {
+ uint64_t FieldSize = FD->getBitWidthValue(Context);
+ std::pair<uint64_t, unsigned> FieldInfo =
+ Context.getTypeInfo(FD->getType());
+ uint64_t TypeSize = FieldInfo.first;
+ RemainingInAlignment = TypeSize - FieldSize;
+ }
+ LastFD = FD;
+ }
+ else if (!Context.getTargetInfo().useBitFieldTypeAlignment() &&
+ Context.getTargetInfo().useZeroLengthBitfieldAlignment()) {
+ FieldDecl *FD = (*Field);
+ if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
+ ZeroLengthBitfield = FD;
+ }
+ LayoutField(*Field);
+ }
+ if (IsMsStruct && RemainingInAlignment &&
+ LastFD && LastFD->isBitField() && LastFD->getBitWidthValue(Context)) {
+ // If we ended a bitfield before the full length of the type then
+ // pad the struct out to the full length of the last type.
+ uint64_t FieldOffset =
+ getDataSizeInBits() - UnfilledBitsInLastByte;
+ uint64_t NewSizeInBits = RemainingInAlignment + FieldOffset;
+ setDataSize(llvm::RoundUpToAlignment(NewSizeInBits,
+ Context.getTargetInfo().getCharAlign()));
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+ }
+}
+
+void RecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
+ uint64_t TypeSize,
+ bool FieldPacked,
+ const FieldDecl *D) {
+ assert(Context.getLangOpts().CPlusPlus &&
+ "Can only have wide bit-fields in C++!");
+
+ // Itanium C++ ABI 2.4:
+ // If sizeof(T)*8 < n, let T' be the largest integral POD type with
+ // sizeof(T')*8 <= n.
+
+ QualType IntegralPODTypes[] = {
+ Context.UnsignedCharTy, Context.UnsignedShortTy, Context.UnsignedIntTy,
+ Context.UnsignedLongTy, Context.UnsignedLongLongTy
+ };
+
+ QualType Type;
+ for (unsigned I = 0, E = llvm::array_lengthof(IntegralPODTypes);
+ I != E; ++I) {
+ uint64_t Size = Context.getTypeSize(IntegralPODTypes[I]);
+
+ if (Size > FieldSize)
+ break;
+
+ Type = IntegralPODTypes[I];
+ }
+ assert(!Type.isNull() && "Did not find a type!");
+
+ CharUnits TypeAlign = Context.getTypeAlignInChars(Type);
+
+ // We're not going to use any of the unfilled bits in the last byte.
+ UnfilledBitsInLastByte = 0;
+
+ uint64_t FieldOffset;
+ uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastByte;
+
+ if (IsUnion) {
+ setDataSize(std::max(getDataSizeInBits(), FieldSize));
+ FieldOffset = 0;
+ } else {
+ // The bitfield is allocated starting at the next offset aligned
+ // appropriately for T', with length n bits.
+ FieldOffset = llvm::RoundUpToAlignment(getDataSizeInBits(),
+ Context.toBits(TypeAlign));
+
+ uint64_t NewSizeInBits = FieldOffset + FieldSize;
+
+ setDataSize(llvm::RoundUpToAlignment(NewSizeInBits,
+ Context.getTargetInfo().getCharAlign()));
+ UnfilledBitsInLastByte = getDataSizeInBits() - NewSizeInBits;
+ }
+
+ // Place this field at the current location.
+ FieldOffsets.push_back(FieldOffset);
+
+ CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, FieldOffset,
+ Context.toBits(TypeAlign), FieldPacked, D);
+
+ // Update the size.
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+
+ // Remember max struct/class alignment.
+ UpdateAlignment(TypeAlign);
+}
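+
+// Illustrative example (not part of the original source): a "wide" bit-field,
+// which C++ (but not C) permits. Assuming a typical 64-bit Itanium-ABI target:
+//
+//   struct S { char c : 72; };   // FieldSize (72) > TypeSize of char (8)
+//
+// T' is chosen as the largest unsigned integral type whose size does not
+// exceed 72 bits (unsigned long long here); the field is placed at the next
+// offset suitably aligned for T', and all 72 bits are reserved for it.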
+
+void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
+ bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
+ uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastByte;
+ uint64_t FieldOffset = IsUnion ? 0 : UnpaddedFieldOffset;
+ uint64_t FieldSize = D->getBitWidthValue(Context);
+
+ std::pair<uint64_t, unsigned> FieldInfo = Context.getTypeInfo(D->getType());
+ uint64_t TypeSize = FieldInfo.first;
+ unsigned FieldAlign = FieldInfo.second;
+
+ // This check is needed for 'long long' in -m32 mode.
+ if (IsMsStruct && (TypeSize > FieldAlign) &&
+ (Context.hasSameType(D->getType(),
+ Context.UnsignedLongLongTy)
+ || Context.hasSameType(D->getType(), Context.LongLongTy)))
+ FieldAlign = TypeSize;
+
+ if (ZeroLengthBitfield) {
+ std::pair<uint64_t, unsigned> FieldInfo;
+ unsigned ZeroLengthBitfieldAlignment;
+ if (IsMsStruct) {
+      // If a zero-length bitfield is inserted after a bitfield, and the
+      // alignment of the zero-length bitfield is greater than that of the
+      // member that follows it, the following member is aligned as the
+      // type of the zero-length bitfield.
+ if (ZeroLengthBitfield != D) {
+ FieldInfo = Context.getTypeInfo(ZeroLengthBitfield->getType());
+ ZeroLengthBitfieldAlignment = FieldInfo.second;
+ // Ignore alignment of subsequent zero-length bitfields.
+ if ((ZeroLengthBitfieldAlignment > FieldAlign) || (FieldSize == 0))
+ FieldAlign = ZeroLengthBitfieldAlignment;
+ if (FieldSize)
+ ZeroLengthBitfield = 0;
+ }
+ } else {
+ // The alignment of a zero-length bitfield affects the alignment
+ // of the next member. The alignment is the max of the zero
+ // length bitfield's alignment and a target specific fixed value.
+ unsigned ZeroLengthBitfieldBoundary =
+ Context.getTargetInfo().getZeroLengthBitfieldBoundary();
+ if (ZeroLengthBitfieldBoundary > FieldAlign)
+ FieldAlign = ZeroLengthBitfieldBoundary;
+ }
+ }
+
+ if (FieldSize > TypeSize) {
+ LayoutWideBitField(FieldSize, TypeSize, FieldPacked, D);
+ return;
+ }
+
+  // The alignment the field would have if it were not packed. This is used to
+  // check whether the packed attribute was unnecessary (-Wpacked).
+ unsigned UnpackedFieldAlign = FieldAlign;
+ uint64_t UnpackedFieldOffset = FieldOffset;
+ if (!Context.getTargetInfo().useBitFieldTypeAlignment() && !ZeroLengthBitfield)
+ UnpackedFieldAlign = 1;
+
+ if (FieldPacked ||
+ (!Context.getTargetInfo().useBitFieldTypeAlignment() && !ZeroLengthBitfield))
+ FieldAlign = 1;
+ FieldAlign = std::max(FieldAlign, D->getMaxAlignment());
+ UnpackedFieldAlign = std::max(UnpackedFieldAlign, D->getMaxAlignment());
+
+ // The maximum field alignment overrides the aligned attribute.
+ if (!MaxFieldAlignment.isZero() && FieldSize != 0) {
+ unsigned MaxFieldAlignmentInBits = Context.toBits(MaxFieldAlignment);
+ FieldAlign = std::min(FieldAlign, MaxFieldAlignmentInBits);
+ UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignmentInBits);
+ }
+
+ // Check if we need to add padding to give the field the correct alignment.
+ if (FieldSize == 0 ||
+ (MaxFieldAlignment.isZero() &&
+ (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize))
+ FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
+
+ if (FieldSize == 0 ||
+ (MaxFieldAlignment.isZero() &&
+ (UnpackedFieldOffset & (UnpackedFieldAlign-1)) + FieldSize > TypeSize))
+ UnpackedFieldOffset = llvm::RoundUpToAlignment(UnpackedFieldOffset,
+ UnpackedFieldAlign);
+
+ // Padding members don't affect overall alignment, unless zero length bitfield
+ // alignment is enabled.
+ if (!D->getIdentifier() && !Context.getTargetInfo().useZeroLengthBitfieldAlignment())
+ FieldAlign = UnpackedFieldAlign = 1;
+
+ if (!IsMsStruct)
+ ZeroLengthBitfield = 0;
+
+ if (ExternalLayout)
+ FieldOffset = updateExternalFieldOffset(D, FieldOffset);
+
+ // Place this field at the current location.
+ FieldOffsets.push_back(FieldOffset);
+
+ if (!ExternalLayout)
+ CheckFieldPadding(FieldOffset, UnpaddedFieldOffset, UnpackedFieldOffset,
+ UnpackedFieldAlign, FieldPacked, D);
+
+ // Update DataSize to include the last byte containing (part of) the bitfield.
+ if (IsUnion) {
+ // FIXME: I think FieldSize should be TypeSize here.
+ setDataSize(std::max(getDataSizeInBits(), FieldSize));
+ } else {
+ uint64_t NewSizeInBits = FieldOffset + FieldSize;
+
+ setDataSize(llvm::RoundUpToAlignment(NewSizeInBits,
+ Context.getTargetInfo().getCharAlign()));
+ UnfilledBitsInLastByte = getDataSizeInBits() - NewSizeInBits;
+ }
+
+ // Update the size.
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+
+ // Remember max struct/class alignment.
+ UpdateAlignment(Context.toCharUnitsFromBits(FieldAlign),
+ Context.toCharUnitsFromBits(UnpackedFieldAlign));
+}
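+
+// Illustrative example (not part of the original source) of the zero-length
+// bit-field handling above (exact boundaries are target-dependent):
+//
+//   struct S {
+//     char a : 4;
+//     int   : 0;   // zero-length bit-field
+//     char b : 4;  // 'b' starts in a fresh allocation unit rather than being
+//   };             // packed into the same byte as 'a'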
+
+void RecordLayoutBuilder::LayoutField(const FieldDecl *D) {
+ if (D->isBitField()) {
+ LayoutBitField(D);
+ return;
+ }
+
+ uint64_t UnpaddedFieldOffset = getDataSizeInBits() - UnfilledBitsInLastByte;
+
+ // Reset the unfilled bits.
+ UnfilledBitsInLastByte = 0;
+
+ bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
+ CharUnits FieldOffset =
+ IsUnion ? CharUnits::Zero() : getDataSize();
+ CharUnits FieldSize;
+ CharUnits FieldAlign;
+
+ if (D->getType()->isIncompleteArrayType()) {
+ // This is a flexible array member; we can't directly
+ // query getTypeInfo about these, so we figure it out here.
+ // Flexible array members don't have any size, but they
+ // have to be aligned appropriately for their element type.
+ FieldSize = CharUnits::Zero();
+ const ArrayType* ATy = Context.getAsArrayType(D->getType());
+ FieldAlign = Context.getTypeAlignInChars(ATy->getElementType());
+ } else if (const ReferenceType *RT = D->getType()->getAs<ReferenceType>()) {
+ unsigned AS = RT->getPointeeType().getAddressSpace();
+ FieldSize =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(AS));
+ FieldAlign =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerAlign(AS));
+ } else {
+ std::pair<CharUnits, CharUnits> FieldInfo =
+ Context.getTypeInfoInChars(D->getType());
+ FieldSize = FieldInfo.first;
+ FieldAlign = FieldInfo.second;
+
+ if (ZeroLengthBitfield) {
+ CharUnits ZeroLengthBitfieldBoundary =
+ Context.toCharUnitsFromBits(
+ Context.getTargetInfo().getZeroLengthBitfieldBoundary());
+ if (ZeroLengthBitfieldBoundary == CharUnits::Zero()) {
+        // If a zero-length bitfield is inserted after a bitfield, and the
+        // alignment of the zero-length bitfield is greater than that of the
+        // member that follows it, the following member is aligned as the
+        // type of the zero-length bitfield.
+ std::pair<CharUnits, CharUnits> FieldInfo =
+ Context.getTypeInfoInChars(ZeroLengthBitfield->getType());
+ CharUnits ZeroLengthBitfieldAlignment = FieldInfo.second;
+ if (ZeroLengthBitfieldAlignment > FieldAlign)
+ FieldAlign = ZeroLengthBitfieldAlignment;
+ } else if (ZeroLengthBitfieldBoundary > FieldAlign) {
+        // Align the following member to a fixed alignment specified by the target.
+ assert(Context.getTargetInfo().useZeroLengthBitfieldAlignment() &&
+ "ZeroLengthBitfieldBoundary should only be used in conjunction"
+ " with useZeroLengthBitfieldAlignment.");
+ FieldAlign = ZeroLengthBitfieldBoundary;
+ }
+ ZeroLengthBitfield = 0;
+ }
+
+ if (Context.getLangOpts().MSBitfields || IsMsStruct) {
+ // If MS bitfield layout is required, figure out what type is being
+ // laid out and align the field to the width of that type.
+
+ // Resolve all typedefs down to their base type and round up the field
+ // alignment if necessary.
+ QualType T = Context.getBaseElementType(D->getType());
+ if (const BuiltinType *BTy = T->getAs<BuiltinType>()) {
+ CharUnits TypeSize = Context.getTypeSizeInChars(BTy);
+ if (TypeSize > FieldAlign)
+ FieldAlign = TypeSize;
+ }
+ }
+ }
+
+  // The alignment the field would have if it were not packed. This is used to
+  // check whether the packed attribute was unnecessary (-Wpacked).
+ CharUnits UnpackedFieldAlign = FieldAlign;
+ CharUnits UnpackedFieldOffset = FieldOffset;
+
+ if (FieldPacked)
+ FieldAlign = CharUnits::One();
+ CharUnits MaxAlignmentInChars =
+ Context.toCharUnitsFromBits(D->getMaxAlignment());
+ FieldAlign = std::max(FieldAlign, MaxAlignmentInChars);
+ UnpackedFieldAlign = std::max(UnpackedFieldAlign, MaxAlignmentInChars);
+
+ // The maximum field alignment overrides the aligned attribute.
+ if (!MaxFieldAlignment.isZero()) {
+ FieldAlign = std::min(FieldAlign, MaxFieldAlignment);
+ UnpackedFieldAlign = std::min(UnpackedFieldAlign, MaxFieldAlignment);
+ }
+
+ // Round up the current record size to the field's alignment boundary.
+ FieldOffset = FieldOffset.RoundUpToAlignment(FieldAlign);
+ UnpackedFieldOffset =
+ UnpackedFieldOffset.RoundUpToAlignment(UnpackedFieldAlign);
+
+ if (ExternalLayout) {
+ FieldOffset = Context.toCharUnitsFromBits(
+ updateExternalFieldOffset(D, Context.toBits(FieldOffset)));
+
+ if (!IsUnion && EmptySubobjects) {
+ // Record the fact that we're placing a field at this offset.
+ bool Allowed = EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset);
+ (void)Allowed;
+ assert(Allowed && "Externally-placed field cannot be placed here");
+ }
+ } else {
+ if (!IsUnion && EmptySubobjects) {
+ // Check if we can place the field at this offset.
+ while (!EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset)) {
+ // We couldn't place the field at the offset. Try again at a new offset.
+ FieldOffset += FieldAlign;
+ }
+ }
+ }
+
+ // Place this field at the current location.
+ FieldOffsets.push_back(Context.toBits(FieldOffset));
+
+ if (!ExternalLayout)
+ CheckFieldPadding(Context.toBits(FieldOffset), UnpaddedFieldOffset,
+ Context.toBits(UnpackedFieldOffset),
+ Context.toBits(UnpackedFieldAlign), FieldPacked, D);
+
+ // Reserve space for this field.
+ uint64_t FieldSizeInBits = Context.toBits(FieldSize);
+ if (IsUnion)
+ setDataSize(std::max(getDataSizeInBits(), FieldSizeInBits));
+ else
+ setDataSize(FieldOffset + FieldSize);
+
+ // Update the size.
+ setSize(std::max(getSizeInBits(), getDataSizeInBits()));
+
+ // Remember max struct/class alignment.
+ UpdateAlignment(FieldAlign, UnpackedFieldAlign);
+}
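+
+// Illustrative examples (not part of the original source) of the two special
+// cases handled at the top of LayoutField, assuming a 64-bit target:
+//
+//   struct F { int n; char data[]; };  // flexible array member: contributes
+//                                      // no size, but is aligned like char
+//   struct R { int &r; };              // reference member: laid out with the
+//                                      // size and alignment of a pointer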
+
+void RecordLayoutBuilder::FinishLayout(const NamedDecl *D) {
+ if (ExternalLayout) {
+ setSize(ExternalSize);
+ return;
+ }
+
+ // In C++, records cannot be of size 0.
+ if (Context.getLangOpts().CPlusPlus && getSizeInBits() == 0) {
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+      // For gcc compatibility, a class (POD or non-POD) that is not empty but
+      // still has size 0 (for example, because its only fields are zero-length
+      // arrays) keeps a size of 0.
+ if (RD->isEmpty())
+ setSize(CharUnits::One());
+ }
+ else
+ setSize(CharUnits::One());
+ }
+
+ // MSVC doesn't round up to the alignment of the record with virtual bases.
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ if (isMicrosoftCXXABI() && RD->getNumVBases())
+ return;
+ }
+
+ // Finally, round the size of the record up to the alignment of the
+ // record itself.
+ uint64_t UnpaddedSize = getSizeInBits() - UnfilledBitsInLastByte;
+ uint64_t UnpackedSizeInBits =
+ llvm::RoundUpToAlignment(getSizeInBits(),
+ Context.toBits(UnpackedAlignment));
+ CharUnits UnpackedSize = Context.toCharUnitsFromBits(UnpackedSizeInBits);
+ setSize(llvm::RoundUpToAlignment(getSizeInBits(), Context.toBits(Alignment)));
+
+ unsigned CharBitNum = Context.getTargetInfo().getCharWidth();
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(D)) {
+ // Warn if padding was introduced to the struct/class/union.
+ if (getSizeInBits() > UnpaddedSize) {
+ unsigned PadSize = getSizeInBits() - UnpaddedSize;
+ bool InBits = true;
+ if (PadSize % CharBitNum == 0) {
+ PadSize = PadSize / CharBitNum;
+ InBits = false;
+ }
+ Diag(RD->getLocation(), diag::warn_padded_struct_size)
+ << Context.getTypeDeclType(RD)
+ << PadSize
+ << (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1); // plural or not
+ }
+
+ // Warn if we packed it unnecessarily. If the alignment is 1 byte don't
+ // bother since there won't be alignment issues.
+ if (Packed && UnpackedAlignment > CharUnits::One() &&
+ getSize() == UnpackedSize)
+ Diag(D->getLocation(), diag::warn_unnecessary_packed)
+ << Context.getTypeDeclType(RD);
+ }
+}
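+
+// Illustrative examples (not part of the original source) of the diagnostics
+// emitted above, assuming int is 4 bytes with 4-byte alignment:
+//
+//   struct P { int i; char c; };     // 3 bytes of tail padding round the size
+//                                    // up from 5 to 8 -> -Wpadded
+//   struct __attribute__((packed)) Q { int a, b; };
+//                                    // packing does not change the layout
+//                                    // (size stays 8) -> -Wpacked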
+
+void RecordLayoutBuilder::UpdateAlignment(CharUnits NewAlignment,
+ CharUnits UnpackedNewAlignment) {
+ // The alignment is not modified when using 'mac68k' alignment or when
+ // we have an externally-supplied layout that also provides overall alignment.
+ if (IsMac68kAlign || (ExternalLayout && !InferAlignment))
+ return;
+
+ if (NewAlignment > Alignment) {
+    assert(llvm::isPowerOf2_32(NewAlignment.getQuantity()) &&
+           "Alignment not a power of 2");
+ Alignment = NewAlignment;
+ }
+
+ if (UnpackedNewAlignment > UnpackedAlignment) {
+    assert(llvm::isPowerOf2_32(UnpackedNewAlignment.getQuantity()) &&
+           "Alignment not a power of 2");
+ UnpackedAlignment = UnpackedNewAlignment;
+ }
+}
+
+uint64_t
+RecordLayoutBuilder::updateExternalFieldOffset(const FieldDecl *Field,
+ uint64_t ComputedOffset) {
+ assert(ExternalFieldOffsets.find(Field) != ExternalFieldOffsets.end() &&
+ "Field does not have an external offset");
+
+ uint64_t ExternalFieldOffset = ExternalFieldOffsets[Field];
+
+ if (InferAlignment && ExternalFieldOffset < ComputedOffset) {
+ // The externally-supplied field offset is before the field offset we
+ // computed. Assume that the structure is packed.
+ Alignment = CharUnits::fromQuantity(1);
+ InferAlignment = false;
+ }
+
+ // Use the externally-supplied field offset.
+ return ExternalFieldOffset;
+}
+
+void RecordLayoutBuilder::CheckFieldPadding(uint64_t Offset,
+ uint64_t UnpaddedOffset,
+ uint64_t UnpackedOffset,
+ unsigned UnpackedAlign,
+ bool isPacked,
+ const FieldDecl *D) {
+  // We let Objective-C ivars through without warning; Objective-C interfaces
+  // are generally not used for padding tricks.
+ if (isa<ObjCIvarDecl>(D))
+ return;
+
+ // Don't warn about structs created without a SourceLocation. This can
+ // be done by clients of the AST, such as codegen.
+ if (D->getLocation().isInvalid())
+ return;
+
+ unsigned CharBitNum = Context.getTargetInfo().getCharWidth();
+
+ // Warn if padding was introduced to the struct/class.
+ if (!IsUnion && Offset > UnpaddedOffset) {
+ unsigned PadSize = Offset - UnpaddedOffset;
+ bool InBits = true;
+ if (PadSize % CharBitNum == 0) {
+ PadSize = PadSize / CharBitNum;
+ InBits = false;
+ }
+ if (D->getIdentifier())
+ Diag(D->getLocation(), diag::warn_padded_struct_field)
+ << (D->getParent()->isStruct() ? 0 : 1) // struct|class
+ << Context.getTypeDeclType(D->getParent())
+ << PadSize
+ << (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1) // plural or not
+ << D->getIdentifier();
+ else
+ Diag(D->getLocation(), diag::warn_padded_struct_anon_field)
+ << (D->getParent()->isStruct() ? 0 : 1) // struct|class
+ << Context.getTypeDeclType(D->getParent())
+ << PadSize
+ << (InBits ? 1 : 0) /*(byte|bit)*/ << (PadSize > 1); // plural or not
+ }
+
+ // Warn if we packed it unnecessarily. If the alignment is 1 byte don't
+ // bother since there won't be alignment issues.
+ if (isPacked && UnpackedAlign > CharBitNum && Offset == UnpackedOffset)
+ Diag(D->getLocation(), diag::warn_unnecessary_packed)
+ << D->getIdentifier();
+}
+
+const CXXMethodDecl *
+RecordLayoutBuilder::ComputeKeyFunction(const CXXRecordDecl *RD) {
+ // If a class isn't polymorphic it doesn't have a key function.
+ if (!RD->isPolymorphic())
+ return 0;
+
+ // A class that is not externally visible doesn't have a key function. (Or
+ // at least, there's no point to assigning a key function to such a class;
+ // this doesn't affect the ABI.)
+ if (RD->getLinkage() != ExternalLinkage)
+ return 0;
+
+  // Template instantiations don't have key functions; see Itanium C++ ABI 5.2.6.
+ // Same behavior as GCC.
+ TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
+ if (TSK == TSK_ImplicitInstantiation ||
+ TSK == TSK_ExplicitInstantiationDefinition)
+ return 0;
+
+ for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+
+ if (!MD->isVirtual())
+ continue;
+
+ if (MD->isPure())
+ continue;
+
+ // Ignore implicit member functions, they are always marked as inline, but
+ // they don't have a body until they're defined.
+ if (MD->isImplicit())
+ continue;
+
+ if (MD->isInlineSpecified())
+ continue;
+
+ if (MD->hasInlineBody())
+ continue;
+
+ // We found it.
+ return MD;
+ }
+
+ return 0;
+}
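+
+// Illustrative example (not part of the original source) of the key-function
+// rules implemented above:
+//
+//   struct A {
+//     virtual void f();        // not pure, not inline: this is the key function
+//     virtual void g() {}      // has an inline body: skipped
+//     virtual void h() = 0;    // pure: skipped
+//   };
+//
+// ComputeKeyFunction(A) returns A::f, so per the Itanium C++ ABI the vtable
+// for A is emitted in the translation unit that defines A::f.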
+
+DiagnosticBuilder
+RecordLayoutBuilder::Diag(SourceLocation Loc, unsigned DiagID) {
+ return Context.getDiagnostics().Report(Loc, DiagID);
+}
+
+/// getASTRecordLayout - Get or compute information about the layout of the
+/// specified record (struct/union/class), which indicates its size and field
+/// position information.
+const ASTRecordLayout &
+ASTContext::getASTRecordLayout(const RecordDecl *D) const {
+  // These asserts test different things. A record has a definition as soon as
+  // we begin to parse the definition. That definition is not a *complete*
+  // definition (which is what isCompleteDefinition() tests) until we *finish*
+  // parsing the definition.
+
+ if (D->hasExternalLexicalStorage() && !D->getDefinition())
+ getExternalSource()->CompleteType(const_cast<RecordDecl*>(D));
+
+ D = D->getDefinition();
+ assert(D && "Cannot get layout of forward declarations!");
+ assert(D->isCompleteDefinition() && "Cannot layout type before complete!");
+
+ // Look up this layout, if already laid out, return what we have.
+ // Note that we can't save a reference to the entry because this function
+ // is recursive.
+ const ASTRecordLayout *Entry = ASTRecordLayouts[D];
+ if (Entry) return *Entry;
+
+ const ASTRecordLayout *NewEntry;
+
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ EmptySubobjectMap EmptySubobjects(*this, RD);
+ RecordLayoutBuilder Builder(*this, &EmptySubobjects);
+ Builder.Layout(RD);
+
+ // MSVC gives the vb-table pointer an alignment equal to that of
+ // the non-virtual part of the structure. That's an inherently
+ // multi-pass operation. If our first pass doesn't give us
+ // adequate alignment, try again with the specified minimum
+ // alignment. This is *much* more maintainable than computing the
+ // alignment in advance in a separately-coded pass; it's also
+ // significantly more efficient in the common case where the
+ // vb-table doesn't need extra padding.
+ if (Builder.VBPtrOffset != CharUnits::fromQuantity(-1) &&
+ (Builder.VBPtrOffset % Builder.NonVirtualAlignment) != 0) {
+ Builder.resetWithTargetAlignment(Builder.NonVirtualAlignment);
+ Builder.Layout(RD);
+ }
+
+ // FIXME: This is not always correct. See the part about bitfields at
+ // http://www.codesourcery.com/public/cxx-abi/abi.html#POD for more info.
+ // FIXME: IsPODForThePurposeOfLayout should be stored in the record layout.
+ // This does not affect the calculations of MSVC layouts
+ bool IsPODForThePurposeOfLayout =
+ (!Builder.isMicrosoftCXXABI() && cast<CXXRecordDecl>(D)->isPOD());
+
+ // FIXME: This should be done in FinalizeLayout.
+ CharUnits DataSize =
+ IsPODForThePurposeOfLayout ? Builder.getSize() : Builder.getDataSize();
+ CharUnits NonVirtualSize =
+ IsPODForThePurposeOfLayout ? DataSize : Builder.NonVirtualSize;
+
+ NewEntry =
+ new (*this) ASTRecordLayout(*this, Builder.getSize(),
+ Builder.Alignment,
+ Builder.VFPtrOffset,
+ Builder.VBPtrOffset,
+ DataSize,
+ Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size(),
+ NonVirtualSize,
+ Builder.NonVirtualAlignment,
+ EmptySubobjects.SizeOfLargestEmptySubobject,
+ Builder.PrimaryBase,
+ Builder.PrimaryBaseIsVirtual,
+ Builder.Bases, Builder.VBases);
+ } else {
+ RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/0);
+ Builder.Layout(D);
+
+ NewEntry =
+ new (*this) ASTRecordLayout(*this, Builder.getSize(),
+ Builder.Alignment,
+ Builder.getSize(),
+ Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size());
+ }
+
+ ASTRecordLayouts[D] = NewEntry;
+
+ if (getLangOpts().DumpRecordLayouts) {
+ llvm::errs() << "\n*** Dumping AST Record Layout\n";
+ DumpRecordLayout(D, llvm::errs(), getLangOpts().DumpRecordLayoutsSimple);
+ }
+
+ return *NewEntry;
+}
+
+const CXXMethodDecl *ASTContext::getKeyFunction(const CXXRecordDecl *RD) {
+ RD = cast<CXXRecordDecl>(RD->getDefinition());
+ assert(RD && "Cannot get key function for forward declarations!");
+
+ const CXXMethodDecl *&Entry = KeyFunctions[RD];
+ if (!Entry)
+ Entry = RecordLayoutBuilder::ComputeKeyFunction(RD);
+
+ return Entry;
+}
+
+static uint64_t getFieldOffset(const ASTContext &C, const FieldDecl *FD) {
+ const ASTRecordLayout &Layout = C.getASTRecordLayout(FD->getParent());
+ return Layout.getFieldOffset(FD->getFieldIndex());
+}
+
+uint64_t ASTContext::getFieldOffset(const ValueDecl *VD) const {
+ uint64_t OffsetInBits;
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(VD)) {
+ OffsetInBits = ::getFieldOffset(*this, FD);
+ } else {
+ const IndirectFieldDecl *IFD = cast<IndirectFieldDecl>(VD);
+
+ OffsetInBits = 0;
+ for (IndirectFieldDecl::chain_iterator CI = IFD->chain_begin(),
+ CE = IFD->chain_end();
+ CI != CE; ++CI)
+ OffsetInBits += ::getFieldOffset(*this, cast<FieldDecl>(*CI));
+ }
+
+ return OffsetInBits;
+}
+
+/// getObjCLayout - Get or compute information about the layout of the
+/// given interface.
+///
+/// \param Impl - If given, also include the layout of the interface's
+/// implementation. This may differ by including synthesized ivars.
+const ASTRecordLayout &
+ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
+ const ObjCImplementationDecl *Impl) const {
+ // Retrieve the definition
+ if (D->hasExternalLexicalStorage() && !D->getDefinition())
+ getExternalSource()->CompleteType(const_cast<ObjCInterfaceDecl*>(D));
+ D = D->getDefinition();
+ assert(D && D->isThisDeclarationADefinition() && "Invalid interface decl!");
+
+ // Look up this layout, if already laid out, return what we have.
+ ObjCContainerDecl *Key =
+ Impl ? (ObjCContainerDecl*) Impl : (ObjCContainerDecl*) D;
+ if (const ASTRecordLayout *Entry = ObjCLayouts[Key])
+ return *Entry;
+
+ // Add in synthesized ivar count if laying out an implementation.
+ if (Impl) {
+ unsigned SynthCount = CountNonClassIvars(D);
+    // If there aren't any synthesized ivars then reuse the interface
+ // entry. Note we can't cache this because we simply free all
+ // entries later; however we shouldn't look up implementations
+ // frequently.
+ if (SynthCount == 0)
+ return getObjCLayout(D, 0);
+ }
+
+ RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/0);
+ Builder.Layout(D);
+
+ const ASTRecordLayout *NewEntry =
+ new (*this) ASTRecordLayout(*this, Builder.getSize(),
+ Builder.Alignment,
+ Builder.getDataSize(),
+ Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size());
+
+ ObjCLayouts[Key] = NewEntry;
+
+ return *NewEntry;
+}
+
+static void PrintOffset(raw_ostream &OS,
+ CharUnits Offset, unsigned IndentLevel) {
+ OS << llvm::format("%4" PRId64 " | ", (int64_t)Offset.getQuantity());
+ OS.indent(IndentLevel * 2);
+}
+
+static void DumpCXXRecordLayout(raw_ostream &OS,
+ const CXXRecordDecl *RD, const ASTContext &C,
+ CharUnits Offset,
+ unsigned IndentLevel,
+ const char* Description,
+ bool IncludeVirtualBases) {
+ const ASTRecordLayout &Layout = C.getASTRecordLayout(RD);
+
+ PrintOffset(OS, Offset, IndentLevel);
+ OS << C.getTypeDeclType(const_cast<CXXRecordDecl *>(RD)).getAsString();
+ if (Description)
+ OS << ' ' << Description;
+ if (RD->isEmpty())
+ OS << " (empty)";
+ OS << '\n';
+
+ IndentLevel++;
+
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+ bool HasVfptr = Layout.getVFPtrOffset() != CharUnits::fromQuantity(-1);
+ bool HasVbptr = Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1);
+
+ // Vtable pointer.
+ if (RD->isDynamicClass() && !PrimaryBase &&
+ C.getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
+ PrintOffset(OS, Offset, IndentLevel);
+ OS << '(' << *RD << " vtable pointer)\n";
+ }
+
+ // Dump (non-virtual) bases
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ assert(!I->getType()->isDependentType() &&
+ "Cannot layout class with dependent bases.");
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits BaseOffset = Offset + Layout.getBaseClassOffset(Base);
+
+ DumpCXXRecordLayout(OS, Base, C, BaseOffset, IndentLevel,
+ Base == PrimaryBase ? "(primary base)" : "(base)",
+ /*IncludeVirtualBases=*/false);
+ }
+
+ // vfptr and vbptr (for Microsoft C++ ABI)
+ if (HasVfptr) {
+ PrintOffset(OS, Offset + Layout.getVFPtrOffset(), IndentLevel);
+ OS << '(' << *RD << " vftable pointer)\n";
+ }
+ if (HasVbptr) {
+ PrintOffset(OS, Offset + Layout.getVBPtrOffset(), IndentLevel);
+ OS << '(' << *RD << " vbtable pointer)\n";
+ }
+
+ // Dump fields.
+ uint64_t FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I, ++FieldNo) {
+ const FieldDecl *Field = *I;
+ CharUnits FieldOffset = Offset +
+ C.toCharUnitsFromBits(Layout.getFieldOffset(FieldNo));
+
+ if (const RecordType *RT = Field->getType()->getAs<RecordType>()) {
+ if (const CXXRecordDecl *D = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ DumpCXXRecordLayout(OS, D, C, FieldOffset, IndentLevel,
+ Field->getName().data(),
+ /*IncludeVirtualBases=*/true);
+ continue;
+ }
+ }
+
+ PrintOffset(OS, FieldOffset, IndentLevel);
+ OS << Field->getType().getAsString() << ' ' << *Field << '\n';
+ }
+
+ if (!IncludeVirtualBases)
+ return;
+
+ // Dump virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+ assert(I->isVirtual() && "Found non-virtual class!");
+ const CXXRecordDecl *VBase =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits VBaseOffset = Offset + Layout.getVBaseClassOffset(VBase);
+ DumpCXXRecordLayout(OS, VBase, C, VBaseOffset, IndentLevel,
+ VBase == PrimaryBase ?
+ "(primary virtual base)" : "(virtual base)",
+ /*IncludeVirtualBases=*/false);
+ }
+
+ OS << " sizeof=" << Layout.getSize().getQuantity();
+ OS << ", dsize=" << Layout.getDataSize().getQuantity();
+ OS << ", align=" << Layout.getAlignment().getQuantity() << '\n';
+ OS << " nvsize=" << Layout.getNonVirtualSize().getQuantity();
+ OS << ", nvalign=" << Layout.getNonVirtualAlign().getQuantity() << '\n';
+ OS << '\n';
+}
+
+void ASTContext::DumpRecordLayout(const RecordDecl *RD,
+ raw_ostream &OS,
+ bool Simple) const {
+ const ASTRecordLayout &Info = getASTRecordLayout(RD);
+
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ if (!Simple)
+ return DumpCXXRecordLayout(OS, CXXRD, *this, CharUnits(), 0, 0,
+ /*IncludeVirtualBases=*/true);
+
+ OS << "Type: " << getTypeDeclType(RD).getAsString() << "\n";
+ if (!Simple) {
+ OS << "Record: ";
+ RD->dump();
+ }
+ OS << "\nLayout: ";
+ OS << "<ASTRecordLayout\n";
+ OS << " Size:" << toBits(Info.getSize()) << "\n";
+ OS << " DataSize:" << toBits(Info.getDataSize()) << "\n";
+ OS << " Alignment:" << toBits(Info.getAlignment()) << "\n";
+ OS << " FieldOffsets: [";
+ for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i) {
+ if (i) OS << ", ";
+ OS << Info.getFieldOffset(i);
+ }
+ OS << "]>\n";
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/SelectorLocationsKind.cpp b/contrib/llvm/tools/clang/lib/AST/SelectorLocationsKind.cpp
new file mode 100644
index 0000000..671207a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/SelectorLocationsKind.cpp
@@ -0,0 +1,128 @@
+//===--- SelectorLocationsKind.cpp - Kind of selector locations -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Describes whether the identifier locations for a selector are "standard"
+// or not.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/SelectorLocationsKind.h"
+#include "clang/AST/Expr.h"
+
+using namespace clang;
+
+static SourceLocation getStandardSelLoc(unsigned Index,
+ Selector Sel,
+ bool WithArgSpace,
+ SourceLocation ArgLoc,
+ SourceLocation EndLoc) {
+ unsigned NumSelArgs = Sel.getNumArgs();
+ if (NumSelArgs == 0) {
+ assert(Index == 0);
+ if (EndLoc.isInvalid())
+ return SourceLocation();
+ IdentifierInfo *II = Sel.getIdentifierInfoForSlot(0);
+ unsigned Len = II ? II->getLength() : 0;
+ return EndLoc.getLocWithOffset(-Len);
+ }
+
+ assert(Index < NumSelArgs);
+ if (ArgLoc.isInvalid())
+ return SourceLocation();
+ IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Index);
+ unsigned Len = /* selector id */ (II ? II->getLength() : 0) + /* ':' */ 1;
+ if (WithArgSpace)
+ ++Len;
+ return ArgLoc.getLocWithOffset(-Len);
+}
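+
+// Illustrative example (not part of the original source): for the message send
+//
+//   [obj setWidth:10 height:20];
+//
+// the "standard" location of the selector piece "height:" is the argument
+// location of '20' moved back by strlen("height") + 1 (for the ':'), plus one
+// more character when a space separates the ':' from the argument.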
+
+namespace {
+
+template <typename T>
+SourceLocation getArgLoc(T* Arg);
+
+template <>
+SourceLocation getArgLoc<Expr>(Expr *Arg) {
+ return Arg->getLocStart();
+}
+
+template <>
+SourceLocation getArgLoc<ParmVarDecl>(ParmVarDecl *Arg) {
+ SourceLocation Loc = Arg->getLocStart();
+ if (Loc.isInvalid())
+ return Loc;
+ // -1 to point to left paren of the method parameter's type.
+ return Loc.getLocWithOffset(-1);
+}
+
+template <typename T>
+SourceLocation getArgLoc(unsigned Index, ArrayRef<T*> Args) {
+ return Index < Args.size() ? getArgLoc(Args[Index]) : SourceLocation();
+}
+
+template <typename T>
+SelectorLocationsKind hasStandardSelLocs(Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ArrayRef<T *> Args,
+ SourceLocation EndLoc) {
+  // Are the selector locations in standard position with no space between args?
+ unsigned i;
+ for (i = 0; i != SelLocs.size(); ++i) {
+ if (SelLocs[i] != getStandardSelectorLoc(i, Sel, /*WithArgSpace=*/false,
+ Args, EndLoc))
+ break;
+ }
+ if (i == SelLocs.size())
+ return SelLoc_StandardNoSpace;
+
+  // Are the selector locations in standard position with a space between args?
+ for (i = 0; i != SelLocs.size(); ++i) {
+ if (SelLocs[i] != getStandardSelectorLoc(i, Sel, /*WithArgSpace=*/true,
+ Args, EndLoc))
+ return SelLoc_NonStandard;
+ }
+
+ return SelLoc_StandardWithSpace;
+}
+
+} // anonymous namespace
+
+SelectorLocationsKind
+clang::hasStandardSelectorLocs(Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ArrayRef<Expr *> Args,
+ SourceLocation EndLoc) {
+ return hasStandardSelLocs(Sel, SelLocs, Args, EndLoc);
+}
+
+SourceLocation clang::getStandardSelectorLoc(unsigned Index,
+ Selector Sel,
+ bool WithArgSpace,
+ ArrayRef<Expr *> Args,
+ SourceLocation EndLoc) {
+ return getStandardSelLoc(Index, Sel, WithArgSpace,
+ getArgLoc(Index, Args), EndLoc);
+}
+
+SelectorLocationsKind
+clang::hasStandardSelectorLocs(Selector Sel,
+ ArrayRef<SourceLocation> SelLocs,
+ ArrayRef<ParmVarDecl *> Args,
+ SourceLocation EndLoc) {
+ return hasStandardSelLocs(Sel, SelLocs, Args, EndLoc);
+}
+
+SourceLocation clang::getStandardSelectorLoc(unsigned Index,
+ Selector Sel,
+ bool WithArgSpace,
+ ArrayRef<ParmVarDecl *> Args,
+ SourceLocation EndLoc) {
+ return getStandardSelLoc(Index, Sel, WithArgSpace,
+ getArgLoc(Index, Args), EndLoc);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/Stmt.cpp b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
new file mode 100644
index 0000000..6af20df
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/Stmt.cpp
@@ -0,0 +1,865 @@
+//===--- Stmt.cpp - Statement AST Node Implementation ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Stmt class and statement subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Stmt.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+static struct StmtClassNameTable {
+ const char *Name;
+ unsigned Counter;
+ unsigned Size;
+} StmtClassInfo[Stmt::lastStmtConstant+1];
+
+static StmtClassNameTable &getStmtInfoTableEntry(Stmt::StmtClass E) {
+ static bool Initialized = false;
+ if (Initialized)
+ return StmtClassInfo[E];
+
+  // Initialize the table on first use.
+ Initialized = true;
+#define ABSTRACT_STMT(STMT)
+#define STMT(CLASS, PARENT) \
+ StmtClassInfo[(unsigned)Stmt::CLASS##Class].Name = #CLASS; \
+ StmtClassInfo[(unsigned)Stmt::CLASS##Class].Size = sizeof(CLASS);
+#include "clang/AST/StmtNodes.inc"
+
+ return StmtClassInfo[E];
+}
+
+const char *Stmt::getStmtClassName() const {
+ return getStmtInfoTableEntry((StmtClass) StmtBits.sClass).Name;
+}
+
+void Stmt::PrintStats() {
+ // Ensure the table is primed.
+ getStmtInfoTableEntry(Stmt::NullStmtClass);
+
+ unsigned sum = 0;
+ llvm::errs() << "\n*** Stmt/Expr Stats:\n";
+ for (int i = 0; i != Stmt::lastStmtConstant+1; i++) {
+ if (StmtClassInfo[i].Name == 0) continue;
+ sum += StmtClassInfo[i].Counter;
+ }
+ llvm::errs() << " " << sum << " stmts/exprs total.\n";
+ sum = 0;
+ for (int i = 0; i != Stmt::lastStmtConstant+1; i++) {
+ if (StmtClassInfo[i].Name == 0) continue;
+ if (StmtClassInfo[i].Counter == 0) continue;
+ llvm::errs() << " " << StmtClassInfo[i].Counter << " "
+ << StmtClassInfo[i].Name << ", " << StmtClassInfo[i].Size
+ << " each (" << StmtClassInfo[i].Counter*StmtClassInfo[i].Size
+ << " bytes)\n";
+ sum += StmtClassInfo[i].Counter*StmtClassInfo[i].Size;
+ }
+
+ llvm::errs() << "Total bytes = " << sum << "\n";
+}
+
+void Stmt::addStmtClass(StmtClass s) {
+ ++getStmtInfoTableEntry(s).Counter;
+}
+
+bool Stmt::StatisticsEnabled = false;
+void Stmt::EnableStatistics() {
+ StatisticsEnabled = true;
+}
+
+Stmt *Stmt::IgnoreImplicit() {
+ Stmt *s = this;
+
+ if (ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(s))
+ s = ewc->getSubExpr();
+
+ while (ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(s))
+ s = ice->getSubExpr();
+
+ return s;
+}
+
+/// \brief Strip off all label-like statements.
+///
+/// This will strip off label statements, case statements, and default
+/// statements recursively.
+const Stmt *Stmt::stripLabelLikeStatements() const {
+ const Stmt *S = this;
+ while (true) {
+ if (const LabelStmt *LS = dyn_cast<LabelStmt>(S))
+ S = LS->getSubStmt();
+ else if (const SwitchCase *SC = dyn_cast<SwitchCase>(S))
+ S = SC->getSubStmt();
+ else
+ return S;
+ }
+}
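+
+// Illustrative example (not part of the original source): given the statement
+//
+//   case 1: default: done: x = 0;
+//
+// stripLabelLikeStatements() peels off the case, default, and label statements
+// and returns the underlying 'x = 0;' expression statement.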
+
+namespace {
+ struct good {};
+ struct bad {};
+
+ // These silly little functions have to be static inline to suppress
+ // unused warnings, and they have to be defined to suppress other
+ // warnings.
+ static inline good is_good(good) { return good(); }
+
+ typedef Stmt::child_range children_t();
+ template <class T> good implements_children(children_t T::*) {
+ return good();
+ }
+ static inline bad implements_children(children_t Stmt::*) {
+ return bad();
+ }
+
+ typedef SourceRange getSourceRange_t() const;
+ template <class T> good implements_getSourceRange(getSourceRange_t T::*) {
+ return good();
+ }
+ static inline bad implements_getSourceRange(getSourceRange_t Stmt::*) {
+ return bad();
+ }
+
+#define ASSERT_IMPLEMENTS_children(type) \
+ (void) sizeof(is_good(implements_children(&type::children)))
+#define ASSERT_IMPLEMENTS_getSourceRange(type) \
+ (void) sizeof(is_good(implements_getSourceRange(&type::getSourceRange)))
+}
+
+/// Check whether the various Stmt classes implement their member
+/// functions.
+static inline void check_implementations() {
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ ASSERT_IMPLEMENTS_children(type); \
+ ASSERT_IMPLEMENTS_getSourceRange(type);
+#include "clang/AST/StmtNodes.inc"
+}
+
+Stmt::child_range Stmt::children() {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return static_cast<type*>(this)->children();
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind!");
+}
+
+SourceRange Stmt::getSourceRange() const {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return static_cast<const type*>(this)->getSourceRange();
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind!");
+}
+
+// Amusing macro metaprogramming hack: check whether a class provides
+// a more specific implementation of getLocStart() and getLocEnd().
+//
+// See also Expr.cpp:getExprLoc().
+namespace {
+ /// This implementation is used when a class provides a custom
+ /// implementation of getLocStart.
+ template <class S, class T>
+ SourceLocation getLocStartImpl(const Stmt *stmt,
+ SourceLocation (T::*v)() const) {
+ return static_cast<const S*>(stmt)->getLocStart();
+ }
+
+ /// This implementation is used when a class doesn't provide a custom
+ /// implementation of getLocStart. Overload resolution should pick it over
+ /// the implementation above because it's more specialized according to
+ /// function template partial ordering.
+ template <class S>
+ SourceLocation getLocStartImpl(const Stmt *stmt,
+ SourceLocation (Stmt::*v)() const) {
+ return static_cast<const S*>(stmt)->getSourceRange().getBegin();
+ }
+
+ /// This implementation is used when a class provides a custom
+ /// implementation of getLocEnd.
+ template <class S, class T>
+ SourceLocation getLocEndImpl(const Stmt *stmt,
+ SourceLocation (T::*v)() const) {
+ return static_cast<const S*>(stmt)->getLocEnd();
+ }
+
+ /// This implementation is used when a class doesn't provide a custom
+ /// implementation of getLocEnd. Overload resolution should pick it over
+ /// the implementation above because it's more specialized according to
+ /// function template partial ordering.
+ template <class S>
+ SourceLocation getLocEndImpl(const Stmt *stmt,
+ SourceLocation (Stmt::*v)() const) {
+ return static_cast<const S*>(stmt)->getSourceRange().getEnd();
+ }
+}
+
+SourceLocation Stmt::getLocStart() const {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return getLocStartImpl<type>(this, &type::getLocStart);
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind");
+}
+
+SourceLocation Stmt::getLocEnd() const {
+ switch (getStmtClass()) {
+ case Stmt::NoStmtClass: llvm_unreachable("statement without class");
+#define ABSTRACT_STMT(type)
+#define STMT(type, base) \
+ case Stmt::type##Class: \
+ return getLocEndImpl<type>(this, &type::getLocEnd);
+#include "clang/AST/StmtNodes.inc"
+ }
+ llvm_unreachable("unknown statement kind");
+}
+
+void CompoundStmt::setStmts(ASTContext &C, Stmt **Stmts, unsigned NumStmts) {
+ if (this->Body)
+ C.Deallocate(Body);
+ this->CompoundStmtBits.NumStmts = NumStmts;
+
+ Body = new (C) Stmt*[NumStmts];
+ memcpy(Body, Stmts, sizeof(Stmt *) * NumStmts);
+}
+
+const char *LabelStmt::getName() const {
+ return getDecl()->getIdentifier()->getNameStart();
+}
+
+// This is defined here to avoid pulling Expr.h into Stmt.h.
+SourceRange ReturnStmt::getSourceRange() const {
+ if (RetExpr)
+ return SourceRange(RetLoc, RetExpr->getLocEnd());
+ else
+ return SourceRange(RetLoc);
+}
+
+bool Stmt::hasImplicitControlFlow() const {
+ switch (StmtBits.sClass) {
+ default:
+ return false;
+
+ case CallExprClass:
+ case ConditionalOperatorClass:
+ case ChooseExprClass:
+ case StmtExprClass:
+ case DeclStmtClass:
+ return true;
+
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator* B = cast<BinaryOperator>(this);
+ if (B->isLogicalOp() || B->getOpcode() == BO_Comma)
+ return true;
+ else
+ return false;
+ }
+ }
+}
+
+Expr *AsmStmt::getOutputExpr(unsigned i) {
+ return cast<Expr>(Exprs[i]);
+}
+
+/// getOutputConstraint - Return the constraint string for the specified
+/// output operand. All output constraints are known to be non-empty (either
+/// '=' or '+').
+StringRef AsmStmt::getOutputConstraint(unsigned i) const {
+ return getOutputConstraintLiteral(i)->getString();
+}
+
+/// getNumPlusOperands - Return the number of output operands that have a "+"
+/// constraint.
+unsigned AsmStmt::getNumPlusOperands() const {
+ unsigned Res = 0;
+ for (unsigned i = 0, e = getNumOutputs(); i != e; ++i)
+ if (isOutputPlusConstraint(i))
+ ++Res;
+ return Res;
+}
+
+Expr *AsmStmt::getInputExpr(unsigned i) {
+ return cast<Expr>(Exprs[i + NumOutputs]);
+}
+void AsmStmt::setInputExpr(unsigned i, Expr *E) {
+ Exprs[i + NumOutputs] = E;
+}
+
+
+/// getInputConstraint - Return the specified input constraint. Unlike output
+/// constraints, these can be empty.
+StringRef AsmStmt::getInputConstraint(unsigned i) const {
+ return getInputConstraintLiteral(i)->getString();
+}
+
+
+void AsmStmt::setOutputsAndInputsAndClobbers(ASTContext &C,
+ IdentifierInfo **Names,
+ StringLiteral **Constraints,
+ Stmt **Exprs,
+ unsigned NumOutputs,
+ unsigned NumInputs,
+ StringLiteral **Clobbers,
+ unsigned NumClobbers) {
+ this->NumOutputs = NumOutputs;
+ this->NumInputs = NumInputs;
+ this->NumClobbers = NumClobbers;
+
+ unsigned NumExprs = NumOutputs + NumInputs;
+
+ C.Deallocate(this->Names);
+ this->Names = new (C) IdentifierInfo*[NumExprs];
+ std::copy(Names, Names + NumExprs, this->Names);
+
+ C.Deallocate(this->Exprs);
+ this->Exprs = new (C) Stmt*[NumExprs];
+ std::copy(Exprs, Exprs + NumExprs, this->Exprs);
+
+ C.Deallocate(this->Constraints);
+ this->Constraints = new (C) StringLiteral*[NumExprs];
+ std::copy(Constraints, Constraints + NumExprs, this->Constraints);
+
+ C.Deallocate(this->Clobbers);
+ this->Clobbers = new (C) StringLiteral*[NumClobbers];
+ std::copy(Clobbers, Clobbers + NumClobbers, this->Clobbers);
+}
+
+/// getNamedOperand - Given a symbolic operand reference like %[foo],
+/// translate this into a numeric value needed to reference the same operand.
+/// This returns -1 if the operand name is invalid.
+int AsmStmt::getNamedOperand(StringRef SymbolicName) const {
+ unsigned NumPlusOperands = 0;
+
+ // Check if this is an output operand.
+ for (unsigned i = 0, e = getNumOutputs(); i != e; ++i) {
+ if (getOutputName(i) == SymbolicName)
+ return i;
+ }
+
+ for (unsigned i = 0, e = getNumInputs(); i != e; ++i)
+ if (getInputName(i) == SymbolicName)
+ return getNumOutputs() + NumPlusOperands + i;
+
+ // Not found.
+ return -1;
+}
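+
+// Illustrative example (not part of the original source): given
+//
+//   asm("mov %[in], %[out]" : [out] "=r" (dst) : [in] "r" (src));
+//
+// getNamedOperand("out") returns 0 (the output's index) and
+// getNamedOperand("in") returns 1 (NumOutputs plus the input's index).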
+
+/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
+/// it into pieces. If the asm string is erroneous, return the diagnostic ID
+/// to report (and set DiagOffs to the offending offset); otherwise return 0.
+unsigned AsmStmt::AnalyzeAsmString(SmallVectorImpl<AsmStringPiece>&Pieces,
+ ASTContext &C, unsigned &DiagOffs) const {
+ StringRef Str = getAsmString()->getString();
+ const char *StrStart = Str.begin();
+ const char *StrEnd = Str.end();
+ const char *CurPtr = StrStart;
+
+ // "Simple" inline asms have no constraints or operands, just convert the asm
+ // string to escape $'s.
+ if (isSimple()) {
+ std::string Result;
+ for (; CurPtr != StrEnd; ++CurPtr) {
+ switch (*CurPtr) {
+ case '$':
+ Result += "$$";
+ break;
+ default:
+ Result += *CurPtr;
+ break;
+ }
+ }
+ Pieces.push_back(AsmStringPiece(Result));
+ return 0;
+ }
+
+ // CurStringPiece - The current string that we are building up as we scan the
+ // asm string.
+ std::string CurStringPiece;
+
+ bool HasVariants = !C.getTargetInfo().hasNoAsmVariants();
+
+ while (1) {
+ // Done with the string?
+ if (CurPtr == StrEnd) {
+ if (!CurStringPiece.empty())
+ Pieces.push_back(AsmStringPiece(CurStringPiece));
+ return 0;
+ }
+
+ char CurChar = *CurPtr++;
+ switch (CurChar) {
+ case '$': CurStringPiece += "$$"; continue;
+ case '{': CurStringPiece += (HasVariants ? "$(" : "{"); continue;
+ case '|': CurStringPiece += (HasVariants ? "$|" : "|"); continue;
+ case '}': CurStringPiece += (HasVariants ? "$)" : "}"); continue;
+ case '%':
+ break;
+ default:
+ CurStringPiece += CurChar;
+ continue;
+ }
+
+ // Escaped "%" character in asm string.
+ if (CurPtr == StrEnd) {
+ // % at end of string is invalid (no escape).
+ DiagOffs = CurPtr-StrStart-1;
+ return diag::err_asm_invalid_escape;
+ }
+
+ char EscapedChar = *CurPtr++;
+ if (EscapedChar == '%') { // %% -> %
+ // Escaped percentage sign.
+ CurStringPiece += '%';
+ continue;
+ }
+
+    if (EscapedChar == '=') { // %= -> Generate a unique ID.
+ CurStringPiece += "${:uid}";
+ continue;
+ }
+
+ // Otherwise, we have an operand. If we have accumulated a string so far,
+ // add it to the Pieces list.
+ if (!CurStringPiece.empty()) {
+ Pieces.push_back(AsmStringPiece(CurStringPiece));
+ CurStringPiece.clear();
+ }
+
+ // Handle %x4 and %x[foo] by capturing x as the modifier character.
+ char Modifier = '\0';
+ if (isalpha(EscapedChar)) {
+ if (CurPtr == StrEnd) { // Premature end.
+ DiagOffs = CurPtr-StrStart-1;
+ return diag::err_asm_invalid_escape;
+ }
+ Modifier = EscapedChar;
+ EscapedChar = *CurPtr++;
+ }
+
+ if (isdigit(EscapedChar)) {
+ // %n - Assembler operand n
+ unsigned N = 0;
+
+ --CurPtr;
+ while (CurPtr != StrEnd && isdigit(*CurPtr))
+ N = N*10 + ((*CurPtr++)-'0');
+
+ unsigned NumOperands =
+ getNumOutputs() + getNumPlusOperands() + getNumInputs();
+ if (N >= NumOperands) {
+ DiagOffs = CurPtr-StrStart-1;
+ return diag::err_asm_invalid_operand_number;
+ }
+
+ Pieces.push_back(AsmStringPiece(N, Modifier));
+ continue;
+ }
+
+ // Handle %[foo], a symbolic operand reference.
+ if (EscapedChar == '[') {
+ DiagOffs = CurPtr-StrStart-1;
+
+ // Find the ']'.
+ const char *NameEnd = (const char*)memchr(CurPtr, ']', StrEnd-CurPtr);
+ if (NameEnd == 0)
+ return diag::err_asm_unterminated_symbolic_operand_name;
+ if (NameEnd == CurPtr)
+ return diag::err_asm_empty_symbolic_operand_name;
+
+ StringRef SymbolicName(CurPtr, NameEnd - CurPtr);
+
+ int N = getNamedOperand(SymbolicName);
+ if (N == -1) {
+ // Verify that an operand with that name exists.
+ DiagOffs = CurPtr-StrStart;
+ return diag::err_asm_unknown_symbolic_operand_name;
+ }
+ Pieces.push_back(AsmStringPiece(N, Modifier));
+
+ CurPtr = NameEnd+1;
+ continue;
+ }
+
+ DiagOffs = CurPtr-StrStart-1;
+ return diag::err_asm_invalid_escape;
+ }
+}
+
+QualType CXXCatchStmt::getCaughtType() const {
+ if (ExceptionDecl)
+ return ExceptionDecl->getType();
+ return QualType();
+}
+
+//===----------------------------------------------------------------------===//
+// Constructors
+//===----------------------------------------------------------------------===//
+
+AsmStmt::AsmStmt(ASTContext &C, SourceLocation asmloc, bool issimple,
+ bool isvolatile, bool msasm,
+ unsigned numoutputs, unsigned numinputs,
+ IdentifierInfo **names, StringLiteral **constraints,
+ Expr **exprs, StringLiteral *asmstr, unsigned numclobbers,
+ StringLiteral **clobbers, SourceLocation rparenloc)
+ : Stmt(AsmStmtClass), AsmLoc(asmloc), RParenLoc(rparenloc), AsmStr(asmstr)
+ , IsSimple(issimple), IsVolatile(isvolatile), MSAsm(msasm)
+ , NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) {
+
+ unsigned NumExprs = NumOutputs + NumInputs;
+
+ Names = new (C) IdentifierInfo*[NumExprs];
+ std::copy(names, names + NumExprs, Names);
+
+ Exprs = new (C) Stmt*[NumExprs];
+ std::copy(exprs, exprs + NumExprs, Exprs);
+
+ Constraints = new (C) StringLiteral*[NumExprs];
+ std::copy(constraints, constraints + NumExprs, Constraints);
+
+ Clobbers = new (C) StringLiteral*[NumClobbers];
+ std::copy(clobbers, clobbers + NumClobbers, Clobbers);
+}
+
+ObjCForCollectionStmt::ObjCForCollectionStmt(Stmt *Elem, Expr *Collect,
+ Stmt *Body, SourceLocation FCL,
+ SourceLocation RPL)
+: Stmt(ObjCForCollectionStmtClass) {
+ SubExprs[ELEM] = Elem;
+ SubExprs[COLLECTION] = reinterpret_cast<Stmt*>(Collect);
+ SubExprs[BODY] = Body;
+ ForLoc = FCL;
+ RParenLoc = RPL;
+}
+
+ObjCAtTryStmt::ObjCAtTryStmt(SourceLocation atTryLoc, Stmt *atTryStmt,
+ Stmt **CatchStmts, unsigned NumCatchStmts,
+ Stmt *atFinallyStmt)
+ : Stmt(ObjCAtTryStmtClass), AtTryLoc(atTryLoc),
+ NumCatchStmts(NumCatchStmts), HasFinally(atFinallyStmt != 0)
+{
+ Stmt **Stmts = getStmts();
+ Stmts[0] = atTryStmt;
+ for (unsigned I = 0; I != NumCatchStmts; ++I)
+ Stmts[I + 1] = CatchStmts[I];
+
+ if (HasFinally)
+ Stmts[NumCatchStmts + 1] = atFinallyStmt;
+}
+
+ObjCAtTryStmt *ObjCAtTryStmt::Create(ASTContext &Context,
+ SourceLocation atTryLoc,
+ Stmt *atTryStmt,
+ Stmt **CatchStmts,
+ unsigned NumCatchStmts,
+ Stmt *atFinallyStmt) {
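+ // Trailing storage holds the @try body, then the @catch statements, then the
+ // optional @finally statement (see the ObjCAtTryStmt constructor above).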
+ unsigned Size = sizeof(ObjCAtTryStmt) +
+ (1 + NumCatchStmts + (atFinallyStmt != 0)) * sizeof(Stmt *);
+ void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
+ return new (Mem) ObjCAtTryStmt(atTryLoc, atTryStmt, CatchStmts, NumCatchStmts,
+ atFinallyStmt);
+}
+
+ObjCAtTryStmt *ObjCAtTryStmt::CreateEmpty(ASTContext &Context,
+ unsigned NumCatchStmts,
+ bool HasFinally) {
+ unsigned Size = sizeof(ObjCAtTryStmt) +
+ (1 + NumCatchStmts + HasFinally) * sizeof(Stmt *);
+ void *Mem = Context.Allocate(Size, llvm::alignOf<ObjCAtTryStmt>());
+ return new (Mem) ObjCAtTryStmt(EmptyShell(), NumCatchStmts, HasFinally);
+}
+
+SourceRange ObjCAtTryStmt::getSourceRange() const {
+ SourceLocation EndLoc;
+ if (HasFinally)
+ EndLoc = getFinallyStmt()->getLocEnd();
+ else if (NumCatchStmts)
+ EndLoc = getCatchStmt(NumCatchStmts - 1)->getLocEnd();
+ else
+ EndLoc = getTryBody()->getLocEnd();
+
+ return SourceRange(AtTryLoc, EndLoc);
+}
+
+CXXTryStmt *CXXTryStmt::Create(ASTContext &C, SourceLocation tryLoc,
+ Stmt *tryBlock, Stmt **handlers,
+ unsigned numHandlers) {
+ std::size_t Size = sizeof(CXXTryStmt);
+ // The trailing array stores Stmt pointers (try block + handlers).
+ Size += ((numHandlers + 1) * sizeof(Stmt *));
+
+ void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>());
+ return new (Mem) CXXTryStmt(tryLoc, tryBlock, handlers, numHandlers);
+}
+
+CXXTryStmt *CXXTryStmt::Create(ASTContext &C, EmptyShell Empty,
+ unsigned numHandlers) {
+ std::size_t Size = sizeof(CXXTryStmt);
+ Size += ((numHandlers + 1) * sizeof(Stmt *));
+
+ void *Mem = C.Allocate(Size, llvm::alignOf<CXXTryStmt>());
+ return new (Mem) CXXTryStmt(Empty, numHandlers);
+}
+
+CXXTryStmt::CXXTryStmt(SourceLocation tryLoc, Stmt *tryBlock,
+ Stmt **handlers, unsigned numHandlers)
+ : Stmt(CXXTryStmtClass), TryLoc(tryLoc), NumHandlers(numHandlers) {
+ Stmt **Stmts = reinterpret_cast<Stmt **>(this + 1);
+ Stmts[0] = tryBlock;
+ std::copy(handlers, handlers + NumHandlers, Stmts + 1);
+}
+
+CXXForRangeStmt::CXXForRangeStmt(DeclStmt *Range, DeclStmt *BeginEndStmt,
+ Expr *Cond, Expr *Inc, DeclStmt *LoopVar,
+ Stmt *Body, SourceLocation FL,
+ SourceLocation CL, SourceLocation RPL)
+ : Stmt(CXXForRangeStmtClass), ForLoc(FL), ColonLoc(CL), RParenLoc(RPL) {
+ SubExprs[RANGE] = Range;
+ SubExprs[BEGINEND] = BeginEndStmt;
+ SubExprs[COND] = reinterpret_cast<Stmt*>(Cond);
+ SubExprs[INC] = reinterpret_cast<Stmt*>(Inc);
+ SubExprs[LOOPVAR] = LoopVar;
+ SubExprs[BODY] = Body;
+}
+
+Expr *CXXForRangeStmt::getRangeInit() {
+ DeclStmt *RangeStmt = getRangeStmt();
+ VarDecl *RangeDecl = dyn_cast_or_null<VarDecl>(RangeStmt->getSingleDecl());
+ assert(RangeDecl && "for-range should have a single var decl");
+ return RangeDecl->getInit();
+}
+
+const Expr *CXXForRangeStmt::getRangeInit() const {
+ return const_cast<CXXForRangeStmt*>(this)->getRangeInit();
+}
+
+VarDecl *CXXForRangeStmt::getLoopVariable() {
+ Decl *LV = cast<DeclStmt>(getLoopVarStmt())->getSingleDecl();
+ assert(LV && "No loop variable in CXXForRangeStmt");
+ return cast<VarDecl>(LV);
+}
+
+const VarDecl *CXXForRangeStmt::getLoopVariable() const {
+ return const_cast<CXXForRangeStmt*>(this)->getLoopVariable();
+}
+
+IfStmt::IfStmt(ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
+ Stmt *then, SourceLocation EL, Stmt *elsev)
+ : Stmt(IfStmtClass), IfLoc(IL), ElseLoc(EL)
+{
+ setConditionVariable(C, var);
+ SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
+ SubExprs[THEN] = then;
+ SubExprs[ELSE] = elsev;
+}
+
+VarDecl *IfStmt::getConditionVariable() const {
+ if (!SubExprs[VAR])
+ return 0;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void IfStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
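+ // The variable is wrapped in a DeclStmt so that it (and any initializer) is
+ // visited as a child of the if statement.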
+ if (!V) {
+ SubExprs[VAR] = 0;
+ return;
+ }
+
+ SourceRange VarRange = V->getSourceRange();
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
+ VarRange.getEnd());
+}
+
+ForStmt::ForStmt(ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
+ Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
+ SourceLocation RP)
+ : Stmt(ForStmtClass), ForLoc(FL), LParenLoc(LP), RParenLoc(RP)
+{
+ SubExprs[INIT] = Init;
+ setConditionVariable(C, condVar);
+ SubExprs[COND] = reinterpret_cast<Stmt*>(Cond);
+ SubExprs[INC] = reinterpret_cast<Stmt*>(Inc);
+ SubExprs[BODY] = Body;
+}
+
+VarDecl *ForStmt::getConditionVariable() const {
+ if (!SubExprs[CONDVAR])
+ return 0;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[CONDVAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void ForStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[CONDVAR] = 0;
+ return;
+ }
+
+ SourceRange VarRange = V->getSourceRange();
+ SubExprs[CONDVAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
+ VarRange.getEnd());
+}
+
+SwitchStmt::SwitchStmt(ASTContext &C, VarDecl *Var, Expr *cond)
+ : Stmt(SwitchStmtClass), FirstCase(0), AllEnumCasesCovered(0)
+{
+ setConditionVariable(C, Var);
+ SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
+ SubExprs[BODY] = NULL;
+}
+
+VarDecl *SwitchStmt::getConditionVariable() const {
+ if (!SubExprs[VAR])
+ return 0;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void SwitchStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[VAR] = 0;
+ return;
+ }
+
+ SourceRange VarRange = V->getSourceRange();
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
+ VarRange.getEnd());
+}
+
+Stmt *SwitchCase::getSubStmt() {
+ if (isa<CaseStmt>(this))
+ return cast<CaseStmt>(this)->getSubStmt();
+ return cast<DefaultStmt>(this)->getSubStmt();
+}
+
+WhileStmt::WhileStmt(ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
+ SourceLocation WL)
+ : Stmt(WhileStmtClass) {
+ setConditionVariable(C, Var);
+ SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
+ SubExprs[BODY] = body;
+ WhileLoc = WL;
+}
+
+VarDecl *WhileStmt::getConditionVariable() const {
+ if (!SubExprs[VAR])
+ return 0;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void WhileStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[VAR] = 0;
+ return;
+ }
+
+ SourceRange VarRange = V->getSourceRange();
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V), VarRange.getBegin(),
+ VarRange.getEnd());
+}
+
+// IndirectGotoStmt
+LabelDecl *IndirectGotoStmt::getConstantTarget() {
+ if (AddrLabelExpr *E =
+ dyn_cast<AddrLabelExpr>(getTarget()->IgnoreParenImpCasts()))
+ return E->getLabel();
+ return 0;
+}
+
+// ReturnStmt
+const Expr* ReturnStmt::getRetValue() const {
+ return cast_or_null<Expr>(RetExpr);
+}
+Expr* ReturnStmt::getRetValue() {
+ return cast_or_null<Expr>(RetExpr);
+}
+
+SEHTryStmt::SEHTryStmt(bool IsCXXTry,
+ SourceLocation TryLoc,
+ Stmt *TryBlock,
+ Stmt *Handler)
+ : Stmt(SEHTryStmtClass),
+ IsCXXTry(IsCXXTry),
+ TryLoc(TryLoc)
+{
+ Children[TRY] = TryBlock;
+ Children[HANDLER] = Handler;
+}
+
+SEHTryStmt* SEHTryStmt::Create(ASTContext &C,
+ bool IsCXXTry,
+ SourceLocation TryLoc,
+ Stmt *TryBlock,
+ Stmt *Handler) {
+ return new (C) SEHTryStmt(IsCXXTry, TryLoc, TryBlock, Handler);
+}
+
+SEHExceptStmt* SEHTryStmt::getExceptHandler() const {
+ return dyn_cast<SEHExceptStmt>(getHandler());
+}
+
+SEHFinallyStmt* SEHTryStmt::getFinallyHandler() const {
+ return dyn_cast<SEHFinallyStmt>(getHandler());
+}
+
+SEHExceptStmt::SEHExceptStmt(SourceLocation Loc,
+ Expr *FilterExpr,
+ Stmt *Block)
+ : Stmt(SEHExceptStmtClass),
+ Loc(Loc)
+{
+ Children[FILTER_EXPR] = reinterpret_cast<Stmt*>(FilterExpr);
+ Children[BLOCK] = Block;
+}
+
+SEHExceptStmt* SEHExceptStmt::Create(ASTContext &C,
+ SourceLocation Loc,
+ Expr *FilterExpr,
+ Stmt *Block) {
+ return new (C) SEHExceptStmt(Loc, FilterExpr, Block);
+}
+
+SEHFinallyStmt::SEHFinallyStmt(SourceLocation Loc,
+ Stmt *Block)
+ : Stmt(SEHFinallyStmtClass),
+ Loc(Loc),
+ Block(Block)
+{}
+
+SEHFinallyStmt* SEHFinallyStmt::Create(ASTContext &C,
+ SourceLocation Loc,
+ Stmt *Block) {
+ return new (C) SEHFinallyStmt(Loc, Block);
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp b/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp
new file mode 100644
index 0000000..b5e298c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/StmtDumper.cpp
@@ -0,0 +1,763 @@
+//===--- StmtDumper.cpp - Dumping implementation for Stmt ASTs ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Stmt::dump/Stmt::print methods, which dump out the
+// AST in a form that exposes type details and other fields.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// StmtDumper Visitor
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class StmtDumper : public StmtVisitor<StmtDumper> {
+ SourceManager *SM;
+ raw_ostream &OS;
+ unsigned IndentLevel;
+
+ /// MaxDepth - When doing a normal dump (not dumpAll) we only want to dump
+ /// the first few levels of an AST. This keeps track of how many ast levels
+ /// are left.
+ unsigned MaxDepth;
+
+ /// LastLocFilename/LastLocLine - Keep track of the last location we print
+ /// out so that we can print out deltas from then on out.
+ const char *LastLocFilename;
+ unsigned LastLocLine;
+
+ public:
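+ // Note: IndentLevel starts at (unsigned)-1; DumpSubTree pre-increments it,
+ // so the root statement is dumped at indentation level 0.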
+ StmtDumper(SourceManager *sm, raw_ostream &os, unsigned maxDepth)
+ : SM(sm), OS(os), IndentLevel(0-1), MaxDepth(maxDepth) {
+ LastLocFilename = "";
+ LastLocLine = ~0U;
+ }
+
+ void DumpSubTree(Stmt *S) {
+ // Prune the recursion if not using dump all.
+ if (MaxDepth == 0) return;
+
+ ++IndentLevel;
+ if (S) {
+ if (DeclStmt* DS = dyn_cast<DeclStmt>(S))
+ VisitDeclStmt(DS);
+ else {
+ Visit(S);
+
+ // Print out children.
+ Stmt::child_range CI = S->children();
+ while (CI) {
+ OS << '\n';
+ DumpSubTree(*CI++);
+ }
+ }
+ OS << ')';
+ } else {
+ Indent();
+ OS << "<<<NULL>>>";
+ }
+ --IndentLevel;
+ }
+
+ void DumpDeclarator(Decl *D);
+
+ void Indent() const {
+ for (int i = 0, e = IndentLevel; i < e; ++i)
+ OS << " ";
+ }
+
+ void DumpType(QualType T) {
+ SplitQualType T_split = T.split();
+ OS << "'" << QualType::getAsString(T_split) << "'";
+
+ if (!T.isNull()) {
+ // If the type is sugared, also dump a (shallow) desugared type.
+ SplitQualType D_split = T.getSplitDesugaredType();
+ if (T_split != D_split)
+ OS << ":'" << QualType::getAsString(D_split) << "'";
+ }
+ }
+ void DumpDeclRef(Decl *node);
+ void DumpStmt(const Stmt *Node) {
+ Indent();
+ OS << "(" << Node->getStmtClassName()
+ << " " << (void*)Node;
+ DumpSourceRange(Node);
+ }
+ void DumpValueKind(ExprValueKind K) {
+ switch (K) {
+ case VK_RValue: break;
+ case VK_LValue: OS << " lvalue"; break;
+ case VK_XValue: OS << " xvalue"; break;
+ }
+ }
+ void DumpObjectKind(ExprObjectKind K) {
+ switch (K) {
+ case OK_Ordinary: break;
+ case OK_BitField: OS << " bitfield"; break;
+ case OK_ObjCProperty: OS << " objcproperty"; break;
+ case OK_ObjCSubscript: OS << " objcsubscript"; break;
+ case OK_VectorComponent: OS << " vectorcomponent"; break;
+ }
+ }
+ void DumpExpr(const Expr *Node) {
+ DumpStmt(Node);
+ OS << ' ';
+ DumpType(Node->getType());
+ DumpValueKind(Node->getValueKind());
+ DumpObjectKind(Node->getObjectKind());
+ }
+ void DumpSourceRange(const Stmt *Node);
+ void DumpLocation(SourceLocation Loc);
+
+ // Stmts.
+ void VisitStmt(Stmt *Node);
+ void VisitDeclStmt(DeclStmt *Node);
+ void VisitLabelStmt(LabelStmt *Node);
+ void VisitGotoStmt(GotoStmt *Node);
+
+ // Exprs
+ void VisitExpr(Expr *Node);
+ void VisitCastExpr(CastExpr *Node);
+ void VisitDeclRefExpr(DeclRefExpr *Node);
+ void VisitPredefinedExpr(PredefinedExpr *Node);
+ void VisitCharacterLiteral(CharacterLiteral *Node);
+ void VisitIntegerLiteral(IntegerLiteral *Node);
+ void VisitFloatingLiteral(FloatingLiteral *Node);
+ void VisitStringLiteral(StringLiteral *Str);
+ void VisitUnaryOperator(UnaryOperator *Node);
+ void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node);
+ void VisitMemberExpr(MemberExpr *Node);
+ void VisitExtVectorElementExpr(ExtVectorElementExpr *Node);
+ void VisitBinaryOperator(BinaryOperator *Node);
+ void VisitCompoundAssignOperator(CompoundAssignOperator *Node);
+ void VisitAddrLabelExpr(AddrLabelExpr *Node);
+ void VisitBlockExpr(BlockExpr *Node);
+ void VisitOpaqueValueExpr(OpaqueValueExpr *Node);
+
+ // C++
+ void VisitCXXNamedCastExpr(CXXNamedCastExpr *Node);
+ void VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *Node);
+ void VisitCXXThisExpr(CXXThisExpr *Node);
+ void VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node);
+ void VisitCXXConstructExpr(CXXConstructExpr *Node);
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node);
+ void VisitExprWithCleanups(ExprWithCleanups *Node);
+ void VisitUnresolvedLookupExpr(UnresolvedLookupExpr *Node);
+ void DumpCXXTemporary(CXXTemporary *Temporary);
+
+ // ObjC
+ void VisitObjCAtCatchStmt(ObjCAtCatchStmt *Node);
+ void VisitObjCEncodeExpr(ObjCEncodeExpr *Node);
+ void VisitObjCMessageExpr(ObjCMessageExpr* Node);
+ void VisitObjCSelectorExpr(ObjCSelectorExpr *Node);
+ void VisitObjCProtocolExpr(ObjCProtocolExpr *Node);
+ void VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node);
+ void VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *Node);
+ void VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node);
+ void VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Node);
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+void StmtDumper::DumpLocation(SourceLocation Loc) {
+ SourceLocation SpellingLoc = SM->getSpellingLoc(Loc);
+
+ // The general format we print out is filename:line:col, but we drop pieces
+ // that haven't changed since the last loc printed.
+ PresumedLoc PLoc = SM->getPresumedLoc(SpellingLoc);
+
+ if (PLoc.isInvalid()) {
+ OS << "<invalid sloc>";
+ return;
+ }
+
+ if (strcmp(PLoc.getFilename(), LastLocFilename) != 0) {
+ OS << PLoc.getFilename() << ':' << PLoc.getLine()
+ << ':' << PLoc.getColumn();
+ LastLocFilename = PLoc.getFilename();
+ LastLocLine = PLoc.getLine();
+ } else if (PLoc.getLine() != LastLocLine) {
+ OS << "line" << ':' << PLoc.getLine()
+ << ':' << PLoc.getColumn();
+ LastLocLine = PLoc.getLine();
+ } else {
+ OS << "col" << ':' << PLoc.getColumn();
+ }
+}
+
+void StmtDumper::DumpSourceRange(const Stmt *Node) {
+ // Can't translate locations if a SourceManager isn't available.
+ if (SM == 0) return;
+
+ // TODO: If the parent expression is available, we can print a delta vs its
+ // location.
+ SourceRange R = Node->getSourceRange();
+
+ OS << " <";
+ DumpLocation(R.getBegin());
+ if (R.getBegin() != R.getEnd()) {
+ OS << ", ";
+ DumpLocation(R.getEnd());
+ }
+ OS << ">";
+
+ // e.g. <t2.c:123:421, t2.c:412:321>
+}
+
+
+//===----------------------------------------------------------------------===//
+// Stmt printing methods.
+//===----------------------------------------------------------------------===//
+
+void StmtDumper::VisitStmt(Stmt *Node) {
+ DumpStmt(Node);
+}
+
+void StmtDumper::DumpDeclarator(Decl *D) {
+ // FIXME: Need to complete/beautify this... this code simply shows the
+ // nodes are where they need to be.
+ if (TypedefDecl *localType = dyn_cast<TypedefDecl>(D)) {
+ OS << "\"typedef " << localType->getUnderlyingType().getAsString()
+ << ' ' << *localType << '"';
+ } else if (TypeAliasDecl *localType = dyn_cast<TypeAliasDecl>(D)) {
+ OS << "\"using " << *localType << " = "
+ << localType->getUnderlyingType().getAsString() << '"';
+ } else if (ValueDecl *VD = dyn_cast<ValueDecl>(D)) {
+ OS << "\"";
+ // Emit storage class for vardecls.
+ if (VarDecl *V = dyn_cast<VarDecl>(VD)) {
+ if (V->getStorageClass() != SC_None)
+ OS << VarDecl::getStorageClassSpecifierString(V->getStorageClass())
+ << " ";
+ }
+
+ std::string Name = VD->getNameAsString();
+ VD->getType().getAsStringInternal(Name,
+ PrintingPolicy(VD->getASTContext().getLangOpts()));
+ OS << Name;
+
+ // If this is a vardecl with an initializer, emit it.
+ if (VarDecl *V = dyn_cast<VarDecl>(VD)) {
+ if (V->getInit()) {
+ OS << " =\n";
+ DumpSubTree(V->getInit());
+ }
+ }
+ OS << '"';
+ } else if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ // print a free standing tag decl (e.g. "struct x;").
+ const char *tagname;
+ if (const IdentifierInfo *II = TD->getIdentifier())
+ tagname = II->getNameStart();
+ else
+ tagname = "<anonymous>";
+ OS << '"' << TD->getKindName() << ' ' << tagname << ";\"";
+ // FIXME: print tag bodies.
+ } else if (UsingDirectiveDecl *UD = dyn_cast<UsingDirectiveDecl>(D)) {
+ // print using-directive decl (e.g. "using namespace x;")
+ const char *ns;
+ if (const IdentifierInfo *II = UD->getNominatedNamespace()->getIdentifier())
+ ns = II->getNameStart();
+ else
+ ns = "<anonymous>";
+ OS << '"' << UD->getDeclKindName() << ns << ";\"";
+ } else if (UsingDecl *UD = dyn_cast<UsingDecl>(D)) {
+ // print using decl (e.g. "using std::string;")
+ const char *tn = UD->isTypeName() ? "typename " : "";
+ OS << '"' << UD->getDeclKindName() << tn;
+ UD->getQualifier()->print(OS,
+ PrintingPolicy(UD->getASTContext().getLangOpts()));
+ OS << ";\"";
+ } else if (LabelDecl *LD = dyn_cast<LabelDecl>(D)) {
+ OS << "label " << *LD;
+ } else if (StaticAssertDecl *SAD = dyn_cast<StaticAssertDecl>(D)) {
+ OS << "\"static_assert(\n";
+ DumpSubTree(SAD->getAssertExpr());
+ OS << ",\n";
+ DumpSubTree(SAD->getMessage());
+ OS << ");\"";
+ } else {
+ llvm_unreachable("Unexpected decl");
+ }
+}
+
+void StmtDumper::VisitDeclStmt(DeclStmt *Node) {
+ DumpStmt(Node);
+ OS << "\n";
+ for (DeclStmt::decl_iterator DI = Node->decl_begin(), DE = Node->decl_end();
+ DI != DE; ++DI) {
+ Decl* D = *DI;
+ ++IndentLevel;
+ Indent();
+ OS << (void*) D << " ";
+ DumpDeclarator(D);
+ if (DI+1 != DE)
+ OS << "\n";
+ --IndentLevel;
+ }
+}
+
+void StmtDumper::VisitLabelStmt(LabelStmt *Node) {
+ DumpStmt(Node);
+ OS << " '" << Node->getName() << "'";
+}
+
+void StmtDumper::VisitGotoStmt(GotoStmt *Node) {
+ DumpStmt(Node);
+ OS << " '" << Node->getLabel()->getName()
+ << "':" << (void*)Node->getLabel();
+}
+
+//===----------------------------------------------------------------------===//
+// Expr printing methods.
+//===----------------------------------------------------------------------===//
+
+void StmtDumper::VisitExpr(Expr *Node) {
+ DumpExpr(Node);
+}
+
+static void DumpBasePath(raw_ostream &OS, CastExpr *Node) {
+ if (Node->path_empty())
+ return;
+
+ OS << " (";
+ bool First = true;
+ for (CastExpr::path_iterator
+ I = Node->path_begin(), E = Node->path_end(); I != E; ++I) {
+ const CXXBaseSpecifier *Base = *I;
+ if (!First)
+ OS << " -> ";
+
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ if (Base->isVirtual())
+ OS << "virtual ";
+ OS << RD->getName();
+ First = false;
+ }
+
+ OS << ')';
+}
+
+void StmtDumper::VisitCastExpr(CastExpr *Node) {
+ DumpExpr(Node);
+ OS << " <" << Node->getCastKindName();
+ DumpBasePath(OS, Node);
+ OS << ">";
+}
+
+void StmtDumper::VisitDeclRefExpr(DeclRefExpr *Node) {
+ DumpExpr(Node);
+
+ OS << " ";
+ DumpDeclRef(Node->getDecl());
+ if (Node->getDecl() != Node->getFoundDecl()) {
+ OS << " (";
+ DumpDeclRef(Node->getFoundDecl());
+ OS << ")";
+ }
+}
+
+void StmtDumper::DumpDeclRef(Decl *d) {
+ OS << d->getDeclKindName() << ' ' << (void*) d;
+
+ if (NamedDecl *nd = dyn_cast<NamedDecl>(d)) {
+ OS << " '";
+ nd->getDeclName().printName(OS);
+ OS << "'";
+ }
+
+ if (ValueDecl *vd = dyn_cast<ValueDecl>(d)) {
+ OS << ' '; DumpType(vd->getType());
+ }
+}
+
+void StmtDumper::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *Node) {
+ DumpExpr(Node);
+ OS << " (";
+ if (!Node->requiresADL()) OS << "no ";
+ OS << "ADL) = '" << Node->getName() << '\'';
+
+ UnresolvedLookupExpr::decls_iterator
+ I = Node->decls_begin(), E = Node->decls_end();
+ if (I == E) OS << " empty";
+ for (; I != E; ++I)
+ OS << " " << (void*) *I;
+}
+
+void StmtDumper::VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node) {
+ DumpExpr(Node);
+
+ OS << " " << Node->getDecl()->getDeclKindName()
+ << "Decl='" << *Node->getDecl()
+ << "' " << (void*)Node->getDecl();
+ if (Node->isFreeIvar())
+ OS << " isFreeIvar";
+}
+
+void StmtDumper::VisitPredefinedExpr(PredefinedExpr *Node) {
+ DumpExpr(Node);
+ switch (Node->getIdentType()) {
+ default: llvm_unreachable("unknown case");
+ case PredefinedExpr::Func: OS << " __func__"; break;
+ case PredefinedExpr::Function: OS << " __FUNCTION__"; break;
+ case PredefinedExpr::PrettyFunction: OS << " __PRETTY_FUNCTION__";break;
+ }
+}
+
+void StmtDumper::VisitCharacterLiteral(CharacterLiteral *Node) {
+ DumpExpr(Node);
+ OS << " " << Node->getValue();
+}
+
+void StmtDumper::VisitIntegerLiteral(IntegerLiteral *Node) {
+ DumpExpr(Node);
+
+ bool isSigned = Node->getType()->isSignedIntegerType();
+ OS << " " << Node->getValue().toString(10, isSigned);
+}
+void StmtDumper::VisitFloatingLiteral(FloatingLiteral *Node) {
+ DumpExpr(Node);
+ OS << " " << Node->getValueAsApproximateDouble();
+}
+
+void StmtDumper::VisitStringLiteral(StringLiteral *Str) {
+ DumpExpr(Str);
+ // FIXME: this doesn't print wstrings right.
+ OS << " ";
+ switch (Str->getKind()) {
+ case StringLiteral::Ascii: break; // No prefix
+ case StringLiteral::Wide: OS << 'L'; break;
+ case StringLiteral::UTF8: OS << "u8"; break;
+ case StringLiteral::UTF16: OS << 'u'; break;
+ case StringLiteral::UTF32: OS << 'U'; break;
+ }
+ OS << '"';
+ OS.write_escaped(Str->getString());
+ OS << '"';
+}
+
+void StmtDumper::VisitUnaryOperator(UnaryOperator *Node) {
+ DumpExpr(Node);
+ OS << " " << (Node->isPostfix() ? "postfix" : "prefix")
+ << " '" << UnaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
+}
+void StmtDumper::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node) {
+ DumpExpr(Node);
+ switch(Node->getKind()) {
+ case UETT_SizeOf:
+ OS << " sizeof ";
+ break;
+ case UETT_AlignOf:
+ OS << " __alignof ";
+ break;
+ case UETT_VecStep:
+ OS << " vec_step ";
+ break;
+ }
+ if (Node->isArgumentType())
+ DumpType(Node->getArgumentType());
+}
+
+void StmtDumper::VisitMemberExpr(MemberExpr *Node) {
+ DumpExpr(Node);
+ OS << " " << (Node->isArrow() ? "->" : ".")
+ << *Node->getMemberDecl() << ' '
+ << (void*)Node->getMemberDecl();
+}
+void StmtDumper::VisitExtVectorElementExpr(ExtVectorElementExpr *Node) {
+ DumpExpr(Node);
+ OS << " " << Node->getAccessor().getNameStart();
+}
+void StmtDumper::VisitBinaryOperator(BinaryOperator *Node) {
+ DumpExpr(Node);
+ OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode()) << "'";
+}
+void StmtDumper::VisitCompoundAssignOperator(CompoundAssignOperator *Node) {
+ DumpExpr(Node);
+ OS << " '" << BinaryOperator::getOpcodeStr(Node->getOpcode())
+ << "' ComputeLHSTy=";
+ DumpType(Node->getComputationLHSType());
+ OS << " ComputeResultTy=";
+ DumpType(Node->getComputationResultType());
+}
+
+void StmtDumper::VisitBlockExpr(BlockExpr *Node) {
+ DumpExpr(Node);
+
+ BlockDecl *block = Node->getBlockDecl();
+ OS << " decl=" << block;
+
+ IndentLevel++;
+ if (block->capturesCXXThis()) {
+ OS << '\n'; Indent(); OS << "(capture this)";
+ }
+ for (BlockDecl::capture_iterator
+ i = block->capture_begin(), e = block->capture_end(); i != e; ++i) {
+ OS << '\n';
+ Indent();
+ OS << "(capture ";
+ if (i->isByRef()) OS << "byref ";
+ if (i->isNested()) OS << "nested ";
+ if (i->getVariable())
+ DumpDeclRef(i->getVariable());
+ if (i->hasCopyExpr()) DumpSubTree(i->getCopyExpr());
+ OS << ")";
+ }
+ IndentLevel--;
+
+ OS << '\n';
+ DumpSubTree(block->getBody());
+}
+
+void StmtDumper::VisitOpaqueValueExpr(OpaqueValueExpr *Node) {
+ DumpExpr(Node);
+
+ if (Expr *Source = Node->getSourceExpr()) {
+ OS << '\n';
+ DumpSubTree(Source);
+ }
+}
+
+// GNU extensions.
+
+void StmtDumper::VisitAddrLabelExpr(AddrLabelExpr *Node) {
+ DumpExpr(Node);
+ OS << " " << Node->getLabel()->getName()
+ << " " << (void*)Node->getLabel();
+}
+
+//===----------------------------------------------------------------------===//
+// C++ Expressions
+//===----------------------------------------------------------------------===//
+
+void StmtDumper::VisitCXXNamedCastExpr(CXXNamedCastExpr *Node) {
+ DumpExpr(Node);
+ OS << " " << Node->getCastName()
+ << "<" << Node->getTypeAsWritten().getAsString() << ">"
+ << " <" << Node->getCastKindName();
+ DumpBasePath(OS, Node);
+ OS << ">";
+}
+
+void StmtDumper::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *Node) {
+ DumpExpr(Node);
+ OS << " " << (Node->getValue() ? "true" : "false");
+}
+
+void StmtDumper::VisitCXXThisExpr(CXXThisExpr *Node) {
+ DumpExpr(Node);
+ OS << " this";
+}
+
+void StmtDumper::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node) {
+ DumpExpr(Node);
+ OS << " functional cast to " << Node->getTypeAsWritten().getAsString()
+ << " <" << Node->getCastKindName() << ">";
+}
+
+void StmtDumper::VisitCXXConstructExpr(CXXConstructExpr *Node) {
+ DumpExpr(Node);
+ CXXConstructorDecl *Ctor = Node->getConstructor();
+ DumpType(Ctor->getType());
+ if (Node->isElidable())
+ OS << " elidable";
+ if (Node->requiresZeroInitialization())
+ OS << " zeroing";
+}
+
+void StmtDumper::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node) {
+ DumpExpr(Node);
+ OS << " ";
+ DumpCXXTemporary(Node->getTemporary());
+}
+
+void StmtDumper::VisitExprWithCleanups(ExprWithCleanups *Node) {
+ DumpExpr(Node);
+ ++IndentLevel;
+ for (unsigned i = 0, e = Node->getNumObjects(); i != e; ++i) {
+ OS << "\n";
+ Indent();
+ OS << "(cleanup ";
+ DumpDeclRef(Node->getObject(i));
+ OS << ")";
+ }
+ --IndentLevel;
+}
+
+void StmtDumper::DumpCXXTemporary(CXXTemporary *Temporary) {
+ OS << "(CXXTemporary " << (void *)Temporary << ")";
+}
+
+//===----------------------------------------------------------------------===//
+// Obj-C Expressions
+//===----------------------------------------------------------------------===//
+
+void StmtDumper::VisitObjCMessageExpr(ObjCMessageExpr* Node) {
+ DumpExpr(Node);
+ OS << " selector=" << Node->getSelector().getAsString();
+ switch (Node->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ break;
+
+ case ObjCMessageExpr::Class:
+ OS << " class=";
+ DumpType(Node->getClassReceiver());
+ break;
+
+ case ObjCMessageExpr::SuperInstance:
+ OS << " super (instance)";
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ OS << " super (class)";
+ break;
+ }
+}
+
+void StmtDumper::VisitObjCAtCatchStmt(ObjCAtCatchStmt *Node) {
+ DumpStmt(Node);
+ if (VarDecl *CatchParam = Node->getCatchParamDecl()) {
+ OS << " catch parm = ";
+ DumpDeclarator(CatchParam);
+ } else {
+ OS << " catch all";
+ }
+}
+
+void StmtDumper::VisitObjCEncodeExpr(ObjCEncodeExpr *Node) {
+ DumpExpr(Node);
+ OS << " ";
+ DumpType(Node->getEncodedType());
+}
+
+void StmtDumper::VisitObjCSelectorExpr(ObjCSelectorExpr *Node) {
+ DumpExpr(Node);
+
+ OS << " " << Node->getSelector().getAsString();
+}
+
+void StmtDumper::VisitObjCProtocolExpr(ObjCProtocolExpr *Node) {
+ DumpExpr(Node);
+
+ OS << ' ' << *Node->getProtocol();
+}
+
+void StmtDumper::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node) {
+ DumpExpr(Node);
+ if (Node->isImplicitProperty()) {
+ OS << " Kind=MethodRef Getter=\"";
+ if (Node->getImplicitPropertyGetter())
+ OS << Node->getImplicitPropertyGetter()->getSelector().getAsString();
+ else
+ OS << "(null)";
+
+ OS << "\" Setter=\"";
+ if (ObjCMethodDecl *Setter = Node->getImplicitPropertySetter())
+ OS << Setter->getSelector().getAsString();
+ else
+ OS << "(null)";
+ OS << "\"";
+ } else {
+ OS << " Kind=PropertyRef Property=\"" << *Node->getExplicitProperty() <<'"';
+ }
+
+ if (Node->isSuperReceiver())
+ OS << " super";
+
+ OS << " Messaging=";
+ if (Node->isMessagingGetter() && Node->isMessagingSetter())
+ OS << "Getter&Setter";
+ else if (Node->isMessagingGetter())
+ OS << "Getter";
+ else if (Node->isMessagingSetter())
+ OS << "Setter";
+}
+
+void StmtDumper::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *Node) {
+ DumpExpr(Node);
+ if (Node->isArraySubscriptRefExpr())
+ OS << " Kind=ArraySubscript GetterForArray=\"";
+ else
+ OS << " Kind=DictionarySubscript GetterForDictionary=\"";
+ if (Node->getAtIndexMethodDecl())
+ OS << Node->getAtIndexMethodDecl()->getSelector().getAsString();
+ else
+ OS << "(null)";
+
+ if (Node->isArraySubscriptRefExpr())
+ OS << "\" SetterForArray=\"";
+ else
+ OS << "\" SetterForDictionary=\"";
+ if (Node->setAtIndexMethodDecl())
+ OS << Node->setAtIndexMethodDecl()->getSelector().getAsString();
+ else
+ OS << "(null)";
+}
+
+void StmtDumper::VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Node) {
+ DumpExpr(Node);
+ OS << " " << (Node->getValue() ? "__objc_yes" : "__objc_no");
+}
+
+//===----------------------------------------------------------------------===//
+// Stmt method implementations
+//===----------------------------------------------------------------------===//
+
+/// dump - This does a local dump of the specified AST fragment. It dumps the
+/// specified node and a few nodes underneath it, but not the whole subtree.
+/// This is useful in a debugger.
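+/// (A common use is calling S->dump(SM) on a Stmt pointer from a debugger.)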
+void Stmt::dump(SourceManager &SM) const {
+ dump(llvm::errs(), SM);
+}
+
+void Stmt::dump(raw_ostream &OS, SourceManager &SM) const {
+ StmtDumper P(&SM, OS, 4);
+ P.DumpSubTree(const_cast<Stmt*>(this));
+ OS << "\n";
+}
+
+/// dump - This does a local dump of the specified AST fragment. It dumps the
+/// specified node and a few nodes underneath it, but not the whole subtree.
+/// This is useful in a debugger.
+void Stmt::dump() const {
+ StmtDumper P(0, llvm::errs(), 4);
+ P.DumpSubTree(const_cast<Stmt*>(this));
+ llvm::errs() << "\n";
+}
+
+/// dumpAll - This does a dump of the specified AST fragment and all subtrees.
+void Stmt::dumpAll(SourceManager &SM) const {
+ StmtDumper P(&SM, llvm::errs(), ~0U);
+ P.DumpSubTree(const_cast<Stmt*>(this));
+ llvm::errs() << "\n";
+}
+
+/// dumpAll - This does a dump of the specified AST fragment and all subtrees.
+void Stmt::dumpAll() const {
+ StmtDumper P(0, llvm::errs(), ~0U);
+ P.DumpSubTree(const_cast<Stmt*>(this));
+ llvm::errs() << "\n";
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtIterator.cpp b/contrib/llvm/tools/clang/lib/AST/StmtIterator.cpp
new file mode 100644
index 0000000..9bf4aea
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/StmtIterator.cpp
@@ -0,0 +1,155 @@
+//===--- StmtIterator.cpp - Iterators for Statements -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines internal methods for StmtIterator.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtIterator.h"
+#include "clang/AST/Decl.h"
+
+using namespace clang;
+
+// FIXME: Add support for dependent-sized array types in C++?
+// Does it even make sense to build a CFG for an uninstantiated template?
+static inline const VariableArrayType *FindVA(const Type* t) {
+ while (const ArrayType *vt = dyn_cast<ArrayType>(t)) {
+ if (const VariableArrayType *vat = dyn_cast<VariableArrayType>(vt))
+ if (vat->getSizeExpr())
+ return vat;
+
+ t = vt->getElementType().getTypePtr();
+ }
+
+ return NULL;
+}
+
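+/// NextVA - Advance past the current variable-length array size expression:
+/// move to the next VLA nested in the element type, or, when none remains,
+/// fall back to the declaration's initializer or advance to the next decl.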
+void StmtIteratorBase::NextVA() {
+ assert (getVAPtr());
+
+ const VariableArrayType *p = getVAPtr();
+ p = FindVA(p->getElementType().getTypePtr());
+ setVAPtr(p);
+
+ if (p)
+ return;
+
+ if (inDecl()) {
+ if (VarDecl* VD = dyn_cast<VarDecl>(decl))
+ if (VD->Init)
+ return;
+
+ NextDecl();
+ }
+ else if (inDeclGroup()) {
+ if (VarDecl* VD = dyn_cast<VarDecl>(*DGI))
+ if (VD->Init)
+ return;
+
+ NextDecl();
+ }
+ else {
+ assert (inSizeOfTypeVA());
+ assert(!decl);
+ RawVAPtr = 0;
+ }
+}
+
+void StmtIteratorBase::NextDecl(bool ImmediateAdvance) {
+ assert (getVAPtr() == NULL);
+
+ if (inDecl()) {
+ assert(decl);
+
+ // FIXME: SIMPLIFY AWAY.
+ if (ImmediateAdvance)
+ decl = 0;
+ else if (HandleDecl(decl))
+ return;
+ }
+ else {
+ assert(inDeclGroup());
+
+ if (ImmediateAdvance)
+ ++DGI;
+
+ for ( ; DGI != DGE; ++DGI)
+ if (HandleDecl(*DGI))
+ return;
+ }
+
+ RawVAPtr = 0;
+}
+
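+/// HandleDecl - Return true if this declaration contributes a statement to
+/// iterate over (a VLA size expression or an initializer); otherwise the
+/// caller should advance to the next declaration.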
+bool StmtIteratorBase::HandleDecl(Decl* D) {
+
+ if (VarDecl* VD = dyn_cast<VarDecl>(D)) {
+ if (const VariableArrayType* VAPtr = FindVA(VD->getType().getTypePtr())) {
+ setVAPtr(VAPtr);
+ return true;
+ }
+
+ if (VD->getInit())
+ return true;
+ }
+ else if (TypedefNameDecl* TD = dyn_cast<TypedefNameDecl>(D)) {
+ if (const VariableArrayType* VAPtr =
+ FindVA(TD->getUnderlyingType().getTypePtr())) {
+ setVAPtr(VAPtr);
+ return true;
+ }
+ }
+ else if (EnumConstantDecl* ECD = dyn_cast<EnumConstantDecl>(D)) {
+ if (ECD->getInitExpr())
+ return true;
+ }
+
+ return false;
+}
+
+StmtIteratorBase::StmtIteratorBase(Decl *d, Stmt **s)
+ : stmt(s), decl(d), RawVAPtr(d ? DeclMode : 0) {
+ if (decl)
+ NextDecl(false);
+}
+
+StmtIteratorBase::StmtIteratorBase(Decl** dgi, Decl** dge)
+ : stmt(0), DGI(dgi), RawVAPtr(DeclGroupMode), DGE(dge) {
+ NextDecl(false);
+}
+
+StmtIteratorBase::StmtIteratorBase(const VariableArrayType* t)
+ : stmt(0), decl(0), RawVAPtr(SizeOfTypeVAMode) {
+ RawVAPtr |= reinterpret_cast<uintptr_t>(t);
+}
+
+Stmt*& StmtIteratorBase::GetDeclExpr() const {
+
+ if (const VariableArrayType* VAPtr = getVAPtr()) {
+ assert (VAPtr->SizeExpr);
+ return const_cast<Stmt*&>(VAPtr->SizeExpr);
+ }
+
+ assert (inDecl() || inDeclGroup());
+
+ if (inDeclGroup()) {
+ VarDecl* VD = cast<VarDecl>(*DGI);
+ return *VD->getInitAddress();
+ }
+
+ assert (inDecl());
+
+ if (VarDecl* VD = dyn_cast<VarDecl>(decl)) {
+ assert (VD->Init);
+ return *VD->getInitAddress();
+ }
+
+ EnumConstantDecl* ECD = cast<EnumConstantDecl>(decl);
+ return ECD->Init;
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
new file mode 100644
index 0000000..3a44183
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/StmtPrinter.cpp
@@ -0,0 +1,1885 @@
+//===--- StmtPrinter.cpp - Printing implementation for Stmt ASTs ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Stmt::dumpPretty/Stmt::printPretty methods, which
+// pretty print the AST back out to C code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// StmtPrinter Visitor
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class StmtPrinter : public StmtVisitor<StmtPrinter> {
+ raw_ostream &OS;
+ ASTContext &Context;
+ unsigned IndentLevel;
+ clang::PrinterHelper* Helper;
+ PrintingPolicy Policy;
+
+ public:
+ StmtPrinter(raw_ostream &os, ASTContext &C, PrinterHelper* helper,
+ const PrintingPolicy &Policy,
+ unsigned Indentation = 0)
+ : OS(os), Context(C), IndentLevel(Indentation), Helper(helper),
+ Policy(Policy) {}
+
+ void PrintStmt(Stmt *S) {
+ PrintStmt(S, Policy.Indentation);
+ }
+
+ void PrintStmt(Stmt *S, int SubIndent) {
+ IndentLevel += SubIndent;
+ if (S && isa<Expr>(S)) {
+ // If this is an expr used in a stmt context, indent and newline it.
+ Indent();
+ Visit(S);
+ OS << ";\n";
+ } else if (S) {
+ Visit(S);
+ } else {
+ Indent() << "<<<NULL STATEMENT>>>\n";
+ }
+ IndentLevel -= SubIndent;
+ }
+
+ void PrintRawCompoundStmt(CompoundStmt *S);
+ void PrintRawDecl(Decl *D);
+ void PrintRawDeclStmt(DeclStmt *S);
+ void PrintRawIfStmt(IfStmt *If);
+ void PrintRawCXXCatchStmt(CXXCatchStmt *Catch);
+ void PrintCallArgs(CallExpr *E);
+ void PrintRawSEHExceptHandler(SEHExceptStmt *S);
+ void PrintRawSEHFinallyStmt(SEHFinallyStmt *S);
+
+ void PrintExpr(Expr *E) {
+ if (E)
+ Visit(E);
+ else
+ OS << "<null expr>";
+ }
+
+ raw_ostream &Indent(int Delta = 0) {
+ for (int i = 0, e = IndentLevel+Delta; i < e; ++i)
+ OS << " ";
+ return OS;
+ }
+
+ void Visit(Stmt* S) {
+ if (Helper && Helper->handledStmt(S,OS))
+ return;
+ else StmtVisitor<StmtPrinter>::Visit(S);
+ }
+
+ void VisitStmt(Stmt *Node) LLVM_ATTRIBUTE_UNUSED {
+ Indent() << "<<unknown stmt type>>\n";
+ }
+ void VisitExpr(Expr *Node) LLVM_ATTRIBUTE_UNUSED {
+ OS << "<<unknown expr type>>";
+ }
+ void VisitCXXNamedCastExpr(CXXNamedCastExpr *Node);
+
+#define ABSTRACT_STMT(CLASS)
+#define STMT(CLASS, PARENT) \
+ void Visit##CLASS(CLASS *Node);
+#include "clang/AST/StmtNodes.inc"
+ };
+}
+
+//===----------------------------------------------------------------------===//
+// Stmt printing methods.
+//===----------------------------------------------------------------------===//
+
+/// PrintRawCompoundStmt - Print a compound stmt without indenting the {, and
+/// with no newline after the }.
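+/// This lets callers append text such as " else" or ";" on the same line; see
+/// PrintRawIfStmt below.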
+void StmtPrinter::PrintRawCompoundStmt(CompoundStmt *Node) {
+ OS << "{\n";
+ for (CompoundStmt::body_iterator I = Node->body_begin(), E = Node->body_end();
+ I != E; ++I)
+ PrintStmt(*I);
+
+ Indent() << "}";
+}
+
+void StmtPrinter::PrintRawDecl(Decl *D) {
+ D->print(OS, Policy, IndentLevel);
+}
+
+void StmtPrinter::PrintRawDeclStmt(DeclStmt *S) {
+ DeclStmt::decl_iterator Begin = S->decl_begin(), End = S->decl_end();
+ SmallVector<Decl*, 2> Decls;
+ for ( ; Begin != End; ++Begin)
+ Decls.push_back(*Begin);
+
+ Decl::printGroup(Decls.data(), Decls.size(), OS, Policy, IndentLevel);
+}
+
+void StmtPrinter::VisitNullStmt(NullStmt *Node) {
+ Indent() << ";\n";
+}
+
+void StmtPrinter::VisitDeclStmt(DeclStmt *Node) {
+ Indent();
+ PrintRawDeclStmt(Node);
+ OS << ";\n";
+}
+
+void StmtPrinter::VisitCompoundStmt(CompoundStmt *Node) {
+ Indent();
+ PrintRawCompoundStmt(Node);
+ OS << "\n";
+}
+
+void StmtPrinter::VisitCaseStmt(CaseStmt *Node) {
+ Indent(-1) << "case ";
+ PrintExpr(Node->getLHS());
+ if (Node->getRHS()) {
+ OS << " ... ";
+ PrintExpr(Node->getRHS());
+ }
+ OS << ":\n";
+
+ PrintStmt(Node->getSubStmt(), 0);
+}
+
+void StmtPrinter::VisitDefaultStmt(DefaultStmt *Node) {
+ Indent(-1) << "default:\n";
+ PrintStmt(Node->getSubStmt(), 0);
+}
+
+void StmtPrinter::VisitLabelStmt(LabelStmt *Node) {
+ Indent(-1) << Node->getName() << ":\n";
+ PrintStmt(Node->getSubStmt(), 0);
+}
+
+void StmtPrinter::PrintRawIfStmt(IfStmt *If) {
+ OS << "if (";
+ PrintExpr(If->getCond());
+ OS << ')';
+
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(If->getThen())) {
+ OS << ' ';
+ PrintRawCompoundStmt(CS);
+ OS << (If->getElse() ? ' ' : '\n');
+ } else {
+ OS << '\n';
+ PrintStmt(If->getThen());
+ if (If->getElse()) Indent();
+ }
+
+ if (Stmt *Else = If->getElse()) {
+ OS << "else";
+
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Else)) {
+ OS << ' ';
+ PrintRawCompoundStmt(CS);
+ OS << '\n';
+ } else if (IfStmt *ElseIf = dyn_cast<IfStmt>(Else)) {
+ OS << ' ';
+ PrintRawIfStmt(ElseIf);
+ } else {
+ OS << '\n';
+ PrintStmt(If->getElse());
+ }
+ }
+}
+
+void StmtPrinter::VisitIfStmt(IfStmt *If) {
+ Indent();
+ PrintRawIfStmt(If);
+}
+
+void StmtPrinter::VisitSwitchStmt(SwitchStmt *Node) {
+ Indent() << "switch (";
+ PrintExpr(Node->getCond());
+ OS << ")";
+
+ // Pretty print compoundstmt bodies (very common).
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ OS << " ";
+ PrintRawCompoundStmt(CS);
+ OS << "\n";
+ } else {
+ OS << "\n";
+ PrintStmt(Node->getBody());
+ }
+}
+
+void StmtPrinter::VisitWhileStmt(WhileStmt *Node) {
+ Indent() << "while (";
+ PrintExpr(Node->getCond());
+ OS << ")\n";
+ PrintStmt(Node->getBody());
+}
+
+void StmtPrinter::VisitDoStmt(DoStmt *Node) {
+ Indent() << "do ";
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ PrintRawCompoundStmt(CS);
+ OS << " ";
+ } else {
+ OS << "\n";
+ PrintStmt(Node->getBody());
+ Indent();
+ }
+
+ OS << "while (";
+ PrintExpr(Node->getCond());
+ OS << ");\n";
+}
+
+void StmtPrinter::VisitForStmt(ForStmt *Node) {
+ Indent() << "for (";
+ if (Node->getInit()) {
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(Node->getInit()))
+ PrintRawDeclStmt(DS);
+ else
+ PrintExpr(cast<Expr>(Node->getInit()));
+ }
+ OS << ";";
+ if (Node->getCond()) {
+ OS << " ";
+ PrintExpr(Node->getCond());
+ }
+ OS << ";";
+ if (Node->getInc()) {
+ OS << " ";
+ PrintExpr(Node->getInc());
+ }
+ OS << ") ";
+
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ PrintRawCompoundStmt(CS);
+ OS << "\n";
+ } else {
+ OS << "\n";
+ PrintStmt(Node->getBody());
+ }
+}
+
+void StmtPrinter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *Node) {
+ Indent() << "for (";
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(Node->getElement()))
+ PrintRawDeclStmt(DS);
+ else
+ PrintExpr(cast<Expr>(Node->getElement()));
+ OS << " in ";
+ PrintExpr(Node->getCollection());
+ OS << ") ";
+
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Node->getBody())) {
+ PrintRawCompoundStmt(CS);
+ OS << "\n";
+ } else {
+ OS << "\n";
+ PrintStmt(Node->getBody());
+ }
+}
+
+void StmtPrinter::VisitCXXForRangeStmt(CXXForRangeStmt *Node) {
+ Indent() << "for (";
+ PrintingPolicy SubPolicy(Policy);
+ SubPolicy.SuppressInitializers = true;
+ Node->getLoopVariable()->print(OS, SubPolicy, IndentLevel);
+ OS << " : ";
+ PrintExpr(Node->getRangeInit());
+ OS << ") {\n";
+ PrintStmt(Node->getBody());
+ Indent() << "}\n";
+}
+
+void StmtPrinter::VisitMSDependentExistsStmt(MSDependentExistsStmt *Node) {
+ Indent();
+ if (Node->isIfExists())
+ OS << "__if_exists (";
+ else
+ OS << "__if_not_exists (";
+
+ if (NestedNameSpecifier *Qualifier
+ = Node->getQualifierLoc().getNestedNameSpecifier())
+ Qualifier->print(OS, Policy);
+
+ OS << Node->getNameInfo() << ") ";
+
+ PrintRawCompoundStmt(Node->getSubStmt());
+}
+
+void StmtPrinter::VisitGotoStmt(GotoStmt *Node) {
+ Indent() << "goto " << Node->getLabel()->getName() << ";\n";
+}
+
+void StmtPrinter::VisitIndirectGotoStmt(IndirectGotoStmt *Node) {
+ Indent() << "goto *";
+ PrintExpr(Node->getTarget());
+ OS << ";\n";
+}
+
+void StmtPrinter::VisitContinueStmt(ContinueStmt *Node) {
+ Indent() << "continue;\n";
+}
+
+void StmtPrinter::VisitBreakStmt(BreakStmt *Node) {
+ Indent() << "break;\n";
+}
+
+
+void StmtPrinter::VisitReturnStmt(ReturnStmt *Node) {
+ Indent() << "return";
+ if (Node->getRetValue()) {
+ OS << " ";
+ PrintExpr(Node->getRetValue());
+ }
+ OS << ";\n";
+}
+
+
+void StmtPrinter::VisitAsmStmt(AsmStmt *Node) {
+ Indent() << "asm ";
+
+ if (Node->isVolatile())
+ OS << "volatile ";
+
+ OS << "(";
+ VisitStringLiteral(Node->getAsmString());
+
+ // Outputs
+ if (Node->getNumOutputs() != 0 || Node->getNumInputs() != 0 ||
+ Node->getNumClobbers() != 0)
+ OS << " : ";
+
+ for (unsigned i = 0, e = Node->getNumOutputs(); i != e; ++i) {
+ if (i != 0)
+ OS << ", ";
+
+ if (!Node->getOutputName(i).empty()) {
+ OS << '[';
+ OS << Node->getOutputName(i);
+ OS << "] ";
+ }
+
+ VisitStringLiteral(Node->getOutputConstraintLiteral(i));
+ OS << " ";
+ Visit(Node->getOutputExpr(i));
+ }
+
+ // Inputs
+ if (Node->getNumInputs() != 0 || Node->getNumClobbers() != 0)
+ OS << " : ";
+
+ for (unsigned i = 0, e = Node->getNumInputs(); i != e; ++i) {
+ if (i != 0)
+ OS << ", ";
+
+ if (!Node->getInputName(i).empty()) {
+ OS << '[';
+ OS << Node->getInputName(i);
+ OS << "] ";
+ }
+
+ VisitStringLiteral(Node->getInputConstraintLiteral(i));
+ OS << " ";
+ Visit(Node->getInputExpr(i));
+ }
+
+ // Clobbers
+ if (Node->getNumClobbers() != 0)
+ OS << " : ";
+
+ for (unsigned i = 0, e = Node->getNumClobbers(); i != e; ++i) {
+ if (i != 0)
+ OS << ", ";
+
+ VisitStringLiteral(Node->getClobber(i));
+ }
+
+ OS << ");\n";
+}
+
+void StmtPrinter::VisitObjCAtTryStmt(ObjCAtTryStmt *Node) {
+ Indent() << "@try";
+ if (CompoundStmt *TS = dyn_cast<CompoundStmt>(Node->getTryBody())) {
+ PrintRawCompoundStmt(TS);
+ OS << "\n";
+ }
+
+ for (unsigned I = 0, N = Node->getNumCatchStmts(); I != N; ++I) {
+ ObjCAtCatchStmt *catchStmt = Node->getCatchStmt(I);
+ Indent() << "@catch(";
+ if (catchStmt->getCatchParamDecl()) {
+ if (Decl *DS = catchStmt->getCatchParamDecl())
+ PrintRawDecl(DS);
+ }
+ OS << ")";
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(catchStmt->getCatchBody())) {
+ PrintRawCompoundStmt(CS);
+ OS << "\n";
+ }
+ }
+
+ if (ObjCAtFinallyStmt *FS = static_cast<ObjCAtFinallyStmt *>(
+ Node->getFinallyStmt())) {
+ Indent() << "@finally";
+ PrintRawCompoundStmt(dyn_cast<CompoundStmt>(FS->getFinallyBody()));
+ OS << "\n";
+ }
+}
+
+void StmtPrinter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *Node) {
+}
+
+void StmtPrinter::VisitObjCAtCatchStmt (ObjCAtCatchStmt *Node) {
+ Indent() << "@catch (...) { /* todo */ } \n";
+}
+
+void StmtPrinter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *Node) {
+ Indent() << "@throw";
+ if (Node->getThrowExpr()) {
+ OS << " ";
+ PrintExpr(Node->getThrowExpr());
+ }
+ OS << ";\n";
+}
+
+void StmtPrinter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *Node) {
+ Indent() << "@synchronized (";
+ PrintExpr(Node->getSynchExpr());
+ OS << ")";
+ PrintRawCompoundStmt(Node->getSynchBody());
+ OS << "\n";
+}
+
+void StmtPrinter::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *Node) {
+ Indent() << "@autoreleasepool";
+ PrintRawCompoundStmt(dyn_cast<CompoundStmt>(Node->getSubStmt()));
+ OS << "\n";
+}
+
+void StmtPrinter::PrintRawCXXCatchStmt(CXXCatchStmt *Node) {
+ OS << "catch (";
+ if (Decl *ExDecl = Node->getExceptionDecl())
+ PrintRawDecl(ExDecl);
+ else
+ OS << "...";
+ OS << ") ";
+ PrintRawCompoundStmt(cast<CompoundStmt>(Node->getHandlerBlock()));
+}
+
+void StmtPrinter::VisitCXXCatchStmt(CXXCatchStmt *Node) {
+ Indent();
+ PrintRawCXXCatchStmt(Node);
+ OS << "\n";
+}
+
+void StmtPrinter::VisitCXXTryStmt(CXXTryStmt *Node) {
+ Indent() << "try ";
+ PrintRawCompoundStmt(Node->getTryBlock());
+ for (unsigned i = 0, e = Node->getNumHandlers(); i < e; ++i) {
+ OS << " ";
+ PrintRawCXXCatchStmt(Node->getHandler(i));
+ }
+ OS << "\n";
+}
+
+void StmtPrinter::VisitSEHTryStmt(SEHTryStmt *Node) {
+ Indent() << (Node->getIsCXXTry() ? "try " : "__try ");
+ PrintRawCompoundStmt(Node->getTryBlock());
+ SEHExceptStmt *E = Node->getExceptHandler();
+ SEHFinallyStmt *F = Node->getFinallyHandler();
+ if (E)
+ PrintRawSEHExceptHandler(E);
+ else {
+ assert(F && "Must have a finally block...");
+ PrintRawSEHFinallyStmt(F);
+ }
+ OS << "\n";
+}
+
+void StmtPrinter::PrintRawSEHFinallyStmt(SEHFinallyStmt *Node) {
+ OS << "__finally ";
+ PrintRawCompoundStmt(Node->getBlock());
+ OS << "\n";
+}
+
+void StmtPrinter::PrintRawSEHExceptHandler(SEHExceptStmt *Node) {
+ OS << "__except (";
+ VisitExpr(Node->getFilterExpr());
+ OS << ")\n";
+ PrintRawCompoundStmt(Node->getBlock());
+ OS << "\n";
+}
+
+void StmtPrinter::VisitSEHExceptStmt(SEHExceptStmt *Node) {
+ Indent();
+ PrintRawSEHExceptHandler(Node);
+ OS << "\n";
+}
+
+void StmtPrinter::VisitSEHFinallyStmt(SEHFinallyStmt *Node) {
+ Indent();
+ PrintRawSEHFinallyStmt(Node);
+ OS << "\n";
+}
+
+//===----------------------------------------------------------------------===//
+// Expr printing methods.
+//===----------------------------------------------------------------------===//
+
+void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
+ if (NestedNameSpecifier *Qualifier = Node->getQualifier())
+ Qualifier->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getNameInfo();
+ if (Node->hasExplicitTemplateArgs())
+ OS << TemplateSpecializationType::PrintTemplateArgumentList(
+ Node->getTemplateArgs(),
+ Node->getNumTemplateArgs(),
+ Policy);
+}
+
+void StmtPrinter::VisitDependentScopeDeclRefExpr(
+ DependentScopeDeclRefExpr *Node) {
+ if (NestedNameSpecifier *Qualifier = Node->getQualifier())
+ Qualifier->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getNameInfo();
+ if (Node->hasExplicitTemplateArgs())
+ OS << TemplateSpecializationType::PrintTemplateArgumentList(
+ Node->getTemplateArgs(),
+ Node->getNumTemplateArgs(),
+ Policy);
+}
+
+void StmtPrinter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *Node) {
+ if (Node->getQualifier())
+ Node->getQualifier()->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getNameInfo();
+ if (Node->hasExplicitTemplateArgs())
+ OS << TemplateSpecializationType::PrintTemplateArgumentList(
+ Node->getTemplateArgs(),
+ Node->getNumTemplateArgs(),
+ Policy);
+}
+
+void StmtPrinter::VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node) {
+ if (Node->getBase()) {
+ PrintExpr(Node->getBase());
+ OS << (Node->isArrow() ? "->" : ".");
+ }
+ OS << *Node->getDecl();
+}
+
+void StmtPrinter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *Node) {
+ if (Node->isSuperReceiver())
+ OS << "super.";
+ else if (Node->getBase()) {
+ PrintExpr(Node->getBase());
+ OS << ".";
+ }
+
+ if (Node->isImplicitProperty())
+ OS << Node->getImplicitPropertyGetter()->getSelector().getAsString();
+ else
+ OS << Node->getExplicitProperty()->getName();
+}
+
+void StmtPrinter::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *Node) {
+
+ PrintExpr(Node->getBaseExpr());
+ OS << "[";
+ PrintExpr(Node->getKeyExpr());
+ OS << "]";
+}
+
+void StmtPrinter::VisitPredefinedExpr(PredefinedExpr *Node) {
+ switch (Node->getIdentType()) {
+ default:
+ llvm_unreachable("unknown case");
+ case PredefinedExpr::Func:
+ OS << "__func__";
+ break;
+ case PredefinedExpr::Function:
+ OS << "__FUNCTION__";
+ break;
+ case PredefinedExpr::PrettyFunction:
+ OS << "__PRETTY_FUNCTION__";
+ break;
+ }
+}
+
+void StmtPrinter::VisitCharacterLiteral(CharacterLiteral *Node) {
+ unsigned value = Node->getValue();
+
+ switch (Node->getKind()) {
+ case CharacterLiteral::Ascii: break; // no prefix.
+ case CharacterLiteral::Wide: OS << 'L'; break;
+ case CharacterLiteral::UTF16: OS << 'u'; break;
+ case CharacterLiteral::UTF32: OS << 'U'; break;
+ }
+
+ switch (value) {
+ case '\\':
+ OS << "'\\\\'";
+ break;
+ case '\'':
+ OS << "'\\''";
+ break;
+ case '\a':
+ // TODO: K&R: the meaning of '\\a' is different in traditional C
+ OS << "'\\a'";
+ break;
+ case '\b':
+ OS << "'\\b'";
+ break;
+ // Nonstandard escape sequence.
+ /*case '\e':
+ OS << "'\\e'";
+ break;*/
+ case '\f':
+ OS << "'\\f'";
+ break;
+ case '\n':
+ OS << "'\\n'";
+ break;
+ case '\r':
+ OS << "'\\r'";
+ break;
+ case '\t':
+ OS << "'\\t'";
+ break;
+ case '\v':
+ OS << "'\\v'";
+ break;
+ default:
+ if (value < 256 && isprint(value)) {
+ OS << "'" << (char)value << "'";
+ } else if (value < 256) {
+ OS << "'\\x";
+ OS.write_hex(value) << "'";
+ } else {
+ // FIXME: what should we really do here?
+ OS << value;
+ }
+ }
+}
+
+void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) {
+ bool isSigned = Node->getType()->isSignedIntegerType();
+ OS << Node->getValue().toString(10, isSigned);
+
+ // Emit suffixes. Integer literals are always a builtin integer type.
+ switch (Node->getType()->getAs<BuiltinType>()->getKind()) {
+ default: llvm_unreachable("Unexpected type for integer literal!");
+ // FIXME: The Short and UShort cases are to handle cases where a short
+ // integral literal is formed during template instantiation. They should
+ // be removed when template instantiation no longer needs integer literals.
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int: break; // no suffix.
+ case BuiltinType::UInt: OS << 'U'; break;
+ case BuiltinType::Long: OS << 'L'; break;
+ case BuiltinType::ULong: OS << "UL"; break;
+ case BuiltinType::LongLong: OS << "LL"; break;
+ case BuiltinType::ULongLong: OS << "ULL"; break;
+ case BuiltinType::Int128: OS << "i128"; break;
+ case BuiltinType::UInt128: OS << "Ui128"; break;
+ }
+}
+void StmtPrinter::VisitFloatingLiteral(FloatingLiteral *Node) {
+ SmallString<16> Str;
+ Node->getValue().toString(Str);
+ OS << Str;
+}
+
+void StmtPrinter::VisitImaginaryLiteral(ImaginaryLiteral *Node) {
+ PrintExpr(Node->getSubExpr());
+ OS << "i";
+}
+
+void StmtPrinter::VisitStringLiteral(StringLiteral *Str) {
+ switch (Str->getKind()) {
+ case StringLiteral::Ascii: break; // no prefix.
+ case StringLiteral::Wide: OS << 'L'; break;
+ case StringLiteral::UTF8: OS << "u8"; break;
+ case StringLiteral::UTF16: OS << 'u'; break;
+ case StringLiteral::UTF32: OS << 'U'; break;
+ }
+ OS << '"';
+ static const char Hex[] = "0123456789ABCDEF";
+
+ unsigned LastSlashX = Str->getLength();
+ for (unsigned I = 0, N = Str->getLength(); I != N; ++I) {
+ switch (uint32_t Char = Str->getCodeUnit(I)) {
+ default:
+ // FIXME: Convert UTF-8 back to codepoints before rendering.
+
+ // Convert UTF-16 surrogate pairs back to codepoints before rendering.
+ // Leave invalid surrogates alone; we'll use \x for those.
+ if (Str->getKind() == StringLiteral::UTF16 && I != N - 1 &&
+ Char >= 0xd800 && Char <= 0xdbff) {
+ uint32_t Trail = Str->getCodeUnit(I + 1);
+ if (Trail >= 0xdc00 && Trail <= 0xdfff) {
+ Char = 0x10000 + ((Char - 0xd800) << 10) + (Trail - 0xdc00);
+ ++I;
+ }
+ }
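+ // e.g. the surrogate pair 0xD83D 0xDE00 recombines to the codepoint 0x1F600
+ // and is printed as "\U0001F600" by the code below.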
+
+ if (Char > 0xff) {
+ // If this is a wide string, output characters over 0xff using \x
+ // escapes. Otherwise, this is a UTF-16 or UTF-32 string, and Char is a
+ // codepoint: use \x escapes for invalid codepoints.
+ if (Str->getKind() == StringLiteral::Wide ||
+ (Char >= 0xd800 && Char <= 0xdfff) || Char >= 0x110000) {
+ // FIXME: Is this the best way to print wchar_t?
+ OS << "\\x";
+ int Shift = 28;
+ while ((Char >> Shift) == 0)
+ Shift -= 4;
+ for (/**/; Shift >= 0; Shift -= 4)
+ OS << Hex[(Char >> Shift) & 15];
+ LastSlashX = I;
+ break;
+ }
+
+ if (Char > 0xffff)
+ OS << "\\U00"
+ << Hex[(Char >> 20) & 15]
+ << Hex[(Char >> 16) & 15];
+ else
+ OS << "\\u";
+ OS << Hex[(Char >> 12) & 15]
+ << Hex[(Char >> 8) & 15]
+ << Hex[(Char >> 4) & 15]
+ << Hex[(Char >> 0) & 15];
+ break;
+ }
+
+ // If we used \x... for the previous character, and this character is a
+ // hexadecimal digit, prevent it from being slurped into the \x escape.
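+ // For example (illustrative), a wide string with code units {0x400, 'a'}
+ // is printed as L"\x400""a", so the 'a' is not parsed as part of the escape.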
+ if (LastSlashX + 1 == I) {
+ switch (Char) {
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
+ OS << "\"\"";
+ }
+ }
+
+ if (Char <= 0xff && isprint(Char))
+ OS << (char)Char;
+ else // Output anything hard as an octal escape.
+ OS << '\\'
+ << (char)('0' + ((Char >> 6) & 7))
+ << (char)('0' + ((Char >> 3) & 7))
+ << (char)('0' + ((Char >> 0) & 7));
+ break;
+ // Handle some common non-printable cases to make dumps prettier.
+ case '\\': OS << "\\\\"; break;
+ case '"': OS << "\\\""; break;
+ case '\n': OS << "\\n"; break;
+ case '\t': OS << "\\t"; break;
+ case '\a': OS << "\\a"; break;
+ case '\b': OS << "\\b"; break;
+ }
+ }
+ OS << '"';
+}
+void StmtPrinter::VisitParenExpr(ParenExpr *Node) {
+ OS << "(";
+ PrintExpr(Node->getSubExpr());
+ OS << ")";
+}
+void StmtPrinter::VisitUnaryOperator(UnaryOperator *Node) {
+ if (!Node->isPostfix()) {
+ OS << UnaryOperator::getOpcodeStr(Node->getOpcode());
+
+ // Print a space if this is an "identifier operator" like __real, or if
+ // omitting the space could merge tokens incorrectly (e.g. '+' '+' as '++').
+ switch (Node->getOpcode()) {
+ default: break;
+ case UO_Real:
+ case UO_Imag:
+ case UO_Extension:
+ OS << ' ';
+ break;
+ case UO_Plus:
+ case UO_Minus:
+ if (isa<UnaryOperator>(Node->getSubExpr()))
+ OS << ' ';
+ break;
+ }
+ }
+ PrintExpr(Node->getSubExpr());
+
+ if (Node->isPostfix())
+ OS << UnaryOperator::getOpcodeStr(Node->getOpcode());
+}
+
+void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) {
+ OS << "__builtin_offsetof(";
+ OS << Node->getTypeSourceInfo()->getType().getAsString(Policy) << ", ";
+ bool PrintedSomething = false;
+ for (unsigned i = 0, n = Node->getNumComponents(); i < n; ++i) {
+ OffsetOfExpr::OffsetOfNode ON = Node->getComponent(i);
+ if (ON.getKind() == OffsetOfExpr::OffsetOfNode::Array) {
+ // Array node
+ OS << "[";
+ PrintExpr(Node->getIndexExpr(ON.getArrayExprIndex()));
+ OS << "]";
+ PrintedSomething = true;
+ continue;
+ }
+
+ // Skip implicit base indirections.
+ if (ON.getKind() == OffsetOfExpr::OffsetOfNode::Base)
+ continue;
+
+ // Field or identifier node.
+ IdentifierInfo *Id = ON.getFieldName();
+ if (!Id)
+ continue;
+
+ if (PrintedSomething)
+ OS << ".";
+ else
+ PrintedSomething = true;
+ OS << Id->getName();
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *Node){
+ switch(Node->getKind()) {
+ case UETT_SizeOf:
+ OS << "sizeof";
+ break;
+ case UETT_AlignOf:
+ OS << "__alignof";
+ break;
+ case UETT_VecStep:
+ OS << "vec_step";
+ break;
+ }
+ if (Node->isArgumentType())
+ OS << "(" << Node->getArgumentType().getAsString(Policy) << ")";
+ else {
+ OS << " ";
+ PrintExpr(Node->getArgumentExpr());
+ }
+}
+
+void StmtPrinter::VisitGenericSelectionExpr(GenericSelectionExpr *Node) {
+ OS << "_Generic(";
+ PrintExpr(Node->getControllingExpr());
+ for (unsigned i = 0; i != Node->getNumAssocs(); ++i) {
+ OS << ", ";
+ QualType T = Node->getAssocType(i);
+ if (T.isNull())
+ OS << "default";
+ else
+ OS << T.getAsString(Policy);
+ OS << ": ";
+ PrintExpr(Node->getAssocExpr(i));
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitArraySubscriptExpr(ArraySubscriptExpr *Node) {
+ PrintExpr(Node->getLHS());
+ OS << "[";
+ PrintExpr(Node->getRHS());
+ OS << "]";
+}
+
+void StmtPrinter::PrintCallArgs(CallExpr *Call) {
+ for (unsigned i = 0, e = Call->getNumArgs(); i != e; ++i) {
+ if (isa<CXXDefaultArgExpr>(Call->getArg(i))) {
+ // Don't print any defaulted arguments
+ break;
+ }
+
+ if (i) OS << ", ";
+ PrintExpr(Call->getArg(i));
+ }
+}
+
+void StmtPrinter::VisitCallExpr(CallExpr *Call) {
+ PrintExpr(Call->getCallee());
+ OS << "(";
+ PrintCallArgs(Call);
+ OS << ")";
+}
+void StmtPrinter::VisitMemberExpr(MemberExpr *Node) {
+ // FIXME: Suppress printing implicit bases (like "this")
+ PrintExpr(Node->getBase());
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(Node->getMemberDecl()))
+ if (FD->isAnonymousStructOrUnion())
+ return;
+ OS << (Node->isArrow() ? "->" : ".");
+ if (NestedNameSpecifier *Qualifier = Node->getQualifier())
+ Qualifier->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getMemberNameInfo();
+ if (Node->hasExplicitTemplateArgs())
+ OS << TemplateSpecializationType::PrintTemplateArgumentList(
+ Node->getTemplateArgs(),
+ Node->getNumTemplateArgs(),
+ Policy);
+}
+void StmtPrinter::VisitObjCIsaExpr(ObjCIsaExpr *Node) {
+ PrintExpr(Node->getBase());
+ OS << (Node->isArrow() ? "->isa" : ".isa");
+}
+
+void StmtPrinter::VisitExtVectorElementExpr(ExtVectorElementExpr *Node) {
+ PrintExpr(Node->getBase());
+ OS << ".";
+ OS << Node->getAccessor().getName();
+}
+void StmtPrinter::VisitCStyleCastExpr(CStyleCastExpr *Node) {
+ OS << "(" << Node->getType().getAsString(Policy) << ")";
+ PrintExpr(Node->getSubExpr());
+}
+void StmtPrinter::VisitCompoundLiteralExpr(CompoundLiteralExpr *Node) {
+ OS << "(" << Node->getType().getAsString(Policy) << ")";
+ PrintExpr(Node->getInitializer());
+}
+void StmtPrinter::VisitImplicitCastExpr(ImplicitCastExpr *Node) {
+ // No need to print anything, simply forward to the sub expression.
+ PrintExpr(Node->getSubExpr());
+}
+void StmtPrinter::VisitBinaryOperator(BinaryOperator *Node) {
+ PrintExpr(Node->getLHS());
+ OS << " " << BinaryOperator::getOpcodeStr(Node->getOpcode()) << " ";
+ PrintExpr(Node->getRHS());
+}
+void StmtPrinter::VisitCompoundAssignOperator(CompoundAssignOperator *Node) {
+ PrintExpr(Node->getLHS());
+ OS << " " << BinaryOperator::getOpcodeStr(Node->getOpcode()) << " ";
+ PrintExpr(Node->getRHS());
+}
+void StmtPrinter::VisitConditionalOperator(ConditionalOperator *Node) {
+ PrintExpr(Node->getCond());
+ OS << " ? ";
+ PrintExpr(Node->getLHS());
+ OS << " : ";
+ PrintExpr(Node->getRHS());
+}
+
+// GNU extensions.
+
+void
+StmtPrinter::VisitBinaryConditionalOperator(BinaryConditionalOperator *Node) {
+ PrintExpr(Node->getCommon());
+ OS << " ?: ";
+ PrintExpr(Node->getFalseExpr());
+}
+void StmtPrinter::VisitAddrLabelExpr(AddrLabelExpr *Node) {
+ OS << "&&" << Node->getLabel()->getName();
+}
+
+void StmtPrinter::VisitStmtExpr(StmtExpr *E) {
+ OS << "(";
+ PrintRawCompoundStmt(E->getSubStmt());
+ OS << ")";
+}
+
+void StmtPrinter::VisitChooseExpr(ChooseExpr *Node) {
+ OS << "__builtin_choose_expr(";
+ PrintExpr(Node->getCond());
+ OS << ", ";
+ PrintExpr(Node->getLHS());
+ OS << ", ";
+ PrintExpr(Node->getRHS());
+ OS << ")";
+}
+
+void StmtPrinter::VisitGNUNullExpr(GNUNullExpr *) {
+ OS << "__null";
+}
+
+void StmtPrinter::VisitShuffleVectorExpr(ShuffleVectorExpr *Node) {
+ OS << "__builtin_shufflevector(";
+ for (unsigned i = 0, e = Node->getNumSubExprs(); i != e; ++i) {
+ if (i) OS << ", ";
+ PrintExpr(Node->getExpr(i));
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitInitListExpr(InitListExpr* Node) {
+ if (Node->getSyntacticForm()) {
+ Visit(Node->getSyntacticForm());
+ return;
+ }
+
+ OS << "{ ";
+ for (unsigned i = 0, e = Node->getNumInits(); i != e; ++i) {
+ if (i) OS << ", ";
+ if (Node->getInit(i))
+ PrintExpr(Node->getInit(i));
+ else
+ OS << "0";
+ }
+ OS << " }";
+}
+
+void StmtPrinter::VisitParenListExpr(ParenListExpr* Node) {
+ OS << "( ";
+ for (unsigned i = 0, e = Node->getNumExprs(); i != e; ++i) {
+ if (i) OS << ", ";
+ PrintExpr(Node->getExpr(i));
+ }
+ OS << " )";
+}
+
+void StmtPrinter::VisitDesignatedInitExpr(DesignatedInitExpr *Node) {
+ for (DesignatedInitExpr::designators_iterator D = Node->designators_begin(),
+ DEnd = Node->designators_end();
+ D != DEnd; ++D) {
+ if (D->isFieldDesignator()) {
+ if (D->getDotLoc().isInvalid())
+ OS << D->getFieldName()->getName() << ":";
+ else
+ OS << "." << D->getFieldName()->getName();
+ } else {
+ OS << "[";
+ if (D->isArrayDesignator()) {
+ PrintExpr(Node->getArrayIndex(*D));
+ } else {
+ PrintExpr(Node->getArrayRangeStart(*D));
+ OS << " ... ";
+ PrintExpr(Node->getArrayRangeEnd(*D));
+ }
+ OS << "]";
+ }
+ }
+
+ OS << " = ";
+ PrintExpr(Node->getInit());
+}
+
+void StmtPrinter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *Node) {
+ if (Policy.LangOpts.CPlusPlus)
+ OS << "/*implicit*/" << Node->getType().getAsString(Policy) << "()";
+ else {
+ OS << "/*implicit*/(" << Node->getType().getAsString(Policy) << ")";
+ if (Node->getType()->isRecordType())
+ OS << "{}";
+ else
+ OS << 0;
+ }
+}
+
+void StmtPrinter::VisitVAArgExpr(VAArgExpr *Node) {
+ OS << "__builtin_va_arg(";
+ PrintExpr(Node->getSubExpr());
+ OS << ", ";
+ OS << Node->getType().getAsString(Policy);
+ OS << ")";
+}
+
+void StmtPrinter::VisitPseudoObjectExpr(PseudoObjectExpr *Node) {
+ PrintExpr(Node->getSyntacticForm());
+}
+
+void StmtPrinter::VisitAtomicExpr(AtomicExpr *Node) {
+ const char *Name = 0;
+ switch (Node->getOp()) {
+#define BUILTIN(ID, TYPE, ATTRS)
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
+ case AtomicExpr::AO ## ID: \
+ Name = #ID "("; \
+ break;
+#include "clang/Basic/Builtins.def"
+ }
+ OS << Name;
+
+ // AtomicExpr stores its subexpressions in a permuted order.
+ PrintExpr(Node->getPtr());
+ OS << ", ";
+ if (Node->getOp() != AtomicExpr::AO__c11_atomic_load &&
+ Node->getOp() != AtomicExpr::AO__atomic_load_n) {
+ PrintExpr(Node->getVal1());
+ OS << ", ";
+ }
+ if (Node->getOp() == AtomicExpr::AO__atomic_exchange ||
+ Node->isCmpXChg()) {
+ PrintExpr(Node->getVal2());
+ OS << ", ";
+ }
+ if (Node->getOp() == AtomicExpr::AO__atomic_compare_exchange ||
+ Node->getOp() == AtomicExpr::AO__atomic_compare_exchange_n) {
+ PrintExpr(Node->getWeak());
+ OS << ", ";
+ }
+ if (Node->getOp() != AtomicExpr::AO__c11_atomic_init)
+ PrintExpr(Node->getOrder());
+ if (Node->isCmpXChg()) {
+ OS << ", ";
+ PrintExpr(Node->getOrderFail());
+ }
+ OS << ")";
+}
+
+// C++
+void StmtPrinter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *Node) {
+ const char *OpStrings[NUM_OVERLOADED_OPERATORS] = {
+ "",
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ Spelling,
+#include "clang/Basic/OperatorKinds.def"
+ };
+
+ OverloadedOperatorKind Kind = Node->getOperator();
+ if (Kind == OO_PlusPlus || Kind == OO_MinusMinus) {
+ if (Node->getNumArgs() == 1) {
+ OS << OpStrings[Kind] << ' ';
+ PrintExpr(Node->getArg(0));
+ } else {
+ PrintExpr(Node->getArg(0));
+ OS << ' ' << OpStrings[Kind];
+ }
+ } else if (Kind == OO_Call) {
+ PrintExpr(Node->getArg(0));
+ OS << '(';
+ for (unsigned ArgIdx = 1; ArgIdx < Node->getNumArgs(); ++ArgIdx) {
+ if (ArgIdx > 1)
+ OS << ", ";
+ if (!isa<CXXDefaultArgExpr>(Node->getArg(ArgIdx)))
+ PrintExpr(Node->getArg(ArgIdx));
+ }
+ OS << ')';
+ } else if (Kind == OO_Subscript) {
+ PrintExpr(Node->getArg(0));
+ OS << '[';
+ PrintExpr(Node->getArg(1));
+ OS << ']';
+ } else if (Node->getNumArgs() == 1) {
+ OS << OpStrings[Kind] << ' ';
+ PrintExpr(Node->getArg(0));
+ } else if (Node->getNumArgs() == 2) {
+ PrintExpr(Node->getArg(0));
+ OS << ' ' << OpStrings[Kind] << ' ';
+ PrintExpr(Node->getArg(1));
+ } else {
+ llvm_unreachable("unknown overloaded operator");
+ }
+}
+
+void StmtPrinter::VisitCXXMemberCallExpr(CXXMemberCallExpr *Node) {
+ VisitCallExpr(cast<CallExpr>(Node));
+}
+
+void StmtPrinter::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *Node) {
+ PrintExpr(Node->getCallee());
+ OS << "<<<";
+ PrintCallArgs(Node->getConfig());
+ OS << ">>>(";
+ PrintCallArgs(Node);
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXNamedCastExpr(CXXNamedCastExpr *Node) {
+ OS << Node->getCastName() << '<';
+ OS << Node->getTypeAsWritten().getAsString(Policy) << ">(";
+ PrintExpr(Node->getSubExpr());
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXStaticCastExpr(CXXStaticCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
+void StmtPrinter::VisitCXXDynamicCastExpr(CXXDynamicCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
+void StmtPrinter::VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
+void StmtPrinter::VisitCXXConstCastExpr(CXXConstCastExpr *Node) {
+ VisitCXXNamedCastExpr(Node);
+}
+
+void StmtPrinter::VisitCXXTypeidExpr(CXXTypeidExpr *Node) {
+ OS << "typeid(";
+ if (Node->isTypeOperand()) {
+ OS << Node->getTypeOperand().getAsString(Policy);
+ } else {
+ PrintExpr(Node->getExprOperand());
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXUuidofExpr(CXXUuidofExpr *Node) {
+ OS << "__uuidof(";
+ if (Node->isTypeOperand()) {
+ OS << Node->getTypeOperand().getAsString(Policy);
+ } else {
+ PrintExpr(Node->getExprOperand());
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitUserDefinedLiteral(UserDefinedLiteral *Node) {
+ switch (Node->getLiteralOperatorKind()) {
+ case UserDefinedLiteral::LOK_Raw:
+ OS << cast<StringLiteral>(Node->getArg(0)->IgnoreImpCasts())->getString();
+ break;
+ case UserDefinedLiteral::LOK_Template: {
+ DeclRefExpr *DRE = cast<DeclRefExpr>(Node->getCallee()->IgnoreImpCasts());
+ const TemplateArgumentList *Args =
+ cast<FunctionDecl>(DRE->getDecl())->getTemplateSpecializationArgs();
+ assert(Args);
+ const TemplateArgument &Pack = Args->get(0);
+ for (TemplateArgument::pack_iterator I = Pack.pack_begin(),
+ E = Pack.pack_end(); I != E; ++I) {
+ char C = (char)I->getAsIntegral()->getZExtValue();
+ OS << C;
+ }
+ break;
+ }
+ case UserDefinedLiteral::LOK_Integer: {
+ // Print integer literal without suffix.
+ IntegerLiteral *Int = cast<IntegerLiteral>(Node->getCookedLiteral());
+ OS << Int->getValue().toString(10, /*isSigned*/false);
+ break;
+ }
+ case UserDefinedLiteral::LOK_Floating:
+ case UserDefinedLiteral::LOK_String:
+ case UserDefinedLiteral::LOK_Character:
+ PrintExpr(Node->getCookedLiteral());
+ break;
+ }
+ OS << Node->getUDSuffix()->getName();
+}
+
+void StmtPrinter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *Node) {
+ OS << (Node->getValue() ? "true" : "false");
+}
+
+void StmtPrinter::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *Node) {
+ OS << "nullptr";
+}
+
+void StmtPrinter::VisitCXXThisExpr(CXXThisExpr *Node) {
+ OS << "this";
+}
+
+void StmtPrinter::VisitCXXThrowExpr(CXXThrowExpr *Node) {
+ if (Node->getSubExpr() == 0)
+ OS << "throw";
+ else {
+ OS << "throw ";
+ PrintExpr(Node->getSubExpr());
+ }
+}
+
+void StmtPrinter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *Node) {
+ // Nothing to print: we picked up the default argument
+}
+
+void StmtPrinter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node) {
+ OS << Node->getType().getAsString(Policy);
+ OS << "(";
+ PrintExpr(Node->getSubExpr());
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node) {
+ PrintExpr(Node->getSubExpr());
+}
+
+void StmtPrinter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *Node) {
+ OS << Node->getType().getAsString(Policy);
+ OS << "(";
+ for (CXXTemporaryObjectExpr::arg_iterator Arg = Node->arg_begin(),
+ ArgEnd = Node->arg_end();
+ Arg != ArgEnd; ++Arg) {
+ if (Arg != Node->arg_begin())
+ OS << ", ";
+ PrintExpr(*Arg);
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitLambdaExpr(LambdaExpr *Node) {
+ OS << '[';
+ bool NeedComma = false;
+ switch (Node->getCaptureDefault()) {
+ case LCD_None:
+ break;
+
+ case LCD_ByCopy:
+ OS << '=';
+ NeedComma = true;
+ break;
+
+ case LCD_ByRef:
+ OS << '&';
+ NeedComma = true;
+ break;
+ }
+ for (LambdaExpr::capture_iterator C = Node->explicit_capture_begin(),
+ CEnd = Node->explicit_capture_end();
+ C != CEnd;
+ ++C) {
+ if (NeedComma)
+ OS << ", ";
+ NeedComma = true;
+
+ switch (C->getCaptureKind()) {
+ case LCK_This:
+ OS << "this";
+ break;
+
+ case LCK_ByRef:
+ if (Node->getCaptureDefault() != LCD_ByRef)
+ OS << '&';
+ OS << C->getCapturedVar()->getName();
+ break;
+
+ case LCK_ByCopy:
+ if (Node->getCaptureDefault() != LCD_ByCopy)
+ OS << '=';
+ OS << C->getCapturedVar()->getName();
+ break;
+ }
+ }
+ OS << ']';
+
+ if (Node->hasExplicitParameters()) {
+ OS << " (";
+ CXXMethodDecl *Method = Node->getCallOperator();
+ NeedComma = false;
+ for (CXXMethodDecl::param_iterator P = Method->param_begin(),
+ PEnd = Method->param_end();
+ P != PEnd; ++P) {
+ if (NeedComma) {
+ OS << ", ";
+ } else {
+ NeedComma = true;
+ }
+ std::string ParamStr = (*P)->getNameAsString();
+ (*P)->getOriginalType().getAsStringInternal(ParamStr, Policy);
+ OS << ParamStr;
+ }
+ if (Method->isVariadic()) {
+ if (NeedComma)
+ OS << ", ";
+ OS << "...";
+ }
+ OS << ')';
+
+ if (Node->isMutable())
+ OS << " mutable";
+
+ const FunctionProtoType *Proto
+ = Method->getType()->getAs<FunctionProtoType>();
+ {
+ std::string ExceptionSpec;
+ Proto->printExceptionSpecification(ExceptionSpec, Policy);
+ OS << ExceptionSpec;
+ }
+
+ // FIXME: Attributes
+
+ // Print the trailing return type if it was specified in the source.
+ if (Node->hasExplicitResultType())
+ OS << " -> " << Proto->getResultType().getAsString(Policy);
+ }
+
+ // Print the body.
+ CompoundStmt *Body = Node->getBody();
+ OS << ' ';
+ PrintStmt(Body);
+}
+
+void StmtPrinter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *Node) {
+ if (TypeSourceInfo *TSInfo = Node->getTypeSourceInfo())
+ OS << TSInfo->getType().getAsString(Policy) << "()";
+ else
+ OS << Node->getType().getAsString(Policy) << "()";
+}
+
+void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) {
+ if (E->isGlobalNew())
+ OS << "::";
+ OS << "new ";
+ unsigned NumPlace = E->getNumPlacementArgs();
+ if (NumPlace > 0) {
+ OS << "(";
+ PrintExpr(E->getPlacementArg(0));
+ for (unsigned i = 1; i < NumPlace; ++i) {
+ OS << ", ";
+ PrintExpr(E->getPlacementArg(i));
+ }
+ OS << ") ";
+ }
+ if (E->isParenTypeId())
+ OS << "(";
+ std::string TypeS;
+ if (Expr *Size = E->getArraySize()) {
+ llvm::raw_string_ostream s(TypeS);
+ Size->printPretty(s, Context, Helper, Policy);
+ s.flush();
+ TypeS = "[" + TypeS + "]";
+ }
+ E->getAllocatedType().getAsStringInternal(TypeS, Policy);
+ OS << TypeS;
+ if (E->isParenTypeId())
+ OS << ")";
+
+ CXXNewExpr::InitializationStyle InitStyle = E->getInitializationStyle();
+ if (InitStyle) {
+ if (InitStyle == CXXNewExpr::CallInit)
+ OS << "(";
+ PrintExpr(E->getInitializer());
+ if (InitStyle == CXXNewExpr::CallInit)
+ OS << ")";
+ }
+}
+
+void StmtPrinter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ if (E->isGlobalDelete())
+ OS << "::";
+ OS << "delete ";
+ if (E->isArrayForm())
+ OS << "[] ";
+ PrintExpr(E->getArgument());
+}
+
+void StmtPrinter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
+ PrintExpr(E->getBase());
+ if (E->isArrow())
+ OS << "->";
+ else
+ OS << '.';
+ if (E->getQualifier())
+ E->getQualifier()->print(OS, Policy);
+
+ std::string TypeS;
+ if (IdentifierInfo *II = E->getDestroyedTypeIdentifier())
+ OS << II->getName();
+ else
+ E->getDestroyedType().getAsStringInternal(TypeS, Policy);
+ OS << TypeS;
+}
+
+void StmtPrinter::VisitCXXConstructExpr(CXXConstructExpr *E) {
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
+ if (isa<CXXDefaultArgExpr>(E->getArg(i))) {
+ // Don't print any defaulted arguments
+ break;
+ }
+
+ if (i) OS << ", ";
+ PrintExpr(E->getArg(i));
+ }
+}
+
+void StmtPrinter::VisitExprWithCleanups(ExprWithCleanups *E) {
+ // Just forward to the sub expression.
+ PrintExpr(E->getSubExpr());
+}
+
+void
+StmtPrinter::VisitCXXUnresolvedConstructExpr(
+ CXXUnresolvedConstructExpr *Node) {
+ OS << Node->getTypeAsWritten().getAsString(Policy);
+ OS << "(";
+ for (CXXUnresolvedConstructExpr::arg_iterator Arg = Node->arg_begin(),
+ ArgEnd = Node->arg_end();
+ Arg != ArgEnd; ++Arg) {
+ if (Arg != Node->arg_begin())
+ OS << ", ";
+ PrintExpr(*Arg);
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXDependentScopeMemberExpr(
+ CXXDependentScopeMemberExpr *Node) {
+ if (!Node->isImplicitAccess()) {
+ PrintExpr(Node->getBase());
+ OS << (Node->isArrow() ? "->" : ".");
+ }
+ if (NestedNameSpecifier *Qualifier = Node->getQualifier())
+ Qualifier->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getMemberNameInfo();
+ if (Node->hasExplicitTemplateArgs()) {
+ OS << TemplateSpecializationType::PrintTemplateArgumentList(
+ Node->getTemplateArgs(),
+ Node->getNumTemplateArgs(),
+ Policy);
+ }
+}
+
+void StmtPrinter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *Node) {
+ if (!Node->isImplicitAccess()) {
+ PrintExpr(Node->getBase());
+ OS << (Node->isArrow() ? "->" : ".");
+ }
+ if (NestedNameSpecifier *Qualifier = Node->getQualifier())
+ Qualifier->print(OS, Policy);
+ if (Node->hasTemplateKeyword())
+ OS << "template ";
+ OS << Node->getMemberNameInfo();
+ if (Node->hasExplicitTemplateArgs()) {
+ OS << TemplateSpecializationType::PrintTemplateArgumentList(
+ Node->getTemplateArgs(),
+ Node->getNumTemplateArgs(),
+ Policy);
+ }
+}
+
+static const char *getTypeTraitName(UnaryTypeTrait UTT) {
+ switch (UTT) {
+ case UTT_HasNothrowAssign: return "__has_nothrow_assign";
+ case UTT_HasNothrowConstructor: return "__has_nothrow_constructor";
+ case UTT_HasNothrowCopy: return "__has_nothrow_copy";
+ case UTT_HasTrivialAssign: return "__has_trivial_assign";
+ case UTT_HasTrivialDefaultConstructor: return "__has_trivial_constructor";
+ case UTT_HasTrivialCopy: return "__has_trivial_copy";
+ case UTT_HasTrivialDestructor: return "__has_trivial_destructor";
+ case UTT_HasVirtualDestructor: return "__has_virtual_destructor";
+ case UTT_IsAbstract: return "__is_abstract";
+ case UTT_IsArithmetic: return "__is_arithmetic";
+ case UTT_IsArray: return "__is_array";
+ case UTT_IsClass: return "__is_class";
+ case UTT_IsCompleteType: return "__is_complete_type";
+ case UTT_IsCompound: return "__is_compound";
+ case UTT_IsConst: return "__is_const";
+ case UTT_IsEmpty: return "__is_empty";
+ case UTT_IsEnum: return "__is_enum";
+ case UTT_IsFinal: return "__is_final";
+ case UTT_IsFloatingPoint: return "__is_floating_point";
+ case UTT_IsFunction: return "__is_function";
+ case UTT_IsFundamental: return "__is_fundamental";
+ case UTT_IsIntegral: return "__is_integral";
+ case UTT_IsLiteral: return "__is_literal";
+ case UTT_IsLvalueReference: return "__is_lvalue_reference";
+ case UTT_IsMemberFunctionPointer: return "__is_member_function_pointer";
+ case UTT_IsMemberObjectPointer: return "__is_member_object_pointer";
+ case UTT_IsMemberPointer: return "__is_member_pointer";
+ case UTT_IsObject: return "__is_object";
+ case UTT_IsPOD: return "__is_pod";
+ case UTT_IsPointer: return "__is_pointer";
+ case UTT_IsPolymorphic: return "__is_polymorphic";
+ case UTT_IsReference: return "__is_reference";
+ case UTT_IsRvalueReference: return "__is_rvalue_reference";
+ case UTT_IsScalar: return "__is_scalar";
+ case UTT_IsSigned: return "__is_signed";
+ case UTT_IsStandardLayout: return "__is_standard_layout";
+ case UTT_IsTrivial: return "__is_trivial";
+ case UTT_IsTriviallyCopyable: return "__is_trivially_copyable";
+ case UTT_IsUnion: return "__is_union";
+ case UTT_IsUnsigned: return "__is_unsigned";
+ case UTT_IsVoid: return "__is_void";
+ case UTT_IsVolatile: return "__is_volatile";
+ }
+ llvm_unreachable("Type trait not covered by switch statement");
+}
+
+static const char *getTypeTraitName(BinaryTypeTrait BTT) {
+ switch (BTT) {
+ case BTT_IsBaseOf: return "__is_base_of";
+ case BTT_IsConvertible: return "__is_convertible";
+ case BTT_IsSame: return "__is_same";
+ case BTT_TypeCompatible: return "__builtin_types_compatible_p";
+ case BTT_IsConvertibleTo: return "__is_convertible_to";
+ case BTT_IsTriviallyAssignable: return "__is_trivially_assignable";
+ }
+ llvm_unreachable("Binary type trait not covered by switch");
+}
+
+static const char *getTypeTraitName(TypeTrait TT) {
+ switch (TT) {
+ case clang::TT_IsTriviallyConstructible:return "__is_trivially_constructible";
+ }
+ llvm_unreachable("Type trait not covered by switch");
+}
+
+static const char *getTypeTraitName(ArrayTypeTrait ATT) {
+ switch (ATT) {
+ case ATT_ArrayRank: return "__array_rank";
+ case ATT_ArrayExtent: return "__array_extent";
+ }
+ llvm_unreachable("Array type trait not covered by switch");
+}
+
+static const char *getExpressionTraitName(ExpressionTrait ET) {
+ switch (ET) {
+ case ET_IsLValueExpr: return "__is_lvalue_expr";
+ case ET_IsRValueExpr: return "__is_rvalue_expr";
+ }
+ llvm_unreachable("Expression type trait not covered by switch");
+}
+
+void StmtPrinter::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
+ OS << getTypeTraitName(E->getTrait()) << "("
+ << E->getQueriedType().getAsString(Policy) << ")";
+}
+
+void StmtPrinter::VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
+ OS << getTypeTraitName(E->getTrait()) << "("
+ << E->getLhsType().getAsString(Policy) << ","
+ << E->getRhsType().getAsString(Policy) << ")";
+}
+
+void StmtPrinter::VisitTypeTraitExpr(TypeTraitExpr *E) {
+ OS << getTypeTraitName(E->getTrait()) << "(";
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) {
+ if (I > 0)
+ OS << ", ";
+ OS << E->getArg(I)->getType().getAsString(Policy);
+ }
+ OS << ")";
+}
+
+void StmtPrinter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
+ OS << getTypeTraitName(E->getTrait()) << "("
+ << E->getQueriedType().getAsString(Policy) << ")";
+}
+
+void StmtPrinter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
+ OS << getExpressionTraitName(E->getTrait()) << "(";
+ PrintExpr(E->getQueriedExpression());
+ OS << ")";
+}
+
+void StmtPrinter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
+ OS << "noexcept(";
+ PrintExpr(E->getOperand());
+ OS << ")";
+}
+
+void StmtPrinter::VisitPackExpansionExpr(PackExpansionExpr *E) {
+ PrintExpr(E->getPattern());
+ OS << "...";
+}
+
+void StmtPrinter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ OS << "sizeof...(" << *E->getPack() << ")";
+}
+
+void StmtPrinter::VisitSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *Node) {
+ OS << *Node->getParameterPack();
+}
+
+void StmtPrinter::VisitSubstNonTypeTemplateParmExpr(
+ SubstNonTypeTemplateParmExpr *Node) {
+ Visit(Node->getReplacement());
+}
+
+void StmtPrinter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *Node){
+ PrintExpr(Node->GetTemporaryExpr());
+}
+
+// Obj-C
+
+void StmtPrinter::VisitObjCStringLiteral(ObjCStringLiteral *Node) {
+ OS << "@";
+ VisitStringLiteral(Node->getString());
+}
+
+void StmtPrinter::VisitObjCNumericLiteral(ObjCNumericLiteral *E) {
+ OS << "@";
+ Visit(E->getNumber());
+}
+
+void StmtPrinter::VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
+ OS << "@[ ";
+ StmtRange ch = E->children();
+ if (ch.first != ch.second) {
+ while (1) {
+ Visit(*ch.first);
+ ++ch.first;
+ if (ch.first == ch.second) break;
+ OS << ", ";
+ }
+ }
+ OS << " ]";
+}
+
+void StmtPrinter::VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ OS << "@{ ";
+ for (unsigned I = 0, N = E->getNumElements(); I != N; ++I) {
+ if (I > 0)
+ OS << ", ";
+
+ ObjCDictionaryElement Element = E->getKeyValueElement(I);
+ Visit(Element.Key);
+ OS << " : ";
+ Visit(Element.Value);
+ if (Element.isPackExpansion())
+ OS << "...";
+ }
+ OS << " }";
+}
+
+void StmtPrinter::VisitObjCEncodeExpr(ObjCEncodeExpr *Node) {
+ OS << "@encode(" << Node->getEncodedType().getAsString(Policy) << ')';
+}
+
+void StmtPrinter::VisitObjCSelectorExpr(ObjCSelectorExpr *Node) {
+ OS << "@selector(" << Node->getSelector().getAsString() << ')';
+}
+
+void StmtPrinter::VisitObjCProtocolExpr(ObjCProtocolExpr *Node) {
+ OS << "@protocol(" << *Node->getProtocol() << ')';
+}
+
+void StmtPrinter::VisitObjCMessageExpr(ObjCMessageExpr *Mess) {
+ OS << "[";
+ switch (Mess->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ PrintExpr(Mess->getInstanceReceiver());
+ break;
+
+ case ObjCMessageExpr::Class:
+ OS << Mess->getClassReceiver().getAsString(Policy);
+ break;
+
+ case ObjCMessageExpr::SuperInstance:
+ case ObjCMessageExpr::SuperClass:
+ OS << "Super";
+ break;
+ }
+
+ OS << ' ';
+ Selector selector = Mess->getSelector();
+ if (selector.isUnarySelector()) {
+ OS << selector.getNameForSlot(0);
+ } else {
+ for (unsigned i = 0, e = Mess->getNumArgs(); i != e; ++i) {
+ if (i < selector.getNumArgs()) {
+ if (i > 0) OS << ' ';
+ if (selector.getIdentifierInfoForSlot(i))
+ OS << selector.getIdentifierInfoForSlot(i)->getName() << ':';
+ else
+ OS << ":";
+ }
+ else OS << ", "; // Handle variadic methods.
+
+ PrintExpr(Mess->getArg(i));
+ }
+ }
+ OS << "]";
+}
+
+void StmtPrinter::VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Node) {
+ OS << (Node->getValue() ? "__objc_yes" : "__objc_no");
+}
+
+void
+StmtPrinter::VisitObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) {
+ PrintExpr(E->getSubExpr());
+}
+
+void
+StmtPrinter::VisitObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
+ OS << "(" << E->getBridgeKindName() << E->getType().getAsString(Policy)
+ << ")";
+ PrintExpr(E->getSubExpr());
+}
+
+void StmtPrinter::VisitBlockExpr(BlockExpr *Node) {
+ BlockDecl *BD = Node->getBlockDecl();
+ OS << "^";
+
+ const FunctionType *AFT = Node->getFunctionType();
+
+ if (isa<FunctionNoProtoType>(AFT)) {
+ OS << "()";
+ } else if (!BD->param_empty() || cast<FunctionProtoType>(AFT)->isVariadic()) {
+ OS << '(';
+ std::string ParamStr;
+ for (BlockDecl::param_iterator AI = BD->param_begin(),
+ E = BD->param_end(); AI != E; ++AI) {
+ if (AI != BD->param_begin()) OS << ", ";
+ ParamStr = (*AI)->getNameAsString();
+ (*AI)->getType().getAsStringInternal(ParamStr, Policy);
+ OS << ParamStr;
+ }
+
+ const FunctionProtoType *FT = cast<FunctionProtoType>(AFT);
+ if (FT->isVariadic()) {
+ if (!BD->param_empty()) OS << ", ";
+ OS << "...";
+ }
+ OS << ')';
+ }
+}
+
+void StmtPrinter::VisitOpaqueValueExpr(OpaqueValueExpr *Node) {
+ PrintExpr(Node->getSourceExpr());
+}
+
+void StmtPrinter::VisitAsTypeExpr(AsTypeExpr *Node) {
+ OS << "__builtin_astype(";
+ PrintExpr(Node->getSrcExpr());
+ OS << ", " << Node->getType().getAsString();
+ OS << ")";
+}
+
+//===----------------------------------------------------------------------===//
+// Stmt method implementations
+//===----------------------------------------------------------------------===//
+
+void Stmt::dumpPretty(ASTContext& Context) const {
+ printPretty(llvm::errs(), Context, 0,
+ PrintingPolicy(Context.getLangOpts()));
+}
+
+void Stmt::printPretty(raw_ostream &OS, ASTContext& Context,
+ PrinterHelper* Helper,
+ const PrintingPolicy &Policy,
+ unsigned Indentation) const {
+ if (this == 0) {
+ OS << "<NULL>";
+ return;
+ }
+
+ if (Policy.Dump && &Context) {
+ dump(OS, Context.getSourceManager());
+ return;
+ }
+
+ StmtPrinter P(OS, Context, Helper, Policy, Indentation);
+ P.Visit(const_cast<Stmt*>(this));
+}
+
+//===----------------------------------------------------------------------===//
+// PrinterHelper
+//===----------------------------------------------------------------------===//
+
+// Implement virtual destructor.
+PrinterHelper::~PrinterHelper() {}
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
new file mode 100644
index 0000000..e5526ce
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/StmtProfile.cpp
@@ -0,0 +1,1178 @@
+//===---- StmtProfile.cpp - Profile implementation for Stmt ASTs ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Stmt::Profile method, which builds a unique bit
+// representation that identifies a statement/expression.
+//
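+// A minimal usage sketch (illustrative; it assumes the Stmt::Profile entry
+// point declared in Stmt.h): two statements can be compared for structural
+// equivalence via the FoldingSetNodeIDs they build.
+//
+//   llvm::FoldingSetNodeID IDA, IDB;
+//   A->Profile(IDA, Context, /*Canonical=*/true);
+//   B->Profile(IDB, Context, /*Canonical=*/true);
+//   bool StructurallyEquivalent = (IDA == IDB);
+//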
+//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/ADT/FoldingSet.h"
+using namespace clang;
+
+namespace {
+ class StmtProfiler : public ConstStmtVisitor<StmtProfiler> {
+ llvm::FoldingSetNodeID &ID;
+ const ASTContext &Context;
+ bool Canonical;
+
+ public:
+ StmtProfiler(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ bool Canonical)
+ : ID(ID), Context(Context), Canonical(Canonical) { }
+
+ void VisitStmt(const Stmt *S);
+
+#define STMT(Node, Base) void Visit##Node(const Node *S);
+#include "clang/AST/StmtNodes.inc"
+
+ /// \brief Visit a declaration that is referenced within an expression
+ /// or statement.
+ void VisitDecl(const Decl *D);
+
+ /// \brief Visit a type that is referenced within an expression or
+ /// statement.
+ void VisitType(QualType T);
+
+ /// \brief Visit a name that occurs within an expression or statement.
+ void VisitName(DeclarationName Name);
+
+ /// \brief Visit a nested-name-specifier that occurs within an expression
+ /// or statement.
+ void VisitNestedNameSpecifier(NestedNameSpecifier *NNS);
+
+ /// \brief Visit a template name that occurs within an expression or
+ /// statement.
+ void VisitTemplateName(TemplateName Name);
+
+ /// \brief Visit template arguments that occur within an expression or
+ /// statement.
+ void VisitTemplateArguments(const TemplateArgumentLoc *Args,
+ unsigned NumArgs);
+
+ /// \brief Visit a single template argument.
+ void VisitTemplateArgument(const TemplateArgument &Arg);
+ };
+}
+
+void StmtProfiler::VisitStmt(const Stmt *S) {
+ ID.AddInteger(S->getStmtClass());
+ for (Stmt::const_child_range C = S->children(); C; ++C) {
+ if (*C)
+ Visit(*C);
+ else
+ ID.AddInteger(0);
+ }
+}
+
+void StmtProfiler::VisitDeclStmt(const DeclStmt *S) {
+ VisitStmt(S);
+ for (DeclStmt::const_decl_iterator D = S->decl_begin(), DEnd = S->decl_end();
+ D != DEnd; ++D)
+ VisitDecl(*D);
+}
+
+void StmtProfiler::VisitNullStmt(const NullStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitCompoundStmt(const CompoundStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitSwitchCase(const SwitchCase *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitCaseStmt(const CaseStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitDefaultStmt(const DefaultStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitLabelStmt(const LabelStmt *S) {
+ VisitStmt(S);
+ VisitDecl(S->getDecl());
+}
+
+void StmtProfiler::VisitIfStmt(const IfStmt *S) {
+ VisitStmt(S);
+ VisitDecl(S->getConditionVariable());
+}
+
+void StmtProfiler::VisitSwitchStmt(const SwitchStmt *S) {
+ VisitStmt(S);
+ VisitDecl(S->getConditionVariable());
+}
+
+void StmtProfiler::VisitWhileStmt(const WhileStmt *S) {
+ VisitStmt(S);
+ VisitDecl(S->getConditionVariable());
+}
+
+void StmtProfiler::VisitDoStmt(const DoStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitForStmt(const ForStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitGotoStmt(const GotoStmt *S) {
+ VisitStmt(S);
+ VisitDecl(S->getLabel());
+}
+
+void StmtProfiler::VisitIndirectGotoStmt(const IndirectGotoStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitContinueStmt(const ContinueStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitBreakStmt(const BreakStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitReturnStmt(const ReturnStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitAsmStmt(const AsmStmt *S) {
+ VisitStmt(S);
+ ID.AddBoolean(S->isVolatile());
+ ID.AddBoolean(S->isSimple());
+ VisitStringLiteral(S->getAsmString());
+ ID.AddInteger(S->getNumOutputs());
+ for (unsigned I = 0, N = S->getNumOutputs(); I != N; ++I) {
+ ID.AddString(S->getOutputName(I));
+ VisitStringLiteral(S->getOutputConstraintLiteral(I));
+ }
+ ID.AddInteger(S->getNumInputs());
+ for (unsigned I = 0, N = S->getNumInputs(); I != N; ++I) {
+ ID.AddString(S->getInputName(I));
+ VisitStringLiteral(S->getInputConstraintLiteral(I));
+ }
+ ID.AddInteger(S->getNumClobbers());
+ for (unsigned I = 0, N = S->getNumClobbers(); I != N; ++I)
+ VisitStringLiteral(S->getClobber(I));
+}
+
+void StmtProfiler::VisitCXXCatchStmt(const CXXCatchStmt *S) {
+ VisitStmt(S);
+ VisitType(S->getCaughtType());
+}
+
+void StmtProfiler::VisitCXXTryStmt(const CXXTryStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitCXXForRangeStmt(const CXXForRangeStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitMSDependentExistsStmt(const MSDependentExistsStmt *S) {
+ VisitStmt(S);
+ ID.AddBoolean(S->isIfExists());
+ VisitNestedNameSpecifier(S->getQualifierLoc().getNestedNameSpecifier());
+ VisitName(S->getNameInfo().getName());
+}
+
+void StmtProfiler::VisitSEHTryStmt(const SEHTryStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitSEHFinallyStmt(const SEHFinallyStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitSEHExceptStmt(const SEHExceptStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitObjCAtCatchStmt(const ObjCAtCatchStmt *S) {
+ VisitStmt(S);
+ ID.AddBoolean(S->hasEllipsis());
+ if (S->getCatchParamDecl())
+ VisitType(S->getCatchParamDecl()->getType());
+}
+
+void StmtProfiler::VisitObjCAtFinallyStmt(const ObjCAtFinallyStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitObjCAtTryStmt(const ObjCAtTryStmt *S) {
+ VisitStmt(S);
+}
+
+void
+StmtProfiler::VisitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitObjCAtThrowStmt(const ObjCAtThrowStmt *S) {
+ VisitStmt(S);
+}
+
+void
+StmtProfiler::VisitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitExpr(const Expr *S) {
+ VisitStmt(S);
+}
+
+void StmtProfiler::VisitDeclRefExpr(const DeclRefExpr *S) {
+ VisitExpr(S);
+ if (!Canonical)
+ VisitNestedNameSpecifier(S->getQualifier());
+ VisitDecl(S->getDecl());
+ if (!Canonical)
+ VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
+}
+
+void StmtProfiler::VisitPredefinedExpr(const PredefinedExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getIdentType());
+}
+
+void StmtProfiler::VisitIntegerLiteral(const IntegerLiteral *S) {
+ VisitExpr(S);
+ S->getValue().Profile(ID);
+}
+
+void StmtProfiler::VisitCharacterLiteral(const CharacterLiteral *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getKind());
+ ID.AddInteger(S->getValue());
+}
+
+void StmtProfiler::VisitFloatingLiteral(const FloatingLiteral *S) {
+ VisitExpr(S);
+ S->getValue().Profile(ID);
+ ID.AddBoolean(S->isExact());
+}
+
+void StmtProfiler::VisitImaginaryLiteral(const ImaginaryLiteral *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitStringLiteral(const StringLiteral *S) {
+ VisitExpr(S);
+ ID.AddString(S->getBytes());
+ ID.AddInteger(S->getKind());
+}
+
+void StmtProfiler::VisitParenExpr(const ParenExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitParenListExpr(const ParenListExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitUnaryOperator(const UnaryOperator *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getOpcode());
+}
+
+void StmtProfiler::VisitOffsetOfExpr(const OffsetOfExpr *S) {
+ VisitType(S->getTypeSourceInfo()->getType());
+ unsigned n = S->getNumComponents();
+ for (unsigned i = 0; i < n; ++i) {
+ const OffsetOfExpr::OffsetOfNode& ON = S->getComponent(i);
+ ID.AddInteger(ON.getKind());
+ switch (ON.getKind()) {
+ case OffsetOfExpr::OffsetOfNode::Array:
+ // Expressions handled below.
+ break;
+
+ case OffsetOfExpr::OffsetOfNode::Field:
+ VisitDecl(ON.getField());
+ break;
+
+ case OffsetOfExpr::OffsetOfNode::Identifier:
+ ID.AddPointer(ON.getFieldName());
+ break;
+
+ case OffsetOfExpr::OffsetOfNode::Base:
+ // These nodes are implicit, and therefore don't need profiling.
+ break;
+ }
+ }
+
+ VisitExpr(S);
+}
+
+void
+StmtProfiler::VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getKind());
+ if (S->isArgumentType())
+ VisitType(S->getArgumentType());
+}
+
+void StmtProfiler::VisitArraySubscriptExpr(const ArraySubscriptExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCallExpr(const CallExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitMemberExpr(const MemberExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getMemberDecl());
+ if (!Canonical)
+ VisitNestedNameSpecifier(S->getQualifier());
+ ID.AddBoolean(S->isArrow());
+}
+
+void StmtProfiler::VisitCompoundLiteralExpr(const CompoundLiteralExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isFileScope());
+}
+
+void StmtProfiler::VisitCastExpr(const CastExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitImplicitCastExpr(const ImplicitCastExpr *S) {
+ VisitCastExpr(S);
+ ID.AddInteger(S->getValueKind());
+}
+
+void StmtProfiler::VisitExplicitCastExpr(const ExplicitCastExpr *S) {
+ VisitCastExpr(S);
+ VisitType(S->getTypeAsWritten());
+}
+
+void StmtProfiler::VisitCStyleCastExpr(const CStyleCastExpr *S) {
+ VisitExplicitCastExpr(S);
+}
+
+void StmtProfiler::VisitBinaryOperator(const BinaryOperator *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getOpcode());
+}
+
+void
+StmtProfiler::VisitCompoundAssignOperator(const CompoundAssignOperator *S) {
+ VisitBinaryOperator(S);
+}
+
+void StmtProfiler::VisitConditionalOperator(const ConditionalOperator *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitBinaryConditionalOperator(
+ const BinaryConditionalOperator *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitAddrLabelExpr(const AddrLabelExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getLabel());
+}
+
+void StmtProfiler::VisitStmtExpr(const StmtExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitShuffleVectorExpr(const ShuffleVectorExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitChooseExpr(const ChooseExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitGNUNullExpr(const GNUNullExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitVAArgExpr(const VAArgExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitInitListExpr(const InitListExpr *S) {
+ if (S->getSyntacticForm()) {
+ VisitInitListExpr(S->getSyntacticForm());
+ return;
+ }
+
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitDesignatedInitExpr(const DesignatedInitExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->usesGNUSyntax());
+ for (DesignatedInitExpr::const_designators_iterator D =
+ S->designators_begin(), DEnd = S->designators_end();
+ D != DEnd; ++D) {
+ if (D->isFieldDesignator()) {
+ ID.AddInteger(0);
+ VisitName(D->getFieldName());
+ continue;
+ }
+
+ if (D->isArrayDesignator()) {
+ ID.AddInteger(1);
+ } else {
+ assert(D->isArrayRangeDesignator());
+ ID.AddInteger(2);
+ }
+ ID.AddInteger(D->getFirstExprIndex());
+ }
+}
+
+void StmtProfiler::VisitImplicitValueInitExpr(const ImplicitValueInitExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitExtVectorElementExpr(const ExtVectorElementExpr *S) {
+ VisitExpr(S);
+ VisitName(&S->getAccessor());
+}
+
+void StmtProfiler::VisitBlockExpr(const BlockExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getBlockDecl());
+}
+
+void StmtProfiler::VisitGenericSelectionExpr(const GenericSelectionExpr *S) {
+ VisitExpr(S);
+ for (unsigned i = 0; i != S->getNumAssocs(); ++i) {
+ QualType T = S->getAssocType(i);
+ if (T.isNull())
+ ID.AddPointer(0);
+ else
+ VisitType(T);
+ VisitExpr(S->getAssocExpr(i));
+ }
+}
+
+void StmtProfiler::VisitPseudoObjectExpr(const PseudoObjectExpr *S) {
+ VisitExpr(S);
+ for (PseudoObjectExpr::const_semantics_iterator
+ i = S->semantics_begin(), e = S->semantics_end(); i != e; ++i)
+ // Normally, we would not profile the source expressions of OVEs.
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(*i))
+ Visit(OVE->getSourceExpr());
+}
+
+void StmtProfiler::VisitAtomicExpr(const AtomicExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getOp());
+}
+
+static Stmt::StmtClass DecodeOperatorCall(const CXXOperatorCallExpr *S,
+ UnaryOperatorKind &UnaryOp,
+ BinaryOperatorKind &BinaryOp) {
+ switch (S->getOperator()) {
+ case OO_None:
+ case OO_New:
+ case OO_Delete:
+ case OO_Array_New:
+ case OO_Array_Delete:
+ case OO_Arrow:
+ case OO_Call:
+ case OO_Conditional:
+ case NUM_OVERLOADED_OPERATORS:
+ llvm_unreachable("Invalid operator call kind");
+
+ case OO_Plus:
+ if (S->getNumArgs() == 1) {
+ UnaryOp = UO_Plus;
+ return Stmt::UnaryOperatorClass;
+ }
+
+ BinaryOp = BO_Add;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Minus:
+ if (S->getNumArgs() == 1) {
+ UnaryOp = UO_Minus;
+ return Stmt::UnaryOperatorClass;
+ }
+
+ BinaryOp = BO_Sub;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Star:
+ // Unary '*' is a dereference; binary '*' is multiplication.
+ if (S->getNumArgs() == 1) {
+ UnaryOp = UO_Deref;
+ return Stmt::UnaryOperatorClass;
+ }
+
+ BinaryOp = BO_Mul;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Slash:
+ BinaryOp = BO_Div;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Percent:
+ BinaryOp = BO_Rem;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Caret:
+ BinaryOp = BO_Xor;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Amp:
+ if (S->getNumArgs() == 1) {
+ UnaryOp = UO_AddrOf;
+ return Stmt::UnaryOperatorClass;
+ }
+
+ BinaryOp = BO_And;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Pipe:
+ BinaryOp = BO_Or;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Tilde:
+ UnaryOp = UO_Not;
+ return Stmt::UnaryOperatorClass;
+
+ case OO_Exclaim:
+ UnaryOp = UO_LNot;
+ return Stmt::UnaryOperatorClass;
+
+ case OO_Equal:
+ BinaryOp = BO_Assign;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Less:
+ BinaryOp = BO_LT;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Greater:
+ BinaryOp = BO_GT;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_PlusEqual:
+ BinaryOp = BO_AddAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_MinusEqual:
+ BinaryOp = BO_SubAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_StarEqual:
+ BinaryOp = BO_MulAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_SlashEqual:
+ BinaryOp = BO_DivAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_PercentEqual:
+ BinaryOp = BO_RemAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_CaretEqual:
+ BinaryOp = BO_XorAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_AmpEqual:
+ BinaryOp = BO_AndAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_PipeEqual:
+ BinaryOp = BO_OrAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_LessLess:
+ BinaryOp = BO_Shl;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_GreaterGreater:
+ BinaryOp = BO_Shr;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_LessLessEqual:
+ BinaryOp = BO_ShlAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_GreaterGreaterEqual:
+ BinaryOp = BO_ShrAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_EqualEqual:
+ BinaryOp = BO_EQ;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_ExclaimEqual:
+ BinaryOp = BO_NE;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_LessEqual:
+ BinaryOp = BO_LE;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_GreaterEqual:
+ BinaryOp = BO_GE;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_AmpAmp:
+ BinaryOp = BO_LAnd;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_PipePipe:
+ BinaryOp = BO_LOr;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_PlusPlus:
+ UnaryOp = S->getNumArgs() == 1? UO_PreInc
+ : UO_PostInc;
+ return Stmt::UnaryOperatorClass;
+
+ case OO_MinusMinus:
+ UnaryOp = S->getNumArgs() == 1? UO_PreDec
+ : UO_PostDec;
+ return Stmt::UnaryOperatorClass;
+
+ case OO_Comma:
+ BinaryOp = BO_Comma;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_ArrowStar:
+ BinaryOp = BO_PtrMemI;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Subscript:
+ return Stmt::ArraySubscriptExprClass;
+ }
+
+ llvm_unreachable("Invalid overloaded operator expression");
+}
+
+void StmtProfiler::VisitCXXOperatorCallExpr(const CXXOperatorCallExpr *S) {
+ if (S->isTypeDependent()) {
+ // Type-dependent operator calls are profiled like their underlying
+ // syntactic operator.
+ UnaryOperatorKind UnaryOp = UO_Extension;
+ BinaryOperatorKind BinaryOp = BO_Comma;
+ Stmt::StmtClass SC = DecodeOperatorCall(S, UnaryOp, BinaryOp);
+
+ ID.AddInteger(SC);
+ for (unsigned I = 0, N = S->getNumArgs(); I != N; ++I)
+ Visit(S->getArg(I));
+ if (SC == Stmt::UnaryOperatorClass)
+ ID.AddInteger(UnaryOp);
+ else if (SC == Stmt::BinaryOperatorClass ||
+ SC == Stmt::CompoundAssignOperatorClass)
+ ID.AddInteger(BinaryOp);
+ else
+ assert(SC == Stmt::ArraySubscriptExprClass);
+
+ return;
+ }
+
+ VisitCallExpr(S);
+ ID.AddInteger(S->getOperator());
+}
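+
+// Illustrative note: with the profiling above, a dependent 'a + b' inside a
+// template produces the same profile whether it is represented as a plain
+// BinaryOperator or as a type-dependent CXXOperatorCallExpr, so equivalent
+// declarations match under C++ [temp.over.link].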
+
+void StmtProfiler::VisitCXXMemberCallExpr(const CXXMemberCallExpr *S) {
+ VisitCallExpr(S);
+}
+
+void StmtProfiler::VisitCUDAKernelCallExpr(const CUDAKernelCallExpr *S) {
+ VisitCallExpr(S);
+}
+
+void StmtProfiler::VisitAsTypeExpr(const AsTypeExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXNamedCastExpr(const CXXNamedCastExpr *S) {
+ VisitExplicitCastExpr(S);
+}
+
+void StmtProfiler::VisitCXXStaticCastExpr(const CXXStaticCastExpr *S) {
+ VisitCXXNamedCastExpr(S);
+}
+
+void StmtProfiler::VisitCXXDynamicCastExpr(const CXXDynamicCastExpr *S) {
+ VisitCXXNamedCastExpr(S);
+}
+
+void
+StmtProfiler::VisitCXXReinterpretCastExpr(const CXXReinterpretCastExpr *S) {
+ VisitCXXNamedCastExpr(S);
+}
+
+void StmtProfiler::VisitCXXConstCastExpr(const CXXConstCastExpr *S) {
+ VisitCXXNamedCastExpr(S);
+}
+
+void StmtProfiler::VisitUserDefinedLiteral(const UserDefinedLiteral *S) {
+ VisitCallExpr(S);
+}
+
+void StmtProfiler::VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->getValue());
+}
+
+void StmtProfiler::VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXTypeidExpr(const CXXTypeidExpr *S) {
+ VisitExpr(S);
+ if (S->isTypeOperand())
+ VisitType(S->getTypeOperand());
+}
+
+void StmtProfiler::VisitCXXUuidofExpr(const CXXUuidofExpr *S) {
+ VisitExpr(S);
+ if (S->isTypeOperand())
+ VisitType(S->getTypeOperand());
+}
+
+void StmtProfiler::VisitCXXThisExpr(const CXXThisExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXThrowExpr(const CXXThrowExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXDefaultArgExpr(const CXXDefaultArgExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getParam());
+}
+
+void StmtProfiler::VisitCXXBindTemporaryExpr(const CXXBindTemporaryExpr *S) {
+ VisitExpr(S);
+ VisitDecl(
+ const_cast<CXXDestructorDecl *>(S->getTemporary()->getDestructor()));
+}
+
+void StmtProfiler::VisitCXXConstructExpr(const CXXConstructExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getConstructor());
+ ID.AddBoolean(S->isElidable());
+}
+
+void StmtProfiler::VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *S) {
+ VisitExplicitCastExpr(S);
+}
+
+void
+StmtProfiler::VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *S) {
+ VisitCXXConstructExpr(S);
+}
+
+void
+StmtProfiler::VisitLambdaExpr(const LambdaExpr *S) {
+ VisitExpr(S);
+ for (LambdaExpr::capture_iterator C = S->explicit_capture_begin(),
+ CEnd = S->explicit_capture_end();
+ C != CEnd; ++C) {
+ ID.AddInteger(C->getCaptureKind());
+ if (C->capturesVariable()) {
+ VisitDecl(C->getCapturedVar());
+ ID.AddBoolean(C->isPackExpansion());
+ }
+ }
+ // Note: If we actually needed to be able to match lambda
+ // expressions, we would have to consider parameters and return type
+ // here, among other things.
+ VisitStmt(S->getBody());
+}
+
+void
+StmtProfiler::VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXDeleteExpr(const CXXDeleteExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isGlobalDelete());
+ ID.AddBoolean(S->isArrayForm());
+ VisitDecl(S->getOperatorDelete());
+}
+
+void StmtProfiler::VisitCXXNewExpr(const CXXNewExpr *S) {
+ VisitExpr(S);
+ VisitType(S->getAllocatedType());
+ VisitDecl(S->getOperatorNew());
+ VisitDecl(S->getOperatorDelete());
+ ID.AddBoolean(S->isArray());
+ ID.AddInteger(S->getNumPlacementArgs());
+ ID.AddBoolean(S->isGlobalNew());
+ ID.AddBoolean(S->isParenTypeId());
+ ID.AddInteger(S->getInitializationStyle());
+}
+
+void
+StmtProfiler::VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isArrow());
+ VisitNestedNameSpecifier(S->getQualifier());
+ VisitType(S->getDestroyedType());
+}
+
+void StmtProfiler::VisitOverloadExpr(const OverloadExpr *S) {
+ VisitExpr(S);
+ VisitNestedNameSpecifier(S->getQualifier());
+ VisitName(S->getName());
+ ID.AddBoolean(S->hasExplicitTemplateArgs());
+ if (S->hasExplicitTemplateArgs())
+ VisitTemplateArguments(S->getExplicitTemplateArgs().getTemplateArgs(),
+ S->getExplicitTemplateArgs().NumTemplateArgs);
+}
+
+void
+StmtProfiler::VisitUnresolvedLookupExpr(const UnresolvedLookupExpr *S) {
+ VisitOverloadExpr(S);
+}
+
+void StmtProfiler::VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getTrait());
+ VisitType(S->getQueriedType());
+}
+
+void StmtProfiler::VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getTrait());
+ VisitType(S->getLhsType());
+ VisitType(S->getRhsType());
+}
+
+void StmtProfiler::VisitTypeTraitExpr(const TypeTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getTrait());
+ ID.AddInteger(S->getNumArgs());
+ for (unsigned I = 0, N = S->getNumArgs(); I != N; ++I)
+ VisitType(S->getArg(I)->getType());
+}
+
+void StmtProfiler::VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getTrait());
+ VisitType(S->getQueriedType());
+}
+
+void StmtProfiler::VisitExpressionTraitExpr(const ExpressionTraitExpr *S) {
+ VisitExpr(S);
+ ID.AddInteger(S->getTrait());
+ VisitExpr(S->getQueriedExpression());
+}
+
+void StmtProfiler::VisitDependentScopeDeclRefExpr(
+ const DependentScopeDeclRefExpr *S) {
+ VisitExpr(S);
+ VisitName(S->getDeclName());
+ VisitNestedNameSpecifier(S->getQualifier());
+ ID.AddBoolean(S->hasExplicitTemplateArgs());
+ if (S->hasExplicitTemplateArgs())
+ VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
+}
+
+void StmtProfiler::VisitExprWithCleanups(const ExprWithCleanups *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitCXXUnresolvedConstructExpr(
+ const CXXUnresolvedConstructExpr *S) {
+ VisitExpr(S);
+ VisitType(S->getTypeAsWritten());
+}
+
+void StmtProfiler::VisitCXXDependentScopeMemberExpr(
+ const CXXDependentScopeMemberExpr *S) {
+ ID.AddBoolean(S->isImplicitAccess());
+ if (!S->isImplicitAccess()) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isArrow());
+ }
+ VisitNestedNameSpecifier(S->getQualifier());
+ VisitName(S->getMember());
+ ID.AddBoolean(S->hasExplicitTemplateArgs());
+ if (S->hasExplicitTemplateArgs())
+ VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
+}
+
+void StmtProfiler::VisitUnresolvedMemberExpr(const UnresolvedMemberExpr *S) {
+ ID.AddBoolean(S->isImplicitAccess());
+ if (!S->isImplicitAccess()) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isArrow());
+ }
+ VisitNestedNameSpecifier(S->getQualifier());
+ VisitName(S->getMemberName());
+ ID.AddBoolean(S->hasExplicitTemplateArgs());
+ if (S->hasExplicitTemplateArgs())
+ VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
+}
+
+void StmtProfiler::VisitCXXNoexceptExpr(const CXXNoexceptExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitPackExpansionExpr(const PackExpansionExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitSizeOfPackExpr(const SizeOfPackExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getPack());
+}
+
+void StmtProfiler::VisitSubstNonTypeTemplateParmPackExpr(
+ const SubstNonTypeTemplateParmPackExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getParameterPack());
+ VisitTemplateArgument(S->getArgumentPack());
+}
+
+void StmtProfiler::VisitSubstNonTypeTemplateParmExpr(
+ const SubstNonTypeTemplateParmExpr *E) {
+ // Profile exactly as the replacement expression.
+ Visit(E->getReplacement());
+}
+
+void StmtProfiler::VisitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitOpaqueValueExpr(const OpaqueValueExpr *E) {
+ VisitExpr(E);
+}
+
+void StmtProfiler::VisitObjCStringLiteral(const ObjCStringLiteral *S) {
+ VisitExpr(S);
+}
+
+void StmtProfiler::VisitObjCNumericLiteral(const ObjCNumericLiteral *E) {
+ VisitExpr(E);
+}
+
+void StmtProfiler::VisitObjCArrayLiteral(const ObjCArrayLiteral *E) {
+ VisitExpr(E);
+}
+
+void StmtProfiler::VisitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E) {
+ VisitExpr(E);
+}
+
+void StmtProfiler::VisitObjCEncodeExpr(const ObjCEncodeExpr *S) {
+ VisitExpr(S);
+ VisitType(S->getEncodedType());
+}
+
+void StmtProfiler::VisitObjCSelectorExpr(const ObjCSelectorExpr *S) {
+ VisitExpr(S);
+ VisitName(S->getSelector());
+}
+
+void StmtProfiler::VisitObjCProtocolExpr(const ObjCProtocolExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getProtocol());
+}
+
+void StmtProfiler::VisitObjCIvarRefExpr(const ObjCIvarRefExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getDecl());
+ ID.AddBoolean(S->isArrow());
+ ID.AddBoolean(S->isFreeIvar());
+}
+
+void StmtProfiler::VisitObjCPropertyRefExpr(const ObjCPropertyRefExpr *S) {
+ VisitExpr(S);
+ if (S->isImplicitProperty()) {
+ VisitDecl(S->getImplicitPropertyGetter());
+ VisitDecl(S->getImplicitPropertySetter());
+ } else {
+ VisitDecl(S->getExplicitProperty());
+ }
+ if (S->isSuperReceiver()) {
+ ID.AddBoolean(S->isSuperReceiver());
+ VisitType(S->getSuperReceiverType());
+ }
+}
+
+void StmtProfiler::VisitObjCSubscriptRefExpr(const ObjCSubscriptRefExpr *S) {
+ VisitExpr(S);
+ VisitDecl(S->getAtIndexMethodDecl());
+ VisitDecl(S->setAtIndexMethodDecl());
+}
+
+void StmtProfiler::VisitObjCMessageExpr(const ObjCMessageExpr *S) {
+ VisitExpr(S);
+ VisitName(S->getSelector());
+ VisitDecl(S->getMethodDecl());
+}
+
+void StmtProfiler::VisitObjCIsaExpr(const ObjCIsaExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->isArrow());
+}
+
+void StmtProfiler::VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->getValue());
+}
+
+void StmtProfiler::VisitObjCIndirectCopyRestoreExpr(
+ const ObjCIndirectCopyRestoreExpr *S) {
+ VisitExpr(S);
+ ID.AddBoolean(S->shouldCopy());
+}
+
+void StmtProfiler::VisitObjCBridgedCastExpr(const ObjCBridgedCastExpr *S) {
+ VisitExplicitCastExpr(S);
+ ID.AddBoolean(S->getBridgeKind());
+}
+
+void StmtProfiler::VisitDecl(const Decl *D) {
+ ID.AddInteger(D? D->getKind() : 0);
+
+ if (Canonical && D) {
+ if (const NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(D)) {
+ ID.AddInteger(NTTP->getDepth());
+ ID.AddInteger(NTTP->getIndex());
+ ID.AddBoolean(NTTP->isParameterPack());
+ VisitType(NTTP->getType());
+ return;
+ }
+
+ if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(D)) {
+ // The Itanium C++ ABI uses the type, scope depth, and scope
+ // index of a parameter when mangling expressions that involve
+ // function parameters, so we will use the parameter's type for
+ // establishing function parameter identity. That way, our
+ // definition of "equivalent" (per C++ [temp.over.link]) is at
+ // least as strong as the definition of "equivalent" used for
+ // name mangling.
+ VisitType(Parm->getType());
+ ID.AddInteger(Parm->getFunctionScopeDepth());
+ ID.AddInteger(Parm->getFunctionScopeIndex());
+ return;
+ }
+
+ if (const TemplateTypeParmDecl *TTP =
+ dyn_cast<TemplateTypeParmDecl>(D)) {
+ ID.AddInteger(TTP->getDepth());
+ ID.AddInteger(TTP->getIndex());
+ ID.AddBoolean(TTP->isParameterPack());
+ return;
+ }
+
+ if (const TemplateTemplateParmDecl *TTP =
+ dyn_cast<TemplateTemplateParmDecl>(D)) {
+ ID.AddInteger(TTP->getDepth());
+ ID.AddInteger(TTP->getIndex());
+ ID.AddBoolean(TTP->isParameterPack());
+ return;
+ }
+ }
+
+ ID.AddPointer(D? D->getCanonicalDecl() : 0);
+}
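+
+// Illustrative example (editor's sketch, not part of the upstream file) of
+// why the parameter's type and scope position are profiled above:
+//
+//   template<typename T> auto f(T a, T b) -> decltype(a + b);   // #1
+//   template<typename T> auto f(T x, T y) -> decltype(x + y);   // #2
+//
+// The expressions 'a + b' and 'x + y' must profile identically so that #1
+// and #2 are recognized as declaring the same function template, matching
+// how the Itanium mangler treats the two declarations.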
+
+void StmtProfiler::VisitType(QualType T) {
+ if (Canonical)
+ T = Context.getCanonicalType(T);
+
+ ID.AddPointer(T.getAsOpaquePtr());
+}
+
+void StmtProfiler::VisitName(DeclarationName Name) {
+ ID.AddPointer(Name.getAsOpaquePtr());
+}
+
+void StmtProfiler::VisitNestedNameSpecifier(NestedNameSpecifier *NNS) {
+ if (Canonical)
+ NNS = Context.getCanonicalNestedNameSpecifier(NNS);
+ ID.AddPointer(NNS);
+}
+
+void StmtProfiler::VisitTemplateName(TemplateName Name) {
+ if (Canonical)
+ Name = Context.getCanonicalTemplateName(Name);
+
+ Name.Profile(ID);
+}
+
+void StmtProfiler::VisitTemplateArguments(const TemplateArgumentLoc *Args,
+ unsigned NumArgs) {
+ ID.AddInteger(NumArgs);
+ for (unsigned I = 0; I != NumArgs; ++I)
+ VisitTemplateArgument(Args[I].getArgument());
+}
+
+void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
+ // Mostly repetitive with TemplateArgument::Profile!
+ ID.AddInteger(Arg.getKind());
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ break;
+
+ case TemplateArgument::Type:
+ VisitType(Arg.getAsType());
+ break;
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ VisitTemplateName(Arg.getAsTemplateOrTemplatePattern());
+ break;
+
+ case TemplateArgument::Declaration:
+ VisitDecl(Arg.getAsDecl());
+ break;
+
+ case TemplateArgument::Integral:
+ Arg.getAsIntegral()->Profile(ID);
+ VisitType(Arg.getIntegralType());
+ break;
+
+ case TemplateArgument::Expression:
+ Visit(Arg.getAsExpr());
+ break;
+
+ case TemplateArgument::Pack:
+ const TemplateArgument *Pack = Arg.pack_begin();
+ for (unsigned i = 0, e = Arg.pack_size(); i != e; ++i)
+ VisitTemplateArgument(Pack[i]);
+ break;
+ }
+}
+
+void Stmt::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
+ bool Canonical) const {
+ StmtProfiler Profiler(ID, Context, Canonical);
+ Profiler.Visit(this);
+}
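+
+// Illustrative usage sketch (editor's addition, not part of the upstream
+// file): profiling two expressions into FoldingSetNodeIDs and comparing the
+// IDs is a simple way to test structural equivalence.  The Expr pointers and
+// ASTContext are assumed to be supplied by the caller.
+//
+//   static bool profileEquivalent(const Expr *A, const Expr *B,
+//                                 const ASTContext &Ctx) {
+//     llvm::FoldingSetNodeID IDA, IDB;
+//     A->Profile(IDA, Ctx, /*Canonical=*/true);
+//     B->Profile(IDB, Ctx, /*Canonical=*/true);
+//     return IDA == IDB;
+//   }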
diff --git a/contrib/llvm/tools/clang/lib/AST/StmtViz.cpp b/contrib/llvm/tools/clang/lib/AST/StmtViz.cpp
new file mode 100644
index 0000000..8be287e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/StmtViz.cpp
@@ -0,0 +1,62 @@
+//===--- StmtViz.cpp - Graphviz visualization for Stmt ASTs -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements Stmt::viewAST, which generates a Graphviz DOT file
+// that depicts the AST and then calls Graphviz/dot+gv on it.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtGraphTraits.h"
+#include "clang/AST/Decl.h"
+#include "llvm/Support/GraphWriter.h"
+
+using namespace clang;
+
+void Stmt::viewAST() const {
+#ifndef NDEBUG
+ llvm::ViewGraph(this, "AST");
+#else
+ llvm::errs() << "Stmt::viewAST is only available in debug builds on "
+ << "systems with Graphviz or gv!\n";
+#endif
+}
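+
+// Illustrative usage sketch (editor's addition): viewAST is typically
+// invoked on a statement from a debugger or ad-hoc debugging code; 'FD' is
+// an assumed FunctionDecl pointer.
+//
+//   if (const Stmt *Body = FD->getBody())
+//     Body->viewAST();   // renders the AST with dot/gv in debug builds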
+
+namespace llvm {
+template<>
+struct DOTGraphTraits<const Stmt*> : public DefaultDOTGraphTraits {
+ DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+
+ static std::string getNodeLabel(const Stmt* Node, const Stmt* Graph) {
+
+#ifndef NDEBUG
+ std::string OutSStr;
+ llvm::raw_string_ostream Out(OutSStr);
+
+ if (Node)
+ Out << Node->getStmtClassName();
+ else
+ Out << "<NULL>";
+
+ std::string OutStr = Out.str();
+ if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
+
+ // Process string output to make it nicer...
+ for (unsigned i = 0; i != OutStr.length(); ++i)
+ if (OutStr[i] == '\n') { // Left justify
+ OutStr[i] = '\\';
+ OutStr.insert(OutStr.begin()+i+1, 'l');
+ }
+
+ return OutStr;
+#else
+ return "";
+#endif
+ }
+};
+} // end namespace llvm
diff --git a/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
new file mode 100644
index 0000000..531e03e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/TemplateBase.cpp
@@ -0,0 +1,628 @@
+//===--- TemplateBase.cpp - Common template AST class implementation ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements common classes used throughout C++ template
+// representations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/SmallString.h"
+#include <algorithm>
+#include <cctype>
+
+using namespace clang;
+
+/// \brief Print a template integral argument value.
+///
+/// \param TemplArg the TemplateArgument instance to print.
+///
+/// \param Out the raw_ostream instance to use for printing.
+static void printIntegral(const TemplateArgument &TemplArg,
+ raw_ostream &Out) {
+ const ::clang::Type *T = TemplArg.getIntegralType().getTypePtr();
+ const llvm::APSInt *Val = TemplArg.getAsIntegral();
+
+ if (T->isBooleanType()) {
+ Out << (Val->getBoolValue() ? "true" : "false");
+ } else if (T->isCharType()) {
+ const char Ch = Val->getZExtValue();
+ Out << ((Ch == '\'') ? "'\\" : "'");
+ Out.write_escaped(StringRef(&Ch, 1), /*UseHexEscapes=*/ true);
+ Out << "'";
+ } else {
+ Out << Val->toString(10);
+ }
+}
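+
+// Illustrative outputs of printIntegral (editor's sketch):
+//   a bool argument 'true'   prints as   true
+//   a char argument 'a'      prints as   'a'
+//   an int argument 42       prints as   42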
+
+//===----------------------------------------------------------------------===//
+// TemplateArgument Implementation
+//===----------------------------------------------------------------------===//
+
+TemplateArgument TemplateArgument::CreatePackCopy(ASTContext &Context,
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
+ if (NumArgs == 0)
+ return TemplateArgument(0, 0);
+
+ TemplateArgument *Storage = new (Context) TemplateArgument [NumArgs];
+ std::copy(Args, Args + NumArgs, Storage);
+ return TemplateArgument(Storage, NumArgs);
+}
+
+bool TemplateArgument::isDependent() const {
+ switch (getKind()) {
+ case Null:
+ llvm_unreachable("Should not have a NULL template argument");
+
+ case Type:
+ return getAsType()->isDependentType();
+
+ case Template:
+ return getAsTemplate().isDependent();
+
+ case TemplateExpansion:
+ return true;
+
+ case Declaration:
+ if (Decl *D = getAsDecl()) {
+ if (DeclContext *DC = dyn_cast<DeclContext>(D))
+ return DC->isDependentContext();
+ return D->getDeclContext()->isDependentContext();
+ }
+
+ return false;
+
+ case Integral:
+ // Never dependent
+ return false;
+
+ case Expression:
+ return (getAsExpr()->isTypeDependent() || getAsExpr()->isValueDependent());
+
+ case Pack:
+ for (pack_iterator P = pack_begin(), PEnd = pack_end(); P != PEnd; ++P) {
+ if (P->isDependent())
+ return true;
+ }
+
+ return false;
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+bool TemplateArgument::isInstantiationDependent() const {
+ switch (getKind()) {
+ case Null:
+ llvm_unreachable("Should not have a NULL template argument");
+
+ case Type:
+ return getAsType()->isInstantiationDependentType();
+
+ case Template:
+ return getAsTemplate().isInstantiationDependent();
+
+ case TemplateExpansion:
+ return true;
+
+ case Declaration:
+ if (Decl *D = getAsDecl()) {
+ if (DeclContext *DC = dyn_cast<DeclContext>(D))
+ return DC->isDependentContext();
+ return D->getDeclContext()->isDependentContext();
+ }
+ return false;
+
+ case Integral:
+ // Never dependent
+ return false;
+
+ case Expression:
+ return getAsExpr()->isInstantiationDependent();
+
+ case Pack:
+ for (pack_iterator P = pack_begin(), PEnd = pack_end(); P != PEnd; ++P) {
+ if (P->isInstantiationDependent())
+ return true;
+ }
+
+ return false;
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+bool TemplateArgument::isPackExpansion() const {
+ switch (getKind()) {
+ case Null:
+ case Declaration:
+ case Integral:
+ case Pack:
+ case Template:
+ return false;
+
+ case TemplateExpansion:
+ return true;
+
+ case Type:
+ return isa<PackExpansionType>(getAsType());
+
+ case Expression:
+ return isa<PackExpansionExpr>(getAsExpr());
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+bool TemplateArgument::containsUnexpandedParameterPack() const {
+ switch (getKind()) {
+ case Null:
+ case Declaration:
+ case Integral:
+ case TemplateExpansion:
+ break;
+
+ case Type:
+ if (getAsType()->containsUnexpandedParameterPack())
+ return true;
+ break;
+
+ case Template:
+ if (getAsTemplate().containsUnexpandedParameterPack())
+ return true;
+ break;
+
+ case Expression:
+ if (getAsExpr()->containsUnexpandedParameterPack())
+ return true;
+ break;
+
+ case Pack:
+ for (pack_iterator P = pack_begin(), PEnd = pack_end(); P != PEnd; ++P)
+ if (P->containsUnexpandedParameterPack())
+ return true;
+
+ break;
+ }
+
+ return false;
+}
+
+llvm::Optional<unsigned> TemplateArgument::getNumTemplateExpansions() const {
+ assert(Kind == TemplateExpansion);
+ if (TemplateArg.NumExpansions)
+ return TemplateArg.NumExpansions - 1;
+
+ return llvm::Optional<unsigned>();
+}
+
+void TemplateArgument::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context) const {
+ ID.AddInteger(Kind);
+ switch (Kind) {
+ case Null:
+ break;
+
+ case Type:
+ getAsType().Profile(ID);
+ break;
+
+ case Declaration:
+ ID.AddPointer(getAsDecl()? getAsDecl()->getCanonicalDecl() : 0);
+ break;
+
+ case Template:
+ case TemplateExpansion: {
+ TemplateName Template = getAsTemplateOrTemplatePattern();
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast_or_null<TemplateTemplateParmDecl>(
+ Template.getAsTemplateDecl())) {
+ ID.AddBoolean(true);
+ ID.AddInteger(TTP->getDepth());
+ ID.AddInteger(TTP->getPosition());
+ ID.AddBoolean(TTP->isParameterPack());
+ } else {
+ ID.AddBoolean(false);
+ ID.AddPointer(Context.getCanonicalTemplateName(Template)
+ .getAsVoidPointer());
+ }
+ break;
+ }
+
+ case Integral:
+ getAsIntegral()->Profile(ID);
+ getIntegralType().Profile(ID);
+ break;
+
+ case Expression:
+ getAsExpr()->Profile(ID, Context, true);
+ break;
+
+ case Pack:
+ ID.AddInteger(Args.NumArgs);
+ for (unsigned I = 0; I != Args.NumArgs; ++I)
+ Args.Args[I].Profile(ID, Context);
+ }
+}
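+
+// Illustrative usage sketch (editor's addition): Profile feeds a
+// FoldingSetNodeID so that template argument lists can be uniqued, for
+// example when looking up an existing specialization.  Args, NumArgs and
+// Ctx are assumed to be supplied by the caller.
+//
+//   llvm::FoldingSetNodeID ID;
+//   for (unsigned I = 0; I != NumArgs; ++I)
+//     Args[I].Profile(ID, Ctx);
+//   // ID can now be used with a llvm::FoldingSet's FindNodeOrInsertPos().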
+
+bool TemplateArgument::structurallyEquals(const TemplateArgument &Other) const {
+ if (getKind() != Other.getKind()) return false;
+
+ switch (getKind()) {
+ case Null:
+ case Type:
+ case Declaration:
+ case Expression:
+ case Template:
+ case TemplateExpansion:
+ return TypeOrValue == Other.TypeOrValue;
+
+ case Integral:
+ return getIntegralType() == Other.getIntegralType() &&
+ *getAsIntegral() == *Other.getAsIntegral();
+
+ case Pack:
+ if (Args.NumArgs != Other.Args.NumArgs) return false;
+ for (unsigned I = 0, E = Args.NumArgs; I != E; ++I)
+ if (!Args.Args[I].structurallyEquals(Other.Args.Args[I]))
+ return false;
+ return true;
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+TemplateArgument TemplateArgument::getPackExpansionPattern() const {
+ assert(isPackExpansion());
+
+ switch (getKind()) {
+ case Type:
+ return getAsType()->getAs<PackExpansionType>()->getPattern();
+
+ case Expression:
+ return cast<PackExpansionExpr>(getAsExpr())->getPattern();
+
+ case TemplateExpansion:
+ return TemplateArgument(getAsTemplateOrTemplatePattern());
+
+ case Declaration:
+ case Integral:
+ case Pack:
+ case Null:
+ case Template:
+ return TemplateArgument();
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+void TemplateArgument::print(const PrintingPolicy &Policy,
+ raw_ostream &Out) const {
+ switch (getKind()) {
+ case Null:
+ Out << "<no value>";
+ break;
+
+ case Type: {
+ PrintingPolicy SubPolicy(Policy);
+ SubPolicy.SuppressStrongLifetime = true;
+ std::string TypeStr;
+ getAsType().getAsStringInternal(TypeStr, SubPolicy);
+ Out << TypeStr;
+ break;
+ }
+
+ case Declaration: {
+ if (NamedDecl *ND = dyn_cast_or_null<NamedDecl>(getAsDecl())) {
+ if (ND->getDeclName()) {
+ Out << *ND;
+ } else {
+ Out << "<anonymous>";
+ }
+ } else {
+ Out << "nullptr";
+ }
+ break;
+ }
+
+ case Template:
+ getAsTemplate().print(Out, Policy);
+ break;
+
+ case TemplateExpansion:
+ getAsTemplateOrTemplatePattern().print(Out, Policy);
+ Out << "...";
+ break;
+
+ case Integral: {
+ printIntegral(*this, Out);
+ break;
+ }
+
+ case Expression:
+ getAsExpr()->printPretty(Out, 0, Policy);
+ break;
+
+ case Pack:
+ Out << "<";
+ bool First = true;
+ for (TemplateArgument::pack_iterator P = pack_begin(), PEnd = pack_end();
+ P != PEnd; ++P) {
+ if (First)
+ First = false;
+ else
+ Out << ", ";
+
+ P->print(Policy, Out);
+ }
+ Out << ">";
+ break;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// TemplateArgumentLoc Implementation
+//===----------------------------------------------------------------------===//
+
+TemplateArgumentLocInfo::TemplateArgumentLocInfo() {
+ memset((void*)this, 0, sizeof(TemplateArgumentLocInfo));
+}
+
+SourceRange TemplateArgumentLoc::getSourceRange() const {
+ switch (Argument.getKind()) {
+ case TemplateArgument::Expression:
+ return getSourceExpression()->getSourceRange();
+
+ case TemplateArgument::Declaration:
+ return getSourceDeclExpression()->getSourceRange();
+
+ case TemplateArgument::Type:
+ if (TypeSourceInfo *TSI = getTypeSourceInfo())
+ return TSI->getTypeLoc().getSourceRange();
+ else
+ return SourceRange();
+
+ case TemplateArgument::Template:
+ if (getTemplateQualifierLoc())
+ return SourceRange(getTemplateQualifierLoc().getBeginLoc(),
+ getTemplateNameLoc());
+ return SourceRange(getTemplateNameLoc());
+
+ case TemplateArgument::TemplateExpansion:
+ if (getTemplateQualifierLoc())
+ return SourceRange(getTemplateQualifierLoc().getBeginLoc(),
+ getTemplateEllipsisLoc());
+ return SourceRange(getTemplateNameLoc(), getTemplateEllipsisLoc());
+
+ case TemplateArgument::Integral:
+ case TemplateArgument::Pack:
+ case TemplateArgument::Null:
+ return SourceRange();
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+TemplateArgumentLoc
+TemplateArgumentLoc::getPackExpansionPattern(SourceLocation &Ellipsis,
+ llvm::Optional<unsigned> &NumExpansions,
+ ASTContext &Context) const {
+ assert(Argument.isPackExpansion());
+
+ switch (Argument.getKind()) {
+ case TemplateArgument::Type: {
+ // FIXME: We shouldn't ever have to worry about missing
+ // type-source info!
+ TypeSourceInfo *ExpansionTSInfo = getTypeSourceInfo();
+ if (!ExpansionTSInfo)
+ ExpansionTSInfo = Context.getTrivialTypeSourceInfo(
+ getArgument().getAsType(),
+ Ellipsis);
+ PackExpansionTypeLoc Expansion
+ = cast<PackExpansionTypeLoc>(ExpansionTSInfo->getTypeLoc());
+ Ellipsis = Expansion.getEllipsisLoc();
+
+ TypeLoc Pattern = Expansion.getPatternLoc();
+ NumExpansions = Expansion.getTypePtr()->getNumExpansions();
+
+ // FIXME: This is horrible. We know where the source location data is for
+ // the pattern, and we have the pattern's type, but we are forced to copy
+ // them into an ASTContext because TypeSourceInfo bundles them together
+ // and TemplateArgumentLoc traffics in TypeSourceInfo pointers.
+ TypeSourceInfo *PatternTSInfo
+ = Context.CreateTypeSourceInfo(Pattern.getType(),
+ Pattern.getFullDataSize());
+ memcpy(PatternTSInfo->getTypeLoc().getOpaqueData(),
+ Pattern.getOpaqueData(), Pattern.getFullDataSize());
+ return TemplateArgumentLoc(TemplateArgument(Pattern.getType()),
+ PatternTSInfo);
+ }
+
+ case TemplateArgument::Expression: {
+ PackExpansionExpr *Expansion
+ = cast<PackExpansionExpr>(Argument.getAsExpr());
+ Expr *Pattern = Expansion->getPattern();
+ Ellipsis = Expansion->getEllipsisLoc();
+ NumExpansions = Expansion->getNumExpansions();
+ return TemplateArgumentLoc(Pattern, Pattern);
+ }
+
+ case TemplateArgument::TemplateExpansion:
+ Ellipsis = getTemplateEllipsisLoc();
+ NumExpansions = Argument.getNumTemplateExpansions();
+ return TemplateArgumentLoc(Argument.getPackExpansionPattern(),
+ getTemplateQualifierLoc(),
+ getTemplateNameLoc());
+
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Template:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Pack:
+ case TemplateArgument::Null:
+ return TemplateArgumentLoc();
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
+ const TemplateArgument &Arg) {
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ // This is bad, but not as bad as crashing because of argument
+ // count mismatches.
+ return DB << "(null template argument)";
+
+ case TemplateArgument::Type:
+ return DB << Arg.getAsType();
+
+ case TemplateArgument::Declaration:
+ if (Decl *D = Arg.getAsDecl())
+ return DB << D;
+ return DB << "nullptr";
+
+ case TemplateArgument::Integral:
+ return DB << Arg.getAsIntegral()->toString(10);
+
+ case TemplateArgument::Template:
+ return DB << Arg.getAsTemplate();
+
+ case TemplateArgument::TemplateExpansion:
+ return DB << Arg.getAsTemplateOrTemplatePattern() << "...";
+
+ case TemplateArgument::Expression: {
+ // This shouldn't actually ever happen, so it's okay that we're
+ // regurgitating an expression here.
+ // FIXME: We're guessing at LangOptions!
+ SmallString<32> Str;
+ llvm::raw_svector_ostream OS(Str);
+ LangOptions LangOpts;
+ LangOpts.CPlusPlus = true;
+ PrintingPolicy Policy(LangOpts);
+ Arg.getAsExpr()->printPretty(OS, 0, Policy);
+ return DB << OS.str();
+ }
+
+ case TemplateArgument::Pack: {
+ // FIXME: We're guessing at LangOptions!
+ SmallString<32> Str;
+ llvm::raw_svector_ostream OS(Str);
+ LangOptions LangOpts;
+ LangOpts.CPlusPlus = true;
+ PrintingPolicy Policy(LangOpts);
+ Arg.print(Policy, OS);
+ return DB << OS.str();
+ }
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
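+
+// Illustrative usage sketch (editor's addition): this operator lets a
+// TemplateArgument be streamed directly into a diagnostic.  The Diag()
+// callee and the diagnostic ID below are hypothetical.
+//
+//   Diag(Loc, diag::err_template_arg_mismatch) << Arg;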
+
+const ASTTemplateArgumentListInfo *
+ASTTemplateArgumentListInfo::Create(ASTContext &C,
+ const TemplateArgumentListInfo &List) {
+ std::size_t size = sizeof(CXXDependentScopeMemberExpr) +
+ ASTTemplateArgumentListInfo::sizeFor(List.size());
+ void *Mem = C.Allocate(size, llvm::alignOf<ASTTemplateArgumentListInfo>());
+ ASTTemplateArgumentListInfo *TAI = new (Mem) ASTTemplateArgumentListInfo();
+ TAI->initializeFrom(List);
+ return TAI;
+}
+
+void ASTTemplateArgumentListInfo::initializeFrom(
+ const TemplateArgumentListInfo &Info) {
+ LAngleLoc = Info.getLAngleLoc();
+ RAngleLoc = Info.getRAngleLoc();
+ NumTemplateArgs = Info.size();
+
+ TemplateArgumentLoc *ArgBuffer = getTemplateArgs();
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ new (&ArgBuffer[i]) TemplateArgumentLoc(Info[i]);
+}
+
+void ASTTemplateArgumentListInfo::initializeFrom(
+ const TemplateArgumentListInfo &Info,
+ bool &Dependent,
+ bool &InstantiationDependent,
+ bool &ContainsUnexpandedParameterPack) {
+ LAngleLoc = Info.getLAngleLoc();
+ RAngleLoc = Info.getRAngleLoc();
+ NumTemplateArgs = Info.size();
+
+ TemplateArgumentLoc *ArgBuffer = getTemplateArgs();
+ for (unsigned i = 0; i != NumTemplateArgs; ++i) {
+ Dependent = Dependent || Info[i].getArgument().isDependent();
+ InstantiationDependent = InstantiationDependent ||
+ Info[i].getArgument().isInstantiationDependent();
+ ContainsUnexpandedParameterPack
+ = ContainsUnexpandedParameterPack ||
+ Info[i].getArgument().containsUnexpandedParameterPack();
+
+ new (&ArgBuffer[i]) TemplateArgumentLoc(Info[i]);
+ }
+}
+
+void ASTTemplateArgumentListInfo::copyInto(
+ TemplateArgumentListInfo &Info) const {
+ Info.setLAngleLoc(LAngleLoc);
+ Info.setRAngleLoc(RAngleLoc);
+ for (unsigned I = 0; I != NumTemplateArgs; ++I)
+ Info.addArgument(getTemplateArgs()[I]);
+}
+
+std::size_t ASTTemplateArgumentListInfo::sizeFor(unsigned NumTemplateArgs) {
+ return sizeof(ASTTemplateArgumentListInfo) +
+ sizeof(TemplateArgumentLoc) * NumTemplateArgs;
+}
+
+void
+ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc,
+ const TemplateArgumentListInfo &Info) {
+ Base::initializeFrom(Info);
+ setTemplateKeywordLoc(TemplateKWLoc);
+}
+
+void
+ASTTemplateKWAndArgsInfo
+::initializeFrom(SourceLocation TemplateKWLoc,
+ const TemplateArgumentListInfo &Info,
+ bool &Dependent,
+ bool &InstantiationDependent,
+ bool &ContainsUnexpandedParameterPack) {
+ Base::initializeFrom(Info, Dependent, InstantiationDependent,
+ ContainsUnexpandedParameterPack);
+ setTemplateKeywordLoc(TemplateKWLoc);
+}
+
+void
+ASTTemplateKWAndArgsInfo::initializeFrom(SourceLocation TemplateKWLoc) {
+ // No explicit template arguments, but template keyword loc is valid.
+ assert(TemplateKWLoc.isValid());
+ LAngleLoc = SourceLocation();
+ RAngleLoc = SourceLocation();
+ NumTemplateArgs = 0;
+ setTemplateKeywordLoc(TemplateKWLoc);
+}
+
+std::size_t
+ASTTemplateKWAndArgsInfo::sizeFor(unsigned NumTemplateArgs) {
+ // Add space for the template keyword location.
+ return Base::sizeFor(NumTemplateArgs) + sizeof(SourceLocation);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp b/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp
new file mode 100644
index 0000000..e89ba53
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/TemplateName.cpp
@@ -0,0 +1,176 @@
+//===--- TemplateName.cpp - C++ Template Name Representation ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TemplateName interface and subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/TemplateName.h"
+#include "clang/AST/TemplateBase.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace llvm;
+
+TemplateArgument
+SubstTemplateTemplateParmPackStorage::getArgumentPack() const {
+ return TemplateArgument(Arguments, size());
+}
+
+void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, Parameter, Replacement);
+}
+
+void SubstTemplateTemplateParmStorage::Profile(llvm::FoldingSetNodeID &ID,
+ TemplateTemplateParmDecl *parameter,
+ TemplateName replacement) {
+ ID.AddPointer(parameter);
+ ID.AddPointer(replacement.getAsVoidPointer());
+}
+
+void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID,
+ ASTContext &Context) {
+ Profile(ID, Context, Parameter, TemplateArgument(Arguments, size()));
+}
+
+void SubstTemplateTemplateParmPackStorage::Profile(llvm::FoldingSetNodeID &ID,
+ ASTContext &Context,
+ TemplateTemplateParmDecl *Parameter,
+ const TemplateArgument &ArgPack) {
+ ID.AddPointer(Parameter);
+ ArgPack.Profile(ID, Context);
+}
+
+TemplateName::NameKind TemplateName::getKind() const {
+ if (Storage.is<TemplateDecl *>())
+ return Template;
+ if (Storage.is<DependentTemplateName *>())
+ return DependentTemplate;
+ if (Storage.is<QualifiedTemplateName *>())
+ return QualifiedTemplate;
+
+ UncommonTemplateNameStorage *uncommon
+ = Storage.get<UncommonTemplateNameStorage*>();
+ if (uncommon->getAsOverloadedStorage())
+ return OverloadedTemplate;
+ if (uncommon->getAsSubstTemplateTemplateParm())
+ return SubstTemplateTemplateParm;
+ return SubstTemplateTemplateParmPack;
+}
+
+TemplateDecl *TemplateName::getAsTemplateDecl() const {
+ if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>())
+ return Template;
+
+ if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName())
+ return QTN->getTemplateDecl();
+
+ if (SubstTemplateTemplateParmStorage *sub = getAsSubstTemplateTemplateParm())
+ return sub->getReplacement().getAsTemplateDecl();
+
+ return 0;
+}
+
+bool TemplateName::isDependent() const {
+ if (TemplateDecl *Template = getAsTemplateDecl()) {
+ if (isa<TemplateTemplateParmDecl>(Template))
+ return true;
+ // FIXME: Hack, getDeclContext() can be null if Template is still
+ // initializing due to PCH reading, so we check it before using it.
+ // Should probably modify TemplateSpecializationType to allow constructing
+ // it without the isDependent() checking.
+ return Template->getDeclContext() &&
+ Template->getDeclContext()->isDependentContext();
+ }
+
+ assert(!getAsOverloadedTemplate() &&
+ "overloaded templates shouldn't survive to here");
+
+ return true;
+}
+
+bool TemplateName::isInstantiationDependent() const {
+ if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
+ if (QTN->getQualifier()->isInstantiationDependent())
+ return true;
+ }
+
+ return isDependent();
+}
+
+bool TemplateName::containsUnexpandedParameterPack() const {
+ if (TemplateDecl *Template = getAsTemplateDecl()) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(Template))
+ return TTP->isParameterPack();
+
+ return false;
+ }
+
+ if (DependentTemplateName *DTN = getAsDependentTemplateName())
+ return DTN->getQualifier() &&
+ DTN->getQualifier()->containsUnexpandedParameterPack();
+
+ return getAsSubstTemplateTemplateParmPack() != 0;
+}
+
+void
+TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
+ bool SuppressNNS) const {
+ if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>())
+ OS << *Template;
+ else if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName()) {
+ if (!SuppressNNS)
+ QTN->getQualifier()->print(OS, Policy);
+ if (QTN->hasTemplateKeyword())
+ OS << "template ";
+ OS << *QTN->getDecl();
+ } else if (DependentTemplateName *DTN = getAsDependentTemplateName()) {
+ if (!SuppressNNS && DTN->getQualifier())
+ DTN->getQualifier()->print(OS, Policy);
+ OS << "template ";
+
+ if (DTN->isIdentifier())
+ OS << DTN->getIdentifier()->getName();
+ else
+ OS << "operator " << getOperatorSpelling(DTN->getOperator());
+ } else if (SubstTemplateTemplateParmStorage *subst
+ = getAsSubstTemplateTemplateParm()) {
+ subst->getReplacement().print(OS, Policy, SuppressNNS);
+ } else if (SubstTemplateTemplateParmPackStorage *SubstPack
+ = getAsSubstTemplateTemplateParmPack())
+ OS << *SubstPack->getParameterPack();
+ else {
+ OverloadedTemplateStorage *OTS = getAsOverloadedTemplate();
+ (*OTS->begin())->printName(OS);
+ }
+}
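+
+// Illustrative outputs of the printer above (editor's sketch):
+//   plain template declaration           ->  vector
+//   qualified name without 'template'    ->  std::vector
+//   dependent template name              ->  T::template apply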
+
+const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
+ TemplateName N) {
+ std::string NameStr;
+ raw_string_ostream OS(NameStr);
+ LangOptions LO;
+ LO.CPlusPlus = true;
+ LO.Bool = true;
+ N.print(OS, PrintingPolicy(LO));
+ OS.flush();
+ return DB << NameStr;
+}
+
+void TemplateName::dump() const {
+ LangOptions LO; // FIXME!
+ LO.CPlusPlus = true;
+ LO.Bool = true;
+ print(llvm::errs(), PrintingPolicy(LO));
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/Type.cpp b/contrib/llvm/tools/clang/lib/AST/Type.cpp
new file mode 100644
index 0000000..c82aeaa
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/Type.cpp
@@ -0,0 +1,2246 @@
+//===--- Type.cpp - Type representation and manipulation ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements type-related functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/TypeVisitor.h"
+#include "clang/Basic/Specifiers.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+using namespace clang;
+
+bool Qualifiers::isStrictSupersetOf(Qualifiers Other) const {
+ return (*this != Other) &&
+ // CVR qualifiers superset
+ (((Mask & CVRMask) | (Other.Mask & CVRMask)) == (Mask & CVRMask)) &&
+ // ObjC GC qualifiers superset
+ ((getObjCGCAttr() == Other.getObjCGCAttr()) ||
+ (hasObjCGCAttr() && !Other.hasObjCGCAttr())) &&
+ // Address space superset.
+ ((getAddressSpace() == Other.getAddressSpace()) ||
+ (hasAddressSpace() && !Other.hasAddressSpace())) &&
+ // Lifetime qualifier superset.
+ ((getObjCLifetime() == Other.getObjCLifetime()) ||
+ (hasObjCLifetime() && !Other.hasObjCLifetime()));
+}
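+
+// Worked example (editor's sketch): {const, volatile} is a strict superset
+// of {const}, so isStrictSupersetOf returns true for that pair; it returns
+// false for two equal qualifier sets and for {const} vs. {volatile}, where
+// neither set contains the other.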
+
+const IdentifierInfo* QualType::getBaseTypeIdentifier() const {
+ const Type* ty = getTypePtr();
+ NamedDecl *ND = NULL;
+ if (ty->isPointerType() || ty->isReferenceType())
+ return ty->getPointeeType().getBaseTypeIdentifier();
+ else if (ty->isRecordType())
+ ND = ty->getAs<RecordType>()->getDecl();
+ else if (ty->isEnumeralType())
+ ND = ty->getAs<EnumType>()->getDecl();
+ else if (ty->getTypeClass() == Type::Typedef)
+ ND = ty->getAs<TypedefType>()->getDecl();
+ else if (ty->isArrayType())
+ return ty->castAsArrayTypeUnsafe()->
+ getElementType().getBaseTypeIdentifier();
+
+ if (ND)
+ return ND->getIdentifier();
+ return NULL;
+}
+
+bool QualType::isConstant(QualType T, ASTContext &Ctx) {
+ if (T.isConstQualified())
+ return true;
+
+ if (const ArrayType *AT = Ctx.getAsArrayType(T))
+ return AT->getElementType().isConstant(Ctx);
+
+ return false;
+}
+
+unsigned ConstantArrayType::getNumAddressingBits(ASTContext &Context,
+ QualType ElementType,
+ const llvm::APInt &NumElements) {
+ llvm::APSInt SizeExtended(NumElements, true);
+ unsigned SizeTypeBits = Context.getTypeSize(Context.getSizeType());
+ SizeExtended = SizeExtended.extend(std::max(SizeTypeBits,
+ SizeExtended.getBitWidth()) * 2);
+
+ uint64_t ElementSize
+ = Context.getTypeSizeInChars(ElementType).getQuantity();
+ llvm::APSInt TotalSize(llvm::APInt(SizeExtended.getBitWidth(), ElementSize));
+ TotalSize *= SizeExtended;
+
+ return TotalSize.getActiveBits();
+}
+
+unsigned ConstantArrayType::getMaxSizeBits(ASTContext &Context) {
+ unsigned Bits = Context.getTypeSize(Context.getSizeType());
+
+ // GCC appears to only allow 63 bits worth of address space when compiling
+ // for 64-bit, so we do the same.
+ if (Bits == 64)
+ --Bits;
+
+ return Bits;
+}
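+
+// Worked example (editor's sketch): for 'int A[1u << 30]' on a 32-bit
+// target, ElementSize is 4 and NumElements is 2^30, so TotalSize is 2^32
+// and getNumAddressingBits returns 33.  A caller comparing that against
+// getMaxSizeBits() (32 on such a target) would reject the array as too
+// large.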
+
+DependentSizedArrayType::DependentSizedArrayType(const ASTContext &Context,
+ QualType et, QualType can,
+ Expr *e, ArraySizeModifier sm,
+ unsigned tq,
+ SourceRange brackets)
+ : ArrayType(DependentSizedArray, et, can, sm, tq,
+ (et->containsUnexpandedParameterPack() ||
+ (e && e->containsUnexpandedParameterPack()))),
+ Context(Context), SizeExpr((Stmt*) e), Brackets(brackets)
+{
+}
+
+void DependentSizedArrayType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context,
+ QualType ET,
+ ArraySizeModifier SizeMod,
+ unsigned TypeQuals,
+ Expr *E) {
+ ID.AddPointer(ET.getAsOpaquePtr());
+ ID.AddInteger(SizeMod);
+ ID.AddInteger(TypeQuals);
+ E->Profile(ID, Context, true);
+}
+
+DependentSizedExtVectorType::DependentSizedExtVectorType(const
+ ASTContext &Context,
+ QualType ElementType,
+ QualType can,
+ Expr *SizeExpr,
+ SourceLocation loc)
+ : Type(DependentSizedExtVector, can, /*Dependent=*/true,
+ /*InstantiationDependent=*/true,
+ ElementType->isVariablyModifiedType(),
+ (ElementType->containsUnexpandedParameterPack() ||
+ (SizeExpr && SizeExpr->containsUnexpandedParameterPack()))),
+ Context(Context), SizeExpr(SizeExpr), ElementType(ElementType),
+ loc(loc)
+{
+}
+
+void
+DependentSizedExtVectorType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context,
+ QualType ElementType, Expr *SizeExpr) {
+ ID.AddPointer(ElementType.getAsOpaquePtr());
+ SizeExpr->Profile(ID, Context, true);
+}
+
+VectorType::VectorType(QualType vecType, unsigned nElements, QualType canonType,
+ VectorKind vecKind)
+ : Type(Vector, canonType, vecType->isDependentType(),
+ vecType->isInstantiationDependentType(),
+ vecType->isVariablyModifiedType(),
+ vecType->containsUnexpandedParameterPack()),
+ ElementType(vecType)
+{
+ VectorTypeBits.VecKind = vecKind;
+ VectorTypeBits.NumElements = nElements;
+}
+
+VectorType::VectorType(TypeClass tc, QualType vecType, unsigned nElements,
+ QualType canonType, VectorKind vecKind)
+ : Type(tc, canonType, vecType->isDependentType(),
+ vecType->isInstantiationDependentType(),
+ vecType->isVariablyModifiedType(),
+ vecType->containsUnexpandedParameterPack()),
+ ElementType(vecType)
+{
+ VectorTypeBits.VecKind = vecKind;
+ VectorTypeBits.NumElements = nElements;
+}
+
+/// getArrayElementTypeNoTypeQual - If this is an array type, return the
+/// element type of the array, potentially with type qualifiers missing.
+/// This method should never be used when type qualifiers are meaningful.
+const Type *Type::getArrayElementTypeNoTypeQual() const {
+ // If this is directly an array type, return it.
+ if (const ArrayType *ATy = dyn_cast<ArrayType>(this))
+ return ATy->getElementType().getTypePtr();
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (!isa<ArrayType>(CanonicalType))
+ return 0;
+
+ // If this is a typedef for an array type, strip the typedef off without
+ // losing all typedef information.
+ return cast<ArrayType>(getUnqualifiedDesugaredType())
+ ->getElementType().getTypePtr();
+}
+
+/// getDesugaredType - Return the specified type with any "sugar" removed from
+/// the type. This takes off typedefs, typeof's etc. If the outer level of
+/// the type is already concrete, it returns it unmodified. This is similar
+/// to getting the canonical type, but it doesn't remove *all* typedefs. For
+/// example, it returns "T*" as "T*" (not as "int*"), because the pointer is
+/// concrete.
+QualType QualType::getDesugaredType(QualType T, const ASTContext &Context) {
+ SplitQualType split = getSplitDesugaredType(T);
+ return Context.getQualifiedType(split.Ty, split.Quals);
+}
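+
+// Illustrative example (editor's sketch): given 'typedef int MyInt;', a
+// QualType for 'MyInt' desugars to 'int', while 'MyInt *' is returned
+// unchanged because the outermost pointer type is already concrete; only
+// canonicalization would rewrite it all the way down to 'int *'.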
+
+QualType QualType::getSingleStepDesugaredTypeImpl(QualType type,
+ const ASTContext &Context) {
+ SplitQualType split = type.split();
+ QualType desugar = split.Ty->getLocallyUnqualifiedSingleStepDesugaredType();
+ return Context.getQualifiedType(desugar, split.Quals);
+}
+
+QualType Type::getLocallyUnqualifiedSingleStepDesugaredType() const {
+ switch (getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Parent)
+#define TYPE(Class, Parent) \
+ case Type::Class: { \
+ const Class##Type *ty = cast<Class##Type>(this); \
+ if (!ty->isSugared()) return QualType(ty, 0); \
+ return ty->desugar(); \
+ }
+#include "clang/AST/TypeNodes.def"
+ }
+ llvm_unreachable("bad type kind!");
+}
+
+SplitQualType QualType::getSplitDesugaredType(QualType T) {
+ QualifierCollector Qs;
+
+ QualType Cur = T;
+ while (true) {
+ const Type *CurTy = Qs.strip(Cur);
+ switch (CurTy->getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Parent)
+#define TYPE(Class, Parent) \
+ case Type::Class: { \
+ const Class##Type *Ty = cast<Class##Type>(CurTy); \
+ if (!Ty->isSugared()) \
+ return SplitQualType(Ty, Qs); \
+ Cur = Ty->desugar(); \
+ break; \
+ }
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+}
+
+SplitQualType QualType::getSplitUnqualifiedTypeImpl(QualType type) {
+ SplitQualType split = type.split();
+
+ // All the qualifiers we've seen so far.
+ Qualifiers quals = split.Quals;
+
+ // The last type node we saw with any nodes inside it.
+ const Type *lastTypeWithQuals = split.Ty;
+
+ while (true) {
+ QualType next;
+
+ // Do a single-step desugar, aborting the loop if the type isn't
+ // sugared.
+ switch (split.Ty->getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Parent)
+#define TYPE(Class, Parent) \
+ case Type::Class: { \
+ const Class##Type *ty = cast<Class##Type>(split.Ty); \
+ if (!ty->isSugared()) goto done; \
+ next = ty->desugar(); \
+ break; \
+ }
+#include "clang/AST/TypeNodes.def"
+ }
+
+ // Otherwise, split the underlying type. If that yields qualifiers,
+ // update the information.
+ split = next.split();
+ if (!split.Quals.empty()) {
+ lastTypeWithQuals = split.Ty;
+ quals.addConsistentQualifiers(split.Quals);
+ }
+ }
+
+ done:
+ return SplitQualType(lastTypeWithQuals, quals);
+}
+
+QualType QualType::IgnoreParens(QualType T) {
+ // FIXME: this seems inherently un-qualifiers-safe.
+ while (const ParenType *PT = T->getAs<ParenType>())
+ T = PT->getInnerType();
+ return T;
+}
+
+/// getUnqualifiedDesugaredType - Pull any qualifiers and syntactic
+/// sugar off the given type. This should produce an object of the
+/// same dynamic type as the canonical type.
+const Type *Type::getUnqualifiedDesugaredType() const {
+ const Type *Cur = this;
+
+ while (true) {
+ switch (Cur->getTypeClass()) {
+#define ABSTRACT_TYPE(Class, Parent)
+#define TYPE(Class, Parent) \
+ case Class: { \
+ const Class##Type *Ty = cast<Class##Type>(Cur); \
+ if (!Ty->isSugared()) return Cur; \
+ Cur = Ty->desugar().getTypePtr(); \
+ break; \
+ }
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+}
+
+bool Type::isDerivedType() const {
+ switch (CanonicalType->getTypeClass()) {
+ case Pointer:
+ case VariableArray:
+ case ConstantArray:
+ case IncompleteArray:
+ case FunctionProto:
+ case FunctionNoProto:
+ case LValueReference:
+ case RValueReference:
+ case Record:
+ return true;
+ default:
+ return false;
+ }
+}
+bool Type::isClassType() const {
+ if (const RecordType *RT = getAs<RecordType>())
+ return RT->getDecl()->isClass();
+ return false;
+}
+bool Type::isStructureType() const {
+ if (const RecordType *RT = getAs<RecordType>())
+ return RT->getDecl()->isStruct();
+ return false;
+}
+bool Type::isStructureOrClassType() const {
+ if (const RecordType *RT = getAs<RecordType>())
+ return RT->getDecl()->isStruct() || RT->getDecl()->isClass();
+ return false;
+}
+bool Type::isVoidPointerType() const {
+ if (const PointerType *PT = getAs<PointerType>())
+ return PT->getPointeeType()->isVoidType();
+ return false;
+}
+
+bool Type::isUnionType() const {
+ if (const RecordType *RT = getAs<RecordType>())
+ return RT->getDecl()->isUnion();
+ return false;
+}
+
+bool Type::isComplexType() const {
+ if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType))
+ return CT->getElementType()->isFloatingType();
+ return false;
+}
+
+bool Type::isComplexIntegerType() const {
+ // Check for GCC complex integer extension.
+ return getAsComplexIntegerType();
+}
+
+const ComplexType *Type::getAsComplexIntegerType() const {
+ if (const ComplexType *Complex = getAs<ComplexType>())
+ if (Complex->getElementType()->isIntegerType())
+ return Complex;
+ return 0;
+}
+
+QualType Type::getPointeeType() const {
+ if (const PointerType *PT = getAs<PointerType>())
+ return PT->getPointeeType();
+ if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>())
+ return OPT->getPointeeType();
+ if (const BlockPointerType *BPT = getAs<BlockPointerType>())
+ return BPT->getPointeeType();
+ if (const ReferenceType *RT = getAs<ReferenceType>())
+ return RT->getPointeeType();
+ return QualType();
+}
+
+const RecordType *Type::getAsStructureType() const {
+ // If this is directly a structure type, return it.
+ if (const RecordType *RT = dyn_cast<RecordType>(this)) {
+ if (RT->getDecl()->isStruct())
+ return RT;
+ }
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (const RecordType *RT = dyn_cast<RecordType>(CanonicalType)) {
+ if (!RT->getDecl()->isStruct())
+ return 0;
+
+ // If this is a typedef for a structure type, strip the typedef off without
+ // losing all typedef information.
+ return cast<RecordType>(getUnqualifiedDesugaredType());
+ }
+ return 0;
+}
+
+const RecordType *Type::getAsUnionType() const {
+ // If this is directly a union type, return it.
+ if (const RecordType *RT = dyn_cast<RecordType>(this)) {
+ if (RT->getDecl()->isUnion())
+ return RT;
+ }
+
+ // If the canonical form of this type isn't the right kind, reject it.
+ if (const RecordType *RT = dyn_cast<RecordType>(CanonicalType)) {
+ if (!RT->getDecl()->isUnion())
+ return 0;
+
+ // If this is a typedef for a union type, strip the typedef off without
+ // losing all typedef information.
+ return cast<RecordType>(getUnqualifiedDesugaredType());
+ }
+
+ return 0;
+}
+
+ObjCObjectType::ObjCObjectType(QualType Canonical, QualType Base,
+ ObjCProtocolDecl * const *Protocols,
+ unsigned NumProtocols)
+ : Type(ObjCObject, Canonical, false, false, false, false),
+ BaseType(Base)
+{
+ ObjCObjectTypeBits.NumProtocols = NumProtocols;
+ assert(getNumProtocols() == NumProtocols &&
+ "bitfield overflow in protocol count");
+ if (NumProtocols)
+ memcpy(getProtocolStorage(), Protocols,
+ NumProtocols * sizeof(ObjCProtocolDecl*));
+}
+
+const ObjCObjectType *Type::getAsObjCQualifiedInterfaceType() const {
+ // There is no sugar for ObjCObjectTypes, so just return the canonical
+ // type pointer if it is the right class. There is no typedef information
+ // to return and these cannot be address-space qualified.
+ if (const ObjCObjectType *T = getAs<ObjCObjectType>())
+ if (T->getNumProtocols() && T->getInterface())
+ return T;
+ return 0;
+}
+
+bool Type::isObjCQualifiedInterfaceType() const {
+ return getAsObjCQualifiedInterfaceType() != 0;
+}
+
+const ObjCObjectPointerType *Type::getAsObjCQualifiedIdType() const {
+ // There is no sugar for ObjCQualifiedIdTypes, so just return the
+ // canonical type pointer if it is the right class.
+ if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>()) {
+ if (OPT->isObjCQualifiedIdType())
+ return OPT;
+ }
+ return 0;
+}
+
+const ObjCObjectPointerType *Type::getAsObjCQualifiedClassType() const {
+ // There is no sugar for ObjCQualifiedClassTypes, so just return the
+ // canonical type pointer if it is the right class.
+ if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>()) {
+ if (OPT->isObjCQualifiedClassType())
+ return OPT;
+ }
+ return 0;
+}
+
+const ObjCObjectPointerType *Type::getAsObjCInterfacePointerType() const {
+ if (const ObjCObjectPointerType *OPT = getAs<ObjCObjectPointerType>()) {
+ if (OPT->getInterfaceType())
+ return OPT;
+ }
+ return 0;
+}
+
+const CXXRecordDecl *Type::getCXXRecordDeclForPointerType() const {
+ if (const PointerType *PT = getAs<PointerType>())
+ if (const RecordType *RT = PT->getPointeeType()->getAs<RecordType>())
+ return dyn_cast<CXXRecordDecl>(RT->getDecl());
+ return 0;
+}
+
+CXXRecordDecl *Type::getAsCXXRecordDecl() const {
+ if (const RecordType *RT = getAs<RecordType>())
+ return dyn_cast<CXXRecordDecl>(RT->getDecl());
+ else if (const InjectedClassNameType *Injected
+ = getAs<InjectedClassNameType>())
+ return Injected->getDecl();
+
+ return 0;
+}
+
+namespace {
+ class GetContainedAutoVisitor :
+ public TypeVisitor<GetContainedAutoVisitor, AutoType*> {
+ public:
+ using TypeVisitor<GetContainedAutoVisitor, AutoType*>::Visit;
+ AutoType *Visit(QualType T) {
+ if (T.isNull())
+ return 0;
+ return Visit(T.getTypePtr());
+ }
+
+ // The 'auto' type itself.
+ AutoType *VisitAutoType(const AutoType *AT) {
+ return const_cast<AutoType*>(AT);
+ }
+
+ // Only these types can contain the desired 'auto' type.
+ AutoType *VisitPointerType(const PointerType *T) {
+ return Visit(T->getPointeeType());
+ }
+ AutoType *VisitBlockPointerType(const BlockPointerType *T) {
+ return Visit(T->getPointeeType());
+ }
+ AutoType *VisitReferenceType(const ReferenceType *T) {
+ return Visit(T->getPointeeTypeAsWritten());
+ }
+ AutoType *VisitMemberPointerType(const MemberPointerType *T) {
+ return Visit(T->getPointeeType());
+ }
+ AutoType *VisitArrayType(const ArrayType *T) {
+ return Visit(T->getElementType());
+ }
+ AutoType *VisitDependentSizedExtVectorType(
+ const DependentSizedExtVectorType *T) {
+ return Visit(T->getElementType());
+ }
+ AutoType *VisitVectorType(const VectorType *T) {
+ return Visit(T->getElementType());
+ }
+ AutoType *VisitFunctionType(const FunctionType *T) {
+ return Visit(T->getResultType());
+ }
+ AutoType *VisitParenType(const ParenType *T) {
+ return Visit(T->getInnerType());
+ }
+ AutoType *VisitAttributedType(const AttributedType *T) {
+ return Visit(T->getModifiedType());
+ }
+ };
+}
+
+AutoType *Type::getContainedAutoType() const {
+ return GetContainedAutoVisitor().Visit(this);
+}
+
+bool Type::hasIntegerRepresentation() const {
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isIntegerType();
+ else
+ return isIntegerType();
+}
+
+/// \brief Determine whether this type is an integral type.
+///
+/// This routine determines whether the given type is an integral type per
+/// C++ [basic.fundamental]p7. Although the C standard does not define the
+/// term "integral type", it has a similar term "integer type", and in C++
+/// the two terms are equivalent. However, C's "integer type" includes
+/// enumeration types, while C++'s "integer type" does not. The \c ASTContext
+/// parameter is used to determine whether we should be following the C or
+/// C++ rules when determining whether this type is an integral/integer type.
+///
+/// For cases where C permits "an integer type" and C++ permits "an integral
+/// type", use this routine.
+///
+/// For cases where C permits "an integer type" and C++ permits "an integral
+/// or enumeration type", use \c isIntegralOrEnumerationType() instead.
+///
+/// \param Ctx The context in which this type occurs.
+///
+/// \returns true if the type is considered an integral type, false otherwise.
+bool Type::isIntegralType(ASTContext &Ctx) const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::Int128;
+
+ if (!Ctx.getLangOpts().CPlusPlus)
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ return ET->getDecl()->isComplete(); // Complete enum types are integral in C.
+
+ return false;
+}
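+
+// Illustrative example (editor's sketch): 'int' is integral under both the
+// C and the C++ rules, while a complete unscoped enumeration type is
+// integral only under the C rules, so isIntegralType(Ctx) returns true for
+// it with a C ASTContext and false with a C++ one.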
+
+
+bool Type::isIntegralOrUnscopedEnumerationType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::Int128;
+
+ // Check for a complete enum type; incomplete enum types are not properly an
+ // enumeration type in the sense required here.
+ // C++0x: However, if the underlying type of the enum is fixed, it is
+ // considered complete.
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
+
+ return false;
+}
+
+
+
+bool Type::isCharType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Char_U ||
+ BT->getKind() == BuiltinType::UChar ||
+ BT->getKind() == BuiltinType::Char_S ||
+ BT->getKind() == BuiltinType::SChar;
+ return false;
+}
+
+bool Type::isWideCharType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::WChar_S ||
+ BT->getKind() == BuiltinType::WChar_U;
+ return false;
+}
+
+bool Type::isChar16Type() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Char16;
+ return false;
+}
+
+bool Type::isChar32Type() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() == BuiltinType::Char32;
+ return false;
+}
+
+/// \brief Determine whether this type is any of the built-in character
+/// types.
+bool Type::isAnyCharacterType() const {
+ const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType);
+ if (BT == 0) return false;
+ switch (BT->getKind()) {
+ default: return false;
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ case BuiltinType::WChar_S:
+ return true;
+ }
+}
+
+/// isSignedIntegerType - Return true if this is an integer type that is
+/// signed, according to C99 6.2.5p4 [char, signed char, short, int, long..],
+/// or an enum decl which has a signed representation.
+bool Type::isSignedIntegerType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ return BT->getKind() >= BuiltinType::Char_S &&
+ BT->getKind() <= BuiltinType::Int128;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ // Incomplete enum types are not treated as integer types.
+ // FIXME: In C++, enum types are never integer types.
+ if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
+ return ET->getDecl()->getIntegerType()->isSignedIntegerType();
+ }
+
+ return false;
+}
+
+bool Type::isSignedIntegerOrEnumerationType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ return BT->getKind() >= BuiltinType::Char_S &&
+ BT->getKind() <= BuiltinType::Int128;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ if (ET->getDecl()->isComplete())
+ return ET->getDecl()->getIntegerType()->isSignedIntegerType();
+ }
+
+ return false;
+}
+
+bool Type::hasSignedIntegerRepresentation() const {
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isSignedIntegerType();
+ else
+ return isSignedIntegerType();
+}
+
+/// isUnsignedIntegerType - Return true if this is an integer type that is
+/// unsigned, according to C99 6.2.5p6 [which returns true for _Bool], or an
+/// enum decl which has an unsigned representation.
+bool Type::isUnsignedIntegerType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::UInt128;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ // Incomplete enum types are not treated as integer types.
+ // FIXME: In C++, enum types are never integer types.
+ if (ET->getDecl()->isComplete() && !ET->getDecl()->isScoped())
+ return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
+ }
+
+ return false;
+}
+
+bool Type::isUnsignedIntegerOrEnumerationType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType)) {
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::UInt128;
+ }
+
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType)) {
+ if (ET->getDecl()->isComplete())
+ return ET->getDecl()->getIntegerType()->isUnsignedIntegerType();
+ }
+
+ return false;
+}
+
+bool Type::hasUnsignedIntegerRepresentation() const {
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isUnsignedIntegerType();
+ else
+ return isUnsignedIntegerType();
+}
+
+bool Type::isFloatingType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Half &&
+ BT->getKind() <= BuiltinType::LongDouble;
+ if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType))
+ return CT->getElementType()->isFloatingType();
+ return false;
+}
+
+bool Type::hasFloatingRepresentation() const {
+ if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
+ return VT->getElementType()->isFloatingType();
+ else
+ return isFloatingType();
+}
+
+bool Type::isRealFloatingType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->isFloatingPoint();
+ return false;
+}
+
+bool Type::isRealType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::LongDouble;
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ return ET->getDecl()->isComplete() && !ET->getDecl()->isScoped();
+ return false;
+}
+
+bool Type::isArithmeticType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::LongDouble;
+ if (const EnumType *ET = dyn_cast<EnumType>(CanonicalType))
+ // GCC allows forward declaration of enum types (forbidden by C99 6.7.2.3p2).
+ // If a body isn't seen by the time we get here, return false.
+ //
+ // C++0x: Enumerations are not arithmetic types. For now, just return
+ // false for scoped enumerations since that will disable any
+ // unwanted implicit conversions.
+ return !ET->getDecl()->isScoped() && ET->getDecl()->isComplete();
+ return isa<ComplexType>(CanonicalType);
+}
+
+Type::ScalarTypeKind Type::getScalarTypeKind() const {
+ assert(isScalarType());
+
+ const Type *T = CanonicalType.getTypePtr();
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(T)) {
+ if (BT->getKind() == BuiltinType::Bool) return STK_Bool;
+ if (BT->getKind() == BuiltinType::NullPtr) return STK_CPointer;
+ if (BT->isInteger()) return STK_Integral;
+ if (BT->isFloatingPoint()) return STK_Floating;
+ llvm_unreachable("unknown scalar builtin type");
+ } else if (isa<PointerType>(T)) {
+ return STK_CPointer;
+ } else if (isa<BlockPointerType>(T)) {
+ return STK_BlockPointer;
+ } else if (isa<ObjCObjectPointerType>(T)) {
+ return STK_ObjCObjectPointer;
+ } else if (isa<MemberPointerType>(T)) {
+ return STK_MemberPointer;
+ } else if (isa<EnumType>(T)) {
+ assert(cast<EnumType>(T)->getDecl()->isComplete());
+ return STK_Integral;
+ } else if (const ComplexType *CT = dyn_cast<ComplexType>(T)) {
+ if (CT->getElementType()->isRealFloatingType())
+ return STK_FloatingComplex;
+ return STK_IntegralComplex;
+ }
+
+ llvm_unreachable("unknown scalar type");
+}
+
+/// \brief Determines whether the type is a C++ aggregate type or C
+/// aggregate or union type.
+///
+/// An aggregate type is an array or a class type (struct, union, or
+/// class) that has no user-declared constructors, no private or
+/// protected non-static data members, no base classes, and no virtual
+/// functions (C++ [dcl.init.aggr]p1). The notion of an aggregate type
+/// subsumes the notion of C aggregates (C99 6.2.5p21) because it also
+/// includes union types.
+bool Type::isAggregateType() const {
+ if (const RecordType *Record = dyn_cast<RecordType>(CanonicalType)) {
+ if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(Record->getDecl()))
+ return ClassDecl->isAggregate();
+
+ return true;
+ }
+
+ return isa<ArrayType>(CanonicalType);
+}
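+
+// Illustrative examples (editor's sketch):
+//   struct P { int x, y; };        // aggregate: no user-declared ctors, etc.
+//   struct Q { Q(int); int x; };   // not an aggregate: user-declared ctor
+//   int A[4];                      // array types are always aggregates here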
+
+/// isConstantSizeType - Return true if this is not a variable sized type,
+/// according to the rules of C99 6.7.5p3. It is not legal to call this on
+/// incomplete types or dependent types.
+bool Type::isConstantSizeType() const {
+ assert(!isIncompleteType() && "This doesn't make sense for incomplete types");
+ assert(!isDependentType() && "This doesn't make sense for dependent types");
+ // The VAT must have a size, as it is known to be complete.
+ return !isa<VariableArrayType>(CanonicalType);
+}
+
+/// isIncompleteType - Return true if this is an incomplete type (C99 6.2.5p1)
+/// - a type that can describe objects, but which lacks information needed to
+/// determine its size.
+bool Type::isIncompleteType(NamedDecl **Def) const {
+ if (Def)
+ *Def = 0;
+
+ switch (CanonicalType->getTypeClass()) {
+ default: return false;
+ case Builtin:
+ // Void is the only incomplete builtin type. Per C99 6.2.5p19, it can never
+ // be completed.
+ return isVoidType();
+ case Enum: {
+ EnumDecl *EnumD = cast<EnumType>(CanonicalType)->getDecl();
+ if (Def)
+ *Def = EnumD;
+
+ // An enumeration with fixed underlying type is complete (C++0x 7.2p3).
+ if (EnumD->isFixed())
+ return false;
+
+ return !EnumD->isCompleteDefinition();
+ }
+ case Record: {
+ // A tagged type (struct/union/enum/class) is incomplete if the decl is a
+ // forward declaration, but not a full definition (C99 6.2.5p22).
+ RecordDecl *Rec = cast<RecordType>(CanonicalType)->getDecl();
+ if (Def)
+ *Def = Rec;
+ return !Rec->isCompleteDefinition();
+ }
+ case ConstantArray:
+ // An array is incomplete if its element type is incomplete
+ // (C++ [dcl.array]p1).
+ // We don't handle variable arrays (they're not allowed in C++) or
+ // dependent-sized arrays (dependent types are never treated as incomplete).
+ return cast<ArrayType>(CanonicalType)->getElementType()
+ ->isIncompleteType(Def);
+ case IncompleteArray:
+ // An array of unknown size is an incomplete type (C99 6.2.5p22).
+ return true;
+ case ObjCObject:
+ return cast<ObjCObjectType>(CanonicalType)->getBaseType()
+ ->isIncompleteType(Def);
+ case ObjCInterface: {
+ // ObjC interfaces are incomplete if they are @class, not @interface.
+ ObjCInterfaceDecl *Interface
+ = cast<ObjCInterfaceType>(CanonicalType)->getDecl();
+ if (Def)
+ *Def = Interface;
+ return !Interface->hasDefinition();
+ }
+ }
+}
+
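+// Illustrative note: 'int', 'int[10]', a plain C struct, and any C++ class for
+// which CXXRecordDecl::isPOD() holds are reported as POD below; under ARC,
+// types carrying a __strong, __weak, or __autoreleasing lifetime are not.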
+bool QualType::isPODType(ASTContext &Context) const {
+ // The compiler shouldn't query this for incomplete types, but the user might.
+ // We return false for that case, except for incomplete arrays of PODs, which
+ // are PODs according to the standard.
+ if (isNull())
+ return false;
+
+ if ((*this)->isIncompleteArrayType())
+ return Context.getBaseElementType(*this).isPODType(Context);
+
+ if ((*this)->isIncompleteType())
+ return false;
+
+ if (Context.getLangOpts().ObjCAutoRefCount) {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_None:
+ break;
+ }
+ }
+
+ QualType CanonicalType = getTypePtr()->CanonicalType;
+ switch (CanonicalType->getTypeClass()) {
+ // Everything not explicitly mentioned is not POD.
+ default: return false;
+ case Type::VariableArray:
+ case Type::ConstantArray:
+ // IncompleteArray is handled above.
+ return Context.getBaseElementType(*this).isPODType(Context);
+
+ case Type::ObjCObjectPointer:
+ case Type::BlockPointer:
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Pointer:
+ case Type::MemberPointer:
+ case Type::Vector:
+ case Type::ExtVector:
+ return true;
+
+ case Type::Enum:
+ return true;
+
+ case Type::Record:
+ if (CXXRecordDecl *ClassDecl
+ = dyn_cast<CXXRecordDecl>(cast<RecordType>(CanonicalType)->getDecl()))
+ return ClassDecl->isPOD();
+
+ // C struct/union is POD.
+ return true;
+ }
+}
+
+bool QualType::isTrivialType(ASTContext &Context) const {
+ // The compiler shouldn't query this for incomplete types, but the user might.
+ // We return false for that case, except for incomplete arrays of trivial
+ // types, which are trivial according to the standard.
+ if (isNull())
+ return false;
+
+ if ((*this)->isArrayType())
+ return Context.getBaseElementType(*this).isTrivialType(Context);
+
+ // Return false for incomplete types after skipping any incomplete array
+ // types which are expressly allowed by the standard and thus our API.
+ if ((*this)->isIncompleteType())
+ return false;
+
+ if (Context.getLangOpts().ObjCAutoRefCount) {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_None:
+ if ((*this)->isObjCLifetimeType())
+ return false;
+ break;
+ }
+ }
+
+ QualType CanonicalType = getTypePtr()->CanonicalType;
+ if (CanonicalType->isDependentType())
+ return false;
+
+ // C++0x [basic.types]p9:
+ // Scalar types, trivial class types, arrays of such types, and
+ // cv-qualified versions of these types are collectively called trivial
+ // types.
+
+ // As an extension, Clang treats vector types as Scalar types.
+ if (CanonicalType->isScalarType() || CanonicalType->isVectorType())
+ return true;
+ if (const RecordType *RT = CanonicalType->getAs<RecordType>()) {
+ if (const CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ // C++0x [class]p5:
+ // A trivial class is a class that has a trivial default constructor
+ if (!ClassDecl->hasTrivialDefaultConstructor()) return false;
+ // and is trivially copyable.
+ if (!ClassDecl->isTriviallyCopyable()) return false;
+ }
+
+ return true;
+ }
+
+ // No other types can match.
+ return false;
+}
+
+bool QualType::isTriviallyCopyableType(ASTContext &Context) const {
+ if ((*this)->isArrayType())
+ return Context.getBaseElementType(*this).isTriviallyCopyableType(Context);
+
+ if (Context.getLangOpts().ObjCAutoRefCount) {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_None:
+ if ((*this)->isObjCLifetimeType())
+ return false;
+ break;
+ }
+ }
+
+ // C++0x [basic.types]p9
+ // Scalar types, trivially copyable class types, arrays of such types, and
+ // cv-qualified versions of these types are collectively called trivial
+ // types.
+
+ QualType CanonicalType = getCanonicalType();
+ if (CanonicalType->isDependentType())
+ return false;
+
+ // Return false for incomplete types after skipping any incomplete array types
+ // which are expressly allowed by the standard and thus our API.
+ if (CanonicalType->isIncompleteType())
+ return false;
+
+ // As an extension, Clang treats vector types as Scalar types.
+ if (CanonicalType->isScalarType() || CanonicalType->isVectorType())
+ return true;
+
+ if (const RecordType *RT = CanonicalType->getAs<RecordType>()) {
+ if (const CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ if (!ClassDecl->isTriviallyCopyable()) return false;
+ }
+
+ return true;
+ }
+
+ // No other types can match.
+ return false;
+}
+
+
+
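+// Illustrative note: scalar types, references, and classes satisfying
+// CXXRecordDecl::isLiteral() (trivial destructor plus aggregate-ness or a
+// suitable constexpr constructor) are literal types; variable arrays never are.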
+bool Type::isLiteralType() const {
+ if (isDependentType())
+ return false;
+
+ // C++0x [basic.types]p10:
+ // A type is a literal type if it is:
+ // [...]
+ // -- an array of literal type.
+ // Extension: variable arrays cannot be literal types, since they're
+ // runtime-sized.
+ if (isVariableArrayType())
+ return false;
+ const Type *BaseTy = getBaseElementTypeUnsafe();
+ assert(BaseTy && "NULL element type");
+
+ // Return false for incomplete types after skipping any incomplete array
+ // types; those are expressly allowed by the standard and thus our API.
+ if (BaseTy->isIncompleteType())
+ return false;
+
+ // C++0x [basic.types]p10:
+ // A type is a literal type if it is:
+ // -- a scalar type; or
+ // As an extension, Clang treats vector types and complex types as
+ // literal types.
+ if (BaseTy->isScalarType() || BaseTy->isVectorType() ||
+ BaseTy->isAnyComplexType())
+ return true;
+ // -- a reference type; or
+ if (BaseTy->isReferenceType())
+ return true;
+ // -- a class type that has all of the following properties:
+ if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
+ // -- a trivial destructor,
+ // -- every constructor call and full-expression in the
+ // brace-or-equal-initializers for non-static data members (if any)
+ // is a constant expression,
+ // -- it is an aggregate type or has at least one constexpr
+ // constructor or constructor template that is not a copy or move
+ // constructor, and
+ // -- all non-static data members and base classes of literal types
+ //
+ // We resolve DR1361 by ignoring the second bullet.
+ if (const CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ return ClassDecl->isLiteral();
+
+ return true;
+ }
+
+ return false;
+}
+
+bool Type::isStandardLayoutType() const {
+ if (isDependentType())
+ return false;
+
+ // C++0x [basic.types]p9:
+ // Scalar types, standard-layout class types, arrays of such types, and
+ // cv-qualified versions of these types are collectively called
+ // standard-layout types.
+ const Type *BaseTy = getBaseElementTypeUnsafe();
+ assert(BaseTy && "NULL element type");
+
+ // Return false for incomplete types after skipping any incomplete array
+ // types which are expressly allowed by the standard and thus our API.
+ if (BaseTy->isIncompleteType())
+ return false;
+
+ // As an extension, Clang treats vector types as Scalar types.
+ if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true;
+ if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
+ if (const CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (!ClassDecl->isStandardLayout())
+ return false;
+
+ // Default to 'true' for non-C++ class types.
+ // FIXME: This is a bit dubious, but plain C structs should trivially meet
+ // all the requirements of standard layout classes.
+ return true;
+ }
+
+ // No other types can match.
+ return false;
+}
+
+// This is effectively the intersection of isTrivialType and
+// isStandardLayoutType. We implement it directly to avoid redundant
+// conversions from a type to a CXXRecordDecl.
+bool QualType::isCXX11PODType(ASTContext &Context) const {
+ const Type *ty = getTypePtr();
+ if (ty->isDependentType())
+ return false;
+
+ if (Context.getLangOpts().ObjCAutoRefCount) {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_None:
+ if (ty->isObjCLifetimeType())
+ return false;
+ break;
+ }
+ }
+
+ // C++11 [basic.types]p9:
+ // Scalar types, POD classes, arrays of such types, and cv-qualified
+ // versions of these types are collectively called trivial types.
+ const Type *BaseTy = ty->getBaseElementTypeUnsafe();
+ assert(BaseTy && "NULL element type");
+
+ // Return false for incomplete types after skipping any incomplete array
+ // types which are expressly allowed by the standard and thus our API.
+ if (BaseTy->isIncompleteType())
+ return false;
+
+ // As an extension, Clang treats vector types as Scalar types.
+ if (BaseTy->isScalarType() || BaseTy->isVectorType()) return true;
+ if (const RecordType *RT = BaseTy->getAs<RecordType>()) {
+ if (const CXXRecordDecl *ClassDecl =
+ dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ // C++11 [class]p10:
+ // A POD struct is a non-union class that is both a trivial class [...]
+ if (!ClassDecl->isTrivial()) return false;
+
+ // C++11 [class]p10:
+ // A POD struct is a non-union class that is both a trivial class and
+ // a standard-layout class [...]
+ if (!ClassDecl->isStandardLayout()) return false;
+
+ // C++11 [class]p10:
+ // A POD struct is a non-union class that is both a trivial class and
+ // a standard-layout class, and has no non-static data members of type
+ // non-POD struct, non-POD union (or array of such types). [...]
+ //
+ // We don't directly query the recursive aspect as the requirements for
+ // both standard-layout classes and trivial classes apply recursively
+ // already.
+ }
+
+ return true;
+ }
+
+ // No other types can match.
+ return false;
+}
+
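+// Illustrative note: 'bool', 'char', 'short' (and their unsigned counterparts),
+// 'wchar_t', 'char16_t', and 'char32_t' are promotable, as is an unscoped
+// enumeration with a non-null promotion type; scoped enumerations never are.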
+bool Type::isPromotableIntegerType() const {
+ if (const BuiltinType *BT = getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ return true;
+ default:
+ return false;
+ }
+
+ // Enumerated types are promotable to their compatible integer types
+ // (C99 6.3.1.1) a.k.a. its underlying type (C++ [conv.prom]p2).
+ if (const EnumType *ET = getAs<EnumType>()) {
+ if (this->isDependentType() || ET->getDecl()->getPromotionType().isNull()
+ || ET->getDecl()->isScoped())
+ return false;
+
+ return true;
+ }
+
+ return false;
+}
+
+bool Type::isSpecifierType() const {
+ // Note that this intentionally does not use the canonical type.
+ switch (getTypeClass()) {
+ case Builtin:
+ case Record:
+ case Enum:
+ case Typedef:
+ case Complex:
+ case TypeOfExpr:
+ case TypeOf:
+ case TemplateTypeParm:
+ case SubstTemplateTypeParm:
+ case TemplateSpecialization:
+ case Elaborated:
+ case DependentName:
+ case DependentTemplateSpecialization:
+ case ObjCInterface:
+ case ObjCObject:
+ case ObjCObjectPointer: // FIXME: object pointers aren't really specifiers
+ return true;
+ default:
+ return false;
+ }
+}
+
+ElaboratedTypeKeyword
+TypeWithKeyword::getKeywordForTypeSpec(unsigned TypeSpec) {
+ switch (TypeSpec) {
+ default: return ETK_None;
+ case TST_typename: return ETK_Typename;
+ case TST_class: return ETK_Class;
+ case TST_struct: return ETK_Struct;
+ case TST_union: return ETK_Union;
+ case TST_enum: return ETK_Enum;
+ }
+}
+
+TagTypeKind
+TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
+ switch(TypeSpec) {
+ case TST_class: return TTK_Class;
+ case TST_struct: return TTK_Struct;
+ case TST_union: return TTK_Union;
+ case TST_enum: return TTK_Enum;
+ }
+
+ llvm_unreachable("Type specifier is not a tag type kind.");
+}
+
+ElaboratedTypeKeyword
+TypeWithKeyword::getKeywordForTagTypeKind(TagTypeKind Kind) {
+ switch (Kind) {
+ case TTK_Class: return ETK_Class;
+ case TTK_Struct: return ETK_Struct;
+ case TTK_Union: return ETK_Union;
+ case TTK_Enum: return ETK_Enum;
+ }
+ llvm_unreachable("Unknown tag type kind.");
+}
+
+TagTypeKind
+TypeWithKeyword::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) {
+ switch (Keyword) {
+ case ETK_Class: return TTK_Class;
+ case ETK_Struct: return TTK_Struct;
+ case ETK_Union: return TTK_Union;
+ case ETK_Enum: return TTK_Enum;
+ case ETK_None: // Fall through.
+ case ETK_Typename:
+ llvm_unreachable("Elaborated type keyword is not a tag type kind.");
+ }
+ llvm_unreachable("Unknown elaborated type keyword.");
+}
+
+bool
+TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
+ switch (Keyword) {
+ case ETK_None:
+ case ETK_Typename:
+ return false;
+ case ETK_Class:
+ case ETK_Struct:
+ case ETK_Union:
+ case ETK_Enum:
+ return true;
+ }
+ llvm_unreachable("Unknown elaborated type keyword.");
+}
+
+const char*
+TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) {
+ switch (Keyword) {
+ case ETK_None: return "";
+ case ETK_Typename: return "typename";
+ case ETK_Class: return "class";
+ case ETK_Struct: return "struct";
+ case ETK_Union: return "union";
+ case ETK_Enum: return "enum";
+ }
+
+ llvm_unreachable("Unknown elaborated type keyword.");
+}
+
+DependentTemplateSpecializationType::DependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS, const IdentifierInfo *Name,
+ unsigned NumArgs, const TemplateArgument *Args,
+ QualType Canon)
+ : TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon, true, true,
+ /*VariablyModified=*/false,
+ NNS && NNS->containsUnexpandedParameterPack()),
+ NNS(NNS), Name(Name), NumArgs(NumArgs) {
+ assert((!NNS || NNS->isDependent()) &&
+ "DependentTemplateSpecializatonType requires dependent qualifier");
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ if (Args[I].containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+
+ new (&getArgBuffer()[I]) TemplateArgument(Args[I]);
+ }
+}
+
+void
+DependentTemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context,
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *Qualifier,
+ const IdentifierInfo *Name,
+ unsigned NumArgs,
+ const TemplateArgument *Args) {
+ ID.AddInteger(Keyword);
+ ID.AddPointer(Qualifier);
+ ID.AddPointer(Name);
+ for (unsigned Idx = 0; Idx < NumArgs; ++Idx)
+ Args[Idx].Profile(ID, Context);
+}
+
+bool Type::isElaboratedTypeSpecifier() const {
+ ElaboratedTypeKeyword Keyword;
+ if (const ElaboratedType *Elab = dyn_cast<ElaboratedType>(this))
+ Keyword = Elab->getKeyword();
+ else if (const DependentNameType *DepName = dyn_cast<DependentNameType>(this))
+ Keyword = DepName->getKeyword();
+ else if (const DependentTemplateSpecializationType *DepTST =
+ dyn_cast<DependentTemplateSpecializationType>(this))
+ Keyword = DepTST->getKeyword();
+ else
+ return false;
+
+ return TypeWithKeyword::KeywordIsTagTypeKind(Keyword);
+}
+
+const char *Type::getTypeClassName() const {
+ switch (TypeBits.TC) {
+#define ABSTRACT_TYPE(Derived, Base)
+#define TYPE(Derived, Base) case Derived: return #Derived;
+#include "clang/AST/TypeNodes.def"
+ }
+
+ llvm_unreachable("Invalid type class.");
+}
+
+const char *BuiltinType::getName(const PrintingPolicy &Policy) const {
+ switch (getKind()) {
+ case Void: return "void";
+ case Bool: return Policy.Bool ? "bool" : "_Bool";
+ case Char_S: return "char";
+ case Char_U: return "char";
+ case SChar: return "signed char";
+ case Short: return "short";
+ case Int: return "int";
+ case Long: return "long";
+ case LongLong: return "long long";
+ case Int128: return "__int128";
+ case UChar: return "unsigned char";
+ case UShort: return "unsigned short";
+ case UInt: return "unsigned int";
+ case ULong: return "unsigned long";
+ case ULongLong: return "unsigned long long";
+ case UInt128: return "unsigned __int128";
+ case Half: return "half";
+ case Float: return "float";
+ case Double: return "double";
+ case LongDouble: return "long double";
+ case WChar_S:
+ case WChar_U: return "wchar_t";
+ case Char16: return "char16_t";
+ case Char32: return "char32_t";
+ case NullPtr: return "nullptr_t";
+ case Overload: return "<overloaded function type>";
+ case BoundMember: return "<bound member function type>";
+ case PseudoObject: return "<pseudo-object type>";
+ case Dependent: return "<dependent type>";
+ case UnknownAny: return "<unknown type>";
+ case ARCUnbridgedCast: return "<ARC unbridged cast type>";
+ case ObjCId: return "id";
+ case ObjCClass: return "Class";
+ case ObjCSel: return "SEL";
+ }
+
+ llvm_unreachable("Invalid builtin type.");
+}
+
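+// Illustrative note: 'int &' yields 'int' here; a 'const int' prvalue type
+// yields 'int' in C++ (non-class prvalues are cv-unqualified), while in C++ a
+// cv-qualified class type is returned unchanged.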
+QualType QualType::getNonLValueExprType(ASTContext &Context) const {
+ if (const ReferenceType *RefType = getTypePtr()->getAs<ReferenceType>())
+ return RefType->getPointeeType();
+
+ // C++0x [basic.lval]:
+ // Class prvalues can have cv-qualified types; non-class prvalues always
+ // have cv-unqualified types.
+ //
+ // See also C99 6.3.2.1p2.
+ if (!Context.getLangOpts().CPlusPlus ||
+ (!getTypePtr()->isDependentType() && !getTypePtr()->isRecordType()))
+ return getUnqualifiedType();
+
+ return *this;
+}
+
+StringRef FunctionType::getNameForCallConv(CallingConv CC) {
+ switch (CC) {
+ case CC_Default:
+ llvm_unreachable("no name for default cc");
+
+ case CC_C: return "cdecl";
+ case CC_X86StdCall: return "stdcall";
+ case CC_X86FastCall: return "fastcall";
+ case CC_X86ThisCall: return "thiscall";
+ case CC_X86Pascal: return "pascal";
+ case CC_AAPCS: return "aapcs";
+ case CC_AAPCS_VFP: return "aapcs-vfp";
+ }
+
+ llvm_unreachable("Invalid calling convention.");
+}
+
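+// Layout note: the trailing storage filled in by this constructor follows the
+// FunctionProtoType object itself: first the argument QualTypes, then either
+// the dynamic exception QualTypes or the noexcept expression (depending on the
+// exception specification), and, when present, the consumed-argument flags
+// reached through getConsumedArgsBuffer().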
+FunctionProtoType::FunctionProtoType(QualType result, const QualType *args,
+ unsigned numArgs, QualType canonical,
+ const ExtProtoInfo &epi)
+ : FunctionType(FunctionProto, result, epi.TypeQuals, epi.RefQualifier,
+ canonical,
+ result->isDependentType(),
+ result->isInstantiationDependentType(),
+ result->isVariablyModifiedType(),
+ result->containsUnexpandedParameterPack(),
+ epi.ExtInfo),
+ NumArgs(numArgs), NumExceptions(epi.NumExceptions),
+ ExceptionSpecType(epi.ExceptionSpecType),
+ HasAnyConsumedArgs(epi.ConsumedArguments != 0),
+ Variadic(epi.Variadic), HasTrailingReturn(epi.HasTrailingReturn)
+{
+ // Fill in the trailing argument array.
+ QualType *argSlot = reinterpret_cast<QualType*>(this+1);
+ for (unsigned i = 0; i != numArgs; ++i) {
+ if (args[i]->isDependentType())
+ setDependent();
+ else if (args[i]->isInstantiationDependentType())
+ setInstantiationDependent();
+
+ if (args[i]->containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+
+ argSlot[i] = args[i];
+ }
+
+ if (getExceptionSpecType() == EST_Dynamic) {
+ // Fill in the exception array.
+ QualType *exnSlot = argSlot + numArgs;
+ for (unsigned i = 0, e = epi.NumExceptions; i != e; ++i) {
+ if (epi.Exceptions[i]->isDependentType())
+ setDependent();
+ else if (epi.Exceptions[i]->isInstantiationDependentType())
+ setInstantiationDependent();
+
+ if (epi.Exceptions[i]->containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+
+ exnSlot[i] = epi.Exceptions[i];
+ }
+ } else if (getExceptionSpecType() == EST_ComputedNoexcept) {
+ // Store the noexcept expression and context.
+ Expr **noexSlot = reinterpret_cast<Expr**>(argSlot + numArgs);
+ *noexSlot = epi.NoexceptExpr;
+
+ if (epi.NoexceptExpr) {
+ if (epi.NoexceptExpr->isValueDependent()
+ || epi.NoexceptExpr->isTypeDependent())
+ setDependent();
+ else if (epi.NoexceptExpr->isInstantiationDependent())
+ setInstantiationDependent();
+ }
+ }
+
+ if (epi.ConsumedArguments) {
+ bool *consumedArgs = const_cast<bool*>(getConsumedArgsBuffer());
+ for (unsigned i = 0; i != numArgs; ++i)
+ consumedArgs[i] = epi.ConsumedArguments[i];
+ }
+}
+
+FunctionProtoType::NoexceptResult
+FunctionProtoType::getNoexceptSpec(ASTContext &ctx) const {
+ ExceptionSpecificationType est = getExceptionSpecType();
+ if (est == EST_BasicNoexcept)
+ return NR_Nothrow;
+
+ if (est != EST_ComputedNoexcept)
+ return NR_NoNoexcept;
+
+ Expr *noexceptExpr = getNoexceptExpr();
+ if (!noexceptExpr)
+ return NR_BadNoexcept;
+ if (noexceptExpr->isValueDependent())
+ return NR_Dependent;
+
+ llvm::APSInt value;
+ bool isICE = noexceptExpr->isIntegerConstantExpr(value, ctx, 0,
+ /*evaluated*/false);
+ (void)isICE;
+ assert(isICE && "AST should not contain bad noexcept expressions.");
+
+ return value.getBoolValue() ? NR_Nothrow : NR_Throw;
+}
+
+bool FunctionProtoType::isTemplateVariadic() const {
+ for (unsigned ArgIdx = getNumArgs(); ArgIdx; --ArgIdx)
+ if (isa<PackExpansionType>(getArgType(ArgIdx - 1)))
+ return true;
+
+ return false;
+}
+
+void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
+ const QualType *ArgTys, unsigned NumArgs,
+ const ExtProtoInfo &epi,
+ const ASTContext &Context) {
+
+ // We have to be careful not to get ambiguous profile encodings.
+ // Note that valid type pointers are never ambiguous with anything else.
+ //
+ // The encoding grammar begins:
+ // type type* bool int bool
+ // If that final bool is true, then there is a section for the EH spec:
+ // bool type*
+ // This is followed by an optional "consumed argument" section of the
+ // same length as the first type sequence:
+ // bool*
+ // Finally, we have the ext info and trailing return type flag:
+ // int bool
+ //
+ // There is no ambiguity between the consumed arguments and an empty EH
+ // spec because of the leading 'bool' which unambiguously indicates
+ // whether the following bool is the EH spec or part of the arguments.
+
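+ // As a concrete illustration of the code below: 'void f(int, double)' with no
+ // exception specification and no consumed arguments profiles as the result
+ // pointer, the two argument pointers, one packed integer combining
+ // Variadic/TypeQuals/RefQualifier/ExceptionSpecType, the ExtInfo, and the
+ // trailing-return flag.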
+ ID.AddPointer(Result.getAsOpaquePtr());
+ for (unsigned i = 0; i != NumArgs; ++i)
+ ID.AddPointer(ArgTys[i].getAsOpaquePtr());
+ // This method is relatively performance sensitive, so as a performance
+ // shortcut, use one AddInteger call instead of four for the next four
+ // fields.
+ assert(!(unsigned(epi.Variadic) & ~1) &&
+ !(unsigned(epi.TypeQuals) & ~255) &&
+ !(unsigned(epi.RefQualifier) & ~3) &&
+ !(unsigned(epi.ExceptionSpecType) & ~7) &&
+ "Values larger than expected.");
+ ID.AddInteger(unsigned(epi.Variadic) +
+ (epi.TypeQuals << 1) +
+ (epi.RefQualifier << 9) +
+ (epi.ExceptionSpecType << 11));
+ if (epi.ExceptionSpecType == EST_Dynamic) {
+ for (unsigned i = 0; i != epi.NumExceptions; ++i)
+ ID.AddPointer(epi.Exceptions[i].getAsOpaquePtr());
+ } else if (epi.ExceptionSpecType == EST_ComputedNoexcept && epi.NoexceptExpr){
+ epi.NoexceptExpr->Profile(ID, Context, false);
+ }
+ if (epi.ConsumedArguments) {
+ for (unsigned i = 0; i != NumArgs; ++i)
+ ID.AddBoolean(epi.ConsumedArguments[i]);
+ }
+ epi.ExtInfo.Profile(ID);
+ ID.AddBoolean(epi.HasTrailingReturn);
+}
+
+void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Ctx) {
+ Profile(ID, getResultType(), arg_type_begin(), NumArgs, getExtProtoInfo(),
+ Ctx);
+}
+
+QualType TypedefType::desugar() const {
+ return getDecl()->getUnderlyingType();
+}
+
+TypeOfExprType::TypeOfExprType(Expr *E, QualType can)
+ : Type(TypeOfExpr, can, E->isTypeDependent(),
+ E->isInstantiationDependent(),
+ E->getType()->isVariablyModifiedType(),
+ E->containsUnexpandedParameterPack()),
+ TOExpr(E) {
+}
+
+bool TypeOfExprType::isSugared() const {
+ return !TOExpr->isTypeDependent();
+}
+
+QualType TypeOfExprType::desugar() const {
+ if (isSugared())
+ return getUnderlyingExpr()->getType();
+
+ return QualType(this, 0);
+}
+
+void DependentTypeOfExprType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context, Expr *E) {
+ E->Profile(ID, Context, true);
+}
+
+DecltypeType::DecltypeType(Expr *E, QualType underlyingType, QualType can)
+ // C++11 [temp.type]p2: "If an expression e involves a template parameter,
+ // decltype(e) denotes a unique dependent type." Hence a decltype type is
+ // type-dependent even if its expression is only instantiation-dependent.
+ : Type(Decltype, can, E->isInstantiationDependent(),
+ E->isInstantiationDependent(),
+ E->getType()->isVariablyModifiedType(),
+ E->containsUnexpandedParameterPack()),
+ E(E),
+ UnderlyingType(underlyingType) {
+}
+
+bool DecltypeType::isSugared() const { return !E->isInstantiationDependent(); }
+
+QualType DecltypeType::desugar() const {
+ if (isSugared())
+ return getUnderlyingType();
+
+ return QualType(this, 0);
+}
+
+DependentDecltypeType::DependentDecltypeType(const ASTContext &Context, Expr *E)
+ : DecltypeType(E, Context.DependentTy), Context(Context) { }
+
+void DependentDecltypeType::Profile(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context, Expr *E) {
+ E->Profile(ID, Context, true);
+}
+
+TagType::TagType(TypeClass TC, const TagDecl *D, QualType can)
+ : Type(TC, can, D->isDependentType(),
+ /*InstantiationDependent=*/D->isDependentType(),
+ /*VariablyModified=*/false,
+ /*ContainsUnexpandedParameterPack=*/false),
+ decl(const_cast<TagDecl*>(D)) {}
+
+static TagDecl *getInterestingTagDecl(TagDecl *decl) {
+ for (TagDecl::redecl_iterator I = decl->redecls_begin(),
+ E = decl->redecls_end();
+ I != E; ++I) {
+ if (I->isCompleteDefinition() || I->isBeingDefined())
+ return *I;
+ }
+ // If there's no definition (not even in progress), return what we have.
+ return decl;
+}
+
+UnaryTransformType::UnaryTransformType(QualType BaseType,
+ QualType UnderlyingType,
+ UTTKind UKind,
+ QualType CanonicalType)
+ : Type(UnaryTransform, CanonicalType, UnderlyingType->isDependentType(),
+ UnderlyingType->isInstantiationDependentType(),
+ UnderlyingType->isVariablyModifiedType(),
+ BaseType->containsUnexpandedParameterPack())
+ , BaseType(BaseType), UnderlyingType(UnderlyingType), UKind(UKind)
+{}
+
+TagDecl *TagType::getDecl() const {
+ return getInterestingTagDecl(decl);
+}
+
+bool TagType::isBeingDefined() const {
+ return getDecl()->isBeingDefined();
+}
+
+CXXRecordDecl *InjectedClassNameType::getDecl() const {
+ return cast<CXXRecordDecl>(getInterestingTagDecl(Decl));
+}
+
+IdentifierInfo *TemplateTypeParmType::getIdentifier() const {
+ return isCanonicalUnqualified() ? 0 : getDecl()->getIdentifier();
+}
+
+SubstTemplateTypeParmPackType::
+SubstTemplateTypeParmPackType(const TemplateTypeParmType *Param,
+ QualType Canon,
+ const TemplateArgument &ArgPack)
+ : Type(SubstTemplateTypeParmPack, Canon, true, true, false, true),
+ Replaced(Param),
+ Arguments(ArgPack.pack_begin()), NumArguments(ArgPack.pack_size())
+{
+}
+
+TemplateArgument SubstTemplateTypeParmPackType::getArgumentPack() const {
+ return TemplateArgument(Arguments, NumArguments);
+}
+
+void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getReplacedParameter(), getArgumentPack());
+}
+
+void SubstTemplateTypeParmPackType::Profile(llvm::FoldingSetNodeID &ID,
+ const TemplateTypeParmType *Replaced,
+ const TemplateArgument &ArgPack) {
+ ID.AddPointer(Replaced);
+ ID.AddInteger(ArgPack.pack_size());
+ for (TemplateArgument::pack_iterator P = ArgPack.pack_begin(),
+ PEnd = ArgPack.pack_end();
+ P != PEnd; ++P)
+ ID.AddPointer(P->getAsType().getAsOpaquePtr());
+}
+
+bool TemplateSpecializationType::
+anyDependentTemplateArguments(const TemplateArgumentListInfo &Args,
+ bool &InstantiationDependent) {
+ return anyDependentTemplateArguments(Args.getArgumentArray(), Args.size(),
+ InstantiationDependent);
+}
+
+bool TemplateSpecializationType::
+anyDependentTemplateArguments(const TemplateArgumentLoc *Args, unsigned N,
+ bool &InstantiationDependent) {
+ for (unsigned i = 0; i != N; ++i) {
+ if (Args[i].getArgument().isDependent()) {
+ InstantiationDependent = true;
+ return true;
+ }
+
+ if (Args[i].getArgument().isInstantiationDependent())
+ InstantiationDependent = true;
+ }
+ return false;
+}
+
+bool TemplateSpecializationType::
+anyDependentTemplateArguments(const TemplateArgument *Args, unsigned N,
+ bool &InstantiationDependent) {
+ for (unsigned i = 0; i != N; ++i) {
+ if (Args[i].isDependent()) {
+ InstantiationDependent = true;
+ return true;
+ }
+
+ if (Args[i].isInstantiationDependent())
+ InstantiationDependent = true;
+ }
+ return false;
+}
+
+TemplateSpecializationType::
+TemplateSpecializationType(TemplateName T,
+ const TemplateArgument *Args, unsigned NumArgs,
+ QualType Canon, QualType AliasedType)
+ : Type(TemplateSpecialization,
+ Canon.isNull()? QualType(this, 0) : Canon,
+ Canon.isNull()? T.isDependent() : Canon->isDependentType(),
+ Canon.isNull()? T.isDependent()
+ : Canon->isInstantiationDependentType(),
+ false,
+ Canon.isNull()? T.containsUnexpandedParameterPack()
+ : Canon->containsUnexpandedParameterPack()),
+ Template(T), NumArgs(NumArgs), TypeAlias(!AliasedType.isNull()) {
+ assert(!T.getAsDependentTemplateName() &&
+ "Use DependentTemplateSpecializationType for dependent template-name");
+ assert((T.getKind() == TemplateName::Template ||
+ T.getKind() == TemplateName::SubstTemplateTemplateParm ||
+ T.getKind() == TemplateName::SubstTemplateTemplateParmPack) &&
+ "Unexpected template name for TemplateSpecializationType");
+ bool InstantiationDependent;
+ (void)InstantiationDependent;
+ assert((!Canon.isNull() ||
+ T.isDependent() ||
+ anyDependentTemplateArguments(Args, NumArgs,
+ InstantiationDependent)) &&
+ "No canonical type for non-dependent class template specialization");
+
+ TemplateArgument *TemplateArgs
+ = reinterpret_cast<TemplateArgument *>(this + 1);
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
+ // Update dependent and variably-modified bits.
+ // If the canonical type exists and is non-dependent, the template
+ // specialization type can be non-dependent even if one of the type
+ // arguments is. Given:
+ // template<typename T> using U = int;
+ // U<T> is always non-dependent, irrespective of the type T.
+ if (Canon.isNull() && Args[Arg].isDependent())
+ setDependent();
+ else if (Args[Arg].isInstantiationDependent())
+ setInstantiationDependent();
+
+ if (Args[Arg].getKind() == TemplateArgument::Type &&
+ Args[Arg].getAsType()->isVariablyModifiedType())
+ setVariablyModified();
+ if (Canon.isNull() && Args[Arg].containsUnexpandedParameterPack())
+ setContainsUnexpandedParameterPack();
+
+ new (&TemplateArgs[Arg]) TemplateArgument(Args[Arg]);
+ }
+
+ // Store the aliased type if this is a type alias template specialization.
+ if (TypeAlias) {
+ TemplateArgument *Begin = reinterpret_cast<TemplateArgument *>(this + 1);
+ *reinterpret_cast<QualType*>(Begin + getNumArgs()) = AliasedType;
+ }
+}
+
+void
+TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
+ TemplateName T,
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ const ASTContext &Context) {
+ T.Profile(ID);
+ for (unsigned Idx = 0; Idx < NumArgs; ++Idx)
+ Args[Idx].Profile(ID, Context);
+}
+
+QualType
+QualifierCollector::apply(const ASTContext &Context, QualType QT) const {
+ if (!hasNonFastQualifiers())
+ return QT.withFastQualifiers(getFastQualifiers());
+
+ return Context.getQualifiedType(QT, *this);
+}
+
+QualType
+QualifierCollector::apply(const ASTContext &Context, const Type *T) const {
+ if (!hasNonFastQualifiers())
+ return QualType(T, getFastQualifiers());
+
+ return Context.getQualifiedType(T, *this);
+}
+
+void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID,
+ QualType BaseType,
+ ObjCProtocolDecl * const *Protocols,
+ unsigned NumProtocols) {
+ ID.AddPointer(BaseType.getAsOpaquePtr());
+ for (unsigned i = 0; i != NumProtocols; i++)
+ ID.AddPointer(Protocols[i]);
+}
+
+void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getBaseType(), qual_begin(), getNumProtocols());
+}
+
+namespace {
+
+/// \brief The cached properties of a type.
+class CachedProperties {
+ NamedDecl::LinkageInfo LV;
+ bool local;
+
+public:
+ CachedProperties(NamedDecl::LinkageInfo LV, bool local)
+ : LV(LV), local(local) {}
+
+ Linkage getLinkage() const { return LV.linkage(); }
+ Visibility getVisibility() const { return LV.visibility(); }
+ bool isVisibilityExplicit() const { return LV.visibilityExplicit(); }
+ bool hasLocalOrUnnamedType() const { return local; }
+
+ friend CachedProperties merge(CachedProperties L, CachedProperties R) {
+ NamedDecl::LinkageInfo MergedLV = L.LV;
+ MergedLV.merge(R.LV);
+ return CachedProperties(MergedLV,
+ L.hasLocalOrUnnamedType() | R.hasLocalOrUnnamedType());
+ }
+};
+}
+
+static CachedProperties computeCachedProperties(const Type *T);
+
+namespace clang {
+/// The type-property cache. This is templated so as to be
+/// instantiated at an internal type to prevent unnecessary symbol
+/// leakage.
+template <class Private> class TypePropertyCache {
+public:
+ static CachedProperties get(QualType T) {
+ return get(T.getTypePtr());
+ }
+
+ static CachedProperties get(const Type *T) {
+ ensure(T);
+ NamedDecl::LinkageInfo LV(T->TypeBits.getLinkage(),
+ T->TypeBits.getVisibility(),
+ T->TypeBits.isVisibilityExplicit());
+ return CachedProperties(LV, T->TypeBits.hasLocalOrUnnamedType());
+ }
+
+ static void ensure(const Type *T) {
+ // If the cache is valid, we're okay.
+ if (T->TypeBits.isCacheValid()) return;
+
+ // If this type is non-canonical, ask its canonical type for the
+ // relevant information.
+ if (!T->isCanonicalUnqualified()) {
+ const Type *CT = T->getCanonicalTypeInternal().getTypePtr();
+ ensure(CT);
+ T->TypeBits.CacheValidAndVisibility =
+ CT->TypeBits.CacheValidAndVisibility;
+ T->TypeBits.CachedExplicitVisibility =
+ CT->TypeBits.CachedExplicitVisibility;
+ T->TypeBits.CachedLinkage = CT->TypeBits.CachedLinkage;
+ T->TypeBits.CachedLocalOrUnnamed = CT->TypeBits.CachedLocalOrUnnamed;
+ return;
+ }
+
+ // Compute the cached properties and then set the cache.
+ CachedProperties Result = computeCachedProperties(T);
+ T->TypeBits.CacheValidAndVisibility = Result.getVisibility() + 1U;
+ T->TypeBits.CachedExplicitVisibility = Result.isVisibilityExplicit();
+ assert(T->TypeBits.isCacheValid() &&
+ T->TypeBits.getVisibility() == Result.getVisibility());
+ T->TypeBits.CachedLinkage = Result.getLinkage();
+ T->TypeBits.CachedLocalOrUnnamed = Result.hasLocalOrUnnamedType();
+ }
+};
+}
+
+// Instantiate the friend template at a private class. In a
+// reasonable implementation, these symbols will be internal.
+// It is terrible that this is the best way to accomplish this.
+namespace { class Private {}; }
+typedef TypePropertyCache<Private> Cache;
+
+static CachedProperties computeCachedProperties(const Type *T) {
+ switch (T->getTypeClass()) {
+#define TYPE(Class,Base)
+#define NON_CANONICAL_TYPE(Class,Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("didn't expect a non-canonical type here");
+
+#define TYPE(Class,Base)
+#define DEPENDENT_TYPE(Class,Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class,Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ // Treat instantiation-dependent types as external.
+ assert(T->isInstantiationDependentType());
+ return CachedProperties(NamedDecl::LinkageInfo(), false);
+
+ case Type::Builtin:
+ // C++ [basic.link]p8:
+ // A type is said to have linkage if and only if:
+ // - it is a fundamental type (3.9.1); or
+ return CachedProperties(NamedDecl::LinkageInfo(), false);
+
+ case Type::Record:
+ case Type::Enum: {
+ const TagDecl *Tag = cast<TagType>(T)->getDecl();
+
+ // C++ [basic.link]p8:
+ // - it is a class or enumeration type that is named (or has a name
+ // for linkage purposes (7.1.3)) and the name has linkage; or
+ // - it is a specialization of a class template (14); or
+ NamedDecl::LinkageInfo LV = Tag->getLinkageAndVisibility();
+ bool IsLocalOrUnnamed =
+ Tag->getDeclContext()->isFunctionOrMethod() ||
+ (!Tag->getIdentifier() && !Tag->getTypedefNameForAnonDecl());
+ return CachedProperties(LV, IsLocalOrUnnamed);
+ }
+
+ // C++ [basic.link]p8:
+ // - it is a compound type (3.9.2) other than a class or enumeration,
+ // compounded exclusively from types that have linkage; or
+ case Type::Complex:
+ return Cache::get(cast<ComplexType>(T)->getElementType());
+ case Type::Pointer:
+ return Cache::get(cast<PointerType>(T)->getPointeeType());
+ case Type::BlockPointer:
+ return Cache::get(cast<BlockPointerType>(T)->getPointeeType());
+ case Type::LValueReference:
+ case Type::RValueReference:
+ return Cache::get(cast<ReferenceType>(T)->getPointeeType());
+ case Type::MemberPointer: {
+ const MemberPointerType *MPT = cast<MemberPointerType>(T);
+ return merge(Cache::get(MPT->getClass()),
+ Cache::get(MPT->getPointeeType()));
+ }
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ return Cache::get(cast<ArrayType>(T)->getElementType());
+ case Type::Vector:
+ case Type::ExtVector:
+ return Cache::get(cast<VectorType>(T)->getElementType());
+ case Type::FunctionNoProto:
+ return Cache::get(cast<FunctionType>(T)->getResultType());
+ case Type::FunctionProto: {
+ const FunctionProtoType *FPT = cast<FunctionProtoType>(T);
+ CachedProperties result = Cache::get(FPT->getResultType());
+ for (FunctionProtoType::arg_type_iterator ai = FPT->arg_type_begin(),
+ ae = FPT->arg_type_end(); ai != ae; ++ai)
+ result = merge(result, Cache::get(*ai));
+ return result;
+ }
+ case Type::ObjCInterface: {
+ NamedDecl::LinkageInfo LV =
+ cast<ObjCInterfaceType>(T)->getDecl()->getLinkageAndVisibility();
+ return CachedProperties(LV, false);
+ }
+ case Type::ObjCObject:
+ return Cache::get(cast<ObjCObjectType>(T)->getBaseType());
+ case Type::ObjCObjectPointer:
+ return Cache::get(cast<ObjCObjectPointerType>(T)->getPointeeType());
+ case Type::Atomic:
+ return Cache::get(cast<AtomicType>(T)->getValueType());
+ }
+
+ llvm_unreachable("unhandled type class");
+}
+
+/// \brief Determine the linkage of this type.
+Linkage Type::getLinkage() const {
+ Cache::ensure(this);
+ return TypeBits.getLinkage();
+}
+
+/// \brief Determine the linkage of this type.
+Visibility Type::getVisibility() const {
+ Cache::ensure(this);
+ return TypeBits.getVisibility();
+}
+
+bool Type::isVisibilityExplicit() const {
+ Cache::ensure(this);
+ return TypeBits.isVisibilityExplicit();
+}
+
+bool Type::hasUnnamedOrLocalType() const {
+ Cache::ensure(this);
+ return TypeBits.hasLocalOrUnnamedType();
+}
+
+std::pair<Linkage,Visibility> Type::getLinkageAndVisibility() const {
+ Cache::ensure(this);
+ return std::make_pair(TypeBits.getLinkage(), TypeBits.getVisibility());
+}
+
+void Type::ClearLinkageCache() {
+ TypeBits.CacheValidAndVisibility = 0;
+ if (QualType(this, 0) != CanonicalType)
+ CanonicalType->TypeBits.CacheValidAndVisibility = 0;
+}
+
+Qualifiers::ObjCLifetime Type::getObjCARCImplicitLifetime() const {
+ if (isObjCARCImplicitlyUnretainedType())
+ return Qualifiers::OCL_ExplicitNone;
+ return Qualifiers::OCL_Strong;
+}
+
+bool Type::isObjCARCImplicitlyUnretainedType() const {
+ assert(isObjCLifetimeType() &&
+ "cannot query implicit lifetime for non-inferrable type");
+
+ const Type *canon = getCanonicalTypeInternal().getTypePtr();
+
+ // Walk down to the base type. We don't care about qualifiers for this.
+ while (const ArrayType *array = dyn_cast<ArrayType>(canon))
+ canon = array->getElementType().getTypePtr();
+
+ if (const ObjCObjectPointerType *opt
+ = dyn_cast<ObjCObjectPointerType>(canon)) {
+ // Class and Class<Protocol> don't require retention.
+ if (opt->getObjectType()->isObjCClass())
+ return true;
+ }
+
+ return false;
+}
+
+bool Type::isObjCNSObjectType() const {
+ if (const TypedefType *typedefType = dyn_cast<TypedefType>(this))
+ return typedefType->getDecl()->hasAttr<ObjCNSObjectAttr>();
+ return false;
+}
+bool Type::isObjCRetainableType() const {
+ return isObjCObjectPointerType() ||
+ isBlockPointerType() ||
+ isObjCNSObjectType();
+}
+bool Type::isObjCIndirectLifetimeType() const {
+ if (isObjCLifetimeType())
+ return true;
+ if (const PointerType *OPT = getAs<PointerType>())
+ return OPT->getPointeeType()->isObjCIndirectLifetimeType();
+ if (const ReferenceType *Ref = getAs<ReferenceType>())
+ return Ref->getPointeeType()->isObjCIndirectLifetimeType();
+ if (const MemberPointerType *MemPtr = getAs<MemberPointerType>())
+ return MemPtr->getPointeeType()->isObjCIndirectLifetimeType();
+ return false;
+}
+
+/// Returns true if objects of this type have lifetime semantics under
+/// ARC.
+bool Type::isObjCLifetimeType() const {
+ const Type *type = this;
+ while (const ArrayType *array = type->getAsArrayTypeUnsafe())
+ type = array->getElementType().getTypePtr();
+ return type->isObjCRetainableType();
+}
+
+/// \brief Determine whether the given type T is a "bridgeable" Objective-C
+/// type, which is either an Objective-C object pointer type or a block
+/// pointer type.
+bool Type::isObjCARCBridgableType() const {
+ return isObjCObjectPointerType() || isBlockPointerType();
+}
+
+/// \brief Determine whether the given type T is a "bridgeable" C type.
+bool Type::isCARCBridgableType() const {
+ const PointerType *Pointer = getAs<PointerType>();
+ if (!Pointer)
+ return false;
+
+ QualType Pointee = Pointer->getPointeeType();
+ return Pointee->isVoidType() || Pointee->isRecordType();
+}
+
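+// Illustrative note: 'int[n]' and 'int (*)[n]' (for a runtime-valued 'n') have
+// a sized VLA component, whereas 'int[4]' and the incomplete 'int[]' do not.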
+bool Type::hasSizedVLAType() const {
+ if (!isVariablyModifiedType()) return false;
+
+ if (const PointerType *ptr = getAs<PointerType>())
+ return ptr->getPointeeType()->hasSizedVLAType();
+ if (const ReferenceType *ref = getAs<ReferenceType>())
+ return ref->getPointeeType()->hasSizedVLAType();
+ if (const ArrayType *arr = getAsArrayTypeUnsafe()) {
+ if (isa<VariableArrayType>(arr) &&
+ cast<VariableArrayType>(arr)->getSizeExpr())
+ return true;
+
+ return arr->getElementType()->hasSizedVLAType();
+ }
+
+ return false;
+}
+
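+// Illustrative note: a '__strong'-qualified type yields
+// DK_objc_strong_lifetime, a class type with a non-trivial destructor yields
+// DK_cxx_destructor, and 'int' yields DK_none.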
+QualType::DestructionKind QualType::isDestructedTypeImpl(QualType type) {
+ switch (type.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ return DK_objc_strong_lifetime;
+ case Qualifiers::OCL_Weak:
+ return DK_objc_weak_lifetime;
+ }
+
+ // Currently, the only destruction kind we recognize is C++ objects
+ // with non-trivial destructors.
+ const CXXRecordDecl *record =
+ type->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ if (record && record->hasDefinition() && !record->hasTrivialDestructor())
+ return DK_cxx_destructor;
+
+ return DK_none;
+}
+
+bool QualType::hasTrivialAssignment(ASTContext &Context, bool Copying) const {
+ switch (getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ break;
+
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Autoreleasing:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ return !Context.getLangOpts().ObjCAutoRefCount;
+ }
+
+ if (const CXXRecordDecl *Record
+ = getTypePtr()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl())
+ return Copying ? Record->hasTrivialCopyAssignment() :
+ Record->hasTrivialMoveAssignment();
+
+ return true;
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp b/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
new file mode 100644
index 0000000..caa19b1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/TypeLoc.cpp
@@ -0,0 +1,332 @@
+//===--- TypeLoc.cpp - Type Source Info Wrapper -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the implementations of the TypeLoc subclasses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/raw_ostream.h"
+#include "clang/AST/TypeLocVisitor.h"
+#include "clang/AST/Expr.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// TypeLoc Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class TypeLocRanger : public TypeLocVisitor<TypeLocRanger, SourceRange> {
+ public:
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ SourceRange Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+ return TyLoc.getLocalSourceRange(); \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ };
+}
+
+SourceRange TypeLoc::getLocalSourceRangeImpl(TypeLoc TL) {
+ if (TL.isNull()) return SourceRange();
+ return TypeLocRanger().Visit(TL);
+}
+
+namespace {
+ class TypeSizer : public TypeLocVisitor<TypeSizer, unsigned> {
+ public:
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ unsigned Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+ return TyLoc.getFullDataSize(); \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ };
+}
+
+/// \brief Returns the size of the type source info data block.
+unsigned TypeLoc::getFullDataSizeForType(QualType Ty) {
+ if (Ty.isNull()) return 0;
+ return TypeSizer().Visit(TypeLoc(Ty, 0));
+}
+
+namespace {
+ class NextLoc : public TypeLocVisitor<NextLoc, TypeLoc> {
+ public:
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ TypeLoc Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+ return TyLoc.getNextTypeLoc(); \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ };
+}
+
+/// \brief Get the next TypeLoc pointed to by this TypeLoc, e.g. for "int*" the
+/// TypeLoc is a PointerTypeLoc and the next TypeLoc is for "int".
+TypeLoc TypeLoc::getNextTypeLocImpl(TypeLoc TL) {
+ return NextLoc().Visit(TL);
+}
+
+/// \brief Initializes a type location, and all of its children
+/// recursively, as if the entire tree had been written in the
+/// given location.
+void TypeLoc::initializeImpl(ASTContext &Context, TypeLoc TL,
+ SourceLocation Loc) {
+ while (true) {
+ switch (TL.getTypeLocClass()) {
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ case CLASS: { \
+ CLASS##TypeLoc TLCasted = cast<CLASS##TypeLoc>(TL); \
+ TLCasted.initializeLocal(Context, Loc); \
+ TL = TLCasted.getNextTypeLoc(); \
+ if (!TL) return; \
+ continue; \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ }
+ }
+}
+
+SourceLocation TypeLoc::getBeginLoc() const {
+ TypeLoc Cur = *this;
+ while (true) {
+ switch (Cur.getTypeLocClass()) {
+ // FIXME: Currently QualifiedTypeLoc does not have a source range
+ // case Qualified:
+ case Elaborated:
+ case DependentName:
+ case DependentTemplateSpecialization:
+ break;
+ default:
+ TypeLoc Next = Cur.getNextTypeLoc();
+ if (Next.isNull()) break;
+ Cur = Next;
+ continue;
+ }
+ break;
+ }
+ return Cur.getLocalSourceRange().getBegin();
+}
+
+SourceLocation TypeLoc::getEndLoc() const {
+ TypeLoc Cur = *this;
+ TypeLoc Last;
+ while (true) {
+ switch (Cur.getTypeLocClass()) {
+ default:
+ if (!Last)
+ Last = Cur;
+ return Last.getLocalSourceRange().getEnd();
+ case Paren:
+ case ConstantArray:
+ case DependentSizedArray:
+ case IncompleteArray:
+ case VariableArray:
+ case FunctionProto:
+ case FunctionNoProto:
+ Last = Cur;
+ break;
+ case Pointer:
+ case BlockPointer:
+ case MemberPointer:
+ case LValueReference:
+ case RValueReference:
+ case PackExpansion:
+ if (!Last)
+ Last = Cur;
+ break;
+ case Qualified:
+ case Elaborated:
+ break;
+ }
+ Cur = Cur.getNextTypeLoc();
+ }
+}
+
+
+namespace {
+ struct TSTChecker : public TypeLocVisitor<TSTChecker, bool> {
+ // Overload resolution does the real work for us.
+ static bool isTypeSpec(TypeSpecTypeLoc _) { return true; }
+ static bool isTypeSpec(TypeLoc _) { return false; }
+
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ bool Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
+ return isTypeSpec(TyLoc); \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ };
+}
+
+
+/// \brief Determines if the given type loc corresponds to a
+/// TypeSpecTypeLoc. Since there is not actually a TypeSpecType in
+/// the type hierarchy, this is made somewhat complicated.
+///
+/// There are a lot of types that currently use TypeSpecTypeLoc
+/// because it's a convenient base class. Ideally we would not accept
+/// those here, but ideally we would have better implementations for
+/// them.
+bool TypeSpecTypeLoc::classof(const TypeLoc *TL) {
+ if (TL->getType().hasLocalQualifiers()) return false;
+ return TSTChecker().Visit(*TL);
+}
+
+// Reimplemented to account for GNU/C++ extension
+// typeof unary-expression
+// where there are no parentheses.
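+// For example, for 'typeof(x)' the range ends at the right parenthesis, while
+// for the unparenthesized 'typeof x' form it ends at the end of the operand
+// expression.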
+SourceRange TypeOfExprTypeLoc::getLocalSourceRange() const {
+ if (getRParenLoc().isValid())
+ return SourceRange(getTypeofLoc(), getRParenLoc());
+ else
+ return SourceRange(getTypeofLoc(),
+ getUnderlyingExpr()->getSourceRange().getEnd());
+}
+
+
+TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
+ if (needsExtraLocalData())
+ return static_cast<TypeSpecifierType>(getWrittenBuiltinSpecs().Type);
+ switch (getTypePtr()->getKind()) {
+ case BuiltinType::Void:
+ return TST_void;
+ case BuiltinType::Bool:
+ return TST_bool;
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ return TST_char;
+ case BuiltinType::Char16:
+ return TST_char16;
+ case BuiltinType::Char32:
+ return TST_char32;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ return TST_wchar;
+ case BuiltinType::UChar:
+ case BuiltinType::UShort:
+ case BuiltinType::UInt:
+ case BuiltinType::ULong:
+ case BuiltinType::ULongLong:
+ case BuiltinType::UInt128:
+ case BuiltinType::SChar:
+ case BuiltinType::Short:
+ case BuiltinType::Int:
+ case BuiltinType::Long:
+ case BuiltinType::LongLong:
+ case BuiltinType::Int128:
+ case BuiltinType::Half:
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ case BuiltinType::LongDouble:
+ llvm_unreachable("Builtin type needs extra local data!");
+ // Fall through, if the impossible happens.
+
+ case BuiltinType::NullPtr:
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ case BuiltinType::BoundMember:
+ case BuiltinType::UnknownAny:
+ case BuiltinType::ARCUnbridgedCast:
+ case BuiltinType::PseudoObject:
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ return TST_unspecified;
+ }
+
+ llvm_unreachable("Invalid BuiltinType Kind!");
+}
+
+TypeLoc TypeLoc::IgnoreParensImpl(TypeLoc TL) {
+ while (ParenTypeLoc* PTL = dyn_cast<ParenTypeLoc>(&TL))
+ TL = PTL->getInnerLoc();
+ return TL;
+}
+
+void ElaboratedTypeLoc::initializeLocal(ASTContext &Context,
+ SourceLocation Loc) {
+ setElaboratedKeywordLoc(Loc);
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
+ setQualifierLoc(Builder.getWithLocInContext(Context));
+}
+
+void DependentNameTypeLoc::initializeLocal(ASTContext &Context,
+ SourceLocation Loc) {
+ setElaboratedKeywordLoc(Loc);
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
+ setQualifierLoc(Builder.getWithLocInContext(Context));
+ setNameLoc(Loc);
+}
+
+void
+DependentTemplateSpecializationTypeLoc::initializeLocal(ASTContext &Context,
+ SourceLocation Loc) {
+ setElaboratedKeywordLoc(Loc);
+ if (getTypePtr()->getQualifier()) {
+ NestedNameSpecifierLocBuilder Builder;
+ Builder.MakeTrivial(Context, getTypePtr()->getQualifier(), Loc);
+ setQualifierLoc(Builder.getWithLocInContext(Context));
+ } else {
+ setQualifierLoc(NestedNameSpecifierLoc());
+ }
+ setTemplateKeywordLoc(Loc);
+ setTemplateNameLoc(Loc);
+ setLAngleLoc(Loc);
+ setRAngleLoc(Loc);
+ TemplateSpecializationTypeLoc::initializeArgLocs(Context, getNumArgs(),
+ getTypePtr()->getArgs(),
+ getArgInfos(), Loc);
+}
+
+void TemplateSpecializationTypeLoc::initializeArgLocs(ASTContext &Context,
+ unsigned NumArgs,
+ const TemplateArgument *Args,
+ TemplateArgumentLocInfo *ArgInfos,
+ SourceLocation Loc) {
+ for (unsigned i = 0, e = NumArgs; i != e; ++i) {
+ switch (Args[i].getKind()) {
+ case TemplateArgument::Null:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Pack:
+ case TemplateArgument::Expression:
+ ArgInfos[i] = TemplateArgumentLocInfo(Args[i].getAsExpr());
+ break;
+
+ case TemplateArgument::Type:
+ ArgInfos[i] = TemplateArgumentLocInfo(
+ Context.getTrivialTypeSourceInfo(Args[i].getAsType(),
+ Loc));
+ break;
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion: {
+ NestedNameSpecifierLocBuilder Builder;
+ TemplateName Template = Args[i].getAsTemplate();
+ if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
+ Builder.MakeTrivial(Context, DTN->getQualifier(), Loc);
+ else if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+ Builder.MakeTrivial(Context, QTN->getQualifier(), Loc);
+
+ ArgInfos[i] = TemplateArgumentLocInfo(
+ Builder.getWithLocInContext(Context),
+ Loc,
+ Args[i].getKind() == TemplateArgument::Template
+ ? SourceLocation()
+ : Loc);
+ break;
+ }
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
new file mode 100644
index 0000000..3bf80e7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/TypePrinter.cpp
@@ -0,0 +1,1232 @@
+//===--- TypePrinter.cpp - Pretty-Print Clang Types -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to print types from Clang's type system.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace {
+ /// \brief RAII object that enables printing of the ARC __strong lifetime
+ /// qualifier.
+ class IncludeStrongLifetimeRAII {
+ PrintingPolicy &Policy;
+ bool Old;
+
+ public:
+ explicit IncludeStrongLifetimeRAII(PrintingPolicy &Policy)
+ : Policy(Policy), Old(Policy.SuppressStrongLifetime) {
+ Policy.SuppressStrongLifetime = false;
+ }
+
+ ~IncludeStrongLifetimeRAII() {
+ Policy.SuppressStrongLifetime = Old;
+ }
+ };
+
+ class TypePrinter {
+ PrintingPolicy Policy;
+
+ public:
+ explicit TypePrinter(const PrintingPolicy &Policy) : Policy(Policy) { }
+
+ void print(const Type *ty, Qualifiers qs, std::string &buffer);
+ void print(QualType T, std::string &S);
+ void AppendScope(DeclContext *DC, std::string &S);
+ void printTag(TagDecl *T, std::string &S);
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) \
+ void print##CLASS(const CLASS##Type *T, std::string &S);
+#include "clang/AST/TypeNodes.def"
+ };
+}
+
+static void AppendTypeQualList(std::string &S, unsigned TypeQuals) {
+ if (TypeQuals & Qualifiers::Const) {
+ if (!S.empty()) S += ' ';
+ S += "const";
+ }
+ if (TypeQuals & Qualifiers::Volatile) {
+ if (!S.empty()) S += ' ';
+ S += "volatile";
+ }
+ if (TypeQuals & Qualifiers::Restrict) {
+ if (!S.empty()) S += ' ';
+ S += "restrict";
+ }
+}
+
+void TypePrinter::print(QualType t, std::string &buffer) {
+ SplitQualType split = t.split();
+ print(split.Ty, split.Quals, buffer);
+}
+
+void TypePrinter::print(const Type *T, Qualifiers Quals, std::string &buffer) {
+ if (!T) {
+ buffer += "NULL TYPE";
+ return;
+ }
+
+ if (Policy.SuppressSpecifiers && T->isSpecifierType())
+ return;
+
+ // Print qualifiers as appropriate.
+
+ // CanPrefixQualifiers - We prefer to print type qualifiers before the type,
+ // so that we get "const int" instead of "int const", but we can't do this if
+ // the type is complex. For example if the type is "int*", we *must* print
+ // "int * const", printing "const int *" is different. Only do this when the
+ // type expands to a simple string.
+ bool CanPrefixQualifiers = false;
+ bool NeedARCStrongQualifier = false;
+ Type::TypeClass TC = T->getTypeClass();
+ if (const AutoType *AT = dyn_cast<AutoType>(T))
+ TC = AT->desugar()->getTypeClass();
+ if (const SubstTemplateTypeParmType *Subst
+ = dyn_cast<SubstTemplateTypeParmType>(T))
+ TC = Subst->getReplacementType()->getTypeClass();
+
+ switch (TC) {
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::UnresolvedUsing:
+ case Type::Typedef:
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ case Type::UnaryTransform:
+ case Type::Record:
+ case Type::Enum:
+ case Type::Elaborated:
+ case Type::TemplateTypeParm:
+ case Type::SubstTemplateTypeParmPack:
+ case Type::TemplateSpecialization:
+ case Type::InjectedClassName:
+ case Type::DependentName:
+ case Type::DependentTemplateSpecialization:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::Atomic:
+ CanPrefixQualifiers = true;
+ break;
+
+ case Type::ObjCObjectPointer:
+ CanPrefixQualifiers = T->isObjCIdType() || T->isObjCClassType() ||
+ T->isObjCQualifiedIdType() || T->isObjCQualifiedClassType();
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ NeedARCStrongQualifier = true;
+ // Fall through
+
+ case Type::Pointer:
+ case Type::BlockPointer:
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::MemberPointer:
+ case Type::DependentSizedExtVector:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ case Type::Paren:
+ case Type::Attributed:
+ case Type::PackExpansion:
+ case Type::SubstTemplateTypeParm:
+ case Type::Auto:
+ CanPrefixQualifiers = false;
+ break;
+ }
+
+ if (!CanPrefixQualifiers && !Quals.empty()) {
+ std::string qualsBuffer;
+ if (NeedARCStrongQualifier) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ Quals.getAsStringInternal(qualsBuffer, Policy);
+ } else {
+ Quals.getAsStringInternal(qualsBuffer, Policy);
+ }
+
+ if (!qualsBuffer.empty()) {
+ if (!buffer.empty()) {
+ qualsBuffer += ' ';
+ qualsBuffer += buffer;
+ }
+ std::swap(buffer, qualsBuffer);
+ }
+ }
+
+ switch (T->getTypeClass()) {
+#define ABSTRACT_TYPE(CLASS, PARENT)
+#define TYPE(CLASS, PARENT) case Type::CLASS: \
+ print##CLASS(cast<CLASS##Type>(T), buffer); \
+ break;
+#include "clang/AST/TypeNodes.def"
+ }
+
+ // If we're adding the qualifiers as a prefix, do it now.
+ if (CanPrefixQualifiers && !Quals.empty()) {
+ std::string qualsBuffer;
+ if (NeedARCStrongQualifier) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ Quals.getAsStringInternal(qualsBuffer, Policy);
+ } else {
+ Quals.getAsStringInternal(qualsBuffer, Policy);
+ }
+
+ if (!qualsBuffer.empty()) {
+ if (!buffer.empty()) {
+ qualsBuffer += ' ';
+ qualsBuffer += buffer;
+ }
+ std::swap(buffer, qualsBuffer);
+ }
+ }
+}
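+// Worked example (illustrative, not part of the upstream file) of the
+// prefix/suffix decision above:
+//   - 'const int': the qualifier applies to a Builtin type, so
+//     CanPrefixQualifiers is true and the result is "const int".
+//   - a pointer with a top-level const: the Pointer case forces
+//     CanPrefixQualifiers to false, so the qualifier is appended after the
+//     declarator fragment and the result is "int *const".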
+
+void TypePrinter::printBuiltin(const BuiltinType *T, std::string &S) {
+ if (S.empty()) {
+ S = T->getName(Policy);
+ } else {
+ // Prefix the basic type, e.g. 'int X'.
+ S = ' ' + S;
+ S = T->getName(Policy) + S;
+ }
+}
+
+void TypePrinter::printComplex(const ComplexType *T, std::string &S) {
+ print(T->getElementType(), S);
+ S = "_Complex " + S;
+}
+
+void TypePrinter::printPointer(const PointerType *T, std::string &S) {
+ S = '*' + S;
+
+ // Handle things like 'int (*A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeType()))
+ S = '(' + S + ')';
+
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getPointeeType(), S);
+}
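+// Worked example (illustrative, not part of the upstream file): for a
+// pointer to 'int[4]' the printer works inside-out: printPointer emits "*",
+// wraps it in parentheses because the pointee is an array, printConstantArray
+// appends "[4]", and printBuiltin finally prefixes the element type, giving
+// "int (*)[4]" (pointer to array) rather than "int *[4]" (array of pointers).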
+
+void TypePrinter::printBlockPointer(const BlockPointerType *T, std::string &S) {
+ S = '^' + S;
+ print(T->getPointeeType(), S);
+}
+
+void TypePrinter::printLValueReference(const LValueReferenceType *T,
+ std::string &S) {
+ S = '&' + S;
+
+ // Handle things like 'int (&A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+ S = '(' + S + ')';
+
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getPointeeTypeAsWritten(), S);
+}
+
+void TypePrinter::printRValueReference(const RValueReferenceType *T,
+ std::string &S) {
+ S = "&&" + S;
+
+ // Handle things like 'int (&&A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeTypeAsWritten()))
+ S = '(' + S + ')';
+
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getPointeeTypeAsWritten(), S);
+}
+
+void TypePrinter::printMemberPointer(const MemberPointerType *T,
+ std::string &S) {
+ PrintingPolicy InnerPolicy(Policy);
+ Policy.SuppressTag = true;
+ std::string C = QualType(T->getClass(), 0).getAsString(InnerPolicy);
+ C += "::*";
+ S = C + S;
+
+ // Handle things like 'int (Cls::*A)[4];' correctly.
+ // FIXME: this should include vectors, but vectors use attributes I guess.
+ if (isa<ArrayType>(T->getPointeeType()))
+ S = '(' + S + ')';
+
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getPointeeType(), S);
+}
+
+void TypePrinter::printConstantArray(const ConstantArrayType *T,
+ std::string &S) {
+ S += '[';
+ S += llvm::utostr(T->getSize().getZExtValue());
+ S += ']';
+
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getElementType(), S);
+}
+
+void TypePrinter::printIncompleteArray(const IncompleteArrayType *T,
+ std::string &S) {
+ S += "[]";
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getElementType(), S);
+}
+
+void TypePrinter::printVariableArray(const VariableArrayType *T,
+ std::string &S) {
+ S += '[';
+
+ if (T->getIndexTypeQualifiers().hasQualifiers()) {
+ AppendTypeQualList(S, T->getIndexTypeCVRQualifiers());
+ S += ' ';
+ }
+
+ if (T->getSizeModifier() == VariableArrayType::Static)
+ S += "static";
+ else if (T->getSizeModifier() == VariableArrayType::Star)
+ S += '*';
+
+ if (T->getSizeExpr()) {
+ std::string SStr;
+ llvm::raw_string_ostream s(SStr);
+ T->getSizeExpr()->printPretty(s, 0, Policy);
+ S += s.str();
+ }
+ S += ']';
+
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getElementType(), S);
+}
+
+void TypePrinter::printDependentSizedArray(const DependentSizedArrayType *T,
+ std::string &S) {
+ S += '[';
+
+ if (T->getSizeExpr()) {
+ std::string SStr;
+ llvm::raw_string_ostream s(SStr);
+ T->getSizeExpr()->printPretty(s, 0, Policy);
+ S += s.str();
+ }
+ S += ']';
+
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getElementType(), S);
+}
+
+void TypePrinter::printDependentSizedExtVector(
+ const DependentSizedExtVectorType *T,
+ std::string &S) {
+ print(T->getElementType(), S);
+
+ S += " __attribute__((ext_vector_type(";
+ if (T->getSizeExpr()) {
+ std::string SStr;
+ llvm::raw_string_ostream s(SStr);
+ T->getSizeExpr()->printPretty(s, 0, Policy);
+ S += s.str();
+ }
+ S += ")))";
+}
+
+void TypePrinter::printVector(const VectorType *T, std::string &S) {
+ switch (T->getVectorKind()) {
+ case VectorType::AltiVecPixel:
+ S = "__vector __pixel " + S;
+ break;
+ case VectorType::AltiVecBool:
+ print(T->getElementType(), S);
+ S = "__vector __bool " + S;
+ break;
+ case VectorType::AltiVecVector:
+ print(T->getElementType(), S);
+ S = "__vector " + S;
+ break;
+ case VectorType::NeonVector:
+ print(T->getElementType(), S);
+ S = ("__attribute__((neon_vector_type(" +
+ llvm::utostr_32(T->getNumElements()) + "))) " + S);
+ break;
+ case VectorType::NeonPolyVector:
+ print(T->getElementType(), S);
+ S = ("__attribute__((neon_polyvector_type(" +
+ llvm::utostr_32(T->getNumElements()) + "))) " + S);
+ break;
+ case VectorType::GenericVector: {
+ // FIXME: We prefer to print the size directly here, but have no way
+ // to get the size of the type.
+ print(T->getElementType(), S);
+ std::string V = "__attribute__((__vector_size__(";
+ V += llvm::utostr_32(T->getNumElements()); // convert back to bytes.
+ std::string ET;
+ print(T->getElementType(), ET);
+ V += " * sizeof(" + ET + ")))) ";
+ S = V + S;
+ break;
+ }
+ }
+}
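+// Worked example (illustrative, not part of the upstream file): a
+// GenericVector of four ints prints as
+//   "__attribute__((__vector_size__(4 * sizeof(int)))) int"
+// i.e. the element count is multiplied by sizeof(element) in the emitted
+// text because the attribute itself takes a size in bytes.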
+
+void TypePrinter::printExtVector(const ExtVectorType *T, std::string &S) {
+ S += " __attribute__((ext_vector_type(";
+ S += llvm::utostr_32(T->getNumElements());
+ S += ")))";
+ print(T->getElementType(), S);
+}
+
+void
+FunctionProtoType::printExceptionSpecification(std::string &S,
+ PrintingPolicy Policy) const {
+
+ if (hasDynamicExceptionSpec()) {
+ S += " throw(";
+ if (getExceptionSpecType() == EST_MSAny)
+ S += "...";
+ else
+ for (unsigned I = 0, N = getNumExceptions(); I != N; ++I) {
+ if (I)
+ S += ", ";
+
+ S += getExceptionType(I).getAsString(Policy);
+ }
+ S += ")";
+ } else if (isNoexceptExceptionSpec(getExceptionSpecType())) {
+ S += " noexcept";
+ if (getExceptionSpecType() == EST_ComputedNoexcept) {
+ S += "(";
+      // Print the expression into its own buffer; streaming into S and then
+      // appending EOut.str() (which aliases S) would duplicate the string.
+      std::string ExprStr;
+      llvm::raw_string_ostream EOut(ExprStr);
+      getNoexceptExpr()->printPretty(EOut, 0, Policy);
+      S += EOut.str();
+ S += ")";
+ }
+ }
+}
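+// Worked example (illustrative, not part of the upstream file): a dynamic
+// specification with two types prints as " throw(int, float)", the MS-style
+// any-exception case prints as " throw(...)", and a noexcept specification
+// prints as " noexcept" or, for a computed one, " noexcept(<expr>)" with the
+// pretty-printed expression in place of <expr>.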
+
+void TypePrinter::printFunctionProto(const FunctionProtoType *T,
+ std::string &S) {
+ // If needed for precedence reasons, wrap the inner part in grouping parens.
+ if (!S.empty())
+ S = "(" + S + ")";
+
+ S += "(";
+ std::string Tmp;
+ PrintingPolicy ParamPolicy(Policy);
+ ParamPolicy.SuppressSpecifiers = false;
+ for (unsigned i = 0, e = T->getNumArgs(); i != e; ++i) {
+ if (i) S += ", ";
+ print(T->getArgType(i), Tmp);
+ S += Tmp;
+ Tmp.clear();
+ }
+
+ if (T->isVariadic()) {
+ if (T->getNumArgs())
+ S += ", ";
+ S += "...";
+ } else if (T->getNumArgs() == 0 && !Policy.LangOpts.CPlusPlus) {
+ // Do not emit int() if we have a proto, emit 'int(void)'.
+ S += "void";
+ }
+
+ S += ")";
+
+ FunctionType::ExtInfo Info = T->getExtInfo();
+ switch(Info.getCC()) {
+ case CC_Default: break;
+ case CC_C:
+ S += " __attribute__((cdecl))";
+ break;
+ case CC_X86StdCall:
+ S += " __attribute__((stdcall))";
+ break;
+ case CC_X86FastCall:
+ S += " __attribute__((fastcall))";
+ break;
+ case CC_X86ThisCall:
+ S += " __attribute__((thiscall))";
+ break;
+ case CC_X86Pascal:
+ S += " __attribute__((pascal))";
+ break;
+ case CC_AAPCS:
+ S += " __attribute__((pcs(\"aapcs\")))";
+ break;
+ case CC_AAPCS_VFP:
+ S += " __attribute__((pcs(\"aapcs-vfp\")))";
+ break;
+ }
+ if (Info.getNoReturn())
+ S += " __attribute__((noreturn))";
+ if (Info.getRegParm())
+ S += " __attribute__((regparm (" +
+ llvm::utostr_32(Info.getRegParm()) + ")))";
+
+ AppendTypeQualList(S, T->getTypeQuals());
+
+ switch (T->getRefQualifier()) {
+ case RQ_None:
+ break;
+
+ case RQ_LValue:
+ S += " &";
+ break;
+
+ case RQ_RValue:
+ S += " &&";
+ break;
+ }
+ T->printExceptionSpecification(S, Policy);
+ if (T->hasTrailingReturn()) {
+ std::string ResultS;
+ print(T->getResultType(), ResultS);
+ S = "auto " + S + " -> " + ResultS;
+ } else
+ print(T->getResultType(), S);
+}
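+// Worked example (illustrative, not part of the upstream file): a variadic
+// function taking an int and returning int prints as "int (int, ...)"; in C,
+// a zero-argument prototype prints as "int (void)" rather than "int ()"
+// because of the CPlusPlus check above; and a function with a trailing
+// return type is printed in the "auto (params) -> result" form.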
+
+void TypePrinter::printFunctionNoProto(const FunctionNoProtoType *T,
+ std::string &S) {
+ // If needed for precedence reasons, wrap the inner part in grouping parens.
+ if (!S.empty())
+ S = "(" + S + ")";
+
+ S += "()";
+ if (T->getNoReturnAttr())
+ S += " __attribute__((noreturn))";
+ print(T->getResultType(), S);
+}
+
+static void printTypeSpec(const NamedDecl *D, std::string &S) {
+ IdentifierInfo *II = D->getIdentifier();
+ if (S.empty())
+ S = II->getName().str();
+ else
+ S = II->getName().str() + ' ' + S;
+}
+
+void TypePrinter::printUnresolvedUsing(const UnresolvedUsingType *T,
+ std::string &S) {
+ printTypeSpec(T->getDecl(), S);
+}
+
+void TypePrinter::printTypedef(const TypedefType *T, std::string &S) {
+ printTypeSpec(T->getDecl(), S);
+}
+
+void TypePrinter::printTypeOfExpr(const TypeOfExprType *T, std::string &S) {
+ if (!S.empty()) // Prefix the basic type, e.g. 'typeof(e) X'.
+ S = ' ' + S;
+ std::string Str;
+ llvm::raw_string_ostream s(Str);
+ T->getUnderlyingExpr()->printPretty(s, 0, Policy);
+ S = "typeof " + s.str() + S;
+}
+
+void TypePrinter::printTypeOf(const TypeOfType *T, std::string &S) {
+ if (!S.empty()) // Prefix the basic type, e.g. 'typeof(t) X'.
+ S = ' ' + S;
+ std::string Tmp;
+ print(T->getUnderlyingType(), Tmp);
+ S = "typeof(" + Tmp + ")" + S;
+}
+
+void TypePrinter::printDecltype(const DecltypeType *T, std::string &S) {
+ if (!S.empty()) // Prefix the basic type, e.g. 'decltype(t) X'.
+ S = ' ' + S;
+ std::string Str;
+ llvm::raw_string_ostream s(Str);
+ T->getUnderlyingExpr()->printPretty(s, 0, Policy);
+ S = "decltype(" + s.str() + ")" + S;
+}
+
+void TypePrinter::printUnaryTransform(const UnaryTransformType *T,
+ std::string &S) {
+ if (!S.empty())
+ S = ' ' + S;
+ std::string Str;
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getBaseType(), Str);
+
+ switch (T->getUTTKind()) {
+ case UnaryTransformType::EnumUnderlyingType:
+ S = "__underlying_type(" + Str + ")" + S;
+ break;
+ }
+}
+
+void TypePrinter::printAuto(const AutoType *T, std::string &S) {
+ // If the type has been deduced, do not print 'auto'.
+ if (T->isDeduced()) {
+ print(T->getDeducedType(), S);
+ } else {
+ if (!S.empty()) // Prefix the basic type, e.g. 'auto X'.
+ S = ' ' + S;
+ S = "auto" + S;
+ }
+}
+
+void TypePrinter::printAtomic(const AtomicType *T, std::string &S) {
+ if (!S.empty())
+ S = ' ' + S;
+ std::string Str;
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getValueType(), Str);
+
+ S = "_Atomic(" + Str + ")" + S;
+}
+
+/// Appends the given scope to the end of a string.
+void TypePrinter::AppendScope(DeclContext *DC, std::string &Buffer) {
+ if (DC->isTranslationUnit()) return;
+ AppendScope(DC->getParent(), Buffer);
+
+ unsigned OldSize = Buffer.size();
+
+ if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(DC)) {
+ if (Policy.SuppressUnwrittenScope &&
+ (NS->isAnonymousNamespace() || NS->isInline()))
+ return;
+ if (NS->getIdentifier())
+ Buffer += NS->getNameAsString();
+ else
+ Buffer += "<anonymous>";
+ } else if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(DC)) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ std::string TemplateArgsStr
+ = TemplateSpecializationType::PrintTemplateArgumentList(
+ TemplateArgs.data(),
+ TemplateArgs.size(),
+ Policy);
+ Buffer += Spec->getIdentifier()->getName();
+ Buffer += TemplateArgsStr;
+ } else if (TagDecl *Tag = dyn_cast<TagDecl>(DC)) {
+ if (TypedefNameDecl *Typedef = Tag->getTypedefNameForAnonDecl())
+ Buffer += Typedef->getIdentifier()->getName();
+ else if (Tag->getIdentifier())
+ Buffer += Tag->getIdentifier()->getName();
+ else
+ return;
+ }
+
+ if (Buffer.size() != OldSize)
+ Buffer += "::";
+}
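+// Worked example (illustrative, not part of the upstream file): for a tag
+// declared as N::Outer::Inner, AppendScope walks outward first and emits
+// "N::Outer::" before the caller appends "Inner"; with SuppressUnwrittenScope
+// set, enclosing anonymous and inline namespaces contribute nothing.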
+
+void TypePrinter::printTag(TagDecl *D, std::string &InnerString) {
+ if (Policy.SuppressTag)
+ return;
+
+ std::string Buffer;
+ bool HasKindDecoration = false;
+
+ // bool SuppressTagKeyword
+ // = Policy.LangOpts.CPlusPlus || Policy.SuppressTagKeyword;
+
+ // We don't print tags unless this is an elaborated type.
+ // In C, we just assume every RecordType is an elaborated type.
+ if (!(Policy.LangOpts.CPlusPlus || Policy.SuppressTagKeyword ||
+ D->getTypedefNameForAnonDecl())) {
+ HasKindDecoration = true;
+ Buffer += D->getKindName();
+ Buffer += ' ';
+ }
+
+ // Compute the full nested-name-specifier for this type.
+ // In C, this will always be empty except when the type
+  // being printed is declared inside another record.
+ if (!Policy.SuppressScope)
+ AppendScope(D->getDeclContext(), Buffer);
+
+ if (const IdentifierInfo *II = D->getIdentifier())
+ Buffer += II->getNameStart();
+ else if (TypedefNameDecl *Typedef = D->getTypedefNameForAnonDecl()) {
+ assert(Typedef->getIdentifier() && "Typedef without identifier?");
+ Buffer += Typedef->getIdentifier()->getNameStart();
+ } else {
+ // Make an unambiguous representation for anonymous types, e.g.
+ // <anonymous enum at /usr/include/string.h:120:9>
+ llvm::raw_string_ostream OS(Buffer);
+
+ if (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda()) {
+ OS << "<lambda";
+ HasKindDecoration = true;
+ } else {
+ OS << "<anonymous";
+ }
+
+ if (Policy.AnonymousTagLocations) {
+ // Suppress the redundant tag keyword if we just printed one.
+ // We don't have to worry about ElaboratedTypes here because you can't
+ // refer to an anonymous type with one.
+ if (!HasKindDecoration)
+ OS << " " << D->getKindName();
+
+ PresumedLoc PLoc = D->getASTContext().getSourceManager().getPresumedLoc(
+ D->getLocation());
+ if (PLoc.isValid()) {
+ OS << " at " << PLoc.getFilename()
+ << ':' << PLoc.getLine()
+ << ':' << PLoc.getColumn();
+ }
+ }
+
+ OS << '>';
+ }
+
+ // If this is a class template specialization, print the template
+ // arguments.
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(D)) {
+ const TemplateArgument *Args;
+ unsigned NumArgs;
+ if (TypeSourceInfo *TAW = Spec->getTypeAsWritten()) {
+ const TemplateSpecializationType *TST =
+ cast<TemplateSpecializationType>(TAW->getType());
+ Args = TST->getArgs();
+ NumArgs = TST->getNumArgs();
+ } else {
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ Args = TemplateArgs.data();
+ NumArgs = TemplateArgs.size();
+ }
+ IncludeStrongLifetimeRAII Strong(Policy);
+ Buffer += TemplateSpecializationType::PrintTemplateArgumentList(Args,
+ NumArgs,
+ Policy);
+ }
+
+ if (!InnerString.empty()) {
+ Buffer += ' ';
+ Buffer += InnerString;
+ }
+
+ std::swap(Buffer, InnerString);
+}
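+// Worked example (illustrative, not part of the upstream file): a named
+// struct S prints as "struct S" in C and as plain "S" in C++ (where the tag
+// keyword is suppressed); an unnamed tag falls into the
+// "<anonymous ... at <file>:<line>:<col>>" form built above, and an unnamed
+// lambda class prints as "<lambda at <file>:<line>:<col>>".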
+
+void TypePrinter::printRecord(const RecordType *T, std::string &S) {
+ printTag(T->getDecl(), S);
+}
+
+void TypePrinter::printEnum(const EnumType *T, std::string &S) {
+ printTag(T->getDecl(), S);
+}
+
+void TypePrinter::printTemplateTypeParm(const TemplateTypeParmType *T,
+ std::string &S) {
+ if (!S.empty()) // Prefix the basic type, e.g. 'parmname X'.
+ S = ' ' + S;
+
+ if (IdentifierInfo *Id = T->getIdentifier())
+ S = Id->getName().str() + S;
+ else
+ S = "type-parameter-" + llvm::utostr_32(T->getDepth()) + '-' +
+ llvm::utostr_32(T->getIndex()) + S;
+}
+
+void TypePrinter::printSubstTemplateTypeParm(const SubstTemplateTypeParmType *T,
+ std::string &S) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ print(T->getReplacementType(), S);
+}
+
+void TypePrinter::printSubstTemplateTypeParmPack(
+ const SubstTemplateTypeParmPackType *T,
+ std::string &S) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ printTemplateTypeParm(T->getReplacedParameter(), S);
+}
+
+void TypePrinter::printTemplateSpecialization(
+ const TemplateSpecializationType *T,
+ std::string &S) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ std::string SpecString;
+
+ {
+ llvm::raw_string_ostream OS(SpecString);
+ T->getTemplateName().print(OS, Policy);
+ }
+
+ SpecString += TemplateSpecializationType::PrintTemplateArgumentList(
+ T->getArgs(),
+ T->getNumArgs(),
+ Policy);
+ if (S.empty())
+ S.swap(SpecString);
+ else
+ S = SpecString + ' ' + S;
+}
+
+void TypePrinter::printInjectedClassName(const InjectedClassNameType *T,
+ std::string &S) {
+ printTemplateSpecialization(T->getInjectedTST(), S);
+}
+
+void TypePrinter::printElaborated(const ElaboratedType *T, std::string &S) {
+ std::string MyString;
+
+ {
+ llvm::raw_string_ostream OS(MyString);
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ETK_None)
+ OS << " ";
+ NestedNameSpecifier* Qualifier = T->getQualifier();
+ if (Qualifier)
+ Qualifier->print(OS, Policy);
+ }
+
+ std::string TypeStr;
+ PrintingPolicy InnerPolicy(Policy);
+ InnerPolicy.SuppressTagKeyword = true;
+ InnerPolicy.SuppressScope = true;
+ TypePrinter(InnerPolicy).print(T->getNamedType(), TypeStr);
+
+ MyString += TypeStr;
+ if (S.empty())
+ S.swap(MyString);
+ else
+ S = MyString + ' ' + S;
+}
+
+void TypePrinter::printParen(const ParenType *T, std::string &S) {
+ if (!S.empty() && !isa<FunctionType>(T->getInnerType()))
+ S = '(' + S + ')';
+ print(T->getInnerType(), S);
+}
+
+void TypePrinter::printDependentName(const DependentNameType *T, std::string &S) {
+ std::string MyString;
+
+ {
+ llvm::raw_string_ostream OS(MyString);
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ETK_None)
+ OS << " ";
+
+ T->getQualifier()->print(OS, Policy);
+
+ OS << T->getIdentifier()->getName();
+ }
+
+ if (S.empty())
+ S.swap(MyString);
+ else
+ S = MyString + ' ' + S;
+}
+
+void TypePrinter::printDependentTemplateSpecialization(
+ const DependentTemplateSpecializationType *T, std::string &S) {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ std::string MyString;
+ {
+ llvm::raw_string_ostream OS(MyString);
+
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ETK_None)
+ OS << " ";
+
+ if (T->getQualifier())
+ T->getQualifier()->print(OS, Policy);
+ OS << T->getIdentifier()->getName();
+ OS << TemplateSpecializationType::PrintTemplateArgumentList(
+ T->getArgs(),
+ T->getNumArgs(),
+ Policy);
+ }
+
+ if (S.empty())
+ S.swap(MyString);
+ else
+ S = MyString + ' ' + S;
+}
+
+void TypePrinter::printPackExpansion(const PackExpansionType *T,
+ std::string &S) {
+ print(T->getPattern(), S);
+ S += "...";
+}
+
+void TypePrinter::printAttributed(const AttributedType *T,
+ std::string &S) {
+ // Prefer the macro forms of the GC and ownership qualifiers.
+ if (T->getAttrKind() == AttributedType::attr_objc_gc ||
+ T->getAttrKind() == AttributedType::attr_objc_ownership)
+ return print(T->getEquivalentType(), S);
+
+ print(T->getModifiedType(), S);
+
+ // TODO: not all attributes are GCC-style attributes.
+ S += " __attribute__((";
+ switch (T->getAttrKind()) {
+ case AttributedType::attr_address_space:
+ S += "address_space(";
+      // getAddressSpace() returns an unsigned; format it as decimal text
+      // rather than appending it as a single char code.
+      S += llvm::utostr_32(T->getEquivalentType().getAddressSpace());
+ S += ")";
+ break;
+
+ case AttributedType::attr_vector_size: {
+ S += "__vector_size__(";
+ if (const VectorType *vector =T->getEquivalentType()->getAs<VectorType>()) {
+      // Format the element count as text; appending the raw unsigned would
+      // add a single char instead of the number.
+      S += llvm::utostr_32(vector->getNumElements());
+ S += " * sizeof(";
+
+ std::string tmp;
+ print(vector->getElementType(), tmp);
+ S += tmp;
+ S += ")";
+ }
+ S += ")";
+ break;
+ }
+
+ case AttributedType::attr_neon_vector_type:
+ case AttributedType::attr_neon_polyvector_type: {
+ if (T->getAttrKind() == AttributedType::attr_neon_vector_type)
+ S += "neon_vector_type(";
+ else
+ S += "neon_polyvector_type(";
+ const VectorType *vector = T->getEquivalentType()->getAs<VectorType>();
+ S += llvm::utostr_32(vector->getNumElements());
+ S += ")";
+ break;
+ }
+
+ case AttributedType::attr_regparm: {
+ S += "regparm(";
+ QualType t = T->getEquivalentType();
+ while (!t->isFunctionType())
+ t = t->getPointeeType();
+    // getRegParmType() is an unsigned count; print it as decimal text.
+    S += llvm::utostr_32(t->getAs<FunctionType>()->getRegParmType());
+ S += ")";
+ break;
+ }
+
+ case AttributedType::attr_objc_gc: {
+ S += "objc_gc(";
+
+ QualType tmp = T->getEquivalentType();
+ while (tmp.getObjCGCAttr() == Qualifiers::GCNone) {
+ QualType next = tmp->getPointeeType();
+ if (next == tmp) break;
+ tmp = next;
+ }
+
+ if (tmp.isObjCGCWeak())
+ S += "weak";
+ else
+ S += "strong";
+ S += ")";
+ break;
+ }
+
+ case AttributedType::attr_objc_ownership:
+ S += "objc_ownership(";
+ switch (T->getEquivalentType().getObjCLifetime()) {
+ case Qualifiers::OCL_None: llvm_unreachable("no ownership!");
+ case Qualifiers::OCL_ExplicitNone: S += "none"; break;
+ case Qualifiers::OCL_Strong: S += "strong"; break;
+ case Qualifiers::OCL_Weak: S += "weak"; break;
+ case Qualifiers::OCL_Autoreleasing: S += "autoreleasing"; break;
+ }
+ S += ")";
+ break;
+
+ case AttributedType::attr_noreturn: S += "noreturn"; break;
+ case AttributedType::attr_cdecl: S += "cdecl"; break;
+ case AttributedType::attr_fastcall: S += "fastcall"; break;
+ case AttributedType::attr_stdcall: S += "stdcall"; break;
+ case AttributedType::attr_thiscall: S += "thiscall"; break;
+ case AttributedType::attr_pascal: S += "pascal"; break;
+ case AttributedType::attr_pcs: {
+ S += "pcs(";
+ QualType t = T->getEquivalentType();
+ while (!t->isFunctionType())
+ t = t->getPointeeType();
+ S += (t->getAs<FunctionType>()->getCallConv() == CC_AAPCS ?
+ "\"aapcs\"" : "\"aapcs-vfp\"");
+ S += ")";
+ break;
+ }
+ }
+ S += "))";
+}
+
+void TypePrinter::printObjCInterface(const ObjCInterfaceType *T,
+ std::string &S) {
+ if (!S.empty()) // Prefix the basic type, e.g. 'typedefname X'.
+ S = ' ' + S;
+
+ std::string ObjCQIString = T->getDecl()->getNameAsString();
+ S = ObjCQIString + S;
+}
+
+void TypePrinter::printObjCObject(const ObjCObjectType *T,
+ std::string &S) {
+ if (T->qual_empty())
+ return print(T->getBaseType(), S);
+
+ std::string tmp;
+ print(T->getBaseType(), tmp);
+ tmp += '<';
+ bool isFirst = true;
+ for (ObjCObjectType::qual_iterator
+ I = T->qual_begin(), E = T->qual_end(); I != E; ++I) {
+ if (isFirst)
+ isFirst = false;
+ else
+ tmp += ',';
+ tmp += (*I)->getNameAsString();
+ }
+ tmp += '>';
+
+ if (!S.empty()) {
+ tmp += ' ';
+ tmp += S;
+ }
+ std::swap(tmp, S);
+}
+
+void TypePrinter::printObjCObjectPointer(const ObjCObjectPointerType *T,
+ std::string &S) {
+ std::string ObjCQIString;
+
+ T->getPointeeType().getLocalQualifiers().getAsStringInternal(ObjCQIString,
+ Policy);
+ if (!ObjCQIString.empty())
+ ObjCQIString += ' ';
+
+ if (T->isObjCIdType() || T->isObjCQualifiedIdType())
+ ObjCQIString += "id";
+ else if (T->isObjCClassType() || T->isObjCQualifiedClassType())
+ ObjCQIString += "Class";
+ else if (T->isObjCSelType())
+ ObjCQIString += "SEL";
+ else
+ ObjCQIString += T->getInterfaceDecl()->getNameAsString();
+
+ if (!T->qual_empty()) {
+ ObjCQIString += '<';
+ for (ObjCObjectPointerType::qual_iterator I = T->qual_begin(),
+ E = T->qual_end();
+ I != E; ++I) {
+ ObjCQIString += (*I)->getNameAsString();
+ if (I+1 != E)
+ ObjCQIString += ',';
+ }
+ ObjCQIString += '>';
+ }
+
+ if (!T->isObjCIdType() && !T->isObjCQualifiedIdType())
+ ObjCQIString += " *"; // Don't forget the implicit pointer.
+ else if (!S.empty()) // Prefix the basic type, e.g. 'typedefname X'.
+ S = ' ' + S;
+
+ S = ObjCQIString + S;
+}
+
+std::string TemplateSpecializationType::
+ PrintTemplateArgumentList(const TemplateArgumentListInfo &Args,
+ const PrintingPolicy &Policy) {
+ return PrintTemplateArgumentList(Args.getArgumentArray(),
+ Args.size(),
+ Policy);
+}
+
+std::string
+TemplateSpecializationType::PrintTemplateArgumentList(
+ const TemplateArgument *Args,
+ unsigned NumArgs,
+ const PrintingPolicy &Policy,
+ bool SkipBrackets) {
+ std::string SpecString;
+ if (!SkipBrackets)
+ SpecString += '<';
+
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
+ if (SpecString.size() > unsigned(!SkipBrackets))
+ SpecString += ", ";
+
+ // Print the argument into a string.
+ std::string ArgString;
+ if (Args[Arg].getKind() == TemplateArgument::Pack) {
+ ArgString = PrintTemplateArgumentList(Args[Arg].pack_begin(),
+ Args[Arg].pack_size(),
+ Policy, true);
+ } else {
+ llvm::raw_string_ostream ArgOut(ArgString);
+ Args[Arg].print(Policy, ArgOut);
+ }
+
+ // If this is the first argument and its string representation
+ // begins with the global scope specifier ('::foo'), add a space
+    // to avoid printing the digraph '<:'.
+ if (!Arg && !ArgString.empty() && ArgString[0] == ':')
+ SpecString += ' ';
+
+ SpecString += ArgString;
+ }
+
+ // If the last character of our string is '>', add another space to
+  // keep the two '>'s separate tokens. We don't *have* to do this in
+ // C++0x, but it's still good hygiene.
+ if (!SpecString.empty() && SpecString[SpecString.size() - 1] == '>')
+ SpecString += ' ';
+
+ if (!SkipBrackets)
+ SpecString += '>';
+
+ return SpecString;
+}
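+// Worked examples (illustrative, not part of the upstream file) of the two
+// special cases above: A<::ns::B> is printed as "A< ::ns::B>" so the '<:'
+// digraph is never formed, and nested arguments such as vector<vector<int> >
+// keep the space before the closing '>' so the two closing angle brackets
+// remain separate tokens.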
+
+// Sadly, repeat all that with TemplateArgLoc.
+std::string TemplateSpecializationType::
+PrintTemplateArgumentList(const TemplateArgumentLoc *Args, unsigned NumArgs,
+ const PrintingPolicy &Policy) {
+ std::string SpecString;
+ SpecString += '<';
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
+ if (SpecString.size() > 1)
+ SpecString += ", ";
+
+ // Print the argument into a string.
+ std::string ArgString;
+ if (Args[Arg].getArgument().getKind() == TemplateArgument::Pack) {
+ ArgString = PrintTemplateArgumentList(
+ Args[Arg].getArgument().pack_begin(),
+ Args[Arg].getArgument().pack_size(),
+ Policy, true);
+ } else {
+ llvm::raw_string_ostream ArgOut(ArgString);
+ Args[Arg].getArgument().print(Policy, ArgOut);
+ }
+
+ // If this is the first argument and its string representation
+ // begins with the global scope specifier ('::foo'), add a space
+    // to avoid printing the digraph '<:'.
+ if (!Arg && !ArgString.empty() && ArgString[0] == ':')
+ SpecString += ' ';
+
+ SpecString += ArgString;
+ }
+
+ // If the last character of our string is '>', add another space to
+  // keep the two '>'s separate tokens. We don't *have* to do this in
+ // C++0x, but it's still good hygiene.
+ if (SpecString[SpecString.size() - 1] == '>')
+ SpecString += ' ';
+
+ SpecString += '>';
+
+ return SpecString;
+}
+
+void QualType::dump(const char *msg) const {
+ std::string R = "identifier";
+ LangOptions LO;
+ getAsStringInternal(R, PrintingPolicy(LO));
+ if (msg)
+ llvm::errs() << msg << ": ";
+ llvm::errs() << R << "\n";
+}
+void QualType::dump() const {
+ dump("");
+}
+
+void Type::dump() const {
+ QualType(this, 0).dump();
+}
+
+std::string Qualifiers::getAsString() const {
+ LangOptions LO;
+ return getAsString(PrintingPolicy(LO));
+}
+
+// Appends qualifiers to the given string, separated by spaces. Will
+// prefix a space if the string is non-empty. Will not append a final
+// space.
+void Qualifiers::getAsStringInternal(std::string &S,
+ const PrintingPolicy& Policy) const {
+ AppendTypeQualList(S, getCVRQualifiers());
+ if (unsigned addrspace = getAddressSpace()) {
+ if (!S.empty()) S += ' ';
+ switch (addrspace) {
+ case LangAS::opencl_global:
+ S += "__global";
+ break;
+ case LangAS::opencl_local:
+ S += "__local";
+ break;
+ case LangAS::opencl_constant:
+ S += "__constant";
+ break;
+ default:
+ S += "__attribute__((address_space(";
+ S += llvm::utostr_32(addrspace);
+ S += ")))";
+ }
+ }
+ if (Qualifiers::GC gc = getObjCGCAttr()) {
+ if (!S.empty()) S += ' ';
+ if (gc == Qualifiers::Weak)
+ S += "__weak";
+ else
+ S += "__strong";
+ }
+ if (Qualifiers::ObjCLifetime lifetime = getObjCLifetime()) {
+ if (!S.empty() &&
+ !(lifetime == Qualifiers::OCL_Strong && Policy.SuppressStrongLifetime))
+ S += ' ';
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None: llvm_unreachable("none but true");
+ case Qualifiers::OCL_ExplicitNone: S += "__unsafe_unretained"; break;
+ case Qualifiers::OCL_Strong:
+ if (!Policy.SuppressStrongLifetime)
+ S += "__strong";
+ break;
+
+ case Qualifiers::OCL_Weak: S += "__weak"; break;
+ case Qualifiers::OCL_Autoreleasing: S += "__autoreleasing"; break;
+ }
+ }
+}
+
+std::string QualType::getAsString(const Type *ty, Qualifiers qs) {
+ std::string buffer;
+ LangOptions options;
+ getAsStringInternal(ty, qs, buffer, PrintingPolicy(options));
+ return buffer;
+}
+
+void QualType::getAsStringInternal(const Type *ty, Qualifiers qs,
+ std::string &buffer,
+ const PrintingPolicy &policy) {
+ TypePrinter(policy).print(ty, qs, buffer);
+}
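+// Minimal usage sketch (illustrative, not part of the upstream file),
+// assuming an ASTContext 'Ctx' is in scope:
+//   QualType Elt = Ctx.IntTy;
+//   Elt.addConst();
+//   std::string S = Ctx.getPointerType(Elt).getAsString();  // "const int *"
+// getAsString() ends up in QualType::getAsStringInternal above, which drives
+// the TypePrinter with the given PrintingPolicy.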
diff --git a/contrib/llvm/tools/clang/lib/AST/VTTBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/VTTBuilder.cpp
new file mode 100644
index 0000000..f5ff624
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/VTTBuilder.cpp
@@ -0,0 +1,212 @@
+//===--- VTTBuilder.cpp - C++ VTT layout builder --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with generation of the layout of virtual table
+// tables (VTT).
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/VTTBuilder.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/Format.h"
+#include <algorithm>
+#include <cstdio>
+
+using namespace clang;
+
+#define DUMP_OVERRIDERS 0
+
+VTTBuilder::VTTBuilder(ASTContext &Ctx,
+ const CXXRecordDecl *MostDerivedClass,
+ bool GenerateDefinition)
+ : Ctx(Ctx), MostDerivedClass(MostDerivedClass),
+ MostDerivedClassLayout(Ctx.getASTRecordLayout(MostDerivedClass)),
+ GenerateDefinition(GenerateDefinition) {
+ // Lay out this VTT.
+ LayoutVTT(BaseSubobject(MostDerivedClass, CharUnits::Zero()),
+ /*BaseIsVirtual=*/false);
+}
+
+void VTTBuilder::AddVTablePointer(BaseSubobject Base, uint64_t VTableIndex,
+ const CXXRecordDecl *VTableClass) {
+ // Store the vtable pointer index if we're generating the primary VTT.
+ if (VTableClass == MostDerivedClass) {
+ assert(!SecondaryVirtualPointerIndices.count(Base) &&
+ "A virtual pointer index already exists for this base subobject!");
+ SecondaryVirtualPointerIndices[Base] = VTTComponents.size();
+ }
+
+ if (!GenerateDefinition) {
+ VTTComponents.push_back(VTTComponent());
+ return;
+ }
+
+ VTTComponents.push_back(VTTComponent(VTableIndex, Base));
+}
+
+void VTTBuilder::LayoutSecondaryVTTs(BaseSubobject Base) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+
+ // Don't layout virtual bases.
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
+ CharUnits BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
+
+ // Layout the VTT for this base.
+ LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/false);
+ }
+}
+
+void
+VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ uint64_t VTableIndex,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy &VBases) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+  // We're only interested in bases that either have virtual bases or are
+  // themselves morally virtual.
+ if (!RD->getNumVBases() && !BaseIsMorallyVirtual)
+ return;
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Itanium C++ ABI 2.6.2:
+ // Secondary virtual pointers are present for all bases with either
+ // virtual bases or virtual function declarations overridden along a
+ // virtual path.
+ //
+ // If the base class is not dynamic, we don't want to add it, nor any
+ // of its base classes.
+ if (!BaseDecl->isDynamicClass())
+ continue;
+
+ bool BaseDeclIsMorallyVirtual = BaseIsMorallyVirtual;
+ bool BaseDeclIsNonVirtualPrimaryBase = false;
+ CharUnits BaseOffset;
+ if (I->isVirtual()) {
+ // Ignore virtual bases that we've already visited.
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseDeclIsMorallyVirtual = true;
+ } else {
+ const ASTRecordLayout &Layout = Ctx.getASTRecordLayout(RD);
+
+ BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
+
+ if (!Layout.isPrimaryBaseVirtual() &&
+ Layout.getPrimaryBase() == BaseDecl)
+ BaseDeclIsNonVirtualPrimaryBase = true;
+ }
+
+ // Itanium C++ ABI 2.6.2:
+ // Secondary virtual pointers: for each base class X which (a) has virtual
+ // bases or is reachable along a virtual path from D, and (b) is not a
+ // non-virtual primary base, the address of the virtual table for X-in-D
+ // or an appropriate construction virtual table.
+ if (!BaseDeclIsNonVirtualPrimaryBase &&
+ (BaseDecl->getNumVBases() || BaseDeclIsMorallyVirtual)) {
+ // Add the vtable pointer.
+ AddVTablePointer(BaseSubobject(BaseDecl, BaseOffset), VTableIndex,
+ VTableClass);
+ }
+
+ // And lay out the secondary virtual pointers for the base class.
+ LayoutSecondaryVirtualPointers(BaseSubobject(BaseDecl, BaseOffset),
+ BaseDeclIsMorallyVirtual, VTableIndex,
+ VTableClass, VBases);
+ }
+}
+
+void
+VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ uint64_t VTableIndex) {
+ VisitedVirtualBasesSetTy VBases;
+ LayoutSecondaryVirtualPointers(Base, /*BaseIsMorallyVirtual=*/false,
+ VTableIndex, Base.getBase(), VBases);
+}
+
+void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases) {
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Check if this is a virtual base.
+ if (I->isVirtual()) {
+ // Check if we've seen this base before.
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ CharUnits BaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+
+ LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/true);
+ }
+
+ // We only need to layout virtual VTTs for this base if it actually has
+ // virtual bases.
+ if (BaseDecl->getNumVBases())
+ LayoutVirtualVTTs(BaseDecl, VBases);
+ }
+}
+
+void VTTBuilder::LayoutVTT(BaseSubobject Base, bool BaseIsVirtual) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ // Itanium C++ ABI 2.6.2:
+ // An array of virtual table addresses, called the VTT, is declared for
+ // each class type that has indirect or direct virtual base classes.
+ if (RD->getNumVBases() == 0)
+ return;
+
+ bool IsPrimaryVTT = Base.getBase() == MostDerivedClass;
+
+ if (!IsPrimaryVTT) {
+ // Remember the sub-VTT index.
+ SubVTTIndicies[Base] = VTTComponents.size();
+ }
+
+ uint64_t VTableIndex = VTTVTables.size();
+ VTTVTables.push_back(VTTVTable(Base, BaseIsVirtual));
+
+ // Add the primary vtable pointer.
+ AddVTablePointer(Base, VTableIndex, RD);
+
+ // Add the secondary VTTs.
+ LayoutSecondaryVTTs(Base);
+
+ // Add the secondary virtual pointers.
+ LayoutSecondaryVirtualPointers(Base, VTableIndex);
+
+ // If this is the primary VTT, we want to lay out virtual VTTs as well.
+ if (IsPrimaryVTT) {
+ VisitedVirtualBasesSetTy VBases;
+ LayoutVirtualVTTs(Base.getBase(), VBases);
+ }
+}
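+// Rough worked example (illustrative, not part of the upstream file): for
+//   struct A { virtual void f(); };
+//   struct B : virtual A { };  struct C : virtual A { };
+//   struct D : B, C { };
+// the VTT built above for D holds D's primary vtable pointer, a sub-VTT for
+// each of B-in-D and C-in-D (each containing that base's vtable pointer plus
+// a secondary virtual pointer for A), and then D's own secondary virtual
+// pointers for the bases that need them under the ABI rule quoted above;
+// A contributes no virtual VTT because it has no virtual bases of its own.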
diff --git a/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp b/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp
new file mode 100644
index 0000000..7a45972
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/AST/VTableBuilder.cpp
@@ -0,0 +1,2405 @@
+//===--- VTableBuilder.cpp - C++ vtable layout builder --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with generation of the layout of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/VTableBuilder.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/Format.h"
+#include <algorithm>
+#include <cstdio>
+
+using namespace clang;
+
+#define DUMP_OVERRIDERS 0
+
+namespace {
+
+/// BaseOffset - Represents an offset from a derived class to a direct or
+/// indirect base class.
+struct BaseOffset {
+ /// DerivedClass - The derived class.
+ const CXXRecordDecl *DerivedClass;
+
+ /// VirtualBase - If the path from the derived class to the base class
+ /// involves a virtual base class, this holds its declaration.
+ const CXXRecordDecl *VirtualBase;
+
+ /// NonVirtualOffset - The offset from the derived class to the base class.
+  /// (Or the offset from the virtual base class to the base class, if the
+  /// path from the derived class to the base class involves a virtual base
+  /// class.)
+ CharUnits NonVirtualOffset;
+
+ BaseOffset() : DerivedClass(0), VirtualBase(0),
+ NonVirtualOffset(CharUnits::Zero()) { }
+ BaseOffset(const CXXRecordDecl *DerivedClass,
+ const CXXRecordDecl *VirtualBase, CharUnits NonVirtualOffset)
+ : DerivedClass(DerivedClass), VirtualBase(VirtualBase),
+ NonVirtualOffset(NonVirtualOffset) { }
+
+ bool isEmpty() const { return NonVirtualOffset.isZero() && !VirtualBase; }
+};
+
+/// FinalOverriders - Contains the final overrider member functions for all
+/// member functions in the base subobjects of a class.
+class FinalOverriders {
+public:
+ /// OverriderInfo - Information about a final overrider.
+ struct OverriderInfo {
+ /// Method - The method decl of the overrider.
+ const CXXMethodDecl *Method;
+
+ /// Offset - the base offset of the overrider in the layout class.
+ CharUnits Offset;
+
+ OverriderInfo() : Method(0), Offset(CharUnits::Zero()) { }
+ };
+
+private:
+ /// MostDerivedClass - The most derived class for which the final overriders
+ /// are stored.
+ const CXXRecordDecl *MostDerivedClass;
+
+ /// MostDerivedClassOffset - If we're building final overriders for a
+ /// construction vtable, this holds the offset from the layout class to the
+ /// most derived class.
+ const CharUnits MostDerivedClassOffset;
+
+ /// LayoutClass - The class we're using for layout information. Will be
+ /// different than the most derived class if the final overriders are for a
+ /// construction vtable.
+ const CXXRecordDecl *LayoutClass;
+
+ ASTContext &Context;
+
+ /// MostDerivedClassLayout - the AST record layout of the most derived class.
+ const ASTRecordLayout &MostDerivedClassLayout;
+
+ /// MethodBaseOffsetPairTy - Uniquely identifies a member function
+ /// in a base subobject.
+ typedef std::pair<const CXXMethodDecl *, CharUnits> MethodBaseOffsetPairTy;
+
+ typedef llvm::DenseMap<MethodBaseOffsetPairTy,
+ OverriderInfo> OverridersMapTy;
+
+ /// OverridersMap - The final overriders for all virtual member functions of
+ /// all the base subobjects of the most derived class.
+ OverridersMapTy OverridersMap;
+
+  /// SubobjectOffsetMapTy - A mapping from a base subobject (represented
+  /// as a record decl and a subobject number) to its offsets in the most
+ /// derived class as well as the layout class.
+ typedef llvm::DenseMap<std::pair<const CXXRecordDecl *, unsigned>,
+ CharUnits> SubobjectOffsetMapTy;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCountMapTy;
+
+ /// ComputeBaseOffsets - Compute the offsets for all base subobjects of the
+ /// given base.
+ void ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
+ CharUnits OffsetInLayoutClass,
+ SubobjectOffsetMapTy &SubobjectOffsets,
+ SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
+ SubobjectCountMapTy &SubobjectCounts);
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+
+ /// dump - dump the final overriders for a base subobject, and all its direct
+ /// and indirect base subobjects.
+ void dump(raw_ostream &Out, BaseSubobject Base,
+ VisitedVirtualBasesSetTy& VisitedVirtualBases);
+
+public:
+ FinalOverriders(const CXXRecordDecl *MostDerivedClass,
+ CharUnits MostDerivedClassOffset,
+ const CXXRecordDecl *LayoutClass);
+
+ /// getOverrider - Get the final overrider for the given method declaration in
+ /// the subobject with the given base offset.
+ OverriderInfo getOverrider(const CXXMethodDecl *MD,
+ CharUnits BaseOffset) const {
+ assert(OverridersMap.count(std::make_pair(MD, BaseOffset)) &&
+ "Did not find overrider!");
+
+ return OverridersMap.lookup(std::make_pair(MD, BaseOffset));
+ }
+
+ /// dump - dump the final overriders.
+ void dump() {
+ VisitedVirtualBasesSetTy VisitedVirtualBases;
+ dump(llvm::errs(), BaseSubobject(MostDerivedClass, CharUnits::Zero()),
+ VisitedVirtualBases);
+ }
+
+};
+
+FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass,
+ CharUnits MostDerivedClassOffset,
+ const CXXRecordDecl *LayoutClass)
+ : MostDerivedClass(MostDerivedClass),
+ MostDerivedClassOffset(MostDerivedClassOffset), LayoutClass(LayoutClass),
+ Context(MostDerivedClass->getASTContext()),
+ MostDerivedClassLayout(Context.getASTRecordLayout(MostDerivedClass)) {
+
+ // Compute base offsets.
+ SubobjectOffsetMapTy SubobjectOffsets;
+ SubobjectOffsetMapTy SubobjectLayoutClassOffsets;
+ SubobjectCountMapTy SubobjectCounts;
+ ComputeBaseOffsets(BaseSubobject(MostDerivedClass, CharUnits::Zero()),
+ /*IsVirtual=*/false,
+ MostDerivedClassOffset,
+ SubobjectOffsets, SubobjectLayoutClassOffsets,
+ SubobjectCounts);
+
+  // Get the final overriders.
+ CXXFinalOverriderMap FinalOverriders;
+ MostDerivedClass->getFinalOverriders(FinalOverriders);
+
+ for (CXXFinalOverriderMap::const_iterator I = FinalOverriders.begin(),
+ E = FinalOverriders.end(); I != E; ++I) {
+ const CXXMethodDecl *MD = I->first;
+ const OverridingMethods& Methods = I->second;
+
+ for (OverridingMethods::const_iterator I = Methods.begin(),
+ E = Methods.end(); I != E; ++I) {
+ unsigned SubobjectNumber = I->first;
+ assert(SubobjectOffsets.count(std::make_pair(MD->getParent(),
+ SubobjectNumber)) &&
+ "Did not find subobject offset!");
+
+ CharUnits BaseOffset = SubobjectOffsets[std::make_pair(MD->getParent(),
+ SubobjectNumber)];
+
+ assert(I->second.size() == 1 && "Final overrider is not unique!");
+ const UniqueVirtualMethod &Method = I->second.front();
+
+ const CXXRecordDecl *OverriderRD = Method.Method->getParent();
+ assert(SubobjectLayoutClassOffsets.count(
+ std::make_pair(OverriderRD, Method.Subobject))
+ && "Did not find subobject offset!");
+ CharUnits OverriderOffset =
+ SubobjectLayoutClassOffsets[std::make_pair(OverriderRD,
+ Method.Subobject)];
+
+ OverriderInfo& Overrider = OverridersMap[std::make_pair(MD, BaseOffset)];
+ assert(!Overrider.Method && "Overrider should not exist yet!");
+
+ Overrider.Offset = OverriderOffset;
+ Overrider.Method = Method.Method;
+ }
+ }
+
+#if DUMP_OVERRIDERS
+ // And dump them (for now).
+ dump();
+#endif
+}
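+// Illustrative example (not part of the upstream file): given
+//   struct A { virtual void f(); };
+//   struct B : virtual A { void f(); };
+//   struct C : B { };
+// the final overrider of A::f for the A base subobject of C is B::f, and the
+// map built above records it together with its offset in the layout class so
+// that vtable slots and thunks can be emitted against the right subobject.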
+
+static BaseOffset ComputeBaseOffset(ASTContext &Context,
+ const CXXRecordDecl *DerivedRD,
+ const CXXBasePath &Path) {
+ CharUnits NonVirtualOffset = CharUnits::Zero();
+
+ unsigned NonVirtualStart = 0;
+ const CXXRecordDecl *VirtualBase = 0;
+
+ // First, look for the virtual base class.
+ for (unsigned I = 0, E = Path.size(); I != E; ++I) {
+ const CXXBasePathElement &Element = Path[I];
+
+ if (Element.Base->isVirtual()) {
+ // FIXME: Can we break when we find the first virtual base?
+ // (If we can't, can't we just iterate over the path in reverse order?)
+ NonVirtualStart = I + 1;
+ QualType VBaseType = Element.Base->getType();
+ VirtualBase =
+ cast<CXXRecordDecl>(VBaseType->getAs<RecordType>()->getDecl());
+ }
+ }
+
+ // Now compute the non-virtual offset.
+ for (unsigned I = NonVirtualStart, E = Path.size(); I != E; ++I) {
+ const CXXBasePathElement &Element = Path[I];
+
+ // Check the base class offset.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Element.Class);
+
+ const RecordType *BaseType = Element.Base->getType()->getAs<RecordType>();
+ const CXXRecordDecl *Base = cast<CXXRecordDecl>(BaseType->getDecl());
+
+ NonVirtualOffset += Layout.getBaseClassOffset(Base);
+ }
+
+ // FIXME: This should probably use CharUnits or something. Maybe we should
+ // even change the base offsets in ASTRecordLayout to be specified in
+ // CharUnits.
+ return BaseOffset(DerivedRD, VirtualBase, NonVirtualOffset);
+
+}
+
+static BaseOffset ComputeBaseOffset(ASTContext &Context,
+ const CXXRecordDecl *BaseRD,
+ const CXXRecordDecl *DerivedRD) {
+ CXXBasePaths Paths(/*FindAmbiguities=*/false,
+ /*RecordPaths=*/true, /*DetectVirtual=*/false);
+
+ if (!const_cast<CXXRecordDecl *>(DerivedRD)->
+ isDerivedFrom(const_cast<CXXRecordDecl *>(BaseRD), Paths)) {
+ llvm_unreachable("Class must be derived from the passed in base class!");
+ }
+
+ return ComputeBaseOffset(Context, DerivedRD, Paths.front());
+}
+
+static BaseOffset
+ComputeReturnAdjustmentBaseOffset(ASTContext &Context,
+ const CXXMethodDecl *DerivedMD,
+ const CXXMethodDecl *BaseMD) {
+ const FunctionType *BaseFT = BaseMD->getType()->getAs<FunctionType>();
+ const FunctionType *DerivedFT = DerivedMD->getType()->getAs<FunctionType>();
+
+ // Canonicalize the return types.
+ CanQualType CanDerivedReturnType =
+ Context.getCanonicalType(DerivedFT->getResultType());
+ CanQualType CanBaseReturnType =
+ Context.getCanonicalType(BaseFT->getResultType());
+
+ assert(CanDerivedReturnType->getTypeClass() ==
+ CanBaseReturnType->getTypeClass() &&
+ "Types must have same type class!");
+
+ if (CanDerivedReturnType == CanBaseReturnType) {
+ // No adjustment needed.
+ return BaseOffset();
+ }
+
+ if (isa<ReferenceType>(CanDerivedReturnType)) {
+ CanDerivedReturnType =
+ CanDerivedReturnType->getAs<ReferenceType>()->getPointeeType();
+ CanBaseReturnType =
+ CanBaseReturnType->getAs<ReferenceType>()->getPointeeType();
+ } else if (isa<PointerType>(CanDerivedReturnType)) {
+ CanDerivedReturnType =
+ CanDerivedReturnType->getAs<PointerType>()->getPointeeType();
+ CanBaseReturnType =
+ CanBaseReturnType->getAs<PointerType>()->getPointeeType();
+ } else {
+ llvm_unreachable("Unexpected return type!");
+ }
+
+ // We need to compare unqualified types here; consider
+ // const T *Base::foo();
+ // T *Derived::foo();
+ if (CanDerivedReturnType.getUnqualifiedType() ==
+ CanBaseReturnType.getUnqualifiedType()) {
+ // No adjustment needed.
+ return BaseOffset();
+ }
+
+ const CXXRecordDecl *DerivedRD =
+ cast<CXXRecordDecl>(cast<RecordType>(CanDerivedReturnType)->getDecl());
+
+ const CXXRecordDecl *BaseRD =
+ cast<CXXRecordDecl>(cast<RecordType>(CanBaseReturnType)->getDecl());
+
+ return ComputeBaseOffset(Context, BaseRD, DerivedRD);
+}
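+// Illustrative example (not part of the upstream file): with
+//   struct A { virtual A *clone(); };
+//   struct Pad { virtual void g(); };
+//   struct B : Pad, A { B *clone(); };
+// B::clone covariantly overrides A::clone, and the BaseOffset computed here
+// describes where the A subobject lives inside the returned B so the thunk
+// can adjust the returned pointer; when the return types already match (or
+// differ only in qualification) an empty BaseOffset is returned and no
+// adjustment is needed.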
+
+void
+FinalOverriders::ComputeBaseOffsets(BaseSubobject Base, bool IsVirtual,
+ CharUnits OffsetInLayoutClass,
+ SubobjectOffsetMapTy &SubobjectOffsets,
+ SubobjectOffsetMapTy &SubobjectLayoutClassOffsets,
+ SubobjectCountMapTy &SubobjectCounts) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ unsigned SubobjectNumber = 0;
+ if (!IsVirtual)
+ SubobjectNumber = ++SubobjectCounts[RD];
+
+ // Set up the subobject to offset mapping.
+ assert(!SubobjectOffsets.count(std::make_pair(RD, SubobjectNumber))
+ && "Subobject offset already exists!");
+ assert(!SubobjectLayoutClassOffsets.count(std::make_pair(RD, SubobjectNumber))
+ && "Subobject offset already exists!");
+
+ SubobjectOffsets[std::make_pair(RD, SubobjectNumber)] = Base.getBaseOffset();
+ SubobjectLayoutClassOffsets[std::make_pair(RD, SubobjectNumber)] =
+ OffsetInLayoutClass;
+
+ // Traverse our bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits BaseOffset;
+ CharUnits BaseOffsetInLayoutClass;
+ if (I->isVirtual()) {
+ // Check if we've visited this virtual base before.
+ if (SubobjectOffsets.count(std::make_pair(BaseDecl, 0)))
+ continue;
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+ } else {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ CharUnits Offset = Layout.getBaseClassOffset(BaseDecl);
+
+ BaseOffset = Base.getBaseOffset() + Offset;
+ BaseOffsetInLayoutClass = OffsetInLayoutClass + Offset;
+ }
+
+ ComputeBaseOffsets(BaseSubobject(BaseDecl, BaseOffset),
+ I->isVirtual(), BaseOffsetInLayoutClass,
+ SubobjectOffsets, SubobjectLayoutClassOffsets,
+ SubobjectCounts);
+ }
+}
+
+void FinalOverriders::dump(raw_ostream &Out, BaseSubobject Base,
+ VisitedVirtualBasesSetTy &VisitedVirtualBases) {
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore bases that don't have any virtual member functions.
+ if (!BaseDecl->isPolymorphic())
+ continue;
+
+ CharUnits BaseOffset;
+ if (I->isVirtual()) {
+ if (!VisitedVirtualBases.insert(BaseDecl)) {
+ // We've visited this base before.
+ continue;
+ }
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ } else {
+ BaseOffset = Layout.getBaseClassOffset(BaseDecl) + Base.getBaseOffset();
+ }
+
+ dump(Out, BaseSubobject(BaseDecl, BaseOffset), VisitedVirtualBases);
+ }
+
+ Out << "Final overriders for (" << RD->getQualifiedNameAsString() << ", ";
+ Out << Base.getBaseOffset().getQuantity() << ")\n";
+
+ // Now dump the overriders for this base subobject.
+ for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+
+ if (!MD->isVirtual())
+ continue;
+
+ OverriderInfo Overrider = getOverrider(MD, Base.getBaseOffset());
+
+ Out << " " << MD->getQualifiedNameAsString() << " - (";
+ Out << Overrider.Method->getQualifiedNameAsString();
+ Out << ", " << ", " << Overrider.Offset.getQuantity() << ')';
+
+ BaseOffset Offset;
+ if (!Overrider.Method->isPure())
+ Offset = ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD);
+
+ if (!Offset.isEmpty()) {
+ Out << " [ret-adj: ";
+ if (Offset.VirtualBase)
+ Out << Offset.VirtualBase->getQualifiedNameAsString() << " vbase, ";
+
+ Out << Offset.NonVirtualOffset.getQuantity() << " nv]";
+ }
+
+ Out << "\n";
+ }
+}
+
+/// VCallOffsetMap - Keeps track of vcall offsets when building a vtable.
+struct VCallOffsetMap {
+
+ typedef std::pair<const CXXMethodDecl *, CharUnits> MethodAndOffsetPairTy;
+
+ /// Offsets - Keeps track of methods and their offsets.
+ // FIXME: This should be a real map and not a vector.
+ SmallVector<MethodAndOffsetPairTy, 16> Offsets;
+
+ /// MethodsCanShareVCallOffset - Returns whether two virtual member functions
+ /// can share the same vcall offset.
+ static bool MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS);
+
+public:
+ /// AddVCallOffset - Adds a vcall offset to the map. Returns true if the
+ /// add was successful, or false if there was already a member function with
+ /// the same signature in the map.
+ bool AddVCallOffset(const CXXMethodDecl *MD, CharUnits OffsetOffset);
+
+ /// getVCallOffsetOffset - Returns the vcall offset offset (relative to the
+ /// vtable address point) for the given virtual member function.
+ CharUnits getVCallOffsetOffset(const CXXMethodDecl *MD);
+
+ // empty - Return whether the offset map is empty or not.
+ bool empty() const { return Offsets.empty(); }
+};
+
+static bool HasSameVirtualSignature(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS) {
+ const FunctionProtoType *LT =
+ cast<FunctionProtoType>(LHS->getType().getCanonicalType());
+ const FunctionProtoType *RT =
+ cast<FunctionProtoType>(RHS->getType().getCanonicalType());
+
+ // Fast-path matches in the canonical types.
+ if (LT == RT) return true;
+
+ // Force the signatures to match. We can't rely on the overrides
+ // list here because there isn't necessarily an inheritance
+ // relationship between the two methods.
+ if (LT->getTypeQuals() != RT->getTypeQuals() ||
+ LT->getNumArgs() != RT->getNumArgs())
+ return false;
+ for (unsigned I = 0, E = LT->getNumArgs(); I != E; ++I)
+ if (LT->getArgType(I) != RT->getArgType(I))
+ return false;
+ return true;
+}
+
+bool VCallOffsetMap::MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS) {
+ assert(LHS->isVirtual() && "LHS must be virtual!");
+ assert(RHS->isVirtual() && "LHS must be virtual!");
+
+ // A destructor can share a vcall offset with another destructor.
+ if (isa<CXXDestructorDecl>(LHS))
+ return isa<CXXDestructorDecl>(RHS);
+
+ // FIXME: We need to check more things here.
+
+ // The methods must have the same name.
+ DeclarationName LHSName = LHS->getDeclName();
+ DeclarationName RHSName = RHS->getDeclName();
+ if (LHSName != RHSName)
+ return false;
+
+ // And the same signatures.
+ return HasSameVirtualSignature(LHS, RHS);
+}
+
+bool VCallOffsetMap::AddVCallOffset(const CXXMethodDecl *MD,
+ CharUnits OffsetOffset) {
+ // Check if we can reuse an offset.
+ for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
+ if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
+ return false;
+ }
+
+ // Add the offset.
+ Offsets.push_back(MethodAndOffsetPairTy(MD, OffsetOffset));
+ return true;
+}
+
+CharUnits VCallOffsetMap::getVCallOffsetOffset(const CXXMethodDecl *MD) {
+ // Look for an offset.
+ for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
+ if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
+ return Offsets[I].second;
+ }
+
+ llvm_unreachable("Should always find a vcall offset offset!");
+}
+
+/// VCallAndVBaseOffsetBuilder - Class for building vcall and vbase offsets.
+class VCallAndVBaseOffsetBuilder {
+public:
+ typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits>
+ VBaseOffsetOffsetsMapTy;
+
+private:
+ /// MostDerivedClass - The most derived class for which we're building vcall
+ /// and vbase offsets.
+ const CXXRecordDecl *MostDerivedClass;
+
+ /// LayoutClass - The class we're using for layout information. Will be
+ /// different than the most derived class if we're building a construction
+ /// vtable.
+ const CXXRecordDecl *LayoutClass;
+
+ /// Context - The ASTContext which we will use for layout information.
+ ASTContext &Context;
+
+ /// Components - vcall and vbase offset components
+ typedef SmallVector<VTableComponent, 64> VTableComponentVectorTy;
+ VTableComponentVectorTy Components;
+
+ /// VisitedVirtualBases - Visited virtual bases.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
+
+ /// VCallOffsets - Keeps track of vcall offsets.
+ VCallOffsetMap VCallOffsets;
+
+ /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets,
+ /// relative to the address point.
+ VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
+
+ /// FinalOverriders - The final overriders of the most derived class.
+ /// (Can be null when we're not building a vtable of the most derived class).
+ const FinalOverriders *Overriders;
+
+ /// AddVCallAndVBaseOffsets - Add vcall offsets and vbase offsets for the
+ /// given base subobject.
+ void AddVCallAndVBaseOffsets(BaseSubobject Base, bool BaseIsVirtual,
+ CharUnits RealBaseOffset);
+
+ /// AddVCallOffsets - Add vcall offsets for the given base subobject.
+ void AddVCallOffsets(BaseSubobject Base, CharUnits VBaseOffset);
+
+ /// AddVBaseOffsets - Add vbase offsets for the given class.
+ void AddVBaseOffsets(const CXXRecordDecl *Base,
+ CharUnits OffsetInLayoutClass);
+
+ /// getCurrentOffsetOffset - Get the current vcall or vbase offset offset in
+ /// chars, relative to the vtable address point.
+ CharUnits getCurrentOffsetOffset() const;
+
+public:
+ VCallAndVBaseOffsetBuilder(const CXXRecordDecl *MostDerivedClass,
+ const CXXRecordDecl *LayoutClass,
+ const FinalOverriders *Overriders,
+ BaseSubobject Base, bool BaseIsVirtual,
+ CharUnits OffsetInLayoutClass)
+ : MostDerivedClass(MostDerivedClass), LayoutClass(LayoutClass),
+ Context(MostDerivedClass->getASTContext()), Overriders(Overriders) {
+
+ // Add vcall and vbase offsets.
+ AddVCallAndVBaseOffsets(Base, BaseIsVirtual, OffsetInLayoutClass);
+ }
+
+ /// Methods for iterating over the components.
+ typedef VTableComponentVectorTy::const_reverse_iterator const_iterator;
+ const_iterator components_begin() const { return Components.rbegin(); }
+ const_iterator components_end() const { return Components.rend(); }
+
+ const VCallOffsetMap &getVCallOffsets() const { return VCallOffsets; }
+ const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
+ return VBaseOffsetOffsets;
+ }
+};
+
+void
+VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
+ bool BaseIsVirtual,
+ CharUnits RealBaseOffset) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base.getBase());
+
+ // Itanium C++ ABI 2.5.2:
+ // ...in classes sharing a virtual table with a primary base class, the vcall
+ // and vbase offsets added by the derived class all come before the vcall
+ // and vbase offsets required by the base class, so that the latter may be
+ // laid out as required by the base class without regard to additions from
+ // the derived class(es).
+
+ // (Since we're emitting the vcall and vbase offsets in reverse order, we'll
+ // emit them for the primary base first).
+ if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+ bool PrimaryBaseIsVirtual = Layout.isPrimaryBaseVirtual();
+
+ CharUnits PrimaryBaseOffset;
+
+ // Get the base offset of the primary base.
+ if (PrimaryBaseIsVirtual) {
+ assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ "Primary vbase should have a zero offset!");
+
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+
+ PrimaryBaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+ } else {
+ assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ "Primary base should have a zero offset!");
+
+ PrimaryBaseOffset = Base.getBaseOffset();
+ }
+
+ AddVCallAndVBaseOffsets(
+ BaseSubobject(PrimaryBase,PrimaryBaseOffset),
+ PrimaryBaseIsVirtual, RealBaseOffset);
+ }
+
+ AddVBaseOffsets(Base.getBase(), RealBaseOffset);
+
+ // We only want to add vcall offsets for virtual bases.
+ if (BaseIsVirtual)
+ AddVCallOffsets(Base, RealBaseOffset);
+}
+
+CharUnits VCallAndVBaseOffsetBuilder::getCurrentOffsetOffset() const {
+ // OffsetIndex is the index of this vcall or vbase offset, relative to the
+ // vtable address point. (We subtract 3 to account for the entries just
+ // above the address point: the RTTI entry, the offset to top, and the
+ // offset slot being allocated itself.)
+ int64_t OffsetIndex = -(int64_t)(3 + Components.size());
+
+ CharUnits PointerWidth =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ CharUnits OffsetOffset = PointerWidth * OffsetIndex;
+ return OffsetOffset;
+}
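+
+// Worked example of the computation above, assuming a target with 8-byte
+// pointers: the first offset emitted gets index -(3 + 0) = -3, i.e. it lives
+// 24 bytes below the address point (just past the offset-to-top and RTTI
+// slots); the second gets index -4, i.e. -32 bytes, and so on.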
+
+void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
+ CharUnits VBaseOffset) {
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ // Handle the primary base first.
+ // We only want to add vcall offsets if the base is non-virtual; a virtual
+ // primary base will have its vcall and vbase offsets emitted already.
+ if (PrimaryBase && !Layout.isPrimaryBaseVirtual()) {
+ // Get the base offset of the primary base.
+ assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ "Primary base should have a zero offset!");
+
+ AddVCallOffsets(BaseSubobject(PrimaryBase, Base.getBaseOffset()),
+ VBaseOffset);
+ }
+
+ // Add the vcall offsets.
+ for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+
+ if (!MD->isVirtual())
+ continue;
+
+ CharUnits OffsetOffset = getCurrentOffsetOffset();
+
+ // Don't add a vcall offset if we already have one for this member function
+ // signature.
+ if (!VCallOffsets.AddVCallOffset(MD, OffsetOffset))
+ continue;
+
+ CharUnits Offset = CharUnits::Zero();
+
+ if (Overriders) {
+ // Get the final overrider.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders->getOverrider(MD, Base.getBaseOffset());
+
+ /// The vcall offset is the offset from the virtual base to the object
+ /// where the function was overridden.
+ Offset = Overrider.Offset - VBaseOffset;
+ }
+
+ Components.push_back(
+ VTableComponent::MakeVCallOffset(Offset));
+ }
+
+ // And iterate over all non-virtual bases (ignoring the primary base).
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ if (BaseDecl == PrimaryBase)
+ continue;
+
+ // Get the base offset of this base.
+ CharUnits BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
+
+ AddVCallOffsets(BaseSubobject(BaseDecl, BaseOffset),
+ VBaseOffset);
+ }
+}
+
+void
+VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
+ CharUnits OffsetInLayoutClass) {
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ // Add vbase offsets.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Check if this is a virtual base that we haven't visited before.
+ if (I->isVirtual() && VisitedVirtualBases.insert(BaseDecl)) {
+ CharUnits Offset =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl) - OffsetInLayoutClass;
+
+ // Add the vbase offset offset.
+ assert(!VBaseOffsetOffsets.count(BaseDecl) &&
+ "vbase offset offset already exists!");
+
+ CharUnits VBaseOffsetOffset = getCurrentOffsetOffset();
+ VBaseOffsetOffsets.insert(
+ std::make_pair(BaseDecl, VBaseOffsetOffset));
+
+ Components.push_back(
+ VTableComponent::MakeVBaseOffset(Offset));
+ }
+
+ // Check the base class looking for more vbase offsets.
+ AddVBaseOffsets(BaseDecl, OffsetInLayoutClass);
+ }
+}
+
+/// VTableBuilder - Class for building vtable layout information.
+class VTableBuilder {
+public:
+ /// PrimaryBasesSetVectorTy - A set vector of direct and indirect
+ /// primary bases.
+ typedef llvm::SmallSetVector<const CXXRecordDecl *, 8>
+ PrimaryBasesSetVectorTy;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, CharUnits>
+ VBaseOffsetOffsetsMapTy;
+
+ typedef llvm::DenseMap<BaseSubobject, uint64_t>
+ AddressPointsMapTy;
+
+private:
+ /// VTables - Global vtable information.
+ VTableContext &VTables;
+
+ /// MostDerivedClass - The most derived class for which we're building this
+ /// vtable.
+ const CXXRecordDecl *MostDerivedClass;
+
+ /// MostDerivedClassOffset - If we're building a construction vtable, this
+ /// holds the offset from the layout class to the most derived class.
+ const CharUnits MostDerivedClassOffset;
+
+ /// MostDerivedClassIsVirtual - Whether the most derived class is a virtual
+ /// base. (This only makes sense when building a construction vtable).
+ bool MostDerivedClassIsVirtual;
+
+ /// LayoutClass - The class we're using for layout information. Will be
+ /// different than the most derived class if we're building a construction
+ /// vtable.
+ const CXXRecordDecl *LayoutClass;
+
+ /// Context - The ASTContext which we will use for layout information.
+ ASTContext &Context;
+
+ /// FinalOverriders - The final overriders of the most derived class.
+ const FinalOverriders Overriders;
+
+ /// VCallOffsetsForVBases - Keeps track of vcall offsets for the virtual
+ /// bases in this vtable.
+ llvm::DenseMap<const CXXRecordDecl *, VCallOffsetMap> VCallOffsetsForVBases;
+
+ /// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets for
+ /// the most derived class.
+ VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
+
+ /// Components - The components of the vtable being built.
+ SmallVector<VTableComponent, 64> Components;
+
+ /// AddressPoints - Address points for the vtable being built.
+ AddressPointsMapTy AddressPoints;
+
+ /// MethodInfo - Contains information about a method in a vtable.
+ /// (Used for computing 'this' pointer adjustment thunks.)
+ struct MethodInfo {
+ /// BaseOffset - The base offset of this method.
+ const CharUnits BaseOffset;
+
+ /// BaseOffsetInLayoutClass - The base offset in the layout class of this
+ /// method.
+ const CharUnits BaseOffsetInLayoutClass;
+
+ /// VTableIndex - The index in the vtable that this method has.
+ /// (For destructors, this is the index of the complete destructor).
+ const uint64_t VTableIndex;
+
+ MethodInfo(CharUnits BaseOffset, CharUnits BaseOffsetInLayoutClass,
+ uint64_t VTableIndex)
+ : BaseOffset(BaseOffset),
+ BaseOffsetInLayoutClass(BaseOffsetInLayoutClass),
+ VTableIndex(VTableIndex) { }
+
+ MethodInfo()
+ : BaseOffset(CharUnits::Zero()),
+ BaseOffsetInLayoutClass(CharUnits::Zero()),
+ VTableIndex(0) { }
+ };
+
+ typedef llvm::DenseMap<const CXXMethodDecl *, MethodInfo> MethodInfoMapTy;
+
+ /// MethodInfoMap - The information for all methods in the vtable we're
+ /// currently building.
+ MethodInfoMapTy MethodInfoMap;
+
+ typedef llvm::DenseMap<uint64_t, ThunkInfo> VTableThunksMapTy;
+
+ /// VTableThunks - The thunks by vtable index in the vtable currently being
+ /// built.
+ VTableThunksMapTy VTableThunks;
+
+ typedef SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
+ typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy;
+
+ /// Thunks - A map that contains all the thunks needed for all methods in the
+ /// most derived class for which the vtable is currently being built.
+ ThunksMapTy Thunks;
+
+ /// AddThunk - Add a thunk for the given method.
+ void AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk);
+
+ /// ComputeThisAdjustments - Compute the 'this' pointer adjustments for the
+ /// part of the vtable we're currently building.
+ void ComputeThisAdjustments();
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+
+ /// PrimaryVirtualBases - All known virtual bases who are a primary base of
+ /// some other base.
+ VisitedVirtualBasesSetTy PrimaryVirtualBases;
+
+ /// ComputeReturnAdjustment - Compute the return adjustment given a return
+ /// adjustment base offset.
+ ReturnAdjustment ComputeReturnAdjustment(BaseOffset Offset);
+
+ /// ComputeThisAdjustmentBaseOffset - Compute the base offset for adjusting
+ /// the 'this' pointer from the base subobject to the derived subobject.
+ BaseOffset ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
+ BaseSubobject Derived) const;
+
+ /// ComputeThisAdjustment - Compute the 'this' pointer adjustment for the
+ /// given virtual member function, its offset in the layout class and its
+ /// final overrider.
+ ThisAdjustment
+ ComputeThisAdjustment(const CXXMethodDecl *MD,
+ CharUnits BaseOffsetInLayoutClass,
+ FinalOverriders::OverriderInfo Overrider);
+
+ /// AddMethod - Add a single virtual member function to the vtable
+ /// components vector.
+ void AddMethod(const CXXMethodDecl *MD, ReturnAdjustment ReturnAdjustment);
+
+ /// IsOverriderUsed - Returns whether the overrider will ever be used in this
+ /// part of the vtable.
+ ///
+ /// Itanium C++ ABI 2.5.2:
+ ///
+ /// struct A { virtual void f(); };
+ /// struct B : virtual public A { int i; };
+ /// struct C : virtual public A { int j; };
+ /// struct D : public B, public C {};
+ ///
+ /// When B and C are declared, A is a primary base in each case, so although
+ /// vcall offsets are allocated in the A-in-B and A-in-C vtables, no this
+ /// adjustment is required and no thunk is generated. However, inside D
+ /// objects, A is no longer a primary base of C, so if we allowed calls to
+ /// C::f() to use the copy of A's vtable in the C subobject, we would need
+ /// to adjust this from C* to B::A*, which would require a third-party
+ /// thunk. Since we require that a call to C::f() first convert to A*,
+ /// C-in-D's copy of A's vtable is never referenced, so this is not
+ /// necessary.
+ bool IsOverriderUsed(const CXXMethodDecl *Overrider,
+ CharUnits BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ CharUnits FirstBaseOffsetInLayoutClass) const;
+
+ /// AddMethods - Add the methods of this base subobject and all its
+ /// primary bases to the vtable components vector.
+ void AddMethods(BaseSubobject Base, CharUnits BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ CharUnits FirstBaseOffsetInLayoutClass,
+ PrimaryBasesSetVectorTy &PrimaryBases);
+
+ // LayoutVTable - Layout the vtable for the given base class, including its
+ // secondary vtables and any vtables for virtual bases.
+ void LayoutVTable();
+
+ /// LayoutPrimaryAndSecondaryVTables - Layout the primary vtable for the
+ /// given base subobject, as well as all its secondary vtables.
+ ///
+ /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
+ /// or a direct or indirect base of a virtual base.
+ ///
+ /// \param BaseIsVirtualInLayoutClass - Whether the base subobject is virtual
+ /// in the layout class.
+ void LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ bool BaseIsVirtualInLayoutClass,
+ CharUnits OffsetInLayoutClass);
+
+ /// LayoutSecondaryVTables - Layout the secondary vtables for the given base
+ /// subobject.
+ ///
+ /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
+ /// or a direct or indirect base of a virtual base.
+ void LayoutSecondaryVTables(BaseSubobject Base, bool BaseIsMorallyVirtual,
+ CharUnits OffsetInLayoutClass);
+
+ /// DeterminePrimaryVirtualBases - Determine the primary virtual bases in this
+ /// class hierarchy.
+ void DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
+ CharUnits OffsetInLayoutClass,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// LayoutVTablesForVirtualBases - Layout vtables for all virtual bases of the
+ /// given base (excluding any primary bases).
+ void LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// isBuildingConstructorVTable - Return whether this vtable builder is
+ /// building a construction vtable.
+ bool isBuildingConstructorVTable() const {
+ return MostDerivedClass != LayoutClass;
+ }
+
+public:
+ VTableBuilder(VTableContext &VTables, const CXXRecordDecl *MostDerivedClass,
+ CharUnits MostDerivedClassOffset,
+ bool MostDerivedClassIsVirtual,
+ const CXXRecordDecl *LayoutClass)
+ : VTables(VTables), MostDerivedClass(MostDerivedClass),
+ MostDerivedClassOffset(MostDerivedClassOffset),
+ MostDerivedClassIsVirtual(MostDerivedClassIsVirtual),
+ LayoutClass(LayoutClass), Context(MostDerivedClass->getASTContext()),
+ Overriders(MostDerivedClass, MostDerivedClassOffset, LayoutClass) {
+
+ LayoutVTable();
+
+ if (Context.getLangOpts().DumpVTableLayouts)
+ dumpLayout(llvm::errs());
+ }
+
+ uint64_t getNumThunks() const {
+ return Thunks.size();
+ }
+
+ ThunksMapTy::const_iterator thunks_begin() const {
+ return Thunks.begin();
+ }
+
+ ThunksMapTy::const_iterator thunks_end() const {
+ return Thunks.end();
+ }
+
+ const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
+ return VBaseOffsetOffsets;
+ }
+
+ const AddressPointsMapTy &getAddressPoints() const {
+ return AddressPoints;
+ }
+
+ /// getNumVTableComponents - Return the number of components in the vtable
+ /// currently built.
+ uint64_t getNumVTableComponents() const {
+ return Components.size();
+ }
+
+ const VTableComponent *vtable_component_begin() const {
+ return Components.begin();
+ }
+
+ const VTableComponent *vtable_component_end() const {
+ return Components.end();
+ }
+
+ AddressPointsMapTy::const_iterator address_points_begin() const {
+ return AddressPoints.begin();
+ }
+
+ AddressPointsMapTy::const_iterator address_points_end() const {
+ return AddressPoints.end();
+ }
+
+ VTableThunksMapTy::const_iterator vtable_thunks_begin() const {
+ return VTableThunks.begin();
+ }
+
+ VTableThunksMapTy::const_iterator vtable_thunks_end() const {
+ return VTableThunks.end();
+ }
+
+ /// dumpLayout - Dump the vtable layout.
+ void dumpLayout(raw_ostream&);
+};
+
+void VTableBuilder::AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk) {
+ assert(!isBuildingConstructorVTable() &&
+ "Can't add thunks for construction vtable");
+
+ SmallVector<ThunkInfo, 1> &ThunksVector = Thunks[MD];
+
+ // Check if we have this thunk already.
+ if (std::find(ThunksVector.begin(), ThunksVector.end(), Thunk) !=
+ ThunksVector.end())
+ return;
+
+ ThunksVector.push_back(Thunk);
+}
+
+typedef llvm::SmallPtrSet<const CXXMethodDecl *, 8> OverriddenMethodsSetTy;
+
+/// ComputeAllOverriddenMethods - Given a method decl, compute the set of all
+/// methods that it overrides, directly or indirectly.
+static void
+ComputeAllOverriddenMethods(const CXXMethodDecl *MD,
+ OverriddenMethodsSetTy& OverriddenMethods) {
+ assert(MD->isVirtual() && "Method is not virtual!");
+
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods(); I != E; ++I) {
+ const CXXMethodDecl *OverriddenMD = *I;
+
+ OverriddenMethods.insert(OverriddenMD);
+
+ ComputeAllOverriddenMethods(OverriddenMD, OverriddenMethods);
+ }
+}
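+
+// For example, if C::f overrides B::f, which in turn overrides A::f, then
+// ComputeAllOverriddenMethods(C::f, Set) leaves Set == { B::f, A::f }.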
+
+void VTableBuilder::ComputeThisAdjustments() {
+ // Now go through the method info map and see if any of the methods need
+ // 'this' pointer adjustments.
+ for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
+ E = MethodInfoMap.end(); I != E; ++I) {
+ const CXXMethodDecl *MD = I->first;
+ const MethodInfo &MethodInfo = I->second;
+
+ // Ignore adjustments for unused function pointers.
+ uint64_t VTableIndex = MethodInfo.VTableIndex;
+ if (Components[VTableIndex].getKind() ==
+ VTableComponent::CK_UnusedFunctionPointer)
+ continue;
+
+ // Get the final overrider for this method.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders.getOverrider(MD, MethodInfo.BaseOffset);
+
+ // Check if we need an adjustment at all.
+ if (MethodInfo.BaseOffsetInLayoutClass == Overrider.Offset) {
+ // When a return thunk is needed by a derived class that overrides a
+ // virtual base, gcc uses a virtual 'this' adjustment as well.
+ // While the thunk itself might be needed by vtables in subclasses or
+ // in construction vtables, there doesn't seem to be a reason for using
+ // the thunk in this vtable. Still, we do so to match gcc.
+ if (VTableThunks.lookup(VTableIndex).Return.isEmpty())
+ continue;
+ }
+
+ ThisAdjustment ThisAdjustment =
+ ComputeThisAdjustment(MD, MethodInfo.BaseOffsetInLayoutClass, Overrider);
+
+ if (ThisAdjustment.isEmpty())
+ continue;
+
+ // Add it.
+ VTableThunks[VTableIndex].This = ThisAdjustment;
+
+ if (isa<CXXDestructorDecl>(MD)) {
+ // Add an adjustment for the deleting destructor as well.
+ VTableThunks[VTableIndex + 1].This = ThisAdjustment;
+ }
+ }
+
+ /// Clear the method info map.
+ MethodInfoMap.clear();
+
+ if (isBuildingConstructorVTable()) {
+ // We don't need to store thunk information for construction vtables.
+ return;
+ }
+
+ for (VTableThunksMapTy::const_iterator I = VTableThunks.begin(),
+ E = VTableThunks.end(); I != E; ++I) {
+ const VTableComponent &Component = Components[I->first];
+ const ThunkInfo &Thunk = I->second;
+ const CXXMethodDecl *MD;
+
+ switch (Component.getKind()) {
+ default:
+ llvm_unreachable("Unexpected vtable component kind!");
+ case VTableComponent::CK_FunctionPointer:
+ MD = Component.getFunctionDecl();
+ break;
+ case VTableComponent::CK_CompleteDtorPointer:
+ MD = Component.getDestructorDecl();
+ break;
+ case VTableComponent::CK_DeletingDtorPointer:
+ // We've already added the thunk when we saw the complete dtor pointer.
+ continue;
+ }
+
+ if (MD->getParent() == MostDerivedClass)
+ AddThunk(MD, Thunk);
+ }
+}
+
+ReturnAdjustment VTableBuilder::ComputeReturnAdjustment(BaseOffset Offset) {
+ ReturnAdjustment Adjustment;
+
+ if (!Offset.isEmpty()) {
+ if (Offset.VirtualBase) {
+ // Get the virtual base offset offset.
+ if (Offset.DerivedClass == MostDerivedClass) {
+ // We can get the offset offset directly from our map.
+ Adjustment.VBaseOffsetOffset =
+ VBaseOffsetOffsets.lookup(Offset.VirtualBase).getQuantity();
+ } else {
+ Adjustment.VBaseOffsetOffset =
+ VTables.getVirtualBaseOffsetOffset(Offset.DerivedClass,
+ Offset.VirtualBase).getQuantity();
+ }
+ }
+
+ Adjustment.NonVirtual = Offset.NonVirtualOffset.getQuantity();
+ }
+
+ return Adjustment;
+}
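+
+// Illustration with an example hierarchy: return adjustments arise from
+// covariant return types, e.g.
+//
+//   struct V { virtual ~V(); };
+//   struct A { virtual V *clone(); };
+//   struct D : virtual V { };
+//   struct C : A { virtual D *clone(); };
+//
+// A call to clone() through A's vtable slot returns a D * that must be
+// adjusted to V *. Since V is a virtual base of D, the code above sets
+// VBaseOffsetOffset so the thunk can load the dynamic offset of V from the
+// vtable; if V were a non-virtual base of D, only the NonVirtual part would
+// be set.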
+
+BaseOffset
+VTableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
+ BaseSubobject Derived) const {
+ const CXXRecordDecl *BaseRD = Base.getBase();
+ const CXXRecordDecl *DerivedRD = Derived.getBase();
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/true,
+ /*RecordPaths=*/true, /*DetectVirtual=*/true);
+
+ if (!const_cast<CXXRecordDecl *>(DerivedRD)->
+ isDerivedFrom(const_cast<CXXRecordDecl *>(BaseRD), Paths)) {
+ llvm_unreachable("Class must be derived from the passed in base class!");
+ }
+
+ // We have to go through all the paths, and see which one leads us to the
+ // right base subobject.
+ for (CXXBasePaths::const_paths_iterator I = Paths.begin(), E = Paths.end();
+ I != E; ++I) {
+ BaseOffset Offset = ComputeBaseOffset(Context, DerivedRD, *I);
+
+ CharUnits OffsetToBaseSubobject = Offset.NonVirtualOffset;
+
+ if (Offset.VirtualBase) {
+ // If we have a virtual base class, the non-virtual offset is relative
+ // to the virtual base class offset.
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ // Get the virtual base offset, relative to the layout class.
+ OffsetToBaseSubobject +=
+ LayoutClassLayout.getVBaseClassOffset(Offset.VirtualBase);
+ } else {
+ // Otherwise, the non-virtual offset is relative to the derived class
+ // offset.
+ OffsetToBaseSubobject += Derived.getBaseOffset();
+ }
+
+ // Check if this path gives us the right base subobject.
+ if (OffsetToBaseSubobject == Base.getBaseOffset()) {
+ // Since we're going from the base class _to_ the derived class, we'll
+ // invert the non-virtual offset here.
+ Offset.NonVirtualOffset = -Offset.NonVirtualOffset;
+ return Offset;
+ }
+ }
+
+ return BaseOffset();
+}
+
+ThisAdjustment
+VTableBuilder::ComputeThisAdjustment(const CXXMethodDecl *MD,
+ CharUnits BaseOffsetInLayoutClass,
+ FinalOverriders::OverriderInfo Overrider) {
+ // Ignore adjustments for pure virtual member functions.
+ if (Overrider.Method->isPure())
+ return ThisAdjustment();
+
+ BaseSubobject OverriddenBaseSubobject(MD->getParent(),
+ BaseOffsetInLayoutClass);
+
+ BaseSubobject OverriderBaseSubobject(Overrider.Method->getParent(),
+ Overrider.Offset);
+
+ // Compute the adjustment offset.
+ BaseOffset Offset = ComputeThisAdjustmentBaseOffset(OverriddenBaseSubobject,
+ OverriderBaseSubobject);
+ if (Offset.isEmpty())
+ return ThisAdjustment();
+
+ ThisAdjustment Adjustment;
+
+ if (Offset.VirtualBase) {
+ // Get the vcall offset map for this virtual base.
+ VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Offset.VirtualBase];
+
+ if (VCallOffsets.empty()) {
+ // We don't have vcall offsets for this virtual base, go ahead and
+ // build them.
+ VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, MostDerivedClass,
+ /*FinalOverriders=*/0,
+ BaseSubobject(Offset.VirtualBase,
+ CharUnits::Zero()),
+ /*BaseIsVirtual=*/true,
+ /*OffsetInLayoutClass=*/
+ CharUnits::Zero());
+
+ VCallOffsets = Builder.getVCallOffsets();
+ }
+
+ Adjustment.VCallOffsetOffset =
+ VCallOffsets.getVCallOffsetOffset(MD).getQuantity();
+ }
+
+ // Set the non-virtual part of the adjustment.
+ Adjustment.NonVirtual = Offset.NonVirtualOffset.getQuantity();
+
+ return Adjustment;
+}
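+
+// Illustration with an example hierarchy:
+//
+//   struct A { virtual void f(); };
+//   struct B { virtual void g(); };
+//   struct C : A, B { virtual void g(); };
+//
+// A call to g() through the B-in-C secondary vtable passes a B *, while
+// C::g() expects a C *, so its entry gets a 'this' adjustment whose
+// NonVirtual part is minus the offset of the B subobject within C. If B
+// were instead a virtual base, the VCallOffsetOffset part computed above
+// would be used so the thunk can load the adjustment from a vcall offset
+// slot at run time.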
+
+void
+VTableBuilder::AddMethod(const CXXMethodDecl *MD,
+ ReturnAdjustment ReturnAdjustment) {
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ assert(ReturnAdjustment.isEmpty() &&
+ "Destructor can't have return adjustment!");
+
+ // Add both the complete destructor and the deleting destructor.
+ Components.push_back(VTableComponent::MakeCompleteDtor(DD));
+ Components.push_back(VTableComponent::MakeDeletingDtor(DD));
+ } else {
+ // Add the return adjustment if necessary.
+ if (!ReturnAdjustment.isEmpty())
+ VTableThunks[Components.size()].Return = ReturnAdjustment;
+
+ // Add the function.
+ Components.push_back(VTableComponent::MakeFunction(MD));
+ }
+}
+
+/// OverridesIndirectMethodInBases - Return whether the given member function
+/// overrides any methods in the set of given bases.
+/// Unlike OverridesMethodInBase, this checks "overriders of overriders".
+/// For example, if we have:
+///
+/// struct A { virtual void f(); };
+/// struct B : A { virtual void f(); };
+/// struct C : B { virtual void f(); };
+///
+/// OverridesIndirectMethodInBases will return true if given C::f as the
+/// method and { A } as the set of bases.
+static bool
+OverridesIndirectMethodInBases(const CXXMethodDecl *MD,
+ VTableBuilder::PrimaryBasesSetVectorTy &Bases) {
+ if (Bases.count(MD->getParent()))
+ return true;
+
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods(); I != E; ++I) {
+ const CXXMethodDecl *OverriddenMD = *I;
+
+ // Check "indirect overriders".
+ if (OverridesIndirectMethodInBases(OverriddenMD, Bases))
+ return true;
+ }
+
+ return false;
+}
+
+bool
+VTableBuilder::IsOverriderUsed(const CXXMethodDecl *Overrider,
+ CharUnits BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ CharUnits FirstBaseOffsetInLayoutClass) const {
+ // If the base and the first base in the primary base chain have the same
+ // offsets, then this overrider will be used.
+ if (BaseOffsetInLayoutClass == FirstBaseOffsetInLayoutClass)
+ return true;
+
+ // We know now that Base (or a direct or indirect base of it) is a primary
+ // base in part of the class hierarchy, but not a primary base in the most
+ // derived class.
+
+ // If the overrider is the first base in the primary base chain, we know
+ // that the overrider will be used.
+ if (Overrider->getParent() == FirstBaseInPrimaryBaseChain)
+ return true;
+
+ VTableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
+
+ const CXXRecordDecl *RD = FirstBaseInPrimaryBaseChain;
+ PrimaryBases.insert(RD);
+
+ // Now traverse the base chain, starting with the first base, until we find
+ // the base that is no longer a primary base.
+ while (true) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (!PrimaryBase)
+ break;
+
+ if (Layout.isPrimaryBaseVirtual()) {
+ assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ "Primary base should always be at offset 0!");
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ // Now check if this is the primary base that is not a primary base in the
+ // most derived class.
+ if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
+ FirstBaseOffsetInLayoutClass) {
+ // We found it, stop walking the chain.
+ break;
+ }
+ } else {
+ assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ "Primary base should always be at offset 0!");
+ }
+
+ if (!PrimaryBases.insert(PrimaryBase))
+ llvm_unreachable("Found a duplicate primary base!");
+
+ RD = PrimaryBase;
+ }
+
+ // If the final overrider is an override of one of the primary bases,
+ // then we know that it will be used.
+ return OverridesIndirectMethodInBases(Overrider, PrimaryBases);
+}
+
+/// FindNearestOverriddenMethod - Given a method, returns the overridden method
+/// from the nearest base. Returns null if no method was found.
+static const CXXMethodDecl *
+FindNearestOverriddenMethod(const CXXMethodDecl *MD,
+ VTableBuilder::PrimaryBasesSetVectorTy &Bases) {
+ OverriddenMethodsSetTy OverriddenMethods;
+ ComputeAllOverriddenMethods(MD, OverriddenMethods);
+
+ for (int I = Bases.size(), E = 0; I != E; --I) {
+ const CXXRecordDecl *PrimaryBase = Bases[I - 1];
+
+ // Now check the overridden methods.
+ for (OverriddenMethodsSetTy::const_iterator I = OverriddenMethods.begin(),
+ E = OverriddenMethods.end(); I != E; ++I) {
+ const CXXMethodDecl *OverriddenMD = *I;
+
+ // We found our overridden method.
+ if (OverriddenMD->getParent() == PrimaryBase)
+ return OverriddenMD;
+ }
+ }
+
+ return 0;
+}
+
+void
+VTableBuilder::AddMethods(BaseSubobject Base, CharUnits BaseOffsetInLayoutClass,
+ const CXXRecordDecl *FirstBaseInPrimaryBaseChain,
+ CharUnits FirstBaseOffsetInLayoutClass,
+ PrimaryBasesSetVectorTy &PrimaryBases) {
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+ CharUnits PrimaryBaseOffset;
+ CharUnits PrimaryBaseOffsetInLayoutClass;
+ if (Layout.isPrimaryBaseVirtual()) {
+ assert(Layout.getVBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ "Primary vbase should have a zero offset!");
+
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+
+ PrimaryBaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ PrimaryBaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
+ } else {
+ assert(Layout.getBaseClassOffsetInBits(PrimaryBase) == 0 &&
+ "Primary base should have a zero offset!");
+
+ PrimaryBaseOffset = Base.getBaseOffset();
+ PrimaryBaseOffsetInLayoutClass = BaseOffsetInLayoutClass;
+ }
+
+ AddMethods(BaseSubobject(PrimaryBase, PrimaryBaseOffset),
+ PrimaryBaseOffsetInLayoutClass, FirstBaseInPrimaryBaseChain,
+ FirstBaseOffsetInLayoutClass, PrimaryBases);
+
+ if (!PrimaryBases.insert(PrimaryBase))
+ llvm_unreachable("Found a duplicate primary base!");
+ }
+
+ // Now go through all virtual member functions and add them.
+ for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+
+ if (!MD->isVirtual())
+ continue;
+
+ // Get the final overrider.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders.getOverrider(MD, Base.getBaseOffset());
+
+ // Check if this virtual member function overrides a method in a primary
+ // base. If this is the case, and the return type doesn't require adjustment
+ // then we can just use the member function from the primary base.
+ if (const CXXMethodDecl *OverriddenMD =
+ FindNearestOverriddenMethod(MD, PrimaryBases)) {
+ if (ComputeReturnAdjustmentBaseOffset(Context, MD,
+ OverriddenMD).isEmpty()) {
+ // Replace the method info of the overridden method with our own
+ // method.
+ assert(MethodInfoMap.count(OverriddenMD) &&
+ "Did not find the overridden method!");
+ MethodInfo &OverriddenMethodInfo = MethodInfoMap[OverriddenMD];
+
+ MethodInfo MethodInfo(Base.getBaseOffset(), BaseOffsetInLayoutClass,
+ OverriddenMethodInfo.VTableIndex);
+
+ assert(!MethodInfoMap.count(MD) &&
+ "Should not have method info for this method yet!");
+
+ MethodInfoMap.insert(std::make_pair(MD, MethodInfo));
+ MethodInfoMap.erase(OverriddenMD);
+
+ // If the overridden method exists in a virtual base class or a direct
+ // or indirect base class of a virtual base class, we need to emit a
+ // thunk if we ever have a class hierarchy where the base class is not
+ // a primary base in the complete object.
+ if (!isBuildingConstructorVTable() && OverriddenMD != MD) {
+ // Compute the this adjustment.
+ ThisAdjustment ThisAdjustment =
+ ComputeThisAdjustment(OverriddenMD, BaseOffsetInLayoutClass,
+ Overrider);
+
+ if (ThisAdjustment.VCallOffsetOffset &&
+ Overrider.Method->getParent() == MostDerivedClass) {
+
+ // There's no return adjustment between OverriddenMD and MD,
+ // but that doesn't mean there isn't one between MD and
+ // the final overrider.
+ BaseOffset ReturnAdjustmentOffset =
+ ComputeReturnAdjustmentBaseOffset(Context, Overrider.Method, MD);
+ ReturnAdjustment ReturnAdjustment =
+ ComputeReturnAdjustment(ReturnAdjustmentOffset);
+
+ // This is a virtual thunk for the most derived class, add it.
+ AddThunk(Overrider.Method,
+ ThunkInfo(ThisAdjustment, ReturnAdjustment));
+ }
+ }
+
+ continue;
+ }
+ }
+
+ // Insert the method info for this method.
+ MethodInfo MethodInfo(Base.getBaseOffset(), BaseOffsetInLayoutClass,
+ Components.size());
+
+ assert(!MethodInfoMap.count(MD) &&
+ "Should not have method info for this method yet!");
+ MethodInfoMap.insert(std::make_pair(MD, MethodInfo));
+
+ // Check if this overrider is going to be used.
+ const CXXMethodDecl *OverriderMD = Overrider.Method;
+ if (!IsOverriderUsed(OverriderMD, BaseOffsetInLayoutClass,
+ FirstBaseInPrimaryBaseChain,
+ FirstBaseOffsetInLayoutClass)) {
+ Components.push_back(VTableComponent::MakeUnusedFunction(OverriderMD));
+ continue;
+ }
+
+ // Check if this overrider needs a return adjustment.
+ // We don't want to do this for pure virtual member functions.
+ BaseOffset ReturnAdjustmentOffset;
+ if (!OverriderMD->isPure()) {
+ ReturnAdjustmentOffset =
+ ComputeReturnAdjustmentBaseOffset(Context, OverriderMD, MD);
+ }
+
+ ReturnAdjustment ReturnAdjustment =
+ ComputeReturnAdjustment(ReturnAdjustmentOffset);
+
+ AddMethod(Overrider.Method, ReturnAdjustment);
+ }
+}
+
+void VTableBuilder::LayoutVTable() {
+ LayoutPrimaryAndSecondaryVTables(BaseSubobject(MostDerivedClass,
+ CharUnits::Zero()),
+ /*BaseIsMorallyVirtual=*/false,
+ MostDerivedClassIsVirtual,
+ MostDerivedClassOffset);
+
+ VisitedVirtualBasesSetTy VBases;
+
+ // Determine the primary virtual bases.
+ DeterminePrimaryVirtualBases(MostDerivedClass, MostDerivedClassOffset,
+ VBases);
+ VBases.clear();
+
+ LayoutVTablesForVirtualBases(MostDerivedClass, VBases);
+
+ // -fapple-kext adds an extra entry at the end of the vtable.
+ bool IsAppleKext = Context.getLangOpts().AppleKext;
+ if (IsAppleKext)
+ Components.push_back(VTableComponent::MakeVCallOffset(CharUnits::Zero()));
+}
+
+void
+VTableBuilder::LayoutPrimaryAndSecondaryVTables(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ bool BaseIsVirtualInLayoutClass,
+ CharUnits OffsetInLayoutClass) {
+ assert(Base.getBase()->isDynamicClass() && "class does not have a vtable!");
+
+ // Add vcall and vbase offsets for this vtable.
+ VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, LayoutClass, &Overriders,
+ Base, BaseIsVirtualInLayoutClass,
+ OffsetInLayoutClass);
+ Components.append(Builder.components_begin(), Builder.components_end());
+
+ // Check if we need to add these vcall offsets.
+ if (BaseIsVirtualInLayoutClass && !Builder.getVCallOffsets().empty()) {
+ VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Base.getBase()];
+
+ if (VCallOffsets.empty())
+ VCallOffsets = Builder.getVCallOffsets();
+ }
+
+ // If we're laying out the most derived class we want to keep track of the
+ // virtual base class offset offsets.
+ if (Base.getBase() == MostDerivedClass)
+ VBaseOffsetOffsets = Builder.getVBaseOffsetOffsets();
+
+ // Add the offset to top.
+ CharUnits OffsetToTop = MostDerivedClassOffset - OffsetInLayoutClass;
+ Components.push_back(
+ VTableComponent::MakeOffsetToTop(OffsetToTop));
+
+ // Next, add the RTTI.
+ Components.push_back(VTableComponent::MakeRTTI(MostDerivedClass));
+
+ uint64_t AddressPoint = Components.size();
+
+ // Now go through all virtual member functions and add them.
+ PrimaryBasesSetVectorTy PrimaryBases;
+ AddMethods(Base, OffsetInLayoutClass,
+ Base.getBase(), OffsetInLayoutClass,
+ PrimaryBases);
+
+ // Compute 'this' pointer adjustments.
+ ComputeThisAdjustments();
+
+ // Add all address points.
+ const CXXRecordDecl *RD = Base.getBase();
+ while (true) {
+ AddressPoints.insert(std::make_pair(
+ BaseSubobject(RD, OffsetInLayoutClass),
+ AddressPoint));
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (!PrimaryBase)
+ break;
+
+ if (Layout.isPrimaryBaseVirtual()) {
+ // Check if this virtual primary base is a primary base in the layout
+ // class. If it's not, we don't want to add it.
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
+ OffsetInLayoutClass) {
+ // We don't want to add this class (or any of its primary bases).
+ break;
+ }
+ }
+
+ RD = PrimaryBase;
+ }
+
+ // Layout secondary vtables.
+ LayoutSecondaryVTables(Base, BaseIsMorallyVirtual, OffsetInLayoutClass);
+}
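+
+// The net result of the code above, for each primary vtable in the group, is
+// the usual Itanium layout (shown here as an illustrative summary):
+//
+//   [vcall offsets] [vbase offsets] [offset to top] [RTTI]
+//   <address point>
+//   [virtual function pointers and destructor entries]
+//
+// with AddressPoint recorded as the component index immediately after the
+// RTTI entry.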
+
+void VTableBuilder::LayoutSecondaryVTables(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ CharUnits OffsetInLayoutClass) {
+ // Itanium C++ ABI 2.5.2:
+ // Following the primary virtual table of a derived class are secondary
+ // virtual tables for each of its proper base classes, except any primary
+ // base(s) with which it shares its primary virtual table.
+
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ // Ignore virtual bases, we'll emit them later.
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore bases that don't have a vtable.
+ if (!BaseDecl->isDynamicClass())
+ continue;
+
+ if (isBuildingConstructorVTable()) {
+ // Itanium C++ ABI 2.6.4:
+ // Some of the base class subobjects may not need construction virtual
+ // tables, which will therefore not be present in the construction
+ // virtual table group, even though the subobject virtual tables are
+ // present in the main virtual table group for the complete object.
+ if (!BaseIsMorallyVirtual && !BaseDecl->getNumVBases())
+ continue;
+ }
+
+ // Get the base offset of this base.
+ CharUnits RelativeBaseOffset = Layout.getBaseClassOffset(BaseDecl);
+ CharUnits BaseOffset = Base.getBaseOffset() + RelativeBaseOffset;
+
+ CharUnits BaseOffsetInLayoutClass =
+ OffsetInLayoutClass + RelativeBaseOffset;
+
+ // Don't emit a secondary vtable for a primary base. We might however want
+ // to emit secondary vtables for other bases of this base.
+ if (BaseDecl == PrimaryBase) {
+ LayoutSecondaryVTables(BaseSubobject(BaseDecl, BaseOffset),
+ BaseIsMorallyVirtual, BaseOffsetInLayoutClass);
+ continue;
+ }
+
+ // Layout the primary vtable (and any secondary vtables) for this base.
+ LayoutPrimaryAndSecondaryVTables(
+ BaseSubobject(BaseDecl, BaseOffset),
+ BaseIsMorallyVirtual,
+ /*BaseIsVirtualInLayoutClass=*/false,
+ BaseOffsetInLayoutClass);
+ }
+}
+
+void
+VTableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
+ CharUnits OffsetInLayoutClass,
+ VisitedVirtualBasesSetTy &VBases) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Check if this base has a primary base.
+ if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+
+ // Check if it's virtual.
+ if (Layout.isPrimaryBaseVirtual()) {
+ bool IsPrimaryVirtualBase = true;
+
+ if (isBuildingConstructorVTable()) {
+ // Check if the base is actually a primary base in the class we use for
+ // layout.
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ CharUnits PrimaryBaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(PrimaryBase);
+
+ // We know that the base is not a primary base in the layout class if
+ // the base offsets are different.
+ if (PrimaryBaseOffsetInLayoutClass != OffsetInLayoutClass)
+ IsPrimaryVirtualBase = false;
+ }
+
+ if (IsPrimaryVirtualBase)
+ PrimaryVirtualBases.insert(PrimaryBase);
+ }
+ }
+
+ // Traverse bases, looking for more primary virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ CharUnits BaseOffsetInLayoutClass;
+
+ if (I->isVirtual()) {
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ BaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+ } else {
+ BaseOffsetInLayoutClass =
+ OffsetInLayoutClass + Layout.getBaseClassOffset(BaseDecl);
+ }
+
+ DeterminePrimaryVirtualBases(BaseDecl, BaseOffsetInLayoutClass, VBases);
+ }
+}
+
+void
+VTableBuilder::LayoutVTablesForVirtualBases(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases) {
+ // Itanium C++ ABI 2.5.2:
+ // Then come the virtual base virtual tables, also in inheritance graph
+ // order, and again excluding primary bases (which share virtual tables with
+ // the classes for which they are primary).
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Check if this base needs a vtable. (If it's virtual, not a primary base
+ // of some other class, and we haven't visited it before).
+ if (I->isVirtual() && BaseDecl->isDynamicClass() &&
+ !PrimaryVirtualBases.count(BaseDecl) && VBases.insert(BaseDecl)) {
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+ CharUnits BaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+ CharUnits BaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+
+ LayoutPrimaryAndSecondaryVTables(
+ BaseSubobject(BaseDecl, BaseOffset),
+ /*BaseIsMorallyVirtual=*/true,
+ /*BaseIsVirtualInLayoutClass=*/true,
+ BaseOffsetInLayoutClass);
+ }
+
+ // We only need to check the base for virtual base vtables if it actually
+ // has virtual bases.
+ if (BaseDecl->getNumVBases())
+ LayoutVTablesForVirtualBases(BaseDecl, VBases);
+ }
+}
+
+/// dumpLayout - Dump the vtable layout.
+void VTableBuilder::dumpLayout(raw_ostream& Out) {
+
+ if (isBuildingConstructorVTable()) {
+ Out << "Construction vtable for ('";
+ Out << MostDerivedClass->getQualifiedNameAsString() << "', ";
+ Out << MostDerivedClassOffset.getQuantity() << ") in '";
+ Out << LayoutClass->getQualifiedNameAsString();
+ } else {
+ Out << "Vtable for '";
+ Out << MostDerivedClass->getQualifiedNameAsString();
+ }
+ Out << "' (" << Components.size() << " entries).\n";
+
+ // Iterate through the address points and insert them into a new map where
+ // they are keyed by the index and not the base object.
+ // Since an address point can be shared by multiple subobjects, we use an
+ // STL multimap.
+ std::multimap<uint64_t, BaseSubobject> AddressPointsByIndex;
+ for (AddressPointsMapTy::const_iterator I = AddressPoints.begin(),
+ E = AddressPoints.end(); I != E; ++I) {
+ const BaseSubobject& Base = I->first;
+ uint64_t Index = I->second;
+
+ AddressPointsByIndex.insert(std::make_pair(Index, Base));
+ }
+
+ for (unsigned I = 0, E = Components.size(); I != E; ++I) {
+ uint64_t Index = I;
+
+ Out << llvm::format("%4d | ", I);
+
+ const VTableComponent &Component = Components[I];
+
+ // Dump the component.
+ switch (Component.getKind()) {
+
+ case VTableComponent::CK_VCallOffset:
+ Out << "vcall_offset ("
+ << Component.getVCallOffset().getQuantity()
+ << ")";
+ break;
+
+ case VTableComponent::CK_VBaseOffset:
+ Out << "vbase_offset ("
+ << Component.getVBaseOffset().getQuantity()
+ << ")";
+ break;
+
+ case VTableComponent::CK_OffsetToTop:
+ Out << "offset_to_top ("
+ << Component.getOffsetToTop().getQuantity()
+ << ")";
+ break;
+
+ case VTableComponent::CK_RTTI:
+ Out << Component.getRTTIDecl()->getQualifiedNameAsString() << " RTTI";
+ break;
+
+ case VTableComponent::CK_FunctionPointer: {
+ const CXXMethodDecl *MD = Component.getFunctionDecl();
+
+ std::string Str =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+ Out << Str;
+ if (MD->isPure())
+ Out << " [pure]";
+
+ ThunkInfo Thunk = VTableThunks.lookup(I);
+ if (!Thunk.isEmpty()) {
+ // If this function pointer has a return adjustment, dump it.
+ if (!Thunk.Return.isEmpty()) {
+ Out << "\n [return adjustment: ";
+ Out << Thunk.Return.NonVirtual << " non-virtual";
+
+ if (Thunk.Return.VBaseOffsetOffset) {
+ Out << ", " << Thunk.Return.VBaseOffsetOffset;
+ Out << " vbase offset offset";
+ }
+
+ Out << ']';
+ }
+
+ // If this function pointer has a 'this' pointer adjustment, dump it.
+ if (!Thunk.This.isEmpty()) {
+ Out << "\n [this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
+
+ if (Thunk.This.VCallOffsetOffset) {
+ Out << ", " << Thunk.This.VCallOffsetOffset;
+ Out << " vcall offset offset";
+ }
+
+ Out << ']';
+ }
+ }
+
+ break;
+ }
+
+ case VTableComponent::CK_CompleteDtorPointer:
+ case VTableComponent::CK_DeletingDtorPointer: {
+ bool IsComplete =
+ Component.getKind() == VTableComponent::CK_CompleteDtorPointer;
+
+ const CXXDestructorDecl *DD = Component.getDestructorDecl();
+
+ Out << DD->getQualifiedNameAsString();
+ if (IsComplete)
+ Out << "() [complete]";
+ else
+ Out << "() [deleting]";
+
+ if (DD->isPure())
+ Out << " [pure]";
+
+ ThunkInfo Thunk = VTableThunks.lookup(I);
+ if (!Thunk.isEmpty()) {
+ // If this destructor has a 'this' pointer adjustment, dump it.
+ if (!Thunk.This.isEmpty()) {
+ Out << "\n [this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
+
+ if (Thunk.This.VCallOffsetOffset) {
+ Out << ", " << Thunk.This.VCallOffsetOffset;
+ Out << " vcall offset offset";
+ }
+
+ Out << ']';
+ }
+ }
+
+ break;
+ }
+
+ case VTableComponent::CK_UnusedFunctionPointer: {
+ const CXXMethodDecl *MD = Component.getUnusedFunctionDecl();
+
+ std::string Str =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+ Out << "[unused] " << Str;
+ if (MD->isPure())
+ Out << " [pure]";
+ }
+
+ }
+
+ Out << '\n';
+
+ // Dump the next address point.
+ uint64_t NextIndex = Index + 1;
+ if (AddressPointsByIndex.count(NextIndex)) {
+ if (AddressPointsByIndex.count(NextIndex) == 1) {
+ const BaseSubobject &Base =
+ AddressPointsByIndex.find(NextIndex)->second;
+
+ Out << " -- (" << Base.getBase()->getQualifiedNameAsString();
+ Out << ", " << Base.getBaseOffset().getQuantity();
+ Out << ") vtable address --\n";
+ } else {
+ CharUnits BaseOffset =
+ AddressPointsByIndex.lower_bound(NextIndex)->second.getBaseOffset();
+
+ // We store the class names in a set to get a stable order.
+ std::set<std::string> ClassNames;
+ for (std::multimap<uint64_t, BaseSubobject>::const_iterator I =
+ AddressPointsByIndex.lower_bound(NextIndex), E =
+ AddressPointsByIndex.upper_bound(NextIndex); I != E; ++I) {
+ assert(I->second.getBaseOffset() == BaseOffset &&
+ "Invalid base offset!");
+ const CXXRecordDecl *RD = I->second.getBase();
+ ClassNames.insert(RD->getQualifiedNameAsString());
+ }
+
+ for (std::set<std::string>::const_iterator I = ClassNames.begin(),
+ E = ClassNames.end(); I != E; ++I) {
+ Out << " -- (" << *I;
+ Out << ", " << BaseOffset.getQuantity() << ") vtable address --\n";
+ }
+ }
+ }
+ }
+
+ Out << '\n';
+
+ if (isBuildingConstructorVTable())
+ return;
+
+ if (MostDerivedClass->getNumVBases()) {
+ // We store the virtual base class names and their offsets in a map to get
+ // a stable order.
+
+ std::map<std::string, CharUnits> ClassNamesAndOffsets;
+ for (VBaseOffsetOffsetsMapTy::const_iterator I = VBaseOffsetOffsets.begin(),
+ E = VBaseOffsetOffsets.end(); I != E; ++I) {
+ std::string ClassName = I->first->getQualifiedNameAsString();
+ CharUnits OffsetOffset = I->second;
+ ClassNamesAndOffsets.insert(
+ std::make_pair(ClassName, OffsetOffset));
+ }
+
+ Out << "Virtual base offset offsets for '";
+ Out << MostDerivedClass->getQualifiedNameAsString() << "' (";
+ Out << ClassNamesAndOffsets.size();
+ Out << (ClassNamesAndOffsets.size() == 1 ? " entry" : " entries") << ").\n";
+
+ for (std::map<std::string, CharUnits>::const_iterator I =
+ ClassNamesAndOffsets.begin(), E = ClassNamesAndOffsets.end();
+ I != E; ++I)
+ Out << " " << I->first << " | " << I->second.getQuantity() << '\n';
+
+ Out << "\n";
+ }
+
+ if (!Thunks.empty()) {
+ // We store the method names in a map to get a stable order.
+ std::map<std::string, const CXXMethodDecl *> MethodNamesAndDecls;
+
+ for (ThunksMapTy::const_iterator I = Thunks.begin(), E = Thunks.end();
+ I != E; ++I) {
+ const CXXMethodDecl *MD = I->first;
+ std::string MethodName =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+
+ MethodNamesAndDecls.insert(std::make_pair(MethodName, MD));
+ }
+
+ for (std::map<std::string, const CXXMethodDecl *>::const_iterator I =
+ MethodNamesAndDecls.begin(), E = MethodNamesAndDecls.end();
+ I != E; ++I) {
+ const std::string &MethodName = I->first;
+ const CXXMethodDecl *MD = I->second;
+
+ ThunkInfoVectorTy ThunksVector = Thunks[MD];
+ std::sort(ThunksVector.begin(), ThunksVector.end());
+
+ Out << "Thunks for '" << MethodName << "' (" << ThunksVector.size();
+ Out << (ThunksVector.size() == 1 ? " entry" : " entries") << ").\n";
+
+ for (unsigned I = 0, E = ThunksVector.size(); I != E; ++I) {
+ const ThunkInfo &Thunk = ThunksVector[I];
+
+ Out << llvm::format("%4d | ", I);
+
+ // If this function pointer has a return pointer adjustment, dump it.
+ if (!Thunk.Return.isEmpty()) {
+ Out << "return adjustment: " << Thunk.This.NonVirtual;
+ Out << " non-virtual";
+ if (Thunk.Return.VBaseOffsetOffset) {
+ Out << ", " << Thunk.Return.VBaseOffsetOffset;
+ Out << " vbase offset offset";
+ }
+
+ if (!Thunk.This.isEmpty())
+ Out << "\n ";
+ }
+
+ // If this function pointer has a 'this' pointer adjustment, dump it.
+ if (!Thunk.This.isEmpty()) {
+ Out << "this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
+
+ if (Thunk.This.VCallOffsetOffset) {
+ Out << ", " << Thunk.This.VCallOffsetOffset;
+ Out << " vcall offset offset";
+ }
+ }
+
+ Out << '\n';
+ }
+
+ Out << '\n';
+ }
+ }
+
+ // Compute the vtable indices for all the member functions.
+ // Store them in a map keyed by the index so we'll get a sorted table.
+ std::map<uint64_t, std::string> IndicesMap;
+
+ for (CXXRecordDecl::method_iterator i = MostDerivedClass->method_begin(),
+ e = MostDerivedClass->method_end(); i != e; ++i) {
+ const CXXMethodDecl *MD = *i;
+
+ // We only want virtual member functions.
+ if (!MD->isVirtual())
+ continue;
+
+ std::string MethodName =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ IndicesMap[VTables.getMethodVTableIndex(GlobalDecl(DD, Dtor_Complete))] =
+ MethodName + " [complete]";
+ IndicesMap[VTables.getMethodVTableIndex(GlobalDecl(DD, Dtor_Deleting))] =
+ MethodName + " [deleting]";
+ } else {
+ IndicesMap[VTables.getMethodVTableIndex(MD)] = MethodName;
+ }
+ }
+
+ // Print the vtable indices for all the member functions.
+ if (!IndicesMap.empty()) {
+ Out << "VTable indices for '";
+ Out << MostDerivedClass->getQualifiedNameAsString();
+ Out << "' (" << IndicesMap.size() << " entries).\n";
+
+ for (std::map<uint64_t, std::string>::const_iterator I = IndicesMap.begin(),
+ E = IndicesMap.end(); I != E; ++I) {
+ uint64_t VTableIndex = I->first;
+ const std::string &MethodName = I->second;
+
+ Out << llvm::format(" %4" PRIu64 " | ", VTableIndex) << MethodName
+ << '\n';
+ }
+ }
+
+ Out << '\n';
+}
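+
+// For reference, the dump produced above looks roughly like this for a class
+// 'struct A { virtual void f(); };' (illustrative; exact spacing may vary):
+//
+//   Vtable for 'A' (3 entries).
+//      0 | offset_to_top (0)
+//      1 | A RTTI
+//          -- (A, 0) vtable address --
+//      2 | void A::f()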
+
+}
+
+VTableLayout::VTableLayout(uint64_t NumVTableComponents,
+ const VTableComponent *VTableComponents,
+ uint64_t NumVTableThunks,
+ const VTableThunkTy *VTableThunks,
+ const AddressPointsMapTy &AddressPoints)
+ : NumVTableComponents(NumVTableComponents),
+ VTableComponents(new VTableComponent[NumVTableComponents]),
+ NumVTableThunks(NumVTableThunks),
+ VTableThunks(new VTableThunkTy[NumVTableThunks]),
+ AddressPoints(AddressPoints) {
+ std::copy(VTableComponents, VTableComponents+NumVTableComponents,
+ this->VTableComponents);
+ std::copy(VTableThunks, VTableThunks+NumVTableThunks, this->VTableThunks);
+}
+
+VTableLayout::~VTableLayout() {
+ delete[] VTableComponents;
+}
+
+VTableContext::~VTableContext() {
+ llvm::DeleteContainerSeconds(VTableLayouts);
+}
+
+static void
+CollectPrimaryBases(const CXXRecordDecl *RD, ASTContext &Context,
+ VTableBuilder::PrimaryBasesSetVectorTy &PrimaryBases) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (!PrimaryBase)
+ return;
+
+ CollectPrimaryBases(PrimaryBase, Context, PrimaryBases);
+
+ if (!PrimaryBases.insert(PrimaryBase))
+ llvm_unreachable("Found a duplicate primary base!");
+}
+
+void VTableContext::ComputeMethodVTableIndices(const CXXRecordDecl *RD) {
+
+ // Itanium C++ ABI 2.5.2:
+ // The order of the virtual function pointers in a virtual table is the
+ // order of declaration of the corresponding member functions in the class.
+ //
+ // There is an entry for any virtual function declared in a class,
+ // whether it is a new function or overrides a base class function,
+ // unless it overrides a function from the primary base, and conversion
+ // between their return types does not require an adjustment.
+
+ int64_t CurrentIndex = 0;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (PrimaryBase) {
+ assert(PrimaryBase->isCompleteDefinition() &&
+ "Should have the definition decl of the primary base!");
+
+ // Since the record decl shares its vtable pointer with the primary base
+ // we need to start counting at the end of the primary base's vtable.
+ CurrentIndex = getNumVirtualFunctionPointers(PrimaryBase);
+ }
+
+ // Collect all the primary bases, so we can check whether methods override
+ // a method from the base.
+ VTableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
+ CollectPrimaryBases(RD, Context, PrimaryBases);
+
+ const CXXDestructorDecl *ImplicitVirtualDtor = 0;
+
+ for (CXXRecordDecl::method_iterator i = RD->method_begin(),
+ e = RD->method_end(); i != e; ++i) {
+ const CXXMethodDecl *MD = *i;
+
+ // We only want virtual methods.
+ if (!MD->isVirtual())
+ continue;
+
+ // Check if this method overrides a method in the primary base.
+ if (const CXXMethodDecl *OverriddenMD =
+ FindNearestOverriddenMethod(MD, PrimaryBases)) {
+      // Check whether converting from the return type of this method to the
+      // return type of the overridden method requires an adjustment.
+ if (ComputeReturnAdjustmentBaseOffset(Context, MD,
+ OverriddenMD).isEmpty()) {
+        // The method shares its vtable index with the corresponding entry in
+        // the primary base class's vtable.
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ const CXXDestructorDecl *OverriddenDD =
+ cast<CXXDestructorDecl>(OverriddenMD);
+
+ // Add both the complete and deleting entries.
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Complete)] =
+ getMethodVTableIndex(GlobalDecl(OverriddenDD, Dtor_Complete));
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Deleting)] =
+ getMethodVTableIndex(GlobalDecl(OverriddenDD, Dtor_Deleting));
+ } else {
+ MethodVTableIndices[MD] = getMethodVTableIndex(OverriddenMD);
+ }
+
+ // We don't need to add an entry for this method.
+ continue;
+ }
+ }
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ if (MD->isImplicit()) {
+ assert(!ImplicitVirtualDtor &&
+               "Already saw an implicit virtual dtor!");
+ ImplicitVirtualDtor = DD;
+ continue;
+ }
+
+ // Add the complete dtor.
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Complete)] = CurrentIndex++;
+
+ // Add the deleting dtor.
+ MethodVTableIndices[GlobalDecl(DD, Dtor_Deleting)] = CurrentIndex++;
+ } else {
+ // Add the entry.
+ MethodVTableIndices[MD] = CurrentIndex++;
+ }
+ }
+
+ if (ImplicitVirtualDtor) {
+ // Itanium C++ ABI 2.5.2:
+ // If a class has an implicitly-defined virtual destructor,
+ // its entries come after the declared virtual function pointers.
+
+ // Add the complete dtor.
+ MethodVTableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Complete)] =
+ CurrentIndex++;
+
+ // Add the deleting dtor.
+ MethodVTableIndices[GlobalDecl(ImplicitVirtualDtor, Dtor_Deleting)] =
+ CurrentIndex++;
+ }
+
+ NumVirtualFunctionPointers[RD] = CurrentIndex;
+}
+
+uint64_t VTableContext::getNumVirtualFunctionPointers(const CXXRecordDecl *RD) {
+ llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I =
+ NumVirtualFunctionPointers.find(RD);
+ if (I != NumVirtualFunctionPointers.end())
+ return I->second;
+
+ ComputeMethodVTableIndices(RD);
+
+ I = NumVirtualFunctionPointers.find(RD);
+ assert(I != NumVirtualFunctionPointers.end() && "Did not find entry!");
+ return I->second;
+}
+
+uint64_t VTableContext::getMethodVTableIndex(GlobalDecl GD) {
+ MethodVTableIndicesTy::iterator I = MethodVTableIndices.find(GD);
+ if (I != MethodVTableIndices.end())
+ return I->second;
+
+ const CXXRecordDecl *RD = cast<CXXMethodDecl>(GD.getDecl())->getParent();
+
+ ComputeMethodVTableIndices(RD);
+
+ I = MethodVTableIndices.find(GD);
+ assert(I != MethodVTableIndices.end() && "Did not find index!");
+ return I->second;
+}
+
+CharUnits
+VTableContext::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *VBase) {
+ ClassPairTy ClassPair(RD, VBase);
+
+ VirtualBaseClassOffsetOffsetsMapTy::iterator I =
+ VirtualBaseClassOffsetOffsets.find(ClassPair);
+ if (I != VirtualBaseClassOffsetOffsets.end())
+ return I->second;
+
+ VCallAndVBaseOffsetBuilder Builder(RD, RD, /*FinalOverriders=*/0,
+ BaseSubobject(RD, CharUnits::Zero()),
+ /*BaseIsVirtual=*/false,
+ /*OffsetInLayoutClass=*/CharUnits::Zero());
+
+ for (VCallAndVBaseOffsetBuilder::VBaseOffsetOffsetsMapTy::const_iterator I =
+ Builder.getVBaseOffsetOffsets().begin(),
+ E = Builder.getVBaseOffsetOffsets().end(); I != E; ++I) {
+ // Insert all types.
+ ClassPairTy ClassPair(RD, I->first);
+
+ VirtualBaseClassOffsetOffsets.insert(
+ std::make_pair(ClassPair, I->second));
+ }
+
+ I = VirtualBaseClassOffsetOffsets.find(ClassPair);
+ assert(I != VirtualBaseClassOffsetOffsets.end() && "Did not find index!");
+
+ return I->second;
+}
+
+static VTableLayout *CreateVTableLayout(const VTableBuilder &Builder) {
+ SmallVector<VTableLayout::VTableThunkTy, 1>
+ VTableThunks(Builder.vtable_thunks_begin(), Builder.vtable_thunks_end());
+ std::sort(VTableThunks.begin(), VTableThunks.end());
+
+ return new VTableLayout(Builder.getNumVTableComponents(),
+ Builder.vtable_component_begin(),
+ VTableThunks.size(),
+ VTableThunks.data(),
+ Builder.getAddressPoints());
+}
+
+void VTableContext::ComputeVTableRelatedInformation(const CXXRecordDecl *RD) {
+ const VTableLayout *&Entry = VTableLayouts[RD];
+
+ // Check if we've computed this information before.
+ if (Entry)
+ return;
+
+ VTableBuilder Builder(*this, RD, CharUnits::Zero(),
+ /*MostDerivedClassIsVirtual=*/0, RD);
+ Entry = CreateVTableLayout(Builder);
+
+ // Add the known thunks.
+ Thunks.insert(Builder.thunks_begin(), Builder.thunks_end());
+
+ // If we don't have the vbase information for this class, insert it.
+ // getVirtualBaseOffsetOffset will compute it separately without computing
+ // the rest of the vtable related information.
+ if (!RD->getNumVBases())
+ return;
+
+ const RecordType *VBaseRT =
+ RD->vbases_begin()->getType()->getAs<RecordType>();
+ const CXXRecordDecl *VBase = cast<CXXRecordDecl>(VBaseRT->getDecl());
+
+ if (VirtualBaseClassOffsetOffsets.count(std::make_pair(RD, VBase)))
+ return;
+
+ for (VTableBuilder::VBaseOffsetOffsetsMapTy::const_iterator I =
+ Builder.getVBaseOffsetOffsets().begin(),
+ E = Builder.getVBaseOffsetOffsets().end(); I != E; ++I) {
+ // Insert all types.
+ ClassPairTy ClassPair(RD, I->first);
+
+ VirtualBaseClassOffsetOffsets.insert(std::make_pair(ClassPair, I->second));
+ }
+}
+
+VTableLayout *VTableContext::createConstructionVTableLayout(
+ const CXXRecordDecl *MostDerivedClass,
+ CharUnits MostDerivedClassOffset,
+ bool MostDerivedClassIsVirtual,
+ const CXXRecordDecl *LayoutClass) {
+ VTableBuilder Builder(*this, MostDerivedClass, MostDerivedClassOffset,
+ MostDerivedClassIsVirtual, LayoutClass);
+ return CreateVTableLayout(Builder);
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp b/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp
new file mode 100644
index 0000000..659cc6d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/AnalysisDeclContext.cpp
@@ -0,0 +1,463 @@
+//== AnalysisDeclContext.cpp - Analysis context for path-sensitive analysis -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines AnalysisDeclContext, a class that manages the analysis context
+// data for path-sensitive analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/Analyses/PseudoConstantAnalysis.h"
+#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/CFGStmtMap.h"
+#include "clang/Analysis/Support/BumpVector.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace clang;
+
+typedef llvm::DenseMap<const void *, ManagedAnalysis *> ManagedAnalysisMap;
+
+AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
+ const Decl *d,
+ idx::TranslationUnit *tu,
+ const CFG::BuildOptions &buildOptions)
+ : Manager(Mgr),
+ D(d),
+ TU(tu),
+ cfgBuildOptions(buildOptions),
+ forcedBlkExprs(0),
+ builtCFG(false),
+ builtCompleteCFG(false),
+ ReferencedBlockVars(0),
+ ManagedAnalyses(0)
+{
+ cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs;
+}
+
+AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
+ const Decl *d,
+ idx::TranslationUnit *tu)
+: Manager(Mgr),
+ D(d),
+ TU(tu),
+ forcedBlkExprs(0),
+ builtCFG(false),
+ builtCompleteCFG(false),
+ ReferencedBlockVars(0),
+ ManagedAnalyses(0)
+{
+ cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs;
+}
+
+AnalysisDeclContextManager::AnalysisDeclContextManager(bool useUnoptimizedCFG,
+ bool addImplicitDtors,
+ bool addInitializers) {
+ cfgBuildOptions.PruneTriviallyFalseEdges = !useUnoptimizedCFG;
+ cfgBuildOptions.AddImplicitDtors = addImplicitDtors;
+ cfgBuildOptions.AddInitializers = addInitializers;
+}
+
+void AnalysisDeclContextManager::clear() {
+ for (ContextMap::iterator I = Contexts.begin(), E = Contexts.end(); I!=E; ++I)
+ delete I->second;
+ Contexts.clear();
+}
+
+Stmt *AnalysisDeclContext::getBody() const {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ return FD->getBody();
+ else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ return MD->getBody();
+ else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ return BD->getBody();
+ else if (const FunctionTemplateDecl *FunTmpl
+ = dyn_cast_or_null<FunctionTemplateDecl>(D))
+ return FunTmpl->getTemplatedDecl()->getBody();
+
+ llvm_unreachable("unknown code decl");
+}
+
+const ImplicitParamDecl *AnalysisDeclContext::getSelfDecl() const {
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ return MD->getSelfDecl();
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ // See if 'self' was captured by the block.
+ for (BlockDecl::capture_const_iterator it = BD->capture_begin(),
+ et = BD->capture_end(); it != et; ++it) {
+ const VarDecl *VD = it->getVariable();
+ if (VD->getName() == "self")
+ return dyn_cast<ImplicitParamDecl>(VD);
+ }
+ }
+
+ return NULL;
+}
+
+void AnalysisDeclContext::registerForcedBlockExpression(const Stmt *stmt) {
+ if (!forcedBlkExprs)
+ forcedBlkExprs = new CFG::BuildOptions::ForcedBlkExprs();
+ // Default construct an entry for 'stmt'.
+ if (const Expr *e = dyn_cast<Expr>(stmt))
+ stmt = e->IgnoreParens();
+ (void) (*forcedBlkExprs)[stmt];
+}
+
+const CFGBlock *
+AnalysisDeclContext::getBlockForRegisteredExpression(const Stmt *stmt) {
+ assert(forcedBlkExprs);
+ if (const Expr *e = dyn_cast<Expr>(stmt))
+ stmt = e->IgnoreParens();
+ CFG::BuildOptions::ForcedBlkExprs::const_iterator itr =
+ forcedBlkExprs->find(stmt);
+ assert(itr != forcedBlkExprs->end());
+ return itr->second;
+}
+
+CFG *AnalysisDeclContext::getCFG() {
+ if (!cfgBuildOptions.PruneTriviallyFalseEdges)
+ return getUnoptimizedCFG();
+
+ if (!builtCFG) {
+ cfg.reset(CFG::buildCFG(D, getBody(),
+ &D->getASTContext(), cfgBuildOptions));
+ // Even when the cfg is not successfully built, we don't
+ // want to try building it again.
+ builtCFG = true;
+ }
+ return cfg.get();
+}
+
+CFG *AnalysisDeclContext::getUnoptimizedCFG() {
+ if (!builtCompleteCFG) {
+ SaveAndRestore<bool> NotPrune(cfgBuildOptions.PruneTriviallyFalseEdges,
+ false);
+ completeCFG.reset(CFG::buildCFG(D, getBody(), &D->getASTContext(),
+ cfgBuildOptions));
+ // Even when the cfg is not successfully built, we don't
+ // want to try building it again.
+ builtCompleteCFG = true;
+ }
+ return completeCFG.get();
+}
+
+CFGStmtMap *AnalysisDeclContext::getCFGStmtMap() {
+ if (cfgStmtMap)
+ return cfgStmtMap.get();
+
+ if (CFG *c = getCFG()) {
+ cfgStmtMap.reset(CFGStmtMap::Build(c, &getParentMap()));
+ return cfgStmtMap.get();
+ }
+
+ return 0;
+}
+
+CFGReverseBlockReachabilityAnalysis *AnalysisDeclContext::getCFGReachablityAnalysis() {
+ if (CFA)
+ return CFA.get();
+
+ if (CFG *c = getCFG()) {
+ CFA.reset(new CFGReverseBlockReachabilityAnalysis(*c));
+ return CFA.get();
+ }
+
+ return 0;
+}
+
+void AnalysisDeclContext::dumpCFG(bool ShowColors) {
+ getCFG()->dump(getASTContext().getLangOpts(), ShowColors);
+}
+
+ParentMap &AnalysisDeclContext::getParentMap() {
+ if (!PM)
+ PM.reset(new ParentMap(getBody()));
+ return *PM;
+}
+
+PseudoConstantAnalysis *AnalysisDeclContext::getPseudoConstantAnalysis() {
+ if (!PCA)
+ PCA.reset(new PseudoConstantAnalysis(getBody()));
+ return PCA.get();
+}
+
+AnalysisDeclContext *AnalysisDeclContextManager::getContext(const Decl *D,
+ idx::TranslationUnit *TU) {
+ AnalysisDeclContext *&AC = Contexts[D];
+ if (!AC)
+ AC = new AnalysisDeclContext(this, D, TU, cfgBuildOptions);
+ return AC;
+}
+
+const StackFrameContext *
+AnalysisDeclContext::getStackFrame(LocationContext const *Parent, const Stmt *S,
+ const CFGBlock *Blk, unsigned Idx) {
+ return getLocationContextManager().getStackFrame(this, Parent, S, Blk, Idx);
+}
+
+LocationContextManager & AnalysisDeclContext::getLocationContextManager() {
+ assert(Manager &&
+ "Cannot create LocationContexts without an AnalysisDeclContextManager!");
+ return Manager->getLocationContextManager();
+}
+
+//===----------------------------------------------------------------------===//
+// FoldingSet profiling.
+//===----------------------------------------------------------------------===//
+
+void LocationContext::ProfileCommon(llvm::FoldingSetNodeID &ID,
+ ContextKind ck,
+ AnalysisDeclContext *ctx,
+ const LocationContext *parent,
+ const void *data) {
+ ID.AddInteger(ck);
+ ID.AddPointer(ctx);
+ ID.AddPointer(parent);
+ ID.AddPointer(data);
+}
+
+void StackFrameContext::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getAnalysisDeclContext(), getParent(), CallSite, Block, Index);
+}
+
+void ScopeContext::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getAnalysisDeclContext(), getParent(), Enter);
+}
+
+void BlockInvocationContext::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getAnalysisDeclContext(), getParent(), BD);
+}
+
+//===----------------------------------------------------------------------===//
+// LocationContext creation.
+//===----------------------------------------------------------------------===//
+
+template <typename LOC, typename DATA>
+const LOC*
+LocationContextManager::getLocationContext(AnalysisDeclContext *ctx,
+ const LocationContext *parent,
+ const DATA *d) {
+ llvm::FoldingSetNodeID ID;
+ LOC::Profile(ID, ctx, parent, d);
+ void *InsertPos;
+
+ LOC *L = cast_or_null<LOC>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
+
+ if (!L) {
+ L = new LOC(ctx, parent, d);
+ Contexts.InsertNode(L, InsertPos);
+ }
+ return L;
+}
+
+const StackFrameContext*
+LocationContextManager::getStackFrame(AnalysisDeclContext *ctx,
+ const LocationContext *parent,
+ const Stmt *s,
+ const CFGBlock *blk, unsigned idx) {
+ llvm::FoldingSetNodeID ID;
+ StackFrameContext::Profile(ID, ctx, parent, s, blk, idx);
+ void *InsertPos;
+ StackFrameContext *L =
+ cast_or_null<StackFrameContext>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
+ if (!L) {
+ L = new StackFrameContext(ctx, parent, s, blk, idx);
+ Contexts.InsertNode(L, InsertPos);
+ }
+ return L;
+}
+
+const ScopeContext *
+LocationContextManager::getScope(AnalysisDeclContext *ctx,
+ const LocationContext *parent,
+ const Stmt *s) {
+ return getLocationContext<ScopeContext, Stmt>(ctx, parent, s);
+}
+
+//===----------------------------------------------------------------------===//
+// LocationContext methods.
+//===----------------------------------------------------------------------===//
+
+const StackFrameContext *LocationContext::getCurrentStackFrame() const {
+ const LocationContext *LC = this;
+ while (LC) {
+ if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LC))
+ return SFC;
+ LC = LC->getParent();
+ }
+ return NULL;
+}
+
+const StackFrameContext *
+LocationContext::getStackFrameForDeclContext(const DeclContext *DC) const {
+ const LocationContext *LC = this;
+ while (LC) {
+ if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LC)) {
+ if (cast<DeclContext>(SFC->getDecl()) == DC)
+ return SFC;
+ }
+ LC = LC->getParent();
+ }
+ return NULL;
+}
+
+bool LocationContext::isParentOf(const LocationContext *LC) const {
+ do {
+ const LocationContext *Parent = LC->getParent();
+ if (Parent == this)
+ return true;
+ else
+ LC = Parent;
+ } while (LC);
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Lazily generated map to query the external variables referenced by a Block.
+//===----------------------------------------------------------------------===//
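+//
+// As an illustration (not part of the original source): for a block such as
+//
+//   int local = 0; static int global;
+//   ^{ return local + global; };
+//
+// getReferencedBlockVars() reports both 'local' (a captured enclosing local)
+// and 'global' (a referenced variable without local storage).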
+
+namespace {
+class FindBlockDeclRefExprsVals : public StmtVisitor<FindBlockDeclRefExprsVals>{
+ BumpVector<const VarDecl*> &BEVals;
+ BumpVectorContext &BC;
+ llvm::SmallPtrSet<const VarDecl*, 4> Visited;
+ llvm::SmallPtrSet<const DeclContext*, 4> IgnoredContexts;
+public:
+ FindBlockDeclRefExprsVals(BumpVector<const VarDecl*> &bevals,
+ BumpVectorContext &bc)
+ : BEVals(bevals), BC(bc) {}
+
+ bool IsTrackedDecl(const VarDecl *VD) {
+ const DeclContext *DC = VD->getDeclContext();
+ return IgnoredContexts.count(DC) == 0;
+ }
+
+ void VisitStmt(Stmt *S) {
+ for (Stmt::child_range I = S->children(); I; ++I)
+ if (Stmt *child = *I)
+ Visit(child);
+ }
+
+ void VisitDeclRefExpr(DeclRefExpr *DR) {
+ // Non-local variables are also directly modified.
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ if (!VD->hasLocalStorage()) {
+ if (Visited.insert(VD))
+ BEVals.push_back(VD, BC);
+ } else if (DR->refersToEnclosingLocal()) {
+ if (Visited.insert(VD) && IsTrackedDecl(VD))
+ BEVals.push_back(VD, BC);
+ }
+ }
+ }
+
+ void VisitBlockExpr(BlockExpr *BR) {
+ // Blocks containing blocks can transitively capture more variables.
+ IgnoredContexts.insert(BR->getBlockDecl());
+ Visit(BR->getBlockDecl()->getBody());
+ }
+
+ void VisitPseudoObjectExpr(PseudoObjectExpr *PE) {
+ for (PseudoObjectExpr::semantics_iterator it = PE->semantics_begin(),
+ et = PE->semantics_end(); it != et; ++it) {
+ Expr *Semantic = *it;
+ if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Semantic))
+ Semantic = OVE->getSourceExpr();
+ Visit(Semantic);
+ }
+ }
+};
+} // end anonymous namespace
+
+typedef BumpVector<const VarDecl*> DeclVec;
+
+static DeclVec* LazyInitializeReferencedDecls(const BlockDecl *BD,
+ void *&Vec,
+ llvm::BumpPtrAllocator &A) {
+ if (Vec)
+ return (DeclVec*) Vec;
+
+ BumpVectorContext BC(A);
+ DeclVec *BV = (DeclVec*) A.Allocate<DeclVec>();
+ new (BV) DeclVec(BC, 10);
+
+ // Find the referenced variables.
+ FindBlockDeclRefExprsVals F(*BV, BC);
+ F.Visit(BD->getBody());
+
+ Vec = BV;
+ return BV;
+}
+
+std::pair<AnalysisDeclContext::referenced_decls_iterator,
+ AnalysisDeclContext::referenced_decls_iterator>
+AnalysisDeclContext::getReferencedBlockVars(const BlockDecl *BD) {
+ if (!ReferencedBlockVars)
+ ReferencedBlockVars = new llvm::DenseMap<const BlockDecl*,void*>();
+
+ DeclVec *V = LazyInitializeReferencedDecls(BD, (*ReferencedBlockVars)[BD], A);
+ return std::make_pair(V->begin(), V->end());
+}
+
+ManagedAnalysis *&AnalysisDeclContext::getAnalysisImpl(const void *tag) {
+ if (!ManagedAnalyses)
+ ManagedAnalyses = new ManagedAnalysisMap();
+ ManagedAnalysisMap *M = (ManagedAnalysisMap*) ManagedAnalyses;
+ return (*M)[tag];
+}
+
+//===----------------------------------------------------------------------===//
+// Cleanup.
+//===----------------------------------------------------------------------===//
+
+ManagedAnalysis::~ManagedAnalysis() {}
+
+AnalysisDeclContext::~AnalysisDeclContext() {
+ delete forcedBlkExprs;
+ delete ReferencedBlockVars;
+ // Release the managed analyses.
+ if (ManagedAnalyses) {
+ ManagedAnalysisMap *M = (ManagedAnalysisMap*) ManagedAnalyses;
+ for (ManagedAnalysisMap::iterator I = M->begin(), E = M->end(); I!=E; ++I)
+ delete I->second;
+ delete M;
+ }
+}
+
+AnalysisDeclContextManager::~AnalysisDeclContextManager() {
+ for (ContextMap::iterator I = Contexts.begin(), E = Contexts.end(); I!=E; ++I)
+ delete I->second;
+}
+
+LocationContext::~LocationContext() {}
+
+LocationContextManager::~LocationContextManager() {
+ clear();
+}
+
+void LocationContextManager::clear() {
+ for (llvm::FoldingSet<LocationContext>::iterator I = Contexts.begin(),
+ E = Contexts.end(); I != E; ) {
+ LocationContext *LC = &*I;
+ ++I;
+ delete LC;
+ }
+
+ Contexts.clear();
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
new file mode 100644
index 0000000..d1334a5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/CFG.cpp
@@ -0,0 +1,3972 @@
+//===--- CFG.cpp - Classes for representing and building CFGs----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CFG and CFGBuilder classes for representing and
+// building Control-Flow Graphs (CFGs) from ASTs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/SaveAndRestore.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/CharUnits.h"
+#include "llvm/Support/GraphWriter.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Format.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/OwningPtr.h"
+
+using namespace clang;
+
+namespace {
+
+static SourceLocation GetEndLoc(Decl *D) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(D))
+ if (Expr *Ex = VD->getInit())
+ return Ex->getSourceRange().getEnd();
+ return D->getLocation();
+}
+
+class CFGBuilder;
+
+/// The CFG builder uses a recursive algorithm to build the CFG. When
+/// we process an expression, sometimes we know that we must add the
+/// subexpressions as block-level expressions. For example:
+///
+/// exp1 || exp2
+///
+/// When processing the '||' expression, we know that exp1 and exp2
+/// need to be added as block-level expressions, even though they
+/// might not normally need to be. AddStmtChoice records this
+/// contextual information. If AddStmtChoice is 'NotAlwaysAdd', then
+/// the builder has an option not to add a subexpression as a
+/// block-level expression.
+///
+class AddStmtChoice {
+public:
+ enum Kind { NotAlwaysAdd = 0, AlwaysAdd = 1 };
+
+ AddStmtChoice(Kind a_kind = NotAlwaysAdd) : kind(a_kind) {}
+
+ bool alwaysAdd(CFGBuilder &builder,
+ const Stmt *stmt) const;
+
+ /// Return a copy of this object, except with the 'always-add' bit
+ /// set as specified.
+ AddStmtChoice withAlwaysAdd(bool alwaysAdd) const {
+ return AddStmtChoice(alwaysAdd ? AlwaysAdd : NotAlwaysAdd);
+ }
+
+private:
+ Kind kind;
+};
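+
+// A usage sketch (not taken from this file): a visitor whose subexpression
+// must appear as a block-level expression can forward the hint with something
+// like Visit(E->getSubExpr(), asc.withAlwaysAdd(true)).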
+
+/// LocalScope - Node in the tree of local scopes created for C++ implicit
+/// destructor call generation. It contains the list of automatic variables
+/// declared in the scope and a link to the position in the previous scope at
+/// which this scope begins.
+///
+/// The process of creating local scopes is as follows:
+/// - Init CFGBuilder::ScopePos with an invalid position (equivalent to null),
+/// - Before processing statements in scope (e.g. CompoundStmt) create
+/// LocalScope object using CFGBuilder::ScopePos as link to previous scope
+/// and set CFGBuilder::ScopePos to the end of new scope,
+/// - On every occurrence of VarDecl increase CFGBuilder::ScopePos if it points
+/// at this VarDecl,
+/// - For every normal (without jump) end of scope add to CFGBlock destructors
+/// for objects in the current scope,
+/// - For every jump add to CFGBlock destructors for objects
+/// between CFGBuilder::ScopePos and local scope position saved for jump
+/// target. Thanks to C++ restrictions on goto jumps we can be sure that
+/// jump target position will be on the path to root from CFGBuilder::ScopePos
+/// (adding to LocalScope any variable that doesn't need its constructor to be
+/// called can break this assumption),
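+///
+/// A rough illustration (not part of the original comment): for a body like
+///
+///   { A a; B b; return; }
+///
+/// the scope records a and b in declaration order. When the scope is left at
+/// the return, automatic-object destructor elements are added so that ~B()
+/// runs before ~A(), assuming both destructors are non-trivial.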
+///
+class LocalScope {
+public:
+ typedef BumpVector<VarDecl*> AutomaticVarsTy;
+
+ /// const_iterator - Iterates local scope backwards and jumps to previous
+ /// scope on reaching the beginning of currently iterated scope.
+ class const_iterator {
+ const LocalScope* Scope;
+
+    /// VarIter is guaranteed to be greater than 0 for every valid iterator.
+ /// Invalid iterator (with null Scope) has VarIter equal to 0.
+ unsigned VarIter;
+
+ public:
+    /// Create an invalid iterator. Dereferencing an invalid iterator is not
+    /// allowed. Incrementing an invalid iterator is allowed and results in an
+    /// invalid iterator.
+ const_iterator()
+ : Scope(NULL), VarIter(0) {}
+
+    /// Create a valid iterator. If S.Prev is an invalid iterator and I is
+    /// equal to 0, this creates an invalid iterator.
+ const_iterator(const LocalScope& S, unsigned I)
+ : Scope(&S), VarIter(I) {
+      // An iterator to the "end" of a scope is not allowed. Handle it by
+      // going up the scope tree, possibly all the way to the invalid iterator
+      // at the root.
+ if (VarIter == 0 && Scope)
+ *this = Scope->Prev;
+ }
+
+ VarDecl *const* operator->() const {
+ assert (Scope && "Dereferencing invalid iterator is not allowed");
+ assert (VarIter != 0 && "Iterator has invalid value of VarIter member");
+ return &Scope->Vars[VarIter - 1];
+ }
+ VarDecl *operator*() const {
+ return *this->operator->();
+ }
+
+ const_iterator &operator++() {
+ if (!Scope)
+ return *this;
+
+ assert (VarIter != 0 && "Iterator has invalid value of VarIter member");
+ --VarIter;
+ if (VarIter == 0)
+ *this = Scope->Prev;
+ return *this;
+ }
+ const_iterator operator++(int) {
+ const_iterator P = *this;
+ ++*this;
+ return P;
+ }
+
+ bool operator==(const const_iterator &rhs) const {
+ return Scope == rhs.Scope && VarIter == rhs.VarIter;
+ }
+ bool operator!=(const const_iterator &rhs) const {
+ return !(*this == rhs);
+ }
+
+ operator bool() const {
+ return *this != const_iterator();
+ }
+
+ int distance(const_iterator L);
+ };
+
+ friend class const_iterator;
+
+private:
+ BumpVectorContext ctx;
+
+ /// Automatic variables in order of declaration.
+ AutomaticVarsTy Vars;
+ /// Iterator to variable in previous scope that was declared just before
+ /// begin of this scope.
+ const_iterator Prev;
+
+public:
+ /// Constructs empty scope linked to previous scope in specified place.
+ LocalScope(BumpVectorContext &ctx, const_iterator P)
+ : ctx(ctx), Vars(ctx, 4), Prev(P) {}
+
+ /// Begin of scope in direction of CFG building (backwards).
+ const_iterator begin() const { return const_iterator(*this, Vars.size()); }
+
+ void addVar(VarDecl *VD) {
+ Vars.push_back(VD, ctx);
+ }
+};
+
+/// distance - Calculates distance from this to L. L must be reachable from this
+/// (with use of ++ operator). Cost of calculating the distance is linear w.r.t.
+/// number of scopes between this and L.
+int LocalScope::const_iterator::distance(LocalScope::const_iterator L) {
+ int D = 0;
+ const_iterator F = *this;
+ while (F.Scope != L.Scope) {
+ assert (F != const_iterator()
+ && "L iterator is not reachable from F iterator.");
+ D += F.VarIter;
+ F = F.Scope->Prev;
+ }
+ D += F.VarIter - L.VarIter;
+ return D;
+}
+
+/// BlockScopePosPair - Structure for specifying position in CFG during its
+/// build process. It consists of CFGBlock that specifies position in CFG graph
+/// and LocalScope::const_iterator that specifies position in LocalScope graph.
+struct BlockScopePosPair {
+ BlockScopePosPair() : block(0) {}
+ BlockScopePosPair(CFGBlock *b, LocalScope::const_iterator scopePos)
+ : block(b), scopePosition(scopePos) {}
+
+ CFGBlock *block;
+ LocalScope::const_iterator scopePosition;
+};
+
+/// TryResult - a class representing a variant over the values
+/// 'true', 'false', or 'unknown'. This is returned by tryEvaluateBool,
+/// and is used by the CFGBuilder to decide if a branch condition
+/// can be decided up front during CFG construction.
+class TryResult {
+ int X;
+public:
+ TryResult(bool b) : X(b ? 1 : 0) {}
+ TryResult() : X(-1) {}
+
+ bool isTrue() const { return X == 1; }
+ bool isFalse() const { return X == 0; }
+ bool isKnown() const { return X >= 0; }
+ void negate() {
+ assert(isKnown());
+ X ^= 0x1;
+ }
+};
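+
+// For instance (illustrative only): tryEvaluateBool on the condition of
+// `if (x == 0 && 0)` yields a TryResult that isKnown() and isFalse(), letting
+// the builder prune the edge for the branch that can never be taken.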
+
+/// CFGBuilder - This class implements CFG construction from an AST.
+/// The builder is stateful: an instance of the builder should be used to
+/// construct only a single CFG.
+///
+/// Example usage:
+///
+///   CFGBuilder builder(astContext, buildOpts);
+///   CFG* cfg = builder.buildCFG(decl, body);
+///
+/// CFG construction is done via a recursive walk of an AST. We actually
+/// traverse the AST in reverse order so that the successor of a basic block is
+/// constructed prior to its predecessor. This allows us to nicely capture
+/// implicit fall-throughs without extra basic blocks.
+///
+class CFGBuilder {
+ typedef BlockScopePosPair JumpTarget;
+ typedef BlockScopePosPair JumpSource;
+
+ ASTContext *Context;
+ OwningPtr<CFG> cfg;
+
+ CFGBlock *Block;
+ CFGBlock *Succ;
+ JumpTarget ContinueJumpTarget;
+ JumpTarget BreakJumpTarget;
+ CFGBlock *SwitchTerminatedBlock;
+ CFGBlock *DefaultCaseBlock;
+ CFGBlock *TryTerminatedBlock;
+
+ // Current position in local scope.
+ LocalScope::const_iterator ScopePos;
+
+ // LabelMap records the mapping from Label expressions to their jump targets.
+ typedef llvm::DenseMap<LabelDecl*, JumpTarget> LabelMapTy;
+ LabelMapTy LabelMap;
+
+ // A list of blocks that end with a "goto" that must be backpatched to their
+ // resolved targets upon completion of CFG construction.
+ typedef std::vector<JumpSource> BackpatchBlocksTy;
+ BackpatchBlocksTy BackpatchBlocks;
+
+ // A list of labels whose address has been taken (for indirect gotos).
+ typedef llvm::SmallPtrSet<LabelDecl*, 5> LabelSetTy;
+ LabelSetTy AddressTakenLabels;
+
+ bool badCFG;
+ const CFG::BuildOptions &BuildOpts;
+
+ // State to track for building switch statements.
+ bool switchExclusivelyCovered;
+ Expr::EvalResult *switchCond;
+
+ CFG::BuildOptions::ForcedBlkExprs::value_type *cachedEntry;
+ const Stmt *lastLookup;
+
+ // Caches boolean evaluations of expressions to avoid multiple re-evaluations
+ // during construction of branches for chained logical operators.
+ typedef llvm::DenseMap<Expr *, TryResult> CachedBoolEvalsTy;
+ CachedBoolEvalsTy CachedBoolEvals;
+
+public:
+ explicit CFGBuilder(ASTContext *astContext,
+ const CFG::BuildOptions &buildOpts)
+    : Context(astContext), cfg(new CFG()), // create a new CFG
+ Block(NULL), Succ(NULL),
+ SwitchTerminatedBlock(NULL), DefaultCaseBlock(NULL),
+ TryTerminatedBlock(NULL), badCFG(false), BuildOpts(buildOpts),
+ switchExclusivelyCovered(false), switchCond(0),
+ cachedEntry(0), lastLookup(0) {}
+
+ // buildCFG - Used by external clients to construct the CFG.
+ CFG* buildCFG(const Decl *D, Stmt *Statement);
+
+ bool alwaysAdd(const Stmt *stmt);
+
+private:
+ // Visitors to walk an AST and construct the CFG.
+ CFGBlock *VisitAddrLabelExpr(AddrLabelExpr *A, AddStmtChoice asc);
+ CFGBlock *VisitBinaryOperator(BinaryOperator *B, AddStmtChoice asc);
+ CFGBlock *VisitBreakStmt(BreakStmt *B);
+ CFGBlock *VisitCXXCatchStmt(CXXCatchStmt *S);
+ CFGBlock *VisitExprWithCleanups(ExprWithCleanups *E,
+ AddStmtChoice asc);
+ CFGBlock *VisitCXXThrowExpr(CXXThrowExpr *T);
+ CFGBlock *VisitCXXTryStmt(CXXTryStmt *S);
+ CFGBlock *VisitCXXForRangeStmt(CXXForRangeStmt *S);
+ CFGBlock *VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E,
+ AddStmtChoice asc);
+ CFGBlock *VisitCXXConstructExpr(CXXConstructExpr *C, AddStmtChoice asc);
+ CFGBlock *VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E,
+ AddStmtChoice asc);
+ CFGBlock *VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *C,
+ AddStmtChoice asc);
+ CFGBlock *VisitCallExpr(CallExpr *C, AddStmtChoice asc);
+ CFGBlock *VisitCaseStmt(CaseStmt *C);
+ CFGBlock *VisitChooseExpr(ChooseExpr *C, AddStmtChoice asc);
+ CFGBlock *VisitCompoundStmt(CompoundStmt *C);
+ CFGBlock *VisitConditionalOperator(AbstractConditionalOperator *C,
+ AddStmtChoice asc);
+ CFGBlock *VisitContinueStmt(ContinueStmt *C);
+ CFGBlock *VisitDeclStmt(DeclStmt *DS);
+ CFGBlock *VisitDeclSubExpr(DeclStmt *DS);
+ CFGBlock *VisitDefaultStmt(DefaultStmt *D);
+ CFGBlock *VisitDoStmt(DoStmt *D);
+ CFGBlock *VisitLambdaExpr(LambdaExpr *E, AddStmtChoice asc);
+ CFGBlock *VisitForStmt(ForStmt *F);
+ CFGBlock *VisitGotoStmt(GotoStmt *G);
+ CFGBlock *VisitIfStmt(IfStmt *I);
+ CFGBlock *VisitImplicitCastExpr(ImplicitCastExpr *E, AddStmtChoice asc);
+ CFGBlock *VisitIndirectGotoStmt(IndirectGotoStmt *I);
+ CFGBlock *VisitLabelStmt(LabelStmt *L);
+ CFGBlock *VisitLambdaExpr(LambdaExpr *L);
+ CFGBlock *VisitMemberExpr(MemberExpr *M, AddStmtChoice asc);
+ CFGBlock *VisitObjCAtCatchStmt(ObjCAtCatchStmt *S);
+ CFGBlock *VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S);
+ CFGBlock *VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S);
+ CFGBlock *VisitObjCAtThrowStmt(ObjCAtThrowStmt *S);
+ CFGBlock *VisitObjCAtTryStmt(ObjCAtTryStmt *S);
+ CFGBlock *VisitObjCForCollectionStmt(ObjCForCollectionStmt *S);
+ CFGBlock *VisitReturnStmt(ReturnStmt *R);
+ CFGBlock *VisitPseudoObjectExpr(PseudoObjectExpr *E);
+ CFGBlock *VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E,
+ AddStmtChoice asc);
+ CFGBlock *VisitStmtExpr(StmtExpr *S, AddStmtChoice asc);
+ CFGBlock *VisitSwitchStmt(SwitchStmt *S);
+ CFGBlock *VisitUnaryOperator(UnaryOperator *U, AddStmtChoice asc);
+ CFGBlock *VisitWhileStmt(WhileStmt *W);
+
+ CFGBlock *Visit(Stmt *S, AddStmtChoice asc = AddStmtChoice::NotAlwaysAdd);
+ CFGBlock *VisitStmt(Stmt *S, AddStmtChoice asc);
+ CFGBlock *VisitChildren(Stmt *S);
+ CFGBlock *VisitNoRecurse(Expr *E, AddStmtChoice asc);
+
+ // Visitors to walk an AST and generate destructors of temporaries in
+ // full expression.
+ CFGBlock *VisitForTemporaryDtors(Stmt *E, bool BindToTemporary = false);
+ CFGBlock *VisitChildrenForTemporaryDtors(Stmt *E);
+ CFGBlock *VisitBinaryOperatorForTemporaryDtors(BinaryOperator *E);
+ CFGBlock *VisitCXXBindTemporaryExprForTemporaryDtors(CXXBindTemporaryExpr *E,
+ bool BindToTemporary);
+ CFGBlock *
+ VisitConditionalOperatorForTemporaryDtors(AbstractConditionalOperator *E,
+ bool BindToTemporary);
+
+ // NYS == Not Yet Supported
+ CFGBlock *NYS() {
+ badCFG = true;
+ return Block;
+ }
+
+ void autoCreateBlock() { if (!Block) Block = createBlock(); }
+ CFGBlock *createBlock(bool add_successor = true);
+ CFGBlock *createNoReturnBlock();
+
+ CFGBlock *addStmt(Stmt *S) {
+ return Visit(S, AddStmtChoice::AlwaysAdd);
+ }
+ CFGBlock *addInitializer(CXXCtorInitializer *I);
+ void addAutomaticObjDtors(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt *S);
+ void addImplicitDtorsForDestructor(const CXXDestructorDecl *DD);
+
+ // Local scopes creation.
+ LocalScope* createOrReuseLocalScope(LocalScope* Scope);
+
+ void addLocalScopeForStmt(Stmt *S);
+ LocalScope* addLocalScopeForDeclStmt(DeclStmt *DS, LocalScope* Scope = NULL);
+ LocalScope* addLocalScopeForVarDecl(VarDecl *VD, LocalScope* Scope = NULL);
+
+ void addLocalScopeAndDtors(Stmt *S);
+
+ // Interface to CFGBlock - adding CFGElements.
+ void appendStmt(CFGBlock *B, const Stmt *S) {
+ if (alwaysAdd(S) && cachedEntry)
+ cachedEntry->second = B;
+
+ // All block-level expressions should have already been IgnoreParens()ed.
+ assert(!isa<Expr>(S) || cast<Expr>(S)->IgnoreParens() == S);
+ B->appendStmt(const_cast<Stmt*>(S), cfg->getBumpVectorContext());
+ }
+ void appendInitializer(CFGBlock *B, CXXCtorInitializer *I) {
+ B->appendInitializer(I, cfg->getBumpVectorContext());
+ }
+ void appendBaseDtor(CFGBlock *B, const CXXBaseSpecifier *BS) {
+ B->appendBaseDtor(BS, cfg->getBumpVectorContext());
+ }
+ void appendMemberDtor(CFGBlock *B, FieldDecl *FD) {
+ B->appendMemberDtor(FD, cfg->getBumpVectorContext());
+ }
+ void appendTemporaryDtor(CFGBlock *B, CXXBindTemporaryExpr *E) {
+ B->appendTemporaryDtor(E, cfg->getBumpVectorContext());
+ }
+ void appendAutomaticObjDtor(CFGBlock *B, VarDecl *VD, Stmt *S) {
+ B->appendAutomaticObjDtor(VD, S, cfg->getBumpVectorContext());
+ }
+
+ void prependAutomaticObjDtorsWithTerminator(CFGBlock *Blk,
+ LocalScope::const_iterator B, LocalScope::const_iterator E);
+
+ void addSuccessor(CFGBlock *B, CFGBlock *S) {
+ B->addSuccessor(S, cfg->getBumpVectorContext());
+ }
+
+  /// Try to evaluate an expression to an integer constant.
+ bool tryEvaluate(Expr *S, Expr::EvalResult &outResult) {
+ if (!BuildOpts.PruneTriviallyFalseEdges)
+ return false;
+ return !S->isTypeDependent() &&
+ !S->isValueDependent() &&
+ S->EvaluateAsRValue(outResult, *Context);
+ }
+
+  /// tryEvaluateBool - Try to evaluate the expression and return a known
+  /// true/false TryResult if possible; otherwise return an unknown TryResult.
+ TryResult tryEvaluateBool(Expr *S) {
+ if (!BuildOpts.PruneTriviallyFalseEdges ||
+ S->isTypeDependent() || S->isValueDependent())
+ return TryResult();
+
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(S)) {
+ if (Bop->isLogicalOp()) {
+ // Check the cache first.
+ CachedBoolEvalsTy::iterator I = CachedBoolEvals.find(S);
+ if (I != CachedBoolEvals.end())
+ return I->second; // already in map;
+
+ // Retrieve result at first, or the map might be updated.
+ TryResult Result = evaluateAsBooleanConditionNoCache(S);
+ CachedBoolEvals[S] = Result; // update or insert
+ return Result;
+ }
+ }
+
+ return evaluateAsBooleanConditionNoCache(S);
+ }
+
+  /// \brief Evaluate \p E as a boolean condition, without using the cache.
+ TryResult evaluateAsBooleanConditionNoCache(Expr *E) {
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(E)) {
+ if (Bop->isLogicalOp()) {
+ TryResult LHS = tryEvaluateBool(Bop->getLHS());
+ if (LHS.isKnown()) {
+ // We were able to evaluate the LHS, see if we can get away with not
+ // evaluating the RHS: 0 && X -> 0, 1 || X -> 1
+ if (LHS.isTrue() == (Bop->getOpcode() == BO_LOr))
+ return LHS.isTrue();
+
+ TryResult RHS = tryEvaluateBool(Bop->getRHS());
+ if (RHS.isKnown()) {
+ if (Bop->getOpcode() == BO_LOr)
+ return LHS.isTrue() || RHS.isTrue();
+ else
+ return LHS.isTrue() && RHS.isTrue();
+ }
+ } else {
+ TryResult RHS = tryEvaluateBool(Bop->getRHS());
+ if (RHS.isKnown()) {
+ // We can't evaluate the LHS; however, sometimes the result
+ // is determined by the RHS: X && 0 -> 0, X || 1 -> 1.
+ if (RHS.isTrue() == (Bop->getOpcode() == BO_LOr))
+ return RHS.isTrue();
+ }
+ }
+
+ return TryResult();
+ }
+ }
+
+ bool Result;
+ if (E->EvaluateAsBooleanCondition(Result, *Context))
+ return Result;
+
+ return TryResult();
+ }
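+
+  // Illustrative example (not from the original source): for a condition such
+  // as `x == 1 || 1`, the LHS is unknown but the RHS folds to true, so the
+  // whole disjunction is known true and the untaken edge can be pruned.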
+
+};
+
+inline bool AddStmtChoice::alwaysAdd(CFGBuilder &builder,
+ const Stmt *stmt) const {
+ return builder.alwaysAdd(stmt) || kind == AlwaysAdd;
+}
+
+bool CFGBuilder::alwaysAdd(const Stmt *stmt) {
+ bool shouldAdd = BuildOpts.alwaysAdd(stmt);
+
+ if (!BuildOpts.forcedBlkExprs)
+ return shouldAdd;
+
+ if (lastLookup == stmt) {
+ if (cachedEntry) {
+ assert(cachedEntry->first == stmt);
+ return true;
+ }
+ return shouldAdd;
+ }
+
+ lastLookup = stmt;
+
+ // Perform the lookup!
+ CFG::BuildOptions::ForcedBlkExprs *fb = *BuildOpts.forcedBlkExprs;
+
+ if (!fb) {
+ // No need to update 'cachedEntry', since it will always be null.
+ assert(cachedEntry == 0);
+ return shouldAdd;
+ }
+
+ CFG::BuildOptions::ForcedBlkExprs::iterator itr = fb->find(stmt);
+ if (itr == fb->end()) {
+ cachedEntry = 0;
+ return shouldAdd;
+ }
+
+ cachedEntry = &*itr;
+ return true;
+}
+
+// FIXME: Add support for dependent-sized array types in C++?
+// Does it even make sense to build a CFG for an uninstantiated template?
+static const VariableArrayType *FindVA(const Type *t) {
+ while (const ArrayType *vt = dyn_cast<ArrayType>(t)) {
+ if (const VariableArrayType *vat = dyn_cast<VariableArrayType>(vt))
+ if (vat->getSizeExpr())
+ return vat;
+
+ t = vt->getElementType().getTypePtr();
+ }
+
+ return 0;
+}
+
+/// BuildCFG - Constructs a CFG from an AST (a Stmt*). The AST can represent an
+/// arbitrary statement. Examples include a single expression or a function
+/// body (compound statement). The ownership of the returned CFG is
+/// transferred to the caller. If CFG construction fails, this method returns
+/// NULL.
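+///
+/// External clients typically reach this through the static CFG::buildCFG
+/// wrapper; for example (mirroring AnalysisDeclContext::getCFG earlier in this
+/// change):
+///
+///   cfg.reset(CFG::buildCFG(D, getBody(), &D->getASTContext(),
+///                           cfgBuildOptions));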
+CFG* CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
+ assert(cfg.get());
+ if (!Statement)
+ return NULL;
+
+ // Create an empty block that will serve as the exit block for the CFG. Since
+ // this is the first block added to the CFG, it will be implicitly registered
+ // as the exit block.
+ Succ = createBlock();
+ assert(Succ == &cfg->getExit());
+ Block = NULL; // the EXIT block is empty. Create all other blocks lazily.
+
+ if (BuildOpts.AddImplicitDtors)
+ if (const CXXDestructorDecl *DD = dyn_cast_or_null<CXXDestructorDecl>(D))
+ addImplicitDtorsForDestructor(DD);
+
+ // Visit the statements and create the CFG.
+ CFGBlock *B = addStmt(Statement);
+
+ if (badCFG)
+ return NULL;
+
+ // For C++ constructor add initializers to CFG.
+ if (const CXXConstructorDecl *CD = dyn_cast_or_null<CXXConstructorDecl>(D)) {
+ for (CXXConstructorDecl::init_const_reverse_iterator I = CD->init_rbegin(),
+ E = CD->init_rend(); I != E; ++I) {
+ B = addInitializer(*I);
+ if (badCFG)
+ return NULL;
+ }
+ }
+
+ if (B)
+ Succ = B;
+
+ // Backpatch the gotos whose label -> block mappings we didn't know when we
+ // encountered them.
+ for (BackpatchBlocksTy::iterator I = BackpatchBlocks.begin(),
+ E = BackpatchBlocks.end(); I != E; ++I ) {
+
+ CFGBlock *B = I->block;
+ GotoStmt *G = cast<GotoStmt>(B->getTerminator());
+ LabelMapTy::iterator LI = LabelMap.find(G->getLabel());
+
+ // If there is no target for the goto, then we are looking at an
+ // incomplete AST. Handle this by not registering a successor.
+ if (LI == LabelMap.end()) continue;
+
+ JumpTarget JT = LI->second;
+ prependAutomaticObjDtorsWithTerminator(B, I->scopePosition,
+ JT.scopePosition);
+ addSuccessor(B, JT.block);
+ }
+
+ // Add successors to the Indirect Goto Dispatch block (if we have one).
+ if (CFGBlock *B = cfg->getIndirectGotoBlock())
+ for (LabelSetTy::iterator I = AddressTakenLabels.begin(),
+ E = AddressTakenLabels.end(); I != E; ++I ) {
+
+ // Lookup the target block.
+ LabelMapTy::iterator LI = LabelMap.find(*I);
+
+ // If there is no target block that contains label, then we are looking
+ // at an incomplete AST. Handle this by not registering a successor.
+ if (LI == LabelMap.end()) continue;
+
+ addSuccessor(B, LI->second.block);
+ }
+
+ // Create an empty entry block that has no predecessors.
+ cfg->setEntry(createBlock());
+
+ return cfg.take();
+}
+
+/// createBlock - Used to lazily create blocks that are connected
+/// to the current (global) successor.
+CFGBlock *CFGBuilder::createBlock(bool add_successor) {
+ CFGBlock *B = cfg->createBlock();
+ if (add_successor && Succ)
+ addSuccessor(B, Succ);
+ return B;
+}
+
+/// createNoReturnBlock - Used to create a block that is a 'noreturn' point in
+/// the CFG. It is *not* connected to the current (global) successor; instead
+/// it is tied directly to the exit block so that it remains reachable.
+CFGBlock *CFGBuilder::createNoReturnBlock() {
+ CFGBlock *B = createBlock(false);
+ B->setHasNoReturnElement();
+ addSuccessor(B, &cfg->getExit());
+ return B;
+}
+
+/// addInitializer - Add C++ base or member initializer element to CFG.
+CFGBlock *CFGBuilder::addInitializer(CXXCtorInitializer *I) {
+ if (!BuildOpts.AddInitializers)
+ return Block;
+
+ bool IsReference = false;
+ bool HasTemporaries = false;
+
+ // Destructors of temporaries in initialization expression should be called
+ // after initialization finishes.
+ Expr *Init = I->getInit();
+ if (Init) {
+ if (FieldDecl *FD = I->getAnyMember())
+ IsReference = FD->getType()->isReferenceType();
+ HasTemporaries = isa<ExprWithCleanups>(Init);
+
+ if (BuildOpts.AddImplicitDtors && HasTemporaries) {
+ // Generate destructors for temporaries in initialization expression.
+ VisitForTemporaryDtors(cast<ExprWithCleanups>(Init)->getSubExpr(),
+ IsReference);
+ }
+ }
+
+ autoCreateBlock();
+ appendInitializer(Block, I);
+
+ if (Init) {
+ if (HasTemporaries) {
+      // For an expression with temporaries, go directly to the subexpression
+      // so that destructors are not generated a second time.
+ return Visit(cast<ExprWithCleanups>(Init)->getSubExpr());
+ }
+ return Visit(Init);
+ }
+
+ return Block;
+}
+
+/// \brief Retrieve the type of the temporary object whose lifetime was
+/// extended by a local reference with the given initializer.
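+///
+/// For example (an illustration, not from the original comment): given
+///
+///   const Base &r = Derived();
+///
+/// the initializer is unwrapped through the cleanup, temporary-materialization
+/// and derived-to-base cast nodes, and the returned type is the Derived
+/// temporary's type, i.e. the type whose destructor must eventually run.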
+static QualType getReferenceInitTemporaryType(ASTContext &Context,
+ const Expr *Init) {
+ while (true) {
+ // Skip parentheses.
+ Init = Init->IgnoreParens();
+
+ // Skip through cleanups.
+ if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Init)) {
+ Init = EWC->getSubExpr();
+ continue;
+ }
+
+ // Skip through the temporary-materialization expression.
+ if (const MaterializeTemporaryExpr *MTE
+ = dyn_cast<MaterializeTemporaryExpr>(Init)) {
+ Init = MTE->GetTemporaryExpr();
+ continue;
+ }
+
+ // Skip derived-to-base and no-op casts.
+ if (const CastExpr *CE = dyn_cast<CastExpr>(Init)) {
+ if ((CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase ||
+ CE->getCastKind() == CK_NoOp) &&
+ Init->getType()->isRecordType()) {
+ Init = CE->getSubExpr();
+ continue;
+ }
+ }
+
+ // Skip member accesses into rvalues.
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(Init)) {
+ if (!ME->isArrow() && ME->getBase()->isRValue()) {
+ Init = ME->getBase();
+ continue;
+ }
+ }
+
+ break;
+ }
+
+ return Init->getType();
+}
+
+/// addAutomaticObjDtors - Add to the current block the destructors of
+/// automatic objects in the given range of local scope positions. Use S as
+/// the trigger statement for the destructors.
+void CFGBuilder::addAutomaticObjDtors(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt *S) {
+ if (!BuildOpts.AddImplicitDtors)
+ return;
+
+ if (B == E)
+ return;
+
+ // We need to append the destructors in reverse order, but any one of them
+ // may be a no-return destructor which changes the CFG. As a result, buffer
+ // this sequence up and replay them in reverse order when appending onto the
+ // CFGBlock(s).
+ SmallVector<VarDecl*, 10> Decls;
+ Decls.reserve(B.distance(E));
+ for (LocalScope::const_iterator I = B; I != E; ++I)
+ Decls.push_back(*I);
+
+ for (SmallVectorImpl<VarDecl*>::reverse_iterator I = Decls.rbegin(),
+ E = Decls.rend();
+ I != E; ++I) {
+ // If this destructor is marked as a no-return destructor, we need to
+ // create a new block for the destructor which does not have as a successor
+ // anything built thus far: control won't flow out of this block.
+ QualType Ty;
+ if ((*I)->getType()->isReferenceType()) {
+ Ty = getReferenceInitTemporaryType(*Context, (*I)->getInit());
+ } else {
+ Ty = Context->getBaseElementType((*I)->getType());
+ }
+
+ const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
+ if (cast<FunctionType>(Dtor->getType())->getNoReturnAttr())
+ Block = createNoReturnBlock();
+ else
+ autoCreateBlock();
+
+ appendAutomaticObjDtor(Block, *I, S);
+ }
+}
+
+/// addImplicitDtorsForDestructor - Add the implicit destructors generated for
+/// base and member objects in a destructor.
+void CFGBuilder::addImplicitDtorsForDestructor(const CXXDestructorDecl *DD) {
+ assert (BuildOpts.AddImplicitDtors
+ && "Can be called only when dtors should be added");
+ const CXXRecordDecl *RD = DD->getParent();
+
+ // At the end destroy virtual base objects.
+ for (CXXRecordDecl::base_class_const_iterator VI = RD->vbases_begin(),
+ VE = RD->vbases_end(); VI != VE; ++VI) {
+ const CXXRecordDecl *CD = VI->getType()->getAsCXXRecordDecl();
+ if (!CD->hasTrivialDestructor()) {
+ autoCreateBlock();
+ appendBaseDtor(Block, VI);
+ }
+ }
+
+ // Before virtual bases destroy direct base objects.
+ for (CXXRecordDecl::base_class_const_iterator BI = RD->bases_begin(),
+ BE = RD->bases_end(); BI != BE; ++BI) {
+ if (!BI->isVirtual()) {
+ const CXXRecordDecl *CD = BI->getType()->getAsCXXRecordDecl();
+ if (!CD->hasTrivialDestructor()) {
+ autoCreateBlock();
+ appendBaseDtor(Block, BI);
+ }
+ }
+ }
+
+ // First destroy member objects.
+ for (CXXRecordDecl::field_iterator FI = RD->field_begin(),
+ FE = RD->field_end(); FI != FE; ++FI) {
+ // Check for constant size array. Set type to array element type.
+ QualType QT = FI->getType();
+ if (const ConstantArrayType *AT = Context->getAsConstantArrayType(QT)) {
+ if (AT->getSize() == 0)
+ continue;
+ QT = AT->getElementType();
+ }
+
+ if (const CXXRecordDecl *CD = QT->getAsCXXRecordDecl())
+ if (!CD->hasTrivialDestructor()) {
+ autoCreateBlock();
+ appendMemberDtor(Block, *FI);
+ }
+ }
+}
+
+/// createOrReuseLocalScope - If Scope is NULL, create a new LocalScope. Either
+/// way, return a valid LocalScope object.
+LocalScope* CFGBuilder::createOrReuseLocalScope(LocalScope* Scope) {
+ if (!Scope) {
+ llvm::BumpPtrAllocator &alloc = cfg->getAllocator();
+ Scope = alloc.Allocate<LocalScope>();
+ BumpVectorContext ctx(alloc);
+ new (Scope) LocalScope(ctx, ScopePos);
+ }
+ return Scope;
+}
+
+/// addLocalScopeForStmt - Add a LocalScope to the local-scopes tree for a
+/// statement that should create an implicit scope (e.g. if/else substatements).
+void CFGBuilder::addLocalScopeForStmt(Stmt *S) {
+ if (!BuildOpts.AddImplicitDtors)
+ return;
+
+ LocalScope *Scope = 0;
+
+  // For a compound statement we create an explicit scope.
+ if (CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
+ for (CompoundStmt::body_iterator BI = CS->body_begin(), BE = CS->body_end()
+ ; BI != BE; ++BI) {
+ Stmt *SI = (*BI)->stripLabelLikeStatements();
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(SI))
+ Scope = addLocalScopeForDeclStmt(DS, Scope);
+ }
+ return;
+ }
+
+  // For any other statement the scope is implicit and is therefore only
+  // interesting for a DeclStmt.
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(S->stripLabelLikeStatements()))
+ addLocalScopeForDeclStmt(DS);
+}
+
+/// addLocalScopeForDeclStmt - Add a LocalScope for a declaration statement.
+/// Reuses Scope if it is not NULL.
+LocalScope* CFGBuilder::addLocalScopeForDeclStmt(DeclStmt *DS,
+ LocalScope* Scope) {
+ if (!BuildOpts.AddImplicitDtors)
+ return Scope;
+
+ for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end()
+ ; DI != DE; ++DI) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(*DI))
+ Scope = addLocalScopeForVarDecl(VD, Scope);
+ }
+ return Scope;
+}
+
+/// addLocalScopeForVarDecl - Add a LocalScope for a variable declaration. It
+/// creates a scope for automatic objects and for temporary objects bound to a
+/// const reference. Reuses Scope if it is not NULL.
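+///
+/// For illustration (not from the original comment): `Foo f;` gets a scope
+/// entry when Foo has a non-trivial destructor, `static Foo g;` and `Foo *p;`
+/// do not, and `const Foo &r = makeFoo();` does because the reference extends
+/// the lifetime of the temporary.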
+LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl *VD,
+ LocalScope* Scope) {
+ if (!BuildOpts.AddImplicitDtors)
+ return Scope;
+
+ // Check if variable is local.
+ switch (VD->getStorageClass()) {
+ case SC_None:
+ case SC_Auto:
+ case SC_Register:
+ break;
+ default: return Scope;
+ }
+
+ // Check for const references bound to temporary. Set type to pointee.
+ QualType QT = VD->getType();
+ if (QT.getTypePtr()->isReferenceType()) {
+ if (!VD->extendsLifetimeOfTemporary())
+ return Scope;
+
+ QT = getReferenceInitTemporaryType(*Context, VD->getInit());
+ }
+
+ // Check for constant size array. Set type to array element type.
+ while (const ConstantArrayType *AT = Context->getAsConstantArrayType(QT)) {
+ if (AT->getSize() == 0)
+ return Scope;
+ QT = AT->getElementType();
+ }
+
+ // Check if type is a C++ class with non-trivial destructor.
+ if (const CXXRecordDecl *CD = QT->getAsCXXRecordDecl())
+ if (!CD->hasTrivialDestructor()) {
+ // Add the variable to scope
+ Scope = createOrReuseLocalScope(Scope);
+ Scope->addVar(VD);
+ ScopePos = Scope->begin();
+ }
+ return Scope;
+}
+
+/// addLocalScopeAndDtors - For the given statement, add a local scope and the
+/// destructors that clean up that scope.
+void CFGBuilder::addLocalScopeAndDtors(Stmt *S) {
+ if (!BuildOpts.AddImplicitDtors)
+ return;
+
+ LocalScope::const_iterator scopeBeginPos = ScopePos;
+ addLocalScopeForStmt(S);
+ addAutomaticObjDtors(ScopePos, scopeBeginPos, S);
+}
+
+/// prependAutomaticObjDtorsWithTerminator - Prepend destructor CFGElements for
+/// variables with automatic storage duration to the CFGBlock's elements vector.
+/// Elements are prepended to the physical beginning of the vector, which
+/// happens to be the logical end. Use the block's terminator as the statement
+/// that specifies the destructors' call site.
+/// FIXME: This mechanism for adding automatic destructors doesn't handle
+/// no-return destructors properly.
+void CFGBuilder::prependAutomaticObjDtorsWithTerminator(CFGBlock *Blk,
+ LocalScope::const_iterator B, LocalScope::const_iterator E) {
+ BumpVectorContext &C = cfg->getBumpVectorContext();
+ CFGBlock::iterator InsertPos
+ = Blk->beginAutomaticObjDtorsInsert(Blk->end(), B.distance(E), C);
+ for (LocalScope::const_iterator I = B; I != E; ++I)
+ InsertPos = Blk->insertAutomaticObjDtor(InsertPos, *I,
+ Blk->getTerminator());
+}
+
+/// Visit - Walk the subtree of a statement and add extra
+/// blocks for ternary operators, &&, and ||. We also process "," and
+/// DeclStmts (which may contain nested control-flow).
+CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
+ if (!S) {
+ badCFG = true;
+ return 0;
+ }
+
+ if (Expr *E = dyn_cast<Expr>(S))
+ S = E->IgnoreParens();
+
+ switch (S->getStmtClass()) {
+ default:
+ return VisitStmt(S, asc);
+
+ case Stmt::AddrLabelExprClass:
+ return VisitAddrLabelExpr(cast<AddrLabelExpr>(S), asc);
+
+ case Stmt::BinaryConditionalOperatorClass:
+ return VisitConditionalOperator(cast<BinaryConditionalOperator>(S), asc);
+
+ case Stmt::BinaryOperatorClass:
+ return VisitBinaryOperator(cast<BinaryOperator>(S), asc);
+
+ case Stmt::BlockExprClass:
+ return VisitNoRecurse(cast<Expr>(S), asc);
+
+ case Stmt::BreakStmtClass:
+ return VisitBreakStmt(cast<BreakStmt>(S));
+
+ case Stmt::CallExprClass:
+ case Stmt::CXXOperatorCallExprClass:
+ case Stmt::CXXMemberCallExprClass:
+ case Stmt::UserDefinedLiteralClass:
+ return VisitCallExpr(cast<CallExpr>(S), asc);
+
+ case Stmt::CaseStmtClass:
+ return VisitCaseStmt(cast<CaseStmt>(S));
+
+ case Stmt::ChooseExprClass:
+ return VisitChooseExpr(cast<ChooseExpr>(S), asc);
+
+ case Stmt::CompoundStmtClass:
+ return VisitCompoundStmt(cast<CompoundStmt>(S));
+
+ case Stmt::ConditionalOperatorClass:
+ return VisitConditionalOperator(cast<ConditionalOperator>(S), asc);
+
+ case Stmt::ContinueStmtClass:
+ return VisitContinueStmt(cast<ContinueStmt>(S));
+
+ case Stmt::CXXCatchStmtClass:
+ return VisitCXXCatchStmt(cast<CXXCatchStmt>(S));
+
+ case Stmt::ExprWithCleanupsClass:
+ return VisitExprWithCleanups(cast<ExprWithCleanups>(S), asc);
+
+ case Stmt::CXXBindTemporaryExprClass:
+ return VisitCXXBindTemporaryExpr(cast<CXXBindTemporaryExpr>(S), asc);
+
+ case Stmt::CXXConstructExprClass:
+ return VisitCXXConstructExpr(cast<CXXConstructExpr>(S), asc);
+
+ case Stmt::CXXFunctionalCastExprClass:
+ return VisitCXXFunctionalCastExpr(cast<CXXFunctionalCastExpr>(S), asc);
+
+ case Stmt::CXXTemporaryObjectExprClass:
+ return VisitCXXTemporaryObjectExpr(cast<CXXTemporaryObjectExpr>(S), asc);
+
+ case Stmt::CXXThrowExprClass:
+ return VisitCXXThrowExpr(cast<CXXThrowExpr>(S));
+
+ case Stmt::CXXTryStmtClass:
+ return VisitCXXTryStmt(cast<CXXTryStmt>(S));
+
+ case Stmt::CXXForRangeStmtClass:
+ return VisitCXXForRangeStmt(cast<CXXForRangeStmt>(S));
+
+ case Stmt::DeclStmtClass:
+ return VisitDeclStmt(cast<DeclStmt>(S));
+
+ case Stmt::DefaultStmtClass:
+ return VisitDefaultStmt(cast<DefaultStmt>(S));
+
+ case Stmt::DoStmtClass:
+ return VisitDoStmt(cast<DoStmt>(S));
+
+ case Stmt::ForStmtClass:
+ return VisitForStmt(cast<ForStmt>(S));
+
+ case Stmt::GotoStmtClass:
+ return VisitGotoStmt(cast<GotoStmt>(S));
+
+ case Stmt::IfStmtClass:
+ return VisitIfStmt(cast<IfStmt>(S));
+
+ case Stmt::ImplicitCastExprClass:
+ return VisitImplicitCastExpr(cast<ImplicitCastExpr>(S), asc);
+
+ case Stmt::IndirectGotoStmtClass:
+ return VisitIndirectGotoStmt(cast<IndirectGotoStmt>(S));
+
+ case Stmt::LabelStmtClass:
+ return VisitLabelStmt(cast<LabelStmt>(S));
+
+ case Stmt::LambdaExprClass:
+ return VisitLambdaExpr(cast<LambdaExpr>(S), asc);
+
+ case Stmt::MemberExprClass:
+ return VisitMemberExpr(cast<MemberExpr>(S), asc);
+
+ case Stmt::NullStmtClass:
+ return Block;
+
+ case Stmt::ObjCAtCatchStmtClass:
+ return VisitObjCAtCatchStmt(cast<ObjCAtCatchStmt>(S));
+
+ case Stmt::ObjCAutoreleasePoolStmtClass:
+ return VisitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(S));
+
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ return VisitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(S));
+
+ case Stmt::ObjCAtThrowStmtClass:
+ return VisitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(S));
+
+ case Stmt::ObjCAtTryStmtClass:
+ return VisitObjCAtTryStmt(cast<ObjCAtTryStmt>(S));
+
+ case Stmt::ObjCForCollectionStmtClass:
+ return VisitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(S));
+
+ case Stmt::OpaqueValueExprClass:
+ return Block;
+
+ case Stmt::PseudoObjectExprClass:
+ return VisitPseudoObjectExpr(cast<PseudoObjectExpr>(S));
+
+ case Stmt::ReturnStmtClass:
+ return VisitReturnStmt(cast<ReturnStmt>(S));
+
+ case Stmt::UnaryExprOrTypeTraitExprClass:
+ return VisitUnaryExprOrTypeTraitExpr(cast<UnaryExprOrTypeTraitExpr>(S),
+ asc);
+
+ case Stmt::StmtExprClass:
+ return VisitStmtExpr(cast<StmtExpr>(S), asc);
+
+ case Stmt::SwitchStmtClass:
+ return VisitSwitchStmt(cast<SwitchStmt>(S));
+
+ case Stmt::UnaryOperatorClass:
+ return VisitUnaryOperator(cast<UnaryOperator>(S), asc);
+
+ case Stmt::WhileStmtClass:
+ return VisitWhileStmt(cast<WhileStmt>(S));
+ }
+}
+
+CFGBlock *CFGBuilder::VisitStmt(Stmt *S, AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, S)) {
+ autoCreateBlock();
+ appendStmt(Block, S);
+ }
+
+ return VisitChildren(S);
+}
+
+/// VisitChildren - Visit the children of a Stmt.
+CFGBlock *CFGBuilder::VisitChildren(Stmt *Terminator) {
+ CFGBlock *lastBlock = Block;
+ for (Stmt::child_range I = Terminator->children(); I; ++I)
+ if (Stmt *child = *I)
+ if (CFGBlock *b = Visit(child))
+ lastBlock = b;
+
+ return lastBlock;
+}
+
+CFGBlock *CFGBuilder::VisitAddrLabelExpr(AddrLabelExpr *A,
+ AddStmtChoice asc) {
+ AddressTakenLabels.insert(A->getLabel());
+
+ if (asc.alwaysAdd(*this, A)) {
+ autoCreateBlock();
+ appendStmt(Block, A);
+ }
+
+ return Block;
+}
+
+CFGBlock *CFGBuilder::VisitUnaryOperator(UnaryOperator *U,
+ AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, U)) {
+ autoCreateBlock();
+ appendStmt(Block, U);
+ }
+
+ return Visit(U->getSubExpr(), AddStmtChoice());
+}
+
+CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B,
+ AddStmtChoice asc) {
+ if (B->isLogicalOp()) { // && or ||
+ CFGBlock *ConfluenceBlock = Block ? Block : createBlock();
+ appendStmt(ConfluenceBlock, B);
+
+ if (badCFG)
+ return 0;
+
+ // create the block evaluating the LHS
+ CFGBlock *LHSBlock = createBlock(false);
+ LHSBlock->setTerminator(B);
+
+ // create the block evaluating the RHS
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ CFGBlock *RHSBlock = addStmt(B->getRHS());
+
+ if (RHSBlock) {
+ if (badCFG)
+ return 0;
+ } else {
+ // Create an empty block for cases where the RHS doesn't require
+ // any explicit statements in the CFG.
+ RHSBlock = createBlock();
+ }
+
+ // Generate the blocks for evaluating the LHS.
+ Block = LHSBlock;
+ CFGBlock *EntryLHSBlock = addStmt(B->getLHS());
+
+ // See if this is a known constant.
+ TryResult KnownVal = tryEvaluateBool(B->getLHS());
+ if (KnownVal.isKnown() && (B->getOpcode() == BO_LOr))
+ KnownVal.negate();
+
+ // Now link the LHSBlock with RHSBlock.
+ if (B->getOpcode() == BO_LOr) {
+ addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : ConfluenceBlock);
+ addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
+ } else {
+ assert(B->getOpcode() == BO_LAnd);
+ addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
+ addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : ConfluenceBlock);
+ }
+
+ return EntryLHSBlock;
+ }
+
+ if (B->getOpcode() == BO_Comma) { // ,
+ autoCreateBlock();
+ appendStmt(Block, B);
+ addStmt(B->getRHS());
+ return addStmt(B->getLHS());
+ }
+
+ if (B->isAssignmentOp()) {
+ if (asc.alwaysAdd(*this, B)) {
+ autoCreateBlock();
+ appendStmt(Block, B);
+ }
+ Visit(B->getLHS());
+ return Visit(B->getRHS());
+ }
+
+ if (asc.alwaysAdd(*this, B)) {
+ autoCreateBlock();
+ appendStmt(Block, B);
+ }
+
+ CFGBlock *RBlock = Visit(B->getRHS());
+ CFGBlock *LBlock = Visit(B->getLHS());
+ // If visiting RHS causes us to finish 'Block', e.g. the RHS is a StmtExpr
+ // containing a DoStmt, and the LHS doesn't create a new block, then we should
+ // return RBlock. Otherwise we'll incorrectly return NULL.
+ return (LBlock ? LBlock : RBlock);
+}
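+
+// As a rough sketch of what the construction above builds for 'x && y'
+// (block names refer to the locals above): LHSBlock evaluates 'x' and is
+// terminated by the '&&'; its first (true) successor is RHSBlock, which
+// evaluates 'y' and falls into ConfluenceBlock, and its second (false)
+// successor is ConfluenceBlock directly, modeling the short-circuit.  For
+// 'x || y' the roles are swapped: true goes straight to ConfluenceBlock
+// and false continues to RHSBlock.  A statically known LHS prunes the dead
+// edge to NULL via tryEvaluateBool().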
+
+CFGBlock *CFGBuilder::VisitNoRecurse(Expr *E, AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, E)) {
+ autoCreateBlock();
+ appendStmt(Block, E);
+ }
+ return Block;
+}
+
+CFGBlock *CFGBuilder::VisitBreakStmt(BreakStmt *B) {
+ // "break" is a control-flow statement. Thus we stop processing the current
+ // block.
+ if (badCFG)
+ return 0;
+
+ // Now create a new block that ends with the break statement.
+ Block = createBlock(false);
+ Block->setTerminator(B);
+
+ // If there is no target for the break, then we are looking at an incomplete
+ // AST. This means that the CFG cannot be constructed.
+ if (BreakJumpTarget.block) {
+ addAutomaticObjDtors(ScopePos, BreakJumpTarget.scopePosition, B);
+ addSuccessor(Block, BreakJumpTarget.block);
+ } else
+ badCFG = true;
+
+ return Block;
+}
+
+static bool CanThrow(Expr *E, ASTContext &Ctx) {
+ QualType Ty = E->getType();
+ if (Ty->isFunctionPointerType())
+ Ty = Ty->getAs<PointerType>()->getPointeeType();
+ else if (Ty->isBlockPointerType())
+ Ty = Ty->getAs<BlockPointerType>()->getPointeeType();
+
+ const FunctionType *FT = Ty->getAs<FunctionType>();
+ if (FT) {
+ if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FT))
+ if (Proto->isNothrow(Ctx))
+ return false;
+ }
+ return true;
+}
+
+CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
+ // Compute the callee type.
+ QualType calleeType = C->getCallee()->getType();
+ if (calleeType == Context->BoundMemberTy) {
+ QualType boundType = Expr::findBoundMemberType(C->getCallee());
+
+ // We should only get a null bound type if processing a dependent
+ // CFG. Recover by assuming nothing.
+ if (!boundType.isNull()) calleeType = boundType;
+ }
+
+ // If this is a call to a no-return function, this stops the block here.
+ bool NoReturn = getFunctionExtInfo(*calleeType).getNoReturn();
+
+ bool AddEHEdge = false;
+
+ // Languages without exceptions are assumed to not throw.
+ if (Context->getLangOpts().Exceptions) {
+ if (BuildOpts.AddEHEdges)
+ AddEHEdge = true;
+ }
+
+ if (FunctionDecl *FD = C->getDirectCallee()) {
+ if (FD->hasAttr<NoReturnAttr>())
+ NoReturn = true;
+ if (FD->hasAttr<NoThrowAttr>())
+ AddEHEdge = false;
+ }
+
+ if (!CanThrow(C->getCallee(), *Context))
+ AddEHEdge = false;
+
+ if (!NoReturn && !AddEHEdge)
+ return VisitStmt(C, asc.withAlwaysAdd(true));
+
+ if (Block) {
+ Succ = Block;
+ if (badCFG)
+ return 0;
+ }
+
+ if (NoReturn)
+ Block = createNoReturnBlock();
+ else
+ Block = createBlock();
+
+ appendStmt(Block, C);
+
+ if (AddEHEdge) {
+ // Add exceptional edges.
+ if (TryTerminatedBlock)
+ addSuccessor(Block, TryTerminatedBlock);
+ else
+ addSuccessor(Block, &cfg->getExit());
+ }
+
+ return VisitChildren(C);
+}
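+
+// For illustration, with some hypothetical declarations:
+//   void fail(const char *) __attribute__((noreturn));
+//   void rest();
+//   void g(bool b) { if (b) fail("oops"); rest(); }
+// the block holding the call to fail() is created with createNoReturnBlock()
+// and does not fall through to the block containing rest().  When exceptions
+// are enabled and BuildOpts.AddEHEdges is set, a call that may throw also
+// gets an edge to the innermost try dispatch block, or to the exit block
+// when there is no enclosing try.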
+
+CFGBlock *CFGBuilder::VisitChooseExpr(ChooseExpr *C,
+ AddStmtChoice asc) {
+ CFGBlock *ConfluenceBlock = Block ? Block : createBlock();
+ appendStmt(ConfluenceBlock, C);
+ if (badCFG)
+ return 0;
+
+ AddStmtChoice alwaysAdd = asc.withAlwaysAdd(true);
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ CFGBlock *LHSBlock = Visit(C->getLHS(), alwaysAdd);
+ if (badCFG)
+ return 0;
+
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ CFGBlock *RHSBlock = Visit(C->getRHS(), alwaysAdd);
+ if (badCFG)
+ return 0;
+
+ Block = createBlock(false);
+ // See if this is a known constant.
+ const TryResult& KnownVal = tryEvaluateBool(C->getCond());
+ addSuccessor(Block, KnownVal.isFalse() ? NULL : LHSBlock);
+ addSuccessor(Block, KnownVal.isTrue() ? NULL : RHSBlock);
+ Block->setTerminator(C);
+ return addStmt(C->getCond());
+}
+
+
+CFGBlock *CFGBuilder::VisitCompoundStmt(CompoundStmt *C) {
+ addLocalScopeAndDtors(C);
+ CFGBlock *LastBlock = Block;
+
+ for (CompoundStmt::reverse_body_iterator I=C->body_rbegin(), E=C->body_rend();
+ I != E; ++I ) {
+ // If we hit a segment of code just containing ';' (NullStmts), we can
+ // get a null block back. In such cases, just use the LastBlock
+ if (CFGBlock *newBlock = addStmt(*I))
+ LastBlock = newBlock;
+
+ if (badCFG)
+ return NULL;
+ }
+
+ return LastBlock;
+}
+
+CFGBlock *CFGBuilder::VisitConditionalOperator(AbstractConditionalOperator *C,
+ AddStmtChoice asc) {
+ const BinaryConditionalOperator *BCO = dyn_cast<BinaryConditionalOperator>(C);
+ const OpaqueValueExpr *opaqueValue = (BCO ? BCO->getOpaqueValue() : NULL);
+
+ // Create the confluence block that will "merge" the results of the ternary
+ // expression.
+ CFGBlock *ConfluenceBlock = Block ? Block : createBlock();
+ appendStmt(ConfluenceBlock, C);
+ if (badCFG)
+ return 0;
+
+ AddStmtChoice alwaysAdd = asc.withAlwaysAdd(true);
+
+ // Create a block for the LHS expression if there is an LHS expression. A
+ // GCC extension allows LHS to be NULL, causing the condition to be the
+ // value that is returned instead.
+  // e.g., 'x ?: y' is shorthand for 'x ? x : y'.
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ CFGBlock *LHSBlock = 0;
+ const Expr *trueExpr = C->getTrueExpr();
+ if (trueExpr != opaqueValue) {
+ LHSBlock = Visit(C->getTrueExpr(), alwaysAdd);
+ if (badCFG)
+ return 0;
+ Block = NULL;
+ }
+ else
+ LHSBlock = ConfluenceBlock;
+
+ // Create the block for the RHS expression.
+ Succ = ConfluenceBlock;
+ CFGBlock *RHSBlock = Visit(C->getFalseExpr(), alwaysAdd);
+ if (badCFG)
+ return 0;
+
+ // Create the block that will contain the condition.
+ Block = createBlock(false);
+
+ // See if this is a known constant.
+ const TryResult& KnownVal = tryEvaluateBool(C->getCond());
+ addSuccessor(Block, KnownVal.isFalse() ? NULL : LHSBlock);
+ addSuccessor(Block, KnownVal.isTrue() ? NULL : RHSBlock);
+ Block->setTerminator(C);
+ Expr *condExpr = C->getCond();
+
+ if (opaqueValue) {
+ // Run the condition expression if it's not trivially expressed in
+ // terms of the opaque value (or if there is no opaque value).
+ if (condExpr != opaqueValue)
+ addStmt(condExpr);
+
+ // Before that, run the common subexpression if there was one.
+ // At least one of this or the above will be run.
+ return addStmt(BCO->getCommon());
+ }
+
+ return addStmt(condExpr);
+}
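+
+// Sketch of the result for 'c ? a : b': the block terminated by the
+// conditional evaluates 'c' and has two successors, the entry block for 'a'
+// and the entry block for 'b'; both arms flow into the confluence block
+// that carries the whole expression.  For the GCC extension 'x ?: y' the
+// true arm is the opaque value for 'x' itself, so no separate LHS block is
+// built and the shared subexpression is emitted via addStmt(BCO->getCommon()).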
+
+CFGBlock *CFGBuilder::VisitDeclStmt(DeclStmt *DS) {
+ // Check if the Decl is for an __label__. If so, elide it from the
+ // CFG entirely.
+ if (isa<LabelDecl>(*DS->decl_begin()))
+ return Block;
+
+ // This case also handles static_asserts.
+ if (DS->isSingleDecl())
+ return VisitDeclSubExpr(DS);
+
+ CFGBlock *B = 0;
+
+ // FIXME: Add a reverse iterator for DeclStmt to avoid this extra copy.
+ typedef SmallVector<Decl*,10> BufTy;
+ BufTy Buf(DS->decl_begin(), DS->decl_end());
+
+ for (BufTy::reverse_iterator I = Buf.rbegin(), E = Buf.rend(); I != E; ++I) {
+ // Get the alignment of the new DeclStmt, padding out to >=8 bytes.
+ unsigned A = llvm::AlignOf<DeclStmt>::Alignment < 8
+ ? 8 : llvm::AlignOf<DeclStmt>::Alignment;
+
+ // Allocate the DeclStmt using the BumpPtrAllocator. It will get
+ // automatically freed with the CFG.
+ DeclGroupRef DG(*I);
+ Decl *D = *I;
+ void *Mem = cfg->getAllocator().Allocate(sizeof(DeclStmt), A);
+ DeclStmt *DSNew = new (Mem) DeclStmt(DG, D->getLocation(), GetEndLoc(D));
+
+ // Append the fake DeclStmt to block.
+ B = VisitDeclSubExpr(DSNew);
+ }
+
+ return B;
+}
+
+/// VisitDeclSubExpr - Utility method to add block-level expressions for
+/// DeclStmts and initializers in them.
+CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
+ assert(DS->isSingleDecl() && "Can handle single declarations only.");
+ Decl *D = DS->getSingleDecl();
+
+ if (isa<StaticAssertDecl>(D)) {
+ // static_asserts aren't added to the CFG because they do not impact
+ // runtime semantics.
+ return Block;
+ }
+
+ VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
+
+ if (!VD) {
+ autoCreateBlock();
+ appendStmt(Block, DS);
+ return Block;
+ }
+
+ bool IsReference = false;
+ bool HasTemporaries = false;
+
+ // Destructors of temporaries in initialization expression should be called
+ // after initialization finishes.
+ Expr *Init = VD->getInit();
+ if (Init) {
+ IsReference = VD->getType()->isReferenceType();
+ HasTemporaries = isa<ExprWithCleanups>(Init);
+
+ if (BuildOpts.AddImplicitDtors && HasTemporaries) {
+ // Generate destructors for temporaries in initialization expression.
+ VisitForTemporaryDtors(cast<ExprWithCleanups>(Init)->getSubExpr(),
+ IsReference);
+ }
+ }
+
+ autoCreateBlock();
+ appendStmt(Block, DS);
+
+ // Keep track of the last non-null block, as 'Block' can be nulled out
+ // if the initializer expression is something like a 'while' in a
+ // statement-expression.
+ CFGBlock *LastBlock = Block;
+
+ if (Init) {
+ if (HasTemporaries) {
+      // For an expression with temporaries, go directly to its subexpression
+      // to avoid generating destructors a second time.
+ ExprWithCleanups *EC = cast<ExprWithCleanups>(Init);
+ if (CFGBlock *newBlock = Visit(EC->getSubExpr()))
+ LastBlock = newBlock;
+ }
+ else {
+ if (CFGBlock *newBlock = Visit(Init))
+ LastBlock = newBlock;
+ }
+ }
+
+ // If the type of VD is a VLA, then we must process its size expressions.
+ for (const VariableArrayType* VA = FindVA(VD->getType().getTypePtr());
+ VA != 0; VA = FindVA(VA->getElementType().getTypePtr()))
+ Block = addStmt(VA->getSizeExpr());
+
+ // Remove variable from local scope.
+ if (ScopePos && VD == *ScopePos)
+ ++ScopePos;
+
+ return Block ? Block : LastBlock;
+}
+
+CFGBlock *CFGBuilder::VisitIfStmt(IfStmt *I) {
+ // We may see an if statement in the middle of a basic block, or it may be the
+ // first statement we are processing. In either case, we create a new basic
+ // block. First, we create the blocks for the then...else statements, and
+ // then we create the block containing the if statement. If we were in the
+ // middle of a block, we stop processing that block. That block is then the
+ // implicit successor for the "then" and "else" clauses.
+
+ // Save local scope position because in case of condition variable ScopePos
+ // won't be restored when traversing AST.
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+
+ // Create local scope for possible condition variable.
+ // Store scope position. Add implicit destructor.
+ if (VarDecl *VD = I->getConditionVariable()) {
+ LocalScope::const_iterator BeginScopePos = ScopePos;
+ addLocalScopeForVarDecl(VD);
+ addAutomaticObjDtors(ScopePos, BeginScopePos, I);
+ }
+
+ // The block we were processing is now finished. Make it the successor
+ // block.
+ if (Block) {
+ Succ = Block;
+ if (badCFG)
+ return 0;
+ }
+
+ // Process the false branch.
+ CFGBlock *ElseBlock = Succ;
+
+ if (Stmt *Else = I->getElse()) {
+ SaveAndRestore<CFGBlock*> sv(Succ);
+
+ // NULL out Block so that the recursive call to Visit will
+ // create a new basic block.
+ Block = NULL;
+
+    // If the branch is not a compound statement, create an implicit scope
+ // and add destructors.
+ if (!isa<CompoundStmt>(Else))
+ addLocalScopeAndDtors(Else);
+
+ ElseBlock = addStmt(Else);
+
+ if (!ElseBlock) // Can occur when the Else body has all NullStmts.
+ ElseBlock = sv.get();
+ else if (Block) {
+ if (badCFG)
+ return 0;
+ }
+ }
+
+ // Process the true branch.
+ CFGBlock *ThenBlock;
+ {
+ Stmt *Then = I->getThen();
+ assert(Then);
+ SaveAndRestore<CFGBlock*> sv(Succ);
+ Block = NULL;
+
+    // If the branch is not a compound statement, create an implicit scope
+ // and add destructors.
+ if (!isa<CompoundStmt>(Then))
+ addLocalScopeAndDtors(Then);
+
+ ThenBlock = addStmt(Then);
+
+ if (!ThenBlock) {
+ // We can reach here if the "then" body has all NullStmts.
+ // Create an empty block so we can distinguish between true and false
+ // branches in path-sensitive analyses.
+ ThenBlock = createBlock(false);
+ addSuccessor(ThenBlock, sv.get());
+ } else if (Block) {
+ if (badCFG)
+ return 0;
+ }
+ }
+
+ // Now create a new block containing the if statement.
+ Block = createBlock(false);
+
+ // Set the terminator of the new block to the If statement.
+ Block->setTerminator(I);
+
+ // See if this is a known constant.
+ const TryResult &KnownVal = tryEvaluateBool(I->getCond());
+
+ // Now add the successors.
+ addSuccessor(Block, KnownVal.isFalse() ? NULL : ThenBlock);
+ addSuccessor(Block, KnownVal.isTrue()? NULL : ElseBlock);
+
+ // Add the condition as the last statement in the new block. This may create
+ // new blocks as the condition may contain control-flow. Any newly created
+  // blocks will be pointed to by "Block".
+ Block = addStmt(I->getCond());
+
+ // Finally, if the IfStmt contains a condition variable, add both the IfStmt
+ // and the condition variable initialization to the CFG.
+ if (VarDecl *VD = I->getConditionVariable()) {
+ if (Expr *Init = VD->getInit()) {
+ autoCreateBlock();
+ appendStmt(Block, I->getConditionVariableDeclStmt());
+ addStmt(Init);
+ }
+ }
+
+ return Block;
+}
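+
+// Rough shape of the result for 'if (c) T(); else E();': the block
+// terminated by the IfStmt evaluates 'c' and has the "then" entry as its
+// first successor and the "else" entry (or the code after the if) as its
+// second.  A condition that folds to a constant, e.g. 'if (0)', turns the
+// unreachable edge into NULL, and a condition variable such as
+// 'if (int x = c)' also appends the DeclStmt and its initializer, as
+// handled above.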
+
+
+CFGBlock *CFGBuilder::VisitReturnStmt(ReturnStmt *R) {
+ // If we were in the middle of a block we stop processing that block.
+ //
+ // NOTE: If a "return" appears in the middle of a block, this means that the
+ // code afterwards is DEAD (unreachable). We still keep a basic block
+ // for that code; a simple "mark-and-sweep" from the entry block will be
+ // able to report such dead blocks.
+
+ // Create the new block.
+ Block = createBlock(false);
+
+ // The Exit block is the only successor.
+ addAutomaticObjDtors(ScopePos, LocalScope::const_iterator(), R);
+ addSuccessor(Block, &cfg->getExit());
+
+ // Add the return statement to the block. This may create new blocks if R
+ // contains control-flow (short-circuit operations).
+ return VisitStmt(R, AddStmtChoice::AlwaysAdd);
+}
+
+CFGBlock *CFGBuilder::VisitLabelStmt(LabelStmt *L) {
+ // Get the block of the labeled statement. Add it to our map.
+ addStmt(L->getSubStmt());
+ CFGBlock *LabelBlock = Block;
+
+ if (!LabelBlock) // This can happen when the body is empty, i.e.
+    LabelBlock = createBlock(); // scopes that only contain NullStmts.
+
+ assert(LabelMap.find(L->getDecl()) == LabelMap.end() &&
+ "label already in map");
+ LabelMap[L->getDecl()] = JumpTarget(LabelBlock, ScopePos);
+
+ // Labels partition blocks, so this is the end of the basic block we were
+  // processing (L is the block's label). Because this is a label (and we have
+  // already processed the substatement), there is no extra control-flow to worry
+ // about.
+ LabelBlock->setLabel(L);
+ if (badCFG)
+ return 0;
+
+ // We set Block to NULL to allow lazy creation of a new block (if necessary);
+ Block = NULL;
+
+ // This block is now the implicit successor of other blocks.
+ Succ = LabelBlock;
+
+ return LabelBlock;
+}
+
+CFGBlock *CFGBuilder::VisitLambdaExpr(LambdaExpr *E, AddStmtChoice asc) {
+ CFGBlock *LastBlock = VisitNoRecurse(E, asc);
+ for (LambdaExpr::capture_init_iterator it = E->capture_init_begin(),
+ et = E->capture_init_end(); it != et; ++it) {
+ if (Expr *Init = *it) {
+ CFGBlock *Tmp = Visit(Init);
+ if (Tmp != 0)
+ LastBlock = Tmp;
+ }
+ }
+ return LastBlock;
+}
+
+CFGBlock *CFGBuilder::VisitGotoStmt(GotoStmt *G) {
+ // Goto is a control-flow statement. Thus we stop processing the current
+ // block and create a new one.
+
+ Block = createBlock(false);
+ Block->setTerminator(G);
+
+ // If we already know the mapping to the label block add the successor now.
+ LabelMapTy::iterator I = LabelMap.find(G->getLabel());
+
+ if (I == LabelMap.end())
+ // We will need to backpatch this block later.
+ BackpatchBlocks.push_back(JumpSource(Block, ScopePos));
+ else {
+ JumpTarget JT = I->second;
+ addAutomaticObjDtors(ScopePos, JT.scopePosition, G);
+ addSuccessor(Block, JT.block);
+ }
+
+ return Block;
+}
+
+CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
+ CFGBlock *LoopSuccessor = NULL;
+
+ // Save local scope position because in case of condition variable ScopePos
+ // won't be restored when traversing AST.
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+
+ // Create local scope for init statement and possible condition variable.
+ // Add destructor for init statement and condition variable.
+ // Store scope position for continue statement.
+ if (Stmt *Init = F->getInit())
+ addLocalScopeForStmt(Init);
+ LocalScope::const_iterator LoopBeginScopePos = ScopePos;
+
+ if (VarDecl *VD = F->getConditionVariable())
+ addLocalScopeForVarDecl(VD);
+ LocalScope::const_iterator ContinueScopePos = ScopePos;
+
+ addAutomaticObjDtors(ScopePos, save_scope_pos.get(), F);
+
+ // "for" is a control-flow statement. Thus we stop processing the current
+ // block.
+ if (Block) {
+ if (badCFG)
+ return 0;
+ LoopSuccessor = Block;
+ } else
+ LoopSuccessor = Succ;
+
+ // Save the current value for the break targets.
+ // All breaks should go to the code following the loop.
+ SaveAndRestore<JumpTarget> save_break(BreakJumpTarget);
+ BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
+
+ // Because of short-circuit evaluation, the condition of the loop can span
+ // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
+ // evaluate the condition.
+ CFGBlock *ExitConditionBlock = createBlock(false);
+ CFGBlock *EntryConditionBlock = ExitConditionBlock;
+
+ // Set the terminator for the "exit" condition block.
+ ExitConditionBlock->setTerminator(F);
+
+ // Now add the actual condition to the condition block. Because the condition
+ // itself may contain control-flow, new blocks may be created.
+ if (Stmt *C = F->getCond()) {
+ Block = ExitConditionBlock;
+ EntryConditionBlock = addStmt(C);
+ if (badCFG)
+ return 0;
+ assert(Block == EntryConditionBlock ||
+ (Block == 0 && EntryConditionBlock == Succ));
+
+ // If this block contains a condition variable, add both the condition
+ // variable and initializer to the CFG.
+ if (VarDecl *VD = F->getConditionVariable()) {
+ if (Expr *Init = VD->getInit()) {
+ autoCreateBlock();
+ appendStmt(Block, F->getConditionVariableDeclStmt());
+ EntryConditionBlock = addStmt(Init);
+ assert(Block == EntryConditionBlock);
+ }
+ }
+
+ if (Block) {
+ if (badCFG)
+ return 0;
+ }
+ }
+
+ // The condition block is the implicit successor for the loop body as well as
+ // any code above the loop.
+ Succ = EntryConditionBlock;
+
+ // See if this is a known constant.
+ TryResult KnownVal(true);
+
+ if (F->getCond())
+ KnownVal = tryEvaluateBool(F->getCond());
+
+ // Now create the loop body.
+ {
+ assert(F->getBody());
+
+ // Save the current values for Block, Succ, and continue targets.
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
+ SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget);
+
+ // Create a new block to contain the (bottom) of the loop body.
+ Block = NULL;
+
+ // Loop body should end with destructor of Condition variable (if any).
+ addAutomaticObjDtors(ScopePos, LoopBeginScopePos, F);
+
+ if (Stmt *I = F->getInc()) {
+ // Generate increment code in its own basic block. This is the target of
+ // continue statements.
+ Succ = addStmt(I);
+ } else {
+ // No increment code. Create a special, empty, block that is used as the
+ // target block for "looping back" to the start of the loop.
+ assert(Succ == EntryConditionBlock);
+ Succ = Block ? Block : createBlock();
+ }
+
+ // Finish up the increment (or empty) block if it hasn't been already.
+ if (Block) {
+ assert(Block == Succ);
+ if (badCFG)
+ return 0;
+ Block = 0;
+ }
+
+ ContinueJumpTarget = JumpTarget(Succ, ContinueScopePos);
+
+ // The starting block for the loop increment is the block that should
+ // represent the 'loop target' for looping back to the start of the loop.
+ ContinueJumpTarget.block->setLoopTarget(F);
+
+    // If the body is not a compound statement, create an implicit scope
+ // and add destructors.
+ if (!isa<CompoundStmt>(F->getBody()))
+ addLocalScopeAndDtors(F->getBody());
+
+ // Now populate the body block, and in the process create new blocks as we
+ // walk the body of the loop.
+ CFGBlock *BodyBlock = addStmt(F->getBody());
+
+ if (!BodyBlock)
+ BodyBlock = ContinueJumpTarget.block;//can happen for "for (...;...;...);"
+ else if (badCFG)
+ return 0;
+
+ // This new body block is a successor to our "exit" condition block.
+ addSuccessor(ExitConditionBlock, KnownVal.isFalse() ? NULL : BodyBlock);
+ }
+
+ // Link up the condition block with the code that follows the loop. (the
+ // false branch).
+ addSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
+
+ // If the loop contains initialization, create a new block for those
+ // statements. This block can also contain statements that precede the loop.
+ if (Stmt *I = F->getInit()) {
+ Block = createBlock();
+ return addStmt(I);
+ }
+
+ // There is no loop initialization. We are thus basically a while loop.
+ // NULL out Block to force lazy block construction.
+ Block = NULL;
+ Succ = EntryConditionBlock;
+ return EntryConditionBlock;
+}
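+
+// Rough shape of the result for 'for (init; cond; inc) body;', built
+// backwards as usual: the exit condition block is terminated by the ForStmt
+// and has the body entry and the code after the loop as successors; continue
+// jumps to the increment block (or to an empty loop-back block when there is
+// no increment), break jumps to the code after the loop, and 'init', when
+// present, gets a block of its own that leads into the condition.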
+
+CFGBlock *CFGBuilder::VisitMemberExpr(MemberExpr *M, AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, M)) {
+ autoCreateBlock();
+ appendStmt(Block, M);
+ }
+ return Visit(M->getBase());
+}
+
+CFGBlock *CFGBuilder::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+ // Objective-C fast enumeration 'for' statements:
+ // http://developer.apple.com/documentation/Cocoa/Conceptual/ObjectiveC
+ //
+ // for ( Type newVariable in collection_expression ) { statements }
+ //
+ // becomes:
+ //
+ // prologue:
+ // 1. collection_expression
+ // T. jump to loop_entry
+ // loop_entry:
+ // 1. side-effects of element expression
+ // 1. ObjCForCollectionStmt [performs binding to newVariable]
+ // T. ObjCForCollectionStmt TB, FB [jumps to TB if newVariable != nil]
+ // TB:
+ // statements
+ // T. jump to loop_entry
+ // FB:
+ // what comes after
+ //
+ // and
+ //
+ // Type existingItem;
+ // for ( existingItem in expression ) { statements }
+ //
+ // becomes:
+ //
+ // the same with newVariable replaced with existingItem; the binding works
+ // the same except that for one ObjCForCollectionStmt::getElement() returns
+ // a DeclStmt and the other returns a DeclRefExpr.
+ //
+
+ CFGBlock *LoopSuccessor = 0;
+
+ if (Block) {
+ if (badCFG)
+ return 0;
+ LoopSuccessor = Block;
+ Block = 0;
+ } else
+ LoopSuccessor = Succ;
+
+ // Build the condition blocks.
+ CFGBlock *ExitConditionBlock = createBlock(false);
+
+ // Set the terminator for the "exit" condition block.
+ ExitConditionBlock->setTerminator(S);
+
+ // The last statement in the block should be the ObjCForCollectionStmt, which
+ // performs the actual binding to 'element' and determines if there are any
+ // more items in the collection.
+ appendStmt(ExitConditionBlock, S);
+ Block = ExitConditionBlock;
+
+ // Walk the 'element' expression to see if there are any side-effects. We
+ // generate new blocks as necessary. We DON'T add the statement by default to
+ // the CFG unless it contains control-flow.
+ CFGBlock *EntryConditionBlock = Visit(S->getElement(),
+ AddStmtChoice::NotAlwaysAdd);
+ if (Block) {
+ if (badCFG)
+ return 0;
+ Block = 0;
+ }
+
+ // The condition block is the implicit successor for the loop body as well as
+ // any code above the loop.
+ Succ = EntryConditionBlock;
+
+ // Now create the true branch.
+ {
+ // Save the current values for Succ, continue and break targets.
+ SaveAndRestore<CFGBlock*> save_Succ(Succ);
+ SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget),
+ save_break(BreakJumpTarget);
+
+ BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
+ ContinueJumpTarget = JumpTarget(EntryConditionBlock, ScopePos);
+
+ CFGBlock *BodyBlock = addStmt(S->getBody());
+
+ if (!BodyBlock)
+ BodyBlock = EntryConditionBlock; // can happen for "for (X in Y) ;"
+ else if (Block) {
+ if (badCFG)
+ return 0;
+ }
+
+ // This new body block is a successor to our "exit" condition block.
+ addSuccessor(ExitConditionBlock, BodyBlock);
+ }
+
+ // Link up the condition block with the code that follows the loop.
+ // (the false branch).
+ addSuccessor(ExitConditionBlock, LoopSuccessor);
+
+ // Now create a prologue block to contain the collection expression.
+ Block = createBlock();
+ return addStmt(S->getCollection());
+}
+
+CFGBlock *CFGBuilder::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
+ // Inline the body.
+ return addStmt(S->getSubStmt());
+ // TODO: consider adding cleanups for the end of @autoreleasepool scope.
+}
+
+CFGBlock *CFGBuilder::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+ // FIXME: Add locking 'primitives' to CFG for @synchronized.
+
+ // Inline the body.
+ CFGBlock *SyncBlock = addStmt(S->getSynchBody());
+
+ // The sync body starts its own basic block. This makes it a little easier
+ // for diagnostic clients.
+ if (SyncBlock) {
+ if (badCFG)
+ return 0;
+
+ Block = 0;
+ Succ = SyncBlock;
+ }
+
+ // Add the @synchronized to the CFG.
+ autoCreateBlock();
+ appendStmt(Block, S);
+
+ // Inline the sync expression.
+ return addStmt(S->getSynchExpr());
+}
+
+CFGBlock *CFGBuilder::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
+ // FIXME
+ return NYS();
+}
+
+CFGBlock *CFGBuilder::VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ autoCreateBlock();
+
+ // Add the PseudoObject as the last thing.
+ appendStmt(Block, E);
+
+ CFGBlock *lastBlock = Block;
+
+ // Before that, evaluate all of the semantics in order. In
+ // CFG-land, that means appending them in reverse order.
+ for (unsigned i = E->getNumSemanticExprs(); i != 0; ) {
+ Expr *Semantic = E->getSemanticExpr(--i);
+
+ // If the semantic is an opaque value, we're being asked to bind
+ // it to its source expression.
+ if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Semantic))
+ Semantic = OVE->getSourceExpr();
+
+ if (CFGBlock *B = Visit(Semantic))
+ lastBlock = B;
+ }
+
+ return lastBlock;
+}
+
+CFGBlock *CFGBuilder::VisitWhileStmt(WhileStmt *W) {
+ CFGBlock *LoopSuccessor = NULL;
+
+ // Save local scope position because in case of condition variable ScopePos
+ // won't be restored when traversing AST.
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+
+ // Create local scope for possible condition variable.
+ // Store scope position for continue statement.
+ LocalScope::const_iterator LoopBeginScopePos = ScopePos;
+ if (VarDecl *VD = W->getConditionVariable()) {
+ addLocalScopeForVarDecl(VD);
+ addAutomaticObjDtors(ScopePos, LoopBeginScopePos, W);
+ }
+
+ // "while" is a control-flow statement. Thus we stop processing the current
+ // block.
+ if (Block) {
+ if (badCFG)
+ return 0;
+ LoopSuccessor = Block;
+ Block = 0;
+ } else
+ LoopSuccessor = Succ;
+
+ // Because of short-circuit evaluation, the condition of the loop can span
+ // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
+ // evaluate the condition.
+ CFGBlock *ExitConditionBlock = createBlock(false);
+ CFGBlock *EntryConditionBlock = ExitConditionBlock;
+
+ // Set the terminator for the "exit" condition block.
+ ExitConditionBlock->setTerminator(W);
+
+ // Now add the actual condition to the condition block. Because the condition
+ // itself may contain control-flow, new blocks may be created. Thus we update
+ // "Succ" after adding the condition.
+ if (Stmt *C = W->getCond()) {
+ Block = ExitConditionBlock;
+ EntryConditionBlock = addStmt(C);
+ // The condition might finish the current 'Block'.
+ Block = EntryConditionBlock;
+
+ // If this block contains a condition variable, add both the condition
+ // variable and initializer to the CFG.
+ if (VarDecl *VD = W->getConditionVariable()) {
+ if (Expr *Init = VD->getInit()) {
+ autoCreateBlock();
+ appendStmt(Block, W->getConditionVariableDeclStmt());
+ EntryConditionBlock = addStmt(Init);
+ assert(Block == EntryConditionBlock);
+ }
+ }
+
+ if (Block) {
+ if (badCFG)
+ return 0;
+ }
+ }
+
+ // The condition block is the implicit successor for the loop body as well as
+ // any code above the loop.
+ Succ = EntryConditionBlock;
+
+ // See if this is a known constant.
+ const TryResult& KnownVal = tryEvaluateBool(W->getCond());
+
+ // Process the loop body.
+ {
+ assert(W->getBody());
+
+ // Save the current values for Block, Succ, and continue and break targets
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
+ SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget),
+ save_break(BreakJumpTarget);
+
+ // Create an empty block to represent the transition block for looping back
+ // to the head of the loop.
+ Block = 0;
+ assert(Succ == EntryConditionBlock);
+ Succ = createBlock();
+ Succ->setLoopTarget(W);
+ ContinueJumpTarget = JumpTarget(Succ, LoopBeginScopePos);
+
+ // All breaks should go to the code following the loop.
+ BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
+
+ // NULL out Block to force lazy instantiation of blocks for the body.
+ Block = NULL;
+
+ // Loop body should end with destructor of Condition variable (if any).
+ addAutomaticObjDtors(ScopePos, LoopBeginScopePos, W);
+
+    // If the body is not a compound statement, create an implicit scope
+ // and add destructors.
+ if (!isa<CompoundStmt>(W->getBody()))
+ addLocalScopeAndDtors(W->getBody());
+
+ // Create the body. The returned block is the entry to the loop body.
+ CFGBlock *BodyBlock = addStmt(W->getBody());
+
+ if (!BodyBlock)
+ BodyBlock = ContinueJumpTarget.block; // can happen for "while(...) ;"
+ else if (Block) {
+ if (badCFG)
+ return 0;
+ }
+
+ // Add the loop body entry as a successor to the condition.
+ addSuccessor(ExitConditionBlock, KnownVal.isFalse() ? NULL : BodyBlock);
+ }
+
+ // Link up the condition block with the code that follows the loop. (the
+ // false branch).
+ addSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
+
+ // There can be no more statements in the condition block since we loop back
+ // to this block. NULL out Block to force lazy creation of another block.
+ Block = NULL;
+
+ // Return the condition block, which is the dominating block for the loop.
+ Succ = EntryConditionBlock;
+ return EntryConditionBlock;
+}
+
+
+CFGBlock *CFGBuilder::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+ // FIXME: For now we pretend that @catch and the code it contains does not
+ // exit.
+ return Block;
+}
+
+CFGBlock *CFGBuilder::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+ // FIXME: This isn't complete. We basically treat @throw like a return
+ // statement.
+
+ // If we were in the middle of a block we stop processing that block.
+ if (badCFG)
+ return 0;
+
+ // Create the new block.
+ Block = createBlock(false);
+
+ // The Exit block is the only successor.
+ addSuccessor(Block, &cfg->getExit());
+
+ // Add the statement to the block. This may create new blocks if S contains
+ // control-flow (short-circuit operations).
+ return VisitStmt(S, AddStmtChoice::AlwaysAdd);
+}
+
+CFGBlock *CFGBuilder::VisitCXXThrowExpr(CXXThrowExpr *T) {
+ // If we were in the middle of a block we stop processing that block.
+ if (badCFG)
+ return 0;
+
+ // Create the new block.
+ Block = createBlock(false);
+
+ if (TryTerminatedBlock)
+ // The current try statement is the only successor.
+ addSuccessor(Block, TryTerminatedBlock);
+ else
+ // otherwise the Exit block is the only successor.
+ addSuccessor(Block, &cfg->getExit());
+
+  // Add the statement to the block. This may create new blocks if T contains
+ // control-flow (short-circuit operations).
+ return VisitStmt(T, AddStmtChoice::AlwaysAdd);
+}
+
+CFGBlock *CFGBuilder::VisitDoStmt(DoStmt *D) {
+ CFGBlock *LoopSuccessor = NULL;
+
+ // "do...while" is a control-flow statement. Thus we stop processing the
+ // current block.
+ if (Block) {
+ if (badCFG)
+ return 0;
+ LoopSuccessor = Block;
+ } else
+ LoopSuccessor = Succ;
+
+ // Because of short-circuit evaluation, the condition of the loop can span
+ // multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
+ // evaluate the condition.
+ CFGBlock *ExitConditionBlock = createBlock(false);
+ CFGBlock *EntryConditionBlock = ExitConditionBlock;
+
+ // Set the terminator for the "exit" condition block.
+ ExitConditionBlock->setTerminator(D);
+
+ // Now add the actual condition to the condition block. Because the condition
+ // itself may contain control-flow, new blocks may be created.
+ if (Stmt *C = D->getCond()) {
+ Block = ExitConditionBlock;
+ EntryConditionBlock = addStmt(C);
+ if (Block) {
+ if (badCFG)
+ return 0;
+ }
+ }
+
+ // The condition block is the implicit successor for the loop body.
+ Succ = EntryConditionBlock;
+
+ // See if this is a known constant.
+ const TryResult &KnownVal = tryEvaluateBool(D->getCond());
+
+ // Process the loop body.
+ CFGBlock *BodyBlock = NULL;
+ {
+ assert(D->getBody());
+
+ // Save the current values for Block, Succ, and continue and break targets
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
+ SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget),
+ save_break(BreakJumpTarget);
+
+ // All continues within this loop should go to the condition block
+ ContinueJumpTarget = JumpTarget(EntryConditionBlock, ScopePos);
+
+ // All breaks should go to the code following the loop.
+ BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
+
+ // NULL out Block to force lazy instantiation of blocks for the body.
+ Block = NULL;
+
+    // If the body is not a compound statement, create an implicit scope
+ // and add destructors.
+ if (!isa<CompoundStmt>(D->getBody()))
+ addLocalScopeAndDtors(D->getBody());
+
+ // Create the body. The returned block is the entry to the loop body.
+ BodyBlock = addStmt(D->getBody());
+
+ if (!BodyBlock)
+ BodyBlock = EntryConditionBlock; // can happen for "do ; while(...)"
+ else if (Block) {
+ if (badCFG)
+ return 0;
+ }
+
+ if (!KnownVal.isFalse()) {
+ // Add an intermediate block between the BodyBlock and the
+ // ExitConditionBlock to represent the "loop back" transition. Create an
+ // empty block to represent the transition block for looping back to the
+ // head of the loop.
+ // FIXME: Can we do this more efficiently without adding another block?
+ Block = NULL;
+ Succ = BodyBlock;
+ CFGBlock *LoopBackBlock = createBlock();
+ LoopBackBlock->setLoopTarget(D);
+
+ // Add the loop body entry as a successor to the condition.
+ addSuccessor(ExitConditionBlock, LoopBackBlock);
+ }
+ else
+ addSuccessor(ExitConditionBlock, NULL);
+ }
+
+ // Link up the condition block with the code that follows the loop.
+ // (the false branch).
+ addSuccessor(ExitConditionBlock, KnownVal.isTrue() ? NULL : LoopSuccessor);
+
+ // There can be no more statements in the body block(s) since we loop back to
+ // the body. NULL out Block to force lazy creation of another block.
+ Block = NULL;
+
+ // Return the loop body, which is the dominating block for the loop.
+ Succ = BodyBlock;
+ return BodyBlock;
+}
+
+CFGBlock *CFGBuilder::VisitContinueStmt(ContinueStmt *C) {
+ // "continue" is a control-flow statement. Thus we stop processing the
+ // current block.
+ if (badCFG)
+ return 0;
+
+ // Now create a new block that ends with the continue statement.
+ Block = createBlock(false);
+ Block->setTerminator(C);
+
+ // If there is no target for the continue, then we are looking at an
+ // incomplete AST. This means the CFG cannot be constructed.
+ if (ContinueJumpTarget.block) {
+ addAutomaticObjDtors(ScopePos, ContinueJumpTarget.scopePosition, C);
+ addSuccessor(Block, ContinueJumpTarget.block);
+ } else
+ badCFG = true;
+
+ return Block;
+}
+
+CFGBlock *CFGBuilder::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E,
+ AddStmtChoice asc) {
+
+ if (asc.alwaysAdd(*this, E)) {
+ autoCreateBlock();
+ appendStmt(Block, E);
+ }
+
+ // VLA types have expressions that must be evaluated.
+ CFGBlock *lastBlock = Block;
+
+ if (E->isArgumentType()) {
+ for (const VariableArrayType *VA =FindVA(E->getArgumentType().getTypePtr());
+ VA != 0; VA = FindVA(VA->getElementType().getTypePtr()))
+ lastBlock = addStmt(VA->getSizeExpr());
+ }
+ return lastBlock;
+}
+
+/// VisitStmtExpr - Utility method to handle (nested) statement
+/// expressions (a GCC extension).
+CFGBlock *CFGBuilder::VisitStmtExpr(StmtExpr *SE, AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, SE)) {
+ autoCreateBlock();
+ appendStmt(Block, SE);
+ }
+ return VisitCompoundStmt(SE->getSubStmt());
+}
+
+CFGBlock *CFGBuilder::VisitSwitchStmt(SwitchStmt *Terminator) {
+ // "switch" is a control-flow statement. Thus we stop processing the current
+ // block.
+ CFGBlock *SwitchSuccessor = NULL;
+
+ // Save local scope position because in case of condition variable ScopePos
+ // won't be restored when traversing AST.
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+
+ // Create local scope for possible condition variable.
+ // Store scope position. Add implicit destructor.
+ if (VarDecl *VD = Terminator->getConditionVariable()) {
+ LocalScope::const_iterator SwitchBeginScopePos = ScopePos;
+ addLocalScopeForVarDecl(VD);
+ addAutomaticObjDtors(ScopePos, SwitchBeginScopePos, Terminator);
+ }
+
+ if (Block) {
+ if (badCFG)
+ return 0;
+ SwitchSuccessor = Block;
+ } else SwitchSuccessor = Succ;
+
+ // Save the current "switch" context.
+ SaveAndRestore<CFGBlock*> save_switch(SwitchTerminatedBlock),
+ save_default(DefaultCaseBlock);
+ SaveAndRestore<JumpTarget> save_break(BreakJumpTarget);
+
+ // Set the "default" case to be the block after the switch statement. If the
+ // switch statement contains a "default:", this value will be overwritten with
+ // the block for that code.
+ DefaultCaseBlock = SwitchSuccessor;
+
+ // Create a new block that will contain the switch statement.
+ SwitchTerminatedBlock = createBlock(false);
+
+ // Now process the switch body. The code after the switch is the implicit
+ // successor.
+ Succ = SwitchSuccessor;
+ BreakJumpTarget = JumpTarget(SwitchSuccessor, ScopePos);
+
+ // When visiting the body, the case statements should automatically get linked
+ // up to the switch. We also don't keep a pointer to the body, since all
+ // control-flow from the switch goes to case/default statements.
+ assert(Terminator->getBody() && "switch must contain a non-NULL body");
+ Block = NULL;
+
+ // For pruning unreachable case statements, save the current state
+ // for tracking the condition value.
+ SaveAndRestore<bool> save_switchExclusivelyCovered(switchExclusivelyCovered,
+ false);
+
+ // Determine if the switch condition can be explicitly evaluated.
+ assert(Terminator->getCond() && "switch condition must be non-NULL");
+ Expr::EvalResult result;
+ bool b = tryEvaluate(Terminator->getCond(), result);
+ SaveAndRestore<Expr::EvalResult*> save_switchCond(switchCond,
+ b ? &result : 0);
+
+  // If the body is not a compound statement, create an implicit scope
+ // and add destructors.
+ if (!isa<CompoundStmt>(Terminator->getBody()))
+ addLocalScopeAndDtors(Terminator->getBody());
+
+ addStmt(Terminator->getBody());
+ if (Block) {
+ if (badCFG)
+ return 0;
+ }
+
+ // If we have no "default:" case, the default transition is to the code
+ // following the switch body. Moreover, take into account if all the
+ // cases of a switch are covered (e.g., switching on an enum value).
+ addSuccessor(SwitchTerminatedBlock,
+ switchExclusivelyCovered || Terminator->isAllEnumCasesCovered()
+ ? 0 : DefaultCaseBlock);
+
+ // Add the terminator and condition in the switch block.
+ SwitchTerminatedBlock->setTerminator(Terminator);
+ Block = SwitchTerminatedBlock;
+ Block = addStmt(Terminator->getCond());
+
+ // Finally, if the SwitchStmt contains a condition variable, add both the
+ // SwitchStmt and the condition variable initialization to the CFG.
+ if (VarDecl *VD = Terminator->getConditionVariable()) {
+ if (Expr *Init = VD->getInit()) {
+ autoCreateBlock();
+ appendStmt(Block, Terminator->getConditionVariableDeclStmt());
+ addStmt(Init);
+ }
+ }
+
+ return Block;
+}
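+
+// Sketch of the result: each case adds itself as a successor of the
+// switch-terminated block while the body is visited (see VisitCaseStmt and
+// shouldAddCase below); the final successor is DefaultCaseBlock, i.e. the
+// 'default:' block or, absent one, the code after the switch.  That edge is
+// pruned to 0 when a statically known condition already matched a case or
+// when all enum values are covered, e.g. switching on an enum whose every
+// enumerator has a case label.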
+
+static bool shouldAddCase(bool &switchExclusivelyCovered,
+ const Expr::EvalResult *switchCond,
+ const CaseStmt *CS,
+ ASTContext &Ctx) {
+ if (!switchCond)
+ return true;
+
+ bool addCase = false;
+
+ if (!switchExclusivelyCovered) {
+ if (switchCond->Val.isInt()) {
+ // Evaluate the LHS of the case value.
+ const llvm::APSInt &lhsInt = CS->getLHS()->EvaluateKnownConstInt(Ctx);
+ const llvm::APSInt &condInt = switchCond->Val.getInt();
+
+ if (condInt == lhsInt) {
+ addCase = true;
+ switchExclusivelyCovered = true;
+ }
+ else if (condInt < lhsInt) {
+ if (const Expr *RHS = CS->getRHS()) {
+ // Evaluate the RHS of the case value.
+ const llvm::APSInt &V2 = RHS->EvaluateKnownConstInt(Ctx);
+ if (V2 <= condInt) {
+ addCase = true;
+ switchExclusivelyCovered = true;
+ }
+ }
+ }
+ }
+ else
+ addCase = true;
+ }
+ return addCase;
+}
+
+CFGBlock *CFGBuilder::VisitCaseStmt(CaseStmt *CS) {
+ // CaseStmts are essentially labels, so they are the first statement in a
+ // block.
+ CFGBlock *TopBlock = 0, *LastBlock = 0;
+
+ if (Stmt *Sub = CS->getSubStmt()) {
+ // For deeply nested chains of CaseStmts, instead of doing a recursion
+ // (which can blow out the stack), manually unroll and create blocks
+ // along the way.
+ while (isa<CaseStmt>(Sub)) {
+ CFGBlock *currentBlock = createBlock(false);
+ currentBlock->setLabel(CS);
+
+ if (TopBlock)
+ addSuccessor(LastBlock, currentBlock);
+ else
+ TopBlock = currentBlock;
+
+ addSuccessor(SwitchTerminatedBlock,
+ shouldAddCase(switchExclusivelyCovered, switchCond,
+ CS, *Context)
+ ? currentBlock : 0);
+
+ LastBlock = currentBlock;
+ CS = cast<CaseStmt>(Sub);
+ Sub = CS->getSubStmt();
+ }
+
+ addStmt(Sub);
+ }
+
+ CFGBlock *CaseBlock = Block;
+ if (!CaseBlock)
+ CaseBlock = createBlock();
+
+  // Case statements partition blocks, so this is the top of the basic block we
+ // were processing (the "case XXX:" is the label).
+ CaseBlock->setLabel(CS);
+
+ if (badCFG)
+ return 0;
+
+ // Add this block to the list of successors for the block with the switch
+ // statement.
+ assert(SwitchTerminatedBlock);
+ addSuccessor(SwitchTerminatedBlock,
+ shouldAddCase(switchExclusivelyCovered, switchCond,
+ CS, *Context)
+ ? CaseBlock : 0);
+
+ // We set Block to NULL to allow lazy creation of a new block (if necessary)
+ Block = NULL;
+
+ if (TopBlock) {
+ addSuccessor(LastBlock, CaseBlock);
+ Succ = TopBlock;
+ } else {
+ // This block is now the implicit successor of other blocks.
+ Succ = CaseBlock;
+ }
+
+ return Succ;
+}
+
+CFGBlock *CFGBuilder::VisitDefaultStmt(DefaultStmt *Terminator) {
+ if (Terminator->getSubStmt())
+ addStmt(Terminator->getSubStmt());
+
+ DefaultCaseBlock = Block;
+
+ if (!DefaultCaseBlock)
+ DefaultCaseBlock = createBlock();
+
+ // Default statements partition blocks, so this is the top of the basic block
+ // we were processing (the "default:" is the label).
+ DefaultCaseBlock->setLabel(Terminator);
+
+ if (badCFG)
+ return 0;
+
+ // Unlike case statements, we don't add the default block to the successors
+ // for the switch statement immediately. This is done when we finish
+ // processing the switch statement. This allows for the default case
+ // (including a fall-through to the code after the switch statement) to always
+ // be the last successor of a switch-terminated block.
+
+ // We set Block to NULL to allow lazy creation of a new block (if necessary)
+ Block = NULL;
+
+ // This block is now the implicit successor of other blocks.
+ Succ = DefaultCaseBlock;
+
+ return DefaultCaseBlock;
+}
+
+CFGBlock *CFGBuilder::VisitCXXTryStmt(CXXTryStmt *Terminator) {
+ // "try"/"catch" is a control-flow statement. Thus we stop processing the
+ // current block.
+ CFGBlock *TrySuccessor = NULL;
+
+ if (Block) {
+ if (badCFG)
+ return 0;
+ TrySuccessor = Block;
+ } else TrySuccessor = Succ;
+
+ CFGBlock *PrevTryTerminatedBlock = TryTerminatedBlock;
+
+ // Create a new block that will contain the try statement.
+ CFGBlock *NewTryTerminatedBlock = createBlock(false);
+ // Add the terminator in the try block.
+ NewTryTerminatedBlock->setTerminator(Terminator);
+
+ bool HasCatchAll = false;
+ for (unsigned h = 0; h <Terminator->getNumHandlers(); ++h) {
+ // The code after the try is the implicit successor.
+ Succ = TrySuccessor;
+ CXXCatchStmt *CS = Terminator->getHandler(h);
+ if (CS->getExceptionDecl() == 0) {
+ HasCatchAll = true;
+ }
+ Block = NULL;
+ CFGBlock *CatchBlock = VisitCXXCatchStmt(CS);
+ if (CatchBlock == 0)
+ return 0;
+ // Add this block to the list of successors for the block with the try
+ // statement.
+ addSuccessor(NewTryTerminatedBlock, CatchBlock);
+ }
+ if (!HasCatchAll) {
+ if (PrevTryTerminatedBlock)
+ addSuccessor(NewTryTerminatedBlock, PrevTryTerminatedBlock);
+ else
+ addSuccessor(NewTryTerminatedBlock, &cfg->getExit());
+ }
+
+ // The code after the try is the implicit successor.
+ Succ = TrySuccessor;
+
+ // Save the current "try" context.
+ SaveAndRestore<CFGBlock*> save_try(TryTerminatedBlock, NewTryTerminatedBlock);
+ cfg->addTryDispatchBlock(TryTerminatedBlock);
+
+ assert(Terminator->getTryBlock() && "try must contain a non-NULL body");
+ Block = NULL;
+ Block = addStmt(Terminator->getTryBlock());
+ return Block;
+}
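+
+// Sketch of the result: the new try-terminated block gets one successor per
+// handler and, when no catch-all handler ('catch (...)') is present, a
+// final successor to the enclosing try's dispatch block or to the exit
+// block.  The try body is then built with TryTerminatedBlock pointing at
+// this block, so throw expressions and (with AddEHEdges) throwing calls
+// inside it can branch here.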
+
+CFGBlock *CFGBuilder::VisitCXXCatchStmt(CXXCatchStmt *CS) {
+  // CXXCatchStmts are treated like labels, so they are the first statement in a
+ // block.
+
+ // Save local scope position because in case of exception variable ScopePos
+ // won't be restored when traversing AST.
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+
+ // Create local scope for possible exception variable.
+ // Store scope position. Add implicit destructor.
+ if (VarDecl *VD = CS->getExceptionDecl()) {
+ LocalScope::const_iterator BeginScopePos = ScopePos;
+ addLocalScopeForVarDecl(VD);
+ addAutomaticObjDtors(ScopePos, BeginScopePos, CS);
+ }
+
+ if (CS->getHandlerBlock())
+ addStmt(CS->getHandlerBlock());
+
+ CFGBlock *CatchBlock = Block;
+ if (!CatchBlock)
+ CatchBlock = createBlock();
+
+  // A CXXCatchStmt is more than just a label. It has semantic meaning as
+  // well, since it implicitly "initializes" the catch variable.  Add
+ // it to the CFG as a CFGElement so that the control-flow of these
+ // semantics gets captured.
+ appendStmt(CatchBlock, CS);
+
+ // Also add the CXXCatchStmt as a label, to mirror handling of regular
+ // labels.
+ CatchBlock->setLabel(CS);
+
+ // Bail out if the CFG is bad.
+ if (badCFG)
+ return 0;
+
+ // We set Block to NULL to allow lazy creation of a new block (if necessary)
+ Block = NULL;
+
+ return CatchBlock;
+}
+
+CFGBlock *CFGBuilder::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
+  // C++0x for-range statements are specified in [stmt.ranged] as:
+ //
+ // {
+ // auto && __range = range-init;
+ // for ( auto __begin = begin-expr,
+ // __end = end-expr;
+ // __begin != __end;
+ // ++__begin ) {
+ // for-range-declaration = *__begin;
+ // statement
+ // }
+ // }
+
+ // Save local scope position before the addition of the implicit variables.
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+
+ // Create local scopes and destructors for range, begin and end variables.
+ if (Stmt *Range = S->getRangeStmt())
+ addLocalScopeForStmt(Range);
+ if (Stmt *BeginEnd = S->getBeginEndStmt())
+ addLocalScopeForStmt(BeginEnd);
+ addAutomaticObjDtors(ScopePos, save_scope_pos.get(), S);
+
+ LocalScope::const_iterator ContinueScopePos = ScopePos;
+
+ // "for" is a control-flow statement. Thus we stop processing the current
+ // block.
+ CFGBlock *LoopSuccessor = NULL;
+ if (Block) {
+ if (badCFG)
+ return 0;
+ LoopSuccessor = Block;
+ } else
+ LoopSuccessor = Succ;
+
+ // Save the current value for the break targets.
+ // All breaks should go to the code following the loop.
+ SaveAndRestore<JumpTarget> save_break(BreakJumpTarget);
+ BreakJumpTarget = JumpTarget(LoopSuccessor, ScopePos);
+
+ // The block for the __begin != __end expression.
+ CFGBlock *ConditionBlock = createBlock(false);
+ ConditionBlock->setTerminator(S);
+
+ // Now add the actual condition to the condition block.
+ if (Expr *C = S->getCond()) {
+ Block = ConditionBlock;
+ CFGBlock *BeginConditionBlock = addStmt(C);
+ if (badCFG)
+ return 0;
+ assert(BeginConditionBlock == ConditionBlock &&
+ "condition block in for-range was unexpectedly complex");
+ (void)BeginConditionBlock;
+ }
+
+ // The condition block is the implicit successor for the loop body as well as
+ // any code above the loop.
+ Succ = ConditionBlock;
+
+ // See if this is a known constant.
+ TryResult KnownVal(true);
+
+ if (S->getCond())
+ KnownVal = tryEvaluateBool(S->getCond());
+
+ // Now create the loop body.
+ {
+ assert(S->getBody());
+
+ // Save the current values for Block, Succ, and continue targets.
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ);
+ SaveAndRestore<JumpTarget> save_continue(ContinueJumpTarget);
+
+ // Generate increment code in its own basic block. This is the target of
+ // continue statements.
+ Block = 0;
+ Succ = addStmt(S->getInc());
+ ContinueJumpTarget = JumpTarget(Succ, ContinueScopePos);
+
+ // The starting block for the loop increment is the block that should
+ // represent the 'loop target' for looping back to the start of the loop.
+ ContinueJumpTarget.block->setLoopTarget(S);
+
+ // Finish up the increment block and prepare to start the loop body.
+ assert(Block);
+ if (badCFG)
+ return 0;
+ Block = 0;
+
+ // Add implicit scope and dtors for loop variable.
+ addLocalScopeAndDtors(S->getLoopVarStmt());
+
+ // Populate a new block to contain the loop body and loop variable.
+ Block = addStmt(S->getBody());
+ if (badCFG)
+ return 0;
+ Block = addStmt(S->getLoopVarStmt());
+ if (badCFG)
+ return 0;
+
+ // This new body block is a successor to our condition block.
+ addSuccessor(ConditionBlock, KnownVal.isFalse() ? 0 : Block);
+ }
+
+ // Link up the condition block with the code that follows the loop (the
+ // false branch).
+ addSuccessor(ConditionBlock, KnownVal.isTrue() ? 0 : LoopSuccessor);
+
+ // Add the initialization statements.
+ Block = createBlock();
+ addStmt(S->getBeginEndStmt());
+ return addStmt(S->getRangeStmt());
+}
+
+CFGBlock *CFGBuilder::VisitExprWithCleanups(ExprWithCleanups *E,
+ AddStmtChoice asc) {
+ if (BuildOpts.AddImplicitDtors) {
+    // If adding implicit destructors, visit the full expression to add
+    // destructors for its temporaries.
+ VisitForTemporaryDtors(E->getSubExpr());
+
+    // The full expression has to be added as a CFGStmt so it will be sequenced
+    // before the destructors of its temporaries.
+ asc = asc.withAlwaysAdd(true);
+ }
+ return Visit(E->getSubExpr(), asc);
+}
+
+CFGBlock *CFGBuilder::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E,
+ AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, E)) {
+ autoCreateBlock();
+ appendStmt(Block, E);
+
+ // We do not want to propagate the AlwaysAdd property.
+ asc = asc.withAlwaysAdd(false);
+ }
+ return Visit(E->getSubExpr(), asc);
+}
+
+CFGBlock *CFGBuilder::VisitCXXConstructExpr(CXXConstructExpr *C,
+ AddStmtChoice asc) {
+ autoCreateBlock();
+ appendStmt(Block, C);
+
+ return VisitChildren(C);
+}
+
+CFGBlock *CFGBuilder::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E,
+ AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, E)) {
+ autoCreateBlock();
+ appendStmt(Block, E);
+ // We do not want to propagate the AlwaysAdd property.
+ asc = asc.withAlwaysAdd(false);
+ }
+ return Visit(E->getSubExpr(), asc);
+}
+
+CFGBlock *CFGBuilder::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *C,
+ AddStmtChoice asc) {
+ autoCreateBlock();
+ appendStmt(Block, C);
+ return VisitChildren(C);
+}
+
+CFGBlock *CFGBuilder::VisitImplicitCastExpr(ImplicitCastExpr *E,
+ AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, E)) {
+ autoCreateBlock();
+ appendStmt(Block, E);
+ }
+ return Visit(E->getSubExpr(), AddStmtChoice());
+}
+
+CFGBlock *CFGBuilder::VisitIndirectGotoStmt(IndirectGotoStmt *I) {
+ // Lazily create the indirect-goto dispatch block if there isn't one already.
+ CFGBlock *IBlock = cfg->getIndirectGotoBlock();
+
+ if (!IBlock) {
+ IBlock = createBlock(false);
+ cfg->setIndirectGotoBlock(IBlock);
+ }
+
+ // IndirectGoto is a control-flow statement. Thus we stop processing the
+ // current block and create a new one.
+ if (badCFG)
+ return 0;
+
+ Block = createBlock(false);
+ Block->setTerminator(I);
+ addSuccessor(Block, IBlock);
+ return addStmt(I->getTarget());
+}
+
+CFGBlock *CFGBuilder::VisitForTemporaryDtors(Stmt *E, bool BindToTemporary) {
+tryAgain:
+ if (!E) {
+ badCFG = true;
+ return NULL;
+ }
+ switch (E->getStmtClass()) {
+ default:
+ return VisitChildrenForTemporaryDtors(E);
+
+ case Stmt::BinaryOperatorClass:
+ return VisitBinaryOperatorForTemporaryDtors(cast<BinaryOperator>(E));
+
+ case Stmt::CXXBindTemporaryExprClass:
+ return VisitCXXBindTemporaryExprForTemporaryDtors(
+ cast<CXXBindTemporaryExpr>(E), BindToTemporary);
+
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass:
+ return VisitConditionalOperatorForTemporaryDtors(
+ cast<AbstractConditionalOperator>(E), BindToTemporary);
+
+ case Stmt::ImplicitCastExprClass:
+ // For implicit cast we want BindToTemporary to be passed further.
+ E = cast<CastExpr>(E)->getSubExpr();
+ goto tryAgain;
+
+ case Stmt::ParenExprClass:
+ E = cast<ParenExpr>(E)->getSubExpr();
+ goto tryAgain;
+
+ case Stmt::MaterializeTemporaryExprClass:
+ E = cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr();
+ goto tryAgain;
+ }
+}
+
+CFGBlock *CFGBuilder::VisitChildrenForTemporaryDtors(Stmt *E) {
+ // When visiting children for destructors we want to visit them in reverse
+  // order. Because there is no reverse iterator for children, we must reverse
+  // them into a helper vector first.
+ typedef SmallVector<Stmt *, 4> ChildrenVect;
+ ChildrenVect ChildrenRev;
+ for (Stmt::child_range I = E->children(); I; ++I) {
+ if (*I) ChildrenRev.push_back(*I);
+ }
+
+ CFGBlock *B = Block;
+ for (ChildrenVect::reverse_iterator I = ChildrenRev.rbegin(),
+ L = ChildrenRev.rend(); I != L; ++I) {
+ if (CFGBlock *R = VisitForTemporaryDtors(*I))
+ B = R;
+ }
+ return B;
+}
+
+CFGBlock *CFGBuilder::VisitBinaryOperatorForTemporaryDtors(BinaryOperator *E) {
+ if (E->isLogicalOp()) {
+    // Destructors for temporaries in the LHS expression should be called after
+    // those for the RHS expression. Even if this unnecessarily creates a block,
+    // that block will at least be used by the full expression.
+ autoCreateBlock();
+ CFGBlock *ConfluenceBlock = VisitForTemporaryDtors(E->getLHS());
+ if (badCFG)
+ return NULL;
+
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS());
+
+ if (RHSBlock) {
+ if (badCFG)
+ return NULL;
+
+      // If the RHS expression did produce destructors, we need to connect the
+      // created blocks to the CFG just as for the binary operator itself.
+ CFGBlock *LHSBlock = createBlock(false);
+ LHSBlock->setTerminator(CFGTerminator(E, true));
+
+      // For the binary operator, the LHS block comes before the RHS block in
+      // the list of predecessors of ConfluenceBlock.
+ std::reverse(ConfluenceBlock->pred_begin(),
+ ConfluenceBlock->pred_end());
+
+ // See if this is a known constant.
+ TryResult KnownVal = tryEvaluateBool(E->getLHS());
+ if (KnownVal.isKnown() && (E->getOpcode() == BO_LOr))
+ KnownVal.negate();
+
+ // Link LHSBlock with RHSBlock exactly the same way as for binary operator
+ // itself.
+ if (E->getOpcode() == BO_LOr) {
+ addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : ConfluenceBlock);
+ addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
+ } else {
+ assert (E->getOpcode() == BO_LAnd);
+ addSuccessor(LHSBlock, KnownVal.isFalse() ? NULL : RHSBlock);
+ addSuccessor(LHSBlock, KnownVal.isTrue() ? NULL : ConfluenceBlock);
+ }
+
+ Block = LHSBlock;
+ return LHSBlock;
+ }
+
+ Block = ConfluenceBlock;
+ return ConfluenceBlock;
+ }
+
+ if (E->isAssignmentOp()) {
+    // For the assignment operator (=) the LHS expression is visited before
+    // the RHS expression. For destructors, visit them in reverse order.
+ CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS());
+ CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS());
+ return LHSBlock ? LHSBlock : RHSBlock;
+ }
+
+  // For any other binary operator the RHS expression is visited before the
+  // LHS expression (the order of the children). For destructors, visit them
+  // in reverse order.
+ CFGBlock *LHSBlock = VisitForTemporaryDtors(E->getLHS());
+ CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getRHS());
+ return RHSBlock ? RHSBlock : LHSBlock;
+}
+
+CFGBlock *CFGBuilder::VisitCXXBindTemporaryExprForTemporaryDtors(
+ CXXBindTemporaryExpr *E, bool BindToTemporary) {
+ // First add destructors for temporaries in subexpression.
+ CFGBlock *B = VisitForTemporaryDtors(E->getSubExpr());
+ if (!BindToTemporary) {
+    // If the lifetime of the temporary is not prolonged (by binding it to a
+    // constant reference), add a destructor for it.
+
+ // If the destructor is marked as a no-return destructor, we need to create
+ // a new block for the destructor which does not have as a successor
+ // anything built thus far. Control won't flow out of this block.
+ const CXXDestructorDecl *Dtor = E->getTemporary()->getDestructor();
+ if (cast<FunctionType>(Dtor->getType())->getNoReturnAttr())
+ Block = createNoReturnBlock();
+ else
+ autoCreateBlock();
+
+ appendTemporaryDtor(Block, E);
+ B = Block;
+ }
+ return B;
+}
+
+CFGBlock *CFGBuilder::VisitConditionalOperatorForTemporaryDtors(
+ AbstractConditionalOperator *E, bool BindToTemporary) {
+ // First add destructors for condition expression. Even if this will
+ // unnecessarily create a block, this block will be used at least by the full
+ // expression.
+ autoCreateBlock();
+ CFGBlock *ConfluenceBlock = VisitForTemporaryDtors(E->getCond());
+ if (badCFG)
+ return NULL;
+ if (BinaryConditionalOperator *BCO
+ = dyn_cast<BinaryConditionalOperator>(E)) {
+ ConfluenceBlock = VisitForTemporaryDtors(BCO->getCommon());
+ if (badCFG)
+ return NULL;
+ }
+
+  // Try to add a block with destructors for the LHS expression.
+ CFGBlock *LHSBlock = NULL;
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ LHSBlock = VisitForTemporaryDtors(E->getTrueExpr(), BindToTemporary);
+ if (badCFG)
+ return NULL;
+
+  // Try to add a block with destructors for the RHS expression.
+ Succ = ConfluenceBlock;
+ Block = NULL;
+ CFGBlock *RHSBlock = VisitForTemporaryDtors(E->getFalseExpr(),
+ BindToTemporary);
+ if (badCFG)
+ return NULL;
+
+ if (!RHSBlock && !LHSBlock) {
+ // If neither LHS nor RHS expression had temporaries to destroy don't create
+ // more blocks.
+ Block = ConfluenceBlock;
+ return Block;
+ }
+
+ Block = createBlock(false);
+ Block->setTerminator(CFGTerminator(E, true));
+
+ // See if this is a known constant.
+ const TryResult &KnownVal = tryEvaluateBool(E->getCond());
+
+ if (LHSBlock) {
+ addSuccessor(Block, KnownVal.isFalse() ? NULL : LHSBlock);
+ } else if (KnownVal.isFalse()) {
+ addSuccessor(Block, NULL);
+ } else {
+ addSuccessor(Block, ConfluenceBlock);
+ std::reverse(ConfluenceBlock->pred_begin(), ConfluenceBlock->pred_end());
+ }
+
+ if (!RHSBlock)
+ RHSBlock = ConfluenceBlock;
+ addSuccessor(Block, KnownVal.isTrue() ? NULL : RHSBlock);
+
+ return Block;
+}
+
+} // end anonymous namespace
+
+/// createBlock - Constructs and adds a new CFGBlock to the CFG. The block has
+/// no successors or predecessors. If this is the first block created in the
+/// CFG, it is automatically set to be the Entry and Exit of the CFG.
+CFGBlock *CFG::createBlock() {
+ bool first_block = begin() == end();
+
+ // Create the block.
+ CFGBlock *Mem = getAllocator().Allocate<CFGBlock>();
+ new (Mem) CFGBlock(NumBlockIDs++, BlkBVC, this);
+ Blocks.push_back(Mem, BlkBVC);
+
+ // If this is the first block, set it as the Entry and Exit.
+ if (first_block)
+ Entry = Exit = &back();
+
+ // Return the block.
+ return &back();
+}
+
+/// buildCFG - Constructs a CFG from an AST. Ownership of the returned CFG is
+/// transferred to the caller.
+CFG* CFG::buildCFG(const Decl *D, Stmt *Statement, ASTContext *C,
+ const BuildOptions &BO) {
+ CFGBuilder Builder(C, BO);
+ return Builder.buildCFG(D, Statement);
+}
+
+const CXXDestructorDecl *
+CFGImplicitDtor::getDestructorDecl(ASTContext &astContext) const {
+ switch (getKind()) {
+ case CFGElement::Invalid:
+ case CFGElement::Statement:
+ case CFGElement::Initializer:
+ llvm_unreachable("getDestructorDecl should only be used with "
+ "ImplicitDtors");
+ case CFGElement::AutomaticObjectDtor: {
+ const VarDecl *var = cast<CFGAutomaticObjDtor>(this)->getVarDecl();
+ QualType ty = var->getType();
+ ty = ty.getNonReferenceType();
+ while (const ArrayType *arrayType = astContext.getAsArrayType(ty)) {
+ ty = arrayType->getElementType();
+ }
+ const RecordType *recordType = ty->getAs<RecordType>();
+ const CXXRecordDecl *classDecl =
+ cast<CXXRecordDecl>(recordType->getDecl());
+ return classDecl->getDestructor();
+ }
+ case CFGElement::TemporaryDtor: {
+ const CXXBindTemporaryExpr *bindExpr =
+ cast<CFGTemporaryDtor>(this)->getBindTemporaryExpr();
+ const CXXTemporary *temp = bindExpr->getTemporary();
+ return temp->getDestructor();
+ }
+ case CFGElement::BaseDtor:
+ case CFGElement::MemberDtor:
+
+ // Not yet supported.
+ return 0;
+ }
+ llvm_unreachable("getKind() returned bogus value");
+}
+
+bool CFGImplicitDtor::isNoReturn(ASTContext &astContext) const {
+ if (const CXXDestructorDecl *cdecl = getDestructorDecl(astContext)) {
+ QualType ty = cdecl->getType();
+ return cast<FunctionType>(ty)->getNoReturnAttr();
+ }
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// CFG: Queries for BlkExprs.
+//===----------------------------------------------------------------------===//
+
+namespace {
+ typedef llvm::DenseMap<const Stmt*,unsigned> BlkExprMapTy;
+}
+
+static void FindSubExprAssignments(const Stmt *S,
+ llvm::SmallPtrSet<const Expr*,50>& Set) {
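+  // Recursively collect every assignment operator that appears as a
+  // subexpression of S.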
+ if (!S)
+ return;
+
+ for (Stmt::const_child_range I = S->children(); I; ++I) {
+ const Stmt *child = *I;
+ if (!child)
+ continue;
+
+ if (const BinaryOperator* B = dyn_cast<BinaryOperator>(child))
+ if (B->isAssignmentOp()) Set.insert(B);
+
+ FindSubExprAssignments(child, Set);
+ }
+}
+
+static BlkExprMapTy* PopulateBlkExprMap(CFG& cfg) {
+ BlkExprMapTy* M = new BlkExprMapTy();
+
+ // Look for assignments that are used as subexpressions. These are the only
+ // assignments that we want to *possibly* register as a block-level
+ // expression. Basically, if an assignment occurs both in a subexpression and
+ // at the block-level, it is a block-level expression.
+ llvm::SmallPtrSet<const Expr*,50> SubExprAssignments;
+
+ for (CFG::iterator I=cfg.begin(), E=cfg.end(); I != E; ++I)
+ for (CFGBlock::iterator BI=(*I)->begin(), EI=(*I)->end(); BI != EI; ++BI)
+ if (const CFGStmt *S = BI->getAs<CFGStmt>())
+ FindSubExprAssignments(S->getStmt(), SubExprAssignments);
+
+ for (CFG::iterator I=cfg.begin(), E=cfg.end(); I != E; ++I) {
+
+    // Iterate over the statements again to identify the Expr* and Stmt* at
+    // the block level that are block-level expressions.
+
+ for (CFGBlock::iterator BI=(*I)->begin(), EI=(*I)->end(); BI != EI; ++BI) {
+ const CFGStmt *CS = BI->getAs<CFGStmt>();
+ if (!CS)
+ continue;
+ if (const Expr *Exp = dyn_cast<Expr>(CS->getStmt())) {
+ assert((Exp->IgnoreParens() == Exp) && "No parens on block-level exps");
+
+ if (const BinaryOperator* B = dyn_cast<BinaryOperator>(Exp)) {
+ // Assignment expressions that are not nested within another
+ // expression are really "statements" whose value is never used by
+ // another expression.
+ if (B->isAssignmentOp() && !SubExprAssignments.count(Exp))
+ continue;
+ } else if (const StmtExpr *SE = dyn_cast<StmtExpr>(Exp)) {
+ // Special handling for statement expressions. The last statement in
+ // the statement expression is also a block-level expr.
+ const CompoundStmt *C = SE->getSubStmt();
+ if (!C->body_empty()) {
+ const Stmt *Last = C->body_back();
+ if (const Expr *LastEx = dyn_cast<Expr>(Last))
+ Last = LastEx->IgnoreParens();
+ unsigned x = M->size();
+ (*M)[Last] = x;
+ }
+ }
+
+ unsigned x = M->size();
+ (*M)[Exp] = x;
+ }
+ }
+
+ // Look at terminators. The condition is a block-level expression.
+
+ Stmt *S = (*I)->getTerminatorCondition();
+
+ if (S && M->find(S) == M->end()) {
+ unsigned x = M->size();
+ (*M)[S] = x;
+ }
+ }
+
+ return M;
+}
+
+CFG::BlkExprNumTy CFG::getBlkExprNum(const Stmt *S) {
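+  // Lazily build the block-level expression map on the first query.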
+ assert(S != NULL);
+ if (!BlkExprMap) { BlkExprMap = (void*) PopulateBlkExprMap(*this); }
+
+ BlkExprMapTy* M = reinterpret_cast<BlkExprMapTy*>(BlkExprMap);
+ BlkExprMapTy::iterator I = M->find(S);
+ return (I == M->end()) ? CFG::BlkExprNumTy() : CFG::BlkExprNumTy(I->second);
+}
+
+unsigned CFG::getNumBlkExprs() {
+ if (const BlkExprMapTy* M = reinterpret_cast<const BlkExprMapTy*>(BlkExprMap))
+ return M->size();
+
+ // We assume callers interested in the number of BlkExprs will want
+ // the map constructed if it doesn't already exist.
+ BlkExprMap = (void*) PopulateBlkExprMap(*this);
+ return reinterpret_cast<BlkExprMapTy*>(BlkExprMap)->size();
+}
+
+//===----------------------------------------------------------------------===//
+// Filtered walking of the CFG.
+//===----------------------------------------------------------------------===//
+
+bool CFGBlock::FilterEdge(const CFGBlock::FilterOptions &F,
+ const CFGBlock *From, const CFGBlock *To) {
+
+ if (To && F.IgnoreDefaultsWithCoveredEnums) {
+    // If the 'To' block has no label, or it is labeled but the label isn't a
+    // CaseStmt, then filter this edge.
+ if (const SwitchStmt *S =
+ dyn_cast_or_null<SwitchStmt>(From->getTerminator().getStmt())) {
+ if (S->isAllEnumCasesCovered()) {
+ const Stmt *L = To->getLabel();
+ if (!L || !isa<CaseStmt>(L))
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Cleanup: CFG destructor.
+//===----------------------------------------------------------------------===//
+
+CFG::~CFG() {
+ delete reinterpret_cast<const BlkExprMapTy*>(BlkExprMap);
+}
+
+//===----------------------------------------------------------------------===//
+// CFG pretty printing
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class StmtPrinterHelper : public PrinterHelper {
+ typedef llvm::DenseMap<const Stmt*,std::pair<unsigned,unsigned> > StmtMapTy;
+ typedef llvm::DenseMap<const Decl*,std::pair<unsigned,unsigned> > DeclMapTy;
+ StmtMapTy StmtMap;
+ DeclMapTy DeclMap;
+ signed currentBlock;
+ unsigned currentStmt;
+ const LangOptions &LangOpts;
+public:
+
+ StmtPrinterHelper(const CFG* cfg, const LangOptions &LO)
+ : currentBlock(0), currentStmt(0), LangOpts(LO)
+ {
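+    // Record the [BlockID.StmtIndex] position of every block-level statement,
+    // and of the variables declared in DeclStmts, loop/if/switch conditions,
+    // and catch clauses, so cross-references can be printed later.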
+ for (CFG::const_iterator I = cfg->begin(), E = cfg->end(); I != E; ++I ) {
+ unsigned j = 1;
+ for (CFGBlock::const_iterator BI = (*I)->begin(), BEnd = (*I)->end() ;
+ BI != BEnd; ++BI, ++j ) {
+ if (const CFGStmt *SE = BI->getAs<CFGStmt>()) {
+ const Stmt *stmt= SE->getStmt();
+ std::pair<unsigned, unsigned> P((*I)->getBlockID(), j);
+ StmtMap[stmt] = P;
+
+ switch (stmt->getStmtClass()) {
+ case Stmt::DeclStmtClass:
+ DeclMap[cast<DeclStmt>(stmt)->getSingleDecl()] = P;
+ break;
+ case Stmt::IfStmtClass: {
+ const VarDecl *var = cast<IfStmt>(stmt)->getConditionVariable();
+ if (var)
+ DeclMap[var] = P;
+ break;
+ }
+ case Stmt::ForStmtClass: {
+ const VarDecl *var = cast<ForStmt>(stmt)->getConditionVariable();
+ if (var)
+ DeclMap[var] = P;
+ break;
+ }
+ case Stmt::WhileStmtClass: {
+ const VarDecl *var =
+ cast<WhileStmt>(stmt)->getConditionVariable();
+ if (var)
+ DeclMap[var] = P;
+ break;
+ }
+ case Stmt::SwitchStmtClass: {
+ const VarDecl *var =
+ cast<SwitchStmt>(stmt)->getConditionVariable();
+ if (var)
+ DeclMap[var] = P;
+ break;
+ }
+ case Stmt::CXXCatchStmtClass: {
+ const VarDecl *var =
+ cast<CXXCatchStmt>(stmt)->getExceptionDecl();
+ if (var)
+ DeclMap[var] = P;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ }
+ }
+ }
+
+
+ virtual ~StmtPrinterHelper() {}
+
+ const LangOptions &getLangOpts() const { return LangOpts; }
+ void setBlockID(signed i) { currentBlock = i; }
+ void setStmtID(unsigned i) { currentStmt = i; }
+
+ virtual bool handledStmt(Stmt *S, raw_ostream &OS) {
+ StmtMapTy::iterator I = StmtMap.find(S);
+
+ if (I == StmtMap.end())
+ return false;
+
+ if (currentBlock >= 0 && I->second.first == (unsigned) currentBlock
+ && I->second.second == currentStmt) {
+ return false;
+ }
+
+ OS << "[B" << I->second.first << "." << I->second.second << "]";
+ return true;
+ }
+
+ bool handleDecl(const Decl *D, raw_ostream &OS) {
+ DeclMapTy::iterator I = DeclMap.find(D);
+
+ if (I == DeclMap.end())
+ return false;
+
+ if (currentBlock >= 0 && I->second.first == (unsigned) currentBlock
+ && I->second.second == currentStmt) {
+ return false;
+ }
+
+ OS << "[B" << I->second.first << "." << I->second.second << "]";
+ return true;
+ }
+};
+} // end anonymous namespace
+
+
+namespace {
+class CFGBlockTerminatorPrint
+ : public StmtVisitor<CFGBlockTerminatorPrint,void> {
+
+ raw_ostream &OS;
+ StmtPrinterHelper* Helper;
+ PrintingPolicy Policy;
+public:
+ CFGBlockTerminatorPrint(raw_ostream &os, StmtPrinterHelper* helper,
+ const PrintingPolicy &Policy)
+ : OS(os), Helper(helper), Policy(Policy) {}
+
+ void VisitIfStmt(IfStmt *I) {
+ OS << "if ";
+ I->getCond()->printPretty(OS,Helper,Policy);
+ }
+
+ // Default case.
+ void VisitStmt(Stmt *Terminator) {
+ Terminator->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitForStmt(ForStmt *F) {
+ OS << "for (" ;
+ if (F->getInit())
+ OS << "...";
+ OS << "; ";
+ if (Stmt *C = F->getCond())
+ C->printPretty(OS, Helper, Policy);
+ OS << "; ";
+ if (F->getInc())
+ OS << "...";
+ OS << ")";
+ }
+
+ void VisitWhileStmt(WhileStmt *W) {
+ OS << "while " ;
+ if (Stmt *C = W->getCond())
+ C->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitDoStmt(DoStmt *D) {
+ OS << "do ... while ";
+ if (Stmt *C = D->getCond())
+ C->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitSwitchStmt(SwitchStmt *Terminator) {
+ OS << "switch ";
+ Terminator->getCond()->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitCXXTryStmt(CXXTryStmt *CS) {
+ OS << "try ...";
+ }
+
+ void VisitAbstractConditionalOperator(AbstractConditionalOperator* C) {
+ C->getCond()->printPretty(OS, Helper, Policy);
+ OS << " ? ... : ...";
+ }
+
+ void VisitChooseExpr(ChooseExpr *C) {
+ OS << "__builtin_choose_expr( ";
+ C->getCond()->printPretty(OS, Helper, Policy);
+ OS << " )";
+ }
+
+ void VisitIndirectGotoStmt(IndirectGotoStmt *I) {
+ OS << "goto *";
+ I->getTarget()->printPretty(OS, Helper, Policy);
+ }
+
+ void VisitBinaryOperator(BinaryOperator* B) {
+ if (!B->isLogicalOp()) {
+ VisitExpr(B);
+ return;
+ }
+
+ B->getLHS()->printPretty(OS, Helper, Policy);
+
+ switch (B->getOpcode()) {
+ case BO_LOr:
+ OS << " || ...";
+ return;
+ case BO_LAnd:
+ OS << " && ...";
+ return;
+ default:
+ llvm_unreachable("Invalid logical operator.");
+ }
+ }
+
+ void VisitExpr(Expr *E) {
+ E->printPretty(OS, Helper, Policy);
+ }
+};
+} // end anonymous namespace
+
+static void print_elem(raw_ostream &OS, StmtPrinterHelper* Helper,
+ const CFGElement &E) {
+ if (const CFGStmt *CS = E.getAs<CFGStmt>()) {
+ const Stmt *S = CS->getStmt();
+
+ if (Helper) {
+
+ // special printing for statement-expressions.
+ if (const StmtExpr *SE = dyn_cast<StmtExpr>(S)) {
+ const CompoundStmt *Sub = SE->getSubStmt();
+
+ if (Sub->children()) {
+ OS << "({ ... ; ";
+ Helper->handledStmt(*SE->getSubStmt()->body_rbegin(),OS);
+ OS << " })\n";
+ return;
+ }
+ }
+ // special printing for comma expressions.
+ if (const BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
+ if (B->getOpcode() == BO_Comma) {
+ OS << "... , ";
+ Helper->handledStmt(B->getRHS(),OS);
+ OS << '\n';
+ return;
+ }
+ }
+ }
+ S->printPretty(OS, Helper, PrintingPolicy(Helper->getLangOpts()));
+
+ if (isa<CXXOperatorCallExpr>(S)) {
+ OS << " (OperatorCall)";
+ }
+ else if (isa<CXXBindTemporaryExpr>(S)) {
+ OS << " (BindTemporary)";
+ }
+ else if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(S)) {
+ OS << " (CXXConstructExpr, " << CCE->getType().getAsString() << ")";
+ }
+ else if (const CastExpr *CE = dyn_cast<CastExpr>(S)) {
+ OS << " (" << CE->getStmtClassName() << ", "
+ << CE->getCastKindName()
+ << ", " << CE->getType().getAsString()
+ << ")";
+ }
+
+ // Expressions need a newline.
+ if (isa<Expr>(S))
+ OS << '\n';
+
+ } else if (const CFGInitializer *IE = E.getAs<CFGInitializer>()) {
+ const CXXCtorInitializer *I = IE->getInitializer();
+ if (I->isBaseInitializer())
+ OS << I->getBaseClass()->getAsCXXRecordDecl()->getName();
+ else OS << I->getAnyMember()->getName();
+
+ OS << "(";
+ if (Expr *IE = I->getInit())
+ IE->printPretty(OS, Helper, PrintingPolicy(Helper->getLangOpts()));
+ OS << ")";
+
+ if (I->isBaseInitializer())
+ OS << " (Base initializer)\n";
+ else OS << " (Member initializer)\n";
+
+ } else if (const CFGAutomaticObjDtor *DE = E.getAs<CFGAutomaticObjDtor>()){
+ const VarDecl *VD = DE->getVarDecl();
+ Helper->handleDecl(VD, OS);
+
+ const Type* T = VD->getType().getTypePtr();
+ if (const ReferenceType* RT = T->getAs<ReferenceType>())
+ T = RT->getPointeeType().getTypePtr();
+ else if (const Type *ET = T->getArrayElementTypeNoTypeQual())
+ T = ET;
+
+ OS << ".~" << T->getAsCXXRecordDecl()->getName().str() << "()";
+ OS << " (Implicit destructor)\n";
+
+ } else if (const CFGBaseDtor *BE = E.getAs<CFGBaseDtor>()) {
+ const CXXBaseSpecifier *BS = BE->getBaseSpecifier();
+ OS << "~" << BS->getType()->getAsCXXRecordDecl()->getName() << "()";
+ OS << " (Base object destructor)\n";
+
+ } else if (const CFGMemberDtor *ME = E.getAs<CFGMemberDtor>()) {
+ const FieldDecl *FD = ME->getFieldDecl();
+
+ const Type *T = FD->getType().getTypePtr();
+ if (const Type *ET = T->getArrayElementTypeNoTypeQual())
+ T = ET;
+
+ OS << "this->" << FD->getName();
+ OS << ".~" << T->getAsCXXRecordDecl()->getName() << "()";
+ OS << " (Member object destructor)\n";
+
+ } else if (const CFGTemporaryDtor *TE = E.getAs<CFGTemporaryDtor>()) {
+ const CXXBindTemporaryExpr *BT = TE->getBindTemporaryExpr();
+ OS << "~" << BT->getType()->getAsCXXRecordDecl()->getName() << "()";
+ OS << " (Temporary object destructor)\n";
+ }
+}
+
+static void print_block(raw_ostream &OS, const CFG* cfg,
+ const CFGBlock &B,
+ StmtPrinterHelper* Helper, bool print_edges,
+ bool ShowColors) {
+
+ if (Helper)
+ Helper->setBlockID(B.getBlockID());
+
+ // Print the header.
+ if (ShowColors)
+ OS.changeColor(raw_ostream::YELLOW, true);
+
+ OS << "\n [B" << B.getBlockID();
+
+ if (&B == &cfg->getEntry())
+ OS << " (ENTRY)]\n";
+ else if (&B == &cfg->getExit())
+ OS << " (EXIT)]\n";
+ else if (&B == cfg->getIndirectGotoBlock())
+ OS << " (INDIRECT GOTO DISPATCH)]\n";
+ else
+ OS << "]\n";
+
+ if (ShowColors)
+ OS.resetColor();
+
+ // Print the label of this block.
+ if (Stmt *Label = const_cast<Stmt*>(B.getLabel())) {
+
+ if (print_edges)
+ OS << " ";
+
+ if (LabelStmt *L = dyn_cast<LabelStmt>(Label))
+ OS << L->getName();
+ else if (CaseStmt *C = dyn_cast<CaseStmt>(Label)) {
+ OS << "case ";
+ C->getLHS()->printPretty(OS, Helper,
+ PrintingPolicy(Helper->getLangOpts()));
+ if (C->getRHS()) {
+ OS << " ... ";
+ C->getRHS()->printPretty(OS, Helper,
+ PrintingPolicy(Helper->getLangOpts()));
+ }
+ } else if (isa<DefaultStmt>(Label))
+ OS << "default";
+ else if (CXXCatchStmt *CS = dyn_cast<CXXCatchStmt>(Label)) {
+ OS << "catch (";
+ if (CS->getExceptionDecl())
+ CS->getExceptionDecl()->print(OS, PrintingPolicy(Helper->getLangOpts()),
+ 0);
+ else
+ OS << "...";
+ OS << ")";
+
+ } else
+ llvm_unreachable("Invalid label statement in CFGBlock.");
+
+ OS << ":\n";
+ }
+
+ // Iterate through the statements in the block and print them.
+ unsigned j = 1;
+
+ for (CFGBlock::const_iterator I = B.begin(), E = B.end() ;
+ I != E ; ++I, ++j ) {
+
+ // Print the statement # in the basic block and the statement itself.
+ if (print_edges)
+ OS << " ";
+
+ OS << llvm::format("%3d", j) << ": ";
+
+ if (Helper)
+ Helper->setStmtID(j);
+
+ print_elem(OS, Helper, *I);
+ }
+
+ // Print the terminator of this block.
+ if (B.getTerminator()) {
+ if (ShowColors)
+ OS.changeColor(raw_ostream::GREEN);
+
+ OS << " T: ";
+
+ if (Helper) Helper->setBlockID(-1);
+
+ CFGBlockTerminatorPrint TPrinter(OS, Helper,
+ PrintingPolicy(Helper->getLangOpts()));
+ TPrinter.Visit(const_cast<Stmt*>(B.getTerminator().getStmt()));
+ OS << '\n';
+
+ if (ShowColors)
+ OS.resetColor();
+ }
+
+ if (print_edges) {
+ // Print the predecessors of this block.
+ if (!B.pred_empty()) {
+ const raw_ostream::Colors Color = raw_ostream::BLUE;
+ if (ShowColors)
+ OS.changeColor(Color);
+ OS << " Preds " ;
+ if (ShowColors)
+ OS.resetColor();
+ OS << '(' << B.pred_size() << "):";
+ unsigned i = 0;
+
+ if (ShowColors)
+ OS.changeColor(Color);
+
+ for (CFGBlock::const_pred_iterator I = B.pred_begin(), E = B.pred_end();
+ I != E; ++I, ++i) {
+
+        if (i == 8 || (i-8) % 10 == 0)
+ OS << "\n ";
+
+ OS << " B" << (*I)->getBlockID();
+ }
+
+ if (ShowColors)
+ OS.resetColor();
+
+ OS << '\n';
+ }
+
+ // Print the successors of this block.
+ if (!B.succ_empty()) {
+ const raw_ostream::Colors Color = raw_ostream::MAGENTA;
+ if (ShowColors)
+ OS.changeColor(Color);
+ OS << " Succs ";
+ if (ShowColors)
+ OS.resetColor();
+ OS << '(' << B.succ_size() << "):";
+ unsigned i = 0;
+
+ if (ShowColors)
+ OS.changeColor(Color);
+
+ for (CFGBlock::const_succ_iterator I = B.succ_begin(), E = B.succ_end();
+ I != E; ++I, ++i) {
+
+ if (i == 8 || (i-8) % 10 == 0)
+ OS << "\n ";
+
+ if (*I)
+ OS << " B" << (*I)->getBlockID();
+ else
+ OS << " NULL";
+ }
+
+ if (ShowColors)
+ OS.resetColor();
+ OS << '\n';
+ }
+ }
+}
+
+
+/// dump - A simple pretty printer of a CFG that outputs to stderr.
+void CFG::dump(const LangOptions &LO, bool ShowColors) const {
+ print(llvm::errs(), LO, ShowColors);
+}
+
+/// print - A simple pretty printer of a CFG that outputs to an ostream.
+void CFG::print(raw_ostream &OS, const LangOptions &LO, bool ShowColors) const {
+ StmtPrinterHelper Helper(this, LO);
+
+ // Print the entry block.
+ print_block(OS, this, getEntry(), &Helper, true, ShowColors);
+
+ // Iterate through the CFGBlocks and print them one by one.
+ for (const_iterator I = Blocks.begin(), E = Blocks.end() ; I != E ; ++I) {
+ // Skip the entry block, because we already printed it.
+ if (&(**I) == &getEntry() || &(**I) == &getExit())
+ continue;
+
+ print_block(OS, this, **I, &Helper, true, ShowColors);
+ }
+
+ // Print the exit block.
+ print_block(OS, this, getExit(), &Helper, true, ShowColors);
+ OS << '\n';
+ OS.flush();
+}
+
+/// dump - A simple pretty printer of a CFGBlock that outputs to stderr.
+void CFGBlock::dump(const CFG* cfg, const LangOptions &LO,
+ bool ShowColors) const {
+ print(llvm::errs(), cfg, LO, ShowColors);
+}
+
+/// print - A simple pretty printer of a CFGBlock that outputs to an ostream.
+/// Generally this will only be called from CFG::print.
+void CFGBlock::print(raw_ostream &OS, const CFG* cfg,
+ const LangOptions &LO, bool ShowColors) const {
+ StmtPrinterHelper Helper(cfg, LO);
+ print_block(OS, cfg, *this, &Helper, true, ShowColors);
+ OS << '\n';
+}
+
+/// printTerminator - A simple pretty printer of the terminator of a CFGBlock.
+void CFGBlock::printTerminator(raw_ostream &OS,
+ const LangOptions &LO) const {
+ CFGBlockTerminatorPrint TPrinter(OS, NULL, PrintingPolicy(LO));
+ TPrinter.Visit(const_cast<Stmt*>(getTerminator().getStmt()));
+}
+
+Stmt *CFGBlock::getTerminatorCondition() {
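+  // Return the expression this block's terminator branches on, with any
+  // parentheses stripped.  Indirect gotos yield their target expression;
+  // ObjC for-collection statements are returned as-is.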
+ Stmt *Terminator = this->Terminator;
+ if (!Terminator)
+ return NULL;
+
+ Expr *E = NULL;
+
+ switch (Terminator->getStmtClass()) {
+ default:
+ break;
+
+ case Stmt::ForStmtClass:
+ E = cast<ForStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::WhileStmtClass:
+ E = cast<WhileStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::DoStmtClass:
+ E = cast<DoStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::IfStmtClass:
+ E = cast<IfStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::ChooseExprClass:
+ E = cast<ChooseExpr>(Terminator)->getCond();
+ break;
+
+ case Stmt::IndirectGotoStmtClass:
+ E = cast<IndirectGotoStmt>(Terminator)->getTarget();
+ break;
+
+ case Stmt::SwitchStmtClass:
+ E = cast<SwitchStmt>(Terminator)->getCond();
+ break;
+
+ case Stmt::BinaryConditionalOperatorClass:
+ E = cast<BinaryConditionalOperator>(Terminator)->getCond();
+ break;
+
+ case Stmt::ConditionalOperatorClass:
+ E = cast<ConditionalOperator>(Terminator)->getCond();
+ break;
+
+ case Stmt::BinaryOperatorClass: // '&&' and '||'
+ E = cast<BinaryOperator>(Terminator)->getLHS();
+ break;
+
+ case Stmt::ObjCForCollectionStmtClass:
+ return Terminator;
+ }
+
+ return E ? E->IgnoreParens() : NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// CFG Graphviz Visualization
+//===----------------------------------------------------------------------===//
+
+
+#ifndef NDEBUG
+static StmtPrinterHelper* GraphHelper;
+#endif
+
+void CFG::viewCFG(const LangOptions &LO) const {
+#ifndef NDEBUG
+ StmtPrinterHelper H(this, LO);
+ GraphHelper = &H;
+ llvm::ViewGraph(this,"CFG");
+ GraphHelper = NULL;
+#endif
+}
+
+namespace llvm {
+template<>
+struct DOTGraphTraits<const CFG*> : public DefaultDOTGraphTraits {
+
+ DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+
+ static std::string getNodeLabel(const CFGBlock *Node, const CFG* Graph) {
+
+#ifndef NDEBUG
+ std::string OutSStr;
+ llvm::raw_string_ostream Out(OutSStr);
+ print_block(Out,Graph, *Node, GraphHelper, false, false);
+ std::string& OutStr = Out.str();
+
+ if (OutStr[0] == '\n') OutStr.erase(OutStr.begin());
+
+ // Process string output to make it nicer...
+ for (unsigned i = 0; i != OutStr.length(); ++i)
+ if (OutStr[i] == '\n') { // Left justify
+ OutStr[i] = '\\';
+ OutStr.insert(OutStr.begin()+i+1, 'l');
+ }
+
+ return OutStr;
+#else
+ return "";
+#endif
+ }
+};
+} // end namespace llvm
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFGReachabilityAnalysis.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFGReachabilityAnalysis.cpp
new file mode 100644
index 0000000..e77e72f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/CFGReachabilityAnalysis.cpp
@@ -0,0 +1,76 @@
+//==- CFGReachabilityAnalysis.cpp - Basic reachability analysis --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a flow-sensitive, (mostly) path-insensitive reachability
+// analysis based on Clang's CFGs. Clients can query if a given basic block
+// is reachable within the CFG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/SmallVector.h"
+#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
+#include "clang/Analysis/CFG.h"
+
+using namespace clang;
+
+CFGReverseBlockReachabilityAnalysis::CFGReverseBlockReachabilityAnalysis(const CFG &cfg)
+ : analyzed(cfg.getNumBlockIDs(), false) {}
+
+bool CFGReverseBlockReachabilityAnalysis::isReachable(const CFGBlock *Src,
+ const CFGBlock *Dst) {
+
+ const unsigned DstBlockID = Dst->getBlockID();
+
+ // If we haven't analyzed the destination node, run the analysis now
+ if (!analyzed[DstBlockID]) {
+ mapReachability(Dst);
+ analyzed[DstBlockID] = true;
+ }
+
+ // Return the cached result
+ return reachable[DstBlockID][Src->getBlockID()];
+}
+
+// Maps reachability to a common node by walking the predecessors of the
+// destination node.
+void CFGReverseBlockReachabilityAnalysis::mapReachability(const CFGBlock *Dst) {
+ SmallVector<const CFGBlock *, 11> worklist;
+ llvm::BitVector visited(analyzed.size());
+
+ ReachableSet &DstReachability = reachable[Dst->getBlockID()];
+ DstReachability.resize(analyzed.size(), false);
+
+ // Start searching from the destination node, since we commonly will perform
+ // multiple queries relating to a destination node.
+ worklist.push_back(Dst);
+ bool firstRun = true;
+
+ while (!worklist.empty()) {
+ const CFGBlock *block = worklist.back();
+ worklist.pop_back();
+
+ if (visited[block->getBlockID()])
+ continue;
+ visited[block->getBlockID()] = true;
+
+ // Update reachability information for this node -> Dst
+ if (!firstRun) {
+ // Don't insert Dst -> Dst unless it was a predecessor of itself
+ DstReachability[block->getBlockID()] = true;
+ }
+ else
+ firstRun = false;
+
+ // Add the predecessors to the worklist.
+ for (CFGBlock::const_pred_iterator i = block->pred_begin(),
+ e = block->pred_end(); i != e; ++i) {
+ worklist.push_back(*i);
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CFGStmtMap.cpp b/contrib/llvm/tools/clang/lib/Analysis/CFGStmtMap.cpp
new file mode 100644
index 0000000..16df676
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/CFGStmtMap.cpp
@@ -0,0 +1,91 @@
+//===--- CFGStmtMap.cpp - Map from Stmt* to CFGBlock* ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CFGStmtMap class, which provides a mapping from
+// Stmt* to CFGBlock*.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/DenseMap.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/CFGStmtMap.h"
+
+using namespace clang;
+
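+// CFGStmtMap stores its map as an opaque void*; AsMap() recovers the concrete
+// DenseMap type.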
+typedef llvm::DenseMap<const Stmt*, CFGBlock*> SMap;
+static SMap *AsMap(void *m) { return (SMap*) m; }
+
+CFGStmtMap::~CFGStmtMap() { delete AsMap(M); }
+
+CFGBlock *CFGStmtMap::getBlock(Stmt *S) {
+ SMap *SM = AsMap(M);
+ Stmt *X = S;
+
+ // If 'S' isn't in the map, walk the ParentMap to see if one of its ancestors
+ // is in the map.
+ while (X) {
+ SMap::iterator I = SM->find(X);
+ if (I != SM->end()) {
+ CFGBlock *B = I->second;
+ // Memoize this lookup.
+ if (X != S)
+ (*SM)[X] = B;
+ return B;
+ }
+
+ X = PM->getParentIgnoreParens(X);
+ }
+
+ return 0;
+}
+
+static void Accumulate(SMap &SM, CFGBlock *B) {
+ // First walk the block-level expressions.
+ for (CFGBlock::iterator I = B->begin(), E = B->end(); I != E; ++I) {
+ const CFGElement &CE = *I;
+ const CFGStmt *CS = CE.getAs<CFGStmt>();
+ if (!CS)
+ continue;
+
+ CFGBlock *&Entry = SM[CS->getStmt()];
+    // If 'Entry' is already initialized (e.g., a terminator was already
+    // added for this statement), skip.
+ if (Entry)
+ continue;
+
+ Entry = B;
+
+ }
+
+ // Look at the label of the block.
+ if (Stmt *Label = B->getLabel())
+ SM[Label] = B;
+
+ // Finally, look at the terminator. If the terminator was already added
+ // because it is a block-level expression in another block, overwrite
+ // that mapping.
+ if (Stmt *Term = B->getTerminator())
+ SM[Term] = B;
+}
+
+CFGStmtMap *CFGStmtMap::Build(CFG *C, ParentMap *PM) {
+ if (!C || !PM)
+ return 0;
+
+ SMap *SM = new SMap();
+
+ // Walk all blocks, accumulating the block-level expressions, labels,
+ // and terminators.
+ for (CFG::iterator I = C->begin(), E = C->end(); I != E; ++I)
+ Accumulate(*SM, *I);
+
+ return new CFGStmtMap(PM, SM);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp b/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp
new file mode 100644
index 0000000..96a16c3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/CallGraph.cpp
@@ -0,0 +1,184 @@
+//== CallGraph.cpp - AST-based Call graph ----------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AST-based CallGraph.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Analysis/CallGraph.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/StmtVisitor.h"
+
+#include "llvm/Support/GraphWriter.h"
+
+using namespace clang;
+
+namespace {
+/// A helper class, which walks the AST and locates all the call sites in the
+/// given function body.
+class CGBuilder : public StmtVisitor<CGBuilder> {
+ CallGraph *G;
+ const Decl *FD;
+ CallGraphNode *CallerNode;
+
+public:
+ CGBuilder(CallGraph *g, const Decl *D, CallGraphNode *N)
+ : G(g), FD(D), CallerNode(N) {}
+
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+
+ void VisitCallExpr(CallExpr *CE) {
+ // TODO: We need to handle ObjC method calls as well.
+ if (FunctionDecl *CalleeDecl = CE->getDirectCallee())
+ if (G->includeInGraph(CalleeDecl)) {
+ CallGraphNode *CalleeNode = G->getOrInsertNode(CalleeDecl);
+ CallerNode->addCallee(CalleeNode, G);
+ }
+ }
+
+ void VisitChildren(Stmt *S) {
+ for (Stmt::child_range I = S->children(); I; ++I)
+ if (*I)
+ static_cast<CGBuilder*>(this)->Visit(*I);
+ }
+};
+
+} // end anonymous namespace
+
+CallGraph::CallGraph() {
+ Root = getOrInsertNode(0);
+}
+
+CallGraph::~CallGraph() {
+ if (!FunctionMap.empty()) {
+ for (FunctionMapTy::iterator I = FunctionMap.begin(), E = FunctionMap.end();
+ I != E; ++I)
+ delete I->second;
+ FunctionMap.clear();
+ }
+}
+
+bool CallGraph::includeInGraph(const Decl *D) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+    // We skip function template definitions, as their semantics are only
+    // determined when they are instantiated.
+ if (!FD->isThisDeclarationADefinition() ||
+ FD->isDependentContext())
+ return false;
+
+ IdentifierInfo *II = FD->getIdentifier();
+ if (II && II->getName().startswith("__inline"))
+ return false;
+ }
+
+ if (const ObjCMethodDecl *ID = dyn_cast<ObjCMethodDecl>(D)) {
+ if (!ID->isThisDeclarationADefinition())
+ return false;
+ }
+
+ return true;
+}
+
+void CallGraph::addNodeForDecl(Decl* D, bool IsGlobal) {
+ assert(D);
+
+ // Do nothing if the node already exists.
+ if (FunctionMap.find(D) != FunctionMap.end())
+ return;
+
+  // Allocate a new node, link it under the root if it is global, and process its calls.
+ CallGraphNode *Node = getOrInsertNode(D);
+ if (IsGlobal)
+ Root->addCallee(Node, this);
+
+ // Process all the calls by this function as well.
+ CGBuilder builder(this, D, Node);
+ if (Stmt *Body = D->getBody())
+ builder.Visit(Body);
+}
+
+CallGraphNode *CallGraph::getNode(const Decl *F) const {
+ FunctionMapTy::const_iterator I = FunctionMap.find(F);
+ if (I == FunctionMap.end()) return 0;
+ return I->second;
+}
+
+CallGraphNode *CallGraph::getOrInsertNode(Decl *F) {
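+  // Return the existing node for 'F', creating it if necessary.  Newly
+  // created non-root nodes are recorded in the parentless set.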
+ CallGraphNode *&Node = FunctionMap[F];
+ if (Node)
+ return Node;
+
+ Node = new CallGraphNode(F);
+ // If not root, add to the parentless list.
+ if (F != 0)
+ ParentlessNodes.insert(Node);
+ return Node;
+}
+
+void CallGraph::print(raw_ostream &OS) const {
+ OS << " --- Call graph Dump --- \n";
+ for (const_iterator I = begin(), E = end(); I != E; ++I) {
+ OS << " Function: ";
+ if (I->second == Root)
+ OS << "< root >";
+ else
+ I->second->print(OS);
+ OS << " calls: ";
+ for (CallGraphNode::iterator CI = I->second->begin(),
+ CE = I->second->end(); CI != CE; ++CI) {
+ assert(*CI != Root && "No one can call the root node.");
+ (*CI)->print(OS);
+ OS << " ";
+ }
+ OS << '\n';
+ }
+ OS.flush();
+}
+
+void CallGraph::dump() const {
+ print(llvm::errs());
+}
+
+void CallGraph::viewGraph() const {
+ llvm::ViewGraph(this, "CallGraph");
+}
+
+StringRef CallGraphNode::getName() const {
+ if (const FunctionDecl *D = dyn_cast_or_null<FunctionDecl>(FD))
+ if (const IdentifierInfo *II = D->getIdentifier())
+ return II->getName();
+ return "< >";
+}
+
+void CallGraphNode::print(raw_ostream &os) const {
+ os << getName();
+}
+
+void CallGraphNode::dump() const {
+ print(llvm::errs());
+}
+
+namespace llvm {
+
+template <>
+struct DOTGraphTraits<const CallGraph*> : public DefaultDOTGraphTraits {
+
+ DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+
+ static std::string getNodeLabel(const CallGraphNode *Node,
+ const CallGraph *CG) {
+ if (CG->getRoot() == Node) {
+ return "< root >";
+ }
+ return Node->getName();
+ }
+
+};
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp b/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp
new file mode 100644
index 0000000..7e9e38f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/CocoaConventions.cpp
@@ -0,0 +1,138 @@
+//===- CocoaConventions.cpp - Special handling of Cocoa conventions -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements Cocoa naming convention analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+using namespace ento;
+
+bool cocoa::isRefType(QualType RetTy, StringRef Prefix,
+ StringRef Name) {
+ // Recursively walk the typedef stack, allowing typedefs of reference types.
+ while (const TypedefType *TD = dyn_cast<TypedefType>(RetTy.getTypePtr())) {
+ StringRef TDName = TD->getDecl()->getIdentifier()->getName();
+ if (TDName.startswith(Prefix) && TDName.endswith("Ref"))
+ return true;
+    // XPC unfortunately uses CF-style function names, but its types aren't CF types.
+ if (TDName.startswith("xpc_"))
+ return false;
+ RetTy = TD->getDecl()->getUnderlyingType();
+ }
+
+ if (Name.empty())
+ return false;
+
+ // Is the type void*?
+ const PointerType* PT = RetTy->getAs<PointerType>();
+ if (!(PT->getPointeeType().getUnqualifiedType()->isVoidType()))
+ return false;
+
+ // Does the name start with the prefix?
+ return Name.startswith(Prefix);
+}
+
+bool coreFoundation::isCFObjectRef(QualType T) {
+ return cocoa::isRefType(T, "CF") || // Core Foundation.
+ cocoa::isRefType(T, "CG") || // Core Graphics.
+ cocoa::isRefType(T, "DADisk") || // Disk Arbitration API.
+ cocoa::isRefType(T, "DADissenter") ||
+ cocoa::isRefType(T, "DASessionRef");
+}
+
+
+bool cocoa::isCocoaObjectRef(QualType Ty) {
+ if (!Ty->isObjCObjectPointerType())
+ return false;
+
+ const ObjCObjectPointerType *PT = Ty->getAs<ObjCObjectPointerType>();
+
+ // Can be true for objects with the 'NSObject' attribute.
+ if (!PT)
+ return true;
+
+ // We assume that id<..>, id, Class, and Class<..> all represent tracked
+ // objects.
+ if (PT->isObjCIdType() || PT->isObjCQualifiedIdType() ||
+ PT->isObjCClassType() || PT->isObjCQualifiedClassType())
+ return true;
+
+ // Does the interface subclass NSObject?
+ // FIXME: We can memoize here if this gets too expensive.
+ const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
+
+ // Assume that anything declared with a forward declaration and no
+ // @interface subclasses NSObject.
+ if (!ID->hasDefinition())
+ return true;
+
+ for ( ; ID ; ID = ID->getSuperClass())
+ if (ID->getIdentifier()->getName() == "NSObject")
+ return true;
+
+ return false;
+}
+
+bool coreFoundation::followsCreateRule(const FunctionDecl *fn) {
+ // For now, *just* base this on the function name, not on anything else.
+
+ const IdentifierInfo *ident = fn->getIdentifier();
+ if (!ident) return false;
+ StringRef functionName = ident->getName();
+
+ StringRef::iterator it = functionName.begin();
+ StringRef::iterator start = it;
+ StringRef::iterator endI = functionName.end();
+
+ while (true) {
+ // Scan for the start of 'create' or 'copy'.
+ for ( ; it != endI ; ++it) {
+ // Search for the first character. It can either be 'C' or 'c'.
+ char ch = *it;
+ if (ch == 'C' || ch == 'c') {
+ // Make sure this isn't something like 'recreate' or 'Scopy'.
+ if (ch == 'c' && it != start && isalpha(*(it - 1)))
+ continue;
+
+ ++it;
+ break;
+ }
+ }
+
+ // Did we hit the end of the string? If so, we didn't find a match.
+ if (it == endI)
+ return false;
+
+ // Scan for *lowercase* 'reate' or 'opy', followed by no lowercase
+ // character.
+ StringRef suffix = functionName.substr(it - start);
+ if (suffix.startswith("reate")) {
+ it += 5;
+ }
+ else if (suffix.startswith("opy")) {
+ it += 3;
+ } else {
+ // Keep scanning.
+ continue;
+ }
+
+ if (it == endI || !islower(*it))
+ return true;
+
+ // If we matched a lowercase character, it isn't the end of the
+ // word. Keep scanning.
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/Dominators.cpp b/contrib/llvm/tools/clang/lib/Analysis/Dominators.cpp
new file mode 100644
index 0000000..0e02c6d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/Dominators.cpp
@@ -0,0 +1,14 @@
+//=- Dominators.cpp - Implementation of dominator tree for Clang CFG -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/Dominators.h"
+
+using namespace clang;
+
+void DominatorTree::anchor() { }
diff --git a/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
new file mode 100644
index 0000000..51fac49
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/FormatString.cpp
@@ -0,0 +1,688 @@
+// FormatString.cpp - Common stuff for handling printf/scanf formats -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Shared details for processing format strings of printf and scanf
+// (and friends).
+//
+//===----------------------------------------------------------------------===//
+
+#include "FormatStringParsing.h"
+#include "clang/Basic/LangOptions.h"
+
+using clang::analyze_format_string::ArgTypeResult;
+using clang::analyze_format_string::FormatStringHandler;
+using clang::analyze_format_string::FormatSpecifier;
+using clang::analyze_format_string::LengthModifier;
+using clang::analyze_format_string::OptionalAmount;
+using clang::analyze_format_string::PositionContext;
+using clang::analyze_format_string::ConversionSpecifier;
+using namespace clang;
+
+// Key function to FormatStringHandler.
+FormatStringHandler::~FormatStringHandler() {}
+
+//===----------------------------------------------------------------------===//
+// Functions for parsing format strings components in both printf and
+// scanf format strings.
+//===----------------------------------------------------------------------===//
+
+OptionalAmount
+clang::analyze_format_string::ParseAmount(const char *&Beg, const char *E) {
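+  // Parse an unsigned decimal constant, if present.  UpdateOnReturn
+  // resynchronizes 'Beg' with the local cursor 'I' when we return.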
+ const char *I = Beg;
+ UpdateOnReturn <const char*> UpdateBeg(Beg, I);
+
+ unsigned accumulator = 0;
+ bool hasDigits = false;
+
+ for ( ; I != E; ++I) {
+ char c = *I;
+ if (c >= '0' && c <= '9') {
+ hasDigits = true;
+ accumulator = (accumulator * 10) + (c - '0');
+ continue;
+ }
+
+ if (hasDigits)
+ return OptionalAmount(OptionalAmount::Constant, accumulator, Beg, I - Beg,
+ false);
+
+ break;
+ }
+
+ return OptionalAmount();
+}
+
+OptionalAmount
+clang::analyze_format_string::ParseNonPositionAmount(const char *&Beg,
+ const char *E,
+ unsigned &argIndex) {
+ if (*Beg == '*') {
+ ++Beg;
+ return OptionalAmount(OptionalAmount::Arg, argIndex++, Beg, 0, false);
+ }
+
+ return ParseAmount(Beg, E);
+}
+
+OptionalAmount
+clang::analyze_format_string::ParsePositionAmount(FormatStringHandler &H,
+ const char *Start,
+ const char *&Beg,
+ const char *E,
+ PositionContext p) {
+ if (*Beg == '*') {
+ const char *I = Beg + 1;
+ const OptionalAmount &Amt = ParseAmount(I, E);
+
+ if (Amt.getHowSpecified() == OptionalAmount::NotSpecified) {
+ H.HandleInvalidPosition(Beg, I - Beg, p);
+ return OptionalAmount(false);
+ }
+
+ if (I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return OptionalAmount(false);
+ }
+
+ assert(Amt.getHowSpecified() == OptionalAmount::Constant);
+
+ if (*I == '$') {
+ // Handle positional arguments
+
+ // Special case: '*0$', since this is an easy mistake.
+ if (Amt.getConstantAmount() == 0) {
+ H.HandleZeroPosition(Beg, I - Beg + 1);
+ return OptionalAmount(false);
+ }
+
+ const char *Tmp = Beg;
+ Beg = ++I;
+
+ return OptionalAmount(OptionalAmount::Arg, Amt.getConstantAmount() - 1,
+ Tmp, 0, true);
+ }
+
+ H.HandleInvalidPosition(Beg, I - Beg, p);
+ return OptionalAmount(false);
+ }
+
+ return ParseAmount(Beg, E);
+}
+
+
+bool
+clang::analyze_format_string::ParseFieldWidth(FormatStringHandler &H,
+ FormatSpecifier &CS,
+ const char *Start,
+ const char *&Beg, const char *E,
+ unsigned *argIndex) {
+ // FIXME: Support negative field widths.
+ if (argIndex) {
+ CS.setFieldWidth(ParseNonPositionAmount(Beg, E, *argIndex));
+ }
+ else {
+ const OptionalAmount Amt =
+ ParsePositionAmount(H, Start, Beg, E,
+ analyze_format_string::FieldWidthPos);
+
+ if (Amt.isInvalid())
+ return true;
+ CS.setFieldWidth(Amt);
+ }
+ return false;
+}
+
+bool
+clang::analyze_format_string::ParseArgPosition(FormatStringHandler &H,
+ FormatSpecifier &FS,
+ const char *Start,
+ const char *&Beg,
+ const char *E) {
+ const char *I = Beg;
+
+ const OptionalAmount &Amt = ParseAmount(I, E);
+
+ if (I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ if (Amt.getHowSpecified() == OptionalAmount::Constant && *(I++) == '$') {
+ // Warn that positional arguments are non-standard.
+ H.HandlePosition(Start, I - Start);
+
+ // Special case: '%0$', since this is an easy mistake.
+ if (Amt.getConstantAmount() == 0) {
+ H.HandleZeroPosition(Start, I - Start);
+ return true;
+ }
+
+ FS.setArgIndex(Amt.getConstantAmount() - 1);
+ FS.setUsesPositionalArg();
+ // Update the caller's pointer if we decided to consume
+ // these characters.
+ Beg = I;
+ return false;
+ }
+
+ return false;
+}
+
+bool
+clang::analyze_format_string::ParseLengthModifier(FormatSpecifier &FS,
+ const char *&I,
+ const char *E,
+ const LangOptions &LO,
+ bool IsScanf) {
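+  // Recognize a length modifier (h, hh, l, ll, j, z, t, L, q, and the
+  // scanf-only 'a'/'m' forms) at 'I', advancing 'I' past it on success.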
+ LengthModifier::Kind lmKind = LengthModifier::None;
+ const char *lmPosition = I;
+ switch (*I) {
+ default:
+ return false;
+ case 'h':
+ ++I;
+ lmKind = (I != E && *I == 'h') ? (++I, LengthModifier::AsChar)
+ : LengthModifier::AsShort;
+ break;
+ case 'l':
+ ++I;
+ lmKind = (I != E && *I == 'l') ? (++I, LengthModifier::AsLongLong)
+ : LengthModifier::AsLong;
+ break;
+ case 'j': lmKind = LengthModifier::AsIntMax; ++I; break;
+ case 'z': lmKind = LengthModifier::AsSizeT; ++I; break;
+ case 't': lmKind = LengthModifier::AsPtrDiff; ++I; break;
+ case 'L': lmKind = LengthModifier::AsLongDouble; ++I; break;
+ case 'q': lmKind = LengthModifier::AsQuad; ++I; break;
+ case 'a':
+ if (IsScanf && !LO.C99 && !LO.CPlusPlus0x) {
+ // For scanf in C90, look at the next character to see if this should
+ // be parsed as the GNU extension 'a' length modifier. If not, this
+ // will be parsed as a conversion specifier.
+ ++I;
+ if (I != E && (*I == 's' || *I == 'S' || *I == '[')) {
+ lmKind = LengthModifier::AsAllocate;
+ break;
+ }
+ --I;
+ }
+ return false;
+ case 'm':
+ if (IsScanf) {
+ lmKind = LengthModifier::AsMAllocate;
+ ++I;
+ break;
+ }
+ return false;
+ }
+ LengthModifier lm(lmPosition, lmKind);
+ FS.setLengthModifier(lm);
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods on ArgTypeResult.
+//===----------------------------------------------------------------------===//
+
+bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
+ switch (K) {
+ case InvalidTy:
+ llvm_unreachable("ArgTypeResult must be valid");
+
+ case UnknownTy:
+ return true;
+
+ case AnyCharTy: {
+ if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ default:
+ break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Char_U:
+ return true;
+ }
+ return false;
+ }
+
+ case SpecificTy: {
+ argTy = C.getCanonicalType(argTy).getUnqualifiedType();
+ if (T == argTy)
+ return true;
+ // Check for "compatible types".
+ if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ default:
+ break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return T == C.UnsignedCharTy;
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ return T == C.SignedCharTy;
+ case BuiltinType::Short:
+ return T == C.UnsignedShortTy;
+ case BuiltinType::UShort:
+ return T == C.ShortTy;
+ case BuiltinType::Int:
+ return T == C.UnsignedIntTy;
+ case BuiltinType::UInt:
+ return T == C.IntTy;
+ case BuiltinType::Long:
+ return T == C.UnsignedLongTy;
+ case BuiltinType::ULong:
+ return T == C.LongTy;
+ case BuiltinType::LongLong:
+ return T == C.UnsignedLongLongTy;
+ case BuiltinType::ULongLong:
+ return T == C.LongLongTy;
+ }
+ return false;
+ }
+
+ case CStrTy: {
+ const PointerType *PT = argTy->getAs<PointerType>();
+ if (!PT)
+ return false;
+ QualType pointeeTy = PT->getPointeeType();
+ if (const BuiltinType *BT = pointeeTy->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Void:
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+ }
+
+ case WCStrTy: {
+ const PointerType *PT = argTy->getAs<PointerType>();
+ if (!PT)
+ return false;
+ QualType pointeeTy =
+ C.getCanonicalType(PT->getPointeeType()).getUnqualifiedType();
+ return pointeeTy == C.getWCharType();
+ }
+
+ case WIntTy: {
+    // Instead of doing a lookup for the definition of 'wint_t' (which is
+    // defined by the system headers), see if wchar_t and the argument type
+    // promote to the same type.
+ QualType PromoWChar =
+ C.getWCharType()->isPromotableIntegerType()
+ ? C.getPromotedIntegerType(C.getWCharType()) : C.getWCharType();
+ QualType PromoArg =
+ argTy->isPromotableIntegerType()
+ ? C.getPromotedIntegerType(argTy) : argTy;
+
+ PromoWChar = C.getCanonicalType(PromoWChar).getUnqualifiedType();
+ PromoArg = C.getCanonicalType(PromoArg).getUnqualifiedType();
+
+ return PromoWChar == PromoArg;
+ }
+
+ case CPointerTy:
+ return argTy->isPointerType() || argTy->isObjCObjectPointerType() ||
+ argTy->isBlockPointerType() || argTy->isNullPtrType();
+
+ case ObjCPointerTy: {
+ if (argTy->getAs<ObjCObjectPointerType>() ||
+ argTy->getAs<BlockPointerType>())
+ return true;
+
+ // Handle implicit toll-free bridging.
+ if (const PointerType *PT = argTy->getAs<PointerType>()) {
+ // Things such as CFTypeRef are really just opaque pointers
+ // to C structs representing CF types that can often be bridged
+ // to Objective-C objects. Since the compiler doesn't know which
+ // structs can be toll-free bridged, we just accept them all.
+ QualType pointee = PT->getPointeeType();
+ if (pointee->getAsStructureType() || pointee->isVoidType())
+ return true;
+ }
+ return false;
+ }
+ }
+
+ llvm_unreachable("Invalid ArgTypeResult Kind!");
+}
+
+QualType ArgTypeResult::getRepresentativeType(ASTContext &C) const {
+ switch (K) {
+ case InvalidTy:
+ llvm_unreachable("No representative type for Invalid ArgTypeResult");
+ case UnknownTy:
+ return QualType();
+ case AnyCharTy:
+ return C.CharTy;
+ case SpecificTy:
+ return T;
+ case CStrTy:
+ return C.getPointerType(C.CharTy);
+ case WCStrTy:
+ return C.getPointerType(C.getWCharType());
+ case ObjCPointerTy:
+ return C.ObjCBuiltinIdTy;
+ case CPointerTy:
+ return C.VoidPtrTy;
+ case WIntTy: {
+ QualType WC = C.getWCharType();
+ return WC->isPromotableIntegerType() ? C.getPromotedIntegerType(WC) : WC;
+ }
+ }
+
+ llvm_unreachable("Invalid ArgTypeResult Kind!");
+}
+
+std::string ArgTypeResult::getRepresentativeTypeName(ASTContext &C) const {
+ std::string S = getRepresentativeType(C).getAsString();
+ if (Name && S != Name)
+ return std::string("'") + Name + "' (aka '" + S + "')";
+ return std::string("'") + S + "'";
+}
+
+
+//===----------------------------------------------------------------------===//
+// Methods on OptionalAmount.
+//===----------------------------------------------------------------------===//
+
+ArgTypeResult
+analyze_format_string::OptionalAmount::getArgType(ASTContext &Ctx) const {
+ return Ctx.IntTy;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods on LengthModifier.
+//===----------------------------------------------------------------------===//
+
+const char *
+analyze_format_string::LengthModifier::toString() const {
+ switch (kind) {
+ case AsChar:
+ return "hh";
+ case AsShort:
+ return "h";
+ case AsLong: // or AsWideChar
+ return "l";
+ case AsLongLong:
+ return "ll";
+ case AsQuad:
+ return "q";
+ case AsIntMax:
+ return "j";
+ case AsSizeT:
+ return "z";
+ case AsPtrDiff:
+ return "t";
+ case AsLongDouble:
+ return "L";
+ case AsAllocate:
+ return "a";
+ case AsMAllocate:
+ return "m";
+ case None:
+ return "";
+ }
+ return NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods on ConversionSpecifier.
+//===----------------------------------------------------------------------===//
+
+const char *ConversionSpecifier::toString() const {
+ switch (kind) {
+ case dArg: return "d";
+ case iArg: return "i";
+ case oArg: return "o";
+ case uArg: return "u";
+ case xArg: return "x";
+ case XArg: return "X";
+ case fArg: return "f";
+ case FArg: return "F";
+ case eArg: return "e";
+ case EArg: return "E";
+ case gArg: return "g";
+ case GArg: return "G";
+ case aArg: return "a";
+ case AArg: return "A";
+ case cArg: return "c";
+ case sArg: return "s";
+ case pArg: return "p";
+ case nArg: return "n";
+ case PercentArg: return "%";
+ case ScanListArg: return "[";
+ case InvalidSpecifier: return NULL;
+
+  // Mac OS X Unicode extensions.
+ case CArg: return "C";
+ case SArg: return "S";
+
+ // Objective-C specific specifiers.
+ case ObjCObjArg: return "@";
+
+ // FreeBSD specific specifiers.
+ case bArg: return "b";
+ case DArg: return "D";
+ case rArg: return "r";
+
+ // GlibC specific specifiers.
+ case PrintErrno: return "m";
+ }
+ return NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods on OptionalAmount.
+//===----------------------------------------------------------------------===//
+
+void OptionalAmount::toString(raw_ostream &os) const {
+ switch (hs) {
+ case Invalid:
+ case NotSpecified:
+ return;
+ case Arg:
+ if (UsesDotPrefix)
+ os << ".";
+ if (usesPositionalArg())
+ os << "*" << getPositionalArgIndex() << "$";
+ else
+ os << "*";
+ break;
+ case Constant:
+ if (UsesDotPrefix)
+ os << ".";
+ os << amt;
+ break;
+ }
+}
+
+bool FormatSpecifier::hasValidLengthModifier() const {
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return true;
+
+ // Handle most integer flags
+ case LengthModifier::AsChar:
+ case LengthModifier::AsShort:
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ case LengthModifier::AsIntMax:
+ case LengthModifier::AsSizeT:
+ case LengthModifier::AsPtrDiff:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::nArg:
+ case ConversionSpecifier::rArg:
+ return true;
+ default:
+ return false;
+ }
+
+ // Handle 'l' flag
+ case LengthModifier::AsLong:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::nArg:
+ case ConversionSpecifier::cArg:
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::rArg:
+ case ConversionSpecifier::ScanListArg:
+ return true;
+ default:
+ return false;
+ }
+
+ case LengthModifier::AsLongDouble:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ return true;
+ // GNU extension.
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ return true;
+ default:
+ return false;
+ }
+
+ case LengthModifier::AsAllocate:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::SArg:
+ case ConversionSpecifier::ScanListArg:
+ return true;
+ default:
+ return false;
+ }
+
+ case LengthModifier::AsMAllocate:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::cArg:
+ case ConversionSpecifier::CArg:
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::SArg:
+ case ConversionSpecifier::ScanListArg:
+ return true;
+ default:
+ return false;
+ }
+ }
+ llvm_unreachable("Invalid LengthModifier Kind!");
+}
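+// A few concrete cases covered by the table above: "%hhd" and "%zu" are
+// accepted (integer conversions with 'hh'/'z'), "%Lf" is accepted ('L' with a
+// floating-point conversion), while "%Ls" is rejected, since 'L' pairs only
+// with floating-point conversions (plus integers as a GNU extension).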
+
+bool FormatSpecifier::hasStandardLengthModifier() const {
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ case LengthModifier::AsChar:
+ case LengthModifier::AsShort:
+ case LengthModifier::AsLong:
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsIntMax:
+ case LengthModifier::AsSizeT:
+ case LengthModifier::AsPtrDiff:
+ case LengthModifier::AsLongDouble:
+ return true;
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ case LengthModifier::AsQuad:
+ return false;
+ }
+ llvm_unreachable("Invalid LengthModifier Kind!");
+}
+
+bool FormatSpecifier::hasStandardConversionSpecifier(const LangOptions &LangOpt) const {
+ switch (CS.getKind()) {
+ case ConversionSpecifier::cArg:
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::pArg:
+ case ConversionSpecifier::nArg:
+ case ConversionSpecifier::ObjCObjArg:
+ case ConversionSpecifier::ScanListArg:
+ case ConversionSpecifier::PercentArg:
+ return true;
+ case ConversionSpecifier::CArg:
+ case ConversionSpecifier::SArg:
+ return LangOpt.ObjC1 || LangOpt.ObjC2;
+ case ConversionSpecifier::InvalidSpecifier:
+ case ConversionSpecifier::bArg:
+ case ConversionSpecifier::DArg:
+ case ConversionSpecifier::rArg:
+ case ConversionSpecifier::PrintErrno:
+ return false;
+ }
+ llvm_unreachable("Invalid ConversionSpecifier Kind!");
+}
+
+bool FormatSpecifier::hasStandardLengthConversionCombination() const {
+ if (LM.getKind() == LengthModifier::AsLongDouble) {
+ switch(CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ return false;
+ default:
+ return true;
+ }
+ }
+ return true;
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/FormatStringParsing.h b/contrib/llvm/tools/clang/lib/Analysis/FormatStringParsing.h
new file mode 100644
index 0000000..f483ec6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/FormatStringParsing.h
@@ -0,0 +1,74 @@
+#ifndef LLVM_CLANG_FORMAT_PARSING_H
+#define LLVM_CLANG_FORMAT_PARSING_H
+
+#include "clang/Analysis/Analyses/FormatString.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Type.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+
+class LangOptions;
+
+template <typename T>
+class UpdateOnReturn {
+ T &ValueToUpdate;
+ const T &ValueToCopy;
+public:
+ UpdateOnReturn(T &valueToUpdate, const T &valueToCopy)
+ : ValueToUpdate(valueToUpdate), ValueToCopy(valueToCopy) {}
+
+ ~UpdateOnReturn() {
+ ValueToUpdate = ValueToCopy;
+ }
+};
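+// Typical use (mirrored by the specifier parsers in this library): keep a
+// local cursor and have it copied back into the caller-visible pointer on
+// every return path, e.g.
+//
+//   const char *I = Beg;
+//   UpdateOnReturn<const char*> UpdateBeg(Beg, I);
+//   // ... advance I; Beg is set to I when the enclosing scope exits.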
+
+namespace analyze_format_string {
+
+OptionalAmount ParseAmount(const char *&Beg, const char *E);
+OptionalAmount ParseNonPositionAmount(const char *&Beg, const char *E,
+ unsigned &argIndex);
+
+OptionalAmount ParsePositionAmount(FormatStringHandler &H,
+ const char *Start, const char *&Beg,
+ const char *E, PositionContext p);
+
+bool ParseFieldWidth(FormatStringHandler &H,
+ FormatSpecifier &CS,
+ const char *Start, const char *&Beg, const char *E,
+ unsigned *argIndex);
+
+bool ParseArgPosition(FormatStringHandler &H,
+ FormatSpecifier &CS, const char *Start,
+ const char *&Beg, const char *E);
+
+/// Returns true if a LengthModifier was parsed and installed in the
+/// FormatSpecifier& argument, and false otherwise.
+bool ParseLengthModifier(FormatSpecifier &FS, const char *&Beg, const char *E,
+ const LangOptions &LO, bool IsScanf = false);
+
+template <typename T> class SpecifierResult {
+ T FS;
+ const char *Start;
+ bool Stop;
+public:
+ SpecifierResult(bool stop = false)
+ : Start(0), Stop(stop) {}
+ SpecifierResult(const char *start,
+ const T &fs)
+ : FS(fs), Start(start), Stop(false) {}
+
+ const char *getStart() const { return Start; }
+ bool shouldStop() const { return Stop; }
+ bool hasValue() const { return Start != 0; }
+ const T &getValue() const {
+ assert(hasValue());
+ return FS;
+ }
+ const T &getValue() { return FS; }
+};
+
+} // end analyze_format_string namespace
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
new file mode 100644
index 0000000..ff6607d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/LiveVariables.cpp
@@ -0,0 +1,607 @@
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/Analyses/PostOrderCFGView.h"
+
+#include "clang/AST/Stmt.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/AST/StmtVisitor.h"
+
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/DenseMap.h"
+
+#include <deque>
+#include <algorithm>
+#include <vector>
+
+using namespace clang;
+
+namespace {
+
+class DataflowWorklist {
+ SmallVector<const CFGBlock *, 20> worklist;
+ llvm::BitVector enqueuedBlocks;
+ PostOrderCFGView *POV;
+public:
+ DataflowWorklist(const CFG &cfg, AnalysisDeclContext &Ctx)
+ : enqueuedBlocks(cfg.getNumBlockIDs()),
+ POV(Ctx.getAnalysis<PostOrderCFGView>()) {}
+
+ void enqueueBlock(const CFGBlock *block);
+ void enqueueSuccessors(const CFGBlock *block);
+ void enqueuePredecessors(const CFGBlock *block);
+
+ const CFGBlock *dequeue();
+
+ void sortWorklist();
+};
+
+}
+
+void DataflowWorklist::enqueueBlock(const clang::CFGBlock *block) {
+ if (block && !enqueuedBlocks[block->getBlockID()]) {
+ enqueuedBlocks[block->getBlockID()] = true;
+ worklist.push_back(block);
+ }
+}
+
+void DataflowWorklist::enqueueSuccessors(const clang::CFGBlock *block) {
+ const unsigned OldWorklistSize = worklist.size();
+ for (CFGBlock::const_succ_iterator I = block->succ_begin(),
+ E = block->succ_end(); I != E; ++I) {
+ enqueueBlock(*I);
+ }
+
+ if (OldWorklistSize == 0 || OldWorklistSize == worklist.size())
+ return;
+
+ sortWorklist();
+}
+
+void DataflowWorklist::enqueuePredecessors(const clang::CFGBlock *block) {
+ const unsigned OldWorklistSize = worklist.size();
+ for (CFGBlock::const_pred_iterator I = block->pred_begin(),
+ E = block->pred_end(); I != E; ++I) {
+ enqueueBlock(*I);
+ }
+
+ if (OldWorklistSize == 0 || OldWorklistSize == worklist.size())
+ return;
+
+ sortWorklist();
+}
+
+void DataflowWorklist::sortWorklist() {
+ std::sort(worklist.begin(), worklist.end(), POV->getComparator());
+}
+
+const CFGBlock *DataflowWorklist::dequeue() {
+ if (worklist.empty())
+ return 0;
+ const CFGBlock *b = worklist.back();
+ worklist.pop_back();
+ enqueuedBlocks[b->getBlockID()] = false;
+ return b;
+}
+
+namespace {
+class LiveVariablesImpl {
+public:
+ AnalysisDeclContext &analysisContext;
+ std::vector<LiveVariables::LivenessValues> cfgBlockValues;
+ llvm::ImmutableSet<const Stmt *>::Factory SSetFact;
+ llvm::ImmutableSet<const VarDecl *>::Factory DSetFact;
+ llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues> blocksEndToLiveness;
+ llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues> blocksBeginToLiveness;
+ llvm::DenseMap<const Stmt *, LiveVariables::LivenessValues> stmtsToLiveness;
+ llvm::DenseMap<const DeclRefExpr *, unsigned> inAssignment;
+ const bool killAtAssign;
+
+ LiveVariables::LivenessValues
+ merge(LiveVariables::LivenessValues valsA,
+ LiveVariables::LivenessValues valsB);
+
+ LiveVariables::LivenessValues runOnBlock(const CFGBlock *block,
+ LiveVariables::LivenessValues val,
+ LiveVariables::Observer *obs = 0);
+
+ void dumpBlockLiveness(const SourceManager& M);
+
+ LiveVariablesImpl(AnalysisDeclContext &ac, bool KillAtAssign)
+ : analysisContext(ac),
+ SSetFact(false), // Do not canonicalize ImmutableSets by default.
+ DSetFact(false), // This is a *major* performance win.
+ killAtAssign(KillAtAssign) {}
+};
+}
+
+static LiveVariablesImpl &getImpl(void *x) {
+ return *((LiveVariablesImpl *) x);
+}
+
+//===----------------------------------------------------------------------===//
+// Operations and queries on LivenessValues.
+//===----------------------------------------------------------------------===//
+
+bool LiveVariables::LivenessValues::isLive(const Stmt *S) const {
+ return liveStmts.contains(S);
+}
+
+bool LiveVariables::LivenessValues::isLive(const VarDecl *D) const {
+ return liveDecls.contains(D);
+}
+
+namespace {
+ template <typename SET>
+ SET mergeSets(SET A, SET B) {
+ if (A.isEmpty())
+ return B;
+
+ for (typename SET::iterator it = B.begin(), ei = B.end(); it != ei; ++it) {
+ A = A.add(*it);
+ }
+ return A;
+ }
+}
+
+void LiveVariables::Observer::anchor() { }
+
+LiveVariables::LivenessValues
+LiveVariablesImpl::merge(LiveVariables::LivenessValues valsA,
+ LiveVariables::LivenessValues valsB) {
+
+ llvm::ImmutableSetRef<const Stmt *>
+ SSetRefA(valsA.liveStmts.getRootWithoutRetain(), SSetFact.getTreeFactory()),
+ SSetRefB(valsB.liveStmts.getRootWithoutRetain(), SSetFact.getTreeFactory());
+
+
+ llvm::ImmutableSetRef<const VarDecl *>
+ DSetRefA(valsA.liveDecls.getRootWithoutRetain(), DSetFact.getTreeFactory()),
+ DSetRefB(valsB.liveDecls.getRootWithoutRetain(), DSetFact.getTreeFactory());
+
+
+ SSetRefA = mergeSets(SSetRefA, SSetRefB);
+ DSetRefA = mergeSets(DSetRefA, DSetRefB);
+
+ // asImmutableSet() canonicalizes the tree, allowing us to do an easy
+ // comparison afterwards.
+ return LiveVariables::LivenessValues(SSetRefA.asImmutableSet(),
+ DSetRefA.asImmutableSet());
+}
+
+bool LiveVariables::LivenessValues::equals(const LivenessValues &V) const {
+ return liveStmts == V.liveStmts && liveDecls == V.liveDecls;
+}
+
+//===----------------------------------------------------------------------===//
+// Query methods.
+//===----------------------------------------------------------------------===//
+
+static bool isAlwaysAlive(const VarDecl *D) {
+ return D->hasGlobalStorage();
+}
+
+bool LiveVariables::isLive(const CFGBlock *B, const VarDecl *D) {
+ return isAlwaysAlive(D) || getImpl(impl).blocksEndToLiveness[B].isLive(D);
+}
+
+bool LiveVariables::isLive(const Stmt *S, const VarDecl *D) {
+ return isAlwaysAlive(D) || getImpl(impl).stmtsToLiveness[S].isLive(D);
+}
+
+bool LiveVariables::isLive(const Stmt *Loc, const Stmt *S) {
+ return getImpl(impl).stmtsToLiveness[Loc].isLive(S);
+}
+
+//===----------------------------------------------------------------------===//
+// Dataflow computation.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class TransferFunctions : public StmtVisitor<TransferFunctions> {
+ LiveVariablesImpl &LV;
+ LiveVariables::LivenessValues &val;
+ LiveVariables::Observer *observer;
+ const CFGBlock *currentBlock;
+public:
+ TransferFunctions(LiveVariablesImpl &im,
+ LiveVariables::LivenessValues &Val,
+ LiveVariables::Observer *Observer,
+ const CFGBlock *CurrentBlock)
+ : LV(im), val(Val), observer(Observer), currentBlock(CurrentBlock) {}
+
+ void VisitBinaryOperator(BinaryOperator *BO);
+ void VisitBlockExpr(BlockExpr *BE);
+ void VisitDeclRefExpr(DeclRefExpr *DR);
+ void VisitDeclStmt(DeclStmt *DS);
+ void VisitObjCForCollectionStmt(ObjCForCollectionStmt *OS);
+ void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *UE);
+ void VisitUnaryOperator(UnaryOperator *UO);
+ void Visit(Stmt *S);
+};
+}
+
+static const VariableArrayType *FindVA(QualType Ty) {
+ const Type *ty = Ty.getTypePtr();
+ while (const ArrayType *VT = dyn_cast<ArrayType>(ty)) {
+ if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(VT))
+ if (VAT->getSizeExpr())
+ return VAT;
+
+ ty = VT->getElementType().getTypePtr();
+ }
+
+ return 0;
+}
+
+static const Stmt *LookThroughStmt(const Stmt *S) {
+ while (S) {
+ if (const Expr *Ex = dyn_cast<Expr>(S))
+ S = Ex->IgnoreParens();
+ if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(S)) {
+ S = EWC->getSubExpr();
+ continue;
+ }
+ if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(S)) {
+ S = OVE->getSourceExpr();
+ continue;
+ }
+ break;
+ }
+ return S;
+}
+
+static void AddLiveStmt(llvm::ImmutableSet<const Stmt *> &Set,
+ llvm::ImmutableSet<const Stmt *>::Factory &F,
+ const Stmt *S) {
+ Set = F.add(Set, LookThroughStmt(S));
+}
+
+void TransferFunctions::Visit(Stmt *S) {
+ if (observer)
+ observer->observeStmt(S, currentBlock, val);
+
+ StmtVisitor<TransferFunctions>::Visit(S);
+
+ if (isa<Expr>(S)) {
+ val.liveStmts = LV.SSetFact.remove(val.liveStmts, S);
+ }
+
+  // Mark all child expressions live.
+
+ switch (S->getStmtClass()) {
+ default:
+ break;
+ case Stmt::StmtExprClass: {
+ // For statement expressions, look through the compound statement.
+ S = cast<StmtExpr>(S)->getSubStmt();
+ break;
+ }
+ case Stmt::CXXMemberCallExprClass: {
+ // Include the implicit "this" pointer as being live.
+ CXXMemberCallExpr *CE = cast<CXXMemberCallExpr>(S);
+ if (Expr *ImplicitObj = CE->getImplicitObjectArgument()) {
+ AddLiveStmt(val.liveStmts, LV.SSetFact, ImplicitObj);
+ }
+ break;
+ }
+ case Stmt::DeclStmtClass: {
+ const DeclStmt *DS = cast<DeclStmt>(S);
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl())) {
+ for (const VariableArrayType* VA = FindVA(VD->getType());
+ VA != 0; VA = FindVA(VA->getElementType())) {
+ AddLiveStmt(val.liveStmts, LV.SSetFact, VA->getSizeExpr());
+ }
+ }
+ break;
+ }
+ case Stmt::PseudoObjectExprClass: {
+ // A pseudo-object operation only directly consumes its result
+ // expression.
+ Expr *child = cast<PseudoObjectExpr>(S)->getResultExpr();
+ if (!child) return;
+ if (OpaqueValueExpr *OV = dyn_cast<OpaqueValueExpr>(child))
+ child = OV->getSourceExpr();
+ child = child->IgnoreParens();
+ val.liveStmts = LV.SSetFact.add(val.liveStmts, child);
+ return;
+ }
+
+ // FIXME: These cases eventually shouldn't be needed.
+ case Stmt::ExprWithCleanupsClass: {
+ S = cast<ExprWithCleanups>(S)->getSubExpr();
+ break;
+ }
+ case Stmt::CXXBindTemporaryExprClass: {
+ S = cast<CXXBindTemporaryExpr>(S)->getSubExpr();
+ break;
+ }
+ case Stmt::UnaryExprOrTypeTraitExprClass: {
+ // No need to unconditionally visit subexpressions.
+ return;
+ }
+ }
+
+ for (Stmt::child_iterator it = S->child_begin(), ei = S->child_end();
+ it != ei; ++it) {
+ if (Stmt *child = *it)
+ AddLiveStmt(val.liveStmts, LV.SSetFact, child);
+ }
+}
+
+void TransferFunctions::VisitBinaryOperator(BinaryOperator *B) {
+ if (B->isAssignmentOp()) {
+ if (!LV.killAtAssign)
+ return;
+
+ // Assigning to a variable?
+ Expr *LHS = B->getLHS()->IgnoreParens();
+
+ if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(LHS))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ // Assignments to references don't kill the ref's address
+ if (VD->getType()->isReferenceType())
+ return;
+
+ if (!isAlwaysAlive(VD)) {
+ // The variable is now dead.
+ val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
+ }
+
+ if (observer)
+ observer->observerKill(DR);
+ }
+ }
+}
+
+void TransferFunctions::VisitBlockExpr(BlockExpr *BE) {
+ AnalysisDeclContext::referenced_decls_iterator I, E;
+ llvm::tie(I, E) =
+ LV.analysisContext.getReferencedBlockVars(BE->getBlockDecl());
+ for ( ; I != E ; ++I) {
+ const VarDecl *VD = *I;
+ if (isAlwaysAlive(VD))
+ continue;
+ val.liveDecls = LV.DSetFact.add(val.liveDecls, VD);
+ }
+}
+
+void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *DR) {
+ if (const VarDecl *D = dyn_cast<VarDecl>(DR->getDecl()))
+ if (!isAlwaysAlive(D) && LV.inAssignment.find(DR) == LV.inAssignment.end())
+ val.liveDecls = LV.DSetFact.add(val.liveDecls, D);
+}
+
+void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
+ for (DeclStmt::decl_iterator DI=DS->decl_begin(), DE = DS->decl_end();
+ DI != DE; ++DI)
+ if (VarDecl *VD = dyn_cast<VarDecl>(*DI)) {
+ if (!isAlwaysAlive(VD))
+ val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
+ }
+}
+
+void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *OS) {
+ // Kill the iteration variable.
+ DeclRefExpr *DR = 0;
+ const VarDecl *VD = 0;
+
+ Stmt *element = OS->getElement();
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(element)) {
+ VD = cast<VarDecl>(DS->getSingleDecl());
+ }
+ else if ((DR = dyn_cast<DeclRefExpr>(cast<Expr>(element)->IgnoreParens()))) {
+ VD = cast<VarDecl>(DR->getDecl());
+ }
+
+ if (VD) {
+ val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
+ if (observer && DR)
+ observer->observerKill(DR);
+ }
+}
+
+void TransferFunctions::
+VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *UE)
+{
+ // While sizeof(var) doesn't technically extend the liveness of 'var', it
+  // does extend the liveness of metadata if 'var' is a VariableArrayType.
+ // We handle that special case here.
+ if (UE->getKind() != UETT_SizeOf || UE->isArgumentType())
+ return;
+
+ const Expr *subEx = UE->getArgumentExpr();
+ if (subEx->getType()->isVariableArrayType()) {
+ assert(subEx->isLValue());
+ val.liveStmts = LV.SSetFact.add(val.liveStmts, subEx->IgnoreParens());
+ }
+}
+
+void TransferFunctions::VisitUnaryOperator(UnaryOperator *UO) {
+ // Treat ++/-- as a kill.
+ // Note we don't actually have to do anything if we don't have an observer,
+ // since a ++/-- acts as both a kill and a "use".
+ if (!observer)
+ return;
+
+ switch (UO->getOpcode()) {
+ default:
+ return;
+ case UO_PostInc:
+ case UO_PostDec:
+ case UO_PreInc:
+ case UO_PreDec:
+ break;
+ }
+
+ if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(UO->getSubExpr()->IgnoreParens()))
+ if (isa<VarDecl>(DR->getDecl())) {
+ // Treat ++/-- as a kill.
+ observer->observerKill(DR);
+ }
+}
+
+LiveVariables::LivenessValues
+LiveVariablesImpl::runOnBlock(const CFGBlock *block,
+ LiveVariables::LivenessValues val,
+ LiveVariables::Observer *obs) {
+
+ TransferFunctions TF(*this, val, obs, block);
+
+ // Visit the terminator (if any).
+ if (const Stmt *term = block->getTerminator())
+ TF.Visit(const_cast<Stmt*>(term));
+
+ // Apply the transfer function for all Stmts in the block.
+ for (CFGBlock::const_reverse_iterator it = block->rbegin(),
+ ei = block->rend(); it != ei; ++it) {
+ const CFGElement &elem = *it;
+ if (!isa<CFGStmt>(elem))
+ continue;
+
+ const Stmt *S = cast<CFGStmt>(elem).getStmt();
+ TF.Visit(const_cast<Stmt*>(S));
+ stmtsToLiveness[S] = val;
+ }
+ return val;
+}
+
+void LiveVariables::runOnAllBlocks(LiveVariables::Observer &obs) {
+ const CFG *cfg = getImpl(impl).analysisContext.getCFG();
+ for (CFG::const_iterator it = cfg->begin(), ei = cfg->end(); it != ei; ++it)
+ getImpl(impl).runOnBlock(*it, getImpl(impl).blocksEndToLiveness[*it], &obs);
+}
+
+LiveVariables::LiveVariables(void *im) : impl(im) {}
+
+LiveVariables::~LiveVariables() {
+ delete (LiveVariablesImpl*) impl;
+}
+
+LiveVariables *
+LiveVariables::computeLiveness(AnalysisDeclContext &AC,
+ bool killAtAssign) {
+
+ // No CFG? Bail out.
+ CFG *cfg = AC.getCFG();
+ if (!cfg)
+ return 0;
+
+ LiveVariablesImpl *LV = new LiveVariablesImpl(AC, killAtAssign);
+
+ // Construct the dataflow worklist. Enqueue the exit block as the
+ // start of the analysis.
+ DataflowWorklist worklist(*cfg, AC);
+ llvm::BitVector everAnalyzedBlock(cfg->getNumBlockIDs());
+
+ // FIXME: we should enqueue using post order.
+ for (CFG::const_iterator it = cfg->begin(), ei = cfg->end(); it != ei; ++it) {
+ const CFGBlock *block = *it;
+ worklist.enqueueBlock(block);
+
+    // FIXME: Scan for DeclRefExprs used in the LHS of an assignment.
+ // We need to do this because we lack context in the reverse analysis
+ // to determine if a DeclRefExpr appears in such a context, and thus
+ // doesn't constitute a "use".
+ if (killAtAssign)
+ for (CFGBlock::const_iterator bi = block->begin(), be = block->end();
+ bi != be; ++bi) {
+ if (const CFGStmt *cs = bi->getAs<CFGStmt>()) {
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(cs->getStmt())) {
+ if (BO->getOpcode() == BO_Assign) {
+ if (const DeclRefExpr *DR =
+ dyn_cast<DeclRefExpr>(BO->getLHS()->IgnoreParens())) {
+ LV->inAssignment[DR] = 1;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ worklist.sortWorklist();
+
+ while (const CFGBlock *block = worklist.dequeue()) {
+ // Determine if the block's end value has changed. If not, we
+ // have nothing left to do for this block.
+ LivenessValues &prevVal = LV->blocksEndToLiveness[block];
+
+ // Merge the values of all successor blocks.
+ LivenessValues val;
+ for (CFGBlock::const_succ_iterator it = block->succ_begin(),
+ ei = block->succ_end(); it != ei; ++it) {
+ if (const CFGBlock *succ = *it) {
+ val = LV->merge(val, LV->blocksBeginToLiveness[succ]);
+ }
+ }
+
+ if (!everAnalyzedBlock[block->getBlockID()])
+ everAnalyzedBlock[block->getBlockID()] = true;
+ else if (prevVal.equals(val))
+ continue;
+
+ prevVal = val;
+
+ // Update the dataflow value for the start of this block.
+ LV->blocksBeginToLiveness[block] = LV->runOnBlock(block, val);
+
+ // Enqueue the value to the predecessors.
+ worklist.enqueuePredecessors(block);
+ }
+
+ return new LiveVariables(LV);
+}
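+// Minimal usage sketch (assuming an AnalysisDeclContext 'AC' for the function
+// of interest; 'S' and 'VD' are a statement and a variable inside it):
+//
+//   if (LiveVariables *LV = LiveVariables::computeLiveness(AC, true)) {
+//     bool liveHere = LV->isLive(S, VD);  // is 'VD' live at 'S'?
+//     delete LV;
+//   }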
+
+static bool compare_entries(const CFGBlock *A, const CFGBlock *B) {
+ return A->getBlockID() < B->getBlockID();
+}
+
+static bool compare_vd_entries(const Decl *A, const Decl *B) {
+ SourceLocation ALoc = A->getLocStart();
+ SourceLocation BLoc = B->getLocStart();
+ return ALoc.getRawEncoding() < BLoc.getRawEncoding();
+}
+
+void LiveVariables::dumpBlockLiveness(const SourceManager &M) {
+ getImpl(impl).dumpBlockLiveness(M);
+}
+
+void LiveVariablesImpl::dumpBlockLiveness(const SourceManager &M) {
+ std::vector<const CFGBlock *> vec;
+ for (llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues>::iterator
+ it = blocksEndToLiveness.begin(), ei = blocksEndToLiveness.end();
+ it != ei; ++it) {
+ vec.push_back(it->first);
+ }
+ std::sort(vec.begin(), vec.end(), compare_entries);
+
+ std::vector<const VarDecl*> declVec;
+
+ for (std::vector<const CFGBlock *>::iterator
+ it = vec.begin(), ei = vec.end(); it != ei; ++it) {
+ llvm::errs() << "\n[ B" << (*it)->getBlockID()
+ << " (live variables at block exit) ]\n";
+
+ LiveVariables::LivenessValues vals = blocksEndToLiveness[*it];
+ declVec.clear();
+
+ for (llvm::ImmutableSet<const VarDecl *>::iterator si =
+ vals.liveDecls.begin(),
+ se = vals.liveDecls.end(); si != se; ++si) {
+ declVec.push_back(*si);
+ }
+
+ std::sort(declVec.begin(), declVec.end(), compare_vd_entries);
+
+ for (std::vector<const VarDecl*>::iterator di = declVec.begin(),
+ de = declVec.end(); di != de; ++di) {
+ llvm::errs() << " " << (*di)->getDeclName().getAsString()
+ << " <";
+ (*di)->getLocation().dump(M);
+ llvm::errs() << ">\n";
+ }
+ }
+ llvm::errs() << "\n";
+}
+
+const void *LiveVariables::getTag() { static int x; return &x; }
+const void *RelaxedLiveVariables::getTag() { static int x; return &x; }
diff --git a/contrib/llvm/tools/clang/lib/Analysis/PostOrderCFGView.cpp b/contrib/llvm/tools/clang/lib/Analysis/PostOrderCFGView.cpp
new file mode 100644
index 0000000..cfd66f7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/PostOrderCFGView.cpp
@@ -0,0 +1,49 @@
+//===- PostOrderCFGView.cpp - Post order view of CFG blocks -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a post-order view of the blocks in a CFG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/PostOrderCFGView.h"
+
+using namespace clang;
+
+void PostOrderCFGView::anchor() { }
+
+PostOrderCFGView::PostOrderCFGView(const CFG *cfg) {
+ Blocks.reserve(cfg->getNumBlockIDs());
+ CFGBlockSet BSet(cfg);
+
+ for (po_iterator I = po_iterator::begin(cfg, BSet),
+ E = po_iterator::end(cfg, BSet); I != E; ++I) {
+ BlockOrder[*I] = Blocks.size() + 1;
+ Blocks.push_back(*I);
+ }
+}
+
+PostOrderCFGView *PostOrderCFGView::create(AnalysisDeclContext &ctx) {
+ const CFG *cfg = ctx.getCFG();
+ if (!cfg)
+ return 0;
+ return new PostOrderCFGView(cfg);
+}
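+// Usage sketch: the view is normally obtained through an AnalysisDeclContext
+// ('AC' below) and its comparator used to order dataflow worklists ('Blocks'
+// being any container of CFGBlock pointers), e.g.
+//
+//   PostOrderCFGView *POV = AC.getAnalysis<PostOrderCFGView>();
+//   std::sort(Blocks.begin(), Blocks.end(), POV->getComparator());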
+
+const void *PostOrderCFGView::getTag() { static int x; return &x; }
+
+bool PostOrderCFGView::BlockOrderCompare::operator()(const CFGBlock *b1,
+ const CFGBlock *b2) const {
+ PostOrderCFGView::BlockOrderTy::const_iterator b1It = POV.BlockOrder.find(b1);
+ PostOrderCFGView::BlockOrderTy::const_iterator b2It = POV.BlockOrder.find(b2);
+
+ unsigned b1V = (b1It == POV.BlockOrder.end()) ? 0 : b1It->second;
+ unsigned b2V = (b2It == POV.BlockOrder.end()) ? 0 : b2It->second;
+ return b1V > b2V;
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
new file mode 100644
index 0000000..4b2a19e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/PrintfFormatString.cpp
@@ -0,0 +1,679 @@
+//== PrintfFormatString.cpp - Analysis of printf format strings --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Handling of format strings in printf and friends. The structure of format
+// strings for fprintf() is described in C99 7.19.6.1.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/FormatString.h"
+#include "FormatStringParsing.h"
+
+using clang::analyze_format_string::ArgTypeResult;
+using clang::analyze_format_string::FormatStringHandler;
+using clang::analyze_format_string::LengthModifier;
+using clang::analyze_format_string::OptionalAmount;
+using clang::analyze_format_string::ConversionSpecifier;
+using clang::analyze_printf::PrintfSpecifier;
+
+using namespace clang;
+
+typedef clang::analyze_format_string::SpecifierResult<PrintfSpecifier>
+ PrintfSpecifierResult;
+
+//===----------------------------------------------------------------------===//
+// Methods for parsing format strings.
+//===----------------------------------------------------------------------===//
+
+using analyze_format_string::ParseNonPositionAmount;
+
+static bool ParsePrecision(FormatStringHandler &H, PrintfSpecifier &FS,
+ const char *Start, const char *&Beg, const char *E,
+ unsigned *argIndex) {
+ if (argIndex) {
+ FS.setPrecision(ParseNonPositionAmount(Beg, E, *argIndex));
+ } else {
+ const OptionalAmount Amt = ParsePositionAmount(H, Start, Beg, E,
+ analyze_format_string::PrecisionPos);
+ if (Amt.isInvalid())
+ return true;
+ FS.setPrecision(Amt);
+ }
+ return false;
+}
+
+static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
+ const char *&Beg,
+ const char *E,
+ unsigned &argIndex,
+ const LangOptions &LO) {
+
+ using namespace clang::analyze_format_string;
+ using namespace clang::analyze_printf;
+
+ const char *I = Beg;
+ const char *Start = 0;
+ UpdateOnReturn <const char*> UpdateBeg(Beg, I);
+
+ // Look for a '%' character that indicates the start of a format specifier.
+ for ( ; I != E ; ++I) {
+ char c = *I;
+ if (c == '\0') {
+ // Detect spurious null characters, which are likely errors.
+ H.HandleNullChar(I);
+ return true;
+ }
+ if (c == '%') {
+ Start = I++; // Record the start of the format specifier.
+ break;
+ }
+ }
+
+ // No format specifier found?
+ if (!Start)
+ return false;
+
+ if (I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ PrintfSpecifier FS;
+ if (ParseArgPosition(H, FS, Start, I, E))
+ return true;
+
+ if (I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ // Look for flags (if any).
+ bool hasMore = true;
+ for ( ; I != E; ++I) {
+ switch (*I) {
+ default: hasMore = false; break;
+ case '\'':
+ // FIXME: POSIX specific. Always accept?
+ FS.setHasThousandsGrouping(I);
+ break;
+ case '-': FS.setIsLeftJustified(I); break;
+ case '+': FS.setHasPlusPrefix(I); break;
+ case ' ': FS.setHasSpacePrefix(I); break;
+ case '#': FS.setHasAlternativeForm(I); break;
+ case '0': FS.setHasLeadingZeros(I); break;
+ }
+ if (!hasMore)
+ break;
+ }
+
+ if (I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ // Look for the field width (if any).
+ if (ParseFieldWidth(H, FS, Start, I, E,
+ FS.usesPositionalArg() ? 0 : &argIndex))
+ return true;
+
+ if (I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ // Look for the precision (if any).
+ if (*I == '.') {
+ ++I;
+ if (I == E) {
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ if (ParsePrecision(H, FS, Start, I, E,
+ FS.usesPositionalArg() ? 0 : &argIndex))
+ return true;
+
+ if (I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+ }
+
+ // Look for the length modifier.
+ if (ParseLengthModifier(FS, I, E, LO) && I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ if (*I == '\0') {
+ // Detect spurious null characters, which are likely errors.
+ H.HandleNullChar(I);
+ return true;
+ }
+
+ // Finally, look for the conversion specifier.
+ const char *conversionPosition = I++;
+ ConversionSpecifier::Kind k = ConversionSpecifier::InvalidSpecifier;
+ switch (*conversionPosition) {
+ default:
+ break;
+ // C99: 7.19.6.1 (section 8).
+ case '%': k = ConversionSpecifier::PercentArg; break;
+ case 'A': k = ConversionSpecifier::AArg; break;
+ case 'E': k = ConversionSpecifier::EArg; break;
+ case 'F': k = ConversionSpecifier::FArg; break;
+ case 'G': k = ConversionSpecifier::GArg; break;
+ case 'X': k = ConversionSpecifier::XArg; break;
+ case 'a': k = ConversionSpecifier::aArg; break;
+ case 'c': k = ConversionSpecifier::cArg; break;
+ case 'd': k = ConversionSpecifier::dArg; break;
+ case 'e': k = ConversionSpecifier::eArg; break;
+ case 'f': k = ConversionSpecifier::fArg; break;
+ case 'g': k = ConversionSpecifier::gArg; break;
+ case 'i': k = ConversionSpecifier::iArg; break;
+ case 'n': k = ConversionSpecifier::nArg; break;
+ case 'o': k = ConversionSpecifier::oArg; break;
+ case 'p': k = ConversionSpecifier::pArg; break;
+ case 's': k = ConversionSpecifier::sArg; break;
+ case 'u': k = ConversionSpecifier::uArg; break;
+ case 'x': k = ConversionSpecifier::xArg; break;
+ // POSIX specific.
+ case 'C': k = ConversionSpecifier::CArg; break;
+ case 'S': k = ConversionSpecifier::SArg; break;
+ // Objective-C.
+ case '@': k = ConversionSpecifier::ObjCObjArg; break;
+ // Glibc specific.
+ case 'm': k = ConversionSpecifier::PrintErrno; break;
+ // FreeBSD format extensions
+ case 'b': if (LO.FormatExtensions) k = ConversionSpecifier::bArg; break; /* check for int and then char * */
+ case 'r': if (LO.FormatExtensions) k = ConversionSpecifier::rArg; break;
+ case 'y': if (LO.FormatExtensions) k = ConversionSpecifier::iArg; break;
+ case 'D': if (LO.FormatExtensions) k = ConversionSpecifier::DArg; break; /* check for u_char * pointer and a char * string */
+ }
+ PrintfConversionSpecifier CS(conversionPosition, k);
+ FS.setConversionSpecifier(CS);
+ if (CS.consumesDataArgument() && !FS.usesPositionalArg())
+ FS.setArgIndex(argIndex++);
+ // FreeBSD extension
+ if (k == ConversionSpecifier::bArg || k == ConversionSpecifier::DArg)
+ argIndex++;
+
+ if (k == ConversionSpecifier::InvalidSpecifier) {
+ // Assume the conversion takes one argument.
+ return !H.HandleInvalidPrintfConversionSpecifier(FS, Start, I - Start);
+ }
+ return PrintfSpecifierResult(Start, FS);
+}
+
+bool clang::analyze_format_string::ParsePrintfString(FormatStringHandler &H,
+ const char *I,
+ const char *E,
+ const LangOptions &LO) {
+
+ unsigned argIndex = 0;
+
+ // Keep looking for a format specifier until we have exhausted the string.
+ while (I != E) {
+ const PrintfSpecifierResult &FSR = ParsePrintfSpecifier(H, I, E, argIndex,
+ LO);
+ // Did a fail-stop error of any kind occur when parsing the specifier?
+ // If so, don't do any more processing.
+ if (FSR.shouldStop())
+      return true;
+ // Did we exhaust the string or encounter an error that
+ // we can recover from?
+ if (!FSR.hasValue())
+ continue;
+ // We have a format specifier. Pass it to the callback.
+ if (!H.HandlePrintfSpecifier(FSR.getValue(), FSR.getStart(),
+ I - FSR.getStart()))
+ return true;
+ }
+ assert(I == E && "Format string not exhausted");
+ return false;
+}
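+// Usage sketch: a client derives from FormatStringHandler, overrides the
+// callbacks it cares about (e.g. HandlePrintfSpecifier, HandleNullChar), and
+// drives the parse over the raw bytes of the format string. 'MyHandler',
+// 'Str', 'Len' and 'LangOpts' below are placeholders:
+//
+//   MyHandler H;
+//   bool hadFailStop = ParsePrintfString(H, Str, Str + Len, LangOpts);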
+
+//===----------------------------------------------------------------------===//
+// Methods on PrintfSpecifier.
+//===----------------------------------------------------------------------===//
+
+ArgTypeResult PrintfSpecifier::getArgType(ASTContext &Ctx,
+ bool IsObjCLiteral) const {
+ const PrintfConversionSpecifier &CS = getConversionSpecifier();
+
+ if (!CS.consumesDataArgument())
+ return ArgTypeResult::Invalid();
+
+ if (CS.getKind() == ConversionSpecifier::cArg)
+ switch (LM.getKind()) {
+ case LengthModifier::None: return Ctx.IntTy;
+ case LengthModifier::AsLong:
+ return ArgTypeResult(ArgTypeResult::WIntTy, "wint_t");
+ default:
+ return ArgTypeResult::Invalid();
+ }
+
+ if (CS.isIntArg())
+ switch (LM.getKind()) {
+ case LengthModifier::AsLongDouble:
+ // GNU extension.
+ return Ctx.LongLongTy;
+ case LengthModifier::None: return Ctx.IntTy;
+ case LengthModifier::AsChar: return ArgTypeResult::AnyCharTy;
+ case LengthModifier::AsShort: return Ctx.ShortTy;
+ case LengthModifier::AsLong: return Ctx.LongTy;
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return Ctx.LongLongTy;
+ case LengthModifier::AsIntMax:
+ return ArgTypeResult(Ctx.getIntMaxType(), "intmax_t");
+ case LengthModifier::AsSizeT:
+ // FIXME: How to get the corresponding signed version of size_t?
+ return ArgTypeResult();
+ case LengthModifier::AsPtrDiff:
+ return ArgTypeResult(Ctx.getPointerDiffType(), "ptrdiff_t");
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ return ArgTypeResult::Invalid();
+ }
+
+ if (CS.isUIntArg())
+ switch (LM.getKind()) {
+ case LengthModifier::AsLongDouble:
+ // GNU extension.
+ return Ctx.UnsignedLongLongTy;
+ case LengthModifier::None: return Ctx.UnsignedIntTy;
+ case LengthModifier::AsChar: return Ctx.UnsignedCharTy;
+ case LengthModifier::AsShort: return Ctx.UnsignedShortTy;
+ case LengthModifier::AsLong: return Ctx.UnsignedLongTy;
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return Ctx.UnsignedLongLongTy;
+ case LengthModifier::AsIntMax:
+ return ArgTypeResult(Ctx.getUIntMaxType(), "uintmax_t");
+ case LengthModifier::AsSizeT:
+ return ArgTypeResult(Ctx.getSizeType(), "size_t");
+ case LengthModifier::AsPtrDiff:
+ // FIXME: How to get the corresponding unsigned
+ // version of ptrdiff_t?
+ return ArgTypeResult();
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ return ArgTypeResult::Invalid();
+ }
+
+ if (CS.isDoubleArg()) {
+ if (LM.getKind() == LengthModifier::AsLongDouble)
+ return Ctx.LongDoubleTy;
+ return Ctx.DoubleTy;
+ }
+
+ switch (CS.getKind()) {
+ case ConversionSpecifier::sArg:
+ if (LM.getKind() == LengthModifier::AsWideChar) {
+ if (IsObjCLiteral)
+ return Ctx.getPointerType(Ctx.UnsignedShortTy.withConst());
+ return ArgTypeResult(ArgTypeResult::WCStrTy, "wchar_t *");
+ }
+ return ArgTypeResult::CStrTy;
+ case ConversionSpecifier::SArg:
+ if (IsObjCLiteral)
+ return Ctx.getPointerType(Ctx.UnsignedShortTy.withConst());
+ return ArgTypeResult(ArgTypeResult::WCStrTy, "wchar_t *");
+ case ConversionSpecifier::CArg:
+ if (IsObjCLiteral)
+ return Ctx.UnsignedShortTy;
+ return ArgTypeResult(Ctx.WCharTy, "wchar_t");
+ case ConversionSpecifier::pArg:
+ return ArgTypeResult::CPointerTy;
+ case ConversionSpecifier::ObjCObjArg:
+ return ArgTypeResult::ObjCPointerTy;
+ default:
+ break;
+ }
+
+ // FIXME: Handle other cases.
+ return ArgTypeResult();
+}
+
+bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
+ ASTContext &Ctx, bool IsObjCLiteral) {
+ // Handle strings first (char *, wchar_t *)
+ if (QT->isPointerType() && (QT->getPointeeType()->isAnyCharacterType())) {
+ CS.setKind(ConversionSpecifier::sArg);
+
+ // Disable irrelevant flags
+ HasAlternativeForm = 0;
+ HasLeadingZeroes = 0;
+
+ // Set the long length modifier for wide characters
+ if (QT->getPointeeType()->isWideCharType())
+ LM.setKind(LengthModifier::AsWideChar);
+ else
+ LM.setKind(LengthModifier::None);
+
+ return true;
+ }
+
+ // We can only work with builtin types.
+ const BuiltinType *BT = QT->getAs<BuiltinType>();
+ if (!BT)
+ return false;
+
+ // Set length modifier
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ case BuiltinType::WChar_U:
+ case BuiltinType::WChar_S:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::UInt128:
+ case BuiltinType::Int128:
+ case BuiltinType::Half:
+ // Various types which are non-trivial to correct.
+ return false;
+
+#define SIGNED_TYPE(Id, SingletonId)
+#define UNSIGNED_TYPE(Id, SingletonId)
+#define FLOATING_TYPE(Id, SingletonId)
+#define BUILTIN_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ // Misc other stuff which doesn't make sense here.
+ return false;
+
+ case BuiltinType::UInt:
+ case BuiltinType::Int:
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ LM.setKind(LengthModifier::None);
+ break;
+
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ LM.setKind(LengthModifier::AsChar);
+ break;
+
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ LM.setKind(LengthModifier::AsShort);
+ break;
+
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ LM.setKind(LengthModifier::AsLong);
+ break;
+
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ LM.setKind(LengthModifier::AsLongLong);
+ break;
+
+ case BuiltinType::LongDouble:
+ LM.setKind(LengthModifier::AsLongDouble);
+ break;
+ }
+
+ // Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99.
+ if (isa<TypedefType>(QT) && (LangOpt.C99 || LangOpt.CPlusPlus0x)) {
+ const IdentifierInfo *Identifier = QT.getBaseTypeIdentifier();
+ if (Identifier->getName() == "size_t") {
+ LM.setKind(LengthModifier::AsSizeT);
+ } else if (Identifier->getName() == "ssize_t") {
+ // Not C99, but common in Unix.
+ LM.setKind(LengthModifier::AsSizeT);
+ } else if (Identifier->getName() == "intmax_t") {
+ LM.setKind(LengthModifier::AsIntMax);
+ } else if (Identifier->getName() == "uintmax_t") {
+ LM.setKind(LengthModifier::AsIntMax);
+ } else if (Identifier->getName() == "ptrdiff_t") {
+ LM.setKind(LengthModifier::AsPtrDiff);
+ }
+ }
+
+ // If fixing the length modifier was enough, we are done.
+ const analyze_printf::ArgTypeResult &ATR = getArgType(Ctx, IsObjCLiteral);
+ if (hasValidLengthModifier() && ATR.isValid() && ATR.matchesType(Ctx, QT))
+ return true;
+
+ // Set conversion specifier and disable any flags which do not apply to it.
+ // Let typedefs to char fall through to int, as %c is silly for uint8_t.
+ if (isa<TypedefType>(QT) && QT->isAnyCharacterType()) {
+ CS.setKind(ConversionSpecifier::cArg);
+ LM.setKind(LengthModifier::None);
+ Precision.setHowSpecified(OptionalAmount::NotSpecified);
+ HasAlternativeForm = 0;
+ HasLeadingZeroes = 0;
+ HasPlusPrefix = 0;
+ }
+ // Test for Floating type first as LongDouble can pass isUnsignedIntegerType
+ else if (QT->isRealFloatingType()) {
+ CS.setKind(ConversionSpecifier::fArg);
+ }
+ else if (QT->isSignedIntegerType()) {
+ CS.setKind(ConversionSpecifier::dArg);
+ HasAlternativeForm = 0;
+ }
+ else if (QT->isUnsignedIntegerType()) {
+ CS.setKind(ConversionSpecifier::uArg);
+ HasAlternativeForm = 0;
+ HasPlusPrefix = 0;
+ } else {
+ llvm_unreachable("Unexpected type");
+ }
+
+ return true;
+}
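+// Worked example of the correction above (assuming a typical 64-bit target):
+// for an argument of type 'long' paired with "%d", the length modifier is set
+// to AsLong and the conversion stays 'd', giving "%ld"; for a 'size_t'
+// argument under C99, the typedef check selects AsSizeT, steering the
+// specifier toward "%zu".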
+
+void PrintfSpecifier::toString(raw_ostream &os) const {
+ // Whilst some features have no defined order, we are using the order
+ // appearing in the C99 standard (ISO/IEC 9899:1999 (E) 7.19.6.1)
+ os << "%";
+
+ // Positional args
+ if (usesPositionalArg()) {
+ os << getPositionalArgIndex() << "$";
+ }
+
+ // Conversion flags
+ if (IsLeftJustified) os << "-";
+ if (HasPlusPrefix) os << "+";
+ if (HasSpacePrefix) os << " ";
+ if (HasAlternativeForm) os << "#";
+ if (HasLeadingZeroes) os << "0";
+
+ // Minimum field width
+ FieldWidth.toString(os);
+ // Precision
+ Precision.toString(os);
+ // Length modifier
+ os << LM.toString();
+ // Conversion specifier
+ os << CS.toString();
+}
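+// For example, a left-justified specifier with field width 10, precision 3,
+// the 'l' length modifier and the 'f' conversion is printed back as
+// "%-10.3lf", following the C99 ordering noted above.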
+
+bool PrintfSpecifier::hasValidPlusPrefix() const {
+ if (!HasPlusPrefix)
+ return true;
+
+ // The plus prefix only makes sense for signed conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::rArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool PrintfSpecifier::hasValidAlternativeForm() const {
+ if (!HasAlternativeForm)
+ return true;
+
+ // Alternate form flag only valid with the oxXaAeEfFgG conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::rArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool PrintfSpecifier::hasValidLeadingZeros() const {
+ if (!HasLeadingZeroes)
+ return true;
+
+ // Leading zeroes flag only valid with the diouxXaAeEfFgG conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool PrintfSpecifier::hasValidSpacePrefix() const {
+ if (!HasSpacePrefix)
+ return true;
+
+ // The space prefix only makes sense for signed conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool PrintfSpecifier::hasValidLeftJustified() const {
+ if (!IsLeftJustified)
+ return true;
+
+ // The left justified flag is valid for all conversions except n
+ switch (CS.getKind()) {
+ case ConversionSpecifier::nArg:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
+bool PrintfSpecifier::hasValidThousandsGroupingPrefix() const {
+ if (!HasThousandsGrouping)
+ return true;
+
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ return true;
+ default:
+ return false;
+ }
+}
+
+bool PrintfSpecifier::hasValidPrecision() const {
+ if (Precision.getHowSpecified() == OptionalAmount::NotSpecified)
+ return true;
+
+ // Precision is only valid with the diouxXaAeEfFgGs conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::sArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool PrintfSpecifier::hasValidFieldWidth() const {
+ if (FieldWidth.getHowSpecified() == OptionalAmount::NotSpecified)
+ return true;
+
+ // The field width is valid for all conversions except n
+ switch (CS.getKind()) {
+ case ConversionSpecifier::nArg:
+ return false;
+
+ default:
+ return true;
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp b/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp
new file mode 100644
index 0000000..3f711b4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/ProgramPoint.cpp
@@ -0,0 +1,49 @@
+//==- ProgramPoint.cpp - Program Points for Path-Sensitive Analysis -*- C++ -*-/
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the interface ProgramPoint, which identifies a
+// distinct location in a function.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/ProgramPoint.h"
+
+using namespace clang;
+
+ProgramPointTag::~ProgramPointTag() {}
+
+ProgramPoint ProgramPoint::getProgramPoint(const Stmt *S, ProgramPoint::Kind K,
+ const LocationContext *LC,
+ const ProgramPointTag *tag){
+ switch (K) {
+ default:
+ llvm_unreachable("Unhandled ProgramPoint kind");
+ case ProgramPoint::PreStmtKind:
+ return PreStmt(S, LC, tag);
+ case ProgramPoint::PostStmtKind:
+ return PostStmt(S, LC, tag);
+ case ProgramPoint::PreLoadKind:
+ return PreLoad(S, LC, tag);
+ case ProgramPoint::PostLoadKind:
+ return PostLoad(S, LC, tag);
+ case ProgramPoint::PreStoreKind:
+ return PreStore(S, LC, tag);
+ case ProgramPoint::PostLValueKind:
+ return PostLValue(S, LC, tag);
+ case ProgramPoint::PostPurgeDeadSymbolsKind:
+ return PostPurgeDeadSymbols(S, LC, tag);
+ }
+}
+
+SimpleProgramPointTag::SimpleProgramPointTag(StringRef description)
+ : desc(description) {}
+
+StringRef SimpleProgramPointTag::getTagDescription() const {
+ return desc;
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp b/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp
new file mode 100644
index 0000000..c8b491a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/PseudoConstantAnalysis.cpp
@@ -0,0 +1,227 @@
+//== PseudoConstantAnalysis.cpp - Find Pseudoconstants in the AST-*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file tracks the usage of variables in a Decl body to see if they are
+// never written to, implying that they are constant. This is useful in static
+// analysis to see if a developer might have intended a variable to be const.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/Analyses/PseudoConstantAnalysis.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Stmt.h"
+#include <deque>
+
+using namespace clang;
+
+// The number of ValueDecls we want to keep track of by default (per-function)
+#define VARDECL_SET_SIZE 256
+typedef llvm::SmallPtrSet<const VarDecl*, VARDECL_SET_SIZE> VarDeclSet;
+
+PseudoConstantAnalysis::PseudoConstantAnalysis(const Stmt *DeclBody) :
+ DeclBody(DeclBody), Analyzed(false) {
+ NonConstantsImpl = new VarDeclSet;
+ UsedVarsImpl = new VarDeclSet;
+}
+
+PseudoConstantAnalysis::~PseudoConstantAnalysis() {
+ delete (VarDeclSet*)NonConstantsImpl;
+ delete (VarDeclSet*)UsedVarsImpl;
+}
+
+// Returns true if the given VarDecl is never written to in the given DeclBody.
+bool PseudoConstantAnalysis::isPseudoConstant(const VarDecl *VD) {
+ // Only local and static variables can be pseudoconstants
+ if (!VD->hasLocalStorage() && !VD->isStaticLocal())
+ return false;
+
+ if (!Analyzed) {
+ RunAnalysis();
+ Analyzed = true;
+ }
+
+ VarDeclSet *NonConstants = (VarDeclSet*)NonConstantsImpl;
+
+ return !NonConstants->count(VD);
+}
+
+// Returns true if the variable was used (self assignments don't count)
+bool PseudoConstantAnalysis::wasReferenced(const VarDecl *VD) {
+ if (!Analyzed) {
+ RunAnalysis();
+ Analyzed = true;
+ }
+
+ VarDeclSet *UsedVars = (VarDeclSet*)UsedVarsImpl;
+
+ return UsedVars->count(VD);
+}
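+// Usage sketch (with 'Body' the body of the function being inspected and 'VD'
+// one of its local variables):
+//
+//   PseudoConstantAnalysis PCA(Body);
+//   if (PCA.wasReferenced(VD) && PCA.isPseudoConstant(VD)) {
+//     // 'VD' is read but never written; it could have been declared const.
+//   }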
+
+// Returns a Decl from a (Block)DeclRefExpr (if any)
+const Decl *PseudoConstantAnalysis::getDecl(const Expr *E) {
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E))
+ return DR->getDecl();
+ else
+ return 0;
+}
+
+void PseudoConstantAnalysis::RunAnalysis() {
+ std::deque<const Stmt *> WorkList;
+ VarDeclSet *NonConstants = (VarDeclSet*)NonConstantsImpl;
+ VarDeclSet *UsedVars = (VarDeclSet*)UsedVarsImpl;
+
+ // Start with the top level statement of the function
+ WorkList.push_back(DeclBody);
+
+ while (!WorkList.empty()) {
+ const Stmt *Head = WorkList.front();
+ WorkList.pop_front();
+
+ if (const Expr *Ex = dyn_cast<Expr>(Head))
+ Head = Ex->IgnoreParenCasts();
+
+ switch (Head->getStmtClass()) {
+ // Case 1: Assignment operators modifying VarDecls
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(Head);
+ // Look for a Decl on the LHS
+ const Decl *LHSDecl = getDecl(BO->getLHS()->IgnoreParenCasts());
+ if (!LHSDecl)
+ break;
+
+ // We found a binary operator with a DeclRefExpr on the LHS. We now check
+ // for any of the assignment operators, implying that this Decl is being
+ // written to.
+ switch (BO->getOpcode()) {
+ // Self-assignments don't count as use of a variable
+ case BO_Assign: {
+ // Look for a DeclRef on the RHS
+ const Decl *RHSDecl = getDecl(BO->getRHS()->IgnoreParenCasts());
+
+ // If the Decls match, we have self-assignment
+ if (LHSDecl == RHSDecl)
+ // Do not visit the children
+ continue;
+
+ }
+ case BO_AddAssign:
+ case BO_SubAssign:
+ case BO_MulAssign:
+ case BO_DivAssign:
+ case BO_AndAssign:
+ case BO_OrAssign:
+ case BO_XorAssign:
+ case BO_ShlAssign:
+ case BO_ShrAssign: {
+ const VarDecl *VD = dyn_cast<VarDecl>(LHSDecl);
+ // The DeclRefExpr is being assigned to - mark it as non-constant
+ if (VD)
+ NonConstants->insert(VD);
+ break;
+ }
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ // Case 2: Pre/post increment/decrement and address of
+ case Stmt::UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(Head);
+
+ // Look for a DeclRef in the subexpression
+ const Decl *D = getDecl(UO->getSubExpr()->IgnoreParenCasts());
+ if (!D)
+ break;
+
+ // We found a unary operator with a DeclRef as a subexpression. We now
+ // check for any of the increment/decrement operators, as well as
+ // addressOf.
+ switch (UO->getOpcode()) {
+ case UO_PostDec:
+ case UO_PostInc:
+ case UO_PreDec:
+ case UO_PreInc:
+ // The DeclRef is being changed - mark it as non-constant
+ case UO_AddrOf: {
+ // If we are taking the address of the DeclRefExpr, assume it is
+ // non-constant.
+ const VarDecl *VD = dyn_cast<VarDecl>(D);
+ if (VD)
+ NonConstants->insert(VD);
+ break;
+ }
+
+ default:
+ break;
+ }
+ break;
+ }
+
+ // Case 3: Reference Declarations
+ case Stmt::DeclStmtClass: {
+ const DeclStmt *DS = cast<DeclStmt>(Head);
+ // Iterate over each decl and see if any of them contain reference decls
+ for (DeclStmt::const_decl_iterator I = DS->decl_begin(),
+ E = DS->decl_end(); I != E; ++I) {
+ // We only care about VarDecls
+ const VarDecl *VD = dyn_cast<VarDecl>(*I);
+ if (!VD)
+ continue;
+
+ // We found a VarDecl; make sure it is a reference type
+ if (!VD->getType().getTypePtr()->isReferenceType())
+ continue;
+
+ // Try to find a Decl in the initializer
+ const Decl *D = getDecl(VD->getInit()->IgnoreParenCasts());
+ if (!D)
+ break;
+
+ // If the reference is to another var, add the var to the non-constant
+ // list
+ if (const VarDecl *RefVD = dyn_cast<VarDecl>(D)) {
+ NonConstants->insert(RefVD);
+ continue;
+ }
+ }
+ break;
+ }
+
+ // Case 4: Variable references
+ case Stmt::DeclRefExprClass: {
+ const DeclRefExpr *DR = cast<DeclRefExpr>(Head);
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ // Add the Decl to the used list
+ UsedVars->insert(VD);
+ continue;
+ }
+ break;
+ }
+
+ // Case 5: Block expressions
+ case Stmt::BlockExprClass: {
+ const BlockExpr *B = cast<BlockExpr>(Head);
+ // Add the body of the block to the list
+ WorkList.push_back(B->getBody());
+ continue;
+ }
+
+ default:
+ break;
+ } // switch (head->getStmtClass())
+
+ // Add all substatements to the worklist
+ for (Stmt::const_child_range I = Head->children(); I; ++I)
+ if (*I)
+ WorkList.push_back(*I);
+ } // while (!WorkList.empty())
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp b/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp
new file mode 100644
index 0000000..bb63e2c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/ReachableCode.cpp
@@ -0,0 +1,331 @@
+//===-- ReachableCode.cpp - Code Reachability Analysis ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a flow-sensitive, path-insensitive analysis that
+// determines which blocks within a CFG are reachable.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Analysis/Analyses/ReachableCode.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Basic/SourceManager.h"
+
+using namespace clang;
+
+namespace {
+class DeadCodeScan {
+ llvm::BitVector Visited;
+ llvm::BitVector &Reachable;
+ llvm::SmallVector<const CFGBlock *, 10> WorkList;
+
+ typedef llvm::SmallVector<std::pair<const CFGBlock *, const Stmt *>, 12>
+ DeferredLocsTy;
+
+ DeferredLocsTy DeferredLocs;
+
+public:
+ DeadCodeScan(llvm::BitVector &reachable)
+ : Visited(reachable.size()),
+ Reachable(reachable) {}
+
+ void enqueue(const CFGBlock *block);
+ unsigned scanBackwards(const CFGBlock *Start,
+ clang::reachable_code::Callback &CB);
+
+ bool isDeadCodeRoot(const CFGBlock *Block);
+
+ const Stmt *findDeadCode(const CFGBlock *Block);
+
+ void reportDeadCode(const Stmt *S,
+ clang::reachable_code::Callback &CB);
+};
+}
+
+void DeadCodeScan::enqueue(const CFGBlock *block) {
+ unsigned blockID = block->getBlockID();
+ if (Reachable[blockID] || Visited[blockID])
+ return;
+ Visited[blockID] = true;
+ WorkList.push_back(block);
+}
+
+bool DeadCodeScan::isDeadCodeRoot(const clang::CFGBlock *Block) {
+ bool isDeadRoot = true;
+
+ for (CFGBlock::const_pred_iterator I = Block->pred_begin(),
+ E = Block->pred_end(); I != E; ++I) {
+ if (const CFGBlock *PredBlock = *I) {
+ unsigned blockID = PredBlock->getBlockID();
+ if (Visited[blockID]) {
+ isDeadRoot = false;
+ continue;
+ }
+ if (!Reachable[blockID]) {
+ isDeadRoot = false;
+ Visited[blockID] = true;
+ WorkList.push_back(PredBlock);
+ continue;
+ }
+ }
+ }
+
+ return isDeadRoot;
+}
+
+static bool isValidDeadStmt(const Stmt *S) {
+ if (S->getLocStart().isInvalid())
+ return false;
+ if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(S))
+ return BO->getOpcode() != BO_Comma;
+ return true;
+}
+
+const Stmt *DeadCodeScan::findDeadCode(const clang::CFGBlock *Block) {
+ for (CFGBlock::const_iterator I = Block->begin(), E = Block->end(); I!=E; ++I)
+ if (const CFGStmt *CS = I->getAs<CFGStmt>()) {
+ const Stmt *S = CS->getStmt();
+ if (isValidDeadStmt(S))
+ return S;
+ }
+
+ if (CFGTerminator T = Block->getTerminator()) {
+ const Stmt *S = T.getStmt();
+ if (isValidDeadStmt(S))
+ return S;
+ }
+
+ return 0;
+}
+
+static int SrcCmp(const void *p1, const void *p2) {
+  typedef std::pair<const CFGBlock *, const Stmt *> BlockStmtPair;
+  SourceLocation L1 = ((const BlockStmtPair *) p1)->second->getLocStart();
+  SourceLocation L2 = ((const BlockStmtPair *) p2)->second->getLocStart();
+  // Return a proper three-way result so the qsort-style comparator orders
+  // entries by source location.
+  if (L1 < L2) return -1;
+  if (L2 < L1) return 1;
+  return 0;
+}
+
+unsigned DeadCodeScan::scanBackwards(const clang::CFGBlock *Start,
+ clang::reachable_code::Callback &CB) {
+
+ unsigned count = 0;
+ enqueue(Start);
+
+ while (!WorkList.empty()) {
+ const CFGBlock *Block = WorkList.pop_back_val();
+
+ // It is possible that this block has been marked reachable after
+ // it was enqueued.
+ if (Reachable[Block->getBlockID()])
+ continue;
+
+ // Look for any dead code within the block.
+ const Stmt *S = findDeadCode(Block);
+
+ if (!S) {
+ // No dead code. Possibly an empty block. Look at dead predecessors.
+ for (CFGBlock::const_pred_iterator I = Block->pred_begin(),
+ E = Block->pred_end(); I != E; ++I) {
+ if (const CFGBlock *predBlock = *I)
+ enqueue(predBlock);
+ }
+ continue;
+ }
+
+ // Specially handle macro-expanded code.
+ if (S->getLocStart().isMacroID()) {
+ count += clang::reachable_code::ScanReachableFromBlock(Block, Reachable);
+ continue;
+ }
+
+ if (isDeadCodeRoot(Block)) {
+ reportDeadCode(S, CB);
+ count += clang::reachable_code::ScanReachableFromBlock(Block, Reachable);
+ }
+ else {
+      // Record this statement as possibly the best location in a
+ // strongly-connected component of dead code for emitting a
+ // warning.
+ DeferredLocs.push_back(std::make_pair(Block, S));
+ }
+ }
+
+ // If we didn't find a dead root, then report the dead code with the
+ // earliest location.
+ if (!DeferredLocs.empty()) {
+ llvm::array_pod_sort(DeferredLocs.begin(), DeferredLocs.end(), SrcCmp);
+ for (DeferredLocsTy::iterator I = DeferredLocs.begin(),
+ E = DeferredLocs.end(); I != E; ++I) {
+ const CFGBlock *block = I->first;
+ if (Reachable[block->getBlockID()])
+ continue;
+ reportDeadCode(I->second, CB);
+ count += clang::reachable_code::ScanReachableFromBlock(block, Reachable);
+ }
+ }
+
+ return count;
+}
+
+static SourceLocation GetUnreachableLoc(const Stmt *S,
+ SourceRange &R1,
+ SourceRange &R2) {
+ R1 = R2 = SourceRange();
+
+ if (const Expr *Ex = dyn_cast<Expr>(S))
+ S = Ex->IgnoreParenImpCasts();
+
+ switch (S->getStmtClass()) {
+ case Expr::BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(S);
+ return BO->getOperatorLoc();
+ }
+ case Expr::UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(S);
+ R1 = UO->getSubExpr()->getSourceRange();
+ return UO->getOperatorLoc();
+ }
+ case Expr::CompoundAssignOperatorClass: {
+ const CompoundAssignOperator *CAO = cast<CompoundAssignOperator>(S);
+ R1 = CAO->getLHS()->getSourceRange();
+ R2 = CAO->getRHS()->getSourceRange();
+ return CAO->getOperatorLoc();
+ }
+ case Expr::BinaryConditionalOperatorClass:
+ case Expr::ConditionalOperatorClass: {
+ const AbstractConditionalOperator *CO =
+ cast<AbstractConditionalOperator>(S);
+ return CO->getQuestionLoc();
+ }
+ case Expr::MemberExprClass: {
+ const MemberExpr *ME = cast<MemberExpr>(S);
+ R1 = ME->getSourceRange();
+ return ME->getMemberLoc();
+ }
+ case Expr::ArraySubscriptExprClass: {
+ const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(S);
+ R1 = ASE->getLHS()->getSourceRange();
+ R2 = ASE->getRHS()->getSourceRange();
+ return ASE->getRBracketLoc();
+ }
+ case Expr::CStyleCastExprClass: {
+ const CStyleCastExpr *CSC = cast<CStyleCastExpr>(S);
+ R1 = CSC->getSubExpr()->getSourceRange();
+ return CSC->getLParenLoc();
+ }
+ case Expr::CXXFunctionalCastExprClass: {
+ const CXXFunctionalCastExpr *CE = cast <CXXFunctionalCastExpr>(S);
+ R1 = CE->getSubExpr()->getSourceRange();
+ return CE->getTypeBeginLoc();
+ }
+ case Stmt::CXXTryStmtClass: {
+ return cast<CXXTryStmt>(S)->getHandler(0)->getCatchLoc();
+ }
+ case Expr::ObjCBridgedCastExprClass: {
+ const ObjCBridgedCastExpr *CSC = cast<ObjCBridgedCastExpr>(S);
+ R1 = CSC->getSubExpr()->getSourceRange();
+ return CSC->getLParenLoc();
+ }
+ default: ;
+ }
+ R1 = S->getSourceRange();
+ return S->getLocStart();
+}
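+
+// Illustrative note: for an unreachable array access such as 'a[i]', the
+// function above returns the location of the ']' and sets R1/R2 to the source
+// ranges of 'a' and 'i', so the diagnostic caret and highlighted ranges land
+// on the interesting sub-expressions.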
+
+void DeadCodeScan::reportDeadCode(const Stmt *S,
+ clang::reachable_code::Callback &CB) {
+ SourceRange R1, R2;
+ SourceLocation Loc = GetUnreachableLoc(S, R1, R2);
+ CB.HandleUnreachable(Loc, R1, R2);
+}
+
+namespace clang { namespace reachable_code {
+
+void Callback::anchor() { }
+
+unsigned ScanReachableFromBlock(const CFGBlock *Start,
+ llvm::BitVector &Reachable) {
+ unsigned count = 0;
+
+ // Prep work queue
+ SmallVector<const CFGBlock*, 32> WL;
+
+ // The entry block may have already been marked reachable
+ // by the caller.
+ if (!Reachable[Start->getBlockID()]) {
+ ++count;
+ Reachable[Start->getBlockID()] = true;
+ }
+
+ WL.push_back(Start);
+
+ // Find the reachable blocks from 'Start'.
+ while (!WL.empty()) {
+ const CFGBlock *item = WL.pop_back_val();
+
+    // Look at the successors and mark them reachable.
+ for (CFGBlock::const_succ_iterator I = item->succ_begin(),
+ E = item->succ_end(); I != E; ++I)
+ if (const CFGBlock *B = *I) {
+ unsigned blockID = B->getBlockID();
+ if (!Reachable[blockID]) {
+ Reachable.set(blockID);
+ WL.push_back(B);
+ ++count;
+ }
+ }
+ }
+ return count;
+}
+
+void FindUnreachableCode(AnalysisDeclContext &AC, Callback &CB) {
+ CFG *cfg = AC.getCFG();
+ if (!cfg)
+ return;
+
+ // Scan for reachable blocks from the entrance of the CFG.
+ // If there are no unreachable blocks, we're done.
+ llvm::BitVector reachable(cfg->getNumBlockIDs());
+ unsigned numReachable = ScanReachableFromBlock(&cfg->getEntry(), reachable);
+ if (numReachable == cfg->getNumBlockIDs())
+ return;
+
+ // If there aren't explicit EH edges, we should include the 'try' dispatch
+ // blocks as roots.
+ if (!AC.getCFGBuildOptions().AddEHEdges) {
+ for (CFG::try_block_iterator I = cfg->try_blocks_begin(),
+ E = cfg->try_blocks_end() ; I != E; ++I) {
+ numReachable += ScanReachableFromBlock(*I, reachable);
+ }
+ if (numReachable == cfg->getNumBlockIDs())
+ return;
+ }
+
+ // There are some unreachable blocks. We need to find the root blocks that
+ // contain code that should be considered unreachable.
+ for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
+ const CFGBlock *block = *I;
+ // A block may have been marked reachable during this loop.
+ if (reachable[block->getBlockID()])
+ continue;
+
+ DeadCodeScan DS(reachable);
+ numReachable += DS.scanBackwards(block, CB);
+
+ if (numReachable == cfg->getNumBlockIDs())
+ return;
+ }
+}
+
+}} // end namespace clang::reachable_code
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp b/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp
new file mode 100644
index 0000000..6bc4adb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/ScanfFormatString.cpp
@@ -0,0 +1,499 @@
+//= ScanfFormatString.cpp - Analysis of scanf format strings ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Handling of format strings in scanf and friends. The structure of format
+// strings for fscanf() is described in C99 7.19.6.2.
+//
+//===----------------------------------------------------------------------===//
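+//
+// For illustration (hypothetical directive, not from this file): the directive
+//
+//   %*5ld
+//
+// consists of assignment suppression ('*'), a field width (5), a length
+// modifier ('l') and a conversion specifier ('d'); the parser below recognizes
+// these components in that order.
+//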
+
+#include "clang/Analysis/Analyses/FormatString.h"
+#include "FormatStringParsing.h"
+
+using clang::analyze_format_string::ArgTypeResult;
+using clang::analyze_format_string::FormatStringHandler;
+using clang::analyze_format_string::LengthModifier;
+using clang::analyze_format_string::OptionalAmount;
+using clang::analyze_format_string::ConversionSpecifier;
+using clang::analyze_scanf::ScanfArgTypeResult;
+using clang::analyze_scanf::ScanfConversionSpecifier;
+using clang::analyze_scanf::ScanfSpecifier;
+using clang::UpdateOnReturn;
+using namespace clang;
+
+typedef clang::analyze_format_string::SpecifierResult<ScanfSpecifier>
+ ScanfSpecifierResult;
+
+static bool ParseScanList(FormatStringHandler &H,
+ ScanfConversionSpecifier &CS,
+ const char *&Beg, const char *E) {
+ const char *I = Beg;
+ const char *start = I - 1;
+ UpdateOnReturn <const char*> UpdateBeg(Beg, I);
+
+ // No more characters?
+ if (I == E) {
+ H.HandleIncompleteScanList(start, I);
+ return true;
+ }
+
+ // Special case: ']' is the first character.
+ if (*I == ']') {
+ if (++I == E) {
+ H.HandleIncompleteScanList(start, I - 1);
+ return true;
+ }
+ }
+
+ // Look for a ']' character which denotes the end of the scan list.
+ while (*I != ']') {
+ if (++I == E) {
+ H.HandleIncompleteScanList(start, I - 1);
+ return true;
+ }
+ }
+
+ CS.setEndScanList(I);
+ return false;
+}
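+
+// Illustrative note: in a scanlist such as %[]abc], the leading ']' is
+// consumed by the special case above and belongs to the scan set, so the set
+// matches ']', 'a', 'b' and 'c', and the second ']' terminates the list.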
+
+// FIXME: Much of this is copy-paste from ParsePrintfSpecifier.
+// We can possibly refactor.
+static ScanfSpecifierResult ParseScanfSpecifier(FormatStringHandler &H,
+ const char *&Beg,
+ const char *E,
+ unsigned &argIndex,
+ const LangOptions &LO) {
+
+ using namespace clang::analyze_scanf;
+ const char *I = Beg;
+ const char *Start = 0;
+ UpdateOnReturn <const char*> UpdateBeg(Beg, I);
+
+ // Look for a '%' character that indicates the start of a format specifier.
+ for ( ; I != E ; ++I) {
+ char c = *I;
+ if (c == '\0') {
+ // Detect spurious null characters, which are likely errors.
+ H.HandleNullChar(I);
+ return true;
+ }
+ if (c == '%') {
+ Start = I++; // Record the start of the format specifier.
+ break;
+ }
+ }
+
+ // No format specifier found?
+ if (!Start)
+ return false;
+
+ if (I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ ScanfSpecifier FS;
+ if (ParseArgPosition(H, FS, Start, I, E))
+ return true;
+
+ if (I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ // Look for '*' flag if it is present.
+ if (*I == '*') {
+ FS.setSuppressAssignment(I);
+ if (++I == E) {
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+ }
+
+ // Look for the field width (if any). Unlike printf, this is either
+ // a fixed integer or isn't present.
+ const OptionalAmount &Amt = clang::analyze_format_string::ParseAmount(I, E);
+ if (Amt.getHowSpecified() != OptionalAmount::NotSpecified) {
+ assert(Amt.getHowSpecified() == OptionalAmount::Constant);
+ FS.setFieldWidth(Amt);
+
+ if (I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+ }
+
+ // Look for the length modifier.
+ if (ParseLengthModifier(FS, I, E, LO, /*scanf=*/true) && I == E) {
+ // No more characters left?
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ // Detect spurious null characters, which are likely errors.
+ if (*I == '\0') {
+ H.HandleNullChar(I);
+ return true;
+ }
+
+ // Finally, look for the conversion specifier.
+ const char *conversionPosition = I++;
+ ScanfConversionSpecifier::Kind k = ScanfConversionSpecifier::InvalidSpecifier;
+ switch (*conversionPosition) {
+ default:
+ break;
+ case '%': k = ConversionSpecifier::PercentArg; break;
+ case 'A': k = ConversionSpecifier::AArg; break;
+ case 'E': k = ConversionSpecifier::EArg; break;
+ case 'F': k = ConversionSpecifier::FArg; break;
+ case 'G': k = ConversionSpecifier::GArg; break;
+ case 'X': k = ConversionSpecifier::XArg; break;
+ case 'a': k = ConversionSpecifier::aArg; break;
+ case 'd': k = ConversionSpecifier::dArg; break;
+ case 'e': k = ConversionSpecifier::eArg; break;
+ case 'f': k = ConversionSpecifier::fArg; break;
+ case 'g': k = ConversionSpecifier::gArg; break;
+ case 'i': k = ConversionSpecifier::iArg; break;
+ case 'n': k = ConversionSpecifier::nArg; break;
+ case 'c': k = ConversionSpecifier::cArg; break;
+ case 'C': k = ConversionSpecifier::CArg; break;
+ case 'S': k = ConversionSpecifier::SArg; break;
+ case '[': k = ConversionSpecifier::ScanListArg; break;
+ case 'u': k = ConversionSpecifier::uArg; break;
+ case 'x': k = ConversionSpecifier::xArg; break;
+ case 'o': k = ConversionSpecifier::oArg; break;
+ case 's': k = ConversionSpecifier::sArg; break;
+ case 'p': k = ConversionSpecifier::pArg; break;
+ }
+ ScanfConversionSpecifier CS(conversionPosition, k);
+ if (k == ScanfConversionSpecifier::ScanListArg) {
+ if (ParseScanList(H, CS, I, E))
+ return true;
+ }
+ FS.setConversionSpecifier(CS);
+ if (CS.consumesDataArgument() && !FS.getSuppressAssignment()
+ && !FS.usesPositionalArg())
+ FS.setArgIndex(argIndex++);
+
+  // FIXME: '%' and '*' don't make sense. Issue a warning.
+  // FIXME: 'ConsumedSoFar' and '*' don't make sense.
+
+ if (k == ScanfConversionSpecifier::InvalidSpecifier) {
+ // Assume the conversion takes one argument.
+ return !H.HandleInvalidScanfConversionSpecifier(FS, Beg, I - Beg);
+ }
+ return ScanfSpecifierResult(Start, FS);
+}
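+
+// Illustrative note: for a directive such as %3ld, the routine above records a
+// constant field width of 3, the 'l' length modifier and the 'd' conversion
+// specifier, and (since assignment is not suppressed and no positional index
+// is given) assigns the next argument index to the specifier.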
+
+ScanfArgTypeResult ScanfSpecifier::getArgType(ASTContext &Ctx) const {
+ const ScanfConversionSpecifier &CS = getConversionSpecifier();
+
+ if (!CS.consumesDataArgument())
+ return ScanfArgTypeResult::Invalid();
+
+ switch(CS.getKind()) {
+ // Signed int.
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None: return ArgTypeResult(Ctx.IntTy);
+ case LengthModifier::AsChar:
+ return ArgTypeResult(ArgTypeResult::AnyCharTy);
+ case LengthModifier::AsShort: return ArgTypeResult(Ctx.ShortTy);
+ case LengthModifier::AsLong: return ArgTypeResult(Ctx.LongTy);
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return ArgTypeResult(Ctx.LongLongTy);
+ case LengthModifier::AsIntMax:
+ return ScanfArgTypeResult(Ctx.getIntMaxType(), "intmax_t *");
+ case LengthModifier::AsSizeT:
+ // FIXME: ssize_t.
+ return ScanfArgTypeResult();
+ case LengthModifier::AsPtrDiff:
+ return ScanfArgTypeResult(Ctx.getPointerDiffType(), "ptrdiff_t *");
+ case LengthModifier::AsLongDouble:
+ // GNU extension.
+ return ArgTypeResult(Ctx.LongLongTy);
+ case LengthModifier::AsAllocate: return ScanfArgTypeResult::Invalid();
+ case LengthModifier::AsMAllocate: return ScanfArgTypeResult::Invalid();
+ }
+
+ // Unsigned int.
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None: return ArgTypeResult(Ctx.UnsignedIntTy);
+ case LengthModifier::AsChar: return ArgTypeResult(Ctx.UnsignedCharTy);
+ case LengthModifier::AsShort: return ArgTypeResult(Ctx.UnsignedShortTy);
+ case LengthModifier::AsLong: return ArgTypeResult(Ctx.UnsignedLongTy);
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsQuad:
+ return ArgTypeResult(Ctx.UnsignedLongLongTy);
+ case LengthModifier::AsIntMax:
+ return ScanfArgTypeResult(Ctx.getUIntMaxType(), "uintmax_t *");
+ case LengthModifier::AsSizeT:
+ return ScanfArgTypeResult(Ctx.getSizeType(), "size_t *");
+ case LengthModifier::AsPtrDiff:
+ // FIXME: Unsigned version of ptrdiff_t?
+ return ScanfArgTypeResult();
+ case LengthModifier::AsLongDouble:
+ // GNU extension.
+ return ArgTypeResult(Ctx.UnsignedLongLongTy);
+ case LengthModifier::AsAllocate: return ScanfArgTypeResult::Invalid();
+ case LengthModifier::AsMAllocate: return ScanfArgTypeResult::Invalid();
+ }
+
+ // Float.
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None: return ArgTypeResult(Ctx.FloatTy);
+ case LengthModifier::AsLong: return ArgTypeResult(Ctx.DoubleTy);
+ case LengthModifier::AsLongDouble:
+ return ArgTypeResult(Ctx.LongDoubleTy);
+ default:
+ return ScanfArgTypeResult::Invalid();
+ }
+
+ // Char, string and scanlist.
+ case ConversionSpecifier::cArg:
+ case ConversionSpecifier::sArg:
+ case ConversionSpecifier::ScanListArg:
+ switch (LM.getKind()) {
+ case LengthModifier::None: return ScanfArgTypeResult::CStrTy;
+ case LengthModifier::AsLong:
+ return ScanfArgTypeResult(ScanfArgTypeResult::WCStrTy, "wchar_t *");
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ return ScanfArgTypeResult(ArgTypeResult::CStrTy);
+ default:
+ return ScanfArgTypeResult::Invalid();
+ }
+ case ConversionSpecifier::CArg:
+ case ConversionSpecifier::SArg:
+ // FIXME: Mac OS X specific?
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return ScanfArgTypeResult(ScanfArgTypeResult::WCStrTy, "wchar_t *");
+ case LengthModifier::AsAllocate:
+ case LengthModifier::AsMAllocate:
+ return ScanfArgTypeResult(ArgTypeResult::WCStrTy, "wchar_t **");
+ default:
+ return ScanfArgTypeResult::Invalid();
+ }
+
+ // Pointer.
+ case ConversionSpecifier::pArg:
+ return ScanfArgTypeResult(ArgTypeResult(ArgTypeResult::CPointerTy));
+
+ default:
+ break;
+ }
+
+ return ScanfArgTypeResult();
+}
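+
+// Illustrative note: getArgType() describes the pointee that scanf writes
+// through, e.g. %hd expects a 'short *' argument, %lf expects 'double *', and
+// %s expects 'char *'.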
+
+bool ScanfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
+ ASTContext &Ctx) {
+ if (!QT->isPointerType())
+ return false;
+
+ QualType PT = QT->getPointeeType();
+ const BuiltinType *BT = PT->getAs<BuiltinType>();
+ if (!BT)
+ return false;
+
+ // Pointer to a character.
+ if (PT->isAnyCharacterType()) {
+ CS.setKind(ConversionSpecifier::sArg);
+ if (PT->isWideCharType())
+ LM.setKind(LengthModifier::AsWideChar);
+ else
+ LM.setKind(LengthModifier::None);
+ return true;
+ }
+
+ // Figure out the length modifier.
+ switch (BT->getKind()) {
+ // no modifier
+ case BuiltinType::UInt:
+ case BuiltinType::Int:
+ case BuiltinType::Float:
+ LM.setKind(LengthModifier::None);
+ break;
+
+ // hh
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ LM.setKind(LengthModifier::AsChar);
+ break;
+
+ // h
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ LM.setKind(LengthModifier::AsShort);
+ break;
+
+ // l
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ case BuiltinType::Double:
+ LM.setKind(LengthModifier::AsLong);
+ break;
+
+ // ll
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ LM.setKind(LengthModifier::AsLongLong);
+ break;
+
+ // L
+ case BuiltinType::LongDouble:
+ LM.setKind(LengthModifier::AsLongDouble);
+ break;
+
+ // Don't know.
+ default:
+ return false;
+ }
+
+ // Handle size_t, ptrdiff_t, etc. that have dedicated length modifiers in C99.
+ if (isa<TypedefType>(PT) && (LangOpt.C99 || LangOpt.CPlusPlus0x)) {
+ const IdentifierInfo *Identifier = QT.getBaseTypeIdentifier();
+ if (Identifier->getName() == "size_t") {
+ LM.setKind(LengthModifier::AsSizeT);
+ } else if (Identifier->getName() == "ssize_t") {
+ // Not C99, but common in Unix.
+ LM.setKind(LengthModifier::AsSizeT);
+ } else if (Identifier->getName() == "intmax_t") {
+ LM.setKind(LengthModifier::AsIntMax);
+ } else if (Identifier->getName() == "uintmax_t") {
+ LM.setKind(LengthModifier::AsIntMax);
+ } else if (Identifier->getName() == "ptrdiff_t") {
+ LM.setKind(LengthModifier::AsPtrDiff);
+ }
+ }
+
+ // If fixing the length modifier was enough, we are done.
+ const analyze_scanf::ScanfArgTypeResult &ATR = getArgType(Ctx);
+ if (hasValidLengthModifier() && ATR.isValid() && ATR.matchesType(Ctx, QT))
+ return true;
+
+ // Figure out the conversion specifier.
+ if (PT->isRealFloatingType())
+ CS.setKind(ConversionSpecifier::fArg);
+ else if (PT->isSignedIntegerType())
+ CS.setKind(ConversionSpecifier::dArg);
+ else if (PT->isUnsignedIntegerType())
+ CS.setKind(ConversionSpecifier::uArg);
+ else
+ llvm_unreachable("Unexpected type");
+
+ return true;
+}
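+
+// Illustrative note (hypothetical example): if a %d directive is matched
+// against an argument of type 'size_t *', fixType() switches the length
+// modifier to AsSizeT and the conversion to 'u', effectively rewriting the
+// directive as %zu.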
+
+void ScanfSpecifier::toString(raw_ostream &os) const {
+ os << "%";
+
+ if (usesPositionalArg())
+ os << getPositionalArgIndex() << "$";
+ if (SuppressAssignment)
+ os << "*";
+
+ FieldWidth.toString(os);
+ os << LM.toString();
+ os << CS.toString();
+}
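+
+// Illustrative note: a specifier with positional argument 2, a field width of
+// 10, the 'l' length modifier and the 's' conversion prints back as "%2$10ls".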
+
+bool clang::analyze_format_string::ParseScanfString(FormatStringHandler &H,
+ const char *I,
+ const char *E,
+ const LangOptions &LO) {
+
+ unsigned argIndex = 0;
+
+ // Keep looking for a format specifier until we have exhausted the string.
+ while (I != E) {
+ const ScanfSpecifierResult &FSR = ParseScanfSpecifier(H, I, E, argIndex,
+ LO);
+ // Did a fail-stop error of any kind occur when parsing the specifier?
+ // If so, don't do any more processing.
+ if (FSR.shouldStop())
+      return true;
+ // Did we exhaust the string or encounter an error that
+ // we can recover from?
+ if (!FSR.hasValue())
+ continue;
+ // We have a format specifier. Pass it to the callback.
+ if (!H.HandleScanfSpecifier(FSR.getValue(), FSR.getStart(),
+ I - FSR.getStart())) {
+ return true;
+ }
+ }
+ assert(I == E && "Format string not exhausted");
+ return false;
+}
+
+bool ScanfArgTypeResult::matchesType(ASTContext& C, QualType argTy) const {
+ switch (K) {
+ case InvalidTy:
+ llvm_unreachable("ArgTypeResult must be valid");
+ case UnknownTy:
+ return true;
+ case CStrTy:
+ return ArgTypeResult(ArgTypeResult::CStrTy).matchesType(C, argTy);
+ case WCStrTy:
+ return ArgTypeResult(ArgTypeResult::WCStrTy).matchesType(C, argTy);
+ case PtrToArgTypeResultTy: {
+ const PointerType *PT = argTy->getAs<PointerType>();
+ if (!PT)
+ return false;
+ return A.matchesType(C, PT->getPointeeType());
+ }
+ }
+
+ llvm_unreachable("Invalid ScanfArgTypeResult Kind!");
+}
+
+QualType ScanfArgTypeResult::getRepresentativeType(ASTContext &C) const {
+ switch (K) {
+ case InvalidTy:
+ llvm_unreachable("No representative type for Invalid ArgTypeResult");
+ case UnknownTy:
+ return QualType();
+ case CStrTy:
+ return C.getPointerType(C.CharTy);
+ case WCStrTy:
+ return C.getPointerType(C.getWCharType());
+ case PtrToArgTypeResultTy:
+ return C.getPointerType(A.getRepresentativeType(C));
+ }
+
+ llvm_unreachable("Invalid ScanfArgTypeResult Kind!");
+}
+
+std::string ScanfArgTypeResult::getRepresentativeTypeName(ASTContext& C) const {
+ std::string S = getRepresentativeType(C).getAsString();
+ if (!Name)
+ return std::string("'") + S + "'";
+ return std::string("'") + Name + "' (aka '" + S + "')";
+}
diff --git a/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp
new file mode 100644
index 0000000..2f7e794
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/ThreadSafety.cpp
@@ -0,0 +1,1726 @@
+//===- ThreadSafety.cpp ----------------------------------------*- C++ --*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An intra-procedural analysis for thread safety (e.g. deadlocks and race
+// conditions), based on an annotation system.
+//
+// See http://clang.llvm.org/docs/LanguageExtensions.html#threadsafety for more
+// information.
+//
+//===----------------------------------------------------------------------===//
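+//
+// Illustrative usage sketch (hypothetical code, not from this file), assuming
+// the usual macro wrappers (GUARDED_BY, EXCLUSIVE_LOCK_FUNCTION,
+// UNLOCK_FUNCTION) around the thread-safety attributes and a Mutex class
+// annotated with them:
+//
+//   class Counter {
+//     Mutex Mu;
+//     int Val GUARDED_BY(Mu);
+//   public:
+//     void inc() { Mu.Lock(); ++Val; Mu.Unlock(); }  // ok
+//     void bad() { ++Val; }  // warning: writing Val requires holding Mu
+//   };
+//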
+
+#include "clang/Analysis/Analyses/ThreadSafety.h"
+#include "clang/Analysis/Analyses/PostOrderCFGView.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/CFGStmtMap.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <utility>
+#include <vector>
+
+using namespace clang;
+using namespace thread_safety;
+
+// Key method definition
+ThreadSafetyHandler::~ThreadSafetyHandler() {}
+
+namespace {
+
+/// \brief A MutexID object uniquely identifies a particular mutex, and
+/// is built from an Expr* (e.g. the expression passed to a lock function).
+///
+/// Thread-safety analysis works by comparing lock expressions. Within the
+/// body of a function, an expression such as "x->foo->bar.mu" will resolve to
+/// a particular mutex object at run-time. Subsequent occurrences of the same
+/// expression (where "same" means syntactic equality) will refer to the same
+/// run-time object if three conditions hold:
+/// (1) Local variables in the expression, such as "x" have not changed.
+/// (2) Values on the heap that affect the expression have not changed.
+/// (3) The expression involves only pure function calls.
+///
+/// The current implementation assumes, but does not verify, that multiple uses
+/// of the same lock expression satisfy these criteria.
+///
+/// Clang introduces an additional wrinkle, which is that it is difficult to
+/// derive canonical expressions, or compare expressions directly for equality.
+/// Thus, we identify a mutex not by an Expr, but by the list of named
+/// declarations that are referenced by the Expr. In other words,
+/// x->foo->bar.mu will be a four-element vector with the Decls for
+/// mu, bar, foo, and x. The vector will uniquely identify the expression
+/// for all practical purposes. Null is used to denote 'this'.
+///
+/// Note we will need to perform substitution on "this" and function parameter
+/// names when constructing a lock expression.
+///
+/// For example:
+/// class C { Mutex Mu; void lock() EXCLUSIVE_LOCK_FUNCTION(this->Mu); };
+/// void myFunc(C *X) { ... X->lock() ... }
+/// The original expression for the mutex acquired by myFunc is "this->Mu", but
+/// "X" is substituted for "this" so we get X->Mu();
+///
+/// For another example:
+/// foo(MyList *L) EXCLUSIVE_LOCKS_REQUIRED(L->Mu) { ... }
+/// MyList *MyL;
+/// foo(MyL); // requires lock MyL->Mu to be held
+class MutexID {
+ SmallVector<NamedDecl*, 2> DeclSeq;
+
+ /// Build a Decl sequence representing the lock from the given expression.
+ /// Recursive function that terminates on DeclRefExpr.
+ /// Note: this function merely creates a MutexID; it does not check to
+ /// ensure that the original expression is a valid mutex expression.
+ void buildMutexID(Expr *Exp, const NamedDecl *D, Expr *Parent,
+ unsigned NumArgs, Expr **FunArgs) {
+ if (!Exp) {
+ DeclSeq.clear();
+ return;
+ }
+
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Exp)) {
+ NamedDecl *ND = cast<NamedDecl>(DRE->getDecl()->getCanonicalDecl());
+ ParmVarDecl *PV = dyn_cast_or_null<ParmVarDecl>(ND);
+ if (PV) {
+ FunctionDecl *FD =
+ cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl();
+ unsigned i = PV->getFunctionScopeIndex();
+
+ if (FunArgs && FD == D->getCanonicalDecl()) {
+ // Substitute call arguments for references to function parameters
+ assert(i < NumArgs);
+ buildMutexID(FunArgs[i], D, 0, 0, 0);
+ return;
+ }
+ // Map the param back to the param of the original function declaration.
+ DeclSeq.push_back(FD->getParamDecl(i));
+ return;
+ }
+ // Not a function parameter -- just store the reference.
+ DeclSeq.push_back(ND);
+ } else if (MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
+ NamedDecl *ND = ME->getMemberDecl();
+ DeclSeq.push_back(ND);
+ buildMutexID(ME->getBase(), D, Parent, NumArgs, FunArgs);
+ } else if (isa<CXXThisExpr>(Exp)) {
+ if (Parent)
+ buildMutexID(Parent, D, 0, 0, 0);
+ else {
+ DeclSeq.push_back(0); // Use 0 to represent 'this'.
+ return; // mutexID is still valid in this case
+ }
+ } else if (CXXMemberCallExpr *CMCE = dyn_cast<CXXMemberCallExpr>(Exp)) {
+ DeclSeq.push_back(CMCE->getMethodDecl()->getCanonicalDecl());
+ buildMutexID(CMCE->getImplicitObjectArgument(),
+ D, Parent, NumArgs, FunArgs);
+ unsigned NumCallArgs = CMCE->getNumArgs();
+ Expr** CallArgs = CMCE->getArgs();
+ for (unsigned i = 0; i < NumCallArgs; ++i) {
+ buildMutexID(CallArgs[i], D, Parent, NumArgs, FunArgs);
+ }
+ } else if (CallExpr *CE = dyn_cast<CallExpr>(Exp)) {
+ buildMutexID(CE->getCallee(), D, Parent, NumArgs, FunArgs);
+ unsigned NumCallArgs = CE->getNumArgs();
+ Expr** CallArgs = CE->getArgs();
+ for (unsigned i = 0; i < NumCallArgs; ++i) {
+ buildMutexID(CallArgs[i], D, Parent, NumArgs, FunArgs);
+ }
+ } else if (BinaryOperator *BOE = dyn_cast<BinaryOperator>(Exp)) {
+ buildMutexID(BOE->getLHS(), D, Parent, NumArgs, FunArgs);
+ buildMutexID(BOE->getRHS(), D, Parent, NumArgs, FunArgs);
+ } else if (UnaryOperator *UOE = dyn_cast<UnaryOperator>(Exp)) {
+ buildMutexID(UOE->getSubExpr(), D, Parent, NumArgs, FunArgs);
+ } else if (ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(Exp)) {
+ buildMutexID(ASE->getBase(), D, Parent, NumArgs, FunArgs);
+ buildMutexID(ASE->getIdx(), D, Parent, NumArgs, FunArgs);
+ } else if (AbstractConditionalOperator *CE =
+ dyn_cast<AbstractConditionalOperator>(Exp)) {
+ buildMutexID(CE->getCond(), D, Parent, NumArgs, FunArgs);
+ buildMutexID(CE->getTrueExpr(), D, Parent, NumArgs, FunArgs);
+ buildMutexID(CE->getFalseExpr(), D, Parent, NumArgs, FunArgs);
+ } else if (ChooseExpr *CE = dyn_cast<ChooseExpr>(Exp)) {
+ buildMutexID(CE->getCond(), D, Parent, NumArgs, FunArgs);
+ buildMutexID(CE->getLHS(), D, Parent, NumArgs, FunArgs);
+ buildMutexID(CE->getRHS(), D, Parent, NumArgs, FunArgs);
+ } else if (CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
+ buildMutexID(CE->getSubExpr(), D, Parent, NumArgs, FunArgs);
+ } else if (ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
+ buildMutexID(PE->getSubExpr(), D, Parent, NumArgs, FunArgs);
+ } else if (isa<CharacterLiteral>(Exp) ||
+ isa<CXXNullPtrLiteralExpr>(Exp) ||
+ isa<GNUNullExpr>(Exp) ||
+ isa<CXXBoolLiteralExpr>(Exp) ||
+ isa<FloatingLiteral>(Exp) ||
+ isa<ImaginaryLiteral>(Exp) ||
+ isa<IntegerLiteral>(Exp) ||
+ isa<StringLiteral>(Exp) ||
+ isa<ObjCStringLiteral>(Exp)) {
+ return; // FIXME: Ignore literals for now
+ } else {
+ // Ignore. FIXME: mark as invalid expression?
+ }
+ }
+
+ /// \brief Construct a MutexID from an expression.
+ /// \param MutexExp The original mutex expression within an attribute
+ /// \param DeclExp An expression involving the Decl on which the attribute
+ /// occurs.
+ /// \param D The declaration to which the lock/unlock attribute is attached.
+ void buildMutexIDFromExp(Expr *MutexExp, Expr *DeclExp, const NamedDecl *D) {
+ Expr *Parent = 0;
+ unsigned NumArgs = 0;
+ Expr **FunArgs = 0;
+
+    // If DeclExp is null, we are processing a raw attribute expression
+    // with no substitutions.
+ if (DeclExp == 0) {
+ buildMutexID(MutexExp, D, 0, 0, 0);
+ return;
+ }
+
+ // Examine DeclExp to find Parent and FunArgs, which are used to substitute
+ // for formal parameters when we call buildMutexID later.
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
+ Parent = ME->getBase();
+ } else if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(DeclExp)) {
+ Parent = CE->getImplicitObjectArgument();
+ NumArgs = CE->getNumArgs();
+ FunArgs = CE->getArgs();
+ } else if (CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) {
+ NumArgs = CE->getNumArgs();
+ FunArgs = CE->getArgs();
+ } else if (CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(DeclExp)) {
+ Parent = 0; // FIXME -- get the parent from DeclStmt
+ NumArgs = CE->getNumArgs();
+ FunArgs = CE->getArgs();
+ } else if (D && isa<CXXDestructorDecl>(D)) {
+ // There's no such thing as a "destructor call" in the AST.
+ Parent = DeclExp;
+ }
+
+ // If the attribute has no arguments, then assume the argument is "this".
+ if (MutexExp == 0) {
+ buildMutexID(Parent, D, 0, 0, 0);
+ return;
+ }
+
+ buildMutexID(MutexExp, D, Parent, NumArgs, FunArgs);
+ }
+
+public:
+ explicit MutexID(clang::Decl::EmptyShell e) {
+ DeclSeq.clear();
+ }
+
+ /// \param MutexExp The original mutex expression within an attribute
+ /// \param DeclExp An expression involving the Decl on which the attribute
+ /// occurs.
+ /// \param D The declaration to which the lock/unlock attribute is attached.
+ /// Caller must check isValid() after construction.
+ MutexID(Expr* MutexExp, Expr *DeclExp, const NamedDecl* D) {
+ buildMutexIDFromExp(MutexExp, DeclExp, D);
+ }
+
+ /// Return true if this is a valid decl sequence.
+ /// Caller must call this by hand after construction to handle errors.
+ bool isValid() const {
+ return !DeclSeq.empty();
+ }
+
+ /// Issue a warning about an invalid lock expression
+ static void warnInvalidLock(ThreadSafetyHandler &Handler, Expr* MutexExp,
+ Expr *DeclExp, const NamedDecl* D) {
+ SourceLocation Loc;
+ if (DeclExp)
+ Loc = DeclExp->getExprLoc();
+
+ // FIXME: add a note about the attribute location in MutexExp or D
+ if (Loc.isValid())
+ Handler.handleInvalidLockExp(Loc);
+ }
+
+ bool operator==(const MutexID &other) const {
+ return DeclSeq == other.DeclSeq;
+ }
+
+ bool operator!=(const MutexID &other) const {
+ return !(*this == other);
+ }
+
+  // SmallVector overloads operator< to do lexicographic ordering. Note that
+ // we use pointer equality (and <) to compare NamedDecls. This means the order
+ // of MutexIDs in a lockset is nondeterministic. In order to output
+ // diagnostics in a deterministic ordering, we must order all diagnostics to
+ // output by SourceLocation when iterating through this lockset.
+ bool operator<(const MutexID &other) const {
+ return DeclSeq < other.DeclSeq;
+ }
+
+ /// \brief Returns the name of the first Decl in the list for a given MutexID;
+ /// e.g. the lock expression foo.bar() has name "bar".
+ /// The caret will point unambiguously to the lock expression, so using this
+ /// name in diagnostics is a way to get simple, and consistent, mutex names.
+ /// We do not want to output the entire expression text for security reasons.
+ std::string getName() const {
+ assert(isValid());
+ if (!DeclSeq.front())
+ return "this"; // Use 0 to represent 'this'.
+ return DeclSeq.front()->getNameAsString();
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ for (SmallVectorImpl<NamedDecl*>::const_iterator I = DeclSeq.begin(),
+ E = DeclSeq.end(); I != E; ++I) {
+ ID.AddPointer(*I);
+ }
+ }
+};
+
+
+/// \brief This is a helper class that stores info about the most recent
+/// acquire of a Lock.
+///
+/// The main body of the analysis maps MutexIDs to LockDatas.
+struct LockData {
+ SourceLocation AcquireLoc;
+
+ /// \brief LKind stores whether a lock is held shared or exclusively.
+ /// Note that this analysis does not currently support either re-entrant
+ /// locking or lock "upgrading" and "downgrading" between exclusive and
+ /// shared.
+ ///
+ /// FIXME: add support for re-entrant locking and lock up/downgrading
+ LockKind LKind;
+ MutexID UnderlyingMutex; // for ScopedLockable objects
+
+ LockData(SourceLocation AcquireLoc, LockKind LKind)
+ : AcquireLoc(AcquireLoc), LKind(LKind), UnderlyingMutex(Decl::EmptyShell())
+ {}
+
+ LockData(SourceLocation AcquireLoc, LockKind LKind, const MutexID &Mu)
+ : AcquireLoc(AcquireLoc), LKind(LKind), UnderlyingMutex(Mu) {}
+
+ bool operator==(const LockData &other) const {
+ return AcquireLoc == other.AcquireLoc && LKind == other.LKind;
+ }
+
+ bool operator!=(const LockData &other) const {
+ return !(*this == other);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(AcquireLoc.getRawEncoding());
+ ID.AddInteger(LKind);
+ }
+};
+
+
+/// A Lockset maps each MutexID (defined above) to information about how it has
+/// been locked.
+typedef llvm::ImmutableMap<MutexID, LockData> Lockset;
+typedef llvm::ImmutableMap<NamedDecl*, unsigned> LocalVarContext;
+
+class LocalVariableMap;
+
+/// A side (entry or exit) of a CFG node.
+enum CFGBlockSide { CBS_Entry, CBS_Exit };
+
+/// CFGBlockInfo is a struct which contains all the information that is
+/// maintained for each block in the CFG. See LocalVariableMap for more
+/// information about the contexts.
+struct CFGBlockInfo {
+ Lockset EntrySet; // Lockset held at entry to block
+ Lockset ExitSet; // Lockset held at exit from block
+ LocalVarContext EntryContext; // Context held at entry to block
+ LocalVarContext ExitContext; // Context held at exit from block
+ SourceLocation EntryLoc; // Location of first statement in block
+ SourceLocation ExitLoc; // Location of last statement in block.
+ unsigned EntryIndex; // Used to replay contexts later
+
+ const Lockset &getSet(CFGBlockSide Side) const {
+ return Side == CBS_Entry ? EntrySet : ExitSet;
+ }
+ SourceLocation getLocation(CFGBlockSide Side) const {
+ return Side == CBS_Entry ? EntryLoc : ExitLoc;
+ }
+
+private:
+ CFGBlockInfo(Lockset EmptySet, LocalVarContext EmptyCtx)
+ : EntrySet(EmptySet), ExitSet(EmptySet),
+ EntryContext(EmptyCtx), ExitContext(EmptyCtx)
+ { }
+
+public:
+ static CFGBlockInfo getEmptyBlockInfo(Lockset::Factory &F,
+ LocalVariableMap &M);
+};
+
+
+
+// A LocalVariableMap maintains a map from local variables to their currently
+// valid definitions. It provides SSA-like functionality when traversing the
+// CFG. Like SSA, each definition or assignment to a variable is assigned a
+// unique name (an integer), which acts as the SSA name for that definition.
+// The total set of names is shared among all CFG basic blocks.
+// Unlike SSA, we do not rewrite expressions to replace local variable DeclRefs
+// with their SSA names. Instead, we compute a Context for each point in the
+// code, which maps local variables to the appropriate SSA-name. This map
+// changes with each assignment.
+//
+// The map is computed in a single pass over the CFG. Subsequent analyses can
+// then query the map to find the appropriate Context for a statement, and use
+// that Context to look up the definitions of variables.
+class LocalVariableMap {
+public:
+ typedef LocalVarContext Context;
+
+ /// A VarDefinition consists of an expression, representing the value of the
+ /// variable, along with the context in which that expression should be
+ /// interpreted. A reference VarDefinition does not itself contain this
+ /// information, but instead contains a pointer to a previous VarDefinition.
+ struct VarDefinition {
+ public:
+ friend class LocalVariableMap;
+
+ NamedDecl *Dec; // The original declaration for this variable.
+ Expr *Exp; // The expression for this variable, OR
+ unsigned Ref; // Reference to another VarDefinition
+ Context Ctx; // The map with which Exp should be interpreted.
+
+ bool isReference() { return !Exp; }
+
+ private:
+ // Create ordinary variable definition
+ VarDefinition(NamedDecl *D, Expr *E, Context C)
+ : Dec(D), Exp(E), Ref(0), Ctx(C)
+ { }
+
+ // Create reference to previous definition
+ VarDefinition(NamedDecl *D, unsigned R, Context C)
+ : Dec(D), Exp(0), Ref(R), Ctx(C)
+ { }
+ };
+
+private:
+ Context::Factory ContextFactory;
+ std::vector<VarDefinition> VarDefinitions;
+ std::vector<unsigned> CtxIndices;
+ std::vector<std::pair<Stmt*, Context> > SavedContexts;
+
+public:
+ LocalVariableMap() {
+ // index 0 is a placeholder for undefined variables (aka phi-nodes).
+ VarDefinitions.push_back(VarDefinition(0, 0u, getEmptyContext()));
+ }
+
+ /// Look up a definition, within the given context.
+ const VarDefinition* lookup(NamedDecl *D, Context Ctx) {
+ const unsigned *i = Ctx.lookup(D);
+ if (!i)
+ return 0;
+ assert(*i < VarDefinitions.size());
+ return &VarDefinitions[*i];
+ }
+
+ /// Look up the definition for D within the given context. Returns
+ /// NULL if the expression is not statically known. If successful, also
+  /// modifies Ctx to hold the context of the returned Expr.
+ Expr* lookupExpr(NamedDecl *D, Context &Ctx) {
+ const unsigned *P = Ctx.lookup(D);
+ if (!P)
+ return 0;
+
+ unsigned i = *P;
+ while (i > 0) {
+ if (VarDefinitions[i].Exp) {
+ Ctx = VarDefinitions[i].Ctx;
+ return VarDefinitions[i].Exp;
+ }
+ i = VarDefinitions[i].Ref;
+ }
+ return 0;
+ }
+
+ Context getEmptyContext() { return ContextFactory.getEmptyMap(); }
+
+ /// Return the next context after processing S. This function is used by
+ /// clients of the class to get the appropriate context when traversing the
+ /// CFG. It must be called for every assignment or DeclStmt.
+ Context getNextContext(unsigned &CtxIndex, Stmt *S, Context C) {
+ if (SavedContexts[CtxIndex+1].first == S) {
+ CtxIndex++;
+ Context Result = SavedContexts[CtxIndex].second;
+ return Result;
+ }
+ return C;
+ }
+
+ void dumpVarDefinitionName(unsigned i) {
+ if (i == 0) {
+ llvm::errs() << "Undefined";
+ return;
+ }
+ NamedDecl *Dec = VarDefinitions[i].Dec;
+ if (!Dec) {
+ llvm::errs() << "<<NULL>>";
+ return;
+ }
+ Dec->printName(llvm::errs());
+ llvm::errs() << "." << i << " " << ((void*) Dec);
+ }
+
+ /// Dumps an ASCII representation of the variable map to llvm::errs()
+ void dump() {
+ for (unsigned i = 1, e = VarDefinitions.size(); i < e; ++i) {
+ Expr *Exp = VarDefinitions[i].Exp;
+ unsigned Ref = VarDefinitions[i].Ref;
+
+ dumpVarDefinitionName(i);
+ llvm::errs() << " = ";
+ if (Exp) Exp->dump();
+ else {
+ dumpVarDefinitionName(Ref);
+ llvm::errs() << "\n";
+ }
+ }
+ }
+
+ /// Dumps an ASCII representation of a Context to llvm::errs()
+ void dumpContext(Context C) {
+ for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
+ NamedDecl *D = I.getKey();
+ D->printName(llvm::errs());
+ const unsigned *i = C.lookup(D);
+ llvm::errs() << " -> ";
+ dumpVarDefinitionName(*i);
+ llvm::errs() << "\n";
+ }
+ }
+
+ /// Builds the variable map.
+ void traverseCFG(CFG *CFGraph, PostOrderCFGView *SortedGraph,
+ std::vector<CFGBlockInfo> &BlockInfo);
+
+protected:
+ // Get the current context index
+ unsigned getContextIndex() { return SavedContexts.size()-1; }
+
+ // Save the current context for later replay
+ void saveContext(Stmt *S, Context C) {
+ SavedContexts.push_back(std::make_pair(S,C));
+ }
+
+ // Adds a new definition to the given context, and returns a new context.
+ // This method should be called when declaring a new variable.
+ Context addDefinition(NamedDecl *D, Expr *Exp, Context Ctx) {
+ assert(!Ctx.contains(D));
+ unsigned newID = VarDefinitions.size();
+ Context NewCtx = ContextFactory.add(Ctx, D, newID);
+ VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
+ return NewCtx;
+ }
+
+ // Add a new reference to an existing definition.
+ Context addReference(NamedDecl *D, unsigned i, Context Ctx) {
+ unsigned newID = VarDefinitions.size();
+ Context NewCtx = ContextFactory.add(Ctx, D, newID);
+ VarDefinitions.push_back(VarDefinition(D, i, Ctx));
+ return NewCtx;
+ }
+
+ // Updates a definition only if that definition is already in the map.
+ // This method should be called when assigning to an existing variable.
+ Context updateDefinition(NamedDecl *D, Expr *Exp, Context Ctx) {
+ if (Ctx.contains(D)) {
+ unsigned newID = VarDefinitions.size();
+ Context NewCtx = ContextFactory.remove(Ctx, D);
+ NewCtx = ContextFactory.add(NewCtx, D, newID);
+ VarDefinitions.push_back(VarDefinition(D, Exp, Ctx));
+ return NewCtx;
+ }
+ return Ctx;
+ }
+
+ // Removes a definition from the context, but keeps the variable name
+ // as a valid variable. The index 0 is a placeholder for cleared definitions.
+ Context clearDefinition(NamedDecl *D, Context Ctx) {
+ Context NewCtx = Ctx;
+ if (NewCtx.contains(D)) {
+ NewCtx = ContextFactory.remove(NewCtx, D);
+ NewCtx = ContextFactory.add(NewCtx, D, 0);
+ }
+ return NewCtx;
+ }
+
+  // Remove a definition entirely from the context.
+ Context removeDefinition(NamedDecl *D, Context Ctx) {
+ Context NewCtx = Ctx;
+ if (NewCtx.contains(D)) {
+ NewCtx = ContextFactory.remove(NewCtx, D);
+ }
+ return NewCtx;
+ }
+
+ Context intersectContexts(Context C1, Context C2);
+ Context createReferenceContext(Context C);
+ void intersectBackEdge(Context C1, Context C2);
+
+ friend class VarMapBuilder;
+};
+
+
+// This has to be defined after LocalVariableMap.
+CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(Lockset::Factory &F,
+ LocalVariableMap &M) {
+ return CFGBlockInfo(F.getEmptyMap(), M.getEmptyContext());
+}
+
+
+/// Visitor which builds a LocalVariableMap
+class VarMapBuilder : public StmtVisitor<VarMapBuilder> {
+public:
+ LocalVariableMap* VMap;
+ LocalVariableMap::Context Ctx;
+
+ VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
+ : VMap(VM), Ctx(C) {}
+
+ void VisitDeclStmt(DeclStmt *S);
+ void VisitBinaryOperator(BinaryOperator *BO);
+};
+
+
+// Add new local variables to the variable map
+void VarMapBuilder::VisitDeclStmt(DeclStmt *S) {
+ bool modifiedCtx = false;
+ DeclGroupRef DGrp = S->getDeclGroup();
+ for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
+ if (VarDecl *VD = dyn_cast_or_null<VarDecl>(*I)) {
+ Expr *E = VD->getInit();
+
+ // Add local variables with trivial type to the variable map
+ QualType T = VD->getType();
+ if (T.isTrivialType(VD->getASTContext())) {
+ Ctx = VMap->addDefinition(VD, E, Ctx);
+ modifiedCtx = true;
+ }
+ }
+ }
+ if (modifiedCtx)
+ VMap->saveContext(S, Ctx);
+}
+
+// Update local variable definitions in variable map
+void VarMapBuilder::VisitBinaryOperator(BinaryOperator *BO) {
+ if (!BO->isAssignmentOp())
+ return;
+
+ Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
+
+ // Update the variable map and current context.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
+ ValueDecl *VDec = DRE->getDecl();
+ if (Ctx.lookup(VDec)) {
+ if (BO->getOpcode() == BO_Assign)
+ Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
+ else
+ // FIXME -- handle compound assignment operators
+ Ctx = VMap->clearDefinition(VDec, Ctx);
+ VMap->saveContext(BO, Ctx);
+ }
+ }
+}
+
+
+// Computes the intersection of two contexts. The intersection is the
+// set of variables which have the same definition in both contexts;
+// variables with different definitions are discarded.
+LocalVariableMap::Context
+LocalVariableMap::intersectContexts(Context C1, Context C2) {
+ Context Result = C1;
+ for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) {
+ NamedDecl *Dec = I.getKey();
+ unsigned i1 = I.getData();
+ const unsigned *i2 = C2.lookup(Dec);
+ if (!i2) // variable doesn't exist on second path
+ Result = removeDefinition(Dec, Result);
+ else if (*i2 != i1) // variable exists, but has different definition
+ Result = clearDefinition(Dec, Result);
+ }
+ return Result;
+}
+
+// For every variable in C, create a new variable that refers to the
+// definition in C. Return a new context that contains these new variables.
+// (We use this for a naive implementation of SSA on loop back-edges.)
+LocalVariableMap::Context LocalVariableMap::createReferenceContext(Context C) {
+ Context Result = getEmptyContext();
+ for (Context::iterator I = C.begin(), E = C.end(); I != E; ++I) {
+ NamedDecl *Dec = I.getKey();
+ unsigned i = I.getData();
+ Result = addReference(Dec, i, Result);
+ }
+ return Result;
+}
+
+// This routine also takes the intersection of C1 and C2, but it does so by
+// altering the VarDefinitions. C1 must be the result of an earlier call to
+// createReferenceContext.
+void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
+ for (Context::iterator I = C1.begin(), E = C1.end(); I != E; ++I) {
+ NamedDecl *Dec = I.getKey();
+ unsigned i1 = I.getData();
+ VarDefinition *VDef = &VarDefinitions[i1];
+ assert(VDef->isReference());
+
+ const unsigned *i2 = C2.lookup(Dec);
+ if (!i2 || (*i2 != i1))
+ VDef->Ref = 0; // Mark this variable as undefined
+ }
+}
+
+
+// Traverse the CFG in topological order, so all predecessors of a block
+// (excluding back-edges) are visited before the block itself. At
+// each point in the code, we calculate a Context, which holds the set of
+// variable definitions which are visible at that point in execution.
+// Visible variables are mapped to their definitions using an array that
+// contains all definitions.
+//
+// At join points in the CFG, the set is computed as the intersection of
+// the incoming sets along each edge, E.g.
+//
+// { Context | VarDefinitions }
+// int x = 0; { x -> x1 | x1 = 0 }
+// int y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
+// if (b) x = 1; { x -> x2, y -> y1 | x2 = 1, y1 = 0, ... }
+// else x = 2; { x -> x3, y -> y1 | x3 = 2, x2 = 1, ... }
+// ... { y -> y1 (x is unknown) | x3 = 2, x2 = 1, ... }
+//
+// This is essentially a simpler and more naive version of the standard SSA
+// algorithm. Those definitions that remain in the intersection are from blocks
+// that strictly dominate the current block. We do not bother to insert proper
+// phi nodes, because they are not used in our analysis; instead, wherever
+// a phi node would be required, we simply remove that definition from the
+// context (e.g. x above).
+//
+// The initial traversal does not capture back-edges, so those need to be
+// handled on a separate pass. Whenever the first pass encounters an
+// incoming back edge, it duplicates the context, creating new definitions
+// that refer back to the originals. (These correspond to places where SSA
+// might have to insert a phi node.) On the second pass, these definitions are
+// set to NULL if the variable has changed on the back-edge (i.e. a phi
+// node was actually required). E.g.
+//
+// { Context | VarDefinitions }
+// int x = 0, y = 0; { x -> x1, y -> y1 | y1 = 0, x1 = 0 }
+// while (b) { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; }
+// x = x+1; { x -> x3, y -> y1 | x3 = x2 + 1, ... }
+// ... { y -> y1 | x3 = 2, x2 = 1, ... }
+//
+void LocalVariableMap::traverseCFG(CFG *CFGraph,
+ PostOrderCFGView *SortedGraph,
+ std::vector<CFGBlockInfo> &BlockInfo) {
+ PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
+
+ CtxIndices.resize(CFGraph->getNumBlockIDs());
+
+ for (PostOrderCFGView::iterator I = SortedGraph->begin(),
+ E = SortedGraph->end(); I!= E; ++I) {
+ const CFGBlock *CurrBlock = *I;
+ int CurrBlockID = CurrBlock->getBlockID();
+ CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
+
+ VisitedBlocks.insert(CurrBlock);
+
+ // Calculate the entry context for the current block
+ bool HasBackEdges = false;
+ bool CtxInit = true;
+ for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
+ PE = CurrBlock->pred_end(); PI != PE; ++PI) {
+      // If *PI -> CurrBlock is a back edge, skip it.
+ if (*PI == 0 || !VisitedBlocks.alreadySet(*PI)) {
+ HasBackEdges = true;
+ continue;
+ }
+
+ int PrevBlockID = (*PI)->getBlockID();
+ CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
+
+ if (CtxInit) {
+ CurrBlockInfo->EntryContext = PrevBlockInfo->ExitContext;
+ CtxInit = false;
+ }
+ else {
+ CurrBlockInfo->EntryContext =
+ intersectContexts(CurrBlockInfo->EntryContext,
+ PrevBlockInfo->ExitContext);
+ }
+ }
+
+ // Duplicate the context if we have back-edges, so we can call
+ // intersectBackEdges later.
+ if (HasBackEdges)
+ CurrBlockInfo->EntryContext =
+ createReferenceContext(CurrBlockInfo->EntryContext);
+
+ // Create a starting context index for the current block
+ saveContext(0, CurrBlockInfo->EntryContext);
+ CurrBlockInfo->EntryIndex = getContextIndex();
+
+ // Visit all the statements in the basic block.
+ VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
+ for (CFGBlock::const_iterator BI = CurrBlock->begin(),
+ BE = CurrBlock->end(); BI != BE; ++BI) {
+ switch (BI->getKind()) {
+ case CFGElement::Statement: {
+ const CFGStmt *CS = cast<CFGStmt>(&*BI);
+ VMapBuilder.Visit(const_cast<Stmt*>(CS->getStmt()));
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ CurrBlockInfo->ExitContext = VMapBuilder.Ctx;
+
+ // Mark variables on back edges as "unknown" if they've been changed.
+ for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
+ SE = CurrBlock->succ_end(); SI != SE; ++SI) {
+      // If CurrBlock -> *SI is *not* a back edge, skip it.
+ if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
+ continue;
+
+ CFGBlock *FirstLoopBlock = *SI;
+ Context LoopBegin = BlockInfo[FirstLoopBlock->getBlockID()].EntryContext;
+ Context LoopEnd = CurrBlockInfo->ExitContext;
+ intersectBackEdge(LoopBegin, LoopEnd);
+ }
+ }
+
+ // Put an extra entry at the end of the indexed context array
+ unsigned exitID = CFGraph->getExit().getBlockID();
+ saveContext(0, BlockInfo[exitID].ExitContext);
+}
+
+/// Find the appropriate source locations to use when producing diagnostics for
+/// each block in the CFG.
+static void findBlockLocations(CFG *CFGraph,
+ PostOrderCFGView *SortedGraph,
+ std::vector<CFGBlockInfo> &BlockInfo) {
+ for (PostOrderCFGView::iterator I = SortedGraph->begin(),
+ E = SortedGraph->end(); I!= E; ++I) {
+ const CFGBlock *CurrBlock = *I;
+ CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlock->getBlockID()];
+
+ // Find the source location of the last statement in the block, if the
+ // block is not empty.
+ if (const Stmt *S = CurrBlock->getTerminator()) {
+ CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc = S->getLocStart();
+ } else {
+ for (CFGBlock::const_reverse_iterator BI = CurrBlock->rbegin(),
+ BE = CurrBlock->rend(); BI != BE; ++BI) {
+ // FIXME: Handle other CFGElement kinds.
+ if (const CFGStmt *CS = dyn_cast<CFGStmt>(&*BI)) {
+ CurrBlockInfo->ExitLoc = CS->getStmt()->getLocStart();
+ break;
+ }
+ }
+ }
+
+ if (!CurrBlockInfo->ExitLoc.isInvalid()) {
+ // This block contains at least one statement. Find the source location
+ // of the first statement in the block.
+ for (CFGBlock::const_iterator BI = CurrBlock->begin(),
+ BE = CurrBlock->end(); BI != BE; ++BI) {
+ // FIXME: Handle other CFGElement kinds.
+ if (const CFGStmt *CS = dyn_cast<CFGStmt>(&*BI)) {
+ CurrBlockInfo->EntryLoc = CS->getStmt()->getLocStart();
+ break;
+ }
+ }
+ } else if (CurrBlock->pred_size() == 1 && *CurrBlock->pred_begin() &&
+ CurrBlock != &CFGraph->getExit()) {
+ // The block is empty, and has a single predecessor. Use its exit
+ // location.
+ CurrBlockInfo->EntryLoc = CurrBlockInfo->ExitLoc =
+ BlockInfo[(*CurrBlock->pred_begin())->getBlockID()].ExitLoc;
+ }
+ }
+}
+
+/// \brief Class which implements the core thread safety analysis routines.
+class ThreadSafetyAnalyzer {
+ friend class BuildLockset;
+
+ ThreadSafetyHandler &Handler;
+ Lockset::Factory LocksetFactory;
+ LocalVariableMap LocalVarMap;
+
+public:
+ ThreadSafetyAnalyzer(ThreadSafetyHandler &H) : Handler(H) {}
+
+ Lockset intersectAndWarn(const CFGBlockInfo &Block1, CFGBlockSide Side1,
+ const CFGBlockInfo &Block2, CFGBlockSide Side2,
+ LockErrorKind LEK);
+
+ Lockset addLock(Lockset &LSet, Expr *MutexExp, const NamedDecl *D,
+ LockKind LK, SourceLocation Loc);
+
+ void runAnalysis(AnalysisDeclContext &AC);
+};
+
+
+/// \brief We use this class to visit different types of expressions in
+/// CFGBlocks, and build up the lockset.
+/// An expression may cause us to add or remove locks from the lockset, or else
+/// output error messages related to missing locks.
+/// FIXME: In future, we may be able to not inherit from a visitor.
+class BuildLockset : public StmtVisitor<BuildLockset> {
+ friend class ThreadSafetyAnalyzer;
+
+ ThreadSafetyHandler &Handler;
+ Lockset::Factory &LocksetFactory;
+ LocalVariableMap &LocalVarMap;
+
+ Lockset LSet;
+ LocalVariableMap::Context LVarCtx;
+ unsigned CtxIndex;
+
+ // Helper functions
+ void addLock(const MutexID &Mutex, const LockData &LDat);
+ void removeLock(const MutexID &Mutex, SourceLocation UnlockLoc);
+
+ template <class AttrType>
+ void addLocksToSet(LockKind LK, AttrType *Attr,
+ Expr *Exp, NamedDecl *D, VarDecl *VD = 0);
+ void removeLocksFromSet(UnlockFunctionAttr *Attr,
+ Expr *Exp, NamedDecl* FunDecl);
+
+ const ValueDecl *getValueDecl(Expr *Exp);
+ void warnIfMutexNotHeld (const NamedDecl *D, Expr *Exp, AccessKind AK,
+ Expr *MutexExp, ProtectedOperationKind POK);
+ void checkAccess(Expr *Exp, AccessKind AK);
+ void checkDereference(Expr *Exp, AccessKind AK);
+ void handleCall(Expr *Exp, NamedDecl *D, VarDecl *VD = 0);
+
+ template <class AttrType>
+ void addTrylock(LockKind LK, AttrType *Attr, Expr *Exp, NamedDecl *FunDecl,
+ const CFGBlock* PredBlock, const CFGBlock *CurrBlock,
+ Expr *BrE, bool Neg);
+ CallExpr* getTrylockCallExpr(Stmt *Cond, LocalVariableMap::Context C,
+ bool &Negate);
+ void handleTrylock(Stmt *Cond, const CFGBlock* PredBlock,
+ const CFGBlock *CurrBlock);
+
+ /// \brief Returns true if the lockset contains a lock, regardless of whether
+ /// the lock is held exclusively or shared.
+ bool locksetContains(const MutexID &Lock) const {
+ return LSet.lookup(Lock);
+ }
+
+ /// \brief Returns true if the lockset contains a lock with the passed in
+ /// locktype.
+ bool locksetContains(const MutexID &Lock, LockKind KindRequested) const {
+ const LockData *LockHeld = LSet.lookup(Lock);
+ return (LockHeld && KindRequested == LockHeld->LKind);
+ }
+
+ /// \brief Returns true if the lockset contains a lock with at least the
+ /// passed-in lock kind. For example, if we pass in LK_Shared, this function
+ /// returns true if the lock is held as LK_Shared or LK_Exclusive. If we pass
+ /// in LK_Exclusive, it returns true only if the lock is held as LK_Exclusive.
+ bool locksetContainsAtLeast(const MutexID &Lock,
+ LockKind KindRequested) const {
+ switch (KindRequested) {
+ case LK_Shared:
+ return locksetContains(Lock);
+ case LK_Exclusive:
+ return locksetContains(Lock, KindRequested);
+ }
+ llvm_unreachable("Unknown LockKind");
+ }
+
+public:
+ BuildLockset(ThreadSafetyAnalyzer *analyzer, CFGBlockInfo &Info)
+ : StmtVisitor<BuildLockset>(),
+ Handler(analyzer->Handler),
+ LocksetFactory(analyzer->LocksetFactory),
+ LocalVarMap(analyzer->LocalVarMap),
+ LSet(Info.EntrySet),
+ LVarCtx(Info.EntryContext),
+ CtxIndex(Info.EntryIndex)
+ {}
+
+ void VisitUnaryOperator(UnaryOperator *UO);
+ void VisitBinaryOperator(BinaryOperator *BO);
+ void VisitCastExpr(CastExpr *CE);
+ void VisitCallExpr(CallExpr *Exp);
+ void VisitCXXConstructExpr(CXXConstructExpr *Exp);
+ void VisitDeclStmt(DeclStmt *S);
+};
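Illustration only, not part of this diff: the sort of annotated code BuildLockset checks, assuming the GNU-style thread-safety attribute spellings (lockable, guarded_by, exclusive_lock_function, unlock_function); Mutex, mu and a are hypothetical names.

  struct __attribute__((lockable)) Mutex {
    void Lock()   __attribute__((exclusive_lock_function));
    void Unlock() __attribute__((unlock_function));
  };

  Mutex mu;
  int a __attribute__((guarded_by(mu)));

  void ok()  { mu.Lock(); a = 1; mu.Unlock(); }  // lockset is {mu} at the write
  void bad() { a = 1; }                          // empty lockset: expect a warning
                                                 // that writing 'a' requires 'mu'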
+
+/// \brief Add a new lock to the lockset, warning if the lock is already there.
+/// \param Mutex -- the Mutex expression for the lock
+/// \param LDat -- the LockData for the lock
+void BuildLockset::addLock(const MutexID &Mutex, const LockData& LDat) {
+ // FIXME: deal with acquired before/after annotations.
+ // FIXME: Don't always warn when we have support for reentrant locks.
+ if (locksetContains(Mutex))
+ Handler.handleDoubleLock(Mutex.getName(), LDat.AcquireLoc);
+ else
+ LSet = LocksetFactory.add(LSet, Mutex, LDat);
+}
+
+/// \brief Remove a lock from the lockset, warning if the lock is not there.
+/// \param Mutex The lock to be removed
+/// \param UnlockLoc The source location of the unlock (only used in error msg)
+void BuildLockset::removeLock(const MutexID &Mutex, SourceLocation UnlockLoc) {
+ const LockData *LDat = LSet.lookup(Mutex);
+ if (!LDat)
+ Handler.handleUnmatchedUnlock(Mutex.getName(), UnlockLoc);
+ else {
+ // For scoped-lockable vars, remove the mutex associated with this var.
+ if (LDat->UnderlyingMutex.isValid())
+ removeLock(LDat->UnderlyingMutex, UnlockLoc);
+ LSet = LocksetFactory.remove(LSet, Mutex);
+ }
+}
+
+/// \brief This function, parameterized by an attribute type, is used to add a
+/// set of locks specified as attribute arguments to the lockset.
+template <typename AttrType>
+void BuildLockset::addLocksToSet(LockKind LK, AttrType *Attr,
+ Expr *Exp, NamedDecl* FunDecl, VarDecl *VD) {
+ typedef typename AttrType::args_iterator iterator_type;
+
+ SourceLocation ExpLocation = Exp->getExprLoc();
+
+ // Figure out if we're calling the constructor of a scoped lockable class.
+ bool isScopedVar = false;
+ if (VD) {
+ if (CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FunDecl)) {
+ CXXRecordDecl* PD = CD->getParent();
+ if (PD && PD->getAttr<ScopedLockableAttr>())
+ isScopedVar = true;
+ }
+ }
+
+ if (Attr->args_size() == 0) {
+ // The mutex held is the "this" object.
+ MutexID Mutex(0, Exp, FunDecl);
+ if (!Mutex.isValid())
+ MutexID::warnInvalidLock(Handler, 0, Exp, FunDecl);
+ else
+ addLock(Mutex, LockData(ExpLocation, LK));
+ return;
+ }
+
+ for (iterator_type I=Attr->args_begin(), E=Attr->args_end(); I != E; ++I) {
+ MutexID Mutex(*I, Exp, FunDecl);
+ if (!Mutex.isValid())
+ MutexID::warnInvalidLock(Handler, *I, Exp, FunDecl);
+ else {
+ addLock(Mutex, LockData(ExpLocation, LK));
+ if (isScopedVar) {
+ // For scoped lockable vars, map this var to its underlying mutex.
+ DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue, VD->getLocation());
+ MutexID SMutex(&DRE, 0, 0);
+ addLock(SMutex, LockData(VD->getLocation(), LK, Mutex));
+ }
+ }
+ }
+}
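Illustration only, not part of this diff, of the scoped-lockable path above: a hypothetical RAII guard whose constructor is a lock function on a scoped_lockable class, so the lockset gains both the underlying mutex and an entry keyed on the guard variable itself.

  struct __attribute__((lockable)) Mutex {};            // minimal stand-in
  struct __attribute__((scoped_lockable)) MutexLock {
    MutexLock(Mutex *mu) __attribute__((exclusive_lock_function(mu)));
    ~MutexLock() __attribute__((unlock_function));
  };

  void scoped(Mutex *mu, int &x) {
    MutexLock guard(mu);  // addLocksToSet: adds 'mu' and maps 'guard' to it
    x = 1;                // any guarded_by(mu) access here would be satisfied
  }                       // the guard's destructor removes both entries again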
+
+/// \brief This function removes a set of locks specified as attribute
+/// arguments from the lockset.
+void BuildLockset::removeLocksFromSet(UnlockFunctionAttr *Attr,
+ Expr *Exp, NamedDecl* FunDecl) {
+ SourceLocation ExpLocation;
+ if (Exp) ExpLocation = Exp->getExprLoc();
+
+ if (Attr->args_size() == 0) {
+ // The mutex held is the "this" object.
+ MutexID Mu(0, Exp, FunDecl);
+ if (!Mu.isValid())
+ MutexID::warnInvalidLock(Handler, 0, Exp, FunDecl);
+ else
+ removeLock(Mu, ExpLocation);
+ return;
+ }
+
+ for (UnlockFunctionAttr::args_iterator I = Attr->args_begin(),
+ E = Attr->args_end(); I != E; ++I) {
+ MutexID Mutex(*I, Exp, FunDecl);
+ if (!Mutex.isValid())
+ MutexID::warnInvalidLock(Handler, *I, Exp, FunDecl);
+ else
+ removeLock(Mutex, ExpLocation);
+ }
+}
+
+/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs
+const ValueDecl *BuildLockset::getValueDecl(Expr *Exp) {
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Exp))
+ return DR->getDecl();
+
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp))
+ return ME->getMemberDecl();
+
+ return 0;
+}
+
+/// \brief Warn if the LSet does not contain a lock sufficient to protect access
+/// of at least the passed in AccessKind.
+void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, Expr *Exp,
+ AccessKind AK, Expr *MutexExp,
+ ProtectedOperationKind POK) {
+ LockKind LK = getLockKindFromAccessKind(AK);
+
+ MutexID Mutex(MutexExp, Exp, D);
+ if (!Mutex.isValid())
+ MutexID::warnInvalidLock(Handler, MutexExp, Exp, D);
+ else if (!locksetContainsAtLeast(Mutex, LK))
+ Handler.handleMutexNotHeld(D, POK, Mutex.getName(), LK, Exp->getExprLoc());
+}
+
+/// \brief This method identifies variable dereferences and checks pt_guarded_by
+/// and pt_guarded_var annotations. Note that we only check these annotations
+/// at the time a pointer is dereferenced.
+/// FIXME: We need to check for other types of pointer dereferences
+/// (e.g. [], ->) and deal with them here.
+/// \param Exp An expression that has been read or written.
+void BuildLockset::checkDereference(Expr *Exp, AccessKind AK) {
+ UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp);
+ if (!UO || UO->getOpcode() != clang::UO_Deref)
+ return;
+ Exp = UO->getSubExpr()->IgnoreParenCasts();
+
+ const ValueDecl *D = getValueDecl(Exp);
+ if (!D || !D->hasAttrs())
+ return;
+
+ if (D->getAttr<PtGuardedVarAttr>() && LSet.isEmpty())
+ Handler.handleNoMutexHeld(D, POK_VarDereference, AK, Exp->getExprLoc());
+
+ const AttrVec &ArgAttrs = D->getAttrs();
+ for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
+ if (PtGuardedByAttr *PGBAttr = dyn_cast<PtGuardedByAttr>(ArgAttrs[i]))
+ warnIfMutexNotHeld(D, Exp, AK, PGBAttr->getArg(), POK_VarDereference);
+}
+
+/// \brief Checks guarded_by and guarded_var attributes.
+/// Whenever we identify an access (read or write) of a DeclRefExpr or
+/// MemberExpr, we need to check whether there are any guarded_by or
+/// guarded_var attributes, and make sure we hold the appropriate mutexes.
+void BuildLockset::checkAccess(Expr *Exp, AccessKind AK) {
+ const ValueDecl *D = getValueDecl(Exp);
+ if (!D || !D->hasAttrs())
+ return;
+
+ if (D->getAttr<GuardedVarAttr>() && LSet.isEmpty())
+ Handler.handleNoMutexHeld(D, POK_VarAccess, AK, Exp->getExprLoc());
+
+ const AttrVec &ArgAttrs = D->getAttrs();
+ for (unsigned i = 0, Size = ArgAttrs.size(); i < Size; ++i)
+ if (GuardedByAttr *GBAttr = dyn_cast<GuardedByAttr>(ArgAttrs[i]))
+ warnIfMutexNotHeld(D, Exp, AK, GBAttr->getArg(), POK_VarAccess);
+}
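Illustration only, not part of this diff (Mutex is a minimal hypothetical stand-in): checkAccess fires on direct reads and writes of a guarded_by variable, while checkDereference fires on '*p' for a pt_guarded_by pointer.

  struct __attribute__((lockable)) Mutex {};
  Mutex m;
  int  v __attribute__((guarded_by(m)));
  int *p __attribute__((pt_guarded_by(m)));

  int readBoth() {
    return v + *p;  // without 'm' held: one POK_VarAccess warning for 'v' and
  }                 // one POK_VarDereference warning for '*p', both AK_Read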
+
+/// \brief Process a function call, method call, constructor call,
+/// or destructor call. This involves looking at the attributes on the
+/// corresponding function/method/constructor/destructor, issuing warnings,
+/// and updating the locksets accordingly.
+///
+/// FIXME: For classes annotated with one of the guarded annotations, we need
+/// to treat const method calls as reads and non-const method calls as writes,
+/// and check that the appropriate locks are held. Non-const method calls with
+/// the same signature as const method calls can be also treated as reads.
+///
+/// FIXME: We need to also visit CallExprs to catch/check global functions.
+///
+/// FIXME: Do not flag an error for member variables accessed in constructors/
+/// destructors
+void BuildLockset::handleCall(Expr *Exp, NamedDecl *D, VarDecl *VD) {
+ AttrVec &ArgAttrs = D->getAttrs();
+ for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
+ Attr *Attr = ArgAttrs[i];
+ switch (Attr->getKind()) {
+ // When we encounter an exclusive lock function, we need to add the lock
+ // to our lockset with kind exclusive.
+ case attr::ExclusiveLockFunction: {
+ ExclusiveLockFunctionAttr *A = cast<ExclusiveLockFunctionAttr>(Attr);
+ addLocksToSet(LK_Exclusive, A, Exp, D, VD);
+ break;
+ }
+
+ // When we encounter a shared lock function, we need to add the lock
+ // to our lockset with kind shared.
+ case attr::SharedLockFunction: {
+ SharedLockFunctionAttr *A = cast<SharedLockFunctionAttr>(Attr);
+ addLocksToSet(LK_Shared, A, Exp, D, VD);
+ break;
+ }
+
+ // When we encounter an unlock function, we need to remove unlocked
+ // mutexes from the lockset, and flag a warning if they are not there.
+ case attr::UnlockFunction: {
+ UnlockFunctionAttr *UFAttr = cast<UnlockFunctionAttr>(Attr);
+ removeLocksFromSet(UFAttr, Exp, D);
+ break;
+ }
+
+ case attr::ExclusiveLocksRequired: {
+ ExclusiveLocksRequiredAttr *ELRAttr =
+ cast<ExclusiveLocksRequiredAttr>(Attr);
+
+ for (ExclusiveLocksRequiredAttr::args_iterator
+ I = ELRAttr->args_begin(), E = ELRAttr->args_end(); I != E; ++I)
+ warnIfMutexNotHeld(D, Exp, AK_Written, *I, POK_FunctionCall);
+ break;
+ }
+
+ case attr::SharedLocksRequired: {
+ SharedLocksRequiredAttr *SLRAttr = cast<SharedLocksRequiredAttr>(Attr);
+
+ for (SharedLocksRequiredAttr::args_iterator I = SLRAttr->args_begin(),
+ E = SLRAttr->args_end(); I != E; ++I)
+ warnIfMutexNotHeld(D, Exp, AK_Read, *I, POK_FunctionCall);
+ break;
+ }
+
+ case attr::LocksExcluded: {
+ LocksExcludedAttr *LEAttr = cast<LocksExcludedAttr>(Attr);
+ for (LocksExcludedAttr::args_iterator I = LEAttr->args_begin(),
+ E = LEAttr->args_end(); I != E; ++I) {
+ MutexID Mutex(*I, Exp, D);
+ if (!Mutex.isValid())
+ MutexID::warnInvalidLock(Handler, *I, Exp, D);
+ else if (locksetContains(Mutex))
+ Handler.handleFunExcludesLock(D->getName(), Mutex.getName(),
+ Exp->getExprLoc());
+ }
+ break;
+ }
+
+ // Ignore other (non thread-safety) attributes
+ default:
+ break;
+ }
+ }
+}
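Illustration only, not part of this diff (hypothetical declarations), of the attribute kinds dispatched above when they appear on called functions:

  struct __attribute__((lockable)) Mutex {
    void Lock()   __attribute__((exclusive_lock_function));
    void Unlock() __attribute__((unlock_function));
  };
  void needs(Mutex &m)    __attribute__((exclusive_locks_required(m)));
  void excludes(Mutex &m) __attribute__((locks_excluded(m)));

  void caller(Mutex &m) {
    needs(m);     // ExclusiveLocksRequired: warn, 'm' is not held here
    m.Lock();     // ExclusiveLockFunction: 'm' is added to the lockset
    excludes(m);  // LocksExcluded: warn, 'm' must not be held at this call
    m.Unlock();   // UnlockFunction: 'm' is removed from the lockset
  }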
+
+
+/// \brief Add lock to set, if the current block is in the taken branch of a
+/// trylock.
+template <class AttrType>
+void BuildLockset::addTrylock(LockKind LK, AttrType *Attr, Expr *Exp,
+ NamedDecl *FunDecl, const CFGBlock *PredBlock,
+ const CFGBlock *CurrBlock, Expr *BrE, bool Neg) {
+ // Find out which branch has the lock
+ bool branch = false;
+ if (CXXBoolLiteralExpr *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE)) {
+ branch = BLE->getValue();
+ }
+ else if (IntegerLiteral *ILE = dyn_cast_or_null<IntegerLiteral>(BrE)) {
+ branch = ILE->getValue().getBoolValue();
+ }
+ int branchnum = branch ? 0 : 1;
+ if (Neg) branchnum = !branchnum;
+
+ // If we've taken the trylock branch, then add the lock
+ int i = 0;
+ for (CFGBlock::const_succ_iterator SI = PredBlock->succ_begin(),
+ SE = PredBlock->succ_end(); SI != SE && i < 2; ++SI, ++i) {
+ if (*SI == CurrBlock && i == branchnum) {
+ addLocksToSet(LK, Attr, Exp, FunDecl, 0);
+ }
+ }
+}
+
+
+// If Cond can be traced back to a function call, return the call expression.
+// The Negate argument should be initialized to false, and will be set to true
+// if the function call is negated, e.g. if (!mu.tryLock(...))
+CallExpr* BuildLockset::getTrylockCallExpr(Stmt *Cond,
+ LocalVariableMap::Context C,
+ bool &Negate) {
+ if (!Cond)
+ return 0;
+
+ if (CallExpr *CallExp = dyn_cast<CallExpr>(Cond)) {
+ return CallExp;
+ }
+ else if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
+ return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
+ }
+ else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
+ Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
+ return getTrylockCallExpr(E, C, Negate);
+ }
+ else if (UnaryOperator *UOP = dyn_cast<UnaryOperator>(Cond)) {
+ if (UOP->getOpcode() == UO_LNot) {
+ Negate = !Negate;
+ return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
+ }
+ }
+ // FIXME -- handle && and || as well.
+ return NULL;
+}
+
+
+/// \brief Process a conditional branch from a previous block to the current
+/// block, looking for trylock calls.
+void BuildLockset::handleTrylock(Stmt *Cond, const CFGBlock *PredBlock,
+ const CFGBlock *CurrBlock) {
+ bool Negate = false;
+ CallExpr *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
+ if (!Exp)
+ return;
+
+ NamedDecl *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
+ if (!FunDecl || !FunDecl->hasAttrs())
+ return;
+
+ // If the condition is a call to a Trylock function, then grab the attributes
+ AttrVec &ArgAttrs = FunDecl->getAttrs();
+ for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
+ Attr *Attr = ArgAttrs[i];
+ switch (Attr->getKind()) {
+ case attr::ExclusiveTrylockFunction: {
+ ExclusiveTrylockFunctionAttr *A =
+ cast<ExclusiveTrylockFunctionAttr>(Attr);
+ addTrylock(LK_Exclusive, A, Exp, FunDecl, PredBlock, CurrBlock,
+ A->getSuccessValue(), Negate);
+ break;
+ }
+ case attr::SharedTrylockFunction: {
+ SharedTrylockFunctionAttr *A =
+ cast<SharedTrylockFunctionAttr>(Attr);
+ addTrylock(LK_Shared, A, Exp, FunDecl, PredBlock, CurrBlock,
+ A->getSuccessValue(), Negate);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+}
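Illustration only, not part of this diff: a hypothetical trylock whose declared success value is true; addTrylock adds the lock only along the branch that corresponds to a successful call.

  struct __attribute__((lockable)) TryMutex {
    bool TryLock() __attribute__((exclusive_trylock_function(true)));
    void Unlock()  __attribute__((unlock_function));
  };

  void f(TryMutex &m, int &x) {
    if (m.TryLock()) {  // lock added only on this (true) edge
      x = 1;
      m.Unlock();
    }
    if (!m.TryLock())   // '!' flips Negate, so the lock is added on fall-through
      return;
    m.Unlock();
  }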
+
+
+/// \brief For unary operations which read and write a variable, we need to
+/// check whether we hold any required mutexes. Reads are checked in
+/// VisitCastExpr.
+void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
+ switch (UO->getOpcode()) {
+ case clang::UO_PostDec:
+ case clang::UO_PostInc:
+ case clang::UO_PreDec:
+ case clang::UO_PreInc: {
+ Expr *SubExp = UO->getSubExpr()->IgnoreParenCasts();
+ checkAccess(SubExp, AK_Written);
+ checkDereference(SubExp, AK_Written);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+/// For binary operations which assign to a variable (writes), we need to check
+/// whether we hold any required mutexes.
+/// FIXME: Deal with non-primitive types.
+void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
+ if (!BO->isAssignmentOp())
+ return;
+
+ // adjust the context
+ LVarCtx = LocalVarMap.getNextContext(CtxIndex, BO, LVarCtx);
+
+ Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
+ checkAccess(LHSExp, AK_Written);
+ checkDereference(LHSExp, AK_Written);
+}
+
+/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
+/// need to ensure we hold any required mutexes.
+/// FIXME: Deal with non-primitive types.
+void BuildLockset::VisitCastExpr(CastExpr *CE) {
+ if (CE->getCastKind() != CK_LValueToRValue)
+ return;
+ Expr *SubExp = CE->getSubExpr()->IgnoreParenCasts();
+ checkAccess(SubExp, AK_Read);
+ checkDereference(SubExp, AK_Read);
+}
+
+
+void BuildLockset::VisitCallExpr(CallExpr *Exp) {
+ NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
+ if (!D || !D->hasAttrs())
+ return;
+ handleCall(Exp, D);
+}
+
+void BuildLockset::VisitCXXConstructExpr(CXXConstructExpr *Exp) {
+ // FIXME -- only handles constructors in DeclStmt below.
+}
+
+void BuildLockset::VisitDeclStmt(DeclStmt *S) {
+ // adjust the context
+ LVarCtx = LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
+
+ DeclGroupRef DGrp = S->getDeclGroup();
+ for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
+ Decl *D = *I;
+ if (VarDecl *VD = dyn_cast_or_null<VarDecl>(D)) {
+ Expr *E = VD->getInit();
+ if (CXXConstructExpr *CE = dyn_cast_or_null<CXXConstructExpr>(E)) {
+ NamedDecl *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
+ if (!CtorD || !CtorD->hasAttrs())
+ return;
+ handleCall(CE, CtorD, VD);
+ }
+ }
+ }
+}
+
+
+/// \brief Compute the intersection of two locksets and issue warnings for any
+/// locks in the symmetric difference.
+///
+/// This function is used at a merge point in the CFG when comparing the lockset
+/// of each branch being merged. For example, given the following sequence:
+/// A; if () then B; else C; D; we need to check that the locksets after B and C
+/// are the same. In the event of a difference, we use the intersection of these
+/// two locksets at the start of D.
+Lockset ThreadSafetyAnalyzer::intersectAndWarn(const CFGBlockInfo &Block1,
+ CFGBlockSide Side1,
+ const CFGBlockInfo &Block2,
+ CFGBlockSide Side2,
+ LockErrorKind LEK) {
+ Lockset LSet1 = Block1.getSet(Side1);
+ Lockset LSet2 = Block2.getSet(Side2);
+
+ Lockset Intersection = LSet1;
+ for (Lockset::iterator I = LSet2.begin(), E = LSet2.end(); I != E; ++I) {
+ const MutexID &LSet2Mutex = I.getKey();
+ const LockData &LSet2LockData = I.getData();
+ if (const LockData *LD = LSet1.lookup(LSet2Mutex)) {
+ if (LD->LKind != LSet2LockData.LKind) {
+ Handler.handleExclusiveAndShared(LSet2Mutex.getName(),
+ LSet2LockData.AcquireLoc,
+ LD->AcquireLoc);
+ if (LD->LKind != LK_Exclusive)
+ Intersection = LocksetFactory.add(Intersection, LSet2Mutex,
+ LSet2LockData);
+ }
+ } else {
+ Handler.handleMutexHeldEndOfScope(LSet2Mutex.getName(),
+ LSet2LockData.AcquireLoc,
+ Block1.getLocation(Side1), LEK);
+ }
+ }
+
+ for (Lockset::iterator I = LSet1.begin(), E = LSet1.end(); I != E; ++I) {
+ if (!LSet2.contains(I.getKey())) {
+ const MutexID &Mutex = I.getKey();
+ const LockData &MissingLock = I.getData();
+ Handler.handleMutexHeldEndOfScope(Mutex.getName(),
+ MissingLock.AcquireLoc,
+ Block2.getLocation(Side2), LEK);
+ Intersection = LocksetFactory.remove(Intersection, Mutex);
+ }
+ }
+ return Intersection;
+}
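Illustration only, not part of this diff (Mutex as a hypothetical lockable type), of a merge point where intersectAndWarn fires:

  struct __attribute__((lockable)) Mutex {
    void Lock() __attribute__((exclusive_lock_function));
  };

  void g(Mutex &m, bool c) {
    if (c)
      m.Lock();
    // Join: the then-edge exits with lockset {m}, the else-edge with {}.
    // 'm' is reported (LEK_LockedSomePredecessors) and dropped, so the code
    // after the 'if' continues with the intersection -- the empty lockset.
  }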
+
+Lockset ThreadSafetyAnalyzer::addLock(Lockset &LSet, Expr *MutexExp,
+ const NamedDecl *D,
+ LockKind LK, SourceLocation Loc) {
+ MutexID Mutex(MutexExp, 0, D);
+ if (!Mutex.isValid()) {
+ MutexID::warnInvalidLock(Handler, MutexExp, 0, D);
+ return LSet;
+ }
+ LockData NewLock(Loc, LK);
+ return LocksetFactory.add(LSet, Mutex, NewLock);
+}
+
+/// \brief Check a function's CFG for thread-safety violations.
+///
+/// We traverse the blocks in the CFG, compute the set of mutexes that are held
+/// at the end of each block, and issue warnings for thread safety violations.
+/// Each block in the CFG is traversed exactly once.
+void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
+ CFG *CFGraph = AC.getCFG();
+ if (!CFGraph) return;
+ const NamedDecl *D = dyn_cast_or_null<NamedDecl>(AC.getDecl());
+
+ if (!D)
+ return; // Ignore anonymous functions for now.
+ if (D->getAttr<NoThreadSafetyAnalysisAttr>())
+ return;
+ // FIXME: Do something a bit more intelligent inside constructor and
+ // destructor code. Constructors and destructors must assume unique access
+ // to 'this', so checks on member variable access are disabled, but we should
+ // still enable checks on other objects.
+ if (isa<CXXConstructorDecl>(D))
+ return; // Don't check inside constructors.
+ if (isa<CXXDestructorDecl>(D))
+ return; // Don't check inside destructors.
+
+ std::vector<CFGBlockInfo> BlockInfo(CFGraph->getNumBlockIDs(),
+ CFGBlockInfo::getEmptyBlockInfo(LocksetFactory, LocalVarMap));
+
+ // We need to explore the CFG via a "topological" ordering.
+ // That way, we will be guaranteed to have information about required
+ // predecessor locksets when exploring a new block.
+ PostOrderCFGView *SortedGraph = AC.getAnalysis<PostOrderCFGView>();
+ PostOrderCFGView::CFGBlockSet VisitedBlocks(CFGraph);
+
+ // Compute SSA names for local variables
+ LocalVarMap.traverseCFG(CFGraph, SortedGraph, BlockInfo);
+
+ // Fill in source locations for all CFGBlocks.
+ findBlockLocations(CFGraph, SortedGraph, BlockInfo);
+
+ // Add locks from exclusive_locks_required and shared_locks_required
+ // to initial lockset. Also turn off checking for lock and unlock functions.
+ // FIXME: is there a more intelligent way to check lock/unlock functions?
+ if (!SortedGraph->empty() && D->hasAttrs()) {
+ const CFGBlock *FirstBlock = *SortedGraph->begin();
+ Lockset &InitialLockset = BlockInfo[FirstBlock->getBlockID()].EntrySet;
+ const AttrVec &ArgAttrs = D->getAttrs();
+ for (unsigned i = 0; i < ArgAttrs.size(); ++i) {
+ Attr *Attr = ArgAttrs[i];
+ SourceLocation AttrLoc = Attr->getLocation();
+ if (SharedLocksRequiredAttr *SLRAttr
+ = dyn_cast<SharedLocksRequiredAttr>(Attr)) {
+ for (SharedLocksRequiredAttr::args_iterator
+ SLRIter = SLRAttr->args_begin(),
+ SLREnd = SLRAttr->args_end(); SLRIter != SLREnd; ++SLRIter)
+ InitialLockset = addLock(InitialLockset,
+ *SLRIter, D, LK_Shared,
+ AttrLoc);
+ } else if (ExclusiveLocksRequiredAttr *ELRAttr
+ = dyn_cast<ExclusiveLocksRequiredAttr>(Attr)) {
+ for (ExclusiveLocksRequiredAttr::args_iterator
+ ELRIter = ELRAttr->args_begin(),
+ ELREnd = ELRAttr->args_end(); ELRIter != ELREnd; ++ELRIter)
+ InitialLockset = addLock(InitialLockset,
+ *ELRIter, D, LK_Exclusive,
+ AttrLoc);
+ } else if (isa<UnlockFunctionAttr>(Attr)) {
+ // Don't try to check unlock functions for now
+ return;
+ } else if (isa<ExclusiveLockFunctionAttr>(Attr)) {
+ // Don't try to check lock functions for now
+ return;
+ } else if (isa<SharedLockFunctionAttr>(Attr)) {
+ // Don't try to check lock functions for now
+ return;
+ }
+ }
+ }
+
+ for (PostOrderCFGView::iterator I = SortedGraph->begin(),
+ E = SortedGraph->end(); I != E; ++I) {
+ const CFGBlock *CurrBlock = *I;
+ int CurrBlockID = CurrBlock->getBlockID();
+ CFGBlockInfo *CurrBlockInfo = &BlockInfo[CurrBlockID];
+
+ // Use the default initial lockset in case there are no predecessors.
+ VisitedBlocks.insert(CurrBlock);
+
+ // Iterate through the predecessor blocks and warn if the lockset for all
+ // predecessors is not the same. We take the entry lockset of the current
+ // block to be the intersection of all previous locksets.
+ // FIXME: By keeping the intersection, we may output more errors in future
+ // for a lock which is not in the intersection, but was in the union. We
+ // may want to also keep the union in future. As an example, let's say
+ // the intersection contains Mutex L, and the union contains L and M.
+ // Later we unlock M. At this point, we would output an error because we
+ // never locked M; although the real error is probably that we forgot to
+ // lock M on all code paths. Conversely, let's say that later we lock M.
+ // In this case, we should compare against the intersection instead of the
+ // union because the real error is probably that we forgot to unlock M on
+ // all code paths.
+ bool LocksetInitialized = false;
+ llvm::SmallVector<CFGBlock*, 8> SpecialBlocks;
+ for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
+ PE = CurrBlock->pred_end(); PI != PE; ++PI) {
+
+ // if *PI -> CurrBlock is a back edge
+ if (*PI == 0 || !VisitedBlocks.alreadySet(*PI))
+ continue;
+
+ // Ignore edges from blocks that can't return.
+ if ((*PI)->hasNoReturnElement())
+ continue;
+
+ // If the previous block ended in a 'continue' or 'break' statement, then
+ // a difference in locksets is probably due to a bug in that block, rather
+ // than in some other predecessor. In that case, keep the other
+ // predecessor's lockset.
+ if (const Stmt *Terminator = (*PI)->getTerminator()) {
+ if (isa<ContinueStmt>(Terminator) || isa<BreakStmt>(Terminator)) {
+ SpecialBlocks.push_back(*PI);
+ continue;
+ }
+ }
+
+ int PrevBlockID = (*PI)->getBlockID();
+ CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
+
+ if (!LocksetInitialized) {
+ CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
+ LocksetInitialized = true;
+ } else {
+ CurrBlockInfo->EntrySet =
+ intersectAndWarn(*CurrBlockInfo, CBS_Entry,
+ *PrevBlockInfo, CBS_Exit,
+ LEK_LockedSomePredecessors);
+ }
+ }
+
+ // Process continue and break blocks. Assume that the lockset for the
+ // resulting block is unaffected by any discrepancies in them.
+ for (unsigned SpecialI = 0, SpecialN = SpecialBlocks.size();
+ SpecialI < SpecialN; ++SpecialI) {
+ CFGBlock *PrevBlock = SpecialBlocks[SpecialI];
+ int PrevBlockID = PrevBlock->getBlockID();
+ CFGBlockInfo *PrevBlockInfo = &BlockInfo[PrevBlockID];
+
+ if (!LocksetInitialized) {
+ CurrBlockInfo->EntrySet = PrevBlockInfo->ExitSet;
+ LocksetInitialized = true;
+ } else {
+ // Determine whether this edge is a loop terminator for diagnostic
+ // purposes. FIXME: A 'break' statement might be a loop terminator, but
+ // it might also be part of a switch. Also, a subsequent destructor
+ // might add to the lockset, in which case the real issue might be a
+ // double lock on the other path.
+ const Stmt *Terminator = PrevBlock->getTerminator();
+ bool IsLoop = Terminator && isa<ContinueStmt>(Terminator);
+
+ // Do not update EntrySet.
+ intersectAndWarn(*CurrBlockInfo, CBS_Entry, *PrevBlockInfo, CBS_Exit,
+ IsLoop ? LEK_LockedSomeLoopIterations
+ : LEK_LockedSomePredecessors);
+ }
+ }
+
+ BuildLockset LocksetBuilder(this, *CurrBlockInfo);
+ CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
+ PE = CurrBlock->pred_end();
+ if (PI != PE) {
+ // If the predecessor ended in a branch, then process any trylocks.
+ // FIXME -- check to make sure there's only one predecessor.
+ if (Stmt *TCE = (*PI)->getTerminatorCondition()) {
+ LocksetBuilder.handleTrylock(TCE, *PI, CurrBlock);
+ }
+ }
+
+ // Visit all the statements in the basic block.
+ for (CFGBlock::const_iterator BI = CurrBlock->begin(),
+ BE = CurrBlock->end(); BI != BE; ++BI) {
+ switch (BI->getKind()) {
+ case CFGElement::Statement: {
+ const CFGStmt *CS = cast<CFGStmt>(&*BI);
+ LocksetBuilder.Visit(const_cast<Stmt*>(CS->getStmt()));
+ break;
+ }
+ // Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
+ case CFGElement::AutomaticObjectDtor: {
+ const CFGAutomaticObjDtor *AD = cast<CFGAutomaticObjDtor>(&*BI);
+ CXXDestructorDecl *DD = const_cast<CXXDestructorDecl*>(
+ AD->getDestructorDecl(AC.getASTContext()));
+ if (!DD->hasAttrs())
+ break;
+
+ // Create a dummy expression that refers to the variable being destroyed.
+ VarDecl *VD = const_cast<VarDecl*>(AD->getVarDecl());
+ DeclRefExpr DRE(VD, false, VD->getType(), VK_LValue,
+ AD->getTriggerStmt()->getLocEnd());
+ LocksetBuilder.handleCall(&DRE, DD);
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ CurrBlockInfo->ExitSet = LocksetBuilder.LSet;
+
+ // For every back edge from CurrBlock (the end of the loop) to another block
+ // (FirstLoopBlock) we need to check that the Lockset of CurrBlock is equal to
+ // the one held at the beginning of FirstLoopBlock. We can look up the Lockset
+ // held at the beginning of FirstLoopBlock in the EntrySet of its BlockInfo.
+ for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
+ SE = CurrBlock->succ_end(); SI != SE; ++SI) {
+
+ // if CurrBlock -> *SI is *not* a back edge
+ if (*SI == 0 || !VisitedBlocks.alreadySet(*SI))
+ continue;
+
+ CFGBlock *FirstLoopBlock = *SI;
+ CFGBlockInfo &PreLoop = BlockInfo[FirstLoopBlock->getBlockID()];
+ CFGBlockInfo &LoopEnd = BlockInfo[CurrBlockID];
+ intersectAndWarn(LoopEnd, CBS_Exit, PreLoop, CBS_Entry,
+ LEK_LockedSomeLoopIterations);
+ }
+ }
+
+ CFGBlockInfo &Initial = BlockInfo[CFGraph->getEntry().getBlockID()];
+ CFGBlockInfo &Final = BlockInfo[CFGraph->getExit().getBlockID()];
+
+ // FIXME: Should we call this function for all blocks which exit the function?
+ intersectAndWarn(Initial, CBS_Entry, Final, CBS_Exit,
+ LEK_LockedAtEndOfFunction);
+}
+
+} // end anonymous namespace
+
+
+namespace clang {
+namespace thread_safety {
+
+/// \brief Check a function's CFG for thread-safety violations.
+///
+/// We traverse the blocks in the CFG, compute the set of mutexes that are held
+/// at the end of each block, and issue warnings for thread safety violations.
+/// Each block in the CFG is traversed exactly once.
+void runThreadSafetyAnalysis(AnalysisDeclContext &AC,
+ ThreadSafetyHandler &Handler) {
+ ThreadSafetyAnalyzer Analyzer(Handler);
+ Analyzer.runAnalysis(AC);
+}
+
+/// \brief Helper function that returns a LockKind required for the given level
+/// of access.
+LockKind getLockKindFromAccessKind(AccessKind AK) {
+ switch (AK) {
+ case AK_Read :
+ return LK_Shared;
+ case AK_Written :
+ return LK_Exclusive;
+ }
+ llvm_unreachable("Unknown AccessKind");
+}
+
+}} // end namespace clang::thread_safety
diff --git a/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
new file mode 100644
index 0000000..6e5da25
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Analysis/UninitializedValues.cpp
@@ -0,0 +1,724 @@
+//==- UninitializedValues.cpp - Find Uninitialized Values -------*- C++ --*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements uninitialized values analysis for source-level CFGs.
+//
+//===----------------------------------------------------------------------===//
+
+#include <utility>
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/PackedVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "clang/AST/Decl.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h"
+#include "clang/Analysis/Analyses/UninitializedValues.h"
+#include "llvm/Support/SaveAndRestore.h"
+
+using namespace clang;
+
+static bool isTrackedVar(const VarDecl *vd, const DeclContext *dc) {
+ if (vd->isLocalVarDecl() && !vd->hasGlobalStorage() &&
+ !vd->isExceptionVariable() &&
+ vd->getDeclContext() == dc) {
+ QualType ty = vd->getType();
+ return ty->isScalarType() || ty->isVectorType();
+ }
+ return false;
+}
+
+//------------------------------------------------------------------------====//
+// DeclToIndex: a mapping from Decls we track to value indices.
+//====------------------------------------------------------------------------//
+
+namespace {
+class DeclToIndex {
+ llvm::DenseMap<const VarDecl *, unsigned> map;
+public:
+ DeclToIndex() {}
+
+ /// Compute the actual mapping from declarations to bits.
+ void computeMap(const DeclContext &dc);
+
+ /// Return the number of declarations in the map.
+ unsigned size() const { return map.size(); }
+
+ /// Returns the bit vector index for a given declaration.
+ llvm::Optional<unsigned> getValueIndex(const VarDecl *d) const;
+};
+}
+
+void DeclToIndex::computeMap(const DeclContext &dc) {
+ unsigned count = 0;
+ DeclContext::specific_decl_iterator<VarDecl> I(dc.decls_begin()),
+ E(dc.decls_end());
+ for ( ; I != E; ++I) {
+ const VarDecl *vd = *I;
+ if (isTrackedVar(vd, &dc))
+ map[vd] = count++;
+ }
+}
+
+llvm::Optional<unsigned> DeclToIndex::getValueIndex(const VarDecl *d) const {
+ llvm::DenseMap<const VarDecl *, unsigned>::const_iterator I = map.find(d);
+ if (I == map.end())
+ return llvm::Optional<unsigned>();
+ return I->second;
+}
+
+//------------------------------------------------------------------------====//
+// CFGBlockValues: dataflow values for CFG blocks.
+//====------------------------------------------------------------------------//
+
+// These values are defined in such a way that a merge can be done using
+// a bitwise OR.
+enum Value { Unknown = 0x0, /* 00 */
+ Initialized = 0x1, /* 01 */
+ Uninitialized = 0x2, /* 10 */
+ MayUninitialized = 0x3 /* 11 */ };
+
+static bool isUninitialized(const Value v) {
+ return v >= Uninitialized;
+}
+static bool isAlwaysUninit(const Value v) {
+ return v == Uninitialized;
+}
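Worked example of the bitwise-OR merge (illustrative only): Unknown (00) | Initialized (01) == Initialized (01), and Initialized (01) | Uninitialized (10) == MayUninitialized (11). So isUninitialized() simply tests the high bit, and isAlwaysUninit() holds only when no merged-in path contributed the Initialized bit.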
+
+namespace {
+
+typedef llvm::PackedVector<Value, 2> ValueVector;
+typedef std::pair<ValueVector *, ValueVector *> BVPair;
+
+class CFGBlockValues {
+ const CFG &cfg;
+ BVPair *vals;
+ ValueVector scratch;
+ DeclToIndex declToIndex;
+
+ ValueVector &lazyCreate(ValueVector *&bv);
+public:
+ CFGBlockValues(const CFG &cfg);
+ ~CFGBlockValues();
+
+ unsigned getNumEntries() const { return declToIndex.size(); }
+
+ void computeSetOfDeclarations(const DeclContext &dc);
+ ValueVector &getValueVector(const CFGBlock *block,
+ const CFGBlock *dstBlock);
+
+ BVPair &getValueVectors(const CFGBlock *block, bool shouldLazyCreate);
+
+ void mergeIntoScratch(ValueVector const &source, bool isFirst);
+ bool updateValueVectorWithScratch(const CFGBlock *block);
+ bool updateValueVectors(const CFGBlock *block, const BVPair &newVals);
+
+ bool hasNoDeclarations() const {
+ return declToIndex.size() == 0;
+ }
+
+ void resetScratch();
+ ValueVector &getScratch() { return scratch; }
+
+ ValueVector::reference operator[](const VarDecl *vd);
+};
+} // end anonymous namespace
+
+CFGBlockValues::CFGBlockValues(const CFG &c) : cfg(c), vals(0) {
+ unsigned n = cfg.getNumBlockIDs();
+ if (!n)
+ return;
+ vals = new std::pair<ValueVector*, ValueVector*>[n];
+ memset((void*)vals, 0, sizeof(*vals) * n);
+}
+
+CFGBlockValues::~CFGBlockValues() {
+ unsigned n = cfg.getNumBlockIDs();
+ if (n == 0)
+ return;
+ for (unsigned i = 0; i < n; ++i) {
+ delete vals[i].first;
+ delete vals[i].second;
+ }
+ delete [] vals;
+}
+
+void CFGBlockValues::computeSetOfDeclarations(const DeclContext &dc) {
+ declToIndex.computeMap(dc);
+ scratch.resize(declToIndex.size());
+}
+
+ValueVector &CFGBlockValues::lazyCreate(ValueVector *&bv) {
+ if (!bv)
+ bv = new ValueVector(declToIndex.size());
+ return *bv;
+}
+
+/// This function pattern matches for a '&&' or '||' that appears at
+/// the beginning of a CFGBlock that also (1) has a terminator and
+/// (2) has no other elements. If such an expression is found, it is returned.
+static const BinaryOperator *getLogicalOperatorInChain(const CFGBlock *block) {
+ if (block->empty())
+ return 0;
+
+ const CFGStmt *cstmt = block->front().getAs<CFGStmt>();
+ if (!cstmt)
+ return 0;
+
+ const BinaryOperator *b = dyn_cast_or_null<BinaryOperator>(cstmt->getStmt());
+
+ if (!b || !b->isLogicalOp())
+ return 0;
+
+ if (block->pred_size() == 2) {
+ if (block->getTerminatorCondition() == b) {
+ if (block->succ_size() == 2)
+ return b;
+ }
+ else if (block->size() == 1)
+ return b;
+ }
+
+ return 0;
+}
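Illustration only, not part of this diff, of why a short-circuit block keeps separate value vectors for its true and false successors; roughly, the intent is that a use reachable only along the edge on which the initializing operand must have run is not flagged:

  int f();
  void h(bool flag) {
    int x;
    if (flag && (x = f()))  // '&&' block: distinct vectors for the T and F edges
      x += 1;               // reached only via the T edge, where 'x' was assigned
  }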
+
+ValueVector &CFGBlockValues::getValueVector(const CFGBlock *block,
+ const CFGBlock *dstBlock) {
+ unsigned idx = block->getBlockID();
+ if (dstBlock && getLogicalOperatorInChain(block)) {
+ if (*block->succ_begin() == dstBlock)
+ return lazyCreate(vals[idx].first);
+ assert(*(block->succ_begin()+1) == dstBlock);
+ return lazyCreate(vals[idx].second);
+ }
+
+ assert(vals[idx].second == 0);
+ return lazyCreate(vals[idx].first);
+}
+
+BVPair &CFGBlockValues::getValueVectors(const clang::CFGBlock *block,
+ bool shouldLazyCreate) {
+ unsigned idx = block->getBlockID();
+ lazyCreate(vals[idx].first);
+ if (shouldLazyCreate)
+ lazyCreate(vals[idx].second);
+ return vals[idx];
+}
+
+#if 0
+static void printVector(const CFGBlock *block, ValueVector &bv,
+ unsigned num) {
+
+ llvm::errs() << block->getBlockID() << " :";
+ for (unsigned i = 0; i < bv.size(); ++i) {
+ llvm::errs() << ' ' << bv[i];
+ }
+ llvm::errs() << " : " << num << '\n';
+}
+
+static void printVector(const char *name, ValueVector const &bv) {
+ llvm::errs() << name << " : ";
+ for (unsigned i = 0; i < bv.size(); ++i) {
+ llvm::errs() << ' ' << bv[i];
+ }
+ llvm::errs() << "\n";
+}
+#endif
+
+void CFGBlockValues::mergeIntoScratch(ValueVector const &source,
+ bool isFirst) {
+ if (isFirst)
+ scratch = source;
+ else
+ scratch |= source;
+}
+
+bool CFGBlockValues::updateValueVectorWithScratch(const CFGBlock *block) {
+ ValueVector &dst = getValueVector(block, 0);
+ bool changed = (dst != scratch);
+ if (changed)
+ dst = scratch;
+#if 0
+ printVector(block, scratch, 0);
+#endif
+ return changed;
+}
+
+bool CFGBlockValues::updateValueVectors(const CFGBlock *block,
+ const BVPair &newVals) {
+ BVPair &vals = getValueVectors(block, true);
+ bool changed = *newVals.first != *vals.first ||
+ *newVals.second != *vals.second;
+ *vals.first = *newVals.first;
+ *vals.second = *newVals.second;
+#if 0
+ printVector(block, *vals.first, 1);
+ printVector(block, *vals.second, 2);
+#endif
+ return changed;
+}
+
+void CFGBlockValues::resetScratch() {
+ scratch.reset();
+}
+
+ValueVector::reference CFGBlockValues::operator[](const VarDecl *vd) {
+ const llvm::Optional<unsigned> &idx = declToIndex.getValueIndex(vd);
+ assert(idx.hasValue());
+ return scratch[idx.getValue()];
+}
+
+//------------------------------------------------------------------------====//
+// Worklist: worklist for dataflow analysis.
+//====------------------------------------------------------------------------//
+
+namespace {
+class DataflowWorklist {
+ SmallVector<const CFGBlock *, 20> worklist;
+ llvm::BitVector enqueuedBlocks;
+public:
+ DataflowWorklist(const CFG &cfg) : enqueuedBlocks(cfg.getNumBlockIDs()) {}
+
+ void enqueueSuccessors(const CFGBlock *block);
+ const CFGBlock *dequeue();
+};
+}
+
+void DataflowWorklist::enqueueSuccessors(const clang::CFGBlock *block) {
+ unsigned OldWorklistSize = worklist.size();
+ for (CFGBlock::const_succ_iterator I = block->succ_begin(),
+ E = block->succ_end(); I != E; ++I) {
+ const CFGBlock *Successor = *I;
+ if (!Successor || enqueuedBlocks[Successor->getBlockID()])
+ continue;
+ worklist.push_back(Successor);
+ enqueuedBlocks[Successor->getBlockID()] = true;
+ }
+ if (OldWorklistSize == 0 || OldWorklistSize == worklist.size())
+ return;
+
+ // Rotate the newly added blocks to the start of the worklist so that it forms
+ // a proper queue when we pop off the end of the worklist.
+ std::rotate(worklist.begin(), worklist.begin() + OldWorklistSize,
+ worklist.end());
+}
+
+const CFGBlock *DataflowWorklist::dequeue() {
+ if (worklist.empty())
+ return 0;
+ const CFGBlock *b = worklist.back();
+ worklist.pop_back();
+ enqueuedBlocks[b->getBlockID()] = false;
+ return b;
+}
+
+//------------------------------------------------------------------------====//
+// Transfer function for uninitialized values analysis.
+//====------------------------------------------------------------------------//
+
+namespace {
+class FindVarResult {
+ const VarDecl *vd;
+ const DeclRefExpr *dr;
+public:
+ FindVarResult(VarDecl *vd, DeclRefExpr *dr) : vd(vd), dr(dr) {}
+
+ const DeclRefExpr *getDeclRefExpr() const { return dr; }
+ const VarDecl *getDecl() const { return vd; }
+};
+
+class TransferFunctions : public StmtVisitor<TransferFunctions> {
+ CFGBlockValues &vals;
+ const CFG &cfg;
+ AnalysisDeclContext &ac;
+ UninitVariablesHandler *handler;
+
+ /// The last DeclRefExpr seen when analyzing a block. Used to
+ /// cheat when detecting cases when the address of a variable is taken.
+ DeclRefExpr *lastDR;
+
+ /// The last lvalue-to-rvalue conversion of a variable whose value
+ /// was uninitialized. Normally this results in a warning, but in some
+ /// cases the warning is silenced or the uninitialized value is simply
+ /// propagated instead.
+ CastExpr *lastLoad;
+
+ /// For some expressions, we want to ignore any post-processing after
+ /// visitation.
+ bool skipProcessUses;
+
+public:
+ TransferFunctions(CFGBlockValues &vals, const CFG &cfg,
+ AnalysisDeclContext &ac,
+ UninitVariablesHandler *handler)
+ : vals(vals), cfg(cfg), ac(ac), handler(handler),
+ lastDR(0), lastLoad(0),
+ skipProcessUses(false) {}
+
+ void reportUninit(const DeclRefExpr *ex, const VarDecl *vd,
+ bool isAlwaysUninit);
+
+ void VisitBlockExpr(BlockExpr *be);
+ void VisitDeclStmt(DeclStmt *ds);
+ void VisitDeclRefExpr(DeclRefExpr *dr);
+ void VisitUnaryOperator(UnaryOperator *uo);
+ void VisitBinaryOperator(BinaryOperator *bo);
+ void VisitCastExpr(CastExpr *ce);
+ void VisitObjCForCollectionStmt(ObjCForCollectionStmt *fs);
+ void Visit(Stmt *s);
+
+ bool isTrackedVar(const VarDecl *vd) {
+ return ::isTrackedVar(vd, cast<DeclContext>(ac.getDecl()));
+ }
+
+ FindVarResult findBlockVarDecl(Expr *ex);
+
+ void ProcessUses(Stmt *s = 0);
+};
+}
+
+static const Expr *stripCasts(ASTContext &C, const Expr *Ex) {
+ while (Ex) {
+ Ex = Ex->IgnoreParenNoopCasts(C);
+ if (const CastExpr *CE = dyn_cast<CastExpr>(Ex)) {
+ if (CE->getCastKind() == CK_LValueBitCast) {
+ Ex = CE->getSubExpr();
+ continue;
+ }
+ }
+ break;
+ }
+ return Ex;
+}
+
+void TransferFunctions::reportUninit(const DeclRefExpr *ex,
+ const VarDecl *vd, bool isAlwaysUninit) {
+ if (handler) handler->handleUseOfUninitVariable(ex, vd, isAlwaysUninit);
+}
+
+FindVarResult TransferFunctions::findBlockVarDecl(Expr *ex) {
+ if (DeclRefExpr *dr = dyn_cast<DeclRefExpr>(ex->IgnoreParenCasts()))
+ if (VarDecl *vd = dyn_cast<VarDecl>(dr->getDecl()))
+ if (isTrackedVar(vd))
+ return FindVarResult(vd, dr);
+ return FindVarResult(0, 0);
+}
+
+void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *fs) {
+ // This represents an initialization of the 'element' value.
+ Stmt *element = fs->getElement();
+ const VarDecl *vd = 0;
+
+ if (DeclStmt *ds = dyn_cast<DeclStmt>(element)) {
+ vd = cast<VarDecl>(ds->getSingleDecl());
+ if (!isTrackedVar(vd))
+ vd = 0;
+ } else {
+ // Initialize the value of the reference variable.
+ const FindVarResult &res = findBlockVarDecl(cast<Expr>(element));
+ vd = res.getDecl();
+ }
+
+ if (vd)
+ vals[vd] = Initialized;
+}
+
+void TransferFunctions::VisitBlockExpr(BlockExpr *be) {
+ const BlockDecl *bd = be->getBlockDecl();
+ for (BlockDecl::capture_const_iterator i = bd->capture_begin(),
+ e = bd->capture_end() ; i != e; ++i) {
+ const VarDecl *vd = i->getVariable();
+ if (!isTrackedVar(vd))
+ continue;
+ if (i->isByRef()) {
+ vals[vd] = Initialized;
+ continue;
+ }
+ Value v = vals[vd];
+ if (handler && isUninitialized(v))
+ handler->handleUseOfUninitVariable(be, vd, isAlwaysUninit(v));
+ }
+}
+
+void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *dr) {
+ // Record the last DeclRefExpr seen. This is an lvalue computation.
+ // We use this value to later detect if a variable "escapes" the analysis.
+ if (const VarDecl *vd = dyn_cast<VarDecl>(dr->getDecl()))
+ if (isTrackedVar(vd)) {
+ ProcessUses();
+ lastDR = dr;
+ }
+}
+
+void TransferFunctions::VisitDeclStmt(DeclStmt *ds) {
+ for (DeclStmt::decl_iterator DI = ds->decl_begin(), DE = ds->decl_end();
+ DI != DE; ++DI) {
+ if (VarDecl *vd = dyn_cast<VarDecl>(*DI)) {
+ if (isTrackedVar(vd)) {
+ if (Expr *init = vd->getInit()) {
+ // If the initializer consists solely of a reference to itself, we
+ // explicitly mark the variable as uninitialized. This allows code
+ // like the following:
+ //
+ // int x = x;
+ //
+ // to deliberately leave a variable uninitialized. Different analysis
+ // clients can detect this pattern and adjust their reporting
+ // appropriately, but we need to continue to analyze subsequent uses
+ // of the variable.
+ if (init == lastLoad) {
+ const DeclRefExpr *DR
+ = cast<DeclRefExpr>(stripCasts(ac.getASTContext(),
+ lastLoad->getSubExpr()));
+ if (DR->getDecl() == vd) {
+ // int x = x;
+ // Propagate uninitialized value, but don't immediately report
+ // a problem.
+ vals[vd] = Uninitialized;
+ lastLoad = 0;
+ lastDR = 0;
+ if (handler)
+ handler->handleSelfInit(vd);
+ return;
+ }
+ }
+
+ // All other cases: treat the new variable as initialized.
+ // This is a minor optimization to reduce the propagation
+ // of the analysis, since we will have already reported
+ // the use of the uninitialized value (when visiting the
+ // initializer).
+ vals[vd] = Initialized;
+ }
+ }
+ }
+ }
+}
+
+void TransferFunctions::VisitBinaryOperator(clang::BinaryOperator *bo) {
+ if (bo->isAssignmentOp()) {
+ const FindVarResult &res = findBlockVarDecl(bo->getLHS());
+ if (const VarDecl *vd = res.getDecl()) {
+ ValueVector::reference val = vals[vd];
+ if (isUninitialized(val)) {
+ if (bo->getOpcode() != BO_Assign)
+ reportUninit(res.getDeclRefExpr(), vd, isAlwaysUninit(val));
+ else
+ val = Initialized;
+ }
+ }
+ }
+}
+
+void TransferFunctions::VisitUnaryOperator(clang::UnaryOperator *uo) {
+ switch (uo->getOpcode()) {
+ case clang::UO_PostDec:
+ case clang::UO_PostInc:
+ case clang::UO_PreDec:
+ case clang::UO_PreInc: {
+ const FindVarResult &res = findBlockVarDecl(uo->getSubExpr());
+ if (const VarDecl *vd = res.getDecl()) {
+ assert(res.getDeclRefExpr() == lastDR);
+ // We null out lastDR to indicate we have fully processed it
+ // and we don't want the auto-value setting in Visit().
+ lastDR = 0;
+
+ ValueVector::reference val = vals[vd];
+ if (isUninitialized(val))
+ reportUninit(res.getDeclRefExpr(), vd, isAlwaysUninit(val));
+ }
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+void TransferFunctions::VisitCastExpr(clang::CastExpr *ce) {
+ if (ce->getCastKind() == CK_LValueToRValue) {
+ const FindVarResult &res = findBlockVarDecl(ce->getSubExpr());
+ if (res.getDecl()) {
+ assert(res.getDeclRefExpr() == lastDR);
+ lastLoad = ce;
+ }
+ }
+ else if (ce->getCastKind() == CK_NoOp ||
+ ce->getCastKind() == CK_LValueBitCast) {
+ skipProcessUses = true;
+ }
+ else if (CStyleCastExpr *cse = dyn_cast<CStyleCastExpr>(ce)) {
+ if (cse->getType()->isVoidType()) {
+ // e.g. (void) x;
+ if (lastLoad == cse->getSubExpr()) {
+ // Squelch any detected load of an uninitialized value if
+ // we cast it to void.
+ lastLoad = 0;
+ lastDR = 0;
+ }
+ }
+ }
+}
+
+void TransferFunctions::Visit(clang::Stmt *s) {
+ skipProcessUses = false;
+ StmtVisitor<TransferFunctions>::Visit(s);
+ if (!skipProcessUses)
+ ProcessUses(s);
+}
+
+void TransferFunctions::ProcessUses(Stmt *s) {
+ // This method is typically called after visiting a CFGElement statement
+ // in the CFG. We delay processing of reporting many loads of uninitialized
+ // values until here.
+ if (lastLoad) {
+ // If we just visited the lvalue-to-rvalue cast, there is nothing
+ // left to do.
+ if (lastLoad == s)
+ return;
+
+ const DeclRefExpr *DR =
+ cast<DeclRefExpr>(stripCasts(ac.getASTContext(),
+ lastLoad->getSubExpr()));
+ const VarDecl *VD = cast<VarDecl>(DR->getDecl());
+
+ // If we reach here, we may have seen a load of an uninitialized value
+ // and it hasn't been casted to void or otherwise handled. In this
+ // situation, report the incident.
+ if (isUninitialized(vals[VD]))
+ reportUninit(DR, VD, isAlwaysUninit(vals[VD]));
+
+ lastLoad = 0;
+
+ if (DR == lastDR) {
+ lastDR = 0;
+ return;
+ }
+ }
+
+ // Any other uses of 'lastDR' involve taking an lvalue of the variable.
+ // In this case, it "escapes" the analysis.
+ if (lastDR && lastDR != s) {
+ vals[cast<VarDecl>(lastDR->getDecl())] = Initialized;
+ lastDR = 0;
+ }
+}
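Illustration only, not part of this diff, of the escape handling at the end of ProcessUses:

  void ext(int *);
  int esc() {
    int x;
    ext(&x);    // '&x' is not a load; 'x' escapes and is treated as Initialized
    return x;   // no warning here, even though ext() might never write 'x'
  }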
+
+//------------------------------------------------------------------------====//
+// High-level "driver" logic for uninitialized values analysis.
+//====------------------------------------------------------------------------//
+
+static bool runOnBlock(const CFGBlock *block, const CFG &cfg,
+ AnalysisDeclContext &ac, CFGBlockValues &vals,
+ llvm::BitVector &wasAnalyzed,
+ UninitVariablesHandler *handler = 0) {
+
+ wasAnalyzed[block->getBlockID()] = true;
+
+ if (const BinaryOperator *b = getLogicalOperatorInChain(block)) {
+ CFGBlock::const_pred_iterator itr = block->pred_begin();
+ BVPair vA = vals.getValueVectors(*itr, false);
+ ++itr;
+ BVPair vB = vals.getValueVectors(*itr, false);
+
+ BVPair valsAB;
+
+ if (b->getOpcode() == BO_LAnd) {
+ // Merge the 'F' bits from the first and second.
+ vals.mergeIntoScratch(*(vA.second ? vA.second : vA.first), true);
+ vals.mergeIntoScratch(*(vB.second ? vB.second : vB.first), false);
+ valsAB.first = vA.first;
+ valsAB.second = &vals.getScratch();
+ } else {
+ // Merge the 'T' bits from the first and second.
+ assert(b->getOpcode() == BO_LOr);
+ vals.mergeIntoScratch(*vA.first, true);
+ vals.mergeIntoScratch(*vB.first, false);
+ valsAB.first = &vals.getScratch();
+ valsAB.second = vA.second ? vA.second : vA.first;
+ }
+ return vals.updateValueVectors(block, valsAB);
+ }
+
+ // Default behavior: merge in values of predecessor blocks.
+ vals.resetScratch();
+ bool isFirst = true;
+ for (CFGBlock::const_pred_iterator I = block->pred_begin(),
+ E = block->pred_end(); I != E; ++I) {
+ const CFGBlock *pred = *I;
+ if (wasAnalyzed[pred->getBlockID()]) {
+ vals.mergeIntoScratch(vals.getValueVector(pred, block), isFirst);
+ isFirst = false;
+ }
+ }
+ // Apply the transfer function.
+ TransferFunctions tf(vals, cfg, ac, handler);
+ for (CFGBlock::const_iterator I = block->begin(), E = block->end();
+ I != E; ++I) {
+ if (const CFGStmt *cs = dyn_cast<CFGStmt>(&*I)) {
+ tf.Visit(const_cast<Stmt*>(cs->getStmt()));
+ }
+ }
+ tf.ProcessUses();
+ return vals.updateValueVectorWithScratch(block);
+}
+
+void clang::runUninitializedVariablesAnalysis(
+ const DeclContext &dc,
+ const CFG &cfg,
+ AnalysisDeclContext &ac,
+ UninitVariablesHandler &handler,
+ UninitVariablesAnalysisStats &stats) {
+ CFGBlockValues vals(cfg);
+ vals.computeSetOfDeclarations(dc);
+ if (vals.hasNoDeclarations())
+ return;
+
+ stats.NumVariablesAnalyzed = vals.getNumEntries();
+
+ // Mark all variables uninitialized at the entry.
+ const CFGBlock &entry = cfg.getEntry();
+ for (CFGBlock::const_succ_iterator i = entry.succ_begin(),
+ e = entry.succ_end(); i != e; ++i) {
+ if (const CFGBlock *succ = *i) {
+ ValueVector &vec = vals.getValueVector(&entry, succ);
+ const unsigned n = vals.getNumEntries();
+ for (unsigned j = 0; j < n ; ++j) {
+ vec[j] = Uninitialized;
+ }
+ }
+ }
+
+ // Proceed with the worklist.
+ DataflowWorklist worklist(cfg);
+ llvm::BitVector previouslyVisited(cfg.getNumBlockIDs());
+ worklist.enqueueSuccessors(&cfg.getEntry());
+ llvm::BitVector wasAnalyzed(cfg.getNumBlockIDs(), false);
+ wasAnalyzed[cfg.getEntry().getBlockID()] = true;
+
+ while (const CFGBlock *block = worklist.dequeue()) {
+ // Did the block change?
+ bool changed = runOnBlock(block, cfg, ac, vals, wasAnalyzed);
+ ++stats.NumBlockVisits;
+ if (changed || !previouslyVisited[block->getBlockID()])
+ worklist.enqueueSuccessors(block);
+ previouslyVisited[block->getBlockID()] = true;
+ }
+
+ // Run through the blocks one more time, and report uninitialized variables.
+ for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
+ const CFGBlock *block = *BI;
+ if (wasAnalyzed[block->getBlockID()]) {
+ runOnBlock(block, cfg, ac, vals, wasAnalyzed, &handler);
+ ++stats.NumBlockVisits;
+ }
+ }
+}
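Illustration only, not part of this diff, of the two kinds of reports the handler receives:

  int always() {
    int x;
    return x;          // reported with isAlwaysUninit == true
  }
  int sometimes(bool c) {
    int x;
    if (c) x = 1;
    return x;          // merge gives MayUninitialized: isAlwaysUninit == false
  }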
+
+UninitVariablesHandler::~UninitVariablesHandler() {}
diff --git a/contrib/llvm/tools/clang/lib/Basic/Builtins.cpp b/contrib/llvm/tools/clang/lib/Basic/Builtins.cpp
new file mode 100644
index 0000000..c78a292
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/Builtins.cpp
@@ -0,0 +1,120 @@
+//===--- Builtins.cpp - Builtin function implementation -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements various things for builtin functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/SmallVector.h"
+using namespace clang;
+
+static const Builtin::Info BuiltinInfo[] = {
+ { "not a builtin function", 0, 0, 0, ALL_LANGUAGES },
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER, BUILTIN_LANG) { #ID, TYPE, ATTRS, HEADER,\
+ BUILTIN_LANG },
+#include "clang/Basic/Builtins.def"
+};
+
+const Builtin::Info &Builtin::Context::GetRecord(unsigned ID) const {
+ if (ID < Builtin::FirstTSBuiltin)
+ return BuiltinInfo[ID];
+ assert(ID - Builtin::FirstTSBuiltin < NumTSRecords && "Invalid builtin ID!");
+ return TSRecords[ID - Builtin::FirstTSBuiltin];
+}
+
+Builtin::Context::Context() {
+ // Get the target specific builtins from the target.
+ TSRecords = 0;
+ NumTSRecords = 0;
+}
+
+void Builtin::Context::InitializeTarget(const TargetInfo &Target) {
+ assert(NumTSRecords == 0 && "Already initialized target?");
+ Target.getTargetBuiltins(TSRecords, NumTSRecords);
+}
+
+/// InitializeBuiltins - Mark the identifiers for all the builtins with their
+/// appropriate builtin ID # and mark any non-portable builtin identifiers as
+/// such.
+void Builtin::Context::InitializeBuiltins(IdentifierTable &Table,
+ const LangOptions& LangOpts) {
+ // Step #1: mark all target-independent builtins with their ID's.
+ for (unsigned i = Builtin::NotBuiltin+1; i != Builtin::FirstTSBuiltin; ++i)
+ if (!LangOpts.NoBuiltin || !strchr(BuiltinInfo[i].Attributes, 'f')) {
+ if (LangOpts.ObjC1 ||
+ BuiltinInfo[i].builtin_lang != clang::OBJC_LANG)
+ Table.get(BuiltinInfo[i].Name).setBuiltinID(i);
+ }
+
+ // Step #2: Register target-specific builtins.
+ for (unsigned i = 0, e = NumTSRecords; i != e; ++i)
+ if (!LangOpts.NoBuiltin || !strchr(TSRecords[i].Attributes, 'f'))
+ Table.get(TSRecords[i].Name).setBuiltinID(i+Builtin::FirstTSBuiltin);
+}
+
+void
+Builtin::Context::GetBuiltinNames(SmallVectorImpl<const char *> &Names,
+ bool NoBuiltins) {
+ // Find all target-independent names.
+ for (unsigned i = Builtin::NotBuiltin+1; i != Builtin::FirstTSBuiltin; ++i)
+ if (!NoBuiltins || !strchr(BuiltinInfo[i].Attributes, 'f'))
+ Names.push_back(BuiltinInfo[i].Name);
+
+ // Find target-specific names.
+ for (unsigned i = 0, e = NumTSRecords; i != e; ++i)
+ if (!NoBuiltins || !strchr(TSRecords[i].Attributes, 'f'))
+ Names.push_back(TSRecords[i].Name);
+}
+
+void Builtin::Context::ForgetBuiltin(unsigned ID, IdentifierTable &Table) {
+ Table.get(GetRecord(ID).Name).setBuiltinID(0);
+}
+
+bool
+Builtin::Context::isPrintfLike(unsigned ID, unsigned &FormatIdx,
+ bool &HasVAListArg) {
+ const char *Printf = strpbrk(GetRecord(ID).Attributes, "pP");
+ if (!Printf)
+ return false;
+
+ HasVAListArg = (*Printf == 'P');
+
+ ++Printf;
+ assert(*Printf == ':' && "p or P specifier must be followed by a ':'");
+ ++Printf;
+
+ assert(strchr(Printf, ':') && "printf specifier must end with a ':'");
+ FormatIdx = strtol(Printf, 0, 10);
+ return true;
+}
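Worked example (illustrative only): for a builtin whose Attributes string contains "p:0:", isPrintfLike returns true with HasVAListArg == false and FormatIdx == 0; a string containing "P:1:" would instead yield HasVAListArg == true and FormatIdx == 1. isScanfLike below parses 's'/'S' the same way.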
+
+// FIXME: Refactor with isPrintfLike.
+bool
+Builtin::Context::isScanfLike(unsigned ID, unsigned &FormatIdx,
+ bool &HasVAListArg) {
+ const char *Scanf = strpbrk(GetRecord(ID).Attributes, "sS");
+ if (!Scanf)
+ return false;
+
+ HasVAListArg = (*Scanf == 'S');
+
+ ++Scanf;
+ assert(*Scanf == ':' && "s or S specifier must be followed by a ':'");
+ ++Scanf;
+
+ assert(strchr(Scanf, ':') && "printf specifier must end with a ':'");
+ FormatIdx = strtol(Scanf, 0, 10);
+ return true;
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c b/contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c
new file mode 100644
index 0000000..e197003
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/ConvertUTF.c
@@ -0,0 +1,564 @@
+/*===--- ConvertUTF.c - Universal Character Names conversions ---------------===
+ *
+ * The LLVM Compiler Infrastructure
+ *
+ * This file is distributed under the University of Illinois Open Source
+ * License. See LICENSE.TXT for details.
+ *
+ *===------------------------------------------------------------------------=*/
+/*
+ * Copyright 2001-2004 Unicode, Inc.
+ *
+ * Disclaimer
+ *
+ * This source code is provided as is by Unicode, Inc. No claims are
+ * made as to fitness for any particular purpose. No warranties of any
+ * kind are expressed or implied. The recipient agrees to determine
+ * applicability of information provided. If this file has been
+ * purchased on magnetic or optical media from Unicode, Inc., the
+ * sole remedy for any claim will be exchange of defective media
+ * within 90 days of receipt.
+ *
+ * Limitations on Rights to Redistribute This Code
+ *
+ * Unicode, Inc. hereby grants the right to freely use the information
+ * supplied in this file in the creation of products supporting the
+ * Unicode Standard, and to make copies of this file in any form
+ * for internal or external distribution as long as this notice
+ * remains attached.
+ */
+
+/* ---------------------------------------------------------------------
+
+ Conversions between UTF32, UTF-16, and UTF-8. Source code file.
+ Author: Mark E. Davis, 1994.
+ Rev History: Rick McGowan, fixes & updates May 2001.
+ Sept 2001: fixed const & error conditions per
+ mods suggested by S. Parent & A. Lillich.
+ June 2002: Tim Dodd added detection and handling of incomplete
+ source sequences, enhanced error detection, added casts
+ to eliminate compiler warnings.
+ July 2003: slight mods to back out aggressive FFFE detection.
+ Jan 2004: updated switches in from-UTF8 conversions.
+ Oct 2004: updated to use UNI_MAX_LEGAL_UTF32 in UTF-32 conversions.
+
+ See the header file "ConvertUTF.h" for complete documentation.
+
+------------------------------------------------------------------------ */
+
+
+#include "clang/Basic/ConvertUTF.h"
+#ifdef CVTUTF_DEBUG
+#include <stdio.h>
+#endif
+
+static const int halfShift = 10; /* used for shifting by 10 bits */
+
+static const UTF32 halfBase = 0x0010000UL;
+static const UTF32 halfMask = 0x3FFUL;
+
+#define UNI_SUR_HIGH_START (UTF32)0xD800
+#define UNI_SUR_HIGH_END (UTF32)0xDBFF
+#define UNI_SUR_LOW_START (UTF32)0xDC00
+#define UNI_SUR_LOW_END (UTF32)0xDFFF
+#define false 0
+#define true 1
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Index into the table below with the first byte of a UTF-8 sequence to
+ * get the number of trailing bytes that are supposed to follow it.
+ * Note that *legal* UTF-8 sequences never have 4 or 5 trailing bytes. The
+ * table is left as-is for anyone who may want to do such conversions, which
+ * were allowed in earlier algorithms.
+ */
+static const char trailingBytesForUTF8[256] = {
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
+ 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
+ 2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5
+};
+
+/*
+ * Magic values subtracted from a buffer value during UTF8 conversion.
+ * This table contains as many values as there might be trailing bytes
+ * in a UTF-8 sequence.
+ */
+static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL,
+ 0x03C82080UL, 0xFA082080UL, 0x82082080UL };
+
+/*
+ * Once the bits are split out into bytes of UTF-8, this is a mask OR-ed
+ * into the first byte, depending on how many bytes follow. There are
+ * as many entries in this table as there are UTF-8 sequence types.
+ * (I.e., one byte sequence, two byte... etc.). Remember that sequences
+ * for *legal* UTF-8 will be 4 or fewer bytes total.
+ */
+static const UTF8 firstByteMark[7] = { 0x00, 0x00, 0xC0, 0xE0, 0xF0, 0xF8, 0xFC };
+
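As a worked example of how trailingBytesForUTF8 and offsetsFromUTF8 cooperate in the fall-through decoders further down, here is a minimal sketch that decodes the three-byte sequence E2 82 AC by hand (trailingBytesForUTF8[0xE2] is 2, and offsetsFromUTF8[2] is 0x000E2080):

    #include <cassert>

    int main() {
      unsigned ch = 0;
      ch += 0xE2; ch <<= 6;   // lead byte
      ch += 0x82; ch <<= 6;   // first continuation byte
      ch += 0xAC;             // second continuation byte
      ch -= 0x000E2080;       // offsetsFromUTF8[2] strips the tag bits
      assert(ch == 0x20AC);   // EURO SIGN
      return 0;
    }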
+/* --------------------------------------------------------------------- */
+
+/* The interface converts a whole buffer to avoid function-call overhead.
+ * Constants have been gathered. Loops & conditionals have been removed as
+ * much as possible for efficiency, in favor of drop-through switches.
+ * (See "Note A" at the bottom of the file for equivalent code.)
+ * If your compiler supports it, the "isLegalUTF8" call can be turned
+ * into an inline function.
+ */
+
+#ifdef CLANG_NEEDS_THESE_ONE_DAY
+
+/* --------------------------------------------------------------------- */
+
+ConversionResult ConvertUTF32toUTF16 (
+ const UTF32** sourceStart, const UTF32* sourceEnd,
+ UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags) {
+ ConversionResult result = conversionOK;
+ const UTF32* source = *sourceStart;
+ UTF16* target = *targetStart;
+ while (source < sourceEnd) {
+ UTF32 ch;
+ if (target >= targetEnd) {
+ result = targetExhausted; break;
+ }
+ ch = *source++;
+ if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */
+ /* UTF-16 surrogate values are illegal in UTF-32; 0xffff or 0xfffe are both reserved values */
+ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
+ if (flags == strictConversion) {
+ --source; /* return to the illegal value itself */
+ result = sourceIllegal;
+ break;
+ } else {
+ *target++ = UNI_REPLACEMENT_CHAR;
+ }
+ } else {
+ *target++ = (UTF16)ch; /* normal case */
+ }
+ } else if (ch > UNI_MAX_LEGAL_UTF32) {
+ if (flags == strictConversion) {
+ result = sourceIllegal;
+ } else {
+ *target++ = UNI_REPLACEMENT_CHAR;
+ }
+ } else {
+ /* target is a character in range 0xFFFF - 0x10FFFF. */
+ if (target + 1 >= targetEnd) {
+ --source; /* Back up source pointer! */
+ result = targetExhausted; break;
+ }
+ ch -= halfBase;
+ *target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START);
+ *target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START);
+ }
+ }
+ *sourceStart = source;
+ *targetStart = target;
+ return result;
+}
+
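To make the halfBase/halfShift arithmetic concrete (the same computation reappears in ConvertUTF8toUTF16 below), a small sketch splitting one supplementary-plane code point into its surrogate pair:

    #include <cassert>

    int main() {
      const unsigned halfBase = 0x10000, halfShift = 10, halfMask = 0x3FF;
      unsigned ch = 0x1F600;                     // a supplementary-plane code point
      ch -= halfBase;                            // 0x0F600
      unsigned hi = (ch >> halfShift) + 0xD800;  // UNI_SUR_HIGH_START
      unsigned lo = (ch & halfMask) + 0xDC00;    // UNI_SUR_LOW_START
      assert(hi == 0xD83D && lo == 0xDE00);      // UTF-16: D83D DE00
      return 0;
    }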
+/* --------------------------------------------------------------------- */
+
+ConversionResult ConvertUTF16toUTF32 (
+ const UTF16** sourceStart, const UTF16* sourceEnd,
+ UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) {
+ ConversionResult result = conversionOK;
+ const UTF16* source = *sourceStart;
+ UTF32* target = *targetStart;
+ UTF32 ch, ch2;
+ while (source < sourceEnd) {
+ const UTF16* oldSource = source; /* In case we have to back up because of target overflow. */
+ ch = *source++;
+ /* If we have a surrogate pair, convert to UTF32 first. */
+ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
+ /* If the 16 bits following the high surrogate are in the source buffer... */
+ if (source < sourceEnd) {
+ ch2 = *source;
+ /* If it's a low surrogate, convert to UTF32. */
+ if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) {
+ ch = ((ch - UNI_SUR_HIGH_START) << halfShift)
+ + (ch2 - UNI_SUR_LOW_START) + halfBase;
+ ++source;
+ } else if (flags == strictConversion) { /* it's an unpaired high surrogate */
+ --source; /* return to the illegal value itself */
+ result = sourceIllegal;
+ break;
+ }
+ } else { /* We don't have the 16 bits following the high surrogate. */
+ --source; /* return to the high surrogate */
+ result = sourceExhausted;
+ break;
+ }
+ } else if (flags == strictConversion) {
+ /* UTF-16 surrogate values are illegal in UTF-32 */
+ if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) {
+ --source; /* return to the illegal value itself */
+ result = sourceIllegal;
+ break;
+ }
+ }
+ if (target >= targetEnd) {
+ source = oldSource; /* Back up source pointer! */
+ result = targetExhausted; break;
+ }
+ *target++ = ch;
+ }
+ *sourceStart = source;
+ *targetStart = target;
+#ifdef CVTUTF_DEBUG
+if (result == sourceIllegal) {
+ fprintf(stderr, "ConvertUTF16toUTF32 illegal seq 0x%04x,%04x\n", ch, ch2);
+ fflush(stderr);
+}
+#endif
+ return result;
+}
+ConversionResult ConvertUTF16toUTF8 (
+ const UTF16** sourceStart, const UTF16* sourceEnd,
+ UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags) {
+ ConversionResult result = conversionOK;
+ const UTF16* source = *sourceStart;
+ UTF8* target = *targetStart;
+ while (source < sourceEnd) {
+ UTF32 ch;
+ unsigned short bytesToWrite = 0;
+ const UTF32 byteMask = 0xBF;
+ const UTF32 byteMark = 0x80;
+ const UTF16* oldSource = source; /* In case we have to back up because of target overflow. */
+ ch = *source++;
+ /* If we have a surrogate pair, convert to UTF32 first. */
+ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_HIGH_END) {
+ /* If the 16 bits following the high surrogate are in the source buffer... */
+ if (source < sourceEnd) {
+ UTF32 ch2 = *source;
+ /* If it's a low surrogate, convert to UTF32. */
+ if (ch2 >= UNI_SUR_LOW_START && ch2 <= UNI_SUR_LOW_END) {
+ ch = ((ch - UNI_SUR_HIGH_START) << halfShift)
+ + (ch2 - UNI_SUR_LOW_START) + halfBase;
+ ++source;
+ } else if (flags == strictConversion) { /* it's an unpaired high surrogate */
+ --source; /* return to the illegal value itself */
+ result = sourceIllegal;
+ break;
+ }
+ } else { /* We don't have the 16 bits following the high surrogate. */
+ --source; /* return to the high surrogate */
+ result = sourceExhausted;
+ break;
+ }
+ } else if (flags == strictConversion) {
+ /* UTF-16 surrogate values are illegal in UTF-32 */
+ if (ch >= UNI_SUR_LOW_START && ch <= UNI_SUR_LOW_END) {
+ --source; /* return to the illegal value itself */
+ result = sourceIllegal;
+ break;
+ }
+ }
+ /* Figure out how many bytes the result will require */
+ if (ch < (UTF32)0x80) { bytesToWrite = 1;
+ } else if (ch < (UTF32)0x800) { bytesToWrite = 2;
+ } else if (ch < (UTF32)0x10000) { bytesToWrite = 3;
+ } else if (ch < (UTF32)0x110000) { bytesToWrite = 4;
+ } else { bytesToWrite = 3;
+ ch = UNI_REPLACEMENT_CHAR;
+ }
+
+ target += bytesToWrite;
+ if (target > targetEnd) {
+ source = oldSource; /* Back up source pointer! */
+ target -= bytesToWrite; result = targetExhausted; break;
+ }
+ switch (bytesToWrite) { /* note: everything falls through. */
+ case 4: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+ case 3: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+ case 2: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+ case 1: *--target = (UTF8)(ch | firstByteMark[bytesToWrite]);
+ }
+ target += bytesToWrite;
+ }
+ *sourceStart = source;
+ *targetStart = target;
+ return result;
+}
+
+/* --------------------------------------------------------------------- */
+
+ConversionResult ConvertUTF32toUTF8 (
+ const UTF32** sourceStart, const UTF32* sourceEnd,
+ UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags) {
+ ConversionResult result = conversionOK;
+ const UTF32* source = *sourceStart;
+ UTF8* target = *targetStart;
+ while (source < sourceEnd) {
+ UTF32 ch;
+ unsigned short bytesToWrite = 0;
+ const UTF32 byteMask = 0xBF;
+ const UTF32 byteMark = 0x80;
+ ch = *source++;
+ if (flags == strictConversion ) {
+ /* UTF-16 surrogate values are illegal in UTF-32 */
+ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
+ --source; /* return to the illegal value itself */
+ result = sourceIllegal;
+ break;
+ }
+ }
+ /*
+ * Figure out how many bytes the result will require. Turn any
+ * illegally large UTF32 things (> Plane 17) into replacement chars.
+ */
+ if (ch < (UTF32)0x80) { bytesToWrite = 1;
+ } else if (ch < (UTF32)0x800) { bytesToWrite = 2;
+ } else if (ch < (UTF32)0x10000) { bytesToWrite = 3;
+ } else if (ch <= UNI_MAX_LEGAL_UTF32) { bytesToWrite = 4;
+ } else { bytesToWrite = 3;
+ ch = UNI_REPLACEMENT_CHAR;
+ result = sourceIllegal;
+ }
+
+ target += bytesToWrite;
+ if (target > targetEnd) {
+ --source; /* Back up source pointer! */
+ target -= bytesToWrite; result = targetExhausted; break;
+ }
+ switch (bytesToWrite) { /* note: everything falls through. */
+ case 4: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+ case 3: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+ case 2: *--target = (UTF8)((ch | byteMark) & byteMask); ch >>= 6;
+ case 1: *--target = (UTF8) (ch | firstByteMark[bytesToWrite]);
+ }
+ target += bytesToWrite;
+ }
+ *sourceStart = source;
+ *targetStart = target;
+ return result;
+}
+
+#endif
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Utility routine to tell whether a sequence of bytes is legal UTF-8.
+ * This must be called with the length pre-determined by the first byte.
+ * If not calling this from ConvertUTF8to*, then the length can be set by:
+ * length = trailingBytesForUTF8[*source]+1;
+ * and the sequence is illegal right away if there aren't that many bytes
+ * available.
+ * If presented with a length > 4, this returns false. The Unicode
+ * definition of UTF-8 goes up to 4-byte sequences.
+ */
+
+static Boolean isLegalUTF8(const UTF8 *source, int length) {
+ UTF8 a;
+ const UTF8 *srcptr = source+length;
+ switch (length) {
+ default: return false;
+ /* Everything else falls through when "true"... */
+ case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
+ case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false;
+ case 2: if ((a = (*--srcptr)) > 0xBF) return false;
+
+ switch (*source) {
+ /* no fall-through in this inner switch */
+ case 0xE0: if (a < 0xA0) return false; break;
+ case 0xED: if (a > 0x9F) return false; break;
+ case 0xF0: if (a < 0x90) return false; break;
+ case 0xF4: if (a > 0x8F) return false; break;
+ default: if (a < 0x80) return false;
+ }
+
+ case 1: if (*source >= 0x80 && *source < 0xC2) return false;
+ }
+ if (*source > 0xF4) return false;
+ return true;
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Exported function to return whether a UTF-8 sequence is legal or not.
+ * This is not used here; it's just exported.
+ */
+Boolean isLegalUTF8Sequence(const UTF8 *source, const UTF8 *sourceEnd) {
+ int length = trailingBytesForUTF8[*source]+1;
+ if (length > sourceEnd - source) {
+ return false;
+ }
+ return isLegalUTF8(source, length);
+}
+
+/* --------------------------------------------------------------------- */
+
+/*
+ * Exported function to return whether a UTF-8 string is legal or not.
+ * This is not used here; it's just exported.
+ */
+Boolean isLegalUTF8String(const UTF8 *source, const UTF8 *sourceEnd) {
+ while (source != sourceEnd) {
+ int length = trailingBytesForUTF8[*source] + 1;
+ if (length > sourceEnd - source || !isLegalUTF8(source, length))
+ return false;
+ source += length;
+ }
+ return true;
+}
+
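Assuming the matching declarations in clang/Basic/ConvertUTF.h (the header included at the top of this file), the exported validity checkers can be called directly; a minimal sketch:

    #include "clang/Basic/ConvertUTF.h"
    #include <cassert>

    int main() {
      const UTF8 Euro[]      = { 0xE2, 0x82, 0xAC };  // complete U+20AC sequence
      const UTF8 Truncated[] = { 0xE2, 0x82 };        // missing the last byte

      assert(isLegalUTF8String(Euro, Euro + 3));
      assert(!isLegalUTF8String(Truncated, Truncated + 2));
      return 0;
    }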
+/* --------------------------------------------------------------------- */
+
+ConversionResult ConvertUTF8toUTF16 (
+ const UTF8** sourceStart, const UTF8* sourceEnd,
+ UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags) {
+ ConversionResult result = conversionOK;
+ const UTF8* source = *sourceStart;
+ UTF16* target = *targetStart;
+ while (source < sourceEnd) {
+ UTF32 ch = 0;
+ unsigned short extraBytesToRead = trailingBytesForUTF8[*source];
+ if (extraBytesToRead >= sourceEnd - source) {
+ result = sourceExhausted; break;
+ }
+ /* Do this check whether lenient or strict */
+ if (!isLegalUTF8(source, extraBytesToRead+1)) {
+ result = sourceIllegal;
+ break;
+ }
+ /*
+ * The cases all fall through. See "Note A" below.
+ */
+ switch (extraBytesToRead) {
+ case 5: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */
+ case 4: ch += *source++; ch <<= 6; /* remember, illegal UTF-8 */
+ case 3: ch += *source++; ch <<= 6;
+ case 2: ch += *source++; ch <<= 6;
+ case 1: ch += *source++; ch <<= 6;
+ case 0: ch += *source++;
+ }
+ ch -= offsetsFromUTF8[extraBytesToRead];
+
+ if (target >= targetEnd) {
+ source -= (extraBytesToRead+1); /* Back up source pointer! */
+ result = targetExhausted; break;
+ }
+ if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */
+ /* UTF-16 surrogate values are illegal in UTF-32 */
+ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
+ if (flags == strictConversion) {
+ source -= (extraBytesToRead+1); /* return to the illegal value itself */
+ result = sourceIllegal;
+ break;
+ } else {
+ *target++ = UNI_REPLACEMENT_CHAR;
+ }
+ } else {
+ *target++ = (UTF16)ch; /* normal case */
+ }
+ } else if (ch > UNI_MAX_UTF16) {
+ if (flags == strictConversion) {
+ result = sourceIllegal;
+ source -= (extraBytesToRead+1); /* return to the start */
+ break; /* Bail out; shouldn't continue */
+ } else {
+ *target++ = UNI_REPLACEMENT_CHAR;
+ }
+ } else {
+ /* target is a character in range 0xFFFF - 0x10FFFF. */
+ if (target + 1 >= targetEnd) {
+ source -= (extraBytesToRead+1); /* Back up source pointer! */
+ result = targetExhausted; break;
+ }
+ ch -= halfBase;
+ *target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START);
+ *target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START);
+ }
+ }
+ *sourceStart = source;
+ *targetStart = target;
+ return result;
+}
+
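The converters use an in/out pointer-pair convention: on return, *sourceStart and *targetStart have been advanced past whatever was consumed and produced, and the result code says why the loop stopped. A usage sketch under the same header assumption as above:

    #include "clang/Basic/ConvertUTF.h"
    #include <cassert>

    int main() {
      const UTF8 Src[] = { 0xE2, 0x82, 0xAC };   // U+20AC
      const UTF8 *SrcPtr = Src;
      UTF16 Out[4];
      UTF16 *OutPtr = Out;

      ConversionResult R =
          ConvertUTF8toUTF16(&SrcPtr, Src + 3, &OutPtr, Out + 4, strictConversion);

      assert(R == conversionOK);
      assert(SrcPtr == Src + 3);   // all input consumed
      assert(OutPtr == Out + 1);   // one UTF-16 unit written
      assert(Out[0] == 0x20AC);
      return 0;
    }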
+/* --------------------------------------------------------------------- */
+
+ConversionResult ConvertUTF8toUTF32 (
+ const UTF8** sourceStart, const UTF8* sourceEnd,
+ UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) {
+ ConversionResult result = conversionOK;
+ const UTF8* source = *sourceStart;
+ UTF32* target = *targetStart;
+ while (source < sourceEnd) {
+ UTF32 ch = 0;
+ unsigned short extraBytesToRead = trailingBytesForUTF8[*source];
+ if (extraBytesToRead >= sourceEnd - source) {
+ result = sourceExhausted; break;
+ }
+ /* Do this check whether lenient or strict */
+ if (!isLegalUTF8(source, extraBytesToRead+1)) {
+ result = sourceIllegal;
+ break;
+ }
+ /*
+ * The cases all fall through. See "Note A" below.
+ */
+ switch (extraBytesToRead) {
+ case 5: ch += *source++; ch <<= 6;
+ case 4: ch += *source++; ch <<= 6;
+ case 3: ch += *source++; ch <<= 6;
+ case 2: ch += *source++; ch <<= 6;
+ case 1: ch += *source++; ch <<= 6;
+ case 0: ch += *source++;
+ }
+ ch -= offsetsFromUTF8[extraBytesToRead];
+
+ if (target >= targetEnd) {
+ source -= (extraBytesToRead+1); /* Back up the source pointer! */
+ result = targetExhausted; break;
+ }
+ if (ch <= UNI_MAX_LEGAL_UTF32) {
+ /*
+ * UTF-16 surrogate values are illegal in UTF-32, and anything
+ * over Plane 17 (> 0x10FFFF) is illegal.
+ */
+ if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
+ if (flags == strictConversion) {
+ source -= (extraBytesToRead+1); /* return to the illegal value itself */
+ result = sourceIllegal;
+ break;
+ } else {
+ *target++ = UNI_REPLACEMENT_CHAR;
+ }
+ } else {
+ *target++ = ch;
+ }
+ } else { /* i.e., ch > UNI_MAX_LEGAL_UTF32 */
+ result = sourceIllegal;
+ *target++ = UNI_REPLACEMENT_CHAR;
+ }
+ }
+ *sourceStart = source;
+ *targetStart = target;
+ return result;
+}
+
+/* ---------------------------------------------------------------------
+
+ Note A.
+ The fall-through switches in UTF-8 reading code save a
+ temp variable, some decrements & conditionals. The switches
+ are equivalent to the following loop:
+ {
+ int tmpBytesToRead = extraBytesToRead+1;
+ do {
+ ch += *source++;
+ --tmpBytesToRead;
+ if (tmpBytesToRead) ch <<= 6;
+ } while (tmpBytesToRead > 0);
+ }
+ In UTF-8 writing code, the switches on "bytesToWrite" are
+ similarly unrolled loops.
+
+ --------------------------------------------------------------------- */
diff --git a/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
new file mode 100644
index 0000000..f7d5d87
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/Diagnostic.cpp
@@ -0,0 +1,878 @@
+//===--- Diagnostic.cpp - C Language Family Diagnostic Handling -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Diagnostic-related interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/CrashRecoveryContext.h"
+
+using namespace clang;
+
+static void DummyArgToStringFn(DiagnosticsEngine::ArgumentKind AK, intptr_t QT,
+ const char *Modifier, unsigned ML,
+ const char *Argument, unsigned ArgLen,
+ const DiagnosticsEngine::ArgumentValue *PrevArgs,
+ unsigned NumPrevArgs,
+ SmallVectorImpl<char> &Output,
+ void *Cookie,
+ ArrayRef<intptr_t> QualTypeVals) {
+ const char *Str = "<can't format argument>";
+ Output.append(Str, Str+strlen(Str));
+}
+
+
+DiagnosticsEngine::DiagnosticsEngine(
+ const IntrusiveRefCntPtr<DiagnosticIDs> &diags,
+ DiagnosticConsumer *client, bool ShouldOwnClient)
+ : Diags(diags), Client(client), OwnsDiagClient(ShouldOwnClient),
+ SourceMgr(0) {
+ ArgToStringFn = DummyArgToStringFn;
+ ArgToStringCookie = 0;
+
+ AllExtensionsSilenced = 0;
+ IgnoreAllWarnings = false;
+ WarningsAsErrors = false;
+ EnableAllWarnings = false;
+ ErrorsAsFatal = false;
+ SuppressSystemWarnings = false;
+ SuppressAllDiagnostics = false;
+ ShowOverloads = Ovl_All;
+ ExtBehavior = Ext_Ignore;
+
+ ErrorLimit = 0;
+ TemplateBacktraceLimit = 0;
+ ConstexprBacktraceLimit = 0;
+
+ Reset();
+}
+
+DiagnosticsEngine::~DiagnosticsEngine() {
+ if (OwnsDiagClient)
+ delete Client;
+}
+
+void DiagnosticsEngine::setClient(DiagnosticConsumer *client,
+ bool ShouldOwnClient) {
+ if (OwnsDiagClient && Client)
+ delete Client;
+
+ Client = client;
+ OwnsDiagClient = ShouldOwnClient;
+}
+
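A brief sketch of how the constructor and the setClient ownership flag above are typically combined; it assumes IgnoringDiagConsumer (whose anchor appears later in this file) is default-constructible and simply drops every diagnostic handed to it:

    #include "clang/Basic/Diagnostic.h"
    #include "clang/Basic/DiagnosticIDs.h"

    int main() {
      // Reference-counted ID table shared between engines.
      llvm::IntrusiveRefCntPtr<clang::DiagnosticIDs> IDs(new clang::DiagnosticIDs());

      clang::IgnoringDiagConsumer Consumer;    // assumption: default-constructible

      // ShouldOwnClient = false, so the engine will not delete &Consumer.
      clang::DiagnosticsEngine Diags(IDs, &Consumer, /*ShouldOwnClient=*/false);

      Diags.Reset();   // re-establishes the command-line DiagState, as above
      return 0;
    }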
+void DiagnosticsEngine::pushMappings(SourceLocation Loc) {
+ DiagStateOnPushStack.push_back(GetCurDiagState());
+}
+
+bool DiagnosticsEngine::popMappings(SourceLocation Loc) {
+ if (DiagStateOnPushStack.empty())
+ return false;
+
+ if (DiagStateOnPushStack.back() != GetCurDiagState()) {
+ // State changed at some point between push/pop.
+ PushDiagStatePoint(DiagStateOnPushStack.back(), Loc);
+ }
+ DiagStateOnPushStack.pop_back();
+ return true;
+}
+
+void DiagnosticsEngine::Reset() {
+ ErrorOccurred = false;
+ FatalErrorOccurred = false;
+ UnrecoverableErrorOccurred = false;
+
+ NumWarnings = 0;
+ NumErrors = 0;
+ NumErrorsSuppressed = 0;
+ TrapNumErrorsOccurred = 0;
+ TrapNumUnrecoverableErrorsOccurred = 0;
+
+ CurDiagID = ~0U;
+ // Set LastDiagLevel to an "unset" state. If we set it to 'Ignored', notes
+ // emitted through a DiagnosticsEngine associated with one translation unit,
+ // following diagnostics from a DiagnosticsEngine associated with another
+ // translation unit, would not be displayed.
+ LastDiagLevel = (DiagnosticIDs::Level)-1;
+ DelayedDiagID = 0;
+
+ // Clear state related to #pragma diagnostic.
+ DiagStates.clear();
+ DiagStatePoints.clear();
+ DiagStateOnPushStack.clear();
+
+ // Create a DiagState and DiagStatePoint representing diagnostic changes
+ // through command-line.
+ DiagStates.push_back(DiagState());
+ PushDiagStatePoint(&DiagStates.back(), SourceLocation());
+}
+
+void DiagnosticsEngine::SetDelayedDiagnostic(unsigned DiagID, StringRef Arg1,
+ StringRef Arg2) {
+ if (DelayedDiagID)
+ return;
+
+ DelayedDiagID = DiagID;
+ DelayedDiagArg1 = Arg1.str();
+ DelayedDiagArg2 = Arg2.str();
+}
+
+void DiagnosticsEngine::ReportDelayed() {
+ Report(DelayedDiagID) << DelayedDiagArg1 << DelayedDiagArg2;
+ DelayedDiagID = 0;
+ DelayedDiagArg1.clear();
+ DelayedDiagArg2.clear();
+}
+
+DiagnosticsEngine::DiagStatePointsTy::iterator
+DiagnosticsEngine::GetDiagStatePointForLoc(SourceLocation L) const {
+ assert(!DiagStatePoints.empty());
+ assert(DiagStatePoints.front().Loc.isInvalid() &&
+ "Should have created a DiagStatePoint for command-line");
+
+ FullSourceLoc Loc(L, *SourceMgr);
+ if (Loc.isInvalid())
+ return DiagStatePoints.end() - 1;
+
+ DiagStatePointsTy::iterator Pos = DiagStatePoints.end();
+ FullSourceLoc LastStateChangePos = DiagStatePoints.back().Loc;
+ if (LastStateChangePos.isValid() &&
+ Loc.isBeforeInTranslationUnitThan(LastStateChangePos))
+ Pos = std::upper_bound(DiagStatePoints.begin(), DiagStatePoints.end(),
+ DiagStatePoint(0, Loc));
+ --Pos;
+ return Pos;
+}
+
+/// \brief This allows the client to specify that certain
+/// warnings are ignored. Notes can never be mapped, errors can only be
+/// mapped to fatal, and WARNINGs and EXTENSIONs can be mapped arbitrarily.
+///
+/// \param L The source location at which this change of diagnostic state
+/// takes effect. It can be null if we are setting the latest state.
+void DiagnosticsEngine::setDiagnosticMapping(diag::kind Diag, diag::Mapping Map,
+ SourceLocation L) {
+ assert(Diag < diag::DIAG_UPPER_LIMIT &&
+ "Can only map builtin diagnostics");
+ assert((Diags->isBuiltinWarningOrExtension(Diag) ||
+ (Map == diag::MAP_FATAL || Map == diag::MAP_ERROR)) &&
+ "Cannot map errors into warnings!");
+ assert(!DiagStatePoints.empty());
+
+ FullSourceLoc Loc(L, *SourceMgr);
+ FullSourceLoc LastStateChangePos = DiagStatePoints.back().Loc;
+ // Don't allow a mapping to a warning to override an error/fatal mapping.
+ if (Map == diag::MAP_WARNING) {
+ DiagnosticMappingInfo &Info = GetCurDiagState()->getOrAddMappingInfo(Diag);
+ if (Info.getMapping() == diag::MAP_ERROR ||
+ Info.getMapping() == diag::MAP_FATAL)
+ Map = Info.getMapping();
+ }
+ DiagnosticMappingInfo MappingInfo = makeMappingInfo(Map, L);
+
+ // Common case; setting all the diagnostics of a group in one place.
+ if (Loc.isInvalid() || Loc == LastStateChangePos) {
+ GetCurDiagState()->setMappingInfo(Diag, MappingInfo);
+ return;
+ }
+
+ // Another common case; modifying diagnostic state in a source location
+ // after the previous one.
+ if ((Loc.isValid() && LastStateChangePos.isInvalid()) ||
+ LastStateChangePos.isBeforeInTranslationUnitThan(Loc)) {
+ // A diagnostic pragma occurred, create a new DiagState initialized with
+ // the current one and a new DiagStatePoint to record at which location
+ // the new state became active.
+ DiagStates.push_back(*GetCurDiagState());
+ PushDiagStatePoint(&DiagStates.back(), Loc);
+ GetCurDiagState()->setMappingInfo(Diag, MappingInfo);
+ return;
+ }
+
+ // We allow setting the diagnostic state in random source order for
+ // completeness, but it should not actually happen in normal practice.
+
+ DiagStatePointsTy::iterator Pos = GetDiagStatePointForLoc(Loc);
+ assert(Pos != DiagStatePoints.end());
+
+ // Update all diagnostic states that are active after the given location.
+ for (DiagStatePointsTy::iterator
+ I = Pos+1, E = DiagStatePoints.end(); I != E; ++I) {
+ I->State->setMappingInfo(Diag, MappingInfo);
+ }
+
+ // If the location corresponds to an existing point, just update its state.
+ if (Pos->Loc == Loc) {
+ Pos->State->setMappingInfo(Diag, MappingInfo);
+ return;
+ }
+
+ // Create a new state/point and fit it into the vector of DiagStatePoints
+ // so that the vector is always ordered according to location.
+ assert(Pos->Loc.isBeforeInTranslationUnitThan(Loc) &&
+ "Insertion point must come after the found DiagStatePoint");
+ DiagStates.push_back(*Pos->State);
+ DiagState *NewState = &DiagStates.back();
+ NewState->setMappingInfo(Diag, MappingInfo);
+ DiagStatePoints.insert(Pos+1, DiagStatePoint(NewState,
+ FullSourceLoc(Loc, *SourceMgr)));
+}
+
+bool DiagnosticsEngine::setDiagnosticGroupMapping(
+ StringRef Group, diag::Mapping Map, SourceLocation Loc)
+{
+ // Get the diagnostics in this group.
+ llvm::SmallVector<diag::kind, 8> GroupDiags;
+ if (Diags->getDiagnosticsInGroup(Group, GroupDiags))
+ return true;
+
+ // Set the mapping.
+ for (unsigned i = 0, e = GroupDiags.size(); i != e; ++i)
+ setDiagnosticMapping(GroupDiags[i], Map, Loc);
+
+ return false;
+}
+
+void DiagnosticsEngine::setDiagnosticWarningAsError(diag::kind Diag,
+ bool Enabled) {
+ // If we are enabling this feature, just set the diagnostic mappings to map to
+ // errors.
+ if (Enabled)
+ return setDiagnosticMapping(Diag, diag::MAP_ERROR, SourceLocation());
+
+ // Otherwise, we want to set the diagnostic mapping's "no Werror" bit, and
+ // potentially downgrade anything already mapped to be a warning.
+ DiagnosticMappingInfo &Info = GetCurDiagState()->getOrAddMappingInfo(Diag);
+
+ if (Info.getMapping() == diag::MAP_ERROR ||
+ Info.getMapping() == diag::MAP_FATAL)
+ Info.setMapping(diag::MAP_WARNING);
+
+ Info.setNoWarningAsError(true);
+}
+
+bool DiagnosticsEngine::setDiagnosticGroupWarningAsError(StringRef Group,
+ bool Enabled) {
+ // If we are enabling this feature, just set the diagnostic mappings to map to
+ // errors.
+ if (Enabled)
+ return setDiagnosticGroupMapping(Group, diag::MAP_ERROR);
+
+ // Otherwise, we want to set the diagnostic mapping's "no Werror" bit, and
+ // potentially downgrade anything already mapped to be a warning.
+
+ // Get the diagnostics in this group.
+ llvm::SmallVector<diag::kind, 8> GroupDiags;
+ if (Diags->getDiagnosticsInGroup(Group, GroupDiags))
+ return true;
+
+ // Perform the mapping change.
+ for (unsigned i = 0, e = GroupDiags.size(); i != e; ++i) {
+ DiagnosticMappingInfo &Info = GetCurDiagState()->getOrAddMappingInfo(
+ GroupDiags[i]);
+
+ if (Info.getMapping() == diag::MAP_ERROR ||
+ Info.getMapping() == diag::MAP_FATAL)
+ Info.setMapping(diag::MAP_WARNING);
+
+ Info.setNoWarningAsError(true);
+ }
+
+ return false;
+}
+
+void DiagnosticsEngine::setDiagnosticErrorAsFatal(diag::kind Diag,
+ bool Enabled) {
+ // If we are enabling this feature, just set the diagnostic mappings to map to
+ // errors.
+ if (Enabled)
+ return setDiagnosticMapping(Diag, diag::MAP_FATAL, SourceLocation());
+
+ // Otherwise, we want to set the diagnostic mapping's "no error-as-fatal" bit,
+ // and potentially downgrade anything already mapped to be fatal to an error.
+ DiagnosticMappingInfo &Info = GetCurDiagState()->getOrAddMappingInfo(Diag);
+
+ if (Info.getMapping() == diag::MAP_FATAL)
+ Info.setMapping(diag::MAP_ERROR);
+
+ Info.setNoErrorAsFatal(true);
+}
+
+bool DiagnosticsEngine::setDiagnosticGroupErrorAsFatal(StringRef Group,
+ bool Enabled) {
+ // If we are enabling this feature, just set the diagnostic mappings to map to
+ // fatal errors.
+ if (Enabled)
+ return setDiagnosticGroupMapping(Group, diag::MAP_FATAL);
+
+ // Otherwise, we want to set the diagnostic mapping's "no error-as-fatal" bit,
+ // and potentially downgrade anything already mapped to be fatal to an error.
+
+ // Get the diagnostics in this group.
+ llvm::SmallVector<diag::kind, 8> GroupDiags;
+ if (Diags->getDiagnosticsInGroup(Group, GroupDiags))
+ return true;
+
+ // Perform the mapping change.
+ for (unsigned i = 0, e = GroupDiags.size(); i != e; ++i) {
+ DiagnosticMappingInfo &Info = GetCurDiagState()->getOrAddMappingInfo(
+ GroupDiags[i]);
+
+ if (Info.getMapping() == diag::MAP_FATAL)
+ Info.setMapping(diag::MAP_ERROR);
+
+ Info.setNoErrorAsFatal(true);
+ }
+
+ return false;
+}
+
+void DiagnosticsEngine::setMappingToAllDiagnostics(diag::Mapping Map,
+ SourceLocation Loc) {
+ // Get all the diagnostics.
+ llvm::SmallVector<diag::kind, 64> AllDiags;
+ Diags->getAllDiagnostics(AllDiags);
+
+ // Set the mapping.
+ for (unsigned i = 0, e = AllDiags.size(); i != e; ++i)
+ if (Diags->isBuiltinWarningOrExtension(AllDiags[i]))
+ setDiagnosticMapping(AllDiags[i], Map, Loc);
+}
+
+void DiagnosticsEngine::Report(const StoredDiagnostic &storedDiag) {
+ assert(CurDiagID == ~0U && "Multiple diagnostics in flight at once!");
+
+ CurDiagLoc = storedDiag.getLocation();
+ CurDiagID = storedDiag.getID();
+ NumDiagArgs = 0;
+
+ NumDiagRanges = storedDiag.range_size();
+ assert(NumDiagRanges < DiagnosticsEngine::MaxRanges &&
+ "Too many arguments to diagnostic!");
+ unsigned i = 0;
+ for (StoredDiagnostic::range_iterator
+ RI = storedDiag.range_begin(),
+ RE = storedDiag.range_end(); RI != RE; ++RI)
+ DiagRanges[i++] = *RI;
+
+ assert(NumDiagRanges < DiagnosticsEngine::MaxFixItHints &&
+ "Too many arguments to diagnostic!");
+ NumDiagFixItHints = 0;
+ for (StoredDiagnostic::fixit_iterator
+ FI = storedDiag.fixit_begin(),
+ FE = storedDiag.fixit_end(); FI != FE; ++FI)
+ DiagFixItHints[NumDiagFixItHints++] = *FI;
+
+ assert(Client && "DiagnosticConsumer not set!");
+ Level DiagLevel = storedDiag.getLevel();
+ Diagnostic Info(this, storedDiag.getMessage());
+ Client->HandleDiagnostic(DiagLevel, Info);
+ if (Client->IncludeInDiagnosticCounts()) {
+ if (DiagLevel == DiagnosticsEngine::Warning)
+ ++NumWarnings;
+ }
+
+ CurDiagID = ~0U;
+}
+
+bool DiagnosticsEngine::EmitCurrentDiagnostic() {
+ // Process the diagnostic, sending the accumulated information to the
+ // DiagnosticConsumer.
+ bool Emitted = ProcessDiag();
+
+ // Clear out the current diagnostic object.
+ unsigned DiagID = CurDiagID;
+ Clear();
+
+ // If there was a delayed diagnostic, emit it now.
+ if (DelayedDiagID && DelayedDiagID != DiagID)
+ ReportDelayed();
+
+ return Emitted;
+}
+
+
+DiagnosticConsumer::~DiagnosticConsumer() {}
+
+void DiagnosticConsumer::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info) {
+ if (!IncludeInDiagnosticCounts())
+ return;
+
+ if (DiagLevel == DiagnosticsEngine::Warning)
+ ++NumWarnings;
+ else if (DiagLevel >= DiagnosticsEngine::Error)
+ ++NumErrors;
+}
+
+/// ModifierIs - Return true if the specified modifier matches the specified string.
+template <std::size_t StrLen>
+static bool ModifierIs(const char *Modifier, unsigned ModifierLen,
+ const char (&Str)[StrLen]) {
+ return StrLen-1 == ModifierLen && !memcmp(Modifier, Str, StrLen-1);
+}
+
+/// ScanFormat - Scans forward, looking for the given character, skipping
+/// nested clauses and escaped characters.
+static const char *ScanFormat(const char *I, const char *E, char Target) {
+ unsigned Depth = 0;
+
+ for ( ; I != E; ++I) {
+ if (Depth == 0 && *I == Target) return I;
+ if (Depth != 0 && *I == '}') Depth--;
+
+ if (*I == '%') {
+ I++;
+ if (I == E) break;
+
+ // Escaped characters get implicitly skipped here.
+
+ // Format specifier.
+ if (!isdigit(*I) && !ispunct(*I)) {
+ for (I++; I != E && !isdigit(*I) && *I != '{'; I++) ;
+ if (I == E) break;
+ if (*I == '{')
+ Depth++;
+ }
+ }
+ }
+ return E;
+}
+
+/// HandleSelectModifier - Handle the integer 'select' modifier. This is used
+/// like this: %select{foo|bar|baz}2. This means that the integer argument
+/// "%2" has a value from 0-2. If the value is 0, the diagnostic prints 'foo'.
+/// If the value is 1, it prints 'bar'. If it has the value 2, it prints 'baz'.
+/// This is very useful for certain classes of variant diagnostics.
+static void HandleSelectModifier(const Diagnostic &DInfo, unsigned ValNo,
+ const char *Argument, unsigned ArgumentLen,
+ SmallVectorImpl<char> &OutStr) {
+ const char *ArgumentEnd = Argument+ArgumentLen;
+
+ // Skip over 'ValNo' |'s.
+ while (ValNo) {
+ const char *NextVal = ScanFormat(Argument, ArgumentEnd, '|');
+ assert(NextVal != ArgumentEnd && "Value for integer select modifier was"
+ " larger than the number of options in the diagnostic string!");
+ Argument = NextVal+1; // Skip this string.
+ --ValNo;
+ }
+
+ // Get the end of the value. This is either the } or the |.
+ const char *EndPtr = ScanFormat(Argument, ArgumentEnd, '|');
+
+ // Recursively format the result of the select clause into the output string.
+ DInfo.FormatDiagnostic(Argument, EndPtr, OutStr);
+}
+
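As a concrete illustration of the %select syntax described above, here is a simplified sketch of the skip-and-extract step for a clause with no nesting or escapes (the hypothetical option list is not taken from the real diagnostic .td files):

    #include <algorithm>
    #include <cassert>
    #include <cstring>
    #include <string>

    // Simplified skip loop: unlike ScanFormat, ignores nested clauses/escapes.
    static std::string selectOption(const char *Arg, const char *End,
                                    unsigned ValNo) {
      while (ValNo--)
        Arg = std::find(Arg, End, '|') + 1;           // skip ValNo alternatives
      return std::string(Arg, std::find(Arg, End, '|'));
    }

    int main() {
      const char *Opts = "a struct|a union|a class";  // body of %select{...}0
      assert(selectOption(Opts, Opts + std::strlen(Opts), 2) == "a class");
      return 0;
    }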
+/// HandleIntegerSModifier - Handle the integer 's' modifier. This adds the
+/// letter 's' to the string if the value is not 1. This is used in cases like
+/// this: "you idiot, you have %4 parameter%s4!".
+static void HandleIntegerSModifier(unsigned ValNo,
+ SmallVectorImpl<char> &OutStr) {
+ if (ValNo != 1)
+ OutStr.push_back('s');
+}
+
+/// HandleOrdinalModifier - Handle the integer 'ord' modifier. This
+/// prints the ordinal form of the given integer, with 1 corresponding
+/// to the first ordinal. Currently this is hard-coded to use the
+/// English form.
+static void HandleOrdinalModifier(unsigned ValNo,
+ SmallVectorImpl<char> &OutStr) {
+ assert(ValNo != 0 && "ValNo must be strictly positive!");
+
+ llvm::raw_svector_ostream Out(OutStr);
+
+ // We could use text forms for the first N ordinals, but the numeric
+ // forms are actually nicer in diagnostics because they stand out.
+ Out << ValNo;
+
+ // It is critically important that we do this perfectly for
+ // user-written sequences with over 100 elements.
+ switch (ValNo % 100) {
+ case 11:
+ case 12:
+ case 13:
+ Out << "th"; return;
+ default:
+ switch (ValNo % 10) {
+ case 1: Out << "st"; return;
+ case 2: Out << "nd"; return;
+ case 3: Out << "rd"; return;
+ default: Out << "th"; return;
+ }
+ }
+}
+
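A quick sanity check of the suffix rule above; the %100 test exists precisely so that 111, 112 and 113 get "th" rather than "st"/"nd"/"rd". A minimal sketch restating the same logic:

    #include <cassert>
    #include <string>

    static std::string ordinalSuffix(unsigned N) {
      unsigned Rem = N % 100;
      if (Rem == 11 || Rem == 12 || Rem == 13)
        return "th";
      switch (N % 10) {
      case 1:  return "st";
      case 2:  return "nd";
      case 3:  return "rd";
      default: return "th";
      }
    }

    int main() {
      assert(ordinalSuffix(1) == "st" && ordinalSuffix(2) == "nd");
      assert(ordinalSuffix(11) == "th" && ordinalSuffix(112) == "th");
      assert(ordinalSuffix(21) == "st" && ordinalSuffix(122) == "nd");
      return 0;
    }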
+
+/// PluralNumber - Parse an unsigned integer and advance Start.
+static unsigned PluralNumber(const char *&Start, const char *End) {
+ // Programming 101: Parse a decimal number :-)
+ unsigned Val = 0;
+ while (Start != End && *Start >= '0' && *Start <= '9') {
+ Val *= 10;
+ Val += *Start - '0';
+ ++Start;
+ }
+ return Val;
+}
+
+/// TestPluralRange - Test if Val is in the parsed range. Modifies Start.
+static bool TestPluralRange(unsigned Val, const char *&Start, const char *End) {
+ if (*Start != '[') {
+ unsigned Ref = PluralNumber(Start, End);
+ return Ref == Val;
+ }
+
+ ++Start;
+ unsigned Low = PluralNumber(Start, End);
+ assert(*Start == ',' && "Bad plural expression syntax: expected ,");
+ ++Start;
+ unsigned High = PluralNumber(Start, End);
+ assert(*Start == ']' && "Bad plural expression syntax: expected ]");
+ ++Start;
+ return Low <= Val && Val <= High;
+}
+
+/// EvalPluralExpr - Actual expression evaluator for HandlePluralModifier.
+static bool EvalPluralExpr(unsigned ValNo, const char *Start, const char *End) {
+ // Empty condition?
+ if (*Start == ':')
+ return true;
+
+ while (1) {
+ char C = *Start;
+ if (C == '%') {
+ // Modulo expression
+ ++Start;
+ unsigned Arg = PluralNumber(Start, End);
+ assert(*Start == '=' && "Bad plural expression syntax: expected =");
+ ++Start;
+ unsigned ValMod = ValNo % Arg;
+ if (TestPluralRange(ValMod, Start, End))
+ return true;
+ } else {
+ assert((C == '[' || (C >= '0' && C <= '9')) &&
+ "Bad plural expression syntax: unexpected character");
+ // Range expression
+ if (TestPluralRange(ValNo, Start, End))
+ return true;
+ }
+
+ // Scan for next or-expr part.
+ Start = std::find(Start, End, ',');
+ if (Start == End)
+ break;
+ ++Start;
+ }
+ return false;
+}
+
+/// HandlePluralModifier - Handle the integer 'plural' modifier. This is used
+/// for complex plural forms, or in languages where all plurals are complex.
+/// The syntax is: %plural{cond1:form1|cond2:form2|:form3}, where condn are
+/// conditions that are tested in order, the form corresponding to the first
+/// that applies being emitted. The empty condition is always true, making the
+/// last form a default case.
+/// Conditions are simple boolean expressions, where n is the number argument.
+/// Here are the rules.
+/// condition := expression | empty
+/// empty := -> always true
+/// expression := numeric [',' expression] -> logical or
+/// numeric := range -> true if n in range
+/// | '%' number '=' range -> true if n % number in range
+/// range := number
+/// | '[' number ',' number ']' -> ranges are inclusive both ends
+///
+/// Here are some examples from the GNU gettext manual written in this form:
+/// English:
+/// {1:form0|:form1}
+/// Latvian:
+/// {0:form2|%100=11,%10=0,%10=[2,9]:form1|:form0}
+/// Gaeilge:
+/// {1:form0|2:form1|:form2}
+/// Romanian:
+/// {1:form0|0,%100=[1,19]:form1|:form2}
+/// Lithuanian:
+/// {%10=0,%100=[10,19]:form2|%10=1:form0|:form1}
+/// Russian (requires repeated form):
+/// {%100=[11,14]:form2|%10=1:form0|%10=[2,4]:form1|:form2}
+/// Slovak
+/// {1:form0|[2,4]:form1|:form2}
+/// Polish (requires repeated form):
+/// {1:form0|%100=[10,20]:form2|%10=[2,4]:form1|:form2}
+static void HandlePluralModifier(const Diagnostic &DInfo, unsigned ValNo,
+ const char *Argument, unsigned ArgumentLen,
+ SmallVectorImpl<char> &OutStr) {
+ const char *ArgumentEnd = Argument + ArgumentLen;
+ while (1) {
+ assert(Argument < ArgumentEnd && "Plural expression didn't match.");
+ const char *ExprEnd = Argument;
+ while (*ExprEnd != ':') {
+ assert(ExprEnd != ArgumentEnd && "Plural missing expression end");
+ ++ExprEnd;
+ }
+ if (EvalPluralExpr(ValNo, Argument, ExprEnd)) {
+ Argument = ExprEnd + 1;
+ ExprEnd = ScanFormat(Argument, ArgumentEnd, '|');
+
+ // Recursively format the result of the plural clause into the
+ // output string.
+ DInfo.FormatDiagnostic(Argument, ExprEnd, OutStr);
+ return;
+ }
+ Argument = ScanFormat(Argument, ArgumentEnd - 1, '|') + 1;
+ }
+}
+
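To make the plural grammar concrete, here is how the documented Polish rule, {1:form0|%100=[10,20]:form2|%10=[2,4]:form1|:form2}, evaluates for a few values; the helper below hard-codes that single rule and only illustrates the semantics, not the parser above:

    #include <cassert>

    static unsigned polishPluralForm(unsigned N) {
      if (N == 1)                         return 0;  // "1:form0"
      if (N % 100 >= 10 && N % 100 <= 20) return 2;  // "%100=[10,20]:form2"
      if (N % 10 >= 2 && N % 10 <= 4)     return 1;  // "%10=[2,4]:form1"
      return 2;                                      // ":form2" (default)
    }

    int main() {
      assert(polishPluralForm(1) == 0);
      assert(polishPluralForm(12) == 2);   // teens take form2
      assert(polishPluralForm(22) == 1);
      assert(polishPluralForm(25) == 2);
      return 0;
    }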
+
+/// FormatDiagnostic - Format this diagnostic into a string, substituting the
+/// formal arguments into the %0 slots. The result is appended onto the Str
+/// array.
+void Diagnostic::
+FormatDiagnostic(SmallVectorImpl<char> &OutStr) const {
+ if (!StoredDiagMessage.empty()) {
+ OutStr.append(StoredDiagMessage.begin(), StoredDiagMessage.end());
+ return;
+ }
+
+ StringRef Diag =
+ getDiags()->getDiagnosticIDs()->getDescription(getID());
+
+ FormatDiagnostic(Diag.begin(), Diag.end(), OutStr);
+}
+
+void Diagnostic::
+FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
+ SmallVectorImpl<char> &OutStr) const {
+
+ /// FormattedArgs - Keep track of all of the arguments formatted by
+ /// ConvertArgToString and pass them into subsequent calls to
+ /// ConvertArgToString, allowing the implementation to avoid redundancies in
+ /// obvious cases.
+ SmallVector<DiagnosticsEngine::ArgumentValue, 8> FormattedArgs;
+
+ /// QualTypeVals - Pass a vector of arrays so that QualType names can be
+ /// compared to see if more information is needed to be printed.
+ SmallVector<intptr_t, 2> QualTypeVals;
+ for (unsigned i = 0, e = getNumArgs(); i < e; ++i)
+ if (getArgKind(i) == DiagnosticsEngine::ak_qualtype)
+ QualTypeVals.push_back(getRawArg(i));
+
+ while (DiagStr != DiagEnd) {
+ if (DiagStr[0] != '%') {
+ // Append non-%0 substrings to Str if we have one.
+ const char *StrEnd = std::find(DiagStr, DiagEnd, '%');
+ OutStr.append(DiagStr, StrEnd);
+ DiagStr = StrEnd;
+ continue;
+ } else if (ispunct(DiagStr[1])) {
+ OutStr.push_back(DiagStr[1]); // %% -> %.
+ DiagStr += 2;
+ continue;
+ }
+
+ // Skip the %.
+ ++DiagStr;
+
+ // This must be a placeholder for a diagnostic argument. The format for a
+ // placeholder is one of "%0", "%modifier0", or "%modifier{arguments}0".
+ // The digit is a number from 0-9 indicating which argument this comes from.
+ // The modifier is a string of characters from the set [-a-z]+, and arguments
+ // is a brace-enclosed string.
+ const char *Modifier = 0, *Argument = 0;
+ unsigned ModifierLen = 0, ArgumentLen = 0;
+
+ // Check to see if we have a modifier. If so eat it.
+ if (!isdigit(DiagStr[0])) {
+ Modifier = DiagStr;
+ while (DiagStr[0] == '-' ||
+ (DiagStr[0] >= 'a' && DiagStr[0] <= 'z'))
+ ++DiagStr;
+ ModifierLen = DiagStr-Modifier;
+
+ // If we have an argument, get it next.
+ if (DiagStr[0] == '{') {
+ ++DiagStr; // Skip {.
+ Argument = DiagStr;
+
+ DiagStr = ScanFormat(DiagStr, DiagEnd, '}');
+ assert(DiagStr != DiagEnd && "Mismatched {}'s in diagnostic string!");
+ ArgumentLen = DiagStr-Argument;
+ ++DiagStr; // Skip }.
+ }
+ }
+
+ assert(isdigit(*DiagStr) && "Invalid format for argument in diagnostic");
+ unsigned ArgNo = *DiagStr++ - '0';
+
+ DiagnosticsEngine::ArgumentKind Kind = getArgKind(ArgNo);
+
+ switch (Kind) {
+ // ---- STRINGS ----
+ case DiagnosticsEngine::ak_std_string: {
+ const std::string &S = getArgStdStr(ArgNo);
+ assert(ModifierLen == 0 && "No modifiers for strings yet");
+ OutStr.append(S.begin(), S.end());
+ break;
+ }
+ case DiagnosticsEngine::ak_c_string: {
+ const char *S = getArgCStr(ArgNo);
+ assert(ModifierLen == 0 && "No modifiers for strings yet");
+
+ // Don't crash if get passed a null pointer by accident.
+ if (!S)
+ S = "(null)";
+
+ OutStr.append(S, S + strlen(S));
+ break;
+ }
+ // ---- INTEGERS ----
+ case DiagnosticsEngine::ak_sint: {
+ int Val = getArgSInt(ArgNo);
+
+ if (ModifierIs(Modifier, ModifierLen, "select")) {
+ HandleSelectModifier(*this, (unsigned)Val, Argument, ArgumentLen,
+ OutStr);
+ } else if (ModifierIs(Modifier, ModifierLen, "s")) {
+ HandleIntegerSModifier(Val, OutStr);
+ } else if (ModifierIs(Modifier, ModifierLen, "plural")) {
+ HandlePluralModifier(*this, (unsigned)Val, Argument, ArgumentLen,
+ OutStr);
+ } else if (ModifierIs(Modifier, ModifierLen, "ordinal")) {
+ HandleOrdinalModifier((unsigned)Val, OutStr);
+ } else {
+ assert(ModifierLen == 0 && "Unknown integer modifier");
+ llvm::raw_svector_ostream(OutStr) << Val;
+ }
+ break;
+ }
+ case DiagnosticsEngine::ak_uint: {
+ unsigned Val = getArgUInt(ArgNo);
+
+ if (ModifierIs(Modifier, ModifierLen, "select")) {
+ HandleSelectModifier(*this, Val, Argument, ArgumentLen, OutStr);
+ } else if (ModifierIs(Modifier, ModifierLen, "s")) {
+ HandleIntegerSModifier(Val, OutStr);
+ } else if (ModifierIs(Modifier, ModifierLen, "plural")) {
+ HandlePluralModifier(*this, (unsigned)Val, Argument, ArgumentLen,
+ OutStr);
+ } else if (ModifierIs(Modifier, ModifierLen, "ordinal")) {
+ HandleOrdinalModifier(Val, OutStr);
+ } else {
+ assert(ModifierLen == 0 && "Unknown integer modifier");
+ llvm::raw_svector_ostream(OutStr) << Val;
+ }
+ break;
+ }
+ // ---- NAMES and TYPES ----
+ case DiagnosticsEngine::ak_identifierinfo: {
+ const IdentifierInfo *II = getArgIdentifier(ArgNo);
+ assert(ModifierLen == 0 && "No modifiers for strings yet");
+
+ // Don't crash if get passed a null pointer by accident.
+ if (!II) {
+ const char *S = "(null)";
+ OutStr.append(S, S + strlen(S));
+ continue;
+ }
+
+ llvm::raw_svector_ostream(OutStr) << '\'' << II->getName() << '\'';
+ break;
+ }
+ case DiagnosticsEngine::ak_qualtype:
+ case DiagnosticsEngine::ak_declarationname:
+ case DiagnosticsEngine::ak_nameddecl:
+ case DiagnosticsEngine::ak_nestednamespec:
+ case DiagnosticsEngine::ak_declcontext:
+ getDiags()->ConvertArgToString(Kind, getRawArg(ArgNo),
+ Modifier, ModifierLen,
+ Argument, ArgumentLen,
+ FormattedArgs.data(), FormattedArgs.size(),
+ OutStr, QualTypeVals);
+ break;
+ }
+
+ // Remember this argument info for subsequent formatting operations. Turn
+ // std::strings into null-terminated strings so that they are handled the
+ // same way as the other argument kinds.
+ if (Kind != DiagnosticsEngine::ak_std_string)
+ FormattedArgs.push_back(std::make_pair(Kind, getRawArg(ArgNo)));
+ else
+ FormattedArgs.push_back(std::make_pair(DiagnosticsEngine::ak_c_string,
+ (intptr_t)getArgStdStr(ArgNo).c_str()));
+
+ }
+}
+
+StoredDiagnostic::StoredDiagnostic() { }
+
+StoredDiagnostic::StoredDiagnostic(DiagnosticsEngine::Level Level, unsigned ID,
+ StringRef Message)
+ : ID(ID), Level(Level), Loc(), Message(Message) { }
+
+StoredDiagnostic::StoredDiagnostic(DiagnosticsEngine::Level Level,
+ const Diagnostic &Info)
+ : ID(Info.getID()), Level(Level)
+{
+ assert((Info.getLocation().isInvalid() || Info.hasSourceManager()) &&
+ "Valid source location without setting a source manager for diagnostic");
+ if (Info.getLocation().isValid())
+ Loc = FullSourceLoc(Info.getLocation(), Info.getSourceManager());
+ SmallString<64> Message;
+ Info.FormatDiagnostic(Message);
+ this->Message.assign(Message.begin(), Message.end());
+
+ Ranges.reserve(Info.getNumRanges());
+ for (unsigned I = 0, N = Info.getNumRanges(); I != N; ++I)
+ Ranges.push_back(Info.getRange(I));
+
+ FixIts.reserve(Info.getNumFixItHints());
+ for (unsigned I = 0, N = Info.getNumFixItHints(); I != N; ++I)
+ FixIts.push_back(Info.getFixItHint(I));
+}
+
+StoredDiagnostic::StoredDiagnostic(DiagnosticsEngine::Level Level, unsigned ID,
+ StringRef Message, FullSourceLoc Loc,
+ ArrayRef<CharSourceRange> Ranges,
+ ArrayRef<FixItHint> Fixits)
+ : ID(ID), Level(Level), Loc(Loc), Message(Message)
+{
+ this->Ranges.assign(Ranges.begin(), Ranges.end());
+ this->FixIts.assign(Fixits.begin(), Fixits.end());
+}
+
+StoredDiagnostic::~StoredDiagnostic() { }
+
+/// IncludeInDiagnosticCounts - This method (whose default implementation
+/// returns true) indicates whether the diagnostics handled by this
+/// DiagnosticConsumer should be included in the number of diagnostics
+/// reported by DiagnosticsEngine.
+bool DiagnosticConsumer::IncludeInDiagnosticCounts() const { return true; }
+
+void IgnoringDiagConsumer::anchor() { }
+
+PartialDiagnostic::StorageAllocator::StorageAllocator() {
+ for (unsigned I = 0; I != NumCached; ++I)
+ FreeList[I] = Cached + I;
+ NumFreeListEntries = NumCached;
+}
+
+PartialDiagnostic::StorageAllocator::~StorageAllocator() {
+ // Don't assert if we are in a CrashRecovery context, as this invariant may
+ // be invalidated during a crash.
+ assert((NumFreeListEntries == NumCached ||
+ llvm::CrashRecoveryContext::isRecoveringFromCrash()) &&
+ "A partial diagnostic is on the lam");
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp b/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
new file mode 100644
index 0000000..8c33a96
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/DiagnosticIDs.cpp
@@ -0,0 +1,697 @@
+//===--- DiagnosticIDs.cpp - Diagnostic IDs Handling ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Diagnostic IDs-related interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/DiagnosticIDs.h"
+#include "clang/Basic/AllDiagnostics.h"
+#include "clang/Basic/DiagnosticCategories.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#include <map>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Builtin Diagnostic information
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+// Diagnostic classes.
+enum {
+ CLASS_NOTE = 0x01,
+ CLASS_WARNING = 0x02,
+ CLASS_EXTENSION = 0x03,
+ CLASS_ERROR = 0x04
+};
+
+struct StaticDiagInfoRec {
+ unsigned short DiagID;
+ unsigned Mapping : 3;
+ unsigned Class : 3;
+ unsigned SFINAE : 1;
+ unsigned AccessControl : 1;
+ unsigned WarnNoWerror : 1;
+ unsigned WarnShowInSystemHeader : 1;
+ unsigned Category : 5;
+
+ uint16_t OptionGroupIndex;
+
+ uint16_t DescriptionLen;
+ const char *DescriptionStr;
+
+ unsigned getOptionGroupIndex() const {
+ return OptionGroupIndex;
+ }
+
+ StringRef getDescription() const {
+ return StringRef(DescriptionStr, DescriptionLen);
+ }
+
+ bool operator<(const StaticDiagInfoRec &RHS) const {
+ return DiagID < RHS.DiagID;
+ }
+};
+
+} // end anonymous namespace
+
+static const StaticDiagInfoRec StaticDiagInfo[] = {
+#define DIAG(ENUM,CLASS,DEFAULT_MAPPING,DESC,GROUP, \
+ SFINAE,ACCESS,NOWERROR,SHOWINSYSHEADER, \
+ CATEGORY) \
+ { diag::ENUM, DEFAULT_MAPPING, CLASS, SFINAE, ACCESS, \
+ NOWERROR, SHOWINSYSHEADER, CATEGORY, GROUP, \
+ STR_SIZE(DESC, uint16_t), DESC },
+#include "clang/Basic/DiagnosticCommonKinds.inc"
+#include "clang/Basic/DiagnosticDriverKinds.inc"
+#include "clang/Basic/DiagnosticFrontendKinds.inc"
+#include "clang/Basic/DiagnosticSerializationKinds.inc"
+#include "clang/Basic/DiagnosticLexKinds.inc"
+#include "clang/Basic/DiagnosticParseKinds.inc"
+#include "clang/Basic/DiagnosticASTKinds.inc"
+#include "clang/Basic/DiagnosticSemaKinds.inc"
+#include "clang/Basic/DiagnosticAnalysisKinds.inc"
+#undef DIAG
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+};
+
+static const unsigned StaticDiagInfoSize =
+ sizeof(StaticDiagInfo)/sizeof(StaticDiagInfo[0])-1;
+
+/// GetDiagInfo - Return the StaticDiagInfoRec entry for the specified DiagID,
+/// or null if the ID is invalid.
+static const StaticDiagInfoRec *GetDiagInfo(unsigned DiagID) {
+ // If assertions are enabled, verify that the StaticDiagInfo array is sorted.
+#ifndef NDEBUG
+ static bool IsFirst = true;
+ if (IsFirst) {
+ for (unsigned i = 1; i != StaticDiagInfoSize; ++i) {
+ assert(StaticDiagInfo[i-1].DiagID != StaticDiagInfo[i].DiagID &&
+ "Diag ID conflict, the enums at the start of clang::diag (in "
+ "DiagnosticIDs.h) probably need to be increased");
+
+ assert(StaticDiagInfo[i-1] < StaticDiagInfo[i] &&
+ "Improperly sorted diag info");
+ }
+ IsFirst = false;
+ }
+#endif
+
+ // Search the diagnostic table with a binary search.
+ StaticDiagInfoRec Find = { static_cast<unsigned short>(DiagID),
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+
+ const StaticDiagInfoRec *Found =
+ std::lower_bound(StaticDiagInfo, StaticDiagInfo + StaticDiagInfoSize, Find);
+ if (Found == StaticDiagInfo + StaticDiagInfoSize ||
+ Found->DiagID != DiagID)
+ return 0;
+
+ return Found;
+}
+
+static DiagnosticMappingInfo GetDefaultDiagMappingInfo(unsigned DiagID) {
+ DiagnosticMappingInfo Info = DiagnosticMappingInfo::Make(
+ diag::MAP_FATAL, /*IsUser=*/false, /*IsPragma=*/false);
+
+ if (const StaticDiagInfoRec *StaticInfo = GetDiagInfo(DiagID)) {
+ Info.setMapping((diag::Mapping) StaticInfo->Mapping);
+
+ if (StaticInfo->WarnNoWerror) {
+ assert(Info.getMapping() == diag::MAP_WARNING &&
+ "Unexpected mapping with no-Werror bit!");
+ Info.setNoWarningAsError(true);
+ }
+
+ if (StaticInfo->WarnShowInSystemHeader) {
+ assert(Info.getMapping() == diag::MAP_WARNING &&
+ "Unexpected mapping with show-in-system-header bit!");
+ Info.setShowInSystemHeader(true);
+ }
+ }
+
+ return Info;
+}
+
+/// getCategoryNumberForDiag - Return the category number that a specified
+/// DiagID belongs to, or 0 if no category.
+unsigned DiagnosticIDs::getCategoryNumberForDiag(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return Info->Category;
+ return 0;
+}
+
+namespace {
+ // The diagnostic category names.
+ struct StaticDiagCategoryRec {
+ const char *NameStr;
+ uint8_t NameLen;
+
+ StringRef getName() const {
+ return StringRef(NameStr, NameLen);
+ }
+ };
+}
+
+// Unfortunately, the split between DiagnosticIDs and Diagnostic is not
+// particularly clean, but for now we just implement this method here so we can
+// access GetDefaultDiagMappingInfo.
+DiagnosticMappingInfo &DiagnosticsEngine::DiagState::getOrAddMappingInfo(
+ diag::kind Diag)
+{
+ std::pair<iterator, bool> Result = DiagMap.insert(
+ std::make_pair(Diag, DiagnosticMappingInfo()));
+
+ // Initialize the entry if we added it.
+ if (Result.second)
+ Result.first->second = GetDefaultDiagMappingInfo(Diag);
+
+ return Result.first->second;
+}
+
+static const StaticDiagCategoryRec CategoryNameTable[] = {
+#define GET_CATEGORY_TABLE
+#define CATEGORY(X, ENUM) { X, STR_SIZE(X, uint8_t) },
+#include "clang/Basic/DiagnosticGroups.inc"
+#undef GET_CATEGORY_TABLE
+ { 0, 0 }
+};
+
+/// getNumberOfCategories - Return the number of categories
+unsigned DiagnosticIDs::getNumberOfCategories() {
+ return sizeof(CategoryNameTable) / sizeof(CategoryNameTable[0])-1;
+}
+
+/// getCategoryNameFromID - Given a category ID, return the name of the
+/// category, an empty string if CategoryID is zero, or null if CategoryID is
+/// invalid.
+StringRef DiagnosticIDs::getCategoryNameFromID(unsigned CategoryID) {
+ if (CategoryID >= getNumberOfCategories())
+ return StringRef();
+ return CategoryNameTable[CategoryID].getName();
+}
+
+
+
+DiagnosticIDs::SFINAEResponse
+DiagnosticIDs::getDiagnosticSFINAEResponse(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID)) {
+ if (Info->AccessControl)
+ return SFINAE_AccessControl;
+
+ if (!Info->SFINAE)
+ return SFINAE_Report;
+
+ if (Info->Class == CLASS_ERROR)
+ return SFINAE_SubstitutionFailure;
+
+ // Suppress notes, warnings, and extensions;
+ return SFINAE_Suppress;
+ }
+
+ return SFINAE_Report;
+}
+
+/// getBuiltinDiagClass - Return the class field of the diagnostic.
+///
+static unsigned getBuiltinDiagClass(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return Info->Class;
+ return ~0U;
+}
+
+//===----------------------------------------------------------------------===//
+// Custom Diagnostic information
+//===----------------------------------------------------------------------===//
+
+namespace clang {
+ namespace diag {
+ class CustomDiagInfo {
+ typedef std::pair<DiagnosticIDs::Level, std::string> DiagDesc;
+ std::vector<DiagDesc> DiagInfo;
+ std::map<DiagDesc, unsigned> DiagIDs;
+ public:
+
+ /// getDescription - Return the description of the specified custom
+ /// diagnostic.
+ StringRef getDescription(unsigned DiagID) const {
+ assert(this && DiagID-DIAG_UPPER_LIMIT < DiagInfo.size() &&
+ "Invalid diagnostic ID");
+ return DiagInfo[DiagID-DIAG_UPPER_LIMIT].second;
+ }
+
+ /// getLevel - Return the level of the specified custom diagnostic.
+ DiagnosticIDs::Level getLevel(unsigned DiagID) const {
+ assert(this && DiagID-DIAG_UPPER_LIMIT < DiagInfo.size() &&
+ "Invalid diagnostic ID");
+ return DiagInfo[DiagID-DIAG_UPPER_LIMIT].first;
+ }
+
+ unsigned getOrCreateDiagID(DiagnosticIDs::Level L, StringRef Message,
+ DiagnosticIDs &Diags) {
+ DiagDesc D(L, Message);
+ // Check to see if it already exists.
+ std::map<DiagDesc, unsigned>::iterator I = DiagIDs.lower_bound(D);
+ if (I != DiagIDs.end() && I->first == D)
+ return I->second;
+
+ // If not, assign a new ID.
+ unsigned ID = DiagInfo.size()+DIAG_UPPER_LIMIT;
+ DiagIDs.insert(std::make_pair(D, ID));
+ DiagInfo.push_back(D);
+ return ID;
+ }
+ };
+
+ } // end diag namespace
+} // end clang namespace
+
+
+//===----------------------------------------------------------------------===//
+// Common Diagnostic implementation
+//===----------------------------------------------------------------------===//
+
+DiagnosticIDs::DiagnosticIDs() {
+ CustomDiagInfo = 0;
+}
+
+DiagnosticIDs::~DiagnosticIDs() {
+ delete CustomDiagInfo;
+}
+
+/// getCustomDiagID - Return an ID for a diagnostic with the specified message
+/// and level. If this is the first request for this diagnostic, it is
+/// registered and created; otherwise the existing ID is returned.
+unsigned DiagnosticIDs::getCustomDiagID(Level L, StringRef Message) {
+ if (CustomDiagInfo == 0)
+ CustomDiagInfo = new diag::CustomDiagInfo();
+ return CustomDiagInfo->getOrCreateDiagID(L, Message, *this);
+}
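+
+// Illustrative usage (a sketch, not part of the surrounding code): a client
+// that owns a DiagnosticIDs instance named DiagIDs could register a one-off
+// warning like this; the message text below is made up.
+//
+//   unsigned ID = DiagIDs.getCustomDiagID(DiagnosticIDs::Warning,
+//                                         "example custom warning");
+//   // Asking again with the same level and message returns the same ID, so
+//   // the ID can be cached or simply re-requested.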
+
+
+/// isBuiltinWarningOrExtension - Return true if the unmapped diagnostic
+/// level of the specified diagnostic ID is a Warning or Extension.
+/// This only works on builtin diagnostics, not custom ones, and is not legal to
+/// call on NOTEs.
+bool DiagnosticIDs::isBuiltinWarningOrExtension(unsigned DiagID) {
+ return DiagID < diag::DIAG_UPPER_LIMIT &&
+ getBuiltinDiagClass(DiagID) != CLASS_ERROR;
+}
+
+/// \brief Determine whether the given built-in diagnostic ID is a
+/// Note.
+bool DiagnosticIDs::isBuiltinNote(unsigned DiagID) {
+ return DiagID < diag::DIAG_UPPER_LIMIT &&
+ getBuiltinDiagClass(DiagID) == CLASS_NOTE;
+}
+
+/// isBuiltinExtensionDiag - Determine whether the given built-in diagnostic
+/// ID is for an extension of some sort. This also returns EnabledByDefault,
+/// which is set to indicate whether the diagnostic is ignored by default (in
+/// which case -pedantic enables it) or treated as a warning/error by default.
+///
+bool DiagnosticIDs::isBuiltinExtensionDiag(unsigned DiagID,
+ bool &EnabledByDefault) {
+ if (DiagID >= diag::DIAG_UPPER_LIMIT ||
+ getBuiltinDiagClass(DiagID) != CLASS_EXTENSION)
+ return false;
+
+ EnabledByDefault =
+ GetDefaultDiagMappingInfo(DiagID).getMapping() != diag::MAP_IGNORE;
+ return true;
+}
+
+bool DiagnosticIDs::isDefaultMappingAsError(unsigned DiagID) {
+ if (DiagID >= diag::DIAG_UPPER_LIMIT)
+ return false;
+
+ return GetDefaultDiagMappingInfo(DiagID).getMapping() == diag::MAP_ERROR;
+}
+
+/// getDescription - Given a diagnostic ID, return a description of the
+/// issue.
+StringRef DiagnosticIDs::getDescription(unsigned DiagID) const {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return Info->getDescription();
+ return CustomDiagInfo->getDescription(DiagID);
+}
+
+/// getDiagnosticLevel - Based on the way the client configured the
+/// DiagnosticsEngine object, classify the specified diagnostic ID into a Level,
+/// consumable by the DiagnosticClient.
+DiagnosticIDs::Level
+DiagnosticIDs::getDiagnosticLevel(unsigned DiagID, SourceLocation Loc,
+ const DiagnosticsEngine &Diag) const {
+ // Handle custom diagnostics, which cannot be mapped.
+ if (DiagID >= diag::DIAG_UPPER_LIMIT)
+ return CustomDiagInfo->getLevel(DiagID);
+
+ unsigned DiagClass = getBuiltinDiagClass(DiagID);
+ assert(DiagClass != CLASS_NOTE && "Cannot get diagnostic level of a note!");
+ return getDiagnosticLevel(DiagID, DiagClass, Loc, Diag);
+}
+
+/// \brief Based on the way the client configured the Diagnostic
+/// object, classify the specified diagnostic ID into a Level, consumable by
+/// the DiagnosticClient.
+///
+/// \param Loc The source location we are interested in finding out the
+/// diagnostic state. Can be null in order to query the latest state.
+DiagnosticIDs::Level
+DiagnosticIDs::getDiagnosticLevel(unsigned DiagID, unsigned DiagClass,
+ SourceLocation Loc,
+ const DiagnosticsEngine &Diag) const {
+ // Specific non-error diagnostics may be mapped to various levels from ignored
+ // to error. Errors can only be mapped to fatal.
+ DiagnosticIDs::Level Result = DiagnosticIDs::Fatal;
+
+ DiagnosticsEngine::DiagStatePointsTy::iterator
+ Pos = Diag.GetDiagStatePointForLoc(Loc);
+ DiagnosticsEngine::DiagState *State = Pos->State;
+
+ // Get the mapping information, or compute it lazily.
+ DiagnosticMappingInfo &MappingInfo = State->getOrAddMappingInfo(
+ (diag::kind)DiagID);
+
+ switch (MappingInfo.getMapping()) {
+ case diag::MAP_IGNORE:
+ Result = DiagnosticIDs::Ignored;
+ break;
+ case diag::MAP_WARNING:
+ Result = DiagnosticIDs::Warning;
+ break;
+ case diag::MAP_ERROR:
+ Result = DiagnosticIDs::Error;
+ break;
+ case diag::MAP_FATAL:
+ Result = DiagnosticIDs::Fatal;
+ break;
+ }
+
+ // Upgrade ignored diagnostics if -Weverything is enabled.
+ if (Diag.EnableAllWarnings && Result == DiagnosticIDs::Ignored &&
+ !MappingInfo.isUser())
+ Result = DiagnosticIDs::Warning;
+
+ // Ignore -pedantic diagnostics inside __extension__ blocks.
+ // (The diagnostics controlled by -pedantic are the extension diagnostics
+ // that are not enabled by default.)
+ bool EnabledByDefault = false;
+ bool IsExtensionDiag = isBuiltinExtensionDiag(DiagID, EnabledByDefault);
+ if (Diag.AllExtensionsSilenced && IsExtensionDiag && !EnabledByDefault)
+ return DiagnosticIDs::Ignored;
+
+ // For extension diagnostics that haven't been explicitly mapped, check if we
+ // should upgrade the diagnostic.
+ if (IsExtensionDiag && !MappingInfo.isUser()) {
+ switch (Diag.ExtBehavior) {
+ case DiagnosticsEngine::Ext_Ignore:
+ break;
+ case DiagnosticsEngine::Ext_Warn:
+ // Upgrade ignored diagnostics to warnings.
+ if (Result == DiagnosticIDs::Ignored)
+ Result = DiagnosticIDs::Warning;
+ break;
+ case DiagnosticsEngine::Ext_Error:
+ // Upgrade ignored or warning diagnostics to errors.
+ if (Result == DiagnosticIDs::Ignored || Result == DiagnosticIDs::Warning)
+ Result = DiagnosticIDs::Error;
+ break;
+ }
+ }
+
+ // At this point, ignored errors can no longer be upgraded.
+ if (Result == DiagnosticIDs::Ignored)
+ return Result;
+
+ // Honor -w, which is lower in priority than pedantic-errors, but higher than
+ // -Werror.
+ if (Result == DiagnosticIDs::Warning && Diag.IgnoreAllWarnings)
+ return DiagnosticIDs::Ignored;
+
+ // If -Werror is enabled, map warnings to errors unless explicitly disabled.
+ if (Result == DiagnosticIDs::Warning) {
+ if (Diag.WarningsAsErrors && !MappingInfo.hasNoWarningAsError())
+ Result = DiagnosticIDs::Error;
+ }
+
+ // If -Wfatal-errors is enabled, map errors to fatal unless explicitly
+ // disabled.
+ if (Result == DiagnosticIDs::Error) {
+ if (Diag.ErrorsAsFatal && !MappingInfo.hasNoErrorAsFatal())
+ Result = DiagnosticIDs::Fatal;
+ }
+
+ // If we are in a system header, we ignore it. We look at the diagnostic class
+ // because we also want to ignore extensions and warnings in -Werror and
+ // -pedantic-errors modes, which *map* warnings/extensions to errors.
+ if (Result >= DiagnosticIDs::Warning &&
+ DiagClass != CLASS_ERROR &&
+ // Custom diagnostics are always emitted in system headers.
+ DiagID < diag::DIAG_UPPER_LIMIT &&
+ !MappingInfo.hasShowInSystemHeader() &&
+ Diag.SuppressSystemWarnings &&
+ Loc.isValid() &&
+ Diag.getSourceManager().isInSystemHeader(
+ Diag.getSourceManager().getExpansionLoc(Loc)))
+ return DiagnosticIDs::Ignored;
+
+ return Result;
+}
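+
+// Note the ordering above: an ordinary warning compiled with both -w and
+// -Werror ends up Ignored, because the -w check fires before the -Werror
+// upgrade is considered.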
+
+struct clang::WarningOption {
+ // Be safe with the size of 'NameLen' because we don't statically check if
+ // the size will fit in the field; the struct size won't decrease with a
+ // shorter type anyway.
+ size_t NameLen;
+ const char *NameStr;
+ const short *Members;
+ const short *SubGroups;
+
+ StringRef getName() const {
+ return StringRef(NameStr, NameLen);
+ }
+};
+
+#define GET_DIAG_ARRAYS
+#include "clang/Basic/DiagnosticGroups.inc"
+#undef GET_DIAG_ARRAYS
+
+// Second, the table of options, sorted by name for fast binary lookup.
+static const WarningOption OptionTable[] = {
+#define GET_DIAG_TABLE
+#include "clang/Basic/DiagnosticGroups.inc"
+#undef GET_DIAG_TABLE
+};
+static const size_t OptionTableSize =
+sizeof(OptionTable) / sizeof(OptionTable[0]);
+
+static bool WarningOptionCompare(const WarningOption &LHS,
+ const WarningOption &RHS) {
+ return LHS.getName() < RHS.getName();
+}
+
+/// getWarningOptionForDiag - Return the lowest-level warning option that
+/// enables the specified diagnostic. If there is no -Wfoo flag that controls
+/// the diagnostic, this returns an empty string.
+StringRef DiagnosticIDs::getWarningOptionForDiag(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return OptionTable[Info->getOptionGroupIndex()].getName();
+ return StringRef();
+}
+
+void DiagnosticIDs::getDiagnosticsInGroup(
+ const WarningOption *Group,
+ llvm::SmallVectorImpl<diag::kind> &Diags) const
+{
+ // Add the members of the option diagnostic set.
+ if (const short *Member = Group->Members) {
+ for (; *Member != -1; ++Member)
+ Diags.push_back(*Member);
+ }
+
+ // Add the members of the subgroups.
+ if (const short *SubGroups = Group->SubGroups) {
+ for (; *SubGroups != (short)-1; ++SubGroups)
+ getDiagnosticsInGroup(&OptionTable[(short)*SubGroups], Diags);
+ }
+}
+
+bool DiagnosticIDs::getDiagnosticsInGroup(
+ StringRef Group,
+ llvm::SmallVectorImpl<diag::kind> &Diags) const
+{
+ WarningOption Key = { Group.size(), Group.data(), 0, 0 };
+ const WarningOption *Found =
+ std::lower_bound(OptionTable, OptionTable + OptionTableSize, Key,
+ WarningOptionCompare);
+ if (Found == OptionTable + OptionTableSize ||
+ Found->getName() != Group)
+ return true; // Option not found.
+
+ getDiagnosticsInGroup(Found, Diags);
+ return false;
+}
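+
+// Illustrative usage (a sketch; "unused" is assumed to name a valid warning
+// group): expanding a -W group into the individual diagnostics it controls,
+// including those contributed by its subgroups.
+//
+//   llvm::SmallVector<diag::kind, 32> Members;
+//   if (!DiagIDs.getDiagnosticsInGroup("unused", Members)) {
+//     // Members now holds every diagnostic reachable from -Wunused.
+//   }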
+
+void DiagnosticIDs::getAllDiagnostics(
+ llvm::SmallVectorImpl<diag::kind> &Diags) const {
+ for (unsigned i = 0; i != StaticDiagInfoSize; ++i)
+ Diags.push_back(StaticDiagInfo[i].DiagID);
+}
+
+StringRef DiagnosticIDs::getNearestWarningOption(StringRef Group) {
+ StringRef Best;
+ unsigned BestDistance = Group.size() + 1; // Sanity threshold.
+ for (const WarningOption *i = OptionTable, *e = OptionTable + OptionTableSize;
+ i != e; ++i) {
+ // Don't suggest ignored warning flags.
+ if (!i->Members && !i->SubGroups)
+ continue;
+
+ unsigned Distance = i->getName().edit_distance(Group, true, BestDistance);
+ if (Distance == BestDistance) {
+ // Two matches with the same distance, don't prefer one over the other.
+ Best = "";
+ } else if (Distance < BestDistance) {
+ // This is a better match.
+ Best = i->getName();
+ BestDistance = Distance;
+ }
+ }
+
+ return Best;
+}
+
+/// ProcessDiag - This is the method used to report a diagnostic that is
+/// finally fully formed.
+bool DiagnosticIDs::ProcessDiag(DiagnosticsEngine &Diag) const {
+ Diagnostic Info(&Diag);
+
+ if (Diag.SuppressAllDiagnostics)
+ return false;
+
+ assert(Diag.getClient() && "DiagnosticClient not set!");
+
+ // Figure out the diagnostic level of this message.
+ DiagnosticIDs::Level DiagLevel;
+ unsigned DiagID = Info.getID();
+
+ if (DiagID >= diag::DIAG_UPPER_LIMIT) {
+ // Handle custom diagnostics, which cannot be mapped.
+ DiagLevel = CustomDiagInfo->getLevel(DiagID);
+ } else {
+ // Get the class of the diagnostic. If this is a NOTE, map it onto whatever
+ // the diagnostic level was for the previous diagnostic so that it is
+ // filtered the same as the previous diagnostic.
+ unsigned DiagClass = getBuiltinDiagClass(DiagID);
+ if (DiagClass == CLASS_NOTE) {
+ DiagLevel = DiagnosticIDs::Note;
+ } else {
+ DiagLevel = getDiagnosticLevel(DiagID, DiagClass, Info.getLocation(),
+ Diag);
+ }
+ }
+
+ if (DiagLevel != DiagnosticIDs::Note) {
+ // Record that a fatal error occurred only when we see a second
+ // non-note diagnostic. This allows notes to be attached to the
+ // fatal error, but suppresses any diagnostics that follow those
+ // notes.
+ if (Diag.LastDiagLevel == DiagnosticIDs::Fatal)
+ Diag.FatalErrorOccurred = true;
+
+ Diag.LastDiagLevel = DiagLevel;
+ }
+
+ // Update counts for DiagnosticErrorTrap even if a fatal error occurred.
+ if (DiagLevel >= DiagnosticIDs::Error) {
+ ++Diag.TrapNumErrorsOccurred;
+ if (isUnrecoverable(DiagID))
+ ++Diag.TrapNumUnrecoverableErrorsOccurred;
+ }
+
+ // If a fatal error has already been emitted, silence all subsequent
+ // diagnostics.
+ if (Diag.FatalErrorOccurred) {
+ if (DiagLevel >= DiagnosticIDs::Error &&
+ Diag.Client->IncludeInDiagnosticCounts()) {
+ ++Diag.NumErrors;
+ ++Diag.NumErrorsSuppressed;
+ }
+
+ return false;
+ }
+
+ // If the client doesn't care about this message, don't issue it. If this is
+ // a note and the last real diagnostic was ignored, ignore it too.
+ if (DiagLevel == DiagnosticIDs::Ignored ||
+ (DiagLevel == DiagnosticIDs::Note &&
+ Diag.LastDiagLevel == DiagnosticIDs::Ignored))
+ return false;
+
+ if (DiagLevel >= DiagnosticIDs::Error) {
+ if (isUnrecoverable(DiagID))
+ Diag.UnrecoverableErrorOccurred = true;
+
+ if (Diag.Client->IncludeInDiagnosticCounts()) {
+ Diag.ErrorOccurred = true;
+ ++Diag.NumErrors;
+ }
+
+ // If we've emitted a lot of errors, emit a fatal error instead of this one
+ // to stop a flood of bogus errors.
+ if (Diag.ErrorLimit && Diag.NumErrors > Diag.ErrorLimit &&
+ DiagLevel == DiagnosticIDs::Error) {
+ Diag.SetDelayedDiagnostic(diag::fatal_too_many_errors);
+ return false;
+ }
+ }
+
+ // Finally, report it.
+ Diag.Client->HandleDiagnostic((DiagnosticsEngine::Level)DiagLevel, Info);
+ if (Diag.Client->IncludeInDiagnosticCounts()) {
+ if (DiagLevel == DiagnosticIDs::Warning)
+ ++Diag.NumWarnings;
+ }
+
+ Diag.CurDiagID = ~0U;
+
+ return true;
+}
+
+bool DiagnosticIDs::isUnrecoverable(unsigned DiagID) const {
+ if (DiagID >= diag::DIAG_UPPER_LIMIT) {
+ // Custom diagnostics.
+ return CustomDiagInfo->getLevel(DiagID) >= DiagnosticIDs::Error;
+ }
+
+ // Only errors may be unrecoverable.
+ if (getBuiltinDiagClass(DiagID) < CLASS_ERROR)
+ return false;
+
+ if (DiagID == diag::err_unavailable ||
+ DiagID == diag::err_unavailable_message)
+ return false;
+
+ // Currently we consider all ARC errors as recoverable.
+ if (isARCDiagnostic(DiagID))
+ return false;
+
+ return true;
+}
+
+bool DiagnosticIDs::isARCDiagnostic(unsigned DiagID) {
+ unsigned cat = getCategoryNumberForDiag(DiagID);
+ return DiagnosticIDs::getCategoryNameFromID(cat).startswith("ARC ");
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
new file mode 100644
index 0000000..fd6d334
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/FileManager.cpp
@@ -0,0 +1,600 @@
+//===--- FileManager.cpp - File System Probing and Caching ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the FileManager interface.
+//
+//===----------------------------------------------------------------------===//
+//
+// TODO: This should index all interesting directories with dirent calls.
+// getdirentries ?
+// opendir/readdir_r/closedir ?
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/system_error.h"
+#include "llvm/Config/llvm-config.h"
+#include <map>
+#include <set>
+#include <string>
+
+// FIXME: This is terrible, we need this for ::close.
+#if !defined(_MSC_VER) && !defined(__MINGW32__)
+#include <unistd.h>
+#include <sys/uio.h>
+#else
+#include <io.h>
+#endif
+using namespace clang;
+
+// FIXME: Enhance libsystem to support inode and other fields.
+#include <sys/stat.h>
+
+/// NON_EXISTENT_DIR - A special value distinct from null that is used to
+/// represent a dir name that doesn't exist on the disk.
+#define NON_EXISTENT_DIR reinterpret_cast<DirectoryEntry*>((intptr_t)-1)
+
+/// NON_EXISTENT_FILE - A special value distinct from null that is used to
+/// represent a filename that doesn't exist on the disk.
+#define NON_EXISTENT_FILE reinterpret_cast<FileEntry*>((intptr_t)-1)
+
+
+FileEntry::~FileEntry() {
+ // If this FileEntry owns an open file descriptor that never got used, close
+ // it.
+ if (FD != -1) ::close(FD);
+}
+
+//===----------------------------------------------------------------------===//
+// Windows.
+//===----------------------------------------------------------------------===//
+
+#ifdef LLVM_ON_WIN32
+
+namespace {
+ static std::string GetFullPath(const char *relPath) {
+ char *absPathStrPtr = _fullpath(NULL, relPath, 0);
+ assert(absPathStrPtr && "_fullpath() returned NULL!");
+
+ std::string absPath(absPathStrPtr);
+
+ free(absPathStrPtr);
+ return absPath;
+ }
+}
+
+class FileManager::UniqueDirContainer {
+ /// UniqueDirs - Cache from full path to existing directories.
+ ///
+ llvm::StringMap<DirectoryEntry> UniqueDirs;
+
+public:
+ /// getDirectory - Return an existing DirectoryEntry with the given
+ /// name if there is already one; otherwise create and return a
+ /// default-constructed DirectoryEntry.
+ DirectoryEntry &getDirectory(const char *Name,
+ const struct stat & /*StatBuf*/) {
+ std::string FullPath(GetFullPath(Name));
+ return UniqueDirs.GetOrCreateValue(FullPath).getValue();
+ }
+
+ size_t size() const { return UniqueDirs.size(); }
+};
+
+class FileManager::UniqueFileContainer {
+ /// UniqueFiles - Cache from full path to existing files.
+ ///
+ llvm::StringMap<FileEntry, llvm::BumpPtrAllocator> UniqueFiles;
+
+public:
+ /// getFile - Return an existing FileEntry with the given name if
+ /// there is already one; otherwise create and return a
+ /// default-constructed FileEntry.
+ FileEntry &getFile(const char *Name, const struct stat & /*StatBuf*/) {
+ std::string FullPath(GetFullPath(Name));
+
+ // Lowercase string because Windows filesystem is case insensitive.
+ FullPath = StringRef(FullPath).lower();
+ return UniqueFiles.GetOrCreateValue(FullPath).getValue();
+ }
+
+ size_t size() const { return UniqueFiles.size(); }
+};
+
+//===----------------------------------------------------------------------===//
+// Unix-like Systems.
+//===----------------------------------------------------------------------===//
+
+#else
+
+class FileManager::UniqueDirContainer {
+ /// UniqueDirs - Cache from IDs to existing directories.
+ std::map<std::pair<dev_t, ino_t>, DirectoryEntry> UniqueDirs;
+
+public:
+ /// getDirectory - Return an existing DirectoryEntry with the given
+ /// IDs if there is already one; otherwise create and return a
+ /// default-constructed DirectoryEntry.
+ DirectoryEntry &getDirectory(const char * /*Name*/,
+ const struct stat &StatBuf) {
+ return UniqueDirs[std::make_pair(StatBuf.st_dev, StatBuf.st_ino)];
+ }
+
+ size_t size() const { return UniqueDirs.size(); }
+};
+
+class FileManager::UniqueFileContainer {
+ /// UniqueFiles - Cache from IDs to existing files.
+ std::set<FileEntry> UniqueFiles;
+
+public:
+ /// getFile - Return an existing FileEntry with the given IDs if
+ /// there is already one; otherwise create and return a
+ /// default-constructed FileEntry.
+ FileEntry &getFile(const char * /*Name*/, const struct stat &StatBuf) {
+ return
+ const_cast<FileEntry&>(
+ *UniqueFiles.insert(FileEntry(StatBuf.st_dev,
+ StatBuf.st_ino,
+ StatBuf.st_mode)).first);
+ }
+
+ size_t size() const { return UniqueFiles.size(); }
+};
+
+#endif
+
+//===----------------------------------------------------------------------===//
+// Common logic.
+//===----------------------------------------------------------------------===//
+
+FileManager::FileManager(const FileSystemOptions &FSO)
+ : FileSystemOpts(FSO),
+ UniqueRealDirs(*new UniqueDirContainer()),
+ UniqueRealFiles(*new UniqueFileContainer()),
+ SeenDirEntries(64), SeenFileEntries(64), NextFileUID(0) {
+ NumDirLookups = NumFileLookups = 0;
+ NumDirCacheMisses = NumFileCacheMisses = 0;
+}
+
+FileManager::~FileManager() {
+ delete &UniqueRealDirs;
+ delete &UniqueRealFiles;
+ for (unsigned i = 0, e = VirtualFileEntries.size(); i != e; ++i)
+ delete VirtualFileEntries[i];
+ for (unsigned i = 0, e = VirtualDirectoryEntries.size(); i != e; ++i)
+ delete VirtualDirectoryEntries[i];
+}
+
+void FileManager::addStatCache(FileSystemStatCache *statCache,
+ bool AtBeginning) {
+ assert(statCache && "No stat cache provided?");
+ if (AtBeginning || StatCache.get() == 0) {
+ statCache->setNextStatCache(StatCache.take());
+ StatCache.reset(statCache);
+ return;
+ }
+
+ FileSystemStatCache *LastCache = StatCache.get();
+ while (LastCache->getNextStatCache())
+ LastCache = LastCache->getNextStatCache();
+
+ LastCache->setNextStatCache(statCache);
+}
+
+void FileManager::removeStatCache(FileSystemStatCache *statCache) {
+ if (!statCache)
+ return;
+
+ if (StatCache.get() == statCache) {
+ // This is the first stat cache.
+ StatCache.reset(StatCache->takeNextStatCache());
+ return;
+ }
+
+ // Find the stat cache in the list.
+ FileSystemStatCache *PrevCache = StatCache.get();
+ while (PrevCache && PrevCache->getNextStatCache() != statCache)
+ PrevCache = PrevCache->getNextStatCache();
+
+ assert(PrevCache && "Stat cache not found for removal");
+ PrevCache->setNextStatCache(statCache->getNextStatCache());
+}
+
+/// \brief Retrieve the directory that the given file name resides in.
+/// Filename can point to either a real file or a virtual file.
+static const DirectoryEntry *getDirectoryFromFile(FileManager &FileMgr,
+ StringRef Filename,
+ bool CacheFailure) {
+ if (Filename.empty())
+ return NULL;
+
+ if (llvm::sys::path::is_separator(Filename[Filename.size() - 1]))
+ return NULL; // If Filename is a directory.
+
+ StringRef DirName = llvm::sys::path::parent_path(Filename);
+ // Use the current directory if file has no path component.
+ if (DirName.empty())
+ DirName = ".";
+
+ return FileMgr.getDirectory(DirName, CacheFailure);
+}
+
+/// Add all ancestors of the given path (pointing to either a file or
+/// a directory) as virtual directories.
+void FileManager::addAncestorsAsVirtualDirs(StringRef Path) {
+ StringRef DirName = llvm::sys::path::parent_path(Path);
+ if (DirName.empty())
+ return;
+
+ llvm::StringMapEntry<DirectoryEntry *> &NamedDirEnt =
+ SeenDirEntries.GetOrCreateValue(DirName);
+
+ // When caching a virtual directory, we always cache its ancestors
+ // at the same time. Therefore, if DirName is already in the cache,
+ // we don't need to recurse as its ancestors must also already be in
+ // the cache.
+ if (NamedDirEnt.getValue())
+ return;
+
+ // Add the virtual directory to the cache.
+ DirectoryEntry *UDE = new DirectoryEntry;
+ UDE->Name = NamedDirEnt.getKeyData();
+ NamedDirEnt.setValue(UDE);
+ VirtualDirectoryEntries.push_back(UDE);
+
+ // Recursively add the other ancestors.
+ addAncestorsAsVirtualDirs(DirName);
+}
+
+/// getDirectory - Lookup, cache, and verify the specified directory
+/// (real or virtual). This returns NULL if the directory doesn't
+/// exist.
+///
+const DirectoryEntry *FileManager::getDirectory(StringRef DirName,
+ bool CacheFailure) {
+ // stat doesn't like trailing separators.
+ // At least, on Win32 MSVCRT, stat() cannot strip trailing '/'.
+ // (though it can strip '\\')
+ if (DirName.size() > 1 && llvm::sys::path::is_separator(DirName.back()))
+ DirName = DirName.substr(0, DirName.size()-1);
+
+ ++NumDirLookups;
+ llvm::StringMapEntry<DirectoryEntry *> &NamedDirEnt =
+ SeenDirEntries.GetOrCreateValue(DirName);
+
+ // See if there was already an entry in the map. Note that the map
+ // contains both virtual and real directories.
+ if (NamedDirEnt.getValue())
+ return NamedDirEnt.getValue() == NON_EXISTENT_DIR
+ ? 0 : NamedDirEnt.getValue();
+
+ ++NumDirCacheMisses;
+
+ // By default, initialize it to invalid.
+ NamedDirEnt.setValue(NON_EXISTENT_DIR);
+
+ // Get the null-terminated directory name as stored as the key of the
+ // SeenDirEntries map.
+ const char *InterndDirName = NamedDirEnt.getKeyData();
+
+ // Check to see if the directory exists.
+ struct stat StatBuf;
+ if (getStatValue(InterndDirName, StatBuf, 0/*directory lookup*/)) {
+ // There's no real directory at the given path.
+ if (!CacheFailure)
+ SeenDirEntries.erase(DirName);
+ return 0;
+ }
+
+ // It exists. See if we have already opened a directory with the
+ // same inode (this occurs on Unix-like systems when one dir is
+ // symlinked to another, for example) or the same path (on
+ // Windows).
+ DirectoryEntry &UDE = UniqueRealDirs.getDirectory(InterndDirName, StatBuf);
+
+ NamedDirEnt.setValue(&UDE);
+ if (!UDE.getName()) {
+ // We don't have this directory yet, add it. We use the string
+ // key from the SeenDirEntries map as the string.
+ UDE.Name = InterndDirName;
+ }
+
+ return &UDE;
+}
+
+/// getFile - Lookup, cache, and verify the specified file (real or
+/// virtual). This returns NULL if the file doesn't exist.
+///
+const FileEntry *FileManager::getFile(StringRef Filename, bool openFile,
+ bool CacheFailure) {
+ ++NumFileLookups;
+
+ // See if there is already an entry in the map.
+ llvm::StringMapEntry<FileEntry *> &NamedFileEnt =
+ SeenFileEntries.GetOrCreateValue(Filename);
+
+ // See if there is already an entry in the map.
+ if (NamedFileEnt.getValue())
+ return NamedFileEnt.getValue() == NON_EXISTENT_FILE
+ ? 0 : NamedFileEnt.getValue();
+
+ ++NumFileCacheMisses;
+
+ // By default, initialize it to invalid.
+ NamedFileEnt.setValue(NON_EXISTENT_FILE);
+
+ // Get the null-terminated file name as stored as the key of the
+ // SeenFileEntries map.
+ const char *InterndFileName = NamedFileEnt.getKeyData();
+
+ // Look up the directory for the file. When looking up something like
+ // sys/foo.h we'll discover all of the search directories that have a 'sys'
+ // subdirectory. This will let us avoid having to waste time on known-to-fail
+ // searches when we go to find sys/bar.h, because all the search directories
+ // without a 'sys' subdir will get a cached failure result.
+ const DirectoryEntry *DirInfo = getDirectoryFromFile(*this, Filename,
+ CacheFailure);
+ if (DirInfo == 0) { // Directory doesn't exist, file can't exist.
+ if (!CacheFailure)
+ SeenFileEntries.erase(Filename);
+
+ return 0;
+ }
+
+ // FIXME: Use the directory info to prune this, before doing the stat syscall.
+ // FIXME: This will reduce the # syscalls.
+
+ // Nope, there isn't. Check to see if the file exists.
+ int FileDescriptor = -1;
+ struct stat StatBuf;
+ if (getStatValue(InterndFileName, StatBuf, &FileDescriptor)) {
+ // There's no real file at the given path.
+ if (!CacheFailure)
+ SeenFileEntries.erase(Filename);
+
+ return 0;
+ }
+
+ if (FileDescriptor != -1 && !openFile) {
+ close(FileDescriptor);
+ FileDescriptor = -1;
+ }
+
+ // It exists. See if we have already opened a file with the same inode.
+ // This occurs when one dir is symlinked to another, for example.
+ FileEntry &UFE = UniqueRealFiles.getFile(InterndFileName, StatBuf);
+
+ NamedFileEnt.setValue(&UFE);
+ if (UFE.getName()) { // Already have an entry with this inode, return it.
+ // If the stat process opened the file, close it to avoid a FD leak.
+ if (FileDescriptor != -1)
+ close(FileDescriptor);
+
+ return &UFE;
+ }
+
+ // Otherwise, we don't have this file yet; add it.
+ // FIXME: Change the name to be a char* that points back to the
+ // 'SeenFileEntries' key.
+ UFE.Name = InterndFileName;
+ UFE.Size = StatBuf.st_size;
+ UFE.ModTime = StatBuf.st_mtime;
+ UFE.Dir = DirInfo;
+ UFE.UID = NextFileUID++;
+ UFE.FD = FileDescriptor;
+ return &UFE;
+}
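+
+// Illustrative usage (a sketch; the path is hypothetical): looking a file up
+// through the cache without opening it, then reading its cached size.
+//
+//   FileManager FileMgr((FileSystemOptions()));
+//   if (const FileEntry *FE = FileMgr.getFile("include/foo.h",
+//                                             /*openFile=*/false))
+//     llvm::errs() << FE->getName() << ": " << FE->getSize() << " bytes\n";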
+
+const FileEntry *
+FileManager::getVirtualFile(StringRef Filename, off_t Size,
+ time_t ModificationTime) {
+ ++NumFileLookups;
+
+ // See if there is already an entry in the map.
+ llvm::StringMapEntry<FileEntry *> &NamedFileEnt =
+ SeenFileEntries.GetOrCreateValue(Filename);
+
+ // See if there is already an entry in the map.
+ if (NamedFileEnt.getValue() && NamedFileEnt.getValue() != NON_EXISTENT_FILE)
+ return NamedFileEnt.getValue();
+
+ ++NumFileCacheMisses;
+
+ // By default, initialize it to invalid.
+ NamedFileEnt.setValue(NON_EXISTENT_FILE);
+
+ addAncestorsAsVirtualDirs(Filename);
+ FileEntry *UFE = 0;
+
+ // Now that all ancestors of Filename are in the cache, the
+ // following call is guaranteed to find the DirectoryEntry from the
+ // cache.
+ const DirectoryEntry *DirInfo = getDirectoryFromFile(*this, Filename,
+ /*CacheFailure=*/true);
+ assert(DirInfo &&
+ "The directory of a virtual file should already be in the cache.");
+
+ // Check to see if the file exists. If so, drop the virtual file.
+ int FileDescriptor = -1;
+ struct stat StatBuf;
+ const char *InterndFileName = NamedFileEnt.getKeyData();
+ if (getStatValue(InterndFileName, StatBuf, &FileDescriptor) == 0) {
+ // If the stat process opened the file, close it to avoid a FD leak.
+ if (FileDescriptor != -1)
+ close(FileDescriptor);
+
+ StatBuf.st_size = Size;
+ StatBuf.st_mtime = ModificationTime;
+ UFE = &UniqueRealFiles.getFile(InterndFileName, StatBuf);
+
+ NamedFileEnt.setValue(UFE);
+
+ // If we had already opened this file, close it now so we don't
+ // leak the descriptor. We're not going to use the file
+ // descriptor anyway, since this is a virtual file.
+ if (UFE->FD != -1) {
+ close(UFE->FD);
+ UFE->FD = -1;
+ }
+
+ // If we already have an entry with this inode, return it.
+ if (UFE->getName())
+ return UFE;
+ }
+
+ if (!UFE) {
+ UFE = new FileEntry();
+ VirtualFileEntries.push_back(UFE);
+ NamedFileEnt.setValue(UFE);
+ }
+
+ UFE->Name = InterndFileName;
+ UFE->Size = Size;
+ UFE->ModTime = ModificationTime;
+ UFE->Dir = DirInfo;
+ UFE->UID = NextFileUID++;
+ UFE->FD = -1;
+ return UFE;
+}
+
+void FileManager::FixupRelativePath(SmallVectorImpl<char> &path) const {
+ StringRef pathRef(path.data(), path.size());
+
+ if (FileSystemOpts.WorkingDir.empty()
+ || llvm::sys::path::is_absolute(pathRef))
+ return;
+
+ SmallString<128> NewPath(FileSystemOpts.WorkingDir);
+ llvm::sys::path::append(NewPath, pathRef);
+ path = NewPath;
+}
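+
+// Illustrative behaviour (values are hypothetical): on a FileManager whose
+// FileSystemOpts.WorkingDir is "/build/obj",
+//
+//   SmallString<128> P("gen/config.h");
+//   FixupRelativePath(P);            // P becomes "/build/obj/gen/config.h"
+//
+// while absolute paths, and all paths when WorkingDir is empty, are left
+// untouched.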
+
+llvm::MemoryBuffer *FileManager::
+getBufferForFile(const FileEntry *Entry, std::string *ErrorStr) {
+ OwningPtr<llvm::MemoryBuffer> Result;
+ llvm::error_code ec;
+
+ const char *Filename = Entry->getName();
+ // If the file is already open, use the open file descriptor.
+ if (Entry->FD != -1) {
+ ec = llvm::MemoryBuffer::getOpenFile(Entry->FD, Filename, Result,
+ Entry->getSize());
+ if (ErrorStr)
+ *ErrorStr = ec.message();
+
+ close(Entry->FD);
+ Entry->FD = -1;
+ return Result.take();
+ }
+
+ // Otherwise, open the file.
+
+ if (FileSystemOpts.WorkingDir.empty()) {
+ ec = llvm::MemoryBuffer::getFile(Filename, Result, Entry->getSize());
+ if (ec && ErrorStr)
+ *ErrorStr = ec.message();
+ return Result.take();
+ }
+
+ SmallString<128> FilePath(Entry->getName());
+ FixupRelativePath(FilePath);
+ ec = llvm::MemoryBuffer::getFile(FilePath.str(), Result, Entry->getSize());
+ if (ec && ErrorStr)
+ *ErrorStr = ec.message();
+ return Result.take();
+}
+
+llvm::MemoryBuffer *FileManager::
+getBufferForFile(StringRef Filename, std::string *ErrorStr) {
+ OwningPtr<llvm::MemoryBuffer> Result;
+ llvm::error_code ec;
+ if (FileSystemOpts.WorkingDir.empty()) {
+ ec = llvm::MemoryBuffer::getFile(Filename, Result);
+ if (ec && ErrorStr)
+ *ErrorStr = ec.message();
+ return Result.take();
+ }
+
+ SmallString<128> FilePath(Filename);
+ FixupRelativePath(FilePath);
+ ec = llvm::MemoryBuffer::getFile(FilePath.c_str(), Result);
+ if (ec && ErrorStr)
+ *ErrorStr = ec.message();
+ return Result.take();
+}
+
+/// getStatValue - Get the 'stat' information for the specified path,
+/// using the cache to accelerate it if possible. This returns true
+/// if the path points to a virtual file or does not exist, or returns
+/// false if it's an existent real file. If FileDescriptor is NULL,
+/// do directory look-up instead of file look-up.
+bool FileManager::getStatValue(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor) {
+ // FIXME: FileSystemOpts shouldn't be passed in here, all paths should be
+ // absolute!
+ if (FileSystemOpts.WorkingDir.empty())
+ return FileSystemStatCache::get(Path, StatBuf, FileDescriptor,
+ StatCache.get());
+
+ SmallString<128> FilePath(Path);
+ FixupRelativePath(FilePath);
+
+ return FileSystemStatCache::get(FilePath.c_str(), StatBuf, FileDescriptor,
+ StatCache.get());
+}
+
+bool FileManager::getNoncachedStatValue(StringRef Path,
+ struct stat &StatBuf) {
+ SmallString<128> FilePath(Path);
+ FixupRelativePath(FilePath);
+
+ return ::stat(FilePath.c_str(), &StatBuf) != 0;
+}
+
+void FileManager::GetUniqueIDMapping(
+ SmallVectorImpl<const FileEntry *> &UIDToFiles) const {
+ UIDToFiles.clear();
+ UIDToFiles.resize(NextFileUID);
+
+ // Map file entries
+ for (llvm::StringMap<FileEntry*, llvm::BumpPtrAllocator>::const_iterator
+ FE = SeenFileEntries.begin(), FEEnd = SeenFileEntries.end();
+ FE != FEEnd; ++FE)
+ if (FE->getValue() && FE->getValue() != NON_EXISTENT_FILE)
+ UIDToFiles[FE->getValue()->getUID()] = FE->getValue();
+
+ // Map virtual file entries
+ for (SmallVector<FileEntry*, 4>::const_iterator
+ VFE = VirtualFileEntries.begin(), VFEEnd = VirtualFileEntries.end();
+ VFE != VFEEnd; ++VFE)
+ if (*VFE && *VFE != NON_EXISTENT_FILE)
+ UIDToFiles[(*VFE)->getUID()] = *VFE;
+}
+
+
+void FileManager::PrintStats() const {
+ llvm::errs() << "\n*** File Manager Stats:\n";
+ llvm::errs() << UniqueRealFiles.size() << " real files found, "
+ << UniqueRealDirs.size() << " real dirs found.\n";
+ llvm::errs() << VirtualFileEntries.size() << " virtual files found, "
+ << VirtualDirectoryEntries.size() << " virtual dirs found.\n";
+ llvm::errs() << NumDirLookups << " dir lookups, "
+ << NumDirCacheMisses << " dir cache misses.\n";
+ llvm::errs() << NumFileLookups << " file lookups, "
+ << NumFileCacheMisses << " file cache misses.\n";
+
+ //llvm::errs() << PagesMapped << BytesOfPagesMapped << FSLookups;
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/FileSystemStatCache.cpp b/contrib/llvm/tools/clang/lib/Basic/FileSystemStatCache.cpp
new file mode 100644
index 0000000..875d397
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/FileSystemStatCache.cpp
@@ -0,0 +1,122 @@
+//===--- FileSystemStatCache.cpp - Caching for 'stat' calls ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the FileSystemStatCache interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/FileSystemStatCache.h"
+#include "llvm/Support/Path.h"
+#include <fcntl.h>
+
+// FIXME: This is terrible, we need this for ::close.
+#if !defined(_MSC_VER) && !defined(__MINGW32__)
+#include <unistd.h>
+#include <sys/uio.h>
+#else
+#include <io.h>
+#endif
+using namespace clang;
+
+#if defined(_MSC_VER)
+#define S_ISDIR(s) ((_S_IFDIR & s) !=0)
+#endif
+
+void FileSystemStatCache::anchor() { }
+
+/// FileSystemStatCache::get - Get the 'stat' information for the specified
+/// path, using the cache to accelerate it if possible. This returns true if
+/// the path does not exist or false if it exists.
+///
+/// If FileDescriptor is non-null, then this lookup should only return success
+/// for files (not directories). If it is null this lookup should only return
+/// success for directories (not files). On a successful file lookup, the
+/// implementation can optionally fill in FileDescriptor with a valid
+/// descriptor and the client guarantees that it will close it.
+bool FileSystemStatCache::get(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor, FileSystemStatCache *Cache) {
+ LookupResult R;
+ bool isForDir = FileDescriptor == 0;
+
+ // If we have a cache, use it to resolve the stat query.
+ if (Cache)
+ R = Cache->getStat(Path, StatBuf, FileDescriptor);
+ else if (isForDir) {
+ // If this is a directory and we have no cache, just go to the file system.
+ R = ::stat(Path, &StatBuf) != 0 ? CacheMissing : CacheExists;
+ } else {
+ // Otherwise, we have to go to the filesystem. We can always just use
+ // 'stat' here, but (for files) the client is asking whether the file exists
+ // because it wants to turn around and *open* it. It is more efficient to
+ // do "open+fstat" on success than it is to do "stat+open".
+ //
+ // Because of this, check to see if the file exists with 'open'. If the
+ // open succeeds, use fstat to get the stat info.
+ int OpenFlags = O_RDONLY;
+#ifdef O_BINARY
+ OpenFlags |= O_BINARY; // Open input file in binary mode on win32.
+#endif
+ *FileDescriptor = ::open(Path, OpenFlags);
+
+ if (*FileDescriptor == -1) {
+ // If the open fails, our "stat" fails.
+ R = CacheMissing;
+ } else {
+ // Otherwise, the open succeeded. Do an fstat to get the information
+ // about the file. We'll end up returning the open file descriptor to the
+ // client to do what they please with it.
+ if (::fstat(*FileDescriptor, &StatBuf) == 0)
+ R = CacheExists;
+ else {
+ // fstat rarely fails. If it does, claim the initial open didn't
+ // succeed.
+ R = CacheMissing;
+ ::close(*FileDescriptor);
+ *FileDescriptor = -1;
+ }
+ }
+ }
+
+ // If the path doesn't exist, return failure.
+ if (R == CacheMissing) return true;
+
+ // If the path exists, make sure that its "directoryness" matches the client's
+ // demands.
+ if (S_ISDIR(StatBuf.st_mode) != isForDir) {
+ // If not, close the file if opened.
+ if (FileDescriptor && *FileDescriptor != -1) {
+ ::close(*FileDescriptor);
+ *FileDescriptor = -1;
+ }
+
+ return true;
+ }
+
+ return false;
+}
+
+
+MemorizeStatCalls::LookupResult
+MemorizeStatCalls::getStat(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor) {
+ LookupResult Result = statChained(Path, StatBuf, FileDescriptor);
+
+ // Do not cache failed stats: it is easy to construct common inconsistent
+ // situations if we do, and they are not important for PCH performance (which
+ // currently only needs the stats to construct the initial FileManager
+ // entries).
+ if (Result == CacheMissing)
+ return Result;
+
+ // Cache file 'stat' results and directories with absolute paths.
+ if (!S_ISDIR(StatBuf.st_mode) || llvm::sys::path::is_absolute(Path))
+ StatCalls[Path] = StatBuf;
+
+ return Result;
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp b/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp
new file mode 100644
index 0000000..43899f0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/IdentifierTable.cpp
@@ -0,0 +1,524 @@
+//===--- IdentifierTable.cpp - Hash table for identifier lookup -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the IdentifierInfo, IdentifierVisitor, and
+// IdentifierTable interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cstdio>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// IdentifierInfo Implementation
+//===----------------------------------------------------------------------===//
+
+IdentifierInfo::IdentifierInfo() {
+ TokenID = tok::identifier;
+ ObjCOrBuiltinID = 0;
+ HasMacro = false;
+ IsExtension = false;
+ IsCXX11CompatKeyword = false;
+ IsPoisoned = false;
+ IsCPPOperatorKeyword = false;
+ NeedsHandleIdentifier = false;
+ IsFromAST = false;
+ ChangedAfterLoad = false;
+ RevertedTokenID = false;
+ OutOfDate = false;
+ IsModulesImport = false;
+ FETokenInfo = 0;
+ Entry = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// IdentifierTable Implementation
+//===----------------------------------------------------------------------===//
+
+IdentifierIterator::~IdentifierIterator() { }
+
+IdentifierInfoLookup::~IdentifierInfoLookup() {}
+
+namespace {
+ /// \brief A simple identifier lookup iterator that represents an
+ /// empty sequence of identifiers.
+ class EmptyLookupIterator : public IdentifierIterator
+ {
+ public:
+ virtual StringRef Next() { return StringRef(); }
+ };
+}
+
+IdentifierIterator *IdentifierInfoLookup::getIdentifiers() const {
+ return new EmptyLookupIterator();
+}
+
+ExternalIdentifierLookup::~ExternalIdentifierLookup() {}
+
+IdentifierTable::IdentifierTable(const LangOptions &LangOpts,
+ IdentifierInfoLookup* externalLookup)
+ : HashTable(8192), // Start with space for 8K identifiers.
+ ExternalLookup(externalLookup) {
+
+ // Populate the identifier table with info about keywords for the current
+ // language.
+ AddKeywords(LangOpts);
+
+
+ // Add the '_experimental_modules_import' contextual keyword.
+ get("__experimental_modules_import").setModulesImport(true);
+}
+
+//===----------------------------------------------------------------------===//
+// Language Keyword Implementation
+//===----------------------------------------------------------------------===//
+
+// Constants for TokenKinds.def
+namespace {
+ enum {
+ KEYC99 = 0x1,
+ KEYCXX = 0x2,
+ KEYCXX0X = 0x4,
+ KEYGNU = 0x8,
+ KEYMS = 0x10,
+ BOOLSUPPORT = 0x20,
+ KEYALTIVEC = 0x40,
+ KEYNOCXX = 0x80,
+ KEYBORLAND = 0x100,
+ KEYOPENCL = 0x200,
+ KEYC11 = 0x400,
+ KEYARC = 0x800,
+ KEYALL = 0x0fff
+ };
+}
+
+/// AddKeyword - This method is used to associate a token ID with specific
+/// identifiers because they are language keywords. This causes the lexer to
+/// automatically map matching identifiers to specialized token codes.
+///
+/// The C90/C99/CPP/CPP0x flags are set to 3 if the token is a keyword in a
+/// future language standard, set to 2 if the token should be enabled in the
+/// specified language, set to 1 if it is an extension in the specified
+/// language, and set to 0 if disabled in the specified language.
+static void AddKeyword(StringRef Keyword,
+ tok::TokenKind TokenCode, unsigned Flags,
+ const LangOptions &LangOpts, IdentifierTable &Table) {
+ unsigned AddResult = 0;
+ if (Flags == KEYALL) AddResult = 2;
+ else if (LangOpts.CPlusPlus && (Flags & KEYCXX)) AddResult = 2;
+ else if (LangOpts.CPlusPlus0x && (Flags & KEYCXX0X)) AddResult = 2;
+ else if (LangOpts.C99 && (Flags & KEYC99)) AddResult = 2;
+ else if (LangOpts.GNUKeywords && (Flags & KEYGNU)) AddResult = 1;
+ else if (LangOpts.MicrosoftExt && (Flags & KEYMS)) AddResult = 1;
+ else if (LangOpts.Borland && (Flags & KEYBORLAND)) AddResult = 1;
+ else if (LangOpts.Bool && (Flags & BOOLSUPPORT)) AddResult = 2;
+ else if (LangOpts.AltiVec && (Flags & KEYALTIVEC)) AddResult = 2;
+ else if (LangOpts.OpenCL && (Flags & KEYOPENCL)) AddResult = 2;
+ else if (!LangOpts.CPlusPlus && (Flags & KEYNOCXX)) AddResult = 2;
+ else if (LangOpts.C11 && (Flags & KEYC11)) AddResult = 2;
+ // We treat bridge casts as objective-C keywords so we can warn on them
+ // in non-arc mode.
+ else if (LangOpts.ObjC2 && (Flags & KEYARC)) AddResult = 2;
+ else if (LangOpts.CPlusPlus && (Flags & KEYCXX0X)) AddResult = 3;
+
+ // Don't add this keyword if disabled in this language.
+ if (AddResult == 0) return;
+
+ IdentifierInfo &Info =
+ Table.get(Keyword, AddResult == 3 ? tok::identifier : TokenCode);
+ Info.setIsExtensionToken(AddResult == 1);
+ Info.setIsCXX11CompatKeyword(AddResult == 3);
+}
+
+/// AddCXXOperatorKeyword - Register an alternative representation of a C++
+/// operator keyword.
+static void AddCXXOperatorKeyword(StringRef Keyword,
+ tok::TokenKind TokenCode,
+ IdentifierTable &Table) {
+ IdentifierInfo &Info = Table.get(Keyword, TokenCode);
+ Info.setIsCPlusPlusOperatorKeyword();
+}
+
+/// AddObjCKeyword - Register an Objective-C @keyword like "class", "selector",
+/// or "property".
+static void AddObjCKeyword(StringRef Name,
+ tok::ObjCKeywordKind ObjCID,
+ IdentifierTable &Table) {
+ Table.get(Name).setObjCKeywordID(ObjCID);
+}
+
+/// AddKeywords - Add all keywords to the symbol table.
+///
+void IdentifierTable::AddKeywords(const LangOptions &LangOpts) {
+ // Add keywords and tokens for the current language.
+#define KEYWORD(NAME, FLAGS) \
+ AddKeyword(StringRef(#NAME), tok::kw_ ## NAME, \
+ FLAGS, LangOpts, *this);
+#define ALIAS(NAME, TOK, FLAGS) \
+ AddKeyword(StringRef(NAME), tok::kw_ ## TOK, \
+ FLAGS, LangOpts, *this);
+#define CXX_KEYWORD_OPERATOR(NAME, ALIAS) \
+ if (LangOpts.CXXOperatorNames) \
+ AddCXXOperatorKeyword(StringRef(#NAME), tok::ALIAS, *this);
+#define OBJC1_AT_KEYWORD(NAME) \
+ if (LangOpts.ObjC1) \
+ AddObjCKeyword(StringRef(#NAME), tok::objc_##NAME, *this);
+#define OBJC2_AT_KEYWORD(NAME) \
+ if (LangOpts.ObjC2) \
+ AddObjCKeyword(StringRef(#NAME), tok::objc_##NAME, *this);
+#define TESTING_KEYWORD(NAME, FLAGS)
+#include "clang/Basic/TokenKinds.def"
+
+ if (LangOpts.ParseUnknownAnytype)
+ AddKeyword("__unknown_anytype", tok::kw___unknown_anytype, KEYALL,
+ LangOpts, *this);
+}
+
+tok::PPKeywordKind IdentifierInfo::getPPKeywordID() const {
+ // We use a perfect hash function here involving the length of the keyword,
+ // the first and third character. For preprocessor ID's there are no
+ // collisions (if there were, the switch below would complain about duplicate
+ // case values). Note that this depends on 'if' being null terminated.
+
+#define HASH(LEN, FIRST, THIRD) \
+ (LEN << 5) + (((FIRST-'a') + (THIRD-'a')) & 31)
+#define CASE(LEN, FIRST, THIRD, NAME) \
+ case HASH(LEN, FIRST, THIRD): \
+ return memcmp(Name, #NAME, LEN) ? tok::pp_not_keyword : tok::pp_ ## NAME
+
+ unsigned Len = getLength();
+ if (Len < 2) return tok::pp_not_keyword;
+ const char *Name = getNameStart();
+ switch (HASH(Len, Name[0], Name[2])) {
+ default: return tok::pp_not_keyword;
+ CASE( 2, 'i', '\0', if);
+ CASE( 4, 'e', 'i', elif);
+ CASE( 4, 'e', 's', else);
+ CASE( 4, 'l', 'n', line);
+ CASE( 4, 's', 'c', sccs);
+ CASE( 5, 'e', 'd', endif);
+ CASE( 5, 'e', 'r', error);
+ CASE( 5, 'i', 'e', ident);
+ CASE( 5, 'i', 'd', ifdef);
+ CASE( 5, 'u', 'd', undef);
+
+ CASE( 6, 'a', 's', assert);
+ CASE( 6, 'd', 'f', define);
+ CASE( 6, 'i', 'n', ifndef);
+ CASE( 6, 'i', 'p', import);
+ CASE( 6, 'p', 'a', pragma);
+
+ CASE( 7, 'd', 'f', defined);
+ CASE( 7, 'i', 'c', include);
+ CASE( 7, 'w', 'r', warning);
+
+ CASE( 8, 'u', 'a', unassert);
+ CASE(12, 'i', 'c', include_next);
+
+ CASE(14, '_', 'p', __public_macro);
+
+ CASE(15, '_', 'p', __private_macro);
+
+ CASE(16, '_', 'i', __include_macros);
+#undef CASE
+#undef HASH
+ }
+}
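+
+// Worked example of the hash above: for "define", LEN == 6, FIRST == 'd' and
+// THIRD == 'f', so
+//   HASH(6, 'd', 'f') == (6 << 5) + ((('d'-'a') + ('f'-'a')) & 31)
+//                     == 192 + ((3 + 5) & 31)
+//                     == 200,
+// which matches the case label generated by CASE(6, 'd', 'f', define).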
+
+//===----------------------------------------------------------------------===//
+// Stats Implementation
+//===----------------------------------------------------------------------===//
+
+/// PrintStats - Print statistics about how well the identifier table is doing
+/// at hashing identifiers.
+void IdentifierTable::PrintStats() const {
+ unsigned NumBuckets = HashTable.getNumBuckets();
+ unsigned NumIdentifiers = HashTable.getNumItems();
+ unsigned NumEmptyBuckets = NumBuckets-NumIdentifiers;
+ unsigned AverageIdentifierSize = 0;
+ unsigned MaxIdentifierLength = 0;
+
+ // TODO: Figure out maximum times an identifier had to probe for -stats.
+ for (llvm::StringMap<IdentifierInfo*, llvm::BumpPtrAllocator>::const_iterator
+ I = HashTable.begin(), E = HashTable.end(); I != E; ++I) {
+ unsigned IdLen = I->getKeyLength();
+ AverageIdentifierSize += IdLen;
+ if (MaxIdentifierLength < IdLen)
+ MaxIdentifierLength = IdLen;
+ }
+
+ fprintf(stderr, "\n*** Identifier Table Stats:\n");
+ fprintf(stderr, "# Identifiers: %d\n", NumIdentifiers);
+ fprintf(stderr, "# Empty Buckets: %d\n", NumEmptyBuckets);
+ fprintf(stderr, "Hash density (#identifiers per bucket): %f\n",
+ NumIdentifiers/(double)NumBuckets);
+ fprintf(stderr, "Ave identifier length: %f\n",
+ (AverageIdentifierSize/(double)NumIdentifiers));
+ fprintf(stderr, "Max identifier length: %d\n", MaxIdentifierLength);
+
+ // Compute statistics about the memory allocated for identifiers.
+ HashTable.getAllocator().PrintStats();
+}
+
+//===----------------------------------------------------------------------===//
+// SelectorTable Implementation
+//===----------------------------------------------------------------------===//
+
+unsigned llvm::DenseMapInfo<clang::Selector>::getHashValue(clang::Selector S) {
+ return DenseMapInfo<void*>::getHashValue(S.getAsOpaquePtr());
+}
+
+namespace clang {
+/// MultiKeywordSelector - One of these variable length records is kept for each
+/// selector containing more than one keyword. We use a folding set
+/// to unique aggregate names (keyword selectors in ObjC parlance). Access to
+/// this class is provided strictly through Selector.
+class MultiKeywordSelector
+ : public DeclarationNameExtra, public llvm::FoldingSetNode {
+ MultiKeywordSelector(unsigned nKeys) {
+ ExtraKindOrNumArgs = NUM_EXTRA_KINDS + nKeys;
+ }
+public:
+ // Constructor for keyword selectors.
+ MultiKeywordSelector(unsigned nKeys, IdentifierInfo **IIV) {
+ assert((nKeys > 1) && "not a multi-keyword selector");
+ ExtraKindOrNumArgs = NUM_EXTRA_KINDS + nKeys;
+
+ // Fill in the trailing keyword array.
+ IdentifierInfo **KeyInfo = reinterpret_cast<IdentifierInfo **>(this+1);
+ for (unsigned i = 0; i != nKeys; ++i)
+ KeyInfo[i] = IIV[i];
+ }
+
+ // getName - Derive the full selector name and return it.
+ std::string getName() const;
+
+ unsigned getNumArgs() const { return ExtraKindOrNumArgs - NUM_EXTRA_KINDS; }
+
+ typedef IdentifierInfo *const *keyword_iterator;
+ keyword_iterator keyword_begin() const {
+ return reinterpret_cast<keyword_iterator>(this+1);
+ }
+ keyword_iterator keyword_end() const {
+ return keyword_begin()+getNumArgs();
+ }
+ IdentifierInfo *getIdentifierInfoForSlot(unsigned i) const {
+ assert(i < getNumArgs() && "getIdentifierInfoForSlot(): illegal index");
+ return keyword_begin()[i];
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ keyword_iterator ArgTys, unsigned NumArgs) {
+ ID.AddInteger(NumArgs);
+ for (unsigned i = 0; i != NumArgs; ++i)
+ ID.AddPointer(ArgTys[i]);
+ }
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, keyword_begin(), getNumArgs());
+ }
+};
+} // end namespace clang.
+
+unsigned Selector::getNumArgs() const {
+ unsigned IIF = getIdentifierInfoFlag();
+ if (IIF == ZeroArg)
+ return 0;
+ if (IIF == OneArg)
+ return 1;
+ // We point to a MultiKeywordSelector (pointer doesn't contain any flags).
+ MultiKeywordSelector *SI = reinterpret_cast<MultiKeywordSelector *>(InfoPtr);
+ return SI->getNumArgs();
+}
+
+IdentifierInfo *Selector::getIdentifierInfoForSlot(unsigned argIndex) const {
+ if (getIdentifierInfoFlag()) {
+ assert(argIndex == 0 && "illegal keyword index");
+ return getAsIdentifierInfo();
+ }
+ // We point to a MultiKeywordSelector (pointer doesn't contain any flags).
+ MultiKeywordSelector *SI = reinterpret_cast<MultiKeywordSelector *>(InfoPtr);
+ return SI->getIdentifierInfoForSlot(argIndex);
+}
+
+StringRef Selector::getNameForSlot(unsigned int argIndex) const {
+ IdentifierInfo *II = getIdentifierInfoForSlot(argIndex);
+ return II? II->getName() : StringRef();
+}
+
+std::string MultiKeywordSelector::getName() const {
+ SmallString<256> Str;
+ llvm::raw_svector_ostream OS(Str);
+ for (keyword_iterator I = keyword_begin(), E = keyword_end(); I != E; ++I) {
+ if (*I)
+ OS << (*I)->getName();
+ OS << ':';
+ }
+
+ return OS.str();
+}
+
+std::string Selector::getAsString() const {
+ if (InfoPtr == 0)
+ return "<null selector>";
+
+ if (InfoPtr & ArgFlags) {
+ IdentifierInfo *II = getAsIdentifierInfo();
+
+ // If the number of arguments is 0 then II is guaranteed to not be null.
+ if (getNumArgs() == 0)
+ return II->getName();
+
+ if (!II)
+ return ":";
+
+ return II->getName().str() + ":";
+ }
+
+ // We have a multiple keyword selector (no embedded flags).
+ return reinterpret_cast<MultiKeywordSelector *>(InfoPtr)->getName();
+}
+
+/// Interpreting the given string using the normal CamelCase
+/// conventions, determine whether the given string starts with the
+/// given "word", which is assumed to end in a lowercase letter.
+static bool startsWithWord(StringRef name, StringRef word) {
+ if (name.size() < word.size()) return false;
+ return ((name.size() == word.size() ||
+ !islower(name[word.size()]))
+ && name.startswith(word));
+}
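+
+// For example, startsWithWord("initWithFrame", "init") and
+// startsWithWord("init", "init") are true, while
+// startsWithWord("initialize", "init") is false because the character
+// following "init" is lowercase.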
+
+ObjCMethodFamily Selector::getMethodFamilyImpl(Selector sel) {
+ IdentifierInfo *first = sel.getIdentifierInfoForSlot(0);
+ if (!first) return OMF_None;
+
+ StringRef name = first->getName();
+ if (sel.isUnarySelector()) {
+ if (name == "autorelease") return OMF_autorelease;
+ if (name == "dealloc") return OMF_dealloc;
+ if (name == "finalize") return OMF_finalize;
+ if (name == "release") return OMF_release;
+ if (name == "retain") return OMF_retain;
+ if (name == "retainCount") return OMF_retainCount;
+ if (name == "self") return OMF_self;
+ }
+
+ if (name == "performSelector") return OMF_performSelector;
+
+ // The other method families may begin with a prefix of underscores.
+ while (!name.empty() && name.front() == '_')
+ name = name.substr(1);
+
+ if (name.empty()) return OMF_None;
+ switch (name.front()) {
+ case 'a':
+ if (startsWithWord(name, "alloc")) return OMF_alloc;
+ break;
+ case 'c':
+ if (startsWithWord(name, "copy")) return OMF_copy;
+ break;
+ case 'i':
+ if (startsWithWord(name, "init")) return OMF_init;
+ break;
+ case 'm':
+ if (startsWithWord(name, "mutableCopy")) return OMF_mutableCopy;
+ break;
+ case 'n':
+ if (startsWithWord(name, "new")) return OMF_new;
+ break;
+ default:
+ break;
+ }
+
+ return OMF_None;
+}
+
+namespace {
+ struct SelectorTableImpl {
+ llvm::FoldingSet<MultiKeywordSelector> Table;
+ llvm::BumpPtrAllocator Allocator;
+ };
+} // end anonymous namespace.
+
+static SelectorTableImpl &getSelectorTableImpl(void *P) {
+ return *static_cast<SelectorTableImpl*>(P);
+}
+
+/*static*/ Selector
+SelectorTable::constructSetterName(IdentifierTable &Idents,
+ SelectorTable &SelTable,
+ const IdentifierInfo *Name) {
+ SmallString<100> SelectorName;
+ SelectorName = "set";
+ SelectorName += Name->getName();
+ SelectorName[3] = toupper(SelectorName[3]);
+ IdentifierInfo *SetterName = &Idents.get(SelectorName);
+ return SelTable.getUnarySelector(SetterName);
+}
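+
+// Illustrative usage (a sketch; Idents and Sels are assumed to be the
+// IdentifierTable and SelectorTable in use): for a property named "frameSize"
+// this produces the selector "setFrameSize:".
+//
+//   Selector S = SelectorTable::constructSetterName(
+//       Idents, Sels, &Idents.get("frameSize"));
+//   // S.getAsString() == "setFrameSize:"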
+
+size_t SelectorTable::getTotalMemory() const {
+ SelectorTableImpl &SelTabImpl = getSelectorTableImpl(Impl);
+ return SelTabImpl.Allocator.getTotalMemory();
+}
+
+Selector SelectorTable::getSelector(unsigned nKeys, IdentifierInfo **IIV) {
+ if (nKeys < 2)
+ return Selector(IIV[0], nKeys);
+
+ SelectorTableImpl &SelTabImpl = getSelectorTableImpl(Impl);
+
+ // Unique selector, to guarantee there is one per name.
+ llvm::FoldingSetNodeID ID;
+ MultiKeywordSelector::Profile(ID, IIV, nKeys);
+
+ void *InsertPos = 0;
+ if (MultiKeywordSelector *SI =
+ SelTabImpl.Table.FindNodeOrInsertPos(ID, InsertPos))
+ return Selector(SI);
+
+ // MultiKeywordSelector objects are not allocated with new because they have a
+ // variable size array (for the keyword identifiers) at the end of them.
+ unsigned Size = sizeof(MultiKeywordSelector) + nKeys*sizeof(IdentifierInfo *);
+ MultiKeywordSelector *SI =
+ (MultiKeywordSelector*)SelTabImpl.Allocator.Allocate(Size,
+ llvm::alignOf<MultiKeywordSelector>());
+ new (SI) MultiKeywordSelector(nKeys, IIV);
+ SelTabImpl.Table.InsertNode(SI, InsertPos);
+ return Selector(SI);
+}
+
+SelectorTable::SelectorTable() {
+ Impl = new SelectorTableImpl();
+}
+
+SelectorTable::~SelectorTable() {
+ delete &getSelectorTableImpl(Impl);
+}
+
+const char *clang::getOperatorSpelling(OverloadedOperatorKind Operator) {
+ switch (Operator) {
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ return 0;
+
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ case OO_##Name: return Spelling;
+#include "clang/Basic/OperatorKinds.def"
+ }
+
+ llvm_unreachable("Invalid OverloadedOperatorKind!");
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/LangOptions.cpp b/contrib/llvm/tools/clang/lib/Basic/LangOptions.cpp
new file mode 100644
index 0000000..991992a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/LangOptions.cpp
@@ -0,0 +1,32 @@
+//===--- LangOptions.cpp - C Language Family Language Options ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the LangOptions class.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Basic/LangOptions.h"
+
+using namespace clang;
+
+LangOptions::LangOptions() {
+#define LANGOPT(Name, Bits, Default, Description) Name = Default;
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) set##Name(Default);
+#include "clang/Basic/LangOptions.def"
+}
+
+void LangOptions::resetNonModularOptions() {
+#define LANGOPT(Name, Bits, Default, Description)
+#define BENIGN_LANGOPT(Name, Bits, Default, Description) Name = Default;
+#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ Name = Default;
+#include "clang/Basic/LangOptions.def"
+
+ CurrentModule.clear();
+}
+
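Both functions above are generated with the X-macro pattern: LangOptions.def expands one LANGOPT(...) entry per option, and each includer defines the macro to emit the code it needs. A self-contained sketch of the same technique with a hypothetical option list (not the real LangOptions.def):

// Hypothetical option list; clang keeps its list in a separate .def file.
#define MY_OPTIONS(OPT) \
  OPT(Exceptions, 1)    \
  OPT(RTTI, 1)          \
  OPT(Blocks, 0)

struct MyOptions {
#define OPTION_FIELD(Name, Default) unsigned Name : 1;
  MY_OPTIONS(OPTION_FIELD)
#undef OPTION_FIELD

  MyOptions() {
#define OPTION_INIT(Name, Default) Name = Default;
    MY_OPTIONS(OPTION_INIT)
#undef OPTION_INIT
  }
};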
diff --git a/contrib/llvm/tools/clang/lib/Basic/Module.cpp b/contrib/llvm/tools/clang/lib/Basic/Module.cpp
new file mode 100644
index 0000000..6348840
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/Module.cpp
@@ -0,0 +1,274 @@
+//===--- Module.cpp - Describe a module -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Module class, which describes a module in the source
+// code.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Basic/Module.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringSwitch.h"
+using namespace clang;
+
+Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
+ bool IsFramework, bool IsExplicit)
+ : Name(Name), DefinitionLoc(DefinitionLoc), Parent(Parent),
+ Umbrella(), IsAvailable(true), IsFromModuleFile(false),
+ IsFramework(IsFramework), IsExplicit(IsExplicit), IsSystem(false),
+ InferSubmodules(false), InferExplicitSubmodules(false),
+ InferExportWildcard(false), NameVisibility(Hidden)
+{
+ if (Parent) {
+ if (!Parent->isAvailable())
+ IsAvailable = false;
+ if (Parent->IsSystem)
+ IsSystem = true;
+
+ Parent->SubModuleIndex[Name] = Parent->SubModules.size();
+ Parent->SubModules.push_back(this);
+ }
+}
+
+Module::~Module() {
+ for (submodule_iterator I = submodule_begin(), IEnd = submodule_end();
+ I != IEnd; ++I) {
+ delete *I;
+ }
+
+}
+
+/// \brief Determine whether a translation unit built using the current
+/// language options has the given feature.
+static bool hasFeature(StringRef Feature, const LangOptions &LangOpts,
+ const TargetInfo &Target) {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("altivec", LangOpts.AltiVec)
+ .Case("blocks", LangOpts.Blocks)
+ .Case("cplusplus", LangOpts.CPlusPlus)
+ .Case("cplusplus11", LangOpts.CPlusPlus0x)
+ .Case("objc", LangOpts.ObjC1)
+ .Case("objc_arc", LangOpts.ObjCAutoRefCount)
+ .Case("opencl", LangOpts.OpenCL)
+ .Case("tls", Target.isTLSSupported())
+ .Default(Target.hasFeature(Feature));
+}
+
+bool
+Module::isAvailable(const LangOptions &LangOpts, const TargetInfo &Target,
+ StringRef &Feature) const {
+ if (IsAvailable)
+ return true;
+
+ for (const Module *Current = this; Current; Current = Current->Parent) {
+ for (unsigned I = 0, N = Current->Requires.size(); I != N; ++I) {
+ if (!hasFeature(Current->Requires[I], LangOpts, Target)) {
+ Feature = Current->Requires[I];
+ return false;
+ }
+ }
+ }
+
+ llvm_unreachable("could not find a reason why module is unavailable");
+}
+
+bool Module::isSubModuleOf(Module *Other) const {
+ const Module *This = this;
+ do {
+ if (This == Other)
+ return true;
+
+ This = This->Parent;
+ } while (This);
+
+ return false;
+}
+
+const Module *Module::getTopLevelModule() const {
+ const Module *Result = this;
+ while (Result->Parent)
+ Result = Result->Parent;
+
+ return Result;
+}
+
+std::string Module::getFullModuleName() const {
+ llvm::SmallVector<StringRef, 2> Names;
+
+ // Build up the set of module names (from innermost to outermost).
+ for (const Module *M = this; M; M = M->Parent)
+ Names.push_back(M->Name);
+
+ std::string Result;
+ for (llvm::SmallVector<StringRef, 2>::reverse_iterator I = Names.rbegin(),
+ IEnd = Names.rend();
+ I != IEnd; ++I) {
+ if (!Result.empty())
+ Result += '.';
+
+ Result += *I;
+ }
+
+ return Result;
+}
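
A standalone sketch of the dotted-name join above, over a plain list of components collected innermost-first (as the loop above does):

#include <string>
#include <vector>

static std::string joinDotted(const std::vector<std::string> &InnermostFirst) {
  std::string Result;
  // Walk in reverse so the outermost component comes first.
  for (std::vector<std::string>::const_reverse_iterator
           I = InnermostFirst.rbegin(), E = InnermostFirst.rend();
       I != E; ++I) {
    if (!Result.empty())
      Result += '.';
    Result += *I;
  }
  return Result; // e.g. {"Sub", "Top"} -> "Top.Sub"
}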
+
+const DirectoryEntry *Module::getUmbrellaDir() const {
+ if (const FileEntry *Header = getUmbrellaHeader())
+ return Header->getDir();
+
+ return Umbrella.dyn_cast<const DirectoryEntry *>();
+}
+
+void Module::addRequirement(StringRef Feature, const LangOptions &LangOpts,
+ const TargetInfo &Target) {
+ Requires.push_back(Feature);
+
+ // If this feature is currently available, we're done.
+ if (hasFeature(Feature, LangOpts, Target))
+ return;
+
+ if (!IsAvailable)
+ return;
+
+ llvm::SmallVector<Module *, 2> Stack;
+ Stack.push_back(this);
+ while (!Stack.empty()) {
+ Module *Current = Stack.back();
+ Stack.pop_back();
+
+ if (!Current->IsAvailable)
+ continue;
+
+ Current->IsAvailable = false;
+ for (submodule_iterator Sub = Current->submodule_begin(),
+ SubEnd = Current->submodule_end();
+ Sub != SubEnd; ++Sub) {
+ if ((*Sub)->IsAvailable)
+ Stack.push_back(*Sub);
+ }
+ }
+}
+
+Module *Module::findSubmodule(StringRef Name) const {
+ llvm::StringMap<unsigned>::const_iterator Pos = SubModuleIndex.find(Name);
+ if (Pos == SubModuleIndex.end())
+ return 0;
+
+ return SubModules[Pos->getValue()];
+}
+
+static void printModuleId(llvm::raw_ostream &OS, const ModuleId &Id) {
+ for (unsigned I = 0, N = Id.size(); I != N; ++I) {
+ if (I)
+ OS << ".";
+ OS << Id[I].first;
+ }
+}
+
+void Module::print(llvm::raw_ostream &OS, unsigned Indent) const {
+ OS.indent(Indent);
+ if (IsFramework)
+ OS << "framework ";
+ if (IsExplicit)
+ OS << "explicit ";
+ OS << "module " << Name;
+
+ if (IsSystem) {
+ OS.indent(Indent + 2);
+ OS << " [system]";
+ }
+
+ OS << " {\n";
+
+ if (!Requires.empty()) {
+ OS.indent(Indent + 2);
+ OS << "requires ";
+ for (unsigned I = 0, N = Requires.size(); I != N; ++I) {
+ if (I)
+ OS << ", ";
+ OS << Requires[I];
+ }
+ OS << "\n";
+ }
+
+ if (const FileEntry *UmbrellaHeader = getUmbrellaHeader()) {
+ OS.indent(Indent + 2);
+ OS << "umbrella header \"";
+ OS.write_escaped(UmbrellaHeader->getName());
+ OS << "\"\n";
+ } else if (const DirectoryEntry *UmbrellaDir = getUmbrellaDir()) {
+ OS.indent(Indent + 2);
+ OS << "umbrella \"";
+ OS.write_escaped(UmbrellaDir->getName());
+ OS << "\"\n";
+ }
+
+ for (unsigned I = 0, N = Headers.size(); I != N; ++I) {
+ OS.indent(Indent + 2);
+ OS << "header \"";
+ OS.write_escaped(Headers[I]->getName());
+ OS << "\"\n";
+ }
+
+ for (submodule_const_iterator MI = submodule_begin(), MIEnd = submodule_end();
+ MI != MIEnd; ++MI)
+ (*MI)->print(OS, Indent + 2);
+
+ for (unsigned I = 0, N = Exports.size(); I != N; ++I) {
+ OS.indent(Indent + 2);
+ OS << "export ";
+ if (Module *Restriction = Exports[I].getPointer()) {
+ OS << Restriction->getFullModuleName();
+ if (Exports[I].getInt())
+ OS << ".*";
+ } else {
+ OS << "*";
+ }
+ OS << "\n";
+ }
+
+ for (unsigned I = 0, N = UnresolvedExports.size(); I != N; ++I) {
+ OS.indent(Indent + 2);
+ OS << "export ";
+ printModuleId(OS, UnresolvedExports[I].Id);
+ if (UnresolvedExports[I].Wildcard) {
+ if (UnresolvedExports[I].Id.empty())
+ OS << "*";
+ else
+ OS << ".*";
+ }
+ OS << "\n";
+ }
+
+ if (InferSubmodules) {
+ OS.indent(Indent + 2);
+ if (InferExplicitSubmodules)
+ OS << "explicit ";
+ OS << "module * {\n";
+ if (InferExportWildcard) {
+ OS.indent(Indent + 4);
+ OS << "export *\n";
+ }
+ OS.indent(Indent + 2);
+ OS << "}\n";
+ }
+
+ OS.indent(Indent);
+ OS << "}\n";
+}
+
+void Module::dump() const {
+ print(llvm::errs());
+}
+
+
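A minimal usage sketch for the print() method above, assuming only the headers this file already includes; the module name is hypothetical and nothing beyond the constructor arguments is set:

#include "clang/Basic/Module.h"
#include "llvm/Support/raw_ostream.h"

static void printEmptyFrameworkModule() {
  clang::Module M("MyKit", clang::SourceLocation(),
                  /*Parent=*/0, /*IsFramework=*/true, /*IsExplicit=*/false);
  // With no requirements, headers, or exports, this prints just:
  //   framework module MyKit {
  //   }
  M.print(llvm::errs());
}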
diff --git a/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp b/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp
new file mode 100644
index 0000000..bb5a10a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/SourceLocation.cpp
@@ -0,0 +1,138 @@
+//==--- SourceLocation.cpp - Compact identifier for Source Files -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines accessor methods for the FullSourceLoc class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cstdio>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// PrettyStackTraceLoc
+//===----------------------------------------------------------------------===//
+
+void PrettyStackTraceLoc::print(raw_ostream &OS) const {
+ if (Loc.isValid()) {
+ Loc.print(OS, SM);
+ OS << ": ";
+ }
+ OS << Message << '\n';
+}
+
+//===----------------------------------------------------------------------===//
+// SourceLocation
+//===----------------------------------------------------------------------===//
+
+void SourceLocation::print(raw_ostream &OS, const SourceManager &SM)const{
+ if (!isValid()) {
+ OS << "<invalid loc>";
+ return;
+ }
+
+ if (isFileID()) {
+ PresumedLoc PLoc = SM.getPresumedLoc(*this);
+
+ if (PLoc.isInvalid()) {
+ OS << "<invalid>";
+ return;
+ }
+ // The macro expansion and spelling pos is identical for file locs.
+ OS << PLoc.getFilename() << ':' << PLoc.getLine()
+ << ':' << PLoc.getColumn();
+ return;
+ }
+
+ SM.getExpansionLoc(*this).print(OS, SM);
+
+ OS << " <Spelling=";
+ SM.getSpellingLoc(*this).print(OS, SM);
+ OS << '>';
+}
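
For reference, the shapes produced by print() above (file names, lines, and columns here are hypothetical):

// File location:   foo.c:10:4
// Macro location:  foo.c:10:4 <Spelling=foo.h:3:9>
// Invalid:         <invalid loc>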
+
+void SourceLocation::dump(const SourceManager &SM) const {
+ print(llvm::errs(), SM);
+}
+
+//===----------------------------------------------------------------------===//
+// FullSourceLoc
+//===----------------------------------------------------------------------===//
+
+FileID FullSourceLoc::getFileID() const {
+ assert(isValid());
+ return SrcMgr->getFileID(*this);
+}
+
+
+FullSourceLoc FullSourceLoc::getExpansionLoc() const {
+ assert(isValid());
+ return FullSourceLoc(SrcMgr->getExpansionLoc(*this), *SrcMgr);
+}
+
+FullSourceLoc FullSourceLoc::getSpellingLoc() const {
+ assert(isValid());
+ return FullSourceLoc(SrcMgr->getSpellingLoc(*this), *SrcMgr);
+}
+
+unsigned FullSourceLoc::getExpansionLineNumber(bool *Invalid) const {
+ assert(isValid());
+ return SrcMgr->getExpansionLineNumber(*this, Invalid);
+}
+
+unsigned FullSourceLoc::getExpansionColumnNumber(bool *Invalid) const {
+ assert(isValid());
+ return SrcMgr->getExpansionColumnNumber(*this, Invalid);
+}
+
+unsigned FullSourceLoc::getSpellingLineNumber(bool *Invalid) const {
+ assert(isValid());
+ return SrcMgr->getSpellingLineNumber(*this, Invalid);
+}
+
+unsigned FullSourceLoc::getSpellingColumnNumber(bool *Invalid) const {
+ assert(isValid());
+ return SrcMgr->getSpellingColumnNumber(*this, Invalid);
+}
+
+bool FullSourceLoc::isInSystemHeader() const {
+ assert(isValid());
+ return SrcMgr->isInSystemHeader(*this);
+}
+
+bool FullSourceLoc::isBeforeInTranslationUnitThan(SourceLocation Loc) const {
+ assert(isValid());
+ return SrcMgr->isBeforeInTranslationUnit(*this, Loc);
+}
+
+void FullSourceLoc::dump() const {
+ SourceLocation::dump(*SrcMgr);
+}
+
+const char *FullSourceLoc::getCharacterData(bool *Invalid) const {
+ assert(isValid());
+ return SrcMgr->getCharacterData(*this, Invalid);
+}
+
+const llvm::MemoryBuffer* FullSourceLoc::getBuffer(bool *Invalid) const {
+ assert(isValid());
+ return SrcMgr->getBuffer(SrcMgr->getFileID(*this), Invalid);
+}
+
+StringRef FullSourceLoc::getBufferData(bool *Invalid) const {
+ return getBuffer(Invalid)->getBuffer();
+}
+
+std::pair<FileID, unsigned> FullSourceLoc::getDecomposedLoc() const {
+ return SrcMgr->getDecomposedLoc(*this);
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp b/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp
new file mode 100644
index 0000000..cef091c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/SourceManager.cpp
@@ -0,0 +1,1896 @@
+//===--- SourceManager.cpp - Track and cache source files -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the SourceManager interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/SourceManagerInternals.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Capacity.h"
+#include <algorithm>
+#include <string>
+#include <cstring>
+#include <sys/stat.h>
+
+using namespace clang;
+using namespace SrcMgr;
+using llvm::MemoryBuffer;
+
+//===----------------------------------------------------------------------===//
+// SourceManager Helper Classes
+//===----------------------------------------------------------------------===//
+
+ContentCache::~ContentCache() {
+ if (shouldFreeBuffer())
+ delete Buffer.getPointer();
+}
+
+/// getSizeBytesMapped - Returns the number of bytes actually mapped for this
+/// ContentCache. This can be 0 if the MemBuffer was not actually expanded.
+unsigned ContentCache::getSizeBytesMapped() const {
+ return Buffer.getPointer() ? Buffer.getPointer()->getBufferSize() : 0;
+}
+
+/// Returns the kind of memory used to back the memory buffer for
+/// this content cache. This is used for performance analysis.
+llvm::MemoryBuffer::BufferKind ContentCache::getMemoryBufferKind() const {
+ assert(Buffer.getPointer());
+
+ // Should be unreachable, but keep for sanity.
+ if (!Buffer.getPointer())
+ return llvm::MemoryBuffer::MemoryBuffer_Malloc;
+
+ const llvm::MemoryBuffer *buf = Buffer.getPointer();
+ return buf->getBufferKind();
+}
+
+/// getSize - Returns the size of the content encapsulated by this ContentCache.
+/// This can be the size of the source file or the size of an arbitrary
+/// scratch buffer. If the ContentCache encapsulates a source file, that
+/// file is not lazily brought in from disk to satisfy this query.
+unsigned ContentCache::getSize() const {
+ return Buffer.getPointer() ? (unsigned) Buffer.getPointer()->getBufferSize()
+ : (unsigned) ContentsEntry->getSize();
+}
+
+void ContentCache::replaceBuffer(const llvm::MemoryBuffer *B,
+ bool DoNotFree) {
+ if (B == Buffer.getPointer()) {
+ assert(0 && "Replacing with the same buffer");
+ Buffer.setInt(DoNotFree? DoNotFreeFlag : 0);
+ return;
+ }
+
+ if (shouldFreeBuffer())
+ delete Buffer.getPointer();
+ Buffer.setPointer(B);
+ Buffer.setInt(DoNotFree? DoNotFreeFlag : 0);
+}
+
+const llvm::MemoryBuffer *ContentCache::getBuffer(DiagnosticsEngine &Diag,
+ const SourceManager &SM,
+ SourceLocation Loc,
+ bool *Invalid) const {
+ // Lazily create the Buffer for ContentCaches that wrap files. If we already
+ // computed it, just return what we have.
+ if (Buffer.getPointer() || ContentsEntry == 0) {
+ if (Invalid)
+ *Invalid = isBufferInvalid();
+
+ return Buffer.getPointer();
+ }
+
+ std::string ErrorStr;
+ Buffer.setPointer(SM.getFileManager().getBufferForFile(ContentsEntry, &ErrorStr));
+
+ // If we were unable to open the file, then we are in an inconsistent
+ // situation where the content cache referenced a file which no longer
+ // exists. Most likely, we were using a stat cache with an invalid entry but
+ // the file could also have been removed during processing. Since we can't
+ // really deal with this situation, just create an empty buffer.
+ //
+ // FIXME: This is definitely not ideal, but our immediate clients can't
+ // currently handle returning a null entry here. Ideally we should detect
+ // that we are in an inconsistent situation and error out as quickly as
+ // possible.
+ if (!Buffer.getPointer()) {
+ const StringRef FillStr("<<<MISSING SOURCE FILE>>>\n");
+ Buffer.setPointer(MemoryBuffer::getNewMemBuffer(ContentsEntry->getSize(),
+ "<invalid>"));
+ char *Ptr = const_cast<char*>(Buffer.getPointer()->getBufferStart());
+ for (unsigned i = 0, e = ContentsEntry->getSize(); i != e; ++i)
+ Ptr[i] = FillStr[i % FillStr.size()];
+
+ if (Diag.isDiagnosticInFlight())
+ Diag.SetDelayedDiagnostic(diag::err_cannot_open_file,
+ ContentsEntry->getName(), ErrorStr);
+ else
+ Diag.Report(Loc, diag::err_cannot_open_file)
+ << ContentsEntry->getName() << ErrorStr;
+
+ Buffer.setInt(Buffer.getInt() | InvalidFlag);
+
+ if (Invalid) *Invalid = true;
+ return Buffer.getPointer();
+ }
+
+ // Check that the file's size is the same as in the file entry (which may
+ // have come from a stat cache).
+ if (getRawBuffer()->getBufferSize() != (size_t)ContentsEntry->getSize()) {
+ if (Diag.isDiagnosticInFlight())
+ Diag.SetDelayedDiagnostic(diag::err_file_modified,
+ ContentsEntry->getName());
+ else
+ Diag.Report(Loc, diag::err_file_modified)
+ << ContentsEntry->getName();
+
+ Buffer.setInt(Buffer.getInt() | InvalidFlag);
+ if (Invalid) *Invalid = true;
+ return Buffer.getPointer();
+ }
+
+ // If the buffer is valid, check to see if it has a UTF Byte Order Mark
+ // (BOM). We only support UTF-8 with and without a BOM right now. See
+ // http://en.wikipedia.org/wiki/Byte_order_mark for more information.
+ StringRef BufStr = Buffer.getPointer()->getBuffer();
+ const char *InvalidBOM = llvm::StringSwitch<const char *>(BufStr)
+ .StartsWith("\xFE\xFF", "UTF-16 (BE)")
+ .StartsWith("\xFF\xFE", "UTF-16 (LE)")
+ .StartsWith("\x00\x00\xFE\xFF", "UTF-32 (BE)")
+ .StartsWith("\xFF\xFE\x00\x00", "UTF-32 (LE)")
+ .StartsWith("\x2B\x2F\x76", "UTF-7")
+ .StartsWith("\xF7\x64\x4C", "UTF-1")
+ .StartsWith("\xDD\x73\x66\x73", "UTF-EBCDIC")
+ .StartsWith("\x0E\xFE\xFF", "SDSU")
+ .StartsWith("\xFB\xEE\x28", "BOCU-1")
+ .StartsWith("\x84\x31\x95\x33", "GB-18030")
+ .Default(0);
+
+ if (InvalidBOM) {
+ Diag.Report(Loc, diag::err_unsupported_bom)
+ << InvalidBOM << ContentsEntry->getName();
+ Buffer.setInt(Buffer.getInt() | InvalidFlag);
+ }
+
+ if (Invalid)
+ *Invalid = isBufferInvalid();
+
+ return Buffer.getPointer();
+}
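
A standalone sketch of the BOM check above, reduced to the two UTF-16 patterns and plain std::string instead of StringSwitch (the real code covers several more encodings):

#include <string>

// Return a description of an unsupported byte-order mark, or 0 if the buffer
// does not start with one of the checked prefixes.
static const char *detectUnsupportedBOM(const std::string &Buf) {
  if (Buf.compare(0, 2, "\xFE\xFF") == 0)
    return "UTF-16 (BE)";
  if (Buf.compare(0, 2, "\xFF\xFE") == 0)
    return "UTF-16 (LE)";
  return 0;
}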
+
+unsigned LineTableInfo::getLineTableFilenameID(StringRef Name) {
+ // Look up the filename in the string table, returning the pre-existing value
+ // if it exists.
+ llvm::StringMapEntry<unsigned> &Entry =
+ FilenameIDs.GetOrCreateValue(Name, ~0U);
+ if (Entry.getValue() != ~0U)
+ return Entry.getValue();
+
+ // Otherwise, assign this the next available ID.
+ Entry.setValue(FilenamesByID.size());
+ FilenamesByID.push_back(&Entry);
+ return FilenamesByID.size()-1;
+}
+
+/// AddLineNote - Add a line note to the line table that indicates that there
+/// is a #line at the specified FID/Offset location which changes the presumed
+/// location to LineNo/FilenameID.
+void LineTableInfo::AddLineNote(int FID, unsigned Offset,
+ unsigned LineNo, int FilenameID) {
+ std::vector<LineEntry> &Entries = LineEntries[FID];
+
+ assert((Entries.empty() || Entries.back().FileOffset < Offset) &&
+ "Adding line entries out of order!");
+
+ SrcMgr::CharacteristicKind Kind = SrcMgr::C_User;
+ unsigned IncludeOffset = 0;
+
+ if (!Entries.empty()) {
+ // If this is a '#line 4' after '#line 42 "foo.h"', make sure to remember
+ // that we are still in "foo.h".
+ if (FilenameID == -1)
+ FilenameID = Entries.back().FilenameID;
+
+ // If we are after a line marker that switched us to system header mode, or
+ // that set #include information, preserve it.
+ Kind = Entries.back().FileKind;
+ IncludeOffset = Entries.back().IncludeOffset;
+ }
+
+ Entries.push_back(LineEntry::get(Offset, LineNo, FilenameID, Kind,
+ IncludeOffset));
+}
+
+/// AddLineNote - This is the same as the previous version of AddLineNote, but is
+/// used for GNU line markers. If EntryExit is 0, then this doesn't change the
+/// presumed #include stack. If it is 1, this is a file entry, if it is 2 then
+/// this is a file exit. FileKind specifies whether this is a system header or
+/// extern C system header.
+void LineTableInfo::AddLineNote(int FID, unsigned Offset,
+ unsigned LineNo, int FilenameID,
+ unsigned EntryExit,
+ SrcMgr::CharacteristicKind FileKind) {
+ assert(FilenameID != -1 && "Unspecified filename should use other accessor");
+
+ std::vector<LineEntry> &Entries = LineEntries[FID];
+
+ assert((Entries.empty() || Entries.back().FileOffset < Offset) &&
+ "Adding line entries out of order!");
+
+ unsigned IncludeOffset = 0;
+ if (EntryExit == 0) { // No #include stack change.
+ IncludeOffset = Entries.empty() ? 0 : Entries.back().IncludeOffset;
+ } else if (EntryExit == 1) {
+ IncludeOffset = Offset-1;
+ } else if (EntryExit == 2) {
+ assert(!Entries.empty() && Entries.back().IncludeOffset &&
+ "PPDirectives should have caught case when popping empty include stack");
+
+    // Get the include loc of the last entry's include loc as our include loc.
+ IncludeOffset = 0;
+ if (const LineEntry *PrevEntry =
+ FindNearestLineEntry(FID, Entries.back().IncludeOffset))
+ IncludeOffset = PrevEntry->IncludeOffset;
+ }
+
+ Entries.push_back(LineEntry::get(Offset, LineNo, FilenameID, FileKind,
+ IncludeOffset));
+}
+
+
+/// FindNearestLineEntry - Find the line entry nearest to FID that is before
+/// it. If there is no line entry before Offset in FID, return null.
+const LineEntry *LineTableInfo::FindNearestLineEntry(int FID,
+ unsigned Offset) {
+ const std::vector<LineEntry> &Entries = LineEntries[FID];
+ assert(!Entries.empty() && "No #line entries for this FID after all!");
+
+  // It is very common for the query to be after the last #line, so check this
+  // first.
+ if (Entries.back().FileOffset <= Offset)
+ return &Entries.back();
+
+ // Do a binary search to find the maximal element that is still before Offset.
+ std::vector<LineEntry>::const_iterator I =
+ std::upper_bound(Entries.begin(), Entries.end(), Offset);
+ if (I == Entries.begin()) return 0;
+ return &*--I;
+}
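
A standalone sketch of the "nearest entry at or before Offset" lookup above, on a plain sorted vector (hypothetical Entry type; the real code relies on a comparison between unsigned and LineEntry declared next to LineEntry):

#include <algorithm>
#include <vector>

struct Entry {
  unsigned FileOffset;
};

// upper_bound comparator: is the query offset strictly before this entry?
static bool offsetLess(unsigned Offset, const Entry &E) {
  return Offset < E.FileOffset;
}

// Return the last entry whose FileOffset <= Offset, or 0 if there is none.
// Entries must be sorted by increasing FileOffset.
static const Entry *findNearest(const std::vector<Entry> &Entries,
                                unsigned Offset) {
  std::vector<Entry>::const_iterator I =
      std::upper_bound(Entries.begin(), Entries.end(), Offset, offsetLess);
  if (I == Entries.begin())
    return 0;
  return &*--I;
}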
+
+/// \brief Add a new line entry that has already been encoded into
+/// the internal representation of the line table.
+void LineTableInfo::AddEntry(int FID,
+ const std::vector<LineEntry> &Entries) {
+ LineEntries[FID] = Entries;
+}
+
+/// getLineTableFilenameID - Return the uniqued ID for the specified filename.
+///
+unsigned SourceManager::getLineTableFilenameID(StringRef Name) {
+ if (LineTable == 0)
+ LineTable = new LineTableInfo();
+ return LineTable->getLineTableFilenameID(Name);
+}
+
+
+/// AddLineNote - Add a line note to the line table for the FileID and offset
+/// specified by Loc. If FilenameID is -1, it is considered to be
+/// unspecified.
+void SourceManager::AddLineNote(SourceLocation Loc, unsigned LineNo,
+ int FilenameID) {
+ std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
+
+ bool Invalid = false;
+ const SLocEntry &Entry = getSLocEntry(LocInfo.first, &Invalid);
+ if (!Entry.isFile() || Invalid)
+ return;
+
+ const SrcMgr::FileInfo &FileInfo = Entry.getFile();
+
+ // Remember that this file has #line directives now if it doesn't already.
+ const_cast<SrcMgr::FileInfo&>(FileInfo).setHasLineDirectives();
+
+ if (LineTable == 0)
+ LineTable = new LineTableInfo();
+ LineTable->AddLineNote(LocInfo.first.ID, LocInfo.second, LineNo, FilenameID);
+}
+
+/// AddLineNote - Add a GNU line marker to the line table.
+void SourceManager::AddLineNote(SourceLocation Loc, unsigned LineNo,
+ int FilenameID, bool IsFileEntry,
+ bool IsFileExit, bool IsSystemHeader,
+ bool IsExternCHeader) {
+ // If there is no filename and no flags, this is treated just like a #line,
+ // which does not change the flags of the previous line marker.
+ if (FilenameID == -1) {
+ assert(!IsFileEntry && !IsFileExit && !IsSystemHeader && !IsExternCHeader &&
+ "Can't set flags without setting the filename!");
+ return AddLineNote(Loc, LineNo, FilenameID);
+ }
+
+ std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
+
+ bool Invalid = false;
+ const SLocEntry &Entry = getSLocEntry(LocInfo.first, &Invalid);
+ if (!Entry.isFile() || Invalid)
+ return;
+
+ const SrcMgr::FileInfo &FileInfo = Entry.getFile();
+
+ // Remember that this file has #line directives now if it doesn't already.
+ const_cast<SrcMgr::FileInfo&>(FileInfo).setHasLineDirectives();
+
+ if (LineTable == 0)
+ LineTable = new LineTableInfo();
+
+ SrcMgr::CharacteristicKind FileKind;
+ if (IsExternCHeader)
+ FileKind = SrcMgr::C_ExternCSystem;
+ else if (IsSystemHeader)
+ FileKind = SrcMgr::C_System;
+ else
+ FileKind = SrcMgr::C_User;
+
+ unsigned EntryExit = 0;
+ if (IsFileEntry)
+ EntryExit = 1;
+ else if (IsFileExit)
+ EntryExit = 2;
+
+ LineTable->AddLineNote(LocInfo.first.ID, LocInfo.second, LineNo, FilenameID,
+ EntryExit, FileKind);
+}
+
+LineTableInfo &SourceManager::getLineTable() {
+ if (LineTable == 0)
+ LineTable = new LineTableInfo();
+ return *LineTable;
+}
+
+//===----------------------------------------------------------------------===//
+// Private 'Create' methods.
+//===----------------------------------------------------------------------===//
+
+SourceManager::SourceManager(DiagnosticsEngine &Diag, FileManager &FileMgr)
+ : Diag(Diag), FileMgr(FileMgr), OverridenFilesKeepOriginalName(true),
+ ExternalSLocEntries(0), LineTable(0), NumLinearScans(0),
+ NumBinaryProbes(0), FakeBufferForRecovery(0),
+ FakeContentCacheForRecovery(0) {
+ clearIDTables();
+ Diag.setSourceManager(this);
+}
+
+SourceManager::~SourceManager() {
+ delete LineTable;
+
+ // Delete FileEntry objects corresponding to content caches. Since the actual
+ // content cache objects are bump pointer allocated, we just have to run the
+ // dtors, but we call the deallocate method for completeness.
+ for (unsigned i = 0, e = MemBufferInfos.size(); i != e; ++i) {
+ if (MemBufferInfos[i]) {
+ MemBufferInfos[i]->~ContentCache();
+ ContentCacheAlloc.Deallocate(MemBufferInfos[i]);
+ }
+ }
+ for (llvm::DenseMap<const FileEntry*, SrcMgr::ContentCache*>::iterator
+ I = FileInfos.begin(), E = FileInfos.end(); I != E; ++I) {
+ if (I->second) {
+ I->second->~ContentCache();
+ ContentCacheAlloc.Deallocate(I->second);
+ }
+ }
+
+ delete FakeBufferForRecovery;
+ delete FakeContentCacheForRecovery;
+
+ for (llvm::DenseMap<FileID, MacroArgsMap *>::iterator
+ I = MacroArgsCacheMap.begin(),E = MacroArgsCacheMap.end(); I!=E; ++I) {
+ delete I->second;
+ }
+}
+
+void SourceManager::clearIDTables() {
+ MainFileID = FileID();
+ LocalSLocEntryTable.clear();
+ LoadedSLocEntryTable.clear();
+ SLocEntryLoaded.clear();
+ LastLineNoFileIDQuery = FileID();
+ LastLineNoContentCache = 0;
+ LastFileIDLookup = FileID();
+
+ if (LineTable)
+ LineTable->clear();
+
+ // Use up FileID #0 as an invalid expansion.
+ NextLocalOffset = 0;
+ CurrentLoadedOffset = MaxLoadedOffset;
+ createExpansionLoc(SourceLocation(),SourceLocation(),SourceLocation(), 1);
+}
+
+/// getOrCreateContentCache - Create or return a cached ContentCache for the
+/// specified file.
+const ContentCache *
+SourceManager::getOrCreateContentCache(const FileEntry *FileEnt) {
+ assert(FileEnt && "Didn't specify a file entry to use?");
+
+ // Do we already have information about this file?
+ ContentCache *&Entry = FileInfos[FileEnt];
+ if (Entry) return Entry;
+
+ // Nope, create a new Cache entry. Make sure it is at least 8-byte aligned
+ // so that FileInfo can use the low 3 bits of the pointer for its own
+ // nefarious purposes.
+ unsigned EntryAlign = llvm::AlignOf<ContentCache>::Alignment;
+ EntryAlign = std::max(8U, EntryAlign);
+ Entry = ContentCacheAlloc.Allocate<ContentCache>(1, EntryAlign);
+
+ // If the file contents are overridden with contents from another file,
+ // pass that file to ContentCache.
+ llvm::DenseMap<const FileEntry *, const FileEntry *>::iterator
+ overI = OverriddenFiles.find(FileEnt);
+ if (overI == OverriddenFiles.end())
+ new (Entry) ContentCache(FileEnt);
+ else
+ new (Entry) ContentCache(OverridenFilesKeepOriginalName ? FileEnt
+ : overI->second,
+ overI->second);
+
+ return Entry;
+}
+
+
+/// createMemBufferContentCache - Create a new ContentCache for the specified
+/// memory buffer. This does no caching.
+const ContentCache*
+SourceManager::createMemBufferContentCache(const MemoryBuffer *Buffer) {
+ // Add a new ContentCache to the MemBufferInfos list and return it. Make sure
+ // it is at least 8-byte aligned so that FileInfo can use the low 3 bits of
+ // the pointer for its own nefarious purposes.
+ unsigned EntryAlign = llvm::AlignOf<ContentCache>::Alignment;
+ EntryAlign = std::max(8U, EntryAlign);
+ ContentCache *Entry = ContentCacheAlloc.Allocate<ContentCache>(1, EntryAlign);
+ new (Entry) ContentCache();
+ MemBufferInfos.push_back(Entry);
+ Entry->setBuffer(Buffer);
+ return Entry;
+}
+
+const SrcMgr::SLocEntry &SourceManager::loadSLocEntry(unsigned Index,
+ bool *Invalid) const {
+ assert(!SLocEntryLoaded[Index]);
+ if (ExternalSLocEntries->ReadSLocEntry(-(static_cast<int>(Index) + 2))) {
+ if (Invalid)
+ *Invalid = true;
+    // If the file of the SLocEntry changed, we could still have loaded it.
+ if (!SLocEntryLoaded[Index]) {
+ // Try to recover; create a SLocEntry so the rest of clang can handle it.
+ LoadedSLocEntryTable[Index] = SLocEntry::get(0,
+ FileInfo::get(SourceLocation(),
+ getFakeContentCacheForRecovery(),
+ SrcMgr::C_User));
+ }
+ }
+
+ return LoadedSLocEntryTable[Index];
+}
+
+std::pair<int, unsigned>
+SourceManager::AllocateLoadedSLocEntries(unsigned NumSLocEntries,
+ unsigned TotalSize) {
+ assert(ExternalSLocEntries && "Don't have an external sloc source");
+ LoadedSLocEntryTable.resize(LoadedSLocEntryTable.size() + NumSLocEntries);
+ SLocEntryLoaded.resize(LoadedSLocEntryTable.size());
+ CurrentLoadedOffset -= TotalSize;
+ assert(CurrentLoadedOffset >= NextLocalOffset && "Out of source locations");
+ int ID = LoadedSLocEntryTable.size();
+ return std::make_pair(-ID - 1, CurrentLoadedOffset);
+}
+
+/// \brief As part of recovering from missing or changed content, produce a
+/// fake, non-empty buffer.
+const llvm::MemoryBuffer *SourceManager::getFakeBufferForRecovery() const {
+ if (!FakeBufferForRecovery)
+ FakeBufferForRecovery
+ = llvm::MemoryBuffer::getMemBuffer("<<<INVALID BUFFER>>");
+
+ return FakeBufferForRecovery;
+}
+
+/// \brief As part of recovering from missing or changed content, produce a
+/// fake content cache.
+const SrcMgr::ContentCache *
+SourceManager::getFakeContentCacheForRecovery() const {
+ if (!FakeContentCacheForRecovery) {
+ FakeContentCacheForRecovery = new ContentCache();
+ FakeContentCacheForRecovery->replaceBuffer(getFakeBufferForRecovery(),
+ /*DoNotFree=*/true);
+ }
+ return FakeContentCacheForRecovery;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods to create new FileID's and macro expansions.
+//===----------------------------------------------------------------------===//
+
+/// createFileID - Create a new FileID for the specified ContentCache and
+/// include position. This works regardless of whether the ContentCache
+/// corresponds to a file or some other input source.
+FileID SourceManager::createFileID(const ContentCache *File,
+ SourceLocation IncludePos,
+ SrcMgr::CharacteristicKind FileCharacter,
+ int LoadedID, unsigned LoadedOffset) {
+ if (LoadedID < 0) {
+ assert(LoadedID != -1 && "Loading sentinel FileID");
+ unsigned Index = unsigned(-LoadedID) - 2;
+ assert(Index < LoadedSLocEntryTable.size() && "FileID out of range");
+ assert(!SLocEntryLoaded[Index] && "FileID already loaded");
+ LoadedSLocEntryTable[Index] = SLocEntry::get(LoadedOffset,
+ FileInfo::get(IncludePos, File, FileCharacter));
+ SLocEntryLoaded[Index] = true;
+ return FileID::get(LoadedID);
+ }
+ LocalSLocEntryTable.push_back(SLocEntry::get(NextLocalOffset,
+ FileInfo::get(IncludePos, File,
+ FileCharacter)));
+ unsigned FileSize = File->getSize();
+ assert(NextLocalOffset + FileSize + 1 > NextLocalOffset &&
+ NextLocalOffset + FileSize + 1 <= CurrentLoadedOffset &&
+ "Ran out of source locations!");
+ // We do a +1 here because we want a SourceLocation that means "the end of the
+ // file", e.g. for the "no newline at the end of the file" diagnostic.
+ NextLocalOffset += FileSize + 1;
+
+ // Set LastFileIDLookup to the newly created file. The next getFileID call is
+ // almost guaranteed to be from that file.
+ FileID FID = FileID::get(LocalSLocEntryTable.size()-1);
+ return LastFileIDLookup = FID;
+}
+
+SourceLocation
+SourceManager::createMacroArgExpansionLoc(SourceLocation SpellingLoc,
+ SourceLocation ExpansionLoc,
+ unsigned TokLength) {
+ ExpansionInfo Info = ExpansionInfo::createForMacroArg(SpellingLoc,
+ ExpansionLoc);
+ return createExpansionLocImpl(Info, TokLength);
+}
+
+SourceLocation
+SourceManager::createExpansionLoc(SourceLocation SpellingLoc,
+ SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd,
+ unsigned TokLength,
+ int LoadedID,
+ unsigned LoadedOffset) {
+ ExpansionInfo Info = ExpansionInfo::create(SpellingLoc, ExpansionLocStart,
+ ExpansionLocEnd);
+ return createExpansionLocImpl(Info, TokLength, LoadedID, LoadedOffset);
+}
+
+SourceLocation
+SourceManager::createExpansionLocImpl(const ExpansionInfo &Info,
+ unsigned TokLength,
+ int LoadedID,
+ unsigned LoadedOffset) {
+ if (LoadedID < 0) {
+ assert(LoadedID != -1 && "Loading sentinel FileID");
+ unsigned Index = unsigned(-LoadedID) - 2;
+ assert(Index < LoadedSLocEntryTable.size() && "FileID out of range");
+ assert(!SLocEntryLoaded[Index] && "FileID already loaded");
+ LoadedSLocEntryTable[Index] = SLocEntry::get(LoadedOffset, Info);
+ SLocEntryLoaded[Index] = true;
+ return SourceLocation::getMacroLoc(LoadedOffset);
+ }
+ LocalSLocEntryTable.push_back(SLocEntry::get(NextLocalOffset, Info));
+ assert(NextLocalOffset + TokLength + 1 > NextLocalOffset &&
+ NextLocalOffset + TokLength + 1 <= CurrentLoadedOffset &&
+ "Ran out of source locations!");
+ // See createFileID for that +1.
+ NextLocalOffset += TokLength + 1;
+ return SourceLocation::getMacroLoc(NextLocalOffset - (TokLength + 1));
+}
+
+const llvm::MemoryBuffer *
+SourceManager::getMemoryBufferForFile(const FileEntry *File,
+ bool *Invalid) {
+ const SrcMgr::ContentCache *IR = getOrCreateContentCache(File);
+ assert(IR && "getOrCreateContentCache() cannot return NULL");
+ return IR->getBuffer(Diag, *this, SourceLocation(), Invalid);
+}
+
+void SourceManager::overrideFileContents(const FileEntry *SourceFile,
+ const llvm::MemoryBuffer *Buffer,
+ bool DoNotFree) {
+ const SrcMgr::ContentCache *IR = getOrCreateContentCache(SourceFile);
+ assert(IR && "getOrCreateContentCache() cannot return NULL");
+
+ const_cast<SrcMgr::ContentCache *>(IR)->replaceBuffer(Buffer, DoNotFree);
+ const_cast<SrcMgr::ContentCache *>(IR)->BufferOverridden = true;
+}
+
+void SourceManager::overrideFileContents(const FileEntry *SourceFile,
+ const FileEntry *NewFile) {
+ assert(SourceFile->getSize() == NewFile->getSize() &&
+ "Different sizes, use the FileManager to create a virtual file with "
+ "the correct size");
+ assert(FileInfos.count(SourceFile) == 0 &&
+ "This function should be called at the initialization stage, before "
+ "any parsing occurs.");
+ OverriddenFiles[SourceFile] = NewFile;
+}
+
+StringRef SourceManager::getBufferData(FileID FID, bool *Invalid) const {
+ bool MyInvalid = false;
+ const SLocEntry &SLoc = getSLocEntry(FID, &MyInvalid);
+ if (!SLoc.isFile() || MyInvalid) {
+ if (Invalid)
+ *Invalid = true;
+ return "<<<<<INVALID SOURCE LOCATION>>>>>";
+ }
+
+ const llvm::MemoryBuffer *Buf
+ = SLoc.getFile().getContentCache()->getBuffer(Diag, *this, SourceLocation(),
+ &MyInvalid);
+ if (Invalid)
+ *Invalid = MyInvalid;
+
+ if (MyInvalid)
+ return "<<<<<INVALID SOURCE LOCATION>>>>>";
+
+ return Buf->getBuffer();
+}
+
+//===----------------------------------------------------------------------===//
+// SourceLocation manipulation methods.
+//===----------------------------------------------------------------------===//
+
+/// \brief Return the FileID for a SourceLocation.
+///
+/// This is the cache-miss path of getFileID. Not as hot as that function, but
+/// still very important. It is responsible for finding the entry in the
+/// SLocEntry tables that contains the specified location.
+FileID SourceManager::getFileIDSlow(unsigned SLocOffset) const {
+ if (!SLocOffset)
+ return FileID::get(0);
+
+ // Now it is time to search for the correct file. See where the SLocOffset
+ // sits in the global view and consult local or loaded buffers for it.
+ if (SLocOffset < NextLocalOffset)
+ return getFileIDLocal(SLocOffset);
+ return getFileIDLoaded(SLocOffset);
+}
+
+/// \brief Return the FileID for a SourceLocation with a low offset.
+///
+/// This function knows that the SourceLocation is in a local buffer, not a
+/// loaded one.
+FileID SourceManager::getFileIDLocal(unsigned SLocOffset) const {
+ assert(SLocOffset < NextLocalOffset && "Bad function choice");
+
+ // After the first and second level caches, I see two common sorts of
+ // behavior: 1) a lot of searched FileID's are "near" the cached file
+ // location or are "near" the cached expansion location. 2) others are just
+ // completely random and may be a very long way away.
+ //
+ // To handle this, we do a linear search for up to 8 steps to catch #1 quickly
+ // then we fall back to a less cache efficient, but more scalable, binary
+ // search to find the location.
+
+ // See if this is near the file point - worst case we start scanning from the
+ // most newly created FileID.
+ std::vector<SrcMgr::SLocEntry>::const_iterator I;
+
+ if (LastFileIDLookup.ID < 0 ||
+ LocalSLocEntryTable[LastFileIDLookup.ID].getOffset() < SLocOffset) {
+ // Neither loc prunes our search.
+ I = LocalSLocEntryTable.end();
+ } else {
+ // Perhaps it is near the file point.
+ I = LocalSLocEntryTable.begin()+LastFileIDLookup.ID;
+ }
+
+ // Find the FileID that contains this. "I" is an iterator that points to a
+ // FileID whose offset is known to be larger than SLocOffset.
+ unsigned NumProbes = 0;
+ while (1) {
+ --I;
+ if (I->getOffset() <= SLocOffset) {
+ FileID Res = FileID::get(int(I - LocalSLocEntryTable.begin()));
+
+ // If this isn't an expansion, remember it. We have good locality across
+ // FileID lookups.
+ if (!I->isExpansion())
+ LastFileIDLookup = Res;
+ NumLinearScans += NumProbes+1;
+ return Res;
+ }
+ if (++NumProbes == 8)
+ break;
+ }
+
+ // Convert "I" back into an index. We know that it is an entry whose index is
+ // larger than the offset we are looking for.
+ unsigned GreaterIndex = I - LocalSLocEntryTable.begin();
+ // LessIndex - This is the lower bound of the range that we're searching.
+  // We know that the offset corresponding to the FileID is less than
+ // SLocOffset.
+ unsigned LessIndex = 0;
+ NumProbes = 0;
+ while (1) {
+ bool Invalid = false;
+ unsigned MiddleIndex = (GreaterIndex-LessIndex)/2+LessIndex;
+ unsigned MidOffset = getLocalSLocEntry(MiddleIndex, &Invalid).getOffset();
+ if (Invalid)
+ return FileID::get(0);
+
+ ++NumProbes;
+
+ // If the offset of the midpoint is too large, chop the high side of the
+ // range to the midpoint.
+ if (MidOffset > SLocOffset) {
+ GreaterIndex = MiddleIndex;
+ continue;
+ }
+
+ // If the middle index contains the value, succeed and return.
+ // FIXME: This could be made faster by using a function that's aware of
+ // being in the local area.
+ if (isOffsetInFileID(FileID::get(MiddleIndex), SLocOffset)) {
+ FileID Res = FileID::get(MiddleIndex);
+
+ // If this isn't a macro expansion, remember it. We have good locality
+ // across FileID lookups.
+ if (!LocalSLocEntryTable[MiddleIndex].isExpansion())
+ LastFileIDLookup = Res;
+ NumBinaryProbes += NumProbes;
+ return Res;
+ }
+
+ // Otherwise, move the low-side up to the middle index.
+ LessIndex = MiddleIndex;
+ }
+}
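
A standalone sketch of the lookup strategy above: a short backwards linear probe from a cached hint, falling back to a binary search over the sorted offsets (plain unsigned offsets stand in for the SLocEntry table):

#include <algorithm>
#include <vector>

// Offsets must be sorted in increasing order with Offsets[0] == 0; returns the
// index of the last offset <= Target (the entry "containing" Target).
static unsigned findContaining(const std::vector<unsigned> &Offsets,
                               unsigned Target, unsigned Hint) {
  // The hint only bounds the search if its offset lies beyond Target;
  // otherwise start from the end of the table.
  unsigned I = (Hint < Offsets.size() && Offsets[Hint] > Target)
                   ? Hint : (unsigned)Offsets.size();
  // Linear probe: scan backwards for a handful of steps.
  for (unsigned Probes = 0; Probes != 8 && I != 0; ++Probes) {
    --I;
    if (Offsets[I] <= Target)
      return I;
  }
  // Fall back to binary search: upper_bound gives the first offset > Target.
  std::vector<unsigned>::const_iterator UB =
      std::upper_bound(Offsets.begin(), Offsets.end(), Target);
  return (unsigned)(UB - Offsets.begin()) - 1;
}
// Example: Offsets = {0, 100, 250}, Target = 180 -> index 1.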
+
+/// \brief Return the FileID for a SourceLocation with a high offset.
+///
+/// This function knows that the SourceLocation is in a loaded buffer, not a
+/// local one.
+FileID SourceManager::getFileIDLoaded(unsigned SLocOffset) const {
+  // Sanity check; otherwise a bug may lead to hanging in a release build.
+ if (SLocOffset < CurrentLoadedOffset) {
+ assert(0 && "Invalid SLocOffset or bad function choice");
+ return FileID();
+ }
+
+ // Essentially the same as the local case, but the loaded array is sorted
+ // in the other direction.
+
+ // First do a linear scan from the last lookup position, if possible.
+ unsigned I;
+ int LastID = LastFileIDLookup.ID;
+ if (LastID >= 0 || getLoadedSLocEntryByID(LastID).getOffset() < SLocOffset)
+ I = 0;
+ else
+ I = (-LastID - 2) + 1;
+
+ unsigned NumProbes;
+ for (NumProbes = 0; NumProbes < 8; ++NumProbes, ++I) {
+ // Make sure the entry is loaded!
+ const SrcMgr::SLocEntry &E = getLoadedSLocEntry(I);
+ if (E.getOffset() <= SLocOffset) {
+ FileID Res = FileID::get(-int(I) - 2);
+
+ if (!E.isExpansion())
+ LastFileIDLookup = Res;
+ NumLinearScans += NumProbes + 1;
+ return Res;
+ }
+ }
+
+ // Linear scan failed. Do the binary search. Note the reverse sorting of the
+ // table: GreaterIndex is the one where the offset is greater, which is
+ // actually a lower index!
+ unsigned GreaterIndex = I;
+ unsigned LessIndex = LoadedSLocEntryTable.size();
+ NumProbes = 0;
+ while (1) {
+ ++NumProbes;
+ unsigned MiddleIndex = (LessIndex - GreaterIndex) / 2 + GreaterIndex;
+ const SrcMgr::SLocEntry &E = getLoadedSLocEntry(MiddleIndex);
+
+ ++NumProbes;
+
+ if (E.getOffset() > SLocOffset) {
+ GreaterIndex = MiddleIndex;
+ continue;
+ }
+
+ if (isOffsetInFileID(FileID::get(-int(MiddleIndex) - 2), SLocOffset)) {
+ FileID Res = FileID::get(-int(MiddleIndex) - 2);
+ if (!E.isExpansion())
+ LastFileIDLookup = Res;
+ NumBinaryProbes += NumProbes;
+ return Res;
+ }
+
+ LessIndex = MiddleIndex;
+ }
+}
+
+SourceLocation SourceManager::
+getExpansionLocSlowCase(SourceLocation Loc) const {
+ do {
+ // Note: If Loc indicates an offset into a token that came from a macro
+ // expansion (e.g. the 5th character of the token) we do not want to add
+ // this offset when going to the expansion location. The expansion
+ // location is the macro invocation, which the offset has nothing to do
+ // with. This is unlike when we get the spelling loc, because the offset
+ // directly correspond to the token whose spelling we're inspecting.
+ Loc = getSLocEntry(getFileID(Loc)).getExpansion().getExpansionLocStart();
+ } while (!Loc.isFileID());
+
+ return Loc;
+}
+
+SourceLocation SourceManager::getSpellingLocSlowCase(SourceLocation Loc) const {
+ do {
+ std::pair<FileID, unsigned> LocInfo = getDecomposedLoc(Loc);
+ Loc = getSLocEntry(LocInfo.first).getExpansion().getSpellingLoc();
+ Loc = Loc.getLocWithOffset(LocInfo.second);
+ } while (!Loc.isFileID());
+ return Loc;
+}
+
+SourceLocation SourceManager::getFileLocSlowCase(SourceLocation Loc) const {
+ do {
+ if (isMacroArgExpansion(Loc))
+ Loc = getImmediateSpellingLoc(Loc);
+ else
+ Loc = getImmediateExpansionRange(Loc).first;
+ } while (!Loc.isFileID());
+ return Loc;
+}
+
+
+std::pair<FileID, unsigned>
+SourceManager::getDecomposedExpansionLocSlowCase(
+ const SrcMgr::SLocEntry *E) const {
+ // If this is an expansion record, walk through all the expansion points.
+ FileID FID;
+ SourceLocation Loc;
+ unsigned Offset;
+ do {
+ Loc = E->getExpansion().getExpansionLocStart();
+
+ FID = getFileID(Loc);
+ E = &getSLocEntry(FID);
+ Offset = Loc.getOffset()-E->getOffset();
+ } while (!Loc.isFileID());
+
+ return std::make_pair(FID, Offset);
+}
+
+std::pair<FileID, unsigned>
+SourceManager::getDecomposedSpellingLocSlowCase(const SrcMgr::SLocEntry *E,
+ unsigned Offset) const {
+ // If this is an expansion record, walk through all the expansion points.
+ FileID FID;
+ SourceLocation Loc;
+ do {
+ Loc = E->getExpansion().getSpellingLoc();
+ Loc = Loc.getLocWithOffset(Offset);
+
+ FID = getFileID(Loc);
+ E = &getSLocEntry(FID);
+ Offset = Loc.getOffset()-E->getOffset();
+ } while (!Loc.isFileID());
+
+ return std::make_pair(FID, Offset);
+}
+
+/// getImmediateSpellingLoc - Given a SourceLocation object, return the
+/// spelling location referenced by the ID. This is the first level down
+/// towards the place where the characters that make up the lexed token can be
+/// found. This should not generally be used by clients.
+SourceLocation SourceManager::getImmediateSpellingLoc(SourceLocation Loc) const{
+ if (Loc.isFileID()) return Loc;
+ std::pair<FileID, unsigned> LocInfo = getDecomposedLoc(Loc);
+ Loc = getSLocEntry(LocInfo.first).getExpansion().getSpellingLoc();
+ return Loc.getLocWithOffset(LocInfo.second);
+}
+
+
+/// getImmediateExpansionRange - Loc is required to be an expansion location.
+/// Return the start/end of the expansion information.
+std::pair<SourceLocation,SourceLocation>
+SourceManager::getImmediateExpansionRange(SourceLocation Loc) const {
+ assert(Loc.isMacroID() && "Not a macro expansion loc!");
+ const ExpansionInfo &Expansion = getSLocEntry(getFileID(Loc)).getExpansion();
+ return Expansion.getExpansionLocRange();
+}
+
+/// getExpansionRange - Given a SourceLocation object, return the range of
+/// tokens covered by the expansion in the ultimate file.
+std::pair<SourceLocation,SourceLocation>
+SourceManager::getExpansionRange(SourceLocation Loc) const {
+ if (Loc.isFileID()) return std::make_pair(Loc, Loc);
+
+ std::pair<SourceLocation,SourceLocation> Res =
+ getImmediateExpansionRange(Loc);
+
+ // Fully resolve the start and end locations to their ultimate expansion
+ // points.
+ while (!Res.first.isFileID())
+ Res.first = getImmediateExpansionRange(Res.first).first;
+ while (!Res.second.isFileID())
+ Res.second = getImmediateExpansionRange(Res.second).second;
+ return Res;
+}
+
+bool SourceManager::isMacroArgExpansion(SourceLocation Loc) const {
+ if (!Loc.isMacroID()) return false;
+
+ FileID FID = getFileID(Loc);
+ const SrcMgr::SLocEntry *E = &getSLocEntry(FID);
+ const SrcMgr::ExpansionInfo &Expansion = E->getExpansion();
+ return Expansion.isMacroArgExpansion();
+}
+
+
+//===----------------------------------------------------------------------===//
+// Queries about the code at a SourceLocation.
+//===----------------------------------------------------------------------===//
+
+/// getCharacterData - Return a pointer to the start of the specified location
+/// in the appropriate MemoryBuffer.
+const char *SourceManager::getCharacterData(SourceLocation SL,
+ bool *Invalid) const {
+ // Note that this is a hot function in the getSpelling() path, which is
+ // heavily used by -E mode.
+ std::pair<FileID, unsigned> LocInfo = getDecomposedSpellingLoc(SL);
+
+ // Note that calling 'getBuffer()' may lazily page in a source file.
+ bool CharDataInvalid = false;
+ const SLocEntry &Entry = getSLocEntry(LocInfo.first, &CharDataInvalid);
+ if (CharDataInvalid || !Entry.isFile()) {
+ if (Invalid)
+ *Invalid = true;
+
+ return "<<<<INVALID BUFFER>>>>";
+ }
+ const llvm::MemoryBuffer *Buffer
+ = Entry.getFile().getContentCache()
+ ->getBuffer(Diag, *this, SourceLocation(), &CharDataInvalid);
+ if (Invalid)
+ *Invalid = CharDataInvalid;
+ return Buffer->getBufferStart() + (CharDataInvalid? 0 : LocInfo.second);
+}
+
+
+/// getColumnNumber - Return the column # for the specified file position.
+/// This is significantly cheaper to compute than the line number.
+unsigned SourceManager::getColumnNumber(FileID FID, unsigned FilePos,
+ bool *Invalid) const {
+ bool MyInvalid = false;
+ const llvm::MemoryBuffer *MemBuf = getBuffer(FID, &MyInvalid);
+ if (Invalid)
+ *Invalid = MyInvalid;
+
+ if (MyInvalid)
+ return 1;
+
+ if (FilePos >= MemBuf->getBufferSize()) {
+ if (Invalid)
+ *Invalid = MyInvalid;
+ return 1;
+ }
+
+ const char *Buf = MemBuf->getBufferStart();
+ unsigned LineStart = FilePos;
+ while (LineStart && Buf[LineStart-1] != '\n' && Buf[LineStart-1] != '\r')
+ --LineStart;
+ return FilePos-LineStart+1;
+}
+
+// isInvalid - Return the result of calling loc.isInvalid(), and
+// if Invalid is not null, set its value to the same.
+static bool isInvalid(SourceLocation Loc, bool *Invalid) {
+ bool MyInvalid = Loc.isInvalid();
+ if (Invalid)
+ *Invalid = MyInvalid;
+ return MyInvalid;
+}
+
+unsigned SourceManager::getSpellingColumnNumber(SourceLocation Loc,
+ bool *Invalid) const {
+ if (isInvalid(Loc, Invalid)) return 0;
+ std::pair<FileID, unsigned> LocInfo = getDecomposedSpellingLoc(Loc);
+ return getColumnNumber(LocInfo.first, LocInfo.second, Invalid);
+}
+
+unsigned SourceManager::getExpansionColumnNumber(SourceLocation Loc,
+ bool *Invalid) const {
+ if (isInvalid(Loc, Invalid)) return 0;
+ std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
+ return getColumnNumber(LocInfo.first, LocInfo.second, Invalid);
+}
+
+unsigned SourceManager::getPresumedColumnNumber(SourceLocation Loc,
+ bool *Invalid) const {
+ if (isInvalid(Loc, Invalid)) return 0;
+ return getPresumedLoc(Loc).getColumn();
+}
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+#endif
+
+static LLVM_ATTRIBUTE_NOINLINE void
+ComputeLineNumbers(DiagnosticsEngine &Diag, ContentCache *FI,
+ llvm::BumpPtrAllocator &Alloc,
+ const SourceManager &SM, bool &Invalid);
+static void ComputeLineNumbers(DiagnosticsEngine &Diag, ContentCache *FI,
+ llvm::BumpPtrAllocator &Alloc,
+ const SourceManager &SM, bool &Invalid) {
+ // Note that calling 'getBuffer()' may lazily page in the file.
+ const MemoryBuffer *Buffer = FI->getBuffer(Diag, SM, SourceLocation(),
+ &Invalid);
+ if (Invalid)
+ return;
+
+ // Find the file offsets of all of the *physical* source lines. This does
+ // not look at trigraphs, escaped newlines, or anything else tricky.
+ SmallVector<unsigned, 256> LineOffsets;
+
+ // Line #1 starts at char 0.
+ LineOffsets.push_back(0);
+
+ const unsigned char *Buf = (const unsigned char *)Buffer->getBufferStart();
+ const unsigned char *End = (const unsigned char *)Buffer->getBufferEnd();
+ unsigned Offs = 0;
+ while (1) {
+ // Skip over the contents of the line.
+ const unsigned char *NextBuf = (const unsigned char *)Buf;
+
+#ifdef __SSE2__
+ // Try to skip to the next newline using SSE instructions. This is very
+ // performance sensitive for programs with lots of diagnostics and in -E
+ // mode.
+ __m128i CRs = _mm_set1_epi8('\r');
+ __m128i LFs = _mm_set1_epi8('\n');
+
+ // First fix up the alignment to 16 bytes.
+ while (((uintptr_t)NextBuf & 0xF) != 0) {
+ if (*NextBuf == '\n' || *NextBuf == '\r' || *NextBuf == '\0')
+ goto FoundSpecialChar;
+ ++NextBuf;
+ }
+
+ // Scan 16 byte chunks for '\r' and '\n'. Ignore '\0'.
+ while (NextBuf+16 <= End) {
+ __m128i Chunk = *(__m128i*)NextBuf;
+ __m128i Cmp = _mm_or_si128(_mm_cmpeq_epi8(Chunk, CRs),
+ _mm_cmpeq_epi8(Chunk, LFs));
+ unsigned Mask = _mm_movemask_epi8(Cmp);
+
+ // If we found a newline, adjust the pointer and jump to the handling code.
+ if (Mask != 0) {
+ NextBuf += llvm::CountTrailingZeros_32(Mask);
+ goto FoundSpecialChar;
+ }
+ NextBuf += 16;
+ }
+#endif
+
+ while (*NextBuf != '\n' && *NextBuf != '\r' && *NextBuf != '\0')
+ ++NextBuf;
+
+#ifdef __SSE2__
+FoundSpecialChar:
+#endif
+ Offs += NextBuf-Buf;
+ Buf = NextBuf;
+
+ if (Buf[0] == '\n' || Buf[0] == '\r') {
+ // If this is \n\r or \r\n, skip both characters.
+ if ((Buf[1] == '\n' || Buf[1] == '\r') && Buf[0] != Buf[1])
+ ++Offs, ++Buf;
+ ++Offs, ++Buf;
+ LineOffsets.push_back(Offs);
+ } else {
+ // Otherwise, this is a null. If end of file, exit.
+ if (Buf == End) break;
+ // Otherwise, skip the null.
+ ++Offs, ++Buf;
+ }
+ }
+
+ // Copy the offsets into the FileInfo structure.
+ FI->NumLines = LineOffsets.size();
+ FI->SourceLineCache = Alloc.Allocate<unsigned>(LineOffsets.size());
+ std::copy(LineOffsets.begin(), LineOffsets.end(), FI->SourceLineCache);
+}
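
A portable standalone sketch of the line-offset computation above, without the SSE fast path and assuming a buffer with no embedded nulls:

#include <string>
#include <vector>

// Offsets at which each physical line starts; line 1 starts at offset 0.
// "\r\n" and "\n\r" each count as a single terminator, as in the code above.
static std::vector<unsigned> computeLineOffsets(const std::string &Buf) {
  std::vector<unsigned> Offsets;
  Offsets.push_back(0);
  for (unsigned I = 0, E = (unsigned)Buf.size(); I != E; ++I) {
    if (Buf[I] == '\n' || Buf[I] == '\r') {
      if (I + 1 != E && (Buf[I + 1] == '\n' || Buf[I + 1] == '\r') &&
          Buf[I + 1] != Buf[I])
        ++I; // skip the second character of a two-character terminator
      Offsets.push_back(I + 1);
    }
  }
  return Offsets;
}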
+
+/// getLineNumber - Given a SourceLocation, return the spelling line number
+/// for the position indicated. This requires building and caching a table of
+/// line offsets for the MemoryBuffer, so this is not cheap: use only when
+/// about to emit a diagnostic.
+unsigned SourceManager::getLineNumber(FileID FID, unsigned FilePos,
+ bool *Invalid) const {
+ if (FID.isInvalid()) {
+ if (Invalid)
+ *Invalid = true;
+ return 1;
+ }
+
+ ContentCache *Content;
+ if (LastLineNoFileIDQuery == FID)
+ Content = LastLineNoContentCache;
+ else {
+ bool MyInvalid = false;
+ const SLocEntry &Entry = getSLocEntry(FID, &MyInvalid);
+ if (MyInvalid || !Entry.isFile()) {
+ if (Invalid)
+ *Invalid = true;
+ return 1;
+ }
+
+ Content = const_cast<ContentCache*>(Entry.getFile().getContentCache());
+ }
+
+ // If this is the first use of line information for this buffer, compute the
+  // SourceLineCache for it on demand.
+ if (Content->SourceLineCache == 0) {
+ bool MyInvalid = false;
+ ComputeLineNumbers(Diag, Content, ContentCacheAlloc, *this, MyInvalid);
+ if (Invalid)
+ *Invalid = MyInvalid;
+ if (MyInvalid)
+ return 1;
+ } else if (Invalid)
+ *Invalid = false;
+
+ // Okay, we know we have a line number table. Do a binary search to find the
+ // line number that this character position lands on.
+ unsigned *SourceLineCache = Content->SourceLineCache;
+ unsigned *SourceLineCacheStart = SourceLineCache;
+ unsigned *SourceLineCacheEnd = SourceLineCache + Content->NumLines;
+
+ unsigned QueriedFilePos = FilePos+1;
+
+ // FIXME: I would like to be convinced that this code is worth being as
+  // complicated as it is; binary search isn't that slow.
+ //
+ // If it is worth being optimized, then in my opinion it could be more
+ // performant, simpler, and more obviously correct by just "galloping" outward
+ // from the queried file position. In fact, this could be incorporated into a
+ // generic algorithm such as lower_bound_with_hint.
+ //
+  // If someone gives me a test case where this matters, I will do it! - DWD
+
+ // If the previous query was to the same file, we know both the file pos from
+ // that query and the line number returned. This allows us to narrow the
+ // search space from the entire file to something near the match.
+ if (LastLineNoFileIDQuery == FID) {
+ if (QueriedFilePos >= LastLineNoFilePos) {
+ // FIXME: Potential overflow?
+ SourceLineCache = SourceLineCache+LastLineNoResult-1;
+
+ // The query is likely to be nearby the previous one. Here we check to
+ // see if it is within 5, 10 or 20 lines. It can be far away in cases
+ // where big comment blocks and vertical whitespace eat up lines but
+ // contribute no tokens.
+ if (SourceLineCache+5 < SourceLineCacheEnd) {
+ if (SourceLineCache[5] > QueriedFilePos)
+ SourceLineCacheEnd = SourceLineCache+5;
+ else if (SourceLineCache+10 < SourceLineCacheEnd) {
+ if (SourceLineCache[10] > QueriedFilePos)
+ SourceLineCacheEnd = SourceLineCache+10;
+ else if (SourceLineCache+20 < SourceLineCacheEnd) {
+ if (SourceLineCache[20] > QueriedFilePos)
+ SourceLineCacheEnd = SourceLineCache+20;
+ }
+ }
+ }
+ } else {
+ if (LastLineNoResult < Content->NumLines)
+ SourceLineCacheEnd = SourceLineCache+LastLineNoResult+1;
+ }
+ }
+
+ // If the spread is large, do a "radix" test as our initial guess, based on
+ // the assumption that lines average to approximately the same length.
+ // NOTE: This is currently disabled, as it does not appear to be profitable in
+ // initial measurements.
+ if (0 && SourceLineCacheEnd-SourceLineCache > 20) {
+ unsigned FileLen = Content->SourceLineCache[Content->NumLines-1];
+
+ // Take a stab at guessing where it is.
+ unsigned ApproxPos = Content->NumLines*QueriedFilePos / FileLen;
+
+ // Check for -10 and +10 lines.
+ unsigned LowerBound = std::max(int(ApproxPos-10), 0);
+ unsigned UpperBound = std::min(ApproxPos+10, FileLen);
+
+ // If the computed lower bound is less than the query location, move it in.
+ if (SourceLineCache < SourceLineCacheStart+LowerBound &&
+ SourceLineCacheStart[LowerBound] < QueriedFilePos)
+ SourceLineCache = SourceLineCacheStart+LowerBound;
+
+ // If the computed upper bound is greater than the query location, move it.
+ if (SourceLineCacheEnd > SourceLineCacheStart+UpperBound &&
+ SourceLineCacheStart[UpperBound] >= QueriedFilePos)
+ SourceLineCacheEnd = SourceLineCacheStart+UpperBound;
+ }
+
+ unsigned *Pos
+ = std::lower_bound(SourceLineCache, SourceLineCacheEnd, QueriedFilePos);
+ unsigned LineNo = Pos-SourceLineCacheStart;
+
+ LastLineNoFileIDQuery = FID;
+ LastLineNoContentCache = Content;
+ LastLineNoFilePos = QueriedFilePos;
+ LastLineNoResult = LineNo;
+ return LineNo;
+}
+
+unsigned SourceManager::getSpellingLineNumber(SourceLocation Loc,
+ bool *Invalid) const {
+ if (isInvalid(Loc, Invalid)) return 0;
+ std::pair<FileID, unsigned> LocInfo = getDecomposedSpellingLoc(Loc);
+ return getLineNumber(LocInfo.first, LocInfo.second);
+}
+unsigned SourceManager::getExpansionLineNumber(SourceLocation Loc,
+ bool *Invalid) const {
+ if (isInvalid(Loc, Invalid)) return 0;
+ std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
+ return getLineNumber(LocInfo.first, LocInfo.second);
+}
+unsigned SourceManager::getPresumedLineNumber(SourceLocation Loc,
+ bool *Invalid) const {
+ if (isInvalid(Loc, Invalid)) return 0;
+ return getPresumedLoc(Loc).getLine();
+}
+
+/// getFileCharacteristic - return the file characteristic of the specified
+/// source location, indicating whether this is a normal file, a system
+/// header, or an "implicit extern C" system header.
+///
+/// This state can be modified with flags on GNU linemarker directives like:
+/// # 4 "foo.h" 3
+/// which changes all source locations in the current file after that to be
+/// considered to be from a system header.
+SrcMgr::CharacteristicKind
+SourceManager::getFileCharacteristic(SourceLocation Loc) const {
+ assert(!Loc.isInvalid() && "Can't get file characteristic of invalid loc!");
+ std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
+ bool Invalid = false;
+ const SLocEntry &SEntry = getSLocEntry(LocInfo.first, &Invalid);
+ if (Invalid || !SEntry.isFile())
+ return C_User;
+
+ const SrcMgr::FileInfo &FI = SEntry.getFile();
+
+ // If there are no #line directives in this file, just return the whole-file
+ // state.
+ if (!FI.hasLineDirectives())
+ return FI.getFileCharacteristic();
+
+ assert(LineTable && "Can't have linetable entries without a LineTable!");
+ // See if there is a #line directive before the location.
+ const LineEntry *Entry =
+ LineTable->FindNearestLineEntry(LocInfo.first.ID, LocInfo.second);
+
+ // If this is before the first line marker, use the file characteristic.
+ if (!Entry)
+ return FI.getFileCharacteristic();
+
+ return Entry->FileKind;
+}
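+
+// Illustrative sketch, not part of the original file: after a GNU linemarker
+// such as
+//   # 4 "foo.h" 3
+// later locations in the file report a system-header characteristic. With a
+// SourceManager `SM` and a SourceLocation `Loc` (hypothetical names):
+//
+//   if (SM.getFileCharacteristic(Loc) != SrcMgr::C_User) {
+//     // Loc behaves as if it came from a system header.
+//   }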
+
+/// Return the filename or buffer identifier of the buffer the location is in.
+/// Note that this name does not respect #line directives. Use getPresumedLoc
+/// for normal clients.
+const char *SourceManager::getBufferName(SourceLocation Loc,
+ bool *Invalid) const {
+ if (isInvalid(Loc, Invalid)) return "<invalid loc>";
+
+ return getBuffer(getFileID(Loc), Invalid)->getBufferIdentifier();
+}
+
+
+/// getPresumedLoc - This method returns the "presumed" location that a
+/// SourceLocation specifies. A "presumed location" can be modified by #line
+/// or GNU line marker directives. This provides a view on the data that a
+/// user should see in diagnostics, for example.
+///
+/// Note that a presumed location is always given as the expansion point of an
+/// expansion location, not the spelling location.
+PresumedLoc SourceManager::getPresumedLoc(SourceLocation Loc) const {
+ if (Loc.isInvalid()) return PresumedLoc();
+
+ // Presumed locations are always for expansion points.
+ std::pair<FileID, unsigned> LocInfo = getDecomposedExpansionLoc(Loc);
+
+ bool Invalid = false;
+ const SLocEntry &Entry = getSLocEntry(LocInfo.first, &Invalid);
+ if (Invalid || !Entry.isFile())
+ return PresumedLoc();
+
+ const SrcMgr::FileInfo &FI = Entry.getFile();
+ const SrcMgr::ContentCache *C = FI.getContentCache();
+
+ // To get the source name, first consult the FileEntry (if one exists)
+ // before the MemBuffer as this will avoid unnecessarily paging in the
+ // MemBuffer.
+ const char *Filename;
+ if (C->OrigEntry)
+ Filename = C->OrigEntry->getName();
+ else
+ Filename = C->getBuffer(Diag, *this)->getBufferIdentifier();
+
+ unsigned LineNo = getLineNumber(LocInfo.first, LocInfo.second, &Invalid);
+ if (Invalid)
+ return PresumedLoc();
+ unsigned ColNo = getColumnNumber(LocInfo.first, LocInfo.second, &Invalid);
+ if (Invalid)
+ return PresumedLoc();
+
+ SourceLocation IncludeLoc = FI.getIncludeLoc();
+
+ // If we have #line directives in this file, update and overwrite the physical
+ // location info if appropriate.
+ if (FI.hasLineDirectives()) {
+ assert(LineTable && "Can't have linetable entries without a LineTable!");
+ // See if there is a #line directive before this. If so, get it.
+ if (const LineEntry *Entry =
+ LineTable->FindNearestLineEntry(LocInfo.first.ID, LocInfo.second)) {
+ // If the LineEntry indicates a filename, use it.
+ if (Entry->FilenameID != -1)
+ Filename = LineTable->getFilename(Entry->FilenameID);
+
+ // Use the line number specified by the LineEntry. This line number may
+ // be multiple lines down from the line entry. Add the difference in
+ // physical line numbers between the query point and the line marker to the
+ // total.
+ unsigned MarkerLineNo = getLineNumber(LocInfo.first, Entry->FileOffset);
+ LineNo = Entry->LineNo + (LineNo-MarkerLineNo-1);
+
+ // Note that column numbers are not molested by line markers.
+
+ // Handle virtual #include manipulation.
+ if (Entry->IncludeOffset) {
+ IncludeLoc = getLocForStartOfFile(LocInfo.first);
+ IncludeLoc = IncludeLoc.getLocWithOffset(Entry->IncludeOffset);
+ }
+ }
+ }
+
+ return PresumedLoc(Filename, LineNo, ColNo, IncludeLoc);
+}
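+
+// Illustrative usage sketch (hypothetical names `SM` and `Loc`): PresumedLoc
+// carries the #line-adjusted filename, line, column and include location, so
+// it is the right thing to print in user-facing diagnostics.
+//
+//   PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+//   if (PLoc.isValid())
+//     llvm::errs() << PLoc.getFilename() << ':' << PLoc.getLine() << ':'
+//                  << PLoc.getColumn() << '\n';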
+
+/// \brief The size of the SLocEntry that \arg FID represents.
+unsigned SourceManager::getFileIDSize(FileID FID) const {
+ bool Invalid = false;
+ const SrcMgr::SLocEntry &Entry = getSLocEntry(FID, &Invalid);
+ if (Invalid)
+ return 0;
+
+ int ID = FID.ID;
+ unsigned NextOffset;
+ if (ID > 0 && unsigned(ID+1) == local_sloc_entry_size())
+ NextOffset = getNextLocalOffset();
+ else if (ID+1 == -1)
+ NextOffset = MaxLoadedOffset;
+ else
+ NextOffset = getSLocEntry(FileID::get(ID+1)).getOffset();
+
+ return NextOffset - Entry.getOffset() - 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Other miscellaneous methods.
+//===----------------------------------------------------------------------===//
+
+/// \brief Retrieve the inode for the given file entry, if possible.
+///
+/// This routine involves a system call, and therefore should only be used
+/// in non-performance-critical code.
+static llvm::Optional<ino_t> getActualFileInode(const FileEntry *File) {
+ if (!File)
+ return llvm::Optional<ino_t>();
+
+ struct stat StatBuf;
+ if (::stat(File->getName(), &StatBuf))
+ return llvm::Optional<ino_t>();
+
+ return StatBuf.st_ino;
+}
+
+/// \brief Get the source location for the given file:line:col triplet.
+///
+/// If the source file is included multiple times, the source location will
+/// be based upon an arbitrary inclusion.
+SourceLocation SourceManager::translateFileLineCol(const FileEntry *SourceFile,
+ unsigned Line,
+ unsigned Col) const {
+ assert(SourceFile && "Null source file!");
+ assert(Line && Col && "Line and column should start from 1!");
+
+ FileID FirstFID = translateFile(SourceFile);
+ return translateLineCol(FirstFID, Line, Col);
+}
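+
+// Illustrative sketch (hypothetical names): mapping a user-supplied
+// file:line:col triplet, e.g. from a command-line option, back to a
+// SourceLocation. `FileMgr` is an assumed FileManager instance.
+//
+//   if (const FileEntry *FE = FileMgr.getFile("foo.c")) {
+//     SourceLocation Loc = SM.translateFileLineCol(FE, /*Line=*/42, /*Col=*/7);
+//     // Loc is based on an arbitrary inclusion if foo.c is included twice.
+//   }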
+
+/// \brief Get the FileID for the given file.
+///
+/// If the source file is included multiple times, the FileID will be the
+/// first inclusion.
+FileID SourceManager::translateFile(const FileEntry *SourceFile) const {
+ assert(SourceFile && "Null source file!");
+
+ // Find the first file ID that corresponds to the given file.
+ FileID FirstFID;
+
+ // First, check the main file ID, since it is common to look for a
+ // location in the main file.
+ llvm::Optional<ino_t> SourceFileInode;
+ llvm::Optional<StringRef> SourceFileName;
+ if (!MainFileID.isInvalid()) {
+ bool Invalid = false;
+ const SLocEntry &MainSLoc = getSLocEntry(MainFileID, &Invalid);
+ if (Invalid)
+ return FileID();
+
+ if (MainSLoc.isFile()) {
+ const ContentCache *MainContentCache
+ = MainSLoc.getFile().getContentCache();
+ if (!MainContentCache) {
+ // Can't do anything
+ } else if (MainContentCache->OrigEntry == SourceFile) {
+ FirstFID = MainFileID;
+ } else {
+ // Fall back: check whether we have the same base name and inode
+ // as the main file.
+ const FileEntry *MainFile = MainContentCache->OrigEntry;
+ SourceFileName = llvm::sys::path::filename(SourceFile->getName());
+ if (*SourceFileName == llvm::sys::path::filename(MainFile->getName())) {
+ SourceFileInode = getActualFileInode(SourceFile);
+ if (SourceFileInode) {
+ if (llvm::Optional<ino_t> MainFileInode
+ = getActualFileInode(MainFile)) {
+ if (*SourceFileInode == *MainFileInode) {
+ FirstFID = MainFileID;
+ SourceFile = MainFile;
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+
+ if (FirstFID.isInvalid()) {
+ // The location we're looking for isn't in the main file; look
+ // through all of the local source locations.
+ for (unsigned I = 0, N = local_sloc_entry_size(); I != N; ++I) {
+ bool Invalid = false;
+ const SLocEntry &SLoc = getLocalSLocEntry(I, &Invalid);
+ if (Invalid)
+ return FileID();
+
+ if (SLoc.isFile() &&
+ SLoc.getFile().getContentCache() &&
+ SLoc.getFile().getContentCache()->OrigEntry == SourceFile) {
+ FirstFID = FileID::get(I);
+ break;
+ }
+ }
+ // If that still didn't help, try the modules.
+ if (FirstFID.isInvalid()) {
+ for (unsigned I = 0, N = loaded_sloc_entry_size(); I != N; ++I) {
+ const SLocEntry &SLoc = getLoadedSLocEntry(I);
+ if (SLoc.isFile() &&
+ SLoc.getFile().getContentCache() &&
+ SLoc.getFile().getContentCache()->OrigEntry == SourceFile) {
+ FirstFID = FileID::get(-int(I) - 2);
+ break;
+ }
+ }
+ }
+ }
+
+ // If we haven't found what we want yet, try again, but this time stat()
+ // each of the files in case the files have changed since we originally
+ // parsed the file.
+ if (FirstFID.isInvalid() &&
+ (SourceFileName ||
+ (SourceFileName = llvm::sys::path::filename(SourceFile->getName()))) &&
+ (SourceFileInode ||
+ (SourceFileInode = getActualFileInode(SourceFile)))) {
+ bool Invalid = false;
+ for (unsigned I = 0, N = local_sloc_entry_size(); I != N; ++I) {
+ FileID IFileID;
+ IFileID.ID = I;
+ const SLocEntry &SLoc = getSLocEntry(IFileID, &Invalid);
+ if (Invalid)
+ return FileID();
+
+ if (SLoc.isFile()) {
+ const ContentCache *FileContentCache
+ = SLoc.getFile().getContentCache();
+ const FileEntry *Entry = FileContentCache ? FileContentCache->OrigEntry : 0;
+ if (Entry &&
+ *SourceFileName == llvm::sys::path::filename(Entry->getName())) {
+ if (llvm::Optional<ino_t> EntryInode = getActualFileInode(Entry)) {
+ if (*SourceFileInode == *EntryInode) {
+ FirstFID = FileID::get(I);
+ SourceFile = Entry;
+ break;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return FirstFID;
+}
+
+/// \brief Get the source location in \arg FID for the given line:col.
+/// Returns null location if \arg FID is not a file SLocEntry.
+SourceLocation SourceManager::translateLineCol(FileID FID,
+ unsigned Line,
+ unsigned Col) const {
+ if (FID.isInvalid())
+ return SourceLocation();
+
+ bool Invalid = false;
+ const SLocEntry &Entry = getSLocEntry(FID, &Invalid);
+ if (Invalid)
+ return SourceLocation();
+
+ if (!Entry.isFile())
+ return SourceLocation();
+
+ SourceLocation FileLoc = SourceLocation::getFileLoc(Entry.getOffset());
+
+ if (Line == 1 && Col == 1)
+ return FileLoc;
+
+ ContentCache *Content
+ = const_cast<ContentCache *>(Entry.getFile().getContentCache());
+ if (!Content)
+ return SourceLocation();
+
+ // If this is the first use of line information for this buffer, compute the
+ // SourceLineCache for it on demand.
+ if (Content->SourceLineCache == 0) {
+ bool MyInvalid = false;
+ ComputeLineNumbers(Diag, Content, ContentCacheAlloc, *this, MyInvalid);
+ if (MyInvalid)
+ return SourceLocation();
+ }
+
+ if (Line > Content->NumLines) {
+ unsigned Size = Content->getBuffer(Diag, *this)->getBufferSize();
+ if (Size > 0)
+ --Size;
+ return FileLoc.getLocWithOffset(Size);
+ }
+
+ const llvm::MemoryBuffer *Buffer = Content->getBuffer(Diag, *this);
+ unsigned FilePos = Content->SourceLineCache[Line - 1];
+ const char *Buf = Buffer->getBufferStart() + FilePos;
+ unsigned BufLength = Buffer->getBufferSize() - FilePos;
+ if (BufLength == 0)
+ return FileLoc.getLocWithOffset(FilePos);
+
+ unsigned i = 0;
+
+ // Check that the given column is valid.
+ while (i < BufLength-1 && i < Col-1 && Buf[i] != '\n' && Buf[i] != '\r')
+ ++i;
+ if (i < Col-1)
+ return FileLoc.getLocWithOffset(FilePos + i);
+
+ return FileLoc.getLocWithOffset(FilePos + Col - 1);
+}
+
+/// \brief Compute a map of macro argument chunks to their expanded source
+/// location. Chunks that are not part of a macro argument will map to an
+/// invalid source location. e.g. if a file contains one macro argument at
+/// offset 100 with length 10, this is how the map will be formed:
+/// 0 -> SourceLocation()
+/// 100 -> Expanded macro arg location
+/// 110 -> SourceLocation()
+void SourceManager::computeMacroArgsCache(MacroArgsMap *&CachePtr,
+ FileID FID) const {
+ assert(!FID.isInvalid());
+ assert(!CachePtr);
+
+ CachePtr = new MacroArgsMap();
+ MacroArgsMap &MacroArgsCache = *CachePtr;
+ // Initially no macro argument chunk is present.
+ MacroArgsCache.insert(std::make_pair(0, SourceLocation()));
+
+ int ID = FID.ID;
+ while (1) {
+ ++ID;
+ // Stop if there are no more FileIDs to check.
+ if (ID > 0) {
+ if (unsigned(ID) >= local_sloc_entry_size())
+ return;
+ } else if (ID == -1) {
+ return;
+ }
+
+ const SrcMgr::SLocEntry &Entry = getSLocEntryByID(ID);
+ if (Entry.isFile()) {
+ SourceLocation IncludeLoc = Entry.getFile().getIncludeLoc();
+ if (IncludeLoc.isInvalid())
+ continue;
+ if (!isInFileID(IncludeLoc, FID))
+ return; // No more files/macros that may be "contained" in this file.
+
+ // Skip the files/macros of the #include'd file, we only care about macros
+ // that lexed macro arguments from our file.
+ if (Entry.getFile().NumCreatedFIDs)
+ ID += Entry.getFile().NumCreatedFIDs - 1/*because of next ++ID*/;
+ continue;
+ }
+
+ const ExpansionInfo &ExpInfo = Entry.getExpansion();
+
+ if (ExpInfo.getExpansionLocStart().isFileID()) {
+ if (!isInFileID(ExpInfo.getExpansionLocStart(), FID))
+ return; // No more files/macros that may be "contained" in this file.
+ }
+
+ if (!ExpInfo.isMacroArgExpansion())
+ continue;
+
+ SourceLocation SpellLoc = ExpInfo.getSpellingLoc();
+ while (!SpellLoc.isFileID()) {
+ std::pair<FileID, unsigned> LocInfo = getDecomposedLoc(SpellLoc);
+ const ExpansionInfo &Info = getSLocEntry(LocInfo.first).getExpansion();
+ if (!Info.isMacroArgExpansion())
+ break;
+ SpellLoc = Info.getSpellingLoc().getLocWithOffset(LocInfo.second);
+ }
+ if (!SpellLoc.isFileID())
+ continue;
+
+ unsigned BeginOffs;
+ if (!isInFileID(SpellLoc, FID, &BeginOffs))
+ continue;
+
+ unsigned EndOffs = BeginOffs + getFileIDSize(FileID::get(ID));
+
+ // Add a new chunk for this macro argument. A previous macro argument chunk
+ // may have been lexed again, so e.g. if the map is
+ // 0 -> SourceLocation()
+ // 100 -> Expanded loc #1
+ // 110 -> SourceLocation()
+ // and we found a new macro FileID that lexed from offset 105 with length 3,
+ // the new map will be:
+ // 0 -> SourceLocation()
+ // 100 -> Expanded loc #1
+ // 105 -> Expanded loc #2
+ // 108 -> Expanded loc #1
+ // 110 -> SourceLocation()
+ //
+ // Since re-lexed macro chunks will always be the same size as, or smaller
+ // than, previous chunks, we only need to find where the end of the new
+ // macro chunk is mapped to and update the map with new begin/end mappings.
+
+ MacroArgsMap::iterator I = MacroArgsCache.upper_bound(EndOffs);
+ --I;
+ SourceLocation EndOffsMappedLoc = I->second;
+ MacroArgsCache[BeginOffs] = SourceLocation::getMacroLoc(Entry.getOffset());
+ MacroArgsCache[EndOffs] = EndOffsMappedLoc;
+ }
+}
+
+/// \brief If \arg Loc points inside a function macro argument, the returned
+/// location will be the macro location in which the argument was expanded.
+/// If a macro argument is used multiple times, the expanded location will
+/// be at the first expansion of the argument.
+/// e.g.
+/// MY_MACRO(foo);
+/// ^
+/// Passing a file location pointing at 'foo', will yield a macro location
+/// where 'foo' was expanded into.
+SourceLocation
+SourceManager::getMacroArgExpandedLocation(SourceLocation Loc) const {
+ if (Loc.isInvalid() || !Loc.isFileID())
+ return Loc;
+
+ FileID FID;
+ unsigned Offset;
+ llvm::tie(FID, Offset) = getDecomposedLoc(Loc);
+ if (FID.isInvalid())
+ return Loc;
+
+ MacroArgsMap *&MacroArgsCache = MacroArgsCacheMap[FID];
+ if (!MacroArgsCache)
+ computeMacroArgsCache(MacroArgsCache, FID);
+
+ assert(!MacroArgsCache->empty());
+ MacroArgsMap::iterator I = MacroArgsCache->upper_bound(Offset);
+ --I;
+
+ unsigned MacroArgBeginOffs = I->first;
+ SourceLocation MacroArgExpandedLoc = I->second;
+ if (MacroArgExpandedLoc.isValid())
+ return MacroArgExpandedLoc.getLocWithOffset(Offset - MacroArgBeginOffs);
+
+ return Loc;
+}
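+
+// Illustrative sketch (hypothetical names): given
+//   MY_MACRO(foo);
+// a file location pointing at 'foo' is remapped to the macro location of the
+// argument's first expansion; locations outside any macro argument come back
+// unchanged.
+//
+//   SourceLocation ArgLoc = SM.getMacroArgExpandedLocation(FileLoc);
+//   bool WasRemapped = ArgLoc != FileLoc;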
+
+/// Given a decomposed source location, move it up the include/expansion stack
+/// to the parent source location. If this is possible, return the decomposed
+/// version of the parent in Loc and return false. If Loc is the top-level
+/// entry, return true and don't modify it.
+static bool MoveUpIncludeHierarchy(std::pair<FileID, unsigned> &Loc,
+ const SourceManager &SM) {
+ SourceLocation UpperLoc;
+ const SrcMgr::SLocEntry &Entry = SM.getSLocEntry(Loc.first);
+ if (Entry.isExpansion())
+ UpperLoc = Entry.getExpansion().getExpansionLocStart();
+ else
+ UpperLoc = Entry.getFile().getIncludeLoc();
+
+ if (UpperLoc.isInvalid())
+ return true; // We reached the top.
+
+ Loc = SM.getDecomposedLoc(UpperLoc);
+ return false;
+}
+
+
+/// \brief Determines the order of 2 source locations in the translation unit.
+///
+/// \returns true if LHS source location comes before RHS, false otherwise.
+bool SourceManager::isBeforeInTranslationUnit(SourceLocation LHS,
+ SourceLocation RHS) const {
+ assert(LHS.isValid() && RHS.isValid() && "Passed invalid source location!");
+ if (LHS == RHS)
+ return false;
+
+ std::pair<FileID, unsigned> LOffs = getDecomposedLoc(LHS);
+ std::pair<FileID, unsigned> ROffs = getDecomposedLoc(RHS);
+
+ // If the source locations are in the same file, just compare offsets.
+ if (LOffs.first == ROffs.first)
+ return LOffs.second < ROffs.second;
+
+ // If we are comparing a source location with multiple locations in the same
+ // file, we get a big win by caching the result.
+ if (IsBeforeInTUCache.isCacheValid(LOffs.first, ROffs.first))
+ return IsBeforeInTUCache.getCachedResult(LOffs.second, ROffs.second);
+
+ // Okay, we missed in the cache, start updating the cache for this query.
+ IsBeforeInTUCache.setQueryFIDs(LOffs.first, ROffs.first,
+ /*isLFIDBeforeRFID=*/LOffs.first.ID < ROffs.first.ID);
+
+ // We need to find the common ancestor. The only way of doing this is to
+ // build the complete include chain for one location and then walk up the
+ // chain of the other looking for a match.
+ // We use a map from FileID to Offset to store the chain. Easier than writing
+ // a custom set hash info that only depends on the first part of a pair.
+ typedef llvm::DenseMap<FileID, unsigned> LocSet;
+ LocSet LChain;
+ do {
+ LChain.insert(LOffs);
+ // We catch the case where LOffs is in a file included by ROffs and
+ // quit early. The other way round unfortunately remains suboptimal.
+ } while (LOffs.first != ROffs.first && !MoveUpIncludeHierarchy(LOffs, *this));
+ LocSet::iterator I;
+ while((I = LChain.find(ROffs.first)) == LChain.end()) {
+ if (MoveUpIncludeHierarchy(ROffs, *this))
+ break; // Met at topmost file.
+ }
+ if (I != LChain.end())
+ LOffs = *I;
+
+ // If we exited because we found a nearest common ancestor, compare the
+ // locations within the common file and cache them.
+ if (LOffs.first == ROffs.first) {
+ IsBeforeInTUCache.setCommonLoc(LOffs.first, LOffs.second, ROffs.second);
+ return IsBeforeInTUCache.getCachedResult(LOffs.second, ROffs.second);
+ }
+
+ // This can happen if a location is in a built-ins buffer.
+ // But see PR5662.
+ // Clear the lookup cache, it depends on a common location.
+ IsBeforeInTUCache.clear();
+ bool LIsBuiltins = strcmp("<built-in>",
+ getBuffer(LOffs.first)->getBufferIdentifier()) == 0;
+ bool RIsBuiltins = strcmp("<built-in>",
+ getBuffer(ROffs.first)->getBufferIdentifier()) == 0;
+ // built-in is before non-built-in
+ if (LIsBuiltins != RIsBuiltins)
+ return LIsBuiltins;
+ assert(LIsBuiltins && RIsBuiltins &&
+ "Non-built-in locations must be rooted in the main file");
+ // Both are in built-in buffers, but from different files. We just claim that
+ // lower IDs come first.
+ return LOffs.first < ROffs.first;
+}
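+
+// Illustrative sketch, not part of the original file: a strict weak ordering
+// built on the routine above, e.g. for sorting diagnostics by position
+// (hypothetical helper).
+//
+//   struct BeforeInTU {
+//     const SourceManager &SM;
+//     explicit BeforeInTU(const SourceManager &SM) : SM(SM) {}
+//     bool operator()(SourceLocation L, SourceLocation R) const {
+//       return SM.isBeforeInTranslationUnit(L, R);
+//     }
+//   };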
+
+/// PrintStats - Print statistics to stderr.
+///
+void SourceManager::PrintStats() const {
+ llvm::errs() << "\n*** Source Manager Stats:\n";
+ llvm::errs() << FileInfos.size() << " files mapped, " << MemBufferInfos.size()
+ << " mem buffers mapped.\n";
+ llvm::errs() << LocalSLocEntryTable.size() << " local SLocEntry's allocated ("
+ << llvm::capacity_in_bytes(LocalSLocEntryTable)
+ << " bytes of capacity), "
+ << NextLocalOffset << "B of Sloc address space used.\n";
+ llvm::errs() << LoadedSLocEntryTable.size()
+ << " loaded SLocEntries allocated, "
+ << MaxLoadedOffset - CurrentLoadedOffset
+ << "B of Sloc address space used.\n";
+
+ unsigned NumLineNumsComputed = 0;
+ unsigned NumFileBytesMapped = 0;
+ for (fileinfo_iterator I = fileinfo_begin(), E = fileinfo_end(); I != E; ++I){
+ NumLineNumsComputed += I->second->SourceLineCache != 0;
+ NumFileBytesMapped += I->second->getSizeBytesMapped();
+ }
+ unsigned NumMacroArgsComputed = MacroArgsCacheMap.size();
+
+ llvm::errs() << NumFileBytesMapped << " bytes of files mapped, "
+ << NumLineNumsComputed << " files with line #'s computed, "
+ << NumMacroArgsComputed << " files with macro args computed.\n";
+ llvm::errs() << "FileID scans: " << NumLinearScans << " linear, "
+ << NumBinaryProbes << " binary.\n";
+}
+
+ExternalSLocEntrySource::~ExternalSLocEntrySource() { }
+
+/// Return the amount of memory used by memory buffers, breaking down
+/// by heap-backed versus mmap'ed memory.
+SourceManager::MemoryBufferSizes SourceManager::getMemoryBufferSizes() const {
+ size_t malloc_bytes = 0;
+ size_t mmap_bytes = 0;
+
+ for (unsigned i = 0, e = MemBufferInfos.size(); i != e; ++i)
+ if (size_t sized_mapped = MemBufferInfos[i]->getSizeBytesMapped())
+ switch (MemBufferInfos[i]->getMemoryBufferKind()) {
+ case llvm::MemoryBuffer::MemoryBuffer_MMap:
+ mmap_bytes += sized_mapped;
+ break;
+ case llvm::MemoryBuffer::MemoryBuffer_Malloc:
+ malloc_bytes += sized_mapped;
+ break;
+ }
+
+ return MemoryBufferSizes(malloc_bytes, mmap_bytes);
+}
+
+size_t SourceManager::getDataStructureSizes() const {
+ return llvm::capacity_in_bytes(MemBufferInfos)
+ + llvm::capacity_in_bytes(LocalSLocEntryTable)
+ + llvm::capacity_in_bytes(LoadedSLocEntryTable)
+ + llvm::capacity_in_bytes(SLocEntryLoaded)
+ + llvm::capacity_in_bytes(FileInfos)
+ + llvm::capacity_in_bytes(OverriddenFiles);
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
new file mode 100644
index 0000000..f938b5a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/TargetInfo.cpp
@@ -0,0 +1,490 @@
+//===--- TargetInfo.cpp - Information about Target machine ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TargetInfo and TargetInfoImpl interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/AddressSpaces.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cctype>
+#include <cstdlib>
+using namespace clang;
+
+static const LangAS::Map DefaultAddrSpaceMap = { 0 };
+
+// TargetInfo Constructor.
+TargetInfo::TargetInfo(const std::string &T) : Triple(T) {
+ // Set defaults. Defaults are set for a 32-bit RISC platform, like PPC or
+ // SPARC. These should be overridden by concrete targets as needed.
+ BigEndian = true;
+ TLSSupported = true;
+ NoAsmVariants = false;
+ PointerWidth = PointerAlign = 32;
+ BoolWidth = BoolAlign = 8;
+ IntWidth = IntAlign = 32;
+ LongWidth = LongAlign = 32;
+ LongLongWidth = LongLongAlign = 64;
+ SuitableAlign = 64;
+ HalfWidth = 16;
+ HalfAlign = 16;
+ FloatWidth = 32;
+ FloatAlign = 32;
+ DoubleWidth = 64;
+ DoubleAlign = 64;
+ LongDoubleWidth = 64;
+ LongDoubleAlign = 64;
+ LargeArrayMinWidth = 0;
+ LargeArrayAlign = 0;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 0;
+ SizeType = UnsignedLong;
+ PtrDiffType = SignedLong;
+ IntMaxType = SignedLongLong;
+ UIntMaxType = UnsignedLongLong;
+ IntPtrType = SignedLong;
+ WCharType = SignedInt;
+ WIntType = SignedInt;
+ Char16Type = UnsignedShort;
+ Char32Type = UnsignedInt;
+ Int64Type = SignedLongLong;
+ SigAtomicType = SignedInt;
+ UseBitFieldTypeAlignment = true;
+ UseZeroLengthBitfieldAlignment = false;
+ ZeroLengthBitfieldBoundary = 0;
+ HalfFormat = &llvm::APFloat::IEEEhalf;
+ FloatFormat = &llvm::APFloat::IEEEsingle;
+ DoubleFormat = &llvm::APFloat::IEEEdouble;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-n32";
+ UserLabelPrefix = "_";
+ MCountName = "mcount";
+ RegParmMax = 0;
+ SSERegParmMax = 0;
+ HasAlignMac68kSupport = false;
+
+ // Default to no types using fpret.
+ RealTypeUsesObjCFPRet = 0;
+
+ // Default to not using fp2ret for __Complex long double
+ ComplexLongDoubleUsesFP2Ret = false;
+
+ // Default to using the Itanium ABI.
+ CXXABI = CXXABI_Itanium;
+
+ // Default to an empty address space map.
+ AddrSpaceMap = &DefaultAddrSpaceMap;
+
+ // Default to an unknown platform name.
+ PlatformName = "unknown";
+ PlatformMinVersion = VersionTuple();
+}
+
+// Out of line virtual dtor for TargetInfo.
+TargetInfo::~TargetInfo() {}
+
+/// getTypeName - Return the user string for the specified integer type enum.
+/// For example, SignedShort -> "short".
+const char *TargetInfo::getTypeName(IntType T) {
+ switch (T) {
+ default: llvm_unreachable("not an integer!");
+ case SignedShort: return "short";
+ case UnsignedShort: return "unsigned short";
+ case SignedInt: return "int";
+ case UnsignedInt: return "unsigned int";
+ case SignedLong: return "long int";
+ case UnsignedLong: return "long unsigned int";
+ case SignedLongLong: return "long long int";
+ case UnsignedLongLong: return "long long unsigned int";
+ }
+}
+
+/// getTypeConstantSuffix - Return the constant suffix for the specified
+/// integer type enum. For example, SignedLong -> "L".
+const char *TargetInfo::getTypeConstantSuffix(IntType T) {
+ switch (T) {
+ default: llvm_unreachable("not an integer!");
+ case SignedShort:
+ case SignedInt: return "";
+ case SignedLong: return "L";
+ case SignedLongLong: return "LL";
+ case UnsignedShort:
+ case UnsignedInt: return "U";
+ case UnsignedLong: return "UL";
+ case UnsignedLongLong: return "ULL";
+ }
+}
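+
+// Illustrative sketch (hypothetical names): combining the two queries above
+// to spell out a target's intmax_t constant, assuming a TargetInfo `TI`.
+//
+//   TargetInfo::IntType T = TI.getIntMaxType();
+//   const char *Name   = TI.getTypeName(T);           // e.g. "long long int"
+//   const char *Suffix = TI.getTypeConstantSuffix(T); // e.g. "LL"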
+
+/// getTypeWidth - Return the width (in bits) of the specified integer type
+/// enum. For example, SignedInt -> getIntWidth().
+unsigned TargetInfo::getTypeWidth(IntType T) const {
+ switch (T) {
+ default: llvm_unreachable("not an integer!");
+ case SignedShort:
+ case UnsignedShort: return getShortWidth();
+ case SignedInt:
+ case UnsignedInt: return getIntWidth();
+ case SignedLong:
+ case UnsignedLong: return getLongWidth();
+ case SignedLongLong:
+ case UnsignedLongLong: return getLongLongWidth();
+ };
+}
+
+/// getTypeAlign - Return the alignment (in bits) of the specified integer type
+/// enum. For example, SignedInt -> getIntAlign().
+unsigned TargetInfo::getTypeAlign(IntType T) const {
+ switch (T) {
+ default: llvm_unreachable("not an integer!");
+ case SignedShort:
+ case UnsignedShort: return getShortAlign();
+ case SignedInt:
+ case UnsignedInt: return getIntAlign();
+ case SignedLong:
+ case UnsignedLong: return getLongAlign();
+ case SignedLongLong:
+ case UnsignedLongLong: return getLongLongAlign();
+ };
+}
+
+/// isTypeSigned - Return whether an integer type is signed. Returns true if
+/// the type is signed; false otherwise.
+bool TargetInfo::isTypeSigned(IntType T) {
+ switch (T) {
+ default: llvm_unreachable("not an integer!");
+ case SignedShort:
+ case SignedInt:
+ case SignedLong:
+ case SignedLongLong:
+ return true;
+ case UnsignedShort:
+ case UnsignedInt:
+ case UnsignedLong:
+ case UnsignedLongLong:
+ return false;
+ };
+}
+
+/// setForcedLangOptions - Set forced language options.
+/// Apply changes to the target information with respect to certain
+/// language options which change the target configuration.
+void TargetInfo::setForcedLangOptions(LangOptions &Opts) {
+ if (Opts.NoBitFieldTypeAlign)
+ UseBitFieldTypeAlignment = false;
+ if (Opts.ShortWChar)
+ WCharType = UnsignedShort;
+}
+
+//===----------------------------------------------------------------------===//
+
+
+static StringRef removeGCCRegisterPrefix(StringRef Name) {
+ if (Name[0] == '%' || Name[0] == '#')
+ Name = Name.substr(1);
+
+ return Name;
+}
+
+/// isValidClobber - Returns whether the passed in string is
+/// a valid clobber in an inline asm statement. This is used by
+/// Sema.
+bool TargetInfo::isValidClobber(StringRef Name) const {
+ return (isValidGCCRegisterName(Name) ||
+ Name == "memory" || Name == "cc");
+}
+
+/// isValidGCCRegisterName - Returns whether the passed in string
+/// is a valid register name according to GCC. This is used by Sema for
+/// inline asm statements.
+bool TargetInfo::isValidGCCRegisterName(StringRef Name) const {
+ if (Name.empty())
+ return false;
+
+ const char * const *Names;
+ unsigned NumNames;
+
+ // Get rid of any register prefix.
+ Name = removeGCCRegisterPrefix(Name);
+
+ getGCCRegNames(Names, NumNames);
+
+ // If we have a number it maps to an entry in the register name array.
+ if (isdigit(Name[0])) {
+ int n;
+ if (!Name.getAsInteger(0, n))
+ return n >= 0 && (unsigned)n < NumNames;
+ }
+
+ // Check register names.
+ for (unsigned i = 0; i < NumNames; i++) {
+ if (Name == Names[i])
+ return true;
+ }
+
+ // Check any additional names that we have.
+ const AddlRegName *AddlNames;
+ unsigned NumAddlNames;
+ getGCCAddlRegNames(AddlNames, NumAddlNames);
+ for (unsigned i = 0; i < NumAddlNames; i++)
+ for (unsigned j = 0; j < llvm::array_lengthof(AddlNames[i].Names); j++) {
+ if (!AddlNames[i].Names[j])
+ break;
+ // Make sure the register that the additional name is for is within
+ // the bounds of the register names from above.
+ if (AddlNames[i].Names[j] == Name && AddlNames[i].RegNum < NumNames)
+ return true;
+ }
+
+ // Now check aliases.
+ const GCCRegAlias *Aliases;
+ unsigned NumAliases;
+
+ getGCCRegAliases(Aliases, NumAliases);
+ for (unsigned i = 0; i < NumAliases; i++) {
+ for (unsigned j = 0 ; j < llvm::array_lengthof(Aliases[i].Aliases); j++) {
+ if (!Aliases[i].Aliases[j])
+ break;
+ if (Aliases[i].Aliases[j] == Name)
+ return true;
+ }
+ }
+
+ return false;
+}
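+
+// Illustrative sketch (hypothetical names): how Sema-style callers use the
+// validation entry points above when checking an asm clobber list, assuming
+// a TargetInfo `TI`.
+//
+//   StringRef Clobber = "%eax";       // a '%' or '#' prefix is stripped
+//   if (!TI.isValidClobber(Clobber)) {
+//     // diagnose an unknown clobber; "memory" and "cc" are always accepted
+//   }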
+
+StringRef
+TargetInfo::getNormalizedGCCRegisterName(StringRef Name) const {
+ assert(isValidGCCRegisterName(Name) && "Invalid register passed in");
+
+ // Get rid of any register prefix.
+ Name = removeGCCRegisterPrefix(Name);
+
+ const char * const *Names;
+ unsigned NumNames;
+
+ getGCCRegNames(Names, NumNames);
+
+ // First, check if we have a number.
+ if (isdigit(Name[0])) {
+ int n;
+ if (!Name.getAsInteger(0, n)) {
+ assert(n >= 0 && (unsigned)n < NumNames &&
+ "Out of bounds register number!");
+ return Names[n];
+ }
+ }
+
+ // Check any additional names that we have.
+ const AddlRegName *AddlNames;
+ unsigned NumAddlNames;
+ getGCCAddlRegNames(AddlNames, NumAddlNames);
+ for (unsigned i = 0; i < NumAddlNames; i++)
+ for (unsigned j = 0; j < llvm::array_lengthof(AddlNames[i].Names); j++) {
+ if (!AddlNames[i].Names[j])
+ break;
+ // Make sure the register that the additional name is for is within
+ // the bounds of the register names from above.
+ if (AddlNames[i].Names[j] == Name && AddlNames[i].RegNum < NumNames)
+ return Name;
+ }
+
+ // Now check aliases.
+ const GCCRegAlias *Aliases;
+ unsigned NumAliases;
+
+ getGCCRegAliases(Aliases, NumAliases);
+ for (unsigned i = 0; i < NumAliases; i++) {
+ for (unsigned j = 0 ; j < llvm::array_lengthof(Aliases[i].Aliases); j++) {
+ if (!Aliases[i].Aliases[j])
+ break;
+ if (Aliases[i].Aliases[j] == Name)
+ return Aliases[i].Register;
+ }
+ }
+
+ return Name;
+}
+
+bool TargetInfo::validateOutputConstraint(ConstraintInfo &Info) const {
+ const char *Name = Info.getConstraintStr().c_str();
+ // An output constraint must start with '=' or '+'
+ if (*Name != '=' && *Name != '+')
+ return false;
+
+ if (*Name == '+')
+ Info.setIsReadWrite();
+
+ Name++;
+ while (*Name) {
+ switch (*Name) {
+ default:
+ if (!validateAsmConstraint(Name, Info)) {
+ // FIXME: We temporarily return false
+ // so we can add more constraints as we hit it.
+ // Eventually, an unknown constraint should just be treated as 'g'.
+ return false;
+ }
+ case '&': // early clobber.
+ break;
+ case '%': // commutative.
+ // FIXME: Check that there is another register after this one.
+ break;
+ case 'r': // general register.
+ Info.setAllowsRegister();
+ break;
+ case 'm': // memory operand.
+ case 'o': // offsettable memory operand.
+ case 'V': // non-offsettable memory operand.
+ case '<': // autodecrement memory operand.
+ case '>': // autoincrement memory operand.
+ Info.setAllowsMemory();
+ break;
+ case 'g': // general register, memory operand or immediate integer.
+ case 'X': // any operand.
+ Info.setAllowsRegister();
+ Info.setAllowsMemory();
+ break;
+ case ',': // multiple alternative constraint. Pass it.
+ // Handle additional optional '=' or '+' modifiers.
+ if (Name[1] == '=' || Name[1] == '+')
+ Name++;
+ break;
+ case '?': // Slightly disparage this alternative.
+ case '!': // Disparage severely.
+ break; // Pass them.
+ }
+
+ Name++;
+ }
+
+ return true;
+}
+
+bool TargetInfo::resolveSymbolicName(const char *&Name,
+ ConstraintInfo *OutputConstraints,
+ unsigned NumOutputs,
+ unsigned &Index) const {
+ assert(*Name == '[' && "Symbolic name did not start with '['");
+ Name++;
+ const char *Start = Name;
+ while (*Name && *Name != ']')
+ Name++;
+
+ if (!*Name) {
+ // Missing ']'
+ return false;
+ }
+
+ std::string SymbolicName(Start, Name - Start);
+
+ for (Index = 0; Index != NumOutputs; ++Index)
+ if (SymbolicName == OutputConstraints[Index].getName())
+ return true;
+
+ return false;
+}
+
+bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints,
+ unsigned NumOutputs,
+ ConstraintInfo &Info) const {
+ const char *Name = Info.ConstraintStr.c_str();
+
+ while (*Name) {
+ switch (*Name) {
+ default:
+ // Check if we have a matching constraint
+ if (*Name >= '0' && *Name <= '9') {
+ unsigned i = *Name - '0';
+
+ // Check if matching constraint is out of bounds.
+ if (i >= NumOutputs)
+ return false;
+
+ // A number must refer to an output-only operand.
+ if (OutputConstraints[i].isReadWrite())
+ return false;
+
+ // If the constraint is already tied, it must be tied to the
+ // same operand referenced by the number.
+ if (Info.hasTiedOperand() && Info.getTiedOperand() != i)
+ return false;
+
+ // The constraint should have the same info as the respective
+ // output constraint.
+ Info.setTiedOperand(i, OutputConstraints[i]);
+ } else if (!validateAsmConstraint(Name, Info)) {
+ // FIXME: This error return is in place temporarily so we can
+ // add more constraints as we hit it. Eventually, an unknown
+ // constraint should just be treated as 'g'.
+ return false;
+ }
+ break;
+ case '[': {
+ unsigned Index = 0;
+ if (!resolveSymbolicName(Name, OutputConstraints, NumOutputs, Index))
+ return false;
+
+ // If the constraint is already tied, it must be tied to the
+ // same operand referenced by the symbolic name.
+ if (Info.hasTiedOperand() && Info.getTiedOperand() != Index)
+ return false;
+
+ Info.setTiedOperand(Index, OutputConstraints[Index]);
+ break;
+ }
+ case '%': // commutative
+ // FIXME: Fail if % is used with the last operand.
+ break;
+ case 'i': // immediate integer.
+ case 'n': // immediate integer with a known value.
+ break;
+ case 'I': // Various constant constraints with target-specific meanings.
+ case 'J':
+ case 'K':
+ case 'L':
+ case 'M':
+ case 'N':
+ case 'O':
+ case 'P':
+ break;
+ case 'r': // general register.
+ Info.setAllowsRegister();
+ break;
+ case 'm': // memory operand.
+ case 'o': // offsettable memory operand.
+ case 'V': // non-offsettable memory operand.
+ case '<': // autodecrement memory operand.
+ case '>': // autoincrement memory operand.
+ Info.setAllowsMemory();
+ break;
+ case 'g': // general register, memory operand or immediate integer.
+ case 'X': // any operand.
+ Info.setAllowsRegister();
+ Info.setAllowsMemory();
+ break;
+ case 'E': // immediate floating point.
+ case 'F': // immediate floating point.
+ case 'p': // address operand.
+ break;
+ case ',': // multiple alternative constraint. Ignore comma.
+ break;
+ case '?': // Slightly disparage this alternative.
+ case '!': // Disparage severely.
+ break; // Pass them.
+ }
+
+ Name++;
+ }
+
+ return true;
+}
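+
+// Illustrative sketch (hypothetical names): validating the constraints of
+//   asm("addl %2, %0" : "=r"(out) : "0"(out), "r"(in));
+// The output "=r" is checked first; the input "0" then ties to output #0.
+// The ConstraintInfo construction below assumes its (constraint, name)
+// constructor form; `TI` is an assumed TargetInfo.
+//
+//   TargetInfo::ConstraintInfo Out("=r", "out");
+//   bool OutOK = TI.validateOutputConstraint(Out);
+//   TargetInfo::ConstraintInfo In0("0", "");
+//   bool In0OK = TI.validateInputConstraint(&Out, /*NumOutputs=*/1, In0);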
diff --git a/contrib/llvm/tools/clang/lib/Basic/Targets.cpp b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
new file mode 100644
index 0000000..1ad37c4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/Targets.cpp
@@ -0,0 +1,4205 @@
+//===--- Targets.cpp - Implement -arch option and targets -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements construction of a TargetInfo object from a
+// target triple.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/MacroBuilder.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Basic/TargetOptions.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Type.h"
+#include <algorithm>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Common code shared among targets.
+//===----------------------------------------------------------------------===//
+
+/// DefineStd - Define a macro name and standard variants. For example if
+/// MacroName is "unix", then this will define "__unix", "__unix__", and "unix"
+/// when in GNU mode.
+static void DefineStd(MacroBuilder &Builder, StringRef MacroName,
+ const LangOptions &Opts) {
+ assert(MacroName[0] != '_' && "Identifier should be in the user's namespace");
+
+ // If in GNU mode (e.g. -std=gnu99 but not -std=c99) define the raw identifier
+ // in the user's namespace.
+ if (Opts.GNUMode)
+ Builder.defineMacro(MacroName);
+
+ // Define __unix.
+ Builder.defineMacro("__" + MacroName);
+
+ // Define __unix__.
+ Builder.defineMacro("__" + MacroName + "__");
+}
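+
+// Illustrative sketch: a call such as
+//   DefineStd(Builder, "linux", Opts);
+// always defines __linux and __linux__, and additionally defines plain
+// "linux" when Opts.GNUMode is set (e.g. -std=gnu99 but not -std=c99).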
+
+static void defineCPUMacros(MacroBuilder &Builder, StringRef CPUName,
+ bool Tuning = true) {
+ Builder.defineMacro("__" + CPUName);
+ Builder.defineMacro("__" + CPUName + "__");
+ if (Tuning)
+ Builder.defineMacro("__tune_" + CPUName + "__");
+}
+
+//===----------------------------------------------------------------------===//
+// Defines specific to certain operating systems.
+//===----------------------------------------------------------------------===//
+
+namespace {
+template<typename TgtInfo>
+class OSTargetInfo : public TgtInfo {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const=0;
+public:
+ OSTargetInfo(const std::string& triple) : TgtInfo(triple) {}
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ TgtInfo::getTargetDefines(Opts, Builder);
+ getOSDefines(Opts, TgtInfo::getTriple(), Builder);
+ }
+
+};
+} // end anonymous namespace
+
+
+static void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
+ const llvm::Triple &Triple,
+ StringRef &PlatformName,
+ VersionTuple &PlatformMinVersion) {
+ Builder.defineMacro("__APPLE_CC__", "5621");
+ Builder.defineMacro("__APPLE__");
+ Builder.defineMacro("__MACH__");
+ Builder.defineMacro("OBJC_NEW_PROPERTIES");
+
+ if (!Opts.ObjCAutoRefCount) {
+ // __weak is always defined, for use in blocks and with objc pointers.
+ Builder.defineMacro("__weak", "__attribute__((objc_gc(weak)))");
+
+ // Darwin defines __strong even in C mode (just to nothing).
+ if (Opts.getGC() != LangOptions::NonGC)
+ Builder.defineMacro("__strong", "__attribute__((objc_gc(strong)))");
+ else
+ Builder.defineMacro("__strong", "");
+
+ // __unsafe_unretained is defined to nothing in non-ARC mode. We even
+ // allow this in C, since one might have block pointers in structs that
+ // are used in pure C code and in Objective-C ARC.
+ Builder.defineMacro("__unsafe_unretained", "");
+ }
+
+ if (Opts.Static)
+ Builder.defineMacro("__STATIC__");
+ else
+ Builder.defineMacro("__DYNAMIC__");
+
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+
+ // Get the platform type and version number from the triple.
+ unsigned Maj, Min, Rev;
+ if (Triple.isMacOSX()) {
+ Triple.getMacOSXVersion(Maj, Min, Rev);
+ PlatformName = "macosx";
+ } else {
+ Triple.getOSVersion(Maj, Min, Rev);
+ PlatformName = llvm::Triple::getOSTypeName(Triple.getOS());
+ }
+
+ // If -target arch-pc-win32-macho option specified, we're
+ // generating code for Win32 ABI. No need to emit
+ // __ENVIRONMENT_XX_OS_VERSION_MIN_REQUIRED__.
+ if (PlatformName == "win32") {
+ PlatformMinVersion = VersionTuple(Maj, Min, Rev);
+ return;
+ }
+
+ // Set the appropriate OS version define.
+ if (Triple.getOS() == llvm::Triple::IOS) {
+ assert(Maj < 10 && Min < 100 && Rev < 100 && "Invalid version!");
+ char Str[6];
+ Str[0] = '0' + Maj;
+ Str[1] = '0' + (Min / 10);
+ Str[2] = '0' + (Min % 10);
+ Str[3] = '0' + (Rev / 10);
+ Str[4] = '0' + (Rev % 10);
+ Str[5] = '\0';
+ Builder.defineMacro("__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__", Str);
+ } else {
+ // Note that the Driver allows versions which aren't representable in the
+ // define (because we only get a single digit for the minor and micro
+ // revision numbers). So, we limit them to the maximum representable
+ // version.
+ assert(Triple.getEnvironmentName().empty() && "Invalid environment!");
+ assert(Maj < 100 && Min < 100 && Rev < 100 && "Invalid version!");
+ char Str[5];
+ Str[0] = '0' + (Maj / 10);
+ Str[1] = '0' + (Maj % 10);
+ Str[2] = '0' + std::min(Min, 9U);
+ Str[3] = '0' + std::min(Rev, 9U);
+ Str[4] = '\0';
+ Builder.defineMacro("__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__", Str);
+ }
+
+ PlatformMinVersion = VersionTuple(Maj, Min, Rev);
+}
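+
+// Illustrative worked example: for a macosx10.7.2 triple, Maj/Min/Rev are
+// 10/7/2, so the code above emits
+//   __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ = 1072
+// (the minor and micro components are clamped to a single digit each).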
+
+namespace {
+template<typename Target>
+class DarwinTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ getDarwinDefines(Builder, Opts, Triple, this->PlatformName,
+ this->PlatformMinVersion);
+ }
+
+public:
+ DarwinTargetInfo(const std::string& triple) :
+ OSTargetInfo<Target>(triple) {
+ llvm::Triple T = llvm::Triple(triple);
+ this->TLSSupported = T.isMacOSX() && !T.isMacOSXVersionLT(10,7);
+ this->MCountName = "\01mcount";
+ }
+
+ virtual std::string isValidSectionSpecifier(StringRef SR) const {
+ // Let MCSectionMachO validate this.
+ StringRef Segment, Section;
+ unsigned TAA, StubSize;
+ bool HasTAA;
+ return llvm::MCSectionMachO::ParseSectionSpecifier(SR, Segment, Section,
+ TAA, HasTAA, StubSize);
+ }
+
+ virtual const char *getStaticInitSectionSpecifier() const {
+ // FIXME: We should return 0 when building kexts.
+ return "__TEXT,__StaticInit,regular,pure_instructions";
+ }
+
+ /// Darwin does not support protected visibility. Darwin's "default"
+ /// is very similar to ELF's "protected"; Darwin requires a "weak"
+ /// attribute on declarations that can be dynamically replaced.
+ virtual bool hasProtectedVisibility() const {
+ return false;
+ }
+};
+
+
+// DragonFlyBSD Target
+template<typename Target>
+class DragonFlyBSDTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ // DragonFly defines; list based off of gcc output
+ Builder.defineMacro("__DragonFly__");
+ Builder.defineMacro("__DragonFly_cc_version", "100001");
+ Builder.defineMacro("__ELF__");
+ Builder.defineMacro("__KPRINTF_ATTRIBUTE__");
+ Builder.defineMacro("__tune_i386__");
+ DefineStd(Builder, "unix", Opts);
+ }
+public:
+ DragonFlyBSDTargetInfo(const std::string &triple)
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+
+ llvm::Triple Triple(triple);
+ switch (Triple.getArch()) {
+ default:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ this->MCountName = ".mcount";
+ break;
+ }
+ }
+};
+
+// FreeBSD Target
+template<typename Target>
+class FreeBSDTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ // FreeBSD defines; list based off of gcc output
+
+ unsigned Release = Triple.getOSMajorVersion();
+ if (Release == 0U)
+ Release = 8;
+
+ Builder.defineMacro("__FreeBSD__", Twine(Release));
+ Builder.defineMacro("__FreeBSD_cc_version", Twine(Release * 100000U + 1U));
+ Builder.defineMacro("__KPRINTF_ATTRIBUTE__");
+ DefineStd(Builder, "unix", Opts);
+ Builder.defineMacro("__ELF__");
+ }
+public:
+ FreeBSDTargetInfo(const std::string &triple)
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+
+ llvm::Triple Triple(triple);
+ switch (Triple.getArch()) {
+ default:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ this->MCountName = ".mcount";
+ break;
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ this->MCountName = "_mcount";
+ break;
+ case llvm::Triple::arm:
+ this->MCountName = "__mcount";
+ break;
+ }
+
+ }
+};
+
+// Minix Target
+template<typename Target>
+class MinixTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ // Minix defines
+
+ Builder.defineMacro("__minix", "3");
+ Builder.defineMacro("_EM_WSIZE", "4");
+ Builder.defineMacro("_EM_PSIZE", "4");
+ Builder.defineMacro("_EM_SSIZE", "2");
+ Builder.defineMacro("_EM_LSIZE", "4");
+ Builder.defineMacro("_EM_FSIZE", "4");
+ Builder.defineMacro("_EM_DSIZE", "8");
+ Builder.defineMacro("__ELF__");
+ DefineStd(Builder, "unix", Opts);
+ }
+public:
+ MinixTargetInfo(const std::string &triple)
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+ }
+};
+
+// Linux target
+template<typename Target>
+class LinuxTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ // Linux defines; list based off of gcc output
+ DefineStd(Builder, "unix", Opts);
+ DefineStd(Builder, "linux", Opts);
+ Builder.defineMacro("__gnu_linux__");
+ Builder.defineMacro("__ELF__");
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+ }
+public:
+ LinuxTargetInfo(const std::string& triple)
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+ this->WIntType = TargetInfo::UnsignedInt;
+ }
+
+ virtual const char *getStaticInitSectionSpecifier() const {
+ return ".text.startup";
+ }
+};
+
+// NetBSD Target
+template<typename Target>
+class NetBSDTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ // NetBSD defines; list based off of gcc output
+ Builder.defineMacro("__NetBSD__");
+ Builder.defineMacro("__unix__");
+ Builder.defineMacro("__ELF__");
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_POSIX_THREADS");
+ }
+public:
+ NetBSDTargetInfo(const std::string &triple)
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+ }
+};
+
+// OpenBSD Target
+template<typename Target>
+class OpenBSDTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ // OpenBSD defines; list based off of gcc output
+
+ Builder.defineMacro("__OpenBSD__");
+ DefineStd(Builder, "unix", Opts);
+ Builder.defineMacro("__ELF__");
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_POSIX_THREADS");
+ }
+public:
+ OpenBSDTargetInfo(const std::string &triple)
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+
+ llvm::Triple Triple(triple);
+ switch (Triple.getArch()) {
+ default:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ case llvm::Triple::arm:
+ case llvm::Triple::sparc:
+ this->MCountName = "__mcount";
+ break;
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ case llvm::Triple::ppc:
+ case llvm::Triple::sparcv9:
+ this->MCountName = "_mcount";
+ break;
+ }
+ }
+};
+
+// PSP Target
+template<typename Target>
+class PSPTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ // PSP defines; list based on the output of the pspdev gcc toolchain.
+ Builder.defineMacro("PSP");
+ Builder.defineMacro("_PSP");
+ Builder.defineMacro("__psp__");
+ Builder.defineMacro("__ELF__");
+ }
+public:
+ PSPTargetInfo(const std::string& triple)
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+ }
+};
+
+// PS3 PPU Target
+template<typename Target>
+class PS3PPUTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ // PS3 PPU defines.
+ Builder.defineMacro("__PPC__");
+ Builder.defineMacro("__PPU__");
+ Builder.defineMacro("__CELLOS_LV2__");
+ Builder.defineMacro("__ELF__");
+ Builder.defineMacro("__LP32__");
+ Builder.defineMacro("_ARCH_PPC64");
+ Builder.defineMacro("__powerpc64__");
+ }
+public:
+ PS3PPUTargetInfo(const std::string& triple)
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+ this->LongWidth = this->LongAlign = 32;
+ this->PointerWidth = this->PointerAlign = 32;
+ this->IntMaxType = TargetInfo::SignedLongLong;
+ this->UIntMaxType = TargetInfo::UnsignedLongLong;
+ this->Int64Type = TargetInfo::SignedLongLong;
+ this->SizeType = TargetInfo::UnsignedInt;
+ this->DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32";
+ }
+};
+
+// FIXME: Need a real SPU target.
+// PS3 SPU Target
+template<typename Target>
+class PS3SPUTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ // PS3 SPU defines.
+ Builder.defineMacro("__SPU__");
+ Builder.defineMacro("__ELF__");
+ }
+public:
+ PS3SPUTargetInfo(const std::string& triple)
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+ }
+};
+
+// AuroraUX target
+template<typename Target>
+class AuroraUXTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ DefineStd(Builder, "sun", Opts);
+ DefineStd(Builder, "unix", Opts);
+ Builder.defineMacro("__ELF__");
+ Builder.defineMacro("__svr4__");
+ Builder.defineMacro("__SVR4");
+ }
+public:
+ AuroraUXTargetInfo(const std::string& triple)
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+ this->WCharType = this->SignedLong;
+ // FIXME: WIntType should be SignedLong
+ }
+};
+
+// Solaris target
+template<typename Target>
+class SolarisTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ DefineStd(Builder, "sun", Opts);
+ DefineStd(Builder, "unix", Opts);
+ Builder.defineMacro("__ELF__");
+ Builder.defineMacro("__svr4__");
+ Builder.defineMacro("__SVR4");
+ // Solaris headers require _XOPEN_SOURCE to be set to 600 for C99 and
+ // newer, but to 500 for everything else. feature_test.h has a check to
+ // ensure that you are not using C99 with an old version of X/Open or C89
+ // with a new version.
+ if (Opts.C99 || Opts.C11)
+ Builder.defineMacro("_XOPEN_SOURCE", "600");
+ else
+ Builder.defineMacro("_XOPEN_SOURCE", "500");
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("__C99FEATURES__");
+ Builder.defineMacro("_LARGEFILE_SOURCE");
+ Builder.defineMacro("_LARGEFILE64_SOURCE");
+ Builder.defineMacro("__EXTENSIONS__");
+ Builder.defineMacro("_REENTRANT");
+ }
+public:
+ SolarisTargetInfo(const std::string& triple)
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+ this->WCharType = this->SignedInt;
+ // FIXME: WIntType should be SignedLong
+ }
+};
+
+// Windows target
+template<typename Target>
+class WindowsTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("_WIN32");
+ }
+ void getVisualStudioDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ if (Opts.CPlusPlus) {
+ if (Opts.RTTI)
+ Builder.defineMacro("_CPPRTTI");
+
+ if (Opts.Exceptions)
+ Builder.defineMacro("_CPPUNWIND");
+ }
+
+ if (!Opts.CharIsSigned)
+ Builder.defineMacro("_CHAR_UNSIGNED");
+
+ // FIXME: POSIXThreads isn't exactly the option this should be defined for,
+ // but it works for now.
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_MT");
+
+ if (Opts.MSCVersion != 0)
+ Builder.defineMacro("_MSC_VER", Twine(Opts.MSCVersion));
+
+ if (Opts.MicrosoftExt) {
+ Builder.defineMacro("_MSC_EXTENSIONS");
+
+ if (Opts.CPlusPlus0x) {
+ Builder.defineMacro("_RVALUE_REFERENCES_V2_SUPPORTED");
+ Builder.defineMacro("_RVALUE_REFERENCES_SUPPORTED");
+ Builder.defineMacro("_NATIVE_NULLPTR_SUPPORTED");
+ }
+ }
+
+ Builder.defineMacro("_INTEGRAL_MAX_BITS", "64");
+ }
+
+public:
+ WindowsTargetInfo(const std::string &triple)
+ : OSTargetInfo<Target>(triple) {}
+};
+
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Specific target implementations.
+//===----------------------------------------------------------------------===//
+
+namespace {
+// PPC abstract base class
+class PPCTargetInfo : public TargetInfo {
+ static const Builtin::Info BuiltinInfo[];
+ static const char * const GCCRegNames[];
+ static const TargetInfo::GCCRegAlias GCCRegAliases[];
+public:
+ PPCTargetInfo(const std::string& triple) : TargetInfo(triple) {
+ LongDoubleWidth = LongDoubleAlign = 128;
+ LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble;
+ }
+
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {
+ Records = BuiltinInfo;
+ NumRecords = clang::PPC::LastTSBuiltin-Builtin::FirstTSBuiltin;
+ }
+
+ virtual bool isCLZForZeroUndef() const { return false; }
+
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
+
+ virtual bool hasFeature(StringRef Feature) const;
+
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const;
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const;
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const {
+ switch (*Name) {
+ default: return false;
+ case 'O': // Zero
+ break;
+ case 'b': // Base register
+ case 'f': // Floating point register
+ Info.setAllowsRegister();
+ break;
+ // FIXME: The following are added to allow parsing.
+ // I just took a guess at what the actions should be.
+ // Also, is more specific checking needed? I.e. specific registers?
+ case 'd': // Floating point register (containing 64-bit value)
+ case 'v': // Altivec vector register
+ Info.setAllowsRegister();
+ break;
+ case 'w':
+ switch (Name[1]) {
+ case 'd':// VSX vector register to hold vector double data
+ case 'f':// VSX vector register to hold vector float data
+ case 's':// VSX vector register to hold scalar float data
+ case 'a':// Any VSX register
+ break;
+ default:
+ return false;
+ }
+ Info.setAllowsRegister();
+ Name++; // Skip over 'w'.
+ break;
+ case 'h': // `MQ', `CTR', or `LINK' register
+ case 'q': // `MQ' register
+ case 'c': // `CTR' register
+ case 'l': // `LINK' register
+ case 'x': // `CR' register (condition register) number 0
+ case 'y': // `CR' register (condition register)
+ case 'z': // `XER[CA]' carry bit (part of the XER register)
+ Info.setAllowsRegister();
+ break;
+ case 'I': // Signed 16-bit constant
+ case 'J': // Unsigned 16-bit constant shifted left 16 bits
+ // (use `L' instead for SImode constants)
+ case 'K': // Unsigned 16-bit constant
+ case 'L': // Signed 16-bit constant shifted left 16 bits
+ case 'M': // Constant larger than 31
+ case 'N': // Exact power of 2
+ case 'P': // Constant whose negation is a signed 16-bit constant
+ case 'G': // Floating point constant that can be loaded into a
+ // register with one instruction per word
+ case 'H': // Integer/Floating point constant that can be loaded
+ // into a register using three instructions
+ break;
+ case 'm': // Memory operand. Note that on PowerPC targets, m can
+ // include addresses that update the base register. It
+ // is therefore only safe to use `m' in an asm statement
+ // if that asm statement accesses the operand exactly once.
+ // The asm statement must also use `%U<opno>' as a
+ // placeholder for the "update" flag in the corresponding
+ // load or store instruction. For example:
+ // asm ("st%U0 %1,%0" : "=m" (mem) : "r" (val));
+ // is correct but:
+ // asm ("st %1,%0" : "=m" (mem) : "r" (val));
+ // is not. Use es rather than m if you don't want the base
+ // register to be updated.
+ case 'e':
+ if (Name[1] != 's')
+ return false;
+ // es: A "stable" memory operand; that is, one which does not
+ // include any automodification of the base register. Unlike
+ // `m', this constraint can be used in asm statements that
+ // might access the operand several times, or that might not
+ // access it at all.
+ Info.setAllowsMemory();
+ Name++; // Skip over 'e'.
+ break;
+ case 'Q': // Memory operand that is an offset from a register (it is
+ // usually better to use `m' or `es' in asm statements)
+ case 'Z': // Memory operand that is an indexed or indirect from a
+ // register (it is usually better to use `m' or `es' in
+ // asm statements)
+ Info.setAllowsMemory();
+ Info.setAllowsRegister();
+ break;
+ case 'R': // AIX TOC entry
+ case 'a': // Address operand that is an indexed or indirect from a
+ // register (`p' is preferable for asm statements)
+ case 'S': // Constant suitable as a 64-bit mask operand
+ case 'T': // Constant suitable as a 32-bit mask operand
+ case 'U': // System V Release 4 small data area reference
+ case 't': // AND masks that can be performed by two rldic{l, r}
+ // instructions
+ case 'W': // Vector constant that does not require memory
+ case 'j': // Vector constant that is all zeros.
+ break;
+ // End FIXME.
+ }
+ return true;
+ }
+ virtual const char *getClobbers() const {
+ return "";
+ }
+};
+
+const Builtin::Info PPCTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
+ ALL_LANGUAGES },
+#include "clang/Basic/BuiltinsPPC.def"
+};
+
+
+/// PPCTargetInfo::getTargetDefines - Return a set of the PowerPC-specific
+/// #defines that are not tied to a specific subtarget.
+void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Target identification.
+ Builder.defineMacro("__ppc__");
+ Builder.defineMacro("_ARCH_PPC");
+ Builder.defineMacro("__powerpc__");
+ Builder.defineMacro("__POWERPC__");
+ if (PointerWidth == 64) {
+ Builder.defineMacro("_ARCH_PPC64");
+ Builder.defineMacro("_LP64");
+ Builder.defineMacro("__LP64__");
+ Builder.defineMacro("__powerpc64__");
+ Builder.defineMacro("__ppc64__");
+ } else {
+ Builder.defineMacro("__ppc__");
+ }
+
+ // Target properties.
+ if (getTriple().getOS() != llvm::Triple::NetBSD)
+ Builder.defineMacro("_BIG_ENDIAN");
+ Builder.defineMacro("__BIG_ENDIAN__");
+
+ // Subtarget options.
+ Builder.defineMacro("__NATURAL_ALIGNMENT__");
+ Builder.defineMacro("__REGISTER_PREFIX__", "");
+
+ // FIXME: Should be controlled by command line option.
+ Builder.defineMacro("__LONG_DOUBLE_128__");
+
+ if (Opts.AltiVec) {
+ Builder.defineMacro("__VEC__", "10206");
+ Builder.defineMacro("__ALTIVEC__");
+ }
+}
+
+bool PPCTargetInfo::hasFeature(StringRef Feature) const {
+ return Feature == "powerpc";
+}
+
+
+const char * const PPCTargetInfo::GCCRegNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15",
+ "f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23",
+ "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31",
+ "mq", "lr", "ctr", "ap",
+ "cr0", "cr1", "cr2", "cr3", "cr4", "cr5", "cr6", "cr7",
+ "xer",
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
+ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
+ "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
+ "vrsave", "vscr",
+ "spe_acc", "spefscr",
+ "sfp"
+};
+
+void PPCTargetInfo::getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {
+ Names = GCCRegNames;
+ NumNames = llvm::array_lengthof(GCCRegNames);
+}
+
+const TargetInfo::GCCRegAlias PPCTargetInfo::GCCRegAliases[] = {
+ // While some of these aliases do map to different registers,
+ // they still share the same register name.
+ { { "0" }, "r0" },
+ { { "1"}, "r1" },
+ { { "2" }, "r2" },
+ { { "3" }, "r3" },
+ { { "4" }, "r4" },
+ { { "5" }, "r5" },
+ { { "6" }, "r6" },
+ { { "7" }, "r7" },
+ { { "8" }, "r8" },
+ { { "9" }, "r9" },
+ { { "10" }, "r10" },
+ { { "11" }, "r11" },
+ { { "12" }, "r12" },
+ { { "13" }, "r13" },
+ { { "14" }, "r14" },
+ { { "15" }, "r15" },
+ { { "16" }, "r16" },
+ { { "17" }, "r17" },
+ { { "18" }, "r18" },
+ { { "19" }, "r19" },
+ { { "20" }, "r20" },
+ { { "21" }, "r21" },
+ { { "22" }, "r22" },
+ { { "23" }, "r23" },
+ { { "24" }, "r24" },
+ { { "25" }, "r25" },
+ { { "26" }, "r26" },
+ { { "27" }, "r27" },
+ { { "28" }, "r28" },
+ { { "29" }, "r29" },
+ { { "30" }, "r30" },
+ { { "31" }, "r31" },
+ { { "fr0" }, "f0" },
+ { { "fr1" }, "f1" },
+ { { "fr2" }, "f2" },
+ { { "fr3" }, "f3" },
+ { { "fr4" }, "f4" },
+ { { "fr5" }, "f5" },
+ { { "fr6" }, "f6" },
+ { { "fr7" }, "f7" },
+ { { "fr8" }, "f8" },
+ { { "fr9" }, "f9" },
+ { { "fr10" }, "f10" },
+ { { "fr11" }, "f11" },
+ { { "fr12" }, "f12" },
+ { { "fr13" }, "f13" },
+ { { "fr14" }, "f14" },
+ { { "fr15" }, "f15" },
+ { { "fr16" }, "f16" },
+ { { "fr17" }, "f17" },
+ { { "fr18" }, "f18" },
+ { { "fr19" }, "f19" },
+ { { "fr20" }, "f20" },
+ { { "fr21" }, "f21" },
+ { { "fr22" }, "f22" },
+ { { "fr23" }, "f23" },
+ { { "fr24" }, "f24" },
+ { { "fr25" }, "f25" },
+ { { "fr26" }, "f26" },
+ { { "fr27" }, "f27" },
+ { { "fr28" }, "f28" },
+ { { "fr29" }, "f29" },
+ { { "fr30" }, "f30" },
+ { { "fr31" }, "f31" },
+ { { "cc" }, "cr0" },
+};
+
+void PPCTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ Aliases = GCCRegAliases;
+ NumAliases = llvm::array_lengthof(GCCRegAliases);
+}
+} // end anonymous namespace.
+
+namespace {
+class PPC32TargetInfo : public PPCTargetInfo {
+public:
+ PPC32TargetInfo(const std::string &triple) : PPCTargetInfo(triple) {
+ DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32";
+
+ switch (getTriple().getOS()) {
+ case llvm::Triple::Linux:
+ case llvm::Triple::FreeBSD:
+ case llvm::Triple::NetBSD:
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ IntPtrType = SignedInt;
+ break;
+ default:
+ break;
+ }
+
+ if (getTriple().getOS() == llvm::Triple::FreeBSD) {
+ LongDoubleWidth = LongDoubleAlign = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ }
+ }
+
+ virtual const char *getVAListDeclaration() const {
+ // This is the ELF definition, and is overridden by the Darwin sub-target
+ return "typedef struct __va_list_tag {"
+ " unsigned char gpr;"
+ " unsigned char fpr;"
+ " unsigned short reserved;"
+ " void* overflow_arg_area;"
+ " void* reg_save_area;"
+ "} __builtin_va_list[1];";
+ }
+};
+} // end anonymous namespace.
+
+namespace {
+class PPC64TargetInfo : public PPCTargetInfo {
+public:
+ PPC64TargetInfo(const std::string& triple) : PPCTargetInfo(triple) {
+ LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
+ IntMaxType = SignedLong;
+ UIntMaxType = UnsignedLong;
+ Int64Type = SignedLong;
+ DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64";
+
+ if (getTriple().getOS() == llvm::Triple::FreeBSD) {
+ LongDoubleWidth = LongDoubleAlign = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ }
+ }
+ virtual const char *getVAListDeclaration() const {
+ return "typedef char* __builtin_va_list;";
+ }
+};
+} // end anonymous namespace.
+
+
+namespace {
+class DarwinPPC32TargetInfo :
+ public DarwinTargetInfo<PPC32TargetInfo> {
+public:
+ DarwinPPC32TargetInfo(const std::string& triple)
+ : DarwinTargetInfo<PPC32TargetInfo>(triple) {
+ HasAlignMac68kSupport = true;
+ BoolWidth = BoolAlign = 32; //XXX support -mone-byte-bool?
+ LongLongAlign = 32;
+ SuitableAlign = 128;
+ DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:32:64-f32:32:32-f64:64:64-v128:128:128-n32";
+ }
+ virtual const char *getVAListDeclaration() const {
+ return "typedef char* __builtin_va_list;";
+ }
+};
+
+class DarwinPPC64TargetInfo :
+ public DarwinTargetInfo<PPC64TargetInfo> {
+public:
+ DarwinPPC64TargetInfo(const std::string& triple)
+ : DarwinTargetInfo<PPC64TargetInfo>(triple) {
+ HasAlignMac68kSupport = true;
+ SuitableAlign = 128;
+ }
+};
+} // end anonymous namespace.
+
+namespace {
+ static const unsigned PTXAddrSpaceMap[] = {
+ 0, // opencl_global
+ 4, // opencl_local
+ 1 // opencl_constant
+ };
+ class PTXTargetInfo : public TargetInfo {
+ static const char * const GCCRegNames[];
+ static const Builtin::Info BuiltinInfo[];
+ std::vector<llvm::StringRef> AvailableFeatures;
+ public:
+ PTXTargetInfo(const std::string& triple) : TargetInfo(triple) {
+ BigEndian = false;
+ TLSSupported = false;
+ LongWidth = LongAlign = 64;
+ AddrSpaceMap = &PTXAddrSpaceMap;
+ // Define the available target features.
+ // These must be listed in sorted order, since setFeatureEnabled() below
+ // looks them up with std::binary_search.
+ AvailableFeatures.push_back("compute10");
+ AvailableFeatures.push_back("compute11");
+ AvailableFeatures.push_back("compute12");
+ AvailableFeatures.push_back("compute13");
+ AvailableFeatures.push_back("compute20");
+ AvailableFeatures.push_back("double");
+ AvailableFeatures.push_back("no-fma");
+ AvailableFeatures.push_back("ptx20");
+ AvailableFeatures.push_back("ptx21");
+ AvailableFeatures.push_back("ptx22");
+ AvailableFeatures.push_back("ptx23");
+ AvailableFeatures.push_back("sm10");
+ AvailableFeatures.push_back("sm11");
+ AvailableFeatures.push_back("sm12");
+ AvailableFeatures.push_back("sm13");
+ AvailableFeatures.push_back("sm20");
+ AvailableFeatures.push_back("sm21");
+ AvailableFeatures.push_back("sm22");
+ AvailableFeatures.push_back("sm23");
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("__PTX__");
+ }
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {
+ Records = BuiltinInfo;
+ NumRecords = clang::PTX::LastTSBuiltin-Builtin::FirstTSBuiltin;
+ }
+ virtual bool hasFeature(StringRef Feature) const {
+ return Feature == "ptx";
+ }
+
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const;
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ // No aliases.
+ Aliases = 0;
+ NumAliases = 0;
+ }
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const {
+ // FIXME: implement
+ return true;
+ }
+ virtual const char *getClobbers() const {
+ // FIXME: Is this really right?
+ return "";
+ }
+ virtual const char *getVAListDeclaration() const {
+ // FIXME: implement
+ return "typedef char* __builtin_va_list;";
+ }
+
+ virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name,
+ bool Enabled) const;
+ };
+
+ const Builtin::Info PTXTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
+ ALL_LANGUAGES },
+#include "clang/Basic/BuiltinsPTX.def"
+ };
+
+ const char * const PTXTargetInfo::GCCRegNames[] = {
+ "r0"
+ };
+
+ void PTXTargetInfo::getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {
+ Names = GCCRegNames;
+ NumNames = llvm::array_lengthof(GCCRegNames);
+ }
+
+ bool PTXTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name,
+ bool Enabled) const {
+ if (std::binary_search(AvailableFeatures.begin(), AvailableFeatures.end(),
+ Name)) {
+ Features[Name] = Enabled;
+ return true;
+ } else {
+ return false;
+ }
+ }
+
+ class PTX32TargetInfo : public PTXTargetInfo {
+ public:
+ PTX32TargetInfo(const std::string& triple) : PTXTargetInfo(triple) {
+ PointerWidth = PointerAlign = 32;
+ SizeType = PtrDiffType = IntPtrType = TargetInfo::UnsignedInt;
+ DescriptionString
+ = "e-p:32:32-i64:64:64-f64:64:64-n1:8:16:32:64";
+ }
+ };
+
+ class PTX64TargetInfo : public PTXTargetInfo {
+ public:
+ PTX64TargetInfo(const std::string& triple) : PTXTargetInfo(triple) {
+ PointerWidth = PointerAlign = 64;
+ SizeType = PtrDiffType = IntPtrType = TargetInfo::UnsignedLongLong;
+ DescriptionString
+ = "e-p:64:64-i64:64:64-f64:64:64-n1:8:16:32:64";
+ }
+ };
+}
+
+namespace {
+// MBlaze abstract base class
+class MBlazeTargetInfo : public TargetInfo {
+ static const char * const GCCRegNames[];
+ static const TargetInfo::GCCRegAlias GCCRegAliases[];
+
+public:
+ MBlazeTargetInfo(const std::string& triple) : TargetInfo(triple) {
+ DescriptionString = "E-p:32:32:32-i8:8:8-i16:16:16";
+ }
+
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {
+ // FIXME: Implement.
+ Records = 0;
+ NumRecords = 0;
+ }
+
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
+
+ virtual bool hasFeature(StringRef Feature) const {
+ return Feature == "mblaze";
+ }
+
+ virtual const char *getVAListDeclaration() const {
+ return "typedef char* __builtin_va_list;";
+ }
+ virtual const char *getTargetPrefix() const {
+ return "mblaze";
+ }
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const;
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const;
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const {
+ switch (*Name) {
+ default: return false;
+ case 'O': // Zero
+ return true;
+ case 'b': // Base register
+ case 'f': // Floating point register
+ Info.setAllowsRegister();
+ return true;
+ }
+ }
+ virtual const char *getClobbers() const {
+ return "";
+ }
+};
+
+/// MBlazeTargetInfo::getTargetDefines - Return a set of the MBlaze-specific
+/// #defines that are not tied to a specific subtarget.
+void MBlazeTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Target identification.
+ Builder.defineMacro("__microblaze__");
+ Builder.defineMacro("_ARCH_MICROBLAZE");
+ Builder.defineMacro("__MICROBLAZE__");
+
+ // Target properties.
+ Builder.defineMacro("_BIG_ENDIAN");
+ Builder.defineMacro("__BIG_ENDIAN__");
+
+ // Subtarget options.
+ Builder.defineMacro("__REGISTER_PREFIX__", "");
+}
+
+
+const char * const MBlazeTargetInfo::GCCRegNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
+ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
+ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
+ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
+ "hi", "lo", "accum","rmsr", "$fcc1","$fcc2","$fcc3","$fcc4",
+ "$fcc5","$fcc6","$fcc7","$ap", "$rap", "$frp"
+};
+
+void MBlazeTargetInfo::getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {
+ Names = GCCRegNames;
+ NumNames = llvm::array_lengthof(GCCRegNames);
+}
+
+const TargetInfo::GCCRegAlias MBlazeTargetInfo::GCCRegAliases[] = {
+ { {"f0"}, "r0" },
+ { {"f1"}, "r1" },
+ { {"f2"}, "r2" },
+ { {"f3"}, "r3" },
+ { {"f4"}, "r4" },
+ { {"f5"}, "r5" },
+ { {"f6"}, "r6" },
+ { {"f7"}, "r7" },
+ { {"f8"}, "r8" },
+ { {"f9"}, "r9" },
+ { {"f10"}, "r10" },
+ { {"f11"}, "r11" },
+ { {"f12"}, "r12" },
+ { {"f13"}, "r13" },
+ { {"f14"}, "r14" },
+ { {"f15"}, "r15" },
+ { {"f16"}, "r16" },
+ { {"f17"}, "r17" },
+ { {"f18"}, "r18" },
+ { {"f19"}, "r19" },
+ { {"f20"}, "r20" },
+ { {"f21"}, "r21" },
+ { {"f22"}, "r22" },
+ { {"f23"}, "r23" },
+ { {"f24"}, "r24" },
+ { {"f25"}, "r25" },
+ { {"f26"}, "r26" },
+ { {"f27"}, "r27" },
+ { {"f28"}, "r28" },
+ { {"f29"}, "r29" },
+ { {"f30"}, "r30" },
+ { {"f31"}, "r31" },
+};
+
+void MBlazeTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ Aliases = GCCRegAliases;
+ NumAliases = llvm::array_lengthof(GCCRegAliases);
+}
+} // end anonymous namespace.
+
+namespace {
+// Namespace for x86 abstract base class
+const Builtin::Info BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
+ ALL_LANGUAGES },
+#include "clang/Basic/BuiltinsX86.def"
+};
+
+static const char* const GCCRegNames[] = {
+ "ax", "dx", "cx", "bx", "si", "di", "bp", "sp",
+ "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
+ "argp", "flags", "fpcr", "fpsr", "dirflag", "frame",
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
+ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15",
+ "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7",
+ "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15",
+};
+
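+// Extra spellings accepted for some of the registers above; the trailing
+// number is the index of the canonical name in GCCRegNames (e.g. "eax" and
+// "rax" both refer to entry 0, "ax").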
+const TargetInfo::AddlRegName AddlRegNames[] = {
+ { { "al", "ah", "eax", "rax" }, 0 },
+ { { "bl", "bh", "ebx", "rbx" }, 3 },
+ { { "cl", "ch", "ecx", "rcx" }, 2 },
+ { { "dl", "dh", "edx", "rdx" }, 1 },
+ { { "esi", "rsi" }, 4 },
+ { { "edi", "rdi" }, 5 },
+ { { "esp", "rsp" }, 7 },
+ { { "ebp", "rbp" }, 6 },
+};
+
+// X86 target abstract base class; x86-32 and x86-64 are very close, so
+// most of the implementation can be shared.
+class X86TargetInfo : public TargetInfo {
+ enum X86SSEEnum {
+ NoSSE, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42, AVX, AVX2
+ } SSELevel;
+ enum MMX3DNowEnum {
+ NoMMX3DNow, MMX, AMD3DNow, AMD3DNowAthlon
+ } MMX3DNowLevel;
+
+ bool HasAES;
+ bool HasLZCNT;
+ bool HasBMI;
+ bool HasBMI2;
+ bool HasPOPCNT;
+ bool HasFMA4;
+
+ /// \brief Enumeration of all of the X86 CPUs supported by Clang.
+ ///
+ /// Each enumerator represents a particular CPU supported by Clang. These
+ /// loosely correspond to the values accepted by the '-march' and '-mtune'
+ /// flags.
+ enum CPUKind {
+ CK_Generic,
+
+ /// \name i386
+ /// i386-generation processors.
+ //@{
+ CK_i386,
+ //@}
+
+ /// \name i486
+ /// i486-generation processors.
+ //@{
+ CK_i486,
+ CK_WinChipC6,
+ CK_WinChip2,
+ CK_C3,
+ //@}
+
+ /// \name i586
+ /// i586-generation processors, P5 microarchitecture based.
+ //@{
+ CK_i586,
+ CK_Pentium,
+ CK_PentiumMMX,
+ //@}
+
+ /// \name i686
+ /// i686-generation processors, P6 / Pentium M microarchitecture based.
+ //@{
+ CK_i686,
+ CK_PentiumPro,
+ CK_Pentium2,
+ CK_Pentium3,
+ CK_Pentium3M,
+ CK_PentiumM,
+ CK_C3_2,
+
+ /// This enumerator is a bit odd, as GCC no longer accepts -march=yonah.
+ /// Clang, however, has some logic to support this.
+ // FIXME: Warn, deprecate, and potentially remove this.
+ CK_Yonah,
+ //@}
+
+ /// \name Netburst
+ /// Netburst microarchitecture based processors.
+ //@{
+ CK_Pentium4,
+ CK_Pentium4M,
+ CK_Prescott,
+ CK_Nocona,
+ //@}
+
+ /// \name Core
+ /// Core microarchitecture based processors.
+ //@{
+ CK_Core2,
+
+ /// This enumerator, like \see CK_Yonah, is a bit odd. It is another
+ /// codename which GCC no longer accepts as an option to -march, but Clang
+ /// has some logic for recognizing it.
+ // FIXME: Warn, deprecate, and potentially remove this.
+ CK_Penryn,
+ //@}
+
+ /// \name Atom
+ /// Atom processors
+ //@{
+ CK_Atom,
+ //@}
+
+ /// \name Nehalem
+ /// Nehalem microarchitecture based processors.
+ //@{
+ CK_Corei7,
+ CK_Corei7AVX,
+ CK_CoreAVXi,
+ CK_CoreAVX2,
+ //@}
+
+ /// \name K6
+ /// K6 architecture processors.
+ //@{
+ CK_K6,
+ CK_K6_2,
+ CK_K6_3,
+ //@}
+
+ /// \name K7
+ /// K7 architecture processors.
+ //@{
+ CK_Athlon,
+ CK_AthlonThunderbird,
+ CK_Athlon4,
+ CK_AthlonXP,
+ CK_AthlonMP,
+ //@}
+
+ /// \name K8
+ /// K8 architecture processors.
+ //@{
+ CK_Athlon64,
+ CK_Athlon64SSE3,
+ CK_AthlonFX,
+ CK_K8,
+ CK_K8SSE3,
+ CK_Opteron,
+ CK_OpteronSSE3,
+ CK_AMDFAM10,
+ //@}
+
+ /// \name Bobcat
+ /// Bobcat architecture processors.
+ //@{
+ CK_BTVER1,
+ //@}
+
+ /// \name Bulldozer
+ /// Bulldozer architecture processors.
+ //@{
+ CK_BDVER1,
+ CK_BDVER2,
+ //@}
+
+ /// This specification is deprecated and will be removed in the future.
+ /// Users should prefer \see CK_K8.
+ // FIXME: Warn on this when the CPU is set to it.
+ CK_x86_64,
+ //@}
+
+ /// \name Geode
+ /// Geode processors.
+ //@{
+ CK_Geode
+ //@}
+ } CPU;
+
+public:
+ X86TargetInfo(const std::string& triple)
+ : TargetInfo(triple), SSELevel(NoSSE), MMX3DNowLevel(NoMMX3DNow),
+ HasAES(false), HasLZCNT(false), HasBMI(false), HasBMI2(false),
+ HasPOPCNT(false), HasFMA4(false), CPU(CK_Generic) {
+ BigEndian = false;
+ LongDoubleFormat = &llvm::APFloat::x87DoubleExtended;
+ }
+ virtual unsigned getFloatEvalMethod() const {
+ // X87 evaluates with 80 bits "long double" precision.
+ return SSELevel == NoSSE ? 2 : 0;
+ }
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {
+ Records = BuiltinInfo;
+ NumRecords = clang::X86::LastTSBuiltin-Builtin::FirstTSBuiltin;
+ }
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {
+ Names = GCCRegNames;
+ NumNames = llvm::array_lengthof(GCCRegNames);
+ }
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ Aliases = 0;
+ NumAliases = 0;
+ }
+ virtual void getGCCAddlRegNames(const AddlRegName *&Names,
+ unsigned &NumNames) const {
+ Names = AddlRegNames;
+ NumNames = llvm::array_lengthof(AddlRegNames);
+ }
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const;
+ virtual std::string convertConstraint(const char *&Constraint) const;
+ virtual const char *getClobbers() const {
+ return "~{dirflag},~{fpsr},~{flags}";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
+ virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name,
+ bool Enabled) const;
+ virtual void getDefaultFeatures(llvm::StringMap<bool> &Features) const;
+ virtual bool hasFeature(StringRef Feature) const;
+ virtual void HandleTargetFeatures(std::vector<std::string> &Features);
+ virtual const char* getABI() const {
+ if (PointerWidth == 64 && SSELevel >= AVX)
+ return "avx";
+ else if (PointerWidth == 32 && MMX3DNowLevel == NoMMX3DNow)
+ return "no-mmx";
+ return "";
+ }
+ virtual bool setCPU(const std::string &Name) {
+ CPU = llvm::StringSwitch<CPUKind>(Name)
+ .Case("i386", CK_i386)
+ .Case("i486", CK_i486)
+ .Case("winchip-c6", CK_WinChipC6)
+ .Case("winchip2", CK_WinChip2)
+ .Case("c3", CK_C3)
+ .Case("i586", CK_i586)
+ .Case("pentium", CK_Pentium)
+ .Case("pentium-mmx", CK_PentiumMMX)
+ .Case("i686", CK_i686)
+ .Case("pentiumpro", CK_PentiumPro)
+ .Case("pentium2", CK_Pentium2)
+ .Case("pentium3", CK_Pentium3)
+ .Case("pentium3m", CK_Pentium3M)
+ .Case("pentium-m", CK_PentiumM)
+ .Case("c3-2", CK_C3_2)
+ .Case("yonah", CK_Yonah)
+ .Case("pentium4", CK_Pentium4)
+ .Case("pentium4m", CK_Pentium4M)
+ .Case("prescott", CK_Prescott)
+ .Case("nocona", CK_Nocona)
+ .Case("core2", CK_Core2)
+ .Case("penryn", CK_Penryn)
+ .Case("atom", CK_Atom)
+ .Case("corei7", CK_Corei7)
+ .Case("corei7-avx", CK_Corei7AVX)
+ .Case("core-avx-i", CK_CoreAVXi)
+ .Case("core-avx2", CK_CoreAVX2)
+ .Case("k6", CK_K6)
+ .Case("k6-2", CK_K6_2)
+ .Case("k6-3", CK_K6_3)
+ .Case("athlon", CK_Athlon)
+ .Case("athlon-tbird", CK_AthlonThunderbird)
+ .Case("athlon-4", CK_Athlon4)
+ .Case("athlon-xp", CK_AthlonXP)
+ .Case("athlon-mp", CK_AthlonMP)
+ .Case("athlon64", CK_Athlon64)
+ .Case("athlon64-sse3", CK_Athlon64SSE3)
+ .Case("athlon-fx", CK_AthlonFX)
+ .Case("k8", CK_K8)
+ .Case("k8-sse3", CK_K8SSE3)
+ .Case("opteron", CK_Opteron)
+ .Case("opteron-sse3", CK_OpteronSSE3)
+ .Case("amdfam10", CK_AMDFAM10)
+ .Case("btver1", CK_BTVER1)
+ .Case("bdver1", CK_BDVER1)
+ .Case("bdver2", CK_BDVER2)
+ .Case("x86-64", CK_x86_64)
+ .Case("geode", CK_Geode)
+ .Default(CK_Generic);
+
+ // Perform any per-CPU checks necessary to determine if this CPU is
+ // acceptable.
+ // FIXME: This results in terrible diagnostics. Clang just says the CPU is
+ // invalid without explaining *why*.
+ switch (CPU) {
+ case CK_Generic:
+ // No processor selected!
+ return false;
+
+ case CK_i386:
+ case CK_i486:
+ case CK_WinChipC6:
+ case CK_WinChip2:
+ case CK_C3:
+ case CK_i586:
+ case CK_Pentium:
+ case CK_PentiumMMX:
+ case CK_i686:
+ case CK_PentiumPro:
+ case CK_Pentium2:
+ case CK_Pentium3:
+ case CK_Pentium3M:
+ case CK_PentiumM:
+ case CK_Yonah:
+ case CK_C3_2:
+ case CK_Pentium4:
+ case CK_Pentium4M:
+ case CK_Prescott:
+ case CK_K6:
+ case CK_K6_2:
+ case CK_K6_3:
+ case CK_Athlon:
+ case CK_AthlonThunderbird:
+ case CK_Athlon4:
+ case CK_AthlonXP:
+ case CK_AthlonMP:
+ case CK_Geode:
+ // Only accept certain architectures when compiling in 32-bit mode.
+ if (PointerWidth != 32)
+ return false;
+
+ // Fallthrough
+ case CK_Nocona:
+ case CK_Core2:
+ case CK_Penryn:
+ case CK_Atom:
+ case CK_Corei7:
+ case CK_Corei7AVX:
+ case CK_CoreAVXi:
+ case CK_CoreAVX2:
+ case CK_Athlon64:
+ case CK_Athlon64SSE3:
+ case CK_AthlonFX:
+ case CK_K8:
+ case CK_K8SSE3:
+ case CK_Opteron:
+ case CK_OpteronSSE3:
+ case CK_AMDFAM10:
+ case CK_BTVER1:
+ case CK_BDVER1:
+ case CK_BDVER2:
+ case CK_x86_64:
+ return true;
+ }
+ llvm_unreachable("Unhandled CPU kind");
+ }
+};
+
+void X86TargetInfo::getDefaultFeatures(llvm::StringMap<bool> &Features) const {
+ // FIXME: This should not be here.
+ Features["3dnow"] = false;
+ Features["3dnowa"] = false;
+ Features["mmx"] = false;
+ Features["sse"] = false;
+ Features["sse2"] = false;
+ Features["sse3"] = false;
+ Features["ssse3"] = false;
+ Features["sse41"] = false;
+ Features["sse42"] = false;
+ Features["sse4a"] = false;
+ Features["aes"] = false;
+ Features["avx"] = false;
+ Features["avx2"] = false;
+ Features["lzcnt"] = false;
+ Features["bmi"] = false;
+ Features["bmi2"] = false;
+ Features["popcnt"] = false;
+ Features["fma4"] = false;
+
+ // FIXME: This *really* should not be here.
+
+ // X86_64 always has SSE2.
+ if (PointerWidth == 64)
+ Features["sse2"] = Features["sse"] = Features["mmx"] = true;
+
+ switch (CPU) {
+ case CK_Generic:
+ case CK_i386:
+ case CK_i486:
+ case CK_i586:
+ case CK_Pentium:
+ case CK_i686:
+ case CK_PentiumPro:
+ break;
+ case CK_PentiumMMX:
+ case CK_Pentium2:
+ setFeatureEnabled(Features, "mmx", true);
+ break;
+ case CK_Pentium3:
+ case CK_Pentium3M:
+ setFeatureEnabled(Features, "mmx", true);
+ setFeatureEnabled(Features, "sse", true);
+ break;
+ case CK_PentiumM:
+ case CK_Pentium4:
+ case CK_Pentium4M:
+ case CK_x86_64:
+ setFeatureEnabled(Features, "mmx", true);
+ setFeatureEnabled(Features, "sse2", true);
+ break;
+ case CK_Yonah:
+ case CK_Prescott:
+ case CK_Nocona:
+ setFeatureEnabled(Features, "mmx", true);
+ setFeatureEnabled(Features, "sse3", true);
+ break;
+ case CK_Core2:
+ setFeatureEnabled(Features, "mmx", true);
+ setFeatureEnabled(Features, "ssse3", true);
+ break;
+ case CK_Penryn:
+ setFeatureEnabled(Features, "mmx", true);
+ setFeatureEnabled(Features, "sse4.1", true);
+ break;
+ case CK_Atom:
+ setFeatureEnabled(Features, "mmx", true);
+ setFeatureEnabled(Features, "ssse3", true);
+ break;
+ case CK_Corei7:
+ setFeatureEnabled(Features, "mmx", true);
+ setFeatureEnabled(Features, "sse4", true);
+ setFeatureEnabled(Features, "aes", true);
+ break;
+ case CK_Corei7AVX:
+ case CK_CoreAVXi:
+ setFeatureEnabled(Features, "mmx", true);
+ setFeatureEnabled(Features, "sse4", true);
+ setFeatureEnabled(Features, "aes", true);
+ //setFeatureEnabled(Features, "avx", true);
+ break;
+ case CK_CoreAVX2:
+ setFeatureEnabled(Features, "mmx", true);
+ setFeatureEnabled(Features, "sse4", true);
+ setFeatureEnabled(Features, "aes", true);
+ setFeatureEnabled(Features, "lzcnt", true);
+ setFeatureEnabled(Features, "bmi", true);
+ setFeatureEnabled(Features, "bmi2", true);
+ //setFeatureEnabled(Features, "avx2", true);
+ break;
+ case CK_K6:
+ case CK_WinChipC6:
+ setFeatureEnabled(Features, "mmx", true);
+ break;
+ case CK_K6_2:
+ case CK_K6_3:
+ case CK_WinChip2:
+ case CK_C3:
+ setFeatureEnabled(Features, "3dnow", true);
+ break;
+ case CK_Athlon:
+ case CK_AthlonThunderbird:
+ case CK_Geode:
+ setFeatureEnabled(Features, "3dnowa", true);
+ break;
+ case CK_Athlon4:
+ case CK_AthlonXP:
+ case CK_AthlonMP:
+ setFeatureEnabled(Features, "sse", true);
+ setFeatureEnabled(Features, "3dnowa", true);
+ break;
+ case CK_K8:
+ case CK_Opteron:
+ case CK_Athlon64:
+ case CK_AthlonFX:
+ setFeatureEnabled(Features, "sse2", true);
+ setFeatureEnabled(Features, "3dnowa", true);
+ break;
+ case CK_K8SSE3:
+ case CK_OpteronSSE3:
+ case CK_Athlon64SSE3:
+ setFeatureEnabled(Features, "sse3", true);
+ setFeatureEnabled(Features, "3dnowa", true);
+ break;
+ case CK_AMDFAM10:
+ setFeatureEnabled(Features, "sse3", true);
+ setFeatureEnabled(Features, "sse4a", true);
+ setFeatureEnabled(Features, "3dnowa", true);
+ break;
+ case CK_BTVER1:
+ setFeatureEnabled(Features, "ssse3", true);
+ setFeatureEnabled(Features, "sse4a", true);
+ case CK_BDVER1:
+ case CK_BDVER2:
+ setFeatureEnabled(Features, "sse4", true);
+ setFeatureEnabled(Features, "sse4a", true);
+ setFeatureEnabled(Features, "aes", true);
+ break;
+ case CK_C3_2:
+ setFeatureEnabled(Features, "mmx", true);
+ setFeatureEnabled(Features, "sse", true);
+ break;
+ }
+}
+
+bool X86TargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name,
+ bool Enabled) const {
+ // FIXME: This *really* should not be here. We need some way of translating
+ // options into llvm subtarget features.
+ if (!Features.count(Name) &&
+ (Name != "sse4" && Name != "sse4.2" && Name != "sse4.1"))
+ return false;
+
+ // FIXME: this should probably use a switch with fall through.
+
+ if (Enabled) {
+ if (Name == "mmx")
+ Features["mmx"] = true;
+ else if (Name == "sse")
+ Features["mmx"] = Features["sse"] = true;
+ else if (Name == "sse2")
+ Features["mmx"] = Features["sse"] = Features["sse2"] = true;
+ else if (Name == "sse3")
+ Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ true;
+ else if (Name == "ssse3")
+ Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["ssse3"] = true;
+ else if (Name == "sse4" || Name == "sse4.2")
+ Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["ssse3"] = Features["sse41"] = Features["sse42"] =
+ Features["popcnt"] = true;
+ else if (Name == "sse4.1")
+ Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["ssse3"] = Features["sse41"] = true;
+ else if (Name == "3dnow")
+ Features["mmx"] = Features["3dnow"] = true;
+ else if (Name == "3dnowa")
+ Features["mmx"] = Features["3dnow"] = Features["3dnowa"] = true;
+ else if (Name == "aes")
+ Features["aes"] = true;
+ else if (Name == "avx")
+ Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["ssse3"] = Features["sse41"] = Features["sse42"] =
+ Features["popcnt"] = Features["avx"] = true;
+ else if (Name == "avx2")
+ Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["ssse3"] = Features["sse41"] = Features["sse42"] =
+ Features["popcnt"] = Features["avx"] = Features["avx2"] = true;
+ else if (Name == "fma4")
+ Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["ssse3"] = Features["sse41"] = Features["sse42"] =
+ Features["popcnt"] = Features["avx"] = Features["fma4"] = true;
+ else if (Name == "sse4a")
+ Features["mmx"] = Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["lzcnt"] = Features["popcnt"] = Features["sse4a"] = true;
+ else if (Name == "lzcnt")
+ Features["lzcnt"] = true;
+ else if (Name == "bmi")
+ Features["bmi"] = true;
+ else if (Name == "bmi2")
+ Features["bmi2"] = true;
+ else if (Name == "popcnt")
+ Features["popcnt"] = true;
+ } else {
+ if (Name == "mmx")
+ Features["mmx"] = Features["3dnow"] = Features["3dnowa"] = false;
+ else if (Name == "sse")
+ Features["sse"] = Features["sse2"] = Features["sse3"] =
+ Features["ssse3"] = Features["sse41"] = Features["sse42"] =
+ Features["sse4a"] = false;
+ else if (Name == "sse2")
+ Features["sse2"] = Features["sse3"] = Features["ssse3"] =
+ Features["sse41"] = Features["sse42"] = Features["sse4a"] = false;
+ else if (Name == "sse3")
+ Features["sse3"] = Features["ssse3"] = Features["sse41"] =
+ Features["sse42"] = Features["sse4a"] = false;
+ else if (Name == "ssse3")
+ Features["ssse3"] = Features["sse41"] = Features["sse42"] = false;
+ else if (Name == "sse4" || Name == "sse4.1")
+ Features["sse41"] = Features["sse42"] = false;
+ else if (Name == "sse4.2")
+ Features["sse42"] = false;
+ else if (Name == "3dnow")
+ Features["3dnow"] = Features["3dnowa"] = false;
+ else if (Name == "3dnowa")
+ Features["3dnowa"] = false;
+ else if (Name == "aes")
+ Features["aes"] = false;
+ else if (Name == "avx")
+ Features["avx"] = Features["avx2"] = Features["fma4"] = false;
+ else if (Name == "avx2")
+ Features["avx2"] = false;
+ else if (Name == "sse4a")
+ Features["sse4a"] = false;
+ else if (Name == "lzcnt")
+ Features["lzcnt"] = false;
+ else if (Name == "bmi")
+ Features["bmi"] = false;
+ else if (Name == "bmi2")
+ Features["bmi2"] = false;
+ else if (Name == "popcnt")
+ Features["popcnt"] = false;
+ else if (Name == "fma4")
+ Features["fma4"] = false;
+ }
+
+ return true;
+}
+
+/// HandleTargetFeatures - Perform initialization based on the user-configured
+/// set of features.
+void X86TargetInfo::HandleTargetFeatures(std::vector<std::string> &Features) {
+ // Remember the maximum enabled SSE level.
+ for (unsigned i = 0, e = Features.size(); i != e; ++i) {
+ // Ignore disabled features.
+ if (Features[i][0] == '-')
+ continue;
+
+ StringRef Feature = StringRef(Features[i]).substr(1);
+
+ if (Feature == "aes") {
+ HasAES = true;
+ continue;
+ }
+
+ if (Feature == "lzcnt") {
+ HasLZCNT = true;
+ continue;
+ }
+
+ if (Feature == "bmi") {
+ HasBMI = true;
+ continue;
+ }
+
+ if (Feature == "bmi2") {
+ HasBMI2 = true;
+ continue;
+ }
+
+ if (Feature == "popcnt") {
+ HasPOPCNT = true;
+ continue;
+ }
+
+ if (Feature == "fma4") {
+ HasFMA4 = true;
+ continue;
+ }
+
+ assert(Features[i][0] == '+' && "Invalid target feature!");
+ X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
+ .Case("avx2", AVX2)
+ .Case("avx", AVX)
+ .Case("sse42", SSE42)
+ .Case("sse41", SSE41)
+ .Case("ssse3", SSSE3)
+ .Case("sse3", SSE3)
+ .Case("sse2", SSE2)
+ .Case("sse", SSE1)
+ .Default(NoSSE);
+ SSELevel = std::max(SSELevel, Level);
+
+ MMX3DNowEnum ThreeDNowLevel =
+ llvm::StringSwitch<MMX3DNowEnum>(Feature)
+ .Case("3dnowa", AMD3DNowAthlon)
+ .Case("3dnow", AMD3DNow)
+ .Case("mmx", MMX)
+ .Default(NoMMX3DNow);
+
+ MMX3DNowLevel = std::max(MMX3DNowLevel, ThreeDNowLevel);
+ }
+
+ // Don't tell the backend if we're turning off mmx; it will end up disabling
+ // SSE, which we don't want.
+ std::vector<std::string>::iterator it;
+ it = std::find(Features.begin(), Features.end(), "-mmx");
+ if (it != Features.end())
+ Features.erase(it);
+}
+
+/// X86TargetInfo::getTargetDefines - Return the set of the X86-specific macro
+/// definitions for this particular subtarget.
+void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Target identification.
+ if (PointerWidth == 64) {
+ if (getLongWidth() == 64) {
+ Builder.defineMacro("_LP64");
+ Builder.defineMacro("__LP64__");
+ }
+ Builder.defineMacro("__amd64__");
+ Builder.defineMacro("__amd64");
+ Builder.defineMacro("__x86_64");
+ Builder.defineMacro("__x86_64__");
+ } else {
+ DefineStd(Builder, "i386", Opts);
+ }
+
+ // Subtarget options.
+ // FIXME: We are hard-coding the tune parameters based on the CPU, but they
+ // truly should be based on -mtune options.
+ switch (CPU) {
+ case CK_Generic:
+ break;
+ case CK_i386:
+ // The rest are coming from the i386 define above.
+ Builder.defineMacro("__tune_i386__");
+ break;
+ case CK_i486:
+ case CK_WinChipC6:
+ case CK_WinChip2:
+ case CK_C3:
+ defineCPUMacros(Builder, "i486");
+ break;
+ case CK_PentiumMMX:
+ Builder.defineMacro("__pentium_mmx__");
+ Builder.defineMacro("__tune_pentium_mmx__");
+ // Fallthrough
+ case CK_i586:
+ case CK_Pentium:
+ defineCPUMacros(Builder, "i586");
+ defineCPUMacros(Builder, "pentium");
+ break;
+ case CK_Pentium3:
+ case CK_Pentium3M:
+ case CK_PentiumM:
+ Builder.defineMacro("__tune_pentium3__");
+ // Fallthrough
+ case CK_Pentium2:
+ case CK_C3_2:
+ Builder.defineMacro("__tune_pentium2__");
+ // Fallthrough
+ case CK_PentiumPro:
+ Builder.defineMacro("__tune_i686__");
+ Builder.defineMacro("__tune_pentiumpro__");
+ // Fallthrough
+ case CK_i686:
+ Builder.defineMacro("__i686");
+ Builder.defineMacro("__i686__");
+ // Strangely, __tune_i686__ isn't defined by GCC when CPU == i686.
+ Builder.defineMacro("__pentiumpro");
+ Builder.defineMacro("__pentiumpro__");
+ break;
+ case CK_Pentium4:
+ case CK_Pentium4M:
+ defineCPUMacros(Builder, "pentium4");
+ break;
+ case CK_Yonah:
+ case CK_Prescott:
+ case CK_Nocona:
+ defineCPUMacros(Builder, "nocona");
+ break;
+ case CK_Core2:
+ case CK_Penryn:
+ defineCPUMacros(Builder, "core2");
+ break;
+ case CK_Atom:
+ defineCPUMacros(Builder, "atom");
+ break;
+ case CK_Corei7:
+ case CK_Corei7AVX:
+ case CK_CoreAVXi:
+ case CK_CoreAVX2:
+ defineCPUMacros(Builder, "corei7");
+ break;
+ case CK_K6_2:
+ Builder.defineMacro("__k6_2__");
+ Builder.defineMacro("__tune_k6_2__");
+ // Fallthrough
+ case CK_K6_3:
+ if (CPU != CK_K6_2) { // In case of fallthrough
+ // FIXME: GCC may be enabling these in cases where some other k6
+ // architecture is specified but -m3dnow is explicitly provided. The
+ // exact semantics need to be determined and emulated here.
+ Builder.defineMacro("__k6_3__");
+ Builder.defineMacro("__tune_k6_3__");
+ }
+ // Fallthrough
+ case CK_K6:
+ defineCPUMacros(Builder, "k6");
+ break;
+ case CK_Athlon:
+ case CK_AthlonThunderbird:
+ case CK_Athlon4:
+ case CK_AthlonXP:
+ case CK_AthlonMP:
+ defineCPUMacros(Builder, "athlon");
+ if (SSELevel != NoSSE) {
+ Builder.defineMacro("__athlon_sse__");
+ Builder.defineMacro("__tune_athlon_sse__");
+ }
+ break;
+ case CK_K8:
+ case CK_K8SSE3:
+ case CK_x86_64:
+ case CK_Opteron:
+ case CK_OpteronSSE3:
+ case CK_Athlon64:
+ case CK_Athlon64SSE3:
+ case CK_AthlonFX:
+ defineCPUMacros(Builder, "k8");
+ break;
+ case CK_AMDFAM10:
+ defineCPUMacros(Builder, "amdfam10");
+ break;
+ case CK_BTVER1:
+ defineCPUMacros(Builder, "btver1");
+ break;
+ case CK_BDVER1:
+ defineCPUMacros(Builder, "bdver1");
+ break;
+ case CK_BDVER2:
+ defineCPUMacros(Builder, "bdver2");
+ break;
+ case CK_Geode:
+ defineCPUMacros(Builder, "geode");
+ break;
+ }
+
+ // Target properties.
+ Builder.defineMacro("__LITTLE_ENDIAN__");
+ Builder.defineMacro("__REGISTER_PREFIX__", "");
+
+ // Define __NO_MATH_INLINES on linux/x86 so that we don't get inline
+ // functions in glibc header files that use FP Stack inline asm which the
+ // backend can't deal with (PR879).
+ Builder.defineMacro("__NO_MATH_INLINES");
+
+ if (HasAES)
+ Builder.defineMacro("__AES__");
+
+ if (HasLZCNT)
+ Builder.defineMacro("__LZCNT__");
+
+ if (HasBMI)
+ Builder.defineMacro("__BMI__");
+
+ if (HasBMI2)
+ Builder.defineMacro("__BMI2__");
+
+ if (HasPOPCNT)
+ Builder.defineMacro("__POPCNT__");
+
+ if (HasFMA4)
+ Builder.defineMacro("__FMA4__");
+
+ // Each case falls through to the previous one here.
+ switch (SSELevel) {
+ case AVX2:
+ Builder.defineMacro("__AVX2__");
+ case AVX:
+ Builder.defineMacro("__AVX__");
+ case SSE42:
+ Builder.defineMacro("__SSE4_2__");
+ case SSE41:
+ Builder.defineMacro("__SSE4_1__");
+ case SSSE3:
+ Builder.defineMacro("__SSSE3__");
+ case SSE3:
+ Builder.defineMacro("__SSE3__");
+ case SSE2:
+ Builder.defineMacro("__SSE2__");
+ Builder.defineMacro("__SSE2_MATH__"); // -mfp-math=sse always implied.
+ case SSE1:
+ Builder.defineMacro("__SSE__");
+ Builder.defineMacro("__SSE_MATH__"); // -mfp-math=sse always implied.
+ case NoSSE:
+ break;
+ }
+
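+ // _M_IX86_FP follows the MSVC convention: 2 when SSE2 or better is
+ // available (/arch:SSE2), 1 for plain SSE (/arch:SSE), and 0 otherwise.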
+ if (Opts.MicrosoftExt && PointerWidth == 32) {
+ switch (SSELevel) {
+ case AVX2:
+ case AVX:
+ case SSE42:
+ case SSE41:
+ case SSSE3:
+ case SSE3:
+ case SSE2:
+ Builder.defineMacro("_M_IX86_FP", Twine(2));
+ break;
+ case SSE1:
+ Builder.defineMacro("_M_IX86_FP", Twine(1));
+ break;
+ default:
+ Builder.defineMacro("_M_IX86_FP", Twine(0));
+ }
+ }
+
+ // Each case falls through to the previous one here.
+ switch (MMX3DNowLevel) {
+ case AMD3DNowAthlon:
+ Builder.defineMacro("__3dNOW_A__");
+ case AMD3DNow:
+ Builder.defineMacro("__3dNOW__");
+ case MMX:
+ Builder.defineMacro("__MMX__");
+ case NoMMX3DNow:
+ break;
+ }
+}
+
+bool X86TargetInfo::hasFeature(StringRef Feature) const {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("aes", HasAES)
+ .Case("avx", SSELevel >= AVX)
+ .Case("avx2", SSELevel >= AVX2)
+ .Case("bmi", HasBMI)
+ .Case("bmi2", HasBMI2)
+ .Case("fma4", HasFMA4)
+ .Case("lzcnt", HasLZCNT)
+ .Case("mm3dnow", MMX3DNowLevel >= AMD3DNow)
+ .Case("mm3dnowa", MMX3DNowLevel >= AMD3DNowAthlon)
+ .Case("mmx", MMX3DNowLevel >= MMX)
+ .Case("popcnt", HasPOPCNT)
+ .Case("sse", SSELevel >= SSE1)
+ .Case("sse2", SSELevel >= SSE2)
+ .Case("sse3", SSELevel >= SSE3)
+ .Case("ssse3", SSELevel >= SSSE3)
+ .Case("sse41", SSELevel >= SSE41)
+ .Case("sse42", SSELevel >= SSE42)
+ .Case("x86", true)
+ .Case("x86_32", PointerWidth == 32)
+ .Case("x86_64", PointerWidth == 64)
+ .Default(false);
+}
+
+bool
+X86TargetInfo::validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const {
+ switch (*Name) {
+ default: return false;
+ case 'Y': // first letter of a pair:
+ switch (*(Name+1)) {
+ default: return false;
+ case '0': // First SSE register.
+ case 't': // Any SSE register, when SSE2 is enabled.
+ case 'i': // Any SSE register, when SSE2 and inter-unit moves enabled.
+ case 'm': // any MMX register, when inter-unit moves enabled.
+ break; // falls through to setAllowsRegister.
+ }
+ case 'a': // eax.
+ case 'b': // ebx.
+ case 'c': // ecx.
+ case 'd': // edx.
+ case 'S': // esi.
+ case 'D': // edi.
+ case 'A': // edx:eax.
+ case 'f': // any x87 floating point stack register.
+ case 't': // top of floating point stack.
+ case 'u': // second from top of floating point stack.
+ case 'q': // Any register accessible as [r]l: a, b, c, and d.
+ case 'y': // Any MMX register.
+ case 'x': // Any SSE register.
+ case 'Q': // Any register accessible as [r]h: a, b, c, and d.
+ case 'R': // "Legacy" registers: ax, bx, cx, dx, di, si, sp, bp.
+ case 'l': // "Index" registers: any general register that can be used as an
+ // index in a base+index memory access.
+ Info.setAllowsRegister();
+ return true;
+ case 'C': // SSE floating point constant.
+ case 'G': // x87 floating point constant.
+ case 'e': // 32-bit signed integer constant for use with zero-extending
+ // x86_64 instructions.
+ case 'Z': // 32-bit unsigned integer constant for use with zero-extending
+ // x86_64 instructions.
+ return true;
+ }
+}
+
+
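+// Translate a GCC inline-asm constraint letter into the LLVM constraint
+// string, e.g. the "a" in
+//   asm("movl %1, %0" : "=a"(dst) : "r"(src));
+// becomes "{ax}", and "t" (top of the x87 stack) becomes "{st}".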
+std::string
+X86TargetInfo::convertConstraint(const char *&Constraint) const {
+ switch (*Constraint) {
+ case 'a': return std::string("{ax}");
+ case 'b': return std::string("{bx}");
+ case 'c': return std::string("{cx}");
+ case 'd': return std::string("{dx}");
+ case 'S': return std::string("{si}");
+ case 'D': return std::string("{di}");
+ case 'p': // address
+ return std::string("im");
+ case 't': // top of floating point stack.
+ return std::string("{st}");
+ case 'u': // second from top of floating point stack.
+ return std::string("{st(1)}");
+ default:
+ return std::string(1, *Constraint);
+ }
+}
+} // end anonymous namespace
+
+namespace {
+// X86-32 generic target
+class X86_32TargetInfo : public X86TargetInfo {
+public:
+ X86_32TargetInfo(const std::string& triple) : X86TargetInfo(triple) {
+ DoubleAlign = LongLongAlign = 32;
+ LongDoubleWidth = 96;
+ LongDoubleAlign = 32;
+ SuitableAlign = 128;
+ DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-"
+ "a0:0:64-f80:32:32-n8:16:32-S128";
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ IntPtrType = SignedInt;
+ RegParmMax = 3;
+
+ // Use fpret for all types.
+ RealTypeUsesObjCFPRet = ((1 << TargetInfo::Float) |
+ (1 << TargetInfo::Double) |
+ (1 << TargetInfo::LongDouble));
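+ // RealTypeUsesObjCFPRet marks return types that are retrieved via the
+ // fpret variant of the Objective-C message send (objc_msgSend_fpret).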
+
+ // x86-32 has atomics up to 8 bytes
+ // FIXME: Check that we actually have cmpxchg8b before setting
+ // MaxAtomicInlineWidth. (cmpxchg8b is an i586 instruction.)
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ }
+ virtual const char *getVAListDeclaration() const {
+ return "typedef char* __builtin_va_list;";
+ }
+
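+ // Registers used to pass exception-handling data (DWARF numbering):
+ // 0 is EAX and 2 is EDX on x86-32.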
+ int getEHDataRegisterNumber(unsigned RegNo) const {
+ if (RegNo == 0) return 0;
+ if (RegNo == 1) return 2;
+ return -1;
+ }
+};
+} // end anonymous namespace
+
+namespace {
+class NetBSDI386TargetInfo : public NetBSDTargetInfo<X86_32TargetInfo> {
+public:
+ NetBSDI386TargetInfo(const std::string &triple) :
+ NetBSDTargetInfo<X86_32TargetInfo>(triple) {
+ }
+
+ virtual unsigned getFloatEvalMethod() const {
+ // NetBSD defaults to "double" rounding
+ return 1;
+ }
+};
+} // end anonymous namespace
+
+namespace {
+class OpenBSDI386TargetInfo : public OpenBSDTargetInfo<X86_32TargetInfo> {
+public:
+ OpenBSDI386TargetInfo(const std::string& triple) :
+ OpenBSDTargetInfo<X86_32TargetInfo>(triple) {
+ SizeType = UnsignedLong;
+ IntPtrType = SignedLong;
+ PtrDiffType = SignedLong;
+ }
+};
+} // end anonymous namespace
+
+namespace {
+class DarwinI386TargetInfo : public DarwinTargetInfo<X86_32TargetInfo> {
+public:
+ DarwinI386TargetInfo(const std::string& triple) :
+ DarwinTargetInfo<X86_32TargetInfo>(triple) {
+ LongDoubleWidth = 128;
+ LongDoubleAlign = 128;
+ SuitableAlign = 128;
+ SizeType = UnsignedLong;
+ IntPtrType = SignedLong;
+ DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-"
+ "a0:0:64-f80:128:128-n8:16:32-S128";
+ HasAlignMac68kSupport = true;
+ }
+
+};
+} // end anonymous namespace
+
+namespace {
+// x86-32 Windows target
+class WindowsX86_32TargetInfo : public WindowsTargetInfo<X86_32TargetInfo> {
+public:
+ WindowsX86_32TargetInfo(const std::string& triple)
+ : WindowsTargetInfo<X86_32TargetInfo>(triple) {
+ TLSSupported = false;
+ WCharType = UnsignedShort;
+ DoubleAlign = LongLongAlign = 64;
+ DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-f80:128:128-v64:64:64-"
+ "v128:128:128-a0:0:64-f80:32:32-n8:16:32-S32";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ WindowsTargetInfo<X86_32TargetInfo>::getTargetDefines(Opts, Builder);
+ }
+};
+} // end anonymous namespace
+
+namespace {
+
+// x86-32 Windows Visual Studio target
+class VisualStudioWindowsX86_32TargetInfo : public WindowsX86_32TargetInfo {
+public:
+ VisualStudioWindowsX86_32TargetInfo(const std::string& triple)
+ : WindowsX86_32TargetInfo(triple) {
+ LongDoubleWidth = LongDoubleAlign = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ WindowsX86_32TargetInfo::getTargetDefines(Opts, Builder);
+ WindowsX86_32TargetInfo::getVisualStudioDefines(Opts, Builder);
+ // The value of the following reflects processor type.
+ // 300=386, 400=486, 500=Pentium, 600=Blend (default)
+ // We lost the original triple, so we use the default.
+ Builder.defineMacro("_M_IX86", "600");
+ }
+};
+} // end anonymous namespace
+
+namespace {
+// x86-32 MinGW target
+class MinGWX86_32TargetInfo : public WindowsX86_32TargetInfo {
+public:
+ MinGWX86_32TargetInfo(const std::string& triple)
+ : WindowsX86_32TargetInfo(triple) {
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ WindowsX86_32TargetInfo::getTargetDefines(Opts, Builder);
+ DefineStd(Builder, "WIN32", Opts);
+ DefineStd(Builder, "WINNT", Opts);
+ Builder.defineMacro("_X86_");
+ Builder.defineMacro("__MSVCRT__");
+ Builder.defineMacro("__MINGW32__");
+
+ // mingw32-gcc provides __declspec(a) as alias of __attribute__((a)).
+ // In contrast, clang-cc1 provides __declspec(a) with -fms-extensions.
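+ // For example (illustrative only):
+ //   __declspec(dllexport) void f();
+ // is kept as written under -fms-extensions, and otherwise expands to
+ //   __attribute__((dllexport)) void f();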
+ if (Opts.MicrosoftExt)
+ // Provide "as-is" __declspec.
+ Builder.defineMacro("__declspec", "__declspec");
+ else
+ // Provide alias of __attribute__ like mingw32-gcc.
+ Builder.defineMacro("__declspec(a)", "__attribute__((a))");
+ }
+};
+} // end anonymous namespace
+
+namespace {
+// x86-32 Cygwin target
+class CygwinX86_32TargetInfo : public X86_32TargetInfo {
+public:
+ CygwinX86_32TargetInfo(const std::string& triple)
+ : X86_32TargetInfo(triple) {
+ TLSSupported = false;
+ WCharType = UnsignedShort;
+ DoubleAlign = LongLongAlign = 64;
+ DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-"
+ "a0:0:64-f80:32:32-n8:16:32-S32";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ X86_32TargetInfo::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("__CYGWIN__");
+ Builder.defineMacro("__CYGWIN32__");
+ DefineStd(Builder, "unix", Opts);
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+ }
+};
+} // end anonymous namespace
+
+namespace {
+// x86-32 Haiku target
+class HaikuX86_32TargetInfo : public X86_32TargetInfo {
+public:
+ HaikuX86_32TargetInfo(const std::string& triple)
+ : X86_32TargetInfo(triple) {
+ SizeType = UnsignedLong;
+ IntPtrType = SignedLong;
+ PtrDiffType = SignedLong;
+ this->UserLabelPrefix = "";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ X86_32TargetInfo::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("__INTEL__");
+ Builder.defineMacro("__HAIKU__");
+ }
+};
+} // end anonymous namespace
+
+// RTEMS Target
+template<typename Target>
+class RTEMSTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ // RTEMS defines; list based on gcc output
+
+ Builder.defineMacro("__rtems__");
+ Builder.defineMacro("__ELF__");
+ }
+public:
+ RTEMSTargetInfo(const std::string &triple)
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+
+ llvm::Triple Triple(triple);
+ switch (Triple.getArch()) {
+ default:
+ case llvm::Triple::x86:
+ // this->MCountName = ".mcount";
+ break;
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ // this->MCountName = "_mcount";
+ break;
+ case llvm::Triple::arm:
+ // this->MCountName = "__mcount";
+ break;
+ }
+
+ }
+};
+
+namespace {
+// x86-32 RTEMS target
+class RTEMSX86_32TargetInfo : public X86_32TargetInfo {
+public:
+ RTEMSX86_32TargetInfo(const std::string& triple)
+ : X86_32TargetInfo(triple) {
+ SizeType = UnsignedLong;
+ IntPtrType = SignedLong;
+ PtrDiffType = SignedLong;
+ this->UserLabelPrefix = "";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ X86_32TargetInfo::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("__INTEL__");
+ Builder.defineMacro("__rtems__");
+ }
+};
+} // end anonymous namespace
+
+namespace {
+// x86-64 generic target
+class X86_64TargetInfo : public X86TargetInfo {
+public:
+ X86_64TargetInfo(const std::string &triple) : X86TargetInfo(triple) {
+ LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
+ LongDoubleWidth = 128;
+ LongDoubleAlign = 128;
+ LargeArrayMinWidth = 128;
+ LargeArrayAlign = 128;
+ SuitableAlign = 128;
+ IntMaxType = SignedLong;
+ UIntMaxType = UnsignedLong;
+ Int64Type = SignedLong;
+ RegParmMax = 6;
+
+ DescriptionString = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-"
+ "a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128";
+
+ // Use fpret only for long double.
+ RealTypeUsesObjCFPRet = (1 << TargetInfo::LongDouble);
+
+ // Use fp2ret for _Complex long double.
+ ComplexLongDoubleUsesFP2Ret = true;
+
+ // x86-64 has atomics up to 16 bytes.
+ // FIXME: Once the backend is fixed, increase MaxAtomicInlineWidth to 128
+ // on CPUs with cmpxchg16b
+ MaxAtomicPromoteWidth = 128;
+ MaxAtomicInlineWidth = 64;
+ }
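+ // This matches the va_list layout given in the System V x86-64 ABI: a
+ // one-element array of a register-save-area descriptor.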
+ virtual const char *getVAListDeclaration() const {
+ return "typedef struct __va_list_tag {"
+ " unsigned gp_offset;"
+ " unsigned fp_offset;"
+ " void* overflow_arg_area;"
+ " void* reg_save_area;"
+ "} __va_list_tag;"
+ "typedef __va_list_tag __builtin_va_list[1];";
+ }
+
+ int getEHDataRegisterNumber(unsigned RegNo) const {
+ if (RegNo == 0) return 0;
+ if (RegNo == 1) return 1;
+ return -1;
+ }
+};
+} // end anonymous namespace
+
+namespace {
+// x86-64 Windows target
+class WindowsX86_64TargetInfo : public WindowsTargetInfo<X86_64TargetInfo> {
+public:
+ WindowsX86_64TargetInfo(const std::string& triple)
+ : WindowsTargetInfo<X86_64TargetInfo>(triple) {
+ TLSSupported = false;
+ WCharType = UnsignedShort;
+ LongWidth = LongAlign = 32;
+ DoubleAlign = LongLongAlign = 64;
+ IntMaxType = SignedLongLong;
+ UIntMaxType = UnsignedLongLong;
+ Int64Type = SignedLongLong;
+ SizeType = UnsignedLongLong;
+ PtrDiffType = SignedLongLong;
+ IntPtrType = SignedLongLong;
+ this->UserLabelPrefix = "";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ WindowsTargetInfo<X86_64TargetInfo>::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("_WIN64");
+ }
+ virtual const char *getVAListDeclaration() const {
+ return "typedef char* __builtin_va_list;";
+ }
+};
+} // end anonymous namespace
+
+namespace {
+// x86-64 Windows Visual Studio target
+class VisualStudioWindowsX86_64TargetInfo : public WindowsX86_64TargetInfo {
+public:
+ VisualStudioWindowsX86_64TargetInfo(const std::string& triple)
+ : WindowsX86_64TargetInfo(triple) {
+ LongDoubleWidth = LongDoubleAlign = 64;
+ LongDoubleFormat = &llvm::APFloat::IEEEdouble;
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ WindowsX86_64TargetInfo::getTargetDefines(Opts, Builder);
+ WindowsX86_64TargetInfo::getVisualStudioDefines(Opts, Builder);
+ Builder.defineMacro("_M_X64");
+ Builder.defineMacro("_M_AMD64");
+ }
+};
+} // end anonymous namespace
+
+namespace {
+// x86-64 MinGW target
+class MinGWX86_64TargetInfo : public WindowsX86_64TargetInfo {
+public:
+ MinGWX86_64TargetInfo(const std::string& triple)
+ : WindowsX86_64TargetInfo(triple) {
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ WindowsX86_64TargetInfo::getTargetDefines(Opts, Builder);
+ DefineStd(Builder, "WIN64", Opts);
+ Builder.defineMacro("__MSVCRT__");
+ Builder.defineMacro("__MINGW32__");
+ Builder.defineMacro("__MINGW64__");
+
+ // mingw32-gcc provides __declspec(a) as alias of __attribute__((a)).
+ // In contrast, clang-cc1 provides __declspec(a) with -fms-extensions.
+ if (Opts.MicrosoftExt)
+ // Provide "as-is" __declspec.
+ Builder.defineMacro("__declspec", "__declspec");
+ else
+ // Provide alias of __attribute__ like mingw32-gcc.
+ Builder.defineMacro("__declspec(a)", "__attribute__((a))");
+ }
+};
+} // end anonymous namespace
+
+namespace {
+class DarwinX86_64TargetInfo : public DarwinTargetInfo<X86_64TargetInfo> {
+public:
+ DarwinX86_64TargetInfo(const std::string& triple)
+ : DarwinTargetInfo<X86_64TargetInfo>(triple) {
+ Int64Type = SignedLongLong;
+ }
+};
+} // end anonymous namespace
+
+namespace {
+class OpenBSDX86_64TargetInfo : public OpenBSDTargetInfo<X86_64TargetInfo> {
+public:
+ OpenBSDX86_64TargetInfo(const std::string& triple)
+ : OpenBSDTargetInfo<X86_64TargetInfo>(triple) {
+ IntMaxType = SignedLongLong;
+ UIntMaxType = UnsignedLongLong;
+ Int64Type = SignedLongLong;
+ }
+};
+} // end anonymous namespace
+
+namespace {
+class ARMTargetInfo : public TargetInfo {
+ // Possible FPU choices.
+ enum FPUMode {
+ NoFPU,
+ VFP2FPU,
+ VFP3FPU,
+ NeonFPU
+ };
+
+ static bool FPUModeIsVFP(FPUMode Mode) {
+ return Mode >= VFP2FPU && Mode <= NeonFPU;
+ }
+
+ static const TargetInfo::GCCRegAlias GCCRegAliases[];
+ static const char * const GCCRegNames[];
+
+ std::string ABI, CPU;
+
+ unsigned FPU : 3;
+
+ unsigned IsThumb : 1;
+
+ // Initialized via features.
+ unsigned SoftFloat : 1;
+ unsigned SoftFloatABI : 1;
+
+ static const Builtin::Info BuiltinInfo[];
+
+public:
+ ARMTargetInfo(const std::string &TripleStr)
+ : TargetInfo(TripleStr), ABI("aapcs-linux"), CPU("arm1136j-s")
+ {
+ BigEndian = false;
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ // AAPCS 7.1.1, ARM-Linux ABI 2.4: type of wchar_t is unsigned int.
+ WCharType = UnsignedInt;
+
+ // {} in inline assembly are neon specifiers, not assembly variant
+ // specifiers.
+ NoAsmVariants = true;
+
+ // FIXME: Should we just treat this as a feature?
+ IsThumb = getTriple().getArchName().startswith("thumb");
+ if (IsThumb) {
+      // Thumb1 "add sp, #imm" requires the immediate value to be a multiple
+      // of 4, so set the preferred alignment for small types to 32.
+ DescriptionString = ("e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-"
+ "v64:64:64-v128:64:128-a0:0:32-n32-S64");
+ } else {
+ DescriptionString = ("e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-"
+ "v64:64:64-v128:64:128-a0:0:64-n32-S64");
+ }
+
+ // ARM targets default to using the ARM C++ ABI.
+ CXXABI = CXXABI_ARM;
+
+ // ARM has atomics up to 8 bytes
+ // FIXME: Set MaxAtomicInlineWidth if we have the feature v6e
+ MaxAtomicPromoteWidth = 64;
+
+    // Do force alignment of members that follow zero-length bitfields: if the
+    // alignment of a zero-length bitfield is greater than that of the member
+    // that follows it (`bar'), then `bar' is aligned as the type of the
+    // zero-length bitfield.
+ UseZeroLengthBitfieldAlignment = true;
+ }
+ virtual const char *getABI() const { return ABI.c_str(); }
+ virtual bool setABI(const std::string &Name) {
+ ABI = Name;
+
+ // The defaults (above) are for AAPCS, check if we need to change them.
+ //
+ // FIXME: We need support for -meabi... we could just mangle it into the
+ // name.
+ if (Name == "apcs-gnu") {
+ DoubleAlign = LongLongAlign = LongDoubleAlign = SuitableAlign = 32;
+ SizeType = UnsignedLong;
+
+ // Revert to using SignedInt on apcs-gnu to comply with existing behaviour.
+ WCharType = SignedInt;
+
+ // Do not respect the alignment of bit-field types when laying out
+ // structures. This corresponds to PCC_BITFIELD_TYPE_MATTERS in gcc.
+ UseBitFieldTypeAlignment = false;
+
+      // gcc forces the alignment to 4 bytes, regardless of the type of the
+      // zero-length bitfield. This corresponds to EMPTY_FIELD_BOUNDARY in
+      // gcc.
+ ZeroLengthBitfieldBoundary = 32;
+
+ if (IsThumb) {
+        // Thumb1 "add sp, #imm" requires the immediate value to be a multiple
+        // of 4, so set the preferred alignment for small types to 32.
+ DescriptionString = ("e-p:32:32:32-i1:8:32-i8:8:32-i16:16:32-i32:32:32-"
+ "i64:32:64-f32:32:32-f64:32:64-"
+ "v64:32:64-v128:32:128-a0:0:32-n32-S32");
+ } else {
+ DescriptionString = ("e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:32:64-f32:32:32-f64:32:64-"
+ "v64:32:64-v128:32:128-a0:0:32-n32-S32");
+ }
+
+ // FIXME: Override "preferred align" for double and long long.
+ } else if (Name == "aapcs") {
+ // FIXME: Enumerated types are variable width in straight AAPCS.
+ } else if (Name == "aapcs-linux") {
+ ;
+ } else
+ return false;
+
+ return true;
+ }
+
+ void getDefaultFeatures(llvm::StringMap<bool> &Features) const {
+ if (CPU == "arm1136jf-s" || CPU == "arm1176jzf-s" || CPU == "mpcore")
+ Features["vfp2"] = true;
+ else if (CPU == "cortex-a8" || CPU == "cortex-a9")
+ Features["neon"] = true;
+ }
+
+ virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name,
+ bool Enabled) const {
+ if (Name == "soft-float" || Name == "soft-float-abi" ||
+ Name == "vfp2" || Name == "vfp3" || Name == "neon" || Name == "d16" ||
+ Name == "neonfp") {
+ Features[Name] = Enabled;
+ } else
+ return false;
+
+ return true;
+ }
+
+ virtual void HandleTargetFeatures(std::vector<std::string> &Features) {
+ FPU = NoFPU;
+ SoftFloat = SoftFloatABI = false;
+ for (unsigned i = 0, e = Features.size(); i != e; ++i) {
+ if (Features[i] == "+soft-float")
+ SoftFloat = true;
+ else if (Features[i] == "+soft-float-abi")
+ SoftFloatABI = true;
+ else if (Features[i] == "+vfp2")
+ FPU = VFP2FPU;
+ else if (Features[i] == "+vfp3")
+ FPU = VFP3FPU;
+ else if (Features[i] == "+neon")
+ FPU = NeonFPU;
+ }
+
+ // Remove front-end specific options which the backend handles differently.
+ std::vector<std::string>::iterator it;
+ it = std::find(Features.begin(), Features.end(), "+soft-float");
+ if (it != Features.end())
+ Features.erase(it);
+ it = std::find(Features.begin(), Features.end(), "+soft-float-abi");
+ if (it != Features.end())
+ Features.erase(it);
+ }
+
+ virtual bool hasFeature(StringRef Feature) const {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("arm", true)
+ .Case("softfloat", SoftFloat)
+ .Case("thumb", IsThumb)
+ .Case("neon", FPU == NeonFPU && !SoftFloat &&
+ StringRef(getCPUDefineSuffix(CPU)).startswith("7"))
+ .Default(false);
+ }
+ static const char *getCPUDefineSuffix(StringRef Name) {
+ return llvm::StringSwitch<const char*>(Name)
+ .Cases("arm8", "arm810", "4")
+ .Cases("strongarm", "strongarm110", "strongarm1100", "strongarm1110", "4")
+ .Cases("arm7tdmi", "arm7tdmi-s", "arm710t", "arm720t", "arm9", "4T")
+ .Cases("arm9tdmi", "arm920", "arm920t", "arm922t", "arm940t", "4T")
+ .Case("ep9312", "4T")
+ .Cases("arm10tdmi", "arm1020t", "5T")
+ .Cases("arm9e", "arm946e-s", "arm966e-s", "arm968e-s", "5TE")
+ .Case("arm926ej-s", "5TEJ")
+ .Cases("arm10e", "arm1020e", "arm1022e", "5TE")
+ .Cases("xscale", "iwmmxt", "5TE")
+ .Case("arm1136j-s", "6J")
+ .Cases("arm1176jz-s", "arm1176jzf-s", "6ZK")
+ .Cases("arm1136jf-s", "mpcorenovfp", "mpcore", "6K")
+ .Cases("arm1156t2-s", "arm1156t2f-s", "6T2")
+ .Cases("cortex-a8", "cortex-a9", "7A")
+ .Case("cortex-m3", "7M")
+ .Case("cortex-m4", "7M")
+ .Case("cortex-m0", "6M")
+ .Default(0);
+ }
+ virtual bool setCPU(const std::string &Name) {
+ if (!getCPUDefineSuffix(Name))
+ return false;
+
+ CPU = Name;
+ return true;
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Target identification.
+ Builder.defineMacro("__arm");
+ Builder.defineMacro("__arm__");
+
+ // Target properties.
+ Builder.defineMacro("__ARMEL__");
+ Builder.defineMacro("__LITTLE_ENDIAN__");
+ Builder.defineMacro("__REGISTER_PREFIX__", "");
+
+ StringRef CPUArch = getCPUDefineSuffix(CPU);
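+    // e.g. "cortex-a8" maps to the suffix "7A" (see getCPUDefineSuffix), so
+    // this defines __ARM_ARCH_7A__.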
+ Builder.defineMacro("__ARM_ARCH_" + CPUArch + "__");
+
+ // Subtarget options.
+
+ // FIXME: It's more complicated than this and we don't really support
+ // interworking.
+ if ('5' <= CPUArch[0] && CPUArch[0] <= '7')
+ Builder.defineMacro("__THUMB_INTERWORK__");
+
+ if (ABI == "aapcs" || ABI == "aapcs-linux")
+ Builder.defineMacro("__ARM_EABI__");
+
+ if (SoftFloat)
+ Builder.defineMacro("__SOFTFP__");
+
+ if (CPU == "xscale")
+ Builder.defineMacro("__XSCALE__");
+
+ bool IsARMv7 = CPUArch.startswith("7");
+ if (IsThumb) {
+ Builder.defineMacro("__THUMBEL__");
+ Builder.defineMacro("__thumb__");
+ if (CPUArch == "6T2" || IsARMv7)
+ Builder.defineMacro("__thumb2__");
+ }
+
+    // Note: this is always defined in gcc, even though it doesn't make sense.
+ Builder.defineMacro("__APCS_32__");
+
+ if (FPUModeIsVFP((FPUMode) FPU))
+ Builder.defineMacro("__VFP_FP__");
+
+    // Unlike the VFP define, this only gets set when Neon instructions are
+    // actually available, hence the soft-float and architecture checks. This
+    // is subtly different from gcc; we follow the intent, which was that the
+    // macro should be set only when Neon instructions are actually available.
+ if (FPU == NeonFPU && !SoftFloat && IsARMv7)
+ Builder.defineMacro("__ARM_NEON__");
+ }
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {
+ Records = BuiltinInfo;
+ NumRecords = clang::ARM::LastTSBuiltin-Builtin::FirstTSBuiltin;
+ }
+ virtual bool isCLZForZeroUndef() const { return false; }
+ virtual const char *getVAListDeclaration() const {
+ return "typedef void* __builtin_va_list;";
+ }
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const;
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const;
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const {
+ // FIXME: Check if this is complete
+ switch (*Name) {
+ default:
+ case 'l': // r0-r7
+ case 'h': // r8-r15
+ case 'w': // VFP Floating point register single precision
+ case 'P': // VFP Floating point register double precision
+ Info.setAllowsRegister();
+ return true;
+ case 'Q': // A memory address that is a single base register.
+ Info.setAllowsMemory();
+ return true;
+ case 'U': // a memory reference...
+ switch (Name[1]) {
+ case 'q': // ...ARMV4 ldrsb
+ case 'v': // ...VFP load/store (reg+constant offset)
+ case 'y': // ...iWMMXt load/store
+ case 't': // address valid for load/store opaque types wider
+ // than 128-bits
+ case 'n': // valid address for Neon doubleword vector load/store
+ case 'm': // valid address for Neon element and structure load/store
+ case 's': // valid address for non-offset loads/stores of quad-word
+ // values in four ARM registers
+ Info.setAllowsMemory();
+ Name++;
+ return true;
+ }
+ }
+ return false;
+ }
+ virtual std::string convertConstraint(const char *&Constraint) const {
+ std::string R;
+ switch (*Constraint) {
+ case 'U': // Two-character constraint; add "^" hint for later parsing.
+ R = std::string("^") + std::string(Constraint, 2);
+ Constraint++;
+ break;
+ case 'p': // 'p' should be translated to 'r' by default.
+ R = std::string("r");
+ break;
+ default:
+ return std::string(1, *Constraint);
+ }
+ return R;
+ }
+ virtual const char *getClobbers() const {
+ // FIXME: Is this really right?
+ return "";
+ }
+};
+
+const char * const ARMTargetInfo::GCCRegNames[] = {
+ // Integer registers
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "sp", "lr", "pc",
+
+ // Float registers
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+ "s8", "s9", "s10", "s11", "s12", "s13", "s14", "s15",
+ "s16", "s17", "s18", "s19", "s20", "s21", "s22", "s23",
+ "s24", "s25", "s26", "s27", "s28", "s29", "s30", "s31",
+
+ // Double registers
+ "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7",
+ "d8", "d9", "d10", "d11", "d12", "d13", "d14", "d15",
+ "d16", "d17", "d18", "d19", "d20", "d21", "d22", "d23",
+ "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31",
+
+ // Quad registers
+ "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+};
+
+void ARMTargetInfo::getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {
+ Names = GCCRegNames;
+ NumNames = llvm::array_lengthof(GCCRegNames);
+}
+
+const TargetInfo::GCCRegAlias ARMTargetInfo::GCCRegAliases[] = {
+ { { "a1" }, "r0" },
+ { { "a2" }, "r1" },
+ { { "a3" }, "r2" },
+ { { "a4" }, "r3" },
+ { { "v1" }, "r4" },
+ { { "v2" }, "r5" },
+ { { "v3" }, "r6" },
+ { { "v4" }, "r7" },
+ { { "v5" }, "r8" },
+ { { "v6", "rfp" }, "r9" },
+ { { "sl" }, "r10" },
+ { { "fp" }, "r11" },
+ { { "ip" }, "r12" },
+ { { "r13" }, "sp" },
+ { { "r14" }, "lr" },
+ { { "r15" }, "pc" },
+ // The S, D and Q registers overlap, but aren't really aliases; we
+ // don't want to substitute one of these for a different-sized one.
+};
+
+void ARMTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ Aliases = GCCRegAliases;
+ NumAliases = llvm::array_lengthof(GCCRegAliases);
+}
+
+const Builtin::Info ARMTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
+ ALL_LANGUAGES },
+#include "clang/Basic/BuiltinsARM.def"
+};
+} // end anonymous namespace.
+
+namespace {
+class DarwinARMTargetInfo :
+ public DarwinTargetInfo<ARMTargetInfo> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ getDarwinDefines(Builder, Opts, Triple, PlatformName, PlatformMinVersion);
+ }
+
+public:
+ DarwinARMTargetInfo(const std::string& triple)
+ : DarwinTargetInfo<ARMTargetInfo>(triple) {
+ HasAlignMac68kSupport = true;
+ // iOS always has 64-bit atomic instructions.
+ // FIXME: This should be based off of the target features in ARMTargetInfo.
+ MaxAtomicInlineWidth = 64;
+ }
+};
+} // end anonymous namespace.
+
+
+namespace {
+// Hexagon target
+class HexagonTargetInfo : public TargetInfo {
+ static const Builtin::Info BuiltinInfo[];
+ static const char * const GCCRegNames[];
+ static const TargetInfo::GCCRegAlias GCCRegAliases[];
+ std::string CPU;
+public:
+ HexagonTargetInfo(const std::string& triple) : TargetInfo(triple) {
+ BigEndian = false;
+ DescriptionString = ("e-p:32:32:32-"
+ "i64:64:64-i32:32:32-"
+ "i16:16:16-i1:32:32-a:0:0");
+
+ // {} in inline assembly are packet specifiers, not assembly variant
+ // specifiers.
+ NoAsmVariants = true;
+ }
+
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {
+ Records = BuiltinInfo;
+ NumRecords = clang::Hexagon::LastTSBuiltin-Builtin::FirstTSBuiltin;
+ }
+
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const {
+ return true;
+ }
+
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const;
+
+ virtual bool hasFeature(StringRef Feature) const {
+ return Feature == "hexagon";
+ }
+
+ virtual const char *getVAListDeclaration() const {
+ return "typedef char* __builtin_va_list;";
+ }
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const;
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const;
+ virtual const char *getClobbers() const {
+ return "";
+ }
+
+ static const char *getHexagonCPUSuffix(StringRef Name) {
+ return llvm::StringSwitch<const char*>(Name)
+ .Case("hexagonv2", "2")
+ .Case("hexagonv3", "3")
+ .Case("hexagonv4", "4")
+ .Default(0);
+ }
+
+ virtual bool setCPU(const std::string &Name) {
+ if (!getHexagonCPUSuffix(Name))
+ return false;
+
+ CPU = Name;
+ return true;
+ }
+};
+
+void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("qdsp6");
+ Builder.defineMacro("__qdsp6", "1");
+ Builder.defineMacro("__qdsp6__", "1");
+
+ Builder.defineMacro("hexagon");
+ Builder.defineMacro("__hexagon", "1");
+ Builder.defineMacro("__hexagon__", "1");
+
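+  // Note: setCPU() only accepts hexagonv2/v3/v4 (see getHexagonCPUSuffix), so
+  // the hexagonv1 branch below is reachable only if CPU is set by other means.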
+ if(CPU == "hexagonv1") {
+ Builder.defineMacro("__HEXAGON_V1__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "1");
+ if(Opts.HexagonQdsp6Compat) {
+ Builder.defineMacro("__QDSP6_V1__");
+ Builder.defineMacro("__QDSP6_ARCH__", "1");
+ }
+ }
+ else if(CPU == "hexagonv2") {
+ Builder.defineMacro("__HEXAGON_V2__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "2");
+ if(Opts.HexagonQdsp6Compat) {
+ Builder.defineMacro("__QDSP6_V2__");
+ Builder.defineMacro("__QDSP6_ARCH__", "2");
+ }
+ }
+ else if(CPU == "hexagonv3") {
+ Builder.defineMacro("__HEXAGON_V3__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "3");
+ if(Opts.HexagonQdsp6Compat) {
+ Builder.defineMacro("__QDSP6_V3__");
+ Builder.defineMacro("__QDSP6_ARCH__", "3");
+ }
+ }
+ else if(CPU == "hexagonv4") {
+ Builder.defineMacro("__HEXAGON_V4__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "4");
+ if(Opts.HexagonQdsp6Compat) {
+ Builder.defineMacro("__QDSP6_V4__");
+ Builder.defineMacro("__QDSP6_ARCH__", "4");
+ }
+ }
+}
+
+const char * const HexagonTargetInfo::GCCRegNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31",
+ "p0", "p1", "p2", "p3",
+ "sa0", "lc0", "sa1", "lc1", "m0", "m1", "usr", "ugp"
+};
+
+void HexagonTargetInfo::getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {
+ Names = GCCRegNames;
+ NumNames = llvm::array_lengthof(GCCRegNames);
+}
+
+
+const TargetInfo::GCCRegAlias HexagonTargetInfo::GCCRegAliases[] = {
+ { { "sp" }, "r29" },
+ { { "fp" }, "r30" },
+ { { "lr" }, "r31" },
+ };
+
+void HexagonTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ Aliases = GCCRegAliases;
+ NumAliases = llvm::array_lengthof(GCCRegAliases);
+}
+
+
+const Builtin::Info HexagonTargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) { #ID, TYPE, ATTRS, 0, ALL_LANGUAGES },
+#define LIBBUILTIN(ID, TYPE, ATTRS, HEADER) { #ID, TYPE, ATTRS, HEADER,\
+ ALL_LANGUAGES },
+#include "clang/Basic/BuiltinsHexagon.def"
+};
+}
+
+
+namespace {
+class SparcV8TargetInfo : public TargetInfo {
+ static const TargetInfo::GCCRegAlias GCCRegAliases[];
+ static const char * const GCCRegNames[];
+ bool SoftFloat;
+public:
+ SparcV8TargetInfo(const std::string& triple) : TargetInfo(triple) {
+ // FIXME: Support Sparc quad-precision long double?
+    BigEndian = true;  // SPARC V8 is big-endian, matching the "E-" layout below.
+ DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32";
+ }
+ virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name,
+ bool Enabled) const {
+ if (Name == "soft-float")
+ Features[Name] = Enabled;
+ else
+ return false;
+
+ return true;
+ }
+ virtual void HandleTargetFeatures(std::vector<std::string> &Features) {
+ SoftFloat = false;
+ for (unsigned i = 0, e = Features.size(); i != e; ++i)
+ if (Features[i] == "+soft-float")
+ SoftFloat = true;
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ DefineStd(Builder, "sparc", Opts);
+ Builder.defineMacro("__sparcv8");
+ Builder.defineMacro("__REGISTER_PREFIX__", "");
+
+ if (SoftFloat)
+ Builder.defineMacro("SOFT_FLOAT", "1");
+ }
+
+ virtual bool hasFeature(StringRef Feature) const {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("softfloat", SoftFloat)
+ .Case("sparc", true)
+ .Default(false);
+ }
+
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {
+ // FIXME: Implement!
+ }
+ virtual const char *getVAListDeclaration() const {
+ return "typedef void* __builtin_va_list;";
+ }
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const;
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const;
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const {
+ // FIXME: Implement!
+ return false;
+ }
+ virtual const char *getClobbers() const {
+ // FIXME: Implement!
+ return "";
+ }
+};
+
+const char * const SparcV8TargetInfo::GCCRegNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
+};
+
+void SparcV8TargetInfo::getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {
+ Names = GCCRegNames;
+ NumNames = llvm::array_lengthof(GCCRegNames);
+}
+
+const TargetInfo::GCCRegAlias SparcV8TargetInfo::GCCRegAliases[] = {
+ { { "g0" }, "r0" },
+ { { "g1" }, "r1" },
+ { { "g2" }, "r2" },
+ { { "g3" }, "r3" },
+ { { "g4" }, "r4" },
+ { { "g5" }, "r5" },
+ { { "g6" }, "r6" },
+ { { "g7" }, "r7" },
+ { { "o0" }, "r8" },
+ { { "o1" }, "r9" },
+ { { "o2" }, "r10" },
+ { { "o3" }, "r11" },
+ { { "o4" }, "r12" },
+ { { "o5" }, "r13" },
+ { { "o6", "sp" }, "r14" },
+ { { "o7" }, "r15" },
+ { { "l0" }, "r16" },
+ { { "l1" }, "r17" },
+ { { "l2" }, "r18" },
+ { { "l3" }, "r19" },
+ { { "l4" }, "r20" },
+ { { "l5" }, "r21" },
+ { { "l6" }, "r22" },
+ { { "l7" }, "r23" },
+ { { "i0" }, "r24" },
+ { { "i1" }, "r25" },
+ { { "i2" }, "r26" },
+ { { "i3" }, "r27" },
+ { { "i4" }, "r28" },
+ { { "i5" }, "r29" },
+ { { "i6", "fp" }, "r30" },
+ { { "i7" }, "r31" },
+};
+
+void SparcV8TargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ Aliases = GCCRegAliases;
+ NumAliases = llvm::array_lengthof(GCCRegAliases);
+}
+} // end anonymous namespace.
+
+namespace {
+class AuroraUXSparcV8TargetInfo : public AuroraUXTargetInfo<SparcV8TargetInfo> {
+public:
+ AuroraUXSparcV8TargetInfo(const std::string& triple) :
+ AuroraUXTargetInfo<SparcV8TargetInfo>(triple) {
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ }
+};
+class SolarisSparcV8TargetInfo : public SolarisTargetInfo<SparcV8TargetInfo> {
+public:
+ SolarisSparcV8TargetInfo(const std::string& triple) :
+ SolarisTargetInfo<SparcV8TargetInfo>(triple) {
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ }
+};
+} // end anonymous namespace.
+
+namespace {
+ class MSP430TargetInfo : public TargetInfo {
+ static const char * const GCCRegNames[];
+ public:
+ MSP430TargetInfo(const std::string& triple) : TargetInfo(triple) {
+ BigEndian = false;
+ TLSSupported = false;
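+      // MSP430 is a 16-bit target: int and pointers are 16 bits wide, long
+      // is 32 bits, and long long is 64 bits.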
+ IntWidth = 16; IntAlign = 16;
+ LongWidth = 32; LongLongWidth = 64;
+ LongAlign = LongLongAlign = 16;
+ PointerWidth = 16; PointerAlign = 16;
+ SuitableAlign = 16;
+ SizeType = UnsignedInt;
+ IntMaxType = SignedLong;
+ UIntMaxType = UnsignedLong;
+ IntPtrType = SignedShort;
+ PtrDiffType = SignedInt;
+ SigAtomicType = SignedLong;
+ DescriptionString = "e-p:16:16:16-i8:8:8-i16:16:16-i32:16:32-n8:16";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("MSP430");
+ Builder.defineMacro("__MSP430__");
+ // FIXME: defines for different 'flavours' of MCU
+ }
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {
+ // FIXME: Implement.
+ Records = 0;
+ NumRecords = 0;
+ }
+ virtual bool hasFeature(StringRef Feature) const {
+ return Feature == "msp430";
+ }
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const;
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ // No aliases.
+ Aliases = 0;
+ NumAliases = 0;
+ }
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const {
+ // No target constraints for now.
+ return false;
+ }
+ virtual const char *getClobbers() const {
+ // FIXME: Is this really right?
+ return "";
+ }
+ virtual const char *getVAListDeclaration() const {
+ // FIXME: implement
+ return "typedef char* __builtin_va_list;";
+ }
+ };
+
+ const char * const MSP430TargetInfo::GCCRegNames[] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+ };
+
+ void MSP430TargetInfo::getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {
+ Names = GCCRegNames;
+ NumNames = llvm::array_lengthof(GCCRegNames);
+ }
+}
+
+namespace {
+
+  // LLVM and Clang cannot be used directly to produce native binaries for
+  // this target, but they are used to compile C code to LLVM bitcode with
+  // correct type and alignment information.
+  //
+  // TCE uses the LLVM bitcode as input and uses it to generate a customized
+  // target processor and program binary. The TCE co-design environment is
+  // publicly available at http://tce.cs.tut.fi
+
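+  // Address-space numbers handed to LLVM for the OpenCL global, local and
+  // constant address-space qualifiers on this target.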
+ static const unsigned TCEOpenCLAddrSpaceMap[] = {
+ 3, // opencl_global
+ 4, // opencl_local
+ 5 // opencl_constant
+ };
+
+ class TCETargetInfo : public TargetInfo{
+ public:
+ TCETargetInfo(const std::string& triple) : TargetInfo(triple) {
+ TLSSupported = false;
+ IntWidth = 32;
+ LongWidth = LongLongWidth = 32;
+ PointerWidth = 32;
+ IntAlign = 32;
+ LongAlign = LongLongAlign = 32;
+ PointerAlign = 32;
+ SuitableAlign = 32;
+ SizeType = UnsignedInt;
+ IntMaxType = SignedLong;
+ UIntMaxType = UnsignedLong;
+ IntPtrType = SignedInt;
+ PtrDiffType = SignedInt;
+ FloatWidth = 32;
+ FloatAlign = 32;
+ DoubleWidth = 32;
+ DoubleAlign = 32;
+ LongDoubleWidth = 32;
+ LongDoubleAlign = 32;
+ FloatFormat = &llvm::APFloat::IEEEsingle;
+ DoubleFormat = &llvm::APFloat::IEEEsingle;
+ LongDoubleFormat = &llvm::APFloat::IEEEsingle;
+ DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:32-"
+ "i16:16:32-i32:32:32-i64:32:32-"
+ "f32:32:32-f64:32:32-v64:32:32-"
+ "v128:32:32-a0:0:32-n32";
+ AddrSpaceMap = &TCEOpenCLAddrSpaceMap;
+ }
+
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ DefineStd(Builder, "tce", Opts);
+ Builder.defineMacro("__TCE__");
+ Builder.defineMacro("__TCE_V1__");
+ }
+ virtual bool hasFeature(StringRef Feature) const {
+ return Feature == "tce";
+ }
+
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {}
+ virtual const char *getClobbers() const {
+ return "";
+ }
+ virtual const char *getVAListDeclaration() const {
+ return "typedef void* __builtin_va_list;";
+ }
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {}
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const {
+ return true;
+ }
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {}
+ };
+}
+
+namespace {
+class MipsTargetInfoBase : public TargetInfo {
+ std::string CPU;
+ bool SoftFloat;
+ bool SingleFloat;
+
+protected:
+ std::string ABI;
+
+public:
+ MipsTargetInfoBase(const std::string& triple,
+ const std::string& ABIStr,
+ const std::string& CPUStr)
+ : TargetInfo(triple),
+ CPU(CPUStr),
+ SoftFloat(false), SingleFloat(false),
+ ABI(ABIStr)
+ {}
+
+ virtual const char *getABI() const { return ABI.c_str(); }
+ virtual bool setABI(const std::string &Name) = 0;
+ virtual bool setCPU(const std::string &Name) {
+ CPU = Name;
+ return true;
+ }
+ void getDefaultFeatures(llvm::StringMap<bool> &Features) const {
+ Features[ABI] = true;
+ Features[CPU] = true;
+ }
+
+ virtual void getArchDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ if (SoftFloat)
+ Builder.defineMacro("__mips_soft_float", Twine(1));
+ else if (SingleFloat)
+ Builder.defineMacro("__mips_single_float", Twine(1));
+ else if (!SoftFloat && !SingleFloat)
+ Builder.defineMacro("__mips_hard_float", Twine(1));
+ else
+ llvm_unreachable("Invalid float ABI for Mips.");
+
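+    // Report the pointer, int and long widths in bits; e.g. 32/32/32 for O32
+    // and 64/32/64 for the N64 ABI.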
+ Builder.defineMacro("_MIPS_SZPTR", Twine(getPointerWidth(0)));
+ Builder.defineMacro("_MIPS_SZINT", Twine(getIntWidth()));
+ Builder.defineMacro("_MIPS_SZLONG", Twine(getLongWidth()));
+ }
+
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const = 0;
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {
+ // FIXME: Implement!
+ }
+ virtual bool hasFeature(StringRef Feature) const {
+ return Feature == "mips";
+ }
+ virtual const char *getVAListDeclaration() const {
+ return "typedef void* __builtin_va_list;";
+ }
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {
+ static const char * const GCCRegNames[] = {
+ // CPU register names
+ // Must match second column of GCCRegAliases
+ "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7",
+ "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15",
+ "$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23",
+ "$24", "$25", "$26", "$27", "$28", "$29", "$30", "$31",
+ // Floating point register names
+ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
+ "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
+ "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
+ "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "$f30", "$f31",
+ // Hi/lo and condition register names
+ "hi", "lo", "", "$fcc0","$fcc1","$fcc2","$fcc3","$fcc4",
+ "$fcc5","$fcc6","$fcc7"
+ };
+ Names = GCCRegNames;
+ NumNames = llvm::array_lengthof(GCCRegNames);
+ }
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const = 0;
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const {
+ switch (*Name) {
+ default:
+ return false;
+
+ case 'r': // CPU registers.
+ case 'd': // Equivalent to "r" unless generating MIPS16 code.
+ case 'y': // Equivalent to "r", backwards compatibility only.
+ case 'f': // floating-point registers.
+ case 'c': // $25 for indirect jumps
+ case 'l': // lo register
+ case 'x': // hilo register pair
+ Info.setAllowsRegister();
+ return true;
+ }
+ }
+
+ virtual const char *getClobbers() const {
+ // FIXME: Implement!
+ return "";
+ }
+
+ virtual bool setFeatureEnabled(llvm::StringMap<bool> &Features,
+ StringRef Name,
+ bool Enabled) const {
+ if (Name == "soft-float" || Name == "single-float") {
+ Features[Name] = Enabled;
+ return true;
+ }
+ return false;
+ }
+
+ virtual void HandleTargetFeatures(std::vector<std::string> &Features) {
+ SoftFloat = false;
+ SingleFloat = false;
+
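+    // Scan the feature list; whichever of +single-float or +soft-float is
+    // found first wins, and the loop stops there.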
+ for (std::vector<std::string>::iterator it = Features.begin(),
+ ie = Features.end(); it != ie; ++it) {
+ if (*it == "+single-float") {
+ SingleFloat = true;
+ break;
+ }
+
+ if (*it == "+soft-float") {
+ SoftFloat = true;
+        // This option is front-end specific; there is no need to pass it to
+        // the backend.
+ Features.erase(it);
+ break;
+ }
+ }
+ }
+};
+
+class Mips32TargetInfoBase : public MipsTargetInfoBase {
+public:
+ Mips32TargetInfoBase(const std::string& triple) :
+ MipsTargetInfoBase(triple, "o32", "mips32") {
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ }
+ virtual bool setABI(const std::string &Name) {
+ if ((Name == "o32") || (Name == "eabi")) {
+ ABI = Name;
+ return true;
+ } else
+ return false;
+ }
+ virtual void getArchDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ MipsTargetInfoBase::getArchDefines(Opts, Builder);
+
+ if (ABI == "o32") {
+ Builder.defineMacro("__mips_o32");
+ Builder.defineMacro("_ABIO32", "1");
+ Builder.defineMacro("_MIPS_SIM", "_ABIO32");
+ }
+ else if (ABI == "eabi")
+ Builder.defineMacro("__mips_eabi");
+ else
+ llvm_unreachable("Invalid ABI for Mips32.");
+ }
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
+ { { "at" }, "$1" },
+ { { "v0" }, "$2" },
+ { { "v1" }, "$3" },
+ { { "a0" }, "$4" },
+ { { "a1" }, "$5" },
+ { { "a2" }, "$6" },
+ { { "a3" }, "$7" },
+ { { "t0" }, "$8" },
+ { { "t1" }, "$9" },
+ { { "t2" }, "$10" },
+ { { "t3" }, "$11" },
+ { { "t4" }, "$12" },
+ { { "t5" }, "$13" },
+ { { "t6" }, "$14" },
+ { { "t7" }, "$15" },
+ { { "s0" }, "$16" },
+ { { "s1" }, "$17" },
+ { { "s2" }, "$18" },
+ { { "s3" }, "$19" },
+ { { "s4" }, "$20" },
+ { { "s5" }, "$21" },
+ { { "s6" }, "$22" },
+ { { "s7" }, "$23" },
+ { { "t8" }, "$24" },
+ { { "t9" }, "$25" },
+ { { "k0" }, "$26" },
+ { { "k1" }, "$27" },
+ { { "gp" }, "$28" },
+ { { "sp","$sp" }, "$29" },
+ { { "fp","$fp" }, "$30" },
+ { { "ra" }, "$31" }
+ };
+ Aliases = GCCRegAliases;
+ NumAliases = llvm::array_lengthof(GCCRegAliases);
+ }
+};
+
+class Mips32EBTargetInfo : public Mips32TargetInfoBase {
+public:
+ Mips32EBTargetInfo(const std::string& triple) : Mips32TargetInfoBase(triple) {
+ DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ DefineStd(Builder, "mips", Opts);
+ Builder.defineMacro("_mips");
+ DefineStd(Builder, "MIPSEB", Opts);
+ Builder.defineMacro("_MIPSEB");
+ Builder.defineMacro("__REGISTER_PREFIX__", "");
+ getArchDefines(Opts, Builder);
+ }
+};
+
+class Mips32ELTargetInfo : public Mips32TargetInfoBase {
+public:
+ Mips32ELTargetInfo(const std::string& triple) : Mips32TargetInfoBase(triple) {
+ BigEndian = false;
+ DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-v64:64:64-n32";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ DefineStd(Builder, "mips", Opts);
+ Builder.defineMacro("_mips");
+ DefineStd(Builder, "MIPSEL", Opts);
+ Builder.defineMacro("_MIPSEL");
+ Builder.defineMacro("__REGISTER_PREFIX__", "");
+ getArchDefines(Opts, Builder);
+ }
+};
+
+class Mips64TargetInfoBase : public MipsTargetInfoBase {
+ virtual void SetDescriptionString(const std::string &Name) = 0;
+public:
+ Mips64TargetInfoBase(const std::string& triple) :
+ MipsTargetInfoBase(triple, "n64", "mips64") {
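+    // The defaults below describe the n64 ABI; setABI("n32") narrows long and
+    // pointer back to 32 bits.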
+ LongWidth = LongAlign = 64;
+ PointerWidth = PointerAlign = 64;
+ LongDoubleWidth = LongDoubleAlign = 128;
+ LongDoubleFormat = &llvm::APFloat::IEEEquad;
+ SuitableAlign = 128;
+ }
+ virtual bool setABI(const std::string &Name) {
+ SetDescriptionString(Name);
+
+ if (Name != "n32" && Name != "n64")
+ return false;
+
+ ABI = Name;
+
+ if (Name == "n32") {
+ LongWidth = LongAlign = 32;
+ PointerWidth = PointerAlign = 32;
+ }
+
+ return true;
+ }
+ virtual void getArchDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ MipsTargetInfoBase::getArchDefines(Opts, Builder);
+
+ if (ABI == "n32") {
+ Builder.defineMacro("__mips_n32");
+ Builder.defineMacro("_ABIN32", "2");
+ Builder.defineMacro("_MIPS_SIM", "_ABIN32");
+ }
+ else if (ABI == "n64") {
+ Builder.defineMacro("__mips_n64");
+ Builder.defineMacro("_ABI64", "3");
+ Builder.defineMacro("_MIPS_SIM", "_ABI64");
+ }
+ else
+ llvm_unreachable("Invalid ABI for Mips64.");
+ }
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
+ { { "at" }, "$1" },
+ { { "v0" }, "$2" },
+ { { "v1" }, "$3" },
+ { { "a0" }, "$4" },
+ { { "a1" }, "$5" },
+ { { "a2" }, "$6" },
+ { { "a3" }, "$7" },
+ { { "a4" }, "$8" },
+ { { "a5" }, "$9" },
+ { { "a6" }, "$10" },
+ { { "a7" }, "$11" },
+ { { "t0" }, "$12" },
+ { { "t1" }, "$13" },
+ { { "t2" }, "$14" },
+ { { "t3" }, "$15" },
+ { { "s0" }, "$16" },
+ { { "s1" }, "$17" },
+ { { "s2" }, "$18" },
+ { { "s3" }, "$19" },
+ { { "s4" }, "$20" },
+ { { "s5" }, "$21" },
+ { { "s6" }, "$22" },
+ { { "s7" }, "$23" },
+ { { "t8" }, "$24" },
+ { { "t9" }, "$25" },
+ { { "k0" }, "$26" },
+ { { "k1" }, "$27" },
+ { { "gp" }, "$28" },
+ { { "sp","$sp" }, "$29" },
+ { { "fp","$fp" }, "$30" },
+ { { "ra" }, "$31" }
+ };
+ Aliases = GCCRegAliases;
+ NumAliases = llvm::array_lengthof(GCCRegAliases);
+ }
+};
+
+class Mips64EBTargetInfo : public Mips64TargetInfoBase {
+ virtual void SetDescriptionString(const std::string &Name) {
+ // Change DescriptionString only if ABI is n32.
+ if (Name == "n32")
+ DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-f128:128:128-"
+ "v64:64:64-n32";
+ }
+public:
+ Mips64EBTargetInfo(const std::string& triple) : Mips64TargetInfoBase(triple) {
+ // Default ABI is n64.
+ DescriptionString = "E-p:64:64:64-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-f128:128:128-"
+ "v64:64:64-n32";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ DefineStd(Builder, "mips", Opts);
+ Builder.defineMacro("_mips");
+ DefineStd(Builder, "MIPSEB", Opts);
+ Builder.defineMacro("_MIPSEB");
+ Builder.defineMacro("__REGISTER_PREFIX__", "");
+ getArchDefines(Opts, Builder);
+ }
+};
+
+class Mips64ELTargetInfo : public Mips64TargetInfoBase {
+ virtual void SetDescriptionString(const std::string &Name) {
+ // Change DescriptionString only if ABI is n32.
+ if (Name == "n32")
+ DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-f128:128:128"
+ "-v64:64:64-n32";
+ }
+public:
+ Mips64ELTargetInfo(const std::string& triple) : Mips64TargetInfoBase(triple) {
+ // Default ABI is n64.
+ BigEndian = false;
+ DescriptionString = "e-p:64:64:64-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-f128:128:128-"
+ "v64:64:64-n32";
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ DefineStd(Builder, "mips", Opts);
+ Builder.defineMacro("_mips");
+ DefineStd(Builder, "MIPSEL", Opts);
+ Builder.defineMacro("_MIPSEL");
+ Builder.defineMacro("__REGISTER_PREFIX__", "");
+ getArchDefines(Opts, Builder);
+ }
+};
+} // end anonymous namespace.
+
+namespace {
+class PNaClTargetInfo : public TargetInfo {
+public:
+ PNaClTargetInfo(const std::string& triple) : TargetInfo(triple) {
+ BigEndian = false;
+ this->UserLabelPrefix = "";
+ this->LongAlign = 32;
+ this->LongWidth = 32;
+ this->PointerAlign = 32;
+ this->PointerWidth = 32;
+ this->IntMaxType = TargetInfo::SignedLongLong;
+ this->UIntMaxType = TargetInfo::UnsignedLongLong;
+ this->Int64Type = TargetInfo::SignedLongLong;
+ this->DoubleAlign = 64;
+ this->LongDoubleWidth = 64;
+ this->LongDoubleAlign = 64;
+ this->SizeType = TargetInfo::UnsignedInt;
+ this->PtrDiffType = TargetInfo::SignedInt;
+ this->IntPtrType = TargetInfo::SignedInt;
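+    // Cap __attribute__((regparm)) at two register arguments.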
+ this->RegParmMax = 2;
+ DescriptionString = "e-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-"
+ "f32:32:32-f64:64:64-p:32:32:32-v128:32:32";
+ }
+
+ void getDefaultFeatures(llvm::StringMap<bool> &Features) const {
+ }
+ virtual void getArchDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("__le32__");
+ Builder.defineMacro("__pnacl__");
+ }
+ virtual void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ DefineStd(Builder, "unix", Opts);
+ Builder.defineMacro("__ELF__");
+ if (Opts.POSIXThreads)
+ Builder.defineMacro("_REENTRANT");
+ if (Opts.CPlusPlus)
+ Builder.defineMacro("_GNU_SOURCE");
+
+ Builder.defineMacro("__LITTLE_ENDIAN__");
+ Builder.defineMacro("__native_client__");
+ getArchDefines(Opts, Builder);
+ }
+ virtual bool hasFeature(StringRef Feature) const {
+ return Feature == "pnacl";
+ }
+ virtual void getTargetBuiltins(const Builtin::Info *&Records,
+ unsigned &NumRecords) const {
+ }
+ virtual const char *getVAListDeclaration() const {
+ return "typedef int __builtin_va_list[4];";
+ }
+ virtual void getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const;
+ virtual void getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const;
+ virtual bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &Info) const {
+ return false;
+ }
+
+ virtual const char *getClobbers() const {
+ return "";
+ }
+};
+
+void PNaClTargetInfo::getGCCRegNames(const char * const *&Names,
+ unsigned &NumNames) const {
+ Names = NULL;
+ NumNames = 0;
+}
+
+void PNaClTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
+ unsigned &NumAliases) const {
+ Aliases = NULL;
+ NumAliases = 0;
+}
+} // end anonymous namespace.
+
+
+//===----------------------------------------------------------------------===//
+// Driver code
+//===----------------------------------------------------------------------===//
+
+static TargetInfo *AllocateTarget(const std::string &T) {
+ llvm::Triple Triple(T);
+ llvm::Triple::OSType os = Triple.getOS();
+
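+  // Dispatch on the architecture first; the nested switches then pick an
+  // OS-flavored subclass where one exists.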
+ switch (Triple.getArch()) {
+ default:
+ return NULL;
+
+ case llvm::Triple::hexagon:
+ return new HexagonTargetInfo(T);
+
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ if (Triple.isOSDarwin())
+ return new DarwinARMTargetInfo(T);
+
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<ARMTargetInfo>(T);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<ARMTargetInfo>(T);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<ARMTargetInfo>(T);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<ARMTargetInfo>(T);
+ default:
+ return new ARMTargetInfo(T);
+ }
+
+ case llvm::Triple::msp430:
+ return new MSP430TargetInfo(T);
+
+ case llvm::Triple::mips:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<Mips32EBTargetInfo>(T);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<Mips32EBTargetInfo>(T);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<Mips32EBTargetInfo>(T);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<Mips32EBTargetInfo>(T);
+ default:
+ return new Mips32EBTargetInfo(T);
+ }
+
+ case llvm::Triple::mipsel:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<Mips32ELTargetInfo>(T);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<Mips32ELTargetInfo>(T);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<Mips32ELTargetInfo>(T);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<Mips32ELTargetInfo>(T);
+ default:
+ return new Mips32ELTargetInfo(T);
+ }
+
+ case llvm::Triple::mips64:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<Mips64EBTargetInfo>(T);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<Mips64EBTargetInfo>(T);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<Mips64EBTargetInfo>(T);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<Mips64EBTargetInfo>(T);
+ default:
+ return new Mips64EBTargetInfo(T);
+ }
+
+ case llvm::Triple::mips64el:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<Mips64ELTargetInfo>(T);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<Mips64ELTargetInfo>(T);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<Mips64ELTargetInfo>(T);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<Mips64ELTargetInfo>(T);
+ default:
+ return new Mips64ELTargetInfo(T);
+ }
+
+ case llvm::Triple::le32:
+ switch (os) {
+ case llvm::Triple::NativeClient:
+ return new PNaClTargetInfo(T);
+ default:
+ return NULL;
+ }
+
+ case llvm::Triple::ppc:
+ if (Triple.isOSDarwin())
+ return new DarwinPPC32TargetInfo(T);
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<PPC32TargetInfo>(T);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<PPC32TargetInfo>(T);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<PPC32TargetInfo>(T);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<PPC32TargetInfo>(T);
+ default:
+ return new PPC32TargetInfo(T);
+ }
+
+ case llvm::Triple::ppc64:
+ if (Triple.isOSDarwin())
+ return new DarwinPPC64TargetInfo(T);
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<PPC64TargetInfo>(T);
+ case llvm::Triple::Lv2:
+ return new PS3PPUTargetInfo<PPC64TargetInfo>(T);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<PPC64TargetInfo>(T);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<PPC64TargetInfo>(T);
+ default:
+ return new PPC64TargetInfo(T);
+ }
+
+ case llvm::Triple::ptx32:
+ return new PTX32TargetInfo(T);
+ case llvm::Triple::ptx64:
+ return new PTX64TargetInfo(T);
+
+ case llvm::Triple::mblaze:
+ return new MBlazeTargetInfo(T);
+
+ case llvm::Triple::sparc:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<SparcV8TargetInfo>(T);
+ case llvm::Triple::AuroraUX:
+ return new AuroraUXSparcV8TargetInfo(T);
+ case llvm::Triple::Solaris:
+ return new SolarisSparcV8TargetInfo(T);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<SparcV8TargetInfo>(T);
+ case llvm::Triple::RTEMS:
+ return new RTEMSTargetInfo<SparcV8TargetInfo>(T);
+ default:
+ return new SparcV8TargetInfo(T);
+ }
+
+ // FIXME: Need a real SPU target.
+ case llvm::Triple::cellspu:
+ return new PS3SPUTargetInfo<PPC64TargetInfo>(T);
+
+ case llvm::Triple::tce:
+ return new TCETargetInfo(T);
+
+ case llvm::Triple::x86:
+ if (Triple.isOSDarwin())
+ return new DarwinI386TargetInfo(T);
+
+ switch (os) {
+ case llvm::Triple::AuroraUX:
+ return new AuroraUXTargetInfo<X86_32TargetInfo>(T);
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<X86_32TargetInfo>(T);
+ case llvm::Triple::DragonFly:
+ return new DragonFlyBSDTargetInfo<X86_32TargetInfo>(T);
+ case llvm::Triple::NetBSD:
+ return new NetBSDI386TargetInfo(T);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDI386TargetInfo(T);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<X86_32TargetInfo>(T);
+ case llvm::Triple::Minix:
+ return new MinixTargetInfo<X86_32TargetInfo>(T);
+ case llvm::Triple::Solaris:
+ return new SolarisTargetInfo<X86_32TargetInfo>(T);
+ case llvm::Triple::Cygwin:
+ return new CygwinX86_32TargetInfo(T);
+ case llvm::Triple::MinGW32:
+ return new MinGWX86_32TargetInfo(T);
+ case llvm::Triple::Win32:
+ return new VisualStudioWindowsX86_32TargetInfo(T);
+ case llvm::Triple::Haiku:
+ return new HaikuX86_32TargetInfo(T);
+ case llvm::Triple::RTEMS:
+ return new RTEMSX86_32TargetInfo(T);
+ default:
+ return new X86_32TargetInfo(T);
+ }
+
+ case llvm::Triple::x86_64:
+ if (Triple.isOSDarwin() || Triple.getEnvironment() == llvm::Triple::MachO)
+ return new DarwinX86_64TargetInfo(T);
+
+ switch (os) {
+ case llvm::Triple::AuroraUX:
+ return new AuroraUXTargetInfo<X86_64TargetInfo>(T);
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<X86_64TargetInfo>(T);
+ case llvm::Triple::DragonFly:
+ return new DragonFlyBSDTargetInfo<X86_64TargetInfo>(T);
+ case llvm::Triple::NetBSD:
+ return new NetBSDTargetInfo<X86_64TargetInfo>(T);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDX86_64TargetInfo(T);
+ case llvm::Triple::FreeBSD:
+ return new FreeBSDTargetInfo<X86_64TargetInfo>(T);
+ case llvm::Triple::Solaris:
+ return new SolarisTargetInfo<X86_64TargetInfo>(T);
+ case llvm::Triple::MinGW32:
+ return new MinGWX86_64TargetInfo(T);
+ case llvm::Triple::Win32: // This is what Triple.h supports now.
+ return new VisualStudioWindowsX86_64TargetInfo(T);
+ default:
+ return new X86_64TargetInfo(T);
+ }
+ }
+}
+
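+// A rough usage sketch (Diags is assumed to be an existing DiagnosticsEngine;
+// the option fields are as declared in TargetOptions):
+//
+//   TargetOptions Opts;
+//   Opts.Triple = llvm::Triple::normalize("x86_64-unknown-linux-gnu");
+//   Opts.CPU = "core2";
+//   OwningPtr<TargetInfo> Target(TargetInfo::CreateTargetInfo(Diags, Opts));
+//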
+/// CreateTargetInfo - Return the target info object for the specified target
+/// triple.
+TargetInfo *TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
+ TargetOptions &Opts) {
+ llvm::Triple Triple(Opts.Triple);
+
+ // Construct the target
+ OwningPtr<TargetInfo> Target(AllocateTarget(Triple.str()));
+ if (!Target) {
+ Diags.Report(diag::err_target_unknown_triple) << Triple.str();
+ return 0;
+ }
+
+ // Set the target CPU if specified.
+ if (!Opts.CPU.empty() && !Target->setCPU(Opts.CPU)) {
+ Diags.Report(diag::err_target_unknown_cpu) << Opts.CPU;
+ return 0;
+ }
+
+ // Set the target ABI if specified.
+ if (!Opts.ABI.empty() && !Target->setABI(Opts.ABI)) {
+ Diags.Report(diag::err_target_unknown_abi) << Opts.ABI;
+ return 0;
+ }
+
+ // Set the target C++ ABI.
+ if (!Opts.CXXABI.empty() && !Target->setCXXABI(Opts.CXXABI)) {
+ Diags.Report(diag::err_target_unknown_cxxabi) << Opts.CXXABI;
+ return 0;
+ }
+
+  // Compute the default target features; we need the target to handle this
+  // because features may have dependencies on one another.
+ llvm::StringMap<bool> Features;
+ Target->getDefaultFeatures(Features);
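+  // Features maps each feature name to an on/off state; it is rendered back
+  // into "+name"/"-name" strings in Opts.Features near the end of this
+  // function.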
+
+ // Apply the user specified deltas.
+ // First the enables.
+ for (std::vector<std::string>::const_iterator it = Opts.Features.begin(),
+ ie = Opts.Features.end(); it != ie; ++it) {
+ const char *Name = it->c_str();
+
+ if (Name[0] != '+')
+ continue;
+
+ // Apply the feature via the target.
+ if (!Target->setFeatureEnabled(Features, Name + 1, true)) {
+ Diags.Report(diag::err_target_invalid_feature) << Name;
+ return 0;
+ }
+ }
+
+ // Then the disables.
+ for (std::vector<std::string>::const_iterator it = Opts.Features.begin(),
+ ie = Opts.Features.end(); it != ie; ++it) {
+ const char *Name = it->c_str();
+
+ if (Name[0] == '+')
+ continue;
+
+ // Apply the feature via the target.
+ if (Name[0] != '-' ||
+ !Target->setFeatureEnabled(Features, Name + 1, false)) {
+ Diags.Report(diag::err_target_invalid_feature) << Name;
+ return 0;
+ }
+ }
+
+ // Add the features to the compile options.
+ //
+ // FIXME: If we are completely confident that we have the right set, we only
+ // need to pass the minuses.
+ Opts.Features.clear();
+ for (llvm::StringMap<bool>::const_iterator it = Features.begin(),
+ ie = Features.end(); it != ie; ++it)
+ Opts.Features.push_back((it->second ? "+" : "-") + it->first().str());
+ Target->HandleTargetFeatures(Opts.Features);
+
+ return Target.take();
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/TokenKinds.cpp b/contrib/llvm/tools/clang/lib/Basic/TokenKinds.cpp
new file mode 100644
index 0000000..8cdc1e3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/TokenKinds.cpp
@@ -0,0 +1,39 @@
+//===--- TokenKinds.cpp - Token Kinds Support -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TokenKind enum and support functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/TokenKinds.h"
+
+#include <cassert>
+using namespace clang;
+
+static const char * const TokNames[] = {
+#define TOK(X) #X,
+#define KEYWORD(X,Y) #X,
+#include "clang/Basic/TokenKinds.def"
+ 0
+};
+
+const char *tok::getTokenName(enum TokenKind Kind) {
+ assert(Kind < tok::NUM_TOKENS);
+ return TokNames[Kind];
+}
+
+const char *tok::getTokenSimpleSpelling(enum TokenKind Kind) {
+ switch (Kind) {
+#define PUNCTUATOR(X,Y) case X: return Y;
+#include "clang/Basic/TokenKinds.def"
+ default: break;
+ }
+
+ return 0;
+}
diff --git a/contrib/llvm/tools/clang/lib/Basic/Version.cpp b/contrib/llvm/tools/clang/lib/Basic/Version.cpp
new file mode 100644
index 0000000..36138ac
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/Version.cpp
@@ -0,0 +1,146 @@
+//===- Version.cpp - Clang Version Number -----------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines several version-related utility functions for Clang.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Version.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Config/config.h"
+#include <cstring>
+#include <cstdlib>
+
+namespace clang {
+
+std::string getClangRepositoryPath() {
+#if defined(CLANG_REPOSITORY_STRING)
+ return CLANG_REPOSITORY_STRING;
+#else
+#ifdef SVN_REPOSITORY
+ StringRef URL(SVN_REPOSITORY);
+#else
+ StringRef URL("");
+#endif
+
+ // If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
+ // pick up a tag in an SVN export, for example.
+ static StringRef SVNRepository("$URL: http://llvm.org/svn/llvm-project/cfe/trunk/lib/Basic/Version.cpp $");
+ if (URL.empty()) {
+ URL = SVNRepository.slice(SVNRepository.find(':'),
+ SVNRepository.find("/lib/Basic"));
+ }
+
+ // Strip off version from a build from an integration branch.
+ URL = URL.slice(0, URL.find("/src/tools/clang"));
+
+ // Trim path prefix off, assuming path came from standard cfe path.
+ size_t Start = URL.find("cfe/");
+ if (Start != StringRef::npos)
+ URL = URL.substr(Start + 4);
+
+ return URL;
+#endif
+}
+
+std::string getLLVMRepositoryPath() {
+#ifdef LLVM_REPOSITORY
+ StringRef URL(LLVM_REPOSITORY);
+#else
+ StringRef URL("");
+#endif
+
+ // Trim path prefix off, assuming path came from standard llvm path.
+ // Leave "llvm/" prefix to distinguish the following llvm revision from the
+ // clang revision.
+ size_t Start = URL.find("llvm/");
+ if (Start != StringRef::npos)
+ URL = URL.substr(Start);
+
+ return URL;
+}
+
+std::string getClangRevision() {
+#ifdef SVN_REVISION
+ return SVN_REVISION;
+#else
+ return "";
+#endif
+}
+
+std::string getLLVMRevision() {
+#ifdef LLVM_REVISION
+ return LLVM_REVISION;
+#else
+ return "";
+#endif
+}
+
+std::string getClangFullRepositoryVersion() {
+ std::string buf;
+ llvm::raw_string_ostream OS(buf);
+ std::string Path = getClangRepositoryPath();
+ std::string Revision = getClangRevision();
+ if (!Path.empty() || !Revision.empty()) {
+ OS << '(';
+ if (!Path.empty())
+ OS << Path;
+ if (!Revision.empty()) {
+ if (!Path.empty())
+ OS << ' ';
+ OS << Revision;
+ }
+ OS << ')';
+ }
+ // Support LLVM in a separate repository.
+ std::string LLVMRev = getLLVMRevision();
+ if (!LLVMRev.empty() && LLVMRev != Revision) {
+ OS << " (";
+ std::string LLVMRepo = getLLVMRepositoryPath();
+ if (!LLVMRepo.empty())
+ OS << LLVMRepo << ' ';
+ OS << LLVMRev << ')';
+ }
+ return OS.str();
+}
+
+std::string getClangFullVersion() {
+ std::string buf;
+ llvm::raw_string_ostream OS(buf);
+#ifdef CLANG_VENDOR
+ OS << CLANG_VENDOR;
+#endif
+ OS << "clang version " CLANG_VERSION_STRING " "
+ << getClangFullRepositoryVersion();
+
+#ifdef CLANG_VENDOR_SUFFIX
+ OS << CLANG_VENDOR_SUFFIX;
+#elif defined(CLANG_VENDOR)
+ // If vendor supplied, include the base LLVM version as well.
+ OS << " (based on LLVM " << PACKAGE_VERSION << ")";
+#endif
+
+ return OS.str();
+}
+
+std::string getClangFullCPPVersion() {
+ // The version string we report in __VERSION__ is just a compacted version of
+ // the one we report on the command line.
+ std::string buf;
+ llvm::raw_string_ostream OS(buf);
+#ifdef CLANG_VENDOR
+ OS << CLANG_VENDOR;
+#endif
+ OS << "Clang " CLANG_VERSION_STRING " ("
+ << getClangFullRepositoryVersion() << ')';
+ return OS.str();
+}
+
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Basic/VersionTuple.cpp b/contrib/llvm/tools/clang/lib/Basic/VersionTuple.cpp
new file mode 100644
index 0000000..77aad39
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Basic/VersionTuple.cpp
@@ -0,0 +1,36 @@
+//===- VersionTuple.cpp - Version Number Handling ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the VersionTuple class, which represents a version in
+// the form major[.minor[.subminor]].
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Basic/VersionTuple.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+std::string VersionTuple::getAsString() const {
+ std::string Result;
+ {
+ llvm::raw_string_ostream Out(Result);
+ Out << *this;
+ }
+ return Result;
+}
+
+raw_ostream& clang::operator<<(raw_ostream &Out,
+ const VersionTuple &V) {
+ Out << V.getMajor();
+ if (llvm::Optional<unsigned> Minor = V.getMinor())
+ Out << '.' << *Minor;
+ if (llvm::Optional<unsigned> Subminor = V.getSubminor())
+ Out << '.' << *Subminor;
+ return Out;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
new file mode 100644
index 0000000..2853bc8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ABIInfo.h
@@ -0,0 +1,181 @@
+//===----- ABIInfo.h - ABI information access & encapsulation ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_ABIINFO_H
+#define CLANG_CODEGEN_ABIINFO_H
+
+#include "clang/AST/Type.h"
+#include "llvm/Type.h"
+
+namespace llvm {
+ class Value;
+ class LLVMContext;
+ class TargetData;
+}
+
+namespace clang {
+ class ASTContext;
+
+ namespace CodeGen {
+ class CGFunctionInfo;
+ class CodeGenFunction;
+ class CodeGenTypes;
+ }
+
+ // FIXME: All of this stuff should be part of the target interface
+ // somehow. It is currently here because it is not clear how to factor
+ // the targets to support this, since the Targets currently live in a
+ // layer below types n'stuff.
+
+ /// ABIArgInfo - Helper class to encapsulate information about how a
+ /// specific C type should be passed to or returned from a function.
+ class ABIArgInfo {
+ public:
+ enum Kind {
+ /// Direct - Pass the argument directly using the normal converted LLVM
+ /// type, or by coercing to another specified type stored in
+      /// 'CoerceToType'. If an offset is specified (in UIntData), then the
+ /// argument passed is offset by some number of bytes in the memory
+ /// representation. A dummy argument is emitted before the real argument
+ /// if the specified type stored in "PaddingType" is not zero.
+ Direct,
+
+ /// Extend - Valid only for integer argument types. Same as 'direct'
+ /// but also emit a zero/sign extension attribute.
+ Extend,
+
+ /// Indirect - Pass the argument indirectly via a hidden pointer
+ /// with the specified alignment (0 indicates default alignment).
+ Indirect,
+
+ /// Ignore - Ignore the argument (treat as void). Useful for void and
+ /// empty structs.
+ Ignore,
+
+ /// Expand - Only valid for aggregate argument types. The structure should
+ /// be expanded into consecutive arguments for its constituent fields.
+ /// Currently expand is only allowed on structures whose fields
+ /// are all scalar types or are themselves expandable types.
+ Expand,
+
+ KindFirst=Direct, KindLast=Expand
+ };
+
+ private:
+ Kind TheKind;
+ llvm::Type *TypeData;
+ llvm::Type *PaddingType; // Currently allowed only for Direct.
+ unsigned UIntData;
+ bool BoolData0;
+ bool BoolData1;
+
+ ABIArgInfo(Kind K, llvm::Type *TD=0, unsigned UI=0,
+ bool B0 = false, bool B1 = false, llvm::Type* P = 0)
+ : TheKind(K), TypeData(TD), PaddingType(P), UIntData(UI), BoolData0(B0),
+ BoolData1(B1) {}
+
+ public:
+    ABIArgInfo() : TheKind(Direct), TypeData(0), PaddingType(0), UIntData(0),
+                   BoolData0(false), BoolData1(false) {}
+
+ static ABIArgInfo getDirect(llvm::Type *T = 0, unsigned Offset = 0,
+ llvm::Type *Padding = 0) {
+ return ABIArgInfo(Direct, T, Offset, false, false, Padding);
+ }
+ static ABIArgInfo getExtend(llvm::Type *T = 0) {
+ return ABIArgInfo(Extend, T, 0);
+ }
+ static ABIArgInfo getIgnore() {
+ return ABIArgInfo(Ignore);
+ }
+    static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true,
+                                  bool Realign = false) {
+ return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign);
+ }
+ static ABIArgInfo getExpand() {
+ return ABIArgInfo(Expand);
+ }
+
+ Kind getKind() const { return TheKind; }
+ bool isDirect() const { return TheKind == Direct; }
+ bool isExtend() const { return TheKind == Extend; }
+ bool isIgnore() const { return TheKind == Ignore; }
+ bool isIndirect() const { return TheKind == Indirect; }
+ bool isExpand() const { return TheKind == Expand; }
+
+ bool canHaveCoerceToType() const {
+ return TheKind == Direct || TheKind == Extend;
+ }
+
+ // Direct/Extend accessors
+ unsigned getDirectOffset() const {
+ assert((isDirect() || isExtend()) && "Not a direct or extend kind");
+ return UIntData;
+ }
+
+ llvm::Type *getPaddingType() const {
+ return PaddingType;
+ }
+
+ llvm::Type *getCoerceToType() const {
+ assert(canHaveCoerceToType() && "Invalid kind!");
+ return TypeData;
+ }
+
+ void setCoerceToType(llvm::Type *T) {
+ assert(canHaveCoerceToType() && "Invalid kind!");
+ TypeData = T;
+ }
+
+ // Indirect accessors
+ unsigned getIndirectAlign() const {
+ assert(TheKind == Indirect && "Invalid kind!");
+ return UIntData;
+ }
+
+ bool getIndirectByVal() const {
+ assert(TheKind == Indirect && "Invalid kind!");
+ return BoolData0;
+ }
+
+ bool getIndirectRealign() const {
+ assert(TheKind == Indirect && "Invalid kind!");
+ return BoolData1;
+ }
+
+ void dump() const;
+ };
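+
+  // Illustrative sketch of how a target's classification code might use the
+  // factory methods above (isAggregateTypeForABI is assumed here as a
+  // hypothetical helper, not something declared in this header):
+  //
+  //   ABIArgInfo classifyArgumentType(QualType Ty) const {
+  //     if (Ty->isVoidType())
+  //       return ABIArgInfo::getIgnore();
+  //     if (isAggregateTypeForABI(Ty))
+  //       return ABIArgInfo::getIndirect(/*Alignment=*/0, /*ByVal=*/true);
+  //     return Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
+  //                                          : ABIArgInfo::getDirect();
+  //   }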
+
+ /// ABIInfo - Target specific hooks for defining how a type should be
+ /// passed or returned from functions.
+ class ABIInfo {
+ public:
+ CodeGen::CodeGenTypes &CGT;
+
+ ABIInfo(CodeGen::CodeGenTypes &cgt) : CGT(cgt) {}
+ virtual ~ABIInfo();
+
+ ASTContext &getContext() const;
+ llvm::LLVMContext &getVMContext() const;
+ const llvm::TargetData &getTargetData() const;
+
+ virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const = 0;
+
+ /// EmitVAArg - Emit the target dependent code to load a value of
+ /// \arg Ty from the va_list pointed to by \arg VAListAddr.
+
+ // FIXME: This is a gaping layering violation if we wanted to drop
+ // the ABI information any lower than CodeGen. Of course, for
+ // VAArg handling it has to be at this level; there is no way to
+ // abstract this out.
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGen::CodeGenFunction &CGF) const = 0;
+ };
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
new file mode 100644
index 0000000..2f44711
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/BackendUtil.cpp
@@ -0,0 +1,460 @@
+//===--- BackendUtil.cpp - LLVM Backend Utilities -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/BackendUtil.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetOptions.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Transforms/Instrumentation.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/IPO/PassManagerBuilder.h"
+#include "llvm/Transforms/Scalar.h"
+using namespace clang;
+using namespace llvm;
+
+namespace {
+
+class EmitAssemblyHelper {
+ DiagnosticsEngine &Diags;
+ const CodeGenOptions &CodeGenOpts;
+ const clang::TargetOptions &TargetOpts;
+ const LangOptions &LangOpts;
+ Module *TheModule;
+
+ Timer CodeGenerationTime;
+
+ mutable PassManager *CodeGenPasses;
+ mutable PassManager *PerModulePasses;
+ mutable FunctionPassManager *PerFunctionPasses;
+
+private:
+ PassManager *getCodeGenPasses() const {
+ if (!CodeGenPasses) {
+ CodeGenPasses = new PassManager();
+ CodeGenPasses->add(new TargetData(TheModule));
+ }
+ return CodeGenPasses;
+ }
+
+ PassManager *getPerModulePasses() const {
+ if (!PerModulePasses) {
+ PerModulePasses = new PassManager();
+ PerModulePasses->add(new TargetData(TheModule));
+ }
+ return PerModulePasses;
+ }
+
+ FunctionPassManager *getPerFunctionPasses() const {
+ if (!PerFunctionPasses) {
+ PerFunctionPasses = new FunctionPassManager(TheModule);
+ PerFunctionPasses->add(new TargetData(TheModule));
+ }
+ return PerFunctionPasses;
+ }
+
+ void CreatePasses();
+
+ /// AddEmitPasses - Add passes necessary to emit assembly or LLVM IR.
+ ///
+ /// \return True on success.
+ bool AddEmitPasses(BackendAction Action, formatted_raw_ostream &OS);
+
+public:
+ EmitAssemblyHelper(DiagnosticsEngine &_Diags,
+ const CodeGenOptions &CGOpts,
+ const clang::TargetOptions &TOpts,
+ const LangOptions &LOpts,
+ Module *M)
+ : Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts), LangOpts(LOpts),
+ TheModule(M), CodeGenerationTime("Code Generation Time"),
+ CodeGenPasses(0), PerModulePasses(0), PerFunctionPasses(0) {}
+
+ ~EmitAssemblyHelper() {
+ delete CodeGenPasses;
+ delete PerModulePasses;
+ delete PerFunctionPasses;
+ }
+
+ void EmitAssembly(BackendAction Action, raw_ostream *OS);
+};
+
+}
+
+static void addObjCARCAPElimPass(const PassManagerBuilder &Builder,
+                                 PassManagerBase &PM) {
+ if (Builder.OptLevel > 0)
+ PM.add(createObjCARCAPElimPass());
+}
+
+static void addObjCARCExpandPass(const PassManagerBuilder &Builder,
+                                 PassManagerBase &PM) {
+ if (Builder.OptLevel > 0)
+ PM.add(createObjCARCExpandPass());
+}
+
+static void addObjCARCOptPass(const PassManagerBuilder &Builder,
+                              PassManagerBase &PM) {
+ if (Builder.OptLevel > 0)
+ PM.add(createObjCARCOptPass());
+}
+
+static void addAddressSanitizerPass(const PassManagerBuilder &Builder,
+ PassManagerBase &PM) {
+ PM.add(createAddressSanitizerPass());
+}
+
+static void addThreadSanitizerPass(const PassManagerBuilder &Builder,
+ PassManagerBase &PM) {
+ PM.add(createThreadSanitizerPass());
+}
+
+void EmitAssemblyHelper::CreatePasses() {
+ unsigned OptLevel = CodeGenOpts.OptimizationLevel;
+ CodeGenOptions::InliningMethod Inlining = CodeGenOpts.Inlining;
+
+ // Handle disabling of LLVM optimization, where we want to preserve the
+ // internal module before any optimization.
+ if (CodeGenOpts.DisableLLVMOpts) {
+ OptLevel = 0;
+ Inlining = CodeGenOpts.NoInlining;
+ }
+
+ PassManagerBuilder PMBuilder;
+ PMBuilder.OptLevel = OptLevel;
+ PMBuilder.SizeLevel = CodeGenOpts.OptimizeSize;
+
+ PMBuilder.DisableSimplifyLibCalls = !CodeGenOpts.SimplifyLibCalls;
+ PMBuilder.DisableUnitAtATime = !CodeGenOpts.UnitAtATime;
+ PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;
+
+ // In ObjC ARC mode, add the main ARC optimization passes.
+ if (LangOpts.ObjCAutoRefCount) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
+ addObjCARCExpandPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_ModuleOptimizerEarly,
+ addObjCARCAPElimPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
+ addObjCARCOptPass);
+ }
+
+ if (LangOpts.AddressSanitizer) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
+ addAddressSanitizerPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addAddressSanitizerPass);
+ }
+
+ if (LangOpts.ThreadSanitizer) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addThreadSanitizerPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addThreadSanitizerPass);
+ }
+
+ // Figure out TargetLibraryInfo.
+ Triple TargetTriple(TheModule->getTargetTriple());
+ PMBuilder.LibraryInfo = new TargetLibraryInfo(TargetTriple);
+ if (!CodeGenOpts.SimplifyLibCalls)
+ PMBuilder.LibraryInfo->disableAllFunctions();
+
+ switch (Inlining) {
+ case CodeGenOptions::NoInlining: break;
+ case CodeGenOptions::NormalInlining: {
+ // FIXME: Derive these constants in a principled fashion.
+ unsigned Threshold = 225;
+ if (CodeGenOpts.OptimizeSize == 1) // -Os
+ Threshold = 75;
+ else if (CodeGenOpts.OptimizeSize == 2) // -Oz
+ Threshold = 25;
+ else if (OptLevel > 2)
+ Threshold = 275;
+ PMBuilder.Inliner = createFunctionInliningPass(Threshold);
+ break;
+ }
+ case CodeGenOptions::OnlyAlwaysInlining:
+ // Respect always_inline.
+ if (OptLevel == 0)
+ // Do not insert lifetime intrinsics at -O0.
+ PMBuilder.Inliner = createAlwaysInlinerPass(false);
+ else
+ PMBuilder.Inliner = createAlwaysInlinerPass();
+ break;
+ }
+
+
+ // Set up the per-function pass manager.
+ FunctionPassManager *FPM = getPerFunctionPasses();
+ if (CodeGenOpts.VerifyModule)
+ FPM->add(createVerifierPass());
+ PMBuilder.populateFunctionPassManager(*FPM);
+
+ // Set up the per-module pass manager.
+ PassManager *MPM = getPerModulePasses();
+
+ if (CodeGenOpts.EmitGcovArcs || CodeGenOpts.EmitGcovNotes) {
+ MPM->add(createGCOVProfilerPass(CodeGenOpts.EmitGcovNotes,
+ CodeGenOpts.EmitGcovArcs,
+ TargetTriple.isMacOSX()));
+
+ if (!CodeGenOpts.DebugInfo)
+ MPM->add(createStripSymbolsPass(true));
+ }
+
+
+ PMBuilder.populateModulePassManager(*MPM);
+}
+
+bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
+ formatted_raw_ostream &OS) {
+ // Create the TargetMachine for generating code.
+ std::string Error;
+ std::string Triple = TheModule->getTargetTriple();
+ const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
+ if (!TheTarget) {
+ Diags.Report(diag::err_fe_unable_to_create_target) << Error;
+ return false;
+ }
+
+ // FIXME: Expose these capabilities via actual APIs!!!! Aside from just
+ // being gross, this is also totally broken if we ever care about
+ // concurrency.
+
+ TargetMachine::setAsmVerbosityDefault(CodeGenOpts.AsmVerbose);
+
+ TargetMachine::setFunctionSections(CodeGenOpts.FunctionSections);
+  TargetMachine::setDataSections(CodeGenOpts.DataSections);
+
+ // FIXME: Parse this earlier.
+ llvm::CodeModel::Model CM;
+ if (CodeGenOpts.CodeModel == "small") {
+ CM = llvm::CodeModel::Small;
+ } else if (CodeGenOpts.CodeModel == "kernel") {
+ CM = llvm::CodeModel::Kernel;
+ } else if (CodeGenOpts.CodeModel == "medium") {
+ CM = llvm::CodeModel::Medium;
+ } else if (CodeGenOpts.CodeModel == "large") {
+ CM = llvm::CodeModel::Large;
+ } else {
+ assert(CodeGenOpts.CodeModel.empty() && "Invalid code model!");
+ CM = llvm::CodeModel::Default;
+ }
+
+ SmallVector<const char *, 16> BackendArgs;
+ BackendArgs.push_back("clang"); // Fake program name.
+ if (!CodeGenOpts.DebugPass.empty()) {
+ BackendArgs.push_back("-debug-pass");
+ BackendArgs.push_back(CodeGenOpts.DebugPass.c_str());
+ }
+ if (!CodeGenOpts.LimitFloatPrecision.empty()) {
+ BackendArgs.push_back("-limit-float-precision");
+ BackendArgs.push_back(CodeGenOpts.LimitFloatPrecision.c_str());
+ }
+ if (llvm::TimePassesIsEnabled)
+ BackendArgs.push_back("-time-passes");
+ for (unsigned i = 0, e = CodeGenOpts.BackendOptions.size(); i != e; ++i)
+ BackendArgs.push_back(CodeGenOpts.BackendOptions[i].c_str());
+ if (CodeGenOpts.NoGlobalMerge)
+ BackendArgs.push_back("-global-merge=false");
+ BackendArgs.push_back(0);
+ llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1,
+ BackendArgs.data());
+
+ std::string FeaturesStr;
+ if (TargetOpts.Features.size()) {
+ SubtargetFeatures Features;
+ for (std::vector<std::string>::const_iterator
+ it = TargetOpts.Features.begin(),
+ ie = TargetOpts.Features.end(); it != ie; ++it)
+ Features.AddFeature(*it);
+ FeaturesStr = Features.getString();
+ }
+
+ llvm::Reloc::Model RM = llvm::Reloc::Default;
+ if (CodeGenOpts.RelocationModel == "static") {
+ RM = llvm::Reloc::Static;
+ } else if (CodeGenOpts.RelocationModel == "pic") {
+ RM = llvm::Reloc::PIC_;
+ } else {
+ assert(CodeGenOpts.RelocationModel == "dynamic-no-pic" &&
+ "Invalid PIC model!");
+ RM = llvm::Reloc::DynamicNoPIC;
+ }
+
+ CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
+ switch (CodeGenOpts.OptimizationLevel) {
+ default: break;
+ case 0: OptLevel = CodeGenOpt::None; break;
+ case 3: OptLevel = CodeGenOpt::Aggressive; break;
+ }
+
+ llvm::TargetOptions Options;
+
+ // Set frame pointer elimination mode.
+ if (!CodeGenOpts.DisableFPElim) {
+ Options.NoFramePointerElim = false;
+ Options.NoFramePointerElimNonLeaf = false;
+ } else if (CodeGenOpts.OmitLeafFramePointer) {
+ Options.NoFramePointerElim = false;
+ Options.NoFramePointerElimNonLeaf = true;
+ } else {
+ Options.NoFramePointerElim = true;
+ Options.NoFramePointerElimNonLeaf = true;
+ }
+
+ // Set float ABI type.
+ if (CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp")
+ Options.FloatABIType = llvm::FloatABI::Soft;
+ else if (CodeGenOpts.FloatABI == "hard")
+ Options.FloatABIType = llvm::FloatABI::Hard;
+ else {
+ assert(CodeGenOpts.FloatABI.empty() && "Invalid float abi!");
+ Options.FloatABIType = llvm::FloatABI::Default;
+ }
+
+ Options.LessPreciseFPMADOption = CodeGenOpts.LessPreciseFPMAD;
+ Options.NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
+ Options.NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
+ Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
+ Options.UnsafeFPMath = CodeGenOpts.UnsafeFPMath;
+ Options.UseSoftFloat = CodeGenOpts.SoftFloat;
+ Options.StackAlignmentOverride = CodeGenOpts.StackAlignment;
+ Options.RealignStack = CodeGenOpts.StackRealignment;
+ Options.DisableTailCalls = CodeGenOpts.DisableTailCalls;
+ Options.TrapFuncName = CodeGenOpts.TrapFuncName;
+ Options.PositionIndependentExecutable = LangOpts.PIELevel != 0;
+
+ TargetMachine *TM = TheTarget->createTargetMachine(Triple, TargetOpts.CPU,
+ FeaturesStr, Options,
+ RM, CM, OptLevel);
+
+ if (CodeGenOpts.RelaxAll)
+ TM->setMCRelaxAll(true);
+ if (CodeGenOpts.SaveTempLabels)
+ TM->setMCSaveTempLabels(true);
+ if (CodeGenOpts.NoDwarf2CFIAsm)
+ TM->setMCUseCFI(false);
+ if (!CodeGenOpts.NoDwarfDirectoryAsm)
+ TM->setMCUseDwarfDirectory(true);
+ if (CodeGenOpts.NoExecStack)
+ TM->setMCNoExecStack(true);
+
+ // Create the code generator passes.
+ PassManager *PM = getCodeGenPasses();
+
+ // Add LibraryInfo.
+ TargetLibraryInfo *TLI = new TargetLibraryInfo();
+ if (!CodeGenOpts.SimplifyLibCalls)
+ TLI->disableAllFunctions();
+ PM->add(TLI);
+
+  // Normal mode: emit a .s or .o file by running the code generator. Note
+  // that this also adds code-generator-level optimization passes.
+ TargetMachine::CodeGenFileType CGFT = TargetMachine::CGFT_AssemblyFile;
+ if (Action == Backend_EmitObj)
+ CGFT = TargetMachine::CGFT_ObjectFile;
+ else if (Action == Backend_EmitMCNull)
+ CGFT = TargetMachine::CGFT_Null;
+ else
+ assert(Action == Backend_EmitAssembly && "Invalid action!");
+
+  // Add ObjC ARC final-cleanup optimizations. This is done as part of the
+  // "codegen" passes so that it isn't run multiple times when inlining
+  // happens.
+ if (LangOpts.ObjCAutoRefCount &&
+ CodeGenOpts.OptimizationLevel > 0)
+ PM->add(createObjCARCContractPass());
+
+ if (TM->addPassesToEmitFile(*PM, OS, CGFT,
+ /*DisableVerify=*/!CodeGenOpts.VerifyModule)) {
+ Diags.Report(diag::err_fe_unable_to_interface_with_target);
+ return false;
+ }
+
+ return true;
+}
+
+void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
+ TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : 0);
+ llvm::formatted_raw_ostream FormattedOS;
+
+ CreatePasses();
+ switch (Action) {
+ case Backend_EmitNothing:
+ break;
+
+ case Backend_EmitBC:
+ getPerModulePasses()->add(createBitcodeWriterPass(*OS));
+ break;
+
+ case Backend_EmitLL:
+ FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
+ getPerModulePasses()->add(createPrintModulePass(&FormattedOS));
+ break;
+
+ default:
+ FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
+ if (!AddEmitPasses(Action, FormattedOS))
+ return;
+ }
+
+ // Before executing passes, print the final values of the LLVM options.
+ cl::PrintOptionValues();
+
+ // Run passes. For now we do all passes at once, but eventually we
+ // would like to have the option of streaming code generation.
+
+ if (PerFunctionPasses) {
+ PrettyStackTraceString CrashInfo("Per-function optimization");
+
+ PerFunctionPasses->doInitialization();
+ for (Module::iterator I = TheModule->begin(),
+ E = TheModule->end(); I != E; ++I)
+ if (!I->isDeclaration())
+ PerFunctionPasses->run(*I);
+ PerFunctionPasses->doFinalization();
+ }
+
+ if (PerModulePasses) {
+ PrettyStackTraceString CrashInfo("Per-module optimization passes");
+ PerModulePasses->run(*TheModule);
+ }
+
+ if (CodeGenPasses) {
+ PrettyStackTraceString CrashInfo("Code generation");
+ CodeGenPasses->run(*TheModule);
+ }
+}
+
+void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
+ const CodeGenOptions &CGOpts,
+ const clang::TargetOptions &TOpts,
+ const LangOptions &LOpts,
+ Module *M,
+ BackendAction Action, raw_ostream *OS) {
+ EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, LOpts, M);
+
+ AsmHelper.EmitAssembly(Action, OS);
+}
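+
+// Illustrative call site (a sketch only; it assumes the caller already has a
+// DiagnosticsEngine, the three option objects, an llvm::Module *M, and an
+// llvm::raw_ostream *OS in scope):
+//
+//   clang::EmitBackendOutput(Diags, CGOpts, TOpts, LOpts, M,
+//                            clang::Backend_EmitObj, OS);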
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
new file mode 100644
index 0000000..27bb4ef
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.cpp
@@ -0,0 +1,2042 @@
+//===--- CGBlocks.cpp - Emit LLVM Code for blocks -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit blocks.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "CGBlocks.h"
+#include "clang/AST/DeclObjC.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/Target/TargetData.h"
+#include <algorithm>
+
+using namespace clang;
+using namespace CodeGen;
+
+CGBlockInfo::CGBlockInfo(const BlockDecl *block, StringRef name)
+ : Name(name), CXXThisIndex(0), CanBeGlobal(false), NeedsCopyDispose(false),
+ HasCXXObject(false), UsesStret(false), StructureType(0), Block(block),
+ DominatingIP(0) {
+
+ // Skip asm prefix, if any. 'name' is usually taken directly from
+ // the mangled name of the enclosing function.
+ if (!name.empty() && name[0] == '\01')
+ name = name.substr(1);
+}
+
+// Anchor the vtable to this translation unit.
+CodeGenModule::ByrefHelpers::~ByrefHelpers() {}
+
+/// Build the given block as a global block.
+static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo,
+ llvm::Constant *blockFn);
+
+/// Build the helper function to copy a block.
+static llvm::Constant *buildCopyHelper(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ return CodeGenFunction(CGM).GenerateCopyHelperFunction(blockInfo);
+}
+
+/// Build the helper function to dispose of a block.
+static llvm::Constant *buildDisposeHelper(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ return CodeGenFunction(CGM).GenerateDestroyHelperFunction(blockInfo);
+}
+
+/// Build the block descriptor constant for a block.
+static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ ASTContext &C = CGM.getContext();
+
+ llvm::Type *ulong = CGM.getTypes().ConvertType(C.UnsignedLongTy);
+ llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
+
+ SmallVector<llvm::Constant*, 6> elements;
+
+ // reserved
+ elements.push_back(llvm::ConstantInt::get(ulong, 0));
+
+ // Size
+ // FIXME: What is the right way to say this doesn't fit? We should give
+ // a user diagnostic in that case. Better fix would be to change the
+ // API to size_t.
+ elements.push_back(llvm::ConstantInt::get(ulong,
+ blockInfo.BlockSize.getQuantity()));
+
+ // Optional copy/dispose helpers.
+ if (blockInfo.NeedsCopyDispose) {
+ // copy_func_helper_decl
+ elements.push_back(buildCopyHelper(CGM, blockInfo));
+
+ // destroy_func_decl
+ elements.push_back(buildDisposeHelper(CGM, blockInfo));
+ }
+
+ // Signature. Mandatory ObjC-style method descriptor @encode sequence.
+ std::string typeAtEncoding =
+ CGM.getContext().getObjCEncodingForBlock(blockInfo.getBlockExpr());
+ elements.push_back(llvm::ConstantExpr::getBitCast(
+ CGM.GetAddrOfConstantCString(typeAtEncoding), i8p));
+
+ // GC layout.
+ if (C.getLangOpts().ObjC1)
+ elements.push_back(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo));
+ else
+ elements.push_back(llvm::Constant::getNullValue(i8p));
+
+ llvm::Constant *init = llvm::ConstantStruct::getAnon(elements);
+
+ llvm::GlobalVariable *global =
+ new llvm::GlobalVariable(CGM.getModule(), init->getType(), true,
+ llvm::GlobalValue::InternalLinkage,
+ init, "__block_descriptor_tmp");
+
+ return llvm::ConstantExpr::getBitCast(global, CGM.getBlockDescriptorType());
+}
+
+/*
+ Purely notional variadic template describing the layout of a block.
+
+ template <class _ResultType, class... _ParamTypes, class... _CaptureTypes>
+ struct Block_literal {
+ /// Initialized to one of:
+ /// extern void *_NSConcreteStackBlock[];
+ /// extern void *_NSConcreteGlobalBlock[];
+ ///
+ /// In theory, we could start one off malloc'ed by setting
+ /// BLOCK_NEEDS_FREE, giving it a refcount of 1, and using
+ /// this isa:
+ /// extern void *_NSConcreteMallocBlock[];
+ struct objc_class *isa;
+
+ /// These are the flags (with corresponding bit number) that the
+ /// compiler is actually supposed to know about.
+ /// 25. BLOCK_HAS_COPY_DISPOSE - indicates that the block
+ /// descriptor provides copy and dispose helper functions
+ /// 26. BLOCK_HAS_CXX_OBJ - indicates that there's a captured
+ /// object with a nontrivial destructor or copy constructor
+ /// 28. BLOCK_IS_GLOBAL - indicates that the block is allocated
+ /// as global memory
+ /// 29. BLOCK_USE_STRET - indicates that the block function
+ /// uses stret, which objc_msgSend needs to know about
+ /// 30. BLOCK_HAS_SIGNATURE - indicates that the block has an
+ /// @encoded signature string
+ /// And we're not supposed to manipulate these:
+ /// 24. BLOCK_NEEDS_FREE - indicates that the block has been moved
+ /// to malloc'ed memory
+    ///   27. BLOCK_IS_GC - indicates that the block has been moved
+    ///     to GC-allocated memory
+ /// Additionally, the bottom 16 bits are a reference count which
+ /// should be zero on the stack.
+ int flags;
+
+ /// Reserved; should be zero-initialized.
+ int reserved;
+
+ /// Function pointer generated from block literal.
+ _ResultType (*invoke)(Block_literal *, _ParamTypes...);
+
+ /// Block description metadata generated from block literal.
+ struct Block_descriptor *block_descriptor;
+
+ /// Captured values follow.
+    _CaptureTypes captures...;
+ };
+ */
+
+/// The number of fields in a block header.
+const unsigned BlockHeaderSize = 5;
+
+namespace {
+ /// A chunk of data that we actually have to capture in the block.
+ struct BlockLayoutChunk {
+ CharUnits Alignment;
+ CharUnits Size;
+ const BlockDecl::Capture *Capture; // null for 'this'
+ llvm::Type *Type;
+
+ BlockLayoutChunk(CharUnits align, CharUnits size,
+ const BlockDecl::Capture *capture,
+ llvm::Type *type)
+ : Alignment(align), Size(size), Capture(capture), Type(type) {}
+
+ /// Tell the block info that this chunk has the given field index.
+ void setIndex(CGBlockInfo &info, unsigned index) {
+ if (!Capture)
+ info.CXXThisIndex = index;
+ else
+ info.Captures[Capture->getVariable()]
+ = CGBlockInfo::Capture::makeIndex(index);
+ }
+ };
+
+ /// Order by descending alignment.
+ bool operator<(const BlockLayoutChunk &left, const BlockLayoutChunk &right) {
+ return left.Alignment > right.Alignment;
+ }
+}
+
+/// Determines if the given type is safe for constant capture in C++.
+static bool isSafeForCXXConstantCapture(QualType type) {
+ const RecordType *recordType =
+ type->getBaseElementTypeUnsafe()->getAs<RecordType>();
+
+ // Only records can be unsafe.
+ if (!recordType) return true;
+
+ const CXXRecordDecl *record = cast<CXXRecordDecl>(recordType->getDecl());
+
+ // Maintain semantics for classes with non-trivial dtors or copy ctors.
+ if (!record->hasTrivialDestructor()) return false;
+ if (!record->hasTrivialCopyConstructor()) return false;
+
+ // Otherwise, we just have to make sure there aren't any mutable
+ // fields that might have changed since initialization.
+ return !record->hasMutableFields();
+}
+
+/// It is illegal to modify a const object after initialization.
+/// Therefore, if a const object has a constant initializer, we don't
+/// actually need to keep storage for it in the block; we'll just
+/// rematerialize it at the start of the block function. This is
+/// acceptable because we make no promises about address stability of
+/// captured variables.
+static llvm::Constant *tryCaptureAsConstant(CodeGenModule &CGM,
+ CodeGenFunction *CGF,
+ const VarDecl *var) {
+ QualType type = var->getType();
+
+ // We can only do this if the variable is const.
+ if (!type.isConstQualified()) return 0;
+
+ // Furthermore, in C++ we have to worry about mutable fields:
+ // C++ [dcl.type.cv]p4:
+ // Except that any class member declared mutable can be
+ // modified, any attempt to modify a const object during its
+ // lifetime results in undefined behavior.
+ if (CGM.getLangOpts().CPlusPlus && !isSafeForCXXConstantCapture(type))
+ return 0;
+
+ // If the variable doesn't have any initializer (shouldn't this be
+ // invalid?), it's not clear what we should do. Maybe capture as
+ // zero?
+ const Expr *init = var->getInit();
+ if (!init) return 0;
+
+ return CGM.EmitConstantInit(*var, CGF);
+}
+
+/// Get the low bit of a nonzero character count. This is the
+/// alignment of the nth byte if the 0th byte is universally aligned.
+static CharUnits getLowBit(CharUnits v) {
+ return CharUnits::fromQuantity(v.getQuantity() & (~v.getQuantity() + 1));
+}
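+// For example, getLowBit(CharUnits::fromQuantity(12)) is 4: 12 is 0b1100 and
+// 12 & -12 == 4, so a byte placed 12 bytes past a maximally aligned base is
+// guaranteed (only) 4-byte alignment.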
+
+static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
+ SmallVectorImpl<llvm::Type*> &elementTypes) {
+ ASTContext &C = CGM.getContext();
+
+ // The header is basically a 'struct { void *; int; int; void *; void *; }'.
+ CharUnits ptrSize, ptrAlign, intSize, intAlign;
+ llvm::tie(ptrSize, ptrAlign) = C.getTypeInfoInChars(C.VoidPtrTy);
+ llvm::tie(intSize, intAlign) = C.getTypeInfoInChars(C.IntTy);
+
+ // Are there crazy embedded platforms where this isn't true?
+ assert(intSize <= ptrSize && "layout assumptions horribly violated");
+
+ CharUnits headerSize = ptrSize;
+ if (2 * intSize < ptrAlign) headerSize += ptrSize;
+ else headerSize += 2 * intSize;
+ headerSize += 2 * ptrSize;
+
+ info.BlockAlign = ptrAlign;
+ info.BlockSize = headerSize;
+
+ assert(elementTypes.empty());
+ llvm::Type *i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
+ llvm::Type *intTy = CGM.getTypes().ConvertType(C.IntTy);
+ elementTypes.push_back(i8p);
+ elementTypes.push_back(intTy);
+ elementTypes.push_back(intTy);
+ elementTypes.push_back(i8p);
+ elementTypes.push_back(CGM.getBlockDescriptorType());
+
+ assert(elementTypes.size() == BlockHeaderSize);
+}
+
+/// Compute the layout of the given block. Attempts to lay the block
+/// out with minimal space requirements.
+static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
+ CGBlockInfo &info) {
+ ASTContext &C = CGM.getContext();
+ const BlockDecl *block = info.getBlockDecl();
+
+ SmallVector<llvm::Type*, 8> elementTypes;
+ initializeForBlockHeader(CGM, info, elementTypes);
+
+ if (!block->hasCaptures()) {
+ info.StructureType =
+ llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
+ info.CanBeGlobal = true;
+ return;
+ }
+
+ // Collect the layout chunks.
+ SmallVector<BlockLayoutChunk, 16> layout;
+ layout.reserve(block->capturesCXXThis() +
+ (block->capture_end() - block->capture_begin()));
+
+ CharUnits maxFieldAlign;
+
+ // First, 'this'.
+ if (block->capturesCXXThis()) {
+ const DeclContext *DC = block->getDeclContext();
+ for (; isa<BlockDecl>(DC); DC = cast<BlockDecl>(DC)->getDeclContext())
+ ;
+ QualType thisType;
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC))
+ thisType = C.getPointerType(C.getRecordType(RD));
+ else
+ thisType = cast<CXXMethodDecl>(DC)->getThisType(C);
+
+ llvm::Type *llvmType = CGM.getTypes().ConvertType(thisType);
+ std::pair<CharUnits,CharUnits> tinfo
+ = CGM.getContext().getTypeInfoInChars(thisType);
+ maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
+
+ layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first, 0, llvmType));
+ }
+
+ // Next, all the block captures.
+ for (BlockDecl::capture_const_iterator ci = block->capture_begin(),
+ ce = block->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+
+ if (ci->isByRef()) {
+ // We have to copy/dispose of the __block reference.
+ info.NeedsCopyDispose = true;
+
+ // Just use void* instead of a pointer to the byref type.
+ QualType byRefPtrTy = C.VoidPtrTy;
+
+ llvm::Type *llvmType = CGM.getTypes().ConvertType(byRefPtrTy);
+ std::pair<CharUnits,CharUnits> tinfo
+ = CGM.getContext().getTypeInfoInChars(byRefPtrTy);
+ maxFieldAlign = std::max(maxFieldAlign, tinfo.second);
+
+ layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first,
+ &*ci, llvmType));
+ continue;
+ }
+
+ // Otherwise, build a layout chunk with the size and alignment of
+ // the declaration.
+ if (llvm::Constant *constant = tryCaptureAsConstant(CGM, CGF, variable)) {
+ info.Captures[variable] = CGBlockInfo::Capture::makeConstant(constant);
+ continue;
+ }
+
+ // If we have a lifetime qualifier, honor it for capture purposes.
+ // That includes *not* copying it if it's __unsafe_unretained.
+ if (Qualifiers::ObjCLifetime lifetime
+ = variable->getType().getObjCLifetime()) {
+ switch (lifetime) {
+ case Qualifiers::OCL_None: llvm_unreachable("impossible");
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ info.NeedsCopyDispose = true;
+ }
+
+ // Block pointers require copy/dispose. So do Objective-C pointers.
+ } else if (variable->getType()->isObjCRetainableType()) {
+ info.NeedsCopyDispose = true;
+
+ // So do types that require non-trivial copy construction.
+ } else if (ci->hasCopyExpr()) {
+ info.NeedsCopyDispose = true;
+ info.HasCXXObject = true;
+
+ // And so do types with destructors.
+ } else if (CGM.getLangOpts().CPlusPlus) {
+ if (const CXXRecordDecl *record =
+ variable->getType()->getAsCXXRecordDecl()) {
+ if (!record->hasTrivialDestructor()) {
+ info.HasCXXObject = true;
+ info.NeedsCopyDispose = true;
+ }
+ }
+ }
+
+ QualType VT = variable->getType();
+ CharUnits size = C.getTypeSizeInChars(VT);
+ CharUnits align = C.getDeclAlign(variable);
+
+ maxFieldAlign = std::max(maxFieldAlign, align);
+
+ llvm::Type *llvmType =
+ CGM.getTypes().ConvertTypeForMem(VT);
+
+ layout.push_back(BlockLayoutChunk(align, size, &*ci, llvmType));
+ }
+
+ // If that was everything, we're done here.
+ if (layout.empty()) {
+ info.StructureType =
+ llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
+ info.CanBeGlobal = true;
+ return;
+ }
+
+ // Sort the layout by alignment. We have to use a stable sort here
+ // to get reproducible results. There should probably be an
+ // llvm::array_pod_stable_sort.
+ std::stable_sort(layout.begin(), layout.end());
+
+ CharUnits &blockSize = info.BlockSize;
+ info.BlockAlign = std::max(maxFieldAlign, info.BlockAlign);
+
+ // Assuming that the first byte in the header is maximally aligned,
+ // get the alignment of the first byte following the header.
+ CharUnits endAlign = getLowBit(blockSize);
+
+ // If the end of the header isn't satisfactorily aligned for the
+ // maximum thing, look for things that are okay with the header-end
+ // alignment, and keep appending them until we get something that's
+ // aligned right. This algorithm is only guaranteed optimal if
+ // that condition is satisfied at some point; otherwise we can get
+ // things like:
+ // header // next byte has alignment 4
+ // something_with_size_5; // next byte has alignment 1
+ // something_with_alignment_8;
+ // which has 7 bytes of padding, as opposed to the naive solution
+ // which might have less (?).
+ if (endAlign < maxFieldAlign) {
+ SmallVectorImpl<BlockLayoutChunk>::iterator
+ li = layout.begin() + 1, le = layout.end();
+
+ // Look for something that the header end is already
+ // satisfactorily aligned for.
+ for (; li != le && endAlign < li->Alignment; ++li)
+ ;
+
+ // If we found something that's naturally aligned for the end of
+ // the header, keep adding things...
+ if (li != le) {
+ SmallVectorImpl<BlockLayoutChunk>::iterator first = li;
+ for (; li != le; ++li) {
+ assert(endAlign >= li->Alignment);
+
+ li->setIndex(info, elementTypes.size());
+ elementTypes.push_back(li->Type);
+ blockSize += li->Size;
+ endAlign = getLowBit(blockSize);
+
+ // ...until we get to the alignment of the maximum field.
+ if (endAlign >= maxFieldAlign)
+ break;
+ }
+
+ // Don't re-append everything we just appended.
+ layout.erase(first, li);
+ }
+ }
+
+ // At this point, we just have to add padding if the end align still
+ // isn't aligned right.
+ if (endAlign < maxFieldAlign) {
+ CharUnits padding = maxFieldAlign - endAlign;
+
+ elementTypes.push_back(llvm::ArrayType::get(CGM.Int8Ty,
+ padding.getQuantity()));
+ blockSize += padding;
+
+ endAlign = getLowBit(blockSize);
+ assert(endAlign >= maxFieldAlign);
+ }
+
+ // Slam everything else on now. This works because they have
+ // strictly decreasing alignment and we expect that size is always a
+ // multiple of alignment.
+ for (SmallVectorImpl<BlockLayoutChunk>::iterator
+ li = layout.begin(), le = layout.end(); li != le; ++li) {
+ assert(endAlign >= li->Alignment);
+ li->setIndex(info, elementTypes.size());
+ elementTypes.push_back(li->Type);
+ blockSize += li->Size;
+ endAlign = getLowBit(blockSize);
+ }
+
+ info.StructureType =
+ llvm::StructType::get(CGM.getLLVMContext(), elementTypes, true);
+}
+
+/// Enter the scope of a block. This should be run at the entrance to
+/// a full-expression so that the block's cleanups are pushed at the
+/// right place in the stack.
+static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
+ // Allocate the block info and place it at the head of the list.
+ CGBlockInfo &blockInfo =
+ *new CGBlockInfo(block, CGF.CurFn->getName());
+ blockInfo.NextBlockInfo = CGF.FirstBlockInfo;
+ CGF.FirstBlockInfo = &blockInfo;
+
+ // Compute information about the layout, etc., of this block,
+ // pushing cleanups as necessary.
+ computeBlockInfo(CGF.CGM, &CGF, blockInfo);
+
+ // Nothing else to do if it can be global.
+ if (blockInfo.CanBeGlobal) return;
+
+ // Make the allocation for the block.
+ blockInfo.Address =
+ CGF.CreateTempAlloca(blockInfo.StructureType, "block");
+ blockInfo.Address->setAlignment(blockInfo.BlockAlign.getQuantity());
+
+ // If there are cleanups to emit, enter them (but inactive).
+ if (!blockInfo.NeedsCopyDispose) return;
+
+ // Walk through the captures (in order) and find the ones not
+ // captured by constant.
+ for (BlockDecl::capture_const_iterator ci = block->capture_begin(),
+ ce = block->capture_end(); ci != ce; ++ci) {
+ // Ignore __block captures; there's nothing special in the
+ // on-stack block that we need to do for them.
+ if (ci->isByRef()) continue;
+
+ // Ignore variables that are constant-captured.
+ const VarDecl *variable = ci->getVariable();
+ CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) continue;
+
+ // Ignore objects that aren't destructed.
+ QualType::DestructionKind dtorKind =
+ variable->getType().isDestructedType();
+ if (dtorKind == QualType::DK_none) continue;
+
+ CodeGenFunction::Destroyer *destroyer;
+
+ // Block captures count as local values and have imprecise semantics.
+    // They also can't be arrays, so we don't need to worry about that.
+ if (dtorKind == QualType::DK_objc_strong_lifetime) {
+ destroyer = CodeGenFunction::destroyARCStrongImprecise;
+ } else {
+ destroyer = CGF.getDestroyer(dtorKind);
+ }
+
+ // GEP down to the address.
+ llvm::Value *addr = CGF.Builder.CreateStructGEP(blockInfo.Address,
+ capture.getIndex());
+
+ // We can use that GEP as the dominating IP.
+ if (!blockInfo.DominatingIP)
+ blockInfo.DominatingIP = cast<llvm::Instruction>(addr);
+
+ CleanupKind cleanupKind = InactiveNormalCleanup;
+ bool useArrayEHCleanup = CGF.needsEHCleanup(dtorKind);
+ if (useArrayEHCleanup)
+ cleanupKind = InactiveNormalAndEHCleanup;
+
+ CGF.pushDestroy(cleanupKind, addr, variable->getType(),
+ destroyer, useArrayEHCleanup);
+
+ // Remember where that cleanup was.
+ capture.setCleanup(CGF.EHStack.stable_begin());
+ }
+}
+
+/// Enter a full-expression with a non-trivial number of objects to
+/// clean up. This is in this file because, at the moment, the only
+/// kind of cleanup object is a BlockDecl*.
+void CodeGenFunction::enterNonTrivialFullExpression(const ExprWithCleanups *E) {
+ assert(E->getNumObjects() != 0);
+ ArrayRef<ExprWithCleanups::CleanupObject> cleanups = E->getObjects();
+ for (ArrayRef<ExprWithCleanups::CleanupObject>::iterator
+ i = cleanups.begin(), e = cleanups.end(); i != e; ++i) {
+ enterBlockScope(*this, *i);
+ }
+}
+
+/// Find the layout for the given block in a linked list and remove it.
+static CGBlockInfo *findAndRemoveBlockInfo(CGBlockInfo **head,
+ const BlockDecl *block) {
+ while (true) {
+ assert(head && *head);
+ CGBlockInfo *cur = *head;
+
+ // If this is the block we're looking for, splice it out of the list.
+ if (cur->getBlockDecl() == block) {
+ *head = cur->NextBlockInfo;
+ return cur;
+ }
+
+ head = &cur->NextBlockInfo;
+ }
+}
+
+/// Destroy a chain of block layouts.
+void CodeGenFunction::destroyBlockInfos(CGBlockInfo *head) {
+ assert(head && "destroying an empty chain");
+ do {
+ CGBlockInfo *cur = head;
+ head = cur->NextBlockInfo;
+ delete cur;
+ } while (head != 0);
+}
+
+/// Emit a block literal expression in the current function.
+llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
+ // If the block has no captures, we won't have a pre-computed
+ // layout for it.
+ if (!blockExpr->getBlockDecl()->hasCaptures()) {
+ CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName());
+ computeBlockInfo(CGM, this, blockInfo);
+ blockInfo.BlockExpression = blockExpr;
+ return EmitBlockLiteral(blockInfo);
+ }
+
+ // Find the block info for this block and take ownership of it.
+ OwningPtr<CGBlockInfo> blockInfo;
+ blockInfo.reset(findAndRemoveBlockInfo(&FirstBlockInfo,
+ blockExpr->getBlockDecl()));
+
+ blockInfo->BlockExpression = blockExpr;
+ return EmitBlockLiteral(*blockInfo);
+}
+
+llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
+ // Using the computed layout, generate the actual block function.
+ bool isLambdaConv = blockInfo.getBlockDecl()->isConversionFromLambda();
+ llvm::Constant *blockFn
+ = CodeGenFunction(CGM).GenerateBlockFunction(CurGD, blockInfo,
+ CurFuncDecl, LocalDeclMap,
+ isLambdaConv);
+ blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
+
+ // If there is nothing to capture, we can emit this as a global block.
+ if (blockInfo.CanBeGlobal)
+ return buildGlobalBlock(CGM, blockInfo, blockFn);
+
+ // Otherwise, we have to emit this as a local block.
+
+ llvm::Constant *isa = CGM.getNSConcreteStackBlock();
+ isa = llvm::ConstantExpr::getBitCast(isa, VoidPtrTy);
+
+ // Build the block descriptor.
+ llvm::Constant *descriptor = buildBlockDescriptor(CGM, blockInfo);
+
+ llvm::AllocaInst *blockAddr = blockInfo.Address;
+ assert(blockAddr && "block has no address!");
+
+ // Compute the initial on-stack block flags.
+ BlockFlags flags = BLOCK_HAS_SIGNATURE;
+ if (blockInfo.NeedsCopyDispose) flags |= BLOCK_HAS_COPY_DISPOSE;
+ if (blockInfo.HasCXXObject) flags |= BLOCK_HAS_CXX_OBJ;
+ if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET;
+
+ // Initialize the block literal.
+ Builder.CreateStore(isa, Builder.CreateStructGEP(blockAddr, 0, "block.isa"));
+ Builder.CreateStore(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
+ Builder.CreateStructGEP(blockAddr, 1, "block.flags"));
+ Builder.CreateStore(llvm::ConstantInt::get(IntTy, 0),
+ Builder.CreateStructGEP(blockAddr, 2, "block.reserved"));
+ Builder.CreateStore(blockFn, Builder.CreateStructGEP(blockAddr, 3,
+ "block.invoke"));
+ Builder.CreateStore(descriptor, Builder.CreateStructGEP(blockAddr, 4,
+ "block.descriptor"));
+
+ // Finally, capture all the values into the block.
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ // First, 'this'.
+ if (blockDecl->capturesCXXThis()) {
+ llvm::Value *addr = Builder.CreateStructGEP(blockAddr,
+ blockInfo.CXXThisIndex,
+ "block.captured-this.addr");
+ Builder.CreateStore(LoadCXXThis(), addr);
+ }
+
+ // Next, captured variables.
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+
+ // Ignore constant captures.
+ if (capture.isConstant()) continue;
+
+ QualType type = variable->getType();
+
+ // This will be a [[type]]*, except that a byref entry will just be
+ // an i8**.
+ llvm::Value *blockField =
+ Builder.CreateStructGEP(blockAddr, capture.getIndex(),
+ "block.captured");
+
+ // Compute the address of the thing we're going to move into the
+ // block literal.
+ llvm::Value *src;
+ if (ci->isNested()) {
+ // We need to use the capture from the enclosing block.
+ const CGBlockInfo::Capture &enclosingCapture =
+ BlockInfo->getCapture(variable);
+
+      // This is a [[type]]*, except that a byref entry will just be an i8**.
+ src = Builder.CreateStructGEP(LoadBlockStruct(),
+ enclosingCapture.getIndex(),
+ "block.capture.addr");
+ } else if (blockDecl->isConversionFromLambda()) {
+ // The lambda capture in a lambda's conversion-to-block-pointer is
+ // special; we'll simply emit it directly.
+ src = 0;
+ } else {
+ // This is a [[type]]*.
+ src = LocalDeclMap[variable];
+ }
+
+ // For byrefs, we just write the pointer to the byref struct into
+ // the block field. There's no need to chase the forwarding
+ // pointer at this point, since we're building something that will
+ // live a shorter life than the stack byref anyway.
+ if (ci->isByRef()) {
+ // Get a void* that points to the byref struct.
+ if (ci->isNested())
+ src = Builder.CreateLoad(src, "byref.capture");
+ else
+ src = Builder.CreateBitCast(src, VoidPtrTy);
+
+ // Write that void* into the capture field.
+ Builder.CreateStore(src, blockField);
+
+ // If we have a copy constructor, evaluate that into the block field.
+ } else if (const Expr *copyExpr = ci->getCopyExpr()) {
+ if (blockDecl->isConversionFromLambda()) {
+ // If we have a lambda conversion, emit the expression
+ // directly into the block instead.
+ CharUnits Align = getContext().getTypeAlignInChars(type);
+ AggValueSlot Slot =
+ AggValueSlot::forAddr(blockField, Align, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ EmitAggExpr(copyExpr, Slot);
+ } else {
+ EmitSynthesizedCXXCopyCtor(blockField, src, copyExpr);
+ }
+
+ // If it's a reference variable, copy the reference into the block field.
+ } else if (type->isReferenceType()) {
+ Builder.CreateStore(Builder.CreateLoad(src, "ref.val"), blockField);
+
+ // Otherwise, fake up a POD copy into the block field.
+ } else {
+ // Fake up a new variable so that EmitScalarInit doesn't think
+ // we're referring to the variable in its own initializer.
+ ImplicitParamDecl blockFieldPseudoVar(/*DC*/ 0, SourceLocation(),
+ /*name*/ 0, type);
+
+ // We use one of these or the other depending on whether the
+ // reference is nested.
+ DeclRefExpr declRef(const_cast<VarDecl*>(variable),
+ /*refersToEnclosing*/ ci->isNested(), type,
+ VK_LValue, SourceLocation());
+
+ ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
+ &declRef, VK_RValue);
+ EmitExprAsInit(&l2r, &blockFieldPseudoVar,
+ MakeAddrLValue(blockField, type,
+ getContext().getDeclAlign(variable)),
+ /*captured by init*/ false);
+ }
+
+ // Activate the cleanup if layout pushed one.
+ if (!ci->isByRef()) {
+ EHScopeStack::stable_iterator cleanup = capture.getCleanup();
+ if (cleanup.isValid())
+ ActivateCleanupBlock(cleanup, blockInfo.DominatingIP);
+ }
+ }
+
+ // Cast to the converted block-pointer type, which happens (somewhat
+ // unfortunately) to be a pointer to function type.
+ llvm::Value *result =
+ Builder.CreateBitCast(blockAddr,
+ ConvertType(blockInfo.getBlockExpr()->getType()));
+
+ return result;
+}
+
+
+llvm::Type *CodeGenModule::getBlockDescriptorType() {
+ if (BlockDescriptorType)
+ return BlockDescriptorType;
+
+ llvm::Type *UnsignedLongTy =
+ getTypes().ConvertType(getContext().UnsignedLongTy);
+
+ // struct __block_descriptor {
+ // unsigned long reserved;
+ // unsigned long block_size;
+ //
+ // // later, the following will be added
+ //
+ // struct {
+ // void (*copyHelper)();
+  //      void (*disposeHelper)();
+ // } helpers; // !!! optional
+ //
+ // const char *signature; // the block signature
+ // const char *layout; // reserved
+ // };
+ BlockDescriptorType =
+ llvm::StructType::create("struct.__block_descriptor",
+ UnsignedLongTy, UnsignedLongTy, NULL);
+
+ // Now form a pointer to that.
+ BlockDescriptorType = llvm::PointerType::getUnqual(BlockDescriptorType);
+ return BlockDescriptorType;
+}
+
+llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
+ if (GenericBlockLiteralType)
+ return GenericBlockLiteralType;
+
+ llvm::Type *BlockDescPtrTy = getBlockDescriptorType();
+
+ // struct __block_literal_generic {
+ // void *__isa;
+ // int __flags;
+ // int __reserved;
+ // void (*__invoke)(void *);
+ // struct __block_descriptor *__descriptor;
+ // };
+ GenericBlockLiteralType =
+ llvm::StructType::create("struct.__block_literal_generic",
+ VoidPtrTy, IntTy, IntTy, VoidPtrTy,
+ BlockDescPtrTy, NULL);
+
+ return GenericBlockLiteralType;
+}
+
+
+RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
+ ReturnValueSlot ReturnValue) {
+ const BlockPointerType *BPT =
+ E->getCallee()->getType()->getAs<BlockPointerType>();
+
+ llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+
+ // Get a pointer to the generic block literal.
+ llvm::Type *BlockLiteralTy =
+ llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());
+
+ // Bitcast the callee to a block literal.
+ llvm::Value *BlockLiteral =
+ Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");
+
+ // Get the function pointer from the literal.
+ llvm::Value *FuncPtr = Builder.CreateStructGEP(BlockLiteral, 3);
+
+ BlockLiteral = Builder.CreateBitCast(BlockLiteral, VoidPtrTy);
+
+ // Add the block literal.
+ CallArgList Args;
+ Args.add(RValue::get(BlockLiteral), getContext().VoidPtrTy);
+
+ QualType FnType = BPT->getPointeeType();
+
+ // And the rest of the arguments.
+ EmitCallArgs(Args, FnType->getAs<FunctionProtoType>(),
+ E->arg_begin(), E->arg_end());
+
+ // Load the function.
+ llvm::Value *Func = Builder.CreateLoad(FuncPtr);
+
+ const FunctionType *FuncTy = FnType->castAs<FunctionType>();
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().arrangeFunctionCall(Args, FuncTy);
+
+ // Cast the function pointer to the right type.
+ llvm::Type *BlockFTy = CGM.getTypes().GetFunctionType(FnInfo);
+
+ llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
+ Func = Builder.CreateBitCast(Func, BlockFTyPtr);
+
+ // And call the block.
+ return EmitCall(FnInfo, Func, ReturnValue, Args);
+}
+
+llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
+ bool isByRef) {
+ assert(BlockInfo && "evaluating block ref without block information?");
+ const CGBlockInfo::Capture &capture = BlockInfo->getCapture(variable);
+
+ // Handle constant captures.
+ if (capture.isConstant()) return LocalDeclMap[variable];
+
+ llvm::Value *addr =
+ Builder.CreateStructGEP(LoadBlockStruct(), capture.getIndex(),
+ "block.capture.addr");
+
+ if (isByRef) {
+ // addr should be a void** right now. Load, then cast the result
+ // to byref*.
+
+ addr = Builder.CreateLoad(addr);
+ llvm::PointerType *byrefPointerType
+ = llvm::PointerType::get(BuildByRefType(variable), 0);
+ addr = Builder.CreateBitCast(addr, byrefPointerType,
+ "byref.addr");
+
+ // Follow the forwarding pointer.
+ addr = Builder.CreateStructGEP(addr, 1, "byref.forwarding");
+ addr = Builder.CreateLoad(addr, "byref.addr.forwarded");
+
+ // Cast back to byref* and GEP over to the actual object.
+ addr = Builder.CreateBitCast(addr, byrefPointerType);
+ addr = Builder.CreateStructGEP(addr, getByRefValueLLVMField(variable),
+ variable->getNameAsString());
+ }
+
+ if (variable->getType()->isReferenceType())
+ addr = Builder.CreateLoad(addr, "ref.tmp");
+
+ return addr;
+}
+
+llvm::Constant *
+CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *blockExpr,
+ const char *name) {
+ CGBlockInfo blockInfo(blockExpr->getBlockDecl(), name);
+ blockInfo.BlockExpression = blockExpr;
+
+ // Compute information about the layout, etc., of this block.
+ computeBlockInfo(*this, 0, blockInfo);
+
+ // Using that metadata, generate the actual block function.
+ llvm::Constant *blockFn;
+ {
+ llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
+ blockFn = CodeGenFunction(*this).GenerateBlockFunction(GlobalDecl(),
+ blockInfo,
+ 0, LocalDeclMap,
+ false);
+ }
+ blockFn = llvm::ConstantExpr::getBitCast(blockFn, VoidPtrTy);
+
+ return buildGlobalBlock(*this, blockInfo, blockFn);
+}
+
+static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo,
+ llvm::Constant *blockFn) {
+ assert(blockInfo.CanBeGlobal);
+
+ // Generate the constants for the block literal initializer.
+ llvm::Constant *fields[BlockHeaderSize];
+
+ // isa
+ fields[0] = CGM.getNSConcreteGlobalBlock();
+
+ // __flags
+ BlockFlags flags = BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE;
+ if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET;
+
+ fields[1] = llvm::ConstantInt::get(CGM.IntTy, flags.getBitMask());
+
+ // Reserved
+ fields[2] = llvm::Constant::getNullValue(CGM.IntTy);
+
+ // Function
+ fields[3] = blockFn;
+
+ // Descriptor
+ fields[4] = buildBlockDescriptor(CGM, blockInfo);
+
+ llvm::Constant *init = llvm::ConstantStruct::getAnon(fields);
+
+ llvm::GlobalVariable *literal =
+ new llvm::GlobalVariable(CGM.getModule(),
+ init->getType(),
+ /*constant*/ true,
+ llvm::GlobalVariable::InternalLinkage,
+ init,
+ "__block_literal_global");
+ literal->setAlignment(blockInfo.BlockAlign.getQuantity());
+
+ // Return a constant of the appropriately-casted type.
+ llvm::Type *requiredType =
+ CGM.getTypes().ConvertType(blockInfo.getBlockExpr()->getType());
+ return llvm::ConstantExpr::getBitCast(literal, requiredType);
+}
+
+llvm::Function *
+CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
+ const CGBlockInfo &blockInfo,
+ const Decl *outerFnDecl,
+ const DeclMapTy &ldm,
+ bool IsLambdaConversionToBlock) {
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ // Check if we should generate debug info for this block function.
+ if (CGM.getModuleDebugInfo())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+ BlockInfo = &blockInfo;
+
+ // Arrange for local static and local extern declarations to appear
+ // to be local to this function as well, in case they're directly
+ // referenced in a block.
+ for (DeclMapTy::const_iterator i = ldm.begin(), e = ldm.end(); i != e; ++i) {
+ const VarDecl *var = dyn_cast<VarDecl>(i->first);
+ if (var && !var->hasLocalStorage())
+ LocalDeclMap[var] = i->second;
+ }
+
+ // Begin building the function declaration.
+
+ // Build the argument list.
+ FunctionArgList args;
+
+ // The first argument is the block pointer. Just take it as a void*
+ // and cast it later.
+ QualType selfTy = getContext().VoidPtrTy;
+ IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor");
+
+ ImplicitParamDecl selfDecl(const_cast<BlockDecl*>(blockDecl),
+ SourceLocation(), II, selfTy);
+ args.push_back(&selfDecl);
+
+ // Now add the rest of the parameters.
+ for (BlockDecl::param_const_iterator i = blockDecl->param_begin(),
+ e = blockDecl->param_end(); i != e; ++i)
+ args.push_back(*i);
+
+ // Create the function declaration.
+ const FunctionProtoType *fnType = blockInfo.getBlockExpr()->getFunctionType();
+ const CGFunctionInfo &fnInfo =
+ CGM.getTypes().arrangeFunctionDeclaration(fnType->getResultType(), args,
+ fnType->getExtInfo(),
+ fnType->isVariadic());
+ if (CGM.ReturnTypeUsesSRet(fnInfo))
+ blockInfo.UsesStret = true;
+
+ llvm::FunctionType *fnLLVMType = CGM.getTypes().GetFunctionType(fnInfo);
+
+ MangleBuffer name;
+ CGM.getBlockMangledName(GD, name, blockDecl);
+ llvm::Function *fn =
+ llvm::Function::Create(fnLLVMType, llvm::GlobalValue::InternalLinkage,
+ name.getString(), &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(blockDecl, fn, fnInfo);
+
+ // Begin generating the function.
+ StartFunction(blockDecl, fnType->getResultType(), fn, fnInfo, args,
+ blockInfo.getBlockExpr()->getBody()->getLocStart());
+ CurFuncDecl = outerFnDecl; // StartFunction sets this to blockDecl
+
+ // Okay. Undo some of what StartFunction did.
+
+ // Pull the 'self' reference out of the local decl map.
+ llvm::Value *blockAddr = LocalDeclMap[&selfDecl];
+ LocalDeclMap.erase(&selfDecl);
+ BlockPointer = Builder.CreateBitCast(blockAddr,
+ blockInfo.StructureType->getPointerTo(),
+ "block");
+
+ // If we have a C++ 'this' reference, go ahead and force it into
+ // existence now.
+ if (blockDecl->capturesCXXThis()) {
+ llvm::Value *addr = Builder.CreateStructGEP(BlockPointer,
+ blockInfo.CXXThisIndex,
+ "block.captured-this");
+ CXXThisValue = Builder.CreateLoad(addr, "this");
+ }
+
+ // LoadObjCSelf() expects there to be an entry for 'self' in LocalDeclMap;
+ // appease it.
+ if (const ObjCMethodDecl *method
+ = dyn_cast_or_null<ObjCMethodDecl>(CurFuncDecl)) {
+ const VarDecl *self = method->getSelfDecl();
+
+ // There might not be a capture for 'self', but if there is...
+ if (blockInfo.Captures.count(self)) {
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(self);
+ llvm::Value *selfAddr = Builder.CreateStructGEP(BlockPointer,
+ capture.getIndex(),
+ "block.captured-self");
+ LocalDeclMap[self] = selfAddr;
+ }
+ }
+
+ // Also force all the constant captures.
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (!capture.isConstant()) continue;
+
+ unsigned align = getContext().getDeclAlign(variable).getQuantity();
+
+ llvm::AllocaInst *alloca =
+ CreateMemTemp(variable->getType(), "block.captured-const");
+ alloca->setAlignment(align);
+
+ Builder.CreateStore(capture.getConstant(), alloca, align);
+
+ LocalDeclMap[variable] = alloca;
+ }
+
+ // Save a spot to insert the debug information for all the DeclRefExprs.
+ llvm::BasicBlock *entry = Builder.GetInsertBlock();
+ llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();
+ --entry_ptr;
+
+ if (IsLambdaConversionToBlock)
+ EmitLambdaBlockInvokeBody();
+ else
+ EmitStmt(blockDecl->getBody());
+
+ // Remember where we were...
+ llvm::BasicBlock *resume = Builder.GetInsertBlock();
+
+ // Go back to the entry.
+ ++entry_ptr;
+ Builder.SetInsertPoint(entry, entry_ptr);
+
+ // Emit debug information for all the DeclRefExprs.
+ // FIXME: also for 'this'
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ DI->EmitLocation(Builder, variable->getLocation());
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) {
+ DI->EmitDeclareOfAutoVariable(variable, LocalDeclMap[variable],
+ Builder);
+ continue;
+ }
+
+ DI->EmitDeclareOfBlockDeclRefVariable(variable, BlockPointer,
+ Builder, blockInfo);
+ }
+ }
+
+ // And resume where we left off.
+ if (resume == 0)
+ Builder.ClearInsertionPoint();
+ else
+ Builder.SetInsertPoint(resume);
+
+ FinishFunction(cast<CompoundStmt>(blockDecl->getBody())->getRBracLoc());
+
+ return fn;
+}
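The function emitted above is the block's invoke function: it receives the block literal itself through the implicit selfDecl parameter and immediately bitcasts it to blockInfo.StructureType to reach the captures. For orientation, a rough sketch of the runtime-level layout that structure mirrors, per the blocks ABI (field names are illustrative, not taken from this file):

    struct Block_descriptor {
      unsigned long reserved;
      unsigned long size;       // size of the literal, including captures
      // copy/dispose helper pointers follow only when BLOCK_HAS_COPY_DISPOSE is set
    };

    struct Block_layout {
      void *isa;                // e.g. _NSConcreteStackBlock or _NSConcreteGlobalBlock
      int flags;                // BLOCK_HAS_COPY_DISPOSE, BLOCK_USE_STRET, ...
      int reserved;
      void (*invoke)(void *literal /* , then the block's declared parameters */);
      struct Block_descriptor *descriptor;
      // captured variables follow, at the offsets recorded in CGBlockInfo::Captures
    };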
+
+/*
+ notes.push_back(HelperInfo());
+ HelperInfo &note = notes.back();
+ note.index = capture.getIndex();
+ note.RequiresCopying = (ci->hasCopyExpr() || BlockRequiresCopying(type));
+ note.cxxbar_import = ci->getCopyExpr();
+
+ if (ci->isByRef()) {
+ note.flag = BLOCK_FIELD_IS_BYREF;
+ if (type.isObjCGCWeak())
+ note.flag |= BLOCK_FIELD_IS_WEAK;
+ } else if (type->isBlockPointerType()) {
+ note.flag = BLOCK_FIELD_IS_BLOCK;
+ } else {
+ note.flag = BLOCK_FIELD_IS_OBJECT;
+ }
+ */
+
+
+
+llvm::Constant *
+CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
+ ASTContext &C = getContext();
+
+ FunctionArgList args;
+ ImplicitParamDecl dstDecl(0, SourceLocation(), 0, C.VoidPtrTy);
+ args.push_back(&dstDecl);
+ ImplicitParamDecl srcDecl(0, SourceLocation(), 0, C.VoidPtrTy);
+ args.push_back(&srcDecl);
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
+
+  // FIXME: it would be nice if these could be merged with other helpers
+  // that have identical semantics.
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__copy_helper_block_", &CGM.getModule());
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__copy_helper_block_");
+
+ // Check if we should generate debug info for this block helper function.
+ if (CGM.getModuleDebugInfo())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+ FunctionDecl *FD = FunctionDecl::Create(C,
+ C.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, C.VoidTy, 0,
+ SC_Static,
+ SC_None,
+ false,
+ false);
+ StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
+
+ llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
+
+ llvm::Value *src = GetAddrOfLocalVar(&srcDecl);
+ src = Builder.CreateLoad(src);
+ src = Builder.CreateBitCast(src, structPtrTy, "block.source");
+
+ llvm::Value *dst = GetAddrOfLocalVar(&dstDecl);
+ dst = Builder.CreateLoad(dst);
+ dst = Builder.CreateBitCast(dst, structPtrTy, "block.dest");
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) continue;
+
+ const Expr *copyExpr = ci->getCopyExpr();
+ BlockFieldFlags flags;
+
+ bool isARCWeakCapture = false;
+
+ if (copyExpr) {
+ assert(!ci->isByRef());
+ // don't bother computing flags
+
+ } else if (ci->isByRef()) {
+ flags = BLOCK_FIELD_IS_BYREF;
+ if (type.isObjCGCWeak())
+ flags |= BLOCK_FIELD_IS_WEAK;
+
+ } else if (type->isObjCRetainableType()) {
+ flags = BLOCK_FIELD_IS_OBJECT;
+ if (type->isBlockPointerType())
+ flags = BLOCK_FIELD_IS_BLOCK;
+
+ // Special rules for ARC captures:
+ if (getLangOpts().ObjCAutoRefCount) {
+ Qualifiers qs = type.getQualifiers();
+
+ // Don't generate special copy logic for a captured object
+ // unless it's __strong or __weak.
+ if (!qs.hasStrongOrWeakObjCLifetime())
+ continue;
+
+ // Support __weak direct captures.
+ if (qs.getObjCLifetime() == Qualifiers::OCL_Weak)
+ isARCWeakCapture = true;
+ }
+ } else {
+ continue;
+ }
+
+ unsigned index = capture.getIndex();
+ llvm::Value *srcField = Builder.CreateStructGEP(src, index);
+ llvm::Value *dstField = Builder.CreateStructGEP(dst, index);
+
+ // If there's an explicit copy expression, we do that.
+ if (copyExpr) {
+ EmitSynthesizedCXXCopyCtor(dstField, srcField, copyExpr);
+ } else if (isARCWeakCapture) {
+ EmitARCCopyWeak(dstField, srcField);
+ } else {
+ llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src");
+ srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
+ llvm::Value *dstAddr = Builder.CreateBitCast(dstField, VoidPtrTy);
+ Builder.CreateCall3(CGM.getBlockObjectAssign(), dstAddr, srcValue,
+ llvm::ConstantInt::get(Int32Ty, flags.getBitMask()));
+ }
+ }
+
+ FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+}
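The helper generated here takes the destination and source literals as void* and, for each non-constant capture, either runs the synthesized C++ copy constructor, uses the ARC __weak entry point, or falls back to _Block_object_assign with the computed field flags. A minimal hand-written equivalent for a block capturing a single object pointer (the literal layout and names are assumed for illustration; the runtime prototype follows the blocks ABI):

    // Hypothetical literal for a block that captures one object pointer.
    struct my_block_literal {
      void *isa; int flags; int reserved;
      void (*invoke)(void *);
      void *descriptor;
      void *captured_obj;       // the capture this helper has to manage
    };

    extern void _Block_object_assign(void *dst, const void *src, int flags);

    void my_copy_helper(void *dst, void *src) {
      struct my_block_literal *d = (struct my_block_literal *)dst;
      struct my_block_literal *s = (struct my_block_literal *)src;
      // 3 == BLOCK_FIELD_IS_OBJECT: let the runtime retain/copy the object.
      _Block_object_assign(&d->captured_obj, s->captured_obj, 3);
    }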
+
+llvm::Constant *
+CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
+ ASTContext &C = getContext();
+
+ FunctionArgList args;
+ ImplicitParamDecl srcDecl(0, SourceLocation(), 0, C.VoidPtrTy);
+ args.push_back(&srcDecl);
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
+
+  // FIXME: We'd like to make these mergeable by content, with
+  // internal linkage.
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__destroy_helper_block_", &CGM.getModule());
+
+ // Check if we should generate debug info for this block destroy function.
+ if (CGM.getModuleDebugInfo())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__destroy_helper_block_");
+
+ FunctionDecl *FD = FunctionDecl::Create(C, C.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, C.VoidTy, 0,
+ SC_Static,
+ SC_None,
+ false, false);
+ StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
+
+ llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
+
+ llvm::Value *src = GetAddrOfLocalVar(&srcDecl);
+ src = Builder.CreateLoad(src);
+ src = Builder.CreateBitCast(src, structPtrTy, "block");
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ CodeGenFunction::RunCleanupsScope cleanups(*this);
+
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+ if (capture.isConstant()) continue;
+
+ BlockFieldFlags flags;
+ const CXXDestructorDecl *dtor = 0;
+
+ bool isARCWeakCapture = false;
+
+ if (ci->isByRef()) {
+ flags = BLOCK_FIELD_IS_BYREF;
+ if (type.isObjCGCWeak())
+ flags |= BLOCK_FIELD_IS_WEAK;
+ } else if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) {
+ if (record->hasTrivialDestructor())
+ continue;
+ dtor = record->getDestructor();
+ } else if (type->isObjCRetainableType()) {
+ flags = BLOCK_FIELD_IS_OBJECT;
+ if (type->isBlockPointerType())
+ flags = BLOCK_FIELD_IS_BLOCK;
+
+ // Special rules for ARC captures.
+ if (getLangOpts().ObjCAutoRefCount) {
+ Qualifiers qs = type.getQualifiers();
+
+ // Don't generate special dispose logic for a captured object
+ // unless it's __strong or __weak.
+ if (!qs.hasStrongOrWeakObjCLifetime())
+ continue;
+
+ // Support __weak direct captures.
+ if (qs.getObjCLifetime() == Qualifiers::OCL_Weak)
+ isARCWeakCapture = true;
+ }
+ } else {
+ continue;
+ }
+
+ unsigned index = capture.getIndex();
+ llvm::Value *srcField = Builder.CreateStructGEP(src, index);
+
+    // If the capture has a nontrivial destructor, push a cleanup to run it.
+ if (dtor) {
+ PushDestructorCleanup(dtor, srcField);
+
+ // If this is a __weak capture, emit the release directly.
+ } else if (isARCWeakCapture) {
+ EmitARCDestroyWeak(srcField);
+
+ // Otherwise we call _Block_object_dispose. It wouldn't be too
+ // hard to just emit this as a cleanup if we wanted to make sure
+ // that things were done in reverse.
+ } else {
+ llvm::Value *value = Builder.CreateLoad(srcField);
+ value = Builder.CreateBitCast(value, VoidPtrTy);
+ BuildBlockRelease(value, flags);
+ }
+ }
+
+ cleanups.ForceCleanup();
+
+ FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+}
+
+namespace {
+
+/// Emits the copy/dispose helper functions for a __block object of id type.
+class ObjectByrefHelpers : public CodeGenModule::ByrefHelpers {
+ BlockFieldFlags Flags;
+
+public:
+ ObjectByrefHelpers(CharUnits alignment, BlockFieldFlags flags)
+ : ByrefHelpers(alignment), Flags(flags) {}
+
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ destField = CGF.Builder.CreateBitCast(destField, CGF.VoidPtrTy);
+
+ srcField = CGF.Builder.CreateBitCast(srcField, CGF.VoidPtrPtrTy);
+ llvm::Value *srcValue = CGF.Builder.CreateLoad(srcField);
+
+ unsigned flags = (Flags | BLOCK_BYREF_CALLER).getBitMask();
+
+ llvm::Value *flagsVal = llvm::ConstantInt::get(CGF.Int32Ty, flags);
+ llvm::Value *fn = CGF.CGM.getBlockObjectAssign();
+ CGF.Builder.CreateCall3(fn, destField, srcValue, flagsVal);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ field = CGF.Builder.CreateBitCast(field, CGF.Int8PtrTy->getPointerTo(0));
+ llvm::Value *value = CGF.Builder.CreateLoad(field);
+
+ CGF.BuildBlockRelease(value, Flags | BLOCK_BYREF_CALLER);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ id.AddInteger(Flags.getBitMask());
+ }
+};
+
+/// Emits the copy/dispose helpers for an ARC __block __weak variable.
+class ARCWeakByrefHelpers : public CodeGenModule::ByrefHelpers {
+public:
+ ARCWeakByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ CGF.EmitARCMoveWeak(destField, srcField);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ CGF.EmitARCDestroyWeak(field);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ // 0 is distinguishable from all pointers and byref flags
+ id.AddInteger(0);
+ }
+};
+
+/// Emits the copy/dispose helpers for an ARC __block __strong variable
+/// that's not of block-pointer type.
+class ARCStrongByrefHelpers : public CodeGenModule::ByrefHelpers {
+public:
+ ARCStrongByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ // Do a "move" by copying the value and then zeroing out the old
+ // variable.
+
+ llvm::LoadInst *value = CGF.Builder.CreateLoad(srcField);
+ value->setAlignment(Alignment.getQuantity());
+
+ llvm::Value *null =
+ llvm::ConstantPointerNull::get(cast<llvm::PointerType>(value->getType()));
+
+ llvm::StoreInst *store = CGF.Builder.CreateStore(value, destField);
+ store->setAlignment(Alignment.getQuantity());
+
+ store = CGF.Builder.CreateStore(null, srcField);
+ store->setAlignment(Alignment.getQuantity());
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ llvm::LoadInst *value = CGF.Builder.CreateLoad(field);
+ value->setAlignment(Alignment.getQuantity());
+
+ CGF.EmitARCRelease(value, /*precise*/ false);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ // 1 is distinguishable from all pointers and byref flags
+ id.AddInteger(1);
+ }
+};
+
+/// Emits the copy/dispose helpers for an ARC __block __strong
+/// variable that's of block-pointer type.
+class ARCStrongBlockByrefHelpers : public CodeGenModule::ByrefHelpers {
+public:
+ ARCStrongBlockByrefHelpers(CharUnits alignment) : ByrefHelpers(alignment) {}
+
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ // Do the copy with objc_retainBlock; that's all that
+ // _Block_object_assign would do anyway, and we'd have to pass the
+ // right arguments to make sure it doesn't get no-op'ed.
+ llvm::LoadInst *oldValue = CGF.Builder.CreateLoad(srcField);
+ oldValue->setAlignment(Alignment.getQuantity());
+
+ llvm::Value *copy = CGF.EmitARCRetainBlock(oldValue, /*mandatory*/ true);
+
+ llvm::StoreInst *store = CGF.Builder.CreateStore(copy, destField);
+ store->setAlignment(Alignment.getQuantity());
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ llvm::LoadInst *value = CGF.Builder.CreateLoad(field);
+ value->setAlignment(Alignment.getQuantity());
+
+ CGF.EmitARCRelease(value, /*precise*/ false);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ // 2 is distinguishable from all pointers and byref flags
+ id.AddInteger(2);
+ }
+};
+
+/// Emits the copy/dispose helpers for a __block variable with a
+/// nontrivial copy constructor or destructor.
+class CXXByrefHelpers : public CodeGenModule::ByrefHelpers {
+ QualType VarType;
+ const Expr *CopyExpr;
+
+public:
+ CXXByrefHelpers(CharUnits alignment, QualType type,
+ const Expr *copyExpr)
+ : ByrefHelpers(alignment), VarType(type), CopyExpr(copyExpr) {}
+
+ bool needsCopy() const { return CopyExpr != 0; }
+ void emitCopy(CodeGenFunction &CGF, llvm::Value *destField,
+ llvm::Value *srcField) {
+ if (!CopyExpr) return;
+ CGF.EmitSynthesizedCXXCopyCtor(destField, srcField, CopyExpr);
+ }
+
+ void emitDispose(CodeGenFunction &CGF, llvm::Value *field) {
+ EHScopeStack::stable_iterator cleanupDepth = CGF.EHStack.stable_begin();
+ CGF.PushDestructorCleanup(VarType, field);
+ CGF.PopCleanupBlocks(cleanupDepth);
+ }
+
+ void profileImpl(llvm::FoldingSetNodeID &id) const {
+ id.AddPointer(VarType.getCanonicalType().getAsOpaquePtr());
+ }
+};
+} // end anonymous namespace
+
+static llvm::Constant *
+generateByrefCopyHelper(CodeGenFunction &CGF,
+ llvm::StructType &byrefType,
+ CodeGenModule::ByrefHelpers &byrefInfo) {
+ ASTContext &Context = CGF.getContext();
+
+ QualType R = Context.VoidTy;
+
+ FunctionArgList args;
+ ImplicitParamDecl dst(0, SourceLocation(), 0, Context.VoidPtrTy);
+ args.push_back(&dst);
+
+ ImplicitParamDecl src(0, SourceLocation(), 0, Context.VoidPtrTy);
+ args.push_back(&src);
+
+ const CGFunctionInfo &FI =
+ CGF.CGM.getTypes().arrangeFunctionDeclaration(R, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
+
+ CodeGenTypes &Types = CGF.CGM.getTypes();
+ llvm::FunctionType *LTy = Types.GetFunctionType(FI);
+
+  // FIXME: We'd like to make these mergeable by content, with
+  // internal linkage.
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__Block_byref_object_copy_", &CGF.CGM.getModule());
+
+ IdentifierInfo *II
+ = &Context.Idents.get("__Block_byref_object_copy_");
+
+ FunctionDecl *FD = FunctionDecl::Create(Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, R, 0,
+ SC_Static,
+ SC_None,
+ false, false);
+
+ CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
+
+ if (byrefInfo.needsCopy()) {
+ llvm::Type *byrefPtrType = byrefType.getPointerTo(0);
+
+ // dst->x
+ llvm::Value *destField = CGF.GetAddrOfLocalVar(&dst);
+ destField = CGF.Builder.CreateLoad(destField);
+ destField = CGF.Builder.CreateBitCast(destField, byrefPtrType);
+ destField = CGF.Builder.CreateStructGEP(destField, 6, "x");
+
+ // src->x
+ llvm::Value *srcField = CGF.GetAddrOfLocalVar(&src);
+ srcField = CGF.Builder.CreateLoad(srcField);
+ srcField = CGF.Builder.CreateBitCast(srcField, byrefPtrType);
+ srcField = CGF.Builder.CreateStructGEP(srcField, 6, "x");
+
+ byrefInfo.emitCopy(CGF, destField, srcField);
+ }
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, CGF.Int8PtrTy);
+}
+
+/// Build the copy helper for a __block variable.
+static llvm::Constant *buildByrefCopyHelper(CodeGenModule &CGM,
+ llvm::StructType &byrefType,
+ CodeGenModule::ByrefHelpers &info) {
+ CodeGenFunction CGF(CGM);
+ return generateByrefCopyHelper(CGF, byrefType, info);
+}
+
+/// Generate code for a __block variable's dispose helper.
+static llvm::Constant *
+generateByrefDisposeHelper(CodeGenFunction &CGF,
+ llvm::StructType &byrefType,
+ CodeGenModule::ByrefHelpers &byrefInfo) {
+ ASTContext &Context = CGF.getContext();
+ QualType R = Context.VoidTy;
+
+ FunctionArgList args;
+ ImplicitParamDecl src(0, SourceLocation(), 0, Context.VoidPtrTy);
+ args.push_back(&src);
+
+ const CGFunctionInfo &FI =
+ CGF.CGM.getTypes().arrangeFunctionDeclaration(R, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
+
+ CodeGenTypes &Types = CGF.CGM.getTypes();
+ llvm::FunctionType *LTy = Types.GetFunctionType(FI);
+
+  // FIXME: We'd like to make these mergeable by content, with
+  // internal linkage.
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__Block_byref_object_dispose_",
+ &CGF.CGM.getModule());
+
+ IdentifierInfo *II
+ = &Context.Idents.get("__Block_byref_object_dispose_");
+
+ FunctionDecl *FD = FunctionDecl::Create(Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, R, 0,
+ SC_Static,
+ SC_None,
+ false, false);
+ CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
+
+ if (byrefInfo.needsDispose()) {
+ llvm::Value *V = CGF.GetAddrOfLocalVar(&src);
+ V = CGF.Builder.CreateLoad(V);
+ V = CGF.Builder.CreateBitCast(V, byrefType.getPointerTo(0));
+ V = CGF.Builder.CreateStructGEP(V, 6, "x");
+
+ byrefInfo.emitDispose(CGF, V);
+ }
+
+ CGF.FinishFunction();
+
+ return llvm::ConstantExpr::getBitCast(Fn, CGF.Int8PtrTy);
+}
+
+/// Build the dispose helper for a __block variable.
+static llvm::Constant *buildByrefDisposeHelper(CodeGenModule &CGM,
+ llvm::StructType &byrefType,
+ CodeGenModule::ByrefHelpers &info) {
+ CodeGenFunction CGF(CGM);
+ return generateByrefDisposeHelper(CGF, byrefType, info);
+}
+
+/// Build the copy/dispose helpers described by \p byrefInfo, reusing a
+/// previously built set from the module cache when the profiles match.
+template <class T> static T *buildByrefHelpers(CodeGenModule &CGM,
+ llvm::StructType &byrefTy,
+ T &byrefInfo) {
+ // Increase the field's alignment to be at least pointer alignment,
+ // since the layout of the byref struct will guarantee at least that.
+ byrefInfo.Alignment = std::max(byrefInfo.Alignment,
+ CharUnits::fromQuantity(CGM.PointerAlignInBytes));
+
+ llvm::FoldingSetNodeID id;
+ byrefInfo.Profile(id);
+
+ void *insertPos;
+ CodeGenModule::ByrefHelpers *node
+ = CGM.ByrefHelpersCache.FindNodeOrInsertPos(id, insertPos);
+ if (node) return static_cast<T*>(node);
+
+ byrefInfo.CopyHelper = buildByrefCopyHelper(CGM, byrefTy, byrefInfo);
+ byrefInfo.DisposeHelper = buildByrefDisposeHelper(CGM, byrefTy, byrefInfo);
+
+ T *copy = new (CGM.getContext()) T(byrefInfo);
+ CGM.ByrefHelpersCache.InsertNode(copy, insertPos);
+ return copy;
+}
+
+CodeGenModule::ByrefHelpers *
+CodeGenFunction::buildByrefHelpers(llvm::StructType &byrefType,
+ const AutoVarEmission &emission) {
+ const VarDecl &var = *emission.Variable;
+ QualType type = var.getType();
+
+ if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) {
+ const Expr *copyExpr = CGM.getContext().getBlockVarCopyInits(&var);
+ if (!copyExpr && record->hasTrivialDestructor()) return 0;
+
+ CXXByrefHelpers byrefInfo(emission.Alignment, type, copyExpr);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+ }
+
+  // Otherwise, if we don't have a retainable type, there's nothing to do
+  // beyond the bitwise copies that the runtime does by default.
+ if (!type->isObjCRetainableType()) return 0;
+
+ Qualifiers qs = type.getQualifiers();
+
+ // If we have lifetime, that dominates.
+ if (Qualifiers::ObjCLifetime lifetime = qs.getObjCLifetime()) {
+ assert(getLangOpts().ObjCAutoRefCount);
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None: llvm_unreachable("impossible");
+
+ // These are just bits as far as the runtime is concerned.
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ return 0;
+
+ // Tell the runtime that this is ARC __weak, called by the
+ // byref routines.
+ case Qualifiers::OCL_Weak: {
+ ARCWeakByrefHelpers byrefInfo(emission.Alignment);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+ }
+
+ // ARC __strong __block variables need to be retained.
+ case Qualifiers::OCL_Strong:
+ // Block pointers need to be copied, and there's no direct
+ // transfer possible.
+ if (type->isBlockPointerType()) {
+ ARCStrongBlockByrefHelpers byrefInfo(emission.Alignment);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+
+ // Otherwise, we transfer ownership of the retain from the stack
+ // to the heap.
+ } else {
+ ARCStrongByrefHelpers byrefInfo(emission.Alignment);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+ }
+ }
+ llvm_unreachable("fell out of lifetime switch!");
+ }
+
+ BlockFieldFlags flags;
+ if (type->isBlockPointerType()) {
+ flags |= BLOCK_FIELD_IS_BLOCK;
+ } else if (CGM.getContext().isObjCNSObjectType(type) ||
+ type->isObjCObjectPointerType()) {
+ flags |= BLOCK_FIELD_IS_OBJECT;
+ } else {
+ return 0;
+ }
+
+ if (type.isObjCGCWeak())
+ flags |= BLOCK_FIELD_IS_WEAK;
+
+ ObjectByrefHelpers byrefInfo(emission.Alignment, flags);
+ return ::buildByrefHelpers(CGM, byrefType, byrefInfo);
+}
+
+unsigned CodeGenFunction::getByRefValueLLVMField(const ValueDecl *VD) const {
+ assert(ByRefValueInfo.count(VD) && "Did not find value!");
+
+ return ByRefValueInfo.find(VD)->second.second;
+}
+
+llvm::Value *CodeGenFunction::BuildBlockByrefAddress(llvm::Value *BaseAddr,
+ const VarDecl *V) {
+ llvm::Value *Loc = Builder.CreateStructGEP(BaseAddr, 1, "forwarding");
+ Loc = Builder.CreateLoad(Loc);
+ Loc = Builder.CreateStructGEP(Loc, getByRefValueLLVMField(V),
+ V->getNameAsString());
+ return Loc;
+}
+
+/// BuildByRefType - This routine changes a __block variable declared as T x
+/// into:
+///
+/// struct {
+/// void *__isa;
+/// void *__forwarding;
+/// int32_t __flags;
+/// int32_t __size;
+/// void *__copy_helper; // only if needed
+/// void *__destroy_helper; // only if needed
+/// char padding[X]; // only if needed
+/// T x;
+/// } x
+///
+llvm::Type *CodeGenFunction::BuildByRefType(const VarDecl *D) {
+ std::pair<llvm::Type *, unsigned> &Info = ByRefValueInfo[D];
+ if (Info.first)
+ return Info.first;
+
+ QualType Ty = D->getType();
+
+ SmallVector<llvm::Type *, 8> types;
+
+ llvm::StructType *ByRefType =
+ llvm::StructType::create(getLLVMContext(),
+ "struct.__block_byref_" + D->getNameAsString());
+
+ // void *__isa;
+ types.push_back(Int8PtrTy);
+
+ // void *__forwarding;
+ types.push_back(llvm::PointerType::getUnqual(ByRefType));
+
+ // int32_t __flags;
+ types.push_back(Int32Ty);
+
+ // int32_t __size;
+ types.push_back(Int32Ty);
+
+ bool HasCopyAndDispose =
+ (Ty->isObjCRetainableType()) || getContext().getBlockVarCopyInits(D);
+ if (HasCopyAndDispose) {
+ /// void *__copy_helper;
+ types.push_back(Int8PtrTy);
+
+ /// void *__destroy_helper;
+ types.push_back(Int8PtrTy);
+ }
+
+ bool Packed = false;
+ CharUnits Align = getContext().getDeclAlign(D);
+ if (Align > getContext().toCharUnitsFromBits(Target.getPointerAlign(0))) {
+ // We have to insert padding.
+
+ // The struct above has 2 32-bit integers.
+ unsigned CurrentOffsetInBytes = 4 * 2;
+
+ // And either 2 or 4 pointers.
+ CurrentOffsetInBytes += (HasCopyAndDispose ? 4 : 2) *
+ CGM.getTargetData().getTypeAllocSize(Int8PtrTy);
+
+ // Align the offset.
+ unsigned AlignedOffsetInBytes =
+ llvm::RoundUpToAlignment(CurrentOffsetInBytes, Align.getQuantity());
+
+ unsigned NumPaddingBytes = AlignedOffsetInBytes - CurrentOffsetInBytes;
+ if (NumPaddingBytes > 0) {
+ llvm::Type *Ty = Int8Ty;
+ // FIXME: We need a sema error for alignment larger than the minimum of
+ // the maximal stack alignment and the alignment of malloc on the system.
+ if (NumPaddingBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumPaddingBytes);
+
+ types.push_back(Ty);
+
+ // We want a packed struct.
+ Packed = true;
+ }
+ }
+
+ // T x;
+ types.push_back(ConvertTypeForMem(Ty));
+
+ ByRefType->setBody(types, Packed);
+
+ Info.first = ByRefType;
+
+ Info.second = types.size() - 1;
+
+ return Info.first;
+}
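Concretely, for a plain '__block int i;' that needs neither copy/dispose helpers nor padding, the type built here corresponds to something like the sketch below, and every access goes through __forwarding (see BuildBlockByrefAddress above) so the stack and heap copies stay coherent after the variable is moved. Names mirror the comment above and are illustrative only:

    struct __Block_byref_i {
      void *__isa;
      struct __Block_byref_i *__forwarding;  // points at the currently live copy
      int __flags;
      int __size;
      int i;                                 // the variable itself, always the last field
    };

    int read_byref_i(struct __Block_byref_i *v) {
      return v->__forwarding->i;             // what the forwarding GEP + load amount to
    }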
+
+/// Initialize the structural components of a __block variable, i.e.
+/// everything but the actual object.
+void CodeGenFunction::emitByrefStructureInit(const AutoVarEmission &emission) {
+ // Find the address of the local.
+ llvm::Value *addr = emission.Address;
+
+ // That's an alloca of the byref structure type.
+ llvm::StructType *byrefType = cast<llvm::StructType>(
+ cast<llvm::PointerType>(addr->getType())->getElementType());
+
+ // Build the byref helpers if necessary. This is null if we don't need any.
+ CodeGenModule::ByrefHelpers *helpers =
+ buildByrefHelpers(*byrefType, emission);
+
+ const VarDecl &D = *emission.Variable;
+ QualType type = D.getType();
+
+ llvm::Value *V;
+
+ // Initialize the 'isa', which is just 0 or 1.
+ int isa = 0;
+ if (type.isObjCGCWeak())
+ isa = 1;
+ V = Builder.CreateIntToPtr(Builder.getInt32(isa), Int8PtrTy, "isa");
+ Builder.CreateStore(V, Builder.CreateStructGEP(addr, 0, "byref.isa"));
+
+ // Store the address of the variable into its own forwarding pointer.
+ Builder.CreateStore(addr,
+ Builder.CreateStructGEP(addr, 1, "byref.forwarding"));
+
+  // Blocks ABI: the flags field is set to either 0 if no helper functions
+  // are needed, or BLOCK_HAS_COPY_DISPOSE if they are.
+ BlockFlags flags;
+ if (helpers) flags |= BLOCK_HAS_COPY_DISPOSE;
+ Builder.CreateStore(llvm::ConstantInt::get(IntTy, flags.getBitMask()),
+ Builder.CreateStructGEP(addr, 2, "byref.flags"));
+
+ CharUnits byrefSize = CGM.GetTargetTypeStoreSize(byrefType);
+ V = llvm::ConstantInt::get(IntTy, byrefSize.getQuantity());
+ Builder.CreateStore(V, Builder.CreateStructGEP(addr, 3, "byref.size"));
+
+ if (helpers) {
+ llvm::Value *copy_helper = Builder.CreateStructGEP(addr, 4);
+ Builder.CreateStore(helpers->CopyHelper, copy_helper);
+
+ llvm::Value *destroy_helper = Builder.CreateStructGEP(addr, 5);
+ Builder.CreateStore(helpers->DisposeHelper, destroy_helper);
+ }
+}
+
+void CodeGenFunction::BuildBlockRelease(llvm::Value *V, BlockFieldFlags flags) {
+ llvm::Value *F = CGM.getBlockObjectDispose();
+ llvm::Value *N;
+ V = Builder.CreateBitCast(V, Int8PtrTy);
+ N = llvm::ConstantInt::get(Int32Ty, flags.getBitMask());
+ Builder.CreateCall2(F, V, N);
+}
+
+namespace {
+ struct CallBlockRelease : EHScopeStack::Cleanup {
+ llvm::Value *Addr;
+ CallBlockRelease(llvm::Value *Addr) : Addr(Addr) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Should we be passing FIELD_IS_WEAK here?
+ CGF.BuildBlockRelease(Addr, BLOCK_FIELD_IS_BYREF);
+ }
+ };
+}
+
+/// Enter a cleanup to destroy a __block variable. Note that this
+/// cleanup should be a no-op if the variable hasn't left the stack
+/// yet; if a cleanup is required for the variable itself, that needs
+/// to be done externally.
+void CodeGenFunction::enterByrefCleanup(const AutoVarEmission &emission) {
+ // We don't enter this cleanup if we're in pure-GC mode.
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly)
+ return;
+
+ EHStack.pushCleanup<CallBlockRelease>(NormalAndEHCleanup, emission.Address);
+}
+
+/// Adjust the declaration of something from the blocks API.
+static void configureBlocksRuntimeObject(CodeGenModule &CGM,
+ llvm::Constant *C) {
+ if (!CGM.getLangOpts().BlocksRuntimeOptional) return;
+
+ llvm::GlobalValue *GV = cast<llvm::GlobalValue>(C->stripPointerCasts());
+ if (GV->isDeclaration() &&
+ GV->getLinkage() == llvm::GlobalValue::ExternalLinkage)
+ GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+}
+
+llvm::Constant *CodeGenModule::getBlockObjectDispose() {
+ if (BlockObjectDispose)
+ return BlockObjectDispose;
+
+ llvm::Type *args[] = { Int8PtrTy, Int32Ty };
+ llvm::FunctionType *fty
+ = llvm::FunctionType::get(VoidTy, args, false);
+ BlockObjectDispose = CreateRuntimeFunction(fty, "_Block_object_dispose");
+ configureBlocksRuntimeObject(*this, BlockObjectDispose);
+ return BlockObjectDispose;
+}
+
+llvm::Constant *CodeGenModule::getBlockObjectAssign() {
+ if (BlockObjectAssign)
+ return BlockObjectAssign;
+
+ llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, Int32Ty };
+ llvm::FunctionType *fty
+ = llvm::FunctionType::get(VoidTy, args, false);
+ BlockObjectAssign = CreateRuntimeFunction(fty, "_Block_object_assign");
+ configureBlocksRuntimeObject(*this, BlockObjectAssign);
+ return BlockObjectAssign;
+}
+
+llvm::Constant *CodeGenModule::getNSConcreteGlobalBlock() {
+ if (NSConcreteGlobalBlock)
+ return NSConcreteGlobalBlock;
+
+ NSConcreteGlobalBlock = GetOrCreateLLVMGlobal("_NSConcreteGlobalBlock",
+ Int8PtrTy->getPointerTo(), 0);
+ configureBlocksRuntimeObject(*this, NSConcreteGlobalBlock);
+ return NSConcreteGlobalBlock;
+}
+
+llvm::Constant *CodeGenModule::getNSConcreteStackBlock() {
+ if (NSConcreteStackBlock)
+ return NSConcreteStackBlock;
+
+ NSConcreteStackBlock = GetOrCreateLLVMGlobal("_NSConcreteStackBlock",
+ Int8PtrTy->getPointerTo(), 0);
+ configureBlocksRuntimeObject(*this, NSConcreteStackBlock);
+ return NSConcreteStackBlock;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
new file mode 100644
index 0000000..095cfdb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBlocks.h
@@ -0,0 +1,229 @@
+//===-- CGBlocks.h - state for LLVM CodeGen for blocks ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal state used for llvm translation for block literals.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGBLOCKS_H
+#define CLANG_CODEGEN_CGBLOCKS_H
+
+#include "CodeGenTypes.h"
+#include "clang/AST/Type.h"
+#include "llvm/Module.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+
+#include "CodeGenFunction.h"
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class Module;
+ class Constant;
+ class Function;
+ class GlobalValue;
+ class TargetData;
+ class FunctionType;
+ class PointerType;
+ class Value;
+ class LLVMContext;
+}
+
+namespace clang {
+
+namespace CodeGen {
+
+class CodeGenModule;
+class CGBlockInfo;
+
+enum BlockFlag_t {
+ BLOCK_HAS_COPY_DISPOSE = (1 << 25),
+ BLOCK_HAS_CXX_OBJ = (1 << 26),
+ BLOCK_IS_GLOBAL = (1 << 28),
+ BLOCK_USE_STRET = (1 << 29),
+ BLOCK_HAS_SIGNATURE = (1 << 30)
+};
+class BlockFlags {
+ uint32_t flags;
+
+ BlockFlags(uint32_t flags) : flags(flags) {}
+public:
+ BlockFlags() : flags(0) {}
+ BlockFlags(BlockFlag_t flag) : flags(flag) {}
+
+ uint32_t getBitMask() const { return flags; }
+ bool empty() const { return flags == 0; }
+
+ friend BlockFlags operator|(BlockFlags l, BlockFlags r) {
+ return BlockFlags(l.flags | r.flags);
+ }
+ friend BlockFlags &operator|=(BlockFlags &l, BlockFlags r) {
+ l.flags |= r.flags;
+ return l;
+ }
+ friend bool operator&(BlockFlags l, BlockFlags r) {
+ return (l.flags & r.flags);
+ }
+};
+inline BlockFlags operator|(BlockFlag_t l, BlockFlag_t r) {
+ return BlockFlags(l) | BlockFlags(r);
+}
+
+enum BlockFieldFlag_t {
+ BLOCK_FIELD_IS_OBJECT = 0x03, /* id, NSObject, __attribute__((NSObject)),
+ block, ... */
+ BLOCK_FIELD_IS_BLOCK = 0x07, /* a block variable */
+
+ BLOCK_FIELD_IS_BYREF = 0x08, /* the on stack structure holding the __block
+ variable */
+ BLOCK_FIELD_IS_WEAK = 0x10, /* declared __weak, only used in byref copy
+ helpers */
+ BLOCK_FIELD_IS_ARC = 0x40, /* field has ARC-specific semantics */
+ BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
+ support routines */
+ BLOCK_BYREF_CURRENT_MAX = 256
+};
+
+class BlockFieldFlags {
+ uint32_t flags;
+
+ BlockFieldFlags(uint32_t flags) : flags(flags) {}
+public:
+ BlockFieldFlags() : flags(0) {}
+ BlockFieldFlags(BlockFieldFlag_t flag) : flags(flag) {}
+
+ uint32_t getBitMask() const { return flags; }
+ bool empty() const { return flags == 0; }
+
+ /// Answers whether the flags indicate that this field is an object
+ /// or block pointer that requires _Block_object_assign/dispose.
+ bool isSpecialPointer() const { return flags & BLOCK_FIELD_IS_OBJECT; }
+
+ friend BlockFieldFlags operator|(BlockFieldFlags l, BlockFieldFlags r) {
+ return BlockFieldFlags(l.flags | r.flags);
+ }
+ friend BlockFieldFlags &operator|=(BlockFieldFlags &l, BlockFieldFlags r) {
+ l.flags |= r.flags;
+ return l;
+ }
+ friend bool operator&(BlockFieldFlags l, BlockFieldFlags r) {
+ return (l.flags & r.flags);
+ }
+};
+inline BlockFieldFlags operator|(BlockFieldFlag_t l, BlockFieldFlag_t r) {
+ return BlockFieldFlags(l) | BlockFieldFlags(r);
+}
+
+/// CGBlockInfo - Information to generate a block literal.
+class CGBlockInfo {
+public:
+  /// Name - The name of the block, more or less.
+ llvm::StringRef Name;
+
+ /// The field index of 'this' within the block, if there is one.
+ unsigned CXXThisIndex;
+
+ class Capture {
+ uintptr_t Data;
+ EHScopeStack::stable_iterator Cleanup;
+
+ public:
+ bool isIndex() const { return (Data & 1) != 0; }
+ bool isConstant() const { return !isIndex(); }
+ unsigned getIndex() const { assert(isIndex()); return Data >> 1; }
+ llvm::Value *getConstant() const {
+ assert(isConstant());
+ return reinterpret_cast<llvm::Value*>(Data);
+ }
+ EHScopeStack::stable_iterator getCleanup() const {
+ assert(isIndex());
+ return Cleanup;
+ }
+ void setCleanup(EHScopeStack::stable_iterator cleanup) {
+ assert(isIndex());
+ Cleanup = cleanup;
+ }
+
+ static Capture makeIndex(unsigned index) {
+ Capture v;
+ v.Data = (index << 1) | 1;
+ return v;
+ }
+
+ static Capture makeConstant(llvm::Value *value) {
+ Capture v;
+ v.Data = reinterpret_cast<uintptr_t>(value);
+ return v;
+ }
+ };
+
+ /// CanBeGlobal - True if the block can be global, i.e. it has
+ /// no non-constant captures.
+ bool CanBeGlobal : 1;
+
+ /// True if the block needs a custom copy or dispose function.
+ bool NeedsCopyDispose : 1;
+
+ /// HasCXXObject - True if the block's custom copy/dispose functions
+ /// need to be run even in GC mode.
+ bool HasCXXObject : 1;
+
+ /// UsesStret : True if the block uses an stret return. Mutable
+ /// because it gets set later in the block-creation process.
+ mutable bool UsesStret : 1;
+
+ /// The mapping of allocated indexes within the block.
+ llvm::DenseMap<const VarDecl*, Capture> Captures;
+
+ llvm::AllocaInst *Address;
+ llvm::StructType *StructureType;
+ const BlockDecl *Block;
+ const BlockExpr *BlockExpression;
+ CharUnits BlockSize;
+ CharUnits BlockAlign;
+
+ /// An instruction which dominates the full-expression that the
+ /// block is inside.
+ llvm::Instruction *DominatingIP;
+
+ /// The next block in the block-info chain. Invalid if this block
+ /// info is not part of the CGF's block-info chain, which is true
+ /// if it corresponds to a global block or a block whose expression
+ /// has been encountered.
+ CGBlockInfo *NextBlockInfo;
+
+ const Capture &getCapture(const VarDecl *var) const {
+ return const_cast<CGBlockInfo*>(this)->getCapture(var);
+ }
+ Capture &getCapture(const VarDecl *var) {
+ llvm::DenseMap<const VarDecl*, Capture>::iterator
+ it = Captures.find(var);
+ assert(it != Captures.end() && "no entry for variable!");
+ return it->second;
+ }
+
+ const BlockDecl *getBlockDecl() const { return Block; }
+ const BlockExpr *getBlockExpr() const {
+ assert(BlockExpression);
+ assert(BlockExpression->getBlockDecl() == Block);
+ return BlockExpression;
+ }
+
+ CGBlockInfo(const BlockDecl *blockDecl, llvm::StringRef Name);
+};
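Capture packs either a structure-field index or a constant llvm::Value* into a single uintptr_t, using the low bit as the discriminator: indices are shifted left and tagged with 1, while pointers are stored unchanged, which relies on llvm::Value objects being at least 2-byte aligned. The same encoding in a standalone sketch, with void* standing in for llvm::Value*:

    #include <cassert>
    #include <cstdint>

    class TaggedCapture {
      uintptr_t Data;
    public:
      static TaggedCapture makeIndex(unsigned index) {
        TaggedCapture c; c.Data = (uintptr_t(index) << 1) | 1; return c;
      }
      static TaggedCapture makeConstant(void *value) {
        TaggedCapture c; c.Data = reinterpret_cast<uintptr_t>(value); return c;
      }
      bool isIndex() const { return (Data & 1) != 0; }
      unsigned getIndex() const { assert(isIndex()); return unsigned(Data >> 1); }
      void *getConstant() const { assert(!isIndex()); return reinterpret_cast<void *>(Data); }
    };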
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h
new file mode 100644
index 0000000..8120217
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuilder.h
@@ -0,0 +1,28 @@
+//===-- CGBuilder.h - Choose IRBuilder implementation ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGBUILDER_H
+#define CLANG_CODEGEN_CGBUILDER_H
+
+#include "llvm/Support/IRBuilder.h"
+
+namespace clang {
+namespace CodeGen {
+
+// Don't preserve names on values in an optimized build.
+#ifdef NDEBUG
+typedef llvm::IRBuilder<false> CGBuilderTy;
+#else
+typedef llvm::IRBuilder<> CGBuilderTy;
+#endif
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
new file mode 100644
index 0000000..e30b513
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGBuiltin.cpp
@@ -0,0 +1,4524 @@
+//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Builtin calls as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TargetInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGObjCRuntime.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+
+using namespace clang;
+using namespace CodeGen;
+using namespace llvm;
+
+/// getBuiltinLibFunction - Given a builtin id for a function like
+/// "__builtin_fabsf", return a Function* for "fabsf".
+llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
+ unsigned BuiltinID) {
+ assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
+
+ // Get the name, skip over the __builtin_ prefix (if necessary).
+ StringRef Name;
+ GlobalDecl D(FD);
+
+ // If the builtin has been declared explicitly with an assembler label,
+ // use the mangled name. This differs from the plain label on platforms
+ // that prefix labels.
+ if (FD->hasAttr<AsmLabelAttr>())
+ Name = getMangledName(D);
+ else
+ Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;
+
+ llvm::FunctionType *Ty =
+ cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));
+
+ return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
+}
+
+/// Emit the conversions required to turn the given value into an
+/// integer of the given size.
+static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
+ QualType T, llvm::IntegerType *IntType) {
+ V = CGF.EmitToMemory(V, T);
+
+ if (V->getType()->isPointerTy())
+ return CGF.Builder.CreatePtrToInt(V, IntType);
+
+ assert(V->getType() == IntType);
+ return V;
+}
+
+static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
+ QualType T, llvm::Type *ResultType) {
+ V = CGF.EmitFromMemory(V, T);
+
+ if (ResultType->isPointerTy())
+ return CGF.Builder.CreateIntToPtr(V, ResultType);
+
+ assert(V->getType() == ResultType);
+ return V;
+}
+
+/// Utility to insert an atomic instruction based on the Intrinsic::ID
+/// and the expression node.
+static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
+ llvm::AtomicRMWInst::BinOp Kind,
+ const CallExpr *E) {
+ QualType T = E->getType();
+ assert(E->getArg(0)->getType()->isPointerType());
+ assert(CGF.getContext().hasSameUnqualifiedType(T,
+ E->getArg(0)->getType()->getPointeeType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
+
+ llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+
+ llvm::IntegerType *IntType =
+ llvm::IntegerType::get(CGF.getLLVMContext(),
+ CGF.getContext().getTypeSize(T));
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+
+ llvm::Value *Args[2];
+ Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
+ Args[1] = CGF.EmitScalarExpr(E->getArg(1));
+ llvm::Type *ValueType = Args[1]->getType();
+ Args[1] = EmitToInt(CGF, Args[1], T, IntType);
+
+ llvm::Value *Result =
+ CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
+ llvm::SequentiallyConsistent);
+ Result = EmitFromInt(CGF, Result, T, ValueType);
+ return RValue::get(Result);
+}
+
+/// Utility to insert an atomic instruction based on the Intrinsic::ID and
+/// the expression node, where the return value is the result of the
+/// operation.
+static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
+ llvm::AtomicRMWInst::BinOp Kind,
+ const CallExpr *E,
+ Instruction::BinaryOps Op) {
+ QualType T = E->getType();
+ assert(E->getArg(0)->getType()->isPointerType());
+ assert(CGF.getContext().hasSameUnqualifiedType(T,
+ E->getArg(0)->getType()->getPointeeType()));
+ assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));
+
+ llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+
+ llvm::IntegerType *IntType =
+ llvm::IntegerType::get(CGF.getLLVMContext(),
+ CGF.getContext().getTypeSize(T));
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+
+ llvm::Value *Args[2];
+ Args[1] = CGF.EmitScalarExpr(E->getArg(1));
+ llvm::Type *ValueType = Args[1]->getType();
+ Args[1] = EmitToInt(CGF, Args[1], T, IntType);
+ Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
+
+ llvm::Value *Result =
+ CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
+ llvm::SequentiallyConsistent);
+ Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
+ Result = EmitFromInt(CGF, Result, T, ValueType);
+ return RValue::get(Result);
+}
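Both helpers funnel the __sync_* builtins into a single sequentially-consistent atomicrmw; the only difference is whether the caller gets back the old value (EmitBinaryAtomic) or the post-operation value, reconstructed by re-applying Op (EmitBinaryAtomicPost). For example, on a plain int:

    // Emitted as 'atomicrmw add ... seq_cst'; returns the previous value.
    int fetch_then_add(int *p) { return __sync_fetch_and_add(p, 1); }

    // Same atomicrmw; the +1 is re-applied to the result before returning.
    int add_then_fetch(int *p) { return __sync_add_and_fetch(p, 1); }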
+
+/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
+/// which must be a scalar floating point type.
+static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
+ const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
+ assert(ValTyP && "isn't scalar fp type!");
+
+ StringRef FnName;
+ switch (ValTyP->getKind()) {
+ default: llvm_unreachable("Isn't a scalar fp type!");
+ case BuiltinType::Float: FnName = "fabsf"; break;
+ case BuiltinType::Double: FnName = "fabs"; break;
+ case BuiltinType::LongDouble: FnName = "fabsl"; break;
+ }
+
+ // The prototype is something that takes and returns whatever V's type is.
+ llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
+ false);
+ llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);
+
+ return CGF.Builder.CreateCall(Fn, V, "abs");
+}
+
+static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
+ const CallExpr *E, llvm::Value *calleeValue) {
+ return CGF.EmitCall(E->getCallee()->getType(), calleeValue,
+ ReturnValueSlot(), E->arg_begin(), E->arg_end(), Fn);
+}
+
+RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
+ unsigned BuiltinID, const CallExpr *E) {
+ // See if we can constant fold this builtin. If so, don't emit it at all.
+ Expr::EvalResult Result;
+ if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
+ !Result.hasSideEffects()) {
+ if (Result.Val.isInt())
+ return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
+ Result.Val.getInt()));
+ if (Result.Val.isFloat())
+ return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
+ Result.Val.getFloat()));
+ }
+
+ switch (BuiltinID) {
+ default: break; // Handle intrinsics and libm functions below.
+ case Builtin::BI__builtin___CFStringMakeConstantString:
+ case Builtin::BI__builtin___NSStringMakeConstantString:
+ return RValue::get(CGM.EmitConstantExpr(E, E->getType(), 0));
+ case Builtin::BI__builtin_stdarg_start:
+ case Builtin::BI__builtin_va_start:
+ case Builtin::BI__builtin_va_end: {
+ Value *ArgValue = EmitVAListRef(E->getArg(0));
+ llvm::Type *DestType = Int8PtrTy;
+ if (ArgValue->getType() != DestType)
+ ArgValue = Builder.CreateBitCast(ArgValue, DestType,
+ ArgValue->getName().data());
+
+ Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
+ Intrinsic::vaend : Intrinsic::vastart;
+ return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
+ }
+ case Builtin::BI__builtin_va_copy: {
+ Value *DstPtr = EmitVAListRef(E->getArg(0));
+ Value *SrcPtr = EmitVAListRef(E->getArg(1));
+
+ llvm::Type *Type = Int8PtrTy;
+
+ DstPtr = Builder.CreateBitCast(DstPtr, Type);
+ SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
+ return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
+ DstPtr, SrcPtr));
+ }
+ case Builtin::BI__builtin_abs:
+ case Builtin::BI__builtin_labs:
+ case Builtin::BI__builtin_llabs: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
+ Value *CmpResult =
+ Builder.CreateICmpSGE(ArgValue,
+ llvm::Constant::getNullValue(ArgValue->getType()),
+ "abscond");
+ Value *Result =
+ Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");
+
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_ctzs:
+ case Builtin::BI__builtin_ctz:
+ case Builtin::BI__builtin_ctzl:
+ case Builtin::BI__builtin_ctzll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *ZeroUndef = Builder.getInt1(Target.isCLZForZeroUndef());
+ Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_clzs:
+ case Builtin::BI__builtin_clz:
+ case Builtin::BI__builtin_clzl:
+ case Builtin::BI__builtin_clzll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *ZeroUndef = Builder.getInt1(Target.isCLZForZeroUndef());
+ Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_ffs:
+ case Builtin::BI__builtin_ffsl:
+ case Builtin::BI__builtin_ffsll: {
+ // ffs(x) -> x ? cttz(x) + 1 : 0
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp = Builder.CreateAdd(Builder.CreateCall2(F, ArgValue,
+ Builder.getTrue()),
+ llvm::ConstantInt::get(ArgType, 1));
+ Value *Zero = llvm::Constant::getNullValue(ArgType);
+ Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
+ Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
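A quick sanity check on the 'ffs(x) -> x ? cttz(x) + 1 : 0' lowering above:

    #include <assert.h>

    int main(void) {
      assert(__builtin_ffs(8) == 4);   // 8 == 0b1000: cttz is 3, plus 1 gives 4
      assert(__builtin_ffs(0) == 0);   // the select takes the zero branch
      return 0;
    }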
+ case Builtin::BI__builtin_parity:
+ case Builtin::BI__builtin_parityl:
+ case Builtin::BI__builtin_parityll: {
+ // parity(x) -> ctpop(x) & 1
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Tmp = Builder.CreateCall(F, ArgValue);
+ Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_popcount:
+ case Builtin::BI__builtin_popcountl:
+ case Builtin::BI__builtin_popcountll: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
+
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Result = Builder.CreateCall(F, ArgValue);
+ if (Result->getType() != ResultType)
+ Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
+ "cast");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_expect: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = ArgValue->getType();
+
+ Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
+ Value *ExpectedValue = EmitScalarExpr(E->getArg(1));
+
+ Value *Result = Builder.CreateCall2(FnExpect, ArgValue, ExpectedValue,
+ "expval");
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_bswap32:
+ case Builtin::BI__builtin_bswap64: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = ArgValue->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
+ return RValue::get(Builder.CreateCall(F, ArgValue));
+ }
+ case Builtin::BI__builtin_object_size: {
+ // We pass this builtin onto the optimizer so that it can
+ // figure out the object size in more complex cases.
+ llvm::Type *ResType = ConvertType(E->getType());
+
+    // LLVM only supports 0 and 2, so make sure that we pass that along
+    // as a boolean.
+ Value *Ty = EmitScalarExpr(E->getArg(1));
+ ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
+ assert(CI);
+ uint64_t val = CI->getZExtValue();
+ CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
+
+ Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType);
+ return RValue::get(Builder.CreateCall2(F,
+ EmitScalarExpr(E->getArg(0)),
+ CI));
+ }
+ case Builtin::BI__builtin_prefetch: {
+ Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
+    // FIXME: Technically these constants should be of type 'int', yes?
+ RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
+ llvm::ConstantInt::get(Int32Ty, 0);
+ Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
+ llvm::ConstantInt::get(Int32Ty, 3);
+ Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+ return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data));
+ }
+ case Builtin::BI__builtin_trap: {
+ Value *F = CGM.getIntrinsic(Intrinsic::trap);
+ return RValue::get(Builder.CreateCall(F));
+ }
+ case Builtin::BI__builtin_unreachable: {
+ if (CatchUndefined)
+ EmitBranch(getTrapBB());
+ else
+ Builder.CreateUnreachable();
+
+ // We do need to preserve an insertion point.
+ EmitBlock(createBasicBlock("unreachable.cont"));
+
+ return RValue::get(0);
+ }
+
+ case Builtin::BI__builtin_powi:
+ case Builtin::BI__builtin_powif:
+ case Builtin::BI__builtin_powil: {
+ Value *Base = EmitScalarExpr(E->getArg(0));
+ Value *Exponent = EmitScalarExpr(E->getArg(1));
+ llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
+ return RValue::get(Builder.CreateCall2(F, Base, Exponent));
+ }
+
+ case Builtin::BI__builtin_isgreater:
+ case Builtin::BI__builtin_isgreaterequal:
+ case Builtin::BI__builtin_isless:
+ case Builtin::BI__builtin_islessequal:
+ case Builtin::BI__builtin_islessgreater:
+ case Builtin::BI__builtin_isunordered: {
+ // Ordered comparisons: we know the arguments to these are matching scalar
+ // floating point values.
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unknown ordered comparison");
+ case Builtin::BI__builtin_isgreater:
+ LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isgreaterequal:
+ LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isless:
+ LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_islessequal:
+ LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_islessgreater:
+ LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
+ break;
+ case Builtin::BI__builtin_isunordered:
+ LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
+ break;
+ }
+ // ZExt bool to int type.
+ return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
+ }
+ case Builtin::BI__builtin_isnan: {
+ Value *V = EmitScalarExpr(E->getArg(0));
+ V = Builder.CreateFCmpUNO(V, V, "cmp");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_isinf: {
+ // isinf(x) --> fabs(x) == infinity
+ Value *V = EmitScalarExpr(E->getArg(0));
+ V = EmitFAbs(*this, V, E->getArg(0)->getType());
+
+ V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ // TODO: BI__builtin_isinf_sign
+ // isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0
+
+ case Builtin::BI__builtin_isnormal: {
+ // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
+ Value *V = EmitScalarExpr(E->getArg(0));
+ Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
+
+ Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
+ Value *IsLessThanInf =
+ Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
+ APFloat Smallest = APFloat::getSmallestNormalized(
+ getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
+ Value *IsNormal =
+ Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
+ "isnormal");
+ V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
+ V = Builder.CreateAnd(V, IsNormal, "and");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_isfinite: {
+ // isfinite(x) --> x == x && fabs(x) != infinity;
+ Value *V = EmitScalarExpr(E->getArg(0));
+ Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
+
+ Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
+ Value *IsNotInf =
+ Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
+
+ V = Builder.CreateAnd(Eq, IsNotInf, "and");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_fpclassify: {
+ Value *V = EmitScalarExpr(E->getArg(5));
+ llvm::Type *Ty = ConvertType(E->getArg(5)->getType());
+
+ // Create Result
+ BasicBlock *Begin = Builder.GetInsertBlock();
+ BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
+ Builder.SetInsertPoint(End);
+ PHINode *Result =
+ Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
+ "fpclassify_result");
+
+ // if (V==0) return FP_ZERO
+ Builder.SetInsertPoint(Begin);
+ Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
+ "iszero");
+ Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
+ BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
+ Builder.CreateCondBr(IsZero, End, NotZero);
+ Result->addIncoming(ZeroLiteral, Begin);
+
+ // if (V != V) return FP_NAN
+ Builder.SetInsertPoint(NotZero);
+ Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
+ Value *NanLiteral = EmitScalarExpr(E->getArg(0));
+ BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
+ Builder.CreateCondBr(IsNan, End, NotNan);
+ Result->addIncoming(NanLiteral, NotZero);
+
+ // if (fabs(V) == infinity) return FP_INFINITY
+ Builder.SetInsertPoint(NotNan);
+ Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
+ Value *IsInf =
+ Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
+ "isinf");
+ Value *InfLiteral = EmitScalarExpr(E->getArg(1));
+ BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
+ Builder.CreateCondBr(IsInf, End, NotInf);
+ Result->addIncoming(InfLiteral, NotNan);
+
+ // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
+ Builder.SetInsertPoint(NotInf);
+ APFloat Smallest = APFloat::getSmallestNormalized(
+ getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
+ Value *IsNormal =
+ Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
+ "isnormal");
+ Value *NormalResult =
+ Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
+ EmitScalarExpr(E->getArg(3)));
+ Builder.CreateBr(End);
+ Result->addIncoming(NormalResult, NotInf);
+
+ // return Result
+ Builder.SetInsertPoint(End);
+ return RValue::get(Result);
+ }
+
+ case Builtin::BIalloca:
+ case Builtin::BI__builtin_alloca: {
+ Value *Size = EmitScalarExpr(E->getArg(0));
+ return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
+ }
+ case Builtin::BIbzero:
+ case Builtin::BI__builtin_bzero: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SizeVal = EmitScalarExpr(E->getArg(1));
+ unsigned Align = GetPointeeAlignment(E->getArg(0));
+ Builder.CreateMemSet(Address, Builder.getInt8(0), SizeVal, Align, false);
+ return RValue::get(Address);
+ }
+ case Builtin::BImemcpy:
+ case Builtin::BI__builtin_memcpy: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
+ GetPointeeAlignment(E->getArg(1)));
+ Builder.CreateMemCpy(Address, SrcAddr, SizeVal, Align, false);
+ return RValue::get(Address);
+ }
+
+ case Builtin::BI__builtin___memcpy_chk: {
+    // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
+ llvm::APSInt Size, DstSize;
+ if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
+ break;
+ if (Size.ugt(DstSize))
+ break;
+ Value *Dest = EmitScalarExpr(E->getArg(0));
+ Value *Src = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
+ unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
+ GetPointeeAlignment(E->getArg(1)));
+ Builder.CreateMemCpy(Dest, Src, SizeVal, Align, false);
+ return RValue::get(Dest);
+ }
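+  // For example (a sketch; __builtin_object_size is the usual source of the
+  // fourth argument):
+  //   __builtin___memcpy_chk(dst, src, 16, __builtin_object_size(dst, 0))
+  // becomes a plain 16-byte memcpy once the object size folds to a constant
+  // >= 16; otherwise we break out and emit the builtin through the generic
+  // paths below.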
+
+ case Builtin::BI__builtin_objc_memmove_collectable: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
+ Address, SrcAddr, SizeVal);
+ return RValue::get(Address);
+ }
+
+ case Builtin::BI__builtin___memmove_chk: {
+    // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
+ llvm::APSInt Size, DstSize;
+ if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
+ break;
+ if (Size.ugt(DstSize))
+ break;
+ Value *Dest = EmitScalarExpr(E->getArg(0));
+ Value *Src = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
+ unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
+ GetPointeeAlignment(E->getArg(1)));
+ Builder.CreateMemMove(Dest, Src, SizeVal, Align, false);
+ return RValue::get(Dest);
+ }
+
+ case Builtin::BImemmove:
+ case Builtin::BI__builtin_memmove: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *SrcAddr = EmitScalarExpr(E->getArg(1));
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ unsigned Align = std::min(GetPointeeAlignment(E->getArg(0)),
+ GetPointeeAlignment(E->getArg(1)));
+ Builder.CreateMemMove(Address, SrcAddr, SizeVal, Align, false);
+ return RValue::get(Address);
+ }
+ case Builtin::BImemset:
+ case Builtin::BI__builtin_memset: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
+ Builder.getInt8Ty());
+ Value *SizeVal = EmitScalarExpr(E->getArg(2));
+ unsigned Align = GetPointeeAlignment(E->getArg(0));
+ Builder.CreateMemSet(Address, ByteVal, SizeVal, Align, false);
+ return RValue::get(Address);
+ }
+ case Builtin::BI__builtin___memset_chk: {
+ // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
+ llvm::APSInt Size, DstSize;
+ if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
+ !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
+ break;
+ if (Size.ugt(DstSize))
+ break;
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
+ Builder.getInt8Ty());
+ Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
+ unsigned Align = GetPointeeAlignment(E->getArg(0));
+ Builder.CreateMemSet(Address, ByteVal, SizeVal, Align, false);
+
+ return RValue::get(Address);
+ }
+ case Builtin::BI__builtin_dwarf_cfa: {
+ // The offset in bytes from the first argument to the CFA.
+ //
+ // Why on earth is this in the frontend? Is there any reason at
+ // all that the backend can't reasonably determine this while
+ // lowering llvm.eh.dwarf.cfa()?
+ //
+ // TODO: If there's a satisfactory reason, add a target hook for
+ // this instead of hard-coding 0, which is correct for most targets.
+ int32_t Offset = 0;
+
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
+ return RValue::get(Builder.CreateCall(F,
+ llvm::ConstantInt::get(Int32Ty, Offset)));
+ }
+ case Builtin::BI__builtin_return_address: {
+ Value *Depth = EmitScalarExpr(E->getArg(0));
+ Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
+ Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
+ return RValue::get(Builder.CreateCall(F, Depth));
+ }
+ case Builtin::BI__builtin_frame_address: {
+ Value *Depth = EmitScalarExpr(E->getArg(0));
+ Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
+ Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
+ return RValue::get(Builder.CreateCall(F, Depth));
+ }
+ case Builtin::BI__builtin_extract_return_addr: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_frob_return_addr: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_dwarf_sp_column: {
+ llvm::IntegerType *Ty
+ = cast<llvm::IntegerType>(ConvertType(E->getType()));
+ int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
+ if (Column == -1) {
+ CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
+ return RValue::get(llvm::UndefValue::get(Ty));
+ }
+ return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
+ }
+ case Builtin::BI__builtin_init_dwarf_reg_size_table: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
+ CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
+ return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
+ }
+ case Builtin::BI__builtin_eh_return: {
+ Value *Int = EmitScalarExpr(E->getArg(0));
+ Value *Ptr = EmitScalarExpr(E->getArg(1));
+
+ llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
+ assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
+ "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
+ Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
+ ? Intrinsic::eh_return_i32
+ : Intrinsic::eh_return_i64);
+ Builder.CreateCall2(F, Int, Ptr);
+ Builder.CreateUnreachable();
+
+ // We do need to preserve an insertion point.
+ EmitBlock(createBasicBlock("builtin_eh_return.cont"));
+
+ return RValue::get(0);
+ }
+ case Builtin::BI__builtin_unwind_init: {
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
+ return RValue::get(Builder.CreateCall(F));
+ }
+ case Builtin::BI__builtin_extend_pointer: {
+ // Extends a pointer to the size of an _Unwind_Word, which is
+ // uint64_t on all platforms. Generally this gets poked into a
+ // register and eventually used as an address, so if the
+ // addressing registers are wider than pointers and the platform
+ // doesn't implicitly ignore high-order bits when doing
+ // addressing, we need to make sure we zext / sext based on
+ // the platform's expectations.
+ //
+ // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
+
+ // Cast the pointer to intptr_t.
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
+
+ // If that's 64 bits, we're done.
+ if (IntPtrTy->getBitWidth() == 64)
+ return RValue::get(Result);
+
+ // Otherwise, ask the codegen data what to do.
+ if (getTargetHooks().extendPointerWithSExt())
+ return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
+ else
+ return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
+ }
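+  // On a 32-bit target this reduces to, roughly (assuming an i8* argument
+  // and zero-extension):
+  //   %extend.cast = ptrtoint i8* %ptr to i32
+  //   %extend.zext = zext i32 %extend.cast to i64
+  // with sext substituted when extendPointerWithSExt() says so.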
+ case Builtin::BI__builtin_setjmp: {
+ // Buffer is a void**.
+ Value *Buf = EmitScalarExpr(E->getArg(0));
+
+ // Store the frame pointer to the setjmp buffer.
+ Value *FrameAddr =
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
+ ConstantInt::get(Int32Ty, 0));
+ Builder.CreateStore(FrameAddr, Buf);
+
+ // Store the stack pointer to the setjmp buffer.
+ Value *StackAddr =
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
+ Value *StackSaveSlot =
+ Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
+ Builder.CreateStore(StackAddr, StackSaveSlot);
+
+ // Call LLVM's EH setjmp, which is lightweight.
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
+ Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
+ return RValue::get(Builder.CreateCall(F, Buf));
+ }
+ case Builtin::BI__builtin_longjmp: {
+ Value *Buf = EmitScalarExpr(E->getArg(0));
+ Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
+
+ // Call LLVM's EH longjmp, which is lightweight.
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);
+
+ // longjmp doesn't return; mark this as unreachable.
+ Builder.CreateUnreachable();
+
+ // We do need to preserve an insertion point.
+ EmitBlock(createBasicBlock("longjmp.cont"));
+
+ return RValue::get(0);
+ }
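+  // The jmp_buf layout assumed above (Buf treated as an array of void*):
+  //   buf[0] = frame address (llvm.frameaddress(0))
+  //   buf[2] = stack pointer  (llvm.stacksave())
+  // with the remaining slots left to the llvm.eh.sjlj.setjmp / longjmp
+  // lowering.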
+ case Builtin::BI__sync_fetch_and_add:
+ case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_release:
+ case Builtin::BI__sync_swap:
+ llvm_unreachable("Shouldn't make it through sema");
+ case Builtin::BI__sync_fetch_and_add_1:
+ case Builtin::BI__sync_fetch_and_add_2:
+ case Builtin::BI__sync_fetch_and_add_4:
+ case Builtin::BI__sync_fetch_and_add_8:
+ case Builtin::BI__sync_fetch_and_add_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
+ case Builtin::BI__sync_fetch_and_sub_1:
+ case Builtin::BI__sync_fetch_and_sub_2:
+ case Builtin::BI__sync_fetch_and_sub_4:
+ case Builtin::BI__sync_fetch_and_sub_8:
+ case Builtin::BI__sync_fetch_and_sub_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
+ case Builtin::BI__sync_fetch_and_or_1:
+ case Builtin::BI__sync_fetch_and_or_2:
+ case Builtin::BI__sync_fetch_and_or_4:
+ case Builtin::BI__sync_fetch_and_or_8:
+ case Builtin::BI__sync_fetch_and_or_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
+ case Builtin::BI__sync_fetch_and_and_1:
+ case Builtin::BI__sync_fetch_and_and_2:
+ case Builtin::BI__sync_fetch_and_and_4:
+ case Builtin::BI__sync_fetch_and_and_8:
+ case Builtin::BI__sync_fetch_and_and_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
+ case Builtin::BI__sync_fetch_and_xor_1:
+ case Builtin::BI__sync_fetch_and_xor_2:
+ case Builtin::BI__sync_fetch_and_xor_4:
+ case Builtin::BI__sync_fetch_and_xor_8:
+ case Builtin::BI__sync_fetch_and_xor_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
+
+ // Clang extensions: not overloaded yet.
+ case Builtin::BI__sync_fetch_and_min:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
+ case Builtin::BI__sync_fetch_and_max:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
+ case Builtin::BI__sync_fetch_and_umin:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
+ case Builtin::BI__sync_fetch_and_umax:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);
+
+ case Builtin::BI__sync_add_and_fetch_1:
+ case Builtin::BI__sync_add_and_fetch_2:
+ case Builtin::BI__sync_add_and_fetch_4:
+ case Builtin::BI__sync_add_and_fetch_8:
+ case Builtin::BI__sync_add_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
+ llvm::Instruction::Add);
+ case Builtin::BI__sync_sub_and_fetch_1:
+ case Builtin::BI__sync_sub_and_fetch_2:
+ case Builtin::BI__sync_sub_and_fetch_4:
+ case Builtin::BI__sync_sub_and_fetch_8:
+ case Builtin::BI__sync_sub_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
+ llvm::Instruction::Sub);
+ case Builtin::BI__sync_and_and_fetch_1:
+ case Builtin::BI__sync_and_and_fetch_2:
+ case Builtin::BI__sync_and_and_fetch_4:
+ case Builtin::BI__sync_and_and_fetch_8:
+ case Builtin::BI__sync_and_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
+ llvm::Instruction::And);
+ case Builtin::BI__sync_or_and_fetch_1:
+ case Builtin::BI__sync_or_and_fetch_2:
+ case Builtin::BI__sync_or_and_fetch_4:
+ case Builtin::BI__sync_or_and_fetch_8:
+ case Builtin::BI__sync_or_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
+ llvm::Instruction::Or);
+ case Builtin::BI__sync_xor_and_fetch_1:
+ case Builtin::BI__sync_xor_and_fetch_2:
+ case Builtin::BI__sync_xor_and_fetch_4:
+ case Builtin::BI__sync_xor_and_fetch_8:
+ case Builtin::BI__sync_xor_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
+ llvm::Instruction::Xor);
+
+ case Builtin::BI__sync_val_compare_and_swap_1:
+ case Builtin::BI__sync_val_compare_and_swap_2:
+ case Builtin::BI__sync_val_compare_and_swap_4:
+ case Builtin::BI__sync_val_compare_and_swap_8:
+ case Builtin::BI__sync_val_compare_and_swap_16: {
+ QualType T = E->getType();
+ llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+
+ llvm::IntegerType *IntType =
+ llvm::IntegerType::get(getLLVMContext(),
+ getContext().getTypeSize(T));
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+
+ Value *Args[3];
+ Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
+ Args[1] = EmitScalarExpr(E->getArg(1));
+ llvm::Type *ValueType = Args[1]->getType();
+ Args[1] = EmitToInt(*this, Args[1], T, IntType);
+ Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
+
+ Value *Result = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
+ llvm::SequentiallyConsistent);
+ Result = EmitFromInt(*this, Result, T, ValueType);
+ return RValue::get(Result);
+ }
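+  // E.g. __sync_val_compare_and_swap(p, old, new) on an int lowers, roughly,
+  // to (illustrative IR):
+  //   %prev = cmpxchg i32* %p, i32 %old, i32 %new seq_cst
+  // with %prev converted back to the source type and returned.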
+
+ case Builtin::BI__sync_bool_compare_and_swap_1:
+ case Builtin::BI__sync_bool_compare_and_swap_2:
+ case Builtin::BI__sync_bool_compare_and_swap_4:
+ case Builtin::BI__sync_bool_compare_and_swap_8:
+ case Builtin::BI__sync_bool_compare_and_swap_16: {
+ QualType T = E->getArg(1)->getType();
+ llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+
+ llvm::IntegerType *IntType =
+ llvm::IntegerType::get(getLLVMContext(),
+ getContext().getTypeSize(T));
+ llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);
+
+ Value *Args[3];
+ Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
+ Args[1] = EmitToInt(*this, EmitScalarExpr(E->getArg(1)), T, IntType);
+ Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);
+
+ Value *OldVal = Args[1];
+ Value *PrevVal = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
+ llvm::SequentiallyConsistent);
+ Value *Result = Builder.CreateICmpEQ(PrevVal, OldVal);
+ // zext bool to int.
+ Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
+ return RValue::get(Result);
+ }
+
+ case Builtin::BI__sync_swap_1:
+ case Builtin::BI__sync_swap_2:
+ case Builtin::BI__sync_swap_4:
+ case Builtin::BI__sync_swap_8:
+ case Builtin::BI__sync_swap_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
+
+ case Builtin::BI__sync_lock_test_and_set_1:
+ case Builtin::BI__sync_lock_test_and_set_2:
+ case Builtin::BI__sync_lock_test_and_set_4:
+ case Builtin::BI__sync_lock_test_and_set_8:
+ case Builtin::BI__sync_lock_test_and_set_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
+
+ case Builtin::BI__sync_lock_release_1:
+ case Builtin::BI__sync_lock_release_2:
+ case Builtin::BI__sync_lock_release_4:
+ case Builtin::BI__sync_lock_release_8:
+ case Builtin::BI__sync_lock_release_16: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ QualType ElTy = E->getArg(0)->getType()->getPointeeType();
+ CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
+ llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
+ StoreSize.getQuantity() * 8);
+ Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
+ llvm::StoreInst *Store =
+ Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
+ Store->setAlignment(StoreSize.getQuantity());
+ Store->setAtomic(llvm::Release);
+ return RValue::get(0);
+ }
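+  // I.e. __sync_lock_release(p) becomes an atomic store of zero with release
+  // ordering at the natural width of *p; e.g. for an int (illustrative IR):
+  //   store atomic i32 0, i32* %p release, align 4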
+
+ case Builtin::BI__sync_synchronize: {
+ // We assume this is supposed to correspond to a C++0x-style
+ // sequentially-consistent fence (i.e. this is only usable for
+    // synchronization, not device I/O or anything like that). This intrinsic
+ // is really badly designed in the sense that in theory, there isn't
+ // any way to safely use it... but in practice, it mostly works
+ // to use it with non-atomic loads and stores to get acquire/release
+ // semantics.
+ Builder.CreateFence(llvm::SequentiallyConsistent);
+ return RValue::get(0);
+ }
+
+ case Builtin::BI__c11_atomic_is_lock_free:
+ case Builtin::BI__atomic_is_lock_free: {
+ // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
+ // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
+ // _Atomic(T) is always properly-aligned.
+ const char *LibCallName = "__atomic_is_lock_free";
+ CallArgList Args;
+ Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
+ getContext().getSizeType());
+ if (BuiltinID == Builtin::BI__atomic_is_lock_free)
+ Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
+ getContext().VoidPtrTy);
+ else
+ Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
+ getContext().VoidPtrTy);
+ const CGFunctionInfo &FuncInfo =
+ CGM.getTypes().arrangeFunctionCall(E->getType(), Args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
+ llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
+ return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
+ }
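+  // E.g. (a sketch) __c11_atomic_is_lock_free(sizeof(_Atomic(T))) ends up as
+  // a call to __atomic_is_lock_free with a null pointer argument, while
+  // __atomic_is_lock_free(size, p) forwards the user-supplied p.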
+
+ case Builtin::BI__atomic_test_and_set: {
+ // Look at the argument type to determine whether this is a volatile
+ // operation. The parameter type is always volatile.
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+ bool Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
+ Value *NewVal = Builder.getInt8(1);
+ Value *Order = EmitScalarExpr(E->getArg(1));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ AtomicRMWInst *Result = 0;
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Monotonic);
+ break;
+ case 1: // memory_order_consume
+ case 2: // memory_order_acquire
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Acquire);
+ break;
+ case 3: // memory_order_release
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::Release);
+ break;
+ case 4: // memory_order_acq_rel
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::AcquireRelease);
+ break;
+ case 5: // memory_order_seq_cst
+ Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal,
+ llvm::SequentiallyConsistent);
+ break;
+ }
+ Result->setVolatile(Volatile);
+ return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
+ }
+
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ llvm::BasicBlock *BBs[5] = {
+ createBasicBlock("monotonic", CurFn),
+ createBasicBlock("acquire", CurFn),
+ createBasicBlock("release", CurFn),
+ createBasicBlock("acqrel", CurFn),
+ createBasicBlock("seqcst", CurFn)
+ };
+ llvm::AtomicOrdering Orders[5] = {
+ llvm::Monotonic, llvm::Acquire, llvm::Release,
+ llvm::AcquireRelease, llvm::SequentiallyConsistent
+ };
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
+
+ Builder.SetInsertPoint(ContBB);
+ PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");
+
+ for (unsigned i = 0; i < 5; ++i) {
+ Builder.SetInsertPoint(BBs[i]);
+ AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
+ Ptr, NewVal, Orders[i]);
+ RMW->setVolatile(Volatile);
+ Result->addIncoming(RMW, BBs[i]);
+ Builder.CreateBr(ContBB);
+ }
+
+ SI->addCase(Builder.getInt32(0), BBs[0]);
+ SI->addCase(Builder.getInt32(1), BBs[1]);
+ SI->addCase(Builder.getInt32(2), BBs[1]);
+ SI->addCase(Builder.getInt32(3), BBs[2]);
+ SI->addCase(Builder.getInt32(4), BBs[3]);
+ SI->addCase(Builder.getInt32(5), BBs[4]);
+
+ Builder.SetInsertPoint(ContBB);
+ return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
+ }
+
+ case Builtin::BI__atomic_clear: {
+ QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
+ bool Volatile =
+ PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
+
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ unsigned AddrSpace =
+ cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
+ Value *NewVal = Builder.getInt8(0);
+ Value *Order = EmitScalarExpr(E->getArg(1));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
+ Store->setAlignment(1);
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ Store->setOrdering(llvm::Monotonic);
+ break;
+ case 3: // memory_order_release
+ Store->setOrdering(llvm::Release);
+ break;
+ case 5: // memory_order_seq_cst
+ Store->setOrdering(llvm::SequentiallyConsistent);
+ break;
+ }
+ return RValue::get(0);
+ }
+
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ llvm::BasicBlock *BBs[3] = {
+ createBasicBlock("monotonic", CurFn),
+ createBasicBlock("release", CurFn),
+ createBasicBlock("seqcst", CurFn)
+ };
+ llvm::AtomicOrdering Orders[3] = {
+ llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
+ };
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);
+
+ for (unsigned i = 0; i < 3; ++i) {
+ Builder.SetInsertPoint(BBs[i]);
+ StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
+ Store->setAlignment(1);
+ Store->setOrdering(Orders[i]);
+ Builder.CreateBr(ContBB);
+ }
+
+ SI->addCase(Builder.getInt32(0), BBs[0]);
+ SI->addCase(Builder.getInt32(3), BBs[1]);
+ SI->addCase(Builder.getInt32(5), BBs[2]);
+
+ Builder.SetInsertPoint(ContBB);
+ return RValue::get(0);
+ }
+
+ case Builtin::BI__atomic_thread_fence:
+ case Builtin::BI__atomic_signal_fence:
+ case Builtin::BI__c11_atomic_thread_fence:
+ case Builtin::BI__c11_atomic_signal_fence: {
+ llvm::SynchronizationScope Scope;
+ if (BuiltinID == Builtin::BI__atomic_signal_fence ||
+ BuiltinID == Builtin::BI__c11_atomic_signal_fence)
+ Scope = llvm::SingleThread;
+ else
+ Scope = llvm::CrossThread;
+ Value *Order = EmitScalarExpr(E->getArg(0));
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ default: // invalid order
+ break;
+ case 1: // memory_order_consume
+ case 2: // memory_order_acquire
+ Builder.CreateFence(llvm::Acquire, Scope);
+ break;
+ case 3: // memory_order_release
+ Builder.CreateFence(llvm::Release, Scope);
+ break;
+ case 4: // memory_order_acq_rel
+ Builder.CreateFence(llvm::AcquireRelease, Scope);
+ break;
+ case 5: // memory_order_seq_cst
+ Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
+ break;
+ }
+ return RValue::get(0);
+ }
+
+ llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
+ AcquireBB = createBasicBlock("acquire", CurFn);
+ ReleaseBB = createBasicBlock("release", CurFn);
+ AcqRelBB = createBasicBlock("acqrel", CurFn);
+ SeqCstBB = createBasicBlock("seqcst", CurFn);
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);
+
+ Builder.SetInsertPoint(AcquireBB);
+ Builder.CreateFence(llvm::Acquire, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(1), AcquireBB);
+ SI->addCase(Builder.getInt32(2), AcquireBB);
+
+ Builder.SetInsertPoint(ReleaseBB);
+ Builder.CreateFence(llvm::Release, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(3), ReleaseBB);
+
+ Builder.SetInsertPoint(AcqRelBB);
+ Builder.CreateFence(llvm::AcquireRelease, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(4), AcqRelBB);
+
+ Builder.SetInsertPoint(SeqCstBB);
+ Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(5), SeqCstBB);
+
+ Builder.SetInsertPoint(ContBB);
+ return RValue::get(0);
+ }
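+  // Summary of the memory_order -> fence mapping used above:
+  //   relaxed (0):              no fence
+  //   consume (1), acquire (2): fence acquire
+  //   release (3):              fence release
+  //   acq_rel (4):              fence acq_rel
+  //   seq_cst (5):              fence seq_cst
+  // with singlethread scope used for the *_signal_fence forms.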
+
+ // Library functions with special handling.
+ case Builtin::BIsqrt:
+ case Builtin::BIsqrtf:
+ case Builtin::BIsqrtl: {
+ // TODO: there is currently no set of optimizer flags
+ // sufficient for us to rewrite sqrt to @llvm.sqrt.
+ // -fmath-errno=0 is not good enough; we need finiteness.
+ // We could probably precondition the call with an ult
+ // against 0, but is that worth the complexity?
+ break;
+ }
+
+ case Builtin::BIpow:
+ case Builtin::BIpowf:
+ case Builtin::BIpowl: {
+    // Rewrite pow to the llvm.pow intrinsic if allowed.
+ if (!FD->hasAttr<ConstAttr>())
+ break;
+ Value *Base = EmitScalarExpr(E->getArg(0));
+ Value *Exponent = EmitScalarExpr(E->getArg(1));
+ llvm::Type *ArgType = Base->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
+ return RValue::get(Builder.CreateCall2(F, Base, Exponent));
+ }
+
+ case Builtin::BIfma:
+ case Builtin::BIfmaf:
+ case Builtin::BIfmal:
+ case Builtin::BI__builtin_fma:
+ case Builtin::BI__builtin_fmaf:
+ case Builtin::BI__builtin_fmal: {
+ // Rewrite fma to intrinsic.
+ Value *FirstArg = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = FirstArg->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
+ return RValue::get(Builder.CreateCall3(F, FirstArg,
+ EmitScalarExpr(E->getArg(1)),
+ EmitScalarExpr(E->getArg(2))));
+ }
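+  // E.g. fma(a, b, c) on doubles becomes (illustrative):
+  //   %r = call double @llvm.fma.f64(double %a, double %b, double %c)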
+
+ case Builtin::BI__builtin_signbit:
+ case Builtin::BI__builtin_signbitf:
+ case Builtin::BI__builtin_signbitl: {
+ LLVMContext &C = CGM.getLLVMContext();
+
+ Value *Arg = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgTy = Arg->getType();
+ if (ArgTy->isPPC_FP128Ty())
+ break; // FIXME: I'm not sure what the right implementation is here.
+ int ArgWidth = ArgTy->getPrimitiveSizeInBits();
+ llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
+ Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
+ Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
+ Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
+ return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
+ }
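+  // The sequence above bit-tests the sign; e.g. for a float argument
+  // (illustrative IR):
+  //   %bits = bitcast float %x to i32
+  //   %sign = icmp slt i32 %bits, 0
+  //   %res  = zext i1 %sign to i32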
+ case Builtin::BI__builtin_annotation: {
+ llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
+ AnnVal->getType());
+
+    // Get the annotation string, looking through casts. Sema requires this to
+    // be a non-wide string literal, potentially casted, so the cast<> is safe.
+ const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
+ llvm::StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
+ return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
+ }
+ }
+
+ // If this is an alias for a lib function (e.g. __builtin_sin), emit
+ // the call using the normal call path, but using the unmangled
+ // version of the function name.
+ if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
+ return emitLibraryCall(*this, FD, E,
+ CGM.getBuiltinLibFunction(FD, BuiltinID));
+
+ // If this is a predefined lib function (e.g. malloc), emit the call
+ // using exactly the normal call path.
+ if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));
+
+ // See if we have a target specific intrinsic.
+ const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
+ Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
+ if (const char *Prefix =
+ llvm::Triple::getArchTypePrefix(Target.getTriple().getArch()))
+ IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
+
+ if (IntrinsicID != Intrinsic::not_intrinsic) {
+ SmallVector<Value*, 16> Args;
+
+ // Find out if any arguments are required to be integer constant
+ // expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
+ Function *F = CGM.getIntrinsic(IntrinsicID);
+ llvm::FunctionType *FTy = F->getFunctionType();
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
+ Value *ArgValue;
+ // If this is a normal argument, just emit it as a scalar.
+ if ((ICEArguments & (1 << i)) == 0) {
+ ArgValue = EmitScalarExpr(E->getArg(i));
+ } else {
+ // If this is required to be a constant, constant fold it so that we
+ // know that the generated intrinsic gets a ConstantInt.
+ llvm::APSInt Result;
+ bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result,getContext());
+ assert(IsConst && "Constant arg isn't actually constant?");
+ (void)IsConst;
+ ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
+ }
+
+ // If the intrinsic arg type is different from the builtin arg type
+ // we need to do a bit cast.
+ llvm::Type *PTy = FTy->getParamType(i);
+ if (PTy != ArgValue->getType()) {
+ assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
+ "Must be able to losslessly bit cast to param");
+ ArgValue = Builder.CreateBitCast(ArgValue, PTy);
+ }
+
+ Args.push_back(ArgValue);
+ }
+
+ Value *V = Builder.CreateCall(F, Args);
+ QualType BuiltinRetType = E->getType();
+
+ llvm::Type *RetTy = VoidTy;
+ if (!BuiltinRetType->isVoidType())
+ RetTy = ConvertType(BuiltinRetType);
+
+ if (RetTy != V->getType()) {
+ assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
+ "Must be able to losslessly bit cast result type");
+ V = Builder.CreateBitCast(V, RetTy);
+ }
+
+ return RValue::get(V);
+ }
+
+ // See if we have a target specific builtin that needs to be lowered.
+ if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
+ return RValue::get(V);
+
+ ErrorUnsupported(E, "builtin function");
+
+ // Unknown builtin, for now just dump it out and return undef.
+ if (hasAggregateLLVMType(E->getType()))
+ return RValue::getAggregate(CreateMemTemp(E->getType()));
+ return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
+}
+
+Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ switch (Target.getTriple().getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ return EmitARMBuiltinExpr(BuiltinID, E);
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return EmitX86BuiltinExpr(BuiltinID, E);
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ return EmitPPCBuiltinExpr(BuiltinID, E);
+ case llvm::Triple::hexagon:
+ return EmitHexagonBuiltinExpr(BuiltinID, E);
+ default:
+ return 0;
+ }
+}
+
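+// Map a NeonTypeFlags element type to the corresponding LLVM vector type;
+// e.g. Int8 -> <8 x i8> (<16 x i8> for quad), Int32 -> <2 x i32> (<4 x i32>),
+// Float32 -> <2 x float> (<4 x float>); half-precision (Float16) vectors are
+// handled here as <4 x i16> / <8 x i16>.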
+static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
+ NeonTypeFlags TypeFlags) {
+ int IsQuad = TypeFlags.isQuad();
+ switch (TypeFlags.getEltType()) {
+ case NeonTypeFlags::Int8:
+ case NeonTypeFlags::Poly8:
+ return llvm::VectorType::get(CGF->Int8Ty, 8 << IsQuad);
+ case NeonTypeFlags::Int16:
+ case NeonTypeFlags::Poly16:
+ case NeonTypeFlags::Float16:
+ return llvm::VectorType::get(CGF->Int16Ty, 4 << IsQuad);
+ case NeonTypeFlags::Int32:
+ return llvm::VectorType::get(CGF->Int32Ty, 2 << IsQuad);
+ case NeonTypeFlags::Int64:
+ return llvm::VectorType::get(CGF->Int64Ty, 1 << IsQuad);
+ case NeonTypeFlags::Float32:
+ return llvm::VectorType::get(CGF->FloatTy, 2 << IsQuad);
+ }
+ llvm_unreachable("Invalid NeonTypeFlags element type!");
+}
+
+Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
+ unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
+ Value* SV = llvm::ConstantVector::getSplat(nElts, C);
+ return Builder.CreateShuffleVector(V, V, SV, "lane");
+}
+
+Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
+ const char *name,
+ unsigned shift, bool rightshift) {
+ unsigned j = 0;
+ for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
+ ai != ae; ++ai, ++j)
+ if (shift > 0 && shift == j)
+ Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
+ else
+ Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);
+
+ return Builder.CreateCall(F, Ops, name);
+}
+
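+// Build the splat-constant shift amount used by the NEON shift intrinsics;
+// e.g. for Ty = <4 x i32> and a constant shift of 3 this returns
+// <i32 3, i32 3, i32 3, i32 3>, negated first when 'neg' is set (the form
+// the right-shift intrinsics expect).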
+Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
+ bool neg) {
+ int SV = cast<ConstantInt>(V)->getSExtValue();
+
+ llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+ llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
+ return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
+}
+
+/// GetPointeeAlignment - Given an expression with a pointer type, find the
+/// alignment of the type referenced by the pointer. Skip over implicit
+/// casts.
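+/// For example, for an argument of type 'int *' this typically returns 4
+/// (the pointee's alignment in characters); incomplete pointee types
+/// contribute nothing, so the result can fall back to 1.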
+unsigned CodeGenFunction::GetPointeeAlignment(const Expr *Addr) {
+ unsigned Align = 1;
+ // Check if the type is a pointer. The implicit cast operand might not be.
+ while (Addr->getType()->isPointerType()) {
+ QualType PtTy = Addr->getType()->getPointeeType();
+
+ // Can't get alignment of incomplete types.
+ if (!PtTy->isIncompleteType()) {
+ unsigned NewA = getContext().getTypeAlignInChars(PtTy).getQuantity();
+ if (NewA > Align)
+ Align = NewA;
+ }
+
+ // If the address is an implicit cast, repeat with the cast operand.
+ if (const ImplicitCastExpr *CastAddr = dyn_cast<ImplicitCastExpr>(Addr)) {
+ Addr = CastAddr->getSubExpr();
+ continue;
+ }
+ break;
+ }
+ return Align;
+}
+
+/// GetPointeeAlignmentValue - Given an expression with a pointer type, find
+/// the alignment of the type referenced by the pointer. Skip over implicit
+/// casts. Return the alignment as an llvm::Value.
+Value *CodeGenFunction::GetPointeeAlignmentValue(const Expr *Addr) {
+ return llvm::ConstantInt::get(Int32Ty, GetPointeeAlignment(Addr));
+}
+
+Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ if (BuiltinID == ARM::BI__clear_cache) {
+ const FunctionDecl *FD = E->getDirectCallee();
+    // Oddly, people sometimes write this call without arguments and gcc
+    // accepts it; the builtin is also marked as variadic in the description
+    // file.
+ SmallVector<Value*, 2> Ops;
+ for (unsigned i = 0; i < E->getNumArgs(); i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
+ llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
+ StringRef Name = FD->getName();
+ return Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_ldrexd) {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
+
+ Value *LdPtr = EmitScalarExpr(E->getArg(0));
+ Value *Val = Builder.CreateCall(F, LdPtr, "ldrexd");
+
+ Value *Val0 = Builder.CreateExtractValue(Val, 1);
+ Value *Val1 = Builder.CreateExtractValue(Val, 0);
+ Val0 = Builder.CreateZExt(Val0, Int64Ty);
+ Val1 = Builder.CreateZExt(Val1, Int64Ty);
+
+ Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
+ Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
+ return Builder.CreateOr(Val, Val1);
+ }
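+  // I.e. the two i32 halves returned by the intrinsic are recombined into a
+  // single i64 as (hi << 32) | lo, where 'hi' is element 1 and 'lo' is
+  // element 0 of the {i32, i32} result.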
+
+ if (BuiltinID == ARM::BI__builtin_arm_strexd) {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_strexd);
+ llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);
+
+ Value *One = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *Tmp = Builder.CreateAlloca(Int64Ty, One);
+ Value *Val = EmitScalarExpr(E->getArg(0));
+ Builder.CreateStore(Val, Tmp);
+
+ Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
+ Val = Builder.CreateLoad(LdPtr);
+
+ Value *Arg0 = Builder.CreateExtractValue(Val, 0);
+ Value *Arg1 = Builder.CreateExtractValue(Val, 1);
+ Value *StPtr = EmitScalarExpr(E->getArg(1));
+ return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
+ }
+
+ SmallVector<Value*, 4> Ops;
+ for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ // vget_lane and vset_lane are not overloaded and do not have an extra
+ // argument that specifies the vector type.
+ switch (BuiltinID) {
+ default: break;
+ case ARM::BI__builtin_neon_vget_lane_i8:
+ case ARM::BI__builtin_neon_vget_lane_i16:
+ case ARM::BI__builtin_neon_vget_lane_i32:
+ case ARM::BI__builtin_neon_vget_lane_i64:
+ case ARM::BI__builtin_neon_vget_lane_f32:
+ case ARM::BI__builtin_neon_vgetq_lane_i8:
+ case ARM::BI__builtin_neon_vgetq_lane_i16:
+ case ARM::BI__builtin_neon_vgetq_lane_i32:
+ case ARM::BI__builtin_neon_vgetq_lane_i64:
+ case ARM::BI__builtin_neon_vgetq_lane_f32:
+ return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
+ "vget_lane");
+ case ARM::BI__builtin_neon_vset_lane_i8:
+ case ARM::BI__builtin_neon_vset_lane_i16:
+ case ARM::BI__builtin_neon_vset_lane_i32:
+ case ARM::BI__builtin_neon_vset_lane_i64:
+ case ARM::BI__builtin_neon_vset_lane_f32:
+ case ARM::BI__builtin_neon_vsetq_lane_i8:
+ case ARM::BI__builtin_neon_vsetq_lane_i16:
+ case ARM::BI__builtin_neon_vsetq_lane_i32:
+ case ARM::BI__builtin_neon_vsetq_lane_i64:
+ case ARM::BI__builtin_neon_vsetq_lane_f32:
+ Ops.push_back(EmitScalarExpr(E->getArg(2)));
+ return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
+ }
+
+ // Get the last argument, which specifies the vector type.
+ llvm::APSInt Result;
+ const Expr *Arg = E->getArg(E->getNumArgs()-1);
+ if (!Arg->isIntegerConstantExpr(Result, getContext()))
+ return 0;
+
+ if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
+ BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
+ // Determine the overloaded type of this builtin.
+ llvm::Type *Ty;
+ if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
+ Ty = FloatTy;
+ else
+ Ty = DoubleTy;
+
+ // Determine whether this is an unsigned conversion or not.
+ bool usgn = Result.getZExtValue() == 1;
+ unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
+
+ // Call the appropriate intrinsic.
+ Function *F = CGM.getIntrinsic(Int, Ty);
+ return Builder.CreateCall(F, Ops, "vcvtr");
+ }
+
+ // Determine the type of this overloaded NEON intrinsic.
+ NeonTypeFlags Type(Result.getZExtValue());
+ bool usgn = Type.isUnsigned();
+ bool quad = Type.isQuad();
+ bool rightShift = false;
+
+ llvm::VectorType *VTy = GetNeonType(this, Type);
+ llvm::Type *Ty = VTy;
+ if (!Ty)
+ return 0;
+
+ unsigned Int;
+ switch (BuiltinID) {
+ default: return 0;
+ case ARM::BI__builtin_neon_vabd_v:
+ case ARM::BI__builtin_neon_vabdq_v:
+ Int = usgn ? Intrinsic::arm_neon_vabdu : Intrinsic::arm_neon_vabds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
+ case ARM::BI__builtin_neon_vabs_v:
+ case ARM::BI__builtin_neon_vabsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, Ty),
+ Ops, "vabs");
+ case ARM::BI__builtin_neon_vaddhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, Ty),
+ Ops, "vaddhn");
+ case ARM::BI__builtin_neon_vcale_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcage_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacged);
+ return EmitNeonCall(F, Ops, "vcage");
+ }
+ case ARM::BI__builtin_neon_vcaleq_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcageq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
+ return EmitNeonCall(F, Ops, "vcage");
+ }
+ case ARM::BI__builtin_neon_vcalt_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcagt_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtd);
+ return EmitNeonCall(F, Ops, "vcagt");
+ }
+ case ARM::BI__builtin_neon_vcaltq_v:
+ std::swap(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vcagtq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
+ return EmitNeonCall(F, Ops, "vcagt");
+ }
+ case ARM::BI__builtin_neon_vcls_v:
+ case ARM::BI__builtin_neon_vclsq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcls, Ty);
+ return EmitNeonCall(F, Ops, "vcls");
+ }
+ case ARM::BI__builtin_neon_vclz_v:
+ case ARM::BI__builtin_neon_vclzq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vclz, Ty);
+ return EmitNeonCall(F, Ops, "vclz");
+ }
+ case ARM::BI__builtin_neon_vcnt_v:
+ case ARM::BI__builtin_neon_vcntq_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcnt, Ty);
+ return EmitNeonCall(F, Ops, "vcnt");
+ }
+ case ARM::BI__builtin_neon_vcvt_f16_v: {
+ assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
+ "unexpected vcvt_f16_v builtin");
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvtfp2hf);
+ return EmitNeonCall(F, Ops, "vcvt");
+ }
+ case ARM::BI__builtin_neon_vcvt_f32_f16: {
+ assert(Type.getEltType() == NeonTypeFlags::Float16 && !quad &&
+ "unexpected vcvt_f32_f16 builtin");
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vcvthf2fp);
+ return EmitNeonCall(F, Ops, "vcvt");
+ }
+ case ARM::BI__builtin_neon_vcvt_f32_v:
+ case ARM::BI__builtin_neon_vcvtq_f32_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
+ return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
+ : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
+ case ARM::BI__builtin_neon_vcvt_s32_v:
+ case ARM::BI__builtin_neon_vcvt_u32_v:
+ case ARM::BI__builtin_neon_vcvtq_s32_v:
+ case ARM::BI__builtin_neon_vcvtq_u32_v: {
+ llvm::Type *FloatTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
+ Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
+ return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
+ : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
+ }
+ case ARM::BI__builtin_neon_vcvt_n_f32_v:
+ case ARM::BI__builtin_neon_vcvtq_n_f32_v: {
+ llvm::Type *FloatTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
+ llvm::Type *Tys[2] = { FloatTy, Ty };
+ Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp
+ : Intrinsic::arm_neon_vcvtfxs2fp;
+ Function *F = CGM.getIntrinsic(Int, Tys);
+ return EmitNeonCall(F, Ops, "vcvt_n");
+ }
+ case ARM::BI__builtin_neon_vcvt_n_s32_v:
+ case ARM::BI__builtin_neon_vcvt_n_u32_v:
+ case ARM::BI__builtin_neon_vcvtq_n_s32_v:
+ case ARM::BI__builtin_neon_vcvtq_n_u32_v: {
+ llvm::Type *FloatTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, quad));
+ llvm::Type *Tys[2] = { Ty, FloatTy };
+ Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
+ : Intrinsic::arm_neon_vcvtfp2fxs;
+ Function *F = CGM.getIntrinsic(Int, Tys);
+ return EmitNeonCall(F, Ops, "vcvt_n");
+ }
+ case ARM::BI__builtin_neon_vext_v:
+ case ARM::BI__builtin_neon_vextq_v: {
+ int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+ Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Value *SV = llvm::ConstantVector::get(Indices);
+ return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
+ }
+ case ARM::BI__builtin_neon_vhadd_v:
+ case ARM::BI__builtin_neon_vhaddq_v:
+ Int = usgn ? Intrinsic::arm_neon_vhaddu : Intrinsic::arm_neon_vhadds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhadd");
+ case ARM::BI__builtin_neon_vhsub_v:
+ case ARM::BI__builtin_neon_vhsubq_v:
+ Int = usgn ? Intrinsic::arm_neon_vhsubu : Intrinsic::arm_neon_vhsubs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vhsub");
+ case ARM::BI__builtin_neon_vld1_v:
+ case ARM::BI__builtin_neon_vld1q_v:
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty),
+ Ops, "vld1");
+ case ARM::BI__builtin_neon_vld1_lane_v:
+ case ARM::BI__builtin_neon_vld1q_lane_v: {
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ LoadInst *Ld = Builder.CreateLoad(Ops[0]);
+ Value *Align = GetPointeeAlignmentValue(E->getArg(0));
+ Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
+ }
+ case ARM::BI__builtin_neon_vld1_dup_v:
+ case ARM::BI__builtin_neon_vld1q_dup_v: {
+ Value *V = UndefValue::get(Ty);
+ Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ LoadInst *Ld = Builder.CreateLoad(Ops[0]);
+ Value *Align = GetPointeeAlignmentValue(E->getArg(0));
+ Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
+ Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
+ return EmitNeonSplat(Ops[0], CI);
+ }
+ case ARM::BI__builtin_neon_vld2_v:
+ case ARM::BI__builtin_neon_vld2q_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2, Ty);
+ Value *Align = GetPointeeAlignmentValue(E->getArg(1));
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld2");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld3_v:
+ case ARM::BI__builtin_neon_vld3q_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3, Ty);
+ Value *Align = GetPointeeAlignmentValue(E->getArg(1));
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld3");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld4_v:
+ case ARM::BI__builtin_neon_vld4q_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4, Ty);
+ Value *Align = GetPointeeAlignmentValue(E->getArg(1));
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld4");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld2_lane_v:
+ case ARM::BI__builtin_neon_vld2q_lane_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld2lane, Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld3_lane_v:
+ case ARM::BI__builtin_neon_vld3q_lane_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld3lane, Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld4_lane_v:
+ case ARM::BI__builtin_neon_vld4q_lane_v: {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld4lane, Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
+ Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
+ Ops[5] = Builder.CreateBitCast(Ops[5], Ty);
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(1)));
+    Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ case ARM::BI__builtin_neon_vld3_dup_v:
+ case ARM::BI__builtin_neon_vld4_dup_v: {
+ // Handle 64-bit elements as a special-case. There is no "dup" needed.
+ if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
+ switch (BuiltinID) {
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ Int = Intrinsic::arm_neon_vld2;
+ break;
+ case ARM::BI__builtin_neon_vld3_dup_v:
+ Int = Intrinsic::arm_neon_vld3;
+ break;
+ case ARM::BI__builtin_neon_vld4_dup_v:
+ Int = Intrinsic::arm_neon_vld4;
+ break;
+ default: llvm_unreachable("unknown vld_dup intrinsic?");
+ }
+ Function *F = CGM.getIntrinsic(Int, Ty);
+ Value *Align = GetPointeeAlignmentValue(E->getArg(1));
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ switch (BuiltinID) {
+ case ARM::BI__builtin_neon_vld2_dup_v:
+ Int = Intrinsic::arm_neon_vld2lane;
+ break;
+ case ARM::BI__builtin_neon_vld3_dup_v:
+ Int = Intrinsic::arm_neon_vld3lane;
+ break;
+ case ARM::BI__builtin_neon_vld4_dup_v:
+ Int = Intrinsic::arm_neon_vld4lane;
+ break;
+ default: llvm_unreachable("unknown vld_dup intrinsic?");
+ }
+ Function *F = CGM.getIntrinsic(Int, Ty);
+ llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
+
+ SmallVector<Value*, 6> Args;
+ Args.push_back(Ops[1]);
+ Args.append(STy->getNumElements(), UndefValue::get(Ty));
+
+ llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
+ Args.push_back(CI);
+ Args.push_back(GetPointeeAlignmentValue(E->getArg(1)));
+
+ Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
+ // splat lane 0 to all elts in each vector of the result.
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ Value *Val = Builder.CreateExtractValue(Ops[1], i);
+ Value *Elt = Builder.CreateBitCast(Val, Ty);
+ Elt = EmitNeonSplat(Elt, CI);
+ Elt = Builder.CreateBitCast(Elt, Val->getType());
+ Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
+ }
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case ARM::BI__builtin_neon_vmax_v:
+ case ARM::BI__builtin_neon_vmaxq_v:
+ Int = usgn ? Intrinsic::arm_neon_vmaxu : Intrinsic::arm_neon_vmaxs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
+ case ARM::BI__builtin_neon_vmin_v:
+ case ARM::BI__builtin_neon_vminq_v:
+ Int = usgn ? Intrinsic::arm_neon_vminu : Intrinsic::arm_neon_vmins;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
+ case ARM::BI__builtin_neon_vmovl_v: {
+    llvm::Type *DTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
+ if (usgn)
+ return Builder.CreateZExt(Ops[0], Ty, "vmovl");
+ return Builder.CreateSExt(Ops[0], Ty, "vmovl");
+ }
+ case ARM::BI__builtin_neon_vmovn_v: {
+ llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
+ return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
+ }
+ case ARM::BI__builtin_neon_vmul_v:
+ case ARM::BI__builtin_neon_vmulq_v:
+ assert(Type.isPoly() && "vmul builtin only supported for polynomial types");
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, Ty),
+ Ops, "vmul");
+ case ARM::BI__builtin_neon_vmull_v:
+ Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
+ Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
+ case ARM::BI__builtin_neon_vpadal_v:
+ case ARM::BI__builtin_neon_vpadalq_v: {
+ Int = usgn ? Intrinsic::arm_neon_vpadalu : Intrinsic::arm_neon_vpadals;
+ // The source operand type has twice as many elements of half the size.
+ unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+ llvm::Type *EltTy =
+ llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
+ llvm::Type *NarrowTy =
+ llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
+ llvm::Type *Tys[2] = { Ty, NarrowTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpadal");
+ }
+ case ARM::BI__builtin_neon_vpadd_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vpadd, Ty),
+ Ops, "vpadd");
+ case ARM::BI__builtin_neon_vpaddl_v:
+ case ARM::BI__builtin_neon_vpaddlq_v: {
+ Int = usgn ? Intrinsic::arm_neon_vpaddlu : Intrinsic::arm_neon_vpaddls;
+ // The source operand type has twice as many elements of half the size.
+ unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
+ llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
+ llvm::Type *NarrowTy =
+ llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
+ llvm::Type *Tys[2] = { Ty, NarrowTy };
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
+ }
+ case ARM::BI__builtin_neon_vpmax_v:
+ Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
+ case ARM::BI__builtin_neon_vpmin_v:
+ Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
+ case ARM::BI__builtin_neon_vqabs_v:
+ case ARM::BI__builtin_neon_vqabsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqabs, Ty),
+ Ops, "vqabs");
+ case ARM::BI__builtin_neon_vqadd_v:
+ case ARM::BI__builtin_neon_vqaddq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqadd");
+ case ARM::BI__builtin_neon_vqdmlal_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, Ty),
+ Ops, "vqdmlal");
+ case ARM::BI__builtin_neon_vqdmlsl_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, Ty),
+ Ops, "vqdmlsl");
+ case ARM::BI__builtin_neon_vqdmulh_v:
+ case ARM::BI__builtin_neon_vqdmulhq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, Ty),
+ Ops, "vqdmulh");
+ case ARM::BI__builtin_neon_vqdmull_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
+ Ops, "vqdmull");
+ case ARM::BI__builtin_neon_vqmovn_v:
+ Int = usgn ? Intrinsic::arm_neon_vqmovnu : Intrinsic::arm_neon_vqmovns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqmovn");
+ case ARM::BI__builtin_neon_vqmovun_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqmovnsu, Ty),
+ Ops, "vqdmull");
+ case ARM::BI__builtin_neon_vqneg_v:
+ case ARM::BI__builtin_neon_vqnegq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqneg, Ty),
+ Ops, "vqneg");
+ case ARM::BI__builtin_neon_vqrdmulh_v:
+ case ARM::BI__builtin_neon_vqrdmulhq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrdmulh, Ty),
+ Ops, "vqrdmulh");
+ case ARM::BI__builtin_neon_vqrshl_v:
+ case ARM::BI__builtin_neon_vqrshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqrshiftu : Intrinsic::arm_neon_vqrshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshl");
+ case ARM::BI__builtin_neon_vqrshrn_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
+ 1, true);
+ case ARM::BI__builtin_neon_vqrshrun_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
+ Ops, "vqrshrun_n", 1, true);
+ case ARM::BI__builtin_neon_vqshl_v:
+ case ARM::BI__builtin_neon_vqshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl");
+ case ARM::BI__builtin_neon_vqshl_n_v:
+ case ARM::BI__builtin_neon_vqshlq_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vqshiftu : Intrinsic::arm_neon_vqshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
+ 1, false);
+ case ARM::BI__builtin_neon_vqshlu_n_v:
+ case ARM::BI__builtin_neon_vqshluq_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, Ty),
+ Ops, "vqshlu", 1, false);
+ case ARM::BI__builtin_neon_vqshrn_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
+ 1, true);
+ case ARM::BI__builtin_neon_vqshrun_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
+ Ops, "vqshrun_n", 1, true);
+ case ARM::BI__builtin_neon_vqsub_v:
+ case ARM::BI__builtin_neon_vqsubq_v:
+ Int = usgn ? Intrinsic::arm_neon_vqsubu : Intrinsic::arm_neon_vqsubs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqsub");
+ case ARM::BI__builtin_neon_vraddhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vraddhn, Ty),
+ Ops, "vraddhn");
+ case ARM::BI__builtin_neon_vrecpe_v:
+ case ARM::BI__builtin_neon_vrecpeq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
+ Ops, "vrecpe");
+ case ARM::BI__builtin_neon_vrecps_v:
+ case ARM::BI__builtin_neon_vrecpsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecps, Ty),
+ Ops, "vrecps");
+ case ARM::BI__builtin_neon_vrhadd_v:
+ case ARM::BI__builtin_neon_vrhaddq_v:
+ Int = usgn ? Intrinsic::arm_neon_vrhaddu : Intrinsic::arm_neon_vrhadds;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrhadd");
+ case ARM::BI__builtin_neon_vrshl_v:
+ case ARM::BI__builtin_neon_vrshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshl");
+ case ARM::BI__builtin_neon_vrshrn_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
+ Ops, "vrshrn_n", 1, true);
+ case ARM::BI__builtin_neon_vrshr_n_v:
+ case ARM::BI__builtin_neon_vrshrq_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
+ case ARM::BI__builtin_neon_vrsqrte_v:
+ case ARM::BI__builtin_neon_vrsqrteq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrte, Ty),
+ Ops, "vrsqrte");
+ case ARM::BI__builtin_neon_vrsqrts_v:
+ case ARM::BI__builtin_neon_vrsqrtsq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsqrts, Ty),
+ Ops, "vrsqrts");
+ case ARM::BI__builtin_neon_vrsra_n_v:
+ case ARM::BI__builtin_neon_vrsraq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
+ Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
+ Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
+ return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
+ case ARM::BI__builtin_neon_vrsubhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrsubhn, Ty),
+ Ops, "vrsubhn");
+ case ARM::BI__builtin_neon_vshl_v:
+ case ARM::BI__builtin_neon_vshlq_v:
+ Int = usgn ? Intrinsic::arm_neon_vshiftu : Intrinsic::arm_neon_vshifts;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshl");
+ case ARM::BI__builtin_neon_vshll_n_v:
+ Int = usgn ? Intrinsic::arm_neon_vshiftlu : Intrinsic::arm_neon_vshiftls;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vshll", 1);
+ case ARM::BI__builtin_neon_vshl_n_v:
+ case ARM::BI__builtin_neon_vshlq_n_v:
+ Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
+ return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1], "vshl_n");
+ case ARM::BI__builtin_neon_vshrn_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftn, Ty),
+ Ops, "vshrn_n", 1, true);
+ case ARM::BI__builtin_neon_vshr_n_v:
+ case ARM::BI__builtin_neon_vshrq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
+ if (usgn)
+ return Builder.CreateLShr(Ops[0], Ops[1], "vshr_n");
+ else
+ return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n");
+ case ARM::BI__builtin_neon_vsri_n_v:
+ case ARM::BI__builtin_neon_vsriq_n_v:
+ rightShift = true;
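+ // Fall through: the vsli cases below share this shift-insert code path.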
+ case ARM::BI__builtin_neon_vsli_n_v:
+ case ARM::BI__builtin_neon_vsliq_n_v:
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
+ Ops, "vsli_n");
+ case ARM::BI__builtin_neon_vsra_n_v:
+ case ARM::BI__builtin_neon_vsraq_n_v:
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = EmitNeonShiftVector(Ops[2], Ty, false);
+ if (usgn)
+ Ops[1] = Builder.CreateLShr(Ops[1], Ops[2], "vsra_n");
+ else
+ Ops[1] = Builder.CreateAShr(Ops[1], Ops[2], "vsra_n");
+ return Builder.CreateAdd(Ops[0], Ops[1]);
+ case ARM::BI__builtin_neon_vst1_v:
+ case ARM::BI__builtin_neon_vst1q_v:
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1, Ty),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst1_lane_v:
+ case ARM::BI__builtin_neon_vst1q_lane_v: {
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ StoreInst *St = Builder.CreateStore(Ops[1],
+ Builder.CreateBitCast(Ops[0], Ty));
+ Value *Align = GetPointeeAlignmentValue(E->getArg(0));
+ St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ return St;
+ }
+ case ARM::BI__builtin_neon_vst2_v:
+ case ARM::BI__builtin_neon_vst2q_v:
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2, Ty),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst2_lane_v:
+ case ARM::BI__builtin_neon_vst2q_lane_v:
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst2lane, Ty),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst3_v:
+ case ARM::BI__builtin_neon_vst3q_v:
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3, Ty),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst3_lane_v:
+ case ARM::BI__builtin_neon_vst3q_lane_v:
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst3lane, Ty),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst4_v:
+ case ARM::BI__builtin_neon_vst4q_v:
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4, Ty),
+ Ops, "");
+ case ARM::BI__builtin_neon_vst4_lane_v:
+ case ARM::BI__builtin_neon_vst4q_lane_v:
+ Ops.push_back(GetPointeeAlignmentValue(E->getArg(0)));
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, Ty),
+ Ops, "");
+ case ARM::BI__builtin_neon_vsubhn_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, Ty),
+ Ops, "vsubhn");
+ case ARM::BI__builtin_neon_vtbl1_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
+ Ops, "vtbl1");
+ case ARM::BI__builtin_neon_vtbl2_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
+ Ops, "vtbl2");
+ case ARM::BI__builtin_neon_vtbl3_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
+ Ops, "vtbl3");
+ case ARM::BI__builtin_neon_vtbl4_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
+ Ops, "vtbl4");
+ case ARM::BI__builtin_neon_vtbx1_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
+ Ops, "vtbx1");
+ case ARM::BI__builtin_neon_vtbx2_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
+ Ops, "vtbx2");
+ case ARM::BI__builtin_neon_vtbx3_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
+ Ops, "vtbx3");
+ case ARM::BI__builtin_neon_vtbx4_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
+ Ops, "vtbx4");
+ case ARM::BI__builtin_neon_vtst_v:
+ case ARM::BI__builtin_neon_vtstq_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
+ Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
+ ConstantAggregateZero::get(Ty));
+ return Builder.CreateSExt(Ops[0], Ty, "vtst");
+ }
+ case ARM::BI__builtin_neon_vtrn_v:
+ case ARM::BI__builtin_neon_vtrnq_v: {
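+ // For a 4-element vector the two masks built below are <0,4,2,6> (vi == 0)
+ // and <1,5,3,7> (vi == 1): the even- and then odd-indexed elements of the
+ // two inputs interleaved, each result stored through Ops[0].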
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV = 0;
+
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+ Indices.push_back(Builder.getInt32(i+vi));
+ Indices.push_back(Builder.getInt32(i+e+vi));
+ }
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices);
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
+ SV = Builder.CreateStore(SV, Addr);
+ }
+ return SV;
+ }
+ case ARM::BI__builtin_neon_vuzp_v:
+ case ARM::BI__builtin_neon_vuzpq_v: {
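+ // For a 4-element vector the masks built below are <0,2,4,6> (vi == 0) and
+ // <1,3,5,7> (vi == 1): the even- and then odd-positioned elements of the
+ // Ops[1]:Ops[2] concatenation, each result stored through Ops[0].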
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV = 0;
+
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
+ Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
+
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices);
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
+ SV = Builder.CreateStore(SV, Addr);
+ }
+ return SV;
+ }
+ case ARM::BI__builtin_neon_vzip_v:
+ case ARM::BI__builtin_neon_vzipq_v: {
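+ // For a 4-element vector the masks built below are <0,4,1,5> (vi == 0) and
+ // <2,6,3,7> (vi == 1): the low and then high halves of the two inputs
+ // interleaved element by element, each result stored through Ops[0].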
+ Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Value *SV = 0;
+
+ for (unsigned vi = 0; vi != 2; ++vi) {
+ SmallVector<Constant*, 16> Indices;
+ for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
+ Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
+ Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
+ }
+ Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
+ SV = llvm::ConstantVector::get(Indices);
+ SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
+ SV = Builder.CreateStore(SV, Addr);
+ }
+ return SV;
+ }
+ }
+}
+
+llvm::Value *CodeGenFunction::
+BuildVector(ArrayRef<llvm::Value*> Ops) {
+ assert((Ops.size() & (Ops.size() - 1)) == 0 &&
+ "Not a power-of-two sized vector!");
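+ // (For non-zero n, n & (n - 1) is zero exactly when n is a power of two.)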
+ bool AllConstants = true;
+ for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
+ AllConstants &= isa<Constant>(Ops[i]);
+
+ // If this is a constant vector, create a ConstantVector.
+ if (AllConstants) {
+ SmallVector<llvm::Constant*, 16> CstOps;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ CstOps.push_back(cast<Constant>(Ops[i]));
+ return llvm::ConstantVector::get(CstOps);
+ }
+
+ // Otherwise, insertelement the values to build the vector.
+ Value *Result =
+ llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));
+
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
+
+ return Result;
+}
+
+Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ SmallVector<Value*, 4> Ops;
+
+ // Find out if any arguments are required to be integer constant expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ assert(Error == ASTContext::GE_None && "Should not codegen an error");
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
+ // If this is a normal argument, just emit it as a scalar.
+ if ((ICEArguments & (1 << i)) == 0) {
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ continue;
+ }
+
+ // If this is required to be a constant, constant fold it so that we know
+ // that the generated intrinsic gets a ConstantInt.
+ llvm::APSInt Result;
+ bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
+ assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
+ Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
+ }
+
+ switch (BuiltinID) {
+ default: return 0;
+ case X86::BI__builtin_ia32_vec_init_v8qi:
+ case X86::BI__builtin_ia32_vec_init_v4hi:
+ case X86::BI__builtin_ia32_vec_init_v2si:
+ return Builder.CreateBitCast(BuildVector(Ops),
+ llvm::Type::getX86_MMXTy(getLLVMContext()));
+ case X86::BI__builtin_ia32_vec_ext_v2si:
+ return Builder.CreateExtractElement(Ops[0],
+ llvm::ConstantInt::get(Ops[1]->getType(), 0));
+ case X86::BI__builtin_ia32_ldmxcsr: {
+ llvm::Type *PtrTy = Int8PtrTy;
+ Value *One = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *Tmp = Builder.CreateAlloca(Int32Ty, One);
+ Builder.CreateStore(Ops[0], Tmp);
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
+ Builder.CreateBitCast(Tmp, PtrTy));
+ }
+ case X86::BI__builtin_ia32_stmxcsr: {
+ llvm::Type *PtrTy = Int8PtrTy;
+ Value *One = llvm::ConstantInt::get(Int32Ty, 1);
+ Value *Tmp = Builder.CreateAlloca(Int32Ty, One);
+ Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
+ Builder.CreateBitCast(Tmp, PtrTy));
+ return Builder.CreateLoad(Tmp, "stmxcsr");
+ }
+ case X86::BI__builtin_ia32_storehps:
+ case X86::BI__builtin_ia32_storelps: {
+ llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
+
+ // Cast the value to v2i64.
+ Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
+
+ // Extract element 0 (storelps) or element 1 (storehps).
+ unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
+ llvm::Value *Idx = llvm::ConstantInt::get(Int32Ty, Index);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
+
+ // cast pointer to i64 & store
+ Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case X86::BI__builtin_ia32_palignr: {
+ unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+ // If palignr is shifting the pair of input vectors less than 9 bytes,
+ // emit a shuffle instruction.
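+ // E.g. shiftVal == 2 yields the mask <2,3,4,5,6,7,8,9>: bytes 2..7 of
+ // Ops[1] followed by bytes 0..1 of Ops[0] (the shuffle concatenates Ops[1]
+ // then Ops[0]).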
+ if (shiftVal <= 8) {
+ SmallVector<llvm::Constant*, 8> Indices;
+ for (unsigned i = 0; i != 8; ++i)
+ Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
+
+ Value* SV = llvm::ConstantVector::get(Indices);
+ return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+ }
+
+ // If palignr is shifting the pair of input vectors more than 8 but less
+ // than 16 bytes, emit a logical right shift of the destination.
+ if (shiftVal < 16) {
+ // MMX has these as 1 x i64 vectors for some odd optimization reasons.
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
+
+ // Emit the shift via the MMX psrl.q intrinsic.
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
+ return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
+ }
+
+ // If palignr is shifting the pair of vectors 16 bytes or more, emit zero.
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
+ case X86::BI__builtin_ia32_palignr128: {
+ unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+ // If palignr is shifting the pair of input vectors less than 17 bytes,
+ // emit a shuffle instruction.
+ if (shiftVal <= 16) {
+ SmallVector<llvm::Constant*, 16> Indices;
+ for (unsigned i = 0; i != 16; ++i)
+ Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
+
+ Value* SV = llvm::ConstantVector::get(Indices);
+ return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+ }
+
+ // If palignr is shifting the pair of input vectors more than 16 but less
+ // than 32 bytes, emit a logical right shift of the destination.
+ if (shiftVal < 32) {
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);
+
+ // Emit the shift via the SSE2 psrl.dq intrinsic.
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
+ return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
+ }
+
+ // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
+ case X86::BI__builtin_ia32_palignr256: {
+ unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
+
+ // If palignr is shifting the pair of input vectors less than 17 bytes,
+ // emit a shuffle instruction.
+ if (shiftVal <= 16) {
+ SmallVector<llvm::Constant*, 32> Indices;
+ // 256-bit palignr operates on 128-bit lanes, so build the shuffle mask lane by lane.
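+ // E.g. shiftVal == 4 gives lane 0 the indices 4..15 (from Ops[1]'s low
+ // lane) followed by 32..35 (Ops[0]'s low lane); lane 1 repeats the pattern
+ // offset by 16.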
+ for (unsigned l = 0; l != 2; ++l) {
+ unsigned LaneStart = l * 16;
+ unsigned LaneEnd = (l+1) * 16;
+ for (unsigned i = 0; i != 16; ++i) {
+ unsigned Idx = shiftVal + i + LaneStart;
+ if (Idx >= LaneEnd) Idx += 16; // end of lane, switch operand
+ Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx));
+ }
+ }
+
+ Value* SV = llvm::ConstantVector::get(Indices);
+ return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
+ }
+
+ // If palignr is shifting the pair of input vectors more than 16 but less
+ // than 32 bytes, emit a logical right shift of the destination.
+ if (shiftVal < 32) {
+ llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 4);
+
+ Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
+ Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);
+
+ // Emit the shift via the AVX2 psrl.dq intrinsic.
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_avx2_psrl_dq);
+ return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
+ }
+
+ // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ }
+ case X86::BI__builtin_ia32_movntps:
+ case X86::BI__builtin_ia32_movntpd:
+ case X86::BI__builtin_ia32_movntdq:
+ case X86::BI__builtin_ia32_movnti: {
+ llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(),
+ Builder.getInt32(1));
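+ // Attached below as !nontemporal metadata so the backend can select a
+ // non-temporal store (movnt*) rather than an ordinary store.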
+
+ // Convert the type of the pointer to a pointer to the stored type.
+ Value *BC = Builder.CreateBitCast(Ops[0],
+ llvm::PointerType::getUnqual(Ops[1]->getType()),
+ "cast");
+ StoreInst *SI = Builder.CreateStore(Ops[1], BC);
+ SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
+ SI->setAlignment(16);
+ return SI;
+ }
+ // 3DNow!
+ case X86::BI__builtin_ia32_pswapdsf:
+ case X86::BI__builtin_ia32_pswapdsi: {
+ const char *name = 0;
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+ switch(BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_pswapdsf:
+ case X86::BI__builtin_ia32_pswapdsi:
+ name = "pswapd";
+ ID = Intrinsic::x86_3dnowa_pswapd;
+ break;
+ }
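+ // Both pswapd builtins use the same intrinsic; they differ only in the
+ // element type of the operand, which is erased by the x86_mmx bitcast below.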
+ llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
+ Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Ops, name);
+ }
+ }
+}
+
+
+Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ llvm::SmallVector<Value*, 4> Ops;
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+ switch (BuiltinID) {
+ default: return 0;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpeq:
+ ID = Intrinsic::hexagon_C2_cmpeq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgt:
+ ID = Intrinsic::hexagon_C2_cmpgt; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgtu:
+ ID = Intrinsic::hexagon_C2_cmpgtu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpeqp:
+ ID = Intrinsic::hexagon_C2_cmpeqp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgtp:
+ ID = Intrinsic::hexagon_C2_cmpgtp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgtup:
+ ID = Intrinsic::hexagon_C2_cmpgtup; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_bitsset:
+ ID = Intrinsic::hexagon_C2_bitsset; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_bitsclr:
+ ID = Intrinsic::hexagon_C2_bitsclr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpeqi:
+ ID = Intrinsic::hexagon_C2_cmpeqi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgti:
+ ID = Intrinsic::hexagon_C2_cmpgti; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgtui:
+ ID = Intrinsic::hexagon_C2_cmpgtui; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgei:
+ ID = Intrinsic::hexagon_C2_cmpgei; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpgeui:
+ ID = Intrinsic::hexagon_C2_cmpgeui; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmplt:
+ ID = Intrinsic::hexagon_C2_cmplt; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_cmpltu:
+ ID = Intrinsic::hexagon_C2_cmpltu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_bitsclri:
+ ID = Intrinsic::hexagon_C2_bitsclri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_and:
+ ID = Intrinsic::hexagon_C2_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_or:
+ ID = Intrinsic::hexagon_C2_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_xor:
+ ID = Intrinsic::hexagon_C2_xor; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_andn:
+ ID = Intrinsic::hexagon_C2_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_not:
+ ID = Intrinsic::hexagon_C2_not; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_orn:
+ ID = Intrinsic::hexagon_C2_orn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_pxfer_map:
+ ID = Intrinsic::hexagon_C2_pxfer_map; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_any8:
+ ID = Intrinsic::hexagon_C2_any8; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_all8:
+ ID = Intrinsic::hexagon_C2_all8; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_vitpack:
+ ID = Intrinsic::hexagon_C2_vitpack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_mux:
+ ID = Intrinsic::hexagon_C2_mux; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_muxii:
+ ID = Intrinsic::hexagon_C2_muxii; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_muxir:
+ ID = Intrinsic::hexagon_C2_muxir; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_muxri:
+ ID = Intrinsic::hexagon_C2_muxri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_vmux:
+ ID = Intrinsic::hexagon_C2_vmux; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_mask:
+ ID = Intrinsic::hexagon_C2_mask; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpbeq:
+ ID = Intrinsic::hexagon_A2_vcmpbeq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpbgtu:
+ ID = Intrinsic::hexagon_A2_vcmpbgtu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpheq:
+ ID = Intrinsic::hexagon_A2_vcmpheq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmphgt:
+ ID = Intrinsic::hexagon_A2_vcmphgt; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmphgtu:
+ ID = Intrinsic::hexagon_A2_vcmphgtu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpweq:
+ ID = Intrinsic::hexagon_A2_vcmpweq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpwgt:
+ ID = Intrinsic::hexagon_A2_vcmpwgt; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vcmpwgtu:
+ ID = Intrinsic::hexagon_A2_vcmpwgtu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_tfrpr:
+ ID = Intrinsic::hexagon_C2_tfrpr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C2_tfrrp:
+ ID = Intrinsic::hexagon_C2_tfrrp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_acc_sat_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_acc_sat_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_nac_sat_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_nac_sat_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_rnd_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_rnd_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpy_sat_rnd_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_acc_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_acc_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_nac_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_nac_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyd_rnd_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyd_rnd_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_acc_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_acc_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_nac_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_nac_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyu_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyu_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_acc_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_acc_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_nac_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_nac_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_hh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_hh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hl_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_hl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_hl_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_hl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_lh_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_lh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_lh_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_lh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_ll_s0:
+ ID = Intrinsic::hexagon_M2_mpyud_ll_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyud_ll_s1:
+ ID = Intrinsic::hexagon_M2_mpyud_ll_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpysmi:
+ ID = Intrinsic::hexagon_M2_mpysmi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_macsip:
+ ID = Intrinsic::hexagon_M2_macsip; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_macsin:
+ ID = Intrinsic::hexagon_M2_macsin; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyss_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_acc_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyss_acc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_nac_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyss_nac_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyuu_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_acc_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyuu_acc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyuu_nac_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyuu_nac_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpy_up:
+ ID = Intrinsic::hexagon_M2_mpy_up; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyu_up:
+ ID = Intrinsic::hexagon_M2_mpyu_up; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_dpmpyss_rnd_s0:
+ ID = Intrinsic::hexagon_M2_dpmpyss_rnd_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyi:
+ ID = Intrinsic::hexagon_M2_mpyi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mpyui:
+ ID = Intrinsic::hexagon_M2_mpyui; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_maci:
+ ID = Intrinsic::hexagon_M2_maci; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_acci:
+ ID = Intrinsic::hexagon_M2_acci; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_accii:
+ ID = Intrinsic::hexagon_M2_accii; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_nacci:
+ ID = Intrinsic::hexagon_M2_nacci; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_naccii:
+ ID = Intrinsic::hexagon_M2_naccii; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_subacc:
+ ID = Intrinsic::hexagon_M2_subacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s0:
+ ID = Intrinsic::hexagon_M2_vmpy2s_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s1:
+ ID = Intrinsic::hexagon_M2_vmpy2s_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2s_s0:
+ ID = Intrinsic::hexagon_M2_vmac2s_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2s_s1:
+ ID = Intrinsic::hexagon_M2_vmac2s_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s0pack:
+ ID = Intrinsic::hexagon_M2_vmpy2s_s0pack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2s_s1pack:
+ ID = Intrinsic::hexagon_M2_vmpy2s_s1pack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2:
+ ID = Intrinsic::hexagon_M2_vmac2; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2es_s0:
+ ID = Intrinsic::hexagon_M2_vmpy2es_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmpy2es_s1:
+ ID = Intrinsic::hexagon_M2_vmpy2es_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2es_s0:
+ ID = Intrinsic::hexagon_M2_vmac2es_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2es_s1:
+ ID = Intrinsic::hexagon_M2_vmac2es_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vmac2es:
+ ID = Intrinsic::hexagon_M2_vmac2es; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrmac_s0:
+ ID = Intrinsic::hexagon_M2_vrmac_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrmpy_s0:
+ ID = Intrinsic::hexagon_M2_vrmpy_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmpyrs_s0:
+ ID = Intrinsic::hexagon_M2_vdmpyrs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmpyrs_s1:
+ ID = Intrinsic::hexagon_M2_vdmpyrs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmacs_s0:
+ ID = Intrinsic::hexagon_M2_vdmacs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmacs_s1:
+ ID = Intrinsic::hexagon_M2_vdmacs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmpys_s0:
+ ID = Intrinsic::hexagon_M2_vdmpys_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vdmpys_s1:
+ ID = Intrinsic::hexagon_M2_vdmpys_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyrs_s0:
+ ID = Intrinsic::hexagon_M2_cmpyrs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyrs_s1:
+ ID = Intrinsic::hexagon_M2_cmpyrs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyrsc_s0:
+ ID = Intrinsic::hexagon_M2_cmpyrsc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyrsc_s1:
+ ID = Intrinsic::hexagon_M2_cmpyrsc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacs_s0:
+ ID = Intrinsic::hexagon_M2_cmacs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacs_s1:
+ ID = Intrinsic::hexagon_M2_cmacs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacsc_s0:
+ ID = Intrinsic::hexagon_M2_cmacsc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacsc_s1:
+ ID = Intrinsic::hexagon_M2_cmacsc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpys_s0:
+ ID = Intrinsic::hexagon_M2_cmpys_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpys_s1:
+ ID = Intrinsic::hexagon_M2_cmpys_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpysc_s0:
+ ID = Intrinsic::hexagon_M2_cmpysc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpysc_s1:
+ ID = Intrinsic::hexagon_M2_cmpysc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cnacs_s0:
+ ID = Intrinsic::hexagon_M2_cnacs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cnacs_s1:
+ ID = Intrinsic::hexagon_M2_cnacs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cnacsc_s0:
+ ID = Intrinsic::hexagon_M2_cnacsc_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cnacsc_s1:
+ ID = Intrinsic::hexagon_M2_cnacsc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_s1:
+ ID = Intrinsic::hexagon_M2_vrcmpys_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_acc_s1:
+ ID = Intrinsic::hexagon_M2_vrcmpys_acc_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpys_s1rp:
+ ID = Intrinsic::hexagon_M2_vrcmpys_s1rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacls_s0:
+ ID = Intrinsic::hexagon_M2_mmacls_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacls_s1:
+ ID = Intrinsic::hexagon_M2_mmacls_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmachs_s0:
+ ID = Intrinsic::hexagon_M2_mmachs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmachs_s1:
+ ID = Intrinsic::hexagon_M2_mmachs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_s0:
+ ID = Intrinsic::hexagon_M2_mmpyl_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_s1:
+ ID = Intrinsic::hexagon_M2_mmpyl_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_s0:
+ ID = Intrinsic::hexagon_M2_mmpyh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_s1:
+ ID = Intrinsic::hexagon_M2_mmpyh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacls_rs0:
+ ID = Intrinsic::hexagon_M2_mmacls_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacls_rs1:
+ ID = Intrinsic::hexagon_M2_mmacls_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmachs_rs0:
+ ID = Intrinsic::hexagon_M2_mmachs_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmachs_rs1:
+ ID = Intrinsic::hexagon_M2_mmachs_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_rs0:
+ ID = Intrinsic::hexagon_M2_mmpyl_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyl_rs1:
+ ID = Intrinsic::hexagon_M2_mmpyl_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_rs0:
+ ID = Intrinsic::hexagon_M2_mmpyh_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyh_rs1:
+ ID = Intrinsic::hexagon_M2_mmpyh_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_hmmpyl_rs1:
+ ID = Intrinsic::hexagon_M2_hmmpyl_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_hmmpyh_rs1:
+ ID = Intrinsic::hexagon_M2_hmmpyh_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_s0:
+ ID = Intrinsic::hexagon_M2_mmaculs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_s1:
+ ID = Intrinsic::hexagon_M2_mmaculs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_s0:
+ ID = Intrinsic::hexagon_M2_mmacuhs_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_s1:
+ ID = Intrinsic::hexagon_M2_mmacuhs_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_s0:
+ ID = Intrinsic::hexagon_M2_mmpyul_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_s1:
+ ID = Intrinsic::hexagon_M2_mmpyul_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_s0:
+ ID = Intrinsic::hexagon_M2_mmpyuh_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_s1:
+ ID = Intrinsic::hexagon_M2_mmpyuh_s1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_rs0:
+ ID = Intrinsic::hexagon_M2_mmaculs_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmaculs_rs1:
+ ID = Intrinsic::hexagon_M2_mmaculs_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_rs0:
+ ID = Intrinsic::hexagon_M2_mmacuhs_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmacuhs_rs1:
+ ID = Intrinsic::hexagon_M2_mmacuhs_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_rs0:
+ ID = Intrinsic::hexagon_M2_mmpyul_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyul_rs1:
+ ID = Intrinsic::hexagon_M2_mmpyul_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_rs0:
+ ID = Intrinsic::hexagon_M2_mmpyuh_rs0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_mmpyuh_rs1:
+ ID = Intrinsic::hexagon_M2_mmpyuh_rs1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmaci_s0:
+ ID = Intrinsic::hexagon_M2_vrcmaci_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmacr_s0:
+ ID = Intrinsic::hexagon_M2_vrcmacr_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmaci_s0c:
+ ID = Intrinsic::hexagon_M2_vrcmaci_s0c; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmacr_s0c:
+ ID = Intrinsic::hexagon_M2_vrcmacr_s0c; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmaci_s0:
+ ID = Intrinsic::hexagon_M2_cmaci_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmacr_s0:
+ ID = Intrinsic::hexagon_M2_cmacr_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyi_s0:
+ ID = Intrinsic::hexagon_M2_vrcmpyi_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyr_s0:
+ ID = Intrinsic::hexagon_M2_vrcmpyr_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyi_s0c:
+ ID = Intrinsic::hexagon_M2_vrcmpyi_s0c; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vrcmpyr_s0c:
+ ID = Intrinsic::hexagon_M2_vrcmpyr_s0c; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyi_s0:
+ ID = Intrinsic::hexagon_M2_cmpyi_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_cmpyr_s0:
+ ID = Intrinsic::hexagon_M2_cmpyr_s0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s0_sat_i:
+ ID = Intrinsic::hexagon_M2_vcmpy_s0_sat_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s0_sat_r:
+ ID = Intrinsic::hexagon_M2_vcmpy_s0_sat_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s1_sat_i:
+ ID = Intrinsic::hexagon_M2_vcmpy_s1_sat_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmpy_s1_sat_r:
+ ID = Intrinsic::hexagon_M2_vcmpy_s1_sat_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmac_s0_sat_i:
+ ID = Intrinsic::hexagon_M2_vcmac_s0_sat_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vcmac_s0_sat_r:
+ ID = Intrinsic::hexagon_M2_vcmac_s0_sat_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vcrotate:
+ ID = Intrinsic::hexagon_S2_vcrotate; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_add:
+ ID = Intrinsic::hexagon_A2_add; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sub:
+ ID = Intrinsic::hexagon_A2_sub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addsat:
+ ID = Intrinsic::hexagon_A2_addsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subsat:
+ ID = Intrinsic::hexagon_A2_subsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addi:
+ ID = Intrinsic::hexagon_A2_addi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_ll:
+ ID = Intrinsic::hexagon_A2_addh_l16_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_hl:
+ ID = Intrinsic::hexagon_A2_addh_l16_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_sat_ll:
+ ID = Intrinsic::hexagon_A2_addh_l16_sat_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_l16_sat_hl:
+ ID = Intrinsic::hexagon_A2_addh_l16_sat_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_ll:
+ ID = Intrinsic::hexagon_A2_subh_l16_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_hl:
+ ID = Intrinsic::hexagon_A2_subh_l16_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_sat_ll:
+ ID = Intrinsic::hexagon_A2_subh_l16_sat_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_l16_sat_hl:
+ ID = Intrinsic::hexagon_A2_subh_l16_sat_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_ll:
+ ID = Intrinsic::hexagon_A2_addh_h16_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_lh:
+ ID = Intrinsic::hexagon_A2_addh_h16_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_hl:
+ ID = Intrinsic::hexagon_A2_addh_h16_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_hh:
+ ID = Intrinsic::hexagon_A2_addh_h16_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_ll:
+ ID = Intrinsic::hexagon_A2_addh_h16_sat_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_lh:
+ ID = Intrinsic::hexagon_A2_addh_h16_sat_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_hl:
+ ID = Intrinsic::hexagon_A2_addh_h16_sat_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addh_h16_sat_hh:
+ ID = Intrinsic::hexagon_A2_addh_h16_sat_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_ll:
+ ID = Intrinsic::hexagon_A2_subh_h16_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_lh:
+ ID = Intrinsic::hexagon_A2_subh_h16_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_hl:
+ ID = Intrinsic::hexagon_A2_subh_h16_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_hh:
+ ID = Intrinsic::hexagon_A2_subh_h16_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_ll:
+ ID = Intrinsic::hexagon_A2_subh_h16_sat_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_lh:
+ ID = Intrinsic::hexagon_A2_subh_h16_sat_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_hl:
+ ID = Intrinsic::hexagon_A2_subh_h16_sat_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subh_h16_sat_hh:
+ ID = Intrinsic::hexagon_A2_subh_h16_sat_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_aslh:
+ ID = Intrinsic::hexagon_A2_aslh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_asrh:
+ ID = Intrinsic::hexagon_A2_asrh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addp:
+ ID = Intrinsic::hexagon_A2_addp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addpsat:
+ ID = Intrinsic::hexagon_A2_addpsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_addsp:
+ ID = Intrinsic::hexagon_A2_addsp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subp:
+ ID = Intrinsic::hexagon_A2_subp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_neg:
+ ID = Intrinsic::hexagon_A2_neg; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_negsat:
+ ID = Intrinsic::hexagon_A2_negsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_abs:
+ ID = Intrinsic::hexagon_A2_abs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_abssat:
+ ID = Intrinsic::hexagon_A2_abssat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vconj:
+ ID = Intrinsic::hexagon_A2_vconj; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_negp:
+ ID = Intrinsic::hexagon_A2_negp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_absp:
+ ID = Intrinsic::hexagon_A2_absp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_max:
+ ID = Intrinsic::hexagon_A2_max; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_maxu:
+ ID = Intrinsic::hexagon_A2_maxu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_min:
+ ID = Intrinsic::hexagon_A2_min; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_minu:
+ ID = Intrinsic::hexagon_A2_minu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_maxp:
+ ID = Intrinsic::hexagon_A2_maxp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_maxup:
+ ID = Intrinsic::hexagon_A2_maxup; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_minp:
+ ID = Intrinsic::hexagon_A2_minp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_minup:
+ ID = Intrinsic::hexagon_A2_minup; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfr:
+ ID = Intrinsic::hexagon_A2_tfr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfrsi:
+ ID = Intrinsic::hexagon_A2_tfrsi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfrp:
+ ID = Intrinsic::hexagon_A2_tfrp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfrpi:
+ ID = Intrinsic::hexagon_A2_tfrpi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_zxtb:
+ ID = Intrinsic::hexagon_A2_zxtb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sxtb:
+ ID = Intrinsic::hexagon_A2_sxtb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_zxth:
+ ID = Intrinsic::hexagon_A2_zxth; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sxth:
+ ID = Intrinsic::hexagon_A2_sxth; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combinew:
+ ID = Intrinsic::hexagon_A2_combinew; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combineii:
+ ID = Intrinsic::hexagon_A2_combineii; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combine_hh:
+ ID = Intrinsic::hexagon_A2_combine_hh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combine_hl:
+ ID = Intrinsic::hexagon_A2_combine_hl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combine_lh:
+ ID = Intrinsic::hexagon_A2_combine_lh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_combine_ll:
+ ID = Intrinsic::hexagon_A2_combine_ll; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfril:
+ ID = Intrinsic::hexagon_A2_tfril; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_tfrih:
+ ID = Intrinsic::hexagon_A2_tfrih; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_and:
+ ID = Intrinsic::hexagon_A2_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_or:
+ ID = Intrinsic::hexagon_A2_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_xor:
+ ID = Intrinsic::hexagon_A2_xor; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_not:
+ ID = Intrinsic::hexagon_A2_not; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_xor_xacc:
+ ID = Intrinsic::hexagon_M2_xor_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_subri:
+ ID = Intrinsic::hexagon_A2_subri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_andir:
+ ID = Intrinsic::hexagon_A2_andir; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_orir:
+ ID = Intrinsic::hexagon_A2_orir; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_andp:
+ ID = Intrinsic::hexagon_A2_andp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_orp:
+ ID = Intrinsic::hexagon_A2_orp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_xorp:
+ ID = Intrinsic::hexagon_A2_xorp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_notp:
+ ID = Intrinsic::hexagon_A2_notp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sxtw:
+ ID = Intrinsic::hexagon_A2_sxtw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sat:
+ ID = Intrinsic::hexagon_A2_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_sath:
+ ID = Intrinsic::hexagon_A2_sath; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_satuh:
+ ID = Intrinsic::hexagon_A2_satuh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_satub:
+ ID = Intrinsic::hexagon_A2_satub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_satb:
+ ID = Intrinsic::hexagon_A2_satb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddub:
+ ID = Intrinsic::hexagon_A2_vaddub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddubs:
+ ID = Intrinsic::hexagon_A2_vaddubs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddh:
+ ID = Intrinsic::hexagon_A2_vaddh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddhs:
+ ID = Intrinsic::hexagon_A2_vaddhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vadduhs:
+ ID = Intrinsic::hexagon_A2_vadduhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddw:
+ ID = Intrinsic::hexagon_A2_vaddw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vaddws:
+ ID = Intrinsic::hexagon_A2_vaddws; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svavgh:
+ ID = Intrinsic::hexagon_A2_svavgh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svavghs:
+ ID = Intrinsic::hexagon_A2_svavghs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svnavgh:
+ ID = Intrinsic::hexagon_A2_svnavgh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svaddh:
+ ID = Intrinsic::hexagon_A2_svaddh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svaddhs:
+ ID = Intrinsic::hexagon_A2_svaddhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svadduhs:
+ ID = Intrinsic::hexagon_A2_svadduhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svsubh:
+ ID = Intrinsic::hexagon_A2_svsubh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svsubhs:
+ ID = Intrinsic::hexagon_A2_svsubhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_svsubuhs:
+ ID = Intrinsic::hexagon_A2_svsubuhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vraddub:
+ ID = Intrinsic::hexagon_A2_vraddub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vraddub_acc:
+ ID = Intrinsic::hexagon_A2_vraddub_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vradduh:
+ ID = Intrinsic::hexagon_M2_vradduh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubub:
+ ID = Intrinsic::hexagon_A2_vsubub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsububs:
+ ID = Intrinsic::hexagon_A2_vsububs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubh:
+ ID = Intrinsic::hexagon_A2_vsubh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubhs:
+ ID = Intrinsic::hexagon_A2_vsubhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubuhs:
+ ID = Intrinsic::hexagon_A2_vsubuhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubw:
+ ID = Intrinsic::hexagon_A2_vsubw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vsubws:
+ ID = Intrinsic::hexagon_A2_vsubws; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vabsh:
+ ID = Intrinsic::hexagon_A2_vabsh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vabshsat:
+ ID = Intrinsic::hexagon_A2_vabshsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vabsw:
+ ID = Intrinsic::hexagon_A2_vabsw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vabswsat:
+ ID = Intrinsic::hexagon_A2_vabswsat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vabsdiffw:
+ ID = Intrinsic::hexagon_M2_vabsdiffw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M2_vabsdiffh:
+ ID = Intrinsic::hexagon_M2_vabsdiffh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vrsadub:
+ ID = Intrinsic::hexagon_A2_vrsadub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vrsadub_acc:
+ ID = Intrinsic::hexagon_A2_vrsadub_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgub:
+ ID = Intrinsic::hexagon_A2_vavgub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavguh:
+ ID = Intrinsic::hexagon_A2_vavguh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgh:
+ ID = Intrinsic::hexagon_A2_vavgh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavgh:
+ ID = Intrinsic::hexagon_A2_vnavgh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgw:
+ ID = Intrinsic::hexagon_A2_vavgw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavgw:
+ ID = Intrinsic::hexagon_A2_vnavgw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgwr:
+ ID = Intrinsic::hexagon_A2_vavgwr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavgwr:
+ ID = Intrinsic::hexagon_A2_vnavgwr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgwcr:
+ ID = Intrinsic::hexagon_A2_vavgwcr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavgwcr:
+ ID = Intrinsic::hexagon_A2_vnavgwcr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavghcr:
+ ID = Intrinsic::hexagon_A2_vavghcr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavghcr:
+ ID = Intrinsic::hexagon_A2_vnavghcr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavguw:
+ ID = Intrinsic::hexagon_A2_vavguw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavguwr:
+ ID = Intrinsic::hexagon_A2_vavguwr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavgubr:
+ ID = Intrinsic::hexagon_A2_vavgubr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavguhr:
+ ID = Intrinsic::hexagon_A2_vavguhr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vavghr:
+ ID = Intrinsic::hexagon_A2_vavghr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vnavghr:
+ ID = Intrinsic::hexagon_A2_vnavghr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminh:
+ ID = Intrinsic::hexagon_A2_vminh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxh:
+ ID = Intrinsic::hexagon_A2_vmaxh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminub:
+ ID = Intrinsic::hexagon_A2_vminub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxub:
+ ID = Intrinsic::hexagon_A2_vmaxub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminuh:
+ ID = Intrinsic::hexagon_A2_vminuh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxuh:
+ ID = Intrinsic::hexagon_A2_vmaxuh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminw:
+ ID = Intrinsic::hexagon_A2_vminw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxw:
+ ID = Intrinsic::hexagon_A2_vmaxw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vminuw:
+ ID = Intrinsic::hexagon_A2_vminuw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_vmaxuw:
+ ID = Intrinsic::hexagon_A2_vmaxuw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r:
+ ID = Intrinsic::hexagon_S2_asr_r_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r:
+ ID = Intrinsic::hexagon_S2_asl_r_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r:
+ ID = Intrinsic::hexagon_S2_lsr_r_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r:
+ ID = Intrinsic::hexagon_S2_lsl_r_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p:
+ ID = Intrinsic::hexagon_S2_asr_r_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p:
+ ID = Intrinsic::hexagon_S2_asl_r_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p:
+ ID = Intrinsic::hexagon_S2_lsr_r_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p:
+ ID = Intrinsic::hexagon_S2_lsl_r_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_acc:
+ ID = Intrinsic::hexagon_S2_asr_r_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_acc:
+ ID = Intrinsic::hexagon_S2_asl_r_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_acc:
+ ID = Intrinsic::hexagon_S2_lsr_r_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_acc:
+ ID = Intrinsic::hexagon_S2_lsl_r_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_acc:
+ ID = Intrinsic::hexagon_S2_asr_r_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_acc:
+ ID = Intrinsic::hexagon_S2_asl_r_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_acc:
+ ID = Intrinsic::hexagon_S2_lsr_r_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_acc:
+ ID = Intrinsic::hexagon_S2_lsl_r_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_nac:
+ ID = Intrinsic::hexagon_S2_asr_r_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_nac:
+ ID = Intrinsic::hexagon_S2_asl_r_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_nac:
+ ID = Intrinsic::hexagon_S2_lsr_r_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_nac:
+ ID = Intrinsic::hexagon_S2_lsl_r_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_nac:
+ ID = Intrinsic::hexagon_S2_asr_r_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_nac:
+ ID = Intrinsic::hexagon_S2_asl_r_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_nac:
+ ID = Intrinsic::hexagon_S2_lsr_r_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_nac:
+ ID = Intrinsic::hexagon_S2_lsl_r_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_and:
+ ID = Intrinsic::hexagon_S2_asr_r_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_and:
+ ID = Intrinsic::hexagon_S2_asl_r_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_and:
+ ID = Intrinsic::hexagon_S2_lsr_r_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_and:
+ ID = Intrinsic::hexagon_S2_lsl_r_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_or:
+ ID = Intrinsic::hexagon_S2_asr_r_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_or:
+ ID = Intrinsic::hexagon_S2_asl_r_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_r_or:
+ ID = Intrinsic::hexagon_S2_lsr_r_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_r_or:
+ ID = Intrinsic::hexagon_S2_lsl_r_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_and:
+ ID = Intrinsic::hexagon_S2_asr_r_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_and:
+ ID = Intrinsic::hexagon_S2_asl_r_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_and:
+ ID = Intrinsic::hexagon_S2_lsr_r_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_and:
+ ID = Intrinsic::hexagon_S2_lsl_r_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_p_or:
+ ID = Intrinsic::hexagon_S2_asr_r_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_p_or:
+ ID = Intrinsic::hexagon_S2_asl_r_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_p_or:
+ ID = Intrinsic::hexagon_S2_lsr_r_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_p_or:
+ ID = Intrinsic::hexagon_S2_lsl_r_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_r_sat:
+ ID = Intrinsic::hexagon_S2_asr_r_r_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_r_sat:
+ ID = Intrinsic::hexagon_S2_asl_r_r_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r:
+ ID = Intrinsic::hexagon_S2_asr_i_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r:
+ ID = Intrinsic::hexagon_S2_lsr_i_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r:
+ ID = Intrinsic::hexagon_S2_asl_i_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p:
+ ID = Intrinsic::hexagon_S2_asr_i_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p:
+ ID = Intrinsic::hexagon_S2_lsr_i_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p:
+ ID = Intrinsic::hexagon_S2_asl_i_p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_acc:
+ ID = Intrinsic::hexagon_S2_asr_i_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_acc:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_acc:
+ ID = Intrinsic::hexagon_S2_asl_i_r_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_acc:
+ ID = Intrinsic::hexagon_S2_asr_i_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_acc:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_acc:
+ ID = Intrinsic::hexagon_S2_asl_i_p_acc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_nac:
+ ID = Intrinsic::hexagon_S2_asr_i_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_nac:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_nac:
+ ID = Intrinsic::hexagon_S2_asl_i_r_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_nac:
+ ID = Intrinsic::hexagon_S2_asr_i_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_nac:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_nac:
+ ID = Intrinsic::hexagon_S2_asl_i_p_nac; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_xacc:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_xacc:
+ ID = Intrinsic::hexagon_S2_asl_i_r_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_xacc:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_xacc:
+ ID = Intrinsic::hexagon_S2_asl_i_p_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_and:
+ ID = Intrinsic::hexagon_S2_asr_i_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_and:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_and:
+ ID = Intrinsic::hexagon_S2_asl_i_r_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_or:
+ ID = Intrinsic::hexagon_S2_asr_i_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_r_or:
+ ID = Intrinsic::hexagon_S2_lsr_i_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_or:
+ ID = Intrinsic::hexagon_S2_asl_i_r_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_and:
+ ID = Intrinsic::hexagon_S2_asr_i_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_and:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_and:
+ ID = Intrinsic::hexagon_S2_asl_i_p_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_p_or:
+ ID = Intrinsic::hexagon_S2_asr_i_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_p_or:
+ ID = Intrinsic::hexagon_S2_lsr_i_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_p_or:
+ ID = Intrinsic::hexagon_S2_asl_i_p_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_r_sat:
+ ID = Intrinsic::hexagon_S2_asl_i_r_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd:
+ ID = Intrinsic::hexagon_S2_asr_i_r_rnd; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax:
+ ID = Intrinsic::hexagon_S2_asr_i_r_rnd_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_addasl_rrri:
+ ID = Intrinsic::hexagon_S2_addasl_rrri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_valignib:
+ ID = Intrinsic::hexagon_S2_valignib; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_valignrb:
+ ID = Intrinsic::hexagon_S2_valignrb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vspliceib:
+ ID = Intrinsic::hexagon_S2_vspliceib; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsplicerb:
+ ID = Intrinsic::hexagon_S2_vsplicerb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsplatrh:
+ ID = Intrinsic::hexagon_S2_vsplatrh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsplatrb:
+ ID = Intrinsic::hexagon_S2_vsplatrb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_insert:
+ ID = Intrinsic::hexagon_S2_insert; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tableidxb_goodsyntax:
+ ID = Intrinsic::hexagon_S2_tableidxb_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tableidxh_goodsyntax:
+ ID = Intrinsic::hexagon_S2_tableidxh_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tableidxw_goodsyntax:
+ ID = Intrinsic::hexagon_S2_tableidxw_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tableidxd_goodsyntax:
+ ID = Intrinsic::hexagon_S2_tableidxd_goodsyntax; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_extractu:
+ ID = Intrinsic::hexagon_S2_extractu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_insertp:
+ ID = Intrinsic::hexagon_S2_insertp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_extractup:
+ ID = Intrinsic::hexagon_S2_extractup; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_insert_rp:
+ ID = Intrinsic::hexagon_S2_insert_rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_extractu_rp:
+ ID = Intrinsic::hexagon_S2_extractu_rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_insertp_rp:
+ ID = Intrinsic::hexagon_S2_insertp_rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_extractup_rp:
+ ID = Intrinsic::hexagon_S2_extractup_rp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tstbit_i:
+ ID = Intrinsic::hexagon_S2_tstbit_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_setbit_i:
+ ID = Intrinsic::hexagon_S2_setbit_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_togglebit_i:
+ ID = Intrinsic::hexagon_S2_togglebit_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clrbit_i:
+ ID = Intrinsic::hexagon_S2_clrbit_i; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_tstbit_r:
+ ID = Intrinsic::hexagon_S2_tstbit_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_setbit_r:
+ ID = Intrinsic::hexagon_S2_setbit_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_togglebit_r:
+ ID = Intrinsic::hexagon_S2_togglebit_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clrbit_r:
+ ID = Intrinsic::hexagon_S2_clrbit_r; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_vh:
+ ID = Intrinsic::hexagon_S2_asr_i_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vh:
+ ID = Intrinsic::hexagon_S2_lsr_i_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_vh:
+ ID = Intrinsic::hexagon_S2_asl_i_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_vh:
+ ID = Intrinsic::hexagon_S2_asr_r_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_vh:
+ ID = Intrinsic::hexagon_S2_asl_r_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_vh:
+ ID = Intrinsic::hexagon_S2_lsr_r_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_vh:
+ ID = Intrinsic::hexagon_S2_lsl_r_vh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_vw:
+ ID = Intrinsic::hexagon_S2_asr_i_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_i_svw_trun:
+ ID = Intrinsic::hexagon_S2_asr_i_svw_trun; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_svw_trun:
+ ID = Intrinsic::hexagon_S2_asr_r_svw_trun; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_i_vw:
+ ID = Intrinsic::hexagon_S2_lsr_i_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_i_vw:
+ ID = Intrinsic::hexagon_S2_asl_i_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asr_r_vw:
+ ID = Intrinsic::hexagon_S2_asr_r_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_asl_r_vw:
+ ID = Intrinsic::hexagon_S2_asl_r_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsr_r_vw:
+ ID = Intrinsic::hexagon_S2_lsr_r_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lsl_r_vw:
+ ID = Intrinsic::hexagon_S2_lsl_r_vw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vrndpackwh:
+ ID = Intrinsic::hexagon_S2_vrndpackwh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vrndpackwhs:
+ ID = Intrinsic::hexagon_S2_vrndpackwhs; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsxtbh:
+ ID = Intrinsic::hexagon_S2_vsxtbh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vzxtbh:
+ ID = Intrinsic::hexagon_S2_vzxtbh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsathub:
+ ID = Intrinsic::hexagon_S2_vsathub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_svsathub:
+ ID = Intrinsic::hexagon_S2_svsathub; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_svsathb:
+ ID = Intrinsic::hexagon_S2_svsathb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsathb:
+ ID = Intrinsic::hexagon_S2_vsathb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vtrunohb:
+ ID = Intrinsic::hexagon_S2_vtrunohb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vtrunewh:
+ ID = Intrinsic::hexagon_S2_vtrunewh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vtrunowh:
+ ID = Intrinsic::hexagon_S2_vtrunowh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vtrunehb:
+ ID = Intrinsic::hexagon_S2_vtrunehb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsxthw:
+ ID = Intrinsic::hexagon_S2_vsxthw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vzxthw:
+ ID = Intrinsic::hexagon_S2_vzxthw; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsatwh:
+ ID = Intrinsic::hexagon_S2_vsatwh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsatwuh:
+ ID = Intrinsic::hexagon_S2_vsatwuh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_packhl:
+ ID = Intrinsic::hexagon_S2_packhl; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A2_swiz:
+ ID = Intrinsic::hexagon_A2_swiz; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsathub_nopack:
+ ID = Intrinsic::hexagon_S2_vsathub_nopack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsathb_nopack:
+ ID = Intrinsic::hexagon_S2_vsathb_nopack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsatwh_nopack:
+ ID = Intrinsic::hexagon_S2_vsatwh_nopack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_vsatwuh_nopack:
+ ID = Intrinsic::hexagon_S2_vsatwuh_nopack; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_shuffob:
+ ID = Intrinsic::hexagon_S2_shuffob; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_shuffeb:
+ ID = Intrinsic::hexagon_S2_shuffeb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_shuffoh:
+ ID = Intrinsic::hexagon_S2_shuffoh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_shuffeh:
+ ID = Intrinsic::hexagon_S2_shuffeh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_parityp:
+ ID = Intrinsic::hexagon_S2_parityp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_lfsp:
+ ID = Intrinsic::hexagon_S2_lfsp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clbnorm:
+ ID = Intrinsic::hexagon_S2_clbnorm; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clb:
+ ID = Intrinsic::hexagon_S2_clb; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_cl0:
+ ID = Intrinsic::hexagon_S2_cl0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_cl1:
+ ID = Intrinsic::hexagon_S2_cl1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_clbp:
+ ID = Intrinsic::hexagon_S2_clbp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_cl0p:
+ ID = Intrinsic::hexagon_S2_cl0p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_cl1p:
+ ID = Intrinsic::hexagon_S2_cl1p; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_brev:
+ ID = Intrinsic::hexagon_S2_brev; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_ct0:
+ ID = Intrinsic::hexagon_S2_ct0; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_ct1:
+ ID = Intrinsic::hexagon_S2_ct1; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_interleave:
+ ID = Intrinsic::hexagon_S2_interleave; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S2_deinterleave:
+ ID = Intrinsic::hexagon_S2_deinterleave; break;
+
+ case Hexagon::BI__builtin_SI_to_SXTHI_asrh:
+ ID = Intrinsic::hexagon_SI_to_SXTHI_asrh; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_orn:
+ ID = Intrinsic::hexagon_A4_orn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_andn:
+ ID = Intrinsic::hexagon_A4_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_ornp:
+ ID = Intrinsic::hexagon_A4_ornp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_andnp:
+ ID = Intrinsic::hexagon_A4_andnp; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_combineir:
+ ID = Intrinsic::hexagon_A4_combineir; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_combineri:
+ ID = Intrinsic::hexagon_A4_combineri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmpneqi:
+ ID = Intrinsic::hexagon_C4_cmpneqi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmpneq:
+ ID = Intrinsic::hexagon_C4_cmpneq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmpltei:
+ ID = Intrinsic::hexagon_C4_cmpltei; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmplte:
+ ID = Intrinsic::hexagon_C4_cmplte; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmplteui:
+ ID = Intrinsic::hexagon_C4_cmplteui; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_cmplteu:
+ ID = Intrinsic::hexagon_C4_cmplteu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_rcmpneq:
+ ID = Intrinsic::hexagon_A4_rcmpneq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_rcmpneqi:
+ ID = Intrinsic::hexagon_A4_rcmpneqi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_rcmpeq:
+ ID = Intrinsic::hexagon_A4_rcmpeq; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_rcmpeqi:
+ ID = Intrinsic::hexagon_A4_rcmpeqi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_fastcorner9:
+ ID = Intrinsic::hexagon_C4_fastcorner9; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_fastcorner9_not:
+ ID = Intrinsic::hexagon_C4_fastcorner9_not; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_and_andn:
+ ID = Intrinsic::hexagon_C4_and_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_and_and:
+ ID = Intrinsic::hexagon_C4_and_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_and_orn:
+ ID = Intrinsic::hexagon_C4_and_orn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_and_or:
+ ID = Intrinsic::hexagon_C4_and_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_or_andn:
+ ID = Intrinsic::hexagon_C4_or_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_or_and:
+ ID = Intrinsic::hexagon_C4_or_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_or_orn:
+ ID = Intrinsic::hexagon_C4_or_orn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_C4_or_or:
+ ID = Intrinsic::hexagon_C4_or_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_addaddi:
+ ID = Intrinsic::hexagon_S4_addaddi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_subaddi:
+ ID = Intrinsic::hexagon_S4_subaddi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_xor_xacc:
+ ID = Intrinsic::hexagon_M4_xor_xacc; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_and_and:
+ ID = Intrinsic::hexagon_M4_and_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_and_or:
+ ID = Intrinsic::hexagon_M4_and_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_and_xor:
+ ID = Intrinsic::hexagon_M4_and_xor; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_and_andn:
+ ID = Intrinsic::hexagon_M4_and_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_xor_and:
+ ID = Intrinsic::hexagon_M4_xor_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_xor_or:
+ ID = Intrinsic::hexagon_M4_xor_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_xor_andn:
+ ID = Intrinsic::hexagon_M4_xor_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_or_and:
+ ID = Intrinsic::hexagon_M4_or_and; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_or_or:
+ ID = Intrinsic::hexagon_M4_or_or; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_or_xor:
+ ID = Intrinsic::hexagon_M4_or_xor; break;
+
+ case Hexagon::BI__builtin_HEXAGON_M4_or_andn:
+ ID = Intrinsic::hexagon_M4_or_andn; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_or_andix:
+ ID = Intrinsic::hexagon_S4_or_andix; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_or_andi:
+ ID = Intrinsic::hexagon_S4_or_andi; break;
+
+ case Hexagon::BI__builtin_HEXAGON_S4_or_ori:
+ ID = Intrinsic::hexagon_S4_or_ori; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_modwrapu:
+ ID = Intrinsic::hexagon_A4_modwrapu; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_cround_rr:
+ ID = Intrinsic::hexagon_A4_cround_rr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_round_ri:
+ ID = Intrinsic::hexagon_A4_round_ri; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_round_rr:
+ ID = Intrinsic::hexagon_A4_round_rr; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_round_ri_sat:
+ ID = Intrinsic::hexagon_A4_round_ri_sat; break;
+
+ case Hexagon::BI__builtin_HEXAGON_A4_round_rr_sat:
+ ID = Intrinsic::hexagon_A4_round_rr_sat; break;
+
+ }
+
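+  // At this point ID names the selected Hexagon intrinsic; for example
+  // (illustrative case only) __builtin_HEXAGON_A2_and above selects
+  // Intrinsic::hexagon_A2_and, i.e. llvm.hexagon.A2.and. Fetch its
+  // declaration and call it with the operands gathered in Ops.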
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Ops, "");
+}
+
+Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ SmallVector<Value*, 4> Ops;
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+
+ Intrinsic::ID ID = Intrinsic::not_intrinsic;
+
+ switch (BuiltinID) {
+ default: return 0;
+
+  // vec_ld, vec_ldl, vec_lde, vec_lvsl, vec_lvsr
+ case PPC::BI__builtin_altivec_lvx:
+ case PPC::BI__builtin_altivec_lvxl:
+ case PPC::BI__builtin_altivec_lvebx:
+ case PPC::BI__builtin_altivec_lvehx:
+ case PPC::BI__builtin_altivec_lvewx:
+ case PPC::BI__builtin_altivec_lvsl:
+ case PPC::BI__builtin_altivec_lvsr:
+ {
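+    // These builtins take (offset, pointer); fold the offset into the
+    // address with a byte GEP so the intrinsic sees a single address
+    // operand. Illustratively, vec_ld(16, p) ends up as a call to
+    // llvm.ppc.altivec.lvx on the address p + 16.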
+ Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
+
+ Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
+ Ops.pop_back();
+
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
+ case PPC::BI__builtin_altivec_lvx:
+ ID = Intrinsic::ppc_altivec_lvx;
+ break;
+ case PPC::BI__builtin_altivec_lvxl:
+ ID = Intrinsic::ppc_altivec_lvxl;
+ break;
+ case PPC::BI__builtin_altivec_lvebx:
+ ID = Intrinsic::ppc_altivec_lvebx;
+ break;
+ case PPC::BI__builtin_altivec_lvehx:
+ ID = Intrinsic::ppc_altivec_lvehx;
+ break;
+ case PPC::BI__builtin_altivec_lvewx:
+ ID = Intrinsic::ppc_altivec_lvewx;
+ break;
+ case PPC::BI__builtin_altivec_lvsl:
+ ID = Intrinsic::ppc_altivec_lvsl;
+ break;
+ case PPC::BI__builtin_altivec_lvsr:
+ ID = Intrinsic::ppc_altivec_lvsr;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Ops, "");
+ }
+
+  // vec_st, vec_stl, vec_ste
+ case PPC::BI__builtin_altivec_stvx:
+ case PPC::BI__builtin_altivec_stvxl:
+ case PPC::BI__builtin_altivec_stvebx:
+ case PPC::BI__builtin_altivec_stvehx:
+ case PPC::BI__builtin_altivec_stvewx:
+ {
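+    // Same approach for the stores: (value, offset, pointer) is reduced to
+    // (value, address). Illustratively, vec_st(v, 0, p) becomes a call to
+    // llvm.ppc.altivec.stvx(v, p).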
+ Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
+ Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
+ Ops.pop_back();
+
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported st intrinsic!");
+ case PPC::BI__builtin_altivec_stvx:
+ ID = Intrinsic::ppc_altivec_stvx;
+ break;
+ case PPC::BI__builtin_altivec_stvxl:
+ ID = Intrinsic::ppc_altivec_stvxl;
+ break;
+ case PPC::BI__builtin_altivec_stvebx:
+ ID = Intrinsic::ppc_altivec_stvebx;
+ break;
+ case PPC::BI__builtin_altivec_stvehx:
+ ID = Intrinsic::ppc_altivec_stvehx;
+ break;
+ case PPC::BI__builtin_altivec_stvewx:
+ ID = Intrinsic::ppc_altivec_stvewx;
+ break;
+ }
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Ops, "");
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCUDANV.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCUDANV.cpp
new file mode 100644
index 0000000..88a0bdc
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCUDANV.cpp
@@ -0,0 +1,126 @@
+//===----- CGCUDANV.cpp - Interface to NVIDIA CUDA Runtime ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides a class for CUDA code generation targeting the NVIDIA CUDA
+// runtime library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCUDARuntime.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/Decl.h"
+#include "llvm/BasicBlock.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Support/CallSite.h"
+
+#include <vector>
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+
+class CGNVCUDARuntime : public CGCUDARuntime {
+
+private:
+ llvm::Type *IntTy, *SizeTy;
+ llvm::PointerType *CharPtrTy, *VoidPtrTy;
+
+ llvm::Constant *getSetupArgumentFn() const;
+ llvm::Constant *getLaunchFn() const;
+
+public:
+ CGNVCUDARuntime(CodeGenModule &CGM);
+
+ void EmitDeviceStubBody(CodeGenFunction &CGF, FunctionArgList &Args);
+};
+
+}
+
+CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM) : CGCUDARuntime(CGM) {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ IntTy = Types.ConvertType(Ctx.IntTy);
+ SizeTy = Types.ConvertType(Ctx.getSizeType());
+
+ CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
+ VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
+}
+
+llvm::Constant *CGNVCUDARuntime::getSetupArgumentFn() const {
+ // cudaError_t cudaSetupArgument(void *, size_t, size_t)
+ std::vector<llvm::Type*> Params;
+ Params.push_back(VoidPtrTy);
+ Params.push_back(SizeTy);
+ Params.push_back(SizeTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(IntTy,
+ Params, false),
+ "cudaSetupArgument");
+}
+
+llvm::Constant *CGNVCUDARuntime::getLaunchFn() const {
+ // cudaError_t cudaLaunch(char *)
+ std::vector<llvm::Type*> Params;
+ Params.push_back(CharPtrTy);
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(IntTy,
+ Params, false),
+ "cudaLaunch");
+}
+
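+// Emits the host-side stub for a kernel: each argument is registered with
+// cudaSetupArgument using its size and its offset within a struct of the
+// argument types, and if every registration succeeds the kernel is started
+// with cudaLaunch on the address of the stub itself. Roughly, for a
+// hypothetical __global__ void kernel(int x, float *p), the generated code
+// behaves like:
+//   if (!cudaSetupArgument(&x, sizeof(x), offsetof(ArgStack, x)))
+//     if (!cudaSetupArgument(&p, sizeof(p), offsetof(ArgStack, p)))
+//       cudaLaunch((char *)kernel);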
+void CGNVCUDARuntime::EmitDeviceStubBody(CodeGenFunction &CGF,
+ FunctionArgList &Args) {
+ // Build the argument value list and the argument stack struct type.
+ llvm::SmallVector<llvm::Value *, 16> ArgValues;
+ std::vector<llvm::Type *> ArgTypes;
+ for (FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
+ I != E; ++I) {
+ llvm::Value *V = CGF.GetAddrOfLocalVar(*I);
+ ArgValues.push_back(V);
+ assert(isa<llvm::PointerType>(V->getType()) && "Arg type not PointerType");
+ ArgTypes.push_back(cast<llvm::PointerType>(V->getType())->getElementType());
+ }
+ llvm::StructType *ArgStackTy = llvm::StructType::get(
+ CGF.getLLVMContext(), ArgTypes);
+
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
+
+ // Emit the calls to cudaSetupArgument
+ llvm::Constant *cudaSetupArgFn = getSetupArgumentFn();
+ for (unsigned I = 0, E = Args.size(); I != E; ++I) {
+ llvm::Value *Args[3];
+ llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
+ Args[0] = CGF.Builder.CreatePointerCast(ArgValues[I], VoidPtrTy);
+ Args[1] = CGF.Builder.CreateIntCast(
+ llvm::ConstantExpr::getSizeOf(ArgTypes[I]),
+ SizeTy, false);
+ Args[2] = CGF.Builder.CreateIntCast(
+ llvm::ConstantExpr::getOffsetOf(ArgStackTy, I),
+ SizeTy, false);
+ llvm::CallSite CS = CGF.EmitCallOrInvoke(cudaSetupArgFn, Args);
+ llvm::Constant *Zero = llvm::ConstantInt::get(IntTy, 0);
+ llvm::Value *CSZero = CGF.Builder.CreateICmpEQ(CS.getInstruction(), Zero);
+ CGF.Builder.CreateCondBr(CSZero, NextBlock, EndBlock);
+ CGF.EmitBlock(NextBlock);
+ }
+
+ // Emit the call to cudaLaunch
+ llvm::Constant *cudaLaunchFn = getLaunchFn();
+ llvm::Value *Arg = CGF.Builder.CreatePointerCast(CGF.CurFn, CharPtrTy);
+ CGF.EmitCallOrInvoke(cudaLaunchFn, Arg);
+ CGF.EmitBranch(EndBlock);
+
+ CGF.EmitBlock(EndBlock);
+}
+
+CGCUDARuntime *CodeGen::CreateNVCUDARuntime(CodeGenModule &CGM) {
+ return new CGNVCUDARuntime(CGM);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.cpp
new file mode 100644
index 0000000..77dc248
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.cpp
@@ -0,0 +1,55 @@
+//===----- CGCUDARuntime.cpp - Interface to CUDA Runtimes -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for CUDA code generation. Concrete
+// subclasses of this implement code generation for specific CUDA
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCUDARuntime.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/ExprCXX.h"
+#include "CGCall.h"
+#include "CodeGenFunction.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+CGCUDARuntime::~CGCUDARuntime() {}
+
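+// A kernel call of the form kernel<<<grid, block>>>(args) is lowered by first
+// evaluating the configuration expression (typically a cudaConfigureCall);
+// only when that expression evaluates to false (i.e. configuration succeeded
+// with a zero return) is the host stub for the kernel actually called.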
+RValue CGCUDARuntime::EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
+ const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ llvm::BasicBlock *ConfigOKBlock = CGF.createBasicBlock("kcall.configok");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("kcall.end");
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+ CGF.EmitBranchOnBoolExpr(E->getConfig(), ContBlock, ConfigOKBlock);
+
+ eval.begin(CGF);
+ CGF.EmitBlock(ConfigOKBlock);
+
+ const Decl *TargetDecl = 0;
+ if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
+ TargetDecl = DRE->getDecl();
+ }
+ }
+
+ llvm::Value *Callee = CGF.EmitScalarExpr(E->getCallee());
+ CGF.EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
+ E->arg_begin(), E->arg_end(), TargetDecl);
+ CGF.EmitBranch(ContBlock);
+
+ CGF.EmitBlock(ContBlock);
+ eval.end(CGF);
+
+ return RValue::get(0);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.h
new file mode 100644
index 0000000..a99a67a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCUDARuntime.h
@@ -0,0 +1,54 @@
+//===----- CGCUDARuntime.h - Interface to CUDA Runtimes ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for CUDA code generation. Concrete
+// subclasses of this implement code generation for specific CUDA
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CUDARUNTIME_H
+#define CLANG_CODEGEN_CUDARUNTIME_H
+
+namespace clang {
+
+class CUDAKernelCallExpr;
+
+namespace CodeGen {
+
+class CodeGenFunction;
+class CodeGenModule;
+class FunctionArgList;
+class ReturnValueSlot;
+class RValue;
+
+class CGCUDARuntime {
+protected:
+ CodeGenModule &CGM;
+
+public:
+ CGCUDARuntime(CodeGenModule &CGM) : CGM(CGM) {}
+ virtual ~CGCUDARuntime();
+
+ virtual RValue EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
+ const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue);
+
+ virtual void EmitDeviceStubBody(CodeGenFunction &CGF,
+ FunctionArgList &Args) = 0;
+
+};
+
+/// Creates an instance of a CUDA runtime class.
+CGCUDARuntime *CreateNVCUDARuntime(CodeGenModule &CGM);
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
new file mode 100644
index 0000000..7c08650
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXX.cpp
@@ -0,0 +1,392 @@
+//===--- CGCXX.cpp - Emit LLVM Code for declarations ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation.
+//
+//===----------------------------------------------------------------------===//
+
+// We might split this into multiple files if it gets too unwieldy
+
+#include "CGCXXABI.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Mangle.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/StringExtras.h"
+using namespace clang;
+using namespace CodeGen;
+
+/// Try to emit a base destructor as an alias to its primary
+/// base-class destructor.
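+/// For illustration (hypothetical types), given
+///   struct Base { ~Base(); int x; };        // ~Base() defined in this TU
+///   struct Derived : Base { ~Derived() {} };
+/// Derived's base-object destructor has a trivial body, no fields that need
+/// destruction, no virtual bases, and exactly one non-virtual base with a
+/// non-trivial destructor at offset zero, so it can be emitted as an alias
+/// to Base's base-object destructor.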
+bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
+ if (!getCodeGenOpts().CXXCtorDtorAliases)
+ return true;
+
+ // If the destructor doesn't have a trivial body, we have to emit it
+ // separately.
+ if (!D->hasTrivialBody())
+ return true;
+
+ const CXXRecordDecl *Class = D->getParent();
+
+ // If we need to manipulate a VTT parameter, give up.
+ if (Class->getNumVBases()) {
+    // Extra Credit: passing extra parameters is perfectly safe
+    // in many calling conventions, so only bail out if the
+    // destructor's calling convention is nonstandard.
+ return true;
+ }
+
+ // If any field has a non-trivial destructor, we have to emit the
+ // destructor separately.
+ for (CXXRecordDecl::field_iterator I = Class->field_begin(),
+ E = Class->field_end(); I != E; ++I)
+ if ((*I)->getType().isDestructedType())
+ return true;
+
+ // Try to find a unique base class with a non-trivial destructor.
+ const CXXRecordDecl *UniqueBase = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(),
+ E = Class->bases_end(); I != E; ++I) {
+
+ // We're in the base destructor, so skip virtual bases.
+ if (I->isVirtual()) continue;
+
+ // Skip base classes with trivial destructors.
+ const CXXRecordDecl *Base
+ = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ if (Base->hasTrivialDestructor()) continue;
+
+ // If we've already found a base class with a non-trivial
+ // destructor, give up.
+ if (UniqueBase) return true;
+ UniqueBase = Base;
+ }
+
+ // If we didn't find any bases with a non-trivial destructor, then
+ // the base destructor is actually effectively trivial, which can
+ // happen if it was needlessly user-defined or if there are virtual
+ // bases with non-trivial destructors.
+ if (!UniqueBase)
+ return true;
+
+  // If we don't have a definition for the destructor yet, don't
+  // emit it. We can't emit aliases to declarations; that's just not
+  // how aliases work.
+ const CXXDestructorDecl *BaseD = UniqueBase->getDestructor();
+ if (!BaseD->isImplicit() && !BaseD->hasBody())
+ return true;
+
+ // If the base is at a non-zero offset, give up.
+ const ASTRecordLayout &ClassLayout = Context.getASTRecordLayout(Class);
+ if (ClassLayout.getBaseClassOffsetInBits(UniqueBase) != 0)
+ return true;
+
+ return TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Base),
+ GlobalDecl(BaseD, Dtor_Base));
+}
+
+/// Try to emit a definition as a global alias for another definition.
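+/// For example, when a complete constructor is aliased to the base
+/// constructor of the same class, the emitted symbols end up looking
+/// roughly like
+///   @_ZN1SC1Ev = alias ... @_ZN1SC2Ev    ; illustrative mangling for S
+/// so both names resolve to a single function body.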
+bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
+ GlobalDecl TargetDecl) {
+ if (!getCodeGenOpts().CXXCtorDtorAliases)
+ return true;
+
+  // The alias will use the linkage of the referent. If we can't
+ // support aliases with that linkage, fail.
+ llvm::GlobalValue::LinkageTypes Linkage
+ = getFunctionLinkage(cast<FunctionDecl>(AliasDecl.getDecl()));
+
+ switch (Linkage) {
+ // We can definitely emit aliases to definitions with external linkage.
+ case llvm::GlobalValue::ExternalLinkage:
+ case llvm::GlobalValue::ExternalWeakLinkage:
+ break;
+
+ // Same with local linkage.
+ case llvm::GlobalValue::InternalLinkage:
+ case llvm::GlobalValue::PrivateLinkage:
+ case llvm::GlobalValue::LinkerPrivateLinkage:
+ break;
+
+ // We should try to support linkonce linkages.
+ case llvm::GlobalValue::LinkOnceAnyLinkage:
+ case llvm::GlobalValue::LinkOnceODRLinkage:
+ return true;
+
+ // Other linkages will probably never be supported.
+ default:
+ return true;
+ }
+
+ llvm::GlobalValue::LinkageTypes TargetLinkage
+ = getFunctionLinkage(cast<FunctionDecl>(TargetDecl.getDecl()));
+
+ if (llvm::GlobalValue::isWeakForLinker(TargetLinkage))
+ return true;
+
+ // Derive the type for the alias.
+ llvm::PointerType *AliasType
+ = getTypes().GetFunctionType(AliasDecl)->getPointerTo();
+
+  // Find the referent. Some aliases might require a bitcast, in
+ // which case the caller is responsible for ensuring the soundness
+ // of these semantics.
+ llvm::GlobalValue *Ref = cast<llvm::GlobalValue>(GetAddrOfGlobal(TargetDecl));
+ llvm::Constant *Aliasee = Ref;
+ if (Ref->getType() != AliasType)
+ Aliasee = llvm::ConstantExpr::getBitCast(Ref, AliasType);
+
+ // Create the alias with no name.
+ llvm::GlobalAlias *Alias =
+ new llvm::GlobalAlias(AliasType, Linkage, "", Aliasee, &getModule());
+
+ // Switch any previous uses to the alias.
+ StringRef MangledName = getMangledName(AliasDecl);
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry) {
+ assert(Entry->isDeclaration() && "definition already exists for alias");
+ assert(Entry->getType() == AliasType &&
+ "declaration exists with different type");
+ Alias->takeName(Entry);
+ Entry->replaceAllUsesWith(Alias);
+ Entry->eraseFromParent();
+ } else {
+ Alias->setName(MangledName);
+ }
+
+ // Finally, set up the alias with its proper name and attributes.
+ SetCommonAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
+
+ return false;
+}
+
+void CodeGenModule::EmitCXXConstructors(const CXXConstructorDecl *D) {
+ // The constructor used for constructing this as a complete class;
+  // constructs the virtual bases, then calls the base constructor.
+ if (!D->getParent()->isAbstract()) {
+ // We don't need to emit the complete ctor if the class is abstract.
+ EmitGlobal(GlobalDecl(D, Ctor_Complete));
+ }
+
+ // The constructor used for constructing this as a base class;
+ // ignores virtual bases.
+ EmitGlobal(GlobalDecl(D, Ctor_Base));
+}
+
+void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *ctor,
+ CXXCtorType ctorType) {
+ // The complete constructor is equivalent to the base constructor
+ // for classes with no virtual bases. Try to emit it as an alias.
+ if (ctorType == Ctor_Complete &&
+ !ctor->getParent()->getNumVBases() &&
+ !TryEmitDefinitionAsAlias(GlobalDecl(ctor, Ctor_Complete),
+ GlobalDecl(ctor, Ctor_Base)))
+ return;
+
+ const CGFunctionInfo &fnInfo =
+ getTypes().arrangeCXXConstructorDeclaration(ctor, ctorType);
+
+ llvm::Function *fn =
+ cast<llvm::Function>(GetAddrOfCXXConstructor(ctor, ctorType, &fnInfo));
+ setFunctionLinkage(ctor, fn);
+
+ CodeGenFunction(*this).GenerateCode(GlobalDecl(ctor, ctorType), fn, fnInfo);
+
+ SetFunctionDefinitionAttributes(ctor, fn);
+ SetLLVMFunctionAttributesForDefinition(ctor, fn);
+}
+
+llvm::GlobalValue *
+CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
+ CXXCtorType ctorType,
+ const CGFunctionInfo *fnInfo) {
+ GlobalDecl GD(ctor, ctorType);
+
+ StringRef name = getMangledName(GD);
+ if (llvm::GlobalValue *existing = GetGlobalValue(name))
+ return existing;
+
+ if (!fnInfo)
+ fnInfo = &getTypes().arrangeCXXConstructorDeclaration(ctor, ctorType);
+
+ llvm::FunctionType *fnType = getTypes().GetFunctionType(*fnInfo);
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
+ /*ForVTable=*/false));
+}
+
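+// Under the Itanium ABI the destructor variants referred to below are the
+// deleting (D0), complete (D1) and base (D2) destructors; for a hypothetical
+// struct S they mangle to _ZN1SD0Ev, _ZN1SD1Ev and _ZN1SD2Ev respectively.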
+void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) {
+ // The destructor in a virtual table is always a 'deleting'
+ // destructor, which calls the complete destructor and then uses the
+ // appropriate operator delete.
+ if (D->isVirtual())
+ EmitGlobal(GlobalDecl(D, Dtor_Deleting));
+
+ // The destructor used for destructing this as a most-derived class;
+  // calls the base destructor and then destructs any virtual bases.
+ EmitGlobal(GlobalDecl(D, Dtor_Complete));
+
+ // The destructor used for destructing this as a base class; ignores
+ // virtual bases.
+ EmitGlobal(GlobalDecl(D, Dtor_Base));
+}
+
+void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *dtor,
+ CXXDtorType dtorType) {
+ // The complete destructor is equivalent to the base destructor for
+ // classes with no virtual bases, so try to emit it as an alias.
+ if (dtorType == Dtor_Complete &&
+ !dtor->getParent()->getNumVBases() &&
+ !TryEmitDefinitionAsAlias(GlobalDecl(dtor, Dtor_Complete),
+ GlobalDecl(dtor, Dtor_Base)))
+ return;
+
+ // The base destructor is equivalent to the base destructor of its
+ // base class if there is exactly one non-virtual base class with a
+ // non-trivial destructor, there are no fields with a non-trivial
+ // destructor, and the body of the destructor is trivial.
+ if (dtorType == Dtor_Base && !TryEmitBaseDestructorAsAlias(dtor))
+ return;
+
+ const CGFunctionInfo &fnInfo =
+ getTypes().arrangeCXXDestructor(dtor, dtorType);
+
+ llvm::Function *fn =
+ cast<llvm::Function>(GetAddrOfCXXDestructor(dtor, dtorType, &fnInfo));
+ setFunctionLinkage(dtor, fn);
+
+ CodeGenFunction(*this).GenerateCode(GlobalDecl(dtor, dtorType), fn, fnInfo);
+
+ SetFunctionDefinitionAttributes(dtor, fn);
+ SetLLVMFunctionAttributesForDefinition(dtor, fn);
+}
+
+llvm::GlobalValue *
+CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
+ CXXDtorType dtorType,
+ const CGFunctionInfo *fnInfo) {
+ GlobalDecl GD(dtor, dtorType);
+
+ StringRef name = getMangledName(GD);
+ if (llvm::GlobalValue *existing = GetGlobalValue(name))
+ return existing;
+
+ if (!fnInfo) fnInfo = &getTypes().arrangeCXXDestructor(dtor, dtorType);
+
+ llvm::FunctionType *fnType = getTypes().GetFunctionType(*fnInfo);
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
+ /*ForVTable=*/false));
+}
+
+static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VTableIndex,
+ llvm::Value *This, llvm::Type *Ty) {
+ Ty = Ty->getPointerTo()->getPointerTo();
+
+ llvm::Value *VTable = CGF.GetVTablePtr(This, Ty);
+ llvm::Value *VFuncPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
+ return CGF.Builder.CreateLoad(VFuncPtr);
+}
+
+llvm::Value *
+CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
+ llvm::Type *Ty) {
+ MD = MD->getCanonicalDecl();
+ uint64_t VTableIndex = CGM.getVTableContext().getMethodVTableIndex(MD);
+
+ return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
+}
+
+/// BuildAppleKextVirtualCall - This routine supports gcc's kext ABI by making
+/// an indirect call to a virtual function. It makes the call by indexing
+/// into the vtable.
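+/// (Illustration: under -fapple-kext a qualified call such as p->Base::f(),
+/// with f virtual, is still dispatched through Base's vtable rather than
+/// being turned into a direct call.)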
+llvm::Value *
+CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
+ NestedNameSpecifier *Qual,
+ llvm::Type *Ty) {
+ llvm::Value *VTable = 0;
+ assert((Qual->getKind() == NestedNameSpecifier::TypeSpec) &&
+ "BuildAppleKextVirtualCall - bad Qual kind");
+
+ const Type *QTy = Qual->getAsType();
+ QualType T = QualType(QTy, 0);
+ const RecordType *RT = T->getAs<RecordType>();
+ assert(RT && "BuildAppleKextVirtualCall - Qual type must be record");
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD))
+ return BuildAppleKextVirtualDestructorCall(DD, Dtor_Complete, RD);
+
+ VTable = CGM.getVTables().GetAddrOfVTable(RD);
+ Ty = Ty->getPointerTo()->getPointerTo();
+ VTable = Builder.CreateBitCast(VTable, Ty);
+ assert(VTable && "BuildVirtualCall = kext vtbl pointer is null");
+ MD = MD->getCanonicalDecl();
+ uint64_t VTableIndex = CGM.getVTableContext().getMethodVTableIndex(MD);
+ uint64_t AddressPoint =
+ CGM.getVTableContext().getVTableLayout(RD)
+ .getAddressPoint(BaseSubobject(RD, CharUnits::Zero()));
+ VTableIndex += AddressPoint;
+ llvm::Value *VFuncPtr =
+ Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
+ return Builder.CreateLoad(VFuncPtr);
+}
+
+/// BuildAppleKextVirtualDestructorCall - This routine makes an indirect vtable
+/// call for a virtual destructor. It returns 0 if it could not do so.
+llvm::Value *
+CodeGenFunction::BuildAppleKextVirtualDestructorCall(
+ const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const CXXRecordDecl *RD) {
+ llvm::Value * Callee = 0;
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(DD);
+  // FIXME: The Dtor_Base destructor is always called directly; it needs to
+  // be inline-expanded into the caller somehow. -O does that, but -O0 needs
+  // to be supported as well.
+ if (MD->isVirtual() && Type != Dtor_Base) {
+ // Compute the function type we're calling.
+ const CGFunctionInfo &FInfo =
+ CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
+ Dtor_Complete);
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(FInfo);
+
+ llvm::Value *VTable = CGM.getVTables().GetAddrOfVTable(RD);
+ Ty = Ty->getPointerTo()->getPointerTo();
+ VTable = Builder.CreateBitCast(VTable, Ty);
+ DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
+ uint64_t VTableIndex =
+ CGM.getVTableContext().getMethodVTableIndex(GlobalDecl(DD, Type));
+ uint64_t AddressPoint =
+ CGM.getVTableContext().getVTableLayout(RD)
+ .getAddressPoint(BaseSubobject(RD, CharUnits::Zero()));
+ VTableIndex += AddressPoint;
+ llvm::Value *VFuncPtr =
+ Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
+ Callee = Builder.CreateLoad(VFuncPtr);
+ }
+ return Callee;
+}
+
+llvm::Value *
+CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
+ llvm::Value *This, llvm::Type *Ty) {
+ DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
+ uint64_t VTableIndex =
+ CGM.getVTableContext().getMethodVTableIndex(GlobalDecl(DD, Type));
+
+ return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp
new file mode 100644
index 0000000..befebbe
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.cpp
@@ -0,0 +1,199 @@
+//===----- CGCXXABI.cpp - Interface to C++ ABIs -----------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for C++ code generation. Concrete subclasses
+// of this implement code generation for specific C++ ABIs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+CGCXXABI::~CGCXXABI() { }
+
+static void ErrorUnsupportedABI(CodeGenFunction &CGF,
+ StringRef S) {
+ DiagnosticsEngine &Diags = CGF.CGM.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot yet compile %1 in this ABI");
+ Diags.Report(CGF.getContext().getFullLoc(CGF.CurCodeDecl->getLocation()),
+ DiagID)
+ << S;
+}
+
+static llvm::Constant *GetBogusMemberPointer(CodeGenModule &CGM,
+ QualType T) {
+ return llvm::Constant::getNullValue(CGM.getTypes().ConvertType(T));
+}
+
+llvm::Type *
+CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
+ return CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+}
+
+llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "calls through member pointers");
+
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodType(RD, FPT));
+ return llvm::Constant::getNullValue(FTy->getPointerTo());
+}
+
+llvm::Value *CGCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "loads of member pointers");
+ llvm::Type *Ty = CGF.ConvertType(MPT->getPointeeType())->getPointerTo();
+ return llvm::Constant::getNullValue(Ty);
+}
+
+llvm::Value *CGCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src) {
+ ErrorUnsupportedABI(CGF, "member function pointer conversions");
+ return GetBogusMemberPointer(CGM, E->getType());
+}
+
+llvm::Constant *CGCXXABI::EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *Src) {
+ return GetBogusMemberPointer(CGM, E->getType());
+}
+
+llvm::Value *
+CGCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality) {
+ ErrorUnsupportedABI(CGF, "member function pointer comparison");
+ return CGF.Builder.getFalse();
+}
+
+llvm::Value *
+CGCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ ErrorUnsupportedABI(CGF, "member function pointer null testing");
+ return CGF.Builder.getFalse();
+}
+
+llvm::Constant *
+CGCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
+ return GetBogusMemberPointer(CGM, QualType(MPT, 0));
+}
+
+llvm::Constant *CGCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
+ return GetBogusMemberPointer(CGM,
+ CGM.getContext().getMemberPointerType(MD->getType(),
+ MD->getParent()->getTypeForDecl()));
+}
+
+llvm::Constant *CGCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset) {
+ return GetBogusMemberPointer(CGM, QualType(MPT, 0));
+}
+
+llvm::Constant *CGCXXABI::EmitMemberPointer(const APValue &MP, QualType MPT) {
+ return GetBogusMemberPointer(CGM, MPT);
+}
+
+bool CGCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
+ // Fake answer.
+ return true;
+}
+
+void CGCXXABI::BuildThisParam(CodeGenFunction &CGF, FunctionArgList &params) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
+
+ // FIXME: I'm not entirely sure I like using a fake decl just for code
+ // generation. Maybe we can come up with a better way?
+ ImplicitParamDecl *ThisDecl
+ = ImplicitParamDecl::Create(CGM.getContext(), 0, MD->getLocation(),
+ &CGM.getContext().Idents.get("this"),
+ MD->getThisType(CGM.getContext()));
+ params.push_back(ThisDecl);
+ getThisDecl(CGF) = ThisDecl;
+}
+
+void CGCXXABI::EmitThisParam(CodeGenFunction &CGF) {
+  // Initialize the 'this' slot.
+ assert(getThisDecl(CGF) && "no 'this' variable for function");
+ getThisValue(CGF)
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getThisDecl(CGF)),
+ "this");
+}
+
+void CGCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
+ RValue RV, QualType ResultType) {
+ CGF.EmitReturnOfRValue(RV, ResultType);
+}
+
+CharUnits CGCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
+ return CharUnits::Zero();
+}
+
+llvm::Value *CGCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) {
+ // Should never be called.
+ ErrorUnsupportedABI(CGF, "array cookie initialization");
+ return 0;
+}
+
+void CGCXXABI::ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr, QualType ElementType,
+ llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize) {
+ ErrorUnsupportedABI(CGF, "array cookie reading");
+
+ // This should be enough to avoid assertions.
+ NumElements = 0;
+ AllocPtr = llvm::Constant::getNullValue(CGF.Builder.getInt8PtrTy());
+ CookieSize = CharUnits::Zero();
+}
+
+void CGCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
+ const VarDecl &D,
+ llvm::GlobalVariable *GV,
+ bool PerformInit) {
+ ErrorUnsupportedABI(CGF, "static local variable initialization");
+}
+
+/// Returns the adjustment, in bytes, required for the given
+/// member-pointer operation. Returns null if no adjustment is
+/// required.
+llvm::Constant *CGCXXABI::getMemberPointerAdjustment(const CastExpr *E) {
+ assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
+ E->getCastKind() == CK_BaseToDerivedMemberPointer);
+
+ QualType derivedType;
+ if (E->getCastKind() == CK_DerivedToBaseMemberPointer)
+ derivedType = E->getSubExpr()->getType();
+ else
+ derivedType = E->getType();
+
+ const CXXRecordDecl *derivedClass =
+ derivedType->castAs<MemberPointerType>()->getClass()->getAsCXXRecordDecl();
+
+ return CGM.GetNonVirtualBaseClassOffset(derivedClass,
+ E->path_begin(),
+ E->path_end());
+}
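
For reference, the small standalone program below is not part of the patch; the class names are invented. It shows the source-level conversion whose constant offset getMemberPointerAdjustment() computes via GetNonVirtualBaseClassOffset: converting a member pointer between a base and a derived class has to fold in the offset of the base subobject.

#include <cstdio>

struct A { int a; };
struct B { int b; };
struct D : A, B { int d; };

int main() {
  // Converting int B::* to int D::* must account for where the B subobject
  // lives inside D; that adjustment is the value the cast path asks for.
  int B::*pb = &B::b;
  int D::*pd = pb;              // base-to-derived member-pointer conversion
  D x = {};
  x.b = 42;
  std::printf("%d\n", x.*pd);   // prints 42
  return 0;
}
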
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
new file mode 100644
index 0000000..4e045f5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCXXABI.h
@@ -0,0 +1,262 @@
+//===----- CGCXXABI.h - Interface to C++ ABIs -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for C++ code generation. Concrete subclasses
+// of this implement code generation for specific C++ ABIs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CXXABI_H
+#define CLANG_CODEGEN_CXXABI_H
+
+#include "clang/Basic/LLVM.h"
+
+#include "CodeGenFunction.h"
+
+namespace llvm {
+ class Constant;
+ class Type;
+ class Value;
+}
+
+namespace clang {
+ class CastExpr;
+ class CXXConstructorDecl;
+ class CXXDestructorDecl;
+ class CXXMethodDecl;
+ class CXXRecordDecl;
+ class FieldDecl;
+ class MangleContext;
+
+namespace CodeGen {
+ class CodeGenFunction;
+ class CodeGenModule;
+
+/// Implements C++ ABI-specific code generation functions.
+class CGCXXABI {
+protected:
+ CodeGenModule &CGM;
+ OwningPtr<MangleContext> MangleCtx;
+
+ CGCXXABI(CodeGenModule &CGM)
+ : CGM(CGM), MangleCtx(CGM.getContext().createMangleContext()) {}
+
+protected:
+ ImplicitParamDecl *&getThisDecl(CodeGenFunction &CGF) {
+ return CGF.CXXABIThisDecl;
+ }
+ llvm::Value *&getThisValue(CodeGenFunction &CGF) {
+ return CGF.CXXABIThisValue;
+ }
+
+ ImplicitParamDecl *&getVTTDecl(CodeGenFunction &CGF) {
+ return CGF.CXXVTTDecl;
+ }
+ llvm::Value *&getVTTValue(CodeGenFunction &CGF) {
+ return CGF.CXXVTTValue;
+ }
+
+ /// Build a parameter variable suitable for 'this'.
+ void BuildThisParam(CodeGenFunction &CGF, FunctionArgList &Params);
+
+ /// Perform prolog initialization of the parameter variable suitable
+ /// for 'this' emitted by BuildThisParam.
+ void EmitThisParam(CodeGenFunction &CGF);
+
+ ASTContext &getContext() const { return CGM.getContext(); }
+
+public:
+
+ virtual ~CGCXXABI();
+
+ /// Gets the mangle context.
+ MangleContext &getMangleContext() {
+ return *MangleCtx;
+ }
+
+ /// Find the LLVM type used to represent the given member pointer
+ /// type.
+ virtual llvm::Type *
+ ConvertMemberPointerType(const MemberPointerType *MPT);
+
+ /// Load a member function from an object and a member function
+ /// pointer. Apply the this-adjustment and set 'This' to the
+ /// adjusted value.
+ virtual llvm::Value *
+ EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+ /// Calculate an l-value from an object and a data member pointer.
+ virtual llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+ /// Perform a derived-to-base, base-to-derived, or bitcast member
+ /// pointer conversion.
+ virtual llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src);
+
+ /// Perform a derived-to-base, base-to-derived, or bitcast member
+ /// pointer conversion on a constant value.
+ virtual llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *Src);
+
+ /// Return true if the given member pointer can be zero-initialized
+ /// (in the C++ sense) with an LLVM zeroinitializer.
+ virtual bool isZeroInitializable(const MemberPointerType *MPT);
+
+ /// Create a null member pointer of the given type.
+ virtual llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
+
+ /// Create a member pointer for the given method.
+ virtual llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
+
+ /// Create a member pointer for the given field.
+ virtual llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset);
+
+ /// Create a member pointer for the given member pointer constant.
+ virtual llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT);
+
+ /// Emit a comparison between two member pointers. Returns an i1.
+ virtual llvm::Value *
+ EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality);
+
+ /// Determine if a member pointer is non-null. Returns an i1.
+ virtual llvm::Value *
+ EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+protected:
+ /// A utility method for computing the offset required for the given
+ /// base-to-derived or derived-to-base member-pointer conversion.
+ /// Does not handle virtual conversions (in case we ever fully
+ /// support an ABI that allows this). Returns null if no adjustment
+ /// is required.
+ llvm::Constant *getMemberPointerAdjustment(const CastExpr *E);
+
+public:
+ /// Build the signature of the given constructor variant by adding
+ /// any required parameters. For convenience, ResTy has been
+ /// initialized to 'void', and ArgTys has been initialized with the
+ /// type of 'this' (although this may be changed by the ABI) and
+ /// will have the formal parameters added to it afterwards.
+ ///
+ /// If there are ever any ABIs where the implicit parameters are
+ /// intermixed with the formal parameters, we can address those
+ /// then.
+ virtual void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType T,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) = 0;
+
+ /// Build the signature of the given destructor variant by adding
+ /// any required parameters. For convenience, ResTy has been
+ /// initialized to 'void' and ArgTys has been initialized with the
+ /// type of 'this' (although this may be changed by the ABI).
+ virtual void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType T,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) = 0;
+
+ /// Build the ABI-specific portion of the parameter list for a
+ /// function. This generally involves a 'this' parameter and
+ /// possibly some extra data for constructors and destructors.
+ ///
+ /// ABIs may also choose to override the return type, which has been
+ /// initialized with the formal return type of the function.
+ virtual void BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) = 0;
+
+ /// Emit the ABI-specific prolog for the function.
+ virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF) = 0;
+
+ virtual void EmitReturnFromThunk(CodeGenFunction &CGF,
+ RValue RV, QualType ResultType);
+
+ /**************************** Array cookies ******************************/
+
+ /// Returns the extra size required in order to store the array
+ /// cookie for the given type. May return 0 to indicate that no
+ /// array cookie is required.
+ ///
+ /// Several cases are filtered out before this method is called:
+ /// - non-array allocations never need a cookie
+ /// - calls to ::operator new(size_t, void*) never need a cookie
+ ///
+ /// \param ElementType - the allocated type of the expression,
+ /// i.e. the pointee type of the expression result type
+ virtual CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
+
+ /// Initialize the array cookie for the given allocation.
+ ///
+ /// \param NewPtr - a char* which is the presumed-non-null
+ /// return value of the allocation function
+ /// \param NumElements - the computed number of elements,
+ /// potentially collapsed from the multidimensional array case
+ /// \param ElementType - the base element allocated type,
+ /// i.e. the allocated type after stripping all array types
+ virtual llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType);
+
+ /// Reads the array cookie associated with the given pointer,
+ /// if it has one.
+ ///
+ /// \param Ptr - a pointer to the first element in the array
+ /// \param ElementType - the base element type of elements of the array
+ /// \param NumElements - an out parameter which will be initialized
+ /// with the number of elements allocated, or zero if there is no
+ /// cookie
+ /// \param AllocPtr - an out parameter which will be initialized
+ /// with a char* pointing to the address returned by the allocation
+ /// function
+ /// \param CookieSize - an out parameter which will be initialized
+ /// with the size of the cookie, or zero if there is no cookie
+ virtual void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize);
+
+ /*************************** Static local guards ****************************/
+
+ /// Emits the guarded initializer and destructor setup for the given
+ /// variable, given that it couldn't be emitted as a constant.
+ /// If \p PerformInit is false, the initialization has been folded to a
+ /// constant and should not be performed.
+ ///
+ /// The variable may be:
+ /// - a static local variable
+ /// - a static data member of a class template instantiation
+ virtual void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr, bool PerformInit);
+
+};
+
+/// Creates an instance of a C++ ABI class.
+CGCXXABI *CreateARMCXXABI(CodeGenModule &CGM);
+CGCXXABI *CreateItaniumCXXABI(CodeGenModule &CGM);
+CGCXXABI *CreateMicrosoftCXXABI(CodeGenModule &CGM);
+
+}
+}
+
+#endif
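
As a reference point for the array-cookie hooks declared above, the standalone program below (not part of the patch) makes the cookie visible from user code by logging the size requested from operator new[]. On Itanium-style ABIs the request is typically the array payload plus a size_t-sized cookie holding the element count, so delete[] knows how many destructors to run.

#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <new>

struct Elt {
  char c;
  ~Elt() {}   // a non-trivial destructor is what forces the cookie
  static void *operator new[](std::size_t bytes) {
    std::printf("operator new[] asked for %zu bytes\n", bytes);
    return std::malloc(bytes);
  }
  static void operator delete[](void *p) { std::free(p); }
};

int main() {
  Elt *p = new Elt[5];   // typically requests 5 + sizeof(std::size_t) bytes
  delete[] p;            // reads the cookie back to destroy 5 elements
  return 0;
}
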
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
new file mode 100644
index 0000000..4455f1a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.cpp
@@ -0,0 +1,2145 @@
+//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliancy.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCall.h"
+#include "CGCXXABI.h"
+#include "ABIInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "TargetInfo.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Attributes.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/Transforms/Utils/Local.h"
+using namespace clang;
+using namespace CodeGen;
+
+/***/
+
+static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
+ switch (CC) {
+ default: return llvm::CallingConv::C;
+ case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
+ case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
+ case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
+ case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
+ case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
+ // TODO: add support for CC_X86Pascal to llvm
+ }
+}
+
+/// Derives the 'this' type for codegen purposes, i.e. ignoring method
+/// qualification.
+/// FIXME: address space qualification?
+static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
+ QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
+ return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
+}
+
+/// Returns the canonical formal type of the given C++ method.
+static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
+ return MD->getType()->getCanonicalTypeUnqualified()
+ .getAs<FunctionProtoType>();
+}
+
+/// Returns the "extra-canonicalized" return type, which discards
+/// qualifiers on the return type. Codegen doesn't care about them,
+/// and it makes ABI code a little easier to be able to assume that
+/// all parameter and return types are top-level unqualified.
+static CanQualType GetReturnType(QualType RetTy) {
+ return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
+}
+
+/// Arrange the argument and result information for a value of the
+/// given unprototyped function type.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
+ // When translating an unprototyped function type, always use a
+ // variadic type.
+ return arrangeFunctionType(FTNP->getResultType().getUnqualifiedType(),
+ ArrayRef<CanQualType>(),
+ FTNP->getExtInfo(),
+ RequiredArgs(0));
+}
+
+/// Arrange the argument and result information for a value of the
+/// given function type, on top of any implicit parameters already
+/// stored.
+static const CGFunctionInfo &arrangeFunctionType(CodeGenTypes &CGT,
+ SmallVectorImpl<CanQualType> &argTypes,
+ CanQual<FunctionProtoType> FTP) {
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
+ // FIXME: Kill copy.
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ argTypes.push_back(FTP->getArgType(i));
+ CanQualType resultType = FTP->getResultType().getUnqualifiedType();
+ return CGT.arrangeFunctionType(resultType, argTypes,
+ FTP->getExtInfo(), required);
+}
+
+/// Arrange the argument and result information for a value of the
+/// given function type.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionType(CanQual<FunctionProtoType> FTP) {
+ SmallVector<CanQualType, 16> argTypes;
+ return ::arrangeFunctionType(*this, argTypes, FTP);
+}
+
+static CallingConv getCallingConventionForDecl(const Decl *D) {
+ // Set the appropriate calling convention for the Function.
+ if (D->hasAttr<StdCallAttr>())
+ return CC_X86StdCall;
+
+ if (D->hasAttr<FastCallAttr>())
+ return CC_X86FastCall;
+
+ if (D->hasAttr<ThisCallAttr>())
+ return CC_X86ThisCall;
+
+ if (D->hasAttr<PascalAttr>())
+ return CC_X86Pascal;
+
+ if (PcsAttr *PCS = D->getAttr<PcsAttr>())
+ return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);
+
+ return CC_C;
+}
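
The source-level spellings this helper looks for are the GNU-style attributes sketched below (compile-only declarations, not from the patch; the pascal case is omitted, and these conventions are target-dependent, so clang may warn and fall back to the default calling convention on other architectures).

__attribute__((stdcall))  void f_stdcall(int);
__attribute__((fastcall)) void f_fastcall(int);
__attribute__((thiscall)) void f_thiscall(int);
__attribute__((pcs("aapcs-vfp"))) void f_aapcs_vfp(float);   // ARM targets
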
+
+/// Arrange the argument and result information for a call to an
+/// unknown C++ non-static member function of the given abstract type.
+/// The member function must be an ordinary function, i.e. not a
+/// constructor or destructor.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
+ const FunctionProtoType *FTP) {
+ SmallVector<CanQualType, 16> argTypes;
+
+ // Add the 'this' pointer.
+ argTypes.push_back(GetThisType(Context, RD));
+
+ return ::arrangeFunctionType(*this, argTypes,
+ FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
+}
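
A small standalone program (not part of the patch; names invented) showing the shape arrangeCXXMethodType builds: an instance method is arranged like an ordinary function whose leading parameter is 'this', followed by the formal parameters.

#include <cstdio>

struct Counter {
  int n;
  int bump(int by) { n += by; return n; }
};

// Roughly the lowered signature: 'this' first, then the formal parameters,
// with the return type unchanged.
static int bump_lowered(Counter *self, int by) {
  self->n += by;
  return self->n;
}

int main() {
  Counter c = {0};
  int a = c.bump(2);              // implicit 'this'
  int b = bump_lowered(&c, 3);    // the same operation with 'this' explicit
  std::printf("%d %d\n", a, b);   // prints 2 5
  return 0;
}
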
+
+/// Arrange the argument and result information for a declaration or
+/// definition of the given C++ non-static member function. The
+/// member function must be an ordinary function, i.e. not a
+/// constructor or destructor.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
+ assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
+ assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
+
+ CanQual<FunctionProtoType> prototype = GetFormalType(MD);
+
+ if (MD->isInstance()) {
+ // The abstract case is perfectly fine.
+ return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
+ }
+
+ return arrangeFunctionType(prototype);
+}
+
+/// Arrange the argument and result information for a declaration
+/// or definition to the given constructor variant.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
+ CXXCtorType ctorKind) {
+ SmallVector<CanQualType, 16> argTypes;
+ argTypes.push_back(GetThisType(Context, D->getParent()));
+ CanQualType resultType = Context.VoidTy;
+
+ TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
+
+ CanQual<FunctionProtoType> FTP = GetFormalType(D);
+
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, argTypes.size());
+
+ // Add the formal parameters.
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ argTypes.push_back(FTP->getArgType(i));
+
+ return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(), required);
+}
+
+/// Arrange the argument and result information for a declaration,
+/// definition, or call to the given destructor variant. It so
+/// happens that all three cases produce the same information.
+const CGFunctionInfo &
+CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType dtorKind) {
+ SmallVector<CanQualType, 2> argTypes;
+ argTypes.push_back(GetThisType(Context, D->getParent()));
+ CanQualType resultType = Context.VoidTy;
+
+ TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
+
+ CanQual<FunctionProtoType> FTP = GetFormalType(D);
+ assert(FTP->getNumArgs() == 0 && "dtor with formal parameters");
+
+ return arrangeFunctionType(resultType, argTypes, FTP->getExtInfo(),
+ RequiredArgs::All);
+}
+
+/// Arrange the argument and result information for the declaration or
+/// definition of the given function.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
+ if (MD->isInstance())
+ return arrangeCXXMethodDeclaration(MD);
+
+ CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
+
+ assert(isa<FunctionType>(FTy));
+
+ // When declaring a function without a prototype, always use a
+ // non-variadic type.
+ if (isa<FunctionNoProtoType>(FTy)) {
+ CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
+ return arrangeFunctionType(noProto->getResultType(),
+ ArrayRef<CanQualType>(),
+ noProto->getExtInfo(),
+ RequiredArgs::All);
+ }
+
+ assert(isa<FunctionProtoType>(FTy));
+ return arrangeFunctionType(FTy.getAs<FunctionProtoType>());
+}
+
+/// Arrange the argument and result information for the declaration or
+/// definition of an Objective-C method.
+const CGFunctionInfo &
+CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
+ // It happens that this is the same as a call with no optional
+ // arguments, except also using the formal 'self' type.
+ return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
+}
+
+/// Arrange the argument and result information for the function type
+/// through which to perform a send to the given Objective-C method,
+/// using the given receiver type. The receiver type is not always
+/// the 'self' type of the method or even an Objective-C pointer type.
+/// This is *not* the right method for actually performing such a
+/// message send, due to the possibility of optional arguments.
+const CGFunctionInfo &
+CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
+ QualType receiverType) {
+ SmallVector<CanQualType, 16> argTys;
+ argTys.push_back(Context.getCanonicalParamType(receiverType));
+ argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
+ // FIXME: Kill copy?
+ for (ObjCMethodDecl::param_const_iterator i = MD->param_begin(),
+ e = MD->param_end(); i != e; ++i) {
+ argTys.push_back(Context.getCanonicalParamType((*i)->getType()));
+ }
+
+ FunctionType::ExtInfo einfo;
+ einfo = einfo.withCallingConv(getCallingConventionForDecl(MD));
+
+ if (getContext().getLangOpts().ObjCAutoRefCount &&
+ MD->hasAttr<NSReturnsRetainedAttr>())
+ einfo = einfo.withProducesResult(true);
+
+ RequiredArgs required =
+ (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
+
+ return arrangeFunctionType(GetReturnType(MD->getResultType()), argTys,
+ einfo, required);
+}
+
+const CGFunctionInfo &
+CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
+ // FIXME: Do we need to handle ObjCMethodDecl?
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
+ return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());
+
+ if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
+ return arrangeCXXDestructor(DD, GD.getDtorType());
+
+ return arrangeFunctionDeclaration(FD);
+}
+
+/// Figure out the rules for calling a function with the given formal
+/// type using the given arguments. The arguments are necessary
+/// because the function might be unprototyped, in which case it's
+/// target-dependent in crazy ways.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionCall(const CallArgList &args,
+ const FunctionType *fnType) {
+ RequiredArgs required = RequiredArgs::All;
+ if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
+ if (proto->isVariadic())
+ required = RequiredArgs(proto->getNumArgs());
+ } else if (CGM.getTargetCodeGenInfo()
+ .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
+ required = RequiredArgs(0);
+ }
+
+ return arrangeFunctionCall(fnType->getResultType(), args,
+ fnType->getExtInfo(), required);
+}
+
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionCall(QualType resultType,
+ const CallArgList &args,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required) {
+ // FIXME: Kill copy.
+ SmallVector<CanQualType, 16> argTypes;
+ for (CallArgList::const_iterator i = args.begin(), e = args.end();
+ i != e; ++i)
+ argTypes.push_back(Context.getCanonicalParamType(i->Ty));
+ return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
+ required);
+}
+
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionDeclaration(QualType resultType,
+ const FunctionArgList &args,
+ const FunctionType::ExtInfo &info,
+ bool isVariadic) {
+ // FIXME: Kill copy.
+ SmallVector<CanQualType, 16> argTypes;
+ for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
+ i != e; ++i)
+ argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));
+
+ RequiredArgs required =
+ (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
+ return arrangeFunctionType(GetReturnType(resultType), argTypes, info,
+ required);
+}
+
+const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
+ return arrangeFunctionType(getContext().VoidTy, ArrayRef<CanQualType>(),
+ FunctionType::ExtInfo(), RequiredArgs::All);
+}
+
+/// Arrange the argument and result information for an abstract value
+/// of a given function type. This is the method which all of the
+/// above functions ultimately defer to.
+const CGFunctionInfo &
+CodeGenTypes::arrangeFunctionType(CanQualType resultType,
+ ArrayRef<CanQualType> argTypes,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required) {
+#ifndef NDEBUG
+ for (ArrayRef<CanQualType>::const_iterator
+ I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
+ assert(I->isCanonicalAsParam());
+#endif
+
+ unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
+
+ // Lookup or create unique function info.
+ llvm::FoldingSetNodeID ID;
+ CGFunctionInfo::Profile(ID, info, required, resultType, argTypes);
+
+ void *insertPos = 0;
+ CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
+ if (FI)
+ return *FI;
+
+ // Construct the function info. We co-allocate the ArgInfos.
+ FI = CGFunctionInfo::create(CC, info, resultType, argTypes, required);
+ FunctionInfos.InsertNode(FI, insertPos);
+
+ bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
+ assert(inserted && "Recursively being processed?");
+
+ // Compute ABI information.
+ getABIInfo().computeInfo(*FI);
+
+ // Loop over all of the computed argument and return value info. If any of
+ // them are direct or extend without a specified coerce type, specify the
+ // default now.
+ ABIArgInfo &retInfo = FI->getReturnInfo();
+ if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == 0)
+ retInfo.setCoerceToType(ConvertType(FI->getReturnType()));
+
+ for (CGFunctionInfo::arg_iterator I = FI->arg_begin(), E = FI->arg_end();
+ I != E; ++I)
+ if (I->info.canHaveCoerceToType() && I->info.getCoerceToType() == 0)
+ I->info.setCoerceToType(ConvertType(I->type));
+
+ bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
+ assert(erased && "Not in set?");
+
+ return *FI;
+}
+
+CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
+ const FunctionType::ExtInfo &info,
+ CanQualType resultType,
+ ArrayRef<CanQualType> argTypes,
+ RequiredArgs required) {
+ void *buffer = operator new(sizeof(CGFunctionInfo) +
+ sizeof(ArgInfo) * (argTypes.size() + 1));
+ CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
+ FI->CallingConvention = llvmCC;
+ FI->EffectiveCallingConvention = llvmCC;
+ FI->ASTCallingConvention = info.getCC();
+ FI->NoReturn = info.getNoReturn();
+ FI->ReturnsRetained = info.getProducesResult();
+ FI->Required = required;
+ FI->HasRegParm = info.getHasRegParm();
+ FI->RegParm = info.getRegParm();
+ FI->NumArgs = argTypes.size();
+ FI->getArgsBuffer()[0].type = resultType;
+ for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
+ FI->getArgsBuffer()[i + 1].type = argTypes[i];
+ return FI;
+}
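
The create() above relies on co-allocating the object and its argument array in a single buffer, with placement new constructing the header in front of the trailing array. The toy class below (not clang's; all names invented) shows the same idiom in isolation.

#include <cstdio>
#include <new>

struct ToyInfo {
  unsigned NumArgs;

  int *argsBuffer() { return reinterpret_cast<int *>(this + 1); }

  static ToyInfo *create(unsigned numArgs) {
    // One allocation holds the header and the trailing array.
    void *buffer = operator new(sizeof(ToyInfo) + sizeof(int) * numArgs);
    ToyInfo *info = new (buffer) ToyInfo();
    info->NumArgs = numArgs;
    for (unsigned i = 0; i != numArgs; ++i)
      info->argsBuffer()[i] = 0;
    return info;
  }
};

int main() {
  ToyInfo *info = ToyInfo::create(3);
  info->argsBuffer()[2] = 7;
  std::printf("%u %d\n", info->NumArgs, info->argsBuffer()[2]);  // prints 3 7
  operator delete(info);
  return 0;
}
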
+
+/***/
+
+void CodeGenTypes::GetExpandedTypes(QualType type,
+ SmallVectorImpl<llvm::Type*> &expandedTypes) {
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
+ uint64_t NumElts = AT->getSize().getZExtValue();
+ for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
+ GetExpandedTypes(AT->getElementType(), expandedTypes);
+ } else if (const RecordType *RT = type->getAsStructureType()) {
+ const RecordDecl *RD = RT->getDecl();
+ assert(!RD->hasFlexibleArrayMember() &&
+ "Cannot expand structure with flexible array.");
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+ assert(!FD->isBitField() &&
+ "Cannot expand structure with bit-field members.");
+ GetExpandedTypes(FD->getType(), expandedTypes);
+ }
+ } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
+ llvm::Type *EltTy = ConvertType(CT->getElementType());
+ expandedTypes.push_back(EltTy);
+ expandedTypes.push_back(EltTy);
+ } else
+ expandedTypes.push_back(ConvertType(type));
+}
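
A toy restatement of the recursion above (not clang code; the Ty model is invented, the complex case is omitted, and the self-referential std::vector member leans on C++17's incomplete-type support): arrays and records are walked element by element, and only the scalar leaves land in the expanded list.

#include <cstdio>
#include <vector>

struct Ty {
  enum Kind { Scalar, Array, Record } kind;
  const char *name;        // leaf name when kind == Scalar
  std::vector<Ty> elems;   // record members, or the single array element type
  unsigned count;          // element count when kind == Array
};

static void expand(const Ty &t, std::vector<const char *> &out) {
  switch (t.kind) {
  case Ty::Scalar:
    out.push_back(t.name);
    break;
  case Ty::Array:
    for (unsigned i = 0; i != t.count; ++i)
      expand(t.elems[0], out);
    break;
  case Ty::Record:
    for (const Ty &m : t.elems)
      expand(m, out);
    break;
  }
}

int main() {
  Ty i32 = {Ty::Scalar, "i32", {}, 0};
  Ty f32 = {Ty::Scalar, "float", {}, 0};
  Ty arr = {Ty::Array, "", {i32}, 2};         // i32[2]
  Ty rec = {Ty::Record, "", {arr, f32}, 0};   // struct { i32[2]; float; }

  std::vector<const char *> out;
  expand(rec, out);
  for (const char *n : out)
    std::printf("%s ", n);                    // prints: i32 i32 float
  std::printf("\n");
  return 0;
}
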
+
+llvm::Function::arg_iterator
+CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
+ llvm::Function::arg_iterator AI) {
+ assert(LV.isSimple() &&
+ "Unexpected non-simple lvalue during struct expansion.");
+ llvm::Value *Addr = LV.getAddress();
+
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ unsigned NumElts = AT->getSize().getZExtValue();
+ QualType EltTy = AT->getElementType();
+ for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
+ llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
+ LValue LV = MakeAddrLValue(EltAddr, EltTy);
+ AI = ExpandTypeFromArgs(EltTy, LV, AI);
+ }
+ } else if (const RecordType *RT = Ty->getAsStructureType()) {
+ RecordDecl *RD = RT->getDecl();
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // FIXME: What are the right qualifiers here?
+ LValue LV = EmitLValueForField(Addr, FD, 0);
+ AI = ExpandTypeFromArgs(FT, LV, AI);
+ }
+ } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ QualType EltTy = CT->getElementType();
+ llvm::Value *RealAddr = Builder.CreateStructGEP(Addr, 0, "real");
+ EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
+ llvm::Value *ImagAddr = Builder.CreateStructGEP(Addr, 1, "imag");
+ EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
+ } else {
+ EmitStoreThroughLValue(RValue::get(AI), LV);
+ ++AI;
+ }
+
+ return AI;
+}
+
+/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
+/// accessing some number of bytes out of it, try to gep into the struct to get
+/// at its inner goodness. Dive as deep as possible without entering an element
+/// with an in-memory size smaller than DstSize.
+static llvm::Value *
+EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
+ llvm::StructType *SrcSTy,
+ uint64_t DstSize, CodeGenFunction &CGF) {
+ // We can't dive into a zero-element struct.
+ if (SrcSTy->getNumElements() == 0) return SrcPtr;
+
+ llvm::Type *FirstElt = SrcSTy->getElementType(0);
+
+ // If the first elt is at least as large as what we're looking for, or if the
+ // first element is the same size as the whole struct, we can enter it.
+ uint64_t FirstEltSize =
+ CGF.CGM.getTargetData().getTypeAllocSize(FirstElt);
+ if (FirstEltSize < DstSize &&
+ FirstEltSize < CGF.CGM.getTargetData().getTypeAllocSize(SrcSTy))
+ return SrcPtr;
+
+ // GEP into the first element.
+ SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");
+
+ // If the first element is a struct, recurse.
+ llvm::Type *SrcTy =
+ cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
+ return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
+
+ return SrcPtr;
+}
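
The reason the dive is sound can be seen with plain standard-layout structs: an object and its first member start at the same address, so re-pointing at the first element never moves the bytes being accessed. A standalone check (not from the patch):

#include <cassert>
#include <cstdint>

struct Inner { std::int32_t a; std::int32_t b; };
struct Outer { Inner in; float f; };

int main() {
  Outer o = {};
  // The outer object, its first member, and that member's first member all
  // begin at the same address, which is what lets the coerced access "dive".
  assert(static_cast<const void *>(&o) == static_cast<const void *>(&o.in));
  assert(static_cast<const void *>(&o.in) == static_cast<const void *>(&o.in.a));
  return 0;
}
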
+
+/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
+/// are either integers or pointers. This does a truncation of the value if it
+/// is too large or a zero extension if it is too small.
+static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
+ llvm::Type *Ty,
+ CodeGenFunction &CGF) {
+ if (Val->getType() == Ty)
+ return Val;
+
+ if (isa<llvm::PointerType>(Val->getType())) {
+ // If this is Pointer->Pointer avoid conversion to and from int.
+ if (isa<llvm::PointerType>(Ty))
+ return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");
+
+ // Convert the pointer to an integer so we can play with its width.
+ Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
+ }
+
+ llvm::Type *DestIntTy = Ty;
+ if (isa<llvm::PointerType>(DestIntTy))
+ DestIntTy = CGF.IntPtrTy;
+
+ if (Val->getType() != DestIntTy)
+ Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
+
+ if (isa<llvm::PointerType>(Ty))
+ Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
+ return Val;
+}
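
In plain C++ terms (a standalone sketch, not from the patch), the integer leg of that helper is just an unsigned widening or narrowing conversion:

#include <cstdint>
#include <cstdio>

int main() {
  std::uint32_t narrow = 0xDEADBEEFu;
  std::uint64_t widened = narrow;        // the "zext" case: high bits are zero
  std::uint32_t back = static_cast<std::uint32_t>(widened);   // the "trunc" case
  std::printf("%016llx %08x\n",
              static_cast<unsigned long long>(widened),
              static_cast<unsigned>(back));
  return 0;
}
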
+
+
+
+/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
+/// a pointer to an object of type \arg Ty.
+///
+/// This safely handles the case when the src type is smaller than the
+/// destination type; in this situation the values of bits which are not
+/// present in the src are undefined.
+static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
+ llvm::Type *Ty,
+ CodeGenFunction &CGF) {
+ llvm::Type *SrcTy =
+ cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+
+ // If SrcTy and Ty are the same, just do a load.
+ if (SrcTy == Ty)
+ return CGF.Builder.CreateLoad(SrcPtr);
+
+ uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(Ty);
+
+ if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
+ SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
+ SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
+ }
+
+ uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+
+ // If the source and destination are integer or pointer types, just do an
+ // extension or truncation to the desired type.
+ if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
+ (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
+ return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
+ }
+
+ // If load is legal, just bitcast the src pointer.
+ if (SrcSize >= DstSize) {
+ // Generally SrcSize is never greater than DstSize, since this means we are
+ // losing bits. However, this can happen in cases where the structure has
+ // additional padding, for example due to a user specified alignment.
+ //
+ // FIXME: Assert that we aren't truncating non-padding bits when we have access
+ // to that information.
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
+ // FIXME: Use better alignment / avoid requiring aligned load.
+ Load->setAlignment(1);
+ return Load;
+ }
+
+ // Otherwise do coercion through memory. This is stupid, but
+ // simple.
+ llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(SrcTy));
+ llvm::StoreInst *Store =
+ CGF.Builder.CreateStore(CGF.Builder.CreateLoad(SrcPtr), Casted);
+ // FIXME: Use better alignment / avoid requiring aligned store.
+ Store->setAlignment(1);
+ return CGF.Builder.CreateLoad(Tmp);
+}
+
+// Function to store a first-class aggregate into memory. We prefer to
+// store the elements rather than the aggregate to be more friendly to
+// fast-isel.
+// FIXME: Do we need to recurse here?
+static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
+ llvm::Value *DestPtr, bool DestIsVolatile,
+ bool LowAlignment) {
+ // Prefer scalar stores to first-class aggregate stores.
+ if (llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(Val->getType())) {
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
+ llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
+ llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
+ DestIsVolatile);
+ if (LowAlignment)
+ SI->setAlignment(1);
+ }
+ } else {
+ llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
+ if (LowAlignment)
+ SI->setAlignment(1);
+ }
+}
+
+/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
+/// where the source and destination may have different types.
+///
+/// This safely handles the case when the src type is larger than the
+/// destination type; the upper bits of the src will be lost.
+static void CreateCoercedStore(llvm::Value *Src,
+ llvm::Value *DstPtr,
+ bool DstIsVolatile,
+ CodeGenFunction &CGF) {
+ llvm::Type *SrcTy = Src->getType();
+ llvm::Type *DstTy =
+ cast<llvm::PointerType>(DstPtr->getType())->getElementType();
+ if (SrcTy == DstTy) {
+ CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
+ return;
+ }
+
+ uint64_t SrcSize = CGF.CGM.getTargetData().getTypeAllocSize(SrcTy);
+
+ if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
+ DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
+ DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
+ }
+
+ // If the source and destination are integer or pointer types, just do an
+ // extension or truncation to the desired type.
+ if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
+ (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
+ Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
+ CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
+ return;
+ }
+
+ uint64_t DstSize = CGF.CGM.getTargetData().getTypeAllocSize(DstTy);
+
+ // If store is legal, just bitcast the src pointer.
+ if (SrcSize <= DstSize) {
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
+ // FIXME: Use better alignment / avoid requiring aligned store.
+ BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
+ } else {
+ // Otherwise do coercion through memory. This is stupid, but
+ // simple.
+
+ // Generally SrcSize is never greater than DstSize, since this means we are
+ // losing bits. However, this can happen in cases where the structure has
+ // additional padding, for example due to a user specified alignment.
+ //
+ // FIXME: Assert that we aren't truncating non-padding bits when we have access
+ // to that information.
+ llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
+ CGF.Builder.CreateStore(Src, Tmp);
+ llvm::Value *Casted =
+ CGF.Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(DstTy));
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
+ // FIXME: Use better alignment / avoid requiring aligned load.
+ Load->setAlignment(1);
+ CGF.Builder.CreateStore(Load, DstPtr, DstIsVolatile);
+ }
+}
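
Both coercion helpers fall back to staging the value in a temporary and re-reading it under the other type. The standalone snippet below (not clang code) does the same thing with memcpy, which is the portable C++ spelling of that byte-level reinterpretation; the printed value naturally depends on endianness.

#include <cstdint>
#include <cstdio>
#include <cstring>

struct Pair { std::uint32_t lo; std::uint32_t hi; };

int main() {
  Pair p = {0x11223344u, 0x55667788u};
  std::uint64_t coerced = 0;
  // "Coerce through memory": write the source bytes, reload as the dest type.
  std::memcpy(&coerced, &p, sizeof coerced);
  std::printf("%016llx\n", static_cast<unsigned long long>(coerced));
  return 0;
}
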
+
+/***/
+
+bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
+ return FI.getReturnInfo().isIndirect();
+}
+
+bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
+ if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ default:
+ return false;
+ case BuiltinType::Float:
+ return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Float);
+ case BuiltinType::Double:
+ return getContext().getTargetInfo().useObjCFPRetForRealType(TargetInfo::Double);
+ case BuiltinType::LongDouble:
+ return getContext().getTargetInfo().useObjCFPRetForRealType(
+ TargetInfo::LongDouble);
+ }
+ }
+
+ return false;
+}
+
+bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
+ if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
+ if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
+ if (BT->getKind() == BuiltinType::LongDouble)
+ return getContext().getTargetInfo().useObjCFP2RetForComplexLongDouble();
+ }
+ }
+
+ return false;
+}
+
+llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
+ const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
+ return GetFunctionType(FI);
+}
+
+llvm::FunctionType *
+CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
+
+ bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
+ assert(Inserted && "Recursively being processed?");
+
+ SmallVector<llvm::Type*, 8> argTypes;
+ llvm::Type *resultType = 0;
+
+ const ABIArgInfo &retAI = FI.getReturnInfo();
+ switch (retAI.getKind()) {
+ case ABIArgInfo::Expand:
+ llvm_unreachable("Invalid ABI kind for return argument");
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct:
+ resultType = retAI.getCoerceToType();
+ break;
+
+ case ABIArgInfo::Indirect: {
+ assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
+ resultType = llvm::Type::getVoidTy(getLLVMContext());
+
+ QualType ret = FI.getReturnType();
+ llvm::Type *ty = ConvertType(ret);
+ unsigned addressSpace = Context.getTargetAddressSpace(ret);
+ argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
+ break;
+ }
+
+ case ABIArgInfo::Ignore:
+ resultType = llvm::Type::getVoidTy(getLLVMContext());
+ break;
+ }
+
+ for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ ie = FI.arg_end(); it != ie; ++it) {
+ const ABIArgInfo &argAI = it->info;
+
+ switch (argAI.getKind()) {
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Indirect: {
+ // indirect arguments are always on the stack, which is addr space #0.
+ llvm::Type *LTy = ConvertTypeForMem(it->type);
+ argTypes.push_back(LTy->getPointerTo());
+ break;
+ }
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ // Insert a padding type to ensure proper alignment.
+ if (llvm::Type *PaddingType = argAI.getPaddingType())
+ argTypes.push_back(PaddingType);
+ // If the coerce-to type is a first class aggregate, flatten it. Either
+ // way is semantically identical, but fast-isel and the optimizer
+ // generally like scalar values better than FCAs.
+ llvm::Type *argType = argAI.getCoerceToType();
+ if (llvm::StructType *st = dyn_cast<llvm::StructType>(argType)) {
+ for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
+ argTypes.push_back(st->getElementType(i));
+ } else {
+ argTypes.push_back(argType);
+ }
+ break;
+ }
+
+ case ABIArgInfo::Expand:
+ GetExpandedTypes(it->type, argTypes);
+ break;
+ }
+ }
+
+ bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
+ assert(Erased && "Not in set?");
+
+ return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
+}
+
+llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+
+ if (!isFuncTypeConvertible(FPT))
+ return llvm::StructType::get(getLLVMContext());
+
+ const CGFunctionInfo *Info;
+ if (isa<CXXDestructorDecl>(MD))
+ Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
+ else
+ Info = &arrangeCXXMethodDeclaration(MD);
+ return GetFunctionType(*Info);
+}
+
+void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
+ const Decl *TargetDecl,
+ AttributeListType &PAL,
+ unsigned &CallingConv) {
+ llvm::Attributes FuncAttrs;
+ llvm::Attributes RetAttrs;
+
+ CallingConv = FI.getEffectiveCallingConvention();
+
+ if (FI.isNoReturn())
+ FuncAttrs |= llvm::Attribute::NoReturn;
+
+ // FIXME: handle sseregparm someday...
+ if (TargetDecl) {
+ if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
+ FuncAttrs |= llvm::Attribute::ReturnsTwice;
+ if (TargetDecl->hasAttr<NoThrowAttr>())
+ FuncAttrs |= llvm::Attribute::NoUnwind;
+ else if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
+ const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
+ if (FPT && FPT->isNothrow(getContext()))
+ FuncAttrs |= llvm::Attribute::NoUnwind;
+ }
+
+ if (TargetDecl->hasAttr<NoReturnAttr>())
+ FuncAttrs |= llvm::Attribute::NoReturn;
+
+ if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
+ FuncAttrs |= llvm::Attribute::ReturnsTwice;
+
+ // 'const' and 'pure' attribute functions are also nounwind.
+ if (TargetDecl->hasAttr<ConstAttr>()) {
+ FuncAttrs |= llvm::Attribute::ReadNone;
+ FuncAttrs |= llvm::Attribute::NoUnwind;
+ } else if (TargetDecl->hasAttr<PureAttr>()) {
+ FuncAttrs |= llvm::Attribute::ReadOnly;
+ FuncAttrs |= llvm::Attribute::NoUnwind;
+ }
+ if (TargetDecl->hasAttr<MallocAttr>())
+ RetAttrs |= llvm::Attribute::NoAlias;
+ }
+
+ if (CodeGenOpts.OptimizeSize)
+ FuncAttrs |= llvm::Attribute::OptimizeForSize;
+ if (CodeGenOpts.DisableRedZone)
+ FuncAttrs |= llvm::Attribute::NoRedZone;
+ if (CodeGenOpts.NoImplicitFloat)
+ FuncAttrs |= llvm::Attribute::NoImplicitFloat;
+
+ QualType RetTy = FI.getReturnType();
+ unsigned Index = 1;
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Extend:
+ if (RetTy->hasSignedIntegerRepresentation())
+ RetAttrs |= llvm::Attribute::SExt;
+ else if (RetTy->hasUnsignedIntegerRepresentation())
+ RetAttrs |= llvm::Attribute::ZExt;
+ break;
+ case ABIArgInfo::Direct:
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Indirect:
+ PAL.push_back(llvm::AttributeWithIndex::get(Index,
+ llvm::Attribute::StructRet));
+ ++Index;
+ // sret disables readnone and readonly
+ FuncAttrs &= ~(llvm::Attribute::ReadOnly |
+ llvm::Attribute::ReadNone);
+ break;
+
+ case ABIArgInfo::Expand:
+ llvm_unreachable("Invalid ABI kind for return argument");
+ }
+
+ if (RetAttrs)
+ PAL.push_back(llvm::AttributeWithIndex::get(0, RetAttrs));
+
+ // FIXME: RegParm should be reduced in case of global register variable.
+ signed RegParm;
+ if (FI.getHasRegParm())
+ RegParm = FI.getRegParm();
+ else
+ RegParm = CodeGenOpts.NumRegisterParameters;
+
+ unsigned PointerWidth = getContext().getTargetInfo().getPointerWidth(0);
+ for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ ie = FI.arg_end(); it != ie; ++it) {
+ QualType ParamType = it->type;
+ const ABIArgInfo &AI = it->info;
+ llvm::Attributes Attrs;
+
+ // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
+ // have the corresponding parameter variable. It doesn't make
+ // sense to do it here because parameters are so messed up.
+ switch (AI.getKind()) {
+ case ABIArgInfo::Extend:
+ if (ParamType->isSignedIntegerOrEnumerationType())
+ Attrs |= llvm::Attribute::SExt;
+ else if (ParamType->isUnsignedIntegerOrEnumerationType())
+ Attrs |= llvm::Attribute::ZExt;
+ // FALL THROUGH
+ case ABIArgInfo::Direct:
+ if (RegParm > 0 &&
+ (ParamType->isIntegerType() || ParamType->isPointerType() ||
+ ParamType->isReferenceType())) {
+ RegParm -=
+ (Context.getTypeSize(ParamType) + PointerWidth - 1) / PointerWidth;
+ if (RegParm >= 0)
+ Attrs |= llvm::Attribute::InReg;
+ }
+ // FIXME: handle sseregparm someday...
+
+ // Increment Index if there is padding.
+ Index += (AI.getPaddingType() != 0);
+
+ if (llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(AI.getCoerceToType()))
+ Index += STy->getNumElements()-1; // 1 will be added below.
+ break;
+
+ case ABIArgInfo::Indirect:
+ if (AI.getIndirectByVal())
+ Attrs |= llvm::Attribute::ByVal;
+
+ Attrs |=
+ llvm::Attribute::constructAlignmentFromInt(AI.getIndirectAlign());
+ // byval disables readnone and readonly.
+ FuncAttrs &= ~(llvm::Attribute::ReadOnly |
+ llvm::Attribute::ReadNone);
+ break;
+
+ case ABIArgInfo::Ignore:
+ // Skip increment, no matching LLVM parameter.
+ continue;
+
+ case ABIArgInfo::Expand: {
+ SmallVector<llvm::Type*, 8> types;
+ // FIXME: This is rather inefficient. Do we ever actually need to do
+ // anything here? The result should be just reconstructed on the other
+ // side, so extension should be a non-issue.
+ getTypes().GetExpandedTypes(ParamType, types);
+ Index += types.size();
+ continue;
+ }
+ }
+
+ if (Attrs)
+ PAL.push_back(llvm::AttributeWithIndex::get(Index, Attrs));
+ ++Index;
+ }
+ if (FuncAttrs)
+ PAL.push_back(llvm::AttributeWithIndex::get(~0, FuncAttrs));
+}
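
For orientation, these are the GNU-style source attributes that ConstructAttributeList translates into LLVM function and return attributes; a compile-only sketch, not from the patch, with the comments restating the mappings made above.

__attribute__((const))         int   square(int x);          // readnone + nounwind
__attribute__((pure))          int   mylen(const char *s);   // readonly + nounwind
__attribute__((malloc))        void *grab(unsigned n);       // noalias on the return
__attribute__((noreturn))      void  die(void);              // noreturn
__attribute__((returns_twice)) int   my_setjmp(void *env);   // returns_twice
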
+
+/// An argument came in as a promoted argument; demote it back to its
+/// declared type.
+static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
+ const VarDecl *var,
+ llvm::Value *value) {
+ llvm::Type *varType = CGF.ConvertType(var->getType());
+
+ // This can happen with promotions that actually don't change the
+ // underlying type, like the enum promotions.
+ if (value->getType() == varType) return value;
+
+ assert((varType->isIntegerTy() || varType->isFloatingPointTy())
+ && "unexpected promotion type");
+
+ if (isa<llvm::IntegerType>(varType))
+ return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
+
+ return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
+}
+
+void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
+ llvm::Function *Fn,
+ const FunctionArgList &Args) {
+ // If this is an implicit-return-zero function, go ahead and
+ // initialize the return value. TODO: it might be nice to have
+ // a more general mechanism for this that didn't require synthesized
+ // return statements.
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
+ if (FD->hasImplicitReturnZero()) {
+ QualType RetTy = FD->getResultType().getUnqualifiedType();
+ llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
+ llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
+ Builder.CreateStore(Zero, ReturnValue);
+ }
+ }
+
+ // FIXME: We no longer need the types from FunctionArgList; lift up and
+ // simplify.
+
+ // Emit allocs for param decls. Give the LLVM Argument nodes names.
+ llvm::Function::arg_iterator AI = Fn->arg_begin();
+
+ // Name the struct return argument.
+ if (CGM.ReturnTypeUsesSRet(FI)) {
+ AI->setName("agg.result");
+ AI->addAttr(llvm::Attribute::NoAlias);
+ ++AI;
+ }
+
+ assert(FI.arg_size() == Args.size() &&
+ "Mismatch between function signature & arguments.");
+ unsigned ArgNo = 1;
+ CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i, ++info_it, ++ArgNo) {
+ const VarDecl *Arg = *i;
+ QualType Ty = info_it->type;
+ const ABIArgInfo &ArgI = info_it->info;
+
+ bool isPromoted =
+ isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
+
+ switch (ArgI.getKind()) {
+ case ABIArgInfo::Indirect: {
+ llvm::Value *V = AI;
+
+ if (hasAggregateLLVMType(Ty)) {
+ // Aggregates and complex variables are accessed by reference. All we
+ // need to do is realign the value, if requested.
+ if (ArgI.getIndirectRealign()) {
+ llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
+
+ // Copy from the incoming argument pointer to the temporary with the
+ // appropriate alignment.
+ //
+ // FIXME: We should have a common utility for generating an aggregate
+ // copy.
+ llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
+ CharUnits Size = getContext().getTypeSizeInChars(Ty);
+ llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
+ llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
+ Builder.CreateMemCpy(Dst,
+ Src,
+ llvm::ConstantInt::get(IntPtrTy,
+ Size.getQuantity()),
+ ArgI.getIndirectAlign(),
+ false);
+ V = AlignedTemp;
+ }
+ } else {
+ // Load scalar value from indirect argument.
+ CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
+ V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);
+
+ if (isPromoted)
+ V = emitArgumentDemotion(*this, Arg, V);
+ }
+ EmitParmDecl(*Arg, V, ArgNo);
+ break;
+ }
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ // Skip the dummy padding argument.
+ if (ArgI.getPaddingType())
+ ++AI;
+
+ // If we have the trivial case, handle it with no muss and fuss.
+ if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
+ ArgI.getCoerceToType() == ConvertType(Ty) &&
+ ArgI.getDirectOffset() == 0) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ llvm::Value *V = AI;
+
+ if (Arg->getType().isRestrictQualified())
+ AI->addAttr(llvm::Attribute::NoAlias);
+
+ // Ensure the argument is the correct type.
+ if (V->getType() != ArgI.getCoerceToType())
+ V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
+
+ if (isPromoted)
+ V = emitArgumentDemotion(*this, Arg, V);
+
+ EmitParmDecl(*Arg, V, ArgNo);
+ break;
+ }
+
+ llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
+
+ // The alignment we need to use is the max of the requested alignment for
+ // the argument plus the alignment required by our access code below.
+ unsigned AlignmentToUse =
+ CGM.getTargetData().getABITypeAlignment(ArgI.getCoerceToType());
+ AlignmentToUse = std::max(AlignmentToUse,
+ (unsigned)getContext().getDeclAlign(Arg).getQuantity());
+
+ Alloca->setAlignment(AlignmentToUse);
+ llvm::Value *V = Alloca;
+ llvm::Value *Ptr = V; // Pointer to store into.
+
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = ArgI.getDirectOffset()) {
+ Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
+ Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
+ Ptr = Builder.CreateBitCast(Ptr,
+ llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
+ }
+
+ // If the coerce-to type is a first class aggregate, we flatten it and
+ // pass the elements. Either way is semantically identical, but fast-isel
+ // and the optimizer generally like scalar values better than FCAs.
+ llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
+ if (STy && STy->getNumElements() > 1) {
+ uint64_t SrcSize = CGM.getTargetData().getTypeAllocSize(STy);
+ llvm::Type *DstTy =
+ cast<llvm::PointerType>(Ptr->getType())->getElementType();
+ uint64_t DstSize = CGM.getTargetData().getTypeAllocSize(DstTy);
+
+ if (SrcSize <= DstSize) {
+ Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
+
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
+ Builder.CreateStore(AI++, EltPtr);
+ }
+ } else {
+ llvm::AllocaInst *TempAlloca =
+ CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
+ TempAlloca->setAlignment(AlignmentToUse);
+ llvm::Value *TempV = TempAlloca;
+
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
+ Builder.CreateStore(AI++, EltPtr);
+ }
+
+ Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
+ }
+ } else {
+ // Simple case, just do a coerced store of the argument into the alloca.
+ assert(AI != Fn->arg_end() && "Argument mismatch!");
+ AI->setName(Arg->getName() + ".coerce");
+ CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
+ }
+
+
+ // Match to what EmitParmDecl is expecting for this type.
+ if (!CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
+ if (isPromoted)
+ V = emitArgumentDemotion(*this, Arg, V);
+ }
+ EmitParmDecl(*Arg, V, ArgNo);
+ continue; // Skip ++AI increment, already done.
+ }
+
+ case ABIArgInfo::Expand: {
+ // If this structure was expanded into multiple arguments then
+ // we need to create a temporary and reconstruct it from the
+ // arguments.
+ llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
+ CharUnits Align = getContext().getDeclAlign(Arg);
+ Alloca->setAlignment(Align.getQuantity());
+ LValue LV = MakeAddrLValue(Alloca, Ty, Align);
+ llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
+ EmitParmDecl(*Arg, Alloca, ArgNo);
+
+ // Name the arguments used in expansion and increment AI.
+ unsigned Index = 0;
+ for (; AI != End; ++AI, ++Index)
+ AI->setName(Arg->getName() + "." + Twine(Index));
+ continue;
+ }
+
+ case ABIArgInfo::Ignore:
+ // Initialize the local variable appropriately.
+ if (hasAggregateLLVMType(Ty))
+ EmitParmDecl(*Arg, CreateMemTemp(Ty), ArgNo);
+ else
+ EmitParmDecl(*Arg, llvm::UndefValue::get(ConvertType(Arg->getType())),
+ ArgNo);
+
+ // Skip increment, no matching LLVM parameter.
+ continue;
+ }
+
+ ++AI;
+ }
+ assert(AI == Fn->arg_end() && "Argument mismatch!");
+}
+
+static void eraseUnusedBitCasts(llvm::Instruction *insn) {
+ while (insn->use_empty()) {
+ llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
+ if (!bitcast) return;
+
+ // This is "safe" because we would have used a ConstantExpr otherwise.
+ insn = cast<llvm::Instruction>(bitcast->getOperand(0));
+ bitcast->eraseFromParent();
+ }
+}
+
+/// Try to emit a fused autorelease of a return result.
+static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
+ llvm::Value *result) {
+ // We must be immediately following the cast.
+ llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
+ if (BB->empty()) return 0;
+ if (&BB->back() != result) return 0;
+
+ llvm::Type *resultType = result->getType();
+
+ // result is in a BasicBlock and is therefore an Instruction.
+ llvm::Instruction *generator = cast<llvm::Instruction>(result);
+
+ SmallVector<llvm::Instruction*,4> insnsToKill;
+
+ // Look for:
+ // %generator = bitcast %type1* %generator2 to %type2*
+ while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
+ // We would have emitted this as a constant if the operand weren't
+ // an Instruction.
+ generator = cast<llvm::Instruction>(bitcast->getOperand(0));
+
+ // Require the generator to be immediately followed by the cast.
+ if (generator->getNextNode() != bitcast)
+ return 0;
+
+ insnsToKill.push_back(bitcast);
+ }
+
+ // Look for:
+ // %generator = call i8* @objc_retain(i8* %originalResult)
+ // or
+ // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
+ llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
+ if (!call) return 0;
+
+ bool doRetainAutorelease;
+
+ if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
+ doRetainAutorelease = true;
+ } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
+ .objc_retainAutoreleasedReturnValue) {
+ doRetainAutorelease = false;
+
+ // Look for an inline asm immediately preceding the call and kill it, too.
+ llvm::Instruction *prev = call->getPrevNode();
+ if (llvm::CallInst *asmCall = dyn_cast_or_null<llvm::CallInst>(prev))
+ if (asmCall->getCalledValue()
+ == CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker)
+ insnsToKill.push_back(prev);
+ } else {
+ return 0;
+ }
+
+ result = call->getArgOperand(0);
+ insnsToKill.push_back(call);
+
+ // Keep killing bitcasts, for sanity. Note that we no longer care
+ // about precise ordering as long as there's exactly one use.
+ while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
+ if (!bitcast->hasOneUse()) break;
+ insnsToKill.push_back(bitcast);
+ result = bitcast->getOperand(0);
+ }
+
+ // Delete all the unnecessary instructions, from latest to earliest.
+ for (SmallVectorImpl<llvm::Instruction*>::iterator
+ i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
+ (*i)->eraseFromParent();
+
+ // Do the fused retain/autorelease if we were asked to.
+ if (doRetainAutorelease)
+ result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
+
+ // Cast back to the result type.
+ return CGF.Builder.CreateBitCast(result, resultType);
+}
+
+/// If this is a +1 of the value of an immutable 'self', remove it.
+static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
+ llvm::Value *result) {
+ // This is only applicable to a method with an immutable 'self'.
+ const ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(CGF.CurCodeDecl);
+ if (!method) return 0;
+ const VarDecl *self = method->getSelfDecl();
+ if (!self->getType().isConstQualified()) return 0;
+
+ // Look for a retain call.
+ llvm::CallInst *retainCall =
+ dyn_cast<llvm::CallInst>(result->stripPointerCasts());
+ if (!retainCall ||
+ retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
+ return 0;
+
+ // Look for an ordinary load of 'self'.
+ llvm::Value *retainedValue = retainCall->getArgOperand(0);
+ llvm::LoadInst *load =
+ dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
+ if (!load || load->isAtomic() || load->isVolatile() ||
+ load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
+ return 0;
+
+ // Okay! Burn it all down. This relies for correctness on the
+ // assumption that the retain is emitted as part of the return and
+ // that thereafter everything is used "linearly".
+ llvm::Type *resultType = result->getType();
+ eraseUnusedBitCasts(cast<llvm::Instruction>(result));
+ assert(retainCall->use_empty());
+ retainCall->eraseFromParent();
+ eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
+
+ return CGF.Builder.CreateBitCast(load, resultType);
+}
+
+/// Emit an ARC autorelease of the result of a function.
+///
+/// \return the value to actually return from the function
+static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
+ llvm::Value *result) {
+ // If we're returning 'self', kill the initial retain. This is a
+ // heuristic attempt to "encourage correctness" in the really unfortunate
+ // case where we have a return of self during a dealloc and we desperately
+ // need to avoid the possible autorelease.
+ if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
+ return self;
+
+ // At -O0, try to emit a fused retain/autorelease.
+ if (CGF.shouldUseFusedARCCalls())
+ if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
+ return fused;
+
+ return CGF.EmitARCAutoreleaseReturnValue(result);
+}
+
+/// Heuristically search for a dominating store to the return-value slot.
+static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
+ // If there are multiple uses of the return-value slot, just check
+ // for something immediately preceding the IP. Sometimes this can
+ // happen with how we generate implicit-returns; it can also happen
+ // with noreturn cleanups.
+ if (!CGF.ReturnValue->hasOneUse()) {
+ llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
+ if (IP->empty()) return 0;
+ llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
+ if (!store) return 0;
+ if (store->getPointerOperand() != CGF.ReturnValue) return 0;
+ assert(!store->isAtomic() && !store->isVolatile()); // see below
+ return store;
+ }
+
+ llvm::StoreInst *store =
+ dyn_cast<llvm::StoreInst>(CGF.ReturnValue->use_back());
+ if (!store) return 0;
+
+ // These aren't actually possible for non-coerced returns, and we
+ // only care about non-coerced returns on this code path.
+ assert(!store->isAtomic() && !store->isVolatile());
+
+ // Now do a quick-and-dirty dominance check: just walk up the
+ // single-predecessors chain from the current insertion point.
+ llvm::BasicBlock *StoreBB = store->getParent();
+ llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
+ while (IP != StoreBB) {
+ if (!(IP = IP->getSinglePredecessor()))
+ return 0;
+ }
+
+ // Okay, the store's basic block dominates the insertion point; we
+ // can do our thing.
+ return store;
+}
+
+void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI) {
+ // Functions with no result always return void.
+ if (ReturnValue == 0) {
+ Builder.CreateRetVoid();
+ return;
+ }
+
+ llvm::DebugLoc RetDbgLoc;
+ llvm::Value *RV = 0;
+ QualType RetTy = FI.getReturnType();
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Indirect: {
+ unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
+ if (RetTy->isAnyComplexType()) {
+ ComplexPairTy RT = LoadComplexFromAddr(ReturnValue, false);
+ StoreComplexToAddr(RT, CurFn->arg_begin(), false);
+ } else if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ // Do nothing; aggregates get evaluated directly into the destination.
+ } else {
+ EmitStoreOfScalar(Builder.CreateLoad(ReturnValue), CurFn->arg_begin(),
+ false, Alignment, RetTy);
+ }
+ break;
+ }
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct:
+ if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
+ RetAI.getDirectOffset() == 0) {
+ // The internal return value temp will always have pointer-to-return-type
+ // type; just do a load.
+
+ // If there is a dominating store to ReturnValue, we can elide
+ // the load, zap the store, and usually zap the alloca.
+ if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
+ // Get the stored value and nuke the now-dead store.
+ RetDbgLoc = SI->getDebugLoc();
+ RV = SI->getValueOperand();
+ SI->eraseFromParent();
+
+ // If that was the only use of the return value, nuke it as well now.
+ if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
+ cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
+ ReturnValue = 0;
+ }
+
+ // Otherwise, we have to do a simple load.
+ } else {
+ RV = Builder.CreateLoad(ReturnValue);
+ }
+ } else {
+ llvm::Value *V = ReturnValue;
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = RetAI.getDirectOffset()) {
+ V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
+ V = Builder.CreateConstGEP1_32(V, Offs);
+ V = Builder.CreateBitCast(V,
+ llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ }
+
+ RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
+ }
+
+ // In ARC, end functions that return a retainable type with a call
+ // to objc_autoreleaseReturnValue.
+ if (AutoreleaseResult) {
+ assert(getLangOpts().ObjCAutoRefCount &&
+ !FI.isReturnsRetained() &&
+ RetTy->isObjCRetainableType());
+ RV = emitAutoreleaseOfResult(*this, RV);
+ }
+
+ break;
+
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Expand:
+ llvm_unreachable("Invalid ABI kind for return argument");
+ }
+
+ llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
+ if (!RetDbgLoc.isUnknown())
+ Ret->setDebugLoc(RetDbgLoc);
+}
+
+void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
+ const VarDecl *param) {
+ // StartFunction converted the ABI-lowered parameter(s) into a
+ // local alloca. We need to turn that into an r-value suitable
+ // for EmitCall.
+ llvm::Value *local = GetAddrOfLocalVar(param);
+
+ QualType type = param->getType();
+
+ // For the most part, we just need to load the alloca, except:
+ // 1) aggregate r-values are actually pointers to temporaries, and
+ // 2) references to aggregates are pointers directly to the aggregate.
+ // I don't know why references to non-aggregates are different here.
+ if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
+ if (hasAggregateLLVMType(ref->getPointeeType()))
+ return args.add(RValue::getAggregate(local), type);
+
+ // Locals which are references to scalars are represented
+ // with allocas holding the pointer.
+ return args.add(RValue::get(Builder.CreateLoad(local)), type);
+ }
+
+ if (type->isAnyComplexType()) {
+ ComplexPairTy complex = LoadComplexFromAddr(local, /*volatile*/ false);
+ return args.add(RValue::getComplex(complex), type);
+ }
+
+ if (hasAggregateLLVMType(type))
+ return args.add(RValue::getAggregate(local), type);
+
+ unsigned alignment = getContext().getDeclAlign(param).getQuantity();
+ llvm::Value *value = EmitLoadOfScalar(local, false, alignment, type);
+ return args.add(RValue::get(value), type);
+}
+
+static bool isProvablyNull(llvm::Value *addr) {
+ return isa<llvm::ConstantPointerNull>(addr);
+}
+
+static bool isProvablyNonNull(llvm::Value *addr) {
+ return isa<llvm::AllocaInst>(addr);
+}
+
+/// Emit the actual writing-back of a writeback.
+static void emitWriteback(CodeGenFunction &CGF,
+ const CallArgList::Writeback &writeback) {
+ llvm::Value *srcAddr = writeback.Address;
+ assert(!isProvablyNull(srcAddr) &&
+ "shouldn't have writeback for provably null argument");
+
+ llvm::BasicBlock *contBB = 0;
+
+ // If the argument wasn't provably non-null, we need to null check
+ // before doing the store.
+ bool provablyNonNull = isProvablyNonNull(srcAddr);
+ if (!provablyNonNull) {
+ llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
+ contBB = CGF.createBasicBlock("icr.done");
+
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
+ CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
+ CGF.EmitBlock(writebackBB);
+ }
+
+ // Load the value to writeback.
+ llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
+
+ // Cast it back, in case we're writing an id to a Foo* or something.
+ value = CGF.Builder.CreateBitCast(value,
+ cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
+ "icr.writeback-cast");
+
+ // Perform the writeback.
+ QualType srcAddrType = writeback.AddressType;
+ CGF.EmitStoreThroughLValue(RValue::get(value),
+ CGF.MakeAddrLValue(srcAddr, srcAddrType));
+
+ // Jump to the continuation block.
+ if (!provablyNonNull)
+ CGF.EmitBlock(contBB);
+}
+
+static void emitWritebacks(CodeGenFunction &CGF,
+ const CallArgList &args) {
+ for (CallArgList::writeback_iterator
+ i = args.writeback_begin(), e = args.writeback_end(); i != e; ++i)
+ emitWriteback(CGF, *i);
+}
+
+/// Emit an argument that's being passed call-by-writeback. That is,
+/// we are passing the address of a temporary, whose contents are copied back into the original argument after the call.
+static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
+ const ObjCIndirectCopyRestoreExpr *CRE) {
+ llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
+
+ // The dest and src types don't necessarily match in LLVM terms
+ // because of the crazy ObjC compatibility rules.
+
+ llvm::PointerType *destType =
+ cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
+
+ // If the address is a constant null, just pass the appropriate null.
+ if (isProvablyNull(srcAddr)) {
+ args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
+ CRE->getType());
+ return;
+ }
+
+ QualType srcAddrType =
+ CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
+
+ // Create the temporary.
+ llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
+ "icr.temp");
+
+ // Zero-initialize it if we're not doing a copy-initialization.
+ bool shouldCopy = CRE->shouldCopy();
+ if (!shouldCopy) {
+ llvm::Value *null =
+ llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(destType->getElementType()));
+ CGF.Builder.CreateStore(null, temp);
+ }
+
+ llvm::BasicBlock *contBB = 0;
+
+ // If the address is *not* known to be non-null, we need to switch.
+ llvm::Value *finalArgument;
+
+ bool provablyNonNull = isProvablyNonNull(srcAddr);
+ if (provablyNonNull) {
+ finalArgument = temp;
+ } else {
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
+
+ finalArgument = CGF.Builder.CreateSelect(isNull,
+ llvm::ConstantPointerNull::get(destType),
+ temp, "icr.argument");
+
+ // If we need to copy, then the load has to be conditional, which
+ // means we need control flow.
+ if (shouldCopy) {
+ contBB = CGF.createBasicBlock("icr.cont");
+ llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
+ CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
+ CGF.EmitBlock(copyBB);
+ }
+ }
+
+ // Perform a copy if necessary.
+ if (shouldCopy) {
+ LValue srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
+ RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
+ assert(srcRV.isScalar());
+
+ llvm::Value *src = srcRV.getScalarVal();
+ src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
+ "icr.cast");
+
+ // Use an ordinary store, not a store-to-lvalue.
+ CGF.Builder.CreateStore(src, temp);
+ }
+
+ // Finish the control flow if we needed it.
+ if (shouldCopy && !provablyNonNull)
+ CGF.EmitBlock(contBB);
+
+ args.addWriteback(srcAddr, srcAddrType, temp);
+ args.add(RValue::get(finalArgument), CRE->getType());
+}
+
+void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
+ QualType type) {
+ if (const ObjCIndirectCopyRestoreExpr *CRE
+ = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
+ assert(getContext().getLangOpts().ObjCAutoRefCount);
+ assert(getContext().hasSameType(E->getType(), type));
+ return emitWritebackArg(*this, args, CRE);
+ }
+
+ assert(type->isReferenceType() == E->isGLValue() &&
+ "reference binding to unmaterialized r-value!");
+
+ if (E->isGLValue()) {
+ assert(E->getObjectKind() == OK_Ordinary);
+ return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
+ type);
+ }
+
+ if (hasAggregateLLVMType(type) && !E->getType()->isAnyComplexType() &&
+ isa<ImplicitCastExpr>(E) &&
+ cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
+ LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
+ assert(L.isSimple());
+ args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
+ return;
+ }
+
+ args.add(EmitAnyExprToTemp(E), type);
+}
+
+// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+// optimizer it can aggressively ignore unwind edges.
+void
+CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
+ if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
+ !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
+ Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
+ CGM.getNoObjCARCExceptionsMetadata());
+}
+
+/// Emits a call or invoke instruction to the given function, depending
+/// on the current state of the EH stack.
+llvm::CallSite
+CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
+ ArrayRef<llvm::Value *> Args,
+ const Twine &Name) {
+ llvm::BasicBlock *InvokeDest = getInvokeDest();
+
+ llvm::Instruction *Inst;
+ if (!InvokeDest)
+ Inst = Builder.CreateCall(Callee, Args, Name);
+ else {
+ llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
+ Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
+ EmitBlock(ContBB);
+ }
+
+ // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+ // optimizer it can aggressively ignore unwind edges.
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ AddObjCARCExceptionMetadata(Inst);
+
+ return Inst;
+}
+
+llvm::CallSite
+CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
+ const Twine &Name) {
+ return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
+}
+
+static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
+ llvm::FunctionType *FTy) {
+ if (ArgNo < FTy->getNumParams())
+ assert(Elt->getType() == FTy->getParamType(ArgNo));
+ else
+ assert(FTy->isVarArg());
+ ++ArgNo;
+}
+
+void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
+ SmallVector<llvm::Value*,16> &Args,
+ llvm::FunctionType *IRFuncTy) {
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ unsigned NumElts = AT->getSize().getZExtValue();
+ QualType EltTy = AT->getElementType();
+ llvm::Value *Addr = RV.getAggregateAddr();
+ for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
+ llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
+ LValue LV = MakeAddrLValue(EltAddr, EltTy);
+ RValue EltRV;
+ if (EltTy->isAnyComplexType())
+ // FIXME: Volatile?
+ EltRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
+ else if (CodeGenFunction::hasAggregateLLVMType(EltTy))
+ EltRV = LV.asAggregateRValue();
+ else
+ EltRV = EmitLoadOfLValue(LV);
+ ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
+ }
+ } else if (const RecordType *RT = Ty->getAsStructureType()) {
+ RecordDecl *RD = RT->getDecl();
+ assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
+ llvm::Value *Addr = RV.getAggregateAddr();
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // FIXME: What are the right qualifiers here?
+ LValue LV = EmitLValueForField(Addr, FD, 0);
+ RValue FldRV;
+ if (FT->isAnyComplexType())
+ // FIXME: Volatile?
+ FldRV = RValue::getComplex(LoadComplexFromAddr(LV.getAddress(), false));
+ else if (CodeGenFunction::hasAggregateLLVMType(FT))
+ FldRV = LV.asAggregateRValue();
+ else
+ FldRV = EmitLoadOfLValue(LV);
+ ExpandTypeToArgs(FT, FldRV, Args, IRFuncTy);
+ }
+ } else if (Ty->isAnyComplexType()) {
+ ComplexPairTy CV = RV.getComplexVal();
+ Args.push_back(CV.first);
+ Args.push_back(CV.second);
+ } else {
+ assert(RV.isScalar() &&
+ "Unexpected non-scalar rvalue during struct expansion.");
+
+ // Insert a bitcast as needed.
+ llvm::Value *V = RV.getScalarVal();
+ if (Args.size() < IRFuncTy->getNumParams() &&
+ V->getType() != IRFuncTy->getParamType(Args.size()))
+ V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
+
+ Args.push_back(V);
+ }
+}
+
+
+RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ const CallArgList &CallArgs,
+ const Decl *TargetDecl,
+ llvm::Instruction **callOrInvoke) {
+ // FIXME: We no longer need the types from CallArgs; lift up and simplify.
+ SmallVector<llvm::Value*, 16> Args;
+
+ // Handle struct-return functions by passing a pointer to the
+ // location that we would like to return into.
+ QualType RetTy = CallInfo.getReturnType();
+ const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
+
+ // IRArgNo - Keep track of the argument number in the callee we're looking at.
+ unsigned IRArgNo = 0;
+ llvm::FunctionType *IRFuncTy =
+ cast<llvm::FunctionType>(
+ cast<llvm::PointerType>(Callee->getType())->getElementType());
+
+ // If the call returns a temporary with struct return, create a temporary
+ // alloca to hold the result, unless one is given to us.
+ if (CGM.ReturnTypeUsesSRet(CallInfo)) {
+ llvm::Value *Value = ReturnValue.getValue();
+ if (!Value)
+ Value = CreateMemTemp(RetTy);
+ Args.push_back(Value);
+ checkArgMatches(Value, IRArgNo, IRFuncTy);
+ }
+
+ assert(CallInfo.arg_size() == CallArgs.size() &&
+ "Mismatch between function signature & arguments.");
+ CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
+ for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
+ I != E; ++I, ++info_it) {
+ const ABIArgInfo &ArgInfo = info_it->info;
+ RValue RV = I->RV;
+
+ unsigned TypeAlign =
+ getContext().getTypeAlignInChars(I->Ty).getQuantity();
+ switch (ArgInfo.getKind()) {
+ case ABIArgInfo::Indirect: {
+ if (RV.isScalar() || RV.isComplex()) {
+ // Make a temporary alloca to pass the argument.
+ llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
+ if (ArgInfo.getIndirectAlign() > AI->getAlignment())
+ AI->setAlignment(ArgInfo.getIndirectAlign());
+ Args.push_back(AI);
+
+ if (RV.isScalar())
+ EmitStoreOfScalar(RV.getScalarVal(), Args.back(), false,
+ TypeAlign, I->Ty);
+ else
+ StoreComplexToAddr(RV.getComplexVal(), Args.back(), false);
+
+ // Validate argument match.
+ checkArgMatches(AI, IRArgNo, IRFuncTy);
+ } else {
+ // We want to avoid creating an unnecessary temporary+copy here;
+ // however, we need one in two cases:
+ // 1. If the argument is not byval, and we are required to copy the
+ // source. (This case doesn't occur on any common architecture.)
+ // 2. If the argument is byval, RV is not sufficiently aligned, and
+ // we cannot force it to be sufficiently aligned.
+ llvm::Value *Addr = RV.getAggregateAddr();
+ unsigned Align = ArgInfo.getIndirectAlign();
+ const llvm::TargetData *TD = &CGM.getTargetData();
+ if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
+ (ArgInfo.getIndirectByVal() && TypeAlign < Align &&
+ llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align)) {
+ // Create an aligned temporary, and copy to it.
+ llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
+ if (Align > AI->getAlignment())
+ AI->setAlignment(Align);
+ Args.push_back(AI);
+ EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
+
+ // Validate argument match.
+ checkArgMatches(AI, IRArgNo, IRFuncTy);
+ } else {
+ // Skip the extra memcpy call.
+ Args.push_back(Addr);
+
+ // Validate argument match.
+ checkArgMatches(Addr, IRArgNo, IRFuncTy);
+ }
+ }
+ break;
+ }
+
+ case ABIArgInfo::Ignore:
+ break;
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ // Insert a padding argument to ensure proper alignment.
+ if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
+ Args.push_back(llvm::UndefValue::get(PaddingType));
+ ++IRArgNo;
+ }
+
+ if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
+ ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
+ ArgInfo.getDirectOffset() == 0) {
+ llvm::Value *V;
+ if (RV.isScalar())
+ V = RV.getScalarVal();
+ else
+ V = Builder.CreateLoad(RV.getAggregateAddr());
+
+ // If the argument doesn't match, perform a bitcast to coerce it. This
+ // can happen due to trivial type mismatches.
+ if (IRArgNo < IRFuncTy->getNumParams() &&
+ V->getType() != IRFuncTy->getParamType(IRArgNo))
+ V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
+ Args.push_back(V);
+
+ checkArgMatches(V, IRArgNo, IRFuncTy);
+ break;
+ }
+
+ // FIXME: Avoid the conversion through memory if possible.
+ llvm::Value *SrcPtr;
+ if (RV.isScalar()) {
+ SrcPtr = CreateMemTemp(I->Ty, "coerce");
+ EmitStoreOfScalar(RV.getScalarVal(), SrcPtr, false, TypeAlign, I->Ty);
+ } else if (RV.isComplex()) {
+ SrcPtr = CreateMemTemp(I->Ty, "coerce");
+ StoreComplexToAddr(RV.getComplexVal(), SrcPtr, false);
+ } else
+ SrcPtr = RV.getAggregateAddr();
+
+ // If the value is offset in memory, apply the offset now.
+ if (unsigned Offs = ArgInfo.getDirectOffset()) {
+ SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
+ SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
+ SrcPtr = Builder.CreateBitCast(SrcPtr,
+ llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
+
+ }
+
+ // If the coerce-to type is a first class aggregate, we flatten it and
+ // pass the elements. Either way is semantically identical, but fast-isel
+ // and the optimizer generally like scalar values better than FCAs.
+ if (llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType())) {
+ SrcPtr = Builder.CreateBitCast(SrcPtr,
+ llvm::PointerType::getUnqual(STy));
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
+ llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
+ // We don't know what we're loading from.
+ LI->setAlignment(1);
+ Args.push_back(LI);
+
+ // Validate argument match.
+ checkArgMatches(LI, IRArgNo, IRFuncTy);
+ }
+ } else {
+ // In the simple case, just pass the coerced loaded value.
+ Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
+ *this));
+
+ // Validate argument match.
+ checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
+ }
+
+ break;
+ }
+
+ case ABIArgInfo::Expand:
+ ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
+ IRArgNo = Args.size();
+ break;
+ }
+ }
+
+ // If the callee is a bitcast of a function to a varargs pointer to function
+ // type, check to see if we can remove the bitcast. This handles some cases
+ // with unprototyped functions.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
+ if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
+ llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
+ llvm::FunctionType *CurFT =
+ cast<llvm::FunctionType>(CurPT->getElementType());
+ llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
+
+ if (CE->getOpcode() == llvm::Instruction::BitCast &&
+ ActualFT->getReturnType() == CurFT->getReturnType() &&
+ ActualFT->getNumParams() == CurFT->getNumParams() &&
+ ActualFT->getNumParams() == Args.size() &&
+ (CurFT->isVarArg() || !ActualFT->isVarArg())) {
+ bool ArgsMatch = true;
+ for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
+ if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
+ ArgsMatch = false;
+ break;
+ }
+
+ // Strip the cast if we can get away with it. This is a nice cleanup,
+ // but also allows us to inline the function at -O0 if it is marked
+ // always_inline.
+ if (ArgsMatch)
+ Callee = CalleeF;
+ }
+ }
+
+ unsigned CallingConv;
+ CodeGen::AttributeListType AttributeList;
+ CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList, CallingConv);
+ llvm::AttrListPtr Attrs = llvm::AttrListPtr::get(AttributeList.begin(),
+ AttributeList.end());
+
+ llvm::BasicBlock *InvokeDest = 0;
+ if (!(Attrs.getFnAttributes() & llvm::Attribute::NoUnwind))
+ InvokeDest = getInvokeDest();
+
+ llvm::CallSite CS;
+ if (!InvokeDest) {
+ CS = Builder.CreateCall(Callee, Args);
+ } else {
+ llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+ CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
+ EmitBlock(Cont);
+ }
+ if (callOrInvoke)
+ *callOrInvoke = CS.getInstruction();
+
+ CS.setAttributes(Attrs);
+ CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+
+ // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
+ // optimizer it can aggressively ignore unwind edges.
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ AddObjCARCExceptionMetadata(CS.getInstruction());
+
+ // If the call doesn't return, finish the basic block and clear the
+ // insertion point; this allows the rest of IRgen to discard
+ // unreachable code.
+ if (CS.doesNotReturn()) {
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+
+ // FIXME: For now, emit a dummy basic block because expr emitters in
+ // general are not ready to handle emitting expressions at unreachable
+ // points.
+ EnsureInsertPoint();
+
+ // Return a reasonable RValue.
+ return GetUndefRValue(RetTy);
+ }
+
+ llvm::Instruction *CI = CS.getInstruction();
+ if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
+ CI->setName("call");
+
+ // Emit any writebacks immediately. Arguably this should happen
+ // after any return-value munging.
+ if (CallArgs.hasWritebacks())
+ emitWritebacks(*this, CallArgs);
+
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::Indirect: {
+ unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
+ if (RetTy->isAnyComplexType())
+ return RValue::getComplex(LoadComplexFromAddr(Args[0], false));
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy))
+ return RValue::getAggregate(Args[0]);
+ return RValue::get(EmitLoadOfScalar(Args[0], false, Alignment, RetTy));
+ }
+
+ case ABIArgInfo::Ignore:
+ // If we are ignoring an argument that had a result, make sure to
+ // construct the appropriate return value for our caller.
+ return GetUndefRValue(RetTy);
+
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ llvm::Type *RetIRTy = ConvertType(RetTy);
+ if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
+ if (RetTy->isAnyComplexType()) {
+ llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
+ llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy)) {
+ llvm::Value *DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
+
+ if (!DestPtr) {
+ DestPtr = CreateMemTemp(RetTy, "agg.tmp");
+ DestIsVolatile = false;
+ }
+ BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
+ return RValue::getAggregate(DestPtr);
+ }
+
+ // If the argument doesn't match, perform a bitcast to coerce it. This
+ // can happen due to trivial type mismatches.
+ llvm::Value *V = CI;
+ if (V->getType() != RetIRTy)
+ V = Builder.CreateBitCast(V, RetIRTy);
+ return RValue::get(V);
+ }
+
+ llvm::Value *DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
+
+ if (!DestPtr) {
+ DestPtr = CreateMemTemp(RetTy, "coerce");
+ DestIsVolatile = false;
+ }
+
+ // If the value is offset in memory, apply the offset now.
+ llvm::Value *StorePtr = DestPtr;
+ if (unsigned Offs = RetAI.getDirectOffset()) {
+ StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
+ StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
+ StorePtr = Builder.CreateBitCast(StorePtr,
+ llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ }
+ CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
+
+ unsigned Alignment = getContext().getTypeAlignInChars(RetTy).getQuantity();
+ if (RetTy->isAnyComplexType())
+ return RValue::getComplex(LoadComplexFromAddr(DestPtr, false));
+ if (CodeGenFunction::hasAggregateLLVMType(RetTy))
+ return RValue::getAggregate(DestPtr);
+ return RValue::get(EmitLoadOfScalar(DestPtr, false, Alignment, RetTy));
+ }
+
+ case ABIArgInfo::Expand:
+ llvm_unreachable("Invalid ABI kind for return argument");
+ }
+
+ llvm_unreachable("Unhandled ABIArgInfo::Kind");
+}
+
+/* VarArg handling */
+
+llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
+ return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
+}
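As a rough, hypothetical illustration of the Extend/Direct coercion path that EmitCall handles above: on common 64-bit SysV targets a small two-int struct is classified for register passing, so the argument is loaded and passed as a coerced scalar IR value rather than copied byval. The sketch below is ordinary example source, none of whose names come from this change, whose call would typically exercise that path.

// Hypothetical example source. On typical x86-64 SysV targets the struct is
// coerced to a single 64-bit integer, so the Direct case above flattens or
// coerce-loads the aggregate instead of passing a pointer to a temporary.
struct Pair { int x, y; };

int takesPair(Pair p) {
  return p.x + p.y;
}

int caller() {
  Pair p = {1, 2};
  return takesPair(p);   // argument lowered through the coercion path
}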
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
new file mode 100644
index 0000000..dead7bd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCall.h
@@ -0,0 +1,306 @@
+//===----- CGCall.h - Encapsulate calling convention details ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliance.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGCALL_H
+#define CLANG_CODEGEN_CGCALL_H
+
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/Value.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/CanonicalType.h"
+
+#include "CGValue.h"
+
+// FIXME: Restructure so we don't have to expose so much stuff.
+#include "ABIInfo.h"
+
+namespace llvm {
+ struct AttributeWithIndex;
+ class Function;
+ class Type;
+ class Value;
+
+ template<typename T, unsigned> class SmallVector;
+}
+
+namespace clang {
+ class ASTContext;
+ class Decl;
+ class FunctionDecl;
+ class ObjCMethodDecl;
+ class VarDecl;
+
+namespace CodeGen {
+ typedef SmallVector<llvm::AttributeWithIndex, 8> AttributeListType;
+
+ struct CallArg {
+ RValue RV;
+ QualType Ty;
+ bool NeedsCopy;
+ CallArg(RValue rv, QualType ty, bool needscopy)
+ : RV(rv), Ty(ty), NeedsCopy(needscopy)
+ { }
+ };
+
+ /// CallArgList - Type for representing both the value and type of
+ /// arguments in a call.
+ class CallArgList :
+ public SmallVector<CallArg, 16> {
+ public:
+ struct Writeback {
+ /// The original argument.
+ llvm::Value *Address;
+
+ /// The pointee type of the original argument.
+ QualType AddressType;
+
+ /// The temporary alloca.
+ llvm::Value *Temporary;
+ };
+
+ void add(RValue rvalue, QualType type, bool needscopy = false) {
+ push_back(CallArg(rvalue, type, needscopy));
+ }
+
+ void addFrom(const CallArgList &other) {
+ insert(end(), other.begin(), other.end());
+ Writebacks.insert(Writebacks.end(),
+ other.Writebacks.begin(), other.Writebacks.end());
+ }
+
+ void addWriteback(llvm::Value *address, QualType addressType,
+ llvm::Value *temporary) {
+ Writeback writeback;
+ writeback.Address = address;
+ writeback.AddressType = addressType;
+ writeback.Temporary = temporary;
+ Writebacks.push_back(writeback);
+ }
+
+ bool hasWritebacks() const { return !Writebacks.empty(); }
+
+ typedef SmallVectorImpl<Writeback>::const_iterator writeback_iterator;
+ writeback_iterator writeback_begin() const { return Writebacks.begin(); }
+ writeback_iterator writeback_end() const { return Writebacks.end(); }
+
+ private:
+ SmallVector<Writeback, 1> Writebacks;
+ };
+
+ /// A class for recording the number of arguments that a function
+ /// signature requires.
+ class RequiredArgs {
+ /// The number of required arguments, or ~0 if the signature does
+ /// not permit optional arguments.
+ unsigned NumRequired;
+ public:
+ enum All_t { All };
+
+ RequiredArgs(All_t _) : NumRequired(~0U) {}
+ explicit RequiredArgs(unsigned n) : NumRequired(n) {
+ assert(n != ~0U);
+ }
+
+ /// Compute the arguments required by the given formal prototype,
+ /// given that there may be some additional, non-formal arguments
+ /// in play.
+ static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype,
+ unsigned additional) {
+ if (!prototype->isVariadic()) return All;
+ return RequiredArgs(prototype->getNumArgs() + additional);
+ }
+
+ static RequiredArgs forPrototype(const FunctionProtoType *prototype) {
+ return forPrototypePlus(prototype, 0);
+ }
+
+ static RequiredArgs forPrototype(CanQual<FunctionProtoType> prototype) {
+ return forPrototype(prototype.getTypePtr());
+ }
+
+ static RequiredArgs forPrototypePlus(CanQual<FunctionProtoType> prototype,
+ unsigned additional) {
+ return forPrototypePlus(prototype.getTypePtr(), additional);
+ }
+
+ bool allowsOptionalArgs() const { return NumRequired != ~0U; }
+ unsigned getNumRequiredArgs() const {
+ assert(allowsOptionalArgs());
+ return NumRequired;
+ }
+
+ unsigned getOpaqueData() const { return NumRequired; }
+ static RequiredArgs getFromOpaqueData(unsigned value) {
+ if (value == ~0U) return All;
+ return RequiredArgs(value);
+ }
+ };
+
+ /// FunctionArgList - Type for representing both the decl and type
+ /// of parameters to a function. The decl must be either a
+ /// ParmVarDecl or ImplicitParamDecl.
+ class FunctionArgList : public SmallVector<const VarDecl*, 16> {
+ };
+
+ /// CGFunctionInfo - Class to encapsulate the information about a
+ /// function definition.
+ class CGFunctionInfo : public llvm::FoldingSetNode {
+ struct ArgInfo {
+ CanQualType type;
+ ABIArgInfo info;
+ };
+
+ /// The LLVM::CallingConv to use for this function (as specified by the
+ /// user).
+ unsigned CallingConvention : 8;
+
+ /// The LLVM::CallingConv to actually use for this function, which may
+ /// depend on the ABI.
+ unsigned EffectiveCallingConvention : 8;
+
+ /// The clang::CallingConv that this was originally created with.
+ unsigned ASTCallingConvention : 8;
+
+ /// Whether this function is noreturn.
+ unsigned NoReturn : 1;
+
+ /// Whether this function is returns-retained.
+ unsigned ReturnsRetained : 1;
+
+ /// How many arguments to pass inreg.
+ unsigned HasRegParm : 1;
+ unsigned RegParm : 4;
+
+ RequiredArgs Required;
+
+ unsigned NumArgs;
+ ArgInfo *getArgsBuffer() {
+ return reinterpret_cast<ArgInfo*>(this+1);
+ }
+ const ArgInfo *getArgsBuffer() const {
+ return reinterpret_cast<const ArgInfo*>(this + 1);
+ }
+
+ CGFunctionInfo() : Required(RequiredArgs::All) {}
+
+ public:
+ static CGFunctionInfo *create(unsigned llvmCC,
+ const FunctionType::ExtInfo &extInfo,
+ CanQualType resultType,
+ ArrayRef<CanQualType> argTypes,
+ RequiredArgs required);
+
+ typedef const ArgInfo *const_arg_iterator;
+ typedef ArgInfo *arg_iterator;
+
+ const_arg_iterator arg_begin() const { return getArgsBuffer() + 1; }
+ const_arg_iterator arg_end() const { return getArgsBuffer() + 1 + NumArgs; }
+ arg_iterator arg_begin() { return getArgsBuffer() + 1; }
+ arg_iterator arg_end() { return getArgsBuffer() + 1 + NumArgs; }
+
+ unsigned arg_size() const { return NumArgs; }
+
+ bool isVariadic() const { return Required.allowsOptionalArgs(); }
+ RequiredArgs getRequiredArgs() const { return Required; }
+
+ bool isNoReturn() const { return NoReturn; }
+
+ /// In ARC, whether this function retains its return value. This
+ /// is not always reliable for call sites.
+ bool isReturnsRetained() const { return ReturnsRetained; }
+
+ /// getASTCallingConvention() - Return the AST-specified calling
+ /// convention.
+ CallingConv getASTCallingConvention() const {
+ return CallingConv(ASTCallingConvention);
+ }
+
+ /// getCallingConvention - Return the user specified calling
+ /// convention, which has been translated into an LLVM CC.
+ unsigned getCallingConvention() const { return CallingConvention; }
+
+ /// getEffectiveCallingConvention - Return the actual calling convention to
+ /// use, which may depend on the ABI.
+ unsigned getEffectiveCallingConvention() const {
+ return EffectiveCallingConvention;
+ }
+ void setEffectiveCallingConvention(unsigned Value) {
+ EffectiveCallingConvention = Value;
+ }
+
+ bool getHasRegParm() const { return HasRegParm; }
+ unsigned getRegParm() const { return RegParm; }
+
+ FunctionType::ExtInfo getExtInfo() const {
+ return FunctionType::ExtInfo(isNoReturn(),
+ getHasRegParm(), getRegParm(),
+ getASTCallingConvention(),
+ isReturnsRetained());
+ }
+
+ CanQualType getReturnType() const { return getArgsBuffer()[0].type; }
+
+ ABIArgInfo &getReturnInfo() { return getArgsBuffer()[0].info; }
+ const ABIArgInfo &getReturnInfo() const { return getArgsBuffer()[0].info; }
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ ID.AddInteger(getASTCallingConvention());
+ ID.AddBoolean(NoReturn);
+ ID.AddBoolean(ReturnsRetained);
+ ID.AddBoolean(HasRegParm);
+ ID.AddInteger(RegParm);
+ ID.AddInteger(Required.getOpaqueData());
+ getReturnType().Profile(ID);
+ for (arg_iterator it = arg_begin(), ie = arg_end(); it != ie; ++it)
+ it->type.Profile(ID);
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required,
+ CanQualType resultType,
+ ArrayRef<CanQualType> argTypes) {
+ ID.AddInteger(info.getCC());
+ ID.AddBoolean(info.getNoReturn());
+ ID.AddBoolean(info.getProducesResult());
+ ID.AddBoolean(info.getHasRegParm());
+ ID.AddInteger(info.getRegParm());
+ ID.AddInteger(required.getOpaqueData());
+ resultType.Profile(ID);
+ for (ArrayRef<CanQualType>::iterator
+ i = argTypes.begin(), e = argTypes.end(); i != e; ++i) {
+ i->Profile(ID);
+ }
+ }
+ };
+
+ /// ReturnValueSlot - Contains the address where the return value of a
+ /// function can be stored, and whether the address is volatile or not.
+ class ReturnValueSlot {
+ llvm::PointerIntPair<llvm::Value *, 1, bool> Value;
+
+ public:
+ ReturnValueSlot() {}
+ ReturnValueSlot(llvm::Value *Value, bool IsVolatile)
+ : Value(Value, IsVolatile) {}
+
+ bool isNull() const { return !getValue(); }
+
+ bool isVolatile() const { return Value.getInt(); }
+ llvm::Value *getValue() const { return Value.getPointer(); }
+ };
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
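For orientation, here is a minimal sketch of how the CallArgList interface declared above can be used when assembling arguments for CodeGenFunction::EmitCall; the parameter names (rv, ty, writebackArgTy, srcAddr, pointeeTy, temp) are placeholders assumed to come from earlier IR emission, not names from this change.

// Sketch using only the interfaces declared in CGCall.h above; it mirrors the
// provably-non-null fast path of emitWritebackArg in CGCall.cpp.
#include "CGCall.h"

using namespace clang;
using namespace clang::CodeGen;

static void addArgs(CallArgList &args,
                    RValue rv, QualType ty,            // an ordinary argument
                    QualType writebackArgTy,           // type of the out-parameter
                    llvm::Value *srcAddr, QualType pointeeTy,
                    llvm::Value *temp) {               // temporary alloca
  // A normal by-value argument: record the r-value together with its type.
  args.add(rv, ty);

  // A call-by-writeback argument: pass the temporary now...
  args.add(RValue::get(temp), writebackArgTy);
  // ...and record that its contents must be copied back to srcAddr after the
  // call (emitWritebacks in CGCall.cpp performs the copy-back).
  args.addWriteback(srcAddr, pointeeTy, temp);
}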
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
new file mode 100644
index 0000000..6303e20
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGClass.cpp
@@ -0,0 +1,1836 @@
+//===--- CGClass.cpp - Emit LLVM Code for C++ classes ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of classes
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGBlocks.h"
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Frontend/CodeGenOptions.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static CharUnits
+ComputeNonVirtualBaseClassOffset(ASTContext &Context,
+ const CXXRecordDecl *DerivedClass,
+ CastExpr::path_const_iterator Start,
+ CastExpr::path_const_iterator End) {
+ CharUnits Offset = CharUnits::Zero();
+
+ const CXXRecordDecl *RD = DerivedClass;
+
+ for (CastExpr::path_const_iterator I = Start; I != End; ++I) {
+ const CXXBaseSpecifier *Base = *I;
+ assert(!Base->isVirtual() && "Should not see virtual bases here!");
+
+ // Get the layout.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ // Add the offset.
+ Offset += Layout.getBaseClassOffset(BaseDecl);
+
+ RD = BaseDecl;
+ }
+
+ return Offset;
+}
+
+llvm::Constant *
+CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd) {
+ assert(PathBegin != PathEnd && "Base path should not be empty!");
+
+ CharUnits Offset =
+ ComputeNonVirtualBaseClassOffset(getContext(), ClassDecl,
+ PathBegin, PathEnd);
+ if (Offset.isZero())
+ return 0;
+
+ llvm::Type *PtrDiffTy =
+ Types.ConvertType(getContext().getPointerDiffType());
+
+ return llvm::ConstantInt::get(PtrDiffTy, Offset.getQuantity());
+}
+
+/// Gets the address of a direct base class within a complete object.
+/// This should only be used for (1) non-virtual bases or (2) virtual bases
+/// when the type is known to be complete (e.g. in complete destructors).
+///
+/// The object pointed to by 'This' is assumed to be non-null.
+llvm::Value *
+CodeGenFunction::GetAddressOfDirectBaseInCompleteClass(llvm::Value *This,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base,
+ bool BaseIsVirtual) {
+ // 'this' must be a pointer (in some address space) to Derived.
+ assert(This->getType()->isPointerTy() &&
+ cast<llvm::PointerType>(This->getType())->getElementType()
+ == ConvertType(Derived));
+
+ // Compute the offset of the virtual base.
+ CharUnits Offset;
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
+ if (BaseIsVirtual)
+ Offset = Layout.getVBaseClassOffset(Base);
+ else
+ Offset = Layout.getBaseClassOffset(Base);
+
+ // Shift and cast down to the base type.
+ // TODO: for complete types, this should be possible with a GEP.
+ llvm::Value *V = This;
+ if (Offset.isPositive()) {
+ V = Builder.CreateBitCast(V, Int8PtrTy);
+ V = Builder.CreateConstInBoundsGEP1_64(V, Offset.getQuantity());
+ }
+ V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());
+
+ return V;
+}
+
+static llvm::Value *
+ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ThisPtr,
+ CharUnits NonVirtual, llvm::Value *Virtual) {
+ llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ llvm::Value *NonVirtualOffset = 0;
+ if (!NonVirtual.isZero())
+ NonVirtualOffset = llvm::ConstantInt::get(PtrDiffTy,
+ NonVirtual.getQuantity());
+
+ llvm::Value *BaseOffset;
+ if (Virtual) {
+ if (NonVirtualOffset)
+ BaseOffset = CGF.Builder.CreateAdd(Virtual, NonVirtualOffset);
+ else
+ BaseOffset = Virtual;
+ } else
+ BaseOffset = NonVirtualOffset;
+
+ // Apply the base offset.
+ ThisPtr = CGF.Builder.CreateBitCast(ThisPtr, CGF.Int8PtrTy);
+ ThisPtr = CGF.Builder.CreateGEP(ThisPtr, BaseOffset, "add.ptr");
+
+ return ThisPtr;
+}
+
+llvm::Value *
+CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue) {
+ assert(PathBegin != PathEnd && "Base path should not be empty!");
+
+ CastExpr::path_const_iterator Start = PathBegin;
+ const CXXRecordDecl *VBase = 0;
+
+ // Get the virtual base.
+ if ((*Start)->isVirtual()) {
+ VBase =
+ cast<CXXRecordDecl>((*Start)->getType()->getAs<RecordType>()->getDecl());
+ ++Start;
+ }
+
+ CharUnits NonVirtualOffset =
+ ComputeNonVirtualBaseClassOffset(getContext(), VBase ? VBase : Derived,
+ Start, PathEnd);
+
+ // Get the base pointer type.
+ llvm::Type *BasePtrTy =
+ ConvertType((PathEnd[-1])->getType())->getPointerTo();
+
+ if (NonVirtualOffset.isZero() && !VBase) {
+ // Just cast back.
+ return Builder.CreateBitCast(Value, BasePtrTy);
+ }
+
+ llvm::BasicBlock *CastNull = 0;
+ llvm::BasicBlock *CastNotNull = 0;
+ llvm::BasicBlock *CastEnd = 0;
+
+ if (NullCheckValue) {
+ CastNull = createBasicBlock("cast.null");
+ CastNotNull = createBasicBlock("cast.notnull");
+ CastEnd = createBasicBlock("cast.end");
+
+ llvm::Value *IsNull = Builder.CreateIsNull(Value);
+ Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
+ EmitBlock(CastNotNull);
+ }
+
+ llvm::Value *VirtualOffset = 0;
+
+ if (VBase) {
+ if (Derived->hasAttr<FinalAttr>()) {
+ VirtualOffset = 0;
+
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(Derived);
+
+ CharUnits VBaseOffset = Layout.getVBaseClassOffset(VBase);
+ NonVirtualOffset += VBaseOffset;
+ } else
+ VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
+ }
+
+ // Apply the offsets.
+ Value = ApplyNonVirtualAndVirtualOffset(*this, Value,
+ NonVirtualOffset,
+ VirtualOffset);
+
+ // Cast back.
+ Value = Builder.CreateBitCast(Value, BasePtrTy);
+
+ if (NullCheckValue) {
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastNull);
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
+ PHI->addIncoming(Value, CastNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
+ CastNull);
+ Value = PHI;
+ }
+
+ return Value;
+}
+
+llvm::Value *
+CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue) {
+ assert(PathBegin != PathEnd && "Base path should not be empty!");
+
+ QualType DerivedTy =
+ getContext().getCanonicalType(getContext().getTagDeclType(Derived));
+ llvm::Type *DerivedPtrTy = ConvertType(DerivedTy)->getPointerTo();
+
+ llvm::Value *NonVirtualOffset =
+ CGM.GetNonVirtualBaseClassOffset(Derived, PathBegin, PathEnd);
+
+ if (!NonVirtualOffset) {
+ // No offset, we can just cast back.
+ return Builder.CreateBitCast(Value, DerivedPtrTy);
+ }
+
+ llvm::BasicBlock *CastNull = 0;
+ llvm::BasicBlock *CastNotNull = 0;
+ llvm::BasicBlock *CastEnd = 0;
+
+ if (NullCheckValue) {
+ CastNull = createBasicBlock("cast.null");
+ CastNotNull = createBasicBlock("cast.notnull");
+ CastEnd = createBasicBlock("cast.end");
+
+ llvm::Value *IsNull = Builder.CreateIsNull(Value);
+ Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
+ EmitBlock(CastNotNull);
+ }
+
+ // Apply the offset.
+ Value = Builder.CreateBitCast(Value, Int8PtrTy);
+ Value = Builder.CreateGEP(Value, Builder.CreateNeg(NonVirtualOffset),
+ "sub.ptr");
+
+ // Just cast.
+ Value = Builder.CreateBitCast(Value, DerivedPtrTy);
+
+ if (NullCheckValue) {
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastNull);
+ Builder.CreateBr(CastEnd);
+ EmitBlock(CastEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
+ PHI->addIncoming(Value, CastNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()),
+ CastNull);
+ Value = PHI;
+ }
+
+ return Value;
+}
+
+/// GetVTTParameter - Return the VTT parameter that should be passed to a
+/// base constructor/destructor with virtual bases.
+static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD,
+ bool ForVirtualBase) {
+ if (!CodeGenVTables::needsVTTParameter(GD)) {
+ // This constructor/destructor does not need a VTT parameter.
+ return 0;
+ }
+
+ const CXXRecordDecl *RD = cast<CXXMethodDecl>(CGF.CurFuncDecl)->getParent();
+ const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();
+
+ llvm::Value *VTT;
+
+ uint64_t SubVTTIndex;
+
+ // If the record matches the base, this is the complete ctor/dtor
+ // variant calling the base variant in a class with virtual bases.
+ if (RD == Base) {
+ assert(!CodeGenVTables::needsVTTParameter(CGF.CurGD) &&
+ "doing no-op VTT offset in base dtor/ctor?");
+ assert(!ForVirtualBase && "Can't have same class as virtual base!");
+ SubVTTIndex = 0;
+ } else {
+ const ASTRecordLayout &Layout =
+ CGF.getContext().getASTRecordLayout(RD);
+ CharUnits BaseOffset = ForVirtualBase ?
+ Layout.getVBaseClassOffset(Base) :
+ Layout.getBaseClassOffset(Base);
+
+ SubVTTIndex =
+ CGF.CGM.getVTables().getSubVTTIndex(RD, BaseSubobject(Base, BaseOffset));
+ assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
+ }
+
+ if (CodeGenVTables::needsVTTParameter(CGF.CurGD)) {
+ // A VTT parameter was passed to the constructor, use it.
+ VTT = CGF.LoadCXXVTT();
+ VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
+ } else {
+ // We're the complete constructor, so get the VTT by name.
+ VTT = CGF.CGM.getVTables().GetAddrOfVTT(RD);
+ VTT = CGF.Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
+ }
+
+ return VTT;
+}
+
+namespace {
+ /// Call the destructor for a direct base class.
+ struct CallBaseDtor : EHScopeStack::Cleanup {
+ const CXXRecordDecl *BaseClass;
+ bool BaseIsVirtual;
+ CallBaseDtor(const CXXRecordDecl *Base, bool BaseIsVirtual)
+ : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ const CXXRecordDecl *DerivedClass =
+ cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();
+
+ const CXXDestructorDecl *D = BaseClass->getDestructor();
+ llvm::Value *Addr =
+ CGF.GetAddressOfDirectBaseInCompleteClass(CGF.LoadCXXThis(),
+ DerivedClass, BaseClass,
+ BaseIsVirtual);
+ CGF.EmitCXXDestructorCall(D, Dtor_Base, BaseIsVirtual, Addr);
+ }
+ };
+
+ /// A visitor which checks whether an initializer uses 'this' in a
+ /// way which requires the vtable to be properly set.
+ struct DynamicThisUseChecker : EvaluatedExprVisitor<DynamicThisUseChecker> {
+ typedef EvaluatedExprVisitor<DynamicThisUseChecker> super;
+
+ bool UsesThis;
+
+ DynamicThisUseChecker(ASTContext &C) : super(C), UsesThis(false) {}
+
+ // Black-list all explicit and implicit references to 'this'.
+ //
+ // Do we need to worry about external references to 'this' derived
+ // from arbitrary code? If so, then anything which runs arbitrary
+ // external code might potentially access the vtable.
+ void VisitCXXThisExpr(CXXThisExpr *E) { UsesThis = true; }
+ };
+}
+
+static bool BaseInitializerUsesThis(ASTContext &C, const Expr *Init) {
+ DynamicThisUseChecker Checker(C);
+ Checker.Visit(const_cast<Expr*>(Init));
+ return Checker.UsesThis;
+}
+
+static void EmitBaseInitializer(CodeGenFunction &CGF,
+ const CXXRecordDecl *ClassDecl,
+ CXXCtorInitializer *BaseInit,
+ CXXCtorType CtorType) {
+ assert(BaseInit->isBaseInitializer() &&
+ "Must have base initializer!");
+
+ llvm::Value *ThisPtr = CGF.LoadCXXThis();
+
+ const Type *BaseType = BaseInit->getBaseClass();
+ CXXRecordDecl *BaseClassDecl =
+ cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
+
+ bool isBaseVirtual = BaseInit->isBaseVirtual();
+
+ // The base constructor doesn't construct virtual bases.
+ if (CtorType == Ctor_Base && isBaseVirtual)
+ return;
+
+ // If the initializer for the base (other than the constructor
+ // itself) accesses 'this' in any way, we need to initialize the
+ // vtables.
+ if (BaseInitializerUsesThis(CGF.getContext(), BaseInit->getInit()))
+ CGF.InitializeVTablePointers(ClassDecl);
+
+ // We can pretend to be a complete class because it only matters for
+ // virtual bases, and we only do virtual bases for complete ctors.
+ llvm::Value *V =
+ CGF.GetAddressOfDirectBaseInCompleteClass(ThisPtr, ClassDecl,
+ BaseClassDecl,
+ isBaseVirtual);
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(BaseType);
+ AggValueSlot AggSlot =
+ AggValueSlot::forAddr(V, Alignment, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+
+ CGF.EmitAggExpr(BaseInit->getInit(), AggSlot);
+
+ if (CGF.CGM.getLangOpts().Exceptions &&
+ !BaseClassDecl->hasTrivialDestructor())
+ CGF.EHStack.pushCleanup<CallBaseDtor>(EHCleanup, BaseClassDecl,
+ isBaseVirtual);
+}
+
+static void EmitAggMemberInitializer(CodeGenFunction &CGF,
+ LValue LHS,
+ Expr *Init,
+ llvm::Value *ArrayIndexVar,
+ QualType T,
+ ArrayRef<VarDecl *> ArrayIndexes,
+ unsigned Index) {
+ if (Index == ArrayIndexes.size()) {
+ LValue LV = LHS;
+ { // Scope for Cleanups.
+ CodeGenFunction::RunCleanupsScope Cleanups(CGF);
+
+ if (ArrayIndexVar) {
+ // If we have an array index variable, load it and use it as an offset.
+ // Then, increment the value.
+ llvm::Value *Dest = LHS.getAddress();
+ llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
+ Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
+ llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
+ Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
+ CGF.Builder.CreateStore(Next, ArrayIndexVar);
+
+ // Update the LValue.
+ LV.setAddress(Dest);
+ CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
+ LV.setAlignment(std::min(Align, LV.getAlignment()));
+ }
+
+ if (!CGF.hasAggregateLLVMType(T)) {
+ CGF.EmitScalarInit(Init, /*decl*/ 0, LV, false);
+ } else if (T->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(Init, LV.getAddress(),
+ LV.isVolatileQualified());
+ } else {
+ AggValueSlot Slot =
+ AggValueSlot::forLValue(LV,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+
+ CGF.EmitAggExpr(Init, Slot);
+ }
+ }
+
+ // Now, outside of the initializer cleanup scope, destroy the backing array
+ // for a std::initializer_list member.
+ CGF.MaybeEmitStdInitializerListCleanup(LV.getAddress(), Init);
+
+ return;
+ }
+
+ const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
+ assert(Array && "Array initialization without the array type?");
+ llvm::Value *IndexVar
+ = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
+ assert(IndexVar && "Array index variable not loaded");
+
+ // Initialize this index variable to zero.
+ llvm::Value* Zero
+ = llvm::Constant::getNullValue(
+ CGF.ConvertType(CGF.getContext().getSizeType()));
+ CGF.Builder.CreateStore(Zero, IndexVar);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");
+
+ CGF.EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
+ // Generate: if (loop-index < number-of-elements) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ uint64_t NumElements = Array->getSize().getZExtValue();
+ llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
+ llvm::Value *NumElementsPtr =
+ llvm::ConstantInt::get(Counter->getType(), NumElements);
+ llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
+ "isless");
+
+ // If the condition is true, execute the body.
+ CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);
+
+ CGF.EmitBlock(ForBody);
+ llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");
+
+ {
+ CodeGenFunction::RunCleanupsScope Cleanups(CGF);
+
+ // Inside the loop body recurse to emit the inner loop or, eventually, the
+ // constructor call.
+ EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
+ Array->getElementType(), ArrayIndexes, Index + 1);
+ }
+
+ CGF.EmitBlock(ContinueBlock);
+
+ // Emit the increment of the loop counter.
+ llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
+ Counter = CGF.Builder.CreateLoad(IndexVar);
+ NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
+ CGF.Builder.CreateStore(NextVal, IndexVar);
+
+ // Finally, branch back up to the condition for the next iteration.
+ CGF.EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ CGF.EmitBlock(AfterFor, true);
+}
+
+namespace {
+ struct CallMemberDtor : EHScopeStack::Cleanup {
+ llvm::Value *V;
+ CXXDestructorDecl *Dtor;
+
+ CallMemberDtor(llvm::Value *V, CXXDestructorDecl *Dtor)
+ : V(V), Dtor(Dtor) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
+ V);
+ }
+ };
+}
+
+static bool hasTrivialCopyOrMoveConstructor(const CXXRecordDecl *Record,
+ bool Moving) {
+ return Moving ? Record->hasTrivialMoveConstructor() :
+ Record->hasTrivialCopyConstructor();
+}
+
+static void EmitMemberInitializer(CodeGenFunction &CGF,
+ const CXXRecordDecl *ClassDecl,
+ CXXCtorInitializer *MemberInit,
+ const CXXConstructorDecl *Constructor,
+ FunctionArgList &Args) {
+ assert(MemberInit->isAnyMemberInitializer() &&
+ "Must have member initializer!");
+ assert(MemberInit->getInit() && "Must have initializer!");
+
+  // Get the non-static data member being initialized and its declared type.
+ FieldDecl *Field = MemberInit->getAnyMember();
+ QualType FieldType = Field->getType();
+
+ llvm::Value *ThisPtr = CGF.LoadCXXThis();
+ LValue LHS;
+
+ // If we are initializing an anonymous union field, drill down to the field.
+ if (MemberInit->isIndirectMemberInitializer()) {
+ LHS = CGF.EmitLValueForAnonRecordField(ThisPtr,
+ MemberInit->getIndirectMember(), 0);
+ FieldType = MemberInit->getIndirectMember()->getAnonField()->getType();
+ } else {
+ LHS = CGF.EmitLValueForFieldInitialization(ThisPtr, Field, 0);
+ }
+
+ // Special case: if we are in a copy or move constructor, and we are copying
+ // an array of PODs or classes with trivial copy constructors, ignore the
+ // AST and perform the copy we know is equivalent.
+ // FIXME: This is hacky at best... if we had a bit more explicit information
+ // in the AST, we could generalize it more easily.
+ const ConstantArrayType *Array
+ = CGF.getContext().getAsConstantArrayType(FieldType);
+ if (Array && Constructor->isImplicitlyDefined() &&
+ Constructor->isCopyOrMoveConstructor()) {
+ QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
+ const CXXRecordDecl *Record = BaseElementTy->getAsCXXRecordDecl();
+ if (BaseElementTy.isPODType(CGF.getContext()) ||
+ (Record && hasTrivialCopyOrMoveConstructor(Record,
+ Constructor->isMoveConstructor()))) {
+      // Find the source pointer. We know it's the last argument because
+      // we know we're in a copy or move constructor.
+ unsigned SrcArgIndex = Args.size() - 1;
+ llvm::Value *SrcPtr
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
+ LValue Src = CGF.EmitLValueForFieldInitialization(SrcPtr, Field, 0);
+
+ // Copy the aggregate.
+ CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
+ LHS.isVolatileQualified());
+ return;
+ }
+ }
+
+ ArrayRef<VarDecl *> ArrayIndexes;
+ if (MemberInit->getNumArrayIndices())
+ ArrayIndexes = MemberInit->getArrayIndexes();
+ CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
+}
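+
+// For illustration, a sketch of the fast path above: given
+//
+//   struct Buf { int data[64]; };
+//
+// the implicitly-defined copy constructor hits the POD-array case and copies
+// 'data' with a single aggregate copy instead of a per-element loop.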
+
+void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
+ LValue LHS, Expr *Init,
+ ArrayRef<VarDecl *> ArrayIndexes) {
+ QualType FieldType = Field->getType();
+ if (!hasAggregateLLVMType(FieldType)) {
+ if (LHS.isSimple()) {
+ EmitExprAsInit(Init, Field, LHS, false);
+ } else {
+ RValue RHS = RValue::get(EmitScalarExpr(Init));
+ EmitStoreThroughLValue(RHS, LHS);
+ }
+ } else if (FieldType->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(Init, LHS.getAddress(), LHS.isVolatileQualified());
+ } else {
+ llvm::Value *ArrayIndexVar = 0;
+ if (ArrayIndexes.size()) {
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+
+ // The LHS is a pointer to the first object we'll be constructing, as
+ // a flat array.
+ QualType BaseElementTy = getContext().getBaseElementType(FieldType);
+ llvm::Type *BasePtr = ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(),
+ BasePtr);
+ LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);
+
+ // Create an array index that will be used to walk over all of the
+ // objects we're constructing.
+ ArrayIndexVar = CreateTempAlloca(SizeTy, "object.index");
+ llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
+ Builder.CreateStore(Zero, ArrayIndexVar);
+
+ // Emit the block variables for the array indices, if any.
+ for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
+ EmitAutoVarDecl(*ArrayIndexes[I]);
+ }
+
+ EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
+ ArrayIndexes, 0);
+
+ if (!CGM.getLangOpts().Exceptions)
+ return;
+
+ // FIXME: If we have an array of classes w/ non-trivial destructors,
+ // we need to destroy in reverse order of construction along the exception
+ // path.
+ const RecordType *RT = FieldType->getAs<RecordType>();
+ if (!RT)
+ return;
+
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD->hasTrivialDestructor())
+ EHStack.pushCleanup<CallMemberDtor>(EHCleanup, LHS.getAddress(),
+ RD->getDestructor());
+ }
+}
+
+/// Checks whether the given constructor is a valid subject for the
+/// complete-to-base constructor delegation optimization, i.e.
+/// emitting the complete constructor as a simple call to the base
+/// constructor.
+static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {
+
+ // Currently we disable the optimization for classes with virtual
+ // bases because (1) the addresses of parameter variables need to be
+ // consistent across all initializers but (2) the delegate function
+ // call necessarily creates a second copy of the parameter variable.
+ //
+ // The limiting example (purely theoretical AFAIK):
+ // struct A { A(int &c) { c++; } };
+ // struct B : virtual A {
+ // B(int count) : A(count) { printf("%d\n", count); }
+ // };
+ // ...although even this example could in principle be emitted as a
+ // delegation since the address of the parameter doesn't escape.
+ if (Ctor->getParent()->getNumVBases()) {
+ // TODO: white-list trivial vbase initializers. This case wouldn't
+ // be subject to the restrictions below.
+
+ // TODO: white-list cases where:
+ // - there are no non-reference parameters to the constructor
+ // - the initializers don't access any non-reference parameters
+ // - the initializers don't take the address of non-reference
+ // parameters
+ // - etc.
+ // If we ever add any of the above cases, remember that:
+ // - function-try-blocks will always blacklist this optimization
+ // - we need to perform the constructor prologue and cleanup in
+ // EmitConstructorBody.
+
+ return false;
+ }
+
+ // We also disable the optimization for variadic functions because
+ // it's impossible to "re-pass" varargs.
+ if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
+ return false;
+
+ // FIXME: Decide if we can do a delegation of a delegating constructor.
+ if (Ctor->isDelegatingConstructor())
+ return false;
+
+ return true;
+}
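+
+// For illustration, a sketch of the optimization this guards: given
+//
+//   struct T { T(int); };   // no virtual bases, not variadic, not delegating
+//
+// the complete-object constructor can be emitted as a plain forwarding call
+// to the base-object constructor instead of duplicating its body.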
+
+/// EmitConstructorBody - Emits the body of the current constructor.
+void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
+ const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
+ CXXCtorType CtorType = CurGD.getCtorType();
+
+ // Before we go any further, try the complete->base constructor
+ // delegation optimization.
+ if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor)) {
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitLocation(Builder, Ctor->getLocEnd());
+ EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args);
+ return;
+ }
+
+ Stmt *Body = Ctor->getBody();
+
+ // Enter the function-try-block before the constructor prologue if
+ // applicable.
+ bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
+ if (IsTryBody)
+ EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+
+ EHScopeStack::stable_iterator CleanupDepth = EHStack.stable_begin();
+
+ // TODO: in restricted cases, we can emit the vbase initializers of
+ // a complete ctor and then delegate to the base ctor.
+
+ // Emit the constructor prologue, i.e. the base and member
+ // initializers.
+ EmitCtorPrologue(Ctor, CtorType, Args);
+
+ // Emit the body of the statement.
+ if (IsTryBody)
+ EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
+ else if (Body)
+ EmitStmt(Body);
+
+ // Emit any cleanup blocks associated with the member or base
+ // initializers, which includes (along the exceptional path) the
+ // destructors for those members and bases that were fully
+ // constructed.
+ PopCleanupBlocks(CleanupDepth);
+
+ if (IsTryBody)
+ ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+}
+
+/// EmitCtorPrologue - This routine generates necessary code to initialize
+/// base classes and non-static data members belonging to this constructor.
+void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
+ CXXCtorType CtorType,
+ FunctionArgList &Args) {
+ if (CD->isDelegatingConstructor())
+ return EmitDelegatingCXXConstructorCall(CD, Args);
+
+ const CXXRecordDecl *ClassDecl = CD->getParent();
+
+ SmallVector<CXXCtorInitializer *, 8> MemberInitializers;
+
+ for (CXXConstructorDecl::init_const_iterator B = CD->init_begin(),
+ E = CD->init_end();
+ B != E; ++B) {
+ CXXCtorInitializer *Member = (*B);
+
+ if (Member->isBaseInitializer()) {
+ EmitBaseInitializer(*this, ClassDecl, Member, CtorType);
+ } else {
+ assert(Member->isAnyMemberInitializer() &&
+ "Delegating initializer on non-delegating constructor");
+ MemberInitializers.push_back(Member);
+ }
+ }
+
+ InitializeVTablePointers(ClassDecl);
+
+ for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I)
+ EmitMemberInitializer(*this, ClassDecl, MemberInitializers[I], CD, Args);
+}
+
+static bool
+FieldHasTrivialDestructorBody(ASTContext &Context, const FieldDecl *Field);
+
+static bool
+HasTrivialDestructorBody(ASTContext &Context,
+ const CXXRecordDecl *BaseClassDecl,
+ const CXXRecordDecl *MostDerivedClassDecl)
+{
+ // If the destructor is trivial we don't have to check anything else.
+ if (BaseClassDecl->hasTrivialDestructor())
+ return true;
+
+ if (!BaseClassDecl->getDestructor()->hasTrivialBody())
+ return false;
+
+ // Check fields.
+ for (CXXRecordDecl::field_iterator I = BaseClassDecl->field_begin(),
+ E = BaseClassDecl->field_end(); I != E; ++I) {
+ const FieldDecl *Field = *I;
+
+ if (!FieldHasTrivialDestructorBody(Context, Field))
+ return false;
+ }
+
+ // Check non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I =
+ BaseClassDecl->bases_begin(), E = BaseClassDecl->bases_end();
+ I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *NonVirtualBase =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+ if (!HasTrivialDestructorBody(Context, NonVirtualBase,
+ MostDerivedClassDecl))
+ return false;
+ }
+
+ if (BaseClassDecl == MostDerivedClassDecl) {
+ // Check virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I =
+ BaseClassDecl->vbases_begin(), E = BaseClassDecl->vbases_end();
+ I != E; ++I) {
+ const CXXRecordDecl *VirtualBase =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+ if (!HasTrivialDestructorBody(Context, VirtualBase,
+ MostDerivedClassDecl))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool
+FieldHasTrivialDestructorBody(ASTContext &Context,
+ const FieldDecl *Field)
+{
+ QualType FieldBaseElementType = Context.getBaseElementType(Field->getType());
+
+ const RecordType *RT = FieldBaseElementType->getAs<RecordType>();
+ if (!RT)
+ return true;
+
+ CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ return HasTrivialDestructorBody(Context, FieldClassDecl, FieldClassDecl);
+}
+
+/// CanSkipVTablePointerInitialization - Check whether we need to initialize
+/// any vtable pointers before calling this destructor.
+static bool CanSkipVTablePointerInitialization(ASTContext &Context,
+ const CXXDestructorDecl *Dtor) {
+ if (!Dtor->hasTrivialBody())
+ return false;
+
+ // Check the fields.
+ const CXXRecordDecl *ClassDecl = Dtor->getParent();
+ for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
+ E = ClassDecl->field_end(); I != E; ++I) {
+ const FieldDecl *Field = *I;
+
+ if (!FieldHasTrivialDestructorBody(Context, Field))
+ return false;
+ }
+
+ return true;
+}
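+
+// For illustration: for a polymorphic class such as
+//
+//   struct D { virtual ~D() {} int x; };
+//
+// the destructor body is trivial and 'x' needs no destruction, so the vtable
+// pointer stores can be skipped when emitting ~D.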
+
+/// EmitDestructorBody - Emits the body of the current destructor.
+void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
+ const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
+ CXXDtorType DtorType = CurGD.getDtorType();
+
+ // The call to operator delete in a deleting destructor happens
+ // outside of the function-try-block, which means it's always
+ // possible to delegate the destructor body to the complete
+ // destructor. Do so.
+ if (DtorType == Dtor_Deleting) {
+ EnterDtorCleanups(Dtor, Dtor_Deleting);
+ EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
+ LoadCXXThis());
+ PopCleanupBlock();
+ return;
+ }
+
+ Stmt *Body = Dtor->getBody();
+
+ // If the body is a function-try-block, enter the try before
+ // anything else.
+ bool isTryBody = (Body && isa<CXXTryStmt>(Body));
+ if (isTryBody)
+ EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+
+ // Enter the epilogue cleanups.
+ RunCleanupsScope DtorEpilogue(*this);
+
+ // If this is the complete variant, just invoke the base variant;
+ // the epilogue will destruct the virtual bases. But we can't do
+ // this optimization if the body is a function-try-block, because
+ // we'd introduce *two* handler blocks.
+ switch (DtorType) {
+ case Dtor_Deleting: llvm_unreachable("already handled deleting case");
+
+ case Dtor_Complete:
+ // Enter the cleanup scopes for virtual bases.
+ EnterDtorCleanups(Dtor, Dtor_Complete);
+
+ if (!isTryBody) {
+ EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
+ LoadCXXThis());
+ break;
+ }
+ // Fallthrough: act like we're in the base variant.
+
+ case Dtor_Base:
+ // Enter the cleanup scopes for fields and non-virtual bases.
+ EnterDtorCleanups(Dtor, Dtor_Base);
+
+ // Initialize the vtable pointers before entering the body.
+ if (!CanSkipVTablePointerInitialization(getContext(), Dtor))
+ InitializeVTablePointers(Dtor->getParent());
+
+ if (isTryBody)
+ EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
+ else if (Body)
+ EmitStmt(Body);
+ else {
+ assert(Dtor->isImplicit() && "bodyless dtor not implicit");
+ // nothing to do besides what's in the epilogue
+ }
+ // -fapple-kext must inline any call to this dtor into
+ // the caller's body.
+ if (getContext().getLangOpts().AppleKext)
+ CurFn->addFnAttr(llvm::Attribute::AlwaysInline);
+ break;
+ }
+
+ // Jump out through the epilogue cleanups.
+ DtorEpilogue.ForceCleanup();
+
+ // Exit the try if applicable.
+ if (isTryBody)
+ ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+}
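+
+// For illustration, a sketch of how the variants above relate (using the
+// Itanium ABI names D0/D1/D2):
+//
+//   D0 (deleting):  call D1, then operator delete(this)
+//   D1 (complete):  call D2, then destroy virtual bases
+//   D2 (base):      run the body, then destroy fields and non-virtual bases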
+
+namespace {
+ /// Call the operator delete associated with the current destructor.
+ struct CallDtorDelete : EHScopeStack::Cleanup {
+ CallDtorDelete() {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CGF.CurCodeDecl);
+ const CXXRecordDecl *ClassDecl = Dtor->getParent();
+ CGF.EmitDeleteCall(Dtor->getOperatorDelete(), CGF.LoadCXXThis(),
+ CGF.getContext().getTagDeclType(ClassDecl));
+ }
+ };
+
+ class DestroyField : public EHScopeStack::Cleanup {
+ const FieldDecl *field;
+ CodeGenFunction::Destroyer *destroyer;
+ bool useEHCleanupForArray;
+
+ public:
+ DestroyField(const FieldDecl *field, CodeGenFunction::Destroyer *destroyer,
+ bool useEHCleanupForArray)
+ : field(field), destroyer(destroyer),
+ useEHCleanupForArray(useEHCleanupForArray) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Find the address of the field.
+ llvm::Value *thisValue = CGF.LoadCXXThis();
+ LValue LV = CGF.EmitLValueForField(thisValue, field, /*CVRQualifiers=*/0);
+ assert(LV.isSimple());
+
+ CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
+ flags.isForNormalCleanup() && useEHCleanupForArray);
+ }
+ };
+}
+
+/// EnterDtorCleanups - Push the cleanups that run at the end of a class's
+/// destructor; they call the destructors of members and base classes in
+/// reverse order of their construction.
+void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
+ CXXDtorType DtorType) {
+ assert(!DD->isTrivial() &&
+ "Should not emit dtor epilogue for trivial dtor!");
+
+ // The deleting-destructor phase just needs to call the appropriate
+ // operator delete that Sema picked up.
+ if (DtorType == Dtor_Deleting) {
+ assert(DD->getOperatorDelete() &&
+ "operator delete missing - EmitDtorEpilogue");
+ EHStack.pushCleanup<CallDtorDelete>(NormalAndEHCleanup);
+ return;
+ }
+
+ const CXXRecordDecl *ClassDecl = DD->getParent();
+
+ // Unions have no bases and do not call field destructors.
+ if (ClassDecl->isUnion())
+ return;
+
+ // The complete-destructor phase just destructs all the virtual bases.
+ if (DtorType == Dtor_Complete) {
+
+ // We push them in the forward order so that they'll be popped in
+ // the reverse order.
+ for (CXXRecordDecl::base_class_const_iterator I =
+ ClassDecl->vbases_begin(), E = ClassDecl->vbases_end();
+ I != E; ++I) {
+ const CXXBaseSpecifier &Base = *I;
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore trivial destructors.
+ if (BaseClassDecl->hasTrivialDestructor())
+ continue;
+
+ EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
+ BaseClassDecl,
+ /*BaseIsVirtual*/ true);
+ }
+
+ return;
+ }
+
+ assert(DtorType == Dtor_Base);
+
+ // Destroy non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I =
+ ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
+ const CXXBaseSpecifier &Base = *I;
+
+ // Ignore virtual bases.
+ if (Base.isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();
+
+ // Ignore trivial destructors.
+ if (BaseClassDecl->hasTrivialDestructor())
+ continue;
+
+ EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
+ BaseClassDecl,
+ /*BaseIsVirtual*/ false);
+ }
+
+ // Destroy direct fields.
+ SmallVector<const FieldDecl *, 16> FieldDecls;
+ for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
+ E = ClassDecl->field_end(); I != E; ++I) {
+ const FieldDecl *field = *I;
+ QualType type = field->getType();
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+ if (!dtorKind) continue;
+
+ // Anonymous union members do not have their destructors called.
+ const RecordType *RT = type->getAsUnionType();
+ if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;
+
+ CleanupKind cleanupKind = getCleanupKind(dtorKind);
+ EHStack.pushCleanup<DestroyField>(cleanupKind, field,
+ getDestroyer(dtorKind),
+ cleanupKind & EHCleanup);
+ }
+}
+
+/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
+/// constructor for each of several members of an array.
+///
+/// \param ctor the constructor to call for each element
+/// \param argBegin,argEnd the arguments to evaluate and pass to the
+/// constructor
+/// \param arrayType the type of the array to initialize
+/// \param arrayBegin an arrayType*
+/// \param zeroInitialize true if each element should be
+/// zero-initialized before it is constructed
+void
+CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+ const ConstantArrayType *arrayType,
+ llvm::Value *arrayBegin,
+ CallExpr::const_arg_iterator argBegin,
+ CallExpr::const_arg_iterator argEnd,
+ bool zeroInitialize) {
+ QualType elementType;
+ llvm::Value *numElements =
+ emitArrayLength(arrayType, elementType, arrayBegin);
+
+ EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin,
+ argBegin, argEnd, zeroInitialize);
+}
+
+/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
+/// constructor for each of several members of an array.
+///
+/// \param ctor the constructor to call for each element
+/// \param numElements the number of elements in the array;
+/// may be zero
+/// \param argBegin,argEnd the arguments to evaluate and pass to the
+/// constructor
+/// \param arrayBegin a T*, where T is the type constructed by ctor
+/// \param zeroInitialize true if each element should be
+/// zero-initialized before it is constructed
+void
+CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+ llvm::Value *numElements,
+ llvm::Value *arrayBegin,
+ CallExpr::const_arg_iterator argBegin,
+ CallExpr::const_arg_iterator argEnd,
+ bool zeroInitialize) {
+
+ // It's legal for numElements to be zero. This can happen both
+ // dynamically, because x can be zero in 'new A[x]', and statically,
+ // because of GCC extensions that permit zero-length arrays. There
+ // are probably legitimate places where we could assume that this
+ // doesn't happen, but it's not clear that it's worth it.
+ llvm::BranchInst *zeroCheckBranch = 0;
+
+ // Optimize for a constant count.
+ llvm::ConstantInt *constantCount
+ = dyn_cast<llvm::ConstantInt>(numElements);
+ if (constantCount) {
+ // Just skip out if the constant count is zero.
+ if (constantCount->isZero()) return;
+
+ // Otherwise, emit the check.
+ } else {
+ llvm::BasicBlock *loopBB = createBasicBlock("new.ctorloop");
+ llvm::Value *iszero = Builder.CreateIsNull(numElements, "isempty");
+ zeroCheckBranch = Builder.CreateCondBr(iszero, loopBB, loopBB);
+ EmitBlock(loopBB);
+ }
+
+ // Find the end of the array.
+ llvm::Value *arrayEnd = Builder.CreateInBoundsGEP(arrayBegin, numElements,
+ "arrayctor.end");
+
+ // Enter the loop, setting up a phi for the current location to initialize.
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *loopBB = createBasicBlock("arrayctor.loop");
+ EmitBlock(loopBB);
+ llvm::PHINode *cur = Builder.CreatePHI(arrayBegin->getType(), 2,
+ "arrayctor.cur");
+ cur->addIncoming(arrayBegin, entryBB);
+
+ // Inside the loop body, emit the constructor call on the array element.
+
+ QualType type = getContext().getTypeDeclType(ctor->getParent());
+
+ // Zero initialize the storage, if requested.
+ if (zeroInitialize)
+ EmitNullInitialization(cur, type);
+
+ // C++ [class.temporary]p4:
+ // There are two contexts in which temporaries are destroyed at a different
+ // point than the end of the full-expression. The first context is when a
+ // default constructor is called to initialize an element of an array.
+ // If the constructor has one or more default arguments, the destruction of
+ // every temporary created in a default argument expression is sequenced
+ // before the construction of the next array element, if any.
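+  //
+  // For illustration (a sketch): given
+  //
+  //   struct S { S(const T &t = T()); };
+  //   S arr[8];
+  //
+  // the temporary T() built for the default argument is destroyed before the
+  // next array element is constructed, which is why the per-iteration
+  // RunCleanupsScope below is entered inside the loop body.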
+
+ {
+ RunCleanupsScope Scope(*this);
+
+ // Evaluate the constructor and its arguments in a regular
+ // partial-destroy cleanup.
+ if (getLangOpts().Exceptions &&
+ !ctor->getParent()->hasTrivialDestructor()) {
+ Destroyer *destroyer = destroyCXXObject;
+ pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
+ }
+
+ EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/ false,
+ cur, argBegin, argEnd);
+ }
+
+ // Go to the next element.
+ llvm::Value *next =
+ Builder.CreateInBoundsGEP(cur, llvm::ConstantInt::get(SizeTy, 1),
+ "arrayctor.next");
+ cur->addIncoming(next, Builder.GetInsertBlock());
+
+ // Check whether that's the end of the loop.
+ llvm::Value *done = Builder.CreateICmpEQ(next, arrayEnd, "arrayctor.done");
+ llvm::BasicBlock *contBB = createBasicBlock("arrayctor.cont");
+ Builder.CreateCondBr(done, contBB, loopBB);
+
+ // Patch the earlier check to skip over the loop.
+ if (zeroCheckBranch) zeroCheckBranch->setSuccessor(0, contBB);
+
+ EmitBlock(contBB);
+}
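+
+// For illustration, a rough source-level sketch of the loop emitted above
+// (construct() is a hypothetical stand-in for the constructor call):
+//
+//   T *cur = arrayBegin, *end = arrayBegin + numElements;
+//   if (numElements != 0)        // check emitted only for non-constant counts
+//     do { construct(cur); } while (++cur != end);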
+
+void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ const RecordType *rtype = type->castAs<RecordType>();
+ const CXXRecordDecl *record = cast<CXXRecordDecl>(rtype->getDecl());
+ const CXXDestructorDecl *dtor = record->getDestructor();
+ assert(!dtor->isTrivial());
+ CGF.EmitCXXDestructorCall(dtor, Dtor_Complete, /*for vbase*/ false,
+ addr);
+}
+
+void
+CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
+ CXXCtorType Type, bool ForVirtualBase,
+ llvm::Value *This,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI && CGM.getCodeGenOpts().LimitDebugInfo) {
+ // If debug info for this class has not been emitted then this is the
+ // right time to do so.
+ const CXXRecordDecl *Parent = D->getParent();
+ DI->getOrCreateRecordType(CGM.getContext().getTypeDeclType(Parent),
+ Parent->getLocation());
+ }
+
+ if (D->isTrivial()) {
+ if (ArgBeg == ArgEnd) {
+ // Trivial default constructor, no codegen required.
+ assert(D->isDefaultConstructor() &&
+ "trivial 0-arg ctor not a default ctor");
+ return;
+ }
+
+ assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
+ assert(D->isCopyOrMoveConstructor() &&
+ "trivial 1-arg ctor not a copy/move ctor");
+
+ const Expr *E = (*ArgBeg);
+ QualType Ty = E->getType();
+ llvm::Value *Src = EmitLValue(E).getAddress();
+ EmitAggregateCopy(This, Src, Ty);
+ return;
+ }
+
+ llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(D, Type), ForVirtualBase);
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
+
+ EmitCXXMemberCall(D, Callee, ReturnValueSlot(), This, VTT, ArgBeg, ArgEnd);
+}
+
+void
+CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
+ llvm::Value *This, llvm::Value *Src,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ if (D->isTrivial()) {
+ assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
+ assert(D->isCopyOrMoveConstructor() &&
+ "trivial 1-arg ctor not a copy/move ctor");
+ EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
+ return;
+ }
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D,
+ clang::Ctor_Complete);
+ assert(D->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+
+ const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>();
+
+ CallArgList Args;
+
+ // Push the this ptr.
+ Args.add(RValue::get(This), D->getThisType(getContext()));
+
+ // Push the src ptr.
+ QualType QT = *(FPT->arg_type_begin());
+ llvm::Type *t = CGM.getTypes().ConvertType(QT);
+ Src = Builder.CreateBitCast(Src, t);
+ Args.add(RValue::get(Src), QT);
+
+ // Skip over first argument (Src).
+ ++ArgBeg;
+ CallExpr::const_arg_iterator Arg = ArgBeg;
+ for (FunctionProtoType::arg_type_iterator I = FPT->arg_type_begin()+1,
+ E = FPT->arg_type_end(); I != E; ++I, ++Arg) {
+ assert(Arg != ArgEnd && "Running over edge of argument list!");
+ EmitCallArg(Args, *Arg, *I);
+ }
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((Arg == ArgEnd || FPT->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
+ // If we still have any arguments, emit them using the type of the argument.
+ for (; Arg != ArgEnd; ++Arg) {
+ QualType ArgType = Arg->getType();
+ EmitCallArg(Args, *Arg, ArgType);
+ }
+
+ EmitCall(CGM.getTypes().arrangeFunctionCall(Args, FPT), Callee,
+ ReturnValueSlot(), Args, D);
+}
+
+void
+CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ CXXCtorType CtorType,
+ const FunctionArgList &Args) {
+ CallArgList DelegateArgs;
+
+ FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
+ assert(I != E && "no parameters to constructor");
+
+ // this
+ DelegateArgs.add(RValue::get(LoadCXXThis()), (*I)->getType());
+ ++I;
+
+ // vtt
+ if (llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(Ctor, CtorType),
+ /*ForVirtualBase=*/false)) {
+ QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
+ DelegateArgs.add(RValue::get(VTT), VoidPP);
+
+ if (CodeGenVTables::needsVTTParameter(CurGD)) {
+ assert(I != E && "cannot skip vtt parameter, already done with args");
+ assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
+ ++I;
+ }
+ }
+
+ // Explicit arguments.
+ for (; I != E; ++I) {
+ const VarDecl *param = *I;
+ EmitDelegateCallArg(DelegateArgs, param);
+ }
+
+ EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
+ CGM.GetAddrOfCXXConstructor(Ctor, CtorType),
+ ReturnValueSlot(), DelegateArgs, Ctor);
+}
+
+namespace {
+ struct CallDelegatingCtorDtor : EHScopeStack::Cleanup {
+ const CXXDestructorDecl *Dtor;
+ llvm::Value *Addr;
+ CXXDtorType Type;
+
+ CallDelegatingCtorDtor(const CXXDestructorDecl *D, llvm::Value *Addr,
+ CXXDtorType Type)
+ : Dtor(D), Addr(Addr), Type(Type) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitCXXDestructorCall(Dtor, Type, /*ForVirtualBase=*/false,
+ Addr);
+ }
+ };
+}
+
+void
+CodeGenFunction::EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ const FunctionArgList &Args) {
+ assert(Ctor->isDelegatingConstructor());
+
+ llvm::Value *ThisPtr = LoadCXXThis();
+
+ QualType Ty = getContext().getTagDeclType(Ctor->getParent());
+ CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
+ AggValueSlot AggSlot =
+ AggValueSlot::forAddr(ThisPtr, Alignment, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+
+ EmitAggExpr(Ctor->init_begin()[0]->getInit(), AggSlot);
+
+ const CXXRecordDecl *ClassDecl = Ctor->getParent();
+ if (CGM.getLangOpts().Exceptions && !ClassDecl->hasTrivialDestructor()) {
+ CXXDtorType Type =
+ CurGD.getCtorType() == Ctor_Complete ? Dtor_Complete : Dtor_Base;
+
+ EHStack.pushCleanup<CallDelegatingCtorDtor>(EHCleanup,
+ ClassDecl->getDestructor(),
+ ThisPtr, Type);
+ }
+}
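+
+// For illustration, the kind of constructor handled above:
+//
+//   struct X { X(); X(int) : X() {} };
+//
+// X(int) evaluates its single delegating initializer into *this; when
+// exceptions are enabled and the class has a non-trivial destructor, a
+// cleanup is pushed so the already-constructed object is destroyed if the
+// rest of the constructor body throws.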
+
+void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ bool ForVirtualBase,
+ llvm::Value *This) {
+ llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(DD, Type),
+ ForVirtualBase);
+ llvm::Value *Callee = 0;
+ if (getContext().getLangOpts().AppleKext)
+ Callee = BuildAppleKextVirtualDestructorCall(DD, Type,
+ DD->getParent());
+
+ if (!Callee)
+ Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
+
+ EmitCXXMemberCall(DD, Callee, ReturnValueSlot(), This, VTT, 0, 0);
+}
+
+namespace {
+ struct CallLocalDtor : EHScopeStack::Cleanup {
+ const CXXDestructorDecl *Dtor;
+ llvm::Value *Addr;
+
+ CallLocalDtor(const CXXDestructorDecl *D, llvm::Value *Addr)
+ : Dtor(D), Addr(Addr) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, Addr);
+ }
+ };
+}
+
+void CodeGenFunction::PushDestructorCleanup(const CXXDestructorDecl *D,
+ llvm::Value *Addr) {
+ EHStack.pushCleanup<CallLocalDtor>(NormalAndEHCleanup, D, Addr);
+}
+
+void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
+ CXXRecordDecl *ClassDecl = T->getAsCXXRecordDecl();
+ if (!ClassDecl) return;
+ if (ClassDecl->hasTrivialDestructor()) return;
+
+ const CXXDestructorDecl *D = ClassDecl->getDestructor();
+ assert(D && D->isUsed() && "destructor not marked as used!");
+ PushDestructorCleanup(D, Addr);
+}
+
+llvm::Value *
+CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl) {
+ llvm::Value *VTablePtr = GetVTablePtr(This, Int8PtrTy);
+ CharUnits VBaseOffsetOffset =
+ CGM.getVTableContext().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);
+
+ llvm::Value *VBaseOffsetPtr =
+ Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
+ "vbase.offset.ptr");
+ llvm::Type *PtrDiffTy =
+ ConvertType(getContext().getPointerDiffType());
+
+ VBaseOffsetPtr = Builder.CreateBitCast(VBaseOffsetPtr,
+ PtrDiffTy->getPointerTo());
+
+ llvm::Value *VBaseOffset = Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset");
+
+ return VBaseOffset;
+}
+
+void
+CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ CharUnits OffsetFromNearestVBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ // Compute the address point.
+ llvm::Value *VTableAddressPoint;
+
+ // Check if we need to use a vtable from the VTT.
+ if (CodeGenVTables::needsVTTParameter(CurGD) &&
+ (RD->getNumVBases() || NearestVBase)) {
+ // Get the secondary vpointer index.
+ uint64_t VirtualPointerIndex =
+ CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
+
+    // Load the VTT.
+ llvm::Value *VTT = LoadCXXVTT();
+ if (VirtualPointerIndex)
+ VTT = Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
+
+ // And load the address point from the VTT.
+ VTableAddressPoint = Builder.CreateLoad(VTT);
+ } else {
+ uint64_t AddressPoint =
+ CGM.getVTableContext().getVTableLayout(VTableClass).getAddressPoint(Base);
+ VTableAddressPoint =
+ Builder.CreateConstInBoundsGEP2_64(VTable, 0, AddressPoint);
+ }
+
+ // Compute where to store the address point.
+ llvm::Value *VirtualOffset = 0;
+ CharUnits NonVirtualOffset = CharUnits::Zero();
+
+ if (CodeGenVTables::needsVTTParameter(CurGD) && NearestVBase) {
+ // We need to use the virtual base offset offset because the virtual base
+ // might have a different offset in the most derived class.
+ VirtualOffset = GetVirtualBaseClassOffset(LoadCXXThis(), VTableClass,
+ NearestVBase);
+ NonVirtualOffset = OffsetFromNearestVBase;
+ } else {
+ // We can just use the base offset in the complete class.
+ NonVirtualOffset = Base.getBaseOffset();
+ }
+
+ // Apply the offsets.
+ llvm::Value *VTableField = LoadCXXThis();
+
+ if (!NonVirtualOffset.isZero() || VirtualOffset)
+ VTableField = ApplyNonVirtualAndVirtualOffset(*this, VTableField,
+ NonVirtualOffset,
+ VirtualOffset);
+
+ // Finally, store the address point.
+ llvm::Type *AddressPointPtrTy =
+ VTableAddressPoint->getType()->getPointerTo();
+ VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
+ llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
+ CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
+}
+
+void
+CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ CharUnits OffsetFromNearestVBase,
+ bool BaseIsNonVirtualPrimaryBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy& VBases) {
+ // If this base is a non-virtual primary base the address point has already
+ // been set.
+ if (!BaseIsNonVirtualPrimaryBase) {
+ // Initialize the vtable pointer for this base.
+ InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
+ VTable, VTableClass);
+ }
+
+ const CXXRecordDecl *RD = Base.getBase();
+
+ // Traverse bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ CXXRecordDecl *BaseDecl
+ = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore classes without a vtable.
+ if (!BaseDecl->isDynamicClass())
+ continue;
+
+ CharUnits BaseOffset;
+ CharUnits BaseOffsetFromNearestVBase;
+ bool BaseDeclIsNonVirtualPrimaryBase;
+
+ if (I->isVirtual()) {
+ // Check if we've visited this virtual base before.
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ const ASTRecordLayout &Layout =
+ getContext().getASTRecordLayout(VTableClass);
+
+ BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
+ BaseOffsetFromNearestVBase = CharUnits::Zero();
+ BaseDeclIsNonVirtualPrimaryBase = false;
+ } else {
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+
+ BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
+ BaseOffsetFromNearestVBase =
+ OffsetFromNearestVBase + Layout.getBaseClassOffset(BaseDecl);
+ BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
+ }
+
+ InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
+ I->isVirtual() ? BaseDecl : NearestVBase,
+ BaseOffsetFromNearestVBase,
+ BaseDeclIsNonVirtualPrimaryBase,
+ VTable, VTableClass, VBases);
+ }
+}
+
+void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
+ // Ignore classes without a vtable.
+ if (!RD->isDynamicClass())
+ return;
+
+ // Get the VTable.
+ llvm::Constant *VTable = CGM.getVTables().GetAddrOfVTable(RD);
+
+ // Initialize the vtable pointers for this class and all of its bases.
+ VisitedVirtualBasesSetTy VBases;
+ InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
+ /*NearestVBase=*/0,
+ /*OffsetFromNearestVBase=*/CharUnits::Zero(),
+ /*BaseIsNonVirtualPrimaryBase=*/false,
+ VTable, RD, VBases);
+}
+
+llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
+ llvm::Type *Ty) {
+ llvm::Value *VTablePtrSrc = Builder.CreateBitCast(This, Ty->getPointerTo());
+ llvm::Instruction *VTable = Builder.CreateLoad(VTablePtrSrc, "vtable");
+ CGM.DecorateInstruction(VTable, CGM.getTBAAInfoForVTablePtr());
+ return VTable;
+}
+
+static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
+ const Expr *E = Base;
+
+ while (true) {
+ E = E->IgnoreParens();
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase ||
+ CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ }
+
+ break;
+ }
+
+ QualType DerivedType = E->getType();
+ if (const PointerType *PTy = DerivedType->getAs<PointerType>())
+ DerivedType = PTy->getPointeeType();
+
+ return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
+}
+
+// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
+// quite what we want.
+static const Expr *skipNoOpCastsAndParens(const Expr *E) {
+ while (true) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ E = PE->getSubExpr();
+ continue;
+ }
+
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ }
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (UO->getOpcode() == UO_Extension) {
+ E = UO->getSubExpr();
+ continue;
+ }
+ }
+ return E;
+ }
+}
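+
+// For illustration: skipNoOpCastsAndParens reduces an expression such as
+// '(__extension__ (b))' to plain 'b', so the devirtualization check below
+// sees the underlying DeclRefExpr.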
+
+/// canDevirtualizeMemberFunctionCall - Checks whether the given virtual member
+/// function call on the given expr can be devirtualized.
+static bool canDevirtualizeMemberFunctionCall(const Expr *Base,
+ const CXXMethodDecl *MD) {
+ // If the most derived class is marked final, we know that no subclass can
+ // override this member function and so we can devirtualize it. For example:
+ //
+  // struct A { virtual void f(); };
+ // struct B final : A { };
+ //
+ // void f(B *b) {
+ // b->f();
+ // }
+ //
+ const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
+ if (MostDerivedClassDecl->hasAttr<FinalAttr>())
+ return true;
+
+ // If the member function is marked 'final', we know that it can't be
+ // overridden and can therefore devirtualize it.
+ if (MD->hasAttr<FinalAttr>())
+ return true;
+
+  // Similarly, if the method's class is marked 'final', nothing can derive
+  // from it, so we can devirtualize the member function call as well.
+ if (MD->getParent()->hasAttr<FinalAttr>())
+ return true;
+
+ Base = skipNoOpCastsAndParens(Base);
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+      // The base is a variable. If it is a complete object (not a pointer or
+      // reference), its dynamic type equals its static type, so the call can
+      // be devirtualized.
+ return VD->getType()->isRecordType();
+ }
+
+ return false;
+ }
+
+ // We can always devirtualize calls on temporary object expressions.
+ if (isa<CXXConstructExpr>(Base))
+ return true;
+
+ // And calls on bound temporaries.
+ if (isa<CXXBindTemporaryExpr>(Base))
+ return true;
+
+ // Check if this is a call expr that returns a record type.
+ if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
+ return CE->getCallReturnType()->isRecordType();
+
+ // We can't devirtualize the call.
+ return false;
+}
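+
+// For illustration, the variable case above in source form:
+//
+//   void g(A a, A *p) {
+//     a.f();   // 'a' is a complete object: dynamic type known, direct call
+//     p->f();  // only a pointer: may refer to a derived object, virtual call
+//   }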
+
+static bool UseVirtualCall(ASTContext &Context,
+ const CXXOperatorCallExpr *CE,
+ const CXXMethodDecl *MD) {
+ if (!MD->isVirtual())
+ return false;
+
+ // When building with -fapple-kext, all calls must go through the vtable since
+ // the kernel linker can do runtime patching of vtables.
+ if (Context.getLangOpts().AppleKext)
+ return true;
+
+ return !canDevirtualizeMemberFunctionCall(CE->getArg(0), MD);
+}
+
+llvm::Value *
+CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD,
+ llvm::Value *This) {
+ llvm::FunctionType *fnType =
+ CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodDeclaration(MD));
+
+ if (UseVirtualCall(getContext(), E, MD))
+ return BuildVirtualCall(MD, This, fnType);
+
+ return CGM.GetAddrOfFunction(MD, fnType);
+}
+
+void CodeGenFunction::EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
+ CallArgList &CallArgs) {
+  // Look up the lambda's call operator.
+ DeclarationName Name
+ = getContext().DeclarationNames.getCXXOperatorName(OO_Call);
+ DeclContext::lookup_const_result Calls = Lambda->lookup(Name);
+ CXXMethodDecl *CallOperator = cast<CXXMethodDecl>(*Calls.first++);
+ const FunctionProtoType *FPT =
+ CallOperator->getType()->getAs<FunctionProtoType>();
+ QualType ResultType = FPT->getResultType();
+
+ // Get the address of the call operator.
+ GlobalDecl GD(CallOperator);
+ const CGFunctionInfo &CalleeFnInfo =
+ CGM.getTypes().arrangeFunctionCall(ResultType, CallArgs, FPT->getExtInfo(),
+ RequiredArgs::forPrototypePlus(FPT, 1));
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(CalleeFnInfo);
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty);
+
+ // Determine whether we have a return value slot to use.
+ ReturnValueSlot Slot;
+ if (!ResultType->isVoidType() &&
+ CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ hasAggregateLLVMType(CurFnInfo->getReturnType()))
+ Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
+
+ // Now emit our call.
+ RValue RV = EmitCall(CalleeFnInfo, Callee, Slot, CallArgs, CallOperator);
+
+ // Forward the returned value
+ if (!ResultType->isVoidType() && Slot.isNull())
+ EmitReturnOfRValue(RV, ResultType);
+}
+
+void CodeGenFunction::EmitLambdaBlockInvokeBody() {
+ const BlockDecl *BD = BlockInfo->getBlockDecl();
+ const VarDecl *variable = BD->capture_begin()->getVariable();
+ const CXXRecordDecl *Lambda = variable->getType()->getAsCXXRecordDecl();
+
+ // Start building arguments for forwarding call
+ CallArgList CallArgs;
+
+ QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
+ llvm::Value *ThisPtr = GetAddrOfBlockDecl(variable, false);
+ CallArgs.add(RValue::get(ThisPtr), ThisType);
+
+ // Add the rest of the parameters.
+ for (BlockDecl::param_const_iterator I = BD->param_begin(),
+ E = BD->param_end(); I != E; ++I) {
+ ParmVarDecl *param = *I;
+ EmitDelegateCallArg(CallArgs, param);
+ }
+
+ EmitForwardingCallToLambda(Lambda, CallArgs);
+}
+
+void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
+ if (cast<CXXMethodDecl>(CurFuncDecl)->isVariadic()) {
+ // FIXME: Making this work correctly is nasty because it requires either
+ // cloning the body of the call operator or making the call operator forward.
+ CGM.ErrorUnsupported(CurFuncDecl, "lambda conversion to variadic function");
+ return;
+ }
+
+ EmitFunctionBody(Args);
+}
+
+void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
+ const CXXRecordDecl *Lambda = MD->getParent();
+
+ // Start building arguments for forwarding call
+ CallArgList CallArgs;
+
+ QualType ThisType = getContext().getPointerType(getContext().getRecordType(Lambda));
+ llvm::Value *ThisPtr = llvm::UndefValue::get(getTypes().ConvertType(ThisType));
+ CallArgs.add(RValue::get(ThisPtr), ThisType);
+
+ // Add the rest of the parameters.
+ for (FunctionDecl::param_const_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I) {
+ ParmVarDecl *param = *I;
+ EmitDelegateCallArg(CallArgs, param);
+ }
+
+ EmitForwardingCallToLambda(Lambda, CallArgs);
+}
+
+void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
+ if (MD->isVariadic()) {
+ // FIXME: Making this work correctly is nasty because it requires either
+ // cloning the body of the call operator or making the call operator forward.
+ CGM.ErrorUnsupported(MD, "lambda conversion to variadic function");
+ return;
+ }
+
+ EmitLambdaDelegatingInvokeBody(MD);
+}
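+
+// For illustration, the conversion serviced by the static invoker above:
+//
+//   auto l = [](int x) { return x + 1; };
+//   int (*fp)(int) = l;   // fp points at the static invoker, which simply
+//                         // forwards to the lambda's operator()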
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp
new file mode 100644
index 0000000..b00e2a2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.cpp
@@ -0,0 +1,1103 @@
+//===--- CGCleanup.cpp - Bookkeeping and code emission for cleanups -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains code dealing with the IR generation for cleanups
+// and related information.
+//
+// A "cleanup" is a piece of code which needs to be executed whenever
+// control transfers out of a particular scope. This can be
+// conditionalized to occur only on exceptional control flow, only on
+// normal control flow, or both.
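+//
+// For illustration, a fragment whose lowering needs a cleanup:
+//
+//   { std::string s; g(); }   // ~basic_string must run when the scope exits
+//                             // normally and when g() throws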
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCleanup.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+bool DominatingValue<RValue>::saved_type::needsSaving(RValue rv) {
+ if (rv.isScalar())
+ return DominatingLLVMValue::needsSaving(rv.getScalarVal());
+ if (rv.isAggregate())
+ return DominatingLLVMValue::needsSaving(rv.getAggregateAddr());
+ return true;
+}
+
+DominatingValue<RValue>::saved_type
+DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
+ if (rv.isScalar()) {
+ llvm::Value *V = rv.getScalarVal();
+
+ // These automatically dominate and don't need to be saved.
+ if (!DominatingLLVMValue::needsSaving(V))
+ return saved_type(V, ScalarLiteral);
+
+ // Everything else needs an alloca.
+ llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
+ CGF.Builder.CreateStore(V, addr);
+ return saved_type(addr, ScalarAddress);
+ }
+
+ if (rv.isComplex()) {
+ CodeGenFunction::ComplexPairTy V = rv.getComplexVal();
+ llvm::Type *ComplexTy =
+ llvm::StructType::get(V.first->getType(), V.second->getType(),
+ (void*) 0);
+ llvm::Value *addr = CGF.CreateTempAlloca(ComplexTy, "saved-complex");
+ CGF.StoreComplexToAddr(V, addr, /*volatile*/ false);
+ return saved_type(addr, ComplexAddress);
+ }
+
+ assert(rv.isAggregate());
+ llvm::Value *V = rv.getAggregateAddr(); // TODO: volatile?
+ if (!DominatingLLVMValue::needsSaving(V))
+ return saved_type(V, AggregateLiteral);
+
+ llvm::Value *addr = CGF.CreateTempAlloca(V->getType(), "saved-rvalue");
+ CGF.Builder.CreateStore(V, addr);
+ return saved_type(addr, AggregateAddress);
+}
+
+/// Given a saved r-value produced by SaveRValue, emit the code
+/// necessary to restore it to usability at the current insertion
+/// point.
+RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
+ switch (K) {
+ case ScalarLiteral:
+ return RValue::get(Value);
+ case ScalarAddress:
+ return RValue::get(CGF.Builder.CreateLoad(Value));
+ case AggregateLiteral:
+ return RValue::getAggregate(Value);
+ case AggregateAddress:
+ return RValue::getAggregate(CGF.Builder.CreateLoad(Value));
+ case ComplexAddress:
+ return RValue::getComplex(CGF.LoadComplexFromAddr(Value, false));
+ }
+
+ llvm_unreachable("bad saved r-value kind");
+}
+
+/// Push an entry of the given size onto this protected-scope stack.
+char *EHScopeStack::allocate(size_t Size) {
+ if (!StartOfBuffer) {
+ unsigned Capacity = 1024;
+ while (Capacity < Size) Capacity *= 2;
+ StartOfBuffer = new char[Capacity];
+ StartOfData = EndOfBuffer = StartOfBuffer + Capacity;
+ } else if (static_cast<size_t>(StartOfData - StartOfBuffer) < Size) {
+ unsigned CurrentCapacity = EndOfBuffer - StartOfBuffer;
+ unsigned UsedCapacity = CurrentCapacity - (StartOfData - StartOfBuffer);
+
+ unsigned NewCapacity = CurrentCapacity;
+ do {
+ NewCapacity *= 2;
+ } while (NewCapacity < UsedCapacity + Size);
+
+ char *NewStartOfBuffer = new char[NewCapacity];
+ char *NewEndOfBuffer = NewStartOfBuffer + NewCapacity;
+ char *NewStartOfData = NewEndOfBuffer - UsedCapacity;
+ memcpy(NewStartOfData, StartOfData, UsedCapacity);
+ delete [] StartOfBuffer;
+ StartOfBuffer = NewStartOfBuffer;
+ EndOfBuffer = NewEndOfBuffer;
+ StartOfData = NewStartOfData;
+ }
+
+ assert(StartOfBuffer + Size <= StartOfData);
+ StartOfData -= Size;
+ return StartOfData;
+}
+
+EHScopeStack::stable_iterator
+EHScopeStack::getInnermostActiveNormalCleanup() const {
+ for (stable_iterator si = getInnermostNormalCleanup(), se = stable_end();
+ si != se; ) {
+ EHCleanupScope &cleanup = cast<EHCleanupScope>(*find(si));
+ if (cleanup.isActive()) return si;
+ si = cleanup.getEnclosingNormalCleanup();
+ }
+ return stable_end();
+}
+
+EHScopeStack::stable_iterator EHScopeStack::getInnermostActiveEHScope() const {
+ for (stable_iterator si = getInnermostEHScope(), se = stable_end();
+ si != se; ) {
+ // Skip over inactive cleanups.
+ EHCleanupScope *cleanup = dyn_cast<EHCleanupScope>(&*find(si));
+ if (cleanup && !cleanup->isActive()) {
+ si = cleanup->getEnclosingEHScope();
+ continue;
+ }
+
+ // All other scopes are always active.
+ return si;
+ }
+
+ return stable_end();
+}
+
+
+void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
+ assert(((Size % sizeof(void*)) == 0) && "cleanup type is misaligned");
+ char *Buffer = allocate(EHCleanupScope::getSizeForCleanupSize(Size));
+ bool IsNormalCleanup = Kind & NormalCleanup;
+ bool IsEHCleanup = Kind & EHCleanup;
+ bool IsActive = !(Kind & InactiveCleanup);
+ EHCleanupScope *Scope =
+ new (Buffer) EHCleanupScope(IsNormalCleanup,
+ IsEHCleanup,
+ IsActive,
+ Size,
+ BranchFixups.size(),
+ InnermostNormalCleanup,
+ InnermostEHScope);
+ if (IsNormalCleanup)
+ InnermostNormalCleanup = stable_begin();
+ if (IsEHCleanup)
+ InnermostEHScope = stable_begin();
+
+ return Scope->getCleanupBuffer();
+}
+
+void EHScopeStack::popCleanup() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ assert(isa<EHCleanupScope>(*begin()));
+ EHCleanupScope &Cleanup = cast<EHCleanupScope>(*begin());
+ InnermostNormalCleanup = Cleanup.getEnclosingNormalCleanup();
+ InnermostEHScope = Cleanup.getEnclosingEHScope();
+ StartOfData += Cleanup.getAllocatedSize();
+
+ // Destroy the cleanup.
+ Cleanup.~EHCleanupScope();
+
+ // Check whether we can shrink the branch-fixups stack.
+ if (!BranchFixups.empty()) {
+ // If we no longer have any normal cleanups, all the fixups are
+ // complete.
+ if (!hasNormalCleanups())
+ BranchFixups.clear();
+
+ // Otherwise we can still trim out unnecessary nulls.
+ else
+ popNullFixups();
+ }
+}
+
+EHFilterScope *EHScopeStack::pushFilter(unsigned numFilters) {
+ assert(getInnermostEHScope() == stable_end());
+ char *buffer = allocate(EHFilterScope::getSizeForNumFilters(numFilters));
+ EHFilterScope *filter = new (buffer) EHFilterScope(numFilters);
+ InnermostEHScope = stable_begin();
+ return filter;
+}
+
+void EHScopeStack::popFilter() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ EHFilterScope &filter = cast<EHFilterScope>(*begin());
+ StartOfData += EHFilterScope::getSizeForNumFilters(filter.getNumFilters());
+
+ InnermostEHScope = filter.getEnclosingEHScope();
+}
+
+EHCatchScope *EHScopeStack::pushCatch(unsigned numHandlers) {
+ char *buffer = allocate(EHCatchScope::getSizeForNumHandlers(numHandlers));
+ EHCatchScope *scope =
+ new (buffer) EHCatchScope(numHandlers, InnermostEHScope);
+ InnermostEHScope = stable_begin();
+ return scope;
+}
+
+void EHScopeStack::pushTerminate() {
+ char *Buffer = allocate(EHTerminateScope::getSize());
+ new (Buffer) EHTerminateScope(InnermostEHScope);
+ InnermostEHScope = stable_begin();
+}
+
+/// Remove any 'null' fixups on the stack. However, we can't pop more
+/// fixups than the fixup depth on the innermost normal cleanup, or
+/// else fixups that we try to add to that cleanup will end up in the
+/// wrong place. We *could* try to shrink fixup depths, but that's
+/// actually a lot of work for little benefit.
+void EHScopeStack::popNullFixups() {
+ // We expect this to only be called when there's still an innermost
+ // normal cleanup; otherwise there really shouldn't be any fixups.
+ assert(hasNormalCleanups());
+
+ EHScopeStack::iterator it = find(InnermostNormalCleanup);
+ unsigned MinSize = cast<EHCleanupScope>(*it).getFixupDepth();
+ assert(BranchFixups.size() >= MinSize && "fixup stack out of order");
+
+ while (BranchFixups.size() > MinSize &&
+ BranchFixups.back().Destination == 0)
+ BranchFixups.pop_back();
+}
+
+void CodeGenFunction::initFullExprCleanup() {
+ // Create a variable to decide whether the cleanup needs to be run.
+ llvm::AllocaInst *active
+ = CreateTempAlloca(Builder.getInt1Ty(), "cleanup.cond");
+
+ // Initialize it to false at a site that's guaranteed to be run
+ // before each evaluation.
+ setBeforeOutermostConditional(Builder.getFalse(), active);
+
+ // Initialize it to true at the current location.
+ Builder.CreateStore(Builder.getTrue(), active);
+
+ // Set that as the active flag in the cleanup.
+ EHCleanupScope &cleanup = cast<EHCleanupScope>(*EHStack.begin());
+ assert(cleanup.getActiveFlag() == 0 && "cleanup already has active flag?");
+ cleanup.setActiveFlag(active);
+
+ if (cleanup.isNormalCleanup()) cleanup.setTestFlagInNormalCleanup();
+ if (cleanup.isEHCleanup()) cleanup.setTestFlagInEHCleanup();
+}
+
+void EHScopeStack::Cleanup::anchor() {}
+
+/// All the branch fixups on the EH stack have propagated out past the
+/// outermost normal cleanup; resolve them all by adding cases to the
+/// given switch instruction.
+static void ResolveAllBranchFixups(CodeGenFunction &CGF,
+ llvm::SwitchInst *Switch,
+ llvm::BasicBlock *CleanupEntry) {
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> CasesAdded;
+
+ for (unsigned I = 0, E = CGF.EHStack.getNumBranchFixups(); I != E; ++I) {
+ // Skip this fixup if its destination isn't set.
+ BranchFixup &Fixup = CGF.EHStack.getBranchFixup(I);
+ if (Fixup.Destination == 0) continue;
+
+ // If there isn't an OptimisticBranchBlock, then InitialBranch is
+ // still pointing directly to its destination; forward it to the
+ // appropriate cleanup entry. This is required in the specific
+ // case of
+ // { std::string s; goto lbl; }
+ // lbl:
+ // i.e. where there's an unresolved fixup inside a single cleanup
+ // entry which we're currently popping.
+ if (Fixup.OptimisticBranchBlock == 0) {
+ new llvm::StoreInst(CGF.Builder.getInt32(Fixup.DestinationIndex),
+ CGF.getNormalCleanupDestSlot(),
+ Fixup.InitialBranch);
+ Fixup.InitialBranch->setSuccessor(0, CleanupEntry);
+ }
+
+ // Don't add this case to the switch statement twice.
+ if (!CasesAdded.insert(Fixup.Destination)) continue;
+
+ Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
+ Fixup.Destination);
+ }
+
+ CGF.EHStack.clearFixups();
+}
+
+/// Transitions the terminator of the given exit-block of a cleanup to
+/// be a cleanup switch.
+static llvm::SwitchInst *TransitionToCleanupSwitch(CodeGenFunction &CGF,
+ llvm::BasicBlock *Block) {
+ // If it's a branch, turn it into a switch whose default
+ // destination is its original target.
+ llvm::TerminatorInst *Term = Block->getTerminator();
+ assert(Term && "can't transition block without terminator");
+
+ if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
+ assert(Br->isUnconditional());
+ llvm::LoadInst *Load =
+ new llvm::LoadInst(CGF.getNormalCleanupDestSlot(), "cleanup.dest", Term);
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Br->getSuccessor(0), 4, Block);
+ Br->eraseFromParent();
+ return Switch;
+ } else {
+ return cast<llvm::SwitchInst>(Term);
+ }
+}
+
+void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
+ assert(Block && "resolving a null target block");
+ if (!EHStack.getNumBranchFixups()) return;
+
+ assert(EHStack.hasNormalCleanups() &&
+ "branch fixups exist with no normal cleanups on stack");
+
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> ModifiedOptimisticBlocks;
+ bool ResolvedAny = false;
+
+ for (unsigned I = 0, E = EHStack.getNumBranchFixups(); I != E; ++I) {
+ // Skip this fixup if its destination doesn't match.
+ BranchFixup &Fixup = EHStack.getBranchFixup(I);
+ if (Fixup.Destination != Block) continue;
+
+ Fixup.Destination = 0;
+ ResolvedAny = true;
+
+    // If it doesn't have an optimistic branch block, InitialBranch is
+    // already pointing to the right place.
+ llvm::BasicBlock *BranchBB = Fixup.OptimisticBranchBlock;
+ if (!BranchBB)
+ continue;
+
+ // Don't process the same optimistic branch block twice.
+ if (!ModifiedOptimisticBlocks.insert(BranchBB))
+ continue;
+
+ llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);
+
+ // Add a case to the switch.
+ Switch->addCase(Builder.getInt32(Fixup.DestinationIndex), Block);
+ }
+
+ if (ResolvedAny)
+ EHStack.popNullFixups();
+}
+
+/// Pops cleanup blocks until the given savepoint is reached.
+void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
+ assert(Old.isValid());
+
+ while (EHStack.stable_begin() != Old) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+
+ // As long as Old strictly encloses the scope's enclosing normal
+ // cleanup, we're going to emit another normal cleanup which
+ // fallthrough can propagate through.
+ bool FallThroughIsBranchThrough =
+ Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());
+
+ PopCleanupBlock(FallThroughIsBranchThrough);
+ }
+}
+
+static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
+ EHCleanupScope &Scope) {
+ assert(Scope.isNormalCleanup());
+ llvm::BasicBlock *Entry = Scope.getNormalBlock();
+ if (!Entry) {
+ Entry = CGF.createBasicBlock("cleanup");
+ Scope.setNormalBlock(Entry);
+ }
+ return Entry;
+}
+
+/// Attempts to reduce a cleanup's entry block to a fallthrough. This
+/// is basically llvm::MergeBlockIntoPredecessor, except
+/// simplified/optimized for the tighter constraints on cleanup blocks.
+///
+/// Returns the new block, whatever it is.
+static llvm::BasicBlock *SimplifyCleanupEntry(CodeGenFunction &CGF,
+ llvm::BasicBlock *Entry) {
+ llvm::BasicBlock *Pred = Entry->getSinglePredecessor();
+ if (!Pred) return Entry;
+
+ llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Pred->getTerminator());
+ if (!Br || Br->isConditional()) return Entry;
+ assert(Br->getSuccessor(0) == Entry);
+
+ // If we were previously inserting at the end of the cleanup entry
+ // block, we'll need to continue inserting at the end of the
+ // predecessor.
+ bool WasInsertBlock = CGF.Builder.GetInsertBlock() == Entry;
+ assert(!WasInsertBlock || CGF.Builder.GetInsertPoint() == Entry->end());
+
+ // Kill the branch.
+ Br->eraseFromParent();
+
+ // Replace all uses of the entry with the predecessor, in case there
+ // are phis in the cleanup.
+ Entry->replaceAllUsesWith(Pred);
+
+ // Merge the blocks.
+ Pred->getInstList().splice(Pred->end(), Entry->getInstList());
+
+ // Kill the entry block.
+ Entry->eraseFromParent();
+
+ if (WasInsertBlock)
+ CGF.Builder.SetInsertPoint(Pred);
+
+ return Pred;
+}
+
+static void EmitCleanup(CodeGenFunction &CGF,
+ EHScopeStack::Cleanup *Fn,
+ EHScopeStack::Cleanup::Flags flags,
+ llvm::Value *ActiveFlag) {
+ // EH cleanups always occur within a terminate scope.
+ if (flags.isForEHCleanup()) CGF.EHStack.pushTerminate();
+
+ // If there's an active flag, load it and skip the cleanup if it's
+ // false.
+ llvm::BasicBlock *ContBB = 0;
+ if (ActiveFlag) {
+ ContBB = CGF.createBasicBlock("cleanup.done");
+ llvm::BasicBlock *CleanupBB = CGF.createBasicBlock("cleanup.action");
+ llvm::Value *IsActive
+ = CGF.Builder.CreateLoad(ActiveFlag, "cleanup.is_active");
+ CGF.Builder.CreateCondBr(IsActive, CleanupBB, ContBB);
+ CGF.EmitBlock(CleanupBB);
+ }
+
+ // Ask the cleanup to emit itself.
+ Fn->Emit(CGF, flags);
+ assert(CGF.HaveInsertPoint() && "cleanup ended with no insertion point?");
+
+ // Emit the continuation block if there was an active flag.
+ if (ActiveFlag)
+ CGF.EmitBlock(ContBB);
+
+ // Leave the terminate scope.
+ if (flags.isForEHCleanup()) CGF.EHStack.popTerminate();
+}
+
+static void ForwardPrebranchedFallthrough(llvm::BasicBlock *Exit,
+ llvm::BasicBlock *From,
+ llvm::BasicBlock *To) {
+ // Exit is the exit block of a cleanup, so it always terminates in
+ // an unconditional branch or a switch.
+ llvm::TerminatorInst *Term = Exit->getTerminator();
+
+ if (llvm::BranchInst *Br = dyn_cast<llvm::BranchInst>(Term)) {
+ assert(Br->isUnconditional() && Br->getSuccessor(0) == From);
+ Br->setSuccessor(0, To);
+ } else {
+ llvm::SwitchInst *Switch = cast<llvm::SwitchInst>(Term);
+ for (unsigned I = 0, E = Switch->getNumSuccessors(); I != E; ++I)
+ if (Switch->getSuccessor(I) == From)
+ Switch->setSuccessor(I, To);
+ }
+}
+
+/// We don't need a normal entry block for the given cleanup.
+/// Optimistic fixup branches can cause such a block to come into
+/// existence anyway; if so, destroy it.
+///
+/// The validity of this transformation is very much specific to the
+/// exact ways in which we form branches to cleanup entries.
+static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
+ EHCleanupScope &scope) {
+ llvm::BasicBlock *entry = scope.getNormalBlock();
+ if (!entry) return;
+
+ // Replace all the uses with unreachable.
+ llvm::BasicBlock *unreachableBB = CGF.getUnreachableBlock();
+ for (llvm::BasicBlock::use_iterator
+ i = entry->use_begin(), e = entry->use_end(); i != e; ) {
+ llvm::Use &use = i.getUse();
+ ++i;
+
+ use.set(unreachableBB);
+
+ // The only uses should be fixup switches.
+ llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
+ if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) {
+ // Replace the switch with a branch.
+ llvm::BranchInst::Create(si->case_begin().getCaseSuccessor(), si);
+
+ // The switch operand is a load from the cleanup-dest alloca.
+ llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());
+
+ // Destroy the switch.
+ si->eraseFromParent();
+
+ // Destroy the load.
+ assert(condition->getOperand(0) == CGF.NormalCleanupDest);
+ assert(condition->use_empty());
+ condition->eraseFromParent();
+ }
+ }
+
+ assert(entry->use_empty());
+ delete entry;
+}
+
+/// Pops a cleanup block. If the block includes a normal cleanup, the
+/// current insertion point is threaded through the cleanup, as are
+/// any branch fixups on the cleanup.
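+///
+/// An illustrative source-level case:
+///   { X x; if (cond) goto out; }
+///   out: ;
+/// Both the fallthrough and the forward goto (a branch fixup, since the
+/// label is not yet known) are threaded through the cleanup for 'x'.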
+void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
+ assert(!EHStack.empty() && "cleanup stack is empty!");
+ assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+ assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());
+
+ // Remember activation information.
+ bool IsActive = Scope.isActive();
+ llvm::Value *NormalActiveFlag =
+ Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : 0;
+ llvm::Value *EHActiveFlag =
+ Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : 0;
+
+ // Check whether we need an EH cleanup. This is only true if we've
+ // generated a lazy EH cleanup block.
+ llvm::BasicBlock *EHEntry = Scope.getCachedEHDispatchBlock();
+ assert(Scope.hasEHBranches() == (EHEntry != 0));
+ bool RequiresEHCleanup = (EHEntry != 0);
+ EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope();
+
+ // Check the three conditions which might require a normal cleanup:
+
+ // - whether there are branch fix-ups through this cleanup
+ unsigned FixupDepth = Scope.getFixupDepth();
+ bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;
+
+ // - whether there are branch-throughs or branch-afters
+ bool HasExistingBranches = Scope.hasBranches();
+
+ // - whether there's a fallthrough
+ llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
+ bool HasFallthrough = (FallthroughSource != 0 && IsActive);
+
+ // Branch-through fall-throughs leave the insertion point set to the
+ // end of the last cleanup, which points to the current scope. The
+ // rest of IR gen doesn't need to worry about this; it only happens
+ // during the execution of PopCleanupBlocks().
+ bool HasPrebranchedFallthrough =
+ (FallthroughSource && FallthroughSource->getTerminator());
+
+ // If this is a normal cleanup, then having a prebranched
+ // fallthrough implies that the fallthrough source unconditionally
+ // jumps here.
+ assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough ||
+ (Scope.getNormalBlock() &&
+ FallthroughSource->getTerminator()->getSuccessor(0)
+ == Scope.getNormalBlock()));
+
+ bool RequiresNormalCleanup = false;
+ if (Scope.isNormalCleanup() &&
+ (HasFixups || HasExistingBranches || HasFallthrough)) {
+ RequiresNormalCleanup = true;
+ }
+
+ // If we have a prebranched fallthrough into an inactive normal
+ // cleanup, rewrite it so that it leads to the appropriate place.
+ if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
+ llvm::BasicBlock *prebranchDest;
+
+ // If the prebranch is semantically branching through the next
+ // cleanup, just forward it to the next block, leaving the
+ // insertion point in the prebranched block.
+ if (FallthroughIsBranchThrough) {
+ EHScope &enclosing = *EHStack.find(Scope.getEnclosingNormalCleanup());
+ prebranchDest = CreateNormalEntry(*this, cast<EHCleanupScope>(enclosing));
+
+ // Otherwise, we need to make a new block. If the normal cleanup
+ // isn't being used at all, we could actually reuse the normal
+ // entry block, but this is simpler, and it avoids conflicts with
+ // dead optimistic fixup branches.
+ } else {
+ prebranchDest = createBasicBlock("forwarded-prebranch");
+ EmitBlock(prebranchDest);
+ }
+
+ llvm::BasicBlock *normalEntry = Scope.getNormalBlock();
+ assert(normalEntry && !normalEntry->use_empty());
+
+ ForwardPrebranchedFallthrough(FallthroughSource,
+ normalEntry, prebranchDest);
+ }
+
+ // If we don't need the cleanup at all, we're done.
+ if (!RequiresNormalCleanup && !RequiresEHCleanup) {
+ destroyOptimisticNormalEntry(*this, Scope);
+ EHStack.popCleanup(); // safe because there are no fixups
+ assert(EHStack.getNumBranchFixups() == 0 ||
+ EHStack.hasNormalCleanups());
+ return;
+ }
+
+ // Copy the cleanup emission data out. Note that SmallVector
+ // guarantees maximal alignment for its buffer regardless of its
+ // type parameter.
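+ // (The copy is necessary because popCleanup() below releases the
+ // scope's storage on the EH stack before the cleanup is emitted.)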
+ SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
+ CleanupBuffer.reserve(Scope.getCleanupSize());
+ memcpy(CleanupBuffer.data(),
+ Scope.getCleanupBuffer(), Scope.getCleanupSize());
+ CleanupBuffer.set_size(Scope.getCleanupSize());
+ EHScopeStack::Cleanup *Fn =
+ reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());
+
+ EHScopeStack::Cleanup::Flags cleanupFlags;
+ if (Scope.isNormalCleanup())
+ cleanupFlags.setIsNormalCleanupKind();
+ if (Scope.isEHCleanup())
+ cleanupFlags.setIsEHCleanupKind();
+
+ if (!RequiresNormalCleanup) {
+ destroyOptimisticNormalEntry(*this, Scope);
+ EHStack.popCleanup();
+ } else {
+ // If we have a fallthrough and no other need for the cleanup,
+ // emit it directly.
+ if (HasFallthrough && !HasPrebranchedFallthrough &&
+ !HasFixups && !HasExistingBranches) {
+
+ destroyOptimisticNormalEntry(*this, Scope);
+ EHStack.popCleanup();
+
+ EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
+
+ // Otherwise, the best approach is to thread everything through
+ // the cleanup block and then try to clean up after ourselves.
+ } else {
+ // Force the entry block to exist.
+ llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);
+
+ // I. Set up the fallthrough edge in.
+
+ CGBuilderTy::InsertPoint savedInactiveFallthroughIP;
+
+ // If there's a fallthrough, we need to store the cleanup
+ // destination index. For fall-throughs this is always zero.
+ if (HasFallthrough) {
+ if (!HasPrebranchedFallthrough)
+ Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());
+
+ // Otherwise, save and clear the IP if we don't have fallthrough
+ // because the cleanup is inactive.
+ } else if (FallthroughSource) {
+ assert(!IsActive && "source without fallthrough for active cleanup");
+ savedInactiveFallthroughIP = Builder.saveAndClearIP();
+ }
+
+ // II. Emit the entry block. This implicitly branches to it if
+ // we have fallthrough. All the fixups and existing branches
+ // should already be branched to it.
+ EmitBlock(NormalEntry);
+
+ // III. Figure out where we're going and build the cleanup
+ // epilogue.
+
+ bool HasEnclosingCleanups =
+ (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());
+
+ // Compute the branch-through dest if we need it:
+ // - if there are branch-throughs threaded through the scope
+ // - if fall-through is a branch-through
+ // - if there are fixups that will be optimistically forwarded
+ // to the enclosing cleanup
+ llvm::BasicBlock *BranchThroughDest = 0;
+ if (Scope.hasBranchThroughs() ||
+ (FallthroughSource && FallthroughIsBranchThrough) ||
+ (HasFixups && HasEnclosingCleanups)) {
+ assert(HasEnclosingCleanups);
+ EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
+ BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
+ }
+
+ llvm::BasicBlock *FallthroughDest = 0;
+ SmallVector<llvm::Instruction*, 2> InstsToAppend;
+
+ // If there's exactly one branch-after and no other threads,
+ // we can route it without a switch.
+ if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
+ Scope.getNumBranchAfters() == 1) {
+ assert(!BranchThroughDest || !IsActive);
+
+ // TODO: clean up the possibly dead stores to the cleanup dest slot.
+ llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
+ InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));
+
+ // Build a switch-out if we need it:
+ // - if there are branch-afters threaded through the scope
+ // - if fall-through is a branch-after
+ // - if there are fixups that have nowhere left to go and
+ // so must be immediately resolved
+ } else if (Scope.getNumBranchAfters() ||
+ (HasFallthrough && !FallthroughIsBranchThrough) ||
+ (HasFixups && !HasEnclosingCleanups)) {
+
+ llvm::BasicBlock *Default =
+ (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());
+
+ // TODO: base this on the number of branch-afters and fixups
+ const unsigned SwitchCapacity = 10;
+
+ llvm::LoadInst *Load =
+ new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
+ llvm::SwitchInst *Switch =
+ llvm::SwitchInst::Create(Load, Default, SwitchCapacity);
+
+ InstsToAppend.push_back(Load);
+ InstsToAppend.push_back(Switch);
+
+ // Branch-after fallthrough.
+ if (FallthroughSource && !FallthroughIsBranchThrough) {
+ FallthroughDest = createBasicBlock("cleanup.cont");
+ if (HasFallthrough)
+ Switch->addCase(Builder.getInt32(0), FallthroughDest);
+ }
+
+ for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
+ Switch->addCase(Scope.getBranchAfterIndex(I),
+ Scope.getBranchAfterBlock(I));
+ }
+
+ // If there aren't any enclosing cleanups, we can resolve all
+ // the fixups now.
+ if (HasFixups && !HasEnclosingCleanups)
+ ResolveAllBranchFixups(*this, Switch, NormalEntry);
+ } else {
+ // We should always have a branch-through destination in this case.
+ assert(BranchThroughDest);
+ InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
+ }
+
+ // IV. Pop the cleanup and emit it.
+ EHStack.popCleanup();
+ assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);
+
+ EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);
+
+ // Append the prepared cleanup prologue from above.
+ llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
+ for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
+ NormalExit->getInstList().push_back(InstsToAppend[I]);
+
+ // Optimistically hope that any fixups will continue falling through.
+ for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+ I < E; ++I) {
+ BranchFixup &Fixup = EHStack.getBranchFixup(I);
+ if (!Fixup.Destination) continue;
+ if (!Fixup.OptimisticBranchBlock) {
+ new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
+ getNormalCleanupDestSlot(),
+ Fixup.InitialBranch);
+ Fixup.InitialBranch->setSuccessor(0, NormalEntry);
+ }
+ Fixup.OptimisticBranchBlock = NormalExit;
+ }
+
+ // V. Set up the fallthrough edge out.
+
+ // Case 1: a fallthrough source exists but doesn't branch to the
+ // cleanup because the cleanup is inactive.
+ if (!HasFallthrough && FallthroughSource) {
+ // Prebranched fallthrough was forwarded earlier.
+ // Non-prebranched fallthrough doesn't need to be forwarded.
+ // Either way, all we need to do is restore the IP we cleared before.
+ assert(!IsActive);
+ Builder.restoreIP(savedInactiveFallthroughIP);
+
+ // Case 2: a fallthrough source exists and should branch to the
+ // cleanup, but we're not supposed to branch through to the next
+ // cleanup.
+ } else if (HasFallthrough && FallthroughDest) {
+ assert(!FallthroughIsBranchThrough);
+ EmitBlock(FallthroughDest);
+
+ // Case 3: a fallthrough source exists and should branch to the
+ // cleanup and then through to the next.
+ } else if (HasFallthrough) {
+ // Everything is already set up for this.
+
+ // Case 4: no fallthrough source exists.
+ } else {
+ Builder.ClearInsertionPoint();
+ }
+
+ // VI. Assorted cleaning.
+
+ // Check whether we can merge NormalEntry into a single predecessor.
+ // This might invalidate (non-IR) pointers to NormalEntry.
+ llvm::BasicBlock *NewNormalEntry =
+ SimplifyCleanupEntry(*this, NormalEntry);
+
+ // If it did invalidate those pointers, and NormalEntry was the same
+ // as NormalExit, go back and patch up the fixups.
+ if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
+ for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
+ I < E; ++I)
+ EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
+ }
+ }
+
+ assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);
+
+ // Emit the EH cleanup if required.
+ if (RequiresEHCleanup) {
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ EmitBlock(EHEntry);
+
+ cleanupFlags.setIsForEHCleanup();
+ EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
+
+ Builder.CreateBr(getEHDispatchBlock(EHParent));
+
+ Builder.restoreIP(SavedIP);
+
+ SimplifyCleanupEntry(*this, EHEntry);
+ }
+}
+
+/// isObviouslyBranchWithoutCleanups - Return true if a branch to the
+/// specified destination obviously has no cleanups to run. 'false' is always
+/// a conservatively correct answer for this method.
+bool CodeGenFunction::isObviouslyBranchWithoutCleanups(JumpDest Dest) const {
+ assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
+ && "stale jump destination");
+
+ // Calculate the innermost active normal cleanup.
+ EHScopeStack::stable_iterator TopCleanup =
+ EHStack.getInnermostActiveNormalCleanup();
+
+ // If we're not in an active normal cleanup scope, or if the
+ // destination scope is within the innermost active normal cleanup
+ // scope, we don't need to worry about fixups.
+ if (TopCleanup == EHStack.stable_end() ||
+ TopCleanup.encloses(Dest.getScopeDepth())) // works for invalid
+ return true;
+
+ // Otherwise, we might need some cleanups.
+ return false;
+}
+
+
+/// Terminate the current block by emitting a branch which might leave
+/// the current cleanup-protected scope. The target scope may not yet
+/// be known, in which case this will require a fixup.
+///
+/// As a side-effect, this method clears the insertion point.
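+///
+/// For example, the 'break' in
+///   for (;;) { A a; break; }
+/// is emitted through this method so that ~A() runs before control
+/// leaves the loop.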
+void CodeGenFunction::EmitBranchThroughCleanup(JumpDest Dest) {
+ assert(Dest.getScopeDepth().encloses(EHStack.stable_begin())
+ && "stale jump destination");
+
+ if (!HaveInsertPoint())
+ return;
+
+ // Create the branch.
+ llvm::BranchInst *BI = Builder.CreateBr(Dest.getBlock());
+
+ // Calculate the innermost active normal cleanup.
+ EHScopeStack::stable_iterator
+ TopCleanup = EHStack.getInnermostActiveNormalCleanup();
+
+ // If we're not in an active normal cleanup scope, or if the
+ // destination scope is within the innermost active normal cleanup
+ // scope, we don't need to worry about fixups.
+ if (TopCleanup == EHStack.stable_end() ||
+ TopCleanup.encloses(Dest.getScopeDepth())) { // works for invalid
+ Builder.ClearInsertionPoint();
+ return;
+ }
+
+ // If we can't resolve the destination cleanup scope, just add this
+ // to the current cleanup scope as a branch fixup.
+ if (!Dest.getScopeDepth().isValid()) {
+ BranchFixup &Fixup = EHStack.addBranchFixup();
+ Fixup.Destination = Dest.getBlock();
+ Fixup.DestinationIndex = Dest.getDestIndex();
+ Fixup.InitialBranch = BI;
+ Fixup.OptimisticBranchBlock = 0;
+
+ Builder.ClearInsertionPoint();
+ return;
+ }
+
+ // Otherwise, thread through all the normal cleanups in scope.
+
+ // Store the index at the start.
+ llvm::ConstantInt *Index = Builder.getInt32(Dest.getDestIndex());
+ new llvm::StoreInst(Index, getNormalCleanupDestSlot(), BI);
+
+ // Adjust BI to point to the first cleanup block.
+ {
+ EHCleanupScope &Scope =
+ cast<EHCleanupScope>(*EHStack.find(TopCleanup));
+ BI->setSuccessor(0, CreateNormalEntry(*this, Scope));
+ }
+
+ // Add this destination to all the scopes involved.
+ EHScopeStack::stable_iterator I = TopCleanup;
+ EHScopeStack::stable_iterator E = Dest.getScopeDepth();
+ if (E.strictlyEncloses(I)) {
+ while (true) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(I));
+ assert(Scope.isNormalCleanup());
+ I = Scope.getEnclosingNormalCleanup();
+
+ // If this is the last cleanup we're propagating through, tell it
+ // that there's a resolved jump moving through it.
+ if (!E.strictlyEncloses(I)) {
+ Scope.addBranchAfter(Index, Dest.getBlock());
+ break;
+ }
+
+ // Otherwise, tell the scope that there's a jump propagating
+ // through it. If this isn't new information, all the rest of
+ // the work has been done before.
+ if (!Scope.addBranchThrough(Dest.getBlock()))
+ break;
+ }
+ }
+
+ Builder.ClearInsertionPoint();
+}
+
+static bool IsUsedAsNormalCleanup(EHScopeStack &EHStack,
+ EHScopeStack::stable_iterator C) {
+ // If we needed a normal block for any reason, that counts.
+ if (cast<EHCleanupScope>(*EHStack.find(C)).getNormalBlock())
+ return true;
+
+ // Check whether any enclosed cleanups were needed.
+ for (EHScopeStack::stable_iterator
+ I = EHStack.getInnermostNormalCleanup();
+ I != C; ) {
+ assert(C.strictlyEncloses(I));
+ EHCleanupScope &S = cast<EHCleanupScope>(*EHStack.find(I));
+ if (S.getNormalBlock()) return true;
+ I = S.getEnclosingNormalCleanup();
+ }
+
+ return false;
+}
+
+static bool IsUsedAsEHCleanup(EHScopeStack &EHStack,
+ EHScopeStack::stable_iterator cleanup) {
+ // If we needed an EH block for any reason, that counts.
+ if (EHStack.find(cleanup)->hasEHBranches())
+ return true;
+
+ // Check whether any enclosed cleanups were needed.
+ for (EHScopeStack::stable_iterator
+ i = EHStack.getInnermostEHScope(); i != cleanup; ) {
+ assert(cleanup.strictlyEncloses(i));
+
+ EHScope &scope = *EHStack.find(i);
+ if (scope.hasEHBranches())
+ return true;
+
+ i = scope.getEnclosingEHScope();
+ }
+
+ return false;
+}
+
+enum ForActivation_t {
+ ForActivation,
+ ForDeactivation
+};
+
+/// The given cleanup block is changing activation state. Configure a
+/// cleanup variable if necessary.
+///
+/// It would be good if we had some way of determining if there were
+/// extra uses *after* the change-over point.
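+///
+/// A typical (illustrative) client: the cleanup that deletes storage
+/// from a new-expression if initialization throws is deactivated once
+/// the object has been fully constructed.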
+static void SetupCleanupBlockActivation(CodeGenFunction &CGF,
+ EHScopeStack::stable_iterator C,
+ ForActivation_t kind,
+ llvm::Instruction *dominatingIP) {
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*CGF.EHStack.find(C));
+
+ // We always need the flag if we're activating the cleanup in a
+ // conditional context, because we have to assume that the current
+ // location doesn't necessarily dominate the cleanup's code.
+ bool isActivatedInConditional =
+ (kind == ForActivation && CGF.isInConditionalBranch());
+
+ bool needFlag = false;
+
+ // Calculate whether the cleanup was used:
+
+ // - as a normal cleanup
+ if (Scope.isNormalCleanup() &&
+ (isActivatedInConditional || IsUsedAsNormalCleanup(CGF.EHStack, C))) {
+ Scope.setTestFlagInNormalCleanup();
+ needFlag = true;
+ }
+
+ // - as an EH cleanup
+ if (Scope.isEHCleanup() &&
+ (isActivatedInConditional || IsUsedAsEHCleanup(CGF.EHStack, C))) {
+ Scope.setTestFlagInEHCleanup();
+ needFlag = true;
+ }
+
+ // If it hasn't yet been used as either, we're done.
+ if (!needFlag) return;
+
+ llvm::AllocaInst *var = Scope.getActiveFlag();
+ if (!var) {
+ var = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "cleanup.isactive");
+ Scope.setActiveFlag(var);
+
+ assert(dominatingIP && "no existing variable and no dominating IP!");
+
+ // Initialize to true or false depending on whether it was
+ // active up to this point.
+ llvm::Value *value = CGF.Builder.getInt1(kind == ForDeactivation);
+
+ // If we're in a conditional block, ignore the dominating IP and
+ // use the outermost conditional branch.
+ if (CGF.isInConditionalBranch()) {
+ CGF.setBeforeOutermostConditional(value, var);
+ } else {
+ new llvm::StoreInst(value, var, dominatingIP);
+ }
+ }
+
+ CGF.Builder.CreateStore(CGF.Builder.getInt1(kind == ForActivation), var);
+}
+
+/// Activate a cleanup that was created in an inactivated state.
+void CodeGenFunction::ActivateCleanupBlock(EHScopeStack::stable_iterator C,
+ llvm::Instruction *dominatingIP) {
+ assert(C != EHStack.stable_end() && "activating bottom of stack?");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
+ assert(!Scope.isActive() && "double activation");
+
+ SetupCleanupBlockActivation(*this, C, ForActivation, dominatingIP);
+
+ Scope.setActive(true);
+}
+
+/// Deactivate a cleanup that was created in an active state.
+void CodeGenFunction::DeactivateCleanupBlock(EHScopeStack::stable_iterator C,
+ llvm::Instruction *dominatingIP) {
+ assert(C != EHStack.stable_end() && "deactivating bottom of stack?");
+ EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.find(C));
+ assert(Scope.isActive() && "double deactivation");
+
+ // If it's the top of the stack, just pop it.
+ if (C == EHStack.stable_begin()) {
+ // If it's a normal cleanup, we need to pretend that the
+ // fallthrough is unreachable.
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+ PopCleanupBlock();
+ Builder.restoreIP(SavedIP);
+ return;
+ }
+
+ // Otherwise, follow the general case.
+ SetupCleanupBlockActivation(*this, C, ForDeactivation, dominatingIP);
+
+ Scope.setActive(false);
+}
+
+llvm::Value *CodeGenFunction::getNormalCleanupDestSlot() {
+ if (!NormalCleanupDest)
+ NormalCleanupDest =
+ CreateTempAlloca(Builder.getInt32Ty(), "cleanup.dest.slot");
+ return NormalCleanupDest;
+}
+
+/// Emits all the code to cause the given temporary to be cleaned up.
+void CodeGenFunction::EmitCXXTemporary(const CXXTemporary *Temporary,
+ QualType TempType,
+ llvm::Value *Ptr) {
+ pushDestroy(NormalAndEHCleanup, Ptr, TempType, destroyCXXObject,
+ /*useEHCleanup*/ true);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h
new file mode 100644
index 0000000..7726e44
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGCleanup.h
@@ -0,0 +1,539 @@
+//===-- CGCleanup.h - Classes for cleanups IR generation --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes support the generation of LLVM IR for cleanups.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGCLEANUP_H
+#define CLANG_CODEGEN_CGCLEANUP_H
+
+/// EHScopeStack is defined in CodeGenFunction.h, but its
+/// implementation is in this file and in CGCleanup.cpp.
+#include "CodeGenFunction.h"
+
+namespace llvm {
+ class Value;
+ class BasicBlock;
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// A protected scope for zero-cost EH handling.
+class EHScope {
+ llvm::BasicBlock *CachedLandingPad;
+ llvm::BasicBlock *CachedEHDispatchBlock;
+
+ EHScopeStack::stable_iterator EnclosingEHScope;
+
+ class CommonBitFields {
+ friend class EHScope;
+ unsigned Kind : 2;
+ };
+ enum { NumCommonBits = 2 };
+
+protected:
+ class CatchBitFields {
+ friend class EHCatchScope;
+ unsigned : NumCommonBits;
+
+ unsigned NumHandlers : 32 - NumCommonBits;
+ };
+
+ class CleanupBitFields {
+ friend class EHCleanupScope;
+ unsigned : NumCommonBits;
+
+ /// Whether this cleanup needs to be run along normal edges.
+ unsigned IsNormalCleanup : 1;
+
+ /// Whether this cleanup needs to be run along exception edges.
+ unsigned IsEHCleanup : 1;
+
+ /// Whether this cleanup is currently active.
+ unsigned IsActive : 1;
+
+ /// Whether the normal cleanup should test the activation flag.
+ unsigned TestFlagInNormalCleanup : 1;
+
+ /// Whether the EH cleanup should test the activation flag.
+ unsigned TestFlagInEHCleanup : 1;
+
+ /// The amount of extra storage needed by the Cleanup.
+ /// Always a multiple of the scope-stack alignment.
+ unsigned CleanupSize : 12;
+
+ /// The number of fixups required by enclosing scopes (not including
+ /// this one). If this is the top cleanup scope, all the fixups
+ /// from this index onwards belong to this scope.
+ unsigned FixupDepth : 32 - 17 - NumCommonBits; // currently 13
+ };
+
+ class FilterBitFields {
+ friend class EHFilterScope;
+ unsigned : NumCommonBits;
+
+ unsigned NumFilters : 32 - NumCommonBits;
+ };
+
+ union {
+ CommonBitFields CommonBits;
+ CatchBitFields CatchBits;
+ CleanupBitFields CleanupBits;
+ FilterBitFields FilterBits;
+ };
+
+public:
+ enum Kind { Cleanup, Catch, Terminate, Filter };
+
+ EHScope(Kind kind, EHScopeStack::stable_iterator enclosingEHScope)
+ : CachedLandingPad(0), CachedEHDispatchBlock(0),
+ EnclosingEHScope(enclosingEHScope) {
+ CommonBits.Kind = kind;
+ }
+
+ Kind getKind() const { return static_cast<Kind>(CommonBits.Kind); }
+
+ llvm::BasicBlock *getCachedLandingPad() const {
+ return CachedLandingPad;
+ }
+
+ void setCachedLandingPad(llvm::BasicBlock *block) {
+ CachedLandingPad = block;
+ }
+
+ llvm::BasicBlock *getCachedEHDispatchBlock() const {
+ return CachedEHDispatchBlock;
+ }
+
+ void setCachedEHDispatchBlock(llvm::BasicBlock *block) {
+ CachedEHDispatchBlock = block;
+ }
+
+ bool hasEHBranches() const {
+ if (llvm::BasicBlock *block = getCachedEHDispatchBlock())
+ return !block->use_empty();
+ return false;
+ }
+
+ EHScopeStack::stable_iterator getEnclosingEHScope() const {
+ return EnclosingEHScope;
+ }
+};
+
+/// A scope which attempts to handle some, possibly all, types of
+/// exceptions.
+///
+/// Objective C @finally blocks are represented using a cleanup scope
+/// after the catch scope.
+class EHCatchScope : public EHScope {
+ // In effect, we have a flexible array member
+ // Handler Handlers[0];
+ // But that's only standard in C99, not C++, so we have to do
+ // annoying pointer arithmetic instead.
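+ //
+ // Illustrative layout for getSizeForNumHandlers(N):
+ //   [ EHCatchScope | Handler 0 | ... | Handler N-1 ]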
+
+public:
+ struct Handler {
+ /// A type info value, or null (C++ null, not an LLVM null pointer)
+ /// for a catch-all.
+ llvm::Value *Type;
+
+ /// The catch handler for this type.
+ llvm::BasicBlock *Block;
+
+ bool isCatchAll() const { return Type == 0; }
+ };
+
+private:
+ friend class EHScopeStack;
+
+ Handler *getHandlers() {
+ return reinterpret_cast<Handler*>(this+1);
+ }
+
+ const Handler *getHandlers() const {
+ return reinterpret_cast<const Handler*>(this+1);
+ }
+
+public:
+ static size_t getSizeForNumHandlers(unsigned N) {
+ return sizeof(EHCatchScope) + N * sizeof(Handler);
+ }
+
+ EHCatchScope(unsigned numHandlers,
+ EHScopeStack::stable_iterator enclosingEHScope)
+ : EHScope(Catch, enclosingEHScope) {
+ CatchBits.NumHandlers = numHandlers;
+ }
+
+ unsigned getNumHandlers() const {
+ return CatchBits.NumHandlers;
+ }
+
+ void setCatchAllHandler(unsigned I, llvm::BasicBlock *Block) {
+ setHandler(I, /*catchall*/ 0, Block);
+ }
+
+ void setHandler(unsigned I, llvm::Value *Type, llvm::BasicBlock *Block) {
+ assert(I < getNumHandlers());
+ getHandlers()[I].Type = Type;
+ getHandlers()[I].Block = Block;
+ }
+
+ const Handler &getHandler(unsigned I) const {
+ assert(I < getNumHandlers());
+ return getHandlers()[I];
+ }
+
+ typedef const Handler *iterator;
+ iterator begin() const { return getHandlers(); }
+ iterator end() const { return getHandlers() + getNumHandlers(); }
+
+ static bool classof(const EHScope *Scope) {
+ return Scope->getKind() == Catch;
+ }
+};
+
+/// A cleanup scope which generates the cleanup blocks lazily.
+class EHCleanupScope : public EHScope {
+ /// The nearest normal cleanup scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingNormal;
+
+ /// The nearest EH scope enclosing this one.
+ EHScopeStack::stable_iterator EnclosingEH;
+
+ /// The dual entry/exit block along the normal edge. This is lazily
+ /// created if needed before the cleanup is popped.
+ llvm::BasicBlock *NormalBlock;
+
+ /// An optional i1 variable indicating whether this cleanup has been
+ /// activated yet.
+ llvm::AllocaInst *ActiveFlag;
+
+ /// Extra information required for cleanups that have resolved
+ /// branches through them. This has to be allocated on the side
+ /// because everything on the cleanup stack has to be trivially
+ /// movable.
+ struct ExtInfo {
+ /// The destinations of normal branch-afters and branch-throughs.
+ llvm::SmallPtrSet<llvm::BasicBlock*, 4> Branches;
+
+ /// Normal branch-afters.
+ SmallVector<std::pair<llvm::BasicBlock*,llvm::ConstantInt*>, 4>
+ BranchAfters;
+ };
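+ // Allocated lazily, even from the const accessor below; hence 'mutable'.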
+ mutable struct ExtInfo *ExtInfo;
+
+ struct ExtInfo &getExtInfo() {
+ if (!ExtInfo) ExtInfo = new struct ExtInfo();
+ return *ExtInfo;
+ }
+
+ const struct ExtInfo &getExtInfo() const {
+ if (!ExtInfo) ExtInfo = new struct ExtInfo();
+ return *ExtInfo;
+ }
+
+public:
+ /// Gets the size required for a lazy cleanup scope with the given
+ /// cleanup-data requirements.
+ static size_t getSizeForCleanupSize(size_t Size) {
+ return sizeof(EHCleanupScope) + Size;
+ }
+
+ size_t getAllocatedSize() const {
+ return sizeof(EHCleanupScope) + CleanupBits.CleanupSize;
+ }
+
+ EHCleanupScope(bool isNormal, bool isEH, bool isActive,
+ unsigned cleanupSize, unsigned fixupDepth,
+ EHScopeStack::stable_iterator enclosingNormal,
+ EHScopeStack::stable_iterator enclosingEH)
+ : EHScope(EHScope::Cleanup, enclosingEH), EnclosingNormal(enclosingNormal),
+ NormalBlock(0), ActiveFlag(0), ExtInfo(0) {
+ CleanupBits.IsNormalCleanup = isNormal;
+ CleanupBits.IsEHCleanup = isEH;
+ CleanupBits.IsActive = isActive;
+ CleanupBits.TestFlagInNormalCleanup = false;
+ CleanupBits.TestFlagInEHCleanup = false;
+ CleanupBits.CleanupSize = cleanupSize;
+ CleanupBits.FixupDepth = fixupDepth;
+
+ assert(CleanupBits.CleanupSize == cleanupSize && "cleanup size overflow");
+ }
+
+ ~EHCleanupScope() {
+ delete ExtInfo;
+ }
+
+ bool isNormalCleanup() const { return CleanupBits.IsNormalCleanup; }
+ llvm::BasicBlock *getNormalBlock() const { return NormalBlock; }
+ void setNormalBlock(llvm::BasicBlock *BB) { NormalBlock = BB; }
+
+ bool isEHCleanup() const { return CleanupBits.IsEHCleanup; }
+ llvm::BasicBlock *getEHBlock() const { return getCachedEHDispatchBlock(); }
+ void setEHBlock(llvm::BasicBlock *BB) { setCachedEHDispatchBlock(BB); }
+
+ bool isActive() const { return CleanupBits.IsActive; }
+ void setActive(bool A) { CleanupBits.IsActive = A; }
+
+ llvm::AllocaInst *getActiveFlag() const { return ActiveFlag; }
+ void setActiveFlag(llvm::AllocaInst *Var) { ActiveFlag = Var; }
+
+ void setTestFlagInNormalCleanup() {
+ CleanupBits.TestFlagInNormalCleanup = true;
+ }
+ bool shouldTestFlagInNormalCleanup() const {
+ return CleanupBits.TestFlagInNormalCleanup;
+ }
+
+ void setTestFlagInEHCleanup() {
+ CleanupBits.TestFlagInEHCleanup = true;
+ }
+ bool shouldTestFlagInEHCleanup() const {
+ return CleanupBits.TestFlagInEHCleanup;
+ }
+
+ unsigned getFixupDepth() const { return CleanupBits.FixupDepth; }
+ EHScopeStack::stable_iterator getEnclosingNormalCleanup() const {
+ return EnclosingNormal;
+ }
+
+ size_t getCleanupSize() const { return CleanupBits.CleanupSize; }
+ void *getCleanupBuffer() { return this + 1; }
+
+ EHScopeStack::Cleanup *getCleanup() {
+ return reinterpret_cast<EHScopeStack::Cleanup*>(getCleanupBuffer());
+ }
+
+ /// True if this cleanup scope has any branch-afters or branch-throughs.
+ bool hasBranches() const { return ExtInfo && !ExtInfo->Branches.empty(); }
+
+ /// Add a branch-after to this cleanup scope. A branch-after is a
+ /// branch from a point protected by this (normal) cleanup to a
+ /// point in the normal cleanup scope immediately containing it.
+ /// For example,
+ /// for (;;) { A a; break; }
+ /// contains a branch-after.
+ ///
+ /// Branch-afters each have their own destination out of the
+ /// cleanup, guaranteed distinct from anything else threaded through
+ /// it. Therefore branch-afters usually force a switch after the
+ /// cleanup.
+ void addBranchAfter(llvm::ConstantInt *Index,
+ llvm::BasicBlock *Block) {
+ struct ExtInfo &ExtInfo = getExtInfo();
+ if (ExtInfo.Branches.insert(Block))
+ ExtInfo.BranchAfters.push_back(std::make_pair(Block, Index));
+ }
+
+ /// Return the number of unique branch-afters on this scope.
+ unsigned getNumBranchAfters() const {
+ return ExtInfo ? ExtInfo->BranchAfters.size() : 0;
+ }
+
+ llvm::BasicBlock *getBranchAfterBlock(unsigned I) const {
+ assert(I < getNumBranchAfters());
+ return ExtInfo->BranchAfters[I].first;
+ }
+
+ llvm::ConstantInt *getBranchAfterIndex(unsigned I) const {
+ assert(I < getNumBranchAfters());
+ return ExtInfo->BranchAfters[I].second;
+ }
+
+ /// Add a branch-through to this cleanup scope. A branch-through is
+ /// a branch from a scope protected by this (normal) cleanup to an
+ /// enclosing scope other than the immediately-enclosing normal
+ /// cleanup scope.
+ ///
+ /// In the following example, the branch through B's scope is a
+ /// branch-through, while the branch through A's scope is a
+ /// branch-after:
+ /// for (;;) { A a; B b; break; }
+ ///
+ /// All branch-throughs have a common destination out of the
+ /// cleanup, one possibly shared with the fall-through. Therefore
+ /// branch-throughs usually don't force a switch after the cleanup.
+ ///
+ /// \return true if the branch-through was new to this scope
+ bool addBranchThrough(llvm::BasicBlock *Block) {
+ return getExtInfo().Branches.insert(Block);
+ }
+
+ /// Determines if this cleanup scope has any branch throughs.
+ bool hasBranchThroughs() const {
+ if (!ExtInfo) return false;
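+ // Branches holds both branch-after and branch-through destinations,
+ // so a size difference implies at least one branch-through.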
+ return (ExtInfo->BranchAfters.size() != ExtInfo->Branches.size());
+ }
+
+ static bool classof(const EHScope *Scope) {
+ return (Scope->getKind() == Cleanup);
+ }
+};
+
+/// An exception scope which filters exceptions thrown through it.
+/// Only exceptions matching the filter types will be permitted to be
+/// thrown.
+///
+/// This is used to implement C++ exception specifications.
+class EHFilterScope : public EHScope {
+ // Essentially ends in a flexible array member:
+ // llvm::Value *FilterTypes[0];
+
+ llvm::Value **getFilters() {
+ return reinterpret_cast<llvm::Value**>(this+1);
+ }
+
+ llvm::Value * const *getFilters() const {
+ return reinterpret_cast<llvm::Value* const *>(this+1);
+ }
+
+public:
+ EHFilterScope(unsigned numFilters)
+ : EHScope(Filter, EHScopeStack::stable_end()) {
+ FilterBits.NumFilters = numFilters;
+ }
+
+ static size_t getSizeForNumFilters(unsigned numFilters) {
+ return sizeof(EHFilterScope) + numFilters * sizeof(llvm::Value*);
+ }
+
+ unsigned getNumFilters() const { return FilterBits.NumFilters; }
+
+ void setFilter(unsigned i, llvm::Value *filterValue) {
+ assert(i < getNumFilters());
+ getFilters()[i] = filterValue;
+ }
+
+ llvm::Value *getFilter(unsigned i) const {
+ assert(i < getNumFilters());
+ return getFilters()[i];
+ }
+
+ static bool classof(const EHScope *scope) {
+ return scope->getKind() == Filter;
+ }
+};
+
+/// An exception scope which calls std::terminate if any exception
+/// reaches it.
+class EHTerminateScope : public EHScope {
+public:
+ EHTerminateScope(EHScopeStack::stable_iterator enclosingEHScope)
+ : EHScope(Terminate, enclosingEHScope) {}
+ static size_t getSize() { return sizeof(EHTerminateScope); }
+
+ static bool classof(const EHScope *scope) {
+ return scope->getKind() == Terminate;
+ }
+};
+
+/// A non-stable pointer into the scope stack.
+class EHScopeStack::iterator {
+ char *Ptr;
+
+ friend class EHScopeStack;
+ explicit iterator(char *Ptr) : Ptr(Ptr) {}
+
+public:
+ iterator() : Ptr(0) {}
+
+ EHScope *get() const {
+ return reinterpret_cast<EHScope*>(Ptr);
+ }
+
+ EHScope *operator->() const { return get(); }
+ EHScope &operator*() const { return *get(); }
+
+ iterator &operator++() {
+ switch (get()->getKind()) {
+ case EHScope::Catch:
+ Ptr += EHCatchScope::getSizeForNumHandlers(
+ static_cast<const EHCatchScope*>(get())->getNumHandlers());
+ break;
+
+ case EHScope::Filter:
+ Ptr += EHFilterScope::getSizeForNumFilters(
+ static_cast<const EHFilterScope*>(get())->getNumFilters());
+ break;
+
+ case EHScope::Cleanup:
+ Ptr += static_cast<const EHCleanupScope*>(get())
+ ->getAllocatedSize();
+ break;
+
+ case EHScope::Terminate:
+ Ptr += EHTerminateScope::getSize();
+ break;
+ }
+
+ return *this;
+ }
+
+ iterator next() {
+ iterator copy = *this;
+ ++copy;
+ return copy;
+ }
+
+ iterator operator++(int) {
+ iterator copy = *this;
+ operator++();
+ return copy;
+ }
+
+ bool encloses(iterator other) const { return Ptr >= other.Ptr; }
+ bool strictlyEncloses(iterator other) const { return Ptr > other.Ptr; }
+
+ bool operator==(iterator other) const { return Ptr == other.Ptr; }
+ bool operator!=(iterator other) const { return Ptr != other.Ptr; }
+};
+
+inline EHScopeStack::iterator EHScopeStack::begin() const {
+ return iterator(StartOfData);
+}
+
+inline EHScopeStack::iterator EHScopeStack::end() const {
+ return iterator(EndOfBuffer);
+}
+
+inline void EHScopeStack::popCatch() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ EHCatchScope &scope = cast<EHCatchScope>(*begin());
+ InnermostEHScope = scope.getEnclosingEHScope();
+ StartOfData += EHCatchScope::getSizeForNumHandlers(scope.getNumHandlers());
+}
+
+inline void EHScopeStack::popTerminate() {
+ assert(!empty() && "popping exception stack when not empty");
+
+ EHTerminateScope &scope = cast<EHTerminateScope>(*begin());
+ InnermostEHScope = scope.getEnclosingEHScope();
+ StartOfData += EHTerminateScope::getSize();
+}
+
+inline EHScopeStack::iterator EHScopeStack::find(stable_iterator sp) const {
+ assert(sp.isValid() && "finding invalid savepoint");
+ assert(sp.Size <= stable_begin().Size && "finding savepoint after pop");
+ return iterator(EndOfBuffer - sp.Size);
+}
+
+inline EHScopeStack::stable_iterator
+EHScopeStack::stabilize(iterator ir) const {
+ assert(StartOfData <= ir.Ptr && ir.Ptr <= EndOfBuffer);
+ return stable_iterator(EndOfBuffer - ir.Ptr);
+}
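+
+// Note: a stable_iterator is simply an offset from EndOfBuffer, so it
+// remains meaningful while scopes are pushed and popped beneath it;
+// find() and stabilize() invert one another.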
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
new file mode 100644
index 0000000..7301d20
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -0,0 +1,2668 @@
+//===--- CGDebugInfo.cpp - Emit Debug Information for a Module ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the debug information generation while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGBlocks.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/Version.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Constants.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Instructions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/Dwarf.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace clang::CodeGen;
+
+CGDebugInfo::CGDebugInfo(CodeGenModule &CGM)
+ : CGM(CGM), DBuilder(CGM.getModule()),
+ BlockLiteralGenericSet(false) {
+ CreateCompileUnit();
+}
+
+CGDebugInfo::~CGDebugInfo() {
+ assert(LexicalBlockStack.empty() &&
+ "Region stack mismatch, stack not empty!");
+}
+
+void CGDebugInfo::setLocation(SourceLocation Loc) {
+ // If the new location isn't valid return.
+ if (!Loc.isValid()) return;
+
+ CurLoc = CGM.getContext().getSourceManager().getExpansionLoc(Loc);
+
+ // If we've changed files in the middle of a lexical scope, go ahead
+ // and create a new lexical scope with a file node if it's different
+ // from the one already in the scope.
+ if (LexicalBlockStack.empty()) return;
+
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PCLoc = SM.getPresumedLoc(CurLoc);
+ PresumedLoc PPLoc = SM.getPresumedLoc(PrevLoc);
+
+ if (PCLoc.isInvalid() || PPLoc.isInvalid() ||
+ !strcmp(PPLoc.getFilename(), PCLoc.getFilename()))
+ return;
+
+ llvm::MDNode *LB = LexicalBlockStack.back();
+ llvm::DIScope Scope = llvm::DIScope(LB);
+ if (Scope.isLexicalBlockFile()) {
+ llvm::DILexicalBlockFile LBF = llvm::DILexicalBlockFile(LB);
+ llvm::DIDescriptor D
+ = DBuilder.createLexicalBlockFile(LBF.getScope(),
+ getOrCreateFile(CurLoc));
+ llvm::MDNode *N = D;
+ LexicalBlockStack.pop_back();
+ LexicalBlockStack.push_back(N);
+ } else if (Scope.isLexicalBlock()) {
+ llvm::DIDescriptor D
+ = DBuilder.createLexicalBlockFile(Scope, getOrCreateFile(CurLoc));
+ llvm::MDNode *N = D;
+ LexicalBlockStack.pop_back();
+ LexicalBlockStack.push_back(N);
+ }
+}
+
+/// getContextDescriptor - Get context info for the decl.
+llvm::DIDescriptor CGDebugInfo::getContextDescriptor(const Decl *Context) {
+ if (!Context)
+ return TheCU;
+
+ llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator
+ I = RegionMap.find(Context);
+ if (I != RegionMap.end())
+ return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(&*I->second));
+
+ // Check namespace.
+ if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context))
+ return llvm::DIDescriptor(getOrCreateNameSpace(NSDecl));
+
+ if (const RecordDecl *RDecl = dyn_cast<RecordDecl>(Context)) {
+ if (!RDecl->isDependentType()) {
+ llvm::DIType Ty = getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
+ getOrCreateMainFile());
+ return llvm::DIDescriptor(Ty);
+ }
+ }
+ return TheCU;
+}
+
+/// getFunctionName - Get function name for the given FunctionDecl. If the
+/// name is constructed on demand (e.g. C++ destructor) then the name
+/// is stored on the side.
+StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
+ assert (FD && "Invalid FunctionDecl!");
+ IdentifierInfo *FII = FD->getIdentifier();
+ FunctionTemplateSpecializationInfo *Info
+ = FD->getTemplateSpecializationInfo();
+ if (!Info && FII)
+ return FII->getName();
+
+ // Otherwise construct human readable name for debug info.
+ std::string NS = FD->getNameAsString();
+
+ // Add any template specialization args.
+ if (Info) {
+ const TemplateArgumentList *TArgs = Info->TemplateArguments;
+ const TemplateArgument *Args = TArgs->data();
+ unsigned NumArgs = TArgs->size();
+ PrintingPolicy Policy(CGM.getLangOpts());
+ NS += TemplateSpecializationType::PrintTemplateArgumentList(Args,
+ NumArgs,
+ Policy);
+ }
+
+ // Copy this name on the side and use its reference.
+ char *StrPtr = DebugInfoNames.Allocate<char>(NS.length());
+ memcpy(StrPtr, NS.data(), NS.length());
+ return StringRef(StrPtr, NS.length());
+}
+
+StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
+ SmallString<256> MethodName;
+ llvm::raw_svector_ostream OS(MethodName);
+ OS << (OMD->isInstanceMethod() ? '-' : '+') << '[';
+ const DeclContext *DC = OMD->getDeclContext();
+ if (const ObjCImplementationDecl *OID =
+ dyn_cast<const ObjCImplementationDecl>(DC)) {
+ OS << OID->getName();
+ } else if (const ObjCInterfaceDecl *OID =
+ dyn_cast<const ObjCInterfaceDecl>(DC)) {
+ OS << OID->getName();
+ } else if (const ObjCCategoryImplDecl *OCD =
+ dyn_cast<const ObjCCategoryImplDecl>(DC)){
+ OS << ((NamedDecl *)OCD)->getIdentifier()->getNameStart() << '(' <<
+ OCD->getIdentifier()->getNameStart() << ')';
+ }
+ OS << ' ' << OMD->getSelector().getAsString() << ']';
+
+ char *StrPtr = DebugInfoNames.Allocate<char>(OS.tell());
+ memcpy(StrPtr, MethodName.begin(), OS.tell());
+ return StringRef(StrPtr, OS.tell());
+}
+
+/// getSelectorName - Return selector name. This is used for debugging
+/// info.
+StringRef CGDebugInfo::getSelectorName(Selector S) {
+ const std::string &SName = S.getAsString();
+ char *StrPtr = DebugInfoNames.Allocate<char>(SName.size());
+ memcpy(StrPtr, SName.data(), SName.size());
+ return StringRef(StrPtr, SName.size());
+}
+
+/// getClassName - Get class name including template argument list.
+StringRef
+CGDebugInfo::getClassName(const RecordDecl *RD) {
+ const ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(RD);
+ if (!Spec)
+ return RD->getName();
+
+ const TemplateArgument *Args;
+ unsigned NumArgs;
+ std::string Buffer;
+ if (TypeSourceInfo *TAW = Spec->getTypeAsWritten()) {
+ const TemplateSpecializationType *TST =
+ cast<TemplateSpecializationType>(TAW->getType());
+ Args = TST->getArgs();
+ NumArgs = TST->getNumArgs();
+ } else {
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ Args = TemplateArgs.data();
+ NumArgs = TemplateArgs.size();
+ }
+ Buffer = RD->getIdentifier()->getNameStart();
+ PrintingPolicy Policy(CGM.getLangOpts());
+ Buffer += TemplateSpecializationType::PrintTemplateArgumentList(Args,
+ NumArgs,
+ Policy);
+
+ // Copy this name on the side and use its reference.
+ char *StrPtr = DebugInfoNames.Allocate<char>(Buffer.length());
+ memcpy(StrPtr, Buffer.data(), Buffer.length());
+ return StringRef(StrPtr, Buffer.length());
+}
+
+/// getOrCreateFile - Get the file debug info descriptor for the input location.
+llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
+ if (!Loc.isValid())
+ // If Location is not valid then use main input file.
+ return DBuilder.createFile(TheCU.getFilename(), TheCU.getDirectory());
+
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+
+ if (PLoc.isInvalid() || StringRef(PLoc.getFilename()).empty())
+ // If the location is not valid then use main input file.
+ return DBuilder.createFile(TheCU.getFilename(), TheCU.getDirectory());
+
+ // Cache the results.
+ const char *fname = PLoc.getFilename();
+ llvm::DenseMap<const char *, llvm::WeakVH>::iterator it =
+ DIFileCache.find(fname);
+
+ if (it != DIFileCache.end()) {
+ // Verify that the information still exists.
+ if (&*it->second)
+ return llvm::DIFile(cast<llvm::MDNode>(it->second));
+ }
+
+ llvm::DIFile F = DBuilder.createFile(PLoc.getFilename(), getCurrentDirname());
+
+ DIFileCache[fname] = F;
+ return F;
+}
+
+/// getOrCreateMainFile - Get the file info for main compile unit.
+llvm::DIFile CGDebugInfo::getOrCreateMainFile() {
+ return DBuilder.createFile(TheCU.getFilename(), TheCU.getDirectory());
+}
+
+/// getLineNumber - Get line number for the location. If location is invalid
+/// then use current location.
+unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
+ if (Loc.isInvalid() && CurLoc.isInvalid())
+ return 0;
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
+ return PLoc.isValid()? PLoc.getLine() : 0;
+}
+
+/// getColumnNumber - Get column number for the location. If location is
+/// invalid then use current location.
+unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc) {
+ if (Loc.isInvalid() && CurLoc.isInvalid())
+ return 0;
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
+ return PLoc.isValid()? PLoc.getColumn() : 0;
+}
+
+StringRef CGDebugInfo::getCurrentDirname() {
+ if (!CGM.getCodeGenOpts().DebugCompilationDir.empty())
+ return CGM.getCodeGenOpts().DebugCompilationDir;
+
+ if (!CWDName.empty())
+ return CWDName;
+ SmallString<256> CWD;
+ llvm::sys::fs::current_path(CWD);
+ char *CompDirnamePtr = DebugInfoNames.Allocate<char>(CWD.size());
+ memcpy(CompDirnamePtr, CWD.data(), CWD.size());
+ return CWDName = StringRef(CompDirnamePtr, CWD.size());
+}
+
+/// CreateCompileUnit - Create new compile unit.
+void CGDebugInfo::CreateCompileUnit() {
+
+ // Get absolute path name.
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ std::string MainFileName = CGM.getCodeGenOpts().MainFileName;
+ if (MainFileName.empty())
+ MainFileName = "<unknown>";
+
+ // The main file name provided via the "-main-file-name" option contains just
+ // the file name itself with no path information. This file name may have had
+ // a relative path, so we look into the actual file entry for the main
+ // file to determine the real absolute path for the file.
+ std::string MainFileDir;
+ if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
+ MainFileDir = MainFile->getDir()->getName();
+ if (MainFileDir != ".")
+ MainFileName = MainFileDir + "/" + MainFileName;
+ }
+
+ // Save filename string.
+ char *FilenamePtr = DebugInfoNames.Allocate<char>(MainFileName.length());
+ memcpy(FilenamePtr, MainFileName.c_str(), MainFileName.length());
+ StringRef Filename(FilenamePtr, MainFileName.length());
+
+ unsigned LangTag;
+ const LangOptions &LO = CGM.getLangOpts();
+ if (LO.CPlusPlus) {
+ if (LO.ObjC1)
+ LangTag = llvm::dwarf::DW_LANG_ObjC_plus_plus;
+ else
+ LangTag = llvm::dwarf::DW_LANG_C_plus_plus;
+ } else if (LO.ObjC1) {
+ LangTag = llvm::dwarf::DW_LANG_ObjC;
+ } else if (LO.C99) {
+ LangTag = llvm::dwarf::DW_LANG_C99;
+ } else {
+ LangTag = llvm::dwarf::DW_LANG_C89;
+ }
+
+ std::string Producer = getClangFullVersion();
+
+ // Figure out which version of the ObjC runtime we have.
+ unsigned RuntimeVers = 0;
+ if (LO.ObjC1)
+ RuntimeVers = LO.ObjCNonFragileABI ? 2 : 1;
+
+ // Create new compile unit.
+ DBuilder.createCompileUnit(
+ LangTag, Filename, getCurrentDirname(),
+ Producer,
+ LO.Optimize, CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers);
+ // FIXME - Eliminate TheCU.
+ TheCU = llvm::DICompileUnit(DBuilder.getCU());
+}
+
+/// CreateType - Get the Basic type from the cache or create a new
+/// one if necessary.
+llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
+ unsigned Encoding = 0;
+ const char *BTName = NULL;
+ switch (BT->getKind()) {
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ case BuiltinType::Dependent:
+ llvm_unreachable("Unexpected builtin type");
+ case BuiltinType::NullPtr:
+ return DBuilder.
+ createNullPtrType(BT->getName(CGM.getContext().getLangOpts()));
+ case BuiltinType::Void:
+ return llvm::DIType();
+ case BuiltinType::ObjCClass:
+ return DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", getOrCreateMainFile(),
+ 0);
+ case BuiltinType::ObjCId: {
+ // typedef struct objc_class *Class;
+ // typedef struct objc_object {
+ // Class isa;
+ // } *id;
+
+ // TODO: Cache these two types to avoid duplicates.
+ llvm::DIType OCTy =
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_class", getOrCreateMainFile(),
+ 0);
+ unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
+
+ llvm::DIType ISATy = DBuilder.createPointerType(OCTy, Size);
+
+ SmallVector<llvm::Value *, 16> EltTys;
+ llvm::DIType FieldTy =
+ DBuilder.createMemberType(getOrCreateMainFile(), "isa",
+ getOrCreateMainFile(), 0, Size,
+ 0, 0, 0, ISATy);
+ EltTys.push_back(FieldTy);
+ llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
+
+ return DBuilder.createStructType(TheCU, "objc_object",
+ getOrCreateMainFile(),
+ 0, 0, 0, 0, Elements);
+ }
+ case BuiltinType::ObjCSel: {
+ return
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ "objc_selector", getOrCreateMainFile(),
+ 0);
+ }
+ case BuiltinType::UChar:
+ case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar: Encoding = llvm::dwarf::DW_ATE_signed_char; break;
+ case BuiltinType::Char16:
+ case BuiltinType::Char32: Encoding = llvm::dwarf::DW_ATE_UTF; break;
+ case BuiltinType::UShort:
+ case BuiltinType::UInt:
+ case BuiltinType::UInt128:
+ case BuiltinType::ULong:
+ case BuiltinType::WChar_U:
+ case BuiltinType::ULongLong: Encoding = llvm::dwarf::DW_ATE_unsigned; break;
+ case BuiltinType::Short:
+ case BuiltinType::Int:
+ case BuiltinType::Int128:
+ case BuiltinType::Long:
+ case BuiltinType::WChar_S:
+ case BuiltinType::LongLong: Encoding = llvm::dwarf::DW_ATE_signed; break;
+ case BuiltinType::Bool: Encoding = llvm::dwarf::DW_ATE_boolean; break;
+ case BuiltinType::Half:
+ case BuiltinType::Float:
+ case BuiltinType::LongDouble:
+ case BuiltinType::Double: Encoding = llvm::dwarf::DW_ATE_float; break;
+ }
+
+ switch (BT->getKind()) {
+ case BuiltinType::Long: BTName = "long int"; break;
+ case BuiltinType::LongLong: BTName = "long long int"; break;
+ case BuiltinType::ULong: BTName = "long unsigned int"; break;
+ case BuiltinType::ULongLong: BTName = "long long unsigned int"; break;
+ default:
+ BTName = BT->getName(CGM.getContext().getLangOpts());
+ break;
+ }
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(BT);
+ uint64_t Align = CGM.getContext().getTypeAlign(BT);
+ llvm::DIType DbgTy =
+ DBuilder.createBasicType(BTName, Size, Align, Encoding);
+ return DbgTy;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty) {
+ // Bit size, align and offset of the type.
+ unsigned Encoding = llvm::dwarf::DW_ATE_complex_float;
+ if (Ty->isComplexIntegerType())
+ Encoding = llvm::dwarf::DW_ATE_lo_user;
+
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ llvm::DIType DbgTy =
+ DBuilder.createBasicType("complex", Size, Align, Encoding);
+
+ return DbgTy;
+}
+
+/// CreateQualifiedType - Get the qualified type from the cache or create
+/// a new one if necessary.
+llvm::DIType CGDebugInfo::CreateQualifiedType(QualType Ty, llvm::DIFile Unit) {
+ QualifierCollector Qc;
+ const Type *T = Qc.strip(Ty);
+
+ // Ignore these qualifiers for now.
+ Qc.removeObjCGCAttr();
+ Qc.removeAddressSpace();
+ Qc.removeObjCLifetime();
+
+ // Create one derived type per qualifier and recurse to handle any
+ // additional ones.
+ unsigned Tag;
+ if (Qc.hasConst()) {
+ Tag = llvm::dwarf::DW_TAG_const_type;
+ Qc.removeConst();
+ } else if (Qc.hasVolatile()) {
+ Tag = llvm::dwarf::DW_TAG_volatile_type;
+ Qc.removeVolatile();
+ } else if (Qc.hasRestrict()) {
+ Tag = llvm::dwarf::DW_TAG_restrict_type;
+ Qc.removeRestrict();
+ } else {
+ assert(Qc.empty() && "Unknown type qualifier for debug info");
+ return getOrCreateType(QualType(T, 0), Unit);
+ }
+
+ llvm::DIType FromTy = getOrCreateType(Qc.apply(CGM.getContext(), T), Unit);
+
+ // No need to fill in the Name, Line, Size, Alignment, Offset in case of
+ // CVR derived types.
+ llvm::DIType DbgTy = DBuilder.createQualifiedType(Tag, FromTy);
+
+ return DbgTy;
+}
+
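+/// CreateType - Get debug info for an Objective-C object pointer type; it is
+/// emitted as an ordinary pointer to the pointee type.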
+llvm::DIType CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty,
+ llvm::DIFile Unit) {
+ llvm::DIType DbgTy =
+ CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
+ Ty->getPointeeType(), Unit);
+ return DbgTy;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
+ llvm::DIFile Unit) {
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
+ Ty->getPointeeType(), Unit);
+}
+
+// Creates a forward declaration for a RecordDecl in the given context.
+llvm::DIType CGDebugInfo::createRecordFwdDecl(const RecordDecl *RD,
+ llvm::DIDescriptor Ctx) {
+ llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
+ unsigned Line = getLineNumber(RD->getLocation());
+ StringRef RDName = RD->getName();
+
+ // Get the tag.
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
+ unsigned Tag = 0;
+ if (CXXDecl) {
+ RDName = getClassName(RD);
+ Tag = llvm::dwarf::DW_TAG_class_type;
+ }
+ else if (RD->isStruct())
+ Tag = llvm::dwarf::DW_TAG_structure_type;
+ else if (RD->isUnion())
+ Tag = llvm::dwarf::DW_TAG_union_type;
+ else
+ llvm_unreachable("Unknown RecordDecl type!");
+
+ // Create the type.
+ return DBuilder.createForwardDecl(Tag, RDName, DefUnit, Line);
+}
+
+// Walk up the context chain and create forward decls for record decls,
+// and normal descriptors for namespaces.
+llvm::DIDescriptor CGDebugInfo::createContextChain(const Decl *Context) {
+ if (!Context)
+ return TheCU;
+
+ // See if we already have the parent.
+ llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator
+ I = RegionMap.find(Context);
+ if (I != RegionMap.end())
+ return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(&*I->second));
+
+ // Check namespace.
+ if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context))
+ return llvm::DIDescriptor(getOrCreateNameSpace(NSDecl));
+
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(Context)) {
+ if (!RD->isDependentType()) {
+ llvm::DIType Ty = getOrCreateLimitedType(CGM.getContext().getTypeDeclType(RD),
+ getOrCreateMainFile());
+ return llvm::DIDescriptor(Ty);
+ }
+ }
+ return TheCU;
+}
+
+/// CreatePointeeType - Create the pointee type. If the pointee is a record,
+/// only emit the record's forward declaration when debug info size reduction
+/// is enabled.
+llvm::DIType CGDebugInfo::CreatePointeeType(QualType PointeeTy,
+ llvm::DIFile Unit) {
+ if (!CGM.getCodeGenOpts().LimitDebugInfo)
+ return getOrCreateType(PointeeTy, Unit);
+
+ // Limit debug info for the pointee type.
+
+ // If we already have a type for this, use it; that is still cheaper than
+ // creating a new type.
+ llvm::DIType Ty = getTypeOrNull(PointeeTy);
+ if (Ty.Verify()) return Ty;
+
+ // Handle qualifiers.
+ if (PointeeTy.hasLocalQualifiers())
+ return CreateQualifiedType(PointeeTy, Unit);
+
+ if (const RecordType *RTy = dyn_cast<RecordType>(PointeeTy)) {
+ RecordDecl *RD = RTy->getDecl();
+ llvm::DIDescriptor FDContext =
+ getContextDescriptor(cast<Decl>(RD->getDeclContext()));
+ llvm::DIType RetTy = createRecordFwdDecl(RD, FDContext);
+ TypeCache[QualType(RTy, 0).getAsOpaquePtr()] = RetTy;
+ return RetTy;
+ }
+ return getOrCreateType(PointeeTy, Unit);
+}
+
+llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
+ const Type *Ty,
+ QualType PointeeTy,
+ llvm::DIFile Unit) {
+ if (Tag == llvm::dwarf::DW_TAG_reference_type)
+ return DBuilder.createReferenceType(CreatePointeeType(PointeeTy, Unit));
+
+ // Bit size, align and offset of the type.
+ // Size is always the size of a pointer. We can't use getTypeSize here
+ // because that does not return the correct value for references.
+ unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
+ uint64_t Size = CGM.getContext().getTargetInfo().getPointerWidth(AS);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ return DBuilder.createPointerType(CreatePointeeType(PointeeTy, Unit),
+ Size, Align);
+}
+
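+/// CreateType - Build debug info for a block pointer type by describing the
+/// generic block literal layout (__isa, __flags, __reserved, __FuncPtr,
+/// __descriptor); the result is cached in BlockLiteralGeneric so it is only
+/// built once.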
+llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
+ llvm::DIFile Unit) {
+ if (BlockLiteralGenericSet)
+ return BlockLiteralGeneric;
+
+ SmallVector<llvm::Value *, 8> EltTys;
+ llvm::DIType FieldTy;
+ QualType FType;
+ uint64_t FieldSize, FieldOffset;
+ unsigned FieldAlign;
+ llvm::DIArray Elements;
+ llvm::DIType EltTy, DescTy;
+
+ FieldOffset = 0;
+ FType = CGM.getContext().UnsignedLongTy;
+ EltTys.push_back(CreateMemberType(Unit, FType, "reserved", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "Size", &FieldOffset));
+
+ Elements = DBuilder.getOrCreateArray(EltTys);
+ EltTys.clear();
+
+ unsigned Flags = llvm::DIDescriptor::FlagAppleBlock;
+ unsigned LineNo = getLineNumber(CurLoc);
+
+ EltTy = DBuilder.createStructType(Unit, "__block_descriptor",
+ Unit, LineNo, FieldOffset, 0,
+ Flags, Elements);
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+
+ DescTy = DBuilder.createPointerType(EltTy, Size);
+
+ FieldOffset = 0;
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset));
+ FType = CGM.getContext().IntTy;
+ EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__reserved", &FieldOffset));
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__FuncPtr", &FieldOffset));
+
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ FieldTy = DescTy;
+ FieldSize = CGM.getContext().getTypeSize(Ty);
+ FieldAlign = CGM.getContext().getTypeAlign(Ty);
+ FieldTy = DBuilder.createMemberType(Unit, "__descriptor", Unit,
+ LineNo, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+
+ FieldOffset += FieldSize;
+ Elements = DBuilder.getOrCreateArray(EltTys);
+
+ EltTy = DBuilder.createStructType(Unit, "__block_literal_generic",
+ Unit, LineNo, FieldOffset, 0,
+ Flags, Elements);
+
+ BlockLiteralGenericSet = true;
+ BlockLiteralGeneric = DBuilder.createPointerType(EltTy, Size);
+ return BlockLiteralGeneric;
+}
+
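+/// CreateType - Build a typedef debug entry that refers to the underlying
+/// type and records where the typedef itself was declared.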
+llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty, llvm::DIFile Unit) {
+ // Typedefs are derived from some other type. If we have a typedef of a
+ // typedef, make sure to emit the whole chain.
+ llvm::DIType Src = getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit);
+ if (!Src.Verify())
+ return llvm::DIType();
+ // We don't set size information, but do specify where the typedef was
+ // declared.
+ unsigned Line = getLineNumber(Ty->getDecl()->getLocation());
+ const TypedefNameDecl *TyDecl = Ty->getDecl();
+
+ llvm::DIDescriptor TypedefContext =
+ getContextDescriptor(cast<Decl>(Ty->getDecl()->getDeclContext()));
+
+ return
+ DBuilder.createTypedef(Src, TyDecl->getName(), Unit, Line, TypedefContext);
+}
+
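+/// CreateType - Build a subroutine type from the result type and the
+/// parameter types; unprototyped functions get an unspecified-parameters
+/// entry instead of an argument list.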
+llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
+ llvm::DIFile Unit) {
+ SmallVector<llvm::Value *, 16> EltTys;
+
+ // Add the result type at least.
+ EltTys.push_back(getOrCreateType(Ty->getResultType(), Unit));
+
+ // Set up remainder of arguments if there is a prototype.
+ // FIXME: IF NOT, HOW IS THIS REPRESENTED? llvm-gcc doesn't represent '...'!
+ if (isa<FunctionNoProtoType>(Ty))
+ EltTys.push_back(DBuilder.createUnspecifiedParameter());
+ else if (const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(Ty)) {
+ for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
+ EltTys.push_back(getOrCreateType(FTP->getArgType(i), Unit));
+ }
+
+ llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(EltTys);
+
+ llvm::DIType DbgTy = DBuilder.createSubroutineType(Unit, EltTypeArray);
+ return DbgTy;
+}
+
+
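+/// CollectRecordStaticVars - Emit debug descriptors for a record's static
+/// data members that have an evaluatable integral initializer; members of
+/// enumeration type are skipped.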
+void CGDebugInfo::
+CollectRecordStaticVars(const RecordDecl *RD, llvm::DIType FwdDecl) {
+
+ for (RecordDecl::decl_iterator I = RD->decls_begin(), E = RD->decls_end();
+ I != E; ++I)
+ if (const VarDecl *V = dyn_cast<VarDecl>(*I)) {
+ if (V->getInit()) {
+ const APValue *Value = V->evaluateValue();
+ if (Value && Value->isInt()) {
+ llvm::ConstantInt *CI
+ = llvm::ConstantInt::get(CGM.getLLVMContext(), Value->getInt());
+
+ // Create the descriptor for static variable.
+ llvm::DIFile VUnit = getOrCreateFile(V->getLocation());
+ StringRef VName = V->getName();
+ llvm::DIType VTy = getOrCreateType(V->getType(), VUnit);
+ // Do not use DIGlobalVariable for enums.
+ if (VTy.getTag() != llvm::dwarf::DW_TAG_enumeration_type) {
+ DBuilder.createStaticVariable(FwdDecl, VName, VName, VUnit,
+ getLineNumber(V->getLocation()),
+ VTy, true, CI);
+ }
+ }
+ }
+ }
+}
+
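+/// createFieldType - Create debug info for a single record field, honoring
+/// any bit-field size override and mapping its access specifier onto the
+/// corresponding flags.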
+llvm::DIType CGDebugInfo::createFieldType(StringRef name,
+ QualType type,
+ uint64_t sizeInBitsOverride,
+ SourceLocation loc,
+ AccessSpecifier AS,
+ uint64_t offsetInBits,
+ llvm::DIFile tunit,
+ llvm::DIDescriptor scope) {
+ llvm::DIType debugType = getOrCreateType(type, tunit);
+
+ // Get the location for the field.
+ llvm::DIFile file = getOrCreateFile(loc);
+ unsigned line = getLineNumber(loc);
+
+ uint64_t sizeInBits = 0;
+ unsigned alignInBits = 0;
+ if (!type->isIncompleteArrayType()) {
+ llvm::tie(sizeInBits, alignInBits) = CGM.getContext().getTypeInfo(type);
+
+ if (sizeInBitsOverride)
+ sizeInBits = sizeInBitsOverride;
+ }
+
+ unsigned flags = 0;
+ if (AS == clang::AS_private)
+ flags |= llvm::DIDescriptor::FlagPrivate;
+ else if (AS == clang::AS_protected)
+ flags |= llvm::DIDescriptor::FlagProtected;
+
+ return DBuilder.createMemberType(scope, name, file, line, sizeInBits,
+ alignInBits, offsetInBits, flags, debugType);
+}
+
+/// CollectRecordFields - A helper function to collect debug info for
+/// record fields. This is used while creating debug info entry for a Record.
+void CGDebugInfo::
+CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
+ SmallVectorImpl<llvm::Value *> &elements,
+ llvm::DIType RecordTy) {
+ unsigned fieldNo = 0;
+ const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(record);
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(record);
+
+ // For C++11 lambdas each field corresponds to a capture, but the capture
+ // has the name and the location of the variable, so we iterate over
+ // both concurrently.
+ if (CXXDecl && CXXDecl->isLambda()) {
+ RecordDecl::field_iterator Field = CXXDecl->field_begin();
+ unsigned fieldno = 0;
+ for (CXXRecordDecl::capture_const_iterator I = CXXDecl->captures_begin(),
+ E = CXXDecl->captures_end(); I != E; ++I, ++Field, ++fieldno) {
+ const LambdaExpr::Capture C = *I;
+ // TODO: 'this' captures still need handling, probably by renaming the
+ // lambda class's 'this' and emitting a field member for it.
+ if (C.capturesVariable()) {
+ VarDecl *V = C.getCapturedVar();
+ llvm::DIFile VUnit = getOrCreateFile(C.getLocation());
+ StringRef VName = V->getName();
+ uint64_t SizeInBitsOverride = 0;
+ if (Field->isBitField()) {
+ SizeInBitsOverride = Field->getBitWidthValue(CGM.getContext());
+ assert(SizeInBitsOverride && "found named 0-width bitfield");
+ }
+ llvm::DIType fieldType
+ = createFieldType(VName, Field->getType(), SizeInBitsOverride, C.getLocation(),
+ Field->getAccess(), layout.getFieldOffset(fieldno),
+ VUnit, RecordTy);
+ elements.push_back(fieldType);
+ }
+ }
+ } else {
+ bool IsMsStruct = record->hasAttr<MsStructAttr>();
+ const FieldDecl *LastFD = 0;
+ for (RecordDecl::field_iterator I = record->field_begin(),
+ E = record->field_end();
+ I != E; ++I, ++fieldNo) {
+ FieldDecl *field = *I;
+
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are ignored
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((field), LastFD)) {
+ --fieldNo;
+ continue;
+ }
+ LastFD = field;
+ }
+
+ StringRef name = field->getName();
+ QualType type = field->getType();
+
+ // Ignore unnamed fields unless they're anonymous structs/unions.
+ if (name.empty() && !type->isRecordType()) {
+ LastFD = field;
+ continue;
+ }
+
+ uint64_t SizeInBitsOverride = 0;
+ if (field->isBitField()) {
+ SizeInBitsOverride = field->getBitWidthValue(CGM.getContext());
+ assert(SizeInBitsOverride && "found named 0-width bitfield");
+ }
+
+ llvm::DIType fieldType
+ = createFieldType(name, type, SizeInBitsOverride,
+ field->getLocation(), field->getAccess(),
+ layout.getFieldOffset(fieldNo), tunit, RecordTy);
+
+ elements.push_back(fieldType);
+ }
+ }
+}
+
+/// getOrCreateMethodType - A CXXMethodDecl's type is a FunctionType that does
+/// not include the implicit "this" pointer. Use this routine to get a method
+/// type which does include the "this" pointer.
+llvm::DIType
+CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
+ llvm::DIFile Unit) {
+ llvm::DIType FnTy
+ = getOrCreateType(QualType(Method->getType()->getAs<FunctionProtoType>(),
+ 0),
+ Unit);
+
+ // Add "this" pointer.
+ llvm::DIArray Args = llvm::DICompositeType(FnTy).getTypeArray();
+ assert (Args.getNumElements() && "Invalid number of arguments!");
+
+ SmallVector<llvm::Value *, 16> Elts;
+
+ // First element is always return type. For 'void' functions it is NULL.
+ Elts.push_back(Args.getElement(0));
+
+ if (!Method->isStatic()) {
+ // "this" pointer is always first argument.
+ QualType ThisPtr = Method->getThisType(CGM.getContext());
+
+ const CXXRecordDecl *RD = Method->getParent();
+ if (isa<ClassTemplateSpecializationDecl>(RD)) {
+ // Create pointer type directly in this case.
+ const PointerType *ThisPtrTy = cast<PointerType>(ThisPtr);
+ QualType PointeeTy = ThisPtrTy->getPointeeType();
+ unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
+ uint64_t Size = CGM.getContext().getTargetInfo().getPointerWidth(AS);
+ uint64_t Align = CGM.getContext().getTypeAlign(ThisPtrTy);
+ llvm::DIType PointeeType = getOrCreateType(PointeeTy, Unit);
+ llvm::DIType ThisPtrType = DBuilder.createPointerType(PointeeType, Size, Align);
+ TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
+ // TODO: This and the artificial type below are misleading; the
+ // types aren't artificial, the argument is, but the current
+ // metadata doesn't represent that.
+ ThisPtrType = DBuilder.createArtificialType(ThisPtrType);
+ Elts.push_back(ThisPtrType);
+ } else {
+ llvm::DIType ThisPtrType = getOrCreateType(ThisPtr, Unit);
+ TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
+ ThisPtrType = DBuilder.createArtificialType(ThisPtrType);
+ Elts.push_back(ThisPtrType);
+ }
+ }
+
+ // Copy rest of the arguments.
+ for (unsigned i = 1, e = Args.getNumElements(); i != e; ++i)
+ Elts.push_back(Args.getElement(i));
+
+ llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(Elts);
+
+ return DBuilder.createSubroutineType(Unit, EltTypeArray);
+}
+
+/// isFunctionLocalClass - Return true if CXXRecordDecl is defined
+/// inside a function.
+static bool isFunctionLocalClass(const CXXRecordDecl *RD) {
+ if (const CXXRecordDecl *NRD = dyn_cast<CXXRecordDecl>(RD->getDeclContext()))
+ return isFunctionLocalClass(NRD);
+ if (isa<FunctionDecl>(RD->getDeclContext()))
+ return true;
+ return false;
+}
+
+/// CreateCXXMemberFunction - A helper function to create a DISubprogram for
+/// a single member function GlobalDecl.
+llvm::DISubprogram
+CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
+ llvm::DIFile Unit,
+ llvm::DIType RecordTy) {
+ bool IsCtorOrDtor =
+ isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method);
+
+ StringRef MethodName = getFunctionName(Method);
+ llvm::DIType MethodTy = getOrCreateMethodType(Method, Unit);
+
+ // Since a single ctor/dtor corresponds to multiple functions, it doesn't
+ // make sense to give a single ctor/dtor a linkage name.
+ StringRef MethodLinkageName;
+ if (!IsCtorOrDtor && !isFunctionLocalClass(Method->getParent()))
+ MethodLinkageName = CGM.getMangledName(Method);
+
+ // Get the location for the method.
+ llvm::DIFile MethodDefUnit = getOrCreateFile(Method->getLocation());
+ unsigned MethodLine = getLineNumber(Method->getLocation());
+
+ // Collect virtual method info.
+ llvm::DIType ContainingType;
+ unsigned Virtuality = 0;
+ unsigned VIndex = 0;
+
+ if (Method->isVirtual()) {
+ if (Method->isPure())
+ Virtuality = llvm::dwarf::DW_VIRTUALITY_pure_virtual;
+ else
+ Virtuality = llvm::dwarf::DW_VIRTUALITY_virtual;
+
+ // It doesn't make sense to give a virtual destructor a vtable index,
+ // since a single destructor has two entries in the vtable.
+ if (!isa<CXXDestructorDecl>(Method))
+ VIndex = CGM.getVTableContext().getMethodVTableIndex(Method);
+ ContainingType = RecordTy;
+ }
+
+ unsigned Flags = 0;
+ if (Method->isImplicit())
+ Flags |= llvm::DIDescriptor::FlagArtificial;
+ AccessSpecifier Access = Method->getAccess();
+ if (Access == clang::AS_private)
+ Flags |= llvm::DIDescriptor::FlagPrivate;
+ else if (Access == clang::AS_protected)
+ Flags |= llvm::DIDescriptor::FlagProtected;
+ if (const CXXConstructorDecl *CXXC = dyn_cast<CXXConstructorDecl>(Method)) {
+ if (CXXC->isExplicit())
+ Flags |= llvm::DIDescriptor::FlagExplicit;
+ } else if (const CXXConversionDecl *CXXC =
+ dyn_cast<CXXConversionDecl>(Method)) {
+ if (CXXC->isExplicit())
+ Flags |= llvm::DIDescriptor::FlagExplicit;
+ }
+ if (Method->hasPrototype())
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
+
+ llvm::DIArray TParamsArray = CollectFunctionTemplateParams(Method, Unit);
+ llvm::DISubprogram SP =
+ DBuilder.createMethod(RecordTy, MethodName, MethodLinkageName,
+ MethodDefUnit, MethodLine,
+ MethodTy, /*isLocalToUnit=*/false,
+ /* isDefinition=*/ false,
+ Virtuality, VIndex, ContainingType,
+ Flags, CGM.getLangOpts().Optimize, NULL,
+ TParamsArray);
+
+ SPCache[Method->getCanonicalDecl()] = llvm::WeakVH(SP);
+
+ return SP;
+}
+
+/// CollectCXXMemberFunctions - A helper function to collect debug info for
+/// C++ member functions. This is used while creating debug info entry for
+/// a Record.
+void CGDebugInfo::
+CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ SmallVectorImpl<llvm::Value *> &EltTys,
+ llvm::DIType RecordTy) {
+
+ // Since we want more than just the individual member decls if we
+ // have templated functions iterate over every declaration to gather
+ // the functions.
+ for(DeclContext::decl_iterator I = RD->decls_begin(),
+ E = RD->decls_end(); I != E; ++I) {
+ Decl *D = *I;
+ if (D->isImplicit() && !D->isUsed())
+ continue;
+
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
+ else if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(D))
+ for (FunctionTemplateDecl::spec_iterator SI = FTD->spec_begin(),
+ SE = FTD->spec_end(); SI != SE; ++SI) {
+ FunctionDecl *FD = *SI;
+ if (CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(FD))
+ EltTys.push_back(CreateCXXMemberFunction(M, Unit, RecordTy));
+ }
+ }
+}
+
+/// CollectCXXFriends - A helper function to collect debug info for
+/// C++ friend declarations. This is used while creating debug info entry for
+/// a Record.
+void CGDebugInfo::
+CollectCXXFriends(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ SmallVectorImpl<llvm::Value *> &EltTys,
+ llvm::DIType RecordTy) {
+ for (CXXRecordDecl::friend_iterator BI = RD->friend_begin(),
+ BE = RD->friend_end(); BI != BE; ++BI) {
+ if ((*BI)->isUnsupportedFriend())
+ continue;
+ if (TypeSourceInfo *TInfo = (*BI)->getFriendType())
+ EltTys.push_back(DBuilder.createFriend(RecordTy,
+ getOrCreateType(TInfo->getType(),
+ Unit)));
+ }
+}
+
+/// CollectCXXBases - A helper function to collect debug info for
+/// C++ base classes. This is used while creating debug info entry for
+/// a Record.
+void CGDebugInfo::
+CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ SmallVectorImpl<llvm::Value *> &EltTys,
+ llvm::DIType RecordTy) {
+
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ for (CXXRecordDecl::base_class_const_iterator BI = RD->bases_begin(),
+ BE = RD->bases_end(); BI != BE; ++BI) {
+ unsigned BFlags = 0;
+ uint64_t BaseOffset;
+
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(BI->getType()->getAs<RecordType>()->getDecl());
+
+ if (BI->isVirtual()) {
+ // The virtual base offset offset is negative. The code generator emits a
+ // DWARF expression where it expects a positive number.
+ BaseOffset =
+ 0 - CGM.getVTableContext()
+ .getVirtualBaseOffsetOffset(RD, Base).getQuantity();
+ BFlags = llvm::DIDescriptor::FlagVirtual;
+ } else
+ BaseOffset = RL.getBaseClassOffsetInBits(Base);
+ // FIXME: Inconsistent units for BaseOffset. It is in bytes when
+ // BI->isVirtual() and bits when not.
+
+ AccessSpecifier Access = BI->getAccessSpecifier();
+ if (Access == clang::AS_private)
+ BFlags |= llvm::DIDescriptor::FlagPrivate;
+ else if (Access == clang::AS_protected)
+ BFlags |= llvm::DIDescriptor::FlagProtected;
+
+ llvm::DIType DTy =
+ DBuilder.createInheritance(RecordTy,
+ getOrCreateType(BI->getType(), Unit),
+ BaseOffset, BFlags);
+ EltTys.push_back(DTy);
+ }
+}
+
+/// CollectTemplateParams - A helper function to collect template parameters.
+llvm::DIArray CGDebugInfo::
+CollectTemplateParams(const TemplateParameterList *TPList,
+ const TemplateArgumentList &TAList,
+ llvm::DIFile Unit) {
+ SmallVector<llvm::Value *, 16> TemplateParams;
+ for (unsigned i = 0, e = TAList.size(); i != e; ++i) {
+ const TemplateArgument &TA = TAList[i];
+ const NamedDecl *ND = TPList->getParam(i);
+ if (TA.getKind() == TemplateArgument::Type) {
+ llvm::DIType TTy = getOrCreateType(TA.getAsType(), Unit);
+ llvm::DITemplateTypeParameter TTP =
+ DBuilder.createTemplateTypeParameter(TheCU, ND->getName(), TTy);
+ TemplateParams.push_back(TTP);
+ } else if (TA.getKind() == TemplateArgument::Integral) {
+ llvm::DIType TTy = getOrCreateType(TA.getIntegralType(), Unit);
+ llvm::DITemplateValueParameter TVP =
+ DBuilder.createTemplateValueParameter(TheCU, ND->getName(), TTy,
+ TA.getAsIntegral()->getZExtValue());
+ TemplateParams.push_back(TVP);
+ }
+ }
+ return DBuilder.getOrCreateArray(TemplateParams);
+}
+
+/// CollectFunctionTemplateParams - A helper function to collect debug
+/// info for function template parameters.
+llvm::DIArray CGDebugInfo::
+CollectFunctionTemplateParams(const FunctionDecl *FD, llvm::DIFile Unit) {
+ if (FD->getTemplatedKind() ==
+ FunctionDecl::TK_FunctionTemplateSpecialization) {
+ const TemplateParameterList *TList =
+ FD->getTemplateSpecializationInfo()->getTemplate()
+ ->getTemplateParameters();
+ return
+ CollectTemplateParams(TList, *FD->getTemplateSpecializationArgs(), Unit);
+ }
+ return llvm::DIArray();
+}
+
+/// CollectCXXTemplateParams - A helper function to collect debug info for
+/// template parameters.
+llvm::DIArray CGDebugInfo::
+CollectCXXTemplateParams(const ClassTemplateSpecializationDecl *TSpecial,
+ llvm::DIFile Unit) {
+ llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *>
+ PU = TSpecial->getSpecializedTemplateOrPartial();
+
+ TemplateParameterList *TPList = PU.is<ClassTemplateDecl *>() ?
+ PU.get<ClassTemplateDecl *>()->getTemplateParameters() :
+ PU.get<ClassTemplatePartialSpecializationDecl *>()->getTemplateParameters();
+ const TemplateArgumentList &TAList = TSpecial->getTemplateInstantiationArgs();
+ return CollectTemplateParams(TPList, TAList, Unit);
+}
+
+/// getOrCreateVTablePtrType - Return debug info descriptor for vtable.
+llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) {
+ if (VTablePtrType.isValid())
+ return VTablePtrType;
+
+ ASTContext &Context = CGM.getContext();
+
+ /* Function type */
+ llvm::Value *STy = getOrCreateType(Context.IntTy, Unit);
+ llvm::DIArray SElements = DBuilder.getOrCreateArray(STy);
+ llvm::DIType SubTy = DBuilder.createSubroutineType(Unit, SElements);
+ unsigned Size = Context.getTypeSize(Context.VoidPtrTy);
+ llvm::DIType vtbl_ptr_type = DBuilder.createPointerType(SubTy, Size, 0,
+ "__vtbl_ptr_type");
+ VTablePtrType = DBuilder.createPointerType(vtbl_ptr_type, Size);
+ return VTablePtrType;
+}
+
+/// getVTableName - Get vtable name for the given Class.
+StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
+ // Construct a gdb-compatible name.
+ std::string Name = "_vptr$" + RD->getNameAsString();
+
+ // Copy this name on the side and use its reference.
+ char *StrPtr = DebugInfoNames.Allocate<char>(Name.length());
+ memcpy(StrPtr, Name.data(), Name.length());
+ return StringRef(StrPtr, Name.length());
+}
+
+
+/// CollectVTableInfo - If the C++ class has vtable info then insert appropriate
+/// debug info entry in EltTys vector.
+void CGDebugInfo::
+CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ SmallVectorImpl<llvm::Value *> &EltTys) {
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+
+ // If there is a primary base then it will hold vtable info.
+ if (RL.getPrimaryBase())
+ return;
+
+ // If this class is not dynamic then there is not any vtable info to collect.
+ if (!RD->isDynamicClass())
+ return;
+
+ unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
+ llvm::DIType VPTR
+ = DBuilder.createMemberType(Unit, getVTableName(RD), Unit,
+ 0, Size, 0, 0, 0,
+ getOrCreateVTablePtrType(Unit));
+ EltTys.push_back(VPTR);
+}
+
+/// getOrCreateRecordType - Emit record type's standalone debug info.
+llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
+ SourceLocation Loc) {
+ llvm::DIType T = getOrCreateType(RTy, getOrCreateFile(Loc));
+ return T;
+}
+
+/// getOrCreateInterfaceType - Emit standalone debug info for an Objective-C
+/// interface type.
+llvm::DIType CGDebugInfo::getOrCreateInterfaceType(QualType D,
+ SourceLocation Loc) {
+ llvm::DIType T = getOrCreateType(D, getOrCreateFile(Loc));
+ DBuilder.retainType(T);
+ return T;
+}
+
+/// CreateType - get structure or union type.
+llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
+ RecordDecl *RD = Ty->getDecl();
+
+ // Get overall information about the record type for the debug info.
+ llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
+
+ // Records, classes, and unions can all be recursive. To handle them, we
+ // first generate a debug descriptor for the struct as a forward declaration.
+ // Then (if it is a definition) we go through and get debug info for all of
+ // its members. Finally, we create a descriptor for the complete type (which
+ // may refer to the forward decl if the struct is recursive) and replace all
+ // uses of the forward declaration with the final definition.
+
+ llvm::DIType FwdDecl = getOrCreateLimitedType(QualType(Ty, 0), DefUnit);
+
+ if (FwdDecl.isForwardDecl())
+ return FwdDecl;
+
+ llvm::TrackingVH<llvm::MDNode> FwdDeclNode(FwdDecl);
+
+ // Push the struct on region stack.
+ LexicalBlockStack.push_back(FwdDeclNode);
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
+
+ // Add this to the completed types cache since we're completing it.
+ CompletedTypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
+
+ // Convert all the elements.
+ SmallVector<llvm::Value *, 16> EltTys;
+
+ // Note: The split of CXXDecl information here is intentional; the
+ // gdb tests depend on a certain ordering at printout. The debug
+ // information offsets are still correct if we merge them all together,
+ // though.
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
+ if (CXXDecl) {
+ CollectCXXBases(CXXDecl, DefUnit, EltTys, FwdDecl);
+ CollectVTableInfo(CXXDecl, DefUnit, EltTys);
+ }
+
+ // Collect static variables with initializers and other fields.
+ CollectRecordStaticVars(RD, FwdDecl);
+ CollectRecordFields(RD, DefUnit, EltTys, FwdDecl);
+ llvm::DIArray TParamsArray;
+ if (CXXDecl) {
+ CollectCXXMemberFunctions(CXXDecl, DefUnit, EltTys, FwdDecl);
+ CollectCXXFriends(CXXDecl, DefUnit, EltTys, FwdDecl);
+ if (const ClassTemplateSpecializationDecl *TSpecial
+ = dyn_cast<ClassTemplateSpecializationDecl>(RD))
+ TParamsArray = CollectCXXTemplateParams(TSpecial, DefUnit);
+ }
+
+ LexicalBlockStack.pop_back();
+ RegionMap.erase(Ty->getDecl());
+
+ llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
+ // FIXME: Magic numbers ahoy! These should be changed when we
+ // get some enums in llvm/Analysis/DebugInfo.h to refer to
+ // them.
+ if (RD->isUnion())
+ FwdDeclNode->replaceOperandWith(10, Elements);
+ else if (CXXDecl) {
+ FwdDeclNode->replaceOperandWith(10, Elements);
+ FwdDeclNode->replaceOperandWith(13, TParamsArray);
+ } else
+ FwdDeclNode->replaceOperandWith(10, Elements);
+
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDeclNode);
+ return llvm::DIType(FwdDeclNode);
+}
+
+/// CreateType - get objective-c object type.
+llvm::DIType CGDebugInfo::CreateType(const ObjCObjectType *Ty,
+ llvm::DIFile Unit) {
+ // Ignore protocols.
+ return getOrCreateType(Ty->getBaseType(), Unit);
+}
+
+/// CreateType - get objective-c interface type.
+llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
+ llvm::DIFile Unit) {
+ ObjCInterfaceDecl *ID = Ty->getDecl();
+ if (!ID)
+ return llvm::DIType();
+
+ // Get overall information about the record type for the debug info.
+ llvm::DIFile DefUnit = getOrCreateFile(ID->getLocation());
+ unsigned Line = getLineNumber(ID->getLocation());
+ unsigned RuntimeLang = TheCU.getLanguage();
+
+ // If this is just a forward declaration, return a special forward-declaration
+ // debug type since we won't be able to lay out the entire type.
+ ObjCInterfaceDecl *Def = ID->getDefinition();
+ if (!Def) {
+ llvm::DIType FwdDecl =
+ DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ ID->getName(), DefUnit, Line,
+ RuntimeLang);
+ return FwdDecl;
+ }
+
+ ID = Def;
+
+ // Bit size, align and offset of the type.
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ unsigned Flags = 0;
+ if (ID->getImplementation())
+ Flags |= llvm::DIDescriptor::FlagObjcClassComplete;
+
+ llvm::DIType RealDecl =
+ DBuilder.createStructType(Unit, ID->getName(), DefUnit,
+ Line, Size, Align, Flags,
+ llvm::DIArray(), RuntimeLang);
+
+ // Otherwise, insert it into the CompletedTypeCache so that recursive uses
+ // will find it; we're emitting the complete type.
+ CompletedTypeCache[QualType(Ty, 0).getAsOpaquePtr()] = RealDecl;
+ // Push the struct on region stack.
+ llvm::TrackingVH<llvm::MDNode> FwdDeclNode(RealDecl);
+
+ LexicalBlockStack.push_back(FwdDeclNode);
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(RealDecl);
+
+ // Convert all the elements.
+ SmallVector<llvm::Value *, 16> EltTys;
+
+ ObjCInterfaceDecl *SClass = ID->getSuperClass();
+ if (SClass) {
+ llvm::DIType SClassTy =
+ getOrCreateType(CGM.getContext().getObjCInterfaceType(SClass), Unit);
+ if (!SClassTy.isValid())
+ return llvm::DIType();
+
+ llvm::DIType InhTag =
+ DBuilder.createInheritance(RealDecl, SClassTy, 0, 0);
+ EltTys.push_back(InhTag);
+ }
+
+ for (ObjCContainerDecl::prop_iterator I = ID->prop_begin(),
+ E = ID->prop_end(); I != E; ++I) {
+ const ObjCPropertyDecl *PD = *I;
+ SourceLocation Loc = PD->getLocation();
+ llvm::DIFile PUnit = getOrCreateFile(Loc);
+ unsigned PLine = getLineNumber(Loc);
+ ObjCMethodDecl *Getter = PD->getGetterMethodDecl();
+ ObjCMethodDecl *Setter = PD->getSetterMethodDecl();
+ llvm::MDNode *PropertyNode =
+ DBuilder.createObjCProperty(PD->getName(),
+ PUnit, PLine,
+ (Getter && Getter->isImplicit()) ? "" :
+ getSelectorName(PD->getGetterName()),
+ (Setter && Setter->isImplicit()) ? "" :
+ getSelectorName(PD->getSetterName()),
+ PD->getPropertyAttributes(),
+ getOrCreateType(PD->getType(), PUnit));
+ EltTys.push_back(PropertyNode);
+ }
+
+ const ASTRecordLayout &RL = CGM.getContext().getASTObjCInterfaceLayout(ID);
+ unsigned FieldNo = 0;
+ for (ObjCIvarDecl *Field = ID->all_declared_ivar_begin(); Field;
+ Field = Field->getNextIvar(), ++FieldNo) {
+ llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
+ if (!FieldTy.isValid())
+ return llvm::DIType();
+
+ StringRef FieldName = Field->getName();
+
+ // Ignore unnamed fields.
+ if (FieldName.empty())
+ continue;
+
+ // Get the location for the field.
+ llvm::DIFile FieldDefUnit = getOrCreateFile(Field->getLocation());
+ unsigned FieldLine = getLineNumber(Field->getLocation());
+ QualType FType = Field->getType();
+ uint64_t FieldSize = 0;
+ unsigned FieldAlign = 0;
+
+ if (!FType->isIncompleteArrayType()) {
+
+ // Bit size, align and offset of the type.
+ FieldSize = Field->isBitField()
+ ? Field->getBitWidthValue(CGM.getContext())
+ : CGM.getContext().getTypeSize(FType);
+ FieldAlign = CGM.getContext().getTypeAlign(FType);
+ }
+
+ // We can't know the offset of our ivar in the structure if we're using
+ // the non-fragile ABI, and the debugger should ignore the value anyway.
+ // Use FieldNo+1 because of how debuggers use the information,
+ // e.g. negating the value when it needs a lookup in the dynamic table.
+ uint64_t FieldOffset = CGM.getLangOpts().ObjCNonFragileABI ? FieldNo+1
+ : RL.getFieldOffset(FieldNo);
+
+ unsigned Flags = 0;
+ if (Field->getAccessControl() == ObjCIvarDecl::Protected)
+ Flags = llvm::DIDescriptor::FlagProtected;
+ else if (Field->getAccessControl() == ObjCIvarDecl::Private)
+ Flags = llvm::DIDescriptor::FlagPrivate;
+
+ llvm::MDNode *PropertyNode = NULL;
+ if (ObjCImplementationDecl *ImpD = ID->getImplementation()) {
+ if (ObjCPropertyImplDecl *PImpD =
+ ImpD->FindPropertyImplIvarDecl(Field->getIdentifier())) {
+ if (ObjCPropertyDecl *PD = PImpD->getPropertyDecl()) {
+ SourceLocation Loc = PD->getLocation();
+ llvm::DIFile PUnit = getOrCreateFile(Loc);
+ unsigned PLine = getLineNumber(Loc);
+ ObjCMethodDecl *Getter = PD->getGetterMethodDecl();
+ ObjCMethodDecl *Setter = PD->getSetterMethodDecl();
+ PropertyNode =
+ DBuilder.createObjCProperty(PD->getName(),
+ PUnit, PLine,
+ (Getter && Getter->isImplicit()) ? "" :
+ getSelectorName(PD->getGetterName()),
+ (Setter && Setter->isImplicit()) ? "" :
+ getSelectorName(PD->getSetterName()),
+ PD->getPropertyAttributes(),
+ getOrCreateType(PD->getType(), PUnit));
+ }
+ }
+ }
+ FieldTy = DBuilder.createObjCIVar(FieldName, FieldDefUnit,
+ FieldLine, FieldSize, FieldAlign,
+ FieldOffset, Flags, FieldTy,
+ PropertyNode);
+ EltTys.push_back(FieldTy);
+ }
+
+ llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
+ FwdDeclNode->replaceOperandWith(10, Elements);
+
+ LexicalBlockStack.pop_back();
+ return llvm::DIType(FwdDeclNode);
+}
+
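+/// CreateType - Build debug info for a vector type as an array of its
+/// element type described by a single subrange.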
+llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty, llvm::DIFile Unit) {
+ llvm::DIType ElementTy = getOrCreateType(Ty->getElementType(), Unit);
+ int64_t NumElems = Ty->getNumElements();
+ int64_t LowerBound = 0;
+ if (NumElems == 0)
+ // If the number of elements is not known, this is an unbounded array.
+ // Use Low = 1, Hi = 0 to express such arrays.
+ LowerBound = 1;
+ else
+ --NumElems;
+
+ llvm::Value *Subscript = DBuilder.getOrCreateSubrange(LowerBound, NumElems);
+ llvm::DIArray SubscriptArray = DBuilder.getOrCreateArray(Subscript);
+
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ return
+ DBuilder.createVectorType(Size, Align, ElementTy, SubscriptArray);
+}
+
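+/// CreateType - Build debug info for an array type, collecting one subrange
+/// per dimension; VLAs and incomplete arrays are given a size of zero.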
+llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
+ llvm::DIFile Unit) {
+ uint64_t Size;
+ uint64_t Align;
+
+ // FIXME: make getTypeAlign() aware of VLAs and incomplete array types
+ if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(Ty)) {
+ Size = 0;
+ Align =
+ CGM.getContext().getTypeAlign(CGM.getContext().getBaseElementType(VAT));
+ } else if (Ty->isIncompleteArrayType()) {
+ Size = 0;
+ Align = CGM.getContext().getTypeAlign(Ty->getElementType());
+ } else if (Ty->isDependentSizedArrayType() || Ty->isIncompleteType()) {
+ Size = 0;
+ Align = 0;
+ } else {
+ // Size and align of the whole array, not the element type.
+ Size = CGM.getContext().getTypeSize(Ty);
+ Align = CGM.getContext().getTypeAlign(Ty);
+ }
+
+ // Add the dimensions of the array. FIXME: This loses CV qualifiers from
+ // interior arrays; do we care? Why aren't nested arrays represented the
+ // obvious/recursive way?
+ SmallVector<llvm::Value *, 8> Subscripts;
+ QualType EltTy(Ty, 0);
+ if (Ty->isIncompleteArrayType())
+ EltTy = Ty->getElementType();
+ else {
+ while ((Ty = dyn_cast<ArrayType>(EltTy))) {
+ int64_t UpperBound = 0;
+ int64_t LowerBound = 0;
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty)) {
+ if (CAT->getSize().getZExtValue())
+ UpperBound = CAT->getSize().getZExtValue() - 1;
+ } else
+ // This is an unbounded array. Use Low = 1, Hi = 0 to express such
+ // arrays.
+ LowerBound = 1;
+
+ // FIXME: Verify this is right for VLAs.
+ Subscripts.push_back(DBuilder.getOrCreateSubrange(LowerBound,
+ UpperBound));
+ EltTy = Ty->getElementType();
+ }
+ }
+
+ llvm::DIArray SubscriptArray = DBuilder.getOrCreateArray(Subscripts);
+
+ llvm::DIType DbgTy =
+ DBuilder.createArrayType(Size, Align, getOrCreateType(EltTy, Unit),
+ SubscriptArray);
+ return DbgTy;
+}
+
+llvm::DIType CGDebugInfo::CreateType(const LValueReferenceType *Ty,
+ llvm::DIFile Unit) {
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_reference_type,
+ Ty, Ty->getPointeeType(), Unit);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const RValueReferenceType *Ty,
+ llvm::DIFile Unit) {
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_rvalue_reference_type,
+ Ty, Ty->getPointeeType(), Unit);
+}
+
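+/// CreateType - Build debug info for a member pointer. Data member pointers
+/// are represented as ptrdiff_t; member function pointers as a struct of two
+/// ptrdiff_t-sized fields.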
+llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
+ llvm::DIFile U) {
+ QualType PointerDiffTy = CGM.getContext().getPointerDiffType();
+ llvm::DIType PointerDiffDITy = getOrCreateType(PointerDiffTy, U);
+
+ if (!Ty->getPointeeType()->isFunctionType()) {
+ // We have a data member pointer type.
+ return PointerDiffDITy;
+ }
+
+ // We have a member function pointer type. Treat it as a struct with two
+ // ptrdiff_t members.
+ std::pair<uint64_t, unsigned> Info = CGM.getContext().getTypeInfo(Ty);
+
+ uint64_t FieldOffset = 0;
+ llvm::Value *ElementTypes[2];
+
+ // FIXME: This should probably be a function type instead.
+ ElementTypes[0] =
+ DBuilder.createMemberType(U, "ptr", U, 0,
+ Info.first, Info.second, FieldOffset, 0,
+ PointerDiffDITy);
+ FieldOffset += Info.first;
+
+ ElementTypes[1] =
+ DBuilder.createMemberType(U, "ptr", U, 0,
+ Info.first, Info.second, FieldOffset, 0,
+ PointerDiffDITy);
+
+ llvm::DIArray Elements = DBuilder.getOrCreateArray(ElementTypes);
+
+ return DBuilder.createStructType(U, StringRef("test"),
+ U, 0, FieldOffset,
+ 0, 0, Elements);
+}
+
+llvm::DIType CGDebugInfo::CreateType(const AtomicType *Ty,
+ llvm::DIFile U) {
+ // Ignore the atomic wrapping
+ // FIXME: What is the correct representation?
+ return getOrCreateType(Ty->getValueType(), U);
+}
+
+/// CreateEnumType - get enumeration type.
+llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
+ llvm::DIFile Unit = getOrCreateFile(ED->getLocation());
+ SmallVector<llvm::Value *, 16> Enumerators;
+
+ // Create DIEnumerator elements for each enumerator.
+ for (EnumDecl::enumerator_iterator
+ Enum = ED->enumerator_begin(), EnumEnd = ED->enumerator_end();
+ Enum != EnumEnd; ++Enum) {
+ Enumerators.push_back(
+ DBuilder.createEnumerator(Enum->getName(),
+ Enum->getInitVal().getZExtValue()));
+ }
+
+ // Return a CompositeType for the enum itself.
+ llvm::DIArray EltArray = DBuilder.getOrCreateArray(Enumerators);
+
+ llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation());
+ unsigned Line = getLineNumber(ED->getLocation());
+ uint64_t Size = 0;
+ uint64_t Align = 0;
+ if (!ED->getTypeForDecl()->isIncompleteType()) {
+ Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
+ Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl());
+ }
+ llvm::DIDescriptor EnumContext =
+ getContextDescriptor(cast<Decl>(ED->getDeclContext()));
+ llvm::DIType DbgTy =
+ DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit, Line,
+ Size, Align, EltArray);
+ return DbgTy;
+}
+
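+/// UnwrapTypeForDebugInfo - Peel off type sugar (typeof, decltype, paren,
+/// attributed, elaborated and template-specialization wrappers, etc.) that
+/// has no debug representation of its own.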
+static QualType UnwrapTypeForDebugInfo(QualType T) {
+ do {
+ QualType LastT = T;
+ switch (T->getTypeClass()) {
+ default:
+ return T;
+ case Type::TemplateSpecialization:
+ T = cast<TemplateSpecializationType>(T)->desugar();
+ break;
+ case Type::TypeOfExpr:
+ T = cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType();
+ break;
+ case Type::TypeOf:
+ T = cast<TypeOfType>(T)->getUnderlyingType();
+ break;
+ case Type::Decltype:
+ T = cast<DecltypeType>(T)->getUnderlyingType();
+ break;
+ case Type::UnaryTransform:
+ T = cast<UnaryTransformType>(T)->getUnderlyingType();
+ break;
+ case Type::Attributed:
+ T = cast<AttributedType>(T)->getEquivalentType();
+ break;
+ case Type::Elaborated:
+ T = cast<ElaboratedType>(T)->getNamedType();
+ break;
+ case Type::Paren:
+ T = cast<ParenType>(T)->getInnerType();
+ break;
+ case Type::SubstTemplateTypeParm:
+ T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
+ break;
+ case Type::Auto:
+ T = cast<AutoType>(T)->getDeducedType();
+ break;
+ }
+
+ assert(T != LastT && "Type unwrapping failed to unwrap!");
+ if (T == LastT)
+ return T;
+ } while (true);
+}
+
+/// getTypeOrNull - Get the type from the cache or return a null type if it
+/// doesn't exist.
+llvm::DIType CGDebugInfo::getTypeOrNull(QualType Ty) {
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty);
+
+ // Check for existing entry.
+ llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
+ TypeCache.find(Ty.getAsOpaquePtr());
+ if (it != TypeCache.end()) {
+ // Verify that the debug info still exists.
+ if (&*it->second)
+ return llvm::DIType(cast<llvm::MDNode>(it->second));
+ }
+
+ return llvm::DIType();
+}
+
+/// getCompletedTypeOrNull - Get the type from the cache or return null if it
+/// doesn't exist.
+llvm::DIType CGDebugInfo::getCompletedTypeOrNull(QualType Ty) {
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty);
+
+ // Check for existing entry.
+ llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
+ CompletedTypeCache.find(Ty.getAsOpaquePtr());
+ if (it != CompletedTypeCache.end()) {
+ // Verify that the debug info still exists.
+ if (&*it->second)
+ return llvm::DIType(cast<llvm::MDNode>(it->second));
+ }
+
+ return llvm::DIType();
+}
+
+
+/// getOrCreateType - Get the type from the cache or create a new
+/// one if necessary.
+llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile Unit) {
+ if (Ty.isNull())
+ return llvm::DIType();
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty);
+
+ llvm::DIType T = getCompletedTypeOrNull(Ty);
+
+ if (T.Verify()) return T;
+
+ // Otherwise create the type.
+ llvm::DIType Res = CreateTypeNode(Ty, Unit);
+
+ llvm::DIType TC = getTypeOrNull(Ty);
+ if (TC.Verify() && TC.isForwardDecl())
+ ReplaceMap.push_back(std::make_pair(Ty.getAsOpaquePtr(), TC));
+
+ // And update the type cache.
+ TypeCache[Ty.getAsOpaquePtr()] = Res;
+
+ if (!Res.isForwardDecl())
+ CompletedTypeCache[Ty.getAsOpaquePtr()] = Res;
+ return Res;
+}
+
+/// CreateTypeNode - Create a new debug type node.
+llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile Unit) {
+ // Handle qualifiers, which recursively handles what they refer to.
+ if (Ty.hasLocalQualifiers())
+ return CreateQualifiedType(Ty, Unit);
+
+ const char *Diag = 0;
+
+ // Work out details of type.
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Dependent types cannot show up in debug information");
+
+ case Type::ExtVector:
+ case Type::Vector:
+ return CreateType(cast<VectorType>(Ty), Unit);
+ case Type::ObjCObjectPointer:
+ return CreateType(cast<ObjCObjectPointerType>(Ty), Unit);
+ case Type::ObjCObject:
+ return CreateType(cast<ObjCObjectType>(Ty), Unit);
+ case Type::ObjCInterface:
+ return CreateType(cast<ObjCInterfaceType>(Ty), Unit);
+ case Type::Builtin:
+ return CreateType(cast<BuiltinType>(Ty));
+ case Type::Complex:
+ return CreateType(cast<ComplexType>(Ty));
+ case Type::Pointer:
+ return CreateType(cast<PointerType>(Ty), Unit);
+ case Type::BlockPointer:
+ return CreateType(cast<BlockPointerType>(Ty), Unit);
+ case Type::Typedef:
+ return CreateType(cast<TypedefType>(Ty), Unit);
+ case Type::Record:
+ return CreateType(cast<RecordType>(Ty));
+ case Type::Enum:
+ return CreateEnumType(cast<EnumType>(Ty)->getDecl());
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ return CreateType(cast<FunctionType>(Ty), Unit);
+ case Type::ConstantArray:
+ case Type::VariableArray:
+ case Type::IncompleteArray:
+ return CreateType(cast<ArrayType>(Ty), Unit);
+
+ case Type::LValueReference:
+ return CreateType(cast<LValueReferenceType>(Ty), Unit);
+ case Type::RValueReference:
+ return CreateType(cast<RValueReferenceType>(Ty), Unit);
+
+ case Type::MemberPointer:
+ return CreateType(cast<MemberPointerType>(Ty), Unit);
+
+ case Type::Atomic:
+ return CreateType(cast<AtomicType>(Ty), Unit);
+
+ case Type::Attributed:
+ case Type::TemplateSpecialization:
+ case Type::Elaborated:
+ case Type::Paren:
+ case Type::SubstTemplateTypeParm:
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::Decltype:
+ case Type::UnaryTransform:
+ case Type::Auto:
+ llvm_unreachable("type should have been unwrapped!");
+ }
+
+ assert(Diag && "Fall through without a diagnostic?");
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(DiagnosticsEngine::Error,
+ "debug information for %0 is not yet supported");
+ CGM.getDiags().Report(DiagID)
+ << Diag;
+ return llvm::DIType();
+}
+
+/// getOrCreateLimitedType - Get the type from the cache or create a new
+/// limited type if necessary.
+llvm::DIType CGDebugInfo::getOrCreateLimitedType(QualType Ty,
+ llvm::DIFile Unit) {
+ if (Ty.isNull())
+ return llvm::DIType();
+
+ // Unwrap the type as needed for debug information.
+ Ty = UnwrapTypeForDebugInfo(Ty);
+
+ llvm::DIType T = getTypeOrNull(Ty);
+
+ // We may have cached a forward decl when we could have created
+ // a non-forward decl. Go ahead and create a non-forward decl
+ // now.
+ if (T.Verify() && !T.isForwardDecl()) return T;
+
+ // Otherwise create the type.
+ llvm::DIType Res = CreateLimitedTypeNode(Ty, Unit);
+
+ if (T.Verify() && T.isForwardDecl())
+ ReplaceMap.push_back(std::make_pair(Ty.getAsOpaquePtr(), T));
+
+ // And update the type cache.
+ TypeCache[Ty.getAsOpaquePtr()] = Res;
+ return Res;
+}
+
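+/// CreateLimitedType - Build a record type node carrying only the layout
+/// information (name, size, alignment, containing type) without emitting
+/// its members.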
+// TODO: Currently used for context chains when limiting debug info.
+llvm::DIType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
+ RecordDecl *RD = Ty->getDecl();
+
+ // Get overall information about the record type for the debug info.
+ llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
+ unsigned Line = getLineNumber(RD->getLocation());
+ StringRef RDName = RD->getName();
+
+ llvm::DIDescriptor RDContext;
+ if (CGM.getCodeGenOpts().LimitDebugInfo)
+ RDContext = createContextChain(cast<Decl>(RD->getDeclContext()));
+ else
+ RDContext = getContextDescriptor(cast<Decl>(RD->getDeclContext()));
+
+ // If this is just a forward declaration, construct an appropriately
+ // marked node and just return it.
+ if (!RD->getDefinition())
+ return createRecordFwdDecl(RD, RDContext);
+
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
+ llvm::TrackingVH<llvm::MDNode> RealDecl;
+
+ if (RD->isUnion())
+ RealDecl = DBuilder.createUnionType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, llvm::DIArray());
+ else if (CXXDecl) {
+ RDName = getClassName(RD);
+
+ // FIXME: This could be a struct type giving a default visibility different
+ // than C++ class type, but needs llvm metadata changes first.
+ RealDecl = DBuilder.createClassType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, 0, llvm::DIType(),
+ llvm::DIArray(), llvm::DIType(),
+ llvm::DIArray());
+ } else
+ RealDecl = DBuilder.createStructType(RDContext, RDName, DefUnit, Line,
+ Size, Align, 0, llvm::DIArray());
+
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(RealDecl);
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = llvm::DIType(RealDecl);
+
+ if (CXXDecl) {
+ // A class's primary base or the class itself contains the vtable.
+ llvm::MDNode *ContainingType = NULL;
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
+ // Seek the non-virtual primary base root.
+ while (1) {
+ const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase);
+ const CXXRecordDecl *PBT = BRL.getPrimaryBase();
+ if (PBT && !BRL.isPrimaryBaseVirtual())
+ PBase = PBT;
+ else
+ break;
+ }
+ ContainingType =
+ getOrCreateType(QualType(PBase->getTypeForDecl(), 0), DefUnit);
+ }
+ else if (CXXDecl->isDynamicClass())
+ ContainingType = RealDecl;
+
+ RealDecl->replaceOperandWith(12, ContainingType);
+ }
+ return llvm::DIType(RealDecl);
+}
+
+/// CreateLimitedTypeNode - Create a new debug type node, but only forward
+/// declare composite types that haven't been processed yet.
+llvm::DIType CGDebugInfo::CreateLimitedTypeNode(QualType Ty,llvm::DIFile Unit) {
+
+ // Work out details of type.
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+ #include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Dependent types cannot show up in debug information");
+
+ case Type::Record:
+ return CreateLimitedType(cast<RecordType>(Ty));
+ default:
+ return CreateTypeNode(Ty, Unit);
+ }
+}
+
+/// CreateMemberType - Create new member and increase Offset by FType's size.
+llvm::DIType CGDebugInfo::CreateMemberType(llvm::DIFile Unit, QualType FType,
+ StringRef Name,
+ uint64_t *Offset) {
+ llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ uint64_t FieldSize = CGM.getContext().getTypeSize(FType);
+ unsigned FieldAlign = CGM.getContext().getTypeAlign(FType);
+ llvm::DIType Ty = DBuilder.createMemberType(Unit, Name, Unit, 0,
+ FieldSize, FieldAlign,
+ *Offset, 0, FieldTy);
+ *Offset += FieldSize;
+ return Ty;
+}
+
+/// getFunctionDeclaration - Return debug info descriptor to describe method
+/// declaration for the given method definition.
+llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) return llvm::DISubprogram();
+
+ // Setup context.
+ getContextDescriptor(cast<Decl>(D->getDeclContext()));
+
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
+ MI = SPCache.find(FD->getCanonicalDecl());
+ if (MI != SPCache.end()) {
+ llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(&*MI->second));
+ if (SP.isSubprogram() && !llvm::DISubprogram(SP).isDefinition())
+ return SP;
+ }
+
+ for (FunctionDecl::redecl_iterator I = FD->redecls_begin(),
+ E = FD->redecls_end(); I != E; ++I) {
+ const FunctionDecl *NextFD = *I;
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
+ MI = SPCache.find(NextFD->getCanonicalDecl());
+ if (MI != SPCache.end()) {
+ llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(&*MI->second));
+ if (SP.isSubprogram() && !llvm::DISubprogram(SP).isDefinition())
+ return SP;
+ }
+ }
+ return llvm::DISubprogram();
+}
+
+// getOrCreateFunctionType - Construct a DIType. If it is a C++ method, include
+// the implicit "this" parameter.
+llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl * D,
+ QualType FnType,
+ llvm::DIFile F) {
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ return getOrCreateMethodType(Method, F);
+ if (const ObjCMethodDecl *OMethod = dyn_cast<ObjCMethodDecl>(D)) {
+ // Add "self" and "_cmd"
+ SmallVector<llvm::Value *, 16> Elts;
+
+ // First element is always return type. For 'void' functions it is NULL.
+ Elts.push_back(getOrCreateType(OMethod->getResultType(), F));
+ // "self" pointer is always first argument.
+ Elts.push_back(getOrCreateType(OMethod->getSelfDecl()->getType(), F));
+ // "cmd" pointer is always second argument.
+ Elts.push_back(getOrCreateType(OMethod->getCmdDecl()->getType(), F));
+ // Get rest of the arguments.
+ for (ObjCMethodDecl::param_const_iterator PI = OMethod->param_begin(),
+ PE = OMethod->param_end(); PI != PE; ++PI)
+ Elts.push_back(getOrCreateType((*PI)->getType(), F));
+
+ llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(Elts);
+ return DBuilder.createSubroutineType(F, EltTypeArray);
+ }
+ return getOrCreateType(FnType, F);
+}
+
+/// EmitFunctionStart - Constructs the debug code for entering a function.
+void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
+ llvm::Function *Fn,
+ CGBuilderTy &Builder) {
+
+ StringRef Name;
+ StringRef LinkageName;
+
+ FnBeginRegionCount.push_back(LexicalBlockStack.size());
+
+ const Decl *D = GD.getDecl();
+ // Use the location of the declaration.
+ SourceLocation Loc = D->getLocation();
+
+ unsigned Flags = 0;
+ llvm::DIFile Unit = getOrCreateFile(Loc);
+ llvm::DIDescriptor FDContext(Unit);
+ llvm::DIArray TParamsArray;
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // If there is a DISubprogram for this function available then use it.
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
+ FI = SPCache.find(FD->getCanonicalDecl());
+ if (FI != SPCache.end()) {
+ llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(&*FI->second));
+ if (SP.isSubprogram() && llvm::DISubprogram(SP).isDefinition()) {
+ llvm::MDNode *SPN = SP;
+ LexicalBlockStack.push_back(SPN);
+ RegionMap[D] = llvm::WeakVH(SP);
+ return;
+ }
+ }
+ Name = getFunctionName(FD);
+ // Use the mangled name as the linkage name for C/C++ functions.
+ if (FD->hasPrototype()) {
+ LinkageName = CGM.getMangledName(GD);
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
+ }
+ if (LinkageName == Name)
+ LinkageName = StringRef();
+
+ if (const NamespaceDecl *NSDecl =
+ dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
+ FDContext = getOrCreateNameSpace(NSDecl);
+ else if (const RecordDecl *RDecl =
+ dyn_cast_or_null<RecordDecl>(FD->getDeclContext()))
+ FDContext = getContextDescriptor(cast<Decl>(RDecl->getDeclContext()));
+
+ // Collect template parameters.
+ TParamsArray = CollectFunctionTemplateParams(FD, Unit);
+ } else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) {
+ Name = getObjCMethodName(OMD);
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
+ } else {
+ // Use llvm function name.
+ Name = Fn->getName();
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
+ }
+ if (!Name.empty() && Name[0] == '\01')
+ Name = Name.substr(1);
+
+ unsigned LineNo = getLineNumber(Loc);
+ if (D->isImplicit())
+ Flags |= llvm::DIDescriptor::FlagArtificial;
+
+ llvm::DISubprogram SPDecl = getFunctionDeclaration(D);
+ llvm::DISubprogram SP =
+ DBuilder.createFunction(FDContext, Name, LinkageName, Unit,
+ LineNo, getOrCreateFunctionType(D, FnType, Unit),
+ Fn->hasInternalLinkage(), true/*definition*/,
+ getLineNumber(CurLoc),
+ Flags, CGM.getLangOpts().Optimize, Fn,
+ TParamsArray, SPDecl);
+
+ // Push function on region stack.
+ llvm::MDNode *SPN = SP;
+ LexicalBlockStack.push_back(SPN);
+ RegionMap[D] = llvm::WeakVH(SP);
+}
+
+/// EmitLocation - Emit metadata to indicate a change in line/column
+/// information in the source file.
+void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) {
+
+ // Update our current location
+ setLocation(Loc);
+
+ if (CurLoc.isInvalid() || CurLoc.isMacroID()) return;
+
+ // Don't bother if things are the same as last time.
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ if (CurLoc == PrevLoc ||
+ SM.getExpansionLoc(CurLoc) == SM.getExpansionLoc(PrevLoc))
+ // New Builder may not be in sync with CGDebugInfo.
+ if (!Builder.getCurrentDebugLocation().isUnknown())
+ return;
+
+ // Update last state.
+ PrevLoc = CurLoc;
+
+ llvm::MDNode *Scope = LexicalBlockStack.back();
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(getLineNumber(CurLoc),
+ getColumnNumber(CurLoc),
+ Scope));
+}
+
+/// CreateLexicalBlock - Creates a new lexical block node and pushes it on
+/// the stack.
+void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) {
+ llvm::DIDescriptor D =
+ DBuilder.createLexicalBlock(LexicalBlockStack.empty() ?
+ llvm::DIDescriptor() :
+ llvm::DIDescriptor(LexicalBlockStack.back()),
+ getOrCreateFile(CurLoc),
+ getLineNumber(CurLoc),
+ getColumnNumber(CurLoc));
+ llvm::MDNode *DN = D;
+ LexicalBlockStack.push_back(DN);
+}
+
+/// EmitLexicalBlockStart - Constructs the debug code for entering a declarative
+/// region - beginning of a DW_TAG_lexical_block.
+void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder, SourceLocation Loc) {
+ // Set our current location.
+ setLocation(Loc);
+
+ // Create a new lexical block and push it on the stack.
+ CreateLexicalBlock(Loc);
+
+ // Emit a line table change for the current location inside the new scope.
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(getLineNumber(Loc),
+ getColumnNumber(Loc),
+ LexicalBlockStack.back()));
+}
+
+/// EmitLexicalBlockEnd - Constructs the debug code for exiting a declarative
+/// region - end of a DW_TAG_lexical_block.
+void CGDebugInfo::EmitLexicalBlockEnd(CGBuilderTy &Builder, SourceLocation Loc) {
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
+
+ // Provide an entry in the line table for the end of the block.
+ EmitLocation(Builder, Loc);
+
+ LexicalBlockStack.pop_back();
+}
+
+/// EmitFunctionEnd - Constructs the debug code for exiting a function.
+void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder) {
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
+ unsigned RCount = FnBeginRegionCount.back();
+ assert(RCount <= LexicalBlockStack.size() && "Region stack mismatch");
+
+ // Pop all regions for this function.
+ while (LexicalBlockStack.size() != RCount)
+ EmitLexicalBlockEnd(Builder, CurLoc);
+ FnBeginRegionCount.pop_back();
+}
+
+// EmitTypeForVarWithBlocksAttr - Build up structure info for the byref.
+// See BuildByRefType.
+llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
+ uint64_t *XOffset) {
+
+ SmallVector<llvm::Value *, 5> EltTys;
+ QualType FType;
+ uint64_t FieldSize, FieldOffset;
+ unsigned FieldAlign;
+
+ llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
+ QualType Type = VD->getType();
+
+ FieldOffset = 0;
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__isa", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__forwarding", &FieldOffset));
+ FType = CGM.getContext().IntTy;
+ EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__size", &FieldOffset));
+
+ bool HasCopyAndDispose = CGM.getContext().BlockRequiresCopying(Type);
+ if (HasCopyAndDispose) {
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ EltTys.push_back(CreateMemberType(Unit, FType, "__copy_helper",
+ &FieldOffset));
+ EltTys.push_back(CreateMemberType(Unit, FType, "__destroy_helper",
+ &FieldOffset));
+ }
+
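+  // __block variables aligned more strictly than a pointer need explicit
+  // padding in the byref struct; model it as an unnamed char array so the
+  // variable field below lands at its aligned offset.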
+ CharUnits Align = CGM.getContext().getDeclAlign(VD);
+ if (Align > CGM.getContext().toCharUnitsFromBits(
+ CGM.getContext().getTargetInfo().getPointerAlign(0))) {
+ CharUnits FieldOffsetInBytes
+ = CGM.getContext().toCharUnitsFromBits(FieldOffset);
+ CharUnits AlignedOffsetInBytes
+ = FieldOffsetInBytes.RoundUpToAlignment(Align);
+ CharUnits NumPaddingBytes
+ = AlignedOffsetInBytes - FieldOffsetInBytes;
+
+ if (NumPaddingBytes.isPositive()) {
+ llvm::APInt pad(32, NumPaddingBytes.getQuantity());
+ FType = CGM.getContext().getConstantArrayType(CGM.getContext().CharTy,
+ pad, ArrayType::Normal, 0);
+ EltTys.push_back(CreateMemberType(Unit, FType, "", &FieldOffset));
+ }
+ }
+
+ FType = Type;
+ llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
+ FieldSize = CGM.getContext().getTypeSize(FType);
+ FieldAlign = CGM.getContext().toBits(Align);
+
+ *XOffset = FieldOffset;
+ FieldTy = DBuilder.createMemberType(Unit, VD->getName(), Unit,
+ 0, FieldSize, FieldAlign,
+ FieldOffset, 0, FieldTy);
+ EltTys.push_back(FieldTy);
+ FieldOffset += FieldSize;
+
+ llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
+
+ unsigned Flags = llvm::DIDescriptor::FlagBlockByrefStruct;
+
+ return DBuilder.createStructType(Unit, "", Unit, 0, FieldOffset, 0, Flags,
+ Elements);
+}
+
+/// EmitDeclare - Emit local variable declaration debug info.
+void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
+ llvm::Value *Storage,
+ unsigned ArgNo, CGBuilderTy &Builder) {
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
+
+ llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
+ llvm::DIType Ty;
+ uint64_t XOffset = 0;
+ if (VD->hasAttr<BlocksAttr>())
+ Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
+ else
+ Ty = getOrCreateType(VD->getType(), Unit);
+
+  // If there is no debug info for the type, do not emit debug info for this
+  // variable.
+ if (!Ty)
+ return;
+
+ if (llvm::Argument *Arg = dyn_cast<llvm::Argument>(Storage)) {
+    // If Storage is an aggregate returned as 'sret', let the debugger know
+    // about this.
+ if (Arg->hasStructRetAttr())
+ Ty = DBuilder.createReferenceType(Ty);
+ else if (CXXRecordDecl *Record = VD->getType()->getAsCXXRecordDecl()) {
+      // If an aggregate variable has a non-trivial destructor or a non-trivial
+      // copy constructor, it is passed indirectly. Let debug info know about
+      // this by using a reference to the aggregate type as the argument type.
+ if (!Record->hasTrivialCopyConstructor() ||
+ !Record->hasTrivialDestructor())
+ Ty = DBuilder.createReferenceType(Ty);
+ }
+ }
+
+ // Get location information.
+ unsigned Line = getLineNumber(VD->getLocation());
+ unsigned Column = getColumnNumber(VD->getLocation());
+ unsigned Flags = 0;
+ if (VD->isImplicit())
+ Flags |= llvm::DIDescriptor::FlagArtificial;
+ llvm::MDNode *Scope = LexicalBlockStack.back();
+
+ StringRef Name = VD->getName();
+ if (!Name.empty()) {
+ if (VD->hasAttr<BlocksAttr>()) {
+ CharUnits offset = CharUnits::fromQuantity(32);
+ SmallVector<llvm::Value *, 9> addr;
+ llvm::Type *Int64Ty = CGM.Int64Ty;
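+      // Build the complex address expression the debugger needs to reach the
+      // variable inside its byref struct: add the __forwarding offset,
+      // dereference, then add the variable's own offset (XOffset).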
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ // offset of __forwarding field
+ offset = CGM.getContext().toCharUnitsFromBits(
+ CGM.getContext().getTargetInfo().getPointerWidth(0));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ // offset of x field
+ offset = CGM.getContext().toCharUnitsFromBits(XOffset);
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+
+ // Create the descriptor for the variable.
+ llvm::DIVariable D =
+ DBuilder.createComplexVariable(Tag,
+ llvm::DIDescriptor(Scope),
+ VD->getName(), Unit, Line, Ty,
+ addr, ArgNo);
+
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+ return;
+ }
+ // Create the descriptor for the variable.
+ llvm::DIVariable D =
+ DBuilder.createLocalVariable(Tag, llvm::DIDescriptor(Scope),
+ Name, Unit, Line, Ty,
+ CGM.getLangOpts().Optimize, Flags, ArgNo);
+
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+ return;
+ }
+
+  // If VD is an anonymous union, then Storage represents the value for
+  // all union fields.
+ if (const RecordType *RT = dyn_cast<RecordType>(VD->getType())) {
+ const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
+ if (RD->isUnion()) {
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end();
+ I != E; ++I) {
+ FieldDecl *Field = *I;
+ llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
+ StringRef FieldName = Field->getName();
+
+ // Ignore unnamed fields. Do not ignore unnamed records.
+ if (FieldName.empty() && !isa<RecordType>(Field->getType()))
+ continue;
+
+ // Use VarDecl's Tag, Scope and Line number.
+ llvm::DIVariable D =
+ DBuilder.createLocalVariable(Tag, llvm::DIDescriptor(Scope),
+ FieldName, Unit, Line, FieldTy,
+ CGM.getLangOpts().Optimize, Flags,
+ ArgNo);
+
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
+ }
+ }
+ }
+}
+
+void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
+ llvm::Value *Storage,
+ CGBuilderTy &Builder) {
+ EmitDeclare(VD, llvm::dwarf::DW_TAG_auto_variable, Storage, 0, Builder);
+}
+
+void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
+ const VarDecl *VD, llvm::Value *Storage, CGBuilderTy &Builder,
+ const CGBlockInfo &blockInfo) {
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
+
+ if (Builder.GetInsertBlock() == 0)
+ return;
+
+ bool isByRef = VD->hasAttr<BlocksAttr>();
+
+ uint64_t XOffset = 0;
+ llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
+ llvm::DIType Ty;
+ if (isByRef)
+ Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
+ else
+ Ty = getOrCreateType(VD->getType(), Unit);
+
+ // Get location information.
+ unsigned Line = getLineNumber(VD->getLocation());
+ unsigned Column = getColumnNumber(VD->getLocation());
+
+ const llvm::TargetData &target = CGM.getTargetData();
+
+ CharUnits offset = CharUnits::fromQuantity(
+ target.getStructLayout(blockInfo.StructureType)
+ ->getElementOffset(blockInfo.getCapture(VD).getIndex()));
+
+ SmallVector<llvm::Value *, 9> addr;
+ llvm::Type *Int64Ty = CGM.Int64Ty;
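+  // The variable lives inside the block literal: start from its offset in the
+  // block layout; for __block variables, additionally dereference to the byref
+  // struct, follow the __forwarding pointer, and add the variable's offset.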
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ if (isByRef) {
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ // offset of __forwarding field
+ offset = CGM.getContext()
+ .toCharUnitsFromBits(target.getPointerSizeInBits());
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ // offset of x field
+ offset = CGM.getContext().toCharUnitsFromBits(XOffset);
+ addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ }
+
+ // Create the descriptor for the variable.
+ llvm::DIVariable D =
+ DBuilder.createComplexVariable(llvm::dwarf::DW_TAG_auto_variable,
+ llvm::DIDescriptor(LexicalBlockStack.back()),
+ VD->getName(), Unit, Line, Ty, addr);
+ // Insert an llvm.dbg.declare into the current block.
+ llvm::Instruction *Call =
+ DBuilder.insertDeclare(Storage, D, Builder.GetInsertPoint());
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column,
+ LexicalBlockStack.back()));
+}
+
+/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
+/// variable declaration.
+void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
+ unsigned ArgNo,
+ CGBuilderTy &Builder) {
+ EmitDeclare(VD, llvm::dwarf::DW_TAG_arg_variable, AI, ArgNo, Builder);
+}
+
+namespace {
+ struct BlockLayoutChunk {
+ uint64_t OffsetInBits;
+ const BlockDecl::Capture *Capture;
+ };
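+  // Order chunks by their offset within the block literal so the debug fields
+  // are emitted in layout order.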
+ bool operator<(const BlockLayoutChunk &l, const BlockLayoutChunk &r) {
+ return l.OffsetInBits < r.OffsetInBits;
+ }
+}
+
+void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
+ llvm::Value *addr,
+ CGBuilderTy &Builder) {
+ ASTContext &C = CGM.getContext();
+ const BlockDecl *blockDecl = block.getBlockDecl();
+
+ // Collect some general information about the block's location.
+ SourceLocation loc = blockDecl->getCaretLocation();
+ llvm::DIFile tunit = getOrCreateFile(loc);
+ unsigned line = getLineNumber(loc);
+ unsigned column = getColumnNumber(loc);
+
+ // Build the debug-info type for the block literal.
+ getContextDescriptor(cast<Decl>(blockDecl->getDeclContext()));
+
+ const llvm::StructLayout *blockLayout =
+ CGM.getTargetData().getStructLayout(block.StructureType);
+
+ SmallVector<llvm::Value*, 16> fields;
+ fields.push_back(createFieldType("__isa", C.VoidPtrTy, 0, loc, AS_public,
+ blockLayout->getElementOffsetInBits(0),
+ tunit, tunit));
+ fields.push_back(createFieldType("__flags", C.IntTy, 0, loc, AS_public,
+ blockLayout->getElementOffsetInBits(1),
+ tunit, tunit));
+ fields.push_back(createFieldType("__reserved", C.IntTy, 0, loc, AS_public,
+ blockLayout->getElementOffsetInBits(2),
+ tunit, tunit));
+ fields.push_back(createFieldType("__FuncPtr", C.VoidPtrTy, 0, loc, AS_public,
+ blockLayout->getElementOffsetInBits(3),
+ tunit, tunit));
+ fields.push_back(createFieldType("__descriptor",
+ C.getPointerType(block.NeedsCopyDispose ?
+ C.getBlockDescriptorExtendedType() :
+ C.getBlockDescriptorType()),
+ 0, loc, AS_public,
+ blockLayout->getElementOffsetInBits(4),
+ tunit, tunit));
+
+ // We want to sort the captures by offset, not because DWARF
+ // requires this, but because we're paranoid about debuggers.
+ SmallVector<BlockLayoutChunk, 8> chunks;
+
+ // 'this' capture.
+ if (blockDecl->capturesCXXThis()) {
+ BlockLayoutChunk chunk;
+ chunk.OffsetInBits =
+ blockLayout->getElementOffsetInBits(block.CXXThisIndex);
+ chunk.Capture = 0;
+ chunks.push_back(chunk);
+ }
+
+ // Variable captures.
+ for (BlockDecl::capture_const_iterator
+ i = blockDecl->capture_begin(), e = blockDecl->capture_end();
+ i != e; ++i) {
+ const BlockDecl::Capture &capture = *i;
+ const VarDecl *variable = capture.getVariable();
+ const CGBlockInfo::Capture &captureInfo = block.getCapture(variable);
+
+ // Ignore constant captures.
+ if (captureInfo.isConstant())
+ continue;
+
+ BlockLayoutChunk chunk;
+ chunk.OffsetInBits =
+ blockLayout->getElementOffsetInBits(captureInfo.getIndex());
+ chunk.Capture = &capture;
+ chunks.push_back(chunk);
+ }
+
+ // Sort by offset.
+ llvm::array_pod_sort(chunks.begin(), chunks.end());
+
+ for (SmallVectorImpl<BlockLayoutChunk>::iterator
+ i = chunks.begin(), e = chunks.end(); i != e; ++i) {
+ uint64_t offsetInBits = i->OffsetInBits;
+ const BlockDecl::Capture *capture = i->Capture;
+
+ // If we have a null capture, this must be the C++ 'this' capture.
+ if (!capture) {
+ const CXXMethodDecl *method =
+ cast<CXXMethodDecl>(blockDecl->getNonClosureContext());
+ QualType type = method->getThisType(C);
+
+ fields.push_back(createFieldType("this", type, 0, loc, AS_public,
+ offsetInBits, tunit, tunit));
+ continue;
+ }
+
+ const VarDecl *variable = capture->getVariable();
+ StringRef name = variable->getName();
+
+ llvm::DIType fieldType;
+ if (capture->isByRef()) {
+ std::pair<uint64_t,unsigned> ptrInfo = C.getTypeInfo(C.VoidPtrTy);
+
+ // FIXME: this creates a second copy of this type!
+ uint64_t xoffset;
+ fieldType = EmitTypeForVarWithBlocksAttr(variable, &xoffset);
+ fieldType = DBuilder.createPointerType(fieldType, ptrInfo.first);
+ fieldType = DBuilder.createMemberType(tunit, name, tunit, line,
+ ptrInfo.first, ptrInfo.second,
+ offsetInBits, 0, fieldType);
+ } else {
+ fieldType = createFieldType(name, variable->getType(), 0,
+ loc, AS_public, offsetInBits, tunit, tunit);
+ }
+ fields.push_back(fieldType);
+ }
+
+ SmallString<36> typeName;
+ llvm::raw_svector_ostream(typeName)
+ << "__block_literal_" << CGM.getUniqueBlockCount();
+
+ llvm::DIArray fieldsArray = DBuilder.getOrCreateArray(fields);
+
+ llvm::DIType type =
+ DBuilder.createStructType(tunit, typeName.str(), tunit, line,
+ CGM.getContext().toBits(block.BlockSize),
+ CGM.getContext().toBits(block.BlockAlign),
+ 0, fieldsArray);
+ type = DBuilder.createPointerType(type, CGM.PointerWidthInBits);
+
+ // Get overall information about the block.
+ unsigned flags = llvm::DIDescriptor::FlagArtificial;
+ llvm::MDNode *scope = LexicalBlockStack.back();
+ StringRef name = ".block_descriptor";
+
+ // Create the descriptor for the parameter.
+ llvm::DIVariable debugVar =
+ DBuilder.createLocalVariable(llvm::dwarf::DW_TAG_arg_variable,
+ llvm::DIDescriptor(scope),
+ name, tunit, line, type,
+ CGM.getLangOpts().Optimize, flags,
+ cast<llvm::Argument>(addr)->getArgNo() + 1);
+
+ // Insert an llvm.dbg.value into the current block.
+ llvm::Instruction *declare =
+ DBuilder.insertDbgValueIntrinsic(addr, 0, debugVar,
+ Builder.GetInsertBlock());
+ declare->setDebugLoc(llvm::DebugLoc::get(line, column, scope));
+}
+
+/// EmitGlobalVariable - Emit information about a global variable.
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+ const VarDecl *D) {
+ // Create global variable debug descriptor.
+ llvm::DIFile Unit = getOrCreateFile(D->getLocation());
+ unsigned LineNo = getLineNumber(D->getLocation());
+
+ setLocation(D->getLocation());
+
+ QualType T = D->getType();
+ if (T->isIncompleteArrayType()) {
+
+ // CodeGen turns int[] into int[1] so we'll do the same here.
+ llvm::APSInt ConstVal(32);
+
+ ConstVal = 1;
+ QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
+
+ T = CGM.getContext().getConstantArrayType(ET, ConstVal,
+ ArrayType::Normal, 0);
+ }
+ StringRef DeclName = D->getName();
+ StringRef LinkageName;
+ if (D->getDeclContext() && !isa<FunctionDecl>(D->getDeclContext())
+ && !isa<ObjCMethodDecl>(D->getDeclContext()))
+ LinkageName = Var->getName();
+ if (LinkageName == DeclName)
+ LinkageName = StringRef();
+ llvm::DIDescriptor DContext =
+ getContextDescriptor(dyn_cast<Decl>(D->getDeclContext()));
+ DBuilder.createStaticVariable(DContext, DeclName, LinkageName,
+ Unit, LineNo, getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(), Var);
+}
+
+/// EmitGlobalVariable - Emit information about an Objective-C interface.
+void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
+ ObjCInterfaceDecl *ID) {
+ // Create global variable debug descriptor.
+ llvm::DIFile Unit = getOrCreateFile(ID->getLocation());
+ unsigned LineNo = getLineNumber(ID->getLocation());
+
+ StringRef Name = ID->getName();
+
+ QualType T = CGM.getContext().getObjCInterfaceType(ID);
+ if (T->isIncompleteArrayType()) {
+
+ // CodeGen turns int[] into int[1] so we'll do the same here.
+ llvm::APSInt ConstVal(32);
+
+ ConstVal = 1;
+ QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
+
+ T = CGM.getContext().getConstantArrayType(ET, ConstVal,
+ ArrayType::Normal, 0);
+ }
+
+ DBuilder.createGlobalVariable(Name, Unit, LineNo,
+ getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(), Var);
+}
+
+/// EmitGlobalVariable - Emit global variable's debug info.
+void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
+ llvm::Constant *Init) {
+ // Create the descriptor for the variable.
+ llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
+ StringRef Name = VD->getName();
+ llvm::DIType Ty = getOrCreateType(VD->getType(), Unit);
+ if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(VD)) {
+ if (const EnumDecl *ED = dyn_cast<EnumDecl>(ECD->getDeclContext()))
+ Ty = CreateEnumType(ED);
+ }
+ // Do not use DIGlobalVariable for enums.
+ if (Ty.getTag() == llvm::dwarf::DW_TAG_enumeration_type)
+ return;
+ DBuilder.createStaticVariable(Unit, Name, Name, Unit,
+ getLineNumber(VD->getLocation()),
+ Ty, true, Init);
+}
+
+/// getOrCreateNameSpace - Return the namespace descriptor for the given
+/// namespace decl.
+llvm::DINameSpace
+CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) {
+ llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH>::iterator I =
+ NameSpaceCache.find(NSDecl);
+ if (I != NameSpaceCache.end())
+ return llvm::DINameSpace(cast<llvm::MDNode>(I->second));
+
+ unsigned LineNo = getLineNumber(NSDecl->getLocation());
+ llvm::DIFile FileD = getOrCreateFile(NSDecl->getLocation());
+ llvm::DIDescriptor Context =
+ getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext()));
+ llvm::DINameSpace NS =
+ DBuilder.createNameSpace(Context, NSDecl->getName(), FileD, LineNo);
+ NameSpaceCache[NSDecl] = llvm::WeakVH(NS);
+ return NS;
+}
+
+void CGDebugInfo::finalize(void) {
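+  // Resolve forward declarations: replace each cached forward-declared type
+  // with its completed definition, if one was built during compilation.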
+ for (std::vector<std::pair<void *, llvm::WeakVH> >::const_iterator VI
+ = ReplaceMap.begin(), VE = ReplaceMap.end(); VI != VE; ++VI) {
+ llvm::DIType Ty, RepTy;
+ // Verify that the debug info still exists.
+ if (&*VI->second)
+ Ty = llvm::DIType(cast<llvm::MDNode>(VI->second));
+
+ llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
+ TypeCache.find(VI->first);
+ if (it != TypeCache.end()) {
+ // Verify that the debug info still exists.
+ if (&*it->second)
+ RepTy = llvm::DIType(cast<llvm::MDNode>(it->second));
+ }
+
+ if (Ty.Verify() && Ty.isForwardDecl() && RepTy.Verify()) {
+ Ty.replaceAllUsesWith(RepTy);
+ }
+ }
+ DBuilder.finalize();
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
new file mode 100644
index 0000000..ec7705c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDebugInfo.h
@@ -0,0 +1,322 @@
+//===--- CGDebugInfo.h - DebugInfo for LLVM CodeGen -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the source-level debug info generator for LLVM translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGDEBUGINFO_H
+#define CLANG_CODEGEN_CGDEBUGINFO_H
+
+#include "clang/AST/Type.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Analysis/DIBuilder.h"
+#include "llvm/Support/ValueHandle.h"
+#include "llvm/Support/Allocator.h"
+
+#include "CGBuilder.h"
+
+namespace llvm {
+ class MDNode;
+}
+
+namespace clang {
+ class VarDecl;
+ class ObjCInterfaceDecl;
+ class ClassTemplateSpecializationDecl;
+ class GlobalDecl;
+
+namespace CodeGen {
+ class CodeGenModule;
+ class CodeGenFunction;
+ class CGBlockInfo;
+
+/// CGDebugInfo - This class gathers all debug information during compilation
+/// and is responsible for emitting it to llvm globals or passing it directly
+/// to the backend.
+class CGDebugInfo {
+ CodeGenModule &CGM;
+ llvm::DIBuilder DBuilder;
+ llvm::DICompileUnit TheCU;
+ SourceLocation CurLoc, PrevLoc;
+ llvm::DIType VTablePtrType;
+
+ /// TypeCache - Cache of previously constructed Types.
+ llvm::DenseMap<void *, llvm::WeakVH> TypeCache;
+
+  /// CompletedTypeCache - Cache of previously constructed complete RecordTypes.
+ llvm::DenseMap<void *, llvm::WeakVH> CompletedTypeCache;
+
+ /// ReplaceMap - Cache of forward declared types to RAUW at the end of
+ /// compilation.
+  std::vector<std::pair<void *, llvm::WeakVH> > ReplaceMap;
+
+ bool BlockLiteralGenericSet;
+ llvm::DIType BlockLiteralGeneric;
+
+ // LexicalBlockStack - Keep track of our current nested lexical block.
+ std::vector<llvm::TrackingVH<llvm::MDNode> > LexicalBlockStack;
+ llvm::DenseMap<const Decl *, llvm::WeakVH> RegionMap;
+ // FnBeginRegionCount - Keep track of LexicalBlockStack counter at the
+ // beginning of a function. This is used to pop unbalanced regions at
+ // the end of a function.
+ std::vector<unsigned> FnBeginRegionCount;
+
+  /// DebugInfoNames - This is storage for names that are constructed on
+  /// demand, e.g. C++ destructors and C++ operators.
+ llvm::BumpPtrAllocator DebugInfoNames;
+ StringRef CWDName;
+
+ llvm::DenseMap<const char *, llvm::WeakVH> DIFileCache;
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH> SPCache;
+ llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH> NameSpaceCache;
+
+ /// Helper functions for getOrCreateType.
+ llvm::DIType CreateType(const BuiltinType *Ty);
+ llvm::DIType CreateType(const ComplexType *Ty);
+ llvm::DIType CreateQualifiedType(QualType Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const TypedefType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const ObjCObjectPointerType *Ty,
+ llvm::DIFile F);
+ llvm::DIType CreateType(const PointerType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const BlockPointerType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const FunctionType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const RecordType *Ty);
+ llvm::DIType CreateLimitedType(const RecordType *Ty);
+ llvm::DIType CreateType(const ObjCInterfaceType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const ObjCObjectType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const VectorType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const ArrayType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const LValueReferenceType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const RValueReferenceType *Ty, llvm::DIFile Unit);
+ llvm::DIType CreateType(const MemberPointerType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const AtomicType *Ty, llvm::DIFile F);
+ llvm::DIType CreateEnumType(const EnumDecl *ED);
+ llvm::DIType getTypeOrNull(const QualType);
+ llvm::DIType getCompletedTypeOrNull(const QualType);
+ llvm::DIType getOrCreateMethodType(const CXXMethodDecl *Method,
+ llvm::DIFile F);
+ llvm::DIType getOrCreateFunctionType(const Decl *D, QualType FnType,
+ llvm::DIFile F);
+ llvm::DIType getOrCreateVTablePtrType(llvm::DIFile F);
+ llvm::DINameSpace getOrCreateNameSpace(const NamespaceDecl *N);
+ llvm::DIType CreatePointeeType(QualType PointeeTy, llvm::DIFile F);
+ llvm::DIType CreatePointerLikeType(unsigned Tag,
+ const Type *Ty, QualType PointeeTy,
+ llvm::DIFile F);
+
+ llvm::DISubprogram CreateCXXMemberFunction(const CXXMethodDecl *Method,
+ llvm::DIFile F,
+ llvm::DIType RecordTy);
+
+ void CollectCXXMemberFunctions(const CXXRecordDecl *Decl,
+ llvm::DIFile F,
+ SmallVectorImpl<llvm::Value *> &E,
+ llvm::DIType T);
+
+ void CollectCXXFriends(const CXXRecordDecl *Decl,
+ llvm::DIFile F,
+ SmallVectorImpl<llvm::Value *> &EltTys,
+ llvm::DIType RecordTy);
+
+ void CollectCXXBases(const CXXRecordDecl *Decl,
+ llvm::DIFile F,
+ SmallVectorImpl<llvm::Value *> &EltTys,
+ llvm::DIType RecordTy);
+
+ llvm::DIArray
+ CollectTemplateParams(const TemplateParameterList *TPList,
+ const TemplateArgumentList &TAList,
+ llvm::DIFile Unit);
+ llvm::DIArray
+ CollectFunctionTemplateParams(const FunctionDecl *FD, llvm::DIFile Unit);
+ llvm::DIArray
+ CollectCXXTemplateParams(const ClassTemplateSpecializationDecl *TS,
+ llvm::DIFile F);
+
+ llvm::DIType createFieldType(StringRef name, QualType type,
+ uint64_t sizeInBitsOverride, SourceLocation loc,
+ AccessSpecifier AS, uint64_t offsetInBits,
+ llvm::DIFile tunit,
+ llvm::DIDescriptor scope);
+ void CollectRecordStaticVars(const RecordDecl *, llvm::DIType);
+ void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile F,
+ SmallVectorImpl<llvm::Value *> &E,
+ llvm::DIType RecordTy);
+
+ void CollectVTableInfo(const CXXRecordDecl *Decl,
+ llvm::DIFile F,
+ SmallVectorImpl<llvm::Value *> &EltTys);
+
+ // CreateLexicalBlock - Create a new lexical block node and push it on
+ // the stack.
+ void CreateLexicalBlock(SourceLocation Loc);
+
+public:
+ CGDebugInfo(CodeGenModule &CGM);
+ ~CGDebugInfo();
+
+ void finalize(void);
+
+  /// setLocation - Update the current source location. If \arg Loc is
+  /// invalid, it is ignored.
+ void setLocation(SourceLocation Loc);
+
+ /// EmitLocation - Emit metadata to indicate a change in line/column
+ /// information in the source file.
+ void EmitLocation(CGBuilderTy &Builder, SourceLocation Loc);
+
+  /// EmitFunctionStart - Constructs the debug code for entering a function and
+  /// pushes the function's scope onto the lexical block stack.
+ void EmitFunctionStart(GlobalDecl GD, QualType FnType,
+ llvm::Function *Fn, CGBuilderTy &Builder);
+
+ /// EmitFunctionEnd - Constructs the debug code for exiting a function.
+ void EmitFunctionEnd(CGBuilderTy &Builder);
+
+ /// EmitLexicalBlockStart - Emit metadata to indicate the beginning of a
+ /// new lexical block and push the block onto the stack.
+ void EmitLexicalBlockStart(CGBuilderTy &Builder, SourceLocation Loc);
+
+ /// EmitLexicalBlockEnd - Emit metadata to indicate the end of a new lexical
+ /// block and pop the current block.
+ void EmitLexicalBlockEnd(CGBuilderTy &Builder, SourceLocation Loc);
+
+ /// EmitDeclareOfAutoVariable - Emit call to llvm.dbg.declare for an automatic
+ /// variable declaration.
+ void EmitDeclareOfAutoVariable(const VarDecl *Decl, llvm::Value *AI,
+ CGBuilderTy &Builder);
+
+ /// EmitDeclareOfBlockDeclRefVariable - Emit call to llvm.dbg.declare for an
+ /// imported variable declaration in a block.
+ void EmitDeclareOfBlockDeclRefVariable(const VarDecl *variable,
+ llvm::Value *storage,
+ CGBuilderTy &Builder,
+ const CGBlockInfo &blockInfo);
+
+ /// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
+ /// variable declaration.
+ void EmitDeclareOfArgVariable(const VarDecl *Decl, llvm::Value *AI,
+ unsigned ArgNo, CGBuilderTy &Builder);
+
+ /// EmitDeclareOfBlockLiteralArgVariable - Emit call to
+ /// llvm.dbg.declare for the block-literal argument to a block
+ /// invocation function.
+ void EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
+ llvm::Value *addr,
+ CGBuilderTy &Builder);
+
+ /// EmitGlobalVariable - Emit information about a global variable.
+ void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl);
+
+  /// EmitGlobalVariable - Emit information about an Objective-C interface.
+ void EmitGlobalVariable(llvm::GlobalVariable *GV, ObjCInterfaceDecl *Decl);
+
+ /// EmitGlobalVariable - Emit global variable's debug info.
+ void EmitGlobalVariable(const ValueDecl *VD, llvm::Constant *Init);
+
+ /// getOrCreateRecordType - Emit record type's standalone debug info.
+ llvm::DIType getOrCreateRecordType(QualType Ty, SourceLocation L);
+
+  /// getOrCreateInterfaceType - Emit standalone debug info for an Objective-C
+  /// interface type.
+ llvm::DIType getOrCreateInterfaceType(QualType Ty,
+ SourceLocation Loc);
+
+private:
+ /// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
+ void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
+ unsigned ArgNo, CGBuilderTy &Builder);
+
+ // EmitTypeForVarWithBlocksAttr - Build up structure info for the byref.
+ // See BuildByRefType.
+ llvm::DIType EmitTypeForVarWithBlocksAttr(const ValueDecl *VD,
+ uint64_t *OffSet);
+
+ /// getContextDescriptor - Get context info for the decl.
+ llvm::DIDescriptor getContextDescriptor(const Decl *Decl);
+
+ /// createRecordFwdDecl - Create a forward decl for a RecordType in a given
+ /// context.
+ llvm::DIType createRecordFwdDecl(const RecordDecl *, llvm::DIDescriptor);
+
+ /// createContextChain - Create a set of decls for the context chain.
+ llvm::DIDescriptor createContextChain(const Decl *Decl);
+
+ /// getCurrentDirname - Return current directory name.
+ StringRef getCurrentDirname();
+
+ /// CreateCompileUnit - Create new compile unit.
+ void CreateCompileUnit();
+
+ /// getOrCreateFile - Get the file debug info descriptor for the input
+ /// location.
+ llvm::DIFile getOrCreateFile(SourceLocation Loc);
+
+ /// getOrCreateMainFile - Get the file info for main compile unit.
+ llvm::DIFile getOrCreateMainFile();
+
+ /// getOrCreateType - Get the type from the cache or create a new type if
+ /// necessary.
+ llvm::DIType getOrCreateType(QualType Ty, llvm::DIFile F);
+
+ /// getOrCreateLimitedType - Get the type from the cache or create a new
+ /// partial type if necessary.
+ llvm::DIType getOrCreateLimitedType(QualType Ty, llvm::DIFile F);
+
+ /// CreateTypeNode - Create type metadata for a source language type.
+ llvm::DIType CreateTypeNode(QualType Ty, llvm::DIFile F);
+
+ /// CreateLimitedTypeNode - Create type metadata for a source language
+ /// type, but only partial types for records.
+ llvm::DIType CreateLimitedTypeNode(QualType Ty, llvm::DIFile F);
+
+ /// CreateMemberType - Create new member and increase Offset by FType's size.
+ llvm::DIType CreateMemberType(llvm::DIFile Unit, QualType FType,
+ StringRef Name, uint64_t *Offset);
+
+ /// getFunctionDeclaration - Return debug info descriptor to describe method
+ /// declaration for the given method definition.
+ llvm::DISubprogram getFunctionDeclaration(const Decl *D);
+
+  /// getFunctionName - Get the function name for the given FunctionDecl. If
+  /// the name is constructed on demand (e.g. a C++ destructor) then it is
+  /// stored on the side.
+ StringRef getFunctionName(const FunctionDecl *FD);
+
+ /// getObjCMethodName - Returns the unmangled name of an Objective-C method.
+ /// This is the display name for the debugging info.
+ StringRef getObjCMethodName(const ObjCMethodDecl *FD);
+
+ /// getSelectorName - Return selector name. This is used for debugging
+ /// info.
+ StringRef getSelectorName(Selector S);
+
+ /// getClassName - Get class name including template argument list.
+ StringRef getClassName(const RecordDecl *RD);
+
+ /// getVTableName - Get vtable name for the given Class.
+ StringRef getVTableName(const CXXRecordDecl *Decl);
+
+ /// getLineNumber - Get line number for the location. If location is invalid
+ /// then use current location.
+ unsigned getLineNumber(SourceLocation Loc);
+
+ /// getColumnNumber - Get column number for the location. If location is
+ /// invalid then use current location.
+ unsigned getColumnNumber(SourceLocation Loc);
+};
+} // namespace CodeGen
+} // namespace clang
+
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
new file mode 100644
index 0000000..8c154f0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDecl.cpp
@@ -0,0 +1,1560 @@
+//===--- CGDecl.cpp - Emit LLVM Code for declarations ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Decl nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGOpenCLRuntime.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Type.h"
+using namespace clang;
+using namespace CodeGen;
+
+
+void CodeGenFunction::EmitDecl(const Decl &D) {
+ switch (D.getKind()) {
+ case Decl::TranslationUnit:
+ case Decl::Namespace:
+ case Decl::UnresolvedUsingTypename:
+ case Decl::ClassTemplateSpecialization:
+ case Decl::ClassTemplatePartialSpecialization:
+ case Decl::TemplateTypeParm:
+ case Decl::UnresolvedUsingValue:
+ case Decl::NonTypeTemplateParm:
+ case Decl::CXXMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ case Decl::CXXConversion:
+ case Decl::Field:
+ case Decl::IndirectField:
+ case Decl::ObjCIvar:
+ case Decl::ObjCAtDefsField:
+ case Decl::ParmVar:
+ case Decl::ImplicitParam:
+ case Decl::ClassTemplate:
+ case Decl::FunctionTemplate:
+ case Decl::TypeAliasTemplate:
+ case Decl::TemplateTemplateParm:
+ case Decl::ObjCMethod:
+ case Decl::ObjCCategory:
+ case Decl::ObjCProtocol:
+ case Decl::ObjCInterface:
+ case Decl::ObjCCategoryImpl:
+ case Decl::ObjCImplementation:
+ case Decl::ObjCProperty:
+ case Decl::ObjCCompatibleAlias:
+ case Decl::AccessSpec:
+ case Decl::LinkageSpec:
+ case Decl::ObjCPropertyImpl:
+ case Decl::FileScopeAsm:
+ case Decl::Friend:
+ case Decl::FriendTemplate:
+ case Decl::Block:
+ case Decl::ClassScopeFunctionSpecialization:
+ llvm_unreachable("Declaration should not be in declstmts!");
+ case Decl::Function: // void X();
+ case Decl::Record: // struct/union/class X;
+ case Decl::Enum: // enum X;
+ case Decl::EnumConstant: // enum ? { X = ? }
+ case Decl::CXXRecord: // struct/union/class X; [C++]
+ case Decl::Using: // using X; [C++]
+ case Decl::UsingShadow:
+ case Decl::UsingDirective: // using namespace X; [C++]
+ case Decl::NamespaceAlias:
+ case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
+ case Decl::Label: // __label__ x;
+ case Decl::Import:
+ // None of these decls require codegen support.
+ return;
+
+ case Decl::Var: {
+ const VarDecl &VD = cast<VarDecl>(D);
+ assert(VD.isLocalVarDecl() &&
+ "Should not see file-scope variables inside a function!");
+ return EmitVarDecl(VD);
+ }
+
+ case Decl::Typedef: // typedef int X;
+ case Decl::TypeAlias: { // using X = int; [C++0x]
+ const TypedefNameDecl &TD = cast<TypedefNameDecl>(D);
+ QualType Ty = TD.getUnderlyingType();
+
+ if (Ty->isVariablyModifiedType())
+ EmitVariablyModifiedType(Ty);
+ }
+ }
+}
+
+/// EmitVarDecl - This method handles emission of any variable declaration
+/// inside a function, including static vars etc.
+void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
+ switch (D.getStorageClass()) {
+ case SC_None:
+ case SC_Auto:
+ case SC_Register:
+ return EmitAutoVarDecl(D);
+ case SC_Static: {
+ llvm::GlobalValue::LinkageTypes Linkage =
+ llvm::GlobalValue::InternalLinkage;
+
+ // If the function definition has some sort of weak linkage, its
+ // static variables should also be weak so that they get properly
+ // uniqued. We can't do this in C, though, because there's no
+ // standard way to agree on which variables are the same (i.e.
+ // there's no mangling).
+ if (getContext().getLangOpts().CPlusPlus)
+ if (llvm::GlobalValue::isWeakForLinker(CurFn->getLinkage()))
+ Linkage = CurFn->getLinkage();
+
+ return EmitStaticVarDecl(D, Linkage);
+ }
+ case SC_Extern:
+ case SC_PrivateExtern:
+ // Don't emit it now, allow it to be emitted lazily on its first use.
+ return;
+ case SC_OpenCLWorkGroupLocal:
+ return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);
+ }
+
+ llvm_unreachable("Unknown storage class");
+}
+
+static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
+ const char *Separator) {
+ CodeGenModule &CGM = CGF.CGM;
+ if (CGF.getContext().getLangOpts().CPlusPlus) {
+ StringRef Name = CGM.getMangledName(&D);
+ return Name.str();
+ }
+
+ std::string ContextName;
+ if (!CGF.CurFuncDecl) {
+ // Better be in a block declared in global scope.
+ const NamedDecl *ND = cast<NamedDecl>(&D);
+ const DeclContext *DC = ND->getDeclContext();
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC)) {
+ MangleBuffer Name;
+ CGM.getBlockMangledName(GlobalDecl(), Name, BD);
+ ContextName = Name.getString();
+ }
+ else
+ llvm_unreachable("Unknown context for block static var decl");
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl)) {
+ StringRef Name = CGM.getMangledName(FD);
+ ContextName = Name.str();
+ } else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl))
+ ContextName = CGF.CurFn->getName();
+ else
+ llvm_unreachable("Unknown context for static var decl");
+
+ return ContextName + Separator + D.getNameAsString();
+}
+
+llvm::GlobalVariable *
+CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
+ const char *Separator,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ QualType Ty = D.getType();
+ assert(Ty->isConstantSizeType() && "VLAs can't be static");
+
+ // Use the label if the variable is renamed with the asm-label extension.
+ std::string Name;
+ if (D.hasAttr<AsmLabelAttr>())
+ Name = CGM.getMangledName(&D);
+ else
+ Name = GetStaticDeclName(*this, D, Separator);
+
+ llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), LTy,
+ Ty.isConstant(getContext()), Linkage,
+ CGM.EmitNullConstant(D.getType()), Name, 0,
+ D.isThreadSpecified(),
+ CGM.getContext().getTargetAddressSpace(Ty));
+ GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+ if (Linkage != llvm::GlobalValue::InternalLinkage)
+ GV->setVisibility(CurFn->getVisibility());
+ return GV;
+}
+
+/// hasNontrivialDestruction - Determine whether a type's destruction is
+/// non-trivial. If so, and the variable uses static initialization, we must
+/// register its destructor to run on exit.
+static bool hasNontrivialDestruction(QualType T) {
+ CXXRecordDecl *RD = T->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ return RD && !RD->hasTrivialDestructor();
+}
+
+/// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
+/// global variable that has already been created for it. If the initializer
+/// has a different type than GV does, this may free GV and return a different
+/// one. Otherwise it just returns GV.
+llvm::GlobalVariable *
+CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
+ llvm::GlobalVariable *GV) {
+ llvm::Constant *Init = CGM.EmitConstantInit(D, this);
+
+ // If constant emission failed, then this should be a C++ static
+ // initializer.
+ if (!Init) {
+ if (!getContext().getLangOpts().CPlusPlus)
+ CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
+ else if (Builder.GetInsertBlock()) {
+ // Since we have a static initializer, this global variable can't
+ // be constant.
+ GV->setConstant(false);
+
+ EmitCXXGuardedInit(D, GV, /*PerformInit*/true);
+ }
+ return GV;
+ }
+
+ // The initializer may differ in type from the global. Rewrite
+ // the global to match the initializer. (We have to do this
+ // because some types, like unions, can't be completely represented
+ // in the LLVM type system.)
+ if (GV->getType()->getElementType() != Init->getType()) {
+ llvm::GlobalVariable *OldGV = GV;
+
+ GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
+ OldGV->isConstant(),
+ OldGV->getLinkage(), Init, "",
+ /*InsertBefore*/ OldGV,
+ D.isThreadSpecified(),
+ CGM.getContext().getTargetAddressSpace(D.getType()));
+ GV->setVisibility(OldGV->getVisibility());
+
+ // Steal the name of the old global
+ GV->takeName(OldGV);
+
+ // Replace all uses of the old global with the new global
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtrForOldDecl);
+
+ // Erase the old global, since it is no longer used.
+ OldGV->eraseFromParent();
+ }
+
+ GV->setConstant(CGM.isTypeConstant(D.getType(), true));
+ GV->setInitializer(Init);
+
+ if (hasNontrivialDestruction(D.getType())) {
+ // We have a constant initializer, but a nontrivial destructor. We still
+ // need to perform a guarded "initialization" in order to register the
+ // destructor.
+ EmitCXXGuardedInit(D, GV, /*PerformInit*/false);
+ }
+
+ return GV;
+}
+
+void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+
+ // Check to see if we already have a global variable for this
+ // declaration. This can happen when double-emitting function
+ // bodies, e.g. with complete and base constructors.
+ llvm::Constant *addr =
+ CGM.getStaticLocalDeclAddress(&D);
+
+ llvm::GlobalVariable *var;
+ if (addr) {
+ var = cast<llvm::GlobalVariable>(addr->stripPointerCasts());
+ } else {
+ addr = var = CreateStaticVarDecl(D, ".", Linkage);
+ }
+
+ // Store into LocalDeclMap before generating initializer to handle
+ // circular references.
+ DMEntry = addr;
+ CGM.setStaticLocalDeclAddress(&D, addr);
+
+ // We can't have a VLA here, but we can have a pointer to a VLA,
+ // even though that doesn't really make any sense.
+ // Make sure to evaluate VLA bounds now so that we have them for later.
+ if (D.getType()->isVariablyModifiedType())
+ EmitVariablyModifiedType(D.getType());
+
+ // Save the type in case adding the initializer forces a type change.
+ llvm::Type *expectedType = addr->getType();
+
+ // If this value has an initializer, emit it.
+ if (D.getInit())
+ var = AddInitializerToStaticVarDecl(D, var);
+
+ var->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+
+ if (D.hasAttr<AnnotateAttr>())
+ CGM.AddGlobalAnnotations(&D, var);
+
+ if (const SectionAttr *SA = D.getAttr<SectionAttr>())
+ var->setSection(SA->getName());
+
+ if (D.hasAttr<UsedAttr>())
+ CGM.AddUsedGlobal(var);
+
+ // We may have to cast the constant because of the initializer
+ // mismatch above.
+ //
+ // FIXME: It is really dangerous to store this in the map; if anyone
+ // RAUW's the GV uses of this constant will be invalid.
+ llvm::Constant *castedAddr = llvm::ConstantExpr::getBitCast(var, expectedType);
+ DMEntry = castedAddr;
+ CGM.setStaticLocalDeclAddress(&D, castedAddr);
+
+ // Emit global variable debug descriptor for static vars.
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI) {
+ DI->setLocation(D.getLocation());
+ DI->EmitGlobalVariable(var, &D);
+ }
+}
+
+namespace {
+ struct DestroyObject : EHScopeStack::Cleanup {
+ DestroyObject(llvm::Value *addr, QualType type,
+ CodeGenFunction::Destroyer *destroyer,
+ bool useEHCleanupForArray)
+ : addr(addr), type(type), destroyer(destroyer),
+ useEHCleanupForArray(useEHCleanupForArray) {}
+
+ llvm::Value *addr;
+ QualType type;
+ CodeGenFunction::Destroyer *destroyer;
+ bool useEHCleanupForArray;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Don't use an EH cleanup recursively from an EH cleanup.
+ bool useEHCleanupForArray =
+ flags.isForNormalCleanup() && this->useEHCleanupForArray;
+
+ CGF.emitDestroy(addr, type, destroyer, useEHCleanupForArray);
+ }
+ };
+
+ struct DestroyNRVOVariable : EHScopeStack::Cleanup {
+ DestroyNRVOVariable(llvm::Value *addr,
+ const CXXDestructorDecl *Dtor,
+ llvm::Value *NRVOFlag)
+ : Dtor(Dtor), NRVOFlag(NRVOFlag), Loc(addr) {}
+
+ const CXXDestructorDecl *Dtor;
+ llvm::Value *NRVOFlag;
+ llvm::Value *Loc;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Along the exceptions path we always execute the dtor.
+ bool NRVO = flags.isForNormalCleanup() && NRVOFlag;
+
+ llvm::BasicBlock *SkipDtorBB = 0;
+ if (NRVO) {
+ // If we exited via NRVO, we skip the destructor call.
+ llvm::BasicBlock *RunDtorBB = CGF.createBasicBlock("nrvo.unused");
+ SkipDtorBB = CGF.createBasicBlock("nrvo.skipdtor");
+ llvm::Value *DidNRVO = CGF.Builder.CreateLoad(NRVOFlag, "nrvo.val");
+ CGF.Builder.CreateCondBr(DidNRVO, SkipDtorBB, RunDtorBB);
+ CGF.EmitBlock(RunDtorBB);
+ }
+
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, Loc);
+
+ if (NRVO) CGF.EmitBlock(SkipDtorBB);
+ }
+ };
+
+ struct CallStackRestore : EHScopeStack::Cleanup {
+ llvm::Value *Stack;
+ CallStackRestore(llvm::Value *Stack) : Stack(Stack) {}
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ llvm::Value *V = CGF.Builder.CreateLoad(Stack);
+ llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
+ CGF.Builder.CreateCall(F, V);
+ }
+ };
+
+ struct ExtendGCLifetime : EHScopeStack::Cleanup {
+ const VarDecl &Var;
+ ExtendGCLifetime(const VarDecl *var) : Var(*var) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Compute the address of the local variable, in case it's a
+ // byref or something.
+ DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
+ Var.getType(), VK_LValue, SourceLocation());
+ llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE));
+ CGF.EmitExtendGCLifetime(value);
+ }
+ };
+
+ struct CallCleanupFunction : EHScopeStack::Cleanup {
+ llvm::Constant *CleanupFn;
+ const CGFunctionInfo &FnInfo;
+ const VarDecl &Var;
+
+ CallCleanupFunction(llvm::Constant *CleanupFn, const CGFunctionInfo *Info,
+ const VarDecl *Var)
+ : CleanupFn(CleanupFn), FnInfo(*Info), Var(*Var) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
+ Var.getType(), VK_LValue, SourceLocation());
+ // Compute the address of the local variable, in case it's a byref
+ // or something.
+ llvm::Value *Addr = CGF.EmitDeclRefLValue(&DRE).getAddress();
+
+ // In some cases, the type of the function argument will be different from
+ // the type of the pointer. An example of this is
+ // void f(void* arg);
+ // __attribute__((cleanup(f))) void *g;
+ //
+ // To fix this we insert a bitcast here.
+ QualType ArgTy = FnInfo.arg_begin()->type;
+ llvm::Value *Arg =
+ CGF.Builder.CreateBitCast(Addr, CGF.ConvertType(ArgTy));
+
+ CallArgList Args;
+ Args.add(RValue::get(Arg),
+ CGF.getContext().getPointerType(Var.getType()));
+ CGF.EmitCall(FnInfo, CleanupFn, ReturnValueSlot(), Args);
+ }
+ };
+}
+
+/// EmitAutoVarWithLifetime - Does the setup required for an automatic
+/// variable with lifetime.
+static void EmitAutoVarWithLifetime(CodeGenFunction &CGF, const VarDecl &var,
+ llvm::Value *addr,
+ Qualifiers::ObjCLifetime lifetime) {
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing to do
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ CodeGenFunction::Destroyer *destroyer =
+ (var.hasAttr<ObjCPreciseLifetimeAttr>()
+ ? CodeGenFunction::destroyARCStrongPrecise
+ : CodeGenFunction::destroyARCStrongImprecise);
+
+ CleanupKind cleanupKind = CGF.getARCCleanupKind();
+ CGF.pushDestroy(cleanupKind, addr, var.getType(), destroyer,
+ cleanupKind & EHCleanup);
+ break;
+ }
+ case Qualifiers::OCL_Autoreleasing:
+ // nothing to do
+ break;
+
+ case Qualifiers::OCL_Weak:
+ // __weak objects always get EH cleanups; otherwise, exceptions
+ // could cause really nasty crashes instead of mere leaks.
+ CGF.pushDestroy(NormalAndEHCleanup, addr, var.getType(),
+ CodeGenFunction::destroyARCWeak,
+ /*useEHCleanup*/ true);
+ break;
+ }
+}
+
+static bool isAccessedBy(const VarDecl &var, const Stmt *s) {
+ if (const Expr *e = dyn_cast<Expr>(s)) {
+ // Skip the most common kinds of expressions that make
+ // hierarchy-walking expensive.
+ s = e = e->IgnoreParenCasts();
+
+ if (const DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e))
+ return (ref->getDecl() == &var);
+ }
+
+ for (Stmt::const_child_range children = s->children(); children; ++children)
+    // Children might be null, e.g. a missing decl or the conditional of an if-stmt.
+ if ((*children) && isAccessedBy(var, *children))
+ return true;
+
+ return false;
+}
+
+static bool isAccessedBy(const ValueDecl *decl, const Expr *e) {
+ if (!decl) return false;
+ if (!isa<VarDecl>(decl)) return false;
+ const VarDecl *var = cast<VarDecl>(decl);
+ return isAccessedBy(*var, e);
+}
+
+static void drillIntoBlockVariable(CodeGenFunction &CGF,
+ LValue &lvalue,
+ const VarDecl *var) {
+ lvalue.setAddress(CGF.BuildBlockByrefAddress(lvalue.getAddress(), var));
+}
+
+void CodeGenFunction::EmitScalarInit(const Expr *init,
+ const ValueDecl *D,
+ LValue lvalue,
+ bool capturedByInit) {
+ Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
+ if (!lifetime) {
+ llvm::Value *value = EmitScalarExpr(init);
+ if (capturedByInit)
+ drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ EmitStoreThroughLValue(RValue::get(value), lvalue, true);
+ return;
+ }
+
+ // If we're emitting a value with lifetime, we have to do the
+ // initialization *before* we leave the cleanup scopes.
+ if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(init)) {
+ enterFullExpression(ewc);
+ init = ewc->getSubExpr();
+ }
+ CodeGenFunction::RunCleanupsScope Scope(*this);
+
+ // We have to maintain the illusion that the variable is
+ // zero-initialized. If the variable might be accessed in its
+ // initializer, zero-initialize before running the initializer, then
+ // actually perform the initialization with an assign.
+ bool accessedByInit = false;
+ if (lifetime != Qualifiers::OCL_ExplicitNone)
+ accessedByInit = (capturedByInit || isAccessedBy(D, init));
+ if (accessedByInit) {
+ LValue tempLV = lvalue;
+ // Drill down to the __block object if necessary.
+ if (capturedByInit) {
+ // We can use a simple GEP for this because it can't have been
+ // moved yet.
+ tempLV.setAddress(Builder.CreateStructGEP(tempLV.getAddress(),
+ getByRefValueLLVMField(cast<VarDecl>(D))));
+ }
+
+ llvm::PointerType *ty
+ = cast<llvm::PointerType>(tempLV.getAddress()->getType());
+ ty = cast<llvm::PointerType>(ty->getElementType());
+
+ llvm::Value *zero = llvm::ConstantPointerNull::get(ty);
+
+ // If __weak, we want to use a barrier under certain conditions.
+ if (lifetime == Qualifiers::OCL_Weak)
+ EmitARCInitWeak(tempLV.getAddress(), zero);
+
+ // Otherwise just do a simple store.
+ else
+ EmitStoreOfScalar(zero, tempLV, /* isInitialization */ true);
+ }
+
+ // Emit the initializer.
+ llvm::Value *value = 0;
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing to do
+ value = EmitScalarExpr(init);
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ value = EmitARCRetainScalarExpr(init);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak: {
+ // No way to optimize a producing initializer into this. It's not
+ // worth optimizing for, because the value will immediately
+ // disappear in the common case.
+ value = EmitScalarExpr(init);
+
+ if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ if (accessedByInit)
+ EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
+ else
+ EmitARCInitWeak(lvalue.getAddress(), value);
+ return;
+ }
+
+ case Qualifiers::OCL_Autoreleasing:
+ value = EmitARCRetainAutoreleaseScalarExpr(init);
+ break;
+ }
+
+ if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+
+ // If the variable might have been accessed by its initializer, we
+ // might have to initialize with a barrier. We have to do this for
+ // both __weak and __strong, but __weak got filtered out above.
+ if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
+ llvm::Value *oldValue = EmitLoadOfScalar(lvalue);
+ EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
+ EmitARCRelease(oldValue, /*precise*/ false);
+ return;
+ }
+
+ EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
+}
+
+/// EmitScalarInit - Initialize the given lvalue with the given object.
+void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
+ Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
+ if (!lifetime)
+ return EmitStoreThroughLValue(RValue::get(init), lvalue, true);
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing to do
+ break;
+
+ case Qualifiers::OCL_Strong:
+ init = EmitARCRetain(lvalue.getType(), init);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ // Initialize and then skip the primitive store.
+ EmitARCInitWeak(lvalue.getAddress(), init);
+ return;
+
+ case Qualifiers::OCL_Autoreleasing:
+ init = EmitARCRetainAutorelease(lvalue.getType(), init);
+ break;
+ }
+
+ EmitStoreOfScalar(init, lvalue, /* isInitialization */ true);
+}
+
+/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
+/// non-zero parts of the specified initializer with equal or fewer than
+/// NumStores scalar stores.
+static bool canEmitInitWithFewStoresAfterMemset(llvm::Constant *Init,
+ unsigned &NumStores) {
+  // Zero and Undef never require any extra stores.
+ if (isa<llvm::ConstantAggregateZero>(Init) ||
+ isa<llvm::ConstantPointerNull>(Init) ||
+ isa<llvm::UndefValue>(Init))
+ return true;
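+  // Simple scalar constants need at most one store: null values are free,
+  // anything else consumes one unit of the remaining budget.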
+ if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
+ isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
+ isa<llvm::ConstantExpr>(Init))
+ return Init->isNullValue() || NumStores--;
+
+ // See if we can emit each element.
+ if (isa<llvm::ConstantArray>(Init) || isa<llvm::ConstantStruct>(Init)) {
+ for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
+ llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
+ if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
+ return false;
+ }
+ return true;
+ }
+
+ if (llvm::ConstantDataSequential *CDS =
+ dyn_cast<llvm::ConstantDataSequential>(Init)) {
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ llvm::Constant *Elt = CDS->getElementAsConstant(i);
+ if (!canEmitInitWithFewStoresAfterMemset(Elt, NumStores))
+ return false;
+ }
+ return true;
+ }
+
+ // Anything else is hard and scary.
+ return false;
+}
+
+/// emitStoresForInitAfterMemset - For inits that
+/// canEmitInitWithFewStoresAfterMemset returned true for, emit the scalar
+/// stores that would be required.
+static void emitStoresForInitAfterMemset(llvm::Constant *Init, llvm::Value *Loc,
+ bool isVolatile, CGBuilderTy &Builder) {
+ // Zero doesn't require a store.
+ if (Init->isNullValue() || isa<llvm::UndefValue>(Init))
+ return;
+
+ if (isa<llvm::ConstantInt>(Init) || isa<llvm::ConstantFP>(Init) ||
+ isa<llvm::ConstantVector>(Init) || isa<llvm::BlockAddress>(Init) ||
+ isa<llvm::ConstantExpr>(Init)) {
+ Builder.CreateStore(Init, Loc, isVolatile);
+ return;
+ }
+
+ if (llvm::ConstantDataSequential *CDS =
+ dyn_cast<llvm::ConstantDataSequential>(Init)) {
+ for (unsigned i = 0, e = CDS->getNumElements(); i != e; ++i) {
+ llvm::Constant *Elt = CDS->getElementAsConstant(i);
+
+ // Get a pointer to the element and emit it.
+ emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
+ isVolatile, Builder);
+ }
+ return;
+ }
+
+ assert((isa<llvm::ConstantStruct>(Init) || isa<llvm::ConstantArray>(Init)) &&
+ "Unknown value type!");
+
+ for (unsigned i = 0, e = Init->getNumOperands(); i != e; ++i) {
+ llvm::Constant *Elt = cast<llvm::Constant>(Init->getOperand(i));
+ // Get a pointer to the element and emit it.
+ emitStoresForInitAfterMemset(Elt, Builder.CreateConstGEP2_32(Loc, 0, i),
+ isVolatile, Builder);
+ }
+}
+
+/// shouldUseMemSetPlusStoresToInitialize - Decide whether we should use memset
+/// plus some stores to initialize a local variable instead of using a memcpy
+/// from a constant global. It is beneficial to use memset if the global is all
+/// zeros, or mostly zeros and large.
+static bool shouldUseMemSetPlusStoresToInitialize(llvm::Constant *Init,
+ uint64_t GlobalSize) {
+ // If a global is all zeros, always use a memset.
+ if (isa<llvm::ConstantAggregateZero>(Init)) return true;
+
+ // If a non-zero global is <= 32 bytes, always use a memcpy. If it is large,
+ // do it if it will require 6 or fewer scalar stores.
+  // TODO: Should the budget depend on the size? Avoiding a large global
+  // warrants plopping in more stores.
+ unsigned StoreBudget = 6;
+ uint64_t SizeLimit = 32;
+
+ return GlobalSize > SizeLimit &&
+ canEmitInitWithFewStoresAfterMemset(Init, StoreBudget);
+}
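+// Illustrative sketch (not part of the original source): for a hypothetical
+// initializer such as
+//
+//   struct S { int a; char pad[100]; int b; };
+//   S s = { 1, {0}, 2 };
+//
+// the object is larger than 32 bytes and needs only two scalar stores after
+// zeroing, so the heuristic above returns true and the caller will emit a
+// memset followed by a couple of stores instead of a memcpy from a constant
+// global.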
+
+/// EmitAutoVarDecl - Emit code and set up an entry in LocalDeclMap for a
+/// variable declaration with auto, register, or no storage class specifier.
+/// These turn into simple stack objects, or GlobalValues depending on target.
+void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
+ AutoVarEmission emission = EmitAutoVarAlloca(D);
+ EmitAutoVarInit(emission);
+ EmitAutoVarCleanups(emission);
+}
+
+/// EmitAutoVarAlloca - Emit the alloca and debug information for a
+/// local variable. Does not emit initialization or destruction.
+CodeGenFunction::AutoVarEmission
+CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
+ QualType Ty = D.getType();
+
+ AutoVarEmission emission(D);
+
+ bool isByRef = D.hasAttr<BlocksAttr>();
+ emission.IsByRef = isByRef;
+
+ CharUnits alignment = getContext().getDeclAlign(&D);
+ emission.Alignment = alignment;
+
+ // If the type is variably-modified, emit all the VLA sizes for it.
+ if (Ty->isVariablyModifiedType())
+ EmitVariablyModifiedType(Ty);
+
+ llvm::Value *DeclPtr;
+ if (Ty->isConstantSizeType()) {
+ if (!Target.useGlobalsForAutomaticVariables()) {
+ bool NRVO = getContext().getLangOpts().ElideConstructors &&
+ D.isNRVOVariable();
+
+ // If this value is a POD array or struct with a statically
+ // determinable constant initializer, there are optimizations we can do.
+ //
+ // TODO: We should constant-evaluate the initializer of any variable,
+ // as long as it is initialized by a constant expression. Currently,
+ // isConstantInitializer produces wrong answers for structs with
+ // reference or bitfield members, and a few other cases, and checking
+ // for POD-ness protects us from some of these.
+ if (D.getInit() &&
+ (Ty->isArrayType() || Ty->isRecordType()) &&
+ (Ty.isPODType(getContext()) ||
+ getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
+ D.getInit()->isConstantInitializer(getContext(), false)) {
+
+ // If the variable's a const type, and it's neither an NRVO
+ // candidate nor a __block variable and has no mutable members,
+ // emit it as a global instead.
+ if (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
+ CGM.isTypeConstant(Ty, true)) {
+ EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
+
+ emission.Address = 0; // signal this condition to later callbacks
+ assert(emission.wasEmittedAsGlobal());
+ return emission;
+ }
+
+ // Otherwise, tell the initialization code that we're in this case.
+ emission.IsConstantAggregate = true;
+ }
+
+ // A normal fixed sized variable becomes an alloca in the entry block,
+ // unless it's an NRVO variable.
+ llvm::Type *LTy = ConvertTypeForMem(Ty);
+
+ if (NRVO) {
+ // The named return value optimization: allocate this variable in the
+ // return slot, so that we can elide the copy when returning this
+ // variable (C++0x [class.copy]p34).
+ DeclPtr = ReturnValue;
+
+ if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+ if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
+ // Create a flag that is used to indicate when the NRVO was applied
+ // to this variable. Set it to zero to indicate that NRVO was not
+ // applied.
+ llvm::Value *Zero = Builder.getFalse();
+ llvm::Value *NRVOFlag = CreateTempAlloca(Zero->getType(), "nrvo");
+ EnsureInsertPoint();
+ Builder.CreateStore(Zero, NRVOFlag);
+
+ // Record the NRVO flag for this variable.
+ NRVOFlags[&D] = NRVOFlag;
+ emission.NRVOFlag = NRVOFlag;
+ }
+ }
+ } else {
+ if (isByRef)
+ LTy = BuildByRefType(&D);
+
+ llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
+ Alloc->setName(D.getName());
+
+ CharUnits allocaAlignment = alignment;
+ if (isByRef)
+ allocaAlignment = std::max(allocaAlignment,
+ getContext().toCharUnitsFromBits(Target.getPointerAlign(0)));
+ Alloc->setAlignment(allocaAlignment.getQuantity());
+ DeclPtr = Alloc;
+ }
+ } else {
+ // Targets that don't support recursion emit locals as globals.
+ const char *Class =
+ D.getStorageClass() == SC_Register ? ".reg." : ".auto.";
+ DeclPtr = CreateStaticVarDecl(D, Class,
+ llvm::GlobalValue::InternalLinkage);
+ }
+ } else {
+ EnsureInsertPoint();
+
+ if (!DidCallStackSave) {
+ // Save the stack.
+ llvm::Value *Stack = CreateTempAlloca(Int8PtrTy, "saved_stack");
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::stacksave);
+ llvm::Value *V = Builder.CreateCall(F);
+
+ Builder.CreateStore(V, Stack);
+
+ DidCallStackSave = true;
+
+ // Push a cleanup block and restore the stack there.
+ // FIXME: in general circumstances, this should be an EH cleanup.
+ EHStack.pushCleanup<CallStackRestore>(NormalCleanup, Stack);
+ }
+
+ llvm::Value *elementCount;
+ QualType elementType;
+ llvm::tie(elementCount, elementType) = getVLASize(Ty);
+
+ llvm::Type *llvmTy = ConvertTypeForMem(elementType);
+
+ // Allocate memory for the array.
+ llvm::AllocaInst *vla = Builder.CreateAlloca(llvmTy, elementCount, "vla");
+ vla->setAlignment(alignment.getQuantity());
+
+ DeclPtr = vla;
+ }
+
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+ DMEntry = DeclPtr;
+ emission.Address = DeclPtr;
+
+ // Emit debug info for local var declaration.
+ if (HaveInsertPoint())
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(D.getLocation());
+ if (Target.useGlobalsForAutomaticVariables()) {
+ DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(DeclPtr), &D);
+ } else
+ DI->EmitDeclareOfAutoVariable(&D, DeclPtr, Builder);
+ }
+
+ if (D.hasAttr<AnnotateAttr>())
+ EmitVarAnnotations(&D, emission.Address);
+
+ return emission;
+}
+
+/// Determines whether the given __block variable is potentially
+/// captured by the given expression.
+static bool isCapturedBy(const VarDecl &var, const Expr *e) {
+ // Skip the most common kinds of expressions that make
+ // hierarchy-walking expensive.
+ e = e->IgnoreParenCasts();
+
+ if (const BlockExpr *be = dyn_cast<BlockExpr>(e)) {
+ const BlockDecl *block = be->getBlockDecl();
+ for (BlockDecl::capture_const_iterator i = block->capture_begin(),
+ e = block->capture_end(); i != e; ++i) {
+ if (i->getVariable() == &var)
+ return true;
+ }
+
+ // No need to walk into the subexpressions.
+ return false;
+ }
+
+ if (const StmtExpr *SE = dyn_cast<StmtExpr>(e)) {
+ const CompoundStmt *CS = SE->getSubStmt();
+ for (CompoundStmt::const_body_iterator BI = CS->body_begin(),
+ BE = CS->body_end(); BI != BE; ++BI)
+ if (Expr *E = dyn_cast<Expr>((*BI))) {
+ if (isCapturedBy(var, E))
+ return true;
+ }
+ else if (DeclStmt *DS = dyn_cast<DeclStmt>((*BI))) {
+ // special case declarations
+ for (DeclStmt::decl_iterator I = DS->decl_begin(), E = DS->decl_end();
+ I != E; ++I) {
+ if (VarDecl *VD = dyn_cast<VarDecl>((*I))) {
+ Expr *Init = VD->getInit();
+ if (Init && isCapturedBy(var, Init))
+ return true;
+ }
+ }
+ }
+ else
+        // FIXME: Conservatively assume that arbitrary statements cause capturing.
+ // Later, provide code to poke into statements for capture analysis.
+ return true;
+ return false;
+ }
+
+ for (Stmt::const_child_range children = e->children(); children; ++children)
+ if (isCapturedBy(var, cast<Expr>(*children)))
+ return true;
+
+ return false;
+}
+
+/// \brief Determine whether the given initializer is trivial in the sense
+/// that it requires no code to be generated.
+static bool isTrivialInitializer(const Expr *Init) {
+ if (!Init)
+ return true;
+
+ if (const CXXConstructExpr *Construct = dyn_cast<CXXConstructExpr>(Init))
+ if (CXXConstructorDecl *Constructor = Construct->getConstructor())
+ if (Constructor->isTrivial() &&
+ Constructor->isDefaultConstructor() &&
+ !Construct->requiresZeroInitialization())
+ return true;
+
+ return false;
+}
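+
+/// EmitAutoVarInit - Emit the initializer (if any) for an automatic variable
+/// that was allocated by EmitAutoVarAlloca: set up the __block structure if
+/// needed, then either emit the initializer expression or, for constant
+/// aggregates, lower the initialization to memset/memcpy plus a few stores.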
+void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
+ assert(emission.Variable && "emission was not valid!");
+
+ // If this was emitted as a global constant, we're done.
+ if (emission.wasEmittedAsGlobal()) return;
+
+ const VarDecl &D = *emission.Variable;
+ QualType type = D.getType();
+
+ // If this local has an initializer, emit it now.
+ const Expr *Init = D.getInit();
+
+ // If we are at an unreachable point, we don't need to emit the initializer
+ // unless it contains a label.
+ if (!HaveInsertPoint()) {
+ if (!Init || !ContainsLabel(Init)) return;
+ EnsureInsertPoint();
+ }
+
+ // Initialize the structure of a __block variable.
+ if (emission.IsByRef)
+ emitByrefStructureInit(emission);
+
+ if (isTrivialInitializer(Init))
+ return;
+
+ CharUnits alignment = emission.Alignment;
+
+ // Check whether this is a byref variable that's potentially
+ // captured and moved by its own initializer. If so, we'll need to
+ // emit the initializer first, then copy into the variable.
+ bool capturedByInit = emission.IsByRef && isCapturedBy(D, Init);
+
+ llvm::Value *Loc =
+ capturedByInit ? emission.Address : emission.getObjectAddress(*this);
+
+ llvm::Constant *constant = 0;
+ if (emission.IsConstantAggregate) {
+ assert(!capturedByInit && "constant init contains a capturing block?");
+ constant = CGM.EmitConstantInit(D, this);
+ }
+
+ if (!constant) {
+ LValue lv = MakeAddrLValue(Loc, type, alignment);
+ lv.setNonGC(true);
+ return EmitExprAsInit(Init, &D, lv, capturedByInit);
+ }
+
+ // If this is a simple aggregate initialization, we can optimize it
+ // in various ways.
+ bool isVolatile = type.isVolatileQualified();
+
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(IntPtrTy,
+ getContext().getTypeSizeInChars(type).getQuantity());
+
+ llvm::Type *BP = Int8PtrTy;
+ if (Loc->getType() != BP)
+ Loc = Builder.CreateBitCast(Loc, BP);
+
+ // If the initializer is all or mostly zeros, codegen with memset then do
+ // a few stores afterward.
+ if (shouldUseMemSetPlusStoresToInitialize(constant,
+ CGM.getTargetData().getTypeAllocSize(constant->getType()))) {
+ Builder.CreateMemSet(Loc, llvm::ConstantInt::get(Int8Ty, 0), SizeVal,
+ alignment.getQuantity(), isVolatile);
+ if (!constant->isNullValue()) {
+ Loc = Builder.CreateBitCast(Loc, constant->getType()->getPointerTo());
+ emitStoresForInitAfterMemset(constant, Loc, isVolatile, Builder);
+ }
+ } else {
+ // Otherwise, create a temporary global with the initializer then
+ // memcpy from the global to the alloca.
+ std::string Name = GetStaticDeclName(*this, D, ".");
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
+ llvm::GlobalValue::PrivateLinkage,
+ constant, Name, 0, false, 0);
+ GV->setAlignment(alignment.getQuantity());
+ GV->setUnnamedAddr(true);
+
+ llvm::Value *SrcPtr = GV;
+ if (SrcPtr->getType() != BP)
+ SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
+
+ Builder.CreateMemCpy(Loc, SrcPtr, SizeVal, alignment.getQuantity(),
+ isVolatile);
+ }
+}
+
+/// Emit an expression as an initializer for a variable at the given
+/// location. The expression is not necessarily the normal
+/// initializer for the variable, and the address is not necessarily
+/// its normal location.
+///
+/// \param init the initializing expression
+/// \param D the declaration to act as if we're initializing
+/// \param lvalue the lvalue to initialize; it carries the address, type,
+///   and alignment of the variable's storage
+/// \param capturedByInit true if the variable is a __block variable
+/// whose address is potentially changed by the initializer
+void CodeGenFunction::EmitExprAsInit(const Expr *init,
+ const ValueDecl *D,
+ LValue lvalue,
+ bool capturedByInit) {
+ QualType type = D->getType();
+
+ if (type->isReferenceType()) {
+ RValue rvalue = EmitReferenceBindingToExpr(init, D);
+ if (capturedByInit)
+ drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ EmitStoreThroughLValue(rvalue, lvalue, true);
+ } else if (!hasAggregateLLVMType(type)) {
+ EmitScalarInit(init, D, lvalue, capturedByInit);
+ } else if (type->isAnyComplexType()) {
+ ComplexPairTy complex = EmitComplexExpr(init);
+ if (capturedByInit)
+ drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ StoreComplexToAddr(complex, lvalue.getAddress(), lvalue.isVolatile());
+ } else {
+ // TODO: how can we delay here if D is captured by its initializer?
+ EmitAggExpr(init, AggValueSlot::forLValue(lvalue,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ MaybeEmitStdInitializerListCleanup(lvalue.getAddress(), init);
+ }
+}
+
+/// Enter a destroy cleanup for the given local variable.
+void CodeGenFunction::emitAutoVarTypeCleanup(
+ const CodeGenFunction::AutoVarEmission &emission,
+ QualType::DestructionKind dtorKind) {
+ assert(dtorKind != QualType::DK_none);
+
+ // Note that for __block variables, we want to destroy the
+ // original stack object, not the possibly forwarded object.
+ llvm::Value *addr = emission.getObjectAddress(*this);
+
+ const VarDecl *var = emission.Variable;
+ QualType type = var->getType();
+
+ CleanupKind cleanupKind = NormalAndEHCleanup;
+ CodeGenFunction::Destroyer *destroyer = 0;
+
+ switch (dtorKind) {
+ case QualType::DK_none:
+ llvm_unreachable("no cleanup for trivially-destructible variable");
+
+ case QualType::DK_cxx_destructor:
+ // If there's an NRVO flag on the emission, we need a different
+ // cleanup.
+ if (emission.NRVOFlag) {
+ assert(!type->isArrayType());
+ CXXDestructorDecl *dtor = type->getAsCXXRecordDecl()->getDestructor();
+ EHStack.pushCleanup<DestroyNRVOVariable>(cleanupKind, addr, dtor,
+ emission.NRVOFlag);
+ return;
+ }
+ break;
+
+ case QualType::DK_objc_strong_lifetime:
+ // Suppress cleanups for pseudo-strong variables.
+ if (var->isARCPseudoStrong()) return;
+
+ // Otherwise, consider whether to use an EH cleanup or not.
+ cleanupKind = getARCCleanupKind();
+
+ // Use the imprecise destroyer by default.
+ if (!var->hasAttr<ObjCPreciseLifetimeAttr>())
+ destroyer = CodeGenFunction::destroyARCStrongImprecise;
+ break;
+
+ case QualType::DK_objc_weak_lifetime:
+ break;
+ }
+
+ // If we haven't chosen a more specific destroyer, use the default.
+ if (!destroyer) destroyer = getDestroyer(dtorKind);
+
+ // Use an EH cleanup in array destructors iff the destructor itself
+ // is being pushed as an EH cleanup.
+ bool useEHCleanup = (cleanupKind & EHCleanup);
+ EHStack.pushCleanup<DestroyObject>(cleanupKind, addr, type, destroyer,
+ useEHCleanup);
+}
+
+void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
+ assert(emission.Variable && "emission was not valid!");
+
+ // If this was emitted as a global constant, we're done.
+ if (emission.wasEmittedAsGlobal()) return;
+
+ const VarDecl &D = *emission.Variable;
+
+ // Check the type for a cleanup.
+ if (QualType::DestructionKind dtorKind = D.getType().isDestructedType())
+ emitAutoVarTypeCleanup(emission, dtorKind);
+
+ // In GC mode, honor objc_precise_lifetime.
+ if (getLangOpts().getGC() != LangOptions::NonGC &&
+ D.hasAttr<ObjCPreciseLifetimeAttr>()) {
+ EHStack.pushCleanup<ExtendGCLifetime>(NormalCleanup, &D);
+ }
+
+ // Handle the cleanup attribute.
+ if (const CleanupAttr *CA = D.getAttr<CleanupAttr>()) {
+ const FunctionDecl *FD = CA->getFunctionDecl();
+
+ llvm::Constant *F = CGM.GetAddrOfFunction(FD);
+ assert(F && "Could not find function!");
+
+ const CGFunctionInfo &Info = CGM.getTypes().arrangeFunctionDeclaration(FD);
+ EHStack.pushCleanup<CallCleanupFunction>(NormalAndEHCleanup, F, &Info, &D);
+ }
+
+ // If this is a block variable, call _Block_object_destroy
+ // (on the unforwarded address).
+ if (emission.IsByRef)
+ enterByrefCleanup(emission);
+}
+
+CodeGenFunction::Destroyer *
+CodeGenFunction::getDestroyer(QualType::DestructionKind kind) {
+ switch (kind) {
+ case QualType::DK_none: llvm_unreachable("no destroyer for trivial dtor");
+ case QualType::DK_cxx_destructor:
+ return destroyCXXObject;
+ case QualType::DK_objc_strong_lifetime:
+ return destroyARCStrongPrecise;
+ case QualType::DK_objc_weak_lifetime:
+ return destroyARCWeak;
+ }
+ llvm_unreachable("Unknown DestructionKind");
+}
+
+/// pushDestroy - Push the standard destructor for the given type.
+void CodeGenFunction::pushDestroy(QualType::DestructionKind dtorKind,
+ llvm::Value *addr, QualType type) {
+ assert(dtorKind && "cannot push destructor for trivial type");
+
+ CleanupKind cleanupKind = getCleanupKind(dtorKind);
+ pushDestroy(cleanupKind, addr, type, getDestroyer(dtorKind),
+ cleanupKind & EHCleanup);
+}
+
+void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, llvm::Value *addr,
+ QualType type, Destroyer *destroyer,
+ bool useEHCleanupForArray) {
+ pushFullExprCleanup<DestroyObject>(cleanupKind, addr, type,
+ destroyer, useEHCleanupForArray);
+}
+
+/// emitDestroy - Immediately perform the destruction of the given
+/// object.
+///
+/// \param addr - the address of the object; a type*
+/// \param type - the type of the object; if an array type, all
+/// objects are destroyed in reverse order
+/// \param destroyer - the function to call to destroy individual
+/// elements
+/// \param useEHCleanupForArray - whether an EH cleanup should be
+/// used when destroying array elements, in case one of the
+/// destructions throws an exception
+void CodeGenFunction::emitDestroy(llvm::Value *addr, QualType type,
+ Destroyer *destroyer,
+ bool useEHCleanupForArray) {
+ const ArrayType *arrayType = getContext().getAsArrayType(type);
+ if (!arrayType)
+ return destroyer(*this, addr, type);
+
+ llvm::Value *begin = addr;
+ llvm::Value *length = emitArrayLength(arrayType, type, begin);
+
+ // Normally we have to check whether the array is zero-length.
+ bool checkZeroLength = true;
+
+ // But if the array length is constant, we can suppress that.
+ if (llvm::ConstantInt *constLength = dyn_cast<llvm::ConstantInt>(length)) {
+ // ...and if it's constant zero, we can just skip the entire thing.
+ if (constLength->isZero()) return;
+ checkZeroLength = false;
+ }
+
+ llvm::Value *end = Builder.CreateInBoundsGEP(begin, length);
+ emitArrayDestroy(begin, end, type, destroyer,
+ checkZeroLength, useEHCleanupForArray);
+}
+
+/// emitArrayDestroy - Destroys all the elements of the given array,
+/// beginning from last to first. The array cannot be zero-length.
+///
+/// \param begin - a type* denoting the first element of the array
+/// \param end - a type* denoting one past the end of the array
+/// \param type - the element type of the array
+/// \param destroyer - the function to call to destroy elements
+/// \param useEHCleanup - whether to push an EH cleanup to destroy
+/// the remaining elements in case the destruction of a single
+/// element throws
+void CodeGenFunction::emitArrayDestroy(llvm::Value *begin,
+ llvm::Value *end,
+ QualType type,
+ Destroyer *destroyer,
+ bool checkZeroLength,
+ bool useEHCleanup) {
+ assert(!type->isArrayType());
+
+ // The basic structure here is a do-while loop, because we don't
+ // need to check for the zero-element case.
+ llvm::BasicBlock *bodyBB = createBasicBlock("arraydestroy.body");
+ llvm::BasicBlock *doneBB = createBasicBlock("arraydestroy.done");
+
+ if (checkZeroLength) {
+ llvm::Value *isEmpty = Builder.CreateICmpEQ(begin, end,
+ "arraydestroy.isempty");
+ Builder.CreateCondBr(isEmpty, doneBB, bodyBB);
+ }
+
+ // Enter the loop body, making that address the current address.
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ EmitBlock(bodyBB);
+ llvm::PHINode *elementPast =
+ Builder.CreatePHI(begin->getType(), 2, "arraydestroy.elementPast");
+ elementPast->addIncoming(end, entryBB);
+
+ // Shift the address back by one element.
+ llvm::Value *negativeOne = llvm::ConstantInt::get(SizeTy, -1, true);
+ llvm::Value *element = Builder.CreateInBoundsGEP(elementPast, negativeOne,
+ "arraydestroy.element");
+
+ if (useEHCleanup)
+ pushRegularPartialArrayCleanup(begin, element, type, destroyer);
+
+ // Perform the actual destruction there.
+ destroyer(*this, element, type);
+
+ if (useEHCleanup)
+ PopCleanupBlock();
+
+ // Check whether we've reached the end.
+ llvm::Value *done = Builder.CreateICmpEQ(element, begin, "arraydestroy.done");
+ Builder.CreateCondBr(done, doneBB, bodyBB);
+ elementPast->addIncoming(element, Builder.GetInsertBlock());
+
+ // Done.
+ EmitBlock(doneBB);
+}
+
+/// Perform partial array destruction as if in an EH cleanup. Unlike
+/// emitArrayDestroy, the element type here may still be an array type.
+static void emitPartialArrayDestroy(CodeGenFunction &CGF,
+ llvm::Value *begin, llvm::Value *end,
+ QualType type,
+ CodeGenFunction::Destroyer *destroyer) {
+ // If the element type is itself an array, drill down.
+ unsigned arrayDepth = 0;
+ while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
+ // VLAs don't require a GEP index to walk into.
+ if (!isa<VariableArrayType>(arrayType))
+ arrayDepth++;
+ type = arrayType->getElementType();
+ }
+
+ if (arrayDepth) {
+ llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, arrayDepth+1);
+
+ SmallVector<llvm::Value*,4> gepIndices(arrayDepth, zero);
+ begin = CGF.Builder.CreateInBoundsGEP(begin, gepIndices, "pad.arraybegin");
+ end = CGF.Builder.CreateInBoundsGEP(end, gepIndices, "pad.arrayend");
+ }
+
+ // Destroy the array. We don't ever need an EH cleanup because we
+ // assume that we're in an EH cleanup ourselves, so a throwing
+ // destructor causes an immediate terminate.
+ CGF.emitArrayDestroy(begin, end, type, destroyer,
+ /*checkZeroLength*/ true, /*useEHCleanup*/ false);
+}
+
+namespace {
+ /// RegularPartialArrayDestroy - a cleanup which performs a partial
+ /// array destroy where the end pointer is regularly determined and
+ /// does not need to be loaded from a local.
+ class RegularPartialArrayDestroy : public EHScopeStack::Cleanup {
+ llvm::Value *ArrayBegin;
+ llvm::Value *ArrayEnd;
+ QualType ElementType;
+ CodeGenFunction::Destroyer *Destroyer;
+ public:
+ RegularPartialArrayDestroy(llvm::Value *arrayBegin, llvm::Value *arrayEnd,
+ QualType elementType,
+ CodeGenFunction::Destroyer *destroyer)
+ : ArrayBegin(arrayBegin), ArrayEnd(arrayEnd),
+ ElementType(elementType), Destroyer(destroyer) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ emitPartialArrayDestroy(CGF, ArrayBegin, ArrayEnd,
+ ElementType, Destroyer);
+ }
+ };
+
+ /// IrregularPartialArrayDestroy - a cleanup which performs a
+ /// partial array destroy where the end pointer is irregularly
+ /// determined and must be loaded from a local.
+ class IrregularPartialArrayDestroy : public EHScopeStack::Cleanup {
+ llvm::Value *ArrayBegin;
+ llvm::Value *ArrayEndPointer;
+ QualType ElementType;
+ CodeGenFunction::Destroyer *Destroyer;
+ public:
+ IrregularPartialArrayDestroy(llvm::Value *arrayBegin,
+ llvm::Value *arrayEndPointer,
+ QualType elementType,
+ CodeGenFunction::Destroyer *destroyer)
+ : ArrayBegin(arrayBegin), ArrayEndPointer(arrayEndPointer),
+ ElementType(elementType), Destroyer(destroyer) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ llvm::Value *arrayEnd = CGF.Builder.CreateLoad(ArrayEndPointer);
+ emitPartialArrayDestroy(CGF, ArrayBegin, arrayEnd,
+ ElementType, Destroyer);
+ }
+ };
+}
+
+/// pushIrregularPartialArrayCleanup - Push an EH cleanup to destroy
+/// already-constructed elements of the given array. The cleanup
+/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
+///
+/// \param arrayBegin - a value of type elementType*, the start of the array
+/// \param arrayEndPointer - a pointer to a local variable holding the current
+///   end of the initialized portion of the array
+/// \param elementType - the immediate element type of the array;
+///   possibly still an array type
+/// \param destroyer - the function to call to destroy elements
+void CodeGenFunction::pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEndPointer,
+ QualType elementType,
+ Destroyer *destroyer) {
+ pushFullExprCleanup<IrregularPartialArrayDestroy>(EHCleanup,
+ arrayBegin, arrayEndPointer,
+ elementType, destroyer);
+}
+
+/// pushRegularPartialArrayCleanup - Push an EH cleanup to destroy
+/// already-constructed elements of the given array. The cleanup
+/// may be popped with DeactivateCleanupBlock or PopCleanupBlock.
+///
+/// \param arrayBegin - a value of type elementType*, the start of the array
+/// \param arrayEnd - a value of type elementType*, one past the end of the
+///   already-constructed elements
+/// \param elementType - the immediate element type of the array;
+///   possibly still an array type
+/// \param destroyer - the function to call to destroy elements
+void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEnd,
+ QualType elementType,
+ Destroyer *destroyer) {
+ pushFullExprCleanup<RegularPartialArrayDestroy>(EHCleanup,
+ arrayBegin, arrayEnd,
+ elementType, destroyer);
+}
+
+namespace {
+ /// A cleanup to perform a release of an object at the end of a
+ /// function. This is used to balance out the incoming +1 of a
+ /// ns_consumed argument when we can't reasonably do that just by
+ /// not doing the initial retain for a __block argument.
+ struct ConsumeARCParameter : EHScopeStack::Cleanup {
+ ConsumeARCParameter(llvm::Value *param) : Param(param) {}
+
+ llvm::Value *Param;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitARCRelease(Param, /*precise*/ false);
+ }
+ };
+}
+
+/// Emit an alloca (or GlobalValue depending on target)
+/// for the specified parameter and set up LocalDeclMap.
+void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
+ unsigned ArgNo) {
+ // FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
+ assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
+ "Invalid argument to EmitParmDecl");
+
+ Arg->setName(D.getName());
+
+ // Use better IR generation for certain implicit parameters.
+ if (isa<ImplicitParamDecl>(D)) {
+ // The only implicit argument a block has is its literal.
+ if (BlockInfo) {
+ LocalDeclMap[&D] = Arg;
+
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(D.getLocation());
+ DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, Builder);
+ }
+
+ return;
+ }
+ }
+
+ QualType Ty = D.getType();
+
+ llvm::Value *DeclPtr;
+ // If this is an aggregate or variable sized value, reuse the input pointer.
+ if (!Ty->isConstantSizeType() ||
+ CodeGenFunction::hasAggregateLLVMType(Ty)) {
+ DeclPtr = Arg;
+ } else {
+ // Otherwise, create a temporary to hold the value.
+ llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty),
+ D.getName() + ".addr");
+ Alloc->setAlignment(getContext().getDeclAlign(&D).getQuantity());
+ DeclPtr = Alloc;
+
+ bool doStore = true;
+
+ Qualifiers qs = Ty.getQualifiers();
+
+ if (Qualifiers::ObjCLifetime lt = qs.getObjCLifetime()) {
+ // We honor __attribute__((ns_consumed)) for types with lifetime.
+ // For __strong, it's handled by just skipping the initial retain;
+ // otherwise we have to balance out the initial +1 with an extra
+ // cleanup to do the release at the end of the function.
+ bool isConsumed = D.hasAttr<NSConsumedAttr>();
+
+ // 'self' is always formally __strong, but if this is not an
+ // init method then we don't want to retain it.
+ if (D.isARCPseudoStrong()) {
+ const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CurCodeDecl);
+ assert(&D == method->getSelfDecl());
+ assert(lt == Qualifiers::OCL_Strong);
+ assert(qs.hasConst());
+ assert(method->getMethodFamily() != OMF_init);
+ (void) method;
+ lt = Qualifiers::OCL_ExplicitNone;
+ }
+
+ if (lt == Qualifiers::OCL_Strong) {
+ if (!isConsumed)
+ // Don't use objc_retainBlock for block pointers, because we
+ // don't want to Block_copy something just because we got it
+ // as a parameter.
+ Arg = EmitARCRetainNonBlock(Arg);
+ } else {
+ // Push the cleanup for a consumed parameter.
+ if (isConsumed)
+ EHStack.pushCleanup<ConsumeARCParameter>(getARCCleanupKind(), Arg);
+
+ if (lt == Qualifiers::OCL_Weak) {
+ EmitARCInitWeak(DeclPtr, Arg);
+ doStore = false; // The weak init is a store, no need to do two.
+ }
+ }
+
+ // Enter the cleanup scope.
+ EmitAutoVarWithLifetime(*this, D, DeclPtr, lt);
+ }
+
+ // Store the initial value into the alloca.
+ if (doStore) {
+ LValue lv = MakeAddrLValue(DeclPtr, Ty,
+ getContext().getDeclAlign(&D));
+ EmitStoreOfScalar(Arg, lv, /* isInitialization */ true);
+ }
+ }
+
+ llvm::Value *&DMEntry = LocalDeclMap[&D];
+ assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
+ DMEntry = DeclPtr;
+
+ // Emit debug info for param declaration.
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitDeclareOfArgVariable(&D, DeclPtr, ArgNo, Builder);
+
+ if (D.hasAttr<AnnotateAttr>())
+ EmitVarAnnotations(&D, DeclPtr);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
new file mode 100644
index 0000000..10f0b83
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -0,0 +1,464 @@
+//===--- CGDeclCXX.cpp - Emit LLVM Code for C++ declarations --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ declarations
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGObjCRuntime.h"
+#include "CGCXXABI.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Intrinsics.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *DeclPtr) {
+ assert(D.hasGlobalStorage() && "VarDecl must have global storage!");
+ assert(!D.getType()->isReferenceType() &&
+ "Should not call EmitDeclInit on a reference!");
+
+ ASTContext &Context = CGF.getContext();
+
+ CharUnits alignment = Context.getDeclAlign(&D);
+ QualType type = D.getType();
+ LValue lv = CGF.MakeAddrLValue(DeclPtr, type, alignment);
+
+ const Expr *Init = D.getInit();
+ if (!CGF.hasAggregateLLVMType(type)) {
+ CodeGenModule &CGM = CGF.CGM;
+ if (lv.isObjCStrong())
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(CGF, CGF.EmitScalarExpr(Init),
+ DeclPtr, D.isThreadSpecified());
+ else if (lv.isObjCWeak())
+ CGM.getObjCRuntime().EmitObjCWeakAssign(CGF, CGF.EmitScalarExpr(Init),
+ DeclPtr);
+ else
+ CGF.EmitScalarInit(Init, &D, lv, false);
+ } else if (type->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(Init, DeclPtr, lv.isVolatile());
+ } else {
+ CGF.EmitAggExpr(Init, AggValueSlot::forLValue(lv,AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ }
+}
+
+/// Emit code to cause the destruction of the given variable with
+/// static storage duration.
+static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *addr) {
+ CodeGenModule &CGM = CGF.CGM;
+
+ // FIXME: __attribute__((cleanup)) ?
+
+ QualType type = D.getType();
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+
+ switch (dtorKind) {
+ case QualType::DK_none:
+ return;
+
+ case QualType::DK_cxx_destructor:
+ break;
+
+ case QualType::DK_objc_strong_lifetime:
+ case QualType::DK_objc_weak_lifetime:
+ // We don't care about releasing objects during process teardown.
+ return;
+ }
+
+ llvm::Constant *function;
+ llvm::Constant *argument;
+
+ // Special-case non-array C++ destructors, where there's a function
+ // with the right signature that we can just call.
+ const CXXRecordDecl *record = 0;
+ if (dtorKind == QualType::DK_cxx_destructor &&
+ (record = type->getAsCXXRecordDecl())) {
+ assert(!record->hasTrivialDestructor());
+ CXXDestructorDecl *dtor = record->getDestructor();
+
+ function = CGM.GetAddrOfCXXDestructor(dtor, Dtor_Complete);
+ argument = addr;
+
+ // Otherwise, the standard logic requires a helper function.
+ } else {
+ function = CodeGenFunction(CGM).generateDestroyHelper(addr, type,
+ CGF.getDestroyer(dtorKind),
+ CGF.needsEHCleanup(dtorKind));
+ argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
+ }
+
+ CGF.EmitCXXGlobalDtorRegistration(function, argument);
+}
+
+/// Emit code to cause the variable at the given address to be considered as
+/// constant from this point onwards.
+static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *Addr) {
+ // Don't emit the intrinsic if we're not optimizing.
+ if (!CGF.CGM.getCodeGenOpts().OptimizationLevel)
+ return;
+
+ // Grab the llvm.invariant.start intrinsic.
+ llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
+ llvm::Constant *InvariantStart = CGF.CGM.getIntrinsic(InvStartID);
+
+ // Emit a call with the size in bytes of the object.
+ CharUnits WidthChars = CGF.getContext().getTypeSizeInChars(D.getType());
+ uint64_t Width = WidthChars.getQuantity();
+ llvm::Value *Args[2] = { llvm::ConstantInt::getSigned(CGF.Int64Ty, Width),
+ llvm::ConstantExpr::getBitCast(Addr, CGF.Int8PtrTy)};
+ CGF.Builder.CreateCall(InvariantStart, Args);
+}
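+// (Sketch, not part of the original source: the call emitted above is roughly
+//  "call {}* @llvm.invariant.start(i64 <size-in-bytes>, i8* <addr>)", which
+//  tells the optimizer that the object's contents will not change from this
+//  point on.)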
+
+void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
+ llvm::Constant *DeclPtr,
+ bool PerformInit) {
+
+ const Expr *Init = D.getInit();
+ QualType T = D.getType();
+
+ if (!T->isReferenceType()) {
+ if (PerformInit)
+ EmitDeclInit(*this, D, DeclPtr);
+ if (CGM.isTypeConstant(D.getType(), true))
+ EmitDeclInvariant(*this, D, DeclPtr);
+ else
+ EmitDeclDestroy(*this, D, DeclPtr);
+ return;
+ }
+
+ assert(PerformInit && "cannot have constant initializer which needs "
+ "destruction for reference");
+ unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
+ RValue RV = EmitReferenceBindingToExpr(Init, &D);
+ EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
+}
+
+/// Register a global destructor using __cxa_atexit.
+static void emitGlobalDtorWithCXAAtExit(CodeGenFunction &CGF,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // We're assuming that the destructor function is something we can
+ // reasonably call with the default CC. Go ahead and cast it to the
+ // right prototype.
+ llvm::Type *dtorTy =
+ llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, false)->getPointerTo();
+
+ // extern "C" int __cxa_atexit(void (*f)(void *), void *p, void *d);
+ llvm::Type *paramTys[] = { dtorTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
+ llvm::FunctionType *atexitTy =
+ llvm::FunctionType::get(CGF.IntTy, paramTys, false);
+
+ // Fetch the actual function.
+ llvm::Constant *atexit =
+ CGF.CGM.CreateRuntimeFunction(atexitTy, "__cxa_atexit");
+ if (llvm::Function *fn = dyn_cast<llvm::Function>(atexit))
+ fn->setDoesNotThrow();
+
+ // Create a variable that binds the atexit to this shared object.
+ llvm::Constant *handle =
+ CGF.CGM.CreateRuntimeVariable(CGF.Int8Ty, "__dso_handle");
+
+ llvm::Value *args[] = {
+ llvm::ConstantExpr::getBitCast(dtor, dtorTy),
+ llvm::ConstantExpr::getBitCast(addr, CGF.Int8PtrTy),
+ handle
+ };
+ CGF.Builder.CreateCall(atexit, args);
+}
+
+static llvm::Function *
+CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
+ llvm::FunctionType *ty,
+ const Twine &name);
+
+/// Create a stub function, suitable for being passed to atexit,
+/// which passes the given address to the given destructor function.
+static llvm::Constant *createAtExitStub(CodeGenModule &CGM,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // Get the destructor function type, void(*)(void).
+ llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
+ llvm::Function *fn =
+ CreateGlobalInitOrDestructFunction(CGM, ty,
+ Twine("__dtor_", addr->getName()));
+
+ CodeGenFunction CGF(CGM);
+
+ CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, fn,
+ CGM.getTypes().arrangeNullaryFunction(),
+ FunctionArgList(), SourceLocation());
+
+ llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);
+
+ // Make sure the call and the callee agree on calling convention.
+ if (llvm::Function *dtorFn =
+ dyn_cast<llvm::Function>(dtor->stripPointerCasts()))
+ call->setCallingConv(dtorFn->getCallingConv());
+
+ CGF.FinishFunction();
+
+ return fn;
+}
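+// (Note: the stub generated above is conceptually
+//    static void __dtor_<var>(void) { dtor(addr); }
+//  i.e. a nullary wrapper with the signature atexit expects.)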
+
+/// Register a global destructor using atexit.
+static void emitGlobalDtorWithAtExit(CodeGenFunction &CGF,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // Create a function which calls the destructor.
+ llvm::Constant *dtorStub = createAtExitStub(CGF.CGM, dtor, addr);
+
+ // extern "C" int atexit(void (*f)(void));
+ llvm::FunctionType *atexitTy =
+ llvm::FunctionType::get(CGF.IntTy, dtorStub->getType(), false);
+
+ llvm::Constant *atexit =
+ CGF.CGM.CreateRuntimeFunction(atexitTy, "atexit");
+ if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit))
+ atexitFn->setDoesNotThrow();
+
+ CGF.Builder.CreateCall(atexit, dtorStub);
+}
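+// (Note: together with createAtExitStub above, this simply registers the
+//  nullary stub with the standard atexit(), so the destructor runs during
+//  normal process termination.)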
+
+void CodeGenFunction::EmitCXXGlobalDtorRegistration(llvm::Constant *dtor,
+ llvm::Constant *addr) {
+ // Use __cxa_atexit if available.
+ if (CGM.getCodeGenOpts().CXAAtExit) {
+ emitGlobalDtorWithCXAAtExit(*this, dtor, addr);
+ return;
+ }
+
+ // In Apple kexts, we want to add a global destructor entry.
+ // FIXME: shouldn't this be guarded by some variable?
+ if (CGM.getContext().getLangOpts().AppleKext) {
+ // Generate a global destructor entry.
+ CGM.AddCXXDtorEntry(dtor, addr);
+ return;
+ }
+
+ // Otherwise, we just use atexit.
+ emitGlobalDtorWithAtExit(*this, dtor, addr);
+}
+
+void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr,
+ bool PerformInit) {
+ // If we've been asked to forbid guard variables, emit an error now.
+ // This diagnostic is hard-coded for Darwin's use case; we can find
+ // better phrasing if someone else needs it.
+ if (CGM.getCodeGenOpts().ForbidGuardVariables)
+ CGM.Error(D.getLocation(),
+ "this initialization requires a guard variable, which "
+ "the kernel does not support");
+
+ CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
+}
+
+static llvm::Function *
+CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
+ llvm::FunctionType *FTy,
+ const Twine &Name) {
+ llvm::Function *Fn =
+ llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
+ Name, &CGM.getModule());
+ if (!CGM.getContext().getLangOpts().AppleKext) {
+ // Set the section if needed.
+ if (const char *Section =
+ CGM.getContext().getTargetInfo().getStaticInitSectionSpecifier())
+ Fn->setSection(Section);
+ }
+
+ if (!CGM.getLangOpts().Exceptions)
+ Fn->setDoesNotThrow();
+
+ return Fn;
+}
+
+void
+CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
+ llvm::GlobalVariable *Addr,
+ bool PerformInit) {
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+
+ // Create a variable initialization function.
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(*this, FTy, "__cxx_global_var_init");
+
+ CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
+ PerformInit);
+
+ if (D->hasAttr<InitPriorityAttr>()) {
+ unsigned int order = D->getAttr<InitPriorityAttr>()->getPriority();
+ OrderGlobalInits Key(order, PrioritizedCXXGlobalInits.size());
+ PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
+ DelayedCXXInitPosition.erase(D);
+ }
+ else {
+ llvm::DenseMap<const Decl *, unsigned>::iterator I =
+ DelayedCXXInitPosition.find(D);
+ if (I == DelayedCXXInitPosition.end()) {
+ CXXGlobalInits.push_back(Fn);
+ } else {
+ assert(CXXGlobalInits[I->second] == 0);
+ CXXGlobalInits[I->second] = Fn;
+ DelayedCXXInitPosition.erase(I);
+ }
+ }
+}
+
+void
+CodeGenModule::EmitCXXGlobalInitFunc() {
+ while (!CXXGlobalInits.empty() && !CXXGlobalInits.back())
+ CXXGlobalInits.pop_back();
+
+ if (CXXGlobalInits.empty() && PrioritizedCXXGlobalInits.empty())
+ return;
+
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+
+ // Create our global initialization function.
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__I_a");
+
+ if (!PrioritizedCXXGlobalInits.empty()) {
+ SmallVector<llvm::Constant*, 8> LocalCXXGlobalInits;
+ llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
+ PrioritizedCXXGlobalInits.end());
+ for (unsigned i = 0; i < PrioritizedCXXGlobalInits.size(); i++) {
+ llvm::Function *Fn = PrioritizedCXXGlobalInits[i].second;
+ LocalCXXGlobalInits.push_back(Fn);
+ }
+ LocalCXXGlobalInits.append(CXXGlobalInits.begin(), CXXGlobalInits.end());
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
+ &LocalCXXGlobalInits[0],
+ LocalCXXGlobalInits.size());
+ }
+ else
+ CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn,
+ &CXXGlobalInits[0],
+ CXXGlobalInits.size());
+ AddGlobalCtor(Fn);
+ CXXGlobalInits.clear();
+ PrioritizedCXXGlobalInits.clear();
+}
+
+void CodeGenModule::EmitCXXGlobalDtorFunc() {
+ if (CXXGlobalDtors.empty())
+ return;
+
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+
+ // Create our global destructor function.
+ llvm::Function *Fn =
+ CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__D_a");
+
+ CodeGenFunction(*this).GenerateCXXGlobalDtorsFunc(Fn, CXXGlobalDtors);
+ AddGlobalDtor(Fn);
+}
+
+/// Emit the code necessary to initialize the given global variable.
+void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
+ const VarDecl *D,
+ llvm::GlobalVariable *Addr,
+ bool PerformInit) {
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
+ getTypes().arrangeNullaryFunction(),
+ FunctionArgList(), SourceLocation());
+
+ // Use guarded initialization if the global variable is weak. This
+ // occurs for, e.g., instantiated static data members and
+ // definitions explicitly marked weak.
+ if (Addr->getLinkage() == llvm::GlobalValue::WeakODRLinkage ||
+ Addr->getLinkage() == llvm::GlobalValue::WeakAnyLinkage) {
+ EmitCXXGuardedInit(*D, Addr, PerformInit);
+ } else {
+ EmitCXXGlobalVarDeclInit(*D, Addr, PerformInit);
+ }
+
+ FinishFunction();
+}
+
+void CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
+ llvm::Constant **Decls,
+ unsigned NumDecls) {
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
+ getTypes().arrangeNullaryFunction(),
+ FunctionArgList(), SourceLocation());
+
+ RunCleanupsScope Scope(*this);
+
+ // When building in Objective-C++ ARC mode, create an autorelease pool
+ // around the global initializers.
+ if (getLangOpts().ObjCAutoRefCount && getLangOpts().CPlusPlus) {
+ llvm::Value *token = EmitObjCAutoreleasePoolPush();
+ EmitObjCAutoreleasePoolCleanup(token);
+ }
+
+ for (unsigned i = 0; i != NumDecls; ++i)
+ if (Decls[i])
+ Builder.CreateCall(Decls[i]);
+
+ Scope.ForceCleanup();
+
+ FinishFunction();
+}
+
+void CodeGenFunction::GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
+ const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> >
+ &DtorsAndObjects) {
+ StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
+ getTypes().arrangeNullaryFunction(),
+ FunctionArgList(), SourceLocation());
+
+ // Emit the dtors, in reverse order from construction.
+ for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
+ llvm::Value *Callee = DtorsAndObjects[e - i - 1].first;
+ llvm::CallInst *CI = Builder.CreateCall(Callee,
+ DtorsAndObjects[e - i - 1].second);
+ // Make sure the call and the callee agree on calling convention.
+ if (llvm::Function *F = dyn_cast<llvm::Function>(Callee))
+ CI->setCallingConv(F->getCallingConv());
+ }
+
+ FinishFunction();
+}
+
+/// generateDestroyHelper - Generates a helper function which, when
+/// invoked, destroys the given object.
+llvm::Function *
+CodeGenFunction::generateDestroyHelper(llvm::Constant *addr,
+ QualType type,
+ Destroyer *destroyer,
+ bool useEHCleanupForArray) {
+ FunctionArgList args;
+ ImplicitParamDecl dst(0, SourceLocation(), 0, getContext().VoidPtrTy);
+ args.push_back(&dst);
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeFunctionDeclaration(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ /*variadic*/ false);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
+ llvm::Function *fn =
+ CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
+
+ StartFunction(GlobalDecl(), getContext().VoidTy, fn, FI, args,
+ SourceLocation());
+
+ emitDestroy(addr, type, destroyer, useEHCleanupForArray);
+
+ FinishFunction();
+
+ return fn;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
new file mode 100644
index 0000000..95e0030
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGException.cpp
@@ -0,0 +1,1595 @@
+//===--- CGException.cpp - Emit LLVM Code for C++ exceptions --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ exception related code generation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CGCleanup.h"
+#include "CGObjCRuntime.h"
+#include "TargetInfo.h"
+#include "clang/AST/StmtCXX.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Support/CallSite.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static llvm::Constant *getAllocateExceptionFn(CodeGenFunction &CGF) {
+ // void *__cxa_allocate_exception(size_t thrown_size);
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.Int8PtrTy, CGF.SizeTy, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_allocate_exception");
+}
+
+static llvm::Constant *getFreeExceptionFn(CodeGenFunction &CGF) {
+ // void __cxa_free_exception(void *thrown_exception);
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_free_exception");
+}
+
+static llvm::Constant *getThrowFn(CodeGenFunction &CGF) {
+ // void __cxa_throw(void *thrown_exception, std::type_info *tinfo,
+ // void (*dest) (void *));
+
+ llvm::Type *Args[3] = { CGF.Int8PtrTy, CGF.Int8PtrTy, CGF.Int8PtrTy };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, Args, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
+}
+
+static llvm::Constant *getReThrowFn(CodeGenFunction &CGF) {
+ // void __cxa_rethrow();
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
+}
+
+static llvm::Constant *getGetExceptionPtrFn(CodeGenFunction &CGF) {
+ // void *__cxa_get_exception_ptr(void*);
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_get_exception_ptr");
+}
+
+static llvm::Constant *getBeginCatchFn(CodeGenFunction &CGF) {
+ // void *__cxa_begin_catch(void*);
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.Int8PtrTy, CGF.Int8PtrTy, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_begin_catch");
+}
+
+static llvm::Constant *getEndCatchFn(CodeGenFunction &CGF) {
+ // void __cxa_end_catch();
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_end_catch");
+}
+
+static llvm::Constant *getUnexpectedFn(CodeGenFunction &CGF) {
+  // void __cxa_call_unexpected(void *thrown_exception);
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected");
+}
+
+llvm::Constant *CodeGenFunction::getUnwindResumeFn() {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false);
+
+ if (CGM.getLangOpts().SjLjExceptions)
+ return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume");
+ return CGM.CreateRuntimeFunction(FTy, "_Unwind_Resume");
+}
+
+llvm::Constant *CodeGenFunction::getUnwindResumeOrRethrowFn() {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(VoidTy, Int8PtrTy, /*IsVarArgs=*/false);
+
+ if (CGM.getLangOpts().SjLjExceptions)
+ return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume_or_Rethrow");
+ return CGM.CreateRuntimeFunction(FTy, "_Unwind_Resume_or_Rethrow");
+}
+
+static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
+ // void __terminate();
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, /*IsVarArgs=*/false);
+
+ StringRef name;
+
+ // In C++, use std::terminate().
+ if (CGF.getLangOpts().CPlusPlus)
+ name = "_ZSt9terminatev"; // FIXME: mangling!
+ else if (CGF.getLangOpts().ObjC1 &&
+ CGF.CGM.getCodeGenOpts().ObjCRuntimeHasTerminate)
+ name = "objc_terminate";
+ else
+ name = "abort";
+ return CGF.CGM.CreateRuntimeFunction(FTy, name);
+}
+
+static llvm::Constant *getCatchallRethrowFn(CodeGenFunction &CGF,
+ StringRef Name) {
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGF.VoidTy, CGF.Int8PtrTy, /*IsVarArgs=*/false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, Name);
+}
+
+namespace {
+ /// The exceptions personality for a function.
+ struct EHPersonality {
+ const char *PersonalityFn;
+
+ // If this is non-null, this personality requires a non-standard
+ // function for rethrowing an exception after a catchall cleanup.
+ // This function must have prototype void(void*).
+ const char *CatchallRethrowFn;
+
+ static const EHPersonality &get(const LangOptions &Lang);
+ static const EHPersonality GNU_C;
+ static const EHPersonality GNU_C_SJLJ;
+ static const EHPersonality GNU_ObjC;
+ static const EHPersonality GNU_ObjCXX;
+ static const EHPersonality NeXT_ObjC;
+ static const EHPersonality GNU_CPlusPlus;
+ static const EHPersonality GNU_CPlusPlus_SJLJ;
+ };
+}
+
+const EHPersonality EHPersonality::GNU_C = { "__gcc_personality_v0", 0 };
+const EHPersonality EHPersonality::GNU_C_SJLJ = { "__gcc_personality_sj0", 0 };
+const EHPersonality EHPersonality::NeXT_ObjC = { "__objc_personality_v0", 0 };
+const EHPersonality EHPersonality::GNU_CPlusPlus = { "__gxx_personality_v0", 0};
+const EHPersonality
+EHPersonality::GNU_CPlusPlus_SJLJ = { "__gxx_personality_sj0", 0 };
+const EHPersonality
+EHPersonality::GNU_ObjC = {"__gnu_objc_personality_v0", "objc_exception_throw"};
+const EHPersonality
+EHPersonality::GNU_ObjCXX = { "__gnustep_objcxx_personality_v0", 0 };
+
+static const EHPersonality &getCPersonality(const LangOptions &L) {
+ if (L.SjLjExceptions)
+ return EHPersonality::GNU_C_SJLJ;
+ return EHPersonality::GNU_C;
+}
+
+static const EHPersonality &getObjCPersonality(const LangOptions &L) {
+ if (L.NeXTRuntime) {
+ if (L.ObjCNonFragileABI) return EHPersonality::NeXT_ObjC;
+ else return getCPersonality(L);
+ } else {
+ return EHPersonality::GNU_ObjC;
+ }
+}
+
+static const EHPersonality &getCXXPersonality(const LangOptions &L) {
+ if (L.SjLjExceptions)
+ return EHPersonality::GNU_CPlusPlus_SJLJ;
+ else
+ return EHPersonality::GNU_CPlusPlus;
+}
+
+/// Determines the personality function to use when both C++
+/// and Objective-C exceptions are being caught.
+static const EHPersonality &getObjCXXPersonality(const LangOptions &L) {
+ // The ObjC personality defers to the C++ personality for non-ObjC
+ // handlers. Unlike the C++ case, we use the same personality
+ // function on targets using (backend-driven) SJLJ EH.
+ if (L.NeXTRuntime) {
+ if (L.ObjCNonFragileABI)
+ return EHPersonality::NeXT_ObjC;
+
+ // In the fragile ABI, just use C++ exception handling and hope
+ // they're not doing crazy exception mixing.
+ else
+ return getCXXPersonality(L);
+ }
+
+ // The GNU runtime's personality function inherently doesn't support
+ // mixed EH. Use the C++ personality just to avoid returning null.
+ return EHPersonality::GNU_ObjCXX;
+}
+
+const EHPersonality &EHPersonality::get(const LangOptions &L) {
+ if (L.CPlusPlus && L.ObjC1)
+ return getObjCXXPersonality(L);
+ else if (L.CPlusPlus)
+ return getCXXPersonality(L);
+ else if (L.ObjC1)
+ return getObjCPersonality(L);
+ else
+ return getCPersonality(L);
+}
+
+static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
+ const EHPersonality &Personality) {
+ llvm::Constant *Fn =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty, true),
+ Personality.PersonalityFn);
+ return Fn;
+}
+
+static llvm::Constant *getOpaquePersonalityFn(CodeGenModule &CGM,
+ const EHPersonality &Personality) {
+ llvm::Constant *Fn = getPersonalityFn(CGM, Personality);
+ return llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
+}
+
+/// Check whether a personality function could reasonably be swapped
+/// for a C++ personality function.
+static bool PersonalityHasOnlyCXXUses(llvm::Constant *Fn) {
+ for (llvm::Constant::use_iterator
+ I = Fn->use_begin(), E = Fn->use_end(); I != E; ++I) {
+ llvm::User *User = *I;
+
+ // Conditionally white-list bitcasts.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(User)) {
+ if (CE->getOpcode() != llvm::Instruction::BitCast) return false;
+ if (!PersonalityHasOnlyCXXUses(CE))
+ return false;
+ continue;
+ }
+
+ // Otherwise, it has to be a landingpad instruction.
+ llvm::LandingPadInst *LPI = dyn_cast<llvm::LandingPadInst>(User);
+ if (!LPI) return false;
+
+ for (unsigned I = 0, E = LPI->getNumClauses(); I != E; ++I) {
+ // Look for something that would've been returned by the ObjC
+ // runtime's GetEHType() method.
+ llvm::Value *Val = LPI->getClause(I)->stripPointerCasts();
+ if (LPI->isCatch(I)) {
+ // Check if the catch value has the ObjC prefix.
+ if (llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(Val))
+ // ObjC EH selector entries are always global variables with
+ // names starting like this.
+ if (GV->getName().startswith("OBJC_EHTYPE"))
+ return false;
+ } else {
+ // Check if any of the filter values have the ObjC prefix.
+ llvm::Constant *CVal = cast<llvm::Constant>(Val);
+ for (llvm::User::op_iterator
+ II = CVal->op_begin(), IE = CVal->op_end(); II != IE; ++II) {
+ if (llvm::GlobalVariable *GV =
+ cast<llvm::GlobalVariable>((*II)->stripPointerCasts()))
+ // ObjC EH selector entries are always global variables with
+ // names starting like this.
+ if (GV->getName().startswith("OBJC_EHTYPE"))
+ return false;
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+/// Try to use the C++ personality function in ObjC++. Not doing this
+/// can cause some incompatibilities with gcc, which is more
+/// aggressive about only using the ObjC++ personality in a function
+/// when it really needs it.
+void CodeGenModule::SimplifyPersonality() {
+ // For now, this is really a Darwin-specific operation.
+ if (!Context.getTargetInfo().getTriple().isOSDarwin())
+ return;
+
+ // If we're not in ObjC++ -fexceptions, there's nothing to do.
+ if (!LangOpts.CPlusPlus || !LangOpts.ObjC1 || !LangOpts.Exceptions)
+ return;
+
+ const EHPersonality &ObjCXX = EHPersonality::get(LangOpts);
+ const EHPersonality &CXX = getCXXPersonality(LangOpts);
+ if (&ObjCXX == &CXX)
+ return;
+
+ assert(std::strcmp(ObjCXX.PersonalityFn, CXX.PersonalityFn) != 0 &&
+ "Different EHPersonalities using the same personality function.");
+
+ llvm::Function *Fn = getModule().getFunction(ObjCXX.PersonalityFn);
+
+ // Nothing to do if it's unused.
+ if (!Fn || Fn->use_empty()) return;
+
+ // Can't do the optimization if it has non-C++ uses.
+ if (!PersonalityHasOnlyCXXUses(Fn)) return;
+
+ // Create the C++ personality function and kill off the old
+ // function.
+ llvm::Constant *CXXFn = getPersonalityFn(*this, CXX);
+
+ // This can happen if the user is screwing with us.
+ if (Fn->getType() != CXXFn->getType()) return;
+
+ Fn->replaceAllUsesWith(CXXFn);
+ Fn->eraseFromParent();
+}
+
+/// Returns the value to inject into a selector to indicate the
+/// presence of a catch-all.
+static llvm::Constant *getCatchAllValue(CodeGenFunction &CGF) {
+ // Possibly we should use @llvm.eh.catch.all.value here.
+ return llvm::ConstantPointerNull::get(CGF.Int8PtrTy);
+}
+
+namespace {
+ /// A cleanup to free the exception object if its initialization
+ /// throws.
+ struct FreeException : EHScopeStack::Cleanup {
+ llvm::Value *exn;
+ FreeException(llvm::Value *exn) : exn(exn) {}
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.Builder.CreateCall(getFreeExceptionFn(CGF), exn)
+ ->setDoesNotThrow();
+ }
+ };
+}
+
+// Emits an exception expression into the given location. This
+// differs from EmitAnyExprToMem only in that, if a final copy-ctor
+// call is required, an exception within that copy ctor causes
+// std::terminate to be invoked.
+static void EmitAnyExprToExn(CodeGenFunction &CGF, const Expr *e,
+ llvm::Value *addr) {
+ // Make sure the exception object is cleaned up if there's an
+ // exception during initialization.
+ CGF.pushFullExprCleanup<FreeException>(EHCleanup, addr);
+ EHScopeStack::stable_iterator cleanup = CGF.EHStack.stable_begin();
+
+ // __cxa_allocate_exception returns a void*; we need to cast this
+ // to the appropriate type for the object.
+ llvm::Type *ty = CGF.ConvertTypeForMem(e->getType())->getPointerTo();
+ llvm::Value *typedAddr = CGF.Builder.CreateBitCast(addr, ty);
+
+ // FIXME: this isn't quite right! If there's a final unelided call
+ // to a copy constructor, then according to [except.terminate]p1 we
+ // must call std::terminate() if that constructor throws, because
+ // technically that copy occurs after the exception expression is
+ // evaluated but before the exception is caught. But the best way
+ // to handle that is to teach EmitAggExpr to do the final copy
+ // differently if it can't be elided.
+ CGF.EmitAnyExprToMem(e, typedAddr, e->getType().getQualifiers(),
+ /*IsInit*/ true);
+
+ // Deactivate the cleanup block.
+ CGF.DeactivateCleanupBlock(cleanup, cast<llvm::Instruction>(typedAddr));
+}
+
+llvm::Value *CodeGenFunction::getExceptionSlot() {
+ if (!ExceptionSlot)
+ ExceptionSlot = CreateTempAlloca(Int8PtrTy, "exn.slot");
+ return ExceptionSlot;
+}
+
+llvm::Value *CodeGenFunction::getEHSelectorSlot() {
+ if (!EHSelectorSlot)
+ EHSelectorSlot = CreateTempAlloca(Int32Ty, "ehselector.slot");
+ return EHSelectorSlot;
+}
+
+llvm::Value *CodeGenFunction::getExceptionFromSlot() {
+ return Builder.CreateLoad(getExceptionSlot(), "exn");
+}
+
+llvm::Value *CodeGenFunction::getSelectorFromSlot() {
+ return Builder.CreateLoad(getEHSelectorSlot(), "sel");
+}
+
+void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E) {
+ if (!E->getSubExpr()) {
+ if (getInvokeDest()) {
+ Builder.CreateInvoke(getReThrowFn(*this),
+ getUnreachableBlock(),
+ getInvokeDest())
+ ->setDoesNotReturn();
+ } else {
+ Builder.CreateCall(getReThrowFn(*this))->setDoesNotReturn();
+ Builder.CreateUnreachable();
+ }
+
+ // throw is an expression, and the expression emitters expect us
+ // to leave ourselves at a valid insertion point.
+ EmitBlock(createBasicBlock("throw.cont"));
+
+ return;
+ }
+
+ QualType ThrowType = E->getSubExpr()->getType();
+
+ // Now allocate the exception object.
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ uint64_t TypeSize = getContext().getTypeSizeInChars(ThrowType).getQuantity();
+
+ llvm::Constant *AllocExceptionFn = getAllocateExceptionFn(*this);
+ llvm::CallInst *ExceptionPtr =
+ Builder.CreateCall(AllocExceptionFn,
+ llvm::ConstantInt::get(SizeTy, TypeSize),
+ "exception");
+ ExceptionPtr->setDoesNotThrow();
+
+ EmitAnyExprToExn(*this, E->getSubExpr(), ExceptionPtr);
+
+ // Now throw the exception.
+ llvm::Constant *TypeInfo = CGM.GetAddrOfRTTIDescriptor(ThrowType,
+ /*ForEH=*/true);
+
+ // The address of the destructor. If the exception type has a
+ // trivial destructor (or isn't a record), we just pass null.
+ llvm::Constant *Dtor = 0;
+ if (const RecordType *RecordTy = ThrowType->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!Record->hasTrivialDestructor()) {
+ CXXDestructorDecl *DtorD = Record->getDestructor();
+ Dtor = CGM.GetAddrOfCXXDestructor(DtorD, Dtor_Complete);
+ Dtor = llvm::ConstantExpr::getBitCast(Dtor, Int8PtrTy);
+ }
+ }
+ if (!Dtor) Dtor = llvm::Constant::getNullValue(Int8PtrTy);
+
+ if (getInvokeDest()) {
+ llvm::InvokeInst *ThrowCall =
+ Builder.CreateInvoke3(getThrowFn(*this),
+ getUnreachableBlock(), getInvokeDest(),
+ ExceptionPtr, TypeInfo, Dtor);
+ ThrowCall->setDoesNotReturn();
+ } else {
+ llvm::CallInst *ThrowCall =
+ Builder.CreateCall3(getThrowFn(*this), ExceptionPtr, TypeInfo, Dtor);
+ ThrowCall->setDoesNotReturn();
+ Builder.CreateUnreachable();
+ }
+
+ // throw is an expression, and the expression emitters expect us
+ // to leave ourselves at a valid insertion point.
+ EmitBlock(createBasicBlock("throw.cont"));
+}
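+
+// For illustration, on an Itanium-ABI target with a 64-bit size_t and no
+// enclosing cleanups (so plain calls rather than invokes), the code above
+// lowers `throw 42;` roughly to:
+//
+//   %exn = call i8* @__cxa_allocate_exception(i64 4)
+//   %buf = bitcast i8* %exn to i32*
+//   store i32 42, i32* %buf
+//   call void @__cxa_throw(i8* %exn, i8* bitcast (i8** @_ZTIi to i8*),
+//                          i8* null) noreturn
+//   unreachable
+//
+// The null third argument is the destructor pointer, which is only non-null
+// for class types with non-trivial destructors.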
+
+void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
+ if (!CGM.getLangOpts().CXXExceptions)
+ return;
+
+ const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (FD == 0)
+ return;
+ const FunctionProtoType *Proto = FD->getType()->getAs<FunctionProtoType>();
+ if (Proto == 0)
+ return;
+
+ ExceptionSpecificationType EST = Proto->getExceptionSpecType();
+ if (isNoexceptExceptionSpec(EST)) {
+ if (Proto->getNoexceptSpec(getContext()) == FunctionProtoType::NR_Nothrow) {
+ // noexcept functions are simple terminate scopes.
+ EHStack.pushTerminate();
+ }
+ } else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
+ unsigned NumExceptions = Proto->getNumExceptions();
+ EHFilterScope *Filter = EHStack.pushFilter(NumExceptions);
+
+ for (unsigned I = 0; I != NumExceptions; ++I) {
+ QualType Ty = Proto->getExceptionType(I);
+ QualType ExceptType = Ty.getNonReferenceType().getUnqualifiedType();
+ llvm::Value *EHType = CGM.GetAddrOfRTTIDescriptor(ExceptType,
+ /*ForEH=*/true);
+ Filter->setFilter(I, EHType);
+ }
+ }
+}
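+
+// For illustration:
+//
+//   void f() noexcept;          // pushes a terminate scope around the body
+//   void g() throw(int, Err);   // pushes an EH filter whose entries are the
+//                               // RTTI descriptors for 'int' and 'Err'
+//                               // ('Err' is just a placeholder type here)
+//
+// EmitEndEHSpec below pops whichever scope was pushed here.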
+
+/// Emit the dispatch block for a filter scope if necessary.
+static void emitFilterDispatchBlock(CodeGenFunction &CGF,
+ EHFilterScope &filterScope) {
+ llvm::BasicBlock *dispatchBlock = filterScope.getCachedEHDispatchBlock();
+ if (!dispatchBlock) return;
+ if (dispatchBlock->use_empty()) {
+ delete dispatchBlock;
+ return;
+ }
+
+ CGF.EmitBlockAfterUses(dispatchBlock);
+
+  // If the filter lists any types, we need to check whether we got here
+  // because the filter triggered (negative selector); an empty dynamic
+  // exception specification rejects every exception, so no check is needed.
+ if (filterScope.getNumFilters()) {
+ // Load the selector value.
+ llvm::Value *selector = CGF.getSelectorFromSlot();
+ llvm::BasicBlock *unexpectedBB = CGF.createBasicBlock("ehspec.unexpected");
+
+ llvm::Value *zero = CGF.Builder.getInt32(0);
+ llvm::Value *failsFilter =
+ CGF.Builder.CreateICmpSLT(selector, zero, "ehspec.fails");
+ CGF.Builder.CreateCondBr(failsFilter, unexpectedBB, CGF.getEHResumeBlock());
+
+ CGF.EmitBlock(unexpectedBB);
+ }
+
+ // Call __cxa_call_unexpected. This doesn't need to be an invoke
+ // because __cxa_call_unexpected magically filters exceptions
+ // according to the last landing pad the exception was thrown
+ // into. Seriously.
+ llvm::Value *exn = CGF.getExceptionFromSlot();
+ CGF.Builder.CreateCall(getUnexpectedFn(CGF), exn)
+ ->setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+}
+
+void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
+ if (!CGM.getLangOpts().CXXExceptions)
+ return;
+
+ const FunctionDecl* FD = dyn_cast_or_null<FunctionDecl>(D);
+ if (FD == 0)
+ return;
+ const FunctionProtoType *Proto = FD->getType()->getAs<FunctionProtoType>();
+ if (Proto == 0)
+ return;
+
+ ExceptionSpecificationType EST = Proto->getExceptionSpecType();
+ if (isNoexceptExceptionSpec(EST)) {
+ if (Proto->getNoexceptSpec(getContext()) == FunctionProtoType::NR_Nothrow) {
+ EHStack.popTerminate();
+ }
+ } else if (EST == EST_Dynamic || EST == EST_DynamicNone) {
+ EHFilterScope &filterScope = cast<EHFilterScope>(*EHStack.begin());
+ emitFilterDispatchBlock(*this, filterScope);
+ EHStack.popFilter();
+ }
+}
+
+void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
+ EnterCXXTryStmt(S);
+ EmitStmt(S.getTryBlock());
+ ExitCXXTryStmt(S);
+}
+
+void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
+ unsigned NumHandlers = S.getNumHandlers();
+ EHCatchScope *CatchScope = EHStack.pushCatch(NumHandlers);
+
+ for (unsigned I = 0; I != NumHandlers; ++I) {
+ const CXXCatchStmt *C = S.getHandler(I);
+
+ llvm::BasicBlock *Handler = createBasicBlock("catch");
+ if (C->getExceptionDecl()) {
+      // FIXME: Dropping the reference type when computing the type info
+      // makes it impossible to correctly implement catch-by-reference
+ // semantics for pointers. Unfortunately, this is what all
+ // existing compilers do, and it's not clear that the standard
+ // personality routine is capable of doing this right. See C++ DR 388:
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#388
+ QualType CaughtType = C->getCaughtType();
+ CaughtType = CaughtType.getNonReferenceType().getUnqualifiedType();
+
+ llvm::Value *TypeInfo = 0;
+ if (CaughtType->isObjCObjectPointerType())
+ TypeInfo = CGM.getObjCRuntime().GetEHType(CaughtType);
+ else
+ TypeInfo = CGM.GetAddrOfRTTIDescriptor(CaughtType, /*ForEH=*/true);
+ CatchScope->setHandler(I, TypeInfo, Handler);
+ } else {
+ // No exception decl indicates '...', a catch-all.
+ CatchScope->setCatchAllHandler(I, Handler);
+ }
+ }
+}
+
+llvm::BasicBlock *
+CodeGenFunction::getEHDispatchBlock(EHScopeStack::stable_iterator si) {
+ // The dispatch block for the end of the scope chain is a block that
+ // just resumes unwinding.
+ if (si == EHStack.stable_end())
+ return getEHResumeBlock();
+
+ // Otherwise, we should look at the actual scope.
+ EHScope &scope = *EHStack.find(si);
+
+ llvm::BasicBlock *dispatchBlock = scope.getCachedEHDispatchBlock();
+ if (!dispatchBlock) {
+ switch (scope.getKind()) {
+ case EHScope::Catch: {
+ // Apply a special case to a single catch-all.
+ EHCatchScope &catchScope = cast<EHCatchScope>(scope);
+ if (catchScope.getNumHandlers() == 1 &&
+ catchScope.getHandler(0).isCatchAll()) {
+ dispatchBlock = catchScope.getHandler(0).Block;
+
+ // Otherwise, make a dispatch block.
+ } else {
+ dispatchBlock = createBasicBlock("catch.dispatch");
+ }
+ break;
+ }
+
+ case EHScope::Cleanup:
+ dispatchBlock = createBasicBlock("ehcleanup");
+ break;
+
+ case EHScope::Filter:
+ dispatchBlock = createBasicBlock("filter.dispatch");
+ break;
+
+ case EHScope::Terminate:
+ dispatchBlock = getTerminateHandler();
+ break;
+ }
+ scope.setCachedEHDispatchBlock(dispatchBlock);
+ }
+ return dispatchBlock;
+}
+
+/// Check whether this is a non-EH scope, i.e. a scope which doesn't
+/// affect exception handling. Currently, the only non-EH scopes are
+/// normal-only cleanup scopes.
+static bool isNonEHScope(const EHScope &S) {
+ switch (S.getKind()) {
+ case EHScope::Cleanup:
+ return !cast<EHCleanupScope>(S).isEHCleanup();
+ case EHScope::Filter:
+ case EHScope::Catch:
+ case EHScope::Terminate:
+ return false;
+ }
+
+ llvm_unreachable("Invalid EHScope Kind!");
+}
+
+llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
+ assert(EHStack.requiresLandingPad());
+ assert(!EHStack.empty());
+
+ if (!CGM.getLangOpts().Exceptions)
+ return 0;
+
+ // Check the innermost scope for a cached landing pad. If this is
+ // a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad.
+ llvm::BasicBlock *LP = EHStack.begin()->getCachedLandingPad();
+ if (LP) return LP;
+
+ // Build the landing pad for this scope.
+ LP = EmitLandingPad();
+ assert(LP);
+
+ // Cache the landing pad on the innermost scope. If this is a
+ // non-EH scope, cache the landing pad on the enclosing scope, too.
+ for (EHScopeStack::iterator ir = EHStack.begin(); true; ++ir) {
+ ir->setCachedLandingPad(LP);
+ if (!isNonEHScope(*ir)) break;
+ }
+
+ return LP;
+}
+
+// This code contains a hack to work around a design flaw in
+// LLVM's EH IR which breaks semantics after inlining. This same
+// hack is implemented in llvm-gcc.
+//
+// The LLVM EH abstraction is basically a thin veneer over the
+// traditional GCC zero-cost design: for each range of instructions
+// in the function, there is (at most) one "landing pad" with an
+// associated chain of EH actions. A language-specific personality
+// function interprets this chain of actions and (1) decides whether
+// or not to resume execution at the landing pad and (2) if so,
+// provides an integer indicating why it's stopping. In LLVM IR,
+// the association of a landing pad with a range of instructions is
+// achieved via an invoke instruction, the chain of actions becomes
+// the arguments to the @llvm.eh.selector call, and the selector
+// call returns the integer indicator. Other than the required
+// presence of two intrinsic function calls in the landing pad,
+// the IR exactly describes the layout of the output code.
+//
+// A principal advantage of this design is that it is completely
+// language-agnostic; in theory, the LLVM optimizers can treat
+// landing pads neutrally, and targets need only know how to lower
+// the intrinsics to have a functioning exceptions system (assuming
+// that platform exceptions follow something approximately like the
+// GCC design). Unfortunately, landing pads cannot be combined in a
+// language-agnostic way: given selectors A and B, there is no way
+// to make a single landing pad which faithfully represents the
+// semantics of propagating an exception first through A, then
+// through B, without knowing how the personality will interpret the
+// (lowered form of the) selectors. This means that inlining has no
+// choice but to crudely chain invokes (i.e., to ignore invokes in
+// the inlined function, but to turn all unwindable calls into
+// invokes), which is only semantically valid if every unwind stops
+// at every landing pad.
+//
+// Therefore, the invoke-inline hack is to guarantee that every
+// landing pad has a catch-all.
+enum CleanupHackLevel_t {
+ /// A level of hack that requires that all landing pads have
+ /// catch-alls.
+ CHL_MandatoryCatchall,
+
+ /// A level of hack that requires that all landing pads handle
+ /// cleanups.
+ CHL_MandatoryCleanup,
+
+ /// No hacks at all; ideal IR generation.
+ CHL_Ideal
+};
+const CleanupHackLevel_t CleanupHackLevel = CHL_MandatoryCleanup;
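+
+// For illustration, with CHL_MandatoryCleanup a landing pad that only has
+// cleanups to run comes out roughly as:
+//
+//   %lp = landingpad { i8*, i32 }
+//           personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
+//           cleanup
+//
+// whereas CHL_MandatoryCatchall would instead force a "catch i8* null"
+// clause onto every landing pad.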
+
+llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
+ assert(EHStack.requiresLandingPad());
+
+ EHScope &innermostEHScope = *EHStack.find(EHStack.getInnermostEHScope());
+ switch (innermostEHScope.getKind()) {
+ case EHScope::Terminate:
+ return getTerminateLandingPad();
+
+ case EHScope::Catch:
+ case EHScope::Cleanup:
+ case EHScope::Filter:
+ if (llvm::BasicBlock *lpad = innermostEHScope.getCachedLandingPad())
+ return lpad;
+ }
+
+ // Save the current IR generation state.
+ CGBuilderTy::InsertPoint savedIP = Builder.saveAndClearIP();
+
+ const EHPersonality &personality = EHPersonality::get(getLangOpts());
+
+ // Create and configure the landing pad.
+ llvm::BasicBlock *lpad = createBasicBlock("lpad");
+ EmitBlock(lpad);
+
+ llvm::LandingPadInst *LPadInst =
+ Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty, NULL),
+ getOpaquePersonalityFn(CGM, personality), 0);
+
+ llvm::Value *LPadExn = Builder.CreateExtractValue(LPadInst, 0);
+ Builder.CreateStore(LPadExn, getExceptionSlot());
+ llvm::Value *LPadSel = Builder.CreateExtractValue(LPadInst, 1);
+ Builder.CreateStore(LPadSel, getEHSelectorSlot());
+
+  // The exception pointer and selector were saved above. It's safe to
+  // use a single exception slot per function because EH cleanups can
+  // never have nested try/catches.
+
+ // Accumulate all the handlers in scope.
+ bool hasCatchAll = false;
+ bool hasCleanup = false;
+ bool hasFilter = false;
+ SmallVector<llvm::Value*, 4> filterTypes;
+ llvm::SmallPtrSet<llvm::Value*, 4> catchTypes;
+ for (EHScopeStack::iterator I = EHStack.begin(), E = EHStack.end();
+ I != E; ++I) {
+
+ switch (I->getKind()) {
+ case EHScope::Cleanup:
+ // If we have a cleanup, remember that.
+ hasCleanup = (hasCleanup || cast<EHCleanupScope>(*I).isEHCleanup());
+ continue;
+
+ case EHScope::Filter: {
+ assert(I.next() == EHStack.end() && "EH filter is not end of EH stack");
+ assert(!hasCatchAll && "EH filter reached after catch-all");
+
+ // Filter scopes get added to the landingpad in weird ways.
+ EHFilterScope &filter = cast<EHFilterScope>(*I);
+ hasFilter = true;
+
+ // Add all the filter values.
+ for (unsigned i = 0, e = filter.getNumFilters(); i != e; ++i)
+ filterTypes.push_back(filter.getFilter(i));
+ goto done;
+ }
+
+ case EHScope::Terminate:
+ // Terminate scopes are basically catch-alls.
+ assert(!hasCatchAll);
+ hasCatchAll = true;
+ goto done;
+
+ case EHScope::Catch:
+ break;
+ }
+
+ EHCatchScope &catchScope = cast<EHCatchScope>(*I);
+ for (unsigned hi = 0, he = catchScope.getNumHandlers(); hi != he; ++hi) {
+ EHCatchScope::Handler handler = catchScope.getHandler(hi);
+
+ // If this is a catch-all, register that and abort.
+ if (!handler.Type) {
+ assert(!hasCatchAll);
+ hasCatchAll = true;
+ goto done;
+ }
+
+ // Check whether we already have a handler for this type.
+ if (catchTypes.insert(handler.Type))
+ // If not, add it directly to the landingpad.
+ LPadInst->addClause(handler.Type);
+ }
+ }
+
+ done:
+ // If we have a catch-all, add null to the landingpad.
+ assert(!(hasCatchAll && hasFilter));
+ if (hasCatchAll) {
+ LPadInst->addClause(getCatchAllValue(*this));
+
+ // If we have an EH filter, we need to add those handlers in the
+ // right place in the landingpad, which is to say, at the end.
+ } else if (hasFilter) {
+ // Create a filter expression: a constant array indicating which filter
+ // types there are. The personality routine only lands here if the filter
+ // doesn't match.
+ llvm::SmallVector<llvm::Constant*, 8> Filters;
+ llvm::ArrayType *AType =
+ llvm::ArrayType::get(!filterTypes.empty() ?
+ filterTypes[0]->getType() : Int8PtrTy,
+ filterTypes.size());
+
+ for (unsigned i = 0, e = filterTypes.size(); i != e; ++i)
+ Filters.push_back(cast<llvm::Constant>(filterTypes[i]));
+ llvm::Constant *FilterArray = llvm::ConstantArray::get(AType, Filters);
+ LPadInst->addClause(FilterArray);
+
+ // Also check whether we need a cleanup.
+ if (hasCleanup)
+ LPadInst->setCleanup(true);
+
+ // Otherwise, signal that we at least have cleanups.
+ } else if (CleanupHackLevel == CHL_MandatoryCatchall || hasCleanup) {
+ if (CleanupHackLevel == CHL_MandatoryCatchall)
+ LPadInst->addClause(getCatchAllValue(*this));
+ else
+ LPadInst->setCleanup(true);
+ }
+
+ assert((LPadInst->getNumClauses() > 0 || LPadInst->isCleanup()) &&
+ "landingpad instruction has no clauses!");
+
+ // Tell the backend how to generate the landing pad.
+ Builder.CreateBr(getEHDispatchBlock(EHStack.getInnermostEHScope()));
+
+ // Restore the old IR generation state.
+ Builder.restoreIP(savedIP);
+
+ return lpad;
+}
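+
+// For illustration, given nested handlers such as (mayThrow is a placeholder):
+//
+//   try {                          // outer scope catches float
+//     try { mayThrow(); }          // inner scope catches int
+//     catch (int) { /* ... */ }
+//   } catch (float) { /* ... */ }
+//
+// the loop above walks the EH stack from the innermost scope outwards, so the
+// landing pad for mayThrow() gets a clause for @_ZTIi followed by one for
+// @_ZTIf, stopping early only at a catch-all or an exception-spec filter.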
+
+namespace {
+ /// A cleanup to call __cxa_end_catch. In many cases, the caught
+ /// exception type lets us state definitively that the thrown exception
+ /// type does not have a destructor. In particular:
+ /// - Catch-alls tell us nothing, so we have to conservatively
+ /// assume that the thrown exception might have a destructor.
+ /// - Catches by reference behave according to their base types.
+ /// - Catches of non-record types will only trigger for exceptions
+ /// of non-record types, which never have destructors.
+ /// - Catches of record types can trigger for arbitrary subclasses
+ /// of the caught type, so we have to assume the actual thrown
+ /// exception type might have a throwing destructor, even if the
+ /// caught type's destructor is trivial or nothrow.
+ struct CallEndCatch : EHScopeStack::Cleanup {
+ CallEndCatch(bool MightThrow) : MightThrow(MightThrow) {}
+ bool MightThrow;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ if (!MightThrow) {
+ CGF.Builder.CreateCall(getEndCatchFn(CGF))->setDoesNotThrow();
+ return;
+ }
+
+ CGF.EmitCallOrInvoke(getEndCatchFn(CGF));
+ }
+ };
+}
+
+/// Emits a call to __cxa_begin_catch and enters a cleanup to call
+/// __cxa_end_catch.
+///
+/// \param EndMightThrow - true if __cxa_end_catch might throw
+static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
+ llvm::Value *Exn,
+ bool EndMightThrow) {
+ llvm::CallInst *Call = CGF.Builder.CreateCall(getBeginCatchFn(CGF), Exn);
+ Call->setDoesNotThrow();
+
+ CGF.EHStack.pushCleanup<CallEndCatch>(NormalAndEHCleanup, EndMightThrow);
+
+ return Call;
+}
+
+/// A "special initializer" callback for initializing a catch
+/// parameter during catch initialization.
+static void InitCatchParam(CodeGenFunction &CGF,
+ const VarDecl &CatchParam,
+ llvm::Value *ParamAddr) {
+ // Load the exception from where the landing pad saved it.
+ llvm::Value *Exn = CGF.getExceptionFromSlot();
+
+ CanQualType CatchType =
+ CGF.CGM.getContext().getCanonicalType(CatchParam.getType());
+ llvm::Type *LLVMCatchTy = CGF.ConvertTypeForMem(CatchType);
+
+ // If we're catching by reference, we can just cast the object
+ // pointer to the appropriate pointer.
+ if (isa<ReferenceType>(CatchType)) {
+ QualType CaughtType = cast<ReferenceType>(CatchType)->getPointeeType();
+ bool EndCatchMightThrow = CaughtType->isRecordType();
+
+ // __cxa_begin_catch returns the adjusted object pointer.
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, EndCatchMightThrow);
+
+ // We have no way to tell the personality function that we're
+ // catching by reference, so if we're catching a pointer,
+ // __cxa_begin_catch will actually return that pointer by value.
+ if (const PointerType *PT = dyn_cast<PointerType>(CaughtType)) {
+ QualType PointeeType = PT->getPointeeType();
+
+ // When catching by reference, generally we should just ignore
+ // this by-value pointer and use the exception object instead.
+ if (!PointeeType->isRecordType()) {
+
+ // Exn points to the struct _Unwind_Exception header, which
+ // we have to skip past in order to reach the exception data.
+ unsigned HeaderSize =
+ CGF.CGM.getTargetCodeGenInfo().getSizeOfUnwindException();
+ AdjustedExn = CGF.Builder.CreateConstGEP1_32(Exn, HeaderSize);
+
+      // However, if we're catching a pointer-to-record type, that won't
+      // work, because the personality function might have adjusted
+ // the pointer. There's actually no way for us to fully satisfy
+ // the language/ABI contract here: we can't use Exn because it
+ // might have the wrong adjustment, but we can't use the by-value
+ // pointer because it's off by a level of abstraction.
+ //
+ // The current solution is to dump the adjusted pointer into an
+ // alloca, which breaks language semantics (because changing the
+ // pointer doesn't change the exception) but at least works.
+ // The better solution would be to filter out non-exact matches
+ // and rethrow them, but this is tricky because the rethrow
+ // really needs to be catchable by other sites at this landing
+ // pad. The best solution is to fix the personality function.
+ } else {
+ // Pull the pointer for the reference type off.
+ llvm::Type *PtrTy =
+ cast<llvm::PointerType>(LLVMCatchTy)->getElementType();
+
+ // Create the temporary and write the adjusted pointer into it.
+ llvm::Value *ExnPtrTmp = CGF.CreateTempAlloca(PtrTy, "exn.byref.tmp");
+ llvm::Value *Casted = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+ CGF.Builder.CreateStore(Casted, ExnPtrTmp);
+
+ // Bind the reference to the temporary.
+ AdjustedExn = ExnPtrTmp;
+ }
+ }
+
+ llvm::Value *ExnCast =
+ CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.byref");
+ CGF.Builder.CreateStore(ExnCast, ParamAddr);
+ return;
+ }
+
+ // Non-aggregates (plus complexes).
+ bool IsComplex = false;
+ if (!CGF.hasAggregateLLVMType(CatchType) ||
+ (IsComplex = CatchType->isAnyComplexType())) {
+ llvm::Value *AdjustedExn = CallBeginCatch(CGF, Exn, false);
+
+ // If the catch type is a pointer type, __cxa_begin_catch returns
+ // the pointer by value.
+ if (CatchType->hasPointerRepresentation()) {
+ llvm::Value *CastExn =
+ CGF.Builder.CreateBitCast(AdjustedExn, LLVMCatchTy, "exn.casted");
+
+ switch (CatchType.getQualifiers().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ CastExn = CGF.EmitARCRetainNonBlock(CastExn);
+ // fallthrough
+
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ CGF.Builder.CreateStore(CastExn, ParamAddr);
+ return;
+
+ case Qualifiers::OCL_Weak:
+ CGF.EmitARCInitWeak(ParamAddr, CastExn);
+ return;
+ }
+ llvm_unreachable("bad ownership qualifier!");
+ }
+
+ // Otherwise, it returns a pointer into the exception object.
+
+ llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+ llvm::Value *Cast = CGF.Builder.CreateBitCast(AdjustedExn, PtrTy);
+
+ if (IsComplex) {
+ CGF.StoreComplexToAddr(CGF.LoadComplexFromAddr(Cast, /*volatile*/ false),
+ ParamAddr, /*volatile*/ false);
+ } else {
+ unsigned Alignment =
+ CGF.getContext().getDeclAlign(&CatchParam).getQuantity();
+ llvm::Value *ExnLoad = CGF.Builder.CreateLoad(Cast, "exn.scalar");
+ CGF.EmitStoreOfScalar(ExnLoad, ParamAddr, /*volatile*/ false, Alignment,
+ CatchType);
+ }
+ return;
+ }
+
+ assert(isa<RecordType>(CatchType) && "unexpected catch type!");
+
+ llvm::Type *PtrTy = LLVMCatchTy->getPointerTo(0); // addrspace 0 ok
+
+ // Check for a copy expression. If we don't have a copy expression,
+ // that means a trivial copy is okay.
+ const Expr *copyExpr = CatchParam.getInit();
+ if (!copyExpr) {
+ llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
+ llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy);
+ CGF.EmitAggregateCopy(ParamAddr, adjustedExn, CatchType);
+ return;
+ }
+
+ // We have to call __cxa_get_exception_ptr to get the adjusted
+ // pointer before copying.
+ llvm::CallInst *rawAdjustedExn =
+ CGF.Builder.CreateCall(getGetExceptionPtrFn(CGF), Exn);
+ rawAdjustedExn->setDoesNotThrow();
+
+ // Cast that to the appropriate type.
+ llvm::Value *adjustedExn = CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy);
+
+ // The copy expression is defined in terms of an OpaqueValueExpr.
+ // Find it and map it to the adjusted expression.
+ CodeGenFunction::OpaqueValueMapping
+ opaque(CGF, OpaqueValueExpr::findInCopyConstruct(copyExpr),
+ CGF.MakeAddrLValue(adjustedExn, CatchParam.getType()));
+
+ // Call the copy ctor in a terminate scope.
+ CGF.EHStack.pushTerminate();
+
+ // Perform the copy construction.
+ CharUnits Alignment = CGF.getContext().getDeclAlign(&CatchParam);
+ CGF.EmitAggExpr(copyExpr,
+ AggValueSlot::forAddr(ParamAddr, Alignment, Qualifiers(),
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+
+ // Leave the terminate scope.
+ CGF.EHStack.popTerminate();
+
+ // Undo the opaque value mapping.
+ opaque.pop();
+
+ // Finally we can call __cxa_begin_catch.
+ CallBeginCatch(CGF, Exn, true);
+}
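+
+// For illustration, the paths through InitCatchParam are roughly
+// (Foo is a placeholder class type):
+//
+//   catch (int i)    -> scalar path: load from the adjusted exception object
+//   catch (Foo &f)   -> by-reference path: bind directly to the exception
+//   catch (Foo f)    -> aggregate path: copy into the local; a non-trivial
+//                       copy constructor runs inside a terminate scope
+//                       before __cxa_begin_catch is finally called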
+
+/// Begins a catch statement by initializing the catch variable and
+/// calling __cxa_begin_catch.
+static void BeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *S) {
+ // We have to be very careful with the ordering of cleanups here:
+ // C++ [except.throw]p4:
+ // The destruction [of the exception temporary] occurs
+ // immediately after the destruction of the object declared in
+ // the exception-declaration in the handler.
+ //
+ // So the precise ordering is:
+ // 1. Construct catch variable.
+ // 2. __cxa_begin_catch
+ // 3. Enter __cxa_end_catch cleanup
+ // 4. Enter dtor cleanup
+ //
+ // We do this by using a slightly abnormal initialization process.
+ // Delegation sequence:
+ // - ExitCXXTryStmt opens a RunCleanupsScope
+ // - EmitAutoVarAlloca creates the variable and debug info
+ // - InitCatchParam initializes the variable from the exception
+ // - CallBeginCatch calls __cxa_begin_catch
+ // - CallBeginCatch enters the __cxa_end_catch cleanup
+ // - EmitAutoVarCleanups enters the variable destructor cleanup
+  // - ExitCXXTryStmt emits the code for the catch body
+  // - ExitCXXTryStmt closes the RunCleanupsScope
+
+ VarDecl *CatchParam = S->getExceptionDecl();
+ if (!CatchParam) {
+ llvm::Value *Exn = CGF.getExceptionFromSlot();
+ CallBeginCatch(CGF, Exn, true);
+ return;
+ }
+
+ // Emit the local.
+ CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
+ InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF));
+ CGF.EmitAutoVarCleanups(var);
+}
+
+namespace {
+ struct CallRethrow : EHScopeStack::Cleanup {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitCallOrInvoke(getReThrowFn(CGF));
+ }
+ };
+}
+
+/// Emit the structure of the dispatch block for the given catch scope.
+/// It is an invariant that the dispatch block already exists.
+static void emitCatchDispatchBlock(CodeGenFunction &CGF,
+ EHCatchScope &catchScope) {
+ llvm::BasicBlock *dispatchBlock = catchScope.getCachedEHDispatchBlock();
+ assert(dispatchBlock);
+
+ // If there's only a single catch-all, getEHDispatchBlock returned
+ // that catch-all as the dispatch block.
+ if (catchScope.getNumHandlers() == 1 &&
+ catchScope.getHandler(0).isCatchAll()) {
+ assert(dispatchBlock == catchScope.getHandler(0).Block);
+ return;
+ }
+
+ CGBuilderTy::InsertPoint savedIP = CGF.Builder.saveIP();
+ CGF.EmitBlockAfterUses(dispatchBlock);
+
+ // Select the right handler.
+ llvm::Value *llvm_eh_typeid_for =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::eh_typeid_for);
+
+ // Load the selector value.
+ llvm::Value *selector = CGF.getSelectorFromSlot();
+
+ // Test against each of the exception types we claim to catch.
+ for (unsigned i = 0, e = catchScope.getNumHandlers(); ; ++i) {
+ assert(i < e && "ran off end of handlers!");
+ const EHCatchScope::Handler &handler = catchScope.getHandler(i);
+
+ llvm::Value *typeValue = handler.Type;
+ assert(typeValue && "fell into catch-all case!");
+ typeValue = CGF.Builder.CreateBitCast(typeValue, CGF.Int8PtrTy);
+
+ // Figure out the next block.
+ bool nextIsEnd;
+ llvm::BasicBlock *nextBlock;
+
+ // If this is the last handler, we're at the end, and the next
+ // block is the block for the enclosing EH scope.
+ if (i + 1 == e) {
+ nextBlock = CGF.getEHDispatchBlock(catchScope.getEnclosingEHScope());
+ nextIsEnd = true;
+
+ // If the next handler is a catch-all, we're at the end, and the
+ // next block is that handler.
+ } else if (catchScope.getHandler(i+1).isCatchAll()) {
+ nextBlock = catchScope.getHandler(i+1).Block;
+ nextIsEnd = true;
+
+ // Otherwise, we're not at the end and we need a new block.
+ } else {
+ nextBlock = CGF.createBasicBlock("catch.fallthrough");
+ nextIsEnd = false;
+ }
+
+ // Figure out the catch type's index in the LSDA's type table.
+ llvm::CallInst *typeIndex =
+ CGF.Builder.CreateCall(llvm_eh_typeid_for, typeValue);
+ typeIndex->setDoesNotThrow();
+
+ llvm::Value *matchesTypeIndex =
+ CGF.Builder.CreateICmpEQ(selector, typeIndex, "matches");
+ CGF.Builder.CreateCondBr(matchesTypeIndex, handler.Block, nextBlock);
+
+    // If that was the last test (the next block is the enclosing dispatch
+    // or a catch-all handler), we're completely done.
+ if (nextIsEnd) {
+ CGF.Builder.restoreIP(savedIP);
+ return;
+ }
+ // Otherwise we need to emit and continue at that block.
+ CGF.EmitBlock(nextBlock);
+ }
+}
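+
+// For illustration, each handler test emitted by the loop above looks
+// roughly like:
+//
+//   %tid = call i32 @llvm.eh.typeid.for(i8* bitcast (i8** @_ZTIi to i8*))
+//   %matches = icmp eq i32 %sel, %tid
+//   br i1 %matches, label %catch, label %catch.fallthrough
+//
+// where %sel is the selector value loaded from the slot that the landing
+// pad filled in.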
+
+void CodeGenFunction::popCatchScope() {
+ EHCatchScope &catchScope = cast<EHCatchScope>(*EHStack.begin());
+ if (catchScope.hasEHBranches())
+ emitCatchDispatchBlock(*this, catchScope);
+ EHStack.popCatch();
+}
+
+void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
+ unsigned NumHandlers = S.getNumHandlers();
+ EHCatchScope &CatchScope = cast<EHCatchScope>(*EHStack.begin());
+ assert(CatchScope.getNumHandlers() == NumHandlers);
+
+ // If the catch was not required, bail out now.
+ if (!CatchScope.hasEHBranches()) {
+ EHStack.popCatch();
+ return;
+ }
+
+ // Emit the structure of the EH dispatch for this catch.
+ emitCatchDispatchBlock(*this, CatchScope);
+
+ // Copy the handler blocks off before we pop the EH stack. Emitting
+ // the handlers might scribble on this memory.
+ SmallVector<EHCatchScope::Handler, 8> Handlers(NumHandlers);
+ memcpy(Handlers.data(), CatchScope.begin(),
+ NumHandlers * sizeof(EHCatchScope::Handler));
+
+ EHStack.popCatch();
+
+ // The fall-through block.
+ llvm::BasicBlock *ContBB = createBasicBlock("try.cont");
+
+ // We just emitted the body of the try; jump to the continue block.
+ if (HaveInsertPoint())
+ Builder.CreateBr(ContBB);
+
+ // Determine if we need an implicit rethrow for all these catch handlers.
+ bool ImplicitRethrow = false;
+ if (IsFnTryBlock)
+ ImplicitRethrow = isa<CXXDestructorDecl>(CurCodeDecl) ||
+ isa<CXXConstructorDecl>(CurCodeDecl);
+
+ // Perversely, we emit the handlers backwards precisely because we
+ // want them to appear in source order. In all of these cases, the
+ // catch block will have exactly one predecessor, which will be a
+ // particular block in the catch dispatch. However, in the case of
+ // a catch-all, one of the dispatch blocks will branch to two
+ // different handlers, and EmitBlockAfterUses will cause the second
+ // handler to be moved before the first.
+ for (unsigned I = NumHandlers; I != 0; --I) {
+ llvm::BasicBlock *CatchBlock = Handlers[I-1].Block;
+ EmitBlockAfterUses(CatchBlock);
+
+ // Catch the exception if this isn't a catch-all.
+ const CXXCatchStmt *C = S.getHandler(I-1);
+
+ // Enter a cleanup scope, including the catch variable and the
+ // end-catch.
+ RunCleanupsScope CatchScope(*this);
+
+ // Initialize the catch variable and set up the cleanups.
+ BeginCatch(*this, C);
+
+ // If there's an implicit rethrow, push a normal "cleanup" to call
+ // _cxa_rethrow. This needs to happen before __cxa_end_catch is
+ // called, and so it is pushed after BeginCatch.
+ if (ImplicitRethrow)
+ EHStack.pushCleanup<CallRethrow>(NormalCleanup);
+
+ // Perform the body of the catch.
+ EmitStmt(C->getHandlerBlock());
+
+ // Fall out through the catch cleanups.
+ CatchScope.ForceCleanup();
+
+ // Branch out of the try.
+ if (HaveInsertPoint())
+ Builder.CreateBr(ContBB);
+ }
+
+ EmitBlock(ContBB);
+}
+
+namespace {
+ struct CallEndCatchForFinally : EHScopeStack::Cleanup {
+ llvm::Value *ForEHVar;
+ llvm::Value *EndCatchFn;
+ CallEndCatchForFinally(llvm::Value *ForEHVar, llvm::Value *EndCatchFn)
+ : ForEHVar(ForEHVar), EndCatchFn(EndCatchFn) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ llvm::BasicBlock *EndCatchBB = CGF.createBasicBlock("finally.endcatch");
+ llvm::BasicBlock *CleanupContBB =
+ CGF.createBasicBlock("finally.cleanup.cont");
+
+ llvm::Value *ShouldEndCatch =
+ CGF.Builder.CreateLoad(ForEHVar, "finally.endcatch");
+ CGF.Builder.CreateCondBr(ShouldEndCatch, EndCatchBB, CleanupContBB);
+ CGF.EmitBlock(EndCatchBB);
+ CGF.EmitCallOrInvoke(EndCatchFn); // catch-all, so might throw
+ CGF.EmitBlock(CleanupContBB);
+ }
+ };
+
+ struct PerformFinally : EHScopeStack::Cleanup {
+ const Stmt *Body;
+ llvm::Value *ForEHVar;
+ llvm::Value *EndCatchFn;
+ llvm::Value *RethrowFn;
+ llvm::Value *SavedExnVar;
+
+ PerformFinally(const Stmt *Body, llvm::Value *ForEHVar,
+ llvm::Value *EndCatchFn,
+ llvm::Value *RethrowFn, llvm::Value *SavedExnVar)
+ : Body(Body), ForEHVar(ForEHVar), EndCatchFn(EndCatchFn),
+ RethrowFn(RethrowFn), SavedExnVar(SavedExnVar) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Enter a cleanup to call the end-catch function if one was provided.
+ if (EndCatchFn)
+ CGF.EHStack.pushCleanup<CallEndCatchForFinally>(NormalAndEHCleanup,
+ ForEHVar, EndCatchFn);
+
+ // Save the current cleanup destination in case there are
+ // cleanups in the finally block.
+ llvm::Value *SavedCleanupDest =
+ CGF.Builder.CreateLoad(CGF.getNormalCleanupDestSlot(),
+ "cleanup.dest.saved");
+
+ // Emit the finally block.
+ CGF.EmitStmt(Body);
+
+ // If the end of the finally is reachable, check whether this was
+ // for EH. If so, rethrow.
+ if (CGF.HaveInsertPoint()) {
+ llvm::BasicBlock *RethrowBB = CGF.createBasicBlock("finally.rethrow");
+ llvm::BasicBlock *ContBB = CGF.createBasicBlock("finally.cont");
+
+ llvm::Value *ShouldRethrow =
+ CGF.Builder.CreateLoad(ForEHVar, "finally.shouldthrow");
+ CGF.Builder.CreateCondBr(ShouldRethrow, RethrowBB, ContBB);
+
+ CGF.EmitBlock(RethrowBB);
+ if (SavedExnVar) {
+ CGF.EmitCallOrInvoke(RethrowFn, CGF.Builder.CreateLoad(SavedExnVar));
+ } else {
+ CGF.EmitCallOrInvoke(RethrowFn);
+ }
+ CGF.Builder.CreateUnreachable();
+
+ CGF.EmitBlock(ContBB);
+
+ // Restore the cleanup destination.
+ CGF.Builder.CreateStore(SavedCleanupDest,
+ CGF.getNormalCleanupDestSlot());
+ }
+
+ // Leave the end-catch cleanup. As an optimization, pretend that
+ // the fallthrough path was inaccessible; we've dynamically proven
+ // that we're not in the EH case along that path.
+ if (EndCatchFn) {
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+ CGF.PopCleanupBlock();
+ CGF.Builder.restoreIP(SavedIP);
+ }
+
+ // Now make sure we actually have an insertion point or the
+ // cleanup gods will hate us.
+ CGF.EnsureInsertPoint();
+ }
+ };
+}
+
+/// Enters a finally block for an implementation using zero-cost
+/// exceptions. This is mostly general, but hard-codes some
+/// language/ABI-specific behavior in the catch-all sections.
+void CodeGenFunction::FinallyInfo::enter(CodeGenFunction &CGF,
+ const Stmt *body,
+ llvm::Constant *beginCatchFn,
+ llvm::Constant *endCatchFn,
+ llvm::Constant *rethrowFn) {
+ assert((beginCatchFn != 0) == (endCatchFn != 0) &&
+ "begin/end catch functions not paired");
+ assert(rethrowFn && "rethrow function is required");
+
+ BeginCatchFn = beginCatchFn;
+
+ // The rethrow function has one of the following two types:
+ // void (*)()
+ // void (*)(void*)
+ // In the latter case we need to pass it the exception object.
+ // But we can't use the exception slot because the @finally might
+ // have a landing pad (which would overwrite the exception slot).
+ llvm::FunctionType *rethrowFnTy =
+ cast<llvm::FunctionType>(
+ cast<llvm::PointerType>(rethrowFn->getType())->getElementType());
+ SavedExnVar = 0;
+ if (rethrowFnTy->getNumParams())
+ SavedExnVar = CGF.CreateTempAlloca(CGF.Int8PtrTy, "finally.exn");
+
+ // A finally block is a statement which must be executed on any edge
+ // out of a given scope. Unlike a cleanup, the finally block may
+ // contain arbitrary control flow leading out of itself. In
+ // addition, finally blocks should always be executed, even if there
+ // are no catch handlers higher on the stack. Therefore, we
+ // surround the protected scope with a combination of a normal
+ // cleanup (to catch attempts to break out of the block via normal
+ // control flow) and an EH catch-all (semantically "outside" any try
+ // statement to which the finally block might have been attached).
+ // The finally block itself is generated in the context of a cleanup
+ // which conditionally leaves the catch-all.
+
+ // Jump destination for performing the finally block on an exception
+ // edge. We'll never actually reach this block, so unreachable is
+ // fine.
+ RethrowDest = CGF.getJumpDestInCurrentScope(CGF.getUnreachableBlock());
+
+ // Whether the finally block is being executed for EH purposes.
+ ForEHVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(), "finally.for-eh");
+ CGF.Builder.CreateStore(CGF.Builder.getFalse(), ForEHVar);
+
+ // Enter a normal cleanup which will perform the @finally block.
+ CGF.EHStack.pushCleanup<PerformFinally>(NormalCleanup, body,
+ ForEHVar, endCatchFn,
+ rethrowFn, SavedExnVar);
+
+ // Enter a catch-all scope.
+ llvm::BasicBlock *catchBB = CGF.createBasicBlock("finally.catchall");
+ EHCatchScope *catchScope = CGF.EHStack.pushCatch(1);
+ catchScope->setCatchAllHandler(0, catchBB);
+}
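+
+// For illustration, after enter() the EH stack for an Objective-C
+// "@try { body } @finally { fin }" looks conceptually like:
+//
+//   [ catch-all scope -> finally.catchall ]   <- innermost, pushed last
+//   [ normal cleanup  -> PerformFinally   ]
+//
+// The protected body is then emitted inside both scopes, and exit() below
+// pops the catch-all (emitting it only if it was actually reached) before
+// popping the PerformFinally cleanup.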
+
+void CodeGenFunction::FinallyInfo::exit(CodeGenFunction &CGF) {
+ // Leave the finally catch-all.
+ EHCatchScope &catchScope = cast<EHCatchScope>(*CGF.EHStack.begin());
+ llvm::BasicBlock *catchBB = catchScope.getHandler(0).Block;
+
+ CGF.popCatchScope();
+
+ // If there are any references to the catch-all block, emit it.
+ if (catchBB->use_empty()) {
+ delete catchBB;
+ } else {
+ CGBuilderTy::InsertPoint savedIP = CGF.Builder.saveAndClearIP();
+ CGF.EmitBlock(catchBB);
+
+ llvm::Value *exn = 0;
+
+ // If there's a begin-catch function, call it.
+ if (BeginCatchFn) {
+ exn = CGF.getExceptionFromSlot();
+ CGF.Builder.CreateCall(BeginCatchFn, exn)->setDoesNotThrow();
+ }
+
+ // If we need to remember the exception pointer to rethrow later, do so.
+ if (SavedExnVar) {
+ if (!exn) exn = CGF.getExceptionFromSlot();
+ CGF.Builder.CreateStore(exn, SavedExnVar);
+ }
+
+    // Tell the cleanups in the finally block that we're doing this for EH.
+ CGF.Builder.CreateStore(CGF.Builder.getTrue(), ForEHVar);
+
+ // Thread a jump through the finally cleanup.
+ CGF.EmitBranchThroughCleanup(RethrowDest);
+
+ CGF.Builder.restoreIP(savedIP);
+ }
+
+ // Finally, leave the @finally cleanup.
+ CGF.PopCleanupBlock();
+}
+
+llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
+ if (TerminateLandingPad)
+ return TerminateLandingPad;
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ // This will get inserted at the end of the function.
+ TerminateLandingPad = createBasicBlock("terminate.lpad");
+ Builder.SetInsertPoint(TerminateLandingPad);
+
+ // Tell the backend that this is a landing pad.
+ const EHPersonality &Personality = EHPersonality::get(CGM.getLangOpts());
+ llvm::LandingPadInst *LPadInst =
+ Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty, NULL),
+ getOpaquePersonalityFn(CGM, Personality), 0);
+ LPadInst->addClause(getCatchAllValue(*this));
+
+ llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this));
+ TerminateCall->setDoesNotReturn();
+ TerminateCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
+
+ // Restore the saved insertion state.
+ Builder.restoreIP(SavedIP);
+
+ return TerminateLandingPad;
+}
+
+llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
+ if (TerminateHandler)
+ return TerminateHandler;
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
+
+ // Set up the terminate handler. This block is inserted at the very
+ // end of the function by FinishFunction.
+ TerminateHandler = createBasicBlock("terminate.handler");
+ Builder.SetInsertPoint(TerminateHandler);
+ llvm::CallInst *TerminateCall = Builder.CreateCall(getTerminateFn(*this));
+ TerminateCall->setDoesNotReturn();
+ TerminateCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
+
+ // Restore the saved insertion state.
+ Builder.restoreIP(SavedIP);
+
+ return TerminateHandler;
+}
+
+llvm::BasicBlock *CodeGenFunction::getEHResumeBlock() {
+ if (EHResumeBlock) return EHResumeBlock;
+
+ CGBuilderTy::InsertPoint SavedIP = Builder.saveIP();
+
+ // We emit a jump to a notional label at the outermost unwind state.
+ EHResumeBlock = createBasicBlock("eh.resume");
+ Builder.SetInsertPoint(EHResumeBlock);
+
+ const EHPersonality &Personality = EHPersonality::get(CGM.getLangOpts());
+
+ // This can always be a call because we necessarily didn't find
+ // anything on the EH stack which needs our help.
+ const char *RethrowName = Personality.CatchallRethrowFn;
+ if (RethrowName != 0) {
+ Builder.CreateCall(getCatchallRethrowFn(*this, RethrowName),
+ getExceptionFromSlot())
+ ->setDoesNotReturn();
+ } else {
+ switch (CleanupHackLevel) {
+ case CHL_MandatoryCatchall:
+ // In mandatory-catchall mode, we need to use
+ // _Unwind_Resume_or_Rethrow, or whatever the personality's
+ // equivalent is.
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(),
+ getExceptionFromSlot())
+ ->setDoesNotReturn();
+ break;
+ case CHL_MandatoryCleanup: {
+ // In mandatory-cleanup mode, we should use 'resume'.
+
+ // Recreate the landingpad's return value for the 'resume' instruction.
+ llvm::Value *Exn = getExceptionFromSlot();
+ llvm::Value *Sel = getSelectorFromSlot();
+
+ llvm::Type *LPadType = llvm::StructType::get(Exn->getType(),
+ Sel->getType(), NULL);
+ llvm::Value *LPadVal = llvm::UndefValue::get(LPadType);
+ LPadVal = Builder.CreateInsertValue(LPadVal, Exn, 0, "lpad.val");
+ LPadVal = Builder.CreateInsertValue(LPadVal, Sel, 1, "lpad.val");
+
+ Builder.CreateResume(LPadVal);
+ Builder.restoreIP(SavedIP);
+ return EHResumeBlock;
+ }
+ case CHL_Ideal:
+ // In an idealized mode where we don't have to worry about the
+ // optimizer combining landing pads, we should just use
+ // _Unwind_Resume (or the personality's equivalent).
+ Builder.CreateCall(getUnwindResumeFn(), getExceptionFromSlot())
+ ->setDoesNotReturn();
+ break;
+ }
+ }
+
+ Builder.CreateUnreachable();
+
+ Builder.restoreIP(SavedIP);
+
+ return EHResumeBlock;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
new file mode 100644
index 0000000..08970fd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExpr.cpp
@@ -0,0 +1,3249 @@
+//===--- CGExpr.cpp - Emit LLVM Code from Expressions ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGCall.h"
+#include "CGCXXABI.h"
+#include "CGDebugInfo.h"
+#include "CGRecordLayout.h"
+#include "CGObjCRuntime.h"
+#include "TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===--------------------------------------------------------------------===//
+// Miscellaneous Helper Methods
+//===--------------------------------------------------------------------===//
+
+llvm::Value *CodeGenFunction::EmitCastToVoidPtr(llvm::Value *value) {
+ unsigned addressSpace =
+ cast<llvm::PointerType>(value->getType())->getAddressSpace();
+
+ llvm::PointerType *destType = Int8PtrTy;
+ if (addressSpace)
+ destType = llvm::Type::getInt8PtrTy(getLLVMContext(), addressSpace);
+
+ if (value->getType() == destType) return value;
+ return Builder.CreateBitCast(value, destType);
+}
+
+/// CreateTempAlloca - This creates an alloca and inserts it into the entry
+/// block.
+llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
+ const Twine &Name) {
+ if (!Builder.isNamePreserving())
+ return new llvm::AllocaInst(Ty, 0, "", AllocaInsertPt);
+ return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
+}
+
+void CodeGenFunction::InitTempAlloca(llvm::AllocaInst *Var,
+ llvm::Value *Init) {
+ llvm::StoreInst *Store = new llvm::StoreInst(Init, Var);
+ llvm::BasicBlock *Block = AllocaInsertPt->getParent();
+ Block->getInstList().insertAfter(&*AllocaInsertPt, Store);
+}
+
+llvm::AllocaInst *CodeGenFunction::CreateIRTemp(QualType Ty,
+ const Twine &Name) {
+ llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
+ // FIXME: Should we prefer the preferred type alignment here?
+ CharUnits Align = getContext().getTypeAlignInChars(Ty);
+ Alloc->setAlignment(Align.getQuantity());
+ return Alloc;
+}
+
+llvm::AllocaInst *CodeGenFunction::CreateMemTemp(QualType Ty,
+ const Twine &Name) {
+ llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
+ // FIXME: Should we prefer the preferred type alignment here?
+ CharUnits Align = getContext().getTypeAlignInChars(Ty);
+ Alloc->setAlignment(Align.getQuantity());
+ return Alloc;
+}
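+
+// For illustration, callers later in this file create temporaries through
+// these helpers, e.g.:
+//
+//   llvm::AllocaInst *Tmp = CGF.CreateMemTemp(E->getType(), "ref.tmp");
+//
+// CreateMemTemp uses the memory representation of the type (for example,
+// bool becomes i8), while CreateIRTemp uses the scalar IR type (i1 for bool).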
+
+/// EvaluateExprAsBool - Perform the usual unary conversions on the specified
+/// expression and compare the result against zero, returning an Int1Ty value.
+llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
+ if (const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>()) {
+ llvm::Value *MemPtr = EmitScalarExpr(E);
+ return CGM.getCXXABI().EmitMemberPointerIsNotNull(*this, MemPtr, MPT);
+ }
+
+ QualType BoolTy = getContext().BoolTy;
+ if (!E->getType()->isAnyComplexType())
+ return EmitScalarConversion(EmitScalarExpr(E), E->getType(), BoolTy);
+
+ return EmitComplexToScalarConversion(EmitComplexExpr(E), E->getType(),BoolTy);
+}
+
+/// EmitIgnoredExpr - Emit code to compute the specified expression,
+/// ignoring the result.
+void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
+ if (E->isRValue())
+ return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
+
+ // Just emit it as an l-value and drop the result.
+ EmitLValue(E);
+}
+
+/// EmitAnyExpr - Emit code to compute the specified expression which
+/// can have any type. The result is returned as an RValue struct.
+/// If this is an aggregate expression, AggSlot indicates where the
+/// result should be returned.
+RValue CodeGenFunction::EmitAnyExpr(const Expr *E, AggValueSlot AggSlot,
+ bool IgnoreResult) {
+ if (!hasAggregateLLVMType(E->getType()))
+ return RValue::get(EmitScalarExpr(E, IgnoreResult));
+ else if (E->getType()->isAnyComplexType())
+ return RValue::getComplex(EmitComplexExpr(E, IgnoreResult, IgnoreResult));
+
+ EmitAggExpr(E, AggSlot, IgnoreResult);
+ return AggSlot.asRValue();
+}
+
+/// EmitAnyExprToTemp - Similar to EmitAnyExpr(); however, the result will
+/// always be accessible even if no aggregate location is provided.
+RValue CodeGenFunction::EmitAnyExprToTemp(const Expr *E) {
+ AggValueSlot AggSlot = AggValueSlot::ignored();
+
+ if (hasAggregateLLVMType(E->getType()) &&
+ !E->getType()->isAnyComplexType())
+ AggSlot = CreateAggTemp(E->getType(), "agg.tmp");
+ return EmitAnyExpr(E, AggSlot);
+}
+
+/// EmitAnyExprToMem - Evaluate an expression into a given memory
+/// location.
+void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
+ llvm::Value *Location,
+ Qualifiers Quals,
+ bool IsInit) {
+ // FIXME: This function should take an LValue as an argument.
+ if (E->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(E, Location, Quals.hasVolatile());
+ } else if (hasAggregateLLVMType(E->getType())) {
+ CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
+ EmitAggExpr(E, AggValueSlot::forAddr(Location, Alignment, Quals,
+ AggValueSlot::IsDestructed_t(IsInit),
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsAliased_t(!IsInit)));
+ } else {
+ RValue RV = RValue::get(EmitScalarExpr(E, /*Ignore*/ false));
+ LValue LV = MakeAddrLValue(Location, E->getType());
+ EmitStoreThroughLValue(RV, LV);
+ }
+}
+
+namespace {
+  /// \brief An adjustment to be made to the temporary created when emitting a
+  /// reference binding, which accesses a particular subobject of that
+  /// temporary.
+  struct SubobjectAdjustment {
+ enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;
+
+ union {
+ struct {
+ const CastExpr *BasePath;
+ const CXXRecordDecl *DerivedClass;
+ } DerivedToBase;
+
+ FieldDecl *Field;
+ };
+
+ SubobjectAdjustment(const CastExpr *BasePath,
+ const CXXRecordDecl *DerivedClass)
+ : Kind(DerivedToBaseAdjustment) {
+ DerivedToBase.BasePath = BasePath;
+ DerivedToBase.DerivedClass = DerivedClass;
+ }
+
+ SubobjectAdjustment(FieldDecl *Field)
+ : Kind(FieldAdjustment) {
+ this->Field = Field;
+ }
+ };
+}
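+
+// For illustration, binding a reference to a subobject of a temporary records
+// adjustments as follows (A and B are placeholder types):
+//
+//   struct A { int x; };  struct B : A {};
+//   const A   &r = B();      // one DerivedToBaseAdjustment
+//   const int &i = B().x;    // one FieldAdjustment
+//
+// The adjustments are replayed below, after the temporary has been emitted,
+// to compute the address of the subobject the reference actually binds to.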
+
+static llvm::Value *
+CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
+ const NamedDecl *InitializedDecl) {
+ if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
+ if (VD->hasGlobalStorage()) {
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
+ Out.flush();
+
+ llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
+
+ // Create the reference temporary.
+ llvm::GlobalValue *RefTemp =
+ new llvm::GlobalVariable(CGF.CGM.getModule(),
+ RefTempTy, /*isConstant=*/false,
+ llvm::GlobalValue::InternalLinkage,
+ llvm::Constant::getNullValue(RefTempTy),
+ Name.str());
+ return RefTemp;
+ }
+ }
+
+ return CGF.CreateMemTemp(Type, "ref.tmp");
+}
+
+static llvm::Value *
+EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
+ llvm::Value *&ReferenceTemporary,
+ const CXXDestructorDecl *&ReferenceTemporaryDtor,
+ QualType &ObjCARCReferenceLifetimeType,
+ const NamedDecl *InitializedDecl) {
+ // Look through single-element init lists that claim to be lvalues. They're
+ // just syntactic wrappers in this case.
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
+ if (ILE->getNumInits() == 1 && ILE->isGLValue())
+ E = ILE->getInit(0);
+ }
+
+ // Look through expressions for materialized temporaries (for now).
+ if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(E)) {
+ // Objective-C++ ARC:
+ // If we are binding a reference to a temporary that has ownership, we
+ // need to perform retain/release operations on the temporary.
+ if (CGF.getContext().getLangOpts().ObjCAutoRefCount &&
+ E->getType()->isObjCLifetimeType() &&
+ (E->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
+ E->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
+ E->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
+ ObjCARCReferenceLifetimeType = E->getType();
+
+ E = M->GetTemporaryExpr();
+ }
+
+ if (const CXXDefaultArgExpr *DAE = dyn_cast<CXXDefaultArgExpr>(E))
+ E = DAE->getExpr();
+
+ if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
+ CGF.enterFullExpression(EWC);
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+
+ return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
+ ReferenceTemporary,
+ ReferenceTemporaryDtor,
+ ObjCARCReferenceLifetimeType,
+ InitializedDecl);
+ }
+
+ RValue RV;
+ if (E->isGLValue()) {
+ // Emit the expression as an lvalue.
+ LValue LV = CGF.EmitLValue(E);
+
+ if (LV.isSimple())
+ return LV.getAddress();
+
+ // We have to load the lvalue.
+ RV = CGF.EmitLoadOfLValue(LV);
+ } else {
+ if (!ObjCARCReferenceLifetimeType.isNull()) {
+ ReferenceTemporary = CreateReferenceTemporary(CGF,
+ ObjCARCReferenceLifetimeType,
+ InitializedDecl);
+
+
+ LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
+ ObjCARCReferenceLifetimeType);
+
+ CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
+ RefTempDst, false);
+
+ bool ExtendsLifeOfTemporary = false;
+ if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
+ if (Var->extendsLifetimeOfTemporary())
+ ExtendsLifeOfTemporary = true;
+ } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
+ ExtendsLifeOfTemporary = true;
+ }
+
+ if (!ExtendsLifeOfTemporary) {
+ // Since the lifetime of this temporary isn't going to be extended,
+ // we need to clean it up ourselves at the end of the full expression.
+ switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ assert(!ObjCARCReferenceLifetimeType->isArrayType());
+ CleanupKind cleanupKind = CGF.getARCCleanupKind();
+ CGF.pushDestroy(cleanupKind,
+ ReferenceTemporary,
+ ObjCARCReferenceLifetimeType,
+ CodeGenFunction::destroyARCStrongImprecise,
+ cleanupKind & EHCleanup);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak:
+ assert(!ObjCARCReferenceLifetimeType->isArrayType());
+ CGF.pushDestroy(NormalAndEHCleanup,
+ ReferenceTemporary,
+ ObjCARCReferenceLifetimeType,
+ CodeGenFunction::destroyARCWeak,
+ /*useEHCleanupForArray*/ true);
+ break;
+ }
+
+ ObjCARCReferenceLifetimeType = QualType();
+ }
+
+ return ReferenceTemporary;
+ }
+
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ while (true) {
+ E = E->IgnoreParens();
+
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if ((CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase) &&
+ E->getType()->isRecordType()) {
+ E = CE->getSubExpr();
+ CXXRecordDecl *Derived
+ = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
+ Adjustments.push_back(SubobjectAdjustment(CE, Derived));
+ continue;
+ }
+
+ if (CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ if (!ME->isArrow() && ME->getBase()->isRValue()) {
+ assert(ME->getBase()->getType()->isRecordType());
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ E = ME->getBase();
+ Adjustments.push_back(SubobjectAdjustment(Field));
+ continue;
+ }
+ }
+ }
+
+ if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
+ if (opaque->getType()->isRecordType())
+ return CGF.EmitOpaqueValueLValue(opaque).getAddress();
+
+ // Nothing changed.
+ break;
+ }
+
+ // Create a reference temporary if necessary.
+ AggValueSlot AggSlot = AggValueSlot::ignored();
+ if (CGF.hasAggregateLLVMType(E->getType()) &&
+ !E->getType()->isAnyComplexType()) {
+ ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
+ InitializedDecl);
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
+ AggValueSlot::IsDestructed_t isDestructed
+ = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
+ AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
+ Qualifiers(), isDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ }
+
+ if (InitializedDecl) {
+ // Get the destructor for the reference temporary.
+ if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (!ClassDecl->hasTrivialDestructor())
+ ReferenceTemporaryDtor = ClassDecl->getDestructor();
+ }
+ }
+
+ RV = CGF.EmitAnyExpr(E, AggSlot);
+
+    // Check if we need to perform derived-to-base casts and/or field accesses
+    // to get from the temporary object we created (and possibly lifetime-
+    // extended) to the subobject we're binding the reference to.
+ if (!Adjustments.empty()) {
+ llvm::Value *Object = RV.getAggregateAddr();
+ for (unsigned I = Adjustments.size(); I != 0; --I) {
+ SubobjectAdjustment &Adjustment = Adjustments[I-1];
+ switch (Adjustment.Kind) {
+ case SubobjectAdjustment::DerivedToBaseAdjustment:
+ Object =
+ CGF.GetAddressOfBaseClass(Object,
+ Adjustment.DerivedToBase.DerivedClass,
+ Adjustment.DerivedToBase.BasePath->path_begin(),
+ Adjustment.DerivedToBase.BasePath->path_end(),
+ /*NullCheckValue=*/false);
+ break;
+
+ case SubobjectAdjustment::FieldAdjustment: {
+ LValue LV =
+ CGF.EmitLValueForField(Object, Adjustment.Field, 0);
+ if (LV.isSimple()) {
+ Object = LV.getAddress();
+ break;
+ }
+
+ // For non-simple lvalues, we actually have to create a copy of
+ // the object we're binding to.
+ QualType T = Adjustment.Field->getType().getNonReferenceType()
+ .getUnqualifiedType();
+ Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
+ LValue TempLV = CGF.MakeAddrLValue(Object,
+ Adjustment.Field->getType());
+ CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
+ break;
+ }
+
+ }
+ }
+
+ return Object;
+ }
+ }
+
+ if (RV.isAggregate())
+ return RV.getAggregateAddr();
+
+ // Create a temporary variable that we can bind the reference to.
+ ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
+ InitializedDecl);
+
+ unsigned Alignment =
+ CGF.getContext().getTypeAlignInChars(E->getType()).getQuantity();
+ if (RV.isScalar())
+ CGF.EmitStoreOfScalar(RV.getScalarVal(), ReferenceTemporary,
+ /*Volatile=*/false, Alignment, E->getType());
+ else
+ CGF.StoreComplexToAddr(RV.getComplexVal(), ReferenceTemporary,
+ /*Volatile=*/false);
+ return ReferenceTemporary;
+}
+
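+/// EmitReferenceBindingToExpr - Emit the expression to which a reference is
+/// being bound, and schedule whatever destructor call or ARC release the
+/// materialized temporary (if any) requires.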
+RValue
+CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
+ const NamedDecl *InitializedDecl) {
+ llvm::Value *ReferenceTemporary = 0;
+ const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
+ QualType ObjCARCReferenceLifetimeType;
+ llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
+ ReferenceTemporaryDtor,
+ ObjCARCReferenceLifetimeType,
+ InitializedDecl);
+ if (!ReferenceTemporaryDtor && ObjCARCReferenceLifetimeType.isNull())
+ return RValue::get(Value);
+
+ // Make sure to call the destructor for the reference temporary.
+ const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
+ if (VD && VD->hasGlobalStorage()) {
+ if (ReferenceTemporaryDtor) {
+ llvm::Constant *DtorFn =
+ CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
+ EmitCXXGlobalDtorRegistration(DtorFn,
+ cast<llvm::Constant>(ReferenceTemporary));
+ } else {
+ assert(!ObjCARCReferenceLifetimeType.isNull());
+ // Note: We intentionally do not register a global "destructor" to
+ // release the object.
+ }
+
+ return RValue::get(Value);
+ }
+
+ if (ReferenceTemporaryDtor)
+ PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
+ else {
+ switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable(
+ "Not a reference temporary that needs to be deallocated");
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ // Nothing to do.
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
+ CleanupKind cleanupKind = getARCCleanupKind();
+ pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
+ precise ? destroyARCStrongPrecise : destroyARCStrongImprecise,
+ cleanupKind & EHCleanup);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak: {
+ // __weak objects always get EH cleanups; otherwise, exceptions
+ // could cause really nasty crashes instead of mere leaks.
+ pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
+ ObjCARCReferenceLifetimeType, destroyARCWeak, true);
+ break;
+ }
+ }
+ }
+
+ return RValue::get(Value);
+}
+
+
+/// getAccessedFieldNo - Given an encoded value and a result number, return the
+/// input field number being accessed.
+unsigned CodeGenFunction::getAccessedFieldNo(unsigned Idx,
+ const llvm::Constant *Elts) {
+ return cast<llvm::ConstantInt>(Elts->getAggregateElement(Idx))
+ ->getZExtValue();
+}
+
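+/// EmitCheck - When undefined-behavior checking (CatchUndefined) is enabled,
+/// use llvm.objectsize to verify that at least Size bytes are accessible at
+/// Address; if the object is known to be too small, branch to the trap block.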
+void CodeGenFunction::EmitCheck(llvm::Value *Address, unsigned Size) {
+ if (!CatchUndefined)
+ return;
+
+  // This needs to be in the standard address space.
+ Address = Builder.CreateBitCast(Address, Int8PtrTy);
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
+
+ // In time, people may want to control this and use a 1 here.
+ llvm::Value *Arg = Builder.getFalse();
+ llvm::Value *C = Builder.CreateCall2(F, Address, Arg);
+ llvm::BasicBlock *Cont = createBasicBlock();
+ llvm::BasicBlock *Check = createBasicBlock();
+ llvm::Value *NegativeOne = llvm::ConstantInt::get(IntPtrTy, -1ULL);
+ Builder.CreateCondBr(Builder.CreateICmpEQ(C, NegativeOne), Cont, Check);
+
+ EmitBlock(Check);
+ Builder.CreateCondBr(Builder.CreateICmpUGE(C,
+ llvm::ConstantInt::get(IntPtrTy, Size)),
+ Cont, getTrapBB());
+ EmitBlock(Cont);
+}
+
+
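+/// EmitComplexPrePostIncDec - Emit a pre/post increment or decrement of a
+/// _Complex lvalue by adjusting its real part, returning the updated value
+/// for pre operations and the original value for post operations.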
+CodeGenFunction::ComplexPairTy CodeGenFunction::
+EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+ ComplexPairTy InVal = LoadComplexFromAddr(LV.getAddress(),
+ LV.isVolatileQualified());
+
+ llvm::Value *NextVal;
+ if (isa<llvm::IntegerType>(InVal.first->getType())) {
+ uint64_t AmountVal = isInc ? 1 : -1;
+ NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
+
+ // Add the inc/dec to the real part.
+ NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
+ } else {
+ QualType ElemTy = E->getType()->getAs<ComplexType>()->getElementType();
+ llvm::APFloat FVal(getContext().getFloatTypeSemantics(ElemTy), 1);
+ if (!isInc)
+ FVal.changeSign();
+ NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
+
+ // Add the inc/dec to the real part.
+ NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
+ }
+
+ ComplexPairTy IncVal(NextVal, InVal.second);
+
+ // Store the updated result through the lvalue.
+ StoreComplexToAddr(IncVal, LV.getAddress(), LV.isVolatileQualified());
+
+ // If this is a postinc, return the value read from memory, otherwise use the
+ // updated value.
+ return isPre ? IncVal : InVal;
+}
+
+
+//===----------------------------------------------------------------------===//
+// LValue Expression Emission
+//===----------------------------------------------------------------------===//
+
+RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
+ if (Ty->isVoidType())
+ return RValue::get(0);
+
+ if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
+ llvm::Type *EltTy = ConvertType(CTy->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return RValue::getComplex(std::make_pair(U, U));
+ }
+
+ // If this is a use of an undefined aggregate type, the aggregate must have an
+ // identifiable address. Just because the contents of the value are undefined
+ // doesn't mean that the address can't be taken and compared.
+ if (hasAggregateLLVMType(Ty)) {
+ llvm::Value *DestPtr = CreateMemTemp(Ty, "undef.agg.tmp");
+ return RValue::getAggregate(DestPtr);
+ }
+
+ return RValue::get(llvm::UndefValue::get(ConvertType(Ty)));
+}
+
+RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
+ const char *Name) {
+ ErrorUnsupported(E, Name);
+ return GetUndefRValue(E->getType());
+}
+
+LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
+ const char *Name) {
+ ErrorUnsupported(E, Name);
+ llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
+ return MakeAddrLValue(llvm::UndefValue::get(Ty), E->getType());
+}
+
+LValue CodeGenFunction::EmitCheckedLValue(const Expr *E) {
+ LValue LV = EmitLValue(E);
+ if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
+ EmitCheck(LV.getAddress(),
+ getContext().getTypeSizeInChars(E->getType()).getQuantity());
+ return LV;
+}
+
+/// EmitLValue - Emit code to compute a designator that specifies the location
+/// of the expression.
+///
+/// This can return one of two things: a simple address or a bitfield reference.
+/// In either case, the LLVM Value* in the LValue structure is guaranteed to be
+/// an LLVM pointer type.
+///
+/// If this returns a bitfield reference, nothing about the pointee type of the
+/// LLVM value is known: For example, it may not be a pointer to an integer.
+///
+/// If this returns a normal address, and if the lvalue's C type is fixed size,
+/// this method guarantees that the returned pointer type will point to an LLVM
+/// type of the same size as the lvalue's type. If the lvalue has a
+/// variable-length type, this is not possible.
+///
+LValue CodeGenFunction::EmitLValue(const Expr *E) {
+ switch (E->getStmtClass()) {
+ default: return EmitUnsupportedLValue(E, "l-value expression");
+
+ case Expr::ObjCPropertyRefExprClass:
+ llvm_unreachable("cannot emit a property reference directly");
+
+ case Expr::ObjCSelectorExprClass:
+ return EmitObjCSelectorLValue(cast<ObjCSelectorExpr>(E));
+ case Expr::ObjCIsaExprClass:
+ return EmitObjCIsaExpr(cast<ObjCIsaExpr>(E));
+ case Expr::BinaryOperatorClass:
+ return EmitBinaryOperatorLValue(cast<BinaryOperator>(E));
+ case Expr::CompoundAssignOperatorClass:
+ if (!E->getType()->isAnyComplexType())
+ return EmitCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
+ return EmitComplexCompoundAssignmentLValue(cast<CompoundAssignOperator>(E));
+ case Expr::CallExprClass:
+ case Expr::CXXMemberCallExprClass:
+ case Expr::CXXOperatorCallExprClass:
+ case Expr::UserDefinedLiteralClass:
+ return EmitCallExprLValue(cast<CallExpr>(E));
+ case Expr::VAArgExprClass:
+ return EmitVAArgExprLValue(cast<VAArgExpr>(E));
+ case Expr::DeclRefExprClass:
+ return EmitDeclRefLValue(cast<DeclRefExpr>(E));
+ case Expr::ParenExprClass:
+ return EmitLValue(cast<ParenExpr>(E)->getSubExpr());
+ case Expr::GenericSelectionExprClass:
+ return EmitLValue(cast<GenericSelectionExpr>(E)->getResultExpr());
+ case Expr::PredefinedExprClass:
+ return EmitPredefinedLValue(cast<PredefinedExpr>(E));
+ case Expr::StringLiteralClass:
+ return EmitStringLiteralLValue(cast<StringLiteral>(E));
+ case Expr::ObjCEncodeExprClass:
+ return EmitObjCEncodeExprLValue(cast<ObjCEncodeExpr>(E));
+ case Expr::PseudoObjectExprClass:
+ return EmitPseudoObjectLValue(cast<PseudoObjectExpr>(E));
+ case Expr::InitListExprClass:
+ assert(cast<InitListExpr>(E)->getNumInits() == 1 &&
+ "Only single-element init list can be lvalue.");
+ return EmitLValue(cast<InitListExpr>(E)->getInit(0));
+
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXConstructExprClass:
+ return EmitCXXConstructLValue(cast<CXXConstructExpr>(E));
+ case Expr::CXXBindTemporaryExprClass:
+ return EmitCXXBindTemporaryLValue(cast<CXXBindTemporaryExpr>(E));
+ case Expr::LambdaExprClass:
+ return EmitLambdaLValue(cast<LambdaExpr>(E));
+
+ case Expr::ExprWithCleanupsClass: {
+ const ExprWithCleanups *cleanups = cast<ExprWithCleanups>(E);
+ enterFullExpression(cleanups);
+ RunCleanupsScope Scope(*this);
+ return EmitLValue(cleanups->getSubExpr());
+ }
+
+ case Expr::CXXScalarValueInitExprClass:
+ return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
+ case Expr::CXXDefaultArgExprClass:
+ return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
+ case Expr::CXXTypeidExprClass:
+ return EmitCXXTypeidLValue(cast<CXXTypeidExpr>(E));
+
+ case Expr::ObjCMessageExprClass:
+ return EmitObjCMessageExprLValue(cast<ObjCMessageExpr>(E));
+ case Expr::ObjCIvarRefExprClass:
+ return EmitObjCIvarRefLValue(cast<ObjCIvarRefExpr>(E));
+ case Expr::StmtExprClass:
+ return EmitStmtExprLValue(cast<StmtExpr>(E));
+ case Expr::UnaryOperatorClass:
+ return EmitUnaryOpLValue(cast<UnaryOperator>(E));
+ case Expr::ArraySubscriptExprClass:
+ return EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
+ case Expr::ExtVectorElementExprClass:
+ return EmitExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
+ case Expr::MemberExprClass:
+ return EmitMemberExpr(cast<MemberExpr>(E));
+ case Expr::CompoundLiteralExprClass:
+ return EmitCompoundLiteralLValue(cast<CompoundLiteralExpr>(E));
+ case Expr::ConditionalOperatorClass:
+ return EmitConditionalOperatorLValue(cast<ConditionalOperator>(E));
+ case Expr::BinaryConditionalOperatorClass:
+ return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
+ case Expr::ChooseExprClass:
+ return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
+ case Expr::OpaqueValueExprClass:
+ return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
+ case Expr::SubstNonTypeTemplateParmExprClass:
+ return EmitLValue(cast<SubstNonTypeTemplateParmExpr>(E)->getReplacement());
+ case Expr::ImplicitCastExprClass:
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ case Expr::ObjCBridgedCastExprClass:
+ return EmitCastLValue(cast<CastExpr>(E));
+
+ case Expr::MaterializeTemporaryExprClass:
+ return EmitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(E));
+ }
+}
+
+/// Given an object of the given canonical type, can we safely copy a
+/// value out of it based on its initializer?
+static bool isConstantEmittableObjectType(QualType type) {
+ assert(type.isCanonical());
+ assert(!type->isReferenceType());
+
+ // Must be const-qualified but non-volatile.
+ Qualifiers qs = type.getLocalQualifiers();
+ if (!qs.hasConst() || qs.hasVolatile()) return false;
+
+ // Otherwise, all object types satisfy this except C++ classes with
+ // mutable subobjects or non-trivial copy/destroy behavior.
+ if (const RecordType *RT = dyn_cast<RecordType>(type))
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
+ if (RD->hasMutableFields() || !RD->isTrivial())
+ return false;
+
+ return true;
+}
+
+/// Can we constant-emit a load of a reference to a variable of the
+/// given type? This is different from predicates like
+/// Decl::isUsableInConstantExpressions because we do want it to apply
+/// in situations that don't necessarily satisfy the language's rules
+/// for this (e.g. C++'s ODR-use rules). For example, we want to be able
+/// to do this with const float variables even if those variables
+/// aren't marked 'constexpr'.
+enum ConstantEmissionKind {
+ CEK_None,
+ CEK_AsReferenceOnly,
+ CEK_AsValueOrReference,
+ CEK_AsValueOnly
+};
+static ConstantEmissionKind checkVarTypeForConstantEmission(QualType type) {
+ type = type.getCanonicalType();
+ if (const ReferenceType *ref = dyn_cast<ReferenceType>(type)) {
+ if (isConstantEmittableObjectType(ref->getPointeeType()))
+ return CEK_AsValueOrReference;
+ return CEK_AsReferenceOnly;
+ }
+ if (isConstantEmittableObjectType(type))
+ return CEK_AsValueOnly;
+ return CEK_None;
+}
+
+/// Try to emit a reference to the given value without producing it as
+/// an l-value. This is actually more than an optimization: we can't
+/// produce an l-value for variables that we never actually captured
+/// in a block or lambda, which means const int variables or constexpr
+/// literals or similar.
+CodeGenFunction::ConstantEmission
+CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
+ ValueDecl *value = refExpr->getDecl();
+
+ // The value needs to be an enum constant or a constant variable.
+ ConstantEmissionKind CEK;
+ if (isa<ParmVarDecl>(value)) {
+ CEK = CEK_None;
+ } else if (VarDecl *var = dyn_cast<VarDecl>(value)) {
+ CEK = checkVarTypeForConstantEmission(var->getType());
+ } else if (isa<EnumConstantDecl>(value)) {
+ CEK = CEK_AsValueOnly;
+ } else {
+ CEK = CEK_None;
+ }
+ if (CEK == CEK_None) return ConstantEmission();
+
+ Expr::EvalResult result;
+ bool resultIsReference;
+ QualType resultType;
+
+ // It's best to evaluate all the way as an r-value if that's permitted.
+ if (CEK != CEK_AsReferenceOnly &&
+ refExpr->EvaluateAsRValue(result, getContext())) {
+ resultIsReference = false;
+ resultType = refExpr->getType();
+
+ // Otherwise, try to evaluate as an l-value.
+ } else if (CEK != CEK_AsValueOnly &&
+ refExpr->EvaluateAsLValue(result, getContext())) {
+ resultIsReference = true;
+ resultType = value->getType();
+
+ // Failure.
+ } else {
+ return ConstantEmission();
+ }
+
+ // In any case, if the initializer has side-effects, abandon ship.
+ if (result.HasSideEffects)
+ return ConstantEmission();
+
+ // Emit as a constant.
+ llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);
+
+ // Make sure we emit a debug reference to the global variable.
+ // This should probably fire even for
+ if (isa<VarDecl>(value)) {
+ if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
+ EmitDeclRefExprDbgValue(refExpr, C);
+ } else {
+ assert(isa<EnumConstantDecl>(value));
+ EmitDeclRefExprDbgValue(refExpr, C);
+ }
+
+ // If we emitted a reference constant, we need to dereference that.
+ if (resultIsReference)
+ return ConstantEmission::forReference(C);
+
+ return ConstantEmission::forValue(C);
+}
+
+llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
+ return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
+ lvalue.getAlignment().getQuantity(),
+ lvalue.getType(), lvalue.getTBAAInfo());
+}
+
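+/// Return true if the given type is represented as a boolean value: 'bool'
+/// itself, enums whose underlying integer type is bool, and _Atomic versions
+/// of either.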
+static bool hasBooleanRepresentation(QualType Ty) {
+ if (Ty->isBooleanType())
+ return true;
+
+ if (const EnumType *ET = Ty->getAs<EnumType>())
+ return ET->getDecl()->getIntegerType()->isBooleanType();
+
+ if (const AtomicType *AT = Ty->getAs<AtomicType>())
+ return hasBooleanRepresentation(AT->getValueType());
+
+ return false;
+}
+
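+/// getRangeForLoadFromType - Compute !range metadata describing the values a
+/// load of the given type may produce: [0, 2) for boolean representations, or
+/// the representable range of a regular (non-fixed) C++ enum when the
+/// StrictEnums codegen option is set. Returns NULL when no useful range
+/// exists.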
+llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
+ const EnumType *ET = Ty->getAs<EnumType>();
+ bool IsRegularCPlusPlusEnum = (getLangOpts().CPlusPlus && ET &&
+ CGM.getCodeGenOpts().StrictEnums &&
+ !ET->getDecl()->isFixed());
+ bool IsBool = hasBooleanRepresentation(Ty);
+ llvm::Type *LTy;
+ if (!IsBool && !IsRegularCPlusPlusEnum)
+ return NULL;
+
+ llvm::APInt Min;
+ llvm::APInt End;
+ if (IsBool) {
+ Min = llvm::APInt(8, 0);
+ End = llvm::APInt(8, 2);
+ LTy = Int8Ty;
+ } else {
+ const EnumDecl *ED = ET->getDecl();
+ LTy = ConvertTypeForMem(ED->getIntegerType());
+ unsigned Bitwidth = LTy->getScalarSizeInBits();
+ unsigned NumNegativeBits = ED->getNumNegativeBits();
+ unsigned NumPositiveBits = ED->getNumPositiveBits();
+
+ if (NumNegativeBits) {
+ unsigned NumBits = std::max(NumNegativeBits, NumPositiveBits + 1);
+ assert(NumBits <= Bitwidth);
+ End = llvm::APInt(Bitwidth, 1) << (NumBits - 1);
+ Min = -End;
+ } else {
+ assert(NumPositiveBits <= Bitwidth);
+ End = llvm::APInt(Bitwidth, 1) << NumPositiveBits;
+ Min = llvm::APInt(Bitwidth, 0);
+ }
+ }
+
+ if (End == Min)
+ return NULL;
+
+ llvm::Value *LowAndHigh[2];
+ LowAndHigh[0] = llvm::ConstantInt::get(LTy, Min);
+ LowAndHigh[1] = llvm::ConstantInt::get(LTy, End);
+
+ llvm::LLVMContext &C = getLLVMContext();
+ llvm::MDNode *Range = llvm::MDNode::get(C, LowAndHigh);
+ return Range;
+}
+
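+/// EmitLoadOfScalar - Load a scalar value of type Ty from Addr, honoring the
+/// requested volatility, alignment, and TBAA information, making the load
+/// atomic for _Atomic types, and attaching range metadata when optimizing.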
+llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
+ unsigned Alignment, QualType Ty,
+ llvm::MDNode *TBAAInfo) {
+ llvm::LoadInst *Load = Builder.CreateLoad(Addr);
+ if (Volatile)
+ Load->setVolatile(true);
+ if (Alignment)
+ Load->setAlignment(Alignment);
+ if (TBAAInfo)
+ CGM.DecorateInstruction(Load, TBAAInfo);
+ // If this is an atomic type, all normal reads must be atomic
+ if (Ty->isAtomicType())
+ Load->setAtomic(llvm::SequentiallyConsistent);
+
+ if (CGM.getCodeGenOpts().OptimizationLevel > 0)
+ if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
+ Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
+
+ return EmitFromMemory(Load, Ty);
+}
+
+llvm::Value *CodeGenFunction::EmitToMemory(llvm::Value *Value, QualType Ty) {
+ // Bool has a different representation in memory than in registers.
+ if (hasBooleanRepresentation(Ty)) {
+ // This should really always be an i1, but sometimes it's already
+ // an i8, and it's awkward to track those cases down.
+ if (Value->getType()->isIntegerTy(1))
+ return Builder.CreateZExt(Value, Builder.getInt8Ty(), "frombool");
+ assert(Value->getType()->isIntegerTy(8) && "value rep of bool not i1/i8");
+ }
+
+ return Value;
+}
+
+llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
+ // Bool has a different representation in memory than in registers.
+ if (hasBooleanRepresentation(Ty)) {
+ assert(Value->getType()->isIntegerTy(8) && "memory rep of bool not i8");
+ return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
+ }
+
+ return Value;
+}
+
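+/// EmitStoreOfScalar - Store a scalar value of type Ty to Addr after
+/// converting it to its memory representation, honoring the requested
+/// volatility, alignment, and TBAA information; non-initializing stores to
+/// _Atomic types are made atomic.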
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
+ bool Volatile, unsigned Alignment,
+ QualType Ty,
+ llvm::MDNode *TBAAInfo,
+ bool isInit) {
+ Value = EmitToMemory(Value, Ty);
+
+ llvm::StoreInst *Store = Builder.CreateStore(Value, Addr, Volatile);
+ if (Alignment)
+ Store->setAlignment(Alignment);
+ if (TBAAInfo)
+ CGM.DecorateInstruction(Store, TBAAInfo);
+ if (!isInit && Ty->isAtomicType())
+ Store->setAtomic(llvm::SequentiallyConsistent);
+}
+
+void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
+ bool isInit) {
+ EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
+ lvalue.getAlignment().getQuantity(), lvalue.getType(),
+ lvalue.getTBAAInfo(), isInit);
+}
+
+/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
+/// method emits the address of the lvalue, then loads the result as an rvalue,
+/// returning the rvalue.
+RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
+ if (LV.isObjCWeak()) {
+ // load of a __weak object.
+ llvm::Value *AddrWeakObj = LV.getAddress();
+ return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
+ AddrWeakObj));
+ }
+ if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak)
+ return RValue::get(EmitARCLoadWeak(LV.getAddress()));
+
+ if (LV.isSimple()) {
+ assert(!LV.getType()->isFunctionType());
+
+ // Everything needs a load.
+ return RValue::get(EmitLoadOfScalar(LV));
+ }
+
+ if (LV.isVectorElt()) {
+ llvm::LoadInst *Load = Builder.CreateLoad(LV.getVectorAddr(),
+ LV.isVolatileQualified());
+ Load->setAlignment(LV.getAlignment().getQuantity());
+ return RValue::get(Builder.CreateExtractElement(Load, LV.getVectorIdx(),
+ "vecext"));
+ }
+
+ // If this is a reference to a subset of the elements of a vector, either
+ // shuffle the input or extract/insert them as appropriate.
+ if (LV.isExtVectorElt())
+ return EmitLoadOfExtVectorElementLValue(LV);
+
+ assert(LV.isBitField() && "Unknown LValue type!");
+ return EmitLoadOfBitfieldLValue(LV);
+}
+
+RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
+ const CGBitFieldInfo &Info = LV.getBitFieldInfo();
+
+ // Get the output type.
+ llvm::Type *ResLTy = ConvertType(LV.getType());
+ unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
+
+ // Compute the result as an OR of all of the individual component accesses.
+ llvm::Value *Res = 0;
+ for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
+ const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
+
+ // Get the field pointer.
+ llvm::Value *Ptr = LV.getBitFieldBaseAddr();
+
+ // Only offset by the field index if used, so that incoming values are not
+ // required to be structures.
+ if (AI.FieldIndex)
+ Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
+
+ // Offset by the byte offset, if used.
+ if (!AI.FieldByteOffset.isZero()) {
+ Ptr = EmitCastToVoidPtr(Ptr);
+ Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
+ "bf.field.offs");
+ }
+
+ // Cast to the access type.
+ llvm::Type *PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), AI.AccessWidth,
+ CGM.getContext().getTargetAddressSpace(LV.getType()));
+ Ptr = Builder.CreateBitCast(Ptr, PTy);
+
+ // Perform the load.
+ llvm::LoadInst *Load = Builder.CreateLoad(Ptr, LV.isVolatileQualified());
+ if (!AI.AccessAlignment.isZero())
+ Load->setAlignment(AI.AccessAlignment.getQuantity());
+
+ // Shift out unused low bits and mask out unused high bits.
+ llvm::Value *Val = Load;
+ if (AI.FieldBitStart)
+ Val = Builder.CreateLShr(Load, AI.FieldBitStart);
+ Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(AI.AccessWidth,
+ AI.TargetBitWidth),
+ "bf.clear");
+
+ // Extend or truncate to the target size.
+ if (AI.AccessWidth < ResSizeInBits)
+ Val = Builder.CreateZExt(Val, ResLTy);
+ else if (AI.AccessWidth > ResSizeInBits)
+ Val = Builder.CreateTrunc(Val, ResLTy);
+
+ // Shift into place, and OR into the result.
+ if (AI.TargetBitOffset)
+ Val = Builder.CreateShl(Val, AI.TargetBitOffset);
+ Res = Res ? Builder.CreateOr(Res, Val) : Val;
+ }
+
+ // If the bit-field is signed, perform the sign-extension.
+ //
+ // FIXME: This can easily be folded into the load of the high bits, which
+ // could also eliminate the mask of high bits in some situations.
+ if (Info.isSigned()) {
+ unsigned ExtraBits = ResSizeInBits - Info.getSize();
+ if (ExtraBits)
+ Res = Builder.CreateAShr(Builder.CreateShl(Res, ExtraBits),
+ ExtraBits, "bf.val.sext");
+ }
+
+ return RValue::get(Res);
+}
+
+// If this is a reference to a subset of the elements of a vector, create an
+// appropriate shufflevector.
+RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
+ llvm::LoadInst *Load = Builder.CreateLoad(LV.getExtVectorAddr(),
+ LV.isVolatileQualified());
+ Load->setAlignment(LV.getAlignment().getQuantity());
+ llvm::Value *Vec = Load;
+
+ const llvm::Constant *Elts = LV.getExtVectorElts();
+
+ // If the result of the expression is a non-vector type, we must be extracting
+ // a single element. Just codegen as an extractelement.
+ const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
+ if (!ExprVT) {
+ unsigned InIdx = getAccessedFieldNo(0, Elts);
+ llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
+ return RValue::get(Builder.CreateExtractElement(Vec, Elt));
+ }
+
+  // Always use a shuffle vector to try to retain the original program
+  // structure.
+ unsigned NumResultElts = ExprVT->getNumElements();
+
+ SmallVector<llvm::Constant*, 4> Mask;
+ for (unsigned i = 0; i != NumResultElts; ++i)
+ Mask.push_back(Builder.getInt32(getAccessedFieldNo(i, Elts)));
+
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
+ Vec = Builder.CreateShuffleVector(Vec, llvm::UndefValue::get(Vec->getType()),
+ MaskV);
+ return RValue::get(Vec);
+}
+
+
+
+/// EmitStoreThroughLValue - Store the specified rvalue into the specified
+/// lvalue, where both are guaranteed to have the same type.
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit) {
+ if (!Dst.isSimple()) {
+ if (Dst.isVectorElt()) {
+ // Read/modify/write the vector, inserting the new element.
+ llvm::LoadInst *Load = Builder.CreateLoad(Dst.getVectorAddr(),
+ Dst.isVolatileQualified());
+ Load->setAlignment(Dst.getAlignment().getQuantity());
+ llvm::Value *Vec = Load;
+ Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
+ Dst.getVectorIdx(), "vecins");
+ llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getVectorAddr(),
+ Dst.isVolatileQualified());
+ Store->setAlignment(Dst.getAlignment().getQuantity());
+ return;
+ }
+
+ // If this is an update of extended vector elements, insert them as
+ // appropriate.
+ if (Dst.isExtVectorElt())
+ return EmitStoreThroughExtVectorComponentLValue(Src, Dst);
+
+ assert(Dst.isBitField() && "Unknown LValue type");
+ return EmitStoreThroughBitfieldLValue(Src, Dst);
+ }
+
+ // There's special magic for assigning into an ARC-qualified l-value.
+ if (Qualifiers::ObjCLifetime Lifetime = Dst.getQuals().getObjCLifetime()) {
+ switch (Lifetime) {
+ case Qualifiers::OCL_None:
+ llvm_unreachable("present but none");
+
+ case Qualifiers::OCL_ExplicitNone:
+ // nothing special
+ break;
+
+ case Qualifiers::OCL_Strong:
+ EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
+ return;
+
+ case Qualifiers::OCL_Weak:
+ EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
+ return;
+
+ case Qualifiers::OCL_Autoreleasing:
+ Src = RValue::get(EmitObjCExtendObjectLifetime(Dst.getType(),
+ Src.getScalarVal()));
+ // fall into the normal path
+ break;
+ }
+ }
+
+ if (Dst.isObjCWeak() && !Dst.isNonGC()) {
+    // Assignment into a __weak object.
+ llvm::Value *LvalueDst = Dst.getAddress();
+ llvm::Value *src = Src.getScalarVal();
+ CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
+ return;
+ }
+
+ if (Dst.isObjCStrong() && !Dst.isNonGC()) {
+    // Assignment into a __strong object.
+ llvm::Value *LvalueDst = Dst.getAddress();
+ llvm::Value *src = Src.getScalarVal();
+ if (Dst.isObjCIvar()) {
+ assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
+ llvm::Type *ResultType = ConvertType(getContext().LongTy);
+ llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
+ llvm::Value *dst = RHS;
+ RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
+ llvm::Value *LHS =
+ Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
+ llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
+ CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
+ BytesBetween);
+ } else if (Dst.isGlobalObjCRef()) {
+ CGM.getObjCRuntime().EmitObjCGlobalAssign(*this, src, LvalueDst,
+ Dst.isThreadLocalRef());
+ }
+ else
+ CGM.getObjCRuntime().EmitObjCStrongCastAssign(*this, src, LvalueDst);
+ return;
+ }
+
+ assert(Src.isScalar() && "Can't emit an agg store with this method");
+ EmitStoreOfScalar(Src.getScalarVal(), Dst, isInit);
+}
+
+void CodeGenFunction::EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+ llvm::Value **Result) {
+ const CGBitFieldInfo &Info = Dst.getBitFieldInfo();
+
+ // Get the output type.
+ llvm::Type *ResLTy = ConvertTypeForMem(Dst.getType());
+ unsigned ResSizeInBits = CGM.getTargetData().getTypeSizeInBits(ResLTy);
+
+ // Get the source value, truncated to the width of the bit-field.
+ llvm::Value *SrcVal = Src.getScalarVal();
+
+ if (hasBooleanRepresentation(Dst.getType()))
+ SrcVal = Builder.CreateIntCast(SrcVal, ResLTy, /*IsSigned=*/false);
+
+ SrcVal = Builder.CreateAnd(SrcVal, llvm::APInt::getLowBitsSet(ResSizeInBits,
+ Info.getSize()),
+ "bf.value");
+
+ // Return the new value of the bit-field, if requested.
+ if (Result) {
+ // Cast back to the proper type for result.
+ llvm::Type *SrcTy = Src.getScalarVal()->getType();
+ llvm::Value *ReloadVal = Builder.CreateIntCast(SrcVal, SrcTy, false,
+ "bf.reload.val");
+
+ // Sign extend if necessary.
+ if (Info.isSigned()) {
+ unsigned ExtraBits = ResSizeInBits - Info.getSize();
+ if (ExtraBits)
+ ReloadVal = Builder.CreateAShr(Builder.CreateShl(ReloadVal, ExtraBits),
+ ExtraBits, "bf.reload.sext");
+ }
+
+ *Result = ReloadVal;
+ }
+
+ // Iterate over the components, writing each piece to memory.
+ for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
+ const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
+
+ // Get the field pointer.
+ llvm::Value *Ptr = Dst.getBitFieldBaseAddr();
+ unsigned addressSpace =
+ cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+
+ // Only offset by the field index if used, so that incoming values are not
+ // required to be structures.
+ if (AI.FieldIndex)
+ Ptr = Builder.CreateStructGEP(Ptr, AI.FieldIndex, "bf.field");
+
+ // Offset by the byte offset, if used.
+ if (!AI.FieldByteOffset.isZero()) {
+ Ptr = EmitCastToVoidPtr(Ptr);
+ Ptr = Builder.CreateConstGEP1_32(Ptr, AI.FieldByteOffset.getQuantity(),
+ "bf.field.offs");
+ }
+
+ // Cast to the access type.
+ llvm::Type *AccessLTy =
+ llvm::Type::getIntNTy(getLLVMContext(), AI.AccessWidth);
+
+ llvm::Type *PTy = AccessLTy->getPointerTo(addressSpace);
+ Ptr = Builder.CreateBitCast(Ptr, PTy);
+
+ // Extract the piece of the bit-field value to write in this access, limited
+ // to the values that are part of this access.
+ llvm::Value *Val = SrcVal;
+ if (AI.TargetBitOffset)
+ Val = Builder.CreateLShr(Val, AI.TargetBitOffset);
+ Val = Builder.CreateAnd(Val, llvm::APInt::getLowBitsSet(ResSizeInBits,
+ AI.TargetBitWidth));
+
+ // Extend or truncate to the access size.
+ if (ResSizeInBits < AI.AccessWidth)
+ Val = Builder.CreateZExt(Val, AccessLTy);
+ else if (ResSizeInBits > AI.AccessWidth)
+ Val = Builder.CreateTrunc(Val, AccessLTy);
+
+ // Shift into the position in memory.
+ if (AI.FieldBitStart)
+ Val = Builder.CreateShl(Val, AI.FieldBitStart);
+
+ // If necessary, load and OR in bits that are outside of the bit-field.
+ if (AI.TargetBitWidth != AI.AccessWidth) {
+ llvm::LoadInst *Load = Builder.CreateLoad(Ptr, Dst.isVolatileQualified());
+ if (!AI.AccessAlignment.isZero())
+ Load->setAlignment(AI.AccessAlignment.getQuantity());
+
+ // Compute the mask for zeroing the bits that are part of the bit-field.
+ llvm::APInt InvMask =
+ ~llvm::APInt::getBitsSet(AI.AccessWidth, AI.FieldBitStart,
+ AI.FieldBitStart + AI.TargetBitWidth);
+
+ // Apply the mask and OR in to the value to write.
+ Val = Builder.CreateOr(Builder.CreateAnd(Load, InvMask), Val);
+ }
+
+ // Write the value.
+ llvm::StoreInst *Store = Builder.CreateStore(Val, Ptr,
+ Dst.isVolatileQualified());
+ if (!AI.AccessAlignment.isZero())
+ Store->setAlignment(AI.AccessAlignment.getQuantity());
+ }
+}
+
+void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
+ LValue Dst) {
+ // This access turns into a read/modify/write of the vector. Load the input
+ // value now.
+ llvm::LoadInst *Load = Builder.CreateLoad(Dst.getExtVectorAddr(),
+ Dst.isVolatileQualified());
+ Load->setAlignment(Dst.getAlignment().getQuantity());
+ llvm::Value *Vec = Load;
+ const llvm::Constant *Elts = Dst.getExtVectorElts();
+
+ llvm::Value *SrcVal = Src.getScalarVal();
+
+ if (const VectorType *VTy = Dst.getType()->getAs<VectorType>()) {
+ unsigned NumSrcElts = VTy->getNumElements();
+ unsigned NumDstElts =
+ cast<llvm::VectorType>(Vec->getType())->getNumElements();
+ if (NumDstElts == NumSrcElts) {
+      // When the source and destination have the same number of elements, use
+      // a single shuffle vector; the mask places each source element into the
+      // destination position selected by the ext-vector accessor.
+ SmallVector<llvm::Constant*, 4> Mask(NumDstElts);
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i);
+
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
+ Vec = Builder.CreateShuffleVector(SrcVal,
+ llvm::UndefValue::get(Vec->getType()),
+ MaskV);
+ } else if (NumDstElts > NumSrcElts) {
+      // Extend the source vector to the same length and then shuffle it
+      // into the destination.
+ // FIXME: since we're shuffling with undef, can we just use the indices
+ // into that? This could be simpler.
+ SmallVector<llvm::Constant*, 4> ExtMask;
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ ExtMask.push_back(Builder.getInt32(i));
+ ExtMask.resize(NumDstElts, llvm::UndefValue::get(Int32Ty));
+ llvm::Value *ExtMaskV = llvm::ConstantVector::get(ExtMask);
+ llvm::Value *ExtSrcVal =
+ Builder.CreateShuffleVector(SrcVal,
+ llvm::UndefValue::get(SrcVal->getType()),
+ ExtMaskV);
+      // Build an identity mask for the destination vector,
+ SmallVector<llvm::Constant*, 4> Mask;
+ for (unsigned i = 0; i != NumDstElts; ++i)
+ Mask.push_back(Builder.getInt32(i));
+
+      // then replace the accessed positions with elements shuffled in from
+      // the extended source vector.
+ for (unsigned i = 0; i != NumSrcElts; ++i)
+ Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
+ Vec = Builder.CreateShuffleVector(Vec, ExtSrcVal, MaskV);
+ } else {
+ // We should never shorten the vector
+ llvm_unreachable("unexpected shorten vector length");
+ }
+ } else {
+ // If the Src is a scalar (not a vector) it must be updating one element.
+ unsigned InIdx = getAccessedFieldNo(0, Elts);
+ llvm::Value *Elt = llvm::ConstantInt::get(Int32Ty, InIdx);
+ Vec = Builder.CreateInsertElement(Vec, SrcVal, Elt);
+ }
+
+ llvm::StoreInst *Store = Builder.CreateStore(Vec, Dst.getExtVectorAddr(),
+ Dst.isVolatileQualified());
+ Store->setAlignment(Dst.getAlignment().getQuantity());
+}
+
+// setObjCGCLValueClass - Sets the class of the lvalue for the purpose of
+// generating the write-barrier API call. The lvalue is classified as a
+// global, an ivar, or neither.
+static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
+ LValue &LV,
+ bool IsMemberAccess=false) {
+ if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
+ return;
+
+ if (isa<ObjCIvarRefExpr>(E)) {
+ QualType ExpTy = E->getType();
+ if (IsMemberAccess && ExpTy->isPointerType()) {
+      // If the ivar is a structure pointer, assigning to a field of that
+      // struct follows gcc's behavior and conservatively uses a non-ivar
+      // write-barrier.
+ ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
+ if (ExpTy->isRecordType()) {
+ LV.setObjCIvar(false);
+ return;
+ }
+ }
+ LV.setObjCIvar(true);
+ ObjCIvarRefExpr *Exp = cast<ObjCIvarRefExpr>(const_cast<Expr*>(E));
+ LV.setBaseIvarExp(Exp->getBase());
+ LV.setObjCArray(E->getType()->isArrayType());
+ return;
+ }
+
+ if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
+ if (VD->hasGlobalStorage()) {
+ LV.setGlobalObjCRef(true);
+ LV.setThreadLocalRef(VD->isThreadSpecified());
+ }
+ }
+ LV.setObjCArray(E->getType()->isArrayType());
+ return;
+ }
+
+ if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
+ return;
+ }
+
+ if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
+ if (LV.isObjCIvar()) {
+      // If the cast is to a structure pointer, follow gcc's behavior and make
+      // it a non-ivar write-barrier.
+ QualType ExpTy = E->getType();
+ if (ExpTy->isPointerType())
+ ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
+ if (ExpTy->isRecordType())
+ LV.setObjCIvar(false);
+ }
+ return;
+ }
+
+ if (const GenericSelectionExpr *Exp = dyn_cast<GenericSelectionExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getResultExpr(), LV);
+ return;
+ }
+
+ if (const ImplicitCastExpr *Exp = dyn_cast<ImplicitCastExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
+ return;
+ }
+
+ if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
+ return;
+ }
+
+ if (const ObjCBridgedCastExpr *Exp = dyn_cast<ObjCBridgedCastExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
+ return;
+ }
+
+ if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
+ if (LV.isObjCIvar() && !LV.isObjCArray())
+      // Using array syntax to assign to what an ivar points to is not the
+      // same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
+ LV.setObjCIvar(false);
+ else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
+      // Using array syntax to assign to what a global points to is not the
+      // same as assigning to the global itself. {id *G;} G[i] = 0;
+ LV.setGlobalObjCRef(false);
+ return;
+ }
+
+ if (const MemberExpr *Exp = dyn_cast<MemberExpr>(E)) {
+ setObjCGCLValueClass(Ctx, Exp->getBase(), LV, true);
+ // We don't know if member is an 'ivar', but this flag is looked at
+ // only in the context of LV.isObjCIvar().
+ LV.setObjCArray(E->getType()->isArrayType());
+ return;
+ }
+}
+
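+// Cast the given lvalue address to a pointer to IRType, preserving its
+// address space.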
+static llvm::Value *
+EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
+ llvm::Value *V, llvm::Type *IRType,
+ StringRef Name = StringRef()) {
+ unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
+ return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
+}
+
+static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
+ const Expr *E, const VarDecl *VD) {
+ assert((VD->hasExternalStorage() || VD->isFileVarDecl()) &&
+ "Var decl must have external storage or be a file var decl!");
+
+ llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
+ llvm::Type *RealVarTy = CGF.getTypes().ConvertTypeForMem(VD->getType());
+ V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
+ CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
+ QualType T = E->getType();
+ LValue LV;
+ if (VD->getType()->isReferenceType()) {
+ llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
+ LI->setAlignment(Alignment.getQuantity());
+ V = LI;
+ LV = CGF.MakeNaturalAlignAddrLValue(V, T);
+ } else {
+ LV = CGF.MakeAddrLValue(V, E->getType(), Alignment);
+ }
+ setObjCGCLValueClass(CGF.getContext(), E, LV);
+ return LV;
+}
+
+static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
+ const Expr *E, const FunctionDecl *FD) {
+ llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
+ if (!FD->hasPrototype()) {
+ if (const FunctionProtoType *Proto =
+ FD->getType()->getAs<FunctionProtoType>()) {
+ // Ugly case: for a K&R-style definition, the type of the definition
+ // isn't the same as the type of a use. Correct for this with a
+ // bitcast.
+ QualType NoProtoType =
+ CGF.getContext().getFunctionNoProtoType(Proto->getResultType());
+ NoProtoType = CGF.getContext().getPointerType(NoProtoType);
+ V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
+ }
+ }
+ CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
+ return CGF.MakeAddrLValue(V, E->getType(), Alignment);
+}
+
+LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
+ const NamedDecl *ND = E->getDecl();
+ CharUnits Alignment = getContext().getDeclAlign(ND);
+ QualType T = E->getType();
+
+ // FIXME: We should be able to assert this for FunctionDecls as well!
+ // FIXME: We should be able to assert this for all DeclRefExprs, not just
+ // those with a valid source location.
+ assert((ND->isUsed(false) || !isa<VarDecl>(ND) ||
+ !E->getLocation().isValid()) &&
+ "Should not use decl without marking it used!");
+
+ if (ND->hasAttr<WeakRefAttr>()) {
+ const ValueDecl *VD = cast<ValueDecl>(ND);
+ llvm::Constant *Aliasee = CGM.GetWeakRefReference(VD);
+ return MakeAddrLValue(Aliasee, E->getType(), Alignment);
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+ // Check if this is a global variable.
+ if (VD->hasExternalStorage() || VD->isFileVarDecl())
+ return EmitGlobalVarDeclLValue(*this, E, VD);
+
+ bool isBlockVariable = VD->hasAttr<BlocksAttr>();
+
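+    // Local variables that are not references and not __block are not subject
+    // to GC write barriers; remember that so the GC qualifiers can be removed
+    // from the lvalue below.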
+ bool NonGCable = VD->hasLocalStorage() &&
+ !VD->getType()->isReferenceType() &&
+ !isBlockVariable;
+
+ llvm::Value *V = LocalDeclMap[VD];
+ if (!V && VD->isStaticLocal())
+ V = CGM.getStaticLocalDeclAddress(VD);
+
+ // Use special handling for lambdas.
+ if (!V) {
+ if (FieldDecl *FD = LambdaCaptureFields.lookup(VD))
+ return EmitLValueForField(CXXABIThisValue, FD, 0);
+
+ assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
+ CharUnits alignment = getContext().getDeclAlign(VD);
+ return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
+ E->getType(), alignment);
+ }
+
+ assert(V && "DeclRefExpr not entered in LocalDeclMap?");
+
+ if (isBlockVariable)
+ V = BuildBlockByrefAddress(V, VD);
+
+ LValue LV;
+ if (VD->getType()->isReferenceType()) {
+ llvm::LoadInst *LI = Builder.CreateLoad(V);
+ LI->setAlignment(Alignment.getQuantity());
+ V = LI;
+ LV = MakeNaturalAlignAddrLValue(V, T);
+ } else {
+ LV = MakeAddrLValue(V, T, Alignment);
+ }
+
+ if (NonGCable) {
+ LV.getQuals().removeObjCGCAttr();
+ LV.setNonGC(true);
+ }
+ setObjCGCLValueClass(getContext(), E, LV);
+ return LV;
+ }
+
+ if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
+ return EmitFunctionDeclLValue(*this, E, fn);
+
+ llvm_unreachable("Unhandled DeclRefExpr");
+}
+
+LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
+ // __extension__ doesn't affect lvalue-ness.
+ if (E->getOpcode() == UO_Extension)
+ return EmitLValue(E->getSubExpr());
+
+ QualType ExprTy = getContext().getCanonicalType(E->getSubExpr()->getType());
+ switch (E->getOpcode()) {
+ default: llvm_unreachable("Unknown unary operator lvalue!");
+ case UO_Deref: {
+ QualType T = E->getSubExpr()->getType()->getPointeeType();
+ assert(!T.isNull() && "CodeGenFunction::EmitUnaryOpLValue: Illegal type");
+
+ LValue LV = MakeNaturalAlignAddrLValue(EmitScalarExpr(E->getSubExpr()), T);
+ LV.getQuals().setAddressSpace(ExprTy.getAddressSpace());
+
+    // We should not generate a __weak write barrier on an indirect reference
+    // to a pointer to an object, as in:
+    //   void foo(__weak id *param); *param = 0;
+    // But we continue to generate a __strong write barrier on an indirect
+    // write into a pointer to an object.
+ if (getContext().getLangOpts().ObjC1 &&
+ getContext().getLangOpts().getGC() != LangOptions::NonGC &&
+ LV.isObjCWeak())
+ LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
+ return LV;
+ }
+ case UO_Real:
+ case UO_Imag: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ assert(LV.isSimple() && "real/imag on non-ordinary l-value");
+ llvm::Value *Addr = LV.getAddress();
+
+ // __real is valid on scalars. This is a faster way of testing that.
+ // __imag can only produce an rvalue on scalars.
+ if (E->getOpcode() == UO_Real &&
+ !cast<llvm::PointerType>(Addr->getType())
+ ->getElementType()->isStructTy()) {
+ assert(E->getSubExpr()->getType()->isArithmeticType());
+ return LV;
+ }
+
+ assert(E->getSubExpr()->getType()->isAnyComplexType());
+
+ unsigned Idx = E->getOpcode() == UO_Imag;
+ return MakeAddrLValue(Builder.CreateStructGEP(LV.getAddress(),
+ Idx, "idx"),
+ ExprTy);
+ }
+ case UO_PreInc:
+ case UO_PreDec: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ bool isInc = E->getOpcode() == UO_PreInc;
+
+ if (E->getType()->isAnyComplexType())
+ EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
+ else
+ EmitScalarPrePostIncDec(E, LV, isInc, true/*isPre*/);
+ return LV;
+ }
+ }
+}
+
+LValue CodeGenFunction::EmitStringLiteralLValue(const StringLiteral *E) {
+ return MakeAddrLValue(CGM.GetAddrOfConstantStringFromLiteral(E),
+ E->getType());
+}
+
+LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
+ return MakeAddrLValue(CGM.GetAddrOfConstantStringFromObjCEncode(E),
+ E->getType());
+}
+
+
+LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
+ switch (E->getIdentType()) {
+ default:
+ return EmitUnsupportedLValue(E, "predefined expression");
+
+ case PredefinedExpr::Func:
+ case PredefinedExpr::Function:
+ case PredefinedExpr::PrettyFunction: {
+ unsigned Type = E->getIdentType();
+ std::string GlobalVarName;
+
+ switch (Type) {
+ default: llvm_unreachable("Invalid type");
+ case PredefinedExpr::Func:
+ GlobalVarName = "__func__.";
+ break;
+ case PredefinedExpr::Function:
+ GlobalVarName = "__FUNCTION__.";
+ break;
+ case PredefinedExpr::PrettyFunction:
+ GlobalVarName = "__PRETTY_FUNCTION__.";
+ break;
+ }
+
+ StringRef FnName = CurFn->getName();
+ if (FnName.startswith("\01"))
+ FnName = FnName.substr(1);
+ GlobalVarName += FnName;
+
+ const Decl *CurDecl = CurCodeDecl;
+ if (CurDecl == 0)
+ CurDecl = getContext().getTranslationUnitDecl();
+
+ std::string FunctionName =
+ (isa<BlockDecl>(CurDecl)
+ ? FnName.str()
+ : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)Type, CurDecl));
+
+ llvm::Constant *C =
+ CGM.GetAddrOfConstantCString(FunctionName, GlobalVarName.c_str());
+ return MakeAddrLValue(C, E->getType());
+ }
+ }
+}
+
+llvm::BasicBlock *CodeGenFunction::getTrapBB() {
+ const CodeGenOptions &GCO = CGM.getCodeGenOpts();
+
+  // If we are not optimizing, don't collapse all calls to trap in the function
+  // to the same call; that way, in the debugger one can see which operation
+  // did in fact fail. If we are optimizing, we collapse all calls to trap down
+  // to just one per function to save on code size.
+ if (GCO.OptimizationLevel && TrapBB)
+ return TrapBB;
+
+ llvm::BasicBlock *Cont = 0;
+ if (HaveInsertPoint()) {
+ Cont = createBasicBlock("cont");
+ EmitBranch(Cont);
+ }
+ TrapBB = createBasicBlock("trap");
+ EmitBlock(TrapBB);
+
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::trap);
+ llvm::CallInst *TrapCall = Builder.CreateCall(F);
+ TrapCall->setDoesNotReturn();
+ TrapCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
+
+ if (Cont)
+ EmitBlock(Cont);
+ return TrapBB;
+}
+
+/// isSimpleArrayDecayOperand - If the specified expr is a simple decay from an
+/// array to pointer, return the array subexpression.
+static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
+ // If this isn't just an array->pointer decay, bail out.
+ const CastExpr *CE = dyn_cast<CastExpr>(E);
+ if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
+ return 0;
+
+ // If this is a decay from variable width array, bail out.
+ const Expr *SubExpr = CE->getSubExpr();
+ if (SubExpr->getType()->isVariableArrayType())
+ return 0;
+
+ return SubExpr;
+}
+
+LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
+ // The index must always be an integer, which is not an aggregate. Emit it.
+ llvm::Value *Idx = EmitScalarExpr(E->getIdx());
+ QualType IdxTy = E->getIdx()->getType();
+ bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
+
+ // If the base is a vector type, then we are forming a vector element lvalue
+ // with this subscript.
+ if (E->getBase()->getType()->isVectorType()) {
+ // Emit the vector as an lvalue to get its address.
+ LValue LHS = EmitLValue(E->getBase());
+ assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
+ Idx = Builder.CreateIntCast(Idx, Int32Ty, IdxSigned, "vidx");
+ return LValue::MakeVectorElt(LHS.getAddress(), Idx,
+ E->getBase()->getType(), LHS.getAlignment());
+ }
+
+  // Extend or truncate the index to the pointer-sized integer type.
+ if (Idx->getType() != IntPtrTy)
+ Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
+
+  // FIXME: Once LLVM implements object-size checking, this can come out.
+ if (CatchUndefined) {
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E->getBase())){
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ICE->getSubExpr())) {
+ if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
+ if (const ConstantArrayType *CAT
+ = getContext().getAsConstantArrayType(DRE->getType())) {
+ llvm::APInt Size = CAT->getSize();
+ llvm::BasicBlock *Cont = createBasicBlock("cont");
+ Builder.CreateCondBr(Builder.CreateICmpULE(Idx,
+ llvm::ConstantInt::get(Idx->getType(), Size)),
+ Cont, getTrapBB());
+ EmitBlock(Cont);
+ }
+ }
+ }
+ }
+ }
+
+  // We know that the pointer points to a type of the correct size, unless the
+  // type is a VLA or an Objective-C interface.
+ llvm::Value *Address = 0;
+ CharUnits ArrayAlignment;
+ if (const VariableArrayType *vla =
+ getContext().getAsVariableArrayType(E->getType())) {
+ // The base must be a pointer, which is not an aggregate. Emit
+ // it. It needs to be emitted first in case it's what captures
+ // the VLA bounds.
+ Address = EmitScalarExpr(E->getBase());
+
+ // The element count here is the total number of non-VLA elements.
+ llvm::Value *numElements = getVLASize(vla).first;
+
+ // Effectively, the multiply by the VLA size is part of the GEP.
+ // GEP indexes are signed, and scaling an index isn't permitted to
+ // signed-overflow, so we use the same semantics for our explicit
+ // multiply. We suppress this if overflow is not undefined behavior.
+ if (getLangOpts().isSignedOverflowDefined()) {
+ Idx = Builder.CreateMul(Idx, numElements);
+ Address = Builder.CreateGEP(Address, Idx, "arrayidx");
+ } else {
+ Idx = Builder.CreateNSWMul(Idx, numElements);
+ Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
+ }
+ } else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
+ // Indexing over an interface, as in "NSString *P; P[4];"
+ llvm::Value *InterfaceSize =
+ llvm::ConstantInt::get(Idx->getType(),
+ getContext().getTypeSizeInChars(OIT).getQuantity());
+
+ Idx = Builder.CreateMul(Idx, InterfaceSize);
+
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ llvm::Value *Base = EmitScalarExpr(E->getBase());
+ Address = EmitCastToVoidPtr(Base);
+ Address = Builder.CreateGEP(Address, Idx, "arrayidx");
+ Address = Builder.CreateBitCast(Address, Base->getType());
+ } else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
+ // If this is A[i] where A is an array, the frontend will have decayed the
+    // base to be an ArrayToPointerDecay implicit cast. While correct, it is
+ // inefficient at -O0 to emit a "gep A, 0, 0" when codegen'ing it, then a
+ // "gep x, i" here. Emit one "gep A, 0, i".
+ assert(Array->getType()->isArrayType() &&
+ "Array to pointer decay must have array source type!");
+ LValue ArrayLV = EmitLValue(Array);
+ llvm::Value *ArrayPtr = ArrayLV.getAddress();
+ llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
+ llvm::Value *Args[] = { Zero, Idx };
+
+ // Propagate the alignment from the array itself to the result.
+ ArrayAlignment = ArrayLV.getAlignment();
+
+ if (getContext().getLangOpts().isSignedOverflowDefined())
+ Address = Builder.CreateGEP(ArrayPtr, Args, "arrayidx");
+ else
+ Address = Builder.CreateInBoundsGEP(ArrayPtr, Args, "arrayidx");
+ } else {
+ // The base must be a pointer, which is not an aggregate. Emit it.
+ llvm::Value *Base = EmitScalarExpr(E->getBase());
+ if (getContext().getLangOpts().isSignedOverflowDefined())
+ Address = Builder.CreateGEP(Base, Idx, "arrayidx");
+ else
+ Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
+ }
+
+ QualType T = E->getBase()->getType()->getPointeeType();
+ assert(!T.isNull() &&
+ "CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
+
+ // Limit the alignment to that of the result type.
+ LValue LV;
+ if (!ArrayAlignment.isZero()) {
+ CharUnits Align = getContext().getTypeAlignInChars(T);
+ ArrayAlignment = std::min(Align, ArrayAlignment);
+ LV = MakeAddrLValue(Address, T, ArrayAlignment);
+ } else {
+ LV = MakeNaturalAlignAddrLValue(Address, T);
+ }
+
+ LV.getQuals().setAddressSpace(E->getBase()->getType().getAddressSpace());
+
+ if (getContext().getLangOpts().ObjC1 &&
+ getContext().getLangOpts().getGC() != LangOptions::NonGC) {
+ LV.setNonGC(!E->isOBJCGCCandidate(getContext()));
+ setObjCGCLValueClass(getContext(), E, LV);
+ }
+ return LV;
+}
+
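+// Build a constant vector of i32 values from the given list of element
+// indices.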
+static
+llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
+ SmallVector<unsigned, 4> &Elts) {
+ SmallVector<llvm::Constant*, 4> CElts;
+ for (unsigned i = 0, e = Elts.size(); i != e; ++i)
+ CElts.push_back(Builder.getInt32(Elts[i]));
+
+ return llvm::ConstantVector::get(CElts);
+}
+
+LValue CodeGenFunction::
+EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
+ // Emit the base vector as an l-value.
+ LValue Base;
+
+ // ExtVectorElementExpr's base can either be a vector or pointer to vector.
+ if (E->isArrow()) {
+ // If it is a pointer to a vector, emit the address and form an lvalue with
+ // it.
+ llvm::Value *Ptr = EmitScalarExpr(E->getBase());
+ const PointerType *PT = E->getBase()->getType()->getAs<PointerType>();
+ Base = MakeAddrLValue(Ptr, PT->getPointeeType());
+ Base.getQuals().removeObjCGCAttr();
+ } else if (E->getBase()->isGLValue()) {
+    // Otherwise, if the base is an lvalue (as in the case of foo.x.x),
+ // emit the base as an lvalue.
+ assert(E->getBase()->getType()->isVectorType());
+ Base = EmitLValue(E->getBase());
+ } else {
+ // Otherwise, the base is a normal rvalue (as in (V+V).x), emit it as such.
+ assert(E->getBase()->getType()->isVectorType() &&
+ "Result must be a vector");
+ llvm::Value *Vec = EmitScalarExpr(E->getBase());
+
+ // Store the vector to memory (because LValue wants an address).
+ llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
+ Builder.CreateStore(Vec, VecMem);
+ Base = MakeAddrLValue(VecMem, E->getBase()->getType());
+ }
+
+ QualType type =
+ E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
+
+ // Encode the element access list into a vector of unsigned indices.
+ SmallVector<unsigned, 4> Indices;
+ E->getEncodedElementAccess(Indices);
+
+ if (Base.isSimple()) {
+ llvm::Constant *CV = GenerateConstantVector(Builder, Indices);
+ return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
+ Base.getAlignment());
+ }
+ assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
+
+ llvm::Constant *BaseElts = Base.getExtVectorElts();
+ SmallVector<llvm::Constant *, 4> CElts;
+
+ for (unsigned i = 0, e = Indices.size(); i != e; ++i)
+ CElts.push_back(BaseElts->getAggregateElement(Indices[i]));
+ llvm::Constant *CV = llvm::ConstantVector::get(CElts);
+ return LValue::MakeExtVectorElt(Base.getExtVectorAddr(), CV, type,
+ Base.getAlignment());
+}
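
For reference, the ext_vector_type element accesses this lowers look like the following (illustrative sketch using the Clang vector extension, not part of the patch):

    typedef float float4 __attribute__((ext_vector_type(4)));

    float4 swizzle(float4 v) {
      float  x = v.x;     // simple base: encoded element list {0}
      float4 r = v.wzyx;  // whole-vector shuffle: encoded as {3, 2, 1, 0}
      float  y = v.zw.y;  // nested access (the "foo.x.x" case): indices composed
                          //   through the ext-vector-element l-value path above
      return r * (x + y);
    }
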
+
+LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
+ bool isNonGC = false;
+ Expr *BaseExpr = E->getBase();
+ llvm::Value *BaseValue = NULL;
+ Qualifiers BaseQuals;
+
+ // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
+ if (E->isArrow()) {
+ BaseValue = EmitScalarExpr(BaseExpr);
+ const PointerType *PTy =
+ BaseExpr->getType()->getAs<PointerType>();
+ BaseQuals = PTy->getPointeeType().getQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ if (BaseLV.isNonGC())
+ isNonGC = true;
+ // FIXME: this isn't right for bitfields.
+ BaseValue = BaseLV.getAddress();
+ QualType BaseTy = BaseExpr->getType();
+ BaseQuals = BaseTy.getQualifiers();
+ }
+
+ NamedDecl *ND = E->getMemberDecl();
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(ND)) {
+ LValue LV = EmitLValueForField(BaseValue, Field,
+ BaseQuals.getCVRQualifiers());
+ LV.setNonGC(isNonGC);
+ setObjCGCLValueClass(getContext(), E, LV);
+ return LV;
+ }
+
+ if (VarDecl *VD = dyn_cast<VarDecl>(ND))
+ return EmitGlobalVarDeclLValue(*this, E, VD);
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
+ return EmitFunctionDeclLValue(*this, E, FD);
+
+ llvm_unreachable("Unhandled member declaration!");
+}
+
+LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value *BaseValue,
+ const FieldDecl *Field,
+ unsigned CVRQualifiers) {
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(Field->getParent());
+ const CGBitFieldInfo &Info = RL.getBitFieldInfo(Field);
+ return LValue::MakeBitfield(BaseValue, Info,
+ Field->getType().withCVRQualifiers(CVRQualifiers));
+}
+
+/// EmitLValueForAnonRecordField - Given that the field is a member of
+/// an anonymous struct or union buried inside a record, and given
+/// that the base value is a pointer to the enclosing record, derive
+/// an lvalue for the ultimate field.
+LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
+ const IndirectFieldDecl *Field,
+ unsigned CVRQualifiers) {
+ IndirectFieldDecl::chain_iterator I = Field->chain_begin(),
+ IEnd = Field->chain_end();
+ while (true) {
+ LValue LV = EmitLValueForField(BaseValue, cast<FieldDecl>(*I),
+ CVRQualifiers);
+ if (++I == IEnd) return LV;
+
+ assert(LV.isSimple());
+ BaseValue = LV.getAddress();
+ CVRQualifiers |= LV.getVRQualifiers();
+ }
+}
+
+LValue CodeGenFunction::EmitLValueForField(llvm::Value *baseAddr,
+ const FieldDecl *field,
+ unsigned cvr) {
+ if (field->isBitField())
+ return EmitLValueForBitfield(baseAddr, field, cvr);
+
+ const RecordDecl *rec = field->getParent();
+ QualType type = field->getType();
+ CharUnits alignment = getContext().getDeclAlign(field);
+
+ bool mayAlias = rec->hasAttr<MayAliasAttr>();
+
+ llvm::Value *addr = baseAddr;
+ if (rec->isUnion()) {
+ // For unions, there is no pointer adjustment.
+ assert(!type->isReferenceType() && "union has reference member");
+ } else {
+ // For structs, we GEP to the field that the record layout suggests.
+ unsigned idx = CGM.getTypes().getCGRecordLayout(rec).getLLVMFieldNo(field);
+ addr = Builder.CreateStructGEP(addr, idx, field->getName());
+
+ // If this is a reference field, load the reference right now.
+ if (const ReferenceType *refType = type->getAs<ReferenceType>()) {
+ llvm::LoadInst *load = Builder.CreateLoad(addr, "ref");
+ if (cvr & Qualifiers::Volatile) load->setVolatile(true);
+ load->setAlignment(alignment.getQuantity());
+
+ if (CGM.shouldUseTBAA()) {
+ llvm::MDNode *tbaa;
+ if (mayAlias)
+ tbaa = CGM.getTBAAInfo(getContext().CharTy);
+ else
+ tbaa = CGM.getTBAAInfo(type);
+ CGM.DecorateInstruction(load, tbaa);
+ }
+
+ addr = load;
+ mayAlias = false;
+ type = refType->getPointeeType();
+ if (type->isIncompleteType())
+ alignment = CharUnits();
+ else
+ alignment = getContext().getTypeAlignInChars(type);
+ cvr = 0; // qualifiers don't recursively apply to referencee
+ }
+ }
+
+ // Make sure that the address is pointing to the right type. This is critical
+ // for both unions and structs. A union needs a bitcast, a struct element
+ // will need a bitcast if the LLVM type laid out doesn't match the desired
+ // type.
+ addr = EmitBitCastOfLValueToProperType(*this, addr,
+ CGM.getTypes().ConvertTypeForMem(type),
+ field->getName());
+
+ if (field->hasAttr<AnnotateAttr>())
+ addr = EmitFieldAnnotations(field, addr);
+
+ LValue LV = MakeAddrLValue(addr, type, alignment);
+ LV.getQuals().addCVRQualifiers(cvr);
+
+ // __weak attribute on a field is ignored.
+ if (LV.getQuals().getObjCGCAttr() == Qualifiers::Weak)
+ LV.getQuals().removeObjCGCAttr();
+
+ // Fields of may_alias structs act like 'char' for TBAA purposes.
+ // FIXME: this should get propagated down through anonymous structs
+ // and unions.
+ if (mayAlias && LV.getTBAAInfo())
+ LV.setTBAAInfo(CGM.getTBAAInfo(getContext().CharTy));
+
+ return LV;
+}
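
For reference, two of the field cases handled above, in source form (illustrative sketch, not from the patch): a reference member is loaded while forming the l-value, and fields of a may_alias record get char-compatible TBAA.

    struct __attribute__((may_alias)) Word {
      int raw;                  // accesses to this field get 'char' TBAA above
    };

    struct Holder {
      int &ref;                 // reference member
    };

    int get(Holder &h) {
      // The "ref" load above happens while forming the l-value,
      // before the actual value load.
      return h.ref;
    }
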
+
+LValue
+CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value *BaseValue,
+ const FieldDecl *Field,
+ unsigned CVRQualifiers) {
+ QualType FieldType = Field->getType();
+
+ if (!FieldType->isReferenceType())
+ return EmitLValueForField(BaseValue, Field, CVRQualifiers);
+
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(Field->getParent());
+ unsigned idx = RL.getLLVMFieldNo(Field);
+ llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx);
+ assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
+
+
+ // Make sure that the address is pointing to the right type. This is critical
+ // for both unions and structs. A union needs a bitcast, a struct element
+ // will need a bitcast if the LLVM type laid out doesn't match the desired
+ // type.
+ llvm::Type *llvmType = ConvertTypeForMem(FieldType);
+ unsigned AS = cast<llvm::PointerType>(V->getType())->getAddressSpace();
+ V = Builder.CreateBitCast(V, llvmType->getPointerTo(AS));
+
+ CharUnits Alignment = getContext().getDeclAlign(Field);
+ return MakeAddrLValue(V, FieldType, Alignment);
+}
+
+LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
+ if (E->isFileScope()) {
+ llvm::Value *GlobalPtr = CGM.GetAddrOfConstantCompoundLiteral(E);
+ return MakeAddrLValue(GlobalPtr, E->getType());
+ }
+
+ llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
+ const Expr *InitExpr = E->getInitializer();
+ LValue Result = MakeAddrLValue(DeclPtr, E->getType());
+
+ EmitAnyExprToMem(InitExpr, DeclPtr, E->getType().getQualifiers(),
+ /*Init*/ true);
+
+ return Result;
+}
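
For reference, the two compound-literal cases above in C99 source form (illustrative sketch; file-scope literals go through GetAddrOfConstantCompoundLiteral, block-scope ones get a ".compoundliteral" temporary):

    /* C99 compound literals. */
    int *gp = (int[]){1, 2, 3};     /* file scope: a constant-initialized global object */

    int first(void) {
      int *p = (int[]){4, 5, 6};    /* block scope: a local temporary initialized in place */
      return p[0];
    }
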
+
+LValue CodeGenFunction::
+EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
+ if (!expr->isGLValue()) {
+ // ?: here should be an aggregate.
+ assert((hasAggregateLLVMType(expr->getType()) &&
+ !expr->getType()->isAnyComplexType()) &&
+ "Unexpected conditional operator!");
+ return EmitAggExprToLValue(expr);
+ }
+
+ OpaqueValueMapping binding(*this, expr);
+
+ const Expr *condExpr = expr->getCond();
+ bool CondExprBool;
+ if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
+ const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
+ if (!CondExprBool) std::swap(live, dead);
+
+ if (!ContainsLabel(dead))
+ return EmitLValue(live);
+ }
+
+ llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
+ llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
+ llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
+
+ ConditionalEvaluation eval(*this);
+ EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);
+
+ // Any temporaries created here are conditional.
+ EmitBlock(lhsBlock);
+ eval.begin(*this);
+ LValue lhs = EmitLValue(expr->getTrueExpr());
+ eval.end(*this);
+
+ if (!lhs.isSimple())
+ return EmitUnsupportedLValue(expr, "conditional operator");
+
+ lhsBlock = Builder.GetInsertBlock();
+ Builder.CreateBr(contBlock);
+
+ // Any temporaries created here are conditional.
+ EmitBlock(rhsBlock);
+ eval.begin(*this);
+ LValue rhs = EmitLValue(expr->getFalseExpr());
+ eval.end(*this);
+ if (!rhs.isSimple())
+ return EmitUnsupportedLValue(expr, "conditional operator");
+ rhsBlock = Builder.GetInsertBlock();
+
+ EmitBlock(contBlock);
+
+ llvm::PHINode *phi = Builder.CreatePHI(lhs.getAddress()->getType(), 2,
+ "cond-lvalue");
+ phi->addIncoming(lhs.getAddress(), lhsBlock);
+ phi->addIncoming(rhs.getAddress(), rhsBlock);
+ return MakeAddrLValue(phi, expr->getType());
+}
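
For reference, a glvalue conditional that exercises the branch-and-PHI lowering above (illustrative sketch, not from the patch):

    int select_store(bool c, int &a, int &b) {
      // Both arms are l-values; when 'c' does not fold to a constant, the chosen
      // address comes out of the "cond-lvalue" PHI built above.
      (c ? a : b) = 42;
      return a;
    }
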
+
+/// EmitCastLValue - Casts are never lvalues unless that cast is a dynamic_cast.
+/// If the cast is a dynamic_cast, we can have the usual lvalue result;
+/// otherwise, if a cast is needed by the code generator in an lvalue context,
+/// it must mean that we need the address of an aggregate in order to
+/// access one of its fields. This can happen for all the reasons that casts
+/// are permitted with aggregate results, including no-op aggregate casts and
+/// casts from scalar to union.
+LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
+ switch (E->getCastKind()) {
+ case CK_ToVoid:
+ return EmitUnsupportedLValue(E, "unexpected cast lvalue");
+
+ case CK_Dependent:
+ llvm_unreachable("dependent cast kind in IR gen!");
+
+ // These two casts are currently treated as no-ops, although they could
+ // potentially be real operations depending on the target's ABI.
+ case CK_NonAtomicToAtomic:
+ case CK_AtomicToNonAtomic:
+
+ case CK_NoOp:
+ case CK_LValueToRValue:
+ if (!E->getSubExpr()->Classify(getContext()).isPRValue()
+ || E->getType()->isRecordType())
+ return EmitLValue(E->getSubExpr());
+ // Fall through to synthesize a temporary.
+
+ case CK_BitCast:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToMemberPointer:
+ case CK_NullToPointer:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject: {
+ // These casts only produce lvalues when we're binding a reference to a
+ // temporary realized from a (converted) pure rvalue. Emit the expression
+ // as a value, copy it into a temporary, and return an lvalue referring to
+ // that temporary.
+ llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
+ EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
+ return MakeAddrLValue(V, E->getType());
+ }
+
+ case CK_Dynamic: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ llvm::Value *V = LV.getAddress();
+ const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(E);
+ return MakeAddrLValue(EmitDynamicCast(V, DCE), E->getType());
+ }
+
+ case CK_ConstructorConversion:
+ case CK_UserDefinedConversion:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ return EmitLValue(E->getSubExpr());
+
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
+ const RecordType *DerivedClassTy =
+ E->getSubExpr()->getType()->getAs<RecordType>();
+ CXXRecordDecl *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+ LValue LV = EmitLValue(E->getSubExpr());
+ llvm::Value *This = LV.getAddress();
+
+ // Perform the derived-to-base conversion
+ llvm::Value *Base =
+ GetAddressOfBaseClass(This, DerivedClassDecl,
+ E->path_begin(), E->path_end(),
+ /*NullCheckValue=*/false);
+
+ return MakeAddrLValue(Base, E->getType());
+ }
+ case CK_ToUnion:
+ return EmitAggExprToLValue(E);
+ case CK_BaseToDerived: {
+ const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
+ CXXRecordDecl *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+ LValue LV = EmitLValue(E->getSubExpr());
+
+ // Perform the base-to-derived conversion
+ llvm::Value *Derived =
+ GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
+ E->path_begin(), E->path_end(),
+ /*NullCheckValue=*/false);
+
+ return MakeAddrLValue(Derived, E->getType());
+ }
+ case CK_LValueBitCast: {
+ // This must be a reinterpret_cast (or c-style equivalent).
+ const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
+
+ LValue LV = EmitLValue(E->getSubExpr());
+ llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
+ ConvertType(CE->getTypeAsWritten()));
+ return MakeAddrLValue(V, E->getType());
+ }
+ case CK_ObjCObjectLValueCast: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ QualType ToType = getContext().getLValueReferenceType(E->getType());
+ llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
+ ConvertType(ToType));
+ return MakeAddrLValue(V, E->getType());
+ }
+ }
+
+ llvm_unreachable("Unhandled lvalue cast kind?");
+}
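
For reference, a few of the cast kinds above in source form (illustrative sketch, not from the patch):

    struct Base    { int b; };
    struct Derived : Base { int d; };

    void lvalue_casts(Derived &dr, int &i) {
      Base &br = dr;                        // derived-to-base: GetAddressOfBaseClass
      static_cast<Derived &>(br).d = 1;     // CK_BaseToDerived: GetAddressOfDerivedClass
      reinterpret_cast<unsigned &>(i) = 0;  // CK_LValueBitCast: bitcast of the address
    }
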
+
+LValue CodeGenFunction::EmitNullInitializationLValue(
+ const CXXScalarValueInitExpr *E) {
+ QualType Ty = E->getType();
+ LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
+ EmitNullInitialization(LV.getAddress(), Ty);
+ return LV;
+}
+
+LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
+ assert(OpaqueValueMappingData::shouldBindAsLValue(e));
+ return getOpaqueLValueMapping(e);
+}
+
+LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *E) {
+ RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
+ return MakeAddrLValue(RV.getScalarVal(), E->getType());
+}
+
+
+//===--------------------------------------------------------------------===//
+// Expression Emission
+//===--------------------------------------------------------------------===//
+
+RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitLocation(Builder, E->getLocStart());
+
+ // Builtins never have block type.
+ if (E->getCallee()->getType()->isBlockPointerType())
+ return EmitBlockCallExpr(E, ReturnValue);
+
+ if (const CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(E))
+ return EmitCXXMemberCallExpr(CE, ReturnValue);
+
+ if (const CUDAKernelCallExpr *CE = dyn_cast<CUDAKernelCallExpr>(E))
+ return EmitCUDAKernelCallExpr(CE, ReturnValue);
+
+ const Decl *TargetDecl = E->getCalleeDecl();
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
+ if (unsigned builtinID = FD->getBuiltinID())
+ return EmitBuiltinExpr(FD, builtinID, E);
+ }
+
+ if (const CXXOperatorCallExpr *CE = dyn_cast<CXXOperatorCallExpr>(E))
+ if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
+ return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
+
+ if (const CXXPseudoDestructorExpr *PseudoDtor
+ = dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
+ QualType DestroyedType = PseudoDtor->getDestroyedType();
+ if (getContext().getLangOpts().ObjCAutoRefCount &&
+ DestroyedType->isObjCLifetimeType() &&
+ (DestroyedType.getObjCLifetime() == Qualifiers::OCL_Strong ||
+ DestroyedType.getObjCLifetime() == Qualifiers::OCL_Weak)) {
+ // Automatic Reference Counting:
+ // If the pseudo-expression names a retainable object with weak or
+ // strong lifetime, the object shall be released.
+ Expr *BaseExpr = PseudoDtor->getBase();
+ llvm::Value *BaseValue = NULL;
+ Qualifiers BaseQuals;
+
+ // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
+ if (PseudoDtor->isArrow()) {
+ BaseValue = EmitScalarExpr(BaseExpr);
+ const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
+ BaseQuals = PTy->getPointeeType().getQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ BaseValue = BaseLV.getAddress();
+ QualType BaseTy = BaseExpr->getType();
+ BaseQuals = BaseTy.getQualifiers();
+ }
+
+ switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ EmitARCRelease(Builder.CreateLoad(BaseValue,
+ PseudoDtor->getDestroyedType().isVolatileQualified()),
+ /*precise*/ true);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ EmitARCDestroyWeak(BaseValue);
+ break;
+ }
+ } else {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ EmitScalarExpr(E->getCallee());
+ }
+
+ return RValue::get(0);
+ }
+
+ llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+ return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
+ E->arg_begin(), E->arg_end(), TargetDecl);
+}
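
For reference, the C++ pseudo-destructor case quoted from [expr.pseudo]p1 above, in source form (illustrative sketch): outside ARC, only the postfix-expression is evaluated.

    typedef int I;

    I *advance(I *p) {
      p->~I();       // pseudo-destructor call: 'int' has no destructor,
      return p + 1;  //   so only the base expression 'p' is evaluated
    }
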
+
+LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
+ // Comma expressions just emit their LHS then their RHS as an l-value.
+ if (E->getOpcode() == BO_Comma) {
+ EmitIgnoredExpr(E->getLHS());
+ EnsureInsertPoint();
+ return EmitLValue(E->getRHS());
+ }
+
+ if (E->getOpcode() == BO_PtrMemD ||
+ E->getOpcode() == BO_PtrMemI)
+ return EmitPointerToDataMemberBinaryExpr(E);
+
+ assert(E->getOpcode() == BO_Assign && "unexpected binary l-value");
+
+ // Note that in all of these cases, __block variables need the RHS
+ // evaluated first just in case the variable gets moved by the RHS.
+
+ if (!hasAggregateLLVMType(E->getType())) {
+ switch (E->getLHS()->getType().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ return EmitARCStoreStrong(E, /*ignored*/ false).first;
+
+ case Qualifiers::OCL_Autoreleasing:
+ return EmitARCStoreAutoreleasing(E).first;
+
+ // No reason to do any of these differently.
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Weak:
+ break;
+ }
+
+ RValue RV = EmitAnyExpr(E->getRHS());
+ LValue LV = EmitLValue(E->getLHS());
+ EmitStoreThroughLValue(RV, LV);
+ return LV;
+ }
+
+ if (E->getType()->isAnyComplexType())
+ return EmitComplexAssignmentLValue(E);
+
+ return EmitAggExprToLValue(E);
+}
+
+LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
+ RValue RV = EmitCallExpr(E);
+
+ if (!RV.isScalar())
+ return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
+
+ assert(E->getCallReturnType()->isReferenceType() &&
+ "Can't have a scalar return unless the return type is a "
+ "reference type!");
+
+ return MakeAddrLValue(RV.getScalarVal(), E->getType());
+}
+
+LValue CodeGenFunction::EmitVAArgExprLValue(const VAArgExpr *E) {
+ // FIXME: This shouldn't require another copy.
+ return EmitAggExprToLValue(E);
+}
+
+LValue CodeGenFunction::EmitCXXConstructLValue(const CXXConstructExpr *E) {
+ assert(E->getType()->getAsCXXRecordDecl()->hasTrivialDestructor()
+ && "binding l-value to type which needs a temporary");
+ AggValueSlot Slot = CreateAggTemp(E->getType());
+ EmitCXXConstructExpr(E, Slot);
+ return MakeAddrLValue(Slot.getAddr(), E->getType());
+}
+
+LValue
+CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
+ return MakeAddrLValue(EmitCXXTypeidExpr(E), E->getType());
+}
+
+LValue
+CodeGenFunction::EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E) {
+ AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
+ Slot.setExternallyDestructed();
+ EmitAggExpr(E->getSubExpr(), Slot);
+ EmitCXXTemporary(E->getTemporary(), E->getType(), Slot.getAddr());
+ return MakeAddrLValue(Slot.getAddr(), E->getType());
+}
+
+LValue
+CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
+ AggValueSlot Slot = CreateAggTemp(E->getType(), "temp.lvalue");
+ EmitLambdaExpr(E, Slot);
+ return MakeAddrLValue(Slot.getAddr(), E->getType());
+}
+
+LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
+ RValue RV = EmitObjCMessageExpr(E);
+
+ if (!RV.isScalar())
+ return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
+
+ assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
+ "Can't have a scalar return unless the return type is a "
+ "reference type!");
+
+ return MakeAddrLValue(RV.getScalarVal(), E->getType());
+}
+
+LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
+ llvm::Value *V =
+ CGM.getObjCRuntime().GetSelector(Builder, E->getSelector(), true);
+ return MakeAddrLValue(V, E->getType());
+}
+
+llvm::Value *CodeGenFunction::EmitIvarOffset(const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ return CGM.getObjCRuntime().EmitIvarOffset(*this, Interface, Ivar);
+}
+
+LValue CodeGenFunction::EmitLValueForIvar(QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ return CGM.getObjCRuntime().EmitObjCValueForIvar(*this, ObjectTy, BaseValue,
+ Ivar, CVRQualifiers);
+}
+
+LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
+ // FIXME: A lot of the code below could be shared with EmitMemberExpr.
+ llvm::Value *BaseValue = 0;
+ const Expr *BaseExpr = E->getBase();
+ Qualifiers BaseQuals;
+ QualType ObjectTy;
+ if (E->isArrow()) {
+ BaseValue = EmitScalarExpr(BaseExpr);
+ ObjectTy = BaseExpr->getType()->getPointeeType();
+ BaseQuals = ObjectTy.getQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ // FIXME: this isn't right for bitfields.
+ BaseValue = BaseLV.getAddress();
+ ObjectTy = BaseExpr->getType();
+ BaseQuals = ObjectTy.getQualifiers();
+ }
+
+ LValue LV =
+ EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
+ BaseQuals.getCVRQualifiers());
+ setObjCGCLValueClass(getContext(), E, LV);
+ return LV;
+}
+
+LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
+ // Can only get an l-value for a statement expression returning an aggregate type.
+ RValue RV = EmitAnyExprToTemp(E);
+ return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
+}
+
+RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ const Decl *TargetDecl) {
+ // Get the actual function type. The callee type will always be a pointer to
+ // function type or a block pointer type.
+ assert(CalleeType->isFunctionPointerType() &&
+ "Call must have function pointer type!");
+
+ CalleeType = getContext().getCanonicalType(CalleeType);
+
+ const FunctionType *FnType
+ = cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
+
+ CallArgList Args;
+ EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
+
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().arrangeFunctionCall(Args, FnType);
+
+ // C99 6.5.2.2p6:
+ // If the expression that denotes the called function has a type
+ // that does not include a prototype, [the default argument
+ // promotions are performed]. If the number of arguments does not
+ // equal the number of parameters, the behavior is undefined. If
+ // the function is defined with a type that includes a prototype,
+ // and either the prototype ends with an ellipsis (, ...) or the
+ // types of the arguments after promotion are not compatible with
+ // the types of the parameters, the behavior is undefined. If the
+ // function is defined with a type that does not include a
+ // prototype, and the types of the arguments after promotion are
+ // not compatible with those of the parameters after promotion,
+ // the behavior is undefined [except in some trivial cases].
+ // That is, in the general case, we should assume that a call
+ // through an unprototyped function type works like a *non-variadic*
+ // call. The way we make this work is to cast to the exact type
+ // of the promoted arguments.
+ if (isa<FunctionNoProtoType>(FnType) && !FnInfo.isVariadic()) {
+ llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
+ CalleeTy = CalleeTy->getPointerTo();
+ Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
+ }
+
+ return EmitCall(FnInfo, Callee, ReturnValue, Args, TargetDecl);
+}
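
For reference, the unprototyped-call situation the C99 excerpt above describes, in C source form (illustrative sketch): the callee is bitcast to the promoted argument types ("callee.knr.cast") and the call is emitted as non-variadic.

    /* C, not C++: an empty parameter list declares a function without a prototype. */
    void log_event();               /* FunctionNoProtoType */

    void fire(void) {
      log_event(7, 3.5);            /* callee cast to void (*)(int, double) before the call */
    }
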
+
+LValue CodeGenFunction::
+EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
+ llvm::Value *BaseV;
+ if (E->getOpcode() == BO_PtrMemI)
+ BaseV = EmitScalarExpr(E->getLHS());
+ else
+ BaseV = EmitLValue(E->getLHS()).getAddress();
+
+ llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
+
+ const MemberPointerType *MPT
+ = E->getRHS()->getType()->getAs<MemberPointerType>();
+
+ llvm::Value *AddV =
+ CGM.getCXXABI().EmitMemberDataPointerAddress(*this, BaseV, OffsetV, MPT);
+
+ return MakeAddrLValue(AddV, MPT->getPointeeType());
+}
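
For reference, the two pointer-to-data-member operators handled above, in source form (illustrative sketch):

    struct Point { int x, y; };

    int read(Point &p, Point *q, int Point::*pm) {
      return p.*pm      // BO_PtrMemD: the base is emitted as an l-value
           + q->*pm;    // BO_PtrMemI: the base is emitted as a scalar pointer
    }
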
+
+static void
+EmitAtomicOp(CodeGenFunction &CGF, AtomicExpr *E, llvm::Value *Dest,
+ llvm::Value *Ptr, llvm::Value *Val1, llvm::Value *Val2,
+ uint64_t Size, unsigned Align, llvm::AtomicOrdering Order) {
+ llvm::AtomicRMWInst::BinOp Op = llvm::AtomicRMWInst::Add;
+ llvm::Instruction::BinaryOps PostOp = (llvm::Instruction::BinaryOps)0;
+
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled!");
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n: {
+ // Note that cmpxchg only supports specifying one ordering and
+ // doesn't support weak cmpxchg, at least at the moment.
+ llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
+ LoadVal1->setAlignment(Align);
+ llvm::LoadInst *LoadVal2 = CGF.Builder.CreateLoad(Val2);
+ LoadVal2->setAlignment(Align);
+ llvm::AtomicCmpXchgInst *CXI =
+ CGF.Builder.CreateAtomicCmpXchg(Ptr, LoadVal1, LoadVal2, Order);
+ CXI->setVolatile(E->isVolatile());
+ llvm::StoreInst *StoreVal1 = CGF.Builder.CreateStore(CXI, Val1);
+ StoreVal1->setAlignment(Align);
+ llvm::Value *Cmp = CGF.Builder.CreateICmpEQ(CXI, LoadVal1);
+ CGF.EmitStoreOfScalar(Cmp, CGF.MakeAddrLValue(Dest, E->getType()));
+ return;
+ }
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ case AtomicExpr::AO__atomic_load: {
+ llvm::LoadInst *Load = CGF.Builder.CreateLoad(Ptr);
+ Load->setAtomic(Order);
+ Load->setAlignment(Size);
+ Load->setVolatile(E->isVolatile());
+ llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Load, Dest);
+ StoreDest->setAlignment(Align);
+ return;
+ }
+
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n: {
+ assert(!Dest && "Store does not return a value");
+ llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
+ LoadVal1->setAlignment(Align);
+ llvm::StoreInst *Store = CGF.Builder.CreateStore(LoadVal1, Ptr);
+ Store->setAtomic(Order);
+ Store->setAlignment(Size);
+ Store->setVolatile(E->isVolatile());
+ return;
+ }
+
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ Op = llvm::AtomicRMWInst::Xchg;
+ break;
+
+ case AtomicExpr::AO__atomic_add_fetch:
+ PostOp = llvm::Instruction::Add;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ Op = llvm::AtomicRMWInst::Add;
+ break;
+
+ case AtomicExpr::AO__atomic_sub_fetch:
+ PostOp = llvm::Instruction::Sub;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ Op = llvm::AtomicRMWInst::Sub;
+ break;
+
+ case AtomicExpr::AO__atomic_and_fetch:
+ PostOp = llvm::Instruction::And;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ Op = llvm::AtomicRMWInst::And;
+ break;
+
+ case AtomicExpr::AO__atomic_or_fetch:
+ PostOp = llvm::Instruction::Or;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ Op = llvm::AtomicRMWInst::Or;
+ break;
+
+ case AtomicExpr::AO__atomic_xor_fetch:
+ PostOp = llvm::Instruction::Xor;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ Op = llvm::AtomicRMWInst::Xor;
+ break;
+
+ case AtomicExpr::AO__atomic_nand_fetch:
+ PostOp = llvm::Instruction::And;
+ // Fall through.
+ case AtomicExpr::AO__atomic_fetch_nand:
+ Op = llvm::AtomicRMWInst::Nand;
+ break;
+ }
+
+ llvm::LoadInst *LoadVal1 = CGF.Builder.CreateLoad(Val1);
+ LoadVal1->setAlignment(Align);
+ llvm::AtomicRMWInst *RMWI =
+ CGF.Builder.CreateAtomicRMW(Op, Ptr, LoadVal1, Order);
+ RMWI->setVolatile(E->isVolatile());
+
+ // For __atomic_*_fetch operations, perform the operation again to
+ // determine the value which was written.
+ llvm::Value *Result = RMWI;
+ if (PostOp)
+ Result = CGF.Builder.CreateBinOp(PostOp, RMWI, LoadVal1);
+ if (E->getOp() == AtomicExpr::AO__atomic_nand_fetch)
+ Result = CGF.Builder.CreateNot(Result);
+ llvm::StoreInst *StoreDest = CGF.Builder.CreateStore(Result, Dest);
+ StoreDest->setAlignment(Align);
+}
+
+// This function emits any expression (scalar, complex, or aggregate)
+// into a temporary alloca.
+static llvm::Value *
+EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
+ llvm::Value *DeclPtr = CGF.CreateMemTemp(E->getType(), ".atomictmp");
+ CGF.EmitAnyExprToMem(E, DeclPtr, E->getType().getQualifiers(),
+ /*Init*/ true);
+ return DeclPtr;
+}
+
+static RValue ConvertTempToRValue(CodeGenFunction &CGF, QualType Ty,
+ llvm::Value *Dest) {
+ if (Ty->isAnyComplexType())
+ return RValue::getComplex(CGF.LoadComplexFromAddr(Dest, false));
+ if (CGF.hasAggregateLLVMType(Ty))
+ return RValue::getAggregate(Dest);
+ return RValue::get(CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(Dest, Ty)));
+}
+
+RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
+ QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
+ QualType MemTy = AtomicTy;
+ if (const AtomicType *AT = AtomicTy->getAs<AtomicType>())
+ MemTy = AT->getValueType();
+ CharUnits sizeChars = getContext().getTypeSizeInChars(AtomicTy);
+ uint64_t Size = sizeChars.getQuantity();
+ CharUnits alignChars = getContext().getTypeAlignInChars(AtomicTy);
+ unsigned Align = alignChars.getQuantity();
+ unsigned MaxInlineWidth =
+ getContext().getTargetInfo().getMaxAtomicInlineWidth();
+ bool UseLibcall = (Size != Align || Size > MaxInlineWidth);
+
+
+
+ llvm::Value *Ptr, *Order, *OrderFail = 0, *Val1 = 0, *Val2 = 0;
+ Ptr = EmitScalarExpr(E->getPtr());
+
+ if (E->getOp() == AtomicExpr::AO__c11_atomic_init) {
+ assert(!Dest && "Init does not return a value");
+ if (!hasAggregateLLVMType(E->getVal1()->getType())) {
+ QualType PointeeType
+ = E->getPtr()->getType()->getAs<PointerType>()->getPointeeType();
+ EmitScalarInit(EmitScalarExpr(E->getVal1()),
+ LValue::MakeAddr(Ptr, PointeeType, alignChars,
+ getContext()));
+ } else if (E->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(E->getVal1(), Ptr, E->isVolatile());
+ } else {
+ AggValueSlot Slot = AggValueSlot::forAddr(Ptr, alignChars,
+ AtomicTy.getQualifiers(),
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ EmitAggExpr(E->getVal1(), Slot);
+ }
+ return RValue::get(0);
+ }
+
+ Order = EmitScalarExpr(E->getOrder());
+
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_init:
+ llvm_unreachable("Already handled!");
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ break;
+
+ case AtomicExpr::AO__atomic_load:
+ Dest = EmitScalarExpr(E->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_store:
+ Val1 = EmitScalarExpr(E->getVal1());
+ break;
+
+ case AtomicExpr::AO__atomic_exchange:
+ Val1 = EmitScalarExpr(E->getVal1());
+ Dest = EmitScalarExpr(E->getVal2());
+ break;
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ Val1 = EmitScalarExpr(E->getVal1());
+ if (E->getOp() == AtomicExpr::AO__atomic_compare_exchange)
+ Val2 = EmitScalarExpr(E->getVal2());
+ else
+ Val2 = EmitValToTemp(*this, E->getVal2());
+ OrderFail = EmitScalarExpr(E->getOrderFail());
+ // Evaluate and discard the 'weak' argument.
+ if (E->getNumSubExprs() == 6)
+ EmitScalarExpr(E->getWeak());
+ break;
+
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ if (MemTy->isPointerType()) {
+ // For pointer arithmetic, we're required to do a bit of math:
+ // adding 1 to an int* is not the same as adding 1 to a uintptr_t.
+ // ... but only for the C11 builtins. The GNU builtins expect the
+ // user to multiply by sizeof(T).
+ QualType Val1Ty = E->getVal1()->getType();
+ llvm::Value *Val1Scalar = EmitScalarExpr(E->getVal1());
+ CharUnits PointeeIncAmt =
+ getContext().getTypeSizeInChars(MemTy->getPointeeType());
+ Val1Scalar = Builder.CreateMul(Val1Scalar, CGM.getSize(PointeeIncAmt));
+ Val1 = CreateMemTemp(Val1Ty, ".atomictmp");
+ EmitStoreOfScalar(Val1Scalar, MakeAddrLValue(Val1, Val1Ty));
+ break;
+ }
+ // Fall through.
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_store_n:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
+ Val1 = EmitValToTemp(*this, E->getVal1());
+ break;
+ }
+
+ if (!E->getType()->isVoidType() && !Dest)
+ Dest = CreateMemTemp(E->getType(), ".atomicdst");
+
+ // Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
+ if (UseLibcall) {
+
+ llvm::SmallVector<QualType, 5> Params;
+ CallArgList Args;
+ // Size is always the first parameter
+ Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
+ getContext().getSizeType());
+ // Atomic address is always the second parameter
+ Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
+ getContext().VoidPtrTy);
+
+ const char* LibCallName;
+ QualType RetTy = getContext().VoidTy;
+ switch (E->getOp()) {
+ // There is only one libcall for compare and exchange, because there is no
+ // optimization benefit possible from a libcall version of a weak compare
+ // and exchange.
+ // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
+ // void *desired, int success, int failure)
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ LibCallName = "__atomic_compare_exchange";
+ RetTy = getContext().BoolTy;
+ Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(Order),
+ getContext().IntTy);
+ Order = OrderFail;
+ break;
+ // void __atomic_exchange(size_t size, void *mem, void *val, void *return,
+ // int order)
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ case AtomicExpr::AO__atomic_exchange:
+ LibCallName = "__atomic_exchange";
+ Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
+ getContext().VoidPtrTy);
+ break;
+ // void __atomic_store(size_t size, void *mem, void *val, int order)
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n:
+ LibCallName = "__atomic_store";
+ Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
+ getContext().VoidPtrTy);
+ break;
+ // void __atomic_load(size_t size, void *mem, void *return, int order)
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ LibCallName = "__atomic_load";
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
+ getContext().VoidPtrTy);
+ break;
+#if 0
+ // These are only defined for 1-16 byte integers. It is not clear what
+ // their semantics would be on anything else...
+ case AtomicExpr::Add: LibCallName = "__atomic_fetch_add_generic"; break;
+ case AtomicExpr::Sub: LibCallName = "__atomic_fetch_sub_generic"; break;
+ case AtomicExpr::And: LibCallName = "__atomic_fetch_and_generic"; break;
+ case AtomicExpr::Or: LibCallName = "__atomic_fetch_or_generic"; break;
+ case AtomicExpr::Xor: LibCallName = "__atomic_fetch_xor_generic"; break;
+#endif
+ default: return EmitUnsupportedRValue(E, "atomic library call");
+ }
+ // order is always the last parameter
+ Args.add(RValue::get(Order),
+ getContext().IntTy);
+
+ const CGFunctionInfo &FuncInfo =
+ CGM.getTypes().arrangeFunctionCall(RetTy, Args,
+ FunctionType::ExtInfo(), RequiredArgs::All);
+ llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
+ llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
+ RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
+ if (E->isCmpXChg())
+ return Res;
+ if (E->getType()->isVoidType())
+ return RValue::get(0);
+ return ConvertTempToRValue(*this, E->getType(), Dest);
+ }
+
+ llvm::Type *IPtrTy =
+ llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
+ llvm::Value *OrigDest = Dest;
+ Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
+ if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
+ if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
+ if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);
+
+ if (isa<llvm::ConstantInt>(Order)) {
+ int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
+ switch (ord) {
+ case 0: // memory_order_relaxed
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Monotonic);
+ break;
+ case 1: // memory_order_consume
+ case 2: // memory_order_acquire
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Acquire);
+ break;
+ case 3: // memory_order_release
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Release);
+ break;
+ case 4: // memory_order_acq_rel
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::AcquireRelease);
+ break;
+ case 5: // memory_order_seq_cst
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::SequentiallyConsistent);
+ break;
+ default: // invalid order
+ // We should not ever get here normally, but it's hard to
+ // enforce that in general.
+ break;
+ }
+ if (E->getType()->isVoidType())
+ return RValue::get(0);
+ return ConvertTempToRValue(*this, E->getType(), OrigDest);
+ }
+
+ // Long case, when Order isn't obviously constant.
+
+ bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store ||
+ E->getOp() == AtomicExpr::AO__atomic_store_n;
+ bool IsLoad = E->getOp() == AtomicExpr::AO__c11_atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load ||
+ E->getOp() == AtomicExpr::AO__atomic_load_n;
+
+ // Create all the relevant BB's
+ llvm::BasicBlock *MonotonicBB = 0, *AcquireBB = 0, *ReleaseBB = 0,
+ *AcqRelBB = 0, *SeqCstBB = 0;
+ MonotonicBB = createBasicBlock("monotonic", CurFn);
+ if (!IsStore)
+ AcquireBB = createBasicBlock("acquire", CurFn);
+ if (!IsLoad)
+ ReleaseBB = createBasicBlock("release", CurFn);
+ if (!IsLoad && !IsStore)
+ AcqRelBB = createBasicBlock("acqrel", CurFn);
+ SeqCstBB = createBasicBlock("seqcst", CurFn);
+ llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);
+
+ // Create the switch for the split
+ // MonotonicBB is arbitrarily chosen as the default case; in practice, this
+ // doesn't matter unless someone is crazy enough to use something that
+ // doesn't fold to a constant for the ordering.
+ Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
+ llvm::SwitchInst *SI = Builder.CreateSwitch(Order, MonotonicBB);
+
+ // Emit all the different atomics
+ Builder.SetInsertPoint(MonotonicBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Monotonic);
+ Builder.CreateBr(ContBB);
+ if (!IsStore) {
+ Builder.SetInsertPoint(AcquireBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Acquire);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(1), AcquireBB);
+ SI->addCase(Builder.getInt32(2), AcquireBB);
+ }
+ if (!IsLoad) {
+ Builder.SetInsertPoint(ReleaseBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::Release);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(3), ReleaseBB);
+ }
+ if (!IsLoad && !IsStore) {
+ Builder.SetInsertPoint(AcqRelBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::AcquireRelease);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(4), AcqRelBB);
+ }
+ Builder.SetInsertPoint(SeqCstBB);
+ EmitAtomicOp(*this, E, Dest, Ptr, Val1, Val2, Size, Align,
+ llvm::SequentiallyConsistent);
+ Builder.CreateBr(ContBB);
+ SI->addCase(Builder.getInt32(5), SeqCstBB);
+
+ // Cleanup and return
+ Builder.SetInsertPoint(ContBB);
+ if (E->getType()->isVoidType())
+ return RValue::get(0);
+ return ConvertTempToRValue(*this, E->getType(), OrigDest);
+}
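
For reference, atomic builtins that exercise the two paths above (illustrative sketch, not from the patch): a constant ordering is emitted directly, while a run-time ordering falls into the basic-block switch.

    int counter;

    int bump(void) {
      /* Constant ordering: a single atomicrmw add, plus the post-op add for *_fetch. */
      return __atomic_add_fetch(&counter, 1, __ATOMIC_SEQ_CST);
    }

    int load_with(int order) {
      /* Ordering only known at run time: lowered through the switch built above,
         ending at "atomic.continue" (for a load, only the monotonic, acquire, and
         seq_cst blocks are created). */
      return __atomic_load_n(&counter, order);
    }
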
+
+void CodeGenFunction::SetFPAccuracy(llvm::Value *Val, float Accuracy) {
+ assert(Val->getType()->isFPOrFPVectorTy());
+ if (Accuracy == 0.0 || !isa<llvm::Instruction>(Val))
+ return;
+
+ llvm::Value *ULPs = llvm::ConstantFP::get(Builder.getFloatTy(), Accuracy);
+ llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(), ULPs);
+
+ cast<llvm::Instruction>(Val)->setMetadata(llvm::LLVMContext::MD_fpaccuracy,
+ Node);
+}
+
+namespace {
+ struct LValueOrRValue {
+ LValue LV;
+ RValue RV;
+ };
+}
+
+static LValueOrRValue emitPseudoObjectExpr(CodeGenFunction &CGF,
+ const PseudoObjectExpr *E,
+ bool forLValue,
+ AggValueSlot slot) {
+ llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
+
+ // Find the result expression, if any.
+ const Expr *resultExpr = E->getResultExpr();
+ LValueOrRValue result;
+
+ for (PseudoObjectExpr::const_semantics_iterator
+ i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
+ const Expr *semantic = *i;
+
+ // If this semantic expression is an opaque value, bind it
+ // to the result of its source expression.
+ if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
+
+ // If this is the result expression, we may need to evaluate
+ // directly into the slot.
+ typedef CodeGenFunction::OpaqueValueMappingData OVMA;
+ OVMA opaqueData;
+ if (ov == resultExpr && ov->isRValue() && !forLValue &&
+ CodeGenFunction::hasAggregateLLVMType(ov->getType()) &&
+ !ov->getType()->isAnyComplexType()) {
+ CGF.EmitAggExpr(ov->getSourceExpr(), slot);
+
+ LValue LV = CGF.MakeAddrLValue(slot.getAddr(), ov->getType());
+ opaqueData = OVMA::bind(CGF, ov, LV);
+ result.RV = slot.asRValue();
+
+ // Otherwise, emit as normal.
+ } else {
+ opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
+
+ // If this is the result, also evaluate the result now.
+ if (ov == resultExpr) {
+ if (forLValue)
+ result.LV = CGF.EmitLValue(ov);
+ else
+ result.RV = CGF.EmitAnyExpr(ov, slot);
+ }
+ }
+
+ opaques.push_back(opaqueData);
+
+ // Otherwise, if the expression is the result, evaluate it
+ // and remember the result.
+ } else if (semantic == resultExpr) {
+ if (forLValue)
+ result.LV = CGF.EmitLValue(semantic);
+ else
+ result.RV = CGF.EmitAnyExpr(semantic, slot);
+
+ // Otherwise, evaluate the expression in an ignored context.
+ } else {
+ CGF.EmitIgnoredExpr(semantic);
+ }
+ }
+
+ // Unbind all the opaques now.
+ for (unsigned i = 0, e = opaques.size(); i != e; ++i)
+ opaques[i].unbind(CGF);
+
+ return result;
+}
+
+RValue CodeGenFunction::EmitPseudoObjectRValue(const PseudoObjectExpr *E,
+ AggValueSlot slot) {
+ return emitPseudoObjectExpr(*this, E, false, slot).RV;
+}
+
+LValue CodeGenFunction::EmitPseudoObjectLValue(const PseudoObjectExpr *E) {
+ return emitPseudoObjectExpr(*this, E, true, AggValueSlot::ignored()).LV;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
new file mode 100644
index 0000000..b6efc1c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprAgg.cpp
@@ -0,0 +1,1343 @@
+//===--- CGExprAgg.cpp - Emit LLVM Code from Aggregate Expressions --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Aggregate Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGObjCRuntime.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Aggregate Expression Emitter
+//===----------------------------------------------------------------------===//
+
+namespace {
+class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ AggValueSlot Dest;
+ bool IgnoreResult;
+
+ /// We want to use 'dest' as the return slot except under two
+ /// conditions:
+ /// - The destination slot requires garbage collection, so we
+ /// need to use the GC API.
+ /// - The destination slot is potentially aliased.
+ bool shouldUseDestForReturnSlot() const {
+ return !(Dest.requiresGCollection() || Dest.isPotentiallyAliased());
+ }
+
+ ReturnValueSlot getReturnValueSlot() const {
+ if (!shouldUseDestForReturnSlot())
+ return ReturnValueSlot();
+
+ return ReturnValueSlot(Dest.getAddr(), Dest.isVolatile());
+ }
+
+ AggValueSlot EnsureSlot(QualType T) {
+ if (!Dest.isIgnored()) return Dest;
+ return CGF.CreateAggTemp(T, "agg.tmp.ensured");
+ }
+
+public:
+ AggExprEmitter(CodeGenFunction &cgf, AggValueSlot Dest,
+ bool ignore)
+ : CGF(cgf), Builder(CGF.Builder), Dest(Dest),
+ IgnoreResult(ignore) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ /// EmitAggLoadOfLValue - Given an expression with aggregate type that
+ /// represents an lvalue, this method emits the address of the lvalue,
+ /// then loads the result into DestPtr.
+ void EmitAggLoadOfLValue(const Expr *E);
+
+ /// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+ void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
+ void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false,
+ unsigned Alignment = 0);
+
+ void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
+
+ void EmitStdInitializerList(llvm::Value *DestPtr, InitListExpr *InitList);
+ void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
+ QualType elementType, InitListExpr *E);
+
+ AggValueSlot::NeedsGCBarriers_t needsGC(QualType T) {
+ if (CGF.getLangOpts().getGC() && TypeRequiresGCollection(T))
+ return AggValueSlot::NeedsGCBarriers;
+ return AggValueSlot::DoesNotNeedGCBarriers;
+ }
+
+ bool TypeRequiresGCollection(QualType T);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ void VisitStmt(Stmt *S) {
+ CGF.ErrorUnsupported(S, "aggregate expression");
+ }
+ void VisitParenExpr(ParenExpr *PE) { Visit(PE->getSubExpr()); }
+ void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+ Visit(GE->getResultExpr());
+ }
+ void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
+ void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
+ return Visit(E->getReplacement());
+ }
+
+ // l-values.
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ // For aggregates, we should always be able to emit the variable
+ // as an l-value unless it's a reference. This is due to the fact
+ // that we can't actually ever see a normal l2r conversion on an
+ // aggregate in C++, and in C there's no language standard
+ // actively preventing us from listing variables in the captures
+ // list of a block.
+ if (E->getDecl()->getType()->isReferenceType()) {
+ if (CodeGenFunction::ConstantEmission result
+ = CGF.tryEmitAsConstant(E)) {
+ EmitFinalDestCopy(E, result.getReferenceLValue(CGF, E));
+ return;
+ }
+ }
+
+ EmitAggLoadOfLValue(E);
+ }
+
+ void VisitMemberExpr(MemberExpr *ME) { EmitAggLoadOfLValue(ME); }
+ void VisitUnaryDeref(UnaryOperator *E) { EmitAggLoadOfLValue(E); }
+ void VisitStringLiteral(StringLiteral *E) { EmitAggLoadOfLValue(E); }
+ void VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
+ void VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+ void VisitPredefinedExpr(const PredefinedExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+
+ // Operators.
+ void VisitCastExpr(CastExpr *E);
+ void VisitCallExpr(const CallExpr *E);
+ void VisitStmtExpr(const StmtExpr *E);
+ void VisitBinaryOperator(const BinaryOperator *BO);
+ void VisitPointerToDataMemberBinaryOperator(const BinaryOperator *BO);
+ void VisitBinAssign(const BinaryOperator *E);
+ void VisitBinComma(const BinaryOperator *E);
+
+ void VisitObjCMessageExpr(ObjCMessageExpr *E);
+ void VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ EmitAggLoadOfLValue(E);
+ }
+
+ void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
+ void VisitChooseExpr(const ChooseExpr *CE);
+ void VisitInitListExpr(InitListExpr *E);
+ void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
+ void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ Visit(DAE->getExpr());
+ }
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
+ void VisitCXXConstructExpr(const CXXConstructExpr *E);
+ void VisitLambdaExpr(LambdaExpr *E);
+ void VisitExprWithCleanups(ExprWithCleanups *E);
+ void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
+ void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
+ void VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E);
+ void VisitOpaqueValueExpr(OpaqueValueExpr *E);
+
+ void VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ if (E->isGLValue()) {
+ LValue LV = CGF.EmitPseudoObjectLValue(E);
+ return EmitFinalDestCopy(E, LV);
+ }
+
+ CGF.EmitPseudoObjectRValue(E, EnsureSlot(E->getType()));
+ }
+
+ void VisitVAArgExpr(VAArgExpr *E);
+
+ void EmitInitializationToLValue(Expr *E, LValue Address);
+ void EmitNullInitializationToLValue(LValue Address);
+ // case Expr::ChooseExprClass:
+ void VisitCXXThrowExpr(const CXXThrowExpr *E) { CGF.EmitCXXThrowExpr(E); }
+ void VisitAtomicExpr(AtomicExpr *E) {
+ CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
+ }
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitAggLoadOfLValue - Given an expression with aggregate type that
+/// represents an lvalue, this method emits the address of the lvalue,
+/// then loads the result into DestPtr.
+void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
+ LValue LV = CGF.EmitLValue(E);
+ EmitFinalDestCopy(E, LV);
+}
+
+/// \brief True if the given aggregate type requires special GC API calls.
+bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
+ // Only record types have members that might require garbage collection.
+ const RecordType *RecordTy = T->getAs<RecordType>();
+ if (!RecordTy) return false;
+
+ // Don't mess with non-trivial C++ types.
+ RecordDecl *Record = RecordTy->getDecl();
+ if (isa<CXXRecordDecl>(Record) &&
+ (!cast<CXXRecordDecl>(Record)->hasTrivialCopyConstructor() ||
+ !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
+ return false;
+
+ // Check whether the type has an object member.
+ return Record->hasObjectMember();
+}
+
+/// \brief Perform the final move to DestPtr if for some reason
+/// getReturnValueSlot() didn't use it directly.
+///
+/// The idea is that you do something like this:
+/// RValue Result = EmitSomething(..., getReturnValueSlot());
+/// EmitMoveFromReturnSlot(E, Result);
+///
+/// If nothing interferes, this will cause the result to be emitted
+/// directly into the return value slot. Otherwise, a final move
+/// will be performed.
+void AggExprEmitter::EmitMoveFromReturnSlot(const Expr *E, RValue Src) {
+ if (shouldUseDestForReturnSlot()) {
+ // Logically, Dest.getAddr() should equal Src.getAggregateAddr().
+ // The possibility of undef rvalues complicates that a lot,
+ // though, so we can't really assert.
+ return;
+ }
+
+ // Otherwise, do a final copy.
+ assert(Dest.getAddr() != Src.getAggregateAddr());
+ EmitFinalDestCopy(E, Src, /*Ignore*/ true);
+}
+
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore,
+ unsigned Alignment) {
+ assert(Src.isAggregate() && "value must be aggregate value!");
+
+ // If Dest is ignored, then we're evaluating an aggregate expression
+ // in a context (like an expression statement) that doesn't care
+ // about the result. C says that an lvalue-to-rvalue conversion is
+ // performed in these cases; C++ says that it is not. In either
+ // case, we don't actually need to do anything unless the value is
+ // volatile.
+ if (Dest.isIgnored()) {
+ if (!Src.isVolatileQualified() ||
+ CGF.CGM.getLangOpts().CPlusPlus ||
+ (IgnoreResult && Ignore))
+ return;
+
+ // If the source is volatile, we must read from it; to do that, we need
+ // some place to put it.
+ Dest = CGF.CreateAggTemp(E->getType(), "agg.tmp");
+ }
+
+ if (Dest.requiresGCollection()) {
+ CharUnits size = CGF.getContext().getTypeSizeInChars(E->getType());
+ llvm::Type *SizeTy = CGF.ConvertType(CGF.getContext().getSizeType());
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
+ CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF,
+ Dest.getAddr(),
+ Src.getAggregateAddr(),
+ SizeVal);
+ return;
+ }
+ // If the result of the assignment is used, copy the LHS there also.
+ // FIXME: Pass VolatileDest as well. I think we also need to merge volatile
+ // from the source, as we can't eliminate it if either operand is volatile
+ // unless the copy is volatile for both source and destination.
+ CGF.EmitAggregateCopy(Dest.getAddr(), Src.getAggregateAddr(), E->getType(),
+ Dest.isVolatile()|Src.isVolatileQualified(),
+ Alignment);
+}
+
+/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
+void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
+ assert(Src.isSimple() && "Can't have aggregate bitfield, vector, etc");
+
+ CharUnits Alignment = std::min(Src.getAlignment(), Dest.getAlignment());
+ EmitFinalDestCopy(E, Src.asAggregateRValue(), Ignore, Alignment.getQuantity());
+}
+
+static QualType GetStdInitializerListElementType(QualType T) {
+ // Just assume that this is really std::initializer_list.
+ ClassTemplateSpecializationDecl *specialization =
+ cast<ClassTemplateSpecializationDecl>(T->castAs<RecordType>()->getDecl());
+ return specialization->getTemplateArgs()[0].getAsType();
+}
+
+/// \brief Prepare cleanup for the temporary array.
+static void EmitStdInitializerListCleanup(CodeGenFunction &CGF,
+ QualType arrayType,
+ llvm::Value *addr,
+ const InitListExpr *initList) {
+ QualType::DestructionKind dtorKind = arrayType.isDestructedType();
+ if (!dtorKind)
+ return; // Type doesn't need destroying.
+ if (dtorKind != QualType::DK_cxx_destructor) {
+ CGF.ErrorUnsupported(initList, "ObjC ARC type in initializer_list");
+ return;
+ }
+
+ CodeGenFunction::Destroyer *destroyer = CGF.getDestroyer(dtorKind);
+ CGF.pushDestroy(NormalAndEHCleanup, addr, arrayType, destroyer,
+ /*EHCleanup=*/true);
+}
+
+/// \brief Emit the initializer for a std::initializer_list initialized with a
+/// real initializer list.
+void AggExprEmitter::EmitStdInitializerList(llvm::Value *destPtr,
+ InitListExpr *initList) {
+ // We emit an array containing the elements, then have the init list point
+ // at the array.
+ ASTContext &ctx = CGF.getContext();
+ unsigned numInits = initList->getNumInits();
+ QualType element = GetStdInitializerListElementType(initList->getType());
+ llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
+ QualType array = ctx.getConstantArrayType(element, size, ArrayType::Normal,0);
+ llvm::Type *LTy = CGF.ConvertTypeForMem(array);
+ llvm::AllocaInst *alloc = CGF.CreateTempAlloca(LTy);
+ alloc->setAlignment(ctx.getTypeAlignInChars(array).getQuantity());
+ alloc->setName(".initlist.");
+
+ EmitArrayInit(alloc, cast<llvm::ArrayType>(LTy), element, initList);
+
+ // FIXME: The diagnostics are somewhat out of place here.
+ RecordDecl *record = initList->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl::field_iterator field = record->field_begin();
+ if (field == record->field_end()) {
+ CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ return;
+ }
+
+ QualType elementPtr = ctx.getPointerType(element.withConst());
+
+ // Start pointer.
+ if (!ctx.hasSameType(field->getType(), elementPtr)) {
+ CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ return;
+ }
+ LValue start = CGF.EmitLValueForFieldInitialization(destPtr, *field, 0);
+ llvm::Value *arrayStart = Builder.CreateStructGEP(alloc, 0, "arraystart");
+ CGF.EmitStoreThroughLValue(RValue::get(arrayStart), start);
+ ++field;
+
+ if (field == record->field_end()) {
+ CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ return;
+ }
+ LValue endOrLength = CGF.EmitLValueForFieldInitialization(destPtr, *field, 0);
+ if (ctx.hasSameType(field->getType(), elementPtr)) {
+ // End pointer.
+ llvm::Value *arrayEnd = Builder.CreateStructGEP(alloc,numInits, "arrayend");
+ CGF.EmitStoreThroughLValue(RValue::get(arrayEnd), endOrLength);
+ } else if (ctx.hasSameType(field->getType(), ctx.getSizeType())) {
+ // Length.
+ CGF.EmitStoreThroughLValue(RValue::get(Builder.getInt(size)), endOrLength);
+ } else {
+ CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ return;
+ }
+
+ if (!Dest.isExternallyDestructed())
+ EmitStdInitializerListCleanup(CGF, array, alloc, initList);
+}
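+
+// Rough sketch of the lowering above (the field names and library layout are
+// illustrative; the code only assumes a start pointer followed by either an
+// end pointer or a length):
+//
+//   std::initializer_list<int> il = {1, 2, 3};
+//
+// is emitted as a hidden 'const int [3]' backing array holding {1, 2, 3},
+// with il's first field pointing at the array start and its second field set
+// to the array end (or to the length 3, depending on the second field's type).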
+
+/// \brief Emit initialization of an array from an initializer list.
+void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
+ QualType elementType, InitListExpr *E) {
+ uint64_t NumInitElements = E->getNumInits();
+
+ uint64_t NumArrayElements = AType->getNumElements();
+ assert(NumInitElements <= NumArrayElements);
+
+ // DestPtr is an array*. Construct an elementType* by drilling
+ // down a level.
+ llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
+ llvm::Value *indices[] = { zero, zero };
+ llvm::Value *begin =
+ Builder.CreateInBoundsGEP(DestPtr, indices, "arrayinit.begin");
+
+ // Exception safety requires us to destroy all the
+ // already-constructed members if an initializer throws.
+ // For that, we'll need an EH cleanup.
+ QualType::DestructionKind dtorKind = elementType.isDestructedType();
+ llvm::AllocaInst *endOfInit = 0;
+ EHScopeStack::stable_iterator cleanup;
+ llvm::Instruction *cleanupDominator = 0;
+ if (CGF.needsEHCleanup(dtorKind)) {
+ // In principle we could tell the cleanup where we are more
+ // directly, but the control flow can get so varied here that it
+ // would actually be quite complex. Therefore we go through an
+ // alloca.
+ endOfInit = CGF.CreateTempAlloca(begin->getType(),
+ "arrayinit.endOfInit");
+ cleanupDominator = Builder.CreateStore(begin, endOfInit);
+ CGF.pushIrregularPartialArrayCleanup(begin, endOfInit, elementType,
+ CGF.getDestroyer(dtorKind));
+ cleanup = CGF.EHStack.stable_begin();
+
+ // Otherwise, remember that we didn't need a cleanup.
+ } else {
+ dtorKind = QualType::DK_none;
+ }
+
+ llvm::Value *one = llvm::ConstantInt::get(CGF.SizeTy, 1);
+
+ // The 'current element to initialize'. The invariants on this
+ // variable are complicated. Essentially, after each iteration of
+ // the loop, it points to the last initialized element, except
+ // that it points to the beginning of the array before any
+ // elements have been initialized.
+ llvm::Value *element = begin;
+
+ // Emit the explicit initializers.
+ for (uint64_t i = 0; i != NumInitElements; ++i) {
+ // Advance to the next element.
+ if (i > 0) {
+ element = Builder.CreateInBoundsGEP(element, one, "arrayinit.element");
+
+ // Tell the cleanup that it needs to destroy up to this
+ // element. TODO: some of these stores can be trivially
+ // observed to be unnecessary.
+ if (endOfInit) Builder.CreateStore(element, endOfInit);
+ }
+
+ // If these are nested std::initializer_list inits, do them directly,
+ // because they are conceptually the same "location".
+ InitListExpr *initList = dyn_cast<InitListExpr>(E->getInit(i));
+ if (initList && initList->initializesStdInitializerList()) {
+ EmitStdInitializerList(element, initList);
+ } else {
+ LValue elementLV = CGF.MakeAddrLValue(element, elementType);
+ EmitInitializationToLValue(E->getInit(i), elementLV);
+ }
+ }
+
+ // Check whether there's a non-trivial array-fill expression.
+ // Note that this will be a CXXConstructExpr even if the element
+ // type is an array (or array of array, etc.) of class type.
+ Expr *filler = E->getArrayFiller();
+ bool hasTrivialFiller = true;
+ if (CXXConstructExpr *cons = dyn_cast_or_null<CXXConstructExpr>(filler)) {
+ assert(cons->getConstructor()->isDefaultConstructor());
+ hasTrivialFiller = cons->getConstructor()->isTrivial();
+ }
+
+ // Any remaining elements need to be zero-initialized, possibly
+ // using the filler expression. We can skip this if we're
+ // emitting to zeroed memory.
+ if (NumInitElements != NumArrayElements &&
+ !(Dest.isZeroed() && hasTrivialFiller &&
+ CGF.getTypes().isZeroInitializable(elementType))) {
+
+ // Use an actual loop. This is basically
+ // do { *array++ = filler; } while (array != end);
+
+ // Advance to the start of the rest of the array.
+ if (NumInitElements) {
+ element = Builder.CreateInBoundsGEP(element, one, "arrayinit.start");
+ if (endOfInit) Builder.CreateStore(element, endOfInit);
+ }
+
+ // Compute the end of the array.
+ llvm::Value *end = Builder.CreateInBoundsGEP(begin,
+ llvm::ConstantInt::get(CGF.SizeTy, NumArrayElements),
+ "arrayinit.end");
+
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
+
+ // Jump into the body.
+ CGF.EmitBlock(bodyBB);
+ llvm::PHINode *currentElement =
+ Builder.CreatePHI(element->getType(), 2, "arrayinit.cur");
+ currentElement->addIncoming(element, entryBB);
+
+ // Emit the actual filler expression.
+ LValue elementLV = CGF.MakeAddrLValue(currentElement, elementType);
+ if (filler)
+ EmitInitializationToLValue(filler, elementLV);
+ else
+ EmitNullInitializationToLValue(elementLV);
+
+ // Move on to the next element.
+ llvm::Value *nextElement =
+ Builder.CreateInBoundsGEP(currentElement, one, "arrayinit.next");
+
+ // Tell the EH cleanup that we finished with the last element.
+ if (endOfInit) Builder.CreateStore(nextElement, endOfInit);
+
+ // Leave the loop if we're done.
+ llvm::Value *done = Builder.CreateICmpEQ(nextElement, end,
+ "arrayinit.done");
+ llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
+ Builder.CreateCondBr(done, endBB, bodyBB);
+ currentElement->addIncoming(nextElement, Builder.GetInsertBlock());
+
+ CGF.EmitBlock(endBB);
+ }
+
+ // Leave the partial-array cleanup if we entered one.
+ if (dtorKind) CGF.DeactivateCleanupBlock(cleanup, cleanupDominator);
+}
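+
+// Rough illustration (the declaration is only for exposition): for
+//
+//   int a[8] = {1, 2, 3};
+//
+// the code above emits stores for the three explicit initializers and then,
+// unless the destination is already known to be zeroed and the filler is
+// trivial, a fill loop of the shape
+// 'do { *cur++ = filler; } while (cur != end)' that zero- or
+// default-initializes the remaining five elements.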
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+void AggExprEmitter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E){
+ Visit(E->GetTemporaryExpr());
+}
+
+void AggExprEmitter::VisitOpaqueValueExpr(OpaqueValueExpr *e) {
+ EmitFinalDestCopy(e, CGF.getOpaqueLValueMapping(e));
+}
+
+void
+AggExprEmitter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ if (E->getType().isPODType(CGF.getContext())) {
+ // For a POD type, just emit a load of the lvalue + a copy, because our
+ // compound literal might alias the destination.
+ // FIXME: This is a band-aid; the real problem appears to be in our handling
+ // of assignments, where we store directly into the LHS without checking
+ // whether anything in the RHS aliases.
+ EmitAggLoadOfLValue(E);
+ return;
+ }
+
+ AggValueSlot Slot = EnsureSlot(E->getType());
+ CGF.EmitAggExpr(E->getInitializer(), Slot);
+}
+
+
+void AggExprEmitter::VisitCastExpr(CastExpr *E) {
+ switch (E->getCastKind()) {
+ case CK_Dynamic: {
+ assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
+ LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
+ // FIXME: Do we also need to handle property references here?
+ if (LV.isSimple())
+ CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
+ else
+ CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
+
+ if (!Dest.isIgnored())
+ CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
+ break;
+ }
+
+ case CK_ToUnion: {
+ if (Dest.isIgnored()) break;
+
+ // GCC union extension
+ QualType Ty = E->getSubExpr()->getType();
+ QualType PtrTy = CGF.getContext().getPointerType(Ty);
+ llvm::Value *CastPtr = Builder.CreateBitCast(Dest.getAddr(),
+ CGF.ConvertType(PtrTy));
+ EmitInitializationToLValue(E->getSubExpr(),
+ CGF.MakeAddrLValue(CastPtr, Ty));
+ break;
+ }
+
+ case CK_DerivedToBase:
+ case CK_BaseToDerived:
+ case CK_UncheckedDerivedToBase: {
+ llvm_unreachable("cannot perform hierarchy conversion in EmitAggExpr: "
+ "should have been unpacked before we got here");
+ }
+
+ case CK_LValueToRValue: // hope for downstream optimization
+ case CK_NoOp:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_UserDefinedConversion:
+ case CK_ConstructorConversion:
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getSubExpr()->getType(),
+ E->getType()) &&
+ "Implicit cast types must be compatible");
+ Visit(E->getSubExpr());
+ break;
+
+ case CK_LValueBitCast:
+ llvm_unreachable("should not be emitting lvalue bitcast as rvalue");
+
+ case CK_Dependent:
+ case CK_BitCast:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ llvm_unreachable("cast kind invalid for aggregate types");
+ }
+}
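+
+// Rough illustration of the CK_ToUnion case above (a GNU C extension):
+//
+//   union U { int i; float f; };
+//   union U u = (union U)42;
+//
+// The destination slot is bitcast to 'int *' and the subexpression 42 is
+// emitted directly into that member rather than through a separate temporary.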
+
+void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType()) {
+ EmitAggLoadOfLValue(E);
+ return;
+ }
+
+ RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
+ EmitMoveFromReturnSlot(E, RV);
+}
+
+void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
+ EmitMoveFromReturnSlot(E, RV);
+}
+
+void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitIgnoredExpr(E->getLHS());
+ Visit(E->getRHS());
+}
+
+void AggExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ CodeGenFunction::StmtExprEvaluation eval(CGF);
+ CGF.EmitCompoundStmt(*E->getSubStmt(), true, Dest);
+}
+
+void AggExprEmitter::VisitBinaryOperator(const BinaryOperator *E) {
+ if (E->getOpcode() == BO_PtrMemD || E->getOpcode() == BO_PtrMemI)
+ VisitPointerToDataMemberBinaryOperator(E);
+ else
+ CGF.ErrorUnsupported(E, "aggregate binary expression");
+}
+
+void AggExprEmitter::VisitPointerToDataMemberBinaryOperator(
+ const BinaryOperator *E) {
+ LValue LV = CGF.EmitPointerToDataMemberBinaryExpr(E);
+ EmitFinalDestCopy(E, LV);
+}
+
+void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ // For an assignment to work, the value on the right has
+ // to be compatible with the value on the left.
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
+ E->getRHS()->getType())
+ && "Invalid assignment");
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->getLHS()))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (VD->hasAttr<BlocksAttr>() &&
+ E->getRHS()->HasSideEffects(CGF.getContext())) {
+ // When __block variable on LHS, the RHS must be evaluated first
+ // as it may change the 'forwarding' field via call to Block_copy.
+ LValue RHS = CGF.EmitLValue(E->getRHS());
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+ Dest = AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
+ needsGC(E->getLHS()->getType()),
+ AggValueSlot::IsAliased);
+ EmitFinalDestCopy(E, RHS, true);
+ return;
+ }
+
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ // Codegen the RHS so that it stores directly into the LHS.
+ AggValueSlot LHSSlot =
+ AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
+ needsGC(E->getLHS()->getType()),
+ AggValueSlot::IsAliased);
+ CGF.EmitAggExpr(E->getRHS(), LHSSlot, false);
+ EmitFinalDestCopy(E, LHS, true);
+}
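+
+// Rough illustration of the __block special case above (the helper name is
+// hypothetical):
+//
+//   __block S s;
+//   s = makeS();   // may call Block_copy and update the forwarding field
+//
+// The RHS is evaluated before the LHS lvalue so that the store targets the
+// possibly-updated forwarding address of the __block variable.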
+
+void AggExprEmitter::
+VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+ // Bind the common expression if necessary.
+ CodeGenFunction::OpaqueValueMapping binding(CGF, E);
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+ CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+
+ // Save whether the destination's lifetime is externally managed.
+ bool isExternallyDestructed = Dest.isExternallyDestructed();
+
+ eval.begin(CGF);
+ CGF.EmitBlock(LHSBlock);
+ Visit(E->getTrueExpr());
+ eval.end(CGF);
+
+ assert(CGF.HaveInsertPoint() && "expression evaluation ended with no IP!");
+ CGF.Builder.CreateBr(ContBlock);
+
+ // If the result of an agg expression is unused, then the emission
+ // of the LHS might need to create a destination slot. That's fine
+ // with us, and we can safely emit the RHS into the same slot, but
+ // we shouldn't claim that it's already being destructed.
+ Dest.setExternallyDestructed(isExternallyDestructed);
+
+ eval.begin(CGF);
+ CGF.EmitBlock(RHSBlock);
+ Visit(E->getFalseExpr());
+ eval.end(CGF);
+
+ CGF.EmitBlock(ContBlock);
+}
+
+void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
+ Visit(CE->getChosenSubExpr(CGF.getContext()));
+}
+
+void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+
+ if (!ArgPtr) {
+ CGF.ErrorUnsupported(VE, "aggregate va_arg expression");
+ return;
+ }
+
+ EmitFinalDestCopy(VE, CGF.MakeAddrLValue(ArgPtr, VE->getType()));
+}
+
+void AggExprEmitter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ // Ensure that we have a slot, but if we already do, remember
+ // whether it was externally destructed.
+ bool wasExternallyDestructed = Dest.isExternallyDestructed();
+ Dest = EnsureSlot(E->getType());
+
+ // We're going to push a destructor if there isn't already one.
+ Dest.setExternallyDestructed();
+
+ Visit(E->getSubExpr());
+
+ // Push that destructor we promised.
+ if (!wasExternallyDestructed)
+ CGF.EmitCXXTemporary(E->getTemporary(), E->getType(), Dest.getAddr());
+}
+
+void
+AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
+ AggValueSlot Slot = EnsureSlot(E->getType());
+ CGF.EmitCXXConstructExpr(E, Slot);
+}
+
+void
+AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
+ AggValueSlot Slot = EnsureSlot(E->getType());
+ CGF.EmitLambdaExpr(E, Slot);
+}
+
+void AggExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
+ CGF.enterFullExpression(E);
+ CodeGenFunction::RunCleanupsScope cleanups(CGF);
+ Visit(E->getSubExpr());
+}
+
+void AggExprEmitter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
+ QualType T = E->getType();
+ AggValueSlot Slot = EnsureSlot(T);
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
+}
+
+void AggExprEmitter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ QualType T = E->getType();
+ AggValueSlot Slot = EnsureSlot(T);
+ EmitNullInitializationToLValue(CGF.MakeAddrLValue(Slot.getAddr(), T));
+}
+
+/// isSimpleZero - If emitting this value will obviously just cause a store of
+/// zero to memory, return true. This can return false if uncertain, so it just
+/// handles simple cases.
+static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
+ E = E->IgnoreParens();
+
+ // 0
+ if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
+ return IL->getValue() == 0;
+ // +0.0
+ if (const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(E))
+ return FL->getValue().isPosZero();
+ // int()
+ if ((isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) &&
+ CGF.getTypes().isZeroInitializable(E->getType()))
+ return true;
+ // (int*)0 - Null pointer expressions.
+ if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
+ return ICE->getCastKind() == CK_NullToPointer;
+ // '\0'
+ if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
+ return CL->getValue() == 0;
+
+ // Otherwise, hard case: conservatively return false.
+ return false;
+}
+
+
+void
+AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
+ QualType type = LV.getType();
+ // FIXME: Ignore result?
+ // FIXME: Are initializers affected by volatile?
+ if (Dest.isZeroed() && isSimpleZero(E, CGF)) {
+ // Storing "i32 0" to a zero'd memory location is a noop.
+ } else if (isa<ImplicitValueInitExpr>(E)) {
+ EmitNullInitializationToLValue(LV);
+ } else if (type->isReferenceType()) {
+ RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
+ CGF.EmitStoreThroughLValue(RV, LV);
+ } else if (type->isAnyComplexType()) {
+ CGF.EmitComplexExprIntoAddr(E, LV.getAddress(), false);
+ } else if (CGF.hasAggregateLLVMType(type)) {
+ CGF.EmitAggExpr(E, AggValueSlot::forLValue(LV,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased,
+ Dest.isZeroed()));
+ } else if (LV.isSimple()) {
+ CGF.EmitScalarInit(E, /*D=*/0, LV, /*Captured=*/false);
+ } else {
+ CGF.EmitStoreThroughLValue(RValue::get(CGF.EmitScalarExpr(E)), LV);
+ }
+}
+
+void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
+ QualType type = lv.getType();
+
+ // If the destination slot is already zeroed out before the aggregate is
+ // copied into it, we don't have to emit any zeros here.
+ if (Dest.isZeroed() && CGF.getTypes().isZeroInitializable(type))
+ return;
+
+ if (!CGF.hasAggregateLLVMType(type)) {
+ // For non-aggregates, we can store zero.
+ llvm::Value *null = llvm::Constant::getNullValue(CGF.ConvertType(type));
+ // Note that the following is not equivalent to
+ // EmitStoreThroughBitfieldLValue for ARC types.
+ if (lv.isBitField()) {
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(null), lv);
+ } else {
+ assert(lv.isSimple());
+ CGF.EmitStoreOfScalar(null, lv, /* isInitialization */ true);
+ }
+ } else {
+ // There's a potential optimization opportunity in combining
+ // memsets; that would be easy for arrays, but relatively
+ // difficult for structures with the current code.
+ CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
+ }
+}
+
+void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
+#if 0
+ // FIXME: Assess perf here? Figure out what cases are worth optimizing here
+ // (Length of globals? Chunks of zeroed-out space?).
+ //
+ // If we can, prefer a copy from a global; this is a lot less code for long
+ // globals, and it's easier for the current optimizers to analyze.
+ if (llvm::Constant* C = CGF.CGM.EmitConstantExpr(E, E->getType(), &CGF)) {
+ llvm::GlobalVariable* GV =
+ new llvm::GlobalVariable(CGF.CGM.getModule(), C->getType(), true,
+ llvm::GlobalValue::InternalLinkage, C, "");
+ EmitFinalDestCopy(E, CGF.MakeAddrLValue(GV, E->getType()));
+ return;
+ }
+#endif
+ if (E->hadArrayRangeDesignator())
+ CGF.ErrorUnsupported(E, "GNU array range designator extension");
+
+ if (E->initializesStdInitializerList()) {
+ EmitStdInitializerList(Dest.getAddr(), E);
+ return;
+ }
+
+ llvm::Value *DestPtr = EnsureSlot(E->getType()).getAddr();
+
+ // Handle initialization of an array.
+ if (E->getType()->isArrayType()) {
+ if (E->getNumInits() > 0) {
+ QualType T1 = E->getType();
+ QualType T2 = E->getInit(0)->getType();
+ if (CGF.getContext().hasSameUnqualifiedType(T1, T2)) {
+ EmitAggLoadOfLValue(E->getInit(0));
+ return;
+ }
+ }
+
+ QualType elementType =
+ CGF.getContext().getAsArrayType(E->getType())->getElementType();
+
+ llvm::PointerType *APType =
+ cast<llvm::PointerType>(DestPtr->getType());
+ llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(APType->getElementType());
+
+ EmitArrayInit(DestPtr, AType, elementType, E);
+ return;
+ }
+
+ assert(E->getType()->isRecordType() && "Only support structs/unions here!");
+
+ // Do struct initialization; this code just sets each individual member
+ // to the appropriate value. This makes bitfield support automatic;
+ // the disadvantage is that the generated code is more difficult for
+ // the optimizer, especially with bitfields.
+ unsigned NumInitElements = E->getNumInits();
+ RecordDecl *record = E->getType()->castAs<RecordType>()->getDecl();
+
+ if (record->isUnion()) {
+ // Only initialize one field of a union. The field itself is
+ // specified by the initializer list.
+ if (!E->getInitializedFieldInUnion()) {
+ // Empty union; we have nothing to do.
+
+#ifndef NDEBUG
+ // Make sure that it's really an empty union and not a failure of
+ // semantic analysis.
+ for (RecordDecl::field_iterator Field = record->field_begin(),
+ FieldEnd = record->field_end();
+ Field != FieldEnd; ++Field)
+ assert(Field->isUnnamedBitfield() && "Only unnamed bitfields allowed");
+#endif
+ return;
+ }
+
+ // FIXME: volatility
+ FieldDecl *Field = E->getInitializedFieldInUnion();
+
+ LValue FieldLoc = CGF.EmitLValueForFieldInitialization(DestPtr, Field, 0);
+ if (NumInitElements) {
+ // Store the initializer into the field
+ EmitInitializationToLValue(E->getInit(0), FieldLoc);
+ } else {
+ // Default-initialize to null.
+ EmitNullInitializationToLValue(FieldLoc);
+ }
+
+ return;
+ }
+
+ // We'll need to enter cleanup scopes in case any of the member
+ // initializers throw an exception.
+ SmallVector<EHScopeStack::stable_iterator, 16> cleanups;
+ llvm::Instruction *cleanupDominator = 0;
+
+ // Here we iterate over the fields; this makes it simpler to both
+ // default-initialize fields and skip over unnamed fields.
+ unsigned curInitIndex = 0;
+ for (RecordDecl::field_iterator field = record->field_begin(),
+ fieldEnd = record->field_end();
+ field != fieldEnd; ++field) {
+ // We're done once we hit the flexible array member.
+ if (field->getType()->isIncompleteArrayType())
+ break;
+
+ // Always skip anonymous bitfields.
+ if (field->isUnnamedBitfield())
+ continue;
+
+ // We're done if we reach the end of the explicit initializers, we
+ // have a zeroed object, and the rest of the fields are
+ // zero-initializable.
+ if (curInitIndex == NumInitElements && Dest.isZeroed() &&
+ CGF.getTypes().isZeroInitializable(E->getType()))
+ break;
+
+ // FIXME: volatility
+ LValue LV = CGF.EmitLValueForFieldInitialization(DestPtr, *field, 0);
+ // We never generate write-barriers for initialized fields.
+ LV.setNonGC(true);
+
+ if (curInitIndex < NumInitElements) {
+ // Store the initializer into the field.
+ EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
+ } else {
+ // We're out of initializers; default-initialize to null.
+ EmitNullInitializationToLValue(LV);
+ }
+
+ // Push a destructor if necessary.
+ // FIXME: if we have an array of structures, all explicitly
+ // initialized, we can end up pushing a linear number of cleanups.
+ bool pushedCleanup = false;
+ if (QualType::DestructionKind dtorKind
+ = field->getType().isDestructedType()) {
+ assert(LV.isSimple());
+ if (CGF.needsEHCleanup(dtorKind)) {
+ if (!cleanupDominator)
+ cleanupDominator = CGF.Builder.CreateUnreachable(); // placeholder
+
+ CGF.pushDestroy(EHCleanup, LV.getAddress(), field->getType(),
+ CGF.getDestroyer(dtorKind), false);
+ cleanups.push_back(CGF.EHStack.stable_begin());
+ pushedCleanup = true;
+ }
+ }
+
+ // If the GEP didn't get used because of a dead zero init or something
+ // else, clean it up for -O0 builds and general tidiness.
+ if (!pushedCleanup && LV.isSimple())
+ if (llvm::GetElementPtrInst *GEP =
+ dyn_cast<llvm::GetElementPtrInst>(LV.getAddress()))
+ if (GEP->use_empty())
+ GEP->eraseFromParent();
+ }
+
+ // Deactivate all the partial cleanups in reverse order, which
+ // generally means popping them.
+ for (unsigned i = cleanups.size(); i != 0; --i)
+ CGF.DeactivateCleanupBlock(cleanups[i-1], cleanupDominator);
+
+ // Destroy the placeholder if we made one.
+ if (cleanupDominator)
+ cleanupDominator->eraseFromParent();
+}
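+
+// Rough illustration of the struct path above:
+//
+//   struct P { int x, y, z; };
+//   struct P p = {1};
+//
+// stores 1 into p.x and then default-initializes p.y and p.z to zero field by
+// field (or skips them entirely when the slot is already known to be zeroed
+// and the type is zero-initializable).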
+
+//===----------------------------------------------------------------------===//
+// Entry Points into this File
+//===----------------------------------------------------------------------===//
+
+/// GetNumNonZeroBytesInInit - Get an approximate count of the number of
+/// non-zero bytes that will be stored when outputting the initializer for the
+/// specified initializer expression.
+static CharUnits GetNumNonZeroBytesInInit(const Expr *E, CodeGenFunction &CGF) {
+ E = E->IgnoreParens();
+
+ // 0 and 0.0 won't require any non-zero stores!
+ if (isSimpleZero(E, CGF)) return CharUnits::Zero();
+
+ // If this is an initlist expr, sum up the sizes of the (present)
+ // elements. If this is something weird, assume the whole thing is non-zero.
+ const InitListExpr *ILE = dyn_cast<InitListExpr>(E);
+ if (ILE == 0 || !CGF.getTypes().isZeroInitializable(ILE->getType()))
+ return CGF.getContext().getTypeSizeInChars(E->getType());
+
+ // InitListExprs for structs have to be handled carefully. If there are
+ // reference members, we need to consider the size of the reference, not the
+ // referencee. InitListExprs for unions and arrays can't have references.
+ if (const RecordType *RT = E->getType()->getAs<RecordType>()) {
+ if (!RT->isUnionType()) {
+ RecordDecl *SD = E->getType()->getAs<RecordType>()->getDecl();
+ CharUnits NumNonZeroBytes = CharUnits::Zero();
+
+ unsigned ILEElement = 0;
+ for (RecordDecl::field_iterator Field = SD->field_begin(),
+ FieldEnd = SD->field_end(); Field != FieldEnd; ++Field) {
+ // We're done once we hit the flexible array member or run out of
+ // InitListExpr elements.
+ if (Field->getType()->isIncompleteArrayType() ||
+ ILEElement == ILE->getNumInits())
+ break;
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ const Expr *E = ILE->getInit(ILEElement++);
+
+ // Reference values are always non-null and have the width of a pointer.
+ if (Field->getType()->isReferenceType())
+ NumNonZeroBytes += CGF.getContext().toCharUnitsFromBits(
+ CGF.getContext().getTargetInfo().getPointerWidth(0));
+ else
+ NumNonZeroBytes += GetNumNonZeroBytesInInit(E, CGF);
+ }
+
+ return NumNonZeroBytes;
+ }
+ }
+
+
+ CharUnits NumNonZeroBytes = CharUnits::Zero();
+ for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i)
+ NumNonZeroBytes += GetNumNonZeroBytesInInit(ILE->getInit(i), CGF);
+ return NumNonZeroBytes;
+}
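+
+// Rough worked example for the estimate above: for
+//
+//   struct P { int a, b, c, d; };
+//   struct P p = {1, 0, 0, 0};
+//
+// only the initializer for 'a' is non-zero, so the function returns
+// sizeof(int) non-zero bytes (4 on a typical target) out of 16.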
+
+/// CheckAggExprForMemSetUse - If the initializer is large and has a lot of
+/// zeros in it, emit a memset and avoid storing the individual zeros.
+///
+static void CheckAggExprForMemSetUse(AggValueSlot &Slot, const Expr *E,
+ CodeGenFunction &CGF) {
+ // If the slot is already known to be zeroed, nothing to do. Don't mess with
+ // volatile stores.
+ if (Slot.isZeroed() || Slot.isVolatile() || Slot.getAddr() == 0) return;
+
+ // C++ objects with a user-declared constructor don't need zeroing.
+ if (CGF.getContext().getLangOpts().CPlusPlus)
+ if (const RecordType *RT = CGF.getContext()
+ .getBaseElementType(E->getType())->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasUserDeclaredConstructor())
+ return;
+ }
+
+ // If the type is 16-bytes or smaller, prefer individual stores over memset.
+ std::pair<CharUnits, CharUnits> TypeInfo =
+ CGF.getContext().getTypeInfoInChars(E->getType());
+ if (TypeInfo.first <= CharUnits::fromQuantity(16))
+ return;
+
+ // Check to see if over 3/4 of the initializer are known to be zero. If so,
+ // we prefer to emit memset + individual stores for the rest.
+ CharUnits NumNonZeroBytes = GetNumNonZeroBytesInInit(E, CGF);
+ if (NumNonZeroBytes*4 > TypeInfo.first)
+ return;
+
+ // Okay, it seems like a good idea to use an initial memset, emit the call.
+ llvm::Constant *SizeVal = CGF.Builder.getInt64(TypeInfo.first.getQuantity());
+ CharUnits Align = TypeInfo.second;
+
+ llvm::Value *Loc = Slot.getAddr();
+
+ Loc = CGF.Builder.CreateBitCast(Loc, CGF.Int8PtrTy);
+ CGF.Builder.CreateMemSet(Loc, CGF.Builder.getInt8(0), SizeVal,
+ Align.getQuantity(), false);
+
+ // Tell the AggExprEmitter that the slot is known zero.
+ Slot.setZeroed();
+}
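+
+// Rough illustration of the heuristic above:
+//
+//   struct Big { int a[64]; };
+//   struct Big b = {{1, 2, 3}};
+//
+// The type is larger than 16 bytes and well over 3/4 of it is known to be
+// zero, so a single memset of the whole slot is emitted first, the slot is
+// marked zeroed, and only the non-zero parts of the initializer are stored
+// individually afterwards.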
+
+
+
+
+/// EmitAggExpr - Emit the computation of the specified expression of aggregate
+/// type. The result is computed into the given slot. Note that if the slot is
+/// ignored, the value of the aggregate expression is not needed.
+///
+/// \param IgnoreResult - true if the caller only needs the side effects of the
+/// expression; the resulting value may be discarded.
+void CodeGenFunction::EmitAggExpr(const Expr *E, AggValueSlot Slot,
+ bool IgnoreResult) {
+ assert(E && hasAggregateLLVMType(E->getType()) &&
+ "Invalid aggregate expression to emit");
+ assert((Slot.getAddr() != 0 || Slot.isIgnored()) &&
+ "slot has bits but no address");
+
+ // Optimize the slot if possible.
+ CheckAggExprForMemSetUse(Slot, E, *this);
+
+ AggExprEmitter(*this, Slot, IgnoreResult).Visit(const_cast<Expr*>(E));
+}
+
+LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
+ assert(hasAggregateLLVMType(E->getType()) && "Invalid argument!");
+ llvm::Value *Temp = CreateMemTemp(E->getType());
+ LValue LV = MakeAddrLValue(Temp, E->getType());
+ EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ return LV;
+}
+
+void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
+ llvm::Value *SrcPtr, QualType Ty,
+ bool isVolatile, unsigned Alignment) {
+ assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
+
+ if (getContext().getLangOpts().CPlusPlus) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
+ assert((Record->hasTrivialCopyConstructor() ||
+ Record->hasTrivialCopyAssignment() ||
+ Record->hasTrivialMoveConstructor() ||
+ Record->hasTrivialMoveAssignment()) &&
+ "Trying to aggregate-copy a type without a trivial copy "
+ "constructor or assignment operator");
+ // Ignore empty classes in C++.
+ if (Record->isEmpty())
+ return;
+ }
+ }
+
+ // Aggregate assignment turns into llvm.memcpy. This is almost valid per
+ // C99 6.5.16.1p3, which states "If the value being stored in an object is
+ // read from another object that overlaps in any way the storage of the first
+ // object, then the overlap shall be exact and the two objects shall have
+ // qualified or unqualified versions of a compatible type."
+ //
+ // memcpy is not defined if the source and destination pointers are exactly
+ // equal, but other compilers do this optimization, and almost every memcpy
+ // implementation handles this case safely. If there is a libc that does not
+ // safely handle this, we can add a target hook.
+
+ // Get size and alignment info for this aggregate.
+ std::pair<CharUnits, CharUnits> TypeInfo =
+ getContext().getTypeInfoInChars(Ty);
+
+ if (!Alignment)
+ Alignment = TypeInfo.second.getQuantity();
+
+ // FIXME: Handle variable sized types.
+
+ // FIXME: If we have a volatile struct, the optimizer can remove what might
+ // appear to be `extra' memory ops:
+ //
+ // volatile struct { int i; } a, b;
+ //
+ // int main() {
+ // a = b;
+ // a = b;
+ // }
+ //
+ // we need to use a different call here. We use isVolatile to indicate when
+ // either the source or the destination is volatile.
+
+ llvm::PointerType *DPT = cast<llvm::PointerType>(DestPtr->getType());
+ llvm::Type *DBP =
+ llvm::Type::getInt8PtrTy(getLLVMContext(), DPT->getAddressSpace());
+ DestPtr = Builder.CreateBitCast(DestPtr, DBP);
+
+ llvm::PointerType *SPT = cast<llvm::PointerType>(SrcPtr->getType());
+ llvm::Type *SBP =
+ llvm::Type::getInt8PtrTy(getLLVMContext(), SPT->getAddressSpace());
+ SrcPtr = Builder.CreateBitCast(SrcPtr, SBP);
+
+ // Don't do any of the memmove_collectable tests if GC isn't set.
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
+ // fall through
+ } else if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+ RecordDecl *Record = RecordTy->getDecl();
+ if (Record->hasObjectMember()) {
+ CharUnits size = TypeInfo.first;
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Value *SizeVal = llvm::ConstantInt::get(SizeTy, size.getQuantity());
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
+ SizeVal);
+ return;
+ }
+ } else if (Ty->isArrayType()) {
+ QualType BaseType = getContext().getBaseElementType(Ty);
+ if (const RecordType *RecordTy = BaseType->getAs<RecordType>()) {
+ if (RecordTy->getDecl()->hasObjectMember()) {
+ CharUnits size = TypeInfo.first;
+ llvm::Type *SizeTy = ConvertType(getContext().getSizeType());
+ llvm::Value *SizeVal =
+ llvm::ConstantInt::get(SizeTy, size.getQuantity());
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, DestPtr, SrcPtr,
+ SizeVal);
+ return;
+ }
+ }
+ }
+
+ Builder.CreateMemCpy(DestPtr, SrcPtr,
+ llvm::ConstantInt::get(IntPtrTy,
+ TypeInfo.first.getQuantity()),
+ Alignment, isVolatile);
+}
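+
+// Rough illustration of the exact-overlap case discussed above: for a plain
+// C self-assignment
+//
+//   struct S s;
+//   s = s;
+//
+// source and destination overlap exactly, which C99 6.5.16.1p3 permits; the
+// llvm.memcpy emitted here relies on memcpy implementations handling the
+// equal-pointers case safely.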
+
+void CodeGenFunction::MaybeEmitStdInitializerListCleanup(llvm::Value *loc,
+ const Expr *init) {
+ const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(init);
+ if (cleanups)
+ init = cleanups->getSubExpr();
+
+ if (isa<InitListExpr>(init) &&
+ cast<InitListExpr>(init)->initializesStdInitializerList()) {
+ // We initialized this std::initializer_list with an initializer list.
+ // A backing array was created. Push a cleanup for it.
+ EmitStdInitializerListCleanup(loc, cast<InitListExpr>(init));
+ }
+}
+
+static void EmitRecursiveStdInitializerListCleanup(CodeGenFunction &CGF,
+ llvm::Value *arrayStart,
+ const InitListExpr *init) {
+ // Check if there are any recursive cleanups to do, i.e. if we have
+ // std::initializer_list<std::initializer_list<obj>> list = {{obj()}};
+ // then we need to destroy the inner array as well.
+ for (unsigned i = 0, e = init->getNumInits(); i != e; ++i) {
+ const InitListExpr *subInit = dyn_cast<InitListExpr>(init->getInit(i));
+ if (!subInit || !subInit->initializesStdInitializerList())
+ continue;
+
+ // This one needs to be destroyed. Get the address of the std::init_list.
+ llvm::Value *offset = llvm::ConstantInt::get(CGF.SizeTy, i);
+ llvm::Value *loc = CGF.Builder.CreateInBoundsGEP(arrayStart, offset,
+ "std.initlist");
+ CGF.EmitStdInitializerListCleanup(loc, subInit);
+ }
+}
+
+void CodeGenFunction::EmitStdInitializerListCleanup(llvm::Value *loc,
+ const InitListExpr *init) {
+ ASTContext &ctx = getContext();
+ QualType element = GetStdInitializerListElementType(init->getType());
+ unsigned numInits = init->getNumInits();
+ llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
+ QualType array =ctx.getConstantArrayType(element, size, ArrayType::Normal, 0);
+ QualType arrayPtr = ctx.getPointerType(array);
+ llvm::Type *arrayPtrType = ConvertType(arrayPtr);
+
+ // 'loc' is the location of a std::initializer_list, whose first element
+ // is a pointer to the array we want to destroy.
+ llvm::Value *startPointer = Builder.CreateStructGEP(loc, 0, "startPointer");
+ llvm::Value *startAddress = Builder.CreateLoad(startPointer, "startAddress");
+
+ ::EmitRecursiveStdInitializerListCleanup(*this, startAddress, init);
+
+ llvm::Value *arrayAddress =
+ Builder.CreateBitCast(startAddress, arrayPtrType, "arrayAddress");
+ ::EmitStdInitializerListCleanup(*this, array, arrayAddress, init);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
new file mode 100644
index 0000000..d3ba770
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprCXX.cpp
@@ -0,0 +1,1830 @@
+//===--- CGExprCXX.cpp - Emit LLVM Code for C++ expressions ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with code generation of C++ expressions
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/CodeGenOptions.h"
+#include "CodeGenFunction.h"
+#include "CGCUDARuntime.h"
+#include "CGCXXABI.h"
+#include "CGObjCRuntime.h"
+#include "CGDebugInfo.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Support/CallSite.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ llvm::Value *This,
+ llvm::Value *VTT,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ assert(MD->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+
+ CallArgList Args;
+
+ // Push the this ptr.
+ Args.add(RValue::get(This), MD->getThisType(getContext()));
+
+ // If there is a VTT parameter, emit it.
+ if (VTT) {
+ QualType T = getContext().getPointerType(getContext().VoidPtrTy);
+ Args.add(RValue::get(VTT), T);
+ }
+
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+ RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
+
+ // And the rest of the call args.
+ EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
+
+ return EmitCall(CGM.getTypes().arrangeFunctionCall(FPT->getResultType(), Args,
+ FPT->getExtInfo(),
+ required),
+ Callee, ReturnValue, Args, MD);
+}
+
+static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
+ const Expr *E = Base;
+
+ while (true) {
+ E = E->IgnoreParens();
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_DerivedToBase ||
+ CE->getCastKind() == CK_UncheckedDerivedToBase ||
+ CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ }
+
+ break;
+ }
+
+ QualType DerivedType = E->getType();
+ if (const PointerType *PTy = DerivedType->getAs<PointerType>())
+ DerivedType = PTy->getPointeeType();
+
+ return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
+}
+
+// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
+// quite what we want.
+static const Expr *skipNoOpCastsAndParens(const Expr *E) {
+ while (true) {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ E = PE->getSubExpr();
+ continue;
+ }
+
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if (CE->getCastKind() == CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ }
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (UO->getOpcode() == UO_Extension) {
+ E = UO->getSubExpr();
+ continue;
+ }
+ }
+ return E;
+ }
+}
+
+/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
+/// expr can be devirtualized.
+static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
+ const Expr *Base,
+ const CXXMethodDecl *MD) {
+
+ // When building with -fapple-kext, all calls must go through the vtable since
+ // the kernel linker can do runtime patching of vtables.
+ if (Context.getLangOpts().AppleKext)
+ return false;
+
+ // If the most derived class is marked final, we know that no subclass can
+ // override this member function and so we can devirtualize it. For example:
+ //
+ // struct A { virtual void f(); }
+ // struct B final : A { };
+ //
+ // void f(B *b) {
+ // b->f();
+ // }
+ //
+ const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
+ if (MostDerivedClassDecl->hasAttr<FinalAttr>())
+ return true;
+
+ // If the member function is marked 'final', we know that it can't be
+ // overridden and can therefore devirtualize it.
+ if (MD->hasAttr<FinalAttr>())
+ return true;
+
+ // Similarly, if the class itself is marked 'final' it can't be overridden
+ // and we can therefore devirtualize the member function call.
+ if (MD->getParent()->hasAttr<FinalAttr>())
+ return true;
+
+ Base = skipNoOpCastsAndParens(Base);
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ // If the variable has record type (not a pointer or reference), we know
+ // its dynamic type exactly and can devirtualize the call.
+ return VD->getType()->isRecordType();
+ }
+
+ return false;
+ }
+
+ // We can always devirtualize calls on temporary object expressions.
+ if (isa<CXXConstructExpr>(Base))
+ return true;
+
+ // And calls on bound temporaries.
+ if (isa<CXXBindTemporaryExpr>(Base))
+ return true;
+
+ // Check if this is a call expr that returns a record type.
+ if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
+ return CE->getCallReturnType()->isRecordType();
+
+ // We can't devirtualize the call.
+ return false;
+}
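+
+// Rough illustration of the local-variable case above: in
+//
+//   struct A { virtual void f(); };
+//   void g() { A a; a.f(); }
+//
+// the base 'a' is a variable of record (non-pointer, non-reference) type, so
+// its dynamic type is known exactly and the call can be devirtualized to a
+// direct call to A::f.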
+
+// Note: This function also emits constructor calls to support the MSVC
+// extension allowing explicit constructor function calls.
+RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
+ ReturnValueSlot ReturnValue) {
+ const Expr *callee = CE->getCallee()->IgnoreParens();
+
+ if (isa<BinaryOperator>(callee))
+ return EmitCXXMemberPointerCallExpr(CE, ReturnValue);
+
+ const MemberExpr *ME = cast<MemberExpr>(callee);
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI && CGM.getCodeGenOpts().LimitDebugInfo
+ && !isa<CallExpr>(ME->getBase())) {
+ QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
+ if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
+ DI->getOrCreateRecordType(PTy->getPointeeType(),
+ MD->getParent()->getLocation());
+ }
+ }
+
+ if (MD->isStatic()) {
+ // The method is static, emit it as we would a regular call.
+ llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
+ return EmitCall(getContext().getPointerType(MD->getType()), Callee,
+ ReturnValue, CE->arg_begin(), CE->arg_end());
+ }
+
+ // Compute the object pointer.
+ llvm::Value *This;
+ if (ME->isArrow())
+ This = EmitScalarExpr(ME->getBase());
+ else
+ This = EmitLValue(ME->getBase()).getAddress();
+
+ if (MD->isTrivial()) {
+ if (isa<CXXDestructorDecl>(MD)) return RValue::get(0);
+ if (isa<CXXConstructorDecl>(MD) &&
+ cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
+ return RValue::get(0);
+
+ if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
+ // We don't like to generate the trivial copy/move assignment operator
+ // when it isn't necessary; just produce the proper effect here.
+ llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
+ EmitAggregateCopy(This, RHS, CE->getType());
+ return RValue::get(This);
+ }
+
+ if (isa<CXXConstructorDecl>(MD) &&
+ cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
+ // Trivial move and copy ctor are the same.
+ llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
+ EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
+ CE->arg_begin(), CE->arg_end());
+ return RValue::get(This);
+ }
+ llvm_unreachable("unknown trivial member function");
+ }
+
+ // Compute the function type we're calling.
+ const CGFunctionInfo *FInfo = 0;
+ if (isa<CXXDestructorDecl>(MD))
+ FInfo = &CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
+ Dtor_Complete);
+ else if (isa<CXXConstructorDecl>(MD))
+ FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(
+ cast<CXXConstructorDecl>(MD),
+ Ctor_Complete);
+ else
+ FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(MD);
+
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(*FInfo);
+
+ // C++ [class.virtual]p12:
+ // Explicit qualification with the scope operator (5.1) suppresses the
+ // virtual call mechanism.
+ //
+ // We also don't emit a virtual call if the base expression has a record type
+ // because then we know what the type is.
+ bool UseVirtualCall = MD->isVirtual() && !ME->hasQualifier()
+ && !canDevirtualizeMemberFunctionCalls(getContext(),
+ ME->getBase(), MD);
+ llvm::Value *Callee;
+ if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
+ if (UseVirtualCall) {
+ Callee = BuildVirtualCall(Dtor, Dtor_Complete, This, Ty);
+ } else {
+ if (getContext().getLangOpts().AppleKext &&
+ MD->isVirtual() &&
+ ME->hasQualifier())
+ Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
+ else
+ Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
+ }
+ } else if (const CXXConstructorDecl *Ctor =
+ dyn_cast<CXXConstructorDecl>(MD)) {
+ Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
+ } else if (UseVirtualCall) {
+ Callee = BuildVirtualCall(MD, This, Ty);
+ } else {
+ if (getContext().getLangOpts().AppleKext &&
+ MD->isVirtual() &&
+ ME->hasQualifier())
+ Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
+ else
+ Callee = CGM.GetAddrOfFunction(MD, Ty);
+ }
+
+ return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
+ CE->arg_begin(), CE->arg_end());
+}
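+
+// Rough illustration of the virtual-call decision above:
+//
+//   struct A { virtual void f(); };
+//   void g(A *p) { p->f(); p->A::f(); }
+//
+// The first call cannot be devirtualized (p's dynamic type is unknown), so it
+// goes through the vtable via BuildVirtualCall; the qualified call suppresses
+// the virtual mechanism and, the kext case aside, calls A::f directly.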
+
+RValue
+CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ const BinaryOperator *BO =
+ cast<BinaryOperator>(E->getCallee()->IgnoreParens());
+ const Expr *BaseExpr = BO->getLHS();
+ const Expr *MemFnExpr = BO->getRHS();
+
+ const MemberPointerType *MPT =
+ MemFnExpr->getType()->castAs<MemberPointerType>();
+
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->castAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+
+ // Get the member function pointer.
+ llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
+
+ // Emit the 'this' pointer.
+ llvm::Value *This;
+
+ if (BO->getOpcode() == BO_PtrMemI)
+ This = EmitScalarExpr(BaseExpr);
+ else
+ This = EmitLValue(BaseExpr).getAddress();
+
+ // Ask the ABI to load the callee. Note that This is modified.
+ llvm::Value *Callee =
+ CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, This, MemFnPtr, MPT);
+
+ CallArgList Args;
+
+ QualType ThisType =
+ getContext().getPointerType(getContext().getTagDeclType(RD));
+
+ // Push the this ptr.
+ Args.add(RValue::get(This), ThisType);
+
+ // And the rest of the call args
+ EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
+ return EmitCall(CGM.getTypes().arrangeFunctionCall(Args, FPT), Callee,
+ ReturnValue, Args);
+}
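+
+// Rough illustration of the member-pointer call above:
+//
+//   struct S { void m(int); };
+//   void call(S &s, void (S::*pmf)(int)) { (s.*pmf)(42); }
+//
+// The member function pointer is loaded as a scalar, the C++ ABI adjusts
+// 'this' and produces the real callee (covering the virtual case), and the
+// call is then emitted with the adjusted 'this' followed by the arguments.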
+
+RValue
+CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD,
+ ReturnValueSlot ReturnValue) {
+ assert(MD->isInstance() &&
+ "Trying to emit a member call expr on a static method!");
+ LValue LV = EmitLValue(E->getArg(0));
+ llvm::Value *This = LV.getAddress();
+
+ if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
+ MD->isTrivial()) {
+ llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
+ QualType Ty = E->getType();
+ EmitAggregateCopy(This, Src, Ty);
+ return RValue::get(This);
+ }
+
+ llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
+ return EmitCXXMemberCall(MD, Callee, ReturnValue, This, /*VTT=*/0,
+ E->arg_begin() + 1, E->arg_end());
+}
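+
+// Rough illustration of the trivial-assignment shortcut above: for
+//
+//   struct T { int a, b; };
+//   void assign(T &x, const T &y) { x = y; }
+//
+// the implicitly-defined copy assignment is trivial, so instead of calling
+// T::operator= the assignment is lowered directly to EmitAggregateCopy and
+// the address of 'x' is returned as the result.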
+
+RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ return CGM.getCUDARuntime().EmitCUDAKernelCallExpr(*this, E, ReturnValue);
+}
+
+static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ const CXXRecordDecl *Base) {
+ if (Base->isEmpty())
+ return;
+
+ DestPtr = CGF.EmitCastToVoidPtr(DestPtr);
+
+ const ASTRecordLayout &Layout = CGF.getContext().getASTRecordLayout(Base);
+ CharUnits Size = Layout.getNonVirtualSize();
+ CharUnits Align = Layout.getNonVirtualAlign();
+
+ llvm::Value *SizeVal = CGF.CGM.getSize(Size);
+
+ // If the type contains a pointer to data member we can't memset it to zero.
+ // Instead, create a null constant and copy it to the destination.
+ // TODO: there are other patterns besides zero that we can usefully memset,
+ // like -1, which happens to be the pattern used by member-pointers.
+ // TODO: isZeroInitializable can be over-conservative in the case where a
+ // virtual base contains a member pointer.
+ if (!CGF.CGM.getTypes().isZeroInitializable(Base)) {
+ llvm::Constant *NullConstant = CGF.CGM.EmitNullConstantForBase(Base);
+
+ llvm::GlobalVariable *NullVariable =
+ new llvm::GlobalVariable(CGF.CGM.getModule(), NullConstant->getType(),
+ /*isConstant=*/true,
+ llvm::GlobalVariable::PrivateLinkage,
+ NullConstant, Twine());
+ NullVariable->setAlignment(Align.getQuantity());
+ llvm::Value *SrcPtr = CGF.EmitCastToVoidPtr(NullVariable);
+
+ // Get and call the appropriate llvm.memcpy overload.
+ CGF.Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity());
+ return;
+ }
+
+ // Otherwise, just memset the whole thing to zero. This is legal
+ // because in LLVM, all default initializers (other than the ones we just
+ // handled above) are guaranteed to have a bit pattern of all zeros.
+ CGF.Builder.CreateMemSet(DestPtr, CGF.Builder.getInt8(0), SizeVal,
+ Align.getQuantity());
+}
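+
+// Rough illustration of why memset is not always sufficient above:
+//
+//   struct B { int B::*pm; };   // in the common ABI, a null data member
+//   struct D : B { int x; };    // pointer is all-ones, not zero
+//
+// When the B base subobject of a D must be zero-initialized before its
+// constructor runs, isZeroInitializable(B) is false, so the subobject is
+// copied from a null-constant global via memcpy instead of being memset.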
+
+void
+CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
+ AggValueSlot Dest) {
+ assert(!Dest.isIgnored() && "Must have a destination!");
+ const CXXConstructorDecl *CD = E->getConstructor();
+
+ // If we require zero initialization before (or instead of) calling the
+ // constructor, as can be the case with a non-user-provided default
+ // constructor, emit the zero initialization now, unless destination is
+ // already zeroed.
+ if (E->requiresZeroInitialization() && !Dest.isZeroed()) {
+ switch (E->getConstructionKind()) {
+ case CXXConstructExpr::CK_Delegating:
+ assert(0 && "Delegating constructor should not need zeroing");
+ case CXXConstructExpr::CK_Complete:
+ EmitNullInitialization(Dest.getAddr(), E->getType());
+ break;
+ case CXXConstructExpr::CK_VirtualBase:
+ case CXXConstructExpr::CK_NonVirtualBase:
+ EmitNullBaseClassInitialization(*this, Dest.getAddr(), CD->getParent());
+ break;
+ }
+ }
+
+ // If this is a call to a trivial default constructor, do nothing.
+ if (CD->isTrivial() && CD->isDefaultConstructor())
+ return;
+
+ // Elide the constructor if we're constructing from a temporary.
+ // The temporary check is required because Sema sets this on NRVO
+ // returns.
+ if (getContext().getLangOpts().ElideConstructors && E->isElidable()) {
+ assert(getContext().hasSameUnqualifiedType(E->getType(),
+ E->getArg(0)->getType()));
+ if (E->getArg(0)->isTemporaryObject(getContext(), CD->getParent())) {
+ EmitAggExpr(E->getArg(0), Dest);
+ return;
+ }
+ }
+
+ if (const ConstantArrayType *arrayType
+ = getContext().getAsConstantArrayType(E->getType())) {
+ EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
+ E->arg_begin(), E->arg_end());
+ } else {
+ CXXCtorType Type = Ctor_Complete;
+ bool ForVirtualBase = false;
+
+ switch (E->getConstructionKind()) {
+ case CXXConstructExpr::CK_Delegating:
+ // We should be emitting a constructor; GlobalDecl will assert this
+ Type = CurGD.getCtorType();
+ break;
+
+ case CXXConstructExpr::CK_Complete:
+ Type = Ctor_Complete;
+ break;
+
+ case CXXConstructExpr::CK_VirtualBase:
+ ForVirtualBase = true;
+ // fall-through
+
+ case CXXConstructExpr::CK_NonVirtualBase:
+ Type = Ctor_Base;
+ }
+
+ // Call the constructor.
+ EmitCXXConstructorCall(CD, Type, ForVirtualBase, Dest.getAddr(),
+ E->arg_begin(), E->arg_end());
+ }
+}
+
+void
+CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
+ llvm::Value *Src,
+ const Expr *Exp) {
+ if (const ExprWithCleanups *E = dyn_cast<ExprWithCleanups>(Exp))
+ Exp = E->getSubExpr();
+ assert(isa<CXXConstructExpr>(Exp) &&
+ "EmitSynthesizedCXXCopyCtor - unknown copy ctor expr");
+ const CXXConstructExpr* E = cast<CXXConstructExpr>(Exp);
+ const CXXConstructorDecl *CD = E->getConstructor();
+ RunCleanupsScope Scope(*this);
+
+ // If we require zero initialization before (or instead of) calling the
+ // constructor, as can be the case with a non-user-provided default
+ // constructor, emit the zero initialization now.
+ // FIXME. Do I still need this for a copy ctor synthesis?
+ if (E->requiresZeroInitialization())
+ EmitNullInitialization(Dest, E->getType());
+
+ assert(!getContext().getAsConstantArrayType(E->getType())
+ && "EmitSynthesizedCXXCopyCtor - Copied-in Array");
+ EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
+ E->arg_begin(), E->arg_end());
+}
+
+static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
+ const CXXNewExpr *E) {
+ if (!E->isArray())
+ return CharUnits::Zero();
+
+ // No cookie is required if the operator new[] being used is the
+ // reserved placement operator new[].
+ if (E->getOperatorNew()->isReservedGlobalPlacementOperator())
+ return CharUnits::Zero();
+
+ return CGF.CGM.getCXXABI().GetArrayCookieSize(E);
+}
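+
+// Rough note on the cookie above: for a non-placement 'new T[n]', the ABI may
+// reserve extra space in front of the array (the "cookie"), typically to
+// record the element count so that delete[] can destroy the right number of
+// elements; GetArrayCookieSize reports how much padding is needed.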
+
+static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
+ const CXXNewExpr *e,
+ unsigned minElements,
+ llvm::Value *&numElements,
+ llvm::Value *&sizeWithoutCookie) {
+ QualType type = e->getAllocatedType();
+
+ if (!e->isArray()) {
+ CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
+ sizeWithoutCookie
+ = llvm::ConstantInt::get(CGF.SizeTy, typeSize.getQuantity());
+ return sizeWithoutCookie;
+ }
+
+ // The width of size_t.
+ unsigned sizeWidth = CGF.SizeTy->getBitWidth();
+
+ // Figure out the cookie size.
+ llvm::APInt cookieSize(sizeWidth,
+ CalculateCookiePadding(CGF, e).getQuantity());
+
+ // Emit the array size expression.
+ // We multiply the size of all dimensions for NumElements.
+ // e.g for 'int[2][3]', ElemType is 'int' and NumElements is 6.
+ numElements = CGF.EmitScalarExpr(e->getArraySize());
+ assert(isa<llvm::IntegerType>(numElements->getType()));
+
+ // The number of elements can have an arbitrary integer type;
+ // essentially, we need to multiply it by a constant factor, add a
+ // cookie size, and verify that the result is representable as a
+ // size_t. That's just a gloss, though, and it's wrong in one
+ // important way: if the count is negative, it's an error even if
+ // the cookie size would bring the total size >= 0.
+ bool isSigned
+ = e->getArraySize()->getType()->isSignedIntegerOrEnumerationType();
+ llvm::IntegerType *numElementsType
+ = cast<llvm::IntegerType>(numElements->getType());
+ unsigned numElementsWidth = numElementsType->getBitWidth();
+
+ // Compute the constant factor.
+ llvm::APInt arraySizeMultiplier(sizeWidth, 1);
+ while (const ConstantArrayType *CAT
+ = CGF.getContext().getAsConstantArrayType(type)) {
+ type = CAT->getElementType();
+ arraySizeMultiplier *= CAT->getSize();
+ }
+
+ CharUnits typeSize = CGF.getContext().getTypeSizeInChars(type);
+ llvm::APInt typeSizeMultiplier(sizeWidth, typeSize.getQuantity());
+ typeSizeMultiplier *= arraySizeMultiplier;
+
+ // This will be a size_t.
+ llvm::Value *size;
+
+ // If someone is doing 'new int[42]' there is no need to do a dynamic check.
+ // Don't bloat the -O0 code.
+ if (llvm::ConstantInt *numElementsC =
+ dyn_cast<llvm::ConstantInt>(numElements)) {
+ const llvm::APInt &count = numElementsC->getValue();
+
+ bool hasAnyOverflow = false;
+
+ // If 'count' was a negative number, it's an overflow.
+ if (isSigned && count.isNegative())
+ hasAnyOverflow = true;
+
+ // We want to do all this arithmetic in size_t. If numElements is
+ // wider than that, check whether it's already too big, and if so,
+ // overflow.
+ else if (numElementsWidth > sizeWidth &&
+ numElementsWidth - sizeWidth > count.countLeadingZeros())
+ hasAnyOverflow = true;
+
+ // Okay, compute a count at the right width.
+ llvm::APInt adjustedCount = count.zextOrTrunc(sizeWidth);
+
+ // If there is a brace-initializer, we cannot allocate fewer elements than
+ // there are initializers. If we do, that's treated like an overflow.
+ if (adjustedCount.ult(minElements))
+ hasAnyOverflow = true;
+
+ // Scale numElements by that. This might overflow, but we don't
+ // care because it only overflows if allocationSize does, too, and
+ // if that overflows then we shouldn't use this.
+ numElements = llvm::ConstantInt::get(CGF.SizeTy,
+ adjustedCount * arraySizeMultiplier);
+
+ // Compute the size before cookie, and track whether it overflowed.
+ bool overflow;
+ llvm::APInt allocationSize
+ = adjustedCount.umul_ov(typeSizeMultiplier, overflow);
+ hasAnyOverflow |= overflow;
+
+ // Add in the cookie, and check whether it's overflowed.
+ if (cookieSize != 0) {
+ // Save the current size without a cookie. This shouldn't be
+ // used if there was overflow.
+ sizeWithoutCookie = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
+
+ allocationSize = allocationSize.uadd_ov(cookieSize, overflow);
+ hasAnyOverflow |= overflow;
+ }
+
+ // On overflow, produce a -1 so operator new will fail.
+ if (hasAnyOverflow) {
+ size = llvm::Constant::getAllOnesValue(CGF.SizeTy);
+ } else {
+ size = llvm::ConstantInt::get(CGF.SizeTy, allocationSize);
+ }
+
+ // Otherwise, we might need to use the overflow intrinsics.
+ } else {
+ // There are up to five conditions we need to test for:
+ // 1) if isSigned, we need to check whether numElements is negative;
+ // 2) if numElementsWidth > sizeWidth, we need to check whether
+ // numElements is larger than something representable in size_t;
+ // 3) if minElements > 0, we need to check whether numElements is smaller
+ // than that;
+ // 4) we need to compute
+ // sizeWithoutCookie := numElements * typeSizeMultiplier
+ // and check whether it overflows; and
+ // 5) if we need a cookie, we need to compute
+ // size := sizeWithoutCookie + cookieSize
+ // and check whether it overflows.
+
+ llvm::Value *hasOverflow = 0;
+
+ // If numElementsWidth > sizeWidth, then one way or another, we're
+ // going to have to do a comparison for (2), and this happens to
+ // take care of (1), too.
+ if (numElementsWidth > sizeWidth) {
+ llvm::APInt threshold(numElementsWidth, 1);
+ threshold <<= sizeWidth;
+
+ llvm::Value *thresholdV
+ = llvm::ConstantInt::get(numElementsType, threshold);
+
+ hasOverflow = CGF.Builder.CreateICmpUGE(numElements, thresholdV);
+ numElements = CGF.Builder.CreateTrunc(numElements, CGF.SizeTy);
+
+ // Otherwise, if we're signed, we want to sext up to size_t.
+ } else if (isSigned) {
+ if (numElementsWidth < sizeWidth)
+ numElements = CGF.Builder.CreateSExt(numElements, CGF.SizeTy);
+
+ // If there's a non-1 type size multiplier, then we can do the
+ // signedness check at the same time as we do the multiply
+ // because a negative number times anything will cause an
+ // unsigned overflow. Otherwise, we have to do it here. But at least
+ // in this case, we can subsume the >= minElements check.
+ if (typeSizeMultiplier == 1)
+ hasOverflow = CGF.Builder.CreateICmpSLT(numElements,
+ llvm::ConstantInt::get(CGF.SizeTy, minElements));
+
+ // Otherwise, zext up to size_t if necessary.
+ } else if (numElementsWidth < sizeWidth) {
+ numElements = CGF.Builder.CreateZExt(numElements, CGF.SizeTy);
+ }
+
+ assert(numElements->getType() == CGF.SizeTy);
+
+ if (minElements) {
+ // Don't allow allocation of fewer elements than we have initializers.
+ if (!hasOverflow) {
+ hasOverflow = CGF.Builder.CreateICmpULT(numElements,
+ llvm::ConstantInt::get(CGF.SizeTy, minElements));
+ } else if (numElementsWidth > sizeWidth) {
+ // The other existing overflow subsumes this check.
+ // We do an unsigned comparison, since any signed value < -1 is
+ // taken care of either above or below.
+ hasOverflow = CGF.Builder.CreateOr(hasOverflow,
+ CGF.Builder.CreateICmpULT(numElements,
+ llvm::ConstantInt::get(CGF.SizeTy, minElements)));
+ }
+ }
+
+ size = numElements;
+
+ // Multiply by the type size if necessary. This multiplier
+ // includes all the factors for nested arrays.
+ //
+ // This step also causes numElements to be scaled up by the
+ // nested-array factor if necessary. Overflow on this computation
+ // can be ignored because the result shouldn't be used if
+ // allocation fails.
+ if (typeSizeMultiplier != 1) {
+ llvm::Value *umul_with_overflow
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::umul_with_overflow, CGF.SizeTy);
+
+ llvm::Value *tsmV =
+ llvm::ConstantInt::get(CGF.SizeTy, typeSizeMultiplier);
+ llvm::Value *result =
+ CGF.Builder.CreateCall2(umul_with_overflow, size, tsmV);
+
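+ // The intrinsic returns a {size_t, i1} pair: the product and a flag that
+ // is set if the multiplication overflowed.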
+ llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
+ if (hasOverflow)
+ hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
+ else
+ hasOverflow = overflowed;
+
+ size = CGF.Builder.CreateExtractValue(result, 0);
+
+ // Also scale up numElements by the array size multiplier.
+ if (arraySizeMultiplier != 1) {
+ // If the base element type size is 1, then we can re-use the
+ // multiply we just did.
+ if (typeSize.isOne()) {
+ assert(arraySizeMultiplier == typeSizeMultiplier);
+ numElements = size;
+
+ // Otherwise we need a separate multiply.
+ } else {
+ llvm::Value *asmV =
+ llvm::ConstantInt::get(CGF.SizeTy, arraySizeMultiplier);
+ numElements = CGF.Builder.CreateMul(numElements, asmV);
+ }
+ }
+ } else {
+ // numElements doesn't need to be scaled.
+ assert(arraySizeMultiplier == 1);
+ }
+
+ // Add in the cookie size if necessary.
+ if (cookieSize != 0) {
+ sizeWithoutCookie = size;
+
+ llvm::Value *uadd_with_overflow
+ = CGF.CGM.getIntrinsic(llvm::Intrinsic::uadd_with_overflow, CGF.SizeTy);
+
+ llvm::Value *cookieSizeV = llvm::ConstantInt::get(CGF.SizeTy, cookieSize);
+ llvm::Value *result =
+ CGF.Builder.CreateCall2(uadd_with_overflow, size, cookieSizeV);
+
+ llvm::Value *overflowed = CGF.Builder.CreateExtractValue(result, 1);
+ if (hasOverflow)
+ hasOverflow = CGF.Builder.CreateOr(hasOverflow, overflowed);
+ else
+ hasOverflow = overflowed;
+
+ size = CGF.Builder.CreateExtractValue(result, 0);
+ }
+
+ // If we had any possibility of dynamic overflow, make a select to
+ // overwrite 'size' with an all-ones value, which should cause
+ // operator new to throw.
+ if (hasOverflow)
+ size = CGF.Builder.CreateSelect(hasOverflow,
+ llvm::Constant::getAllOnesValue(CGF.SizeTy),
+ size);
+ }
+
+ if (cookieSize == 0)
+ sizeWithoutCookie = size;
+ else
+ assert(sizeWithoutCookie && "didn't set sizeWithoutCookie?");
+
+ return size;
+}
+
+static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
+ QualType AllocType, llvm::Value *NewPtr) {
+
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
+ if (!CGF.hasAggregateLLVMType(AllocType))
+ CGF.EmitScalarInit(Init, 0, CGF.MakeAddrLValue(NewPtr, AllocType,
+ Alignment),
+ false);
+ else if (AllocType->isAnyComplexType())
+ CGF.EmitComplexExprIntoAddr(Init, NewPtr,
+ AllocType.isVolatileQualified());
+ else {
+ AggValueSlot Slot
+ = AggValueSlot::forAddr(NewPtr, Alignment, AllocType.getQualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ CGF.EmitAggExpr(Init, Slot);
+
+ CGF.MaybeEmitStdInitializerListCleanup(NewPtr, Init);
+ }
+}
+
+void
+CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
+ QualType elementType,
+ llvm::Value *beginPtr,
+ llvm::Value *numElements) {
+ if (!E->hasInitializer())
+ return; // We have a POD type.
+
+ llvm::Value *explicitPtr = beginPtr;
+ // Find the end of the array, hoisted out of the loop.
+ llvm::Value *endPtr =
+ Builder.CreateInBoundsGEP(beginPtr, numElements, "array.end");
+
+ unsigned initializerElements = 0;
+
+ const Expr *Init = E->getInitializer();
+ llvm::AllocaInst *endOfInit = 0;
+ QualType::DestructionKind dtorKind = elementType.isDestructedType();
+ EHScopeStack::stable_iterator cleanup;
+ llvm::Instruction *cleanupDominator = 0;
+ // If the initializer is an initializer list, first do the explicit elements.
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
+ initializerElements = ILE->getNumInits();
+
+ // Enter a partial-destruction cleanup if necessary.
+ if (needsEHCleanup(dtorKind)) {
+ // In principle we could tell the cleanup where we are more
+ // directly, but the control flow can get so varied here that it
+ // would actually be quite complex. Therefore we go through an
+ // alloca.
+ endOfInit = CreateTempAlloca(beginPtr->getType(), "array.endOfInit");
+ cleanupDominator = Builder.CreateStore(beginPtr, endOfInit);
+ pushIrregularPartialArrayCleanup(beginPtr, endOfInit, elementType,
+ getDestroyer(dtorKind));
+ cleanup = EHStack.stable_begin();
+ }
+
+ for (unsigned i = 0, e = ILE->getNumInits(); i != e; ++i) {
+ // Tell the cleanup that it needs to destroy up to this
+ // element. TODO: some of these stores can be trivially
+ // observed to be unnecessary.
+ if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit);
+ StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr);
+ explicitPtr = Builder.CreateConstGEP1_32(explicitPtr, 1, "array.exp.next");
+ }
+
+ // The remaining elements are filled with the array filler expression.
+ Init = ILE->getArrayFiller();
+ }
+
+ // Create the continuation block.
+ llvm::BasicBlock *contBB = createBasicBlock("new.loop.end");
+
+ // If the number of elements isn't constant, we now have to check whether
+ // there is anything left to initialize.
+ if (llvm::ConstantInt *constNum = dyn_cast<llvm::ConstantInt>(numElements)) {
+ // If all elements have already been initialized, skip the whole loop.
+ if (constNum->getZExtValue() <= initializerElements) {
+ // If there was a cleanup, deactivate it.
+ if (cleanupDominator)
+ DeactivateCleanupBlock(cleanup, cleanupDominator);
+ return;
+ }
+ } else {
+ llvm::BasicBlock *nonEmptyBB = createBasicBlock("new.loop.nonempty");
+ llvm::Value *isEmpty = Builder.CreateICmpEQ(explicitPtr, endPtr,
+ "array.isempty");
+ Builder.CreateCondBr(isEmpty, contBB, nonEmptyBB);
+ EmitBlock(nonEmptyBB);
+ }
+
+ // Enter the loop.
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *loopBB = createBasicBlock("new.loop");
+
+ EmitBlock(loopBB);
+
+ // Set up the current-element phi.
+ llvm::PHINode *curPtr =
+ Builder.CreatePHI(explicitPtr->getType(), 2, "array.cur");
+ curPtr->addIncoming(explicitPtr, entryBB);
+
+ // Store the new cleanup position for irregular cleanups.
+ if (endOfInit) Builder.CreateStore(curPtr, endOfInit);
+
+ // Enter a partial-destruction cleanup if necessary.
+ if (!cleanupDominator && needsEHCleanup(dtorKind)) {
+ pushRegularPartialArrayCleanup(beginPtr, curPtr, elementType,
+ getDestroyer(dtorKind));
+ cleanup = EHStack.stable_begin();
+ cleanupDominator = Builder.CreateUnreachable();
+ }
+
+ // Emit the initializer into this element.
+ StoreAnyExprIntoOneUnit(*this, Init, E->getAllocatedType(), curPtr);
+
+ // Leave the cleanup if we entered one.
+ if (cleanupDominator) {
+ DeactivateCleanupBlock(cleanup, cleanupDominator);
+ cleanupDominator->eraseFromParent();
+ }
+
+ // Advance to the next element.
+ llvm::Value *nextPtr = Builder.CreateConstGEP1_32(curPtr, 1, "array.next");
+
+ // Check whether we've gotten to the end of the array and, if so,
+ // exit the loop.
+ llvm::Value *isEnd = Builder.CreateICmpEQ(nextPtr, endPtr, "array.atend");
+ Builder.CreateCondBr(isEnd, contBB, loopBB);
+ curPtr->addIncoming(nextPtr, Builder.GetInsertBlock());
+
+ EmitBlock(contBB);
+}
+
+static void EmitZeroMemSet(CodeGenFunction &CGF, QualType T,
+ llvm::Value *NewPtr, llvm::Value *Size) {
+ CGF.EmitCastToVoidPtr(NewPtr);
+ CharUnits Alignment = CGF.getContext().getTypeAlignInChars(T);
+ CGF.Builder.CreateMemSet(NewPtr, CGF.Builder.getInt8(0), Size,
+ Alignment.getQuantity(), false);
+}
+
+static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
+ QualType ElementType,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ llvm::Value *AllocSizeWithoutCookie) {
+ const Expr *Init = E->getInitializer();
+ if (E->isArray()) {
+ if (const CXXConstructExpr *CCE = dyn_cast_or_null<CXXConstructExpr>(Init)){
+ CXXConstructorDecl *Ctor = CCE->getConstructor();
+ bool RequiresZeroInitialization = false;
+ if (Ctor->isTrivial()) {
+ // If the new-expression did not specify value-initialization, then there
+ // is no initialization.
+ if (!CCE->requiresZeroInitialization() || Ctor->getParent()->isEmpty())
+ return;
+
+ if (CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
+ // Optimization: since zero initialization will just set the memory
+ // to all zeroes, generate a single memset to do it in one shot.
+ EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
+ return;
+ }
+
+ RequiresZeroInitialization = true;
+ }
+
+ CGF.EmitCXXAggrConstructorCall(Ctor, NumElements, NewPtr,
+ CCE->arg_begin(), CCE->arg_end(),
+ RequiresZeroInitialization);
+ return;
+ } else if (Init && isa<ImplicitValueInitExpr>(Init) &&
+ CGF.CGM.getTypes().isZeroInitializable(ElementType)) {
+ // Optimization: since zero initialization will just set the memory
+ // to all zeroes, generate a single memset to do it in one shot.
+ EmitZeroMemSet(CGF, ElementType, NewPtr, AllocSizeWithoutCookie);
+ return;
+ }
+ CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements);
+ return;
+ }
+
+ if (!Init)
+ return;
+
+ StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
+}
+
+namespace {
+ /// A cleanup to call the given 'operator delete' function upon
+ /// abnormal exit from a new expression.
+ class CallDeleteDuringNew : public EHScopeStack::Cleanup {
+ size_t NumPlacementArgs;
+ const FunctionDecl *OperatorDelete;
+ llvm::Value *Ptr;
+ llvm::Value *AllocSize;
+
+ RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
+
+ public:
+ static size_t getExtraSize(size_t NumPlacementArgs) {
+ return NumPlacementArgs * sizeof(RValue);
+ }
+
+ CallDeleteDuringNew(size_t NumPlacementArgs,
+ const FunctionDecl *OperatorDelete,
+ llvm::Value *Ptr,
+ llvm::Value *AllocSize)
+ : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
+ Ptr(Ptr), AllocSize(AllocSize) {}
+
+ void setPlacementArg(unsigned I, RValue Arg) {
+ assert(I < NumPlacementArgs && "index out of range");
+ getPlacementArgs()[I] = Arg;
+ }
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ const FunctionProtoType *FPT
+ = OperatorDelete->getType()->getAs<FunctionProtoType>();
+ assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
+ (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
+
+ CallArgList DeleteArgs;
+
+ // The first argument is always a void*.
+ FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
+ DeleteArgs.add(RValue::get(Ptr), *AI++);
+
+ // A member 'operator delete' can take an extra 'size_t' argument.
+ if (FPT->getNumArgs() == NumPlacementArgs + 2)
+ DeleteArgs.add(RValue::get(AllocSize), *AI++);
+
+ // Pass the rest of the arguments, which must match exactly.
+ for (unsigned I = 0; I != NumPlacementArgs; ++I)
+ DeleteArgs.add(getPlacementArgs()[I], *AI++);
+
+ // Call 'operator delete'.
+ CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
+ CGF.CGM.GetAddrOfFunction(OperatorDelete),
+ ReturnValueSlot(), DeleteArgs, OperatorDelete);
+ }
+ };
+
+ /// A cleanup to call the given 'operator delete' function upon
+ /// abnormal exit from a new expression when the new expression is
+ /// conditional.
+ class CallDeleteDuringConditionalNew : public EHScopeStack::Cleanup {
+ size_t NumPlacementArgs;
+ const FunctionDecl *OperatorDelete;
+ DominatingValue<RValue>::saved_type Ptr;
+ DominatingValue<RValue>::saved_type AllocSize;
+
+ DominatingValue<RValue>::saved_type *getPlacementArgs() {
+ return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
+ }
+
+ public:
+ static size_t getExtraSize(size_t NumPlacementArgs) {
+ return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
+ }
+
+ CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
+ const FunctionDecl *OperatorDelete,
+ DominatingValue<RValue>::saved_type Ptr,
+ DominatingValue<RValue>::saved_type AllocSize)
+ : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
+ Ptr(Ptr), AllocSize(AllocSize) {}
+
+ void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
+ assert(I < NumPlacementArgs && "index out of range");
+ getPlacementArgs()[I] = Arg;
+ }
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ const FunctionProtoType *FPT
+ = OperatorDelete->getType()->getAs<FunctionProtoType>();
+ assert(FPT->getNumArgs() == NumPlacementArgs + 1 ||
+ (FPT->getNumArgs() == 2 && NumPlacementArgs == 0));
+
+ CallArgList DeleteArgs;
+
+ // The first argument is always a void*.
+ FunctionProtoType::arg_type_iterator AI = FPT->arg_type_begin();
+ DeleteArgs.add(Ptr.restore(CGF), *AI++);
+
+ // A member 'operator delete' can take an extra 'size_t' argument.
+ if (FPT->getNumArgs() == NumPlacementArgs + 2) {
+ RValue RV = AllocSize.restore(CGF);
+ DeleteArgs.add(RV, *AI++);
+ }
+
+ // Pass the rest of the arguments, which must match exactly.
+ for (unsigned I = 0; I != NumPlacementArgs; ++I) {
+ RValue RV = getPlacementArgs()[I].restore(CGF);
+ DeleteArgs.add(RV, *AI++);
+ }
+
+ // Call 'operator delete'.
+ CGF.EmitCall(CGF.CGM.getTypes().arrangeFunctionCall(DeleteArgs, FPT),
+ CGF.CGM.GetAddrOfFunction(OperatorDelete),
+ ReturnValueSlot(), DeleteArgs, OperatorDelete);
+ }
+ };
+}
+
+/// Enter a cleanup to call 'operator delete' if the initializer in a
+/// new-expression throws.
+static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
+ const CXXNewExpr *E,
+ llvm::Value *NewPtr,
+ llvm::Value *AllocSize,
+ const CallArgList &NewArgs) {
+ // If we're not inside a conditional branch, then the cleanup will
+ // dominate and we can do the easier (and more efficient) thing.
+ if (!CGF.isInConditionalBranch()) {
+ CallDeleteDuringNew *Cleanup = CGF.EHStack
+ .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
+ E->getNumPlacementArgs(),
+ E->getOperatorDelete(),
+ NewPtr, AllocSize);
+ for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
+ Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
+
+ return;
+ }
+
+ // Otherwise, we need to save all this stuff.
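+ // The pointer, size, and placement arguments may not dominate the point
+ // where the cleanup is eventually emitted, so stash them in a reloadable
+ // form first.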
+ DominatingValue<RValue>::saved_type SavedNewPtr =
+ DominatingValue<RValue>::save(CGF, RValue::get(NewPtr));
+ DominatingValue<RValue>::saved_type SavedAllocSize =
+ DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
+
+ CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
+ .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
+ E->getNumPlacementArgs(),
+ E->getOperatorDelete(),
+ SavedNewPtr,
+ SavedAllocSize);
+ for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
+ Cleanup->setPlacementArg(I,
+ DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
+
+ CGF.initFullExprCleanup();
+}
+
+llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
+ // The element type being allocated.
+ QualType allocType = getContext().getBaseElementType(E->getAllocatedType());
+
+ // 1. Build a call to the allocation function.
+ FunctionDecl *allocator = E->getOperatorNew();
+ const FunctionProtoType *allocatorType =
+ allocator->getType()->castAs<FunctionProtoType>();
+
+ CallArgList allocatorArgs;
+
+ // The allocation size is the first argument.
+ QualType sizeType = getContext().getSizeType();
+
+ // If there is a brace-initializer, we cannot allocate fewer elements than
+ // there are initializers.
+ unsigned minElements = 0;
+ if (E->isArray() && E->hasInitializer()) {
+ if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
+ minElements = ILE->getNumInits();
+ }
+
+ llvm::Value *numElements = 0;
+ llvm::Value *allocSizeWithoutCookie = 0;
+ llvm::Value *allocSize =
+ EmitCXXNewAllocSize(*this, E, minElements, numElements,
+ allocSizeWithoutCookie);
+
+ allocatorArgs.add(RValue::get(allocSize), sizeType);
+
+ // Emit the rest of the arguments.
+ // FIXME: Ideally, this should just use EmitCallArgs.
+ CXXNewExpr::const_arg_iterator placementArg = E->placement_arg_begin();
+
+ // First, use the types from the function type.
+ // We start at 1 here because the first argument (the allocation size)
+ // has already been emitted.
+ for (unsigned i = 1, e = allocatorType->getNumArgs(); i != e;
+ ++i, ++placementArg) {
+ QualType argType = allocatorType->getArgType(i);
+
+ assert(getContext().hasSameUnqualifiedType(argType.getNonReferenceType(),
+ placementArg->getType()) &&
+ "type mismatch in call argument!");
+
+ EmitCallArg(allocatorArgs, *placementArg, argType);
+ }
+
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((placementArg == E->placement_arg_end() ||
+ allocatorType->isVariadic()) &&
+ "Extra arguments to non-variadic function!");
+
+ // If we still have any arguments, emit them using the type of the argument.
+ for (CXXNewExpr::const_arg_iterator placementArgsEnd = E->placement_arg_end();
+ placementArg != placementArgsEnd; ++placementArg) {
+ EmitCallArg(allocatorArgs, *placementArg, placementArg->getType());
+ }
+
+ // Emit the allocation call. If the allocator is a global placement
+ // operator, just "inline" it directly.
+ RValue RV;
+ if (allocator->isReservedGlobalPlacementOperator()) {
+ assert(allocatorArgs.size() == 2);
+ RV = allocatorArgs[1].RV;
+ // TODO: kill any unnecessary computations done for the size
+ // argument.
+ } else {
+ RV = EmitCall(CGM.getTypes().arrangeFunctionCall(allocatorArgs,
+ allocatorType),
+ CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
+ allocatorArgs, allocator);
+ }
+
+ // Emit a null check on the allocation result if the allocation
+ // function is allowed to return null (because it has a non-throwing
+ // exception spec; for this part, we inline
+ // CXXNewExpr::shouldNullCheckAllocation()) and we have an
+ // interesting initializer.
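+ // e.g. for 'new (std::nothrow) T(args)', a null result must skip the
+ // constructor call emitted below.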
+ bool nullCheck = allocatorType->isNothrow(getContext()) &&
+ (!allocType.isPODType(getContext()) || E->hasInitializer());
+
+ llvm::BasicBlock *nullCheckBB = 0;
+ llvm::BasicBlock *contBB = 0;
+
+ llvm::Value *allocation = RV.getScalarVal();
+ unsigned AS =
+ cast<llvm::PointerType>(allocation->getType())->getAddressSpace();
+
+ // The null-check means that the initializer is conditionally
+ // evaluated.
+ ConditionalEvaluation conditional(*this);
+
+ if (nullCheck) {
+ conditional.begin(*this);
+
+ nullCheckBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *notNullBB = createBasicBlock("new.notnull");
+ contBB = createBasicBlock("new.cont");
+
+ llvm::Value *isNull = Builder.CreateIsNull(allocation, "new.isnull");
+ Builder.CreateCondBr(isNull, contBB, notNullBB);
+ EmitBlock(notNullBB);
+ }
+
+ // If there's an operator delete, enter a cleanup to call it if an
+ // exception is thrown.
+ EHScopeStack::stable_iterator operatorDeleteCleanup;
+ llvm::Instruction *cleanupDominator = 0;
+ if (E->getOperatorDelete() &&
+ !E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
+ EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
+ operatorDeleteCleanup = EHStack.stable_begin();
+ cleanupDominator = Builder.CreateUnreachable();
+ }
+
+ assert((allocSize == allocSizeWithoutCookie) ==
+ CalculateCookiePadding(*this, E).isZero());
+ if (allocSize != allocSizeWithoutCookie) {
+ assert(E->isArray());
+ allocation = CGM.getCXXABI().InitializeArrayCookie(*this, allocation,
+ numElements,
+ E, allocType);
+ }
+
+ llvm::Type *elementPtrTy
+ = ConvertTypeForMem(allocType)->getPointerTo(AS);
+ llvm::Value *result = Builder.CreateBitCast(allocation, elementPtrTy);
+
+ EmitNewInitializer(*this, E, allocType, result, numElements,
+ allocSizeWithoutCookie);
+ if (E->isArray()) {
+ // 'result' is a pointer to the base element type. If we're
+ // allocating an array of arrays, we'll need to cast back to the
+ // array pointer type.
+ llvm::Type *resultType = ConvertTypeForMem(E->getType());
+ if (result->getType() != resultType)
+ result = Builder.CreateBitCast(result, resultType);
+ }
+
+ // Deactivate the 'operator delete' cleanup if we finished
+ // initialization.
+ if (operatorDeleteCleanup.isValid()) {
+ DeactivateCleanupBlock(operatorDeleteCleanup, cleanupDominator);
+ cleanupDominator->eraseFromParent();
+ }
+
+ if (nullCheck) {
+ conditional.end(*this);
+
+ llvm::BasicBlock *notNullBB = Builder.GetInsertBlock();
+ EmitBlock(contBB);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(result->getType(), 2);
+ PHI->addIncoming(result, notNullBB);
+ PHI->addIncoming(llvm::Constant::getNullValue(result->getType()),
+ nullCheckBB);
+
+ result = PHI;
+ }
+
+ return result;
+}
+
+void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
+ llvm::Value *Ptr,
+ QualType DeleteTy) {
+ assert(DeleteFD->getOverloadedOperator() == OO_Delete);
+
+ const FunctionProtoType *DeleteFTy =
+ DeleteFD->getType()->getAs<FunctionProtoType>();
+
+ CallArgList DeleteArgs;
+
+ // Check if we need to pass the size to the delete operator.
+ llvm::Value *Size = 0;
+ QualType SizeTy;
+ if (DeleteFTy->getNumArgs() == 2) {
+ SizeTy = DeleteFTy->getArgType(1);
+ CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
+ Size = llvm::ConstantInt::get(ConvertType(SizeTy),
+ DeleteTypeSize.getQuantity());
+ }
+
+ QualType ArgTy = DeleteFTy->getArgType(0);
+ llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
+ DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
+
+ if (Size)
+ DeleteArgs.add(RValue::get(Size), SizeTy);
+
+ // Emit the call to delete.
+ EmitCall(CGM.getTypes().arrangeFunctionCall(DeleteArgs, DeleteFTy),
+ CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
+ DeleteArgs, DeleteFD);
+}
+
+namespace {
+ /// Calls the given 'operator delete' on a single object.
+ struct CallObjectDelete : EHScopeStack::Cleanup {
+ llvm::Value *Ptr;
+ const FunctionDecl *OperatorDelete;
+ QualType ElementType;
+
+ CallObjectDelete(llvm::Value *Ptr,
+ const FunctionDecl *OperatorDelete,
+ QualType ElementType)
+ : Ptr(Ptr), OperatorDelete(OperatorDelete), ElementType(ElementType) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType);
+ }
+ };
+}
+
+/// Emit the code for deleting a single object.
+static void EmitObjectDelete(CodeGenFunction &CGF,
+ const FunctionDecl *OperatorDelete,
+ llvm::Value *Ptr,
+ QualType ElementType,
+ bool UseGlobalDelete) {
+ // Find the destructor for the type, if applicable. If the
+ // destructor is virtual, we'll just emit the vcall and return.
+ const CXXDestructorDecl *Dtor = 0;
+ if (const RecordType *RT = ElementType->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasDefinition() && !RD->hasTrivialDestructor()) {
+ Dtor = RD->getDestructor();
+
+ if (Dtor->isVirtual()) {
+ if (UseGlobalDelete) {
+ // If we're supposed to call the global delete, make sure we do so
+ // even if the destructor throws.
+ CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
+ Ptr, OperatorDelete,
+ ElementType);
+ }
+
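+ // The deleting destructor frees the storage itself; when the global
+ // 'operator delete' is requested we invoke the complete destructor
+ // instead and let the cleanup pushed above do the deallocation.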
+ llvm::Type *Ty =
+ CGF.getTypes().GetFunctionType(
+ CGF.getTypes().arrangeCXXDestructor(Dtor, Dtor_Complete));
+
+ llvm::Value *Callee
+ = CGF.BuildVirtualCall(Dtor,
+ UseGlobalDelete? Dtor_Complete : Dtor_Deleting,
+ Ptr, Ty);
+ CGF.EmitCXXMemberCall(Dtor, Callee, ReturnValueSlot(), Ptr, /*VTT=*/0,
+ 0, 0);
+
+ if (UseGlobalDelete) {
+ CGF.PopCleanupBlock();
+ }
+
+ return;
+ }
+ }
+ }
+
+ // Make sure that we call delete even if the dtor throws.
+ // This doesn't have to be a conditional cleanup because we're going
+ // to pop it off in a second.
+ CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
+ Ptr, OperatorDelete, ElementType);
+
+ if (Dtor)
+ CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete,
+ /*ForVirtualBase=*/false, Ptr);
+ else if (CGF.getLangOpts().ObjCAutoRefCount &&
+ ElementType->isObjCLifetimeType()) {
+ switch (ElementType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong: {
+ // Load the pointer value.
+ llvm::Value *PtrValue = CGF.Builder.CreateLoad(Ptr,
+ ElementType.isVolatileQualified());
+
+ CGF.EmitARCRelease(PtrValue, /*precise*/ true);
+ break;
+ }
+
+ case Qualifiers::OCL_Weak:
+ CGF.EmitARCDestroyWeak(Ptr);
+ break;
+ }
+ }
+
+ CGF.PopCleanupBlock();
+}
+
+namespace {
+ /// Calls the given 'operator delete' on an array of objects.
+ struct CallArrayDelete : EHScopeStack::Cleanup {
+ llvm::Value *Ptr;
+ const FunctionDecl *OperatorDelete;
+ llvm::Value *NumElements;
+ QualType ElementType;
+ CharUnits CookieSize;
+
+ CallArrayDelete(llvm::Value *Ptr,
+ const FunctionDecl *OperatorDelete,
+ llvm::Value *NumElements,
+ QualType ElementType,
+ CharUnits CookieSize)
+ : Ptr(Ptr), OperatorDelete(OperatorDelete), NumElements(NumElements),
+ ElementType(ElementType), CookieSize(CookieSize) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ const FunctionProtoType *DeleteFTy =
+ OperatorDelete->getType()->getAs<FunctionProtoType>();
+ assert(DeleteFTy->getNumArgs() == 1 || DeleteFTy->getNumArgs() == 2);
+
+ CallArgList Args;
+
+ // Pass the pointer as the first argument.
+ QualType VoidPtrTy = DeleteFTy->getArgType(0);
+ llvm::Value *DeletePtr
+ = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
+ Args.add(RValue::get(DeletePtr), VoidPtrTy);
+
+ // Pass the original requested size as the second argument.
+ if (DeleteFTy->getNumArgs() == 2) {
+ QualType size_t = DeleteFTy->getArgType(1);
+ llvm::IntegerType *SizeTy
+ = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
+
+ CharUnits ElementTypeSize =
+ CGF.CGM.getContext().getTypeSizeInChars(ElementType);
+
+ // The size of an element, multiplied by the number of elements.
+ llvm::Value *Size
+ = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
+ Size = CGF.Builder.CreateMul(Size, NumElements);
+
+ // Plus the size of the cookie if applicable.
+ if (!CookieSize.isZero()) {
+ llvm::Value *CookieSizeV
+ = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
+ Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
+ }
+
+ Args.add(RValue::get(Size), size_t);
+ }
+
+ // Emit the call to delete.
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(Args, DeleteFTy),
+ CGF.CGM.GetAddrOfFunction(OperatorDelete),
+ ReturnValueSlot(), Args, OperatorDelete);
+ }
+ };
+}
+
+/// Emit the code for deleting an array of objects.
+static void EmitArrayDelete(CodeGenFunction &CGF,
+ const CXXDeleteExpr *E,
+ llvm::Value *deletedPtr,
+ QualType elementType) {
+ llvm::Value *numElements = 0;
+ llvm::Value *allocatedPtr = 0;
+ CharUnits cookieSize;
+ CGF.CGM.getCXXABI().ReadArrayCookie(CGF, deletedPtr, E, elementType,
+ numElements, allocatedPtr, cookieSize);
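+ // ReadArrayCookie recovers the element count and the pointer to the start
+ // of the original allocation (in front of the cookie) from the ABI-defined
+ // array cookie.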
+
+ assert(allocatedPtr && "ReadArrayCookie didn't set allocated pointer");
+
+ // Make sure that we call delete even if one of the dtors throws.
+ const FunctionDecl *operatorDelete = E->getOperatorDelete();
+ CGF.EHStack.pushCleanup<CallArrayDelete>(NormalAndEHCleanup,
+ allocatedPtr, operatorDelete,
+ numElements, elementType,
+ cookieSize);
+
+ // Destroy the elements.
+ if (QualType::DestructionKind dtorKind = elementType.isDestructedType()) {
+ assert(numElements && "no element count for a type with a destructor!");
+
+ llvm::Value *arrayEnd =
+ CGF.Builder.CreateInBoundsGEP(deletedPtr, numElements, "delete.end");
+
+ // Note that it is legal to allocate a zero-length array, and we
+ // can never fold the check away because the length should always
+ // come from a cookie.
+ CGF.emitArrayDestroy(deletedPtr, arrayEnd, elementType,
+ CGF.getDestroyer(dtorKind),
+ /*checkZeroLength*/ true,
+ CGF.needsEHCleanup(dtorKind));
+ }
+
+ // Pop the cleanup block.
+ CGF.PopCleanupBlock();
+}
+
+void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
+
+ // Get at the argument before we performed the implicit conversion
+ // to void*.
+ const Expr *Arg = E->getArgument();
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg)) {
+ if (ICE->getCastKind() != CK_UserDefinedConversion &&
+ ICE->getType()->isVoidPointerType())
+ Arg = ICE->getSubExpr();
+ else
+ break;
+ }
+
+ llvm::Value *Ptr = EmitScalarExpr(Arg);
+
+ // Null check the pointer.
+ llvm::BasicBlock *DeleteNotNull = createBasicBlock("delete.notnull");
+ llvm::BasicBlock *DeleteEnd = createBasicBlock("delete.end");
+
+ llvm::Value *IsNull = Builder.CreateIsNull(Ptr, "isnull");
+
+ Builder.CreateCondBr(IsNull, DeleteEnd, DeleteNotNull);
+ EmitBlock(DeleteNotNull);
+
+ // We might be deleting a pointer to array. If so, GEP down to the
+ // first non-array element.
+ // (this assumes that A(*)[3][7] is converted to [3 x [7 x %A]]*)
+ QualType DeleteTy = Arg->getType()->getAs<PointerType>()->getPointeeType();
+ if (DeleteTy->isConstantArrayType()) {
+ llvm::Value *Zero = Builder.getInt32(0);
+ SmallVector<llvm::Value*,8> GEP;
+
+ GEP.push_back(Zero); // point at the outermost array
+
+ // For each layer of array type we're pointing at:
+ while (const ConstantArrayType *Arr
+ = getContext().getAsConstantArrayType(DeleteTy)) {
+ // 1. Unpeel the array type.
+ DeleteTy = Arr->getElementType();
+
+ // 2. GEP to the first element of the array.
+ GEP.push_back(Zero);
+ }
+
+ Ptr = Builder.CreateInBoundsGEP(Ptr, GEP, "del.first");
+ }
+
+ assert(ConvertTypeForMem(DeleteTy) ==
+ cast<llvm::PointerType>(Ptr->getType())->getElementType());
+
+ if (E->isArrayForm()) {
+ EmitArrayDelete(*this, E, Ptr, DeleteTy);
+ } else {
+ EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
+ E->isGlobalDelete());
+ }
+
+ EmitBlock(DeleteEnd);
+}
+
+static llvm::Constant *getBadTypeidFn(CodeGenFunction &CGF) {
+ // void __cxa_bad_typeid();
+ llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_typeid");
+}
+
+static void EmitBadTypeidCall(CodeGenFunction &CGF) {
+ llvm::Value *Fn = getBadTypeidFn(CGF);
+ CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+}
+
+static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF,
+ const Expr *E,
+ llvm::Type *StdTypeInfoPtrTy) {
+ // Get the vtable pointer.
+ llvm::Value *ThisPtr = CGF.EmitLValue(E).getAddress();
+
+ // C++ [expr.typeid]p2:
+ // If the glvalue expression is obtained by applying the unary * operator to
+ // a pointer and the pointer is a null pointer value, the typeid expression
+ // throws the std::bad_typeid exception.
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParens())) {
+ if (UO->getOpcode() == UO_Deref) {
+ llvm::BasicBlock *BadTypeidBlock =
+ CGF.createBasicBlock("typeid.bad_typeid");
+ llvm::BasicBlock *EndBlock =
+ CGF.createBasicBlock("typeid.end");
+
+ llvm::Value *IsNull = CGF.Builder.CreateIsNull(ThisPtr);
+ CGF.Builder.CreateCondBr(IsNull, BadTypeidBlock, EndBlock);
+
+ CGF.EmitBlock(BadTypeidBlock);
+ EmitBadTypeidCall(CGF);
+ CGF.EmitBlock(EndBlock);
+ }
+ }
+
+ llvm::Value *Value = CGF.GetVTablePtr(ThisPtr,
+ StdTypeInfoPtrTy->getPointerTo());
+
+ // Load the type info.
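+ // In the Itanium vtable layout it is the slot just before the address
+ // point, hence the index of -1.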
+ Value = CGF.Builder.CreateConstInBoundsGEP1_64(Value, -1ULL);
+ return CGF.Builder.CreateLoad(Value);
+}
+
+llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
+ llvm::Type *StdTypeInfoPtrTy =
+ ConvertType(E->getType())->getPointerTo();
+
+ if (E->isTypeOperand()) {
+ llvm::Constant *TypeInfo =
+ CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
+ return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
+ }
+
+ // C++ [expr.typeid]p2:
+ // When typeid is applied to a glvalue expression whose type is a
+ // polymorphic class type, the result refers to a std::type_info object
+ // representing the type of the most derived object (that is, the dynamic
+ // type) to which the glvalue refers.
+ if (E->getExprOperand()->isGLValue()) {
+ if (const RecordType *RT =
+ E->getExprOperand()->getType()->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->isPolymorphic())
+ return EmitTypeidFromVTable(*this, E->getExprOperand(),
+ StdTypeInfoPtrTy);
+ }
+ }
+
+ QualType OperandTy = E->getExprOperand()->getType();
+ return Builder.CreateBitCast(CGM.GetAddrOfRTTIDescriptor(OperandTy),
+ StdTypeInfoPtrTy);
+}
+
+static llvm::Constant *getDynamicCastFn(CodeGenFunction &CGF) {
+ // void *__dynamic_cast(const void *sub,
+ // const abi::__class_type_info *src,
+ // const abi::__class_type_info *dst,
+ // std::ptrdiff_t src2dst_offset);
+
+ llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
+ llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ llvm::Type *Args[4] = { Int8PtrTy, Int8PtrTy, Int8PtrTy, PtrDiffTy };
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(Int8PtrTy, Args, false);
+
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__dynamic_cast");
+}
+
+static llvm::Constant *getBadCastFn(CodeGenFunction &CGF) {
+ // void __cxa_bad_cast();
+ llvm::FunctionType *FTy = llvm::FunctionType::get(CGF.VoidTy, false);
+ return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_bad_cast");
+}
+
+static void EmitBadCastCall(CodeGenFunction &CGF) {
+ llvm::Value *Fn = getBadCastFn(CGF);
+ CGF.EmitCallOrInvoke(Fn).setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+}
+
+static llvm::Value *
+EmitDynamicCastCall(CodeGenFunction &CGF, llvm::Value *Value,
+ QualType SrcTy, QualType DestTy,
+ llvm::BasicBlock *CastEnd) {
+ llvm::Type *PtrDiffLTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+ llvm::Type *DestLTy = CGF.ConvertType(DestTy);
+
+ if (const PointerType *PTy = DestTy->getAs<PointerType>()) {
+ if (PTy->getPointeeType()->isVoidType()) {
+ // C++ [expr.dynamic.cast]p7:
+ // If T is "pointer to cv void," then the result is a pointer to the
+ // most derived object pointed to by v.
+
+ // Get the vtable pointer.
+ llvm::Value *VTable = CGF.GetVTablePtr(Value, PtrDiffLTy->getPointerTo());
+
+ // Get the offset-to-top from the vtable.
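+ // It is stored two slots before the vtable's address point, hence the
+ // index of -2.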
+ llvm::Value *OffsetToTop =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTable, -2ULL);
+ OffsetToTop = CGF.Builder.CreateLoad(OffsetToTop, "offset.to.top");
+
+ // Finally, add the offset to the pointer.
+ Value = CGF.EmitCastToVoidPtr(Value);
+ Value = CGF.Builder.CreateInBoundsGEP(Value, OffsetToTop);
+
+ return CGF.Builder.CreateBitCast(Value, DestLTy);
+ }
+ }
+
+ QualType SrcRecordTy;
+ QualType DestRecordTy;
+
+ if (const PointerType *DestPTy = DestTy->getAs<PointerType>()) {
+ SrcRecordTy = SrcTy->castAs<PointerType>()->getPointeeType();
+ DestRecordTy = DestPTy->getPointeeType();
+ } else {
+ SrcRecordTy = SrcTy;
+ DestRecordTy = DestTy->castAs<ReferenceType>()->getPointeeType();
+ }
+
+ assert(SrcRecordTy->isRecordType() && "source type must be a record type!");
+ assert(DestRecordTy->isRecordType() && "dest type must be a record type!");
+
+ llvm::Value *SrcRTTI =
+ CGF.CGM.GetAddrOfRTTIDescriptor(SrcRecordTy.getUnqualifiedType());
+ llvm::Value *DestRTTI =
+ CGF.CGM.GetAddrOfRTTIDescriptor(DestRecordTy.getUnqualifiedType());
+
+ // FIXME: Actually compute a hint here.
+ llvm::Value *OffsetHint = llvm::ConstantInt::get(PtrDiffLTy, -1ULL);
+
+ // Emit the call to __dynamic_cast.
+ Value = CGF.EmitCastToVoidPtr(Value);
+ Value = CGF.Builder.CreateCall4(getDynamicCastFn(CGF), Value,
+ SrcRTTI, DestRTTI, OffsetHint);
+ Value = CGF.Builder.CreateBitCast(Value, DestLTy);
+
+ /// C++ [expr.dynamic.cast]p9:
+ /// A failed cast to reference type throws std::bad_cast
+ if (DestTy->isReferenceType()) {
+ llvm::BasicBlock *BadCastBlock =
+ CGF.createBasicBlock("dynamic_cast.bad_cast");
+
+ llvm::Value *IsNull = CGF.Builder.CreateIsNull(Value);
+ CGF.Builder.CreateCondBr(IsNull, BadCastBlock, CastEnd);
+
+ CGF.EmitBlock(BadCastBlock);
+ EmitBadCastCall(CGF);
+ }
+
+ return Value;
+}
+
+static llvm::Value *EmitDynamicCastToNull(CodeGenFunction &CGF,
+ QualType DestTy) {
+ llvm::Type *DestLTy = CGF.ConvertType(DestTy);
+ if (DestTy->isPointerType())
+ return llvm::Constant::getNullValue(DestLTy);
+
+ /// C++ [expr.dynamic.cast]p9:
+ /// A failed cast to reference type throws std::bad_cast
+ EmitBadCastCall(CGF);
+
+ CGF.EmitBlock(CGF.createBasicBlock("dynamic_cast.end"));
+ return llvm::UndefValue::get(DestLTy);
+}
+
+llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
+ const CXXDynamicCastExpr *DCE) {
+ QualType DestTy = DCE->getTypeAsWritten();
+
+ if (DCE->isAlwaysNull())
+ return EmitDynamicCastToNull(*this, DestTy);
+
+ QualType SrcTy = DCE->getSubExpr()->getType();
+
+ // C++ [expr.dynamic.cast]p4:
+ // If the value of v is a null pointer value in the pointer case, the result
+ // is the null pointer value of type T.
+ bool ShouldNullCheckSrcValue = SrcTy->isPointerType();
+
+ llvm::BasicBlock *CastNull = 0;
+ llvm::BasicBlock *CastNotNull = 0;
+ llvm::BasicBlock *CastEnd = createBasicBlock("dynamic_cast.end");
+
+ if (ShouldNullCheckSrcValue) {
+ CastNull = createBasicBlock("dynamic_cast.null");
+ CastNotNull = createBasicBlock("dynamic_cast.notnull");
+
+ llvm::Value *IsNull = Builder.CreateIsNull(Value);
+ Builder.CreateCondBr(IsNull, CastNull, CastNotNull);
+ EmitBlock(CastNotNull);
+ }
+
+ Value = EmitDynamicCastCall(*this, Value, SrcTy, DestTy, CastEnd);
+
+ if (ShouldNullCheckSrcValue) {
+ EmitBranch(CastEnd);
+
+ EmitBlock(CastNull);
+ EmitBranch(CastEnd);
+ }
+
+ EmitBlock(CastEnd);
+
+ if (ShouldNullCheckSrcValue) {
+ llvm::PHINode *PHI = Builder.CreatePHI(Value->getType(), 2);
+ PHI->addIncoming(Value, CastNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(Value->getType()), CastNull);
+
+ Value = PHI;
+ }
+
+ return Value;
+}
+
+void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
+ RunCleanupsScope Scope(*this);
+
+ CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
+ for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
+ e = E->capture_init_end();
+ i != e; ++i, ++CurField) {
+ // Emit initialization
+ LValue LV = EmitLValueForFieldInitialization(Slot.getAddr(), *CurField, 0);
+ ArrayRef<VarDecl *> ArrayIndexes;
+ if (CurField->getType()->isArrayType())
+ ArrayIndexes = E->getCaptureInitIndexVars(i);
+ EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
new file mode 100644
index 0000000..0233745
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprComplex.cpp
@@ -0,0 +1,839 @@
+//===--- CGExprComplex.cpp - Emit LLVM Code for Complex Exprs -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes with complex types as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Complex Expression Emitter
+//===----------------------------------------------------------------------===//
+
+typedef CodeGenFunction::ComplexPairTy ComplexPairTy;
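+// A complex value is modeled as a pair of scalar llvm::Values: (real, imag).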
+
+namespace {
+class ComplexExprEmitter
+ : public StmtVisitor<ComplexExprEmitter, ComplexPairTy> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ // True if we should ignore the real / imaginary part of the result.
+ bool IgnoreReal;
+ bool IgnoreImag;
+public:
+ ComplexExprEmitter(CodeGenFunction &cgf, bool ir=false, bool ii=false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreReal(ir), IgnoreImag(ii) {
+ }
+
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ bool TestAndClearIgnoreReal() {
+ bool I = IgnoreReal;
+ IgnoreReal = false;
+ return I;
+ }
+ bool TestAndClearIgnoreImag() {
+ bool I = IgnoreImag;
+ IgnoreImag = false;
+ return I;
+ }
+
+ /// EmitLoadOfLValue - Given an expression with complex type that is an
+ /// l-value, this method emits the address of the l-value, then loads
+ /// and returns the result.
+ ComplexPairTy EmitLoadOfLValue(const Expr *E) {
+ return EmitLoadOfLValue(CGF.EmitLValue(E));
+ }
+
+ ComplexPairTy EmitLoadOfLValue(LValue LV) {
+ assert(LV.isSimple() && "complex l-value must be simple");
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
+ }
+
+ /// EmitLoadOfComplex - Given a pointer to a complex value, emit code to load
+ /// the real and imaginary pieces.
+ ComplexPairTy EmitLoadOfComplex(llvm::Value *SrcPtr, bool isVolatile);
+
+ /// EmitStoreThroughLValue - Given an l-value of complex type, store
+ /// a complex number into it.
+ void EmitStoreThroughLValue(ComplexPairTy Val, LValue LV) {
+ assert(LV.isSimple() && "complex l-value must be simple");
+ return EmitStoreOfComplex(Val, LV.getAddress(), LV.isVolatileQualified());
+ }
+
+ /// EmitStoreOfComplex - Store the specified real/imag parts into the
+ /// specified value pointer.
+ void EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *ResPtr, bool isVol);
+
+ /// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+ ComplexPairTy EmitComplexToComplexCast(ComplexPairTy Val, QualType SrcType,
+ QualType DestType);
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ ComplexPairTy Visit(Expr *E) {
+ return StmtVisitor<ComplexExprEmitter, ComplexPairTy>::Visit(E);
+ }
+
+ ComplexPairTy VisitStmt(Stmt *S) {
+ S->dump(CGF.getContext().getSourceManager());
+ llvm_unreachable("Stmt can't have complex result type!");
+ }
+ ComplexPairTy VisitExpr(Expr *S);
+ ComplexPairTy VisitParenExpr(ParenExpr *PE) { return Visit(PE->getSubExpr());}
+ ComplexPairTy VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+ return Visit(GE->getResultExpr());
+ }
+ ComplexPairTy VisitImaginaryLiteral(const ImaginaryLiteral *IL);
+ ComplexPairTy
+ VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
+ return Visit(PE->getReplacement());
+ }
+
+ // l-values.
+ ComplexPairTy VisitDeclRefExpr(DeclRefExpr *E) {
+ if (CodeGenFunction::ConstantEmission result = CGF.tryEmitAsConstant(E)) {
+ if (result.isReference())
+ return EmitLoadOfLValue(result.getReferenceLValue(CGF, E));
+
+ llvm::ConstantStruct *pair =
+ cast<llvm::ConstantStruct>(result.getValue());
+ return ComplexPairTy(pair->getOperand(0), pair->getOperand(1));
+ }
+ return EmitLoadOfLValue(E);
+ }
+ ComplexPairTy VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ ComplexPairTy VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ return CGF.EmitObjCMessageExpr(E).getComplexVal();
+ }
+ ComplexPairTy VisitArraySubscriptExpr(Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitMemberExpr(const Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ if (E->isGLValue())
+ return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E));
+ return CGF.getOpaqueRValueMapping(E).getComplexVal();
+ }
+
+ ComplexPairTy VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ return CGF.EmitPseudoObjectRValue(E).getComplexVal();
+ }
+
+ // FIXME: CompoundLiteralExpr
+
+ ComplexPairTy EmitCast(CastExpr::CastKind CK, Expr *Op, QualType DestTy);
+ ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ // Unlike for scalars, we don't have to worry about function->ptr demotion
+ // here.
+ return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
+ }
+ ComplexPairTy VisitCastExpr(CastExpr *E) {
+ return EmitCast(E->getCastKind(), E->getSubExpr(), E->getType());
+ }
+ ComplexPairTy VisitCallExpr(const CallExpr *E);
+ ComplexPairTy VisitStmtExpr(const StmtExpr *E);
+
+ // Operators.
+ ComplexPairTy VisitPrePostIncDec(const UnaryOperator *E,
+ bool isInc, bool isPre) {
+ LValue LV = CGF.EmitLValue(E->getSubExpr());
+ return CGF.EmitComplexPrePostIncDec(E, LV, isInc, isPre);
+ }
+ ComplexPairTy VisitUnaryPostDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, false);
+ }
+ ComplexPairTy VisitUnaryPostInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, false);
+ }
+ ComplexPairTy VisitUnaryPreDec(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, false, true);
+ }
+ ComplexPairTy VisitUnaryPreInc(const UnaryOperator *E) {
+ return VisitPrePostIncDec(E, true, true);
+ }
+ ComplexPairTy VisitUnaryDeref(const Expr *E) { return EmitLoadOfLValue(E); }
+ ComplexPairTy VisitUnaryPlus (const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ return Visit(E->getSubExpr());
+ }
+ ComplexPairTy VisitUnaryMinus (const UnaryOperator *E);
+ ComplexPairTy VisitUnaryNot (const UnaryOperator *E);
+ // LNot,Real,Imag never return complex.
+ ComplexPairTy VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+ ComplexPairTy VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+ ComplexPairTy VisitExprWithCleanups(ExprWithCleanups *E) {
+ CGF.enterFullExpression(E);
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ return Visit(E->getSubExpr());
+ }
+ ComplexPairTy VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
+ assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+ QualType Elem = E->getType()->getAs<ComplexType>()->getElementType();
+ llvm::Constant *Null = llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+ return ComplexPairTy(Null, Null);
+ }
+ ComplexPairTy VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ assert(E->getType()->isAnyComplexType() && "Expected complex type!");
+ QualType Elem = E->getType()->getAs<ComplexType>()->getElementType();
+ llvm::Constant *Null =
+ llvm::Constant::getNullValue(CGF.ConvertType(Elem));
+ return ComplexPairTy(Null, Null);
+ }
+
+ struct BinOpInfo {
+ ComplexPairTy LHS;
+ ComplexPairTy RHS;
+ QualType Ty; // Computation Type.
+ };
+
+ BinOpInfo EmitBinOps(const BinaryOperator *E);
+ LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)
+ (const BinOpInfo &),
+ ComplexPairTy &Val);
+ ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)
+ (const BinOpInfo &));
+
+ ComplexPairTy EmitBinAdd(const BinOpInfo &Op);
+ ComplexPairTy EmitBinSub(const BinOpInfo &Op);
+ ComplexPairTy EmitBinMul(const BinOpInfo &Op);
+ ComplexPairTy EmitBinDiv(const BinOpInfo &Op);
+
+ ComplexPairTy VisitBinAdd(const BinaryOperator *E) {
+ return EmitBinAdd(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinSub(const BinaryOperator *E) {
+ return EmitBinSub(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinMul(const BinaryOperator *E) {
+ return EmitBinMul(EmitBinOps(E));
+ }
+ ComplexPairTy VisitBinDiv(const BinaryOperator *E) {
+ return EmitBinDiv(EmitBinOps(E));
+ }
+
+ // Compound assignments.
+ ComplexPairTy VisitBinAddAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinAdd);
+ }
+ ComplexPairTy VisitBinSubAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinSub);
+ }
+ ComplexPairTy VisitBinMulAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinMul);
+ }
+ ComplexPairTy VisitBinDivAssign(const CompoundAssignOperator *E) {
+ return EmitCompoundAssign(E, &ComplexExprEmitter::EmitBinDiv);
+ }
+
+ // GCC rejects rem/and/or/xor for integer complex.
+ // Logical and/or always return int, never complex.
+
+ // No comparisons produce a complex result.
+
+ LValue EmitBinAssignLValue(const BinaryOperator *E,
+ ComplexPairTy &Val);
+ ComplexPairTy VisitBinAssign (const BinaryOperator *E);
+ ComplexPairTy VisitBinComma (const BinaryOperator *E);
+
+
+ ComplexPairTy
+ VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
+ ComplexPairTy VisitChooseExpr(ChooseExpr *CE);
+
+ ComplexPairTy VisitInitListExpr(InitListExpr *E);
+
+ ComplexPairTy VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+
+ ComplexPairTy VisitVAArgExpr(VAArgExpr *E);
+
+ ComplexPairTy VisitAtomicExpr(AtomicExpr *E) {
+ return CGF.EmitAtomicExpr(E).getComplexVal();
+ }
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitLoadOfComplex - Given a pointer to a complex value, emit code to
+/// load the real and imaginary pieces, returning them as Real/Imag.
+ComplexPairTy ComplexExprEmitter::EmitLoadOfComplex(llvm::Value *SrcPtr,
+ bool isVolatile) {
+ llvm::Value *Real=0, *Imag=0;
+
+ if (!IgnoreReal || isVolatile) {
+ llvm::Value *RealP = Builder.CreateStructGEP(SrcPtr, 0,
+ SrcPtr->getName() + ".realp");
+ Real = Builder.CreateLoad(RealP, isVolatile, SrcPtr->getName() + ".real");
+ }
+
+ if (!IgnoreImag || isVolatile) {
+ llvm::Value *ImagP = Builder.CreateStructGEP(SrcPtr, 1,
+ SrcPtr->getName() + ".imagp");
+ Imag = Builder.CreateLoad(ImagP, isVolatile, SrcPtr->getName() + ".imag");
+ }
+ return ComplexPairTy(Real, Imag);
+}
+
+/// EmitStoreOfComplex - Store the specified real/imag parts into the
+/// specified value pointer.
+void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, llvm::Value *Ptr,
+ bool isVolatile) {
+ llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, "real");
+ llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, "imag");
+
+ Builder.CreateStore(Val.first, RealPtr, isVolatile);
+ Builder.CreateStore(Val.second, ImagPtr, isVolatile);
+}
+
+
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+ComplexPairTy ComplexExprEmitter::VisitExpr(Expr *E) {
+ CGF.ErrorUnsupported(E, "complex expression");
+ llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return ComplexPairTy(U, U);
+}
+
+ComplexPairTy ComplexExprEmitter::
+VisitImaginaryLiteral(const ImaginaryLiteral *IL) {
+ llvm::Value *Imag = CGF.EmitScalarExpr(IL->getSubExpr());
+ return ComplexPairTy(llvm::Constant::getNullValue(Imag->getType()), Imag);
+}
+
+
+ComplexPairTy ComplexExprEmitter::VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType())
+ return EmitLoadOfLValue(E);
+
+ return CGF.EmitCallExpr(E).getComplexVal();
+}
+
+ComplexPairTy ComplexExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ CodeGenFunction::StmtExprEvaluation eval(CGF);
+ return CGF.EmitCompoundStmt(*E->getSubStmt(), true).getComplexVal();
+}
+
+/// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
+ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
+ QualType SrcType,
+ QualType DestType) {
+ // Get the src/dest element type.
+ SrcType = SrcType->getAs<ComplexType>()->getElementType();
+ DestType = DestType->getAs<ComplexType>()->getElementType();
+
+ // C99 6.3.1.6: When a value of complex type is converted to another
+ // complex type, both the real and imaginary parts follow the conversion
+ // rules for the corresponding real types.
+ Val.first = CGF.EmitScalarConversion(Val.first, SrcType, DestType);
+ Val.second = CGF.EmitScalarConversion(Val.second, SrcType, DestType);
+ return Val;
+}
+
+ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
+ QualType DestTy) {
+ switch (CK) {
+ case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
+
+ // Atomic to non-atomic casts may be more than a no-op for some platforms and
+ // for some types.
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ case CK_LValueToRValue:
+ case CK_UserDefinedConversion:
+ return Visit(Op);
+
+ case CK_LValueBitCast: {
+ llvm::Value *V = CGF.EmitLValue(Op).getAddress();
+ V = Builder.CreateBitCast(V,
+ CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
+ // FIXME: Are the qualifiers correct here?
+ return EmitLoadOfComplex(V, DestTy.isVolatileQualified());
+ }
+
+ case CK_BitCast:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_ToUnion:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_MemberPointerToBoolean:
+ case CK_ReinterpretMemberPointer:
+ case CK_ConstructorConversion:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_ToVoid:
+ case CK_VectorSplat:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ llvm_unreachable("invalid cast kind for complex value");
+
+ case CK_FloatingRealToComplex:
+ case CK_IntegralRealToComplex: {
+ llvm::Value *Elt = CGF.EmitScalarExpr(Op);
+
+ // Convert the input element to the element type of the complex.
+ DestTy = DestTy->getAs<ComplexType>()->getElementType();
+ Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy);
+
+ // Return (realval, 0).
+ return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType()));
+ }
+
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ return EmitComplexToComplexCast(Visit(Op), Op->getType(), DestTy);
+ }
+
+ llvm_unreachable("unknown cast resulting in complex value");
+}
+
+ComplexPairTy ComplexExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ ComplexPairTy Op = Visit(E->getSubExpr());
+
+ llvm::Value *ResR, *ResI;
+ if (Op.first->getType()->isFloatingPointTy()) {
+ ResR = Builder.CreateFNeg(Op.first, "neg.r");
+ ResI = Builder.CreateFNeg(Op.second, "neg.i");
+ } else {
+ ResR = Builder.CreateNeg(Op.first, "neg.r");
+ ResI = Builder.CreateNeg(Op.second, "neg.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ // ~(a+ib) = a + i*-b
+ ComplexPairTy Op = Visit(E->getSubExpr());
+ llvm::Value *ResI;
+ if (Op.second->getType()->isFloatingPointTy())
+ ResI = Builder.CreateFNeg(Op.second, "conj.i");
+ else
+ ResI = Builder.CreateNeg(Op.second, "conj.i");
+
+ return ComplexPairTy(Op.first, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
+ llvm::Value *ResR, *ResI;
+
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ ResR = Builder.CreateFAdd(Op.LHS.first, Op.RHS.first, "add.r");
+ ResI = Builder.CreateFAdd(Op.LHS.second, Op.RHS.second, "add.i");
+ } else {
+ ResR = Builder.CreateAdd(Op.LHS.first, Op.RHS.first, "add.r");
+ ResI = Builder.CreateAdd(Op.LHS.second, Op.RHS.second, "add.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
+ComplexPairTy ComplexExprEmitter::EmitBinSub(const BinOpInfo &Op) {
+ llvm::Value *ResR, *ResI;
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ ResR = Builder.CreateFSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ ResI = Builder.CreateFSub(Op.LHS.second, Op.RHS.second, "sub.i");
+ } else {
+ ResR = Builder.CreateSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ ResI = Builder.CreateSub(Op.LHS.second, Op.RHS.second, "sub.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
+
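+/// EmitBinMul - Expand complex multiplication using the textbook identity
+/// (a+ib) * (c+id) = (ac - bd) + i(bc + ad). This direct expansion makes no
+/// attempt at the NaN/infinity recovery that C99 Annex G describes for the
+/// floating-point case.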
+ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
+ using llvm::Value;
+ Value *ResR, *ResI;
+
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ Value *ResRl = Builder.CreateFMul(Op.LHS.first, Op.RHS.first, "mul.rl");
+ Value *ResRr = Builder.CreateFMul(Op.LHS.second, Op.RHS.second,"mul.rr");
+ ResR = Builder.CreateFSub(ResRl, ResRr, "mul.r");
+
+ Value *ResIl = Builder.CreateFMul(Op.LHS.second, Op.RHS.first, "mul.il");
+ Value *ResIr = Builder.CreateFMul(Op.LHS.first, Op.RHS.second, "mul.ir");
+ ResI = Builder.CreateFAdd(ResIl, ResIr, "mul.i");
+ } else {
+ Value *ResRl = Builder.CreateMul(Op.LHS.first, Op.RHS.first, "mul.rl");
+ Value *ResRr = Builder.CreateMul(Op.LHS.second, Op.RHS.second,"mul.rr");
+ ResR = Builder.CreateSub(ResRl, ResRr, "mul.r");
+
+ Value *ResIl = Builder.CreateMul(Op.LHS.second, Op.RHS.first, "mul.il");
+ Value *ResIr = Builder.CreateMul(Op.LHS.first, Op.RHS.second, "mul.ir");
+ ResI = Builder.CreateAdd(ResIl, ResIr, "mul.i");
+ }
+ return ComplexPairTy(ResR, ResI);
+}
+
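+/// EmitBinDiv - Expand complex division from the identity shown below. The
+/// (cc+dd) denominator is used directly, with no scaling to guard against
+/// intermediate overflow or underflow.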
+ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
+ llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second;
+ llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second;
+
+
+ llvm::Value *DSTr, *DSTi;
+ if (Op.LHS.first->getType()->isFloatingPointTy()) {
+ // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+ llvm::Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr); // a*c
+ llvm::Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi); // b*d
+ llvm::Value *Tmp3 = Builder.CreateFAdd(Tmp1, Tmp2); // ac+bd
+
+ llvm::Value *Tmp4 = Builder.CreateFMul(RHSr, RHSr); // c*c
+ llvm::Value *Tmp5 = Builder.CreateFMul(RHSi, RHSi); // d*d
+ llvm::Value *Tmp6 = Builder.CreateFAdd(Tmp4, Tmp5); // cc+dd
+
+ llvm::Value *Tmp7 = Builder.CreateFMul(LHSi, RHSr); // b*c
+ llvm::Value *Tmp8 = Builder.CreateFMul(LHSr, RHSi); // a*d
+ llvm::Value *Tmp9 = Builder.CreateFSub(Tmp7, Tmp8); // bc-ad
+
+ DSTr = Builder.CreateFDiv(Tmp3, Tmp6);
+ DSTi = Builder.CreateFDiv(Tmp9, Tmp6);
+ } else {
+ // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
+ llvm::Value *Tmp1 = Builder.CreateMul(LHSr, RHSr); // a*c
+ llvm::Value *Tmp2 = Builder.CreateMul(LHSi, RHSi); // b*d
+ llvm::Value *Tmp3 = Builder.CreateAdd(Tmp1, Tmp2); // ac+bd
+
+ llvm::Value *Tmp4 = Builder.CreateMul(RHSr, RHSr); // c*c
+ llvm::Value *Tmp5 = Builder.CreateMul(RHSi, RHSi); // d*d
+ llvm::Value *Tmp6 = Builder.CreateAdd(Tmp4, Tmp5); // cc+dd
+
+ llvm::Value *Tmp7 = Builder.CreateMul(LHSi, RHSr); // b*c
+ llvm::Value *Tmp8 = Builder.CreateMul(LHSr, RHSi); // a*d
+ llvm::Value *Tmp9 = Builder.CreateSub(Tmp7, Tmp8); // bc-ad
+
+ if (Op.Ty->getAs<ComplexType>()->getElementType()->isUnsignedIntegerType()) {
+ DSTr = Builder.CreateUDiv(Tmp3, Tmp6);
+ DSTi = Builder.CreateUDiv(Tmp9, Tmp6);
+ } else {
+ DSTr = Builder.CreateSDiv(Tmp3, Tmp6);
+ DSTi = Builder.CreateSDiv(Tmp9, Tmp6);
+ }
+ }
+
+ return ComplexPairTy(DSTr, DSTi);
+}
+
+ComplexExprEmitter::BinOpInfo
+ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ BinOpInfo Ops;
+ Ops.LHS = Visit(E->getLHS());
+ Ops.RHS = Visit(E->getRHS());
+ Ops.Ty = E->getType();
+ return Ops;
+}
+
+
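+/// EmitCompoundAssignLValue - e.g. for 'c *= d', the LHS is loaded, widened to
+/// the computation type, combined with the RHS via *Func, truncated back to
+/// the LHS type, and stored through the l-value. The l-value is returned and
+/// the computed value is passed back in Val.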
+LValue ComplexExprEmitter::
+EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&),
+ ComplexPairTy &Val) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ QualType LHSTy = E->getLHS()->getType();
+
+ BinOpInfo OpInfo;
+
+ // Load the RHS and LHS operands.
+ // __block variables need to have the rhs evaluated first, plus this should
+ // improve codegen a little.
+ OpInfo.Ty = E->getComputationResultType();
+
+ // The RHS should have been converted to the computation type.
+ assert(OpInfo.Ty->isAnyComplexType());
+ assert(CGF.getContext().hasSameUnqualifiedType(OpInfo.Ty,
+ E->getRHS()->getType()));
+ OpInfo.RHS = Visit(E->getRHS());
+
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ // Load from the l-value.
+ ComplexPairTy LHSComplexPair = EmitLoadOfLValue(LHS);
+
+ OpInfo.LHS = EmitComplexToComplexCast(LHSComplexPair, LHSTy, OpInfo.Ty);
+
+ // Expand the binary operator.
+ ComplexPairTy Result = (this->*Func)(OpInfo);
+
+ // Truncate the result back to the LHS type.
+ Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
+ Val = Result;
+
+ // Store the result value into the LHS lvalue.
+ EmitStoreThroughLValue(Result, LHS);
+
+ return LHS;
+}
+
+// Compound assignments.
+ComplexPairTy ComplexExprEmitter::
+EmitCompoundAssign(const CompoundAssignOperator *E,
+ ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
+ ComplexPairTy Val;
+ LValue LV = EmitCompoundAssignLValue(E, Func, Val);
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOpts().CPlusPlus)
+ return Val;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LV.isVolatileQualified())
+ return Val;
+
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
+}
+
+LValue ComplexExprEmitter::EmitBinAssignLValue(const BinaryOperator *E,
+ ComplexPairTy &Val) {
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getLHS()->getType(),
+ E->getRHS()->getType()) &&
+ "Invalid assignment");
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+
+ // Emit the RHS. __block variables need the RHS evaluated first.
+ Val = Visit(E->getRHS());
+
+ // Compute the address to store into.
+ LValue LHS = CGF.EmitLValue(E->getLHS());
+
+ // Store the result value into the LHS lvalue.
+ EmitStoreThroughLValue(Val, LHS);
+
+ return LHS;
+}
+
+ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ ComplexPairTy Val;
+ LValue LV = EmitBinAssignLValue(E, Val);
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOpts().CPlusPlus)
+ return Val;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LV.isVolatileQualified())
+ return Val;
+
+ return EmitLoadOfComplex(LV.getAddress(), LV.isVolatileQualified());
+}
+
+ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitIgnoredExpr(E->getLHS());
+ return Visit(E->getRHS());
+}
+
+ComplexPairTy ComplexExprEmitter::
+VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
+ TestAndClearIgnoreReal();
+ TestAndClearIgnoreImag();
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+ // Bind the common expression if necessary.
+ CodeGenFunction::OpaqueValueMapping binding(CGF, E);
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+ CGF.EmitBranchOnBoolExpr(E->getCond(), LHSBlock, RHSBlock);
+
+ eval.begin(CGF);
+ CGF.EmitBlock(LHSBlock);
+ ComplexPairTy LHS = Visit(E->getTrueExpr());
+ LHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBranch(ContBlock);
+ eval.end(CGF);
+
+ eval.begin(CGF);
+ CGF.EmitBlock(RHSBlock);
+ ComplexPairTy RHS = Visit(E->getFalseExpr());
+ RHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBlock(ContBlock);
+ eval.end(CGF);
+
+ // Create a PHI node for the real part.
+ llvm::PHINode *RealPN = Builder.CreatePHI(LHS.first->getType(), 2, "cond.r");
+ RealPN->addIncoming(LHS.first, LHSBlock);
+ RealPN->addIncoming(RHS.first, RHSBlock);
+
+ // Create a PHI node for the imaginary part.
+ llvm::PHINode *ImagPN = Builder.CreatePHI(LHS.first->getType(), 2, "cond.i");
+ ImagPN->addIncoming(LHS.second, LHSBlock);
+ ImagPN->addIncoming(RHS.second, RHSBlock);
+
+ return ComplexPairTy(RealPN, ImagPN);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitChooseExpr(ChooseExpr *E) {
+ return Visit(E->getChosenSubExpr(CGF.getContext()));
+}
+
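+/// VisitInitListExpr - e.g. '_Complex double z = {1.0, 2.0};' yields the pair
+/// (1.0, 2.0); a single-element list forwards to its (complex-typed)
+/// initializer, and an empty list zero-initializes both parts.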
+ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
+ bool Ignore = TestAndClearIgnoreReal();
+ (void)Ignore;
+ assert(!Ignore && "init list ignored");
+ Ignore = TestAndClearIgnoreImag();
+ (void)Ignore;
+ assert(!Ignore && "init list ignored");
+
+ if (E->getNumInits() == 2) {
+ llvm::Value *Real = CGF.EmitScalarExpr(E->getInit(0));
+ llvm::Value *Imag = CGF.EmitScalarExpr(E->getInit(1));
+ return ComplexPairTy(Real, Imag);
+ } else if (E->getNumInits() == 1) {
+ return Visit(E->getInit(0));
+ }
+
+ // An empty init list initializes both parts to zero.
+ assert(E->getNumInits() == 0 && "Unexpected number of inits");
+ QualType Ty = E->getType()->getAs<ComplexType>()->getElementType();
+ llvm::Type* LTy = CGF.ConvertType(Ty);
+ llvm::Value* zeroConstant = llvm::Constant::getNullValue(LTy);
+ return ComplexPairTy(zeroConstant, zeroConstant);
+}
+
+ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(E->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, E->getType());
+
+ if (!ArgPtr) {
+ CGF.ErrorUnsupported(E, "complex va_arg expression");
+ llvm::Type *EltTy =
+ CGF.ConvertType(E->getType()->getAs<ComplexType>()->getElementType());
+ llvm::Value *U = llvm::UndefValue::get(EltTy);
+ return ComplexPairTy(U, U);
+ }
+
+ // FIXME Volatility.
+ return EmitLoadOfComplex(ArgPtr, false);
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitComplexExpr - Emit the computation of the specified expression of
+/// complex type, returning its real and imaginary parts. IgnoreReal and
+/// IgnoreImag indicate which parts, if any, the caller does not need.
+ComplexPairTy CodeGenFunction::EmitComplexExpr(const Expr *E, bool IgnoreReal,
+ bool IgnoreImag) {
+ assert(E && E->getType()->isAnyComplexType() &&
+ "Invalid complex expression to emit");
+
+ return ComplexExprEmitter(*this, IgnoreReal, IgnoreImag)
+ .Visit(const_cast<Expr*>(E));
+}
+
+/// EmitComplexExprIntoAddr - Emit the computation of the specified expression
+/// of complex type, storing into the specified Value*.
+void CodeGenFunction::EmitComplexExprIntoAddr(const Expr *E,
+ llvm::Value *DestAddr,
+ bool DestIsVolatile) {
+ assert(E && E->getType()->isAnyComplexType() &&
+ "Invalid complex expression to emit");
+ ComplexExprEmitter Emitter(*this);
+ ComplexPairTy Val = Emitter.Visit(const_cast<Expr*>(E));
+ Emitter.EmitStoreOfComplex(Val, DestAddr, DestIsVolatile);
+}
+
+/// StoreComplexToAddr - Store a complex number into the specified address.
+void CodeGenFunction::StoreComplexToAddr(ComplexPairTy V,
+ llvm::Value *DestAddr,
+ bool DestIsVolatile) {
+ ComplexExprEmitter(*this).EmitStoreOfComplex(V, DestAddr, DestIsVolatile);
+}
+
+/// LoadComplexFromAddr - Load a complex number from the specified address.
+ComplexPairTy CodeGenFunction::LoadComplexFromAddr(llvm::Value *SrcAddr,
+ bool SrcIsVolatile) {
+ return ComplexExprEmitter(*this).EmitLoadOfComplex(SrcAddr, SrcIsVolatile);
+}
+
+LValue CodeGenFunction::EmitComplexAssignmentLValue(const BinaryOperator *E) {
+ assert(E->getOpcode() == BO_Assign);
+ ComplexPairTy Val; // ignored
+ return ComplexExprEmitter(*this).EmitBinAssignLValue(E, Val);
+}
+
+LValue CodeGenFunction::
+EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E) {
+ ComplexPairTy(ComplexExprEmitter::*Op)(const ComplexExprEmitter::BinOpInfo &);
+ switch (E->getOpcode()) {
+ case BO_MulAssign: Op = &ComplexExprEmitter::EmitBinMul; break;
+ case BO_DivAssign: Op = &ComplexExprEmitter::EmitBinDiv; break;
+ case BO_SubAssign: Op = &ComplexExprEmitter::EmitBinSub; break;
+ case BO_AddAssign: Op = &ComplexExprEmitter::EmitBinAdd; break;
+
+ default:
+ llvm_unreachable("unexpected complex compound assignment");
+ }
+
+ ComplexPairTy Val; // ignored
+ return ComplexExprEmitter(*this).EmitCompoundAssignLValue(E, Op, Val);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
new file mode 100644
index 0000000..d528e0c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprConstant.cpp
@@ -0,0 +1,1500 @@
+//===--- CGExprConstant.cpp - Emit LLVM Code from Constant Expressions ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Constant Expr nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGCXXABI.h"
+#include "CGObjCRuntime.h"
+#include "CGRecordLayout.h"
+#include "clang/AST/APValue.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/Builtins.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// ConstStructBuilder
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ConstStructBuilder {
+ CodeGenModule &CGM;
+ CodeGenFunction *CGF;
+
+ bool Packed;
+ CharUnits NextFieldOffsetInChars;
+ CharUnits LLVMStructAlignment;
+ SmallVector<llvm::Constant *, 32> Elements;
+public:
+ static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
+ InitListExpr *ILE);
+ static llvm::Constant *BuildStruct(CodeGenModule &CGM, CodeGenFunction *CGF,
+ const APValue &Value, QualType ValTy);
+
+private:
+ ConstStructBuilder(CodeGenModule &CGM, CodeGenFunction *CGF)
+ : CGM(CGM), CGF(CGF), Packed(false),
+ NextFieldOffsetInChars(CharUnits::Zero()),
+ LLVMStructAlignment(CharUnits::One()) { }
+
+ void AppendVTablePointer(BaseSubobject Base, llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass);
+
+ void AppendField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::Constant *InitExpr);
+
+ void AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst);
+
+ void AppendBitField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::ConstantInt *InitExpr);
+
+ void AppendPadding(CharUnits PadSize);
+
+ void AppendTailPadding(CharUnits RecordSize);
+
+ void ConvertStructToPacked();
+
+ bool Build(InitListExpr *ILE);
+ void Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
+ llvm::Constant *VTable, const CXXRecordDecl *VTableClass,
+ CharUnits BaseOffset);
+ llvm::Constant *Finalize(QualType Ty);
+
+ CharUnits getAlignment(const llvm::Constant *C) const {
+ if (Packed) return CharUnits::One();
+ return CharUnits::fromQuantity(
+ CGM.getTargetData().getABITypeAlignment(C->getType()));
+ }
+
+ CharUnits getSizeInChars(const llvm::Constant *C) const {
+ return CharUnits::fromQuantity(
+ CGM.getTargetData().getTypeAllocSize(C->getType()));
+ }
+};
+
+void ConstStructBuilder::AppendVTablePointer(BaseSubobject Base,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass) {
+ // Find the appropriate vtable within the vtable group.
+ uint64_t AddressPoint =
+ CGM.getVTableContext().getVTableLayout(VTableClass).getAddressPoint(Base);
+ llvm::Value *Indices[] = {
+ llvm::ConstantInt::get(CGM.Int64Ty, 0),
+ llvm::ConstantInt::get(CGM.Int64Ty, AddressPoint)
+ };
+ llvm::Constant *VTableAddressPoint =
+ llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Indices);
+
+ // Add the vtable at the start of the object.
+ AppendBytes(Base.getBaseOffset(), VTableAddressPoint);
+}
+
+void ConstStructBuilder::
+AppendField(const FieldDecl *Field, uint64_t FieldOffset,
+ llvm::Constant *InitCst) {
+ const ASTContext &Context = CGM.getContext();
+
+ CharUnits FieldOffsetInChars = Context.toCharUnitsFromBits(FieldOffset);
+
+ AppendBytes(FieldOffsetInChars, InitCst);
+}
+
+void ConstStructBuilder::
+AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst) {
+
+ assert(NextFieldOffsetInChars <= FieldOffsetInChars
+ && "Field offset mismatch!");
+
+ CharUnits FieldAlignment = getAlignment(InitCst);
+
+ // Round up the field offset to the alignment of the field type.
+ CharUnits AlignedNextFieldOffsetInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);
+
+ if (AlignedNextFieldOffsetInChars > FieldOffsetInChars) {
+ assert(!Packed && "Alignment is wrong even with a packed struct!");
+
+ // Convert the struct to a packed struct.
+ ConvertStructToPacked();
+
+ AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
+ }
+
+ if (AlignedNextFieldOffsetInChars < FieldOffsetInChars) {
+ // We need to append padding.
+ AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);
+
+ assert(NextFieldOffsetInChars == FieldOffsetInChars &&
+ "Did not add enough padding!");
+
+ AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
+ }
+
+ // Add the field.
+ Elements.push_back(InitCst);
+ NextFieldOffsetInChars = AlignedNextFieldOffsetInChars +
+ getSizeInChars(InitCst);
+
+ if (Packed)
+ assert(LLVMStructAlignment == CharUnits::One() &&
+ "Packed struct not byte-aligned!");
+ else
+ LLVMStructAlignment = std::max(LLVMStructAlignment, FieldAlignment);
+}
+
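+// AppendBitField - Pack the bits of CI into the byte stream being built. Bits
+// that share a byte with the previously appended constant are OR'd into that
+// byte, honoring target endianness; any remaining bits are appended one char
+// at a time.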
+void ConstStructBuilder::AppendBitField(const FieldDecl *Field,
+ uint64_t FieldOffset,
+ llvm::ConstantInt *CI) {
+ const ASTContext &Context = CGM.getContext();
+ const uint64_t CharWidth = Context.getCharWidth();
+ uint64_t NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
+ if (FieldOffset > NextFieldOffsetInBits) {
+ // We need to add padding.
+ CharUnits PadSize = Context.toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(FieldOffset - NextFieldOffsetInBits,
+ Context.getTargetInfo().getCharAlign()));
+
+ AppendPadding(PadSize);
+ }
+
+ uint64_t FieldSize = Field->getBitWidthValue(Context);
+
+ llvm::APInt FieldValue = CI->getValue();
+
+ // Promote the size of FieldValue if necessary
+ // FIXME: This should never occur, but currently it can because initializer
+ // constants are cast to bool, and because clang is not enforcing bitfield
+ // width limits.
+ if (FieldSize > FieldValue.getBitWidth())
+ FieldValue = FieldValue.zext(FieldSize);
+
+ // Truncate the size of FieldValue to the bit field size.
+ if (FieldSize < FieldValue.getBitWidth())
+ FieldValue = FieldValue.trunc(FieldSize);
+
+ NextFieldOffsetInBits = Context.toBits(NextFieldOffsetInChars);
+ if (FieldOffset < NextFieldOffsetInBits) {
+ // Either part of the field or the entire field can go into the previous
+ // byte.
+ assert(!Elements.empty() && "Elements can't be empty!");
+
+ unsigned BitsInPreviousByte = NextFieldOffsetInBits - FieldOffset;
+
+ bool FitsCompletelyInPreviousByte =
+ BitsInPreviousByte >= FieldValue.getBitWidth();
+
+ llvm::APInt Tmp = FieldValue;
+
+ if (!FitsCompletelyInPreviousByte) {
+ unsigned NewFieldWidth = FieldSize - BitsInPreviousByte;
+
+ if (CGM.getTargetData().isBigEndian()) {
+ Tmp = Tmp.lshr(NewFieldWidth);
+ Tmp = Tmp.trunc(BitsInPreviousByte);
+
+ // We want the remaining high bits.
+ FieldValue = FieldValue.trunc(NewFieldWidth);
+ } else {
+ Tmp = Tmp.trunc(BitsInPreviousByte);
+
+ // We want the remaining low bits.
+ FieldValue = FieldValue.lshr(BitsInPreviousByte);
+ FieldValue = FieldValue.trunc(NewFieldWidth);
+ }
+ }
+
+ Tmp = Tmp.zext(CharWidth);
+ if (CGM.getTargetData().isBigEndian()) {
+ if (FitsCompletelyInPreviousByte)
+ Tmp = Tmp.shl(BitsInPreviousByte - FieldValue.getBitWidth());
+ } else {
+ Tmp = Tmp.shl(CharWidth - BitsInPreviousByte);
+ }
+
+ // 'or' in the bits that go into the previous byte.
+ llvm::Value *LastElt = Elements.back();
+ if (llvm::ConstantInt *Val = dyn_cast<llvm::ConstantInt>(LastElt))
+ Tmp |= Val->getValue();
+ else {
+ assert(isa<llvm::UndefValue>(LastElt));
+ // If there is an undef field that we're adding to, it can either be a
+ // scalar undef (in which case, we just replace it with our field) or it
+ // is an array. If it is an array, we have to pull one byte off the
+ // array so that the other undef bytes stay around.
+ if (!isa<llvm::IntegerType>(LastElt->getType())) {
+ // The undef padding will be a multibyte array; create a new, smaller
+ // padding array and then a hole for our i8 to get plopped into.
+ assert(isa<llvm::ArrayType>(LastElt->getType()) &&
+ "Expected array padding of undefs");
+ llvm::ArrayType *AT = cast<llvm::ArrayType>(LastElt->getType());
+ assert(AT->getElementType()->isIntegerTy(CharWidth) &&
+ AT->getNumElements() != 0 &&
+ "Expected non-empty array padding of undefs");
+
+ // Remove the padding array.
+ NextFieldOffsetInChars -= CharUnits::fromQuantity(AT->getNumElements());
+ Elements.pop_back();
+
+ // Add the padding back in two chunks.
+ AppendPadding(CharUnits::fromQuantity(AT->getNumElements()-1));
+ AppendPadding(CharUnits::One());
+ assert(isa<llvm::UndefValue>(Elements.back()) &&
+ Elements.back()->getType()->isIntegerTy(CharWidth) &&
+ "Padding addition didn't work right");
+ }
+ }
+
+ Elements.back() = llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp);
+
+ if (FitsCompletelyInPreviousByte)
+ return;
+ }
+
+ while (FieldValue.getBitWidth() > CharWidth) {
+ llvm::APInt Tmp;
+
+ if (CGM.getTargetData().isBigEndian()) {
+ // We want the high bits.
+ Tmp =
+ FieldValue.lshr(FieldValue.getBitWidth() - CharWidth).trunc(CharWidth);
+ } else {
+ // We want the low bits.
+ Tmp = FieldValue.trunc(CharWidth);
+
+ FieldValue = FieldValue.lshr(CharWidth);
+ }
+
+ Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(), Tmp));
+ ++NextFieldOffsetInChars;
+
+ FieldValue = FieldValue.trunc(FieldValue.getBitWidth() - CharWidth);
+ }
+
+ assert(FieldValue.getBitWidth() > 0 &&
+ "Should have at least one bit left!");
+ assert(FieldValue.getBitWidth() <= CharWidth &&
+ "Should not have more than a byte left!");
+
+ if (FieldValue.getBitWidth() < CharWidth) {
+ if (CGM.getTargetData().isBigEndian()) {
+ unsigned BitWidth = FieldValue.getBitWidth();
+
+ FieldValue = FieldValue.zext(CharWidth) << (CharWidth - BitWidth);
+ } else
+ FieldValue = FieldValue.zext(CharWidth);
+ }
+
+ // Append the last element.
+ Elements.push_back(llvm::ConstantInt::get(CGM.getLLVMContext(),
+ FieldValue));
+ ++NextFieldOffsetInChars;
+}
+
+void ConstStructBuilder::AppendPadding(CharUnits PadSize) {
+ if (PadSize.isZero())
+ return;
+
+ llvm::Type *Ty = CGM.Int8Ty;
+ if (PadSize > CharUnits::One())
+ Ty = llvm::ArrayType::get(Ty, PadSize.getQuantity());
+
+ llvm::Constant *C = llvm::UndefValue::get(Ty);
+ Elements.push_back(C);
+ assert(getAlignment(C) == CharUnits::One() &&
+ "Padding must have 1 byte alignment!");
+
+ NextFieldOffsetInChars += getSizeInChars(C);
+}
+
+void ConstStructBuilder::AppendTailPadding(CharUnits RecordSize) {
+ assert(NextFieldOffsetInChars <= RecordSize &&
+ "Size mismatch!");
+
+ AppendPadding(RecordSize - NextFieldOffsetInChars);
+}
+
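+// ConvertStructToPacked - Rebuild the element list with explicit undef padding
+// so field offsets no longer rely on natural alignment; from this point on the
+// initializer is emitted as a packed, byte-aligned LLVM struct.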
+void ConstStructBuilder::ConvertStructToPacked() {
+ SmallVector<llvm::Constant *, 16> PackedElements;
+ CharUnits ElementOffsetInChars = CharUnits::Zero();
+
+ for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
+ llvm::Constant *C = Elements[i];
+
+ CharUnits ElementAlign = CharUnits::fromQuantity(
+ CGM.getTargetData().getABITypeAlignment(C->getType()));
+ CharUnits AlignedElementOffsetInChars =
+ ElementOffsetInChars.RoundUpToAlignment(ElementAlign);
+
+ if (AlignedElementOffsetInChars > ElementOffsetInChars) {
+ // We need some padding.
+ CharUnits NumChars =
+ AlignedElementOffsetInChars - ElementOffsetInChars;
+
+ llvm::Type *Ty = CGM.Int8Ty;
+ if (NumChars > CharUnits::One())
+ Ty = llvm::ArrayType::get(Ty, NumChars.getQuantity());
+
+ llvm::Constant *Padding = llvm::UndefValue::get(Ty);
+ PackedElements.push_back(Padding);
+ ElementOffsetInChars += getSizeInChars(Padding);
+ }
+
+ PackedElements.push_back(C);
+ ElementOffsetInChars += getSizeInChars(C);
+ }
+
+ assert(ElementOffsetInChars == NextFieldOffsetInChars &&
+ "Packing the struct changed its size!");
+
+ Elements.swap(PackedElements);
+ LLVMStructAlignment = CharUnits::One();
+ Packed = true;
+}
+
+bool ConstStructBuilder::Build(InitListExpr *ILE) {
+ if (ILE->initializesStdInitializerList()) {
+ //CGM.ErrorUnsupported(ILE, "global std::initializer_list");
+ return false;
+ }
+
+ RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ unsigned FieldNo = 0;
+ unsigned ElementNo = 0;
+ const FieldDecl *LastFD = 0;
+ bool IsMsStruct = RD->hasAttr<MsStructAttr>();
+
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are
+ // ignored:
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((*Field), LastFD)) {
+ --FieldNo;
+ continue;
+ }
+ LastFD = (*Field);
+ }
+
+ // If this is a union, skip all the fields that aren't being initialized.
+ if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
+ continue;
+
+ // Don't emit anonymous bitfields; they just affect layout.
+ if (Field->isUnnamedBitfield()) {
+ LastFD = (*Field);
+ continue;
+ }
+
+ // Get the initializer. A struct can include fields without initializers;
+ // we just use explicit null values for them.
+ llvm::Constant *EltInit;
+ if (ElementNo < ILE->getNumInits())
+ EltInit = CGM.EmitConstantExpr(ILE->getInit(ElementNo++),
+ Field->getType(), CGF);
+ else
+ EltInit = CGM.EmitNullConstant(Field->getType());
+
+ if (!EltInit)
+ return false;
+
+ if (!Field->isBitField()) {
+ // Handle non-bitfield members.
+ AppendField(*Field, Layout.getFieldOffset(FieldNo), EltInit);
+ } else {
+ // Otherwise we have a bitfield.
+ AppendBitField(*Field, Layout.getFieldOffset(FieldNo),
+ cast<llvm::ConstantInt>(EltInit));
+ }
+ }
+
+ return true;
+}
+
+namespace {
+struct BaseInfo {
+ BaseInfo(const CXXRecordDecl *Decl, CharUnits Offset, unsigned Index)
+ : Decl(Decl), Offset(Offset), Index(Index) {
+ }
+
+ const CXXRecordDecl *Decl;
+ CharUnits Offset;
+ unsigned Index;
+
+ bool operator<(const BaseInfo &O) const { return Offset < O.Offset; }
+};
+}
+
+void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
+ bool IsPrimaryBase, llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ CharUnits Offset) {
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
+ // Add a vtable pointer, if we need one and it hasn't already been added.
+ if (CD->isDynamicClass() && !IsPrimaryBase)
+ AppendVTablePointer(BaseSubobject(CD, Offset), VTable, VTableClass);
+
+ // Accumulate and sort bases, in order to visit them in address order, which
+ // may not be the same as declaration order.
+ llvm::SmallVector<BaseInfo, 8> Bases;
+ Bases.reserve(CD->getNumBases());
+ unsigned BaseNo = 0;
+ for (CXXRecordDecl::base_class_const_iterator Base = CD->bases_begin(),
+ BaseEnd = CD->bases_end(); Base != BaseEnd; ++Base, ++BaseNo) {
+ assert(!Base->isVirtual() && "should not have virtual bases here");
+ const CXXRecordDecl *BD = Base->getType()->getAsCXXRecordDecl();
+ CharUnits BaseOffset = Layout.getBaseClassOffset(BD);
+ Bases.push_back(BaseInfo(BD, BaseOffset, BaseNo));
+ }
+ std::stable_sort(Bases.begin(), Bases.end());
+
+ for (unsigned I = 0, N = Bases.size(); I != N; ++I) {
+ BaseInfo &Base = Bases[I];
+
+ bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl;
+ Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase,
+ VTable, VTableClass, Offset + Base.Offset);
+ }
+ }
+
+ unsigned FieldNo = 0;
+ const FieldDecl *LastFD = 0;
+ bool IsMsStruct = RD->hasAttr<MsStructAttr>();
+ uint64_t OffsetBits = CGM.getContext().toBits(Offset);
+
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are
+ // ignored:
+ if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((*Field), LastFD)) {
+ --FieldNo;
+ continue;
+ }
+ LastFD = (*Field);
+ }
+
+ // If this is a union, skip all the fields that aren't being initialized.
+ if (RD->isUnion() && Val.getUnionField() != *Field)
+ continue;
+
+ // Don't emit anonymous bitfields; they just affect layout.
+ if (Field->isUnnamedBitfield()) {
+ LastFD = (*Field);
+ continue;
+ }
+
+ // Emit the value of the initializer.
+ const APValue &FieldValue =
+ RD->isUnion() ? Val.getUnionValue() : Val.getStructField(FieldNo);
+ llvm::Constant *EltInit =
+ CGM.EmitConstantValueForMemory(FieldValue, Field->getType(), CGF);
+ assert(EltInit && "EmitConstantValue can't fail");
+
+ if (!Field->isBitField()) {
+ // Handle non-bitfield members.
+ AppendField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits, EltInit);
+ } else {
+ // Otherwise we have a bitfield.
+ AppendBitField(*Field, Layout.getFieldOffset(FieldNo) + OffsetBits,
+ cast<llvm::ConstantInt>(EltInit));
+ }
+ }
+}
+
+llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
+ RecordDecl *RD = Ty->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ CharUnits LayoutSizeInChars = Layout.getSize();
+
+ if (NextFieldOffsetInChars > LayoutSizeInChars) {
+ // If the struct is bigger than the size of the record type,
+ // we must have a flexible array member at the end.
+ assert(RD->hasFlexibleArrayMember() &&
+ "Must have flexible array member if struct is bigger than type!");
+
+ // No tail padding is necessary.
+ } else {
+ // Append tail padding if necessary.
+ AppendTailPadding(LayoutSizeInChars);
+
+ CharUnits LLVMSizeInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
+
+ // Check if we need to convert the struct to a packed struct.
+ if (NextFieldOffsetInChars <= LayoutSizeInChars &&
+ LLVMSizeInChars > LayoutSizeInChars) {
+ assert(!Packed && "Size mismatch!");
+
+ ConvertStructToPacked();
+ assert(NextFieldOffsetInChars <= LayoutSizeInChars &&
+ "Converting to packed did not help!");
+ }
+
+ assert(LayoutSizeInChars == NextFieldOffsetInChars &&
+ "Tail padding mismatch!");
+ }
+
+ // Pick the type to use. If the type is layout identical to the ConvertType
+ // type then use it, otherwise use whatever the builder produced for us.
+ llvm::StructType *STy =
+ llvm::ConstantStruct::getTypeForElements(CGM.getLLVMContext(),
+ Elements, Packed);
+ llvm::Type *ValTy = CGM.getTypes().ConvertType(Ty);
+ if (llvm::StructType *ValSTy = dyn_cast<llvm::StructType>(ValTy)) {
+ if (ValSTy->isLayoutIdentical(STy))
+ STy = ValSTy;
+ }
+
+ llvm::Constant *Result = llvm::ConstantStruct::get(STy, Elements);
+
+ assert(NextFieldOffsetInChars.RoundUpToAlignment(getAlignment(Result)) ==
+ getSizeInChars(Result) && "Size mismatch!");
+
+ return Result;
+}
+
+llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
+ CodeGenFunction *CGF,
+ InitListExpr *ILE) {
+ ConstStructBuilder Builder(CGM, CGF);
+
+ if (!Builder.Build(ILE))
+ return 0;
+
+ return Builder.Finalize(ILE->getType());
+}
+
+llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
+ CodeGenFunction *CGF,
+ const APValue &Val,
+ QualType ValTy) {
+ ConstStructBuilder Builder(CGM, CGF);
+
+ const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl();
+ const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
+ llvm::Constant *VTable = 0;
+ if (CD && CD->isDynamicClass())
+ VTable = CGM.getVTables().GetAddrOfVTable(CD);
+
+ Builder.Build(Val, RD, false, VTable, CD, CharUnits::Zero());
+
+ return Builder.Finalize(ValTy);
+}
+
+
+//===----------------------------------------------------------------------===//
+// ConstExprEmitter
+//===----------------------------------------------------------------------===//
+
+/// This class only needs to handle two cases:
+/// 1) Literals (this is used by APValue emission to emit literals).
+/// 2) Arrays, structs and unions (outside C++11 mode, we don't currently
+/// constant fold these types).
+class ConstExprEmitter :
+ public StmtVisitor<ConstExprEmitter, llvm::Constant*> {
+ CodeGenModule &CGM;
+ CodeGenFunction *CGF;
+ llvm::LLVMContext &VMContext;
+public:
+ ConstExprEmitter(CodeGenModule &cgm, CodeGenFunction *cgf)
+ : CGM(cgm), CGF(cgf), VMContext(cgm.getLLVMContext()) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ llvm::Constant *VisitStmt(Stmt *S) {
+ return 0;
+ }
+
+ llvm::Constant *VisitParenExpr(ParenExpr *PE) {
+ return Visit(PE->getSubExpr());
+ }
+
+ llvm::Constant *
+ VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
+ return Visit(PE->getReplacement());
+ }
+
+ llvm::Constant *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+ return Visit(GE->getResultExpr());
+ }
+
+ llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return Visit(E->getInitializer());
+ }
+
+ llvm::Constant *VisitCastExpr(CastExpr* E) {
+ Expr *subExpr = E->getSubExpr();
+ llvm::Constant *C = CGM.EmitConstantExpr(subExpr, subExpr->getType(), CGF);
+ if (!C) return 0;
+
+ llvm::Type *destType = ConvertType(E->getType());
+
+ switch (E->getCastKind()) {
+ case CK_ToUnion: {
+ // GCC cast to union extension
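+ // e.g. '(union U)42' is emitted as an anonymous struct whose first member
+ // is the emitted constant of the sub-expression, followed by undef padding
+ // up to sizeof(union U).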
+ assert(E->getType()->isUnionType() &&
+ "Destination type is not union type!");
+
+ // Build a struct with the union sub-element as the first member,
+ // and pad it to the appropriate size.
+ SmallVector<llvm::Constant*, 2> Elts;
+ SmallVector<llvm::Type*, 2> Types;
+ Elts.push_back(C);
+ Types.push_back(C->getType());
+ unsigned CurSize = CGM.getTargetData().getTypeAllocSize(C->getType());
+ unsigned TotalSize = CGM.getTargetData().getTypeAllocSize(destType);
+
+ assert(CurSize <= TotalSize && "Union size mismatch!");
+ if (unsigned NumPadBytes = TotalSize - CurSize) {
+ llvm::Type *Ty = CGM.Int8Ty;
+ if (NumPadBytes > 1)
+ Ty = llvm::ArrayType::get(Ty, NumPadBytes);
+
+ Elts.push_back(llvm::UndefValue::get(Ty));
+ Types.push_back(Ty);
+ }
+
+ llvm::StructType* STy =
+ llvm::StructType::get(C->getType()->getContext(), Types, false);
+ return llvm::ConstantStruct::get(STy, Elts);
+ }
+
+ case CK_LValueToRValue:
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ return C;
+
+ case CK_Dependent: llvm_unreachable("saw dependent cast!");
+
+ case CK_ReinterpretMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ return CGM.getCXXABI().EmitMemberPointerConversion(E, C);
+
+ // These will never be supported.
+ case CK_ObjCObjectLValueCast:
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ return 0;
+
+ // These don't need to be handled here because Evaluate knows how to
+ // evaluate them in the cases where they can be folded.
+ case CK_BitCast:
+ case CK_ToVoid:
+ case CK_Dynamic:
+ case CK_LValueBitCast:
+ case CK_NullToMemberPointer:
+ case CK_UserDefinedConversion:
+ case CK_ConstructorConversion:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_MemberPointerToBoolean:
+ case CK_VectorSplat:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_NullToPointer:
+ case CK_IntegralCast:
+ case CK_IntegralToPointer:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ return 0;
+ }
+ llvm_unreachable("Invalid CastKind");
+ }
+
+ llvm::Constant *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+
+ llvm::Constant *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
+ return Visit(E->GetTemporaryExpr());
+ }
+
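+ // EmitArrayInitialization - e.g. for 'int a[4] = {1, 2}' the two explicit
+ // initializers are emitted and the remaining elements get the array filler
+ // (or a null value when there is no filler); if the element constants end up
+ // with differing LLVM types, a packed struct is emitted instead of an array.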
+ llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
+ unsigned NumInitElements = ILE->getNumInits();
+ if (NumInitElements == 1 &&
+ CGM.getContext().hasSameUnqualifiedType(ILE->getType(),
+ ILE->getInit(0)->getType()) &&
+ (isa<StringLiteral>(ILE->getInit(0)) ||
+ isa<ObjCEncodeExpr>(ILE->getInit(0))))
+ return Visit(ILE->getInit(0));
+
+ llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(ConvertType(ILE->getType()));
+ llvm::Type *ElemTy = AType->getElementType();
+ unsigned NumElements = AType->getNumElements();
+
+ // Initialising an array requires us to automatically
+ // initialise any elements that have not been initialised explicitly.
+ unsigned NumInitableElts = std::min(NumInitElements, NumElements);
+
+ // Copy initializer elements.
+ std::vector<llvm::Constant*> Elts;
+ Elts.reserve(NumInitableElts + NumElements);
+
+ bool RewriteType = false;
+ for (unsigned i = 0; i < NumInitableElts; ++i) {
+ Expr *Init = ILE->getInit(i);
+ llvm::Constant *C = CGM.EmitConstantExpr(Init, Init->getType(), CGF);
+ if (!C)
+ return 0;
+ RewriteType |= (C->getType() != ElemTy);
+ Elts.push_back(C);
+ }
+
+ // Initialize remaining array elements.
+ // FIXME: This doesn't handle member pointers correctly!
+ llvm::Constant *fillC;
+ if (Expr *filler = ILE->getArrayFiller())
+ fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
+ else
+ fillC = llvm::Constant::getNullValue(ElemTy);
+ if (!fillC)
+ return 0;
+ RewriteType |= (fillC->getType() != ElemTy);
+ Elts.resize(NumElements, fillC);
+
+ if (RewriteType) {
+ // FIXME: Try to avoid packing the array
+ std::vector<llvm::Type*> Types;
+ Types.reserve(NumInitableElts + NumElements);
+ for (unsigned i = 0, e = Elts.size(); i < e; ++i)
+ Types.push_back(Elts[i]->getType());
+ llvm::StructType *SType = llvm::StructType::get(AType->getContext(),
+ Types, true);
+ return llvm::ConstantStruct::get(SType, Elts);
+ }
+
+ return llvm::ConstantArray::get(AType, Elts);
+ }
+
+ llvm::Constant *EmitStructInitialization(InitListExpr *ILE) {
+ return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
+ }
+
+ llvm::Constant *EmitUnionInitialization(InitListExpr *ILE) {
+ return ConstStructBuilder::BuildStruct(CGM, CGF, ILE);
+ }
+
+ llvm::Constant *VisitImplicitValueInitExpr(ImplicitValueInitExpr* E) {
+ return CGM.EmitNullConstant(E->getType());
+ }
+
+ llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
+ if (ILE->getType()->isArrayType())
+ return EmitArrayInitialization(ILE);
+
+ if (ILE->getType()->isRecordType())
+ return EmitStructInitialization(ILE);
+
+ if (ILE->getType()->isUnionType())
+ return EmitUnionInitialization(ILE);
+
+ return 0;
+ }
+
+ llvm::Constant *VisitCXXConstructExpr(CXXConstructExpr *E) {
+ if (!E->getConstructor()->isTrivial())
+ return 0;
+
+ QualType Ty = E->getType();
+
+ // FIXME: We should not have to call getBaseElementType here.
+ const RecordType *RT =
+ CGM.getContext().getBaseElementType(Ty)->getAs<RecordType>();
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ // If the class doesn't have a trivial destructor, we can't emit it as a
+ // constant expr.
+ if (!RD->hasTrivialDestructor())
+ return 0;
+
+ // Only copy and default constructors can be trivial.
+
+
+ if (E->getNumArgs()) {
+ assert(E->getNumArgs() == 1 && "trivial ctor with > 1 argument");
+ assert(E->getConstructor()->isCopyOrMoveConstructor() &&
+ "trivial ctor has argument but isn't a copy/move ctor");
+
+ Expr *Arg = E->getArg(0);
+ assert(CGM.getContext().hasSameUnqualifiedType(Ty, Arg->getType()) &&
+ "argument to copy ctor is of wrong type");
+
+ return Visit(Arg);
+ }
+
+ return CGM.EmitNullConstant(Ty);
+ }
+
+ llvm::Constant *VisitStringLiteral(StringLiteral *E) {
+ return CGM.GetConstantArrayFromStringLiteral(E);
+ }
+
+ llvm::Constant *VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
+ // This must be an @encode initializing an array in a static initializer.
+ // Don't emit it as the address of the string, emit the string data itself
+ // as an inline array.
+ std::string Str;
+ CGM.getContext().getObjCEncodingForType(E->getEncodedType(), Str);
+ const ConstantArrayType *CAT = cast<ConstantArrayType>(E->getType());
+
+ // Resize the string to the right size, adding zeros at the end, or
+ // truncating as needed.
+ Str.resize(CAT->getSize().getZExtValue(), '\0');
+ return llvm::ConstantDataArray::getString(VMContext, Str, false);
+ }
+
+ llvm::Constant *VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ // Utility methods
+ llvm::Type *ConvertType(QualType T) {
+ return CGM.getTypes().ConvertType(T);
+ }
+
+public:
+ llvm::Constant *EmitLValue(APValue::LValueBase LVBase) {
+ if (const ValueDecl *Decl = LVBase.dyn_cast<const ValueDecl*>()) {
+ if (Decl->hasAttr<WeakRefAttr>())
+ return CGM.GetWeakRefReference(Decl);
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Decl))
+ return CGM.GetAddrOfFunction(FD);
+ if (const VarDecl* VD = dyn_cast<VarDecl>(Decl)) {
+ // We can never refer to a variable with local storage.
+ if (!VD->hasLocalStorage()) {
+ if (VD->isFileVarDecl() || VD->hasExternalStorage())
+ return CGM.GetAddrOfGlobalVar(VD);
+ else if (VD->isLocalVarDecl()) {
+ assert(CGF && "Can't access static local vars without CGF");
+ return CGF->GetAddrOfStaticLocalVar(VD);
+ }
+ }
+ }
+ return 0;
+ }
+
+ Expr *E = const_cast<Expr*>(LVBase.get<const Expr*>());
+ switch (E->getStmtClass()) {
+ default: break;
+ case Expr::CompoundLiteralExprClass: {
+ // Note that due to the nature of compound literals, this is guaranteed
+ // to be the only use of the variable, so we just generate it here.
+ CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
+ llvm::Constant* C = CGM.EmitConstantExpr(CLE->getInitializer(),
+ CLE->getType(), CGF);
+ // FIXME: "Leaked" on failure.
+ if (C)
+ C = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
+ E->getType().isConstant(CGM.getContext()),
+ llvm::GlobalValue::InternalLinkage,
+ C, ".compoundliteral", 0, false,
+ CGM.getContext().getTargetAddressSpace(E->getType()));
+ return C;
+ }
+ case Expr::StringLiteralClass:
+ return CGM.GetAddrOfConstantStringFromLiteral(cast<StringLiteral>(E));
+ case Expr::ObjCEncodeExprClass:
+ return CGM.GetAddrOfConstantStringFromObjCEncode(cast<ObjCEncodeExpr>(E));
+ case Expr::ObjCStringLiteralClass: {
+ ObjCStringLiteral* SL = cast<ObjCStringLiteral>(E);
+ llvm::Constant *C =
+ CGM.getObjCRuntime().GenerateConstantString(SL->getString());
+ return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+ }
+ case Expr::PredefinedExprClass: {
+ unsigned Type = cast<PredefinedExpr>(E)->getIdentType();
+ if (CGF) {
+ LValue Res = CGF->EmitPredefinedLValue(cast<PredefinedExpr>(E));
+ return cast<llvm::Constant>(Res.getAddress());
+ } else if (Type == PredefinedExpr::PrettyFunction) {
+ return CGM.GetAddrOfConstantCString("top level", ".tmp");
+ }
+
+ return CGM.GetAddrOfConstantCString("", ".tmp");
+ }
+ case Expr::AddrLabelExprClass: {
+ assert(CGF && "Invalid address of label expression outside function.");
+ llvm::Constant *Ptr =
+ CGF->GetAddrOfLabel(cast<AddrLabelExpr>(E)->getLabel());
+ return llvm::ConstantExpr::getBitCast(Ptr, ConvertType(E->getType()));
+ }
+ case Expr::CallExprClass: {
+ CallExpr* CE = cast<CallExpr>(E);
+ unsigned builtin = CE->isBuiltinCall();
+ if (builtin !=
+ Builtin::BI__builtin___CFStringMakeConstantString &&
+ builtin !=
+ Builtin::BI__builtin___NSStringMakeConstantString)
+ break;
+ const Expr *Arg = CE->getArg(0)->IgnoreParenCasts();
+ const StringLiteral *Literal = cast<StringLiteral>(Arg);
+ if (builtin ==
+ Builtin::BI__builtin___NSStringMakeConstantString) {
+ return CGM.getObjCRuntime().GenerateConstantString(Literal);
+ }
+ // FIXME: need to deal with UCN conversion issues.
+ return CGM.GetAddrOfConstantCFString(Literal);
+ }
+ case Expr::BlockExprClass: {
+ std::string FunctionName;
+ if (CGF)
+ FunctionName = CGF->CurFn->getName();
+ else
+ FunctionName = "global";
+
+ return CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
+ }
+ case Expr::CXXTypeidExprClass: {
+ CXXTypeidExpr *Typeid = cast<CXXTypeidExpr>(E);
+ QualType T;
+ if (Typeid->isTypeOperand())
+ T = Typeid->getTypeOperand();
+ else
+ T = Typeid->getExprOperand()->getType();
+ return CGM.GetAddrOfRTTIDescriptor(T);
+ }
+ }
+
+ return 0;
+ }
+};
+
+} // end anonymous namespace.
+
+llvm::Constant *CodeGenModule::EmitConstantInit(const VarDecl &D,
+ CodeGenFunction *CGF) {
+ if (const APValue *Value = D.evaluateValue())
+ return EmitConstantValueForMemory(*Value, D.getType(), CGF);
+
+ // FIXME: Implement C++11 [basic.start.init]p2: if the initializer of a
+ // reference is a constant expression, and the reference binds to a temporary,
+ // then constant initialization is performed. ConstExprEmitter will
+ // incorrectly emit a prvalue constant in this case, and the calling code
+ // interprets that as the (pointer) value of the reference, rather than the
+ // desired value of the referee.
+ if (D.getType()->isReferenceType())
+ return 0;
+
+ const Expr *E = D.getInit();
+ assert(E && "No initializer to emit");
+
+ llvm::Constant* C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
+ if (C && C->getType()->isIntegerTy(1)) {
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+}
+
+llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
+ QualType DestType,
+ CodeGenFunction *CGF) {
+ Expr::EvalResult Result;
+
+ bool Success = false;
+
+ if (DestType->isReferenceType())
+ Success = E->EvaluateAsLValue(Result, Context);
+ else
+ Success = E->EvaluateAsRValue(Result, Context);
+
+ llvm::Constant *C = 0;
+ if (Success && !Result.HasSideEffects)
+ C = EmitConstantValue(Result.Val, DestType, CGF);
+ else
+ C = ConstExprEmitter(*this, CGF).Visit(const_cast<Expr*>(E));
+
+ if (C && C->getType()->isIntegerTy(1)) {
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(E->getType());
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+}
+
+llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
+ QualType DestType,
+ CodeGenFunction *CGF) {
+ switch (Value.getKind()) {
+ case APValue::Uninitialized:
+ llvm_unreachable("Constant expressions should be initialized.");
+ case APValue::LValue: {
+ llvm::Type *DestTy = getTypes().ConvertTypeForMem(DestType);
+ llvm::Constant *Offset =
+ llvm::ConstantInt::get(Int64Ty, Value.getLValueOffset().getQuantity());
+
+ llvm::Constant *C;
+ if (APValue::LValueBase LVBase = Value.getLValueBase()) {
+ // An array can be represented as an lvalue referring to the base.
+ if (isa<llvm::ArrayType>(DestTy)) {
+ assert(Offset->isNullValue() && "offset on array initializer");
+ return ConstExprEmitter(*this, CGF).Visit(
+ const_cast<Expr*>(LVBase.get<const Expr*>()));
+ }
+
+ C = ConstExprEmitter(*this, CGF).EmitLValue(LVBase);
+
+ // Apply offset if necessary.
+ if (!Offset->isNullValue()) {
+ llvm::Constant *Casted = llvm::ConstantExpr::getBitCast(C, Int8PtrTy);
+ Casted = llvm::ConstantExpr::getGetElementPtr(Casted, Offset);
+ C = llvm::ConstantExpr::getBitCast(Casted, C->getType());
+ }
+
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getBitCast(C, DestTy);
+
+ return llvm::ConstantExpr::getPtrToInt(C, DestTy);
+ } else {
+ C = Offset;
+
+ // Convert to the appropriate type; this could be an lvalue for
+ // an integer.
+ if (isa<llvm::PointerType>(DestTy))
+ return llvm::ConstantExpr::getIntToPtr(C, DestTy);
+
+ // If the types don't match this should only be a truncate.
+ if (C->getType() != DestTy)
+ return llvm::ConstantExpr::getTrunc(C, DestTy);
+
+ return C;
+ }
+ }
+ case APValue::Int:
+ return llvm::ConstantInt::get(VMContext, Value.getInt());
+ case APValue::ComplexInt: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantInt::get(VMContext,
+ Value.getComplexIntReal());
+ Complex[1] = llvm::ConstantInt::get(VMContext,
+ Value.getComplexIntImag());
+
+ // FIXME: the target may want to specify that this is packed.
+ llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
+ Complex[1]->getType(),
+ NULL);
+ return llvm::ConstantStruct::get(STy, Complex);
+ }
+ case APValue::Float: {
+ const llvm::APFloat &Init = Value.getFloat();
+ if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf)
+ return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
+ else
+ return llvm::ConstantFP::get(VMContext, Init);
+ }
+ case APValue::ComplexFloat: {
+ llvm::Constant *Complex[2];
+
+ Complex[0] = llvm::ConstantFP::get(VMContext,
+ Value.getComplexFloatReal());
+ Complex[1] = llvm::ConstantFP::get(VMContext,
+ Value.getComplexFloatImag());
+
+ // FIXME: the target may want to specify that this is packed.
+ llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
+ Complex[1]->getType(),
+ NULL);
+ return llvm::ConstantStruct::get(STy, Complex);
+ }
+ case APValue::Vector: {
+ SmallVector<llvm::Constant *, 4> Inits;
+ unsigned NumElts = Value.getVectorLength();
+
+ for (unsigned i = 0; i != NumElts; ++i) {
+ const APValue &Elt = Value.getVectorElt(i);
+ if (Elt.isInt())
+ Inits.push_back(llvm::ConstantInt::get(VMContext, Elt.getInt()));
+ else
+ Inits.push_back(llvm::ConstantFP::get(VMContext, Elt.getFloat()));
+ }
+ return llvm::ConstantVector::get(Inits);
+ }
+ case APValue::AddrLabelDiff: {
+ const AddrLabelExpr *LHSExpr = Value.getAddrLabelDiffLHS();
+ const AddrLabelExpr *RHSExpr = Value.getAddrLabelDiffRHS();
+ llvm::Constant *LHS = EmitConstantExpr(LHSExpr, LHSExpr->getType(), CGF);
+ llvm::Constant *RHS = EmitConstantExpr(RHSExpr, RHSExpr->getType(), CGF);
+
+ // Compute difference
+ llvm::Type *ResultType = getTypes().ConvertType(DestType);
+ LHS = llvm::ConstantExpr::getPtrToInt(LHS, IntPtrTy);
+ RHS = llvm::ConstantExpr::getPtrToInt(RHS, IntPtrTy);
+ llvm::Constant *AddrLabelDiff = llvm::ConstantExpr::getSub(LHS, RHS);
+
+ // LLVM is a bit sensitive about the exact format of the
+ // address-of-label difference; make sure to truncate after
+ // the subtraction.
+ return llvm::ConstantExpr::getTruncOrBitCast(AddrLabelDiff, ResultType);
+ }
+ case APValue::Struct:
+ case APValue::Union:
+ return ConstStructBuilder::BuildStruct(*this, CGF, Value, DestType);
+ case APValue::Array: {
+ const ArrayType *CAT = Context.getAsArrayType(DestType);
+ unsigned NumElements = Value.getArraySize();
+ unsigned NumInitElts = Value.getArrayInitializedElts();
+
+ std::vector<llvm::Constant*> Elts;
+ Elts.reserve(NumElements);
+
+ // Emit array filler, if there is one.
+ llvm::Constant *Filler = 0;
+ if (Value.hasArrayFiller())
+ Filler = EmitConstantValueForMemory(Value.getArrayFiller(),
+ CAT->getElementType(), CGF);
+
+ // Emit initializer elements.
+ llvm::Type *CommonElementType = 0;
+ for (unsigned I = 0; I < NumElements; ++I) {
+ llvm::Constant *C = Filler;
+ if (I < NumInitElts)
+ C = EmitConstantValueForMemory(Value.getArrayInitializedElt(I),
+ CAT->getElementType(), CGF);
+ if (I == 0)
+ CommonElementType = C->getType();
+ else if (C->getType() != CommonElementType)
+ CommonElementType = 0;
+ Elts.push_back(C);
+ }
+
+ if (!CommonElementType) {
+ // FIXME: Try to avoid packing the array
+ std::vector<llvm::Type*> Types;
+ Types.reserve(NumElements);
+ for (unsigned i = 0, e = Elts.size(); i < e; ++i)
+ Types.push_back(Elts[i]->getType());
+ llvm::StructType *SType = llvm::StructType::get(VMContext, Types, true);
+ return llvm::ConstantStruct::get(SType, Elts);
+ }
+
+ llvm::ArrayType *AType =
+ llvm::ArrayType::get(CommonElementType, NumElements);
+ return llvm::ConstantArray::get(AType, Elts);
+ }
+ case APValue::MemberPointer:
+ return getCXXABI().EmitMemberPointer(Value, DestType);
+ }
+ llvm_unreachable("Unknown APValue kind");
+}
+
+llvm::Constant *
+CodeGenModule::EmitConstantValueForMemory(const APValue &Value,
+ QualType DestType,
+ CodeGenFunction *CGF) {
+ llvm::Constant *C = EmitConstantValue(Value, DestType, CGF);
+ if (C->getType()->isIntegerTy(1)) {
+ llvm::Type *BoolTy = getTypes().ConvertTypeForMem(DestType);
+ C = llvm::ConstantExpr::getZExt(C, BoolTy);
+ }
+ return C;
+}
+
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
+ assert(E->isFileScope() && "not a file-scope compound literal expr");
+ return ConstExprEmitter(*this, 0).EmitLValue(E);
+}
+
+llvm::Constant *
+CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
+ // Member pointer constants always have a very particular form.
+ const MemberPointerType *type = cast<MemberPointerType>(uo->getType());
+ const ValueDecl *decl = cast<DeclRefExpr>(uo->getSubExpr())->getDecl();
+
+ // A member function pointer.
+ if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(decl))
+ return getCXXABI().EmitMemberPointer(method);
+
+ // Otherwise, a member data pointer.
+ uint64_t fieldOffset = getContext().getFieldOffset(decl);
+ CharUnits chars = getContext().toCharUnitsFromBits((int64_t) fieldOffset);
+ return getCXXABI().EmitMemberDataPointer(type, chars);
+}
+
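+/// Overwrite the bytes of Elements that correspond to pointer-to-data-member
+/// subobjects of T with the Itanium null representation (all-ones bytes),
+/// leaving every other byte untouched; e.g. for
+///   struct S { int x; int S::*p; };
+/// only the bytes covering 'p' are written.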
+static void
+FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
+ SmallVectorImpl<llvm::Constant *> &Elements,
+ uint64_t StartOffset) {
+ assert(StartOffset % CGM.getContext().getCharWidth() == 0 &&
+ "StartOffset not byte aligned!");
+
+ if (CGM.getTypes().isZeroInitializable(T))
+ return;
+
+ if (const ConstantArrayType *CAT =
+ CGM.getContext().getAsConstantArrayType(T)) {
+ QualType ElementTy = CAT->getElementType();
+ uint64_t ElementSize = CGM.getContext().getTypeSize(ElementTy);
+
+ for (uint64_t I = 0, E = CAT->getSize().getZExtValue(); I != E; ++I) {
+ FillInNullDataMemberPointers(CGM, ElementTy, Elements,
+ StartOffset + I * ElementSize);
+ }
+ } else if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ // Go through all bases and fill in any null pointer to data members.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual()) {
+ // Ignore virtual bases.
+ continue;
+ }
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (BaseDecl->isEmpty())
+ continue;
+
+ // Ignore bases that don't have any pointer to data members.
+ if (CGM.getTypes().isZeroInitializable(BaseDecl))
+ continue;
+
+ uint64_t BaseOffset = Layout.getBaseClassOffsetInBits(BaseDecl);
+ FillInNullDataMemberPointers(CGM, I->getType(),
+ Elements, StartOffset + BaseOffset);
+ }
+
+ // Visit all fields.
+ unsigned FieldNo = 0;
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I, ++FieldNo) {
+ QualType FieldType = I->getType();
+
+ if (CGM.getTypes().isZeroInitializable(FieldType))
+ continue;
+
+ uint64_t FieldOffset = StartOffset + Layout.getFieldOffset(FieldNo);
+ FillInNullDataMemberPointers(CGM, FieldType, Elements, FieldOffset);
+ }
+ } else {
+ assert(T->isMemberPointerType() && "Should only see member pointers here!");
+ assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
+ "Should only see pointers to data members here!");
+
+ CharUnits StartIndex = CGM.getContext().toCharUnitsFromBits(StartOffset);
+ CharUnits EndIndex = StartIndex + CGM.getContext().getTypeSizeInChars(T);
+
+ // FIXME: hardcodes Itanium member pointer representation!
+ llvm::Constant *NegativeOne =
+ llvm::ConstantInt::get(CGM.Int8Ty, -1ULL, /*isSigned*/true);
+
+ // Fill in the null data member pointer.
+ for (CharUnits I = StartIndex; I != EndIndex; ++I)
+ Elements[I.getQuantity()] = NegativeOne;
+ }
+}
+
+static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
+ llvm::Type *baseType,
+ const CXXRecordDecl *base);
+
+static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
+ const CXXRecordDecl *record,
+ bool asCompleteObject) {
+ const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
+ llvm::StructType *structure =
+ (asCompleteObject ? layout.getLLVMType()
+ : layout.getBaseSubobjectLLVMType());
+
+ unsigned numElements = structure->getNumElements();
+ std::vector<llvm::Constant *> elements(numElements);
+
+ // Fill in all the bases.
+ for (CXXRecordDecl::base_class_const_iterator
+ I = record->bases_begin(), E = record->bases_end(); I != E; ++I) {
+ if (I->isVirtual()) {
+ // Ignore virtual bases; if we're laying out for a complete
+ // object, we'll lay these out later.
+ continue;
+ }
+
+ const CXXRecordDecl *base =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (base->isEmpty())
+ continue;
+
+ unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
+ llvm::Type *baseType = structure->getElementType(fieldIndex);
+ elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
+ }
+
+ // Fill in all the fields.
+ for (RecordDecl::field_iterator I = record->field_begin(),
+ E = record->field_end(); I != E; ++I) {
+ const FieldDecl *field = *I;
+
+ // Fill in non-bitfields. (Bitfields always use a zero pattern, which we
+ // will fill in later.)
+ if (!field->isBitField()) {
+ unsigned fieldIndex = layout.getLLVMFieldNo(field);
+ elements[fieldIndex] = CGM.EmitNullConstant(field->getType());
+ }
+
+ // For unions, stop after the first named field.
+ if (record->isUnion() && field->getDeclName())
+ break;
+ }
+
+ // Fill in the virtual bases, if we're working with the complete object.
+ if (asCompleteObject) {
+ for (CXXRecordDecl::base_class_const_iterator
+ I = record->vbases_begin(), E = record->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *base =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (base->isEmpty())
+ continue;
+
+ unsigned fieldIndex = layout.getVirtualBaseIndex(base);
+
+ // We might have already laid this field out.
+ if (elements[fieldIndex]) continue;
+
+ llvm::Type *baseType = structure->getElementType(fieldIndex);
+ elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
+ }
+ }
+
+ // Now go through all other fields and zero them out.
+ for (unsigned i = 0; i != numElements; ++i) {
+ if (!elements[i])
+ elements[i] = llvm::Constant::getNullValue(structure->getElementType(i));
+ }
+
+ return llvm::ConstantStruct::get(structure, elements);
+}
+
+/// Emit the null constant for a base subobject.
+static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
+ llvm::Type *baseType,
+ const CXXRecordDecl *base) {
+ const CGRecordLayout &baseLayout = CGM.getTypes().getCGRecordLayout(base);
+
+ // Just zero out bases that don't have any pointer to data members.
+ if (baseLayout.isZeroInitializableAsBase())
+ return llvm::Constant::getNullValue(baseType);
+
+ // If the base type is a struct, we can just use its null constant.
+ if (isa<llvm::StructType>(baseType)) {
+ return EmitNullConstant(CGM, base, /*complete*/ false);
+ }
+
+ // Otherwise, some bases are represented as arrays of i8 if the size
+ // of the base is smaller than its corresponding LLVM type. Figure
+ // out how many elements this base array has.
+ llvm::ArrayType *baseArrayType = cast<llvm::ArrayType>(baseType);
+ unsigned numBaseElements = baseArrayType->getNumElements();
+
+ // Fill in null data member pointers.
+ SmallVector<llvm::Constant *, 16> baseElements(numBaseElements);
+ FillInNullDataMemberPointers(CGM, CGM.getContext().getTypeDeclType(base),
+ baseElements, 0);
+
+ // Now go through all other elements and zero them out.
+ if (numBaseElements) {
+ llvm::Constant *i8_zero = llvm::Constant::getNullValue(CGM.Int8Ty);
+ for (unsigned i = 0; i != numBaseElements; ++i) {
+ if (!baseElements[i])
+ baseElements[i] = i8_zero;
+ }
+ }
+
+ return llvm::ConstantArray::get(baseArrayType, baseElements);
+}
+
+llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
+ if (getTypes().isZeroInitializable(T))
+ return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
+
+ if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
+ llvm::ArrayType *ATy =
+ cast<llvm::ArrayType>(getTypes().ConvertTypeForMem(T));
+
+ QualType ElementTy = CAT->getElementType();
+
+ llvm::Constant *Element = EmitNullConstant(ElementTy);
+ unsigned NumElements = CAT->getSize().getZExtValue();
+
+ if (Element->isNullValue())
+ return llvm::ConstantAggregateZero::get(ATy);
+
+ SmallVector<llvm::Constant *, 8> Array(NumElements, Element);
+ return llvm::ConstantArray::get(ATy, Array);
+ }
+
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ return ::EmitNullConstant(*this, RD, /*complete object*/ true);
+ }
+
+ assert(T->isMemberPointerType() && "Should only see member pointers here!");
+ assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
+ "Should only see pointers to data members here!");
+
+ // Itanium C++ ABI 2.3:
+ // A NULL pointer is represented as -1.
+ return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
+}
+
+llvm::Constant *
+CodeGenModule::EmitNullConstantForBase(const CXXRecordDecl *Record) {
+ return ::EmitNullConstant(*this, Record, false);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
new file mode 100644
index 0000000..18891f7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGExprScalar.cpp
@@ -0,0 +1,2857 @@
+//===--- CGExprScalar.cpp - Emit LLVM Code for Scalar Exprs ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Expr nodes with scalar LLVM types as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/CodeGenOptions.h"
+#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "CGDebugInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/GlobalVariable.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Target/TargetData.h"
+#include <cstdarg>
+
+using namespace clang;
+using namespace CodeGen;
+using llvm::Value;
+
+//===----------------------------------------------------------------------===//
+// Scalar Expression Emitter
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct BinOpInfo {
+ Value *LHS;
+ Value *RHS;
+ QualType Ty; // Computation Type.
+ BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
+ const Expr *E; // Entire expr, used for unsupported-operation diagnostics; may not be a binop.
+};
+
+static bool MustVisitNullValue(const Expr *E) {
+ // If a null pointer expression's type is the C++0x nullptr_t, then
+ // it's not necessarily a simple constant and it must be evaluated
+ // for its potential side effects.
+ return E->getType()->isNullPtrType();
+}
+
+class ScalarExprEmitter
+ : public StmtVisitor<ScalarExprEmitter, Value*> {
+ CodeGenFunction &CGF;
+ CGBuilderTy &Builder;
+ bool IgnoreResultAssign;
+ llvm::LLVMContext &VMContext;
+public:
+
+ ScalarExprEmitter(CodeGenFunction &cgf, bool ira=false)
+ : CGF(cgf), Builder(CGF.Builder), IgnoreResultAssign(ira),
+ VMContext(cgf.getLLVMContext()) {
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Utilities
+ //===--------------------------------------------------------------------===//
+
+ bool TestAndClearIgnoreResultAssign() {
+ bool I = IgnoreResultAssign;
+ IgnoreResultAssign = false;
+ return I;
+ }
+
+ llvm::Type *ConvertType(QualType T) { return CGF.ConvertType(T); }
+ LValue EmitLValue(const Expr *E) { return CGF.EmitLValue(E); }
+ LValue EmitCheckedLValue(const Expr *E) { return CGF.EmitCheckedLValue(E); }
+
+ Value *EmitLoadOfLValue(LValue LV) {
+ return CGF.EmitLoadOfLValue(LV).getScalarVal();
+ }
+
+ /// EmitLoadOfLValue - Given an expression that represents an l-value, emit
+ /// its address, then load and return the result.
+ Value *EmitLoadOfLValue(const Expr *E) {
+ return EmitLoadOfLValue(EmitCheckedLValue(E));
+ }
+
+ /// EmitConversionToBool - Convert the specified expression value to a
+ /// boolean (i1) truth value. This is equivalent to "Val != 0".
+ Value *EmitConversionToBool(Value *Src, QualType SrcType);
+
+ /// EmitScalarConversion - Emit a conversion from the specified type to the
+ /// specified destination type, both of which are LLVM scalar types.
+ Value *EmitScalarConversion(Value *Src, QualType SrcTy, QualType DstTy);
+
+ /// EmitComplexToScalarConversion - Emit a conversion from the specified
+ /// complex type to the specified destination type, where the destination type
+ /// is an LLVM scalar type.
+ Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
+ QualType SrcTy, QualType DstTy);
+
+ /// EmitNullValue - Emit a value that corresponds to null for the given type.
+ Value *EmitNullValue(QualType Ty);
+
+ /// EmitFloatToBoolConversion - Perform an FP to boolean conversion.
+ Value *EmitFloatToBoolConversion(Value *V) {
+ // Compare against 0.0 for fp scalars.
+ llvm::Value *Zero = llvm::Constant::getNullValue(V->getType());
+ return Builder.CreateFCmpUNE(V, Zero, "tobool");
+ }
+
+ /// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
+ Value *EmitPointerToBoolConversion(Value *V) {
+ Value *Zero = llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(V->getType()));
+ return Builder.CreateICmpNE(V, Zero, "tobool");
+ }
+
+ Value *EmitIntToBoolConversion(Value *V) {
+ // Because of the type rules of C, we often end up computing a
+ // logical value, then zero extending it to int, then wanting it
+ // as a logical value again. Optimize this common case.
+ if (llvm::ZExtInst *ZI = dyn_cast<llvm::ZExtInst>(V)) {
+ if (ZI->getOperand(0)->getType() == Builder.getInt1Ty()) {
+ Value *Result = ZI->getOperand(0);
+ // If there aren't any more uses, zap the instruction to save space.
+ // Note that there can be more uses, for example if this
+ // is the result of an assignment.
+ if (ZI->use_empty())
+ ZI->eraseFromParent();
+ return Result;
+ }
+ }
+
+ return Builder.CreateIsNotNull(V, "tobool");
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Visitor Methods
+ //===--------------------------------------------------------------------===//
+
+ Value *Visit(Expr *E) {
+ return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
+ }
+
+ Value *VisitStmt(Stmt *S) {
+ S->dump(CGF.getContext().getSourceManager());
+ llvm_unreachable("Stmt can't have complex result type!");
+ }
+ Value *VisitExpr(Expr *S);
+
+ Value *VisitParenExpr(ParenExpr *PE) {
+ return Visit(PE->getSubExpr());
+ }
+ Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
+ return Visit(E->getReplacement());
+ }
+ Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
+ return Visit(GE->getResultExpr());
+ }
+
+ // Leaves.
+ Value *VisitIntegerLiteral(const IntegerLiteral *E) {
+ return Builder.getInt(E->getValue());
+ }
+ Value *VisitFloatingLiteral(const FloatingLiteral *E) {
+ return llvm::ConstantFP::get(VMContext, E->getValue());
+ }
+ Value *VisitCharacterLiteral(const CharacterLiteral *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+ Value *VisitObjCBoolLiteralExpr(const ObjCBoolLiteralExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+ Value *VisitCXXBoolLiteralExpr(const CXXBoolLiteralExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+ Value *VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
+ return EmitNullValue(E->getType());
+ }
+ Value *VisitGNUNullExpr(const GNUNullExpr *E) {
+ return EmitNullValue(E->getType());
+ }
+ Value *VisitOffsetOfExpr(OffsetOfExpr *E);
+ Value *VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E);
+ Value *VisitAddrLabelExpr(const AddrLabelExpr *E) {
+ llvm::Value *V = CGF.GetAddrOfLabel(E->getLabel());
+ return Builder.CreateBitCast(V, ConvertType(E->getType()));
+ }
+
+ Value *VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()),E->getPackLength());
+ }
+
+ Value *VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ return CGF.EmitPseudoObjectRValue(E).getScalarVal();
+ }
+
+ Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ if (E->isGLValue())
+ return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E));
+
+ // Otherwise, assume the mapping is the scalar directly.
+ return CGF.getOpaqueRValueMapping(E).getScalarVal();
+ }
+
+ // l-values.
+ Value *VisitDeclRefExpr(DeclRefExpr *E) {
+ if (CodeGenFunction::ConstantEmission result = CGF.tryEmitAsConstant(E)) {
+ if (result.isReference())
+ return EmitLoadOfLValue(result.getReferenceLValue(CGF, E));
+ return result.getValue();
+ }
+ return EmitLoadOfLValue(E);
+ }
+
+ Value *VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
+ return CGF.EmitObjCSelectorExpr(E);
+ }
+ Value *VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
+ return CGF.EmitObjCProtocolExpr(E);
+ }
+ Value *VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ if (E->getMethodDecl() &&
+ E->getMethodDecl()->getResultType()->isReferenceType())
+ return EmitLoadOfLValue(E);
+ return CGF.EmitObjCMessageExpr(E).getScalarVal();
+ }
+
+ Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
+ LValue LV = CGF.EmitObjCIsaExpr(E);
+ Value *V = CGF.EmitLoadOfLValue(LV).getScalarVal();
+ return V;
+ }
+
+ Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
+ Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
+ Value *VisitMemberExpr(MemberExpr *E);
+ Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+
+ Value *VisitInitListExpr(InitListExpr *E);
+
+ Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
+ return CGF.CGM.EmitNullConstant(E->getType());
+ }
+ Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
+ if (E->getType()->isVariablyModifiedType())
+ CGF.EmitVariablyModifiedType(E->getType());
+ return VisitCastExpr(E);
+ }
+ Value *VisitCastExpr(CastExpr *E);
+
+ Value *VisitCallExpr(const CallExpr *E) {
+ if (E->getCallReturnType()->isReferenceType())
+ return EmitLoadOfLValue(E);
+
+ return CGF.EmitCallExpr(E).getScalarVal();
+ }
+
+ Value *VisitStmtExpr(const StmtExpr *E);
+
+ // Unary Operators.
+ Value *VisitUnaryPostDec(const UnaryOperator *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, false, false);
+ }
+ Value *VisitUnaryPostInc(const UnaryOperator *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, true, false);
+ }
+ Value *VisitUnaryPreDec(const UnaryOperator *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, false, true);
+ }
+ Value *VisitUnaryPreInc(const UnaryOperator *E) {
+ LValue LV = EmitLValue(E->getSubExpr());
+ return EmitScalarPrePostIncDec(E, LV, true, true);
+ }
+
+ llvm::Value *EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
+ llvm::Value *InVal,
+ llvm::Value *NextVal,
+ bool IsInc);
+
+ llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
+
+
+ Value *VisitUnaryAddrOf(const UnaryOperator *E) {
+ if (isa<MemberPointerType>(E->getType())) // never sugared
+ return CGF.CGM.getMemberPointerConstant(E);
+
+ return EmitLValue(E->getSubExpr()).getAddress();
+ }
+ Value *VisitUnaryDeref(const UnaryOperator *E) {
+ if (E->getType()->isVoidType())
+ return Visit(E->getSubExpr()); // the actual value should be unused
+ return EmitLoadOfLValue(E);
+ }
+ Value *VisitUnaryPlus(const UnaryOperator *E) {
+ // This differs from gcc, though, most likely due to a bug in gcc.
+ TestAndClearIgnoreResultAssign();
+ return Visit(E->getSubExpr());
+ }
+ Value *VisitUnaryMinus (const UnaryOperator *E);
+ Value *VisitUnaryNot (const UnaryOperator *E);
+ Value *VisitUnaryLNot (const UnaryOperator *E);
+ Value *VisitUnaryReal (const UnaryOperator *E);
+ Value *VisitUnaryImag (const UnaryOperator *E);
+ Value *VisitUnaryExtension(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ // C++
+ Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
+ return EmitLoadOfLValue(E);
+ }
+
+ Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
+ return Visit(DAE->getExpr());
+ }
+ Value *VisitCXXThisExpr(CXXThisExpr *TE) {
+ return CGF.LoadCXXThis();
+ }
+
+ Value *VisitExprWithCleanups(ExprWithCleanups *E) {
+ CGF.enterFullExpression(E);
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ return Visit(E->getSubExpr());
+ }
+ Value *VisitCXXNewExpr(const CXXNewExpr *E) {
+ return CGF.EmitCXXNewExpr(E);
+ }
+ Value *VisitCXXDeleteExpr(const CXXDeleteExpr *E) {
+ CGF.EmitCXXDeleteExpr(E);
+ return 0;
+ }
+ Value *VisitUnaryTypeTraitExpr(const UnaryTypeTraitExpr *E) {
+ return Builder.getInt1(E->getValue());
+ }
+
+ Value *VisitBinaryTypeTraitExpr(const BinaryTypeTraitExpr *E) {
+ return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
+ }
+
+ Value *VisitArrayTypeTraitExpr(const ArrayTypeTraitExpr *E) {
+ return llvm::ConstantInt::get(Builder.getInt32Ty(), E->getValue());
+ }
+
+ Value *VisitExpressionTraitExpr(const ExpressionTraitExpr *E) {
+ return llvm::ConstantInt::get(Builder.getInt1Ty(), E->getValue());
+ }
+
+ Value *VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *E) {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ CGF.EmitScalarExpr(E->getBase());
+ return 0;
+ }
+
+ Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
+ return EmitNullValue(E->getType());
+ }
+
+ Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
+ CGF.EmitCXXThrowExpr(E);
+ return 0;
+ }
+
+ Value *VisitCXXNoexceptExpr(const CXXNoexceptExpr *E) {
+ return Builder.getInt1(E->getValue());
+ }
+
+ // Binary Operators.
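+ // Note on signed overflow: the default (undefined) behavior uses 'nsw'
+ // arithmetic, -fwrapv selects plain wrapping arithmetic, and -ftrapv emits
+ // an overflow-checked operation that traps.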
+ Value *EmitMul(const BinOpInfo &Ops) {
+ if (Ops.Ty->isSignedIntegerOrEnumerationType()) {
+ switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(Ops);
+ }
+ }
+
+ if (Ops.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
+ return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
+ }
+ bool isTrapvOverflowBehavior() {
+ return CGF.getContext().getLangOpts().getSignedOverflowBehavior()
+ == LangOptions::SOB_Trapping;
+ }
+ /// Create a binary op that checks for overflow.
+ /// Currently only supports +, - and *.
+ Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
+ // Emit the overflow BB when -ftrapv option is activated.
+ void EmitOverflowBB(llvm::BasicBlock *overflowBB) {
+ Builder.SetInsertPoint(overflowBB);
+ llvm::Function *Trap = CGF.CGM.getIntrinsic(llvm::Intrinsic::trap);
+ Builder.CreateCall(Trap);
+ Builder.CreateUnreachable();
+ }
+ // Check for undefined division and modulus behaviors.
+ void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
+ llvm::Value *Zero,bool isDiv);
+ Value *EmitDiv(const BinOpInfo &Ops);
+ Value *EmitRem(const BinOpInfo &Ops);
+ Value *EmitAdd(const BinOpInfo &Ops);
+ Value *EmitSub(const BinOpInfo &Ops);
+ Value *EmitShl(const BinOpInfo &Ops);
+ Value *EmitShr(const BinOpInfo &Ops);
+ Value *EmitAnd(const BinOpInfo &Ops) {
+ return Builder.CreateAnd(Ops.LHS, Ops.RHS, "and");
+ }
+ Value *EmitXor(const BinOpInfo &Ops) {
+ return Builder.CreateXor(Ops.LHS, Ops.RHS, "xor");
+ }
+ Value *EmitOr (const BinOpInfo &Ops) {
+ return Builder.CreateOr(Ops.LHS, Ops.RHS, "or");
+ }
+
+ BinOpInfo EmitBinOps(const BinaryOperator *E);
+ LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*F)(const BinOpInfo &),
+ Value *&Result);
+
+ Value *EmitCompoundAssign(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*F)(const BinOpInfo &));
+
+ // Binary operators and binary compound assignment operators.
+#define HANDLEBINOP(OP) \
+ Value *VisitBin ## OP(const BinaryOperator *E) { \
+ return Emit ## OP(EmitBinOps(E)); \
+ } \
+ Value *VisitBin ## OP ## Assign(const CompoundAssignOperator *E) { \
+ return EmitCompoundAssign(E, &ScalarExprEmitter::Emit ## OP); \
+ }
+ HANDLEBINOP(Mul)
+ HANDLEBINOP(Div)
+ HANDLEBINOP(Rem)
+ HANDLEBINOP(Add)
+ HANDLEBINOP(Sub)
+ HANDLEBINOP(Shl)
+ HANDLEBINOP(Shr)
+ HANDLEBINOP(And)
+ HANDLEBINOP(Xor)
+ HANDLEBINOP(Or)
+#undef HANDLEBINOP
+
+ // Comparisons.
+ Value *EmitCompare(const BinaryOperator *E, unsigned UICmpOpc,
+ unsigned SICmpOpc, unsigned FCmpOpc);
+#define VISITCOMP(CODE, UI, SI, FP) \
+ Value *VisitBin##CODE(const BinaryOperator *E) { \
+ return EmitCompare(E, llvm::ICmpInst::UI, llvm::ICmpInst::SI, \
+ llvm::FCmpInst::FP); }
+ VISITCOMP(LT, ICMP_ULT, ICMP_SLT, FCMP_OLT)
+ VISITCOMP(GT, ICMP_UGT, ICMP_SGT, FCMP_OGT)
+ VISITCOMP(LE, ICMP_ULE, ICMP_SLE, FCMP_OLE)
+ VISITCOMP(GE, ICMP_UGE, ICMP_SGE, FCMP_OGE)
+ VISITCOMP(EQ, ICMP_EQ , ICMP_EQ , FCMP_OEQ)
+ VISITCOMP(NE, ICMP_NE , ICMP_NE , FCMP_UNE)
+#undef VISITCOMP
+
+ Value *VisitBinAssign (const BinaryOperator *E);
+
+ Value *VisitBinLAnd (const BinaryOperator *E);
+ Value *VisitBinLOr (const BinaryOperator *E);
+ Value *VisitBinComma (const BinaryOperator *E);
+
+ Value *VisitBinPtrMemD(const Expr *E) { return EmitLoadOfLValue(E); }
+ Value *VisitBinPtrMemI(const Expr *E) { return EmitLoadOfLValue(E); }
+
+ // Other Operators.
+ Value *VisitBlockExpr(const BlockExpr *BE);
+ Value *VisitAbstractConditionalOperator(const AbstractConditionalOperator *);
+ Value *VisitChooseExpr(ChooseExpr *CE);
+ Value *VisitVAArgExpr(VAArgExpr *VE);
+ Value *VisitObjCStringLiteral(const ObjCStringLiteral *E) {
+ return CGF.EmitObjCStringLiteral(E);
+ }
+ Value *VisitObjCNumericLiteral(ObjCNumericLiteral *E) {
+ return CGF.EmitObjCNumericLiteral(E);
+ }
+ Value *VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
+ return CGF.EmitObjCArrayLiteral(E);
+ }
+ Value *VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ return CGF.EmitObjCDictionaryLiteral(E);
+ }
+ Value *VisitAsTypeExpr(AsTypeExpr *CE);
+ Value *VisitAtomicExpr(AtomicExpr *AE);
+};
+} // end anonymous namespace.
+
+//===----------------------------------------------------------------------===//
+// Utilities
+//===----------------------------------------------------------------------===//
+
+/// EmitConversionToBool - Convert the specified expression value to a
+/// boolean (i1) truth value. This is equivalent to "Val != 0".
+Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
+ assert(SrcType.isCanonical() && "EmitScalarConversion strips typedefs");
+
+ if (SrcType->isRealFloatingType())
+ return EmitFloatToBoolConversion(Src);
+
+ if (const MemberPointerType *MPT = dyn_cast<MemberPointerType>(SrcType))
+ return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, Src, MPT);
+
+ assert((SrcType->isIntegerType() || isa<llvm::PointerType>(Src->getType())) &&
+ "Unknown scalar type to convert");
+
+ if (isa<llvm::IntegerType>(Src->getType()))
+ return EmitIntToBoolConversion(Src);
+
+ assert(isa<llvm::PointerType>(Src->getType()));
+ return EmitPointerToBoolConversion(Src);
+}
+
+/// EmitScalarConversion - Emit a conversion from the specified type to the
+/// specified destination type, both of which are LLVM scalar types.
+Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
+ QualType DstType) {
+ SrcType = CGF.getContext().getCanonicalType(SrcType);
+ DstType = CGF.getContext().getCanonicalType(DstType);
+ if (SrcType == DstType) return Src;
+
+ if (DstType->isVoidType()) return 0;
+
+ llvm::Type *SrcTy = Src->getType();
+
+ // Floating casts might be a bit special: if we're doing casts to / from half
+ // FP, we should go via special intrinsics.
+ if (SrcType->isHalfType()) {
+ Src = Builder.CreateCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16), Src);
+ SrcType = CGF.getContext().FloatTy;
+ SrcTy = CGF.FloatTy;
+ }
+
+ // Handle conversions to bool first, they are special: comparisons against 0.
+ if (DstType->isBooleanType())
+ return EmitConversionToBool(Src, SrcType);
+
+ llvm::Type *DstTy = ConvertType(DstType);
+
+ // Ignore conversions like int -> uint.
+ if (SrcTy == DstTy)
+ return Src;
+
+ // Handle pointer conversions next: pointers can only be converted to/from
+ // other pointers and integers. Check for pointer types in terms of LLVM, as
+ // some native types (like Obj-C id) may map to a pointer type.
+ if (isa<llvm::PointerType>(DstTy)) {
+ // The source value may be an integer, or a pointer.
+ if (isa<llvm::PointerType>(SrcTy))
+ return Builder.CreateBitCast(Src, DstTy, "conv");
+
+ assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
+ // First, convert to the correct width so that we control the kind of
+ // extension.
+ llvm::Type *MiddleTy = CGF.IntPtrTy;
+ bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
+ llvm::Value* IntResult =
+ Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
+ // Then, cast to pointer.
+ return Builder.CreateIntToPtr(IntResult, DstTy, "conv");
+ }
+
+ if (isa<llvm::PointerType>(SrcTy)) {
+ // Must be a ptr-to-int cast.
+ assert(isa<llvm::IntegerType>(DstTy) && "not ptr->int?");
+ return Builder.CreatePtrToInt(Src, DstTy, "conv");
+ }
+
+ // A scalar can be splatted to an extended vector of the same element type
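+ // (e.g. assigning a scalar float to an ext_vector_type float4 broadcasts it
+ //  to every lane via an insertelement followed by a zero-mask shufflevector).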
+ if (DstType->isExtVectorType() && !SrcType->isVectorType()) {
+ // Cast the scalar to element type
+ QualType EltTy = DstType->getAs<ExtVectorType>()->getElementType();
+ llvm::Value *Elt = EmitScalarConversion(Src, SrcType, EltTy);
+
+ // Insert the element in element zero of an undef vector
+ llvm::Value *UnV = llvm::UndefValue::get(DstTy);
+ llvm::Value *Idx = Builder.getInt32(0);
+ UnV = Builder.CreateInsertElement(UnV, Elt, Idx);
+
+ // Splat the element across to all elements
+ unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+ llvm::Constant *Mask = llvm::ConstantVector::getSplat(NumElements,
+ Builder.getInt32(0));
+ llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
+ return Yay;
+ }
+
+ // Allow bitcast from vector to integer/fp of the same size.
+ if (isa<llvm::VectorType>(SrcTy) ||
+ isa<llvm::VectorType>(DstTy))
+ return Builder.CreateBitCast(Src, DstTy, "conv");
+
+ // Finally, we have the arithmetic types: real int/float.
+ Value *Res = NULL;
+ llvm::Type *ResTy = DstTy;
+
+ // Cast to half via float
+ if (DstType->isHalfType())
+ DstTy = CGF.FloatTy;
+
+ if (isa<llvm::IntegerType>(SrcTy)) {
+ bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
+ if (isa<llvm::IntegerType>(DstTy))
+ Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
+ else if (InputSigned)
+ Res = Builder.CreateSIToFP(Src, DstTy, "conv");
+ else
+ Res = Builder.CreateUIToFP(Src, DstTy, "conv");
+ } else if (isa<llvm::IntegerType>(DstTy)) {
+ assert(SrcTy->isFloatingPointTy() && "Unknown real conversion");
+ if (DstType->isSignedIntegerOrEnumerationType())
+ Res = Builder.CreateFPToSI(Src, DstTy, "conv");
+ else
+ Res = Builder.CreateFPToUI(Src, DstTy, "conv");
+ } else {
+ assert(SrcTy->isFloatingPointTy() && DstTy->isFloatingPointTy() &&
+ "Unknown real conversion");
+ if (DstTy->getTypeID() < SrcTy->getTypeID())
+ Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
+ else
+ Res = Builder.CreateFPExt(Src, DstTy, "conv");
+ }
+
+ if (DstTy != ResTy) {
+ assert(ResTy->isIntegerTy(16) && "Only half FP requires extra conversion");
+ Res = Builder.CreateCall(
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16), Res);
+ }
+
+ return Res;
+}
+
+/// EmitComplexToScalarConversion - Emit a conversion from the specified complex
+/// type to the specified destination type, where the destination type is an
+/// LLVM scalar type.
+Value *ScalarExprEmitter::
+EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
+ QualType SrcTy, QualType DstTy) {
+ // Get the source element type.
+ SrcTy = SrcTy->getAs<ComplexType>()->getElementType();
+
+ // Handle conversions to bool first, they are special: comparisons against 0.
+ if (DstTy->isBooleanType()) {
+ // Complex != 0 -> (Real != 0) | (Imag != 0)
+ Src.first = EmitScalarConversion(Src.first, SrcTy, DstTy);
+ Src.second = EmitScalarConversion(Src.second, SrcTy, DstTy);
+ return Builder.CreateOr(Src.first, Src.second, "tobool");
+ }
+
+ // C99 6.3.1.7p2: "When a value of complex type is converted to a real type,
+ // the imaginary part of the complex value is discarded and the value of the
+ // real part is converted according to the conversion rules for the
+ // corresponding real type."
+ return EmitScalarConversion(Src.first, SrcTy, DstTy);
+}
+
+Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
+ if (const MemberPointerType *MPT = Ty->getAs<MemberPointerType>())
+ return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
+
+ return llvm::Constant::getNullValue(ConvertType(Ty));
+}
+
+//===----------------------------------------------------------------------===//
+// Visitor Methods
+//===----------------------------------------------------------------------===//
+
+Value *ScalarExprEmitter::VisitExpr(Expr *E) {
+ CGF.ErrorUnsupported(E, "scalar expression");
+ if (E->getType()->isVoidType())
+ return 0;
+ return llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+}
+
+Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
+ // Vector Mask Case
+ if (E->getNumSubExprs() == 2 ||
+ (E->getNumSubExprs() == 3 && E->getExpr(2)->getType()->isVectorType())) {
+ Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
+ Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
+ Value *Mask;
+
+ llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
+ unsigned LHSElts = LTy->getNumElements();
+
+ if (E->getNumSubExprs() == 3) {
+ Mask = CGF.EmitScalarExpr(E->getExpr(2));
+
+ // Shuffle LHS & RHS into one input vector.
+ SmallVector<llvm::Constant*, 32> concat;
+ for (unsigned i = 0; i != LHSElts; ++i) {
+ concat.push_back(Builder.getInt32(2*i));
+ concat.push_back(Builder.getInt32(2*i+1));
+ }
+
+ Value* CV = llvm::ConstantVector::get(concat);
+ LHS = Builder.CreateShuffleVector(LHS, RHS, CV, "concat");
+ LHSElts *= 2;
+ } else {
+ Mask = RHS;
+ }
+
+ llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
+ llvm::Constant* EltMask;
+
+ // Treat vec3 like vec4.
+ if ((LHSElts == 6) && (E->getNumSubExprs() == 3))
+ EltMask = llvm::ConstantInt::get(MTy->getElementType(),
+ (1 << llvm::Log2_32(LHSElts+2))-1);
+ else if ((LHSElts == 3) && (E->getNumSubExprs() == 2))
+ EltMask = llvm::ConstantInt::get(MTy->getElementType(),
+ (1 << llvm::Log2_32(LHSElts+1))-1);
+ else
+ EltMask = llvm::ConstantInt::get(MTy->getElementType(),
+ (1 << llvm::Log2_32(LHSElts))-1);
+
+ // Mask off the high bits of each shuffle index.
+ Value *MaskBits = llvm::ConstantVector::getSplat(MTy->getNumElements(),
+ EltMask);
+ Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
+
+ // newv = undef
+ // mask = mask & maskbits
+ // for each elt
+ // n = extract mask i
+ // x = extract val n
+ // newv = insert newv, x, i
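+ // (The mask here is a runtime vector value rather than a constant, so LLVM's
+ //  shufflevector instruction, which requires a constant mask, cannot be used
+ //  directly; each result element is selected with extract/insert instead.)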
+ llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
+ MTy->getNumElements());
+ Value* NewV = llvm::UndefValue::get(RTy);
+ for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
+ Value *IIndx = Builder.getInt32(i);
+ Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
+ Indx = Builder.CreateZExt(Indx, CGF.Int32Ty, "idx_zext");
+
+ // Handle vec3 specially, since the index will be off by one for the RHS.
+ if ((LHSElts == 6) && (E->getNumSubExprs() == 3)) {
+ Value *cmpIndx, *newIndx;
+ cmpIndx = Builder.CreateICmpUGT(Indx, Builder.getInt32(3),
+ "cmp_shuf_idx");
+ newIndx = Builder.CreateSub(Indx, Builder.getInt32(1), "shuf_idx_adj");
+ Indx = Builder.CreateSelect(cmpIndx, newIndx, Indx, "sel_shuf_idx");
+ }
+ Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
+ NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
+ }
+ return NewV;
+ }
+
+ Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
+ Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
+
+ // Handle vec3 specially, since the index will be off by one for the RHS.
+ llvm::VectorType *VTy = cast<llvm::VectorType>(V1->getType());
+ SmallVector<llvm::Constant*, 32> indices;
+ for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
+ unsigned Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
+ if (VTy->getNumElements() == 3 && Idx > 3)
+ Idx -= 1;
+ indices.push_back(Builder.getInt32(Idx));
+ }
+
+ Value *SV = llvm::ConstantVector::get(indices);
+ return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
+}
+Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
+ llvm::APSInt Value;
+ if (E->EvaluateAsInt(Value, CGF.getContext(), Expr::SE_AllowSideEffects)) {
+ if (E->isArrow())
+ CGF.EmitScalarExpr(E->getBase());
+ else
+ EmitLValue(E->getBase());
+ return Builder.getInt(Value);
+ }
+
+ // Emit debug info for aggregate now, if it was delayed to reduce
+ // debug info size.
+ CGDebugInfo *DI = CGF.getDebugInfo();
+ if (DI && CGF.CGM.getCodeGenOpts().LimitDebugInfo) {
+ QualType PQTy = E->getBase()->IgnoreParenImpCasts()->getType();
+ if (const PointerType * PTy = dyn_cast<PointerType>(PQTy))
+ if (FieldDecl *M = dyn_cast<FieldDecl>(E->getMemberDecl()))
+ DI->getOrCreateRecordType(PTy->getPointeeType(),
+ M->getParent()->getLocation());
+ }
+ return EmitLoadOfLValue(E);
+}
+
+Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ TestAndClearIgnoreResultAssign();
+
+ // Emit subscript expressions in rvalue contexts. For most cases, this just
+ // loads the lvalue formed by the subscript expr. However, we have to be
+ // careful, because the base of a vector subscript is occasionally an rvalue,
+ // so we can't get it as an lvalue.
+ if (!E->getBase()->getType()->isVectorType())
+ return EmitLoadOfLValue(E);
+
+ // Handle the vector case. The base must be a vector, the index must be an
+ // integer value.
+ Value *Base = Visit(E->getBase());
+ Value *Idx = Visit(E->getIdx());
+ bool IdxSigned = E->getIdx()->getType()->isSignedIntegerOrEnumerationType();
+ Idx = Builder.CreateIntCast(Idx, CGF.Int32Ty, IdxSigned, "vecidxcast");
+ return Builder.CreateExtractElement(Base, Idx, "vecext");
+}
+
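+/// Return the i32 shuffle-mask constant for element Idx of SVI, offset by Off,
+/// or an undef i32 if that mask element is undefined (-1).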
+static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
+ unsigned Off, llvm::Type *I32Ty) {
+ int MV = SVI->getMaskValue(Idx);
+ if (MV == -1)
+ return llvm::UndefValue::get(I32Ty);
+ return llvm::ConstantInt::get(I32Ty, Off+MV);
+}
+
+Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+ (void)Ignore;
+ assert(!Ignore && "init list ignored");
+ unsigned NumInitElements = E->getNumInits();
+
+ if (E->hadArrayRangeDesignator())
+ CGF.ErrorUnsupported(E, "GNU array range designator extension");
+
+ llvm::VectorType *VType =
+ dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
+
+ if (!VType) {
+ if (NumInitElements == 0) {
+ // C++11 value-initialization for the scalar.
+ return EmitNullValue(E->getType());
+ }
+ // We have a scalar in braces. Just use the first element.
+ return Visit(E->getInit(0));
+ }
+
+ unsigned ResElts = VType->getNumElements();
+
+ // Loop over initializers collecting the Value for each, and remembering
+ // whether the source was a swizzle (ExtVectorElementExpr). This will allow
+ // us to fold the shuffle for the swizzle into the shuffle for the vector
+ // initializer, since LLVM optimizers generally do not want to touch
+ // shuffles.
+ unsigned CurIdx = 0;
+ bool VIsUndefShuffle = false;
+ llvm::Value *V = llvm::UndefValue::get(VType);
+ for (unsigned i = 0; i != NumInitElements; ++i) {
+ Expr *IE = E->getInit(i);
+ Value *Init = Visit(IE);
+ SmallVector<llvm::Constant*, 16> Args;
+
+ llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
+
+ // Handle scalar elements. If the scalar initializer is actually one
+ // element of a different vector of the same width, use shuffle instead of
+ // extract+insert.
+ if (!VVT) {
+ if (isa<ExtVectorElementExpr>(IE)) {
+ llvm::ExtractElementInst *EI = cast<llvm::ExtractElementInst>(Init);
+
+ if (EI->getVectorOperandType()->getNumElements() == ResElts) {
+ llvm::ConstantInt *C = cast<llvm::ConstantInt>(EI->getIndexOperand());
+ Value *LHS = 0, *RHS = 0;
+ if (CurIdx == 0) {
+ // insert into undef -> shuffle (src, undef)
+ Args.push_back(C);
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+
+ LHS = EI->getVectorOperand();
+ RHS = V;
+ VIsUndefShuffle = true;
+ } else if (VIsUndefShuffle) {
+ // insert into undefshuffle && size match -> shuffle (v, src)
+ llvm::ShuffleVectorInst *SVV = cast<llvm::ShuffleVectorInst>(V);
+ for (unsigned j = 0; j != CurIdx; ++j)
+ Args.push_back(getMaskElt(SVV, j, 0, CGF.Int32Ty));
+ Args.push_back(Builder.getInt32(ResElts + C->getZExtValue()));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+
+ LHS = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
+ RHS = EI->getVectorOperand();
+ VIsUndefShuffle = false;
+ }
+ if (!Args.empty()) {
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
+ V = Builder.CreateShuffleVector(LHS, RHS, Mask);
+ ++CurIdx;
+ continue;
+ }
+ }
+ }
+ V = Builder.CreateInsertElement(V, Init, Builder.getInt32(CurIdx),
+ "vecinit");
+ VIsUndefShuffle = false;
+ ++CurIdx;
+ continue;
+ }
+
+ unsigned InitElts = VVT->getNumElements();
+
+ // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
+ // input is the same width as the vector being constructed, generate an
+ // optimized shuffle of the swizzle input into the result.
+ unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
+ if (isa<ExtVectorElementExpr>(IE)) {
+ llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
+ Value *SVOp = SVI->getOperand(0);
+ llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
+
+ if (OpTy->getNumElements() == ResElts) {
+ for (unsigned j = 0; j != CurIdx; ++j) {
+ // If the current vector initializer is a shuffle with undef, merge
+ // this shuffle directly into it.
+ if (VIsUndefShuffle) {
+ Args.push_back(getMaskElt(cast<llvm::ShuffleVectorInst>(V), j, 0,
+ CGF.Int32Ty));
+ } else {
+ Args.push_back(Builder.getInt32(j));
+ }
+ }
+ for (unsigned j = 0, je = InitElts; j != je; ++j)
+ Args.push_back(getMaskElt(SVI, j, Offset, CGF.Int32Ty));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+
+ if (VIsUndefShuffle)
+ V = cast<llvm::ShuffleVectorInst>(V)->getOperand(0);
+
+ Init = SVOp;
+ }
+ }
+
+ // Extend init to result vector length, and then shuffle its contribution
+ // to the vector initializer into V.
+ if (Args.empty()) {
+ for (unsigned j = 0; j != InitElts; ++j)
+ Args.push_back(Builder.getInt32(j));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
+ Init = Builder.CreateShuffleVector(Init, llvm::UndefValue::get(VVT),
+ Mask, "vext");
+
+ Args.clear();
+ for (unsigned j = 0; j != CurIdx; ++j)
+ Args.push_back(Builder.getInt32(j));
+ for (unsigned j = 0; j != InitElts; ++j)
+ Args.push_back(Builder.getInt32(j+Offset));
+ Args.resize(ResElts, llvm::UndefValue::get(CGF.Int32Ty));
+ }
+
+ // If V is undef, make sure it ends up on the RHS of the shuffle to aid
+ // merging subsequent shuffles into this one.
+ if (CurIdx == 0)
+ std::swap(V, Init);
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
+ V = Builder.CreateShuffleVector(V, Init, Mask, "vecinit");
+ VIsUndefShuffle = isa<llvm::UndefValue>(Init);
+ CurIdx += InitElts;
+ }
+
+ // FIXME: evaluate codegen vs. shuffling against constant null vector.
+ llvm::Type *EltTy = VType->getElementType();
+
+ // Emit remaining default initializers.
+ for (/* Do not initialize CurIdx */; CurIdx < ResElts; ++CurIdx) {
+ Value *Idx = Builder.getInt32(CurIdx);
+ llvm::Value *Init = llvm::Constant::getNullValue(EltTy);
+ V = Builder.CreateInsertElement(V, Init, Idx, "vecinit");
+ }
+ return V;
+}
+
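+/// Decide whether a class pointer cast needs a runtime null check on its
+/// source: unchecked derived-to-base casts, 'this', and glvalue operands are
+/// assumed to be non-null.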
+static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
+ const Expr *E = CE->getSubExpr();
+
+ if (CE->getCastKind() == CK_UncheckedDerivedToBase)
+ return false;
+
+ if (isa<CXXThisExpr>(E)) {
+ // We always assume that 'this' is never null.
+ return false;
+ }
+
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
+ // And that glvalue casts are never null.
+ if (ICE->getValueKind() != VK_RValue)
+ return false;
+ }
+
+ return true;
+}
+
+// VisitCastExpr - Emit code for an explicit or implicit cast. Implicit casts
+// have to handle a broader range of conversions than explicit casts, as they
+// handle things like function to pointer-to-function decay etc.
+Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
+ Expr *E = CE->getSubExpr();
+ QualType DestTy = CE->getType();
+ CastKind Kind = CE->getCastKind();
+
+ if (!DestTy->isVoidType())
+ TestAndClearIgnoreResultAssign();
+
+ // Since almost all cast kinds apply to scalars, this switch doesn't have
+ // a default case, so the compiler will warn on a missing case. The cases
+ // are in the same order as in the CastKind enum.
+ switch (Kind) {
+ case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
+
+ case CK_LValueBitCast:
+ case CK_ObjCObjectLValueCast: {
+ Value *V = EmitLValue(E).getAddress();
+ V = Builder.CreateBitCast(V,
+ ConvertType(CGF.getContext().getPointerType(DestTy)));
+ return EmitLoadOfLValue(CGF.MakeNaturalAlignAddrLValue(V, DestTy));
+ }
+
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_BitCast: {
+ Value *Src = Visit(const_cast<Expr*>(E));
+ return Builder.CreateBitCast(Src, ConvertType(DestTy));
+ }
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ case CK_NoOp:
+ case CK_UserDefinedConversion:
+ return Visit(const_cast<Expr*>(E));
+
+ case CK_BaseToDerived: {
+ const CXXRecordDecl *DerivedClassDecl =
+ DestTy->getCXXRecordDeclForPointerType();
+
+ return CGF.GetAddressOfDerivedClass(Visit(E), DerivedClassDecl,
+ CE->path_begin(), CE->path_end(),
+ ShouldNullCheckClassCastValue(CE));
+ }
+ case CK_UncheckedDerivedToBase:
+ case CK_DerivedToBase: {
+ const RecordType *DerivedClassTy =
+ E->getType()->getAs<PointerType>()->getPointeeType()->getAs<RecordType>();
+ CXXRecordDecl *DerivedClassDecl =
+ cast<CXXRecordDecl>(DerivedClassTy->getDecl());
+
+ return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl,
+ CE->path_begin(), CE->path_end(),
+ ShouldNullCheckClassCastValue(CE));
+ }
+ case CK_Dynamic: {
+ Value *V = Visit(const_cast<Expr*>(E));
+ const CXXDynamicCastExpr *DCE = cast<CXXDynamicCastExpr>(CE);
+ return CGF.EmitDynamicCast(V, DCE);
+ }
+
+ case CK_ArrayToPointerDecay: {
+ assert(E->getType()->isArrayType() &&
+ "Array to pointer decay must have array source type!");
+
+ Value *V = EmitLValue(E).getAddress(); // Bitfields can't be arrays.
+
+ // Note that VLA pointers are always decayed, so we don't need to do
+ // anything here.
+ if (!E->getType()->isVariableArrayType()) {
+ assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
+ assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
+ ->getElementType()) &&
+ "Expected pointer to array");
+ V = Builder.CreateStructGEP(V, 0, "arraydecay");
+ }
+
+ // Make sure the array decay ends up being the right type. This matters if
+ // the array type was of an incomplete type.
+ return CGF.Builder.CreateBitCast(V, ConvertType(CE->getType()));
+ }
+ case CK_FunctionToPointerDecay:
+ return EmitLValue(E).getAddress();
+
+ case CK_NullToPointer:
+ if (MustVisitNullValue(E))
+ (void) Visit(E);
+
+ return llvm::ConstantPointerNull::get(
+ cast<llvm::PointerType>(ConvertType(DestTy)));
+
+ case CK_NullToMemberPointer: {
+ if (MustVisitNullValue(E))
+ (void) Visit(E);
+
+ const MemberPointerType *MPT = CE->getType()->getAs<MemberPointerType>();
+ return CGF.CGM.getCXXABI().EmitNullMemberPointer(MPT);
+ }
+
+ case CK_ReinterpretMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer: {
+ Value *Src = Visit(E);
+
+ // Note that the AST doesn't distinguish between checked and
+ // unchecked member pointer conversions, so we always have to
+ // implement checked conversions here. This is inefficient when
+ // actual control flow may be required in order to perform the
+ // check, which it is for data member pointers (but not member
+ // function pointers on Itanium and ARM).
+ return CGF.CGM.getCXXABI().EmitMemberPointerConversion(CGF, CE, Src);
+ }
+
+ case CK_ARCProduceObject:
+ return CGF.EmitARCRetainScalarExpr(E);
+ case CK_ARCConsumeObject:
+ return CGF.EmitObjCConsumeObject(E->getType(), Visit(E));
+ case CK_ARCReclaimReturnedObject: {
+ llvm::Value *value = Visit(E);
+ value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
+ return CGF.EmitObjCConsumeObject(E->getType(), value);
+ }
+ case CK_ARCExtendBlockObject:
+ return CGF.EmitARCExtendBlockObject(E);
+
+ case CK_CopyAndAutoreleaseBlockObject:
+ return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
+
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexCast:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_ConstructorConversion:
+ case CK_ToUnion:
+ llvm_unreachable("scalar cast to non-scalar value");
+
+ case CK_LValueToRValue:
+ assert(CGF.getContext().hasSameUnqualifiedType(E->getType(), DestTy));
+ assert(E->isGLValue() && "lvalue-to-rvalue applied to r-value!");
+ return Visit(const_cast<Expr*>(E));
+
+ case CK_IntegralToPointer: {
+ Value *Src = Visit(const_cast<Expr*>(E));
+
+ // First, convert to the correct width so that we control the kind of
+ // extension.
+ llvm::Type *MiddleTy = CGF.IntPtrTy;
+ bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
+ llvm::Value* IntResult =
+ Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
+
+ return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy));
+ }
+ case CK_PointerToIntegral:
+ assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
+ return Builder.CreatePtrToInt(Visit(E), ConvertType(DestTy));
+
+ case CK_ToVoid: {
+ CGF.EmitIgnoredExpr(E);
+ return 0;
+ }
+ case CK_VectorSplat: {
+ llvm::Type *DstTy = ConvertType(DestTy);
+ Value *Elt = Visit(const_cast<Expr*>(E));
+ Elt = EmitScalarConversion(Elt, E->getType(),
+ DestTy->getAs<VectorType>()->getElementType());
+
+ // Insert the element in element zero of an undef vector
+ llvm::Value *UnV = llvm::UndefValue::get(DstTy);
+ llvm::Value *Idx = Builder.getInt32(0);
+ UnV = Builder.CreateInsertElement(UnV, Elt, Idx);
+
+ // Splat the element across to all elements
+ unsigned NumElements = cast<llvm::VectorType>(DstTy)->getNumElements();
+ llvm::Constant *Zero = Builder.getInt32(0);
+ llvm::Constant *Mask = llvm::ConstantVector::getSplat(NumElements, Zero);
+ llvm::Value *Yay = Builder.CreateShuffleVector(UnV, UnV, Mask, "splat");
+ return Yay;
+ }
+
+ case CK_IntegralCast:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingCast:
+ return EmitScalarConversion(Visit(E), E->getType(), DestTy);
+ case CK_IntegralToBoolean:
+ return EmitIntToBoolConversion(Visit(E));
+ case CK_PointerToBoolean:
+ return EmitPointerToBoolConversion(Visit(E));
+ case CK_FloatingToBoolean:
+ return EmitFloatToBoolConversion(Visit(E));
+ case CK_MemberPointerToBoolean: {
+ llvm::Value *MemPtr = Visit(E);
+ const MemberPointerType *MPT = E->getType()->getAs<MemberPointerType>();
+ return CGF.CGM.getCXXABI().EmitMemberPointerIsNotNull(CGF, MemPtr, MPT);
+ }
+
+ case CK_FloatingComplexToReal:
+ case CK_IntegralComplexToReal:
+ return CGF.EmitComplexExpr(E, false, true).first;
+
+ case CK_FloatingComplexToBoolean:
+ case CK_IntegralComplexToBoolean: {
+ CodeGenFunction::ComplexPairTy V = CGF.EmitComplexExpr(E);
+
+ // TODO: kill this function off, inline appropriate case here
+ return EmitComplexToScalarConversion(V, E->getType(), DestTy);
+ }
+
+ }
+
+ llvm_unreachable("unknown scalar cast");
+}
+
+Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
+ CodeGenFunction::StmtExprEvaluation eval(CGF);
+ return CGF.EmitCompoundStmt(*E->getSubStmt(), !E->getType()->isVoidType())
+ .getScalarVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Unary Operators
+//===----------------------------------------------------------------------===//
+
+llvm::Value *ScalarExprEmitter::
+EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
+ llvm::Value *InVal,
+ llvm::Value *NextVal, bool IsInc) {
+ switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateAdd(InVal, NextVal, IsInc ? "inc" : "dec");
+ case LangOptions::SOB_Trapping:
+ BinOpInfo BinOp;
+ BinOp.LHS = InVal;
+ BinOp.RHS = NextVal;
+ BinOp.Ty = E->getType();
+ BinOp.Opcode = BO_Add;
+ BinOp.E = E;
+ return EmitOverflowCheckedBinOp(BinOp);
+ }
+ llvm_unreachable("Unknown SignedOverflowBehaviorTy");
+}
+
+llvm::Value *
+ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+
+ QualType type = E->getSubExpr()->getType();
+ llvm::Value *value = EmitLoadOfLValue(LV);
+ llvm::Value *input = value;
+ llvm::PHINode *atomicPHI = 0;
+
+ int amount = (isInc ? 1 : -1);
+
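+ // For _Atomic operands, open a compare-and-swap retry loop: the updated
+ // value is computed from a PHI of the last value observed, and is published
+ // below with cmpxchg; on contention we branch back here and recompute.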
+ if (const AtomicType *atomicTy = type->getAs<AtomicType>()) {
+ llvm::BasicBlock *startBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
+ Builder.CreateBr(opBB);
+ Builder.SetInsertPoint(opBB);
+ atomicPHI = Builder.CreatePHI(value->getType(), 2);
+ atomicPHI->addIncoming(value, startBB);
+ type = atomicTy->getValueType();
+ value = atomicPHI;
+ }
+
+ // Special case of integer increment that we have to check first: bool++.
+ // Due to promotion rules, we get:
+ // bool++ -> bool = bool + 1
+ // -> bool = (int)bool + 1
+ // -> bool = ((int)bool + 1 != 0)
+ // An interesting aspect of this is that the result of an increment is always
+ // true. Decrement does not have this property.
+ if (isInc && type->isBooleanType()) {
+ value = Builder.getTrue();
+
+ // Most common case by far: integer increment.
+ } else if (type->isIntegerType()) {
+
+ llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
+
+ // Note that signed integer inc/dec with width less than int can't
+ // overflow because of promotion rules; we're just eliding a few steps here.
+ if (type->isSignedIntegerOrEnumerationType() &&
+ value->getType()->getPrimitiveSizeInBits() >=
+ CGF.IntTy->getBitWidth())
+ value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
+ else
+ value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
+
+ // Next most common: pointer increment.
+ } else if (const PointerType *ptr = type->getAs<PointerType>()) {
+ QualType type = ptr->getPointeeType();
+
+ // VLA types don't have constant size.
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(type)) {
+ llvm::Value *numElts = CGF.getVLASize(vla).first;
+ if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
+ if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ value = Builder.CreateGEP(value, numElts, "vla.inc");
+ else
+ value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc");
+
+ // Arithmetic on function pointers (!) is just +-1.
+ } else if (type->isFunctionType()) {
+ llvm::Value *amt = Builder.getInt32(amount);
+
+ value = CGF.EmitCastToVoidPtr(value);
+ if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ value = Builder.CreateGEP(value, amt, "incdec.funcptr");
+ else
+ value = Builder.CreateInBoundsGEP(value, amt, "incdec.funcptr");
+ value = Builder.CreateBitCast(value, input->getType());
+
+ // For everything else, we can just do a simple increment.
+ } else {
+ llvm::Value *amt = Builder.getInt32(amount);
+ if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ value = Builder.CreateGEP(value, amt, "incdec.ptr");
+ else
+ value = Builder.CreateInBoundsGEP(value, amt, "incdec.ptr");
+ }
+
+ // Vector increment/decrement.
+ } else if (type->isVectorType()) {
+ if (type->hasIntegerRepresentation()) {
+ llvm::Value *amt = llvm::ConstantInt::get(value->getType(), amount);
+
+ value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
+ } else {
+ value = Builder.CreateFAdd(
+ value,
+ llvm::ConstantFP::get(value->getType(), amount),
+ isInc ? "inc" : "dec");
+ }
+
+ // Floating point.
+ } else if (type->isRealFloatingType()) {
+ // Add the inc/dec to the real part.
+ llvm::Value *amt;
+
+ if (type->isHalfType()) {
+ // Another special case: half FP increment should be done via float
+ value =
+ Builder.CreateCall(CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16),
+ input);
+ }
+
+ if (value->getType()->isFloatTy())
+ amt = llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<float>(amount)));
+ else if (value->getType()->isDoubleTy())
+ amt = llvm::ConstantFP::get(VMContext,
+ llvm::APFloat(static_cast<double>(amount)));
+ else {
+ llvm::APFloat F(static_cast<float>(amount));
+ bool ignored;
+ F.convert(CGF.Target.getLongDoubleFormat(), llvm::APFloat::rmTowardZero,
+ &ignored);
+ amt = llvm::ConstantFP::get(VMContext, F);
+ }
+ value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
+
+ if (type->isHalfType())
+ value =
+ Builder.CreateCall(CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16),
+ value);
+
+ // Objective-C pointer types.
+ } else {
+ const ObjCObjectPointerType *OPT = type->castAs<ObjCObjectPointerType>();
+ value = CGF.EmitCastToVoidPtr(value);
+
+ CharUnits size = CGF.getContext().getTypeSizeInChars(OPT->getObjectType());
+ if (!isInc) size = -size;
+ llvm::Value *sizeValue =
+ llvm::ConstantInt::get(CGF.SizeTy, size.getQuantity());
+
+ if (CGF.getContext().getLangOpts().isSignedOverflowDefined())
+ value = Builder.CreateGEP(value, sizeValue, "incdec.objptr");
+ else
+ value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
+ value = Builder.CreateBitCast(value, input->getType());
+ }
+
+ if (atomicPHI) {
+ llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
+ llvm::Value *old = Builder.CreateAtomicCmpXchg(LV.getAddress(), atomicPHI,
+ value, llvm::SequentiallyConsistent);
+ atomicPHI->addIncoming(old, opBB);
+ llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
+ Builder.CreateCondBr(success, contBB, opBB);
+ Builder.SetInsertPoint(contBB);
+ return isPre ? value : input;
+ }
+
+ // Store the updated result through the lvalue.
+ if (LV.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(value), LV, &value);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(value), LV);
+
+ // If this is a postinc, return the value read from memory, otherwise use the
+ // updated value.
+ return isPre ? value : input;
+}
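+// For illustration: with the default signed-overflow behavior, "int i; ++i;"
+// lowers in the function above to roughly
+//   %inc = add nsw i32 %i, 1
+// while -fwrapv (SOB_Defined) emits a plain add and -ftrapv (SOB_Trapping)
+// goes through EmitOverflowCheckedBinOp.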
+
+
+
+Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ // Emit unary minus with EmitSub so we handle overflow cases etc.
+ BinOpInfo BinOp;
+ BinOp.RHS = Visit(E->getSubExpr());
+
+ if (BinOp.RHS->getType()->isFPOrFPVectorTy())
+ BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType());
+ else
+ BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
+ BinOp.Ty = E->getType();
+ BinOp.Opcode = BO_Sub;
+ BinOp.E = E;
+ return EmitSub(BinOp);
+}
+
+Value *ScalarExprEmitter::VisitUnaryNot(const UnaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ Value *Op = Visit(E->getSubExpr());
+ return Builder.CreateNot(Op, "neg");
+}
+
+Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
+
+ // Perform vector logical not on comparison with zero vector.
+ if (E->getType()->isExtVectorType()) {
+ Value *Oper = Visit(E->getSubExpr());
+ Value *Zero = llvm::Constant::getNullValue(Oper->getType());
+ Value *Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
+ return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
+ }
+
+ // Compare operand to zero.
+ Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
+
+ // Invert value.
+ // TODO: Could dynamically modify easy computations here. For example, if
+ // the operand is an icmp ne, turn into icmp eq.
+ BoolVal = Builder.CreateNot(BoolVal, "lnot");
+
+ // ZExt result to the expr type.
+ return Builder.CreateZExt(BoolVal, ConvertType(E->getType()), "lnot.ext");
+}
+
+Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
+ // Try folding the offsetof to a constant.
+ llvm::APSInt Value;
+ if (E->EvaluateAsInt(Value, CGF.getContext()))
+ return Builder.getInt(Value);
+
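+  // For illustration: a non-constant __builtin_offsetof(struct S, a[i].b)
+  // is walked below as three components (Field 'a', Array '[i]' scaled by the
+  // element size, Field '.b') whose offsets are summed into Result.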
+ // Loop over the components of the offsetof to compute the value.
+ unsigned n = E->getNumComponents();
+ llvm::Type* ResultType = ConvertType(E->getType());
+ llvm::Value* Result = llvm::Constant::getNullValue(ResultType);
+ QualType CurrentType = E->getTypeSourceInfo()->getType();
+ for (unsigned i = 0; i != n; ++i) {
+ OffsetOfExpr::OffsetOfNode ON = E->getComponent(i);
+ llvm::Value *Offset = 0;
+ switch (ON.getKind()) {
+ case OffsetOfExpr::OffsetOfNode::Array: {
+ // Compute the index
+ Expr *IdxExpr = E->getIndexExpr(ON.getArrayExprIndex());
+ llvm::Value* Idx = CGF.EmitScalarExpr(IdxExpr);
+ bool IdxSigned = IdxExpr->getType()->isSignedIntegerOrEnumerationType();
+ Idx = Builder.CreateIntCast(Idx, ResultType, IdxSigned, "conv");
+
+ // Save the element type
+ CurrentType =
+ CGF.getContext().getAsArrayType(CurrentType)->getElementType();
+
+ // Compute the element size
+ llvm::Value* ElemSize = llvm::ConstantInt::get(ResultType,
+ CGF.getContext().getTypeSizeInChars(CurrentType).getQuantity());
+
+ // Multiply out to compute the result
+ Offset = Builder.CreateMul(Idx, ElemSize);
+ break;
+ }
+
+ case OffsetOfExpr::OffsetOfNode::Field: {
+ FieldDecl *MemberDecl = ON.getField();
+ RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
+
+ // Compute the index of the field in its parent.
+ unsigned i = 0;
+ // FIXME: It would be nice if we didn't have to loop here!
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end();
+ Field != FieldEnd; (void)++Field, ++i) {
+ if (*Field == MemberDecl)
+ break;
+ }
+ assert(i < RL.getFieldCount() && "offsetof field in wrong type");
+
+ // Compute the offset to the field
+ int64_t OffsetInt = RL.getFieldOffset(i) /
+ CGF.getContext().getCharWidth();
+ Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
+
+ // Save the element type.
+ CurrentType = MemberDecl->getType();
+ break;
+ }
+
+ case OffsetOfExpr::OffsetOfNode::Identifier:
+ llvm_unreachable("dependent __builtin_offsetof");
+
+ case OffsetOfExpr::OffsetOfNode::Base: {
+ if (ON.getBase()->isVirtual()) {
+ CGF.ErrorUnsupported(E, "virtual base in offsetof");
+ continue;
+ }
+
+ RecordDecl *RD = CurrentType->getAs<RecordType>()->getDecl();
+ const ASTRecordLayout &RL = CGF.getContext().getASTRecordLayout(RD);
+
+ // Save the element type.
+ CurrentType = ON.getBase()->getType();
+
+ // Compute the offset to the base.
+ const RecordType *BaseRT = CurrentType->getAs<RecordType>();
+ CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
+ int64_t OffsetInt = RL.getBaseClassOffsetInBits(BaseRD) /
+ CGF.getContext().getCharWidth();
+ Offset = llvm::ConstantInt::get(ResultType, OffsetInt);
+ break;
+ }
+ }
+ Result = Builder.CreateAdd(Result, Offset);
+ }
+ return Result;
+}
+
+/// VisitUnaryExprOrTypeTraitExpr - Return the size or alignment of the type of
+/// the argument of the sizeof/alignof expression as an integer.
+Value *
+ScalarExprEmitter::VisitUnaryExprOrTypeTraitExpr(
+ const UnaryExprOrTypeTraitExpr *E) {
+ QualType TypeToSize = E->getTypeOfArgument();
+ if (E->getKind() == UETT_SizeOf) {
+ if (const VariableArrayType *VAT =
+ CGF.getContext().getAsVariableArrayType(TypeToSize)) {
+ if (E->isArgumentType()) {
+ // sizeof(type) - make sure to emit the VLA size.
+ CGF.EmitVariablyModifiedType(TypeToSize);
+ } else {
+ // C99 6.5.3.4p2: If the argument is an expression of type
+ // VLA, it is evaluated.
+ CGF.EmitIgnoredExpr(E->getArgumentExpr());
+ }
+
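+      // The size of a VLA is not a compile-time constant; compute it here as
+      // the element count times the element size, e.g. n * sizeof(int) for
+      // "int a[n]".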
+ QualType eltType;
+ llvm::Value *numElts;
+ llvm::tie(numElts, eltType) = CGF.getVLASize(VAT);
+
+ llvm::Value *size = numElts;
+
+ // Scale the number of non-VLA elements by the non-VLA element size.
+ CharUnits eltSize = CGF.getContext().getTypeSizeInChars(eltType);
+ if (!eltSize.isOne())
+ size = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), numElts);
+
+ return size;
+ }
+ }
+
+ // If this isn't sizeof(vla), the result must be constant; use the constant
+ // folding logic so we don't have to duplicate it here.
+ return Builder.getInt(E->EvaluateKnownConstInt(CGF.getContext()));
+}
+
+Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
+ Expr *Op = E->getSubExpr();
+ if (Op->getType()->isAnyComplexType()) {
+ // If it's an l-value, load through the appropriate subobject l-value.
+ // Note that we have to ask E because Op might be an l-value that
+ // this won't work for, e.g. an Obj-C property.
+ if (E->isGLValue())
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E)).getScalarVal();
+
+ // Otherwise, calculate and project.
+ return CGF.EmitComplexExpr(Op, false, true).first;
+ }
+
+ return Visit(Op);
+}
+
+Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
+ Expr *Op = E->getSubExpr();
+ if (Op->getType()->isAnyComplexType()) {
+ // If it's an l-value, load through the appropriate subobject l-value.
+ // Note that we have to ask E because Op might be an l-value that
+ // this won't work for, e.g. an Obj-C property.
+ if (Op->isGLValue())
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E)).getScalarVal();
+
+ // Otherwise, calculate and project.
+ return CGF.EmitComplexExpr(Op, true, false).second;
+ }
+
+ // __imag on a scalar returns zero. Emit the subexpr to ensure side
+ // effects are evaluated, but not the actual value.
+ if (Op->isGLValue())
+ CGF.EmitLValue(Op);
+ else
+ CGF.EmitScalarExpr(Op, true);
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
+}
+
+//===----------------------------------------------------------------------===//
+// Binary Operators
+//===----------------------------------------------------------------------===//
+
+BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
+ TestAndClearIgnoreResultAssign();
+ BinOpInfo Result;
+ Result.LHS = Visit(E->getLHS());
+ Result.RHS = Visit(E->getRHS());
+ Result.Ty = E->getType();
+ Result.Opcode = E->getOpcode();
+ Result.E = E;
+ return Result;
+}
+
+LValue ScalarExprEmitter::EmitCompoundAssignLValue(
+ const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*Func)(const BinOpInfo &),
+ Value *&Result) {
+ QualType LHSTy = E->getLHS()->getType();
+ BinOpInfo OpInfo;
+
+ if (E->getComputationResultType()->isAnyComplexType()) {
+ // This needs to go through the complex expression emitter, but it's a tad
+ // complicated to do that... I'm leaving it out for now. (Note that we do
+ // actually need the imaginary part of the RHS for multiplication and
+ // division.)
+ CGF.ErrorUnsupported(E, "complex compound assignment");
+ Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+ return LValue();
+ }
+
+ // Emit the RHS first. __block variables need to have the rhs evaluated
+ // first, plus this should improve codegen a little.
+ OpInfo.RHS = Visit(E->getRHS());
+ OpInfo.Ty = E->getComputationResultType();
+ OpInfo.Opcode = E->getOpcode();
+ OpInfo.E = E;
+ // Load/convert the LHS.
+ LValue LHSLV = EmitCheckedLValue(E->getLHS());
+ OpInfo.LHS = EmitLoadOfLValue(LHSLV);
+ OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
+ E->getComputationLHSType());
+
+ llvm::PHINode *atomicPHI = 0;
+ if (const AtomicType *atomicTy = OpInfo.Ty->getAs<AtomicType>()) {
+ // FIXME: For floating point types, we should be saving and restoring the
+ // floating point environment in the loop.
+ llvm::BasicBlock *startBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
+ Builder.CreateBr(opBB);
+ Builder.SetInsertPoint(opBB);
+ atomicPHI = Builder.CreatePHI(OpInfo.LHS->getType(), 2);
+ atomicPHI->addIncoming(OpInfo.LHS, startBB);
+ OpInfo.Ty = atomicTy->getValueType();
+ OpInfo.LHS = atomicPHI;
+ }
+
+ // Expand the binary operator.
+ Result = (this->*Func)(OpInfo);
+
+ // Convert the result back to the LHS type.
+ Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
+
+ if (atomicPHI) {
+ llvm::BasicBlock *opBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
+ llvm::Value *old = Builder.CreateAtomicCmpXchg(LHSLV.getAddress(), atomicPHI,
+ Result, llvm::SequentiallyConsistent);
+ atomicPHI->addIncoming(old, opBB);
+ llvm::Value *success = Builder.CreateICmpEQ(old, atomicPHI);
+ Builder.CreateCondBr(success, contBB, opBB);
+ Builder.SetInsertPoint(contBB);
+ return LHSLV;
+ }
+
+ // Store the result value into the LHS lvalue. Bit-fields are handled
+ // specially because the result is altered by the store, i.e., [C99 6.5.16p1]
+ // 'An assignment expression has the value of the left operand after the
+ // assignment...'.
+ if (LHSLV.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(Result), LHSLV, &Result);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(Result), LHSLV);
+
+ return LHSLV;
+}
+
+Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
+ Value *(ScalarExprEmitter::*Func)(const BinOpInfo &)) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+ Value *RHS;
+ LValue LHS = EmitCompoundAssignLValue(E, Func, RHS);
+
+ // If the result is clearly ignored, return now.
+ if (Ignore)
+ return 0;
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOpts().CPlusPlus)
+ return RHS;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LHS.isVolatileQualified())
+ return RHS;
+
+ // Otherwise, reload the value.
+ return EmitLoadOfLValue(LHS);
+}
+
+void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
+ const BinOpInfo &Ops,
+ llvm::Value *Zero, bool isDiv) {
+ llvm::Function::iterator insertPt = Builder.GetInsertBlock();
+ llvm::BasicBlock *contBB =
+ CGF.createBasicBlock(isDiv ? "div.cont" : "rem.cont", CGF.CurFn,
+ llvm::next(insertPt));
+ llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
+
+ llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
+
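+  // For signed types there are two trapping cases: division by zero and
+  // INT_MIN / -1, whose result is not representable. Unsigned types only
+  // need the divide-by-zero check.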
+ if (Ops.Ty->hasSignedIntegerRepresentation()) {
+ llvm::Value *IntMin =
+ Builder.getInt(llvm::APInt::getSignedMinValue(Ty->getBitWidth()));
+ llvm::Value *NegOne = llvm::ConstantInt::get(Ty, -1ULL);
+
+ llvm::Value *Cond1 = Builder.CreateICmpEQ(Ops.RHS, Zero);
+ llvm::Value *LHSCmp = Builder.CreateICmpEQ(Ops.LHS, IntMin);
+ llvm::Value *RHSCmp = Builder.CreateICmpEQ(Ops.RHS, NegOne);
+ llvm::Value *Cond2 = Builder.CreateAnd(LHSCmp, RHSCmp, "and");
+ Builder.CreateCondBr(Builder.CreateOr(Cond1, Cond2, "or"),
+ overflowBB, contBB);
+ } else {
+ CGF.Builder.CreateCondBr(Builder.CreateICmpEQ(Ops.RHS, Zero),
+ overflowBB, contBB);
+ }
+ EmitOverflowBB(overflowBB);
+ Builder.SetInsertPoint(contBB);
+}
+
+Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
+ if (isTrapvOverflowBehavior()) {
+ llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
+
+ if (Ops.Ty->isIntegerType())
+ EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
+ else if (Ops.Ty->isRealFloatingType()) {
+ llvm::Function::iterator insertPt = Builder.GetInsertBlock();
+ llvm::BasicBlock *DivCont = CGF.createBasicBlock("div.cont", CGF.CurFn,
+ llvm::next(insertPt));
+ llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow",
+ CGF.CurFn);
+ CGF.Builder.CreateCondBr(Builder.CreateFCmpOEQ(Ops.RHS, Zero),
+ overflowBB, DivCont);
+ EmitOverflowBB(overflowBB);
+ Builder.SetInsertPoint(DivCont);
+ }
+ }
+ if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
+ llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
+ if (CGF.getContext().getLangOpts().OpenCL) {
+ // OpenCL 1.1 7.4: minimum accuracy of single precision / is 2.5ulp
+ llvm::Type *ValTy = Val->getType();
+ if (ValTy->isFloatTy() ||
+ (isa<llvm::VectorType>(ValTy) &&
+ cast<llvm::VectorType>(ValTy)->getElementType()->isFloatTy()))
+ CGF.SetFPAccuracy(Val, 2.5);
+ }
+ return Val;
+ }
+ else if (Ops.Ty->hasUnsignedIntegerRepresentation())
+ return Builder.CreateUDiv(Ops.LHS, Ops.RHS, "div");
+ else
+ return Builder.CreateSDiv(Ops.LHS, Ops.RHS, "div");
+}
+
+Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
+ // Rem in C can't be a floating point type: C99 6.5.5p2.
+ if (isTrapvOverflowBehavior()) {
+ llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
+
+ if (Ops.Ty->isIntegerType())
+ EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
+ }
+
+ if (Ops.Ty->hasUnsignedIntegerRepresentation())
+ return Builder.CreateURem(Ops.LHS, Ops.RHS, "rem");
+ else
+ return Builder.CreateSRem(Ops.LHS, Ops.RHS, "rem");
+}
+
+Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
+ unsigned IID;
+ unsigned OpID = 0;
+
+ switch (Ops.Opcode) {
+ case BO_Add:
+ case BO_AddAssign:
+ OpID = 1;
+ IID = llvm::Intrinsic::sadd_with_overflow;
+ break;
+ case BO_Sub:
+ case BO_SubAssign:
+ OpID = 2;
+ IID = llvm::Intrinsic::ssub_with_overflow;
+ break;
+ case BO_Mul:
+ case BO_MulAssign:
+ OpID = 3;
+ IID = llvm::Intrinsic::smul_with_overflow;
+ break;
+ default:
+ llvm_unreachable("Unsupported operation for overflow detection");
+ }
+ OpID <<= 1;
+ OpID |= 1;
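+  // OpID now encodes the operation for the overflow handler: 3 for add,
+  // 5 for sub, 7 for mul (i.e. 2 * opcode id + 1).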
+
+ llvm::Type *opTy = CGF.CGM.getTypes().ConvertType(Ops.Ty);
+
+ llvm::Function *intrinsic = CGF.CGM.getIntrinsic(IID, opTy);
+
+ Value *resultAndOverflow = Builder.CreateCall2(intrinsic, Ops.LHS, Ops.RHS);
+ Value *result = Builder.CreateExtractValue(resultAndOverflow, 0);
+ Value *overflow = Builder.CreateExtractValue(resultAndOverflow, 1);
+
+ // Branch in case of overflow.
+ llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
+ llvm::Function::iterator insertPt = initialBB;
+ llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn,
+ llvm::next(insertPt));
+ llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
+
+ Builder.CreateCondBr(overflow, overflowBB, continueBB);
+
+ // Handle overflow with llvm.trap.
+ const std::string *handlerName =
+ &CGF.getContext().getLangOpts().OverflowHandler;
+ if (handlerName->empty()) {
+ EmitOverflowBB(overflowBB);
+ Builder.SetInsertPoint(continueBB);
+ return result;
+ }
+
+ // If an overflow handler is set, then we want to call it and then use its
+ // result, if it returns.
+ Builder.SetInsertPoint(overflowBB);
+
+ // Get the overflow handler.
+ llvm::Type *Int8Ty = CGF.Int8Ty;
+ llvm::Type *argTypes[] = { CGF.Int64Ty, CGF.Int64Ty, Int8Ty, Int8Ty };
+ llvm::FunctionType *handlerTy =
+ llvm::FunctionType::get(CGF.Int64Ty, argTypes, true);
+ llvm::Value *handler = CGF.CGM.CreateRuntimeFunction(handlerTy, *handlerName);
+
+ // Sign extend the args to 64-bit, so that we can use the same handler for
+ // all types of overflow.
+ llvm::Value *lhs = Builder.CreateSExt(Ops.LHS, CGF.Int64Ty);
+ llvm::Value *rhs = Builder.CreateSExt(Ops.RHS, CGF.Int64Ty);
+
+ // Call the handler with the two arguments, the operation, and the size of
+ // the result.
+ llvm::Value *handlerResult = Builder.CreateCall4(handler, lhs, rhs,
+ Builder.getInt8(OpID),
+ Builder.getInt8(cast<llvm::IntegerType>(opTy)->getBitWidth()));
+
+ // Truncate the result back to the desired size.
+ handlerResult = Builder.CreateTrunc(handlerResult, opTy);
+ Builder.CreateBr(continueBB);
+
+ Builder.SetInsertPoint(continueBB);
+ llvm::PHINode *phi = Builder.CreatePHI(opTy, 2);
+ phi->addIncoming(result, initialBB);
+ phi->addIncoming(handlerResult, overflowBB);
+
+ return phi;
+}
+
+/// Emit pointer + index arithmetic.
+static Value *emitPointerArithmetic(CodeGenFunction &CGF,
+ const BinOpInfo &op,
+ bool isSubtraction) {
+ // Must have binary (not unary) expr here. Unary pointer
+ // increment/decrement doesn't use this path.
+ const BinaryOperator *expr = cast<BinaryOperator>(op.E);
+
+ Value *pointer = op.LHS;
+ Expr *pointerOperand = expr->getLHS();
+ Value *index = op.RHS;
+ Expr *indexOperand = expr->getRHS();
+
+ // In a subtraction, the LHS is always the pointer.
+ if (!isSubtraction && !pointer->getType()->isPointerTy()) {
+ std::swap(pointer, index);
+ std::swap(pointerOperand, indexOperand);
+ }
+
+ unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
+ if (width != CGF.PointerWidthInBits) {
+    // Zero-extend or sign-extend the index to pointer width according to
+    // whether the index is signed or not.
+ bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
+ index = CGF.Builder.CreateIntCast(index, CGF.PtrDiffTy, isSigned,
+ "idx.ext");
+ }
+
+ // If this is subtraction, negate the index.
+ if (isSubtraction)
+ index = CGF.Builder.CreateNeg(index, "idx.neg");
+
+ const PointerType *pointerType
+ = pointerOperand->getType()->getAs<PointerType>();
+ if (!pointerType) {
+ QualType objectType = pointerOperand->getType()
+ ->castAs<ObjCObjectPointerType>()
+ ->getPointeeType();
+ llvm::Value *objectSize
+ = CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(objectType));
+
+ index = CGF.Builder.CreateMul(index, objectSize);
+
+ Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
+ result = CGF.Builder.CreateGEP(result, index, "add.ptr");
+ return CGF.Builder.CreateBitCast(result, pointer->getType());
+ }
+
+ QualType elementType = pointerType->getPointeeType();
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(elementType)) {
+ // The element count here is the total number of non-VLA elements.
+ llvm::Value *numElements = CGF.getVLASize(vla).first;
+
+ // Effectively, the multiply by the VLA size is part of the GEP.
+ // GEP indexes are signed, and scaling an index isn't permitted to
+ // signed-overflow, so we use the same semantics for our explicit
+ // multiply. We suppress this if overflow is not undefined behavior.
+ if (CGF.getLangOpts().isSignedOverflowDefined()) {
+ index = CGF.Builder.CreateMul(index, numElements, "vla.index");
+ pointer = CGF.Builder.CreateGEP(pointer, index, "add.ptr");
+ } else {
+ index = CGF.Builder.CreateNSWMul(index, numElements, "vla.index");
+ pointer = CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
+ }
+ return pointer;
+ }
+
+ // Explicitly handle GNU void* and function pointer arithmetic extensions. The
+ // GNU void* casts amount to no-ops since our void* type is i8*, but this is
+  // future-proof.
+ if (elementType->isVoidType() || elementType->isFunctionType()) {
+ Value *result = CGF.Builder.CreateBitCast(pointer, CGF.VoidPtrTy);
+ result = CGF.Builder.CreateGEP(result, index, "add.ptr");
+ return CGF.Builder.CreateBitCast(result, pointer->getType());
+ }
+
+ if (CGF.getLangOpts().isSignedOverflowDefined())
+ return CGF.Builder.CreateGEP(pointer, index, "add.ptr");
+
+ return CGF.Builder.CreateInBoundsGEP(pointer, index, "add.ptr");
+}
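+// For illustration: "p + i" with "int *p" lowers to roughly
+//   %idx.ext = sext i32 %i to i64
+//   %add.ptr = getelementptr inbounds i32* %p, i64 %idx.ext
+// on a 64-bit target; with -fwrapv (signed overflow defined) the "inbounds"
+// marker is dropped, matching the checks above.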
+
+Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
+ if (op.LHS->getType()->isPointerTy() ||
+ op.RHS->getType()->isPointerTy())
+ return emitPointerArithmetic(CGF, op, /*subtraction*/ false);
+
+ if (op.Ty->isSignedIntegerOrEnumerationType()) {
+ switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateAdd(op.LHS, op.RHS, "add");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(op);
+ }
+ }
+
+ if (op.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFAdd(op.LHS, op.RHS, "add");
+
+ return Builder.CreateAdd(op.LHS, op.RHS, "add");
+}
+
+Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
+ // The LHS is always a pointer if either side is.
+ if (!op.LHS->getType()->isPointerTy()) {
+ if (op.Ty->isSignedIntegerOrEnumerationType()) {
+ switch (CGF.getContext().getLangOpts().getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined:
+ return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
+ case LangOptions::SOB_Defined:
+ return Builder.CreateSub(op.LHS, op.RHS, "sub");
+ case LangOptions::SOB_Trapping:
+ return EmitOverflowCheckedBinOp(op);
+ }
+ }
+
+ if (op.LHS->getType()->isFPOrFPVectorTy())
+ return Builder.CreateFSub(op.LHS, op.RHS, "sub");
+
+ return Builder.CreateSub(op.LHS, op.RHS, "sub");
+ }
+
+ // If the RHS is not a pointer, then we have normal pointer
+ // arithmetic.
+ if (!op.RHS->getType()->isPointerTy())
+ return emitPointerArithmetic(CGF, op, /*subtraction*/ true);
+
+ // Otherwise, this is a pointer subtraction.
+
+ // Do the raw subtraction part.
+ llvm::Value *LHS
+ = Builder.CreatePtrToInt(op.LHS, CGF.PtrDiffTy, "sub.ptr.lhs.cast");
+ llvm::Value *RHS
+ = Builder.CreatePtrToInt(op.RHS, CGF.PtrDiffTy, "sub.ptr.rhs.cast");
+ Value *diffInChars = Builder.CreateSub(LHS, RHS, "sub.ptr.sub");
+
+ // Okay, figure out the element size.
+ const BinaryOperator *expr = cast<BinaryOperator>(op.E);
+ QualType elementType = expr->getLHS()->getType()->getPointeeType();
+
+ llvm::Value *divisor = 0;
+
+ // For a variable-length array, this is going to be non-constant.
+ if (const VariableArrayType *vla
+ = CGF.getContext().getAsVariableArrayType(elementType)) {
+ llvm::Value *numElements;
+ llvm::tie(numElements, elementType) = CGF.getVLASize(vla);
+
+ divisor = numElements;
+
+ // Scale the number of non-VLA elements by the non-VLA element size.
+ CharUnits eltSize = CGF.getContext().getTypeSizeInChars(elementType);
+ if (!eltSize.isOne())
+ divisor = CGF.Builder.CreateNUWMul(CGF.CGM.getSize(eltSize), divisor);
+
+  // For everything else, we can just compute it, safe in the
+ // assumption that Sema won't let anything through that we can't
+ // safely compute the size of.
+ } else {
+ CharUnits elementSize;
+ // Handle GCC extension for pointer arithmetic on void* and
+ // function pointer types.
+ if (elementType->isVoidType() || elementType->isFunctionType())
+ elementSize = CharUnits::One();
+ else
+ elementSize = CGF.getContext().getTypeSizeInChars(elementType);
+
+ // Don't even emit the divide for element size of 1.
+ if (elementSize.isOne())
+ return diffInChars;
+
+ divisor = CGF.CGM.getSize(elementSize);
+ }
+
+ // Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
+ // pointer difference in C is only defined in the case where both operands
+ // are pointing to elements of an array.
+ return Builder.CreateExactSDiv(diffInChars, divisor, "sub.ptr.div");
+}
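+// For illustration: "p - q" with "int *p, *q" emits a sub of the two
+// ptrtoint values followed by an exact sdiv by sizeof(int), yielding the
+// element difference.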
+
+Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
+ // LLVM requires the LHS and RHS to be the same type: promote or truncate the
+ // RHS to the same size as the LHS.
+ Value *RHS = Ops.RHS;
+ if (Ops.LHS->getType() != RHS->getType())
+ RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
+
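+  // When CatchUndefined is set (-fcatch-undefined-behavior), branch to the
+  // trap block unless the shift amount is less than the bit width of the
+  // promoted LHS.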
+ if (CGF.CatchUndefined
+ && isa<llvm::IntegerType>(Ops.LHS->getType())) {
+ unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
+ CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS,
+ llvm::ConstantInt::get(RHS->getType(), Width)),
+ Cont, CGF.getTrapBB());
+ CGF.EmitBlock(Cont);
+ }
+
+ return Builder.CreateShl(Ops.LHS, RHS, "shl");
+}
+
+Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
+ // LLVM requires the LHS and RHS to be the same type: promote or truncate the
+ // RHS to the same size as the LHS.
+ Value *RHS = Ops.RHS;
+ if (Ops.LHS->getType() != RHS->getType())
+ RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
+
+ if (CGF.CatchUndefined
+ && isa<llvm::IntegerType>(Ops.LHS->getType())) {
+ unsigned Width = cast<llvm::IntegerType>(Ops.LHS->getType())->getBitWidth();
+ llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
+ CGF.Builder.CreateCondBr(Builder.CreateICmpULT(RHS,
+ llvm::ConstantInt::get(RHS->getType(), Width)),
+ Cont, CGF.getTrapBB());
+ CGF.EmitBlock(Cont);
+ }
+
+ if (Ops.Ty->hasUnsignedIntegerRepresentation())
+ return Builder.CreateLShr(Ops.LHS, RHS, "shr");
+ return Builder.CreateAShr(Ops.LHS, RHS, "shr");
+}
+
+enum IntrinsicType { VCMPEQ, VCMPGT };
+// Return the corresponding comparison intrinsic for the given vector element kind.
+static llvm::Intrinsic::ID GetIntrinsic(IntrinsicType IT,
+ BuiltinType::Kind ElemKind) {
+ switch (ElemKind) {
+ default: llvm_unreachable("unexpected element type");
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtub_p;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequb_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtsb_p;
+ case BuiltinType::UShort:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtuh_p;
+ case BuiltinType::Short:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequh_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtsh_p;
+ case BuiltinType::UInt:
+ case BuiltinType::ULong:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtuw_p;
+ case BuiltinType::Int:
+ case BuiltinType::Long:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpequw_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtsw_p;
+ case BuiltinType::Float:
+ return (IT == VCMPEQ) ? llvm::Intrinsic::ppc_altivec_vcmpeqfp_p :
+ llvm::Intrinsic::ppc_altivec_vcmpgtfp_p;
+ }
+}
+
+Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
+ unsigned SICmpOpc, unsigned FCmpOpc) {
+ TestAndClearIgnoreResultAssign();
+ Value *Result;
+ QualType LHSTy = E->getLHS()->getType();
+ if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
+ assert(E->getOpcode() == BO_EQ ||
+ E->getOpcode() == BO_NE);
+ Value *LHS = CGF.EmitScalarExpr(E->getLHS());
+ Value *RHS = CGF.EmitScalarExpr(E->getRHS());
+ Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
+ CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
+ } else if (!LHSTy->isAnyComplexType()) {
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+
+    // If this is an AltiVec-style comparison (vector operands, scalar result),
+    // use the vector-compare predicate intrinsics, which return 0 or 1.
+ if (LHSTy->isVectorType() && !E->getType()->isVectorType()) {
+      // Constants for mapping CR6 register bits to the predicate result.
+ enum { CR6_EQ=0, CR6_EQ_REV, CR6_LT, CR6_LT_REV } CR6;
+
+ llvm::Intrinsic::ID ID = llvm::Intrinsic::not_intrinsic;
+
+      // In several cases the order of the vector arguments is reversed.
+ Value *FirstVecArg = LHS,
+ *SecondVecArg = RHS;
+
+ QualType ElTy = LHSTy->getAs<VectorType>()->getElementType();
+ const BuiltinType *BTy = ElTy->getAs<BuiltinType>();
+ BuiltinType::Kind ElementKind = BTy->getKind();
+
+ switch(E->getOpcode()) {
+ default: llvm_unreachable("is not a comparison operation");
+ case BO_EQ:
+ CR6 = CR6_LT;
+ ID = GetIntrinsic(VCMPEQ, ElementKind);
+ break;
+ case BO_NE:
+ CR6 = CR6_EQ;
+ ID = GetIntrinsic(VCMPEQ, ElementKind);
+ break;
+ case BO_LT:
+ CR6 = CR6_LT;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ std::swap(FirstVecArg, SecondVecArg);
+ break;
+ case BO_GT:
+ CR6 = CR6_LT;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ break;
+ case BO_LE:
+ if (ElementKind == BuiltinType::Float) {
+ CR6 = CR6_LT;
+ ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
+ std::swap(FirstVecArg, SecondVecArg);
+ }
+ else {
+ CR6 = CR6_EQ;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ }
+ break;
+ case BO_GE:
+ if (ElementKind == BuiltinType::Float) {
+ CR6 = CR6_LT;
+ ID = llvm::Intrinsic::ppc_altivec_vcmpgefp_p;
+ }
+ else {
+ CR6 = CR6_EQ;
+ ID = GetIntrinsic(VCMPGT, ElementKind);
+ std::swap(FirstVecArg, SecondVecArg);
+ }
+ break;
+ }
+
+ Value *CR6Param = Builder.getInt32(CR6);
+ llvm::Function *F = CGF.CGM.getIntrinsic(ID);
+ Result = Builder.CreateCall3(F, CR6Param, FirstVecArg, SecondVecArg, "");
+ return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
+ }
+
+ if (LHS->getType()->isFPOrFPVectorTy()) {
+ Result = Builder.CreateFCmp((llvm::CmpInst::Predicate)FCmpOpc,
+ LHS, RHS, "cmp");
+ } else if (LHSTy->hasSignedIntegerRepresentation()) {
+ Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)SICmpOpc,
+ LHS, RHS, "cmp");
+ } else {
+ // Unsigned integers and pointers.
+ Result = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS, RHS, "cmp");
+ }
+
+ // If this is a vector comparison, sign extend the result to the appropriate
+ // vector integer type and return it (don't convert to bool).
+ if (LHSTy->isVectorType())
+ return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
+
+ } else {
+ // Complex Comparison: can only be an equality comparison.
+ CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS());
+ CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS());
+
+ QualType CETy = LHSTy->getAs<ComplexType>()->getElementType();
+
+ Value *ResultR, *ResultI;
+ if (CETy->isRealFloatingType()) {
+ ResultR = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
+ LHS.first, RHS.first, "cmp.r");
+ ResultI = Builder.CreateFCmp((llvm::FCmpInst::Predicate)FCmpOpc,
+ LHS.second, RHS.second, "cmp.i");
+ } else {
+ // Complex comparisons can only be equality comparisons. As such, signed
+ // and unsigned opcodes are the same.
+ ResultR = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS.first, RHS.first, "cmp.r");
+ ResultI = Builder.CreateICmp((llvm::ICmpInst::Predicate)UICmpOpc,
+ LHS.second, RHS.second, "cmp.i");
+ }
+
+ if (E->getOpcode() == BO_EQ) {
+ Result = Builder.CreateAnd(ResultR, ResultI, "and.ri");
+ } else {
+ assert(E->getOpcode() == BO_NE &&
+ "Complex comparison other than == or != ?");
+ Result = Builder.CreateOr(ResultR, ResultI, "or.ri");
+ }
+ }
+
+ return EmitScalarConversion(Result, CGF.getContext().BoolTy, E->getType());
+}
+
+Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
+ bool Ignore = TestAndClearIgnoreResultAssign();
+
+ Value *RHS;
+ LValue LHS;
+
+ switch (E->getLHS()->getType().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ llvm::tie(LHS, RHS) = CGF.EmitARCStoreStrong(E, Ignore);
+ break;
+
+ case Qualifiers::OCL_Autoreleasing:
+ llvm::tie(LHS,RHS) = CGF.EmitARCStoreAutoreleasing(E);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ RHS = Visit(E->getRHS());
+ LHS = EmitCheckedLValue(E->getLHS());
+ RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
+ break;
+
+ // No reason to do any of these differently.
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ // __block variables need to have the rhs evaluated first, plus
+ // this should improve codegen just a little.
+ RHS = Visit(E->getRHS());
+ LHS = EmitCheckedLValue(E->getLHS());
+
+ // Store the value into the LHS. Bit-fields are handled specially
+ // because the result is altered by the store, i.e., [C99 6.5.16p1]
+ // 'An assignment expression has the value of the left operand after
+ // the assignment...'.
+ if (LHS.isBitField())
+ CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
+ else
+ CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
+ }
+
+ // If the result is clearly ignored, return now.
+ if (Ignore)
+ return 0;
+
+ // The result of an assignment in C is the assigned r-value.
+ if (!CGF.getContext().getLangOpts().CPlusPlus)
+ return RHS;
+
+ // If the lvalue is non-volatile, return the computed value of the assignment.
+ if (!LHS.isVolatileQualified())
+ return RHS;
+
+ // Otherwise, reload the value.
+ return EmitLoadOfLValue(LHS);
+}
+
+Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
+
+ // Perform vector logical and on comparisons with zero vectors.
+ if (E->getType()->isVectorType()) {
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+ Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
+ LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
+ RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
+ Value *And = Builder.CreateAnd(LHS, RHS);
+ return Builder.CreateSExt(And, Zero->getType(), "sext");
+ }
+
+ llvm::Type *ResTy = ConvertType(E->getType());
+
+ // If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
+ // If we have 1 && X, just emit X without inserting the control flow.
+ bool LHSCondVal;
+ if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
+ if (LHSCondVal) { // If we have 1 && X, just emit X.
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ // ZExt result to int or bool.
+ return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "land.ext");
+ }
+
+ // 0 && RHS: If it is safe, just elide the RHS, and return 0/false.
+ if (!CGF.ContainsLabel(E->getRHS()))
+ return llvm::Constant::getNullValue(ResTy);
+ }
+
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("land.end");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("land.rhs");
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+
+ // Branch on the LHS first. If it is false, go to the failure (cont) block.
+ CGF.EmitBranchOnBoolExpr(E->getLHS(), RHSBlock, ContBlock);
+
+  // Any edges into the ContBlock are now from an (indeterminate number of)
+  // branches taken while evaluating this first condition. All of these values
+  // will be false. Start setting up the PHI node in the Cont Block for this.
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
+ "", ContBlock);
+ for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
+ PI != PE; ++PI)
+ PN->addIncoming(llvm::ConstantInt::getFalse(VMContext), *PI);
+
+ eval.begin(CGF);
+ CGF.EmitBlock(RHSBlock);
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ eval.end(CGF);
+
+  // Reacquire the RHS block, as there may be subblocks inserted.
+ RHSBlock = Builder.GetInsertBlock();
+
+ // Emit an unconditional branch from this block to ContBlock. Insert an entry
+ // into the phi node for the edge with the value of RHSCond.
+ if (CGF.getDebugInfo())
+ // There is no need to emit line number for unconditional branch.
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc());
+ CGF.EmitBlock(ContBlock);
+ PN->addIncoming(RHSCond, RHSBlock);
+
+ // ZExt result to int.
+ return Builder.CreateZExtOrBitCast(PN, ResTy, "land.ext");
+}
+
+Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
+
+ // Perform vector logical or on comparisons with zero vectors.
+ if (E->getType()->isVectorType()) {
+ Value *LHS = Visit(E->getLHS());
+ Value *RHS = Visit(E->getRHS());
+ Value *Zero = llvm::ConstantAggregateZero::get(LHS->getType());
+ LHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, LHS, Zero, "cmp");
+ RHS = Builder.CreateICmp(llvm::CmpInst::ICMP_NE, RHS, Zero, "cmp");
+ Value *Or = Builder.CreateOr(LHS, RHS);
+ return Builder.CreateSExt(Or, Zero->getType(), "sext");
+ }
+
+ llvm::Type *ResTy = ConvertType(E->getType());
+
+ // If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
+ // If we have 0 || X, just emit X without inserting the control flow.
+ bool LHSCondVal;
+ if (CGF.ConstantFoldsToSimpleInteger(E->getLHS(), LHSCondVal)) {
+ if (!LHSCondVal) { // If we have 0 || X, just emit X.
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+ // ZExt result to int or bool.
+ return Builder.CreateZExtOrBitCast(RHSCond, ResTy, "lor.ext");
+ }
+
+ // 1 || RHS: If it is safe, just elide the RHS, and return 1/true.
+ if (!CGF.ContainsLabel(E->getRHS()))
+ return llvm::ConstantInt::get(ResTy, 1);
+ }
+
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("lor.end");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("lor.rhs");
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+
+ // Branch on the LHS first. If it is true, go to the success (cont) block.
+ CGF.EmitBranchOnBoolExpr(E->getLHS(), ContBlock, RHSBlock);
+
+  // Any edges into the ContBlock are now from an (indeterminate number of)
+  // branches taken while evaluating this first condition. All of these values
+  // will be true. Start setting up the PHI node in the Cont Block for this.
+ llvm::PHINode *PN = llvm::PHINode::Create(llvm::Type::getInt1Ty(VMContext), 2,
+ "", ContBlock);
+ for (llvm::pred_iterator PI = pred_begin(ContBlock), PE = pred_end(ContBlock);
+ PI != PE; ++PI)
+ PN->addIncoming(llvm::ConstantInt::getTrue(VMContext), *PI);
+
+ eval.begin(CGF);
+
+ // Emit the RHS condition as a bool value.
+ CGF.EmitBlock(RHSBlock);
+ Value *RHSCond = CGF.EvaluateExprAsBool(E->getRHS());
+
+ eval.end(CGF);
+
+  // Reacquire the RHS block, as there may be subblocks inserted.
+ RHSBlock = Builder.GetInsertBlock();
+
+ // Emit an unconditional branch from this block to ContBlock. Insert an entry
+ // into the phi node for the edge with the value of RHSCond.
+ CGF.EmitBlock(ContBlock);
+ PN->addIncoming(RHSCond, RHSBlock);
+
+ // ZExt result to int.
+ return Builder.CreateZExtOrBitCast(PN, ResTy, "lor.ext");
+}
+
+Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
+ CGF.EmitIgnoredExpr(E->getLHS());
+ CGF.EnsureInsertPoint();
+ return Visit(E->getRHS());
+}
+
+//===----------------------------------------------------------------------===//
+// Other Operators
+//===----------------------------------------------------------------------===//
+
+/// isCheapEnoughToEvaluateUnconditionally - Return true if the specified
+/// expression is cheap enough and side-effect-free enough to evaluate
+/// unconditionally instead of conditionally. This is used to convert control
+/// flow into selects in some cases.
+static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
+ CodeGenFunction &CGF) {
+ E = E->IgnoreParens();
+
+ // Anything that is an integer or floating point constant is fine.
+ if (E->isConstantInitializer(CGF.getContext(), false))
+ return true;
+
+ // Non-volatile automatic variables too, to get "cond ? X : Y" where
+ // X and Y are local variables.
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (VD->hasLocalStorage() && !(CGF.getContext()
+ .getCanonicalType(VD->getType())
+ .isVolatileQualified()))
+ return true;
+
+ return false;
+}
+
+
+Value *ScalarExprEmitter::
+VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
+ TestAndClearIgnoreResultAssign();
+
+ // Bind the common expression if necessary.
+ CodeGenFunction::OpaqueValueMapping binding(CGF, E);
+
+ Expr *condExpr = E->getCond();
+ Expr *lhsExpr = E->getTrueExpr();
+ Expr *rhsExpr = E->getFalseExpr();
+
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm.
+ bool CondExprBool;
+ if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
+ Expr *live = lhsExpr, *dead = rhsExpr;
+ if (!CondExprBool) std::swap(live, dead);
+
+    // If the dead side doesn't have labels we need, just emit the live part.
+ if (!CGF.ContainsLabel(dead)) {
+ Value *Result = Visit(live);
+
+ // If the live part is a throw expression, it acts like it has a void
+ // type, so evaluating it returns a null Value*. However, a conditional
+ // with non-void type must return a non-null Value*.
+ if (!Result && !E->getType()->isVoidType())
+ Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
+
+ return Result;
+ }
+ }
+
+ // OpenCL: If the condition is a vector, we can treat this condition like
+ // the select function.
+ if (CGF.getContext().getLangOpts().OpenCL
+ && condExpr->getType()->isVectorType()) {
+ llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
+ llvm::Value *LHS = Visit(lhsExpr);
+ llvm::Value *RHS = Visit(rhsExpr);
+
+ llvm::Type *condType = ConvertType(condExpr->getType());
+ llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
+
+ unsigned numElem = vecTy->getNumElements();
+ llvm::Type *elemType = vecTy->getElementType();
+
+ llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
+ llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
+ llvm::Value *tmp = Builder.CreateSExt(TestMSB,
+ llvm::VectorType::get(elemType,
+ numElem),
+ "sext");
+ llvm::Value *tmp2 = Builder.CreateNot(tmp);
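+    // Select element-wise without branches: the result is
+    // (LHS & mask) | (RHS & ~mask), where mask is the sign-extended test of
+    // the condition's sign bit computed above.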
+
+ // Cast float to int to perform ANDs if necessary.
+ llvm::Value *RHSTmp = RHS;
+ llvm::Value *LHSTmp = LHS;
+ bool wasCast = false;
+ llvm::VectorType *rhsVTy = cast<llvm::VectorType>(RHS->getType());
+ if (rhsVTy->getElementType()->isFloatTy()) {
+ RHSTmp = Builder.CreateBitCast(RHS, tmp2->getType());
+ LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
+ wasCast = true;
+ }
+
+ llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
+ llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
+ llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
+ if (wasCast)
+ tmp5 = Builder.CreateBitCast(tmp5, RHS->getType());
+
+ return tmp5;
+ }
+
+ // If this is a really simple expression (like x ? 4 : 5), emit this as a
+ // select instead of as control flow. We can only do this if it is cheap and
+ // safe to evaluate the LHS and RHS unconditionally.
+ if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
+ isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
+ llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
+ llvm::Value *LHS = Visit(lhsExpr);
+ llvm::Value *RHS = Visit(rhsExpr);
+ if (!LHS) {
+ // If the conditional has void type, make sure we return a null Value*.
+ assert(!RHS && "LHS and RHS types must match");
+ return 0;
+ }
+ return Builder.CreateSelect(CondV, LHS, RHS, "cond");
+ }
+
+ llvm::BasicBlock *LHSBlock = CGF.createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = CGF.createBasicBlock("cond.false");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("cond.end");
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+ CGF.EmitBranchOnBoolExpr(condExpr, LHSBlock, RHSBlock);
+
+ CGF.EmitBlock(LHSBlock);
+ eval.begin(CGF);
+ Value *LHS = Visit(lhsExpr);
+ eval.end(CGF);
+
+ LHSBlock = Builder.GetInsertBlock();
+ Builder.CreateBr(ContBlock);
+
+ CGF.EmitBlock(RHSBlock);
+ eval.begin(CGF);
+ Value *RHS = Visit(rhsExpr);
+ eval.end(CGF);
+
+ RHSBlock = Builder.GetInsertBlock();
+ CGF.EmitBlock(ContBlock);
+
+ // If the LHS or RHS is a throw expression, it will be legitimately null.
+ if (!LHS)
+ return RHS;
+ if (!RHS)
+ return LHS;
+
+ // Create a PHI node for the real part.
+ llvm::PHINode *PN = Builder.CreatePHI(LHS->getType(), 2, "cond");
+ PN->addIncoming(LHS, LHSBlock);
+ PN->addIncoming(RHS, RHSBlock);
+ return PN;
+}
+
+Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
+ return Visit(E->getChosenSubExpr(CGF.getContext()));
+}
+
+Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
+ llvm::Value *ArgValue = CGF.EmitVAListRef(VE->getSubExpr());
+ llvm::Value *ArgPtr = CGF.EmitVAArg(ArgValue, VE->getType());
+
+ // If EmitVAArg fails, we fall back to the LLVM instruction.
+ if (!ArgPtr)
+ return Builder.CreateVAArg(ArgValue, ConvertType(VE->getType()));
+
+ // FIXME Volatility.
+ return Builder.CreateLoad(ArgPtr);
+}
+
+Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
+ return CGF.EmitBlockLiteral(block);
+}
+
+Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
+ Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
+ llvm::Type *DstTy = ConvertType(E->getType());
+
+ // Going from vec4->vec3 or vec3->vec4 is a special case and requires
+ // a shuffle vector instead of a bitcast.
+ llvm::Type *SrcTy = Src->getType();
+ if (isa<llvm::VectorType>(DstTy) && isa<llvm::VectorType>(SrcTy)) {
+ unsigned numElementsDst = cast<llvm::VectorType>(DstTy)->getNumElements();
+ unsigned numElementsSrc = cast<llvm::VectorType>(SrcTy)->getNumElements();
+ if ((numElementsDst == 3 && numElementsSrc == 4)
+ || (numElementsDst == 4 && numElementsSrc == 3)) {
+
+ // In the case of going from int4->float3, a bitcast is needed before
+ // doing a shuffle.
+ llvm::Type *srcElemTy =
+ cast<llvm::VectorType>(SrcTy)->getElementType();
+ llvm::Type *dstElemTy =
+ cast<llvm::VectorType>(DstTy)->getElementType();
+
+ if ((srcElemTy->isIntegerTy() && dstElemTy->isFloatTy())
+ || (srcElemTy->isFloatTy() && dstElemTy->isIntegerTy())) {
+        // Bitcast the source to a vector with the destination's element type
+        // (but the source's element count) before shuffling.
+ llvm::VectorType *newSrcTy = llvm::VectorType::get(dstElemTy,
+ numElementsSrc);
+
+ Src = Builder.CreateBitCast(Src, newSrcTy, "astypeCast");
+ }
+
+ llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
+
+ SmallVector<llvm::Constant*, 3> Args;
+ Args.push_back(Builder.getInt32(0));
+ Args.push_back(Builder.getInt32(1));
+ Args.push_back(Builder.getInt32(2));
+
+ if (numElementsDst == 4)
+ Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+
+ llvm::Constant *Mask = llvm::ConstantVector::get(Args);
+
+ return Builder.CreateShuffleVector(Src, UnV, Mask, "astype");
+ }
+ }
+
+ return Builder.CreateBitCast(Src, DstTy, "astype");
+}
+
+Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
+ return CGF.EmitAtomicExpr(E).getScalarVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Entry Point into this File
+//===----------------------------------------------------------------------===//
+
+/// EmitScalarExpr - Emit the computation of the specified expression of scalar
+/// type, ignoring the result.
+Value *CodeGenFunction::EmitScalarExpr(const Expr *E, bool IgnoreResultAssign) {
+ assert(E && !hasAggregateLLVMType(E->getType()) &&
+ "Invalid scalar expression to emit");
+
+ if (isa<CXXDefaultArgExpr>(E))
+ disableDebugInfo();
+ Value *V = ScalarExprEmitter(*this, IgnoreResultAssign)
+ .Visit(const_cast<Expr*>(E));
+ if (isa<CXXDefaultArgExpr>(E))
+ enableDebugInfo();
+ return V;
+}
+
+/// EmitScalarConversion - Emit a conversion from the specified type to the
+/// specified destination type, both of which are LLVM scalar types.
+Value *CodeGenFunction::EmitScalarConversion(Value *Src, QualType SrcTy,
+ QualType DstTy) {
+ assert(!hasAggregateLLVMType(SrcTy) && !hasAggregateLLVMType(DstTy) &&
+ "Invalid scalar expression to emit");
+ return ScalarExprEmitter(*this).EmitScalarConversion(Src, SrcTy, DstTy);
+}
+
+/// EmitComplexToScalarConversion - Emit a conversion from the specified complex
+/// type to the specified destination type, where the destination type is an
+/// LLVM scalar type.
+Value *CodeGenFunction::EmitComplexToScalarConversion(ComplexPairTy Src,
+ QualType SrcTy,
+ QualType DstTy) {
+ assert(SrcTy->isAnyComplexType() && !hasAggregateLLVMType(DstTy) &&
+ "Invalid complex -> scalar conversion");
+ return ScalarExprEmitter(*this).EmitComplexToScalarConversion(Src, SrcTy,
+ DstTy);
+}
+
+
+llvm::Value *CodeGenFunction::
+EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre) {
+ return ScalarExprEmitter(*this).EmitScalarPrePostIncDec(E, LV, isInc, isPre);
+}
+
+LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
+ llvm::Value *V;
+ // object->isa or (*object).isa
+ // Generate code as for: *(Class*)object
+ // build Class* type
+ llvm::Type *ClassPtrTy = ConvertType(E->getType());
+
+ Expr *BaseExpr = E->getBase();
+ if (BaseExpr->isRValue()) {
+ V = CreateMemTemp(E->getType(), "resval");
+ llvm::Value *Src = EmitScalarExpr(BaseExpr);
+ Builder.CreateStore(Src, V);
+ V = ScalarExprEmitter(*this).EmitLoadOfLValue(
+ MakeNaturalAlignAddrLValue(V, E->getType()));
+ } else {
+ if (E->isArrow())
+ V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
+ else
+ V = EmitLValue(BaseExpr).getAddress();
+ }
+
+ // build Class* type
+ ClassPtrTy = ClassPtrTy->getPointerTo();
+ V = Builder.CreateBitCast(V, ClassPtrTy);
+ return MakeNaturalAlignAddrLValue(V, E->getType());
+}
+
+
+LValue CodeGenFunction::EmitCompoundAssignmentLValue(
+ const CompoundAssignOperator *E) {
+ ScalarExprEmitter Scalar(*this);
+ Value *Result = 0;
+ switch (E->getOpcode()) {
+#define COMPOUND_OP(Op) \
+ case BO_##Op##Assign: \
+ return Scalar.EmitCompoundAssignLValue(E, &ScalarExprEmitter::Emit##Op, \
+ Result)
+ COMPOUND_OP(Mul);
+ COMPOUND_OP(Div);
+ COMPOUND_OP(Rem);
+ COMPOUND_OP(Add);
+ COMPOUND_OP(Sub);
+ COMPOUND_OP(Shl);
+ COMPOUND_OP(Shr);
+ COMPOUND_OP(And);
+ COMPOUND_OP(Xor);
+ COMPOUND_OP(Or);
+#undef COMPOUND_OP
+
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ case BO_Mul:
+ case BO_Div:
+ case BO_Rem:
+ case BO_Add:
+ case BO_Sub:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ:
+ case BO_NE:
+ case BO_And:
+ case BO_Xor:
+ case BO_Or:
+ case BO_LAnd:
+ case BO_LOr:
+ case BO_Assign:
+ case BO_Comma:
+ llvm_unreachable("Not valid compound assignment operators");
+ }
+
+ llvm_unreachable("Unhandled compound assignment operator");
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
new file mode 100644
index 0000000..d0aa0f5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjC.cpp
@@ -0,0 +1,2974 @@
+//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Objective-C code as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CGObjCRuntime.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/InlineAsm.h"
+using namespace clang;
+using namespace CodeGen;
+
+typedef llvm::PointerIntPair<llvm::Value*,1,bool> TryEmitResult;
+static TryEmitResult
+tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e);
+static RValue AdjustRelatedResultType(CodeGenFunction &CGF,
+ const Expr *E,
+ const ObjCMethodDecl *Method,
+ RValue Result);
+
+/// Given the address of a variable of pointer type, find the correct
+/// null to store into it.
+static llvm::Constant *getNullForVariable(llvm::Value *addr) {
+ llvm::Type *type =
+ cast<llvm::PointerType>(addr->getType())->getElementType();
+ return llvm::ConstantPointerNull::get(cast<llvm::PointerType>(type));
+}
+
+/// Emits an instance of NSConstantString representing the object.
+llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
+{
+ llvm::Constant *C =
+ CGM.getObjCRuntime().GenerateConstantString(E->getString());
+ // FIXME: This bitcast should just be made an invariant on the Runtime.
+ return llvm::ConstantExpr::getBitCast(C, ConvertType(E->getType()));
+}
+
+/// EmitObjCNumericLiteral - This routine generates code for
+/// the appropriate +[NSNumber numberWith<Type>:] method.
+///
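+/// For example, the integer literal '@42' lowers to a message send roughly
+/// equivalent to [NSNumber numberWithInt:42].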
+llvm::Value *
+CodeGenFunction::EmitObjCNumericLiteral(const ObjCNumericLiteral *E) {
+ // Generate the correct selector for this literal's concrete type.
+ const Expr *NL = E->getNumber();
+ // Get the method.
+ const ObjCMethodDecl *Method = E->getObjCNumericLiteralMethod();
+ assert(Method && "NSNumber method is null");
+ Selector Sel = Method->getSelector();
+
+ // Generate a reference to the class pointer, which will be the receiver.
+ QualType ResultType = E->getType(); // should be NSNumber *
+ const ObjCObjectPointerType *InterfacePointerType =
+ ResultType->getAsObjCInterfacePointerType();
+ ObjCInterfaceDecl *NSNumberDecl =
+ InterfacePointerType->getObjectType()->getInterface();
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Receiver = Runtime.GetClass(Builder, NSNumberDecl);
+
+ const ParmVarDecl *argDecl = *Method->param_begin();
+ QualType ArgQT = argDecl->getType().getUnqualifiedType();
+ RValue RV = EmitAnyExpr(NL);
+ CallArgList Args;
+ Args.add(RV, ArgQT);
+
+ RValue result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ ResultType, Sel, Receiver, Args,
+ NSNumberDecl, Method);
+ return Builder.CreateBitCast(result.getScalarVal(),
+ ConvertType(E->getType()));
+}
+
+llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
+ const ObjCMethodDecl *MethodWithObjects) {
+ ASTContext &Context = CGM.getContext();
+ const ObjCDictionaryLiteral *DLE = 0;
+ const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
+ if (!ALE)
+ DLE = cast<ObjCDictionaryLiteral>(E);
+
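+  // MethodWithObjects is the method Sema attached to the literal, typically
+  // +arrayWithObjects:count: for @[...] and
+  // +dictionaryWithObjects:forKeys:count: for @{...}; the elements (and keys)
+  // are spilled into temporary arrays below and passed along with the count.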
+ // Compute the type of the array we're initializing.
+ uint64_t NumElements =
+ ALE ? ALE->getNumElements() : DLE->getNumElements();
+ llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
+ NumElements);
+ QualType ElementType = Context.getObjCIdType().withConst();
+ QualType ElementArrayType
+ = Context.getConstantArrayType(ElementType, APNumElements,
+ ArrayType::Normal, /*IndexTypeQuals=*/0);
+
+ // Allocate the temporary array(s).
+ llvm::Value *Objects = CreateMemTemp(ElementArrayType, "objects");
+ llvm::Value *Keys = 0;
+ if (DLE)
+ Keys = CreateMemTemp(ElementArrayType, "keys");
+
+ // Perform the actual initialization of the array(s).
+ for (uint64_t i = 0; i < NumElements; i++) {
+ if (ALE) {
+ // Emit the initializer.
+ const Expr *Rhs = ALE->getElement(i);
+ LValue LV = LValue::MakeAddr(Builder.CreateStructGEP(Objects, i),
+ ElementType,
+ Context.getTypeAlignInChars(Rhs->getType()),
+ Context);
+ EmitScalarInit(Rhs, /*D=*/0, LV, /*capturedByInit=*/false);
+ } else {
+ // Emit the key initializer.
+ const Expr *Key = DLE->getKeyValueElement(i).Key;
+ LValue KeyLV = LValue::MakeAddr(Builder.CreateStructGEP(Keys, i),
+ ElementType,
+ Context.getTypeAlignInChars(Key->getType()),
+ Context);
+ EmitScalarInit(Key, /*D=*/0, KeyLV, /*capturedByInit=*/false);
+
+ // Emit the value initializer.
+ const Expr *Value = DLE->getKeyValueElement(i).Value;
+ LValue ValueLV = LValue::MakeAddr(Builder.CreateStructGEP(Objects, i),
+ ElementType,
+ Context.getTypeAlignInChars(Value->getType()),
+ Context);
+ EmitScalarInit(Value, /*D=*/0, ValueLV, /*capturedByInit=*/false);
+ }
+ }
+
+ // Generate the argument list.
+ CallArgList Args;
+ ObjCMethodDecl::param_const_iterator PI = MethodWithObjects->param_begin();
+ const ParmVarDecl *argDecl = *PI++;
+ QualType ArgQT = argDecl->getType().getUnqualifiedType();
+ Args.add(RValue::get(Objects), ArgQT);
+ if (DLE) {
+ argDecl = *PI++;
+ ArgQT = argDecl->getType().getUnqualifiedType();
+ Args.add(RValue::get(Keys), ArgQT);
+ }
+ argDecl = *PI;
+ ArgQT = argDecl->getType().getUnqualifiedType();
+ llvm::Value *Count =
+ llvm::ConstantInt::get(CGM.getTypes().ConvertType(ArgQT), NumElements);
+ Args.add(RValue::get(Count), ArgQT);
+
+ // Generate a reference to the class pointer, which will be the receiver.
+ Selector Sel = MethodWithObjects->getSelector();
+ QualType ResultType = E->getType();
+ const ObjCObjectPointerType *InterfacePointerType
+ = ResultType->getAsObjCInterfacePointerType();
+ ObjCInterfaceDecl *Class
+ = InterfacePointerType->getObjectType()->getInterface();
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Receiver = Runtime.GetClass(Builder, Class);
+
+ // Generate the message send.
+ RValue result
+ = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ MethodWithObjects->getResultType(),
+ Sel,
+ Receiver, Args, Class,
+ MethodWithObjects);
+ return Builder.CreateBitCast(result.getScalarVal(),
+ ConvertType(E->getType()));
+}
+
+llvm::Value *CodeGenFunction::EmitObjCArrayLiteral(const ObjCArrayLiteral *E) {
+ return EmitObjCCollectionLiteral(E, E->getArrayWithObjectsMethod());
+}
+
+llvm::Value *CodeGenFunction::EmitObjCDictionaryLiteral(
+ const ObjCDictionaryLiteral *E) {
+ return EmitObjCCollectionLiteral(E, E->getDictWithObjectsMethod());
+}
+
+/// Emit a selector.
+llvm::Value *CodeGenFunction::EmitObjCSelectorExpr(const ObjCSelectorExpr *E) {
+ // Untyped selector.
+ // Note that this implementation allows for non-constant strings to be passed
+ // as arguments to @selector(). Currently, the only thing preventing this
+ // behaviour is the type checking in the front end.
+ return CGM.getObjCRuntime().GetSelector(Builder, E->getSelector());
+}
+
+llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
+ // FIXME: This should pass the Decl not the name.
+ return CGM.getObjCRuntime().GenerateProtocolRef(Builder, E->getProtocol());
+}
+
+/// \brief Adjust the type of the result of an Objective-C message send
+/// expression when the method has a related result type.
+static RValue AdjustRelatedResultType(CodeGenFunction &CGF,
+ const Expr *E,
+ const ObjCMethodDecl *Method,
+ RValue Result) {
+ if (!Method)
+ return Result;
+
+ if (!Method->hasRelatedResultType() ||
+ CGF.getContext().hasSameType(E->getType(), Method->getResultType()) ||
+ !Result.isScalar())
+ return Result;
+
+ // We have applied a related result type. Cast the rvalue appropriately.
+ return RValue::get(CGF.Builder.CreateBitCast(Result.getScalarVal(),
+ CGF.ConvertType(E->getType())));
+}
+
+/// Decide whether to extend the lifetime of the receiver of a
+/// returns-inner-pointer message.
+static bool
+shouldExtendReceiverForInnerPointerMessage(const ObjCMessageExpr *message) {
+ switch (message->getReceiverKind()) {
+
+ // For a normal instance message, we should extend unless the
+ // receiver is loaded from a variable with precise lifetime.
+ case ObjCMessageExpr::Instance: {
+ const Expr *receiver = message->getInstanceReceiver();
+ const ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(receiver);
+ if (!ice || ice->getCastKind() != CK_LValueToRValue) return true;
+ receiver = ice->getSubExpr()->IgnoreParens();
+
+ // Only __strong variables.
+ if (receiver->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
+ return true;
+
+ // All ivars and fields have precise lifetime.
+ if (isa<MemberExpr>(receiver) || isa<ObjCIvarRefExpr>(receiver))
+ return false;
+
+ // Otherwise, check for variables.
+ const DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(ice->getSubExpr());
+ if (!declRef) return true;
+ const VarDecl *var = dyn_cast<VarDecl>(declRef->getDecl());
+ if (!var) return true;
+
+ // All variables have precise lifetime except local variables with
+ // automatic storage duration that aren't specially marked.
+ return (var->hasLocalStorage() &&
+ !var->hasAttr<ObjCPreciseLifetimeAttr>());
+ }
+
+ case ObjCMessageExpr::Class:
+ case ObjCMessageExpr::SuperClass:
+ // It's never necessary for class objects.
+ return false;
+
+ case ObjCMessageExpr::SuperInstance:
+ // We generally assume that 'self' lives throughout a method call.
+ return false;
+ }
+
+ llvm_unreachable("invalid receiver kind");
+}
+
+RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
+ ReturnValueSlot Return) {
+ // Only the lookup mechanism and first two arguments of the method
+ // implementation vary between runtimes. We can get the receiver and
+ // arguments in generic code.
+
+ bool isDelegateInit = E->isDelegateInitCall();
+
+ const ObjCMethodDecl *method = E->getMethodDecl();
+
+ // We don't retain the receiver in delegate init calls, and this is
+ // safe because the receiver value is always loaded from 'self',
+ // which we zero out. We don't want to Block_copy block receivers,
+ // though.
+ bool retainSelf =
+ (!isDelegateInit &&
+ CGM.getLangOpts().ObjCAutoRefCount &&
+ method &&
+ method->hasAttr<NSConsumesSelfAttr>());
+
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ bool isSuperMessage = false;
+ bool isClassMessage = false;
+ ObjCInterfaceDecl *OID = 0;
+ // Find the receiver
+ QualType ReceiverType;
+ llvm::Value *Receiver = 0;
+ switch (E->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ ReceiverType = E->getInstanceReceiver()->getType();
+ if (retainSelf) {
+ TryEmitResult ter = tryEmitARCRetainScalarExpr(*this,
+ E->getInstanceReceiver());
+ Receiver = ter.getPointer();
+ if (ter.getInt()) retainSelf = false;
+ } else
+ Receiver = EmitScalarExpr(E->getInstanceReceiver());
+ break;
+
+ case ObjCMessageExpr::Class: {
+ ReceiverType = E->getClassReceiver();
+ const ObjCObjectType *ObjTy = ReceiverType->getAs<ObjCObjectType>();
+ assert(ObjTy && "Invalid Objective-C class message send");
+ OID = ObjTy->getInterface();
+ assert(OID && "Invalid Objective-C class message send");
+ Receiver = Runtime.GetClass(Builder, OID);
+ isClassMessage = true;
+ break;
+ }
+
+ case ObjCMessageExpr::SuperInstance:
+ ReceiverType = E->getSuperType();
+ Receiver = LoadObjCSelf();
+ isSuperMessage = true;
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ ReceiverType = E->getSuperType();
+ Receiver = LoadObjCSelf();
+ isSuperMessage = true;
+ isClassMessage = true;
+ break;
+ }
+
+ if (retainSelf)
+ Receiver = EmitARCRetainNonBlock(Receiver);
+
+ // In ARC, we sometimes want to "extend the lifetime"
+ // (i.e. retain+autorelease) of receivers of returns-inner-pointer
+ // messages.
+ if (getLangOpts().ObjCAutoRefCount && method &&
+ method->hasAttr<ObjCReturnsInnerPointerAttr>() &&
+ shouldExtendReceiverForInnerPointerMessage(E))
+ Receiver = EmitARCRetainAutorelease(ReceiverType, Receiver);
+
+ QualType ResultType =
+ method ? method->getResultType() : E->getType();
+
+ CallArgList Args;
+ EmitCallArgs(Args, method, E->arg_begin(), E->arg_end());
+
+ // For delegate init calls in ARC, do an unsafe store of null into
+ // self. This represents the call taking direct ownership of that
+ // value. We have to do this after emitting the other call
+ // arguments because they might also reference self, but we don't
+ // have to worry about any of them modifying self because that would
+ // be an undefined read and write of an object in unordered
+ // expressions.
+ if (isDelegateInit) {
+ assert(getLangOpts().ObjCAutoRefCount &&
+ "delegate init calls should only be marked in ARC");
+
+ // Do an unsafe store of null into self.
+ llvm::Value *selfAddr =
+ LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
+ assert(selfAddr && "no self entry for a delegate init call?");
+
+ Builder.CreateStore(getNullForVariable(selfAddr), selfAddr);
+ }
+
+ RValue result;
+ if (isSuperMessage) {
+ // super is only valid in an Objective-C method
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ result = Runtime.GenerateMessageSendSuper(*this, Return, ResultType,
+ E->getSelector(),
+ OMD->getClassInterface(),
+ isCategoryImpl,
+ Receiver,
+ isClassMessage,
+ Args,
+ method);
+ } else {
+ result = Runtime.GenerateMessageSend(*this, Return, ResultType,
+ E->getSelector(),
+ Receiver, Args, OID,
+ method);
+ }
+
+ // For delegate init calls in ARC, implicitly store the result of
+ // the call back into self. This takes ownership of the value.
+ if (isDelegateInit) {
+ llvm::Value *selfAddr =
+ LocalDeclMap[cast<ObjCMethodDecl>(CurCodeDecl)->getSelfDecl()];
+ llvm::Value *newSelf = result.getScalarVal();
+
+ // The delegate return type isn't necessarily a matching type; in
+ // fact, it's quite likely to be 'id'.
+ llvm::Type *selfTy =
+ cast<llvm::PointerType>(selfAddr->getType())->getElementType();
+ newSelf = Builder.CreateBitCast(newSelf, selfTy);
+
+ Builder.CreateStore(newSelf, selfAddr);
+ }
+
+ return AdjustRelatedResultType(*this, E, method, result);
+}
+
+namespace {
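+/// A cleanup that, at the end of an ARC -dealloc method, sends the
+/// dealloc message to the superclass (if there is one), so that the user
+/// does not have to write [super dealloc] explicitly.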
+struct FinishARCDealloc : EHScopeStack::Cleanup {
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ const ObjCMethodDecl *method = cast<ObjCMethodDecl>(CGF.CurCodeDecl);
+
+ const ObjCImplDecl *impl = cast<ObjCImplDecl>(method->getDeclContext());
+ const ObjCInterfaceDecl *iface = impl->getClassInterface();
+ if (!iface->getSuperClass()) return;
+
+ bool isCategory = isa<ObjCCategoryImplDecl>(impl);
+
+ // Call [super dealloc] if we have a superclass.
+ llvm::Value *self = CGF.LoadObjCSelf();
+
+ CallArgList args;
+ CGF.CGM.getObjCRuntime().GenerateMessageSendSuper(CGF, ReturnValueSlot(),
+ CGF.getContext().VoidTy,
+ method->getSelector(),
+ iface,
+ isCategory,
+ self,
+ /*is class msg*/ false,
+ args,
+ method);
+ }
+};
+}
+
+/// StartObjCMethod - Begin emission of an ObjCMethod. This generates
+/// the LLVM function and sets the other context used by
+/// CodeGenFunction.
+void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD,
+ SourceLocation StartLoc) {
+ FunctionArgList args;
+ // Check if we should generate debug info for this method.
+ if (CGM.getModuleDebugInfo() && !OMD->hasAttr<NoDebugAttr>())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+ llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
+
+ const CGFunctionInfo &FI = CGM.getTypes().arrangeObjCMethodDeclaration(OMD);
+ CGM.SetInternalFunctionAttributes(OMD, Fn, FI);
+
+ args.push_back(OMD->getSelfDecl());
+ args.push_back(OMD->getCmdDecl());
+
+ for (ObjCMethodDecl::param_const_iterator PI = OMD->param_begin(),
+ E = OMD->param_end(); PI != E; ++PI)
+ args.push_back(*PI);
+
+ CurGD = OMD;
+
+ StartFunction(OMD, OMD->getResultType(), Fn, FI, args, StartLoc);
+
+ // In ARC, certain methods get an extra cleanup.
+ if (CGM.getLangOpts().ObjCAutoRefCount &&
+ OMD->isInstanceMethod() &&
+ OMD->getSelector().isUnarySelector()) {
+ const IdentifierInfo *ident =
+ OMD->getSelector().getIdentifierInfoForSlot(0);
+ if (ident->isStr("dealloc"))
+ EHStack.pushCleanup<FinishARCDealloc>(getARCCleanupKind());
+ }
+}
+
+static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue, QualType type);
+
+/// Generate an Objective-C method. An Objective-C method is a C function with
+/// its pointer, name, and types registered in the class structure.
+void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
+ StartObjCMethod(OMD, OMD->getClassInterface(), OMD->getLocStart());
+ EmitStmt(OMD->getBody());
+ FinishFunction(OMD->getBodyRBrace());
+}
+
+/// emitStructGetterCall - Call the runtime function to load a property
+/// into the return value slot.
+static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
+ bool isAtomic, bool hasStrong) {
+ ASTContext &Context = CGF.getContext();
+
+ llvm::Value *src =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(),
+ ivar, 0).getAddress();
+
+ // objc_copyStruct (ReturnValue, &structIvar,
+ // sizeof (Type of Ivar), isAtomic, false);
+ CallArgList args;
+
+ llvm::Value *dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
+ args.add(RValue::get(dest), Context.VoidPtrTy);
+
+ src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
+ args.add(RValue::get(src), Context.VoidPtrTy);
+
+ CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
+ args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
+ args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
+ args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);
+
+ llvm::Value *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(Context.VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ fn, ReturnValueSlot(), args);
+}
+
+/// Determine whether the given architecture supports unaligned atomic
+/// accesses. They don't have to be fast, just faster than a function
+/// call and a mutex.
+static bool hasUnalignedAtomics(llvm::Triple::ArchType arch) {
+ // FIXME: Allow unaligned atomic load/store on x86. (It is not
+ // currently supported by the backend.)
+ return false;
+}
+
+/// Return the maximum size that permits atomic accesses for the given
+/// architecture.
+static CharUnits getMaxAtomicAccessSize(CodeGenModule &CGM,
+ llvm::Triple::ArchType arch) {
+ // ARM has 8-byte atomic accesses, but it's not clear whether we
+ // want to rely on them here.
+
+ // In the default case, just assume that any size up to a pointer is
+ // fine given adequate alignment.
+ return CharUnits::fromQuantity(CGM.PointerSizeInBytes);
+}
+
+namespace {
+ class PropertyImplStrategy {
+ public:
+ enum StrategyKind {
+ /// The 'native' strategy is to use the architecture's provided
+ /// reads and writes.
+ Native,
+
+ /// Use objc_setProperty and objc_getProperty.
+ GetSetProperty,
+
+ /// Use objc_setProperty for the setter, but use expression
+ /// evaluation for the getter.
+ SetPropertyAndExpressionGet,
+
+ /// Use objc_copyStruct.
+ CopyStruct,
+
+ /// The 'expression' strategy is to emit normal assignment or
+ /// lvalue-to-rvalue expressions.
+ Expression
+ };
+
+ StrategyKind getKind() const { return StrategyKind(Kind); }
+
+ bool hasStrongMember() const { return HasStrong; }
+ bool isAtomic() const { return IsAtomic; }
+ bool isCopy() const { return IsCopy; }
+
+ CharUnits getIvarSize() const { return IvarSize; }
+ CharUnits getIvarAlignment() const { return IvarAlignment; }
+
+ PropertyImplStrategy(CodeGenModule &CGM,
+ const ObjCPropertyImplDecl *propImpl);
+
+ private:
+ unsigned Kind : 8;
+ unsigned IsAtomic : 1;
+ unsigned IsCopy : 1;
+ unsigned HasStrong : 1;
+
+ CharUnits IvarSize;
+ CharUnits IvarAlignment;
+ };
+}
+
+/// Pick an implementation strategy for the given property synthesis.
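+///
+/// The decision proceeds roughly as follows: copy properties always use the
+/// objc_getProperty/objc_setProperty entry points; retain properties choose
+/// between expression emission and the setProperty entry points depending on
+/// atomicity, ARC, and GC mode; non-atomic, bitfield, and GC- or
+/// ARC-qualified ivars use expression emission; structs with strong members
+/// use objc_copyStruct; whatever remains is emitted as a native atomic
+/// access if the ivar's size and alignment permit, and via objc_copyStruct
+/// otherwise.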
+PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
+ const ObjCPropertyImplDecl *propImpl) {
+ const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
+ ObjCPropertyDecl::SetterKind setterKind = prop->getSetterKind();
+
+ IsCopy = (setterKind == ObjCPropertyDecl::Copy);
+ IsAtomic = prop->isAtomic();
+ HasStrong = false; // doesn't matter here.
+
+ // Evaluate the ivar's size and alignment.
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+ QualType ivarType = ivar->getType();
+ llvm::tie(IvarSize, IvarAlignment)
+ = CGM.getContext().getTypeInfoInChars(ivarType);
+
+ // If we have a copy property, we always have to use getProperty/setProperty.
+ // TODO: we could actually use setProperty and an expression for non-atomics.
+ if (IsCopy) {
+ Kind = GetSetProperty;
+ return;
+ }
+
+ // Handle retain.
+ if (setterKind == ObjCPropertyDecl::Retain) {
+ // In GC-only, there's nothing special that needs to be done.
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
+ // fallthrough
+
+ // In ARC, if the property is non-atomic, use expression emission,
+ // which translates to objc_storeStrong. This isn't required, but
+ // it's slightly nicer.
+ } else if (CGM.getLangOpts().ObjCAutoRefCount && !IsAtomic) {
+ Kind = Expression;
+ return;
+
+ // Otherwise, we need to at least use setProperty. However, if
+ // the property isn't atomic, we can use normal expression
+ // emission for the getter.
+ } else if (!IsAtomic) {
+ Kind = SetPropertyAndExpressionGet;
+ return;
+
+ // Otherwise, we have to use both setProperty and getProperty.
+ } else {
+ Kind = GetSetProperty;
+ return;
+ }
+ }
+
+ // If we're not atomic, just use expression accesses.
+ if (!IsAtomic) {
+ Kind = Expression;
+ return;
+ }
+
+ // Properties on bitfield ivars need to be emitted using expression
+ // accesses even if they're nominally atomic.
+ if (ivar->isBitField()) {
+ Kind = Expression;
+ return;
+ }
+
+ // GC-qualified or ARC-qualified ivars need to be emitted as
+ // expressions. This actually works out to being atomic anyway,
+ // except for ARC __strong, but that should trigger the above code.
+ if (ivarType.hasNonTrivialObjCLifetime() ||
+ (CGM.getLangOpts().getGC() &&
+ CGM.getContext().getObjCGCAttrKind(ivarType))) {
+ Kind = Expression;
+ return;
+ }
+
+ // Compute whether the ivar has strong members.
+ if (CGM.getLangOpts().getGC())
+ if (const RecordType *recordType = ivarType->getAs<RecordType>())
+ HasStrong = recordType->getDecl()->hasObjectMember();
+
+ // We can never access structs with object members with a native
+ // access, because we need to use write barriers. This is what
+ // objc_copyStruct is for.
+ if (HasStrong) {
+ Kind = CopyStruct;
+ return;
+ }
+
+ // Otherwise, this is target-dependent and based on the size and
+ // alignment of the ivar.
+
+ // If the size of the ivar is not a power of two, give up. We don't
+ // want to get into the business of doing compare-and-swaps.
+ if (!IvarSize.isPowerOfTwo()) {
+ Kind = CopyStruct;
+ return;
+ }
+
+ llvm::Triple::ArchType arch =
+ CGM.getContext().getTargetInfo().getTriple().getArch();
+
+ // Most architectures require memory to fit within a single cache
+ // line, so the alignment has to be at least the size of the access.
+ // Otherwise we have to grab a lock.
+ if (IvarAlignment < IvarSize && !hasUnalignedAtomics(arch)) {
+ Kind = CopyStruct;
+ return;
+ }
+
+ // If the ivar's size exceeds the architecture's maximum atomic
+ // access size, we have to use CopyStruct.
+ if (IvarSize > getMaxAtomicAccessSize(CGM, arch)) {
+ Kind = CopyStruct;
+ return;
+ }
+
+ // Otherwise, we can use native loads and stores.
+ Kind = Native;
+}
+
+/// GenerateObjCGetter - Generate an Objective-C property getter
+/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// is illegal within a category.
+void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID) {
+ llvm::Constant *AtomicHelperFn =
+ GenerateObjCAtomicGetterCopyHelperFunction(PID);
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
+ assert(OMD && "Invalid call to generate getter (empty method)");
+ StartObjCMethod(OMD, IMP->getClassInterface(), OMD->getLocStart());
+
+ generateObjCGetterBody(IMP, PID, AtomicHelperFn);
+
+ FinishFunction();
+}
+
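+/// Determine whether the getter body Sema synthesized for this property
+/// implementation is trivial, i.e. whether the result can be produced by a
+/// simple load of the ivar rather than by evaluating a C++ construction.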
+static bool hasTrivialGetExpr(const ObjCPropertyImplDecl *propImpl) {
+ const Expr *getter = propImpl->getGetterCXXConstructor();
+ if (!getter) return true;
+
+ // Sema only makes one of these when the ivar has a C++ class type,
+ // so the form is pretty constrained.
+
+ // If the property has a reference type, we might just be binding a
+ // reference, in which case the result will be a gl-value. We should
+ // treat this as a non-trivial operation.
+ if (getter->isGLValue())
+ return false;
+
+ // If we selected a trivial copy-constructor, we're okay.
+ if (const CXXConstructExpr *construct = dyn_cast<CXXConstructExpr>(getter))
+ return (construct->getConstructor()->isTrivial());
+
+ // The constructor might require cleanups (in which case it's never
+ // trivial).
+ assert(isa<ExprWithCleanups>(getter));
+ return false;
+}
+
+/// emitCPPObjectAtomicGetterCall - Call the runtime function to
+/// copy the ivar into the return slot.
+static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
+ llvm::Value *returnAddr,
+ ObjCIvarDecl *ivar,
+ llvm::Constant *AtomicHelperFn) {
+ // objc_copyCppObjectAtomic (&returnSlot, &CppObjectIvar,
+ // AtomicHelperFn);
+ CallArgList args;
+
+ // The 1st argument is the return Slot.
+ args.add(RValue::get(returnAddr), CGF.getContext().VoidPtrTy);
+
+ // The 2nd argument is the address of the ivar.
+ llvm::Value *ivarAddr =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
+ CGF.LoadObjCSelf(), ivar, 0).getAddress();
+ ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
+
+ // Third argument is the helper function.
+ args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
+
+ llvm::Value *copyCppAtomicObjectFn =
+ CGF.CGM.getObjCRuntime().GetCppAtomicObjectFunction();
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ copyCppAtomicObjectFn, ReturnValueSlot(), args);
+}
+
+void
+CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn) {
+ // If there's a non-trivial 'get' expression, we just have to emit that.
+ if (!hasTrivialGetExpr(propImpl)) {
+ if (!AtomicHelperFn) {
+ ReturnStmt ret(SourceLocation(), propImpl->getGetterCXXConstructor(),
+ /*nrvo*/ 0);
+ EmitReturnStmt(ret);
+ } else {
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+ emitCPPObjectAtomicGetterCall(*this, ReturnValue,
+ ivar, AtomicHelperFn);
+ }
+ return;
+ }
+
+ const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
+ QualType propType = prop->getType();
+ ObjCMethodDecl *getterMethod = prop->getGetterMethodDecl();
+
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+
+ // Pick an implementation strategy.
+ PropertyImplStrategy strategy(CGM, propImpl);
+ switch (strategy.getKind()) {
+ case PropertyImplStrategy::Native: {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
+
+ // Currently, all atomic accesses have to be through integer
+ // types, so there's no point in trying to pick a prettier type.
+ llvm::Type *bitcastType =
+ llvm::Type::getIntNTy(getLLVMContext(),
+ getContext().toBits(strategy.getIvarSize()));
+ bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
+
+ // Perform an atomic load. This does not impose ordering constraints.
+ llvm::Value *ivarAddr = LV.getAddress();
+ ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
+ llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
+ load->setAlignment(strategy.getIvarAlignment().getQuantity());
+ load->setAtomic(llvm::Unordered);
+
+ // Store that value into the return address. Doing this with a
+ // bitcast is likely to produce some pretty ugly IR, but it's not
+ // the *most* terrible thing in the world.
+ Builder.CreateStore(load, Builder.CreateBitCast(ReturnValue, bitcastType));
+
+ // Make sure we don't do an autorelease.
+ AutoreleaseResult = false;
+ return;
+ }
+
+ case PropertyImplStrategy::GetSetProperty: {
+ llvm::Value *getPropertyFn =
+ CGM.getObjCRuntime().GetPropertyGetFunction();
+ if (!getPropertyFn) {
+ CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
+ return;
+ }
+
+ // Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
+ // FIXME: Can't this be simpler? This might even be worse than the
+ // corresponding gcc code.
+ llvm::Value *cmd =
+ Builder.CreateLoad(LocalDeclMap[getterMethod->getCmdDecl()], "cmd");
+ llvm::Value *self = Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
+ llvm::Value *ivarOffset =
+ EmitIvarOffset(classImpl->getClassInterface(), ivar);
+
+ CallArgList args;
+ args.add(RValue::get(self), getContext().getObjCIdType());
+ args.add(RValue::get(cmd), getContext().getObjCSelType());
+ args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
+ args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
+ getContext().BoolTy);
+
+ // FIXME: We shouldn't need to get the function info here, the
+ // runtime already should have computed it to build the function.
+ RValue RV = EmitCall(getTypes().arrangeFunctionCall(propType, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ getPropertyFn, ReturnValueSlot(), args);
+
+ // We need to fix the type here. Ivars with copy & retain are
+ // always objects so we don't need to worry about complex or
+ // aggregates.
+ RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
+ getTypes().ConvertType(propType)));
+
+ EmitReturnOfRValue(RV, propType);
+
+ // objc_getProperty does an autorelease, so we should suppress ours.
+ AutoreleaseResult = false;
+
+ return;
+ }
+
+ case PropertyImplStrategy::CopyStruct:
+ emitStructGetterCall(*this, ivar, strategy.isAtomic(),
+ strategy.hasStrongMember());
+ return;
+
+ case PropertyImplStrategy::Expression:
+ case PropertyImplStrategy::SetPropertyAndExpressionGet: {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, 0);
+
+ QualType ivarType = ivar->getType();
+ if (ivarType->isAnyComplexType()) {
+ ComplexPairTy pair = LoadComplexFromAddr(LV.getAddress(),
+ LV.isVolatileQualified());
+ StoreComplexToAddr(pair, ReturnValue, LV.isVolatileQualified());
+ } else if (hasAggregateLLVMType(ivarType)) {
+ // The return value slot is guaranteed to not be aliased, but
+ // that's not necessarily the same as "on the stack", so
+ // we still potentially need objc_memmove_collectable.
+ EmitAggregateCopy(ReturnValue, LV.getAddress(), ivarType);
+ } else {
+ llvm::Value *value;
+ if (propType->isReferenceType()) {
+ value = LV.getAddress();
+ } else {
+ // We want to load and autoreleaseReturnValue ARC __weak ivars.
+ if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
+ value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
+
+ // Otherwise we want to do a simple load, suppressing the
+ // final autorelease.
+ } else {
+ value = EmitLoadOfLValue(LV).getScalarVal();
+ AutoreleaseResult = false;
+ }
+
+ value = Builder.CreateBitCast(value, ConvertType(propType));
+ }
+
+ EmitReturnOfRValue(RValue::get(value), propType);
+ }
+ return;
+ }
+
+ }
+ llvm_unreachable("bad @property implementation strategy!");
+}
+
+/// emitStructSetterCall - Call the runtime function to store the value
+/// from the first formal parameter into the given ivar.
+static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
+ ObjCIvarDecl *ivar) {
+ // objc_copyStruct (&structIvar, &Arg,
+ // sizeof (struct something), true, false);
+ CallArgList args;
+
+ // The first argument is the address of the ivar.
+ llvm::Value *ivarAddr = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
+ CGF.LoadObjCSelf(), ivar, 0)
+ .getAddress();
+ ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
+
+ // The second argument is the address of the parameter variable.
+ ParmVarDecl *argVar = *OMD->param_begin();
+ DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
+ VK_LValue, SourceLocation());
+ llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
+ argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
+
+ // The third argument is the sizeof the type.
+ llvm::Value *size =
+ CGF.CGM.getSize(CGF.getContext().getTypeSizeInChars(ivar->getType()));
+ args.add(RValue::get(size), CGF.getContext().getSizeType());
+
+ // The fourth argument is the 'isAtomic' flag.
+ args.add(RValue::get(CGF.Builder.getTrue()), CGF.getContext().BoolTy);
+
+ // The fifth argument is the 'hasStrong' flag.
+ // FIXME: should this really always be false?
+ args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);
+
+ llvm::Value *copyStructFn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ copyStructFn, ReturnValueSlot(), args);
+}
+
+/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
+/// the value from the first formal parameter into the given ivar, using
+/// the C++ API for atomic C++ objects with non-trivial copy assignment.
+static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
+ ObjCMethodDecl *OMD,
+ ObjCIvarDecl *ivar,
+ llvm::Constant *AtomicHelperFn) {
+ // objc_copyCppObjectAtomic (&CppObjectIvar, &Arg,
+ // AtomicHelperFn);
+ CallArgList args;
+
+ // The first argument is the address of the ivar.
+ llvm::Value *ivarAddr =
+ CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(),
+ CGF.LoadObjCSelf(), ivar, 0).getAddress();
+ ivarAddr = CGF.Builder.CreateBitCast(ivarAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(ivarAddr), CGF.getContext().VoidPtrTy);
+
+ // The second argument is the address of the parameter variable.
+ ParmVarDecl *argVar = *OMD->param_begin();
+ DeclRefExpr argRef(argVar, false, argVar->getType().getNonReferenceType(),
+ VK_LValue, SourceLocation());
+ llvm::Value *argAddr = CGF.EmitLValue(&argRef).getAddress();
+ argAddr = CGF.Builder.CreateBitCast(argAddr, CGF.Int8PtrTy);
+ args.add(RValue::get(argAddr), CGF.getContext().VoidPtrTy);
+
+ // Third argument is the helper function.
+ args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
+
+ llvm::Value *copyCppAtomicObjectFn =
+ CGF.CGM.getObjCRuntime().GetCppAtomicObjectFunction();
+ CGF.EmitCall(CGF.getTypes().arrangeFunctionCall(CGF.getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ copyCppAtomicObjectFn, ReturnValueSlot(), args);
+}
+
+
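+/// Determine whether the setter assignment Sema synthesized for this
+/// property implementation is trivial, i.e. whether the new value can be
+/// stored with a plain assignment rather than a real C++ operator= call.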
+static bool hasTrivialSetExpr(const ObjCPropertyImplDecl *PID) {
+ Expr *setter = PID->getSetterCXXAssignment();
+ if (!setter) return true;
+
+ // Sema only makes one of these when the ivar has a C++ class type,
+ // so the form is pretty constrained.
+
+ // An operator call is trivial if the function it calls is trivial.
+ // This also implies that there's nothing non-trivial going on with
+ // the arguments, because operator= can only be trivial if it's a
+ // synthesized assignment operator and therefore both parameters are
+ // references.
+ if (CallExpr *call = dyn_cast<CallExpr>(setter)) {
+ if (const FunctionDecl *callee
+ = dyn_cast_or_null<FunctionDecl>(call->getCalleeDecl()))
+ if (callee->isTrivial())
+ return true;
+ return false;
+ }
+
+ assert(isa<ExprWithCleanups>(setter));
+ return false;
+}
+
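+/// Decide whether the optimized objc_setProperty entry points can be used.
+/// As written, this requires non-GC code targeting Mac OS X 10.8 or later.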
+static bool UseOptimizedSetter(CodeGenModule &CGM) {
+ if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
+ return false;
+ const TargetInfo &Target = CGM.getContext().getTargetInfo();
+
+ if (Target.getPlatformName() != "macosx")
+ return false;
+
+ return Target.getPlatformMinVersion() >= VersionTuple(10, 8);
+}
+
+void
+CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn) {
+ const ObjCPropertyDecl *prop = propImpl->getPropertyDecl();
+ ObjCIvarDecl *ivar = propImpl->getPropertyIvarDecl();
+ ObjCMethodDecl *setterMethod = prop->getSetterMethodDecl();
+
+ // Just use the setter expression if Sema gave us one and it's
+ // non-trivial.
+ if (!hasTrivialSetExpr(propImpl)) {
+ if (!AtomicHelperFn)
+ // If non-atomic, assignment is called directly.
+ EmitStmt(propImpl->getSetterCXXAssignment());
+ else
+ // If atomic, assignment is called via a locking api.
+ emitCPPObjectAtomicSetterCall(*this, setterMethod, ivar,
+ AtomicHelperFn);
+ return;
+ }
+
+ PropertyImplStrategy strategy(CGM, propImpl);
+ switch (strategy.getKind()) {
+ case PropertyImplStrategy::Native: {
+ llvm::Value *argAddr = LocalDeclMap[*setterMethod->param_begin()];
+
+ LValue ivarLValue =
+ EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
+ llvm::Value *ivarAddr = ivarLValue.getAddress();
+
+ // Currently, all atomic accesses have to be through integer
+ // types, so there's no point in trying to pick a prettier type.
+ llvm::Type *bitcastType =
+ llvm::Type::getIntNTy(getLLVMContext(),
+ getContext().toBits(strategy.getIvarSize()));
+ bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
+
+ // Cast both arguments to the chosen operation type.
+ argAddr = Builder.CreateBitCast(argAddr, bitcastType);
+ ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
+
+ // This bitcast load is likely to cause some nasty IR.
+ llvm::Value *load = Builder.CreateLoad(argAddr);
+
+ // Perform an atomic store. There are no memory ordering requirements.
+ llvm::StoreInst *store = Builder.CreateStore(load, ivarAddr);
+ store->setAlignment(strategy.getIvarAlignment().getQuantity());
+ store->setAtomic(llvm::Unordered);
+ return;
+ }
+
+ case PropertyImplStrategy::GetSetProperty:
+ case PropertyImplStrategy::SetPropertyAndExpressionGet: {
+
+ llvm::Value *setOptimizedPropertyFn = 0;
+ llvm::Value *setPropertyFn = 0;
+ if (UseOptimizedSetter(CGM)) {
+ // Targeting 10.8 or later with GC off: use the optimized entry points.
+ setOptimizedPropertyFn =
+ CGM.getObjCRuntime()
+ .GetOptimizedPropertySetFunction(strategy.isAtomic(),
+ strategy.isCopy());
+ if (!setOptimizedPropertyFn) {
+ CGM.ErrorUnsupported(propImpl, "Obj-C optimized setter - NYI");
+ return;
+ }
+ } else {
+ setPropertyFn = CGM.getObjCRuntime().GetPropertySetFunction();
+ if (!setPropertyFn) {
+ CGM.ErrorUnsupported(propImpl, "Obj-C setter requiring atomic copy");
+ return;
+ }
+ }
+
+ // Emit objc_setProperty((id) self, _cmd, offset, arg,
+ // <is-atomic>, <is-copy>).
+ llvm::Value *cmd =
+ Builder.CreateLoad(LocalDeclMap[setterMethod->getCmdDecl()]);
+ llvm::Value *self =
+ Builder.CreateBitCast(LoadObjCSelf(), VoidPtrTy);
+ llvm::Value *ivarOffset =
+ EmitIvarOffset(classImpl->getClassInterface(), ivar);
+ llvm::Value *arg = LocalDeclMap[*setterMethod->param_begin()];
+ arg = Builder.CreateBitCast(Builder.CreateLoad(arg, "arg"), VoidPtrTy);
+
+ CallArgList args;
+ args.add(RValue::get(self), getContext().getObjCIdType());
+ args.add(RValue::get(cmd), getContext().getObjCSelType());
+ if (setOptimizedPropertyFn) {
+ args.add(RValue::get(arg), getContext().getObjCIdType());
+ args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
+ EmitCall(getTypes().arrangeFunctionCall(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ setOptimizedPropertyFn, ReturnValueSlot(), args);
+ } else {
+ args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
+ args.add(RValue::get(arg), getContext().getObjCIdType());
+ args.add(RValue::get(Builder.getInt1(strategy.isAtomic())),
+ getContext().BoolTy);
+ args.add(RValue::get(Builder.getInt1(strategy.isCopy())),
+ getContext().BoolTy);
+ // FIXME: We shouldn't need to get the function info here, the runtime
+ // already should have computed it to build the function.
+ EmitCall(getTypes().arrangeFunctionCall(getContext().VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ setPropertyFn, ReturnValueSlot(), args);
+ }
+
+ return;
+ }
+
+ case PropertyImplStrategy::CopyStruct:
+ emitStructSetterCall(*this, setterMethod, ivar);
+ return;
+
+ case PropertyImplStrategy::Expression:
+ break;
+ }
+
+ // Otherwise, fake up some ASTs and emit a normal assignment.
+ ValueDecl *selfDecl = setterMethod->getSelfDecl();
+ DeclRefExpr self(selfDecl, false, selfDecl->getType(),
+ VK_LValue, SourceLocation());
+ ImplicitCastExpr selfLoad(ImplicitCastExpr::OnStack,
+ selfDecl->getType(), CK_LValueToRValue, &self,
+ VK_RValue);
+ ObjCIvarRefExpr ivarRef(ivar, ivar->getType().getNonReferenceType(),
+ SourceLocation(), &selfLoad, true, true);
+
+ ParmVarDecl *argDecl = *setterMethod->param_begin();
+ QualType argType = argDecl->getType().getNonReferenceType();
+ DeclRefExpr arg(argDecl, false, argType, VK_LValue, SourceLocation());
+ ImplicitCastExpr argLoad(ImplicitCastExpr::OnStack,
+ argType.getUnqualifiedType(), CK_LValueToRValue,
+ &arg, VK_RValue);
+
+ // The property type can differ from the ivar type in some situations with
+ // Objective-C pointer types; we can always bit cast the RHS in these cases.
+ // The following absurdity is just to ensure well-formed IR.
+ CastKind argCK = CK_NoOp;
+ if (ivarRef.getType()->isObjCObjectPointerType()) {
+ if (argLoad.getType()->isObjCObjectPointerType())
+ argCK = CK_BitCast;
+ else if (argLoad.getType()->isBlockPointerType())
+ argCK = CK_BlockPointerToObjCPointerCast;
+ else
+ argCK = CK_CPointerToObjCPointerCast;
+ } else if (ivarRef.getType()->isBlockPointerType()) {
+ if (argLoad.getType()->isBlockPointerType())
+ argCK = CK_BitCast;
+ else
+ argCK = CK_AnyPointerToBlockPointerCast;
+ } else if (ivarRef.getType()->isPointerType()) {
+ argCK = CK_BitCast;
+ }
+ ImplicitCastExpr argCast(ImplicitCastExpr::OnStack,
+ ivarRef.getType(), argCK, &argLoad,
+ VK_RValue);
+ Expr *finalArg = &argLoad;
+ if (!getContext().hasSameUnqualifiedType(ivarRef.getType(),
+ argLoad.getType()))
+ finalArg = &argCast;
+
+ BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
+ ivarRef.getType(), VK_RValue, OK_Ordinary,
+ SourceLocation());
+ EmitStmt(&assign);
+}
+
+/// GenerateObjCSetter - Generate an Objective-C property setter
+/// function. The given Decl must be an ObjCImplementationDecl. @synthesize
+/// is illegal within a category.
+void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID) {
+ llvm::Constant *AtomicHelperFn =
+ GenerateObjCAtomicSetterCopyHelperFunction(PID);
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
+ assert(OMD && "Invalid call to generate setter (empty method)");
+ StartObjCMethod(OMD, IMP->getClassInterface(), OMD->getLocStart());
+
+ generateObjCSetterBody(IMP, PID, AtomicHelperFn);
+
+ FinishFunction();
+}
+
+namespace {
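+ /// A cleanup that runs the given destroyer over a single ivar of 'self';
+ /// used when emitting the implicit .cxx_destruct method.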
+ struct DestroyIvar : EHScopeStack::Cleanup {
+ private:
+ llvm::Value *addr;
+ const ObjCIvarDecl *ivar;
+ CodeGenFunction::Destroyer *destroyer;
+ bool useEHCleanupForArray;
+ public:
+ DestroyIvar(llvm::Value *addr, const ObjCIvarDecl *ivar,
+ CodeGenFunction::Destroyer *destroyer,
+ bool useEHCleanupForArray)
+ : addr(addr), ivar(ivar), destroyer(destroyer),
+ useEHCleanupForArray(useEHCleanupForArray) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ LValue lvalue
+ = CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
+ CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
+ flags.isForNormalCleanup() && useEHCleanupForArray);
+ }
+ };
+}
+
+/// Like CodeGenFunction::destroyARCStrong, but do it with a call.
+static void destroyARCStrongWithStore(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ llvm::Value *null = getNullForVariable(addr);
+ CGF.EmitARCStoreStrongCall(addr, null, /*ignored*/ true);
+}
+
+static void emitCXXDestructMethod(CodeGenFunction &CGF,
+ ObjCImplementationDecl *impl) {
+ CodeGenFunction::RunCleanupsScope scope(CGF);
+
+ llvm::Value *self = CGF.LoadObjCSelf();
+
+ const ObjCInterfaceDecl *iface = impl->getClassInterface();
+ for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
+ ivar; ivar = ivar->getNextIvar()) {
+ QualType type = ivar->getType();
+
+ // Check whether the ivar is a destructible type.
+ QualType::DestructionKind dtorKind = type.isDestructedType();
+ if (!dtorKind) continue;
+
+ CodeGenFunction::Destroyer *destroyer = 0;
+
+ // Use a call to objc_storeStrong to destroy strong ivars, for the
+ // general benefit of the tools.
+ if (dtorKind == QualType::DK_objc_strong_lifetime) {
+ destroyer = destroyARCStrongWithStore;
+
+ // Otherwise use the default for the destruction kind.
+ } else {
+ destroyer = CGF.getDestroyer(dtorKind);
+ }
+
+ CleanupKind cleanupKind = CGF.getCleanupKind(dtorKind);
+
+ CGF.EHStack.pushCleanup<DestroyIvar>(cleanupKind, self, ivar, destroyer,
+ cleanupKind & EHCleanup);
+ }
+
+ assert(scope.requiresCleanups() && "nothing to do in .cxx_destruct?");
+}
+
+void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
+ ObjCMethodDecl *MD,
+ bool ctor) {
+ MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
+ StartObjCMethod(MD, IMP->getClassInterface(), MD->getLocStart());
+
+ // Emit .cxx_construct.
+ if (ctor) {
+ // Suppress the final autorelease in ARC.
+ AutoreleaseResult = false;
+
+ SmallVector<CXXCtorInitializer *, 8> IvarInitializers;
+ for (ObjCImplementationDecl::init_const_iterator B = IMP->init_begin(),
+ E = IMP->init_end(); B != E; ++B) {
+ CXXCtorInitializer *IvarInit = (*B);
+ FieldDecl *Field = IvarInit->getAnyMember();
+ ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(Field);
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
+ LoadObjCSelf(), Ivar, 0);
+ EmitAggExpr(IvarInit->getInit(),
+ AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ }
+ // constructor returns 'self'.
+ CodeGenTypes &Types = CGM.getTypes();
+ QualType IdTy(CGM.getContext().getObjCIdType());
+ llvm::Value *SelfAsId =
+ Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
+ EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);
+
+ // Emit .cxx_destruct.
+ } else {
+ emitCXXDestructMethod(*this, IMP);
+ }
+ FinishFunction();
+}
+
+bool CodeGenFunction::IndirectObjCSetterArg(const CGFunctionInfo &FI) {
+ CGFunctionInfo::const_arg_iterator it = FI.arg_begin();
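+ // Skip the implicit 'self' and '_cmd' arguments to reach the new value.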
+ it++; it++;
+ const ABIArgInfo &AI = it->info;
+ // FIXME: Is this a sufficient check?
+ return (AI.getKind() == ABIArgInfo::Indirect);
+}
+
+bool CodeGenFunction::IvarTypeWithAggrGCObjects(QualType Ty) {
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC)
+ return false;
+ if (const RecordType *FDTTy = Ty.getTypePtr()->getAs<RecordType>())
+ return FDTTy->getDecl()->hasObjectMember();
+ return false;
+}
+
+llvm::Value *CodeGenFunction::LoadObjCSelf() {
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ return Builder.CreateLoad(LocalDeclMap[OMD->getSelfDecl()], "self");
+}
+
+QualType CodeGenFunction::TypeOfSelfObject() {
+ const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
+ ImplicitParamDecl *selfDecl = OMD->getSelfDecl();
+ const ObjCObjectPointerType *PTy = cast<ObjCObjectPointerType>(
+ getContext().getCanonicalType(selfDecl->getType()));
+ return PTy->getPointeeType();
+}
+
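+/// Emit an Objective-C fast enumeration statement,
+///   for (element in collection) { body }
+/// by repeatedly sending countByEnumeratingWithState:objects:count: to the
+/// collection and looping over each batch of objects it returns. Mutation
+/// of the collection during enumeration is detected by re-reading the
+/// mutations pointer written into the enumeration state and calling the
+/// runtime's enumeration-mutation function if it changed.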
+void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
+ llvm::Constant *EnumerationMutationFn =
+ CGM.getObjCRuntime().EnumerationMutationFunction();
+
+ if (!EnumerationMutationFn) {
+ CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
+ return;
+ }
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI)
+ DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
+
+ // The local variable comes into scope immediately.
+ AutoVarEmission variable = AutoVarEmission::invalid();
+ if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
+ variable = EmitAutoVarAlloca(*cast<VarDecl>(SD->getSingleDecl()));
+
+ JumpDest LoopEnd = getJumpDestInCurrentScope("forcoll.end");
+
+ // Fast enumeration state.
+ QualType StateTy = CGM.getObjCFastEnumerationStateType();
+ llvm::Value *StatePtr = CreateMemTemp(StateTy, "state.ptr");
+ EmitNullInitialization(StatePtr, StateTy);
+
+ // Number of elements in the items array.
+ static const unsigned NumItems = 16;
+
+ // Fetch the countByEnumeratingWithState:objects:count: selector.
+ IdentifierInfo *II[] = {
+ &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+ &CGM.getContext().Idents.get("objects"),
+ &CGM.getContext().Idents.get("count")
+ };
+ Selector FastEnumSel =
+ CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);
+
+ QualType ItemsTy =
+ getContext().getConstantArrayType(getContext().getObjCIdType(),
+ llvm::APInt(32, NumItems),
+ ArrayType::Normal, 0);
+ llvm::Value *ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
+
+ // Emit the collection pointer. In ARC, we do a retain.
+ llvm::Value *Collection;
+ if (getLangOpts().ObjCAutoRefCount) {
+ Collection = EmitARCRetainScalarExpr(S.getCollection());
+
+ // Enter a cleanup to do the release.
+ EmitObjCConsumeObject(S.getCollection()->getType(), Collection);
+ } else {
+ Collection = EmitScalarExpr(S.getCollection());
+ }
+
+ // The 'continue' label needs to appear within the cleanup for the
+ // collection object.
+ JumpDest AfterBody = getJumpDestInCurrentScope("forcoll.next");
+
+ // Send it our message:
+ CallArgList Args;
+
+ // The first argument is a temporary of the enumeration-state type.
+ Args.add(RValue::get(StatePtr), getContext().getPointerType(StateTy));
+
+ // The second argument is a temporary array with space for NumItems
+ // pointers. We'll actually be loading elements from the array
+ // pointer written into the control state; this buffer is so that
+ // collections that *aren't* backed by arrays can still queue up
+ // batches of elements.
+ Args.add(RValue::get(ItemsPtr), getContext().getPointerType(ItemsTy));
+
+ // The third argument is the capacity of that temporary array.
+ llvm::Type *UnsignedLongLTy = ConvertType(getContext().UnsignedLongTy);
+ llvm::Constant *Count = llvm::ConstantInt::get(UnsignedLongLTy, NumItems);
+ Args.add(RValue::get(Count), getContext().UnsignedLongTy);
+
+ // Start the enumeration.
+ RValue CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().UnsignedLongTy,
+ FastEnumSel,
+ Collection, Args);
+
+ // The initial number of objects that were returned in the buffer.
+ llvm::Value *initialBufferLimit = CountRV.getScalarVal();
+
+ llvm::BasicBlock *EmptyBB = createBasicBlock("forcoll.empty");
+ llvm::BasicBlock *LoopInitBB = createBasicBlock("forcoll.loopinit");
+
+ llvm::Value *zero = llvm::Constant::getNullValue(UnsignedLongLTy);
+
+ // If the limit pointer was zero to begin with, the collection is
+ // empty; skip all this.
+ Builder.CreateCondBr(Builder.CreateICmpEQ(initialBufferLimit, zero, "iszero"),
+ EmptyBB, LoopInitBB);
+
+ // Otherwise, initialize the loop.
+ EmitBlock(LoopInitBB);
+
+ // Save the initial mutations value. This is the value at an
+ // address that was written into the state object by
+ // countByEnumeratingWithState:objects:count:.
+ llvm::Value *StateMutationsPtrPtr =
+ Builder.CreateStructGEP(StatePtr, 2, "mutationsptr.ptr");
+ llvm::Value *StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr,
+ "mutationsptr");
+
+ llvm::Value *initialMutations =
+ Builder.CreateLoad(StateMutationsPtr, "forcoll.initial-mutations");
+
+ // Start looping. This is the point we return to whenever we have a
+ // fresh, non-empty batch of objects.
+ llvm::BasicBlock *LoopBodyBB = createBasicBlock("forcoll.loopbody");
+ EmitBlock(LoopBodyBB);
+
+ // The current index into the buffer.
+ llvm::PHINode *index = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.index");
+ index->addIncoming(zero, LoopInitBB);
+
+ // The current buffer size.
+ llvm::PHINode *count = Builder.CreatePHI(UnsignedLongLTy, 3, "forcoll.count");
+ count->addIncoming(initialBufferLimit, LoopInitBB);
+
+ // Check whether the mutations value has changed from where it was
+ // at start. StateMutationsPtr should actually be invariant between
+ // refreshes.
+ StateMutationsPtr = Builder.CreateLoad(StateMutationsPtrPtr, "mutationsptr");
+ llvm::Value *currentMutations
+ = Builder.CreateLoad(StateMutationsPtr, "statemutations");
+
+ llvm::BasicBlock *WasMutatedBB = createBasicBlock("forcoll.mutated");
+ llvm::BasicBlock *WasNotMutatedBB = createBasicBlock("forcoll.notmutated");
+
+ Builder.CreateCondBr(Builder.CreateICmpEQ(currentMutations, initialMutations),
+ WasNotMutatedBB, WasMutatedBB);
+
+ // If so, call the enumeration-mutation function.
+ EmitBlock(WasMutatedBB);
+ llvm::Value *V =
+ Builder.CreateBitCast(Collection,
+ ConvertType(getContext().getObjCIdType()));
+ CallArgList Args2;
+ Args2.add(RValue::get(V), getContext().getObjCIdType());
+ // FIXME: We shouldn't need to get the function info here, the runtime already
+ // should have computed it to build the function.
+ EmitCall(CGM.getTypes().arrangeFunctionCall(getContext().VoidTy, Args2,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All),
+ EnumerationMutationFn, ReturnValueSlot(), Args2);
+
+ // Otherwise, or if the mutation function returns, just continue.
+ EmitBlock(WasNotMutatedBB);
+
+ // Initialize the element variable.
+ RunCleanupsScope elementVariableScope(*this);
+ bool elementIsVariable;
+ LValue elementLValue;
+ QualType elementType;
+ if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement())) {
+ // Initialize the variable, in case it's a __block variable or something.
+ EmitAutoVarInit(variable);
+
+ const VarDecl* D = cast<VarDecl>(SD->getSingleDecl());
+ DeclRefExpr tempDRE(const_cast<VarDecl*>(D), false, D->getType(),
+ VK_LValue, SourceLocation());
+ elementLValue = EmitLValue(&tempDRE);
+ elementType = D->getType();
+ elementIsVariable = true;
+
+ if (D->isARCPseudoStrong())
+ elementLValue.getQuals().setObjCLifetime(Qualifiers::OCL_ExplicitNone);
+ } else {
+ elementLValue = LValue(); // suppress warning
+ elementType = cast<Expr>(S.getElement())->getType();
+ elementIsVariable = false;
+ }
+ llvm::Type *convertedElementType = ConvertType(elementType);
+
+ // Fetch the buffer out of the enumeration state.
+ // TODO: this pointer should actually be invariant between
+ // refreshes, which would help us do certain loop optimizations.
+ llvm::Value *StateItemsPtr =
+ Builder.CreateStructGEP(StatePtr, 1, "stateitems.ptr");
+ llvm::Value *EnumStateItems =
+ Builder.CreateLoad(StateItemsPtr, "stateitems");
+
+ // Fetch the value at the current index from the buffer.
+ llvm::Value *CurrentItemPtr =
+ Builder.CreateGEP(EnumStateItems, index, "currentitem.ptr");
+ llvm::Value *CurrentItem = Builder.CreateLoad(CurrentItemPtr);
+
+ // Cast that value to the right type.
+ CurrentItem = Builder.CreateBitCast(CurrentItem, convertedElementType,
+ "currentitem");
+
+ // Make sure we have an l-value. Yes, this gets evaluated every
+ // time through the loop.
+ if (!elementIsVariable) {
+ elementLValue = EmitLValue(cast<Expr>(S.getElement()));
+ EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue);
+ } else {
+ EmitScalarInit(CurrentItem, elementLValue);
+ }
+
+ // If we do have an element variable, this assignment is the end of
+ // its initialization.
+ if (elementIsVariable)
+ EmitAutoVarCleanups(variable);
+
+ // Perform the loop body, setting up break and continue labels.
+ BreakContinueStack.push_back(BreakContinue(LoopEnd, AfterBody));
+ {
+ RunCleanupsScope Scope(*this);
+ EmitStmt(S.getBody());
+ }
+ BreakContinueStack.pop_back();
+
+ // Destroy the element variable now.
+ elementVariableScope.ForceCleanup();
+
+ // Check whether there are more elements.
+ EmitBlock(AfterBody.getBlock());
+
+ llvm::BasicBlock *FetchMoreBB = createBasicBlock("forcoll.refetch");
+
+ // First we check in the local buffer.
+ llvm::Value *indexPlusOne
+ = Builder.CreateAdd(index, llvm::ConstantInt::get(UnsignedLongLTy, 1));
+
+ // If we haven't overrun the buffer yet, we can continue.
+ Builder.CreateCondBr(Builder.CreateICmpULT(indexPlusOne, count),
+ LoopBodyBB, FetchMoreBB);
+
+ index->addIncoming(indexPlusOne, AfterBody.getBlock());
+ count->addIncoming(count, AfterBody.getBlock());
+
+ // Otherwise, we have to fetch more elements.
+ EmitBlock(FetchMoreBB);
+
+ CountRV =
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().UnsignedLongTy,
+ FastEnumSel,
+ Collection, Args);
+
+ // If we got a zero count, we're done.
+ llvm::Value *refetchCount = CountRV.getScalarVal();
+
+ // (note that the message send might split FetchMoreBB)
+ index->addIncoming(zero, Builder.GetInsertBlock());
+ count->addIncoming(refetchCount, Builder.GetInsertBlock());
+
+ Builder.CreateCondBr(Builder.CreateICmpEQ(refetchCount, zero),
+ EmptyBB, LoopBodyBB);
+
+ // No more elements.
+ EmitBlock(EmptyBB);
+
+ if (!elementIsVariable) {
+ // If the element was not a declaration, set it to be null.
+
+ llvm::Value *null = llvm::Constant::getNullValue(convertedElementType);
+ elementLValue = EmitLValue(cast<Expr>(S.getElement()));
+ EmitStoreThroughLValue(RValue::get(null), elementLValue);
+ }
+
+ if (DI)
+ DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
+
+ // Leave the cleanup we entered in ARC.
+ if (getLangOpts().ObjCAutoRefCount)
+ PopCleanupBlock();
+
+ EmitBlock(LoopEnd.getBlock());
+}
+
+void CodeGenFunction::EmitObjCAtTryStmt(const ObjCAtTryStmt &S) {
+ CGM.getObjCRuntime().EmitTryStmt(*this, S);
+}
+
+void CodeGenFunction::EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S) {
+ CGM.getObjCRuntime().EmitThrowStmt(*this, S);
+}
+
+void CodeGenFunction::EmitObjCAtSynchronizedStmt(
+ const ObjCAtSynchronizedStmt &S) {
+ CGM.getObjCRuntime().EmitSynchronizedStmt(*this, S);
+}
+
+/// Produce the code for a CK_ARCProduceObject. Just does a
+/// primitive retain.
+llvm::Value *CodeGenFunction::EmitObjCProduceObject(QualType type,
+ llvm::Value *value) {
+ return EmitARCRetain(type, value);
+}
+
+namespace {
+ struct CallObjCRelease : EHScopeStack::Cleanup {
+ CallObjCRelease(llvm::Value *object) : object(object) {}
+ llvm::Value *object;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitARCRelease(object, /*precise*/ true);
+ }
+ };
+}
+
+/// Produce the code for a CK_ARCConsumeObject. Does a primitive
+/// release at the end of the full-expression.
+llvm::Value *CodeGenFunction::EmitObjCConsumeObject(QualType type,
+ llvm::Value *object) {
+ // If we're in a conditional branch, we need to make the cleanup
+ // conditional.
+ pushFullExprCleanup<CallObjCRelease>(getARCCleanupKind(), object);
+ return object;
+}
+
+llvm::Value *CodeGenFunction::EmitObjCExtendObjectLifetime(QualType type,
+ llvm::Value *value) {
+ return EmitARCRetainAutorelease(type, value);
+}
+
+
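+/// Create a reference to an ARC runtime entry point, marking it as a weak
+/// external reference when we cannot assume the target's Objective-C
+/// runtime actually provides ARC support.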
+static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
+ llvm::FunctionType *type,
+ StringRef fnName) {
+ llvm::Constant *fn = CGM.CreateRuntimeFunction(type, fnName);
+
+ // In -fobjc-no-arc-runtime, emit weak references to the runtime
+ // support library.
+ if (!CGM.getCodeGenOpts().ObjCRuntimeHasARC)
+ if (llvm::Function *f = dyn_cast<llvm::Function>(fn))
+ f->setLinkage(llvm::Function::ExternalWeakLinkage);
+
+ return fn;
+}
+
+/// Perform an operation having the signature
+/// i8* (i8*)
+/// where a null input causes a no-op and returns null.
+static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
+ llvm::Value *value,
+ llvm::Constant *&fn,
+ StringRef fnName) {
+ if (isa<llvm::ConstantPointerNull>(value)) return value;
+
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, CGF.Int8PtrTy);
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ // Cast the argument to 'id'.
+ llvm::Type *origType = value->getType();
+ value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
+
+ // Call the function.
+ llvm::CallInst *call = CGF.Builder.CreateCall(fn, value);
+ call->setDoesNotThrow();
+
+ // Cast the result back to the original type.
+ return CGF.Builder.CreateBitCast(call, origType);
+}
+
+/// Perform an operation having the following signature:
+/// i8* (i8**)
+static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ llvm::Constant *&fn,
+ StringRef fnName) {
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, CGF.Int8PtrPtrTy);
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(CGF.Int8PtrTy, args, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ // Cast the argument to 'id*'.
+ llvm::Type *origType = addr->getType();
+ addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
+
+ // Call the function.
+ llvm::CallInst *call = CGF.Builder.CreateCall(fn, addr);
+ call->setDoesNotThrow();
+
+ // Cast the result back to a dereference of the original type.
+ llvm::Value *result = call;
+ if (origType != CGF.Int8PtrPtrTy)
+ result = CGF.Builder.CreateBitCast(result,
+ cast<llvm::PointerType>(origType)->getElementType());
+
+ return result;
+}
+
+/// Perform an operation having the following signature:
+/// i8* (i8**, i8*)
+static llvm::Value *emitARCStoreOperation(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ llvm::Value *value,
+ llvm::Constant *&fn,
+ StringRef fnName,
+ bool ignored) {
+ assert(cast<llvm::PointerType>(addr->getType())->getElementType()
+ == value->getType());
+
+ if (!fn) {
+ llvm::Type *argTypes[] = { CGF.Int8PtrPtrTy, CGF.Int8PtrTy };
+
+ llvm::FunctionType *fnType
+ = llvm::FunctionType::get(CGF.Int8PtrTy, argTypes, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ llvm::Type *origType = value->getType();
+
+ addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
+ value = CGF.Builder.CreateBitCast(value, CGF.Int8PtrTy);
+
+ llvm::CallInst *result = CGF.Builder.CreateCall2(fn, addr, value);
+ result->setDoesNotThrow();
+
+ if (ignored) return 0;
+
+ return CGF.Builder.CreateBitCast(result, origType);
+}
+
+/// Perform an operation having the following signature:
+/// void (i8**, i8**)
+static void emitARCCopyOperation(CodeGenFunction &CGF,
+ llvm::Value *dst,
+ llvm::Value *src,
+ llvm::Constant *&fn,
+ StringRef fnName) {
+ assert(dst->getType() == src->getType());
+
+ if (!fn) {
+ std::vector<llvm::Type*> argTypes(2, CGF.Int8PtrPtrTy);
+ llvm::FunctionType *fnType
+ = llvm::FunctionType::get(CGF.Builder.getVoidTy(), argTypes, false);
+ fn = createARCRuntimeFunction(CGF.CGM, fnType, fnName);
+ }
+
+ dst = CGF.Builder.CreateBitCast(dst, CGF.Int8PtrPtrTy);
+ src = CGF.Builder.CreateBitCast(src, CGF.Int8PtrPtrTy);
+
+ llvm::CallInst *result = CGF.Builder.CreateCall2(fn, dst, src);
+ result->setDoesNotThrow();
+}
+
+/// Produce the code to do a retain. Based on the type, calls one of:
+/// call i8* @objc_retain(i8* %value)
+/// call i8* @objc_retainBlock(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCRetain(QualType type, llvm::Value *value) {
+ if (type->isBlockPointerType())
+ return EmitARCRetainBlock(value, /*mandatory*/ false);
+ else
+ return EmitARCRetainNonBlock(value);
+}
+
+/// Retain the given object, with normal retain semantics.
+/// call i8* @objc_retain(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCRetainNonBlock(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retain,
+ "objc_retain");
+}
+
+/// Retain the given block, with _Block_copy semantics.
+/// call i8* @objc_retainBlock(i8* %value)
+///
+/// \param mandatory - If false, emit the call with metadata
+/// indicating that it's okay for the optimizer to eliminate this call
+/// if it can prove that the block never escapes except down the stack.
+llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
+ bool mandatory) {
+ llvm::Value *result
+ = emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainBlock,
+ "objc_retainBlock");
+
+ // If the copy isn't mandatory, add !clang.arc.copy_on_escape to
+ // tell the optimizer that it doesn't need to do this copy if the
+ // block doesn't escape, where being passed as an argument doesn't
+ // count as escaping.
+ if (!mandatory && isa<llvm::Instruction>(result)) {
+ llvm::CallInst *call
+ = cast<llvm::CallInst>(result->stripPointerCasts());
+ assert(call->getCalledValue() == CGM.getARCEntrypoints().objc_retainBlock);
+
+ SmallVector<llvm::Value*,1> args;
+ call->setMetadata("clang.arc.copy_on_escape",
+ llvm::MDNode::get(Builder.getContext(), args));
+ }
+
+ return result;
+}
+
+/// Retain the given object which is the result of a function call.
+/// call i8* @objc_retainAutoreleasedReturnValue(i8* %value)
+///
+/// Yes, this function name is one character away from a different
+/// call with completely different semantics.
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
+ // Fetch the void(void) inline asm which marks that we're going to
+ // retain the autoreleased return value.
+ llvm::InlineAsm *&marker
+ = CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker;
+ if (!marker) {
+ StringRef assembly
+ = CGM.getTargetCodeGenInfo()
+ .getARCRetainAutoreleasedReturnValueMarker();
+
+ // If we have an empty assembly string, there's nothing to do.
+ if (assembly.empty()) {
+
+ // Otherwise, at -O0, build an inline asm that we're going to call
+ // in a moment.
+ } else if (CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ llvm::FunctionType *type =
+ llvm::FunctionType::get(VoidTy, /*variadic*/false);
+
+ marker = llvm::InlineAsm::get(type, assembly, "", /*sideeffects*/ true);
+
+ // If we're at -O1 and above, we don't want to litter the code
+ // with this marker yet, so leave a breadcrumb for the ARC
+ // optimizer to pick up.
+ } else {
+ llvm::NamedMDNode *metadata =
+ CGM.getModule().getOrInsertNamedMetadata(
+ "clang.arc.retainAutoreleasedReturnValueMarker");
+ assert(metadata->getNumOperands() <= 1);
+ if (metadata->getNumOperands() == 0) {
+ llvm::Value *string = llvm::MDString::get(getLLVMContext(), assembly);
+ metadata->addOperand(llvm::MDNode::get(getLLVMContext(), string));
+ }
+ }
+ }
+
+ // Call the marker asm if we made one, which we do only at -O0.
+ if (marker) Builder.CreateCall(marker);
+
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainAutoreleasedReturnValue,
+ "objc_retainAutoreleasedReturnValue");
+}
+
+/// Release the given object.
+/// call void @objc_release(i8* %value)
+void CodeGenFunction::EmitARCRelease(llvm::Value *value, bool precise) {
+ if (isa<llvm::ConstantPointerNull>(value)) return;
+
+ llvm::Constant *&fn = CGM.getARCEntrypoints().objc_release;
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, Int8PtrTy);
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), args, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_release");
+ }
+
+ // Cast the argument to 'id'.
+ value = Builder.CreateBitCast(value, Int8PtrTy);
+
+ // Call objc_release.
+ llvm::CallInst *call = Builder.CreateCall(fn, value);
+ call->setDoesNotThrow();
+
+ if (!precise) {
+ SmallVector<llvm::Value*,1> args;
+ call->setMetadata("clang.imprecise_release",
+ llvm::MDNode::get(Builder.getContext(), args));
+ }
+}
+
+/// Store into a strong object. Always calls this:
+/// call void @objc_storeStrong(i8** %addr, i8* %value)
+llvm::Value *CodeGenFunction::EmitARCStoreStrongCall(llvm::Value *addr,
+ llvm::Value *value,
+ bool ignored) {
+ assert(cast<llvm::PointerType>(addr->getType())->getElementType()
+ == value->getType());
+
+ llvm::Constant *&fn = CGM.getARCEntrypoints().objc_storeStrong;
+ if (!fn) {
+ llvm::Type *argTypes[] = { Int8PtrPtrTy, Int8PtrTy };
+ llvm::FunctionType *fnType
+ = llvm::FunctionType::get(Builder.getVoidTy(), argTypes, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_storeStrong");
+ }
+
+ addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
+ llvm::Value *castValue = Builder.CreateBitCast(value, Int8PtrTy);
+
+ Builder.CreateCall2(fn, addr, castValue)->setDoesNotThrow();
+
+ if (ignored) return 0;
+ return value;
+}
+
+/// Store into a strong object. Sometimes calls this:
+/// call void @objc_storeStrong(i8** %addr, i8* %value)
+/// Other times, breaks it down into components.
+llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
+ llvm::Value *newValue,
+ bool ignored) {
+ QualType type = dst.getType();
+ bool isBlock = type->isBlockPointerType();
+
+  // At -O0, use the fused objc_storeStrong call unless this is a block type
+  // or the l-value is inadequately aligned.
+ if (shouldUseFusedARCCalls() &&
+ !isBlock &&
+ (dst.getAlignment().isZero() ||
+ dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
+ return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
+ }
+
+ // Otherwise, split it out.
+
+ // Retain the new value.
+ newValue = EmitARCRetain(type, newValue);
+
+ // Read the old value.
+ llvm::Value *oldValue = EmitLoadOfScalar(dst);
+
+ // Store. We do this before the release so that any deallocs won't
+ // see the old value.
+ EmitStoreOfScalar(newValue, dst);
+
+ // Finally, release the old value.
+ EmitARCRelease(oldValue, /*precise*/ false);
+
+ return newValue;
+}
+
+/// Autorelease the given object.
+/// call i8* @objc_autorelease(i8* %value)
+llvm::Value *CodeGenFunction::EmitARCAutorelease(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_autorelease,
+ "objc_autorelease");
+}
+
+/// Autorelease the given object.
+/// call i8* @objc_autoreleaseReturnValue(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCAutoreleaseReturnValue(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_autoreleaseReturnValue,
+ "objc_autoreleaseReturnValue");
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* @objc_retainAutoreleaseReturnValue(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseReturnValue(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainAutoreleaseReturnValue,
+ "objc_retainAutoreleaseReturnValue");
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* @objc_retainAutorelease(i8* %value)
+/// or
+/// %retain = call i8* @objc_retainBlock(i8* %value)
+/// call i8* @objc_autorelease(i8* %retain)
+llvm::Value *CodeGenFunction::EmitARCRetainAutorelease(QualType type,
+ llvm::Value *value) {
+ if (!type->isBlockPointerType())
+ return EmitARCRetainAutoreleaseNonBlock(value);
+
+ if (isa<llvm::ConstantPointerNull>(value)) return value;
+
+ llvm::Type *origType = value->getType();
+ value = Builder.CreateBitCast(value, Int8PtrTy);
+ value = EmitARCRetainBlock(value, /*mandatory*/ true);
+ value = EmitARCAutorelease(value);
+ return Builder.CreateBitCast(value, origType);
+}
+
+/// Do a fused retain/autorelease of the given object.
+/// call i8* @objc_retainAutorelease(i8* %value)
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseNonBlock(llvm::Value *value) {
+ return emitARCValueOperation(*this, value,
+ CGM.getARCEntrypoints().objc_retainAutorelease,
+ "objc_retainAutorelease");
+}
+
+/// i8* @objc_loadWeak(i8** %addr)
+/// Essentially objc_autorelease(objc_loadWeakRetained(addr)).
+llvm::Value *CodeGenFunction::EmitARCLoadWeak(llvm::Value *addr) {
+ return emitARCLoadOperation(*this, addr,
+ CGM.getARCEntrypoints().objc_loadWeak,
+ "objc_loadWeak");
+}
+
+/// i8* @objc_loadWeakRetained(i8** %addr)
+llvm::Value *CodeGenFunction::EmitARCLoadWeakRetained(llvm::Value *addr) {
+ return emitARCLoadOperation(*this, addr,
+ CGM.getARCEntrypoints().objc_loadWeakRetained,
+ "objc_loadWeakRetained");
+}
+
+/// i8* @objc_storeWeak(i8** %addr, i8* %value)
+/// Returns %value.
+llvm::Value *CodeGenFunction::EmitARCStoreWeak(llvm::Value *addr,
+ llvm::Value *value,
+ bool ignored) {
+ return emitARCStoreOperation(*this, addr, value,
+ CGM.getARCEntrypoints().objc_storeWeak,
+ "objc_storeWeak", ignored);
+}
+
+/// i8* @objc_initWeak(i8** %addr, i8* %value)
+/// Returns %value. %addr is known to not have a current weak entry.
+/// Essentially equivalent to:
+/// *addr = nil; objc_storeWeak(addr, value);
+void CodeGenFunction::EmitARCInitWeak(llvm::Value *addr, llvm::Value *value) {
+ // If we're initializing to null, just write null to memory; no need
+ // to get the runtime involved. But don't do this if optimization
+ // is enabled, because accounting for this would make the optimizer
+ // much more complicated.
+ if (isa<llvm::ConstantPointerNull>(value) &&
+ CGM.getCodeGenOpts().OptimizationLevel == 0) {
+ Builder.CreateStore(value, addr);
+ return;
+ }
+
+ emitARCStoreOperation(*this, addr, value,
+ CGM.getARCEntrypoints().objc_initWeak,
+ "objc_initWeak", /*ignored*/ true);
+}
+
+/// void @objc_destroyWeak(i8** %addr)
+/// Essentially objc_storeWeak(addr, nil).
+void CodeGenFunction::EmitARCDestroyWeak(llvm::Value *addr) {
+ llvm::Constant *&fn = CGM.getARCEntrypoints().objc_destroyWeak;
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, Int8PtrPtrTy);
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), args, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_destroyWeak");
+ }
+
+ // Cast the argument to 'id*'.
+ addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
+
+ llvm::CallInst *call = Builder.CreateCall(fn, addr);
+ call->setDoesNotThrow();
+}
+
+/// void @objc_moveWeak(i8** %dest, i8** %src)
+/// Disregards the current value in %dest. Leaves %src pointing to nothing.
+/// Essentially (objc_copyWeak(dest, src), objc_destroyWeak(src)).
+void CodeGenFunction::EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src) {
+ emitARCCopyOperation(*this, dst, src,
+ CGM.getARCEntrypoints().objc_moveWeak,
+ "objc_moveWeak");
+}
+
+/// void @objc_copyWeak(i8** %dest, i8** %src)
+/// Disregards the current value in %dest. Essentially
+///   objc_release(objc_initWeak(dest, objc_loadWeakRetained(src)))
+void CodeGenFunction::EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src) {
+ emitARCCopyOperation(*this, dst, src,
+ CGM.getARCEntrypoints().objc_copyWeak,
+ "objc_copyWeak");
+}
+
+/// Produce the code to do an objc_autoreleasepool_push.
+/// call i8* @objc_autoreleasePoolPush(void)
+llvm::Value *CodeGenFunction::EmitObjCAutoreleasePoolPush() {
+ llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPush;
+ if (!fn) {
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Int8PtrTy, false);
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPush");
+ }
+
+ llvm::CallInst *call = Builder.CreateCall(fn);
+ call->setDoesNotThrow();
+
+ return call;
+}
+
+/// Produce the code to do a primitive pop of an autorelease pool.
+/// call void @objc_autoreleasePoolPop(i8* %ptr)
+void CodeGenFunction::EmitObjCAutoreleasePoolPop(llvm::Value *value) {
+ assert(value->getType() == Int8PtrTy);
+
+ llvm::Constant *&fn = CGM.getRREntrypoints().objc_autoreleasePoolPop;
+ if (!fn) {
+ std::vector<llvm::Type*> args(1, Int8PtrTy);
+ llvm::FunctionType *fnType =
+ llvm::FunctionType::get(Builder.getVoidTy(), args, false);
+
+ // We don't want to use a weak import here; instead we should not
+ // fall into this path.
+ fn = createARCRuntimeFunction(CGM, fnType, "objc_autoreleasePoolPop");
+ }
+
+ llvm::CallInst *call = Builder.CreateCall(fn, value);
+ call->setDoesNotThrow();
+}
+
+/// Produce the code for the MRR version of objc_autoreleasepool_push,
+/// which is: [[NSAutoreleasePool alloc] init];
+/// where alloc is declared as + (id) alloc; in the NSAutoreleasePool class
+/// and init is declared as - (id) init; in its NSObject superclass.
+///
+llvm::Value *CodeGenFunction::EmitObjCMRRAutoreleasePoolPush() {
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Receiver = Runtime.EmitNSAutoreleasePoolClassRef(Builder);
+ // [NSAutoreleasePool alloc]
+ IdentifierInfo *II = &CGM.getContext().Idents.get("alloc");
+ Selector AllocSel = getContext().Selectors.getSelector(0, &II);
+ CallArgList Args;
+ RValue AllocRV =
+ Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().getObjCIdType(),
+ AllocSel, Receiver, Args);
+
+ // [Receiver init]
+ Receiver = AllocRV.getScalarVal();
+ II = &CGM.getContext().Idents.get("init");
+ Selector InitSel = getContext().Selectors.getSelector(0, &II);
+ RValue InitRV =
+ Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().getObjCIdType(),
+ InitSel, Receiver, Args);
+ return InitRV.getScalarVal();
+}
+
+/// Produce the MRR code to pop an autorelease pool by draining it:
+///   [tmp drain];
+void CodeGenFunction::EmitObjCMRRAutoreleasePoolPop(llvm::Value *Arg) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("drain");
+ Selector DrainSel = getContext().Selectors.getSelector(0, &II);
+ CallArgList Args;
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().VoidTy, DrainSel, Arg, Args);
+}
+
+void CodeGenFunction::destroyARCStrongPrecise(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "strongdestroy");
+ CGF.EmitARCRelease(ptr, /*precise*/ true);
+}
+
+void CodeGenFunction::destroyARCStrongImprecise(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ llvm::Value *ptr = CGF.Builder.CreateLoad(addr, "strongdestroy");
+ CGF.EmitARCRelease(ptr, /*precise*/ false);
+}
+
+void CodeGenFunction::destroyARCWeak(CodeGenFunction &CGF,
+ llvm::Value *addr,
+ QualType type) {
+ CGF.EmitARCDestroyWeak(addr);
+}
+
+namespace {
+ struct CallObjCAutoreleasePoolObject : EHScopeStack::Cleanup {
+ llvm::Value *Token;
+
+ CallObjCAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitObjCAutoreleasePoolPop(Token);
+ }
+ };
+ struct CallObjCMRRAutoreleasePoolObject : EHScopeStack::Cleanup {
+ llvm::Value *Token;
+
+ CallObjCMRRAutoreleasePoolObject(llvm::Value *token) : Token(token) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.EmitObjCMRRAutoreleasePoolPop(Token);
+ }
+ };
+}
+
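+/// Push a cleanup that pops the given autorelease pool token when the
+/// enclosing scope ends, using the ARC entrypoint under -fobjc-arc and the
+/// MRR drain message otherwise.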
+void CodeGenFunction::EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr) {
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, Ptr);
+ else
+ EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, Ptr);
+}
+
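+/// Load a scalar from the given l-value, returning the value and a flag
+/// indicating whether the result is already retained. Only __weak loads,
+/// which go through objc_loadWeakRetained, produce a retained result here.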
+static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue,
+ QualType type) {
+ switch (type.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Autoreleasing:
+ return TryEmitResult(CGF.EmitLoadOfLValue(lvalue).getScalarVal(),
+ false);
+
+ case Qualifiers::OCL_Weak:
+ return TryEmitResult(CGF.EmitARCLoadWeakRetained(lvalue.getAddress()),
+ true);
+ }
+
+ llvm_unreachable("impossible lifetime!");
+}
+
+static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ const Expr *e) {
+ e = e->IgnoreParens();
+ QualType type = e->getType();
+
+ // If we're loading retained from a __strong xvalue, we can avoid
+ // an extra retain/release pair by zeroing out the source of this
+ // "move" operation.
+ if (e->isXValue() &&
+ !type.isConstQualified() &&
+ type.getObjCLifetime() == Qualifiers::OCL_Strong) {
+ // Emit the lvalue.
+ LValue lv = CGF.EmitLValue(e);
+
+ // Load the object pointer.
+ llvm::Value *result = CGF.EmitLoadOfLValue(lv).getScalarVal();
+
+ // Set the source pointer to NULL.
+ CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);
+
+ return TryEmitResult(result, true);
+ }
+
+ // As a very special optimization, in ARC++, if the l-value is the
+ // result of a non-volatile assignment, do a simple retain of the
+ // result of the call to objc_storeWeak instead of reloading.
+ if (CGF.getLangOpts().CPlusPlus &&
+ !type.isVolatileQualified() &&
+ type.getObjCLifetime() == Qualifiers::OCL_Weak &&
+ isa<BinaryOperator>(e) &&
+ cast<BinaryOperator>(e)->getOpcode() == BO_Assign)
+ return TryEmitResult(CGF.EmitScalarExpr(e), false);
+
+ return tryEmitARCRetainLoadOfScalar(CGF, CGF.EmitLValue(e), type);
+}
+
+static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
+ llvm::Value *value);
+
+/// Given that the given expression is some sort of call (which does
+/// not return retained), emit a retain following it.
+static llvm::Value *emitARCRetainCall(CodeGenFunction &CGF, const Expr *e) {
+ llvm::Value *value = CGF.EmitScalarExpr(e);
+ return emitARCRetainAfterCall(CGF, value);
+}
+
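+/// Emit objc_retainAutoreleasedReturnValue on the given value, placing the
+/// retain immediately after the call or invoke that produced it (looking
+/// through bitcasts), or fall back to a plain retain.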
+static llvm::Value *emitARCRetainAfterCall(CodeGenFunction &CGF,
+ llvm::Value *value) {
+ if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(value)) {
+ CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
+
+ // Place the retain immediately following the call.
+ CGF.Builder.SetInsertPoint(call->getParent(),
+ ++llvm::BasicBlock::iterator(call));
+ value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
+
+ CGF.Builder.restoreIP(ip);
+ return value;
+ } else if (llvm::InvokeInst *invoke = dyn_cast<llvm::InvokeInst>(value)) {
+ CGBuilderTy::InsertPoint ip = CGF.Builder.saveIP();
+
+ // Place the retain at the beginning of the normal destination block.
+ llvm::BasicBlock *BB = invoke->getNormalDest();
+ CGF.Builder.SetInsertPoint(BB, BB->begin());
+ value = CGF.EmitARCRetainAutoreleasedReturnValue(value);
+
+ CGF.Builder.restoreIP(ip);
+ return value;
+
+ // Bitcasts can arise because of related-result returns. Rewrite
+ // the operand.
+ } else if (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(value)) {
+ llvm::Value *operand = bitcast->getOperand(0);
+ operand = emitARCRetainAfterCall(CGF, operand);
+ bitcast->setOperand(0, operand);
+ return bitcast;
+
+ // Generic fall-back case.
+ } else {
+ // Retain using the non-block variant: we never need to do a copy
+ // of a block that's been returned to us.
+ return CGF.EmitARCRetainNonBlock(value);
+ }
+}
+
+/// Determine whether it might be important to emit a separate
+/// objc_retain_block on the result of the given expression, or
+/// whether it's okay to just emit it in a +1 context.
+static bool shouldEmitSeparateBlockRetain(const Expr *e) {
+ assert(e->getType()->isBlockPointerType());
+ e = e->IgnoreParens();
+
+ // For future goodness, emit block expressions directly in +1
+ // contexts if we can.
+ if (isa<BlockExpr>(e))
+ return false;
+
+ if (const CastExpr *cast = dyn_cast<CastExpr>(e)) {
+ switch (cast->getCastKind()) {
+ // Emitting these operations in +1 contexts is goodness.
+ case CK_LValueToRValue:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCProduceObject:
+ return false;
+
+ // These operations preserve a block type.
+ case CK_NoOp:
+ case CK_BitCast:
+ return shouldEmitSeparateBlockRetain(cast->getSubExpr());
+
+ // These operations are known to be bad (or haven't been considered).
+ case CK_AnyPointerToBlockPointerCast:
+ default:
+ return true;
+ }
+ }
+
+ return true;
+}
+
+/// Try to emit a PseudoObjectExpr at +1.
+///
+/// This massively duplicates emitPseudoObjectRValue.
+static TryEmitResult tryEmitARCRetainPseudoObject(CodeGenFunction &CGF,
+ const PseudoObjectExpr *E) {
+ llvm::SmallVector<CodeGenFunction::OpaqueValueMappingData, 4> opaques;
+
+ // Find the result expression.
+ const Expr *resultExpr = E->getResultExpr();
+ assert(resultExpr);
+ TryEmitResult result;
+
+ for (PseudoObjectExpr::const_semantics_iterator
+ i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
+ const Expr *semantic = *i;
+
+ // If this semantic expression is an opaque value, bind it
+ // to the result of its source expression.
+ if (const OpaqueValueExpr *ov = dyn_cast<OpaqueValueExpr>(semantic)) {
+ typedef CodeGenFunction::OpaqueValueMappingData OVMA;
+ OVMA opaqueData;
+
+ // If this semantic is the result of the pseudo-object
+ // expression, try to evaluate the source as +1.
+ if (ov == resultExpr) {
+ assert(!OVMA::shouldBindAsLValue(ov));
+ result = tryEmitARCRetainScalarExpr(CGF, ov->getSourceExpr());
+ opaqueData = OVMA::bind(CGF, ov, RValue::get(result.getPointer()));
+
+ // Otherwise, just bind it.
+ } else {
+ opaqueData = OVMA::bind(CGF, ov, ov->getSourceExpr());
+ }
+ opaques.push_back(opaqueData);
+
+ // Otherwise, if the expression is the result, evaluate it
+ // and remember the result.
+ } else if (semantic == resultExpr) {
+ result = tryEmitARCRetainScalarExpr(CGF, semantic);
+
+ // Otherwise, evaluate the expression in an ignored context.
+ } else {
+ CGF.EmitIgnoredExpr(semantic);
+ }
+ }
+
+ // Unbind all the opaques now.
+ for (unsigned i = 0, e = opaques.size(); i != e; ++i)
+ opaques[i].unbind(CGF);
+
+ return result;
+}
+
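+/// Try to emit the given expression as a +1 (retained) value, peepholing
+/// casts, calls, message sends, and pseudo-object expressions that naturally
+/// produce retained results. The flag in the result records whether a
+/// retain was actually performed.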
+static TryEmitResult
+tryEmitARCRetainScalarExpr(CodeGenFunction &CGF, const Expr *e) {
+ // Look through cleanups.
+ if (const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(e)) {
+ CGF.enterFullExpression(cleanups);
+ CodeGenFunction::RunCleanupsScope scope(CGF);
+ return tryEmitARCRetainScalarExpr(CGF, cleanups->getSubExpr());
+ }
+
+ // The desired result type, if it differs from the type of the
+ // ultimate opaque expression.
+ llvm::Type *resultType = 0;
+
+ while (true) {
+ e = e->IgnoreParens();
+
+ // There's a break at the end of this if-chain; anything
+ // that wants to keep looping has to explicitly continue.
+ if (const CastExpr *ce = dyn_cast<CastExpr>(e)) {
+ switch (ce->getCastKind()) {
+ // No-op casts don't change the type, so we just ignore them.
+ case CK_NoOp:
+ e = ce->getSubExpr();
+ continue;
+
+ case CK_LValueToRValue: {
+ TryEmitResult loadResult
+ = tryEmitARCRetainLoadOfScalar(CGF, ce->getSubExpr());
+ if (resultType) {
+ llvm::Value *value = loadResult.getPointer();
+ value = CGF.Builder.CreateBitCast(value, resultType);
+ loadResult.setPointer(value);
+ }
+ return loadResult;
+ }
+
+ // These casts can change the type, so remember that and
+ // soldier on. We only need to remember the outermost such
+ // cast, though.
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_BitCast:
+ if (!resultType)
+ resultType = CGF.ConvertType(ce->getType());
+ e = ce->getSubExpr();
+ assert(e->getType()->hasPointerRepresentation());
+ continue;
+
+ // For consumptions, just emit the subexpression and thus elide
+ // the retain/release pair.
+ case CK_ARCConsumeObject: {
+ llvm::Value *result = CGF.EmitScalarExpr(ce->getSubExpr());
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ // Block extends are net +0. Naively, we could just recurse on
+ // the subexpression, but actually we need to ensure that the
+ // value is copied as a block, so there's a little filter here.
+ case CK_ARCExtendBlockObject: {
+ llvm::Value *result; // will be a +0 value
+
+ // If we can't safely assume the sub-expression will produce a
+ // block-copied value, emit the sub-expression at +0.
+ if (shouldEmitSeparateBlockRetain(ce->getSubExpr())) {
+ result = CGF.EmitScalarExpr(ce->getSubExpr());
+
+ // Otherwise, try to emit the sub-expression at +1 recursively.
+ } else {
+ TryEmitResult subresult
+ = tryEmitARCRetainScalarExpr(CGF, ce->getSubExpr());
+ result = subresult.getPointer();
+
+ // If that produced a retained value, just use that,
+ // possibly casting down.
+ if (subresult.getInt()) {
+ if (resultType)
+ result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ // Otherwise it's +0.
+ }
+
+ // Retain the object as a block, then cast down.
+ result = CGF.EmitARCRetainBlock(result, /*mandatory*/ true);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ // For reclaims, emit the subexpression as a retained call and
+ // skip the consumption.
+ case CK_ARCReclaimReturnedObject: {
+ llvm::Value *result = emitARCRetainCall(CGF, ce->getSubExpr());
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+ }
+
+ default:
+ break;
+ }
+
+ // Skip __extension__.
+ } else if (const UnaryOperator *op = dyn_cast<UnaryOperator>(e)) {
+ if (op->getOpcode() == UO_Extension) {
+ e = op->getSubExpr();
+ continue;
+ }
+
+ // For calls and message sends, use the retained-call logic.
+ // Delegate inits are a special case in that they're the only
+ // returns-retained expression that *isn't* surrounded by
+ // a consume.
+ } else if (isa<CallExpr>(e) ||
+ (isa<ObjCMessageExpr>(e) &&
+ !cast<ObjCMessageExpr>(e)->isDelegateInitCall())) {
+ llvm::Value *result = emitARCRetainCall(CGF, e);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, true);
+
+ // Look through pseudo-object expressions.
+ } else if (const PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
+ TryEmitResult result
+ = tryEmitARCRetainPseudoObject(CGF, pseudo);
+ if (resultType) {
+ llvm::Value *value = result.getPointer();
+ value = CGF.Builder.CreateBitCast(value, resultType);
+ result.setPointer(value);
+ }
+ return result;
+ }
+
+ // Conservatively halt the search at any other expression kind.
+ break;
+ }
+
+ // We didn't find an obvious production, so emit what we've got and
+ // tell the caller that we didn't manage to retain.
+ llvm::Value *result = CGF.EmitScalarExpr(e);
+ if (resultType) result = CGF.Builder.CreateBitCast(result, resultType);
+ return TryEmitResult(result, false);
+}
+
+static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
+ LValue lvalue,
+ QualType type) {
+ TryEmitResult result = tryEmitARCRetainLoadOfScalar(CGF, lvalue, type);
+ llvm::Value *value = result.getPointer();
+ if (!result.getInt())
+ value = CGF.EmitARCRetain(type, value);
+ return value;
+}
+
+/// EmitARCRetainScalarExpr - Semantically equivalent to
+/// EmitARCRetain(e->getType(), EmitScalarExpr(e)), but making a
+/// best-effort attempt to peephole expressions that naturally produce
+/// retained objects.
+llvm::Value *CodeGenFunction::EmitARCRetainScalarExpr(const Expr *e) {
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
+ llvm::Value *value = result.getPointer();
+ if (!result.getInt())
+ value = EmitARCRetain(e->getType(), value);
+ return value;
+}
+
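+/// Emit the expression as a retained and autoreleased value, reusing a
+/// retain that the expression naturally produces where possible.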
+llvm::Value *
+CodeGenFunction::EmitARCRetainAutoreleaseScalarExpr(const Expr *e) {
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e);
+ llvm::Value *value = result.getPointer();
+ if (result.getInt())
+ value = EmitARCAutorelease(value);
+ else
+ value = EmitARCRetainAutorelease(e->getType(), value);
+ return value;
+}
+
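+/// Emit the operand of a CK_ARCExtendBlockObject cast: make sure the value
+/// is copied as a block, then schedule a release at the end of the
+/// full-expression.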
+llvm::Value *CodeGenFunction::EmitARCExtendBlockObject(const Expr *e) {
+ llvm::Value *result;
+ bool doRetain;
+
+ if (shouldEmitSeparateBlockRetain(e)) {
+ result = EmitScalarExpr(e);
+ doRetain = true;
+ } else {
+ TryEmitResult subresult = tryEmitARCRetainScalarExpr(*this, e);
+ result = subresult.getPointer();
+ doRetain = !subresult.getInt();
+ }
+
+ if (doRetain)
+ result = EmitARCRetainBlock(result, /*mandatory*/ true);
+ return EmitObjCConsumeObject(e->getType(), result);
+}
+
+llvm::Value *CodeGenFunction::EmitObjCThrowOperand(const Expr *expr) {
+ // In ARC, retain and autorelease the expression.
+ if (getLangOpts().ObjCAutoRefCount) {
+ // Do so before running any cleanups for the full-expression.
+ // tryEmitARCRetainScalarExpr does make an effort to do things
+ // inside cleanups, but there are crazy cases like
+ // @throw A().foo;
+ // where a full retain+autorelease is required and would
+ // otherwise happen after the destructor for the temporary.
+ if (const ExprWithCleanups *ewc = dyn_cast<ExprWithCleanups>(expr)) {
+ enterFullExpression(ewc);
+ expr = ewc->getSubExpr();
+ }
+
+ CodeGenFunction::RunCleanupsScope cleanups(*this);
+ return EmitARCRetainAutoreleaseScalarExpr(expr);
+ }
+
+ // Otherwise, use the normal scalar-expression emission. The
+ // exception machinery doesn't do anything special with the
+ // exception like retaining it, so there's no safety associated with
+ // only running cleanups after the throw has started, and when it
+ // matters it tends to be substantially inferior code.
+ return EmitScalarExpr(expr);
+}
+
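+/// Emit a __strong assignment from a binary operator: evaluate the RHS at +1
+/// where possible, then either expand the store inline or fall back to
+/// EmitARCStoreStrong on the l-value.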
+std::pair<LValue,llvm::Value*>
+CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
+ bool ignored) {
+ // Evaluate the RHS first.
+ TryEmitResult result = tryEmitARCRetainScalarExpr(*this, e->getRHS());
+ llvm::Value *value = result.getPointer();
+
+ bool hasImmediateRetain = result.getInt();
+
+ // If we didn't emit a retained object, and the l-value is of block
+ // type, then we need to emit the block-retain immediately in case
+ // it invalidates the l-value.
+ if (!hasImmediateRetain && e->getType()->isBlockPointerType()) {
+ value = EmitARCRetainBlock(value, /*mandatory*/ false);
+ hasImmediateRetain = true;
+ }
+
+ LValue lvalue = EmitLValue(e->getLHS());
+
+ // If the RHS was emitted retained, expand this.
+ if (hasImmediateRetain) {
+ llvm::Value *oldValue =
+ EmitLoadOfScalar(lvalue);
+ EmitStoreOfScalar(value, lvalue);
+ EmitARCRelease(oldValue, /*precise*/ false);
+ } else {
+ value = EmitARCStoreStrong(lvalue, value, ignored);
+ }
+
+ return std::pair<LValue,llvm::Value*>(lvalue, value);
+}
+
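+/// Emit an assignment to an __autoreleasing l-value: the RHS is emitted as
+/// a retained-and-autoreleased value and then stored directly.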
+std::pair<LValue,llvm::Value*>
+CodeGenFunction::EmitARCStoreAutoreleasing(const BinaryOperator *e) {
+ llvm::Value *value = EmitARCRetainAutoreleaseScalarExpr(e->getRHS());
+ LValue lvalue = EmitLValue(e->getLHS());
+
+ EmitStoreOfScalar(value, lvalue);
+
+ return std::pair<LValue,llvm::Value*>(lvalue, value);
+}
+
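+/// Emit an @autoreleasepool statement: push a pool (via the runtime's
+/// objc_autoreleasePoolPush when available, or an NSAutoreleasePool
+/// otherwise), emit the body, and pop the pool as a normal cleanup.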
+void CodeGenFunction::EmitObjCAutoreleasePoolStmt(
+ const ObjCAutoreleasePoolStmt &ARPS) {
+ const Stmt *subStmt = ARPS.getSubStmt();
+ const CompoundStmt &S = cast<CompoundStmt>(*subStmt);
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI)
+ DI->EmitLexicalBlockStart(Builder, S.getLBracLoc());
+
+ // Keep track of the current cleanup stack depth.
+ RunCleanupsScope Scope(*this);
+ if (CGM.getCodeGenOpts().ObjCRuntimeHasARC) {
+ llvm::Value *token = EmitObjCAutoreleasePoolPush();
+ EHStack.pushCleanup<CallObjCAutoreleasePoolObject>(NormalCleanup, token);
+ } else {
+ llvm::Value *token = EmitObjCMRRAutoreleasePoolPush();
+ EHStack.pushCleanup<CallObjCMRRAutoreleasePoolObject>(NormalCleanup, token);
+ }
+
+ for (CompoundStmt::const_body_iterator I = S.body_begin(),
+ E = S.body_end(); I != E; ++I)
+ EmitStmt(*I);
+
+ if (DI)
+ DI->EmitLexicalBlockEnd(Builder, S.getRBracLoc());
+}
+
+/// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
+/// make sure it survives garbage collection until this point.
+void CodeGenFunction::EmitExtendGCLifetime(llvm::Value *object) {
+  // We just use inline assembly.
+ llvm::FunctionType *extenderType
+ = llvm::FunctionType::get(VoidTy, VoidPtrTy, RequiredArgs::All);
+ llvm::Value *extender
+ = llvm::InlineAsm::get(extenderType,
+ /* assembly */ "",
+ /* constraints */ "r",
+ /* side effects */ true);
+
+ object = Builder.CreateBitCast(object, VoidPtrTy);
+ Builder.CreateCall(extender, object)->setDoesNotThrow();
+}
+
+/// GenerateObjCAtomicSetterCopyHelperFunction - Given a C++ object type with
+/// a non-trivial copy assignment operator, produce the following helper
+/// function:
+///   static void copyHelper(Ty *dest, const Ty *source) { *dest = *source; }
+///
+llvm::Constant *
+CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID) {
+  // FIXME: This API is for the NeXT runtime only for now.
+ if (!getLangOpts().CPlusPlus || !getLangOpts().NeXTRuntime)
+ return 0;
+ QualType Ty = PID->getPropertyIvarDecl()->getType();
+ if (!Ty->isRecordType())
+ return 0;
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
+ return 0;
+ llvm::Constant * HelperFn = 0;
+ if (hasTrivialSetExpr(PID))
+ return 0;
+ assert(PID->getSetterCXXAssignment() && "SetterCXXAssignment - null");
+ if ((HelperFn = CGM.getAtomicSetterHelperFnMap(Ty)))
+ return HelperFn;
+
+ ASTContext &C = getContext();
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__assign_helper_atomic_property_");
+ FunctionDecl *FD = FunctionDecl::Create(C,
+ C.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, C.VoidTy, 0,
+ SC_Static,
+ SC_None,
+ false,
+ false);
+
+ QualType DestTy = C.getPointerType(Ty);
+ QualType SrcTy = Ty;
+ SrcTy.addConst();
+ SrcTy = C.getPointerType(SrcTy);
+
+ FunctionArgList args;
+ ImplicitParamDecl dstDecl(FD, SourceLocation(), 0, DestTy);
+ args.push_back(&dstDecl);
+ ImplicitParamDecl srcDecl(FD, SourceLocation(), 0, SrcTy);
+ args.push_back(&srcDecl);
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__assign_helper_atomic_property_",
+ &CGM.getModule());
+
+ if (CGM.getModuleDebugInfo())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+
+ StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
+
+ DeclRefExpr DstExpr(&dstDecl, false, DestTy,
+ VK_RValue, SourceLocation());
+ UnaryOperator DST(&DstExpr, UO_Deref, DestTy->getPointeeType(),
+ VK_LValue, OK_Ordinary, SourceLocation());
+
+ DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
+ VK_RValue, SourceLocation());
+ UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
+ VK_LValue, OK_Ordinary, SourceLocation());
+
+ Expr *Args[2] = { &DST, &SRC };
+ CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
+ CXXOperatorCallExpr TheCall(C, OO_Equal, CalleeExp->getCallee(),
+ Args, 2, DestTy->getPointeeType(),
+ VK_LValue, SourceLocation());
+
+ EmitStmt(&TheCall);
+
+ FinishFunction();
+ HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+ CGM.setAtomicSetterHelperFnMap(Ty, HelperFn);
+ return HelperFn;
+}
+
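+/// GenerateObjCAtomicGetterCopyHelperFunction - Given a C++ object type with
+/// a non-trivial copy constructor, produce a helper function that
+/// copy-constructs the object at *dest from *source, for use by atomic
+/// property getters.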
+llvm::Constant *
+CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID) {
+  // FIXME: This API is for the NeXT runtime only for now.
+ if (!getLangOpts().CPlusPlus || !getLangOpts().NeXTRuntime)
+ return 0;
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ QualType Ty = PD->getType();
+ if (!Ty->isRecordType())
+ return 0;
+ if ((!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_atomic)))
+ return 0;
+ llvm::Constant * HelperFn = 0;
+
+ if (hasTrivialGetExpr(PID))
+ return 0;
+ assert(PID->getGetterCXXConstructor() && "getGetterCXXConstructor - null");
+ if ((HelperFn = CGM.getAtomicGetterHelperFnMap(Ty)))
+ return HelperFn;
+
+
+ ASTContext &C = getContext();
+ IdentifierInfo *II
+ = &CGM.getContext().Idents.get("__copy_helper_atomic_property_");
+ FunctionDecl *FD = FunctionDecl::Create(C,
+ C.getTranslationUnitDecl(),
+ SourceLocation(),
+ SourceLocation(), II, C.VoidTy, 0,
+ SC_Static,
+ SC_None,
+ false,
+ false);
+
+ QualType DestTy = C.getPointerType(Ty);
+ QualType SrcTy = Ty;
+ SrcTy.addConst();
+ SrcTy = C.getPointerType(SrcTy);
+
+ FunctionArgList args;
+ ImplicitParamDecl dstDecl(FD, SourceLocation(), 0, DestTy);
+ args.push_back(&dstDecl);
+ ImplicitParamDecl srcDecl(FD, SourceLocation(), 0, SrcTy);
+ args.push_back(&srcDecl);
+
+ const CGFunctionInfo &FI =
+ CGM.getTypes().arrangeFunctionDeclaration(C.VoidTy, args,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+
+ llvm::FunctionType *LTy = CGM.getTypes().GetFunctionType(FI);
+
+ llvm::Function *Fn =
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ "__copy_helper_atomic_property_", &CGM.getModule());
+
+ if (CGM.getModuleDebugInfo())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+
+ StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
+
+ DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
+ VK_RValue, SourceLocation());
+
+ UnaryOperator SRC(&SrcExpr, UO_Deref, SrcTy->getPointeeType(),
+ VK_LValue, OK_Ordinary, SourceLocation());
+
+ CXXConstructExpr *CXXConstExpr =
+ cast<CXXConstructExpr>(PID->getGetterCXXConstructor());
+
+ SmallVector<Expr*, 4> ConstructorArgs;
+ ConstructorArgs.push_back(&SRC);
+ CXXConstructExpr::arg_iterator A = CXXConstExpr->arg_begin();
+ ++A;
+
+ for (CXXConstructExpr::arg_iterator AEnd = CXXConstExpr->arg_end();
+ A != AEnd; ++A)
+ ConstructorArgs.push_back(*A);
+
+ CXXConstructExpr *TheCXXConstructExpr =
+ CXXConstructExpr::Create(C, Ty, SourceLocation(),
+ CXXConstExpr->getConstructor(),
+ CXXConstExpr->isElidable(),
+ &ConstructorArgs[0], ConstructorArgs.size(),
+ CXXConstExpr->hadMultipleCandidates(),
+ CXXConstExpr->isListInitialization(),
+ CXXConstExpr->requiresZeroInitialization(),
+ CXXConstExpr->getConstructionKind(),
+ SourceRange());
+
+ DeclRefExpr DstExpr(&dstDecl, false, DestTy,
+ VK_RValue, SourceLocation());
+
+ RValue DV = EmitAnyExpr(&DstExpr);
+ CharUnits Alignment
+ = getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
+ EmitAggExpr(TheCXXConstructExpr,
+ AggValueSlot::forAddr(DV.getScalarVal(), Alignment, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+
+ FinishFunction();
+ HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
+ CGM.setAtomicGetterHelperFnMap(Ty, HelperFn);
+ return HelperFn;
+}
+
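+/// Send -copy and then -autorelease to the given block through the
+/// Objective-C runtime, returning the autoreleased copy.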
+llvm::Value *
+CodeGenFunction::EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty) {
+ // Get selectors for retain/autorelease.
+ IdentifierInfo *CopyID = &getContext().Idents.get("copy");
+ Selector CopySelector =
+ getContext().Selectors.getNullarySelector(CopyID);
+ IdentifierInfo *AutoreleaseID = &getContext().Idents.get("autorelease");
+ Selector AutoreleaseSelector =
+ getContext().Selectors.getNullarySelector(AutoreleaseID);
+
+ // Emit calls to retain/autorelease.
+ CGObjCRuntime &Runtime = CGM.getObjCRuntime();
+ llvm::Value *Val = Block;
+ RValue Result;
+ Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ Ty, CopySelector,
+ Val, CallArgList(), 0, 0);
+ Val = Result.getScalarVal();
+ Result = Runtime.GenerateMessageSend(*this, ReturnValueSlot(),
+ Ty, AutoreleaseSelector,
+ Val, CallArgList(), 0, 0);
+ Val = Result.getScalarVal();
+ return Val;
+}
+
+
+CGObjCRuntime::~CGObjCRuntime() {}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
new file mode 100644
index 0000000..db0bd95
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -0,0 +1,2671 @@
+//===------- CGObjCGNU.cpp - Emit LLVM Code from ASTs for a Module --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides Objective-C code generation targeting the GNU runtime. The
+// class in this file generates structures used by the GNU Objective-C runtime
+// library. These structures are defined in objc/objc.h and objc/objc-api.h in
+// the GNU runtime distribution.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "CGCleanup.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+
+#include "llvm/Intrinsics.h"
+#include "llvm/Module.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Target/TargetData.h"
+
+#include <cstdarg>
+
+
+using namespace clang;
+using namespace CodeGen;
+
+
+namespace {
+/// Class that lazily initialises the runtime function. Avoids inserting the
+/// types and the function declaration into a module if they're not used, and
+/// avoids constructing the type more than once if it's used more than once.
+class LazyRuntimeFunction {
+ CodeGenModule *CGM;
+ std::vector<llvm::Type*> ArgTys;
+ const char *FunctionName;
+ llvm::Constant *Function;
+ public:
+ /// Constructor leaves this class uninitialized, because it is intended to
+ /// be used as a field in another class and not all of the types that are
+ /// used as arguments will necessarily be available at construction time.
+ LazyRuntimeFunction() : CGM(0), FunctionName(0), Function(0) {}
+
+ /// Initialises the lazy function with the name, return type, and the types
+ /// of the arguments.
+ END_WITH_NULL
+ void init(CodeGenModule *Mod, const char *name,
+ llvm::Type *RetTy, ...) {
+ CGM =Mod;
+ FunctionName = name;
+ Function = 0;
+ ArgTys.clear();
+ va_list Args;
+ va_start(Args, RetTy);
+ while (llvm::Type *ArgTy = va_arg(Args, llvm::Type*))
+ ArgTys.push_back(ArgTy);
+ va_end(Args);
+ // Push the return type on at the end so we can pop it off easily
+ ArgTys.push_back(RetTy);
+ }
+  /// Overloaded cast operator that allows the class to be implicitly cast to
+  /// an LLVM constant.
+ operator llvm::Constant*() {
+ if (!Function) {
+ if (0 == FunctionName) return 0;
+ // We put the return type on the end of the vector, so pop it back off
+ llvm::Type *RetTy = ArgTys.back();
+ ArgTys.pop_back();
+ llvm::FunctionType *FTy = llvm::FunctionType::get(RetTy, ArgTys, false);
+ Function =
+ cast<llvm::Constant>(CGM->CreateRuntimeFunction(FTy, FunctionName));
+ // We won't need to use the types again, so we may as well clean up the
+ // vector now
+ ArgTys.resize(0);
+ }
+ return Function;
+ }
+ operator llvm::Function*() {
+ return cast<llvm::Function>((llvm::Constant*)*this);
+ }
+
+};
+
+
+/// GNU Objective-C runtime code generation. This class implements the parts of
+/// Objective-C support that are specific to the GNU family of runtimes (GCC and
+/// GNUstep).
+class CGObjCGNU : public CGObjCRuntime {
+protected:
+ /// The LLVM module into which output is inserted
+ llvm::Module &TheModule;
+  /// struct objc_super. Used for sending messages to super. This structure
+ /// contains the receiver (object) and the expected class.
+ llvm::StructType *ObjCSuperTy;
+ /// struct objc_super*. The type of the argument to the superclass message
+ /// lookup functions.
+ llvm::PointerType *PtrToObjCSuperTy;
+  /// LLVM type for selectors. Opaque pointer (i8*) unless a header declaring
+  /// SEL is included somewhere, in which case it will be whatever type is
+  /// declared in that header, most likely {i8*, i8*}.
+ llvm::PointerType *SelectorTy;
+ /// LLVM i8 type. Cached here to avoid repeatedly getting it in all of the
+ /// places where it's used
+ llvm::IntegerType *Int8Ty;
+ /// Pointer to i8 - LLVM type of char*, for all of the places where the
+ /// runtime needs to deal with C strings.
+ llvm::PointerType *PtrToInt8Ty;
+ /// Instance Method Pointer type. This is a pointer to a function that takes,
+ /// at a minimum, an object and a selector, and is the generic type for
+ /// Objective-C methods. Due to differences between variadic / non-variadic
+ /// calling conventions, it must always be cast to the correct type before
+ /// actually being used.
+ llvm::PointerType *IMPTy;
+ /// Type of an untyped Objective-C object. Clang treats id as a built-in type
+ /// when compiling Objective-C code, so this may be an opaque pointer (i8*),
+ /// but if the runtime header declaring it is included then it may be a
+ /// pointer to a structure.
+ llvm::PointerType *IdTy;
+ /// Pointer to a pointer to an Objective-C object. Used in the new ABI
+ /// message lookup function and some GC-related functions.
+ llvm::PointerType *PtrToIdTy;
+ /// The clang type of id. Used when using the clang CGCall infrastructure to
+ /// call Objective-C methods.
+ CanQualType ASTIdTy;
+ /// LLVM type for C int type.
+ llvm::IntegerType *IntTy;
+ /// LLVM type for an opaque pointer. This is identical to PtrToInt8Ty, but is
+ /// used in the code to document the difference between i8* meaning a pointer
+ /// to a C string and i8* meaning a pointer to some opaque type.
+ llvm::PointerType *PtrTy;
+ /// LLVM type for C long type. The runtime uses this in a lot of places where
+ /// it should be using intptr_t, but we can't fix this without breaking
+ /// compatibility with GCC...
+ llvm::IntegerType *LongTy;
+ /// LLVM type for C size_t. Used in various runtime data structures.
+ llvm::IntegerType *SizeTy;
+ /// LLVM type for C intptr_t.
+ llvm::IntegerType *IntPtrTy;
+ /// LLVM type for C ptrdiff_t. Mainly used in property accessor functions.
+ llvm::IntegerType *PtrDiffTy;
+ /// LLVM type for C int*. Used for GCC-ABI-compatible non-fragile instance
+ /// variables.
+ llvm::PointerType *PtrToIntTy;
+ /// LLVM type for Objective-C BOOL type.
+ llvm::Type *BoolTy;
+ /// 32-bit integer type, to save us needing to look it up every time it's used.
+ llvm::IntegerType *Int32Ty;
+ /// 64-bit integer type, to save us needing to look it up every time it's used.
+ llvm::IntegerType *Int64Ty;
+ /// Metadata kind used to tie method lookups to message sends. The GNUstep
+ /// runtime provides some LLVM passes that can use this to do things like
+ /// automatic IMP caching and speculative inlining.
+ unsigned msgSendMDKind;
+ /// Helper function that generates a constant string and returns a pointer to
+ /// the start of the string. The result of this function can be used anywhere
+ /// where the C code specifies const char*.
+ llvm::Constant *MakeConstantString(const std::string &Str,
+ const std::string &Name="") {
+ llvm::Constant *ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros);
+ }
+ /// Emits a linkonce_odr string, whose name is the prefix followed by the
+ /// string value. This allows the linker to combine the strings between
+ /// different modules. Used for EH typeinfo names, selector strings, and a
+ /// few other things.
+ llvm::Constant *ExportUniqueString(const std::string &Str,
+ const std::string prefix) {
+ std::string name = prefix + Str;
+ llvm::Constant *ConstStr = TheModule.getGlobalVariable(name);
+ if (!ConstStr) {
+ llvm::Constant *value = llvm::ConstantDataArray::getString(VMContext,Str);
+ ConstStr = new llvm::GlobalVariable(TheModule, value->getType(), true,
+ llvm::GlobalValue::LinkOnceODRLinkage, value, prefix + Str);
+ }
+ return llvm::ConstantExpr::getGetElementPtr(ConstStr, Zeros);
+ }
+ /// Generates a global structure, initialized by the elements in the vector.
+ /// The element types must match the types of the structure elements in the
+ /// first argument.
+ llvm::GlobalVariable *MakeGlobal(llvm::StructType *Ty,
+ llvm::ArrayRef<llvm::Constant*> V,
+ StringRef Name="",
+ llvm::GlobalValue::LinkageTypes linkage
+ =llvm::GlobalValue::InternalLinkage) {
+ llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
+ return new llvm::GlobalVariable(TheModule, Ty, false,
+ linkage, C, Name);
+ }
+ /// Generates a global array. The vector must contain the same number of
+ /// elements that the array type declares, of the type specified as the array
+ /// element type.
+ llvm::GlobalVariable *MakeGlobal(llvm::ArrayType *Ty,
+ llvm::ArrayRef<llvm::Constant*> V,
+ StringRef Name="",
+ llvm::GlobalValue::LinkageTypes linkage
+ =llvm::GlobalValue::InternalLinkage) {
+ llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
+ return new llvm::GlobalVariable(TheModule, Ty, false,
+ linkage, C, Name);
+ }
+ /// Generates a global array, inferring the array type from the specified
+ /// element type and the size of the initialiser.
+ llvm::GlobalVariable *MakeGlobalArray(llvm::Type *Ty,
+ llvm::ArrayRef<llvm::Constant*> V,
+ StringRef Name="",
+ llvm::GlobalValue::LinkageTypes linkage
+ =llvm::GlobalValue::InternalLinkage) {
+ llvm::ArrayType *ArrayTy = llvm::ArrayType::get(Ty, V.size());
+ return MakeGlobal(ArrayTy, V, Name, linkage);
+ }
+  /// Ensures that the value has the required type by inserting a bitcast if
+  /// necessary. This lets us avoid emitting redundant bitcasts.
+ llvm::Value* EnforceType(CGBuilderTy B, llvm::Value *V, llvm::Type *Ty){
+ if (V->getType() == Ty) return V;
+ return B.CreateBitCast(V, Ty);
+ }
+ // Some zeros used for GEPs in lots of places.
+ llvm::Constant *Zeros[2];
+ /// Null pointer value. Mainly used as a terminator in various arrays.
+ llvm::Constant *NULLPtr;
+ /// LLVM context.
+ llvm::LLVMContext &VMContext;
+private:
+ /// Placeholder for the class. Lots of things refer to the class before we've
+ /// actually emitted it. We use this alias as a placeholder, and then replace
+ /// it with a pointer to the class structure before finally emitting the
+ /// module.
+ llvm::GlobalAlias *ClassPtrAlias;
+ /// Placeholder for the metaclass. Lots of things refer to the class before
+  /// we've actually emitted it. We use this alias as a placeholder, and then
+  /// replace it with a pointer to the metaclass structure before finally
+  /// emitting the module.
+ llvm::GlobalAlias *MetaClassPtrAlias;
+  /// All of the classes that have been generated for this compilation unit.
+  std::vector<llvm::Constant*> Classes;
+  /// All of the categories that have been generated for this compilation unit.
+  std::vector<llvm::Constant*> Categories;
+  /// All of the Objective-C constant strings that have been generated for this
+  /// compilation unit.
+ std::vector<llvm::Constant*> ConstantStrings;
+ /// Map from string values to Objective-C constant strings in the output.
+ /// Used to prevent emitting Objective-C strings more than once. This should
+ /// not be required at all - CodeGenModule should manage this list.
+ llvm::StringMap<llvm::Constant*> ObjCStrings;
+ /// All of the protocols that have been declared.
+ llvm::StringMap<llvm::Constant*> ExistingProtocols;
+ /// For each variant of a selector, we store the type encoding and a
+ /// placeholder value. For an untyped selector, the type will be the empty
+ /// string. Selector references are all done via the module's selector table,
+ /// so we create an alias as a placeholder and then replace it with the real
+ /// value later.
+ typedef std::pair<std::string, llvm::GlobalAlias*> TypedSelector;
+ /// Type of the selector map. This is roughly equivalent to the structure
+ /// used in the GNUstep runtime, which maintains a list of all of the valid
+ /// types for a selector in a table.
+ typedef llvm::DenseMap<Selector, SmallVector<TypedSelector, 2> >
+ SelectorMap;
+ /// A map from selectors to selector types. This allows us to emit all
+ /// selectors of the same name and type together.
+ SelectorMap SelectorTable;
+
+ /// Selectors related to memory management. When compiling in GC mode, we
+ /// omit these.
+ Selector RetainSel, ReleaseSel, AutoreleaseSel;
+ /// Runtime functions used for memory management in GC mode. Note that clang
+ /// supports code generation for calling these functions, but neither GNU
+ /// runtime actually supports this API properly yet.
+ LazyRuntimeFunction IvarAssignFn, StrongCastAssignFn, MemMoveFn, WeakReadFn,
+ WeakAssignFn, GlobalAssignFn;
+
+ typedef std::pair<std::string, std::string> ClassAliasPair;
+ /// All classes that have aliases set for them.
+ std::vector<ClassAliasPair> ClassAliases;
+
+protected:
+ /// Function used for throwing Objective-C exceptions.
+ LazyRuntimeFunction ExceptionThrowFn;
+ /// Function used for rethrowing exceptions, used at the end of @finally or
+  /// @synchronized blocks.
+ LazyRuntimeFunction ExceptionReThrowFn;
+ /// Function called when entering a catch function. This is required for
+ /// differentiating Objective-C exceptions and foreign exceptions.
+ LazyRuntimeFunction EnterCatchFn;
+ /// Function called when exiting from a catch block. Used to do exception
+ /// cleanup.
+ LazyRuntimeFunction ExitCatchFn;
+  /// Function called when entering an @synchronized block. Acquires the lock.
+ LazyRuntimeFunction SyncEnterFn;
+  /// Function called when exiting an @synchronized block. Releases the lock.
+ LazyRuntimeFunction SyncExitFn;
+
+private:
+
+ /// Function called if fast enumeration detects that the collection is
+ /// modified during the update.
+ LazyRuntimeFunction EnumerationMutationFn;
+ /// Function for implementing synthesized property getters that return an
+ /// object.
+ LazyRuntimeFunction GetPropertyFn;
+ /// Function for implementing synthesized property setters that return an
+ /// object.
+ LazyRuntimeFunction SetPropertyFn;
+ /// Function used for non-object declared property getters.
+ LazyRuntimeFunction GetStructPropertyFn;
+ /// Function used for non-object declared property setters.
+ LazyRuntimeFunction SetStructPropertyFn;
+
+ /// The version of the runtime that this class targets. Must match the
+ /// version in the runtime.
+ int RuntimeVersion;
+ /// The version of the protocol class. Used to differentiate between ObjC1
+  /// and ObjC2 protocols. Objective-C 1 protocols cannot contain optional
+  /// components and cannot contain declared properties. We always emit
+ /// Objective-C 2 property structures, but we have to pretend that they're
+ /// Objective-C 1 property structures when targeting the GCC runtime or it
+ /// will abort.
+ const int ProtocolVersion;
+private:
+ /// Generates an instance variable list structure. This is a structure
+ /// containing a size and an array of structures containing instance variable
+ /// metadata. This is used purely for introspection in the fragile ABI. In
+ /// the non-fragile ABI, it's used for instance variable fixup.
+ llvm::Constant *GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
+ ArrayRef<llvm::Constant *> IvarTypes,
+ ArrayRef<llvm::Constant *> IvarOffsets);
+ /// Generates a method list structure. This is a structure containing a size
+ /// and an array of structures containing method metadata.
+ ///
+ /// This structure is used by both classes and categories, and contains a next
+ /// pointer allowing them to be chained together in a linked list.
+ llvm::Constant *GenerateMethodList(const StringRef &ClassName,
+ const StringRef &CategoryName,
+ ArrayRef<Selector> MethodSels,
+ ArrayRef<llvm::Constant *> MethodTypes,
+ bool isClassMethodList);
+ /// Emits an empty protocol. This is used for @protocol() where no protocol
+ /// is found. The runtime will (hopefully) fix up the pointer to refer to the
+ /// real protocol.
+ llvm::Constant *GenerateEmptyProtocol(const std::string &ProtocolName);
+ /// Generates a list of property metadata structures. This follows the same
+ /// pattern as method and instance variable metadata lists.
+ llvm::Constant *GeneratePropertyList(const ObjCImplementationDecl *OID,
+ SmallVectorImpl<Selector> &InstanceMethodSels,
+ SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes);
+ /// Generates a list of referenced protocols. Classes, categories, and
+ /// protocols all use this structure.
+ llvm::Constant *GenerateProtocolList(ArrayRef<std::string> Protocols);
+ /// To ensure that all protocols are seen by the runtime, we add a category on
+ /// a class defined in the runtime, declaring no methods, but adopting the
+ /// protocols. This is a horribly ugly hack, but it allows us to collect all
+ /// of the protocols without changing the ABI.
+ void GenerateProtocolHolderCategory(void);
+ /// Generates a class structure.
+ llvm::Constant *GenerateClassStructure(
+ llvm::Constant *MetaClass,
+ llvm::Constant *SuperClass,
+ unsigned info,
+ const char *Name,
+ llvm::Constant *Version,
+ llvm::Constant *InstanceSize,
+ llvm::Constant *IVars,
+ llvm::Constant *Methods,
+ llvm::Constant *Protocols,
+ llvm::Constant *IvarOffsets,
+ llvm::Constant *Properties,
+ llvm::Constant *StrongIvarBitmap,
+ llvm::Constant *WeakIvarBitmap,
+ bool isMeta=false);
+ /// Generates a method list. This is used by protocols to define the required
+ /// and optional methods.
+ llvm::Constant *GenerateProtocolMethodList(
+ ArrayRef<llvm::Constant *> MethodNames,
+ ArrayRef<llvm::Constant *> MethodTypes);
+ /// Returns a selector with the specified type encoding. An empty string is
+ /// used to return an untyped selector (with the types field set to NULL).
+ llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ const std::string &TypeEncoding, bool lval);
+ /// Returns the variable used to store the offset of an instance variable.
+ llvm::GlobalVariable *ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
+ /// Emits a reference to a class. This lets the linker report an error if
+ /// there is no class with the matching name.
+ void EmitClassRef(const std::string &className);
+ /// Emits a pointer to the named class.
+ llvm::Value *GetClassNamed(CGBuilderTy &Builder, const std::string &Name,
+ bool isWeak);
+protected:
+ /// Looks up the method for sending a message to the specified object. This
+ /// mechanism differs between the GCC and GNU runtimes, so this method must be
+ /// overridden in subclasses.
+ virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
+ llvm::Value *&Receiver,
+ llvm::Value *cmd,
+ llvm::MDNode *node) = 0;
+ /// Looks up the method for sending a message to a superclass. This
+ /// mechanism differs between the GCC and GNU runtimes, so this method must
+ /// be overridden in subclasses.
+ virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
+ llvm::Value *ObjCSuper,
+ llvm::Value *cmd) = 0;
+ /// Libobjc2 uses a bitfield representation where small(ish) bitfields are
+ /// stored in a 64-bit value with the low bit set to 1 and the remaining 63
+ /// bits set to their values, LSB first, while larger ones are stored in a
+ /// structure of this form:
+ ///
+ /// struct { int32_t length; int32_t values[length]; };
+ ///
+ /// The values in the array are stored in host-endian format, with the least
+ /// significant bit being assumed to come first in the bitfield. Therefore,
+ /// a bitfield with the 64th bit set will be (int64_t)&{ 2, [0, 1<<31] },
+ /// while a bitfield with the 63rd bit set will be (1<<63) | 1.
+ llvm::Constant *MakeBitField(ArrayRef<bool> bits);
+public:
+ CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
+ unsigned protocolClassVersion);
+
+ virtual llvm::Constant *GenerateConstantString(const StringLiteral *);
+
+ virtual RValue
+ GenerateMessageSend(CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method);
+ virtual RValue
+ GenerateMessageSendSuper(CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID);
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval = false);
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
+ *Method);
+ virtual llvm::Constant *GetEHType(QualType T);
+
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD);
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+ virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD);
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+ virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
+ virtual llvm::Function *ModuleInitFunction();
+ virtual llvm::Constant *GetPropertyGetFunction();
+ virtual llvm::Constant *GetPropertySetFunction();
+ virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy);
+ virtual llvm::Constant *GetSetStructFunction();
+ virtual llvm::Constant *GetCppAtomicObjectFunction();
+ virtual llvm::Constant *GetGetStructFunction();
+ virtual llvm::Constant *EnumerationMutationFunction();
+
+ virtual void EmitTryStmt(CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
+ virtual void EmitThrowStmt(CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ bool threadlocal=false);
+ virtual void EmitObjCIvarAssign(CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset);
+ virtual void EmitObjCStrongCastAssign(CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitGCMemmoveCollectable(CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ llvm::Value *Size);
+ virtual LValue EmitObjCValueForIvar(CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+ virtual llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder);
+ virtual llvm::Constant *BuildGCBlockLayout(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ return NULLPtr;
+ }
+
+ virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) {
+ return 0;
+ }
+};
+/// Class representing the legacy GCC Objective-C ABI. This is the default when
+/// -fobjc-nonfragile-abi is not specified.
+///
+/// The GCC ABI target actually generates code that is approximately compatible
+/// with the new GNUstep runtime ABI, but refrains from using any features that
+/// would not work with the GCC runtime. For example, clang always generates
+/// the extended form of the class structure, and the extra fields are simply
+/// ignored by GCC libobjc.
+class CGObjCGCC : public CGObjCGNU {
+ /// The GCC ABI message lookup function. Returns an IMP pointing to the
+ /// method implementation for this message.
+ LazyRuntimeFunction MsgLookupFn;
+ /// The GCC ABI superclass message lookup function. Takes a pointer to a
+ /// structure describing the receiver and the class, and a selector as
+ /// arguments. Returns the IMP for the corresponding method.
+ LazyRuntimeFunction MsgLookupSuperFn;
+protected:
+ virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
+ llvm::Value *&Receiver,
+ llvm::Value *cmd,
+ llvm::MDNode *node) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *args[] = {
+ EnforceType(Builder, Receiver, IdTy),
+ EnforceType(Builder, cmd, SelectorTy) };
+ llvm::CallSite imp = CGF.EmitCallOrInvoke(MsgLookupFn, args);
+ imp->setMetadata(msgSendMDKind, node);
+ return imp.getInstruction();
+ }
+ virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
+ llvm::Value *ObjCSuper,
+ llvm::Value *cmd) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
+ PtrToObjCSuperTy), cmd};
+ return Builder.CreateCall(MsgLookupSuperFn, lookupArgs);
+ }
+ public:
+ CGObjCGCC(CodeGenModule &Mod) : CGObjCGNU(Mod, 8, 2) {
+ // IMP objc_msg_lookup(id, SEL);
+ MsgLookupFn.init(&CGM, "objc_msg_lookup", IMPTy, IdTy, SelectorTy, NULL);
+ // IMP objc_msg_lookup_super(struct objc_super*, SEL);
+ MsgLookupSuperFn.init(&CGM, "objc_msg_lookup_super", IMPTy,
+ PtrToObjCSuperTy, SelectorTy, NULL);
+ }
+};
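+// Illustrative sketch (not emitted verbatim) of the two-step dispatch that the
+// GCC-ABI lookup above implements, expressed as C; the argument names are
+// assumptions:
+//
+//   IMP imp = objc_msg_lookup(receiver, selector);
+//   result = imp(receiver, selector, ...);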
+/// Class used when targeting the new GNUstep runtime ABI.
+class CGObjCGNUstep : public CGObjCGNU {
+ /// The slot lookup function. Returns a pointer to a cacheable structure
+ /// that contains (among other things) the IMP.
+ LazyRuntimeFunction SlotLookupFn;
+ /// The GNUstep ABI superclass message lookup function. Takes a pointer to
+ /// a structure describing the receiver and the class, and a selector as
+ /// arguments. Returns the slot for the corresponding method. Superclass
+ /// message lookup rarely changes, so this is a good caching opportunity.
+ LazyRuntimeFunction SlotLookupSuperFn;
+ /// Type of a slot structure pointer. This is returned by the various
+ /// lookup functions.
+ llvm::Type *SlotTy;
+ protected:
+ virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
+ llvm::Value *&Receiver,
+ llvm::Value *cmd,
+ llvm::MDNode *node) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Function *LookupFn = SlotLookupFn;
+
+ // Store the receiver on the stack so that we can reload it later
+ llvm::Value *ReceiverPtr = CGF.CreateTempAlloca(Receiver->getType());
+ Builder.CreateStore(Receiver, ReceiverPtr);
+
+ llvm::Value *self;
+
+ if (isa<ObjCMethodDecl>(CGF.CurCodeDecl)) {
+ self = CGF.LoadObjCSelf();
+ } else {
+ self = llvm::ConstantPointerNull::get(IdTy);
+ }
+
+ // The lookup function is guaranteed not to capture the receiver pointer.
+ LookupFn->setDoesNotCapture(1);
+
+ llvm::Value *args[] = {
+ EnforceType(Builder, ReceiverPtr, PtrToIdTy),
+ EnforceType(Builder, cmd, SelectorTy),
+ EnforceType(Builder, self, IdTy) };
+ llvm::CallSite slot = CGF.EmitCallOrInvoke(LookupFn, args);
+ slot.setOnlyReadsMemory();
+ slot->setMetadata(msgSendMDKind, node);
+
+ // Load the imp from the slot
+ llvm::Value *imp =
+ Builder.CreateLoad(Builder.CreateStructGEP(slot.getInstruction(), 4));
+
+ // The lookup function may have changed the receiver, so make sure we use
+ // the new one.
+ Receiver = Builder.CreateLoad(ReceiverPtr, true);
+ return imp;
+ }
+ virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
+ llvm::Value *ObjCSuper,
+ llvm::Value *cmd) {
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
+
+ llvm::CallInst *slot = Builder.CreateCall(SlotLookupSuperFn, lookupArgs);
+ slot->setOnlyReadsMemory();
+
+ return Builder.CreateLoad(Builder.CreateStructGEP(slot, 4));
+ }
+ public:
+ CGObjCGNUstep(CodeGenModule &Mod) : CGObjCGNU(Mod, 9, 3) {
+ llvm::StructType *SlotStructTy = llvm::StructType::get(PtrTy,
+ PtrTy, PtrTy, IntTy, IMPTy, NULL);
+ SlotTy = llvm::PointerType::getUnqual(SlotStructTy);
+ // Slot_t objc_msg_lookup_sender(id *receiver, SEL selector, id sender);
+ SlotLookupFn.init(&CGM, "objc_msg_lookup_sender", SlotTy, PtrToIdTy,
+ SelectorTy, IdTy, NULL);
+ // Slot_t objc_msg_lookup_super(struct objc_super*, SEL);
+ SlotLookupSuperFn.init(&CGM, "objc_slot_lookup_super", SlotTy,
+ PtrToObjCSuperTy, SelectorTy, NULL);
+ // If we're in ObjC++ mode, then we want to make use of the C++ exception
+ // handling runtime (__cxa_begin_catch / __cxa_end_catch and the unwinder's
+ // rethrow) for @catch blocks.
+ if (CGM.getLangOpts().CPlusPlus) {
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
+ // void *__cxa_begin_catch(void *e)
+ EnterCatchFn.init(&CGM, "__cxa_begin_catch", PtrTy, PtrTy, NULL);
+ // void __cxa_end_catch(void)
+ ExitCatchFn.init(&CGM, "__cxa_end_catch", VoidTy, NULL);
+ // void _Unwind_Resume_or_Rethrow(void*)
+ ExceptionReThrowFn.init(&CGM, "_Unwind_Resume_or_Rethrow", VoidTy, PtrTy, NULL);
+ }
+ }
+};
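+// For reference, the slot structure built above corresponds roughly to this C
+// layout; the field names are assumptions, only the position of the IMP
+// (index 4, the field loaded by the CreateStructGEP calls) comes from the code:
+//
+//   struct objc_slot {
+//     void *field0;
+//     void *field1;
+//     void *field2;
+//     int   field3;
+//     IMP   method;   // loaded by LookupIMP / LookupIMPSuper
+//   };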
+
+} // end anonymous namespace
+
+
+/// Emits a reference to a dummy variable which is emitted with each class.
+/// This ensures that a linker error will be generated when trying to link
+/// together modules where a referenced class is not defined.
+void CGObjCGNU::EmitClassRef(const std::string &className) {
+ std::string symbolRef = "__objc_class_ref_" + className;
+ // Don't emit two copies of the same symbol
+ if (TheModule.getGlobalVariable(symbolRef))
+ return;
+ std::string symbolName = "__objc_class_name_" + className;
+ llvm::GlobalVariable *ClassSymbol = TheModule.getGlobalVariable(symbolName);
+ if (!ClassSymbol) {
+ ClassSymbol = new llvm::GlobalVariable(TheModule, LongTy, false,
+ llvm::GlobalValue::ExternalLinkage, 0, symbolName);
+ }
+ new llvm::GlobalVariable(TheModule, ClassSymbol->getType(), true,
+ llvm::GlobalValue::WeakAnyLinkage, ClassSymbol, symbolRef);
+}
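+// For example (class name "Foo" is illustrative), the code above emits a weak
+// global "__objc_class_ref_Foo" initialised with the address of the external
+// symbol "__objc_class_name_Foo", so the link fails unless some module defines
+// the class.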
+
+static std::string SymbolNameForMethod(const StringRef &ClassName,
+ const StringRef &CategoryName, const Selector MethodName,
+ bool isClassMethod) {
+ std::string MethodNameColonStripped = MethodName.getAsString();
+ std::replace(MethodNameColonStripped.begin(), MethodNameColonStripped.end(),
+ ':', '_');
+ return (Twine(isClassMethod ? "_c_" : "_i_") + ClassName + "_" +
+ CategoryName + "_" + MethodNameColonStripped).str();
+}
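+// Worked examples (illustrative class, category, and selector names): the
+// instance method -initWithBar:baz: of class Foo with no category becomes
+// "_i_Foo__initWithBar_baz_", and the class method +sharedInstance of category
+// Extras on Foo becomes "_c_Foo_Extras_sharedInstance".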
+
+CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
+ unsigned protocolClassVersion)
+ : CGObjCRuntime(cgm), TheModule(CGM.getModule()),
+ VMContext(cgm.getLLVMContext()), ClassPtrAlias(0), MetaClassPtrAlias(0),
+ RuntimeVersion(runtimeABIVersion), ProtocolVersion(protocolClassVersion) {
+
+ msgSendMDKind = VMContext.getMDKindID("GNUObjCMessageSend");
+
+ CodeGenTypes &Types = CGM.getTypes();
+ IntTy = cast<llvm::IntegerType>(
+ Types.ConvertType(CGM.getContext().IntTy));
+ LongTy = cast<llvm::IntegerType>(
+ Types.ConvertType(CGM.getContext().LongTy));
+ SizeTy = cast<llvm::IntegerType>(
+ Types.ConvertType(CGM.getContext().getSizeType()));
+ PtrDiffTy = cast<llvm::IntegerType>(
+ Types.ConvertType(CGM.getContext().getPointerDiffType()));
+ BoolTy = CGM.getTypes().ConvertType(CGM.getContext().BoolTy);
+
+ Int8Ty = llvm::Type::getInt8Ty(VMContext);
+ // C string type. Used in lots of places.
+ PtrToInt8Ty = llvm::PointerType::getUnqual(Int8Ty);
+
+ Zeros[0] = llvm::ConstantInt::get(LongTy, 0);
+ Zeros[1] = Zeros[0];
+ NULLPtr = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+ // Get the selector Type.
+ QualType selTy = CGM.getContext().getObjCSelType();
+ if (QualType() == selTy) {
+ SelectorTy = PtrToInt8Ty;
+ } else {
+ SelectorTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(selTy));
+ }
+
+ PtrToIntTy = llvm::PointerType::getUnqual(IntTy);
+ PtrTy = PtrToInt8Ty;
+
+ Int32Ty = llvm::Type::getInt32Ty(VMContext);
+ Int64Ty = llvm::Type::getInt64Ty(VMContext);
+
+ IntPtrTy =
+ TheModule.getPointerSize() == llvm::Module::Pointer32 ? Int32Ty : Int64Ty;
+
+ // Object type
+ QualType UnqualIdTy = CGM.getContext().getObjCIdType();
+ ASTIdTy = CanQualType();
+ if (UnqualIdTy != QualType()) {
+ ASTIdTy = CGM.getContext().getCanonicalType(UnqualIdTy);
+ IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
+ } else {
+ IdTy = PtrToInt8Ty;
+ }
+ PtrToIdTy = llvm::PointerType::getUnqual(IdTy);
+
+ ObjCSuperTy = llvm::StructType::get(IdTy, IdTy, NULL);
+ PtrToObjCSuperTy = llvm::PointerType::getUnqual(ObjCSuperTy);
+
+ llvm::Type *VoidTy = llvm::Type::getVoidTy(VMContext);
+
+ // void objc_exception_throw(id);
+ ExceptionThrowFn.init(&CGM, "objc_exception_throw", VoidTy, IdTy, NULL);
+ ExceptionReThrowFn.init(&CGM, "objc_exception_throw", VoidTy, IdTy, NULL);
+ // int objc_sync_enter(id);
+ SyncEnterFn.init(&CGM, "objc_sync_enter", IntTy, IdTy, NULL);
+ // int objc_sync_exit(id);
+ SyncExitFn.init(&CGM, "objc_sync_exit", IntTy, IdTy, NULL);
+
+ // void objc_enumerationMutation (id)
+ EnumerationMutationFn.init(&CGM, "objc_enumerationMutation", VoidTy,
+ IdTy, NULL);
+
+ // id objc_getProperty(id, SEL, ptrdiff_t, BOOL)
+ GetPropertyFn.init(&CGM, "objc_getProperty", IdTy, IdTy, SelectorTy,
+ PtrDiffTy, BoolTy, NULL);
+ // void objc_setProperty(id, SEL, ptrdiff_t, id, BOOL, BOOL)
+ SetPropertyFn.init(&CGM, "objc_setProperty", VoidTy, IdTy, SelectorTy,
+ PtrDiffTy, IdTy, BoolTy, BoolTy, NULL);
+ // void objc_getPropertyStruct(void*, void*, ptrdiff_t, BOOL, BOOL)
+ GetStructPropertyFn.init(&CGM, "objc_getPropertyStruct", VoidTy, PtrTy, PtrTy,
+ PtrDiffTy, BoolTy, BoolTy, NULL);
+ // void objc_setPropertyStruct(void*, void*, ptrdiff_t, BOOL, BOOL)
+ SetStructPropertyFn.init(&CGM, "objc_setPropertyStruct", VoidTy, PtrTy, PtrTy,
+ PtrDiffTy, BoolTy, BoolTy, NULL);
+
+ // IMP type
+ llvm::Type *IMPArgs[] = { IdTy, SelectorTy };
+ IMPTy = llvm::PointerType::getUnqual(llvm::FunctionType::get(IdTy, IMPArgs,
+ true));
+
+ const LangOptions &Opts = CGM.getLangOpts();
+ if ((Opts.getGC() != LangOptions::NonGC) || Opts.ObjCAutoRefCount)
+ RuntimeVersion = 10;
+
+ // Don't bother initialising the GC stuff unless we're compiling in GC mode
+ if (Opts.getGC() != LangOptions::NonGC) {
+ // This is a bit of a hack. We should sort this out by having a proper
+ // CGObjCGNUstep subclass for GC, but we may want to really support the old
+ // ABI and GC added in ObjectiveC2.framework, so we fudge it a bit for now.
+ // Get selectors needed in GC mode
+ RetainSel = GetNullarySelector("retain", CGM.getContext());
+ ReleaseSel = GetNullarySelector("release", CGM.getContext());
+ AutoreleaseSel = GetNullarySelector("autorelease", CGM.getContext());
+
+ // Get functions needed in GC mode
+
+ // id objc_assign_ivar(id, id, ptrdiff_t);
+ IvarAssignFn.init(&CGM, "objc_assign_ivar", IdTy, IdTy, IdTy, PtrDiffTy,
+ NULL);
+ // id objc_assign_strongCast (id, id*)
+ StrongCastAssignFn.init(&CGM, "objc_assign_strongCast", IdTy, IdTy,
+ PtrToIdTy, NULL);
+ // id objc_assign_global(id, id*);
+ GlobalAssignFn.init(&CGM, "objc_assign_global", IdTy, IdTy, PtrToIdTy,
+ NULL);
+ // id objc_assign_weak(id, id*);
+ WeakAssignFn.init(&CGM, "objc_assign_weak", IdTy, IdTy, PtrToIdTy, NULL);
+ // id objc_read_weak(id*);
+ WeakReadFn.init(&CGM, "objc_read_weak", IdTy, PtrToIdTy, NULL);
+ // void *objc_memmove_collectable(void*, void *, size_t);
+ MemMoveFn.init(&CGM, "objc_memmove_collectable", PtrTy, PtrTy, PtrTy,
+ SizeTy, NULL);
+ }
+}
+
+llvm::Value *CGObjCGNU::GetClassNamed(CGBuilderTy &Builder,
+ const std::string &Name,
+ bool isWeak) {
+ llvm::Value *ClassName = CGM.GetAddrOfConstantCString(Name);
+ // With the incompatible ABI, this will need to be replaced with a direct
+ // reference to the class symbol. For the compatible nonfragile ABI we are
+ // still performing this lookup at run time but emitting the symbol for the
+ // class externally so that we can make the switch later.
+ //
+ // Libobjc2 contains an LLVM pass that replaces calls to objc_lookup_class
+ // with memoized versions or with static references if it's safe to do so.
+ if (!isWeak)
+ EmitClassRef(Name);
+ ClassName = Builder.CreateStructGEP(ClassName, 0);
+
+ llvm::Constant *ClassLookupFn =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, PtrToInt8Ty, true),
+ "objc_lookup_class");
+ return Builder.CreateCall(ClassLookupFn, ClassName);
+}
+
+// This has to perform the lookup every time, since posing and related
+// techniques can modify the name -> class mapping.
+llvm::Value *CGObjCGNU::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID) {
+ return GetClassNamed(Builder, OID->getNameAsString(), OID->isWeakImported());
+}
+llvm::Value *CGObjCGNU::EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder) {
+ return GetClassNamed(Builder, "NSAutoreleasePool", false);
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel,
+ const std::string &TypeEncoding, bool lval) {
+
+ SmallVector<TypedSelector, 2> &Types = SelectorTable[Sel];
+ llvm::GlobalAlias *SelValue = 0;
+
+
+ for (SmallVectorImpl<TypedSelector>::iterator i = Types.begin(),
+ e = Types.end() ; i!=e ; i++) {
+ if (i->first == TypeEncoding) {
+ SelValue = i->second;
+ break;
+ }
+ }
+ if (0 == SelValue) {
+ SelValue = new llvm::GlobalAlias(SelectorTy,
+ llvm::GlobalValue::PrivateLinkage,
+ ".objc_selector_"+Sel.getAsString(), NULL,
+ &TheModule);
+ Types.push_back(TypedSelector(TypeEncoding, SelValue));
+ }
+
+ if (lval) {
+ llvm::Value *tmp = Builder.CreateAlloca(SelValue->getType());
+ Builder.CreateStore(SelValue, tmp);
+ return tmp;
+ }
+ return SelValue;
+}
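+// Illustrative: the first lookup of @selector(count) creates a private alias
+// named ".objc_selector_count" and records it in SelectorTable; as the class
+// comment notes, the alias is only a placeholder that is replaced with the
+// real selector value when the module's selector table is emitted.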
+
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval) {
+ return GetSelector(Builder, Sel, std::string(), lval);
+}
+
+llvm::Value *CGObjCGNU::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
+ *Method) {
+ std::string SelTypes;
+ CGM.getContext().getObjCEncodingForMethodDecl(Method, SelTypes);
+ return GetSelector(Builder, Method->getSelector(), SelTypes, false);
+}
+
+llvm::Constant *CGObjCGNU::GetEHType(QualType T) {
+ if (!CGM.getLangOpts().CPlusPlus) {
+ if (T->isObjCIdType()
+ || T->isObjCQualifiedIdType()) {
+ // With the old ABI, there was only one kind of catchall, which broke
+ // foreign exceptions. With the new ABI, we use __objc_id_typeinfo as
+ // a pointer indicating object catchalls, and NULL to indicate real
+ // catchalls
+ if (CGM.getLangOpts().ObjCNonFragileABI) {
+ return MakeConstantString("@id");
+ } else {
+ return 0;
+ }
+ }
+
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *OPT =
+ T->getAs<ObjCObjectPointerType>();
+ assert(OPT && "Invalid @catch type.");
+ const ObjCInterfaceDecl *IDecl =
+ OPT->getObjectType()->getInterface();
+ assert(IDecl && "Invalid @catch type.");
+ return MakeConstantString(IDecl->getIdentifier()->getName());
+ }
+ // For Objective-C++, we want to provide the ability to catch both C++ and
+ // Objective-C objects in the same function.
+
+ // There's a particular fixed type info for 'id'.
+ if (T->isObjCIdType() ||
+ T->isObjCQualifiedIdType()) {
+ llvm::Constant *IDEHType =
+ CGM.getModule().getGlobalVariable("__objc_id_type_info");
+ if (!IDEHType)
+ IDEHType =
+ new llvm::GlobalVariable(CGM.getModule(), PtrToInt8Ty,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, "__objc_id_type_info");
+ return llvm::ConstantExpr::getBitCast(IDEHType, PtrToInt8Ty);
+ }
+
+ const ObjCObjectPointerType *PT =
+ T->getAs<ObjCObjectPointerType>();
+ assert(PT && "Invalid @catch type.");
+ const ObjCInterfaceType *IT = PT->getInterfaceType();
+ assert(IT && "Invalid @catch type.");
+ std::string className = IT->getDecl()->getIdentifier()->getName();
+
+ std::string typeinfoName = "__objc_eh_typeinfo_" + className;
+
+ // Return the existing typeinfo if it exists
+ llvm::Constant *typeinfo = TheModule.getGlobalVariable(typeinfoName);
+ if (typeinfo)
+ return llvm::ConstantExpr::getBitCast(typeinfo, PtrToInt8Ty);
+
+ // Otherwise create it.
+
+ // vtable for gnustep::libobjc::__objc_class_type_info
+ // It's quite ugly to hard-code this. Ideally we'd generate it using the host
+ // platform's name mangling.
+ const char *vtableName = "_ZTVN7gnustep7libobjc22__objc_class_type_infoE";
+ llvm::Constant *Vtable = TheModule.getGlobalVariable(vtableName);
+ if (!Vtable) {
+ Vtable = new llvm::GlobalVariable(TheModule, PtrToInt8Ty, true,
+ llvm::GlobalValue::ExternalLinkage, 0, vtableName);
+ }
+ llvm::Constant *Two = llvm::ConstantInt::get(IntTy, 2);
+ Vtable = llvm::ConstantExpr::getGetElementPtr(Vtable, Two);
+ Vtable = llvm::ConstantExpr::getBitCast(Vtable, PtrToInt8Ty);
+
+ llvm::Constant *typeName =
+ ExportUniqueString(className, "__objc_eh_typename_");
+
+ std::vector<llvm::Constant*> fields;
+ fields.push_back(Vtable);
+ fields.push_back(typeName);
+ llvm::Constant *TI =
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
+ NULL), fields, "__objc_eh_typeinfo_" + className,
+ llvm::GlobalValue::LinkOnceODRLinkage);
+ return llvm::ConstantExpr::getBitCast(TI, PtrToInt8Ty);
+}
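+// Illustrative: for @catch (Foo *err) in Objective-C++ mode, the code above
+// emits a linkonce_odr global named "__objc_eh_typeinfo_Foo" containing two
+// pointers: the __objc_class_type_info vtable offset by two pointer slots, and
+// the exported class-name string "Foo", i.e. a minimal type_info for the class.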
+
+/// Generate an NSConstantString object.
+llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
+
+ std::string Str = SL->getString().str();
+
+ // Look for an existing one
+ llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str);
+ if (old != ObjCStrings.end())
+ return old->getValue();
+
+ StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
+
+ if (StringClass.empty()) StringClass = "NXConstantString";
+
+ std::string Sym = "_OBJC_CLASS_";
+ Sym += StringClass;
+
+ llvm::Constant *isa = TheModule.getNamedGlobal(Sym);
+
+ if (!isa)
+ isa = new llvm::GlobalVariable(TheModule, IdTy, /* isConstant */false,
+ llvm::GlobalValue::ExternalWeakLinkage, 0, Sym);
+ else if (isa->getType() != PtrToIdTy)
+ isa = llvm::ConstantExpr::getBitCast(isa, PtrToIdTy);
+
+ std::vector<llvm::Constant*> Ivars;
+ Ivars.push_back(isa);
+ Ivars.push_back(MakeConstantString(Str));
+ Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
+ llvm::Constant *ObjCStr = MakeGlobal(
+ llvm::StructType::get(PtrToIdTy, PtrToInt8Ty, IntTy, NULL),
+ Ivars, ".objc_str");
+ ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty);
+ ObjCStrings[Str] = ObjCStr;
+ ConstantStrings.push_back(ObjCStr);
+ return ObjCStr;
+}
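+// The constant string emitted above is roughly this C layout (field names are
+// illustrative); the isa symbol defaults to "_OBJC_CLASS_NXConstantString"
+// unless a different constant-string class is set in the language options:
+//
+//   struct { Class isa; const char *str; int length; };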
+
+/// Generates a message send where the super is the receiver. This is a
+/// message send to self with special delivery semantics indicating which
+/// class's method should be called.
+RValue
+CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ CGBuilderTy &Builder = CGF.Builder;
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
+ if (Sel == RetainSel || Sel == AutoreleaseSel) {
+ return RValue::get(EnforceType(Builder, Receiver,
+ CGM.getTypes().ConvertType(ResultType)));
+ }
+ if (Sel == ReleaseSel) {
+ return RValue::get(0);
+ }
+ }
+
+ llvm::Value *cmd = GetSelector(Builder, Sel);
+
+
+ CallArgList ActualArgs;
+
+ ActualArgs.add(RValue::get(EnforceType(Builder, Receiver, IdTy)), ASTIdTy);
+ ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
+ ActualArgs.addFrom(CallArgs);
+
+ MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
+
+ llvm::Value *ReceiverClass = 0;
+ if (isCategoryImpl) {
+ llvm::Constant *classLookupFunction = 0;
+ if (IsClassMessage) {
+ classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ IdTy, PtrTy, true), "objc_get_meta_class");
+ } else {
+ classLookupFunction = CGM.CreateRuntimeFunction(llvm::FunctionType::get(
+ IdTy, PtrTy, true), "objc_get_class");
+ }
+ ReceiverClass = Builder.CreateCall(classLookupFunction,
+ MakeConstantString(Class->getNameAsString()));
+ } else {
+ // Set up global aliases for the metaclass or class pointer if they do not
+ // already exist. These are forward references that will be set to
+ // pointers to the class and metaclass structure created for the runtime
+ // load function. To send a message to super, we look up the value of the
+ // super_class pointer from either the class or metaclass structure.
+ if (IsClassMessage) {
+ if (!MetaClassPtrAlias) {
+ MetaClassPtrAlias = new llvm::GlobalAlias(IdTy,
+ llvm::GlobalValue::InternalLinkage, ".objc_metaclass_ref" +
+ Class->getNameAsString(), NULL, &TheModule);
+ }
+ ReceiverClass = MetaClassPtrAlias;
+ } else {
+ if (!ClassPtrAlias) {
+ ClassPtrAlias = new llvm::GlobalAlias(IdTy,
+ llvm::GlobalValue::InternalLinkage, ".objc_class_ref" +
+ Class->getNameAsString(), NULL, &TheModule);
+ }
+ ReceiverClass = ClassPtrAlias;
+ }
+ }
+ // Cast the pointer to a simplified version of the class structure
+ ReceiverClass = Builder.CreateBitCast(ReceiverClass,
+ llvm::PointerType::getUnqual(
+ llvm::StructType::get(IdTy, IdTy, NULL)));
+ // Get the superclass pointer
+ ReceiverClass = Builder.CreateStructGEP(ReceiverClass, 1);
+ // Load the superclass pointer
+ ReceiverClass = Builder.CreateLoad(ReceiverClass);
+ // Construct the structure used to look up the IMP
+ llvm::StructType *ObjCSuperTy = llvm::StructType::get(
+ Receiver->getType(), IdTy, NULL);
+ llvm::Value *ObjCSuper = Builder.CreateAlloca(ObjCSuperTy);
+
+ Builder.CreateStore(Receiver, Builder.CreateStructGEP(ObjCSuper, 0));
+ Builder.CreateStore(ReceiverClass, Builder.CreateStructGEP(ObjCSuper, 1));
+
+ ObjCSuper = EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy);
+
+ // Get the IMP
+ llvm::Value *imp = LookupIMPSuper(CGF, ObjCSuper, cmd);
+ imp = EnforceType(Builder, imp, MSI.MessengerType);
+
+ llvm::Value *impMD[] = {
+ llvm::MDString::get(VMContext, Sel.getAsString()),
+ llvm::MDString::get(VMContext, Class->getSuperClass()->getNameAsString()),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), IsClassMessage)
+ };
+ llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
+
+ llvm::Instruction *call;
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs, 0, &call);
+ call->setMetadata(msgSendMDKind, node);
+ return msgRet;
+}
+
+/// Generate code for a message send expression.
+RValue
+CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // Strip out message sends to retain / release in GC mode
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
+ if (Sel == RetainSel || Sel == AutoreleaseSel) {
+ return RValue::get(EnforceType(Builder, Receiver,
+ CGM.getTypes().ConvertType(ResultType)));
+ }
+ if (Sel == ReleaseSel) {
+ return RValue::get(0);
+ }
+ }
+
+ // If the return type is something that goes in an integer register, the
+ // runtime will handle 0 returns. For other cases, we fill in the 0 value
+ // ourselves.
+ //
+ // The language spec says the result of this kind of message send is
+ // undefined, but lots of people seem to have forgotten to read that
+ // paragraph and insist on sending messages to nil that have structure
+ // returns. With GCC, this generates a random return value (whatever happens
+ // to be on the stack / in those registers at the time) on most platforms,
+ // and generates an illegal instruction trap on SPARC. With LLVM it corrupts
+ // the stack.
+ bool isPointerSizedReturn = (ResultType->isAnyPointerType() ||
+ ResultType->isIntegralOrEnumerationType() || ResultType->isVoidType());
+
+ llvm::BasicBlock *startBB = 0;
+ llvm::BasicBlock *messageBB = 0;
+ llvm::BasicBlock *continueBB = 0;
+
+ if (!isPointerSizedReturn) {
+ startBB = Builder.GetInsertBlock();
+ messageBB = CGF.createBasicBlock("msgSend");
+ continueBB = CGF.createBasicBlock("continue");
+
+ llvm::Value *isNil = Builder.CreateICmpEQ(Receiver,
+ llvm::Constant::getNullValue(Receiver->getType()));
+ Builder.CreateCondBr(isNil, continueBB, messageBB);
+ CGF.EmitBlock(messageBB);
+ }
+
+ IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
+ llvm::Value *cmd;
+ if (Method)
+ cmd = GetSelector(Builder, Method);
+ else
+ cmd = GetSelector(Builder, Sel);
+ cmd = EnforceType(Builder, cmd, SelectorTy);
+ Receiver = EnforceType(Builder, Receiver, IdTy);
+
+ llvm::Value *impMD[] = {
+ llvm::MDString::get(VMContext, Sel.getAsString()),
+ llvm::MDString::get(VMContext, Class ? Class->getNameAsString() :""),
+ llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), Class!=0)
+ };
+ llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
+
+ CallArgList ActualArgs;
+ ActualArgs.add(RValue::get(Receiver), ASTIdTy);
+ ActualArgs.add(RValue::get(cmd), CGF.getContext().getObjCSelType());
+ ActualArgs.addFrom(CallArgs);
+
+ MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
+
+ // Get the IMP to call
+ llvm::Value *imp;
+
+ // If we have non-legacy dispatch specified, we try using the objc_msgSend()
+ // functions. These are not supported on all platforms (or all runtimes on a
+ // given platform), so we only use them when a non-legacy dispatch method
+ // has been explicitly requested.
+ switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
+ case CodeGenOptions::Legacy:
+ imp = LookupIMP(CGF, Receiver, cmd, node);
+ break;
+ case CodeGenOptions::Mixed:
+ case CodeGenOptions::NonLegacy:
+ if (CGM.ReturnTypeUsesFPRet(ResultType)) {
+ imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ "objc_msgSend_fpret");
+ } else if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
+ // The actual types here don't matter - we're going to bitcast the
+ // function anyway
+ imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ "objc_msgSend_stret");
+ } else {
+ imp = CGM.CreateRuntimeFunction(llvm::FunctionType::get(IdTy, IdTy, true),
+ "objc_msgSend");
+ }
+ }
+
+ // Reset the receiver in case the lookup modified it
+ ActualArgs[0] = CallArg(RValue::get(Receiver), ASTIdTy, false);
+
+ imp = EnforceType(Builder, imp, MSI.MessengerType);
+
+ llvm::Instruction *call;
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs,
+ 0, &call);
+ call->setMetadata(msgSendMDKind, node);
+
+
+ if (!isPointerSizedReturn) {
+ messageBB = CGF.Builder.GetInsertBlock();
+ CGF.Builder.CreateBr(continueBB);
+ CGF.EmitBlock(continueBB);
+ if (msgRet.isScalar()) {
+ llvm::Value *v = msgRet.getScalarVal();
+ llvm::PHINode *phi = Builder.CreatePHI(v->getType(), 2);
+ phi->addIncoming(v, messageBB);
+ phi->addIncoming(llvm::Constant::getNullValue(v->getType()), startBB);
+ msgRet = RValue::get(phi);
+ } else if (msgRet.isAggregate()) {
+ llvm::Value *v = msgRet.getAggregateAddr();
+ llvm::PHINode *phi = Builder.CreatePHI(v->getType(), 2);
+ llvm::PointerType *RetTy = cast<llvm::PointerType>(v->getType());
+ llvm::AllocaInst *NullVal =
+ CGF.CreateTempAlloca(RetTy->getElementType(), "null");
+ CGF.InitTempAlloca(NullVal,
+ llvm::Constant::getNullValue(RetTy->getElementType()));
+ phi->addIncoming(v, messageBB);
+ phi->addIncoming(NullVal, startBB);
+ msgRet = RValue::getAggregate(phi);
+ } else /* isComplex() */ {
+ std::pair<llvm::Value*,llvm::Value*> v = msgRet.getComplexVal();
+ llvm::PHINode *phi = Builder.CreatePHI(v.first->getType(), 2);
+ phi->addIncoming(v.first, messageBB);
+ phi->addIncoming(llvm::Constant::getNullValue(v.first->getType()),
+ startBB);
+ llvm::PHINode *phi2 = Builder.CreatePHI(v.second->getType(), 2);
+ phi2->addIncoming(v.second, messageBB);
+ phi2->addIncoming(llvm::Constant::getNullValue(v.second->getType()),
+ startBB);
+ msgRet = RValue::getComplex(phi, phi2);
+ }
+ }
+ return msgRet;
+}
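+// Sketch of the nil-receiver handling above for returns that are not pointer
+// sized (illustrative pseudo-C rather than the emitted IR):
+//
+//   if (receiver == nil)
+//     result = /* zero-filled value of the return type */;
+//   else
+//     result = imp(receiver, cmd, args...);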
+
+/// Generates a MethodList. Used in the construction of objc_class and
+/// objc_category structures.
+llvm::Constant *CGObjCGNU::
+GenerateMethodList(const StringRef &ClassName,
+ const StringRef &CategoryName,
+ ArrayRef<Selector> MethodSels,
+ ArrayRef<llvm::Constant *> MethodTypes,
+ bool isClassMethodList) {
+ if (MethodSels.empty())
+ return NULLPtr;
+ // Get the method structure type.
+ llvm::StructType *ObjCMethodTy = llvm::StructType::get(
+ PtrToInt8Ty, // Really a selector, but the runtime creates it for us.
+ PtrToInt8Ty, // Method types
+ IMPTy, // Method pointer
+ NULL);
+ std::vector<llvm::Constant*> Methods;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = MethodTypes.size(); i < e; ++i) {
+ Elements.clear();
+ llvm::Constant *Method =
+ TheModule.getFunction(SymbolNameForMethod(ClassName, CategoryName,
+ MethodSels[i],
+ isClassMethodList));
+ assert(Method && "Can't generate metadata for method that doesn't exist");
+ llvm::Constant *C = MakeConstantString(MethodSels[i].getAsString());
+ Elements.push_back(C);
+ Elements.push_back(MethodTypes[i]);
+ Method = llvm::ConstantExpr::getBitCast(Method,
+ IMPTy);
+ Elements.push_back(Method);
+ Methods.push_back(llvm::ConstantStruct::get(ObjCMethodTy, Elements));
+ }
+
+ // Array of method structures
+ llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodTy,
+ Methods.size());
+ llvm::Constant *MethodArray = llvm::ConstantArray::get(ObjCMethodArrayTy,
+ Methods);
+
+ // Structure containing list pointer, array and array count
+ llvm::StructType *ObjCMethodListTy = llvm::StructType::create(VMContext);
+ llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(ObjCMethodListTy);
+ ObjCMethodListTy->setBody(
+ NextPtrTy,
+ IntTy,
+ ObjCMethodArrayTy,
+ NULL);
+
+ Methods.clear();
+ Methods.push_back(llvm::ConstantPointerNull::get(
+ llvm::PointerType::getUnqual(ObjCMethodListTy)));
+ Methods.push_back(llvm::ConstantInt::get(Int32Ty, MethodTypes.size()));
+ Methods.push_back(MethodArray);
+
+ // Create an instance of the structure
+ return MakeGlobal(ObjCMethodListTy, Methods, ".objc_method_list");
+}
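+// The emitted method list is roughly equivalent to this C layout (field names
+// are illustrative):
+//
+//   struct objc_method_list {
+//     struct objc_method_list *next;   // always NULL when emitted
+//     int count;
+//     struct { const char *sel; const char *types; IMP imp; } methods[count];
+//   };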
+
+/// Generates an IvarList. Used in the construction of an objc_class.
+llvm::Constant *CGObjCGNU::
+GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
+ ArrayRef<llvm::Constant *> IvarTypes,
+ ArrayRef<llvm::Constant *> IvarOffsets) {
+ if (IvarNames.size() == 0)
+ return NULLPtr;
+ // Get the ivar structure type.
+ llvm::StructType *ObjCIvarTy = llvm::StructType::get(
+ PtrToInt8Ty,
+ PtrToInt8Ty,
+ IntTy,
+ NULL);
+ std::vector<llvm::Constant*> Ivars;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = IvarNames.size() ; i < e ; i++) {
+ Elements.clear();
+ Elements.push_back(IvarNames[i]);
+ Elements.push_back(IvarTypes[i]);
+ Elements.push_back(IvarOffsets[i]);
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCIvarTy, Elements));
+ }
+
+ // Array of instance variable structures
+ llvm::ArrayType *ObjCIvarArrayTy = llvm::ArrayType::get(ObjCIvarTy,
+ IvarNames.size());
+
+
+ Elements.clear();
+ Elements.push_back(llvm::ConstantInt::get(IntTy, (int)IvarNames.size()));
+ Elements.push_back(llvm::ConstantArray::get(ObjCIvarArrayTy, Ivars));
+ // Structure containing array and array count
+ llvm::StructType *ObjCIvarListTy = llvm::StructType::get(IntTy,
+ ObjCIvarArrayTy,
+ NULL);
+
+ // Create an instance of the structure
+ return MakeGlobal(ObjCIvarListTy, Elements, ".objc_ivar_list");
+}
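+// The emitted ivar list is roughly equivalent to this C layout (field names
+// are illustrative):
+//
+//   struct objc_ivar_list {
+//     int count;
+//     struct { const char *name; const char *type; int offset; } ivars[count];
+//   };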
+
+/// Generate a class structure
+llvm::Constant *CGObjCGNU::GenerateClassStructure(
+ llvm::Constant *MetaClass,
+ llvm::Constant *SuperClass,
+ unsigned info,
+ const char *Name,
+ llvm::Constant *Version,
+ llvm::Constant *InstanceSize,
+ llvm::Constant *IVars,
+ llvm::Constant *Methods,
+ llvm::Constant *Protocols,
+ llvm::Constant *IvarOffsets,
+ llvm::Constant *Properties,
+ llvm::Constant *StrongIvarBitmap,
+ llvm::Constant *WeakIvarBitmap,
+ bool isMeta) {
+ // Set up the class structure
+ // Note: Several of these are char*s when they should be ids. This is
+ // because the runtime performs this translation on load.
+ //
+ // Fields marked New ABI are part of the GNUstep runtime. We emit them
+ // anyway; the classes will still work with the GNU runtime, where the extra
+ // fields are simply ignored.
+ llvm::StructType *ClassTy = llvm::StructType::get(
+ PtrToInt8Ty, // isa
+ PtrToInt8Ty, // super_class
+ PtrToInt8Ty, // name
+ LongTy, // version
+ LongTy, // info
+ LongTy, // instance_size
+ IVars->getType(), // ivars
+ Methods->getType(), // methods
+ // These are all filled in by the runtime, so we pretend that they are just
+ // opaque pointers.
+ PtrTy, // dtable
+ PtrTy, // subclass_list
+ PtrTy, // sibling_class
+ PtrTy, // protocols
+ PtrTy, // gc_object_type
+ // New ABI:
+ LongTy, // abi_version
+ IvarOffsets->getType(), // ivar_offsets
+ Properties->getType(), // properties
+ IntPtrTy, // strong_pointers
+ IntPtrTy, // weak_pointers
+ NULL);
+ llvm::Constant *Zero = llvm::ConstantInt::get(LongTy, 0);
+ // Fill in the structure
+ std::vector<llvm::Constant*> Elements;
+ Elements.push_back(llvm::ConstantExpr::getBitCast(MetaClass, PtrToInt8Ty));
+ Elements.push_back(SuperClass);
+ Elements.push_back(MakeConstantString(Name, ".class_name"));
+ Elements.push_back(Zero);
+ Elements.push_back(llvm::ConstantInt::get(LongTy, info));
+ if (isMeta) {
+ llvm::TargetData td(&TheModule);
+ Elements.push_back(
+ llvm::ConstantInt::get(LongTy,
+ td.getTypeSizeInBits(ClassTy) /
+ CGM.getContext().getCharWidth()));
+ } else
+ Elements.push_back(InstanceSize);
+ Elements.push_back(IVars);
+ Elements.push_back(Methods);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Elements.push_back(llvm::ConstantExpr::getBitCast(Protocols, PtrTy));
+ Elements.push_back(NULLPtr);
+ Elements.push_back(llvm::ConstantInt::get(LongTy, 1));
+ Elements.push_back(IvarOffsets);
+ Elements.push_back(Properties);
+ Elements.push_back(StrongIvarBitmap);
+ Elements.push_back(WeakIvarBitmap);
+ // Create an instance of the structure
+ // This is now an externally visible symbol, so that we can speed up class
+ // messages in the next ABI. We may already have some weak references to
+ // this, so check and fix them properly.
+ std::string ClassSym((isMeta ? "_OBJC_METACLASS_": "_OBJC_CLASS_") +
+ std::string(Name));
+ llvm::GlobalVariable *ClassRef = TheModule.getNamedGlobal(ClassSym);
+ llvm::Constant *Class = MakeGlobal(ClassTy, Elements, ClassSym,
+ llvm::GlobalValue::ExternalLinkage);
+ if (ClassRef) {
+ ClassRef->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(Class,
+ ClassRef->getType()));
+ ClassRef->removeFromParent();
+ Class->setName(ClassSym);
+ }
+ return Class;
+}
+
+llvm::Constant *CGObjCGNU::
+GenerateProtocolMethodList(ArrayRef<llvm::Constant *> MethodNames,
+ ArrayRef<llvm::Constant *> MethodTypes) {
+ // Get the method structure type.
+ llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(
+ PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
+ PtrToInt8Ty,
+ NULL);
+ std::vector<llvm::Constant*> Methods;
+ std::vector<llvm::Constant*> Elements;
+ for (unsigned int i = 0, e = MethodTypes.size() ; i < e ; i++) {
+ Elements.clear();
+ Elements.push_back(MethodNames[i]);
+ Elements.push_back(MethodTypes[i]);
+ Methods.push_back(llvm::ConstantStruct::get(ObjCMethodDescTy, Elements));
+ }
+ llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodDescTy,
+ MethodNames.size());
+ llvm::Constant *Array = llvm::ConstantArray::get(ObjCMethodArrayTy,
+ Methods);
+ llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(
+ IntTy, ObjCMethodArrayTy, NULL);
+ Methods.clear();
+ Methods.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size()));
+ Methods.push_back(Array);
+ return MakeGlobal(ObjCMethodDescListTy, Methods, ".objc_method_list");
+}
+
+// Create the protocol list structure used in classes, categories and so on
+llvm::Constant *CGObjCGNU::GenerateProtocolList(ArrayRef<std::string> Protocols) {
+ llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
+ Protocols.size());
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(
+ PtrTy, // Should be a recursive pointer, but it's always NULL here.
+ SizeTy,
+ ProtocolArrayTy,
+ NULL);
+ std::vector<llvm::Constant*> Elements;
+ for (const std::string *iter = Protocols.begin(), *endIter = Protocols.end();
+ iter != endIter ; iter++) {
+ llvm::Constant *protocol = 0;
+ llvm::StringMap<llvm::Constant*>::iterator value =
+ ExistingProtocols.find(*iter);
+ if (value == ExistingProtocols.end()) {
+ protocol = GenerateEmptyProtocol(*iter);
+ } else {
+ protocol = value->getValue();
+ }
+ llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(protocol,
+ PtrToInt8Ty);
+ Elements.push_back(Ptr);
+ }
+ llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
+ Elements);
+ Elements.clear();
+ Elements.push_back(NULLPtr);
+ Elements.push_back(llvm::ConstantInt::get(LongTy, Protocols.size()));
+ Elements.push_back(ProtocolArray);
+ return MakeGlobal(ProtocolListTy, Elements, ".objc_protocol_list");
+}
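+// The emitted protocol list is roughly equivalent to this C layout (field
+// names are illustrative):
+//
+//   struct objc_protocol_list {
+//     struct objc_protocol_list *next;  // always NULL here
+//     size_t count;
+//     Protocol *list[count];
+//   };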
+
+llvm::Value *CGObjCGNU::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+ llvm::Value *protocol = ExistingProtocols[PD->getNameAsString()];
+ llvm::Type *T =
+ CGM.getTypes().ConvertType(CGM.getContext().getObjCProtoType());
+ return Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
+}
+
+llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
+ const std::string &ProtocolName) {
+ SmallVector<std::string, 0> EmptyStringVector;
+ SmallVector<llvm::Constant*, 0> EmptyConstantVector;
+
+ llvm::Constant *ProtocolList = GenerateProtocolList(EmptyStringVector);
+ llvm::Constant *MethodList =
+ GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector);
+ // Protocols are objects containing lists of the methods implemented and
+ // protocols adopted.
+ llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
+ PtrToInt8Ty,
+ ProtocolList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
+ MethodList->getType(),
+ NULL);
+ std::vector<llvm::Constant*> Elements;
+ // The isa pointer must be set to a magic number so the runtime knows it's
+ // the correct layout.
+ Elements.push_back(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy));
+ Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.push_back(ProtocolList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
+ Elements.push_back(MethodList);
+ return MakeGlobal(ProtocolTy, Elements, ".objc_protocol");
+}
+
+void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
+ ASTContext &Context = CGM.getContext();
+ std::string ProtocolName = PD->getNameAsString();
+
+ // Use the protocol definition, if there is one.
+ if (const ObjCProtocolDecl *Def = PD->getDefinition())
+ PD = Def;
+
+ SmallVector<std::string, 16> Protocols;
+ for (ObjCProtocolDecl::protocol_iterator PI = PD->protocol_begin(),
+ E = PD->protocol_end(); PI != E; ++PI)
+ Protocols.push_back((*PI)->getNameAsString());
+ SmallVector<llvm::Constant*, 16> InstanceMethodNames;
+ SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ SmallVector<llvm::Constant*, 16> OptionalInstanceMethodNames;
+ SmallVector<llvm::Constant*, 16> OptionalInstanceMethodTypes;
+ for (ObjCProtocolDecl::instmeth_iterator iter = PD->instmeth_begin(),
+ E = PD->instmeth_end(); iter != E; iter++) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(*iter, TypeStr);
+ if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
+ InstanceMethodNames.push_back(
+ MakeConstantString((*iter)->getSelector().getAsString()));
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ } else {
+ OptionalInstanceMethodNames.push_back(
+ MakeConstantString((*iter)->getSelector().getAsString()));
+ OptionalInstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+ }
+ // Collect information about class methods:
+ SmallVector<llvm::Constant*, 16> ClassMethodNames;
+ SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ SmallVector<llvm::Constant*, 16> OptionalClassMethodNames;
+ SmallVector<llvm::Constant*, 16> OptionalClassMethodTypes;
+ for (ObjCProtocolDecl::classmeth_iterator
+ iter = PD->classmeth_begin(), endIter = PD->classmeth_end();
+ iter != endIter ; iter++) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+ if ((*iter)->getImplementationControl() == ObjCMethodDecl::Optional) {
+ ClassMethodNames.push_back(
+ MakeConstantString((*iter)->getSelector().getAsString()));
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ } else {
+ OptionalClassMethodNames.push_back(
+ MakeConstantString((*iter)->getSelector().getAsString()));
+ OptionalClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+ }
+
+ llvm::Constant *ProtocolList = GenerateProtocolList(Protocols);
+ llvm::Constant *InstanceMethodList =
+ GenerateProtocolMethodList(InstanceMethodNames, InstanceMethodTypes);
+ llvm::Constant *ClassMethodList =
+ GenerateProtocolMethodList(ClassMethodNames, ClassMethodTypes);
+ llvm::Constant *OptionalInstanceMethodList =
+ GenerateProtocolMethodList(OptionalInstanceMethodNames,
+ OptionalInstanceMethodTypes);
+ llvm::Constant *OptionalClassMethodList =
+ GenerateProtocolMethodList(OptionalClassMethodNames,
+ OptionalClassMethodTypes);
+
+ // Property metadata: name, attributes, isSynthesized, setter name, setter
+ // types, getter name, getter types.
+ // The isSynthesized value is always set to 0 in a protocol. It exists to
+ // simplify the runtime library by allowing it to use the same data
+ // structures for protocol metadata everywhere.
+ llvm::StructType *PropertyMetadataTy = llvm::StructType::get(
+ PtrToInt8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty,
+ PtrToInt8Ty, NULL);
+ std::vector<llvm::Constant*> Properties;
+ std::vector<llvm::Constant*> OptionalProperties;
+
+ // Add all of the property methods that need adding to the method list and
+ // to the property metadata list.
+ for (ObjCContainerDecl::prop_iterator
+ iter = PD->prop_begin(), endIter = PD->prop_end();
+ iter != endIter ; iter++) {
+ std::vector<llvm::Constant*> Fields;
+ ObjCPropertyDecl *property = (*iter);
+
+ Fields.push_back(MakeConstantString(property->getNameAsString()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty,
+ property->getPropertyAttributes()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty, 0));
+ if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ InstanceMethodTypes.push_back(TypeEncoding);
+ Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ InstanceMethodTypes.push_back(TypeEncoding);
+ Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (property->getPropertyImplementation() == ObjCPropertyDecl::Optional) {
+ OptionalProperties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ } else {
+ Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ }
+ }
+ llvm::Constant *PropertyArray = llvm::ConstantArray::get(
+ llvm::ArrayType::get(PropertyMetadataTy, Properties.size()), Properties);
+ llvm::Constant* PropertyListInitFields[] =
+ {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
+
+ llvm::Constant *PropertyListInit =
+ llvm::ConstantStruct::getAnon(PropertyListInitFields);
+ llvm::Constant *PropertyList = new llvm::GlobalVariable(TheModule,
+ PropertyListInit->getType(), false, llvm::GlobalValue::InternalLinkage,
+ PropertyListInit, ".objc_property_list");
+
+ llvm::Constant *OptionalPropertyArray =
+ llvm::ConstantArray::get(llvm::ArrayType::get(PropertyMetadataTy,
+ OptionalProperties.size()) , OptionalProperties);
+ llvm::Constant* OptionalPropertyListInitFields[] = {
+ llvm::ConstantInt::get(IntTy, OptionalProperties.size()), NULLPtr,
+ OptionalPropertyArray };
+
+ llvm::Constant *OptionalPropertyListInit =
+ llvm::ConstantStruct::getAnon(OptionalPropertyListInitFields);
+ llvm::Constant *OptionalPropertyList = new llvm::GlobalVariable(TheModule,
+ OptionalPropertyListInit->getType(), false,
+ llvm::GlobalValue::InternalLinkage, OptionalPropertyListInit,
+ ".objc_property_list");
+
+ // Protocols are objects containing lists of the methods implemented and
+ // protocols adopted.
+ llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
+ PtrToInt8Ty,
+ ProtocolList->getType(),
+ InstanceMethodList->getType(),
+ ClassMethodList->getType(),
+ OptionalInstanceMethodList->getType(),
+ OptionalClassMethodList->getType(),
+ PropertyList->getType(),
+ OptionalPropertyList->getType(),
+ NULL);
+ std::vector<llvm::Constant*> Elements;
+ // The isa pointer must be set to a magic number so the runtime knows it's
+ // the correct layout.
+ Elements.push_back(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy));
+ Elements.push_back(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.push_back(ProtocolList);
+ Elements.push_back(InstanceMethodList);
+ Elements.push_back(ClassMethodList);
+ Elements.push_back(OptionalInstanceMethodList);
+ Elements.push_back(OptionalClassMethodList);
+ Elements.push_back(PropertyList);
+ Elements.push_back(OptionalPropertyList);
+ ExistingProtocols[ProtocolName] =
+ llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolTy, Elements,
+ ".objc_protocol"), IdTy);
+}
+void CGObjCGNU::GenerateProtocolHolderCategory(void) {
+ // Collect information about instance methods
+ SmallVector<Selector, 1> MethodSels;
+ SmallVector<llvm::Constant*, 1> MethodTypes;
+
+ std::vector<llvm::Constant*> Elements;
+ const std::string ClassName = "__ObjC_Protocol_Holder_Ugly_Hack";
+ const std::string CategoryName = "AnotherHack";
+ Elements.push_back(MakeConstantString(CategoryName));
+ Elements.push_back(MakeConstantString(ClassName));
+ // Instance method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, MethodSels, MethodTypes, false), PtrTy));
+ // Class method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, MethodSels, MethodTypes, true), PtrTy));
+ // Protocol list
+ llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrTy,
+ ExistingProtocols.size());
+ llvm::StructType *ProtocolListTy = llvm::StructType::get(
+ PtrTy, // Should be a recursive pointer, but it's always NULL here.
+ SizeTy,
+ ProtocolArrayTy,
+ NULL);
+ std::vector<llvm::Constant*> ProtocolElements;
+ for (llvm::StringMapIterator<llvm::Constant*> iter =
+ ExistingProtocols.begin(), endIter = ExistingProtocols.end();
+ iter != endIter ; iter++) {
+ llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(iter->getValue(),
+ PtrTy);
+ ProtocolElements.push_back(Ptr);
+ }
+ llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
+ ProtocolElements);
+ ProtocolElements.clear();
+ ProtocolElements.push_back(NULLPtr);
+ ProtocolElements.push_back(llvm::ConstantInt::get(LongTy,
+ ExistingProtocols.size()));
+ ProtocolElements.push_back(ProtocolArray);
+ Elements.push_back(llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolListTy,
+ ProtocolElements, ".objc_protocol_list"), PtrTy));
+ Categories.push_back(llvm::ConstantExpr::getBitCast(
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
+ PtrTy, PtrTy, PtrTy, NULL), Elements), PtrTy));
+}
+
+/// Libobjc2 uses a bitfield representation where small(ish) bitfields are
+/// stored in a 64-bit value with the low bit set to 1 and the remaining 63
+/// bits set to their values, LSB first, while larger ones are stored in a
+/// structure of this form:
+///
+/// struct { int32_t length; int32_t values[length]; };
+///
+/// The values in the array are stored in host-endian format, with the least
+/// significant bit being assumed to come first in the bitfield. Therefore, a
+/// bitfield with the 64th bit set will be (int64_t)&{ 2, [0, 1<<31] }, while a
+/// bitfield with the 63rd bit set will be (1<<63) | 1.
+llvm::Constant *CGObjCGNU::MakeBitField(ArrayRef<bool> bits) {
+ int bitCount = bits.size();
+ int ptrBits =
+ (TheModule.getPointerSize() == llvm::Module::Pointer32) ? 32 : 64;
+ if (bitCount < ptrBits) {
+ uint64_t val = 1;
+ for (int i=0 ; i<bitCount ; ++i) {
+ if (bits[i]) val |= 1ULL<<(i+1);
+ }
+ return llvm::ConstantInt::get(IntPtrTy, val);
+ }
+ llvm::SmallVector<llvm::Constant*, 8> values;
+ int v=0;
+ while (v < bitCount) {
+ int32_t word = 0;
+ for (int i=0 ; (i<32) && (v<bitCount) ; ++i) {
+ if (bits[v]) word |= 1<<i;
+ v++;
+ }
+ values.push_back(llvm::ConstantInt::get(Int32Ty, word));
+ }
+ llvm::ArrayType *arrayTy = llvm::ArrayType::get(Int32Ty, values.size());
+ llvm::Constant *array = llvm::ConstantArray::get(arrayTy, values);
+ llvm::Constant *fields[2] = {
+ llvm::ConstantInt::get(Int32Ty, values.size()),
+ array };
+ llvm::Constant *GS = MakeGlobal(llvm::StructType::get(Int32Ty, arrayTy,
+ NULL), fields);
+ llvm::Constant *ptr = llvm::ConstantExpr::getPtrToInt(GS, IntPtrTy);
+ return ptr;
+}
+
+void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ std::string ClassName = OCD->getClassInterface()->getNameAsString();
+ std::string CategoryName = OCD->getNameAsString();
+ // Collect information about instance methods
+ SmallVector<Selector, 16> InstanceMethodSels;
+ SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ iter = OCD->instmeth_begin(), endIter = OCD->instmeth_end();
+ iter != endIter ; iter++) {
+ InstanceMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr);
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+
+ // Collect information about class methods
+ SmallVector<Selector, 16> ClassMethodSels;
+ SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ iter = OCD->classmeth_begin(), endIter = OCD->classmeth_end();
+ iter != endIter ; iter++) {
+ ClassMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForMethodDecl(*iter,TypeStr);
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+
+ // Collect the names of referenced protocols
+ SmallVector<std::string, 16> Protocols;
+ const ObjCCategoryDecl *CatDecl = OCD->getCategoryDecl();
+ const ObjCList<ObjCProtocolDecl> &Protos = CatDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protos.begin(),
+ E = Protos.end(); I != E; ++I)
+ Protocols.push_back((*I)->getNameAsString());
+
+ std::vector<llvm::Constant*> Elements;
+ Elements.push_back(MakeConstantString(CategoryName));
+ Elements.push_back(MakeConstantString(ClassName));
+ // Instance method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, InstanceMethodSels, InstanceMethodTypes,
+ false), PtrTy));
+ // Class method list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
+ ClassName, CategoryName, ClassMethodSels, ClassMethodTypes, true),
+ PtrTy));
+ // Protocol list
+ Elements.push_back(llvm::ConstantExpr::getBitCast(
+ GenerateProtocolList(Protocols), PtrTy));
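+ // The category structure emitted below is roughly (illustrative sketch of
+ // the GNU runtime layout this code assumes):
+ //   struct objc_category { const char *category_name; const char *class_name;
+ //                          struct objc_method_list *instance_methods;
+ //                          struct objc_method_list *class_methods;
+ //                          struct objc_protocol_list *protocols; };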
+ Categories.push_back(llvm::ConstantExpr::getBitCast(
+ MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
+ PtrTy, PtrTy, PtrTy, NULL), Elements), PtrTy));
+}
+
+llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OID,
+ SmallVectorImpl<Selector> &InstanceMethodSels,
+ SmallVectorImpl<llvm::Constant*> &InstanceMethodTypes) {
+ ASTContext &Context = CGM.getContext();
+ // Property metadata: name, attributes, isSynthesized, getter name, getter
+ // types, setter name, setter types.
+ llvm::StructType *PropertyMetadataTy = llvm::StructType::get(
+ PtrToInt8Ty, Int8Ty, Int8Ty, PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty,
+ PtrToInt8Ty, NULL);
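+ // As a C struct this is roughly (illustrative):
+ //   struct objc_property { const char *name; char attributes;
+ //                          char isSynthesized; const char *getter_name;
+ //                          const char *getter_types; const char *setter_name;
+ //                          const char *setter_types; };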
+ std::vector<llvm::Constant*> Properties;
+
+
+ // Add all of the property methods that need adding to the method list and
+ // to the property metadata list.
+ for (ObjCImplDecl::propimpl_iterator
+ iter = OID->propimpl_begin(), endIter = OID->propimpl_end();
+ iter != endIter ; iter++) {
+ std::vector<llvm::Constant*> Fields;
+ ObjCPropertyDecl *property = (*iter)->getPropertyDecl();
+ ObjCPropertyImplDecl *propertyImpl = *iter;
+ bool isSynthesized = (propertyImpl->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Synthesize);
+
+ Fields.push_back(MakeConstantString(property->getNameAsString()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty,
+ property->getPropertyAttributes()));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty, isSynthesized));
+ if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ if (isSynthesized) {
+ InstanceMethodTypes.push_back(TypeEncoding);
+ InstanceMethodSels.push_back(getter->getSelector());
+ }
+ Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+ llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
+ if (isSynthesized) {
+ InstanceMethodTypes.push_back(TypeEncoding);
+ InstanceMethodSels.push_back(setter->getSelector());
+ }
+ Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
+ Fields.push_back(TypeEncoding);
+ } else {
+ Fields.push_back(NULLPtr);
+ Fields.push_back(NULLPtr);
+ }
+ Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ }
+ llvm::ArrayType *PropertyArrayTy =
+ llvm::ArrayType::get(PropertyMetadataTy, Properties.size());
+ llvm::Constant *PropertyArray = llvm::ConstantArray::get(PropertyArrayTy,
+ Properties);
+ llvm::Constant* PropertyListInitFields[] =
+ {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
+
+ llvm::Constant *PropertyListInit =
+ llvm::ConstantStruct::getAnon(PropertyListInitFields);
+ return new llvm::GlobalVariable(TheModule, PropertyListInit->getType(), false,
+ llvm::GlobalValue::InternalLinkage, PropertyListInit,
+ ".objc_property_list");
+}
+
+void CGObjCGNU::RegisterAlias(const ObjCCompatibleAliasDecl *OAD) {
+ // Get the class declaration for which the alias is specified.
+ ObjCInterfaceDecl *ClassDecl =
+ const_cast<ObjCInterfaceDecl *>(OAD->getClassInterface());
+ std::string ClassName = ClassDecl->getNameAsString();
+ std::string AliasName = OAD->getNameAsString();
+ ClassAliases.push_back(ClassAliasPair(ClassName,AliasName));
+}
+
+void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
+ ASTContext &Context = CGM.getContext();
+
+ // Get the superclass name.
+ const ObjCInterfaceDecl * SuperClassDecl =
+ OID->getClassInterface()->getSuperClass();
+ std::string SuperClassName;
+ if (SuperClassDecl) {
+ SuperClassName = SuperClassDecl->getNameAsString();
+ EmitClassRef(SuperClassName);
+ }
+
+ // Get the class name
+ ObjCInterfaceDecl *ClassDecl =
+ const_cast<ObjCInterfaceDecl *>(OID->getClassInterface());
+ std::string ClassName = ClassDecl->getNameAsString();
+ // Emit the symbol that is used to generate linker errors if this class is
+ // referenced in other modules but not declared.
+ std::string classSymbolName = "__objc_class_name_" + ClassName;
+ if (llvm::GlobalVariable *symbol =
+ TheModule.getGlobalVariable(classSymbolName)) {
+ symbol->setInitializer(llvm::ConstantInt::get(LongTy, 0));
+ } else {
+ new llvm::GlobalVariable(TheModule, LongTy, false,
+ llvm::GlobalValue::ExternalLinkage, llvm::ConstantInt::get(LongTy, 0),
+ classSymbolName);
+ }
+
+ // Get the size of instances.
+ int instanceSize =
+ Context.getASTObjCImplementationLayout(OID).getSize().getQuantity();
+
+ // Collect information about instance variables.
+ SmallVector<llvm::Constant*, 16> IvarNames;
+ SmallVector<llvm::Constant*, 16> IvarTypes;
+ SmallVector<llvm::Constant*, 16> IvarOffsets;
+
+ std::vector<llvm::Constant*> IvarOffsetValues;
+ SmallVector<bool, 16> WeakIvars;
+ SmallVector<bool, 16> StrongIvars;
+
+ int superInstanceSize = !SuperClassDecl ? 0 :
+ Context.getASTObjCInterfaceLayout(SuperClassDecl).getSize().getQuantity();
+ // For non-fragile ivars, set the instance size to 0 - {the size of just this
+ // class}. The runtime will then set this to the correct value on load.
+ if (CGM.getContext().getLangOpts().ObjCNonFragileABI) {
+ instanceSize = 0 - (instanceSize - superInstanceSize);
+ }
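+ // Illustrative example: a class that adds 16 bytes of ivars on top of a
+ // 48-byte superclass is recorded here as -16; the runtime recomputes the
+ // real instance size when the class is loaded.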
+
+ for (const ObjCIvarDecl *IVD = ClassDecl->all_declared_ivar_begin(); IVD;
+ IVD = IVD->getNextIvar()) {
+ // Store the name
+ IvarNames.push_back(MakeConstantString(IVD->getNameAsString()));
+ // Get the type encoding for this ivar
+ std::string TypeStr;
+ Context.getObjCEncodingForType(IVD->getType(), TypeStr);
+ IvarTypes.push_back(MakeConstantString(TypeStr));
+ // Get the offset
+ uint64_t BaseOffset = ComputeIvarBaseOffset(CGM, OID, IVD);
+ uint64_t Offset = BaseOffset;
+ if (CGM.getContext().getLangOpts().ObjCNonFragileABI) {
+ Offset = BaseOffset - superInstanceSize;
+ }
+ llvm::Constant *OffsetValue = llvm::ConstantInt::get(IntTy, Offset);
+ // Create the direct offset value
+ std::string OffsetName = "__objc_ivar_offset_value_" + ClassName +"." +
+ IVD->getNameAsString();
+ llvm::GlobalVariable *OffsetVar = TheModule.getGlobalVariable(OffsetName);
+ if (OffsetVar) {
+ OffsetVar->setInitializer(OffsetValue);
+ // If this is the real definition, change its linkage type so that
+ // different modules will use this one, rather than their private
+ // copy.
+ OffsetVar->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else
+ OffsetVar = new llvm::GlobalVariable(TheModule, IntTy,
+ false, llvm::GlobalValue::ExternalLinkage,
+ OffsetValue,
+ "__objc_ivar_offset_value_" + ClassName +"." +
+ IVD->getNameAsString());
+ IvarOffsets.push_back(OffsetValue);
+ IvarOffsetValues.push_back(OffsetVar);
+ Qualifiers::ObjCLifetime lt = IVD->getType().getQualifiers().getObjCLifetime();
+ switch (lt) {
+ case Qualifiers::OCL_Strong:
+ StrongIvars.push_back(true);
+ WeakIvars.push_back(false);
+ break;
+ case Qualifiers::OCL_Weak:
+ StrongIvars.push_back(false);
+ WeakIvars.push_back(true);
+ break;
+ default:
+ StrongIvars.push_back(false);
+ WeakIvars.push_back(false);
+ }
+ }
+ llvm::Constant *StrongIvarBitmap = MakeBitField(StrongIvars);
+ llvm::Constant *WeakIvarBitmap = MakeBitField(WeakIvars);
+ llvm::GlobalVariable *IvarOffsetArray =
+ MakeGlobalArray(PtrToIntTy, IvarOffsetValues, ".ivar.offsets");
+
+
+ // Collect information about instance methods
+ SmallVector<Selector, 16> InstanceMethodSels;
+ SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
+ for (ObjCImplementationDecl::instmeth_iterator
+ iter = OID->instmeth_begin(), endIter = OID->instmeth_end();
+ iter != endIter ; iter++) {
+ InstanceMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+ InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+
+ llvm::Constant *Properties = GeneratePropertyList(OID, InstanceMethodSels,
+ InstanceMethodTypes);
+
+
+ // Collect information about class methods
+ SmallVector<Selector, 16> ClassMethodSels;
+ SmallVector<llvm::Constant*, 16> ClassMethodTypes;
+ for (ObjCImplementationDecl::classmeth_iterator
+ iter = OID->classmeth_begin(), endIter = OID->classmeth_end();
+ iter != endIter ; iter++) {
+ ClassMethodSels.push_back((*iter)->getSelector());
+ std::string TypeStr;
+ Context.getObjCEncodingForMethodDecl((*iter),TypeStr);
+ ClassMethodTypes.push_back(MakeConstantString(TypeStr));
+ }
+ // Collect the names of referenced protocols
+ SmallVector<std::string, 16> Protocols;
+ for (ObjCInterfaceDecl::protocol_iterator
+ I = ClassDecl->protocol_begin(),
+ E = ClassDecl->protocol_end(); I != E; ++I)
+ Protocols.push_back((*I)->getNameAsString());
+
+
+
+ // Get the superclass pointer.
+ llvm::Constant *SuperClass;
+ if (!SuperClassName.empty()) {
+ SuperClass = MakeConstantString(SuperClassName, ".super_class_name");
+ } else {
+ SuperClass = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+ }
+ // Empty vector used to construct empty method lists
+ SmallVector<llvm::Constant*, 1> empty;
+ // Generate the method and instance variable lists
+ llvm::Constant *MethodList = GenerateMethodList(ClassName, "",
+ InstanceMethodSels, InstanceMethodTypes, false);
+ llvm::Constant *ClassMethodList = GenerateMethodList(ClassName, "",
+ ClassMethodSels, ClassMethodTypes, true);
+ llvm::Constant *IvarList = GenerateIvarList(IvarNames, IvarTypes,
+ IvarOffsets);
+ // Irrespective of whether we are compiling for a fragile or non-fragile ABI,
+ // we emit a symbol containing the offset for each ivar in the class. This
+ // allows code compiled for the non-Fragile ABI to inherit from code compiled
+ // for the legacy ABI, without causing problems. The converse is also
+ // possible, but causes all ivar accesses to be fragile.
+
+ // Offset pointer for getting at the correct field in the ivar list when
+ // setting up the alias. These are: The base address for the global, the
+ // ivar array (second field), the ivar in this list (set for each ivar), and
+ // the offset (third field in ivar structure)
+ llvm::Type *IndexTy = Int32Ty;
+ llvm::Constant *offsetPointerIndexes[] = {Zeros[0],
+ llvm::ConstantInt::get(IndexTy, 1), 0,
+ llvm::ConstantInt::get(IndexTy, 2) };
+
+ unsigned ivarIndex = 0;
+ for (const ObjCIvarDecl *IVD = ClassDecl->all_declared_ivar_begin(); IVD;
+ IVD = IVD->getNextIvar()) {
+ const std::string Name = "__objc_ivar_offset_" + ClassName + '.'
+ + IVD->getNameAsString();
+ offsetPointerIndexes[2] = llvm::ConstantInt::get(IndexTy, ivarIndex);
+ // Get the correct ivar field
+ llvm::Constant *offsetValue = llvm::ConstantExpr::getGetElementPtr(
+ IvarList, offsetPointerIndexes);
+ // Get the existing variable, if one exists.
+ llvm::GlobalVariable *offset = TheModule.getNamedGlobal(Name);
+ if (offset) {
+ offset->setInitializer(offsetValue);
+ // If this is the real definition, change its linkage type so that
+ // different modules will use this one, rather than their private
+ // copy.
+ offset->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else {
+ // Add a new alias if there isn't one already.
+ offset = new llvm::GlobalVariable(TheModule, offsetValue->getType(),
+ false, llvm::GlobalValue::ExternalLinkage, offsetValue, Name);
+ (void) offset; // Silence dead store warning.
+ }
+ ++ivarIndex;
+ }
+ llvm::Constant *ZeroPtr = llvm::ConstantInt::get(IntPtrTy, 0);
+ // Generate the metaclass for class methods.
+ llvm::Constant *MetaClassStruct = GenerateClassStructure(NULLPtr,
+ NULLPtr, 0x12L, ClassName.c_str(), 0, Zeros[0], GenerateIvarList(
+ empty, empty, empty), ClassMethodList, NULLPtr,
+ NULLPtr, NULLPtr, ZeroPtr, ZeroPtr, true);
+
+ // Generate the class structure
+ llvm::Constant *ClassStruct =
+ GenerateClassStructure(MetaClassStruct, SuperClass, 0x11L,
+ ClassName.c_str(), 0,
+ llvm::ConstantInt::get(LongTy, instanceSize), IvarList,
+ MethodList, GenerateProtocolList(Protocols), IvarOffsetArray,
+ Properties, StrongIvarBitmap, WeakIvarBitmap);
+
+ // Resolve the class aliases, if they exist.
+ if (ClassPtrAlias) {
+ ClassPtrAlias->replaceAllUsesWith(
+ llvm::ConstantExpr::getBitCast(ClassStruct, IdTy));
+ ClassPtrAlias->eraseFromParent();
+ ClassPtrAlias = 0;
+ }
+ if (MetaClassPtrAlias) {
+ MetaClassPtrAlias->replaceAllUsesWith(
+ llvm::ConstantExpr::getBitCast(MetaClassStruct, IdTy));
+ MetaClassPtrAlias->eraseFromParent();
+ MetaClassPtrAlias = 0;
+ }
+
+ // Add class structure to list to be added to the symtab later
+ ClassStruct = llvm::ConstantExpr::getBitCast(ClassStruct, PtrToInt8Ty);
+ Classes.push_back(ClassStruct);
+}
+
+
+llvm::Function *CGObjCGNU::ModuleInitFunction() {
+ // Only emit an ObjC load function if Objective-C constructs have actually
+ // been used in this module; otherwise there is nothing to register.
+ if (Classes.empty() && Categories.empty() && ConstantStrings.empty() &&
+ ExistingProtocols.empty() && SelectorTable.empty())
+ return NULL;
+
+ // Add all referenced protocols to a category.
+ GenerateProtocolHolderCategory();
+
+ llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
+ SelectorTy->getElementType());
+ llvm::Type *SelStructPtrTy = SelectorTy;
+ if (SelStructTy == 0) {
+ SelStructTy = llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, NULL);
+ SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy);
+ }
+
+ std::vector<llvm::Constant*> Elements;
+ llvm::Constant *Statics = NULLPtr;
+ // Generate statics list:
+ if (ConstantStrings.size()) {
+ llvm::ArrayType *StaticsArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
+ ConstantStrings.size() + 1);
+ ConstantStrings.push_back(NULLPtr);
+
+ StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
+
+ if (StringClass.empty()) StringClass = "NXConstantString";
+
+ Elements.push_back(MakeConstantString(StringClass,
+ ".objc_static_class_name"));
+ Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy,
+ ConstantStrings));
+ llvm::StructType *StaticsListTy =
+ llvm::StructType::get(PtrToInt8Ty, StaticsArrayTy, NULL);
+ llvm::Type *StaticsListPtrTy =
+ llvm::PointerType::getUnqual(StaticsListTy);
+ Statics = MakeGlobal(StaticsListTy, Elements, ".objc_statics");
+ llvm::ArrayType *StaticsListArrayTy =
+ llvm::ArrayType::get(StaticsListPtrTy, 2);
+ Elements.clear();
+ Elements.push_back(Statics);
+ Elements.push_back(llvm::Constant::getNullValue(StaticsListPtrTy));
+ Statics = MakeGlobal(StaticsListArrayTy, Elements, ".objc_statics_ptr");
+ Statics = llvm::ConstantExpr::getBitCast(Statics, PtrTy);
+ }
+ // Array of classes, categories, and constant objects
+ llvm::ArrayType *ClassListTy = llvm::ArrayType::get(PtrToInt8Ty,
+ Classes.size() + Categories.size() + 2);
+ llvm::StructType *SymTabTy = llvm::StructType::get(LongTy, SelStructPtrTy,
+ llvm::Type::getInt16Ty(VMContext),
+ llvm::Type::getInt16Ty(VMContext),
+ ClassListTy, NULL);
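+ // SymTabTy is roughly (illustrative sketch of the GNU runtime layout):
+ //   struct objc_symtab { long sel_ref_cnt; struct objc_selector *refs;
+ //                        short cls_def_cnt; short cat_def_cnt;
+ //                        void *defs[]; /* classes, categories, statics, NULL */ };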
+
+ Elements.clear();
+ // Pointer to an array of selectors used in this module.
+ std::vector<llvm::Constant*> Selectors;
+ std::vector<llvm::GlobalAlias*> SelectorAliases;
+ for (SelectorMap::iterator iter = SelectorTable.begin(),
+ iterEnd = SelectorTable.end(); iter != iterEnd ; ++iter) {
+
+ std::string SelNameStr = iter->first.getAsString();
+ llvm::Constant *SelName = ExportUniqueString(SelNameStr, ".objc_sel_name");
+
+ SmallVectorImpl<TypedSelector> &Types = iter->second;
+ for (SmallVectorImpl<TypedSelector>::iterator i = Types.begin(),
+ e = Types.end() ; i!=e ; i++) {
+
+ llvm::Constant *SelectorTypeEncoding = NULLPtr;
+ if (!i->first.empty())
+ SelectorTypeEncoding = MakeConstantString(i->first, ".objc_sel_types");
+
+ Elements.push_back(SelName);
+ Elements.push_back(SelectorTypeEncoding);
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+
+ // Store the selector alias for later replacement
+ SelectorAliases.push_back(i->second);
+ }
+ }
+ unsigned SelectorCount = Selectors.size();
+ // NULL-terminate the selector list. This should not actually be required,
+ // because the selector list has a length field. Unfortunately, the GCC
+ // runtime decides to ignore the length field and expects a NULL terminator,
+ // and GCC cooperates with this by always setting the length to 0.
+ Elements.push_back(NULLPtr);
+ Elements.push_back(NULLPtr);
+ Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
+ Elements.clear();
+
+ // Number of static selectors
+ Elements.push_back(llvm::ConstantInt::get(LongTy, SelectorCount));
+ llvm::Constant *SelectorList = MakeGlobalArray(SelStructTy, Selectors,
+ ".objc_selector_list");
+ Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
+ SelStructPtrTy));
+
+ // Now that all of the static selectors exist, create pointers to them.
+ for (unsigned int i=0 ; i<SelectorCount ; i++) {
+
+ llvm::Constant *Idxs[] = {Zeros[0],
+ llvm::ConstantInt::get(Int32Ty, i), Zeros[0]};
+ // FIXME: We're generating redundant loads and stores here!
+ llvm::Constant *SelPtr = llvm::ConstantExpr::getGetElementPtr(SelectorList,
+ makeArrayRef(Idxs, 2));
+ // If selectors are defined as an opaque type, cast the pointer to this
+ // type.
+ SelPtr = llvm::ConstantExpr::getBitCast(SelPtr, SelectorTy);
+ SelectorAliases[i]->replaceAllUsesWith(SelPtr);
+ SelectorAliases[i]->eraseFromParent();
+ }
+
+ // Number of classes defined.
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
+ Classes.size()));
+ // Number of categories defined
+ Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
+ Categories.size()));
+ // Create an array of classes, then categories, then static object instances
+ Classes.insert(Classes.end(), Categories.begin(), Categories.end());
+ // NULL-terminated list of static object instances (mainly constant strings)
+ Classes.push_back(Statics);
+ Classes.push_back(NULLPtr);
+ llvm::Constant *ClassList = llvm::ConstantArray::get(ClassListTy, Classes);
+ Elements.push_back(ClassList);
+ // Construct the symbol table
+ llvm::Constant *SymTab = MakeGlobal(SymTabTy, Elements);
+
+ // The symbol table is contained in a module which has some version-checking
+ // constants
+ llvm::StructType * ModuleTy = llvm::StructType::get(LongTy, LongTy,
+ PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy),
+ (RuntimeVersion >= 10) ? IntTy : NULL, NULL);
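+ // ModuleTy is roughly (illustrative): struct objc_module { long version;
+ //   long size; const char *source_file_name; struct objc_symtab *symtab;
+ //   int gc_mode; /* only present when RuntimeVersion >= 10 */ };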
+ Elements.clear();
+ // Runtime version, used for ABI compatibility checking.
+ Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
+ // sizeof(ModuleTy)
+ llvm::TargetData td(&TheModule);
+ Elements.push_back(
+ llvm::ConstantInt::get(LongTy,
+ td.getTypeSizeInBits(ModuleTy) /
+ CGM.getContext().getCharWidth()));
+
+ // The path to the source file where this module was declared
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ const FileEntry *mainFile = SM.getFileEntryForID(SM.getMainFileID());
+ std::string path =
+ std::string(mainFile->getDir()->getName()) + '/' + mainFile->getName();
+ Elements.push_back(MakeConstantString(path, ".objc_source_file_name"));
+ Elements.push_back(SymTab);
+
+ if (RuntimeVersion >= 10)
+ switch (CGM.getLangOpts().getGC()) {
+ case LangOptions::GCOnly:
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 2));
+ break;
+ case LangOptions::NonGC:
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
+ else
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 0));
+ break;
+ case LangOptions::HybridGC:
+ Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
+ break;
+ }
+
+ llvm::Value *Module = MakeGlobal(ModuleTy, Elements);
+
+ // Create the load function calling the runtime entry point with the module
+ // structure
+ llvm::Function * LoadFunction = llvm::Function::Create(
+ llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext), false),
+ llvm::GlobalValue::InternalLinkage, ".objc_load_function",
+ &TheModule);
+ llvm::BasicBlock *EntryBB =
+ llvm::BasicBlock::Create(VMContext, "entry", LoadFunction);
+ CGBuilderTy Builder(VMContext);
+ Builder.SetInsertPoint(EntryBB);
+
+ llvm::FunctionType *FT =
+ llvm::FunctionType::get(Builder.getVoidTy(),
+ llvm::PointerType::getUnqual(ModuleTy), true);
+ llvm::Value *Register = CGM.CreateRuntimeFunction(FT, "__objc_exec_class");
+ Builder.CreateCall(Register, Module);
+
+ if (!ClassAliases.empty()) {
+ llvm::Type *ArgTypes[2] = {PtrTy, PtrToInt8Ty};
+ llvm::FunctionType *RegisterAliasTy =
+ llvm::FunctionType::get(Builder.getVoidTy(),
+ ArgTypes, false);
+ llvm::Function *RegisterAlias = llvm::Function::Create(
+ RegisterAliasTy,
+ llvm::GlobalValue::ExternalWeakLinkage, "class_registerAlias_np",
+ &TheModule);
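+ // class_registerAlias_np is declared with external weak linkage, so it
+ // resolves to null at load time when the runtime does not provide it; the
+ // null check below chooses between registering aliases and skipping them.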
+ llvm::BasicBlock *AliasBB =
+ llvm::BasicBlock::Create(VMContext, "alias", LoadFunction);
+ llvm::BasicBlock *NoAliasBB =
+ llvm::BasicBlock::Create(VMContext, "no_alias", LoadFunction);
+
+ // Branch based on whether the runtime provided class_registerAlias_np()
+ llvm::Value *HasRegisterAlias = Builder.CreateICmpNE(RegisterAlias,
+ llvm::Constant::getNullValue(RegisterAlias->getType()));
+ Builder.CreateCondBr(HasRegisterAlias, AliasBB, NoAliasBB);
+
+ // The true branch (has alias registration function):
+ Builder.SetInsertPoint(AliasBB);
+ // Emit alias registration calls:
+ for (std::vector<ClassAliasPair>::iterator iter = ClassAliases.begin();
+ iter != ClassAliases.end(); ++iter) {
+ llvm::Constant *TheClass =
+ TheModule.getGlobalVariable(("_OBJC_CLASS_" + iter->first).c_str(),
+ true);
+ if (0 != TheClass) {
+ TheClass = llvm::ConstantExpr::getBitCast(TheClass, PtrTy);
+ Builder.CreateCall2(RegisterAlias, TheClass,
+ MakeConstantString(iter->second));
+ }
+ }
+ // Jump to end:
+ Builder.CreateBr(NoAliasBB);
+
+ // Missing alias registration function, just return from the function:
+ Builder.SetInsertPoint(NoAliasBB);
+ }
+ Builder.CreateRetVoid();
+
+ return LoadFunction;
+}
+
+llvm::Function *CGObjCGNU::GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ const ObjCCategoryImplDecl *OCD =
+ dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext());
+ StringRef CategoryName = OCD ? OCD->getName() : "";
+ StringRef ClassName = CD->getName();
+ Selector MethodName = OMD->getSelector();
+ bool isClassMethod = !OMD->isInstanceMethod();
+
+ CodeGenTypes &Types = CGM.getTypes();
+ llvm::FunctionType *MethodTy =
+ Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD));
+ std::string FunctionName = SymbolNameForMethod(ClassName, CategoryName,
+ MethodName, isClassMethod);
+
+ llvm::Function *Method
+ = llvm::Function::Create(MethodTy,
+ llvm::GlobalValue::InternalLinkage,
+ FunctionName,
+ &TheModule);
+ return Method;
+}
+
+llvm::Constant *CGObjCGNU::GetPropertyGetFunction() {
+ return GetPropertyFn;
+}
+
+llvm::Constant *CGObjCGNU::GetPropertySetFunction() {
+ return SetPropertyFn;
+}
+
+llvm::Constant *CGObjCGNU::GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) {
+ return 0;
+}
+
+llvm::Constant *CGObjCGNU::GetGetStructFunction() {
+ return GetStructPropertyFn;
+}
+llvm::Constant *CGObjCGNU::GetSetStructFunction() {
+ return SetStructPropertyFn;
+}
+llvm::Constant *CGObjCGNU::GetCppAtomicObjectFunction() {
+ return 0;
+}
+
+llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
+ return EnumerationMutationFn;
+}
+
+void CGObjCGNU::EmitSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ EmitAtSynchronizedStmt(CGF, S, SyncEnterFn, SyncExitFn);
+}
+
+
+void CGObjCGNU::EmitTryStmt(CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) {
+ // Unlike the Apple non-fragile runtimes, which also use
+ // unwind-based zero-cost exceptions, the GNU Objective-C runtime's
+ // EH support isn't a veneer over C++ EH. Instead, exception
+ // objects are created by __objc_exception_throw and destroyed by
+ // the personality function; this avoids the need for bracketing
+ // catch handlers with calls to __blah_begin_catch/__blah_end_catch
+ // (or even _Unwind_DeleteException), but probably doesn't
+ // interoperate very well with foreign exceptions.
+ //
+ // In Objective-C++ mode, we actually emit something equivalent to the C++
+ // exception handler.
+ EmitTryCatchStmt(CGF, S, EnterCatchFn, ExitCatchFn, ExceptionReThrowFn);
+ return;
+}
+
+void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ llvm::Value *ExceptionAsObject;
+
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr);
+ ExceptionAsObject = Exception;
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ ExceptionAsObject = CGF.ObjCEHValueStack.back();
+ }
+ ExceptionAsObject = CGF.Builder.CreateBitCast(ExceptionAsObject, IdTy);
+
+ // Note: This may have to be an invoke, if we want to support constructs like:
+ // @try {
+ // @throw(obj);
+ // }
+ // @catch(id) ...
+ //
+ // This is effectively turning @throw into an incredibly-expensive goto, but
+ // it may happen as a result of inlining followed by missed optimizations, or
+ // as a result of stupidity.
+ llvm::BasicBlock *UnwindBB = CGF.getInvokeDest();
+ if (!UnwindBB) {
+ CGF.Builder.CreateCall(ExceptionThrowFn, ExceptionAsObject);
+ CGF.Builder.CreateUnreachable();
+ } else {
+ CGF.Builder.CreateInvoke(ExceptionThrowFn, UnwindBB, UnwindBB,
+ ExceptionAsObject);
+ }
+ // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.ClearInsertionPoint();
+}
+
+llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) {
+ CGBuilderTy B = CGF.Builder;
+ AddrWeakObj = EnforceType(B, AddrWeakObj, PtrToIdTy);
+ return B.CreateCall(WeakReadFn, AddrWeakObj);
+}
+
+void CGObjCGNU::EmitObjCWeakAssign(CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ CGBuilderTy B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, PtrToIdTy);
+ B.CreateCall2(WeakAssignFn, src, dst);
+}
+
+void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst,
+ bool threadlocal) {
+ CGBuilderTy B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, PtrToIdTy);
+ if (!threadlocal)
+ B.CreateCall2(GlobalAssignFn, src, dst);
+ else
+ // FIXME: Add thread-local assign API.
+ llvm_unreachable("EmitObjCGlobalAssign - Thread Local API NYI");
+}
+
+void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst,
+ llvm::Value *ivarOffset) {
+ CGBuilderTy B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, IdTy);
+ B.CreateCall3(IvarAssignFn, src, dst, ivarOffset);
+}
+
+void CGObjCGNU::EmitObjCStrongCastAssign(CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ CGBuilderTy B = CGF.Builder;
+ src = EnforceType(B, src, IdTy);
+ dst = EnforceType(B, dst, PtrToIdTy);
+ B.CreateCall2(StrongCastAssignFn, src, dst);
+}
+
+void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ llvm::Value *Size) {
+ CGBuilderTy B = CGF.Builder;
+ DestPtr = EnforceType(B, DestPtr, PtrTy);
+ SrcPtr = EnforceType(B, SrcPtr, PtrTy);
+
+ B.CreateCall3(MemMoveFn, DestPtr, SrcPtr, Size);
+}
+
+llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const std::string Name = "__objc_ivar_offset_" + ID->getNameAsString()
+ + '.' + Ivar->getNameAsString();
+ // Emit the variable and initialize it with what we think the correct value
+ // is. This allows code compiled with non-fragile ivars to work correctly
+ // when linked against code which isn't (most of the time).
+ llvm::GlobalVariable *IvarOffsetPointer = TheModule.getNamedGlobal(Name);
+ if (!IvarOffsetPointer) {
+ // This will cause a run-time crash if we accidentally use it. A value of
+ // 0 would seem more sensible, but will silently overwrite the isa pointer
+ // causing a great deal of confusion.
+ uint64_t Offset = -1;
+ // We can't call ComputeIvarBaseOffset() here if we have the
+ // implementation, because it will create an invalid ASTRecordLayout object
+ // that we are then stuck with forever, so we only initialize the ivar
+ // offset variable with a guess if we only have the interface. The
+ // initializer will be reset later anyway, when we are generating the class
+ // description.
+ if (!CGM.getContext().getObjCImplementation(
+ const_cast<ObjCInterfaceDecl *>(ID)))
+ Offset = ComputeIvarBaseOffset(CGM, ID, Ivar);
+
+ llvm::ConstantInt *OffsetGuess = llvm::ConstantInt::get(Int32Ty, Offset,
+ /*isSigned*/true);
+ // Don't emit the guess in non-PIC code because the linker will not be able
+ // to replace it with the real version for a library. In non-PIC code you
+ // must compile with the fragile ABI if you want to use ivars from a
+ // GCC-compiled class.
+ if (CGM.getLangOpts().PICLevel || CGM.getLangOpts().PIELevel) {
+ llvm::GlobalVariable *IvarOffsetGV = new llvm::GlobalVariable(TheModule,
+ Int32Ty, false,
+ llvm::GlobalValue::PrivateLinkage, OffsetGuess, Name+".guess");
+ IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
+ IvarOffsetGV->getType(), false, llvm::GlobalValue::LinkOnceAnyLinkage,
+ IvarOffsetGV, Name);
+ } else {
+ IvarOffsetPointer = new llvm::GlobalVariable(TheModule,
+ llvm::Type::getInt32PtrTy(VMContext), false,
+ llvm::GlobalValue::ExternalLinkage, 0, Name);
+ }
+ }
+ return IvarOffsetPointer;
+}
+
+LValue CGObjCGNU::EmitObjCValueForIvar(CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ const ObjCInterfaceDecl *ID =
+ ObjectTy->getAs<ObjCObjectType>()->getInterface();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+
+static const ObjCInterfaceDecl *FindIvarInterface(ASTContext &Context,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *OIVD) {
+ for (const ObjCIvarDecl *next = OID->all_declared_ivar_begin(); next;
+ next = next->getNextIvar()) {
+ if (OIVD == next)
+ return OID;
+ }
+
+ // Otherwise check in the super class.
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return FindIvarInterface(Context, Super, OIVD);
+
+ return 0;
+}
+
+llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ if (CGM.getLangOpts().ObjCNonFragileABI) {
+ Interface = FindIvarInterface(CGM.getContext(), Interface, Ivar);
+ if (RuntimeVersion < 10)
+ return CGF.Builder.CreateZExtOrBitCast(
+ CGF.Builder.CreateLoad(CGF.Builder.CreateLoad(
+ ObjCIvarOffsetVariable(Interface, Ivar), false, "ivar")),
+ PtrDiffTy);
+ std::string name = "__objc_ivar_offset_value_" +
+ Interface->getNameAsString() +"." + Ivar->getNameAsString();
+ llvm::Value *Offset = TheModule.getGlobalVariable(name);
+ if (!Offset)
+ Offset = new llvm::GlobalVariable(TheModule, IntTy,
+ false, llvm::GlobalValue::LinkOnceAnyLinkage,
+ llvm::Constant::getNullValue(IntTy), name);
+ Offset = CGF.Builder.CreateLoad(Offset);
+ if (Offset->getType() != PtrDiffTy)
+ Offset = CGF.Builder.CreateZExtOrBitCast(Offset, PtrDiffTy);
+ return Offset;
+ }
+ uint64_t Offset = ComputeIvarBaseOffset(CGF.CGM, Interface, Ivar);
+ return llvm::ConstantInt::get(PtrDiffTy, Offset, /*isSigned*/true);
+}
+
+CGObjCRuntime *
+clang::CodeGen::CreateGNUObjCRuntime(CodeGenModule &CGM) {
+ if (CGM.getLangOpts().ObjCNonFragileABI)
+ return new CGObjCGNUstep(CGM);
+ return new CGObjCGCC(CGM);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
new file mode 100644
index 0000000..e5246f1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCMac.cpp
@@ -0,0 +1,6373 @@
+//===------- CGObjCMac.cpp - Interface to Apple Objective-C Runtime -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides Objective-C code generation targeting the Apple runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+
+#include "CGRecordLayout.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "CGBlocks.h"
+#include "CGCleanup.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
+
+#include "llvm/InlineAsm.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+#include <cstdio>
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+
+// FIXME: We should find a nicer way to make the labels for metadata; string
+// concatenation is lame.
+
+class ObjCCommonTypesHelper {
+protected:
+ llvm::LLVMContext &VMContext;
+
+private:
+ // The types of these functions don't really matter because we
+ // should always bitcast before calling them.
+
+ /// id objc_msgSend (id, SEL, ...)
+ ///
+ /// The default messenger, used for sends whose ABI is unchanged from
+ /// the all-integer/pointer case.
+ llvm::Constant *getMessageSendFn() const {
+ // Add the non-lazy-bind attribute, since objc_msgSend is likely to
+ // be called a lot.
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSend",
+ llvm::Attribute::NonLazyBind);
+ }
+
+ /// void objc_msgSend_stret (id, SEL, ...)
+ ///
+ /// The messenger used when the return value is an aggregate returned
+ /// by indirect reference in the first argument, and therefore the
+ /// self and selector parameters are shifted over by one.
+ llvm::Constant *getMessageSendStretFn() const {
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.VoidTy,
+ params, true),
+ "objc_msgSend_stret");
+
+ }
+
+ /// [double | long double] objc_msgSend_fpret(id self, SEL op, ...)
+ ///
+ /// The messenger used when the return value is returned on the x87
+ /// floating-point stack; without a special entrypoint, the nil case
+ /// would be unbalanced.
+ llvm::Constant *getMessageSendFpretFn() const {
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.DoubleTy,
+ params, true),
+ "objc_msgSend_fpret");
+
+ }
+
+ /// _Complex long double objc_msgSend_fp2ret(id self, SEL op, ...)
+ ///
+ /// The messenger used when the return value is returned in two values on the
+ /// x87 floating point stack; without a special entrypoint, the nil case
+ /// would be unbalanced. Only used on 64-bit X86.
+ llvm::Constant *getMessageSendFp2retFn() const {
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ llvm::Type *longDoubleType = llvm::Type::getX86_FP80Ty(VMContext);
+ llvm::Type *resultType =
+ llvm::StructType::get(longDoubleType, longDoubleType, NULL);
+
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(resultType,
+ params, true),
+ "objc_msgSend_fp2ret");
+ }
+
+ /// id objc_msgSendSuper(struct objc_super *super, SEL op, ...)
+ ///
+ /// The messenger used for super calls, which have different dispatch
+ /// semantics. The class passed is the superclass of the current
+ /// class.
+ llvm::Constant *getMessageSendSuperFn() const {
+ llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSendSuper");
+ }
+
+ /// id objc_msgSendSuper2(struct objc_super *super, SEL op, ...)
+ ///
+ /// A slightly different messenger used for super calls. The class
+ /// passed is the current class.
+ llvm::Constant *getMessageSendSuperFn2() const {
+ llvm::Type *params[] = { SuperPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSendSuper2");
+ }
+
+ /// void objc_msgSendSuper_stret(void *stretAddr, struct objc_super *super,
+ /// SEL op, ...)
+ ///
+ /// The messenger used for super calls which return an aggregate indirectly.
+ llvm::Constant *getMessageSendSuperStretFn() const {
+ llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGM.VoidTy, params, true),
+ "objc_msgSendSuper_stret");
+ }
+
+ /// void objc_msgSendSuper2_stret(void * stretAddr, struct objc_super *super,
+ /// SEL op, ...)
+ ///
+ /// objc_msgSendSuper_stret with the super2 semantics.
+ llvm::Constant *getMessageSendSuperStretFn2() const {
+ llvm::Type *params[] = { Int8PtrTy, SuperPtrTy, SelectorPtrTy };
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGM.VoidTy, params, true),
+ "objc_msgSendSuper2_stret");
+ }
+
+ llvm::Constant *getMessageSendSuperFpretFn() const {
+ // There is no objc_msgSendSuper_fpret? How can that work?
+ return getMessageSendSuperFn();
+ }
+
+ llvm::Constant *getMessageSendSuperFpretFn2() const {
+ // There is no objc_msgSendSuper_fpret? How can that work?
+ return getMessageSendSuperFn2();
+ }
+
+protected:
+ CodeGen::CodeGenModule &CGM;
+
+public:
+ llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
+ llvm::Type *Int8PtrTy, *Int8PtrPtrTy;
+
+ /// ObjectPtrTy - LLVM type for object handles (typeof(id))
+ llvm::Type *ObjectPtrTy;
+
+ /// PtrObjectPtrTy - LLVM type for id *
+ llvm::Type *PtrObjectPtrTy;
+
+ /// SelectorPtrTy - LLVM type for selector handles (typeof(SEL))
+ llvm::Type *SelectorPtrTy;
+
+private:
+ /// ProtocolPtrTy - LLVM type for external protocol handles
+ /// (typeof(Protocol))
+ llvm::Type *ExternalProtocolPtrTy;
+
+public:
+ llvm::Type *getExternalProtocolPtrTy() {
+ if (!ExternalProtocolPtrTy) {
+ // FIXME: It would be nice to unify this with the opaque type, so that the
+ // IR comes out a bit cleaner.
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ llvm::Type *T = Types.ConvertType(Ctx.getObjCProtoType());
+ ExternalProtocolPtrTy = llvm::PointerType::getUnqual(T);
+ }
+
+ return ExternalProtocolPtrTy;
+ }
+
+ // SuperCTy - clang type for struct objc_super.
+ QualType SuperCTy;
+ // SuperPtrCTy - clang type for struct objc_super *.
+ QualType SuperPtrCTy;
+
+ /// SuperTy - LLVM type for struct objc_super.
+ llvm::StructType *SuperTy;
+ /// SuperPtrTy - LLVM type for struct objc_super *.
+ llvm::Type *SuperPtrTy;
+
+ /// PropertyTy - LLVM type for struct objc_property (struct _prop_t
+ /// in GCC parlance).
+ llvm::StructType *PropertyTy;
+
+ /// PropertyListTy - LLVM type for struct objc_property_list
+ /// (_prop_list_t in GCC parlance).
+ llvm::StructType *PropertyListTy;
+ /// PropertyListPtrTy - LLVM type for struct objc_property_list*.
+ llvm::Type *PropertyListPtrTy;
+
+ // MethodTy - LLVM type for struct objc_method.
+ llvm::StructType *MethodTy;
+
+ /// CacheTy - LLVM type for struct objc_cache.
+ llvm::Type *CacheTy;
+ /// CachePtrTy - LLVM type for struct objc_cache *.
+ llvm::Type *CachePtrTy;
+
+ llvm::Constant *getGetPropertyFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // id objc_getProperty (id, SEL, ptrdiff_t, bool)
+ SmallVector<CanQualType,4> Params;
+ CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
+ CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
+ Params.push_back(Ctx.BoolTy);
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeFunctionType(IdType, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
+ }
+
+ llvm::Constant *getSetPropertyFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
+ SmallVector<CanQualType,6> Params;
+ CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
+ CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
+ Params.push_back(IdType);
+ Params.push_back(Ctx.BoolTy);
+ Params.push_back(Ctx.BoolTy);
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
+ }
+
+ llvm::Constant *getOptimizedSetPropertyFn(bool atomic, bool copy) {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_setProperty_atomic(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+ // void objc_setProperty_nonatomic(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+ // void objc_setProperty_atomic_copy(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+ // void objc_setProperty_nonatomic_copy(id self, SEL _cmd,
+ // id newValue, ptrdiff_t offset);
+
+ SmallVector<CanQualType,4> Params;
+ CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
+ CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
+ Params.push_back(IdType);
+ Params.push_back(SelType);
+ Params.push_back(IdType);
+ Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ const char *name;
+ if (atomic && copy)
+ name = "objc_setProperty_atomic_copy";
+ else if (atomic && !copy)
+ name = "objc_setProperty_atomic";
+ else if (!atomic && copy)
+ name = "objc_setProperty_nonatomic_copy";
+ else
+ name = "objc_setProperty_nonatomic";
+
+ return CGM.CreateRuntimeFunction(FTy, name);
+ }
+
+ llvm::Constant *getCopyStructFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_copyStruct (void *, const void *, size_t, bool, bool)
+ SmallVector<CanQualType,5> Params;
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.LongTy);
+ Params.push_back(Ctx.BoolTy);
+ Params.push_back(Ctx.BoolTy);
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ return CGM.CreateRuntimeFunction(FTy, "objc_copyStruct");
+ }
+
+ /// This routine declares and returns address of:
+ /// void objc_copyCppObjectAtomic(
+ /// void *dest, const void *src,
+ /// void (*copyHelper) (void *dest, const void *source));
+ llvm::Constant *getCppAtomicObjectFunction() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ /// void objc_copyCppObjectAtomic(void *dest, const void *src, void *helper);
+ SmallVector<CanQualType,3> Params;
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.VoidPtrTy);
+ Params.push_back(Ctx.VoidPtrTy);
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ return CGM.CreateRuntimeFunction(FTy, "objc_copyCppObjectAtomic");
+ }
+
+ llvm::Constant *getEnumerationMutationFn() {
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+ // void objc_enumerationMutation (id)
+ SmallVector<CanQualType,1> Params;
+ Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType()));
+ llvm::FunctionType *FTy =
+ Types.GetFunctionType(Types.arrangeFunctionType(Ctx.VoidTy, Params,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All));
+ return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
+ }
+
+ /// GcReadWeakFn -- LLVM objc_read_weak (id *src) function.
+ llvm::Constant *getGcReadWeakFn() {
+ // id objc_read_weak (id *)
+ llvm::Type *args[] = { ObjectPtrTy->getPointerTo() };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_read_weak");
+ }
+
+ /// GcAssignWeakFn -- LLVM objc_assign_weak function.
+ llvm::Constant *getGcAssignWeakFn() {
+ // id objc_assign_weak (id, id *)
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_weak");
+ }
+
+ /// GcAssignGlobalFn -- LLVM objc_assign_global function.
+ llvm::Constant *getGcAssignGlobalFn() {
+ // id objc_assign_global(id, id *)
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_global");
+ }
+
+ /// GcAssignThreadLocalFn -- LLVM objc_assign_threadlocal function.
+ llvm::Constant *getGcAssignThreadLocalFn() {
+ // id objc_assign_threadlocal(id src, id * dest)
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_threadlocal");
+ }
+
+ /// GcAssignIvarFn -- LLVM objc_assign_ivar function.
+ llvm::Constant *getGcAssignIvarFn() {
+ // id objc_assign_ivar(id, id *, ptrdiff_t)
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo(),
+ CGM.PtrDiffTy };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_ivar");
+ }
+
+ /// GcMemmoveCollectableFn -- LLVM objc_memmove_collectable function.
+ llvm::Constant *GcMemmoveCollectableFn() {
+ // void *objc_memmove_collectable(void *dst, const void *src, size_t size)
+ llvm::Type *args[] = { Int8PtrTy, Int8PtrTy, LongTy };
+ llvm::FunctionType *FTy = llvm::FunctionType::get(Int8PtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_memmove_collectable");
+ }
+
+ /// GcAssignStrongCastFn -- LLVM objc_assign_strongCast function.
+ llvm::Constant *getGcAssignStrongCastFn() {
+ // id objc_assign_strongCast(id, id *)
+ llvm::Type *args[] = { ObjectPtrTy, ObjectPtrTy->getPointerTo() };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ObjectPtrTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_assign_strongCast");
+ }
+
+ /// ExceptionThrowFn - LLVM objc_exception_throw function.
+ llvm::Constant *getExceptionThrowFn() {
+ // void objc_exception_throw(id)
+ llvm::Type *args[] = { ObjectPtrTy };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_exception_throw");
+ }
+
+ /// ExceptionRethrowFn - LLVM objc_exception_rethrow function.
+ llvm::Constant *getExceptionRethrowFn() {
+ // void objc_exception_rethrow(void)
+ llvm::FunctionType *FTy = llvm::FunctionType::get(CGM.VoidTy, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_exception_rethrow");
+ }
+
+ /// SyncEnterFn - LLVM objc_sync_enter function.
+ llvm::Constant *getSyncEnterFn() {
+ // void objc_sync_enter (id)
+ llvm::Type *args[] = { ObjectPtrTy };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_sync_enter");
+ }
+
+ /// SyncExitFn - LLVM objc_sync_exit function.
+ llvm::Constant *getSyncExitFn() {
+ // void objc_sync_exit (id)
+ llvm::Type *args[] = { ObjectPtrTy };
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, args, false);
+ return CGM.CreateRuntimeFunction(FTy, "objc_sync_exit");
+ }
+
+ llvm::Constant *getSendFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn() : getMessageSendFn();
+ }
+
+ llvm::Constant *getSendFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn2() : getMessageSendFn();
+ }
+
+ llvm::Constant *getSendStretFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperStretFn() : getMessageSendStretFn();
+ }
+
+ llvm::Constant *getSendStretFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperStretFn2() : getMessageSendStretFn();
+ }
+
+ llvm::Constant *getSendFpretFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFpretFn() : getMessageSendFpretFn();
+ }
+
+ llvm::Constant *getSendFpretFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFpretFn2() : getMessageSendFpretFn();
+ }
+
+ llvm::Constant *getSendFp2retFn(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn() : getMessageSendFp2retFn();
+ }
+
+ llvm::Constant *getSendFp2RetFn2(bool IsSuper) const {
+ return IsSuper ? getMessageSendSuperFn2() : getMessageSendFp2retFn();
+ }
+
+ ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCCommonTypesHelper(){}
+};
+
+/// ObjCTypesHelper - Helper class that encapsulates lazy
+/// construction of various types used during ObjC generation.
+class ObjCTypesHelper : public ObjCCommonTypesHelper {
+public:
+ /// SymtabTy - LLVM type for struct objc_symtab.
+ llvm::StructType *SymtabTy;
+ /// SymtabPtrTy - LLVM type for struct objc_symtab *.
+ llvm::Type *SymtabPtrTy;
+ /// ModuleTy - LLVM type for struct objc_module.
+ llvm::StructType *ModuleTy;
+
+ /// ProtocolTy - LLVM type for struct objc_protocol.
+ llvm::StructType *ProtocolTy;
+ /// ProtocolPtrTy - LLVM type for struct objc_protocol *.
+ llvm::Type *ProtocolPtrTy;
+ /// ProtocolExtensionTy - LLVM type for struct
+ /// objc_protocol_extension.
+ llvm::StructType *ProtocolExtensionTy;
+ /// ProtocolExtensionPtrTy - LLVM type for struct
+ /// objc_protocol_extension *.
+ llvm::Type *ProtocolExtensionPtrTy;
+ /// MethodDescriptionTy - LLVM type for struct
+ /// objc_method_description.
+ llvm::StructType *MethodDescriptionTy;
+ /// MethodDescriptionListTy - LLVM type for struct
+ /// objc_method_description_list.
+ llvm::StructType *MethodDescriptionListTy;
+ /// MethodDescriptionListPtrTy - LLVM type for struct
+ /// objc_method_description_list *.
+ llvm::Type *MethodDescriptionListPtrTy;
+ /// ProtocolListTy - LLVM type for struct objc_protocol_list.
+ llvm::StructType *ProtocolListTy;
+ /// ProtocolListPtrTy - LLVM type for struct objc_protocol_list*.
+ llvm::Type *ProtocolListPtrTy;
+ /// CategoryTy - LLVM type for struct objc_category.
+ llvm::StructType *CategoryTy;
+ /// ClassTy - LLVM type for struct objc_class.
+ llvm::StructType *ClassTy;
+ /// ClassPtrTy - LLVM type for struct objc_class *.
+ llvm::Type *ClassPtrTy;
+ /// ClassExtensionTy - LLVM type for struct objc_class_ext.
+ llvm::StructType *ClassExtensionTy;
+ /// ClassExtensionPtrTy - LLVM type for struct objc_class_ext *.
+ llvm::Type *ClassExtensionPtrTy;
+ // IvarTy - LLVM type for struct objc_ivar.
+ llvm::StructType *IvarTy;
+ /// IvarListTy - LLVM type for struct objc_ivar_list.
+ llvm::Type *IvarListTy;
+ /// IvarListPtrTy - LLVM type for struct objc_ivar_list *.
+ llvm::Type *IvarListPtrTy;
+ /// MethodListTy - LLVM type for struct objc_method_list.
+ llvm::Type *MethodListTy;
+ /// MethodListPtrTy - LLVM type for struct objc_method_list *.
+ llvm::Type *MethodListPtrTy;
+
+ /// ExceptionDataTy - LLVM type for struct _objc_exception_data.
+ llvm::Type *ExceptionDataTy;
+
+ /// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
+ llvm::Constant *getExceptionTryEnterFn() {
+ llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGM.VoidTy, params, false),
+ "objc_exception_try_enter");
+ }
+
+ /// ExceptionTryExitFn - LLVM objc_exception_try_exit function.
+ llvm::Constant *getExceptionTryExitFn() {
+ llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGM.VoidTy, params, false),
+ "objc_exception_try_exit");
+ }
+
+ /// ExceptionExtractFn - LLVM objc_exception_extract function.
+ llvm::Constant *getExceptionExtractFn() {
+ llvm::Type *params[] = { ExceptionDataTy->getPointerTo() };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, false),
+ "objc_exception_extract");
+ }
+
+ /// ExceptionMatchFn - LLVM objc_exception_match function.
+ llvm::Constant *getExceptionMatchFn() {
+ llvm::Type *params[] = { ClassPtrTy, ObjectPtrTy };
+ return CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(CGM.Int32Ty, params, false),
+ "objc_exception_match");
+
+ }
+
+ /// SetJmpFn - LLVM _setjmp function.
+ llvm::Constant *getSetJmpFn() {
+ // This is specifically the prototype for x86.
+ llvm::Type *params[] = { CGM.Int32Ty->getPointerTo() };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty,
+ params, false),
+ "_setjmp",
+ llvm::Attribute::ReturnsTwice);
+ }
+
+public:
+ ObjCTypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCTypesHelper() {}
+};
+
+/// ObjCNonFragileABITypesHelper - has all of the types needed by Objective-C's
+/// modern (non-fragile) ABI.
+class ObjCNonFragileABITypesHelper : public ObjCCommonTypesHelper {
+public:
+
+ // MethodListnfABITy - LLVM for struct _method_list_t
+ llvm::StructType *MethodListnfABITy;
+
+ // MethodListnfABIPtrTy - LLVM for struct _method_list_t*
+ llvm::Type *MethodListnfABIPtrTy;
+
+ // ProtocolnfABITy = LLVM for struct _protocol_t
+ llvm::StructType *ProtocolnfABITy;
+
+ // ProtocolnfABIPtrTy = LLVM for struct _protocol_t*
+ llvm::Type *ProtocolnfABIPtrTy;
+
+ // ProtocolListnfABITy - LLVM for struct _objc_protocol_list
+ llvm::StructType *ProtocolListnfABITy;
+
+ // ProtocolListnfABIPtrTy - LLVM for struct _objc_protocol_list*
+ llvm::Type *ProtocolListnfABIPtrTy;
+
+ // ClassnfABITy - LLVM for struct _class_t
+ llvm::StructType *ClassnfABITy;
+
+ // ClassnfABIPtrTy - LLVM for struct _class_t*
+ llvm::Type *ClassnfABIPtrTy;
+
+ // IvarnfABITy - LLVM for struct _ivar_t
+ llvm::StructType *IvarnfABITy;
+
+ // IvarListnfABITy - LLVM for struct _ivar_list_t
+ llvm::StructType *IvarListnfABITy;
+
+ // IvarListnfABIPtrTy - LLVM for struct _ivar_list_t*
+ llvm::Type *IvarListnfABIPtrTy;
+
+ // ClassRonfABITy - LLVM for struct _class_ro_t
+ llvm::StructType *ClassRonfABITy;
+
+ // ImpnfABITy - LLVM for id (*)(id, SEL, ...)
+ llvm::Type *ImpnfABITy;
+
+ // CategorynfABITy - LLVM for struct _category_t
+ llvm::StructType *CategorynfABITy;
+
+ // New types for nonfragile abi messaging.
+
+ // MessageRefTy - LLVM for:
+ // struct _message_ref_t {
+ // IMP messenger;
+ // SEL name;
+ // };
+ llvm::StructType *MessageRefTy;
+ // MessageRefCTy - clang type for struct _message_ref_t
+ QualType MessageRefCTy;
+
+ // MessageRefPtrTy - LLVM for struct _message_ref_t*
+ llvm::Type *MessageRefPtrTy;
+ // MessageRefCPtrTy - clang type for struct _message_ref_t*
+ QualType MessageRefCPtrTy;
+
+ // MessengerTy - Type of the messenger (shown as IMP above)
+ llvm::FunctionType *MessengerTy;
+
+ // SuperMessageRefTy - LLVM for:
+ // struct _super_message_ref_t {
+ // SUPER_IMP messenger;
+ // SEL name;
+ // };
+ llvm::StructType *SuperMessageRefTy;
+
+ // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
+ llvm::Type *SuperMessageRefPtrTy;
+
+ llvm::Constant *getMessageSendFixupFn() {
+ // id objc_msgSend_fixup(id, struct message_ref_t*, ...)
+ llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSend_fixup");
+ }
+
+ llvm::Constant *getMessageSendFpretFixupFn() {
+ // id objc_msgSend_fpret_fixup(id, struct message_ref_t*, ...)
+ llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSend_fpret_fixup");
+ }
+
+ llvm::Constant *getMessageSendStretFixupFn() {
+ // id objc_msgSend_stret_fixup(id, struct message_ref_t*, ...)
+ llvm::Type *params[] = { ObjectPtrTy, MessageRefPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSend_stret_fixup");
+ }
+
+ llvm::Constant *getMessageSendSuper2FixupFn() {
+ // id objc_msgSendSuper2_fixup (struct objc_super *,
+ // struct _super_message_ref_t*, ...)
+ llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSendSuper2_fixup");
+ }
+
+ llvm::Constant *getMessageSendSuper2StretFixupFn() {
+ // id objc_msgSendSuper2_stret_fixup(struct objc_super *,
+ // struct _super_message_ref_t*, ...)
+ llvm::Type *params[] = { SuperPtrTy, SuperMessageRefPtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(ObjectPtrTy,
+ params, true),
+ "objc_msgSendSuper2_stret_fixup");
+ }
+
+ llvm::Constant *getObjCEndCatchFn() {
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.VoidTy, false),
+ "objc_end_catch");
+ }
+
+ llvm::Constant *getObjCBeginCatchFn() {
+ llvm::Type *params[] = { Int8PtrTy };
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(Int8PtrTy,
+ params, false),
+ "objc_begin_catch");
+ }
+
+ llvm::StructType *EHTypeTy;
+ llvm::Type *EHTypePtrTy;
+
+ ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm);
+ ~ObjCNonFragileABITypesHelper(){}
+};
+
+class CGObjCCommonMac : public CodeGen::CGObjCRuntime {
+public:
+ // FIXME - accessibility
+ class GC_IVAR {
+ public:
+ unsigned ivar_bytepos;
+ unsigned ivar_size;
+ GC_IVAR(unsigned bytepos = 0, unsigned size = 0)
+ : ivar_bytepos(bytepos), ivar_size(size) {}
+
+ // Allow sorting based on byte pos.
+ bool operator<(const GC_IVAR &b) const {
+ return ivar_bytepos < b.ivar_bytepos;
+ }
+ };
+
+ class SKIP_SCAN {
+ public:
+ unsigned skip;
+ unsigned scan;
+ SKIP_SCAN(unsigned _skip = 0, unsigned _scan = 0)
+ : skip(_skip), scan(_scan) {}
+ };
+
+protected:
+ llvm::LLVMContext &VMContext;
+ // FIXME! We may not need this after all.
+ unsigned ObjCABI;
+
+ // gc ivar layout bitmap calculation helper caches.
+ SmallVector<GC_IVAR, 16> SkipIvars;
+ SmallVector<GC_IVAR, 16> IvarsInfo;
+
+ /// LazySymbols - Symbols to generate a lazy reference for. See
+ /// DefinedSymbols and FinishModule().
+ llvm::SetVector<IdentifierInfo*> LazySymbols;
+
+ /// DefinedSymbols - External symbols which are defined by this
+ /// module. The symbols in this list and LazySymbols are used to add
+ /// special linker symbols which ensure that Objective-C modules are
+ /// linked properly.
+ llvm::SetVector<IdentifierInfo*> DefinedSymbols;
+
+ /// ClassNames - uniqued class names.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassNames;
+
+ /// MethodVarNames - uniqued method variable names.
+ llvm::DenseMap<Selector, llvm::GlobalVariable*> MethodVarNames;
+
+ /// DefinedCategoryNames - list of category names in form Class_Category.
+ llvm::SetVector<std::string> DefinedCategoryNames;
+
+ /// MethodVarTypes - uniqued method type signatures. We have to use
+ /// a StringMap here because we have no other unique reference.
+ llvm::StringMap<llvm::GlobalVariable*> MethodVarTypes;
+
+ /// MethodDefinitions - map of methods which have been defined in
+ /// this translation unit.
+ llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*> MethodDefinitions;
+
+ /// PropertyNames - uniqued property names.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> PropertyNames;
+
+ /// ClassReferences - uniqued class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> ClassReferences;
+
+ /// SelectorReferences - uniqued selector references.
+ llvm::DenseMap<Selector, llvm::GlobalVariable*> SelectorReferences;
+
+ /// Protocols - Protocols for which an objc_protocol structure has
+ /// been emitted. Forward declarations are handled by creating an
+ /// empty structure whose initializer is filled in when/if defined.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> Protocols;
+
+ /// DefinedProtocols - Protocols which have actually been
+ /// defined. We should not need this, see FIXME in GenerateProtocol.
+ llvm::DenseSet<IdentifierInfo*> DefinedProtocols;
+
+ /// DefinedClasses - List of defined classes.
+ llvm::SmallVector<llvm::GlobalValue*, 16> DefinedClasses;
+
+ /// DefinedNonLazyClasses - List of defined "non-lazy" classes.
+ llvm::SmallVector<llvm::GlobalValue*, 16> DefinedNonLazyClasses;
+
+ /// DefinedCategories - List of defined categories.
+ llvm::SmallVector<llvm::GlobalValue*, 16> DefinedCategories;
+
+ /// DefinedNonLazyCategories - List of defined "non-lazy" categories.
+ llvm::SmallVector<llvm::GlobalValue*, 16> DefinedNonLazyCategories;
+
+ /// GetNameForMethod - Return a name for the given method.
+ /// \param[out] NameOut - The return value.
+ void GetNameForMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD,
+ SmallVectorImpl<char> &NameOut);
+
+ /// GetMethodVarName - Return a unique constant for the given
+ /// selector's name. The return value has type char *.
+ llvm::Constant *GetMethodVarName(Selector Sel);
+ llvm::Constant *GetMethodVarName(IdentifierInfo *Ident);
+
+ /// GetMethodVarType - Return a unique constant for the given
+ /// method's type encoding string. The return value has type char *.
+
+ // FIXME: This is a horrible name.
+ llvm::Constant *GetMethodVarType(const ObjCMethodDecl *D,
+ bool Extended = false);
+ llvm::Constant *GetMethodVarType(const FieldDecl *D);
+
+ /// GetPropertyName - Return a unique constant for the given
+ /// name. The return value has type char *.
+ llvm::Constant *GetPropertyName(IdentifierInfo *Ident);
+
+ // FIXME: This can be dropped once string functions are unified.
+ llvm::Constant *GetPropertyTypeString(const ObjCPropertyDecl *PD,
+ const Decl *Container);
+
+ /// GetClassName - Return a unique constant for the given class's
+ /// name. The return value has type char *.
+ llvm::Constant *GetClassName(IdentifierInfo *Ident);
+
+ llvm::Function *GetMethodDefinition(const ObjCMethodDecl *MD);
+
+ /// BuildIvarLayout - Builds ivar layout bitmap for the class
+ /// implementation for the __strong or __weak case.
+ ///
+ llvm::Constant *BuildIvarLayout(const ObjCImplementationDecl *OI,
+ bool ForStrongLayout);
+
+ llvm::Constant *BuildIvarLayoutBitmap(std::string &BitMap);
+
+ void BuildAggrIvarRecordLayout(const RecordType *RT,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion);
+ void BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
+ const llvm::StructLayout *Layout,
+ const RecordDecl *RD,
+ ArrayRef<const FieldDecl*> RecFields,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion);
+
+ /// GetIvarLayoutName - Returns a unique constant for the given
+ /// ivar layout bitmap.
+ llvm::Constant *GetIvarLayoutName(IdentifierInfo *Ident,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// EmitPropertyList - Emit the given property list. The return
+ /// value has type PropertyListPtrTy.
+ llvm::Constant *EmitPropertyList(Twine Name,
+ const Decl *Container,
+ const ObjCContainerDecl *OCD,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// EmitProtocolMethodTypes - Generate the array of extended method type
+ /// strings. The return value has type Int8PtrPtrTy.
+ llvm::Constant *EmitProtocolMethodTypes(Twine Name,
+ ArrayRef<llvm::Constant*> MethodTypes,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// PushProtocolProperties - Append the given protocol's properties (and
+ /// those of its inherited protocols) to the output list, skipping duplicates.
+ void PushProtocolProperties(
+ llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
+ llvm::SmallVectorImpl<llvm::Constant*> &Properties,
+ const Decl *Container,
+ const ObjCProtocolDecl *PROTO,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// GetProtocolRef - Return a reference to the internal protocol
+ /// description, creating an empty one if it has not been
+ /// defined. The return value has type ProtocolPtrTy.
+ llvm::Constant *GetProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// CreateMetadataVar - Create a global variable with internal
+ /// linkage for use by the Objective-C runtime.
+ ///
+ /// This is a convenience wrapper which not only creates the
+ /// variable, but also sets the section and alignment and adds the
+ /// global to the "llvm.used" list.
+ ///
+ /// \param Name - The variable name.
+ /// \param Init - The variable initializer; this is also used to
+ /// define the type of the variable.
+ /// \param Section - The section the variable should go into, or 0.
+ /// \param Align - The alignment for the variable, or 0.
+ /// \param AddToUsed - Whether the variable should be added to
+ /// "llvm.used".
+ llvm::GlobalVariable *CreateMetadataVar(Twine Name,
+ llvm::Constant *Init,
+ const char *Section,
+ unsigned Align,
+ bool AddToUsed);
+
+ CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ llvm::Value *Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *OMD,
+ const ObjCCommonTypesHelper &ObjCTypes);
+
+ /// EmitImageInfo - Emit the image info marker used to encode some module
+ /// level information.
+ void EmitImageInfo();
+
+public:
+ CGObjCCommonMac(CodeGen::CodeGenModule &cgm) :
+ CGObjCRuntime(cgm), VMContext(cgm.getLLVMContext()) { }
+
+ virtual llvm::Constant *GenerateConstantString(const StringLiteral *SL);
+
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD=0);
+
+ virtual void GenerateProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD)=0;
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0;
+ virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo);
+
+};
+
+class CGObjCMac : public CGObjCCommonMac {
+private:
+ ObjCTypesHelper ObjCTypes;
+
+ /// EmitModuleInfo - Another marker encoding module level
+ /// information.
+ void EmitModuleInfo();
+
+ /// EmitModuleSymbols - Emit module symbols, the list of defined
+ /// classes and categories. The result has type SymtabPtrTy.
+ llvm::Constant *EmitModuleSymbols();
+
+ /// FinishModule - Write out global data structures at the end of
+ /// processing a translation unit.
+ void FinishModule();
+
+ /// EmitClassExtension - Generate the class extension structure used
+ /// to store the weak ivar layout and properties. The return value
+ /// has type ClassExtensionPtrTy.
+ llvm::Constant *EmitClassExtension(const ObjCImplementationDecl *ID);
+
+ /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given class.
+ llvm::Value *EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ llvm::Value *EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II);
+
+ llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder);
+
+ /// EmitSuperClassRef - Emits reference to class's main metadata class.
+ llvm::Value *EmitSuperClassRef(const ObjCInterfaceDecl *ID);
+
+ /// EmitIvarList - Emit the ivar list for the given
+ /// implementation. If ForClass is true the list of class ivars
+ /// (i.e. metaclass ivars) is emitted, otherwise the list of
+ /// interface ivars will be emitted. The return value has type
+ /// IvarListPtrTy.
+ llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID,
+ bool ForClass);
+
+ /// EmitMetaClassRef - Emit a forward reference to the class structure
+ /// for the metaclass of the given interface. The return value has
+ /// type ClassPtrTy.
+ llvm::Constant *EmitMetaClassRef(const ObjCInterfaceDecl *ID);
+
+ /// EmitMetaClass - Emit a class structure for the metaclass of the
+ /// given implementation. The return value has type ClassPtrTy.
+ llvm::Constant *EmitMetaClass(const ObjCImplementationDecl *ID,
+ llvm::Constant *Protocols,
+ ArrayRef<llvm::Constant*> Methods);
+
+ llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+
+ llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
+
+ /// EmitMethodList - Emit the method list for the given
+ /// implementation. The return value has type MethodListPtrTy.
+ llvm::Constant *EmitMethodList(Twine Name,
+ const char *Section,
+ ArrayRef<llvm::Constant*> Methods);
+
+ /// EmitMethodDescList - Emit a method description list for a list of
+ /// method declarations.
+ /// - Name: The name of the emitted metadata variable.
+ /// - Section: The section into which the list is emitted.
+ /// - Methods: The method description constants to output.
+ ///
+ /// The return value has type MethodDescriptionListPtrTy.
+ llvm::Constant *EmitMethodDescList(Twine Name,
+ const char *Section,
+ ArrayRef<llvm::Constant*> Methods);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// EmitProtocolExtension - Generate the protocol extension
+ /// structure used to store optional instance and class methods, and
+ /// protocol properties. The return value has type
+ /// ProtocolExtensionPtrTy.
+ llvm::Constant *
+ EmitProtocolExtension(const ObjCProtocolDecl *PD,
+ ArrayRef<llvm::Constant*> OptInstanceMethods,
+ ArrayRef<llvm::Constant*> OptClassMethods,
+ ArrayRef<llvm::Constant*> MethodTypesExt);
+
+ /// EmitProtocolList - Generate the list of referenced
+ /// protocols. The return value has type ProtocolListPtrTy.
+ llvm::Constant *EmitProtocolList(Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end);
+
+ /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
+ /// for the given selector.
+ llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval=false);
+
+public:
+ CGObjCMac(CodeGen::CodeGenModule &cgm);
+
+ virtual llvm::Function *ModuleInitFunction();
+
+ virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method);
+
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval = false);
+
+ /// The NeXT/Apple runtimes do not support typed selectors; just emit an
+ /// untyped one.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method);
+
+ virtual llvm::Constant *GetEHType(QualType T);
+
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+
+ virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) {}
+
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+
+ virtual llvm::Constant *GetPropertyGetFunction();
+ virtual llvm::Constant *GetPropertySetFunction();
+ virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy);
+ virtual llvm::Constant *GetGetStructFunction();
+ virtual llvm::Constant *GetSetStructFunction();
+ virtual llvm::Constant *GetCppAtomicObjectFunction();
+ virtual llvm::Constant *EnumerationMutationFunction();
+
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
+ void EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF, const Stmt &S);
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ bool threadlocal = false);
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset);
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *dest, llvm::Value *src,
+ llvm::Value *size);
+
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+
+ /// GetClassGlobal - Return the global variable for the Objective-C
+ /// class of the given name.
+ virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) {
+ llvm_unreachable("CGObjCMac::GetClassGlobal");
+ }
+};
+
+class CGObjCNonFragileABIMac : public CGObjCCommonMac {
+private:
+ ObjCNonFragileABITypesHelper ObjCTypes;
+ llvm::GlobalVariable* ObjCEmptyCacheVar;
+ llvm::GlobalVariable* ObjCEmptyVtableVar;
+
+ /// SuperClassReferences - uniqued super class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> SuperClassReferences;
+
+ /// MetaClassReferences - uniqued meta class references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> MetaClassReferences;
+
+ /// EHTypeReferences - uniqued class ehtype references.
+ llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> EHTypeReferences;
+
+ /// VTableDispatchMethods - List of methods for which we generate
+ /// vtable-based message dispatch.
+ llvm::DenseSet<Selector> VTableDispatchMethods;
+
+ /// DefinedMetaClasses - List of defined meta-classes.
+ std::vector<llvm::GlobalValue*> DefinedMetaClasses;
+
+ /// isVTableDispatchedSelector - Returns true if SEL is a
+ /// vtable-based selector.
+ bool isVTableDispatchedSelector(Selector Sel);
+
+ /// FinishNonFragileABIModule - Write out global data structures at the end of
+ /// processing a translation unit.
+ void FinishNonFragileABIModule();
+
+ /// AddModuleClassList - Add the given list of class pointers to the
+ /// module with the provided symbol and section names.
+ void AddModuleClassList(ArrayRef<llvm::GlobalValue*> Container,
+ const char *SymbolName,
+ const char *SectionName);
+
+ llvm::GlobalVariable * BuildClassRoTInitializer(unsigned flags,
+ unsigned InstanceStart,
+ unsigned InstanceSize,
+ const ObjCImplementationDecl *ID);
+ llvm::GlobalVariable * BuildClassMetaData(std::string &ClassName,
+ llvm::Constant *IsAGV,
+ llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV,
+ bool HiddenVisibility);
+
+ llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+
+ llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
+
+ /// EmitMethodList - Emit the method list for the given
+ /// implementation. The return value has type MethodListnfABITy.
+ llvm::Constant *EmitMethodList(Twine Name,
+ const char *Section,
+ ArrayRef<llvm::Constant*> Methods);
+ /// EmitIvarList - Emit the ivar list for the given
+ /// implementation. If ForClass is true the list of class ivars
+ /// (i.e. metaclass ivars) is emitted, otherwise the list of
+ /// interface ivars will be emitted. The return value has type
+ /// IvarListnfABIPtrTy.
+ llvm::Constant *EmitIvarList(const ObjCImplementationDecl *ID);
+
+ llvm::Constant *EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar,
+ unsigned long int offset);
+
+ /// GetOrEmitProtocol - Get the protocol object for the given
+ /// declaration, emitting it if necessary. The return value has type
+ /// ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocol(const ObjCProtocolDecl *PD);
+
+ /// GetOrEmitProtocolRef - Get a forward reference to the protocol
+ /// object for the given declaration, emitting it if needed. These
+ /// forward references will be filled in with empty bodies if no
+ /// definition is seen. The return value has type ProtocolPtrTy.
+ virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD);
+
+ /// EmitProtocolList - Generate the list of referenced
+ /// protocols. The return value has type ProtocolListPtrTy.
+ llvm::Constant *EmitProtocolList(Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end);
+
+ CodeGen::RValue EmitVTableMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
+ /// GetClassGlobal - Return the global variable for the Objective-C
+ /// class of the given name.
+ llvm::GlobalVariable *GetClassGlobal(const std::string &Name);
+
+ /// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given class reference.
+ llvm::Value *EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ llvm::Value *EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II);
+
+ llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder);
+
+ /// EmitSuperClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
+ /// for the given super class reference.
+ llvm::Value *EmitSuperClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ /// EmitMetaClassRef - Return a Value* holding the address of the _class_t
+ /// metadata for the given class.
+ llvm::Value *EmitMetaClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ /// ObjCIvarOffsetVariable - Returns the ivar offset variable for
+ /// the given ivar.
+ ///
+ llvm::GlobalVariable * ObjCIvarOffsetVariable(
+ const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar);
+
+ /// EmitSelector - Return a Value*, of type ObjCTypes.SelectorPtrTy,
+ /// for the given selector.
+ llvm::Value *EmitSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval=false);
+
+ /// GetInterfaceEHType - Get the cached ehtype for the given Objective-C
+ /// interface. The return value has type EHTypePtrTy.
+ llvm::Constant *GetInterfaceEHType(const ObjCInterfaceDecl *ID,
+ bool ForDefinition);
+
+ const char *getMetaclassSymbolPrefix() const {
+ return "OBJC_METACLASS_$_";
+ }
+
+ const char *getClassSymbolPrefix() const {
+ return "OBJC_CLASS_$_";
+ }
+
+ void GetClassSizeInfo(const ObjCImplementationDecl *OID,
+ uint32_t &InstanceStart,
+ uint32_t &InstanceSize);
+
+ // Shamelessly stolen from Analysis/CFRefCount.cpp
+ Selector GetNullarySelector(const char* name) const {
+ IdentifierInfo* II = &CGM.getContext().Idents.get(name);
+ return CGM.getContext().Selectors.getSelector(0, &II);
+ }
+
+ Selector GetUnarySelector(const char* name) const {
+ IdentifierInfo* II = &CGM.getContext().Idents.get(name);
+ return CGM.getContext().Selectors.getSelector(1, &II);
+ }
+
+ /// ImplementationIsNonLazy - Check whether the given category or
+ /// class implementation is "non-lazy".
+ bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const;
+
+public:
+ CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm);
+ // FIXME. All stubs for now!
+ virtual llvm::Function *ModuleInitFunction();
+
+ virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method);
+
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method);
+
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID);
+
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lvalue = false)
+ { return EmitSelector(Builder, Sel, lvalue); }
+
+ /// The NeXT/Apple runtimes do not support typed selectors; just emit an
+ /// untyped one.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method)
+ { return EmitSelector(Builder, Method->getSelector()); }
+
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *CMD);
+
+ virtual void GenerateClass(const ObjCImplementationDecl *ClassDecl);
+
+ virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) {}
+
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD);
+
+ virtual llvm::Constant *GetEHType(QualType T);
+
+ virtual llvm::Constant *GetPropertyGetFunction() {
+ return ObjCTypes.getGetPropertyFn();
+ }
+ virtual llvm::Constant *GetPropertySetFunction() {
+ return ObjCTypes.getSetPropertyFn();
+ }
+
+ virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) {
+ return ObjCTypes.getOptimizedSetPropertyFn(atomic, copy);
+ }
+
+ virtual llvm::Constant *GetSetStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+ }
+ virtual llvm::Constant *GetGetStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+ }
+ virtual llvm::Constant *GetCppAtomicObjectFunction() {
+ return ObjCTypes.getCppAtomicObjectFunction();
+ }
+
+ virtual llvm::Constant *EnumerationMutationFunction() {
+ return ObjCTypes.getEnumerationMutationFn();
+ }
+
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S);
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S);
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S);
+ virtual llvm::Value * EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj);
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst);
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ bool threadlocal = false);
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset);
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest);
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *dest, llvm::Value *src,
+ llvm::Value *size);
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+};
+
+/// A helper class for performing the null-initialization of a return
+/// value.
+struct NullReturnState {
+ llvm::BasicBlock *NullBB;
+ llvm::BasicBlock *callBB;
+ NullReturnState() : NullBB(0), callBB(0) {}
+
+ void init(CodeGenFunction &CGF, llvm::Value *receiver) {
+ // Make blocks for the null-init and call edges.
+ NullBB = CGF.createBasicBlock("msgSend.nullinit");
+ callBB = CGF.createBasicBlock("msgSend.call");
+
+ // Check for a null receiver and, if there is one, jump to the
+ // null-init block.
+ llvm::Value *isNull = CGF.Builder.CreateIsNull(receiver);
+ CGF.Builder.CreateCondBr(isNull, NullBB, callBB);
+
+ // Otherwise, start performing the call.
+ CGF.EmitBlock(callBB);
+ }
+
+ RValue complete(CodeGenFunction &CGF, RValue result, QualType resultType,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ if (!NullBB) return result;
+
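+ // If the call produced a scalar result, spill it to a temporary so it can
+ // be merged with the null-initialized value after the branch.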
+ llvm::Value *NullInitPtr = 0;
+ if (result.isScalar() && !resultType->isVoidType()) {
+ NullInitPtr = CGF.CreateTempAlloca(result.getScalarVal()->getType());
+ CGF.Builder.CreateStore(result.getScalarVal(), NullInitPtr);
+ }
+
+ // Finish the call path.
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("msgSend.cont");
+ if (CGF.HaveInsertPoint()) CGF.Builder.CreateBr(contBB);
+
+ // Emit the null-init block and perform the null-initialization there.
+ CGF.EmitBlock(NullBB);
+
+ // Release consumed arguments along the null-receiver path.
+ if (Method) {
+ CallArgList::const_iterator I = CallArgs.begin();
+ for (ObjCMethodDecl::param_const_iterator i = Method->param_begin(),
+ e = Method->param_end(); i != e; ++i, ++I) {
+ const ParmVarDecl *ParamDecl = (*i);
+ if (ParamDecl->hasAttr<NSConsumedAttr>()) {
+ RValue RV = I->RV;
+ assert(RV.isScalar() &&
+ "NullReturnState::complete - arg not on object");
+ CGF.EmitARCRelease(RV.getScalarVal(), true);
+ }
+ }
+ }
+
+ if (result.isScalar()) {
+ if (NullInitPtr)
+ CGF.EmitNullInitialization(NullInitPtr, resultType);
+ // Jump to the continuation block.
+ CGF.EmitBlock(contBB);
+ return NullInitPtr ? RValue::get(CGF.Builder.CreateLoad(NullInitPtr))
+ : result;
+ }
+
+ if (!resultType->isAnyComplexType()) {
+ assert(result.isAggregate() && "null init of non-aggregate result?");
+ CGF.EmitNullInitialization(result.getAggregateAddr(), resultType);
+ // Jump to the continuation block.
+ CGF.EmitBlock(contBB);
+ return result;
+ }
+
+ // _Complex type
+ // FIXME: It would now be easy to handle any other scalar type whose result
+ // is returned in memory due to ABI limitations.
+ CGF.EmitBlock(contBB);
+ CodeGenFunction::ComplexPairTy CallCV = result.getComplexVal();
+ llvm::Type *MemberType = CallCV.first->getType();
+ llvm::Constant *ZeroCV = llvm::Constant::getNullValue(MemberType);
+ // Create phi instruction for scalar complex value.
+ llvm::PHINode *PHIReal = CGF.Builder.CreatePHI(MemberType, 2);
+ PHIReal->addIncoming(ZeroCV, NullBB);
+ PHIReal->addIncoming(CallCV.first, callBB);
+ llvm::PHINode *PHIImag = CGF.Builder.CreatePHI(MemberType, 2);
+ PHIImag->addIncoming(ZeroCV, NullBB);
+ PHIImag->addIncoming(CallCV.second, callBB);
+ return RValue::getComplex(PHIReal, PHIImag);
+ }
+};
+
+} // end anonymous namespace
+
+/* *** Helper Functions *** */
+
+/// getConstantGEP() - Helper routine to construct simple GEPs.
+static llvm::Constant *getConstantGEP(llvm::LLVMContext &VMContext,
+ llvm::Constant *C,
+ unsigned idx0,
+ unsigned idx1) {
+ llvm::Value *Idxs[] = {
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx0),
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), idx1)
+ };
+ return llvm::ConstantExpr::getGetElementPtr(C, Idxs);
+}
+
+/// hasObjCExceptionAttribute - Return true if this class or any super
+/// class has the __objc_exception__ attribute.
+static bool hasObjCExceptionAttribute(ASTContext &Context,
+ const ObjCInterfaceDecl *OID) {
+ if (OID->hasAttr<ObjCExceptionAttr>())
+ return true;
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return hasObjCExceptionAttribute(Context, Super);
+ return false;
+}
+
+/* *** CGObjCMac Public Interface *** */
+
+CGObjCMac::CGObjCMac(CodeGen::CodeGenModule &cgm) : CGObjCCommonMac(cgm),
+ ObjCTypes(cgm) {
+ ObjCABI = 1;
+ EmitImageInfo();
+}
+
+/// GetClass - Return a reference to the class for the given interface
+/// decl.
+llvm::Value *CGObjCMac::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRef(Builder, ID);
+}
+
+/// GetSelector - Return the pointer to the unique'd string for this selector.
+llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lval) {
+ return EmitSelector(Builder, Sel, lval);
+}
+llvm::Value *CGObjCMac::GetSelector(CGBuilderTy &Builder, const ObjCMethodDecl
+ *Method) {
+ return EmitSelector(Builder, Method->getSelector());
+}
+
+llvm::Constant *CGObjCMac::GetEHType(QualType T) {
+ if (T->isObjCIdType() ||
+ T->isObjCQualifiedIdType()) {
+ return CGM.GetAddrOfRTTIDescriptor(
+ CGM.getContext().getObjCIdRedefinitionType(), /*ForEH=*/true);
+ }
+ if (T->isObjCClassType() ||
+ T->isObjCQualifiedClassType()) {
+ return CGM.GetAddrOfRTTIDescriptor(
+ CGM.getContext().getObjCClassRedefinitionType(), /*ForEH=*/true);
+ }
+ if (T->isObjCObjectPointerType())
+ return CGM.GetAddrOfRTTIDescriptor(T, /*ForEH=*/true);
+
+ llvm_unreachable("asking for catch type for ObjC type in fragile runtime");
+}
+
+/// Generate a constant CFString object.
+/*
+ struct __builtin_CFString {
+ const int *isa; // point to __CFConstantStringClassReference
+ int flags;
+ const char *str;
+ long length;
+ };
+*/
+
+/// or Generate a constant NSString object.
+/*
+ struct __builtin_NSString {
+ const int *isa; // point to __NSConstantStringClassReference
+ const char *str;
+ unsigned int length;
+ };
+*/
+
+llvm::Constant *CGObjCCommonMac::GenerateConstantString(
+ const StringLiteral *SL) {
+ return (CGM.getLangOpts().NoConstantCFStrings == 0 ?
+ CGM.GetAddrOfConstantCFString(SL) :
+ CGM.GetAddrOfConstantString(SL));
+}
+
+enum {
+ kCFTaggedObjectID_Integer = (1 << 1) + 1
+};
+
+/// Generates a message send where the super is the receiver. This is
+/// a message send to self with special delivery semantics indicating
+/// which class's method should be called.
+CodeGen::RValue
+CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CodeGen::CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ // Create and init a super structure; this is a (receiver, class)
+ // pair we will pass to objc_msgSendSuper.
+ llvm::Value *ObjCSuper =
+ CGF.CreateTempAlloca(ObjCTypes.SuperTy, "objc_super");
+ llvm::Value *ReceiverAsObject =
+ CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateStore(ReceiverAsObject,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0));
+
+ // If this is a class message the metaclass is passed as the target.
+ llvm::Value *Target;
+ if (IsClassMessage) {
+ if (isCategoryImpl) {
+ // A message sent to 'super' from a class method defined in a category
+ // implementation requires special treatment: we must retrieve the
+ // _metaclass_ for the current class, which is pointed at by the class's
+ // "isa" pointer. The following assumes that "isa" is the first ivar in a
+ // class (which it must be).
+ Target = EmitClassRef(CGF.Builder, Class->getSuperClass());
+ Target = CGF.Builder.CreateStructGEP(Target, 0);
+ Target = CGF.Builder.CreateLoad(Target);
+ } else {
+ llvm::Value *MetaClassPtr = EmitMetaClassRef(Class);
+ llvm::Value *SuperPtr = CGF.Builder.CreateStructGEP(MetaClassPtr, 1);
+ llvm::Value *Super = CGF.Builder.CreateLoad(SuperPtr);
+ Target = Super;
+ }
+ }
+ else if (isCategoryImpl)
+ Target = EmitClassRef(CGF.Builder, Class->getSuperClass());
+ else {
+ llvm::Value *ClassPtr = EmitSuperClassRef(Class);
+ ClassPtr = CGF.Builder.CreateStructGEP(ClassPtr, 1);
+ Target = CGF.Builder.CreateLoad(ClassPtr);
+ }
+ // FIXME: We shouldn't need to do this cast, rectify the ASTContext and
+ // ObjCTypes types.
+ llvm::Type *ClassTy =
+ CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
+ Target = CGF.Builder.CreateBitCast(Target, ClassTy);
+ CGF.Builder.CreateStore(Target,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 1));
+ return EmitMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs, Method, ObjCTypes);
+}
+
+/// Generate code for a message send expression.
+CodeGen::RValue CGObjCMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) {
+ return EmitMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, Method, ObjCTypes);
+}
+
+CodeGen::RValue
+CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ llvm::Value *Sel,
+ llvm::Value *Arg0,
+ QualType Arg0Ty,
+ bool IsSuper,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ CallArgList ActualArgs;
+ if (!IsSuper)
+ Arg0 = CGF.Builder.CreateBitCast(Arg0, ObjCTypes.ObjectPtrTy);
+ ActualArgs.add(RValue::get(Arg0), Arg0Ty);
+ ActualArgs.add(RValue::get(Sel), CGF.getContext().getObjCSelType());
+ ActualArgs.addFrom(CallArgs);
+
+ // If we're calling a method, use the formal signature.
+ MessageSendInfo MSI = getMessageSendInfo(Method, ResultType, ActualArgs);
+
+ if (Method)
+ assert(CGM.getContext().getCanonicalType(Method->getResultType()) ==
+ CGM.getContext().getCanonicalType(ResultType) &&
+ "Result type mismatch!");
+
+ NullReturnState nullReturn;
+
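+ // Select the messenger entry point based on how the result is returned:
+ // the stret variant when the ABI returns the value through a hidden sret
+ // pointer, the fpret/fp2ret variants for certain floating-point returns,
+ // and the plain entry point otherwise. ObjCABI == 2 selects the
+ // non-fragile-ABI messengers.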
+ llvm::Constant *Fn = NULL;
+ if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
+ if (!IsSuper) nullReturn.init(CGF, Arg0);
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendStretFn2(IsSuper)
+ : ObjCTypes.getSendStretFn(IsSuper);
+ } else if (CGM.ReturnTypeUsesFPRet(ResultType)) {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFpretFn2(IsSuper)
+ : ObjCTypes.getSendFpretFn(IsSuper);
+ } else if (CGM.ReturnTypeUsesFP2Ret(ResultType)) {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFp2RetFn2(IsSuper)
+ : ObjCTypes.getSendFp2retFn(IsSuper);
+ } else {
+ Fn = (ObjCABI == 2) ? ObjCTypes.getSendFn2(IsSuper)
+ : ObjCTypes.getSendFn(IsSuper);
+ }
+
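+ // Under ARC, a send to a method with any ns_consumed parameter also needs
+ // the null-receiver check, so that consumed arguments can be released on
+ // the nil path (see NullReturnState::complete).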
+ bool requiresnullCheck = false;
+ if (CGM.getLangOpts().ObjCAutoRefCount && Method)
+ for (ObjCMethodDecl::param_const_iterator i = Method->param_begin(),
+ e = Method->param_end(); i != e; ++i) {
+ const ParmVarDecl *ParamDecl = (*i);
+ if (ParamDecl->hasAttr<NSConsumedAttr>()) {
+ if (!nullReturn.NullBB)
+ nullReturn.init(CGF, Arg0);
+ requiresnullCheck = true;
+ break;
+ }
+ }
+
+ Fn = llvm::ConstantExpr::getBitCast(Fn, MSI.MessengerType);
+ RValue rvalue = CGF.EmitCall(MSI.CallInfo, Fn, Return, ActualArgs);
+ return nullReturn.complete(CGF, rvalue, ResultType, CallArgs,
+ requiresnullCheck ? Method : 0);
+}
+
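+/// GetGCAttrTypeForType - Determine the GC attribute for a captured field
+/// type: __strong-qualified types, object pointers, and block pointers are
+/// Strong; __weak is Weak; __unsafe_unretained is GCNone; ordinary C pointers
+/// are classified by their pointee type, and anything else is GCNone.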
+static Qualifiers::GC GetGCAttrTypeForType(ASTContext &Ctx, QualType FQT) {
+ if (FQT.isObjCGCStrong())
+ return Qualifiers::Strong;
+
+ if (FQT.isObjCGCWeak() || FQT.getObjCLifetime() == Qualifiers::OCL_Weak)
+ return Qualifiers::Weak;
+
+ // check for __unsafe_unretained
+ if (FQT.getObjCLifetime() == Qualifiers::OCL_ExplicitNone)
+ return Qualifiers::GCNone;
+
+ if (FQT->isObjCObjectPointerType() || FQT->isBlockPointerType())
+ return Qualifiers::Strong;
+
+ if (const PointerType *PT = FQT->getAs<PointerType>())
+ return GetGCAttrTypeForType(Ctx, PT->getPointeeType());
+
+ return Qualifiers::GCNone;
+}
+
+llvm::Constant *CGObjCCommonMac::BuildGCBlockLayout(CodeGenModule &CGM,
+ const CGBlockInfo &blockInfo) {
+ llvm::Constant *nullPtr = llvm::Constant::getNullValue(CGM.Int8PtrTy);
+
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC &&
+ !CGM.getLangOpts().ObjCAutoRefCount)
+ return nullPtr;
+
+ bool hasUnion = false;
+ SkipIvars.clear();
+ IvarsInfo.clear();
+ unsigned WordSizeInBits = CGM.getContext().getTargetInfo().getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getContext().getTargetInfo().getCharWidth();
+
+ // __isa is the first field in the block descriptor and, by the runtime's
+ // convention, must be assumed to be GC'able.
+ IvarsInfo.push_back(GC_IVAR(0, 1));
+
+ const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+
+ // Calculate the basic layout of the block structure.
+ const llvm::StructLayout *layout =
+ CGM.getTargetData().getStructLayout(blockInfo.StructureType);
+
+ // Ignore the optional 'this' capture: C++ objects are not assumed
+ // to be GC'ed.
+
+ // Walk the captured variables.
+ for (BlockDecl::capture_const_iterator ci = blockDecl->capture_begin(),
+ ce = blockDecl->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ QualType type = variable->getType();
+
+ const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
+
+ // Ignore constant captures.
+ if (capture.isConstant()) continue;
+
+ uint64_t fieldOffset = layout->getElementOffset(capture.getIndex());
+
+ // __block variables are passed by their descriptor address.
+ if (ci->isByRef()) {
+ IvarsInfo.push_back(GC_IVAR(fieldOffset, /*size in words*/ 1));
+ continue;
+ }
+
+ assert(!type->isArrayType() && "array variable should not be caught");
+ if (const RecordType *record = type->getAs<RecordType>()) {
+ BuildAggrIvarRecordLayout(record, fieldOffset, true, hasUnion);
+ continue;
+ }
+
+ Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), type);
+ unsigned fieldSize = CGM.getContext().getTypeSize(type);
+
+ if (GCAttr == Qualifiers::Strong)
+ IvarsInfo.push_back(GC_IVAR(fieldOffset,
+ fieldSize / WordSizeInBits));
+ else if (GCAttr == Qualifiers::GCNone || GCAttr == Qualifiers::Weak)
+ SkipIvars.push_back(GC_IVAR(fieldOffset,
+ fieldSize / ByteSizeInBits));
+ }
+
+ if (IvarsInfo.empty())
+ return nullPtr;
+
+ // Sort on byte position; captures might not be allocated in order,
+ // and unions can do funny things.
+ llvm::array_pod_sort(IvarsInfo.begin(), IvarsInfo.end());
+ llvm::array_pod_sort(SkipIvars.begin(), SkipIvars.end());
+
+ std::string BitMap;
+ llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
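+ // Optionally dump the computed block layout bitmap for debugging.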
+ if (CGM.getLangOpts().ObjCGCBitmapPrint) {
+ printf("\n block variable layout for block: ");
+ const unsigned char *s = (unsigned char*)BitMap.c_str();
+ for (unsigned i = 0, e = BitMap.size(); i < e; i++)
+ if (!(s[i] & 0xf0))
+ printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
+ else
+ printf("0x%x%s", s[i], s[i] != 0 ? ", " : "");
+ printf("\n");
+ }
+
+ return C;
+}
+
+llvm::Value *CGObjCMac::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+ // FIXME: I don't understand why gcc generates this, or where it is
+ // resolved. Investigate. It's also wasteful to look this up over and over.
+ LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+ return llvm::ConstantExpr::getBitCast(GetProtocolRef(PD),
+ ObjCTypes.getExternalProtocolPtrTy());
+}
+
+void CGObjCCommonMac::GenerateProtocol(const ObjCProtocolDecl *PD) {
+ // FIXME: We shouldn't need this, the protocol decl should contain enough
+ // information to tell us whether this was a declaration or a definition.
+ DefinedProtocols.insert(PD->getIdentifier());
+
+ // If we have generated a forward reference to this protocol, emit
+ // it now. Otherwise do nothing, the protocol objects are lazily
+ // emitted.
+ if (Protocols.count(PD->getIdentifier()))
+ GetOrEmitProtocol(PD);
+}
+
+llvm::Constant *CGObjCCommonMac::GetProtocolRef(const ObjCProtocolDecl *PD) {
+ if (DefinedProtocols.count(PD->getIdentifier()))
+ return GetOrEmitProtocol(PD);
+
+ return GetOrEmitProtocolRef(PD);
+}
+
+/*
+// APPLE LOCAL radar 4585769 - Objective-C 1.0 extensions
+struct _objc_protocol {
+struct _objc_protocol_extension *isa;
+char *protocol_name;
+struct _objc_protocol_list *protocol_list;
+struct _objc__method_prototype_list *instance_methods;
+struct _objc__method_prototype_list *class_methods
+};
+
+See EmitProtocolExtension().
+*/
+llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *Entry = Protocols[PD->getIdentifier()];
+
+ // Early exit if a defining object has already been generated.
+ if (Entry && Entry->hasInitializer())
+ return Entry;
+
+ // Use the protocol definition, if there is one.
+ if (const ObjCProtocolDecl *Def = PD->getDefinition())
+ PD = Def;
+
+ // FIXME: I don't understand why gcc generates this, or where it is
+ // resolved. Investigate. It's also wasteful to look this up over and over.
+ LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
+
+ // Construct method lists.
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt;
+ for (ObjCProtocolDecl::instmeth_iterator
+ i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (!C)
+ return GetOrEmitProtocolRef(PD);
+
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptInstanceMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
+ } else {
+ InstanceMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
+ }
+ }
+
+ for (ObjCProtocolDecl::classmeth_iterator
+ i = PD->classmeth_begin(), e = PD->classmeth_end(); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (!C)
+ return GetOrEmitProtocolRef(PD);
+
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptClassMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
+ } else {
+ ClassMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
+ }
+ }
+
+ MethodTypesExt.insert(MethodTypesExt.end(),
+ OptMethodTypesExt.begin(), OptMethodTypesExt.end());
+
+ llvm::Constant *Values[] = {
+ EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods,
+ MethodTypesExt),
+ GetClassName(PD->getIdentifier()),
+ EmitProtocolList("\01L_OBJC_PROTOCOL_REFS_" + PD->getName(),
+ PD->protocol_begin(),
+ PD->protocol_end()),
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_" + PD->getName(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods),
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_" + PD->getName(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods)
+ };
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
+ Values);
+
+ if (Entry) {
+ // Already created, fix the linkage and update the initializer.
+ Entry->setLinkage(llvm::GlobalValue::InternalLinkage);
+ Entry->setInitializer(Init);
+ } else {
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ "\01L_OBJC_PROTOCOL_" + PD->getName());
+ Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+ // FIXME: Is this necessary? Why only for protocol?
+ Entry->setAlignment(4);
+
+ Protocols[PD->getIdentifier()] = Entry;
+ }
+ CGM.AddUsedGlobal(Entry);
+
+ return Entry;
+}
+
+llvm::Constant *CGObjCMac::GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ if (!Entry) {
+ // We use the initializer as a marker of whether this is a forward
+ // reference or not. At module finalization we add the empty
+ // contents for protocols which were referenced but never defined.
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "\01L_OBJC_PROTOCOL_" + PD->getName());
+ Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
+ // FIXME: Is this necessary? Why only for protocol?
+ Entry->setAlignment(4);
+ }
+
+ return Entry;
+}
+
+/*
+ struct _objc_protocol_extension {
+ uint32_t size;
+ struct objc_method_description_list *optional_instance_methods;
+ struct objc_method_description_list *optional_class_methods;
+ struct objc_property_list *instance_properties;
+ const char ** extendedMethodTypes;
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
+ ArrayRef<llvm::Constant*> OptInstanceMethods,
+ ArrayRef<llvm::Constant*> OptClassMethods,
+ ArrayRef<llvm::Constant*> MethodTypesExt) {
+ uint64_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
+ llvm::Constant *Values[] = {
+ llvm::ConstantInt::get(ObjCTypes.IntTy, Size),
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_OPT_"
+ + PD->getName(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ OptInstanceMethods),
+ EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_OPT_" + PD->getName(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ OptClassMethods),
+ EmitPropertyList("\01L_OBJC_$_PROP_PROTO_LIST_" + PD->getName(), 0, PD,
+ ObjCTypes),
+ EmitProtocolMethodTypes("\01L_OBJC_PROTOCOL_METHOD_TYPES_" + PD->getName(),
+ MethodTypesExt, ObjCTypes)
+ };
+
+ // Return null if no extension bits are used.
+ if (Values[1]->isNullValue() && Values[2]->isNullValue() &&
+ Values[3]->isNullValue() && Values[4]->isNullValue())
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.ProtocolExtensionTy, Values);
+
+ // No special section, but goes in llvm.used
+ return CreateMetadataVar("\01L_OBJC_PROTOCOLEXT_" + PD->getName(),
+ Init,
+ 0, 0, true);
+}
+
+/*
+ struct objc_protocol_list {
+ struct objc_protocol_list *next;
+ long count;
+ Protocol *list[];
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitProtocolList(Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end) {
+ llvm::SmallVector<llvm::Constant*, 16> ProtocolRefs;
+
+ for (; begin != end; ++begin)
+ ProtocolRefs.push_back(GetProtocolRef(*begin));
+
+ // Just return null for empty protocol lists
+ if (ProtocolRefs.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+
+ // This list is null terminated.
+ ProtocolRefs.push_back(llvm::Constant::getNullValue(ObjCTypes.ProtocolPtrTy));
+
+ llvm::Constant *Values[3];
+ // This field is only used by the runtime.
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy,
+ ProtocolRefs.size() - 1);
+ Values[2] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolPtrTy,
+ ProtocolRefs.size()),
+ ProtocolRefs);
+
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init, "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ 4, false);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListPtrTy);
+}
+
+void CGObjCCommonMac::
+PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*,16> &PropertySet,
+ llvm::SmallVectorImpl<llvm::Constant*> &Properties,
+ const Decl *Container,
+ const ObjCProtocolDecl *PROTO,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ for (ObjCProtocolDecl::protocol_iterator P = PROTO->protocol_begin(),
+ E = PROTO->protocol_end(); P != E; ++P)
+ PushProtocolProperties(PropertySet, Properties, Container, (*P), ObjCTypes);
+ for (ObjCContainerDecl::prop_iterator I = PROTO->prop_begin(),
+ E = PROTO->prop_end(); I != E; ++I) {
+ const ObjCPropertyDecl *PD = *I;
+ if (!PropertySet.insert(PD->getIdentifier()))
+ continue;
+ llvm::Constant *Prop[] = {
+ GetPropertyName(PD->getIdentifier()),
+ GetPropertyTypeString(PD, Container)
+ };
+ Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy, Prop));
+ }
+}
+
+/*
+ struct _objc_property {
+ const char * const name;
+ const char * const attributes;
+ };
+
+ struct _objc_property_list {
+ uint32_t entsize; // sizeof (struct _objc_property)
+ uint32_t prop_count;
+ struct _objc_property[prop_count];
+ };
+*/
+llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
+ const Decl *Container,
+ const ObjCContainerDecl *OCD,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ llvm::SmallVector<llvm::Constant*, 16> Properties;
+ llvm::SmallPtrSet<const IdentifierInfo*, 16> PropertySet;
+ for (ObjCContainerDecl::prop_iterator I = OCD->prop_begin(),
+ E = OCD->prop_end(); I != E; ++I) {
+ const ObjCPropertyDecl *PD = *I;
+ PropertySet.insert(PD->getIdentifier());
+ llvm::Constant *Prop[] = {
+ GetPropertyName(PD->getIdentifier()),
+ GetPropertyTypeString(PD, Container)
+ };
+ Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy,
+ Prop));
+ }
+ if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD)) {
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ P = OID->all_referenced_protocol_begin(),
+ E = OID->all_referenced_protocol_end(); P != E; ++P)
+ PushProtocolProperties(PropertySet, Properties, Container, (*P),
+ ObjCTypes);
+ }
+ else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(OCD)) {
+ for (ObjCCategoryDecl::protocol_iterator P = CD->protocol_begin(),
+ E = CD->protocol_end(); P != E; ++P)
+ PushProtocolProperties(PropertySet, Properties, Container, (*P),
+ ObjCTypes);
+ }
+
+ // Return null for empty list.
+ if (Properties.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+
+ unsigned PropertySize =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.PropertyTy);
+ llvm::Constant *Values[3];
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, PropertySize);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Properties.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.PropertyTy,
+ Properties.size());
+ Values[2] = llvm::ConstantArray::get(AT, Properties);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init,
+ (ObjCABI == 2) ? "__DATA, __objc_const" :
+ "__OBJC,__property,regular,no_dead_strip",
+ (ObjCABI == 2) ? 8 : 4,
+ true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.PropertyListPtrTy);
+}
+
+llvm::Constant *
+CGObjCCommonMac::EmitProtocolMethodTypes(Twine Name,
+ ArrayRef<llvm::Constant*> MethodTypes,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ // Return null for empty list.
+ if (MethodTypes.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.Int8PtrPtrTy);
+
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ MethodTypes.size());
+ llvm::Constant *Init = llvm::ConstantArray::get(AT, MethodTypes);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar(Name, Init,
+ (ObjCABI == 2) ? "__DATA, __objc_const" : 0,
+ (ObjCABI == 2) ? 8 : 4,
+ true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.Int8PtrPtrTy);
+}
+
+/*
+ struct objc_method_description_list {
+ int count;
+ struct objc_method_description list[];
+ };
+*/
+llvm::Constant *
+CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
+ llvm::Constant *Desc[] = {
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy),
+ GetMethodVarType(MD)
+ };
+ if (!Desc[1])
+ return 0;
+
+ return llvm::ConstantStruct::get(ObjCTypes.MethodDescriptionTy,
+ Desc);
+}
+
+llvm::Constant *
+CGObjCMac::EmitMethodDescList(Twine Name, const char *Section,
+ ArrayRef<llvm::Constant*> Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
+
+ llvm::Constant *Values[2];
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodDescriptionTy,
+ Methods.size());
+ Values[1] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.MethodDescriptionListPtrTy);
+}
+
+/*
+ struct _objc_category {
+ char *category_name;
+ char *class_name;
+ struct _objc_method_list *instance_methods;
+ struct _objc_method_list *class_methods;
+ struct _objc_protocol_list *protocols;
+ uint32_t size; // <rdar://4585769>
+ struct _objc_property_list *instance_properties;
+ };
+*/
+void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.CategoryTy);
+
+ // FIXME: This is poor design; the OCD should have a pointer to the category
+ // decl. Additionally, note that Category can be null for the @implementation
+ // w/o an @interface case. Sema should just create one for us as it does for
+ // @implementation so everyone else can live life under a clear blue sky.
+ const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
+ const ObjCCategoryDecl *Category =
+ Interface->FindCategoryDeclaration(OCD->getIdentifier());
+
+ SmallString<256> ExtName;
+ llvm::raw_svector_ostream(ExtName) << Interface->getName() << '_'
+ << OCD->getName();
+
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethods, ClassMethods;
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ i = OCD->instmeth_begin(), e = OCD->instmeth_end(); i != e; ++i) {
+ // Instance methods should always be defined.
+ InstanceMethods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ i = OCD->classmeth_begin(), e = OCD->classmeth_end(); i != e; ++i) {
+ // Class methods should always be defined.
+ ClassMethods.push_back(GetMethodConstant(*i));
+ }
+
+ llvm::Constant *Values[7];
+ Values[0] = GetClassName(OCD->getIdentifier());
+ Values[1] = GetClassName(Interface->getIdentifier());
+ LazySymbols.insert(Interface->getIdentifier());
+ Values[2] =
+ EmitMethodList("\01L_OBJC_CATEGORY_INSTANCE_METHODS_" + ExtName.str(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ Values[3] =
+ EmitMethodList("\01L_OBJC_CATEGORY_CLASS_METHODS_" + ExtName.str(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods);
+ if (Category) {
+ Values[4] =
+ EmitProtocolList("\01L_OBJC_CATEGORY_PROTOCOLS_" + ExtName.str(),
+ Category->protocol_begin(),
+ Category->protocol_end());
+ } else {
+ Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ }
+ Values[5] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+
+ // If there is no category @interface then there can be no properties.
+ if (Category) {
+ Values[6] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes);
+ } else {
+ Values[6] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ }
+
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.CategoryTy,
+ Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar("\01L_OBJC_CATEGORY_" + ExtName.str(), Init,
+ "__OBJC,__category,regular,no_dead_strip",
+ 4, true);
+ DefinedCategories.push_back(GV);
+ DefinedCategoryNames.insert(ExtName.str());
+ // Method definition entries must be cleared for the next implementation.
+ MethodDefinitions.clear();
+}
+
+// FIXME: Get from somewhere?
+enum ClassFlags {
+ eClassFlags_Factory = 0x00001,
+ eClassFlags_Meta = 0x00002,
+ // <rdar://5142207>
+ eClassFlags_HasCXXStructors = 0x02000,
+ eClassFlags_Hidden = 0x20000,
+ eClassFlags_ABI2_Hidden = 0x00010,
+ eClassFlags_ABI2_HasCXXStructors = 0x00004 // <rdar://4923634>
+};
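+
+/*
+  A minimal worked example of how these bits combine (purely illustrative):
+  a fragile-ABI class that is hidden and has C++ structors is flagged with
+
+    unsigned Flags = eClassFlags_Factory           // 0x00001
+                   | eClassFlags_HasCXXStructors   // 0x02000
+                   | eClassFlags_Hidden;           // 0x20000
+    // Flags == 0x22001, stored into the 'info' field of _objc_class below.
+*/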
+
+/*
+ struct _objc_class {
+ Class isa;
+ Class super_class;
+ const char *name;
+ long version;
+ long info;
+ long instance_size;
+ struct _objc_ivar_list *ivars;
+ struct _objc_method_list *methods;
+ struct _objc_cache *cache;
+ struct _objc_protocol_list *protocols;
+ // Objective-C 1.0 extensions (<rdar://4585769>)
+ const char *ivar_layout;
+ struct _objc_class_ext *ext;
+ };
+
+ See EmitClassExtension();
+*/
+void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
+ DefinedSymbols.insert(ID->getIdentifier());
+
+ std::string ClassName = ID->getNameAsString();
+ // FIXME: Gross
+ ObjCInterfaceDecl *Interface =
+ const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
+ llvm::Constant *Protocols =
+ EmitProtocolList("\01L_OBJC_CLASS_PROTOCOLS_" + ID->getName(),
+ Interface->all_referenced_protocol_begin(),
+ Interface->all_referenced_protocol_end());
+ unsigned Flags = eClassFlags_Factory;
+ if (ID->hasCXXStructors())
+ Flags |= eClassFlags_HasCXXStructors;
+ unsigned Size =
+ CGM.getContext().getASTObjCImplementationLayout(ID).getSize().getQuantity();
+
+ // FIXME: Set CXX-structors flag.
+ if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
+ Flags |= eClassFlags_Hidden;
+
+ llvm::SmallVector<llvm::Constant*, 16> InstanceMethods, ClassMethods;
+ for (ObjCImplementationDecl::instmeth_iterator
+ i = ID->instmeth_begin(), e = ID->instmeth_end(); i != e; ++i) {
+ // Instance methods should always be defined.
+ InstanceMethods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCImplementationDecl::classmeth_iterator
+ i = ID->classmeth_begin(), e = ID->classmeth_end(); i != e; ++i) {
+ // Class methods should always be defined.
+ ClassMethods.push_back(GetMethodConstant(*i));
+ }
+
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = ID->propimpl_begin(), e = ID->propimpl_end(); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ InstanceMethods.push_back(C);
+ if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ InstanceMethods.push_back(C);
+ }
+ }
+
+ llvm::Constant *Values[12];
+ Values[ 0] = EmitMetaClass(ID, Protocols, ClassMethods);
+ if (ObjCInterfaceDecl *Super = Interface->getSuperClass()) {
+ // Record a reference to the super class.
+ LazySymbols.insert(Super->getIdentifier());
+
+ Values[ 1] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Super->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ } else {
+ Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ }
+ Values[ 2] = GetClassName(ID->getIdentifier());
+ // Version is always 0.
+ Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
+ Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+ Values[ 6] = EmitIvarList(ID, false);
+ Values[ 7] =
+ EmitMethodList("\01L_OBJC_INSTANCE_METHODS_" + ID->getName(),
+ "__OBJC,__inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ // cache is always NULL.
+ Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
+ Values[ 9] = Protocols;
+ Values[10] = BuildIvarLayout(ID, true);
+ Values[11] = EmitClassExtension(ID);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
+ Values);
+ std::string Name("\01L_OBJC_CLASS_");
+ Name += ClassName;
+ const char *Section = "__OBJC,__class,regular,no_dead_strip";
+ // Check for a forward reference.
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (GV) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ GV->setLinkage(llvm::GlobalValue::InternalLinkage);
+ GV->setInitializer(Init);
+ GV->setSection(Section);
+ GV->setAlignment(4);
+ CGM.AddUsedGlobal(GV);
+ }
+ else
+ GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ DefinedClasses.push_back(GV);
+ // Method definition entries must be cleared for the next implementation.
+ MethodDefinitions.clear();
+}
+
+llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
+ llvm::Constant *Protocols,
+ ArrayRef<llvm::Constant*> Methods) {
+ unsigned Flags = eClassFlags_Meta;
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassTy);
+
+ if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
+ Flags |= eClassFlags_Hidden;
+
+ llvm::Constant *Values[12];
+ // The isa for the metaclass is the root of the hierarchy.
+ const ObjCInterfaceDecl *Root = ID->getClassInterface();
+ while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
+ Root = Super;
+ Values[ 0] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Root->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ // The super class for the metaclass is emitted as the name of the
+ // super class. The runtime fixes this up to point to the
+ // *metaclass* for the super class.
+ if (ObjCInterfaceDecl *Super = ID->getClassInterface()->getSuperClass()) {
+ Values[ 1] =
+ llvm::ConstantExpr::getBitCast(GetClassName(Super->getIdentifier()),
+ ObjCTypes.ClassPtrTy);
+ } else {
+ Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ }
+ Values[ 2] = GetClassName(ID->getIdentifier());
+ // Version is always 0.
+ Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
+ Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
+ Values[ 6] = EmitIvarList(ID, true);
+ Values[ 7] =
+ EmitMethodList("\01L_OBJC_CLASS_METHODS_" + ID->getNameAsString(),
+ "__OBJC,__cls_meth,regular,no_dead_strip",
+ Methods);
+ // cache is always NULL.
+ Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
+ Values[ 9] = Protocols;
+ // ivar_layout for metaclass is always NULL.
+ Values[10] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ // The class extension is always unused for metaclasses.
+ Values[11] = llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
+ Values);
+
+ std::string Name("\01L_OBJC_METACLASS_");
+ Name += ID->getNameAsCString();
+
+ // Check for a forward reference.
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (GV) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ GV->setLinkage(llvm::GlobalValue::InternalLinkage);
+ GV->setInitializer(Init);
+ } else {
+ GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init, Name);
+ }
+ GV->setSection("__OBJC,__meta_class,regular,no_dead_strip");
+ GV->setAlignment(4);
+ CGM.AddUsedGlobal(GV);
+
+ return GV;
+}
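+
+/*
+  Sketch of the wiring for a hypothetical hierarchy Root <- Middle <- Leaf
+  (class names made up for illustration). Because the slots are filled with
+  bitcast class-name strings that the runtime patches up, Leaf's metaclass
+  record looks roughly like:
+
+    struct _objc_class_names { const char *isa, *super_class, *name; };
+    static const struct _objc_class_names leaf_metaclass = {
+      "Root",     // isa: the root of the hierarchy
+      "Middle",   // fixed up by the runtime to Middle's *metaclass*
+      "Leaf"
+    };
+*/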
+
+llvm::Constant *CGObjCMac::EmitMetaClassRef(const ObjCInterfaceDecl *ID) {
+ std::string Name = "\01L_OBJC_METACLASS_" + ID->getNameAsString();
+
+ // FIXME: Should we look these up somewhere other than the module? It's a bit
+ // silly since we only generate these while processing an implementation, so
+ // exactly one pointer would work if we knew when we entered/exited an
+ // implementation block.
+
+ // Check for an existing forward reference.
+ // A metaclass with internal linkage may have been defined previously;
+ // pass 'true' as the second argument so it is still returned.
+ if (llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name,
+ true)) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward metaclass reference has incorrect type.");
+ return GV;
+ } else {
+ // Generate as an external reference to keep a consistent
+ // module. This will be patched up when we emit the metaclass.
+ return new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ Name);
+ }
+}
+
+llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
+ std::string Name = "\01L_OBJC_CLASS_" + ID->getNameAsString();
+
+ if (llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name,
+ true)) {
+ assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
+ "Forward class metadata reference has incorrect type.");
+ return GV;
+ } else {
+ return new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ Name);
+ }
+}
+
+/*
+ struct objc_class_ext {
+ uint32_t size;
+ const char *weak_ivar_layout;
+ struct _objc_property_list *properties;
+ };
+*/
+llvm::Constant *
+CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
+ uint64_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
+
+ llvm::Constant *Values[3];
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] = BuildIvarLayout(ID, false);
+ Values[2] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
+ ID, ID->getClassInterface(), ObjCTypes);
+
+ // Return null if no extension bits are used.
+ if (Values[1]->isNullValue() && Values[2]->isNullValue())
+ return llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.ClassExtensionTy, Values);
+ return CreateMetadataVar("\01L_OBJC_CLASSEXT_" + ID->getName(),
+ Init, "__OBJC,__class_ext,regular,no_dead_strip",
+ 4, true);
+}
+
+/*
+ struct objc_ivar {
+ char *ivar_name;
+ char *ivar_type;
+ int ivar_offset;
+ };
+
+ struct objc_ivar_list {
+ int ivar_count;
+ struct objc_ivar list[count];
+ };
+*/
+llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
+ bool ForClass) {
+ std::vector<llvm::Constant*> Ivars;
+
+ // When emitting the root class GCC emits ivar entries for the
+ // actual class structure. It is not clear if we need to follow this
+ // behavior; for now let's try to get away with not doing it. If we do,
+ // the cleanest solution would be to make up an ObjCInterfaceDecl
+ // for the class.
+ if (ForClass)
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
+
+ for (const ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
+ // Ignore unnamed bit-fields.
+ if (!IVD->getDeclName())
+ continue;
+ llvm::Constant *Ivar[] = {
+ GetMethodVarName(IVD->getIdentifier()),
+ GetMethodVarType(IVD),
+ llvm::ConstantInt::get(ObjCTypes.IntTy,
+ ComputeIvarBaseOffset(CGM, OID, IVD))
+ };
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarTy, Ivar));
+ }
+
+ // Return null for empty list.
+ if (Ivars.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+
+ llvm::Constant *Values[2];
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarTy,
+ Ivars.size());
+ Values[1] = llvm::ConstantArray::get(AT, Ivars);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV;
+ if (ForClass)
+ GV = CreateMetadataVar("\01L_OBJC_CLASS_VARIABLES_" + ID->getName(),
+ Init, "__OBJC,__class_vars,regular,no_dead_strip",
+ 4, true);
+ else
+ GV = CreateMetadataVar("\01L_OBJC_INSTANCE_VARIABLES_" + ID->getName(),
+ Init, "__OBJC,__instance_vars,regular,no_dead_strip",
+ 4, true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListPtrTy);
+}
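+
+/*
+  Illustrative sketch with hypothetical ivars and placeholder offsets: for an
+  interface declaring two named ivars, EmitIvarList produces a list shaped
+  like the following, where each offset is the value returned by
+  ComputeIvarBaseOffset.
+
+    struct objc_ivar { const char *name; const char *type; int offset; };
+    static const struct {
+      int ivar_count;                   // 2
+      struct objc_ivar list[2];
+    } example_ivar_list = {
+      2,
+      { { "_flags", "i", 4 },           // "i" / "@" are standard ObjC type
+        { "_name",  "@", 8 } }          // encodings; offsets are placeholders
+    };
+*/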
+
+/*
+ struct objc_method {
+ SEL method_name;
+ char *method_types;
+ void *method;
+ };
+
+ struct objc_method_list {
+ struct objc_method_list *obsolete;
+ int count;
+ struct objc_method methods_list[count];
+ };
+*/
+
+/// GetMethodConstant - Return a struct objc_method constant for the
+/// given method if it has been defined. The result is null if the
+/// method has not been defined. The return value has type MethodPtrTy.
+llvm::Constant *CGObjCMac::GetMethodConstant(const ObjCMethodDecl *MD) {
+ llvm::Function *Fn = GetMethodDefinition(MD);
+ if (!Fn)
+ return 0;
+
+ llvm::Constant *Method[] = {
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy),
+ GetMethodVarType(MD),
+ llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy)
+ };
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+}
+
+llvm::Constant *CGObjCMac::EmitMethodList(Twine Name,
+ const char *Section,
+ ArrayRef<llvm::Constant*> Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodListPtrTy);
+
+ llvm::Constant *Values[3];
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
+ Methods.size());
+ Values[2] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV = CreateMetadataVar(Name, Init, Section, 4, true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListPtrTy);
+}
+
+llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) {
+ SmallString<256> Name;
+ GetNameForMethod(OMD, CD, Name);
+
+ CodeGenTypes &Types = CGM.getTypes();
+ llvm::FunctionType *MethodTy =
+ Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD));
+ llvm::Function *Method =
+ llvm::Function::Create(MethodTy,
+ llvm::GlobalValue::InternalLinkage,
+ Name.str(),
+ &CGM.getModule());
+ MethodDefinitions.insert(std::make_pair(OMD, Method));
+
+ return Method;
+}
+
+llvm::GlobalVariable *
+CGObjCCommonMac::CreateMetadataVar(Twine Name,
+ llvm::Constant *Init,
+ const char *Section,
+ unsigned Align,
+ bool AddToUsed) {
+ llvm::Type *Ty = Init->getType();
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Ty, false,
+ llvm::GlobalValue::InternalLinkage, Init, Name);
+ if (Section)
+ GV->setSection(Section);
+ if (Align)
+ GV->setAlignment(Align);
+ if (AddToUsed)
+ CGM.AddUsedGlobal(GV);
+ return GV;
+}
+
+llvm::Function *CGObjCMac::ModuleInitFunction() {
+ // Abuse this interface function as a place to finalize.
+ FinishModule();
+ return NULL;
+}
+
+llvm::Constant *CGObjCMac::GetPropertyGetFunction() {
+ return ObjCTypes.getGetPropertyFn();
+}
+
+llvm::Constant *CGObjCMac::GetPropertySetFunction() {
+ return ObjCTypes.getSetPropertyFn();
+}
+
+llvm::Constant *CGObjCMac::GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) {
+ return ObjCTypes.getOptimizedSetPropertyFn(atomic, copy);
+}
+
+llvm::Constant *CGObjCMac::GetGetStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+}
+llvm::Constant *CGObjCMac::GetSetStructFunction() {
+ return ObjCTypes.getCopyStructFn();
+}
+
+llvm::Constant *CGObjCMac::GetCppAtomicObjectFunction() {
+ return ObjCTypes.getCppAtomicObjectFunction();
+}
+
+llvm::Constant *CGObjCMac::EnumerationMutationFunction() {
+ return ObjCTypes.getEnumerationMutationFn();
+}
+
+void CGObjCMac::EmitTryStmt(CodeGenFunction &CGF, const ObjCAtTryStmt &S) {
+ return EmitTryOrSynchronizedStmt(CGF, S);
+}
+
+void CGObjCMac::EmitSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ return EmitTryOrSynchronizedStmt(CGF, S);
+}
+
+namespace {
+ struct PerformFragileFinally : EHScopeStack::Cleanup {
+ const Stmt &S;
+ llvm::Value *SyncArgSlot;
+ llvm::Value *CallTryExitVar;
+ llvm::Value *ExceptionData;
+ ObjCTypesHelper &ObjCTypes;
+ PerformFragileFinally(const Stmt *S,
+ llvm::Value *SyncArgSlot,
+ llvm::Value *CallTryExitVar,
+ llvm::Value *ExceptionData,
+ ObjCTypesHelper *ObjCTypes)
+ : S(*S), SyncArgSlot(SyncArgSlot), CallTryExitVar(CallTryExitVar),
+ ExceptionData(ExceptionData), ObjCTypes(*ObjCTypes) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ // Check whether we need to call objc_exception_try_exit.
+ // In optimized code, this branch will always be folded.
+ llvm::BasicBlock *FinallyCallExit =
+ CGF.createBasicBlock("finally.call_exit");
+ llvm::BasicBlock *FinallyNoCallExit =
+ CGF.createBasicBlock("finally.no_call_exit");
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateLoad(CallTryExitVar),
+ FinallyCallExit, FinallyNoCallExit);
+
+ CGF.EmitBlock(FinallyCallExit);
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryExitFn(), ExceptionData)
+ ->setDoesNotThrow();
+
+ CGF.EmitBlock(FinallyNoCallExit);
+
+ if (isa<ObjCAtTryStmt>(S)) {
+ if (const ObjCAtFinallyStmt* FinallyStmt =
+ cast<ObjCAtTryStmt>(S).getFinallyStmt()) {
+ // Save the current cleanup destination in case there's
+ // control flow inside the finally statement.
+ llvm::Value *CurCleanupDest =
+ CGF.Builder.CreateLoad(CGF.getNormalCleanupDestSlot());
+
+ CGF.EmitStmt(FinallyStmt->getFinallyBody());
+
+ if (CGF.HaveInsertPoint()) {
+ CGF.Builder.CreateStore(CurCleanupDest,
+ CGF.getNormalCleanupDestSlot());
+ } else {
+ // Currently, the end of the cleanup must always exist.
+ CGF.EnsureInsertPoint();
+ }
+ }
+ } else {
+ // Emit objc_sync_exit(expr); as finally's sole statement for
+ // @synchronized.
+ llvm::Value *SyncArg = CGF.Builder.CreateLoad(SyncArgSlot);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncExitFn(), SyncArg)
+ ->setDoesNotThrow();
+ }
+ }
+ };
+
+ class FragileHazards {
+ CodeGenFunction &CGF;
+ SmallVector<llvm::Value*, 20> Locals;
+ llvm::DenseSet<llvm::BasicBlock*> BlocksBeforeTry;
+
+ llvm::InlineAsm *ReadHazard;
+ llvm::InlineAsm *WriteHazard;
+
+ llvm::FunctionType *GetAsmFnType();
+
+ void collectLocals();
+ void emitReadHazard(CGBuilderTy &Builder);
+
+ public:
+ FragileHazards(CodeGenFunction &CGF);
+
+ void emitWriteHazard();
+ void emitHazardsInNewBlocks();
+ };
+}
+
+/// Create the fragile-ABI read and write hazards based on the current
+/// state of the function, which is presumed to be immediately prior
+/// to a @try block. These hazards are used to maintain correct
+/// semantics in the face of optimization and the fragile ABI's
+/// cavalier use of setjmp/longjmp.
+FragileHazards::FragileHazards(CodeGenFunction &CGF) : CGF(CGF) {
+ collectLocals();
+
+ if (Locals.empty()) return;
+
+ // Collect all the blocks in the function.
+ for (llvm::Function::iterator
+ I = CGF.CurFn->begin(), E = CGF.CurFn->end(); I != E; ++I)
+ BlocksBeforeTry.insert(&*I);
+
+ llvm::FunctionType *AsmFnTy = GetAsmFnType();
+
+ // Create a read hazard for the allocas. This inhibits dead-store
+ // optimizations and forces the values to memory. This hazard is
+ // inserted before any 'throwing' calls in the protected scope to
+ // reflect the possibility that the variables might be read from the
+ // catch block if the call throws.
+ {
+ std::string Constraint;
+ for (unsigned I = 0, E = Locals.size(); I != E; ++I) {
+ if (I) Constraint += ',';
+ Constraint += "*m";
+ }
+
+ ReadHazard = llvm::InlineAsm::get(AsmFnTy, "", Constraint, true, false);
+ }
+
+ // Create a write hazard for the allocas. This inhibits folding
+ // loads across the hazard. This hazard is inserted at the
+ // beginning of the catch path to reflect the possibility that the
+ // variables might have been written within the protected scope.
+ {
+ std::string Constraint;
+ for (unsigned I = 0, E = Locals.size(); I != E; ++I) {
+ if (I) Constraint += ',';
+ Constraint += "=*m";
+ }
+
+ WriteHazard = llvm::InlineAsm::get(AsmFnTy, "", Constraint, true, false);
+ }
+}
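+
+/*
+  Conceptually, each hazard is an empty inline asm that takes every local's
+  memory as an operand. A plain C analogue for a single local (a sketch, not
+  the IR that is actually emitted here) would be:
+
+    int local;
+    __asm__ volatile("" : : "m"(local));   // read hazard: forces the current
+                                           // value of 'local' out to memory
+    __asm__ volatile("" : "=m"(local));    // write hazard: stops loads of
+                                           // 'local' being folded across it
+*/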
+
+/// Emit a write hazard at the current location.
+void FragileHazards::emitWriteHazard() {
+ if (Locals.empty()) return;
+
+ CGF.Builder.CreateCall(WriteHazard, Locals)->setDoesNotThrow();
+}
+
+void FragileHazards::emitReadHazard(CGBuilderTy &Builder) {
+ assert(!Locals.empty());
+ Builder.CreateCall(ReadHazard, Locals)->setDoesNotThrow();
+}
+
+/// Emit read hazards in all the protected blocks, i.e. all the blocks
+/// which have been inserted since the beginning of the try.
+void FragileHazards::emitHazardsInNewBlocks() {
+ if (Locals.empty()) return;
+
+ CGBuilderTy Builder(CGF.getLLVMContext());
+
+ // Iterate through all blocks, skipping those prior to the try.
+ for (llvm::Function::iterator
+ FI = CGF.CurFn->begin(), FE = CGF.CurFn->end(); FI != FE; ++FI) {
+ llvm::BasicBlock &BB = *FI;
+ if (BlocksBeforeTry.count(&BB)) continue;
+
+ // Walk through all the calls in the block.
+ for (llvm::BasicBlock::iterator
+ BI = BB.begin(), BE = BB.end(); BI != BE; ++BI) {
+ llvm::Instruction &I = *BI;
+
+ // Only non-intrinsic calls and invokes can possibly call longjmp,
+ // so ignore everything else.
+ if (!isa<llvm::CallInst>(I) && !isa<llvm::InvokeInst>(I)) continue;
+ if (isa<llvm::IntrinsicInst>(I))
+ continue;
+
+ // Ignore call sites marked nounwind. This may be questionable,
+ // since 'nounwind' doesn't necessarily mean 'does not call longjmp'.
+ llvm::CallSite CS(&I);
+ if (CS.doesNotThrow()) continue;
+
+ // Insert a read hazard before the call. This will ensure that
+ // any writes to the locals are performed before making the
+ // call. If the call throws, then this is sufficient to
+ // guarantee correctness as long as it doesn't also write to any
+ // locals.
+ Builder.SetInsertPoint(&BB, BI);
+ emitReadHazard(Builder);
+ }
+ }
+}
+
+static void addIfPresent(llvm::DenseSet<llvm::Value*> &S, llvm::Value *V) {
+ if (V) S.insert(V);
+}
+
+void FragileHazards::collectLocals() {
+ // Compute a set of allocas to ignore.
+ llvm::DenseSet<llvm::Value*> AllocasToIgnore;
+ addIfPresent(AllocasToIgnore, CGF.ReturnValue);
+ addIfPresent(AllocasToIgnore, CGF.NormalCleanupDest);
+
+ // Collect all the allocas currently in the function. This is
+ // probably way too aggressive.
+ llvm::BasicBlock &Entry = CGF.CurFn->getEntryBlock();
+ for (llvm::BasicBlock::iterator
+ I = Entry.begin(), E = Entry.end(); I != E; ++I)
+ if (isa<llvm::AllocaInst>(*I) && !AllocasToIgnore.count(&*I))
+ Locals.push_back(&*I);
+}
+
+llvm::FunctionType *FragileHazards::GetAsmFnType() {
+ SmallVector<llvm::Type *, 16> tys(Locals.size());
+ for (unsigned i = 0, e = Locals.size(); i != e; ++i)
+ tys[i] = Locals[i]->getType();
+ return llvm::FunctionType::get(CGF.VoidTy, tys, false);
+}
+
+/*
+
+ Objective-C setjmp-longjmp (sjlj) Exception Handling
+ --
+
+ A catch buffer is a setjmp buffer plus:
+ - a pointer to the exception that was caught
+ - a pointer to the previous exception data buffer
+ - two pointers of reserved storage
+ Therefore catch buffers form a stack, with a pointer to the top
+ of the stack kept in thread-local storage.
+
+ objc_exception_try_enter pushes a catch buffer onto the EH stack.
+ objc_exception_try_exit pops the given catch buffer, which is
+ required to be the top of the EH stack.
+ objc_exception_throw pops the top of the EH stack, writes the
+ thrown exception into the appropriate field, and longjmps
+ to the setjmp buffer. It crashes the process (with a printf
+ and an abort()) if there are no catch buffers on the stack.
+ objc_exception_extract just reads the exception pointer out of the
+ catch buffer.
+
+ There's no reason an implementation couldn't use a light-weight
+ setjmp here --- something like __builtin_setjmp, but API-compatible
+ with the heavyweight setjmp. This will be more important if we ever
+ want to implement correct ObjC/C++ exception interactions for the
+ fragile ABI.
+
+ Note that for this use of setjmp/longjmp to be correct, we may need
+ to mark some local variables volatile: if a non-volatile local
+ variable is modified between the setjmp and the longjmp, it has
+ indeterminate value. For the purposes of LLVM IR, it may be
+ sufficient to make loads and stores within the @try (to variables
+ declared outside the @try) volatile. This is necessary for
+ optimized correctness, but is not currently being done; this is
+ being tracked as rdar://problem/8160285
+
+ The basic framework for a @try-catch-finally is as follows:
+ {
+ objc_exception_data d;
+ id _rethrow = null;
+ bool _call_try_exit = true;
+
+ objc_exception_try_enter(&d);
+ if (!setjmp(d.jmp_buf)) {
+ ... try body ...
+ } else {
+ // exception path
+ id _caught = objc_exception_extract(&d);
+
+ // enter new try scope for handlers
+ if (!setjmp(d.jmp_buf)) {
+ ... match exception and execute catch blocks ...
+
+ // fell off end, rethrow.
+ _rethrow = _caught;
+ ... jump-through-finally to finally_rethrow ...
+ } else {
+ // exception in catch block
+ _rethrow = objc_exception_extract(&d);
+ _call_try_exit = false;
+ ... jump-through-finally to finally_rethrow ...
+ }
+ }
+ ... jump-through-finally to finally_end ...
+
+ finally:
+ if (_call_try_exit)
+ objc_exception_try_exit(&d);
+
+ ... finally block ....
+ ... dispatch to finally destination ...
+
+ finally_rethrow:
+ objc_exception_throw(_rethrow);
+
+ finally_end:
+ }
+
+ This framework differs slightly from the one gcc uses, in that gcc
+ uses _rethrow to determine if objc_exception_try_exit should be called
+ and if the object should be rethrown. This breaks in the face of
+ throwing nil and introduces unnecessary branches.
+
+ We specialize this framework for a few particular circumstances:
+
+ - If there are no catch blocks, then we avoid emitting the second
+ exception handling context.
+
+ - If there is a catch-all catch block (i.e. @catch(...) or @catch(id
+ e)) we avoid emitting the code to rethrow an uncaught exception.
+
+ - FIXME: If there is no @finally block we can do a few more
+ simplifications.
+
+ Rethrows and Jumps-Through-Finally
+ --
+
+ '@throw;' is supported by pushing the currently-caught exception
+ onto ObjCEHStack while the @catch blocks are emitted.
+
+ Branches through the @finally block are handled with an ordinary
+ normal cleanup. We do not register an EH cleanup; fragile-ABI ObjC
+ exceptions are not compatible with C++ exceptions, and this is
+ hardly the only place where this will go wrong.
+
+ @synchronized(expr) { stmt; } is emitted as if it were:
+ id synch_value = expr;
+ objc_sync_enter(synch_value);
+ @try { stmt; } @finally { objc_sync_exit(synch_value); }
+*/
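+
+/*
+  For reference, the runtime entry points used below have roughly these
+  shapes (a sketch only; 'id' is shown as void* and the real prototypes live
+  in the Objective-C runtime headers):
+
+    struct _objc_exception_data;                       // jmp_buf plus slots
+    void  objc_exception_try_enter(struct _objc_exception_data *d);
+    void  objc_exception_try_exit (struct _objc_exception_data *d);
+    void  objc_exception_throw    (void *exception);   // longjmps, no return
+    void *objc_exception_extract  (struct _objc_exception_data *d);
+    int   _setjmp(jmp_buf env);                        // may return twice
+*/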
+
+void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const Stmt &S) {
+ bool isTry = isa<ObjCAtTryStmt>(S);
+
+ // A destination for the fall-through edges of the catch handlers to
+ // jump to.
+ CodeGenFunction::JumpDest FinallyEnd =
+ CGF.getJumpDestInCurrentScope("finally.end");
+
+ // A destination for the rethrow edge of the catch handlers to jump
+ // to.
+ CodeGenFunction::JumpDest FinallyRethrow =
+ CGF.getJumpDestInCurrentScope("finally.rethrow");
+
+ // For @synchronized, call objc_sync_enter(sync.expr). The
+ // evaluation of the expression must occur before we enter the
+ // @synchronized. We can't avoid a temp here because we need the
+ // value to be preserved. If the backend ever does liveness
+ // correctly after setjmp, this will be unnecessary.
+ llvm::Value *SyncArgSlot = 0;
+ if (!isTry) {
+ llvm::Value *SyncArg =
+ CGF.EmitScalarExpr(cast<ObjCAtSynchronizedStmt>(S).getSynchExpr());
+ SyncArg = CGF.Builder.CreateBitCast(SyncArg, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateCall(ObjCTypes.getSyncEnterFn(), SyncArg)
+ ->setDoesNotThrow();
+
+ SyncArgSlot = CGF.CreateTempAlloca(SyncArg->getType(), "sync.arg");
+ CGF.Builder.CreateStore(SyncArg, SyncArgSlot);
+ }
+
+ // Allocate memory for the setjmp buffer. This needs to be kept
+ // live throughout the try and catch blocks.
+ llvm::Value *ExceptionData = CGF.CreateTempAlloca(ObjCTypes.ExceptionDataTy,
+ "exceptiondata.ptr");
+
+ // Create the fragile hazards. Note that this will not capture any
+ // of the allocas required for exception processing, but will
+ // capture the current basic block (which extends all the way to the
+ // setjmp call) as "before the @try".
+ FragileHazards Hazards(CGF);
+
+ // Create a flag indicating whether the cleanup needs to call
+ // objc_exception_try_exit. This is true except when
+ // - no catches match and we're branching through the cleanup
+ // just to rethrow the exception, or
+ // - a catch matched and we're falling out of the catch handler.
+ // The setjmp-safety rule here is that we should always store to this
+ // variable in a place that dominates the branch through the cleanup
+ // without passing through any setjmps.
+ llvm::Value *CallTryExitVar = CGF.CreateTempAlloca(CGF.Builder.getInt1Ty(),
+ "_call_try_exit");
+
+ // A slot containing the exception to rethrow. Only needed when we
+ // have both a @catch and a @finally.
+ llvm::Value *PropagatingExnVar = 0;
+
+ // Push a normal cleanup to leave the try scope.
+ CGF.EHStack.pushCleanup<PerformFragileFinally>(NormalCleanup, &S,
+ SyncArgSlot,
+ CallTryExitVar,
+ ExceptionData,
+ &ObjCTypes);
+
+ // Enter a try block:
+ // - Call objc_exception_try_enter to push ExceptionData on top of
+ // the EH stack.
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData)
+ ->setDoesNotThrow();
+
+ // - Call setjmp on the exception data buffer.
+ llvm::Constant *Zero = llvm::ConstantInt::get(CGF.Builder.getInt32Ty(), 0);
+ llvm::Value *GEPIndexes[] = { Zero, Zero, Zero };
+ llvm::Value *SetJmpBuffer =
+ CGF.Builder.CreateGEP(ExceptionData, GEPIndexes, "setjmp_buffer");
+ llvm::CallInst *SetJmpResult =
+ CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer, "setjmp_result");
+ SetJmpResult->setDoesNotThrow();
+ SetJmpResult->setCanReturnTwice();
+
+ // If setjmp returned 0, enter the protected block; otherwise,
+ // branch to the handler.
+ llvm::BasicBlock *TryBlock = CGF.createBasicBlock("try");
+ llvm::BasicBlock *TryHandler = CGF.createBasicBlock("try.handler");
+ llvm::Value *DidCatch =
+ CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception");
+ CGF.Builder.CreateCondBr(DidCatch, TryHandler, TryBlock);
+
+ // Emit the protected block.
+ CGF.EmitBlock(TryBlock);
+ CGF.Builder.CreateStore(CGF.Builder.getTrue(), CallTryExitVar);
+ CGF.EmitStmt(isTry ? cast<ObjCAtTryStmt>(S).getTryBody()
+ : cast<ObjCAtSynchronizedStmt>(S).getSynchBody());
+
+ CGBuilderTy::InsertPoint TryFallthroughIP = CGF.Builder.saveAndClearIP();
+
+ // Emit the exception handler block.
+ CGF.EmitBlock(TryHandler);
+
+ // Don't optimize loads of the in-scope locals across this point.
+ Hazards.emitWriteHazard();
+
+ // For a @synchronized (or a @try with no catches), just branch
+ // through the cleanup to the rethrow block.
+ if (!isTry || !cast<ObjCAtTryStmt>(S).getNumCatchStmts()) {
+ // Tell the cleanup not to re-pop the exit.
+ CGF.Builder.CreateStore(CGF.Builder.getFalse(), CallTryExitVar);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ // Otherwise, we have to match against the caught exceptions.
+ } else {
+ // Retrieve the exception object. We may emit multiple blocks but
+ // nothing can cross this so the value is already in SSA form.
+ llvm::CallInst *Caught =
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData, "caught");
+ Caught->setDoesNotThrow();
+
+ // Push the exception to rethrow onto the EH value stack for the
+ // benefit of any @throws in the handlers.
+ CGF.ObjCEHValueStack.push_back(Caught);
+
+ const ObjCAtTryStmt* AtTryStmt = cast<ObjCAtTryStmt>(&S);
+
+ bool HasFinally = (AtTryStmt->getFinallyStmt() != 0);
+
+ llvm::BasicBlock *CatchBlock = 0;
+ llvm::BasicBlock *CatchHandler = 0;
+ if (HasFinally) {
+ // Save the currently-propagating exception before
+ // objc_exception_try_enter clears the exception slot.
+ PropagatingExnVar = CGF.CreateTempAlloca(Caught->getType(),
+ "propagating_exception");
+ CGF.Builder.CreateStore(Caught, PropagatingExnVar);
+
+ // Enter a new exception try block (in case a @catch block
+ // throws an exception).
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionTryEnterFn(), ExceptionData)
+ ->setDoesNotThrow();
+
+ llvm::CallInst *SetJmpResult =
+ CGF.Builder.CreateCall(ObjCTypes.getSetJmpFn(), SetJmpBuffer,
+ "setjmp.result");
+ SetJmpResult->setDoesNotThrow();
+ SetJmpResult->setCanReturnTwice();
+
+ llvm::Value *Threw =
+ CGF.Builder.CreateIsNotNull(SetJmpResult, "did_catch_exception");
+
+ CatchBlock = CGF.createBasicBlock("catch");
+ CatchHandler = CGF.createBasicBlock("catch_for_catch");
+ CGF.Builder.CreateCondBr(Threw, CatchHandler, CatchBlock);
+
+ CGF.EmitBlock(CatchBlock);
+ }
+
+ CGF.Builder.CreateStore(CGF.Builder.getInt1(HasFinally), CallTryExitVar);
+
+ // Handle catch list. As a special case we check if everything is
+ // matched and avoid generating code for falling off the end if
+ // so.
+ bool AllMatched = false;
+ for (unsigned I = 0, N = AtTryStmt->getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = AtTryStmt->getCatchStmt(I);
+
+ const VarDecl *CatchParam = CatchStmt->getCatchParamDecl();
+ const ObjCObjectPointerType *OPT = 0;
+
+ // catch(...) always matches.
+ if (!CatchParam) {
+ AllMatched = true;
+ } else {
+ OPT = CatchParam->getType()->getAs<ObjCObjectPointerType>();
+
+ // catch(id e) always matches under this ABI, since only
+ // ObjC exceptions end up here in the first place.
+ // FIXME: For the time being we also match id<X>; this should
+ // be rejected by Sema instead.
+ if (OPT && (OPT->isObjCIdType() || OPT->isObjCQualifiedIdType()))
+ AllMatched = true;
+ }
+
+ // If this is a catch-all, we don't need to test anything.
+ if (AllMatched) {
+ CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
+
+ if (CatchParam) {
+ CGF.EmitAutoVarDecl(*CatchParam);
+ assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+
+ // These types work out because ConvertType(id) == i8*.
+ CGF.Builder.CreateStore(Caught, CGF.GetAddrOfLocalVar(CatchParam));
+ }
+
+ CGF.EmitStmt(CatchStmt->getCatchBody());
+
+ // The scope of the catch variable ends right here.
+ CatchVarCleanups.ForceCleanup();
+
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+ break;
+ }
+
+ assert(OPT && "Unexpected non-object pointer type in @catch");
+ const ObjCObjectType *ObjTy = OPT->getObjectType();
+
+ // FIXME: @catch (Class c) ?
+ ObjCInterfaceDecl *IDecl = ObjTy->getInterface();
+ assert(IDecl && "Catch parameter must have Objective-C type!");
+
+ // Check if the @catch block matches the exception object.
+ llvm::Value *Class = EmitClassRef(CGF.Builder, IDecl);
+
+ llvm::CallInst *Match =
+ CGF.Builder.CreateCall2(ObjCTypes.getExceptionMatchFn(),
+ Class, Caught, "match");
+ Match->setDoesNotThrow();
+
+ llvm::BasicBlock *MatchedBlock = CGF.createBasicBlock("match");
+ llvm::BasicBlock *NextCatchBlock = CGF.createBasicBlock("catch.next");
+
+ CGF.Builder.CreateCondBr(CGF.Builder.CreateIsNotNull(Match, "matched"),
+ MatchedBlock, NextCatchBlock);
+
+ // Emit the @catch block.
+ CGF.EmitBlock(MatchedBlock);
+
+ // Collect any cleanups for the catch variable. The scope lasts until
+ // the end of the catch body.
+ CodeGenFunction::RunCleanupsScope CatchVarCleanups(CGF);
+
+ CGF.EmitAutoVarDecl(*CatchParam);
+ assert(CGF.HaveInsertPoint() && "DeclStmt destroyed insert point?");
+
+ // Initialize the catch variable.
+ llvm::Value *Tmp =
+ CGF.Builder.CreateBitCast(Caught,
+ CGF.ConvertType(CatchParam->getType()));
+ CGF.Builder.CreateStore(Tmp, CGF.GetAddrOfLocalVar(CatchParam));
+
+ CGF.EmitStmt(CatchStmt->getCatchBody());
+
+ // We're done with the catch variable.
+ CatchVarCleanups.ForceCleanup();
+
+ CGF.EmitBranchThroughCleanup(FinallyEnd);
+
+ CGF.EmitBlock(NextCatchBlock);
+ }
+
+ CGF.ObjCEHValueStack.pop_back();
+
+ // If nothing wanted anything to do with the caught exception,
+ // kill the extract call.
+ if (Caught->use_empty())
+ Caught->eraseFromParent();
+
+ if (!AllMatched)
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+
+ if (HasFinally) {
+ // Emit the exception handler for the @catch blocks.
+ CGF.EmitBlock(CatchHandler);
+
+ // In theory we might now need a write hazard, but actually it's
+ // unnecessary because there's no local-accessing code between
+ // the try's write hazard and here.
+ //Hazards.emitWriteHazard();
+
+ // Extract the new exception and save it to the
+ // propagating-exception slot.
+ assert(PropagatingExnVar);
+ llvm::CallInst *NewCaught =
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData, "caught");
+ NewCaught->setDoesNotThrow();
+ CGF.Builder.CreateStore(NewCaught, PropagatingExnVar);
+
+ // Don't pop the catch handler; the throw already did.
+ CGF.Builder.CreateStore(CGF.Builder.getFalse(), CallTryExitVar);
+ CGF.EmitBranchThroughCleanup(FinallyRethrow);
+ }
+ }
+
+ // Insert read hazards as required in the new blocks.
+ Hazards.emitHazardsInNewBlocks();
+
+ // Pop the cleanup.
+ CGF.Builder.restoreIP(TryFallthroughIP);
+ if (CGF.HaveInsertPoint())
+ CGF.Builder.CreateStore(CGF.Builder.getTrue(), CallTryExitVar);
+ CGF.PopCleanupBlock();
+ CGF.EmitBlock(FinallyEnd.getBlock(), true);
+
+ // Emit the rethrow block.
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+ CGF.EmitBlock(FinallyRethrow.getBlock(), true);
+ if (CGF.HaveInsertPoint()) {
+ // If we have a propagating-exception variable, check it.
+ llvm::Value *PropagatingExn;
+ if (PropagatingExnVar) {
+ PropagatingExn = CGF.Builder.CreateLoad(PropagatingExnVar);
+
+ // Otherwise, just look in the buffer for the exception to throw.
+ } else {
+ llvm::CallInst *Caught =
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionExtractFn(),
+ ExceptionData);
+ Caught->setDoesNotThrow();
+ PropagatingExn = Caught;
+ }
+
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), PropagatingExn)
+ ->setDoesNotThrow();
+ CGF.Builder.CreateUnreachable();
+ }
+
+ CGF.Builder.restoreIP(SavedIP);
+}
+
+void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ llvm::Value *ExceptionAsObject;
+
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr);
+ ExceptionAsObject =
+ CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy);
+ } else {
+ assert((!CGF.ObjCEHValueStack.empty() && CGF.ObjCEHValueStack.back()) &&
+ "Unexpected rethrow outside @catch block.");
+ ExceptionAsObject = CGF.ObjCEHValueStack.back();
+ }
+
+ CGF.Builder.CreateCall(ObjCTypes.getExceptionThrowFn(), ExceptionAsObject)
+ ->setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ CGF.Builder.ClearInsertionPoint();
+}
+
+/// EmitObjCWeakRead - Code gen for loading value of a __weak
+/// object: objc_read_weak (id *src)
+///
+llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) {
+ llvm::Type* DestTy =
+ cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+ AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj,
+ ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *read_weak = CGF.Builder.CreateCall(ObjCTypes.getGcReadWeakFn(),
+ AddrWeakObj, "weakread");
+ read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
+ return read_weak;
+}
+
+/// EmitObjCWeakAssign - Code gen for assigning to a __weak object.
+/// objc_assign_weak (id src, id *dst)
+///
+void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignWeakFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+/// EmitObjCGlobalAssign - Code gen for assigning to a __strong object.
+/// objc_assign_global (id src, id *dst)
+///
+void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst,
+ bool threadlocal) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ if (!threadlocal)
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
+ src, dst, "globalassign");
+ else
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignThreadLocalFn(),
+ src, dst, "threadlocalassign");
+ return;
+}
+
+/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
+/// objc_assign_ivar (id src, id *dst, ptrdiff_t ivaroffset)
+///
+void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst,
+ llvm::Value *ivarOffset) {
+ assert(ivarOffset && "EmitObjCIvarAssign - ivarOffset is NULL");
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall3(ObjCTypes.getGcAssignIvarFn(),
+ src, dst, ivarOffset);
+ return;
+}
+
+/// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object.
+/// objc_assign_strongCast (id src, id *dst)
+///
+void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignStrongCastFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ llvm::Value *size) {
+ SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
+ DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
+ CGF.Builder.CreateCall3(ObjCTypes.GcMemmoveCollectableFn(),
+ DestPtr, SrcPtr, size);
+ return;
+}
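+
+/*
+  The write barriers emitted above map onto GC runtime entry points with
+  roughly these shapes (a sketch; 'id' is shown as void* for brevity):
+
+    void *objc_read_weak          (void **location);
+    void *objc_assign_weak        (void *value, void **location);
+    void *objc_assign_global      (void *value, void **location);
+    void *objc_assign_threadlocal (void *value, void **location);
+    void *objc_assign_ivar        (void *value, void *dest, ptrdiff_t offset);
+    void *objc_assign_strongCast  (void *value, void **location);
+    void *objc_memmove_collectable(void *dst, const void *src, size_t size);
+*/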
+
+/// EmitObjCValueForIvar - Code Gen for ivar reference.
+///
+LValue CGObjCMac::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ const ObjCInterfaceDecl *ID =
+ ObjectTy->getAs<ObjCObjectType>()->getInterface();
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ EmitIvarOffset(CGF, ID, Ivar));
+}
+
+llvm::Value *CGObjCMac::EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ uint64_t Offset = ComputeIvarBaseOffset(CGM, Interface, Ivar);
+ return llvm::ConstantInt::get(
+ CGM.getTypes().ConvertType(CGM.getContext().LongTy),
+ Offset);
+}
+
+/* *** Private Interface *** */
+
+/// EmitImageInfo - Emit the image info marker used to encode some module
+/// level information.
+///
+/// See: <rdar://4810609&4810587&4810587>
+/// struct IMAGE_INFO {
+/// unsigned version;
+/// unsigned flags;
+/// };
+enum ImageInfoFlags {
+ eImageInfo_FixAndContinue = (1 << 0),
+ eImageInfo_GarbageCollected = (1 << 1),
+ eImageInfo_GCOnly = (1 << 2),
+ eImageInfo_OptimizedByDyld = (1 << 3), // FIXME: When is this set?
+
+ // A flag indicating that the module has no instances of a @synthesize of a
+ // superclass variable. <rdar://problem/6803242>
+ eImageInfo_CorrectedSynthesize = (1 << 4)
+};
+
+void CGObjCCommonMac::EmitImageInfo() {
+ unsigned version = 0; // Version is unused?
+ const char *Section = (ObjCABI == 1) ?
+ "__OBJC, __image_info,regular" :
+ "__DATA, __objc_imageinfo, regular, no_dead_strip";
+
+ // Generate module-level named metadata to convey this information to the
+ // linker and code-gen.
+ llvm::Module &Mod = CGM.getModule();
+
+ // Add the ObjC ABI version to the module flags.
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Version", ObjCABI);
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Image Info Version",
+ version);
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C Image Info Section",
+ llvm::MDString::get(VMContext,Section));
+
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC) {
+ // Non-GC overrides those files which specify GC.
+ Mod.addModuleFlag(llvm::Module::Override,
+ "Objective-C Garbage Collection", (uint32_t)0);
+ } else {
+ // Add the ObjC garbage collection value.
+ Mod.addModuleFlag(llvm::Module::Error,
+ "Objective-C Garbage Collection",
+ eImageInfo_GarbageCollected);
+
+ if (CGM.getLangOpts().getGC() == LangOptions::GCOnly) {
+ // Add the ObjC GC Only value.
+ Mod.addModuleFlag(llvm::Module::Error, "Objective-C GC Only",
+ eImageInfo_GCOnly);
+
+ // Require that GC be specified and set to eImageInfo_GarbageCollected.
+ llvm::Value *Ops[2] = {
+ llvm::MDString::get(VMContext, "Objective-C Garbage Collection"),
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
+ eImageInfo_GarbageCollected)
+ };
+ Mod.addModuleFlag(llvm::Module::Require, "Objective-C GC Only",
+ llvm::MDNode::get(VMContext, Ops));
+ }
+ }
+}
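+
+/*
+  Rough example of the resulting IMAGE_INFO flags (illustrative): a
+  translation unit built with GC-only support contributes both bits,
+
+    unsigned flags = eImageInfo_GarbageCollected   // (1 << 1) == 2
+                   | eImageInfo_GCOnly;            // (1 << 2) == 4
+    // flags == 6 in the emitted image info; with GC disabled the override
+    // module flag forces the garbage-collection value back to 0.
+*/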
+
+// struct objc_module {
+// unsigned long version;
+// unsigned long size;
+// const char *name;
+// Symtab symtab;
+// };
+
+// FIXME: Get from somewhere
+static const int ModuleVersion = 7;
+
+void CGObjCMac::EmitModuleInfo() {
+ uint64_t Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.ModuleTy);
+
+ llvm::Constant *Values[] = {
+ llvm::ConstantInt::get(ObjCTypes.LongTy, ModuleVersion),
+ llvm::ConstantInt::get(ObjCTypes.LongTy, Size),
+ // This used to be the filename; it is now unused. <rdar://4327263>
+ GetClassName(&CGM.getContext().Idents.get("")),
+ EmitModuleSymbols()
+ };
+ CreateMetadataVar("\01L_OBJC_MODULES",
+ llvm::ConstantStruct::get(ObjCTypes.ModuleTy, Values),
+ "__OBJC,__module_info,regular,no_dead_strip",
+ 4, true);
+}
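+
+/*
+  Shape of the record emitted above (a sketch; the name slot is the empty
+  string, see the comment in EmitModuleInfo):
+
+    struct objc_module_sketch {
+      long version;          // ModuleVersion == 7
+      long size;             // sizeof (struct objc_module)
+      const char *name;      // ""
+      void *symtab;          // EmitModuleSymbols(), null if nothing defined
+    };
+*/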
+
+llvm::Constant *CGObjCMac::EmitModuleSymbols() {
+ unsigned NumClasses = DefinedClasses.size();
+ unsigned NumCategories = DefinedCategories.size();
+
+ // Return null if no symbols were defined.
+ if (!NumClasses && !NumCategories)
+ return llvm::Constant::getNullValue(ObjCTypes.SymtabPtrTy);
+
+ llvm::Constant *Values[5];
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
+ Values[1] = llvm::Constant::getNullValue(ObjCTypes.SelectorPtrTy);
+ Values[2] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumClasses);
+ Values[3] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumCategories);
+
+ // The runtime expects exactly the list of defined classes followed
+ // by the list of defined categories, in a single array.
+ SmallVector<llvm::Constant*, 8> Symbols(NumClasses + NumCategories);
+ for (unsigned i=0; i<NumClasses; i++)
+ Symbols[i] = llvm::ConstantExpr::getBitCast(DefinedClasses[i],
+ ObjCTypes.Int8PtrTy);
+ for (unsigned i=0; i<NumCategories; i++)
+ Symbols[NumClasses + i] =
+ llvm::ConstantExpr::getBitCast(DefinedCategories[i],
+ ObjCTypes.Int8PtrTy);
+
+ Values[4] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ Symbols.size()),
+ Symbols);
+
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV =
+ CreateMetadataVar("\01L_OBJC_SYMBOLS", Init,
+ "__OBJC,__symbols,regular,no_dead_strip",
+ 4, true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
+}
+
+llvm::Value *CGObjCMac::EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II) {
+ LazySymbols.insert(II);
+
+ llvm::GlobalVariable *&Entry = ClassReferences[II];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetClassName(II),
+ ObjCTypes.ClassPtrTy);
+ Entry =
+ CreateMetadataVar("\01L_OBJC_CLASS_REFERENCES_", Casted,
+ "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
+ 4, true);
+ }
+
+ return Builder.CreateLoad(Entry);
+}
+
+llvm::Value *CGObjCMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRefFromId(Builder, ID->getIdentifier());
+}
+
+llvm::Value *CGObjCMac::EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool");
+ return EmitClassRefFromId(Builder, II);
+}
+
+llvm::Value *CGObjCMac::EmitSelector(CGBuilderTy &Builder, Selector Sel,
+ bool lvalue) {
+ llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
+ ObjCTypes.SelectorPtrTy);
+ Entry =
+ CreateMetadataVar("\01L_OBJC_SELECTOR_REFERENCES_", Casted,
+ "__OBJC,__message_refs,literal_pointers,no_dead_strip",
+ 4, true);
+ }
+
+ if (lvalue)
+ return Entry;
+ return Builder.CreateLoad(Entry);
+}
+
+llvm::Constant *CGObjCCommonMac::GetClassName(IdentifierInfo *Ident) {
+ llvm::GlobalVariable *&Entry = ClassNames[Ident];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
+ llvm::ConstantDataArray::getString(VMContext,
+ Ident->getNameStart()),
+ ((ObjCABI == 2) ?
+ "__TEXT,__objc_classname,cstring_literals" :
+ "__TEXT,__cstring,cstring_literals"),
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+llvm::Function *CGObjCCommonMac::GetMethodDefinition(const ObjCMethodDecl *MD) {
+ llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*>::iterator
+ I = MethodDefinitions.find(MD);
+ if (I != MethodDefinitions.end())
+ return I->second;
+
+ return NULL;
+}
+
+/// GetIvarLayoutName - Returns a unique constant for the given
+/// ivar layout bitmap.
+llvm::Constant *CGObjCCommonMac::GetIvarLayoutName(IdentifierInfo *Ident,
+ const ObjCCommonTypesHelper &ObjCTypes) {
+ return llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+}
+
+void CGObjCCommonMac::BuildAggrIvarRecordLayout(const RecordType *RT,
+ unsigned int BytePos,
+ bool ForStrongLayout,
+ bool &HasUnion) {
+ const RecordDecl *RD = RT->getDecl();
+ // FIXME - Use iterator.
+ SmallVector<const FieldDecl*, 16> Fields(RD->field_begin(), RD->field_end());
+ llvm::Type *Ty = CGM.getTypes().ConvertType(QualType(RT, 0));
+ const llvm::StructLayout *RecLayout =
+ CGM.getTargetData().getStructLayout(cast<llvm::StructType>(Ty));
+
+ BuildAggrIvarLayout(0, RecLayout, RD, Fields, BytePos,
+ ForStrongLayout, HasUnion);
+}
+
+void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
+ const llvm::StructLayout *Layout,
+ const RecordDecl *RD,
+ ArrayRef<const FieldDecl*> RecFields,
+ unsigned int BytePos, bool ForStrongLayout,
+ bool &HasUnion) {
+ bool IsUnion = (RD && RD->isUnion());
+ uint64_t MaxUnionIvarSize = 0;
+ uint64_t MaxSkippedUnionIvarSize = 0;
+ const FieldDecl *MaxField = 0;
+ const FieldDecl *MaxSkippedField = 0;
+ const FieldDecl *LastFieldBitfieldOrUnnamed = 0;
+ uint64_t MaxFieldOffset = 0;
+ uint64_t MaxSkippedFieldOffset = 0;
+ uint64_t LastBitfieldOrUnnamedOffset = 0;
+ uint64_t FirstFieldDelta = 0;
+
+ if (RecFields.empty())
+ return;
+ unsigned WordSizeInBits = CGM.getContext().getTargetInfo().getPointerWidth(0);
+ unsigned ByteSizeInBits = CGM.getContext().getTargetInfo().getCharWidth();
+ if (!RD && CGM.getLangOpts().ObjCAutoRefCount) {
+ const FieldDecl *FirstField = RecFields[0];
+ FirstFieldDelta =
+ ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(FirstField));
+ }
+
+ for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+ const FieldDecl *Field = RecFields[i];
+ uint64_t FieldOffset;
+ if (RD) {
+ // Note that 'i' here is actually the field index inside RD of Field,
+ // although this dependency is hidden.
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ FieldOffset = (RL.getFieldOffset(i) / ByteSizeInBits) - FirstFieldDelta;
+ } else
+ FieldOffset =
+ ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field)) - FirstFieldDelta;
+
+ // Skip over unnamed or bitfields
+ if (!Field->getIdentifier() || Field->isBitField()) {
+ LastFieldBitfieldOrUnnamed = Field;
+ LastBitfieldOrUnnamedOffset = FieldOffset;
+ continue;
+ }
+
+ LastFieldBitfieldOrUnnamed = 0;
+ QualType FQT = Field->getType();
+ if (FQT->isRecordType() || FQT->isUnionType()) {
+ if (FQT->isUnionType())
+ HasUnion = true;
+
+ BuildAggrIvarRecordLayout(FQT->getAs<RecordType>(),
+ BytePos + FieldOffset,
+ ForStrongLayout, HasUnion);
+ continue;
+ }
+
+ if (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+ const ConstantArrayType *CArray =
+ dyn_cast_or_null<ConstantArrayType>(Array);
+ assert(CArray && "only array with known element size is supported");
+ uint64_t ElCount = CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ while (const ArrayType *Array = CGM.getContext().getAsArrayType(FQT)) {
+ const ConstantArrayType *CArray =
+ dyn_cast_or_null<ConstantArrayType>(Array);
+ ElCount *= CArray->getSize().getZExtValue();
+ FQT = CArray->getElementType();
+ }
+
+ assert(!FQT->isUnionType() &&
+ "layout for array of unions not supported");
+ if (FQT->isRecordType() && ElCount) {
+ int OldIndex = IvarsInfo.size() - 1;
+ int OldSkIndex = SkipIvars.size() -1;
+
+ const RecordType *RT = FQT->getAs<RecordType>();
+ BuildAggrIvarRecordLayout(RT, BytePos + FieldOffset,
+ ForStrongLayout, HasUnion);
+
+ // Replicate layout information for each array element. Note that
+ // one element is already done.
+ uint64_t ElIx = 1;
+ for (int FirstIndex = IvarsInfo.size() - 1,
+ FirstSkIndex = SkipIvars.size() - 1; ElIx < ElCount; ElIx++) {
+ uint64_t Size = CGM.getContext().getTypeSize(RT)/ByteSizeInBits;
+ for (int i = OldIndex+1; i <= FirstIndex; ++i)
+ IvarsInfo.push_back(GC_IVAR(IvarsInfo[i].ivar_bytepos + Size*ElIx,
+ IvarsInfo[i].ivar_size));
+ for (int i = OldSkIndex+1; i <= FirstSkIndex; ++i)
+ SkipIvars.push_back(GC_IVAR(SkipIvars[i].ivar_bytepos + Size*ElIx,
+ SkipIvars[i].ivar_size));
+ }
+ continue;
+ }
+ }
+ // At this point, we are done with Record/Union and arrays thereof.
+ // For other arrays we are down to their element type.
+ Qualifiers::GC GCAttr = GetGCAttrTypeForType(CGM.getContext(), FQT);
+
+ unsigned FieldSize = CGM.getContext().getTypeSize(Field->getType());
+ if ((ForStrongLayout && GCAttr == Qualifiers::Strong)
+ || (!ForStrongLayout && GCAttr == Qualifiers::Weak)) {
+ if (IsUnion) {
+ uint64_t UnionIvarSize = FieldSize / WordSizeInBits;
+ if (UnionIvarSize > MaxUnionIvarSize) {
+ MaxUnionIvarSize = UnionIvarSize;
+ MaxField = Field;
+ MaxFieldOffset = FieldOffset;
+ }
+ } else {
+ IvarsInfo.push_back(GC_IVAR(BytePos + FieldOffset,
+ FieldSize / WordSizeInBits));
+ }
+ } else if ((ForStrongLayout &&
+ (GCAttr == Qualifiers::GCNone || GCAttr == Qualifiers::Weak))
+ || (!ForStrongLayout && GCAttr != Qualifiers::Weak)) {
+ if (IsUnion) {
+ // FIXME: Why the asymmetry? We divide by word size in bits on other
+ // side.
+ uint64_t UnionIvarSize = FieldSize;
+ if (UnionIvarSize > MaxSkippedUnionIvarSize) {
+ MaxSkippedUnionIvarSize = UnionIvarSize;
+ MaxSkippedField = Field;
+ MaxSkippedFieldOffset = FieldOffset;
+ }
+ } else {
+ // FIXME: Why the asymmetry, we divide by byte size in bits here?
+ SkipIvars.push_back(GC_IVAR(BytePos + FieldOffset,
+ FieldSize / ByteSizeInBits));
+ }
+ }
+ }
+
+ if (LastFieldBitfieldOrUnnamed) {
+ if (LastFieldBitfieldOrUnnamed->isBitField()) {
+ // Last field was a bitfield. Must update skip info.
+ uint64_t BitFieldSize
+ = LastFieldBitfieldOrUnnamed->getBitWidthValue(CGM.getContext());
+ GC_IVAR skivar;
+ skivar.ivar_bytepos = BytePos + LastBitfieldOrUnnamedOffset;
+ skivar.ivar_size = (BitFieldSize / ByteSizeInBits)
+ + ((BitFieldSize % ByteSizeInBits) != 0);
+ SkipIvars.push_back(skivar);
+ } else {
+ assert(!LastFieldBitfieldOrUnnamed->getIdentifier() && "Expected unnamed");
+ // Last field was unnamed. Must update skip info.
+ unsigned FieldSize
+ = CGM.getContext().getTypeSize(LastFieldBitfieldOrUnnamed->getType());
+ SkipIvars.push_back(GC_IVAR(BytePos + LastBitfieldOrUnnamedOffset,
+ FieldSize / ByteSizeInBits));
+ }
+ }
+
+ if (MaxField)
+ IvarsInfo.push_back(GC_IVAR(BytePos + MaxFieldOffset,
+ MaxUnionIvarSize));
+ if (MaxSkippedField)
+ SkipIvars.push_back(GC_IVAR(BytePos + MaxSkippedFieldOffset,
+ MaxSkippedUnionIvarSize));
+}
+
+/// BuildIvarLayoutBitmap - This routine is the workhorse that does all the
+/// computations and returns the layout bitmap (for ivars or blocks) in the
+/// given BitMap string argument. It reads two containers, IvarsInfo and
+/// SkipIvars, which are assumed to have been filled already by the caller.
+llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string &BitMap) {
+ unsigned int WordsToScan, WordsToSkip;
+ llvm::Type *PtrTy = CGM.Int8PtrTy;
+
+ // Build the string of skip/scan nibbles
+ SmallVector<SKIP_SCAN, 32> SkipScanIvars;
+ unsigned int WordSize =
+ CGM.getTypes().getTargetData().getTypeAllocSize(PtrTy);
+ if (IvarsInfo[0].ivar_bytepos == 0) {
+ WordsToSkip = 0;
+ WordsToScan = IvarsInfo[0].ivar_size;
+ } else {
+ WordsToSkip = IvarsInfo[0].ivar_bytepos/WordSize;
+ WordsToScan = IvarsInfo[0].ivar_size;
+ }
+ for (unsigned int i=1, Last=IvarsInfo.size(); i != Last; i++) {
+ unsigned int TailPrevGCObjC =
+ IvarsInfo[i-1].ivar_bytepos + IvarsInfo[i-1].ivar_size * WordSize;
+ if (IvarsInfo[i].ivar_bytepos == TailPrevGCObjC) {
+ // consecutive 'scanned' object pointers.
+ WordsToScan += IvarsInfo[i].ivar_size;
+ } else {
+ // Skip over 'gc'able object pointers which overlap each other.
+ if (TailPrevGCObjC > IvarsInfo[i].ivar_bytepos)
+ continue;
+ // Must skip over 1 or more words. We save current skip/scan values
+ // and start a new pair.
+ SKIP_SCAN SkScan;
+ SkScan.skip = WordsToSkip;
+ SkScan.scan = WordsToScan;
+ SkipScanIvars.push_back(SkScan);
+
+ // Skip the hole.
+ SkScan.skip = (IvarsInfo[i].ivar_bytepos - TailPrevGCObjC) / WordSize;
+ SkScan.scan = 0;
+ SkipScanIvars.push_back(SkScan);
+ WordsToSkip = 0;
+ WordsToScan = IvarsInfo[i].ivar_size;
+ }
+ }
+ if (WordsToScan > 0) {
+ SKIP_SCAN SkScan;
+ SkScan.skip = WordsToSkip;
+ SkScan.scan = WordsToScan;
+ SkipScanIvars.push_back(SkScan);
+ }
+
+ if (!SkipIvars.empty()) {
+ unsigned int LastIndex = SkipIvars.size()-1;
+ int LastByteSkipped =
+ SkipIvars[LastIndex].ivar_bytepos + SkipIvars[LastIndex].ivar_size;
+ LastIndex = IvarsInfo.size()-1;
+ int LastByteScanned =
+ IvarsInfo[LastIndex].ivar_bytepos +
+ IvarsInfo[LastIndex].ivar_size * WordSize;
+ // Compute number of bytes to skip at the tail end of the last ivar scanned.
+ if (LastByteSkipped > LastByteScanned) {
+ unsigned int TotalWords = (LastByteSkipped + (WordSize -1)) / WordSize;
+ SKIP_SCAN SkScan;
+ SkScan.skip = TotalWords - (LastByteScanned/WordSize);
+ SkScan.scan = 0;
+ SkipScanIvars.push_back(SkScan);
+ }
+ }
+ // Mini optimization of nibbles such that a 0xM0 followed by a 0x0N is
+ // produced as 0xMN.
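+ // For example (illustrative): the pair (skip=2, scan=0) followed by
+ // (skip=0, scan=3) collapses into the single pair (skip=2, scan=3),
+ // which is later emitted as the byte 0x23.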
+ int SkipScan = SkipScanIvars.size()-1;
+ for (int i = 0; i <= SkipScan; i++) {
+ if ((i < SkipScan) && SkipScanIvars[i].skip && SkipScanIvars[i].scan == 0
+ && SkipScanIvars[i+1].skip == 0 && SkipScanIvars[i+1].scan) {
+ // 0xM0 followed by 0x0N detected.
+ SkipScanIvars[i].scan = SkipScanIvars[i+1].scan;
+ for (int j = i+1; j < SkipScan; j++)
+ SkipScanIvars[j] = SkipScanIvars[j+1];
+ --SkipScan;
+ }
+ }
+
+ // Generate the string.
+ for (int i = 0; i <= SkipScan; i++) {
+ unsigned char byte;
+ unsigned int skip_small = SkipScanIvars[i].skip % 0xf;
+ unsigned int scan_small = SkipScanIvars[i].scan % 0xf;
+ unsigned int skip_big = SkipScanIvars[i].skip / 0xf;
+ unsigned int scan_big = SkipScanIvars[i].scan / 0xf;
+
+ // first skip big.
+ for (unsigned int ix = 0; ix < skip_big; ix++)
+ BitMap += (unsigned char)(0xf0);
+
+ // next (skip small, scan)
+ if (skip_small) {
+ byte = skip_small << 4;
+ if (scan_big > 0) {
+ byte |= 0xf;
+ --scan_big;
+ } else if (scan_small) {
+ byte |= scan_small;
+ scan_small = 0;
+ }
+ BitMap += byte;
+ }
+ // next scan big
+ for (unsigned int ix = 0; ix < scan_big; ix++)
+ BitMap += (unsigned char)(0x0f);
+ // last scan small
+ if (scan_small) {
+ byte = scan_small;
+ BitMap += byte;
+ }
+ }
+ // null terminate string.
+ unsigned char zero = 0;
+ BitMap += zero;
+
+ llvm::GlobalVariable * Entry =
+ CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
+ llvm::ConstantDataArray::getString(VMContext, BitMap,false),
+ ((ObjCABI == 2) ?
+ "__TEXT,__objc_classname,cstring_literals" :
+ "__TEXT,__cstring,cstring_literals"),
+ 1, true);
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+/// BuildIvarLayout - Builds the ivar layout bitmap for the class
+/// implementation for the __strong or __weak case.
+/// The layout map shows which words in the ivar list must be skipped and
+/// which must be scanned by GC (see below). The string is built of bytes.
+/// Each byte is divided into two nibbles (4 bits each). The left nibble is
+/// the count of words to skip and the right nibble is the count of words to
+/// scan. So each nibble represents up to 15 words to skip or scan. Skipping
+/// the rest is represented by a 0x00 byte, which also ends the string.
+/// 1. When ForStrongLayout is true, the following ivars are scanned:
+/// - id, Class
+/// - object *
+/// - __strong anything
+///
+/// 2. When ForStrongLayout is false, the following ivars are scanned:
+/// - __weak anything
+///
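+/// For illustration (hypothetical layout): an object laid out as 3 scanned
+/// words, then 2 skipped words, then 1 scanned word is encoded as the bytes
+/// 0x03 (skip 0, scan 3), 0x21 (skip 2, scan 1), followed by the
+/// terminating 0x00.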
+llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
+ const ObjCImplementationDecl *OMD,
+ bool ForStrongLayout) {
+ bool hasUnion = false;
+
+ llvm::Type *PtrTy = CGM.Int8PtrTy;
+ if (CGM.getLangOpts().getGC() == LangOptions::NonGC &&
+ !CGM.getLangOpts().ObjCAutoRefCount)
+ return llvm::Constant::getNullValue(PtrTy);
+
+ const ObjCInterfaceDecl *OI = OMD->getClassInterface();
+ SmallVector<const FieldDecl*, 32> RecFields;
+ if (CGM.getLangOpts().ObjCAutoRefCount) {
+ for (const ObjCIvarDecl *IVD = OI->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar())
+ RecFields.push_back(cast<FieldDecl>(IVD));
+ }
+ else {
+ SmallVector<const ObjCIvarDecl*, 32> Ivars;
+ CGM.getContext().DeepCollectObjCIvars(OI, true, Ivars);
+
+ // FIXME: This is not ideal; we shouldn't have to do this copy.
+ RecFields.append(Ivars.begin(), Ivars.end());
+ }
+
+ if (RecFields.empty())
+ return llvm::Constant::getNullValue(PtrTy);
+
+ SkipIvars.clear();
+ IvarsInfo.clear();
+
+ BuildAggrIvarLayout(OMD, 0, 0, RecFields, 0, ForStrongLayout, hasUnion);
+ if (IvarsInfo.empty())
+ return llvm::Constant::getNullValue(PtrTy);
+ // Sort on byte position in case we encountered a union nested in
+ // the ivar list.
+ if (hasUnion && !IvarsInfo.empty())
+ std::sort(IvarsInfo.begin(), IvarsInfo.end());
+ if (hasUnion && !SkipIvars.empty())
+ std::sort(SkipIvars.begin(), SkipIvars.end());
+
+ std::string BitMap;
+ llvm::Constant *C = BuildIvarLayoutBitmap(BitMap);
+
+ if (CGM.getLangOpts().ObjCGCBitmapPrint) {
+ printf("\n%s ivar layout for class '%s': ",
+ ForStrongLayout ? "strong" : "weak",
+ OMD->getClassInterface()->getName().data());
+ const unsigned char *s = (unsigned char*)BitMap.c_str();
+ for (unsigned i = 0, e = BitMap.size(); i < e; i++)
+ if (!(s[i] & 0xf0))
+ printf("0x0%x%s", s[i], s[i] != 0 ? ", " : "");
+ else
+ printf("0x%x%s", s[i], s[i] != 0 ? ", " : "");
+ printf("\n");
+ }
+ return C;
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
+ llvm::GlobalVariable *&Entry = MethodVarNames[Sel];
+
+ // FIXME: Avoid std::string in "Sel.getAsString()"
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_NAME_",
+ llvm::ConstantDataArray::getString(VMContext, Sel.getAsString()),
+ ((ObjCABI == 2) ?
+ "__TEXT,__objc_methname,cstring_literals" :
+ "__TEXT,__cstring,cstring_literals"),
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetMethodVarName(IdentifierInfo *ID) {
+ return GetMethodVarName(CGM.getContext().Selectors.getNullarySelector(ID));
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForType(Field->getType(), TypeStr, Field);
+
+ llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
+ llvm::ConstantDataArray::getString(VMContext, TypeStr),
+ ((ObjCABI == 2) ?
+ "__TEXT,__objc_methtype,cstring_literals" :
+ "__TEXT,__cstring,cstring_literals"),
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D,
+ bool Extended) {
+ std::string TypeStr;
+ if (CGM.getContext().getObjCEncodingForMethodDecl(D, TypeStr, Extended))
+ return 0;
+
+ llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
+ llvm::ConstantDataArray::getString(VMContext, TypeStr),
+ ((ObjCABI == 2) ?
+ "__TEXT,__objc_methtype,cstring_literals" :
+ "__TEXT,__cstring,cstring_literals"),
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
+ llvm::GlobalVariable *&Entry = PropertyNames[Ident];
+
+ if (!Entry)
+ Entry = CreateMetadataVar("\01L_OBJC_PROP_NAME_ATTR_",
+ llvm::ConstantDataArray::getString(VMContext,
+ Ident->getNameStart()),
+ "__TEXT,__cstring,cstring_literals",
+ 1, true);
+
+ return getConstantGEP(VMContext, Entry, 0, 0);
+}
+
+// FIXME: Merge into a single cstring creation function.
+// FIXME: This Decl should be more precise.
+llvm::Constant *
+CGObjCCommonMac::GetPropertyTypeString(const ObjCPropertyDecl *PD,
+ const Decl *Container) {
+ std::string TypeStr;
+ CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container, TypeStr);
+ return GetPropertyName(&CGM.getContext().Idents.get(TypeStr));
+}
+
+void CGObjCCommonMac::GetNameForMethod(const ObjCMethodDecl *D,
+ const ObjCContainerDecl *CD,
+ SmallVectorImpl<char> &Name) {
+ llvm::raw_svector_ostream OS(Name);
+ assert(CD && "Missing container decl in GetNameForMethod");
+ OS << '\01' << (D->isInstanceMethod() ? '-' : '+')
+ << '[' << CD->getName();
+ if (const ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(D->getDeclContext()))
+ OS << '(' << *CID << ')';
+ OS << ' ' << D->getSelector().getAsString() << ']';
+}
+
+void CGObjCMac::FinishModule() {
+ EmitModuleInfo();
+
+ // Emit the dummy bodies for any protocols which were referenced but
+ // never defined.
+ for (llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*>::iterator
+ I = Protocols.begin(), e = Protocols.end(); I != e; ++I) {
+ if (I->second->hasInitializer())
+ continue;
+
+ llvm::Constant *Values[5];
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+ Values[1] = GetClassName(I->first);
+ Values[2] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ Values[3] = Values[4] =
+ llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
+ I->second->setLinkage(llvm::GlobalValue::InternalLinkage);
+ I->second->setInitializer(llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
+ Values));
+ CGM.AddUsedGlobal(I->second);
+ }
+
+ // Add assembler directives to add lazy undefined symbol references
+ // for classes which are referenced but not defined. This is
+ // important for correct linker interaction.
+ //
+ // FIXME: It would be nice if we had an LLVM construct for this.
+ if (!LazySymbols.empty() || !DefinedSymbols.empty()) {
+ SmallString<256> Asm;
+ Asm += CGM.getModule().getModuleInlineAsm();
+ if (!Asm.empty() && Asm.back() != '\n')
+ Asm += '\n';
+
+ llvm::raw_svector_ostream OS(Asm);
+ for (llvm::SetVector<IdentifierInfo*>::iterator I = DefinedSymbols.begin(),
+ e = DefinedSymbols.end(); I != e; ++I)
+ OS << "\t.objc_class_name_" << (*I)->getName() << "=0\n"
+ << "\t.globl .objc_class_name_" << (*I)->getName() << "\n";
+ for (llvm::SetVector<IdentifierInfo*>::iterator I = LazySymbols.begin(),
+ e = LazySymbols.end(); I != e; ++I) {
+ OS << "\t.lazy_reference .objc_class_name_" << (*I)->getName() << "\n";
+ }
+
+ for (size_t i = 0, e = DefinedCategoryNames.size(); i < e; ++i) {
+ OS << "\t.objc_category_name_" << DefinedCategoryNames[i] << "=0\n"
+ << "\t.globl .objc_category_name_" << DefinedCategoryNames[i] << "\n";
+ }
+
+ CGM.getModule().setModuleInlineAsm(OS.str());
+ }
+}
+
+CGObjCNonFragileABIMac::CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm)
+ : CGObjCCommonMac(cgm),
+ ObjCTypes(cgm) {
+ ObjCEmptyCacheVar = ObjCEmptyVtableVar = NULL;
+ ObjCABI = 2;
+}
+
+/* *** */
+
+ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
+ : VMContext(cgm.getLLVMContext()), CGM(cgm), ExternalProtocolPtrTy(0)
+{
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ ShortTy = Types.ConvertType(Ctx.ShortTy);
+ IntTy = Types.ConvertType(Ctx.IntTy);
+ LongTy = Types.ConvertType(Ctx.LongTy);
+ LongLongTy = Types.ConvertType(Ctx.LongLongTy);
+ Int8PtrTy = CGM.Int8PtrTy;
+ Int8PtrPtrTy = CGM.Int8PtrPtrTy;
+
+ ObjectPtrTy = Types.ConvertType(Ctx.getObjCIdType());
+ PtrObjectPtrTy = llvm::PointerType::getUnqual(ObjectPtrTy);
+ SelectorPtrTy = Types.ConvertType(Ctx.getObjCSelType());
+
+ // I'm not sure I like this. The implicit coordination is a bit
+ // gross. We should solve this in a reasonable fashion because this
+ // is a pretty common task (match some runtime data structure with
+ // an LLVM data structure).
+
+ // FIXME: This is leaked.
+ // FIXME: Merge with rewriter code?
+
+ // struct _objc_super {
+ // id self;
+ // Class cls;
+ // }
+ RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct,
+ Ctx.getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Ctx.Idents.get("_objc_super"));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
+ Ctx.getObjCIdType(), 0, 0, false, false));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
+ Ctx.getObjCClassType(), 0, 0, false, false));
+ RD->completeDefinition();
+
+ SuperCTy = Ctx.getTagDeclType(RD);
+ SuperPtrCTy = Ctx.getPointerType(SuperCTy);
+
+ SuperTy = cast<llvm::StructType>(Types.ConvertType(SuperCTy));
+ SuperPtrTy = llvm::PointerType::getUnqual(SuperTy);
+
+ // struct _prop_t {
+ // char *name;
+ // char *attributes;
+ // }
+ PropertyTy = llvm::StructType::create("struct._prop_t",
+ Int8PtrTy, Int8PtrTy, NULL);
+
+ // struct _prop_list_t {
+ // uint32_t entsize; // sizeof(struct _prop_t)
+ // uint32_t count_of_properties;
+ // struct _prop_t prop_list[count_of_properties];
+ // }
+ PropertyListTy =
+ llvm::StructType::create("struct._prop_list_t", IntTy, IntTy,
+ llvm::ArrayType::get(PropertyTy, 0), NULL);
+ // struct _prop_list_t *
+ PropertyListPtrTy = llvm::PointerType::getUnqual(PropertyListTy);
+
+ // struct _objc_method {
+ // SEL _cmd;
+ // char *method_type;
+ // char *_imp;
+ // }
+ MethodTy = llvm::StructType::create("struct._objc_method",
+ SelectorPtrTy, Int8PtrTy, Int8PtrTy,
+ NULL);
+
+ // struct _objc_cache *
+ CacheTy = llvm::StructType::create(VMContext, "struct._objc_cache");
+ CachePtrTy = llvm::PointerType::getUnqual(CacheTy);
+
+}
+
+ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
+ : ObjCCommonTypesHelper(cgm) {
+ // struct _objc_method_description {
+ // SEL name;
+ // char *types;
+ // }
+ MethodDescriptionTy =
+ llvm::StructType::create("struct._objc_method_description",
+ SelectorPtrTy, Int8PtrTy, NULL);
+
+ // struct _objc_method_description_list {
+ // int count;
+ // struct _objc_method_description[1];
+ // }
+ MethodDescriptionListTy =
+ llvm::StructType::create("struct._objc_method_description_list",
+ IntTy,
+ llvm::ArrayType::get(MethodDescriptionTy, 0),NULL);
+
+ // struct _objc_method_description_list *
+ MethodDescriptionListPtrTy =
+ llvm::PointerType::getUnqual(MethodDescriptionListTy);
+
+ // Protocol description structures
+
+ // struct _objc_protocol_extension {
+ // uint32_t size; // sizeof(struct _objc_protocol_extension)
+ // struct _objc_method_description_list *optional_instance_methods;
+ // struct _objc_method_description_list *optional_class_methods;
+ // struct _objc_property_list *instance_properties;
+ // const char ** extendedMethodTypes;
+ // }
+ ProtocolExtensionTy =
+ llvm::StructType::create("struct._objc_protocol_extension",
+ IntTy, MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy, PropertyListPtrTy,
+ Int8PtrPtrTy, NULL);
+
+ // struct _objc_protocol_extension *
+ ProtocolExtensionPtrTy = llvm::PointerType::getUnqual(ProtocolExtensionTy);
+
+ // Handle recursive construction of Protocol and ProtocolList types
+
+ ProtocolTy =
+ llvm::StructType::create(VMContext, "struct._objc_protocol");
+
+ ProtocolListTy =
+ llvm::StructType::create(VMContext, "struct._objc_protocol_list");
+ ProtocolListTy->setBody(llvm::PointerType::getUnqual(ProtocolListTy),
+ LongTy,
+ llvm::ArrayType::get(ProtocolTy, 0),
+ NULL);
+
+ // struct _objc_protocol {
+ // struct _objc_protocol_extension *isa;
+ // char *protocol_name;
+ // struct _objc_protocol **_objc_protocol_list;
+ // struct _objc_method_description_list *instance_methods;
+ // struct _objc_method_description_list *class_methods;
+ // }
+ ProtocolTy->setBody(ProtocolExtensionPtrTy, Int8PtrTy,
+ llvm::PointerType::getUnqual(ProtocolListTy),
+ MethodDescriptionListPtrTy,
+ MethodDescriptionListPtrTy,
+ NULL);
+
+ // struct _objc_protocol_list *
+ ProtocolListPtrTy = llvm::PointerType::getUnqual(ProtocolListTy);
+
+ ProtocolPtrTy = llvm::PointerType::getUnqual(ProtocolTy);
+
+ // Class description structures
+
+ // struct _objc_ivar {
+ // char *ivar_name;
+ // char *ivar_type;
+ // int ivar_offset;
+ // }
+ IvarTy = llvm::StructType::create("struct._objc_ivar",
+ Int8PtrTy, Int8PtrTy, IntTy, NULL);
+
+ // struct _objc_ivar_list *
+ IvarListTy =
+ llvm::StructType::create(VMContext, "struct._objc_ivar_list");
+ IvarListPtrTy = llvm::PointerType::getUnqual(IvarListTy);
+
+ // struct _objc_method_list *
+ MethodListTy =
+ llvm::StructType::create(VMContext, "struct._objc_method_list");
+ MethodListPtrTy = llvm::PointerType::getUnqual(MethodListTy);
+
+ // struct _objc_class_extension *
+ ClassExtensionTy =
+ llvm::StructType::create("struct._objc_class_extension",
+ IntTy, Int8PtrTy, PropertyListPtrTy, NULL);
+ ClassExtensionPtrTy = llvm::PointerType::getUnqual(ClassExtensionTy);
+
+ ClassTy = llvm::StructType::create(VMContext, "struct._objc_class");
+
+ // struct _objc_class {
+ // Class isa;
+ // Class super_class;
+ // char *name;
+ // long version;
+ // long info;
+ // long instance_size;
+ // struct _objc_ivar_list *ivars;
+ // struct _objc_method_list *methods;
+ // struct _objc_cache *cache;
+ // struct _objc_protocol_list *protocols;
+ // char *ivar_layout;
+ // struct _objc_class_ext *ext;
+ // };
+ ClassTy->setBody(llvm::PointerType::getUnqual(ClassTy),
+ llvm::PointerType::getUnqual(ClassTy),
+ Int8PtrTy,
+ LongTy,
+ LongTy,
+ LongTy,
+ IvarListPtrTy,
+ MethodListPtrTy,
+ CachePtrTy,
+ ProtocolListPtrTy,
+ Int8PtrTy,
+ ClassExtensionPtrTy,
+ NULL);
+
+ ClassPtrTy = llvm::PointerType::getUnqual(ClassTy);
+
+ // struct _objc_category {
+ // char *category_name;
+ // char *class_name;
+ // struct _objc_method_list *instance_methods;
+ // struct _objc_method_list *class_methods;
+ // struct _objc_protocol_list *protocols;
+ // uint32_t size; // sizeof(struct _objc_category)
+ // struct _objc_property_list *instance_properties;// category's @property
+ // }
+ CategoryTy =
+ llvm::StructType::create("struct._objc_category",
+ Int8PtrTy, Int8PtrTy, MethodListPtrTy,
+ MethodListPtrTy, ProtocolListPtrTy,
+ IntTy, PropertyListPtrTy, NULL);
+
+ // Global metadata structures
+
+ // struct _objc_symtab {
+ // long sel_ref_cnt;
+ // SEL *refs;
+ // short cls_def_cnt;
+ // short cat_def_cnt;
+ // char *defs[cls_def_cnt + cat_def_cnt];
+ // }
+ SymtabTy =
+ llvm::StructType::create("struct._objc_symtab",
+ LongTy, SelectorPtrTy, ShortTy, ShortTy,
+ llvm::ArrayType::get(Int8PtrTy, 0), NULL);
+ SymtabPtrTy = llvm::PointerType::getUnqual(SymtabTy);
+
+ // struct _objc_module {
+ // long version;
+ // long size; // sizeof(struct _objc_module)
+ // char *name;
+ // struct _objc_symtab* symtab;
+ // }
+ ModuleTy =
+ llvm::StructType::create("struct._objc_module",
+ LongTy, LongTy, Int8PtrTy, SymtabPtrTy, NULL);
+
+
+ // FIXME: This is the size of the setjmp buffer and should be target
+ // specific. 18 is what's used on 32-bit X86.
+ uint64_t SetJmpBufferSize = 18;
+
+ // Exceptions
+ llvm::Type *StackPtrTy = llvm::ArrayType::get(CGM.Int8PtrTy, 4);
+
+ ExceptionDataTy =
+ llvm::StructType::create("struct._objc_exception_data",
+ llvm::ArrayType::get(CGM.Int32Ty,SetJmpBufferSize),
+ StackPtrTy, NULL);
+
+}
+
+ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm)
+ : ObjCCommonTypesHelper(cgm) {
+ // struct _method_list_t {
+ // uint32_t entsize; // sizeof(struct _objc_method)
+ // uint32_t method_count;
+ // struct _objc_method method_list[method_count];
+ // }
+ MethodListnfABITy =
+ llvm::StructType::create("struct.__method_list_t", IntTy, IntTy,
+ llvm::ArrayType::get(MethodTy, 0), NULL);
+ // struct method_list_t *
+ MethodListnfABIPtrTy = llvm::PointerType::getUnqual(MethodListnfABITy);
+
+ // struct _protocol_t {
+ // id isa; // NULL
+ // const char * const protocol_name;
+ // const struct _protocol_list_t * protocol_list; // super protocols
+ // const struct method_list_t * const instance_methods;
+ // const struct method_list_t * const class_methods;
+ // const struct method_list_t *optionalInstanceMethods;
+ // const struct method_list_t *optionalClassMethods;
+ // const struct _prop_list_t * properties;
+ // const uint32_t size; // sizeof(struct _protocol_t)
+ // const uint32_t flags; // = 0
+ // const char ** extendedMethodTypes;
+ // }
+
+ // Holder for struct _protocol_list_t *
+ ProtocolListnfABITy =
+ llvm::StructType::create(VMContext, "struct._objc_protocol_list");
+
+ ProtocolnfABITy =
+ llvm::StructType::create("struct._protocol_t", ObjectPtrTy, Int8PtrTy,
+ llvm::PointerType::getUnqual(ProtocolListnfABITy),
+ MethodListnfABIPtrTy, MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy, MethodListnfABIPtrTy,
+ PropertyListPtrTy, IntTy, IntTy, Int8PtrPtrTy,
+ NULL);
+
+ // struct _protocol_t*
+ ProtocolnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolnfABITy);
+
+ // struct _protocol_list_t {
+ // long protocol_count; // Note, this is 32/64 bit
+ // struct _protocol_t *[protocol_count];
+ // }
+ ProtocolListnfABITy->setBody(LongTy,
+ llvm::ArrayType::get(ProtocolnfABIPtrTy, 0),
+ NULL);
+
+ // struct _objc_protocol_list*
+ ProtocolListnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolListnfABITy);
+
+ // struct _ivar_t {
+ // unsigned long int *offset; // pointer to ivar offset location
+ // char *name;
+ // char *type;
+ // uint32_t alignment;
+ // uint32_t size;
+ // }
+ IvarnfABITy =
+ llvm::StructType::create("struct._ivar_t",
+ llvm::PointerType::getUnqual(LongTy),
+ Int8PtrTy, Int8PtrTy, IntTy, IntTy, NULL);
+
+ // struct _ivar_list_t {
+ // uint32 entsize; // sizeof(struct _ivar_t)
+ // uint32 count;
+ // struct _ivar_t list[count];
+ // }
+ IvarListnfABITy =
+ llvm::StructType::create("struct._ivar_list_t", IntTy, IntTy,
+ llvm::ArrayType::get(IvarnfABITy, 0), NULL);
+
+ IvarListnfABIPtrTy = llvm::PointerType::getUnqual(IvarListnfABITy);
+
+ // struct _class_ro_t {
+ // uint32_t const flags;
+ // uint32_t const instanceStart;
+ // uint32_t const instanceSize;
+ // uint32_t const reserved; // only when building for 64bit targets
+ // const uint8_t * const ivarLayout;
+ // const char *const name;
+ // const struct _method_list_t * const baseMethods;
+ // const struct _objc_protocol_list *const baseProtocols;
+ // const struct _ivar_list_t *const ivars;
+ // const uint8_t * const weakIvarLayout;
+ // const struct _prop_list_t * const properties;
+ // }
+
+ // FIXME. Add 'reserved' field in 64bit abi mode!
+ ClassRonfABITy = llvm::StructType::create("struct._class_ro_t",
+ IntTy, IntTy, IntTy, Int8PtrTy,
+ Int8PtrTy, MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ IvarListnfABIPtrTy,
+ Int8PtrTy, PropertyListPtrTy, NULL);
+
+ // ImpnfABITy - LLVM for id (*)(id, SEL, ...)
+ llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
+ ImpnfABITy = llvm::FunctionType::get(ObjectPtrTy, params, false)
+ ->getPointerTo();
+
+ // struct _class_t {
+ // struct _class_t *isa;
+ // struct _class_t * const superclass;
+ // void *cache;
+ // IMP *vtable;
+ // struct class_ro_t *ro;
+ // }
+
+ ClassnfABITy = llvm::StructType::create(VMContext, "struct._class_t");
+ ClassnfABITy->setBody(llvm::PointerType::getUnqual(ClassnfABITy),
+ llvm::PointerType::getUnqual(ClassnfABITy),
+ CachePtrTy,
+ llvm::PointerType::getUnqual(ImpnfABITy),
+ llvm::PointerType::getUnqual(ClassRonfABITy),
+ NULL);
+
+ // LLVM for struct _class_t *
+ ClassnfABIPtrTy = llvm::PointerType::getUnqual(ClassnfABITy);
+
+ // struct _category_t {
+ // const char * const name;
+ // struct _class_t *const cls;
+ // const struct _method_list_t * const instance_methods;
+ // const struct _method_list_t * const class_methods;
+ // const struct _protocol_list_t * const protocols;
+ // const struct _prop_list_t * const properties;
+ // }
+ CategorynfABITy = llvm::StructType::create("struct._category_t",
+ Int8PtrTy, ClassnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ MethodListnfABIPtrTy,
+ ProtocolListnfABIPtrTy,
+ PropertyListPtrTy,
+ NULL);
+
+ // New types for nonfragile abi messaging.
+ CodeGen::CodeGenTypes &Types = CGM.getTypes();
+ ASTContext &Ctx = CGM.getContext();
+
+ // MessageRefTy - LLVM for:
+ // struct _message_ref_t {
+ // IMP messenger;
+ // SEL name;
+ // };
+
+ // First the clang type for struct _message_ref_t
+ RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct,
+ Ctx.getTranslationUnitDecl(),
+ SourceLocation(), SourceLocation(),
+ &Ctx.Idents.get("_message_ref_t"));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
+ Ctx.VoidPtrTy, 0, 0, false, false));
+ RD->addDecl(FieldDecl::Create(Ctx, RD, SourceLocation(), SourceLocation(), 0,
+ Ctx.getObjCSelType(), 0, 0, false, false));
+ RD->completeDefinition();
+
+ MessageRefCTy = Ctx.getTagDeclType(RD);
+ MessageRefCPtrTy = Ctx.getPointerType(MessageRefCTy);
+ MessageRefTy = cast<llvm::StructType>(Types.ConvertType(MessageRefCTy));
+
+ // MessageRefPtrTy - LLVM for struct _message_ref_t*
+ MessageRefPtrTy = llvm::PointerType::getUnqual(MessageRefTy);
+
+ // SuperMessageRefTy - LLVM for:
+ // struct _super_message_ref_t {
+ // SUPER_IMP messenger;
+ // SEL name;
+ // };
+ SuperMessageRefTy =
+ llvm::StructType::create("struct._super_message_ref_t",
+ ImpnfABITy, SelectorPtrTy, NULL);
+
+ // SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
+ SuperMessageRefPtrTy = llvm::PointerType::getUnqual(SuperMessageRefTy);
+
+
+ // struct objc_typeinfo {
+ // const void** vtable; // objc_ehtype_vtable + 2
+ // const char* name; // c++ typeinfo string
+ // Class cls;
+ // };
+ EHTypeTy =
+ llvm::StructType::create("struct._objc_typeinfo",
+ llvm::PointerType::getUnqual(Int8PtrTy),
+ Int8PtrTy, ClassnfABIPtrTy, NULL);
+ EHTypePtrTy = llvm::PointerType::getUnqual(EHTypeTy);
+}
+
+llvm::Function *CGObjCNonFragileABIMac::ModuleInitFunction() {
+ FinishNonFragileABIModule();
+
+ return NULL;
+}
+
+void CGObjCNonFragileABIMac::
+AddModuleClassList(ArrayRef<llvm::GlobalValue*> Container,
+ const char *SymbolName,
+ const char *SectionName) {
+ unsigned NumClasses = Container.size();
+
+ if (!NumClasses)
+ return;
+
+ SmallVector<llvm::Constant*, 8> Symbols(NumClasses);
+ for (unsigned i=0; i<NumClasses; i++)
+ Symbols[i] = llvm::ConstantExpr::getBitCast(Container[i],
+ ObjCTypes.Int8PtrTy);
+ llvm::Constant *Init =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
+ Symbols.size()),
+ Symbols);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ SymbolName);
+ GV->setAlignment(CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ GV->setSection(SectionName);
+ CGM.AddUsedGlobal(GV);
+}
+
+void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
+ // nonfragile abi has no module definition.
+
+ // Build list of all implemented class addresses in array
+ // L_OBJC_LABEL_CLASS_$.
+ AddModuleClassList(DefinedClasses,
+ "\01L_OBJC_LABEL_CLASS_$",
+ "__DATA, __objc_classlist, regular, no_dead_strip");
+
+ for (unsigned i = 0, e = DefinedClasses.size(); i < e; i++) {
+ llvm::GlobalValue *IMPLGV = DefinedClasses[i];
+ if (IMPLGV->getLinkage() != llvm::GlobalValue::ExternalWeakLinkage)
+ continue;
+ IMPLGV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ }
+
+ for (unsigned i = 0, e = DefinedMetaClasses.size(); i < e; i++) {
+ llvm::GlobalValue *IMPLGV = DefinedMetaClasses[i];
+ if (IMPLGV->getLinkage() != llvm::GlobalValue::ExternalWeakLinkage)
+ continue;
+ IMPLGV->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ }
+
+ AddModuleClassList(DefinedNonLazyClasses,
+ "\01L_OBJC_LABEL_NONLAZY_CLASS_$",
+ "__DATA, __objc_nlclslist, regular, no_dead_strip");
+
+ // Build list of all implemented category addresses in array
+ // L_OBJC_LABEL_CATEGORY_$.
+ AddModuleClassList(DefinedCategories,
+ "\01L_OBJC_LABEL_CATEGORY_$",
+ "__DATA, __objc_catlist, regular, no_dead_strip");
+ AddModuleClassList(DefinedNonLazyCategories,
+ "\01L_OBJC_LABEL_NONLAZY_CATEGORY_$",
+ "__DATA, __objc_nlcatlist, regular, no_dead_strip");
+
+ EmitImageInfo();
+}
+
+/// isVTableDispatchedSelector - Returns true if SEL is in the list of
+/// VTableDispatchMethods; false otherwise. What this means is that
+/// except for the 19 selectors in the list, we generate a 32bit-style
+/// message dispatch call for all the rest.
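+/// For illustration: in Mixed dispatch mode a send of 'alloc' or 'class'
+/// uses the vtable-based convention, while a selector that is not in the
+/// list built below falls back to the legacy message send.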
+bool CGObjCNonFragileABIMac::isVTableDispatchedSelector(Selector Sel) {
+ // At various points we've experimented with using vtable-based
+ // dispatch for all methods.
+ switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
+ case CodeGenOptions::Legacy:
+ return false;
+ case CodeGenOptions::NonLegacy:
+ return true;
+ case CodeGenOptions::Mixed:
+ break;
+ }
+
+ // If so, see whether this selector is in the white-list of things which must
+ // use the new dispatch convention. We lazily build a dense set for this.
+ if (VTableDispatchMethods.empty()) {
+ VTableDispatchMethods.insert(GetNullarySelector("alloc"));
+ VTableDispatchMethods.insert(GetNullarySelector("class"));
+ VTableDispatchMethods.insert(GetNullarySelector("self"));
+ VTableDispatchMethods.insert(GetNullarySelector("isFlipped"));
+ VTableDispatchMethods.insert(GetNullarySelector("length"));
+ VTableDispatchMethods.insert(GetNullarySelector("count"));
+
+ // These are vtable-based if GC is disabled.
+ // Optimistically use vtable dispatch for hybrid compiles.
+ if (CGM.getLangOpts().getGC() != LangOptions::GCOnly) {
+ VTableDispatchMethods.insert(GetNullarySelector("retain"));
+ VTableDispatchMethods.insert(GetNullarySelector("release"));
+ VTableDispatchMethods.insert(GetNullarySelector("autorelease"));
+ }
+
+ VTableDispatchMethods.insert(GetUnarySelector("allocWithZone"));
+ VTableDispatchMethods.insert(GetUnarySelector("isKindOfClass"));
+ VTableDispatchMethods.insert(GetUnarySelector("respondsToSelector"));
+ VTableDispatchMethods.insert(GetUnarySelector("objectForKey"));
+ VTableDispatchMethods.insert(GetUnarySelector("objectAtIndex"));
+ VTableDispatchMethods.insert(GetUnarySelector("isEqualToString"));
+ VTableDispatchMethods.insert(GetUnarySelector("isEqual"));
+
+ // These are vtable-based if GC is enabled.
+ // Optimistically use vtable dispatch for hybrid compiles.
+ if (CGM.getLangOpts().getGC() != LangOptions::NonGC) {
+ VTableDispatchMethods.insert(GetNullarySelector("hash"));
+ VTableDispatchMethods.insert(GetUnarySelector("addObject"));
+
+ // "countByEnumeratingWithState:objects:count"
+ IdentifierInfo *KeyIdents[] = {
+ &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+ &CGM.getContext().Idents.get("objects"),
+ &CGM.getContext().Idents.get("count")
+ };
+ VTableDispatchMethods.insert(
+ CGM.getContext().Selectors.getSelector(3, KeyIdents));
+ }
+ }
+
+ return VTableDispatchMethods.count(Sel);
+}
+
+// Metadata flags
+enum MetaDataFlags {
+ CLS = 0x0,
+ CLS_META = 0x1,
+ CLS_ROOT = 0x2,
+ OBJC2_CLS_HIDDEN = 0x10,
+ CLS_EXCEPTION = 0x20,
+
+ /// (Obsolete) ARC-specific: this class has a .release_ivars method
+ CLS_HAS_IVAR_RELEASER = 0x40,
+ /// class was compiled with -fobjc-arr
+ CLS_COMPILED_BY_ARC = 0x80 // (1<<7)
+};
+/// BuildClassRoTInitializer - generate meta-data for:
+/// struct _class_ro_t {
+/// uint32_t const flags;
+/// uint32_t const instanceStart;
+/// uint32_t const instanceSize;
+/// uint32_t const reserved; // only when building for 64bit targets
+/// const uint8_t * const ivarLayout;
+/// const char *const name;
+/// const struct _method_list_t * const baseMethods;
+/// const struct _protocol_list_t *const baseProtocols;
+/// const struct _ivar_list_t *const ivars;
+/// const uint8_t * const weakIvarLayout;
+/// const struct _prop_list_t * const properties;
+/// }
+///
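+/// For a hypothetical class MyClass, the returned global is named
+/// l_OBJC_CLASS_RO_$_MyClass (or l_OBJC_METACLASS_RO_$_MyClass when the
+/// CLS_META flag is set) and is emitted into the __DATA,__objc_const
+/// section.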
+llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
+ unsigned flags,
+ unsigned InstanceStart,
+ unsigned InstanceSize,
+ const ObjCImplementationDecl *ID) {
+ std::string ClassName = ID->getNameAsString();
+ llvm::Constant *Values[10]; // 11 for 64bit targets!
+
+ if (CGM.getLangOpts().ObjCAutoRefCount)
+ flags |= CLS_COMPILED_BY_ARC;
+
+ Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
+ Values[ 1] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceStart);
+ Values[ 2] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceSize);
+ // FIXME. For 64bit targets add 0 here.
+ Values[ 3] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
+ : BuildIvarLayout(ID, true);
+ Values[ 4] = GetClassName(ID->getIdentifier());
+ // const struct _method_list_t * const baseMethods;
+ std::vector<llvm::Constant*> Methods;
+ std::string MethodListName("\01l_OBJC_$_");
+ if (flags & CLS_META) {
+ MethodListName += "CLASS_METHODS_" + ID->getNameAsString();
+ for (ObjCImplementationDecl::classmeth_iterator
+ i = ID->classmeth_begin(), e = ID->classmeth_end(); i != e; ++i) {
+ // Class methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+ } else {
+ MethodListName += "INSTANCE_METHODS_" + ID->getNameAsString();
+ for (ObjCImplementationDecl::instmeth_iterator
+ i = ID->instmeth_begin(), e = ID->instmeth_end(); i != e; ++i) {
+ // Instance methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = ID->propimpl_begin(), e = ID->propimpl_end(); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize){
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+ if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ Methods.push_back(C);
+ if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
+ if (llvm::Constant *C = GetMethodConstant(MD))
+ Methods.push_back(C);
+ }
+ }
+ }
+ Values[ 5] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const", Methods);
+
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
+ assert(OID && "CGObjCNonFragileABIMac::BuildClassRoTInitializer");
+ Values[ 6] = EmitProtocolList("\01l_OBJC_CLASS_PROTOCOLS_$_"
+ + OID->getName(),
+ OID->all_referenced_protocol_begin(),
+ OID->all_referenced_protocol_end());
+
+ if (flags & CLS_META)
+ Values[ 7] = llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
+ else
+ Values[ 7] = EmitIvarList(ID);
+ Values[ 8] = (flags & CLS_META) ? GetIvarLayoutName(0, ObjCTypes)
+ : BuildIvarLayout(ID, false);
+ if (flags & CLS_META)
+ Values[ 9] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ else
+ Values[ 9] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
+ ID, ID->getClassInterface(), ObjCTypes);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassRonfABITy,
+ Values);
+ llvm::GlobalVariable *CLASS_RO_GV =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassRonfABITy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ (flags & CLS_META) ?
+ std::string("\01l_OBJC_METACLASS_RO_$_")+ClassName :
+ std::string("\01l_OBJC_CLASS_RO_$_")+ClassName);
+ CLASS_RO_GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ClassRonfABITy));
+ CLASS_RO_GV->setSection("__DATA, __objc_const");
+ return CLASS_RO_GV;
+
+}
+
+/// BuildClassMetaData - This routine defines the top-level meta-data
+/// for the given ClassName for:
+/// struct _class_t {
+/// struct _class_t *isa;
+/// struct _class_t * const superclass;
+/// void *cache;
+/// IMP *vtable;
+/// struct class_ro_t *ro;
+/// }
+///
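+/// For a hypothetical class MyClass, GenerateClass (below) calls this
+/// routine twice, once for the metaclass and once for the class itself;
+/// both globals are placed in the __DATA,__objc_data section.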
+llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassMetaData(
+ std::string &ClassName,
+ llvm::Constant *IsAGV,
+ llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV,
+ bool HiddenVisibility) {
+ llvm::Constant *Values[] = {
+ IsAGV,
+ SuperClassGV,
+ ObjCEmptyCacheVar, // &ObjCEmptyCacheVar
+ ObjCEmptyVtableVar, // &ObjCEmptyVtableVar
+ ClassRoGV // &CLASS_RO_GV
+ };
+ if (!Values[1])
+ Values[1] = llvm::Constant::getNullValue(ObjCTypes.ClassnfABIPtrTy);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassnfABITy,
+ Values);
+ llvm::GlobalVariable *GV = GetClassGlobal(ClassName);
+ GV->setInitializer(Init);
+ GV->setSection("__DATA, __objc_data");
+ GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ClassnfABITy));
+ if (HiddenVisibility)
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ return GV;
+}
+
+bool
+CGObjCNonFragileABIMac::ImplementationIsNonLazy(const ObjCImplDecl *OD) const {
+ return OD->getClassMethod(GetNullarySelector("load")) != 0;
+}
+
+void CGObjCNonFragileABIMac::GetClassSizeInfo(const ObjCImplementationDecl *OID,
+ uint32_t &InstanceStart,
+ uint32_t &InstanceSize) {
+ const ASTRecordLayout &RL =
+ CGM.getContext().getASTObjCImplementationLayout(OID);
+
+ // InstanceSize is really instance end.
+ InstanceSize = RL.getDataSize().getQuantity();
+
+ // If there are no fields, the start is the same as the end.
+ if (!RL.getFieldCount())
+ InstanceStart = InstanceSize;
+ else
+ InstanceStart = RL.getFieldOffset(0) / CGM.getContext().getCharWidth();
+}
+
+void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
+ std::string ClassName = ID->getNameAsString();
+ if (!ObjCEmptyCacheVar) {
+ ObjCEmptyCacheVar = new llvm::GlobalVariable(
+ CGM.getModule(),
+ ObjCTypes.CacheTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "_objc_empty_cache");
+
+ ObjCEmptyVtableVar = new llvm::GlobalVariable(
+ CGM.getModule(),
+ ObjCTypes.ImpnfABITy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "_objc_empty_vtable");
+ }
+ assert(ID->getClassInterface() &&
+ "CGObjCNonFragileABIMac::GenerateClass - class is 0");
+ // FIXME: Is this correct (that meta class size is never computed)?
+ uint32_t InstanceStart =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ClassnfABITy);
+ uint32_t InstanceSize = InstanceStart;
+ uint32_t flags = CLS_META;
+ std::string ObjCMetaClassName(getMetaclassSymbolPrefix());
+ std::string ObjCClassName(getClassSymbolPrefix());
+
+ llvm::GlobalVariable *SuperClassGV, *IsAGV;
+
+ bool classIsHidden =
+ ID->getClassInterface()->getVisibility() == HiddenVisibility;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+ if (ID->hasCXXStructors())
+ flags |= eClassFlags_ABI2_HasCXXStructors;
+ if (!ID->getClassInterface()->getSuperClass()) {
+ // class is root
+ flags |= CLS_ROOT;
+ SuperClassGV = GetClassGlobal(ObjCClassName + ClassName);
+ IsAGV = GetClassGlobal(ObjCMetaClassName + ClassName);
+ } else {
+ // Has a root. Current class is not a root.
+ const ObjCInterfaceDecl *Root = ID->getClassInterface();
+ while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
+ Root = Super;
+ IsAGV = GetClassGlobal(ObjCMetaClassName + Root->getNameAsString());
+ if (Root->isWeakImported())
+ IsAGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ // work on super class metadata symbol.
+ std::string SuperClassName =
+ ObjCMetaClassName +
+ ID->getClassInterface()->getSuperClass()->getNameAsString();
+ SuperClassGV = GetClassGlobal(SuperClassName);
+ if (ID->getClassInterface()->getSuperClass()->isWeakImported())
+ SuperClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ }
+ llvm::GlobalVariable *CLASS_RO_GV = BuildClassRoTInitializer(flags,
+ InstanceStart,
+ InstanceSize,ID);
+ std::string TClassName = ObjCMetaClassName + ClassName;
+ llvm::GlobalVariable *MetaTClass =
+ BuildClassMetaData(TClassName, IsAGV, SuperClassGV, CLASS_RO_GV,
+ classIsHidden);
+ DefinedMetaClasses.push_back(MetaTClass);
+
+ // Metadata for the class
+ flags = CLS;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+ if (ID->hasCXXStructors())
+ flags |= eClassFlags_ABI2_HasCXXStructors;
+
+ if (hasObjCExceptionAttribute(CGM.getContext(), ID->getClassInterface()))
+ flags |= CLS_EXCEPTION;
+
+ if (!ID->getClassInterface()->getSuperClass()) {
+ flags |= CLS_ROOT;
+ SuperClassGV = 0;
+ } else {
+ // Has a root. Current class is not a root.
+ std::string RootClassName =
+ ID->getClassInterface()->getSuperClass()->getNameAsString();
+ SuperClassGV = GetClassGlobal(ObjCClassName + RootClassName);
+ if (ID->getClassInterface()->getSuperClass()->isWeakImported())
+ SuperClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ }
+ GetClassSizeInfo(ID, InstanceStart, InstanceSize);
+ CLASS_RO_GV = BuildClassRoTInitializer(flags,
+ InstanceStart,
+ InstanceSize,
+ ID);
+
+ TClassName = ObjCClassName + ClassName;
+ llvm::GlobalVariable *ClassMD =
+ BuildClassMetaData(TClassName, MetaTClass, SuperClassGV, CLASS_RO_GV,
+ classIsHidden);
+ DefinedClasses.push_back(ClassMD);
+
+ // Determine if this class is also "non-lazy".
+ if (ImplementationIsNonLazy(ID))
+ DefinedNonLazyClasses.push_back(ClassMD);
+
+ // Force the definition of the EHType if necessary.
+ if (flags & CLS_EXCEPTION)
+ GetInterfaceEHType(ID->getClassInterface(), true);
+ // Make sure method definition entries are all clear for the next implementation.
+ MethodDefinitions.clear();
+}
+
+/// GenerateProtocolRef - This routine is called to generate code for
+/// a protocol reference expression; as in:
+/// @code
+/// @protocol(Proto1);
+/// @endcode
+/// It generates a weak reference to l_OBJC_PROTOCOL_REFERENCE_$_Proto1,
+/// which will hold the address of the protocol meta-data.
+///
+llvm::Value *CGObjCNonFragileABIMac::GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *PD) {
+
+ // This routine is called for @protocol only. So, we must build the
+ // definition of the protocol's meta-data (not a reference to it!).
+ //
+ llvm::Constant *Init =
+ llvm::ConstantExpr::getBitCast(GetOrEmitProtocol(PD),
+ ObjCTypes.getExternalProtocolPtrTy());
+
+ std::string ProtocolName("\01l_OBJC_PROTOCOL_REFERENCE_$_");
+ ProtocolName += PD->getName();
+
+ llvm::GlobalVariable *PTGV = CGM.getModule().getGlobalVariable(ProtocolName);
+ if (PTGV)
+ return Builder.CreateLoad(PTGV);
+ PTGV = new llvm::GlobalVariable(
+ CGM.getModule(),
+ Init->getType(), false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ ProtocolName);
+ PTGV->setSection("__DATA, __objc_protorefs, coalesced, no_dead_strip");
+ PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CGM.AddUsedGlobal(PTGV);
+ return Builder.CreateLoad(PTGV);
+}
+
+/// GenerateCategory - Build metadata for a category implementation.
+/// struct _category_t {
+/// const char * const name;
+/// struct _class_t *const cls;
+/// const struct _method_list_t * const instance_methods;
+/// const struct _method_list_t * const class_methods;
+/// const struct _protocol_list_t * const protocols;
+/// const struct _prop_list_t * const properties;
+/// }
+///
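+/// For a hypothetical category @implementation MyClass (Extras), the
+/// generated global is named l_OBJC_$_CATEGORY_MyClass_$_Extras and is
+/// placed in the __DATA,__objc_const section.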
+void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
+ const ObjCInterfaceDecl *Interface = OCD->getClassInterface();
+ const char *Prefix = "\01l_OBJC_$_CATEGORY_";
+ std::string ExtCatName(Prefix + Interface->getNameAsString()+
+ "_$_" + OCD->getNameAsString());
+ std::string ExtClassName(getClassSymbolPrefix() +
+ Interface->getNameAsString());
+
+ llvm::Constant *Values[6];
+ Values[0] = GetClassName(OCD->getIdentifier());
+ // meta-class entry symbol
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ExtClassName);
+ if (Interface->isWeakImported())
+ ClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+
+ Values[1] = ClassGV;
+ std::vector<llvm::Constant*> Methods;
+ std::string MethodListName(Prefix);
+ MethodListName += "INSTANCE_METHODS_" + Interface->getNameAsString() +
+ "_$_" + OCD->getNameAsString();
+
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ i = OCD->instmeth_begin(), e = OCD->instmeth_end(); i != e; ++i) {
+ // Instance methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+
+ Values[2] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const",
+ Methods);
+
+ MethodListName = Prefix;
+ MethodListName += "CLASS_METHODS_" + Interface->getNameAsString() + "_$_" +
+ OCD->getNameAsString();
+ Methods.clear();
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ i = OCD->classmeth_begin(), e = OCD->classmeth_end(); i != e; ++i) {
+ // Class methods should always be defined.
+ Methods.push_back(GetMethodConstant(*i));
+ }
+
+ Values[3] = EmitMethodList(MethodListName,
+ "__DATA, __objc_const",
+ Methods);
+ const ObjCCategoryDecl *Category =
+ Interface->FindCategoryDeclaration(OCD->getIdentifier());
+ if (Category) {
+ SmallString<256> ExtName;
+ llvm::raw_svector_ostream(ExtName) << Interface->getName() << "_$_"
+ << OCD->getName();
+ Values[4] = EmitProtocolList("\01l_OBJC_CATEGORY_PROTOCOLS_$_"
+ + Interface->getName() + "_$_"
+ + Category->getName(),
+ Category->protocol_begin(),
+ Category->protocol_end());
+ Values[5] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes);
+ } else {
+ Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
+ Values[5] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ }
+
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.CategorynfABITy,
+ Values);
+ llvm::GlobalVariable *GCATV
+ = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.CategorynfABITy,
+ false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ ExtCatName);
+ GCATV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.CategorynfABITy));
+ GCATV->setSection("__DATA, __objc_const");
+ CGM.AddUsedGlobal(GCATV);
+ DefinedCategories.push_back(GCATV);
+
+ // Determine if this category is also "non-lazy".
+ if (ImplementationIsNonLazy(OCD))
+ DefinedNonLazyCategories.push_back(GCATV);
+ // Method definition entries must be cleared for the next implementation.
+ MethodDefinitions.clear();
+}
+
+/// GetMethodConstant - Return a struct objc_method constant for the
+/// given method if it has been defined. The result is null if the
+/// method has not been defined. The return value has type MethodPtrTy.
+llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
+ const ObjCMethodDecl *MD) {
+ llvm::Function *Fn = GetMethodDefinition(MD);
+ if (!Fn)
+ return 0;
+
+ llvm::Constant *Method[] = {
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy),
+ GetMethodVarType(MD),
+ llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy)
+ };
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+}
+
+/// EmitMethodList - Build meta-data for method declarations
+/// struct _method_list_t {
+/// uint32_t entsize; // sizeof(struct _objc_method)
+/// uint32_t method_count;
+/// struct _objc_method method_list[method_count];
+/// }
+///
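+/// For illustration, the instance methods of a hypothetical class MyClass
+/// are emitted under the name l_OBJC_$_INSTANCE_METHODS_MyClass (see
+/// BuildClassRoTInitializer above), with entsize set to
+/// sizeof(struct _objc_method).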
+llvm::Constant *
+CGObjCNonFragileABIMac::EmitMethodList(Twine Name,
+ const char *Section,
+ ArrayRef<llvm::Constant*> Methods) {
+ // Return null for empty list.
+ if (Methods.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.MethodListnfABIPtrTy);
+
+ llvm::Constant *Values[3];
+ // sizeof(struct _objc_method)
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.MethodTy);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ // method_count
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
+ Methods.size());
+ Values[2] = llvm::ConstantArray::get(AT, Methods);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage, Init, Name);
+ GV->setAlignment(CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ GV->setSection(Section);
+ CGM.AddUsedGlobal(GV);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListnfABIPtrTy);
+}
+
+/// ObjCIvarOffsetVariable - Returns the ivar offset variable for
+/// the given ivar.
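+/// For a hypothetical ivar 'count' declared in class MyClass, the variable
+/// is named OBJC_IVAR_$_MyClass.count; it is created with external linkage
+/// if it does not already exist in the module.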
+llvm::GlobalVariable *
+CGObjCNonFragileABIMac::ObjCIvarOffsetVariable(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();
+ std::string Name = "OBJC_IVAR_$_" + Container->getNameAsString() +
+ '.' + Ivar->getNameAsString();
+ llvm::GlobalVariable *IvarOffsetGV =
+ CGM.getModule().getGlobalVariable(Name);
+ if (!IvarOffsetGV)
+ IvarOffsetGV =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.LongTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ Name);
+ return IvarOffsetGV;
+}
+
+llvm::Constant *
+CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
+ const ObjCIvarDecl *Ivar,
+ unsigned long int Offset) {
+ llvm::GlobalVariable *IvarOffsetGV = ObjCIvarOffsetVariable(ID, Ivar);
+ IvarOffsetGV->setInitializer(llvm::ConstantInt::get(ObjCTypes.LongTy,
+ Offset));
+ IvarOffsetGV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.LongTy));
+
+ // FIXME: This matches gcc, but shouldn't the visibility be set on the use as
+ // well (i.e., in ObjCIvarOffsetVariable)?
+ if (Ivar->getAccessControl() == ObjCIvarDecl::Private ||
+ Ivar->getAccessControl() == ObjCIvarDecl::Package ||
+ ID->getVisibility() == HiddenVisibility)
+ IvarOffsetGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ else
+ IvarOffsetGV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ IvarOffsetGV->setSection("__DATA, __objc_ivar");
+ return IvarOffsetGV;
+}
+
+/// EmitIvarList - Emit the ivar list for the given
+/// implementation. The return value has type
+/// IvarListnfABIPtrTy.
+/// struct _ivar_t {
+/// unsigned long int *offset; // pointer to ivar offset location
+/// char *name;
+/// char *type;
+/// uint32_t alignment;
+/// uint32_t size;
+/// }
+/// struct _ivar_list_t {
+/// uint32 entsize; // sizeof(struct _ivar_t)
+/// uint32 count;
+/// struct _ivar_t list[count];
+/// }
+///
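+/// For a hypothetical class MyClass, the emitted list is named
+/// l_OBJC_$_INSTANCE_VARIABLES_MyClass and placed in the __DATA,__objc_const
+/// section.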
+
+llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
+ const ObjCImplementationDecl *ID) {
+
+ std::vector<llvm::Constant*> Ivars;
+
+ const ObjCInterfaceDecl *OID = ID->getClassInterface();
+ assert(OID && "CGObjCNonFragileABIMac::EmitIvarList - null interface");
+
+ // FIXME. Consolidate this with similar code in GenerateClass.
+
+ for (const ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
+ // Ignore unnamed bit-fields.
+ if (!IVD->getDeclName())
+ continue;
+ llvm::Constant *Ivar[5];
+ Ivar[0] = EmitIvarOffsetVar(ID->getClassInterface(), IVD,
+ ComputeIvarBaseOffset(CGM, ID, IVD));
+ Ivar[1] = GetMethodVarName(IVD->getIdentifier());
+ Ivar[2] = GetMethodVarType(IVD);
+ llvm::Type *FieldTy =
+ CGM.getTypes().ConvertTypeForMem(IVD->getType());
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(FieldTy);
+ unsigned Align = CGM.getContext().getPreferredTypeAlign(
+ IVD->getType().getTypePtr()) >> 3;
+ Align = llvm::Log2_32(Align);
+ Ivar[3] = llvm::ConstantInt::get(ObjCTypes.IntTy, Align);
+ // NOTE. The size of a bitfield does not match gcc's, because of the
+ // way bitfields are treated specially in each. But I am told that
+ // 'size' for bitfield ivars is ignored by the runtime, so it does
+ // not matter. If it matters, there is enough info to get the
+ // bitfield right!
+ Ivar[4] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarnfABITy, Ivar));
+ }
+ // Return null for empty list.
+ if (Ivars.empty())
+ return llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
+
+ llvm::Constant *Values[3];
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(ObjCTypes.IvarnfABITy);
+ Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
+ llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarnfABITy,
+ Ivars.size());
+ Values[2] = llvm::ConstantArray::get(AT, Ivars);
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ const char *Prefix = "\01l_OBJC_$_INSTANCE_VARIABLES_";
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init,
+ Prefix + OID->getName());
+ GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ GV->setSection("__DATA, __objc_const");
+
+ CGM.AddUsedGlobal(GV);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListnfABIPtrTy);
+}
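For reference, a stand-alone C++ mirror of the metadata shape EmitIvarList produces, for a hypothetical class with a single 'int count' ivar. The type names, symbol names, and numbers below are illustrative only (the real globals carry the mangled \01l_OBJC_$_INSTANCE_VARIABLES_ prefix and are emitted directly as LLVM IR), but note that the alignment field really holds log2 of the byte alignment, matching the Log2_32 call above.

#include <stdint.h>

// Mirror of struct _ivar_t / _ivar_list_t as documented and emitted above.
struct ivar_t {
  unsigned long *offset;   // points at the ivar's offset variable
  const char *name;        // ivar name
  const char *type;        // @encode string for the ivar's type
  uint32_t alignment;      // log2(byte alignment), per Log2_32 above
  uint32_t size;           // byte size of the ivar
};

struct ivar_list_t {
  uint32_t entsize;        // sizeof(struct ivar_t)
  uint32_t count;          // number of entries
  struct ivar_t list[1];   // really 'count' entries in the emitted global
};

// Hypothetical class with one 'int count' ivar placed at byte offset 8.
static unsigned long ExampleCountOffset = 8;
static struct ivar_list_t ExampleIvarList = {
  sizeof(struct ivar_t), 1,
  { { &ExampleCountOffset, "count", "i", /*log2 align*/ 2, /*size*/ 4 } }
};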
+
+llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef(
+ const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
+
+ if (!Entry) {
+ // We use the initializer as a marker of whether this is a forward
+ // reference or not. At module finalization we add the empty
+ // contents for protocols which were referenced but never defined.
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "\01l_OBJC_PROTOCOL_$_" + PD->getName());
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+ }
+
+ return Entry;
+}
+
+/// GetOrEmitProtocol - Generate the protocol meta-data:
+/// @code
+/// struct _protocol_t {
+/// id isa; // NULL
+/// const char * const protocol_name;
+/// const struct _protocol_list_t * protocol_list; // super protocols
+/// const struct method_list_t * const instance_methods;
+/// const struct method_list_t * const class_methods;
+/// const struct method_list_t *optionalInstanceMethods;
+/// const struct method_list_t *optionalClassMethods;
+/// const struct _prop_list_t * properties;
+/// const uint32_t size; // sizeof(struct _protocol_t)
+/// const uint32_t flags; // = 0
+/// const char ** extendedMethodTypes;
+/// }
+/// @endcode
+///
+
+llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
+ const ObjCProtocolDecl *PD) {
+ llvm::GlobalVariable *Entry = Protocols[PD->getIdentifier()];
+
+ // Early exit if a defining object has already been generated.
+ if (Entry && Entry->hasInitializer())
+ return Entry;
+
+ // Use the protocol definition, if there is one.
+ if (const ObjCProtocolDecl *Def = PD->getDefinition())
+ PD = Def;
+
+ // Construct method lists.
+ std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
+ std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
+ std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt;
+ for (ObjCProtocolDecl::instmeth_iterator
+ i = PD->instmeth_begin(), e = PD->instmeth_end(); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (!C)
+ return GetOrEmitProtocolRef(PD);
+
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptInstanceMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
+ } else {
+ InstanceMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
+ }
+ }
+
+ for (ObjCProtocolDecl::classmeth_iterator
+ i = PD->classmeth_begin(), e = PD->classmeth_end(); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ llvm::Constant *C = GetMethodDescriptionConstant(MD);
+ if (!C)
+ return GetOrEmitProtocolRef(PD);
+
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptClassMethods.push_back(C);
+ OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
+ } else {
+ ClassMethods.push_back(C);
+ MethodTypesExt.push_back(GetMethodVarType(MD, true));
+ }
+ }
+
+ MethodTypesExt.insert(MethodTypesExt.end(),
+ OptMethodTypesExt.begin(), OptMethodTypesExt.end());
+
+ llvm::Constant *Values[11];
+ // isa is NULL
+ Values[0] = llvm::Constant::getNullValue(ObjCTypes.ObjectPtrTy);
+ Values[1] = GetClassName(PD->getIdentifier());
+ Values[2] = EmitProtocolList("\01l_OBJC_$_PROTOCOL_REFS_" + PD->getName(),
+ PD->protocol_begin(),
+ PD->protocol_end());
+
+ Values[3] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_"
+ + PD->getName(),
+ "__DATA, __objc_const",
+ InstanceMethods);
+ Values[4] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_"
+ + PD->getName(),
+ "__DATA, __objc_const",
+ ClassMethods);
+ Values[5] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_OPT_"
+ + PD->getName(),
+ "__DATA, __objc_const",
+ OptInstanceMethods);
+ Values[6] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_OPT_"
+ + PD->getName(),
+ "__DATA, __objc_const",
+ OptClassMethods);
+ Values[7] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + PD->getName(),
+ 0, PD, ObjCTypes);
+ uint32_t Size =
+ CGM.getTargetData().getTypeAllocSize(ObjCTypes.ProtocolnfABITy);
+ Values[8] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values[9] = llvm::Constant::getNullValue(ObjCTypes.IntTy);
+ Values[10] = EmitProtocolMethodTypes("\01l_OBJC_$_PROTOCOL_METHOD_TYPES_"
+ + PD->getName(),
+ MethodTypesExt, ObjCTypes);
+ llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolnfABITy,
+ Values);
+
+ if (Entry) {
+ // Already created, fix the linkage and update the initializer.
+ Entry->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
+ Entry->setInitializer(Init);
+ } else {
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy,
+ false, llvm::GlobalValue::WeakAnyLinkage, Init,
+ "\01l_OBJC_PROTOCOL_$_" + PD->getName());
+ Entry->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ProtocolnfABITy));
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+
+ Protocols[PD->getIdentifier()] = Entry;
+ }
+ Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CGM.AddUsedGlobal(Entry);
+
+ // Use this protocol meta-data to build the protocol list table in the
+ // __DATA,__objc_protolist section.
+ llvm::GlobalVariable *PTGV =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABIPtrTy,
+ false, llvm::GlobalValue::WeakAnyLinkage, Entry,
+ "\01l_OBJC_LABEL_PROTOCOL_$_" + PD->getName());
+ PTGV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(ObjCTypes.ProtocolnfABIPtrTy));
+ PTGV->setSection("__DATA, __objc_protolist, coalesced, no_dead_strip");
+ PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ CGM.AddUsedGlobal(PTGV);
+ return Entry;
+}
+
+/// EmitProtocolList - Generate protocol list meta-data:
+/// @code
+/// struct _protocol_list_t {
+/// long protocol_count; // Note, this is 32/64 bit
+/// struct _protocol_t[protocol_count];
+/// }
+/// @endcode
+///
+llvm::Constant *
+CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
+ ObjCProtocolDecl::protocol_iterator begin,
+ ObjCProtocolDecl::protocol_iterator end) {
+ llvm::SmallVector<llvm::Constant*, 16> ProtocolRefs;
+
+ // Just return null for empty protocol lists
+ if (begin == end)
+ return llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
+
+ // FIXME: We shouldn't need to do this lookup here, should we?
+ SmallString<256> TmpName;
+ Name.toVector(TmpName);
+ llvm::GlobalVariable *GV =
+ CGM.getModule().getGlobalVariable(TmpName.str(), true);
+ if (GV)
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListnfABIPtrTy);
+
+ for (; begin != end; ++begin)
+ ProtocolRefs.push_back(GetProtocolRef(*begin)); // Implemented???
+
+ // This list is null terminated.
+ ProtocolRefs.push_back(llvm::Constant::getNullValue(
+ ObjCTypes.ProtocolnfABIPtrTy));
+
+ llvm::Constant *Values[2];
+ Values[0] =
+ llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1);
+ Values[1] =
+ llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolnfABIPtrTy,
+ ProtocolRefs.size()),
+ ProtocolRefs);
+
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
+ llvm::GlobalValue::InternalLinkage,
+ Init, Name);
+ GV->setSection("__DATA, __objc_const");
+ GV->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(Init->getType()));
+ CGM.AddUsedGlobal(GV);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.ProtocolListnfABIPtrTy);
+}
+
+/// GetMethodDescriptionConstant - This routine builds the following meta-data:
+/// struct _objc_method {
+/// SEL _cmd;
+/// char *method_type;
+/// char *_imp;
+/// }
+
+llvm::Constant *
+CGObjCNonFragileABIMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
+ llvm::Constant *Desc[3];
+ Desc[0] =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ Desc[1] = GetMethodVarType(MD);
+ if (!Desc[1])
+ return 0;
+
+ // Protocol methods have no implementation. So, this entry is always NULL.
+ Desc[2] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Desc);
+}
+
+/// EmitObjCValueForIvar - Code gen for a nonfragile ivar reference.
+/// This amounts to generating code for:
+/// @code
+/// (type *)((char *)base + _OBJC_IVAR_$_.ivar);
+/// @endcode
+///
+LValue CGObjCNonFragileABIMac::EmitObjCValueForIvar(
+ CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCObjectType>()->getInterface();
+ llvm::Value *Offset = EmitIvarOffset(CGF, ID, Ivar);
+ if (llvm::LoadInst *LI = dyn_cast<llvm::LoadInst>(Offset))
+ LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
+ llvm::MDNode::get(VMContext,
+ ArrayRef<llvm::Value*>()));
+ return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
+ Offset);
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
+ CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) {
+ return CGF.Builder.CreateLoad(ObjCIvarOffsetVariable(Interface, Ivar),"ivar");
+}
+
+static void appendSelectorForMessageRefTable(std::string &buffer,
+ Selector selector) {
+ if (selector.isUnarySelector()) {
+ buffer += selector.getNameForSlot(0);
+ return;
+ }
+
+ for (unsigned i = 0, e = selector.getNumArgs(); i != e; ++i) {
+ buffer += selector.getNameForSlot(i);
+ buffer += '_';
+ }
+}
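The helper above only flattens a selector's colons into underscores so the selector can be embedded in an assembler-visible symbol name. A stand-alone sketch of the same transformation over the printed selector spelling (the helper name is hypothetical):

#include <string>

static std::string mangleSelectorForMessageRef(std::string Sel) {
  // "setObject:forKey:" -> "setObject_forKey_"; unary selectors such as
  // "description" contain no colon and pass through unchanged.
  for (std::string::size_type i = 0, e = Sel.size(); i != e; ++i)
    if (Sel[i] == ':')
      Sel[i] = '_';
  return Sel;
}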
+
+/// Emit a "v-table" message send. We emit a weak hidden-visibility
+/// struct, initially containing the selector pointer and a pointer to
+/// a "fixup" variant of the appropriate objc_msgSend. To call, we
+/// load and call the function pointer, passing the address of the
+/// struct as the second parameter. The runtime determines whether
+/// the selector is currently emitted using vtable dispatch; if so, it
+/// substitutes a stub function which simply tail-calls through the
+/// appropriate vtable slot, and if not, it substitutes a stub function
+/// which tail-calls objc_msgSend. Both stubs adjust the selector
+/// argument to correctly point to the selector.
+RValue
+CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
+ ReturnValueSlot returnSlot,
+ QualType resultType,
+ Selector selector,
+ llvm::Value *arg0,
+ QualType arg0Type,
+ bool isSuper,
+ const CallArgList &formalArgs,
+ const ObjCMethodDecl *method) {
+ // Compute the actual arguments.
+ CallArgList args;
+
+ // First argument: the receiver / super-call structure.
+ if (!isSuper)
+ arg0 = CGF.Builder.CreateBitCast(arg0, ObjCTypes.ObjectPtrTy);
+ args.add(RValue::get(arg0), arg0Type);
+
+ // Second argument: a pointer to the message ref structure. Leave
+ // the actual argument value blank for now.
+ args.add(RValue::get(0), ObjCTypes.MessageRefCPtrTy);
+
+ args.insert(args.end(), formalArgs.begin(), formalArgs.end());
+
+ MessageSendInfo MSI = getMessageSendInfo(method, resultType, args);
+
+ NullReturnState nullReturn;
+
+ // Find the function to call and the mangled name for the message
+ // ref structure. Using a different mangled name wouldn't actually
+ // be a problem; it would just be a waste.
+ //
+ // The runtime currently never uses vtable dispatch for anything
+ // except normal, non-super message-sends.
+ // FIXME: don't use this for that.
+ llvm::Constant *fn = 0;
+ std::string messageRefName("\01l_");
+ if (CGM.ReturnTypeUsesSRet(MSI.CallInfo)) {
+ if (isSuper) {
+ fn = ObjCTypes.getMessageSendSuper2StretFixupFn();
+ messageRefName += "objc_msgSendSuper2_stret_fixup";
+ } else {
+ nullReturn.init(CGF, arg0);
+ fn = ObjCTypes.getMessageSendStretFixupFn();
+ messageRefName += "objc_msgSend_stret_fixup";
+ }
+ } else if (!isSuper && CGM.ReturnTypeUsesFPRet(resultType)) {
+ fn = ObjCTypes.getMessageSendFpretFixupFn();
+ messageRefName += "objc_msgSend_fpret_fixup";
+ } else {
+ if (isSuper) {
+ fn = ObjCTypes.getMessageSendSuper2FixupFn();
+ messageRefName += "objc_msgSendSuper2_fixup";
+ } else {
+ fn = ObjCTypes.getMessageSendFixupFn();
+ messageRefName += "objc_msgSend_fixup";
+ }
+ }
+ assert(fn && "CGObjCNonFragileABIMac::EmitVTableMessageSend");
+ messageRefName += '_';
+
+ // Append the selector name, except use underscores anywhere we
+ // would have used colons.
+ appendSelectorForMessageRefTable(messageRefName, selector);
+
+ llvm::GlobalVariable *messageRef
+ = CGM.getModule().getGlobalVariable(messageRefName);
+ if (!messageRef) {
+ // Build the message ref structure.
+ llvm::Constant *values[] = { fn, GetMethodVarName(selector) };
+ llvm::Constant *init = llvm::ConstantStruct::getAnon(values);
+ messageRef = new llvm::GlobalVariable(CGM.getModule(),
+ init->getType(),
+ /*constant*/ false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ init,
+ messageRefName);
+ messageRef->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ messageRef->setAlignment(16);
+ messageRef->setSection("__DATA, __objc_msgrefs, coalesced");
+ }
+
+ bool requiresnullCheck = false;
+ if (CGM.getLangOpts().ObjCAutoRefCount && method)
+ for (ObjCMethodDecl::param_const_iterator i = method->param_begin(),
+ e = method->param_end(); i != e; ++i) {
+ const ParmVarDecl *ParamDecl = (*i);
+ if (ParamDecl->hasAttr<NSConsumedAttr>()) {
+ if (!nullReturn.NullBB)
+ nullReturn.init(CGF, arg0);
+ requiresnullCheck = true;
+ break;
+ }
+ }
+
+ llvm::Value *mref =
+ CGF.Builder.CreateBitCast(messageRef, ObjCTypes.MessageRefPtrTy);
+
+ // Update the message ref argument.
+ args[1].RV = RValue::get(mref);
+
+ // Load the function to call from the message ref table.
+ llvm::Value *callee = CGF.Builder.CreateStructGEP(mref, 0);
+ callee = CGF.Builder.CreateLoad(callee, "msgSend_fn");
+
+ callee = CGF.Builder.CreateBitCast(callee, MSI.MessengerType);
+
+ RValue result = CGF.EmitCall(MSI.CallInfo, callee, returnSlot, args);
+ return nullReturn.complete(CGF, result, resultType, formalArgs,
+ requiresnullCheck ? method : 0);
+}
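In C-like terms, the send emitted above behaves roughly as sketched below. The struct and field names are illustrative; the real global is the weak, hidden, 16-byte-aligned entry in __DATA,__objc_msgrefs whose mangled \01l_..._fixup_<selector> name is built above, and the fixup messengers come from the Objective-C runtime.

typedef void *id;

// Two-word message-ref entry: a messenger function pointer followed by the
// selector name. The runtime may later overwrite 'messenger' with a stub
// that tail-calls through the class's vtable slot for this selector.
struct message_ref_t {
  id (*messenger)(id self, message_ref_t *ref, ...);
  const char *selector_name;
};

// One such global is emitted per (messenger, selector) pair, e.g.
//   static message_ref_t ref = { objc_msgSend_fixup, "foo:" };
// and each send site then lowers to roughly
//   result = ref.messenger(receiver, &ref, args...);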
+
+/// Generate code for a message send expression in the nonfragile abi.
+CodeGen::RValue
+CGObjCNonFragileABIMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class,
+ const ObjCMethodDecl *Method) {
+ return isVTableDispatchedSelector(Sel)
+ ? EmitVTableMessageSend(CGF, Return, ResultType, Sel,
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, Method)
+ : EmitMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ Receiver, CGF.getContext().getObjCIdType(),
+ false, CallArgs, Method, ObjCTypes);
+}
+
+llvm::GlobalVariable *
+CGObjCNonFragileABIMac::GetClassGlobal(const std::string &Name) {
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+
+ if (!GV) {
+ GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABITy,
+ false, llvm::GlobalValue::ExternalLinkage,
+ 0, Name);
+ }
+
+ return GV;
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CGBuilderTy &Builder,
+ IdentifierInfo *II) {
+ llvm::GlobalVariable *&Entry = ClassReferences[II];
+
+ if (!Entry) {
+ std::string ClassName(getClassSymbolPrefix() + II->getName().str());
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::InternalLinkage,
+ ClassGV,
+ "\01L_OBJC_CLASSLIST_REFERENCES_$_");
+ Entry->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+ Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
+ CGM.AddUsedGlobal(Entry);
+ }
+
+ return Builder.CreateLoad(Entry);
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ return EmitClassRefFromId(Builder, ID->getIdentifier());
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitNSAutoreleasePoolClassRef(
+ CGBuilderTy &Builder) {
+ IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool");
+ return EmitClassRefFromId(Builder, II);
+}
+
+llvm::Value *
+CGObjCNonFragileABIMac::EmitSuperClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ llvm::GlobalVariable *&Entry = SuperClassReferences[ID->getIdentifier()];
+
+ if (!Entry) {
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::InternalLinkage,
+ ClassGV,
+ "\01L_OBJC_CLASSLIST_SUP_REFS_$_");
+ Entry->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+ Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
+ CGM.AddUsedGlobal(Entry);
+ }
+
+ return Builder.CreateLoad(Entry);
+}
+
+/// EmitMetaClassRef - Return a Value * holding the address of the _class_t
+/// meta-data.
+///
+llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ llvm::GlobalVariable * &Entry = MetaClassReferences[ID->getIdentifier()];
+ if (Entry)
+ return Builder.CreateLoad(Entry);
+
+ std::string MetaClassName(getMetaclassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *MetaClassGV = GetClassGlobal(MetaClassName);
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ MetaClassGV,
+ "\01L_OBJC_CLASSLIST_SUP_REFS_$_");
+ Entry->setAlignment(
+ CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.ClassnfABIPtrTy));
+
+ Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
+ CGM.AddUsedGlobal(Entry);
+
+ return Builder.CreateLoad(Entry);
+}
+
+/// GetClass - Return a reference to the class for the given interface
+/// decl.
+llvm::Value *CGObjCNonFragileABIMac::GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *ID) {
+ if (ID->isWeakImported()) {
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName);
+ ClassGV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+ }
+
+ return EmitClassRef(Builder, ID);
+}
+
+/// Generates a message send where 'super' is the receiver. This is
+/// a message send to self with special delivery semantics indicating
+/// which class's method should be called.
+CodeGen::RValue
+CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Receiver,
+ bool IsClassMessage,
+ const CodeGen::CallArgList &CallArgs,
+ const ObjCMethodDecl *Method) {
+ // ...
+ // Create and init a super structure; this is a (receiver, class)
+ // pair we will pass to objc_msgSendSuper.
+ llvm::Value *ObjCSuper =
+ CGF.CreateTempAlloca(ObjCTypes.SuperTy, "objc_super");
+
+ llvm::Value *ReceiverAsObject =
+ CGF.Builder.CreateBitCast(Receiver, ObjCTypes.ObjectPtrTy);
+ CGF.Builder.CreateStore(ReceiverAsObject,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 0));
+
+ // If this is a class message the metaclass is passed as the target.
+ llvm::Value *Target;
+ if (IsClassMessage) {
+ if (isCategoryImpl) {
+ // Message sent to 'super' in a class method defined in
+ // a category implementation.
+ Target = EmitClassRef(CGF.Builder, Class);
+ Target = CGF.Builder.CreateStructGEP(Target, 0);
+ Target = CGF.Builder.CreateLoad(Target);
+ } else
+ Target = EmitMetaClassRef(CGF.Builder, Class);
+ } else
+ Target = EmitSuperClassRef(CGF.Builder, Class);
+
+ // FIXME: We shouldn't need to do this cast, rectify the ASTContext and
+ // ObjCTypes types.
+ llvm::Type *ClassTy =
+ CGM.getTypes().ConvertType(CGF.getContext().getObjCClassType());
+ Target = CGF.Builder.CreateBitCast(Target, ClassTy);
+ CGF.Builder.CreateStore(Target,
+ CGF.Builder.CreateStructGEP(ObjCSuper, 1));
+
+ return (isVTableDispatchedSelector(Sel))
+ ? EmitVTableMessageSend(CGF, Return, ResultType, Sel,
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs, Method)
+ : EmitMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
+ ObjCSuper, ObjCTypes.SuperPtrCTy,
+ true, CallArgs, Method, ObjCTypes);
+}
+
+llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CGBuilderTy &Builder,
+ Selector Sel, bool lval) {
+ llvm::GlobalVariable *&Entry = SelectorReferences[Sel];
+
+ if (!Entry) {
+ llvm::Constant *Casted =
+ llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
+ ObjCTypes.SelectorPtrTy);
+ Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.SelectorPtrTy, false,
+ llvm::GlobalValue::InternalLinkage,
+ Casted, "\01L_OBJC_SELECTOR_REFERENCES_");
+ Entry->setSection("__DATA, __objc_selrefs, literal_pointers, no_dead_strip");
+ CGM.AddUsedGlobal(Entry);
+ }
+
+ if (lval)
+ return Entry;
+ llvm::LoadInst* LI = Builder.CreateLoad(Entry);
+
+ LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
+ llvm::MDNode::get(VMContext,
+ ArrayRef<llvm::Value*>()));
+ return LI;
+}
+
+/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
+/// objc_assign_ivar (id src, id *dst, ptrdiff_t)
+///
+void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src,
+ llvm::Value *dst,
+ llvm::Value *ivarOffset) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall3(ObjCTypes.getGcAssignIvarFn(),
+ src, dst, ivarOffset);
+ return;
+}
+
+/// EmitObjCStrongCastAssign - Code gen for assigning to a __strong cast object.
+/// objc_assign_strongCast (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignStrongCastFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ llvm::Value *Size) {
+ SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
+ DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
+ CGF.Builder.CreateCall3(ObjCTypes.GcMemmoveCollectableFn(),
+ DestPtr, SrcPtr, Size);
+ return;
+}
+
+/// EmitObjCWeakRead - Code gen for loading the value of a __weak
+/// object: objc_read_weak (id *src)
+///
+llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) {
+ llvm::Type* DestTy =
+ cast<llvm::PointerType>(AddrWeakObj->getType())->getElementType();
+ AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *read_weak = CGF.Builder.CreateCall(ObjCTypes.getGcReadWeakFn(),
+ AddrWeakObj, "weakread");
+ read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
+ return read_weak;
+}
+
+/// EmitObjCWeakAssign - Code gen for assigning to a __weak object.
+/// objc_assign_weak (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignWeakFn(),
+ src, dst, "weakassign");
+ return;
+}
+
+/// EmitObjCGlobalAssign - Code gen for assigning to a __strong object.
+/// objc_assign_global (id src, id *dst)
+///
+void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dst,
+ bool threadlocal) {
+ llvm::Type * SrcTy = src->getType();
+ if (!isa<llvm::PointerType>(SrcTy)) {
+ unsigned Size = CGM.getTargetData().getTypeAllocSize(SrcTy);
+ assert(Size <= 8 && "does not support size > 8");
+ src = (Size == 4 ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
+ : CGF.Builder.CreateBitCast(src, ObjCTypes.LongTy));
+ src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
+ }
+ src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
+ dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
+ if (!threadlocal)
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignGlobalFn(),
+ src, dst, "globalassign");
+ else
+ CGF.Builder.CreateCall2(ObjCTypes.getGcAssignThreadLocalFn(),
+ src, dst, "threadlocalassign");
+ return;
+}
+
+void
+CGObjCNonFragileABIMac::EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) {
+ EmitAtSynchronizedStmt(CGF, S,
+ cast<llvm::Function>(ObjCTypes.getSyncEnterFn()),
+ cast<llvm::Function>(ObjCTypes.getSyncExitFn()));
+}
+
+llvm::Constant *
+CGObjCNonFragileABIMac::GetEHType(QualType T) {
+ // There's a particular fixed type info for 'id'.
+ if (T->isObjCIdType() ||
+ T->isObjCQualifiedIdType()) {
+ llvm::Constant *IDEHType =
+ CGM.getModule().getGlobalVariable("OBJC_EHTYPE_id");
+ if (!IDEHType)
+ IDEHType =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, "OBJC_EHTYPE_id");
+ return IDEHType;
+ }
+
+ // All other types should be Objective-C interface pointer types.
+ const ObjCObjectPointerType *PT =
+ T->getAs<ObjCObjectPointerType>();
+ assert(PT && "Invalid @catch type.");
+ const ObjCInterfaceType *IT = PT->getInterfaceType();
+ assert(IT && "Invalid @catch type.");
+ return GetInterfaceEHType(IT->getDecl(), false);
+}
+
+void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) {
+ EmitTryCatchStmt(CGF, S,
+ cast<llvm::Function>(ObjCTypes.getObjCBeginCatchFn()),
+ cast<llvm::Function>(ObjCTypes.getObjCEndCatchFn()),
+ cast<llvm::Function>(ObjCTypes.getExceptionRethrowFn()));
+}
+
+/// EmitThrowStmt - Generate code for a throw statement.
+void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) {
+ if (const Expr *ThrowExpr = S.getThrowExpr()) {
+ llvm::Value *Exception = CGF.EmitObjCThrowOperand(ThrowExpr);
+ Exception = CGF.Builder.CreateBitCast(Exception, ObjCTypes.ObjectPtrTy);
+ CGF.EmitCallOrInvoke(ObjCTypes.getExceptionThrowFn(), Exception)
+ .setDoesNotReturn();
+ } else {
+ CGF.EmitCallOrInvoke(ObjCTypes.getExceptionRethrowFn())
+ .setDoesNotReturn();
+ }
+
+ CGF.Builder.CreateUnreachable();
+ CGF.Builder.ClearInsertionPoint();
+}
+
+llvm::Constant *
+CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
+ bool ForDefinition) {
+ llvm::GlobalVariable * &Entry = EHTypeReferences[ID->getIdentifier()];
+
+ // If we don't need a definition, return the entry if found, or check
+ // whether we can use an external reference.
+ if (!ForDefinition) {
+ if (Entry)
+ return Entry;
+
+ // If this type (or a super class) has the __objc_exception__
+ // attribute, emit an external reference.
+ if (hasObjCExceptionAttribute(CGM.getContext(), ID))
+ return Entry =
+ new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ ("OBJC_EHTYPE_$_" +
+ ID->getIdentifier()->getName()));
+ }
+
+ // Otherwise we need to either make a new entry or fill in the
+ // initializer.
+ assert((!Entry || !Entry->hasInitializer()) && "Duplicate EHType definition");
+ std::string ClassName(getClassSymbolPrefix() + ID->getNameAsString());
+ std::string VTableName = "objc_ehtype_vtable";
+ llvm::GlobalVariable *VTableGV =
+ CGM.getModule().getGlobalVariable(VTableName);
+ if (!VTableGV)
+ VTableGV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.Int8PtrTy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, VTableName);
+
+ llvm::Value *VTableIdx = llvm::ConstantInt::get(CGM.Int32Ty, 2);
+
+ llvm::Constant *Values[] = {
+ llvm::ConstantExpr::getGetElementPtr(VTableGV, VTableIdx),
+ GetClassName(ID->getIdentifier()),
+ GetClassGlobal(ClassName)
+ };
+ llvm::Constant *Init =
+ llvm::ConstantStruct::get(ObjCTypes.EHTypeTy, Values);
+
+ if (Entry) {
+ Entry->setInitializer(Init);
+ } else {
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false,
+ llvm::GlobalValue::WeakAnyLinkage,
+ Init,
+ ("OBJC_EHTYPE_$_" +
+ ID->getIdentifier()->getName()));
+ }
+
+ if (CGM.getLangOpts().getVisibilityMode() == HiddenVisibility)
+ Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ Entry->setAlignment(CGM.getTargetData().getABITypeAlignment(
+ ObjCTypes.EHTypeTy));
+
+ if (ForDefinition) {
+ Entry->setSection("__DATA,__objc_const");
+ Entry->setLinkage(llvm::GlobalValue::ExternalLinkage);
+ } else {
+ Entry->setSection("__DATA,__datacoal_nt,coalesced");
+ }
+
+ return Entry;
+}
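The initializer constructed above gives the EH type the following shape; this stand-alone mirror uses descriptive field names only, while the emitted symbol is the OBJC_EHTYPE_$_<ClassName> global referenced by @catch clauses.

// Mirror of ObjCTypes.EHTypeTy as it is initialized above: a pointer two
// entries into objc_ehtype_vtable, the class name string, and the class's
// _class_t global, which together let the unwinder match thrown objects.
struct objc_ehtype_t {
  void **vtable;           // &objc_ehtype_vtable[2]
  const char *class_name;  // GetClassName(ID->getIdentifier())
  void *cls;               // GetClassGlobal(ClassName), the _class_t metadata
};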
+
+/* *** */
+
+CodeGen::CGObjCRuntime *
+CodeGen::CreateMacObjCRuntime(CodeGen::CodeGenModule &CGM) {
+ if (CGM.getLangOpts().ObjCNonFragileABI)
+ return new CGObjCNonFragileABIMac(CGM);
+ return new CGObjCMac(CGM);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp
new file mode 100644
index 0000000..9370096
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -0,0 +1,374 @@
+//==- CGObjCRuntime.cpp - Interface to Shared Objective-C Runtime Features ==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This abstract class defines the interface for Objective-C runtime-specific
+// code generation. It provides some concrete helper methods for functionality
+// shared between all (or most) of the Objective-C runtimes supported by clang.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGObjCRuntime.h"
+
+#include "CGRecordLayout.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "CGCleanup.h"
+
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtObjC.h"
+
+#include "llvm/Support/CallSite.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+static uint64_t LookupFieldBitOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCImplementationDecl *ID,
+ const ObjCIvarDecl *Ivar) {
+ const ObjCInterfaceDecl *Container = Ivar->getContainingInterface();
+
+ // FIXME: We should eliminate the need to have ObjCImplementationDecl passed
+ // in here; it should never be necessary because that should be the lexical
+ // decl context for the ivar.
+
+ // If we have an implementation (and the ivar is in it), then
+ // look up in the implementation layout.
+ const ASTRecordLayout *RL;
+ if (ID && declaresSameEntity(ID->getClassInterface(), Container))
+ RL = &CGM.getContext().getASTObjCImplementationLayout(ID);
+ else
+ RL = &CGM.getContext().getASTObjCInterfaceLayout(Container);
+
+ // Compute field index.
+ //
+ // FIXME: The index here is closely tied to how ASTContext::getObjCLayout is
+ // implemented. This should be fixed to get the information from the layout
+ // directly.
+ unsigned Index = 0;
+
+ for (const ObjCIvarDecl *IVD = Container->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
+ if (Ivar == IVD)
+ break;
+ ++Index;
+ }
+ assert(Index < RL->getFieldCount() && "Ivar is not inside record layout!");
+
+ return RL->getFieldOffset(Index);
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, OID, 0, Ivar) /
+ CGM.getContext().getCharWidth();
+}
+
+uint64_t CGObjCRuntime::ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCImplementationDecl *OID,
+ const ObjCIvarDecl *Ivar) {
+ return LookupFieldBitOffset(CGM, OID->getClassInterface(), OID, Ivar) /
+ CGM.getContext().getCharWidth();
+}
+
+LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers,
+ llvm::Value *Offset) {
+ // Compute (type*) ( (char *) BaseValue + Offset)
+ llvm::Type *I8Ptr = CGF.Int8PtrTy;
+ QualType IvarTy = Ivar->getType();
+ llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
+ llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, I8Ptr);
+ V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");
+ V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
+
+ if (!Ivar->isBitField()) {
+ LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
+ LV.getQuals().addCVRQualifiers(CVRQualifiers);
+ return LV;
+ }
+
+ // We need to compute an access strategy for this bit-field. We are given the
+ // offset to the first byte in the bit-field; the sub-byte offset is taken
+ // from the original layout. We reuse the normal bit-field access strategy by
+ // treating this as an access to a struct where the bit-field is in byte 0,
+ // and adjust the containing type size as appropriate.
+ //
+ // FIXME: Note that currently we make a very conservative estimate of the
+ // alignment of the bit-field, because (a) it is not clear what guarantees the
+ // runtime makes to us, and (b) we don't have a way to specify that the struct is
+ // at an alignment plus offset.
+ //
+ // Note, there is a subtle invariant here: LookupFieldBitOffset only handles
+ // non-synthesized ivars, yet this routine may be called for synthesized ones.
+ // That is safe because a synthesized ivar can never be a bit-field.
+ const ASTRecordLayout &RL =
+ CGF.CGM.getContext().getASTObjCInterfaceLayout(OID);
+ uint64_t TypeSizeInBits = CGF.CGM.getContext().toBits(RL.getSize());
+ uint64_t FieldBitOffset = LookupFieldBitOffset(CGF.CGM, OID, 0, Ivar);
+ uint64_t BitOffset = FieldBitOffset % CGF.CGM.getContext().getCharWidth();
+ uint64_t ContainingTypeAlign = CGF.CGM.getContext().getTargetInfo().getCharAlign();
+ uint64_t ContainingTypeSize = TypeSizeInBits - (FieldBitOffset - BitOffset);
+ uint64_t BitFieldSize = Ivar->getBitWidthValue(CGF.getContext());
+
+ // Allocate a new CGBitFieldInfo object to describe this access.
+ //
+ // FIXME: This is incredibly wasteful, these should be uniqued or part of some
+ // layout object. However, this is blocked on other cleanups to the
+ // Objective-C code, so for now we just live with allocating a bunch of these
+ // objects.
+ CGBitFieldInfo *Info = new (CGF.CGM.getContext()) CGBitFieldInfo(
+ CGBitFieldInfo::MakeInfo(CGF.CGM.getTypes(), Ivar, BitOffset, BitFieldSize,
+ ContainingTypeSize, ContainingTypeAlign));
+
+ return LValue::MakeBitfield(V, *Info,
+ IvarTy.withCVRQualifiers(CVRQualifiers));
+}
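A worked example of the bit-field arithmetic above, assuming an 8-bit char and hypothetical numbers: an ivar that the interface layout places at bit 37 of a 16-byte object ends up accessed as a bit-field starting at bit 5 of the byte that Offset points to, inside a notional 96-bit containing struct.

#include <stdint.h>

static void bitfieldOffsetExample() {
  uint64_t CharWidth          = 8;
  uint64_t TypeSizeInBits     = 16 * CharWidth;              // 128
  uint64_t FieldBitOffset     = 37;                          // from the layout
  uint64_t BitOffset          = FieldBitOffset % CharWidth;  // 5
  uint64_t ContainingTypeSize =
      TypeSizeInBits - (FieldBitOffset - BitOffset);         // 128 - 32 = 96
  (void)BitOffset;
  (void)ContainingTypeSize;  // the access is then modeled as a bit-field at
                             // bit 5 of a 96-bit struct starting at Offset
}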
+
+namespace {
+ struct CatchHandler {
+ const VarDecl *Variable;
+ const Stmt *Body;
+ llvm::BasicBlock *Block;
+ llvm::Value *TypeInfo;
+ };
+
+ struct CallObjCEndCatch : EHScopeStack::Cleanup {
+ CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) :
+ MightThrow(MightThrow), Fn(Fn) {}
+ bool MightThrow;
+ llvm::Value *Fn;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ if (!MightThrow) {
+ CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
+ return;
+ }
+
+ CGF.EmitCallOrInvoke(Fn);
+ }
+ };
+}
+
+
+void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S,
+ llvm::Constant *beginCatchFn,
+ llvm::Constant *endCatchFn,
+ llvm::Constant *exceptionRethrowFn) {
+ // Jump destination for falling out of catch bodies.
+ CodeGenFunction::JumpDest Cont;
+ if (S.getNumCatchStmts())
+ Cont = CGF.getJumpDestInCurrentScope("eh.cont");
+
+ CodeGenFunction::FinallyInfo FinallyInfo;
+ if (const ObjCAtFinallyStmt *Finally = S.getFinallyStmt())
+ FinallyInfo.enter(CGF, Finally->getFinallyBody(),
+ beginCatchFn, endCatchFn, exceptionRethrowFn);
+
+ SmallVector<CatchHandler, 8> Handlers;
+
+ // Enter the catch, if there is one.
+ if (S.getNumCatchStmts()) {
+ for (unsigned I = 0, N = S.getNumCatchStmts(); I != N; ++I) {
+ const ObjCAtCatchStmt *CatchStmt = S.getCatchStmt(I);
+ const VarDecl *CatchDecl = CatchStmt->getCatchParamDecl();
+
+ Handlers.push_back(CatchHandler());
+ CatchHandler &Handler = Handlers.back();
+ Handler.Variable = CatchDecl;
+ Handler.Body = CatchStmt->getCatchBody();
+ Handler.Block = CGF.createBasicBlock("catch");
+
+ // @catch(...) always matches.
+ if (!CatchDecl) {
+ Handler.TypeInfo = 0; // catch-all
+ // Don't consider any other catches.
+ break;
+ }
+
+ Handler.TypeInfo = GetEHType(CatchDecl->getType());
+ }
+
+ EHCatchScope *Catch = CGF.EHStack.pushCatch(Handlers.size());
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I)
+ Catch->setHandler(I, Handlers[I].TypeInfo, Handlers[I].Block);
+ }
+
+ // Emit the try body.
+ CGF.EmitStmt(S.getTryBody());
+
+ // Leave the try.
+ if (S.getNumCatchStmts())
+ CGF.popCatchScope();
+
+ // Remember where we were.
+ CGBuilderTy::InsertPoint SavedIP = CGF.Builder.saveAndClearIP();
+
+ // Emit the handlers.
+ for (unsigned I = 0, E = Handlers.size(); I != E; ++I) {
+ CatchHandler &Handler = Handlers[I];
+
+ CGF.EmitBlock(Handler.Block);
+ llvm::Value *RawExn = CGF.getExceptionFromSlot();
+
+ // Enter the catch.
+ llvm::Value *Exn = RawExn;
+ if (beginCatchFn) {
+ Exn = CGF.Builder.CreateCall(beginCatchFn, RawExn, "exn.adjusted");
+ cast<llvm::CallInst>(Exn)->setDoesNotThrow();
+ }
+
+ CodeGenFunction::LexicalScope cleanups(CGF, Handler.Body->getSourceRange());
+
+ if (endCatchFn) {
+ // Add a cleanup to leave the catch.
+ bool EndCatchMightThrow = (Handler.Variable == 0);
+
+ CGF.EHStack.pushCleanup<CallObjCEndCatch>(NormalAndEHCleanup,
+ EndCatchMightThrow,
+ endCatchFn);
+ }
+
+ // Bind the catch parameter if it exists.
+ if (const VarDecl *CatchParam = Handler.Variable) {
+ llvm::Type *CatchType = CGF.ConvertType(CatchParam->getType());
+ llvm::Value *CastExn = CGF.Builder.CreateBitCast(Exn, CatchType);
+
+ CGF.EmitAutoVarDecl(*CatchParam);
+
+ llvm::Value *CatchParamAddr = CGF.GetAddrOfLocalVar(CatchParam);
+
+ switch (CatchParam->getType().getQualifiers().getObjCLifetime()) {
+ case Qualifiers::OCL_Strong:
+ CastExn = CGF.EmitARCRetainNonBlock(CastExn);
+ // fallthrough
+
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ CGF.Builder.CreateStore(CastExn, CatchParamAddr);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ CGF.EmitARCInitWeak(CatchParamAddr, CastExn);
+ break;
+ }
+ }
+
+ CGF.ObjCEHValueStack.push_back(Exn);
+ CGF.EmitStmt(Handler.Body);
+ CGF.ObjCEHValueStack.pop_back();
+
+ // Leave any cleanups associated with the catch.
+ cleanups.ForceCleanup();
+
+ CGF.EmitBranchThroughCleanup(Cont);
+ }
+
+ // Go back to the try-statement fallthrough.
+ CGF.Builder.restoreIP(SavedIP);
+
+ // Pop out of the finally.
+ if (S.getFinallyStmt())
+ FinallyInfo.exit(CGF);
+
+ if (Cont.isValid())
+ CGF.EmitBlock(Cont.getBlock());
+}
+
+namespace {
+ struct CallSyncExit : EHScopeStack::Cleanup {
+ llvm::Value *SyncExitFn;
+ llvm::Value *SyncArg;
+ CallSyncExit(llvm::Value *SyncExitFn, llvm::Value *SyncArg)
+ : SyncExitFn(SyncExitFn), SyncArg(SyncArg) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.Builder.CreateCall(SyncExitFn, SyncArg)->setDoesNotThrow();
+ }
+ };
+}
+
+void CGObjCRuntime::EmitAtSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S,
+ llvm::Function *syncEnterFn,
+ llvm::Function *syncExitFn) {
+ CodeGenFunction::RunCleanupsScope cleanups(CGF);
+
+ // Evaluate the lock operand. This is guaranteed to dominate the
+ // ARC release and lock-release cleanups.
+ const Expr *lockExpr = S.getSynchExpr();
+ llvm::Value *lock;
+ if (CGF.getLangOpts().ObjCAutoRefCount) {
+ lock = CGF.EmitARCRetainScalarExpr(lockExpr);
+ lock = CGF.EmitObjCConsumeObject(lockExpr->getType(), lock);
+ } else {
+ lock = CGF.EmitScalarExpr(lockExpr);
+ }
+ lock = CGF.Builder.CreateBitCast(lock, CGF.VoidPtrTy);
+
+ // Acquire the lock.
+ CGF.Builder.CreateCall(syncEnterFn, lock)->setDoesNotThrow();
+
+ // Register an all-paths cleanup to release the lock.
+ CGF.EHStack.pushCleanup<CallSyncExit>(NormalAndEHCleanup, syncExitFn, lock);
+
+ // Emit the body of the statement.
+ CGF.EmitStmt(S.getSynchBody());
+}
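At the source level, the lowering above behaves roughly like the sketch below; objc_sync_enter and objc_sync_exit are the usual runtime entry points bound to syncEnterFn/syncExitFn, and the catch-all stands in for the NormalAndEHCleanup, which also releases the lock when an exception unwinds the body.

typedef void *id;
extern "C" int objc_sync_enter(id obj);
extern "C" int objc_sync_exit(id obj);

static void synchronizedSketch(id lock, void (*body)()) {
  objc_sync_enter(lock);   // CreateCall(syncEnterFn, lock)
  try {
    body();                // EmitStmt(S.getSynchBody())
  } catch (...) {
    objc_sync_exit(lock);  // EH path of the CallSyncExit cleanup
    throw;
  }
  objc_sync_exit(lock);    // normal path of the same cleanup
}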
+
+/// Compute the pointer-to-function type to which a message send
+/// should be cast in order to correctly call the given method
+/// with the given arguments.
+///
+/// \param method - may be null
+/// \param resultType - the result type to use if there's no method
+/// \param callArgs - the actual arguments, including implicit ones
+CGObjCRuntime::MessageSendInfo
+CGObjCRuntime::getMessageSendInfo(const ObjCMethodDecl *method,
+ QualType resultType,
+ CallArgList &callArgs) {
+ // If there's a method, use information from that.
+ if (method) {
+ const CGFunctionInfo &signature =
+ CGM.getTypes().arrangeObjCMessageSendSignature(method, callArgs[0].Ty);
+
+ llvm::PointerType *signatureType =
+ CGM.getTypes().GetFunctionType(signature)->getPointerTo();
+
+ // If that's not variadic, there's no need to recompute the ABI
+ // arrangement.
+ if (!signature.isVariadic())
+ return MessageSendInfo(signature, signatureType);
+
+ // Otherwise, there is.
+ FunctionType::ExtInfo einfo = signature.getExtInfo();
+ const CGFunctionInfo &argsInfo =
+ CGM.getTypes().arrangeFunctionCall(resultType, callArgs, einfo,
+ signature.getRequiredArgs());
+
+ return MessageSendInfo(argsInfo, signatureType);
+ }
+
+ // There's no method; just use a default CC.
+ const CGFunctionInfo &argsInfo =
+ CGM.getTypes().arrangeFunctionCall(resultType, callArgs,
+ FunctionType::ExtInfo(),
+ RequiredArgs::All);
+
+ // Derive the signature to call from that.
+ llvm::PointerType *signatureType =
+ CGM.getTypes().GetFunctionType(argsInfo)->getPointerTo();
+ return MessageSendInfo(argsInfo, signatureType);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
new file mode 100644
index 0000000..ccf4d4d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGObjCRuntime.h
@@ -0,0 +1,286 @@
+//===----- CGObjCRuntime.h - Interface to ObjC Runtimes ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for Objective-C code generation. Concrete
+// subclasses of this implement code generation for specific Objective-C
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_OBCJRUNTIME_H
+#define CLANG_CODEGEN_OBCJRUNTIME_H
+#include "clang/Basic/IdentifierTable.h" // Selector
+#include "clang/AST/DeclObjC.h"
+
+#include "CGBuilder.h"
+#include "CGCall.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class Constant;
+ class Function;
+ class Module;
+ class StructLayout;
+ class StructType;
+ class Type;
+ class Value;
+}
+
+namespace clang {
+namespace CodeGen {
+ class CodeGenFunction;
+}
+
+ class FieldDecl;
+ class ObjCAtTryStmt;
+ class ObjCAtThrowStmt;
+ class ObjCAtSynchronizedStmt;
+ class ObjCContainerDecl;
+ class ObjCCategoryImplDecl;
+ class ObjCImplementationDecl;
+ class ObjCInterfaceDecl;
+ class ObjCMessageExpr;
+ class ObjCMethodDecl;
+ class ObjCProtocolDecl;
+ class Selector;
+ class ObjCIvarDecl;
+ class ObjCStringLiteral;
+ class BlockDeclRefExpr;
+
+namespace CodeGen {
+ class CodeGenModule;
+ class CGBlockInfo;
+
+// FIXME: Several methods should be pure virtual but aren't, to avoid breaking
+// partially-implemented subclasses.
+
+/// Implements runtime-specific code generation functions.
+class CGObjCRuntime {
+protected:
+ CodeGen::CodeGenModule &CGM;
+ CGObjCRuntime(CodeGen::CodeGenModule &CGM) : CGM(CGM) {}
+
+ // Utility functions for unified ivar access. These need to
+ // eventually be folded into other places (the structure layout
+ // code).
+
+ /// Compute an offset to the given ivar, suitable for passing to
+ /// EmitValueForIvarAtOffset. Note that the correct handling of
+ /// bit-fields is carefully coordinated by these two; use caution!
+ ///
+ /// The latter overload is suitable for computing the offset of a
+ /// synthesized ivar.
+ uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCInterfaceDecl *OID,
+ const ObjCIvarDecl *Ivar);
+ uint64_t ComputeIvarBaseOffset(CodeGen::CodeGenModule &CGM,
+ const ObjCImplementationDecl *OID,
+ const ObjCIvarDecl *Ivar);
+
+ LValue EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *OID,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers,
+ llvm::Value *Offset);
+ /// Emits a try / catch statement. This function is intended to be called by
+ /// subclasses, and provides a generic mechanism for generating these, which
+ /// should be usable by all runtimes. The caller must provide the functions to
+ /// call when entering and exiting a @catch() block, and the function used to
+ /// rethrow exceptions. If the begin and end catch functions are NULL, then
+ /// the function assumes that the EH personality function provides the
+ /// thrown object directly.
+ void EmitTryCatchStmt(CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S,
+ llvm::Constant *beginCatchFn,
+ llvm::Constant *endCatchFn,
+ llvm::Constant *exceptionRethrowFn);
+ /// Emits an @synchronized() statement, using the syncEnterFn and syncExitFn
+ /// arguments as the functions called to lock and unlock the object. This
+ /// function can be called by subclasses that use zero-cost exception
+ /// handling.
+ void EmitAtSynchronizedStmt(CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S,
+ llvm::Function *syncEnterFn,
+ llvm::Function *syncExitFn);
+
+public:
+ virtual ~CGObjCRuntime();
+
+ /// Generate the function required to register all Objective-C components in
+ /// this compilation unit with the runtime library.
+ virtual llvm::Function *ModuleInitFunction() = 0;
+
+ /// Get a selector for the specified name and type values. The
+ /// return value should have the LLVM type for pointer-to
+ /// ASTContext::getObjCSelType().
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ Selector Sel, bool lval=false) = 0;
+
+ /// Get a typed selector.
+ virtual llvm::Value *GetSelector(CGBuilderTy &Builder,
+ const ObjCMethodDecl *Method) = 0;
+
+ /// Get the type constant to catch for the given ObjC pointer type.
+ /// This is used externally to implement catching ObjC types in C++.
+ /// Runtimes which don't support this should add the appropriate
+ /// error to Sema.
+ virtual llvm::Constant *GetEHType(QualType T) = 0;
+
+ /// Generate a constant string object.
+ virtual llvm::Constant *GenerateConstantString(const StringLiteral *) = 0;
+
+ /// Generate a category. A category contains a list of methods (and
+ /// accompanying metadata) and a list of protocols.
+ virtual void GenerateCategory(const ObjCCategoryImplDecl *OCD) = 0;
+
+ /// Generate a class structure for this class.
+ virtual void GenerateClass(const ObjCImplementationDecl *OID) = 0;
+
+ /// Register a class alias.
+ virtual void RegisterAlias(const ObjCCompatibleAliasDecl *OAD) = 0;
+
+ /// Generate an Objective-C message send operation.
+ ///
+ /// \param Method - The method being called, this may be null if synthesizing
+ /// a property setter or getter.
+ virtual CodeGen::RValue
+ GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot ReturnSlot,
+ QualType ResultType,
+ Selector Sel,
+ llvm::Value *Receiver,
+ const CallArgList &CallArgs,
+ const ObjCInterfaceDecl *Class = 0,
+ const ObjCMethodDecl *Method = 0) = 0;
+
+ /// Generate an Objective-C message send operation to the super
+ /// class initiated in a method for Class and with the given Self
+ /// object.
+ ///
+ /// \param Method - The method being called, this may be null if synthesizing
+ /// a property setter or getter.
+ virtual CodeGen::RValue
+ GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot ReturnSlot,
+ QualType ResultType,
+ Selector Sel,
+ const ObjCInterfaceDecl *Class,
+ bool isCategoryImpl,
+ llvm::Value *Self,
+ bool IsClassMessage,
+ const CallArgList &CallArgs,
+ const ObjCMethodDecl *Method = 0) = 0;
+
+ /// Emit the code to return the named protocol as an object, as in a
+ /// @protocol expression.
+ virtual llvm::Value *GenerateProtocolRef(CGBuilderTy &Builder,
+ const ObjCProtocolDecl *OPD) = 0;
+
+ /// Generate the named protocol. Protocols contain method metadata but no
+ /// implementations.
+ virtual void GenerateProtocol(const ObjCProtocolDecl *OPD) = 0;
+
+ /// Generate a function preamble for a method with the specified
+ /// types.
+
+ // FIXME: Currently this just generates the Function definition, but really this
+ // should also be generating the loads of the parameters, as the runtime
+ // should have full control over how parameters are passed.
+ virtual llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
+ const ObjCContainerDecl *CD) = 0;
+
+ /// Return the runtime function for getting properties.
+ virtual llvm::Constant *GetPropertyGetFunction() = 0;
+
+ /// Return the runtime function for setting properties.
+ virtual llvm::Constant *GetPropertySetFunction() = 0;
+
+ /// Return the runtime function for optimized setting properties.
+ virtual llvm::Constant *GetOptimizedPropertySetFunction(bool atomic,
+ bool copy) = 0;
+
+ // API for atomic copying of qualified aggregates in getter.
+ virtual llvm::Constant *GetGetStructFunction() = 0;
+ // API for atomic copying of qualified aggregates in setter.
+ virtual llvm::Constant *GetSetStructFunction() = 0;
+ // API for atomic copying of qualified aggregates with non-trivial copy
+ // assignment (c++) in setter/getter.
+ virtual llvm::Constant *GetCppAtomicObjectFunction() = 0;
+
+ /// GetClass - Return a reference to the class for the given
+ /// interface decl.
+ virtual llvm::Value *GetClass(CGBuilderTy &Builder,
+ const ObjCInterfaceDecl *OID) = 0;
+
+
+ virtual llvm::Value *EmitNSAutoreleasePoolClassRef(CGBuilderTy &Builder) {
+ llvm_unreachable("autoreleasepool unsupported in this ABI");
+ }
+
+ /// EnumerationMutationFunction - Return the function that's called by the
+ /// compiler when a mutation is detected during foreach iteration.
+ virtual llvm::Constant *EnumerationMutationFunction() = 0;
+
+ virtual void EmitSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtSynchronizedStmt &S) = 0;
+ virtual void EmitTryStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtTryStmt &S) = 0;
+ virtual void EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
+ const ObjCAtThrowStmt &S) = 0;
+ virtual llvm::Value *EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *AddrWeakObj) = 0;
+ virtual void EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest) = 0;
+ virtual void EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ bool threadlocal=false) = 0;
+ virtual void EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest,
+ llvm::Value *ivarOffset) = 0;
+ virtual void EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *src, llvm::Value *dest) = 0;
+
+ virtual LValue EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) = 0;
+ virtual llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
+ const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar) = 0;
+ virtual void EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *DestPtr,
+ llvm::Value *SrcPtr,
+ llvm::Value *Size) = 0;
+ virtual llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
+ const CodeGen::CGBlockInfo &blockInfo) = 0;
+ virtual llvm::GlobalVariable *GetClassGlobal(const std::string &Name) = 0;
+
+ struct MessageSendInfo {
+ const CGFunctionInfo &CallInfo;
+ llvm::PointerType *MessengerType;
+
+ MessageSendInfo(const CGFunctionInfo &callInfo,
+ llvm::PointerType *messengerType)
+ : CallInfo(callInfo), MessengerType(messengerType) {}
+ };
+
+ MessageSendInfo getMessageSendInfo(const ObjCMethodDecl *method,
+ QualType resultType,
+ CallArgList &callArgs);
+};
+
+/// Creates an instance of an Objective-C runtime class.
+// TODO: This should include some way of selecting which runtime to target.
+CGObjCRuntime *CreateGNUObjCRuntime(CodeGenModule &CGM);
+CGObjCRuntime *CreateMacObjCRuntime(CodeGenModule &CGM);
+}
+}
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGOpenCLRuntime.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGOpenCLRuntime.cpp
new file mode 100644
index 0000000..3a0e116
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGOpenCLRuntime.cpp
@@ -0,0 +1,28 @@
+//===----- CGOpenCLRuntime.cpp - Interface to OpenCL Runtimes -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for OpenCL code generation. Concrete
+// subclasses of this implement code generation for specific OpenCL
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGOpenCLRuntime.h"
+#include "CodeGenFunction.h"
+#include "llvm/GlobalValue.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+CGOpenCLRuntime::~CGOpenCLRuntime() {}
+
+void CGOpenCLRuntime::EmitWorkGroupLocalVarDecl(CodeGenFunction &CGF,
+ const VarDecl &D) {
+ return CGF.EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGOpenCLRuntime.h b/contrib/llvm/tools/clang/lib/CodeGen/CGOpenCLRuntime.h
new file mode 100644
index 0000000..9a8430f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGOpenCLRuntime.h
@@ -0,0 +1,46 @@
+//===----- CGOpenCLRuntime.h - Interface to OpenCL Runtimes -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for OpenCL code generation. Concrete
+// subclasses of this implement code generation for specific OpenCL
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_OPENCLRUNTIME_H
+#define CLANG_CODEGEN_OPENCLRUNTIME_H
+
+namespace clang {
+
+class VarDecl;
+
+namespace CodeGen {
+
+class CodeGenFunction;
+class CodeGenModule;
+
+class CGOpenCLRuntime {
+protected:
+ CodeGenModule &CGM;
+
+public:
+ CGOpenCLRuntime(CodeGenModule &CGM) : CGM(CGM) {}
+ virtual ~CGOpenCLRuntime();
+
+ /// Emit the IR required for a work-group-local variable declaration, and add
+ /// an entry to CGF's LocalDeclMap for D. The base class does this using
+ /// CodeGenFunction::EmitStaticVarDecl to emit an internal global for D.
+ virtual void EmitWorkGroupLocalVarDecl(CodeGenFunction &CGF,
+ const VarDecl &D);
+};
+
+}
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
new file mode 100644
index 0000000..19973b4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRTTI.cpp
@@ -0,0 +1,1015 @@
+//===--- CGRTTI.cpp - Emit LLVM Code for C++ RTTI descriptors -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of RTTI descriptors.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CGCXXABI.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/Type.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "CGObjCRuntime.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+class RTTIBuilder {
+ CodeGenModule &CGM; // Per-module state.
+ llvm::LLVMContext &VMContext;
+
+ /// Fields - The fields of the RTTI descriptor currently being built.
+ SmallVector<llvm::Constant *, 16> Fields;
+
+ /// GetAddrOfTypeName - Returns the mangled type name of the given type.
+ llvm::GlobalVariable *
+ GetAddrOfTypeName(QualType Ty, llvm::GlobalVariable::LinkageTypes Linkage);
+
+ /// GetAddrOfExternalRTTIDescriptor - Returns the constant for the RTTI
+ /// descriptor of the given type.
+ llvm::Constant *GetAddrOfExternalRTTIDescriptor(QualType Ty);
+
+ /// BuildVTablePointer - Build the vtable pointer for the given type.
+ void BuildVTablePointer(const Type *Ty);
+
+ /// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
+ /// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
+ void BuildSIClassTypeInfo(const CXXRecordDecl *RD);
+
+ /// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
+ /// classes with bases that do not satisfy the abi::__si_class_type_info
+ /// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
+ void BuildVMIClassTypeInfo(const CXXRecordDecl *RD);
+
+ /// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct, used
+ /// for pointer types.
+ void BuildPointerTypeInfo(QualType PointeeTy);
+
+ /// BuildObjCObjectTypeInfo - Build the appropriate kind of
+ /// type_info for an object type.
+ void BuildObjCObjectTypeInfo(const ObjCObjectType *Ty);
+
+ /// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
+ /// struct, used for member pointer types.
+ void BuildPointerToMemberTypeInfo(const MemberPointerType *Ty);
+
+public:
+ RTTIBuilder(CodeGenModule &CGM) : CGM(CGM),
+ VMContext(CGM.getModule().getContext()) { }
+
+ // Pointer type info flags.
+ enum {
+ /// PTI_Const - Type has const qualifier.
+ PTI_Const = 0x1,
+
+ /// PTI_Volatile - Type has volatile qualifier.
+ PTI_Volatile = 0x2,
+
+ /// PTI_Restrict - Type has restrict qualifier.
+ PTI_Restrict = 0x4,
+
+ /// PTI_Incomplete - Type is incomplete.
+ PTI_Incomplete = 0x8,
+
+ /// PTI_ContainingClassIncomplete - Containing class is incomplete.
+ /// (in pointer to member).
+ PTI_ContainingClassIncomplete = 0x10
+ };
+
+ // VMI type info flags.
+ enum {
+ /// VMI_NonDiamondRepeat - Class has non-diamond repeated inheritance.
+ VMI_NonDiamondRepeat = 0x1,
+
+ /// VMI_DiamondShaped - Class is diamond shaped.
+ VMI_DiamondShaped = 0x2
+ };
+
+ // Base class type info flags.
+ enum {
+ /// BCTI_Virtual - Base class is virtual.
+ BCTI_Virtual = 0x1,
+
+ /// BCTI_Public - Base class is public.
+ BCTI_Public = 0x2
+ };
+
+ /// BuildTypeInfo - Build the RTTI type info struct for the given type.
+ ///
+ /// \param Force - true to force the creation of this RTTI value
+ llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false);
+};
+}
+
+llvm::GlobalVariable *
+RTTIBuilder::GetAddrOfTypeName(QualType Ty,
+ llvm::GlobalVariable::LinkageTypes Linkage) {
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
+ Out.flush();
+ StringRef Name = OutName.str();
+
+ // We know that the mangled name of the type starts at index 4 of the
+ // mangled name of the typename, so we can just index into it in order to
+ // get the mangled name of the type.
+ llvm::Constant *Init = llvm::ConstantDataArray::getString(VMContext,
+ Name.substr(4));
+
+ llvm::GlobalVariable *GV =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, Init->getType(), Linkage);
+
+ GV->setInitializer(Init);
+
+ return GV;
+}
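+
+// Illustrative example (hypothetical type): for 'struct S', mangleCXXRTTIName
+// produces "_ZTS1S"; dropping the four-character "_ZTS" prefix leaves "1S",
+// the mangled name of the type itself, which is the string stored in the
+// typename global created above.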
+
+llvm::Constant *RTTIBuilder::GetAddrOfExternalRTTIDescriptor(QualType Ty) {
+ // Mangle the RTTI name.
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
+ Out.flush();
+ StringRef Name = OutName.str();
+
+ // Look for an existing global.
+ llvm::GlobalVariable *GV = CGM.getModule().getNamedGlobal(Name);
+
+ if (!GV) {
+ // Create a new global variable.
+ GV = new llvm::GlobalVariable(CGM.getModule(), CGM.Int8PtrTy,
+ /*Constant=*/true,
+ llvm::GlobalValue::ExternalLinkage, 0, Name);
+ }
+
+ return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
+}
+
+/// TypeInfoIsInStandardLibrary - Given a builtin type, returns whether the type
+/// info for that type is defined in the standard library.
+static bool TypeInfoIsInStandardLibrary(const BuiltinType *Ty) {
+ // Itanium C++ ABI 2.9.2:
+ // Basic type information (e.g. for "int", "bool", etc.) will be kept in
+ // the run-time support library. Specifically, the run-time support
+ // library should contain type_info objects for the types X, X* and
+ // X const*, for every X in: void, std::nullptr_t, bool, wchar_t, char,
+ // unsigned char, signed char, short, unsigned short, int, unsigned int,
+ // long, unsigned long, long long, unsigned long long, float, double,
+ // long double, char16_t, char32_t, and the IEEE 754r decimal and
+ // half-precision floating point types.
+ switch (Ty->getKind()) {
+ case BuiltinType::Void:
+ case BuiltinType::NullPtr:
+ case BuiltinType::Bool:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ case BuiltinType::UChar:
+ case BuiltinType::SChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ case BuiltinType::Half:
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ case BuiltinType::LongDouble:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ case BuiltinType::Int128:
+ case BuiltinType::UInt128:
+ return true;
+
+ case BuiltinType::Dependent:
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ llvm_unreachable("asking for RRTI for a placeholder type!");
+
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ llvm_unreachable("FIXME: Objective-C types are unsupported!");
+ }
+
+ llvm_unreachable("Invalid BuiltinType Kind!");
+}
+
+static bool TypeInfoIsInStandardLibrary(const PointerType *PointerTy) {
+ QualType PointeeTy = PointerTy->getPointeeType();
+ const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(PointeeTy);
+ if (!BuiltinTy)
+ return false;
+
+ // Check the qualifiers.
+ Qualifiers Quals = PointeeTy.getQualifiers();
+ Quals.removeConst();
+
+ if (!Quals.empty())
+ return false;
+
+ return TypeInfoIsInStandardLibrary(BuiltinTy);
+}
+
+/// IsStandardLibraryRTTIDescriptor - Returns whether the type
+/// information for the given type exists in the standard library.
+static bool IsStandardLibraryRTTIDescriptor(QualType Ty) {
+ // Type info for builtin types is defined in the standard library.
+ if (const BuiltinType *BuiltinTy = dyn_cast<BuiltinType>(Ty))
+ return TypeInfoIsInStandardLibrary(BuiltinTy);
+
+ // Type info for some pointer types to builtin types is defined in the
+ // standard library.
+ if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
+ return TypeInfoIsInStandardLibrary(PointerTy);
+
+ return false;
+}
+
+/// ShouldUseExternalRTTIDescriptor - Returns whether the type information for
+/// the given type is defined somewhere else, so that we should not emit the
+/// type information in this translation unit. Assumes that it is not a
+/// standard-library type.
+static bool ShouldUseExternalRTTIDescriptor(CodeGenModule &CGM, QualType Ty) {
+ ASTContext &Context = CGM.getContext();
+
+ // If RTTI is disabled, don't consider key functions.
+ if (!Context.getLangOpts().RTTI) return false;
+
+ if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!RD->hasDefinition())
+ return false;
+
+ if (!RD->isDynamicClass())
+ return false;
+
+ return !CGM.getVTables().ShouldEmitVTableInThisTU(RD);
+ }
+
+ return false;
+}
+
+/// IsIncompleteClassType - Returns whether the given record type is incomplete.
+static bool IsIncompleteClassType(const RecordType *RecordTy) {
+ return !RecordTy->getDecl()->isCompleteDefinition();
+}
+
+/// ContainsIncompleteClassType - Returns whether the given type contains an
+/// incomplete class type. This is true if
+///
+/// * The given type is an incomplete class type.
+/// * The given type is a pointer type whose pointee type contains an
+/// incomplete class type.
+/// * The given type is a member pointer type whose class is an incomplete
+/// class type.
+/// * The given type is a member pointer type whose pointee type contains an
+/// incomplete class type.
+static bool ContainsIncompleteClassType(QualType Ty) {
+ if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+ if (IsIncompleteClassType(RecordTy))
+ return true;
+ }
+
+ if (const PointerType *PointerTy = dyn_cast<PointerType>(Ty))
+ return ContainsIncompleteClassType(PointerTy->getPointeeType());
+
+ if (const MemberPointerType *MemberPointerTy =
+ dyn_cast<MemberPointerType>(Ty)) {
+ // Check if the class type is incomplete.
+ const RecordType *ClassType = cast<RecordType>(MemberPointerTy->getClass());
+ if (IsIncompleteClassType(ClassType))
+ return true;
+
+ return ContainsIncompleteClassType(MemberPointerTy->getPointeeType());
+ }
+
+ return false;
+}
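+
+// Illustrative examples (hypothetical types):
+//
+//   class Incomplete;                              // declared, never defined
+//   ContainsIncompleteClassType(Incomplete)        -> true
+//   ContainsIncompleteClassType(Incomplete **)     -> true (via the pointee)
+//   ContainsIncompleteClassType(int Incomplete::*) -> true (incomplete class)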
+
+/// getTypeInfoLinkage - Return the linkage that the type info and type info
+/// name constants should have for the given type.
+static llvm::GlobalVariable::LinkageTypes
+getTypeInfoLinkage(CodeGenModule &CGM, QualType Ty) {
+ // Itanium C++ ABI 2.9.5p7:
+ // In addition, it and all of the intermediate abi::__pointer_type_info
+ // structs in the chain down to the abi::__class_type_info for the
+ // incomplete class type must be prevented from resolving to the
+ // corresponding type_info structs for the complete class type, possibly
+ // by making them local static objects. Finally, a dummy class RTTI is
+ // generated for the incomplete type that will not resolve to the final
+ // complete class RTTI (because the latter need not exist), possibly by
+ // making it a local static object.
+ if (ContainsIncompleteClassType(Ty))
+ return llvm::GlobalValue::InternalLinkage;
+
+ switch (Ty->getLinkage()) {
+ case NoLinkage:
+ case InternalLinkage:
+ case UniqueExternalLinkage:
+ return llvm::GlobalValue::InternalLinkage;
+
+ case ExternalLinkage:
+ if (!CGM.getLangOpts().RTTI) {
+ // RTTI is not enabled, which means that this type info struct is going
+ // to be used for exception handling. Give it linkonce_odr linkage.
+ return llvm::GlobalValue::LinkOnceODRLinkage;
+ }
+
+ if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ if (RD->hasAttr<WeakAttr>())
+ return llvm::GlobalValue::WeakODRLinkage;
+ if (RD->isDynamicClass())
+ return CGM.getVTableLinkage(RD);
+ }
+
+ return llvm::GlobalValue::LinkOnceODRLinkage;
+ }
+
+ llvm_unreachable("Invalid linkage!");
+}
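+
+// Illustrative examples (hypothetical types): a class in an anonymous
+// namespace, e.g. 'namespace { struct S {}; }', has internal linkage, so its
+// type info and type name globals are emitted with internal linkage; a
+// pointer to a forward-declared class hits the incomplete-type case above and
+// is likewise emitted as an internal, local-static-style object.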
+
+// CanUseSingleInheritance - Return whether the given record decl has a "single,
+// public, non-virtual base at offset zero (i.e. the derived class is dynamic
+// iff the base is)", according to Itanium C++ ABI, 2.9.5p6b.
+static bool CanUseSingleInheritance(const CXXRecordDecl *RD) {
+ // Check the number of bases.
+ if (RD->getNumBases() != 1)
+ return false;
+
+ // Get the base.
+ CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin();
+
+ // Check that the base is not virtual.
+ if (Base->isVirtual())
+ return false;
+
+ // Check that the base is public.
+ if (Base->getAccessSpecifier() != AS_public)
+ return false;
+
+ // Check that the class is dynamic iff the base is.
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (!BaseDecl->isEmpty() &&
+ BaseDecl->isDynamicClass() != RD->isDynamicClass())
+ return false;
+
+ return true;
+}
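+
+// Illustrative examples (hypothetical classes):
+//
+//   struct A { virtual void f(); };
+//   struct B : A { };          // single public non-virtual base: qualifies,
+//                              // so B gets abi::__si_class_type_info.
+//   struct C : virtual A { };  // virtual base: does not qualify, so C gets
+//                              // abi::__vmi_class_type_info instead.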
+
+void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
+ // abi::__class_type_info.
+ static const char * const ClassTypeInfo =
+ "_ZTVN10__cxxabiv117__class_type_infoE";
+ // abi::__si_class_type_info.
+ static const char * const SIClassTypeInfo =
+ "_ZTVN10__cxxabiv120__si_class_type_infoE";
+ // abi::__vmi_class_type_info.
+ static const char * const VMIClassTypeInfo =
+ "_ZTVN10__cxxabiv121__vmi_class_type_infoE";
+
+ const char *VTableName = 0;
+
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Non-canonical and dependent types shouldn't get here");
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ llvm_unreachable("References shouldn't get here");
+
+ case Type::Builtin:
+ // GCC treats vector and complex types as fundamental types.
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::Complex:
+ case Type::Atomic:
+ // FIXME: GCC treats block pointers as fundamental types?!
+ case Type::BlockPointer:
+ // abi::__fundamental_type_info.
+ VTableName = "_ZTVN10__cxxabiv123__fundamental_type_infoE";
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ // abi::__array_type_info.
+ VTableName = "_ZTVN10__cxxabiv117__array_type_infoE";
+ break;
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ // abi::__function_type_info.
+ VTableName = "_ZTVN10__cxxabiv120__function_type_infoE";
+ break;
+
+ case Type::Enum:
+ // abi::__enum_type_info.
+ VTableName = "_ZTVN10__cxxabiv116__enum_type_infoE";
+ break;
+
+ case Type::Record: {
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+
+ if (!RD->hasDefinition() || !RD->getNumBases()) {
+ VTableName = ClassTypeInfo;
+ } else if (CanUseSingleInheritance(RD)) {
+ VTableName = SIClassTypeInfo;
+ } else {
+ VTableName = VMIClassTypeInfo;
+ }
+
+ break;
+ }
+
+ case Type::ObjCObject:
+ // Ignore protocol qualifiers.
+ Ty = cast<ObjCObjectType>(Ty)->getBaseType().getTypePtr();
+
+ // Handle id and Class.
+ if (isa<BuiltinType>(Ty)) {
+ VTableName = ClassTypeInfo;
+ break;
+ }
+
+ assert(isa<ObjCInterfaceType>(Ty));
+ // Fall through.
+
+ case Type::ObjCInterface:
+ if (cast<ObjCInterfaceType>(Ty)->getDecl()->getSuperClass()) {
+ VTableName = SIClassTypeInfo;
+ } else {
+ VTableName = ClassTypeInfo;
+ }
+ break;
+
+ case Type::ObjCObjectPointer:
+ case Type::Pointer:
+ // abi::__pointer_type_info.
+ VTableName = "_ZTVN10__cxxabiv119__pointer_type_infoE";
+ break;
+
+ case Type::MemberPointer:
+ // abi::__pointer_to_member_type_info.
+ VTableName = "_ZTVN10__cxxabiv129__pointer_to_member_type_infoE";
+ break;
+ }
+
+ llvm::Constant *VTable =
+ CGM.getModule().getOrInsertGlobal(VTableName, CGM.Int8PtrTy);
+
+ llvm::Type *PtrDiffTy =
+ CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+
+ // The vtable address point is 2: it is past the offset-to-top and RTTI
+ // entries at the start of the Itanium vtable.
+ llvm::Constant *Two = llvm::ConstantInt::get(PtrDiffTy, 2);
+ VTable = llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Two);
+ VTable = llvm::ConstantExpr::getBitCast(VTable, CGM.Int8PtrTy);
+
+ Fields.push_back(VTable);
+}
+
+// maybeUpdateRTTILinkage - Will update the linkage of the RTTI data structures
+// from available_externally to the correct linkage if necessary. An example of
+// this is:
+//
+// struct A {
+// virtual void f();
+// };
+//
+// const std::type_info &g() {
+// return typeid(A);
+// }
+//
+// void A::f() { }
+//
+// When we're generating the typeid(A) expression, we do not yet know that
+// A's key function is defined in this translation unit, so we will give the
+// typeinfo and typename structures available_externally linkage. When A::f
+// forces the vtable to be generated, we need to change the linkage of the
+// typeinfo and typename structs, otherwise we'll end up with undefined
+// externals when linking.
+static void
+maybeUpdateRTTILinkage(CodeGenModule &CGM, llvm::GlobalVariable *GV,
+ QualType Ty) {
+ // We're only interested in globals with available_externally linkage.
+ if (!GV->hasAvailableExternallyLinkage())
+ return;
+
+ // Get the real linkage for the type.
+ llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
+
+ // If the variable is supposed to have available_externally linkage, we don't
+ // need to do anything.
+ if (Linkage == llvm::GlobalVariable::AvailableExternallyLinkage)
+ return;
+
+ // Update the typeinfo linkage.
+ GV->setLinkage(Linkage);
+
+ // Get the typename global.
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
+ Out.flush();
+ StringRef Name = OutName.str();
+
+ llvm::GlobalVariable *TypeNameGV = CGM.getModule().getNamedGlobal(Name);
+
+ assert(TypeNameGV->hasAvailableExternallyLinkage() &&
+ "Type name has different linkage from type info!");
+
+ // And update its linkage.
+ TypeNameGV->setLinkage(Linkage);
+}
+
+llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
+ // We want to operate on the canonical type.
+ Ty = CGM.getContext().getCanonicalType(Ty);
+
+ // Check if we've already emitted an RTTI descriptor for this type.
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTI(Ty, Out);
+ Out.flush();
+ StringRef Name = OutName.str();
+
+ llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
+ if (OldGV && !OldGV->isDeclaration()) {
+ maybeUpdateRTTILinkage(CGM, OldGV, Ty);
+
+ return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
+ }
+
+ // Check if there is already an external RTTI descriptor for this type.
+ bool IsStdLib = IsStandardLibraryRTTIDescriptor(Ty);
+ if (!Force && (IsStdLib || ShouldUseExternalRTTIDescriptor(CGM, Ty)))
+ return GetAddrOfExternalRTTIDescriptor(Ty);
+
+ // Emit standard-library type info with external linkage.
+ llvm::GlobalVariable::LinkageTypes Linkage;
+ if (IsStdLib)
+ Linkage = llvm::GlobalValue::ExternalLinkage;
+ else
+ Linkage = getTypeInfoLinkage(CGM, Ty);
+
+ // Add the vtable pointer.
+ BuildVTablePointer(cast<Type>(Ty));
+
+ // And the name.
+ llvm::GlobalVariable *TypeName = GetAddrOfTypeName(Ty, Linkage);
+
+ Fields.push_back(llvm::ConstantExpr::getBitCast(TypeName, CGM.Int8PtrTy));
+
+ switch (Ty->getTypeClass()) {
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Non-canonical and dependent types shouldn't get here");
+
+ // GCC treats vector types as fundamental types.
+ case Type::Builtin:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::Complex:
+ case Type::BlockPointer:
+ // Itanium C++ ABI 2.9.5p4:
+ // abi::__fundamental_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ llvm_unreachable("References shouldn't get here");
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__array_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::FunctionNoProto:
+ case Type::FunctionProto:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__function_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::Enum:
+ // Itanium C++ ABI 2.9.5p5:
+ // abi::__enum_type_info adds no data members to std::type_info.
+ break;
+
+ case Type::Record: {
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(cast<RecordType>(Ty)->getDecl());
+ if (!RD->hasDefinition() || !RD->getNumBases()) {
+ // We don't need to emit any fields.
+ break;
+ }
+
+ if (CanUseSingleInheritance(RD))
+ BuildSIClassTypeInfo(RD);
+ else
+ BuildVMIClassTypeInfo(RD);
+
+ break;
+ }
+
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ BuildObjCObjectTypeInfo(cast<ObjCObjectType>(Ty));
+ break;
+
+ case Type::ObjCObjectPointer:
+ BuildPointerTypeInfo(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
+ break;
+
+ case Type::Pointer:
+ BuildPointerTypeInfo(cast<PointerType>(Ty)->getPointeeType());
+ break;
+
+ case Type::MemberPointer:
+ BuildPointerToMemberTypeInfo(cast<MemberPointerType>(Ty));
+ break;
+
+ case Type::Atomic:
+ // No fields, at least for the moment.
+ break;
+ }
+
+ llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
+ /*Constant=*/true, Linkage, Init, Name);
+
+ // If there's already an old global variable, replace it with the new one.
+ if (OldGV) {
+ GV->takeName(OldGV);
+ llvm::Constant *NewPtr =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtr);
+ OldGV->eraseFromParent();
+ }
+
+ // GCC only relies on the uniqueness of the type names, not the
+ // type_infos themselves, so we can emit these as hidden symbols.
+ // But don't do this if we're worried about strict visibility
+ // compatibility.
+ if (const RecordType *RT = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ CGM.setTypeVisibility(GV, RD, CodeGenModule::TVK_ForRTTI);
+ CGM.setTypeVisibility(TypeName, RD, CodeGenModule::TVK_ForRTTIName);
+ } else {
+ Visibility TypeInfoVisibility = DefaultVisibility;
+ if (CGM.getCodeGenOpts().HiddenWeakVTables &&
+ Linkage == llvm::GlobalValue::LinkOnceODRLinkage)
+ TypeInfoVisibility = HiddenVisibility;
+
+ // The type name should have the same visibility as the type itself.
+ Visibility ExplicitVisibility = Ty->getVisibility();
+ TypeName->setVisibility(CodeGenModule::
+ GetLLVMVisibility(ExplicitVisibility));
+
+ TypeInfoVisibility = minVisibility(TypeInfoVisibility, Ty->getVisibility());
+ GV->setVisibility(CodeGenModule::GetLLVMVisibility(TypeInfoVisibility));
+ }
+
+ GV->setUnnamedAddr(true);
+
+ return llvm::ConstantExpr::getBitCast(GV, CGM.Int8PtrTy);
+}
+
+/// ComputeQualifierFlags - Compute the pointer type info flags from the
+/// given qualifiers.
+static unsigned ComputeQualifierFlags(Qualifiers Quals) {
+ unsigned Flags = 0;
+
+ if (Quals.hasConst())
+ Flags |= RTTIBuilder::PTI_Const;
+ if (Quals.hasVolatile())
+ Flags |= RTTIBuilder::PTI_Volatile;
+ if (Quals.hasRestrict())
+ Flags |= RTTIBuilder::PTI_Restrict;
+
+ return Flags;
+}
+
+/// BuildObjCObjectTypeInfo - Build the appropriate kind of type_info
+/// for the given Objective-C object type.
+void RTTIBuilder::BuildObjCObjectTypeInfo(const ObjCObjectType *OT) {
+ // Drop qualifiers.
+ const Type *T = OT->getBaseType().getTypePtr();
+ assert(isa<BuiltinType>(T) || isa<ObjCInterfaceType>(T));
+
+ // The builtin types are abi::__class_type_infos and don't require
+ // extra fields.
+ if (isa<BuiltinType>(T)) return;
+
+ ObjCInterfaceDecl *Class = cast<ObjCInterfaceType>(T)->getDecl();
+ ObjCInterfaceDecl *Super = Class->getSuperClass();
+
+ // Root classes are also __class_type_info.
+ if (!Super) return;
+
+ QualType SuperTy = CGM.getContext().getObjCInterfaceType(Super);
+
+ // Everything else is single inheritance.
+ llvm::Constant *BaseTypeInfo = RTTIBuilder(CGM).BuildTypeInfo(SuperTy);
+ Fields.push_back(BaseTypeInfo);
+}
+
+/// BuildSIClassTypeInfo - Build an abi::__si_class_type_info, used for single
+/// inheritance, according to the Itanium C++ ABI, 2.9.5p6b.
+void RTTIBuilder::BuildSIClassTypeInfo(const CXXRecordDecl *RD) {
+ // Itanium C++ ABI 2.9.5p6b:
+ // It adds to abi::__class_type_info a single member pointing to the
+ // type_info structure for the base type,
+ llvm::Constant *BaseTypeInfo =
+ RTTIBuilder(CGM).BuildTypeInfo(RD->bases_begin()->getType());
+ Fields.push_back(BaseTypeInfo);
+}
+
+namespace {
+ /// SeenBases - Contains virtual and non-virtual bases seen when traversing
+ /// a class hierarchy.
+ struct SeenBases {
+ llvm::SmallPtrSet<const CXXRecordDecl *, 16> NonVirtualBases;
+ llvm::SmallPtrSet<const CXXRecordDecl *, 16> VirtualBases;
+ };
+}
+
+/// ComputeVMIClassTypeInfoFlags - Compute the value of the flags member in
+/// abi::__vmi_class_type_info.
+///
+static unsigned ComputeVMIClassTypeInfoFlags(const CXXBaseSpecifier *Base,
+ SeenBases &Bases) {
+
+ unsigned Flags = 0;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ if (Base->isVirtual()) {
+ if (Bases.VirtualBases.count(BaseDecl)) {
+ // If this virtual base has been seen before, then the class is diamond
+ // shaped.
+ Flags |= RTTIBuilder::VMI_DiamondShaped;
+ } else {
+ if (Bases.NonVirtualBases.count(BaseDecl))
+ Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
+
+ // Mark the virtual base as seen.
+ Bases.VirtualBases.insert(BaseDecl);
+ }
+ } else {
+ if (Bases.NonVirtualBases.count(BaseDecl)) {
+ // If this non-virtual base has been seen before, then the class has non-
+ // diamond shaped repeated inheritance.
+ Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
+ } else {
+ if (Bases.VirtualBases.count(BaseDecl))
+ Flags |= RTTIBuilder::VMI_NonDiamondRepeat;
+
+ // Mark the non-virtual base as seen.
+ Bases.NonVirtualBases.insert(BaseDecl);
+ }
+ }
+
+ // Walk all bases.
+ for (CXXRecordDecl::base_class_const_iterator I = BaseDecl->bases_begin(),
+ E = BaseDecl->bases_end(); I != E; ++I)
+ Flags |= ComputeVMIClassTypeInfoFlags(I, Bases);
+
+ return Flags;
+}
+
+static unsigned ComputeVMIClassTypeInfoFlags(const CXXRecordDecl *RD) {
+ unsigned Flags = 0;
+ SeenBases Bases;
+
+ // Walk all bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I)
+ Flags |= ComputeVMIClassTypeInfoFlags(I, Bases);
+
+ return Flags;
+}
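+
+// Illustrative example (hypothetical hierarchy):
+//
+//   struct A { virtual void f(); };
+//   struct B : virtual A { };
+//   struct C : virtual A { };
+//   struct D : B, C { };   // A is reached as a virtual base through both B
+//                          // and C, so D's flags include VMI_DiamondShaped.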
+
+/// BuildVMIClassTypeInfo - Build an abi::__vmi_class_type_info, used for
+/// classes with bases that do not satisfy the abi::__si_class_type_info
+/// constraints, according to the Itanium C++ ABI, 2.9.5p5c.
+void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
+ llvm::Type *UnsignedIntLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __flags is a word with flags describing details about the class
+ // structure, which may be referenced by using the __flags_masks
+ // enumeration. These flags refer to both direct and indirect bases.
+ unsigned Flags = ComputeVMIClassTypeInfoFlags(RD);
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __base_count is a word with the number of direct proper base class
+ // descriptions that follow.
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, RD->getNumBases()));
+
+ if (!RD->getNumBases())
+ return;
+
+ llvm::Type *LongLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().LongTy);
+
+ // Now add the base class descriptions.
+
+ // Itanium C++ ABI 2.9.5p6c:
+ // __base_info[] is an array of base class descriptions -- one for every
+ // direct proper base. Each description is of the type:
+ //
+ // struct abi::__base_class_type_info {
+ // public:
+ // const __class_type_info *__base_type;
+ // long __offset_flags;
+ //
+ // enum __offset_flags_masks {
+ // __virtual_mask = 0x1,
+ // __public_mask = 0x2,
+ // __offset_shift = 8
+ // };
+ // };
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXBaseSpecifier *Base = I;
+
+ // The __base_type member points to the RTTI for the base type.
+ Fields.push_back(RTTIBuilder(CGM).BuildTypeInfo(Base->getType()));
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ int64_t OffsetFlags = 0;
+
+ // All but the lower 8 bits of __offset_flags are a signed offset.
+ // For a non-virtual base, this is the offset in the object of the base
+ // subobject. For a virtual base, this is the offset in the virtual table of
+ // the virtual base offset for the virtual base referenced (negative).
+ CharUnits Offset;
+ if (Base->isVirtual())
+ Offset =
+ CGM.getVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
+ else {
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+ Offset = Layout.getBaseClassOffset(BaseDecl);
+ }
+
+ OffsetFlags = Offset.getQuantity() << 8;
+
+ // The low-order byte of __offset_flags contains flags, as given by the
+ // masks from the enumeration __offset_flags_masks.
+ if (Base->isVirtual())
+ OffsetFlags |= BCTI_Virtual;
+ if (Base->getAccessSpecifier() == AS_public)
+ OffsetFlags |= BCTI_Public;
+
+ Fields.push_back(llvm::ConstantInt::get(LongLTy, OffsetFlags));
+ }
+}
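+
+// Illustrative example (hypothetical layout): for a public non-virtual base
+// laid out at byte offset 8, __offset_flags is (8 << 8) | __public_mask, i.e.
+// 0x802; for a public virtual base, the shifted value is instead the
+// (typically negative) offset of the virtual base offset in the vtable, and
+// __virtual_mask is also set.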
+
+/// BuildPointerTypeInfo - Build an abi::__pointer_type_info struct,
+/// used for pointer types.
+void RTTIBuilder::BuildPointerTypeInfo(QualType PointeeTy) {
+ Qualifiers Quals;
+ QualType UnqualifiedPointeeTy =
+ CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __flags is a flag word describing the cv-qualification and other
+ // attributes of the type pointed to.
+ unsigned Flags = ComputeQualifierFlags(Quals);
+
+ // Itanium C++ ABI 2.9.5p7:
+ // When the abi::__pbase_type_info is for a direct or indirect pointer to an
+ // incomplete class type, the incomplete target type flag is set.
+ if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
+ Flags |= PTI_Incomplete;
+
+ llvm::Type *UnsignedIntLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __pointee is a pointer to the std::type_info derivation for the
+ // unqualified type being pointed to.
+ llvm::Constant *PointeeTypeInfo =
+ RTTIBuilder(CGM).BuildTypeInfo(UnqualifiedPointeeTy);
+ Fields.push_back(PointeeTypeInfo);
+}
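+
+// Illustrative example (hypothetical type): for 'const volatile int *', the
+// pointee qualifiers give __flags = PTI_Const | PTI_Volatile (0x3), and
+// __pointee refers to the type_info for plain 'int'.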
+
+/// BuildPointerToMemberTypeInfo - Build an abi::__pointer_to_member_type_info
+/// struct, used for member pointer types.
+void RTTIBuilder::BuildPointerToMemberTypeInfo(const MemberPointerType *Ty) {
+ QualType PointeeTy = Ty->getPointeeType();
+
+ Qualifiers Quals;
+ QualType UnqualifiedPointeeTy =
+ CGM.getContext().getUnqualifiedArrayType(PointeeTy, Quals);
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __flags is a flag word describing the cv-qualification and other
+ // attributes of the type pointed to.
+ unsigned Flags = ComputeQualifierFlags(Quals);
+
+ const RecordType *ClassType = cast<RecordType>(Ty->getClass());
+
+ // Itanium C++ ABI 2.9.5p7:
+ // When the abi::__pbase_type_info is for a direct or indirect pointer to an
+ // incomplete class type, the incomplete target type flag is set.
+ if (ContainsIncompleteClassType(UnqualifiedPointeeTy))
+ Flags |= PTI_Incomplete;
+
+ if (IsIncompleteClassType(ClassType))
+ Flags |= PTI_ContainingClassIncomplete;
+
+ llvm::Type *UnsignedIntLTy =
+ CGM.getTypes().ConvertType(CGM.getContext().UnsignedIntTy);
+ Fields.push_back(llvm::ConstantInt::get(UnsignedIntLTy, Flags));
+
+ // Itanium C++ ABI 2.9.5p7:
+ // __pointee is a pointer to the std::type_info derivation for the
+ // unqualified type being pointed to.
+ llvm::Constant *PointeeTypeInfo =
+ RTTIBuilder(CGM).BuildTypeInfo(UnqualifiedPointeeTy);
+ Fields.push_back(PointeeTypeInfo);
+
+ // Itanium C++ ABI 2.9.5p9:
+ // __context is a pointer to an abi::__class_type_info corresponding to the
+ // class type containing the member pointed to
+ // (e.g., the "A" in "int A::*").
+ Fields.push_back(RTTIBuilder(CGM).BuildTypeInfo(QualType(ClassType, 0)));
+}
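+
+// Illustrative example (hypothetical type): for 'int A::*' where 'class A;'
+// is only forward-declared, __flags gets PTI_ContainingClassIncomplete,
+// __pointee is the type_info for 'int', and __context is the internal-linkage
+// dummy type_info emitted for the incomplete 'A'.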
+
+llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty,
+ bool ForEH) {
+ // Return a bogus pointer if RTTI is disabled, unless it's for EH.
+ // FIXME: should we even be calling this method if RTTI is disabled
+ // and it's not for EH?
+ if (!ForEH && !getContext().getLangOpts().RTTI)
+ return llvm::Constant::getNullValue(Int8PtrTy);
+
+ if (ForEH && Ty->isObjCObjectPointerType() && !LangOpts.NeXTRuntime)
+ return ObjCRuntime->GetEHType(Ty);
+
+ return RTTIBuilder(*this).BuildTypeInfo(Ty);
+}
+
+void CodeGenModule::EmitFundamentalRTTIDescriptor(QualType Type) {
+ QualType PointerType = Context.getPointerType(Type);
+ QualType PointerTypeConst = Context.getPointerType(Type.withConst());
+ RTTIBuilder(*this).BuildTypeInfo(Type, true);
+ RTTIBuilder(*this).BuildTypeInfo(PointerType, true);
+ RTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, true);
+}
+
+void CodeGenModule::EmitFundamentalRTTIDescriptors() {
+ QualType FundamentalTypes[] = { Context.VoidTy, Context.NullPtrTy,
+ Context.BoolTy, Context.WCharTy,
+ Context.CharTy, Context.UnsignedCharTy,
+ Context.SignedCharTy, Context.ShortTy,
+ Context.UnsignedShortTy, Context.IntTy,
+ Context.UnsignedIntTy, Context.LongTy,
+ Context.UnsignedLongTy, Context.LongLongTy,
+ Context.UnsignedLongLongTy, Context.FloatTy,
+ Context.DoubleTy, Context.LongDoubleTy,
+ Context.Char16Ty, Context.Char32Ty };
+ for (unsigned i = 0; i < sizeof(FundamentalTypes)/sizeof(QualType); ++i)
+ EmitFundamentalRTTIDescriptor(FundamentalTypes[i]);
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
new file mode 100644
index 0000000..25a0a50
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayout.h
@@ -0,0 +1,281 @@
+//===--- CGRecordLayout.h - LLVM Record Layout Information ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGRECORDLAYOUT_H
+#define CLANG_CODEGEN_CGRECORDLAYOUT_H
+
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/DerivedTypes.h"
+
+namespace llvm {
+ class StructType;
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// \brief Helper object for describing how to generate the code for access to a
+/// bit-field.
+///
+/// This structure is intended to describe the "policy" of how the bit-field
+/// should be accessed, which may be target, language, or ABI dependent.
+class CGBitFieldInfo {
+public:
+ /// Descriptor for a single component of a bit-field access. The entire
+ /// bit-field is constituted of a bitwise OR of all of the individual
+ /// components.
+ ///
+ /// Each component describes an accessed value, which is how the component
+ /// should be transferred to/from memory, and a target placement, which is how
+ /// that component fits into the constituted bit-field. The pseudo-IR for a
+ /// load is:
+ ///
+ /// %0 = gep %base, 0, FieldIndex
+ /// %1 = gep (i8*) %0, FieldByteOffset
+ /// %2 = (i(AccessWidth) *) %1
+ /// %3 = load %2, align AccessAlignment
+ /// %4 = shr %3, FieldBitStart
+ ///
+ /// and the composed bit-field is formed as the boolean OR of all accesses,
+ /// masked to TargetBitWidth bits and shifted to TargetBitOffset.
+ struct AccessInfo {
+ /// Offset of the field to load in the LLVM structure, if any.
+ unsigned FieldIndex;
+
+ /// Byte offset from the field address, if any. This should generally be
+ /// unused as the cleanest IR comes from having a well-constructed LLVM type
+ /// with proper GEP instructions, but sometimes its use is required, for
+ /// example if an access is intended to straddle an LLVM field boundary.
+ CharUnits FieldByteOffset;
+
+ /// Bit offset in the accessed value to use. The width is implied by \see
+ /// TargetBitWidth.
+ unsigned FieldBitStart;
+
+ /// Bit width of the memory access to perform.
+ unsigned AccessWidth;
+
+ /// The alignment of the memory access, or 0 if the default alignment should
+ /// be used.
+ //
+ // FIXME: Remove use of 0 to encode default, instead have IRgen do the right
+ // thing when it generates the code, if avoiding align directives is
+ // desired.
+ CharUnits AccessAlignment;
+
+ /// Offset for the target value.
+ unsigned TargetBitOffset;
+
+ /// Number of bits in the access that are destined for the bit-field.
+ unsigned TargetBitWidth;
+ };
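+
+ // Illustrative example (hypothetical field): on a little-endian target
+ // without register-sized widening, a bit-field 'int x : 20;' that starts at
+ // bit offset 0 of its record is typically described by one component with
+ // AccessWidth = 32, FieldBitStart = 0, TargetBitOffset = 0 and
+ // TargetBitWidth = 20; wider or misaligned bit-fields may need up to three
+ // components.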
+
+private:
+ /// The components to use to access the bit-field. We may need up to three
+ /// separate components to support up to i64 bit-field access (4 + 2 + 1 byte
+ /// accesses).
+ //
+ // FIXME: De-hardcode this, just allocate following the struct.
+ AccessInfo Components[3];
+
+ /// The total size of the bit-field, in bits.
+ unsigned Size;
+
+ /// The number of access components to use.
+ unsigned NumComponents;
+
+ /// Whether the bit-field is signed.
+ bool IsSigned : 1;
+
+public:
+ CGBitFieldInfo(unsigned Size, unsigned NumComponents, AccessInfo *_Components,
+ bool IsSigned) : Size(Size), NumComponents(NumComponents),
+ IsSigned(IsSigned) {
+ assert(NumComponents <= 3 && "invalid number of components!");
+ for (unsigned i = 0; i != NumComponents; ++i)
+ Components[i] = _Components[i];
+
+ // Check some invariants.
+ unsigned AccessedSize = 0;
+ for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
+ const AccessInfo &AI = getComponent(i);
+ AccessedSize += AI.TargetBitWidth;
+
+ // We shouldn't try to load 0 bits.
+ assert(AI.TargetBitWidth > 0);
+
+ // We can't load more bits than we accessed.
+ assert(AI.FieldBitStart + AI.TargetBitWidth <= AI.AccessWidth);
+
+ // We shouldn't put any bits outside the result size.
+ assert(AI.TargetBitWidth + AI.TargetBitOffset <= Size);
+ }
+
+ // Check that the total number of target bits matches the total bit-field
+ // size.
+ assert(AccessedSize == Size && "Total size does not match accessed size!");
+ }
+
+public:
+ /// \brief Check whether this bit-field access is signed (i.e., should be
+ /// sign extended on loads).
+ bool isSigned() const { return IsSigned; }
+
+ /// \brief Get the size of the bit-field, in bits.
+ unsigned getSize() const { return Size; }
+
+ /// @name Component Access
+ /// @{
+
+ unsigned getNumComponents() const { return NumComponents; }
+
+ const AccessInfo &getComponent(unsigned Index) const {
+ assert(Index < getNumComponents() && "Invalid access!");
+ return Components[Index];
+ }
+
+ /// @}
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+
+ /// \brief Given a bit-field decl, build an appropriate helper object for
+ /// accessing that field (which is expected to have the given offset and
+ /// size).
+ static CGBitFieldInfo MakeInfo(class CodeGenTypes &Types, const FieldDecl *FD,
+ uint64_t FieldOffset, uint64_t FieldSize);
+
+ /// \brief Given a bit-field decl, build an appropriate helper object for
+ /// accessing that field (which is expected to have the given offset and
+ /// size). The field decl should be known to be contained within a type of at
+ /// least the given size and with the given alignment.
+ static CGBitFieldInfo MakeInfo(CodeGenTypes &Types, const FieldDecl *FD,
+ uint64_t FieldOffset, uint64_t FieldSize,
+ uint64_t ContainingTypeSizeInBits,
+ unsigned ContainingTypeAlign);
+};
+
+/// CGRecordLayout - This class handles struct and union layout info while
+/// lowering AST types to LLVM types.
+///
+/// These layout objects are only created on demand as IR generation requires.
+class CGRecordLayout {
+ friend class CodeGenTypes;
+
+ CGRecordLayout(const CGRecordLayout&); // DO NOT IMPLEMENT
+ void operator=(const CGRecordLayout&); // DO NOT IMPLEMENT
+
+private:
+ /// The LLVM type corresponding to this record layout; used when
+ /// laying it out as a complete object.
+ llvm::StructType *CompleteObjectType;
+
+ /// The LLVM type for the non-virtual part of this record layout;
+ /// used when laying it out as a base subobject.
+ llvm::StructType *BaseSubobjectType;
+
+ /// Map from (non-bit-field) struct field to the corresponding llvm struct
+ /// type field no. This info is populated by record builder.
+ llvm::DenseMap<const FieldDecl *, unsigned> FieldInfo;
+
+ /// Map from (bit-field) struct field to the corresponding llvm struct type
+ /// field no. This info is populated by record builder.
+ llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
+
+ // FIXME: Maybe we could use a CXXBaseSpecifier as the key and use a single
+ // map for both virtual and non virtual bases.
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
+
+ /// Map from virtual bases to their field index in the complete object.
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> CompleteObjectVirtualBases;
+
+ /// False if any direct or indirect subobject of this class, when
+ /// considered as a complete object, requires a non-zero bitpattern
+ /// when zero-initialized.
+ bool IsZeroInitializable : 1;
+
+ /// False if any direct or indirect subobject of this class, when
+ /// considered as a base subobject, requires a non-zero bitpattern
+ /// when zero-initialized.
+ bool IsZeroInitializableAsBase : 1;
+
+public:
+ CGRecordLayout(llvm::StructType *CompleteObjectType,
+ llvm::StructType *BaseSubobjectType,
+ bool IsZeroInitializable,
+ bool IsZeroInitializableAsBase)
+ : CompleteObjectType(CompleteObjectType),
+ BaseSubobjectType(BaseSubobjectType),
+ IsZeroInitializable(IsZeroInitializable),
+ IsZeroInitializableAsBase(IsZeroInitializableAsBase) {}
+
+ /// \brief Return the "complete object" LLVM type associated with
+ /// this record.
+ llvm::StructType *getLLVMType() const {
+ return CompleteObjectType;
+ }
+
+ /// \brief Return the "base subobject" LLVM type associated with
+ /// this record.
+ llvm::StructType *getBaseSubobjectLLVMType() const {
+ return BaseSubobjectType;
+ }
+
+ /// \brief Check whether this struct can be C++ zero-initialized
+ /// with a zeroinitializer.
+ bool isZeroInitializable() const {
+ return IsZeroInitializable;
+ }
+
+ /// \brief Check whether this struct can be C++ zero-initialized
+ /// with a zeroinitializer when considered as a base subobject.
+ bool isZeroInitializableAsBase() const {
+ return IsZeroInitializableAsBase;
+ }
+
+ /// \brief Return llvm::StructType element number that corresponds to the
+ /// field FD.
+ unsigned getLLVMFieldNo(const FieldDecl *FD) const {
+ assert(!FD->isBitField() && "Invalid call for bit-field decl!");
+ assert(FieldInfo.count(FD) && "Invalid field for record!");
+ return FieldInfo.lookup(FD);
+ }
+
+ unsigned getNonVirtualBaseLLVMFieldNo(const CXXRecordDecl *RD) const {
+ assert(NonVirtualBases.count(RD) && "Invalid non-virtual base!");
+ return NonVirtualBases.lookup(RD);
+ }
+
+ /// \brief Return the LLVM field index corresponding to the given
+ /// virtual base. Only valid when operating on the complete object.
+ unsigned getVirtualBaseIndex(const CXXRecordDecl *base) const {
+ assert(CompleteObjectVirtualBases.count(base) && "Invalid virtual base!");
+ return CompleteObjectVirtualBases.lookup(base);
+ }
+
+ /// \brief Return the BitFieldInfo that corresponds to the field FD.
+ const CGBitFieldInfo &getBitFieldInfo(const FieldDecl *FD) const {
+ assert(FD->isBitField() && "Invalid call for non bit-field decl!");
+ llvm::DenseMap<const FieldDecl *, CGBitFieldInfo>::const_iterator
+ it = BitFields.find(FD);
+ assert(it != BitFields.end() && "Unable to find bitfield info");
+ return it->second;
+ }
+
+ void print(raw_ostream &OS) const;
+ void dump() const;
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
new file mode 100644
index 0000000..1193e97
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -0,0 +1,1170 @@
+//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Builder implementation for CGRecordLayout objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGRecordLayout.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "CodeGenTypes.h"
+#include "CGCXXABI.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Type.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+
+class CGRecordLayoutBuilder {
+public:
+ /// FieldTypes - Holds the LLVM types that the struct is created from.
+ ///
+ SmallVector<llvm::Type *, 16> FieldTypes;
+
+ /// BaseSubobjectType - Holds the LLVM type for the non-virtual part
+ /// of the struct. For example, consider:
+ ///
+ /// struct A { int i; };
+ /// struct B { void *v; };
+ /// struct C : virtual A, B { };
+ ///
+ /// The LLVM type of C will be
+ /// %struct.C = type { i32 (...)**, %struct.A, i32, %struct.B }
+ ///
+ /// And the LLVM type of the non-virtual base struct will be
+ /// %struct.C.base = type { i32 (...)**, %struct.A, i32 }
+ ///
+ /// This only gets initialized if the base subobject type is
+ /// different from the complete-object type.
+ llvm::StructType *BaseSubobjectType;
+
+ /// FieldInfo - Holds a field and its corresponding LLVM field number.
+ llvm::DenseMap<const FieldDecl *, unsigned> Fields;
+
+ /// BitFieldInfo - Holds location and size information about a bit field.
+ llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
+
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBases;
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> VirtualBases;
+
+ /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
+ /// primary base classes for some other direct or indirect base class.
+ CXXIndirectPrimaryBaseSet IndirectPrimaryBases;
+
+ /// LaidOutVirtualBases - A set of all laid out virtual bases, used to avoid
+ /// laying out virtual bases more than once.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> LaidOutVirtualBases;
+
+ /// IsZeroInitializable - Whether this struct can be C++
+ /// zero-initialized with an LLVM zeroinitializer.
+ bool IsZeroInitializable;
+ bool IsZeroInitializableAsBase;
+
+ /// Packed - Whether the resulting LLVM struct will be packed or not.
+ bool Packed;
+
+ /// IsMsStruct - Whether ms_struct is in effect or not
+ bool IsMsStruct;
+
+private:
+ CodeGenTypes &Types;
+
+ /// LastLaidOutBaseInfo - Contains the offset and non-virtual size of the
+ /// last base laid out. Used so that we can replace the last laid out base
+ /// type with an i8 array if needed.
+ struct LastLaidOutBaseInfo {
+ CharUnits Offset;
+ CharUnits NonVirtualSize;
+
+ bool isValid() const { return !NonVirtualSize.isZero(); }
+ void invalidate() { NonVirtualSize = CharUnits::Zero(); }
+
+ } LastLaidOutBase;
+
+ /// Alignment - Contains the alignment of the RecordDecl.
+ CharUnits Alignment;
+
+ /// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
+ /// this will have the number of bits still available in the field.
+ char BitsAvailableInLastField;
+
+ /// NextFieldOffset - Holds the next field offset.
+ CharUnits NextFieldOffset;
+
+ /// LayoutUnionField - Will layout a field in a union and return the type
+ /// that the field will have.
+ llvm::Type *LayoutUnionField(const FieldDecl *Field,
+ const ASTRecordLayout &Layout);
+
+ /// LayoutUnion - Will layout a union RecordDecl.
+ void LayoutUnion(const RecordDecl *D);
+
+ /// LayoutFields - try to layout all fields in the record decl.
+ /// Returns false if the operation failed because the struct is not packed.
+ bool LayoutFields(const RecordDecl *D);
+
+ /// Layout a single base, virtual or non-virtual
+ bool LayoutBase(const CXXRecordDecl *base,
+ const CGRecordLayout &baseLayout,
+ CharUnits baseOffset);
+
+ /// LayoutVirtualBase - layout a single virtual base.
+ bool LayoutVirtualBase(const CXXRecordDecl *base,
+ CharUnits baseOffset);
+
+ /// LayoutVirtualBases - layout the virtual bases of a record decl.
+ bool LayoutVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout);
+
+ /// MSLayoutVirtualBases - layout the virtual bases of a record decl,
+ /// like MSVC.
+ bool MSLayoutVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout);
+
+ /// LayoutNonVirtualBase - layout a single non-virtual base.
+ bool LayoutNonVirtualBase(const CXXRecordDecl *base,
+ CharUnits baseOffset);
+
+ /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
+ bool LayoutNonVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout);
+
+ /// ComputeNonVirtualBaseType - Compute the non-virtual base field types.
+ bool ComputeNonVirtualBaseType(const CXXRecordDecl *RD);
+
+ /// LayoutField - layout a single field. Returns false if the operation failed
+ /// because the current struct is not packed.
+ bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);
+
+ /// LayoutBitField - layout a single bit field.
+ void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);
+
+ /// AppendField - Appends a field with the given offset and type.
+ void AppendField(CharUnits fieldOffset, llvm::Type *FieldTy);
+
+ /// AppendPadding - Appends enough padding bytes so that the total
+ /// struct size is a multiple of the field alignment.
+ void AppendPadding(CharUnits fieldOffset, CharUnits fieldAlignment);
+
+ /// ResizeLastBaseFieldIfNecessary - Fields and bases can be laid out in the
+ /// tail padding of a previous base. If this happens, the type of the previous
+ /// base needs to be changed to an array of i8. Returns true if the last
+ /// laid out base was resized.
+ bool ResizeLastBaseFieldIfNecessary(CharUnits offset);
+
+ /// getByteArrayType - Returns a byte array type with the given number of
+ /// elements.
+ llvm::Type *getByteArrayType(CharUnits NumBytes);
+
+ /// AppendBytes - Append a given number of bytes to the record.
+ void AppendBytes(CharUnits numBytes);
+
+ /// AppendTailPadding - Append enough tail padding so that the type will have
+ /// the passed size.
+ void AppendTailPadding(CharUnits RecordSize);
+
+ CharUnits getTypeAlignment(llvm::Type *Ty) const;
+
+ /// getAlignmentAsLLVMStruct - Returns the maximum alignment of all the
+ /// LLVM element types.
+ CharUnits getAlignmentAsLLVMStruct() const;
+
+ /// CheckZeroInitializable - Check if the given type contains a pointer
+ /// to data member.
+ void CheckZeroInitializable(QualType T);
+
+public:
+ CGRecordLayoutBuilder(CodeGenTypes &Types)
+ : BaseSubobjectType(0),
+ IsZeroInitializable(true), IsZeroInitializableAsBase(true),
+ Packed(false), IsMsStruct(false),
+ Types(Types), BitsAvailableInLastField(0) { }
+
+ /// Layout - Will layout a RecordDecl.
+ void Layout(const RecordDecl *D);
+};
+
+}
+
+void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
+ Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
+ Packed = D->hasAttr<PackedAttr>();
+
+ IsMsStruct = D->hasAttr<MsStructAttr>();
+
+ if (D->isUnion()) {
+ LayoutUnion(D);
+ return;
+ }
+
+ if (LayoutFields(D))
+ return;
+
+ // We weren't able to layout the struct. Try again with a packed struct.
+ Packed = true;
+ LastLaidOutBase.invalidate();
+ NextFieldOffset = CharUnits::Zero();
+ FieldTypes.clear();
+ Fields.clear();
+ BitFields.clear();
+ NonVirtualBases.clear();
+ VirtualBases.clear();
+
+ LayoutFields(D);
+}
+
+CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
+ const FieldDecl *FD,
+ uint64_t FieldOffset,
+ uint64_t FieldSize,
+ uint64_t ContainingTypeSizeInBits,
+ unsigned ContainingTypeAlign) {
+ llvm::Type *Ty = Types.ConvertTypeForMem(FD->getType());
+ CharUnits TypeSizeInBytes =
+ CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(Ty));
+ uint64_t TypeSizeInBits = Types.getContext().toBits(TypeSizeInBytes);
+
+ bool IsSigned = FD->getType()->isSignedIntegerOrEnumerationType();
+
+ if (FieldSize > TypeSizeInBits) {
+ // We have a wide bit-field. The extra bits are only used for padding, so
+ // if we have a bitfield of type T, with size N:
+ //
+ // T t : N;
+ //
+ // We can just assume that it's:
+ //
+ // T t : sizeof(T);
+ //
+ FieldSize = TypeSizeInBits;
+ }
+
+ // On big-endian machines the first fields are in higher bit positions, so
+ // reverse the offset. The byte offsets are reversed (back) later.
+ if (Types.getTargetData().isBigEndian()) {
+ FieldOffset = ((ContainingTypeSizeInBits)-FieldOffset-FieldSize);
+ }
+
+ // Compute the access components. The policy we use is to start by attempting
+ // to access using the width of the bit-field type itself and to always access
+ // at aligned indices of that type. If such an access would fail because it
+ // extends past the bound of the type, then we reduce size to the next smaller
+ // power of two and retry. The current algorithm assumes pow2 sized types,
+ // although this is easy to fix.
+ //
+ assert(llvm::isPowerOf2_32(TypeSizeInBits) && "Unexpected type size!");
+ CGBitFieldInfo::AccessInfo Components[3];
+ unsigned NumComponents = 0;
+ unsigned AccessedTargetBits = 0; // The number of target bits accessed.
+ unsigned AccessWidth = TypeSizeInBits; // The current access width to attempt.
+
+ // If requested, widen the initial bit-field access to be register sized. The
+ // theory is that this is most likely to allow multiple accesses into the same
+ // structure to be coalesced, and that the backend should be smart enough to
+ // narrow the store if no coalescing is ever done.
+ //
+ // The subsequent code will handle aligning these accesses to common
+ // boundaries and guarantee that we do not access past the end of the
+ // structure.
+ if (Types.getCodeGenOpts().UseRegisterSizedBitfieldAccess) {
+ if (AccessWidth < Types.getTarget().getRegisterWidth())
+ AccessWidth = Types.getTarget().getRegisterWidth();
+ }
+
+ // Round down from the field offset to find the first access position that is
+ // at an aligned offset of the initial access type.
+ uint64_t AccessStart = FieldOffset - (FieldOffset % AccessWidth);
+
+ // Adjust initial access size to fit within record.
+ while (AccessWidth > Types.getTarget().getCharWidth() &&
+ AccessStart + AccessWidth > ContainingTypeSizeInBits) {
+ AccessWidth >>= 1;
+ AccessStart = FieldOffset - (FieldOffset % AccessWidth);
+ }
+
+ while (AccessedTargetBits < FieldSize) {
+ // Check that we can access using a type of this size, without reading off
+ // the end of the structure. This can occur with packed structures and
+ // -fno-bitfield-type-align, for example.
+ if (AccessStart + AccessWidth > ContainingTypeSizeInBits) {
+ // If so, reduce access size to the next smaller power-of-two and retry.
+ AccessWidth >>= 1;
+ assert(AccessWidth >= Types.getTarget().getCharWidth()
+ && "Cannot access under byte size!");
+ continue;
+ }
+
+ // Otherwise, add an access component.
+
+ // First, compute the bits inside this access which are part of the
+ // target. We are reading bits [AccessStart, AccessStart + AccessWidth); the
+ // intersection with [FieldOffset, FieldOffset + FieldSize) gives the bits
+ // in the target that we are reading.
+ assert(FieldOffset < AccessStart + AccessWidth && "Invalid access start!");
+ assert(AccessStart < FieldOffset + FieldSize && "Invalid access start!");
+ uint64_t AccessBitsInFieldStart = std::max(AccessStart, FieldOffset);
+ uint64_t AccessBitsInFieldSize =
+ std::min(AccessWidth + AccessStart,
+ FieldOffset + FieldSize) - AccessBitsInFieldStart;
+
+ assert(NumComponents < 3 && "Unexpected number of components!");
+ CGBitFieldInfo::AccessInfo &AI = Components[NumComponents++];
+ AI.FieldIndex = 0;
+ // FIXME: We still follow the old access pattern of only using the field
+ // byte offset. We should switch this once we fix the struct layout to be
+ // pretty.
+
+    // On big-endian machines we reversed the bit offset because the first
+    // fields are in the higher bits. But this also reverses the byte order,
+    // so fix that here by reversing the byte offset on big-endian machines.
+ if (Types.getTargetData().isBigEndian()) {
+ AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(
+ ContainingTypeSizeInBits - AccessStart - AccessWidth);
+ } else {
+ AI.FieldByteOffset = Types.getContext().toCharUnitsFromBits(AccessStart);
+ }
+ AI.FieldBitStart = AccessBitsInFieldStart - AccessStart;
+ AI.AccessWidth = AccessWidth;
+ AI.AccessAlignment = Types.getContext().toCharUnitsFromBits(
+ llvm::MinAlign(ContainingTypeAlign, AccessStart));
+ AI.TargetBitOffset = AccessedTargetBits;
+ AI.TargetBitWidth = AccessBitsInFieldSize;
+
+ AccessStart += AccessWidth;
+ AccessedTargetBits += AI.TargetBitWidth;
+ }
+
+ assert(AccessedTargetBits == FieldSize && "Invalid bit-field access!");
+ return CGBitFieldInfo(FieldSize, NumComponents, Components, IsSigned);
+}
+
+CGBitFieldInfo CGBitFieldInfo::MakeInfo(CodeGenTypes &Types,
+ const FieldDecl *FD,
+ uint64_t FieldOffset,
+ uint64_t FieldSize) {
+ const RecordDecl *RD = FD->getParent();
+ const ASTRecordLayout &RL = Types.getContext().getASTRecordLayout(RD);
+ uint64_t ContainingTypeSizeInBits = Types.getContext().toBits(RL.getSize());
+ unsigned ContainingTypeAlign = Types.getContext().toBits(RL.getAlignment());
+
+ return MakeInfo(Types, FD, FieldOffset, FieldSize, ContainingTypeSizeInBits,
+ ContainingTypeAlign);
+}
+
+void CGRecordLayoutBuilder::LayoutBitField(const FieldDecl *D,
+ uint64_t fieldOffset) {
+ uint64_t fieldSize = D->getBitWidthValue(Types.getContext());
+
+ if (fieldSize == 0)
+ return;
+
+ uint64_t nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
+ CharUnits numBytesToAppend;
+ unsigned charAlign = Types.getContext().getTargetInfo().getCharAlign();
+
+ if (fieldOffset < nextFieldOffsetInBits && !BitsAvailableInLastField) {
+ assert(fieldOffset % charAlign == 0 &&
+ "Field offset not aligned correctly");
+
+ CharUnits fieldOffsetInCharUnits =
+ Types.getContext().toCharUnitsFromBits(fieldOffset);
+
+ // Try to resize the last base field.
+ if (ResizeLastBaseFieldIfNecessary(fieldOffsetInCharUnits))
+ nextFieldOffsetInBits = Types.getContext().toBits(NextFieldOffset);
+ }
+
+ if (fieldOffset < nextFieldOffsetInBits) {
+ assert(BitsAvailableInLastField && "Bitfield size mismatch!");
+ assert(!NextFieldOffset.isZero() && "Must have laid out at least one byte");
+
+ // The bitfield begins in the previous bit-field.
+ numBytesToAppend = Types.getContext().toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(fieldSize - BitsAvailableInLastField,
+ charAlign));
+ } else {
+ assert(fieldOffset % charAlign == 0 &&
+ "Field offset not aligned correctly");
+
+ // Append padding if necessary.
+ AppendPadding(Types.getContext().toCharUnitsFromBits(fieldOffset),
+ CharUnits::One());
+
+ numBytesToAppend = Types.getContext().toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(fieldSize, charAlign));
+
+ assert(!numBytesToAppend.isZero() && "No bytes to append!");
+ }
+
+ // Add the bit field info.
+ BitFields.insert(std::make_pair(D,
+ CGBitFieldInfo::MakeInfo(Types, D, fieldOffset, fieldSize)));
+
+ AppendBytes(numBytesToAppend);
+
+ BitsAvailableInLastField =
+ Types.getContext().toBits(NextFieldOffset) - (fieldOffset + fieldSize);
+}
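+
+// For example (assuming 8-bit chars and a layout that places `a` at bit 0 and
+// `b` at bit 3), `struct { int a : 3; int b : 6; };` is handled as follows:
+// laying out `a` appends one i8 and leaves BitsAvailableInLastField = 5;
+// laying out `b` starts inside that byte, so only one more i8 is appended
+// (RoundUpToAlignment(6 - 5, 8) bits) and `b`'s storage spans both i8 fields.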
+
+bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
+ uint64_t fieldOffset) {
+ // If the field is packed, then we need a packed struct.
+ if (!Packed && D->hasAttr<PackedAttr>())
+ return false;
+
+ if (D->isBitField()) {
+ // We must use packed structs for unnamed bit fields since they
+ // don't affect the struct alignment.
+ if (!Packed && !D->getDeclName())
+ return false;
+
+ LayoutBitField(D, fieldOffset);
+ return true;
+ }
+
+ CheckZeroInitializable(D->getType());
+
+ assert(fieldOffset % Types.getTarget().getCharWidth() == 0
+ && "field offset is not on a byte boundary!");
+ CharUnits fieldOffsetInBytes
+ = Types.getContext().toCharUnitsFromBits(fieldOffset);
+
+ llvm::Type *Ty = Types.ConvertTypeForMem(D->getType());
+ CharUnits typeAlignment = getTypeAlignment(Ty);
+
+  // If the type alignment is larger than the struct alignment, we must use
+  // a packed struct.
+ if (typeAlignment > Alignment) {
+ assert(!Packed && "Alignment is wrong even with packed struct!");
+ return false;
+ }
+
+ if (!Packed) {
+ if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
+ const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
+ if (const MaxFieldAlignmentAttr *MFAA =
+ RD->getAttr<MaxFieldAlignmentAttr>()) {
+ if (MFAA->getAlignment() != Types.getContext().toBits(typeAlignment))
+ return false;
+ }
+ }
+ }
+
+ // Round up the field offset to the alignment of the field type.
+ CharUnits alignedNextFieldOffsetInBytes =
+ NextFieldOffset.RoundUpToAlignment(typeAlignment);
+
+ if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
+ // Try to resize the last base field.
+ if (ResizeLastBaseFieldIfNecessary(fieldOffsetInBytes)) {
+ alignedNextFieldOffsetInBytes =
+ NextFieldOffset.RoundUpToAlignment(typeAlignment);
+ }
+ }
+
+ if (fieldOffsetInBytes < alignedNextFieldOffsetInBytes) {
+ assert(!Packed && "Could not place field even with packed struct!");
+ return false;
+ }
+
+ AppendPadding(fieldOffsetInBytes, typeAlignment);
+
+ // Now append the field.
+ Fields[D] = FieldTypes.size();
+ AppendField(fieldOffsetInBytes, Ty);
+
+ LastLaidOutBase.invalidate();
+ return true;
+}
+
+llvm::Type *
+CGRecordLayoutBuilder::LayoutUnionField(const FieldDecl *Field,
+ const ASTRecordLayout &Layout) {
+ if (Field->isBitField()) {
+ uint64_t FieldSize = Field->getBitWidthValue(Types.getContext());
+
+ // Ignore zero sized bit fields.
+ if (FieldSize == 0)
+ return 0;
+
+ llvm::Type *FieldTy = llvm::Type::getInt8Ty(Types.getLLVMContext());
+ CharUnits NumBytesToAppend = Types.getContext().toCharUnitsFromBits(
+ llvm::RoundUpToAlignment(FieldSize,
+ Types.getContext().getTargetInfo().getCharAlign()));
+
+ if (NumBytesToAppend > CharUnits::One())
+ FieldTy = llvm::ArrayType::get(FieldTy, NumBytesToAppend.getQuantity());
+
+ // Add the bit field info.
+ BitFields.insert(std::make_pair(Field,
+ CGBitFieldInfo::MakeInfo(Types, Field, 0, FieldSize)));
+ return FieldTy;
+ }
+
+ // This is a regular union field.
+ Fields[Field] = 0;
+ return Types.ConvertTypeForMem(Field->getType());
+}
+
+void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
+ assert(D->isUnion() && "Can't call LayoutUnion on a non-union record!");
+
+ const ASTRecordLayout &layout = Types.getContext().getASTRecordLayout(D);
+
+ llvm::Type *unionType = 0;
+ CharUnits unionSize = CharUnits::Zero();
+ CharUnits unionAlign = CharUnits::Zero();
+
+ bool hasOnlyZeroSizedBitFields = true;
+ bool checkedFirstFieldZeroInit = false;
+
+ unsigned fieldNo = 0;
+ for (RecordDecl::field_iterator field = D->field_begin(),
+ fieldEnd = D->field_end(); field != fieldEnd; ++field, ++fieldNo) {
+ assert(layout.getFieldOffset(fieldNo) == 0 &&
+ "Union field offset did not start at the beginning of record!");
+ llvm::Type *fieldType = LayoutUnionField(*field, layout);
+
+ if (!fieldType)
+ continue;
+
+ if (field->getDeclName() && !checkedFirstFieldZeroInit) {
+ CheckZeroInitializable(field->getType());
+ checkedFirstFieldZeroInit = true;
+ }
+
+ hasOnlyZeroSizedBitFields = false;
+
+ CharUnits fieldAlign = CharUnits::fromQuantity(
+ Types.getTargetData().getABITypeAlignment(fieldType));
+ CharUnits fieldSize = CharUnits::fromQuantity(
+ Types.getTargetData().getTypeAllocSize(fieldType));
+
+ if (fieldAlign < unionAlign)
+ continue;
+
+ if (fieldAlign > unionAlign || fieldSize > unionSize) {
+ unionType = fieldType;
+ unionAlign = fieldAlign;
+ unionSize = fieldSize;
+ }
+ }
+
+ // Now add our field.
+ if (unionType) {
+ AppendField(CharUnits::Zero(), unionType);
+
+ if (getTypeAlignment(unionType) > layout.getAlignment()) {
+ // We need a packed struct.
+ Packed = true;
+ unionAlign = CharUnits::One();
+ }
+ }
+ if (unionAlign.isZero()) {
+ (void)hasOnlyZeroSizedBitFields;
+ assert(hasOnlyZeroSizedBitFields &&
+ "0-align record did not have all zero-sized bit-fields!");
+ unionAlign = CharUnits::One();
+ }
+
+ // Append tail padding.
+ CharUnits recordSize = layout.getSize();
+ if (recordSize > unionSize)
+ AppendPadding(recordSize, unionAlign);
+}
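+
+// For instance (assuming a target where int is four bytes with four-byte
+// alignment), `union { char c; int i; };` keeps `i` as the representative
+// field: i32 has both the larger alignment and the larger size, so the union
+// is lowered with a single i32 element and no tail padding, since the record
+// size already matches.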
+
+bool CGRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *base,
+ const CGRecordLayout &baseLayout,
+ CharUnits baseOffset) {
+ ResizeLastBaseFieldIfNecessary(baseOffset);
+
+ AppendPadding(baseOffset, CharUnits::One());
+
+ const ASTRecordLayout &baseASTLayout
+ = Types.getContext().getASTRecordLayout(base);
+
+ LastLaidOutBase.Offset = NextFieldOffset;
+ LastLaidOutBase.NonVirtualSize = baseASTLayout.getNonVirtualSize();
+
+ llvm::StructType *subobjectType = baseLayout.getBaseSubobjectLLVMType();
+ if (getTypeAlignment(subobjectType) > Alignment)
+ return false;
+
+ AppendField(baseOffset, subobjectType);
+ return true;
+}
+
+bool CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *base,
+ CharUnits baseOffset) {
+ // Ignore empty bases.
+ if (base->isEmpty()) return true;
+
+ const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
+ if (IsZeroInitializableAsBase) {
+ assert(IsZeroInitializable &&
+ "class zero-initializable as base but not as complete object");
+
+ IsZeroInitializable = IsZeroInitializableAsBase =
+ baseLayout.isZeroInitializableAsBase();
+ }
+
+ if (!LayoutBase(base, baseLayout, baseOffset))
+ return false;
+ NonVirtualBases[base] = (FieldTypes.size() - 1);
+ return true;
+}
+
+bool
+CGRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *base,
+ CharUnits baseOffset) {
+ // Ignore empty bases.
+ if (base->isEmpty()) return true;
+
+ const CGRecordLayout &baseLayout = Types.getCGRecordLayout(base);
+ if (IsZeroInitializable)
+ IsZeroInitializable = baseLayout.isZeroInitializableAsBase();
+
+ if (!LayoutBase(base, baseLayout, baseOffset))
+ return false;
+ VirtualBases[base] = (FieldTypes.size() - 1);
+ return true;
+}
+
+bool
+CGRecordLayoutBuilder::MSLayoutVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout) {
+ if (!RD->getNumVBases())
+ return true;
+
+ // The vbases list is uniqued and ordered by a depth-first
+ // traversal, which is what we need here.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->castAs<RecordType>()->getDecl());
+
+ CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
+ if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
+ return false;
+ }
+ return true;
+}
+
+/// LayoutVirtualBases - lay out the virtual bases of a record decl.
+bool
+CGRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout) {
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // We only want to lay out virtual bases that aren't indirect primary bases
+ // of some other base.
+ if (I->isVirtual() && !IndirectPrimaryBases.count(BaseDecl)) {
+ // Only lay out the base once.
+ if (!LaidOutVirtualBases.insert(BaseDecl))
+ continue;
+
+ CharUnits vbaseOffset = Layout.getVBaseClassOffset(BaseDecl);
+ if (!LayoutVirtualBase(BaseDecl, vbaseOffset))
+ return false;
+ }
+
+ if (!BaseDecl->getNumVBases()) {
+ // This base isn't interesting since it doesn't have any virtual bases.
+ continue;
+ }
+
+ if (!LayoutVirtualBases(BaseDecl, Layout))
+ return false;
+ }
+ return true;
+}
+
+bool
+CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout) {
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ // If we have a primary base, lay it out first.
+ if (PrimaryBase) {
+ if (!Layout.isPrimaryBaseVirtual()) {
+ if (!LayoutNonVirtualBase(PrimaryBase, CharUnits::Zero()))
+ return false;
+ } else {
+ if (!LayoutVirtualBase(PrimaryBase, CharUnits::Zero()))
+ return false;
+ }
+
+ // Otherwise, add a vtable / vf-table if the layout says to do so.
+ } else if (Types.getContext().getTargetInfo().getCXXABI() == CXXABI_Microsoft
+ ? Layout.getVFPtrOffset() != CharUnits::fromQuantity(-1)
+ : RD->isDynamicClass()) {
+ llvm::Type *FunctionType =
+ llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
+ /*isVarArg=*/true);
+ llvm::Type *VTableTy = FunctionType->getPointerTo();
+
+ assert(NextFieldOffset.isZero() &&
+ "VTable pointer must come first!");
+ AppendField(CharUnits::Zero(), VTableTy->getPointerTo());
+ }
+
+ // Layout the non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // We've already laid out the primary base.
+ if (BaseDecl == PrimaryBase && !Layout.isPrimaryBaseVirtual())
+ continue;
+
+ if (!LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl)))
+ return false;
+ }
+
+ // Add a vb-table pointer if the layout insists.
+ if (Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1)) {
+ CharUnits VBPtrOffset = Layout.getVBPtrOffset();
+ llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
+ AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
+ AppendField(VBPtrOffset, Vbptr);
+ }
+
+ return true;
+}
+
+bool
+CGRecordLayoutBuilder::ComputeNonVirtualBaseType(const CXXRecordDecl *RD) {
+ const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(RD);
+
+ CharUnits NonVirtualSize = Layout.getNonVirtualSize();
+ CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
+ CharUnits AlignedNonVirtualTypeSize =
+ NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);
+
+ // First check if we can use the same fields as for the complete class.
+ CharUnits RecordSize = Layout.getSize();
+ if (AlignedNonVirtualTypeSize == RecordSize)
+ return true;
+
+ // Check if we need padding.
+ CharUnits AlignedNextFieldOffset =
+ NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());
+
+ if (AlignedNextFieldOffset > AlignedNonVirtualTypeSize) {
+ assert(!Packed && "cannot layout even as packed struct");
+ return false; // Needs packing.
+ }
+
+ bool needsPadding = (AlignedNonVirtualTypeSize != AlignedNextFieldOffset);
+ if (needsPadding) {
+ CharUnits NumBytes = AlignedNonVirtualTypeSize - AlignedNextFieldOffset;
+ FieldTypes.push_back(getByteArrayType(NumBytes));
+ }
+
+ BaseSubobjectType = llvm::StructType::create(Types.getLLVMContext(),
+ FieldTypes, "", Packed);
+ Types.addRecordTypeName(RD, BaseSubobjectType, ".base");
+
+ // Pull the padding back off.
+ if (needsPadding)
+ FieldTypes.pop_back();
+
+ return true;
+}
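+
+// A distinct base-subobject type (named with a ".base" suffix) is only created
+// when the complete-object size exceeds the aligned non-virtual size, as
+// typically happens for classes with virtual bases; when the two sizes
+// coincide, the complete-object type is reused for base subobjects as well.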
+
+bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
+ assert(!D->isUnion() && "Can't call LayoutFields on a union!");
+ assert(!Alignment.isZero() && "Did not set alignment!");
+
+ const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
+
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
+ if (RD)
+ if (!LayoutNonVirtualBases(RD, Layout))
+ return false;
+
+ unsigned FieldNo = 0;
+ const FieldDecl *LastFD = 0;
+
+ for (RecordDecl::field_iterator Field = D->field_begin(),
+ FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are
+ // ignored:
+ const FieldDecl *FD = (*Field);
+ if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
+ --FieldNo;
+ continue;
+ }
+ LastFD = FD;
+ }
+
+ if (!LayoutField(*Field, Layout.getFieldOffset(FieldNo))) {
+ assert(!Packed &&
+ "Could not layout fields even with a packed LLVM struct!");
+ return false;
+ }
+ }
+
+ if (RD) {
+ // We've laid out the non-virtual bases and the fields, now compute the
+ // non-virtual base field types.
+ if (!ComputeNonVirtualBaseType(RD)) {
+ assert(!Packed && "Could not layout even with a packed LLVM struct!");
+ return false;
+ }
+
+ // Lay out the virtual bases. The MS ABI uses a different
+ // algorithm here due to the lack of primary virtual bases.
+ if (Types.getContext().getTargetInfo().getCXXABI() != CXXABI_Microsoft) {
+ RD->getIndirectPrimaryBases(IndirectPrimaryBases);
+ if (Layout.isPrimaryBaseVirtual())
+ IndirectPrimaryBases.insert(Layout.getPrimaryBase());
+
+ if (!LayoutVirtualBases(RD, Layout))
+ return false;
+ } else {
+ if (!MSLayoutVirtualBases(RD, Layout))
+ return false;
+ }
+ }
+
+ // Append tail padding if necessary.
+ AppendTailPadding(Layout.getSize());
+
+ return true;
+}
+
+void CGRecordLayoutBuilder::AppendTailPadding(CharUnits RecordSize) {
+ ResizeLastBaseFieldIfNecessary(RecordSize);
+
+ assert(NextFieldOffset <= RecordSize && "Size mismatch!");
+
+ CharUnits AlignedNextFieldOffset =
+ NextFieldOffset.RoundUpToAlignment(getAlignmentAsLLVMStruct());
+
+ if (AlignedNextFieldOffset == RecordSize) {
+ // We don't need any padding.
+ return;
+ }
+
+ CharUnits NumPadBytes = RecordSize - NextFieldOffset;
+ AppendBytes(NumPadBytes);
+}
+
+void CGRecordLayoutBuilder::AppendField(CharUnits fieldOffset,
+ llvm::Type *fieldType) {
+ CharUnits fieldSize =
+ CharUnits::fromQuantity(Types.getTargetData().getTypeAllocSize(fieldType));
+
+ FieldTypes.push_back(fieldType);
+
+ NextFieldOffset = fieldOffset + fieldSize;
+ BitsAvailableInLastField = 0;
+}
+
+void CGRecordLayoutBuilder::AppendPadding(CharUnits fieldOffset,
+ CharUnits fieldAlignment) {
+ assert(NextFieldOffset <= fieldOffset &&
+ "Incorrect field layout!");
+
+ // Do nothing if we're already at the right offset.
+ if (fieldOffset == NextFieldOffset) return;
+
+ // If we're not emitting a packed LLVM type, try to avoid adding
+ // unnecessary padding fields.
+ if (!Packed) {
+ // Round up the field offset to the alignment of the field type.
+ CharUnits alignedNextFieldOffset =
+ NextFieldOffset.RoundUpToAlignment(fieldAlignment);
+ assert(alignedNextFieldOffset <= fieldOffset);
+
+ // If that's the right offset, we're done.
+ if (alignedNextFieldOffset == fieldOffset) return;
+ }
+
+ // Otherwise we need explicit padding.
+ CharUnits padding = fieldOffset - NextFieldOffset;
+ AppendBytes(padding);
+}
+
+bool CGRecordLayoutBuilder::ResizeLastBaseFieldIfNecessary(CharUnits offset) {
+ // Check if we have a base to resize.
+ if (!LastLaidOutBase.isValid())
+ return false;
+
+ // This offset does not overlap with the tail padding.
+ if (offset >= NextFieldOffset)
+ return false;
+
+ // Restore the field offset and append an i8 array instead.
+ FieldTypes.pop_back();
+ NextFieldOffset = LastLaidOutBase.Offset;
+ AppendBytes(LastLaidOutBase.NonVirtualSize);
+ LastLaidOutBase.invalidate();
+
+ return true;
+}
+
+llvm::Type *CGRecordLayoutBuilder::getByteArrayType(CharUnits numBytes) {
+ assert(!numBytes.isZero() && "Empty byte arrays aren't allowed.");
+
+ llvm::Type *Ty = llvm::Type::getInt8Ty(Types.getLLVMContext());
+ if (numBytes > CharUnits::One())
+ Ty = llvm::ArrayType::get(Ty, numBytes.getQuantity());
+
+ return Ty;
+}
+
+void CGRecordLayoutBuilder::AppendBytes(CharUnits numBytes) {
+ if (numBytes.isZero())
+ return;
+
+ // Append the padding field
+ AppendField(NextFieldOffset, getByteArrayType(numBytes));
+}
+
+CharUnits CGRecordLayoutBuilder::getTypeAlignment(llvm::Type *Ty) const {
+ if (Packed)
+ return CharUnits::One();
+
+ return CharUnits::fromQuantity(Types.getTargetData().getABITypeAlignment(Ty));
+}
+
+CharUnits CGRecordLayoutBuilder::getAlignmentAsLLVMStruct() const {
+ if (Packed)
+ return CharUnits::One();
+
+ CharUnits maxAlignment = CharUnits::One();
+ for (size_t i = 0; i != FieldTypes.size(); ++i)
+ maxAlignment = std::max(maxAlignment, getTypeAlignment(FieldTypes[i]));
+
+ return maxAlignment;
+}
+
+/// Merge in whether a field of the given type is zero-initializable.
+void CGRecordLayoutBuilder::CheckZeroInitializable(QualType T) {
+ // This record already contains a member pointer.
+ if (!IsZeroInitializableAsBase)
+ return;
+
+ // Can only have member pointers if we're compiling C++.
+ if (!Types.getContext().getLangOpts().CPlusPlus)
+ return;
+
+ const Type *elementType = T->getBaseElementTypeUnsafe();
+
+ if (const MemberPointerType *MPT = elementType->getAs<MemberPointerType>()) {
+ if (!Types.getCXXABI().isZeroInitializable(MPT))
+ IsZeroInitializable = IsZeroInitializableAsBase = false;
+ } else if (const RecordType *RT = elementType->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
+ if (!Layout.isZeroInitializable())
+ IsZeroInitializable = IsZeroInitializableAsBase = false;
+ }
+}
+
+CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
+ llvm::StructType *Ty) {
+ CGRecordLayoutBuilder Builder(*this);
+
+ Builder.Layout(D);
+
+ Ty->setBody(Builder.FieldTypes, Builder.Packed);
+
+ // If we're in C++, compute the base subobject type.
+ llvm::StructType *BaseTy = 0;
+ if (isa<CXXRecordDecl>(D) && !D->isUnion()) {
+ BaseTy = Builder.BaseSubobjectType;
+ if (!BaseTy) BaseTy = Ty;
+ }
+
+ CGRecordLayout *RL =
+ new CGRecordLayout(Ty, BaseTy, Builder.IsZeroInitializable,
+ Builder.IsZeroInitializableAsBase);
+
+ RL->NonVirtualBases.swap(Builder.NonVirtualBases);
+ RL->CompleteObjectVirtualBases.swap(Builder.VirtualBases);
+
+ // Add all the field numbers.
+ RL->FieldInfo.swap(Builder.Fields);
+
+ // Add bitfield info.
+ RL->BitFields.swap(Builder.BitFields);
+
+ // Dump the layout, if requested.
+ if (getContext().getLangOpts().DumpRecordLayouts) {
+ llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
+ llvm::errs() << "Record: ";
+ D->dump();
+ llvm::errs() << "\nLayout: ";
+ RL->dump();
+ }
+
+#ifndef NDEBUG
+ // Verify that the computed LLVM struct size matches the AST layout size.
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(D);
+
+ uint64_t TypeSizeInBits = getContext().toBits(Layout.getSize());
+ assert(TypeSizeInBits == getTargetData().getTypeAllocSizeInBits(Ty) &&
+ "Type size mismatch!");
+
+ if (BaseTy) {
+ CharUnits NonVirtualSize = Layout.getNonVirtualSize();
+ CharUnits NonVirtualAlign = Layout.getNonVirtualAlign();
+ CharUnits AlignedNonVirtualTypeSize =
+ NonVirtualSize.RoundUpToAlignment(NonVirtualAlign);
+
+ uint64_t AlignedNonVirtualTypeSizeInBits =
+ getContext().toBits(AlignedNonVirtualTypeSize);
+
+ assert(AlignedNonVirtualTypeSizeInBits ==
+ getTargetData().getTypeAllocSizeInBits(BaseTy) &&
+ "Type size mismatch!");
+ }
+
+ // Verify that the LLVM and AST field offsets agree.
+ llvm::StructType *ST =
+ dyn_cast<llvm::StructType>(RL->getLLVMType());
+ const llvm::StructLayout *SL = getTargetData().getStructLayout(ST);
+
+ const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
+ RecordDecl::field_iterator it = D->field_begin();
+ const FieldDecl *LastFD = 0;
+ bool IsMsStruct = D->hasAttr<MsStructAttr>();
+ for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
+ const FieldDecl *FD = *it;
+
+ // For non-bit-fields, just check that the LLVM struct offset matches the
+ // AST offset.
+ if (!FD->isBitField()) {
+ unsigned FieldNo = RL->getLLVMFieldNo(FD);
+ assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
+ "Invalid field offset!");
+ LastFD = FD;
+ continue;
+ }
+
+ if (IsMsStruct) {
+ // Zero-length bitfields following non-bitfield members are
+ // ignored:
+ if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
+ --i;
+ continue;
+ }
+ LastFD = FD;
+ }
+
+ // Ignore unnamed bit-fields.
+ if (!FD->getDeclName()) {
+ LastFD = FD;
+ continue;
+ }
+
+ const CGBitFieldInfo &Info = RL->getBitFieldInfo(FD);
+ for (unsigned i = 0, e = Info.getNumComponents(); i != e; ++i) {
+ const CGBitFieldInfo::AccessInfo &AI = Info.getComponent(i);
+
+ // Verify that every component access is within the structure.
+ uint64_t FieldOffset = SL->getElementOffsetInBits(AI.FieldIndex);
+ uint64_t AccessBitOffset = FieldOffset +
+ getContext().toBits(AI.FieldByteOffset);
+ assert(AccessBitOffset + AI.AccessWidth <= TypeSizeInBits &&
+ "Invalid bit-field access (out of range)!");
+ }
+ }
+#endif
+
+ return RL;
+}
+
+void CGRecordLayout::print(raw_ostream &OS) const {
+ OS << "<CGRecordLayout\n";
+ OS << " LLVMType:" << *CompleteObjectType << "\n";
+ if (BaseSubobjectType)
+ OS << " NonVirtualBaseLLVMType:" << *BaseSubobjectType << "\n";
+ OS << " IsZeroInitializable:" << IsZeroInitializable << "\n";
+ OS << " BitFields:[\n";
+
+ // Print bit-field infos in declaration order.
+ std::vector<std::pair<unsigned, const CGBitFieldInfo*> > BFIs;
+ for (llvm::DenseMap<const FieldDecl*, CGBitFieldInfo>::const_iterator
+ it = BitFields.begin(), ie = BitFields.end();
+ it != ie; ++it) {
+ const RecordDecl *RD = it->first->getParent();
+ unsigned Index = 0;
+ for (RecordDecl::field_iterator
+ it2 = RD->field_begin(); *it2 != it->first; ++it2)
+ ++Index;
+ BFIs.push_back(std::make_pair(Index, &it->second));
+ }
+ llvm::array_pod_sort(BFIs.begin(), BFIs.end());
+ for (unsigned i = 0, e = BFIs.size(); i != e; ++i) {
+ OS.indent(4);
+ BFIs[i].second->print(OS);
+ OS << "\n";
+ }
+
+ OS << "]>\n";
+}
+
+void CGRecordLayout::dump() const {
+ print(llvm::errs());
+}
+
+void CGBitFieldInfo::print(raw_ostream &OS) const {
+ OS << "<CGBitFieldInfo";
+ OS << " Size:" << Size;
+ OS << " IsSigned:" << IsSigned << "\n";
+
+ OS.indent(4 + strlen("<CGBitFieldInfo"));
+ OS << " NumComponents:" << getNumComponents();
+ OS << " Components: [";
+ if (getNumComponents()) {
+ OS << "\n";
+ for (unsigned i = 0, e = getNumComponents(); i != e; ++i) {
+ const AccessInfo &AI = getComponent(i);
+ OS.indent(8);
+ OS << "<AccessInfo"
+ << " FieldIndex:" << AI.FieldIndex
+ << " FieldByteOffset:" << AI.FieldByteOffset.getQuantity()
+ << " FieldBitStart:" << AI.FieldBitStart
+ << " AccessWidth:" << AI.AccessWidth << "\n";
+ OS.indent(8 + strlen("<AccessInfo"));
+ OS << " AccessAlignment:" << AI.AccessAlignment.getQuantity()
+ << " TargetBitOffset:" << AI.TargetBitOffset
+ << " TargetBitWidth:" << AI.TargetBitWidth
+ << ">\n";
+ }
+ OS.indent(4);
+ }
+ OS << "]>";
+}
+
+void CGBitFieldInfo::dump() const {
+ print(llvm::errs());
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
new file mode 100644
index 0000000..bf42dcb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGStmt.cpp
@@ -0,0 +1,1676 @@
+//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code to emit Stmt nodes as LLVM code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGDebugInfo.h"
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "TargetInfo.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/InlineAsm.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+//===----------------------------------------------------------------------===//
+// Statement Emission
+//===----------------------------------------------------------------------===//
+
+void CodeGenFunction::EmitStopPoint(const Stmt *S) {
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ SourceLocation Loc;
+ if (isa<DeclStmt>(S))
+ Loc = S->getLocEnd();
+ else
+ Loc = S->getLocStart();
+ DI->EmitLocation(Builder, Loc);
+ }
+}
+
+void CodeGenFunction::EmitStmt(const Stmt *S) {
+ assert(S && "Null statement?");
+
+ // These statements have their own debug info handling.
+ if (EmitSimpleStmt(S))
+ return;
+
+ // Check if we are generating unreachable code.
+ if (!HaveInsertPoint()) {
+ // If so, and the statement doesn't contain a label, then we do not need to
+ // generate actual code. This is safe because (1) the current point is
+ // unreachable, so we don't need to execute the code, and (2) we've already
+ // handled the statements which update internal data structures (like the
+ // local variable map) which could be used by subsequent statements.
+ if (!ContainsLabel(S)) {
+ // Verify that any decl statements were handled as simple, they may be in
+ // scope of subsequent reachable statements.
+ assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
+ return;
+ }
+
+ // Otherwise, make a new block to hold the code.
+ EnsureInsertPoint();
+ }
+
+ // Generate a stoppoint if we are emitting debug info.
+ EmitStopPoint(S);
+
+ switch (S->getStmtClass()) {
+ case Stmt::NoStmtClass:
+ case Stmt::CXXCatchStmtClass:
+ case Stmt::SEHExceptStmtClass:
+ case Stmt::SEHFinallyStmtClass:
+ case Stmt::MSDependentExistsStmtClass:
+ llvm_unreachable("invalid statement class to emit generically");
+ case Stmt::NullStmtClass:
+ case Stmt::CompoundStmtClass:
+ case Stmt::DeclStmtClass:
+ case Stmt::LabelStmtClass:
+ case Stmt::GotoStmtClass:
+ case Stmt::BreakStmtClass:
+ case Stmt::ContinueStmtClass:
+ case Stmt::DefaultStmtClass:
+ case Stmt::CaseStmtClass:
+ llvm_unreachable("should have emitted these statements as simple");
+
+#define STMT(Type, Base)
+#define ABSTRACT_STMT(Op)
+#define EXPR(Type, Base) \
+ case Stmt::Type##Class:
+#include "clang/AST/StmtNodes.inc"
+ {
+ // Remember the block we came in on.
+ llvm::BasicBlock *incoming = Builder.GetInsertBlock();
+ assert(incoming && "expression emission must have an insertion point");
+
+ EmitIgnoredExpr(cast<Expr>(S));
+
+ llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
+ assert(outgoing && "expression emission cleared block!");
+
+ // The expression emitters assume (reasonably!) that the insertion
+ // point is always set. To maintain that, the call-emission code
+ // for noreturn functions has to enter a new block with no
+ // predecessors. We want to kill that block and mark the current
+ // insertion point unreachable in the common case of a call like
+ // "exit();". Since expression emission doesn't otherwise create
+ // blocks with no predecessors, we can just test for that.
+ // However, we must be careful not to do this to our incoming
+ // block, because *statement* emission does sometimes create
+ // reachable blocks which will have no predecessors until later in
+ // the function. This occurs with, e.g., labels that are not
+ // reachable by fallthrough.
+ if (incoming != outgoing && outgoing->use_empty()) {
+ outgoing->eraseFromParent();
+ Builder.ClearInsertionPoint();
+ }
+ break;
+ }
+
+ case Stmt::IndirectGotoStmtClass:
+ EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
+
+ case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
+ case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S)); break;
+ case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S)); break;
+ case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S)); break;
+
+ case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
+
+ case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
+ case Stmt::AsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
+
+ case Stmt::ObjCAtTryStmtClass:
+ EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
+ break;
+ case Stmt::ObjCAtCatchStmtClass:
+ llvm_unreachable(
+ "@catch statements should be handled by EmitObjCAtTryStmt");
+ case Stmt::ObjCAtFinallyStmtClass:
+ llvm_unreachable(
+ "@finally statements should be handled by EmitObjCAtTryStmt");
+ case Stmt::ObjCAtThrowStmtClass:
+ EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
+ break;
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
+ break;
+ case Stmt::ObjCForCollectionStmtClass:
+ EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
+ break;
+ case Stmt::ObjCAutoreleasePoolStmtClass:
+ EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
+ break;
+
+ case Stmt::CXXTryStmtClass:
+ EmitCXXTryStmt(cast<CXXTryStmt>(*S));
+ break;
+ case Stmt::CXXForRangeStmtClass:
+ EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S));
+ case Stmt::SEHTryStmtClass:
+ // FIXME Not yet implemented
+ break;
+ }
+}
+
+bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
+ switch (S->getStmtClass()) {
+ default: return false;
+ case Stmt::NullStmtClass: break;
+ case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
+ case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break;
+ case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break;
+ case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break;
+ case Stmt::BreakStmtClass: EmitBreakStmt(cast<BreakStmt>(*S)); break;
+ case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
+ case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
+ case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
+ }
+
+ return true;
+}
+
+/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
+/// this captures the expression result of the last sub-statement and returns it
+/// (for use by the statement expression extension).
+RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
+ AggValueSlot AggSlot) {
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
+ "LLVM IR generation of compound statement ('{}')");
+
+ // Keep track of the current cleanup stack depth, including debug scopes.
+ LexicalScope Scope(*this, S.getSourceRange());
+
+ for (CompoundStmt::const_body_iterator I = S.body_begin(),
+ E = S.body_end()-GetLast; I != E; ++I)
+ EmitStmt(*I);
+
+ RValue RV;
+ if (!GetLast)
+ RV = RValue::get(0);
+ else {
+ // We have to special case labels here. They are statements, but when put
+ // at the end of a statement expression, they yield the value of their
+ // subexpression. Handle this by walking through all labels we encounter,
+ // emitting them before we evaluate the subexpr.
+ const Stmt *LastStmt = S.body_back();
+ while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
+ EmitLabel(LS->getDecl());
+ LastStmt = LS->getSubStmt();
+ }
+
+ EnsureInsertPoint();
+
+ RV = EmitAnyExpr(cast<Expr>(LastStmt), AggSlot);
+ }
+
+ return RV;
+}
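+
+// For example, for the GNU statement expression `({ f(); g(); })` this is
+// called with GetLast = true: f() is emitted as an ordinary statement, while
+// g() is evaluated with EmitAnyExpr so that its value becomes the result of
+// the whole expression. Any labels wrapping the final statement are emitted
+// first, and the value of their innermost sub-statement is used instead.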
+
+void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
+ llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
+
+  // If there is a cleanup stack, then it isn't worth trying to
+ // simplify this block (we would need to remove it from the scope map
+ // and cleanup entry).
+ if (!EHStack.empty())
+ return;
+
+ // Can only simplify direct branches.
+ if (!BI || !BI->isUnconditional())
+ return;
+
+ BB->replaceAllUsesWith(BI->getSuccessor(0));
+ BI->eraseFromParent();
+ BB->eraseFromParent();
+}
+
+void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ // Fall out of the current block (if necessary).
+ EmitBranch(BB);
+
+ if (IsFinished && BB->use_empty()) {
+ delete BB;
+ return;
+ }
+
+ // Place the block after the current block, if possible, or else at
+ // the end of the function.
+ if (CurBB && CurBB->getParent())
+ CurFn->getBasicBlockList().insertAfter(CurBB, BB);
+ else
+ CurFn->getBasicBlockList().push_back(BB);
+ Builder.SetInsertPoint(BB);
+}
+
+void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
+ // Emit a branch from the current block to the target one if this
+ // was a real block. If this was just a fall-through block after a
+ // terminator, don't emit it.
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ if (!CurBB || CurBB->getTerminator()) {
+ // If there is no insert point or the previous block is already
+ // terminated, don't touch it.
+ } else {
+ // Otherwise, create a fall-through branch.
+ Builder.CreateBr(Target);
+ }
+
+ Builder.ClearInsertionPoint();
+}
+
+void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
+ bool inserted = false;
+ for (llvm::BasicBlock::use_iterator
+ i = block->use_begin(), e = block->use_end(); i != e; ++i) {
+ if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(*i)) {
+ CurFn->getBasicBlockList().insertAfter(insn->getParent(), block);
+ inserted = true;
+ break;
+ }
+ }
+
+ if (!inserted)
+ CurFn->getBasicBlockList().push_back(block);
+
+ Builder.SetInsertPoint(block);
+}
+
+CodeGenFunction::JumpDest
+CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
+ JumpDest &Dest = LabelMap[D];
+ if (Dest.isValid()) return Dest;
+
+ // Create, but don't insert, the new block.
+ Dest = JumpDest(createBasicBlock(D->getName()),
+ EHScopeStack::stable_iterator::invalid(),
+ NextCleanupDestIndex++);
+ return Dest;
+}
+
+void CodeGenFunction::EmitLabel(const LabelDecl *D) {
+ JumpDest &Dest = LabelMap[D];
+
+ // If we didn't need a forward reference to this label, just go
+ // ahead and create a destination at the current scope.
+ if (!Dest.isValid()) {
+ Dest = getJumpDestInCurrentScope(D->getName());
+
+ // Otherwise, we need to give this label a target depth and remove
+ // it from the branch-fixups list.
+ } else {
+ assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
+ Dest = JumpDest(Dest.getBlock(),
+ EHStack.stable_begin(),
+ Dest.getDestIndex());
+
+ ResolveBranchFixups(Dest.getBlock());
+ }
+
+ EmitBlock(Dest.getBlock());
+}
+
+
+void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
+ EmitLabel(S.getDecl());
+ EmitStmt(S.getSubStmt());
+}
+
+void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
+}
+
+
+void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
+ if (const LabelDecl *Target = S.getConstantTarget()) {
+ EmitBranchThroughCleanup(getJumpDestForLabel(Target));
+ return;
+ }
+
+ // Ensure that we have an i8* for our PHI node.
+ llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
+ Int8PtrTy, "addr");
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+
+ // Get the basic block for the indirect goto.
+ llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
+
+ // The first instruction in the block has to be the PHI for the switch dest,
+ // add an entry for this branch.
+ cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
+
+ EmitBranch(IndGotoBB);
+}
+
+void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
+ // C99 6.8.4.1: The first substatement is executed if the expression compares
+ // unequal to 0. The condition must be a scalar type.
+ RunCleanupsScope ConditionScope(*this);
+
+ if (S.getConditionVariable())
+ EmitAutoVarDecl(*S.getConditionVariable());
+
+ // If the condition constant folds and can be elided, try to avoid emitting
+ // the condition and the dead arm of the if/else.
+ bool CondConstant;
+ if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant)) {
+ // Figure out which block (then or else) is executed.
+ const Stmt *Executed = S.getThen();
+ const Stmt *Skipped = S.getElse();
+ if (!CondConstant) // Condition false?
+ std::swap(Executed, Skipped);
+
+ // If the skipped block has no labels in it, just emit the executed block.
+ // This avoids emitting dead code and simplifies the CFG substantially.
+ if (!ContainsLabel(Skipped)) {
+ if (Executed) {
+ RunCleanupsScope ExecutedScope(*this);
+ EmitStmt(Executed);
+ }
+ return;
+ }
+ }
+
+ // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
+ // the conditional branch.
+ llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
+ llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
+ llvm::BasicBlock *ElseBlock = ContBlock;
+ if (S.getElse())
+ ElseBlock = createBasicBlock("if.else");
+ EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock);
+
+ // Emit the 'then' code.
+ EmitBlock(ThenBlock);
+ {
+ RunCleanupsScope ThenScope(*this);
+ EmitStmt(S.getThen());
+ }
+ EmitBranch(ContBlock);
+
+ // Emit the 'else' code if present.
+ if (const Stmt *Else = S.getElse()) {
+ // There is no need to emit line number for unconditional branch.
+ if (getDebugInfo())
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc());
+ EmitBlock(ElseBlock);
+ {
+ RunCleanupsScope ElseScope(*this);
+ EmitStmt(Else);
+ }
+ // There is no need to emit line number for unconditional branch.
+ if (getDebugInfo())
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc());
+ EmitBranch(ContBlock);
+ }
+
+ // Emit the continuation block for code after the if.
+ EmitBlock(ContBlock, true);
+}
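+
+// For example, `if (0) foo(); else bar();` constant-folds to a false
+// condition: only bar() is emitted (inside its own cleanup scope) and foo()
+// is dropped entirely, unless the skipped branch contains a label that could
+// still be reached by a goto.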
+
+void CodeGenFunction::EmitWhileStmt(const WhileStmt &S) {
+ // Emit the header for the loop, which will also become
+ // the continue target.
+ JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
+ EmitBlock(LoopHeader.getBlock());
+
+ // Create an exit block for when the condition fails, which will
+ // also become the break target.
+ JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
+
+ // C++ [stmt.while]p2:
+ // When the condition of a while statement is a declaration, the
+ // scope of the variable that is declared extends from its point
+ // of declaration (3.3.2) to the end of the while statement.
+ // [...]
+ // The object created in a condition is destroyed and created
+ // with each iteration of the loop.
+ RunCleanupsScope ConditionScope(*this);
+
+ if (S.getConditionVariable())
+ EmitAutoVarDecl(*S.getConditionVariable());
+
+ // Evaluate the conditional in the while header. C99 6.8.5.1: The
+ // evaluation of the controlling expression takes place before each
+ // execution of the loop body.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+
+ // while(1) is common, avoid extra exit blocks. Be sure
+ // to correctly handle break/continue though.
+ bool EmitBoolCondBranch = true;
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ if (C->isOne())
+ EmitBoolCondBranch = false;
+
+ // As long as the condition is true, go to the loop body.
+ llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
+ if (EmitBoolCondBranch) {
+ llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
+ if (ConditionScope.requiresCleanups())
+ ExitBlock = createBasicBlock("while.exit");
+
+ Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock);
+
+ if (ExitBlock != LoopExit.getBlock()) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+ }
+
+ // Emit the loop body. We have to emit this in a cleanup scope
+ // because it might be a singleton DeclStmt.
+ {
+ RunCleanupsScope BodyScope(*this);
+ EmitBlock(LoopBody);
+ EmitStmt(S.getBody());
+ }
+
+ BreakContinueStack.pop_back();
+
+ // Immediately force cleanup.
+ ConditionScope.ForceCleanup();
+
+ // Branch to the loop header again.
+ EmitBranch(LoopHeader.getBlock());
+
+ // Emit the exit block.
+ EmitBlock(LoopExit.getBlock(), true);
+
+  // If we skipped emitting the condition branch, the LoopHeader is typically
+  // just a forwarding branch; try to erase it.
+ if (!EmitBoolCondBranch)
+ SimplifyForwardingBlocks(LoopHeader.getBlock());
+}
+
+void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
+ JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
+ JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
+
+ // Emit the body of the loop.
+ llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
+ EmitBlock(LoopBody);
+ {
+ RunCleanupsScope BodyScope(*this);
+ EmitStmt(S.getBody());
+ }
+
+ BreakContinueStack.pop_back();
+
+ EmitBlock(LoopCond.getBlock());
+
+ // C99 6.8.5.2: "The evaluation of the controlling expression takes place
+ // after each execution of the loop body."
+
+ // Evaluate the conditional in the while header.
+ // C99 6.8.5p2/p4: The first substatement is executed if the expression
+ // compares unequal to 0. The condition must be a scalar type.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+
+ // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
+ // to correctly handle break/continue though.
+ bool EmitBoolCondBranch = true;
+ if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
+ if (C->isZero())
+ EmitBoolCondBranch = false;
+
+ // As long as the condition is true, iterate the loop.
+ if (EmitBoolCondBranch)
+ Builder.CreateCondBr(BoolCondVal, LoopBody, LoopExit.getBlock());
+
+ // Emit the exit block.
+ EmitBlock(LoopExit.getBlock());
+
+  // If we skipped emitting the condition branch, the DoCond block is
+  // typically just a forwarding branch; try to erase it.
+ if (!EmitBoolCondBranch)
+ SimplifyForwardingBlocks(LoopCond.getBlock());
+}
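+
+// For the common macro pattern `do { ... } while (0)`, the constant-false
+// condition means no conditional branch is emitted: the body falls through
+// do.cond straight into do.end, and SimplifyForwardingBlocks then folds the
+// trivial do.cond block away (provided no cleanups are pending).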
+
+void CodeGenFunction::EmitForStmt(const ForStmt &S) {
+ JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
+
+ RunCleanupsScope ForScope(*this);
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI)
+ DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
+
+ // Evaluate the first part before the loop.
+ if (S.getInit())
+ EmitStmt(S.getInit());
+
+ // Start the loop with a block that tests the condition.
+ // If there's an increment, the continue scope will be overwritten
+ // later.
+ JumpDest Continue = getJumpDestInCurrentScope("for.cond");
+ llvm::BasicBlock *CondBlock = Continue.getBlock();
+ EmitBlock(CondBlock);
+
+ // Create a cleanup scope for the condition variable cleanups.
+ RunCleanupsScope ConditionScope(*this);
+
+ llvm::Value *BoolCondVal = 0;
+ if (S.getCond()) {
+ // If the for statement has a condition scope, emit the local variable
+ // declaration.
+ llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
+ if (S.getConditionVariable()) {
+ EmitAutoVarDecl(*S.getConditionVariable());
+ }
+
+ // If there are any cleanups between here and the loop-exit scope,
+ // create a block to stage a loop exit along.
+ if (ForScope.requiresCleanups())
+ ExitBlock = createBasicBlock("for.cond.cleanup");
+
+ // As long as the condition is true, iterate the loop.
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+ // C99 6.8.5p2/p4: The first substatement is executed if the expression
+ // compares unequal to 0. The condition must be a scalar type.
+ BoolCondVal = EvaluateExprAsBool(S.getCond());
+ Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);
+
+ if (ExitBlock != LoopExit.getBlock()) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+
+ EmitBlock(ForBody);
+ } else {
+ // Treat it as a non-zero constant. Don't even create a new block for the
+ // body, just fall into it.
+ }
+
+ // If the for loop doesn't have an increment we can just use the
+ // condition as the continue block. Otherwise we'll need to create
+ // a block for it (in the current scope, i.e. in the scope of the
+  // condition), and that block will become our continue block.
+ if (S.getInc())
+ Continue = getJumpDestInCurrentScope("for.inc");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
+
+ {
+ // Create a separate cleanup scope for the body, in case it is not
+ // a compound statement.
+ RunCleanupsScope BodyScope(*this);
+ EmitStmt(S.getBody());
+ }
+
+ // If there is an increment, emit it next.
+ if (S.getInc()) {
+ EmitBlock(Continue.getBlock());
+ EmitStmt(S.getInc());
+ }
+
+ BreakContinueStack.pop_back();
+
+ ConditionScope.ForceCleanup();
+ EmitBranch(CondBlock);
+
+ ForScope.ForceCleanup();
+
+ if (DI)
+ DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
+
+ // Emit the fall-through block.
+ EmitBlock(LoopExit.getBlock(), true);
+}
+
+void CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S) {
+ JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
+
+ RunCleanupsScope ForScope(*this);
+
+ CGDebugInfo *DI = getDebugInfo();
+ if (DI)
+ DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
+
+ // Evaluate the first pieces before the loop.
+ EmitStmt(S.getRangeStmt());
+ EmitStmt(S.getBeginEndStmt());
+
+ // Start the loop with a block that tests the condition.
+ // If there's an increment, the continue scope will be overwritten
+ // later.
+ llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
+ EmitBlock(CondBlock);
+
+ // If there are any cleanups between here and the loop-exit scope,
+ // create a block to stage a loop exit along.
+ llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
+ if (ForScope.requiresCleanups())
+ ExitBlock = createBasicBlock("for.cond.cleanup");
+
+ // The loop body, consisting of the specified body and the loop variable.
+ llvm::BasicBlock *ForBody = createBasicBlock("for.body");
+
+ // The body is executed if the expression, contextually converted
+ // to bool, is true.
+ llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
+ Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);
+
+ if (ExitBlock != LoopExit.getBlock()) {
+ EmitBlock(ExitBlock);
+ EmitBranchThroughCleanup(LoopExit);
+ }
+
+ EmitBlock(ForBody);
+
+ // Create a block for the increment. In case of a 'continue', we jump there.
+ JumpDest Continue = getJumpDestInCurrentScope("for.inc");
+
+ // Store the blocks to use for break and continue.
+ BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
+
+ {
+ // Create a separate cleanup scope for the loop variable and body.
+ RunCleanupsScope BodyScope(*this);
+ EmitStmt(S.getLoopVarStmt());
+ EmitStmt(S.getBody());
+ }
+
+ // If there is an increment, emit it next.
+ EmitBlock(Continue.getBlock());
+ EmitStmt(S.getInc());
+
+ BreakContinueStack.pop_back();
+
+ EmitBranch(CondBlock);
+
+ ForScope.ForceCleanup();
+
+ if (DI)
+ DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
+
+ // Emit the fall-through block.
+ EmitBlock(LoopExit.getBlock(), true);
+}
+
+void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
+ if (RV.isScalar()) {
+ Builder.CreateStore(RV.getScalarVal(), ReturnValue);
+ } else if (RV.isAggregate()) {
+ EmitAggregateCopy(ReturnValue, RV.getAggregateAddr(), Ty);
+ } else {
+ StoreComplexToAddr(RV.getComplexVal(), ReturnValue, false);
+ }
+ EmitBranchThroughCleanup(ReturnBlock);
+}
+
+/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
+/// if the function returns void, or may be missing one if the function returns
+/// non-void. Fun stuff :).
+void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
+  // Emit the result value, even if unused, to evaluate the side effects.
+ const Expr *RV = S.getRetValue();
+
+ // FIXME: Clean this up by using an LValue for ReturnTemp,
+ // EmitStoreThroughLValue, and EmitAnyExpr.
+ if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable() &&
+ !Target.useGlobalsForAutomaticVariables()) {
+ // Apply the named return value optimization for this return statement,
+ // which means doing nothing: the appropriate result has already been
+ // constructed into the NRVO variable.
+
+    // If there is an NRVO flag for this variable, set it to 1 to indicate
+ // that the cleanup code should not destroy the variable.
+ if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
+ Builder.CreateStore(Builder.getTrue(), NRVOFlag);
+ } else if (!ReturnValue) {
+ // Make sure not to return anything, but evaluate the expression
+ // for side effects.
+ if (RV)
+ EmitAnyExpr(RV);
+ } else if (RV == 0) {
+ // Do nothing (return value is left uninitialized)
+ } else if (FnRetTy->isReferenceType()) {
+ // If this function returns a reference, take the address of the expression
+ // rather than the value.
+ RValue Result = EmitReferenceBindingToExpr(RV, /*InitializedDecl=*/0);
+ Builder.CreateStore(Result.getScalarVal(), ReturnValue);
+ } else if (!hasAggregateLLVMType(RV->getType())) {
+ Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
+ } else if (RV->getType()->isAnyComplexType()) {
+ EmitComplexExprIntoAddr(RV, ReturnValue, false);
+ } else {
+ CharUnits Alignment = getContext().getTypeAlignInChars(RV->getType());
+ EmitAggExpr(RV, AggValueSlot::forAddr(ReturnValue, Alignment, Qualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ }
+
+ EmitBranchThroughCleanup(ReturnBlock);
+}
+
+void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
+ // As long as debug info is modeled with instructions, we have to ensure we
+ // have a place to insert here and write the stop point here.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ for (DeclStmt::const_decl_iterator I = S.decl_begin(), E = S.decl_end();
+ I != E; ++I)
+ EmitDecl(**I);
+}
+
+void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
+ assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
+
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ JumpDest Block = BreakContinueStack.back().BreakBlock;
+ EmitBranchThroughCleanup(Block);
+}
+
+void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
+ assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
+
+ // If this code is reachable then emit a stop point (if generating
+ // debug info). We have to do this ourselves because we are on the
+ // "simple" statement path.
+ if (HaveInsertPoint())
+ EmitStopPoint(&S);
+
+ JumpDest Block = BreakContinueStack.back().ContinueBlock;
+ EmitBranchThroughCleanup(Block);
+}
+
+/// EmitCaseStmtRange - If the case statement range is not too big, add one
+/// case to the switch instruction for each value within the range. If the
+/// range is too big, emit an "if" condition check instead.
+void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
+ assert(S.getRHS() && "Expected RHS value in CaseStmt");
+
+ llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
+ llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
+
+ // Emit the code for this case. We do this first to make sure it is
+ // properly chained from our predecessor before generating the
+ // switch machinery to enter this block.
+ EmitBlock(createBasicBlock("sw.bb"));
+ llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
+ EmitStmt(S.getSubStmt());
+
+ // If range is empty, do nothing.
+ if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
+ return;
+
+ llvm::APInt Range = RHS - LHS;
+ // FIXME: parameters such as this should not be hardcoded.
+ if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
+ // Range is small enough to add multiple switch instruction cases.
+ for (unsigned i = 0, e = Range.getZExtValue() + 1; i != e; ++i) {
+ SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
+ LHS++;
+ }
+ return;
+ }
+
+ // The range is too big. Emit "if" condition into a new block,
+ // making sure to save and restore the current insertion point.
+ llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
+
+ // Push this test onto the chain of range checks (which terminates
+ // in the default basic block). The switch's default will be changed
+ // to the top of this chain after switch emission is complete.
+ llvm::BasicBlock *FalseDest = CaseRangeBlock;
+ CaseRangeBlock = createBasicBlock("sw.caserange");
+
+ CurFn->getBasicBlockList().push_back(CaseRangeBlock);
+ Builder.SetInsertPoint(CaseRangeBlock);
+
+ // Emit range check.
+ llvm::Value *Diff =
+ Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
+ llvm::Value *Cond =
+ Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
+ Builder.CreateCondBr(Cond, CaseDest, FalseDest);
+
+ // Restore the appropriate insertion point.
+ if (RestoreBB)
+ Builder.SetInsertPoint(RestoreBB);
+ else
+ Builder.ClearInsertionPoint();
+}
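+
+// For example, `case 10 ... 15:` has a range of 5, so six individual cases
+// are added directly to the switch. `case 0 ... 999:` exceeds the 64-value
+// limit, so a "sw.caserange" block is emitted instead, which computes
+// cond - 0 and compares it (unsigned) against 999, branching either to the
+// case body or on to the rest of the range-check chain that ends at the
+// default block.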
+
+void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
+ // If there is no enclosing switch instance that we're aware of, then this
+ // case statement and its block can be elided. This situation only happens
+ // when we've constant-folded the switch, are emitting the constant case,
+ // and part of the constant case includes another case statement. For
+ // instance: switch (4) { case 4: do { case 5: } while (1); }
+ if (!SwitchInsn) {
+ EmitStmt(S.getSubStmt());
+ return;
+ }
+
+ // Handle case ranges.
+ if (S.getRHS()) {
+ EmitCaseStmtRange(S);
+ return;
+ }
+
+ llvm::ConstantInt *CaseVal =
+ Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
+
+ // If the body of the case is just a 'break', and if there was no
+ // fallthrough, try not to emit an empty block.
+ if ((CGM.getCodeGenOpts().OptimizationLevel > 0) && isa<BreakStmt>(S.getSubStmt())) {
+ JumpDest Block = BreakContinueStack.back().BreakBlock;
+
+ // Only do this optimization if there are no cleanups that need emitting.
+ if (isObviouslyBranchWithoutCleanups(Block)) {
+ SwitchInsn->addCase(CaseVal, Block.getBlock());
+
+ // If there was a fallthrough into this case, make sure to redirect it to
+ // the end of the switch as well.
+ if (Builder.GetInsertBlock()) {
+ Builder.CreateBr(Block.getBlock());
+ Builder.ClearInsertionPoint();
+ }
+ return;
+ }
+ }
+
+ EmitBlock(createBasicBlock("sw.bb"));
+ llvm::BasicBlock *CaseDest = Builder.GetInsertBlock();
+ SwitchInsn->addCase(CaseVal, CaseDest);
+
+ // Recursively emitting the statement is acceptable, but is not wonderful for
+ // code where we have many case statements nested together, e.g.:
+ // case 1:
+ // case 2:
+ // case 3: etc.
+ // Handling this recursively will create a new block for each case statement
+ // that falls through to the next case which is IR intensive. It also causes
+ // deep recursion which can run into stack depth limitations. Handle
+ // sequential non-range case statements specially.
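+ // For instance, given
+ //   case 1:
+ //   case 2:
+ //   case 3: body(); break;
+ // the loop below adds the constants 1, 2 and 3 directly to SwitchInsn, all
+ // mapped to the single "sw.bb" block created above, instead of recursing
+ // once per label.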
+ const CaseStmt *CurCase = &S;
+ const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
+
+ // Otherwise, iteratively add consecutive cases to this switch stmt.
+ while (NextCase && NextCase->getRHS() == 0) {
+ CurCase = NextCase;
+ llvm::ConstantInt *CaseVal =
+ Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
+ SwitchInsn->addCase(CaseVal, CaseDest);
+ NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
+ }
+
+ // Normal default recursion for non-cases.
+ EmitStmt(CurCase->getSubStmt());
+}
+
+void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
+ llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
+ assert(DefaultBlock->empty() &&
+ "EmitDefaultStmt: Default block already defined?");
+ EmitBlock(DefaultBlock);
+ EmitStmt(S.getSubStmt());
+}
+
+/// CollectStatementsForCase - Given the body of a 'switch' statement and a
+/// constant value that is being switched on, see if we can dead code eliminate
+/// the body of the switch to a simple series of statements to emit. Basically,
+/// on a switch (5) we want to find these statements:
+/// case 5:
+/// printf(...); <--
+/// ++i; <--
+/// break;
+///
+/// and add them to the ResultStmts vector. If it is unsafe to do this
+/// transformation (for example, one of the elided statements contains a label
+/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
+/// should include statements after it (e.g. the printf() line is a substmt of
+/// the case) then return CSFC_FallThrough. If we handled it and found a break
+/// statement, then return CSFC_Success.
+///
+/// If Case is non-null, then we are looking for the specified case, checking
+/// that nothing we jump over contains labels. If Case is null, then we found
+/// the case and are looking for the break.
+///
+/// If the recursive walk actually finds our Case, then we set FoundCase to
+/// true.
+///
+enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
+static CSFC_Result CollectStatementsForCase(const Stmt *S,
+ const SwitchCase *Case,
+ bool &FoundCase,
+ SmallVectorImpl<const Stmt*> &ResultStmts) {
+ // If this is a null statement, just succeed.
+ if (S == 0)
+ return Case ? CSFC_Success : CSFC_FallThrough;
+
+ // If this is the switchcase (case 4: or default) that we're looking for, then
+ // we're in business. Just add the substatement.
+ if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
+ if (S == Case) {
+ FoundCase = true;
+ return CollectStatementsForCase(SC->getSubStmt(), 0, FoundCase,
+ ResultStmts);
+ }
+
+ // Otherwise, this is some other case or default statement, just ignore it.
+ return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
+ ResultStmts);
+ }
+
+ // If we are in the live part of the code and we found our break statement,
+ // return a success!
+ if (Case == 0 && isa<BreakStmt>(S))
+ return CSFC_Success;
+
+ // If this is a switch statement, then it might contain the SwitchCase, the
+ // break, or neither.
+ if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
+ // Handle this as two cases: we might be looking for the SwitchCase (if so
+ // the skipped statements must be skippable) or we might already have it.
+ CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
+ if (Case) {
+ // Keep track of whether we see a skipped declaration. The code could be
+ // using the declaration even if it is skipped, so we can't optimize out
+ // the decl if the kept statements might refer to it.
+ bool HadSkippedDecl = false;
+
+ // If we're looking for the case, just see if we can skip each of the
+ // substatements.
+ for (; Case && I != E; ++I) {
+ HadSkippedDecl |= isa<DeclStmt>(*I);
+
+ switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
+ case CSFC_Failure: return CSFC_Failure;
+ case CSFC_Success:
+ // A successful result means either 1) that the statement doesn't
+ // have the case and is skippable, or 2) that it contains the case value
+ // and also contains the break to exit the switch. In the latter case,
+ // we just verify that the rest of the statements are elidable.
+ if (FoundCase) {
+ // If we found the case and skipped declarations, we can't do the
+ // optimization.
+ if (HadSkippedDecl)
+ return CSFC_Failure;
+
+ for (++I; I != E; ++I)
+ if (CodeGenFunction::ContainsLabel(*I, true))
+ return CSFC_Failure;
+ return CSFC_Success;
+ }
+ break;
+ case CSFC_FallThrough:
+ // If we have a fallthrough condition, then we must have found the
+ // case and started to include statements. Consider the rest of the
+ // statements in the compound statement as candidates for inclusion.
+ assert(FoundCase && "Didn't find case but returned fallthrough?");
+ // We recursively found Case, so we're not looking for it anymore.
+ Case = 0;
+
+ // If we found the case and skipped declarations, we can't do the
+ // optimization.
+ if (HadSkippedDecl)
+ return CSFC_Failure;
+ break;
+ }
+ }
+ }
+
+ // If we have statements in our range, then we know that the statements are
+ // live and need to be added to the set of statements we're tracking.
+ for (; I != E; ++I) {
+ switch (CollectStatementsForCase(*I, 0, FoundCase, ResultStmts)) {
+ case CSFC_Failure: return CSFC_Failure;
+ case CSFC_FallThrough:
+ // A fallthrough result means that the statement was simple and was
+ // just added to ResultStmts, so keep adding the statements that follow.
+ break;
+ case CSFC_Success:
+ // A successful result means that we found the break statement and
+ // stopped statement inclusion. We just ensure that any leftover stmts
+ // are skippable and return success ourselves.
+ for (++I; I != E; ++I)
+ if (CodeGenFunction::ContainsLabel(*I, true))
+ return CSFC_Failure;
+ return CSFC_Success;
+ }
+ }
+
+ return Case ? CSFC_Success : CSFC_FallThrough;
+ }
+
+ // Okay, this is some other statement that we don't handle explicitly, like a
+ // for statement or increment etc. If we are skipping over this statement,
+ // just verify it doesn't have labels, which would make it invalid to elide.
+ if (Case) {
+ if (CodeGenFunction::ContainsLabel(S, true))
+ return CSFC_Failure;
+ return CSFC_Success;
+ }
+
+ // Otherwise, we want to include this statement. Everything is cool with that
+ // so long as it doesn't contain a break out of the switch we're in.
+ if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
+
+ // Otherwise, everything is great. Include the statement and tell the caller
+ // that we fall through and include the next statement as well.
+ ResultStmts.push_back(S);
+ return CSFC_FallThrough;
+}
+
+/// FindCaseStatementsForValue - Find the case statement being jumped to and
+/// then invoke CollectStatementsForCase to find the list of statements to emit
+/// for a switch on constant. See the comment above CollectStatementsForCase
+/// for more details.
+static bool FindCaseStatementsForValue(const SwitchStmt &S,
+ const llvm::APInt &ConstantCondValue,
+ SmallVectorImpl<const Stmt*> &ResultStmts,
+ ASTContext &C) {
+ // First step, find the switch case that is being branched to. We can do this
+ // efficiently by scanning the SwitchCase list.
+ const SwitchCase *Case = S.getSwitchCaseList();
+ const DefaultStmt *DefaultCase = 0;
+
+ for (; Case; Case = Case->getNextSwitchCase()) {
+ // It's either a default or case. Just remember the default statement in
+ // case we're not jumping to any numbered cases.
+ if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
+ DefaultCase = DS;
+ continue;
+ }
+
+ // Check to see if this case is the one we're looking for.
+ const CaseStmt *CS = cast<CaseStmt>(Case);
+ // Don't handle case ranges yet.
+ if (CS->getRHS()) return false;
+
+ // If we found our case, remember it as 'case'.
+ if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
+ break;
+ }
+
+ // If we didn't find a matching case, we use a default if it exists, or we
+ // elide the whole switch body!
+ if (Case == 0) {
+ // It is safe to elide the body of the switch if it doesn't contain labels
+ // etc. If it is safe, return successfully with an empty ResultStmts list.
+ if (DefaultCase == 0)
+ return !CodeGenFunction::ContainsLabel(&S);
+ Case = DefaultCase;
+ }
+
+ // Ok, we know which case is being jumped to, try to collect all the
+ // statements that follow it. This can fail for a variety of reasons. Also,
+ // check to see that the recursive walk actually found our case statement.
+ // Insane cases like this can fail to find it in the recursive walk since we
+ // don't handle every stmt kind:
+ // switch (4) {
+ // while (1) {
+ // case 4: ...
+ bool FoundCase = false;
+ return CollectStatementsForCase(S.getBody(), Case, FoundCase,
+ ResultStmts) != CSFC_Failure &&
+ FoundCase;
+}
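+
+// As a rough illustration of the two helpers above: for
+//   switch (4) { case 4: foo(); break; default: bar(); }
+// FindCaseStatementsForValue locates 'case 4:' and CollectStatementsForCase
+// collects just the call to foo(), so EmitSwitchStmt below can emit that call
+// directly and skip the switch machinery entirely.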
+
+void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
+ JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
+
+ RunCleanupsScope ConditionScope(*this);
+
+ if (S.getConditionVariable())
+ EmitAutoVarDecl(*S.getConditionVariable());
+
+ // Handle nested switch statements.
+ llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
+ llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
+
+ // See if we can constant fold the condition of the switch and therefore only
+ // emit the live case statement (if any) of the switch.
+ llvm::APInt ConstantCondValue;
+ if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
+ SmallVector<const Stmt*, 4> CaseStmts;
+ if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
+ getContext())) {
+ RunCleanupsScope ExecutedScope(*this);
+
+ // At this point, we are no longer "within" a switch instance, so
+ // temporarily clear SwitchInsn to ensure that any embedded case
+ // statements are not emitted.
+ SwitchInsn = 0;
+
+ // Okay, we can dead code eliminate everything except this case. Emit the
+ // specified series of statements and we're good.
+ for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
+ EmitStmt(CaseStmts[i]);
+
+ // Now we want to restore the saved switch instance so that nested
+ // switches continue to function properly.
+ SwitchInsn = SavedSwitchInsn;
+
+ return;
+ }
+ }
+
+ llvm::Value *CondV = EmitScalarExpr(S.getCond());
+
+ // Create basic block to hold stuff that comes after switch
+ // statement. We also need to create a default block now so that
+ // explicit case range tests can have a place to jump to on
+ // failure.
+ llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
+ SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
+ CaseRangeBlock = DefaultBlock;
+
+ // Clear the insertion point to indicate we are in unreachable code.
+ Builder.ClearInsertionPoint();
+
+ // All break statements jump to SwitchExit. If BreakContinueStack is
+ // non-empty, reuse the enclosing ContinueBlock for 'continue' statements.
+ JumpDest OuterContinue;
+ if (!BreakContinueStack.empty())
+ OuterContinue = BreakContinueStack.back().ContinueBlock;
+
+ BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
+
+ // Emit switch body.
+ EmitStmt(S.getBody());
+
+ BreakContinueStack.pop_back();
+
+ // Update the default block in case explicit case range tests have
+ // been chained on top.
+ SwitchInsn->setDefaultDest(CaseRangeBlock);
+
+ // If a default was never emitted:
+ if (!DefaultBlock->getParent()) {
+ // If we have cleanups, emit the default block so that there's a
+ // place to jump through the cleanups from.
+ if (ConditionScope.requiresCleanups()) {
+ EmitBlock(DefaultBlock);
+
+ // Otherwise, just forward the default block to the switch end.
+ } else {
+ DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
+ delete DefaultBlock;
+ }
+ }
+
+ ConditionScope.ForceCleanup();
+
+ // Emit continuation.
+ EmitBlock(SwitchExit.getBlock(), true);
+
+ SwitchInsn = SavedSwitchInsn;
+ CaseRangeBlock = SavedCRBlock;
+}
+
+static std::string
+SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
+ SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=0) {
+ std::string Result;
+
+ while (*Constraint) {
+ switch (*Constraint) {
+ default:
+ Result += Target.convertConstraint(Constraint);
+ break;
+ // Ignore these
+ case '*':
+ case '?':
+ case '!':
+ case '=': // Will see this and the following in multi-alternative constraints.
+ case '+':
+ break;
+ case ',':
+ Result += "|";
+ break;
+ case 'g':
+ Result += "imr";
+ break;
+ case '[': {
+ assert(OutCons &&
+ "Must pass output names to constraints with a symbolic name");
+ unsigned Index;
+ bool result = Target.resolveSymbolicName(Constraint,
+ &(*OutCons)[0],
+ OutCons->size(), Index);
+ assert(result && "Could not resolve symbolic name"); (void)result;
+ Result += llvm::utostr(Index);
+ break;
+ }
+ }
+
+ Constraint++;
+ }
+
+ return Result;
+}
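+
+// Rough illustration of the mapping above (the exact result depends on the
+// target's convertConstraint hook): the multi-alternative constraint "g,r"
+// becomes "imr|r", the modifiers '=', '+', '*', '?' and '!' are dropped, and
+// a symbolic operand reference such as "[foo]" is rewritten to the numeric
+// index of the matching output operand.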
+
+/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
+/// as using a particular register add that as a constraint that will be used
+/// in this asm stmt.
+static std::string
+AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
+ const TargetInfo &Target, CodeGenModule &CGM,
+ const AsmStmt &Stmt) {
+ const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
+ if (!AsmDeclRef)
+ return Constraint;
+ const ValueDecl &Value = *AsmDeclRef->getDecl();
+ const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
+ if (!Variable)
+ return Constraint;
+ if (Variable->getStorageClass() != SC_Register)
+ return Constraint;
+ AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
+ if (!Attr)
+ return Constraint;
+ StringRef Register = Attr->getLabel();
+ assert(Target.isValidGCCRegisterName(Register));
+ // We're using validateOutputConstraint here because we only care if
+ // this is a register constraint.
+ TargetInfo::ConstraintInfo Info(Constraint, "");
+ if (Target.validateOutputConstraint(Info) &&
+ !Info.allowsRegister()) {
+ CGM.ErrorUnsupported(&Stmt, "__asm__");
+ return Constraint;
+ }
+ // Canonicalize the register here before returning it.
+ Register = Target.getNormalizedGCCRegisterName(Register);
+ return "{" + Register.str() + "}";
+}
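+
+// For instance (a sketch, assuming an x86 target): given
+//   register int cnt asm("ecx");
+//   __asm__ volatile("" : : "r"(cnt));
+// the generic "r" constraint for 'cnt' is replaced by a specific-register
+// constraint of the form "{reg}" (the normalized name of "ecx"), forcing the
+// backend to use that register; constraints that cannot accept a register are
+// reported as unsupported instead.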
+
+llvm::Value*
+CodeGenFunction::EmitAsmInputLValue(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ LValue InputValue, QualType InputType,
+ std::string &ConstraintStr) {
+ llvm::Value *Arg;
+ if (Info.allowsRegister() || !Info.allowsMemory()) {
+ if (!CodeGenFunction::hasAggregateLLVMType(InputType)) {
+ Arg = EmitLoadOfLValue(InputValue).getScalarVal();
+ } else {
+ llvm::Type *Ty = ConvertType(InputType);
+ uint64_t Size = CGM.getTargetData().getTypeSizeInBits(Ty);
+ if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
+ Ty = llvm::IntegerType::get(getLLVMContext(), Size);
+ Ty = llvm::PointerType::getUnqual(Ty);
+
+ Arg = Builder.CreateLoad(Builder.CreateBitCast(InputValue.getAddress(),
+ Ty));
+ } else {
+ Arg = InputValue.getAddress();
+ ConstraintStr += '*';
+ }
+ }
+ } else {
+ Arg = InputValue.getAddress();
+ ConstraintStr += '*';
+ }
+
+ return Arg;
+}
+
+llvm::Value* CodeGenFunction::EmitAsmInput(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr,
+ std::string &ConstraintStr) {
+ if (Info.allowsRegister() || !Info.allowsMemory())
+ if (!CodeGenFunction::hasAggregateLLVMType(InputExpr->getType()))
+ return EmitScalarExpr(InputExpr);
+
+ InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
+ LValue Dest = EmitLValue(InputExpr);
+ return EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(), ConstraintStr);
+}
+
+/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
+/// asm call instruction. The !srcloc MDNode contains a list of constant
+/// integers which are the source locations of the start of each line in the
+/// asm.
+static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
+ CodeGenFunction &CGF) {
+ SmallVector<llvm::Value *, 8> Locs;
+ // Add the location of the first line to the MDNode.
+ Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
+ Str->getLocStart().getRawEncoding()));
+ StringRef StrVal = Str->getString();
+ if (!StrVal.empty()) {
+ const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
+ const LangOptions &LangOpts = CGF.CGM.getLangOpts();
+
+ // Add the location of the start of each subsequent line of the asm to the
+ // MDNode.
+ for (unsigned i = 0, e = StrVal.size()-1; i != e; ++i) {
+ if (StrVal[i] != '\n') continue;
+ SourceLocation LineLoc = Str->getLocationOfByte(i+1, SM, LangOpts,
+ CGF.Target);
+ Locs.push_back(llvm::ConstantInt::get(CGF.Int32Ty,
+ LineLoc.getRawEncoding()));
+ }
+ }
+
+ return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
+}
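+
+// E.g. (illustrative): for asm("nop\n\tnop") the node carries two i32 values,
+// the raw source locations of the start of each line, which later diagnostics
+// can use to point at the offending line of the inline asm.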
+
+void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
+ // Analyze the asm string to decompose it into its pieces. We know that Sema
+ // has already done this, so it is guaranteed to be successful.
+ SmallVector<AsmStmt::AsmStringPiece, 4> Pieces;
+ unsigned DiagOffs;
+ S.AnalyzeAsmString(Pieces, getContext(), DiagOffs);
+
+ // Assemble the pieces into the final asm string.
+ std::string AsmString;
+ for (unsigned i = 0, e = Pieces.size(); i != e; ++i) {
+ if (Pieces[i].isString())
+ AsmString += Pieces[i].getString();
+ else if (Pieces[i].getModifier() == '\0')
+ AsmString += '$' + llvm::utostr(Pieces[i].getOperandNo());
+ else
+ AsmString += "${" + llvm::utostr(Pieces[i].getOperandNo()) + ':' +
+ Pieces[i].getModifier() + '}';
+ }
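+ // E.g. (illustrative): for __asm__("mov %1, %0" : "=r"(dst) : "r"(src)) the
+ // pieces reassemble to "mov $1, $0", and a piece carrying a modifier, such
+ // as "%w1", would become "${1:w}".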
+
+ // Get all the output and input constraints together.
+ SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
+ SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
+
+ for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i),
+ S.getOutputName(i));
+ bool IsValid = Target.validateOutputConstraint(Info); (void)IsValid;
+ assert(IsValid && "Failed to parse output constraint");
+ OutputConstraintInfos.push_back(Info);
+ }
+
+ for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo Info(S.getInputConstraint(i),
+ S.getInputName(i));
+ bool IsValid = Target.validateInputConstraint(OutputConstraintInfos.data(),
+ S.getNumOutputs(), Info);
+ assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
+ InputConstraintInfos.push_back(Info);
+ }
+
+ std::string Constraints;
+
+ std::vector<LValue> ResultRegDests;
+ std::vector<QualType> ResultRegQualTys;
+ std::vector<llvm::Type *> ResultRegTypes;
+ std::vector<llvm::Type *> ResultTruncRegTypes;
+ std::vector<llvm::Type*> ArgTypes;
+ std::vector<llvm::Value*> Args;
+
+ // Keep track of inout constraints.
+ std::string InOutConstraints;
+ std::vector<llvm::Value*> InOutArgs;
+ std::vector<llvm::Type*> InOutArgTypes;
+
+ for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
+ TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
+
+ // Simplify the output constraint.
+ std::string OutputConstraint(S.getOutputConstraint(i));
+ OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1, Target);
+
+ const Expr *OutExpr = S.getOutputExpr(i);
+ OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
+
+ OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
+ Target, CGM, S);
+
+ LValue Dest = EmitLValue(OutExpr);
+ if (!Constraints.empty())
+ Constraints += ',';
+
+ // If this is a register output, then make the inline asm return it
+ // by-value. If this is a memory result, return the value by-reference.
+ if (!Info.allowsMemory() && !hasAggregateLLVMType(OutExpr->getType())) {
+ Constraints += "=" + OutputConstraint;
+ ResultRegQualTys.push_back(OutExpr->getType());
+ ResultRegDests.push_back(Dest);
+ ResultRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
+ ResultTruncRegTypes.push_back(ResultRegTypes.back());
+
+ // If this output is tied to an input, and if the input is larger, then
+ // we need to set the actual result type of the inline asm node to be the
+ // same as the input type.
+ if (Info.hasMatchingInput()) {
+ unsigned InputNo;
+ for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
+ TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
+ if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
+ break;
+ }
+ assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
+
+ QualType InputTy = S.getInputExpr(InputNo)->getType();
+ QualType OutputType = OutExpr->getType();
+
+ uint64_t InputSize = getContext().getTypeSize(InputTy);
+ if (getContext().getTypeSize(OutputType) < InputSize) {
+ // Form the asm to return the value as a larger integer or fp type.
+ ResultRegTypes.back() = ConvertType(InputTy);
+ }
+ }
+ if (llvm::Type* AdjTy =
+ getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
+ ResultRegTypes.back()))
+ ResultRegTypes.back() = AdjTy;
+ } else {
+ ArgTypes.push_back(Dest.getAddress()->getType());
+ Args.push_back(Dest.getAddress());
+ Constraints += "=*";
+ Constraints += OutputConstraint;
+ }
+
+ if (Info.isReadWrite()) {
+ InOutConstraints += ',';
+
+ const Expr *InputExpr = S.getOutputExpr(i);
+ llvm::Value *Arg = EmitAsmInputLValue(S, Info, Dest, InputExpr->getType(),
+ InOutConstraints);
+
+ if (llvm::Type* AdjTy =
+ getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
+ Arg->getType()))
+ Arg = Builder.CreateBitCast(Arg, AdjTy);
+
+ if (Info.allowsRegister())
+ InOutConstraints += llvm::utostr(i);
+ else
+ InOutConstraints += OutputConstraint;
+
+ InOutArgTypes.push_back(Arg->getType());
+ InOutArgs.push_back(Arg);
+ }
+ }
+
+ unsigned NumConstraints = S.getNumOutputs() + S.getNumInputs();
+
+ for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
+ const Expr *InputExpr = S.getInputExpr(i);
+
+ TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
+
+ if (!Constraints.empty())
+ Constraints += ',';
+
+ // Simplify the input constraint.
+ std::string InputConstraint(S.getInputConstraint(i));
+ InputConstraint = SimplifyConstraint(InputConstraint.c_str(), Target,
+ &OutputConstraintInfos);
+
+ InputConstraint =
+ AddVariableConstraints(InputConstraint,
+ *InputExpr->IgnoreParenNoopCasts(getContext()),
+ Target, CGM, S);
+
+ llvm::Value *Arg = EmitAsmInput(S, Info, InputExpr, Constraints);
+
+ // If this input argument is tied to a larger output result, extend the
+ // input to be the same size as the output. The LLVM backend wants to see
+ // the input and output of a matching constraint be the same size. Note
+ // that GCC does not define what the top bits are here. We use zext because
+ // that is usually cheaper, but LLVM IR should really get an anyext someday.
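+ // E.g. (illustrative): an 'int' input tied to a 'long long' output on a
+ // typical 64-bit target is zero-extended from i32 to i64 here so that both
+ // sides of the matching constraint have the same LLVM type.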
+ if (Info.hasTiedOperand()) {
+ unsigned Output = Info.getTiedOperand();
+ QualType OutputType = S.getOutputExpr(Output)->getType();
+ QualType InputTy = InputExpr->getType();
+
+ if (getContext().getTypeSize(OutputType) >
+ getContext().getTypeSize(InputTy)) {
+ // Use ptrtoint as appropriate so that we can do our extension.
+ if (isa<llvm::PointerType>(Arg->getType()))
+ Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
+ llvm::Type *OutputTy = ConvertType(OutputType);
+ if (isa<llvm::IntegerType>(OutputTy))
+ Arg = Builder.CreateZExt(Arg, OutputTy);
+ else if (isa<llvm::PointerType>(OutputTy))
+ Arg = Builder.CreateZExt(Arg, IntPtrTy);
+ else {
+ assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
+ Arg = Builder.CreateFPExt(Arg, OutputTy);
+ }
+ }
+ }
+ if (llvm::Type* AdjTy =
+ getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
+ Arg->getType()))
+ Arg = Builder.CreateBitCast(Arg, AdjTy);
+
+ ArgTypes.push_back(Arg->getType());
+ Args.push_back(Arg);
+ Constraints += InputConstraint;
+ }
+
+ // Append the "input" part of inout constraints last.
+ for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
+ ArgTypes.push_back(InOutArgTypes[i]);
+ Args.push_back(InOutArgs[i]);
+ }
+ Constraints += InOutConstraints;
+
+ // Clobbers
+ for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
+ StringRef Clobber = S.getClobber(i)->getString();
+
+ if (Clobber != "memory" && Clobber != "cc")
+ Clobber = Target.getNormalizedGCCRegisterName(Clobber);
+
+ if (i != 0 || NumConstraints != 0)
+ Constraints += ',';
+
+ Constraints += "~{";
+ Constraints += Clobber;
+ Constraints += '}';
+ }
+
+ // Add machine specific clobbers
+ std::string MachineClobbers = Target.getClobbers();
+ if (!MachineClobbers.empty()) {
+ if (!Constraints.empty())
+ Constraints += ',';
+ Constraints += MachineClobbers;
+ }
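+
+ // E.g. (illustrative): "memory" and "cc" pass through as "~{memory}" and
+ // "~{cc}", register clobbers are wrapped as "~{reg}" after normalization,
+ // and the target's own clobbers (roughly "~{dirflag},~{fpsr},~{flags}" on
+ // x86) are appended last.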
+
+ llvm::Type *ResultType;
+ if (ResultRegTypes.empty())
+ ResultType = VoidTy;
+ else if (ResultRegTypes.size() == 1)
+ ResultType = ResultRegTypes[0];
+ else
+ ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
+
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(ResultType, ArgTypes, false);
+
+ llvm::InlineAsm *IA =
+ llvm::InlineAsm::get(FTy, AsmString, Constraints,
+ S.isVolatile() || S.getNumOutputs() == 0);
+ llvm::CallInst *Result = Builder.CreateCall(IA, Args);
+ Result->addAttribute(~0, llvm::Attribute::NoUnwind);
+
+ // Slap the source location of the inline asm into a !srcloc metadata on the
+ // call.
+ Result->setMetadata("srcloc", getAsmSrcLocInfo(S.getAsmString(), *this));
+
+ // Extract all of the register value results from the asm.
+ std::vector<llvm::Value*> RegResults;
+ if (ResultRegTypes.size() == 1) {
+ RegResults.push_back(Result);
+ } else {
+ for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
+ llvm::Value *Tmp = Builder.CreateExtractValue(Result, i, "asmresult");
+ RegResults.push_back(Tmp);
+ }
+ }
+
+ for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
+ llvm::Value *Tmp = RegResults[i];
+
+ // If the result type of the LLVM IR asm doesn't match the result type of
+ // the expression, do the conversion.
+ if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
+ llvm::Type *TruncTy = ResultTruncRegTypes[i];
+
+ // Truncate the result to the right size; note that TruncTy can also be
+ // a pointer, vector, or floating-point type.
+ if (TruncTy->isFloatingPointTy())
+ Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
+ else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
+ uint64_t ResSize = CGM.getTargetData().getTypeSizeInBits(TruncTy);
+ Tmp = Builder.CreateTrunc(Tmp,
+ llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
+ Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
+ } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
+ uint64_t TmpSize =CGM.getTargetData().getTypeSizeInBits(Tmp->getType());
+ Tmp = Builder.CreatePtrToInt(Tmp,
+ llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
+ Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+ } else if (TruncTy->isIntegerTy()) {
+ Tmp = Builder.CreateTrunc(Tmp, TruncTy);
+ } else if (TruncTy->isVectorTy()) {
+ Tmp = Builder.CreateBitCast(Tmp, TruncTy);
+ }
+ }
+
+ EmitStoreThroughLValue(RValue::get(Tmp), ResultRegDests[i]);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp
new file mode 100644
index 0000000..98be872
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTT.cpp
@@ -0,0 +1,192 @@
+//===--- CGVTT.cpp - Emit LLVM Code for C++ VTTs --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of VTTs (vtable tables).
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CGCXXABI.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/VTTBuilder.h"
+using namespace clang;
+using namespace CodeGen;
+
+static llvm::Constant *
+GetAddrOfVTTVTable(CodeGenVTables &CGVT, const CXXRecordDecl *MostDerivedClass,
+ const VTTVTable &VTable,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ llvm::DenseMap<BaseSubobject, uint64_t> &AddressPoints) {
+ if (VTable.getBase() == MostDerivedClass) {
+ assert(VTable.getBaseOffset().isZero() &&
+ "Most derived class vtable must have a zero offset!");
+ // This is a regular vtable.
+ return CGVT.GetAddrOfVTable(MostDerivedClass);
+ }
+
+ return CGVT.GenerateConstructionVTable(MostDerivedClass,
+ VTable.getBaseSubobject(),
+ VTable.isVirtual(),
+ Linkage,
+ AddressPoints);
+}
+
+void
+CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD) {
+ VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/true);
+
+ llvm::Type *Int8PtrTy = CGM.Int8PtrTy, *Int64Ty = CGM.Int64Ty;
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
+
+ SmallVector<llvm::Constant *, 8> VTables;
+ SmallVector<VTableAddressPointsMapTy, 8> VTableAddressPoints;
+ for (const VTTVTable *i = Builder.getVTTVTables().begin(),
+ *e = Builder.getVTTVTables().end(); i != e; ++i) {
+ VTableAddressPoints.push_back(VTableAddressPointsMapTy());
+ VTables.push_back(GetAddrOfVTTVTable(*this, RD, *i, Linkage,
+ VTableAddressPoints.back()));
+ }
+
+ SmallVector<llvm::Constant *, 8> VTTComponents;
+ for (const VTTComponent *i = Builder.getVTTComponents().begin(),
+ *e = Builder.getVTTComponents().end(); i != e; ++i) {
+ const VTTVTable &VTTVT = Builder.getVTTVTables()[i->VTableIndex];
+ llvm::Constant *VTable = VTables[i->VTableIndex];
+ uint64_t AddressPoint;
+ if (VTTVT.getBase() == RD) {
+ // Just get the address point for the regular vtable.
+ AddressPoint = VTContext.getVTableLayout(RD)
+ .getAddressPoint(i->VTableBase);
+ assert(AddressPoint != 0 && "Did not find vtable address point!");
+ } else {
+ AddressPoint = VTableAddressPoints[i->VTableIndex].lookup(i->VTableBase);
+ assert(AddressPoint != 0 && "Did not find ctor vtable address point!");
+ }
+
+ llvm::Value *Idxs[] = {
+ llvm::ConstantInt::get(Int64Ty, 0),
+ llvm::ConstantInt::get(Int64Ty, AddressPoint)
+ };
+
+ llvm::Constant *Init =
+ llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Idxs);
+
+ Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
+
+ VTTComponents.push_back(Init);
+ }
+
+ llvm::Constant *Init = llvm::ConstantArray::get(ArrayType, VTTComponents);
+
+ VTT->setInitializer(Init);
+
+ // Set the correct linkage.
+ VTT->setLinkage(Linkage);
+
+ // Set the right visibility.
+ CGM.setTypeVisibility(VTT, RD, CodeGenModule::TVK_ForVTT);
+}
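+
+// Rough illustration (assuming the usual Itanium C++ ABI layout): for
+//   struct A { virtual void f(); };
+//   struct B : virtual A { };
+// the VTT for B is an array of i8* whose entries are address points into B's
+// main vtable and into the construction vtables generated above, in the order
+// produced by VTTBuilder; B's constructors and destructors walk this array to
+// find the vtables to install while base subobjects are under construction.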
+
+llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
+ assert(RD->getNumVBases() && "Only classes with virtual bases need a VTT");
+
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXVTT(RD, Out);
+ Out.flush();
+ StringRef Name = OutName.str();
+
+ // This will also defer the definition of the VTT.
+ (void) GetAddrOfVTable(RD);
+
+ VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
+
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(CGM.Int8PtrTy, Builder.getVTTComponents().size());
+
+ llvm::GlobalVariable *GV =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
+ llvm::GlobalValue::ExternalLinkage);
+ GV->setUnnamedAddr(true);
+ return GV;
+}
+
+bool CodeGenVTables::needsVTTParameter(GlobalDecl GD) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+ // If we don't have any virtual bases, just return early.
+ if (!MD->getParent()->getNumVBases())
+ return false;
+
+ // Check if we have a base constructor.
+ if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
+ return true;
+
+ // Check if we have a base destructor.
+ if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
+ return true;
+
+ return false;
+}
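+
+// In other words (a sketch of the Itanium convention): only the base-object
+// constructor and destructor variants of classes with virtual bases take the
+// implicit VTT argument; the complete-object variants can use the class's own
+// VTT directly and need no extra parameter.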
+
+uint64_t CodeGenVTables::getSubVTTIndex(const CXXRecordDecl *RD,
+ BaseSubobject Base) {
+ BaseSubobjectPairTy ClassSubobjectPair(RD, Base);
+
+ SubVTTIndiciesMapTy::iterator I = SubVTTIndicies.find(ClassSubobjectPair);
+ if (I != SubVTTIndicies.end())
+ return I->second;
+
+ VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
+
+ for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
+ Builder.getSubVTTIndicies().begin(),
+ E = Builder.getSubVTTIndicies().end(); I != E; ++I) {
+ // Insert all indices.
+ BaseSubobjectPairTy ClassSubobjectPair(RD, I->first);
+
+ SubVTTIndicies.insert(std::make_pair(ClassSubobjectPair, I->second));
+ }
+
+ I = SubVTTIndicies.find(ClassSubobjectPair);
+ assert(I != SubVTTIndicies.end() && "Did not find index!");
+
+ return I->second;
+}
+
+uint64_t
+CodeGenVTables::getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
+ BaseSubobject Base) {
+ SecondaryVirtualPointerIndicesMapTy::iterator I =
+ SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base));
+
+ if (I != SecondaryVirtualPointerIndices.end())
+ return I->second;
+
+ VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
+
+ // Insert all secondary vpointer indices.
+ for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
+ Builder.getSecondaryVirtualPointerIndices().begin(),
+ E = Builder.getSecondaryVirtualPointerIndices().end(); I != E; ++I) {
+ std::pair<const CXXRecordDecl *, BaseSubobject> Pair =
+ std::make_pair(RD, I->first);
+
+ SecondaryVirtualPointerIndices.insert(std::make_pair(Pair, I->second));
+ }
+
+ I = SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base));
+ assert(I != SecondaryVirtualPointerIndices.end() && "Did not find index!");
+
+ return I->second;
+}
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
new file mode 100644
index 0000000..17a0537
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.cpp
@@ -0,0 +1,733 @@
+//===--- CGVTables.cpp - Emit LLVM Code for C++ vtables -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Transforms/Utils/Cloning.h"
+#include <algorithm>
+#include <cstdio>
+
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenVTables::CodeGenVTables(CodeGenModule &CGM)
+ : CGM(CGM), VTContext(CGM.getContext()) { }
+
+bool CodeGenVTables::ShouldEmitVTableInThisTU(const CXXRecordDecl *RD) {
+ assert(RD->isDynamicClass() && "Non dynamic classes have no VTable.");
+
+ TemplateSpecializationKind TSK = RD->getTemplateSpecializationKind();
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
+ return false;
+
+ const CXXMethodDecl *KeyFunction = CGM.getContext().getKeyFunction(RD);
+ if (!KeyFunction)
+ return true;
+
+ // Itanium C++ ABI, 5.2.6 Instantiated Templates:
+ // An instantiation of a class template requires:
+ // - In the object where instantiated, the virtual table...
+ if (TSK == TSK_ImplicitInstantiation ||
+ TSK == TSK_ExplicitInstantiationDefinition)
+ return true;
+
+ // If we're building with optimization, we always emit VTables since that
+ // allows for virtual function calls to be devirtualized.
+ // (We don't want to do this in -fapple-kext mode however).
+ if (CGM.getCodeGenOpts().OptimizationLevel && !CGM.getLangOpts().AppleKext)
+ return true;
+
+ return KeyFunction->hasBody();
+}
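+
+// Summarizing the checks above: explicit instantiation declarations never get
+// a vtable here, implicit/explicit instantiation definitions and classes with
+// no key function always do, optimized non-kext builds emit vtables eagerly to
+// enable devirtualization, and otherwise the vtable is emitted only in the TU
+// that provides a body for the key function.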
+
+llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
+ const ThunkInfo &Thunk) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+ // Compute the mangled name.
+ SmallString<256> Name;
+ llvm::raw_svector_ostream Out(Name);
+ if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD))
+ getCXXABI().getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(),
+ Thunk.This, Out);
+ else
+ getCXXABI().getMangleContext().mangleThunk(MD, Thunk, Out);
+ Out.flush();
+
+ llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(GD);
+ return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true);
+}
+
+static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
+ llvm::Value *Ptr,
+ int64_t NonVirtualAdjustment,
+ int64_t VirtualAdjustment) {
+ if (!NonVirtualAdjustment && !VirtualAdjustment)
+ return Ptr;
+
+ llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
+ llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
+
+ if (NonVirtualAdjustment) {
+ // Do the non-virtual adjustment.
+ V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
+ }
+
+ if (VirtualAdjustment) {
+ llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ // Do the virtual adjustment.
+ llvm::Value *VTablePtrPtr =
+ CGF.Builder.CreateBitCast(V, Int8PtrTy->getPointerTo());
+
+ llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
+
+ llvm::Value *OffsetPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
+
+ OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
+
+ // Load the adjustment offset from the vtable.
+ llvm::Value *Offset = CGF.Builder.CreateLoad(OffsetPtr);
+
+ // Adjust our pointer.
+ V = CGF.Builder.CreateInBoundsGEP(V, Offset);
+ }
+
+ // Cast back to the original type.
+ return CGF.Builder.CreateBitCast(V, Ptr->getType());
+}
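+
+// Roughly (illustrative only): the non-virtual part of the adjustment becomes
+// a constant inbounds GEP on the i8* 'this' pointer, and the virtual part
+// loads the vtable pointer, loads the ptrdiff_t offset stored at
+// VirtualAdjustment bytes from the address point, and applies that loaded
+// offset with a second GEP before casting back to the original pointer type.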
+
+static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk, llvm::Function *Fn) {
+ CGM.setGlobalVisibility(Fn, MD);
+
+ if (!CGM.getCodeGenOpts().HiddenWeakVTables)
+ return;
+
+ // If the thunk has weak/linkonce linkage, but the function must be
+ // emitted in every translation unit that references it, then we can
+ // emit its thunks with hidden visibility, since its thunks must be
+ // emitted when the function is.
+
+ // This follows CodeGenModule::setTypeVisibility; see the comments
+ // there for explanation.
+
+ if ((Fn->getLinkage() != llvm::GlobalVariable::LinkOnceODRLinkage &&
+ Fn->getLinkage() != llvm::GlobalVariable::WeakODRLinkage) ||
+ Fn->getVisibility() != llvm::GlobalVariable::DefaultVisibility)
+ return;
+
+ if (MD->getExplicitVisibility())
+ return;
+
+ switch (MD->getTemplateSpecializationKind()) {
+ case TSK_ExplicitInstantiationDefinition:
+ case TSK_ExplicitInstantiationDeclaration:
+ return;
+
+ case TSK_Undeclared:
+ break;
+
+ case TSK_ExplicitSpecialization:
+ case TSK_ImplicitInstantiation:
+ if (!CGM.getCodeGenOpts().HiddenWeakTemplateVTables)
+ return;
+ break;
+ }
+
+ // If there's an explicit definition, and that definition is
+ // out-of-line, then we can't assume that all users will have a
+ // definition to emit.
+ const FunctionDecl *Def = 0;
+ if (MD->hasBody(Def) && Def->isOutOfLine())
+ return;
+
+ Fn->setVisibility(llvm::GlobalValue::HiddenVisibility);
+}
+
+#ifndef NDEBUG
+static bool similar(const ABIArgInfo &infoL, CanQualType typeL,
+ const ABIArgInfo &infoR, CanQualType typeR) {
+ return (infoL.getKind() == infoR.getKind() &&
+ (typeL == typeR ||
+ (isa<PointerType>(typeL) && isa<PointerType>(typeR)) ||
+ (isa<ReferenceType>(typeL) && isa<ReferenceType>(typeR))));
+}
+#endif
+
+static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
+ QualType ResultType, RValue RV,
+ const ThunkInfo &Thunk) {
+ // Emit the return adjustment.
+ bool NullCheckValue = !ResultType->isReferenceType();
+
+ llvm::BasicBlock *AdjustNull = 0;
+ llvm::BasicBlock *AdjustNotNull = 0;
+ llvm::BasicBlock *AdjustEnd = 0;
+
+ llvm::Value *ReturnValue = RV.getScalarVal();
+
+ if (NullCheckValue) {
+ AdjustNull = CGF.createBasicBlock("adjust.null");
+ AdjustNotNull = CGF.createBasicBlock("adjust.notnull");
+ AdjustEnd = CGF.createBasicBlock("adjust.end");
+
+ llvm::Value *IsNull = CGF.Builder.CreateIsNull(ReturnValue);
+ CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
+ CGF.EmitBlock(AdjustNotNull);
+ }
+
+ ReturnValue = PerformTypeAdjustment(CGF, ReturnValue,
+ Thunk.Return.NonVirtual,
+ Thunk.Return.VBaseOffsetOffset);
+
+ if (NullCheckValue) {
+ CGF.Builder.CreateBr(AdjustEnd);
+ CGF.EmitBlock(AdjustNull);
+ CGF.Builder.CreateBr(AdjustEnd);
+ CGF.EmitBlock(AdjustEnd);
+
+ llvm::PHINode *PHI = CGF.Builder.CreatePHI(ReturnValue->getType(), 2);
+ PHI->addIncoming(ReturnValue, AdjustNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
+ AdjustNull);
+ ReturnValue = PHI;
+ }
+
+ return RValue::get(ReturnValue);
+}
+
+// This function does roughly the same thing as GenerateThunk, but in a
+// very different way, so that va_start and va_end work correctly.
+// FIXME: This function assumes "this" is the first non-sret LLVM argument of
+// a function, and that there is an alloca built in the entry block
+// for all accesses to "this".
+// FIXME: This function assumes there is only one "ret" statement per function.
+// FIXME: Cloning isn't correct in the presence of indirect goto!
+// FIXME: This implementation of thunks bloats codesize by duplicating the
+// function definition. There are alternatives:
+// 1. Add some sort of stub support to LLVM for cases where we can
+// do a this adjustment, then a sibcall.
+// 2. We could transform the definition to take a va_list instead of an
+// actual variable argument list, then have the thunks (including a
+// no-op thunk for the regular definition) call va_start/va_end.
+// There's a bit of per-call overhead for this solution, but it's
+// better for codesize if the definition is long.
+void CodeGenFunction::GenerateVarArgsThunk(
+ llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
+ GlobalDecl GD, const ThunkInfo &Thunk) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ QualType ResultType = FPT->getResultType();
+
+ // Get the original function
+ assert(FnInfo.isVariadic());
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(FnInfo);
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
+ llvm::Function *BaseFn = cast<llvm::Function>(Callee);
+
+ // Clone to thunk.
+ llvm::Function *NewFn = llvm::CloneFunction(BaseFn);
+ CGM.getModule().getFunctionList().push_back(NewFn);
+ Fn->replaceAllUsesWith(NewFn);
+ NewFn->takeName(Fn);
+ Fn->eraseFromParent();
+ Fn = NewFn;
+
+ // "Initialize" CGF (minimally).
+ CurFn = Fn;
+
+ // Get the "this" value
+ llvm::Function::arg_iterator AI = Fn->arg_begin();
+ if (CGM.ReturnTypeUsesSRet(FnInfo))
+ ++AI;
+
+ // Find the first store of "this", which will be to the alloca associated
+ // with "this".
+ llvm::Value *ThisPtr = &*AI;
+ llvm::BasicBlock *EntryBB = Fn->begin();
+ llvm::Instruction *ThisStore = 0;
+ for (llvm::BasicBlock::iterator I = EntryBB->begin(), E = EntryBB->end();
+ I != E; I++) {
+ if (isa<llvm::StoreInst>(I) && I->getOperand(0) == ThisPtr) {
+ ThisStore = cast<llvm::StoreInst>(I);
+ break;
+ }
+ }
+ assert(ThisStore && "Store of this should be in entry block?");
+ // Adjust "this", if necessary.
+ Builder.SetInsertPoint(ThisStore);
+ llvm::Value *AdjustedThisPtr =
+ PerformTypeAdjustment(*this, ThisPtr,
+ Thunk.This.NonVirtual,
+ Thunk.This.VCallOffsetOffset);
+ ThisStore->setOperand(0, AdjustedThisPtr);
+
+ if (!Thunk.Return.isEmpty()) {
+ // Fix up the returned value, if necessary.
+ for (llvm::Function::iterator I = Fn->begin(), E = Fn->end(); I != E; I++) {
+ llvm::Instruction *T = I->getTerminator();
+ if (isa<llvm::ReturnInst>(T)) {
+ RValue RV = RValue::get(T->getOperand(0));
+ T->eraseFromParent();
+ Builder.SetInsertPoint(&*I);
+ RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);
+ Builder.CreateRet(RV.getScalarVal());
+ break;
+ }
+ }
+ }
+}
+
+void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
+ GlobalDecl GD, const ThunkInfo &Thunk) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ QualType ResultType = FPT->getResultType();
+ QualType ThisType = MD->getThisType(getContext());
+
+ FunctionArgList FunctionArgs;
+
+ // FIXME: It would be nice if more of this code could be shared with
+ // CodeGenFunction::GenerateCode.
+
+ // Create the implicit 'this' parameter declaration.
+ CurGD = GD;
+ CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResultType, FunctionArgs);
+
+ // Add the rest of the parameters.
+ for (FunctionDecl::param_const_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I) {
+ ParmVarDecl *Param = *I;
+
+ FunctionArgs.push_back(Param);
+ }
+
+ StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
+ SourceLocation());
+
+ CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
+ CXXThisValue = CXXABIThisValue;
+
+ // Adjust the 'this' pointer if necessary.
+ llvm::Value *AdjustedThisPtr =
+ PerformTypeAdjustment(*this, LoadCXXThis(),
+ Thunk.This.NonVirtual,
+ Thunk.This.VCallOffsetOffset);
+
+ CallArgList CallArgs;
+
+ // Add our adjusted 'this' pointer.
+ CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);
+
+ // Add the rest of the parameters.
+ for (FunctionDecl::param_const_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I) {
+ ParmVarDecl *param = *I;
+ EmitDelegateCallArg(CallArgs, param);
+ }
+
+ // Get our callee.
+ llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD));
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
+
+#ifndef NDEBUG
+ const CGFunctionInfo &CallFnInfo =
+ CGM.getTypes().arrangeFunctionCall(ResultType, CallArgs, FPT->getExtInfo(),
+ RequiredArgs::forPrototypePlus(FPT, 1));
+ assert(CallFnInfo.getRegParm() == FnInfo.getRegParm() &&
+ CallFnInfo.isNoReturn() == FnInfo.isNoReturn() &&
+ CallFnInfo.getCallingConvention() == FnInfo.getCallingConvention());
+ assert(similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
+ FnInfo.getReturnInfo(), FnInfo.getReturnType()));
+ assert(CallFnInfo.arg_size() == FnInfo.arg_size());
+ for (unsigned i = 0, e = FnInfo.arg_size(); i != e; ++i)
+ assert(similar(CallFnInfo.arg_begin()[i].info,
+ CallFnInfo.arg_begin()[i].type,
+ FnInfo.arg_begin()[i].info, FnInfo.arg_begin()[i].type));
+#endif
+
+ // Determine whether we have a return value slot to use.
+ ReturnValueSlot Slot;
+ if (!ResultType->isVoidType() &&
+ FnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ hasAggregateLLVMType(CurFnInfo->getReturnType()))
+ Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
+
+ // Now emit our call.
+ RValue RV = EmitCall(FnInfo, Callee, Slot, CallArgs, MD);
+
+ if (!Thunk.Return.isEmpty())
+ RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);
+
+ if (!ResultType->isVoidType() && Slot.isNull())
+ CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);
+
+ FinishFunction();
+
+ // Set the right linkage.
+ CGM.setFunctionLinkage(MD, Fn);
+
+ // Set the right visibility.
+ setThunkVisibility(CGM, MD, Thunk, Fn);
+}
+
+void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
+ bool UseAvailableExternallyLinkage)
+{
+ const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeGlobalDeclaration(GD);
+
+ // FIXME: re-use FnInfo in this computation.
+ llvm::Constant *Entry = CGM.GetAddrOfThunk(GD, Thunk);
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast);
+ Entry = CE->getOperand(0);
+ }
+
+ // There's already a declaration with the same name; check whether it has the
+ // same type or whether we need to replace it.
+ if (cast<llvm::GlobalValue>(Entry)->getType()->getElementType() !=
+ CGM.getTypes().GetFunctionTypeForVTable(GD)) {
+ llvm::GlobalValue *OldThunkFn = cast<llvm::GlobalValue>(Entry);
+
+ // If the types mismatch then we have to rewrite the definition.
+ assert(OldThunkFn->isDeclaration() &&
+ "Shouldn't replace non-declaration");
+
+ // Remove the name from the old thunk function and get a new thunk.
+ OldThunkFn->setName(StringRef());
+ Entry = CGM.GetAddrOfThunk(GD, Thunk);
+
+ // If needed, replace the old thunk with a bitcast.
+ if (!OldThunkFn->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(Entry, OldThunkFn->getType());
+ OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ // Remove the old thunk.
+ OldThunkFn->eraseFromParent();
+ }
+
+ llvm::Function *ThunkFn = cast<llvm::Function>(Entry);
+
+ if (!ThunkFn->isDeclaration()) {
+ if (UseAvailableExternallyLinkage) {
+ // There is already a thunk emitted for this function, do nothing.
+ return;
+ }
+
+ // If a function has a body, it should have available_externally linkage.
+ assert(ThunkFn->hasAvailableExternallyLinkage() &&
+ "Function should have available_externally linkage!");
+
+ // Change the linkage.
+ CGM.setFunctionLinkage(cast<CXXMethodDecl>(GD.getDecl()), ThunkFn);
+ return;
+ }
+
+ if (ThunkFn->isVarArg()) {
+ // Varargs thunks are special; we can't just generate a call because
+ // we can't copy the varargs. Our implementation is rather
+ // expensive/sucky at the moment, so don't generate the thunk unless
+ // we have to.
+ // FIXME: Do something better here; GenerateVarArgsThunk is extremely ugly.
+ if (!UseAvailableExternallyLinkage)
+ CodeGenFunction(CGM).GenerateVarArgsThunk(ThunkFn, FnInfo, GD, Thunk);
+ } else {
+ // Normal thunk body generation.
+ CodeGenFunction(CGM).GenerateThunk(ThunkFn, FnInfo, GD, Thunk);
+ }
+
+ if (UseAvailableExternallyLinkage)
+ ThunkFn->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
+}
+
+void CodeGenVTables::MaybeEmitThunkAvailableExternally(GlobalDecl GD,
+ const ThunkInfo &Thunk) {
+ // We only want to do this when building with optimizations.
+ if (!CGM.getCodeGenOpts().OptimizationLevel)
+ return;
+
+ // We can't emit thunks for member functions with incomplete types.
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ if (!CGM.getTypes().isFuncTypeConvertible(
+ cast<FunctionType>(MD->getType().getTypePtr())))
+ return;
+
+ EmitThunk(GD, Thunk, /*UseAvailableExternallyLinkage=*/true);
+}
+
+void CodeGenVTables::EmitThunks(GlobalDecl GD)
+{
+ const CXXMethodDecl *MD =
+ cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl();
+
+ // We don't need to generate thunks for the base destructor.
+ if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
+ return;
+
+ const VTableContext::ThunkInfoVectorTy *ThunkInfoVector =
+ VTContext.getThunkInfo(MD);
+ if (!ThunkInfoVector)
+ return;
+
+ for (unsigned I = 0, E = ThunkInfoVector->size(); I != E; ++I)
+ EmitThunk(GD, (*ThunkInfoVector)[I],
+ /*UseAvailableExternallyLinkage=*/false);
+}
+
+llvm::Constant *
+CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
+ const VTableComponent *Components,
+ unsigned NumComponents,
+ const VTableLayout::VTableThunkTy *VTableThunks,
+ unsigned NumVTableThunks) {
+ SmallVector<llvm::Constant *, 64> Inits;
+
+ llvm::Type *Int8PtrTy = CGM.Int8PtrTy;
+
+ llvm::Type *PtrDiffTy =
+ CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+
+ QualType ClassType = CGM.getContext().getTagDeclType(RD);
+ llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(ClassType);
+
+ unsigned NextVTableThunkIndex = 0;
+
+ llvm::Constant* PureVirtualFn = 0;
+
+ for (unsigned I = 0; I != NumComponents; ++I) {
+ VTableComponent Component = Components[I];
+
+ llvm::Constant *Init = 0;
+
+ switch (Component.getKind()) {
+ case VTableComponent::CK_VCallOffset:
+ Init = llvm::ConstantInt::get(PtrDiffTy,
+ Component.getVCallOffset().getQuantity());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VTableComponent::CK_VBaseOffset:
+ Init = llvm::ConstantInt::get(PtrDiffTy,
+ Component.getVBaseOffset().getQuantity());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VTableComponent::CK_OffsetToTop:
+ Init = llvm::ConstantInt::get(PtrDiffTy,
+ Component.getOffsetToTop().getQuantity());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VTableComponent::CK_RTTI:
+ Init = llvm::ConstantExpr::getBitCast(RTTI, Int8PtrTy);
+ break;
+ case VTableComponent::CK_FunctionPointer:
+ case VTableComponent::CK_CompleteDtorPointer:
+ case VTableComponent::CK_DeletingDtorPointer: {
+ GlobalDecl GD;
+
+ // Get the right global decl.
+ switch (Component.getKind()) {
+ default:
+ llvm_unreachable("Unexpected vtable component kind");
+ case VTableComponent::CK_FunctionPointer:
+ GD = Component.getFunctionDecl();
+ break;
+ case VTableComponent::CK_CompleteDtorPointer:
+ GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Complete);
+ break;
+ case VTableComponent::CK_DeletingDtorPointer:
+ GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Deleting);
+ break;
+ }
+
+ if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
+ // We have a pure virtual member function.
+ if (!PureVirtualFn) {
+ llvm::FunctionType *Ty =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
+ PureVirtualFn =
+ CGM.CreateRuntimeFunction(Ty, "__cxa_pure_virtual");
+ PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
+ Int8PtrTy);
+ }
+
+ Init = PureVirtualFn;
+ } else {
+ // Check if we should use a thunk.
+ if (NextVTableThunkIndex < NumVTableThunks &&
+ VTableThunks[NextVTableThunkIndex].first == I) {
+ const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;
+
+ MaybeEmitThunkAvailableExternally(GD, Thunk);
+ Init = CGM.GetAddrOfThunk(GD, Thunk);
+
+ NextVTableThunkIndex++;
+ } else {
+ llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVTable(GD);
+
+ Init = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
+ }
+
+ Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
+ }
+ break;
+ }
+
+ case VTableComponent::CK_UnusedFunctionPointer:
+ Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
+ break;
+ };
+
+ Inits.push_back(Init);
+ }
+
+ llvm::ArrayType *ArrayType = llvm::ArrayType::get(Int8PtrTy, NumComponents);
+ return llvm::ConstantArray::get(ArrayType, Inits);
+}
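+
+// Sketch of the resulting initializer (Itanium layout, illustrative): for a
+// simple dynamic class such as
+//   struct A { virtual void f(); virtual ~A(); };
+// the i8* array holds, in order, the offset-to-top (0), the RTTI descriptor
+// _ZTI1A, and bitcast pointers to A::f, the complete destructor and the
+// deleting destructor, with pure virtuals redirected to __cxa_pure_virtual
+// and thunk addresses substituted where VTableThunks indicates.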
+
+llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
+ llvm::GlobalVariable *&VTable = VTables[RD];
+ if (VTable)
+ return VTable;
+
+ // We may need to generate a definition for this vtable.
+ if (ShouldEmitVTableInThisTU(RD))
+ CGM.DeferredVTables.push_back(RD);
+
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().mangleCXXVTable(RD, Out);
+ Out.flush();
+ StringRef Name = OutName.str();
+
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(CGM.Int8PtrTy,
+ VTContext.getVTableLayout(RD).getNumVTableComponents());
+
+ VTable =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
+ llvm::GlobalValue::ExternalLinkage);
+ VTable->setUnnamedAddr(true);
+ return VTable;
+}
+
+void
+CodeGenVTables::EmitVTableDefinition(llvm::GlobalVariable *VTable,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD) {
+ const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
+
+ // Create and set the initializer.
+ llvm::Constant *Init =
+ CreateVTableInitializer(RD,
+ VTLayout.vtable_component_begin(),
+ VTLayout.getNumVTableComponents(),
+ VTLayout.vtable_thunk_begin(),
+ VTLayout.getNumVTableThunks());
+ VTable->setInitializer(Init);
+
+ // Set the correct linkage.
+ VTable->setLinkage(Linkage);
+
+ // Set the right visibility.
+ CGM.setTypeVisibility(VTable, RD, CodeGenModule::TVK_ForVTable);
+}
+
+llvm::GlobalVariable *
+CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
+ const BaseSubobject &Base,
+ bool BaseIsVirtual,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ VTableAddressPointsMapTy& AddressPoints) {
+ OwningPtr<VTableLayout> VTLayout(
+ VTContext.createConstructionVTableLayout(Base.getBase(),
+ Base.getBaseOffset(),
+ BaseIsVirtual, RD));
+
+ // Add the address points.
+ AddressPoints = VTLayout->getAddressPoints();
+
+ // Get the mangled construction vtable name.
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ CGM.getCXXABI().getMangleContext().
+ mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(), Base.getBase(),
+ Out);
+ Out.flush();
+ StringRef Name = OutName.str();
+
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(CGM.Int8PtrTy, VTLayout->getNumVTableComponents());
+
+ // Create the variable that will hold the construction vtable.
+ llvm::GlobalVariable *VTable =
+ CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType, Linkage);
+ CGM.setTypeVisibility(VTable, RD, CodeGenModule::TVK_ForConstructionVTable);
+
+ // V-tables are always unnamed_addr.
+ VTable->setUnnamedAddr(true);
+
+ // Create and set the initializer.
+ llvm::Constant *Init =
+ CreateVTableInitializer(Base.getBase(),
+ VTLayout->vtable_component_begin(),
+ VTLayout->getNumVTableComponents(),
+ VTLayout->vtable_thunk_begin(),
+ VTLayout->getNumVTableThunks());
+ VTable->setInitializer(Init);
+
+ return VTable;
+}
+
+void
+CodeGenVTables::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD) {
+ llvm::GlobalVariable *VTable = GetAddrOfVTable(RD);
+ if (VTable->hasInitializer())
+ return;
+
+ EmitVTableDefinition(VTable, Linkage, RD);
+
+ if (RD->getNumVBases()) {
+ llvm::GlobalVariable *VTT = GetAddrOfVTT(RD);
+ EmitVTTDefinition(VTT, Linkage, RD);
+ }
+
+ // If this is the magic class __cxxabiv1::__fundamental_type_info,
+ // we will emit the typeinfo for the fundamental types. This is the
+ // same behaviour as GCC.
+ const DeclContext *DC = RD->getDeclContext();
+ if (RD->getIdentifier() &&
+ RD->getIdentifier()->isStr("__fundamental_type_info") &&
+ isa<NamespaceDecl>(DC) &&
+ cast<NamespaceDecl>(DC)->getIdentifier() &&
+ cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
+ DC->getParent()->isTranslationUnit())
+ CGM.EmitFundamentalRTTIDescriptors();
+}
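For illustration only (hypothetical example, not part of the imported sources): under the Itanium ABI, the definition of a class's key function is what normally drives GenerateClassData, which emits the vtable and, for classes with virtual bases, the VTT as well.

struct A { virtual void f(); };
struct B : virtual A { virtual void g(); };  // 'g' is B's key function

void A::f() {}
void B::g() {}  // defining the key function makes this TU emit B's vtable,
                // and B's VTT too, since B has a virtual base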
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h
new file mode 100644
index 0000000..828330e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGVTables.h
@@ -0,0 +1,141 @@
+//===--- CGVTables.h - Emit LLVM Code for C++ vtables -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of virtual tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGVTABLE_H
+#define CLANG_CODEGEN_CGVTABLE_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/GlobalVariable.h"
+#include "clang/Basic/ABI.h"
+#include "clang/AST/BaseSubobject.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/GlobalDecl.h"
+#include "clang/AST/VTableBuilder.h"
+
+namespace clang {
+ class CXXRecordDecl;
+
+namespace CodeGen {
+ class CodeGenModule;
+
+class CodeGenVTables {
+ CodeGenModule &CGM;
+
+ VTableContext VTContext;
+
+ /// VTables - All the vtables which have been defined.
+ llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
+
+ /// VTableAddressPointsMapTy - Address points for a single vtable.
+ typedef llvm::DenseMap<BaseSubobject, uint64_t> VTableAddressPointsMapTy;
+
+ typedef std::pair<const CXXRecordDecl *, BaseSubobject> BaseSubobjectPairTy;
+ typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t> SubVTTIndiciesMapTy;
+
+ /// SubVTTIndicies - Contains indices into the various sub-VTTs.
+ SubVTTIndiciesMapTy SubVTTIndicies;
+
+ typedef llvm::DenseMap<BaseSubobjectPairTy, uint64_t>
+ SecondaryVirtualPointerIndicesMapTy;
+
+ /// SecondaryVirtualPointerIndices - Contains the secondary virtual pointer
+ /// indices.
+ SecondaryVirtualPointerIndicesMapTy SecondaryVirtualPointerIndices;
+
+ /// EmitThunk - Emit a single thunk.
+ void EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
+ bool UseAvailableExternallyLinkage);
+
+ /// MaybeEmitThunkAvailableExternally - Try to emit the given thunk with
+ /// available_externally linkage to allow for inlining of thunks.
+ /// This will be done iff optimizations are enabled and the member function
+ /// doesn't contain any incomplete types.
+ void MaybeEmitThunkAvailableExternally(GlobalDecl GD, const ThunkInfo &Thunk);
+
+ /// CreateVTableInitializer - Create a vtable initializer for the given record
+ /// decl.
+ /// \param Components - The vtable components; this is really an array of
+ /// VTableComponents.
+ llvm::Constant *CreateVTableInitializer(const CXXRecordDecl *RD,
+ const VTableComponent *Components,
+ unsigned NumComponents,
+ const VTableLayout::VTableThunkTy *VTableThunks,
+ unsigned NumVTableThunks);
+
+public:
+ CodeGenVTables(CodeGenModule &CGM);
+
+ VTableContext &getVTableContext() { return VTContext; }
+
+ /// \brief True if the VTable of this record must be emitted in the
+ /// translation unit.
+ bool ShouldEmitVTableInThisTU(const CXXRecordDecl *RD);
+
+ /// needsVTTParameter - Return whether the given global decl needs a VTT
+ /// parameter, which it does if it's a base constructor or destructor with
+ /// virtual bases.
+ static bool needsVTTParameter(GlobalDecl GD);
+
+ /// getSubVTTIndex - Return the index of the sub-VTT for the base class of the
+ /// given record decl.
+ uint64_t getSubVTTIndex(const CXXRecordDecl *RD, BaseSubobject Base);
+
+ /// getSecondaryVirtualPointerIndex - Return the index in the VTT where the
+ /// virtual pointer for the given subobject is located.
+ uint64_t getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
+ BaseSubobject Base);
+
+ /// getAddressPoint - Get the address point of the given subobject in the
+ /// class decl.
+ uint64_t getAddressPoint(BaseSubobject Base, const CXXRecordDecl *RD);
+
+ /// GetAddrOfVTable - Get the address of the vtable for the given record decl.
+ llvm::GlobalVariable *GetAddrOfVTable(const CXXRecordDecl *RD);
+
+ /// EmitVTableDefinition - Emit the definition of the given vtable.
+ void EmitVTableDefinition(llvm::GlobalVariable *VTable,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD);
+
+ /// GenerateConstructionVTable - Generate a construction vtable for the given
+ /// base subobject.
+ llvm::GlobalVariable *
+ GenerateConstructionVTable(const CXXRecordDecl *RD, const BaseSubobject &Base,
+ bool BaseIsVirtual,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ VTableAddressPointsMapTy& AddressPoints);
+
+  /// GetAddrOfVTT - Get the address of the VTT for the given record decl.
+ llvm::GlobalVariable *GetAddrOfVTT(const CXXRecordDecl *RD);
+
+  /// EmitVTTDefinition - Emit the definition of the given VTT.
+ void EmitVTTDefinition(llvm::GlobalVariable *VTT,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD);
+
+ /// EmitThunks - Emit the associated thunks for the given global decl.
+ void EmitThunks(GlobalDecl GD);
+
+ /// GenerateClassData - Generate all the class data required to be generated
+ /// upon definition of a KeyFunction. This includes the vtable, the
+ /// rtti data structure and the VTT.
+ ///
+ /// \param Linkage - The desired linkage of the vtable, the RTTI and the VTT.
+ void GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD);
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+#endif
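A hypothetical driver for the interface above (the caller, include paths, and linkage choice are assumptions, shown only as a sketch):

// Assumes CGVTables.h and the LLVM GlobalValue headers are visible.
void emitClassDataFor(clang::CodeGen::CodeGenVTables &VTables,
                      const clang::CXXRecordDecl *RD) {
  // Emit the vtable, RTTI and VTT for RD if this TU is responsible for them.
  if (VTables.ShouldEmitVTableInThisTU(RD))
    VTables.GenerateClassData(llvm::GlobalValue::WeakODRLinkage, RD);
}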
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h b/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h
new file mode 100644
index 0000000..ac704e7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CGValue.h
@@ -0,0 +1,451 @@
+//===-- CGValue.h - LLVM CodeGen wrappers for llvm::Value* ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes implement wrappers around llvm::Value in order to
+// fully represent the range of values for C L- and R- values.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGVALUE_H
+#define CLANG_CODEGEN_CGVALUE_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/Type.h"
+
+namespace llvm {
+ class Constant;
+ class Value;
+}
+
+namespace clang {
+namespace CodeGen {
+ class AggValueSlot;
+ class CGBitFieldInfo;
+
+/// RValue - This trivial value class is used to represent the result of an
+/// expression that is evaluated. It can be one of three things: either a
+/// simple LLVM SSA value, a pair of SSA values for complex numbers, or the
+/// address of an aggregate value in memory.
+class RValue {
+ enum Flavor { Scalar, Complex, Aggregate };
+
+ // Stores first value and flavor.
+ llvm::PointerIntPair<llvm::Value *, 2, Flavor> V1;
+ // Stores second value and volatility.
+ llvm::PointerIntPair<llvm::Value *, 1, bool> V2;
+
+public:
+ bool isScalar() const { return V1.getInt() == Scalar; }
+ bool isComplex() const { return V1.getInt() == Complex; }
+ bool isAggregate() const { return V1.getInt() == Aggregate; }
+
+ bool isVolatileQualified() const { return V2.getInt(); }
+
+ /// getScalarVal() - Return the Value* of this scalar value.
+ llvm::Value *getScalarVal() const {
+ assert(isScalar() && "Not a scalar!");
+ return V1.getPointer();
+ }
+
+ /// getComplexVal - Return the real/imag components of this complex value.
+ ///
+ std::pair<llvm::Value *, llvm::Value *> getComplexVal() const {
+ return std::make_pair(V1.getPointer(), V2.getPointer());
+ }
+
+ /// getAggregateAddr() - Return the Value* of the address of the aggregate.
+ llvm::Value *getAggregateAddr() const {
+ assert(isAggregate() && "Not an aggregate!");
+ return V1.getPointer();
+ }
+
+ static RValue get(llvm::Value *V) {
+ RValue ER;
+ ER.V1.setPointer(V);
+ ER.V1.setInt(Scalar);
+ ER.V2.setInt(false);
+ return ER;
+ }
+ static RValue getComplex(llvm::Value *V1, llvm::Value *V2) {
+ RValue ER;
+ ER.V1.setPointer(V1);
+ ER.V2.setPointer(V2);
+ ER.V1.setInt(Complex);
+ ER.V2.setInt(false);
+ return ER;
+ }
+ static RValue getComplex(const std::pair<llvm::Value *, llvm::Value *> &C) {
+ return getComplex(C.first, C.second);
+ }
+ // FIXME: Aggregate rvalues need to retain information about whether they are
+ // volatile or not. Remove default to find all places that probably get this
+ // wrong.
+ static RValue getAggregate(llvm::Value *V, bool Volatile = false) {
+ RValue ER;
+ ER.V1.setPointer(V);
+ ER.V1.setInt(Aggregate);
+ ER.V2.setInt(Volatile);
+ return ER;
+ }
+};
+
+
+/// LValue - This represents an lvalue reference. Because C/C++ allow
+/// bitfields, this is not a simple LLVM pointer; it may be a pointer plus a
+/// bitrange.
+class LValue {
+ enum {
+ Simple, // This is a normal l-value, use getAddress().
+ VectorElt, // This is a vector element l-value (V[i]), use getVector*
+ BitField, // This is a bitfield l-value, use getBitfield*.
+ ExtVectorElt // This is an extended vector subset, use getExtVectorComp
+ } LVType;
+
+ llvm::Value *V;
+
+ union {
+ // Index into a vector subscript: V[i]
+ llvm::Value *VectorIdx;
+
+ // ExtVector element subset: V.xyx
+ llvm::Constant *VectorElts;
+
+ // BitField start bit and size
+ const CGBitFieldInfo *BitFieldInfo;
+ };
+
+ QualType Type;
+
+ // 'const' is unused here
+ Qualifiers Quals;
+
+ // The alignment to use when accessing this lvalue. (For vector elements,
+ // this is the alignment of the whole vector.)
+ unsigned short Alignment;
+
+ // objective-c's ivar
+ bool Ivar:1;
+
+ // objective-c's ivar is an array
+ bool ObjIsArray:1;
+
+ // LValue is non-gc'able for any reason, including being a parameter or local
+ // variable.
+ bool NonGC: 1;
+
+ // Lvalue is a global reference of an objective-c object
+ bool GlobalObjCRef : 1;
+
+ // Lvalue is a thread local reference
+ bool ThreadLocalRef : 1;
+
+ Expr *BaseIvarExp;
+
+ /// TBAAInfo - TBAA information to attach to dereferences of this LValue.
+ llvm::MDNode *TBAAInfo;
+
+private:
+ void Initialize(QualType Type, Qualifiers Quals,
+ CharUnits Alignment = CharUnits(),
+ llvm::MDNode *TBAAInfo = 0) {
+ this->Type = Type;
+ this->Quals = Quals;
+ this->Alignment = Alignment.getQuantity();
+ assert(this->Alignment == Alignment.getQuantity() &&
+ "Alignment exceeds allowed max!");
+
+ // Initialize Objective-C flags.
+ this->Ivar = this->ObjIsArray = this->NonGC = this->GlobalObjCRef = false;
+ this->ThreadLocalRef = false;
+ this->BaseIvarExp = 0;
+ this->TBAAInfo = TBAAInfo;
+ }
+
+public:
+ bool isSimple() const { return LVType == Simple; }
+ bool isVectorElt() const { return LVType == VectorElt; }
+ bool isBitField() const { return LVType == BitField; }
+ bool isExtVectorElt() const { return LVType == ExtVectorElt; }
+
+ bool isVolatileQualified() const { return Quals.hasVolatile(); }
+ bool isRestrictQualified() const { return Quals.hasRestrict(); }
+ unsigned getVRQualifiers() const {
+ return Quals.getCVRQualifiers() & ~Qualifiers::Const;
+ }
+
+ QualType getType() const { return Type; }
+
+ Qualifiers::ObjCLifetime getObjCLifetime() const {
+ return Quals.getObjCLifetime();
+ }
+
+ bool isObjCIvar() const { return Ivar; }
+ void setObjCIvar(bool Value) { Ivar = Value; }
+
+ bool isObjCArray() const { return ObjIsArray; }
+ void setObjCArray(bool Value) { ObjIsArray = Value; }
+
+ bool isNonGC () const { return NonGC; }
+ void setNonGC(bool Value) { NonGC = Value; }
+
+ bool isGlobalObjCRef() const { return GlobalObjCRef; }
+ void setGlobalObjCRef(bool Value) { GlobalObjCRef = Value; }
+
+ bool isThreadLocalRef() const { return ThreadLocalRef; }
+ void setThreadLocalRef(bool Value) { ThreadLocalRef = Value;}
+
+ bool isObjCWeak() const {
+ return Quals.getObjCGCAttr() == Qualifiers::Weak;
+ }
+ bool isObjCStrong() const {
+ return Quals.getObjCGCAttr() == Qualifiers::Strong;
+ }
+
+ bool isVolatile() const {
+ return Quals.hasVolatile();
+ }
+
+ Expr *getBaseIvarExp() const { return BaseIvarExp; }
+ void setBaseIvarExp(Expr *V) { BaseIvarExp = V; }
+
+ llvm::MDNode *getTBAAInfo() const { return TBAAInfo; }
+ void setTBAAInfo(llvm::MDNode *N) { TBAAInfo = N; }
+
+ const Qualifiers &getQuals() const { return Quals; }
+ Qualifiers &getQuals() { return Quals; }
+
+ unsigned getAddressSpace() const { return Quals.getAddressSpace(); }
+
+ CharUnits getAlignment() const { return CharUnits::fromQuantity(Alignment); }
+ void setAlignment(CharUnits A) { Alignment = A.getQuantity(); }
+
+ // simple lvalue
+ llvm::Value *getAddress() const { assert(isSimple()); return V; }
+ void setAddress(llvm::Value *address) {
+ assert(isSimple());
+ V = address;
+ }
+
+ // vector elt lvalue
+ llvm::Value *getVectorAddr() const { assert(isVectorElt()); return V; }
+ llvm::Value *getVectorIdx() const { assert(isVectorElt()); return VectorIdx; }
+
+ // extended vector elements.
+ llvm::Value *getExtVectorAddr() const { assert(isExtVectorElt()); return V; }
+ llvm::Constant *getExtVectorElts() const {
+ assert(isExtVectorElt());
+ return VectorElts;
+ }
+
+ // bitfield lvalue
+ llvm::Value *getBitFieldBaseAddr() const {
+ assert(isBitField());
+ return V;
+ }
+ const CGBitFieldInfo &getBitFieldInfo() const {
+ assert(isBitField());
+ return *BitFieldInfo;
+ }
+
+ static LValue MakeAddr(llvm::Value *address, QualType type,
+ CharUnits alignment, ASTContext &Context,
+ llvm::MDNode *TBAAInfo = 0) {
+ Qualifiers qs = type.getQualifiers();
+ qs.setObjCGCAttr(Context.getObjCGCAttrKind(type));
+
+ LValue R;
+ R.LVType = Simple;
+ R.V = address;
+ R.Initialize(type, qs, alignment, TBAAInfo);
+ return R;
+ }
+
+ static LValue MakeVectorElt(llvm::Value *Vec, llvm::Value *Idx,
+ QualType type, CharUnits Alignment) {
+ LValue R;
+ R.LVType = VectorElt;
+ R.V = Vec;
+ R.VectorIdx = Idx;
+ R.Initialize(type, type.getQualifiers(), Alignment);
+ return R;
+ }
+
+ static LValue MakeExtVectorElt(llvm::Value *Vec, llvm::Constant *Elts,
+ QualType type, CharUnits Alignment) {
+ LValue R;
+ R.LVType = ExtVectorElt;
+ R.V = Vec;
+ R.VectorElts = Elts;
+ R.Initialize(type, type.getQualifiers(), Alignment);
+ return R;
+ }
+
+ /// \brief Create a new object to represent a bit-field access.
+ ///
+ /// \param BaseValue - The base address of the structure containing the
+ /// bit-field.
+ /// \param Info - The information describing how to perform the bit-field
+ /// access.
+ static LValue MakeBitfield(llvm::Value *BaseValue,
+ const CGBitFieldInfo &Info,
+ QualType type) {
+ LValue R;
+ R.LVType = BitField;
+ R.V = BaseValue;
+ R.BitFieldInfo = &Info;
+ R.Initialize(type, type.getQualifiers());
+ return R;
+ }
+
+ RValue asAggregateRValue() const {
+    // FIXME: Alignment
+ return RValue::getAggregate(getAddress(), isVolatileQualified());
+ }
+};
+
+/// An aggregate value slot.
+class AggValueSlot {
+ /// The address.
+ llvm::Value *Addr;
+
+ // Qualifiers
+ Qualifiers Quals;
+
+ unsigned short Alignment;
+
+ /// DestructedFlag - This is set to true if some external code is
+ /// responsible for setting up a destructor for the slot. Otherwise
+ /// the code which constructs it should push the appropriate cleanup.
+ bool DestructedFlag : 1;
+
+ /// ObjCGCFlag - This is set to true if writing to the memory in the
+ /// slot might require calling an appropriate Objective-C GC
+ /// barrier. The exact interaction here is unnecessarily mysterious.
+ bool ObjCGCFlag : 1;
+
+ /// ZeroedFlag - This is set to true if the memory in the slot is
+ /// known to be zero before the assignment into it. This means that
+ /// zero fields don't need to be set.
+ bool ZeroedFlag : 1;
+
+ /// AliasedFlag - This is set to true if the slot might be aliased
+ /// and it's not undefined behavior to access it through such an
+ /// alias. Note that it's always undefined behavior to access a C++
+ /// object that's under construction through an alias derived from
+ /// outside the construction process.
+ ///
+ /// This flag controls whether calls that produce the aggregate
+ /// value may be evaluated directly into the slot, or whether they
+ /// must be evaluated into an unaliased temporary and then memcpy'ed
+ /// over. Since it's invalid in general to memcpy a non-POD C++
+ /// object, it's important that this flag never be set when
+ /// evaluating an expression which constructs such an object.
+ bool AliasedFlag : 1;
+
+public:
+ enum IsAliased_t { IsNotAliased, IsAliased };
+ enum IsDestructed_t { IsNotDestructed, IsDestructed };
+ enum IsZeroed_t { IsNotZeroed, IsZeroed };
+ enum NeedsGCBarriers_t { DoesNotNeedGCBarriers, NeedsGCBarriers };
+
+ /// ignored - Returns an aggregate value slot indicating that the
+ /// aggregate value is being ignored.
+ static AggValueSlot ignored() {
+ return forAddr(0, CharUnits(), Qualifiers(), IsNotDestructed,
+ DoesNotNeedGCBarriers, IsNotAliased);
+ }
+
+ /// forAddr - Make a slot for an aggregate value.
+ ///
+ /// \param quals - The qualifiers that dictate how the slot should
+  /// be initialized. Only 'volatile' and the Objective-C lifetime
+ /// qualifiers matter.
+ ///
+ /// \param isDestructed - true if something else is responsible
+ /// for calling destructors on this object
+  /// \param needsGC - true if the slot is potentially located somewhere
+  /// for which Objective-C GC barrier calls should be emitted
+ static AggValueSlot forAddr(llvm::Value *addr, CharUnits align,
+ Qualifiers quals,
+ IsDestructed_t isDestructed,
+ NeedsGCBarriers_t needsGC,
+ IsAliased_t isAliased,
+ IsZeroed_t isZeroed = IsNotZeroed) {
+ AggValueSlot AV;
+ AV.Addr = addr;
+ AV.Alignment = align.getQuantity();
+ AV.Quals = quals;
+ AV.DestructedFlag = isDestructed;
+ AV.ObjCGCFlag = needsGC;
+ AV.ZeroedFlag = isZeroed;
+ AV.AliasedFlag = isAliased;
+ return AV;
+ }
+
+ static AggValueSlot forLValue(LValue LV, IsDestructed_t isDestructed,
+ NeedsGCBarriers_t needsGC,
+ IsAliased_t isAliased,
+ IsZeroed_t isZeroed = IsNotZeroed) {
+ return forAddr(LV.getAddress(), LV.getAlignment(),
+ LV.getQuals(), isDestructed, needsGC, isAliased, isZeroed);
+ }
+
+ IsDestructed_t isExternallyDestructed() const {
+ return IsDestructed_t(DestructedFlag);
+ }
+ void setExternallyDestructed(bool destructed = true) {
+ DestructedFlag = destructed;
+ }
+
+ Qualifiers getQualifiers() const { return Quals; }
+
+ bool isVolatile() const {
+ return Quals.hasVolatile();
+ }
+
+ Qualifiers::ObjCLifetime getObjCLifetime() const {
+ return Quals.getObjCLifetime();
+ }
+
+ NeedsGCBarriers_t requiresGCollection() const {
+ return NeedsGCBarriers_t(ObjCGCFlag);
+ }
+
+ llvm::Value *getAddr() const {
+ return Addr;
+ }
+
+ bool isIgnored() const {
+ return Addr == 0;
+ }
+
+ CharUnits getAlignment() const {
+ return CharUnits::fromQuantity(Alignment);
+ }
+
+ IsAliased_t isPotentiallyAliased() const {
+ return IsAliased_t(AliasedFlag);
+ }
+
+ // FIXME: Alignment?
+ RValue asRValue() const {
+ return RValue::getAggregate(getAddr(), isVolatile());
+ }
+
+ void setZeroed(bool V = true) { ZeroedFlag = V; }
+ IsZeroed_t isZeroed() const {
+ return IsZeroed_t(ZeroedFlag);
+ }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
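A brief usage sketch for these wrappers (illustrative; the incoming address, type, alignment, and ASTContext are assumed to come from surrounding IR generation):

// Assumes CGValue.h is included.
static clang::CodeGen::AggValueSlot
makeDestSlot(llvm::Value *Addr, clang::QualType Ty, clang::CharUnits Align,
             clang::ASTContext &Ctx) {
  using namespace clang::CodeGen;
  // Wrap the raw address as a simple LValue, then as an aggregate slot that
  // needs no GC barriers and whose destruction is the caller's responsibility.
  LValue Dest = LValue::MakeAddr(Addr, Ty, Align, Ctx);
  return AggValueSlot::forLValue(Dest, AggValueSlot::IsNotDestructed,
                                 AggValueSlot::DoesNotNeedGCBarriers,
                                 AggValueSlot::IsNotAliased);
}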
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
new file mode 100644
index 0000000..dd32167
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenAction.cpp
@@ -0,0 +1,448 @@
+//===--- CodeGenAction.cpp - LLVM Code Generation Frontend Action ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/CodeGenAction.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/CodeGen/BackendUtil.h"
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Linker.h"
+#include "llvm/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/Support/IRReader.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/Timer.h"
+using namespace clang;
+using namespace llvm;
+
+namespace clang {
+ class BackendConsumer : public ASTConsumer {
+ virtual void anchor();
+ DiagnosticsEngine &Diags;
+ BackendAction Action;
+ const CodeGenOptions &CodeGenOpts;
+ const TargetOptions &TargetOpts;
+ const LangOptions &LangOpts;
+ raw_ostream *AsmOutStream;
+ ASTContext *Context;
+
+ Timer LLVMIRGeneration;
+
+ OwningPtr<CodeGenerator> Gen;
+
+ OwningPtr<llvm::Module> TheModule, LinkModule;
+
+ public:
+ BackendConsumer(BackendAction action, DiagnosticsEngine &_Diags,
+ const CodeGenOptions &compopts,
+ const TargetOptions &targetopts,
+ const LangOptions &langopts,
+ bool TimePasses,
+ const std::string &infile,
+ llvm::Module *LinkModule,
+ raw_ostream *OS,
+ LLVMContext &C) :
+ Diags(_Diags),
+ Action(action),
+ CodeGenOpts(compopts),
+ TargetOpts(targetopts),
+ LangOpts(langopts),
+ AsmOutStream(OS),
+ LLVMIRGeneration("LLVM IR Generation Time"),
+ Gen(CreateLLVMCodeGen(Diags, infile, compopts, C)),
+ LinkModule(LinkModule) {
+ llvm::TimePassesIsEnabled = TimePasses;
+ }
+
+ llvm::Module *takeModule() { return TheModule.take(); }
+ llvm::Module *takeLinkModule() { return LinkModule.take(); }
+
+ virtual void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+ Gen->HandleCXXStaticMemberVarInstantiation(VD);
+ }
+
+ virtual void Initialize(ASTContext &Ctx) {
+ Context = &Ctx;
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->Initialize(Ctx);
+
+ TheModule.reset(Gen->GetModule());
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ virtual bool HandleTopLevelDecl(DeclGroupRef D) {
+ PrettyStackTraceDecl CrashInfo(*D.begin(), SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of declaration");
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->HandleTopLevelDecl(D);
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+
+ return true;
+ }
+
+ virtual void HandleTranslationUnit(ASTContext &C) {
+ {
+ PrettyStackTraceString CrashInfo("Per-file LLVM IR generation");
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.startTimer();
+
+ Gen->HandleTranslationUnit(C);
+
+ if (llvm::TimePassesIsEnabled)
+ LLVMIRGeneration.stopTimer();
+ }
+
+ // Silently ignore if we weren't initialized for some reason.
+ if (!TheModule)
+ return;
+
+ // Make sure IR generation is happy with the module. This is released by
+ // the module provider.
+ llvm::Module *M = Gen->ReleaseModule();
+ if (!M) {
+ // The module has been released by IR gen on failures, do not double
+ // free.
+ TheModule.take();
+ return;
+ }
+
+ assert(TheModule.get() == M &&
+ "Unexpected module change during IR generation");
+
+ // Link LinkModule into this module if present, preserving its validity.
+ if (LinkModule) {
+ std::string ErrorMsg;
+ if (Linker::LinkModules(M, LinkModule.get(), Linker::PreserveSource,
+ &ErrorMsg)) {
+ Diags.Report(diag::err_fe_cannot_link_module)
+ << LinkModule->getModuleIdentifier() << ErrorMsg;
+ return;
+ }
+ }
+
+ // Install an inline asm handler so that diagnostics get printed through
+ // our diagnostics hooks.
+ LLVMContext &Ctx = TheModule->getContext();
+ LLVMContext::InlineAsmDiagHandlerTy OldHandler =
+ Ctx.getInlineAsmDiagnosticHandler();
+ void *OldContext = Ctx.getInlineAsmDiagnosticContext();
+ Ctx.setInlineAsmDiagnosticHandler(InlineAsmDiagHandler, this);
+
+ EmitBackendOutput(Diags, CodeGenOpts, TargetOpts, LangOpts,
+ TheModule.get(), Action, AsmOutStream);
+
+ Ctx.setInlineAsmDiagnosticHandler(OldHandler, OldContext);
+ }
+
+ virtual void HandleTagDeclDefinition(TagDecl *D) {
+ PrettyStackTraceDecl CrashInfo(D, SourceLocation(),
+ Context->getSourceManager(),
+ "LLVM IR generation of declaration");
+ Gen->HandleTagDeclDefinition(D);
+ }
+
+ virtual void CompleteTentativeDefinition(VarDecl *D) {
+ Gen->CompleteTentativeDefinition(D);
+ }
+
+ virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) {
+ Gen->HandleVTable(RD, DefinitionRequired);
+ }
+
+ static void InlineAsmDiagHandler(const llvm::SMDiagnostic &SM,void *Context,
+ unsigned LocCookie) {
+ SourceLocation Loc = SourceLocation::getFromRawEncoding(LocCookie);
+ ((BackendConsumer*)Context)->InlineAsmDiagHandler2(SM, Loc);
+ }
+
+ void InlineAsmDiagHandler2(const llvm::SMDiagnostic &,
+ SourceLocation LocCookie);
+ };
+
+ void BackendConsumer::anchor() {}
+}
+
+/// ConvertBackendLocation - Convert a location in a temporary llvm::SourceMgr
+/// buffer to be a valid FullSourceLoc.
+static FullSourceLoc ConvertBackendLocation(const llvm::SMDiagnostic &D,
+ SourceManager &CSM) {
+ // Get both the clang and llvm source managers. The location is relative to
+  // a memory buffer that the LLVM Source Manager is handling; we need to add
+ // a copy to the Clang source manager.
+ const llvm::SourceMgr &LSM = *D.getSourceMgr();
+
+ // We need to copy the underlying LLVM memory buffer because llvm::SourceMgr
+ // already owns its one and clang::SourceManager wants to own its one.
+ const MemoryBuffer *LBuf =
+ LSM.getMemoryBuffer(LSM.FindBufferContainingLoc(D.getLoc()));
+
+ // Create the copy and transfer ownership to clang::SourceManager.
+ llvm::MemoryBuffer *CBuf =
+ llvm::MemoryBuffer::getMemBufferCopy(LBuf->getBuffer(),
+ LBuf->getBufferIdentifier());
+ FileID FID = CSM.createFileIDForMemBuffer(CBuf);
+
+ // Translate the offset into the file.
+ unsigned Offset = D.getLoc().getPointer() - LBuf->getBufferStart();
+ SourceLocation NewLoc =
+ CSM.getLocForStartOfFile(FID).getLocWithOffset(Offset);
+ return FullSourceLoc(NewLoc, CSM);
+}
+
+
+/// InlineAsmDiagHandler2 - This function is invoked when the backend hits an
+/// error parsing inline asm. The SMDiagnostic indicates the error relative to
+/// the temporary memory buffer that the inline asm parser has set up.
+void BackendConsumer::InlineAsmDiagHandler2(const llvm::SMDiagnostic &D,
+ SourceLocation LocCookie) {
+ // There are a couple of different kinds of errors we could get here. First,
+ // we re-format the SMDiagnostic in terms of a clang diagnostic.
+
+ // Strip "error: " off the start of the message string.
+ StringRef Message = D.getMessage();
+ if (Message.startswith("error: "))
+ Message = Message.substr(7);
+
+ // If the SMDiagnostic has an inline asm source location, translate it.
+ FullSourceLoc Loc;
+ if (D.getLoc() != SMLoc())
+ Loc = ConvertBackendLocation(D, Context->getSourceManager());
+
+
+ // If this problem has clang-level source location information, report the
+ // issue as being an error in the source with a note showing the instantiated
+ // code.
+ if (LocCookie.isValid()) {
+ Diags.Report(LocCookie, diag::err_fe_inline_asm).AddString(Message);
+
+ if (D.getLoc().isValid()) {
+ DiagnosticBuilder B = Diags.Report(Loc, diag::note_fe_inline_asm_here);
+ // Convert the SMDiagnostic ranges into SourceRange and attach them
+ // to the diagnostic.
+ for (unsigned i = 0, e = D.getRanges().size(); i != e; ++i) {
+ std::pair<unsigned, unsigned> Range = D.getRanges()[i];
+ unsigned Column = D.getColumnNo();
+ B << SourceRange(Loc.getLocWithOffset(Range.first - Column),
+ Loc.getLocWithOffset(Range.second - Column));
+ }
+ }
+ return;
+ }
+
+ // Otherwise, report the backend error as occurring in the generated .s file.
+ // If Loc is invalid, we still need to report the error, it just gets no
+ // location info.
+ Diags.Report(Loc, diag::err_fe_inline_asm).AddString(Message);
+}
+
+//
+
+CodeGenAction::CodeGenAction(unsigned _Act, LLVMContext *_VMContext)
+ : Act(_Act), LinkModule(0),
+ VMContext(_VMContext ? _VMContext : new LLVMContext),
+ OwnsVMContext(!_VMContext) {}
+
+CodeGenAction::~CodeGenAction() {
+ TheModule.reset();
+ if (OwnsVMContext)
+ delete VMContext;
+}
+
+bool CodeGenAction::hasIRSupport() const { return true; }
+
+void CodeGenAction::EndSourceFileAction() {
+ // If the consumer creation failed, do nothing.
+ if (!getCompilerInstance().hasASTConsumer())
+ return;
+
+ // If we were given a link module, release consumer's ownership of it.
+ if (LinkModule)
+ BEConsumer->takeLinkModule();
+
+ // Steal the module from the consumer.
+ TheModule.reset(BEConsumer->takeModule());
+}
+
+llvm::Module *CodeGenAction::takeModule() {
+ return TheModule.take();
+}
+
+llvm::LLVMContext *CodeGenAction::takeLLVMContext() {
+ OwnsVMContext = false;
+ return VMContext;
+}
+
+static raw_ostream *GetOutputStream(CompilerInstance &CI,
+ StringRef InFile,
+ BackendAction Action) {
+ switch (Action) {
+ case Backend_EmitAssembly:
+ return CI.createDefaultOutputFile(false, InFile, "s");
+ case Backend_EmitLL:
+ return CI.createDefaultOutputFile(false, InFile, "ll");
+ case Backend_EmitBC:
+ return CI.createDefaultOutputFile(true, InFile, "bc");
+ case Backend_EmitNothing:
+ return 0;
+ case Backend_EmitMCNull:
+ case Backend_EmitObj:
+ return CI.createDefaultOutputFile(true, InFile, "o");
+ }
+
+ llvm_unreachable("Invalid action!");
+}
+
+ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ BackendAction BA = static_cast<BackendAction>(Act);
+ OwningPtr<raw_ostream> OS(GetOutputStream(CI, InFile, BA));
+ if (BA != Backend_EmitNothing && !OS)
+ return 0;
+
+ llvm::Module *LinkModuleToUse = LinkModule;
+
+ // If we were not given a link module, and the user requested that one be
+ // loaded from bitcode, do so now.
+ const std::string &LinkBCFile = CI.getCodeGenOpts().LinkBitcodeFile;
+ if (!LinkModuleToUse && !LinkBCFile.empty()) {
+ std::string ErrorStr;
+
+ llvm::MemoryBuffer *BCBuf =
+ CI.getFileManager().getBufferForFile(LinkBCFile, &ErrorStr);
+ if (!BCBuf) {
+ CI.getDiagnostics().Report(diag::err_cannot_open_file)
+ << LinkBCFile << ErrorStr;
+ return 0;
+ }
+
+ LinkModuleToUse = getLazyBitcodeModule(BCBuf, *VMContext, &ErrorStr);
+ if (!LinkModuleToUse) {
+ CI.getDiagnostics().Report(diag::err_cannot_open_file)
+ << LinkBCFile << ErrorStr;
+ return 0;
+ }
+ }
+
+ BEConsumer =
+ new BackendConsumer(BA, CI.getDiagnostics(),
+ CI.getCodeGenOpts(), CI.getTargetOpts(),
+ CI.getLangOpts(),
+ CI.getFrontendOpts().ShowTimers, InFile,
+ LinkModuleToUse, OS.take(), *VMContext);
+ return BEConsumer;
+}
+
+void CodeGenAction::ExecuteAction() {
+ // If this is an IR file, we have to treat it specially.
+ if (getCurrentFileKind() == IK_LLVM_IR) {
+ BackendAction BA = static_cast<BackendAction>(Act);
+ CompilerInstance &CI = getCompilerInstance();
+ raw_ostream *OS = GetOutputStream(CI, getCurrentFile(), BA);
+ if (BA != Backend_EmitNothing && !OS)
+ return;
+
+ bool Invalid;
+ SourceManager &SM = CI.getSourceManager();
+ const llvm::MemoryBuffer *MainFile = SM.getBuffer(SM.getMainFileID(),
+ &Invalid);
+ if (Invalid)
+ return;
+
+ // FIXME: This is stupid, IRReader shouldn't take ownership.
+ llvm::MemoryBuffer *MainFileCopy =
+ llvm::MemoryBuffer::getMemBufferCopy(MainFile->getBuffer(),
+ getCurrentFile().c_str());
+
+ llvm::SMDiagnostic Err;
+ TheModule.reset(ParseIR(MainFileCopy, Err, *VMContext));
+ if (!TheModule) {
+ // Translate from the diagnostic info to the SourceManager location.
+ SourceLocation Loc = SM.translateFileLineCol(
+ SM.getFileEntryForID(SM.getMainFileID()), Err.getLineNo(),
+ Err.getColumnNo() + 1);
+
+ // Get a custom diagnostic for the error. We strip off a leading
+ // diagnostic code if there is one.
+ StringRef Msg = Err.getMessage();
+ if (Msg.startswith("error: "))
+ Msg = Msg.substr(7);
+
+ // Escape '%', which is interpreted as a format character.
+ llvm::SmallString<128> EscapedMessage;
+ for (unsigned i = 0, e = Msg.size(); i != e; ++i) {
+ if (Msg[i] == '%')
+ EscapedMessage += '%';
+ EscapedMessage += Msg[i];
+ }
+
+ unsigned DiagID = CI.getDiagnostics().getCustomDiagID(
+ DiagnosticsEngine::Error, EscapedMessage);
+
+ CI.getDiagnostics().Report(Loc, DiagID);
+ return;
+ }
+
+ EmitBackendOutput(CI.getDiagnostics(), CI.getCodeGenOpts(),
+ CI.getTargetOpts(), CI.getLangOpts(),
+ TheModule.get(),
+ BA, OS);
+ return;
+ }
+
+ // Otherwise follow the normal AST path.
+ this->ASTFrontendAction::ExecuteAction();
+}
+
+//
+
+void EmitAssemblyAction::anchor() { }
+EmitAssemblyAction::EmitAssemblyAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitAssembly, _VMContext) {}
+
+void EmitBCAction::anchor() { }
+EmitBCAction::EmitBCAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitBC, _VMContext) {}
+
+void EmitLLVMAction::anchor() { }
+EmitLLVMAction::EmitLLVMAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitLL, _VMContext) {}
+
+void EmitLLVMOnlyAction::anchor() { }
+EmitLLVMOnlyAction::EmitLLVMOnlyAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitNothing, _VMContext) {}
+
+void EmitCodeGenOnlyAction::anchor() { }
+EmitCodeGenOnlyAction::EmitCodeGenOnlyAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitMCNull, _VMContext) {}
+
+void EmitObjAction::anchor() { }
+EmitObjAction::EmitObjAction(llvm::LLVMContext *_VMContext)
+ : CodeGenAction(Backend_EmitObj, _VMContext) {}
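One way this action is typically embedded (hedged sketch; the CompilerInstance configuration is assumed to happen elsewhere):

// Run in-process IR generation and take ownership of the resulting module;
// returns 0 if the action fails.
static llvm::Module *emitModule(clang::CompilerInstance &CI,
                                llvm::LLVMContext &Ctx) {
  clang::EmitLLVMOnlyAction Act(&Ctx);
  if (!CI.ExecuteAction(Act))
    return 0;
  return Act.takeModule();  // see takeModule() above
}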
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
new file mode 100644
index 0000000..06e90b6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -0,0 +1,1144 @@
+//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the per-function state used while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include "CGCUDARuntime.h"
+#include "CGCXXABI.h"
+#include "CGDebugInfo.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Intrinsics.h"
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
+ : CodeGenTypeCache(cgm), CGM(cgm),
+ Target(CGM.getContext().getTargetInfo()),
+ Builder(cgm.getModule().getContext()),
+ AutoreleaseResult(false), BlockInfo(0), BlockPointer(0),
+ LambdaThisCaptureField(0), NormalCleanupDest(0), NextCleanupDestIndex(1),
+ FirstBlockInfo(0), EHResumeBlock(0), ExceptionSlot(0), EHSelectorSlot(0),
+ DebugInfo(0), DisableDebugInfo(false), DidCallStackSave(false),
+ IndirectBranch(0), SwitchInsn(0), CaseRangeBlock(0), UnreachableBlock(0),
+ CXXABIThisDecl(0), CXXABIThisValue(0), CXXThisValue(0), CXXVTTDecl(0),
+ CXXVTTValue(0), OutermostConditional(0), TerminateLandingPad(0),
+ TerminateHandler(0), TrapBB(0) {
+
+ CatchUndefined = getContext().getLangOpts().CatchUndefined;
+ CGM.getCXXABI().getMangleContext().startNewFunction();
+}
+
+CodeGenFunction::~CodeGenFunction() {
+ // If there are any unclaimed block infos, go ahead and destroy them
+ // now. This can happen if IR-gen gets clever and skips evaluating
+ // something.
+ if (FirstBlockInfo)
+ destroyBlockInfos(FirstBlockInfo);
+}
+
+
+llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
+ return CGM.getTypes().ConvertTypeForMem(T);
+}
+
+llvm::Type *CodeGenFunction::ConvertType(QualType T) {
+ return CGM.getTypes().ConvertType(T);
+}
+
+bool CodeGenFunction::hasAggregateLLVMType(QualType type) {
+ switch (type.getCanonicalType()->getTypeClass()) {
+#define TYPE(name, parent)
+#define ABSTRACT_TYPE(name, parent)
+#define NON_CANONICAL_TYPE(name, parent) case Type::name:
+#define DEPENDENT_TYPE(name, parent) case Type::name:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("non-canonical or dependent type in IR-generation");
+
+ case Type::Builtin:
+ case Type::Pointer:
+ case Type::BlockPointer:
+ case Type::LValueReference:
+ case Type::RValueReference:
+ case Type::MemberPointer:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ case Type::Enum:
+ case Type::ObjCObjectPointer:
+ return false;
+
+ // Complexes, arrays, records, and Objective-C objects.
+ case Type::Complex:
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::Record:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ return true;
+
+ // In IRGen, atomic types are just the underlying type
+ case Type::Atomic:
+ return hasAggregateLLVMType(type->getAs<AtomicType>()->getValueType());
+ }
+ llvm_unreachable("unknown type kind!");
+}
+
+void CodeGenFunction::EmitReturnBlock() {
+ // For cleanliness, we try to avoid emitting the return block for
+ // simple cases.
+ llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
+
+ if (CurBB) {
+ assert(!CurBB->getTerminator() && "Unexpected terminated block.");
+
+ // We have a valid insert point, reuse it if it is empty or there are no
+ // explicit jumps to the return block.
+ if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
+ ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
+ delete ReturnBlock.getBlock();
+ } else
+ EmitBlock(ReturnBlock.getBlock());
+ return;
+ }
+
+ // Otherwise, if the return block is the target of a single direct
+ // branch then we can just put the code in that block instead. This
+ // cleans up functions which started with a unified return block.
+ if (ReturnBlock.getBlock()->hasOneUse()) {
+ llvm::BranchInst *BI =
+ dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->use_begin());
+ if (BI && BI->isUnconditional() &&
+ BI->getSuccessor(0) == ReturnBlock.getBlock()) {
+ // Reset insertion point, including debug location, and delete the branch.
+ Builder.SetCurrentDebugLocation(BI->getDebugLoc());
+ Builder.SetInsertPoint(BI->getParent());
+ BI->eraseFromParent();
+ delete ReturnBlock.getBlock();
+ return;
+ }
+ }
+
+ // FIXME: We are at an unreachable point, there is no reason to emit the block
+ // unless it has uses. However, we still need a place to put the debug
+ // region.end for now.
+
+ EmitBlock(ReturnBlock.getBlock());
+}
+
+static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
+ if (!BB) return;
+ if (!BB->use_empty())
+ return CGF.CurFn->getBasicBlockList().push_back(BB);
+ delete BB;
+}
+
+void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
+ assert(BreakContinueStack.empty() &&
+ "mismatched push/pop in break/continue stack!");
+
+ // Pop any cleanups that might have been associated with the
+ // parameters. Do this in whatever block we're currently in; it's
+ // important to do this before we enter the return block or return
+ // edges will be *really* confused.
+ if (EHStack.stable_begin() != PrologueCleanupDepth)
+ PopCleanupBlocks(PrologueCleanupDepth);
+
+ // Emit function epilog (to return).
+ EmitReturnBlock();
+
+ if (ShouldInstrumentFunction())
+ EmitFunctionInstrumentation("__cyg_profile_func_exit");
+
+ // Emit debug descriptor for function end.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ DI->setLocation(EndLoc);
+ DI->EmitFunctionEnd(Builder);
+ }
+
+ EmitFunctionEpilog(*CurFnInfo);
+ EmitEndEHSpec(CurCodeDecl);
+
+ assert(EHStack.empty() &&
+ "did not remove all scopes from cleanup stack!");
+
+ // If someone did an indirect goto, emit the indirect goto block at the end of
+ // the function.
+ if (IndirectBranch) {
+ EmitBlock(IndirectBranch->getParent());
+ Builder.ClearInsertionPoint();
+ }
+
+ // Remove the AllocaInsertPt instruction, which is just a convenience for us.
+ llvm::Instruction *Ptr = AllocaInsertPt;
+ AllocaInsertPt = 0;
+ Ptr->eraseFromParent();
+
+ // If someone took the address of a label but never did an indirect goto, we
+ // made a zero entry PHI node, which is illegal, zap it now.
+ if (IndirectBranch) {
+ llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
+ if (PN->getNumIncomingValues() == 0) {
+ PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
+ PN->eraseFromParent();
+ }
+ }
+
+ EmitIfUsed(*this, EHResumeBlock);
+ EmitIfUsed(*this, TerminateLandingPad);
+ EmitIfUsed(*this, TerminateHandler);
+ EmitIfUsed(*this, UnreachableBlock);
+
+ if (CGM.getCodeGenOpts().EmitDeclMetadata)
+ EmitDeclMetadata();
+}
+
+/// ShouldInstrumentFunction - Return true if the current function should be
+/// instrumented with __cyg_profile_func_* calls
+bool CodeGenFunction::ShouldInstrumentFunction() {
+ if (!CGM.getCodeGenOpts().InstrumentFunctions)
+ return false;
+ if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
+ return false;
+ return true;
+}
+
+/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
+/// instrumentation function with the current function and the call site, if
+/// function instrumentation is enabled.
+void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
+ // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
+ llvm::PointerType *PointerTy = Int8PtrTy;
+ llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
+ llvm::FunctionType *FunctionTy =
+ llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);
+
+ llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
+ llvm::CallInst *CallSite = Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
+ llvm::ConstantInt::get(Int32Ty, 0),
+ "callsite");
+
+ Builder.CreateCall2(F,
+ llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
+ CallSite);
+}
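The hooks these calls target follow the usual -finstrument-functions contract; for illustration (user-provided code, not part of this change):

extern "C" {
  // The hooks must not be instrumented themselves, or they would recurse.
  __attribute__((no_instrument_function))
  void __cyg_profile_func_enter(void *this_fn, void *call_site) {
    (void)this_fn; (void)call_site;  // e.g. append to a trace buffer
  }
  __attribute__((no_instrument_function))
  void __cyg_profile_func_exit(void *this_fn, void *call_site) {
    (void)this_fn; (void)call_site;  // matching exit record
  }
}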
+
+void CodeGenFunction::EmitMCountInstrumentation() {
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+
+ llvm::Constant *MCountFn = CGM.CreateRuntimeFunction(FTy,
+ Target.getMCountName());
+ Builder.CreateCall(MCountFn);
+}
+
+void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
+ llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
+ const FunctionArgList &Args,
+ SourceLocation StartLoc) {
+ const Decl *D = GD.getDecl();
+
+ DidCallStackSave = false;
+ CurCodeDecl = CurFuncDecl = D;
+ FnRetTy = RetTy;
+ CurFn = Fn;
+ CurFnInfo = &FnInfo;
+ assert(CurFn->isDeclaration() && "Function already has body?");
+
+ // Pass inline keyword to optimizer if it appears explicitly on any
+ // declaration.
+ if (!CGM.getCodeGenOpts().NoInline)
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ for (FunctionDecl::redecl_iterator RI = FD->redecls_begin(),
+ RE = FD->redecls_end(); RI != RE; ++RI)
+ if (RI->isInlineSpecified()) {
+ Fn->addFnAttr(llvm::Attribute::InlineHint);
+ break;
+ }
+
+ if (getContext().getLangOpts().OpenCL) {
+ // Add metadata for a kernel function.
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ if (FD->hasAttr<OpenCLKernelAttr>()) {
+ llvm::LLVMContext &Context = getLLVMContext();
+ llvm::NamedMDNode *OpenCLMetadata =
+ CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
+
+ llvm::Value *Op = Fn;
+ OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Op));
+ }
+ }
+
+ llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
+
+ // Create a marker to make it easy to insert allocas into the entryblock
+ // later. Don't create this with the builder, because we don't want it
+ // folded.
+ llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
+ AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
+ if (Builder.isNamePreserving())
+ AllocaInsertPt->setName("allocapt");
+
+ ReturnBlock = getJumpDestInCurrentScope("return");
+
+ Builder.SetInsertPoint(EntryBB);
+
+ // Emit subprogram debug descriptor.
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ unsigned NumArgs = 0;
+ QualType *ArgsArray = new QualType[Args.size()];
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i) {
+ ArgsArray[NumArgs++] = (*i)->getType();
+ }
+
+ QualType FnType =
+ getContext().getFunctionType(RetTy, ArgsArray, NumArgs,
+ FunctionProtoType::ExtProtoInfo());
+
+ delete[] ArgsArray;
+
+ DI->setLocation(StartLoc);
+ DI->EmitFunctionStart(GD, FnType, CurFn, Builder);
+ }
+
+ if (ShouldInstrumentFunction())
+ EmitFunctionInstrumentation("__cyg_profile_func_enter");
+
+ if (CGM.getCodeGenOpts().InstrumentForProfiling)
+ EmitMCountInstrumentation();
+
+ if (RetTy->isVoidType()) {
+ // Void type; nothing to return.
+ ReturnValue = 0;
+ } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ hasAggregateLLVMType(CurFnInfo->getReturnType())) {
+ // Indirect aggregate return; emit returned value directly into sret slot.
+ // This reduces code size, and affects correctness in C++.
+ ReturnValue = CurFn->arg_begin();
+ } else {
+ ReturnValue = CreateIRTemp(RetTy, "retval");
+
+ // Tell the epilog emitter to autorelease the result. We do this
+ // now so that various specialized functions can suppress it
+ // during their IR-generation.
+ if (getLangOpts().ObjCAutoRefCount &&
+ !CurFnInfo->isReturnsRetained() &&
+ RetTy->isObjCRetainableType())
+ AutoreleaseResult = true;
+ }
+
+ EmitStartEHSpec(CurCodeDecl);
+
+ PrologueCleanupDepth = EHStack.stable_begin();
+ EmitFunctionProlog(*CurFnInfo, CurFn, Args);
+
+ if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
+ CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
+ if (MD->getParent()->isLambda() &&
+ MD->getOverloadedOperator() == OO_Call) {
+ // We're in a lambda; figure out the captures.
+ MD->getParent()->getCaptureFields(LambdaCaptureFields,
+ LambdaThisCaptureField);
+ if (LambdaThisCaptureField) {
+ // If this lambda captures this, load it.
+ LValue ThisLValue = EmitLValueForField(CXXABIThisValue,
+ LambdaThisCaptureField, 0);
+ CXXThisValue = EmitLoadOfLValue(ThisLValue).getScalarVal();
+ }
+ } else {
+ // Not in a lambda; just use 'this' from the method.
+ // FIXME: Should we generate a new load for each use of 'this'? The
+ // fast register allocator would be happier...
+ CXXThisValue = CXXABIThisValue;
+ }
+ }
+
+ // If any of the arguments have a variably modified type, make sure to
+ // emit the type size.
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ i != e; ++i) {
+ QualType Ty = (*i)->getType();
+
+ if (Ty->isVariablyModifiedType())
+ EmitVariablyModifiedType(Ty);
+ }
+ // Emit a location at the end of the prologue.
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitLocation(Builder, StartLoc);
+}
+
+void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
+ const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
+ assert(FD->getBody());
+ EmitStmt(FD->getBody());
+}
+
+/// Tries to mark the given function nounwind based on the
+/// non-existence of any throwing calls within it. We believe this is
+/// lightweight enough to do at -O0.
+static void TryMarkNoThrow(llvm::Function *F) {
+ // LLVM treats 'nounwind' on a function as part of the type, so we
+ // can't do this on functions that can be overwritten.
+ if (F->mayBeOverridden()) return;
+
+ for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
+ for (llvm::BasicBlock::iterator
+ BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
+ if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI)) {
+ if (!Call->doesNotThrow())
+ return;
+ } else if (isa<llvm::ResumeInst>(&*BI)) {
+ return;
+ }
+ F->setDoesNotThrow(true);
+}
+
+void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+
+ // Check if we should generate debug info for this function.
+ if (CGM.getModuleDebugInfo() && !FD->hasAttr<NoDebugAttr>())
+ DebugInfo = CGM.getModuleDebugInfo();
+
+ FunctionArgList Args;
+ QualType ResTy = FD->getResultType();
+
+ CurGD = GD;
+ if (isa<CXXMethodDecl>(FD) && cast<CXXMethodDecl>(FD)->isInstance())
+ CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResTy, Args);
+
+ for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i)
+ Args.push_back(FD->getParamDecl(i));
+
+ SourceRange BodyRange;
+ if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
+
+ // Emit the standard function prologue.
+ StartFunction(GD, ResTy, Fn, FnInfo, Args, BodyRange.getBegin());
+
+ // Generate the body of the function.
+ if (isa<CXXDestructorDecl>(FD))
+ EmitDestructorBody(Args);
+ else if (isa<CXXConstructorDecl>(FD))
+ EmitConstructorBody(Args);
+ else if (getContext().getLangOpts().CUDA &&
+ !CGM.getCodeGenOpts().CUDAIsDevice &&
+ FD->hasAttr<CUDAGlobalAttr>())
+ CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
+ else if (isa<CXXConversionDecl>(FD) &&
+ cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
+ // The lambda conversion to block pointer is special; the semantics can't be
+ // expressed in the AST, so IRGen needs to special-case it.
+ EmitLambdaToBlockPointerBody(Args);
+ } else if (isa<CXXMethodDecl>(FD) &&
+ cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
+ // The lambda "__invoke" function is special, because it forwards or
+ // clones the body of the function call operator (but is actually static).
+ EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
+ }
+ else
+ EmitFunctionBody(Args);
+
+ // Emit the standard function epilogue.
+ FinishFunction(BodyRange.getEnd());
+
+ // If we haven't marked the function nothrow through other means, do
+ // a quick pass now to see if we can.
+ if (!CurFn->doesNotThrow())
+ TryMarkNoThrow(CurFn);
+}
+
+/// ContainsLabel - Return true if the statement contains a label in it. If
+/// this statement is not executed normally, and it contains no label, then we
+/// can just remove the code.
+bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
+ // Null statement, not a label!
+ if (S == 0) return false;
+
+ // If this is a label, we have to emit the code, consider something like:
+ // if (0) { ... foo: bar(); } goto foo;
+ //
+ // TODO: If anyone cared, we could track __label__'s, since we know that you
+ // can't jump to one from outside their declared region.
+ if (isa<LabelStmt>(S))
+ return true;
+
+ // If this is a case/default statement, and we haven't seen a switch, we have
+ // to emit the code.
+ if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
+ return true;
+
+ // If this is a switch statement, we want to ignore cases below it.
+ if (isa<SwitchStmt>(S))
+ IgnoreCaseStmts = true;
+
+ // Scan subexpressions for verboten labels.
+ for (Stmt::const_child_range I = S->children(); I; ++I)
+ if (ContainsLabel(*I, IgnoreCaseStmts))
+ return true;
+
+ return false;
+}
+
+/// containsBreak - Return true if the statement contains a break out of it.
+/// If the statement (recursively) contains a switch or loop with a break
+/// inside of it, this is fine.
+bool CodeGenFunction::containsBreak(const Stmt *S) {
+ // Null statement, not a label!
+ if (S == 0) return false;
+
+ // If this is a switch or loop that defines its own break scope, then we can
+ // include it and anything inside of it.
+ if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
+ isa<ForStmt>(S))
+ return false;
+
+ if (isa<BreakStmt>(S))
+ return true;
+
+ // Scan subexpressions for verboten breaks.
+ for (Stmt::const_child_range I = S->children(); I; ++I)
+ if (containsBreak(*I))
+ return true;
+
+ return false;
+}
+
+
+/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+/// to a constant, or if it does but contains a label, return false. If it
+/// constant folds return true and set the boolean result in Result.
+bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
+ bool &ResultBool) {
+ llvm::APInt ResultInt;
+ if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
+ return false;
+
+ ResultBool = ResultInt.getBoolValue();
+ return true;
+}
+
+/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+/// to a constant, or if it does but contains a label, return false. If it
+/// constant folds return true and set the folded value.
+bool CodeGenFunction::
+ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &ResultInt) {
+ // FIXME: Rename and handle conversion of other evaluatable things
+ // to bool.
+ llvm::APSInt Int;
+ if (!Cond->EvaluateAsInt(Int, getContext()))
+ return false; // Not foldable, not integer or not fully evaluatable.
+
+ if (CodeGenFunction::ContainsLabel(Cond))
+ return false; // Contains a label.
+
+ ResultInt = Int;
+ return true;
+}
+
+
+
+/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
+/// statement) to the specified blocks. Based on the condition, this might try
+/// to simplify the codegen of the conditional based on the branch.
+///
+void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
+ llvm::BasicBlock *TrueBlock,
+ llvm::BasicBlock *FalseBlock) {
+ Cond = Cond->IgnoreParens();
+
+ if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
+ // Handle X && Y in a condition.
+ if (CondBOp->getOpcode() == BO_LAnd) {
+ // If we have "1 && X", simplify the code. "0 && X" would have constant
+ // folded if the case was simple enough.
+ bool ConstantBool = false;
+ if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
+ ConstantBool) {
+ // br(1 && X) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ }
+
+ // If we have "X && 1", simplify the code to use an uncond branch.
+ // "X && 0" would have been constant folded to 0.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
+ ConstantBool) {
+ // br(X && 1) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
+ }
+
+ // Emit the LHS as a conditional. If the LHS conditional is false, we
+ // want to jump to the FalseBlock.
+ llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
+
+ ConditionalEvaluation eval(*this);
+ EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock);
+ EmitBlock(LHSTrue);
+
+ // Any temporaries created here are conditional.
+ eval.begin(*this);
+ EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ eval.end(*this);
+
+ return;
+ }
+
+ if (CondBOp->getOpcode() == BO_LOr) {
+ // If we have "0 || X", simplify the code. "1 || X" would have constant
+ // folded if the case was simple enough.
+ bool ConstantBool = false;
+ if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
+ !ConstantBool) {
+ // br(0 || X) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ }
+
+ // If we have "X || 0", simplify the code to use an uncond branch.
+ // "X || 1" would have been constant folded to 1.
+ if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
+ !ConstantBool) {
+ // br(X || 0) -> br(X).
+ return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock);
+ }
+
+ // Emit the LHS as a conditional. If the LHS conditional is true, we
+ // want to jump to the TrueBlock.
+ llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
+
+ ConditionalEvaluation eval(*this);
+ EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse);
+ EmitBlock(LHSFalse);
+
+ // Any temporaries created here are conditional.
+ eval.begin(*this);
+ EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock);
+ eval.end(*this);
+
+ return;
+ }
+ }
+
+ if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
+ // br(!x, t, f) -> br(x, f, t)
+ if (CondUOp->getOpcode() == UO_LNot)
+ return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock);
+ }
+
+ if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
+ // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
+ llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
+ llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
+
+ ConditionalEvaluation cond(*this);
+ EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock);
+
+ cond.begin(*this);
+ EmitBlock(LHSBlock);
+ EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock);
+ cond.end(*this);
+
+ cond.begin(*this);
+ EmitBlock(RHSBlock);
+ EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock);
+ cond.end(*this);
+
+ return;
+ }
+
+ // Emit the code with the fully general case.
+ llvm::Value *CondV = EvaluateExprAsBool(Cond);
+ Builder.CreateCondBr(CondV, TrueBlock, FalseBlock);
+}
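+
+// For illustration: given "if (a && b)" where neither operand constant-folds,
+// the code above emits roughly
+//
+//     br i1 %a, label %land.lhs.true, label %if.false
+//   land.lhs.true:
+//     br i1 %b, label %if.true, label %if.false
+//
+// (block names for the true/false destinations are whatever TrueBlock and
+// FalseBlock were passed in), so the RHS is only evaluated when the LHS is
+// true, and any temporaries the RHS creates are marked conditional via
+// ConditionalEvaluation. The "||" and "?:" cases follow the same pattern with
+// the block roles adjusted.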
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified stmt yet.
+void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError) {
+ CGM.ErrorUnsupported(S, Type, OmitOnError);
+}
+
+/// emitNonZeroVLAInit - Emit the "zero" initialization of a
+/// variable-length array whose elements have a non-zero bit-pattern.
+///
+/// \param baseType - the base element type of the array
+/// \param dest - a char* pointing to the destination memory
+/// \param src - a char* pointing to the bit-pattern for a single
+/// base element of the array
+/// \param sizeInChars - the total size of the VLA, in chars
+static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
+ llvm::Value *dest, llvm::Value *src,
+ llvm::Value *sizeInChars) {
+ std::pair<CharUnits,CharUnits> baseSizeAndAlign
+ = CGF.getContext().getTypeInfoInChars(baseType);
+
+ CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::Value *baseSizeInChars
+ = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());
+
+ llvm::Type *i8p = Builder.getInt8PtrTy();
+
+ llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
+ llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");
+
+ llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
+ llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
+ llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
+
+ // Make a loop over the VLA. C99 guarantees that the VLA element
+ // count must be nonzero.
+ CGF.EmitBlock(loopBB);
+
+ llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur");
+ cur->addIncoming(begin, originBB);
+
+ // memcpy the individual element bit-pattern.
+ Builder.CreateMemCpy(cur, src, baseSizeInChars,
+ baseSizeAndAlign.second.getQuantity(),
+ /*volatile*/ false);
+
+ // Go to the next element.
+  llvm::Value *next = Builder.CreateGEP(cur, baseSizeInChars, "vla.next");
+
+ // Leave if that's the end of the VLA.
+ llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
+ Builder.CreateCondBr(done, contBB, loopBB);
+ cur->addIncoming(next, loopBB);
+
+ CGF.EmitBlock(contBB);
+}
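+
+// A sketch of the loop emitted above, for a VLA of N elements whose base
+// element bit-pattern is S chars wide (block and value names as created
+// above; the exact memcpy intrinsic signature is target-dependent):
+//
+//   vla-init.loop:
+//     %vla.cur = phi i8* [ %vla.begin, <entry> ], [ %vla.next, %vla-init.loop ]
+//     call void @llvm.memcpy(i8* %vla.cur, i8* %src, S, ...)   ; one element
+//     %vla.next = getelementptr i8* %vla.cur, S
+//     %vla-init.isdone = icmp eq i8* %vla.next, %vla.end       ; end = begin + N*S
+//     br i1 %vla-init.isdone, label %vla-init.cont, label %vla-init.loop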
+
+void
+CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
+ // Ignore empty classes in C++.
+ if (getContext().getLangOpts().CPlusPlus) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
+ return;
+ }
+ }
+
+ // Cast the dest ptr to the appropriate i8 pointer type.
+ unsigned DestAS =
+ cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
+ llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
+ if (DestPtr->getType() != BP)
+ DestPtr = Builder.CreateBitCast(DestPtr, BP);
+
+ // Get size and alignment info for this aggregate.
+ std::pair<CharUnits, CharUnits> TypeInfo =
+ getContext().getTypeInfoInChars(Ty);
+ CharUnits Size = TypeInfo.first;
+ CharUnits Align = TypeInfo.second;
+
+ llvm::Value *SizeVal;
+ const VariableArrayType *vla;
+
+ // Don't bother emitting a zero-byte memset.
+ if (Size.isZero()) {
+ // But note that getTypeInfo returns 0 for a VLA.
+ if (const VariableArrayType *vlaType =
+ dyn_cast_or_null<VariableArrayType>(
+ getContext().getAsArrayType(Ty))) {
+ QualType eltType;
+ llvm::Value *numElts;
+ llvm::tie(numElts, eltType) = getVLASize(vlaType);
+
+ SizeVal = numElts;
+ CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
+ if (!eltSize.isOne())
+ SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
+ vla = vlaType;
+ } else {
+ return;
+ }
+ } else {
+ SizeVal = CGM.getSize(Size);
+ vla = 0;
+ }
+
+ // If the type contains a pointer to data member we can't memset it to zero.
+ // Instead, create a null constant and copy it to the destination.
+ // TODO: there are other patterns besides zero that we can usefully memset,
+ // like -1, which happens to be the pattern used by member-pointers.
+ if (!CGM.getTypes().isZeroInitializable(Ty)) {
+ // For a VLA, emit a single element, then splat that over the VLA.
+ if (vla) Ty = getContext().getBaseElementType(vla);
+
+ llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
+
+ llvm::GlobalVariable *NullVariable =
+ new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
+ /*isConstant=*/true,
+ llvm::GlobalVariable::PrivateLinkage,
+ NullConstant, Twine());
+ llvm::Value *SrcPtr =
+ Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());
+
+ if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
+
+ // Get and call the appropriate llvm.memcpy overload.
+ Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false);
+ return;
+ }
+
+ // Otherwise, just memset the whole thing to zero. This is legal
+ // because in LLVM, all default initializers (other than the ones we just
+ // handled above) are guaranteed to have a bit pattern of all zeros.
+ Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
+ Align.getQuantity(), false);
+}
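+
+// Example: zero-initializing "int A::*arr[n]" cannot use memset, because the
+// null value of a pointer-to-data-member is not an all-zero bit pattern under
+// the common Itanium ABI; the code above instead materializes the null
+// constant for one element in a private global and memcpys it over the
+// destination (or, for the VLA case, loops it via emitNonZeroVLAInit).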
+
+llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
+ // Make sure that there is a block for the indirect goto.
+ if (IndirectBranch == 0)
+ GetIndirectGotoBlock();
+
+ llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
+
+ // Make sure the indirect branch includes all of the address-taken blocks.
+ IndirectBranch->addDestination(BB);
+ return llvm::BlockAddress::get(CurFn, BB);
+}
+
+llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
+ // If we already made the indirect branch for indirect goto, return its block.
+ if (IndirectBranch) return IndirectBranch->getParent();
+
+ CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
+
+ // Create the PHI node that indirect gotos will add entries to.
+ llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
+ "indirect.goto.dest");
+
+ // Create the indirect branch instruction.
+ IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
+ return IndirectBranch->getParent();
+}
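+
+// Together, GetAddrOfLabel and GetIndirectGotoBlock implement GCC-style
+// computed gotos: "&&label" becomes a blockaddress constant and registers the
+// label's block as a destination of the single shared indirectbr, while each
+// "goto *expr" is lowered to a branch into the "indirectgoto" block with the
+// expression value feeding the phi created here.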
+
+/// Computes the length of an array in elements, as well as the base
+/// element type and a properly-typed first element pointer.
+llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
+ QualType &baseType,
+ llvm::Value *&addr) {
+ const ArrayType *arrayType = origArrayType;
+
+ // If it's a VLA, we have to load the stored size. Note that
+  // this is the number of elements in the VLA, not its size in bytes.
+ llvm::Value *numVLAElements = 0;
+ if (isa<VariableArrayType>(arrayType)) {
+ numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;
+
+ // Walk into all VLAs. This doesn't require changes to addr,
+ // which has type T* where T is the first non-VLA element type.
+ do {
+ QualType elementType = arrayType->getElementType();
+ arrayType = getContext().getAsArrayType(elementType);
+
+ // If we only have VLA components, 'addr' requires no adjustment.
+ if (!arrayType) {
+ baseType = elementType;
+ return numVLAElements;
+ }
+ } while (isa<VariableArrayType>(arrayType));
+
+ // We get out here only if we find a constant array type
+ // inside the VLA.
+ }
+
+ // We have some number of constant-length arrays, so addr should
+ // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
+ // down to the first element of addr.
+ SmallVector<llvm::Value*, 8> gepIndices;
+
+ // GEP down to the array type.
+ llvm::ConstantInt *zero = Builder.getInt32(0);
+ gepIndices.push_back(zero);
+
+ // It's more efficient to calculate the count from the LLVM
+ // constant-length arrays than to re-evaluate the array bounds.
+ uint64_t countFromCLAs = 1;
+
+ llvm::ArrayType *llvmArrayType =
+ cast<llvm::ArrayType>(
+ cast<llvm::PointerType>(addr->getType())->getElementType());
+ while (true) {
+ assert(isa<ConstantArrayType>(arrayType));
+ assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
+ == llvmArrayType->getNumElements());
+
+ gepIndices.push_back(zero);
+ countFromCLAs *= llvmArrayType->getNumElements();
+
+ llvmArrayType =
+ dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
+ if (!llvmArrayType) break;
+
+ arrayType = getContext().getAsArrayType(arrayType->getElementType());
+ assert(arrayType && "LLVM and Clang types are out-of-synch");
+ }
+
+ baseType = arrayType->getElementType();
+
+ // Create the actual GEP.
+ addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
+
+ llvm::Value *numElements
+ = llvm::ConstantInt::get(SizeTy, countFromCLAs);
+
+ // If we had any VLA dimensions, factor them in.
+ if (numVLAElements)
+ numElements = Builder.CreateNUWMul(numVLAElements, numElements);
+
+ return numElements;
+}
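+
+// Example: for a local "int a[2][3]", addr arrives as [2 x [3 x i32]]*; the
+// loop above pushes the GEP indices (0, 0, 0), rewrites addr to an i32*
+// pointing at the first element, sets baseType to 'int', and returns the
+// constant 6. A VLA prefix such as "int a[n][3]" contributes an extra NUW
+// multiplication by the dynamic element count.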
+
+std::pair<llvm::Value*, QualType>
+CodeGenFunction::getVLASize(QualType type) {
+ const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
+ assert(vla && "type was not a variable array type!");
+ return getVLASize(vla);
+}
+
+std::pair<llvm::Value*, QualType>
+CodeGenFunction::getVLASize(const VariableArrayType *type) {
+ // The number of elements so far; always size_t.
+ llvm::Value *numElements = 0;
+
+ QualType elementType;
+ do {
+ elementType = type->getElementType();
+ llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
+ assert(vlaSize && "no size for VLA!");
+ assert(vlaSize->getType() == SizeTy);
+
+ if (!numElements) {
+ numElements = vlaSize;
+ } else {
+ // It's undefined behavior if this wraps around, so mark it that way.
+ numElements = Builder.CreateNUWMul(numElements, vlaSize);
+ }
+ } while ((type = getContext().getAsVariableArrayType(elementType)));
+
+ return std::pair<llvm::Value*,QualType>(numElements, elementType);
+}
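+
+// Example: for "int a[n][m]", the cached sizes for 'n' and 'm' (already
+// zero-extended to size_t by EmitVariablyModifiedType) are multiplied with
+// NUW, so the pair returned is (n*m, 'int').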
+
+void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
+ assert(type->isVariablyModifiedType() &&
+ "Must pass variably modified type to EmitVLASizes!");
+
+ EnsureInsertPoint();
+
+ // We're going to walk down into the type and look for VLA
+ // expressions.
+ do {
+ assert(type->isVariablyModifiedType());
+
+ const Type *ty = type.getTypePtr();
+ switch (ty->getTypeClass()) {
+
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("unexpected dependent type!");
+
+ // These types are never variably-modified.
+ case Type::Builtin:
+ case Type::Complex:
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::Record:
+ case Type::Enum:
+ case Type::Elaborated:
+ case Type::TemplateSpecialization:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ llvm_unreachable("type class is never variably-modified!");
+
+ case Type::Pointer:
+ type = cast<PointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::BlockPointer:
+ type = cast<BlockPointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ type = cast<ReferenceType>(ty)->getPointeeType();
+ break;
+
+ case Type::MemberPointer:
+ type = cast<MemberPointerType>(ty)->getPointeeType();
+ break;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ // Losing element qualification here is fine.
+ type = cast<ArrayType>(ty)->getElementType();
+ break;
+
+ case Type::VariableArray: {
+ // Losing element qualification here is fine.
+ const VariableArrayType *vat = cast<VariableArrayType>(ty);
+
+ // Unknown size indication requires no size computation.
+ // Otherwise, evaluate and record it.
+ if (const Expr *size = vat->getSizeExpr()) {
+ // It's possible that we might have emitted this already,
+ // e.g. with a typedef and a pointer to it.
+ llvm::Value *&entry = VLASizeMap[size];
+ if (!entry) {
+ // Always zexting here would be wrong if it weren't
+ // undefined behavior to have a negative bound.
+ entry = Builder.CreateIntCast(EmitScalarExpr(size), SizeTy,
+ /*signed*/ false);
+ }
+ }
+ type = vat->getElementType();
+ break;
+ }
+
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ type = cast<FunctionType>(ty)->getResultType();
+ break;
+
+ case Type::Paren:
+ case Type::TypeOf:
+ case Type::UnaryTransform:
+ case Type::Attributed:
+ case Type::SubstTemplateTypeParm:
+ // Keep walking after single level desugaring.
+ type = type.getSingleStepDesugaredType(getContext());
+ break;
+
+ case Type::Typedef:
+ case Type::Decltype:
+ case Type::Auto:
+ // Stop walking: nothing to do.
+ return;
+
+ case Type::TypeOfExpr:
+ // Stop walking: emit typeof expression.
+ EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
+ return;
+
+ case Type::Atomic:
+ type = cast<AtomicType>(ty)->getValueType();
+ break;
+ }
+ } while (type->isVariablyModifiedType());
+}
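+
+// Example: for a declaration like "int (*p)[n]", the walk above descends
+// Pointer -> VariableArray, evaluates 'n' once, zero-extends it to size_t,
+// and caches it in VLASizeMap keyed by the size expression, so later size
+// and index computations reuse the same value.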
+
+llvm::Value* CodeGenFunction::EmitVAListRef(const Expr* E) {
+ if (getContext().getBuiltinVaListType()->isArrayType())
+ return EmitScalarExpr(E);
+ return EmitLValue(E).getAddress();
+}
+
+void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
+ llvm::Constant *Init) {
+ assert (Init && "Invalid DeclRefExpr initializer!");
+ if (CGDebugInfo *Dbg = getDebugInfo())
+ Dbg->EmitGlobalVariable(E->getDecl(), Init);
+}
+
+CodeGenFunction::PeepholeProtection
+CodeGenFunction::protectFromPeepholes(RValue rvalue) {
+ // At the moment, the only aggressive peephole we do in IR gen
+ // is trunc(zext) folding, but if we add more, we can easily
+ // extend this protection.
+
+ if (!rvalue.isScalar()) return PeepholeProtection();
+ llvm::Value *value = rvalue.getScalarVal();
+ if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
+
+ // Just make an extra bitcast.
+ assert(HaveInsertPoint());
+ llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
+ Builder.GetInsertBlock());
+
+ PeepholeProtection protection;
+ protection.Inst = inst;
+ return protection;
+}
+
+void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
+ if (!protection.Inst) return;
+
+ // In theory, we could try to duplicate the peepholes now, but whatever.
+ protection.Inst->eraseFromParent();
+}
+
+llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
+ llvm::Value *AnnotatedVal,
+ llvm::StringRef AnnotationStr,
+ SourceLocation Location) {
+ llvm::Value *Args[4] = {
+ AnnotatedVal,
+ Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
+ Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
+ CGM.EmitAnnotationLineNo(Location)
+ };
+ return Builder.CreateCall(AnnotationFn, Args);
+}
+
+void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
+ assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
+ // FIXME We create a new bitcast for every annotation because that's what
+ // llvm-gcc was doing.
+ for (specific_attr_iterator<AnnotateAttr>
+ ai = D->specific_attr_begin<AnnotateAttr>(),
+ ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai)
+ EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
+ Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
+ (*ai)->getAnnotation(), D->getLocation());
+}
+
+llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
+ llvm::Value *V) {
+ assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
+ llvm::Type *VTy = V->getType();
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
+ CGM.Int8PtrTy);
+
+ for (specific_attr_iterator<AnnotateAttr>
+ ai = D->specific_attr_begin<AnnotateAttr>(),
+ ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai) {
+ // FIXME Always emit the cast inst so we can differentiate between
+ // annotation on the first field of a struct and annotation on the struct
+ // itself.
+ if (VTy != CGM.Int8PtrTy)
+ V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
+ V = EmitAnnotationCall(F, V, (*ai)->getAnnotation(), D->getLocation());
+ V = Builder.CreateBitCast(V, VTy);
+ }
+
+ return V;
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
new file mode 100644
index 0000000..3e0cd14
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenFunction.h
@@ -0,0 +1,2701 @@
+//===-- CodeGenFunction.h - Per-Function state for LLVM CodeGen -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal per-function state used for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENFUNCTION_H
+#define CLANG_CODEGEN_CODEGENFUNCTION_H
+
+#include "clang/AST/Type.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Basic/ABI.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/ValueHandle.h"
+#include "llvm/Support/Debug.h"
+#include "CodeGenModule.h"
+#include "CGBuilder.h"
+#include "CGDebugInfo.h"
+#include "CGValue.h"
+
+namespace llvm {
+ class BasicBlock;
+ class LLVMContext;
+ class MDNode;
+ class Module;
+ class SwitchInst;
+ class Twine;
+ class Value;
+ class CallSite;
+}
+
+namespace clang {
+ class ASTContext;
+ class BlockDecl;
+ class CXXDestructorDecl;
+ class CXXForRangeStmt;
+ class CXXTryStmt;
+ class Decl;
+ class LabelDecl;
+ class EnumConstantDecl;
+ class FunctionDecl;
+ class FunctionProtoType;
+ class LabelStmt;
+ class ObjCContainerDecl;
+ class ObjCInterfaceDecl;
+ class ObjCIvarDecl;
+ class ObjCMethodDecl;
+ class ObjCImplementationDecl;
+ class ObjCPropertyImplDecl;
+ class TargetInfo;
+ class TargetCodeGenInfo;
+ class VarDecl;
+ class ObjCForCollectionStmt;
+ class ObjCAtTryStmt;
+ class ObjCAtThrowStmt;
+ class ObjCAtSynchronizedStmt;
+ class ObjCAutoreleasePoolStmt;
+
+namespace CodeGen {
+ class CodeGenTypes;
+ class CGFunctionInfo;
+ class CGRecordLayout;
+ class CGBlockInfo;
+ class CGCXXABI;
+ class BlockFlags;
+ class BlockFieldFlags;
+
+/// A branch fixup. These are required when emitting a goto to a
+/// label which hasn't been emitted yet. The goto is optimistically
+/// emitted as a branch to the basic block for the label, and (if it
+/// occurs in a scope with non-trivial cleanups) a fixup is added to
+/// the innermost cleanup. When a (normal) cleanup is popped, any
+/// unresolved fixups in that scope are threaded through the cleanup.
+struct BranchFixup {
+ /// The block containing the terminator which needs to be modified
+ /// into a switch if this fixup is resolved into the current scope.
+  /// If null, InitialBranch points directly to the destination.
+ llvm::BasicBlock *OptimisticBranchBlock;
+
+ /// The ultimate destination of the branch.
+ ///
+ /// This can be set to null to indicate that this fixup was
+ /// successfully resolved.
+ llvm::BasicBlock *Destination;
+
+ /// The destination index value.
+ unsigned DestinationIndex;
+
+ /// The initial branch of the fixup.
+ llvm::BranchInst *InitialBranch;
+};
+
+template <class T> struct InvariantValue {
+ typedef T type;
+ typedef T saved_type;
+ static bool needsSaving(type value) { return false; }
+ static saved_type save(CodeGenFunction &CGF, type value) { return value; }
+ static type restore(CodeGenFunction &CGF, saved_type value) { return value; }
+};
+
+/// A metaprogramming class for ensuring that a value will dominate an
+/// arbitrary position in a function.
+template <class T> struct DominatingValue : InvariantValue<T> {};
+
+template <class T, bool mightBeInstruction =
+ llvm::is_base_of<llvm::Value, T>::value &&
+ !llvm::is_base_of<llvm::Constant, T>::value &&
+ !llvm::is_base_of<llvm::BasicBlock, T>::value>
+struct DominatingPointer;
+template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
+// template <class T> struct DominatingPointer<T,true> at end of file
+
+template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};
+
+enum CleanupKind {
+ EHCleanup = 0x1,
+ NormalCleanup = 0x2,
+ NormalAndEHCleanup = EHCleanup | NormalCleanup,
+
+ InactiveCleanup = 0x4,
+ InactiveEHCleanup = EHCleanup | InactiveCleanup,
+ InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
+ InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup
+};
+
+/// A stack of scopes which respond to exceptions, including cleanups
+/// and catch blocks.
+class EHScopeStack {
+public:
+ /// A saved depth on the scope stack. This is necessary because
+ /// pushing scopes onto the stack invalidates iterators.
+ class stable_iterator {
+ friend class EHScopeStack;
+
+ /// Offset from StartOfData to EndOfBuffer.
+ ptrdiff_t Size;
+
+ stable_iterator(ptrdiff_t Size) : Size(Size) {}
+
+ public:
+ static stable_iterator invalid() { return stable_iterator(-1); }
+ stable_iterator() : Size(-1) {}
+
+ bool isValid() const { return Size >= 0; }
+
+ /// Returns true if this scope encloses I.
+ /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool encloses(stable_iterator I) const { return Size <= I.Size; }
+
+ /// Returns true if this scope strictly encloses I: that is,
+ /// if it encloses I and is not I.
+    /// Returns false if I is invalid.
+ /// This scope must be valid.
+ bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; }
+
+ friend bool operator==(stable_iterator A, stable_iterator B) {
+ return A.Size == B.Size;
+ }
+ friend bool operator!=(stable_iterator A, stable_iterator B) {
+ return A.Size != B.Size;
+ }
+ };
+
+ /// Information for lazily generating a cleanup. Subclasses must be
+ /// POD-like: cleanups will not be destructed, and they will be
+ /// allocated on the cleanup stack and freely copied and moved
+ /// around.
+ ///
+ /// Cleanup implementations should generally be declared in an
+ /// anonymous namespace.
+ class Cleanup {
+ // Anchor the construction vtable.
+ virtual void anchor();
+ public:
+ /// Generation flags.
+ class Flags {
+ enum {
+ F_IsForEH = 0x1,
+ F_IsNormalCleanupKind = 0x2,
+ F_IsEHCleanupKind = 0x4
+ };
+ unsigned flags;
+
+ public:
+ Flags() : flags(0) {}
+
+ /// isForEH - true if the current emission is for an EH cleanup.
+ bool isForEHCleanup() const { return flags & F_IsForEH; }
+ bool isForNormalCleanup() const { return !isForEHCleanup(); }
+ void setIsForEHCleanup() { flags |= F_IsForEH; }
+
+ bool isNormalCleanupKind() const { return flags & F_IsNormalCleanupKind; }
+ void setIsNormalCleanupKind() { flags |= F_IsNormalCleanupKind; }
+
+ /// isEHCleanupKind - true if the cleanup was pushed as an EH
+ /// cleanup.
+ bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; }
+ void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }
+ };
+
+ // Provide a virtual destructor to suppress a very common warning
+ // that unfortunately cannot be suppressed without this. Cleanups
+ // should not rely on this destructor ever being called.
+ virtual ~Cleanup() {}
+
+ /// Emit the cleanup. For normal cleanups, this is run in the
+ /// same EH context as when the cleanup was pushed, i.e. the
+ /// immediately-enclosing context of the cleanup scope. For
+ /// EH cleanups, this is run in a terminate context.
+ ///
+    /// \param flags - generation flags; flags.isForEHCleanup() is true if
+    /// this is for an EH cleanup, false if for a normal cleanup.
+ virtual void Emit(CodeGenFunction &CGF, Flags flags) = 0;
+ };
+
+ /// ConditionalCleanupN stores the saved form of its N parameters,
+ /// then restores them and performs the cleanup.
+ template <class T, class A0>
+ class ConditionalCleanup1 : public Cleanup {
+ typedef typename DominatingValue<A0>::saved_type A0_saved;
+ A0_saved a0_saved;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
+ T(a0).Emit(CGF, flags);
+ }
+
+ public:
+ ConditionalCleanup1(A0_saved a0)
+ : a0_saved(a0) {}
+ };
+
+ template <class T, class A0, class A1>
+ class ConditionalCleanup2 : public Cleanup {
+ typedef typename DominatingValue<A0>::saved_type A0_saved;
+ typedef typename DominatingValue<A1>::saved_type A1_saved;
+ A0_saved a0_saved;
+ A1_saved a1_saved;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
+ A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
+ T(a0, a1).Emit(CGF, flags);
+ }
+
+ public:
+ ConditionalCleanup2(A0_saved a0, A1_saved a1)
+ : a0_saved(a0), a1_saved(a1) {}
+ };
+
+ template <class T, class A0, class A1, class A2>
+ class ConditionalCleanup3 : public Cleanup {
+ typedef typename DominatingValue<A0>::saved_type A0_saved;
+ typedef typename DominatingValue<A1>::saved_type A1_saved;
+ typedef typename DominatingValue<A2>::saved_type A2_saved;
+ A0_saved a0_saved;
+ A1_saved a1_saved;
+ A2_saved a2_saved;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
+ A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
+ A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
+ T(a0, a1, a2).Emit(CGF, flags);
+ }
+
+ public:
+ ConditionalCleanup3(A0_saved a0, A1_saved a1, A2_saved a2)
+ : a0_saved(a0), a1_saved(a1), a2_saved(a2) {}
+ };
+
+ template <class T, class A0, class A1, class A2, class A3>
+ class ConditionalCleanup4 : public Cleanup {
+ typedef typename DominatingValue<A0>::saved_type A0_saved;
+ typedef typename DominatingValue<A1>::saved_type A1_saved;
+ typedef typename DominatingValue<A2>::saved_type A2_saved;
+ typedef typename DominatingValue<A3>::saved_type A3_saved;
+ A0_saved a0_saved;
+ A1_saved a1_saved;
+ A2_saved a2_saved;
+ A3_saved a3_saved;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ A0 a0 = DominatingValue<A0>::restore(CGF, a0_saved);
+ A1 a1 = DominatingValue<A1>::restore(CGF, a1_saved);
+ A2 a2 = DominatingValue<A2>::restore(CGF, a2_saved);
+ A3 a3 = DominatingValue<A3>::restore(CGF, a3_saved);
+ T(a0, a1, a2, a3).Emit(CGF, flags);
+ }
+
+ public:
+ ConditionalCleanup4(A0_saved a0, A1_saved a1, A2_saved a2, A3_saved a3)
+ : a0_saved(a0), a1_saved(a1), a2_saved(a2), a3_saved(a3) {}
+ };
+
+private:
+ // The implementation for this class is in CGException.h and
+ // CGException.cpp; the definition is here because it's used as a
+ // member of CodeGenFunction.
+
+ /// The start of the scope-stack buffer, i.e. the allocated pointer
+ /// for the buffer. All of these pointers are either simultaneously
+ /// null or simultaneously valid.
+ char *StartOfBuffer;
+
+ /// The end of the buffer.
+ char *EndOfBuffer;
+
+ /// The first valid entry in the buffer.
+ char *StartOfData;
+
+ /// The innermost normal cleanup on the stack.
+ stable_iterator InnermostNormalCleanup;
+
+ /// The innermost EH scope on the stack.
+ stable_iterator InnermostEHScope;
+
+ /// The current set of branch fixups. A branch fixup is a jump to
+ /// an as-yet unemitted label, i.e. a label for which we don't yet
+ /// know the EH stack depth. Whenever we pop a cleanup, we have
+ /// to thread all the current branch fixups through it.
+ ///
+ /// Fixups are recorded as the Use of the respective branch or
+ /// switch statement. The use points to the final destination.
+ /// When popping out of a cleanup, these uses are threaded through
+ /// the cleanup and adjusted to point to the new cleanup.
+ ///
+ /// Note that branches are allowed to jump into protected scopes
+ /// in certain situations; e.g. the following code is legal:
+ /// struct A { ~A(); }; // trivial ctor, non-trivial dtor
+ /// goto foo;
+ /// A a;
+ /// foo:
+ /// bar();
+ SmallVector<BranchFixup, 8> BranchFixups;
+
+ char *allocate(size_t Size);
+
+ void *pushCleanup(CleanupKind K, size_t DataSize);
+
+public:
+ EHScopeStack() : StartOfBuffer(0), EndOfBuffer(0), StartOfData(0),
+ InnermostNormalCleanup(stable_end()),
+ InnermostEHScope(stable_end()) {}
+ ~EHScopeStack() { delete[] StartOfBuffer; }
+
+ // Variadic templates would make this not terrible.
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T>
+ void pushCleanup(CleanupKind Kind) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T();
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0>
+ void pushCleanup(CleanupKind Kind, A0 a0) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1>
+ void pushCleanup(CleanupKind Kind, A0 a0, A1 a1) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0, a1);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1, class A2>
+ void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0, a1, a2);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1, class A2, class A3>
+ void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3);
+ (void) Obj;
+ }
+
+ /// Push a lazily-created cleanup on the stack.
+ template <class T, class A0, class A1, class A2, class A3, class A4>
+ void pushCleanup(CleanupKind Kind, A0 a0, A1 a1, A2 a2, A3 a3, A4 a4) {
+ void *Buffer = pushCleanup(Kind, sizeof(T));
+ Cleanup *Obj = new(Buffer) T(a0, a1, a2, a3, a4);
+ (void) Obj;
+ }
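+
+  // Typical usage, with a hypothetical cleanup type defined in an anonymous
+  // namespace elsewhere:
+  //
+  //   struct CallReleaseForObject : EHScopeStack::Cleanup {
+  //     llvm::Value *Object;
+  //     CallReleaseForObject(llvm::Value *object) : Object(object) {}
+  //     void Emit(CodeGenFunction &CGF, Flags flags) {
+  //       // ... emit the release of Object ...
+  //     }
+  //   };
+  //   ...
+  //   CGF.EHStack.pushCleanup<CallReleaseForObject>(NormalAndEHCleanup, object);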
+
+ // Feel free to add more variants of the following:
+
+ /// Push a cleanup with non-constant storage requirements on the
+ /// stack. The cleanup type must provide an additional static method:
+ /// static size_t getExtraSize(size_t);
+ /// The argument to this method will be the value N, which will also
+ /// be passed as the first argument to the constructor.
+ ///
+ /// The data stored in the extra storage must obey the same
+ /// restrictions as normal cleanup member data.
+ ///
+ /// The pointer returned from this method is valid until the cleanup
+ /// stack is modified.
+ template <class T, class A0, class A1, class A2>
+ T *pushCleanupWithExtra(CleanupKind Kind, size_t N, A0 a0, A1 a1, A2 a2) {
+ void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
+ return new (Buffer) T(N, a0, a1, a2);
+ }
+
+ /// Pops a cleanup scope off the stack. This is private to CGCleanup.cpp.
+ void popCleanup();
+
+ /// Push a set of catch handlers on the stack. The catch is
+ /// uninitialized and will need to have the given number of handlers
+ /// set on it.
+ class EHCatchScope *pushCatch(unsigned NumHandlers);
+
+ /// Pops a catch scope off the stack. This is private to CGException.cpp.
+ void popCatch();
+
+ /// Push an exceptions filter on the stack.
+ class EHFilterScope *pushFilter(unsigned NumFilters);
+
+ /// Pops an exceptions filter off the stack.
+ void popFilter();
+
+ /// Push a terminate handler on the stack.
+ void pushTerminate();
+
+ /// Pops a terminate handler off the stack.
+ void popTerminate();
+
+ /// Determines whether the exception-scopes stack is empty.
+ bool empty() const { return StartOfData == EndOfBuffer; }
+
+ bool requiresLandingPad() const {
+ return InnermostEHScope != stable_end();
+ }
+
+ /// Determines whether there are any normal cleanups on the stack.
+ bool hasNormalCleanups() const {
+ return InnermostNormalCleanup != stable_end();
+ }
+
+ /// Returns the innermost normal cleanup on the stack, or
+ /// stable_end() if there are no normal cleanups.
+ stable_iterator getInnermostNormalCleanup() const {
+ return InnermostNormalCleanup;
+ }
+ stable_iterator getInnermostActiveNormalCleanup() const;
+
+ stable_iterator getInnermostEHScope() const {
+ return InnermostEHScope;
+ }
+
+ stable_iterator getInnermostActiveEHScope() const;
+
+ /// An unstable reference to a scope-stack depth. Invalidated by
+ /// pushes but not pops.
+ class iterator;
+
+ /// Returns an iterator pointing to the innermost EH scope.
+ iterator begin() const;
+
+ /// Returns an iterator pointing to the outermost EH scope.
+ iterator end() const;
+
+ /// Create a stable reference to the top of the EH stack. The
+ /// returned reference is valid until that scope is popped off the
+ /// stack.
+ stable_iterator stable_begin() const {
+ return stable_iterator(EndOfBuffer - StartOfData);
+ }
+
+ /// Create a stable reference to the bottom of the EH stack.
+ static stable_iterator stable_end() {
+ return stable_iterator(0);
+ }
+
+ /// Translates an iterator into a stable_iterator.
+ stable_iterator stabilize(iterator it) const;
+
+  /// Turn a stable reference to a scope depth into an unstable pointer
+ /// to the EH stack.
+ iterator find(stable_iterator save) const;
+
+ /// Removes the cleanup pointed to by the given stable_iterator.
+ void removeCleanup(stable_iterator save);
+
+ /// Add a branch fixup to the current cleanup scope.
+ BranchFixup &addBranchFixup() {
+ assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
+ BranchFixups.push_back(BranchFixup());
+ return BranchFixups.back();
+ }
+
+ unsigned getNumBranchFixups() const { return BranchFixups.size(); }
+ BranchFixup &getBranchFixup(unsigned I) {
+ assert(I < getNumBranchFixups());
+ return BranchFixups[I];
+ }
+
+ /// Pops lazily-removed fixups from the end of the list. This
+ /// should only be called by procedures which have just popped a
+ /// cleanup or resolved one or more fixups.
+ void popNullFixups();
+
+ /// Clears the branch-fixups list. This should only be called by
+ /// ResolveAllBranchFixups.
+ void clearFixups() { BranchFixups.clear(); }
+};
+
+/// CodeGenFunction - This class organizes the per-function state that is used
+/// while generating LLVM code.
+class CodeGenFunction : public CodeGenTypeCache {
+ CodeGenFunction(const CodeGenFunction&); // DO NOT IMPLEMENT
+ void operator=(const CodeGenFunction&); // DO NOT IMPLEMENT
+
+ friend class CGCXXABI;
+public:
+ /// A jump destination is an abstract label, branching to which may
+ /// require a jump out through normal cleanups.
+ struct JumpDest {
+ JumpDest() : Block(0), ScopeDepth(), Index(0) {}
+ JumpDest(llvm::BasicBlock *Block,
+ EHScopeStack::stable_iterator Depth,
+ unsigned Index)
+ : Block(Block), ScopeDepth(Depth), Index(Index) {}
+
+ bool isValid() const { return Block != 0; }
+ llvm::BasicBlock *getBlock() const { return Block; }
+ EHScopeStack::stable_iterator getScopeDepth() const { return ScopeDepth; }
+ unsigned getDestIndex() const { return Index; }
+
+ private:
+ llvm::BasicBlock *Block;
+ EHScopeStack::stable_iterator ScopeDepth;
+ unsigned Index;
+ };
+
+ CodeGenModule &CGM; // Per-module state.
+ const TargetInfo &Target;
+
+ typedef std::pair<llvm::Value *, llvm::Value *> ComplexPairTy;
+ CGBuilderTy Builder;
+
+ /// CurFuncDecl - Holds the Decl for the current function or ObjC method.
+ /// This excludes BlockDecls.
+ const Decl *CurFuncDecl;
+ /// CurCodeDecl - This is the inner-most code context, which includes blocks.
+ const Decl *CurCodeDecl;
+ const CGFunctionInfo *CurFnInfo;
+ QualType FnRetTy;
+ llvm::Function *CurFn;
+
+ /// CurGD - The GlobalDecl for the current function being compiled.
+ GlobalDecl CurGD;
+
+ /// PrologueCleanupDepth - The cleanup depth enclosing all the
+ /// cleanups associated with the parameters.
+ EHScopeStack::stable_iterator PrologueCleanupDepth;
+
+ /// ReturnBlock - Unified return block.
+ JumpDest ReturnBlock;
+
+ /// ReturnValue - The temporary alloca to hold the return value. This is null
+ /// iff the function has no return value.
+ llvm::Value *ReturnValue;
+
+ /// AllocaInsertPoint - This is an instruction in the entry block before which
+ /// we prefer to insert allocas.
+ llvm::AssertingVH<llvm::Instruction> AllocaInsertPt;
+
+ bool CatchUndefined;
+
+ /// In ARC, whether we should autorelease the return value.
+ bool AutoreleaseResult;
+
+ const CodeGen::CGBlockInfo *BlockInfo;
+ llvm::Value *BlockPointer;
+
+ llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
+ FieldDecl *LambdaThisCaptureField;
+
+ /// \brief A mapping from NRVO variables to the flags used to indicate
+ /// when the NRVO has been applied to this variable.
+ llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
+
+ EHScopeStack EHStack;
+
+ /// i32s containing the indexes of the cleanup destinations.
+ llvm::AllocaInst *NormalCleanupDest;
+
+ unsigned NextCleanupDestIndex;
+
+ /// FirstBlockInfo - The head of a singly-linked-list of block layouts.
+ CGBlockInfo *FirstBlockInfo;
+
+ /// EHResumeBlock - Unified block containing a call to llvm.eh.resume.
+ llvm::BasicBlock *EHResumeBlock;
+
+ /// The exception slot. All landing pads write the current exception pointer
+ /// into this alloca.
+ llvm::Value *ExceptionSlot;
+
+ /// The selector slot. Under the MandatoryCleanup model, all landing pads
+ /// write the current selector value into this alloca.
+ llvm::AllocaInst *EHSelectorSlot;
+
+ /// Emits a landing pad for the current EH stack.
+ llvm::BasicBlock *EmitLandingPad();
+
+ llvm::BasicBlock *getInvokeDestImpl();
+
+ template <class T>
+ typename DominatingValue<T>::saved_type saveValueInCond(T value) {
+ return DominatingValue<T>::save(*this, value);
+ }
+
+public:
+ /// ObjCEHValueStack - Stack of Objective-C exception values, used for
+ /// rethrows.
+ SmallVector<llvm::Value*, 8> ObjCEHValueStack;
+
+ /// A class controlling the emission of a finally block.
+ class FinallyInfo {
+ /// Where the catchall's edge through the cleanup should go.
+ JumpDest RethrowDest;
+
+ /// A function to call to enter the catch.
+ llvm::Constant *BeginCatchFn;
+
+ /// An i1 variable indicating whether or not the @finally is
+ /// running for an exception.
+ llvm::AllocaInst *ForEHVar;
+
+ /// An i8* variable into which the exception pointer to rethrow
+ /// has been saved.
+ llvm::AllocaInst *SavedExnVar;
+
+ public:
+ void enter(CodeGenFunction &CGF, const Stmt *Finally,
+ llvm::Constant *beginCatchFn, llvm::Constant *endCatchFn,
+ llvm::Constant *rethrowFn);
+ void exit(CodeGenFunction &CGF);
+ };
+
+ /// pushFullExprCleanup - Push a cleanup to be run at the end of the
+ /// current full-expression. Safe against the possibility that
+ /// we're currently inside a conditionally-evaluated expression.
+ template <class T, class A0>
+ void pushFullExprCleanup(CleanupKind kind, A0 a0) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch())
+ return EHStack.pushCleanup<T>(kind, a0);
+
+ typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
+
+ typedef EHScopeStack::ConditionalCleanup1<T, A0> CleanupType;
+ EHStack.pushCleanup<CleanupType>(kind, a0_saved);
+ initFullExprCleanup();
+ }
+
+ /// pushFullExprCleanup - Push a cleanup to be run at the end of the
+ /// current full-expression. Safe against the possibility that
+ /// we're currently inside a conditionally-evaluated expression.
+ template <class T, class A0, class A1>
+ void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch())
+ return EHStack.pushCleanup<T>(kind, a0, a1);
+
+ typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
+ typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
+
+ typedef EHScopeStack::ConditionalCleanup2<T, A0, A1> CleanupType;
+ EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved);
+ initFullExprCleanup();
+ }
+
+ /// pushFullExprCleanup - Push a cleanup to be run at the end of the
+ /// current full-expression. Safe against the possibility that
+ /// we're currently inside a conditionally-evaluated expression.
+ template <class T, class A0, class A1, class A2>
+ void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch()) {
+ return EHStack.pushCleanup<T>(kind, a0, a1, a2);
+ }
+
+ typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
+ typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
+ typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
+
+ typedef EHScopeStack::ConditionalCleanup3<T, A0, A1, A2> CleanupType;
+ EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved, a2_saved);
+ initFullExprCleanup();
+ }
+
+ /// pushFullExprCleanup - Push a cleanup to be run at the end of the
+ /// current full-expression. Safe against the possibility that
+ /// we're currently inside a conditionally-evaluated expression.
+ template <class T, class A0, class A1, class A2, class A3>
+ void pushFullExprCleanup(CleanupKind kind, A0 a0, A1 a1, A2 a2, A3 a3) {
+ // If we're not in a conditional branch, or if none of the
+ // arguments requires saving, then use the unconditional cleanup.
+ if (!isInConditionalBranch()) {
+ return EHStack.pushCleanup<T>(kind, a0, a1, a2, a3);
+ }
+
+ typename DominatingValue<A0>::saved_type a0_saved = saveValueInCond(a0);
+ typename DominatingValue<A1>::saved_type a1_saved = saveValueInCond(a1);
+ typename DominatingValue<A2>::saved_type a2_saved = saveValueInCond(a2);
+ typename DominatingValue<A3>::saved_type a3_saved = saveValueInCond(a3);
+
+ typedef EHScopeStack::ConditionalCleanup4<T, A0, A1, A2, A3> CleanupType;
+ EHStack.pushCleanup<CleanupType>(kind, a0_saved, a1_saved,
+ a2_saved, a3_saved);
+ initFullExprCleanup();
+ }
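+
+  // The saving step matters because a cleanup pushed inside one arm of a
+  // conditional (e.g. the second operand of "b ? X(p) : Y(q)") may receive
+  // arguments computed in a block that does not dominate the point where the
+  // cleanup is eventually emitted; DominatingValue<>::save/restore
+  // rematerializes such arguments at the emission point.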
+
+  /// Set up the last cleanup that was pushed as a conditional
+ /// full-expression cleanup.
+ void initFullExprCleanup();
+
+ /// PushDestructorCleanup - Push a cleanup to call the
+ /// complete-object destructor of an object of the given type at the
+ /// given address. Does nothing if T is not a C++ class type with a
+ /// non-trivial destructor.
+ void PushDestructorCleanup(QualType T, llvm::Value *Addr);
+
+ /// PushDestructorCleanup - Push a cleanup to call the
+ /// complete-object variant of the given destructor on the object at
+ /// the given address.
+ void PushDestructorCleanup(const CXXDestructorDecl *Dtor,
+ llvm::Value *Addr);
+
+ /// PopCleanupBlock - Will pop the cleanup entry on the stack and
+ /// process all branch fixups.
+ void PopCleanupBlock(bool FallThroughIsBranchThrough = false);
+
+ /// DeactivateCleanupBlock - Deactivates the given cleanup block.
+ /// The block cannot be reactivated. Pops it if it's the top of the
+ /// stack.
+ ///
+ /// \param DominatingIP - An instruction which is known to
+ /// dominate the current IP (if set) and which lies along
+  /// all paths of execution between the current IP and
+  /// the point at which the cleanup comes into scope.
+ void DeactivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
+ llvm::Instruction *DominatingIP);
+
+ /// ActivateCleanupBlock - Activates an initially-inactive cleanup.
+ /// Cannot be used to resurrect a deactivated cleanup.
+ ///
+ /// \param DominatingIP - An instruction which is known to
+ /// dominate the current IP (if set) and which lies along
+  /// all paths of execution between the current IP and
+  /// the point at which the cleanup comes into scope.
+ void ActivateCleanupBlock(EHScopeStack::stable_iterator Cleanup,
+ llvm::Instruction *DominatingIP);
+
+ /// \brief Enters a new scope for capturing cleanups, all of which
+ /// will be executed once the scope is exited.
+ class RunCleanupsScope {
+ EHScopeStack::stable_iterator CleanupStackDepth;
+ bool OldDidCallStackSave;
+ bool PerformCleanup;
+
+ RunCleanupsScope(const RunCleanupsScope &); // DO NOT IMPLEMENT
+ RunCleanupsScope &operator=(const RunCleanupsScope &); // DO NOT IMPLEMENT
+
+ protected:
+ CodeGenFunction& CGF;
+
+ public:
+ /// \brief Enter a new cleanup scope.
+ explicit RunCleanupsScope(CodeGenFunction &CGF)
+ : PerformCleanup(true), CGF(CGF)
+ {
+ CleanupStackDepth = CGF.EHStack.stable_begin();
+ OldDidCallStackSave = CGF.DidCallStackSave;
+ CGF.DidCallStackSave = false;
+ }
+
+ /// \brief Exit this cleanup scope, emitting any accumulated
+ /// cleanups.
+ ~RunCleanupsScope() {
+ if (PerformCleanup) {
+ CGF.DidCallStackSave = OldDidCallStackSave;
+ CGF.PopCleanupBlocks(CleanupStackDepth);
+ }
+ }
+
+ /// \brief Determine whether this scope requires any cleanups.
+ bool requiresCleanups() const {
+ return CGF.EHStack.stable_begin() != CleanupStackDepth;
+ }
+
+ /// \brief Force the emission of cleanups now, instead of waiting
+ /// until this object is destroyed.
+ void ForceCleanup() {
+ assert(PerformCleanup && "Already forced cleanup");
+ CGF.DidCallStackSave = OldDidCallStackSave;
+ CGF.PopCleanupBlocks(CleanupStackDepth);
+ PerformCleanup = false;
+ }
+ };
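+
+  // Typical usage: wrap the emission of a statement or expression so that any
+  // cleanups pushed while emitting it are run when the scope ends, e.g.
+  //
+  //   {
+  //     RunCleanupsScope Scope(*this);
+  //     EmitStmt(SubStmt);
+  //   }   // ~RunCleanupsScope pops everything pushed since construction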
+
+ class LexicalScope: protected RunCleanupsScope {
+ SourceRange Range;
+ bool PopDebugStack;
+
+ LexicalScope(const LexicalScope &); // DO NOT IMPLEMENT THESE
+ LexicalScope &operator=(const LexicalScope &);
+
+ public:
+ /// \brief Enter a new cleanup scope.
+ explicit LexicalScope(CodeGenFunction &CGF, SourceRange Range)
+ : RunCleanupsScope(CGF), Range(Range), PopDebugStack(true) {
+ if (CGDebugInfo *DI = CGF.getDebugInfo())
+ DI->EmitLexicalBlockStart(CGF.Builder, Range.getBegin());
+ }
+
+ /// \brief Exit this cleanup scope, emitting any accumulated
+ /// cleanups.
+ ~LexicalScope() {
+ if (PopDebugStack) {
+ CGDebugInfo *DI = CGF.getDebugInfo();
+ if (DI) DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
+ }
+ }
+
+ /// \brief Force the emission of cleanups now, instead of waiting
+ /// until this object is destroyed.
+ void ForceCleanup() {
+ RunCleanupsScope::ForceCleanup();
+ if (CGDebugInfo *DI = CGF.getDebugInfo()) {
+ DI->EmitLexicalBlockEnd(CGF.Builder, Range.getEnd());
+ PopDebugStack = false;
+ }
+ }
+ };
+
+
+ /// PopCleanupBlocks - Takes the old cleanup stack size and emits
+ /// the cleanup blocks that have been added.
+ void PopCleanupBlocks(EHScopeStack::stable_iterator OldCleanupStackSize);
+
+ void ResolveBranchFixups(llvm::BasicBlock *Target);
+
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ JumpDest getJumpDestInCurrentScope(llvm::BasicBlock *Target) {
+ return JumpDest(Target,
+ EHStack.getInnermostNormalCleanup(),
+ NextCleanupDestIndex++);
+ }
+
+ /// The given basic block lies in the current EH scope, but may be a
+ /// target of a potentially scope-crossing jump; get a stable handle
+ /// to which we can perform this jump later.
+ JumpDest getJumpDestInCurrentScope(StringRef Name = StringRef()) {
+ return getJumpDestInCurrentScope(createBasicBlock(Name));
+ }
+
+ /// EmitBranchThroughCleanup - Emit a branch from the current insert
+ /// block through the normal cleanup handling code (if any) and then
+ /// on to \arg Dest.
+ void EmitBranchThroughCleanup(JumpDest Dest);
+
+ /// isObviouslyBranchWithoutCleanups - Return true if a branch to the
+ /// specified destination obviously has no cleanups to run. 'false' is always
+ /// a conservatively correct answer for this method.
+ bool isObviouslyBranchWithoutCleanups(JumpDest Dest) const;
+
+ /// popCatchScope - Pops the catch scope at the top of the EHScope
+ /// stack, emitting any required code (other than the catch handlers
+ /// themselves).
+ void popCatchScope();
+
+ llvm::BasicBlock *getEHResumeBlock();
+ llvm::BasicBlock *getEHDispatchBlock(EHScopeStack::stable_iterator scope);
+
+ /// An object to manage conditionally-evaluated expressions.
+ class ConditionalEvaluation {
+ llvm::BasicBlock *StartBB;
+
+ public:
+ ConditionalEvaluation(CodeGenFunction &CGF)
+ : StartBB(CGF.Builder.GetInsertBlock()) {}
+
+ void begin(CodeGenFunction &CGF) {
+ assert(CGF.OutermostConditional != this);
+ if (!CGF.OutermostConditional)
+ CGF.OutermostConditional = this;
+ }
+
+ void end(CodeGenFunction &CGF) {
+ assert(CGF.OutermostConditional != 0);
+ if (CGF.OutermostConditional == this)
+ CGF.OutermostConditional = 0;
+ }
+
+ /// Returns a block which will be executed prior to each
+ /// evaluation of the conditional code.
+ llvm::BasicBlock *getStartingBlock() const {
+ return StartBB;
+ }
+ };
+
+ /// isInConditionalBranch - Return true if we're currently emitting
+ /// one branch or the other of a conditional expression.
+ bool isInConditionalBranch() const { return OutermostConditional != 0; }
+
+ void setBeforeOutermostConditional(llvm::Value *value, llvm::Value *addr) {
+ assert(isInConditionalBranch());
+ llvm::BasicBlock *block = OutermostConditional->getStartingBlock();
+ new llvm::StoreInst(value, addr, &block->back());
+ }
+
+ /// An RAII object to record that we're evaluating a statement
+ /// expression.
+ class StmtExprEvaluation {
+ CodeGenFunction &CGF;
+
+ /// We have to save the outermost conditional: cleanups in a
+ /// statement expression aren't conditional just because the
+ /// StmtExpr is.
+ ConditionalEvaluation *SavedOutermostConditional;
+
+ public:
+ StmtExprEvaluation(CodeGenFunction &CGF)
+ : CGF(CGF), SavedOutermostConditional(CGF.OutermostConditional) {
+ CGF.OutermostConditional = 0;
+ }
+
+ ~StmtExprEvaluation() {
+ CGF.OutermostConditional = SavedOutermostConditional;
+ CGF.EnsureInsertPoint();
+ }
+ };
+
+ /// An object which temporarily prevents a value from being
+ /// destroyed by aggressive peephole optimizations that assume that
+ /// all uses of a value have been realized in the IR.
+ class PeepholeProtection {
+ llvm::Instruction *Inst;
+ friend class CodeGenFunction;
+
+ public:
+ PeepholeProtection() : Inst(0) {}
+ };
+
+ /// A non-RAII class containing all the information about a bound
+ /// opaque value. OpaqueValueMapping, below, is a RAII wrapper for
+ /// this which makes individual mappings very simple; using this
+ /// class directly is useful when you have a variable number of
+ /// opaque values or don't want the RAII functionality for some
+ /// reason.
+ class OpaqueValueMappingData {
+ const OpaqueValueExpr *OpaqueValue;
+ bool BoundLValue;
+ CodeGenFunction::PeepholeProtection Protection;
+
+ OpaqueValueMappingData(const OpaqueValueExpr *ov,
+ bool boundLValue)
+ : OpaqueValue(ov), BoundLValue(boundLValue) {}
+ public:
+ OpaqueValueMappingData() : OpaqueValue(0) {}
+
+ static bool shouldBindAsLValue(const Expr *expr) {
+ // gl-values should be bound as l-values for obvious reasons.
+ // Records should be bound as l-values because IR generation
+ // always keeps them in memory. Expressions of function type
+ // act exactly like l-values but are formally required to be
+ // r-values in C.
+ return expr->isGLValue() ||
+ expr->getType()->isRecordType() ||
+ expr->getType()->isFunctionType();
+ }
+
+ static OpaqueValueMappingData bind(CodeGenFunction &CGF,
+ const OpaqueValueExpr *ov,
+ const Expr *e) {
+ if (shouldBindAsLValue(ov))
+ return bind(CGF, ov, CGF.EmitLValue(e));
+ return bind(CGF, ov, CGF.EmitAnyExpr(e));
+ }
+
+ static OpaqueValueMappingData bind(CodeGenFunction &CGF,
+ const OpaqueValueExpr *ov,
+ const LValue &lv) {
+ assert(shouldBindAsLValue(ov));
+ CGF.OpaqueLValues.insert(std::make_pair(ov, lv));
+ return OpaqueValueMappingData(ov, true);
+ }
+
+ static OpaqueValueMappingData bind(CodeGenFunction &CGF,
+ const OpaqueValueExpr *ov,
+ const RValue &rv) {
+ assert(!shouldBindAsLValue(ov));
+ CGF.OpaqueRValues.insert(std::make_pair(ov, rv));
+
+ OpaqueValueMappingData data(ov, false);
+
+ // Work around an extremely aggressive peephole optimization in
+ // EmitScalarConversion which assumes that all other uses of a
+ // value are extant.
+ data.Protection = CGF.protectFromPeepholes(rv);
+
+ return data;
+ }
+
+ bool isValid() const { return OpaqueValue != 0; }
+ void clear() { OpaqueValue = 0; }
+
+ void unbind(CodeGenFunction &CGF) {
+ assert(OpaqueValue && "no data to unbind!");
+
+ if (BoundLValue) {
+ CGF.OpaqueLValues.erase(OpaqueValue);
+ } else {
+ CGF.OpaqueRValues.erase(OpaqueValue);
+ CGF.unprotectFromPeepholes(Protection);
+ }
+ }
+ };
+
+ /// An RAII object to set (and then clear) a mapping for an OpaqueValueExpr.
+ class OpaqueValueMapping {
+ CodeGenFunction &CGF;
+ OpaqueValueMappingData Data;
+
+ public:
+ static bool shouldBindAsLValue(const Expr *expr) {
+ return OpaqueValueMappingData::shouldBindAsLValue(expr);
+ }
+
+ /// Build the opaque value mapping for the given conditional
+ /// operator if it's the GNU ?: extension. This is a common
+ /// enough pattern that the convenience operator is really
+ /// helpful.
+ ///
+ OpaqueValueMapping(CodeGenFunction &CGF,
+ const AbstractConditionalOperator *op) : CGF(CGF) {
+ if (isa<ConditionalOperator>(op))
+ // Leave Data empty.
+ return;
+
+ const BinaryConditionalOperator *e = cast<BinaryConditionalOperator>(op);
+ Data = OpaqueValueMappingData::bind(CGF, e->getOpaqueValue(),
+ e->getCommon());
+ }
+
+ OpaqueValueMapping(CodeGenFunction &CGF,
+ const OpaqueValueExpr *opaqueValue,
+ LValue lvalue)
+ : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, lvalue)) {
+ }
+
+ OpaqueValueMapping(CodeGenFunction &CGF,
+ const OpaqueValueExpr *opaqueValue,
+ RValue rvalue)
+ : CGF(CGF), Data(OpaqueValueMappingData::bind(CGF, opaqueValue, rvalue)) {
+ }
+
+ void pop() {
+ Data.unbind(CGF);
+ Data.clear();
+ }
+
+ ~OpaqueValueMapping() {
+ if (Data.isValid()) Data.unbind(CGF);
+ }
+ };
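+
+  // Typical usage for the GNU "x ?: y" extension: construct an
+  // OpaqueValueMapping from the AbstractConditionalOperator before emitting
+  // its branches; the common subexpression is then emitted once, both arms
+  // see it through the OpaqueValueExpr, and the mapping is torn down by the
+  // destructor (or an explicit pop()).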
+
+  /// getByRefValueLLVMField - Given a declaration, returns the LLVM field
+  /// number that holds the value.
+ unsigned getByRefValueLLVMField(const ValueDecl *VD) const;
+
+ /// BuildBlockByrefAddress - Computes address location of the
+ /// variable which is declared as __block.
+ llvm::Value *BuildBlockByrefAddress(llvm::Value *BaseAddr,
+ const VarDecl *V);
+private:
+ CGDebugInfo *DebugInfo;
+ bool DisableDebugInfo;
+
+ /// DidCallStackSave - Whether llvm.stacksave has been called. Used to avoid
+ /// calling llvm.stacksave for multiple VLAs in the same scope.
+ bool DidCallStackSave;
+
+ /// IndirectBranch - The first time an indirect goto is seen we create a block
+ /// with an indirect branch. Every time we see the address of a label taken,
+ /// we add the label to the indirect goto. Every subsequent indirect goto is
+ /// codegen'd as a jump to the IndirectBranch's basic block.
+ llvm::IndirectBrInst *IndirectBranch;
+
+ /// LocalDeclMap - This keeps track of the LLVM allocas or globals for local C
+ /// decls.
+ typedef llvm::DenseMap<const Decl*, llvm::Value*> DeclMapTy;
+ DeclMapTy LocalDeclMap;
+
+ /// LabelMap - This keeps track of the LLVM basic block for each C label.
+ llvm::DenseMap<const LabelDecl*, JumpDest> LabelMap;
+
+ // BreakContinueStack - This keeps track of where break and continue
+ // statements should jump to.
+ struct BreakContinue {
+ BreakContinue(JumpDest Break, JumpDest Continue)
+ : BreakBlock(Break), ContinueBlock(Continue) {}
+
+ JumpDest BreakBlock;
+ JumpDest ContinueBlock;
+ };
+ SmallVector<BreakContinue, 8> BreakContinueStack;
+
+ /// SwitchInsn - The nearest enclosing switch instruction. It is null if the
+ /// current context is not in a switch.
+ llvm::SwitchInst *SwitchInsn;
+
+ /// CaseRangeBlock - This block holds the condition check for the last case
+ /// statement range in the current switch instruction.
+ llvm::BasicBlock *CaseRangeBlock;
+
+ /// OpaqueLValues/OpaqueRValues - Keep track of the current set of opaque
+ /// value expressions and their l-value or r-value mappings.
+ llvm::DenseMap<const OpaqueValueExpr *, LValue> OpaqueLValues;
+ llvm::DenseMap<const OpaqueValueExpr *, RValue> OpaqueRValues;
+
+ // VLASizeMap - This keeps track of the associated size for each VLA type.
+ // We track this by the size expression rather than the type itself because
+ // in certain situations, like a const qualifier applied to a VLA typedef,
+ // multiple VLA types can share the same size expression.
+ // FIXME: Maybe this could be a stack of maps that is pushed/popped as we
+ // enter/leave scopes.
+ llvm::DenseMap<const Expr*, llvm::Value*> VLASizeMap;
+
+ /// A block containing a single 'unreachable' instruction. Created
+ /// lazily by getUnreachableBlock().
+ llvm::BasicBlock *UnreachableBlock;
+
+ /// CXXABIThisDecl - When generating code for a C++ member function,
+ /// this will hold the implicit 'this' declaration.
+ ImplicitParamDecl *CXXABIThisDecl;
+ llvm::Value *CXXABIThisValue;
+ llvm::Value *CXXThisValue;
+
+ /// CXXVTTDecl - When generating code for a base object constructor or
+ /// base object destructor with virtual bases, this will hold the implicit
+ /// VTT parameter.
+ ImplicitParamDecl *CXXVTTDecl;
+ llvm::Value *CXXVTTValue;
+
+ /// OutermostConditional - Points to the outermost active
+ /// conditional control. This is used so that we know if a
+ /// temporary should be destroyed conditionally.
+ ConditionalEvaluation *OutermostConditional;
+
+
+ /// ByRefValueInfo - For each __block variable, contains a pair of the LLVM
+ /// type and the field number that contains the actual data.
+ llvm::DenseMap<const ValueDecl *, std::pair<llvm::Type *,
+ unsigned> > ByRefValueInfo;
+
+ llvm::BasicBlock *TerminateLandingPad;
+ llvm::BasicBlock *TerminateHandler;
+ llvm::BasicBlock *TrapBB;
+
+public:
+ CodeGenFunction(CodeGenModule &cgm);
+ ~CodeGenFunction();
+
+ CodeGenTypes &getTypes() const { return CGM.getTypes(); }
+ ASTContext &getContext() const { return CGM.getContext(); }
+ CGDebugInfo *getDebugInfo() {
+ if (DisableDebugInfo)
+ return NULL;
+ return DebugInfo;
+ }
+ void disableDebugInfo() { DisableDebugInfo = true; }
+ void enableDebugInfo() { DisableDebugInfo = false; }
+
+ bool shouldUseFusedARCCalls() {
+ return CGM.getCodeGenOpts().OptimizationLevel == 0;
+ }
+
+ const LangOptions &getLangOpts() const { return CGM.getLangOpts(); }
+
+ /// Return pointers to the function's exception object and selector slots,
+ /// which are assigned in every landing pad.
+ llvm::Value *getExceptionSlot();
+ llvm::Value *getEHSelectorSlot();
+
+ /// Returns the contents of the function's exception object and selector
+ /// slots.
+ llvm::Value *getExceptionFromSlot();
+ llvm::Value *getSelectorFromSlot();
+
+ llvm::Value *getNormalCleanupDestSlot();
+
+ llvm::BasicBlock *getUnreachableBlock() {
+ if (!UnreachableBlock) {
+ UnreachableBlock = createBasicBlock("unreachable");
+ new llvm::UnreachableInst(getLLVMContext(), UnreachableBlock);
+ }
+ return UnreachableBlock;
+ }
+
+ llvm::BasicBlock *getInvokeDest() {
+ if (!EHStack.requiresLandingPad()) return 0;
+ return getInvokeDestImpl();
+ }
+
+ llvm::LLVMContext &getLLVMContext() { return CGM.getLLVMContext(); }
+
+ //===--------------------------------------------------------------------===//
+ // Cleanups
+ //===--------------------------------------------------------------------===//
+
+ typedef void Destroyer(CodeGenFunction &CGF, llvm::Value *addr, QualType ty);
+
+ void pushIrregularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEndPointer,
+ QualType elementType,
+ Destroyer *destroyer);
+ void pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
+ llvm::Value *arrayEnd,
+ QualType elementType,
+ Destroyer *destroyer);
+
+ void pushDestroy(QualType::DestructionKind dtorKind,
+ llvm::Value *addr, QualType type);
+ void pushDestroy(CleanupKind kind, llvm::Value *addr, QualType type,
+ Destroyer *destroyer, bool useEHCleanupForArray);
+ void emitDestroy(llvm::Value *addr, QualType type, Destroyer *destroyer,
+ bool useEHCleanupForArray);
+ llvm::Function *generateDestroyHelper(llvm::Constant *addr,
+ QualType type,
+ Destroyer *destroyer,
+ bool useEHCleanupForArray);
+ void emitArrayDestroy(llvm::Value *begin, llvm::Value *end,
+ QualType type, Destroyer *destroyer,
+ bool checkZeroLength, bool useEHCleanup);
+
+ Destroyer *getDestroyer(QualType::DestructionKind destructionKind);
+
+ /// Determines whether an EH cleanup is required to destroy a type
+ /// with the given destruction kind.
+ bool needsEHCleanup(QualType::DestructionKind kind) {
+ switch (kind) {
+ case QualType::DK_none:
+ return false;
+ case QualType::DK_cxx_destructor:
+ case QualType::DK_objc_weak_lifetime:
+ return getLangOpts().Exceptions;
+ case QualType::DK_objc_strong_lifetime:
+ return getLangOpts().Exceptions &&
+ CGM.getCodeGenOpts().ObjCAutoRefCountExceptions;
+ }
+ llvm_unreachable("bad destruction kind");
+ }
+
+ CleanupKind getCleanupKind(QualType::DestructionKind kind) {
+ return (needsEHCleanup(kind) ? NormalAndEHCleanup : NormalCleanup);
+ }
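+
+ /// Illustrative sketch (not part of the original header): a typical pairing
+ /// of the helpers above when scheduling destruction of a local of type
+ /// 'type' at address 'addr' (both assumed to be in scope).
+ /// \code
+ ///   if (QualType::DestructionKind dtorKind = type.isDestructedType())
+ ///     pushDestroy(getCleanupKind(dtorKind), addr, type,
+ ///                 getDestroyer(dtorKind), needsEHCleanup(dtorKind));
+ /// \endcode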
+
+ //===--------------------------------------------------------------------===//
+ // Objective-C
+ //===--------------------------------------------------------------------===//
+
+ void GenerateObjCMethod(const ObjCMethodDecl *OMD);
+
+ void StartObjCMethod(const ObjCMethodDecl *MD,
+ const ObjCContainerDecl *CD,
+ SourceLocation StartLoc);
+
+ /// GenerateObjCGetter - Synthesize an Objective-C property getter function.
+ void GenerateObjCGetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID);
+ void generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn);
+
+ void GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
+ ObjCMethodDecl *MD, bool ctor);
+
+ /// GenerateObjCSetter - Synthesize an Objective-C property setter function
+ /// for the given property.
+ void GenerateObjCSetter(ObjCImplementationDecl *IMP,
+ const ObjCPropertyImplDecl *PID);
+ void generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
+ const ObjCPropertyImplDecl *propImpl,
+ llvm::Constant *AtomicHelperFn);
+ bool IndirectObjCSetterArg(const CGFunctionInfo &FI);
+ bool IvarTypeWithAggrGCObjects(QualType Ty);
+
+ //===--------------------------------------------------------------------===//
+ // Block Bits
+ //===--------------------------------------------------------------------===//
+
+ llvm::Value *EmitBlockLiteral(const BlockExpr *);
+ llvm::Value *EmitBlockLiteral(const CGBlockInfo &Info);
+ static void destroyBlockInfos(CGBlockInfo *info);
+ llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
+ const CGBlockInfo &Info,
+ llvm::StructType *,
+ llvm::Constant *BlockVarLayout);
+
+ llvm::Function *GenerateBlockFunction(GlobalDecl GD,
+ const CGBlockInfo &Info,
+ const Decl *OuterFuncDecl,
+ const DeclMapTy &ldm,
+ bool IsLambdaConversionToBlock);
+
+ llvm::Constant *GenerateCopyHelperFunction(const CGBlockInfo &blockInfo);
+ llvm::Constant *GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo);
+ llvm::Constant *GenerateObjCAtomicSetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID);
+ llvm::Constant *GenerateObjCAtomicGetterCopyHelperFunction(
+ const ObjCPropertyImplDecl *PID);
+ llvm::Value *EmitBlockCopyAndAutorelease(llvm::Value *Block, QualType Ty);
+
+ void BuildBlockRelease(llvm::Value *DeclPtr, BlockFieldFlags flags);
+
+ class AutoVarEmission;
+
+ void emitByrefStructureInit(const AutoVarEmission &emission);
+ void enterByrefCleanup(const AutoVarEmission &emission);
+
+ llvm::Value *LoadBlockStruct() {
+ assert(BlockPointer && "no block pointer set!");
+ return BlockPointer;
+ }
+
+ void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
+ void AllocateBlockDecl(const DeclRefExpr *E);
+ llvm::Value *GetAddrOfBlockDecl(const VarDecl *var, bool ByRef);
+ llvm::Type *BuildByRefType(const VarDecl *var);
+
+ void GenerateCode(GlobalDecl GD, llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo);
+ void StartFunction(GlobalDecl GD, QualType RetTy,
+ llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
+ const FunctionArgList &Args,
+ SourceLocation StartLoc);
+
+ void EmitConstructorBody(FunctionArgList &Args);
+ void EmitDestructorBody(FunctionArgList &Args);
+ void EmitFunctionBody(FunctionArgList &Args);
+
+ void EmitForwardingCallToLambda(const CXXRecordDecl *Lambda,
+ CallArgList &CallArgs);
+ void EmitLambdaToBlockPointerBody(FunctionArgList &Args);
+ void EmitLambdaBlockInvokeBody();
+ void EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD);
+ void EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD);
+
+ /// EmitReturnBlock - Emit the unified return block, trying to avoid its
+ /// emission when possible.
+ void EmitReturnBlock();
+
+ /// FinishFunction - Complete IR generation of the current function. It is
+ /// legal to call this function even if there is no current insertion point.
+ void FinishFunction(SourceLocation EndLoc=SourceLocation());
+
+ /// GenerateThunk - Generate a thunk for the given method.
+ void GenerateThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
+ GlobalDecl GD, const ThunkInfo &Thunk);
+
+ void GenerateVarArgsThunk(llvm::Function *Fn, const CGFunctionInfo &FnInfo,
+ GlobalDecl GD, const ThunkInfo &Thunk);
+
+ void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
+ FunctionArgList &Args);
+
+ void EmitInitializerForField(FieldDecl *Field, LValue LHS, Expr *Init,
+ ArrayRef<VarDecl *> ArrayIndexes);
+
+ /// InitializeVTablePointer - Initialize the vtable pointer of the given
+ /// subobject.
+ ///
+ void InitializeVTablePointer(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ CharUnits OffsetFromNearestVBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass);
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+ void InitializeVTablePointers(BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase,
+ CharUnits OffsetFromNearestVBase,
+ bool BaseIsNonVirtualPrimaryBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy& VBases);
+
+ void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
+
+ /// GetVTablePtr - Return the Value of the vtable pointer member pointed
+ /// to by This.
+ llvm::Value *GetVTablePtr(llvm::Value *This, llvm::Type *Ty);
+
+ /// EnterDtorCleanups - Enter the cleanups necessary to complete the
+ /// given phase of destruction for a destructor. The end result
+ /// should call destructors on members and base classes in reverse
+ /// order of their construction.
+ void EnterDtorCleanups(const CXXDestructorDecl *Dtor, CXXDtorType Type);
+
+ /// ShouldInstrumentFunction - Return true if the current function should be
+ /// instrumented with __cyg_profile_func_* calls
+ bool ShouldInstrumentFunction();
+
+ /// EmitFunctionInstrumentation - Emit LLVM code to call the specified
+ /// instrumentation function with the current function and the call site, if
+ /// function instrumentation is enabled.
+ void EmitFunctionInstrumentation(const char *Fn);
+
+ /// EmitMCountInstrumentation - Emit call to .mcount.
+ void EmitMCountInstrumentation();
+
+ /// EmitFunctionProlog - Emit the target specific LLVM code to load the
+ /// arguments for the given function. This is also responsible for naming the
+ /// LLVM function arguments.
+ void EmitFunctionProlog(const CGFunctionInfo &FI,
+ llvm::Function *Fn,
+ const FunctionArgList &Args);
+
+ /// EmitFunctionEpilog - Emit the target specific LLVM code to return the
+ /// given temporary.
+ void EmitFunctionEpilog(const CGFunctionInfo &FI);
+
+ /// EmitStartEHSpec - Emit the start of the exception spec.
+ void EmitStartEHSpec(const Decl *D);
+
+ /// EmitEndEHSpec - Emit the end of the exception spec.
+ void EmitEndEHSpec(const Decl *D);
+
+ /// getTerminateLandingPad - Return a landing pad that just calls terminate.
+ llvm::BasicBlock *getTerminateLandingPad();
+
+ /// getTerminateHandler - Return a handler (not a landing pad, just
+ /// a catch handler) that just calls terminate. This is used when
+ /// a terminate scope encloses a try.
+ llvm::BasicBlock *getTerminateHandler();
+
+ llvm::Type *ConvertTypeForMem(QualType T);
+ llvm::Type *ConvertType(QualType T);
+ llvm::Type *ConvertType(const TypeDecl *T) {
+ return ConvertType(getContext().getTypeDeclType(T));
+ }
+
+ /// LoadObjCSelf - Load the value of self. This function is only valid while
+ /// generating code for an Objective-C method.
+ llvm::Value *LoadObjCSelf();
+
+ /// TypeOfSelfObject - Return type of object that this self represents.
+ QualType TypeOfSelfObject();
+
+ /// hasAggregateLLVMType - Return true if the specified AST type will map into
+ /// an aggregate LLVM type or is void.
+ static bool hasAggregateLLVMType(QualType T);
+
+ /// createBasicBlock - Create an LLVM basic block.
+ llvm::BasicBlock *createBasicBlock(StringRef name = "",
+ llvm::Function *parent = 0,
+ llvm::BasicBlock *before = 0) {
+#ifdef NDEBUG
+ return llvm::BasicBlock::Create(getLLVMContext(), "", parent, before);
+#else
+ return llvm::BasicBlock::Create(getLLVMContext(), name, parent, before);
+#endif
+ }
+
+ /// getJumpDestForLabel - Return the JumpDest that the specified label
+ /// maps to.
+ JumpDest getJumpDestForLabel(const LabelDecl *S);
+
+ /// SimplifyForwardingBlocks - If the given basic block is only a branch to
+ /// another basic block, simplify it. This assumes that no other code could
+ /// potentially reference the basic block.
+ void SimplifyForwardingBlocks(llvm::BasicBlock *BB);
+
+ /// EmitBlock - Emit the given block \arg BB and set it as the insert point,
+ /// adding a fall-through branch from the current insert block if
+ /// necessary. It is legal to call this function even if there is no current
+ /// insertion point.
+ ///
+ /// IsFinished - If true, indicates that the caller has finished emitting
+ /// branches to the given block and does not expect to emit code into it. This
+ /// means the block can be ignored if it is unreachable.
+ void EmitBlock(llvm::BasicBlock *BB, bool IsFinished=false);
+
+ /// EmitBlockAfterUses - Emit the given block somewhere hopefully
+ /// near its uses, and leave the insertion point in it.
+ void EmitBlockAfterUses(llvm::BasicBlock *BB);
+
+ /// EmitBranch - Emit a branch to the specified basic block from the current
+ /// insert block, taking care to avoid creation of branches from dummy
+ /// blocks. It is legal to call this function even if there is no current
+ /// insertion point.
+ ///
+ /// This function clears the current insertion point. The caller should follow
+ /// calls to this function with calls to Emit*Block prior to generating new
+ /// code.
+ void EmitBranch(llvm::BasicBlock *Block);
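+
+ /// Illustrative sketch (not part of the original header) of the usual
+ /// block-emission pattern built on createBasicBlock/EmitBlock/EmitBranch;
+ /// 'condExpr' stands for some boolean condition expression.
+ /// \code
+ ///   llvm::BasicBlock *ThenBB = createBasicBlock("then");
+ ///   llvm::BasicBlock *EndBB = createBasicBlock("end");
+ ///   EmitBranchOnBoolExpr(condExpr, ThenBB, EndBB);
+ ///   EmitBlock(ThenBB);
+ ///   // ... emit the 'then' body here ...
+ ///   EmitBranch(EndBB);
+ ///   EmitBlock(EndBB, /*IsFinished=*/true);
+ /// \endcode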
+
+ /// HaveInsertPoint - True if an insertion point is defined. If not, this
+ /// indicates that the current code being emitted is unreachable.
+ bool HaveInsertPoint() const {
+ return Builder.GetInsertBlock() != 0;
+ }
+
+ /// EnsureInsertPoint - Ensure that an insertion point is defined so that
+ /// emitted IR has a place to go. Note that by definition, if this function
+ /// creates a block then that block is unreachable; callers may do better to
+ /// detect when no insertion point is defined and simply skip IR generation.
+ void EnsureInsertPoint() {
+ if (!HaveInsertPoint())
+ EmitBlock(createBasicBlock());
+ }
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified stmt yet.
+ void ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError=false);
+
+ //===--------------------------------------------------------------------===//
+ // Helpers
+ //===--------------------------------------------------------------------===//
+
+ LValue MakeAddrLValue(llvm::Value *V, QualType T,
+ CharUnits Alignment = CharUnits()) {
+ return LValue::MakeAddr(V, T, Alignment, getContext(),
+ CGM.getTBAAInfo(T));
+ }
+ LValue MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
+ CharUnits Alignment;
+ if (!T->isIncompleteType())
+ Alignment = getContext().getTypeAlignInChars(T);
+ return LValue::MakeAddr(V, T, Alignment, getContext(),
+ CGM.getTBAAInfo(T));
+ }
+
+ /// CreateTempAlloca - This creates an alloca and inserts it into the entry
+ /// block. The caller is responsible for setting an appropriate alignment on
+ /// the alloca.
+ llvm::AllocaInst *CreateTempAlloca(llvm::Type *Ty,
+ const Twine &Name = "tmp");
+
+ /// InitTempAlloca - Provide an initial value for the given alloca.
+ void InitTempAlloca(llvm::AllocaInst *Alloca, llvm::Value *Value);
+
+ /// CreateIRTemp - Create a temporary IR object of the given type, with
+ /// appropriate alignment. This routine should only be used when a temporary
+ /// value needs to be stored into an alloca (for example, to avoid explicit
+ /// PHI construction), but the type is the IR type, not the type appropriate
+ /// for storing in memory.
+ llvm::AllocaInst *CreateIRTemp(QualType T, const Twine &Name = "tmp");
+
+ /// CreateMemTemp - Create a temporary memory object of the given type, with
+ /// appropriate alignment.
+ llvm::AllocaInst *CreateMemTemp(QualType T, const Twine &Name = "tmp");
+
+ /// CreateAggTemp - Create a temporary memory object for the given
+ /// aggregate type.
+ AggValueSlot CreateAggTemp(QualType T, const Twine &Name = "tmp") {
+ CharUnits Alignment = getContext().getTypeAlignInChars(T);
+ return AggValueSlot::forAddr(CreateMemTemp(T, Name), Alignment,
+ T.getQualifiers(),
+ AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ }
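+
+ /// Illustrative sketch (not part of the original header): materializing an
+ /// aggregate expression 'E' into a fresh temporary via the helper above.
+ /// \code
+ ///   AggValueSlot Slot = CreateAggTemp(E->getType(), "agg.tmp");
+ ///   EmitAggExpr(E, Slot);
+ ///   llvm::Value *Addr = Slot.getAddr();
+ /// \endcode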
+
+ /// Emit a cast to void* in the appropriate address space.
+ llvm::Value *EmitCastToVoidPtr(llvm::Value *value);
+
+ /// EvaluateExprAsBool - Perform the usual unary conversions on the specified
+ /// expression and compare the result against zero, returning an Int1Ty value.
+ llvm::Value *EvaluateExprAsBool(const Expr *E);
+
+ /// EmitIgnoredExpr - Emit an expression in a context which ignores the result.
+ void EmitIgnoredExpr(const Expr *E);
+
+ /// EmitAnyExpr - Emit code to compute the specified expression which can have
+ /// any type. The result is returned as an RValue struct. If this is an
+ /// aggregate expression, the aggloc/agglocvolatile arguments indicate where
+ /// the result should be returned.
+ ///
+ /// \param IgnoreResult - True if the resulting value isn't used.
+ RValue EmitAnyExpr(const Expr *E,
+ AggValueSlot AggSlot = AggValueSlot::ignored(),
+ bool IgnoreResult = false);
+
+ // EmitVAListRef - Emit a "reference" to a va_list; this is either the address
+ // or the value of the expression, depending on how va_list is defined.
+ llvm::Value *EmitVAListRef(const Expr *E);
+
+ /// EmitAnyExprToTemp - Similar to EmitAnyExpr(), except that the result will
+ /// always be accessible even if no aggregate location is provided.
+ RValue EmitAnyExprToTemp(const Expr *E);
+
+ /// EmitAnyExprToMem - Emits the code necessary to evaluate an
+ /// arbitrary expression into the given memory location.
+ void EmitAnyExprToMem(const Expr *E, llvm::Value *Location,
+ Qualifiers Quals, bool IsInitializer);
+
+ /// EmitExprAsInit - Emits the code necessary to initialize a
+ /// location in memory with the given initializer.
+ void EmitExprAsInit(const Expr *init, const ValueDecl *D,
+ LValue lvalue, bool capturedByInit);
+
+ /// EmitAggregateCopy - Emit an aggregate copy.
+ ///
+ /// \param isVolatile - True iff either the source or the destination is
+ /// volatile.
+ void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+ QualType EltTy, bool isVolatile=false,
+ unsigned Alignment = 0);
+
+ /// StartBlock - Start a new block named N. If the insert block is a dummy
+ /// block, reuse it.
+ void StartBlock(const char *N);
+
+ /// GetAddrOfStaticLocalVar - Return the address of a static local variable.
+ llvm::Constant *GetAddrOfStaticLocalVar(const VarDecl *BVD) {
+ return cast<llvm::Constant>(GetAddrOfLocalVar(BVD));
+ }
+
+ /// GetAddrOfLocalVar - Return the address of a local variable.
+ llvm::Value *GetAddrOfLocalVar(const VarDecl *VD) {
+ llvm::Value *Res = LocalDeclMap[VD];
+ assert(Res && "Invalid argument to GetAddrOfLocalVar(), no decl!");
+ return Res;
+ }
+
+ /// getOpaqueLValueMapping - Given an opaque value expression (which
+ /// must be mapped to an l-value), return its mapping.
+ const LValue &getOpaqueLValueMapping(const OpaqueValueExpr *e) {
+ assert(OpaqueValueMapping::shouldBindAsLValue(e));
+
+ llvm::DenseMap<const OpaqueValueExpr*,LValue>::iterator
+ it = OpaqueLValues.find(e);
+ assert(it != OpaqueLValues.end() && "no mapping for opaque value!");
+ return it->second;
+ }
+
+ /// getOpaqueRValueMapping - Given an opaque value expression (which
+ /// must be mapped to an r-value), return its mapping.
+ const RValue &getOpaqueRValueMapping(const OpaqueValueExpr *e) {
+ assert(!OpaqueValueMapping::shouldBindAsLValue(e));
+
+ llvm::DenseMap<const OpaqueValueExpr*,RValue>::iterator
+ it = OpaqueRValues.find(e);
+ assert(it != OpaqueRValues.end() && "no mapping for opaque value!");
+ return it->second;
+ }
+
+ /// getAccessedFieldNo - Given an encoded value and a result number, return
+ /// the input field number being accessed.
+ static unsigned getAccessedFieldNo(unsigned Idx, const llvm::Constant *Elts);
+
+ llvm::BlockAddress *GetAddrOfLabel(const LabelDecl *L);
+ llvm::BasicBlock *GetIndirectGotoBlock();
+
+ /// EmitNullInitialization - Generate code to set a value of the given type to
+ /// null. If the type contains data member pointers, they will be initialized
+ /// to -1 in accordance with the Itanium C++ ABI.
+ void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);
+
+ // EmitVAArg - Generate code to get an argument from the passed in pointer
+ // and update it accordingly. The return value is a pointer to the argument.
+ // FIXME: We should be able to get rid of this method and use the va_arg
+ // instruction in LLVM instead once it works well enough.
+ llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty);
+
+ /// emitArrayLength - Compute the length of an array, even if it's a
+ /// VLA, and drill down to the base element type.
+ llvm::Value *emitArrayLength(const ArrayType *arrayType,
+ QualType &baseType,
+ llvm::Value *&addr);
+
+ /// EmitVariablyModifiedType - Capture all the sizes for the VLA expressions in
+ /// the given variably-modified type and store them in the VLASizeMap.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitVariablyModifiedType(QualType Ty);
+
+ /// getVLASize - Returns an LLVM value that corresponds to the size,
+ /// in non-variably-sized elements, of a variable length array type,
+ /// plus the largest non-variably-sized element type. Assumes that
+ /// the type has already been emitted with EmitVariablyModifiedType.
+ std::pair<llvm::Value*,QualType> getVLASize(const VariableArrayType *vla);
+ std::pair<llvm::Value*,QualType> getVLASize(QualType vla);
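+
+ /// Illustrative sketch (not part of the original header): emitting the size
+ /// expressions of a variably modified type 'vlaTy' and then querying them.
+ /// \code
+ ///   EmitVariablyModifiedType(vlaTy);
+ ///   std::pair<llvm::Value*, QualType> vlaSize = getVLASize(vlaTy);
+ ///   llvm::Value *numElts = vlaSize.first;  // number of elements
+ ///   QualType eltTy = vlaSize.second;       // largest fixed-size element type
+ /// \endcode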
+
+ /// LoadCXXThis - Load the value of 'this'. This function is only valid while
+ /// generating code for a C++ member function.
+ llvm::Value *LoadCXXThis() {
+ assert(CXXThisValue && "no 'this' value for this function");
+ return CXXThisValue;
+ }
+
+ /// LoadCXXVTT - Load the VTT parameter passed to base
+ /// constructors/destructors that have virtual bases.
+ llvm::Value *LoadCXXVTT() {
+ assert(CXXVTTValue && "no VTT value for this function");
+ return CXXVTTValue;
+ }
+
+ /// GetAddressOfDirectBaseInCompleteClass - Convert the given pointer to a
+ /// complete class to the given direct base.
+ llvm::Value *
+ GetAddressOfDirectBaseInCompleteClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Base,
+ bool BaseIsVirtual);
+
+ /// GetAddressOfBaseClass - This function adds the necessary delta to the
+ /// given pointer and returns the address of the base class.
+ llvm::Value *GetAddressOfBaseClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue);
+
+ llvm::Value *GetAddressOfDerivedClass(llvm::Value *Value,
+ const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd,
+ bool NullCheckValue);
+
+ llvm::Value *GetVirtualBaseClassOffset(llvm::Value *This,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl);
+
+ void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ CXXCtorType CtorType,
+ const FunctionArgList &Args);
+ // It's important not to confuse this and the previous function. Delegating
+ // constructors are the C++0x feature. The constructor delegate optimization
+ // is used to reduce duplication in the base and complete constructors where
+ // they are substantially the same.
+ void EmitDelegatingCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ const FunctionArgList &Args);
+ void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
+ bool ForVirtualBase, llvm::Value *This,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
+
+ void EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
+ llvm::Value *This, llvm::Value *Src,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
+
+ void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ const ConstantArrayType *ArrayTy,
+ llvm::Value *ArrayPtr,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ bool ZeroInitialization = false);
+
+ void EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
+ llvm::Value *NumElements,
+ llvm::Value *ArrayPtr,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ bool ZeroInitialization = false);
+
+ static Destroyer destroyCXXObject;
+
+ void EmitCXXDestructorCall(const CXXDestructorDecl *D, CXXDtorType Type,
+ bool ForVirtualBase, llvm::Value *This);
+
+ void EmitNewArrayInitializer(const CXXNewExpr *E, QualType elementType,
+ llvm::Value *NewPtr, llvm::Value *NumElements);
+
+ void EmitCXXTemporary(const CXXTemporary *Temporary, QualType TempType,
+ llvm::Value *Ptr);
+
+ llvm::Value *EmitCXXNewExpr(const CXXNewExpr *E);
+ void EmitCXXDeleteExpr(const CXXDeleteExpr *E);
+
+ void EmitDeleteCall(const FunctionDecl *DeleteFD, llvm::Value *Ptr,
+ QualType DeleteTy);
+
+ llvm::Value* EmitCXXTypeidExpr(const CXXTypeidExpr *E);
+ llvm::Value *EmitDynamicCast(llvm::Value *V, const CXXDynamicCastExpr *DCE);
+
+ void MaybeEmitStdInitializerListCleanup(llvm::Value *loc, const Expr *init);
+ void EmitStdInitializerListCleanup(llvm::Value *loc,
+ const InitListExpr *init);
+
+ void EmitCheck(llvm::Value *, unsigned Size);
+
+ llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
+ ComplexPairTy EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
+ bool isInc, bool isPre);
+ //===--------------------------------------------------------------------===//
+ // Declaration Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitDecl - Emit a declaration.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitDecl(const Decl &D);
+
+ /// EmitVarDecl - Emit a local variable declaration.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitVarDecl(const VarDecl &D);
+
+ void EmitScalarInit(const Expr *init, const ValueDecl *D,
+ LValue lvalue, bool capturedByInit);
+ void EmitScalarInit(llvm::Value *init, LValue lvalue);
+
+ typedef void SpecialInitFn(CodeGenFunction &Init, const VarDecl &D,
+ llvm::Value *Address);
+
+ /// EmitAutoVarDecl - Emit an auto variable declaration.
+ ///
+ /// This function can be called with a null (unreachable) insert point.
+ void EmitAutoVarDecl(const VarDecl &D);
+
+ class AutoVarEmission {
+ friend class CodeGenFunction;
+
+ const VarDecl *Variable;
+
+ /// The alignment of the variable.
+ CharUnits Alignment;
+
+ /// The address of the alloca. Null if the variable was emitted
+ /// as a global constant.
+ llvm::Value *Address;
+
+ llvm::Value *NRVOFlag;
+
+ /// True if the variable is a __block variable.
+ bool IsByRef;
+
+ /// True if the variable is of aggregate type and has a constant
+ /// initializer.
+ bool IsConstantAggregate;
+
+ struct Invalid {};
+ AutoVarEmission(Invalid) : Variable(0) {}
+
+ AutoVarEmission(const VarDecl &variable)
+ : Variable(&variable), Address(0), NRVOFlag(0),
+ IsByRef(false), IsConstantAggregate(false) {}
+
+ bool wasEmittedAsGlobal() const { return Address == 0; }
+
+ public:
+ static AutoVarEmission invalid() { return AutoVarEmission(Invalid()); }
+
+ /// Returns the address of the object within this declaration.
+ /// Note that this does not chase the forwarding pointer for
+ /// __block decls.
+ llvm::Value *getObjectAddress(CodeGenFunction &CGF) const {
+ if (!IsByRef) return Address;
+
+ return CGF.Builder.CreateStructGEP(Address,
+ CGF.getByRefValueLLVMField(Variable),
+ Variable->getNameAsString());
+ }
+ };
+ AutoVarEmission EmitAutoVarAlloca(const VarDecl &var);
+ void EmitAutoVarInit(const AutoVarEmission &emission);
+ void EmitAutoVarCleanups(const AutoVarEmission &emission);
+ void emitAutoVarTypeCleanup(const AutoVarEmission &emission,
+ QualType::DestructionKind dtorKind);
+
+ void EmitStaticVarDecl(const VarDecl &D,
+ llvm::GlobalValue::LinkageTypes Linkage);
+
+ /// EmitParmDecl - Emit a ParmVarDecl or an ImplicitParamDecl.
+ void EmitParmDecl(const VarDecl &D, llvm::Value *Arg, unsigned ArgNo);
+
+ /// protectFromPeepholes - Protect a value that we're intending to
+ /// store to the side, but which will probably be used later, from
+ /// aggressive peepholing optimizations that might delete it.
+ ///
+ /// Pass the result to unprotectFromPeepholes to declare that
+ /// protection is no longer required.
+ ///
+ /// There's no particular reason why this shouldn't apply to
+ /// l-values, it's just that no existing peepholes work on pointers.
+ PeepholeProtection protectFromPeepholes(RValue rvalue);
+ void unprotectFromPeepholes(PeepholeProtection protection);
+
+ //===--------------------------------------------------------------------===//
+ // Statement Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitStopPoint - Emit a debug stoppoint if we are emitting debug info.
+ void EmitStopPoint(const Stmt *S);
+
+ /// EmitStmt - Emit the code for the statement \arg S. It is legal to call
+ /// this function even if there is no current insertion point.
+ ///
+ /// This function may clear the current insertion point; callers should use
+ /// EnsureInsertPoint if they wish to subsequently generate code without first
+ /// calling EmitBlock, EmitBranch, or EmitStmt.
+ void EmitStmt(const Stmt *S);
+
+ /// EmitSimpleStmt - Try to emit a "simple" statement which does not
+ /// necessarily require an insertion point or debug information; typically
+ /// because the statement amounts to a jump or a container of other
+ /// statements.
+ ///
+ /// \return True if the statement was handled.
+ bool EmitSimpleStmt(const Stmt *S);
+
+ RValue EmitCompoundStmt(const CompoundStmt &S, bool GetLast = false,
+ AggValueSlot AVS = AggValueSlot::ignored());
+
+ /// EmitLabel - Emit the block for the given label. It is legal to call this
+ /// function even if there is no current insertion point.
+ void EmitLabel(const LabelDecl *D); // helper for EmitLabelStmt.
+
+ void EmitLabelStmt(const LabelStmt &S);
+ void EmitGotoStmt(const GotoStmt &S);
+ void EmitIndirectGotoStmt(const IndirectGotoStmt &S);
+ void EmitIfStmt(const IfStmt &S);
+ void EmitWhileStmt(const WhileStmt &S);
+ void EmitDoStmt(const DoStmt &S);
+ void EmitForStmt(const ForStmt &S);
+ void EmitReturnStmt(const ReturnStmt &S);
+ void EmitDeclStmt(const DeclStmt &S);
+ void EmitBreakStmt(const BreakStmt &S);
+ void EmitContinueStmt(const ContinueStmt &S);
+ void EmitSwitchStmt(const SwitchStmt &S);
+ void EmitDefaultStmt(const DefaultStmt &S);
+ void EmitCaseStmt(const CaseStmt &S);
+ void EmitCaseStmtRange(const CaseStmt &S);
+ void EmitAsmStmt(const AsmStmt &S);
+
+ void EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S);
+ void EmitObjCAtTryStmt(const ObjCAtTryStmt &S);
+ void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
+ void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
+ void EmitObjCAutoreleasePoolStmt(const ObjCAutoreleasePoolStmt &S);
+
+ llvm::Constant *getUnwindResumeFn();
+ llvm::Constant *getUnwindResumeOrRethrowFn();
+ void EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
+ void ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock = false);
+
+ void EmitCXXTryStmt(const CXXTryStmt &S);
+ void EmitCXXForRangeStmt(const CXXForRangeStmt &S);
+
+ //===--------------------------------------------------------------------===//
+ // LValue Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ /// GetUndefRValue - Get an appropriate 'undef' rvalue for the given type.
+ RValue GetUndefRValue(QualType Ty);
+
+ /// EmitUnsupportedRValue - Emit a dummy r-value using the type of E
+ /// and issue an ErrorUnsupported style diagnostic (using the
+ /// provided Name).
+ RValue EmitUnsupportedRValue(const Expr *E,
+ const char *Name);
+
+ /// EmitUnsupportedLValue - Emit a dummy l-value using the type of E and issue
+ /// an ErrorUnsupported style diagnostic (using the provided Name).
+ LValue EmitUnsupportedLValue(const Expr *E,
+ const char *Name);
+
+ /// EmitLValue - Emit code to compute a designator that specifies the location
+ /// of the expression.
+ ///
+ /// This can return one of two things: a simple address or a bitfield
+ /// reference. In either case, the LLVM Value* in the LValue structure is
+ /// guaranteed to be an LLVM pointer type.
+ ///
+ /// If this returns a bitfield reference, nothing about the pointee type of
+ /// the LLVM value is known: For example, it may not be a pointer to an
+ /// integer.
+ ///
+ /// If this returns a normal address, and if the lvalue's C type is fixed
+ /// size, this method guarantees that the returned pointer type will point to
+ /// an LLVM type of the same size of the lvalue's type. If the lvalue has a
+ /// variable length type, this is not possible.
+ ///
+ LValue EmitLValue(const Expr *E);
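+
+ /// Illustrative sketch (not part of the original header): the common pattern
+ /// of computing an l-value for an expression 'E' and loading it as an
+ /// r-value (EmitLoadOfLValue is declared further below).
+ /// \code
+ ///   LValue LV = EmitLValue(E);
+ ///   RValue RV = EmitLoadOfLValue(LV);
+ /// \endcode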
+
+ /// EmitCheckedLValue - Same as EmitLValue but additionally we generate
+ /// checking code to guard against undefined behavior. This is only
+ /// suitable when we know that the address will be used to access the
+ /// object.
+ LValue EmitCheckedLValue(const Expr *E);
+
+ /// EmitToMemory - Change a scalar value from its value
+ /// representation to its in-memory representation.
+ llvm::Value *EmitToMemory(llvm::Value *Value, QualType Ty);
+
+ /// EmitFromMemory - Change a scalar value from its memory
+ /// representation to its value representation.
+ llvm::Value *EmitFromMemory(llvm::Value *Value, QualType Ty);
+
+ /// EmitLoadOfScalar - Load a scalar value from an address, taking
+ /// care to appropriately convert from the memory representation to
+ /// the LLVM value representation.
+ llvm::Value *EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
+ unsigned Alignment, QualType Ty,
+ llvm::MDNode *TBAAInfo = 0);
+
+ /// EmitLoadOfScalar - Load a scalar value from an address, taking
+ /// care to appropriately convert from the memory representation to
+ /// the LLVM value representation. The l-value must be a simple
+ /// l-value.
+ llvm::Value *EmitLoadOfScalar(LValue lvalue);
+
+ /// EmitStoreOfScalar - Store a scalar value to an address, taking
+ /// care to appropriately convert from the LLVM value representation to
+ /// the in-memory representation.
+ void EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
+ bool Volatile, unsigned Alignment, QualType Ty,
+ llvm::MDNode *TBAAInfo = 0, bool isInit=false);
+
+ /// EmitStoreOfScalar - Store a scalar value to an address, taking
+ /// care to appropriately convert from the LLVM value representation to
+ /// the in-memory representation. The l-value must be a simple
+ /// l-value. The isInit flag indicates whether this is an initialization.
+ /// If so, atomic qualifiers are ignored and the store is always non-atomic.
+ void EmitStoreOfScalar(llvm::Value *value, LValue lvalue, bool isInit=false);
+
+ /// EmitLoadOfLValue - Given an expression that represents a value lvalue,
+ /// this method emits the address of the lvalue, then loads the result as an
+ /// rvalue, returning the rvalue.
+ RValue EmitLoadOfLValue(LValue V);
+ RValue EmitLoadOfExtVectorElementLValue(LValue V);
+ RValue EmitLoadOfBitfieldLValue(LValue LV);
+
+ /// EmitStoreThroughLValue - Store the specified rvalue into the specified
+ /// lvalue, where both are guaranteed to have the same type, and that type
+ /// is 'Ty'.
+ void EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit=false);
+ void EmitStoreThroughExtVectorComponentLValue(RValue Src, LValue Dst);
+
+ /// EmitStoreThroughBitfieldLValue - Store Src into Dst with the same
+ /// constraints as EmitStoreThroughLValue.
+ ///
+ /// \param Result [out] - If non-null, this will be set to a Value* for the
+ /// bit-field contents after the store, appropriate for use as the result of
+ /// an assignment to the bit-field.
+ void EmitStoreThroughBitfieldLValue(RValue Src, LValue Dst,
+ llvm::Value **Result=0);
+
+ /// Emit an l-value for an assignment (simple or compound) of complex type.
+ LValue EmitComplexAssignmentLValue(const BinaryOperator *E);
+ LValue EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E);
+
+ // Note: only available for agg return types
+ LValue EmitBinaryOperatorLValue(const BinaryOperator *E);
+ LValue EmitCompoundAssignmentLValue(const CompoundAssignOperator *E);
+ // Note: only available for agg return types
+ LValue EmitCallExprLValue(const CallExpr *E);
+ // Note: only available for agg return types
+ LValue EmitVAArgExprLValue(const VAArgExpr *E);
+ LValue EmitDeclRefLValue(const DeclRefExpr *E);
+ LValue EmitStringLiteralLValue(const StringLiteral *E);
+ LValue EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E);
+ LValue EmitPredefinedLValue(const PredefinedExpr *E);
+ LValue EmitUnaryOpLValue(const UnaryOperator *E);
+ LValue EmitArraySubscriptExpr(const ArraySubscriptExpr *E);
+ LValue EmitExtVectorElementExpr(const ExtVectorElementExpr *E);
+ LValue EmitMemberExpr(const MemberExpr *E);
+ LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
+ LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
+ LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
+ LValue EmitCastLValue(const CastExpr *E);
+ LValue EmitNullInitializationLValue(const CXXScalarValueInitExpr *E);
+ LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
+ LValue EmitOpaqueValueLValue(const OpaqueValueExpr *e);
+
+ class ConstantEmission {
+ llvm::PointerIntPair<llvm::Constant*, 1, bool> ValueAndIsReference;
+ ConstantEmission(llvm::Constant *C, bool isReference)
+ : ValueAndIsReference(C, isReference) {}
+ public:
+ ConstantEmission() {}
+ static ConstantEmission forReference(llvm::Constant *C) {
+ return ConstantEmission(C, true);
+ }
+ static ConstantEmission forValue(llvm::Constant *C) {
+ return ConstantEmission(C, false);
+ }
+
+ operator bool() const { return ValueAndIsReference.getOpaqueValue() != 0; }
+
+ bool isReference() const { return ValueAndIsReference.getInt(); }
+ LValue getReferenceLValue(CodeGenFunction &CGF, Expr *refExpr) const {
+ assert(isReference());
+ return CGF.MakeNaturalAlignAddrLValue(ValueAndIsReference.getPointer(),
+ refExpr->getType());
+ }
+
+ llvm::Constant *getValue() const {
+ assert(!isReference());
+ return ValueAndIsReference.getPointer();
+ }
+ };
+
+ ConstantEmission tryEmitAsConstant(DeclRefExpr *refExpr);
+
+ RValue EmitPseudoObjectRValue(const PseudoObjectExpr *e,
+ AggValueSlot slot = AggValueSlot::ignored());
+ LValue EmitPseudoObjectLValue(const PseudoObjectExpr *e);
+
+ llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
+ const ObjCIvarDecl *Ivar);
+ LValue EmitLValueForAnonRecordField(llvm::Value* Base,
+ const IndirectFieldDecl* Field,
+ unsigned CVRQualifiers);
+ LValue EmitLValueForField(llvm::Value* Base, const FieldDecl* Field,
+ unsigned CVRQualifiers);
+
+ /// EmitLValueForFieldInitialization - Like EmitLValueForField, except that
+ /// if the Field is a reference, this will return the address of the reference
+ /// and not the address of the value stored in the reference.
+ LValue EmitLValueForFieldInitialization(llvm::Value* Base,
+ const FieldDecl* Field,
+ unsigned CVRQualifiers);
+
+ LValue EmitLValueForIvar(QualType ObjectTy,
+ llvm::Value* Base, const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers);
+
+ LValue EmitLValueForBitfield(llvm::Value* Base, const FieldDecl* Field,
+ unsigned CVRQualifiers);
+
+ LValue EmitCXXConstructLValue(const CXXConstructExpr *E);
+ LValue EmitCXXBindTemporaryLValue(const CXXBindTemporaryExpr *E);
+ LValue EmitLambdaLValue(const LambdaExpr *E);
+ LValue EmitCXXTypeidLValue(const CXXTypeidExpr *E);
+
+ LValue EmitObjCMessageExprLValue(const ObjCMessageExpr *E);
+ LValue EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E);
+ LValue EmitStmtExprLValue(const StmtExpr *E);
+ LValue EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E);
+ LValue EmitObjCSelectorLValue(const ObjCSelectorExpr *E);
+ void EmitDeclRefExprDbgValue(const DeclRefExpr *E, llvm::Constant *Init);
+
+ //===--------------------------------------------------------------------===//
+ // Scalar Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ /// EmitCall - Generate a call of the given function, expecting the given
+ /// result type, and using the given argument list which specifies both the
+ /// LLVM arguments and the types they were derived from.
+ ///
+ /// \param TargetDecl - If given, the decl of the function in a direct call;
+ /// used to set attributes on the call (noreturn, etc.).
+ RValue EmitCall(const CGFunctionInfo &FnInfo,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ const CallArgList &Args,
+ const Decl *TargetDecl = 0,
+ llvm::Instruction **callOrInvoke = 0);
+
+ RValue EmitCall(QualType FnType, llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd,
+ const Decl *TargetDecl = 0);
+ RValue EmitCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue = ReturnValueSlot());
+
+ llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
+ ArrayRef<llvm::Value *> Args,
+ const Twine &Name = "");
+ llvm::CallSite EmitCallOrInvoke(llvm::Value *Callee,
+ const Twine &Name = "");
+
+ llvm::Value *BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
+ llvm::Type *Ty);
+ llvm::Value *BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
+ llvm::Value *This, llvm::Type *Ty);
+ llvm::Value *BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
+ NestedNameSpecifier *Qual,
+ llvm::Type *Ty);
+
+ llvm::Value *BuildAppleKextVirtualDestructorCall(const CXXDestructorDecl *DD,
+ CXXDtorType Type,
+ const CXXRecordDecl *RD);
+
+ RValue EmitCXXMemberCall(const CXXMethodDecl *MD,
+ llvm::Value *Callee,
+ ReturnValueSlot ReturnValue,
+ llvm::Value *This,
+ llvm::Value *VTT,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd);
+ RValue EmitCXXMemberCallExpr(const CXXMemberCallExpr *E,
+ ReturnValueSlot ReturnValue);
+ RValue EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
+ ReturnValueSlot ReturnValue);
+
+ llvm::Value *EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD,
+ llvm::Value *This);
+ RValue EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
+ const CXXMethodDecl *MD,
+ ReturnValueSlot ReturnValue);
+
+ RValue EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
+ ReturnValueSlot ReturnValue);
+
+
+ RValue EmitBuiltinExpr(const FunctionDecl *FD,
+ unsigned BuiltinID, const CallExpr *E);
+
+ RValue EmitBlockCallExpr(const CallExpr *E, ReturnValueSlot ReturnValue);
+
+ /// EmitTargetBuiltinExpr - Emit the given builtin call. Returns 0 if the call
+ /// is unhandled by the current target.
+ llvm::Value *EmitTargetBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+ llvm::Value *EmitARMBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitNeonCall(llvm::Function *F,
+ SmallVectorImpl<llvm::Value*> &O,
+ const char *name,
+ unsigned shift = 0, bool rightshift = false);
+ llvm::Value *EmitNeonSplat(llvm::Value *V, llvm::Constant *Idx);
+ llvm::Value *EmitNeonShiftVector(llvm::Value *V, llvm::Type *Ty,
+ bool negateForRightShift);
+
+ llvm::Value *BuildVector(ArrayRef<llvm::Value*> Ops);
+ llvm::Value *EmitX86BuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitHexagonBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+ llvm::Value *EmitPPCBuiltinExpr(unsigned BuiltinID, const CallExpr *E);
+
+ llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
+ llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
+ llvm::Value *EmitObjCNumericLiteral(const ObjCNumericLiteral *E);
+ llvm::Value *EmitObjCArrayLiteral(const ObjCArrayLiteral *E);
+ llvm::Value *EmitObjCDictionaryLiteral(const ObjCDictionaryLiteral *E);
+ llvm::Value *EmitObjCCollectionLiteral(const Expr *E,
+ const ObjCMethodDecl *MethodWithObjects);
+ llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
+ RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
+ ReturnValueSlot Return = ReturnValueSlot());
+
+ /// Retrieves the default cleanup kind for an ARC cleanup.
+ /// Except under -fobjc-arc-eh, ARC cleanups are normal-only.
+ CleanupKind getARCCleanupKind() {
+ return CGM.getCodeGenOpts().ObjCAutoRefCountExceptions
+ ? NormalAndEHCleanup : NormalCleanup;
+ }
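+
+ /// Illustrative sketch (not part of the original header): scheduling an ARC
+ /// strong release for 'addr' of type 'type' (both assumed to be in scope)
+ /// using the ARC-specific cleanup kind above.
+ /// \code
+ ///   pushDestroy(getARCCleanupKind(), addr, type,
+ ///               destroyARCStrongPrecise, /*useEHCleanupForArray=*/false);
+ /// \endcode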
+
+ // ARC primitives.
+ void EmitARCInitWeak(llvm::Value *value, llvm::Value *addr);
+ void EmitARCDestroyWeak(llvm::Value *addr);
+ llvm::Value *EmitARCLoadWeak(llvm::Value *addr);
+ llvm::Value *EmitARCLoadWeakRetained(llvm::Value *addr);
+ llvm::Value *EmitARCStoreWeak(llvm::Value *value, llvm::Value *addr,
+ bool ignored);
+ void EmitARCCopyWeak(llvm::Value *dst, llvm::Value *src);
+ void EmitARCMoveWeak(llvm::Value *dst, llvm::Value *src);
+ llvm::Value *EmitARCRetainAutorelease(QualType type, llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleaseNonBlock(llvm::Value *value);
+ llvm::Value *EmitARCStoreStrong(LValue lvalue, llvm::Value *value,
+ bool ignored);
+ llvm::Value *EmitARCStoreStrongCall(llvm::Value *addr, llvm::Value *value,
+ bool ignored);
+ llvm::Value *EmitARCRetain(QualType type, llvm::Value *value);
+ llvm::Value *EmitARCRetainNonBlock(llvm::Value *value);
+ llvm::Value *EmitARCRetainBlock(llvm::Value *value, bool mandatory);
+ void EmitARCRelease(llvm::Value *value, bool precise);
+ llvm::Value *EmitARCAutorelease(llvm::Value *value);
+ llvm::Value *EmitARCAutoreleaseReturnValue(llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleaseReturnValue(llvm::Value *value);
+ llvm::Value *EmitARCRetainAutoreleasedReturnValue(llvm::Value *value);
+
+ std::pair<LValue,llvm::Value*>
+ EmitARCStoreAutoreleasing(const BinaryOperator *e);
+ std::pair<LValue,llvm::Value*>
+ EmitARCStoreStrong(const BinaryOperator *e, bool ignored);
+
+ llvm::Value *EmitObjCThrowOperand(const Expr *expr);
+
+ llvm::Value *EmitObjCProduceObject(QualType T, llvm::Value *Ptr);
+ llvm::Value *EmitObjCConsumeObject(QualType T, llvm::Value *Ptr);
+ llvm::Value *EmitObjCExtendObjectLifetime(QualType T, llvm::Value *Ptr);
+
+ llvm::Value *EmitARCExtendBlockObject(const Expr *expr);
+ llvm::Value *EmitARCRetainScalarExpr(const Expr *expr);
+ llvm::Value *EmitARCRetainAutoreleaseScalarExpr(const Expr *expr);
+
+ static Destroyer destroyARCStrongImprecise;
+ static Destroyer destroyARCStrongPrecise;
+ static Destroyer destroyARCWeak;
+
+ void EmitObjCAutoreleasePoolPop(llvm::Value *Ptr);
+ llvm::Value *EmitObjCAutoreleasePoolPush();
+ llvm::Value *EmitObjCMRRAutoreleasePoolPush();
+ void EmitObjCAutoreleasePoolCleanup(llvm::Value *Ptr);
+ void EmitObjCMRRAutoreleasePoolPop(llvm::Value *Ptr);
+
+ /// EmitReferenceBindingToExpr - Emits a reference binding to the passed in
+ /// expression. Will emit a temporary variable if E is not an LValue.
+ RValue EmitReferenceBindingToExpr(const Expr* E,
+ const NamedDecl *InitializedDecl);
+
+ //===--------------------------------------------------------------------===//
+ // Expression Emission
+ //===--------------------------------------------------------------------===//
+
+ // Expressions are broken into three classes: scalar, complex, aggregate.
+
+ /// EmitScalarExpr - Emit the computation of the specified expression of LLVM
+ /// scalar type, returning the result.
+ llvm::Value *EmitScalarExpr(const Expr *E , bool IgnoreResultAssign = false);
+
+ /// EmitScalarConversion - Emit a conversion from the specified type to the
+ /// specified destination type, both of which are LLVM scalar types.
+ llvm::Value *EmitScalarConversion(llvm::Value *Src, QualType SrcTy,
+ QualType DstTy);
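+
+ /// Illustrative sketch (not part of the original header): a scalar
+ /// load-convert-store round trip using EmitLoadOfScalar, the conversion
+ /// helper above, and EmitStoreOfScalar; 'srcLV'/'dstLV' and the source and
+ /// destination types are assumed to be in scope.
+ /// \code
+ ///   llvm::Value *V = EmitLoadOfScalar(srcLV);
+ ///   V = EmitScalarConversion(V, srcTy, dstTy);
+ ///   EmitStoreOfScalar(V, dstLV, /*isInit=*/false);
+ /// \endcode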
+
+ /// EmitComplexToScalarConversion - Emit a conversion from the specified
+ /// complex type to the specified destination type, where the destination type
+ /// is an LLVM scalar type.
+ llvm::Value *EmitComplexToScalarConversion(ComplexPairTy Src, QualType SrcTy,
+ QualType DstTy);
+
+
+ /// EmitAggExpr - Emit the computation of the specified expression
+ /// of aggregate type. The result is computed into the given slot,
+ /// which may be null to indicate that the value is not needed.
+ void EmitAggExpr(const Expr *E, AggValueSlot AS, bool IgnoreResult = false);
+
+ /// EmitAggExprToLValue - Emit the computation of the specified expression of
+ /// aggregate type into a temporary LValue.
+ LValue EmitAggExprToLValue(const Expr *E);
+
+ /// EmitGCMemmoveCollectable - Emit a call to the garbage-collector-aware
+ /// memmove API for structs with object pointers.
+ void EmitGCMemmoveCollectable(llvm::Value *DestPtr, llvm::Value *SrcPtr,
+ QualType Ty);
+
+ /// EmitExtendGCLifetime - Given a pointer to an Objective-C object,
+ /// make sure it survives garbage collection until this point.
+ void EmitExtendGCLifetime(llvm::Value *object);
+
+ /// EmitComplexExpr - Emit the computation of the specified expression of
+ /// complex type, returning the result.
+ ComplexPairTy EmitComplexExpr(const Expr *E,
+ bool IgnoreReal = false,
+ bool IgnoreImag = false);
+
+ /// EmitComplexExprIntoAddr - Emit the computation of the specified expression
+ /// of complex type, storing into the specified Value*.
+ void EmitComplexExprIntoAddr(const Expr *E, llvm::Value *DestAddr,
+ bool DestIsVolatile);
+
+ /// StoreComplexToAddr - Store a complex number into the specified address.
+ void StoreComplexToAddr(ComplexPairTy V, llvm::Value *DestAddr,
+ bool DestIsVolatile);
+ /// LoadComplexFromAddr - Load a complex number from the specified address.
+ ComplexPairTy LoadComplexFromAddr(llvm::Value *SrcAddr, bool SrcIsVolatile);
+
+ /// CreateStaticVarDecl - Create a zero-initialized LLVM global for
+ /// a static local variable.
+ llvm::GlobalVariable *CreateStaticVarDecl(const VarDecl &D,
+ const char *Separator,
+ llvm::GlobalValue::LinkageTypes Linkage);
+
+ /// AddInitializerToStaticVarDecl - Add the initializer for 'D' to the
+ /// global variable that has already been created for it. If the initializer
+ /// has a different type than GV does, this may free GV and return a different
+ /// one. Otherwise it just returns GV.
+ llvm::GlobalVariable *
+ AddInitializerToStaticVarDecl(const VarDecl &D,
+ llvm::GlobalVariable *GV);
+
+
+ /// EmitCXXGlobalVarDeclInit - Create the initializer for a C++
+ /// variable with global storage.
+ void EmitCXXGlobalVarDeclInit(const VarDecl &D, llvm::Constant *DeclPtr,
+ bool PerformInit);
+
+ /// EmitCXXGlobalDtorRegistration - Emits a call to register the global ptr
+ /// with the C++ runtime so that its destructor will be called at exit.
+ void EmitCXXGlobalDtorRegistration(llvm::Constant *DtorFn,
+ llvm::Constant *DeclPtr);
+
+ /// Emit code in this function to perform a guarded variable
+ /// initialization. Guarded initializations are used when it's not
+ /// possible to prove that an initialization will be done exactly
+ /// once, e.g. with a static local variable or a static data member
+ /// of a class template.
+ void EmitCXXGuardedInit(const VarDecl &D, llvm::GlobalVariable *DeclPtr,
+ bool PerformInit);
+
+ /// GenerateCXXGlobalInitFunc - Generates code for initializing global
+ /// variables.
+ void GenerateCXXGlobalInitFunc(llvm::Function *Fn,
+ llvm::Constant **Decls,
+ unsigned NumDecls);
+
+ /// GenerateCXXGlobalDtorsFunc - Generates code for destroying global
+ /// variables.
+ void GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
+ const std::vector<std::pair<llvm::WeakVH,
+ llvm::Constant*> > &DtorsAndObjects);
+
+ void GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
+ const VarDecl *D,
+ llvm::GlobalVariable *Addr,
+ bool PerformInit);
+
+ void EmitCXXConstructExpr(const CXXConstructExpr *E, AggValueSlot Dest);
+
+ void EmitSynthesizedCXXCopyCtor(llvm::Value *Dest, llvm::Value *Src,
+ const Expr *Exp);
+
+ void enterFullExpression(const ExprWithCleanups *E) {
+ if (E->getNumObjects() == 0) return;
+ enterNonTrivialFullExpression(E);
+ }
+ void enterNonTrivialFullExpression(const ExprWithCleanups *E);
+
+ void EmitCXXThrowExpr(const CXXThrowExpr *E);
+
+ void EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Dest);
+
+ RValue EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest = 0);
+
+ //===--------------------------------------------------------------------===//
+ // Annotations Emission
+ //===--------------------------------------------------------------------===//
+
+ /// Emit an annotation call (intrinsic or builtin).
+ llvm::Value *EmitAnnotationCall(llvm::Value *AnnotationFn,
+ llvm::Value *AnnotatedVal,
+ llvm::StringRef AnnotationStr,
+ SourceLocation Location);
+
+ /// Emit local annotations for the local variable V, declared by D.
+ void EmitVarAnnotations(const VarDecl *D, llvm::Value *V);
+
+ /// Emit field annotations for the given field & value. Returns the
+ /// annotation result.
+ llvm::Value *EmitFieldAnnotations(const FieldDecl *D, llvm::Value *V);
+
+ //===--------------------------------------------------------------------===//
+ // Internal Helpers
+ //===--------------------------------------------------------------------===//
+
+ /// ContainsLabel - Return true if the statement contains a label. If this
+ /// statement is not executed normally and contains no label, the code can
+ /// simply be removed.
+ static bool ContainsLabel(const Stmt *S, bool IgnoreCaseStmts = false);
+
+ /// containsBreak - Return true if the statement contains a break out of it.
+ /// If the statement (recursively) contains a switch or loop with a break
+ /// inside of it, this is fine.
+ static bool containsBreak(const Stmt *S);
+
+ /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+ /// to a constant, or if it does but contains a label, return false. If it
+ /// constant folds, return true and set the boolean result in Result.
+ bool ConstantFoldsToSimpleInteger(const Expr *Cond, bool &Result);
+
+ /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
+ /// to a constant, or if it does but contains a label, return false. If it
+ /// constant folds, return true and set the folded value.
+ bool ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APInt &Result);
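+
+ /// Illustrative sketch (not part of the original header): folding an 'if'
+ /// condition before emitting control flow, where 'S' is an IfStmt.
+ /// \code
+ ///   bool CondConstant;
+ ///   if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant)) {
+ ///     const Stmt *Skipped = CondConstant ? S.getElse() : S.getThen();
+ ///     if (!Skipped || !ContainsLabel(Skipped)) {
+ ///       // Only the live arm needs to be emitted.
+ ///     }
+ ///   }
+ /// \endcode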
+
+ /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
+ /// if statement) to the specified blocks. Based on the condition, this might
+ /// try to simplify the codegen of the conditional based on the branch.
+ void EmitBranchOnBoolExpr(const Expr *Cond, llvm::BasicBlock *TrueBlock,
+ llvm::BasicBlock *FalseBlock);
+
+ /// getTrapBB - Create a basic block that will call the trap intrinsic. We'll
+ /// generate a branch around the created basic block as necessary.
+ llvm::BasicBlock *getTrapBB();
+
+ /// EmitCallArg - Emit a single call argument.
+ void EmitCallArg(CallArgList &args, const Expr *E, QualType ArgType);
+
+ /// EmitDelegateCallArg - We are performing a delegate call; that
+ /// is, the current function is delegating to another one. Produce
+ /// an r-value suitable for passing the given parameter.
+ void EmitDelegateCallArg(CallArgList &args, const VarDecl *param);
+
+ /// SetFPAccuracy - Set the minimum required accuracy of the given floating
+ /// point operation, expressed as the maximum relative error in ulp.
+ void SetFPAccuracy(llvm::Value *Val, float Accuracy);
+
+private:
+ llvm::MDNode *getRangeForLoadFromType(QualType Ty);
+ void EmitReturnOfRValue(RValue RV, QualType Ty);
+
+ /// ExpandTypeFromArgs - Reconstruct a structure of type \arg Ty
+ /// from function arguments into \arg Dst. See ABIArgInfo::Expand.
+ ///
+ /// \param AI - The first function argument of the expansion.
+ /// \return The argument following the last expanded function
+ /// argument.
+ llvm::Function::arg_iterator
+ ExpandTypeFromArgs(QualType Ty, LValue Dst,
+ llvm::Function::arg_iterator AI);
+
+ /// ExpandTypeToArgs - Expand an RValue \arg Src, with the LLVM type for \arg
+ /// Ty, into individual arguments on the provided vector \arg Args. See
+ /// ABIArgInfo::Expand.
+ void ExpandTypeToArgs(QualType Ty, RValue Src,
+ SmallVector<llvm::Value*, 16> &Args,
+ llvm::FunctionType *IRFuncTy);
+
+ llvm::Value* EmitAsmInput(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ const Expr *InputExpr, std::string &ConstraintStr);
+
+ llvm::Value* EmitAsmInputLValue(const AsmStmt &S,
+ const TargetInfo::ConstraintInfo &Info,
+ LValue InputValue, QualType InputType,
+ std::string &ConstraintStr);
+
+ /// EmitCallArgs - Emit call arguments for a function.
+ /// The CallArgTypeInfo parameter is used for iterating over the known
+ /// argument types of the function being called.
+ template<typename T>
+ void EmitCallArgs(CallArgList& Args, const T* CallArgTypeInfo,
+ CallExpr::const_arg_iterator ArgBeg,
+ CallExpr::const_arg_iterator ArgEnd) {
+ CallExpr::const_arg_iterator Arg = ArgBeg;
+
+ // First, use the argument types that the type info knows about
+ if (CallArgTypeInfo) {
+ for (typename T::arg_type_iterator I = CallArgTypeInfo->arg_type_begin(),
+ E = CallArgTypeInfo->arg_type_end(); I != E; ++I, ++Arg) {
+ assert(Arg != ArgEnd && "Running over edge of argument list!");
+ QualType ArgType = *I;
+#ifndef NDEBUG
+ QualType ActualArgType = Arg->getType();
+ if (ArgType->isPointerType() && ActualArgType->isPointerType()) {
+ QualType ActualBaseType =
+ ActualArgType->getAs<PointerType>()->getPointeeType();
+ QualType ArgBaseType =
+ ArgType->getAs<PointerType>()->getPointeeType();
+ if (ArgBaseType->isVariableArrayType()) {
+ if (const VariableArrayType *VAT =
+ getContext().getAsVariableArrayType(ActualBaseType)) {
+ if (!VAT->getSizeExpr())
+ ActualArgType = ArgType;
+ }
+ }
+ }
+ assert(getContext().getCanonicalType(ArgType.getNonReferenceType()).
+ getTypePtr() ==
+ getContext().getCanonicalType(ActualArgType).getTypePtr() &&
+ "type mismatch in call argument!");
+#endif
+ EmitCallArg(Args, *Arg, ArgType);
+ }
+
+ // Either we've emitted all the call args, or we have a call to a
+ // variadic function.
+ assert((Arg == ArgEnd || CallArgTypeInfo->isVariadic()) &&
+ "Extra arguments in non-variadic function!");
+
+ }
+
+ // If we still have any arguments, emit them using the type of the argument.
+ for (; Arg != ArgEnd; ++Arg)
+ EmitCallArg(Args, *Arg, Arg->getType());
+ }
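+ // For example, a caller that knows the callee's prototype would typically
+ // pass it as the type info, roughly:
+ //
+ //   CallArgList Args;
+ //   EmitCallArgs(Args, FPT, CE->arg_begin(), CE->arg_end());
+ //
+ // where FPT is the callee's FunctionProtoType (or null for an unprototyped
+ // call, making each argument use its own expression type) and CE the CallExpr.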
+
+ const TargetCodeGenInfo &getTargetHooks() const {
+ return CGM.getTargetCodeGenInfo();
+ }
+
+ void EmitDeclMetadata();
+
+ CodeGenModule::ByrefHelpers *
+ buildByrefHelpers(llvm::StructType &byrefType,
+ const AutoVarEmission &emission);
+
+ void AddObjCARCExceptionMetadata(llvm::Instruction *Inst);
+
+ /// GetPointeeAlignment - Given an expression with a pointer type, find the
+ /// alignment of the type referenced by the pointer. Skip over implicit
+ /// casts.
+ unsigned GetPointeeAlignment(const Expr *Addr);
+
+ /// GetPointeeAlignmentValue - Given an expression with a pointer type, find
+ /// the alignment of the type referenced by the pointer. Skip over implicit
+ /// casts. Return the alignment as an llvm::Value.
+ llvm::Value *GetPointeeAlignmentValue(const Expr *Addr);
+};
+
+/// Helper class with most of the code for saving a value for a
+/// conditional expression cleanup.
+struct DominatingLLVMValue {
+ typedef llvm::PointerIntPair<llvm::Value*, 1, bool> saved_type;
+
+ /// Answer whether the given value needs extra work to be saved.
+ static bool needsSaving(llvm::Value *value) {
+ // If it's not an instruction, we don't need to save.
+ if (!isa<llvm::Instruction>(value)) return false;
+
+ // If it's an instruction in the entry block, we don't need to save.
+ llvm::BasicBlock *block = cast<llvm::Instruction>(value)->getParent();
+ return (block != &block->getParent()->getEntryBlock());
+ }
+
+ /// Try to save the given value.
+ static saved_type save(CodeGenFunction &CGF, llvm::Value *value) {
+ if (!needsSaving(value)) return saved_type(value, false);
+
+ // Otherwise we need an alloca.
+ llvm::Value *alloca =
+ CGF.CreateTempAlloca(value->getType(), "cond-cleanup.save");
+ CGF.Builder.CreateStore(value, alloca);
+
+ return saved_type(alloca, true);
+ }
+
+ static llvm::Value *restore(CodeGenFunction &CGF, saved_type value) {
+ if (!value.getInt()) return value.getPointer();
+ return CGF.Builder.CreateLoad(value.getPointer());
+ }
+};
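+//
+// For example, a value computed in a conditionally executed block can be
+// captured for a later cleanup roughly as:
+//
+//   DominatingLLVMValue::saved_type saved =
+//       DominatingLLVMValue::save(CGF, value);  // spills to an alloca if needed
+//   ...
+//   llvm::Value *reloaded = DominatingLLVMValue::restore(CGF, saved);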
+
+/// A partial specialization of DominatingPointer for pointer values that
+/// might be llvm::Instructions.
+template <class T> struct DominatingPointer<T,true> : DominatingLLVMValue {
+ typedef T *type;
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return static_cast<T*>(DominatingLLVMValue::restore(CGF, value));
+ }
+};
+
+/// A specialization of DominatingValue for RValue.
+template <> struct DominatingValue<RValue> {
+ typedef RValue type;
+ class saved_type {
+ enum Kind { ScalarLiteral, ScalarAddress, AggregateLiteral,
+ AggregateAddress, ComplexAddress };
+
+ llvm::Value *Value;
+ Kind K;
+ saved_type(llvm::Value *v, Kind k) : Value(v), K(k) {}
+
+ public:
+ static bool needsSaving(RValue value);
+ static saved_type save(CodeGenFunction &CGF, RValue value);
+ RValue restore(CodeGenFunction &CGF);
+
+ // implementations in CGExprCXX.cpp
+ };
+
+ static bool needsSaving(type value) {
+ return saved_type::needsSaving(value);
+ }
+ static saved_type save(CodeGenFunction &CGF, type value) {
+ return saved_type::save(CGF, value);
+ }
+ static type restore(CodeGenFunction &CGF, saved_type value) {
+ return value.restore(CGF);
+ }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
new file mode 100644
index 0000000..c0ccf4d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.cpp
@@ -0,0 +1,2667 @@
+//===--- CodeGenModule.cpp - Emit LLVM Code from ASTs for a Module --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This coordinates the per-module state used while generating code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenModule.h"
+#include "CGDebugInfo.h"
+#include "CodeGenFunction.h"
+#include "CodeGenTBAA.h"
+#include "CGCall.h"
+#include "CGCUDARuntime.h"
+#include "CGCXXABI.h"
+#include "CGObjCRuntime.h"
+#include "CGOpenCLRuntime.h"
+#include "TargetInfo.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Mangle.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/ConvertUTF.h"
+#include "llvm/CallingConv.h"
+#include "llvm/Module.h"
+#include "llvm/Intrinsics.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Target/Mangler.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+using namespace CodeGen;
+
+static const char AnnotationSection[] = "llvm.metadata";
+
+static CGCXXABI &createCXXABI(CodeGenModule &CGM) {
+ switch (CGM.getContext().getTargetInfo().getCXXABI()) {
+ case CXXABI_ARM: return *CreateARMCXXABI(CGM);
+ case CXXABI_Itanium: return *CreateItaniumCXXABI(CGM);
+ case CXXABI_Microsoft: return *CreateMicrosoftCXXABI(CGM);
+ }
+
+ llvm_unreachable("invalid C++ ABI kind");
+}
+
+
+CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
+ llvm::Module &M, const llvm::TargetData &TD,
+ DiagnosticsEngine &diags)
+ : Context(C), LangOpts(C.getLangOpts()), CodeGenOpts(CGO), TheModule(M),
+ TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
+ ABI(createCXXABI(*this)),
+ Types(*this),
+ TBAA(0),
+ VTables(*this), ObjCRuntime(0), OpenCLRuntime(0), CUDARuntime(0),
+ DebugInfo(0), ARCData(0), NoObjCARCExceptionsMetadata(0),
+ RRData(0), CFConstantStringClassRef(0),
+ ConstantStringClassRef(0), NSConstantStringType(0),
+ VMContext(M.getContext()),
+ NSConcreteGlobalBlock(0), NSConcreteStackBlock(0),
+ BlockObjectAssign(0), BlockObjectDispose(0),
+ BlockDescriptorType(0), GenericBlockLiteralType(0) {
+
+ // Initialize the type cache.
+ llvm::LLVMContext &LLVMContext = M.getContext();
+ VoidTy = llvm::Type::getVoidTy(LLVMContext);
+ Int8Ty = llvm::Type::getInt8Ty(LLVMContext);
+ Int16Ty = llvm::Type::getInt16Ty(LLVMContext);
+ Int32Ty = llvm::Type::getInt32Ty(LLVMContext);
+ Int64Ty = llvm::Type::getInt64Ty(LLVMContext);
+ FloatTy = llvm::Type::getFloatTy(LLVMContext);
+ DoubleTy = llvm::Type::getDoubleTy(LLVMContext);
+ PointerWidthInBits = C.getTargetInfo().getPointerWidth(0);
+ PointerAlignInBytes =
+ C.toCharUnitsFromBits(C.getTargetInfo().getPointerAlign(0)).getQuantity();
+ IntTy = llvm::IntegerType::get(LLVMContext, C.getTargetInfo().getIntWidth());
+ IntPtrTy = llvm::IntegerType::get(LLVMContext, PointerWidthInBits);
+ Int8PtrTy = Int8Ty->getPointerTo(0);
+ Int8PtrPtrTy = Int8PtrTy->getPointerTo(0);
+
+ if (LangOpts.ObjC1)
+ createObjCRuntime();
+ if (LangOpts.OpenCL)
+ createOpenCLRuntime();
+ if (LangOpts.CUDA)
+ createCUDARuntime();
+
+ // Enable TBAA unless it's suppressed.
+ if (!CodeGenOpts.RelaxedAliasing && CodeGenOpts.OptimizationLevel > 0)
+ TBAA = new CodeGenTBAA(Context, VMContext, getLangOpts(),
+ ABI.getMangleContext());
+
+ // If debug info or coverage generation is enabled, create the CGDebugInfo
+ // object.
+ if (CodeGenOpts.DebugInfo || CodeGenOpts.EmitGcovArcs ||
+ CodeGenOpts.EmitGcovNotes)
+ DebugInfo = new CGDebugInfo(*this);
+
+ Block.GlobalUniqueCount = 0;
+
+ if (C.getLangOpts().ObjCAutoRefCount)
+ ARCData = new ARCEntrypoints();
+ RRData = new RREntrypoints();
+}
+
+CodeGenModule::~CodeGenModule() {
+ delete ObjCRuntime;
+ delete OpenCLRuntime;
+ delete CUDARuntime;
+ delete TheTargetCodeGenInfo;
+ delete &ABI;
+ delete TBAA;
+ delete DebugInfo;
+ delete ARCData;
+ delete RRData;
+}
+
+void CodeGenModule::createObjCRuntime() {
+ if (!LangOpts.NeXTRuntime)
+ ObjCRuntime = CreateGNUObjCRuntime(*this);
+ else
+ ObjCRuntime = CreateMacObjCRuntime(*this);
+}
+
+void CodeGenModule::createOpenCLRuntime() {
+ OpenCLRuntime = new CGOpenCLRuntime(*this);
+}
+
+void CodeGenModule::createCUDARuntime() {
+ CUDARuntime = CreateNVCUDARuntime(*this);
+}
+
+void CodeGenModule::Release() {
+ EmitDeferred();
+ EmitCXXGlobalInitFunc();
+ EmitCXXGlobalDtorFunc();
+ if (ObjCRuntime)
+ if (llvm::Function *ObjCInitFunction = ObjCRuntime->ModuleInitFunction())
+ AddGlobalCtor(ObjCInitFunction);
+ EmitCtorList(GlobalCtors, "llvm.global_ctors");
+ EmitCtorList(GlobalDtors, "llvm.global_dtors");
+ EmitGlobalAnnotations();
+ EmitLLVMUsed();
+
+ SimplifyPersonality();
+
+ if (getCodeGenOpts().EmitDeclMetadata)
+ EmitDeclMetadata();
+
+ if (getCodeGenOpts().EmitGcovArcs || getCodeGenOpts().EmitGcovNotes)
+ EmitCoverageFile();
+
+ if (DebugInfo)
+ DebugInfo->finalize();
+}
+
+void CodeGenModule::UpdateCompletedType(const TagDecl *TD) {
+ // Make sure that this type is translated.
+ Types.UpdateCompletedType(TD);
+}
+
+llvm::MDNode *CodeGenModule::getTBAAInfo(QualType QTy) {
+ if (!TBAA)
+ return 0;
+ return TBAA->getTBAAInfo(QTy);
+}
+
+llvm::MDNode *CodeGenModule::getTBAAInfoForVTablePtr() {
+ if (!TBAA)
+ return 0;
+ return TBAA->getTBAAInfoForVTablePtr();
+}
+
+void CodeGenModule::DecorateInstruction(llvm::Instruction *Inst,
+ llvm::MDNode *TBAAInfo) {
+ Inst->setMetadata(llvm::LLVMContext::MD_tbaa, TBAAInfo);
+}
+
+bool CodeGenModule::isTargetDarwin() const {
+ return getContext().getTargetInfo().getTriple().isOSDarwin();
+}
+
+void CodeGenModule::Error(SourceLocation loc, StringRef error) {
+ unsigned diagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error, error);
+ getDiags().Report(Context.getFullLoc(loc), diagID);
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified stmt yet.
+void CodeGenModule::ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError) {
+ if (OmitOnError && getDiags().hasErrorOccurred())
+ return;
+ unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot compile this %0 yet");
+ std::string Msg = Type;
+ getDiags().Report(Context.getFullLoc(S->getLocStart()), DiagID)
+ << Msg << S->getSourceRange();
+}
+
+/// ErrorUnsupported - Print out an error that codegen doesn't support the
+/// specified decl yet.
+void CodeGenModule::ErrorUnsupported(const Decl *D, const char *Type,
+ bool OmitOnError) {
+ if (OmitOnError && getDiags().hasErrorOccurred())
+ return;
+ unsigned DiagID = getDiags().getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot compile this %0 yet");
+ std::string Msg = Type;
+ getDiags().Report(Context.getFullLoc(D->getLocation()), DiagID) << Msg;
+}
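+// For example, ErrorUnsupported(S, "statement") reports "cannot compile this
+// statement yet" at the statement's location: the Type string is substituted
+// for %0 in the custom diagnostic above.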
+
+llvm::ConstantInt *CodeGenModule::getSize(CharUnits size) {
+ return llvm::ConstantInt::get(SizeTy, size.getQuantity());
+}
+
+void CodeGenModule::setGlobalVisibility(llvm::GlobalValue *GV,
+ const NamedDecl *D) const {
+ // Internal definitions always have default visibility.
+ if (GV->hasLocalLinkage()) {
+ GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+ return;
+ }
+
+ // Set visibility for definitions.
+ NamedDecl::LinkageInfo LV = D->getLinkageAndVisibility();
+ if (LV.visibilityExplicit() || !GV->hasAvailableExternallyLinkage())
+ GV->setVisibility(GetLLVMVisibility(LV.visibility()));
+}
+
+/// Set the symbol visibility of type information (vtable and RTTI)
+/// associated with the given type.
+void CodeGenModule::setTypeVisibility(llvm::GlobalValue *GV,
+ const CXXRecordDecl *RD,
+ TypeVisibilityKind TVK) const {
+ setGlobalVisibility(GV, RD);
+
+ if (!CodeGenOpts.HiddenWeakVTables)
+ return;
+
+ // We never want to drop the visibility for RTTI names.
+ if (TVK == TVK_ForRTTIName)
+ return;
+
+ // We want to drop the visibility to hidden for weak type symbols.
+ // This isn't possible if there might be unresolved references
+ // elsewhere that rely on this symbol being visible.
+
+ // This should be kept roughly in sync with setThunkVisibility
+ // in CGVTables.cpp.
+
+ // Preconditions.
+ if (GV->getLinkage() != llvm::GlobalVariable::LinkOnceODRLinkage ||
+ GV->getVisibility() != llvm::GlobalVariable::DefaultVisibility)
+ return;
+
+ // Don't override an explicit visibility attribute.
+ if (RD->getExplicitVisibility())
+ return;
+
+ switch (RD->getTemplateSpecializationKind()) {
+ // We have to disable the optimization if this is an EI definition
+ // because there might be EI declarations in other shared objects.
+ case TSK_ExplicitInstantiationDefinition:
+ case TSK_ExplicitInstantiationDeclaration:
+ return;
+
+ // Every use of a non-template class's type information has to emit it.
+ case TSK_Undeclared:
+ break;
+
+ // In theory, implicit instantiations can ignore the possibility of
+ // an explicit instantiation declaration because there necessarily
+ // must be an EI definition somewhere with default visibility. In
+ // practice, it's possible to have an explicit instantiation for
+ // an arbitrary template class, and linkers aren't necessarily able
+ // to deal with mixed-visibility symbols.
+ case TSK_ExplicitSpecialization:
+ case TSK_ImplicitInstantiation:
+ if (!CodeGenOpts.HiddenWeakTemplateVTables)
+ return;
+ break;
+ }
+
+ // If there's a key function, there may be translation units
+ // that don't have the key function's definition. But ignore
+ // this if we're emitting RTTI under -fno-rtti.
+ if (!(TVK != TVK_ForRTTI) || LangOpts.RTTI) {
+ if (Context.getKeyFunction(RD))
+ return;
+ }
+
+ // Otherwise, drop the visibility to hidden.
+ GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ GV->setUnnamedAddr(true);
+}
+
+StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
+ const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
+
+ StringRef &Str = MangledDeclNames[GD.getCanonicalDecl()];
+ if (!Str.empty())
+ return Str;
+
+ if (!getCXXABI().getMangleContext().shouldMangleDeclName(ND)) {
+ IdentifierInfo *II = ND->getIdentifier();
+ assert(II && "Attempt to mangle unnamed decl.");
+
+ Str = II->getName();
+ return Str;
+ }
+
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ if (const CXXConstructorDecl *D = dyn_cast<CXXConstructorDecl>(ND))
+ getCXXABI().getMangleContext().mangleCXXCtor(D, GD.getCtorType(), Out);
+ else if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(ND))
+ getCXXABI().getMangleContext().mangleCXXDtor(D, GD.getDtorType(), Out);
+ else if (const BlockDecl *BD = dyn_cast<BlockDecl>(ND))
+ getCXXABI().getMangleContext().mangleBlock(BD, Out);
+ else
+ getCXXABI().getMangleContext().mangleName(ND, Out);
+
+ // Allocate space for the mangled name.
+ Out.flush();
+ size_t Length = Buffer.size();
+ char *Name = MangledNamesAllocator.Allocate<char>(Length);
+ std::copy(Buffer.begin(), Buffer.end(), Name);
+
+ Str = StringRef(Name, Length);
+
+ return Str;
+}
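+// For example, under the Itanium C++ ABI the constructor variants of a class
+// Foo typically mangle to distinct names such as _ZN3FooC1Ev (complete object
+// constructor) and _ZN3FooC2Ev (base object constructor), which is why the
+// ctor/dtor kind from the GlobalDecl is passed to the mangler above.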
+
+void CodeGenModule::getBlockMangledName(GlobalDecl GD, MangleBuffer &Buffer,
+ const BlockDecl *BD) {
+ MangleContext &MangleCtx = getCXXABI().getMangleContext();
+ const Decl *D = GD.getDecl();
+ llvm::raw_svector_ostream Out(Buffer.getBuffer());
+ if (D == 0)
+ MangleCtx.mangleGlobalBlock(BD, Out);
+ else if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
+ MangleCtx.mangleCtorBlock(CD, GD.getCtorType(), BD, Out);
+ else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D))
+ MangleCtx.mangleDtorBlock(DD, GD.getDtorType(), BD, Out);
+ else
+ MangleCtx.mangleBlock(cast<DeclContext>(D), BD, Out);
+}
+
+llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
+ return getModule().getNamedValue(Name);
+}
+
+/// AddGlobalCtor - Add a function to the list that will be called before
+/// main() runs.
+void CodeGenModule::AddGlobalCtor(llvm::Function * Ctor, int Priority) {
+ // FIXME: Type coercion of void()* types.
+ GlobalCtors.push_back(std::make_pair(Ctor, Priority));
+}
+
+/// AddGlobalDtor - Add a function to the list that will be called
+/// when the module is unloaded.
+void CodeGenModule::AddGlobalDtor(llvm::Function * Dtor, int Priority) {
+ // FIXME: Type coercion of void()* types.
+ GlobalDtors.push_back(std::make_pair(Dtor, Priority));
+}
+
+void CodeGenModule::EmitCtorList(const CtorList &Fns, const char *GlobalName) {
+ // Ctor function type is void()*.
+ llvm::FunctionType* CtorFTy = llvm::FunctionType::get(VoidTy, false);
+ llvm::Type *CtorPFTy = llvm::PointerType::getUnqual(CtorFTy);
+
+ // Get the type of a ctor entry, { i32, void ()* }.
+ llvm::StructType *CtorStructTy =
+ llvm::StructType::get(Int32Ty, llvm::PointerType::getUnqual(CtorFTy), NULL);
+
+ // Construct the constructor and destructor arrays.
+ SmallVector<llvm::Constant*, 8> Ctors;
+ for (CtorList::const_iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) {
+ llvm::Constant *S[] = {
+ llvm::ConstantInt::get(Int32Ty, I->second, false),
+ llvm::ConstantExpr::getBitCast(I->first, CtorPFTy)
+ };
+ Ctors.push_back(llvm::ConstantStruct::get(CtorStructTy, S));
+ }
+
+ if (!Ctors.empty()) {
+ llvm::ArrayType *AT = llvm::ArrayType::get(CtorStructTy, Ctors.size());
+ new llvm::GlobalVariable(TheModule, AT, false,
+ llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(AT, Ctors),
+ GlobalName);
+ }
+}
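+// The result is an appending global of { priority, function } pairs, roughly:
+//
+//   @llvm.global_ctors = appending global [1 x { i32, void ()* }]
+//       [{ i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }]
+//
+// (65535 is the usual default priority; @_GLOBAL__I_a is an example
+// initializer function.)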
+
+llvm::GlobalValue::LinkageTypes
+CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
+ GVALinkage Linkage = getContext().GetGVALinkageForFunction(D);
+
+ if (Linkage == GVA_Internal)
+ return llvm::Function::InternalLinkage;
+
+ if (D->hasAttr<DLLExportAttr>())
+ return llvm::Function::DLLExportLinkage;
+
+ if (D->hasAttr<WeakAttr>())
+ return llvm::Function::WeakAnyLinkage;
+
+ // In C99 mode, 'inline' functions are guaranteed to have a strong
+ // definition somewhere else, so we can use available_externally linkage.
+ if (Linkage == GVA_C99Inline)
+ return llvm::Function::AvailableExternallyLinkage;
+
+ // Note that Apple's kernel linker doesn't support symbol
+ // coalescing, so we need to avoid linkonce and weak linkages there.
+ // Normally, this means we just map to internal, but for explicit
+ // instantiations we'll map to external.
+
+ // In C++, the compiler has to emit a definition in every translation unit
+ // that references the function. We should use linkonce_odr because
+ // a) if all references in this translation unit are optimized away, we
+ // don't need to codegen it. b) if the function persists, it needs to be
+ // merged with other definitions. c) C++ has the ODR, so we know the
+ // definition is dependable.
+ if (Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation)
+ return !Context.getLangOpts().AppleKext
+ ? llvm::Function::LinkOnceODRLinkage
+ : llvm::Function::InternalLinkage;
+
+ // An explicit instantiation of a template has weak linkage, since
+ // explicit instantiations can occur in multiple translation units
+ // and must all be equivalent. However, we are not allowed to
+ // throw away these explicit instantiations.
+ if (Linkage == GVA_ExplicitTemplateInstantiation)
+ return !Context.getLangOpts().AppleKext
+ ? llvm::Function::WeakODRLinkage
+ : llvm::Function::ExternalLinkage;
+
+ // Otherwise, we have strong external linkage.
+ assert(Linkage == GVA_StrongExternal);
+ return llvm::Function::ExternalLinkage;
+}
+
+
+/// SetFunctionDefinitionAttributes - Set attributes for a global.
+///
+/// FIXME: This is currently only done for aliases and functions, but not for
+/// variables (these details are set in EmitGlobalVarDefinition for variables).
+void CodeGenModule::SetFunctionDefinitionAttributes(const FunctionDecl *D,
+ llvm::GlobalValue *GV) {
+ SetCommonAttributes(D, GV);
+}
+
+void CodeGenModule::SetLLVMFunctionAttributes(const Decl *D,
+ const CGFunctionInfo &Info,
+ llvm::Function *F) {
+ unsigned CallingConv;
+ AttributeListType AttributeList;
+ ConstructAttributeList(Info, D, AttributeList, CallingConv);
+ F->setAttributes(llvm::AttrListPtr::get(AttributeList.begin(),
+ AttributeList.size()));
+ F->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+}
+
+/// Determines whether the language options require us to model
+/// unwind exceptions. We treat -fexceptions as mandating this
+/// except under the fragile ObjC ABI with only ObjC exceptions
+/// enabled. This means, for example, that C with -fexceptions
+/// enables this.
+static bool hasUnwindExceptions(const LangOptions &LangOpts) {
+ // If exceptions are completely disabled, obviously this is false.
+ if (!LangOpts.Exceptions) return false;
+
+ // If C++ exceptions are enabled, this is true.
+ if (LangOpts.CXXExceptions) return true;
+
+ // If ObjC exceptions are enabled, this depends on the ABI.
+ if (LangOpts.ObjCExceptions) {
+ if (!LangOpts.ObjCNonFragileABI) return false;
+ }
+
+ return true;
+}
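+// So, for example, C compiled with -fexceptions models unwinding, while
+// Objective-C with only -fobjc-exceptions under the fragile (32-bit NeXT) ABI
+// does not, since those exceptions are implemented with setjmp/longjmp rather
+// than stack unwinding.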
+
+void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
+ llvm::Function *F) {
+ if (CodeGenOpts.UnwindTables)
+ F->setHasUWTable();
+
+ if (!hasUnwindExceptions(LangOpts))
+ F->addFnAttr(llvm::Attribute::NoUnwind);
+
+ if (D->hasAttr<NakedAttr>()) {
+ // Naked implies noinline: we should not be inlining such functions.
+ F->addFnAttr(llvm::Attribute::Naked);
+ F->addFnAttr(llvm::Attribute::NoInline);
+ }
+
+ if (D->hasAttr<NoInlineAttr>())
+ F->addFnAttr(llvm::Attribute::NoInline);
+
+ // (noinline wins over always_inline, and we can't specify both in IR)
+ if (D->hasAttr<AlwaysInlineAttr>() &&
+ !F->hasFnAttr(llvm::Attribute::NoInline))
+ F->addFnAttr(llvm::Attribute::AlwaysInline);
+
+ if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D))
+ F->setUnnamedAddr(true);
+
+ if (LangOpts.getStackProtector() == LangOptions::SSPOn)
+ F->addFnAttr(llvm::Attribute::StackProtect);
+ else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
+ F->addFnAttr(llvm::Attribute::StackProtectReq);
+
+ if (LangOpts.AddressSanitizer) {
+ // When AddressSanitizer is enabled, set AddressSafety attribute
+ // unless __attribute__((no_address_safety_analysis)) is used.
+ if (!D->hasAttr<NoAddressSafetyAnalysisAttr>())
+ F->addFnAttr(llvm::Attribute::AddressSafety);
+ }
+
+ unsigned alignment = D->getMaxAlignment() / Context.getCharWidth();
+ if (alignment)
+ F->setAlignment(alignment);
+
+ // C++ ABI requires 2-byte alignment for member functions.
+ if (F->getAlignment() < 2 && isa<CXXMethodDecl>(D))
+ F->setAlignment(2);
+}
+
+void CodeGenModule::SetCommonAttributes(const Decl *D,
+ llvm::GlobalValue *GV) {
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ setGlobalVisibility(GV, ND);
+ else
+ GV->setVisibility(llvm::GlobalValue::DefaultVisibility);
+
+ if (D->hasAttr<UsedAttr>())
+ AddUsedGlobal(GV);
+
+ if (const SectionAttr *SA = D->getAttr<SectionAttr>())
+ GV->setSection(SA->getName());
+
+ getTargetCodeGenInfo().SetTargetAttributes(D, GV, *this);
+}
+
+void CodeGenModule::SetInternalFunctionAttributes(const Decl *D,
+ llvm::Function *F,
+ const CGFunctionInfo &FI) {
+ SetLLVMFunctionAttributes(D, FI, F);
+ SetLLVMFunctionAttributesForDefinition(D, F);
+
+ F->setLinkage(llvm::Function::InternalLinkage);
+
+ SetCommonAttributes(D, F);
+}
+
+void CodeGenModule::SetFunctionAttributes(GlobalDecl GD,
+ llvm::Function *F,
+ bool IsIncompleteFunction) {
+ if (unsigned IID = F->getIntrinsicID()) {
+ // If this is an intrinsic function, set the function's attributes
+ // to the intrinsic's attributes.
+ F->setAttributes(llvm::Intrinsic::getAttributes((llvm::Intrinsic::ID)IID));
+ return;
+ }
+
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+
+ if (!IsIncompleteFunction)
+ SetLLVMFunctionAttributes(FD, getTypes().arrangeGlobalDeclaration(GD), F);
+
+ // Only a few attributes are set on declarations; these may later be
+ // overridden by a definition.
+
+ if (FD->hasAttr<DLLImportAttr>()) {
+ F->setLinkage(llvm::Function::DLLImportLinkage);
+ } else if (FD->hasAttr<WeakAttr>() ||
+ FD->isWeakImported()) {
+ // "extern_weak" is overloaded in LLVM; we probably should have
+ // separate linkage types for this.
+ F->setLinkage(llvm::Function::ExternalWeakLinkage);
+ } else {
+ F->setLinkage(llvm::Function::ExternalLinkage);
+
+ NamedDecl::LinkageInfo LV = FD->getLinkageAndVisibility();
+ if (LV.linkage() == ExternalLinkage && LV.visibilityExplicit()) {
+ F->setVisibility(GetLLVMVisibility(LV.visibility()));
+ }
+ }
+
+ if (const SectionAttr *SA = FD->getAttr<SectionAttr>())
+ F->setSection(SA->getName());
+}
+
+void CodeGenModule::AddUsedGlobal(llvm::GlobalValue *GV) {
+ assert(!GV->isDeclaration() &&
+ "Only globals with definition can force usage.");
+ LLVMUsed.push_back(GV);
+}
+
+void CodeGenModule::EmitLLVMUsed() {
+ // Don't create llvm.used if there is no need.
+ if (LLVMUsed.empty())
+ return;
+
+ // Convert LLVMUsed to what ConstantArray needs.
+ SmallVector<llvm::Constant*, 8> UsedArray;
+ UsedArray.resize(LLVMUsed.size());
+ for (unsigned i = 0, e = LLVMUsed.size(); i != e; ++i) {
+ UsedArray[i] =
+ llvm::ConstantExpr::getBitCast(cast<llvm::Constant>(&*LLVMUsed[i]),
+ Int8PtrTy);
+ }
+
+ if (UsedArray.empty())
+ return;
+ llvm::ArrayType *ATy = llvm::ArrayType::get(Int8PtrTy, UsedArray.size());
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), ATy, false,
+ llvm::GlobalValue::AppendingLinkage,
+ llvm::ConstantArray::get(ATy, UsedArray),
+ "llvm.used");
+
+ GV->setSection("llvm.metadata");
+}
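+// The emitted array looks roughly like:
+//
+//   @llvm.used = appending global [N x i8*]
+//       [i8* bitcast (void ()* @f to i8*), ...], section "llvm.metadata"
+//
+// which keeps the listed globals alive through LLVM's global dead code
+// elimination.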
+
+void CodeGenModule::EmitDeferred() {
+ // Emit code for any potentially referenced deferred decls. Since a
+ // previously unused static decl may become used during the generation of code
+ // for a static function, iterate until no changes are made.
+
+ while (!DeferredDeclsToEmit.empty() || !DeferredVTables.empty()) {
+ if (!DeferredVTables.empty()) {
+ const CXXRecordDecl *RD = DeferredVTables.back();
+ DeferredVTables.pop_back();
+ getVTables().GenerateClassData(getVTableLinkage(RD), RD);
+ continue;
+ }
+
+ GlobalDecl D = DeferredDeclsToEmit.back();
+ DeferredDeclsToEmit.pop_back();
+
+ // Check to see if we've already emitted this. This is necessary
+ // for a couple of reasons: first, decls can end up in the
+ // deferred-decls queue multiple times, and second, decls can end
+ // up with definitions in unusual ways (e.g. by an extern inline
+ // function acquiring a strong function redefinition). Just
+ // ignore these cases.
+ //
+ // TODO: That said, looking this up multiple times is very wasteful.
+ StringRef Name = getMangledName(D);
+ llvm::GlobalValue *CGRef = GetGlobalValue(Name);
+ assert(CGRef && "Deferred decl wasn't referenced?");
+
+ if (!CGRef->isDeclaration())
+ continue;
+
+ // GlobalAlias::isDeclaration() defers to the aliasee, but for our
+ // purposes an alias counts as a definition.
+ if (isa<llvm::GlobalAlias>(CGRef))
+ continue;
+
+ // Otherwise, emit the definition and move on to the next one.
+ EmitGlobalDefinition(D);
+ }
+}
+
+void CodeGenModule::EmitGlobalAnnotations() {
+ if (Annotations.empty())
+ return;
+
+ // Create a new global variable for the ConstantStruct in the Module.
+ llvm::Constant *Array = llvm::ConstantArray::get(llvm::ArrayType::get(
+ Annotations[0]->getType(), Annotations.size()), Annotations);
+ llvm::GlobalValue *gv = new llvm::GlobalVariable(getModule(),
+ Array->getType(), false, llvm::GlobalValue::AppendingLinkage, Array,
+ "llvm.global.annotations");
+ gv->setSection(AnnotationSection);
+}
+
+llvm::Constant *CodeGenModule::EmitAnnotationString(llvm::StringRef Str) {
+ llvm::StringMap<llvm::Constant*>::iterator i = AnnotationStrings.find(Str);
+ if (i != AnnotationStrings.end())
+ return i->second;
+
+ // Not found yet, create a new global.
+ llvm::Constant *s = llvm::ConstantDataArray::getString(getLLVMContext(), Str);
+ llvm::GlobalValue *gv = new llvm::GlobalVariable(getModule(), s->getType(),
+ true, llvm::GlobalValue::PrivateLinkage, s, ".str");
+ gv->setSection(AnnotationSection);
+ gv->setUnnamedAddr(true);
+ AnnotationStrings[Str] = gv;
+ return gv;
+}
+
+llvm::Constant *CodeGenModule::EmitAnnotationUnit(SourceLocation Loc) {
+ SourceManager &SM = getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ if (PLoc.isValid())
+ return EmitAnnotationString(PLoc.getFilename());
+ return EmitAnnotationString(SM.getBufferName(Loc));
+}
+
+llvm::Constant *CodeGenModule::EmitAnnotationLineNo(SourceLocation L) {
+ SourceManager &SM = getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(L);
+ unsigned LineNo = PLoc.isValid() ? PLoc.getLine() :
+ SM.getExpansionLineNumber(L);
+ return llvm::ConstantInt::get(Int32Ty, LineNo);
+}
+
+llvm::Constant *CodeGenModule::EmitAnnotateAttr(llvm::GlobalValue *GV,
+ const AnnotateAttr *AA,
+ SourceLocation L) {
+ // Get the globals for file name, annotation, and the line number.
+ llvm::Constant *AnnoGV = EmitAnnotationString(AA->getAnnotation()),
+ *UnitGV = EmitAnnotationUnit(L),
+ *LineNoCst = EmitAnnotationLineNo(L);
+
+ // Create the ConstantStruct for the global annotation.
+ llvm::Constant *Fields[4] = {
+ llvm::ConstantExpr::getBitCast(GV, Int8PtrTy),
+ llvm::ConstantExpr::getBitCast(AnnoGV, Int8PtrTy),
+ llvm::ConstantExpr::getBitCast(UnitGV, Int8PtrTy),
+ LineNoCst
+ };
+ return llvm::ConstantStruct::getAnon(Fields);
+}
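+// Each llvm.global.annotations entry built here is therefore a tuple of the
+// form { i8* global, i8* annotation string, i8* source file name, i32 line }.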
+
+void CodeGenModule::AddGlobalAnnotations(const ValueDecl *D,
+ llvm::GlobalValue *GV) {
+ assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
+ // Get the struct elements for these annotations.
+ for (specific_attr_iterator<AnnotateAttr>
+ ai = D->specific_attr_begin<AnnotateAttr>(),
+ ae = D->specific_attr_end<AnnotateAttr>(); ai != ae; ++ai)
+ Annotations.push_back(EmitAnnotateAttr(GV, *ai, D->getLocation()));
+}
+
+bool CodeGenModule::MayDeferGeneration(const ValueDecl *Global) {
+ // Never defer when EmitAllDecls is specified.
+ if (LangOpts.EmitAllDecls)
+ return false;
+
+ return !getContext().DeclMustBeEmitted(Global);
+}
+
+llvm::Constant *CodeGenModule::GetWeakRefReference(const ValueDecl *VD) {
+ const AliasAttr *AA = VD->getAttr<AliasAttr>();
+ assert(AA && "No alias?");
+
+ llvm::Type *DeclTy = getTypes().ConvertTypeForMem(VD->getType());
+
+ // See if there is already something with the target's name in the module.
+ llvm::GlobalValue *Entry = GetGlobalValue(AA->getAliasee());
+
+ llvm::Constant *Aliasee;
+ if (isa<llvm::FunctionType>(DeclTy))
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl(),
+ /*ForVTable=*/false);
+ else
+ Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
+ llvm::PointerType::getUnqual(DeclTy), 0);
+ if (!Entry) {
+ llvm::GlobalValue* F = cast<llvm::GlobalValue>(Aliasee);
+ F->setLinkage(llvm::Function::ExternalWeakLinkage);
+ WeakRefReferences.insert(F);
+ }
+
+ return Aliasee;
+}
+
+void CodeGenModule::EmitGlobal(GlobalDecl GD) {
+ const ValueDecl *Global = cast<ValueDecl>(GD.getDecl());
+
+ // Weak references don't produce any output by themselves.
+ if (Global->hasAttr<WeakRefAttr>())
+ return;
+
+ // If this is an alias definition (which otherwise looks like a declaration),
+ // emit it now.
+ if (Global->hasAttr<AliasAttr>())
+ return EmitAliasDefinition(GD);
+
+ // If this is CUDA, be selective about which declarations we emit.
+ if (LangOpts.CUDA) {
+ if (CodeGenOpts.CUDAIsDevice) {
+ if (!Global->hasAttr<CUDADeviceAttr>() &&
+ !Global->hasAttr<CUDAGlobalAttr>() &&
+ !Global->hasAttr<CUDAConstantAttr>() &&
+ !Global->hasAttr<CUDASharedAttr>())
+ return;
+ } else {
+ if (!Global->hasAttr<CUDAHostAttr>() && (
+ Global->hasAttr<CUDADeviceAttr>() ||
+ Global->hasAttr<CUDAConstantAttr>() ||
+ Global->hasAttr<CUDASharedAttr>()))
+ return;
+ }
+ }
+
+ // Ignore declarations, they will be emitted on their first use.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(Global)) {
+ // Forward declarations are emitted lazily on first use.
+ if (!FD->doesThisDeclarationHaveABody()) {
+ if (!FD->doesDeclarationForceExternallyVisibleDefinition())
+ return;
+
+ const FunctionDecl *InlineDefinition = 0;
+ FD->getBody(InlineDefinition);
+
+ StringRef MangledName = getMangledName(GD);
+ DeferredDecls.erase(MangledName);
+ EmitGlobalDefinition(InlineDefinition);
+ return;
+ }
+ } else {
+ const VarDecl *VD = cast<VarDecl>(Global);
+ assert(VD->isFileVarDecl() && "Cannot emit local var decl as global.");
+
+ if (VD->isThisDeclarationADefinition() != VarDecl::Definition)
+ return;
+ }
+
+ // Defer code generation when possible if this is a static definition, inline
+ // function, etc. These we only want to emit if they are used.
+ if (!MayDeferGeneration(Global)) {
+ // Emit the definition if it can't be deferred.
+ EmitGlobalDefinition(GD);
+ return;
+ }
+
+ // If we're deferring emission of a C++ variable with an
+ // initializer, remember the order in which it appeared in the file.
+ if (getLangOpts().CPlusPlus && isa<VarDecl>(Global) &&
+ cast<VarDecl>(Global)->hasInit()) {
+ DelayedCXXInitPosition[Global] = CXXGlobalInits.size();
+ CXXGlobalInits.push_back(0);
+ }
+
+ // If the value has already been used, add it directly to the
+ // DeferredDeclsToEmit list.
+ StringRef MangledName = getMangledName(GD);
+ if (GetGlobalValue(MangledName))
+ DeferredDeclsToEmit.push_back(GD);
+ else {
+ // Otherwise, remember that we saw a deferred decl with this name. The
+ // first use of the mangled name will cause it to move into
+ // DeferredDeclsToEmit.
+ DeferredDecls[MangledName] = GD;
+ }
+}
+
+namespace {
+ struct FunctionIsDirectlyRecursive :
+ public RecursiveASTVisitor<FunctionIsDirectlyRecursive> {
+ const StringRef Name;
+ const Builtin::Context &BI;
+ bool Result;
+ FunctionIsDirectlyRecursive(StringRef N, const Builtin::Context &C) :
+ Name(N), BI(C), Result(false) {
+ }
+ typedef RecursiveASTVisitor<FunctionIsDirectlyRecursive> Base;
+
+ bool TraverseCallExpr(CallExpr *E) {
+ const FunctionDecl *FD = E->getDirectCallee();
+ if (!FD)
+ return true;
+ AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
+ if (Attr && Name == Attr->getLabel()) {
+ Result = true;
+ return false;
+ }
+ unsigned BuiltinID = FD->getBuiltinID();
+ if (!BuiltinID)
+ return true;
+ StringRef BuiltinName = BI.GetName(BuiltinID);
+ if (BuiltinName.startswith("__builtin_") &&
+ Name == BuiltinName.slice(strlen("__builtin_"), StringRef::npos)) {
+ Result = true;
+ return false;
+ }
+ return true;
+ }
+ };
+}
+
+// isTriviallyRecursive - Check if this function calls another
+// decl that, because of the asm attribute or the other decl being a builtin,
+// ends up pointing to itself.
+bool
+CodeGenModule::isTriviallyRecursive(const FunctionDecl *FD) {
+ StringRef Name;
+ if (getCXXABI().getMangleContext().shouldMangleDeclName(FD)) {
+ // asm labels are a special kind of mangling we have to support.
+ AsmLabelAttr *Attr = FD->getAttr<AsmLabelAttr>();
+ if (!Attr)
+ return false;
+ Name = Attr->getLabel();
+ } else {
+ Name = FD->getName();
+ }
+
+ FunctionIsDirectlyRecursive Walker(Name, Context.BuiltinInfo);
+ Walker.TraverseFunctionDecl(const_cast<FunctionDecl*>(FD));
+ return Walker.Result;
+}
+
+bool
+CodeGenModule::shouldEmitFunction(const FunctionDecl *F) {
+ if (getFunctionLinkage(F) != llvm::Function::AvailableExternallyLinkage)
+ return true;
+ if (CodeGenOpts.OptimizationLevel == 0 &&
+ !F->hasAttr<AlwaysInlineAttr>())
+ return false;
+ // PR9614. Avoid cases where the source code is lying to us. An available
+ // externally function should have an equivalent function somewhere else,
+ // but a function that calls itself is clearly not equivalent to the real
+ // implementation.
+ // This happens in glibc's btowc and in some configure checks.
+ return !isTriviallyRecursive(F);
+}
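+// A typical instance of the PR9614 pattern is an inline wrapper that, through
+// an asm label (or a __builtin_* call), ends up targeting the very symbol it
+// defines, roughly (hypothetical names):
+//
+//   int impl(int) __asm__("foo");
+//   extern __inline int foo(int x) { return impl(x); }
+//
+// If "foo" were emitted available_externally, the call to impl (really a call
+// to the symbol "foo") would just re-enter the emitted body.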
+
+void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
+ const ValueDecl *D = cast<ValueDecl>(GD.getDecl());
+
+ PrettyStackTraceDecl CrashInfo(const_cast<ValueDecl *>(D), D->getLocation(),
+ Context.getSourceManager(),
+ "Generating code for declaration");
+
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ // At -O0, don't generate IR for functions with available_externally
+ // linkage.
+ if (!shouldEmitFunction(Function))
+ return;
+
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ // Make sure to emit the definition(s) before we emit the thunks.
+ // This is necessary for the generation of certain thunks.
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Method))
+ EmitCXXConstructor(CD, GD.getCtorType());
+ else if (const CXXDestructorDecl *DD =dyn_cast<CXXDestructorDecl>(Method))
+ EmitCXXDestructor(DD, GD.getDtorType());
+ else
+ EmitGlobalFunctionDefinition(GD);
+
+ if (Method->isVirtual())
+ getVTables().EmitThunks(GD);
+
+ return;
+ }
+
+ return EmitGlobalFunctionDefinition(GD);
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ return EmitGlobalVarDefinition(VD);
+
+ llvm_unreachable("Invalid argument to EmitGlobalDefinition()");
+}
+
+/// GetOrCreateLLVMFunction - If the specified mangled name is not in the
+/// module, create and return an llvm Function with the specified type. If there
+/// is something in the module with the specified name, return it potentially
+/// bitcasted to the right type.
+///
+/// If D is non-null, it specifies a decl that corresponds to this. This is used
+/// to set the attributes on the function when it is first created.
+llvm::Constant *
+CodeGenModule::GetOrCreateLLVMFunction(StringRef MangledName,
+ llvm::Type *Ty,
+ GlobalDecl D, bool ForVTable,
+ llvm::Attributes ExtraAttrs) {
+ // Lookup the entry, lazily creating it if necessary.
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry) {
+ if (WeakRefReferences.count(Entry)) {
+ const FunctionDecl *FD = cast_or_null<FunctionDecl>(D.getDecl());
+ if (FD && !FD->hasAttr<WeakAttr>())
+ Entry->setLinkage(llvm::Function::ExternalLinkage);
+
+ WeakRefReferences.erase(Entry);
+ }
+
+ if (Entry->getType()->getElementType() == Ty)
+ return Entry;
+
+ // Make sure the result is of the correct type.
+ return llvm::ConstantExpr::getBitCast(Entry, Ty->getPointerTo());
+ }
+
+ // This function doesn't have a complete type (for example, the return
+ // type is an incomplete struct). Use a fake type instead, and make
+ // sure not to try to set attributes.
+ bool IsIncompleteFunction = false;
+
+ llvm::FunctionType *FTy;
+ if (isa<llvm::FunctionType>(Ty)) {
+ FTy = cast<llvm::FunctionType>(Ty);
+ } else {
+ FTy = llvm::FunctionType::get(VoidTy, false);
+ IsIncompleteFunction = true;
+ }
+
+ llvm::Function *F = llvm::Function::Create(FTy,
+ llvm::Function::ExternalLinkage,
+ MangledName, &getModule());
+ assert(F->getName() == MangledName && "name was uniqued!");
+ if (D.getDecl())
+ SetFunctionAttributes(D, F, IsIncompleteFunction);
+ if (ExtraAttrs != llvm::Attribute::None)
+ F->addFnAttr(ExtraAttrs);
+
+ // This is the first use or definition of a mangled name. If there is a
+ // deferred decl with this name, remember that we need to emit it at the end
+ // of the file.
+ llvm::StringMap<GlobalDecl>::iterator DDI = DeferredDecls.find(MangledName);
+ if (DDI != DeferredDecls.end()) {
+ // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
+ // list, and remove it from DeferredDecls (since we don't need it anymore).
+ DeferredDeclsToEmit.push_back(DDI->second);
+ DeferredDecls.erase(DDI);
+
+ // Otherwise, there are cases we have to worry about where we're
+ // using a declaration for which we must emit a definition but where
+ // we might not find a top-level definition:
+ // - member functions defined inline in their classes
+ // - friend functions defined inline in some class
+ // - special member functions with implicit definitions
+ // If we ever change our AST traversal to walk into class methods,
+ // this will be unnecessary.
+ //
+ // We also don't emit a definition for a function if it's going to be an entry
+ // in a vtable, unless it's already marked as used.
+ } else if (getLangOpts().CPlusPlus && D.getDecl()) {
+ // Look for a declaration that's lexically in a record.
+ const FunctionDecl *FD = cast<FunctionDecl>(D.getDecl());
+ do {
+ if (isa<CXXRecordDecl>(FD->getLexicalDeclContext())) {
+ if (FD->isImplicit() && !ForVTable) {
+ assert(FD->isUsed() && "Sema didn't mark implicit function as used!");
+ DeferredDeclsToEmit.push_back(D.getWithDecl(FD));
+ break;
+ } else if (FD->doesThisDeclarationHaveABody()) {
+ DeferredDeclsToEmit.push_back(D.getWithDecl(FD));
+ break;
+ }
+ }
+ FD = FD->getPreviousDecl();
+ } while (FD);
+ }
+
+ // Make sure the result is of the requested type.
+ if (!IsIncompleteFunction) {
+ assert(F->getType()->getElementType() == Ty);
+ return F;
+ }
+
+ llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ return llvm::ConstantExpr::getBitCast(F, PTy);
+}
+
+/// GetAddrOfFunction - Return the address of the given function. If Ty is
+/// non-null, then this function will use the specified type if it has to
+/// create it (this occurs when we see a definition of the function).
+llvm::Constant *CodeGenModule::GetAddrOfFunction(GlobalDecl GD,
+ llvm::Type *Ty,
+ bool ForVTable) {
+ // If there was no specific requested type, just convert it now.
+ if (!Ty)
+ Ty = getTypes().ConvertType(cast<ValueDecl>(GD.getDecl())->getType());
+
+ StringRef MangledName = getMangledName(GD);
+ return GetOrCreateLLVMFunction(MangledName, Ty, GD, ForVTable);
+}
+
+/// CreateRuntimeFunction - Create a new runtime function with the specified
+/// type and name.
+llvm::Constant *
+CodeGenModule::CreateRuntimeFunction(llvm::FunctionType *FTy,
+ StringRef Name,
+ llvm::Attributes ExtraAttrs) {
+ return GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(), /*ForVTable=*/false,
+ ExtraAttrs);
+}
+
+/// isTypeConstant - Determine whether an object of this type can be emitted
+/// as a constant.
+///
+/// If ExcludeCtor is true, the duration when the object's constructor runs
+/// will not be considered. The caller will need to verify that the object is
+/// not written to during its construction.
+bool CodeGenModule::isTypeConstant(QualType Ty, bool ExcludeCtor) {
+ if (!Ty.isConstant(Context) && !Ty->isReferenceType())
+ return false;
+
+ if (Context.getLangOpts().CPlusPlus) {
+ if (const CXXRecordDecl *Record
+ = Context.getBaseElementType(Ty)->getAsCXXRecordDecl())
+ return ExcludeCtor && !Record->hasMutableFields() &&
+ Record->hasTrivialDestructor();
+ }
+
+ return true;
+}
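+// For example, a const int global is constant either way; a const object of a
+// class type with a mutable field or a non-trivial destructor is not; and a
+// const object that is only written during construction counts as constant
+// only when ExcludeCtor is true.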
+
+/// GetOrCreateLLVMGlobal - If the specified mangled name is not in the module,
+/// create and return an llvm GlobalVariable with the specified type. If there
+/// is something in the module with the specified name, return it potentially
+/// bitcasted to the right type.
+///
+/// If D is non-null, it specifies a decl that corresponds to this. This is used
+/// to set the attributes on the global when it is first created.
+llvm::Constant *
+CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName,
+ llvm::PointerType *Ty,
+ const VarDecl *D,
+ bool UnnamedAddr) {
+ // Lookup the entry, lazily creating it if necessary.
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry) {
+ if (WeakRefReferences.count(Entry)) {
+ if (D && !D->hasAttr<WeakAttr>())
+ Entry->setLinkage(llvm::Function::ExternalLinkage);
+
+ WeakRefReferences.erase(Entry);
+ }
+
+ if (UnnamedAddr)
+ Entry->setUnnamedAddr(true);
+
+ if (Entry->getType() == Ty)
+ return Entry;
+
+ // Make sure the result is of the correct type.
+ return llvm::ConstantExpr::getBitCast(Entry, Ty);
+ }
+
+ // This is the first use or definition of a mangled name. If there is a
+ // deferred decl with this name, remember that we need to emit it at the end
+ // of the file.
+ llvm::StringMap<GlobalDecl>::iterator DDI = DeferredDecls.find(MangledName);
+ if (DDI != DeferredDecls.end()) {
+ // Move the potentially referenced deferred decl to the DeferredDeclsToEmit
+ // list, and remove it from DeferredDecls (since we don't need it anymore).
+ DeferredDeclsToEmit.push_back(DDI->second);
+ DeferredDecls.erase(DDI);
+ }
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), Ty->getElementType(), false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0, MangledName, 0,
+ false, Ty->getAddressSpace());
+
+ // Handle things which are present even on external declarations.
+ if (D) {
+ // FIXME: This code is overly simple and should be merged with other global
+ // handling.
+ GV->setConstant(isTypeConstant(D->getType(), false));
+
+ // Set linkage and visibility in case we never see a definition.
+ NamedDecl::LinkageInfo LV = D->getLinkageAndVisibility();
+ if (LV.linkage() != ExternalLinkage) {
+ // Don't set internal linkage on declarations.
+ } else {
+ if (D->hasAttr<DLLImportAttr>())
+ GV->setLinkage(llvm::GlobalValue::DLLImportLinkage);
+ else if (D->hasAttr<WeakAttr>() || D->isWeakImported())
+ GV->setLinkage(llvm::GlobalValue::ExternalWeakLinkage);
+
+ // Set visibility on a declaration only if it's explicit.
+ if (LV.visibilityExplicit())
+ GV->setVisibility(GetLLVMVisibility(LV.visibility()));
+ }
+
+ GV->setThreadLocal(D->isThreadSpecified());
+ }
+
+ return GV;
+}
+
+
+llvm::GlobalVariable *
+CodeGenModule::CreateOrReplaceCXXRuntimeVariable(StringRef Name,
+ llvm::Type *Ty,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+ llvm::GlobalVariable *GV = getModule().getNamedGlobal(Name);
+ llvm::GlobalVariable *OldGV = 0;
+
+
+ if (GV) {
+ // Check if the variable has the right type.
+ if (GV->getType()->getElementType() == Ty)
+ return GV;
+
+ // Because of C++ name mangling, the only way we can end up with an already
+ // existing global with the same name is if it has been declared extern "C".
+ assert(GV->isDeclaration() && "Declaration has wrong type!");
+ OldGV = GV;
+ }
+
+ // Create a new variable.
+ GV = new llvm::GlobalVariable(getModule(), Ty, /*isConstant=*/true,
+ Linkage, 0, Name);
+
+ if (OldGV) {
+ // Replace occurrences of the old variable if needed.
+ GV->takeName(OldGV);
+
+ if (!OldGV->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ OldGV->eraseFromParent();
+ }
+
+ return GV;
+}
+
+/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
+/// given global variable. If Ty is non-null and if the global doesn't exist,
+/// then it will be created with the specified type instead of whatever the
+/// normal requested type would be.
+llvm::Constant *CodeGenModule::GetAddrOfGlobalVar(const VarDecl *D,
+ llvm::Type *Ty) {
+ assert(D->hasGlobalStorage() && "Not a global variable");
+ QualType ASTTy = D->getType();
+ if (Ty == 0)
+ Ty = getTypes().ConvertTypeForMem(ASTTy);
+
+ llvm::PointerType *PTy =
+ llvm::PointerType::get(Ty, getContext().getTargetAddressSpace(ASTTy));
+
+ StringRef MangledName = getMangledName(D);
+ return GetOrCreateLLVMGlobal(MangledName, PTy, D);
+}
+
+/// CreateRuntimeVariable - Create a new runtime global variable with the
+/// specified type and name.
+llvm::Constant *
+CodeGenModule::CreateRuntimeVariable(llvm::Type *Ty,
+ StringRef Name) {
+ return GetOrCreateLLVMGlobal(Name, llvm::PointerType::getUnqual(Ty), 0,
+ true);
+}
+
+void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
+ assert(!D->getInit() && "Cannot emit definite definitions here!");
+
+ if (MayDeferGeneration(D)) {
+ // If we have not seen a reference to this variable yet, place it
+ // into the deferred declarations table to be emitted if needed
+ // later.
+ StringRef MangledName = getMangledName(D);
+ if (!GetGlobalValue(MangledName)) {
+ DeferredDecls[MangledName] = D;
+ return;
+ }
+ }
+
+ // The tentative definition is the only definition.
+ EmitGlobalVarDefinition(D);
+}
+
+void CodeGenModule::EmitVTable(CXXRecordDecl *Class, bool DefinitionRequired) {
+ if (DefinitionRequired)
+ getVTables().GenerateClassData(getVTableLinkage(Class), Class);
+}
+
+llvm::GlobalVariable::LinkageTypes
+CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
+ if (RD->getLinkage() != ExternalLinkage)
+ return llvm::GlobalVariable::InternalLinkage;
+
+ if (const CXXMethodDecl *KeyFunction
+ = RD->getASTContext().getKeyFunction(RD)) {
+ // If this class has a key function, use that to determine the linkage of
+ // the vtable.
+ const FunctionDecl *Def = 0;
+ if (KeyFunction->hasBody(Def))
+ KeyFunction = cast<CXXMethodDecl>(Def);
+
+ switch (KeyFunction->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ // When compiling with optimizations turned on, we emit all vtables,
+ // even if the key function is not defined in the current translation
+ // unit. If this is the case, use available_externally linkage.
+ if (!Def && CodeGenOpts.OptimizationLevel)
+ return llvm::GlobalVariable::AvailableExternallyLinkage;
+
+ if (KeyFunction->isInlined())
+ return !Context.getLangOpts().AppleKext ?
+ llvm::GlobalVariable::LinkOnceODRLinkage :
+ llvm::Function::InternalLinkage;
+
+ return llvm::GlobalVariable::ExternalLinkage;
+
+ case TSK_ImplicitInstantiation:
+ return !Context.getLangOpts().AppleKext ?
+ llvm::GlobalVariable::LinkOnceODRLinkage :
+ llvm::Function::InternalLinkage;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return !Context.getLangOpts().AppleKext ?
+ llvm::GlobalVariable::WeakODRLinkage :
+ llvm::Function::InternalLinkage;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ // FIXME: Use available_externally linkage. However, this currently
+ // breaks LLVM's build due to undefined symbols.
+ // return llvm::GlobalVariable::AvailableExternallyLinkage;
+ return !Context.getLangOpts().AppleKext ?
+ llvm::GlobalVariable::LinkOnceODRLinkage :
+ llvm::Function::InternalLinkage;
+ }
+ }
+
+ if (Context.getLangOpts().AppleKext)
+ return llvm::Function::InternalLinkage;
+
+ switch (RD->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ case TSK_ImplicitInstantiation:
+ // FIXME: Use available_externally linkage. However, this currently
+ // breaks LLVM's build due to undefined symbols.
+ // return llvm::GlobalVariable::AvailableExternallyLinkage;
+ case TSK_ExplicitInstantiationDeclaration:
+ return llvm::GlobalVariable::LinkOnceODRLinkage;
+
+ case TSK_ExplicitInstantiationDefinition:
+ return llvm::GlobalVariable::WeakODRLinkage;
+ }
+
+ llvm_unreachable("Invalid TemplateSpecializationKind!");
+}
+
+CharUnits CodeGenModule::GetTargetTypeStoreSize(llvm::Type *Ty) const {
+ return Context.toCharUnitsFromBits(
+ TheTargetData.getTypeStoreSizeInBits(Ty));
+}
+
+llvm::Constant *
+CodeGenModule::MaybeEmitGlobalStdInitializerListInitializer(const VarDecl *D,
+ const Expr *rawInit) {
+ ArrayRef<ExprWithCleanups::CleanupObject> cleanups;
+ if (const ExprWithCleanups *withCleanups =
+ dyn_cast<ExprWithCleanups>(rawInit)) {
+ cleanups = withCleanups->getObjects();
+ rawInit = withCleanups->getSubExpr();
+ }
+
+ const InitListExpr *init = dyn_cast<InitListExpr>(rawInit);
+ if (!init || !init->initializesStdInitializerList() ||
+ init->getNumInits() == 0)
+ return 0;
+
+ ASTContext &ctx = getContext();
+ unsigned numInits = init->getNumInits();
+ // FIXME: This check is here because we would otherwise silently miscompile
+ // nested global std::initializer_lists. Better would be to have a real
+ // implementation.
+ for (unsigned i = 0; i < numInits; ++i) {
+ const InitListExpr *inner = dyn_cast<InitListExpr>(init->getInit(i));
+ if (inner && inner->initializesStdInitializerList()) {
+ ErrorUnsupported(inner, "nested global std::initializer_list");
+ return 0;
+ }
+ }
+
+ // Synthesize a fake VarDecl for the array and initialize that.
+ QualType elementType = init->getInit(0)->getType();
+ llvm::APInt numElements(ctx.getTypeSize(ctx.getSizeType()), numInits);
+ QualType arrayType = ctx.getConstantArrayType(elementType, numElements,
+ ArrayType::Normal, 0);
+
+ IdentifierInfo *name = &ctx.Idents.get(D->getNameAsString() + "__initlist");
+ TypeSourceInfo *sourceInfo = ctx.getTrivialTypeSourceInfo(
+ arrayType, D->getLocation());
+ VarDecl *backingArray = VarDecl::Create(ctx, const_cast<DeclContext*>(
+ D->getDeclContext()),
+ D->getLocStart(), D->getLocation(),
+ name, arrayType, sourceInfo,
+ SC_Static, SC_Static);
+
+ // Now clone the InitListExpr to initialize the array instead.
+ // Incredible hack: we want to use the existing InitListExpr here, so we need
+ // to tell it that it no longer initializes a std::initializer_list.
+ Expr *arrayInit = new (ctx) InitListExpr(ctx, init->getLBraceLoc(),
+ const_cast<InitListExpr*>(init)->getInits(),
+ init->getNumInits(),
+ init->getRBraceLoc());
+ arrayInit->setType(arrayType);
+
+ if (!cleanups.empty())
+ arrayInit = ExprWithCleanups::Create(ctx, arrayInit, cleanups);
+
+ backingArray->setInit(arrayInit);
+
+ // Emit the definition of the array.
+ EmitGlobalVarDefinition(backingArray);
+
+ // Inspect the initializer list to validate it and determine its type.
+ // FIXME: doing this every time is probably inefficient; caching would be nice
+ RecordDecl *record = init->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl::field_iterator field = record->field_begin();
+ if (field == record->field_end()) {
+ ErrorUnsupported(D, "weird std::initializer_list");
+ return 0;
+ }
+ QualType elementPtr = ctx.getPointerType(elementType.withConst());
+ // Start pointer.
+ if (!ctx.hasSameType(field->getType(), elementPtr)) {
+ ErrorUnsupported(D, "weird std::initializer_list");
+ return 0;
+ }
+ ++field;
+ if (field == record->field_end()) {
+ ErrorUnsupported(D, "weird std::initializer_list");
+ return 0;
+ }
+ bool isStartEnd = false;
+ if (ctx.hasSameType(field->getType(), elementPtr)) {
+ // End pointer.
+ isStartEnd = true;
+ } else if(!ctx.hasSameType(field->getType(), ctx.getSizeType())) {
+ ErrorUnsupported(D, "weird std::initializer_list");
+ return 0;
+ }
+
+ // Now build an APValue representing the std::initializer_list.
+ APValue initListValue(APValue::UninitStruct(), 0, 2);
+ APValue &startField = initListValue.getStructField(0);
+ APValue::LValuePathEntry startOffsetPathEntry;
+ startOffsetPathEntry.ArrayIndex = 0;
+ startField = APValue(APValue::LValueBase(backingArray),
+ CharUnits::fromQuantity(0),
+ llvm::makeArrayRef(startOffsetPathEntry),
+ /*IsOnePastTheEnd=*/false, 0);
+
+ if (isStartEnd) {
+ APValue &endField = initListValue.getStructField(1);
+ APValue::LValuePathEntry endOffsetPathEntry;
+ endOffsetPathEntry.ArrayIndex = numInits;
+ endField = APValue(APValue::LValueBase(backingArray),
+ ctx.getTypeSizeInChars(elementType) * numInits,
+ llvm::makeArrayRef(endOffsetPathEntry),
+ /*IsOnePastTheEnd=*/true, 0);
+ } else {
+ APValue &sizeField = initListValue.getStructField(1);
+ sizeField = APValue(llvm::APSInt(numElements));
+ }
+
+ // Emit the constant for the initializer_list.
+ llvm::Constant *llvmInit =
+ EmitConstantValueForMemory(initListValue, D->getType());
+ assert(llvmInit && "failed to initialize as constant");
+ return llvmInit;
+}
+
+void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
+ llvm::Constant *Init = 0;
+ QualType ASTTy = D->getType();
+ CXXRecordDecl *RD = ASTTy->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ bool NeedsGlobalCtor = false;
+ bool NeedsGlobalDtor = RD && !RD->hasTrivialDestructor();
+
+ const VarDecl *InitDecl;
+ const Expr *InitExpr = D->getAnyInitializer(InitDecl);
+
+ if (!InitExpr) {
+ // This is a tentative definition; tentative definitions are
+ // implicitly initialized with { 0 }.
+ //
+ // Note that tentative definitions are only emitted at the end of
+ // a translation unit, so they should never have incomplete
+ // type. In addition, EmitTentativeDefinition makes sure that we
+ // never attempt to emit a tentative definition if a real one
+    // exists. A use may still exist, however, so we may still need
+ // to do a RAUW.
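+    //
+    // For example (illustrative), a file-scope "int x;" with no initializer
+    // anywhere in the translation unit ends up here and is emitted as a
+    // zero-initialized global.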
+ assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
+ Init = EmitNullConstant(D->getType());
+ } else {
+ // If this is a std::initializer_list, emit the special initializer.
+ Init = MaybeEmitGlobalStdInitializerListInitializer(D, InitExpr);
+ // An empty init list will perform zero-initialization, which happens
+ // to be exactly what we want.
+ // FIXME: It does so in a global constructor, which is *not* what we
+ // want.
+
+ if (!Init)
+ Init = EmitConstantInit(*InitDecl);
+ if (!Init) {
+ QualType T = InitExpr->getType();
+ if (D->getType()->isReferenceType())
+ T = D->getType();
+
+ if (getLangOpts().CPlusPlus) {
+ Init = EmitNullConstant(T);
+ NeedsGlobalCtor = true;
+ } else {
+ ErrorUnsupported(D, "static initializer");
+ Init = llvm::UndefValue::get(getTypes().ConvertType(T));
+ }
+ } else {
+ // We don't need an initializer, so remove the entry for the delayed
+ // initializer position (just in case this entry was delayed) if we
+ // also don't need to register a destructor.
+ if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
+ DelayedCXXInitPosition.erase(D);
+ }
+ }
+
+ llvm::Type* InitType = Init->getType();
+ llvm::Constant *Entry = GetAddrOfGlobalVar(D, InitType);
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast ||
+ // all zero index gep.
+ CE->getOpcode() == llvm::Instruction::GetElementPtr);
+ Entry = CE->getOperand(0);
+ }
+
+ // Entry is now either a Function or GlobalVariable.
+ llvm::GlobalVariable *GV = dyn_cast<llvm::GlobalVariable>(Entry);
+
+ // We have a definition after a declaration with the wrong type.
+ // We must make a new GlobalVariable* and update everything that used OldGV
+ // (a declaration or tentative definition) with the new GlobalVariable*
+ // (which will be a definition).
+ //
+ // This happens if there is a prototype for a global (e.g.
+ // "extern int x[];") and then a definition of a different type (e.g.
+ // "int x[10];"). This also happens when an initializer has a different type
+ // from the type of the global (this happens with unions).
+ if (GV == 0 ||
+ GV->getType()->getElementType() != InitType ||
+ GV->getType()->getAddressSpace() !=
+ getContext().getTargetAddressSpace(ASTTy)) {
+
+ // Move the old entry aside so that we'll create a new one.
+ Entry->setName(StringRef());
+
+ // Make a new global with the correct type, this is now guaranteed to work.
+ GV = cast<llvm::GlobalVariable>(GetAddrOfGlobalVar(D, InitType));
+
+ // Replace all uses of the old global with the new global
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, Entry->getType());
+ Entry->replaceAllUsesWith(NewPtrForOldDecl);
+
+ // Erase the old global, since it is no longer used.
+ cast<llvm::GlobalValue>(Entry)->eraseFromParent();
+ }
+
+ if (D->hasAttr<AnnotateAttr>())
+ AddGlobalAnnotations(D, GV);
+
+ GV->setInitializer(Init);
+
+ // If it is safe to mark the global 'constant', do so now.
+ GV->setConstant(!NeedsGlobalCtor && !NeedsGlobalDtor &&
+ isTypeConstant(D->getType(), true));
+
+ GV->setAlignment(getContext().getDeclAlign(D).getQuantity());
+
+ // Set the llvm linkage type as appropriate.
+ llvm::GlobalValue::LinkageTypes Linkage =
+ GetLLVMLinkageVarDefinition(D, GV);
+ GV->setLinkage(Linkage);
+ if (Linkage == llvm::GlobalVariable::CommonLinkage)
+ // common vars aren't constant even if declared const.
+ GV->setConstant(false);
+
+ SetCommonAttributes(D, GV);
+
+ // Emit the initializer function if necessary.
+ if (NeedsGlobalCtor || NeedsGlobalDtor)
+ EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
+
+ // Emit global variable debug information.
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->EmitGlobalVariable(GV, D);
+}
+
+llvm::GlobalValue::LinkageTypes
+CodeGenModule::GetLLVMLinkageVarDefinition(const VarDecl *D,
+ llvm::GlobalVariable *GV) {
+ GVALinkage Linkage = getContext().GetGVALinkageForVariable(D);
+ if (Linkage == GVA_Internal)
+ return llvm::Function::InternalLinkage;
+ else if (D->hasAttr<DLLImportAttr>())
+ return llvm::Function::DLLImportLinkage;
+ else if (D->hasAttr<DLLExportAttr>())
+ return llvm::Function::DLLExportLinkage;
+ else if (D->hasAttr<WeakAttr>()) {
+ if (GV->isConstant())
+ return llvm::GlobalVariable::WeakODRLinkage;
+ else
+ return llvm::GlobalVariable::WeakAnyLinkage;
+ } else if (Linkage == GVA_TemplateInstantiation ||
+ Linkage == GVA_ExplicitTemplateInstantiation)
+ return llvm::GlobalVariable::WeakODRLinkage;
+ else if (!getLangOpts().CPlusPlus &&
+ ((!CodeGenOpts.NoCommon && !D->getAttr<NoCommonAttr>()) ||
+ D->getAttr<CommonAttr>()) &&
+ !D->hasExternalStorage() && !D->getInit() &&
+ !D->getAttr<SectionAttr>() && !D->isThreadSpecified() &&
+ !D->getAttr<WeakImportAttr>()) {
+ // Thread local vars aren't considered common linkage.
+ return llvm::GlobalVariable::CommonLinkage;
+ }
+ return llvm::GlobalVariable::ExternalLinkage;
+}
+
+/// ReplaceUsesOfNonProtoTypeWithRealFunction - This function is called when we
+/// implement a function with no prototype, e.g. "int foo() {}". If there are
+/// existing call uses of the old function in the module, this adjusts them to
+/// call the new function directly.
+///
+/// This is not just a cleanup: the always_inline pass requires direct calls to
+/// functions to be able to inline them. If there is a bitcast in the way, it
+/// won't inline them. Instcombine normally deletes these calls, but it isn't
+/// run at -O0.
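+///
+/// For example (illustrative), for C source along the lines of:
+///   void foo();                 // unprototyped declaration, IR type void ()
+///   void bar(void) { foo(1); }  // call goes through a bitcast of @foo
+///   void foo(a) int a; {}       // K&R-style definition, IR type void (i32)
+/// the mismatched definition forces a new llvm::Function, and the call in
+/// bar() is rewritten here to call it directly instead of via the bitcast.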
+static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
+ llvm::Function *NewFn) {
+ // If we're redefining a global as a function, don't transform it.
+ llvm::Function *OldFn = dyn_cast<llvm::Function>(Old);
+ if (OldFn == 0) return;
+
+ llvm::Type *NewRetTy = NewFn->getReturnType();
+ SmallVector<llvm::Value*, 4> ArgList;
+
+ for (llvm::Value::use_iterator UI = OldFn->use_begin(), E = OldFn->use_end();
+ UI != E; ) {
+ // TODO: Do invokes ever occur in C code? If so, we should handle them too.
+ llvm::Value::use_iterator I = UI++; // Increment before the CI is erased.
+ llvm::CallInst *CI = dyn_cast<llvm::CallInst>(*I);
+ if (!CI) continue; // FIXME: when we allow Invoke, just do CallSite CS(*I)
+ llvm::CallSite CS(CI);
+ if (!CI || !CS.isCallee(I)) continue;
+
+ // If the return types don't match exactly, and if the call isn't dead, then
+ // we can't transform this call.
+ if (CI->getType() != NewRetTy && !CI->use_empty())
+ continue;
+
+ // Get the attribute list.
+ llvm::SmallVector<llvm::AttributeWithIndex, 8> AttrVec;
+ llvm::AttrListPtr AttrList = CI->getAttributes();
+
+ // Get any return attributes.
+ llvm::Attributes RAttrs = AttrList.getRetAttributes();
+
+ // Add the return attributes.
+ if (RAttrs)
+ AttrVec.push_back(llvm::AttributeWithIndex::get(0, RAttrs));
+
+ // If the function was passed too few arguments, don't transform. If extra
+ // arguments were passed, we silently drop them. If any of the types
+ // mismatch, we don't transform.
+ unsigned ArgNo = 0;
+ bool DontTransform = false;
+ for (llvm::Function::arg_iterator AI = NewFn->arg_begin(),
+ E = NewFn->arg_end(); AI != E; ++AI, ++ArgNo) {
+ if (CS.arg_size() == ArgNo ||
+ CS.getArgument(ArgNo)->getType() != AI->getType()) {
+ DontTransform = true;
+ break;
+ }
+
+ // Add any parameter attributes.
+ if (llvm::Attributes PAttrs = AttrList.getParamAttributes(ArgNo + 1))
+ AttrVec.push_back(llvm::AttributeWithIndex::get(ArgNo + 1, PAttrs));
+ }
+ if (DontTransform)
+ continue;
+
+ if (llvm::Attributes FnAttrs = AttrList.getFnAttributes())
+ AttrVec.push_back(llvm::AttributeWithIndex::get(~0, FnAttrs));
+
+ // Okay, we can transform this. Create the new call instruction and copy
+ // over the required information.
+ ArgList.append(CS.arg_begin(), CS.arg_begin() + ArgNo);
+ llvm::CallInst *NewCall = llvm::CallInst::Create(NewFn, ArgList, "", CI);
+ ArgList.clear();
+ if (!NewCall->getType()->isVoidTy())
+ NewCall->takeName(CI);
+ NewCall->setAttributes(llvm::AttrListPtr::get(AttrVec.begin(),
+ AttrVec.end()));
+ NewCall->setCallingConv(CI->getCallingConv());
+
+ // Finally, remove the old call, replacing any uses with the new one.
+ if (!CI->use_empty())
+ CI->replaceAllUsesWith(NewCall);
+
+ // Copy debug location attached to CI.
+ if (!CI->getDebugLoc().isUnknown())
+ NewCall->setDebugLoc(CI->getDebugLoc());
+ CI->eraseFromParent();
+ }
+}
+
+void CodeGenModule::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+ TemplateSpecializationKind TSK = VD->getTemplateSpecializationKind();
+ // If we have a definition, this might be a deferred decl. If the
+ // instantiation is explicit, make sure we emit it at the end.
+ if (VD->getDefinition() && TSK == TSK_ExplicitInstantiationDefinition)
+ GetAddrOfGlobalVar(VD);
+}
+
+void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
+ const FunctionDecl *D = cast<FunctionDecl>(GD.getDecl());
+
+ // Compute the function info and LLVM type.
+ const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
+ llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
+
+ // Get or create the prototype for the function.
+ llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast);
+ Entry = CE->getOperand(0);
+ }
+
+ if (cast<llvm::GlobalValue>(Entry)->getType()->getElementType() != Ty) {
+ llvm::GlobalValue *OldFn = cast<llvm::GlobalValue>(Entry);
+
+ // If the types mismatch then we have to rewrite the definition.
+ assert(OldFn->isDeclaration() &&
+ "Shouldn't replace non-declaration");
+
+ // F is the Function* for the one with the wrong type, we must make a new
+ // Function* and update everything that used F (a declaration) with the new
+ // Function* (which will be a definition).
+ //
+ // This happens if there is a prototype for a function
+ // (e.g. "int f()") and then a definition of a different type
+ // (e.g. "int f(int x)"). Move the old function aside so that it
+ // doesn't interfere with GetAddrOfFunction.
+ OldFn->setName(StringRef());
+ llvm::Function *NewFn = cast<llvm::Function>(GetAddrOfFunction(GD, Ty));
+
+ // If this is an implementation of a function without a prototype, try to
+ // replace any existing uses of the function (which may be calls) with uses
+ // of the new function
+ if (D->getType()->isFunctionNoProtoType()) {
+ ReplaceUsesOfNonProtoTypeWithRealFunction(OldFn, NewFn);
+ OldFn->removeDeadConstantUsers();
+ }
+
+ // Replace uses of F with the Function we will endow with a body.
+ if (!Entry->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(NewFn, Entry->getType());
+ Entry->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ // Ok, delete the old function now, which is dead.
+ OldFn->eraseFromParent();
+
+ Entry = NewFn;
+ }
+
+ // We need to set linkage and visibility on the function before
+ // generating code for it because various parts of IR generation
+ // want to propagate this information down (e.g. to local static
+ // declarations).
+ llvm::Function *Fn = cast<llvm::Function>(Entry);
+ setFunctionLinkage(D, Fn);
+
+ // FIXME: this is redundant with part of SetFunctionDefinitionAttributes
+ setGlobalVisibility(Fn, D);
+
+ CodeGenFunction(*this).GenerateCode(D, Fn, FI);
+
+ SetFunctionDefinitionAttributes(D, Fn);
+ SetLLVMFunctionAttributesForDefinition(D, Fn);
+
+ if (const ConstructorAttr *CA = D->getAttr<ConstructorAttr>())
+ AddGlobalCtor(Fn, CA->getPriority());
+ if (const DestructorAttr *DA = D->getAttr<DestructorAttr>())
+ AddGlobalDtor(Fn, DA->getPriority());
+ if (D->hasAttr<AnnotateAttr>())
+ AddGlobalAnnotations(D, Fn);
+}
+
+void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
+ const ValueDecl *D = cast<ValueDecl>(GD.getDecl());
+ const AliasAttr *AA = D->getAttr<AliasAttr>();
+ assert(AA && "Not an alias?");
+
+ StringRef MangledName = getMangledName(GD);
+
+ // If there is a definition in the module, then it wins over the alias.
+  // This is dubious, but allowing it is the safe choice: just ignore the alias.
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry && !Entry->isDeclaration())
+ return;
+
+ llvm::Type *DeclTy = getTypes().ConvertTypeForMem(D->getType());
+
+ // Create a reference to the named value. This ensures that it is emitted
+  // if it is a deferred decl.
+ llvm::Constant *Aliasee;
+ if (isa<llvm::FunctionType>(DeclTy))
+ Aliasee = GetOrCreateLLVMFunction(AA->getAliasee(), DeclTy, GlobalDecl(),
+ /*ForVTable=*/false);
+ else
+ Aliasee = GetOrCreateLLVMGlobal(AA->getAliasee(),
+ llvm::PointerType::getUnqual(DeclTy), 0);
+
+ // Create the new alias itself, but don't set a name yet.
+ llvm::GlobalValue *GA =
+ new llvm::GlobalAlias(Aliasee->getType(),
+ llvm::Function::ExternalLinkage,
+ "", Aliasee, &getModule());
+
+ if (Entry) {
+ assert(Entry->isDeclaration());
+
+ // If there is a declaration in the module, then we had an extern followed
+ // by the alias, as in:
+ // extern int test6();
+ // ...
+ // int test6() __attribute__((alias("test7")));
+ //
+ // Remove it and replace uses of it with the alias.
+ GA->takeName(Entry);
+
+ Entry->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(GA,
+ Entry->getType()));
+ Entry->eraseFromParent();
+ } else {
+ GA->setName(MangledName);
+ }
+
+ // Set attributes which are particular to an alias; this is a
+ // specialization of the attributes which may be set on a global
+ // variable/function.
+ if (D->hasAttr<DLLExportAttr>()) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // The dllexport attribute is ignored for undefined symbols.
+ if (FD->hasBody())
+ GA->setLinkage(llvm::Function::DLLExportLinkage);
+ } else {
+ GA->setLinkage(llvm::Function::DLLExportLinkage);
+ }
+ } else if (D->hasAttr<WeakAttr>() ||
+ D->hasAttr<WeakRefAttr>() ||
+ D->isWeakImported()) {
+ GA->setLinkage(llvm::Function::WeakAnyLinkage);
+ }
+
+ SetCommonAttributes(D, GA);
+}
+
+llvm::Function *CodeGenModule::getIntrinsic(unsigned IID,
+ ArrayRef<llvm::Type*> Tys) {
+ return llvm::Intrinsic::getDeclaration(&getModule(), (llvm::Intrinsic::ID)IID,
+ Tys);
+}
+
+static llvm::StringMapEntry<llvm::Constant*> &
+GetConstantCFStringEntry(llvm::StringMap<llvm::Constant*> &Map,
+ const StringLiteral *Literal,
+ bool TargetIsLSB,
+ bool &IsUTF16,
+ unsigned &StringLength) {
+ StringRef String = Literal->getString();
+ unsigned NumBytes = String.size();
+
+ // Check for simple case.
+ if (!Literal->containsNonAsciiOrNull()) {
+ StringLength = NumBytes;
+ return Map.GetOrCreateValue(String);
+ }
+
+ // Otherwise, convert the UTF8 literals into a string of shorts.
+ IsUTF16 = true;
+
+ SmallVector<UTF16, 128> ToBuf(NumBytes + 1); // +1 for ending nulls.
+ const UTF8 *FromPtr = (UTF8 *)String.data();
+ UTF16 *ToPtr = &ToBuf[0];
+
+ (void)ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
+ &ToPtr, ToPtr + NumBytes,
+ strictConversion);
+
+ // ConvertUTF8toUTF16 returns the length in ToPtr.
+ StringLength = ToPtr - &ToBuf[0];
+
+ // Add an explicit null.
+ *ToPtr = 0;
+ return Map.
+ GetOrCreateValue(StringRef(reinterpret_cast<const char *>(ToBuf.data()),
+ (StringLength + 1) * 2));
+}
+
+static llvm::StringMapEntry<llvm::Constant*> &
+GetConstantStringEntry(llvm::StringMap<llvm::Constant*> &Map,
+ const StringLiteral *Literal,
+ unsigned &StringLength) {
+ StringRef String = Literal->getString();
+ StringLength = String.size();
+ return Map.GetOrCreateValue(String);
+}
+
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
+ unsigned StringLength = 0;
+ bool isUTF16 = false;
+ llvm::StringMapEntry<llvm::Constant*> &Entry =
+ GetConstantCFStringEntry(CFConstantStringMap, Literal,
+ getTargetData().isLittleEndian(),
+ isUTF16, StringLength);
+
+ if (llvm::Constant *C = Entry.getValue())
+ return C;
+
+ llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
+ llvm::Constant *Zeros[] = { Zero, Zero };
+
+ // If we don't already have it, get __CFConstantStringClassReference.
+ if (!CFConstantStringClassRef) {
+ llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
+ Ty = llvm::ArrayType::get(Ty, 0);
+ llvm::Constant *GV = CreateRuntimeVariable(Ty,
+ "__CFConstantStringClassReference");
+ // Decay array -> ptr
+ CFConstantStringClassRef =
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
+ }
+
+ QualType CFTy = getContext().getCFConstantStringType();
+
+ llvm::StructType *STy =
+ cast<llvm::StructType>(getTypes().ConvertType(CFTy));
+
+ llvm::Constant *Fields[4];
+
+ // Class pointer.
+ Fields[0] = CFConstantStringClassRef;
+
+ // Flags.
+ llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
+ Fields[1] = isUTF16 ? llvm::ConstantInt::get(Ty, 0x07d0) :
+ llvm::ConstantInt::get(Ty, 0x07C8);
+
+ // String pointer.
+ llvm::Constant *C = 0;
+ if (isUTF16) {
+ ArrayRef<uint16_t> Arr =
+ llvm::makeArrayRef<uint16_t>((uint16_t*)Entry.getKey().data(),
+ Entry.getKey().size() / 2);
+ C = llvm::ConstantDataArray::get(VMContext, Arr);
+ } else {
+ C = llvm::ConstantDataArray::getString(VMContext, Entry.getKey());
+ }
+
+ llvm::GlobalValue::LinkageTypes Linkage;
+ if (isUTF16)
+ // FIXME: why do utf strings get "_" labels instead of "L" labels?
+ Linkage = llvm::GlobalValue::InternalLinkage;
+ else
+ // FIXME: With OS X ld 123.2 (xcode 4) and LTO we would get a linker error
+ // when using private linkage. It is not clear if this is a bug in ld
+ // or a reasonable new restriction.
+ Linkage = llvm::GlobalValue::LinkerPrivateLinkage;
+
+ // Note: -fwritable-strings doesn't make the backing store strings of
+ // CFStrings writable. (See <rdar://problem/10657500>)
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), C->getType(), /*isConstant=*/true,
+ Linkage, C, ".str");
+ GV->setUnnamedAddr(true);
+ if (isUTF16) {
+ CharUnits Align = getContext().getTypeAlignInChars(getContext().ShortTy);
+ GV->setAlignment(Align.getQuantity());
+ } else {
+ CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
+ GV->setAlignment(Align.getQuantity());
+ }
+
+ // String.
+ Fields[2] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
+
+ if (isUTF16)
+ // Cast the UTF16 string to the correct type.
+ Fields[2] = llvm::ConstantExpr::getBitCast(Fields[2], Int8PtrTy);
+
+ // String length.
+ Ty = getTypes().ConvertType(getContext().LongTy);
+ Fields[3] = llvm::ConstantInt::get(Ty, StringLength);
+
+ // The struct.
+ C = llvm::ConstantStruct::get(STy, Fields);
+ GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
+ llvm::GlobalVariable::PrivateLinkage, C,
+ "_unnamed_cfstring_");
+ if (const char *Sect = getContext().getTargetInfo().getCFStringSection())
+ GV->setSection(Sect);
+ Entry.setValue(GV);
+
+ return GV;
+}
+
+static RecordDecl *
+CreateRecordDecl(const ASTContext &Ctx, RecordDecl::TagKind TK,
+ DeclContext *DC, IdentifierInfo *Id) {
+ SourceLocation Loc;
+ if (Ctx.getLangOpts().CPlusPlus)
+ return CXXRecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
+ else
+ return RecordDecl::Create(Ctx, TK, DC, Loc, Loc, Id);
+}
+
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantString(const StringLiteral *Literal) {
+ unsigned StringLength = 0;
+ llvm::StringMapEntry<llvm::Constant*> &Entry =
+ GetConstantStringEntry(CFConstantStringMap, Literal, StringLength);
+
+ if (llvm::Constant *C = Entry.getValue())
+ return C;
+
+ llvm::Constant *Zero = llvm::Constant::getNullValue(Int32Ty);
+ llvm::Constant *Zeros[] = { Zero, Zero };
+
+ // If we don't already have it, get _NSConstantStringClassReference.
+ if (!ConstantStringClassRef) {
+ std::string StringClass(getLangOpts().ObjCConstantStringClass);
+ llvm::Type *Ty = getTypes().ConvertType(getContext().IntTy);
+ llvm::Constant *GV;
+ if (LangOpts.ObjCNonFragileABI) {
+ std::string str =
+ StringClass.empty() ? "OBJC_CLASS_$_NSConstantString"
+ : "OBJC_CLASS_$_" + StringClass;
+ GV = getObjCRuntime().GetClassGlobal(str);
+ // Make sure the result is of the correct type.
+ llvm::Type *PTy = llvm::PointerType::getUnqual(Ty);
+ ConstantStringClassRef =
+ llvm::ConstantExpr::getBitCast(GV, PTy);
+ } else {
+ std::string str =
+ StringClass.empty() ? "_NSConstantStringClassReference"
+ : "_" + StringClass + "ClassReference";
+ llvm::Type *PTy = llvm::ArrayType::get(Ty, 0);
+ GV = CreateRuntimeVariable(PTy, str);
+ // Decay array -> ptr
+ ConstantStringClassRef =
+ llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
+ }
+ }
+
+ if (!NSConstantStringType) {
+ // Construct the type for a constant NSString.
+ RecordDecl *D = CreateRecordDecl(Context, TTK_Struct,
+ Context.getTranslationUnitDecl(),
+ &Context.Idents.get("__builtin_NSString"));
+ D->startDefinition();
+
+ QualType FieldTypes[3];
+
+ // const int *isa;
+ FieldTypes[0] = Context.getPointerType(Context.IntTy.withConst());
+ // const char *str;
+ FieldTypes[1] = Context.getPointerType(Context.CharTy.withConst());
+ // unsigned int length;
+ FieldTypes[2] = Context.UnsignedIntTy;
+
+ // Create fields
+ for (unsigned i = 0; i < 3; ++i) {
+ FieldDecl *Field = FieldDecl::Create(Context, D,
+ SourceLocation(),
+ SourceLocation(), 0,
+ FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ /*HasInit=*/false);
+ Field->setAccess(AS_public);
+ D->addDecl(Field);
+ }
+
+ D->completeDefinition();
+ QualType NSTy = Context.getTagDeclType(D);
+ NSConstantStringType = cast<llvm::StructType>(getTypes().ConvertType(NSTy));
+ }
+
+ llvm::Constant *Fields[3];
+
+ // Class pointer.
+ Fields[0] = ConstantStringClassRef;
+
+ // String pointer.
+ llvm::Constant *C =
+ llvm::ConstantDataArray::getString(VMContext, Entry.getKey());
+
+ llvm::GlobalValue::LinkageTypes Linkage;
+ bool isConstant;
+ Linkage = llvm::GlobalValue::PrivateLinkage;
+ isConstant = !LangOpts.WritableStrings;
+
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(), C->getType(), isConstant, Linkage, C,
+ ".str");
+ GV->setUnnamedAddr(true);
+ CharUnits Align = getContext().getTypeAlignInChars(getContext().CharTy);
+ GV->setAlignment(Align.getQuantity());
+ Fields[1] = llvm::ConstantExpr::getGetElementPtr(GV, Zeros);
+
+ // String length.
+ llvm::Type *Ty = getTypes().ConvertType(getContext().UnsignedIntTy);
+ Fields[2] = llvm::ConstantInt::get(Ty, StringLength);
+
+ // The struct.
+ C = llvm::ConstantStruct::get(NSConstantStringType, Fields);
+ GV = new llvm::GlobalVariable(getModule(), C->getType(), true,
+ llvm::GlobalVariable::PrivateLinkage, C,
+ "_unnamed_nsstring_");
+ // FIXME. Fix section.
+ if (const char *Sect =
+ LangOpts.ObjCNonFragileABI
+ ? getContext().getTargetInfo().getNSStringNonFragileABISection()
+ : getContext().getTargetInfo().getNSStringSection())
+ GV->setSection(Sect);
+ Entry.setValue(GV);
+
+ return GV;
+}
+
+QualType CodeGenModule::getObjCFastEnumerationStateType() {
+ if (ObjCFastEnumerationStateType.isNull()) {
+ RecordDecl *D = CreateRecordDecl(Context, TTK_Struct,
+ Context.getTranslationUnitDecl(),
+ &Context.Idents.get("__objcFastEnumerationState"));
+ D->startDefinition();
+
+ QualType FieldTypes[] = {
+ Context.UnsignedLongTy,
+ Context.getPointerType(Context.getObjCIdType()),
+ Context.getPointerType(Context.UnsignedLongTy),
+ Context.getConstantArrayType(Context.UnsignedLongTy,
+ llvm::APInt(32, 5), ArrayType::Normal, 0)
+ };
+
+ for (size_t i = 0; i < 4; ++i) {
+ FieldDecl *Field = FieldDecl::Create(Context,
+ D,
+ SourceLocation(),
+ SourceLocation(), 0,
+ FieldTypes[i], /*TInfo=*/0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ /*HasInit=*/false);
+ Field->setAccess(AS_public);
+ D->addDecl(Field);
+ }
+
+ D->completeDefinition();
+ ObjCFastEnumerationStateType = Context.getTagDeclType(D);
+ }
+
+ return ObjCFastEnumerationStateType;
+}
+
+llvm::Constant *
+CodeGenModule::GetConstantArrayFromStringLiteral(const StringLiteral *E) {
+ assert(!E->getType()->isPointerType() && "Strings are always arrays");
+
+ // Don't emit it as the address of the string, emit the string data itself
+ // as an inline array.
+ if (E->getCharByteWidth() == 1) {
+ SmallString<64> Str(E->getString());
+
+ // Resize the string to the right size, which is indicated by its type.
+ const ConstantArrayType *CAT = Context.getAsConstantArrayType(E->getType());
+ Str.resize(CAT->getSize().getZExtValue());
+ return llvm::ConstantDataArray::getString(VMContext, Str, false);
+ }
+
+ llvm::ArrayType *AType =
+ cast<llvm::ArrayType>(getTypes().ConvertType(E->getType()));
+ llvm::Type *ElemTy = AType->getElementType();
+ unsigned NumElements = AType->getNumElements();
+
+ // Wide strings have either 2-byte or 4-byte elements.
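+  // For example (illustrative): u"hi" has 2-byte code units and takes the
+  // i16 path below, while U"hi" (and L"hi" on targets with 4-byte wchar_t)
+  // takes the i32 path; in both cases the elements are zero-padded out to
+  // the array type's size.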
+ if (ElemTy->getPrimitiveSizeInBits() == 16) {
+ SmallVector<uint16_t, 32> Elements;
+ Elements.reserve(NumElements);
+
+ for(unsigned i = 0, e = E->getLength(); i != e; ++i)
+ Elements.push_back(E->getCodeUnit(i));
+ Elements.resize(NumElements);
+ return llvm::ConstantDataArray::get(VMContext, Elements);
+ }
+
+ assert(ElemTy->getPrimitiveSizeInBits() == 32);
+ SmallVector<uint32_t, 32> Elements;
+ Elements.reserve(NumElements);
+
+ for(unsigned i = 0, e = E->getLength(); i != e; ++i)
+ Elements.push_back(E->getCodeUnit(i));
+ Elements.resize(NumElements);
+ return llvm::ConstantDataArray::get(VMContext, Elements);
+}
+
+/// GetAddrOfConstantStringFromLiteral - Return a pointer to a
+/// constant array for the given string literal.
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S) {
+ CharUnits Align = getContext().getTypeAlignInChars(S->getType());
+ if (S->isAscii() || S->isUTF8()) {
+ SmallString<64> Str(S->getString());
+
+ // Resize the string to the right size, which is indicated by its type.
+ const ConstantArrayType *CAT = Context.getAsConstantArrayType(S->getType());
+ Str.resize(CAT->getSize().getZExtValue());
+ return GetAddrOfConstantString(Str, /*GlobalName*/ 0, Align.getQuantity());
+ }
+
+ // FIXME: the following does not memoize wide strings.
+ llvm::Constant *C = GetConstantArrayFromStringLiteral(S);
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(getModule(),C->getType(),
+ !LangOpts.WritableStrings,
+ llvm::GlobalValue::PrivateLinkage,
+ C,".str");
+
+ GV->setAlignment(Align.getQuantity());
+ GV->setUnnamedAddr(true);
+ return GV;
+}
+
+/// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
+/// array for the given ObjCEncodeExpr node.
+llvm::Constant *
+CodeGenModule::GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *E) {
+ std::string Str;
+ getContext().getObjCEncodingForType(E->getEncodedType(), Str);
+
+ return GetAddrOfConstantCString(Str);
+}
+
+
+/// GenerateStringLiteral - Creates storage for a string literal.
+static llvm::GlobalVariable *GenerateStringLiteral(StringRef str,
+ bool constant,
+ CodeGenModule &CGM,
+ const char *GlobalName,
+ unsigned Alignment) {
+ // Create Constant for this string literal. Don't add a '\0'.
+ llvm::Constant *C =
+ llvm::ConstantDataArray::getString(CGM.getLLVMContext(), str, false);
+
+ // Create a global variable for this string
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), C->getType(), constant,
+ llvm::GlobalValue::PrivateLinkage,
+ C, GlobalName);
+ GV->setAlignment(Alignment);
+ GV->setUnnamedAddr(true);
+ return GV;
+}
+
+/// GetAddrOfConstantString - Returns a pointer to a character array
+/// containing the literal. Its contents are exactly those of the
+/// given string, i.e. it will not be null terminated automatically;
+/// see GetAddrOfConstantCString. Note that whether the result is
+/// actually a pointer to an LLVM constant depends on
+/// LangOpts.WritableStrings.
+///
+/// The result has pointer to array type.
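+///
+/// For example (illustrative), passing "abc" here produces a [3 x i8]
+/// global holding 'a', 'b', 'c' with no trailing NUL, whereas
+/// GetAddrOfConstantCString("abc") below produces [4 x i8] c"abc\00".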
+llvm::Constant *CodeGenModule::GetAddrOfConstantString(StringRef Str,
+ const char *GlobalName,
+ unsigned Alignment) {
+ // Get the default prefix if a name wasn't specified.
+ if (!GlobalName)
+ GlobalName = ".str";
+
+ // Don't share any string literals if strings aren't constant.
+ if (LangOpts.WritableStrings)
+ return GenerateStringLiteral(Str, false, *this, GlobalName, Alignment);
+
+ llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
+ ConstantStringMap.GetOrCreateValue(Str);
+
+ if (llvm::GlobalVariable *GV = Entry.getValue()) {
+ if (Alignment > GV->getAlignment()) {
+ GV->setAlignment(Alignment);
+ }
+ return GV;
+ }
+
+ // Create a global variable for this.
+ llvm::GlobalVariable *GV = GenerateStringLiteral(Str, true, *this, GlobalName,
+ Alignment);
+ Entry.setValue(GV);
+ return GV;
+}
+
+/// GetAddrOfConstantCString - Returns a pointer to a character
+/// array containing the literal and a terminating '\0'
+/// character. The result has pointer to array type.
+llvm::Constant *CodeGenModule::GetAddrOfConstantCString(const std::string &Str,
+ const char *GlobalName,
+ unsigned Alignment) {
+ StringRef StrWithNull(Str.c_str(), Str.size() + 1);
+ return GetAddrOfConstantString(StrWithNull, GlobalName, Alignment);
+}
+
+/// EmitObjCPropertyImplementations - Emit information for synthesized
+/// properties for an implementation.
+void CodeGenModule::EmitObjCPropertyImplementations(const
+ ObjCImplementationDecl *D) {
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = D->propimpl_begin(), e = D->propimpl_end(); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+
+ // Dynamic is just for type-checking.
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+
+      // Determine which methods need to be implemented; some may have
+      // been overridden. Note that ::isSynthesized is not the check
+      // we want; that just indicates whether the decl came from a
+      // property. What we want to know is whether the method is defined
+      // in this implementation.
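+      //
+      // For example (illustrative): for "@synthesize prop;" with no
+      // user-written -prop method in this @implementation, a getter body
+      // is generated here (and a setter as well if the property is not
+      // read-only).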
+ if (!D->getInstanceMethod(PD->getGetterName()))
+ CodeGenFunction(*this).GenerateObjCGetter(
+ const_cast<ObjCImplementationDecl *>(D), PID);
+ if (!PD->isReadOnly() &&
+ !D->getInstanceMethod(PD->getSetterName()))
+ CodeGenFunction(*this).GenerateObjCSetter(
+ const_cast<ObjCImplementationDecl *>(D), PID);
+ }
+ }
+}
+
+static bool needsDestructMethod(ObjCImplementationDecl *impl) {
+ const ObjCInterfaceDecl *iface = impl->getClassInterface();
+ for (const ObjCIvarDecl *ivar = iface->all_declared_ivar_begin();
+ ivar; ivar = ivar->getNextIvar())
+ if (ivar->getType().isDestructedType())
+ return true;
+
+ return false;
+}
+
+/// EmitObjCIvarInitializations - Emit information for ivar initialization
+/// for an implementation.
+void CodeGenModule::EmitObjCIvarInitializations(ObjCImplementationDecl *D) {
+ // We might need a .cxx_destruct even if we don't have any ivar initializers.
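+  // For example (illustrative): an ivar of C++ class type with a
+  // non-trivial destructor, such as a std::string, needs destruction, so a
+  // .cxx_destruct method is synthesized even with no ivar initializers.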
+ if (needsDestructMethod(D)) {
+ IdentifierInfo *II = &getContext().Idents.get(".cxx_destruct");
+ Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
+ ObjCMethodDecl *DTORMethod =
+ ObjCMethodDecl::Create(getContext(), D->getLocation(), D->getLocation(),
+ cxxSelector, getContext().VoidTy, 0, D,
+ /*isInstance=*/true, /*isVariadic=*/false,
+ /*isSynthesized=*/true, /*isImplicitlyDeclared=*/true,
+ /*isDefined=*/false, ObjCMethodDecl::Required);
+ D->addInstanceMethod(DTORMethod);
+ CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, DTORMethod, false);
+ D->setHasCXXStructors(true);
+ }
+
+ // If the implementation doesn't have any ivar initializers, we don't need
+ // a .cxx_construct.
+ if (D->getNumIvarInitializers() == 0)
+ return;
+
+ IdentifierInfo *II = &getContext().Idents.get(".cxx_construct");
+ Selector cxxSelector = getContext().Selectors.getSelector(0, &II);
+ // The constructor returns 'self'.
+ ObjCMethodDecl *CTORMethod = ObjCMethodDecl::Create(getContext(),
+ D->getLocation(),
+ D->getLocation(),
+ cxxSelector,
+ getContext().getObjCIdType(), 0,
+ D, /*isInstance=*/true,
+ /*isVariadic=*/false,
+ /*isSynthesized=*/true,
+ /*isImplicitlyDeclared=*/true,
+ /*isDefined=*/false,
+ ObjCMethodDecl::Required);
+ D->addInstanceMethod(CTORMethod);
+ CodeGenFunction(*this).GenerateObjCCtorDtorMethod(D, CTORMethod, true);
+ D->setHasCXXStructors(true);
+}
+
+/// EmitNamespace - Emit all declarations in a namespace.
+void CodeGenModule::EmitNamespace(const NamespaceDecl *ND) {
+ for (RecordDecl::decl_iterator I = ND->decls_begin(), E = ND->decls_end();
+ I != E; ++I)
+ EmitTopLevelDecl(*I);
+}
+
+// EmitLinkageSpec - Emit all declarations in a linkage spec.
+void CodeGenModule::EmitLinkageSpec(const LinkageSpecDecl *LSD) {
+ if (LSD->getLanguage() != LinkageSpecDecl::lang_c &&
+ LSD->getLanguage() != LinkageSpecDecl::lang_cxx) {
+ ErrorUnsupported(LSD, "linkage spec");
+ return;
+ }
+
+ for (RecordDecl::decl_iterator I = LSD->decls_begin(), E = LSD->decls_end();
+ I != E; ++I)
+ EmitTopLevelDecl(*I);
+}
+
+/// EmitTopLevelDecl - Emit code for a single top level declaration.
+void CodeGenModule::EmitTopLevelDecl(Decl *D) {
+ // If an error has occurred, stop code generation, but continue
+ // parsing and semantic analysis (to ensure all warnings and errors
+ // are emitted).
+ if (Diags.hasErrorOccurred())
+ return;
+
+ // Ignore dependent declarations.
+ if (D->getDeclContext() && D->getDeclContext()->isDependentContext())
+ return;
+
+ switch (D->getKind()) {
+ case Decl::CXXConversion:
+ case Decl::CXXMethod:
+ case Decl::Function:
+ // Skip function templates
+ if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate() ||
+ cast<FunctionDecl>(D)->isLateTemplateParsed())
+ return;
+
+ EmitGlobal(cast<FunctionDecl>(D));
+ break;
+
+ case Decl::Var:
+ EmitGlobal(cast<VarDecl>(D));
+ break;
+
+ // Indirect fields from global anonymous structs and unions can be
+ // ignored; only the actual variable requires IR gen support.
+ case Decl::IndirectField:
+ break;
+
+ // C++ Decls
+ case Decl::Namespace:
+ EmitNamespace(cast<NamespaceDecl>(D));
+ break;
+ // No code generation needed.
+ case Decl::UsingShadow:
+ case Decl::Using:
+ case Decl::UsingDirective:
+ case Decl::ClassTemplate:
+ case Decl::FunctionTemplate:
+ case Decl::TypeAliasTemplate:
+ case Decl::NamespaceAlias:
+ case Decl::Block:
+ case Decl::Import:
+ break;
+ case Decl::CXXConstructor:
+ // Skip function templates
+ if (cast<FunctionDecl>(D)->getDescribedFunctionTemplate() ||
+ cast<FunctionDecl>(D)->isLateTemplateParsed())
+ return;
+
+ EmitCXXConstructors(cast<CXXConstructorDecl>(D));
+ break;
+ case Decl::CXXDestructor:
+ if (cast<FunctionDecl>(D)->isLateTemplateParsed())
+ return;
+ EmitCXXDestructors(cast<CXXDestructorDecl>(D));
+ break;
+
+ case Decl::StaticAssert:
+ // Nothing to do.
+ break;
+
+ // Objective-C Decls
+
+ // Forward declarations, no (immediate) code generation.
+ case Decl::ObjCInterface:
+ break;
+
+ case Decl::ObjCCategory: {
+ ObjCCategoryDecl *CD = cast<ObjCCategoryDecl>(D);
+ if (CD->IsClassExtension() && CD->hasSynthBitfield())
+ Context.ResetObjCLayout(CD->getClassInterface());
+ break;
+ }
+
+ case Decl::ObjCProtocol: {
+ ObjCProtocolDecl *Proto = cast<ObjCProtocolDecl>(D);
+ if (Proto->isThisDeclarationADefinition())
+ ObjCRuntime->GenerateProtocol(Proto);
+ break;
+ }
+
+ case Decl::ObjCCategoryImpl:
+ // Categories have properties but don't support synthesize so we
+ // can ignore them here.
+ ObjCRuntime->GenerateCategory(cast<ObjCCategoryImplDecl>(D));
+ break;
+
+ case Decl::ObjCImplementation: {
+ ObjCImplementationDecl *OMD = cast<ObjCImplementationDecl>(D);
+ if (LangOpts.ObjCNonFragileABI2 && OMD->hasSynthBitfield())
+ Context.ResetObjCLayout(OMD->getClassInterface());
+ EmitObjCPropertyImplementations(OMD);
+ EmitObjCIvarInitializations(OMD);
+ ObjCRuntime->GenerateClass(OMD);
+ // Emit global variable debug information.
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->getOrCreateInterfaceType(getContext().getObjCInterfaceType(OMD->getClassInterface()),
+ OMD->getLocation());
+
+ break;
+ }
+ case Decl::ObjCMethod: {
+ ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(D);
+ // If this is not a prototype, emit the body.
+ if (OMD->getBody())
+ CodeGenFunction(*this).GenerateObjCMethod(OMD);
+ break;
+ }
+ case Decl::ObjCCompatibleAlias:
+ ObjCRuntime->RegisterAlias(cast<ObjCCompatibleAliasDecl>(D));
+ break;
+
+ case Decl::LinkageSpec:
+ EmitLinkageSpec(cast<LinkageSpecDecl>(D));
+ break;
+
+ case Decl::FileScopeAsm: {
+ FileScopeAsmDecl *AD = cast<FileScopeAsmDecl>(D);
+ StringRef AsmString = AD->getAsmString()->getString();
+
+ const std::string &S = getModule().getModuleInlineAsm();
+ if (S.empty())
+ getModule().setModuleInlineAsm(AsmString);
+ else if (*--S.end() == '\n')
+ getModule().setModuleInlineAsm(S + AsmString.str());
+ else
+ getModule().setModuleInlineAsm(S + '\n' + AsmString.str());
+ break;
+ }
+
+ default:
+ // Make sure we handled everything we should, every other kind is a
+ // non-top-level decl. FIXME: Would be nice to have an isTopLevelDeclKind
+ // function. Need to recode Decl::Kind to do that easily.
+ assert(isa<TypeDecl>(D) && "Unsupported decl kind");
+ }
+}
+
+/// Turns the given pointer into a constant.
+static llvm::Constant *GetPointerConstant(llvm::LLVMContext &Context,
+ const void *Ptr) {
+ uintptr_t PtrInt = reinterpret_cast<uintptr_t>(Ptr);
+ llvm::Type *i64 = llvm::Type::getInt64Ty(Context);
+ return llvm::ConstantInt::get(i64, PtrInt);
+}
+
+static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
+ llvm::NamedMDNode *&GlobalMetadata,
+ GlobalDecl D,
+ llvm::GlobalValue *Addr) {
+ if (!GlobalMetadata)
+ GlobalMetadata =
+ CGM.getModule().getOrInsertNamedMetadata("clang.global.decl.ptrs");
+
+ // TODO: should we report variant information for ctors/dtors?
+ llvm::Value *Ops[] = {
+ Addr,
+ GetPointerConstant(CGM.getLLVMContext(), D.getDecl())
+ };
+ GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
+}
+
+/// Emits metadata nodes associating all the global values in the
+/// current module with the Decls they came from. This is useful for
+/// projects using IR gen as a subroutine.
+///
+/// Since there's currently no way to associate an MDNode directly
+/// with an llvm::GlobalValue, we create a global named metadata
+/// with the name 'clang.global.decl.ptrs'.
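+///
+/// The emitted metadata looks roughly like this (illustrative):
+///   !clang.global.decl.ptrs = !{!0, !1}
+///   !0 = metadata !{i32* @some_global, i64 <integer value of its Decl*>}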
+void CodeGenModule::EmitDeclMetadata() {
+ llvm::NamedMDNode *GlobalMetadata = 0;
+
+ // StaticLocalDeclMap
+ for (llvm::DenseMap<GlobalDecl,StringRef>::iterator
+ I = MangledDeclNames.begin(), E = MangledDeclNames.end();
+ I != E; ++I) {
+ llvm::GlobalValue *Addr = getModule().getNamedValue(I->second);
+ EmitGlobalDeclMetadata(*this, GlobalMetadata, I->first, Addr);
+ }
+}
+
+/// Emits metadata nodes for all the local variables in the current
+/// function.
+void CodeGenFunction::EmitDeclMetadata() {
+ if (LocalDeclMap.empty()) return;
+
+ llvm::LLVMContext &Context = getLLVMContext();
+
+ // Find the unique metadata ID for this name.
+ unsigned DeclPtrKind = Context.getMDKindID("clang.decl.ptr");
+
+ llvm::NamedMDNode *GlobalMetadata = 0;
+
+ for (llvm::DenseMap<const Decl*, llvm::Value*>::iterator
+ I = LocalDeclMap.begin(), E = LocalDeclMap.end(); I != E; ++I) {
+ const Decl *D = I->first;
+ llvm::Value *Addr = I->second;
+
+ if (llvm::AllocaInst *Alloca = dyn_cast<llvm::AllocaInst>(Addr)) {
+ llvm::Value *DAddr = GetPointerConstant(getLLVMContext(), D);
+ Alloca->setMetadata(DeclPtrKind, llvm::MDNode::get(Context, DAddr));
+ } else if (llvm::GlobalValue *GV = dyn_cast<llvm::GlobalValue>(Addr)) {
+ GlobalDecl GD = GlobalDecl(cast<VarDecl>(D));
+ EmitGlobalDeclMetadata(CGM, GlobalMetadata, GD, GV);
+ }
+ }
+}
+
+void CodeGenModule::EmitCoverageFile() {
+ if (!getCodeGenOpts().CoverageFile.empty()) {
+ if (llvm::NamedMDNode *CUNode = TheModule.getNamedMetadata("llvm.dbg.cu")) {
+ llvm::NamedMDNode *GCov = TheModule.getOrInsertNamedMetadata("llvm.gcov");
+ llvm::LLVMContext &Ctx = TheModule.getContext();
+ llvm::MDString *CoverageFile =
+ llvm::MDString::get(Ctx, getCodeGenOpts().CoverageFile);
+ for (int i = 0, e = CUNode->getNumOperands(); i != e; ++i) {
+ llvm::MDNode *CU = CUNode->getOperand(i);
+ llvm::Value *node[] = { CoverageFile, CU };
+ llvm::MDNode *N = llvm::MDNode::get(Ctx, node);
+ GCov->addOperand(N);
+ }
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
new file mode 100644
index 0000000..38f5008
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenModule.h
@@ -0,0 +1,986 @@
+//===--- CodeGenModule.h - Per-Module state for LLVM CodeGen ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the internal per-translation-unit state used for llvm translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENMODULE_H
+#define CLANG_CODEGEN_CODEGENMODULE_H
+
+#include "clang/Basic/ABI.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/GlobalDecl.h"
+#include "clang/AST/Mangle.h"
+#include "CGVTables.h"
+#include "CodeGenTypes.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/ValueHandle.h"
+
+namespace llvm {
+ class Module;
+ class Constant;
+ class ConstantInt;
+ class Function;
+ class GlobalValue;
+ class TargetData;
+ class FunctionType;
+ class LLVMContext;
+}
+
+namespace clang {
+ class TargetCodeGenInfo;
+ class ASTContext;
+ class FunctionDecl;
+ class IdentifierInfo;
+ class ObjCMethodDecl;
+ class ObjCImplementationDecl;
+ class ObjCCategoryImplDecl;
+ class ObjCProtocolDecl;
+ class ObjCEncodeExpr;
+ class BlockExpr;
+ class CharUnits;
+ class Decl;
+ class Expr;
+ class Stmt;
+ class InitListExpr;
+ class StringLiteral;
+ class NamedDecl;
+ class ValueDecl;
+ class VarDecl;
+ class LangOptions;
+ class CodeGenOptions;
+ class DiagnosticsEngine;
+ class AnnotateAttr;
+ class CXXDestructorDecl;
+ class MangleBuffer;
+
+namespace CodeGen {
+
+ class CallArgList;
+ class CodeGenFunction;
+ class CodeGenTBAA;
+ class CGCXXABI;
+ class CGDebugInfo;
+ class CGObjCRuntime;
+ class CGOpenCLRuntime;
+ class CGCUDARuntime;
+ class BlockFieldFlags;
+ class FunctionArgList;
+
+ struct OrderGlobalInits {
+ unsigned int priority;
+ unsigned int lex_order;
+ OrderGlobalInits(unsigned int p, unsigned int l)
+ : priority(p), lex_order(l) {}
+
+ bool operator==(const OrderGlobalInits &RHS) const {
+ return priority == RHS.priority &&
+ lex_order == RHS.lex_order;
+ }
+
+ bool operator<(const OrderGlobalInits &RHS) const {
+ if (priority < RHS.priority)
+ return true;
+
+ return priority == RHS.priority && lex_order < RHS.lex_order;
+ }
+ };
+
+ struct CodeGenTypeCache {
+ /// void
+ llvm::Type *VoidTy;
+
+ /// i8, i16, i32, and i64
+ llvm::IntegerType *Int8Ty, *Int16Ty, *Int32Ty, *Int64Ty;
+ /// float, double
+ llvm::Type *FloatTy, *DoubleTy;
+
+ /// int
+ llvm::IntegerType *IntTy;
+
+ /// intptr_t, size_t, and ptrdiff_t, which we assume are the same size.
+ union {
+ llvm::IntegerType *IntPtrTy;
+ llvm::IntegerType *SizeTy;
+ llvm::IntegerType *PtrDiffTy;
+ };
+
+ /// void* in address space 0
+ union {
+ llvm::PointerType *VoidPtrTy;
+ llvm::PointerType *Int8PtrTy;
+ };
+
+ /// void** in address space 0
+ union {
+ llvm::PointerType *VoidPtrPtrTy;
+ llvm::PointerType *Int8PtrPtrTy;
+ };
+
+ /// The width of a pointer into the generic address space.
+ unsigned char PointerWidthInBits;
+
+ /// The size and alignment of a pointer into the generic address
+ /// space.
+ union {
+ unsigned char PointerAlignInBytes;
+ unsigned char PointerSizeInBytes;
+ };
+ };
+
+struct RREntrypoints {
+ RREntrypoints() { memset(this, 0, sizeof(*this)); }
+ /// void objc_autoreleasePoolPop(void*);
+ llvm::Constant *objc_autoreleasePoolPop;
+
+ /// void *objc_autoreleasePoolPush(void);
+ llvm::Constant *objc_autoreleasePoolPush;
+};
+
+struct ARCEntrypoints {
+ ARCEntrypoints() { memset(this, 0, sizeof(*this)); }
+
+ /// id objc_autorelease(id);
+ llvm::Constant *objc_autorelease;
+
+ /// id objc_autoreleaseReturnValue(id);
+ llvm::Constant *objc_autoreleaseReturnValue;
+
+ /// void objc_copyWeak(id *dest, id *src);
+ llvm::Constant *objc_copyWeak;
+
+ /// void objc_destroyWeak(id*);
+ llvm::Constant *objc_destroyWeak;
+
+ /// id objc_initWeak(id*, id);
+ llvm::Constant *objc_initWeak;
+
+ /// id objc_loadWeak(id*);
+ llvm::Constant *objc_loadWeak;
+
+ /// id objc_loadWeakRetained(id*);
+ llvm::Constant *objc_loadWeakRetained;
+
+ /// void objc_moveWeak(id *dest, id *src);
+ llvm::Constant *objc_moveWeak;
+
+ /// id objc_retain(id);
+ llvm::Constant *objc_retain;
+
+ /// id objc_retainAutorelease(id);
+ llvm::Constant *objc_retainAutorelease;
+
+ /// id objc_retainAutoreleaseReturnValue(id);
+ llvm::Constant *objc_retainAutoreleaseReturnValue;
+
+ /// id objc_retainAutoreleasedReturnValue(id);
+ llvm::Constant *objc_retainAutoreleasedReturnValue;
+
+ /// id objc_retainBlock(id);
+ llvm::Constant *objc_retainBlock;
+
+ /// void objc_release(id);
+ llvm::Constant *objc_release;
+
+ /// id objc_storeStrong(id*, id);
+ llvm::Constant *objc_storeStrong;
+
+ /// id objc_storeWeak(id*, id);
+ llvm::Constant *objc_storeWeak;
+
+ /// A void(void) inline asm to use to mark that the return value of
+  /// a call will be immediately retained.
+ llvm::InlineAsm *retainAutoreleasedReturnValueMarker;
+};
+
+/// CodeGenModule - This class organizes the cross-function state that is used
+/// while generating LLVM code.
+class CodeGenModule : public CodeGenTypeCache {
+ CodeGenModule(const CodeGenModule&); // DO NOT IMPLEMENT
+ void operator=(const CodeGenModule&); // DO NOT IMPLEMENT
+
+ typedef std::vector<std::pair<llvm::Constant*, int> > CtorList;
+
+ ASTContext &Context;
+ const LangOptions &LangOpts;
+ const CodeGenOptions &CodeGenOpts;
+ llvm::Module &TheModule;
+ const llvm::TargetData &TheTargetData;
+ mutable const TargetCodeGenInfo *TheTargetCodeGenInfo;
+ DiagnosticsEngine &Diags;
+ CGCXXABI &ABI;
+ CodeGenTypes Types;
+ CodeGenTBAA *TBAA;
+
+ /// VTables - Holds information about C++ vtables.
+ CodeGenVTables VTables;
+ friend class CodeGenVTables;
+
+ CGObjCRuntime* ObjCRuntime;
+ CGOpenCLRuntime* OpenCLRuntime;
+ CGCUDARuntime* CUDARuntime;
+ CGDebugInfo* DebugInfo;
+ ARCEntrypoints *ARCData;
+ llvm::MDNode *NoObjCARCExceptionsMetadata;
+ RREntrypoints *RRData;
+
+ // WeakRefReferences - A set of references that have only been seen via
+  // a weakref so far. This is used to remove the weak linkage of the
+  // reference if we ever see a direct reference or a definition.
+ llvm::SmallPtrSet<llvm::GlobalValue*, 10> WeakRefReferences;
+
+ /// DeferredDecls - This contains all the decls which have definitions but
+ /// which are deferred for emission and therefore should only be output if
+ /// they are actually used. If a decl is in this, then it is known to have
+ /// not been referenced yet.
+ llvm::StringMap<GlobalDecl> DeferredDecls;
+
+ /// DeferredDeclsToEmit - This is a list of deferred decls which we have seen
+ /// that *are* actually referenced. These get code generated when the module
+ /// is done.
+ std::vector<GlobalDecl> DeferredDeclsToEmit;
+
+ /// LLVMUsed - List of global values which are required to be
+ /// present in the object file; bitcast to i8*. This is used for
+ /// forcing visibility of symbols which may otherwise be optimized
+ /// out.
+ std::vector<llvm::WeakVH> LLVMUsed;
+
+ /// GlobalCtors - Store the list of global constructors and their respective
+ /// priorities to be emitted when the translation unit is complete.
+ CtorList GlobalCtors;
+
+ /// GlobalDtors - Store the list of global destructors and their respective
+ /// priorities to be emitted when the translation unit is complete.
+ CtorList GlobalDtors;
+
+ /// MangledDeclNames - A map of canonical GlobalDecls to their mangled names.
+ llvm::DenseMap<GlobalDecl, StringRef> MangledDeclNames;
+ llvm::BumpPtrAllocator MangledNamesAllocator;
+
+ /// Global annotations.
+ std::vector<llvm::Constant*> Annotations;
+
+ /// Map used to get unique annotation strings.
+ llvm::StringMap<llvm::Constant*> AnnotationStrings;
+
+ llvm::StringMap<llvm::Constant*> CFConstantStringMap;
+ llvm::StringMap<llvm::GlobalVariable*> ConstantStringMap;
+ llvm::DenseMap<const Decl*, llvm::Constant *> StaticLocalDeclMap;
+ llvm::DenseMap<const Decl*, llvm::GlobalVariable*> StaticLocalDeclGuardMap;
+
+ llvm::DenseMap<QualType, llvm::Constant *> AtomicSetterHelperFnMap;
+ llvm::DenseMap<QualType, llvm::Constant *> AtomicGetterHelperFnMap;
+
+ /// CXXGlobalInits - Global variables with initializers that need to run
+ /// before main.
+ std::vector<llvm::Constant*> CXXGlobalInits;
+
+ /// When a C++ decl with an initializer is deferred, null is
+ /// appended to CXXGlobalInits, and the index of that null is placed
+ /// here so that the initializer will be performed in the correct
+ /// order.
+ llvm::DenseMap<const Decl*, unsigned> DelayedCXXInitPosition;
+
+  /// Global variables with initializers whose order of initialization is
+  /// set by the init_priority attribute.
+
+ SmallVector<std::pair<OrderGlobalInits, llvm::Function*>, 8>
+ PrioritizedCXXGlobalInits;
+
+ /// CXXGlobalDtors - Global destructor functions and arguments that need to
+ /// run on termination.
+ std::vector<std::pair<llvm::WeakVH,llvm::Constant*> > CXXGlobalDtors;
+
+ /// @name Cache for Objective-C runtime types
+ /// @{
+
+ /// CFConstantStringClassRef - Cached reference to the class for constant
+ /// strings. This value has type int * but is actually an Obj-C class pointer.
+ llvm::Constant *CFConstantStringClassRef;
+
+ /// ConstantStringClassRef - Cached reference to the class for constant
+ /// strings. This value has type int * but is actually an Obj-C class pointer.
+ llvm::Constant *ConstantStringClassRef;
+
+ /// \brief The LLVM type corresponding to NSConstantString.
+ llvm::StructType *NSConstantStringType;
+
+ /// \brief The type used to describe the state of a fast enumeration in
+ /// Objective-C's for..in loop.
+ QualType ObjCFastEnumerationStateType;
+
+ /// @}
+
+ /// Lazily create the Objective-C runtime
+ void createObjCRuntime();
+
+ void createOpenCLRuntime();
+ void createCUDARuntime();
+
+ bool isTriviallyRecursive(const FunctionDecl *F);
+ bool shouldEmitFunction(const FunctionDecl *F);
+ llvm::LLVMContext &VMContext;
+
+ /// @name Cache for Blocks Runtime Globals
+ /// @{
+
+ llvm::Constant *NSConcreteGlobalBlock;
+ llvm::Constant *NSConcreteStackBlock;
+
+ llvm::Constant *BlockObjectAssign;
+ llvm::Constant *BlockObjectDispose;
+
+ llvm::Type *BlockDescriptorType;
+ llvm::Type *GenericBlockLiteralType;
+
+ struct {
+ int GlobalUniqueCount;
+ } Block;
+
+ /// @}
+public:
+ CodeGenModule(ASTContext &C, const CodeGenOptions &CodeGenOpts,
+ llvm::Module &M, const llvm::TargetData &TD,
+ DiagnosticsEngine &Diags);
+
+ ~CodeGenModule();
+
+ /// Release - Finalize LLVM code generation.
+ void Release();
+
+ /// getObjCRuntime() - Return a reference to the configured
+ /// Objective-C runtime.
+ CGObjCRuntime &getObjCRuntime() {
+ if (!ObjCRuntime) createObjCRuntime();
+ return *ObjCRuntime;
+ }
+
+ /// hasObjCRuntime() - Return true iff an Objective-C runtime has
+ /// been configured.
+ bool hasObjCRuntime() { return !!ObjCRuntime; }
+
+ /// getOpenCLRuntime() - Return a reference to the configured OpenCL runtime.
+ CGOpenCLRuntime &getOpenCLRuntime() {
+ assert(OpenCLRuntime != 0);
+ return *OpenCLRuntime;
+ }
+
+ /// getCUDARuntime() - Return a reference to the configured CUDA runtime.
+ CGCUDARuntime &getCUDARuntime() {
+ assert(CUDARuntime != 0);
+ return *CUDARuntime;
+ }
+
+ /// getCXXABI() - Return a reference to the configured C++ ABI.
+ CGCXXABI &getCXXABI() { return ABI; }
+
+ ARCEntrypoints &getARCEntrypoints() const {
+ assert(getLangOpts().ObjCAutoRefCount && ARCData != 0);
+ return *ARCData;
+ }
+
+ RREntrypoints &getRREntrypoints() const {
+ assert(RRData != 0);
+ return *RRData;
+ }
+
+ llvm::Constant *getStaticLocalDeclAddress(const VarDecl *D) {
+ return StaticLocalDeclMap[D];
+ }
+ void setStaticLocalDeclAddress(const VarDecl *D,
+ llvm::Constant *C) {
+ StaticLocalDeclMap[D] = C;
+ }
+
+ llvm::GlobalVariable *getStaticLocalDeclGuardAddress(const VarDecl *D) {
+ return StaticLocalDeclGuardMap[D];
+ }
+ void setStaticLocalDeclGuardAddress(const VarDecl *D,
+ llvm::GlobalVariable *C) {
+ StaticLocalDeclGuardMap[D] = C;
+ }
+
+ llvm::Constant *getAtomicSetterHelperFnMap(QualType Ty) {
+ return AtomicSetterHelperFnMap[Ty];
+ }
+ void setAtomicSetterHelperFnMap(QualType Ty,
+ llvm::Constant *Fn) {
+ AtomicSetterHelperFnMap[Ty] = Fn;
+ }
+
+ llvm::Constant *getAtomicGetterHelperFnMap(QualType Ty) {
+ return AtomicGetterHelperFnMap[Ty];
+ }
+ void setAtomicGetterHelperFnMap(QualType Ty,
+ llvm::Constant *Fn) {
+ AtomicGetterHelperFnMap[Ty] = Fn;
+ }
+
+ CGDebugInfo *getModuleDebugInfo() { return DebugInfo; }
+
+ llvm::MDNode *getNoObjCARCExceptionsMetadata() {
+ if (!NoObjCARCExceptionsMetadata)
+ NoObjCARCExceptionsMetadata =
+ llvm::MDNode::get(getLLVMContext(),
+ SmallVector<llvm::Value*,1>());
+ return NoObjCARCExceptionsMetadata;
+ }
+
+ ASTContext &getContext() const { return Context; }
+ const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
+ const LangOptions &getLangOpts() const { return LangOpts; }
+ llvm::Module &getModule() const { return TheModule; }
+ CodeGenTypes &getTypes() { return Types; }
+ CodeGenVTables &getVTables() { return VTables; }
+ VTableContext &getVTableContext() { return VTables.getVTableContext(); }
+ DiagnosticsEngine &getDiags() const { return Diags; }
+ const llvm::TargetData &getTargetData() const { return TheTargetData; }
+ const TargetInfo &getTarget() const { return Context.getTargetInfo(); }
+ llvm::LLVMContext &getLLVMContext() { return VMContext; }
+ const TargetCodeGenInfo &getTargetCodeGenInfo();
+ bool isTargetDarwin() const;
+
+ bool shouldUseTBAA() const { return TBAA != 0; }
+
+ llvm::MDNode *getTBAAInfo(QualType QTy);
+ llvm::MDNode *getTBAAInfoForVTablePtr();
+
+ bool isTypeConstant(QualType QTy, bool ExcludeCtorDtor);
+
+ static void DecorateInstruction(llvm::Instruction *Inst,
+ llvm::MDNode *TBAAInfo);
+
+ /// getSize - Emit the given number of characters as a value of type size_t.
+ llvm::ConstantInt *getSize(CharUnits numChars);
+
+ /// setGlobalVisibility - Set the visibility for the given LLVM
+ /// GlobalValue.
+ void setGlobalVisibility(llvm::GlobalValue *GV, const NamedDecl *D) const;
+
+ /// TypeVisibilityKind - The kind of global variable that is passed to
+ /// setTypeVisibility
+ enum TypeVisibilityKind {
+ TVK_ForVTT,
+ TVK_ForVTable,
+ TVK_ForConstructionVTable,
+ TVK_ForRTTI,
+ TVK_ForRTTIName
+ };
+
+ /// setTypeVisibility - Set the visibility for the given global
+ /// value which holds information about a type.
+ void setTypeVisibility(llvm::GlobalValue *GV, const CXXRecordDecl *D,
+ TypeVisibilityKind TVK) const;
+
+ static llvm::GlobalValue::VisibilityTypes GetLLVMVisibility(Visibility V) {
+ switch (V) {
+ case DefaultVisibility: return llvm::GlobalValue::DefaultVisibility;
+ case HiddenVisibility: return llvm::GlobalValue::HiddenVisibility;
+ case ProtectedVisibility: return llvm::GlobalValue::ProtectedVisibility;
+ }
+ llvm_unreachable("unknown visibility!");
+ }
+
+ llvm::Constant *GetAddrOfGlobal(GlobalDecl GD) {
+ if (isa<CXXConstructorDecl>(GD.getDecl()))
+ return GetAddrOfCXXConstructor(cast<CXXConstructorDecl>(GD.getDecl()),
+ GD.getCtorType());
+ else if (isa<CXXDestructorDecl>(GD.getDecl()))
+ return GetAddrOfCXXDestructor(cast<CXXDestructorDecl>(GD.getDecl()),
+ GD.getDtorType());
+ else if (isa<FunctionDecl>(GD.getDecl()))
+ return GetAddrOfFunction(GD);
+ else
+ return GetAddrOfGlobalVar(cast<VarDecl>(GD.getDecl()));
+ }
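+
+  // Illustrative usage (a hypothetical sketch; SomeFunctionDecl/SomeVarDecl
+  // are placeholder decls, not names from this header):
+  //   llvm::Constant *Fn  = CGM.GetAddrOfGlobal(GlobalDecl(SomeFunctionDecl));
+  //   llvm::Constant *Var = CGM.GetAddrOfGlobal(GlobalDecl(SomeVarDecl));
+  // Constructor and destructor decls are dispatched through GD.getCtorType()
+  // and GD.getDtorType() to select the complete- or base-object variant.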
+
+ /// CreateOrReplaceCXXRuntimeVariable - Will return a global variable of the given
+ /// type. If a variable with a different type already exists then a new
+ /// variable with the right type will be created and all uses of the old
+ /// variable will be replaced with a bitcast to the new variable.
+ llvm::GlobalVariable *
+ CreateOrReplaceCXXRuntimeVariable(StringRef Name, llvm::Type *Ty,
+ llvm::GlobalValue::LinkageTypes Linkage);
+
+ /// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
+ /// given global variable. If Ty is non-null and if the global doesn't exist,
+  /// then it will be created with the specified type instead of whatever the
+ /// normal requested type would be.
+ llvm::Constant *GetAddrOfGlobalVar(const VarDecl *D,
+ llvm::Type *Ty = 0);
+
+
+ /// GetAddrOfFunction - Return the address of the given function. If Ty is
+ /// non-null, then this function will use the specified type if it has to
+ /// create it.
+ llvm::Constant *GetAddrOfFunction(GlobalDecl GD,
+ llvm::Type *Ty = 0,
+ bool ForVTable = false);
+
+ /// GetAddrOfRTTIDescriptor - Get the address of the RTTI descriptor
+ /// for the given type.
+ llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty, bool ForEH = false);
+
+ /// GetAddrOfThunk - Get the address of the thunk for the given global decl.
+ llvm::Constant *GetAddrOfThunk(GlobalDecl GD, const ThunkInfo &Thunk);
+
+ /// GetWeakRefReference - Get a reference to the target of VD.
+ llvm::Constant *GetWeakRefReference(const ValueDecl *VD);
+
+  /// GetNonVirtualBaseClassOffset - Returns the offset from a derived class
+  /// to a base class along the given inheritance path. Returns null if the
+  /// offset is 0.
+ llvm::Constant *
+ GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd);
+
+ /// A pair of helper functions for a __block variable.
+ class ByrefHelpers : public llvm::FoldingSetNode {
+ public:
+ llvm::Constant *CopyHelper;
+ llvm::Constant *DisposeHelper;
+
+ /// The alignment of the field. This is important because
+ /// different offsets to the field within the byref struct need to
+ /// have different helper functions.
+ CharUnits Alignment;
+
+ ByrefHelpers(CharUnits alignment) : Alignment(alignment) {}
+ virtual ~ByrefHelpers();
+
+ void Profile(llvm::FoldingSetNodeID &id) const {
+ id.AddInteger(Alignment.getQuantity());
+ profileImpl(id);
+ }
+ virtual void profileImpl(llvm::FoldingSetNodeID &id) const = 0;
+
+ virtual bool needsCopy() const { return true; }
+ virtual void emitCopy(CodeGenFunction &CGF,
+ llvm::Value *dest, llvm::Value *src) = 0;
+
+ virtual bool needsDispose() const { return true; }
+ virtual void emitDispose(CodeGenFunction &CGF, llvm::Value *field) = 0;
+ };
+
+ llvm::FoldingSet<ByrefHelpers> ByrefHelpersCache;
+
+ /// getUniqueBlockCount - Fetches the global unique block count.
+ int getUniqueBlockCount() { return ++Block.GlobalUniqueCount; }
+
+ /// getBlockDescriptorType - Fetches the type of a generic block
+ /// descriptor.
+ llvm::Type *getBlockDescriptorType();
+
+ /// getGenericBlockLiteralType - The type of a generic block literal.
+ llvm::Type *getGenericBlockLiteralType();
+
+ /// GetAddrOfGlobalBlock - Gets the address of a block which
+ /// requires no captures.
+ llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
+
+ /// GetAddrOfConstantCFString - Return a pointer to a constant CFString object
+ /// for the given string.
+ llvm::Constant *GetAddrOfConstantCFString(const StringLiteral *Literal);
+
+ /// GetAddrOfConstantString - Return a pointer to a constant NSString object
+  /// for the given string, or a user-defined String object as specified via
+  /// the -fconstant-string-class=class_name option.
+ llvm::Constant *GetAddrOfConstantString(const StringLiteral *Literal);
+
+ /// GetConstantArrayFromStringLiteral - Return a constant array for the given
+ /// string.
+ llvm::Constant *GetConstantArrayFromStringLiteral(const StringLiteral *E);
+
+ /// GetAddrOfConstantStringFromLiteral - Return a pointer to a constant array
+ /// for the given string literal.
+ llvm::Constant *GetAddrOfConstantStringFromLiteral(const StringLiteral *S);
+
+ /// GetAddrOfConstantStringFromObjCEncode - Return a pointer to a constant
+ /// array for the given ObjCEncodeExpr node.
+ llvm::Constant *GetAddrOfConstantStringFromObjCEncode(const ObjCEncodeExpr *);
+
+ /// GetAddrOfConstantString - Returns a pointer to a character array
+  /// containing the literal. Its contents are exactly those of the given
+  /// string, i.e. it will not be null terminated automatically; see
+ /// GetAddrOfConstantCString. Note that whether the result is actually a
+ /// pointer to an LLVM constant depends on Feature.WriteableStrings.
+ ///
+ /// The result has pointer to array type.
+ ///
+ /// \param GlobalName If provided, the name to use for the global
+ /// (if one is created).
+ llvm::Constant *GetAddrOfConstantString(StringRef Str,
+ const char *GlobalName=0,
+ unsigned Alignment=1);
+
+ /// GetAddrOfConstantCString - Returns a pointer to a character array
+ /// containing the literal and a terminating '\0' character. The result has
+ /// pointer to array type.
+ ///
+ /// \param GlobalName If provided, the name to use for the global (if one is
+ /// created).
+ llvm::Constant *GetAddrOfConstantCString(const std::string &str,
+ const char *GlobalName=0,
+ unsigned Alignment=1);
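+
+  // Illustrative example (a sketch of the contract described above): for the
+  // input "hi", GetAddrOfConstantString returns a pointer to a two-element
+  // array holding exactly 'h','i' (no trailing NUL), while
+  // GetAddrOfConstantCString returns a pointer to a three-element array
+  // c"hi\00" with the terminating '\0' appended.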
+
+ /// GetAddrOfConstantCompoundLiteral - Returns a pointer to a constant global
+ /// variable for the given file-scope compound literal expression.
+ llvm::Constant *GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr*E);
+
+ /// \brief Retrieve the record type that describes the state of an
+ /// Objective-C fast enumeration loop (for..in).
+ QualType getObjCFastEnumerationStateType();
+
+ /// GetAddrOfCXXConstructor - Return the address of the constructor of the
+ /// given type.
+ llvm::GlobalValue *GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
+ CXXCtorType ctorType,
+ const CGFunctionInfo *fnInfo = 0);
+
+  /// GetAddrOfCXXDestructor - Return the address of the destructor of the
+  /// given type.
+ llvm::GlobalValue *GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
+ CXXDtorType dtorType,
+ const CGFunctionInfo *fnInfo = 0);
+
+ /// getBuiltinLibFunction - Given a builtin id for a function like
+ /// "__builtin_fabsf", return a Function* for "fabsf".
+ llvm::Value *getBuiltinLibFunction(const FunctionDecl *FD,
+ unsigned BuiltinID);
+
+ llvm::Function *getIntrinsic(unsigned IID, ArrayRef<llvm::Type*> Tys =
+ ArrayRef<llvm::Type*>());
+
+ /// EmitTopLevelDecl - Emit code for a single top level declaration.
+ void EmitTopLevelDecl(Decl *D);
+
+ /// HandleCXXStaticMemberVarInstantiation - Tell the consumer that this
+  /// variable has been instantiated.
+ void HandleCXXStaticMemberVarInstantiation(VarDecl *VD);
+
+ /// AddUsedGlobal - Add a global which should be forced to be
+ /// present in the object file; these are emitted to the llvm.used
+ /// metadata global.
+ void AddUsedGlobal(llvm::GlobalValue *GV);
+
+ /// AddCXXDtorEntry - Add a destructor and object to add to the C++ global
+ /// destructor function.
+ void AddCXXDtorEntry(llvm::Constant *DtorFn, llvm::Constant *Object) {
+ CXXGlobalDtors.push_back(std::make_pair(DtorFn, Object));
+ }
+
+ /// CreateRuntimeFunction - Create a new runtime function with the specified
+ /// type and name.
+ llvm::Constant *CreateRuntimeFunction(llvm::FunctionType *Ty,
+ StringRef Name,
+ llvm::Attributes ExtraAttrs =
+ llvm::Attribute::None);
+ /// CreateRuntimeVariable - Create a new runtime global variable with the
+ /// specified type and name.
+ llvm::Constant *CreateRuntimeVariable(llvm::Type *Ty,
+ StringRef Name);
+
+ ///@name Custom Blocks Runtime Interfaces
+ ///@{
+
+ llvm::Constant *getNSConcreteGlobalBlock();
+ llvm::Constant *getNSConcreteStackBlock();
+ llvm::Constant *getBlockObjectAssign();
+ llvm::Constant *getBlockObjectDispose();
+
+ ///@}
+
+  /// UpdateCompletedType - Make sure that this type is translated.
+ void UpdateCompletedType(const TagDecl *TD);
+
+ llvm::Constant *getMemberPointerConstant(const UnaryOperator *e);
+
+ /// EmitConstantInit - Try to emit the initializer for the given declaration
+ /// as a constant; returns 0 if the expression cannot be emitted as a
+ /// constant.
+ llvm::Constant *EmitConstantInit(const VarDecl &D, CodeGenFunction *CGF = 0);
+
+ /// EmitConstantExpr - Try to emit the given expression as a
+ /// constant; returns 0 if the expression cannot be emitted as a
+ /// constant.
+ llvm::Constant *EmitConstantExpr(const Expr *E, QualType DestType,
+ CodeGenFunction *CGF = 0);
+
+ /// EmitConstantValue - Emit the given constant value as a constant, in the
+ /// type's scalar representation.
+ llvm::Constant *EmitConstantValue(const APValue &Value, QualType DestType,
+ CodeGenFunction *CGF = 0);
+
+ /// EmitConstantValueForMemory - Emit the given constant value as a constant,
+ /// in the type's memory representation.
+ llvm::Constant *EmitConstantValueForMemory(const APValue &Value,
+ QualType DestType,
+ CodeGenFunction *CGF = 0);
+
+ /// EmitNullConstant - Return the result of value-initializing the given
+ /// type, i.e. a null expression of the given type. This is usually,
+ /// but not always, an LLVM null constant.
+ llvm::Constant *EmitNullConstant(QualType T);
+
+ /// EmitNullConstantForBase - Return a null constant appropriate for
+ /// zero-initializing a base class with the given type. This is usually,
+ /// but not always, an LLVM null constant.
+ llvm::Constant *EmitNullConstantForBase(const CXXRecordDecl *Record);
+
+ /// Error - Emit a general error that something can't be done.
+ void Error(SourceLocation loc, StringRef error);
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified stmt yet.
+ /// \param OmitOnError - If true, then this error should only be emitted if no
+ /// other errors have been reported.
+ void ErrorUnsupported(const Stmt *S, const char *Type,
+ bool OmitOnError=false);
+
+ /// ErrorUnsupported - Print out an error that codegen doesn't support the
+ /// specified decl yet.
+ /// \param OmitOnError - If true, then this error should only be emitted if no
+ /// other errors have been reported.
+ void ErrorUnsupported(const Decl *D, const char *Type,
+ bool OmitOnError=false);
+
+ /// SetInternalFunctionAttributes - Set the attributes on the LLVM
+ /// function for the given decl and function info. This applies
+ /// attributes necessary for handling the ABI as well as user
+ /// specified attributes like section.
+ void SetInternalFunctionAttributes(const Decl *D, llvm::Function *F,
+ const CGFunctionInfo &FI);
+
+ /// SetLLVMFunctionAttributes - Set the LLVM function attributes
+ /// (sext, zext, etc).
+ void SetLLVMFunctionAttributes(const Decl *D,
+ const CGFunctionInfo &Info,
+ llvm::Function *F);
+
+ /// SetLLVMFunctionAttributesForDefinition - Set the LLVM function attributes
+  /// which only apply to a function definition.
+ void SetLLVMFunctionAttributesForDefinition(const Decl *D, llvm::Function *F);
+
+ /// ReturnTypeUsesSRet - Return true iff the given type uses 'sret' when used
+ /// as a return type.
+ bool ReturnTypeUsesSRet(const CGFunctionInfo &FI);
+
+ /// ReturnTypeUsesFPRet - Return true iff the given type uses 'fpret' when
+ /// used as a return type.
+ bool ReturnTypeUsesFPRet(QualType ResultType);
+
+ /// ReturnTypeUsesFP2Ret - Return true iff the given type uses 'fp2ret' when
+ /// used as a return type.
+ bool ReturnTypeUsesFP2Ret(QualType ResultType);
+
+ /// ConstructAttributeList - Get the LLVM attributes and calling convention to
+ /// use for a particular function type.
+ ///
+ /// \param Info - The function type information.
+ /// \param TargetDecl - The decl these attributes are being constructed
+ /// for. If supplied the attributes applied to this decl may contribute to the
+ /// function attributes and calling convention.
+ /// \param PAL [out] - On return, the attribute list to use.
+ /// \param CallingConv [out] - On return, the LLVM calling convention to use.
+ void ConstructAttributeList(const CGFunctionInfo &Info,
+ const Decl *TargetDecl,
+ AttributeListType &PAL,
+ unsigned &CallingConv);
+
+ StringRef getMangledName(GlobalDecl GD);
+ void getBlockMangledName(GlobalDecl GD, MangleBuffer &Buffer,
+ const BlockDecl *BD);
+
+ void EmitTentativeDefinition(const VarDecl *D);
+
+ void EmitVTable(CXXRecordDecl *Class, bool DefinitionRequired);
+
+ llvm::GlobalVariable::LinkageTypes
+ getFunctionLinkage(const FunctionDecl *FD);
+
+ void setFunctionLinkage(const FunctionDecl *FD, llvm::GlobalValue *V) {
+ V->setLinkage(getFunctionLinkage(FD));
+ }
+
+ /// getVTableLinkage - Return the appropriate linkage for the vtable, VTT,
+ /// and type information of the given class.
+ llvm::GlobalVariable::LinkageTypes getVTableLinkage(const CXXRecordDecl *RD);
+
+ /// GetTargetTypeStoreSize - Return the store size, in character units, of
+ /// the given LLVM type.
+ CharUnits GetTargetTypeStoreSize(llvm::Type *Ty) const;
+
+ /// GetLLVMLinkageVarDefinition - Returns LLVM linkage for a global
+ /// variable.
+ llvm::GlobalValue::LinkageTypes
+ GetLLVMLinkageVarDefinition(const VarDecl *D,
+ llvm::GlobalVariable *GV);
+
+ std::vector<const CXXRecordDecl*> DeferredVTables;
+
+ /// Emit all the global annotations.
+ void EmitGlobalAnnotations();
+
+ /// Emit an annotation string.
+ llvm::Constant *EmitAnnotationString(llvm::StringRef Str);
+
+ /// Emit the annotation's translation unit.
+ llvm::Constant *EmitAnnotationUnit(SourceLocation Loc);
+
+ /// Emit the annotation line number.
+ llvm::Constant *EmitAnnotationLineNo(SourceLocation L);
+
+ /// EmitAnnotateAttr - Generate the llvm::ConstantStruct which contains the
+ /// annotation information for a given GlobalValue. The annotation struct is
+ /// {i8 *, i8 *, i8 *, i32}. The first field is a constant expression, the
+ /// GlobalValue being annotated. The second field is the constant string
+ /// created from the AnnotateAttr's annotation. The third field is a constant
+ /// string containing the name of the translation unit. The fourth field is
+ /// the line number in the file of the annotated value declaration.
+ llvm::Constant *EmitAnnotateAttr(llvm::GlobalValue *GV,
+ const AnnotateAttr *AA,
+ SourceLocation L);
+
+ /// Add global annotations that are set on D, for the global GV. Those
+ /// annotations are emitted during finalization of the LLVM code.
+ void AddGlobalAnnotations(const ValueDecl *D, llvm::GlobalValue *GV);
+
+private:
+ llvm::GlobalValue *GetGlobalValue(StringRef Ref);
+
+ llvm::Constant *GetOrCreateLLVMFunction(StringRef MangledName,
+ llvm::Type *Ty,
+ GlobalDecl D,
+ bool ForVTable,
+ llvm::Attributes ExtraAttrs =
+ llvm::Attribute::None);
+ llvm::Constant *GetOrCreateLLVMGlobal(StringRef MangledName,
+ llvm::PointerType *PTy,
+ const VarDecl *D,
+ bool UnnamedAddr = false);
+
+ /// SetCommonAttributes - Set attributes which are common to any
+ /// form of a global definition (alias, Objective-C method,
+ /// function, global variable).
+ ///
+ /// NOTE: This should only be called for definitions.
+ void SetCommonAttributes(const Decl *D, llvm::GlobalValue *GV);
+
+ /// SetFunctionDefinitionAttributes - Set attributes for a global definition.
+ void SetFunctionDefinitionAttributes(const FunctionDecl *D,
+ llvm::GlobalValue *GV);
+
+ /// SetFunctionAttributes - Set function attributes for a function
+ /// declaration.
+ void SetFunctionAttributes(GlobalDecl GD,
+ llvm::Function *F,
+ bool IsIncompleteFunction);
+
+  /// EmitGlobal - Emit code for a single global function or var decl. Forward
+ /// declarations are emitted lazily.
+ void EmitGlobal(GlobalDecl D);
+
+ void EmitGlobalDefinition(GlobalDecl D);
+
+ void EmitGlobalFunctionDefinition(GlobalDecl GD);
+ void EmitGlobalVarDefinition(const VarDecl *D);
+ llvm::Constant *MaybeEmitGlobalStdInitializerListInitializer(const VarDecl *D,
+ const Expr *init);
+ void EmitAliasDefinition(GlobalDecl GD);
+ void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
+ void EmitObjCIvarInitializations(ObjCImplementationDecl *D);
+
+ // C++ related functions.
+
+ bool TryEmitDefinitionAsAlias(GlobalDecl Alias, GlobalDecl Target);
+ bool TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D);
+
+ void EmitNamespace(const NamespaceDecl *D);
+ void EmitLinkageSpec(const LinkageSpecDecl *D);
+
+ /// EmitCXXConstructors - Emit constructors (base, complete) from a
+ /// C++ constructor Decl.
+ void EmitCXXConstructors(const CXXConstructorDecl *D);
+
+ /// EmitCXXConstructor - Emit a single constructor with the given type from
+ /// a C++ constructor Decl.
+ void EmitCXXConstructor(const CXXConstructorDecl *D, CXXCtorType Type);
+
+ /// EmitCXXDestructors - Emit destructors (base, complete) from a
+ /// C++ destructor Decl.
+ void EmitCXXDestructors(const CXXDestructorDecl *D);
+
+ /// EmitCXXDestructor - Emit a single destructor with the given type from
+ /// a C++ destructor Decl.
+ void EmitCXXDestructor(const CXXDestructorDecl *D, CXXDtorType Type);
+
+ /// EmitCXXGlobalInitFunc - Emit the function that initializes C++ globals.
+ void EmitCXXGlobalInitFunc();
+
+ /// EmitCXXGlobalDtorFunc - Emit the function that destroys C++ globals.
+ void EmitCXXGlobalDtorFunc();
+
+ /// EmitCXXGlobalVarDeclInitFunc - Emit the function that initializes the
+ /// specified global (if PerformInit is true) and registers its destructor.
+ void EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
+ llvm::GlobalVariable *Addr,
+ bool PerformInit);
+
+ // FIXME: Hardcoding priority here is gross.
+ void AddGlobalCtor(llvm::Function *Ctor, int Priority=65535);
+ void AddGlobalDtor(llvm::Function *Dtor, int Priority=65535);
+
+ /// EmitCtorList - Generates a global array of functions and priorities using
+ /// the given list and name. This array will have appending linkage and is
+ /// suitable for use as a LLVM constructor or destructor array.
+ void EmitCtorList(const CtorList &Fns, const char *GlobalName);
+
+ /// EmitFundamentalRTTIDescriptor - Emit the RTTI descriptors for the
+ /// given type.
+ void EmitFundamentalRTTIDescriptor(QualType Type);
+
+ /// EmitFundamentalRTTIDescriptors - Emit the RTTI descriptors for the
+ /// builtin types.
+ void EmitFundamentalRTTIDescriptors();
+
+ /// EmitDeferred - Emit any needed decls for which code generation
+ /// was deferred.
+ void EmitDeferred(void);
+
+ /// EmitLLVMUsed - Emit the llvm.used metadata used to force
+  /// references to globals which may otherwise be optimized out.
+ void EmitLLVMUsed(void);
+
+ void EmitDeclMetadata();
+
+ /// EmitCoverageFile - Emit the llvm.gcov metadata used to tell LLVM where
+ /// to emit the .gcno and .gcda files in a way that persists in .bc files.
+ void EmitCoverageFile();
+
+ /// MayDeferGeneration - Determine if the given decl can be emitted
+ /// lazily; this is only relevant for definitions. The given decl
+ /// must be either a function or var decl.
+ bool MayDeferGeneration(const ValueDecl *D);
+
+ /// SimplifyPersonality - Check whether we can use a "simpler", more
+ /// core exceptions personality function.
+ void SimplifyPersonality();
+};
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp
new file mode 100644
index 0000000..9ee3f1d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.cpp
@@ -0,0 +1,185 @@
+//===--- CodeGenTBAA.cpp - TBAA information for LLVM CodeGen --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that manages TBAA information and defines the TBAA policy
+// for the optimizer to use. Relevant standards text includes:
+//
+// C99 6.5p7
+// C++ [basic.lval] (p10 in n3126, p15 in some earlier versions)
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTBAA.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Mangle.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Metadata.h"
+#include "llvm/Constants.h"
+#include "llvm/Type.h"
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenTBAA::CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext& VMContext,
+ const LangOptions &Features, MangleContext &MContext)
+ : Context(Ctx), VMContext(VMContext), Features(Features), MContext(MContext),
+ Root(0), Char(0) {
+}
+
+CodeGenTBAA::~CodeGenTBAA() {
+}
+
+llvm::MDNode *CodeGenTBAA::getRoot() {
+ // Define the root of the tree. This identifies the tree, so that
+ // if our LLVM IR is linked with LLVM IR from a different front-end
+ // (or a different version of this front-end), their TBAA trees will
+ // remain distinct, and the optimizer will treat them conservatively.
+ if (!Root)
+ Root = getTBAAInfoForNamedType("Simple C/C++ TBAA", 0);
+
+ return Root;
+}
+
+llvm::MDNode *CodeGenTBAA::getChar() {
+ // Define the root of the tree for user-accessible memory. C and C++
+ // give special powers to char and certain similar types. However,
+  // these special powers only cover user-accessible memory, and don't
+ // include things like vtables.
+ if (!Char)
+ Char = getTBAAInfoForNamedType("omnipotent char", getRoot());
+
+ return Char;
+}
+
+/// getTBAAInfoForNamedType - Create a TBAA tree node with the given string
+/// as its identifier, and the given Parent node as its tree parent.
+llvm::MDNode *CodeGenTBAA::getTBAAInfoForNamedType(StringRef NameStr,
+ llvm::MDNode *Parent,
+ bool Readonly) {
+ // Currently there is only one flag defined - the readonly flag.
+ llvm::Value *Flags = 0;
+ if (Readonly)
+ Flags = llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), true);
+
+ // Set up the mdnode operand list.
+ llvm::Value *Ops[] = {
+ llvm::MDString::get(VMContext, NameStr),
+ Parent,
+ Flags
+ };
+
+ // Create the mdnode.
+ unsigned Len = llvm::array_lengthof(Ops) - !Flags;
+ return llvm::MDNode::get(VMContext, llvm::makeArrayRef(Ops, Len));
+}
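+
+// Illustrative sketch (assuming the metadata printing syntax of this LLVM
+// era): a node built by getTBAAInfoForNamedType prints roughly as
+//   !1 = metadata !{metadata !"omnipotent char", metadata !0}
+// i.e. the name string followed by the parent node, plus an optional i64 1
+// third operand when Readonly is set.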
+
+static bool TypeHasMayAlias(QualType QTy) {
+ // Tagged types have declarations, and therefore may have attributes.
+ if (const TagType *TTy = dyn_cast<TagType>(QTy))
+ return TTy->getDecl()->hasAttr<MayAliasAttr>();
+
+ // Typedef types have declarations, and therefore may have attributes.
+ if (const TypedefType *TTy = dyn_cast<TypedefType>(QTy)) {
+ if (TTy->getDecl()->hasAttr<MayAliasAttr>())
+ return true;
+ // Also, their underlying types may have relevant attributes.
+ return TypeHasMayAlias(TTy->desugar());
+ }
+
+ return false;
+}
+
+llvm::MDNode *
+CodeGenTBAA::getTBAAInfo(QualType QTy) {
+ // If the type has the may_alias attribute (even on a typedef), it is
+ // effectively in the general char alias class.
+ if (TypeHasMayAlias(QTy))
+ return getChar();
+
+ const Type *Ty = Context.getCanonicalType(QTy).getTypePtr();
+
+ if (llvm::MDNode *N = MetadataCache[Ty])
+ return N;
+
+ // Handle builtin types.
+ if (const BuiltinType *BTy = dyn_cast<BuiltinType>(Ty)) {
+ switch (BTy->getKind()) {
+ // Character types are special and can alias anything.
+ // In C++, this technically only includes "char" and "unsigned char",
+ // and not "signed char". In C, it includes all three. For now,
+ // the risk of exploiting this detail in C++ seems likely to outweigh
+ // the benefit.
+ case BuiltinType::Char_U:
+ case BuiltinType::Char_S:
+ case BuiltinType::UChar:
+ case BuiltinType::SChar:
+ return getChar();
+
+ // Unsigned types can alias their corresponding signed types.
+ case BuiltinType::UShort:
+ return getTBAAInfo(Context.ShortTy);
+ case BuiltinType::UInt:
+ return getTBAAInfo(Context.IntTy);
+ case BuiltinType::ULong:
+ return getTBAAInfo(Context.LongTy);
+ case BuiltinType::ULongLong:
+ return getTBAAInfo(Context.LongLongTy);
+ case BuiltinType::UInt128:
+ return getTBAAInfo(Context.Int128Ty);
+
+ // Treat all other builtin types as distinct types. This includes
+ // treating wchar_t, char16_t, and char32_t as distinct from their
+ // "underlying types".
+ default:
+ return MetadataCache[Ty] =
+ getTBAAInfoForNamedType(BTy->getName(Features), getChar());
+ }
+ }
+
+ // Handle pointers.
+ // TODO: Implement C++'s type "similarity" and consider dis-"similar"
+ // pointers distinct.
+ if (Ty->isPointerType())
+ return MetadataCache[Ty] = getTBAAInfoForNamedType("any pointer",
+ getChar());
+
+  // Enum types are distinct types. In C++ they have "underlying types",
+  // but those aren't related to them for TBAA purposes.
+ if (const EnumType *ETy = dyn_cast<EnumType>(Ty)) {
+ // In C mode, two anonymous enums are compatible iff their members
+ // are the same -- see C99 6.2.7p1. For now, be conservative. We could
+ // theoretically implement this by combining information about all the
+ // members into a single identifying MDNode.
+ if (!Features.CPlusPlus &&
+ ETy->getDecl()->getTypedefNameForAnonDecl())
+ return MetadataCache[Ty] = getChar();
+
+ // In C++ mode, types have linkage, so we can rely on the ODR and
+ // on their mangled names, if they're external.
+ // TODO: Is there a way to get a program-wide unique name for a
+ // decl with local linkage or no linkage?
+ if (Features.CPlusPlus &&
+ ETy->getDecl()->getLinkage() != ExternalLinkage)
+ return MetadataCache[Ty] = getChar();
+
+ // TODO: This is using the RTTI name. Is there a better way to get
+ // a unique string for a type?
+ SmallString<256> OutName;
+ llvm::raw_svector_ostream Out(OutName);
+ MContext.mangleCXXRTTIName(QualType(ETy, 0), Out);
+ Out.flush();
+ return MetadataCache[Ty] = getTBAAInfoForNamedType(OutName, getChar());
+ }
+
+ // For now, handle any other kind of type conservatively.
+ return MetadataCache[Ty] = getChar();
+}
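+
+// Worked example (a sketch of the mapping implemented above): in plain C,
+// 'signed char' and 'unsigned char' share the "omnipotent char" node,
+// 'unsigned int' maps to the same node as 'int', every pointer type maps to
+// the single "any pointer" node, and each remaining builtin type gets its own
+// node named after the type, all parented on the char node.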
+
+llvm::MDNode *CodeGenTBAA::getTBAAInfoForVTablePtr() {
+ return getTBAAInfoForNamedType("vtable pointer", getRoot());
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h
new file mode 100644
index 0000000..8e08498
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTBAA.h
@@ -0,0 +1,80 @@
+//===--- CodeGenTBAA.h - TBAA information for LLVM CodeGen ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that manages TBAA information and defines the TBAA policy
+// for the optimizer to use.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENTBAA_H
+#define CLANG_CODEGEN_CODEGENTBAA_H
+
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/DenseMap.h"
+
+namespace llvm {
+ class LLVMContext;
+ class MDNode;
+}
+
+namespace clang {
+ class ASTContext;
+ class LangOptions;
+ class MangleContext;
+ class QualType;
+ class Type;
+
+namespace CodeGen {
+ class CGRecordLayout;
+
+/// CodeGenTBAA - This class organizes the cross-module state that is used
+/// while generating type-based alias analysis (TBAA) metadata for LLVM IR.
+class CodeGenTBAA {
+ ASTContext &Context;
+ llvm::LLVMContext& VMContext;
+ const LangOptions &Features;
+ MangleContext &MContext;
+
+ /// MetadataCache - This maps clang::Types to llvm::MDNodes describing them.
+ llvm::DenseMap<const Type *, llvm::MDNode *> MetadataCache;
+
+ llvm::MDNode *Root;
+ llvm::MDNode *Char;
+
+ /// getRoot - This is the mdnode for the root of the metadata type graph
+ /// for this translation unit.
+ llvm::MDNode *getRoot();
+
+ /// getChar - This is the mdnode for "char", which is special, and any types
+ /// considered to be equivalent to it.
+ llvm::MDNode *getChar();
+
+ llvm::MDNode *getTBAAInfoForNamedType(StringRef NameStr,
+ llvm::MDNode *Parent,
+ bool Readonly = false);
+
+public:
+ CodeGenTBAA(ASTContext &Ctx, llvm::LLVMContext &VMContext,
+ const LangOptions &Features,
+ MangleContext &MContext);
+ ~CodeGenTBAA();
+
+ /// getTBAAInfo - Get the TBAA MDNode to be used for a dereference
+ /// of the given type.
+ llvm::MDNode *getTBAAInfo(QualType QTy);
+
+ /// getTBAAInfoForVTablePtr - Get the TBAA MDNode to be used for a
+ /// dereference of a vtable pointer.
+ llvm::MDNode *getTBAAInfoForVTablePtr();
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
new file mode 100644
index 0000000..41fd536
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -0,0 +1,676 @@
+//===--- CodeGenTypes.cpp - Type translation for LLVM CodeGen -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> LLVM type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenTypes.h"
+#include "CGCall.h"
+#include "CGCXXABI.h"
+#include "CGRecordLayout.h"
+#include "TargetInfo.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
+#include "llvm/DerivedTypes.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace CodeGen;
+
+CodeGenTypes::CodeGenTypes(CodeGenModule &CGM)
+ : Context(CGM.getContext()), Target(Context.getTargetInfo()),
+ TheModule(CGM.getModule()), TheTargetData(CGM.getTargetData()),
+ TheABIInfo(CGM.getTargetCodeGenInfo().getABIInfo()),
+ TheCXXABI(CGM.getCXXABI()),
+ CodeGenOpts(CGM.getCodeGenOpts()), CGM(CGM) {
+ SkippedLayout = false;
+}
+
+CodeGenTypes::~CodeGenTypes() {
+ for (llvm::DenseMap<const Type *, CGRecordLayout *>::iterator
+ I = CGRecordLayouts.begin(), E = CGRecordLayouts.end();
+ I != E; ++I)
+ delete I->second;
+
+ for (llvm::FoldingSet<CGFunctionInfo>::iterator
+ I = FunctionInfos.begin(), E = FunctionInfos.end(); I != E; )
+ delete &*I++;
+}
+
+void CodeGenTypes::addRecordTypeName(const RecordDecl *RD,
+ llvm::StructType *Ty,
+ StringRef suffix) {
+ SmallString<256> TypeName;
+ llvm::raw_svector_ostream OS(TypeName);
+ OS << RD->getKindName() << '.';
+
+ // Name the codegen type after the typedef name
+ // if there is no tag type name available
+ if (RD->getIdentifier()) {
+ // FIXME: We should not have to check for a null decl context here.
+ // Right now we do it because the implicit Obj-C decls don't have one.
+ if (RD->getDeclContext())
+ OS << RD->getQualifiedNameAsString();
+ else
+ RD->printName(OS);
+ } else if (const TypedefNameDecl *TDD = RD->getTypedefNameForAnonDecl()) {
+ // FIXME: We should not have to check for a null decl context here.
+ // Right now we do it because the implicit Obj-C decls don't have one.
+ if (TDD->getDeclContext())
+ OS << TDD->getQualifiedNameAsString();
+ else
+ TDD->printName(OS);
+ } else
+ OS << "anon";
+
+ if (!suffix.empty())
+ OS << suffix;
+
+ Ty->setName(OS.str());
+}
+
+/// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
+/// ConvertType in that it is used to convert to the memory representation for
+/// a type. For example, the scalar representation for _Bool is i1, but the
+/// memory representation is usually i8 or i32, depending on the target.
+llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T){
+ llvm::Type *R = ConvertType(T);
+
+ // If this is a non-bool type, don't map it.
+ if (!R->isIntegerTy(1))
+ return R;
+
+ // Otherwise, return an integer of the target-specified size.
+ return llvm::IntegerType::get(getLLVMContext(),
+ (unsigned)Context.getTypeSize(T));
+}
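+
+// Illustrative example (a sketch of the behavior above): on a typical target
+// where _Bool occupies one byte, ConvertType(_Bool) yields i1 while
+// ConvertTypeForMem(_Bool) yields i8; for any type whose scalar form is not
+// i1, both functions return the same LLVM type.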
+
+
+/// isRecordLayoutComplete - Return true if the specified type is already
+/// completely laid out.
+bool CodeGenTypes::isRecordLayoutComplete(const Type *Ty) const {
+ llvm::DenseMap<const Type*, llvm::StructType *>::const_iterator I =
+ RecordDeclTypes.find(Ty);
+ return I != RecordDeclTypes.end() && !I->second->isOpaque();
+}
+
+static bool
+isSafeToConvert(QualType T, CodeGenTypes &CGT,
+ llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked);
+
+
+/// isSafeToConvert - Return true if it is safe to convert the specified record
+/// decl to IR and lay it out, false if doing so would cause us to get into a
+/// recursive compilation mess.
+static bool
+isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT,
+ llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
+ // If we have already checked this type (maybe the same type is used by-value
+  // multiple times in multiple structure fields), don't check again.
+ if (!AlreadyChecked.insert(RD)) return true;
+
+ const Type *Key = CGT.getContext().getTagDeclType(RD).getTypePtr();
+
+ // If this type is already laid out, converting it is a noop.
+ if (CGT.isRecordLayoutComplete(Key)) return true;
+
+ // If this type is currently being laid out, we can't recursively compile it.
+ if (CGT.isRecordBeingLaidOut(Key))
+ return false;
+
+ // If this type would require laying out bases that are currently being laid
+ // out, don't do it. This includes virtual base classes which get laid out
+ // when a class is translated, even though they aren't embedded by-value into
+ // the class.
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator I = CRD->bases_begin(),
+ E = CRD->bases_end(); I != E; ++I)
+ if (!isSafeToConvert(I->getType()->getAs<RecordType>()->getDecl(),
+ CGT, AlreadyChecked))
+ return false;
+ }
+
+ // If this type would require laying out members that are currently being laid
+ // out, don't do it.
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I)
+ if (!isSafeToConvert(I->getType(), CGT, AlreadyChecked))
+ return false;
+
+  // If there are no problems, let's do it.
+ return true;
+}
+
+/// isSafeToConvert - Return true if it is safe to convert this field type,
+/// which requires the structure elements contained by-value to all be
+/// recursively safe to convert.
+static bool
+isSafeToConvert(QualType T, CodeGenTypes &CGT,
+ llvm::SmallPtrSet<const RecordDecl*, 16> &AlreadyChecked) {
+ T = T.getCanonicalType();
+
+ // If this is a record, check it.
+ if (const RecordType *RT = dyn_cast<RecordType>(T))
+ return isSafeToConvert(RT->getDecl(), CGT, AlreadyChecked);
+
+ // If this is an array, check the elements, which are embedded inline.
+ if (const ArrayType *AT = dyn_cast<ArrayType>(T))
+ return isSafeToConvert(AT->getElementType(), CGT, AlreadyChecked);
+
+ // Otherwise, there is no concern about transforming this. We only care about
+ // things that are contained by-value in a structure that can have another
+ // structure as a member.
+ return true;
+}
+
+
+/// isSafeToConvert - Return true if it is safe to convert the specified record
+/// decl to IR and lay it out, false if doing so would cause us to get into a
+/// recursive compilation mess.
+static bool isSafeToConvert(const RecordDecl *RD, CodeGenTypes &CGT) {
+ // If no structs are being laid out, we can certainly do this one.
+ if (CGT.noRecordsBeingLaidOut()) return true;
+
+ llvm::SmallPtrSet<const RecordDecl*, 16> AlreadyChecked;
+ return isSafeToConvert(RD, CGT, AlreadyChecked);
+}
+
+
+/// isFuncTypeArgumentConvertible - Return true if the specified type in a
+/// function argument or result position can be converted to an IR type at this
+/// point. This boils down to whether it is complete, as well as whether
+/// we've temporarily deferred expanding the type because we're in a recursive
+/// context.
+bool CodeGenTypes::isFuncTypeArgumentConvertible(QualType Ty) {
+ // If this isn't a tagged type, we can convert it!
+ const TagType *TT = Ty->getAs<TagType>();
+ if (TT == 0) return true;
+
+ // Incomplete types cannot be converted.
+ if (TT->isIncompleteType())
+ return false;
+
+ // If this is an enum, then it is always safe to convert.
+ const RecordType *RT = dyn_cast<RecordType>(TT);
+ if (RT == 0) return true;
+
+ // Otherwise, we have to be careful. If it is a struct that we're in the
+ // process of expanding, then we can't convert the function type. That's ok
+ // though because we must be in a pointer context under the struct, so we can
+ // just convert it to a dummy type.
+ //
+ // We decide this by checking whether ConvertRecordDeclType returns us an
+ // opaque type for a struct that we know is defined.
+ return isSafeToConvert(RT->getDecl(), *this);
+}
+
+
+/// Code to verify a given function type is complete, i.e. the return type
+/// and all of the argument types are complete. Also check to see if we are in
+/// a RS_StructPointer context, and if so whether any struct types have been
+/// pended. If so, we don't want to ask the ABI lowering code to handle a type
+/// that cannot be converted to an IR type.
+bool CodeGenTypes::isFuncTypeConvertible(const FunctionType *FT) {
+ if (!isFuncTypeArgumentConvertible(FT->getResultType()))
+ return false;
+
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT))
+ for (unsigned i = 0, e = FPT->getNumArgs(); i != e; i++)
+ if (!isFuncTypeArgumentConvertible(FPT->getArgType(i)))
+ return false;
+
+ return true;
+}
+
+/// UpdateCompletedType - When we find the full definition for a TagDecl,
+/// replace the 'opaque' type we previously made for it if applicable.
+void CodeGenTypes::UpdateCompletedType(const TagDecl *TD) {
+ // If this is an enum being completed, then we flush all non-struct types from
+ // the cache. This allows function types and other things that may be derived
+ // from the enum to be recomputed.
+ if (const EnumDecl *ED = dyn_cast<EnumDecl>(TD)) {
+ // Only flush the cache if we've actually already converted this type.
+ if (TypeCache.count(ED->getTypeForDecl())) {
+ // Okay, we formed some types based on this. We speculated that the enum
+ // would be lowered to i32, so we only need to flush the cache if this
+ // didn't happen.
+ if (!ConvertType(ED->getIntegerType())->isIntegerTy(32))
+ TypeCache.clear();
+ }
+ return;
+ }
+
+ // If we completed a RecordDecl that we previously used and converted to an
+ // anonymous type, then go ahead and complete it now.
+ const RecordDecl *RD = cast<RecordDecl>(TD);
+ if (RD->isDependentType()) return;
+
+ // Only complete it if we converted it already. If we haven't converted it
+ // yet, we'll just do it lazily.
+ if (RecordDeclTypes.count(Context.getTagDeclType(RD).getTypePtr()))
+ ConvertRecordDeclType(RD);
+}
+
+static llvm::Type *getTypeForFormat(llvm::LLVMContext &VMContext,
+ const llvm::fltSemantics &format) {
+ if (&format == &llvm::APFloat::IEEEhalf)
+ return llvm::Type::getInt16Ty(VMContext);
+ if (&format == &llvm::APFloat::IEEEsingle)
+ return llvm::Type::getFloatTy(VMContext);
+ if (&format == &llvm::APFloat::IEEEdouble)
+ return llvm::Type::getDoubleTy(VMContext);
+ if (&format == &llvm::APFloat::IEEEquad)
+ return llvm::Type::getFP128Ty(VMContext);
+ if (&format == &llvm::APFloat::PPCDoubleDouble)
+ return llvm::Type::getPPC_FP128Ty(VMContext);
+ if (&format == &llvm::APFloat::x87DoubleExtended)
+ return llvm::Type::getX86_FP80Ty(VMContext);
+ llvm_unreachable("Unknown float format!");
+}
+
+/// ConvertType - Convert the specified type to its LLVM form.
+llvm::Type *CodeGenTypes::ConvertType(QualType T) {
+ T = Context.getCanonicalType(T);
+
+ const Type *Ty = T.getTypePtr();
+
+ // RecordTypes are cached and processed specially.
+ if (const RecordType *RT = dyn_cast<RecordType>(Ty))
+ return ConvertRecordDeclType(RT->getDecl());
+
+ // See if type is already cached.
+ llvm::DenseMap<const Type *, llvm::Type *>::iterator TCI = TypeCache.find(Ty);
+ // If type is found in map then use it. Otherwise, convert type T.
+ if (TCI != TypeCache.end())
+ return TCI->second;
+
+ // If we don't have it in the cache, convert it now.
+ llvm::Type *ResultType = 0;
+ switch (Ty->getTypeClass()) {
+ case Type::Record: // Handled above.
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ llvm_unreachable("Non-canonical or dependent types aren't possible.");
+
+ case Type::Builtin: {
+ switch (cast<BuiltinType>(Ty)->getKind()) {
+ case BuiltinType::Void:
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ // LLVM void type can only be used as the result of a function call. Just
+ // map to the same as char.
+ ResultType = llvm::Type::getInt8Ty(getLLVMContext());
+ break;
+
+ case BuiltinType::Bool:
+ // Note that we always return bool as i1 for use as a scalar type.
+ ResultType = llvm::Type::getInt1Ty(getLLVMContext());
+ break;
+
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ case BuiltinType::Int:
+ case BuiltinType::UInt:
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U:
+ case BuiltinType::Char16:
+ case BuiltinType::Char32:
+ ResultType = llvm::IntegerType::get(getLLVMContext(),
+ static_cast<unsigned>(Context.getTypeSize(T)));
+ break;
+
+ case BuiltinType::Half:
+      // Half is special: it might be lowered to i16 (and will then be a
+      // storage-only type), or it can be represented as a set of native
+      // operations.
+
+ // FIXME: Ask target which kind of half FP it prefers (storage only vs
+ // native).
+ ResultType = llvm::Type::getInt16Ty(getLLVMContext());
+ break;
+ case BuiltinType::Float:
+ case BuiltinType::Double:
+ case BuiltinType::LongDouble:
+ ResultType = getTypeForFormat(getLLVMContext(),
+ Context.getFloatTypeSemantics(T));
+ break;
+
+ case BuiltinType::NullPtr:
+ // Model std::nullptr_t as i8*
+ ResultType = llvm::Type::getInt8PtrTy(getLLVMContext());
+ break;
+
+ case BuiltinType::UInt128:
+ case BuiltinType::Int128:
+ ResultType = llvm::IntegerType::get(getLLVMContext(), 128);
+ break;
+
+ case BuiltinType::Dependent:
+#define BUILTIN_TYPE(Id, SingletonId)
+#define PLACEHOLDER_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#include "clang/AST/BuiltinTypes.def"
+ llvm_unreachable("Unexpected placeholder builtin type!");
+ }
+ break;
+ }
+ case Type::Complex: {
+ llvm::Type *EltTy = ConvertType(cast<ComplexType>(Ty)->getElementType());
+ ResultType = llvm::StructType::get(EltTy, EltTy, NULL);
+ break;
+ }
+ case Type::LValueReference:
+ case Type::RValueReference: {
+ const ReferenceType *RTy = cast<ReferenceType>(Ty);
+ QualType ETy = RTy->getPointeeType();
+ llvm::Type *PointeeType = ConvertTypeForMem(ETy);
+ unsigned AS = Context.getTargetAddressSpace(ETy);
+ ResultType = llvm::PointerType::get(PointeeType, AS);
+ break;
+ }
+ case Type::Pointer: {
+ const PointerType *PTy = cast<PointerType>(Ty);
+ QualType ETy = PTy->getPointeeType();
+ llvm::Type *PointeeType = ConvertTypeForMem(ETy);
+ if (PointeeType->isVoidTy())
+ PointeeType = llvm::Type::getInt8Ty(getLLVMContext());
+ unsigned AS = Context.getTargetAddressSpace(ETy);
+ ResultType = llvm::PointerType::get(PointeeType, AS);
+ break;
+ }
+
+ case Type::VariableArray: {
+ const VariableArrayType *A = cast<VariableArrayType>(Ty);
+ assert(A->getIndexTypeCVRQualifiers() == 0 &&
+ "FIXME: We only handle trivial array types so far!");
+ // VLAs resolve to the innermost element type; this matches
+ // the return of alloca, and there isn't any obviously better choice.
+ ResultType = ConvertTypeForMem(A->getElementType());
+ break;
+ }
+ case Type::IncompleteArray: {
+ const IncompleteArrayType *A = cast<IncompleteArrayType>(Ty);
+ assert(A->getIndexTypeCVRQualifiers() == 0 &&
+ "FIXME: We only handle trivial array types so far!");
+ // int X[] -> [0 x int], unless the element type is not sized. If it is
+ // unsized (e.g. an incomplete struct) just use [0 x i8].
+ ResultType = ConvertTypeForMem(A->getElementType());
+ if (!ResultType->isSized()) {
+ SkippedLayout = true;
+ ResultType = llvm::Type::getInt8Ty(getLLVMContext());
+ }
+ ResultType = llvm::ArrayType::get(ResultType, 0);
+ break;
+ }
+ case Type::ConstantArray: {
+ const ConstantArrayType *A = cast<ConstantArrayType>(Ty);
+ llvm::Type *EltTy = ConvertTypeForMem(A->getElementType());
+
+ // Lower arrays of undefined struct type to arrays of i8 just to have a
+ // concrete type.
+ if (!EltTy->isSized()) {
+ SkippedLayout = true;
+ EltTy = llvm::Type::getInt8Ty(getLLVMContext());
+ }
+
+ ResultType = llvm::ArrayType::get(EltTy, A->getSize().getZExtValue());
+ break;
+ }
+ case Type::ExtVector:
+ case Type::Vector: {
+ const VectorType *VT = cast<VectorType>(Ty);
+ ResultType = llvm::VectorType::get(ConvertType(VT->getElementType()),
+ VT->getNumElements());
+ break;
+ }
+ case Type::FunctionNoProto:
+ case Type::FunctionProto: {
+ const FunctionType *FT = cast<FunctionType>(Ty);
+ // First, check whether we can build the full function type. If the
+ // function type depends on an incomplete type (e.g. a struct or enum), we
+ // cannot lower the function type.
+ if (!isFuncTypeConvertible(FT)) {
+ // This function's type depends on an incomplete tag type.
+ // Return a placeholder type.
+ ResultType = llvm::StructType::get(getLLVMContext());
+
+ SkippedLayout = true;
+ break;
+ }
+
+ // While we're converting the argument types for a function, we don't want
+ // to recursively convert any pointed-to structs. Converting directly-used
+ // structs is ok though.
+ if (!RecordsBeingLaidOut.insert(Ty)) {
+ ResultType = llvm::StructType::get(getLLVMContext());
+
+ SkippedLayout = true;
+ break;
+ }
+
+ // The function type can be built; call the appropriate routines to
+ // build it.
+ const CGFunctionInfo *FI;
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FT)) {
+ FI = &arrangeFunctionType(
+ CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT, 0)));
+ } else {
+ const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(FT);
+ FI = &arrangeFunctionType(
+ CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT, 0)));
+ }
+
+ // If there is something higher level prodding our CGFunctionInfo, then
+ // don't recurse into it again.
+ if (FunctionsBeingProcessed.count(FI)) {
+
+ ResultType = llvm::StructType::get(getLLVMContext());
+ SkippedLayout = true;
+ } else {
+
+ // Otherwise, we're good to go, go ahead and convert it.
+ ResultType = GetFunctionType(*FI);
+ }
+
+ RecordsBeingLaidOut.erase(Ty);
+
+ if (SkippedLayout)
+ TypeCache.clear();
+
+ if (RecordsBeingLaidOut.empty())
+ while (!DeferredRecords.empty())
+ ConvertRecordDeclType(DeferredRecords.pop_back_val());
+ break;
+ }
+
+ case Type::ObjCObject:
+ ResultType = ConvertType(cast<ObjCObjectType>(Ty)->getBaseType());
+ break;
+
+ case Type::ObjCInterface: {
+ // Objective-C interfaces are always opaque (outside of the
+ // runtime, which can do whatever it likes); we never refine
+ // these.
+ llvm::Type *&T = InterfaceTypes[cast<ObjCInterfaceType>(Ty)];
+ if (!T)
+ T = llvm::StructType::create(getLLVMContext());
+ ResultType = T;
+ break;
+ }
+
+ case Type::ObjCObjectPointer: {
+    // Protocol qualifications do not influence the LLVM type; we just return a
+ // pointer to the underlying interface type. We don't need to worry about
+ // recursive conversion.
+ llvm::Type *T =
+ ConvertTypeForMem(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
+ ResultType = T->getPointerTo();
+ break;
+ }
+
+ case Type::Enum: {
+ const EnumDecl *ED = cast<EnumType>(Ty)->getDecl();
+ if (ED->isCompleteDefinition() || ED->isFixed())
+ return ConvertType(ED->getIntegerType());
+ // Return a placeholder 'i32' type. This can be changed later when the
+ // type is defined (see UpdateCompletedType), but is likely to be the
+ // "right" answer.
+ ResultType = llvm::Type::getInt32Ty(getLLVMContext());
+ break;
+ }
+
+ case Type::BlockPointer: {
+ const QualType FTy = cast<BlockPointerType>(Ty)->getPointeeType();
+ llvm::Type *PointeeType = ConvertTypeForMem(FTy);
+ unsigned AS = Context.getTargetAddressSpace(FTy);
+ ResultType = llvm::PointerType::get(PointeeType, AS);
+ break;
+ }
+
+ case Type::MemberPointer: {
+ ResultType =
+ getCXXABI().ConvertMemberPointerType(cast<MemberPointerType>(Ty));
+ break;
+ }
+
+ case Type::Atomic: {
+ ResultType = ConvertType(cast<AtomicType>(Ty)->getValueType());
+ break;
+ }
+ }
+
+ assert(ResultType && "Didn't convert a type?");
+
+ TypeCache[Ty] = ResultType;
+ return ResultType;
+}
+
+/// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
+llvm::StructType *CodeGenTypes::ConvertRecordDeclType(const RecordDecl *RD) {
+  // TagDecls are not necessarily unique; instead, use the (clang) type
+  // connected to the decl.
+ const Type *Key = Context.getTagDeclType(RD).getTypePtr();
+
+ llvm::StructType *&Entry = RecordDeclTypes[Key];
+
+ // If we don't have a StructType at all yet, create the forward declaration.
+ if (Entry == 0) {
+ Entry = llvm::StructType::create(getLLVMContext());
+ addRecordTypeName(RD, Entry, "");
+ }
+ llvm::StructType *Ty = Entry;
+
+ // If this is still a forward declaration, or the LLVM type is already
+ // complete, there's nothing more to do.
+ RD = RD->getDefinition();
+ if (RD == 0 || !RD->isCompleteDefinition() || !Ty->isOpaque())
+ return Ty;
+
+ // If converting this type would cause us to infinitely loop, don't do it!
+ if (!isSafeToConvert(RD, *this)) {
+ DeferredRecords.push_back(RD);
+ return Ty;
+ }
+
+ // Okay, this is a definition of a type. Compile the implementation now.
+ bool InsertResult = RecordsBeingLaidOut.insert(Key); (void)InsertResult;
+ assert(InsertResult && "Recursively compiling a struct?");
+
+ // Force conversion of non-virtual base classes recursively.
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CRD->bases_begin(),
+ e = CRD->bases_end(); i != e; ++i) {
+ if (i->isVirtual()) continue;
+
+ ConvertRecordDeclType(i->getType()->getAs<RecordType>()->getDecl());
+ }
+ }
+
+ // Layout fields.
+ CGRecordLayout *Layout = ComputeRecordLayout(RD, Ty);
+ CGRecordLayouts[Key] = Layout;
+
+ // We're done laying out this struct.
+ bool EraseResult = RecordsBeingLaidOut.erase(Key); (void)EraseResult;
+ assert(EraseResult && "struct not in RecordsBeingLaidOut set?");
+
+ // If this struct blocked a FunctionType conversion, then recompute whatever
+ // was derived from that.
+ // FIXME: This is hugely overconservative.
+ if (SkippedLayout)
+ TypeCache.clear();
+
+ // If we're done converting the outer-most record, then convert any deferred
+ // structs as well.
+ if (RecordsBeingLaidOut.empty())
+ while (!DeferredRecords.empty())
+ ConvertRecordDeclType(DeferredRecords.pop_back_val());
+
+ return Ty;
+}
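+
+// Illustrative sketch (hypothetical input): for a self-referential record such
+// as 'struct S { struct S *next; };', the forward-declared opaque %struct.S
+// created above lets the 'next' field be lowered as a pointer to the still
+// opaque type while ComputeRecordLayout lays out the fields and sets the
+// struct body; any records deferred along the way are converted once the
+// outermost record finishes.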
+
+/// getCGRecordLayout - Return record layout info for the given record decl.
+const CGRecordLayout &
+CodeGenTypes::getCGRecordLayout(const RecordDecl *RD) {
+ const Type *Key = Context.getTagDeclType(RD).getTypePtr();
+
+ const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key);
+ if (!Layout) {
+ // Compute the type information.
+ ConvertRecordDeclType(RD);
+
+ // Now try again.
+ Layout = CGRecordLayouts.lookup(Key);
+ }
+
+ assert(Layout && "Unable to find record layout information for type");
+ return *Layout;
+}
+
+bool CodeGenTypes::isZeroInitializable(QualType T) {
+ // No need to check for member pointers when not compiling C++.
+ if (!Context.getLangOpts().CPlusPlus)
+ return true;
+
+ T = Context.getBaseElementType(T);
+
+ // Records are non-zero-initializable if they contain any
+ // non-zero-initializable subobjects.
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ return isZeroInitializable(RD);
+ }
+
+ // We have to ask the ABI about member pointers.
+ if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
+ return getCXXABI().isZeroInitializable(MPT);
+
+ // Everything else is okay.
+ return true;
+}
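+
+// Illustrative example (assuming the Itanium C++ ABI): a null pointer to data
+// member is represented as -1 rather than 0 there, so a record containing such
+// a member pointer is reported as not zero-initializable and cannot simply be
+// memset to zero when value-initialized.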
+
+bool CodeGenTypes::isZeroInitializable(const CXXRecordDecl *RD) {
+ return getCGRecordLayout(RD).isZeroInitializable();
+}
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
new file mode 100644
index 0000000..ba2b3ae
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/CodeGenTypes.h
@@ -0,0 +1,254 @@
+//===--- CodeGenTypes.h - Type translation for LLVM CodeGen -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the code that handles AST -> LLVM type lowering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CODEGENTYPES_H
+#define CLANG_CODEGEN_CODEGENTYPES_H
+
+#include "CGCall.h"
+#include "clang/AST/GlobalDecl.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseMap.h"
+#include <vector>
+
+namespace llvm {
+ class FunctionType;
+ class Module;
+ class TargetData;
+ class Type;
+ class LLVMContext;
+ class StructType;
+}
+
+namespace clang {
+ class ABIInfo;
+ class ASTContext;
+ template <typename> class CanQual;
+ class CXXConstructorDecl;
+ class CXXDestructorDecl;
+ class CXXMethodDecl;
+ class CodeGenOptions;
+ class FieldDecl;
+ class FunctionProtoType;
+ class ObjCInterfaceDecl;
+ class ObjCIvarDecl;
+ class PointerType;
+ class QualType;
+ class RecordDecl;
+ class TagDecl;
+ class TargetInfo;
+ class Type;
+ typedef CanQual<Type> CanQualType;
+
+namespace CodeGen {
+ class CGCXXABI;
+ class CGRecordLayout;
+ class CodeGenModule;
+ class RequiredArgs;
+
+/// CodeGenTypes - This class organizes the cross-module state that is used
+/// while lowering AST types to LLVM types.
+class CodeGenTypes {
+ // Some of this stuff should probably be left on the CGM.
+ ASTContext &Context;
+ const TargetInfo &Target;
+ llvm::Module &TheModule;
+ const llvm::TargetData &TheTargetData;
+ const ABIInfo &TheABIInfo;
+ CGCXXABI &TheCXXABI;
+ const CodeGenOptions &CodeGenOpts;
+ CodeGenModule &CGM;
+
+ /// The opaque type map for Objective-C interfaces. All direct
+ /// manipulation is done by the runtime interfaces, which are
+ /// responsible for coercing to the appropriate type; these opaque
+ /// types are never refined.
+ llvm::DenseMap<const ObjCInterfaceType*, llvm::Type *> InterfaceTypes;
+
+  /// CGRecordLayouts - This maps a converted record type (as a clang Type)
+  /// to its corresponding record layout info.
+ llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts;
+
+ /// RecordDeclTypes - This contains the LLVM IR type for any converted
+ /// RecordDecl.
+ llvm::DenseMap<const Type*, llvm::StructType *> RecordDeclTypes;
+
+ /// FunctionInfos - Hold memoized CGFunctionInfo results.
+ llvm::FoldingSet<CGFunctionInfo> FunctionInfos;
+
+ /// RecordsBeingLaidOut - This set keeps track of records that we're currently
+  /// converting to an IR type. For example, when converting
+  /// 'struct A { struct B { int x; }; };', both the 'A' and 'B' types will
+  /// be in this set while 'x' is being processed.
+ llvm::SmallPtrSet<const Type*, 4> RecordsBeingLaidOut;
+
+ llvm::SmallPtrSet<const CGFunctionInfo*, 4> FunctionsBeingProcessed;
+
+  /// SkippedLayout - True if we skipped laying out a function type because it
+  /// was requested while we were inside a recursive struct conversion.
+
+ SmallVector<const RecordDecl *, 8> DeferredRecords;
+
+private:
+  /// TypeCache - This map caches the llvm::Type that each clang::Type has
+  /// been converted to.
+ llvm::DenseMap<const Type *, llvm::Type *> TypeCache;
+
+public:
+ CodeGenTypes(CodeGenModule &CGM);
+ ~CodeGenTypes();
+
+ const llvm::TargetData &getTargetData() const { return TheTargetData; }
+ const TargetInfo &getTarget() const { return Target; }
+ ASTContext &getContext() const { return Context; }
+ const ABIInfo &getABIInfo() const { return TheABIInfo; }
+ const CodeGenOptions &getCodeGenOpts() const { return CodeGenOpts; }
+ CGCXXABI &getCXXABI() const { return TheCXXABI; }
+ llvm::LLVMContext &getLLVMContext() { return TheModule.getContext(); }
+
+ /// ConvertType - Convert type T into a llvm::Type.
+ llvm::Type *ConvertType(QualType T);
+
+ /// ConvertTypeForMem - Convert type T into a llvm::Type. This differs from
+ /// ConvertType in that it is used to convert to the memory representation for
+ /// a type. For example, the scalar representation for _Bool is i1, but the
+ /// memory representation is usually i8 or i32, depending on the target.
+ llvm::Type *ConvertTypeForMem(QualType T);
+
+ /// GetFunctionType - Get the LLVM function type for \arg Info.
+ llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info);
+
+ llvm::FunctionType *GetFunctionType(GlobalDecl GD);
+
+ /// isFuncTypeConvertible - Utility to check whether a function type can
+ /// be converted to an LLVM type (i.e. doesn't depend on an incomplete tag
+ /// type).
+ bool isFuncTypeConvertible(const FunctionType *FT);
+ bool isFuncTypeArgumentConvertible(QualType Ty);
+
+  /// GetFunctionTypeForVTable - Get the LLVM function type for use in a vtable,
+  /// given a CXXMethodDecl. If the method has an incomplete return type and/or
+  /// incomplete argument types, this will return the opaque type.
+ llvm::Type *GetFunctionTypeForVTable(GlobalDecl GD);
+
+ const CGRecordLayout &getCGRecordLayout(const RecordDecl*);
+
+ /// UpdateCompletedType - When we find the full definition for a TagDecl,
+ /// replace the 'opaque' type we previously made for it if applicable.
+ void UpdateCompletedType(const TagDecl *TD);
+
+  /// arrangeNullaryFunction - Get the function info for a void()
+  /// function with standard CC.
+ const CGFunctionInfo &arrangeNullaryFunction();
+
+ // The arrangement methods are split into three families:
+ // - those meant to drive the signature and prologue/epilogue
+ // of a function declaration or definition,
+ // - those meant for the computation of the LLVM type for an abstract
+ // appearance of a function, and
+ // - those meant for performing the IR-generation of a call.
+ // They differ mainly in how they deal with optional (i.e. variadic)
+ // arguments, as well as unprototyped functions.
+ //
+ // Key points:
+ // - The CGFunctionInfo for emitting a specific call site must include
+ // entries for the optional arguments.
+ // - The function type used at the call site must reflect the formal
+ // signature of the declaration being called, or else the call will
+ // go awry.
+ // - For the most part, unprototyped functions are called by casting to
+ // a formal signature inferred from the specific argument types used
+ // at the call-site. However, some targets (e.g. x86-64) screw with
+ // this for compatibility reasons.
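+  //
+  // A rough illustration: for a call such as
+  //   printf("%d %f\n", 1, 2.0);
+  // the arrangement used for printf's declaration covers only the prototype
+  // (const char *, ...), while the CGFunctionInfo arranged for this specific
+  // call site must also carry entries for the promoted optional arguments
+  // (int and double) so the call is lowered with the right argument types.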
+
+ const CGFunctionInfo &arrangeGlobalDeclaration(GlobalDecl GD);
+ const CGFunctionInfo &arrangeFunctionDeclaration(const FunctionDecl *FD);
+ const CGFunctionInfo &arrangeFunctionDeclaration(QualType ResTy,
+ const FunctionArgList &Args,
+ const FunctionType::ExtInfo &Info,
+ bool isVariadic);
+
+ const CGFunctionInfo &arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD);
+ const CGFunctionInfo &arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
+ QualType receiverType);
+
+ const CGFunctionInfo &arrangeCXXMethodDeclaration(const CXXMethodDecl *MD);
+ const CGFunctionInfo &arrangeCXXConstructorDeclaration(
+ const CXXConstructorDecl *D,
+ CXXCtorType Type);
+ const CGFunctionInfo &arrangeCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType Type);
+
+ const CGFunctionInfo &arrangeFunctionCall(const CallArgList &Args,
+ const FunctionType *Ty);
+ const CGFunctionInfo &arrangeFunctionCall(QualType ResTy,
+ const CallArgList &args,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs required);
+
+ const CGFunctionInfo &arrangeFunctionType(CanQual<FunctionProtoType> Ty);
+ const CGFunctionInfo &arrangeFunctionType(CanQual<FunctionNoProtoType> Ty);
+ const CGFunctionInfo &arrangeCXXMethodType(const CXXRecordDecl *RD,
+ const FunctionProtoType *FTP);
+
+ /// Retrieves the ABI information for the given function signature.
+ /// This is the "core" routine to which all the others defer.
+ ///
+ /// \param argTypes - must all actually be canonical as params
+ const CGFunctionInfo &arrangeFunctionType(CanQualType returnType,
+ ArrayRef<CanQualType> argTypes,
+ const FunctionType::ExtInfo &info,
+ RequiredArgs args);
+
+ /// \brief Compute a new LLVM record layout object for the given record.
+ CGRecordLayout *ComputeRecordLayout(const RecordDecl *D,
+ llvm::StructType *Ty);
+
+ /// addRecordTypeName - Compute a name from the given record decl with an
+ /// optional suffix and name the given LLVM type using it.
+ void addRecordTypeName(const RecordDecl *RD, llvm::StructType *Ty,
+ StringRef suffix);
+
+
+public: // These are internal details of CGT that shouldn't be used externally.
+ /// ConvertRecordDeclType - Lay out a tagged decl type like struct or union.
+ llvm::StructType *ConvertRecordDeclType(const RecordDecl *TD);
+
+  /// GetExpandedTypes - Expand the type \arg type into the LLVM argument types
+  /// it would be passed as, appending them to the provided vector
+  /// \arg expanded. See ABIArgInfo::Expand.
+ void GetExpandedTypes(QualType type,
+ SmallVectorImpl<llvm::Type*> &expanded);
+
+  /// isZeroInitializable - Return whether a type can be
+  /// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
+ bool isZeroInitializable(QualType T);
+
+  /// isZeroInitializable - Return whether a record type can be
+  /// zero-initialized (in the C++ sense) with an LLVM zeroinitializer.
+ bool isZeroInitializable(const CXXRecordDecl *RD);
+
+ bool isRecordLayoutComplete(const Type *Ty) const;
+ bool noRecordsBeingLaidOut() const {
+ return RecordsBeingLaidOut.empty();
+ }
+ bool isRecordBeingLaidOut(const Type *Ty) const {
+ return RecordsBeingLaidOut.count(Ty);
+ }
+
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
new file mode 100644
index 0000000..98f67f3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -0,0 +1,1202 @@
+//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ code generation targeting the Itanium C++ ABI. The class
+// in this file generates structures that follow the Itanium C++ ABI, which is
+// documented at:
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+// http://www.codesourcery.com/public/cxx-abi/abi-eh.html
+//
+// It also supports the closely-related ARM ABI, documented at:
+// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0041c/IHI0041C_cppabi.pdf
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+#include "CGRecordLayout.h"
+#include "CodeGenFunction.h"
+#include "CodeGenModule.h"
+#include <clang/AST/Mangle.h>
+#include <clang/AST/Type.h>
+#include <llvm/Intrinsics.h>
+#include <llvm/Target/TargetData.h>
+#include <llvm/Value.h>
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+class ItaniumCXXABI : public CodeGen::CGCXXABI {
+private:
+ llvm::IntegerType *PtrDiffTy;
+protected:
+ bool IsARM;
+
+ // It's a little silly for us to cache this.
+ llvm::IntegerType *getPtrDiffTy() {
+ if (!PtrDiffTy) {
+ QualType T = getContext().getPointerDiffType();
+ llvm::Type *Ty = CGM.getTypes().ConvertType(T);
+ PtrDiffTy = cast<llvm::IntegerType>(Ty);
+ }
+ return PtrDiffTy;
+ }
+
+ bool NeedsArrayCookie(const CXXNewExpr *expr);
+ bool NeedsArrayCookie(const CXXDeleteExpr *expr,
+ QualType elementType);
+
+public:
+ ItaniumCXXABI(CodeGen::CodeGenModule &CGM, bool IsARM = false) :
+ CGCXXABI(CGM), PtrDiffTy(0), IsARM(IsARM) { }
+
+ bool isZeroInitializable(const MemberPointerType *MPT);
+
+ llvm::Type *ConvertMemberPointerType(const MemberPointerType *MPT);
+
+ llvm::Value *EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemFnPtr,
+ const MemberPointerType *MPT);
+
+ llvm::Value *EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT);
+
+ llvm::Value *EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *Src);
+ llvm::Constant *EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *Src);
+
+ llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
+
+ llvm::Constant *EmitMemberPointer(const CXXMethodDecl *MD);
+ llvm::Constant *EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset);
+ llvm::Constant *EmitMemberPointer(const APValue &MP, QualType MPT);
+ llvm::Constant *BuildMemberPointer(const CXXMethodDecl *MD,
+ CharUnits ThisAdjustment);
+
+ llvm::Value *EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality);
+
+ llvm::Value *EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *Addr,
+ const MemberPointerType *MPT);
+
+ void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType T,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys);
+
+ void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType T,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys);
+
+ void BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params);
+
+ void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
+
+ CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
+ llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType);
+ void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize);
+
+ void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::GlobalVariable *DeclPtr, bool PerformInit);
+};
+
+class ARMCXXABI : public ItaniumCXXABI {
+public:
+ ARMCXXABI(CodeGen::CodeGenModule &CGM) : ItaniumCXXABI(CGM, /*ARM*/ true) {}
+
+ void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType T,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys);
+
+ void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType T,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys);
+
+ void BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params);
+
+ void EmitInstanceFunctionProlog(CodeGenFunction &CGF);
+
+ void EmitReturnFromThunk(CodeGenFunction &CGF, RValue RV, QualType ResTy);
+
+ CharUnits GetArrayCookieSize(const CXXNewExpr *expr);
+ llvm::Value *InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType);
+ void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize);
+
+private:
+ /// \brief Returns true if the given instance method is one of the
+ /// kinds that the ARM ABI says returns 'this'.
+ static bool HasThisReturn(GlobalDecl GD) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ return ((isa<CXXDestructorDecl>(MD) && GD.getDtorType() != Dtor_Deleting) ||
+ (isa<CXXConstructorDecl>(MD)));
+ }
+};
+}
+
+CodeGen::CGCXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
+ return new ItaniumCXXABI(CGM);
+}
+
+CodeGen::CGCXXABI *CodeGen::CreateARMCXXABI(CodeGenModule &CGM) {
+ return new ARMCXXABI(CGM);
+}
+
+llvm::Type *
+ItaniumCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
+ if (MPT->isMemberDataPointer())
+ return getPtrDiffTy();
+ return llvm::StructType::get(getPtrDiffTy(), getPtrDiffTy(), NULL);
+}
+
+/// In the Itanium and ARM ABIs, method pointers have the form:
+/// struct { ptrdiff_t ptr; ptrdiff_t adj; } memptr;
+///
+/// In the Itanium ABI:
+/// - method pointers are virtual if (memptr.ptr & 1) is nonzero
+/// - the this-adjustment is (memptr.adj)
+/// - the virtual offset is (memptr.ptr - 1)
+///
+/// In the ARM ABI:
+/// - method pointers are virtual if (memptr.adj & 1) is nonzero
+/// - the this-adjustment is (memptr.adj >> 1)
+/// - the virtual offset is (memptr.ptr)
+/// ARM uses 'adj' for the virtual flag because Thumb functions
+/// may be only single-byte aligned.
+///
+/// If the member is virtual, the adjusted 'this' pointer points
+/// to a vtable pointer from which the virtual offset is applied.
+///
+/// If the member is non-virtual, memptr.ptr is the address of
+/// the function to call.
+llvm::Value *
+ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(CodeGenFunction &CGF,
+ llvm::Value *&This,
+ llvm::Value *MemFnPtr,
+ const MemberPointerType *MPT) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ const FunctionProtoType *FPT =
+ MPT->getPointeeType()->getAs<FunctionProtoType>();
+ const CXXRecordDecl *RD =
+ cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
+
+ llvm::FunctionType *FTy =
+ CGM.getTypes().GetFunctionType(
+ CGM.getTypes().arrangeCXXMethodType(RD, FPT));
+
+ llvm::IntegerType *ptrdiff = getPtrDiffTy();
+ llvm::Constant *ptrdiff_1 = llvm::ConstantInt::get(ptrdiff, 1);
+
+ llvm::BasicBlock *FnVirtual = CGF.createBasicBlock("memptr.virtual");
+ llvm::BasicBlock *FnNonVirtual = CGF.createBasicBlock("memptr.nonvirtual");
+ llvm::BasicBlock *FnEnd = CGF.createBasicBlock("memptr.end");
+
+ // Extract memptr.adj, which is in the second field.
+ llvm::Value *RawAdj = Builder.CreateExtractValue(MemFnPtr, 1, "memptr.adj");
+
+ // Compute the true adjustment.
+ llvm::Value *Adj = RawAdj;
+ if (IsARM)
+ Adj = Builder.CreateAShr(Adj, ptrdiff_1, "memptr.adj.shifted");
+
+ // Apply the adjustment and cast back to the original struct type
+ // for consistency.
+ llvm::Value *Ptr = Builder.CreateBitCast(This, Builder.getInt8PtrTy());
+ Ptr = Builder.CreateInBoundsGEP(Ptr, Adj);
+ This = Builder.CreateBitCast(Ptr, This->getType(), "this.adjusted");
+
+ // Load the function pointer.
+ llvm::Value *FnAsInt = Builder.CreateExtractValue(MemFnPtr, 0, "memptr.ptr");
+
+ // If the LSB in the function pointer is 1, the function pointer points to
+ // a virtual function.
+ llvm::Value *IsVirtual;
+ if (IsARM)
+ IsVirtual = Builder.CreateAnd(RawAdj, ptrdiff_1);
+ else
+ IsVirtual = Builder.CreateAnd(FnAsInt, ptrdiff_1);
+ IsVirtual = Builder.CreateIsNotNull(IsVirtual, "memptr.isvirtual");
+ Builder.CreateCondBr(IsVirtual, FnVirtual, FnNonVirtual);
+
+ // In the virtual path, the adjustment left 'This' pointing to the
+ // vtable of the correct base subobject. The "function pointer" is an
+ // offset within the vtable (+1 for the virtual flag on non-ARM).
+ CGF.EmitBlock(FnVirtual);
+
+ // Cast the adjusted this to a pointer to vtable pointer and load.
+ llvm::Type *VTableTy = Builder.getInt8PtrTy();
+ llvm::Value *VTable = Builder.CreateBitCast(This, VTableTy->getPointerTo());
+ VTable = Builder.CreateLoad(VTable, "memptr.vtable");
+
+ // Apply the offset.
+ llvm::Value *VTableOffset = FnAsInt;
+ if (!IsARM) VTableOffset = Builder.CreateSub(VTableOffset, ptrdiff_1);
+ VTable = Builder.CreateGEP(VTable, VTableOffset);
+
+ // Load the virtual function to call.
+ VTable = Builder.CreateBitCast(VTable, FTy->getPointerTo()->getPointerTo());
+ llvm::Value *VirtualFn = Builder.CreateLoad(VTable, "memptr.virtualfn");
+ CGF.EmitBranch(FnEnd);
+
+  // In the non-virtual path, memptr.ptr already holds an ordinary function
+  // pointer; just reinterpret it as the callee.
+ CGF.EmitBlock(FnNonVirtual);
+ llvm::Value *NonVirtualFn =
+ Builder.CreateIntToPtr(FnAsInt, FTy->getPointerTo(), "memptr.nonvirtualfn");
+
+ // We're done.
+ CGF.EmitBlock(FnEnd);
+ llvm::PHINode *Callee = Builder.CreatePHI(FTy->getPointerTo(), 2);
+ Callee->addIncoming(VirtualFn, FnVirtual);
+ Callee->addIncoming(NonVirtualFn, FnNonVirtual);
+ return Callee;
+}
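+
+// A rough trace of the code above for an Itanium member function pointer with
+// the value { ptr = 17, adj = 8 } (assuming 8-byte vtable slots; illustrative
+// only):
+//   this   <- this + 8                        (apply memptr.adj)
+//   17 & 1 != 0, so take the virtual path:
+//     vtable <- load from the adjusted this
+//     callee <- load from vtable + (17 - 1), i.e. vtable slot 2
+// Had ptr been even, it would have been used directly as the callee address.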
+
+/// Compute an l-value by applying the given pointer-to-member to a
+/// base object.
+llvm::Value *ItaniumCXXABI::EmitMemberDataPointerAddress(CodeGenFunction &CGF,
+ llvm::Value *Base,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ assert(MemPtr->getType() == getPtrDiffTy());
+
+ CGBuilderTy &Builder = CGF.Builder;
+
+ unsigned AS = cast<llvm::PointerType>(Base->getType())->getAddressSpace();
+
+ // Cast to char*.
+ Base = Builder.CreateBitCast(Base, Builder.getInt8Ty()->getPointerTo(AS));
+
+ // Apply the offset, which we assume is non-null.
+ llvm::Value *Addr = Builder.CreateInBoundsGEP(Base, MemPtr, "memptr.offset");
+
+ // Cast the address to the appropriate pointer type, adopting the
+ // address space of the base pointer.
+ llvm::Type *PType
+ = CGF.ConvertTypeForMem(MPT->getPointeeType())->getPointerTo(AS);
+ return Builder.CreateBitCast(Addr, PType);
+}
+
+/// Perform a bitcast, derived-to-base, or base-to-derived member pointer
+/// conversion.
+///
+/// Bitcast conversions are always a no-op under Itanium.
+///
+/// Obligatory offset/adjustment diagram:
+/// <-- offset --> <-- adjustment -->
+/// |--------------------------|----------------------|--------------------|
+/// ^Derived address point ^Base address point ^Member address point
+///
+/// So when converting a base member pointer to a derived member pointer,
+/// we add the offset to the adjustment because the address point has
+/// decreased; and conversely, when converting a derived MP to a base MP
+/// we subtract the offset from the adjustment because the address point
+/// has increased.
+///
+/// The standard forbids (at compile time) conversion to and from
+/// virtual bases, which is why we don't have to consider them here.
+///
+/// The standard forbids (at run time) casting a derived MP to a base
+/// MP when the derived MP does not point to a member of the base.
+/// This is why -1 is a reasonable choice for null data member
+/// pointers.
+llvm::Value *
+ItaniumCXXABI::EmitMemberPointerConversion(CodeGenFunction &CGF,
+ const CastExpr *E,
+ llvm::Value *src) {
+ assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
+ E->getCastKind() == CK_BaseToDerivedMemberPointer ||
+ E->getCastKind() == CK_ReinterpretMemberPointer);
+
+ // Under Itanium, reinterprets don't require any additional processing.
+ if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
+
+ // Use constant emission if we can.
+ if (isa<llvm::Constant>(src))
+ return EmitMemberPointerConversion(E, cast<llvm::Constant>(src));
+
+ llvm::Constant *adj = getMemberPointerAdjustment(E);
+ if (!adj) return src;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
+
+ const MemberPointerType *destTy =
+ E->getType()->castAs<MemberPointerType>();
+
+ // For member data pointers, this is just a matter of adding the
+ // offset if the source is non-null.
+ if (destTy->isMemberDataPointer()) {
+ llvm::Value *dst;
+ if (isDerivedToBase)
+ dst = Builder.CreateNSWSub(src, adj, "adj");
+ else
+ dst = Builder.CreateNSWAdd(src, adj, "adj");
+
+ // Null check.
+ llvm::Value *null = llvm::Constant::getAllOnesValue(src->getType());
+ llvm::Value *isNull = Builder.CreateICmpEQ(src, null, "memptr.isnull");
+ return Builder.CreateSelect(isNull, src, dst);
+ }
+
+ // The this-adjustment is left-shifted by 1 on ARM.
+ if (IsARM) {
+ uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
+ offset <<= 1;
+ adj = llvm::ConstantInt::get(adj->getType(), offset);
+ }
+
+ llvm::Value *srcAdj = Builder.CreateExtractValue(src, 1, "src.adj");
+ llvm::Value *dstAdj;
+ if (isDerivedToBase)
+ dstAdj = Builder.CreateNSWSub(srcAdj, adj, "adj");
+ else
+ dstAdj = Builder.CreateNSWAdd(srcAdj, adj, "adj");
+
+ return Builder.CreateInsertValue(src, dstAdj, 1);
+}
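+
+// A rough worked example for member data pointers, assuming the Base
+// subobject lives at offset 8 within Derived (so the adjustment is 8) and
+// that Base::x sits at offset 4:
+//   int Base::*pb = &Base::x;     // pb encodes the offset 4
+//   int Derived::*pd = pb;        // base-to-derived: pd encodes 4 + 8 = 12
+//   int Base::*pn = 0;            // null is -1 and is left at -1 by the
+//                                 // null check above, in either direction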
+
+llvm::Constant *
+ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
+ llvm::Constant *src) {
+ assert(E->getCastKind() == CK_DerivedToBaseMemberPointer ||
+ E->getCastKind() == CK_BaseToDerivedMemberPointer ||
+ E->getCastKind() == CK_ReinterpretMemberPointer);
+
+ // Under Itanium, reinterprets don't require any additional processing.
+ if (E->getCastKind() == CK_ReinterpretMemberPointer) return src;
+
+ // If the adjustment is trivial, we don't need to do anything.
+ llvm::Constant *adj = getMemberPointerAdjustment(E);
+ if (!adj) return src;
+
+ bool isDerivedToBase = (E->getCastKind() == CK_DerivedToBaseMemberPointer);
+
+ const MemberPointerType *destTy =
+ E->getType()->castAs<MemberPointerType>();
+
+ // For member data pointers, this is just a matter of adding the
+ // offset if the source is non-null.
+ if (destTy->isMemberDataPointer()) {
+ // null maps to null.
+ if (src->isAllOnesValue()) return src;
+
+ if (isDerivedToBase)
+ return llvm::ConstantExpr::getNSWSub(src, adj);
+ else
+ return llvm::ConstantExpr::getNSWAdd(src, adj);
+ }
+
+ // The this-adjustment is left-shifted by 1 on ARM.
+ if (IsARM) {
+ uint64_t offset = cast<llvm::ConstantInt>(adj)->getZExtValue();
+ offset <<= 1;
+ adj = llvm::ConstantInt::get(adj->getType(), offset);
+ }
+
+ llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
+ llvm::Constant *dstAdj;
+ if (isDerivedToBase)
+ dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
+ else
+ dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
+
+ return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
+}
+
+llvm::Constant *
+ItaniumCXXABI::EmitNullMemberPointer(const MemberPointerType *MPT) {
+ llvm::Type *ptrdiff_t = getPtrDiffTy();
+
+ // Itanium C++ ABI 2.3:
+ // A NULL pointer is represented as -1.
+ if (MPT->isMemberDataPointer())
+ return llvm::ConstantInt::get(ptrdiff_t, -1ULL, /*isSigned=*/true);
+
+ llvm::Constant *Zero = llvm::ConstantInt::get(ptrdiff_t, 0);
+ llvm::Constant *Values[2] = { Zero, Zero };
+ return llvm::ConstantStruct::getAnon(Values);
+}
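+
+// Concretely, per the ABI rule cited above:
+//   int  X::*pd = 0;       // data member pointer: the ptrdiff_t value -1
+//   void (X::*pf)() = 0;   // function member pointer: { ptr = 0, adj = 0 }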
+
+llvm::Constant *
+ItaniumCXXABI::EmitMemberDataPointer(const MemberPointerType *MPT,
+ CharUnits offset) {
+ // Itanium C++ ABI 2.3:
+ // A pointer to data member is an offset from the base address of
+ // the class object containing it, represented as a ptrdiff_t
+ return llvm::ConstantInt::get(getPtrDiffTy(), offset.getQuantity());
+}
+
+llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const CXXMethodDecl *MD) {
+ return BuildMemberPointer(MD, CharUnits::Zero());
+}
+
+llvm::Constant *ItaniumCXXABI::BuildMemberPointer(const CXXMethodDecl *MD,
+ CharUnits ThisAdjustment) {
+ assert(MD->isInstance() && "Member function must not be static!");
+ MD = MD->getCanonicalDecl();
+
+ CodeGenTypes &Types = CGM.getTypes();
+ llvm::Type *ptrdiff_t = getPtrDiffTy();
+
+ // Get the function pointer (or index if this is a virtual function).
+ llvm::Constant *MemPtr[2];
+ if (MD->isVirtual()) {
+ uint64_t Index = CGM.getVTableContext().getMethodVTableIndex(MD);
+
+ const ASTContext &Context = getContext();
+ CharUnits PointerWidth =
+ Context.toCharUnitsFromBits(Context.getTargetInfo().getPointerWidth(0));
+ uint64_t VTableOffset = (Index * PointerWidth.getQuantity());
+
+ if (IsARM) {
+ // ARM C++ ABI 3.2.1:
+ // This ABI specifies that adj contains twice the this
+ // adjustment, plus 1 if the member function is virtual. The
+ // least significant bit of adj then makes exactly the same
+ // discrimination as the least significant bit of ptr does for
+ // Itanium.
+ MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t,
+ 2 * ThisAdjustment.getQuantity() + 1);
+ } else {
+ // Itanium C++ ABI 2.3:
+ // For a virtual function, [the pointer field] is 1 plus the
+ // virtual table offset (in bytes) of the function,
+ // represented as a ptrdiff_t.
+ MemPtr[0] = llvm::ConstantInt::get(ptrdiff_t, VTableOffset + 1);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t,
+ ThisAdjustment.getQuantity());
+ }
+ } else {
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+ llvm::Type *Ty;
+ // Check whether the function has a computable LLVM signature.
+ if (Types.isFuncTypeConvertible(FPT)) {
+ // The function has a computable LLVM signature; use the correct type.
+ Ty = Types.GetFunctionType(Types.arrangeCXXMethodDeclaration(MD));
+ } else {
+ // Use an arbitrary non-function type to tell GetAddrOfFunction that the
+ // function type is incomplete.
+ Ty = ptrdiff_t;
+ }
+ llvm::Constant *addr = CGM.GetAddrOfFunction(MD, Ty);
+
+ MemPtr[0] = llvm::ConstantExpr::getPtrToInt(addr, ptrdiff_t);
+ MemPtr[1] = llvm::ConstantInt::get(ptrdiff_t, (IsARM ? 2 : 1) *
+ ThisAdjustment.getQuantity());
+ }
+
+ return llvm::ConstantStruct::getAnon(MemPtr);
+}
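+
+// A rough worked example of the encodings built above, for a method reached
+// through a this-adjustment of 8 bytes (assuming 8-byte pointers and vtable
+// slots; illustrative only):
+//   virtual, vtable slot 3:  Itanium { ptr = 3*8 + 1 = 25, adj = 8 }
+//                            ARM     { ptr = 3*8 = 24,     adj = 2*8 + 1 = 17 }
+//   non-virtual:             Itanium { ptr = function address, adj = 8 }
+//                            ARM     { ptr = function address, adj = 2*8 = 16 }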
+
+llvm::Constant *ItaniumCXXABI::EmitMemberPointer(const APValue &MP,
+ QualType MPType) {
+ const MemberPointerType *MPT = MPType->castAs<MemberPointerType>();
+ const ValueDecl *MPD = MP.getMemberPointerDecl();
+ if (!MPD)
+ return EmitNullMemberPointer(MPT);
+
+ // Compute the this-adjustment.
+ CharUnits ThisAdjustment = CharUnits::Zero();
+ ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
+ bool DerivedMember = MP.isMemberPointerToDerivedMember();
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ const CXXRecordDecl *Base = RD;
+ const CXXRecordDecl *Derived = Path[I];
+ if (DerivedMember)
+ std::swap(Base, Derived);
+ ThisAdjustment +=
+ getContext().getASTRecordLayout(Derived).getBaseClassOffset(Base);
+ RD = Path[I];
+ }
+ if (DerivedMember)
+ ThisAdjustment = -ThisAdjustment;
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MPD))
+ return BuildMemberPointer(MD, ThisAdjustment);
+
+ CharUnits FieldOffset =
+ getContext().toCharUnitsFromBits(getContext().getFieldOffset(MPD));
+ return EmitMemberDataPointer(MPT, ThisAdjustment + FieldOffset);
+}
+
+/// The comparison algorithm is pretty easy: the member pointers are
+/// the same if they're either bitwise identical *or* both null.
+///
+/// ARM is different here only because null-ness is more complicated.
+llvm::Value *
+ItaniumCXXABI::EmitMemberPointerComparison(CodeGenFunction &CGF,
+ llvm::Value *L,
+ llvm::Value *R,
+ const MemberPointerType *MPT,
+ bool Inequality) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::ICmpInst::Predicate Eq;
+ llvm::Instruction::BinaryOps And, Or;
+ if (Inequality) {
+ Eq = llvm::ICmpInst::ICMP_NE;
+ And = llvm::Instruction::Or;
+ Or = llvm::Instruction::And;
+ } else {
+ Eq = llvm::ICmpInst::ICMP_EQ;
+ And = llvm::Instruction::And;
+ Or = llvm::Instruction::Or;
+ }
+
+ // Member data pointers are easy because there's a unique null
+ // value, so it just comes down to bitwise equality.
+ if (MPT->isMemberDataPointer())
+ return Builder.CreateICmp(Eq, L, R);
+
+ // For member function pointers, the tautologies are more complex.
+ // The Itanium tautology is:
+ // (L == R) <==> (L.ptr == R.ptr && (L.ptr == 0 || L.adj == R.adj))
+ // The ARM tautology is:
+ // (L == R) <==> (L.ptr == R.ptr &&
+ // (L.adj == R.adj ||
+ // (L.ptr == 0 && ((L.adj|R.adj) & 1) == 0)))
+ // The inequality tautologies have exactly the same structure, except
+ // applying De Morgan's laws.
+
+ llvm::Value *LPtr = Builder.CreateExtractValue(L, 0, "lhs.memptr.ptr");
+ llvm::Value *RPtr = Builder.CreateExtractValue(R, 0, "rhs.memptr.ptr");
+
+ // This condition tests whether L.ptr == R.ptr. This must always be
+ // true for equality to hold.
+ llvm::Value *PtrEq = Builder.CreateICmp(Eq, LPtr, RPtr, "cmp.ptr");
+
+ // This condition, together with the assumption that L.ptr == R.ptr,
+ // tests whether the pointers are both null. ARM imposes an extra
+ // condition.
+ llvm::Value *Zero = llvm::Constant::getNullValue(LPtr->getType());
+ llvm::Value *EqZero = Builder.CreateICmp(Eq, LPtr, Zero, "cmp.ptr.null");
+
+ // This condition tests whether L.adj == R.adj. If this isn't
+ // true, the pointers are unequal unless they're both null.
+ llvm::Value *LAdj = Builder.CreateExtractValue(L, 1, "lhs.memptr.adj");
+ llvm::Value *RAdj = Builder.CreateExtractValue(R, 1, "rhs.memptr.adj");
+ llvm::Value *AdjEq = Builder.CreateICmp(Eq, LAdj, RAdj, "cmp.adj");
+
+ // Null member function pointers on ARM clear the low bit of Adj,
+ // so the zero condition has to check that neither low bit is set.
+ if (IsARM) {
+ llvm::Value *One = llvm::ConstantInt::get(LPtr->getType(), 1);
+
+ // Compute (l.adj | r.adj) & 1 and test it against zero.
+ llvm::Value *OrAdj = Builder.CreateOr(LAdj, RAdj, "or.adj");
+ llvm::Value *OrAdjAnd1 = Builder.CreateAnd(OrAdj, One);
+ llvm::Value *OrAdjAnd1EqZero = Builder.CreateICmp(Eq, OrAdjAnd1, Zero,
+ "cmp.or.adj");
+ EqZero = Builder.CreateBinOp(And, EqZero, OrAdjAnd1EqZero);
+ }
+
+ // Tie together all our conditions.
+ llvm::Value *Result = Builder.CreateBinOp(Or, EqZero, AdjEq);
+ Result = Builder.CreateBinOp(And, PtrEq, Result,
+ Inequality ? "memptr.ne" : "memptr.eq");
+ return Result;
+}
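+
+// For example, a null Itanium member function pointer whose adj field was
+// perturbed by a base-to-derived conversion, say { ptr = 0, adj = 16 }, still
+// compares equal to { ptr = 0, adj = 0 }: ptr matches and is zero, so the
+// differing adj values are ignored by the tautology above.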
+
+llvm::Value *
+ItaniumCXXABI::EmitMemberPointerIsNotNull(CodeGenFunction &CGF,
+ llvm::Value *MemPtr,
+ const MemberPointerType *MPT) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ /// For member data pointers, this is just a check against -1.
+ if (MPT->isMemberDataPointer()) {
+ assert(MemPtr->getType() == getPtrDiffTy());
+ llvm::Value *NegativeOne =
+ llvm::Constant::getAllOnesValue(MemPtr->getType());
+ return Builder.CreateICmpNE(MemPtr, NegativeOne, "memptr.tobool");
+ }
+
+ // In Itanium, a member function pointer is not null if 'ptr' is not null.
+ llvm::Value *Ptr = Builder.CreateExtractValue(MemPtr, 0, "memptr.ptr");
+
+ llvm::Constant *Zero = llvm::ConstantInt::get(Ptr->getType(), 0);
+ llvm::Value *Result = Builder.CreateICmpNE(Ptr, Zero, "memptr.tobool");
+
+ // On ARM, a member function pointer is also non-null if the low bit of 'adj'
+ // (the virtual bit) is set.
+ if (IsARM) {
+ llvm::Constant *One = llvm::ConstantInt::get(Ptr->getType(), 1);
+ llvm::Value *Adj = Builder.CreateExtractValue(MemPtr, 1, "memptr.adj");
+ llvm::Value *VirtualBit = Builder.CreateAnd(Adj, One, "memptr.virtualbit");
+ llvm::Value *IsVirtual = Builder.CreateICmpNE(VirtualBit, Zero,
+ "memptr.isvirtual");
+ Result = Builder.CreateOr(Result, IsVirtual);
+ }
+
+ return Result;
+}
+
+/// The Itanium ABI requires non-zero initialization only for data
+/// member pointers, for which '0' is a valid offset.
+bool ItaniumCXXABI::isZeroInitializable(const MemberPointerType *MPT) {
+ return MPT->getPointeeType()->isFunctionType();
+}
+
+/// The generic ABI passes 'this', plus a VTT if it's initializing a
+/// base subobject.
+void ItaniumCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType Type,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) {
+ ASTContext &Context = getContext();
+
+ // 'this' is already there.
+
+ // Check if we need to add a VTT parameter (which has type void **).
+ if (Type == Ctor_Base && Ctor->getParent()->getNumVBases() != 0)
+ ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+}
+
+/// The ARM ABI does the same as the Itanium ABI, but returns 'this'.
+void ARMCXXABI::BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType Type,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) {
+ ItaniumCXXABI::BuildConstructorSignature(Ctor, Type, ResTy, ArgTys);
+ ResTy = ArgTys[0];
+}
+
+/// The generic ABI passes 'this', plus a VTT if it's destroying a
+/// base subobject.
+void ItaniumCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType Type,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) {
+ ASTContext &Context = getContext();
+
+ // 'this' is already there.
+
+ // Check if we need to add a VTT parameter (which has type void **).
+ if (Type == Dtor_Base && Dtor->getParent()->getNumVBases() != 0)
+ ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
+}
+
+/// The ARM ABI does the same as the Itanium ABI, but returns 'this'
+/// for non-deleting destructors.
+void ARMCXXABI::BuildDestructorSignature(const CXXDestructorDecl *Dtor,
+ CXXDtorType Type,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) {
+ ItaniumCXXABI::BuildDestructorSignature(Dtor, Type, ResTy, ArgTys);
+
+ if (Type != Dtor_Deleting)
+ ResTy = ArgTys[0];
+}
+
+void ItaniumCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) {
+ /// Create the 'this' variable.
+ BuildThisParam(CGF, Params);
+
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(CGF.CurGD.getDecl());
+ assert(MD->isInstance());
+
+ // Check if we need a VTT parameter as well.
+ if (CodeGenVTables::needsVTTParameter(CGF.CurGD)) {
+ ASTContext &Context = getContext();
+
+ // FIXME: avoid the fake decl
+ QualType T = Context.getPointerType(Context.VoidPtrTy);
+ ImplicitParamDecl *VTTDecl
+ = ImplicitParamDecl::Create(Context, 0, MD->getLocation(),
+ &Context.Idents.get("vtt"), T);
+ Params.push_back(VTTDecl);
+ getVTTDecl(CGF) = VTTDecl;
+ }
+}
+
+void ARMCXXABI::BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) {
+ ItaniumCXXABI::BuildInstanceFunctionParams(CGF, ResTy, Params);
+
+ // Return 'this' from certain constructors and destructors.
+ if (HasThisReturn(CGF.CurGD))
+ ResTy = Params[0]->getType();
+}
+
+void ItaniumCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+ /// Initialize the 'this' slot.
+ EmitThisParam(CGF);
+
+ /// Initialize the 'vtt' slot if needed.
+ if (getVTTDecl(CGF)) {
+ getVTTValue(CGF)
+ = CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(getVTTDecl(CGF)),
+ "vtt");
+ }
+}
+
+void ARMCXXABI::EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+ ItaniumCXXABI::EmitInstanceFunctionProlog(CGF);
+
+ /// Initialize the return slot to 'this' at the start of the
+ /// function.
+ if (HasThisReturn(CGF.CurGD))
+ CGF.Builder.CreateStore(getThisValue(CGF), CGF.ReturnValue);
+}
+
+void ARMCXXABI::EmitReturnFromThunk(CodeGenFunction &CGF,
+ RValue RV, QualType ResultType) {
+ if (!isa<CXXDestructorDecl>(CGF.CurGD.getDecl()))
+ return ItaniumCXXABI::EmitReturnFromThunk(CGF, RV, ResultType);
+
+ // Destructor thunks in the ARM ABI have indeterminate results.
+ llvm::Type *T =
+ cast<llvm::PointerType>(CGF.ReturnValue->getType())->getElementType();
+ RValue Undef = RValue::get(llvm::UndefValue::get(T));
+ return ItaniumCXXABI::EmitReturnFromThunk(CGF, Undef, ResultType);
+}
+
+/************************** Array allocation cookies **************************/
+
+bool ItaniumCXXABI::NeedsArrayCookie(const CXXNewExpr *expr) {
+ // If the class's usual deallocation function takes two arguments,
+ // it needs a cookie.
+ if (expr->doesUsualArrayDeleteWantSize())
+ return true;
+
+ // Automatic Reference Counting:
+ // We need an array cookie for pointers with strong or weak lifetime.
+ QualType AllocatedType = expr->getAllocatedType();
+ if (getContext().getLangOpts().ObjCAutoRefCount &&
+ AllocatedType->isObjCLifetimeType()) {
+ switch (AllocatedType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ return true;
+ }
+ }
+
+ // Otherwise, if the class has a non-trivial destructor, it always
+ // needs a cookie.
+ const CXXRecordDecl *record =
+ AllocatedType->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ return (record && !record->hasTrivialDestructor());
+}
+
+bool ItaniumCXXABI::NeedsArrayCookie(const CXXDeleteExpr *expr,
+ QualType elementType) {
+ // If the class's usual deallocation function takes two arguments,
+ // it needs a cookie.
+ if (expr->doesUsualArrayDeleteWantSize())
+ return true;
+
+ return elementType.isDestructedType();
+}
+
+CharUnits ItaniumCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
+ if (!NeedsArrayCookie(expr))
+ return CharUnits::Zero();
+
+ // Padding is the maximum of sizeof(size_t) and alignof(elementType)
+ ASTContext &Ctx = getContext();
+ return std::max(Ctx.getTypeSizeInChars(Ctx.getSizeType()),
+ Ctx.getTypeAlignInChars(expr->getAllocatedType()));
+}
+
+llvm::Value *ItaniumCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) {
+ assert(NeedsArrayCookie(expr));
+
+ unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
+
+ ASTContext &Ctx = getContext();
+ QualType SizeTy = Ctx.getSizeType();
+ CharUnits SizeSize = Ctx.getTypeSizeInChars(SizeTy);
+
+ // The size of the cookie.
+ CharUnits CookieSize =
+ std::max(SizeSize, Ctx.getTypeAlignInChars(ElementType));
+
+ // Compute an offset to the cookie.
+ llvm::Value *CookiePtr = NewPtr;
+ CharUnits CookieOffset = CookieSize - SizeSize;
+ if (!CookieOffset.isZero())
+ CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_64(CookiePtr,
+ CookieOffset.getQuantity());
+
+ // Write the number of elements into the appropriate slot.
+ llvm::Value *NumElementsPtr
+ = CGF.Builder.CreateBitCast(CookiePtr,
+ CGF.ConvertType(SizeTy)->getPointerTo(AS));
+ CGF.Builder.CreateStore(NumElements, NumElementsPtr);
+
+ // Finally, compute a pointer to the actual data buffer by skipping
+ // over the cookie completely.
+ return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
+ CookieSize.getQuantity());
+}
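+
+// A rough worked example, assuming an 8-byte size_t:
+//   struct S { ~S(); double d; };   // alignof(S) == 8
+//     cookie size = max(8, 8) = 8; the count is stored at [NewPtr, NewPtr+8)
+//     and the array data begins at NewPtr + 8.
+//   struct T { ~T(); /* some member forcing alignof(T) == 16 */ };
+//     cookie size = max(8, 16) = 16; the count is stored at
+//     [NewPtr+8, NewPtr+16) and the array data begins at NewPtr + 16.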
+
+void ItaniumCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
+ QualType ElementType,
+ llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr,
+ CharUnits &CookieSize) {
+ // Derive a char* in the same address space as the pointer.
+ unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
+
+ // If we don't need an array cookie, bail out early.
+ if (!NeedsArrayCookie(expr, ElementType)) {
+ AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
+ NumElements = 0;
+ CookieSize = CharUnits::Zero();
+ return;
+ }
+
+ QualType SizeTy = getContext().getSizeType();
+ CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
+ llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
+
+ CookieSize
+ = std::max(SizeSize, getContext().getTypeAlignInChars(ElementType));
+
+ CharUnits NumElementsOffset = CookieSize - SizeSize;
+
+ // Compute the allocated pointer.
+ AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
+ AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
+ -CookieSize.getQuantity());
+
+ llvm::Value *NumElementsPtr = AllocPtr;
+ if (!NumElementsOffset.isZero())
+ NumElementsPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(NumElementsPtr,
+ NumElementsOffset.getQuantity());
+ NumElementsPtr =
+ CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo(AS));
+ NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
+}
+
+CharUnits ARMCXXABI::GetArrayCookieSize(const CXXNewExpr *expr) {
+ if (!NeedsArrayCookie(expr))
+ return CharUnits::Zero();
+
+ // On ARM, the cookie is always:
+ // struct array_cookie {
+ // std::size_t element_size; // element_size != 0
+ // std::size_t element_count;
+ // };
+ // TODO: what should we do if the allocated type actually wants
+ // greater alignment?
+ return getContext().getTypeSizeInChars(getContext().getSizeType()) * 2;
+}
+
+llvm::Value *ARMCXXABI::InitializeArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *NewPtr,
+ llvm::Value *NumElements,
+ const CXXNewExpr *expr,
+ QualType ElementType) {
+ assert(NeedsArrayCookie(expr));
+
+ // NewPtr is a char*.
+
+ unsigned AS = cast<llvm::PointerType>(NewPtr->getType())->getAddressSpace();
+
+ ASTContext &Ctx = getContext();
+ CharUnits SizeSize = Ctx.getTypeSizeInChars(Ctx.getSizeType());
+ llvm::IntegerType *SizeTy =
+ cast<llvm::IntegerType>(CGF.ConvertType(Ctx.getSizeType()));
+
+ // The cookie is always at the start of the buffer.
+ llvm::Value *CookiePtr = NewPtr;
+
+ // The first element is the element size.
+ CookiePtr = CGF.Builder.CreateBitCast(CookiePtr, SizeTy->getPointerTo(AS));
+ llvm::Value *ElementSize = llvm::ConstantInt::get(SizeTy,
+ Ctx.getTypeSizeInChars(ElementType).getQuantity());
+ CGF.Builder.CreateStore(ElementSize, CookiePtr);
+
+ // The second element is the element count.
+ CookiePtr = CGF.Builder.CreateConstInBoundsGEP1_32(CookiePtr, 1);
+ CGF.Builder.CreateStore(NumElements, CookiePtr);
+
+ // Finally, compute a pointer to the actual data buffer by skipping
+ // over the cookie completely.
+ CharUnits CookieSize = 2 * SizeSize;
+ return CGF.Builder.CreateConstInBoundsGEP1_64(NewPtr,
+ CookieSize.getQuantity());
+}
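+
+// A rough worked example, assuming an 8-byte size_t and sizeof(S) == 4:
+//   struct S { ~S(); int x; };
+//   new S[100];   // the buffer begins with the cookie
+//                 // { element_size = 4, element_count = 100 }, and the
+//                 // S objects start at NewPtr + 16 (2 * sizeof(size_t))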
+
+void ARMCXXABI::ReadArrayCookie(CodeGenFunction &CGF,
+ llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
+ QualType ElementType,
+ llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr,
+ CharUnits &CookieSize) {
+ // Derive a char* in the same address space as the pointer.
+ unsigned AS = cast<llvm::PointerType>(Ptr->getType())->getAddressSpace();
+ llvm::Type *CharPtrTy = CGF.Builder.getInt8Ty()->getPointerTo(AS);
+
+ // If we don't need an array cookie, bail out early.
+ if (!NeedsArrayCookie(expr, ElementType)) {
+ AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
+ NumElements = 0;
+ CookieSize = CharUnits::Zero();
+ return;
+ }
+
+ QualType SizeTy = getContext().getSizeType();
+ CharUnits SizeSize = getContext().getTypeSizeInChars(SizeTy);
+ llvm::Type *SizeLTy = CGF.ConvertType(SizeTy);
+
+ // The cookie size is always 2 * sizeof(size_t).
+ CookieSize = 2 * SizeSize;
+
+ // The allocated pointer is the input ptr, minus that amount.
+ AllocPtr = CGF.Builder.CreateBitCast(Ptr, CharPtrTy);
+ AllocPtr = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
+ -CookieSize.getQuantity());
+
+ // The number of elements is at offset sizeof(size_t) relative to that.
+ llvm::Value *NumElementsPtr
+ = CGF.Builder.CreateConstInBoundsGEP1_64(AllocPtr,
+ SizeSize.getQuantity());
+ NumElementsPtr =
+ CGF.Builder.CreateBitCast(NumElementsPtr, SizeLTy->getPointerTo(AS));
+ NumElements = CGF.Builder.CreateLoad(NumElementsPtr);
+}
+
+/*********************** Static local initialization **************************/
+
+static llvm::Constant *getGuardAcquireFn(CodeGenModule &CGM,
+ llvm::PointerType *GuardPtrTy) {
+ // int __cxa_guard_acquire(__guard *guard_object);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.getTypes().ConvertType(CGM.getContext().IntTy),
+ GuardPtrTy, /*isVarArg=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_acquire",
+ llvm::Attribute::NoUnwind);
+}
+
+static llvm::Constant *getGuardReleaseFn(CodeGenModule &CGM,
+ llvm::PointerType *GuardPtrTy) {
+ // void __cxa_guard_release(__guard *guard_object);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_release",
+ llvm::Attribute::NoUnwind);
+}
+
+static llvm::Constant *getGuardAbortFn(CodeGenModule &CGM,
+ llvm::PointerType *GuardPtrTy) {
+ // void __cxa_guard_abort(__guard *guard_object);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, GuardPtrTy, /*isVarArg=*/false);
+
+ return CGM.CreateRuntimeFunction(FTy, "__cxa_guard_abort",
+ llvm::Attribute::NoUnwind);
+}
+
+namespace {
+ struct CallGuardAbort : EHScopeStack::Cleanup {
+ llvm::GlobalVariable *Guard;
+ CallGuardAbort(llvm::GlobalVariable *Guard) : Guard(Guard) {}
+
+ void Emit(CodeGenFunction &CGF, Flags flags) {
+ CGF.Builder.CreateCall(getGuardAbortFn(CGF.CGM, Guard->getType()), Guard)
+ ->setDoesNotThrow();
+ }
+ };
+}
+
+/// The ARM code here follows the Itanium code closely enough that we
+/// just special-case it at particular places.
+void ItaniumCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
+ const VarDecl &D,
+ llvm::GlobalVariable *var,
+ bool shouldPerformInit) {
+ CGBuilderTy &Builder = CGF.Builder;
+
+ // We only need to use thread-safe statics for local variables;
+ // global initialization is always single-threaded.
+ bool threadsafe =
+ (getContext().getLangOpts().ThreadsafeStatics && D.isLocalVarDecl());
+
+ // If we have a global variable with internal linkage and thread-safe statics
+ // are disabled, we can just let the guard variable be of type i8.
+ bool useInt8GuardVariable = !threadsafe && var->hasInternalLinkage();
+
+ llvm::IntegerType *guardTy;
+ if (useInt8GuardVariable) {
+ guardTy = CGF.Int8Ty;
+ } else {
+ // Guard variables are 64 bits in the generic ABI and 32 bits on ARM.
+ guardTy = (IsARM ? CGF.Int32Ty : CGF.Int64Ty);
+ }
+ llvm::PointerType *guardPtrTy = guardTy->getPointerTo();
+
+ // Create the guard variable if we don't already have it (as we
+ // might if we're double-emitting this function body).
+ llvm::GlobalVariable *guard = CGM.getStaticLocalDeclGuardAddress(&D);
+ if (!guard) {
+ // Mangle the name for the guard.
+ SmallString<256> guardName;
+ {
+ llvm::raw_svector_ostream out(guardName);
+ getMangleContext().mangleItaniumGuardVariable(&D, out);
+ out.flush();
+ }
+
+ // Create the guard variable with a zero-initializer.
+ // Just absorb linkage and visibility from the guarded variable.
+ guard = new llvm::GlobalVariable(CGM.getModule(), guardTy,
+ false, var->getLinkage(),
+ llvm::ConstantInt::get(guardTy, 0),
+ guardName.str());
+ guard->setVisibility(var->getVisibility());
+
+ CGM.setStaticLocalDeclGuardAddress(&D, guard);
+ }
+
+ // Test whether the variable has completed initialization.
+ llvm::Value *isInitialized;
+
+ // ARM C++ ABI 3.2.3.1:
+ // To support the potential use of initialization guard variables
+ // as semaphores that are the target of ARM SWP and LDREX/STREX
+ // synchronizing instructions we define a static initialization
+  //   guard variable to be a 4-byte aligned, 4-byte word with the
+ // following inline access protocol.
+ // #define INITIALIZED 1
+ // if ((obj_guard & INITIALIZED) != INITIALIZED) {
+ // if (__cxa_guard_acquire(&obj_guard))
+ // ...
+ // }
+ if (IsARM && !useInt8GuardVariable) {
+ llvm::Value *V = Builder.CreateLoad(guard);
+ V = Builder.CreateAnd(V, Builder.getInt32(1));
+ isInitialized = Builder.CreateIsNull(V, "guard.uninitialized");
+
+ // Itanium C++ ABI 3.3.2:
+ // The following is pseudo-code showing how these functions can be used:
+ // if (obj_guard.first_byte == 0) {
+ // if ( __cxa_guard_acquire (&obj_guard) ) {
+ // try {
+ // ... initialize the object ...;
+ // } catch (...) {
+ // __cxa_guard_abort (&obj_guard);
+ // throw;
+ // }
+ // ... queue object destructor with __cxa_atexit() ...;
+ // __cxa_guard_release (&obj_guard);
+ // }
+ // }
+ } else {
+ // Load the first byte of the guard variable.
+ llvm::LoadInst *LI =
+ Builder.CreateLoad(Builder.CreateBitCast(guard, CGM.Int8PtrTy));
+ LI->setAlignment(1);
+
+ // Itanium ABI:
+ // An implementation supporting thread-safety on multiprocessor
+ // systems must also guarantee that references to the initialized
+ // object do not occur before the load of the initialization flag.
+ //
+ // In LLVM, we do this by marking the load Acquire.
+ if (threadsafe)
+ LI->setAtomic(llvm::Acquire);
+
+ isInitialized = Builder.CreateIsNull(LI, "guard.uninitialized");
+ }
+
+ llvm::BasicBlock *InitCheckBlock = CGF.createBasicBlock("init.check");
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("init.end");
+
+ // Check if the first byte of the guard variable is zero.
+ Builder.CreateCondBr(isInitialized, InitCheckBlock, EndBlock);
+
+ CGF.EmitBlock(InitCheckBlock);
+
+ // Variables used when coping with thread-safe statics and exceptions.
+ if (threadsafe) {
+ // Call __cxa_guard_acquire.
+ llvm::Value *V
+ = Builder.CreateCall(getGuardAcquireFn(CGM, guardPtrTy), guard);
+
+ llvm::BasicBlock *InitBlock = CGF.createBasicBlock("init");
+
+ Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
+ InitBlock, EndBlock);
+
+ // Call __cxa_guard_abort along the exceptional edge.
+ CGF.EHStack.pushCleanup<CallGuardAbort>(EHCleanup, guard);
+
+ CGF.EmitBlock(InitBlock);
+ }
+
+ // Emit the initializer and add a global destructor if appropriate.
+ CGF.EmitCXXGlobalVarDeclInit(D, var, shouldPerformInit);
+
+ if (threadsafe) {
+ // Pop the guard-abort cleanup if we pushed one.
+ CGF.PopCleanupBlock();
+
+ // Call __cxa_guard_release. This cannot throw.
+ Builder.CreateCall(getGuardReleaseFn(CGM, guardPtrTy), guard);
+ } else {
+ Builder.CreateStore(llvm::ConstantInt::get(guardTy, 1), guard);
+ }
+
+ CGF.EmitBlock(EndBlock);
+}
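+
+// The source pattern this lowers is an ordinary function-local static with a
+// dynamic initializer, e.g. (illustrative only):
+//
+//   int compute();
+//   int &get() {
+//     static int cached = compute();   // guarded initialization
+//     return cached;
+//   }
+//
+// With thread-safe statics enabled, the generic ABI tests the first byte of a
+// 64-bit guard with an acquire load (ARM tests bit 0 of a 32-bit guard), and
+// __cxa_guard_acquire / __cxa_guard_release bracket the call to compute();
+// without them, the guard is simply checked and then stored to directly.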
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
new file mode 100644
index 0000000..825e041
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -0,0 +1,95 @@
+//===--- MicrosoftCXXABI.cpp - Emit LLVM Code from ASTs for a Module ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ code generation targeting the Microsoft Visual C++ ABI.
+// The class in this file generates structures that follow the Microsoft
+// Visual C++ ABI, which is actually not very well documented at all outside
+// of Microsoft.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+#include "CodeGenModule.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace {
+
+class MicrosoftCXXABI : public CGCXXABI {
+public:
+ MicrosoftCXXABI(CodeGenModule &CGM) : CGCXXABI(CGM) {}
+
+ void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
+ CXXCtorType Type,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) {
+ // 'this' is already in place
+ // TODO: 'for base' flag
+ }
+
+ void BuildDestructorSignature(const CXXDestructorDecl *Ctor,
+ CXXDtorType Type,
+ CanQualType &ResTy,
+ SmallVectorImpl<CanQualType> &ArgTys) {
+ // 'this' is already in place
+ // TODO: 'for base' flag
+ }
+
+ void BuildInstanceFunctionParams(CodeGenFunction &CGF,
+ QualType &ResTy,
+ FunctionArgList &Params) {
+ BuildThisParam(CGF, Params);
+ // TODO: 'for base' flag
+ }
+
+ void EmitInstanceFunctionProlog(CodeGenFunction &CGF) {
+ EmitThisParam(CGF);
+ // TODO: 'for base' flag
+ }
+
+ // ==== Notes on array cookies =========
+ //
+ // MSVC seems to only use cookies when the class has a destructor; a
+ // two-argument usual array deallocation function isn't sufficient.
+ //
+ // For example, this code prints "100" and "1":
+ // struct A {
+ // char x;
+ // void *operator new[](size_t sz) {
+ // printf("%u\n", sz);
+ // return malloc(sz);
+ // }
+ // void operator delete[](void *p, size_t sz) {
+ // printf("%u\n", sz);
+ // free(p);
+ // }
+ // };
+ // int main() {
+ // A *p = new A[100];
+ // delete[] p;
+ // }
+ // Whereas it prints "104" and "104" if you give A a destructor.
+ void ReadArrayCookie(CodeGenFunction &CGF, llvm::Value *Ptr,
+ const CXXDeleteExpr *expr,
+ QualType ElementType, llvm::Value *&NumElements,
+ llvm::Value *&AllocPtr, CharUnits &CookieSize) {
+ CGF.CGM.ErrorUnsupported(expr, "don't know how to handle array cookies "
+ "in the Microsoft C++ ABI");
+ }
+};
+
+}
+
+CGCXXABI *clang::CodeGen::CreateMicrosoftCXXABI(CodeGenModule &CGM) {
+ return new MicrosoftCXXABI(CGM);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
new file mode 100644
index 0000000..ea2389e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -0,0 +1,127 @@
+//===--- ModuleBuilder.cpp - Emit LLVM Code from ASTs ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This builds an AST and converts it to LLVM Code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/ModuleBuilder.h"
+#include "CodeGenModule.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/ADT/OwningPtr.h"
+using namespace clang;
+
+namespace {
+ class CodeGeneratorImpl : public CodeGenerator {
+ DiagnosticsEngine &Diags;
+ OwningPtr<const llvm::TargetData> TD;
+ ASTContext *Ctx;
+ const CodeGenOptions CodeGenOpts; // Intentionally copied in.
+ protected:
+ OwningPtr<llvm::Module> M;
+ OwningPtr<CodeGen::CodeGenModule> Builder;
+ public:
+ CodeGeneratorImpl(DiagnosticsEngine &diags, const std::string& ModuleName,
+ const CodeGenOptions &CGO, llvm::LLVMContext& C)
+ : Diags(diags), CodeGenOpts(CGO), M(new llvm::Module(ModuleName, C)) {}
+
+ virtual ~CodeGeneratorImpl() {}
+
+ virtual llvm::Module* GetModule() {
+ return M.get();
+ }
+
+ virtual llvm::Module* ReleaseModule() {
+ return M.take();
+ }
+
+ virtual void Initialize(ASTContext &Context) {
+ Ctx = &Context;
+
+ M->setTargetTriple(Ctx->getTargetInfo().getTriple().getTriple());
+ M->setDataLayout(Ctx->getTargetInfo().getTargetDescription());
+ TD.reset(new llvm::TargetData(Ctx->getTargetInfo().getTargetDescription()));
+ Builder.reset(new CodeGen::CodeGenModule(Context, CodeGenOpts,
+ *M, *TD, Diags));
+ }
+
+ virtual void HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+ Builder->HandleCXXStaticMemberVarInstantiation(VD);
+ }
+
+ virtual bool HandleTopLevelDecl(DeclGroupRef DG) {
+ // Make sure to emit all elements of a Decl.
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
+ Builder->EmitTopLevelDecl(*I);
+ return true;
+ }
+
+    /// HandleTagDeclDefinition - This callback is invoked each time a TagDecl
+    /// (e.g. struct, union, enum, class) is completed. This allows the client
+    /// to hack on the type, which can occur at any point in the file
+    /// (because these can be defined in declspecs).
+ virtual void HandleTagDeclDefinition(TagDecl *D) {
+ Builder->UpdateCompletedType(D);
+
+ // In C++, we may have member functions that need to be emitted at this
+ // point.
+ if (Ctx->getLangOpts().CPlusPlus && !D->isDependentContext()) {
+ for (DeclContext::decl_iterator M = D->decls_begin(),
+ MEnd = D->decls_end();
+ M != MEnd; ++M)
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(*M))
+ if (Method->doesThisDeclarationHaveABody() &&
+ (Method->hasAttr<UsedAttr>() ||
+ Method->hasAttr<ConstructorAttr>()))
+ Builder->EmitTopLevelDecl(Method);
+ }
+ }
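+
+    // For example, completing the definition below causes 'f' to be emitted
+    // right away, since it has a body and carries the 'used' attribute
+    // (illustrative only):
+    //   struct S { __attribute__((used)) void f() {} };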
+
+ virtual void HandleTranslationUnit(ASTContext &Ctx) {
+ if (Diags.hasErrorOccurred()) {
+ M.reset();
+ return;
+ }
+
+ if (Builder)
+ Builder->Release();
+ }
+
+ virtual void CompleteTentativeDefinition(VarDecl *D) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ Builder->EmitTentativeDefinition(D);
+ }
+
+ virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ Builder->EmitVTable(RD, DefinitionRequired);
+ }
+ };
+}
+
+void CodeGenerator::anchor() { }
+
+CodeGenerator *clang::CreateLLVMCodeGen(DiagnosticsEngine &Diags,
+ const std::string& ModuleName,
+ const CodeGenOptions &CGO,
+ llvm::LLVMContext& C) {
+ return new CodeGeneratorImpl(Diags, ModuleName, CGO, C);
+}
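+
+// A minimal usage sketch, based only on the interface above (Diags, CGOpts,
+// ASTCtx and DG stand for a caller's DiagnosticsEngine, CodeGenOptions,
+// ASTContext and DeclGroupRef; in practice these callbacks are driven by the
+// frontend rather than called by hand):
+//
+//   llvm::LLVMContext Ctx;
+//   CodeGenerator *CG = CreateLLVMCodeGen(Diags, "my_module", CGOpts, Ctx);
+//   CG->Initialize(ASTCtx);                // set triple/layout, build the CGM
+//   CG->HandleTopLevelDecl(DG);            // once per parsed decl group
+//   CG->HandleTranslationUnit(ASTCtx);     // finalize the module
+//   llvm::Module *M = CG->ReleaseModule(); // take ownership of the IR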
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
new file mode 100644
index 0000000..3ed1778
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.cpp
@@ -0,0 +1,3694 @@
+//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliancy.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TargetInfo.h"
+#include "ABIInfo.h"
+#include "CodeGenFunction.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/Type.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace CodeGen;
+
+static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
+ llvm::Value *Array,
+ llvm::Value *Value,
+ unsigned FirstIndex,
+ unsigned LastIndex) {
+ // Alternatively, we could emit this as a loop in the source.
+ for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
+ llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
+ Builder.CreateStore(Value, Cell);
+ }
+}
+
+static bool isAggregateTypeForABI(QualType T) {
+ return CodeGenFunction::hasAggregateLLVMType(T) ||
+ T->isMemberFunctionPointerType();
+}
+
+ABIInfo::~ABIInfo() {}
+
+ASTContext &ABIInfo::getContext() const {
+ return CGT.getContext();
+}
+
+llvm::LLVMContext &ABIInfo::getVMContext() const {
+ return CGT.getLLVMContext();
+}
+
+const llvm::TargetData &ABIInfo::getTargetData() const {
+ return CGT.getTargetData();
+}
+
+
+void ABIArgInfo::dump() const {
+ raw_ostream &OS = llvm::errs();
+ OS << "(ABIArgInfo Kind=";
+ switch (TheKind) {
+ case Direct:
+ OS << "Direct Type=";
+ if (llvm::Type *Ty = getCoerceToType())
+ Ty->print(OS);
+ else
+ OS << "null";
+ break;
+ case Extend:
+ OS << "Extend";
+ break;
+ case Ignore:
+ OS << "Ignore";
+ break;
+ case Indirect:
+ OS << "Indirect Align=" << getIndirectAlign()
+ << " ByVal=" << getIndirectByVal()
+ << " Realign=" << getIndirectRealign();
+ break;
+ case Expand:
+ OS << "Expand";
+ break;
+ }
+ OS << ")\n";
+}
+
+TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }
+
+// If someone can figure out a general rule for this, that would be great.
+// It's probably just doomed to be platform-dependent, though.
+unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
+ // Verified for:
+ // x86-64 FreeBSD, Linux, Darwin
+ // x86-32 FreeBSD, Linux, Darwin
+ // PowerPC Linux, Darwin
+ // ARM Darwin (*not* EABI)
+ return 32;
+}
+
+bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
+ const FunctionNoProtoType *fnType) const {
+ // The following conventions are known to require this to be false:
+ // x86_stdcall
+ // MIPS
+ // For everything else, we just prefer false unless we opt out.
+ return false;
+}
+
+static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
+
+/// isEmptyField - Return true iff the field is "empty", that is, it
+/// is an unnamed bit-field or an (array of) empty record(s).
+static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
+ bool AllowArrays) {
+ if (FD->isUnnamedBitfield())
+ return true;
+
+ QualType FT = FD->getType();
+
+ // Constant arrays of empty records count as empty; strip them off.
+ // Constant arrays of zero length always count as empty.
+ if (AllowArrays)
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize() == 0)
+ return true;
+ FT = AT->getElementType();
+ }
+
+ const RecordType *RT = FT->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ // C++ record fields are never empty, at least in the Itanium ABI.
+ //
+ // FIXME: We should use a predicate for whether this behavior is true in the
+ // current ABI.
+ if (isa<CXXRecordDecl>(RT->getDecl()))
+ return false;
+
+ return isEmptyRecord(Context, FT, AllowArrays);
+}
+
+/// isEmptyRecord - Return true iff a structure contains only empty
+/// fields. Note that a structure with a flexible array member is not
+/// considered empty.
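+/// For example, a struct containing only an unnamed zero-width bit-field is
+/// empty, as is (when AllowArrays is set) a struct containing only an array
+/// of such structs.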
+static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i)
+ if (!isEmptyRecord(Context, i->getType(), true))
+ return false;
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i)
+ if (!isEmptyField(Context, *i, AllowArrays))
+ return false;
+ return true;
+}
+
+/// hasNonTrivialDestructorOrCopyConstructor - Determine if a type has either
+/// a non-trivial destructor or a non-trivial copy constructor.
+static bool hasNonTrivialDestructorOrCopyConstructor(const RecordType *RT) {
+ const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD)
+ return false;
+
+ return !RD->hasTrivialDestructor() || !RD->hasTrivialCopyConstructor();
+}
+
+/// isRecordWithNonTrivialDestructorOrCopyConstructor - Determine if a type is
+/// a record type with either a non-trivial destructor or a non-trivial copy
+/// constructor.
+static bool isRecordWithNonTrivialDestructorOrCopyConstructor(QualType T) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ return hasNonTrivialDestructorOrCopyConstructor(RT);
+}
+
+/// isSingleElementStruct - Determine if a structure is a "single
+/// element struct", i.e. it has exactly one non-empty field or
+/// exactly one field which is itself a single element
+/// struct. Structures with flexible array members are never
+/// considered single element structs.
+///
+/// \return The field declaration for the single non-empty field, if
+/// it exists.
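+///
+/// For example, struct { struct { double d; } inner; } is a single element
+/// struct whose element type is double.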
+static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
+ const RecordType *RT = T->getAsStructureType();
+ if (!RT)
+ return 0;
+
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return 0;
+
+ const Type *Found = 0;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i) {
+ // Ignore empty records.
+ if (isEmptyRecord(Context, i->getType(), true))
+ continue;
+
+ // If we already found an element then this isn't a single-element struct.
+ if (Found)
+ return 0;
+
+ // If this is non-empty and not a single element struct, the composite
+ // cannot be a single element struct.
+ Found = isSingleElementStruct(i->getType(), Context);
+ if (!Found)
+ return 0;
+ }
+ }
+
+ // Check for single element.
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+ QualType FT = FD->getType();
+
+ // Ignore empty fields.
+ if (isEmptyField(Context, FD, true))
+ continue;
+
+ // If we already found an element then this isn't a single-element
+ // struct.
+ if (Found)
+ return 0;
+
+ // Treat single element arrays as the element.
+ while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
+ if (AT->getSize().getZExtValue() != 1)
+ break;
+ FT = AT->getElementType();
+ }
+
+ if (!isAggregateTypeForABI(FT)) {
+ Found = FT.getTypePtr();
+ } else {
+ Found = isSingleElementStruct(FT, Context);
+ if (!Found)
+ return 0;
+ }
+ }
+
+ // We don't consider a struct a single-element struct if it has
+ // padding beyond the element type.
+ if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
+ return 0;
+
+ return Found;
+}
+
+static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
+ if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
+ !Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
+ !Ty->isBlockPointerType())
+ return false;
+
+ uint64_t Size = Context.getTypeSize(Ty);
+ return Size == 32 || Size == 64;
+}
+
+/// canExpandIndirectArgument - Test whether an argument type which is to be
+/// passed indirectly (on the stack) would have the equivalent layout if it was
+/// expanded into separate arguments. If so, we prefer to do the latter to avoid
+/// inhibiting optimizations.
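+/// For example, struct { int a; float b; } consists of two 32-bit basic
+/// fields with no padding, so it can be expanded into two scalar arguments.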
+///
+// FIXME: This predicate is missing many cases; currently it just follows
+// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
+// should probably make this smarter, or better yet make the LLVM backend
+// capable of handling it.
+static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
+ // We can only expand structure types.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ // We can only expand (C) structures.
+ //
+ // FIXME: This needs to be generalized to handle classes as well.
+ const RecordDecl *RD = RT->getDecl();
+ if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
+ return false;
+
+ uint64_t Size = 0;
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+
+ if (!is32Or64BitBasicType(FD->getType(), Context))
+ return false;
+
+ // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
+ // how to expand them yet, and the predicate for telling if a bitfield still
+ // counts as "basic" is more complicated than what we were doing previously.
+ if (FD->isBitField())
+ return false;
+
+ Size += Context.getTypeSize(FD->getType());
+ }
+
+ // Make sure there are not any holes in the struct.
+ if (Size != Context.getTypeSize(Ty))
+ return false;
+
+ return true;
+}
+
+namespace {
+/// DefaultABIInfo - The default implementation for ABI specific
+/// details. This implementation provides information which results in
+/// self-consistent and sensible LLVM IR generation, but does not
+/// conform to any particular ABI.
+class DefaultABIInfo : public ABIInfo {
+public:
+ DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+};
+
+llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ return 0;
+}
+
+ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty)) {
+ // Records with non-trivial destructors/copy constructors should not be passed
+ // by value.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
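+ // Promotable integers (e.g. bool, char, short) are marked for sign/zero
+ // extension to int; all other scalars are passed directly.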
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy))
+ return ABIArgInfo::getIndirect(0);
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+/// UseX86_MMXType - Return true if this is an MMX type that should use the
+/// special x86_mmx type.
+bool UseX86_MMXType(llvm::Type *IRType) {
+ // If the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>, use the
+ // special x86_mmx type.
+ return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
+ cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
+ IRType->getScalarSizeInBits() != 64;
+}
+
+static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ StringRef Constraint,
+ llvm::Type* Ty) {
+ if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy())
+ return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
+ return Ty;
+}
+
+//===----------------------------------------------------------------------===//
+// X86-32 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+/// X86_32ABIInfo - The X86-32 ABI information.
+class X86_32ABIInfo : public ABIInfo {
+ static const unsigned MinABIStackAlignInBytes = 4;
+
+ bool IsDarwinVectorABI;
+ bool IsSmallStructInRegABI;
+ bool IsMMXDisabled;
+ bool IsWin32FloatStructABI;
+
+ static bool isRegisterSize(unsigned Size) {
+ return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
+ }
+
+ static bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context,
+ unsigned callingConvention);
+
+ /// getIndirectResult - Given a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ABIArgInfo getIndirectResult(QualType Ty, bool ByVal = true) const;
+
+ /// \brief Return the alignment to use for the given type on the stack.
+ unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;
+
+public:
+
+ ABIArgInfo classifyReturnType(QualType RetTy,
+ unsigned callingConvention) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
+ FI.getCallingConvention());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+
+ X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool m, bool w)
+ : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
+ IsMMXDisabled(m), IsWin32FloatStructABI(w) {}
+};
+
+class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
+ bool d, bool p, bool m, bool w)
+ :TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, m, w)) {}
+
+ void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const;
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+ // Darwin uses different dwarf register numbers for EH.
+ if (CGM.isTargetDarwin()) return 5;
+
+ return 4;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+
+ llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ StringRef Constraint,
+ llvm::Type* Ty) const {
+ return X86AdjustInlineAsmType(CGF, Constraint, Ty);
+ }
+
+};
+
+}
+
+/// shouldReturnTypeInRegister - Determine if the given type should be
+/// returned in a register (for the Darwin ABI).
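+/// For example, a 32-bit struct of two shorts is returned in a register,
+/// while a 12-byte struct is not register sized and is returned in memory.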
+bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
+ ASTContext &Context,
+ unsigned callingConvention) {
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // Type must be register sized.
+ if (!isRegisterSize(Size))
+ return false;
+
+ if (Ty->isVectorType()) {
+ // 64- and 128-bit vectors inside structures are not returned in
+ // registers.
+ if (Size == 64 || Size == 128)
+ return false;
+
+ return true;
+ }
+
+ // If this is a builtin, pointer, enum, complex type, member pointer, or
+ // member function pointer it is ok.
+ if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
+ Ty->isAnyComplexType() || Ty->isEnumeralType() ||
+ Ty->isBlockPointerType() || Ty->isMemberPointerType())
+ return true;
+
+ // Arrays are treated like records.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
+ return shouldReturnTypeInRegister(AT->getElementType(), Context,
+ callingConvention);
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT) return false;
+
+ // FIXME: Traverse bases here too.
+
+ // For thiscall conventions, structures will never be returned in
+ // a register. This is for compatibility with the MSVC ABI.
+ if (callingConvention == llvm::CallingConv::X86_ThisCall &&
+ RT->isStructureType()) {
+ return false;
+ }
+
+ // Structure types are returned in a register if all fields would be
+ // returned in a register.
+ for (RecordDecl::field_iterator i = RT->getDecl()->field_begin(),
+ e = RT->getDecl()->field_end(); i != e; ++i) {
+ const FieldDecl *FD = *i;
+
+ // Empty fields are ignored.
+ if (isEmptyField(Context, FD, true))
+ continue;
+
+ // Check fields recursively.
+ if (!shouldReturnTypeInRegister(FD->getType(), Context,
+ callingConvention))
+ return false;
+ }
+ return true;
+}
+
+ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
+ unsigned callingConvention) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (const VectorType *VT = RetTy->getAs<VectorType>()) {
+ // On Darwin, some vectors are returned in registers.
+ if (IsDarwinVectorABI) {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ // 128-bit vectors are a special case; they are returned in
+ // registers and we need to make sure to pick a type the LLVM
+ // backend will like.
+ if (Size == 128)
+ return ABIArgInfo::getDirect(llvm::VectorType::get(
+ llvm::Type::getInt64Ty(getVMContext()), 2));
+
+ // Always return in register if it fits in a general purpose
+ // register, or if it is 64 bits and has a single element.
+ if ((Size == 8 || Size == 16 || Size == 32) ||
+ (Size == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ return ABIArgInfo::getDirect();
+ }
+
+ if (isAggregateTypeForABI(RetTy)) {
+ if (const RecordType *RT = RetTy->getAs<RecordType>()) {
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (hasNonTrivialDestructorOrCopyConstructor(RT))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ // Structures with flexible arrays are always indirect.
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // If specified, structs and unions are always indirect.
+ if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
+ return ABIArgInfo::getIndirect(0);
+
+ // Small structures which are register sized are generally returned
+ // in a register.
+ if (X86_32ABIInfo::shouldReturnTypeInRegister(RetTy, getContext(),
+ callingConvention)) {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ // As a special-case, if the struct is a "single-element" struct, and
+ // the field is of type "float" or "double", return it in a
+ // floating-point register. (MSVC does not apply this special case.)
+ // We apply a similar transformation for pointer types to improve the
+ // quality of the generated IR.
+ if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
+ if ((!IsWin32FloatStructABI && SeltTy->isRealFloatingType())
+ || SeltTy->hasPointerRepresentation())
+ return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+
+ // FIXME: We should be able to narrow this integer in cases with dead
+ // padding.
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
+ }
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT)
+ return false;
+ const RecordDecl *RD = RT->getDecl();
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i)
+ if (!isRecordWithSSEVectorType(Context, i->getType()))
+ return false;
+
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ QualType FT = i->getType();
+
+ if (FT->getAs<VectorType>() && Context.getTypeSize(FT) == 128)
+ return true;
+
+ if (isRecordWithSSEVectorType(Context, FT))
+ return true;
+ }
+
+ return false;
+}
+
+unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
+ unsigned Align) const {
+ // If the alignment is less than or equal to the minimum ABI
+ // alignment, just use the default; the backend will handle this.
+ if (Align <= MinABIStackAlignInBytes)
+ return 0; // Use default alignment.
+
+ // On non-Darwin, the stack type alignment is always 4.
+ if (!IsDarwinVectorABI) {
+ // Set explicit alignment, since we may need to realign the top.
+ return MinABIStackAlignInBytes;
+ }
+
+ // Otherwise, if the type contains an SSE vector type, the alignment is 16.
+ if (Align >= 16 && isRecordWithSSEVectorType(getContext(), Ty))
+ return 16;
+
+ return MinABIStackAlignInBytes;
+}
+
+ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal) const {
+ if (!ByVal)
+ return ABIArgInfo::getIndirect(0, false);
+
+ // Compute the byval alignment.
+ unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
+ unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
+ if (StackAlign == 0)
+ return ABIArgInfo::getIndirect(4);
+
+ // If the stack alignment is less than the type alignment, realign the
+ // argument.
+ if (StackAlign < TypeAlign)
+ return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true,
+ /*Realign=*/true);
+
+ return ABIArgInfo::getIndirect(StackAlign);
+}
+
+ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty) const {
+ // FIXME: Set alignment on indirect arguments.
+ if (isAggregateTypeForABI(Ty)) {
+ // Structures with flexible arrays are always indirect.
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (hasNonTrivialDestructorOrCopyConstructor(RT))
+ return getIndirectResult(Ty, /*ByVal=*/false);
+
+ if (RT->getDecl()->hasFlexibleArrayMember())
+ return getIndirectResult(Ty);
+ }
+
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ // Expand small (<= 128-bit) record types when we know that the stack layout
+ // of those arguments will match the struct. This is important because the
+ // LLVM backend isn't smart enough to remove byval, which inhibits many
+ // optimizations.
+ if (getContext().getTypeSize(Ty) <= 4*32 &&
+ canExpandIndirectArgument(Ty, getContext()))
+ return ABIArgInfo::getExpand();
+
+ return getIndirectResult(Ty);
+ }
+
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ // On Darwin, some vectors are passed in memory; we handle this by passing
+ // them as an i8/i16/i32/i64.
+ if (IsDarwinVectorABI) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if ((Size == 8 || Size == 16 || Size == 32) ||
+ (Size == 64 && VT->getNumElements() == 1))
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+ }
+
+ llvm::Type *IRType = CGT.ConvertType(Ty);
+ if (UseX86_MMXType(IRType)) {
+ if (IsMMXDisabled)
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ 64));
+ ABIArgInfo AAI = ABIArgInfo::getDirect(IRType);
+ AAI.setCoerceToType(llvm::Type::getX86_MMXTy(getVMContext()));
+ return AAI;
+ }
+
+ return ABIArgInfo::getDirect();
+ }
+
+
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
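+ // On i386 the va_list is just a pointer into the caller's argument area:
+ // align the current pointer for the argument type, hand back that address,
+ // and bump the pointer past the argument's (aligned) size.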
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+
+ // Compute if the address needs to be aligned
+ unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
+ Align = getTypeStackAlignInBytes(Ty, Align);
+ Align = std::max(Align, 4U);
+ if (Align > 4) {
+ // addr = (addr + align - 1) & -align;
+ llvm::Value *Offset =
+ llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
+ Addr = CGF.Builder.CreateGEP(Addr, Offset);
+ llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
+ CGF.Int32Ty);
+ llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
+ Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
+ Addr->getType(),
+ "ap.cur.aligned");
+ }
+
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &CGM) const {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
+ // Get the LLVM function.
+ llvm::Function *Fn = cast<llvm::Function>(GV);
+
+ // Now add the 'alignstack' attribute with a value of 16.
+ Fn->addFnAttr(llvm::Attribute::constructStackAlignmentFromInt(16));
+ }
+ }
+}
+
+bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
+ CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
+
+ // 0-7 are the eight integer registers; the order is different
+ // on Darwin (for EH), but the range is the same.
+ // 8 is %eip.
+ AssignToArrayRange(Builder, Address, Four8, 0, 8);
+
+ if (CGF.CGM.isTargetDarwin()) {
+ // 12-16 are st(0..4). Not sure why we stop at 4.
+ // These have size 16, which is sizeof(long double) on
+ // platforms with 8-byte alignment for that type.
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
+ AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
+
+ } else {
+ // 9 is %eflags, which doesn't get a size on Darwin for some
+ // reason.
+ Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));
+
+ // 11-16 are st(0..5). Not sure why we stop at 5.
+ // These have size 12, which is sizeof(long double) on
+ // platforms with 4-byte alignment for that type.
+ llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
+ AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// X86-64 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+
+namespace {
+/// X86_64ABIInfo - The X86_64 ABI information.
+class X86_64ABIInfo : public ABIInfo {
+ enum Class {
+ Integer = 0,
+ SSE,
+ SSEUp,
+ X87,
+ X87Up,
+ ComplexX87,
+ NoClass,
+ Memory
+ };
+
+ /// merge - Implement the X86_64 ABI merging algorithm.
+ ///
+ /// Merge an accumulating classification \arg Accum with a field
+ /// classification \arg Field.
+ ///
+ /// \param Accum - The accumulating classification. This should
+ /// always be either NoClass or the result of a previous merge
+ /// call. In addition, this should never be Memory (the caller
+ /// should just return Memory for the aggregate).
+ static Class merge(Class Accum, Class Field);
+
+ /// postMerge - Implement the X86_64 ABI post merging algorithm.
+ ///
+ /// Post merger cleanup, reduces a malformed Hi and Lo pair to
+ /// final MEMORY or SSE classes when necessary.
+ ///
+ /// \param AggregateSize - The size of the current aggregate in
+ /// the classification process.
+ ///
+ /// \param Lo - The classification for the parts of the type
+ /// residing in the low word of the containing object.
+ ///
+ /// \param Hi - The classification for the parts of the type
+ /// residing in the higher words of the containing object.
+ ///
+ void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
+
+ /// classify - Determine the x86_64 register classes in which the
+ /// given type T should be passed.
+ ///
+ /// \param Lo - The classification for the parts of the type
+ /// residing in the low word of the containing object.
+ ///
+ /// \param Hi - The classification for the parts of the type
+ /// residing in the high word of the containing object.
+ ///
+ /// \param OffsetBase - The bit offset of this type in the
+ /// containing object. Some parameters are classified different
+ /// depending on whether they straddle an eightbyte boundary.
+ ///
+ /// If a word is unused its result will be NoClass; if a type should
+ /// be passed in Memory then at least the classification of \arg Lo
+ /// will be Memory.
+ ///
+ /// The \arg Lo class will be NoClass iff the argument is ignored.
+ ///
+ /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
+ /// also be ComplexX87.
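+ ///
+ /// For example, classifying struct { double d; int i; } yields Lo = SSE
+ /// and Hi = Integer.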
+ void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi) const;
+
+ llvm::Type *GetByteVectorType(QualType Ty) const;
+ llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
+ llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
+ unsigned IROffset, QualType SourceTy,
+ unsigned SourceOffset) const;
+
+ /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
+ /// result such that the argument will be returned in memory.
+ ABIArgInfo getIndirectReturnResult(QualType Ty) const;
+
+ /// getIndirectResult - Given a source type \arg Ty, return a suitable result
+ /// such that the argument will be passed in memory.
+ ///
+ /// \param freeIntRegs - The number of free integer registers remaining
+ /// available.
+ ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ ABIArgInfo classifyArgumentType(QualType Ty,
+ unsigned freeIntRegs,
+ unsigned &neededInt,
+ unsigned &neededSSE) const;
+
+ bool IsIllegalVectorType(QualType Ty) const;
+
+ /// The 0.98 ABI revision clarified a lot of ambiguities,
+ /// unfortunately in ways that were not always consistent with
+ /// certain previous compilers. In particular, platforms which
+ /// required strict binary compatibility with older versions of GCC
+ /// may need to exempt themselves.
+ bool honorsRevision0_98() const {
+ return !getContext().getTargetInfo().getTriple().isOSDarwin();
+ }
+
+ bool HasAVX;
+
+public:
+ X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
+ ABIInfo(CGT), HasAVX(hasavx) {}
+
+ bool isPassedUsingAVXType(QualType type) const {
+ unsigned neededInt, neededSSE;
+ // The freeIntRegs argument doesn't matter here.
+ ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE);
+ if (info.isDirect()) {
+ llvm::Type *ty = info.getCoerceToType();
+ if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
+ return (vectorTy->getBitWidth() > 128);
+ }
+ return false;
+ }
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
+class WinX86_64ABIInfo : public ABIInfo {
+
+ ABIArgInfo classify(QualType Ty) const;
+
+public:
+ WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
+ : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}
+
+ const X86_64ABIInfo &getABIInfo() const {
+ return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
+ }
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+ return 7;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
+
+ // 0-15 are the 16 integer registers.
+ // 16 is %rip.
+ AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
+ return false;
+ }
+
+ llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ StringRef Constraint,
+ llvm::Type* Ty) const {
+ return X86AdjustInlineAsmType(CGF, Constraint, Ty);
+ }
+
+ bool isNoProtoCallVariadic(const CallArgList &args,
+ const FunctionNoProtoType *fnType) const {
+ // The default CC on x86-64 sets %al to the number of SSE
+ // registers used, and GCC sets this when calling an unprototyped
+ // function, so we override the default behavior. However, don't do
+ // that when AVX types are involved: the ABI explicitly states it is
+ // undefined, and it doesn't work in practice because of how the ABI
+ // defines varargs anyway.
+ if (fnType->getCallConv() == CC_Default || fnType->getCallConv() == CC_C) {
+ bool HasAVXType = false;
+ for (CallArgList::const_iterator
+ it = args.begin(), ie = args.end(); it != ie; ++it) {
+ if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
+ HasAVXType = true;
+ break;
+ }
+ }
+
+ if (!HasAVXType)
+ return true;
+ }
+
+ return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
+ }
+
+};
+
+class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+ return 7;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
+
+ // 0-15 are the 16 integer registers.
+ // 16 is %rip.
+ AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
+ return false;
+ }
+};
+
+}
+
+void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
+ Class &Hi) const {
+ // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
+ //
+ // (a) If one of the classes is Memory, the whole argument is passed in
+ // memory.
+ //
+ // (b) If X87UP is not preceded by X87, the whole argument is passed in
+ // memory.
+ //
+ // (c) If the size of the aggregate exceeds two eightbytes and the first
+ // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
+ // argument is passed in memory. NOTE: This is necessary to keep the
+ // ABI working for processors that don't support the __m256 type.
+ //
+ // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
+ //
+ // Some of these are enforced by the merging logic. Others can arise
+ // only with unions; for example:
+ // union { _Complex double; unsigned; }
+ //
+ // Note that clauses (b) and (c) were added in 0.98.
+ //
+ if (Hi == Memory)
+ Lo = Memory;
+ if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
+ Lo = Memory;
+ if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
+ Lo = Memory;
+ if (Hi == SSEUp && Lo != SSE)
+ Hi = SSE;
+}
+
+X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
+ // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
+ // classified recursively so that always two fields are
+ // considered. The resulting class is calculated according to
+ // the classes of the fields in the eightbyte:
+ //
+ // (a) If both classes are equal, this is the resulting class.
+ //
+ // (b) If one of the classes is NO_CLASS, the resulting class is
+ // the other class.
+ //
+ // (c) If one of the classes is MEMORY, the result is the MEMORY
+ // class.
+ //
+ // (d) If one of the classes is INTEGER, the result is the
+ // INTEGER.
+ //
+ // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
+ // MEMORY is used as class.
+ //
+ // (f) Otherwise class SSE is used.
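+ //
+ // For example, within the single eightbyte of struct { float f; int i; },
+ // merge(SSE, Integer) yields Integer, so the whole struct is passed in a
+ // general purpose register (when one is available).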
+
+ // Accum should never be memory (we should have returned) or
+ // ComplexX87 (because this cannot be passed in a structure).
+ assert((Accum != Memory && Accum != ComplexX87) &&
+ "Invalid accumulated classification during merge.");
+ if (Accum == Field || Field == NoClass)
+ return Accum;
+ if (Field == Memory)
+ return Memory;
+ if (Accum == NoClass)
+ return Field;
+ if (Accum == Integer || Field == Integer)
+ return Integer;
+ if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
+ Accum == X87 || Accum == X87Up)
+ return Memory;
+ return SSE;
+}
+
+void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
+ Class &Lo, Class &Hi) const {
+ // FIXME: This code can be simplified by introducing a simple value class for
+ // Class pairs with appropriate constructor methods for the various
+ // situations.
+
+ // FIXME: Some of the split computations are wrong; unaligned vectors
+ // shouldn't be passed in registers for example, so there is no chance they
+ // can straddle an eightbyte. Verify & simplify.
+
+ Lo = Hi = NoClass;
+
+ Class &Current = OffsetBase < 64 ? Lo : Hi;
+ Current = Memory;
+
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ BuiltinType::Kind k = BT->getKind();
+
+ if (k == BuiltinType::Void) {
+ Current = NoClass;
+ } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
+ Lo = Integer;
+ Hi = Integer;
+ } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
+ Current = Integer;
+ } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
+ Current = SSE;
+ } else if (k == BuiltinType::LongDouble) {
+ Lo = X87;
+ Hi = X87Up;
+ }
+ // FIXME: _Decimal32 and _Decimal64 are SSE.
+ // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
+ return;
+ }
+
+ if (const EnumType *ET = Ty->getAs<EnumType>()) {
+ // Classify the underlying integer type.
+ classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi);
+ return;
+ }
+
+ if (Ty->hasPointerRepresentation()) {
+ Current = Integer;
+ return;
+ }
+
+ if (Ty->isMemberPointerType()) {
+ if (Ty->isMemberFunctionPointerType())
+ Lo = Hi = Integer;
+ else
+ Current = Integer;
+ return;
+ }
+
+ if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ uint64_t Size = getContext().getTypeSize(VT);
+ if (Size == 32) {
+ // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
+ // float> as integer.
+ Current = Integer;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
+ if (EB_Real != EB_Imag)
+ Hi = Lo;
+ } else if (Size == 64) {
+ // gcc passes <1 x double> in memory. :(
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
+ return;
+
+ // gcc passes <1 x long long> as INTEGER.
+ if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
+ VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
+ VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
+ VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
+ Current = Integer;
+ else
+ Current = SSE;
+
+ // If this type crosses an eightbyte boundary, it should be
+ // split.
+ if (OffsetBase && OffsetBase != 64)
+ Hi = Lo;
+ } else if (Size == 128 || (HasAVX && Size == 256)) {
+ // Arguments of 256 bits are split into four eightbyte chunks. The
+ // least significant one belongs to class SSE and all the others to class
+ // SSEUP. The original Lo and Hi design considers that types can't be
+ // greater than 128 bits, so a 64-bit split in Hi and Lo makes sense.
+ // This design isn't correct for 256 bits, but since there are no cases
+ // where the upper parts would need to be inspected, avoid adding
+ // complexity and just consider Hi to match the 64-256 part.
+ Lo = SSE;
+ Hi = SSEUp;
+ }
+ return;
+ }
+
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ QualType ET = getContext().getCanonicalType(CT->getElementType());
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (ET->isIntegralOrEnumerationType()) {
+ if (Size <= 64)
+ Current = Integer;
+ else if (Size <= 128)
+ Lo = Hi = Integer;
+ } else if (ET == getContext().FloatTy)
+ Current = SSE;
+ else if (ET == getContext().DoubleTy)
+ Lo = Hi = SSE;
+ else if (ET == getContext().LongDoubleTy)
+ Current = ComplexX87;
+
+ // If this complex type crosses an eightbyte boundary then it
+ // should be split.
+ uint64_t EB_Real = (OffsetBase) / 64;
+ uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
+ if (Hi == NoClass && EB_Real != EB_Imag)
+ Hi = Lo;
+
+ return;
+ }
+
+ if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
+ // Arrays are treated like structures.
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than four eightbytes, ..., it has class MEMORY.
+ if (Size > 256)
+ return;
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
+ // fields, it has class MEMORY.
+ //
+ // Only need to check alignment of array base.
+ if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
+ return;
+
+ // Otherwise implement simplified merge. We could be smarter about
+ // this, but it isn't worth it and would be harder to verify.
+ Current = NoClass;
+ uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
+ uint64_t ArraySize = AT->getSize().getZExtValue();
+
+ // The only case a 256-bit wide vector could be used is when the array
+ // contains a single 256-bit element. Since Lo and Hi logic isn't extended
+ // to work for sizes wider than 128, check early and fall back to memory.
+ if (Size > 128 && EltSize != 256)
+ return;
+
+ for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
+ Class FieldLo, FieldHi;
+ classify(AT->getElementType(), Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ postMerge(Size, Lo, Hi);
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
+ return;
+ }
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
+ // than four eightbytes, ..., it has class MEMORY.
+ if (Size > 256)
+ return;
+
+ // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
+ // copy constructor or a non-trivial destructor, it is passed by invisible
+ // reference.
+ if (hasNonTrivialDestructorOrCopyConstructor(RT))
+ return;
+
+ const RecordDecl *RD = RT->getDecl();
+
+ // Assume variable sized types are passed in memory.
+ if (RD->hasFlexibleArrayMember())
+ return;
+
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+
+ // Reset Lo class, this will be recomputed.
+ Current = NoClass;
+
+ // If this is a C++ record, classify the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i) {
+ assert(!i->isVirtual() && !i->getType()->isDependentType() &&
+ "Unexpected base class!");
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+
+ // Classify this field.
+ //
+ // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
+ // single eightbyte, each is classified separately. Each eightbyte gets
+ // initialized to class NO_CLASS.
+ Class FieldLo, FieldHi;
+ uint64_t Offset = OffsetBase + Layout.getBaseClassOffsetInBits(Base);
+ classify(i->getType(), Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+ }
+
+ // Classify the fields one at a time, merging the results.
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ bool BitField = i->isBitField();
+
+ // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
+ // four eightbytes, or it contains unaligned fields, it has class MEMORY.
+ //
+ // The only case a 256-bit wide vector could be used is when the struct
+ // contains a single 256-bit element. Since Lo and Hi logic isn't extended
+ // to work for sizes wider than 128, check early and fall back to memory.
+ //
+ if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
+ Lo = Memory;
+ return;
+ }
+ // Note, skip this test for bit-fields, see below.
+ if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
+ Lo = Memory;
+ return;
+ }
+
+ // Classify this field.
+ //
+ // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
+ // exceeds a single eightbyte, each is classified
+ // separately. Each eightbyte gets initialized to class
+ // NO_CLASS.
+ Class FieldLo, FieldHi;
+
+ // Bit-fields require special handling, they do not force the
+ // structure to be passed in memory even if unaligned, and
+ // therefore they can straddle an eightbyte.
+ if (BitField) {
+ // Ignore padding bit-fields.
+ if (i->isUnnamedBitfield())
+ continue;
+
+ uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
+ uint64_t Size = i->getBitWidthValue(getContext());
+
+ uint64_t EB_Lo = Offset / 64;
+ uint64_t EB_Hi = (Offset + Size - 1) / 64;
+ FieldLo = FieldHi = NoClass;
+ if (EB_Lo) {
+ assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
+ FieldLo = NoClass;
+ FieldHi = Integer;
+ } else {
+ FieldLo = Integer;
+ FieldHi = EB_Hi ? Integer : NoClass;
+ }
+ } else
+ classify(i->getType(), Offset, FieldLo, FieldHi);
+ Lo = merge(Lo, FieldLo);
+ Hi = merge(Hi, FieldHi);
+ if (Lo == Memory || Hi == Memory)
+ break;
+ }
+
+ postMerge(Size, Lo, Hi);
+ }
+}
+
+ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ return ABIArgInfo::getIndirect(0);
+}
+
+bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
+ if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
+ uint64_t Size = getContext().getTypeSize(VecTy);
+ unsigned LargestVector = HasAVX ? 256 : 128;
+ if (Size <= 64 || Size > LargestVector)
+ return true;
+ }
+
+ return false;
+}
+
+ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
+ unsigned freeIntRegs) const {
+ // If this is a scalar LLVM value then assume LLVM will pass it in the right
+ // place naturally.
+ //
+ // This assumption is optimistic, as there could be free registers available
+ // when we need to pass this argument in memory, and LLVM could try to pass
+ // the argument in the free register. This does not seem to happen currently,
+ // but this code would be much safer if we could mark the argument with
+ // 'onstack'. See PR12193.
+ if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ // Compute the byval alignment. We specify the alignment of the byval in all
+ // cases so that the mid-level optimizer knows the alignment of the byval.
+ unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
+
+ // Attempt to avoid passing indirect results using byval when possible. This
+ // is important for good codegen.
+ //
+ // We do this by coercing the value into a scalar type which the backend can
+ // handle naturally (i.e., without using byval).
+ //
+ // For simplicity, we currently only do this when we have exhausted all of the
+ // free integer registers. Doing this when there are free integer registers
+ // would require more care, as we would have to ensure that the coerced value
+ // did not claim the unused register. That would require either reordering the
+ // arguments to the function (so that any subsequent inreg values came first),
+ // or only doing this optimization when there were no following arguments that
+ // might be inreg.
+ //
+ // We currently expect it to be rare (particularly in well written code) for
+ // arguments to be passed on the stack when there are still free integer
+ // registers available (this would typically imply large structs being passed
+ // by value), so this seems like a fair tradeoff for now.
+ //
+ // We can revisit this if the backend grows support for 'onstack' parameter
+ // attributes. See PR12193.
+ if (freeIntRegs == 0) {
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ // If this type fits in an eightbyte, coerce it into the matching integral
+ // type, which will end up on the stack (with alignment 8).
+ if (Align == 8 && Size <= 64)
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+ }
+
+ return ABIArgInfo::getIndirect(Align);
+}
+
+/// GetByteVectorType - The ABI specifies that a value should be passed in a
+/// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a
+/// vector register.
+llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
+ llvm::Type *IRType = CGT.ConvertType(Ty);
+
+ // Wrapper structs that just contain vectors are passed like vectors;
+ // strip them off if present.
+ llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
+ while (STy && STy->getNumElements() == 1) {
+ IRType = STy->getElementType(0);
+ STy = dyn_cast<llvm::StructType>(IRType);
+ }
+
+ // If the preferred type is a 16-byte vector, prefer to pass it.
+ if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
+ llvm::Type *EltTy = VT->getElementType();
+ unsigned BitWidth = VT->getBitWidth();
+ if ((BitWidth >= 128 && BitWidth <= 256) &&
+ (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
+ EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
+ EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
+ EltTy->isIntegerTy(128)))
+ return VT;
+ }
+
+ return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
+}
+
+/// BitsContainNoUserData - Return true if the specified [start,end) bit range
+/// is known to either be off the end of the specified type or be in
+/// alignment padding. The user type specified is known to be at most 128 bits
+/// in size, and have passed through X86_64ABIInfo::classify with a successful
+/// classification that put one of the two halves in the INTEGER class.
+///
+/// It is conservatively correct to return false.
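+///
+/// For example, in struct { int i; double d; } the bit range [32, 64) is
+/// alignment padding, so querying that range returns true.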
+static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
+ unsigned EndBit, ASTContext &Context) {
+ // If the bytes being queried are off the end of the type, there is no user
+ // data hiding here. This handles analysis of builtins, vectors and other
+ // types that don't contain interesting padding.
+ unsigned TySize = (unsigned)Context.getTypeSize(Ty);
+ if (TySize <= StartBit)
+ return true;
+
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
+ unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
+
+ // Check each element to see if the element overlaps with the queried range.
+ for (unsigned i = 0; i != NumElts; ++i) {
+ // If the element is after the span we care about, then we're done.
+ unsigned EltOffset = i*EltSize;
+ if (EltOffset >= EndBit) break;
+
+ unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
+ if (!BitsContainNoUserData(AT->getElementType(), EltStart,
+ EndBit-EltOffset, Context))
+ return false;
+ }
+ // If it overlaps no elements, then it is safe to process as padding.
+ return true;
+ }
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i) {
+ assert(!i->isVirtual() && !i->getType()->isDependentType() &&
+ "Unexpected base class!");
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+
+ // If the base is after the span we care about, ignore it.
+ unsigned BaseOffset = (unsigned)Layout.getBaseClassOffsetInBits(Base);
+ if (BaseOffset >= EndBit) continue;
+
+ unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
+ if (!BitsContainNoUserData(i->getType(), BaseStart,
+ EndBit-BaseOffset, Context))
+ return false;
+ }
+ }
+
+ // Verify that no field has data that overlaps the region of interest. Yes
+ // this could be sped up a lot by being smarter about queried fields,
+ // however we're only looking at structs up to 16 bytes, so we don't care
+ // much.
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
+
+ // If we found a field after the region we care about, then we're done.
+ if (FieldOffset >= EndBit) break;
+
+ unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
+ if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
+ Context))
+ return false;
+ }
+
+ // If nothing in this record overlapped the area of interest, then we're
+ // clean.
+ return true;
+ }
+
+ return false;
+}
+
+/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
+/// float member at the specified offset. For example, {int,{float}} has a
+/// float at offset 4. It is conservatively correct for this routine to return
+/// false.
+static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
+ const llvm::TargetData &TD) {
+ // Base case if we find a float.
+ if (IROffset == 0 && IRType->isFloatTy())
+ return true;
+
+ // If this is a struct, recurse into the field at the specified offset.
+ if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
+ const llvm::StructLayout *SL = TD.getStructLayout(STy);
+ unsigned Elt = SL->getElementContainingOffset(IROffset);
+ IROffset -= SL->getElementOffset(Elt);
+ return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
+ }
+
+ // If this is an array, recurse into the field at the specified offset.
+ if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
+ llvm::Type *EltTy = ATy->getElementType();
+ unsigned EltSize = TD.getTypeAllocSize(EltTy);
+ IROffset -= IROffset/EltSize*EltSize;
+ return ContainsFloatAtOffset(EltTy, IROffset, TD);
+ }
+
+ return false;
+}
+
+
+/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
+/// low 8 bytes of an XMM register, corresponding to the SSE class.
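+///
+/// For example, the first eightbyte of struct { float a, b; double c; } is
+/// passed as <2 x float>, while a single float followed only by padding is
+/// passed as float.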
+llvm::Type *X86_64ABIInfo::
+GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
+ QualType SourceTy, unsigned SourceOffset) const {
+ // The only three choices we have are double, <2 x float>, or float. We
+ // pass as float if the last 4 bytes are just padding. This happens for
+ // structs that contain 3 floats.
+ if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
+ SourceOffset*8+64, getContext()))
+ return llvm::Type::getFloatTy(getVMContext());
+
+ // We want to pass as <2 x float> if the LLVM IR type contains a float at
+ // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
+ // case.
+ if (ContainsFloatAtOffset(IRType, IROffset, getTargetData()) &&
+ ContainsFloatAtOffset(IRType, IROffset+4, getTargetData()))
+ return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
+
+ return llvm::Type::getDoubleTy(getVMContext());
+}
+
+
+/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
+/// an 8-byte GPR. This means that we either have a scalar or we are talking
+/// about the high or low part of an up-to-16-byte struct. This routine picks
+/// the best LLVM IR type to represent this, which may be i64 or may be anything
+/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
+/// etc).
+///
+/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
+/// the source type. IROffset is an offset in bytes into the LLVM IR type that
+/// the 8-byte value references. PrefType may be null.
+///
+/// SourceTy is the source level type for the entire argument. SourceOffset is
+/// an offset into this that we're processing (which is always either 0 or 8).
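+ ///
+ /// For example, struct { char c[10]; } yields i64 for the low eightbyte
+ /// (SourceOffset 0) and i16 for the high eightbyte (SourceOffset 8), so the
+ /// argument is ultimately coerced to the pair {i64, i16}.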
+///
+llvm::Type *X86_64ABIInfo::
+GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
+ QualType SourceTy, unsigned SourceOffset) const {
+ // If we're dealing with an un-offset LLVM IR type, then it means that we're
+ // returning an 8-byte unit starting with it. See if we can safely use it.
+ if (IROffset == 0) {
+ // Pointers and int64's always fill the 8-byte unit.
+ if (isa<llvm::PointerType>(IRType) || IRType->isIntegerTy(64))
+ return IRType;
+
+ // If we have a 1/2/4-byte integer, we can use it only if the rest of the
+ // goodness in the source type is just tail padding. This is allowed to
+ // kick in for struct {double,int} on the int, but not on
+ // struct{double,int,int} because we wouldn't return the second int. We
+ // have to do this analysis on the source type because we can't depend on
+ // unions being lowered a specific way etc.
+ if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
+ IRType->isIntegerTy(32)) {
+ unsigned BitWidth = cast<llvm::IntegerType>(IRType)->getBitWidth();
+
+ if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
+ SourceOffset*8+64, getContext()))
+ return IRType;
+ }
+ }
+
+ if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
+ // If this is a struct, recurse into the field at the specified offset.
+ const llvm::StructLayout *SL = getTargetData().getStructLayout(STy);
+ if (IROffset < SL->getSizeInBytes()) {
+ unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
+ IROffset -= SL->getElementOffset(FieldIdx);
+
+ return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
+ SourceTy, SourceOffset);
+ }
+ }
+
+ if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
+ llvm::Type *EltTy = ATy->getElementType();
+ unsigned EltSize = getTargetData().getTypeAllocSize(EltTy);
+ unsigned EltOffset = IROffset/EltSize*EltSize;
+ return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
+ SourceOffset);
+ }
+
+ // Okay, we don't have any better idea of what to pass, so we pass this in an
+ // integer type no larger than what remains of the struct.
+ unsigned TySizeInBytes =
+ (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
+
+ assert(TySizeInBytes != SourceOffset && "Empty field?");
+
+ // It is always safe to classify this as an integer type up to i64 that
+ // isn't larger than the structure.
+ return llvm::IntegerType::get(getVMContext(),
+ std::min(TySizeInBytes-SourceOffset, 8U)*8);
+}
+
+
+/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
+/// be used as elements of a two register pair to pass or return, return a
+/// first class aggregate to represent them. For example, if the low part of
+/// a by-value argument should be passed as i32* and the high part as float,
+/// return {i32*, float}.
+static llvm::Type *
+GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
+ const llvm::TargetData &TD) {
+ // In order to correctly satisfy the ABI, we need the high part to start
+ // at offset 8. If the high and low parts we inferred are both 4-byte types
+ // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
+ // the second element at offset 8. Check for this:
+ unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
+ unsigned HiAlign = TD.getABITypeAlignment(Hi);
+ unsigned HiStart = llvm::TargetData::RoundUpAlignment(LoSize, HiAlign);
+ assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
+
+ // To handle this, we have to increase the size of the low part so that the
+ // second element will start at an 8 byte offset. We can't increase the size
+ // of the second element because it might make us access off the end of the
+ // struct.
+ if (HiStart != 8) {
+ // There are only two sorts of types the ABI generation code can produce for
+ // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
+ // Promote these to a larger type.
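+ // For example, if the inferred pair were i32 and float, the i32 is widened
+ // to i64 so that the resulting {i64, float} places the float at offset 8.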
+ if (Lo->isFloatTy())
+ Lo = llvm::Type::getDoubleTy(Lo->getContext());
+ else {
+ assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
+ Lo = llvm::Type::getInt64Ty(Lo->getContext());
+ }
+ }
+
+ llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);
+
+
+ // Verify that the second element is at an 8-byte offset.
+ assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
+ "Invalid x86-64 argument pair!");
+ return Result;
+}
+
+ABIArgInfo X86_64ABIInfo::
+classifyReturnType(QualType RetTy) const {
+ // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
+ // classification algorithm.
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(RetTy, 0, Lo, Hi);
+
+ // Check some invariants.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ llvm::Type *ResType = 0;
+ switch (Lo) {
+ case NoClass:
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register, leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
+
+ case SSEUp:
+ case X87Up:
+ llvm_unreachable("Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
+ // hidden argument.
+ case Memory:
+ return getIndirectReturnResult(RetTy);
+
+ // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
+ // available register of the sequence %rax, %rdx is used.
+ case Integer:
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
+
+ // If we have a sign or zero extended integer, make sure to return Extend
+ // so that the parameter gets the right LLVM IR attributes.
+ if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ if (RetTy->isIntegralOrEnumerationType() &&
+ RetTy->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+ }
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
+ // available SSE register of the sequence %xmm0, %xmm1 is used.
+ case SSE:
+ ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
+ // returned on the X87 stack in %st0 as 80-bit x87 number.
+ case X87:
+ ResType = llvm::Type::getX86_FP80Ty(getVMContext());
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
+ // part of the value is returned in %st0 and the imaginary part in
+ // %st1.
+ case ComplexX87:
+ assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
+ ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
+ llvm::Type::getX86_FP80Ty(getVMContext()),
+ NULL);
+ break;
+ }
+
+ llvm::Type *HighPart = 0;
+ switch (Hi) {
+ // Memory was handled previously and X87 should
+ // never occur as a hi class.
+ case Memory:
+ case X87:
+ llvm_unreachable("Invalid classification for hi word.");
+
+ case ComplexX87: // Previously handled.
+ case NoClass:
+ break;
+
+ case Integer:
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+ case SSE:
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
+ // is passed in the next available eightbyte chunk of the last used
+ // vector register.
+ //
+ // SSEUP should always be preceded by SSE, just widen.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification.");
+ ResType = GetByteVectorType(RetTy);
+ break;
+
+ // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
+ // returned together with the previous X87 value in %st0.
+ case X87Up:
+ // If X87Up is preceded by X87, we don't need to do
+ // anything. However, in some cases with unions it may not be
+ // preceded by X87. In such situations we follow gcc and pass the
+ // extra bits in an SSE reg.
+ if (Lo != X87) {
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
+ if (Lo == NoClass) // Return HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ }
+ break;
+ }
+
+ // If a high part was specified, merge it together with the low part. It is
+ // known to pass in the high eightbyte of the result. We do this by forming a
+ // first class struct aggregate with the high and low part: {low, high}
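+ // For example, struct { double d; int i; } is classified SSE + Integer and
+ // ends up here as the pair {double, i32}, returned in %xmm0 and %rax.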
+ if (HighPart)
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
+
+ return ABIArgInfo::getDirect(ResType);
+}
+
+ABIArgInfo X86_64ABIInfo::classifyArgumentType(
+ QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE)
+ const
+{
+ X86_64ABIInfo::Class Lo, Hi;
+ classify(Ty, 0, Lo, Hi);
+
+ // Check some invariants.
+ // FIXME: Enforce these by construction.
+ assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
+ assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
+
+ neededInt = 0;
+ neededSSE = 0;
+ llvm::Type *ResType = 0;
+ switch (Lo) {
+ case NoClass:
+ if (Hi == NoClass)
+ return ABIArgInfo::getIgnore();
+ // If the low part is just padding, it takes no register, leave ResType
+ // null.
+ assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
+ "Unknown missing lo part");
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
+ // on the stack.
+ case Memory:
+
+ // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
+ // COMPLEX_X87, it is passed in memory.
+ case X87:
+ case ComplexX87:
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ ++neededInt;
+ return getIndirectResult(Ty, freeIntRegs);
+
+ case SSEUp:
+ case X87Up:
+ llvm_unreachable("Invalid classification for lo word.");
+
+ // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
+ // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
+ // and %r9 is used.
+ case Integer:
+ ++neededInt;
+
+ // Pick an 8-byte type based on the preferred type.
+ ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
+
+ // If we have a sign or zero extended integer, make sure to return Extend
+ // so that the parameter gets the right LLVM IR attributes.
+ if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ if (Ty->isIntegralOrEnumerationType() &&
+ Ty->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+ }
+
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
+ // available SSE register is used, the registers are taken in the
+ // order from %xmm0 to %xmm7.
+ case SSE: {
+ llvm::Type *IRType = CGT.ConvertType(Ty);
+ ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
+ ++neededSSE;
+ break;
+ }
+ }
+
+ llvm::Type *HighPart = 0;
+ switch (Hi) {
+ // Memory was handled previously, ComplexX87 and X87 should
+ // never occur as hi classes, and X87Up must be preceded by X87,
+ // which is passed in memory.
+ case Memory:
+ case X87:
+ case ComplexX87:
+ llvm_unreachable("Invalid classification for hi word.");
+
+ case NoClass: break;
+
+ case Integer:
+ ++neededInt;
+ // Pick an 8-byte type based on the preferred type.
+ HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
+
+ if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+ break;
+
+ // X87Up generally doesn't occur here (long double is passed in
+ // memory), except in situations involving unions.
+ case X87Up:
+ case SSE:
+ HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
+
+ if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
+ return ABIArgInfo::getDirect(HighPart, 8);
+
+ ++neededSSE;
+ break;
+
+ // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
+ // eightbyte is passed in the upper half of the last used SSE
+ // register. This only happens when 128-bit vectors are passed.
+ case SSEUp:
+ assert(Lo == SSE && "Unexpected SSEUp classification");
+ ResType = GetByteVectorType(Ty);
+ break;
+ }
+
+ // If a high part was specified, merge it together with the low part. It is
+ // known to pass in the high eightbyte of the result. We do this by forming a
+ // first class struct aggregate with the high and low part: {low, high}
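+ // For example, struct { long l; double d; } is classified Integer + SSE,
+ // consumes one GPR and one XMM register, and is coerced to {i64, double}.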
+ if (HighPart)
+ ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getTargetData());
+
+ return ABIArgInfo::getDirect(ResType);
+}
+
+void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+
+ // Keep track of the number of assigned registers.
+ unsigned freeIntRegs = 6, freeSSERegs = 8;
+
+ // If the return value is indirect, then the hidden argument is consuming one
+ // integer register.
+ if (FI.getReturnInfo().isIndirect())
+ --freeIntRegs;
+
+ // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
+ // get assigned (in left-to-right order) for passing as follows...
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it) {
+ unsigned neededInt, neededSSE;
+ it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
+ neededSSE);
+
+ // AMD64-ABI 3.2.3p3: If there are no registers available for any
+ // eightbyte of an argument, the whole argument is passed on the
+ // stack. If registers have already been assigned for some
+ // eightbytes of such an argument, the assignments get reverted.
+ if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
+ freeIntRegs -= neededInt;
+ freeSSERegs -= neededSSE;
+ } else {
+ it->info = getIndirectResult(it->type, freeIntRegs);
+ }
+ }
+}
+
+static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
+ QualType Ty,
+ CodeGenFunction &CGF) {
+ llvm::Value *overflow_arg_area_p =
+ CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
+ llvm::Value *overflow_arg_area =
+ CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
+
+ // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
+ // byte boundary if alignment needed by type exceeds 8 byte boundary.
+ // It isn't stated explicitly in the standard, but in practice we use
+ // alignment greater than 16 where necessary.
+ uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (Align > 8) {
+ // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
+ llvm::Value *Offset =
+ llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
+ overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
+ llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
+ CGF.Int64Ty);
+ llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
+ overflow_arg_area =
+ CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
+ overflow_arg_area->getType(),
+ "overflow_arg_area.align");
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
+ llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *Res =
+ CGF.Builder.CreateBitCast(overflow_arg_area,
+ llvm::PointerType::getUnqual(LTy));
+
+ // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
+ // l->overflow_arg_area + sizeof(type).
+ // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
+ // an 8 byte boundary.
+
+ uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
+ llvm::Value *Offset =
+ llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
+ overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
+ "overflow_arg_area.next");
+ CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
+
+ // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
+ return Res;
+}
+
+llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // Assume that va_list type is correct; should be pointer to LLVM type:
+ // struct {
+ // i32 gp_offset;
+ // i32 fp_offset;
+ // i8* overflow_arg_area;
+ // i8* reg_save_area;
+ // };
+ unsigned neededInt, neededSSE;
+
+ Ty = CGF.getContext().getCanonicalType(Ty);
+ ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE);
+
+ // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
+ // in the registers. If not go to step 7.
+ if (!neededInt && !neededSSE)
+ return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+ // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
+ // general purpose registers needed to pass type and num_fp to hold
+ // the number of floating point registers needed.
+
+ // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
+ // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
+ // l->fp_offset > 304 - num_fp * 16 go to step 7.
+ //
+ // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
+ // register save space.
+
+ llvm::Value *InRegs = 0;
+ llvm::Value *gp_offset_p = 0, *gp_offset = 0;
+ llvm::Value *fp_offset_p = 0, *fp_offset = 0;
+ if (neededInt) {
+ gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
+ gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
+ InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
+ InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
+ }
+
+ if (neededSSE) {
+ fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
+ fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
+ llvm::Value *FitsInFP =
+ llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
+ FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
+ InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
+ }
+
+ llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
+ llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
+ llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
+ CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
+
+ // Emit code to load the value if it was passed in registers.
+
+ CGF.EmitBlock(InRegBlock);
+
+ // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
+ // an offset of l->gp_offset and/or l->fp_offset. This may require
+ // copying to a temporary location in case the parameter is passed
+ // in different register classes or requires an alignment greater
+ // than 8 for general purpose registers and 16 for XMM registers.
+ //
+ // FIXME: This really results in shameful code when we end up needing to
+ // collect arguments from different places; often what should result in a
+ // simple assembling of a structure from scattered addresses has many more
+ // loads than necessary. Can we clean this up?
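+ // For a type that needs both register classes (e.g. struct { long l;
+ // double d; }), the code below loads one eightbyte from reg_save_area +
+ // gp_offset and one from reg_save_area + fp_offset into a two-field
+ // temporary and uses the temporary's address as the result.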
+ llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Value *RegAddr =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
+ "reg_save_area");
+ if (neededInt && neededSSE) {
+ // FIXME: Cleanup.
+ assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
+ llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
+ llvm::Value *Tmp = CGF.CreateTempAlloca(ST);
+ assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
+ llvm::Type *TyLo = ST->getElementType(0);
+ llvm::Type *TyHi = ST->getElementType(1);
+ assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
+ "Unexpected ABI info for mixed regs");
+ llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
+ llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
+ llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+ llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegLoAddr = TyLo->isFloatingPointTy() ? FPAddr : GPAddr;
+ llvm::Value *RegHiAddr = TyLo->isFloatingPointTy() ? GPAddr : FPAddr;
+ llvm::Value *V =
+ CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+
+ RegAddr = CGF.Builder.CreateBitCast(Tmp,
+ llvm::PointerType::getUnqual(LTy));
+ } else if (neededInt) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
+ } else if (neededSSE == 1) {
+ RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ RegAddr = CGF.Builder.CreateBitCast(RegAddr,
+ llvm::PointerType::getUnqual(LTy));
+ } else {
+ assert(neededSSE == 2 && "Invalid number of needed registers!");
+ // SSE registers are spaced 16 bytes apart in the register save
+ // area, we need to collect the two eightbytes together.
+ llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
+ llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
+ llvm::Type *DoubleTy = CGF.DoubleTy;
+ llvm::Type *DblPtrTy =
+ llvm::PointerType::getUnqual(DoubleTy);
+ llvm::StructType *ST = llvm::StructType::get(DoubleTy,
+ DoubleTy, NULL);
+ llvm::Value *V, *Tmp = CGF.CreateTempAlloca(ST);
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
+ V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
+ DblPtrTy));
+ CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
+ RegAddr = CGF.Builder.CreateBitCast(Tmp,
+ llvm::PointerType::getUnqual(LTy));
+ }
+
+ // AMD64-ABI 3.5.7p5: Step 5. Set:
+ // l->gp_offset = l->gp_offset + num_gp * 8
+ // l->fp_offset = l->fp_offset + num_fp * 16.
+ if (neededInt) {
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
+ gp_offset_p);
+ }
+ if (neededSSE) {
+ llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
+ CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
+ fp_offset_p);
+ }
+ CGF.EmitBranch(ContBlock);
+
+ // Emit code to load the value if it was passed in memory.
+
+ CGF.EmitBlock(InMemBlock);
+ llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
+
+ // Return the appropriate result.
+
+ CGF.EmitBlock(ContBlock);
+ llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
+ "vaarg.addr");
+ ResAddr->addIncoming(RegAddr, InRegBlock);
+ ResAddr->addIncoming(MemAddr, InMemBlock);
+ return ResAddr;
+}
+
+ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty) const {
+
+ if (Ty->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ if (hasNonTrivialDestructorOrCopyConstructor(RT) ||
+ RT->getDecl()->hasFlexibleArrayMember())
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ // FIXME: mingw-w64-gcc emits 128-bit struct as i128
+ if (Size == 128 &&
+ getContext().getTargetInfo().getTriple().getOS()
+ == llvm::Triple::MinGW32)
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+
+ // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
+ // not 1, 2, 4, or 8 bytes, must be passed by reference."
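+ // For example, an 8-byte struct is passed directly as i64, while a 3-byte
+ // or a 16-byte struct is passed indirectly by reference (modulo the
+ // mingw-w64 i128 case handled above).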
+ if (Size <= 64 &&
+ (Size & (Size - 1)) == 0)
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ Size));
+
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ }
+
+ if (Ty->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+
+ return ABIArgInfo::getDirect();
+}
+
+void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
+
+ QualType RetTy = FI.getReturnType();
+ FI.getReturnInfo() = classify(RetTy);
+
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classify(it->type);
+}
+
+llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+// PowerPC-32
+
+namespace {
+class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
+public:
+ PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ // This is recovered from gcc output.
+ return 1; // r1 is the dedicated stack pointer
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+};
+
+}
+
+bool
+PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ // This is calculated from the LLVM and GCC tables and verified
+ // against gcc output. AFAIK all ABIs use the same encoding.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+
+ llvm::IntegerType *i8 = CGF.Int8Ty;
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+ llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
+ llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
+
+ // 0-31: r0-31, the 4-byte general-purpose registers
+ AssignToArrayRange(Builder, Address, Four8, 0, 31);
+
+ // 32-63: fp0-31, the 8-byte floating-point registers
+ AssignToArrayRange(Builder, Address, Eight8, 32, 63);
+
+ // 64-76 are various 4-byte special-purpose registers:
+ // 64: mq
+ // 65: lr
+ // 66: ctr
+ // 67: ap
+ // 68-75 cr0-7
+ // 76: xer
+ AssignToArrayRange(Builder, Address, Four8, 64, 76);
+
+ // 77-108: v0-31, the 16-byte vector registers
+ AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
+
+ // 109: vrsave
+ // 110: vscr
+ // 111: spe_acc
+ // 112: spefscr
+ // 113: sfp
+ AssignToArrayRange(Builder, Address, Four8, 109, 113);
+
+ return false;
+}
+
+
+//===----------------------------------------------------------------------===//
+// ARM ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class ARMABIInfo : public ABIInfo {
+public:
+ enum ABIKind {
+ APCS = 0,
+ AAPCS = 1,
+ AAPCS_VFP
+ };
+
+private:
+ ABIKind Kind;
+
+public:
+ ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {}
+
+ bool isEABI() const {
+ StringRef Env =
+ getContext().getTargetInfo().getTriple().getEnvironmentName();
+ return (Env == "gnueabi" || Env == "eabi" || Env == "androideabi");
+ }
+
+private:
+ ABIKind getABIKind() const { return Kind; }
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
+ :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
+
+ const ARMABIInfo &getABIInfo() const {
+ return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
+ }
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ return 13;
+ }
+
+ StringRef getARCRetainAutoreleasedReturnValueMarker() const {
+ return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
+
+ // 0-15 are the 16 integer registers.
+ AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
+ return false;
+ }
+
+ unsigned getSizeOfUnwindException() const {
+ if (getABIInfo().isEABI()) return 88;
+ return TargetCodeGenInfo::getSizeOfUnwindException();
+ }
+};
+
+}
+
+void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+
+ // Always honor user-specified calling convention.
+ if (FI.getCallingConvention() != llvm::CallingConv::C)
+ return;
+
+ // Default calling convention as dictated by the ABI.
+ llvm::CallingConv::ID DefaultCC;
+ if (isEABI())
+ DefaultCC = llvm::CallingConv::ARM_AAPCS;
+ else
+ DefaultCC = llvm::CallingConv::ARM_APCS;
+
+ // If the user did not explicitly ask for a specific calling convention (e.g.
+ // via the pcs attribute), set the effective calling convention if it differs
+ // from the ABI default.
+ switch (getABIKind()) {
+ case APCS:
+ if (DefaultCC != llvm::CallingConv::ARM_APCS)
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_APCS);
+ break;
+ case AAPCS:
+ if (DefaultCC != llvm::CallingConv::ARM_AAPCS)
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS);
+ break;
+ case AAPCS_VFP:
+ if (DefaultCC != llvm::CallingConv::ARM_AAPCS_VFP)
+ FI.setEffectiveCallingConvention(llvm::CallingConv::ARM_AAPCS_VFP);
+ break;
+ }
+}
+
+/// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous
+/// aggregate. If HAMembers is non-null, the number of base elements
+/// contained in the type is returned through it; this is used for the
+/// recursive calls that check aggregate component types.
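+ ///
+ /// For example, struct { float x, y, z, w; } is a homogeneous aggregate of
+ /// four floats, while struct { float f[5]; } (more than 4 members) and
+ /// struct { float f; double d; } (mixed base types) are not.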
+static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
+ ASTContext &Context,
+ uint64_t *HAMembers = 0) {
+ uint64_t Members;
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
+ return false;
+ Members *= AT->getSize().getZExtValue();
+ } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->isUnion() || RD->hasFlexibleArrayMember())
+ return false;
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (!CXXRD->isAggregate())
+ return false;
+ }
+ Members = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i) {
+ const FieldDecl *FD = *i;
+ uint64_t FldMembers;
+ if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers))
+ return false;
+ Members += FldMembers;
+ }
+ } else {
+ Members = 1;
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ Members = 2;
+ Ty = CT->getElementType();
+ }
+
+ // Homogeneous aggregates for AAPCS-VFP must have base types of float,
+ // double, or 64-bit or 128-bit vectors.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
+ if (BT->getKind() != BuiltinType::Float &&
+ BT->getKind() != BuiltinType::Double)
+ return false;
+ } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
+ unsigned VecSize = Context.getTypeSize(VT);
+ if (VecSize != 64 && VecSize != 128)
+ return false;
+ } else {
+ return false;
+ }
+
+ // The base type must be the same for all members. Vector types of the
+ // same total size are treated as being equivalent here.
+ const Type *TyPtr = Ty.getTypePtr();
+ if (!Base)
+ Base = TyPtr;
+ if (Base != TyPtr &&
+ (!Base->isVectorType() || !TyPtr->isVectorType() ||
+ Context.getTypeSize(Base) != Context.getTypeSize(TyPtr)))
+ return false;
+ }
+
+ // Homogeneous Aggregates can have at most 4 members of the base type.
+ if (HAMembers)
+ *HAMembers = Members;
+ return (Members <= 4);
+}
+
+ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty) const {
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ // Ignore empty records.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ if (getABIKind() == ARMABIInfo::AAPCS_VFP) {
+ // Homogeneous Aggregates need to be expanded.
+ const Type *Base = 0;
+ if (isHomogeneousAggregate(Ty, Base, getContext()))
+ return ABIArgInfo::getExpand();
+ }
+
+ // Otherwise, pass by coercing to a structure of the appropriate size.
+ //
+ // FIXME: This is kind of nasty... but there isn't much choice because the ARM
+ // backend doesn't support byval.
+ // FIXME: This doesn't handle alignment > 64 bits.
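+ // For example, a 12-byte struct with 4-byte alignment is coerced to
+ // { [3 x i32] }, and a 16-byte struct with 8-byte alignment to { [2 x i64] }.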
+ llvm::Type* ElemTy;
+ unsigned SizeRegs;
+ if (getContext().getTypeAlign(Ty) > 32) {
+ ElemTy = llvm::Type::getInt64Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
+ } else {
+ ElemTy = llvm::Type::getInt32Ty(getVMContext());
+ SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
+ }
+
+ llvm::Type *STy =
+ llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL);
+ return ABIArgInfo::getDirect(STy);
+}
+
+static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
+ llvm::LLVMContext &VMContext) {
+ // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
+ // is called integer-like if its size is less than or equal to one word, and
+ // the offset of each of its addressable sub-fields is zero.
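+ //
+ // For example, struct { int x; } and struct { char c; } are integer-like,
+ // while struct { short a, b; } is not, because b sits at a non-zero offset.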
+
+ uint64_t Size = Context.getTypeSize(Ty);
+
+ // Check that the type fits in a word.
+ if (Size > 32)
+ return false;
+
+ // FIXME: Handle vector types!
+ if (Ty->isVectorType())
+ return false;
+
+ // Float types are never treated as "integer like".
+ if (Ty->isRealFloatingType())
+ return false;
+
+ // If this is a builtin or pointer type then it is ok.
+ if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
+ return true;
+
+ // Small complex integer types are "integer like".
+ if (const ComplexType *CT = Ty->getAs<ComplexType>())
+ return isIntegerLikeType(CT->getElementType(), Context, VMContext);
+
+ // Single element and zero sized arrays should be allowed, by the definition
+ // above, but they are not.
+
+ // Otherwise, it must be a record type.
+ const RecordType *RT = Ty->getAs<RecordType>();
+ if (!RT) return false;
+
+ // Ignore records with flexible arrays.
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->hasFlexibleArrayMember())
+ return false;
+
+ // Check that all sub-fields are at offset 0, and are themselves "integer
+ // like".
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ bool HadField = false;
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ const FieldDecl *FD = *i;
+
+ // Bit-fields are not addressable, so we only need to verify they are
+ // "integer like". We still have to disallow a subsequent non-bitfield; for
+ // example:
+ // struct { int : 0; int x; }
+ // is non-integer-like according to gcc.
+ if (FD->isBitField()) {
+ if (!RD->isUnion())
+ HadField = true;
+
+ if (!isIntegerLikeType(FD->getType(), Context, VMContext))
+ return false;
+
+ continue;
+ }
+
+ // Check if this field is at offset 0.
+ if (Layout.getFieldOffset(idx) != 0)
+ return false;
+
+ if (!isIntegerLikeType(FD->getType(), Context, VMContext))
+ return false;
+
+ // Only allow at most one field in a structure. This doesn't match the
+ // wording above, but follows gcc in situations with a field following an
+ // empty structure.
+ if (!RD->isUnion()) {
+ if (HadField)
+ return false;
+
+ HadField = true;
+ }
+ }
+
+ return true;
+}
+
+ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // Large vector types should be returned via memory.
+ if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
+ return ABIArgInfo::getIndirect(0);
+
+ if (!isAggregateTypeForABI(RetTy)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ // Are we following APCS?
+ if (getABIKind() == APCS) {
+ if (isEmptyRecord(getContext(), RetTy, false))
+ return ABIArgInfo::getIgnore();
+
+ // Complex types are all returned as packed integers.
+ //
+ // FIXME: Consider using 2 x vector types if the back end handles them
+ // correctly.
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
+ getContext().getTypeSize(RetTy)));
+
+ // Integer like structures are returned in r0.
+ if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
+ // Return in the smallest viable integer type.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ }
+
+ // Otherwise return in memory.
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // Otherwise this is an AAPCS variant.
+
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ // Check for homogeneous aggregates with AAPCS-VFP.
+ if (getABIKind() == AAPCS_VFP) {
+ const Type *Base = 0;
+ if (isHomogeneousAggregate(RetTy, Base, getContext()))
+ // Homogeneous Aggregates are returned directly.
+ return ABIArgInfo::getDirect();
+ }
+
+ // Aggregates <= 4 bytes are returned in r0; other aggregates
+ // are returned indirectly.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 32) {
+ // Return in the smallest viable integer type.
+ if (Size <= 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ }
+
+ return ABIArgInfo::getIndirect(0);
+}
+
+llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ llvm::Type *BP = CGF.Int8PtrTy;
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ // Handle address alignment for type alignment > 32 bits
+ uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
+ if (TyAlign > 4) {
+ assert((TyAlign & (TyAlign - 1)) == 0 &&
+ "Alignment is not power of 2!");
+ llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
+ AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
+ AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
+ Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
+ }
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+//===----------------------------------------------------------------------===//
+// PTX ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class PTXABIInfo : public ABIInfo {
+public:
+ PTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType Ty) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CFG) const;
+};
+
+class PTXTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ PTXTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new PTXABIInfo(CGT)) {}
+
+ virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const;
+};
+
+ABIArgInfo PTXABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+ if (isAggregateTypeForABI(RetTy))
+ return ABIArgInfo::getIndirect(0);
+ return ABIArgInfo::getDirect();
+}
+
+ABIArgInfo PTXABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty))
+ return ABIArgInfo::getIndirect(0);
+
+ return ABIArgInfo::getDirect();
+}
+
+void PTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+
+ // Always honor user-specified calling convention.
+ if (FI.getCallingConvention() != llvm::CallingConv::C)
+ return;
+
+ // Default calling convention as dictated by the ABI.
+ llvm::CallingConv::ID DefaultCC;
+ const LangOptions &LangOpts = getContext().getLangOpts();
+ if (LangOpts.OpenCL || LangOpts.CUDA) {
+ // If we are in OpenCL or CUDA mode, then default to device functions
+ DefaultCC = llvm::CallingConv::PTX_Device;
+ } else {
+ // If we are in standard C/C++ mode, use the triple to decide on the default
+ StringRef Env =
+ getContext().getTargetInfo().getTriple().getEnvironmentName();
+ if (Env == "device")
+ DefaultCC = llvm::CallingConv::PTX_Device;
+ else
+ DefaultCC = llvm::CallingConv::PTX_Kernel;
+ }
+ FI.setEffectiveCallingConvention(DefaultCC);
+
+}
+
+llvm::Value *PTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CFG) const {
+ llvm_unreachable("PTX does not support varargs");
+}
+
+void PTXTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const{
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) return;
+
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Perform special handling in OpenCL mode
+ if (M.getLangOpts().OpenCL) {
+ // Use OpenCL function attributes to set proper calling conventions
+ // By default, all functions are device functions
+ if (FD->hasAttr<OpenCLKernelAttr>()) {
+ // OpenCL __kernel functions get a kernel calling convention
+ F->setCallingConv(llvm::CallingConv::PTX_Kernel);
+ // And kernel functions are not subject to inlining
+ F->addFnAttr(llvm::Attribute::NoInline);
+ }
+ }
+
+ // Perform special handling in CUDA mode.
+ if (M.getLangOpts().CUDA) {
+ // CUDA __global__ functions get a kernel calling convention. Since
+ // __global__ functions cannot be called from the device, we do not
+ // need to set the noinline attribute.
+ if (FD->getAttr<CUDAGlobalAttr>())
+ F->setCallingConv(llvm::CallingConv::PTX_Kernel);
+ }
+}
+
+}
+
+//===----------------------------------------------------------------------===//
+// MBlaze ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class MBlazeABIInfo : public ABIInfo {
+public:
+ MBlazeABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+ bool isPromotableIntegerType(QualType Ty) const;
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+ }
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class MBlazeTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ MBlazeTargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new MBlazeABIInfo(CGT)) {}
+ void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const;
+};
+
+}
+
+bool MBlazeABIInfo::isPromotableIntegerType(QualType Ty) const {
+ // MBlaze ABI requires all 8 and 16 bit quantities to be extended.
+ if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ case BuiltinType::Char_S:
+ case BuiltinType::Char_U:
+ case BuiltinType::SChar:
+ case BuiltinType::UChar:
+ case BuiltinType::Short:
+ case BuiltinType::UShort:
+ return true;
+ default:
+ return false;
+ }
+ return false;
+}
+
+llvm::Value *MBlazeABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // FIXME: Implement
+ return 0;
+}
+
+
+ABIArgInfo MBlazeABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+ if (isAggregateTypeForABI(RetTy))
+ return ABIArgInfo::getIndirect(0);
+
+ return (isPromotableIntegerType(RetTy) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+ABIArgInfo MBlazeABIInfo::classifyArgumentType(QualType Ty) const {
+ if (isAggregateTypeForABI(Ty))
+ return ABIArgInfo::getIndirect(0);
+
+ return (isPromotableIntegerType(Ty) ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+void MBlazeTargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M)
+ const {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) return;
+
+ llvm::CallingConv::ID CC = llvm::CallingConv::C;
+ if (FD->hasAttr<MBlazeInterruptHandlerAttr>())
+ CC = llvm::CallingConv::MBLAZE_INTR;
+ else if (FD->hasAttr<MBlazeSaveVolatilesAttr>())
+ CC = llvm::CallingConv::MBLAZE_SVOL;
+
+ if (CC != llvm::CallingConv::C) {
+ // Handle 'interrupt_handler' attribute:
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Step 1: Set ISR calling convention.
+ F->setCallingConv(CC);
+
+ // Step 2: Add attributes goodness.
+ F->addFnAttr(llvm::Attribute::NoInline);
+ }
+
+ // Step 3: Emit _interrupt_handler alias.
+ if (CC == llvm::CallingConv::MBLAZE_INTR)
+ new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
+ "_interrupt_handler", GV, &M.getModule());
+}
+
+
+//===----------------------------------------------------------------------===//
+// MSP430 ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
+ : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
+ void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const;
+};
+
+}
+
+void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
+ // Handle 'interrupt' attribute:
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ // Step 1: Set ISR calling convention.
+ F->setCallingConv(llvm::CallingConv::MSP430_INTR);
+
+ // Step 2: Add attributes goodness.
+ F->addFnAttr(llvm::Attribute::NoInline);
+
+ // Step 3: Emit ISR vector alias.
+ unsigned Num = attr->getNumber() + 0xffe0;
+ new llvm::GlobalAlias(GV->getType(), llvm::Function::ExternalLinkage,
+ "vector_" + Twine::utohexstr(Num),
+ GV, &M.getModule());
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// MIPS ABI Implementation. This works for both little-endian and
+// big-endian variants.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class MipsABIInfo : public ABIInfo {
+ bool IsO32;
+ unsigned MinABIStackAlignInBytes;
+ llvm::Type* HandleAggregates(QualType Ty) const;
+ llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
+ llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
+public:
+ MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
+ ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8) {}
+
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
+ unsigned SizeOfUnwindException;
+public:
+ MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
+ : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
+ SizeOfUnwindException(IsO32 ? 24 : 32) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+ return 29;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+
+ unsigned getSizeOfUnwindException() const {
+ return SizeOfUnwindException;
+ }
+};
+}
+
+// In N32/64, an aligned double precision floating point field is passed in
+// a register.
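+ // For example, on N64, struct { int i; double d; } is coerced to
+ // { i64, double } so that the aligned double travels in a floating-point
+ // register.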
+llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty) const {
+ if (IsO32)
+ return 0;
+
+ if (Ty->isComplexType())
+ return CGT.ConvertType(Ty);
+
+ const RecordType *RT = Ty->getAs<RecordType>();
+
+ // Unions are passed in integer registers.
+ if (!RT || !RT->isStructureOrClassType())
+ return 0;
+
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ uint64_t StructSize = getContext().getTypeSize(Ty);
+ assert(!(StructSize % 8) && "Size of structure must be multiple of 8.");
+
+ uint64_t LastOffset = 0;
+ unsigned idx = 0;
+ llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
+ SmallVector<llvm::Type*, 8> ArgList;
+
+ // Iterate over fields in the struct/class and check if there are any aligned
+ // double fields.
+ for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
+ i != e; ++i, ++idx) {
+ const QualType Ty = (*i)->getType();
+ const BuiltinType *BT = Ty->getAs<BuiltinType>();
+
+ if (!BT || BT->getKind() != BuiltinType::Double)
+ continue;
+
+ uint64_t Offset = Layout.getFieldOffset(idx);
+ if (Offset % 64) // Ignore doubles that are not aligned.
+ continue;
+
+ // Add ((Offset - LastOffset) / 64) args of type i64.
+ for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
+ ArgList.push_back(I64);
+
+ // Add double type.
+ ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
+ LastOffset = Offset + 64;
+ }
+
+ // This struct/class doesn't have an aligned double field.
+ if (!LastOffset)
+ return 0;
+
+ // Add ((StructSize - LastOffset) / 64) args of type i64.
+ for (unsigned N = (StructSize - LastOffset) / 64; N; --N)
+ ArgList.push_back(I64);
+
+ // If the size of the remainder is not zero, add one more integer type to
+ // ArgList.
+ unsigned R = (StructSize - LastOffset) % 64;
+ if (R)
+ ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
+
+ return llvm::StructType::get(getVMContext(), ArgList);
+}
+
+llvm::Type *MipsABIInfo::getPaddingType(uint64_t Align, uint64_t Offset) const {
+ // Padding is inserted only for N32/64.
+ if (IsO32)
+ return 0;
+
+ assert(Align <= 16 && "Alignment larger than 16 not handled.");
+ return (Align == 16 && Offset & 0xf) ?
+ llvm::IntegerType::get(getVMContext(), 64) : 0;
+}
+
+ABIArgInfo
+MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
+ uint64_t OrigOffset = Offset;
+ uint64_t TySize =
+ llvm::RoundUpToAlignment(getContext().getTypeSize(Ty), 64) / 8;
+ uint64_t Align = getContext().getTypeAlign(Ty) / 8;
+ Offset = llvm::RoundUpToAlignment(Offset, std::max(Align, (uint64_t)8));
+ Offset += TySize;
+
+ if (isAggregateTypeForABI(Ty)) {
+ // Ignore empty aggregates.
+ if (TySize == 0)
+ return ABIArgInfo::getIgnore();
+
+ // Records with non-trivial destructors/constructors should not be passed
+ // by value.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty)) {
+ Offset = OrigOffset + 8;
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+ }
+
+ // If we have reached here, aggregates are passed either indirectly via a
+ // byval pointer or directly by coercing to another structure type. In the
+ // latter case, padding is inserted if the offset of the aggregate is
+ // unaligned.
+ llvm::Type *ResType = HandleAggregates(Ty);
+
+ if (!ResType)
+ return ABIArgInfo::getIndirect(0);
+
+ return ABIArgInfo::getDirect(ResType, 0, getPaddingType(Align, OrigOffset));
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ if (Ty->isPromotableIntegerType())
+ return ABIArgInfo::getExtend();
+
+ return ABIArgInfo::getDirect(0, 0, getPaddingType(Align, OrigOffset));
+}
+
+llvm::Type*
+MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
+ const RecordType *RT = RetTy->getAs<RecordType>();
+ SmallVector<llvm::Type*, 2> RTList;
+
+ if (RT && RT->isStructureOrClassType()) {
+ const RecordDecl *RD = RT->getDecl();
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ unsigned FieldCnt = Layout.getFieldCount();
+
+ // N32/64 returns struct/classes in floating point registers if the
+ // following conditions are met:
+ // 1. The size of the struct/class is no larger than 128-bit.
+ // 2. The struct/class has one or two fields all of which are floating
+ // point types.
+ // 3. The offset of the first field is zero (this follows what gcc does).
+ //
+ // Any other composite results are returned in integer registers.
+ //
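+ // For example, struct { float f; double d; } is returned as
+ // { float, double } in floating-point registers, whereas
+ // struct { int i; float f; } falls through and is returned as { i64 }.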
+ if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
+ RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
+ for (; b != e; ++b) {
+ const BuiltinType *BT = (*b)->getType()->getAs<BuiltinType>();
+
+ if (!BT || !BT->isFloatingPoint())
+ break;
+
+ RTList.push_back(CGT.ConvertType((*b)->getType()));
+ }
+
+ if (b == e)
+ return llvm::StructType::get(getVMContext(), RTList,
+ RD->hasAttr<PackedAttr>());
+
+ RTList.clear();
+ }
+ }
+
+ RTList.push_back(llvm::IntegerType::get(getVMContext(),
+ std::min(Size, (uint64_t)64)));
+ if (Size > 64)
+ RTList.push_back(llvm::IntegerType::get(getVMContext(), Size - 64));
+
+ return llvm::StructType::get(getVMContext(), RTList);
+}
+
+ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
+ uint64_t Size = getContext().getTypeSize(RetTy);
+
+ if (RetTy->isVoidType() || Size == 0)
+ return ABIArgInfo::getIgnore();
+
+ if (isAggregateTypeForABI(RetTy)) {
+ if (Size <= 128) {
+ if (RetTy->isAnyComplexType())
+ return ABIArgInfo::getDirect();
+
+ if (!IsO32 && !isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
+ return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
+ }
+
+ return ABIArgInfo::getIndirect(0);
+ }
+
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+}
+
+void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ ABIArgInfo &RetInfo = FI.getReturnInfo();
+ RetInfo = classifyReturnType(FI.getReturnType());
+
+ // Check if a pointer to an aggregate is passed as a hidden argument.
+ uint64_t Offset = RetInfo.isIndirect() ? 8 : 0;
+
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type, Offset);
+}
+
+llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ llvm::Type *BP = CGF.Int8PtrTy;
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
+ llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped;
+ unsigned PtrWidth = getContext().getTargetInfo().getPointerWidth(0);
+ llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
+
+ if (TypeAlign > MinABIStackAlignInBytes) {
+ llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
+ llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
+ llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
+ llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
+ llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
+ AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
+ }
+ else
+ AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
+ TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
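+
+// Worked example (hypothetical addresses, for illustration only): for a type
+// whose alignment is 16 bytes and ap.cur == 0x1008, the add/and sequence
+// above computes (0x1008 + 15) & -16 == 0x1010, and ap.next then advances by
+// the type's size rounded up to that alignment.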
+
+bool
+MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+  // This information comes from gcc's implementation, which seems to be
+  // as canonical as it gets.
+
+ // Everything on MIPS is 4 bytes. Double-precision FP registers
+ // are aliased to pairs of single-precision FP registers.
+ llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
+
+ // 0-31 are the general purpose registers, $0 - $31.
+ // 32-63 are the floating-point registers, $f0 - $f31.
+ // 64 and 65 are the multiply/divide registers, $hi and $lo.
+ // 66 is the (notional, I think) register for signal-handler return.
+ AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
+
+ // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
+ // They are one bit wide and ignored here.
+
+ // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
+ // (coprocessor 1 is the FP unit)
+ // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
+ // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
+ // 176-181 are the DSP accumulator registers.
+ AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
+// Currently subclassed only to implement custom OpenCL C function attribute
+// handling.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
+public:
+ TCETargetCodeGenInfo(CodeGenTypes &CGT)
+ : DefaultTargetCodeGenInfo(CGT) {}
+
+ virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const;
+};
+
+void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
+ llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) return;
+
+ llvm::Function *F = cast<llvm::Function>(GV);
+
+ if (M.getLangOpts().OpenCL) {
+ if (FD->hasAttr<OpenCLKernelAttr>()) {
+ // OpenCL C Kernel functions are not subject to inlining
+ F->addFnAttr(llvm::Attribute::NoInline);
+
+ if (FD->hasAttr<ReqdWorkGroupSizeAttr>()) {
+
+ // Convert the reqd_work_group_size() attributes to metadata.
+ llvm::LLVMContext &Context = F->getContext();
+ llvm::NamedMDNode *OpenCLMetadata =
+ M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");
+
+ SmallVector<llvm::Value*, 5> Operands;
+ Operands.push_back(F);
+
+ Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
+ llvm::APInt(32,
+ FD->getAttr<ReqdWorkGroupSizeAttr>()->getXDim())));
+ Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
+ llvm::APInt(32,
+ FD->getAttr<ReqdWorkGroupSizeAttr>()->getYDim())));
+ Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
+ llvm::APInt(32,
+ FD->getAttr<ReqdWorkGroupSizeAttr>()->getZDim())));
+
+ // Add a boolean constant operand for "required" (true) or "hint" (false)
+ // for implementing the work_group_size_hint attr later. Currently
+ // always true as the hint is not yet implemented.
+ Operands.push_back(llvm::ConstantInt::getTrue(Context));
+ OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
+ }
+ }
+ }
+}
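+
+// For illustration (hypothetical kernel, not from this file): OpenCL source
+// such as
+//   __kernel __attribute__((reqd_work_group_size(8, 4, 1))) void k(void) {}
+// is recorded under !opencl.kernel_wg_size_info roughly as
+//   !{void ()* @k, i32 8, i32 4, i32 1, i1 true}
+// where the trailing i1 true marks the size as required rather than a hint.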
+
+}
+
+//===----------------------------------------------------------------------===//
+// Hexagon ABI Implementation
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class HexagonABIInfo : public ABIInfo {
+public:
+  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
+
+private:
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+ ABIArgInfo classifyArgumentType(QualType RetTy) const;
+
+ virtual void computeInfo(CGFunctionInfo &FI) const;
+
+ virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const;
+};
+
+class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
+ :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ return 29;
+ }
+};
+
+}
+
+void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
+ for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
+ it != ie; ++it)
+ it->info = classifyArgumentType(it->type);
+}
+
+ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ return (Ty->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ // Ignore empty records.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(Ty))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ if (Size > 64)
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+ // Pass in the smallest viable integer type.
+ else if (Size > 32)
+ return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
+ else if (Size > 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ else if (Size > 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ else
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+}
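+
+// For illustration (hypothetical types): a 6-byte struct (48 bits) lands in
+// the 'Size > 32' bucket above and is coerced to i64, a 3-byte struct is
+// coerced to i32, and anything wider than 64 bits is passed indirectly with
+// the byval attribute.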
+
+ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ // Large vector types should be returned via memory.
+ if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
+ return ABIArgInfo::getIndirect(0);
+
+ if (!isAggregateTypeForABI(RetTy)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
+ RetTy = EnumTy->getDecl()->getIntegerType();
+
+ return (RetTy->isPromotableIntegerType() ?
+ ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
+ }
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always indirect.
+ if (isRecordWithNonTrivialDestructorOrCopyConstructor(RetTy))
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
+
+ if (isEmptyRecord(getContext(), RetTy, true))
+ return ABIArgInfo::getIgnore();
+
+ // Aggregates <= 8 bytes are returned in r0; other aggregates
+ // are returned indirectly.
+ uint64_t Size = getContext().getTypeSize(RetTy);
+ if (Size <= 64) {
+ // Return in the smallest viable integer type.
+ if (Size <= 8)
+ return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
+ if (Size <= 16)
+ return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
+ if (Size <= 32)
+ return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
+ return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
+ }
+
+ return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
+}
+
+llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
+ CodeGenFunction &CGF) const {
+ // FIXME: Need to handle alignment
+ llvm::Type *BPP = CGF.Int8PtrPtrTy;
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset =
+ llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
+}
+
+
+const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
+ if (TheTargetCodeGenInfo)
+ return *TheTargetCodeGenInfo;
+
+ const llvm::Triple &Triple = getContext().getTargetInfo().getTriple();
+ switch (Triple.getArch()) {
+ default:
+ return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
+
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
+
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
+
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ {
+ ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
+
+ if (strcmp(getContext().getTargetInfo().getABI(), "apcs-gnu") == 0)
+ Kind = ARMABIInfo::APCS;
+ else if (CodeGenOpts.FloatABI == "hard")
+ Kind = ARMABIInfo::AAPCS_VFP;
+
+ return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
+ }
+
+ case llvm::Triple::ppc:
+ return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
+
+ case llvm::Triple::ptx32:
+ case llvm::Triple::ptx64:
+ return *(TheTargetCodeGenInfo = new PTXTargetCodeGenInfo(Types));
+
+ case llvm::Triple::mblaze:
+ return *(TheTargetCodeGenInfo = new MBlazeTargetCodeGenInfo(Types));
+
+ case llvm::Triple::msp430:
+ return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
+
+ case llvm::Triple::tce:
+ return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));
+
+ case llvm::Triple::x86: {
+ bool DisableMMX = strcmp(getContext().getTargetInfo().getABI(), "no-mmx") == 0;
+
+ if (Triple.isOSDarwin())
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(
+ Types, true, true, DisableMMX, false));
+
+ switch (Triple.getOS()) {
+ case llvm::Triple::Cygwin:
+ case llvm::Triple::MinGW32:
+ case llvm::Triple::AuroraUX:
+ case llvm::Triple::DragonFly:
+ case llvm::Triple::FreeBSD:
+ case llvm::Triple::OpenBSD:
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(
+ Types, false, true, DisableMMX, false));
+
+ case llvm::Triple::Win32:
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(
+ Types, false, true, DisableMMX, true));
+
+ default:
+ return *(TheTargetCodeGenInfo =
+ new X86_32TargetCodeGenInfo(
+ Types, false, false, DisableMMX, false));
+ }
+ }
+
+ case llvm::Triple::x86_64: {
+ bool HasAVX = strcmp(getContext().getTargetInfo().getABI(), "avx") == 0;
+
+ switch (Triple.getOS()) {
+ case llvm::Triple::Win32:
+ case llvm::Triple::MinGW32:
+ case llvm::Triple::Cygwin:
+ return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
+ default:
+ return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
+ HasAVX));
+ }
+ }
+ case llvm::Triple::hexagon:
+ return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
+ }
+}
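+
+// For illustration (hypothetical triples): "mips64el-unknown-linux" takes the
+// mips64el case above and builds the MIPS target info in its non-O32
+// configuration, while an ARM triple compiled with a hard-float ABI selects
+// ARMABIInfo::AAPCS_VFP unless the "apcs-gnu" ABI string was requested.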
diff --git a/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h
new file mode 100644
index 0000000..88b4997
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/CodeGen/TargetInfo.h
@@ -0,0 +1,170 @@
+//===---- TargetInfo.h - Encapsulate target details -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These classes wrap the information about a call or function
+// definition used to handle ABI compliancy.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_TARGETINFO_H
+#define CLANG_CODEGEN_TARGETINFO_H
+
+#include "clang/Basic/LLVM.h"
+#include "clang/AST/Type.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+ class GlobalValue;
+ class Type;
+ class Value;
+}
+
+namespace clang {
+ class ABIInfo;
+ class Decl;
+
+ namespace CodeGen {
+ class CallArgList;
+ class CodeGenModule;
+ class CodeGenFunction;
+ class CGFunctionInfo;
+ }
+
+  /// TargetCodeGenInfo - This class organizes various target-specific
+  /// code-generation issues, such as target-specific attributes, builtins,
+  /// and so on.
+ class TargetCodeGenInfo {
+ ABIInfo *Info;
+ public:
+ // WARNING: Acquires the ownership of ABIInfo.
+ TargetCodeGenInfo(ABIInfo *info = 0):Info(info) { }
+ virtual ~TargetCodeGenInfo();
+
+ /// getABIInfo() - Returns ABI info helper for the target.
+ const ABIInfo& getABIInfo() const { return *Info; }
+
+ /// SetTargetAttributes - Provides a convenient hook to handle extra
+ /// target-specific attributes for the given global.
+ virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
+ CodeGen::CodeGenModule &M) const { }
+
+ /// Determines the size of struct _Unwind_Exception on this platform,
+ /// in 8-bit units. The Itanium ABI defines this as:
+ /// struct _Unwind_Exception {
+ /// uint64 exception_class;
+ /// _Unwind_Exception_Cleanup_Fn exception_cleanup;
+ /// uint64 private_1;
+ /// uint64 private_2;
+ /// };
+ virtual unsigned getSizeOfUnwindException() const;
+
+ /// Controls whether __builtin_extend_pointer should sign-extend
+ /// pointers to uint64_t or zero-extend them (the default). Has
+ /// no effect for targets:
+ /// - that have 64-bit pointers, or
+ /// - that cannot address through registers larger than pointers, or
+ /// - that implicitly ignore/truncate the top bits when addressing
+ /// through such registers.
+ virtual bool extendPointerWithSExt() const { return false; }
+
+ /// Determines the DWARF register number for the stack pointer, for
+ /// exception-handling purposes. Implements __builtin_dwarf_sp_column.
+ ///
+ /// Returns -1 if the operation is unsupported by this target.
+ virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
+ return -1;
+ }
+
+ /// Initializes the given DWARF EH register-size table, a char*.
+ /// Implements __builtin_init_dwarf_reg_size_table.
+ ///
+ /// Returns true if the operation is unsupported by this target.
+ virtual bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return true;
+ }
+
+ /// Performs the code-generation required to convert a return
+ /// address as stored by the system into the actual address of the
+ /// next instruction that will be executed.
+ ///
+ /// Used by __builtin_extract_return_addr().
+ virtual llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return Address;
+ }
+
+ /// Performs the code-generation required to convert the address
+ /// of an instruction into a return address suitable for storage
+ /// by the system in a return slot.
+ ///
+ /// Used by __builtin_frob_return_addr().
+ virtual llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return Address;
+ }
+
+ virtual llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
+ StringRef Constraint,
+ llvm::Type* Ty) const {
+ return Ty;
+ }
+
+ /// Retrieve the address of a function to call immediately before
+ /// calling objc_retainAutoreleasedReturnValue. The
+ /// implementation of objc_autoreleaseReturnValue sniffs the
+ /// instruction stream following its return address to decide
+ /// whether it's a call to objc_retainAutoreleasedReturnValue.
+ /// This can be prohibitively expensive, depending on the
+ /// relocation model, and so on some targets it instead sniffs for
+  /// a particular instruction sequence. This function returns
+ /// that instruction sequence in inline assembly, which will be
+ /// empty if none is required.
+ virtual StringRef getARCRetainAutoreleasedReturnValueMarker() const {
+ return "";
+ }
+
+  /// Determine whether a call to an unprototyped function under
+ /// the given calling convention should use the variadic
+ /// convention or the non-variadic convention.
+ ///
+ /// There's a good reason to make a platform's variadic calling
+ /// convention be different from its non-variadic calling
+ /// convention: the non-variadic arguments can be passed in
+ /// registers (better for performance), and the variadic arguments
+ /// can be passed on the stack (also better for performance). If
+ /// this is done, however, unprototyped functions *must* use the
+ /// non-variadic convention, because C99 states that a call
+ /// through an unprototyped function type must succeed if the
+ /// function was defined with a non-variadic prototype with
+ /// compatible parameters. Therefore, splitting the conventions
+ /// makes it impossible to call a variadic function through an
+ /// unprototyped type. Since function prototypes came out in the
+ /// late 1970s, this is probably an acceptable trade-off.
+ /// Nonetheless, not all platforms are willing to make it, and in
+  /// particular, x86-64 bends over backwards to make the
+ /// conventions compatible.
+ ///
+ /// The default is false. This is correct whenever:
+ /// - the conventions are exactly the same, because it does not
+ /// matter and the resulting IR will be somewhat prettier in
+ /// certain cases; or
+ /// - the conventions are substantively different in how they pass
+ /// arguments, because in this case using the variadic convention
+ /// will lead to C99 violations.
+ /// It is not necessarily correct when arguments are passed in the
+ /// same way and some out-of-band information is passed for the
+ /// benefit of variadic callees, as is the case for x86-64.
+ /// In this case the ABI should be consulted.
+ virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args,
+ const FunctionNoProtoType *fnType) const;
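+
+    // For illustration (hypothetical translation unit): C89-style code like
+    //   void impl(int x);                  /* non-variadic prototype */
+    //   void use(void (*fp)()) { fp(42); } /* call through unprototyped type */
+    //   ... use((void (*)())impl) ...
+    // must still reach impl correctly, which is why unprototyped calls use
+    // the non-variadic convention by default.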
+ };
+}
+
+#endif // CLANG_CODEGEN_TARGETINFO_H
diff --git a/contrib/llvm/tools/clang/lib/Driver/Action.cpp b/contrib/llvm/tools/clang/lib/Driver/Action.cpp
new file mode 100644
index 0000000..d7b4bc7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Action.cpp
@@ -0,0 +1,122 @@
+//===--- Action.cpp - Abstract compilation steps --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Action.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#include <cassert>
+using namespace clang::driver;
+
+Action::~Action() {
+ if (OwnsInputs) {
+ for (iterator it = begin(), ie = end(); it != ie; ++it)
+ delete *it;
+ }
+}
+
+const char *Action::getClassName(ActionClass AC) {
+ switch (AC) {
+ case InputClass: return "input";
+ case BindArchClass: return "bind-arch";
+ case PreprocessJobClass: return "preprocessor";
+ case PrecompileJobClass: return "precompiler";
+ case AnalyzeJobClass: return "analyzer";
+ case MigrateJobClass: return "migrator";
+ case CompileJobClass: return "compiler";
+ case AssembleJobClass: return "assembler";
+ case LinkJobClass: return "linker";
+ case LipoJobClass: return "lipo";
+ case DsymutilJobClass: return "dsymutil";
+ case VerifyJobClass: return "verify";
+ }
+
+ llvm_unreachable("invalid class");
+}
+
+void InputAction::anchor() {}
+
+InputAction::InputAction(const Arg &_Input, types::ID _Type)
+ : Action(InputClass, _Type), Input(_Input) {
+}
+
+void BindArchAction::anchor() {}
+
+BindArchAction::BindArchAction(Action *Input, const char *_ArchName)
+ : Action(BindArchClass, Input, Input->getType()), ArchName(_ArchName) {
+}
+
+void JobAction::anchor() {}
+
+JobAction::JobAction(ActionClass Kind, Action *Input, types::ID Type)
+ : Action(Kind, Input, Type) {
+}
+
+JobAction::JobAction(ActionClass Kind, const ActionList &Inputs, types::ID Type)
+ : Action(Kind, Inputs, Type) {
+}
+
+void PreprocessJobAction::anchor() {}
+
+PreprocessJobAction::PreprocessJobAction(Action *Input, types::ID OutputType)
+ : JobAction(PreprocessJobClass, Input, OutputType) {
+}
+
+void PrecompileJobAction::anchor() {}
+
+PrecompileJobAction::PrecompileJobAction(Action *Input, types::ID OutputType)
+ : JobAction(PrecompileJobClass, Input, OutputType) {
+}
+
+void AnalyzeJobAction::anchor() {}
+
+AnalyzeJobAction::AnalyzeJobAction(Action *Input, types::ID OutputType)
+ : JobAction(AnalyzeJobClass, Input, OutputType) {
+}
+
+void MigrateJobAction::anchor() {}
+
+MigrateJobAction::MigrateJobAction(Action *Input, types::ID OutputType)
+ : JobAction(MigrateJobClass, Input, OutputType) {
+}
+
+void CompileJobAction::anchor() {}
+
+CompileJobAction::CompileJobAction(Action *Input, types::ID OutputType)
+ : JobAction(CompileJobClass, Input, OutputType) {
+}
+
+void AssembleJobAction::anchor() {}
+
+AssembleJobAction::AssembleJobAction(Action *Input, types::ID OutputType)
+ : JobAction(AssembleJobClass, Input, OutputType) {
+}
+
+void LinkJobAction::anchor() {}
+
+LinkJobAction::LinkJobAction(ActionList &Inputs, types::ID Type)
+ : JobAction(LinkJobClass, Inputs, Type) {
+}
+
+void LipoJobAction::anchor() {}
+
+LipoJobAction::LipoJobAction(ActionList &Inputs, types::ID Type)
+ : JobAction(LipoJobClass, Inputs, Type) {
+}
+
+void DsymutilJobAction::anchor() {}
+
+DsymutilJobAction::DsymutilJobAction(ActionList &Inputs, types::ID Type)
+ : JobAction(DsymutilJobClass, Inputs, Type) {
+}
+
+void VerifyJobAction::anchor() {}
+
+VerifyJobAction::VerifyJobAction(ActionList &Inputs, types::ID Type)
+ : JobAction(VerifyJobClass, Inputs, Type) {
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Arg.cpp b/contrib/llvm/tools/clang/lib/Driver/Arg.cpp
new file mode 100644
index 0000000..c0a2a50
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Arg.cpp
@@ -0,0 +1,121 @@
+//===--- Arg.cpp - Argument Implementations -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Option.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang::driver;
+
+Arg::Arg(const Option *_Opt, unsigned _Index, const Arg *_BaseArg)
+ : Opt(_Opt), BaseArg(_BaseArg), Index(_Index),
+ Claimed(false), OwnsValues(false) {
+}
+
+Arg::Arg(const Option *_Opt, unsigned _Index,
+ const char *Value0, const Arg *_BaseArg)
+ : Opt(_Opt), BaseArg(_BaseArg), Index(_Index),
+ Claimed(false), OwnsValues(false) {
+ Values.push_back(Value0);
+}
+
+Arg::Arg(const Option *_Opt, unsigned _Index,
+ const char *Value0, const char *Value1, const Arg *_BaseArg)
+ : Opt(_Opt), BaseArg(_BaseArg), Index(_Index),
+ Claimed(false), OwnsValues(false) {
+ Values.push_back(Value0);
+ Values.push_back(Value1);
+}
+
+Arg::~Arg() {
+ if (OwnsValues) {
+ for (unsigned i = 0, e = Values.size(); i != e; ++i)
+ delete[] Values[i];
+ }
+}
+
+void Arg::dump() const {
+ llvm::errs() << "<";
+
+ llvm::errs() << " Opt:";
+ Opt->dump();
+
+ llvm::errs() << " Index:" << Index;
+
+ llvm::errs() << " Values: [";
+ for (unsigned i = 0, e = Values.size(); i != e; ++i) {
+ if (i) llvm::errs() << ", ";
+ llvm::errs() << "'" << Values[i] << "'";
+ }
+
+ llvm::errs() << "]>\n";
+}
+
+std::string Arg::getAsString(const ArgList &Args) const {
+ SmallString<256> Res;
+ llvm::raw_svector_ostream OS(Res);
+
+ ArgStringList ASL;
+ render(Args, ASL);
+ for (ArgStringList::iterator
+ it = ASL.begin(), ie = ASL.end(); it != ie; ++it) {
+ if (it != ASL.begin())
+ OS << ' ';
+ OS << *it;
+ }
+
+ return OS.str();
+}
+
+void Arg::renderAsInput(const ArgList &Args, ArgStringList &Output) const {
+ if (!getOption().hasNoOptAsInput()) {
+ render(Args, Output);
+ return;
+ }
+
+ for (unsigned i = 0, e = getNumValues(); i != e; ++i)
+ Output.push_back(getValue(Args, i));
+}
+
+void Arg::render(const ArgList &Args, ArgStringList &Output) const {
+ switch (getOption().getRenderStyle()) {
+ case Option::RenderValuesStyle:
+ for (unsigned i = 0, e = getNumValues(); i != e; ++i)
+ Output.push_back(getValue(Args, i));
+ break;
+
+ case Option::RenderCommaJoinedStyle: {
+ SmallString<256> Res;
+ llvm::raw_svector_ostream OS(Res);
+ OS << getOption().getName();
+ for (unsigned i = 0, e = getNumValues(); i != e; ++i) {
+ if (i) OS << ',';
+ OS << getValue(Args, i);
+ }
+ Output.push_back(Args.MakeArgString(OS.str()));
+ break;
+ }
+
+ case Option::RenderJoinedStyle:
+ Output.push_back(Args.GetOrMakeJoinedArgString(
+ getIndex(), getOption().getName(), getValue(Args, 0)));
+ for (unsigned i = 1, e = getNumValues(); i != e; ++i)
+ Output.push_back(getValue(Args, i));
+ break;
+
+ case Option::RenderSeparateStyle:
+ Output.push_back(getOption().getName().data());
+ for (unsigned i = 0, e = getNumValues(); i != e; ++i)
+ Output.push_back(getValue(Args, i));
+ break;
+ }
+}
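+
+// For illustration (hypothetical options): an option using RenderJoinedStyle
+// with name "-I" and value "foo" renders as the single string "-Ifoo", a
+// RenderSeparateStyle option "-o" with value "out.o" renders as the two
+// strings "-o" and "out.o", and a RenderCommaJoinedStyle option named "-Wl,"
+// with values "a" and "b" renders as "-Wl,a,b".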
diff --git a/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp b/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp
new file mode 100644
index 0000000..55a0ddf
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/ArgList.cpp
@@ -0,0 +1,333 @@
+//===--- ArgList.cpp - Argument List Management ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Option.h"
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace clang::driver;
+
+void arg_iterator::SkipToNextArg() {
+ for (; Current != Args.end(); ++Current) {
+ // Done if there are no filters.
+ if (!Id0.isValid())
+ break;
+
+ // Otherwise require a match.
+ const Option &O = (*Current)->getOption();
+ if (O.matches(Id0) ||
+ (Id1.isValid() && O.matches(Id1)) ||
+ (Id2.isValid() && O.matches(Id2)))
+ break;
+ }
+}
+
+//
+
+ArgList::ArgList() {
+}
+
+ArgList::~ArgList() {
+}
+
+void ArgList::append(Arg *A) {
+ Args.push_back(A);
+}
+
+void ArgList::eraseArg(OptSpecifier Id) {
+ for (iterator it = begin(), ie = end(); it != ie; ) {
+ if ((*it)->getOption().matches(Id)) {
+ it = Args.erase(it);
+ ie = end();
+ } else {
+ ++it;
+ }
+ }
+}
+
+Arg *ArgList::getLastArgNoClaim(OptSpecifier Id) const {
+ // FIXME: Make search efficient?
+ for (const_reverse_iterator it = rbegin(), ie = rend(); it != ie; ++it)
+ if ((*it)->getOption().matches(Id))
+ return *it;
+ return 0;
+}
+
+Arg *ArgList::getLastArg(OptSpecifier Id) const {
+ Arg *Res = 0;
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
+ if ((*it)->getOption().matches(Id)) {
+ Res = *it;
+ Res->claim();
+ }
+ }
+
+ return Res;
+}
+
+Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1) const {
+ Arg *Res = 0;
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
+ if ((*it)->getOption().matches(Id0) ||
+ (*it)->getOption().matches(Id1)) {
+ Res = *it;
+      Res->claim();
+ }
+ }
+
+ return Res;
+}
+
+Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
+ OptSpecifier Id2) const {
+ Arg *Res = 0;
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
+ if ((*it)->getOption().matches(Id0) ||
+ (*it)->getOption().matches(Id1) ||
+ (*it)->getOption().matches(Id2)) {
+ Res = *it;
+ Res->claim();
+ }
+ }
+
+ return Res;
+}
+
+Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
+ OptSpecifier Id2, OptSpecifier Id3) const {
+ Arg *Res = 0;
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
+ if ((*it)->getOption().matches(Id0) ||
+ (*it)->getOption().matches(Id1) ||
+ (*it)->getOption().matches(Id2) ||
+ (*it)->getOption().matches(Id3)) {
+ Res = *it;
+ Res->claim();
+ }
+ }
+
+ return Res;
+}
+
+Arg *ArgList::getLastArg(OptSpecifier Id0, OptSpecifier Id1,
+ OptSpecifier Id2, OptSpecifier Id3,
+ OptSpecifier Id4) const {
+ Arg *Res = 0;
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it) {
+ if ((*it)->getOption().matches(Id0) ||
+ (*it)->getOption().matches(Id1) ||
+ (*it)->getOption().matches(Id2) ||
+ (*it)->getOption().matches(Id3) ||
+ (*it)->getOption().matches(Id4)) {
+ Res = *it;
+ Res->claim();
+ }
+ }
+
+ return Res;
+}
+
+bool ArgList::hasFlag(OptSpecifier Pos, OptSpecifier Neg, bool Default) const {
+ if (Arg *A = getLastArg(Pos, Neg))
+ return A->getOption().matches(Pos);
+ return Default;
+}
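+
+// For illustration (hypothetical option ids): hasFlag(OPT_ffoo, OPT_fno_foo,
+// true) reports whether the last of -ffoo/-fno-foo on the command line was
+// the positive spelling, and falls back to the supplied default when neither
+// option is present.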
+
+StringRef ArgList::getLastArgValue(OptSpecifier Id,
+ StringRef Default) const {
+ if (Arg *A = getLastArg(Id))
+ return A->getValue(*this);
+ return Default;
+}
+
+int ArgList::getLastArgIntValue(OptSpecifier Id, int Default,
+ clang::DiagnosticsEngine *Diags) const {
+ int Res = Default;
+
+ if (Arg *A = getLastArg(Id)) {
+ if (StringRef(A->getValue(*this)).getAsInteger(10, Res)) {
+ if (Diags)
+ Diags->Report(diag::err_drv_invalid_int_value)
+ << A->getAsString(*this) << A->getValue(*this);
+ }
+ }
+
+ return Res;
+}
+
+std::vector<std::string> ArgList::getAllArgValues(OptSpecifier Id) const {
+ SmallVector<const char *, 16> Values;
+ AddAllArgValues(Values, Id);
+ return std::vector<std::string>(Values.begin(), Values.end());
+}
+
+void ArgList::AddLastArg(ArgStringList &Output, OptSpecifier Id) const {
+ if (Arg *A = getLastArg(Id)) {
+ A->claim();
+ A->render(*this, Output);
+ }
+}
+
+void ArgList::AddAllArgs(ArgStringList &Output, OptSpecifier Id0,
+ OptSpecifier Id1, OptSpecifier Id2) const {
+ for (arg_iterator it = filtered_begin(Id0, Id1, Id2),
+ ie = filtered_end(); it != ie; ++it) {
+ (*it)->claim();
+ (*it)->render(*this, Output);
+ }
+}
+
+void ArgList::AddAllArgValues(ArgStringList &Output, OptSpecifier Id0,
+ OptSpecifier Id1, OptSpecifier Id2) const {
+ for (arg_iterator it = filtered_begin(Id0, Id1, Id2),
+ ie = filtered_end(); it != ie; ++it) {
+ (*it)->claim();
+ for (unsigned i = 0, e = (*it)->getNumValues(); i != e; ++i)
+ Output.push_back((*it)->getValue(*this, i));
+ }
+}
+
+void ArgList::AddAllArgsTranslated(ArgStringList &Output, OptSpecifier Id0,
+ const char *Translation,
+ bool Joined) const {
+ for (arg_iterator it = filtered_begin(Id0),
+ ie = filtered_end(); it != ie; ++it) {
+ (*it)->claim();
+
+ if (Joined) {
+ Output.push_back(MakeArgString(StringRef(Translation) +
+ (*it)->getValue(*this, 0)));
+ } else {
+ Output.push_back(Translation);
+ Output.push_back((*it)->getValue(*this, 0));
+ }
+ }
+}
+
+void ArgList::ClaimAllArgs(OptSpecifier Id0) const {
+ for (arg_iterator it = filtered_begin(Id0),
+ ie = filtered_end(); it != ie; ++it)
+ (*it)->claim();
+}
+
+void ArgList::ClaimAllArgs() const {
+ for (const_iterator it = begin(), ie = end(); it != ie; ++it)
+ if (!(*it)->isClaimed())
+ (*it)->claim();
+}
+
+const char *ArgList::MakeArgString(const Twine &T) const {
+ SmallString<256> Str;
+ T.toVector(Str);
+ return MakeArgString(Str.str());
+}
+
+const char *ArgList::GetOrMakeJoinedArgString(unsigned Index,
+ StringRef LHS,
+ StringRef RHS) const {
+ StringRef Cur = getArgString(Index);
+ if (Cur.size() == LHS.size() + RHS.size() &&
+ Cur.startswith(LHS) && Cur.endswith(RHS))
+ return Cur.data();
+
+ return MakeArgString(LHS + RHS);
+}
+
+//
+
+InputArgList::InputArgList(const char* const *ArgBegin,
+ const char* const *ArgEnd)
+ : NumInputArgStrings(ArgEnd - ArgBegin) {
+ ArgStrings.append(ArgBegin, ArgEnd);
+}
+
+InputArgList::~InputArgList() {
+ // An InputArgList always owns its arguments.
+ for (iterator it = begin(), ie = end(); it != ie; ++it)
+ delete *it;
+}
+
+unsigned InputArgList::MakeIndex(StringRef String0) const {
+ unsigned Index = ArgStrings.size();
+
+ // Tuck away so we have a reliable const char *.
+ SynthesizedStrings.push_back(String0);
+ ArgStrings.push_back(SynthesizedStrings.back().c_str());
+
+ return Index;
+}
+
+unsigned InputArgList::MakeIndex(StringRef String0,
+ StringRef String1) const {
+ unsigned Index0 = MakeIndex(String0);
+ unsigned Index1 = MakeIndex(String1);
+ assert(Index0 + 1 == Index1 && "Unexpected non-consecutive indices!");
+ (void) Index1;
+ return Index0;
+}
+
+const char *InputArgList::MakeArgString(StringRef Str) const {
+ return getArgString(MakeIndex(Str));
+}
+
+//
+
+DerivedArgList::DerivedArgList(const InputArgList &_BaseArgs)
+ : BaseArgs(_BaseArgs) {
+}
+
+DerivedArgList::~DerivedArgList() {
+ // We only own the arguments we explicitly synthesized.
+ for (iterator it = SynthesizedArgs.begin(), ie = SynthesizedArgs.end();
+ it != ie; ++it)
+ delete *it;
+}
+
+const char *DerivedArgList::MakeArgString(StringRef Str) const {
+ return BaseArgs.MakeArgString(Str);
+}
+
+Arg *DerivedArgList::MakeFlagArg(const Arg *BaseArg, const Option *Opt) const {
+ Arg *A = new Arg(Opt, BaseArgs.MakeIndex(Opt->getName()), BaseArg);
+ SynthesizedArgs.push_back(A);
+ return A;
+}
+
+Arg *DerivedArgList::MakePositionalArg(const Arg *BaseArg, const Option *Opt,
+ StringRef Value) const {
+ unsigned Index = BaseArgs.MakeIndex(Value);
+ Arg *A = new Arg(Opt, Index, BaseArgs.getArgString(Index), BaseArg);
+ SynthesizedArgs.push_back(A);
+ return A;
+}
+
+Arg *DerivedArgList::MakeSeparateArg(const Arg *BaseArg, const Option *Opt,
+ StringRef Value) const {
+ unsigned Index = BaseArgs.MakeIndex(Opt->getName(), Value);
+ Arg *A = new Arg(Opt, Index, BaseArgs.getArgString(Index + 1), BaseArg);
+ SynthesizedArgs.push_back(A);
+ return A;
+}
+
+Arg *DerivedArgList::MakeJoinedArg(const Arg *BaseArg, const Option *Opt,
+ StringRef Value) const {
+ unsigned Index = BaseArgs.MakeIndex(Opt->getName().str() + Value.str());
+ Arg *A = new Arg(Opt, Index,
+ BaseArgs.getArgString(Index) + Opt->getName().size(),
+ BaseArg);
+ SynthesizedArgs.push_back(A);
+ return A;
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/CC1AsOptions.cpp b/contrib/llvm/tools/clang/lib/Driver/CC1AsOptions.cpp
new file mode 100644
index 0000000..ea80f5a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/CC1AsOptions.cpp
@@ -0,0 +1,39 @@
+//===--- CC1AsOptions.cpp - Clang Assembler Options Table -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/CC1AsOptions.h"
+#include "clang/Driver/Option.h"
+#include "clang/Driver/OptTable.h"
+using namespace clang;
+using namespace clang::driver;
+using namespace clang::driver::options;
+using namespace clang::driver::cc1asoptions;
+
+static const OptTable::Info CC1AsInfoTable[] = {
+#define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR) \
+ { NAME, HELPTEXT, METAVAR, Option::KIND##Class, PARAM, FLAGS, \
+ OPT_##GROUP, OPT_##ALIAS },
+#include "clang/Driver/CC1AsOptions.inc"
+};
+
+namespace {
+
+class CC1AsOptTable : public OptTable {
+public:
+ CC1AsOptTable()
+ : OptTable(CC1AsInfoTable,
+ sizeof(CC1AsInfoTable) / sizeof(CC1AsInfoTable[0])) {}
+};
+
+}
+
+OptTable *clang::driver::createCC1AsOptTable() {
+ return new CC1AsOptTable();
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/CC1Options.cpp b/contrib/llvm/tools/clang/lib/Driver/CC1Options.cpp
new file mode 100644
index 0000000..884b363
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/CC1Options.cpp
@@ -0,0 +1,38 @@
+//===--- CC1Options.cpp - Clang CC1 Options Table -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/CC1Options.h"
+#include "clang/Driver/Option.h"
+#include "clang/Driver/OptTable.h"
+using namespace clang;
+using namespace clang::driver;
+using namespace clang::driver::options;
+using namespace clang::driver::cc1options;
+
+static const OptTable::Info CC1InfoTable[] = {
+#define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR) \
+ { NAME, HELPTEXT, METAVAR, Option::KIND##Class, PARAM, FLAGS, \
+ OPT_##GROUP, OPT_##ALIAS },
+#include "clang/Driver/CC1Options.inc"
+};
+
+namespace {
+
+class CC1OptTable : public OptTable {
+public:
+ CC1OptTable()
+ : OptTable(CC1InfoTable, sizeof(CC1InfoTable) / sizeof(CC1InfoTable[0])) {}
+};
+
+}
+
+OptTable *clang::driver::createCC1OptTable() {
+ return new CC1OptTable();
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp b/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
new file mode 100644
index 0000000..42c8449
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Compilation.cpp
@@ -0,0 +1,232 @@
+//===--- Compilation.cpp - Compilation Task Implementation ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Compilation.h"
+
+#include "clang/Driver/Action.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/ToolChain.h"
+
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Program.h"
+#include <sys/stat.h>
+#include <errno.h>
+
+using namespace clang::driver;
+using namespace clang;
+
+Compilation::Compilation(const Driver &D, const ToolChain &_DefaultToolChain,
+ InputArgList *_Args, DerivedArgList *_TranslatedArgs)
+ : TheDriver(D), DefaultToolChain(_DefaultToolChain), Args(_Args),
+ TranslatedArgs(_TranslatedArgs), Redirects(0) {
+}
+
+Compilation::~Compilation() {
+ delete TranslatedArgs;
+ delete Args;
+
+ // Free any derived arg lists.
+ for (llvm::DenseMap<std::pair<const ToolChain*, const char*>,
+ DerivedArgList*>::iterator it = TCArgs.begin(),
+ ie = TCArgs.end(); it != ie; ++it)
+ if (it->second != TranslatedArgs)
+ delete it->second;
+
+ // Free the actions, if built.
+ for (ActionList::iterator it = Actions.begin(), ie = Actions.end();
+ it != ie; ++it)
+ delete *it;
+
+ // Free redirections of stdout/stderr.
+ if (Redirects) {
+ delete Redirects[1];
+ delete Redirects[2];
+ delete [] Redirects;
+ }
+}
+
+const DerivedArgList &Compilation::getArgsForToolChain(const ToolChain *TC,
+ const char *BoundArch) {
+ if (!TC)
+ TC = &DefaultToolChain;
+
+ DerivedArgList *&Entry = TCArgs[std::make_pair(TC, BoundArch)];
+ if (!Entry) {
+ Entry = TC->TranslateArgs(*TranslatedArgs, BoundArch);
+ if (!Entry)
+ Entry = TranslatedArgs;
+ }
+
+ return *Entry;
+}
+
+void Compilation::PrintJob(raw_ostream &OS, const Job &J,
+ const char *Terminator, bool Quote) const {
+ if (const Command *C = dyn_cast<Command>(&J)) {
+ OS << " \"" << C->getExecutable() << '"';
+ for (ArgStringList::const_iterator it = C->getArguments().begin(),
+ ie = C->getArguments().end(); it != ie; ++it) {
+ OS << ' ';
+ if (!Quote && !std::strpbrk(*it, " \"\\$")) {
+ OS << *it;
+ continue;
+ }
+
+ // Quote the argument and escape shell special characters; this isn't
+ // really complete but is good enough.
+ OS << '"';
+ for (const char *s = *it; *s; ++s) {
+ if (*s == '"' || *s == '\\' || *s == '$')
+ OS << '\\';
+ OS << *s;
+ }
+ OS << '"';
+ }
+ OS << Terminator;
+ } else {
+ const JobList *Jobs = cast<JobList>(&J);
+ for (JobList::const_iterator
+ it = Jobs->begin(), ie = Jobs->end(); it != ie; ++it)
+ PrintJob(OS, **it, Terminator, Quote);
+ }
+}
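+
+// For illustration (hypothetical argument): when the driver echoes a job
+// whose argument contains a double quote, e.g. -DMSG="hi", the loop above
+// emits it quoted and escaped as "-DMSG=\"hi\"", since '"', '\' and '$' are
+// the shell-special characters handled here.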
+
+bool Compilation::CleanupFileList(const ArgStringList &Files,
+ bool IssueErrors) const {
+ bool Success = true;
+
+ for (ArgStringList::const_iterator
+ it = Files.begin(), ie = Files.end(); it != ie; ++it) {
+
+ llvm::sys::Path P(*it);
+ std::string Error;
+
+ // Don't try to remove files which we don't have write access to (but may be
+ // able to remove). Underlying tools may have intentionally not overwritten
+ // them.
+ if (!P.canWrite())
+ continue;
+
+ if (P.eraseFromDisk(false, &Error)) {
+ // Failure is only failure if the file exists and is "regular". There is
+ // a race condition here due to the limited interface of
+      // llvm::sys::Path; we want to know if the removal gave ENOENT.
+
+ // FIXME: Grumble, P.exists() is broken. PR3837.
+ struct stat buf;
+ if (::stat(P.c_str(), &buf) == 0 ? (buf.st_mode & S_IFMT) == S_IFREG :
+ (errno != ENOENT)) {
+ if (IssueErrors)
+ getDriver().Diag(clang::diag::err_drv_unable_to_remove_file)
+ << Error;
+ Success = false;
+ }
+ }
+ }
+
+ return Success;
+}
+
+int Compilation::ExecuteCommand(const Command &C,
+ const Command *&FailingCommand) const {
+ llvm::sys::Path Prog(C.getExecutable());
+ const char **Argv = new const char*[C.getArguments().size() + 2];
+ Argv[0] = C.getExecutable();
+ std::copy(C.getArguments().begin(), C.getArguments().end(), Argv+1);
+ Argv[C.getArguments().size() + 1] = 0;
+
+ if ((getDriver().CCCEcho || getDriver().CCPrintOptions ||
+ getArgs().hasArg(options::OPT_v)) && !getDriver().CCGenDiagnostics) {
+ raw_ostream *OS = &llvm::errs();
+
+ // Follow gcc implementation of CC_PRINT_OPTIONS; we could also cache the
+ // output stream.
+ if (getDriver().CCPrintOptions && getDriver().CCPrintOptionsFilename) {
+ std::string Error;
+ OS = new llvm::raw_fd_ostream(getDriver().CCPrintOptionsFilename,
+ Error,
+ llvm::raw_fd_ostream::F_Append);
+ if (!Error.empty()) {
+ getDriver().Diag(clang::diag::err_drv_cc_print_options_failure)
+ << Error;
+ FailingCommand = &C;
+ delete OS;
+ return 1;
+ }
+ }
+
+ if (getDriver().CCPrintOptions)
+ *OS << "[Logging clang options]";
+
+ PrintJob(*OS, C, "\n", /*Quote=*/getDriver().CCPrintOptions);
+
+ if (OS != &llvm::errs())
+ delete OS;
+ }
+
+ std::string Error;
+ int Res =
+ llvm::sys::Program::ExecuteAndWait(Prog, Argv,
+ /*env*/0, Redirects,
+ /*secondsToWait*/0, /*memoryLimit*/0,
+ &Error);
+ if (!Error.empty()) {
+ assert(Res && "Error string set with 0 result code!");
+ getDriver().Diag(clang::diag::err_drv_command_failure) << Error;
+ }
+
+ if (Res)
+ FailingCommand = &C;
+
+ delete[] Argv;
+ return Res;
+}
+
+int Compilation::ExecuteJob(const Job &J,
+ const Command *&FailingCommand) const {
+ if (const Command *C = dyn_cast<Command>(&J)) {
+ return ExecuteCommand(*C, FailingCommand);
+ } else {
+ const JobList *Jobs = cast<JobList>(&J);
+ for (JobList::const_iterator
+ it = Jobs->begin(), ie = Jobs->end(); it != ie; ++it)
+ if (int Res = ExecuteJob(**it, FailingCommand))
+ return Res;
+ return 0;
+ }
+}
+
+void Compilation::initCompilationForDiagnostics(void) {
+ // Free actions and jobs.
+ DeleteContainerPointers(Actions);
+ Jobs.clear();
+
+ // Clear temporary/results file lists.
+ TempFiles.clear();
+ ResultFiles.clear();
+
+  // Remove any user-specified output. Claim any unclaimed arguments, so as
+ // to avoid emitting warnings about unused args.
+ OptSpecifier OutputOpts[] = { options::OPT_o, options::OPT_MD,
+ options::OPT_MMD };
+ for (unsigned i = 0; i != sizeof(OutputOpts)/sizeof(OutputOpts[0]); ++i) {
+ if (TranslatedArgs->hasArg(OutputOpts[i]))
+ TranslatedArgs->eraseArg(OutputOpts[i]);
+ }
+ TranslatedArgs->ClaimAllArgs();
+
+ // Redirect stdout/stderr to /dev/null.
+ Redirects = new const llvm::sys::Path*[3]();
+ Redirects[1] = new const llvm::sys::Path();
+ Redirects[2] = new const llvm::sys::Path();
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Driver.cpp b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
new file mode 100644
index 0000000..40e0c00
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Driver.cpp
@@ -0,0 +1,1791 @@
+//===--- Driver.cpp - Clang GCC Compatible Driver -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Driver.h"
+
+#include "clang/Driver/Action.h"
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Job.h"
+#include "clang/Driver/OptTable.h"
+#include "clang/Driver/Option.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/ToolChain.h"
+
+#include "clang/Basic/Version.h"
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
+
+#include "InputInfo.h"
+#include "ToolChains.h"
+
+#include <map>
+
+#include "clang/Config/config.h"
+
+using namespace clang::driver;
+using namespace clang;
+
+Driver::Driver(StringRef ClangExecutable,
+ StringRef DefaultTargetTriple,
+ StringRef DefaultImageName,
+ bool IsProduction,
+ DiagnosticsEngine &Diags)
+ : Opts(createDriverOptTable()), Diags(Diags),
+ ClangExecutable(ClangExecutable), UseStdLib(true),
+ DefaultTargetTriple(DefaultTargetTriple),
+ DefaultImageName(DefaultImageName),
+ DriverTitle("clang \"gcc-compatible\" driver"),
+ CCPrintOptionsFilename(0), CCPrintHeadersFilename(0),
+ CCLogDiagnosticsFilename(0), CCCIsCXX(false),
+ CCCIsCPP(false),CCCEcho(false), CCCPrintBindings(false),
+ CCPrintOptions(false), CCPrintHeaders(false), CCLogDiagnostics(false),
+ CCGenDiagnostics(false), CCCGenericGCCName(""), CheckInputsExist(true),
+ CCCUseClang(true), CCCUseClangCXX(true), CCCUseClangCPP(true),
+ CCCUsePCH(true), SuppressMissingInputWarning(false) {
+ if (IsProduction) {
+ // In a "production" build, only use clang on architectures we expect to
+ // work.
+ //
+    // During development it's more convenient to always have the driver use
+ // clang, but we don't want users to be confused when things don't work, or
+ // to file bugs for things we don't support.
+ CCCClangArchs.insert(llvm::Triple::x86);
+ CCCClangArchs.insert(llvm::Triple::x86_64);
+ CCCClangArchs.insert(llvm::Triple::arm);
+ }
+
+ Name = llvm::sys::path::stem(ClangExecutable);
+ Dir = llvm::sys::path::parent_path(ClangExecutable);
+
+ // Compute the path to the resource directory.
+ StringRef ClangResourceDir(CLANG_RESOURCE_DIR);
+ SmallString<128> P(Dir);
+ if (ClangResourceDir != "")
+ llvm::sys::path::append(P, ClangResourceDir);
+ else
+ llvm::sys::path::append(P, "..", "lib", "clang", CLANG_VERSION_STRING);
+ ResourceDir = P.str();
+}
+
+Driver::~Driver() {
+ delete Opts;
+
+ for (llvm::StringMap<ToolChain *>::iterator I = ToolChains.begin(),
+ E = ToolChains.end();
+ I != E; ++I)
+ delete I->second;
+}
+
+InputArgList *Driver::ParseArgStrings(ArrayRef<const char *> ArgList) {
+ llvm::PrettyStackTraceString CrashInfo("Command line argument parsing");
+ unsigned MissingArgIndex, MissingArgCount;
+ InputArgList *Args = getOpts().ParseArgs(ArgList.begin(), ArgList.end(),
+ MissingArgIndex, MissingArgCount);
+
+ // Check for missing argument error.
+ if (MissingArgCount)
+ Diag(clang::diag::err_drv_missing_argument)
+ << Args->getArgString(MissingArgIndex) << MissingArgCount;
+
+ // Check for unsupported options.
+ for (ArgList::const_iterator it = Args->begin(), ie = Args->end();
+ it != ie; ++it) {
+ Arg *A = *it;
+ if (A->getOption().isUnsupported()) {
+ Diag(clang::diag::err_drv_unsupported_opt) << A->getAsString(*Args);
+ continue;
+ }
+
+ // Warn about -mcpu= without an argument.
+ if (A->getOption().matches(options::OPT_mcpu_EQ) &&
+ A->containsValue("")) {
+ Diag(clang::diag::warn_drv_empty_joined_argument) << A->getAsString(*Args);
+ }
+ }
+
+ return Args;
+}
+
+// Determine which compilation mode we are in. We look for options which
+// affect the phase, starting with the earliest phases, and record which
+// option we used to determine the final phase.
+phases::ID Driver::getFinalPhase(const DerivedArgList &DAL, Arg **FinalPhaseArg)
+const {
+ Arg *PhaseArg = 0;
+ phases::ID FinalPhase;
+
+ // -{E,M,MM} only run the preprocessor.
+ if (CCCIsCPP ||
+ (PhaseArg = DAL.getLastArg(options::OPT_E)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_M, options::OPT_MM))) {
+ FinalPhase = phases::Preprocess;
+
+ // -{fsyntax-only,-analyze,emit-ast,S} only run up to the compiler.
+ } else if ((PhaseArg = DAL.getLastArg(options::OPT_fsyntax_only)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_rewrite_objc)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_rewrite_legacy_objc)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT__migrate)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT__analyze,
+ options::OPT__analyze_auto)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_emit_ast)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_S))) {
+ FinalPhase = phases::Compile;
+
+ // -c only runs up to the assembler.
+ } else if ((PhaseArg = DAL.getLastArg(options::OPT_c))) {
+ FinalPhase = phases::Assemble;
+
+ // Otherwise do everything.
+ } else
+ FinalPhase = phases::Link;
+
+ if (FinalPhaseArg)
+ *FinalPhaseArg = PhaseArg;
+
+ return FinalPhase;
+}
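+
+// For illustration (hypothetical command lines): "clang -E x.c" stops at
+// phases::Preprocess, "clang -fsyntax-only x.c" and "clang -S x.c" stop at
+// phases::Compile, "clang -c x.c" stops at phases::Assemble, and a bare
+// "clang x.c" runs all the way to phases::Link.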
+
+DerivedArgList *Driver::TranslateInputArgs(const InputArgList &Args) const {
+ DerivedArgList *DAL = new DerivedArgList(Args);
+
+ bool HasNostdlib = Args.hasArg(options::OPT_nostdlib);
+ for (ArgList::const_iterator it = Args.begin(),
+ ie = Args.end(); it != ie; ++it) {
+ const Arg *A = *it;
+
+ // Unfortunately, we have to parse some forwarding options (-Xassembler,
+ // -Xlinker, -Xpreprocessor) because we either integrate their functionality
+ // (assembler and preprocessor), or bypass a previous driver ('collect2').
+
+ // Rewrite linker options, to replace --no-demangle with a custom internal
+ // option.
+ if ((A->getOption().matches(options::OPT_Wl_COMMA) ||
+ A->getOption().matches(options::OPT_Xlinker)) &&
+ A->containsValue("--no-demangle")) {
+ // Add the rewritten no-demangle argument.
+ DAL->AddFlagArg(A, Opts->getOption(options::OPT_Z_Xlinker__no_demangle));
+
+ // Add the remaining values as Xlinker arguments.
+ for (unsigned i = 0, e = A->getNumValues(); i != e; ++i)
+ if (StringRef(A->getValue(Args, i)) != "--no-demangle")
+ DAL->AddSeparateArg(A, Opts->getOption(options::OPT_Xlinker),
+ A->getValue(Args, i));
+
+ continue;
+ }
+
+ // Rewrite preprocessor options, to replace -Wp,-MD,FOO which is used by
+ // some build systems. We don't try to be complete here because we don't
+ // care to encourage this usage model.
+ if (A->getOption().matches(options::OPT_Wp_COMMA) &&
+ A->getNumValues() == 2 &&
+ (A->getValue(Args, 0) == StringRef("-MD") ||
+ A->getValue(Args, 0) == StringRef("-MMD"))) {
+ // Rewrite to -MD/-MMD along with -MF.
+ if (A->getValue(Args, 0) == StringRef("-MD"))
+ DAL->AddFlagArg(A, Opts->getOption(options::OPT_MD));
+ else
+ DAL->AddFlagArg(A, Opts->getOption(options::OPT_MMD));
+ DAL->AddSeparateArg(A, Opts->getOption(options::OPT_MF),
+ A->getValue(Args, 1));
+ continue;
+ }
+
+ // Rewrite reserved library names.
+ if (A->getOption().matches(options::OPT_l)) {
+ StringRef Value = A->getValue(Args);
+
+ // Rewrite unless -nostdlib is present.
+ if (!HasNostdlib && Value == "stdc++") {
+ DAL->AddFlagArg(A, Opts->getOption(
+ options::OPT_Z_reserved_lib_stdcxx));
+ continue;
+ }
+
+ // Rewrite unconditionally.
+ if (Value == "cc_kext") {
+ DAL->AddFlagArg(A, Opts->getOption(
+ options::OPT_Z_reserved_lib_cckext));
+ continue;
+ }
+ }
+
+ DAL->append(*it);
+ }
+
+ // Add a default value of -mlinker-version=, if one was given and the user
+ // didn't specify one.
+#if defined(HOST_LINK_VERSION)
+ if (!Args.hasArg(options::OPT_mlinker_version_EQ)) {
+ DAL->AddJoinedArg(0, Opts->getOption(options::OPT_mlinker_version_EQ),
+ HOST_LINK_VERSION);
+ DAL->getLastArg(options::OPT_mlinker_version_EQ)->claim();
+ }
+#endif
+
+ return DAL;
+}
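+
+// For illustration (hypothetical invocations): "-Wl,--no-demangle,-v" is
+// rewritten above into the internal OPT_Z_Xlinker__no_demangle flag plus a
+// separate "-Xlinker -v", and "-Wp,-MD,deps.d" becomes "-MD" together with
+// "-MF deps.d".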
+
+Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
+ llvm::PrettyStackTraceString CrashInfo("Compilation construction");
+
+ // FIXME: Handle environment options which affect driver behavior, somewhere
+ // (client?). GCC_EXEC_PREFIX, LPATH, CC_PRINT_OPTIONS.
+
+ if (char *env = ::getenv("COMPILER_PATH")) {
+ StringRef CompilerPath = env;
+ while (!CompilerPath.empty()) {
+ std::pair<StringRef, StringRef> Split = CompilerPath.split(':');
+ PrefixDirs.push_back(Split.first);
+ CompilerPath = Split.second;
+ }
+ }
+
+ // FIXME: What are we going to do with -V and -b?
+
+ // FIXME: This stuff needs to go into the Compilation, not the driver.
+ bool CCCPrintOptions = false, CCCPrintActions = false;
+
+ InputArgList *Args = ParseArgStrings(ArgList.slice(1));
+
+ // -no-canonical-prefixes is used very early in main.
+ Args->ClaimAllArgs(options::OPT_no_canonical_prefixes);
+
+ // Ignore -pipe.
+ Args->ClaimAllArgs(options::OPT_pipe);
+
+ // Extract -ccc args.
+ //
+ // FIXME: We need to figure out where this behavior should live. Most of it
+ // should be outside in the client; the parts that aren't should have proper
+ // options, either by introducing new ones or by overloading gcc ones like -V
+ // or -b.
+ CCCPrintOptions = Args->hasArg(options::OPT_ccc_print_options);
+ CCCPrintActions = Args->hasArg(options::OPT_ccc_print_phases);
+ CCCPrintBindings = Args->hasArg(options::OPT_ccc_print_bindings);
+ CCCIsCXX = Args->hasArg(options::OPT_ccc_cxx) || CCCIsCXX;
+ CCCEcho = Args->hasArg(options::OPT_ccc_echo);
+ if (const Arg *A = Args->getLastArg(options::OPT_ccc_gcc_name))
+ CCCGenericGCCName = A->getValue(*Args);
+ CCCUseClangCXX = Args->hasFlag(options::OPT_ccc_clang_cxx,
+ options::OPT_ccc_no_clang_cxx,
+ CCCUseClangCXX);
+ CCCUsePCH = Args->hasFlag(options::OPT_ccc_pch_is_pch,
+ options::OPT_ccc_pch_is_pth);
+ CCCUseClang = !Args->hasArg(options::OPT_ccc_no_clang);
+ CCCUseClangCPP = !Args->hasArg(options::OPT_ccc_no_clang_cpp);
+ if (const Arg *A = Args->getLastArg(options::OPT_ccc_clang_archs)) {
+ StringRef Cur = A->getValue(*Args);
+
+ CCCClangArchs.clear();
+ while (!Cur.empty()) {
+ std::pair<StringRef, StringRef> Split = Cur.split(',');
+
+ if (!Split.first.empty()) {
+ llvm::Triple::ArchType Arch =
+ llvm::Triple(Split.first, "", "").getArch();
+
+ if (Arch == llvm::Triple::UnknownArch)
+ Diag(clang::diag::err_drv_invalid_arch_name) << Split.first;
+
+ CCCClangArchs.insert(Arch);
+ }
+
+ Cur = Split.second;
+ }
+ }
+ // FIXME: DefaultTargetTriple is used by the target-prefixed calls to as/ld
+ // and getToolChain is const.
+ if (const Arg *A = Args->getLastArg(options::OPT_target))
+ DefaultTargetTriple = A->getValue(*Args);
+ if (const Arg *A = Args->getLastArg(options::OPT_ccc_install_dir))
+ Dir = InstalledDir = A->getValue(*Args);
+ for (arg_iterator it = Args->filtered_begin(options::OPT_B),
+ ie = Args->filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+ A->claim();
+ PrefixDirs.push_back(A->getValue(*Args, 0));
+ }
+ if (const Arg *A = Args->getLastArg(options::OPT__sysroot_EQ))
+ SysRoot = A->getValue(*Args);
+ if (Args->hasArg(options::OPT_nostdlib))
+ UseStdLib = false;
+
+ // Perform the default argument translations.
+ DerivedArgList *TranslatedArgs = TranslateInputArgs(*Args);
+
+ // Owned by the host.
+ const ToolChain &TC = getToolChain(*Args);
+
+ // The compilation takes ownership of Args.
+ Compilation *C = new Compilation(*this, TC, Args, TranslatedArgs);
+
+ // FIXME: This behavior shouldn't be here.
+ if (CCCPrintOptions) {
+ PrintOptions(C->getInputArgs());
+ return C;
+ }
+
+ if (!HandleImmediateArgs(*C))
+ return C;
+
+ // Construct the list of inputs.
+ InputList Inputs;
+ BuildInputs(C->getDefaultToolChain(), C->getArgs(), Inputs);
+
+ // Construct the list of abstract actions to perform for this compilation. On
+ // Darwin target OSes this uses the driver-driver and universal actions.
+ if (TC.getTriple().isOSDarwin())
+ BuildUniversalActions(C->getDefaultToolChain(), C->getArgs(),
+ Inputs, C->getActions());
+ else
+ BuildActions(C->getDefaultToolChain(), C->getArgs(), Inputs,
+ C->getActions());
+
+ if (CCCPrintActions) {
+ PrintActions(*C);
+ return C;
+ }
+
+ BuildJobs(*C);
+
+ return C;
+}
+
+// When clang crashes, produce diagnostic information including the fully
+// preprocessed source file(s). Request that the developer attach the
+// diagnostic information to a bug report.
+void Driver::generateCompilationDiagnostics(Compilation &C,
+ const Command *FailingCommand) {
+ if (C.getArgs().hasArg(options::OPT_fno_crash_diagnostics))
+ return;
+
+ // Don't try to generate diagnostics for link jobs.
+ if (FailingCommand->getCreator().isLinkJob())
+ return;
+
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Please submit a bug report to " BUG_REPORT_URL " and include command"
+ " line arguments and all diagnostic information.";
+
+ // Suppress driver output and emit preprocessor output to temp file.
+ CCCIsCPP = true;
+ CCGenDiagnostics = true;
+
+ // Save the original job command(s).
+ std::string Cmd;
+ llvm::raw_string_ostream OS(Cmd);
+ C.PrintJob(OS, C.getJobs(), "\n", false);
+ OS.flush();
+
+ // Clear stale state and suppress tool output.
+ C.initCompilationForDiagnostics();
+ Diags.Reset();
+
+ // Construct the list of inputs.
+ InputList Inputs;
+ BuildInputs(C.getDefaultToolChain(), C.getArgs(), Inputs);
+
+ for (InputList::iterator it = Inputs.begin(), ie = Inputs.end(); it != ie;) {
+ bool IgnoreInput = false;
+
+ // Ignore input from stdin or any inputs that cannot be preprocessed.
+ if (!strcmp(it->second->getValue(C.getArgs()), "-")) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Error generating preprocessed source(s) - ignoring input from stdin"
+ ".";
+ IgnoreInput = true;
+ } else if (types::getPreprocessedType(it->first) == types::TY_INVALID) {
+ IgnoreInput = true;
+ }
+
+ if (IgnoreInput) {
+ it = Inputs.erase(it);
+ ie = Inputs.end();
+ } else {
+ ++it;
+ }
+ }
+
+ // Don't attempt to generate preprocessed files if multiple -arch options are
+ // used, unless they're all duplicates.
+ llvm::StringSet<> ArchNames;
+ for (ArgList::const_iterator it = C.getArgs().begin(), ie = C.getArgs().end();
+ it != ie; ++it) {
+ Arg *A = *it;
+ if (A->getOption().matches(options::OPT_arch)) {
+ StringRef ArchName = A->getValue(C.getArgs());
+ ArchNames.insert(ArchName);
+ }
+ }
+ if (ArchNames.size() > 1) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Error generating preprocessed source(s) - cannot generate "
+ "preprocessed source with multiple -arch options.";
+ return;
+ }
+
+ if (Inputs.empty()) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Error generating preprocessed source(s) - no preprocessable inputs.";
+ return;
+ }
+
+ // Construct the list of abstract actions to perform for this compilation. On
+ // Darwin OSes this uses the driver-driver and builds universal actions.
+ const ToolChain &TC = C.getDefaultToolChain();
+ if (TC.getTriple().isOSDarwin())
+ BuildUniversalActions(TC, C.getArgs(), Inputs, C.getActions());
+ else
+ BuildActions(TC, C.getArgs(), Inputs, C.getActions());
+
+ BuildJobs(C);
+
+ // If there were errors building the compilation, quit now.
+ if (Diags.hasErrorOccurred()) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Error generating preprocessed source(s).";
+ return;
+ }
+
+ // Generate preprocessed output.
+ FailingCommand = 0;
+ int Res = C.ExecuteJob(C.getJobs(), FailingCommand);
+
+ // If the command succeeded, we are done.
+ if (Res == 0) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Preprocessed source(s) and associated run script(s) are located at:";
+ ArgStringList Files = C.getTempFiles();
+ for (ArgStringList::const_iterator it = Files.begin(), ie = Files.end();
+ it != ie; ++it) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg) << *it;
+
+ std::string Err;
+ std::string Script = StringRef(*it).rsplit('.').first;
+ Script += ".sh";
+ llvm::raw_fd_ostream ScriptOS(Script.c_str(), Err,
+ llvm::raw_fd_ostream::F_Excl |
+ llvm::raw_fd_ostream::F_Binary);
+ if (!Err.empty()) {
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Error generating run script: " + Script + " " + Err;
+ } else {
+ ScriptOS << Cmd;
+ Diag(clang::diag::note_drv_command_failed_diag_msg) << Script;
+ }
+ }
+ } else {
+ // Failure, remove preprocessed files.
+ if (!C.getArgs().hasArg(options::OPT_save_temps))
+ C.CleanupFileList(C.getTempFiles(), true);
+
+ Diag(clang::diag::note_drv_command_failed_diag_msg)
+ << "Error generating preprocessed source(s).";
+ }
+}
+
+int Driver::ExecuteCompilation(const Compilation &C,
+ const Command *&FailingCommand) const {
+ // Just print if -### was present.
+ if (C.getArgs().hasArg(options::OPT__HASH_HASH_HASH)) {
+ C.PrintJob(llvm::errs(), C.getJobs(), "\n", true);
+ return 0;
+ }
+
+ // If there were errors building the compilation, quit now.
+ if (Diags.hasErrorOccurred())
+ return 1;
+
+ int Res = C.ExecuteJob(C.getJobs(), FailingCommand);
+
+ // Remove temp files.
+ C.CleanupFileList(C.getTempFiles());
+
+ // If the command succeeded, we are done.
+ if (Res == 0)
+ return Res;
+
+ // Otherwise, remove result files as well.
+ if (!C.getArgs().hasArg(options::OPT_save_temps)) {
+ C.CleanupFileList(C.getResultFiles(), true);
+
+ // Failure result files are valid unless we crashed.
+ if (Res < 0) {
+ C.CleanupFileList(C.getFailureResultFiles(), true);
+#ifdef _WIN32
+ // Exit status should not be negative on Win32,
+ // unless abnormal termination.
+ Res = 1;
+#endif
+ }
+ }
+
+ // Print extra information about abnormal failures, if possible.
+ //
+ // This is ad-hoc, but we don't want to be excessively noisy. If the result
+ // status was 1, assume the command failed normally. In particular, if it was
+ // the compiler then assume it gave a reasonable error code. Failures in other
+ // tools are less common, and they generally have worse diagnostics, so always
+ // print the diagnostic there.
+ const Tool &FailingTool = FailingCommand->getCreator();
+
+ if (!FailingCommand->getCreator().hasGoodDiagnostics() || Res != 1) {
+ // FIXME: See FIXME above regarding result code interpretation.
+ if (Res < 0)
+ Diag(clang::diag::err_drv_command_signalled)
+ << FailingTool.getShortName();
+ else
+ Diag(clang::diag::err_drv_command_failed)
+ << FailingTool.getShortName() << Res;
+ }
+
+ return Res;
+}
+
+void Driver::PrintOptions(const ArgList &Args) const {
+ unsigned i = 0;
+ for (ArgList::const_iterator it = Args.begin(), ie = Args.end();
+ it != ie; ++it, ++i) {
+ Arg *A = *it;
+ llvm::errs() << "Option " << i << " - "
+ << "Name: \"" << A->getOption().getName() << "\", "
+ << "Values: {";
+ for (unsigned j = 0; j < A->getNumValues(); ++j) {
+ if (j)
+ llvm::errs() << ", ";
+ llvm::errs() << '"' << A->getValue(Args, j) << '"';
+ }
+ llvm::errs() << "}\n";
+ }
+}
+
+void Driver::PrintHelp(bool ShowHidden) const {
+ getOpts().PrintHelp(llvm::outs(), Name.c_str(), DriverTitle.c_str(),
+ ShowHidden);
+}
+
+void Driver::PrintVersion(const Compilation &C, raw_ostream &OS) const {
+ // FIXME: The following handlers should use a callback mechanism, we don't
+ // know what the client would like to do.
+ OS << getClangFullVersion() << '\n';
+ const ToolChain &TC = C.getDefaultToolChain();
+ OS << "Target: " << TC.getTripleString() << '\n';
+
+ // Print the threading model.
+ //
+ // FIXME: Implement correctly.
+ OS << "Thread model: " << "posix" << '\n';
+}
+
+/// PrintDiagnosticCategories - Implement the --print-diagnostic-categories
+/// option.
+static void PrintDiagnosticCategories(raw_ostream &OS) {
+ // Skip the empty category.
+ for (unsigned i = 1, max = DiagnosticIDs::getNumberOfCategories();
+ i != max; ++i)
+ OS << i << ',' << DiagnosticIDs::getCategoryNameFromID(i) << '\n';
+}
+
+bool Driver::HandleImmediateArgs(const Compilation &C) {
+ // The order these options are handled in gcc is all over the place, but we
+ // don't expect inconsistencies w.r.t. that to matter in practice.
+
+ if (C.getArgs().hasArg(options::OPT_dumpmachine)) {
+ llvm::outs() << C.getDefaultToolChain().getTripleString() << '\n';
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT_dumpversion)) {
+ // Since -dumpversion is only implemented for pedantic GCC compatibility, we
+ // return an answer which matches our definition of __VERSION__.
+ //
+ // If we want to return a more correct answer some day, then we should
+ // introduce a non-pedantically GCC compatible mode to Clang in which we
+ // provide sensible definitions for -dumpversion, __VERSION__, etc.
+ llvm::outs() << "4.2.1\n";
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT__print_diagnostic_categories)) {
+ PrintDiagnosticCategories(llvm::outs());
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT__help) ||
+ C.getArgs().hasArg(options::OPT__help_hidden)) {
+ PrintHelp(C.getArgs().hasArg(options::OPT__help_hidden));
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT__version)) {
+ // Follow gcc behavior and use stdout for --version and stderr for -v.
+ PrintVersion(C, llvm::outs());
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT_v) ||
+ C.getArgs().hasArg(options::OPT__HASH_HASH_HASH)) {
+ PrintVersion(C, llvm::errs());
+ SuppressMissingInputWarning = true;
+ }
+
+ const ToolChain &TC = C.getDefaultToolChain();
+ if (C.getArgs().hasArg(options::OPT_print_search_dirs)) {
+ llvm::outs() << "programs: =";
+ for (ToolChain::path_list::const_iterator it = TC.getProgramPaths().begin(),
+ ie = TC.getProgramPaths().end(); it != ie; ++it) {
+ if (it != TC.getProgramPaths().begin())
+ llvm::outs() << ':';
+ llvm::outs() << *it;
+ }
+ llvm::outs() << "\n";
+ llvm::outs() << "libraries: =" << ResourceDir;
+
+ std::string sysroot;
+ if (Arg *A = C.getArgs().getLastArg(options::OPT__sysroot_EQ))
+ sysroot = A->getValue(C.getArgs());
+
+ for (ToolChain::path_list::const_iterator it = TC.getFilePaths().begin(),
+ ie = TC.getFilePaths().end(); it != ie; ++it) {
+ llvm::outs() << ':';
+ const char *path = it->c_str();
+ if (path[0] == '=')
+ llvm::outs() << sysroot << path + 1;
+ else
+ llvm::outs() << path;
+ }
+ llvm::outs() << "\n";
+ return false;
+ }
+
+ // FIXME: The following handlers should use a callback mechanism, we don't
+ // know what the client would like to do.
+ if (Arg *A = C.getArgs().getLastArg(options::OPT_print_file_name_EQ)) {
+ llvm::outs() << GetFilePath(A->getValue(C.getArgs()), TC) << "\n";
+ return false;
+ }
+
+ if (Arg *A = C.getArgs().getLastArg(options::OPT_print_prog_name_EQ)) {
+ llvm::outs() << GetProgramPath(A->getValue(C.getArgs()), TC) << "\n";
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT_print_libgcc_file_name)) {
+ llvm::outs() << GetFilePath("libgcc.a", TC) << "\n";
+ return false;
+ }
+
+ if (C.getArgs().hasArg(options::OPT_print_multi_lib)) {
+ // FIXME: We need tool chain support for this.
+ llvm::outs() << ".;\n";
+
+ switch (C.getDefaultToolChain().getTriple().getArch()) {
+ default:
+ break;
+
+ case llvm::Triple::x86_64:
+ llvm::outs() << "x86_64;@m64" << "\n";
+ break;
+
+ case llvm::Triple::ppc64:
+ llvm::outs() << "ppc64;@m64" << "\n";
+ break;
+ }
+ return false;
+ }
+
+ // FIXME: What is the difference between print-multi-directory and
+ // print-multi-os-directory?
+ if (C.getArgs().hasArg(options::OPT_print_multi_directory) ||
+ C.getArgs().hasArg(options::OPT_print_multi_os_directory)) {
+ switch (C.getDefaultToolChain().getTriple().getArch()) {
+ default:
+ case llvm::Triple::x86:
+ case llvm::Triple::ppc:
+ llvm::outs() << "." << "\n";
+ break;
+
+ case llvm::Triple::x86_64:
+ llvm::outs() << "." << "\n";
+ break;
+
+ case llvm::Triple::ppc64:
+ llvm::outs() << "ppc64" << "\n";
+ break;
+ }
+ return false;
+ }
+
+ return true;
+}
+
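+// For reference (illustrative), the -ccc-print-phases output produced through
+// PrintActions1 takes a form like:
+//   0: input, "foo.c", c
+//   1: preprocessor, {0}, cpp-output
+//   2: compiler, {1}, assembler
+//   3: assembler, {2}, object
+//   4: linker, {3}, image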
+static unsigned PrintActions1(const Compilation &C, Action *A,
+ std::map<Action*, unsigned> &Ids) {
+ if (Ids.count(A))
+ return Ids[A];
+
+ std::string str;
+ llvm::raw_string_ostream os(str);
+
+ os << Action::getClassName(A->getKind()) << ", ";
+ if (InputAction *IA = dyn_cast<InputAction>(A)) {
+ os << "\"" << IA->getInputArg().getValue(C.getArgs()) << "\"";
+ } else if (BindArchAction *BIA = dyn_cast<BindArchAction>(A)) {
+ os << '"' << (BIA->getArchName() ? BIA->getArchName() :
+ C.getDefaultToolChain().getArchName()) << '"'
+ << ", {" << PrintActions1(C, *BIA->begin(), Ids) << "}";
+ } else {
+ os << "{";
+ for (Action::iterator it = A->begin(), ie = A->end(); it != ie;) {
+ os << PrintActions1(C, *it, Ids);
+ ++it;
+ if (it != ie)
+ os << ", ";
+ }
+ os << "}";
+ }
+
+ unsigned Id = Ids.size();
+ Ids[A] = Id;
+ llvm::errs() << Id << ": " << os.str() << ", "
+ << types::getTypeName(A->getType()) << "\n";
+
+ return Id;
+}
+
+void Driver::PrintActions(const Compilation &C) const {
+ std::map<Action*, unsigned> Ids;
+ for (ActionList::const_iterator it = C.getActions().begin(),
+ ie = C.getActions().end(); it != ie; ++it)
+ PrintActions1(C, *it, Ids);
+}
+
+/// \brief Check whether the given input tree contains any compilation or
+/// assembly actions.
+static bool ContainsCompileOrAssembleAction(const Action *A) {
+ if (isa<CompileJobAction>(A) || isa<AssembleJobAction>(A))
+ return true;
+
+ for (Action::const_iterator it = A->begin(), ie = A->end(); it != ie; ++it)
+ if (ContainsCompileOrAssembleAction(*it))
+ return true;
+
+ return false;
+}
+
+void Driver::BuildUniversalActions(const ToolChain &TC,
+ const DerivedArgList &Args,
+ const InputList &BAInputs,
+ ActionList &Actions) const {
+ llvm::PrettyStackTraceString CrashInfo("Building universal build actions");
+ // Collect the list of architectures. Duplicates are allowed, but should only
+ // be handled once (in the order seen).
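+ // (For example, '-arch i386 -arch x86_64 -arch i386' binds i386 and x86_64
+ // once each, in that order.)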
+ llvm::StringSet<> ArchNames;
+ SmallVector<const char *, 4> Archs;
+ for (ArgList::const_iterator it = Args.begin(), ie = Args.end();
+ it != ie; ++it) {
+ Arg *A = *it;
+
+ if (A->getOption().matches(options::OPT_arch)) {
+ // Validate the option here; we don't save the type here because its
+ // particular spelling may participate in other driver choices.
+ llvm::Triple::ArchType Arch =
+ llvm::Triple::getArchTypeForDarwinArchName(A->getValue(Args));
+ if (Arch == llvm::Triple::UnknownArch) {
+ Diag(clang::diag::err_drv_invalid_arch_name)
+ << A->getAsString(Args);
+ continue;
+ }
+
+ A->claim();
+ if (ArchNames.insert(A->getValue(Args)))
+ Archs.push_back(A->getValue(Args));
+ }
+ }
+
+ // When there is no explicit arch for this platform, make sure we still bind
+ // the architecture (to the default) so that -Xarch_ is handled correctly.
+ if (!Archs.size())
+ Archs.push_back(0);
+
+ // FIXME: We killed off some others but these aren't yet detected in a
+ // functional manner. If we added information to jobs about which "auxiliary"
+ // files they wrote then we could detect the conflict these cause downstream.
+ if (Archs.size() > 1) {
+ // No recovery needed, the point of this is just to prevent
+ // overwriting the same files.
+ if (const Arg *A = Args.getLastArg(options::OPT_save_temps))
+ Diag(clang::diag::err_drv_invalid_opt_with_multiple_archs)
+ << A->getAsString(Args);
+ }
+
+ ActionList SingleActions;
+ BuildActions(TC, Args, BAInputs, SingleActions);
+
+ // Add in arch bindings for every top level action, as well as lipo and
+ // dsymutil steps if needed.
+ for (unsigned i = 0, e = SingleActions.size(); i != e; ++i) {
+ Action *Act = SingleActions[i];
+
+ // Make sure we can lipo this kind of output. If not (and it is an actual
+ // output) then we disallow, since we can't create an output file with the
+ // right name without overwriting it. We could remove this oddity by just
+ // changing the output names to include the arch, which would also fix
+ // -save-temps. Compatibility wins for now.
+
+ if (Archs.size() > 1 && !types::canLipoType(Act->getType()))
+ Diag(clang::diag::err_drv_invalid_output_with_multiple_archs)
+ << types::getTypeName(Act->getType());
+
+ ActionList Inputs;
+ for (unsigned i = 0, e = Archs.size(); i != e; ++i) {
+ Inputs.push_back(new BindArchAction(Act, Archs[i]));
+ if (i != 0)
+ Inputs.back()->setOwnsInputs(false);
+ }
+
+ // Lipo if necessary, we do it this way because we need to set the arch flag
+ // so that -Xarch_ gets overwritten.
+ if (Inputs.size() == 1 || Act->getType() == types::TY_Nothing)
+ Actions.append(Inputs.begin(), Inputs.end());
+ else
+ Actions.push_back(new LipoJobAction(Inputs, Act->getType()));
+
+ // Handle debug info queries.
+ Arg *A = Args.getLastArg(options::OPT_g_Group);
+ if (A && !A->getOption().matches(options::OPT_g0) &&
+ !A->getOption().matches(options::OPT_gstabs) &&
+ ContainsCompileOrAssembleAction(Actions.back())) {
+
+ // Add a 'dsymutil' step if necessary, when debug info is enabled and we
+ // have a compile input. We need to run 'dsymutil' ourselves in such cases
+ // because the debug info will refer to a temporary object file which
+ // will be removed at the end of the compilation process.
+ if (Act->getType() == types::TY_Image) {
+ ActionList Inputs;
+ Inputs.push_back(Actions.back());
+ Actions.pop_back();
+ Actions.push_back(new DsymutilJobAction(Inputs, types::TY_dSYM));
+ }
+
+ // Verify the output (debug information only) if we passed '-verify'.
+ if (Args.hasArg(options::OPT_verify)) {
+ ActionList VerifyInputs;
+ VerifyInputs.push_back(Actions.back());
+ Actions.pop_back();
+ Actions.push_back(new VerifyJobAction(VerifyInputs,
+ types::TY_Nothing));
+ }
+ }
+ }
+}
+
+// Construct the list of inputs and their types.
+void Driver::BuildInputs(const ToolChain &TC, const DerivedArgList &Args,
+ InputList &Inputs) const {
+ // Track the current user specified (-x) input. We also explicitly track the
+ // argument used to set the type; we only want to claim the type when we
+ // actually use it, so we warn about unused -x arguments.
+ types::ID InputType = types::TY_Nothing;
+ Arg *InputTypeArg = 0;
+
+ for (ArgList::const_iterator it = Args.begin(), ie = Args.end();
+ it != ie; ++it) {
+ Arg *A = *it;
+
+ if (isa<InputOption>(A->getOption())) {
+ const char *Value = A->getValue(Args);
+ types::ID Ty = types::TY_INVALID;
+
+ // Infer the input type if necessary.
+ if (InputType == types::TY_Nothing) {
+ // If there was an explicit arg for this, claim it.
+ if (InputTypeArg)
+ InputTypeArg->claim();
+
+ // stdin must be handled specially.
+ if (memcmp(Value, "-", 2) == 0) {
+ // If running with -E, treat as a C input (this changes the builtin
+ // macros, for example). This may be overridden by -ObjC below.
+ //
+ // Otherwise emit an error but still use a valid type to avoid
+ // spurious errors (e.g., no inputs).
+ if (!Args.hasArgNoClaim(options::OPT_E) && !CCCIsCPP)
+ Diag(clang::diag::err_drv_unknown_stdin_type);
+ Ty = types::TY_C;
+ } else {
+ // Otherwise lookup by extension.
+ // Fallback is C if invoked as C preprocessor or Object otherwise.
+ // We use a toolchain hook here because Darwin at least has its own
+ // idea of what .s is.
+ if (const char *Ext = strrchr(Value, '.'))
+ Ty = TC.LookupTypeForExtension(Ext + 1);
+
+ if (Ty == types::TY_INVALID) {
+ if (CCCIsCPP)
+ Ty = types::TY_C;
+ else
+ Ty = types::TY_Object;
+ }
+
+ // If the driver is invoked as C++ compiler (like clang++ or c++) it
+ // should autodetect some input files as C++ for g++ compatibility.
+ if (CCCIsCXX) {
+ types::ID OldTy = Ty;
+ Ty = types::lookupCXXTypeForCType(Ty);
+
+ if (Ty != OldTy)
+ Diag(clang::diag::warn_drv_treating_input_as_cxx)
+ << getTypeName(OldTy) << getTypeName(Ty);
+ }
+ }
+
+ // -ObjC and -ObjC++ override the default language, but only for "source
+ // files". We just treat everything that isn't a linker input as a
+ // source file.
+ //
+ // FIXME: Clean this up if we move the phase sequence into the type.
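+ // (For example, '-ObjC foo.c' compiles foo.c as Objective-C, while an
+ // object-file input keeps its TY_Object type and is left alone.)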
+ if (Ty != types::TY_Object) {
+ if (Args.hasArg(options::OPT_ObjC))
+ Ty = types::TY_ObjC;
+ else if (Args.hasArg(options::OPT_ObjCXX))
+ Ty = types::TY_ObjCXX;
+ }
+ } else {
+ assert(InputTypeArg && "InputType set w/o InputTypeArg");
+ InputTypeArg->claim();
+ Ty = InputType;
+ }
+
+ // Check that the file exists, if enabled.
+ if (CheckInputsExist && memcmp(Value, "-", 2) != 0) {
+ SmallString<64> Path(Value);
+ if (Arg *WorkDir = Args.getLastArg(options::OPT_working_directory)) {
+ SmallString<64> Directory(WorkDir->getValue(Args));
+ if (llvm::sys::path::is_absolute(Directory.str())) {
+ llvm::sys::path::append(Directory, Value);
+ Path.assign(Directory);
+ }
+ }
+
+ bool exists = false;
+ if (llvm::sys::fs::exists(Path.c_str(), exists) || !exists)
+ Diag(clang::diag::err_drv_no_such_file) << Path.str();
+ else
+ Inputs.push_back(std::make_pair(Ty, A));
+ } else
+ Inputs.push_back(std::make_pair(Ty, A));
+
+ } else if (A->getOption().isLinkerInput()) {
+ // Just treat as object type, we could make a special type for this if
+ // necessary.
+ Inputs.push_back(std::make_pair(types::TY_Object, A));
+
+ } else if (A->getOption().matches(options::OPT_x)) {
+ InputTypeArg = A;
+ InputType = types::lookupTypeForTypeSpecifier(A->getValue(Args));
+ A->claim();
+
+ // Follow gcc behavior and treat as linker input for invalid -x
+ // options. It's not clear why we shouldn't just revert to unknown; but
+ // this isn't very important, we might as well be bug compatible.
+ if (!InputType) {
+ Diag(clang::diag::err_drv_unknown_language) << A->getValue(Args);
+ InputType = types::TY_Object;
+ }
+ }
+ }
+ if (CCCIsCPP && Inputs.empty()) {
+ // If called as standalone preprocessor, stdin is processed
+ // if no other input is present.
+ unsigned Index = Args.getBaseArgs().MakeIndex("-");
+ Arg *A = Opts->ParseOneArg(Args, Index);
+ A->claim();
+ Inputs.push_back(std::make_pair(types::TY_C, A));
+ }
+}
+
+void Driver::BuildActions(const ToolChain &TC, const DerivedArgList &Args,
+ const InputList &Inputs, ActionList &Actions) const {
+ llvm::PrettyStackTraceString CrashInfo("Building compilation actions");
+
+ if (!SuppressMissingInputWarning && Inputs.empty()) {
+ Diag(clang::diag::err_drv_no_input_files);
+ return;
+ }
+
+ Arg *FinalPhaseArg;
+ phases::ID FinalPhase = getFinalPhase(Args, &FinalPhaseArg);
+
+ // Reject -Z* at the top level, these options should never have been exposed
+ // by gcc.
+ if (Arg *A = Args.getLastArg(options::OPT_Z_Joined))
+ Diag(clang::diag::err_drv_use_of_Z_option) << A->getAsString(Args);
+
+ // Construct the actions to perform.
+ ActionList LinkerInputs;
+ unsigned NumSteps = 0;
+ for (unsigned i = 0, e = Inputs.size(); i != e; ++i) {
+ types::ID InputType = Inputs[i].first;
+ const Arg *InputArg = Inputs[i].second;
+
+ NumSteps = types::getNumCompilationPhases(InputType);
+ assert(NumSteps && "Invalid number of steps!");
+
+ // If the first step comes after the final phase we are doing as part of
+ // this compilation, warn the user about it.
+ phases::ID InitialPhase = types::getCompilationPhase(InputType, 0);
+ if (InitialPhase > FinalPhase) {
+ // Claim here to avoid the more general unused warning.
+ InputArg->claim();
+
+ // Suppress all unused style warnings with -Qunused-arguments
+ if (Args.hasArg(options::OPT_Qunused_arguments))
+ continue;
+
+ // Special case '-E' warning on a previously preprocessed file to make
+ // more sense.
+ if (InitialPhase == phases::Compile && FinalPhase == phases::Preprocess &&
+ getPreprocessedType(InputType) == types::TY_INVALID)
+ Diag(clang::diag::warn_drv_preprocessed_input_file_unused)
+ << InputArg->getAsString(Args)
+ << FinalPhaseArg->getOption().getName();
+ else
+ Diag(clang::diag::warn_drv_input_file_unused)
+ << InputArg->getAsString(Args)
+ << getPhaseName(InitialPhase)
+ << FinalPhaseArg->getOption().getName();
+ continue;
+ }
+
+ // Build the pipeline for this file.
+ OwningPtr<Action> Current(new InputAction(*InputArg, InputType));
+ for (unsigned i = 0; i != NumSteps; ++i) {
+ phases::ID Phase = types::getCompilationPhase(InputType, i);
+
+ // We are done if this step is past what the user requested.
+ if (Phase > FinalPhase)
+ break;
+
+ // Queue linker inputs.
+ if (Phase == phases::Link) {
+ assert(i + 1 == NumSteps && "linking must be final compilation step.");
+ LinkerInputs.push_back(Current.take());
+ break;
+ }
+
+ // Some types skip the assembler phase (e.g., llvm-bc), but we can't
+ // encode this in the steps because the intermediate type depends on
+ // arguments. Just special case here.
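+ // (For example, with -emit-llvm the compile step already emits LLVM
+ // bitcode, so there is nothing for the assemble step to do.)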
+ if (Phase == phases::Assemble && Current->getType() != types::TY_PP_Asm)
+ continue;
+
+ // Otherwise construct the appropriate action.
+ Current.reset(ConstructPhaseAction(Args, Phase, Current.take()));
+ if (Current->getType() == types::TY_Nothing)
+ break;
+ }
+
+ // If we ended with something, add to the output list.
+ if (Current)
+ Actions.push_back(Current.take());
+ }
+
+ // Add a link action if necessary.
+ if (!LinkerInputs.empty())
+ Actions.push_back(new LinkJobAction(LinkerInputs, types::TY_Image));
+
+ // If we are linking, claim any options which are obviously only used for
+ // compilation.
+ if (FinalPhase == phases::Link && (NumSteps == 1))
+ Args.ClaimAllArgs(options::OPT_CompileOnly_Group);
+}
+
+Action *Driver::ConstructPhaseAction(const ArgList &Args, phases::ID Phase,
+ Action *Input) const {
+ llvm::PrettyStackTraceString CrashInfo("Constructing phase actions");
+ // Build the appropriate action.
+ switch (Phase) {
+ case phases::Link: llvm_unreachable("link action invalid here.");
+ case phases::Preprocess: {
+ types::ID OutputTy;
+ // -{M, MM} alter the output type.
+ if (Args.hasArg(options::OPT_M, options::OPT_MM)) {
+ OutputTy = types::TY_Dependencies;
+ } else {
+ OutputTy = types::getPreprocessedType(Input->getType());
+ assert(OutputTy != types::TY_INVALID &&
+ "Cannot preprocess this input type!");
+ }
+ return new PreprocessJobAction(Input, OutputTy);
+ }
+ case phases::Precompile:
+ return new PrecompileJobAction(Input, types::TY_PCH);
+ case phases::Compile: {
+ if (Args.hasArg(options::OPT_fsyntax_only)) {
+ return new CompileJobAction(Input, types::TY_Nothing);
+ } else if (Args.hasArg(options::OPT_rewrite_objc)) {
+ return new CompileJobAction(Input, types::TY_RewrittenObjC);
+ } else if (Args.hasArg(options::OPT_rewrite_legacy_objc)) {
+ return new CompileJobAction(Input, types::TY_RewrittenLegacyObjC);
+ } else if (Args.hasArg(options::OPT__analyze, options::OPT__analyze_auto)) {
+ return new AnalyzeJobAction(Input, types::TY_Plist);
+ } else if (Args.hasArg(options::OPT__migrate)) {
+ return new MigrateJobAction(Input, types::TY_Remap);
+ } else if (Args.hasArg(options::OPT_emit_ast)) {
+ return new CompileJobAction(Input, types::TY_AST);
+ } else if (IsUsingLTO(Args)) {
+ types::ID Output =
+ Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
+ return new CompileJobAction(Input, Output);
+ } else {
+ return new CompileJobAction(Input, types::TY_PP_Asm);
+ }
+ }
+ case phases::Assemble:
+ return new AssembleJobAction(Input, types::TY_Object);
+ }
+
+ llvm_unreachable("invalid phase in ConstructPhaseAction");
+}
+
+bool Driver::IsUsingLTO(const ArgList &Args) const {
+ // Check for -emit-llvm or -flto.
+ if (Args.hasArg(options::OPT_emit_llvm) ||
+ Args.hasFlag(options::OPT_flto, options::OPT_fno_lto, false))
+ return true;
+
+ // Check for -O4.
+ if (const Arg *A = Args.getLastArg(options::OPT_O_Group))
+ return A->getOption().matches(options::OPT_O4);
+
+ return false;
+}
+
+void Driver::BuildJobs(Compilation &C) const {
+ llvm::PrettyStackTraceString CrashInfo("Building compilation jobs");
+
+ Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o);
+
+ // It is an error to provide a -o option if we are making multiple output
+ // files.
+ if (FinalOutput) {
+ unsigned NumOutputs = 0;
+ for (ActionList::const_iterator it = C.getActions().begin(),
+ ie = C.getActions().end(); it != ie; ++it)
+ if ((*it)->getType() != types::TY_Nothing)
+ ++NumOutputs;
+
+ if (NumOutputs > 1) {
+ Diag(clang::diag::err_drv_output_argument_with_multiple_files);
+ FinalOutput = 0;
+ }
+ }
+
+ for (ActionList::const_iterator it = C.getActions().begin(),
+ ie = C.getActions().end(); it != ie; ++it) {
+ Action *A = *it;
+
+ // If we are linking an image for multiple archs then the linker wants
+ // -arch_multiple and -final_output <final image name>. Unfortunately, this
+ // doesn't fit in cleanly because we have to pass this information down.
+ //
+ // FIXME: This is a hack; find a cleaner way to integrate this into the
+ // process.
+ const char *LinkingOutput = 0;
+ if (isa<LipoJobAction>(A)) {
+ if (FinalOutput)
+ LinkingOutput = FinalOutput->getValue(C.getArgs());
+ else
+ LinkingOutput = DefaultImageName.c_str();
+ }
+
+ InputInfo II;
+ BuildJobsForAction(C, A, &C.getDefaultToolChain(),
+ /*BoundArch*/0,
+ /*AtTopLevel*/ true,
+ /*LinkingOutput*/ LinkingOutput,
+ II);
+ }
+
+ // If the user passed -Qunused-arguments or there were errors, don't warn
+ // about any unused arguments.
+ if (Diags.hasErrorOccurred() ||
+ C.getArgs().hasArg(options::OPT_Qunused_arguments))
+ return;
+
+ // Claim -### here.
+ (void) C.getArgs().hasArg(options::OPT__HASH_HASH_HASH);
+
+ for (ArgList::const_iterator it = C.getArgs().begin(), ie = C.getArgs().end();
+ it != ie; ++it) {
+ Arg *A = *it;
+
+ // FIXME: It would be nice to be able to send the argument to the
+ // DiagnosticsEngine, so that extra values, position, and so on could be
+ // printed.
+ if (!A->isClaimed()) {
+ if (A->getOption().hasNoArgumentUnused())
+ continue;
+
+ // Suppress the warning automatically if this is just a flag, and it is an
+ // instance of an argument we already claimed.
+ const Option &Opt = A->getOption();
+ if (isa<FlagOption>(Opt)) {
+ bool DuplicateClaimed = false;
+
+ for (arg_iterator it = C.getArgs().filtered_begin(&Opt),
+ ie = C.getArgs().filtered_end(); it != ie; ++it) {
+ if ((*it)->isClaimed()) {
+ DuplicateClaimed = true;
+ break;
+ }
+ }
+
+ if (DuplicateClaimed)
+ continue;
+ }
+
+ Diag(clang::diag::warn_drv_unused_argument)
+ << A->getAsString(C.getArgs());
+ }
+ }
+}
+
+static const Tool &SelectToolForJob(Compilation &C, const ToolChain *TC,
+ const JobAction *JA,
+ const ActionList *&Inputs) {
+ const Tool *ToolForJob = 0;
+
+ // See if we should look for a compiler with an integrated assembler. We match
+ // bottom up, so what we are actually looking for is an assembler job with a
+ // compiler input.
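+ // (Illustratively: for 'clang -c foo.c' the assemble job's single input is
+ // the compile job, so when that compiler reports an integrated assembler it
+ // is selected to run the assemble job directly on the compile job's inputs.)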
+
+ if (C.getArgs().hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ TC->IsIntegratedAssemblerDefault()) &&
+ !C.getArgs().hasArg(options::OPT_save_temps) &&
+ isa<AssembleJobAction>(JA) &&
+ Inputs->size() == 1 && isa<CompileJobAction>(*Inputs->begin())) {
+ const Tool &Compiler = TC->SelectTool(
+ C, cast<JobAction>(**Inputs->begin()), (*Inputs)[0]->getInputs());
+ if (Compiler.hasIntegratedAssembler()) {
+ Inputs = &(*Inputs)[0]->getInputs();
+ ToolForJob = &Compiler;
+ }
+ }
+
+ // Otherwise use the tool for the current job.
+ if (!ToolForJob)
+ ToolForJob = &TC->SelectTool(C, *JA, *Inputs);
+
+ // See if we should use an integrated preprocessor. We do so when we have
+ // exactly one input, since this is the only use case we care about
+ // (irrelevant since we don't support combine yet).
+ if (Inputs->size() == 1 && isa<PreprocessJobAction>(*Inputs->begin()) &&
+ !C.getArgs().hasArg(options::OPT_no_integrated_cpp) &&
+ !C.getArgs().hasArg(options::OPT_traditional_cpp) &&
+ !C.getArgs().hasArg(options::OPT_save_temps) &&
+ ToolForJob->hasIntegratedCPP())
+ Inputs = &(*Inputs)[0]->getInputs();
+
+ return *ToolForJob;
+}
+
+void Driver::BuildJobsForAction(Compilation &C,
+ const Action *A,
+ const ToolChain *TC,
+ const char *BoundArch,
+ bool AtTopLevel,
+ const char *LinkingOutput,
+ InputInfo &Result) const {
+ llvm::PrettyStackTraceString CrashInfo("Building compilation jobs");
+
+ if (const InputAction *IA = dyn_cast<InputAction>(A)) {
+ // FIXME: It would be nice to not claim this here; maybe the old scheme of
+ // just using Args was better?
+ const Arg &Input = IA->getInputArg();
+ Input.claim();
+ if (Input.getOption().matches(options::OPT_INPUT)) {
+ const char *Name = Input.getValue(C.getArgs());
+ Result = InputInfo(Name, A->getType(), Name);
+ } else
+ Result = InputInfo(&Input, A->getType(), "");
+ return;
+ }
+
+ if (const BindArchAction *BAA = dyn_cast<BindArchAction>(A)) {
+ const ToolChain *TC = &C.getDefaultToolChain();
+
+ if (BAA->getArchName())
+ TC = &getToolChain(C.getArgs(), BAA->getArchName());
+
+ BuildJobsForAction(C, *BAA->begin(), TC, BAA->getArchName(),
+ AtTopLevel, LinkingOutput, Result);
+ return;
+ }
+
+ const ActionList *Inputs = &A->getInputs();
+
+ const JobAction *JA = cast<JobAction>(A);
+ const Tool &T = SelectToolForJob(C, TC, JA, Inputs);
+
+ // Build the input info list for this job's inputs.
+ InputInfoList InputInfos;
+ for (ActionList::const_iterator it = Inputs->begin(), ie = Inputs->end();
+ it != ie; ++it) {
+ // Treat dsymutil sub-jobs as being at the top-level too; they shouldn't get
+ // temporary output names.
+ //
+ // FIXME: Clean this up.
+ bool SubJobAtTopLevel = false;
+ if (AtTopLevel && isa<DsymutilJobAction>(A))
+ SubJobAtTopLevel = true;
+
+ // Also treat verify sub-jobs as being at the top-level. They don't
+ // produce any output and so don't need temporary output names.
+ if (AtTopLevel && isa<VerifyJobAction>(A))
+ SubJobAtTopLevel = true;
+
+ InputInfo II;
+ BuildJobsForAction(C, *it, TC, BoundArch,
+ SubJobAtTopLevel, LinkingOutput, II);
+ InputInfos.push_back(II);
+ }
+
+ // Always use the first input as the base input.
+ const char *BaseInput = InputInfos[0].getBaseInput();
+
+ // ... except dsymutil actions, which use their actual input as the base
+ // input.
+ if (JA->getType() == types::TY_dSYM)
+ BaseInput = InputInfos[0].getFilename();
+
+ // Determine the place to write output to, if any.
+ if (JA->getType() == types::TY_Nothing) {
+ Result = InputInfo(A->getType(), BaseInput);
+ } else {
+ Result = InputInfo(GetNamedOutputPath(C, *JA, BaseInput, AtTopLevel),
+ A->getType(), BaseInput);
+ }
+
+ if (CCCPrintBindings && !CCGenDiagnostics) {
+ llvm::errs() << "# \"" << T.getToolChain().getTripleString() << '"'
+ << " - \"" << T.getName() << "\", inputs: [";
+ for (unsigned i = 0, e = InputInfos.size(); i != e; ++i) {
+ llvm::errs() << InputInfos[i].getAsString();
+ if (i + 1 != e)
+ llvm::errs() << ", ";
+ }
+ llvm::errs() << "], output: " << Result.getAsString() << "\n";
+ } else {
+ T.ConstructJob(C, *JA, Result, InputInfos,
+ C.getArgsForToolChain(TC, BoundArch), LinkingOutput);
+ }
+}
+
+const char *Driver::GetNamedOutputPath(Compilation &C,
+ const JobAction &JA,
+ const char *BaseInput,
+ bool AtTopLevel) const {
+ llvm::PrettyStackTraceString CrashInfo("Computing output path");
+ // Output to a user requested destination?
+ if (AtTopLevel && !isa<DsymutilJobAction>(JA) &&
+ !isa<VerifyJobAction>(JA)) {
+ if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o))
+ return C.addResultFile(FinalOutput->getValue(C.getArgs()));
+ }
+
+ // Default to writing to stdout?
+ if (AtTopLevel && isa<PreprocessJobAction>(JA) && !CCGenDiagnostics)
+ return "-";
+
+ // Output to a temporary file?
+ if ((!AtTopLevel && !C.getArgs().hasArg(options::OPT_save_temps)) ||
+ CCGenDiagnostics) {
+ StringRef Name = llvm::sys::path::filename(BaseInput);
+ std::pair<StringRef, StringRef> Split = Name.split('.');
+ std::string TmpName =
+ GetTemporaryPath(Split.first, types::getTypeTempSuffix(JA.getType()));
+ return C.addTempFile(C.getArgs().MakeArgString(TmpName.c_str()));
+ }
+
+ SmallString<128> BasePath(BaseInput);
+ StringRef BaseName;
+
+ // Dsymutil actions should use the full path.
+ if (isa<DsymutilJobAction>(JA) || isa<VerifyJobAction>(JA))
+ BaseName = BasePath;
+ else
+ BaseName = llvm::sys::path::filename(BasePath);
+
+ // Determine what the derived output name should be.
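+ // (For example, an input named foo.c derives foo.s from a compile job and
+ // foo.o from an assemble job; linked images fall back to the default image
+ // name, commonly a.out.)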
+ const char *NamedOutput;
+ if (JA.getType() == types::TY_Image) {
+ NamedOutput = DefaultImageName.c_str();
+ } else {
+ const char *Suffix = types::getTypeTempSuffix(JA.getType());
+ assert(Suffix && "All types used for output should have a suffix.");
+
+ std::string::size_type End = std::string::npos;
+ if (!types::appendSuffixForType(JA.getType()))
+ End = BaseName.rfind('.');
+ std::string Suffixed(BaseName.substr(0, End));
+ Suffixed += '.';
+ Suffixed += Suffix;
+ NamedOutput = C.getArgs().MakeArgString(Suffixed.c_str());
+ }
+
+ // If we're saving temps and the temp filename conflicts with the input
+ // filename, then avoid overwriting the input file.
+ if (!AtTopLevel && C.getArgs().hasArg(options::OPT_save_temps) &&
+ NamedOutput == BaseName) {
+ StringRef Name = llvm::sys::path::filename(BaseInput);
+ std::pair<StringRef, StringRef> Split = Name.split('.');
+ std::string TmpName =
+ GetTemporaryPath(Split.first, types::getTypeTempSuffix(JA.getType()));
+ return C.addTempFile(C.getArgs().MakeArgString(TmpName.c_str()));
+ }
+
+ // As an annoying special case, PCH generation doesn't strip the pathname.
+ if (JA.getType() == types::TY_PCH) {
+ llvm::sys::path::remove_filename(BasePath);
+ if (BasePath.empty())
+ BasePath = NamedOutput;
+ else
+ llvm::sys::path::append(BasePath, NamedOutput);
+ return C.addResultFile(C.getArgs().MakeArgString(BasePath.c_str()));
+ } else {
+ return C.addResultFile(NamedOutput);
+ }
+}
+
+std::string Driver::GetFilePath(const char *Name, const ToolChain &TC) const {
+ // Respect a limited subset of the '-Bprefix' functionality in GCC by
+ // attempting to use this prefix when looking up file paths.
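+ // (A prefix directory beginning with '=' is treated as sysroot-relative
+ // below, following the GCC convention for such paths.)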
+ for (Driver::prefix_list::const_iterator it = PrefixDirs.begin(),
+ ie = PrefixDirs.end(); it != ie; ++it) {
+ std::string Dir(*it);
+ if (Dir.empty())
+ continue;
+ if (Dir[0] == '=')
+ Dir = SysRoot + Dir.substr(1);
+ llvm::sys::Path P(Dir);
+ P.appendComponent(Name);
+ bool Exists;
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
+ return P.str();
+ }
+
+ llvm::sys::Path P(ResourceDir);
+ P.appendComponent(Name);
+ bool Exists;
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
+ return P.str();
+
+ const ToolChain::path_list &List = TC.getFilePaths();
+ for (ToolChain::path_list::const_iterator
+ it = List.begin(), ie = List.end(); it != ie; ++it) {
+ std::string Dir(*it);
+ if (Dir.empty())
+ continue;
+ if (Dir[0] == '=')
+ Dir = SysRoot + Dir.substr(1);
+ llvm::sys::Path P(Dir);
+ P.appendComponent(Name);
+ bool Exists;
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
+ return P.str();
+ }
+
+ return Name;
+}
+
+static bool isPathExecutable(llvm::sys::Path &P, bool WantFile) {
+ bool Exists;
+ return (WantFile ? !llvm::sys::fs::exists(P.str(), Exists) && Exists
+ : P.canExecute());
+}
+
+std::string Driver::GetProgramPath(const char *Name, const ToolChain &TC,
+ bool WantFile) const {
+ // FIXME: Needs a better variable than DefaultTargetTriple
+ std::string TargetSpecificExecutable(DefaultTargetTriple + "-" + Name);
+ // Respect a limited subset of the '-Bprefix' functionality in GCC by
+ // attempting to use this prefix when looking up program paths.
+ for (Driver::prefix_list::const_iterator it = PrefixDirs.begin(),
+ ie = PrefixDirs.end(); it != ie; ++it) {
+ llvm::sys::Path P(*it);
+ P.appendComponent(TargetSpecificExecutable);
+ if (isPathExecutable(P, WantFile)) return P.str();
+ P.eraseComponent();
+ P.appendComponent(Name);
+ if (isPathExecutable(P, WantFile)) return P.str();
+ }
+
+ const ToolChain::path_list &List = TC.getProgramPaths();
+ for (ToolChain::path_list::const_iterator
+ it = List.begin(), ie = List.end(); it != ie; ++it) {
+ llvm::sys::Path P(*it);
+ P.appendComponent(TargetSpecificExecutable);
+ if (isPathExecutable(P, WantFile)) return P.str();
+ P.eraseComponent();
+ P.appendComponent(Name);
+ if (isPathExecutable(P, WantFile)) return P.str();
+ }
+
+ // If all else failed, search the path.
+ llvm::sys::Path
+ P(llvm::sys::Program::FindProgramByName(TargetSpecificExecutable));
+ if (!P.empty())
+ return P.str();
+
+ P = llvm::sys::Path(llvm::sys::Program::FindProgramByName(Name));
+ if (!P.empty())
+ return P.str();
+
+ return Name;
+}
+
+std::string Driver::GetTemporaryPath(StringRef Prefix, const char *Suffix)
+ const {
+ // FIXME: This is lame; sys::Path should provide this function (in particular,
+ // it should know how to find the temporary files dir).
+ std::string Error;
+ const char *TmpDir = ::getenv("TMPDIR");
+ if (!TmpDir)
+ TmpDir = ::getenv("TEMP");
+ if (!TmpDir)
+ TmpDir = ::getenv("TMP");
+ if (!TmpDir)
+ TmpDir = "/tmp";
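+ // e.g., with none of TMPDIR, TEMP, or TMP set, this produces a unique file
+ // under /tmp whose name is derived from Prefix and ends with the Suffix.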
+ llvm::sys::Path P(TmpDir);
+ P.appendComponent(Prefix);
+ if (P.makeUnique(false, &Error)) {
+ Diag(clang::diag::err_drv_unable_to_make_temp) << Error;
+ return "";
+ }
+
+ // FIXME: Grumble, makeUnique sometimes leaves the file around!? PR3837.
+ P.eraseFromDisk(false, 0);
+
+ P.appendSuffix(Suffix);
+ return P.str();
+}
+
+/// \brief Compute target triple from args.
+///
+/// This routine provides the logic to compute a target triple from various
+/// args passed to the driver and the default triple string.
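+/// For example (illustrative): a default triple of x86_64-apple-darwin11 with
+/// '-arch i386' yields an i386-apple-darwin11 target, and '-m32' rewrites an
+/// x86_64 triple to x86 (ppc64 to ppc).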
+static llvm::Triple computeTargetTriple(StringRef DefaultTargetTriple,
+ const ArgList &Args,
+ StringRef DarwinArchName) {
+ // FIXME: Already done in Compilation *Driver::BuildCompilation
+ if (const Arg *A = Args.getLastArg(options::OPT_target))
+ DefaultTargetTriple = A->getValue(Args);
+
+ llvm::Triple Target(llvm::Triple::normalize(DefaultTargetTriple));
+
+ // Handle Darwin-specific options available here.
+ if (Target.isOSDarwin()) {
+ // If an explicit Darwin arch name is given, that trumps all.
+ if (!DarwinArchName.empty()) {
+ Target.setArch(
+ llvm::Triple::getArchTypeForDarwinArchName(DarwinArchName));
+ return Target;
+ }
+
+ // Handle the Darwin '-arch' flag.
+ if (Arg *A = Args.getLastArg(options::OPT_arch)) {
+ llvm::Triple::ArchType DarwinArch
+ = llvm::Triple::getArchTypeForDarwinArchName(A->getValue(Args));
+ if (DarwinArch != llvm::Triple::UnknownArch)
+ Target.setArch(DarwinArch);
+ }
+ }
+
+ // Skip further flag support on OSes which don't support '-m32' or '-m64'.
+ if (Target.getArchName() == "tce" ||
+ Target.getOS() == llvm::Triple::AuroraUX ||
+ Target.getOS() == llvm::Triple::Minix)
+ return Target;
+
+ // Handle pseudo-target flags '-m32' and '-m64'.
+ // FIXME: Should this information be in llvm::Triple?
+ if (Arg *A = Args.getLastArg(options::OPT_m32, options::OPT_m64)) {
+ if (A->getOption().matches(options::OPT_m32)) {
+ if (Target.getArch() == llvm::Triple::x86_64)
+ Target.setArch(llvm::Triple::x86);
+ if (Target.getArch() == llvm::Triple::ppc64)
+ Target.setArch(llvm::Triple::ppc);
+ } else {
+ if (Target.getArch() == llvm::Triple::x86)
+ Target.setArch(llvm::Triple::x86_64);
+ if (Target.getArch() == llvm::Triple::ppc)
+ Target.setArch(llvm::Triple::ppc64);
+ }
+ }
+
+ return Target;
+}
+
+const ToolChain &Driver::getToolChain(const ArgList &Args,
+ StringRef DarwinArchName) const {
+ llvm::Triple Target = computeTargetTriple(DefaultTargetTriple, Args,
+ DarwinArchName);
+
+ ToolChain *&TC = ToolChains[Target.str()];
+ if (!TC) {
+ switch (Target.getOS()) {
+ case llvm::Triple::AuroraUX:
+ TC = new toolchains::AuroraUX(*this, Target, Args);
+ break;
+ case llvm::Triple::Darwin:
+ case llvm::Triple::MacOSX:
+ case llvm::Triple::IOS:
+ if (Target.getArch() == llvm::Triple::x86 ||
+ Target.getArch() == llvm::Triple::x86_64 ||
+ Target.getArch() == llvm::Triple::arm ||
+ Target.getArch() == llvm::Triple::thumb)
+ TC = new toolchains::DarwinClang(*this, Target);
+ else
+ TC = new toolchains::Darwin_Generic_GCC(*this, Target, Args);
+ break;
+ case llvm::Triple::DragonFly:
+ TC = new toolchains::DragonFly(*this, Target, Args);
+ break;
+ case llvm::Triple::OpenBSD:
+ TC = new toolchains::OpenBSD(*this, Target, Args);
+ break;
+ case llvm::Triple::NetBSD:
+ TC = new toolchains::NetBSD(*this, Target, Args);
+ break;
+ case llvm::Triple::FreeBSD:
+ TC = new toolchains::FreeBSD(*this, Target, Args);
+ break;
+ case llvm::Triple::Minix:
+ TC = new toolchains::Minix(*this, Target, Args);
+ break;
+ case llvm::Triple::Linux:
+ if (Target.getArch() == llvm::Triple::hexagon)
+ TC = new toolchains::Hexagon_TC(*this, Target);
+ else
+ TC = new toolchains::Linux(*this, Target, Args);
+ break;
+ case llvm::Triple::Solaris:
+ TC = new toolchains::Solaris(*this, Target, Args);
+ break;
+ case llvm::Triple::Win32:
+ TC = new toolchains::Windows(*this, Target);
+ break;
+ case llvm::Triple::MinGW32:
+ // FIXME: We need a MinGW toolchain. Fallthrough for now.
+ default:
+ // TCE is an OSless target
+ if (Target.getArchName() == "tce") {
+ TC = new toolchains::TCEToolChain(*this, Target);
+ break;
+ }
+
+ TC = new toolchains::Generic_GCC(*this, Target, Args);
+ break;
+ }
+ }
+ return *TC;
+}
+
+bool Driver::ShouldUseClangCompiler(const Compilation &C, const JobAction &JA,
+ const llvm::Triple &Triple) const {
+ // Check if user requested no clang, or clang doesn't understand this type (we
+ // only handle single inputs for now).
+ if (!CCCUseClang || JA.size() != 1 ||
+ !types::isAcceptedByClang((*JA.begin())->getType()))
+ return false;
+
+ // Otherwise make sure this is an action clang understands.
+ if (isa<PreprocessJobAction>(JA)) {
+ if (!CCCUseClangCPP) {
+ Diag(clang::diag::warn_drv_not_using_clang_cpp);
+ return false;
+ }
+ } else if (!isa<PrecompileJobAction>(JA) && !isa<CompileJobAction>(JA))
+ return false;
+
+ // Use clang for C++?
+ if (!CCCUseClangCXX && types::isCXX((*JA.begin())->getType())) {
+ Diag(clang::diag::warn_drv_not_using_clang_cxx);
+ return false;
+ }
+
+ // Always use clang for precompiling, AST generation, and rewriting,
+ // regardless of archs.
+ if (isa<PrecompileJobAction>(JA) ||
+ types::isOnlyAcceptedByClang(JA.getType()))
+ return true;
+
+ // Finally, don't use clang if this isn't one of the user specified archs to
+ // build.
+ if (!CCCClangArchs.empty() && !CCCClangArchs.count(Triple.getArch())) {
+ Diag(clang::diag::warn_drv_not_using_clang_arch) << Triple.getArchName();
+ return false;
+ }
+
+ return true;
+}
+
+/// GetReleaseVersion - Parse (([0-9]+)(.([0-9]+)(.([0-9]+)?))?)? and return the
+/// grouped values as integers. Numbers which are not provided are set to 0.
+///
+/// \return True if the entire string was parsed (9.2), or all groups were
+/// parsed (10.3.5extrastuff).
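+/// For example, "9.2" parses to (9, 2, 0) with HadExtra false, while
+/// "10.3.5extrastuff" parses to (10, 3, 5) with HadExtra true.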
+bool Driver::GetReleaseVersion(const char *Str, unsigned &Major,
+ unsigned &Minor, unsigned &Micro,
+ bool &HadExtra) {
+ HadExtra = false;
+
+ Major = Minor = Micro = 0;
+ if (*Str == '\0')
+ return true;
+
+ char *End;
+ Major = (unsigned) strtol(Str, &End, 10);
+ if (*Str != '\0' && *End == '\0')
+ return true;
+ if (*End != '.')
+ return false;
+
+ Str = End+1;
+ Minor = (unsigned) strtol(Str, &End, 10);
+ if (*Str != '\0' && *End == '\0')
+ return true;
+ if (*End != '.')
+ return false;
+
+ Str = End+1;
+ Micro = (unsigned) strtol(Str, &End, 10);
+ if (*Str != '\0' && *End == '\0')
+ return true;
+ if (Str == End)
+ return false;
+ HadExtra = true;
+ return true;
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp b/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp
new file mode 100644
index 0000000..715819d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/DriverOptions.cpp
@@ -0,0 +1,37 @@
+//===--- DriverOptions.cpp - Driver Options Table -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Options.h"
+#include "clang/Driver/OptTable.h"
+#include "clang/Driver/Option.h"
+
+using namespace clang::driver;
+using namespace clang::driver::options;
+
+static const OptTable::Info InfoTable[] = {
+#define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR) \
+ { NAME, HELPTEXT, METAVAR, Option::KIND##Class, PARAM, FLAGS, \
+ OPT_##GROUP, OPT_##ALIAS },
+#include "clang/Driver/Options.inc"
+};
+
+namespace {
+
+class DriverOptTable : public OptTable {
+public:
+ DriverOptTable()
+ : OptTable(InfoTable, sizeof(InfoTable) / sizeof(InfoTable[0])) {}
+};
+
+}
+
+OptTable *clang::driver::createDriverOptTable() {
+ return new DriverOptTable();
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/InputInfo.h b/contrib/llvm/tools/clang/lib/Driver/InputInfo.h
new file mode 100644
index 0000000..2a2f4b9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/InputInfo.h
@@ -0,0 +1,88 @@
+//===--- InputInfo.h - Input Source & Type Information ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_DRIVER_INPUTINFO_H_
+#define CLANG_LIB_DRIVER_INPUTINFO_H_
+
+#include "clang/Driver/Types.h"
+
+#include <cassert>
+#include <string>
+
+namespace clang {
+namespace driver {
+
+/// InputInfo - Wrapper for information about an input source.
+class InputInfo {
+ // FIXME: The distinction between filenames and inputarg here is
+ // gross; we should probably drop the idea of a "linker
+ // input". Doing so means tweaking pipelining to still create link
+ // steps when it sees linker inputs (but not treat them as
+ // arguments), and making sure that arguments get rendered
+ // correctly.
+ enum Class {
+ Nothing,
+ Filename,
+ InputArg,
+ Pipe
+ };
+
+ union {
+ const char *Filename;
+ const Arg *InputArg;
+ } Data;
+ Class Kind;
+ types::ID Type;
+ const char *BaseInput;
+
+public:
+ InputInfo() {}
+ InputInfo(types::ID _Type, const char *_BaseInput)
+ : Kind(Nothing), Type(_Type), BaseInput(_BaseInput) {
+ }
+ InputInfo(const char *_Filename, types::ID _Type, const char *_BaseInput)
+ : Kind(Filename), Type(_Type), BaseInput(_BaseInput) {
+ Data.Filename = _Filename;
+ }
+ InputInfo(const Arg *_InputArg, types::ID _Type, const char *_BaseInput)
+ : Kind(InputArg), Type(_Type), BaseInput(_BaseInput) {
+ Data.InputArg = _InputArg;
+ }
+
+ bool isNothing() const { return Kind == Nothing; }
+ bool isFilename() const { return Kind == Filename; }
+ bool isInputArg() const { return Kind == InputArg; }
+ types::ID getType() const { return Type; }
+ const char *getBaseInput() const { return BaseInput; }
+
+ const char *getFilename() const {
+ assert(isFilename() && "Invalid accessor.");
+ return Data.Filename;
+ }
+ const Arg &getInputArg() const {
+ assert(isInputArg() && "Invalid accessor.");
+ return *Data.InputArg;
+ }
+
+ /// getAsString - Return a string name for this input, for
+ /// debugging.
+ std::string getAsString() const {
+ if (isFilename())
+ return std::string("\"") + getFilename() + '"';
+ else if (isInputArg())
+ return "(input arg)";
+ else
+ return "(nothing)";
+ }
+};
+
+} // end namespace driver
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Driver/Job.cpp b/contrib/llvm/tools/clang/lib/Driver/Job.cpp
new file mode 100644
index 0000000..825c86a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Job.cpp
@@ -0,0 +1,42 @@
+//===--- Job.cpp - Command to Execute -------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Job.h"
+
+#include "llvm/ADT/STLExtras.h"
+
+#include <cassert>
+using namespace clang::driver;
+
+Job::~Job() {}
+
+void Command::anchor() {}
+
+Command::Command(const Action &_Source, const Tool &_Creator,
+ const char *_Executable, const ArgStringList &_Arguments)
+ : Job(CommandClass), Source(_Source), Creator(_Creator),
+ Executable(_Executable), Arguments(_Arguments)
+{
+}
+
+JobList::JobList() : Job(JobListClass) {}
+
+JobList::~JobList() {
+ for (iterator it = begin(), ie = end(); it != ie; ++it)
+ delete *it;
+}
+
+void JobList::clear() {
+ DeleteContainerPointers(Jobs);
+}
+
+void Job::addCommand(Command *C) {
+ cast<JobList>(this)->addJob(C);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp b/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp
new file mode 100644
index 0000000..4f5390b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/OptTable.cpp
@@ -0,0 +1,384 @@
+//===--- OptTable.cpp - Option Table Implementation -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/OptTable.h"
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Option.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <map>
+using namespace clang::driver;
+using namespace clang::driver::options;
+using namespace clang;
+
+// Ordering on Info. The ordering is *almost* lexicographic, with two
+// exceptions. First, '\0' comes at the end of the alphabet instead of
+// the beginning (thus options precede any other options which prefix
+// them). Second, for options with the same name, the less permissive
+// version should come first; a Flag option should precede a Joined
+// option, for example.
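+// For example, "-foo" sorts before "-f" under this ordering (since "-f"
+// prefixes "-foo"), and a Flag spelling of a name precedes a Joined spelling
+// of the same name.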
+
+static int StrCmpOptionName(const char *A, const char *B) {
+ char a = *A, b = *B;
+ while (a == b) {
+ if (a == '\0')
+ return 0;
+
+ a = *++A;
+ b = *++B;
+ }
+
+ if (a == '\0') // A is a prefix of B.
+ return 1;
+ if (b == '\0') // B is a prefix of A.
+ return -1;
+
+ // Otherwise lexicographic.
+ return (a < b) ? -1 : 1;
+}
+
+namespace clang {
+namespace driver {
+static inline bool operator<(const OptTable::Info &A, const OptTable::Info &B) {
+ if (&A == &B)
+ return false;
+
+ if (int N = StrCmpOptionName(A.Name, B.Name))
+ return N == -1;
+
+ // Names are the same, check that classes are in order; exactly one
+ // should be joined, and it should succeed the other.
+ assert(((A.Kind == Option::JoinedClass) ^ (B.Kind == Option::JoinedClass)) &&
+ "Unexpected classes for options with same name.");
+ return B.Kind == Option::JoinedClass;
+}
+
+// Support lower_bound between info and an option name.
+static inline bool operator<(const OptTable::Info &I, const char *Name) {
+ return StrCmpOptionName(I.Name, Name) == -1;
+}
+static inline bool operator<(const char *Name, const OptTable::Info &I) {
+ return StrCmpOptionName(Name, I.Name) == -1;
+}
+}
+}
+
+//
+
+OptSpecifier::OptSpecifier(const Option *Opt) : ID(Opt->getID()) {}
+
+//
+
+OptTable::OptTable(const Info *_OptionInfos, unsigned _NumOptionInfos)
+ : OptionInfos(_OptionInfos), NumOptionInfos(_NumOptionInfos),
+ Options(new Option*[NumOptionInfos]),
+ TheInputOption(0), TheUnknownOption(0), FirstSearchableIndex(0)
+{
+ // Explicitly zero-initialize the Options array to work around a bug in
+ // array value-initialization on MinGW with gcc 4.3.5.
+ memset(Options, 0, sizeof(*Options) * NumOptionInfos);
+
+ // Find start of normal options.
+ for (unsigned i = 0, e = getNumOptions(); i != e; ++i) {
+ unsigned Kind = getInfo(i + 1).Kind;
+ if (Kind == Option::InputClass) {
+ assert(!TheInputOption && "Cannot have multiple input options!");
+ TheInputOption = getOption(i + 1);
+ } else if (Kind == Option::UnknownClass) {
+ assert(!TheUnknownOption && "Cannot have multiple unknown options!");
+ TheUnknownOption = getOption(i + 1);
+ } else if (Kind != Option::GroupClass) {
+ FirstSearchableIndex = i;
+ break;
+ }
+ }
+ assert(FirstSearchableIndex != 0 && "No searchable options?");
+
+#ifndef NDEBUG
+ // Check that everything after the first searchable option is a
+ // regular option class.
+ for (unsigned i = FirstSearchableIndex, e = getNumOptions(); i != e; ++i) {
+ Option::OptionClass Kind = (Option::OptionClass) getInfo(i + 1).Kind;
+ assert((Kind != Option::InputClass && Kind != Option::UnknownClass &&
+ Kind != Option::GroupClass) &&
+ "Special options should be defined first!");
+ }
+
+ // Check that options are in order.
+ for (unsigned i = FirstSearchableIndex+1, e = getNumOptions(); i != e; ++i) {
+ if (!(getInfo(i) < getInfo(i + 1))) {
+ getOption(i)->dump();
+ getOption(i + 1)->dump();
+ llvm_unreachable("Options are not in order!");
+ }
+ }
+#endif
+}
+
+OptTable::~OptTable() {
+ for (unsigned i = 0, e = getNumOptions(); i != e; ++i)
+ delete Options[i];
+ delete[] Options;
+}
+
+Option *OptTable::CreateOption(unsigned id) const {
+ const Info &info = getInfo(id);
+ const OptionGroup *Group =
+ cast_or_null<OptionGroup>(getOption(info.GroupID));
+ const Option *Alias = getOption(info.AliasID);
+
+ Option *Opt = 0;
+ switch (info.Kind) {
+ case Option::InputClass:
+ Opt = new InputOption(id); break;
+ case Option::UnknownClass:
+ Opt = new UnknownOption(id); break;
+ case Option::GroupClass:
+ Opt = new OptionGroup(id, info.Name, Group); break;
+ case Option::FlagClass:
+ Opt = new FlagOption(id, info.Name, Group, Alias); break;
+ case Option::JoinedClass:
+ Opt = new JoinedOption(id, info.Name, Group, Alias); break;
+ case Option::SeparateClass:
+ Opt = new SeparateOption(id, info.Name, Group, Alias); break;
+ case Option::CommaJoinedClass:
+ Opt = new CommaJoinedOption(id, info.Name, Group, Alias); break;
+ case Option::MultiArgClass:
+ Opt = new MultiArgOption(id, info.Name, Group, Alias, info.Param); break;
+ case Option::JoinedOrSeparateClass:
+ Opt = new JoinedOrSeparateOption(id, info.Name, Group, Alias); break;
+ case Option::JoinedAndSeparateClass:
+ Opt = new JoinedAndSeparateOption(id, info.Name, Group, Alias); break;
+ }
+
+ if (info.Flags & DriverOption)
+ Opt->setDriverOption(true);
+ if (info.Flags & LinkerInput)
+ Opt->setLinkerInput(true);
+ if (info.Flags & NoArgumentUnused)
+ Opt->setNoArgumentUnused(true);
+ if (info.Flags & NoForward)
+ Opt->setNoForward(true);
+ if (info.Flags & RenderAsInput)
+ Opt->setNoOptAsInput(true);
+ if (info.Flags & RenderJoined) {
+ assert((info.Kind == Option::JoinedOrSeparateClass ||
+ info.Kind == Option::SeparateClass) && "Invalid option.");
+ Opt->setRenderStyle(Option::RenderJoinedStyle);
+ }
+ if (info.Flags & RenderSeparate) {
+ assert((info.Kind == Option::JoinedOrSeparateClass ||
+ info.Kind == Option::JoinedClass) && "Invalid option.");
+ Opt->setRenderStyle(Option::RenderSeparateStyle);
+ }
+ if (info.Flags & Unsupported)
+ Opt->setUnsupported(true);
+
+ return Opt;
+}
+
+Arg *OptTable::ParseOneArg(const ArgList &Args, unsigned &Index) const {
+ unsigned Prev = Index;
+ const char *Str = Args.getArgString(Index);
+
+ // Anything that doesn't start with '-' is an input, as is '-' itself.
+ if (Str[0] != '-' || Str[1] == '\0')
+ return new Arg(TheInputOption, Index++, Str);
+
+ const Info *Start = OptionInfos + FirstSearchableIndex;
+ const Info *End = OptionInfos + getNumOptions();
+
+ // Search for the first option which could be a prefix.
+ Start = std::lower_bound(Start, End, Str);
+
+ // Options are stored in sorted order, with '\0' at the end of the
+ // alphabet. Since the only options which can accept a string must
+ // prefix it, we iteratively search for the next option which could
+ // be a prefix.
+ //
+ // FIXME: This is searching much more than necessary, but I am
+ // blanking on the simplest way to make it fast. We can solve this
+ // problem when we move to TableGen.
+ for (; Start != End; ++Start) {
+ // Scan for first option which is a proper prefix.
+ for (; Start != End; ++Start)
+ if (memcmp(Str, Start->Name, strlen(Start->Name)) == 0)
+ break;
+ if (Start == End)
+ break;
+
+ // See if this option matches.
+ if (Arg *A = getOption(Start - OptionInfos + 1)->accept(Args, Index))
+ return A;
+
+ // Otherwise, see if this argument was missing values.
+ if (Prev != Index)
+ return 0;
+ }
+
+ return new Arg(TheUnknownOption, Index++, Str);
+}
+
+InputArgList *OptTable::ParseArgs(const char* const *ArgBegin,
+ const char* const *ArgEnd,
+ unsigned &MissingArgIndex,
+ unsigned &MissingArgCount) const {
+ InputArgList *Args = new InputArgList(ArgBegin, ArgEnd);
+
+ // FIXME: Handle '@' args (or at least error on them).
+
+ MissingArgIndex = MissingArgCount = 0;
+ unsigned Index = 0, End = ArgEnd - ArgBegin;
+ while (Index < End) {
+ // Ignore empty arguments (other things may still take them as arguments).
+ if (Args->getArgString(Index)[0] == '\0') {
+ ++Index;
+ continue;
+ }
+
+ unsigned Prev = Index;
+ Arg *A = ParseOneArg(*Args, Index);
+ assert(Index > Prev && "Parser failed to consume argument.");
+
+ // Check for missing argument error.
+ if (!A) {
+ assert(Index >= End && "Unexpected parser error.");
+ assert(Index - Prev - 1 && "No missing arguments!");
+ MissingArgIndex = Prev;
+ MissingArgCount = Index - Prev - 1;
+ break;
+ }
+
+ Args->append(A);
+ }
+
+ return Args;
+}
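+
+// A minimal usage sketch (hypothetical caller; Opts, argv/argc and
+// ReportMissingArg are assumed for illustration, not defined in this file):
+//
+//   unsigned MissingArgIndex, MissingArgCount;
+//   InputArgList *Args = Opts.ParseArgs(argv + 1, argv + argc,
+//                                       MissingArgIndex, MissingArgCount);
+//   if (MissingArgCount)   // e.g. "-o" was given as the very last argument
+//     ReportMissingArg(Args->getArgString(MissingArgIndex), MissingArgCount);
+//   delete Args;           // the caller owns the returned list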
+
+static std::string getOptionHelpName(const OptTable &Opts, OptSpecifier Id) {
+ std::string Name = Opts.getOptionName(Id);
+
+ // Add metavar, if used.
+ switch (Opts.getOptionKind(Id)) {
+ case Option::GroupClass: case Option::InputClass: case Option::UnknownClass:
+ llvm_unreachable("Invalid option with help text.");
+
+ case Option::MultiArgClass:
+ llvm_unreachable("Cannot print metavar for this kind of option.");
+
+ case Option::FlagClass:
+ break;
+
+ case Option::SeparateClass: case Option::JoinedOrSeparateClass:
+ Name += ' ';
+ // FALLTHROUGH
+ case Option::JoinedClass: case Option::CommaJoinedClass:
+ case Option::JoinedAndSeparateClass:
+ if (const char *MetaVarName = Opts.getOptionMetaVar(Id))
+ Name += MetaVarName;
+ else
+ Name += "<value>";
+ break;
+ }
+
+ return Name;
+}
+
+static void PrintHelpOptionList(raw_ostream &OS, StringRef Title,
+ std::vector<std::pair<std::string,
+ const char*> > &OptionHelp) {
+ OS << Title << ":\n";
+
+ // Find the maximum option length.
+ unsigned OptionFieldWidth = 0;
+ for (unsigned i = 0, e = OptionHelp.size(); i != e; ++i) {
+ // Skip titles.
+ if (!OptionHelp[i].second)
+ continue;
+
+ // Limit the amount of padding we are willing to give up for alignment.
+ unsigned Length = OptionHelp[i].first.size();
+ if (Length <= 23)
+ OptionFieldWidth = std::max(OptionFieldWidth, Length);
+ }
+
+ const unsigned InitialPad = 2;
+ for (unsigned i = 0, e = OptionHelp.size(); i != e; ++i) {
+ const std::string &Option = OptionHelp[i].first;
+ int Pad = OptionFieldWidth - int(Option.size());
+ OS.indent(InitialPad) << Option;
+
+ // Break on long option names.
+ if (Pad < 0) {
+ OS << "\n";
+ Pad = OptionFieldWidth + InitialPad;
+ }
+ OS.indent(Pad + 1) << OptionHelp[i].second << '\n';
+ }
+}
+
+static const char *getOptionHelpGroup(const OptTable &Opts, OptSpecifier Id) {
+ unsigned GroupID = Opts.getOptionGroupID(Id);
+
+ // If not in a group, return the default help group.
+ if (!GroupID)
+ return "OPTIONS";
+
+ // Abuse the help text of the option groups to store the "help group"
+ // name.
+ //
+ // FIXME: Split out option groups.
+ if (const char *GroupHelp = Opts.getOptionHelpText(GroupID))
+ return GroupHelp;
+
+ // Otherwise keep looking.
+ return getOptionHelpGroup(Opts, GroupID);
+}
+
+void OptTable::PrintHelp(raw_ostream &OS, const char *Name,
+ const char *Title, bool ShowHidden) const {
+ OS << "OVERVIEW: " << Title << "\n";
+ OS << '\n';
+ OS << "USAGE: " << Name << " [options] <inputs>\n";
+ OS << '\n';
+
+ // Render help text into a map of group-name to a list of (option, help)
+ // pairs.
+ typedef std::map<std::string,
+ std::vector<std::pair<std::string, const char*> > > helpmap_ty;
+ helpmap_ty GroupedOptionHelp;
+
+ for (unsigned i = 0, e = getNumOptions(); i != e; ++i) {
+ unsigned Id = i + 1;
+
+ // FIXME: Split out option groups.
+ if (getOptionKind(Id) == Option::GroupClass)
+ continue;
+
+ if (!ShowHidden && isOptionHelpHidden(Id))
+ continue;
+
+ if (const char *Text = getOptionHelpText(Id)) {
+ const char *HelpGroup = getOptionHelpGroup(*this, Id);
+ const std::string &OptName = getOptionHelpName(*this, Id);
+ GroupedOptionHelp[HelpGroup].push_back(std::make_pair(OptName, Text));
+ }
+ }
+
+ for (helpmap_ty::iterator it = GroupedOptionHelp.begin(),
+ ie = GroupedOptionHelp.end(); it != ie; ++it) {
+ if (it != GroupedOptionHelp.begin())
+ OS << "\n";
+ PrintHelpOptionList(OS, it->first, it->second);
+ }
+
+ OS.flush();
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Option.cpp b/contrib/llvm/tools/clang/lib/Driver/Option.cpp
new file mode 100644
index 0000000..03360ea
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Option.cpp
@@ -0,0 +1,280 @@
+//===--- Option.cpp - Abstract Driver Options -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Option.h"
+
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <algorithm>
+using namespace clang::driver;
+
+Option::Option(OptionClass _Kind, OptSpecifier _ID, const char *_Name,
+ const OptionGroup *_Group, const Option *_Alias)
+ : Kind(_Kind), ID(_ID.getID()), Name(_Name), Group(_Group), Alias(_Alias),
+ Unsupported(false), LinkerInput(false), NoOptAsInput(false),
+ DriverOption(false), NoArgumentUnused(false), NoForward(false) {
+
+ // Multi-level aliases are not supported, and alias options cannot
+ // have groups. This just simplifies option tracking, it is not an
+ // inherent limitation.
+ assert((!Alias || (!Alias->Alias && !Group)) &&
+ "Multi-level aliases and aliases with groups are unsupported.");
+
+ // Initialize rendering options based on the class.
+ switch (Kind) {
+ case GroupClass:
+ case InputClass:
+ case UnknownClass:
+ RenderStyle = RenderValuesStyle;
+ break;
+
+ case JoinedClass:
+ case JoinedAndSeparateClass:
+ RenderStyle = RenderJoinedStyle;
+ break;
+
+ case CommaJoinedClass:
+ RenderStyle = RenderCommaJoinedStyle;
+ break;
+
+ case FlagClass:
+ case SeparateClass:
+ case MultiArgClass:
+ case JoinedOrSeparateClass:
+ RenderStyle = RenderSeparateStyle;
+ break;
+ }
+}
+
+Option::~Option() {
+}
+
+void Option::dump() const {
+ llvm::errs() << "<";
+ switch (Kind) {
+#define P(N) case N: llvm::errs() << #N; break
+ P(GroupClass);
+ P(InputClass);
+ P(UnknownClass);
+ P(FlagClass);
+ P(JoinedClass);
+ P(SeparateClass);
+ P(CommaJoinedClass);
+ P(MultiArgClass);
+ P(JoinedOrSeparateClass);
+ P(JoinedAndSeparateClass);
+#undef P
+ }
+
+ llvm::errs() << " Name:\"" << Name << '"';
+
+ if (Group) {
+ llvm::errs() << " Group:";
+ Group->dump();
+ }
+
+ if (Alias) {
+ llvm::errs() << " Alias:";
+ Alias->dump();
+ }
+
+ if (const MultiArgOption *MOA = dyn_cast<MultiArgOption>(this))
+ llvm::errs() << " NumArgs:" << MOA->getNumArgs();
+
+ llvm::errs() << ">\n";
+}
+
+bool Option::matches(OptSpecifier Opt) const {
+ // Aliases are never considered in matching, look through them.
+ if (Alias)
+ return Alias->matches(Opt);
+
+ // Check exact match.
+ if (ID == Opt)
+ return true;
+
+ if (Group)
+ return Group->matches(Opt);
+ return false;
+}
+
+OptionGroup::OptionGroup(OptSpecifier ID, const char *Name,
+ const OptionGroup *Group)
+ : Option(Option::GroupClass, ID, Name, Group, 0) {
+}
+
+Arg *OptionGroup::accept(const ArgList &Args, unsigned &Index) const {
+ llvm_unreachable("accept() should never be called on an OptionGroup");
+}
+
+InputOption::InputOption(OptSpecifier ID)
+ : Option(Option::InputClass, ID, "<input>", 0, 0) {
+}
+
+Arg *InputOption::accept(const ArgList &Args, unsigned &Index) const {
+ llvm_unreachable("accept() should never be called on an InputOption");
+}
+
+UnknownOption::UnknownOption(OptSpecifier ID)
+ : Option(Option::UnknownClass, ID, "<unknown>", 0, 0) {
+}
+
+Arg *UnknownOption::accept(const ArgList &Args, unsigned &Index) const {
+ llvm_unreachable("accept() should never be called on an UnknownOption");
+}
+
+FlagOption::FlagOption(OptSpecifier ID, const char *Name,
+ const OptionGroup *Group, const Option *Alias)
+ : Option(Option::FlagClass, ID, Name, Group, Alias) {
+}
+
+Arg *FlagOption::accept(const ArgList &Args, unsigned &Index) const {
+ // Matches iff this is an exact match.
+ // FIXME: Avoid strlen.
+ if (getName().size() != strlen(Args.getArgString(Index)))
+ return 0;
+
+ return new Arg(getUnaliasedOption(), Index++);
+}
+
+JoinedOption::JoinedOption(OptSpecifier ID, const char *Name,
+ const OptionGroup *Group, const Option *Alias)
+ : Option(Option::JoinedClass, ID, Name, Group, Alias) {
+}
+
+Arg *JoinedOption::accept(const ArgList &Args, unsigned &Index) const {
+ // Always matches.
+ const char *Value = Args.getArgString(Index) + getName().size();
+ return new Arg(getUnaliasedOption(), Index++, Value);
+}
+
+CommaJoinedOption::CommaJoinedOption(OptSpecifier ID, const char *Name,
+ const OptionGroup *Group,
+ const Option *Alias)
+ : Option(Option::CommaJoinedClass, ID, Name, Group, Alias) {
+}
+
+Arg *CommaJoinedOption::accept(const ArgList &Args,
+ unsigned &Index) const {
+ // Always matches.
+ const char *Str = Args.getArgString(Index) + getName().size();
+ Arg *A = new Arg(getUnaliasedOption(), Index++);
+
+ // Parse out the comma separated values.
+ const char *Prev = Str;
+ for (;; ++Str) {
+ char c = *Str;
+
+ if (!c || c == ',') {
+ if (Prev != Str) {
+ char *Value = new char[Str - Prev + 1];
+ memcpy(Value, Prev, Str - Prev);
+ Value[Str - Prev] = '\0';
+ A->getValues().push_back(Value);
+ }
+
+ if (!c)
+ break;
+
+ Prev = Str + 1;
+ }
+ }
+ A->setOwnsValues(true);
+
+ return A;
+}
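+
+// For illustration, with a comma-joined option spelled "-Wl," (used here only
+// as an example name):
+//
+//   "-Wl,-z,defs"  ->  one Arg with the values {"-z", "defs"}
+//   "-Wl,"         ->  one Arg with no values (empty pieces are skipped)
+//
+// The values are freshly allocated copies, hence setOwnsValues(true).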
+
+SeparateOption::SeparateOption(OptSpecifier ID, const char *Name,
+ const OptionGroup *Group, const Option *Alias)
+ : Option(Option::SeparateClass, ID, Name, Group, Alias) {
+}
+
+Arg *SeparateOption::accept(const ArgList &Args, unsigned &Index) const {
+ // Matches iff this is an exact match.
+ // FIXME: Avoid strlen.
+ if (getName().size() != strlen(Args.getArgString(Index)))
+ return 0;
+
+ Index += 2;
+ if (Index > Args.getNumInputArgStrings())
+ return 0;
+
+ return new Arg(getUnaliasedOption(), Index - 2, Args.getArgString(Index - 1));
+}
+
+MultiArgOption::MultiArgOption(OptSpecifier ID, const char *Name,
+ const OptionGroup *Group, const Option *Alias,
+ unsigned _NumArgs)
+ : Option(Option::MultiArgClass, ID, Name, Group, Alias), NumArgs(_NumArgs) {
+ assert(NumArgs > 1 && "Invalid MultiArgOption!");
+}
+
+Arg *MultiArgOption::accept(const ArgList &Args, unsigned &Index) const {
+ // Matches iff this is an exact match.
+ // FIXME: Avoid strlen.
+ if (getName().size() != strlen(Args.getArgString(Index)))
+ return 0;
+
+ Index += 1 + NumArgs;
+ if (Index > Args.getNumInputArgStrings())
+ return 0;
+
+ Arg *A = new Arg(getUnaliasedOption(), Index - 1 - NumArgs,
+ Args.getArgString(Index - NumArgs));
+ for (unsigned i = 1; i != NumArgs; ++i)
+ A->getValues().push_back(Args.getArgString(Index - NumArgs + i));
+ return A;
+}
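+
+// For illustration, a hypothetical MultiArgOption named "-foo" with
+// NumArgs == 2 consumes three argv slots, "-foo A B", producing one Arg that
+// remembers the index of "-foo" and carries "A" (set in the constructor call)
+// and "B" (appended by the loop) as its values.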
+
+JoinedOrSeparateOption::JoinedOrSeparateOption(OptSpecifier ID,
+ const char *Name,
+ const OptionGroup *Group,
+ const Option *Alias)
+ : Option(Option::JoinedOrSeparateClass, ID, Name, Group, Alias) {
+}
+
+Arg *JoinedOrSeparateOption::accept(const ArgList &Args,
+ unsigned &Index) const {
+ // If this is not an exact match, it is a joined arg.
+ // FIXME: Avoid strlen.
+ if (getName().size() != strlen(Args.getArgString(Index))) {
+ const char *Value = Args.getArgString(Index) + getName().size();
+ return new Arg(this, Index++, Value);
+ }
+
+ // Otherwise it must be separate.
+ Index += 2;
+ if (Index > Args.getNumInputArgStrings())
+ return 0;
+
+ return new Arg(getUnaliasedOption(), Index - 2, Args.getArgString(Index - 1));
+}
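+
+// For illustration, for a joined-or-separate option named "-I" (used here
+// purely as an example): "-I/usr/include" matches the joined form and yields
+// the value "/usr/include", while "-I /usr/include" consumes two argv slots
+// and yields the same value through the separate form.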
+
+JoinedAndSeparateOption::JoinedAndSeparateOption(OptSpecifier ID,
+ const char *Name,
+ const OptionGroup *Group,
+ const Option *Alias)
+ : Option(Option::JoinedAndSeparateClass, ID, Name, Group, Alias) {
+}
+
+Arg *JoinedAndSeparateOption::accept(const ArgList &Args,
+ unsigned &Index) const {
+ // Always matches.
+
+ Index += 2;
+ if (Index > Args.getNumInputArgStrings())
+ return 0;
+
+ return new Arg(getUnaliasedOption(), Index - 2,
+ Args.getArgString(Index - 2) + getName().size(),
+ Args.getArgString(Index - 1));
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Phases.cpp b/contrib/llvm/tools/clang/lib/Driver/Phases.cpp
new file mode 100644
index 0000000..b885eee
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Phases.cpp
@@ -0,0 +1,27 @@
+//===--- Phases.cpp - Transformations on Driver Types ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Phases.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#include <cassert>
+
+using namespace clang::driver;
+
+const char *phases::getPhaseName(ID Id) {
+ switch (Id) {
+ case Preprocess: return "preprocessor";
+ case Precompile: return "precompiler";
+ case Compile: return "compiler";
+ case Assemble: return "assembler";
+ case Link: return "linker";
+ }
+
+ llvm_unreachable("Invalid phase id.");
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Tool.cpp b/contrib/llvm/tools/clang/lib/Driver/Tool.cpp
new file mode 100644
index 0000000..b93864f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Tool.cpp
@@ -0,0 +1,21 @@
+//===--- Tool.cpp - Compilation Tools -------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Tool.h"
+
+using namespace clang::driver;
+
+Tool::Tool(const char *_Name, const char *_ShortName,
+ const ToolChain &TC) : Name(_Name), ShortName(_ShortName),
+ TheToolChain(TC)
+{
+}
+
+Tool::~Tool() {
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
new file mode 100644
index 0000000..db4d2a8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChain.cpp
@@ -0,0 +1,288 @@
+//===--- ToolChain.cpp - Collections of tools for one platform ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/ToolChain.h"
+
+#include "clang/Driver/Action.h"
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/ObjCRuntime.h"
+#include "clang/Driver/Options.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang::driver;
+using namespace clang;
+
+ToolChain::ToolChain(const Driver &D, const llvm::Triple &T)
+ : D(D), Triple(T) {
+}
+
+ToolChain::~ToolChain() {
+}
+
+const Driver &ToolChain::getDriver() const {
+ return D;
+}
+
+std::string ToolChain::GetFilePath(const char *Name) const {
+ return D.GetFilePath(Name, *this);
+
+}
+
+std::string ToolChain::GetProgramPath(const char *Name, bool WantFile) const {
+ return D.GetProgramPath(Name, *this, WantFile);
+}
+
+types::ID ToolChain::LookupTypeForExtension(const char *Ext) const {
+ return types::lookupTypeForExtension(Ext);
+}
+
+bool ToolChain::HasNativeLLVMSupport() const {
+ return false;
+}
+
+void ToolChain::configureObjCRuntime(ObjCRuntime &runtime) const {
+ switch (runtime.getKind()) {
+ case ObjCRuntime::NeXT:
+ // Assume a minimal NeXT runtime.
+ runtime.HasARC = false;
+ runtime.HasWeak = false;
+ runtime.HasSubscripting = false;
+ runtime.HasTerminate = false;
+ return;
+
+ case ObjCRuntime::GNU:
+ // Assume a maximal GNU runtime.
+ runtime.HasARC = true;
+ runtime.HasWeak = true;
+ runtime.HasSubscripting = false; // to be added
+ runtime.HasTerminate = false; // to be added
+ return;
+ }
+ llvm_unreachable("invalid runtime kind!");
+}
+
+/// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targeting.
+//
+// FIXME: tblgen this.
+static const char *getARMTargetCPU(const ArgList &Args,
+ const llvm::Triple &Triple) {
+ // For Darwin targets, the -arch option (which is translated to a
+ // corresponding -march option) should determine the architecture
+ // (and the Mach-O slice) regardless of any -mcpu options.
+ if (!Triple.isOSDarwin()) {
+ // FIXME: Warn on inconsistent use of -mcpu and -march.
+ // If we have -mcpu=, use that.
+ if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ return A->getValue(Args);
+ }
+
+ StringRef MArch;
+ if (Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ // Otherwise, if we have -march= choose the base CPU for that arch.
+ MArch = A->getValue(Args);
+ } else {
+ // Otherwise, use the Arch from the triple.
+ MArch = Triple.getArchName();
+ }
+
+ return llvm::StringSwitch<const char *>(MArch)
+ .Cases("armv2", "armv2a","arm2")
+ .Case("armv3", "arm6")
+ .Case("armv3m", "arm7m")
+ .Cases("armv4", "armv4t", "arm7tdmi")
+ .Cases("armv5", "armv5t", "arm10tdmi")
+ .Cases("armv5e", "armv5te", "arm1026ejs")
+ .Case("armv5tej", "arm926ej-s")
+ .Cases("armv6", "armv6k", "arm1136jf-s")
+ .Case("armv6j", "arm1136j-s")
+ .Cases("armv6z", "armv6zk", "arm1176jzf-s")
+ .Case("armv6t2", "arm1156t2-s")
+ .Cases("armv7", "armv7a", "armv7-a", "cortex-a8")
+ .Cases("armv7r", "armv7-r", "cortex-r4")
+ .Cases("armv7m", "armv7-m", "cortex-m3")
+ .Case("ep9312", "ep9312")
+ .Case("iwmmxt", "iwmmxt")
+ .Case("xscale", "xscale")
+ .Cases("armv6m", "armv6-m", "cortex-m0")
+ // If all else failed, return the most basic CPU LLVM supports.
+ .Default("arm7tdmi");
+}
+
+/// getLLVMArchSuffixForARM - Get the LLVM arch name to use for a particular
+/// CPU.
+//
+// FIXME: This is redundant with -mcpu; why does LLVM use this?
+// FIXME: tblgen this, or kill it!
+static const char *getLLVMArchSuffixForARM(StringRef CPU) {
+ return llvm::StringSwitch<const char *>(CPU)
+ .Cases("arm7tdmi", "arm7tdmi-s", "arm710t", "v4t")
+ .Cases("arm720t", "arm9", "arm9tdmi", "v4t")
+ .Cases("arm920", "arm920t", "arm922t", "v4t")
+ .Cases("arm940t", "ep9312","v4t")
+ .Cases("arm10tdmi", "arm1020t", "v5")
+ .Cases("arm9e", "arm926ej-s", "arm946e-s", "v5e")
+ .Cases("arm966e-s", "arm968e-s", "arm10e", "v5e")
+ .Cases("arm1020e", "arm1022e", "xscale", "iwmmxt", "v5e")
+ .Cases("arm1136j-s", "arm1136jf-s", "arm1176jz-s", "v6")
+ .Cases("arm1176jzf-s", "mpcorenovfp", "mpcore", "v6")
+ .Cases("arm1156t2-s", "arm1156t2f-s", "v6t2")
+ .Cases("cortex-a8", "cortex-a9", "v7")
+ .Case("cortex-m3", "v7m")
+ .Case("cortex-m4", "v7m")
+ .Case("cortex-m0", "v6m")
+ .Default("");
+}
+
+std::string ToolChain::ComputeLLVMTriple(const ArgList &Args,
+ types::ID InputType) const {
+ switch (getTriple().getArch()) {
+ default:
+ return getTripleString();
+
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb: {
+ // FIXME: Factor into subclasses.
+ llvm::Triple Triple = getTriple();
+
+ // Thumb2 is the default for V7 on Darwin.
+ //
+ // FIXME: Thumb should just be another -target-feature, not in the triple.
+ StringRef Suffix =
+ getLLVMArchSuffixForARM(getARMTargetCPU(Args, Triple));
+ bool ThumbDefault = (Suffix == "v7" && getTriple().isOSDarwin());
+ std::string ArchName = "arm";
+
+ // Assembly files should start in ARM mode.
+ if (InputType != types::TY_PP_Asm &&
+ Args.hasFlag(options::OPT_mthumb, options::OPT_mno_thumb, ThumbDefault))
+ ArchName = "thumb";
+ Triple.setArchName(ArchName + Suffix.str());
+
+ return Triple.getTriple();
+ }
+ }
+}
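+
+// For illustration (assuming an armv7-apple-darwin11 triple with no -mcpu or
+// -march overrides): getARMTargetCPU yields "cortex-a8", whose suffix is
+// "v7", so Thumb becomes the default and the computed triple is
+// "thumbv7-apple-darwin11"; -mno-thumb (or an assembly input) keeps it at
+// "armv7-apple-darwin11".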
+
+std::string ToolChain::ComputeEffectiveClangTriple(const ArgList &Args,
+ types::ID InputType) const {
+ // Diagnose use of Darwin OS deployment target arguments on non-Darwin.
+ if (Arg *A = Args.getLastArg(options::OPT_mmacosx_version_min_EQ,
+ options::OPT_miphoneos_version_min_EQ,
+ options::OPT_mios_simulator_version_min_EQ))
+ getDriver().Diag(diag::err_drv_clang_unsupported)
+ << A->getAsString(Args);
+
+ return ComputeLLVMTriple(Args, InputType);
+}
+
+void ToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ // Each toolchain should provide the appropriate include flags.
+}
+
+ToolChain::RuntimeLibType ToolChain::GetRuntimeLibType(
+ const ArgList &Args) const
+{
+ if (Arg *A = Args.getLastArg(options::OPT_rtlib_EQ)) {
+ StringRef Value = A->getValue(Args);
+ if (Value == "compiler-rt")
+ return ToolChain::RLT_CompilerRT;
+ if (Value == "libgcc")
+ return ToolChain::RLT_Libgcc;
+ getDriver().Diag(diag::err_drv_invalid_rtlib_name)
+ << A->getAsString(Args);
+ }
+
+ return GetDefaultRuntimeLibType();
+}
+
+ToolChain::CXXStdlibType ToolChain::GetCXXStdlibType(const ArgList &Args) const{
+ if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
+ StringRef Value = A->getValue(Args);
+ if (Value == "libc++")
+ return ToolChain::CST_Libcxx;
+ if (Value == "libstdc++")
+ return ToolChain::CST_Libstdcxx;
+ getDriver().Diag(diag::err_drv_invalid_stdlib_name)
+ << A->getAsString(Args);
+ }
+
+ return ToolChain::CST_Libstdcxx;
+}
+
+/// \brief Utility function to add a system include directory to CC1 arguments.
+/*static*/ void ToolChain::addSystemInclude(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ const Twine &Path) {
+ CC1Args.push_back("-internal-isystem");
+ CC1Args.push_back(DriverArgs.MakeArgString(Path));
+}
+
+/// \brief Utility function to add a system include directory with extern "C"
+/// semantics to CC1 arguments.
+///
+/// Note that this should be used rarely, and only for directories that
+/// historically and for legacy reasons are treated as having implicit extern
+/// "C" semantics. These semantics are *ignored* by and large today, but its
+/// important to preserve the preprocessor changes resulting from the
+/// classification.
+/*static*/ void ToolChain::addExternCSystemInclude(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ const Twine &Path) {
+ CC1Args.push_back("-internal-externc-isystem");
+ CC1Args.push_back(DriverArgs.MakeArgString(Path));
+}
+
+/// \brief Utility function to add a list of system include directories to CC1.
+/*static*/ void ToolChain::addSystemIncludes(const ArgList &DriverArgs,
+ ArgStringList &CC1Args,
+ ArrayRef<StringRef> Paths) {
+ for (ArrayRef<StringRef>::iterator I = Paths.begin(), E = Paths.end();
+ I != E; ++I) {
+ CC1Args.push_back("-internal-isystem");
+ CC1Args.push_back(DriverArgs.MakeArgString(*I));
+ }
+}
+
+void ToolChain::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ // Header search paths should be handled by each of the subclasses.
+ // Historically, they have not been, and instead have been handled inside of
+ // the CC1-layer frontend. As the logic is hoisted out, this generic function
+ // will slowly stop being called.
+ //
+ // While it is being called, replicate a bit of a hack to propagate the
+ // '-stdlib=' flag down to CC1 so that it can in turn customize the C++
+ // header search paths with it. Once all systems are overriding this
+ // function, the CC1 flag and this line can be removed.
+ DriverArgs.AddAllArgs(CC1Args, options::OPT_stdlib_EQ);
+}
+
+void ToolChain::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CXXStdlibType Type = GetCXXStdlibType(Args);
+
+ switch (Type) {
+ case ToolChain::CST_Libcxx:
+ CmdArgs.push_back("-lc++");
+ break;
+
+ case ToolChain::CST_Libstdcxx:
+ CmdArgs.push_back("-lstdc++");
+ break;
+ }
+}
+
+void ToolChain::AddCCKextLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CmdArgs.push_back("-lcc_kext");
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
new file mode 100644
index 0000000..fa9ed49
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.cpp
@@ -0,0 +1,2335 @@
+//===--- ToolChains.cpp - ToolChain Implementations -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ToolChains.h"
+
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/ObjCRuntime.h"
+#include "clang/Driver/OptTable.h"
+#include "clang/Driver/Option.h"
+#include "clang/Driver/Options.h"
+#include "clang/Basic/Version.h"
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/system_error.h"
+
+#include <cstdlib> // ::getenv
+
+#include "clang/Config/config.h" // for GCC_INSTALL_PREFIX
+
+#ifndef CLANG_PREFIX
+#define CLANG_PREFIX
+#endif
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang;
+
+/// Darwin - Darwin tool chain for i386 and x86_64.
+
+Darwin::Darwin(const Driver &D, const llvm::Triple& Triple)
+ : ToolChain(D, Triple), TargetInitialized(false),
+ ARCRuntimeForSimulator(ARCSimulator_None),
+ LibCXXForSimulator(LibCXXSimulator_None)
+{
+ // Compute the initial Darwin version from the triple
+ unsigned Major, Minor, Micro;
+ if (!Triple.getMacOSXVersion(Major, Minor, Micro))
+ getDriver().Diag(diag::err_drv_invalid_darwin_version) <<
+ Triple.getOSName();
+ llvm::raw_string_ostream(MacosxVersionMin)
+ << Major << '.' << Minor << '.' << Micro;
+
+ // FIXME: DarwinVersion is only used to find GCC's libexec directory.
+ // It should be removed when we stop supporting that.
+ DarwinVersion[0] = Minor + 4;
+ DarwinVersion[1] = Micro;
+ DarwinVersion[2] = 0;
+}
+
+types::ID Darwin::LookupTypeForExtension(const char *Ext) const {
+ types::ID Ty = types::lookupTypeForExtension(Ext);
+
+ // Darwin always preprocesses assembly files (unless -x is used explicitly).
+ if (Ty == types::TY_PP_Asm)
+ return types::TY_Asm;
+
+ return Ty;
+}
+
+bool Darwin::HasNativeLLVMSupport() const {
+ return true;
+}
+
+bool Darwin::hasARCRuntime() const {
+ // FIXME: Remove this once there is a proper way to detect an ARC runtime
+ // for the simulator.
+ switch (ARCRuntimeForSimulator) {
+ case ARCSimulator_None:
+ break;
+ case ARCSimulator_HasARCRuntime:
+ return true;
+ case ARCSimulator_NoARCRuntime:
+ return false;
+ }
+
+ if (isTargetIPhoneOS())
+ return !isIPhoneOSVersionLT(5);
+ else
+ return !isMacosxVersionLT(10, 7);
+}
+
+bool Darwin::hasSubscriptingRuntime() const {
+ return !isTargetIPhoneOS() && !isMacosxVersionLT(10, 8);
+}
+
+/// Darwin provides an ARC runtime starting in MacOS X 10.7 and iOS 5.0.
+void Darwin::configureObjCRuntime(ObjCRuntime &runtime) const {
+ if (runtime.getKind() != ObjCRuntime::NeXT)
+ return ToolChain::configureObjCRuntime(runtime);
+
+ runtime.HasARC = runtime.HasWeak = hasARCRuntime();
+ runtime.HasSubscripting = hasSubscriptingRuntime();
+
+ // So far, objc_terminate is only available in iOS 5.
+ // FIXME: do the simulator logic properly.
+ if (!ARCRuntimeForSimulator && isTargetIPhoneOS())
+ runtime.HasTerminate = !isIPhoneOSVersionLT(5);
+ else
+ runtime.HasTerminate = false;
+}
+
+/// Darwin provides a blocks runtime starting in MacOS X 10.6 and iOS 3.2.
+bool Darwin::hasBlocksRuntime() const {
+ if (isTargetIPhoneOS())
+ return !isIPhoneOSVersionLT(3, 2);
+ else
+ return !isMacosxVersionLT(10, 6);
+}
+
+static const char *GetArmArchForMArch(StringRef Value) {
+ return llvm::StringSwitch<const char*>(Value)
+ .Case("armv6k", "armv6")
+ .Case("armv5tej", "armv5")
+ .Case("xscale", "xscale")
+ .Case("armv4t", "armv4t")
+ .Case("armv7", "armv7")
+ .Cases("armv7a", "armv7-a", "armv7")
+ .Cases("armv7r", "armv7-r", "armv7")
+ .Cases("armv7m", "armv7-m", "armv7")
+ .Default(0);
+}
+
+static const char *GetArmArchForMCpu(StringRef Value) {
+ return llvm::StringSwitch<const char *>(Value)
+ .Cases("arm9e", "arm946e-s", "arm966e-s", "arm968e-s", "arm926ej-s","armv5")
+ .Cases("arm10e", "arm10tdmi", "armv5")
+ .Cases("arm1020t", "arm1020e", "arm1022e", "arm1026ej-s", "armv5")
+ .Case("xscale", "xscale")
+ .Cases("arm1136j-s", "arm1136jf-s", "arm1176jz-s",
+ "arm1176jzf-s", "cortex-m0", "armv6")
+ .Cases("cortex-a8", "cortex-r4", "cortex-m3", "cortex-a9", "armv7")
+ .Default(0);
+}
+
+StringRef Darwin::getDarwinArchName(const ArgList &Args) const {
+ switch (getTriple().getArch()) {
+ default:
+ return getArchName();
+
+ case llvm::Triple::thumb:
+ case llvm::Triple::arm: {
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
+ if (const char *Arch = GetArmArchForMArch(A->getValue(Args)))
+ return Arch;
+
+ if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ if (const char *Arch = GetArmArchForMCpu(A->getValue(Args)))
+ return Arch;
+
+ return "arm";
+ }
+ }
+}
+
+Darwin::~Darwin() {
+ // Free tool implementations.
+ for (llvm::DenseMap<unsigned, Tool*>::iterator
+ it = Tools.begin(), ie = Tools.end(); it != ie; ++it)
+ delete it->second;
+}
+
+std::string Darwin::ComputeEffectiveClangTriple(const ArgList &Args,
+ types::ID InputType) const {
+ llvm::Triple Triple(ComputeLLVMTriple(Args, InputType));
+
+ // If the target isn't initialized (e.g., an unknown Darwin platform), return
+ // the default triple.
+ if (!isTargetInitialized())
+ return Triple.getTriple();
+
+ SmallString<16> Str;
+ Str += isTargetIPhoneOS() ? "ios" : "macosx";
+ Str += getTargetVersion().getAsString();
+ Triple.setOSName(Str);
+
+ return Triple.getTriple();
+}
+
+void Generic_ELF::anchor() {}
+
+Tool &Darwin::SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const {
+ Action::ActionClass Key;
+
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple())) {
+ // Fall back to llvm-gcc for i386 kext compiles; we don't support that ABI.
+ if (Inputs.size() == 1 &&
+ types::isCXX(Inputs[0]->getType()) &&
+ getTriple().isOSDarwin() &&
+ getTriple().getArch() == llvm::Triple::x86 &&
+ (C.getArgs().getLastArg(options::OPT_fapple_kext) ||
+ C.getArgs().getLastArg(options::OPT_mkernel)))
+ Key = JA.getKind();
+ else
+ Key = Action::AnalyzeJobClass;
+ } else
+ Key = JA.getKind();
+
+ bool UseIntegratedAs = C.getArgs().hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ IsIntegratedAssemblerDefault());
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::InputClass:
+ case Action::BindArchClass:
+ llvm_unreachable("Invalid tool kind.");
+ case Action::PreprocessJobClass:
+ T = new tools::darwin::Preprocess(*this); break;
+ case Action::AnalyzeJobClass:
+ case Action::MigrateJobClass:
+ T = new tools::Clang(*this); break;
+ case Action::PrecompileJobClass:
+ case Action::CompileJobClass:
+ T = new tools::darwin::Compile(*this); break;
+ case Action::AssembleJobClass: {
+ if (UseIntegratedAs)
+ T = new tools::ClangAs(*this);
+ else
+ T = new tools::darwin::Assemble(*this);
+ break;
+ }
+ case Action::LinkJobClass:
+ T = new tools::darwin::Link(*this); break;
+ case Action::LipoJobClass:
+ T = new tools::darwin::Lipo(*this); break;
+ case Action::DsymutilJobClass:
+ T = new tools::darwin::Dsymutil(*this); break;
+ case Action::VerifyJobClass:
+ T = new tools::darwin::VerifyDebug(*this); break;
+ }
+ }
+
+ return *T;
+}
+
+
+DarwinClang::DarwinClang(const Driver &D, const llvm::Triple& Triple)
+ : Darwin(D, Triple)
+{
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+ if (getDriver().getInstalledDir() != getDriver().Dir)
+ getProgramPaths().push_back(getDriver().Dir);
+
+ // We expect 'as', 'ld', etc. to be adjacent to our install dir.
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+ if (getDriver().getInstalledDir() != getDriver().Dir)
+ getProgramPaths().push_back(getDriver().Dir);
+
+ // For fallback, we need to know how to find the GCC cc1 executables, so we
+ // also add the GCC libexec paths. This is legacy code that can be removed
+ // once fallback is no longer useful.
+ AddGCCLibexecPath(DarwinVersion[0]);
+ AddGCCLibexecPath(DarwinVersion[0] - 2);
+ AddGCCLibexecPath(DarwinVersion[0] - 1);
+ AddGCCLibexecPath(DarwinVersion[0] + 1);
+ AddGCCLibexecPath(DarwinVersion[0] + 2);
+}
+
+void DarwinClang::AddGCCLibexecPath(unsigned darwinVersion) {
+ std::string ToolChainDir = "i686-apple-darwin";
+ ToolChainDir += llvm::utostr(darwinVersion);
+ ToolChainDir += "/4.2.1";
+
+ std::string Path = getDriver().Dir;
+ Path += "/../llvm-gcc-4.2/libexec/gcc/";
+ Path += ToolChainDir;
+ getProgramPaths().push_back(Path);
+
+ Path = "/usr/llvm-gcc-4.2/libexec/gcc/";
+ Path += ToolChainDir;
+ getProgramPaths().push_back(Path);
+}
+
+void DarwinClang::AddLinkSearchPathArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // The Clang toolchain uses explicit paths for internal libraries.
+
+ // Unfortunately, we still might depend on a few of the libraries that are
+ // only available in the gcc library directory (in particular
+ // libstdc++.dylib). For now, hardcode the path to the known install location.
+ // FIXME: This should get ripped out someday. However, when building on
+ // 10.6 (darwin10), we're still relying on this to find libstdc++.dylib.
+ llvm::sys::Path P(getDriver().Dir);
+ P.eraseComponent(); // .../usr/bin -> ../usr
+ P.appendComponent("llvm-gcc-4.2");
+ P.appendComponent("lib");
+ P.appendComponent("gcc");
+ switch (getTriple().getArch()) {
+ default:
+ llvm_unreachable("Invalid Darwin arch!");
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ P.appendComponent("i686-apple-darwin10");
+ break;
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ P.appendComponent("arm-apple-darwin10");
+ break;
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ P.appendComponent("powerpc-apple-darwin10");
+ break;
+ }
+ P.appendComponent("4.2.1");
+
+ // Determine the arch specific GCC subdirectory.
+ const char *ArchSpecificDir = 0;
+ switch (getTriple().getArch()) {
+ default:
+ break;
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb: {
+ std::string Triple = ComputeLLVMTriple(Args);
+ StringRef TripleStr = Triple;
+ if (TripleStr.startswith("armv5") || TripleStr.startswith("thumbv5"))
+ ArchSpecificDir = "v5";
+ else if (TripleStr.startswith("armv6") || TripleStr.startswith("thumbv6"))
+ ArchSpecificDir = "v6";
+ else if (TripleStr.startswith("armv7") || TripleStr.startswith("thumbv7"))
+ ArchSpecificDir = "v7";
+ break;
+ }
+ case llvm::Triple::ppc64:
+ ArchSpecificDir = "ppc64";
+ break;
+ case llvm::Triple::x86_64:
+ ArchSpecificDir = "x86_64";
+ break;
+ }
+
+ if (ArchSpecificDir) {
+ P.appendComponent(ArchSpecificDir);
+ bool Exists;
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
+ CmdArgs.push_back(Args.MakeArgString("-L" + P.str()));
+ P.eraseComponent();
+ }
+
+ bool Exists;
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
+ CmdArgs.push_back(Args.MakeArgString("-L" + P.str()));
+}
+
+void DarwinClang::AddLinkARCArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+
+ CmdArgs.push_back("-force_load");
+ llvm::sys::Path P(getDriver().ClangExecutable);
+ P.eraseComponent(); // 'clang'
+ P.eraseComponent(); // 'bin'
+ P.appendComponent("lib");
+ P.appendComponent("arc");
+ P.appendComponent("libarclite_");
+ std::string s = P.str();
+ // Mash in the platform.
+ if (isTargetIOSSimulator())
+ s += "iphonesimulator";
+ else if (isTargetIPhoneOS())
+ s += "iphoneos";
+ // FIXME: Remove this once we depend fully on -mios-simulator-version-min.
+ else if (ARCRuntimeForSimulator != ARCSimulator_None)
+ s += "iphonesimulator";
+ else
+ s += "macosx";
+ s += ".a";
+
+ CmdArgs.push_back(Args.MakeArgString(s));
+}
+
+void DarwinClang::AddLinkRuntimeLib(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const char *DarwinStaticLib) const {
+ llvm::sys::Path P(getDriver().ResourceDir);
+ P.appendComponent("lib");
+ P.appendComponent("darwin");
+ P.appendComponent(DarwinStaticLib);
+
+ // For now, allow missing resource libraries to support developers who may
+ // not have compiler-rt checked out or integrated into their build.
+ bool Exists;
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
+ CmdArgs.push_back(Args.MakeArgString(P.str()));
+}
+
+void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // Darwin only supports the compiler-rt based runtime libraries.
+ switch (GetRuntimeLibType(Args)) {
+ case ToolChain::RLT_CompilerRT:
+ break;
+ default:
+ getDriver().Diag(diag::err_drv_unsupported_rtlib_for_platform)
+ << Args.getLastArg(options::OPT_rtlib_EQ)->getValue(Args) << "darwin";
+ return;
+ }
+
+ // Darwin doesn't support real static executables, don't link any runtime
+ // libraries with -static.
+ if (Args.hasArg(options::OPT_static))
+ return;
+
+ // Reject -static-libgcc for now, we can deal with this when and if someone
+ // cares. This is useful in situations where someone wants to statically link
+ // something like libstdc++, and needs its runtime support routines.
+ if (const Arg *A = Args.getLastArg(options::OPT_static_libgcc)) {
+ getDriver().Diag(diag::err_drv_unsupported_opt)
+ << A->getAsString(Args);
+ return;
+ }
+
+ // If we are building profile support, link that library in.
+ if (Args.hasArg(options::OPT_fprofile_arcs) ||
+ Args.hasArg(options::OPT_fprofile_generate) ||
+ Args.hasArg(options::OPT_fcreate_profile) ||
+ Args.hasArg(options::OPT_coverage)) {
+ // Select the appropriate runtime library for the target.
+ if (isTargetIPhoneOS()) {
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.profile_ios.a");
+ } else {
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.profile_osx.a");
+ }
+ }
+
+ // Add ASAN runtime library, if required. Dynamic libraries and bundles
+ // should not be linked with the runtime library.
+ if (Args.hasFlag(options::OPT_faddress_sanitizer,
+ options::OPT_fno_address_sanitizer, false)) {
+ if (Args.hasArg(options::OPT_dynamiclib) ||
+ Args.hasArg(options::OPT_bundle)) return;
+ if (isTargetIPhoneOS()) {
+ getDriver().Diag(diag::err_drv_clang_unsupported_per_platform)
+ << "-faddress-sanitizer";
+ } else {
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.asan_osx.a");
+
+ // The ASAN runtime library requires C++ and CoreFoundation.
+ AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("-framework");
+ CmdArgs.push_back("CoreFoundation");
+ }
+ }
+
+ // Otherwise link libSystem, then the dynamic runtime library, and finally any
+ // target specific static runtime library.
+ CmdArgs.push_back("-lSystem");
+
+ // Select the dynamic runtime library and the target specific static library.
+ if (isTargetIPhoneOS()) {
+ // If we are compiling as iOS / simulator, don't attempt to link libgcc_s.1,
+ // it never went into the SDK.
+ // Linking against libgcc_s.1 isn't needed for iOS 5.0+
+ if (isIPhoneOSVersionLT(5, 0) && !isTargetIOSSimulator())
+ CmdArgs.push_back("-lgcc_s.1");
+
+ // We currently always need a static runtime library for iOS.
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.ios.a");
+ } else {
+ // The dynamic runtime library was merged with libSystem for 10.6 and
+ // beyond; only 10.4 and 10.5 need an additional runtime library.
+ if (isMacosxVersionLT(10, 5))
+ CmdArgs.push_back("-lgcc_s.10.4");
+ else if (isMacosxVersionLT(10, 6))
+ CmdArgs.push_back("-lgcc_s.10.5");
+
+ // For OS X, we thought we would only need a static runtime library when
+ // targeting 10.4, to provide versions of the static functions which were
+ // omitted from 10.4.dylib.
+ //
+ // Unfortunately, that turned out to not be true, because Darwin system
+ // headers can still use eprintf on i386, and it is not exported from
+ // libSystem. Therefore, we still must provide a runtime library just for
+ // the tiny tiny handful of projects that *might* use that symbol.
+ if (isMacosxVersionLT(10, 5)) {
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.10.4.a");
+ } else {
+ if (getTriple().getArch() == llvm::Triple::x86)
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.eprintf.a");
+ AddLinkRuntimeLib(Args, CmdArgs, "libclang_rt.osx.a");
+ }
+ }
+}
+
+static inline StringRef SimulatorVersionDefineName() {
+ return "__IPHONE_OS_VERSION_MIN_REQUIRED";
+}
+
+/// \brief Parse the simulator version define:
+/// __IPHONE_OS_VERSION_MIN_REQUIRED=([0-9])([0-9][0-9])([0-9][0-9])
+// and return the grouped values as integers, e.g:
+// __IPHONE_OS_VERSION_MIN_REQUIRED=40201
+// will return Major=4, Minor=2, Micro=1.
+static bool GetVersionFromSimulatorDefine(StringRef define,
+ unsigned &Major, unsigned &Minor,
+ unsigned &Micro) {
+ assert(define.startswith(SimulatorVersionDefineName()));
+ StringRef name, version;
+ llvm::tie(name, version) = define.split('=');
+ if (version.empty())
+ return false;
+ std::string verstr = version.str();
+ char *end;
+ unsigned num = (unsigned) strtol(verstr.c_str(), &end, 10);
+ if (*end != '\0')
+ return false;
+ Major = num / 10000;
+ num = num % 10000;
+ Minor = num / 100;
+ Micro = num % 100;
+ return true;
+}
+
+void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
+ const OptTable &Opts = getDriver().getOpts();
+
+ Arg *OSXVersion = Args.getLastArg(options::OPT_mmacosx_version_min_EQ);
+ Arg *iOSVersion = Args.getLastArg(options::OPT_miphoneos_version_min_EQ);
+ Arg *iOSSimVersion = Args.getLastArg(
+ options::OPT_mios_simulator_version_min_EQ);
+
+ // FIXME: HACK! When compiling for the simulator we don't get a
+ // '-miphoneos-version-min' to help us know whether there is an ARC runtime
+ // or not; try to parse a __IPHONE_OS_VERSION_MIN_REQUIRED
+ // define passed on the command line.
+ if (!iOSVersion && !iOSSimVersion) {
+ for (arg_iterator it = Args.filtered_begin(options::OPT_D),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ StringRef define = (*it)->getValue(Args);
+ if (define.startswith(SimulatorVersionDefineName())) {
+ unsigned Major = 0, Minor = 0, Micro = 0;
+ if (GetVersionFromSimulatorDefine(define, Major, Minor, Micro) &&
+ Major < 10 && Minor < 100 && Micro < 100) {
+ ARCRuntimeForSimulator = Major < 5 ? ARCSimulator_NoARCRuntime
+ : ARCSimulator_HasARCRuntime;
+ LibCXXForSimulator = Major < 5 ? LibCXXSimulator_NotAvailable
+ : LibCXXSimulator_Available;
+ }
+ break;
+ }
+ }
+ }
+
+ if (OSXVersion && (iOSVersion || iOSSimVersion)) {
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << OSXVersion->getAsString(Args)
+ << (iOSVersion ? iOSVersion : iOSSimVersion)->getAsString(Args);
+ iOSVersion = iOSSimVersion = 0;
+ } else if (iOSVersion && iOSSimVersion) {
+ getDriver().Diag(diag::err_drv_argument_not_allowed_with)
+ << iOSVersion->getAsString(Args)
+ << iOSSimVersion->getAsString(Args);
+ iOSSimVersion = 0;
+ } else if (!OSXVersion && !iOSVersion && !iOSSimVersion) {
+ // If no deployment target was specified on the command line, check for
+ // environment defines.
+ StringRef OSXTarget;
+ StringRef iOSTarget;
+ StringRef iOSSimTarget;
+ if (char *env = ::getenv("MACOSX_DEPLOYMENT_TARGET"))
+ OSXTarget = env;
+ if (char *env = ::getenv("IPHONEOS_DEPLOYMENT_TARGET"))
+ iOSTarget = env;
+ if (char *env = ::getenv("IOS_SIMULATOR_DEPLOYMENT_TARGET"))
+ iOSSimTarget = env;
+
+ // If no '-miphoneos-version-min' specified on the command line and
+ // IPHONEOS_DEPLOYMENT_TARGET is not defined, see if we can set the default
+ // based on isysroot.
+ if (iOSTarget.empty()) {
+ if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
+ StringRef first, second;
+ StringRef isysroot = A->getValue(Args);
+ llvm::tie(first, second) = isysroot.split(StringRef("SDKs/iPhoneOS"));
+ if (second != "")
+ iOSTarget = second.substr(0,3);
+ }
+ }
+
+ // If no OSX or iOS target has been specified and we're compiling for armv7,
+ // go ahead and assume we're targeting iOS.
+ if (OSXTarget.empty() && iOSTarget.empty())
+ if (getDarwinArchName(Args) == "armv7")
+ iOSTarget = "0.0";
+
+ // Handle conflicting deployment targets
+ //
+ // FIXME: Don't hardcode default here.
+
+ // Do not allow conflicts with the iOS simulator target.
+ if (!iOSSimTarget.empty() && (!OSXTarget.empty() || !iOSTarget.empty())) {
+ getDriver().Diag(diag::err_drv_conflicting_deployment_targets)
+ << "IOS_SIMULATOR_DEPLOYMENT_TARGET"
+ << (!OSXTarget.empty() ? "MACOSX_DEPLOYMENT_TARGET" :
+ "IPHONEOS_DEPLOYMENT_TARGET");
+ }
+
+ // Allow conflicts between OSX and iOS for historical reasons, but choose the
+ // default platform.
+ if (!OSXTarget.empty() && !iOSTarget.empty()) {
+ if (getTriple().getArch() == llvm::Triple::arm ||
+ getTriple().getArch() == llvm::Triple::thumb)
+ OSXTarget = "";
+ else
+ iOSTarget = "";
+ }
+
+ if (!OSXTarget.empty()) {
+ const Option *O = Opts.getOption(options::OPT_mmacosx_version_min_EQ);
+ OSXVersion = Args.MakeJoinedArg(0, O, OSXTarget);
+ Args.append(OSXVersion);
+ } else if (!iOSTarget.empty()) {
+ const Option *O = Opts.getOption(options::OPT_miphoneos_version_min_EQ);
+ iOSVersion = Args.MakeJoinedArg(0, O, iOSTarget);
+ Args.append(iOSVersion);
+ } else if (!iOSSimTarget.empty()) {
+ const Option *O = Opts.getOption(
+ options::OPT_mios_simulator_version_min_EQ);
+ iOSSimVersion = Args.MakeJoinedArg(0, O, iOSSimTarget);
+ Args.append(iOSSimVersion);
+ } else {
+ // Otherwise, assume we are targeting OS X.
+ const Option *O = Opts.getOption(options::OPT_mmacosx_version_min_EQ);
+ OSXVersion = Args.MakeJoinedArg(0, O, MacosxVersionMin);
+ Args.append(OSXVersion);
+ }
+ }
+
+ // Reject invalid architecture combinations.
+ if (iOSSimVersion && (getTriple().getArch() != llvm::Triple::x86 &&
+ getTriple().getArch() != llvm::Triple::x86_64)) {
+ getDriver().Diag(diag::err_drv_invalid_arch_for_deployment_target)
+ << getTriple().getArchName() << iOSSimVersion->getAsString(Args);
+ }
+
+ // Set the tool chain target information.
+ unsigned Major, Minor, Micro;
+ bool HadExtra;
+ if (OSXVersion) {
+ assert((!iOSVersion && !iOSSimVersion) && "Unknown target platform!");
+ if (!Driver::GetReleaseVersion(OSXVersion->getValue(Args), Major, Minor,
+ Micro, HadExtra) || HadExtra ||
+ Major != 10 || Minor >= 100 || Micro >= 100)
+ getDriver().Diag(diag::err_drv_invalid_version_number)
+ << OSXVersion->getAsString(Args);
+ } else {
+ const Arg *Version = iOSVersion ? iOSVersion : iOSSimVersion;
+ assert(Version && "Unknown target platform!");
+ if (!Driver::GetReleaseVersion(Version->getValue(Args), Major, Minor,
+ Micro, HadExtra) || HadExtra ||
+ Major >= 10 || Minor >= 100 || Micro >= 100)
+ getDriver().Diag(diag::err_drv_invalid_version_number)
+ << Version->getAsString(Args);
+ }
+
+ bool IsIOSSim = bool(iOSSimVersion);
+
+ // In GCC, the simulator historically was treated as being OS X in some
+ // contexts, like determining the link logic, despite generally being called
+ // with an iOS deployment target. For compatibility, we detect the
+ // simulator as iOS + x86, and treat it differently in a few contexts.
+ if (iOSVersion && (getTriple().getArch() == llvm::Triple::x86 ||
+ getTriple().getArch() == llvm::Triple::x86_64))
+ IsIOSSim = true;
+
+ setTarget(/*IsIPhoneOS=*/ !OSXVersion, Major, Minor, Micro, IsIOSSim);
+}
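+
+// For illustration: if none of the version-min options appear on the command
+// line but MACOSX_DEPLOYMENT_TARGET=10.6 is set in the environment, the code
+// above synthesizes the equivalent of -mmacosx-version-min=10.6 and the rest
+// of the function validates it and calls setTarget just as if the flag had
+// been passed explicitly.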
+
+void DarwinClang::AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CXXStdlibType Type = GetCXXStdlibType(Args);
+
+ switch (Type) {
+ case ToolChain::CST_Libcxx:
+ CmdArgs.push_back("-lc++");
+ break;
+
+ case ToolChain::CST_Libstdcxx: {
+ // Unfortunately, -lstdc++ doesn't always exist in the standard search path;
+ // it was previously found in the gcc lib dir. However, for all the Darwin
+ // platforms we care about it was -lstdc++.6, so we search for that
+ // explicitly if we can't see an obvious -lstdc++ candidate.
+
+ // Check in the sysroot first.
+ bool Exists;
+ if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
+ llvm::sys::Path P(A->getValue(Args));
+ P.appendComponent("usr");
+ P.appendComponent("lib");
+ P.appendComponent("libstdc++.dylib");
+
+ if (llvm::sys::fs::exists(P.str(), Exists) || !Exists) {
+ P.eraseComponent();
+ P.appendComponent("libstdc++.6.dylib");
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists) {
+ CmdArgs.push_back(Args.MakeArgString(P.str()));
+ return;
+ }
+ }
+ }
+
+ // Otherwise, look in the root.
+ // FIXME: This should be removed someday when we don't have to care about
+ // 10.6 and earlier, where /usr/lib/libstdc++.dylib does not exist.
+ if ((llvm::sys::fs::exists("/usr/lib/libstdc++.dylib", Exists) || !Exists)&&
+ (!llvm::sys::fs::exists("/usr/lib/libstdc++.6.dylib", Exists) && Exists)){
+ CmdArgs.push_back("/usr/lib/libstdc++.6.dylib");
+ return;
+ }
+
+ // Otherwise, let the linker search.
+ CmdArgs.push_back("-lstdc++");
+ break;
+ }
+ }
+}
+
+void DarwinClang::AddCCKextLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+
+ // For Darwin platforms, use the compiler-rt-based support library
+ // instead of the gcc-provided one (which is also incidentally
+ // only present in the gcc lib dir, which makes it hard to find).
+
+ llvm::sys::Path P(getDriver().ResourceDir);
+ P.appendComponent("lib");
+ P.appendComponent("darwin");
+ P.appendComponent("libclang_rt.cc_kext.a");
+
+ // For now, allow missing resource libraries to support developers who may
+ // not have compiler-rt checked out or integrated into their build.
+ bool Exists;
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
+ CmdArgs.push_back(Args.MakeArgString(P.str()));
+}
+
+DerivedArgList *Darwin::TranslateArgs(const DerivedArgList &Args,
+ const char *BoundArch) const {
+ DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
+ const OptTable &Opts = getDriver().getOpts();
+
+ // FIXME: We really want to get out of the tool chain level argument
+ // translation business, as it makes the driver functionality much
+ // more opaque. For now, we follow gcc closely solely for the
+ // purpose of easily achieving feature parity & testability. Once we
+ // have something that works, we should reevaluate each translation
+ // and try to push it down into tool specific logic.
+
+ for (ArgList::const_iterator it = Args.begin(),
+ ie = Args.end(); it != ie; ++it) {
+ Arg *A = *it;
+
+ if (A->getOption().matches(options::OPT_Xarch__)) {
+ // Skip this argument unless the architecture matches either the toolchain
+ // triple arch, or the arch being bound.
+ //
+ // FIXME: Canonicalize name.
+ StringRef XarchArch = A->getValue(Args, 0);
+ if (!(XarchArch == getArchName() ||
+ (BoundArch && XarchArch == BoundArch)))
+ continue;
+
+ Arg *OriginalArg = A;
+ unsigned Index = Args.getBaseArgs().MakeIndex(A->getValue(Args, 1));
+ unsigned Prev = Index;
+ Arg *XarchArg = Opts.ParseOneArg(Args, Index);
+
+ // If the argument parsing failed or more than one argument was
+ // consumed, the -Xarch_ argument's parameter tried to consume
+ // extra arguments. Emit an error and ignore.
+ //
+ // We also want to disallow any options which would alter the
+ // driver behavior; that isn't going to work in our model. We
+ // use isDriverOption() as an approximation, although things
+ // like -O4 are going to slip through.
+ if (!XarchArg || Index > Prev + 1) {
+ getDriver().Diag(diag::err_drv_invalid_Xarch_argument_with_args)
+ << A->getAsString(Args);
+ continue;
+ } else if (XarchArg->getOption().isDriverOption()) {
+ getDriver().Diag(diag::err_drv_invalid_Xarch_argument_isdriver)
+ << A->getAsString(Args);
+ continue;
+ }
+
+ XarchArg->setBaseArg(A);
+ A = XarchArg;
+
+ DAL->AddSynthesizedArg(A);
+
+ // Linker input arguments require custom handling. The problem is that we
+ // have already constructed the phase actions, so we cannot treat them as
+ // "input arguments".
+ if (A->getOption().isLinkerInput()) {
+ // Convert the argument into individual Zlinker_input_args.
+ for (unsigned i = 0, e = A->getNumValues(); i != e; ++i) {
+ DAL->AddSeparateArg(OriginalArg,
+ Opts.getOption(options::OPT_Zlinker_input),
+ A->getValue(Args, i));
+
+ }
+ continue;
+ }
+ }
+
+ // Sob. This is strictly gcc compatible for the time being. Apple
+ // gcc translates options twice, which means that self-expanding
+ // options add duplicates.
+ switch ((options::ID) A->getOption().getID()) {
+ default:
+ DAL->append(A);
+ break;
+
+ case options::OPT_mkernel:
+ case options::OPT_fapple_kext:
+ DAL->append(A);
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_static));
+ break;
+
+ case options::OPT_dependency_file:
+ DAL->AddSeparateArg(A, Opts.getOption(options::OPT_MF),
+ A->getValue(Args));
+ break;
+
+ case options::OPT_gfull:
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_g_Flag));
+ DAL->AddFlagArg(A,
+ Opts.getOption(options::OPT_fno_eliminate_unused_debug_symbols));
+ break;
+
+ case options::OPT_gused:
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_g_Flag));
+ DAL->AddFlagArg(A,
+ Opts.getOption(options::OPT_feliminate_unused_debug_symbols));
+ break;
+
+ case options::OPT_shared:
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_dynamiclib));
+ break;
+
+ case options::OPT_fconstant_cfstrings:
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_mconstant_cfstrings));
+ break;
+
+ case options::OPT_fno_constant_cfstrings:
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_mno_constant_cfstrings));
+ break;
+
+ case options::OPT_Wnonportable_cfstrings:
+ DAL->AddFlagArg(A,
+ Opts.getOption(options::OPT_mwarn_nonportable_cfstrings));
+ break;
+
+ case options::OPT_Wno_nonportable_cfstrings:
+ DAL->AddFlagArg(A,
+ Opts.getOption(options::OPT_mno_warn_nonportable_cfstrings));
+ break;
+
+ case options::OPT_fpascal_strings:
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_mpascal_strings));
+ break;
+
+ case options::OPT_fno_pascal_strings:
+ DAL->AddFlagArg(A, Opts.getOption(options::OPT_mno_pascal_strings));
+ break;
+ }
+ }
+
+ if (getTriple().getArch() == llvm::Triple::x86 ||
+ getTriple().getArch() == llvm::Triple::x86_64)
+ if (!Args.hasArgNoClaim(options::OPT_mtune_EQ))
+ DAL->AddJoinedArg(0, Opts.getOption(options::OPT_mtune_EQ), "core2");
+
+ // Add the arch options based on the particular spelling of -arch, to match
+ // how the driver driver works.
+ if (BoundArch) {
+ StringRef Name = BoundArch;
+ const Option *MCpu = Opts.getOption(options::OPT_mcpu_EQ);
+ const Option *MArch = Opts.getOption(options::OPT_march_EQ);
+
+ // This code must be kept in sync with LLVM's getArchTypeForDarwinArch,
+ // which defines the list of which architectures we accept.
+ if (Name == "ppc")
+ ;
+ else if (Name == "ppc601")
+ DAL->AddJoinedArg(0, MCpu, "601");
+ else if (Name == "ppc603")
+ DAL->AddJoinedArg(0, MCpu, "603");
+ else if (Name == "ppc604")
+ DAL->AddJoinedArg(0, MCpu, "604");
+ else if (Name == "ppc604e")
+ DAL->AddJoinedArg(0, MCpu, "604e");
+ else if (Name == "ppc750")
+ DAL->AddJoinedArg(0, MCpu, "750");
+ else if (Name == "ppc7400")
+ DAL->AddJoinedArg(0, MCpu, "7400");
+ else if (Name == "ppc7450")
+ DAL->AddJoinedArg(0, MCpu, "7450");
+ else if (Name == "ppc970")
+ DAL->AddJoinedArg(0, MCpu, "970");
+
+ else if (Name == "ppc64")
+ DAL->AddFlagArg(0, Opts.getOption(options::OPT_m64));
+
+ else if (Name == "i386")
+ ;
+ else if (Name == "i486")
+ DAL->AddJoinedArg(0, MArch, "i486");
+ else if (Name == "i586")
+ DAL->AddJoinedArg(0, MArch, "i586");
+ else if (Name == "i686")
+ DAL->AddJoinedArg(0, MArch, "i686");
+ else if (Name == "pentium")
+ DAL->AddJoinedArg(0, MArch, "pentium");
+ else if (Name == "pentium2")
+ DAL->AddJoinedArg(0, MArch, "pentium2");
+ else if (Name == "pentpro")
+ DAL->AddJoinedArg(0, MArch, "pentiumpro");
+ else if (Name == "pentIIm3")
+ DAL->AddJoinedArg(0, MArch, "pentium2");
+
+ else if (Name == "x86_64")
+ DAL->AddFlagArg(0, Opts.getOption(options::OPT_m64));
+
+ else if (Name == "arm")
+ DAL->AddJoinedArg(0, MArch, "armv4t");
+ else if (Name == "armv4t")
+ DAL->AddJoinedArg(0, MArch, "armv4t");
+ else if (Name == "armv5")
+ DAL->AddJoinedArg(0, MArch, "armv5tej");
+ else if (Name == "xscale")
+ DAL->AddJoinedArg(0, MArch, "xscale");
+ else if (Name == "armv6")
+ DAL->AddJoinedArg(0, MArch, "armv6k");
+ else if (Name == "armv7")
+ DAL->AddJoinedArg(0, MArch, "armv7a");
+
+ else
+ llvm_unreachable("invalid Darwin arch");
+ }
+
+ // Add an explicit version min argument for the deployment target. We do this
+ // after argument translation because -Xarch_ arguments may add a version min
+ // argument.
+ AddDeploymentTarget(*DAL);
+
+ // Validate the C++ standard library choice.
+ CXXStdlibType Type = GetCXXStdlibType(*DAL);
+ if (Type == ToolChain::CST_Libcxx) {
+ switch (LibCXXForSimulator) {
+ case LibCXXSimulator_None:
+ // Handle non-simulator cases.
+ if (isTargetIPhoneOS()) {
+ if (isIPhoneOSVersionLT(5, 0)) {
+ getDriver().Diag(clang::diag::err_drv_invalid_libcxx_deployment)
+ << "iOS 5.0";
+ }
+ }
+ break;
+ case LibCXXSimulator_NotAvailable:
+ getDriver().Diag(clang::diag::err_drv_invalid_libcxx_deployment)
+ << "iOS 5.0";
+ break;
+ case LibCXXSimulator_Available:
+ break;
+ }
+ }
+
+ return DAL;
+}
+
+bool Darwin::IsUnwindTablesDefault() const {
+ // FIXME: Gross; we should probably have some separate target
+ // definition, possibly even reusing the one in clang.
+ return getArchName() == "x86_64";
+}
+
+bool Darwin::UseDwarfDebugFlags() const {
+ if (const char *S = ::getenv("RC_DEBUG_OPTIONS"))
+ return S[0] != '\0';
+ return false;
+}
+
+bool Darwin::UseSjLjExceptions() const {
+ // Darwin uses SjLj exceptions on ARM.
+ return (getTriple().getArch() == llvm::Triple::arm ||
+ getTriple().getArch() == llvm::Triple::thumb);
+}
+
+const char *Darwin::GetDefaultRelocationModel() const {
+ return "pic";
+}
+
+const char *Darwin::GetForcedPicModel() const {
+ if (getArchName() == "x86_64")
+ return "pic";
+ return 0;
+}
+
+bool Darwin::SupportsProfiling() const {
+ // Profiling instrumentation is only supported on x86.
+ return getArchName() == "i386" || getArchName() == "x86_64";
+}
+
+bool Darwin::SupportsObjCGC() const {
+ // Garbage collection is supported everywhere except on iPhone OS.
+ return !isTargetIPhoneOS();
+}
+
+bool Darwin::SupportsObjCARC() const {
+ return isTargetIPhoneOS() || !isMacosxVersionLT(10, 6);
+}
+
+std::string
+Darwin_Generic_GCC::ComputeEffectiveClangTriple(const ArgList &Args,
+ types::ID InputType) const {
+ return ComputeLLVMTriple(Args, InputType);
+}
+
+/// Generic_GCC - A tool chain using the 'gcc' command to perform
+/// all subcommands; this relies on gcc translating the majority of
+/// command line options.
+
+/// \brief Parse a GCCVersion object out of a string of text.
+///
+/// This is the primary means of forming GCCVersion objects.
+/*static*/
+Generic_GCC::GCCVersion Linux::GCCVersion::Parse(StringRef VersionText) {
+ const GCCVersion BadVersion = { VersionText.str(), -1, -1, -1, "" };
+ std::pair<StringRef, StringRef> First = VersionText.split('.');
+ std::pair<StringRef, StringRef> Second = First.second.split('.');
+
+ GCCVersion GoodVersion = { VersionText.str(), -1, -1, -1, "" };
+ if (First.first.getAsInteger(10, GoodVersion.Major) ||
+ GoodVersion.Major < 0)
+ return BadVersion;
+ if (Second.first.getAsInteger(10, GoodVersion.Minor) ||
+ GoodVersion.Minor < 0)
+ return BadVersion;
+
+  // First look for a number prefix and parse that if present. Otherwise just
+  // stash the entire patch string in the suffix, and leave the number
+  // unspecified. This covers version strings such as:
+  //   4.4
+  //   4.4.0
+  //   4.4.x
+  //   4.4.2-rc4
+  //   4.4.x-patched
+  // and retains any patch number it finds.
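+  // Illustrative results: "4.4.2-rc4" parses as {Major: 4, Minor: 4,
+  // Patch: 2, PatchSuffix: "-rc4"}, while "4.4.x" leaves Patch unspecified
+  // (-1) with PatchSuffix "x".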
+ StringRef PatchText = GoodVersion.PatchSuffix = Second.second.str();
+ if (!PatchText.empty()) {
+ if (unsigned EndNumber = PatchText.find_first_not_of("0123456789")) {
+ // Try to parse the number and any suffix.
+ if (PatchText.slice(0, EndNumber).getAsInteger(10, GoodVersion.Patch) ||
+ GoodVersion.Patch < 0)
+ return BadVersion;
+ GoodVersion.PatchSuffix = PatchText.substr(EndNumber).str();
+ }
+ }
+
+ return GoodVersion;
+}
+
+/// \brief Less-than for GCCVersion, implementing a Strict Weak Ordering.
+bool Generic_GCC::GCCVersion::operator<(const GCCVersion &RHS) const {
+ if (Major < RHS.Major) return true; if (Major > RHS.Major) return false;
+ if (Minor < RHS.Minor) return true; if (Minor > RHS.Minor) return false;
+
+  // Note that we rank versions with *no* patch specified as better than ones
+  // hard-coding a patch version. Thus if the RHS has no patch, it always
+  // wins, and the LHS only wins if it has no patch and the RHS does have
+  // a patch.
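+  // For instance, under this ordering "4.6" ranks above "4.6.3", which in
+  // turn ranks above "4.6.3-rc1".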
+ if (RHS.Patch == -1) return true; if (Patch == -1) return false;
+ if (Patch < RHS.Patch) return true; if (Patch > RHS.Patch) return false;
+
+ // Finally, between completely tied version numbers, the version with the
+ // suffix loses as we prefer full releases.
+ if (RHS.PatchSuffix.empty()) return true;
+ return false;
+}
+
+static StringRef getGCCToolchainDir(const ArgList &Args) {
+ const Arg *A = Args.getLastArg(options::OPT_gcc_toolchain);
+ if (A)
+ return A->getValue(Args);
+ return GCC_INSTALL_PREFIX;
+}
+
+/// \brief Construct a GCCInstallationDetector from the driver.
+///
+/// This performs all of the autodetection and sets up the various paths.
+/// Once constructed, a GCCInstallation is essentially immutable.
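+/// When no explicit GCC toolchain directory is given, the prefixes searched
+/// are the driver's prefix dirs followed by the sysroot, '<sysroot>/usr',
+/// and '<install dir>/..' (see the constructor body below).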
+///
+/// FIXME: We shouldn't need an explicit TargetTriple parameter here, and
+/// should instead pull the target out of the driver. This is currently
+/// necessary because the driver doesn't store the final version of the target
+/// triple.
+Generic_GCC::GCCInstallationDetector::GCCInstallationDetector(
+ const Driver &D,
+ const llvm::Triple &TargetTriple,
+ const ArgList &Args)
+ : IsValid(false) {
+ llvm::Triple MultiarchTriple
+ = TargetTriple.isArch32Bit() ? TargetTriple.get64BitArchVariant()
+ : TargetTriple.get32BitArchVariant();
+ llvm::Triple::ArchType TargetArch = TargetTriple.getArch();
+ // The library directories which may contain GCC installations.
+ SmallVector<StringRef, 4> CandidateLibDirs, CandidateMultiarchLibDirs;
+ // The compatible GCC triples for this particular architecture.
+ SmallVector<StringRef, 10> CandidateTripleAliases;
+ SmallVector<StringRef, 10> CandidateMultiarchTripleAliases;
+ CollectLibDirsAndTriples(TargetTriple, MultiarchTriple, CandidateLibDirs,
+ CandidateTripleAliases,
+ CandidateMultiarchLibDirs,
+ CandidateMultiarchTripleAliases);
+
+ // Compute the set of prefixes for our search.
+ SmallVector<std::string, 8> Prefixes(D.PrefixDirs.begin(),
+ D.PrefixDirs.end());
+
+ StringRef GCCToolchainDir = getGCCToolchainDir(Args);
+ if (GCCToolchainDir != "") {
+ if (GCCToolchainDir.back() == '/')
+ GCCToolchainDir = GCCToolchainDir.drop_back(); // remove the /
+
+ Prefixes.push_back(GCCToolchainDir);
+ } else {
+ Prefixes.push_back(D.SysRoot);
+ Prefixes.push_back(D.SysRoot + "/usr");
+ Prefixes.push_back(D.InstalledDir + "/..");
+ }
+
+ // Loop over the various components which exist and select the best GCC
+ // installation available. GCC installs are ranked by version number.
+ Version = GCCVersion::Parse("0.0.0");
+ for (unsigned i = 0, ie = Prefixes.size(); i < ie; ++i) {
+ if (!llvm::sys::fs::exists(Prefixes[i]))
+ continue;
+ for (unsigned j = 0, je = CandidateLibDirs.size(); j < je; ++j) {
+ const std::string LibDir = Prefixes[i] + CandidateLibDirs[j].str();
+ if (!llvm::sys::fs::exists(LibDir))
+ continue;
+ for (unsigned k = 0, ke = CandidateTripleAliases.size(); k < ke; ++k)
+ ScanLibDirForGCCTriple(TargetArch, LibDir, CandidateTripleAliases[k]);
+ }
+ for (unsigned j = 0, je = CandidateMultiarchLibDirs.size(); j < je; ++j) {
+ const std::string LibDir
+ = Prefixes[i] + CandidateMultiarchLibDirs[j].str();
+ if (!llvm::sys::fs::exists(LibDir))
+ continue;
+ for (unsigned k = 0, ke = CandidateMultiarchTripleAliases.size(); k < ke;
+ ++k)
+ ScanLibDirForGCCTriple(TargetArch, LibDir,
+ CandidateMultiarchTripleAliases[k],
+ /*NeedsMultiarchSuffix=*/true);
+ }
+ }
+}
+
+/*static*/ void Generic_GCC::GCCInstallationDetector::CollectLibDirsAndTriples(
+ const llvm::Triple &TargetTriple,
+ const llvm::Triple &MultiarchTriple,
+ SmallVectorImpl<StringRef> &LibDirs,
+ SmallVectorImpl<StringRef> &TripleAliases,
+ SmallVectorImpl<StringRef> &MultiarchLibDirs,
+ SmallVectorImpl<StringRef> &MultiarchTripleAliases) {
+ // Declare a bunch of static data sets that we'll select between below. These
+ // are specifically designed to always refer to string literals to avoid any
+ // lifetime or initialization issues.
+ static const char *const ARMLibDirs[] = { "/lib" };
+ static const char *const ARMTriples[] = {
+ "arm-linux-gnueabi",
+ "arm-linux-androideabi"
+ };
+
+ static const char *const X86_64LibDirs[] = { "/lib64", "/lib" };
+ static const char *const X86_64Triples[] = {
+ "x86_64-linux-gnu",
+ "x86_64-unknown-linux-gnu",
+ "x86_64-pc-linux-gnu",
+ "x86_64-redhat-linux6E",
+ "x86_64-redhat-linux",
+ "x86_64-suse-linux",
+ "x86_64-manbo-linux-gnu",
+ "x86_64-linux-gnu",
+ "x86_64-slackware-linux"
+ };
+ static const char *const X86LibDirs[] = { "/lib32", "/lib" };
+ static const char *const X86Triples[] = {
+ "i686-linux-gnu",
+ "i686-pc-linux-gnu",
+ "i486-linux-gnu",
+ "i386-linux-gnu",
+ "i686-redhat-linux",
+ "i586-redhat-linux",
+ "i386-redhat-linux",
+ "i586-suse-linux",
+ "i486-slackware-linux"
+ };
+
+ static const char *const MIPSLibDirs[] = { "/lib" };
+ static const char *const MIPSTriples[] = { "mips-linux-gnu" };
+ static const char *const MIPSELLibDirs[] = { "/lib" };
+ static const char *const MIPSELTriples[] = { "mipsel-linux-gnu" };
+
+ static const char *const PPCLibDirs[] = { "/lib32", "/lib" };
+ static const char *const PPCTriples[] = {
+ "powerpc-linux-gnu",
+ "powerpc-unknown-linux-gnu",
+ "powerpc-suse-linux"
+ };
+ static const char *const PPC64LibDirs[] = { "/lib64", "/lib" };
+ static const char *const PPC64Triples[] = {
+ "powerpc64-linux-gnu",
+ "powerpc64-unknown-linux-gnu",
+ "powerpc64-suse-linux",
+ "ppc64-redhat-linux"
+ };
+
+ switch (TargetTriple.getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ LibDirs.append(ARMLibDirs, ARMLibDirs + llvm::array_lengthof(ARMLibDirs));
+ TripleAliases.append(
+ ARMTriples, ARMTriples + llvm::array_lengthof(ARMTriples));
+ break;
+ case llvm::Triple::x86_64:
+ LibDirs.append(
+ X86_64LibDirs, X86_64LibDirs + llvm::array_lengthof(X86_64LibDirs));
+ TripleAliases.append(
+ X86_64Triples, X86_64Triples + llvm::array_lengthof(X86_64Triples));
+ MultiarchLibDirs.append(
+ X86LibDirs, X86LibDirs + llvm::array_lengthof(X86LibDirs));
+ MultiarchTripleAliases.append(
+ X86Triples, X86Triples + llvm::array_lengthof(X86Triples));
+ break;
+ case llvm::Triple::x86:
+ LibDirs.append(X86LibDirs, X86LibDirs + llvm::array_lengthof(X86LibDirs));
+ TripleAliases.append(
+ X86Triples, X86Triples + llvm::array_lengthof(X86Triples));
+ MultiarchLibDirs.append(
+ X86_64LibDirs, X86_64LibDirs + llvm::array_lengthof(X86_64LibDirs));
+ MultiarchTripleAliases.append(
+ X86_64Triples, X86_64Triples + llvm::array_lengthof(X86_64Triples));
+ break;
+ case llvm::Triple::mips:
+ LibDirs.append(
+ MIPSLibDirs, MIPSLibDirs + llvm::array_lengthof(MIPSLibDirs));
+ TripleAliases.append(
+ MIPSTriples, MIPSTriples + llvm::array_lengthof(MIPSTriples));
+ break;
+ case llvm::Triple::mipsel:
+ LibDirs.append(
+ MIPSELLibDirs, MIPSELLibDirs + llvm::array_lengthof(MIPSELLibDirs));
+ TripleAliases.append(
+ MIPSELTriples, MIPSELTriples + llvm::array_lengthof(MIPSELTriples));
+ break;
+ case llvm::Triple::ppc:
+ LibDirs.append(PPCLibDirs, PPCLibDirs + llvm::array_lengthof(PPCLibDirs));
+ TripleAliases.append(
+ PPCTriples, PPCTriples + llvm::array_lengthof(PPCTriples));
+ MultiarchLibDirs.append(
+ PPC64LibDirs, PPC64LibDirs + llvm::array_lengthof(PPC64LibDirs));
+ MultiarchTripleAliases.append(
+ PPC64Triples, PPC64Triples + llvm::array_lengthof(PPC64Triples));
+ break;
+ case llvm::Triple::ppc64:
+ LibDirs.append(
+ PPC64LibDirs, PPC64LibDirs + llvm::array_lengthof(PPC64LibDirs));
+ TripleAliases.append(
+ PPC64Triples, PPC64Triples + llvm::array_lengthof(PPC64Triples));
+ MultiarchLibDirs.append(
+ PPCLibDirs, PPCLibDirs + llvm::array_lengthof(PPCLibDirs));
+ MultiarchTripleAliases.append(
+ PPCTriples, PPCTriples + llvm::array_lengthof(PPCTriples));
+ break;
+
+ default:
+ // By default, just rely on the standard lib directories and the original
+ // triple.
+ break;
+ }
+
+  // Always append the driver's target triple to the end, in case it doesn't
+  // match any of our aliases.
+ TripleAliases.push_back(TargetTriple.str());
+
+ // Also include the multiarch variant if it's different.
+ if (TargetTriple.str() != MultiarchTriple.str())
+ MultiarchTripleAliases.push_back(MultiarchTriple.str());
+}
+
+void Generic_GCC::GCCInstallationDetector::ScanLibDirForGCCTriple(
+ llvm::Triple::ArchType TargetArch, const std::string &LibDir,
+ StringRef CandidateTriple, bool NeedsMultiarchSuffix) {
+  // There are various suffixes involving the triple which we check for. We
+  // also record what is necessary to walk from each back up to the lib
+  // directory.
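+  // For example, with LibDir "/usr/lib" and CandidateTriple
+  // "x86_64-linux-gnu" (hypothetical values), the first suffix scans version
+  // directories under "/usr/lib/gcc/x86_64-linux-gnu/".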
+ const std::string LibSuffixes[] = {
+ "/gcc/" + CandidateTriple.str(),
+ "/" + CandidateTriple.str() + "/gcc/" + CandidateTriple.str(),
+
+    // Ubuntu has a strange mismatched pair of triples that this happens to
+    // match.
+ // FIXME: It may be worthwhile to generalize this and look for a second
+ // triple.
+ "/i386-linux-gnu/gcc/" + CandidateTriple.str()
+ };
+ const std::string InstallSuffixes[] = {
+ "/../../..",
+ "/../../../..",
+ "/../../../.."
+ };
+ // Only look at the final, weird Ubuntu suffix for i386-linux-gnu.
+ const unsigned NumLibSuffixes = (llvm::array_lengthof(LibSuffixes) -
+ (TargetArch != llvm::Triple::x86));
+ for (unsigned i = 0; i < NumLibSuffixes; ++i) {
+ StringRef LibSuffix = LibSuffixes[i];
+ llvm::error_code EC;
+ for (llvm::sys::fs::directory_iterator LI(LibDir + LibSuffix, EC), LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
+ StringRef VersionText = llvm::sys::path::filename(LI->path());
+ GCCVersion CandidateVersion = GCCVersion::Parse(VersionText);
+ static const GCCVersion MinVersion = { "4.1.1", 4, 1, 1, "" };
+ if (CandidateVersion < MinVersion)
+ continue;
+ if (CandidateVersion <= Version)
+ continue;
+
+ // Some versions of SUSE and Fedora on ppc64 put 32-bit libs
+ // in what would normally be GCCInstallPath and put the 64-bit
+ // libs in a subdirectory named 64. The simple logic we follow is that
+ // *if* there is a subdirectory of the right name with crtbegin.o in it,
+ // we use that. If not, and if not a multiarch triple, we look for
+ // crtbegin.o without the subdirectory.
+ StringRef MultiarchSuffix
+ = (TargetArch == llvm::Triple::x86_64 ||
+ TargetArch == llvm::Triple::ppc64) ? "/64" : "/32";
+ if (llvm::sys::fs::exists(LI->path() + MultiarchSuffix + "/crtbegin.o")) {
+ GCCMultiarchSuffix = MultiarchSuffix.str();
+ } else {
+ if (NeedsMultiarchSuffix ||
+ !llvm::sys::fs::exists(LI->path() + "/crtbegin.o"))
+ continue;
+ GCCMultiarchSuffix.clear();
+ }
+
+ Version = CandidateVersion;
+ GCCTriple.setTriple(CandidateTriple);
+ // FIXME: We hack together the directory name here instead of
+ // using LI to ensure stable path separators across Windows and
+ // Linux.
+ GCCInstallPath = LibDir + LibSuffixes[i] + "/" + VersionText.str();
+ GCCParentLibPath = GCCInstallPath + InstallSuffixes[i];
+ IsValid = true;
+ }
+ }
+}
+
+Generic_GCC::Generic_GCC(const Driver &D, const llvm::Triple& Triple,
+ const ArgList &Args)
+ : ToolChain(D, Triple), GCCInstallation(getDriver(), Triple, Args) {
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+ if (getDriver().getInstalledDir() != getDriver().Dir)
+ getProgramPaths().push_back(getDriver().Dir);
+}
+
+Generic_GCC::~Generic_GCC() {
+ // Free tool implementations.
+ for (llvm::DenseMap<unsigned, Tool*>::iterator
+ it = Tools.begin(), ie = Tools.end(); it != ie; ++it)
+ delete it->second;
+}
+
+Tool &Generic_GCC::SelectTool(const Compilation &C,
+ const JobAction &JA,
+ const ActionList &Inputs) const {
+ Action::ActionClass Key;
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::InputClass:
+ case Action::BindArchClass:
+ llvm_unreachable("Invalid tool kind.");
+ case Action::PreprocessJobClass:
+ T = new tools::gcc::Preprocess(*this); break;
+ case Action::PrecompileJobClass:
+ T = new tools::gcc::Precompile(*this); break;
+ case Action::AnalyzeJobClass:
+ case Action::MigrateJobClass:
+ T = new tools::Clang(*this); break;
+ case Action::CompileJobClass:
+ T = new tools::gcc::Compile(*this); break;
+ case Action::AssembleJobClass:
+ T = new tools::gcc::Assemble(*this); break;
+ case Action::LinkJobClass:
+ T = new tools::gcc::Link(*this); break;
+
+ // This is a bit ungeneric, but the only platform using a driver
+ // driver is Darwin.
+ case Action::LipoJobClass:
+ T = new tools::darwin::Lipo(*this); break;
+ case Action::DsymutilJobClass:
+ T = new tools::darwin::Dsymutil(*this); break;
+ case Action::VerifyJobClass:
+ T = new tools::darwin::VerifyDebug(*this); break;
+ }
+ }
+
+ return *T;
+}
+
+bool Generic_GCC::IsUnwindTablesDefault() const {
+ // FIXME: Gross; we should probably have some separate target
+ // definition, possibly even reusing the one in clang.
+ return getArchName() == "x86_64";
+}
+
+const char *Generic_GCC::GetDefaultRelocationModel() const {
+ return "static";
+}
+
+const char *Generic_GCC::GetForcedPicModel() const {
+ return 0;
+}
+/// Hexagon Toolchain
+
+Hexagon_TC::Hexagon_TC(const Driver &D, const llvm::Triple& Triple)
+ : ToolChain(D, Triple) {
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+  if (getDriver().getInstalledDir() != getDriver().Dir)
+ getProgramPaths().push_back(getDriver().Dir);
+}
+
+Hexagon_TC::~Hexagon_TC() {
+ // Free tool implementations.
+ for (llvm::DenseMap<unsigned, Tool*>::iterator
+ it = Tools.begin(), ie = Tools.end(); it != ie; ++it)
+ delete it->second;
+}
+
+Tool &Hexagon_TC::SelectTool(const Compilation &C,
+ const JobAction &JA,
+ const ActionList &Inputs) const {
+ Action::ActionClass Key;
+ // if (JA.getKind () == Action::CompileJobClass)
+ // Key = JA.getKind ();
+ // else
+
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+ // if ((JA.getKind () == Action::CompileJobClass)
+ // && (JA.getType () != types::TY_LTO_BC)) {
+ // Key = JA.getKind ();
+ // }
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::InputClass:
+ case Action::BindArchClass:
+ assert(0 && "Invalid tool kind.");
+ case Action::AnalyzeJobClass:
+ T = new tools::Clang(*this); break;
+ case Action::AssembleJobClass:
+ T = new tools::hexagon::Assemble(*this); break;
+ case Action::LinkJobClass:
+ T = new tools::hexagon::Link(*this); break;
+ default:
+ assert(false && "Unsupported action for Hexagon target.");
+ }
+ }
+
+ return *T;
+}
+
+bool Hexagon_TC::IsUnwindTablesDefault() const {
+ // FIXME: Gross; we should probably have some separate target
+ // definition, possibly even reusing the one in clang.
+ return getArchName() == "x86_64";
+}
+
+const char *Hexagon_TC::GetDefaultRelocationModel() const {
+ return "static";
+}
+
+const char *Hexagon_TC::GetForcedPicModel() const {
+ return 0;
+} // End Hexagon
+
+
+/// TCEToolChain - A tool chain using the llvm bitcode tools to perform
+/// all subcommands. See http://tce.cs.tut.fi for our peculiar target.
+/// Currently it supports nothing but compilation.
+
+TCEToolChain::TCEToolChain(const Driver &D, const llvm::Triple& Triple)
+ : ToolChain(D, Triple) {
+ // Path mangling to find libexec
+ std::string Path(getDriver().Dir);
+
+ Path += "/../libexec";
+ getProgramPaths().push_back(Path);
+}
+
+TCEToolChain::~TCEToolChain() {
+ for (llvm::DenseMap<unsigned, Tool*>::iterator
+ it = Tools.begin(), ie = Tools.end(); it != ie; ++it)
+ delete it->second;
+}
+
+bool TCEToolChain::IsMathErrnoDefault() const {
+ return true;
+}
+
+bool TCEToolChain::IsUnwindTablesDefault() const {
+ return false;
+}
+
+const char *TCEToolChain::GetDefaultRelocationModel() const {
+ return "static";
+}
+
+const char *TCEToolChain::GetForcedPicModel() const {
+ return 0;
+}
+
+Tool &TCEToolChain::SelectTool(const Compilation &C,
+ const JobAction &JA,
+ const ActionList &Inputs) const {
+ Action::ActionClass Key;
+ Key = Action::AnalyzeJobClass;
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::PreprocessJobClass:
+ T = new tools::gcc::Preprocess(*this); break;
+ case Action::AnalyzeJobClass:
+ T = new tools::Clang(*this); break;
+ default:
+ llvm_unreachable("Unsupported action for TCE target.");
+ }
+ }
+ return *T;
+}
+
+/// OpenBSD - OpenBSD tool chain which can call as(1) and ld(1) directly.
+
+OpenBSD::OpenBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ getFilePaths().push_back(getDriver().Dir + "/../lib");
+ getFilePaths().push_back("/usr/lib");
+}
+
+Tool &OpenBSD::SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const {
+ Action::ActionClass Key;
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ bool UseIntegratedAs = C.getArgs().hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ IsIntegratedAssemblerDefault());
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::AssembleJobClass: {
+ if (UseIntegratedAs)
+ T = new tools::ClangAs(*this);
+ else
+ T = new tools::openbsd::Assemble(*this);
+ break;
+ }
+ case Action::LinkJobClass:
+ T = new tools::openbsd::Link(*this); break;
+ default:
+ T = &Generic_GCC::SelectTool(C, JA, Inputs);
+ }
+ }
+
+ return *T;
+}
+
+/// FreeBSD - FreeBSD tool chain which can call as(1) and ld(1) directly.
+
+FreeBSD::FreeBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+
+ // When targeting 32-bit platforms, look for '/usr/lib32/crt1.o' and fall
+ // back to '/usr/lib' if it doesn't exist.
+ if ((Triple.getArch() == llvm::Triple::x86 ||
+ Triple.getArch() == llvm::Triple::ppc) &&
+ llvm::sys::fs::exists(getDriver().SysRoot + CLANG_PREFIX "/usr/lib32/crt1.o"))
+ getFilePaths().push_back(getDriver().SysRoot + CLANG_PREFIX "/usr/lib32");
+ else
+ getFilePaths().push_back(getDriver().SysRoot + CLANG_PREFIX "/usr/lib");
+}
+
+Tool &FreeBSD::SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const {
+ Action::ActionClass Key;
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ bool UseIntegratedAs = C.getArgs().hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ IsIntegratedAssemblerDefault());
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::AssembleJobClass:
+ if (UseIntegratedAs)
+ T = new tools::ClangAs(*this);
+ else
+ T = new tools::freebsd::Assemble(*this);
+ break;
+ case Action::LinkJobClass:
+ T = new tools::freebsd::Link(*this); break;
+ default:
+ T = &Generic_GCC::SelectTool(C, JA, Inputs);
+ }
+ }
+
+ return *T;
+}
+
+/// NetBSD - NetBSD tool chain which can call as(1) and ld(1) directly.
+
+NetBSD::NetBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+
+ if (getDriver().UseStdLib) {
+ // When targeting a 32-bit platform, try the special directory used on
+ // 64-bit hosts, and only fall back to the main library directory if that
+ // doesn't work.
+ // FIXME: It'd be nicer to test if this directory exists, but I'm not sure
+ // what all logic is needed to emulate the '=' prefix here.
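+    // (With GNU ld, a search path that begins with '=' is resolved relative
+    // to the linker's sysroot.)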
+ if (Triple.getArch() == llvm::Triple::x86)
+ getFilePaths().push_back("=/usr/lib/i386");
+
+ getFilePaths().push_back("=/usr/lib");
+ }
+}
+
+Tool &NetBSD::SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const {
+ Action::ActionClass Key;
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ bool UseIntegratedAs = C.getArgs().hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ IsIntegratedAssemblerDefault());
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::AssembleJobClass:
+ if (UseIntegratedAs)
+ T = new tools::ClangAs(*this);
+ else
+ T = new tools::netbsd::Assemble(*this);
+ break;
+ case Action::LinkJobClass:
+ T = new tools::netbsd::Link(*this);
+ break;
+ default:
+ T = &Generic_GCC::SelectTool(C, JA, Inputs);
+ }
+ }
+
+ return *T;
+}
+
+/// Minix - Minix tool chain which can call as(1) and ld(1) directly.
+
+Minix::Minix(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ getFilePaths().push_back(getDriver().Dir + "/../lib");
+ getFilePaths().push_back("/usr/lib");
+}
+
+Tool &Minix::SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const {
+ Action::ActionClass Key;
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::AssembleJobClass:
+ T = new tools::minix::Assemble(*this); break;
+ case Action::LinkJobClass:
+ T = new tools::minix::Link(*this); break;
+ default:
+ T = &Generic_GCC::SelectTool(C, JA, Inputs);
+ }
+ }
+
+ return *T;
+}
+
+/// AuroraUX - AuroraUX tool chain which can call as(1) and ld(1) directly.
+
+AuroraUX::AuroraUX(const Driver &D, const llvm::Triple& Triple,
+ const ArgList &Args)
+ : Generic_GCC(D, Triple, Args) {
+
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+ if (getDriver().getInstalledDir() != getDriver().Dir)
+ getProgramPaths().push_back(getDriver().Dir);
+
+ getFilePaths().push_back(getDriver().Dir + "/../lib");
+ getFilePaths().push_back("/usr/lib");
+ getFilePaths().push_back("/usr/sfw/lib");
+ getFilePaths().push_back("/opt/gcc4/lib");
+ getFilePaths().push_back("/opt/gcc4/lib/gcc/i386-pc-solaris2.11/4.2.4");
+
+}
+
+Tool &AuroraUX::SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const {
+ Action::ActionClass Key;
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::AssembleJobClass:
+ T = new tools::auroraux::Assemble(*this); break;
+ case Action::LinkJobClass:
+ T = new tools::auroraux::Link(*this); break;
+ default:
+ T = &Generic_GCC::SelectTool(C, JA, Inputs);
+ }
+ }
+
+ return *T;
+}
+
+/// Solaris - Solaris tool chain which can call as(1) and ld(1) directly.
+
+Solaris::Solaris(const Driver &D, const llvm::Triple& Triple,
+ const ArgList &Args)
+ : Generic_GCC(D, Triple, Args) {
+
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+ if (getDriver().getInstalledDir() != getDriver().Dir)
+ getProgramPaths().push_back(getDriver().Dir);
+
+ getFilePaths().push_back(getDriver().Dir + "/../lib");
+ getFilePaths().push_back("/usr/lib");
+}
+
+Tool &Solaris::SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const {
+ Action::ActionClass Key;
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::AssembleJobClass:
+ T = new tools::solaris::Assemble(*this); break;
+ case Action::LinkJobClass:
+ T = new tools::solaris::Link(*this); break;
+ default:
+ T = &Generic_GCC::SelectTool(C, JA, Inputs);
+ }
+ }
+
+ return *T;
+}
+
+/// Linux toolchain (very bare-bones at the moment).
+
+enum LinuxDistro {
+ ArchLinux,
+ DebianLenny,
+ DebianSqueeze,
+ DebianWheezy,
+ Exherbo,
+ RHEL4,
+ RHEL5,
+ RHEL6,
+ Fedora13,
+ Fedora14,
+ Fedora15,
+ Fedora16,
+ FedoraRawhide,
+ OpenSuse11_3,
+ OpenSuse11_4,
+ OpenSuse12_1,
+ UbuntuHardy,
+ UbuntuIntrepid,
+ UbuntuJaunty,
+ UbuntuKarmic,
+ UbuntuLucid,
+ UbuntuMaverick,
+ UbuntuNatty,
+ UbuntuOneiric,
+ UbuntuPrecise,
+ UnknownDistro
+};
+
+static bool IsRedhat(enum LinuxDistro Distro) {
+ return (Distro >= Fedora13 && Distro <= FedoraRawhide) ||
+ (Distro >= RHEL4 && Distro <= RHEL6);
+}
+
+static bool IsOpenSuse(enum LinuxDistro Distro) {
+ return Distro >= OpenSuse11_3 && Distro <= OpenSuse12_1;
+}
+
+static bool IsDebian(enum LinuxDistro Distro) {
+ return Distro >= DebianLenny && Distro <= DebianWheezy;
+}
+
+static bool IsUbuntu(enum LinuxDistro Distro) {
+ return Distro >= UbuntuHardy && Distro <= UbuntuPrecise;
+}
+
+static LinuxDistro DetectLinuxDistro(llvm::Triple::ArchType Arch) {
+ OwningPtr<llvm::MemoryBuffer> File;
+ if (!llvm::MemoryBuffer::getFile("/etc/lsb-release", File)) {
+ StringRef Data = File.get()->getBuffer();
+ SmallVector<StringRef, 8> Lines;
+ Data.split(Lines, "\n");
+ LinuxDistro Version = UnknownDistro;
+ for (unsigned i = 0, s = Lines.size(); i != s; ++i)
+ if (Version == UnknownDistro && Lines[i].startswith("DISTRIB_CODENAME="))
+ Version = llvm::StringSwitch<LinuxDistro>(Lines[i].substr(17))
+ .Case("hardy", UbuntuHardy)
+ .Case("intrepid", UbuntuIntrepid)
+ .Case("jaunty", UbuntuJaunty)
+ .Case("karmic", UbuntuKarmic)
+ .Case("lucid", UbuntuLucid)
+ .Case("maverick", UbuntuMaverick)
+ .Case("natty", UbuntuNatty)
+ .Case("oneiric", UbuntuOneiric)
+ .Case("precise", UbuntuPrecise)
+ .Default(UnknownDistro);
+ return Version;
+ }
+
+ if (!llvm::MemoryBuffer::getFile("/etc/redhat-release", File)) {
+ StringRef Data = File.get()->getBuffer();
+ if (Data.startswith("Fedora release 16"))
+ return Fedora16;
+ else if (Data.startswith("Fedora release 15"))
+ return Fedora15;
+ else if (Data.startswith("Fedora release 14"))
+ return Fedora14;
+ else if (Data.startswith("Fedora release 13"))
+ return Fedora13;
+ else if (Data.startswith("Fedora release") &&
+ Data.find("Rawhide") != StringRef::npos)
+ return FedoraRawhide;
+ else if (Data.startswith("Red Hat Enterprise Linux") &&
+ Data.find("release 6") != StringRef::npos)
+ return RHEL6;
+ else if ((Data.startswith("Red Hat Enterprise Linux") ||
+ Data.startswith("CentOS")) &&
+ Data.find("release 5") != StringRef::npos)
+ return RHEL5;
+ else if ((Data.startswith("Red Hat Enterprise Linux") ||
+ Data.startswith("CentOS")) &&
+ Data.find("release 4") != StringRef::npos)
+ return RHEL4;
+ return UnknownDistro;
+ }
+
+ if (!llvm::MemoryBuffer::getFile("/etc/debian_version", File)) {
+ StringRef Data = File.get()->getBuffer();
+ if (Data[0] == '5')
+ return DebianLenny;
+ else if (Data.startswith("squeeze/sid") || Data[0] == '6')
+ return DebianSqueeze;
+ else if (Data.startswith("wheezy/sid") || Data[0] == '7')
+ return DebianWheezy;
+ return UnknownDistro;
+ }
+
+ if (!llvm::MemoryBuffer::getFile("/etc/SuSE-release", File))
+ return llvm::StringSwitch<LinuxDistro>(File.get()->getBuffer())
+ .StartsWith("openSUSE 11.3", OpenSuse11_3)
+ .StartsWith("openSUSE 11.4", OpenSuse11_4)
+ .StartsWith("openSUSE 12.1", OpenSuse12_1)
+ .Default(UnknownDistro);
+
+ bool Exists;
+ if (!llvm::sys::fs::exists("/etc/exherbo-release", Exists) && Exists)
+ return Exherbo;
+
+ if (!llvm::sys::fs::exists("/etc/arch-release", Exists) && Exists)
+ return ArchLinux;
+
+ return UnknownDistro;
+}
+
+/// \brief Get our best guess at the multiarch triple for a target.
+///
+/// Debian-based systems are starting to use a multiarch setup where they use
+/// a target-triple directory in the library and header search paths.
+/// Unfortunately, this triple does not align with the vanilla target triple,
+/// so we provide a rough mapping here.
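+/// For example, on an x86-64 Debian system where
+/// '<sysroot>/lib/x86_64-linux-gnu' exists, a triple such as
+/// 'x86_64-unknown-linux-gnu' is mapped to 'x86_64-linux-gnu'.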
+static std::string getMultiarchTriple(const llvm::Triple TargetTriple,
+ StringRef SysRoot) {
+ // For most architectures, just use whatever we have rather than trying to be
+ // clever.
+ switch (TargetTriple.getArch()) {
+ default:
+ return TargetTriple.str();
+
+  // We use the existence of the '/lib/<triple>' directory to detect some
+  // common Linux triples that don't quite match the Clang triple for both
+  // 32-bit and 64-bit targets. Multiarch fixes its install triples to these
+  // regardless of what the actual target triple is.
+ case llvm::Triple::x86:
+ if (llvm::sys::fs::exists(SysRoot + "/lib/i386-linux-gnu"))
+ return "i386-linux-gnu";
+ return TargetTriple.str();
+ case llvm::Triple::x86_64:
+ if (llvm::sys::fs::exists(SysRoot + "/lib/x86_64-linux-gnu"))
+ return "x86_64-linux-gnu";
+ return TargetTriple.str();
+ case llvm::Triple::mips:
+ if (llvm::sys::fs::exists(SysRoot + "/lib/mips-linux-gnu"))
+ return "mips-linux-gnu";
+ return TargetTriple.str();
+ case llvm::Triple::mipsel:
+ if (llvm::sys::fs::exists(SysRoot + "/lib/mipsel-linux-gnu"))
+ return "mipsel-linux-gnu";
+ return TargetTriple.str();
+ case llvm::Triple::ppc:
+ if (llvm::sys::fs::exists(SysRoot + "/lib/powerpc-linux-gnu"))
+ return "powerpc-linux-gnu";
+ return TargetTriple.str();
+ case llvm::Triple::ppc64:
+ if (llvm::sys::fs::exists(SysRoot + "/lib/powerpc64-linux-gnu"))
+ return "powerpc64-linux-gnu";
+ return TargetTriple.str();
+ }
+}
+
+static void addPathIfExists(Twine Path, ToolChain::path_list &Paths) {
+ if (llvm::sys::fs::exists(Path)) Paths.push_back(Path.str());
+}
+
+Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ llvm::Triple::ArchType Arch = Triple.getArch();
+ const std::string &SysRoot = getDriver().SysRoot;
+
+ // OpenSuse stores the linker with the compiler, add that to the search
+ // path.
+ ToolChain::path_list &PPaths = getProgramPaths();
+ PPaths.push_back(Twine(GCCInstallation.getParentLibPath() + "/../" +
+ GCCInstallation.getTriple().str() + "/bin").str());
+
+ Linker = GetProgramPath("ld");
+
+ LinuxDistro Distro = DetectLinuxDistro(Arch);
+
+ if (IsOpenSuse(Distro) || IsUbuntu(Distro)) {
+ ExtraOpts.push_back("-z");
+ ExtraOpts.push_back("relro");
+ }
+
+ if (Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb)
+ ExtraOpts.push_back("-X");
+
+ const bool IsMips = Arch == llvm::Triple::mips ||
+ Arch == llvm::Triple::mipsel ||
+ Arch == llvm::Triple::mips64 ||
+ Arch == llvm::Triple::mips64el;
+
+ const bool IsAndroid = Triple.getEnvironment() == llvm::Triple::ANDROIDEABI;
+
+ // Do not use 'gnu' hash style for Mips targets because .gnu.hash
+ // and the MIPS ABI require .dynsym to be sorted in different ways.
+ // .gnu.hash needs symbols to be grouped by hash code whereas the MIPS
+ // ABI requires a mapping between the GOT and the symbol table.
+  // The Android loader does not support .gnu.hash.
+ if (!IsMips && !IsAndroid) {
+ if (IsRedhat(Distro) || IsOpenSuse(Distro) ||
+ (IsUbuntu(Distro) && Distro >= UbuntuMaverick))
+ ExtraOpts.push_back("--hash-style=gnu");
+
+ if (IsDebian(Distro) || IsOpenSuse(Distro) || Distro == UbuntuLucid ||
+ Distro == UbuntuJaunty || Distro == UbuntuKarmic)
+ ExtraOpts.push_back("--hash-style=both");
+ }
+
+ if (IsRedhat(Distro))
+ ExtraOpts.push_back("--no-add-needed");
+
+ if (Distro == DebianSqueeze || Distro == DebianWheezy ||
+ IsOpenSuse(Distro) ||
+ (IsRedhat(Distro) && Distro != RHEL4 && Distro != RHEL5) ||
+ (IsUbuntu(Distro) && Distro >= UbuntuKarmic))
+ ExtraOpts.push_back("--build-id");
+
+ if (IsOpenSuse(Distro))
+ ExtraOpts.push_back("--enable-new-dtags");
+
+ // The selection of paths to try here is designed to match the patterns which
+ // the GCC driver itself uses, as this is part of the GCC-compatible driver.
+ // This was determined by running GCC in a fake filesystem, creating all
+ // possible permutations of these directories, and seeing which ones it added
+ // to the link paths.
+ path_list &Paths = getFilePaths();
+
+ const std::string Multilib = Triple.isArch32Bit() ? "lib32" : "lib64";
+ const std::string MultiarchTriple = getMultiarchTriple(Triple, SysRoot);
+
+ // Add the multilib suffixed paths where they are available.
+ if (GCCInstallation.isValid()) {
+ const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
+ const std::string &LibPath = GCCInstallation.getParentLibPath();
+ addPathIfExists((GCCInstallation.getInstallPath() +
+ GCCInstallation.getMultiarchSuffix()),
+ Paths);
+
+ // If the GCC installation we found is inside of the sysroot, we want to
+ // prefer libraries installed in the parent prefix of the GCC installation.
+ // It is important to *not* use these paths when the GCC installation is
+    // outside of the system root as that can pick up unintended libraries.
+ // This usually happens when there is an external cross compiler on the
+ // host system, and a more minimal sysroot available that is the target of
+ // the cross.
+ if (StringRef(LibPath).startswith(SysRoot)) {
+ addPathIfExists(LibPath + "/../" + GCCTriple.str() + "/lib/../" + Multilib,
+ Paths);
+ addPathIfExists(LibPath + "/" + MultiarchTriple, Paths);
+ addPathIfExists(LibPath + "/../" + Multilib, Paths);
+ }
+ }
+ addPathIfExists(SysRoot + "/lib/" + MultiarchTriple, Paths);
+ addPathIfExists(SysRoot + "/lib/../" + Multilib, Paths);
+ addPathIfExists(SysRoot + "/usr/lib/" + MultiarchTriple, Paths);
+ addPathIfExists(SysRoot + "/usr/lib/../" + Multilib, Paths);
+
+ // Try walking via the GCC triple path in case of multiarch GCC
+ // installations with strange symlinks.
+ if (GCCInstallation.isValid())
+ addPathIfExists(SysRoot + "/usr/lib/" + GCCInstallation.getTriple().str() +
+ "/../../" + Multilib, Paths);
+
+ // Add the non-multilib suffixed paths (if potentially different).
+ if (GCCInstallation.isValid()) {
+ const std::string &LibPath = GCCInstallation.getParentLibPath();
+ const llvm::Triple &GCCTriple = GCCInstallation.getTriple();
+ if (!GCCInstallation.getMultiarchSuffix().empty())
+ addPathIfExists(GCCInstallation.getInstallPath(), Paths);
+
+ if (StringRef(LibPath).startswith(SysRoot)) {
+ addPathIfExists(LibPath + "/../" + GCCTriple.str() + "/lib", Paths);
+ addPathIfExists(LibPath, Paths);
+ }
+ }
+ addPathIfExists(SysRoot + "/lib", Paths);
+ addPathIfExists(SysRoot + "/usr/lib", Paths);
+}
+
+bool Linux::HasNativeLLVMSupport() const {
+ return true;
+}
+
+Tool &Linux::SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const {
+ Action::ActionClass Key;
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ bool UseIntegratedAs = C.getArgs().hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ IsIntegratedAssemblerDefault());
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::AssembleJobClass:
+ if (UseIntegratedAs)
+ T = new tools::ClangAs(*this);
+ else
+ T = new tools::linuxtools::Assemble(*this);
+ break;
+ case Action::LinkJobClass:
+ T = new tools::linuxtools::Link(*this); break;
+ default:
+ T = &Generic_GCC::SelectTool(C, JA, Inputs);
+ }
+ }
+
+ return *T;
+}
+
+void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nostdlibinc))
+ addSystemInclude(DriverArgs, CC1Args, D.SysRoot + "/usr/local/include");
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ llvm::sys::Path P(D.ResourceDir);
+ P.appendComponent("include");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+ // Check for configure-time C include directories.
+ StringRef CIncludeDirs(C_INCLUDE_DIRS);
+ if (CIncludeDirs != "") {
+ SmallVector<StringRef, 5> dirs;
+ CIncludeDirs.split(dirs, ":");
+ for (SmallVectorImpl<StringRef>::iterator I = dirs.begin(), E = dirs.end();
+ I != E; ++I) {
+ StringRef Prefix = llvm::sys::path::is_absolute(*I) ? D.SysRoot : "";
+ addExternCSystemInclude(DriverArgs, CC1Args, Prefix + *I);
+ }
+ return;
+ }
+
+ // Lacking those, try to detect the correct set of system includes for the
+ // target triple.
+
+ // Implement generic Debian multiarch support.
+ const StringRef X86_64MultiarchIncludeDirs[] = {
+ "/usr/include/x86_64-linux-gnu",
+
+ // FIXME: These are older forms of multiarch. It's not clear that they're
+ // in use in any released version of Debian, so we should consider
+ // removing them.
+ "/usr/include/i686-linux-gnu/64",
+ "/usr/include/i486-linux-gnu/64"
+ };
+ const StringRef X86MultiarchIncludeDirs[] = {
+ "/usr/include/i386-linux-gnu",
+
+ // FIXME: These are older forms of multiarch. It's not clear that they're
+ // in use in any released version of Debian, so we should consider
+ // removing them.
+ "/usr/include/x86_64-linux-gnu/32",
+ "/usr/include/i686-linux-gnu",
+ "/usr/include/i486-linux-gnu"
+ };
+ const StringRef ARMMultiarchIncludeDirs[] = {
+ "/usr/include/arm-linux-gnueabi"
+ };
+ const StringRef MIPSMultiarchIncludeDirs[] = {
+ "/usr/include/mips-linux-gnu"
+ };
+ const StringRef MIPSELMultiarchIncludeDirs[] = {
+ "/usr/include/mipsel-linux-gnu"
+ };
+ const StringRef PPCMultiarchIncludeDirs[] = {
+ "/usr/include/powerpc-linux-gnu"
+ };
+ const StringRef PPC64MultiarchIncludeDirs[] = {
+ "/usr/include/powerpc64-linux-gnu"
+ };
+ ArrayRef<StringRef> MultiarchIncludeDirs;
+ if (getTriple().getArch() == llvm::Triple::x86_64) {
+ MultiarchIncludeDirs = X86_64MultiarchIncludeDirs;
+ } else if (getTriple().getArch() == llvm::Triple::x86) {
+ MultiarchIncludeDirs = X86MultiarchIncludeDirs;
+ } else if (getTriple().getArch() == llvm::Triple::arm) {
+ MultiarchIncludeDirs = ARMMultiarchIncludeDirs;
+ } else if (getTriple().getArch() == llvm::Triple::mips) {
+ MultiarchIncludeDirs = MIPSMultiarchIncludeDirs;
+ } else if (getTriple().getArch() == llvm::Triple::mipsel) {
+ MultiarchIncludeDirs = MIPSELMultiarchIncludeDirs;
+ } else if (getTriple().getArch() == llvm::Triple::ppc) {
+ MultiarchIncludeDirs = PPCMultiarchIncludeDirs;
+ } else if (getTriple().getArch() == llvm::Triple::ppc64) {
+ MultiarchIncludeDirs = PPC64MultiarchIncludeDirs;
+ }
+ for (ArrayRef<StringRef>::iterator I = MultiarchIncludeDirs.begin(),
+ E = MultiarchIncludeDirs.end();
+ I != E; ++I) {
+ if (llvm::sys::fs::exists(D.SysRoot + *I)) {
+ addExternCSystemInclude(DriverArgs, CC1Args, D.SysRoot + *I);
+ break;
+ }
+ }
+
+ if (getTriple().getOS() == llvm::Triple::RTEMS)
+ return;
+
+  // Add an include of '/include' directly. This isn't provided by default by
+  // system GCCs, but is often used with cross-compiling GCCs, and is harmless
+  // to add even when Clang is acting as if it were a system compiler.
+ addExternCSystemInclude(DriverArgs, CC1Args, D.SysRoot + "/include");
+
+ addExternCSystemInclude(DriverArgs, CC1Args, D.SysRoot + "/usr/include");
+}
+
+/// \brief Helper to add the three variant paths for a libstdc++ installation.
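+/// The three are the base directory itself, its target-specific
+/// subdirectory, and the 'backward' compatibility directory, matching the
+/// addSystemInclude calls below.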
+/*static*/ bool Linux::addLibStdCXXIncludePaths(Twine Base, Twine TargetArchDir,
+ const ArgList &DriverArgs,
+ ArgStringList &CC1Args) {
+ if (!llvm::sys::fs::exists(Base))
+ return false;
+ addSystemInclude(DriverArgs, CC1Args, Base);
+ addSystemInclude(DriverArgs, CC1Args, Base + "/" + TargetArchDir);
+ addSystemInclude(DriverArgs, CC1Args, Base + "/backward");
+ return true;
+}
+
+void Linux::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ // Check if libc++ has been enabled and provide its include paths if so.
+ if (GetCXXStdlibType(DriverArgs) == ToolChain::CST_Libcxx) {
+ // libc++ is always installed at a fixed path on Linux currently.
+ addSystemInclude(DriverArgs, CC1Args,
+ getDriver().SysRoot + "/usr/include/c++/v1");
+ return;
+ }
+
+ // We need a detected GCC installation on Linux to provide libstdc++'s
+ // headers. We handled the libc++ case above.
+ if (!GCCInstallation.isValid())
+ return;
+
+  // By default, look for the C++ headers in an include directory adjacent to
+  // the lib directory of the GCC installation. Note that this is expected to
+  // be equivalent to '/usr/include/c++/X.Y' in almost all cases.
+ StringRef LibDir = GCCInstallation.getParentLibPath();
+ StringRef InstallDir = GCCInstallation.getInstallPath();
+ StringRef Version = GCCInstallation.getVersion();
+ if (!addLibStdCXXIncludePaths(LibDir + "/../include/c++/" + Version,
+ (GCCInstallation.getTriple().str() +
+ GCCInstallation.getMultiarchSuffix()),
+ DriverArgs, CC1Args)) {
+ // Gentoo is weird and places its headers inside the GCC install, so if the
+ // first attempt to find the headers fails, try this pattern.
+ addLibStdCXXIncludePaths(InstallDir + "/include/g++-v4",
+ (GCCInstallation.getTriple().str() +
+ GCCInstallation.getMultiarchSuffix()),
+ DriverArgs, CC1Args);
+ }
+}
+
+/// DragonFly - DragonFly tool chain which can call as(1) and ld(1) directly.
+
+DragonFly::DragonFly(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+
+ // Path mangling to find libexec
+ getProgramPaths().push_back(getDriver().getInstalledDir());
+ if (getDriver().getInstalledDir() != getDriver().Dir)
+ getProgramPaths().push_back(getDriver().Dir);
+
+ getFilePaths().push_back(getDriver().Dir + "/../lib");
+ getFilePaths().push_back("/usr/lib");
+ getFilePaths().push_back("/usr/lib/gcc41");
+}
+
+Tool &DragonFly::SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const {
+ Action::ActionClass Key;
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::AssembleJobClass:
+ T = new tools::dragonfly::Assemble(*this); break;
+ case Action::LinkJobClass:
+ T = new tools::dragonfly::Link(*this); break;
+ default:
+ T = &Generic_GCC::SelectTool(C, JA, Inputs);
+ }
+ }
+
+ return *T;
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/ToolChains.h b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
new file mode 100644
index 0000000..eaa6be1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/ToolChains.h
@@ -0,0 +1,596 @@
+//===--- ToolChains.h - ToolChain Implementations ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_DRIVER_TOOLCHAINS_H_
+#define CLANG_LIB_DRIVER_TOOLCHAINS_H_
+
+#include "clang/Driver/Action.h"
+#include "clang/Driver/ToolChain.h"
+
+#include "clang/Basic/VersionTuple.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/Compiler.h"
+
+#include "Tools.h"
+
+namespace clang {
+namespace driver {
+namespace toolchains {
+
+/// Generic_GCC - A tool chain using the 'gcc' command to perform
+/// all subcommands; this relies on gcc translating the majority of
+/// command line options.
+class LLVM_LIBRARY_VISIBILITY Generic_GCC : public ToolChain {
+protected:
+ /// \brief Struct to store and manipulate GCC versions.
+ ///
+ /// We rely on assumptions about the form and structure of GCC version
+ /// numbers: they consist of at most three '.'-separated components, and each
+ /// component is a non-negative integer except for the last component. For
+ /// the last component we are very flexible in order to tolerate release
+ /// candidates or 'x' wildcards.
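+  /// For example, "4.4.2-rc4" is parsed as Major 4, Minor 4, Patch 2 with
+  /// PatchSuffix "-rc4".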
+ ///
+ /// Note that the ordering established among GCCVersions is based on the
+  /// preferred version string to use. For example, we prefer versions without
+  /// a hard-coded patch number to those with a hard-coded patch number.
+ ///
+ /// Currently this doesn't provide any logic for textual suffixes to patches
+ /// in the way that (for example) Debian's version format does. If that ever
+ /// becomes necessary, it can be added.
+ struct GCCVersion {
+ /// \brief The unparsed text of the version.
+ std::string Text;
+
+ /// \brief The parsed major, minor, and patch numbers.
+ int Major, Minor, Patch;
+
+ /// \brief Any textual suffix on the patch number.
+ std::string PatchSuffix;
+
+ static GCCVersion Parse(StringRef VersionText);
+ bool operator<(const GCCVersion &RHS) const;
+ bool operator>(const GCCVersion &RHS) const { return RHS < *this; }
+ bool operator<=(const GCCVersion &RHS) const { return !(*this > RHS); }
+ bool operator>=(const GCCVersion &RHS) const { return !(*this < RHS); }
+ };
+
+
+ /// \brief This is a class to find a viable GCC installation for Clang to
+ /// use.
+ ///
+ /// This class tries to find a GCC installation on the system, and report
+ /// information about it. It starts from the host information provided to the
+ /// Driver, and has logic for fuzzing that where appropriate.
+ class GCCInstallationDetector {
+
+ bool IsValid;
+ llvm::Triple GCCTriple;
+
+ // FIXME: These might be better as path objects.
+ std::string GCCInstallPath;
+ std::string GCCMultiarchSuffix;
+ std::string GCCParentLibPath;
+
+ GCCVersion Version;
+
+ public:
+ GCCInstallationDetector(const Driver &D, const llvm::Triple &TargetTriple,
+ const ArgList &Args);
+
+ /// \brief Check whether we detected a valid GCC install.
+ bool isValid() const { return IsValid; }
+
+ /// \brief Get the GCC triple for the detected install.
+ const llvm::Triple &getTriple() const { return GCCTriple; }
+
+ /// \brief Get the detected GCC installation path.
+ StringRef getInstallPath() const { return GCCInstallPath; }
+
+ /// \brief Get the detected GCC installation path suffix for multiarch GCCs.
+ StringRef getMultiarchSuffix() const { return GCCMultiarchSuffix; }
+
+ /// \brief Get the detected GCC parent lib path.
+ StringRef getParentLibPath() const { return GCCParentLibPath; }
+
+ /// \brief Get the detected GCC version string.
+ StringRef getVersion() const { return Version.Text; }
+
+ private:
+ static void CollectLibDirsAndTriples(
+ const llvm::Triple &TargetTriple,
+ const llvm::Triple &MultiarchTriple,
+ SmallVectorImpl<StringRef> &LibDirs,
+ SmallVectorImpl<StringRef> &TripleAliases,
+ SmallVectorImpl<StringRef> &MultiarchLibDirs,
+ SmallVectorImpl<StringRef> &MultiarchTripleAliases);
+
+ void ScanLibDirForGCCTriple(llvm::Triple::ArchType TargetArch,
+ const std::string &LibDir,
+ StringRef CandidateTriple,
+ bool NeedsMultiarchSuffix = false);
+ };
+
+ GCCInstallationDetector GCCInstallation;
+
+ mutable llvm::DenseMap<unsigned, Tool*> Tools;
+
+public:
+ Generic_GCC(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+ ~Generic_GCC();
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const;
+
+ virtual bool IsUnwindTablesDefault() const;
+ virtual const char *GetDefaultRelocationModel() const;
+ virtual const char *GetForcedPicModel() const;
+
+protected:
+ /// \name ToolChain Implementation Helper Functions
+ /// @{
+
+ /// \brief Check whether the target triple's architecture is 64-bits.
+ bool isTarget64Bit() const { return getTriple().isArch64Bit(); }
+
+ /// \brief Check whether the target triple's architecture is 32-bits.
+ bool isTarget32Bit() const { return getTriple().isArch32Bit(); }
+
+ /// @}
+};
+
+class LLVM_LIBRARY_VISIBILITY Hexagon_TC : public ToolChain {
+protected:
+ mutable llvm::DenseMap<unsigned, Tool*> Tools;
+
+public:
+ Hexagon_TC(const Driver &D, const llvm::Triple& Triple);
+ ~Hexagon_TC();
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const;
+
+ virtual bool IsUnwindTablesDefault() const;
+ virtual const char *GetDefaultRelocationModel() const;
+ virtual const char *GetForcedPicModel() const;
+};
+
+ /// Darwin - The base Darwin tool chain.
+class LLVM_LIBRARY_VISIBILITY Darwin : public ToolChain {
+public:
+ /// The host version.
+ unsigned DarwinVersion[3];
+
+private:
+ mutable llvm::DenseMap<unsigned, Tool*> Tools;
+
+ /// Whether the information on the target has been initialized.
+ //
+ // FIXME: This should be eliminated. What we want to do is make this part of
+ // the "default target for arguments" selection process, once we get out of
+ // the argument translation business.
+ mutable bool TargetInitialized;
+
+ // FIXME: Remove this once there is a proper way to detect an ARC runtime
+ // for the simulator.
+ public:
+ mutable enum {
+ ARCSimulator_None,
+ ARCSimulator_HasARCRuntime,
+ ARCSimulator_NoARCRuntime
+ } ARCRuntimeForSimulator;
+
+ mutable enum {
+ LibCXXSimulator_None,
+ LibCXXSimulator_NotAvailable,
+ LibCXXSimulator_Available
+ } LibCXXForSimulator;
+
+private:
+ /// Whether we are targeting iPhoneOS target.
+ mutable bool TargetIsIPhoneOS;
+
+ /// Whether we are targeting the iPhoneOS simulator target.
+ mutable bool TargetIsIPhoneOSSimulator;
+
+ /// The OS version we are targeting.
+ mutable VersionTuple TargetVersion;
+
+ /// The default macosx-version-min of this tool chain; empty until
+ /// initialized.
+ std::string MacosxVersionMin;
+
+ bool hasARCRuntime() const;
+ bool hasSubscriptingRuntime() const;
+
+private:
+ void AddDeploymentTarget(DerivedArgList &Args) const;
+
+public:
+ Darwin(const Driver &D, const llvm::Triple& Triple);
+ ~Darwin();
+
+ std::string ComputeEffectiveClangTriple(const ArgList &Args,
+ types::ID InputType) const;
+
+ /// @name Darwin Specific Toolchain API
+ /// {
+
+ // FIXME: Eliminate these ...Target functions and derive separate tool chains
+ // for these targets and put version in constructor.
+ void setTarget(bool IsIPhoneOS, unsigned Major, unsigned Minor,
+ unsigned Micro, bool IsIOSSim) const {
+ assert((!IsIOSSim || IsIPhoneOS) && "Unexpected deployment target!");
+
+ // FIXME: For now, allow reinitialization as long as values don't
+ // change. This will go away when we move away from argument translation.
+ if (TargetInitialized && TargetIsIPhoneOS == IsIPhoneOS &&
+ TargetIsIPhoneOSSimulator == IsIOSSim &&
+ TargetVersion == VersionTuple(Major, Minor, Micro))
+ return;
+
+ assert(!TargetInitialized && "Target already initialized!");
+ TargetInitialized = true;
+ TargetIsIPhoneOS = IsIPhoneOS;
+ TargetIsIPhoneOSSimulator = IsIOSSim;
+ TargetVersion = VersionTuple(Major, Minor, Micro);
+ }
+
+ bool isTargetIPhoneOS() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetIsIPhoneOS;
+ }
+
+ bool isTargetIOSSimulator() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetIsIPhoneOSSimulator;
+ }
+
+ bool isTargetMacOS() const {
+ return !isTargetIOSSimulator() &&
+ !isTargetIPhoneOS() &&
+ ARCRuntimeForSimulator == ARCSimulator_None;
+ }
+
+ bool isTargetInitialized() const { return TargetInitialized; }
+
+ VersionTuple getTargetVersion() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetVersion;
+ }
+
+ /// getDarwinArchName - Get the "Darwin" arch name for a particular compiler
+ /// invocation. For example, Darwin treats different ARM variations as
+ /// distinct architectures.
+ StringRef getDarwinArchName(const ArgList &Args) const;
+
+ bool isIPhoneOSVersionLT(unsigned V0, unsigned V1=0, unsigned V2=0) const {
+ assert(isTargetIPhoneOS() && "Unexpected call for OS X target!");
+ return TargetVersion < VersionTuple(V0, V1, V2);
+ }
+
+ bool isMacosxVersionLT(unsigned V0, unsigned V1=0, unsigned V2=0) const {
+ assert(!isTargetIPhoneOS() && "Unexpected call for iPhoneOS target!");
+ return TargetVersion < VersionTuple(V0, V1, V2);
+ }
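+ // Illustrative note (added for exposition, not from the original source):
+ // a tool chain initialized for iOS 5.0 reports isIPhoneOSVersionLT(6) as
+ // true and isIPhoneOSVersionLT(4, 3) as false; isMacosxVersionLT behaves
+ // the same way for OS X targets.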
+
+ /// AddLinkSearchPathArgs - Add the linker search paths to \arg CmdArgs.
+ ///
+ /// \param Args - The input argument list.
+ /// \param CmdArgs [out] - The command argument list to append the paths
+ /// (prefixed by -L) to.
+ virtual void AddLinkSearchPathArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const = 0;
+
+ /// AddLinkARCArgs - Add the linker arguments to link the ARC runtime library.
+ virtual void AddLinkARCArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const = 0;
+
+ /// AddLinkRuntimeLibArgs - Add the linker arguments to link the compiler
+ /// runtime library.
+ virtual void AddLinkRuntimeLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const = 0;
+
+ /// }
+ /// @name ToolChain Implementation
+ /// {
+
+ virtual types::ID LookupTypeForExtension(const char *Ext) const;
+
+ virtual bool HasNativeLLVMSupport() const;
+
+ virtual void configureObjCRuntime(ObjCRuntime &runtime) const;
+ virtual bool hasBlocksRuntime() const;
+
+ virtual DerivedArgList *TranslateArgs(const DerivedArgList &Args,
+ const char *BoundArch) const;
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const;
+
+ virtual bool IsBlocksDefault() const {
+ // Always allow blocks on Darwin; users interested in versioning are
+ // expected to use /usr/include/Blocks.h.
+ return true;
+ }
+ virtual bool IsIntegratedAssemblerDefault() const {
+#ifdef DISABLE_DEFAULT_INTEGRATED_ASSEMBLER
+ return false;
+#else
+ // Default integrated assembler to on for Darwin.
+ return true;
+#endif
+ }
+ virtual bool IsStrictAliasingDefault() const {
+#ifdef DISABLE_DEFAULT_STRICT_ALIASING
+ return false;
+#else
+ return ToolChain::IsStrictAliasingDefault();
+#endif
+ }
+
+ virtual bool IsObjCDefaultSynthPropertiesDefault() const {
+ return true;
+ }
+
+ virtual bool IsObjCNonFragileABIDefault() const {
+ // The non-fragile ABI is the default for everything but i386.
+ return getTriple().getArch() != llvm::Triple::x86;
+ }
+ virtual bool IsObjCLegacyDispatchDefault() const {
+ // This is only used with the non-fragile ABI.
+
+ // Legacy dispatch is used everywhere except on x86_64.
+ return getTriple().getArch() != llvm::Triple::x86_64;
+ }
+ virtual bool UseObjCMixedDispatch() const {
+ // This is only used with the non-fragile ABI and non-legacy dispatch.
+
+ // Mixed dispatch is used everywhere except OS X before 10.6.
+ return !(!isTargetIPhoneOS() && isMacosxVersionLT(10, 6));
+ }
+ virtual bool IsUnwindTablesDefault() const;
+ virtual unsigned GetDefaultStackProtectorLevel(bool KernelOrKext) const {
+ // Stack protectors default to on for user code on 10.5,
+ // and for everything in 10.6 and beyond
+ return isTargetIPhoneOS() ||
+ (!isMacosxVersionLT(10, 6) ||
+ (!isMacosxVersionLT(10, 5) && !KernelOrKext));
+ }
+ virtual RuntimeLibType GetDefaultRuntimeLibType() const {
+ return ToolChain::RLT_CompilerRT;
+ }
+ virtual const char *GetDefaultRelocationModel() const;
+ virtual const char *GetForcedPicModel() const;
+
+ virtual bool SupportsProfiling() const;
+
+ virtual bool SupportsObjCGC() const;
+
+ virtual bool SupportsObjCARC() const;
+
+ virtual bool UseDwarfDebugFlags() const;
+
+ virtual bool UseSjLjExceptions() const;
+
+ /// }
+};
+
+/// DarwinClang - The Darwin toolchain used by Clang.
+class LLVM_LIBRARY_VISIBILITY DarwinClang : public Darwin {
+private:
+ void AddGCCLibexecPath(unsigned darwinVersion);
+
+public:
+ DarwinClang(const Driver &D, const llvm::Triple& Triple);
+
+ /// @name Darwin ToolChain Implementation
+ /// {
+
+ virtual void AddLinkSearchPathArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const;
+
+ virtual void AddLinkRuntimeLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const;
+ void AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
+ const char *DarwinStaticLib) const;
+
+ virtual void AddCXXStdlibLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const;
+
+ virtual void AddCCKextLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const;
+
+ virtual void AddLinkARCArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const;
+ /// }
+};
+
+/// Darwin_Generic_GCC - Generic Darwin tool chain using gcc.
+class LLVM_LIBRARY_VISIBILITY Darwin_Generic_GCC : public Generic_GCC {
+public:
+ Darwin_Generic_GCC(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
+ : Generic_GCC(D, Triple, Args) {}
+
+ std::string ComputeEffectiveClangTriple(const ArgList &Args,
+ types::ID InputType) const;
+
+ virtual const char *GetDefaultRelocationModel() const { return "pic"; }
+};
+
+class LLVM_LIBRARY_VISIBILITY Generic_ELF : public Generic_GCC {
+ virtual void anchor();
+public:
+ Generic_ELF(const Driver &D, const llvm::Triple& Triple, const ArgList &Args)
+ : Generic_GCC(D, Triple, Args) {}
+
+ virtual bool IsIntegratedAssemblerDefault() const {
+ // Default the integrated assembler to on for x86 and x86_64.
+ return (getTriple().getArch() == llvm::Triple::x86 ||
+ getTriple().getArch() == llvm::Triple::x86_64);
+ }
+};
+
+class LLVM_LIBRARY_VISIBILITY AuroraUX : public Generic_GCC {
+public:
+ AuroraUX(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const;
+};
+
+class LLVM_LIBRARY_VISIBILITY Solaris : public Generic_GCC {
+public:
+ Solaris(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const;
+
+ virtual bool IsIntegratedAssemblerDefault() const { return true; }
+};
+
+
+class LLVM_LIBRARY_VISIBILITY OpenBSD : public Generic_ELF {
+public:
+ OpenBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+
+ virtual bool IsObjCNonFragileABIDefault() const { return true; }
+ virtual bool IsObjCLegacyDispatchDefault() const {
+ llvm::Triple::ArchType Arch = getTriple().getArch();
+ if (Arch == llvm::Triple::arm ||
+ Arch == llvm::Triple::x86 ||
+ Arch == llvm::Triple::x86_64)
+ return false;
+ return true;
+ }
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const;
+};
+
+class LLVM_LIBRARY_VISIBILITY FreeBSD : public Generic_ELF {
+public:
+ FreeBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+
+ virtual bool IsObjCNonFragileABIDefault() const { return true; }
+ virtual bool IsObjCLegacyDispatchDefault() const {
+ llvm::Triple::ArchType Arch = getTriple().getArch();
+ if (Arch == llvm::Triple::arm ||
+ Arch == llvm::Triple::x86 ||
+ Arch == llvm::Triple::x86_64)
+ return false;
+ return true;
+ }
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const;
+};
+
+class LLVM_LIBRARY_VISIBILITY NetBSD : public Generic_ELF {
+public:
+ NetBSD(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+
+ virtual bool IsObjCNonFragileABIDefault() const { return true; }
+ virtual bool IsObjCLegacyDispatchDefault() const {
+ llvm::Triple::ArchType Arch = getTriple().getArch();
+ if (Arch == llvm::Triple::arm ||
+ Arch == llvm::Triple::x86 ||
+ Arch == llvm::Triple::x86_64)
+ return false;
+ return true;
+ }
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const;
+};
+
+class LLVM_LIBRARY_VISIBILITY Minix : public Generic_ELF {
+public:
+ Minix(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const;
+};
+
+class LLVM_LIBRARY_VISIBILITY DragonFly : public Generic_ELF {
+public:
+ DragonFly(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const;
+};
+
+class LLVM_LIBRARY_VISIBILITY Linux : public Generic_ELF {
+public:
+ Linux(const Driver &D, const llvm::Triple& Triple, const ArgList &Args);
+
+ virtual bool HasNativeLLVMSupport() const;
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const;
+
+ virtual void AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const;
+ virtual void AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const;
+
+ std::string Linker;
+ std::vector<std::string> ExtraOpts;
+
+private:
+ static bool addLibStdCXXIncludePaths(Twine Base, Twine TargetArchDir,
+ const ArgList &DriverArgs,
+ ArgStringList &CC1Args);
+};
+
+
+/// TCEToolChain - A tool chain using the llvm bitcode tools to perform
+/// all subcommands. See http://tce.cs.tut.fi for our peculiar target.
+class LLVM_LIBRARY_VISIBILITY TCEToolChain : public ToolChain {
+public:
+ TCEToolChain(const Driver &D, const llvm::Triple& Triple);
+ ~TCEToolChain();
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const;
+ bool IsMathErrnoDefault() const;
+ bool IsUnwindTablesDefault() const;
+ const char* GetDefaultRelocationModel() const;
+ const char* GetForcedPicModel() const;
+
+private:
+ mutable llvm::DenseMap<unsigned, Tool*> Tools;
+
+};
+
+class LLVM_LIBRARY_VISIBILITY Windows : public ToolChain {
+ mutable llvm::DenseMap<unsigned, Tool*> Tools;
+
+public:
+ Windows(const Driver &D, const llvm::Triple& Triple);
+
+ virtual Tool &SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const;
+
+ virtual bool IsIntegratedAssemblerDefault() const;
+ virtual bool IsUnwindTablesDefault() const;
+ virtual const char *GetDefaultRelocationModel() const;
+ virtual const char *GetForcedPicModel() const;
+
+ virtual void AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const;
+ virtual void AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const;
+
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.cpp b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
new file mode 100644
index 0000000..d3dab19
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Tools.cpp
@@ -0,0 +1,5588 @@
+//===--- Tools.cpp - Tools Implementations --------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Tools.h"
+
+#include "clang/Driver/Action.h"
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Job.h"
+#include "clang/Driver/ObjCRuntime.h"
+#include "clang/Driver/Option.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/ToolChain.h"
+#include "clang/Driver/Util.h"
+
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/ErrorHandling.h"
+
+#include "InputInfo.h"
+#include "ToolChains.h"
+
+using namespace clang::driver;
+using namespace clang::driver::tools;
+using namespace clang;
+
+/// CheckPreprocessingOptions - Perform some validation of preprocessing
+/// arguments that is shared with gcc.
+static void CheckPreprocessingOptions(const Driver &D, const ArgList &Args) {
+ if (Arg *A = Args.getLastArg(options::OPT_C, options::OPT_CC))
+ if (!Args.hasArg(options::OPT_E) && !D.CCCIsCPP)
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << A->getAsString(Args) << "-E";
+}
+
+/// CheckCodeGenerationOptions - Perform some validation of code generation
+/// arguments that is shared with gcc.
+static void CheckCodeGenerationOptions(const Driver &D, const ArgList &Args) {
+ // In gcc, only ARM checks this, but it seems reasonable to check universally.
+ if (Args.hasArg(options::OPT_static))
+ if (const Arg *A = Args.getLastArg(options::OPT_dynamic,
+ options::OPT_mdynamic_no_pic))
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "-static";
+}
+
+// Quote target names for inclusion in GNU Make dependency files.
+// Only the characters '$', '#', ' ', '\t' are quoted.
+static void QuoteTarget(StringRef Target,
+ SmallVectorImpl<char> &Res) {
+ for (unsigned i = 0, e = Target.size(); i != e; ++i) {
+ switch (Target[i]) {
+ case ' ':
+ case '\t':
+ // Escape the preceding backslashes
+ for (int j = i - 1; j >= 0 && Target[j] == '\\'; --j)
+ Res.push_back('\\');
+
+ // Escape the space/tab
+ Res.push_back('\\');
+ break;
+ case '$':
+ Res.push_back('$');
+ break;
+ case '#':
+ Res.push_back('\\');
+ break;
+ default:
+ break;
+ }
+
+ Res.push_back(Target[i]);
+ }
+}
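+
+// Illustrative note on QuoteTarget above (added for exposition, not part of
+// the original source): given the target "foo bar$1.o" it produces
+// "foo\ bar$$1.o" -- spaces and tabs get a backslash, '$' is doubled, and
+// '#' becomes "\#", which is how GNU Make expects these characters to be
+// quoted in a dependency file.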
+
+static void addDirectoryList(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const char *ArgName,
+ const char *EnvVar) {
+ const char *DirList = ::getenv(EnvVar);
+ if (!DirList)
+ return; // Nothing to do.
+
+ StringRef Dirs(DirList);
+ if (Dirs.empty()) // Empty string should not add '.'.
+ return;
+
+ StringRef::size_type Delim;
+ while ((Delim = Dirs.find(llvm::sys::PathSeparator)) != StringRef::npos) {
+ if (Delim == 0) { // Leading colon.
+ CmdArgs.push_back(ArgName);
+ CmdArgs.push_back(".");
+ } else {
+ CmdArgs.push_back(ArgName);
+ CmdArgs.push_back(Args.MakeArgString(Dirs.substr(0, Delim)));
+ }
+ Dirs = Dirs.substr(Delim + 1);
+ }
+
+ if (Dirs.empty()) { // Trailing colon.
+ CmdArgs.push_back(ArgName);
+ CmdArgs.push_back(".");
+ } else { // Add the last path.
+ CmdArgs.push_back(ArgName);
+ CmdArgs.push_back(Args.MakeArgString(Dirs));
+ }
+}
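+
+// Illustrative note on addDirectoryList above (added for exposition, not part
+// of the original source): on a Unix host, where llvm::sys::PathSeparator is
+// ':', calling it with ArgName "-L" and LIBRARY_PATH="a::b:" appends
+// "-L a -L . -L b -L ." -- empty list entries are treated as the current
+// directory, while an entirely empty variable adds nothing.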
+
+static void AddLinkerInputs(const ToolChain &TC,
+ const InputInfoList &Inputs, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ const Driver &D = TC.getDriver();
+
+ // Add extra linker input arguments which are not treated as inputs
+ // (constructed via -Xarch_).
+ Args.AddAllArgValues(CmdArgs, options::OPT_Zlinker_input);
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+
+ if (!TC.HasNativeLLVMSupport()) {
+ // Don't try to pass LLVM inputs unless we have native support.
+ if (II.getType() == types::TY_LLVM_IR ||
+ II.getType() == types::TY_LTO_IR ||
+ II.getType() == types::TY_LLVM_BC ||
+ II.getType() == types::TY_LTO_BC)
+ D.Diag(diag::err_drv_no_linker_llvm_support)
+ << TC.getTripleString();
+ }
+
+ // Add filenames immediately.
+ if (II.isFilename()) {
+ CmdArgs.push_back(II.getFilename());
+ continue;
+ }
+
+ // Otherwise, this is a linker input argument.
+ const Arg &A = II.getInputArg();
+
+ // Handle reserved library options.
+ if (A.getOption().matches(options::OPT_Z_reserved_lib_stdcxx)) {
+ TC.AddCXXStdlibLibArgs(Args, CmdArgs);
+ } else if (A.getOption().matches(options::OPT_Z_reserved_lib_cckext)) {
+ TC.AddCCKextLibArgs(Args, CmdArgs);
+ } else
+ A.renderAsInput(Args, CmdArgs);
+ }
+
+ // LIBRARY_PATH - included following the user specified library paths.
+ addDirectoryList(Args, CmdArgs, "-L", "LIBRARY_PATH");
+}
+
+/// \brief Determine whether Objective-C automated reference counting is
+/// enabled.
+static bool isObjCAutoRefCount(const ArgList &Args) {
+ return Args.hasFlag(options::OPT_fobjc_arc, options::OPT_fno_objc_arc, false);
+}
+
+/// \brief Determine whether we are linking the ObjC runtime.
+static bool isObjCRuntimeLinked(const ArgList &Args) {
+ if (isObjCAutoRefCount(Args))
+ return true;
+ return Args.hasArg(options::OPT_fobjc_link_runtime);
+}
+
+static void addProfileRT(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs,
+ llvm::Triple Triple) {
+ if (!(Args.hasArg(options::OPT_fprofile_arcs) ||
+ Args.hasArg(options::OPT_fprofile_generate) ||
+ Args.hasArg(options::OPT_fcreate_profile) ||
+ Args.hasArg(options::OPT_coverage)))
+ return;
+
+ // GCC links libgcov.a by adding -L<inst>/gcc/lib/gcc/<triple>/<ver> -lgcov to
+ // the link line. We cannot do the same thing because unlike gcov there is a
+ // libprofile_rt.so. We used to use the -l:libprofile_rt.a syntax, but that is
+ // not supported by old linkers.
+ std::string ProfileRT =
+ std::string(TC.getDriver().Dir) + "/../lib/libprofile_rt.a";
+
+ CmdArgs.push_back(Args.MakeArgString(ProfileRT));
+}
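+
+// Illustrative note on addProfileRT above (added for exposition, not part of
+// the original source): with -coverage or -fprofile-arcs the link line gains
+// a single absolute path of the form "<driver dir>/../lib/libprofile_rt.a";
+// no separate -L/-l pair is emitted.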
+
+void Clang::AddPreprocessingOptions(Compilation &C,
+ const Driver &D,
+ const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const InputInfo &Output,
+ const InputInfoList &Inputs) const {
+ Arg *A;
+
+ CheckPreprocessingOptions(D, Args);
+
+ Args.AddLastArg(CmdArgs, options::OPT_C);
+ Args.AddLastArg(CmdArgs, options::OPT_CC);
+
+ // Handle dependency file generation.
+ if ((A = Args.getLastArg(options::OPT_M, options::OPT_MM)) ||
+ (A = Args.getLastArg(options::OPT_MD)) ||
+ (A = Args.getLastArg(options::OPT_MMD))) {
+ // Determine the output location.
+ const char *DepFile;
+ if (Output.getType() == types::TY_Dependencies) {
+ DepFile = Output.getFilename();
+ } else if (Arg *MF = Args.getLastArg(options::OPT_MF)) {
+ DepFile = MF->getValue(Args);
+ C.addFailureResultFile(DepFile);
+ } else if (A->getOption().matches(options::OPT_M) ||
+ A->getOption().matches(options::OPT_MM)) {
+ DepFile = "-";
+ } else {
+ DepFile = darwin::CC1::getDependencyFileName(Args, Inputs);
+ C.addFailureResultFile(DepFile);
+ }
+ CmdArgs.push_back("-dependency-file");
+ CmdArgs.push_back(DepFile);
+
+ // Add a default target if one wasn't specified.
+ if (!Args.hasArg(options::OPT_MT) && !Args.hasArg(options::OPT_MQ)) {
+ const char *DepTarget;
+
+ // If the user provided -o, that is the dependency target, except
+ // when we are only generating a dependency file.
+ Arg *OutputOpt = Args.getLastArg(options::OPT_o);
+ if (OutputOpt && Output.getType() != types::TY_Dependencies) {
+ DepTarget = OutputOpt->getValue(Args);
+ } else {
+ // Otherwise derive from the base input.
+ //
+ // FIXME: This should use the computed output file location.
+ SmallString<128> P(Inputs[0].getBaseInput());
+ llvm::sys::path::replace_extension(P, "o");
+ DepTarget = Args.MakeArgString(llvm::sys::path::filename(P));
+ }
+
+ CmdArgs.push_back("-MT");
+ SmallString<128> Quoted;
+ QuoteTarget(DepTarget, Quoted);
+ CmdArgs.push_back(Args.MakeArgString(Quoted));
+ }
+
+ if (A->getOption().matches(options::OPT_M) ||
+ A->getOption().matches(options::OPT_MD))
+ CmdArgs.push_back("-sys-header-deps");
+ }
+
+ if (Args.hasArg(options::OPT_MG)) {
+ if (!A || A->getOption().matches(options::OPT_MD) ||
+ A->getOption().matches(options::OPT_MMD))
+ D.Diag(diag::err_drv_mg_requires_m_or_mm);
+ CmdArgs.push_back("-MG");
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_MP);
+
+ // Convert all -MQ <target> args to -MT <quoted target>
+ for (arg_iterator it = Args.filtered_begin(options::OPT_MT,
+ options::OPT_MQ),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+ A->claim();
+
+ if (A->getOption().matches(options::OPT_MQ)) {
+ CmdArgs.push_back("-MT");
+ SmallString<128> Quoted;
+ QuoteTarget(A->getValue(Args), Quoted);
+ CmdArgs.push_back(Args.MakeArgString(Quoted));
+
+ // -MT flag - no change
+ } else {
+ A->render(Args, CmdArgs);
+ }
+ }
+
+ // Add -i* options, and automatically translate to
+ // -include-pch/-include-pth for transparent PCH support. It's
+ // wonky, but we also look for .gch files so that we can slot
+ // seamlessly into a build system that is already set up to
+ // generate .gch files.
+ bool RenderedImplicitInclude = false;
+ for (arg_iterator it = Args.filtered_begin(options::OPT_clang_i_Group),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+
+ if (A->getOption().matches(options::OPT_include)) {
+ bool IsFirstImplicitInclude = !RenderedImplicitInclude;
+ RenderedImplicitInclude = true;
+
+ // Use PCH if the user requested it.
+ bool UsePCH = D.CCCUsePCH;
+
+ bool FoundPTH = false;
+ bool FoundPCH = false;
+ llvm::sys::Path P(A->getValue(Args));
+ bool Exists;
+ if (UsePCH) {
+ P.appendSuffix("pch");
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
+ FoundPCH = true;
+ else
+ P.eraseSuffix();
+ }
+
+ if (!FoundPCH) {
+ P.appendSuffix("pth");
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists)
+ FoundPTH = true;
+ else
+ P.eraseSuffix();
+ }
+
+ if (!FoundPCH && !FoundPTH) {
+ P.appendSuffix("gch");
+ if (!llvm::sys::fs::exists(P.str(), Exists) && Exists) {
+ FoundPCH = UsePCH;
+ FoundPTH = !UsePCH;
+ }
+ else
+ P.eraseSuffix();
+ }
+
+ if (FoundPCH || FoundPTH) {
+ if (IsFirstImplicitInclude) {
+ A->claim();
+ if (UsePCH)
+ CmdArgs.push_back("-include-pch");
+ else
+ CmdArgs.push_back("-include-pth");
+ CmdArgs.push_back(Args.MakeArgString(P.str()));
+ continue;
+ } else {
+ // Ignore the PCH if not first on command line and emit warning.
+ D.Diag(diag::warn_drv_pch_not_first_include)
+ << P.str() << A->getAsString(Args);
+ }
+ }
+ }
+
+ // Not translated, render as usual.
+ A->claim();
+ A->render(Args, CmdArgs);
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_D, options::OPT_U);
+ Args.AddAllArgs(CmdArgs, options::OPT_I_Group, options::OPT_F,
+ options::OPT_index_header_map);
+
+ // Add -Wp, and -Xassembler if using the preprocessor.
+
+ // FIXME: There is a very unfortunate problem here, some troubled
+ // souls abuse -Wp, to pass preprocessor options in gcc syntax. To
+ // really support that we would have to parse and then translate
+ // those options. :(
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wp_COMMA,
+ options::OPT_Xpreprocessor);
+
+ // -I- is a deprecated GCC feature, reject it.
+ if (Arg *A = Args.getLastArg(options::OPT_I_))
+ D.Diag(diag::err_drv_I_dash_not_supported) << A->getAsString(Args);
+
+ // If we have a --sysroot, and don't have an explicit -isysroot flag, add an
+ // -isysroot to the CC1 invocation.
+ if (Arg *A = Args.getLastArg(options::OPT__sysroot_EQ)) {
+ if (!Args.hasArg(options::OPT_isysroot)) {
+ CmdArgs.push_back("-isysroot");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+ }
+
+ // If a module path was provided, pass it along. Otherwise, use a temporary
+ // directory.
+ if (Arg *A = Args.getLastArg(options::OPT_fmodule_cache_path)) {
+ A->claim();
+ A->render(Args, CmdArgs);
+ } else {
+ SmallString<128> DefaultModuleCache;
+ llvm::sys::path::system_temp_directory(/*erasedOnReboot=*/false,
+ DefaultModuleCache);
+ llvm::sys::path::append(DefaultModuleCache, "clang-module-cache");
+ CmdArgs.push_back("-fmodule-cache-path");
+ CmdArgs.push_back(Args.MakeArgString(DefaultModuleCache));
+ }
+
+ // Parse additional include paths from environment variables.
+ // FIXME: We should probably sink the logic for handling these from the
+ // frontend into the driver. It will allow deleting 4 otherwise unused flags.
+ // CPATH - included following the user specified includes (but prior to
+ // builtin and standard includes).
+ addDirectoryList(Args, CmdArgs, "-I", "CPATH");
+ // C_INCLUDE_PATH - system includes enabled when compiling C.
+ addDirectoryList(Args, CmdArgs, "-c-isystem", "C_INCLUDE_PATH");
+ // CPLUS_INCLUDE_PATH - system includes enabled when compiling C++.
+ addDirectoryList(Args, CmdArgs, "-cxx-isystem", "CPLUS_INCLUDE_PATH");
+ // OBJC_INCLUDE_PATH - system includes enabled when compiling ObjC.
+ addDirectoryList(Args, CmdArgs, "-objc-isystem", "OBJC_INCLUDE_PATH");
+ // OBJCPLUS_INCLUDE_PATH - system includes enabled when compiling ObjC++.
+ addDirectoryList(Args, CmdArgs, "-objcxx-isystem", "OBJCPLUS_INCLUDE_PATH");
+
+ // Add C++ include arguments, if needed.
+ if (types::isCXX(Inputs[0].getType()))
+ getToolChain().AddClangCXXStdlibIncludeArgs(Args, CmdArgs);
+
+ // Add system include arguments.
+ getToolChain().AddClangSystemIncludeArgs(Args, CmdArgs);
+}
+
+/// getARMTargetCPU - Get the (LLVM) name of the ARM cpu we are targeting.
+//
+// FIXME: tblgen this.
+static const char *getARMTargetCPU(const ArgList &Args,
+ const llvm::Triple &Triple) {
+ // FIXME: Warn on inconsistent use of -mcpu and -march.
+
+ // If we have -mcpu=, use that.
+ if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ return A->getValue(Args);
+
+ StringRef MArch;
+ if (Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ // Otherwise, if we have -march= choose the base CPU for that arch.
+ MArch = A->getValue(Args);
+ } else {
+ // Otherwise, use the Arch from the triple.
+ MArch = Triple.getArchName();
+ }
+
+ return llvm::StringSwitch<const char *>(MArch)
+ .Cases("armv2", "armv2a","arm2")
+ .Case("armv3", "arm6")
+ .Case("armv3m", "arm7m")
+ .Cases("armv4", "armv4t", "arm7tdmi")
+ .Cases("armv5", "armv5t", "arm10tdmi")
+ .Cases("armv5e", "armv5te", "arm1026ejs")
+ .Case("armv5tej", "arm926ej-s")
+ .Cases("armv6", "armv6k", "arm1136jf-s")
+ .Case("armv6j", "arm1136j-s")
+ .Cases("armv6z", "armv6zk", "arm1176jzf-s")
+ .Case("armv6t2", "arm1156t2-s")
+ .Cases("armv7", "armv7a", "armv7-a", "cortex-a8")
+ .Cases("armv7r", "armv7-r", "cortex-r4")
+ .Cases("armv7m", "armv7-m", "cortex-m3")
+ .Case("ep9312", "ep9312")
+ .Case("iwmmxt", "iwmmxt")
+ .Case("xscale", "xscale")
+ .Cases("armv6m", "armv6-m", "cortex-m0")
+ // If all else failed, return the most base CPU LLVM supports.
+ .Default("arm7tdmi");
+}
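+
+// Illustrative note on getARMTargetCPU above (added for exposition, not part
+// of the original source): an explicit -mcpu= always wins; otherwise
+// "-march=armv7-a" maps to the base CPU "cortex-a8", and with neither flag
+// the architecture name from the triple is used for the lookup, falling back
+// to "arm7tdmi" for anything unrecognized.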
+
+/// getLLVMArchSuffixForARM - Get the LLVM arch name to use for a particular
+/// CPU.
+//
+// FIXME: This is redundant with -mcpu; why does LLVM use this?
+// FIXME: tblgen this, or kill it!
+static const char *getLLVMArchSuffixForARM(StringRef CPU) {
+ return llvm::StringSwitch<const char *>(CPU)
+ .Cases("arm7tdmi", "arm7tdmi-s", "arm710t", "v4t")
+ .Cases("arm720t", "arm9", "arm9tdmi", "v4t")
+ .Cases("arm920", "arm920t", "arm922t", "v4t")
+ .Cases("arm940t", "ep9312","v4t")
+ .Cases("arm10tdmi", "arm1020t", "v5")
+ .Cases("arm9e", "arm926ej-s", "arm946e-s", "v5e")
+ .Cases("arm966e-s", "arm968e-s", "arm10e", "v5e")
+ .Cases("arm1020e", "arm1022e", "xscale", "iwmmxt", "v5e")
+ .Cases("arm1136j-s", "arm1136jf-s", "arm1176jz-s", "v6")
+ .Cases("arm1176jzf-s", "mpcorenovfp", "mpcore", "v6")
+ .Cases("arm1156t2-s", "arm1156t2f-s", "v6t2")
+ .Cases("cortex-a8", "cortex-a9", "v7")
+ .Case("cortex-m3", "v7m")
+ .Case("cortex-m4", "v7m")
+ .Case("cortex-m0", "v6m")
+ .Default("");
+}
+
+// FIXME: Move to target hook.
+static bool isSignedCharDefault(const llvm::Triple &Triple) {
+ switch (Triple.getArch()) {
+ default:
+ return true;
+
+ case llvm::Triple::arm:
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ if (Triple.isOSDarwin())
+ return true;
+ return false;
+ }
+}
+
+// Handle -mfpu=.
+//
+// FIXME: Centralize feature selection; defaulting shouldn't also be done in
+// the frontend target.
+static void addFPUArgs(const Driver &D, const Arg *A, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ StringRef FPU = A->getValue(Args);
+
+ // Set the target features based on the FPU.
+ if (FPU == "fpa" || FPU == "fpe2" || FPU == "fpe3" || FPU == "maverick") {
+ // Disable any default FPU support.
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-vfp2");
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-vfp3");
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-neon");
+ } else if (FPU == "vfp3-d16" || FPU == "vfpv3-d16") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+vfp3");
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+d16");
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-neon");
+ } else if (FPU == "vfp") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+vfp2");
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-neon");
+ } else if (FPU == "vfp3" || FPU == "vfpv3") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+vfp3");
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-neon");
+ } else if (FPU == "neon") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+neon");
+ } else
+ D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
+}
+
+// Handle -mfpmath=.
+static void addFPMathArgs(const Driver &D, const Arg *A, const ArgList &Args,
+ ArgStringList &CmdArgs, StringRef CPU) {
+ StringRef FPMath = A->getValue(Args);
+
+ // Set the target features based on the FPMath.
+ if (FPMath == "neon") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+neonfp");
+
+ if (CPU != "cortex-a8" && CPU != "cortex-a9" && CPU != "cortex-a9-mp")
+ D.Diag(diag::err_drv_invalid_feature) << "-mfpmath=neon" << CPU;
+
+ } else if (FPMath == "vfp" || FPMath == "vfp2" || FPMath == "vfp3" ||
+ FPMath == "vfp4") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-neonfp");
+
+ // FIXME: Add warnings when disabling a feature not present for a given CPU.
+ } else
+ D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
+}
+
+// Select the float ABI as determined by -msoft-float, -mhard-float, and
+// -mfloat-abi=.
+static StringRef getARMFloatABI(const Driver &D,
+ const ArgList &Args,
+ const llvm::Triple &Triple) {
+ StringRef FloatABI;
+ if (Arg *A = Args.getLastArg(options::OPT_msoft_float,
+ options::OPT_mhard_float,
+ options::OPT_mfloat_abi_EQ)) {
+ if (A->getOption().matches(options::OPT_msoft_float))
+ FloatABI = "soft";
+ else if (A->getOption().matches(options::OPT_mhard_float))
+ FloatABI = "hard";
+ else {
+ FloatABI = A->getValue(Args);
+ if (FloatABI != "soft" && FloatABI != "softfp" && FloatABI != "hard") {
+ D.Diag(diag::err_drv_invalid_mfloat_abi)
+ << A->getAsString(Args);
+ FloatABI = "soft";
+ }
+ }
+ }
+
+ // If unspecified, choose the default based on the platform.
+ if (FloatABI.empty()) {
+ switch (Triple.getOS()) {
+ case llvm::Triple::Darwin:
+ case llvm::Triple::MacOSX:
+ case llvm::Triple::IOS: {
+ // Darwin defaults to "softfp" for v6 and v7.
+ //
+ // FIXME: Factor out an ARM class so we can cache the arch somewhere.
+ StringRef ArchName =
+ getLLVMArchSuffixForARM(getARMTargetCPU(Args, Triple));
+ if (ArchName.startswith("v6") || ArchName.startswith("v7"))
+ FloatABI = "softfp";
+ else
+ FloatABI = "soft";
+ break;
+ }
+
+ case llvm::Triple::Linux: {
+ if (Triple.getEnvironment() == llvm::Triple::GNUEABI) {
+ FloatABI = "softfp";
+ break;
+ }
+ }
+ // fall through
+
+ default:
+ switch(Triple.getEnvironment()) {
+ case llvm::Triple::GNUEABI:
+ FloatABI = "softfp";
+ break;
+ case llvm::Triple::EABI:
+ // EABI is always AAPCS, and if it was not marked 'hard', it's softfp
+ FloatABI = "softfp";
+ break;
+ case llvm::Triple::ANDROIDEABI: {
+ StringRef ArchName =
+ getLLVMArchSuffixForARM(getARMTargetCPU(Args, Triple));
+ if (ArchName.startswith("v7"))
+ FloatABI = "softfp";
+ else
+ FloatABI = "soft";
+ break;
+ }
+ default:
+ // Assume "soft", but warn the user we are guessing.
+ FloatABI = "soft";
+ D.Diag(diag::warn_drv_assuming_mfloat_abi_is) << "soft";
+ break;
+ }
+ }
+ }
+
+ return FloatABI;
+}
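+
+// Illustrative note on getARMFloatABI above (added for exposition, not part
+// of the original source): with no float-ABI flags, an armv7 Darwin triple
+// yields "softfp" (its arch suffix starts with "v7"), GNUEABI and EABI
+// environments also yield "softfp", and anything unrecognized falls back to
+// "soft" with a warning that the driver is guessing.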
+
+
+void Clang::AddARMTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ bool KernelOrKext) const {
+ const Driver &D = getToolChain().getDriver();
+ llvm::Triple Triple = getToolChain().getTriple();
+
+ // Select the ABI to use.
+ //
+ // FIXME: Support -meabi.
+ const char *ABIName = 0;
+ if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) {
+ ABIName = A->getValue(Args);
+ } else {
+ // Select the default based on the platform.
+ switch(Triple.getEnvironment()) {
+ case llvm::Triple::ANDROIDEABI:
+ case llvm::Triple::GNUEABI:
+ ABIName = "aapcs-linux";
+ break;
+ case llvm::Triple::EABI:
+ ABIName = "aapcs";
+ break;
+ default:
+ ABIName = "apcs-gnu";
+ }
+ }
+ CmdArgs.push_back("-target-abi");
+ CmdArgs.push_back(ABIName);
+
+ // Set the CPU based on -march= and -mcpu=.
+ CmdArgs.push_back("-target-cpu");
+ CmdArgs.push_back(getARMTargetCPU(Args, Triple));
+
+ // Determine floating point ABI from the options & target defaults.
+ StringRef FloatABI = getARMFloatABI(D, Args, Triple);
+ if (FloatABI == "soft") {
+ // Floating point operations and argument passing are soft.
+ //
+ // FIXME: This changes CPP defines, we need -target-soft-float.
+ CmdArgs.push_back("-msoft-float");
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("soft");
+ } else if (FloatABI == "softfp") {
+ // Floating point operations are hard, but argument passing is soft.
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("soft");
+ } else {
+ // Floating point operations and argument passing are hard.
+ assert(FloatABI == "hard" && "Invalid float abi!");
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("hard");
+ }
+
+ // Set appropriate target features for floating point mode.
+ //
+ // FIXME: This is a hack; the LLVM backend doesn't actually use these yet
+ // (it uses the -mfloat-abi and -msoft-float options above), and they are
+ // stripped out by the ARM target.
+
+ // Use software floating point operations?
+ if (FloatABI == "soft") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+soft-float");
+ }
+
+ // Use software floating point argument passing?
+ if (FloatABI != "hard") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+soft-float-abi");
+ }
+
+ // Honor -mfpu=.
+ if (const Arg *A = Args.getLastArg(options::OPT_mfpu_EQ))
+ addFPUArgs(D, A, Args, CmdArgs);
+
+ // Honor -mfpmath=.
+ if (const Arg *A = Args.getLastArg(options::OPT_mfpmath_EQ))
+ addFPMathArgs(D, A, Args, CmdArgs, getARMTargetCPU(Args, Triple));
+
+ // Setting -msoft-float effectively disables NEON because of the GCC
+ // implementation, although the same isn't true of VFP or VFP3.
+ if (FloatABI == "soft") {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("-neon");
+ }
+
+ // Kernel code has more strict alignment requirements.
+ if (KernelOrKext) {
+ CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-arm-long-calls");
+
+ CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-arm-strict-align");
+
+ // The kext linker doesn't know how to deal with movw/movt.
+ CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-arm-darwin-use-movt=0");
+ }
+
+ // Setting -mno-global-merge disables the codegen global merge pass. Setting
+ // -mglobal-merge has no effect as the pass is enabled by default.
+ if (Arg *A = Args.getLastArg(options::OPT_mglobal_merge,
+ options::OPT_mno_global_merge)) {
+ if (A->getOption().matches(options::OPT_mno_global_merge))
+ CmdArgs.push_back("-mno-global-merge");
+ }
+}
+
+// Get default architecture.
+static const char* getMipsArchFromCPU(StringRef CPUName) {
+ if (CPUName == "mips32" || CPUName == "mips32r2")
+ return "mips";
+
+ assert((CPUName == "mips64" || CPUName == "mips64r2") &&
+ "Unexpected cpu name.");
+
+ return "mips64";
+}
+
+// Check that ArchName is a known Mips architecture name.
+static bool checkMipsArchName(StringRef ArchName) {
+ return ArchName == "mips" ||
+ ArchName == "mipsel" ||
+ ArchName == "mips64" ||
+ ArchName == "mips64el";
+}
+
+// Get default target cpu.
+static const char* getMipsCPUFromArch(StringRef ArchName) {
+ if (ArchName == "mips" || ArchName == "mipsel")
+ return "mips32";
+
+ assert((ArchName == "mips64" || ArchName == "mips64el") &&
+ "Unexpected arch name.");
+
+ return "mips64";
+}
+
+// Get default ABI.
+static const char* getMipsABIFromArch(StringRef ArchName) {
+ if (ArchName == "mips" || ArchName == "mipsel")
+ return "o32";
+
+ assert((ArchName == "mips64" || ArchName == "mips64el") &&
+ "Unexpected arch name.");
+ return "n64";
+}
+
+// Get CPU and ABI names. They are not independent
+// so we have to calculate them together.
+static void getMipsCPUAndABI(const ArgList &Args,
+ const ToolChain &TC,
+ StringRef &CPUName,
+ StringRef &ABIName) {
+ StringRef ArchName;
+
+ // Select target cpu and architecture.
+ if (Arg *A = Args.getLastArg(options::OPT_mcpu_EQ)) {
+ CPUName = A->getValue(Args);
+ ArchName = getMipsArchFromCPU(CPUName);
+ }
+ else {
+ ArchName = Args.MakeArgString(TC.getArchName());
+ if (!checkMipsArchName(ArchName))
+ TC.getDriver().Diag(diag::err_drv_invalid_arch_name) << ArchName;
+ else
+ CPUName = getMipsCPUFromArch(ArchName);
+ }
+
+ // Select the ABI to use.
+ if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ))
+ ABIName = A->getValue(Args);
+ else
+ ABIName = getMipsABIFromArch(ArchName);
+}
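+
+// Illustrative note on getMipsCPUAndABI above (added for exposition, not part
+// of the original source): for a mips64el triple with no -mcpu= or -mabi=
+// flags this selects CPU "mips64" and ABI "n64", while a plain mipsel triple
+// defaults to "mips32" and "o32"; an explicit -mcpu= also determines the
+// architecture used to pick the default ABI.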
+
+void Clang::AddMIPSTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const Driver &D = getToolChain().getDriver();
+ StringRef CPUName;
+ StringRef ABIName;
+ getMipsCPUAndABI(Args, getToolChain(), CPUName, ABIName);
+
+ CmdArgs.push_back("-target-cpu");
+ CmdArgs.push_back(CPUName.data());
+
+ CmdArgs.push_back("-target-abi");
+ CmdArgs.push_back(ABIName.data());
+
+ // Select the float ABI as determined by -msoft-float, -mhard-float,
+ // and -mfloat-abi=.
+ StringRef FloatABI;
+ if (Arg *A = Args.getLastArg(options::OPT_msoft_float,
+ options::OPT_mhard_float,
+ options::OPT_mfloat_abi_EQ)) {
+ if (A->getOption().matches(options::OPT_msoft_float))
+ FloatABI = "soft";
+ else if (A->getOption().matches(options::OPT_mhard_float))
+ FloatABI = "hard";
+ else {
+ FloatABI = A->getValue(Args);
+ if (FloatABI != "soft" && FloatABI != "single" && FloatABI != "hard") {
+ D.Diag(diag::err_drv_invalid_mfloat_abi)
+ << A->getAsString(Args);
+ FloatABI = "hard";
+ }
+ }
+ }
+
+ // If unspecified, choose the default based on the platform.
+ if (FloatABI.empty()) {
+ // Assume "hard", because it's a default value used by gcc.
+ // When we start to recognize specific target MIPS processors,
+ // we will be able to select the default more correctly.
+ FloatABI = "hard";
+ }
+
+ if (FloatABI == "soft") {
+ // Floating point operations and argument passing are soft.
+ CmdArgs.push_back("-msoft-float");
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("soft");
+
+ // FIXME: This is a hack. We need to pass the selected float mode
+ // to MipsTargetInfoBase so it can define the appropriate macros;
+ // at the moment this is the only way to do that.
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+soft-float");
+ }
+ else if (FloatABI == "single") {
+ // Restrict the use of hardware floating-point
+ // instructions to 32-bit operations.
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+single-float");
+ }
+ else {
+ // Floating point operations and argument passing are hard.
+ assert(FloatABI == "hard" && "Invalid float abi!");
+ CmdArgs.push_back("-mfloat-abi");
+ CmdArgs.push_back("hard");
+ }
+}
+
+void Clang::AddSparcTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const Driver &D = getToolChain().getDriver();
+
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ CmdArgs.push_back("-target-cpu");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ // Select the float ABI as determined by -msoft-float and -mhard-float.
+ StringRef FloatABI;
+ if (Arg *A = Args.getLastArg(options::OPT_msoft_float,
+ options::OPT_mhard_float)) {
+ if (A->getOption().matches(options::OPT_msoft_float))
+ FloatABI = "soft";
+ else if (A->getOption().matches(options::OPT_mhard_float))
+ FloatABI = "hard";
+ }
+
+ // If unspecified, choose the default based on the platform.
+ if (FloatABI.empty()) {
+ switch (getToolChain().getTriple().getOS()) {
+ default:
+ // Assume "soft", but warn the user we are guessing.
+ FloatABI = "soft";
+ D.Diag(diag::warn_drv_assuming_mfloat_abi_is) << "soft";
+ break;
+ }
+ }
+
+ if (FloatABI == "soft") {
+ // Floating point operations and argument passing are soft.
+ //
+ // FIXME: This changes CPP defines, we need -target-soft-float.
+ CmdArgs.push_back("-msoft-float");
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back("+soft-float");
+ } else {
+ assert(FloatABI == "hard" && "Invalid float abi!");
+ CmdArgs.push_back("-mhard-float");
+ }
+}
+
+void Clang::AddX86TargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ if (!Args.hasFlag(options::OPT_mred_zone,
+ options::OPT_mno_red_zone,
+ true) ||
+ Args.hasArg(options::OPT_mkernel) ||
+ Args.hasArg(options::OPT_fapple_kext))
+ CmdArgs.push_back("-disable-red-zone");
+
+ if (Args.hasFlag(options::OPT_msoft_float,
+ options::OPT_mno_soft_float,
+ false))
+ CmdArgs.push_back("-no-implicit-float");
+
+ const char *CPUName = 0;
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ if (StringRef(A->getValue(Args)) == "native") {
+ // FIXME: Reject attempts to use -march=native unless the target matches
+ // the host.
+ //
+ // FIXME: We should also incorporate the detected target features for use
+ // with -native.
+ std::string CPU = llvm::sys::getHostCPUName();
+ if (!CPU.empty())
+ CPUName = Args.MakeArgString(CPU);
+ } else
+ CPUName = A->getValue(Args);
+ }
+
+ // Select the default CPU if none was given (or detection failed).
+ if (!CPUName) {
+ // FIXME: Need target hooks.
+ if (getToolChain().getTriple().isOSDarwin()) {
+ if (getToolChain().getArch() == llvm::Triple::x86_64)
+ CPUName = "core2";
+ else if (getToolChain().getArch() == llvm::Triple::x86)
+ CPUName = "yonah";
+ } else if (getToolChain().getOS().startswith("haiku")) {
+ if (getToolChain().getArch() == llvm::Triple::x86_64)
+ CPUName = "x86-64";
+ else if (getToolChain().getArch() == llvm::Triple::x86)
+ CPUName = "i586";
+ } else if (getToolChain().getOS().startswith("openbsd")) {
+ if (getToolChain().getArch() == llvm::Triple::x86_64)
+ CPUName = "x86-64";
+ else if (getToolChain().getArch() == llvm::Triple::x86)
+ CPUName = "i486";
+ } else if (getToolChain().getOS().startswith("freebsd")) {
+ if (getToolChain().getArch() == llvm::Triple::x86_64)
+ CPUName = "x86-64";
+ else if (getToolChain().getArch() == llvm::Triple::x86)
+ CPUName = "i486";
+ } else if (getToolChain().getOS().startswith("netbsd")) {
+ if (getToolChain().getArch() == llvm::Triple::x86_64)
+ CPUName = "x86-64";
+ else if (getToolChain().getArch() == llvm::Triple::x86)
+ CPUName = "i486";
+ } else {
+ if (getToolChain().getArch() == llvm::Triple::x86_64)
+ CPUName = "x86-64";
+ else if (getToolChain().getArch() == llvm::Triple::x86)
+ CPUName = "pentium4";
+ }
+ }
+
+ if (CPUName) {
+ CmdArgs.push_back("-target-cpu");
+ CmdArgs.push_back(CPUName);
+ }
+
+ // The required algorithm here is slightly strange: the options are applied
+ // in order (so -mno-sse -msse2 disables SSE3), but any option that gets
+ // directly overridden later is ignored (so "-mno-sse -msse2 -mno-sse2 -msse"
+ // is equivalent to "-mno-sse2 -msse"). The -cc1 handling deals with the
+ // former correctly, but not the latter; handle directly-overridden
+ // attributes here.
+ llvm::StringMap<unsigned> PrevFeature;
+ std::vector<const char*> Features;
+ for (arg_iterator it = Args.filtered_begin(options::OPT_m_x86_Features_Group),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ StringRef Name = (*it)->getOption().getName();
+ (*it)->claim();
+
+ // Skip over "-m".
+ assert(Name.startswith("-m") && "Invalid feature name.");
+ Name = Name.substr(2);
+
+ bool IsNegative = Name.startswith("no-");
+ if (IsNegative)
+ Name = Name.substr(3);
+
+ unsigned& Prev = PrevFeature[Name];
+ if (Prev)
+ Features[Prev - 1] = 0;
+ Prev = Features.size() + 1;
+ Features.push_back(Args.MakeArgString((IsNegative ? "-" : "+") + Name));
+ }
+ for (unsigned i = 0; i < Features.size(); i++) {
+ if (Features[i]) {
+ CmdArgs.push_back("-target-feature");
+ CmdArgs.push_back(Features[i]);
+ }
+ }
+}
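+
+// Illustrative note on the feature handling above (added for exposition, not
+// part of the original source): given "-mno-sse -msse2 -mno-sse2 -msse", the
+// directly overridden entries are nulled out and only
+// "-target-feature -sse2 -target-feature +sse" survive, matching the
+// "-mno-sse2 -msse" equivalence described in the comment.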
+
+static Arg *getLastHexagonArchArg(const ArgList &Args) {
+ Arg *A = NULL;
+
+ for (ArgList::const_iterator it = Args.begin(), ie = Args.end();
+ it != ie; ++it) {
+ if ((*it)->getOption().matches(options::OPT_march_EQ) ||
+ (*it)->getOption().matches(options::OPT_mcpu_EQ)) {
+ A = *it;
+ A->claim();
+ }
+ else if ((*it)->getOption().matches(options::OPT_m_Joined)){
+ StringRef Value = (*it)->getValue(Args,0);
+ if (Value.startswith("v")) {
+ A = *it;
+ A->claim();
+ }
+ }
+ }
+ return A;
+}
+
+static StringRef getHexagonTargetCPU(const ArgList &Args) {
+ Arg *A;
+ llvm::StringRef WhichHexagon;
+
+ // Select the default CPU (v4) if none was given or detection failed.
+ if ((A = getLastHexagonArchArg (Args))) {
+ WhichHexagon = A->getValue(Args);
+ if (WhichHexagon == "")
+ return "v4";
+ else
+ return WhichHexagon;
+ }
+ else
+ return "v4";
+}
+
+void Clang::AddHexagonTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ llvm::Triple Triple = getToolChain().getTriple();
+
+ CmdArgs.push_back("-target-cpu");
+ CmdArgs.push_back(Args.MakeArgString("hexagon" + getHexagonTargetCPU(Args)));
+ CmdArgs.push_back("-fno-signed-char");
+ CmdArgs.push_back("-nobuiltininc");
+
+ if (Args.hasArg(options::OPT_mqdsp6_compat))
+ CmdArgs.push_back("-mqdsp6-compat");
+
+ if (Arg *A = Args.getLastArg(options::OPT_G,
+ options::OPT_msmall_data_threshold_EQ)) {
+ std::string SmallDataThreshold="-small-data-threshold=";
+ SmallDataThreshold += A->getValue(Args);
+ CmdArgs.push_back ("-mllvm");
+ CmdArgs.push_back(Args.MakeArgString(SmallDataThreshold));
+ A->claim();
+ }
+
+ CmdArgs.push_back ("-mllvm");
+ CmdArgs.push_back ("-machine-sink-split=0");
+}
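+
+// Illustrative note on AddHexagonTargetArgs above (added for exposition, not
+// part of the original source): with no -march=/-mcpu= flag the default is
+// "-target-cpu hexagonv4"; "-mcpu=v5" would instead produce
+// "-target-cpu hexagonv5".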
+
+static bool
+shouldUseExceptionTablesForObjCExceptions(unsigned objcABIVersion,
+ const llvm::Triple &Triple) {
+ // We use the zero-cost exception tables for Objective-C if the non-fragile
+ // ABI is enabled or when compiling for x86_64 and ARM on Snow Leopard and
+ // later.
+
+ if (objcABIVersion >= 2)
+ return true;
+
+ if (!Triple.isOSDarwin())
+ return false;
+
+ return (!Triple.isMacOSXVersionLT(10,5) &&
+ (Triple.getArch() == llvm::Triple::x86_64 ||
+ Triple.getArch() == llvm::Triple::arm));
+}
+
+/// addExceptionArgs - Adds exception-related arguments to the driver command
+/// arguments. There's a master flag, -fexceptions, and also language-specific
+/// flags to enable/disable C++ and Objective-C exceptions. This makes it
+/// possible, for example, to disable C++ exceptions but enable Objective-C
+/// exceptions.
+static void addExceptionArgs(const ArgList &Args, types::ID InputType,
+ const llvm::Triple &Triple,
+ bool KernelOrKext,
+ unsigned objcABIVersion,
+ ArgStringList &CmdArgs) {
+ if (KernelOrKext) {
+ // -mkernel and -fapple-kext imply no exceptions, so claim exception related
+ // arguments now to avoid warnings about unused arguments.
+ Args.ClaimAllArgs(options::OPT_fexceptions);
+ Args.ClaimAllArgs(options::OPT_fno_exceptions);
+ Args.ClaimAllArgs(options::OPT_fobjc_exceptions);
+ Args.ClaimAllArgs(options::OPT_fno_objc_exceptions);
+ Args.ClaimAllArgs(options::OPT_fcxx_exceptions);
+ Args.ClaimAllArgs(options::OPT_fno_cxx_exceptions);
+ return;
+ }
+
+ // Exceptions are enabled by default.
+ bool ExceptionsEnabled = true;
+
+ // This keeps track of whether exceptions were explicitly turned on or off.
+ bool DidHaveExplicitExceptionFlag = false;
+
+ if (Arg *A = Args.getLastArg(options::OPT_fexceptions,
+ options::OPT_fno_exceptions)) {
+ if (A->getOption().matches(options::OPT_fexceptions))
+ ExceptionsEnabled = true;
+ else
+ ExceptionsEnabled = false;
+
+ DidHaveExplicitExceptionFlag = true;
+ }
+
+ bool ShouldUseExceptionTables = false;
+
+ // Exception tables and cleanups can be enabled with -fexceptions even if the
+ // language itself doesn't support exceptions.
+ if (ExceptionsEnabled && DidHaveExplicitExceptionFlag)
+ ShouldUseExceptionTables = true;
+
+ // Obj-C exceptions are enabled by default, regardless of -fexceptions. This
+ // is not necessarily sensible, but follows GCC.
+ if (types::isObjC(InputType) &&
+ Args.hasFlag(options::OPT_fobjc_exceptions,
+ options::OPT_fno_objc_exceptions,
+ true)) {
+ CmdArgs.push_back("-fobjc-exceptions");
+
+ ShouldUseExceptionTables |=
+ shouldUseExceptionTablesForObjCExceptions(objcABIVersion, Triple);
+ }
+
+ if (types::isCXX(InputType)) {
+ bool CXXExceptionsEnabled = ExceptionsEnabled;
+
+ if (Arg *A = Args.getLastArg(options::OPT_fcxx_exceptions,
+ options::OPT_fno_cxx_exceptions,
+ options::OPT_fexceptions,
+ options::OPT_fno_exceptions)) {
+ if (A->getOption().matches(options::OPT_fcxx_exceptions))
+ CXXExceptionsEnabled = true;
+ else if (A->getOption().matches(options::OPT_fno_cxx_exceptions))
+ CXXExceptionsEnabled = false;
+ }
+
+ if (CXXExceptionsEnabled) {
+ CmdArgs.push_back("-fcxx-exceptions");
+
+ ShouldUseExceptionTables = true;
+ }
+ }
+
+ if (ShouldUseExceptionTables)
+ CmdArgs.push_back("-fexceptions");
+}
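+
+// Illustrative note on addExceptionArgs above (added for exposition, not part
+// of the original source): for a C++ input with no exception flags it emits
+// "-fcxx-exceptions" and "-fexceptions", since C++ exceptions are on by
+// default; "-fno-exceptions" alone suppresses both, unless a later
+// "-fcxx-exceptions" turns them back on.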
+
+static bool ShouldDisableCFI(const ArgList &Args,
+ const ToolChain &TC) {
+ bool Default = true;
+ if (TC.getTriple().isOSDarwin()) {
+ // The native Darwin assembler doesn't support CFI directives, so
+ // we disable them if we think the .s file will be passed to it.
+ Default = Args.hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ TC.IsIntegratedAssemblerDefault());
+ }
+ return !Args.hasFlag(options::OPT_fdwarf2_cfi_asm,
+ options::OPT_fno_dwarf2_cfi_asm,
+ Default);
+}
+
+static bool ShouldDisableDwarfDirectory(const ArgList &Args,
+ const ToolChain &TC) {
+ bool IsIADefault = TC.IsIntegratedAssemblerDefault();
+ bool UseIntegratedAs = Args.hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ IsIADefault);
+ bool UseDwarfDirectory = Args.hasFlag(options::OPT_fdwarf_directory_asm,
+ options::OPT_fno_dwarf_directory_asm,
+ UseIntegratedAs);
+ return !UseDwarfDirectory;
+}
+
+/// \brief Check whether the given input tree contains any compilation actions.
+static bool ContainsCompileAction(const Action *A) {
+ if (isa<CompileJobAction>(A))
+ return true;
+
+ for (Action::const_iterator it = A->begin(), ie = A->end(); it != ie; ++it)
+ if (ContainsCompileAction(*it))
+ return true;
+
+ return false;
+}
+
+/// \brief Check if -relax-all should be passed to the integrated assembler.
+/// This is done by default when compiling non-assembler source with -O0.
+static bool UseRelaxAll(Compilation &C, const ArgList &Args) {
+ bool RelaxDefault = true;
+
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group))
+ RelaxDefault = A->getOption().matches(options::OPT_O0);
+
+ if (RelaxDefault) {
+ RelaxDefault = false;
+ for (ActionList::const_iterator it = C.getActions().begin(),
+ ie = C.getActions().end(); it != ie; ++it) {
+ if (ContainsCompileAction(*it)) {
+ RelaxDefault = true;
+ break;
+ }
+ }
+ }
+
+ return Args.hasFlag(options::OPT_mrelax_all, options::OPT_mno_relax_all,
+ RelaxDefault);
+}
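+
+// Illustrative note on UseRelaxAll above (added for exposition, not part of
+// the original source): an ordinary -O0 compile of C source gets -mrelax-all
+// by default, while -O1 and above, or a job graph with no compile step (for
+// example assembling a hand-written .s file), omits it unless -mrelax-all is
+// passed explicitly.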
+
+/// If AddressSanitizer is enabled, add appropriate linker flags (Linux).
+/// This needs to be called before we add the C run-time (malloc, etc).
+static void addAsanRTLinux(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ // Add asan linker flags when linking an executable, but not a shared object.
+ if (Args.hasArg(options::OPT_shared) ||
+ !Args.hasFlag(options::OPT_faddress_sanitizer,
+ options::OPT_fno_address_sanitizer, false))
+ return;
+
+ // LibAsan is "libclang_rt.asan-<ArchName>.a" in the Linux library resource
+ // directory.
+ SmallString<128> LibAsan(TC.getDriver().ResourceDir);
+ llvm::sys::path::append(LibAsan, "lib", "linux",
+ (Twine("libclang_rt.asan-") +
+ TC.getArchName() + ".a"));
+ CmdArgs.push_back(Args.MakeArgString(LibAsan));
+ CmdArgs.push_back("-lpthread");
+ CmdArgs.push_back("-ldl");
+ CmdArgs.push_back("-export-dynamic");
+}
+
+static bool shouldUseFramePointer(const ArgList &Args,
+ const llvm::Triple &Triple) {
+ if (Arg *A = Args.getLastArg(options::OPT_fno_omit_frame_pointer,
+ options::OPT_fomit_frame_pointer))
+ return A->getOption().matches(options::OPT_fno_omit_frame_pointer);
+
+ // Don't use a frame pointer on linux x86 and x86_64 if optimizing.
+ if ((Triple.getArch() == llvm::Triple::x86_64 ||
+ Triple.getArch() == llvm::Triple::x86) &&
+ Triple.getOS() == llvm::Triple::Linux) {
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group))
+ if (!A->getOption().matches(options::OPT_O0))
+ return false;
+ }
+
+ return true;
+}
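+
+// Illustrative note on shouldUseFramePointer above (added for exposition, not
+// part of the original source): on x86 and x86_64 Linux, any -O level other
+// than -O0 omits the frame pointer by default; -O0, other targets, or an
+// explicit -fno-omit-frame-pointer keep it.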
+
+void Clang::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ bool KernelOrKext = Args.hasArg(options::OPT_mkernel,
+ options::OPT_fapple_kext);
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ assert(Inputs.size() == 1 && "Unable to handle multiple inputs.");
+
+ // Invoke ourselves in -cc1 mode.
+ //
+ // FIXME: Implement custom jobs for internal actions.
+ CmdArgs.push_back("-cc1");
+
+ // Add the "effective" target triple.
+ CmdArgs.push_back("-triple");
+ std::string TripleStr = getToolChain().ComputeEffectiveClangTriple(Args);
+ CmdArgs.push_back(Args.MakeArgString(TripleStr));
+
+ // Select the appropriate action.
+ bool IsRewriter = false;
+ bool IsModernRewriter = false;
+
+ if (isa<AnalyzeJobAction>(JA)) {
+ assert(JA.getType() == types::TY_Plist && "Invalid output type.");
+ CmdArgs.push_back("-analyze");
+ } else if (isa<MigrateJobAction>(JA)) {
+ CmdArgs.push_back("-migrate");
+ } else if (isa<PreprocessJobAction>(JA)) {
+ if (Output.getType() == types::TY_Dependencies)
+ CmdArgs.push_back("-Eonly");
+ else
+ CmdArgs.push_back("-E");
+ } else if (isa<AssembleJobAction>(JA)) {
+ CmdArgs.push_back("-emit-obj");
+
+ if (UseRelaxAll(C, Args))
+ CmdArgs.push_back("-mrelax-all");
+
+ // When using an integrated assembler, translate -Wa, and -Xassembler
+ // options.
+ for (arg_iterator it = Args.filtered_begin(options::OPT_Wa_COMMA,
+ options::OPT_Xassembler),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+ A->claim();
+
+ for (unsigned i = 0, e = A->getNumValues(); i != e; ++i) {
+ StringRef Value = A->getValue(Args, i);
+
+ if (Value == "-force_cpusubtype_ALL") {
+ // Do nothing, this is the default and we don't support anything else.
+ } else if (Value == "-L") {
+ CmdArgs.push_back("-msave-temp-labels");
+ } else if (Value == "--fatal-warnings") {
+ CmdArgs.push_back("-mllvm");
+ CmdArgs.push_back("-fatal-assembler-warnings");
+ } else if (Value == "--noexecstack") {
+ CmdArgs.push_back("-mnoexecstack");
+ } else {
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Value;
+ }
+ }
+ }
+
+ // Also ignore explicit -force_cpusubtype_ALL option.
+ (void) Args.hasArg(options::OPT_force__cpusubtype__ALL);
+ } else if (isa<PrecompileJobAction>(JA)) {
+ // Use PCH if the user requested it.
+ bool UsePCH = D.CCCUsePCH;
+
+ if (UsePCH)
+ CmdArgs.push_back("-emit-pch");
+ else
+ CmdArgs.push_back("-emit-pth");
+ } else {
+ assert(isa<CompileJobAction>(JA) && "Invalid action for clang tool.");
+
+ if (JA.getType() == types::TY_Nothing) {
+ CmdArgs.push_back("-fsyntax-only");
+ } else if (JA.getType() == types::TY_LLVM_IR ||
+ JA.getType() == types::TY_LTO_IR) {
+ CmdArgs.push_back("-emit-llvm");
+ } else if (JA.getType() == types::TY_LLVM_BC ||
+ JA.getType() == types::TY_LTO_BC) {
+ CmdArgs.push_back("-emit-llvm-bc");
+ } else if (JA.getType() == types::TY_PP_Asm) {
+ CmdArgs.push_back("-S");
+ } else if (JA.getType() == types::TY_AST) {
+ CmdArgs.push_back("-emit-pch");
+ } else if (JA.getType() == types::TY_RewrittenObjC) {
+ CmdArgs.push_back("-rewrite-objc");
+ IsModernRewriter = true;
+ } else if (JA.getType() == types::TY_RewrittenLegacyObjC) {
+ CmdArgs.push_back("-rewrite-objc");
+ IsRewriter = true;
+ } else {
+ assert(JA.getType() == types::TY_PP_Asm &&
+ "Unexpected output type!");
+ }
+ }
+
+ // The make clang go fast button.
+ CmdArgs.push_back("-disable-free");
+
+ // Disable the verification pass in builds without assertions (NDEBUG).
+#ifdef NDEBUG
+ CmdArgs.push_back("-disable-llvm-verifier");
+#endif
+
+ // Set the main file name, so that debug info works even with
+ // -save-temps.
+ CmdArgs.push_back("-main-file-name");
+ CmdArgs.push_back(darwin::CC1::getBaseInputName(Args, Inputs));
+
+ // Some flags which affect the language (via preprocessor
+ // defines). See darwin::CC1::AddCPPArgs.
+ if (Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("-static-define");
+
+ if (isa<AnalyzeJobAction>(JA)) {
+ // Enable region store model by default.
+ CmdArgs.push_back("-analyzer-store=region");
+
+ // Treat blocks as analysis entry points.
+ CmdArgs.push_back("-analyzer-opt-analyze-nested-blocks");
+
+ CmdArgs.push_back("-analyzer-eagerly-assume");
+
+ CmdArgs.push_back("-analyzer-ipa=inlining");
+
+ // Add default argument set.
+ if (!Args.hasArg(options::OPT__analyzer_no_default_checks)) {
+ CmdArgs.push_back("-analyzer-checker=core");
+
+ if (getToolChain().getTriple().getOS() != llvm::Triple::Win32)
+ CmdArgs.push_back("-analyzer-checker=unix");
+
+ if (getToolChain().getTriple().getVendor() == llvm::Triple::Apple)
+ CmdArgs.push_back("-analyzer-checker=osx");
+
+ CmdArgs.push_back("-analyzer-checker=deadcode");
+
+ // Enable the following experimental checkers for testing.
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.UncheckedReturn");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.getpw");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.gets");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mktemp");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.mkstemp");
+ CmdArgs.push_back("-analyzer-checker=security.insecureAPI.vfork");
+ }
+
+ // Set the output format. The default is plist, for (lame) historical
+ // reasons.
+ CmdArgs.push_back("-analyzer-output");
+ if (Arg *A = Args.getLastArg(options::OPT__analyzer_output))
+ CmdArgs.push_back(A->getValue(Args));
+ else
+ CmdArgs.push_back("plist");
+
+ // Disable the presentation of standard compiler warnings when
+ // using --analyze. We only want to show static analyzer diagnostics
+ // or frontend errors.
+ CmdArgs.push_back("-w");
+
+ // Add -Xanalyzer arguments when running as analyzer.
+ Args.AddAllArgValues(CmdArgs, options::OPT_Xanalyzer);
+ }
+
+ CheckCodeGenerationOptions(D, Args);
+
+ // Perform argument translation for the LLVM backend. This
+ // takes some care in reconciling with llvm-gcc. The
+ // issue is that llvm-gcc translates these options based on
+ // the values in cc1, whereas we are processing based on
+ // the driver arguments.
+
+ // This comes from the default translation the driver + cc1
+ // would do to enable flag_pic.
+ //
+ // FIXME: Centralize this code.
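+ // For example, with '-fPIC -fno-pic' the last flag wins and PIC ends up
+ // disabled, while '-fpie' alone selects the "pic" relocation model and a
+ // PIE level of 1 (assuming no forced model and no -mkernel, -static, or
+ // -mdynamic-no-pic).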
+ Arg *LastPICArg = 0;
+ for (ArgList::const_iterator I = Args.begin(), E = Args.end(); I != E; ++I) {
+ if ((*I)->getOption().matches(options::OPT_fPIC) ||
+ (*I)->getOption().matches(options::OPT_fno_PIC) ||
+ (*I)->getOption().matches(options::OPT_fpic) ||
+ (*I)->getOption().matches(options::OPT_fno_pic) ||
+ (*I)->getOption().matches(options::OPT_fPIE) ||
+ (*I)->getOption().matches(options::OPT_fno_PIE) ||
+ (*I)->getOption().matches(options::OPT_fpie) ||
+ (*I)->getOption().matches(options::OPT_fno_pie)) {
+ LastPICArg = *I;
+ (*I)->claim();
+ }
+ }
+ bool PICDisabled = false;
+ bool PICEnabled = false;
+ bool PICForPIE = false;
+ if (LastPICArg) {
+ PICForPIE = (LastPICArg->getOption().matches(options::OPT_fPIE) ||
+ LastPICArg->getOption().matches(options::OPT_fpie));
+ PICEnabled = (PICForPIE ||
+ LastPICArg->getOption().matches(options::OPT_fPIC) ||
+ LastPICArg->getOption().matches(options::OPT_fpic));
+ PICDisabled = !PICEnabled;
+ }
+ // Note that these flags are trump-cards. Regardless of the order w.r.t. the
+ // PIC or PIE options above, if these show up, PIC is disabled.
+ if (Args.hasArg(options::OPT_mkernel))
+ PICDisabled = true;
+ if (Args.hasArg(options::OPT_static))
+ PICDisabled = true;
+ bool DynamicNoPIC = Args.hasArg(options::OPT_mdynamic_no_pic);
+
+ // Select the relocation model.
+ const char *Model = getToolChain().GetForcedPicModel();
+ if (!Model) {
+ if (DynamicNoPIC)
+ Model = "dynamic-no-pic";
+ else if (PICDisabled)
+ Model = "static";
+ else if (PICEnabled)
+ Model = "pic";
+ else
+ Model = getToolChain().GetDefaultRelocationModel();
+ }
+ StringRef ModelStr = Model ? Model : "";
+ if (Model && ModelStr != "pic") {
+ CmdArgs.push_back("-mrelocation-model");
+ CmdArgs.push_back(Model);
+ }
+
+ // Infer the __PIC__ and __PIE__ values.
+ if (ModelStr == "pic" && PICForPIE) {
+ CmdArgs.push_back("-pie-level");
+ CmdArgs.push_back((LastPICArg &&
+ LastPICArg->getOption().matches(options::OPT_fPIE)) ?
+ "2" : "1");
+ } else if (ModelStr == "pic" || ModelStr == "dynamic-no-pic") {
+ CmdArgs.push_back("-pic-level");
+ CmdArgs.push_back(((ModelStr != "dynamic-no-pic" && LastPICArg &&
+ LastPICArg->getOption().matches(options::OPT_fPIC)) ||
+ getToolChain().getTriple().isOSDarwin()) ? "2" : "1");
+ }
+
+ if (!Args.hasFlag(options::OPT_fmerge_all_constants,
+ options::OPT_fno_merge_all_constants))
+ CmdArgs.push_back("-fno-merge-all-constants");
+
+ // LLVM Code Generator Options.
+
+ if (Arg *A = Args.getLastArg(options::OPT_mregparm_EQ)) {
+ CmdArgs.push_back("-mregparm");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ if (Args.hasFlag(options::OPT_mrtd, options::OPT_mno_rtd, false))
+ CmdArgs.push_back("-mrtd");
+
+ if (shouldUseFramePointer(Args, getToolChain().getTriple()))
+ CmdArgs.push_back("-mdisable-fp-elim");
+ if (!Args.hasFlag(options::OPT_fzero_initialized_in_bss,
+ options::OPT_fno_zero_initialized_in_bss))
+ CmdArgs.push_back("-mno-zero-initialized-in-bss");
+ if (!Args.hasFlag(options::OPT_fstrict_aliasing,
+ options::OPT_fno_strict_aliasing,
+ getToolChain().IsStrictAliasingDefault()))
+ CmdArgs.push_back("-relaxed-aliasing");
+ if (Args.hasFlag(options::OPT_fstrict_enums, options::OPT_fno_strict_enums,
+ false))
+ CmdArgs.push_back("-fstrict-enums");
+ if (!Args.hasFlag(options::OPT_foptimize_sibling_calls,
+ options::OPT_fno_optimize_sibling_calls))
+ CmdArgs.push_back("-mdisable-tail-calls");
+
+ // Handle various floating point optimization flags, mapping them to the
+ // appropriate LLVM code generation flags. The pattern for all of these is
+ // that the codegen optimizations default to off; if some flag enables them
+ // and no later flag disables them, enable the codegen optimization. This is
+ // complicated by several "umbrella" flags.
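+ // For example, '-ffast-math' by itself enables both -menable-no-infs and
+ // -menable-no-nans, while a trailing '-fno-finite-math-only' suppresses
+ // both.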
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_ffinite_math_only,
+ options::OPT_fno_finite_math_only,
+ options::OPT_fhonor_infinities,
+ options::OPT_fno_honor_infinities))
+ if (A->getOption().getID() != options::OPT_fno_finite_math_only &&
+ A->getOption().getID() != options::OPT_fhonor_infinities)
+ CmdArgs.push_back("-menable-no-infs");
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_ffinite_math_only,
+ options::OPT_fno_finite_math_only,
+ options::OPT_fhonor_nans,
+ options::OPT_fno_honor_nans))
+ if (A->getOption().getID() != options::OPT_fno_finite_math_only &&
+ A->getOption().getID() != options::OPT_fhonor_nans)
+ CmdArgs.push_back("-menable-no-nans");
+
+ // -fno-math-errno is default.
+ bool MathErrno = false;
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_fmath_errno,
+ options::OPT_fno_math_errno)) {
+ if (A->getOption().getID() == options::OPT_fmath_errno) {
+ CmdArgs.push_back("-fmath-errno");
+ MathErrno = true;
+ }
+ }
+
+ // There are several flags which require disabling very specific
+ // optimizations. Any of these being disabled forces us to turn off the
+ // entire set of LLVM optimizations, so collect them through all the flag
+ // madness.
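+ // For example, '-ffast-math' by itself satisfies all of the conditions
+ // below and -menable-unsafe-fp-math is passed, while a trailing
+ // '-fno-associative-math' is enough to suppress it.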
+ bool AssociativeMath = false;
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations,
+ options::OPT_fassociative_math,
+ options::OPT_fno_associative_math))
+ if (A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
+ A->getOption().getID() != options::OPT_fno_associative_math)
+ AssociativeMath = true;
+ bool ReciprocalMath = false;
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations,
+ options::OPT_freciprocal_math,
+ options::OPT_fno_reciprocal_math))
+ if (A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
+ A->getOption().getID() != options::OPT_fno_reciprocal_math)
+ ReciprocalMath = true;
+ bool SignedZeros = true;
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations,
+ options::OPT_fsigned_zeros,
+ options::OPT_fno_signed_zeros))
+ if (A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
+ A->getOption().getID() != options::OPT_fsigned_zeros)
+ SignedZeros = false;
+ bool TrappingMath = true;
+ if (Arg *A = Args.getLastArg(options::OPT_ffast_math,
+ options::OPT_funsafe_math_optimizations,
+ options::OPT_fno_unsafe_math_optimizations,
+ options::OPT_ftrapping_math,
+ options::OPT_fno_trapping_math))
+ if (A->getOption().getID() != options::OPT_fno_unsafe_math_optimizations &&
+ A->getOption().getID() != options::OPT_ftrapping_math)
+ TrappingMath = false;
+ if (!MathErrno && AssociativeMath && ReciprocalMath && !SignedZeros &&
+ !TrappingMath)
+ CmdArgs.push_back("-menable-unsafe-fp-math");
+
+ // We separately look for the '-ffast-math' flag, and if we find it, tell the
+ // frontend to provide the appropriate preprocessor macros. This is distinct
+ // from enabling any optimizations as it induces a language change which must
+ // survive serialization and deserialization, etc.
+ if (Args.hasArg(options::OPT_ffast_math))
+ CmdArgs.push_back("-ffast-math");
+
+ // Decide whether to use verbose asm. Verbose assembly is the default on
+ // toolchains which have the integrated assembler on by default.
+ bool IsVerboseAsmDefault = getToolChain().IsIntegratedAssemblerDefault();
+ if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm,
+ IsVerboseAsmDefault) ||
+ Args.hasArg(options::OPT_dA))
+ CmdArgs.push_back("-masm-verbose");
+
+ if (Args.hasArg(options::OPT_fdebug_pass_structure)) {
+ CmdArgs.push_back("-mdebug-pass");
+ CmdArgs.push_back("Structure");
+ }
+ if (Args.hasArg(options::OPT_fdebug_pass_arguments)) {
+ CmdArgs.push_back("-mdebug-pass");
+ CmdArgs.push_back("Arguments");
+ }
+
+ // Enable -mconstructor-aliases except on Darwin, where we have to
+ // work around a linker bug; see <rdar://problem/7651567>.
+ if (!getToolChain().getTriple().isOSDarwin())
+ CmdArgs.push_back("-mconstructor-aliases");
+
+ // Darwin's kernel doesn't support guard variables; just die if we
+ // try to use them.
+ if (KernelOrKext && getToolChain().getTriple().isOSDarwin())
+ CmdArgs.push_back("-fforbid-guard-variables");
+
+ if (Args.hasArg(options::OPT_mms_bitfields)) {
+ CmdArgs.push_back("-mms-bitfields");
+ }
+
+ // This is a coarse approximation of what llvm-gcc actually does; both
+ // -fasynchronous-unwind-tables and -fnon-call-exceptions interact in more
+ // complicated ways.
+ bool AsynchronousUnwindTables =
+ Args.hasFlag(options::OPT_fasynchronous_unwind_tables,
+ options::OPT_fno_asynchronous_unwind_tables,
+ getToolChain().IsUnwindTablesDefault() &&
+ !KernelOrKext);
+ if (Args.hasFlag(options::OPT_funwind_tables, options::OPT_fno_unwind_tables,
+ AsynchronousUnwindTables))
+ CmdArgs.push_back("-munwind-tables");
+
+ if (Arg *A = Args.getLastArg(options::OPT_flimited_precision_EQ)) {
+ CmdArgs.push_back("-mlimit-float-precision");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ // FIXME: Handle -mtune=.
+ (void) Args.hasArg(options::OPT_mtune_EQ);
+
+ if (Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
+ CmdArgs.push_back("-mcode-model");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ // Add target specific cpu and features flags.
+ switch(getToolChain().getTriple().getArch()) {
+ default:
+ break;
+
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ AddARMTargetArgs(Args, CmdArgs, KernelOrKext);
+ break;
+
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ AddMIPSTargetArgs(Args, CmdArgs);
+ break;
+
+ case llvm::Triple::sparc:
+ AddSparcTargetArgs(Args, CmdArgs);
+ break;
+
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ AddX86TargetArgs(Args, CmdArgs);
+ break;
+
+ case llvm::Triple::hexagon:
+ AddHexagonTargetArgs(Args, CmdArgs);
+ break;
+ }
+
+
+
+ // Pass the linker version in use.
+ if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ)) {
+ CmdArgs.push_back("-target-linker-version");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ // -mno-omit-leaf-frame-pointer is the default on Darwin.
+ if (Args.hasFlag(options::OPT_momit_leaf_frame_pointer,
+ options::OPT_mno_omit_leaf_frame_pointer,
+ !getToolChain().getTriple().isOSDarwin()))
+ CmdArgs.push_back("-momit-leaf-frame-pointer");
+
+ // Explicitly error on some things we know we don't support and can't just
+ // ignore.
+ types::ID InputType = Inputs[0].getType();
+ if (!Args.hasArg(options::OPT_fallow_unsupported)) {
+ Arg *Unsupported;
+ if (types::isCXX(InputType) &&
+ getToolChain().getTriple().isOSDarwin() &&
+ getToolChain().getTriple().getArch() == llvm::Triple::x86) {
+ if ((Unsupported = Args.getLastArg(options::OPT_fapple_kext)) ||
+ (Unsupported = Args.getLastArg(options::OPT_mkernel)))
+ D.Diag(diag::err_drv_clang_unsupported_opt_cxx_darwin_i386)
+ << Unsupported->getOption().getName();
+ }
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_v);
+ Args.AddLastArg(CmdArgs, options::OPT_H);
+ if (D.CCPrintHeaders && !D.CCGenDiagnostics) {
+ CmdArgs.push_back("-header-include-file");
+ CmdArgs.push_back(D.CCPrintHeadersFilename ?
+ D.CCPrintHeadersFilename : "-");
+ }
+ Args.AddLastArg(CmdArgs, options::OPT_P);
+ Args.AddLastArg(CmdArgs, options::OPT_print_ivar_layout);
+
+ if (D.CCLogDiagnostics && !D.CCGenDiagnostics) {
+ CmdArgs.push_back("-diagnostic-log-file");
+ CmdArgs.push_back(D.CCLogDiagnosticsFilename ?
+ D.CCLogDiagnosticsFilename : "-");
+ }
+
+ // Special case debug options to only pass -g to clang. This is
+ // wrong.
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ if (Arg *A = Args.getLastArg(options::OPT_g_Group))
+ if (!A->getOption().matches(options::OPT_g0)) {
+ CmdArgs.push_back("-g");
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_ffunction_sections);
+ Args.AddAllArgs(CmdArgs, options::OPT_fdata_sections);
+
+ Args.AddAllArgs(CmdArgs, options::OPT_finstrument_functions);
+
+ if (Args.hasArg(options::OPT_ftest_coverage) ||
+ Args.hasArg(options::OPT_coverage))
+ CmdArgs.push_back("-femit-coverage-notes");
+ if (Args.hasArg(options::OPT_fprofile_arcs) ||
+ Args.hasArg(options::OPT_coverage))
+ CmdArgs.push_back("-femit-coverage-data");
+
+ if (C.getArgs().hasArg(options::OPT_c) ||
+ C.getArgs().hasArg(options::OPT_S)) {
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-coverage-file");
+ CmdArgs.push_back(Args.MakeArgString(Output.getFilename()));
+ }
+ }
+
+ // Pass options for controlling the default header search paths.
+ if (Args.hasArg(options::OPT_nostdinc)) {
+ CmdArgs.push_back("-nostdsysteminc");
+ CmdArgs.push_back("-nobuiltininc");
+ } else {
+ if (Args.hasArg(options::OPT_nostdlibinc))
+ CmdArgs.push_back("-nostdsysteminc");
+ Args.AddLastArg(CmdArgs, options::OPT_nostdincxx);
+ Args.AddLastArg(CmdArgs, options::OPT_nobuiltininc);
+ }
+
+ // Pass the path to compiler resource files.
+ CmdArgs.push_back("-resource-dir");
+ CmdArgs.push_back(D.ResourceDir.c_str());
+
+ Args.AddLastArg(CmdArgs, options::OPT_working_directory);
+
+ bool ARCMTEnabled = false;
+ if (!Args.hasArg(options::OPT_fno_objc_arc)) {
+ if (const Arg *A = Args.getLastArg(options::OPT_ccc_arcmt_check,
+ options::OPT_ccc_arcmt_modify,
+ options::OPT_ccc_arcmt_migrate)) {
+ ARCMTEnabled = true;
+ switch (A->getOption().getID()) {
+ default:
+ llvm_unreachable("missed a case");
+ case options::OPT_ccc_arcmt_check:
+ CmdArgs.push_back("-arcmt-check");
+ break;
+ case options::OPT_ccc_arcmt_modify:
+ CmdArgs.push_back("-arcmt-modify");
+ break;
+ case options::OPT_ccc_arcmt_migrate:
+ CmdArgs.push_back("-arcmt-migrate");
+ CmdArgs.push_back("-mt-migrate-directory");
+ CmdArgs.push_back(A->getValue(Args));
+
+ Args.AddLastArg(CmdArgs, options::OPT_arcmt_migrate_report_output);
+ Args.AddLastArg(CmdArgs, options::OPT_arcmt_migrate_emit_arc_errors);
+ break;
+ }
+ }
+ }
+
+ if (const Arg *A = Args.getLastArg(options::OPT_ccc_objcmt_migrate)) {
+ if (ARCMTEnabled) {
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "-ccc-arcmt-migrate";
+ }
+ CmdArgs.push_back("-mt-migrate-directory");
+ CmdArgs.push_back(A->getValue(Args));
+
+ if (!Args.hasArg(options::OPT_objcmt_migrate_literals,
+ options::OPT_objcmt_migrate_subscripting)) {
+ // None specified, means enable them all.
+ CmdArgs.push_back("-objcmt-migrate-literals");
+ CmdArgs.push_back("-objcmt-migrate-subscripting");
+ } else {
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_literals);
+ Args.AddLastArg(CmdArgs, options::OPT_objcmt_migrate_subscripting);
+ }
+ }
+
+ // Add preprocessing options like -I, -D, etc. if we are using the
+ // preprocessor.
+ //
+ // FIXME: Support -fpreprocessed
+ if (types::getPreprocessedType(InputType) != types::TY_INVALID)
+ AddPreprocessingOptions(C, D, Args, CmdArgs, Output, Inputs);
+
+ // Don't warn about "clang -c -DPIC -fPIC test.i" because libtool.m4 assumes
+ // that "The compiler can only warn and ignore the option if not recognized".
+ // When building with ccache, it will pass -D options to clang even on
+ // preprocessed inputs and configure concludes that -fPIC is not supported.
+ Args.ClaimAllArgs(options::OPT_D);
+
+ // Manually translate -O to -O2 and -O4 to -O3; let clang reject
+ // others.
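+ // For example, a bare '-O' becomes '-O2', '-O4' becomes '-O3', and '-Os'
+ // is passed through unchanged.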
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ if (A->getOption().matches(options::OPT_O4))
+ CmdArgs.push_back("-O3");
+ else if (A->getOption().matches(options::OPT_O) &&
+ A->getValue(Args)[0] == '\0')
+ CmdArgs.push_back("-O2");
+ else
+ A->render(Args, CmdArgs);
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_W_Group);
+ Args.AddLastArg(CmdArgs, options::OPT_pedantic);
+ Args.AddLastArg(CmdArgs, options::OPT_pedantic_errors);
+ Args.AddLastArg(CmdArgs, options::OPT_w);
+
+ // Handle -{std, ansi, trigraphs} -- take the last of -{std, ansi}
+ // (-ansi is equivalent to -std=c89).
+ //
+ // If a std is supplied, only add -trigraphs if it follows the
+ // option.
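+ // For example, '-ansi -std=gnu99' renders only '-std=gnu99', while
+ // '-std=c99 -trigraphs' renders both '-std=c99' and '-trigraphs'.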
+ if (Arg *Std = Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi)) {
+ if (Std->getOption().matches(options::OPT_ansi)) {
+ if (types::isCXX(InputType))
+ CmdArgs.push_back("-std=c++98");
+ else
+ CmdArgs.push_back("-std=c89");
+ } else
+ Std->render(Args, CmdArgs);
+
+ if (Arg *A = Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi,
+ options::OPT_trigraphs))
+ if (A != Std)
+ A->render(Args, CmdArgs);
+ } else {
+ // Honor -std-default.
+ //
+ // FIXME: Clang doesn't correctly handle -std= when the input language
+ // doesn't match. For the time being just ignore this for C++ inputs;
+ // eventually we want to do all the standard defaulting here instead of
+ // splitting it between the driver and clang -cc1.
+ if (!types::isCXX(InputType))
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_std_default_EQ,
+ "-std=", /*Joined=*/true);
+ Args.AddLastArg(CmdArgs, options::OPT_trigraphs);
+ }
+
+ // Map the bizarre '-Wwrite-strings' flag to a more sensible
+ // '-fconst-strings'; this better indicates its actual behavior.
+ if (Args.hasFlag(options::OPT_Wwrite_strings, options::OPT_Wno_write_strings,
+ false)) {
+ // For perfect compatibility with GCC, we do this even in the presence of
+ // '-w'. This flag names something other than a warning for GCC.
+ CmdArgs.push_back("-fconst-strings");
+ }
+
+ // GCC provides a macro definition '__DEPRECATED' when -Wdeprecated is active
+ // during C++ compilation, which it is by default. GCC keeps this define even
+ // in the presence of '-w'; match this behavior bug-for-bug.
+ if (types::isCXX(InputType) &&
+ Args.hasFlag(options::OPT_Wdeprecated, options::OPT_Wno_deprecated,
+ true)) {
+ CmdArgs.push_back("-fdeprecated-macro");
+ }
+
+ // Translate GCC's misnamed '-fasm' arguments to '-fgnu-keywords'.
+ if (Arg *Asm = Args.getLastArg(options::OPT_fasm, options::OPT_fno_asm)) {
+ if (Asm->getOption().matches(options::OPT_fasm))
+ CmdArgs.push_back("-fgnu-keywords");
+ else
+ CmdArgs.push_back("-fno-gnu-keywords");
+ }
+
+ if (ShouldDisableCFI(Args, getToolChain()))
+ CmdArgs.push_back("-fno-dwarf2-cfi-asm");
+
+ if (ShouldDisableDwarfDirectory(Args, getToolChain()))
+ CmdArgs.push_back("-fno-dwarf-directory-asm");
+
+ if (const char *pwd = ::getenv("PWD")) {
+ // GCC also verifies that stat(pwd) and stat(".") have the same inode
+ // number. Not doing those because stats are slow, but we could.
+ if (llvm::sys::path::is_absolute(pwd)) {
+ std::string CompDir = pwd;
+ CmdArgs.push_back("-fdebug-compilation-dir");
+ CmdArgs.push_back(Args.MakeArgString(CompDir));
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_ftemplate_depth_,
+ options::OPT_ftemplate_depth_EQ)) {
+ CmdArgs.push_back("-ftemplate-depth");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_depth_EQ)) {
+ CmdArgs.push_back("-fconstexpr-depth");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_Wlarge_by_value_copy_EQ,
+ options::OPT_Wlarge_by_value_copy_def)) {
+ CmdArgs.push_back("-Wlarge-by-value-copy");
+ if (A->getNumValues())
+ CmdArgs.push_back(A->getValue(Args));
+ else
+ CmdArgs.push_back("64"); // default value for -Wlarge-by-value-copy.
+ }
+
+ if (Args.hasArg(options::OPT__relocatable_pch))
+ CmdArgs.push_back("-relocatable-pch");
+
+ if (Arg *A = Args.getLastArg(options::OPT_fconstant_string_class_EQ)) {
+ CmdArgs.push_back("-fconstant-string-class");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_ftabstop_EQ)) {
+ CmdArgs.push_back("-ftabstop");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ CmdArgs.push_back("-ferror-limit");
+ if (Arg *A = Args.getLastArg(options::OPT_ferror_limit_EQ))
+ CmdArgs.push_back(A->getValue(Args));
+ else
+ CmdArgs.push_back("19");
+
+ if (Arg *A = Args.getLastArg(options::OPT_fmacro_backtrace_limit_EQ)) {
+ CmdArgs.push_back("-fmacro-backtrace-limit");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_ftemplate_backtrace_limit_EQ)) {
+ CmdArgs.push_back("-ftemplate-backtrace-limit");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_fconstexpr_backtrace_limit_EQ)) {
+ CmdArgs.push_back("-fconstexpr-backtrace-limit");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ // Pass -fmessage-length=.
+ CmdArgs.push_back("-fmessage-length");
+ if (Arg *A = Args.getLastArg(options::OPT_fmessage_length_EQ)) {
+ CmdArgs.push_back(A->getValue(Args));
+ } else {
+ // If -fmessage-length=N was not specified, determine whether this is a
+ // terminal and, if so, implicitly define -fmessage-length appropriately.
+ unsigned N = llvm::sys::Process::StandardErrColumns();
+ CmdArgs.push_back(Args.MakeArgString(Twine(N)));
+ }
+
+ if (const Arg *A = Args.getLastArg(options::OPT_fvisibility_EQ)) {
+ CmdArgs.push_back("-fvisibility");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_fvisibility_inlines_hidden);
+
+ // -fhosted is default.
+ if (Args.hasFlag(options::OPT_ffreestanding, options::OPT_fhosted, false) ||
+ KernelOrKext)
+ CmdArgs.push_back("-ffreestanding");
+
+ // Forward -f (flag) options which we can pass directly.
+ Args.AddLastArg(CmdArgs, options::OPT_fcatch_undefined_behavior);
+ Args.AddLastArg(CmdArgs, options::OPT_femit_all_decls);
+ Args.AddLastArg(CmdArgs, options::OPT_fformat_extensions);
+ Args.AddLastArg(CmdArgs, options::OPT_fheinous_gnu_extensions);
+ Args.AddLastArg(CmdArgs, options::OPT_flimit_debug_info);
+ Args.AddLastArg(CmdArgs, options::OPT_fno_limit_debug_info);
+ Args.AddLastArg(CmdArgs, options::OPT_fno_operator_names);
+ Args.AddLastArg(CmdArgs, options::OPT_faltivec);
+
+ // Report an error for -faltivec on anything other than PowerPC.
+ if (const Arg *A = Args.getLastArg(options::OPT_faltivec))
+ if (!(getToolChain().getTriple().getArch() == llvm::Triple::ppc ||
+ getToolChain().getTriple().getArch() == llvm::Triple::ppc64))
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << A->getAsString(Args) << "ppc/ppc64";
+
+ if (getToolChain().SupportsProfiling())
+ Args.AddLastArg(CmdArgs, options::OPT_pg);
+
+ if (Args.hasFlag(options::OPT_faddress_sanitizer,
+ options::OPT_fno_address_sanitizer, false))
+ CmdArgs.push_back("-faddress-sanitizer");
+
+ if (Args.hasFlag(options::OPT_fthread_sanitizer,
+ options::OPT_fno_thread_sanitizer, false))
+ CmdArgs.push_back("-fthread-sanitizer");
+
+ // -flax-vector-conversions is default.
+ if (!Args.hasFlag(options::OPT_flax_vector_conversions,
+ options::OPT_fno_lax_vector_conversions))
+ CmdArgs.push_back("-fno-lax-vector-conversions");
+
+ if (Args.getLastArg(options::OPT_fapple_kext))
+ CmdArgs.push_back("-fapple-kext");
+
+ Args.AddLastArg(CmdArgs, options::OPT_fobjc_sender_dependent_dispatch);
+ Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_print_source_range_info);
+ Args.AddLastArg(CmdArgs, options::OPT_fdiagnostics_parseable_fixits);
+ Args.AddLastArg(CmdArgs, options::OPT_ftime_report);
+ Args.AddLastArg(CmdArgs, options::OPT_ftrapv);
+
+ if (Arg *A = Args.getLastArg(options::OPT_ftrapv_handler_EQ)) {
+ CmdArgs.push_back("-ftrapv-handler");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_ftrap_function_EQ);
+
+ // -fno-strict-overflow implies -fwrapv if it isn't disabled, but
+ // -fstrict-overflow won't turn off an explicitly enabled -fwrapv.
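+ // For example, '-fwrapv -fstrict-overflow' still passes -fwrapv, and
+ // '-fno-strict-overflow' by itself passes -fwrapv as well.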
+ if (Arg *A = Args.getLastArg(options::OPT_fwrapv,
+ options::OPT_fno_wrapv)) {
+ if (A->getOption().matches(options::OPT_fwrapv))
+ CmdArgs.push_back("-fwrapv");
+ } else if (Arg *A = Args.getLastArg(options::OPT_fstrict_overflow,
+ options::OPT_fno_strict_overflow)) {
+ if (A->getOption().matches(options::OPT_fno_strict_overflow))
+ CmdArgs.push_back("-fwrapv");
+ }
+ Args.AddLastArg(CmdArgs, options::OPT_fwritable_strings);
+ Args.AddLastArg(CmdArgs, options::OPT_funroll_loops);
+
+ Args.AddLastArg(CmdArgs, options::OPT_pthread);
+
+ // -stack-protector=0 is default.
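+ // Level 1 corresponds to -fstack-protector, level 2 to
+ // -fstack-protector-all.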
+ unsigned StackProtectorLevel = 0;
+ if (Arg *A = Args.getLastArg(options::OPT_fno_stack_protector,
+ options::OPT_fstack_protector_all,
+ options::OPT_fstack_protector)) {
+ if (A->getOption().matches(options::OPT_fstack_protector))
+ StackProtectorLevel = 1;
+ else if (A->getOption().matches(options::OPT_fstack_protector_all))
+ StackProtectorLevel = 2;
+ } else {
+ StackProtectorLevel =
+ getToolChain().GetDefaultStackProtectorLevel(KernelOrKext);
+ }
+ if (StackProtectorLevel) {
+ CmdArgs.push_back("-stack-protector");
+ CmdArgs.push_back(Args.MakeArgString(Twine(StackProtectorLevel)));
+ }
+
+ // Translate -mstackrealign
+ if (Args.hasFlag(options::OPT_mstackrealign, options::OPT_mno_stackrealign,
+ false)) {
+ CmdArgs.push_back("-backend-option");
+ CmdArgs.push_back("-force-align-stack");
+ }
+ if (!Args.hasFlag(options::OPT_mno_stackrealign, options::OPT_mstackrealign,
+ false)) {
+ CmdArgs.push_back(Args.MakeArgString("-mstackrealign"));
+ }
+
+ if (Args.hasArg(options::OPT_mstack_alignment)) {
+ StringRef alignment = Args.getLastArgValue(options::OPT_mstack_alignment);
+ CmdArgs.push_back(Args.MakeArgString("-mstack-alignment=" + alignment));
+ }
+
+ // Forward -f options with positive and negative forms; we translate
+ // these by hand.
+
+ if (Args.hasArg(options::OPT_mkernel)) {
+ if (!Args.hasArg(options::OPT_fapple_kext) && types::isCXX(InputType))
+ CmdArgs.push_back("-fapple-kext");
+ if (!Args.hasArg(options::OPT_fbuiltin))
+ CmdArgs.push_back("-fno-builtin");
+ Args.ClaimAllArgs(options::OPT_fno_builtin);
+ }
+ // -fbuiltin is default.
+ else if (!Args.hasFlag(options::OPT_fbuiltin, options::OPT_fno_builtin))
+ CmdArgs.push_back("-fno-builtin");
+
+ if (!Args.hasFlag(options::OPT_fassume_sane_operator_new,
+ options::OPT_fno_assume_sane_operator_new))
+ CmdArgs.push_back("-fno-assume-sane-operator-new");
+
+ // -fblocks=0 is default.
+ if (Args.hasFlag(options::OPT_fblocks, options::OPT_fno_blocks,
+ getToolChain().IsBlocksDefault()) ||
+ (Args.hasArg(options::OPT_fgnu_runtime) &&
+ Args.hasArg(options::OPT_fobjc_nonfragile_abi) &&
+ !Args.hasArg(options::OPT_fno_blocks))) {
+ CmdArgs.push_back("-fblocks");
+
+ if (!Args.hasArg(options::OPT_fgnu_runtime) &&
+ !getToolChain().hasBlocksRuntime())
+ CmdArgs.push_back("-fblocks-runtime-optional");
+ }
+
+ // -fmodules enables modules (off by default). However, for C++/Objective-C++,
+ // users must also pass -fcxx-modules. The latter flag will disappear once the
+ // modules implementation is solid for C++/Objective-C++ programs as well.
+ if (Args.hasFlag(options::OPT_fmodules, options::OPT_fno_modules, false)) {
+ bool AllowedInCXX = Args.hasFlag(options::OPT_fcxx_modules,
+ options::OPT_fno_cxx_modules,
+ false);
+ if (AllowedInCXX || !types::isCXX(InputType))
+ CmdArgs.push_back("-fmodules");
+ }
+
+ // -faccess-control is default.
+ if (Args.hasFlag(options::OPT_fno_access_control,
+ options::OPT_faccess_control,
+ false))
+ CmdArgs.push_back("-fno-access-control");
+
+ // -felide-constructors is the default.
+ if (Args.hasFlag(options::OPT_fno_elide_constructors,
+ options::OPT_felide_constructors,
+ false))
+ CmdArgs.push_back("-fno-elide-constructors");
+
+ // -frtti is default.
+ if (!Args.hasFlag(options::OPT_frtti, options::OPT_fno_rtti) ||
+ KernelOrKext)
+ CmdArgs.push_back("-fno-rtti");
+
+ // -fshort-enums=0 is default for all architectures except Hexagon.
+ if (Args.hasFlag(options::OPT_fshort_enums,
+ options::OPT_fno_short_enums,
+ getToolChain().getTriple().getArch() ==
+ llvm::Triple::hexagon))
+ CmdArgs.push_back("-fshort-enums");
+
+ // -fsigned-char is default.
+ if (!Args.hasFlag(options::OPT_fsigned_char, options::OPT_funsigned_char,
+ isSignedCharDefault(getToolChain().getTriple())))
+ CmdArgs.push_back("-fno-signed-char");
+
+ // -fthreadsafe-static is default.
+ if (!Args.hasFlag(options::OPT_fthreadsafe_statics,
+ options::OPT_fno_threadsafe_statics))
+ CmdArgs.push_back("-fno-threadsafe-statics");
+
+ // -fuse-cxa-atexit is default.
+ if (!Args.hasFlag(options::OPT_fuse_cxa_atexit,
+ options::OPT_fno_use_cxa_atexit,
+ getToolChain().getTriple().getOS() != llvm::Triple::Cygwin &&
+ getToolChain().getTriple().getOS() != llvm::Triple::MinGW32 &&
+ getToolChain().getTriple().getArch() != llvm::Triple::hexagon) ||
+ KernelOrKext)
+ CmdArgs.push_back("-fno-use-cxa-atexit");
+
+ // -fms-extensions=0 is default.
+ if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
+ getToolChain().getTriple().getOS() == llvm::Triple::Win32))
+ CmdArgs.push_back("-fms-extensions");
+
+ // -fms-compatibility=0 is default.
+ if (Args.hasFlag(options::OPT_fms_compatibility,
+ options::OPT_fno_ms_compatibility,
+ (getToolChain().getTriple().getOS() == llvm::Triple::Win32 &&
+ Args.hasFlag(options::OPT_fms_extensions,
+ options::OPT_fno_ms_extensions,
+ true))))
+ CmdArgs.push_back("-fms-compatibility");
+
+ // -fmsc-version=1300 is default.
+ if (Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
+ getToolChain().getTriple().getOS() == llvm::Triple::Win32) ||
+ Args.hasArg(options::OPT_fmsc_version)) {
+ StringRef msc_ver = Args.getLastArgValue(options::OPT_fmsc_version);
+ if (msc_ver.empty())
+ CmdArgs.push_back("-fmsc-version=1300");
+ else
+ CmdArgs.push_back(Args.MakeArgString("-fmsc-version=" + msc_ver));
+ }
+
+
+ // -fborland-extensions=0 is default.
+ if (Args.hasFlag(options::OPT_fborland_extensions,
+ options::OPT_fno_borland_extensions, false))
+ CmdArgs.push_back("-fborland-extensions");
+
+ // -fno-delayed-template-parsing is default, except for Windows where MSVC STL
+ // needs it.
+ if (Args.hasFlag(options::OPT_fdelayed_template_parsing,
+ options::OPT_fno_delayed_template_parsing,
+ getToolChain().getTriple().getOS() == llvm::Triple::Win32))
+ CmdArgs.push_back("-fdelayed-template-parsing");
+
+ // -fgnu-keywords default varies depending on language; only pass if
+ // specified.
+ if (Arg *A = Args.getLastArg(options::OPT_fgnu_keywords,
+ options::OPT_fno_gnu_keywords))
+ A->render(Args, CmdArgs);
+
+ if (Args.hasFlag(options::OPT_fgnu89_inline,
+ options::OPT_fno_gnu89_inline,
+ false))
+ CmdArgs.push_back("-fgnu89-inline");
+
+ if (Args.hasArg(options::OPT_fno_inline))
+ CmdArgs.push_back("-fno-inline");
+
+ if (Args.hasArg(options::OPT_fno_inline_functions))
+ CmdArgs.push_back("-fno-inline-functions");
+
+ // -fobjc-nonfragile-abi=0 is default.
+ ObjCRuntime objCRuntime;
+ unsigned objcABIVersion = 0;
+ bool NeXTRuntimeIsDefault
+ = (IsRewriter || IsModernRewriter ||
+ getToolChain().getTriple().isOSDarwin());
+ if (Args.hasFlag(options::OPT_fnext_runtime, options::OPT_fgnu_runtime,
+ NeXTRuntimeIsDefault)) {
+ objCRuntime.setKind(ObjCRuntime::NeXT);
+ } else {
+ CmdArgs.push_back("-fgnu-runtime");
+ objCRuntime.setKind(ObjCRuntime::GNU);
+ }
+ getToolChain().configureObjCRuntime(objCRuntime);
+ if (objCRuntime.HasARC)
+ CmdArgs.push_back("-fobjc-runtime-has-arc");
+ if (objCRuntime.HasWeak)
+ CmdArgs.push_back("-fobjc-runtime-has-weak");
+ if (objCRuntime.HasTerminate)
+ CmdArgs.push_back("-fobjc-runtime-has-terminate");
+
+ // Compute the Objective-C ABI "version" to use. Version numbers are
+ // slightly confusing for historical reasons:
+ // 1 - Traditional "fragile" ABI
+ // 2 - Non-fragile ABI, version 1
+ // 3 - Non-fragile ABI, version 2
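+ // For example, '-fobjc-abi-version=3' and (with the non-fragile ABI
+ // enabled) '-fobjc-nonfragile-abi-version=2' both select version 3 here.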
+ objcABIVersion = 1;
+ // If -fobjc-abi-version= is present, use that to set the version.
+ if (Arg *A = Args.getLastArg(options::OPT_fobjc_abi_version_EQ)) {
+ if (StringRef(A->getValue(Args)) == "1")
+ objcABIVersion = 1;
+ else if (StringRef(A->getValue(Args)) == "2")
+ objcABIVersion = 2;
+ else if (StringRef(A->getValue(Args)) == "3")
+ objcABIVersion = 3;
+ else
+ D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ } else {
+ // Otherwise, determine if we are using the non-fragile ABI.
+ bool NonFragileABIIsDefault =
+ (IsModernRewriter ||
+ (!IsRewriter && getToolChain().IsObjCNonFragileABIDefault()));
+ if (Args.hasFlag(options::OPT_fobjc_nonfragile_abi,
+ options::OPT_fno_objc_nonfragile_abi,
+ NonFragileABIIsDefault)) {
+ // Determine the non-fragile ABI version to use.
+#ifdef DISABLE_DEFAULT_NONFRAGILEABI_TWO
+ unsigned NonFragileABIVersion = 1;
+#else
+ unsigned NonFragileABIVersion = 2;
+#endif
+
+ if (Arg *A = Args.getLastArg(
+ options::OPT_fobjc_nonfragile_abi_version_EQ)) {
+ if (StringRef(A->getValue(Args)) == "1")
+ NonFragileABIVersion = 1;
+ else if (StringRef(A->getValue(Args)) == "2")
+ NonFragileABIVersion = 2;
+ else
+ D.Diag(diag::err_drv_clang_unsupported)
+ << A->getAsString(Args);
+ }
+
+ objcABIVersion = 1 + NonFragileABIVersion;
+ } else {
+ objcABIVersion = 1;
+ }
+ }
+
+ if (objcABIVersion == 1) {
+ CmdArgs.push_back("-fobjc-fragile-abi");
+ } else {
+ // -fobjc-dispatch-method is only relevant with the nonfragile-abi, and
+ // legacy is the default.
+ if (!Args.hasFlag(options::OPT_fobjc_legacy_dispatch,
+ options::OPT_fno_objc_legacy_dispatch,
+ getToolChain().IsObjCLegacyDispatchDefault())) {
+ if (getToolChain().UseObjCMixedDispatch())
+ CmdArgs.push_back("-fobjc-dispatch-method=mixed");
+ else
+ CmdArgs.push_back("-fobjc-dispatch-method=non-legacy");
+ }
+ }
+
+ // -fobjc-default-synthesize-properties=1 is default. This only has an effect
+ // if the non-fragile Objective-C ABI is used.
+ if (getToolChain().IsObjCDefaultSynthPropertiesDefault()) {
+ CmdArgs.push_back("-fobjc-default-synthesize-properties");
+ }
+
+ // Allow -fno-objc-arr to trump -fobjc-arr/-fobjc-arc.
+ // NOTE: This logic is duplicated in ToolChains.cpp.
+ bool ARC = isObjCAutoRefCount(Args);
+ if (ARC) {
+ if (!getToolChain().SupportsObjCARC())
+ D.Diag(diag::err_arc_unsupported);
+
+ CmdArgs.push_back("-fobjc-arc");
+
+ // FIXME: It seems like this entire block, and several around it should be
+ // wrapped in isObjC, but for now we just use it here as this is where it
+ // was being used previously.
+ if (types::isCXX(InputType) && types::isObjC(InputType)) {
+ if (getToolChain().GetCXXStdlibType(Args) == ToolChain::CST_Libcxx)
+ CmdArgs.push_back("-fobjc-arc-cxxlib=libc++");
+ else
+ CmdArgs.push_back("-fobjc-arc-cxxlib=libstdc++");
+ }
+
+ // Allow the user to enable full exceptions code emission.
+ // We default to off for Objective-C, on for Objective-C++.
+ if (Args.hasFlag(options::OPT_fobjc_arc_exceptions,
+ options::OPT_fno_objc_arc_exceptions,
+ /*default*/ types::isCXX(InputType)))
+ CmdArgs.push_back("-fobjc-arc-exceptions");
+ }
+
+ // -fobjc-infer-related-result-type is the default, except in the Objective-C
+ // rewriter.
+ if (IsRewriter || IsModernRewriter)
+ CmdArgs.push_back("-fno-objc-infer-related-result-type");
+
+ // Handle -fobjc-gc and -fobjc-gc-only. They are exclusive, and -fobjc-gc-only
+ // takes precedence.
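+ // For example, '-fobjc-gc -fobjc-gc-only' renders only '-fobjc-gc-only',
+ // regardless of the order in which the two flags appear.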
+ const Arg *GCArg = Args.getLastArg(options::OPT_fobjc_gc_only);
+ if (!GCArg)
+ GCArg = Args.getLastArg(options::OPT_fobjc_gc);
+ if (GCArg) {
+ if (ARC) {
+ D.Diag(diag::err_drv_objc_gc_arr)
+ << GCArg->getAsString(Args);
+ } else if (getToolChain().SupportsObjCGC()) {
+ GCArg->render(Args, CmdArgs);
+ } else {
+ // FIXME: We should move this to a hard error.
+ D.Diag(diag::warn_drv_objc_gc_unsupported)
+ << GCArg->getAsString(Args);
+ }
+ }
+
+ // Add exception args.
+ addExceptionArgs(Args, InputType, getToolChain().getTriple(),
+ KernelOrKext, objcABIVersion, CmdArgs);
+
+ if (getToolChain().UseSjLjExceptions())
+ CmdArgs.push_back("-fsjlj-exceptions");
+
+ // C++ "sane" operator new.
+ if (!Args.hasFlag(options::OPT_fassume_sane_operator_new,
+ options::OPT_fno_assume_sane_operator_new))
+ CmdArgs.push_back("-fno-assume-sane-operator-new");
+
+ // -fconstant-cfstrings is default, and may be subject to argument translation
+ // on Darwin.
+ if (!Args.hasFlag(options::OPT_fconstant_cfstrings,
+ options::OPT_fno_constant_cfstrings) ||
+ !Args.hasFlag(options::OPT_mconstant_cfstrings,
+ options::OPT_mno_constant_cfstrings))
+ CmdArgs.push_back("-fno-constant-cfstrings");
+
+ // -fshort-wchar default varies depending on platform; only
+ // pass if specified.
+ if (Arg *A = Args.getLastArg(options::OPT_fshort_wchar))
+ A->render(Args, CmdArgs);
+
+ // -fno-pascal-strings is default, only pass non-default. If the tool chain
+ // happened to translate to -mpascal-strings, we want to back translate here.
+ //
+ // FIXME: This is gross; that translation should be pulled from the
+ // tool chain.
+ if (Args.hasFlag(options::OPT_fpascal_strings,
+ options::OPT_fno_pascal_strings,
+ false) ||
+ Args.hasFlag(options::OPT_mpascal_strings,
+ options::OPT_mno_pascal_strings,
+ false))
+ CmdArgs.push_back("-fpascal-strings");
+
+ // Honor -fpack-struct= and -fpack-struct, if given. Note that
+ // -fno-pack-struct doesn't apply to -fpack-struct=.
+ if (Arg *A = Args.getLastArg(options::OPT_fpack_struct_EQ)) {
+ CmdArgs.push_back("-fpack-struct");
+ CmdArgs.push_back(A->getValue(Args));
+ } else if (Args.hasFlag(options::OPT_fpack_struct,
+ options::OPT_fno_pack_struct, false)) {
+ CmdArgs.push_back("-fpack-struct");
+ CmdArgs.push_back("1");
+ }
+
+ if (Args.hasArg(options::OPT_mkernel) ||
+ Args.hasArg(options::OPT_fapple_kext)) {
+ if (!Args.hasArg(options::OPT_fcommon))
+ CmdArgs.push_back("-fno-common");
+ Args.ClaimAllArgs(options::OPT_fno_common);
+ }
+
+ // -fcommon is default, only pass non-default.
+ else if (!Args.hasFlag(options::OPT_fcommon, options::OPT_fno_common))
+ CmdArgs.push_back("-fno-common");
+
+ // -fsigned-bitfields is default, and clang doesn't yet support
+ // -funsigned-bitfields.
+ if (!Args.hasFlag(options::OPT_fsigned_bitfields,
+ options::OPT_funsigned_bitfields))
+ D.Diag(diag::warn_drv_clang_unsupported)
+ << Args.getLastArg(options::OPT_funsigned_bitfields)->getAsString(Args);
+
+ // -ffor-scope is default, and clang doesn't support -fno-for-scope.
+ if (!Args.hasFlag(options::OPT_ffor_scope,
+ options::OPT_fno_for_scope))
+ D.Diag(diag::err_drv_clang_unsupported)
+ << Args.getLastArg(options::OPT_fno_for_scope)->getAsString(Args);
+
+ // -fcaret-diagnostics is default.
+ if (!Args.hasFlag(options::OPT_fcaret_diagnostics,
+ options::OPT_fno_caret_diagnostics, true))
+ CmdArgs.push_back("-fno-caret-diagnostics");
+
+ // -fdiagnostics-fixit-info is default, only pass non-default.
+ if (!Args.hasFlag(options::OPT_fdiagnostics_fixit_info,
+ options::OPT_fno_diagnostics_fixit_info))
+ CmdArgs.push_back("-fno-diagnostics-fixit-info");
+
+ // Enable -fdiagnostics-show-option by default.
+ if (Args.hasFlag(options::OPT_fdiagnostics_show_option,
+ options::OPT_fno_diagnostics_show_option))
+ CmdArgs.push_back("-fdiagnostics-show-option");
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fdiagnostics_show_category_EQ)) {
+ CmdArgs.push_back("-fdiagnostics-show-category");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fdiagnostics_format_EQ)) {
+ CmdArgs.push_back("-fdiagnostics-format");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
+ if (Arg *A = Args.getLastArg(
+ options::OPT_fdiagnostics_show_note_include_stack,
+ options::OPT_fno_diagnostics_show_note_include_stack)) {
+ if (A->getOption().matches(
+ options::OPT_fdiagnostics_show_note_include_stack))
+ CmdArgs.push_back("-fdiagnostics-show-note-include-stack");
+ else
+ CmdArgs.push_back("-fno-diagnostics-show-note-include-stack");
+ }
+
+ // Color diagnostics are the default, unless the terminal doesn't support
+ // them.
+ if (Args.hasFlag(options::OPT_fcolor_diagnostics,
+ options::OPT_fno_color_diagnostics,
+ llvm::sys::Process::StandardErrHasColors()))
+ CmdArgs.push_back("-fcolor-diagnostics");
+
+ if (!Args.hasFlag(options::OPT_fshow_source_location,
+ options::OPT_fno_show_source_location))
+ CmdArgs.push_back("-fno-show-source-location");
+
+ if (!Args.hasFlag(options::OPT_fshow_column,
+ options::OPT_fno_show_column,
+ true))
+ CmdArgs.push_back("-fno-show-column");
+
+ if (!Args.hasFlag(options::OPT_fspell_checking,
+ options::OPT_fno_spell_checking))
+ CmdArgs.push_back("-fno-spell-checking");
+
+
+ // Silently ignore -fasm-blocks for now.
+ (void) Args.hasFlag(options::OPT_fasm_blocks, options::OPT_fno_asm_blocks,
+ false);
+
+ if (Arg *A = Args.getLastArg(options::OPT_fshow_overloads_EQ))
+ A->render(Args, CmdArgs);
+
+ // -fdollars-in-identifiers default varies depending on platform and
+ // language; only pass if specified.
+ if (Arg *A = Args.getLastArg(options::OPT_fdollars_in_identifiers,
+ options::OPT_fno_dollars_in_identifiers)) {
+ if (A->getOption().matches(options::OPT_fdollars_in_identifiers))
+ CmdArgs.push_back("-fdollars-in-identifiers");
+ else
+ CmdArgs.push_back("-fno-dollars-in-identifiers");
+ }
+
+ // -funit-at-a-time is default, and we don't support -fno-unit-at-a-time for
+ // practical purposes.
+ if (Arg *A = Args.getLastArg(options::OPT_funit_at_a_time,
+ options::OPT_fno_unit_at_a_time)) {
+ if (A->getOption().matches(options::OPT_fno_unit_at_a_time))
+ D.Diag(diag::warn_drv_clang_unsupported) << A->getAsString(Args);
+ }
+
+ if (Args.hasFlag(options::OPT_fapple_pragma_pack,
+ options::OPT_fno_apple_pragma_pack, false))
+ CmdArgs.push_back("-fapple-pragma-pack");
+
+ // Default to -fno-builtin-str{cat,cpy} on Darwin for ARM.
+ //
+ // FIXME: This is disabled until clang -cc1 supports -fno-builtin-foo. PR4941.
+#if 0
+ if (getToolChain().getTriple().isOSDarwin() &&
+ (getToolChain().getTriple().getArch() == llvm::Triple::arm ||
+ getToolChain().getTriple().getArch() == llvm::Triple::thumb)) {
+ if (!Args.hasArg(options::OPT_fbuiltin_strcat))
+ CmdArgs.push_back("-fno-builtin-strcat");
+ if (!Args.hasArg(options::OPT_fbuiltin_strcpy))
+ CmdArgs.push_back("-fno-builtin-strcpy");
+ }
+#endif
+
+ // Only allow -traditional or -traditional-cpp in preprocessing modes.
+ if (Arg *A = Args.getLastArg(options::OPT_traditional,
+ options::OPT_traditional_cpp)) {
+ if (isa<PreprocessJobAction>(JA))
+ CmdArgs.push_back("-traditional-cpp");
+ else
+ D.Diag(diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_dM);
+ Args.AddLastArg(CmdArgs, options::OPT_dD);
+
+ // Handle serialized diagnostics.
+ if (Arg *A = Args.getLastArg(options::OPT__serialize_diags)) {
+ CmdArgs.push_back("-serialize-diagnostic-file");
+ CmdArgs.push_back(Args.MakeArgString(A->getValue(Args)));
+ }
+
+ // Forward -Xclang arguments to -cc1, and -mllvm arguments to the LLVM option
+ // parser.
+ Args.AddAllArgValues(CmdArgs, options::OPT_Xclang);
+ for (arg_iterator it = Args.filtered_begin(options::OPT_mllvm),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ (*it)->claim();
+
+ // We translate this by hand to the -cc1 argument, since the nightly
+ // tester uses it and developers have been trained to spell it with -mllvm.
+ if (StringRef((*it)->getValue(Args, 0)) == "-disable-llvm-optzns")
+ CmdArgs.push_back("-disable-llvm-optzns");
+ else
+ (*it)->render(Args, CmdArgs);
+ }
+
+ if (Output.getType() == types::TY_Dependencies) {
+ // Handled with other dependency code.
+ } else if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ CmdArgs.push_back("-x");
+ CmdArgs.push_back(types::getTypeName(II.getType()));
+ if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+ else
+ II.getInputArg().renderAsInput(Args, CmdArgs);
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_undef);
+
+ const char *Exec = getToolChain().getDriver().getClangProgramPath();
+
+ // Optionally embed the -cc1 level arguments into the debug info, for build
+ // analysis.
+ if (getToolChain().UseDwarfDebugFlags()) {
+ ArgStringList OriginalArgs;
+ for (ArgList::const_iterator it = Args.begin(),
+ ie = Args.end(); it != ie; ++it)
+ (*it)->render(Args, OriginalArgs);
+
+ SmallString<256> Flags;
+ Flags += Exec;
+ for (unsigned i = 0, e = OriginalArgs.size(); i != e; ++i) {
+ Flags += " ";
+ Flags += OriginalArgs[i];
+ }
+ CmdArgs.push_back("-dwarf-debug-flags");
+ CmdArgs.push_back(Args.MakeArgString(Flags.str()));
+ }
+
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+
+ if (Arg *A = Args.getLastArg(options::OPT_pg))
+ if (Args.hasArg(options::OPT_fomit_frame_pointer))
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << "-fomit-frame-pointer" << A->getAsString(Args);
+
+ // Claim some arguments which clang supports automatically.
+
+ // -fpch-preprocess is used with gcc to add a special marker in the output to
+ // include the PCH file. Clang's PTH solution is completely transparent, so we
+ // do not need to deal with it at all.
+ Args.ClaimAllArgs(options::OPT_fpch_preprocess);
+
+ // Claim some arguments which clang doesn't support, but we don't
+ // care to warn the user about.
+ Args.ClaimAllArgs(options::OPT_clang_ignored_f_Group);
+ Args.ClaimAllArgs(options::OPT_clang_ignored_m_Group);
+
+ // Disable warnings for clang -E -use-gold-plugin -emit-llvm foo.c
+ Args.ClaimAllArgs(options::OPT_use_gold_plugin);
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+}
+
+void ClangAs::AddARMTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const Driver &D = getToolChain().getDriver();
+ llvm::Triple Triple = getToolChain().getTriple();
+
+ // Set the CPU based on -march= and -mcpu=.
+ CmdArgs.push_back("-target-cpu");
+ CmdArgs.push_back(getARMTargetCPU(Args, Triple));
+
+ // Honor -mfpu=.
+ if (const Arg *A = Args.getLastArg(options::OPT_mfpu_EQ))
+ addFPUArgs(D, A, Args, CmdArgs);
+
+ // Honor -mfpmath=.
+ if (const Arg *A = Args.getLastArg(options::OPT_mfpmath_EQ))
+ addFPMathArgs(D, A, Args, CmdArgs, getARMTargetCPU(Args, Triple));
+}
+
+void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ assert(Inputs.size() == 1 && "Unexpected number of inputs.");
+ const InputInfo &Input = Inputs[0];
+
+ // Don't warn about "clang -w -c foo.s"
+ Args.ClaimAllArgs(options::OPT_w);
+ // and "clang -emit-llvm -c foo.s"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+ // and "clang -use-gold-plugin -c foo.s"
+ Args.ClaimAllArgs(options::OPT_use_gold_plugin);
+
+ // Invoke ourselves in -cc1as mode.
+ //
+ // FIXME: Implement custom jobs for internal actions.
+ CmdArgs.push_back("-cc1as");
+
+ // Add the "effective" target triple.
+ CmdArgs.push_back("-triple");
+ std::string TripleStr =
+ getToolChain().ComputeEffectiveClangTriple(Args, Input.getType());
+ CmdArgs.push_back(Args.MakeArgString(TripleStr));
+
+ // Set the output mode; we currently only expect to be used as a real
+ // assembler.
+ CmdArgs.push_back("-filetype");
+ CmdArgs.push_back("obj");
+
+ if (UseRelaxAll(C, Args))
+ CmdArgs.push_back("-relax-all");
+
+ // Add target specific cpu and features flags.
+ switch(getToolChain().getTriple().getArch()) {
+ default:
+ break;
+
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ AddARMTargetArgs(Args, CmdArgs);
+ break;
+ }
+
+ // Ignore explicit -force_cpusubtype_ALL option.
+ (void) Args.hasArg(options::OPT_force__cpusubtype__ALL);
+
+ // Determine the original source input.
+ const Action *SourceAction = &JA;
+ while (SourceAction->getKind() != Action::InputClass) {
+ assert(!SourceAction->getInputs().empty() && "unexpected root action!");
+ SourceAction = SourceAction->getInputs()[0];
+ }
+
+ // Forward -g, assuming we are dealing with an actual assembly file.
+ if (SourceAction->getType() == types::TY_Asm ||
+ SourceAction->getType() == types::TY_PP_Asm) {
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ if (Arg *A = Args.getLastArg(options::OPT_g_Group))
+ if (!A->getOption().matches(options::OPT_g0))
+ CmdArgs.push_back("-g");
+ }
+
+ // Optionally embed the -cc1as level arguments into the debug info, for build
+ // analysis.
+ if (getToolChain().UseDwarfDebugFlags()) {
+ ArgStringList OriginalArgs;
+ for (ArgList::const_iterator it = Args.begin(),
+ ie = Args.end(); it != ie; ++it)
+ (*it)->render(Args, OriginalArgs);
+
+ SmallString<256> Flags;
+ const char *Exec = getToolChain().getDriver().getClangProgramPath();
+ Flags += Exec;
+ for (unsigned i = 0, e = OriginalArgs.size(); i != e; ++i) {
+ Flags += " ";
+ Flags += OriginalArgs[i];
+ }
+ CmdArgs.push_back("-dwarf-debug-flags");
+ CmdArgs.push_back(Args.MakeArgString(Flags.str()));
+ }
+
+ // FIXME: Add -static support, once we have it.
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+ Args.AddAllArgs(CmdArgs, options::OPT_mllvm);
+
+ assert(Output.isFilename() && "Unexpected lipo output.");
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ assert(Input.isFilename() && "Invalid input.");
+ CmdArgs.push_back(Input.getFilename());
+
+ const char *Exec = getToolChain().getDriver().getClangProgramPath();
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ for (ArgList::const_iterator
+ it = Args.begin(), ie = Args.end(); it != ie; ++it) {
+ Arg *A = *it;
+ if (A->getOption().hasForwardToGCC()) {
+ // Don't forward any -g arguments to assembly steps.
+ if (isa<AssembleJobAction>(JA) &&
+ A->getOption().matches(options::OPT_g_Group))
+ continue;
+
+ // It is unfortunate that we have to claim here, as this means
+ // we will basically never report anything interesting for
+ // platforms using a generic gcc, even if we are just using gcc
+ // to get to the assembler.
+ A->claim();
+ A->render(Args, CmdArgs);
+ }
+ }
+
+ RenderExtraToolArgs(JA, CmdArgs);
+
+ // If using a driver driver, force the arch.
+ const std::string &Arch = getToolChain().getArchName();
+ if (getToolChain().getTriple().isOSDarwin()) {
+ CmdArgs.push_back("-arch");
+
+ // FIXME: Remove these special cases.
+ if (Arch == "powerpc")
+ CmdArgs.push_back("ppc");
+ else if (Arch == "powerpc64")
+ CmdArgs.push_back("ppc64");
+ else
+ CmdArgs.push_back(Args.MakeArgString(Arch));
+ }
+
+ // Try to force gcc to match the tool chain we want, if we recognize
+ // the arch.
+ //
+ // FIXME: The triple class should directly provide the information we want
+ // here.
+ if (Arch == "i386" || Arch == "powerpc")
+ CmdArgs.push_back("-m32");
+ else if (Arch == "x86_64" || Arch == "powerpc64")
+ CmdArgs.push_back("-m64");
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Unexpected output");
+ CmdArgs.push_back("-fsyntax-only");
+ }
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ // Only pass -x if gcc will understand it; otherwise hope gcc
+ // understands the suffix correctly. The main case where this could go
+ // wrong is linker inputs that happen to have an odd suffix; really the
+ // only way to get that to happen is a command like '-x foobar a.c',
+ // which will treat a.c like a linker input.
+ //
+ // FIXME: For the linker case specifically, can we safely convert
+ // inputs into '-Wl,' options?
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+
+ // Don't try to pass LLVM or AST inputs to a generic gcc.
+ if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
+ II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
+ D.Diag(diag::err_drv_no_linker_llvm_support)
+ << getToolChain().getTripleString();
+ else if (II.getType() == types::TY_AST)
+ D.Diag(diag::err_drv_no_ast_support)
+ << getToolChain().getTripleString();
+
+ if (types::canTypeBeUserSpecified(II.getType())) {
+ CmdArgs.push_back("-x");
+ CmdArgs.push_back(types::getTypeName(II.getType()));
+ }
+
+ if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+ else {
+ const Arg &A = II.getInputArg();
+
+ // Reverse translate some rewritten options.
+ if (A.getOption().matches(options::OPT_Z_reserved_lib_stdcxx)) {
+ CmdArgs.push_back("-lstdc++");
+ continue;
+ }
+
+ // Don't render as input, we need gcc to do the translations.
+ A.render(Args, CmdArgs);
+ }
+ }
+
+ const std::string customGCCName = D.getCCCGenericGCCName();
+ const char *GCCName;
+ if (!customGCCName.empty())
+ GCCName = customGCCName.c_str();
+ else if (D.CCCIsCXX) {
+ GCCName = "g++";
+ } else
+ GCCName = "gcc";
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(GCCName));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void gcc::Preprocess::RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const {
+ CmdArgs.push_back("-E");
+}
+
+void gcc::Precompile::RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const {
+ // The type is good enough.
+}
+
+void gcc::Compile::RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const {
+ const Driver &D = getToolChain().getDriver();
+
+ // If -flto, etc. are present then make sure not to force assembly output.
+ if (JA.getType() == types::TY_LLVM_IR || JA.getType() == types::TY_LTO_IR ||
+ JA.getType() == types::TY_LLVM_BC || JA.getType() == types::TY_LTO_BC)
+ CmdArgs.push_back("-c");
+ else {
+ if (JA.getType() != types::TY_PP_Asm)
+ D.Diag(diag::err_drv_invalid_gcc_output_type)
+ << getTypeName(JA.getType());
+
+ CmdArgs.push_back("-S");
+ }
+}
+
+void gcc::Assemble::RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const {
+ CmdArgs.push_back("-c");
+}
+
+void gcc::Link::RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const {
+ // The types are (hopefully) good enough.
+}
+
+// Hexagon tools start.
+void hexagon::Assemble::RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const {
+
+}
+void hexagon::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ std::string MarchString = "-march=";
+ MarchString += getHexagonTargetCPU(Args);
+ CmdArgs.push_back(Args.MakeArgString(MarchString));
+
+ RenderExtraToolArgs(JA, CmdArgs);
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Unexpected output");
+ CmdArgs.push_back("-fsyntax-only");
+ }
+
+
+ // Only pass -x if gcc will understand it; otherwise hope gcc
+ // understands the suffix correctly. The main case where this could go
+ // wrong is linker inputs that happen to have an odd suffix; really the
+ // only way to get that to happen is a command like '-x foobar a.c',
+ // which will treat a.c like a linker input.
+ //
+ // FIXME: For the linker case specifically, can we safely convert
+ // inputs into '-Wl,' options?
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+
+ // Don't try to pass LLVM or AST inputs to a generic gcc.
+ if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
+ II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
+ D.Diag(clang::diag::err_drv_no_linker_llvm_support)
+ << getToolChain().getTripleString();
+ else if (II.getType() == types::TY_AST)
+ D.Diag(clang::diag::err_drv_no_ast_support)
+ << getToolChain().getTripleString();
+
+ if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+ else
+ // Don't render as an input; we need gcc to do the translations.
+ // FIXME: Pranav: What is this?
+ II.getInputArg().render(Args, CmdArgs);
+ }
+
+ const char *GCCName = "hexagon-as";
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(GCCName));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void hexagon::Link::RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const {
+ // The types are (hopefully) good enough.
+}
+
+void hexagon::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ for (ArgList::const_iterator
+ it = Args.begin(), ie = Args.end(); it != ie; ++it) {
+ Arg *A = *it;
+ if (A->getOption().hasForwardToGCC()) {
+ // Don't forward any -g arguments to assembly steps.
+ if (isa<AssembleJobAction>(JA) &&
+ A->getOption().matches(options::OPT_g_Group))
+ continue;
+
+ // It is unfortunate that we have to claim here, as this means
+ // we will basically never report anything interesting for
+ // platforms using a generic gcc, even if we are just using gcc
+ // to get to the assembler.
+ A->claim();
+ A->render(Args, CmdArgs);
+ }
+ }
+
+ RenderExtraToolArgs(JA, CmdArgs);
+
+ // Add Arch Information
+ Arg *A;
+ if ((A = getLastHexagonArchArg(Args)) &&
+ A->getOption().matches(options::OPT_m_Joined))
+ A->render(Args, CmdArgs);
+ else
+ CmdArgs.push_back(Args.MakeArgString("-m" + getHexagonTargetCPU(Args)));
+
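+ // Always request QDSP6 compatibility mode.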
+ CmdArgs.push_back("-mqdsp6-compat");
+
+ const char *GCCName;
+ if (C.getDriver().CCCIsCXX)
+ GCCName = "hexagon-g++";
+ else
+ GCCName = "hexagon-gcc";
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(GCCName));
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ }
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+
+ // Don't try to pass LLVM or AST inputs to a generic gcc.
+ if (II.getType() == types::TY_LLVM_IR || II.getType() == types::TY_LTO_IR ||
+ II.getType() == types::TY_LLVM_BC || II.getType() == types::TY_LTO_BC)
+ D.Diag(clang::diag::err_drv_no_linker_llvm_support)
+ << getToolChain().getTripleString();
+ else if (II.getType() == types::TY_AST)
+ D.Diag(clang::diag::err_drv_no_ast_support)
+ << getToolChain().getTripleString();
+
+ if (II.isFilename())
+ CmdArgs.push_back(II.getFilename());
+ else
+ // Don't render as an input; we need gcc to do the translations.
+ // FIXME: Pranav: What is this?
+ II.getInputArg().render(Args, CmdArgs);
+ }
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+// Hexagon tools end.
+
+const char *darwin::CC1::getCC1Name(types::ID Type) const {
+ switch (Type) {
+ default:
+ llvm_unreachable("Unexpected type for Darwin CC1 tool.");
+ case types::TY_Asm:
+ case types::TY_C: case types::TY_CHeader:
+ case types::TY_PP_C: case types::TY_PP_CHeader:
+ return "cc1";
+ case types::TY_ObjC: case types::TY_ObjCHeader:
+ case types::TY_PP_ObjC: case types::TY_PP_ObjC_Alias:
+ case types::TY_PP_ObjCHeader:
+ return "cc1obj";
+ case types::TY_CXX: case types::TY_CXXHeader:
+ case types::TY_PP_CXX: case types::TY_PP_CXXHeader:
+ return "cc1plus";
+ case types::TY_ObjCXX: case types::TY_ObjCXXHeader:
+ case types::TY_PP_ObjCXX: case types::TY_PP_ObjCXX_Alias:
+ case types::TY_PP_ObjCXXHeader:
+ return "cc1objplus";
+ }
+}
+
+void darwin::CC1::anchor() {}
+
+const char *darwin::CC1::getBaseInputName(const ArgList &Args,
+ const InputInfoList &Inputs) {
+ return Args.MakeArgString(
+ llvm::sys::path::filename(Inputs[0].getBaseInput()));
+}
+
+const char *darwin::CC1::getBaseInputStem(const ArgList &Args,
+ const InputInfoList &Inputs) {
+ const char *Str = getBaseInputName(Args, Inputs);
+
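+ // Strip the trailing extension, if any, from the base input name.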
+ if (const char *End = strrchr(Str, '.'))
+ return Args.MakeArgString(std::string(Str, End));
+
+ return Str;
+}
+
+const char *
+darwin::CC1::getDependencyFileName(const ArgList &Args,
+ const InputInfoList &Inputs) {
+ // FIXME: Think about this more.
+ std::string Res;
+
+ if (Arg *OutputOpt = Args.getLastArg(options::OPT_o)) {
+ std::string Str(OutputOpt->getValue(Args));
+ Res = Str.substr(0, Str.rfind('.'));
+ } else {
+ Res = darwin::CC1::getBaseInputStem(Args, Inputs);
+ }
+ return Args.MakeArgString(Res + ".d");
+}
+
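+// Drop options from CmdArgs that the classic GCC cc1 tools do not understand.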
+void darwin::CC1::RemoveCC1UnsupportedArgs(ArgStringList &CmdArgs) const {
+ for (ArgStringList::iterator it = CmdArgs.begin(), ie = CmdArgs.end();
+ it != ie;) {
+
+ StringRef Option = *it;
+ bool RemoveOption = false;
+
+ // Erase both -fmodule-cache-path and its argument.
+ if (Option.equals("-fmodule-cache-path") && it+2 != ie) {
+ it = CmdArgs.erase(it, it+2);
+ ie = CmdArgs.end();
+ continue;
+ }
+
+ // Remove unsupported -f options.
+ if (Option.startswith("-f")) {
+ // Remove -f/-fno- to reduce the number of cases.
+ if (Option.startswith("-fno-"))
+ Option = Option.substr(5);
+ else
+ Option = Option.substr(2);
+ RemoveOption = llvm::StringSwitch<bool>(Option)
+ .Case("altivec", true)
+ .Case("modules", true)
+ .Case("diagnostics-show-note-include-stack", true)
+ .Default(false);
+ }
+
+ // Handle machine specific options.
+ if (Option.startswith("-m")) {
+ RemoveOption = llvm::StringSwitch<bool>(Option)
+ .Case("-mthumb", true)
+ .Case("-mno-thumb", true)
+ .Case("-mno-fused-madd", true)
+ .Case("-mlong-branch", true)
+ .Case("-mlongcall", true)
+ .Case("-mcpu=G4", true)
+ .Case("-mcpu=G5", true)
+ .Default(false);
+ }
+
+ // Handle warning options.
+ if (Option.startswith("-W")) {
+ // Remove -W/-Wno- to reduce the number of cases.
+ if (Option.startswith("-Wno-"))
+ Option = Option.substr(5);
+ else
+ Option = Option.substr(2);
+
+ RemoveOption = llvm::StringSwitch<bool>(Option)
+ .Case("address-of-temporary", true)
+ .Case("ambiguous-member-template", true)
+ .Case("analyzer-incompatible-plugin", true)
+ .Case("array-bounds", true)
+ .Case("array-bounds-pointer-arithmetic", true)
+ .Case("bind-to-temporary-copy", true)
+ .Case("bitwise-op-parentheses", true)
+ .Case("bool-conversions", true)
+ .Case("builtin-macro-redefined", true)
+ .Case("c++-hex-floats", true)
+ .Case("c++0x-compat", true)
+ .Case("c++0x-extensions", true)
+ .Case("c++0x-narrowing", true)
+ .Case("c++11-compat", true)
+ .Case("c++11-extensions", true)
+ .Case("c++11-narrowing", true)
+ .Case("conditional-uninitialized", true)
+ .Case("constant-conversion", true)
+ .Case("conversion-null", true)
+ .Case("CFString-literal", true)
+ .Case("constant-logical-operand", true)
+ .Case("custom-atomic-properties", true)
+ .Case("default-arg-special-member", true)
+ .Case("delegating-ctor-cycles", true)
+ .Case("delete-non-virtual-dtor", true)
+ .Case("deprecated-implementations", true)
+ .Case("deprecated-writable-strings", true)
+ .Case("distributed-object-modifiers", true)
+ .Case("duplicate-method-arg", true)
+ .Case("dynamic-class-memaccess", true)
+ .Case("enum-compare", true)
+ .Case("exit-time-destructors", true)
+ .Case("gnu", true)
+ .Case("gnu-designator", true)
+ .Case("header-hygiene", true)
+ .Case("idiomatic-parentheses", true)
+ .Case("ignored-qualifiers", true)
+ .Case("implicit-atomic-properties", true)
+ .Case("incompatible-pointer-types", true)
+ .Case("incomplete-implementation", true)
+ .Case("initializer-overrides", true)
+ .Case("invalid-noreturn", true)
+ .Case("invalid-token-paste", true)
+ .Case("language-extension-token", true)
+ .Case("literal-conversion", true)
+ .Case("literal-range", true)
+ .Case("local-type-template-args", true)
+ .Case("logical-op-parentheses", true)
+ .Case("method-signatures", true)
+ .Case("microsoft", true)
+ .Case("mismatched-tags", true)
+ .Case("missing-method-return-type", true)
+ .Case("non-pod-varargs", true)
+ .Case("nonfragile-abi2", true)
+ .Case("null-arithmetic", true)
+ .Case("null-dereference", true)
+ .Case("out-of-line-declaration", true)
+ .Case("overriding-method-mismatch", true)
+ .Case("readonly-setter-attrs", true)
+ .Case("return-stack-address", true)
+ .Case("self-assign", true)
+ .Case("semicolon-before-method-body", true)
+ .Case("sentinel", true)
+ .Case("shift-overflow", true)
+ .Case("shift-sign-overflow", true)
+ .Case("sign-conversion", true)
+ .Case("sizeof-array-argument", true)
+ .Case("sizeof-pointer-memaccess", true)
+ .Case("string-compare", true)
+ .Case("super-class-method-mismatch", true)
+ .Case("tautological-compare", true)
+ .Case("typedef-redefinition", true)
+ .Case("typename-missing", true)
+ .Case("undefined-reinterpret-cast", true)
+ .Case("unknown-warning-option", true)
+ .Case("unnamed-type-template-args", true)
+ .Case("unneeded-internal-declaration", true)
+ .Case("unneeded-member-function", true)
+ .Case("unused-comparison", true)
+ .Case("unused-exception-parameter", true)
+ .Case("unused-member-function", true)
+ .Case("unused-result", true)
+ .Case("vector-conversions", true)
+ .Case("vla", true)
+ .Case("used-but-marked-unused", true)
+ .Case("weak-vtables", true)
+ .Default(false);
+ } // if (Option.startswith("-W"))
+ if (RemoveOption) {
+ it = CmdArgs.erase(it);
+ ie = CmdArgs.end();
+ } else {
+ ++it;
+ }
+ }
+}
+
+void darwin::CC1::AddCC1Args(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const Driver &D = getToolChain().getDriver();
+
+ CheckCodeGenerationOptions(D, Args);
+
+ // Derived from cc1 spec.
+ if (!Args.hasArg(options::OPT_mkernel) && !Args.hasArg(options::OPT_static) &&
+ !Args.hasArg(options::OPT_mdynamic_no_pic))
+ CmdArgs.push_back("-fPIC");
+
+ if (getToolChain().getTriple().getArch() == llvm::Triple::arm ||
+ getToolChain().getTriple().getArch() == llvm::Triple::thumb) {
+ if (!Args.hasArg(options::OPT_fbuiltin_strcat))
+ CmdArgs.push_back("-fno-builtin-strcat");
+ if (!Args.hasArg(options::OPT_fbuiltin_strcpy))
+ CmdArgs.push_back("-fno-builtin-strcpy");
+ }
+
+ if (Args.hasArg(options::OPT_g_Flag) &&
+ !Args.hasArg(options::OPT_fno_eliminate_unused_debug_symbols))
+ CmdArgs.push_back("-feliminate-unused-debug-symbols");
+}
+
+void darwin::CC1::AddCC1OptionsArgs(const ArgList &Args, ArgStringList &CmdArgs,
+ const InputInfoList &Inputs,
+ const ArgStringList &OutputArgs) const {
+ const Driver &D = getToolChain().getDriver();
+
+ // Derived from cc1_options spec.
+ if (Args.hasArg(options::OPT_fast) ||
+ Args.hasArg(options::OPT_fastf) ||
+ Args.hasArg(options::OPT_fastcp))
+ CmdArgs.push_back("-O3");
+
+ if (Arg *A = Args.getLastArg(options::OPT_pg))
+ if (Args.hasArg(options::OPT_fomit_frame_pointer))
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "-fomit-frame-pointer";
+
+ AddCC1Args(Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_Q))
+ CmdArgs.push_back("-quiet");
+
+ CmdArgs.push_back("-dumpbase");
+ CmdArgs.push_back(darwin::CC1::getBaseInputName(Args, Inputs));
+
+ Args.AddAllArgs(CmdArgs, options::OPT_d_Group);
+
+ Args.AddAllArgs(CmdArgs, options::OPT_m_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_a_Group);
+
+ // FIXME: The goal is to use the user provided -o if that is our
+ // final output, otherwise to drive from the original input
+ // name. Find a clean way to go about this.
+ if ((Args.hasArg(options::OPT_c) || Args.hasArg(options::OPT_S)) &&
+ Args.hasArg(options::OPT_o)) {
+ Arg *OutputOpt = Args.getLastArg(options::OPT_o);
+ CmdArgs.push_back("-auxbase-strip");
+ CmdArgs.push_back(OutputOpt->getValue(Args));
+ } else {
+ CmdArgs.push_back("-auxbase");
+ CmdArgs.push_back(darwin::CC1::getBaseInputStem(Args, Inputs));
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_g_Group);
+
+ Args.AddAllArgs(CmdArgs, options::OPT_O);
+ // FIXME: -Wall is getting some special treatment. Investigate.
+ Args.AddAllArgs(CmdArgs, options::OPT_W_Group, options::OPT_pedantic_Group);
+ Args.AddLastArg(CmdArgs, options::OPT_w);
+ Args.AddAllArgs(CmdArgs, options::OPT_std_EQ, options::OPT_ansi,
+ options::OPT_trigraphs);
+ if (!Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi)) {
+ // Honor -std-default.
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_std_default_EQ,
+ "-std=", /*Joined=*/true);
+ }
+
+ if (Args.hasArg(options::OPT_v))
+ CmdArgs.push_back("-version");
+ if (Args.hasArg(options::OPT_pg) &&
+ getToolChain().SupportsProfiling())
+ CmdArgs.push_back("-p");
+ Args.AddLastArg(CmdArgs, options::OPT_p);
+
+ // The driver treats -fsyntax-only specially.
+ if (getToolChain().getTriple().getArch() == llvm::Triple::arm ||
+ getToolChain().getTriple().getArch() == llvm::Triple::thumb) {
+ // Removes -fbuiltin-str{cat,cpy}; these aren't recognized by cc1 but are
+ // used to inhibit the default -fno-builtin-str{cat,cpy}.
+ //
+ // FIXME: Should we grow a better way to deal with "removing" args?
+ for (arg_iterator it = Args.filtered_begin(options::OPT_f_Group,
+ options::OPT_fsyntax_only),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ if (!(*it)->getOption().matches(options::OPT_fbuiltin_strcat) &&
+ !(*it)->getOption().matches(options::OPT_fbuiltin_strcpy)) {
+ (*it)->claim();
+ (*it)->render(Args, CmdArgs);
+ }
+ }
+ } else
+ Args.AddAllArgs(CmdArgs, options::OPT_f_Group, options::OPT_fsyntax_only);
+
+ // Claim Clang only -f options, they aren't worth warning about.
+ Args.ClaimAllArgs(options::OPT_f_clang_Group);
+
+ Args.AddAllArgs(CmdArgs, options::OPT_undef);
+ if (Args.hasArg(options::OPT_Qn))
+ CmdArgs.push_back("-fno-ident");
+
+ // FIXME: This isn't correct.
+ //Args.AddLastArg(CmdArgs, options::OPT__help)
+ //Args.AddLastArg(CmdArgs, options::OPT__targetHelp)
+
+ CmdArgs.append(OutputArgs.begin(), OutputArgs.end());
+
+ // FIXME: Still don't get what is happening here. Investigate.
+ Args.AddAllArgs(CmdArgs, options::OPT__param);
+
+ if (Args.hasArg(options::OPT_fmudflap) ||
+ Args.hasArg(options::OPT_fmudflapth)) {
+ CmdArgs.push_back("-fno-builtin");
+ CmdArgs.push_back("-fno-merge-constants");
+ }
+
+ if (Args.hasArg(options::OPT_coverage)) {
+ CmdArgs.push_back("-fprofile-arcs");
+ CmdArgs.push_back("-ftest-coverage");
+ }
+
+ if (types::isCXX(Inputs[0].getType()))
+ CmdArgs.push_back("-D__private_extern__=extern");
+}
+
+void darwin::CC1::AddCPPOptionsArgs(const ArgList &Args, ArgStringList &CmdArgs,
+ const InputInfoList &Inputs,
+ const ArgStringList &OutputArgs) const {
+ // Derived from cpp_options
+ AddCPPUniqueOptionsArgs(Args, CmdArgs, Inputs);
+
+ CmdArgs.append(OutputArgs.begin(), OutputArgs.end());
+
+ AddCC1Args(Args, CmdArgs);
+
+ // NOTE: The code below has some commonality with cpp_options, but
+ // in classic gcc style ends up sending things in different
+ // orders. This may be a good merge candidate once we drop pedantic
+ // compatibility.
+
+ Args.AddAllArgs(CmdArgs, options::OPT_m_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_std_EQ, options::OPT_ansi,
+ options::OPT_trigraphs);
+ if (!Args.getLastArg(options::OPT_std_EQ, options::OPT_ansi)) {
+ // Honor -std-default.
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_std_default_EQ,
+ "-std=", /*Joined=*/true);
+ }
+ Args.AddAllArgs(CmdArgs, options::OPT_W_Group, options::OPT_pedantic_Group);
+ Args.AddLastArg(CmdArgs, options::OPT_w);
+
+ // The driver treats -fsyntax-only specially.
+ Args.AddAllArgs(CmdArgs, options::OPT_f_Group, options::OPT_fsyntax_only);
+
+ // Claim Clang only -f options, they aren't worth warning about.
+ Args.ClaimAllArgs(options::OPT_f_clang_Group);
+
+ if (Args.hasArg(options::OPT_g_Group) && !Args.hasArg(options::OPT_g0) &&
+ !Args.hasArg(options::OPT_fno_working_directory))
+ CmdArgs.push_back("-fworking-directory");
+
+ Args.AddAllArgs(CmdArgs, options::OPT_O);
+ Args.AddAllArgs(CmdArgs, options::OPT_undef);
+ if (Args.hasArg(options::OPT_save_temps))
+ CmdArgs.push_back("-fpch-preprocess");
+}
+
+void darwin::CC1::AddCPPUniqueOptionsArgs(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const InputInfoList &Inputs) const {
+ const Driver &D = getToolChain().getDriver();
+
+ CheckPreprocessingOptions(D, Args);
+
+ // Derived from cpp_unique_options.
+ // -{C,CC} only with -E is checked in CheckPreprocessingOptions().
+ Args.AddLastArg(CmdArgs, options::OPT_C);
+ Args.AddLastArg(CmdArgs, options::OPT_CC);
+ if (!Args.hasArg(options::OPT_Q))
+ CmdArgs.push_back("-quiet");
+ Args.AddAllArgs(CmdArgs, options::OPT_nostdinc);
+ Args.AddAllArgs(CmdArgs, options::OPT_nostdincxx);
+ Args.AddLastArg(CmdArgs, options::OPT_v);
+ Args.AddAllArgs(CmdArgs, options::OPT_I_Group, options::OPT_F);
+ Args.AddLastArg(CmdArgs, options::OPT_P);
+
+ // FIXME: Handle %I properly.
+ if (getToolChain().getArchName() == "x86_64") {
+ CmdArgs.push_back("-imultilib");
+ CmdArgs.push_back("x86_64");
+ }
+
+ if (Args.hasArg(options::OPT_MD)) {
+ CmdArgs.push_back("-MD");
+ CmdArgs.push_back(darwin::CC1::getDependencyFileName(Args, Inputs));
+ }
+
+ if (Args.hasArg(options::OPT_MMD)) {
+ CmdArgs.push_back("-MMD");
+ CmdArgs.push_back(darwin::CC1::getDependencyFileName(Args, Inputs));
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_M);
+ Args.AddLastArg(CmdArgs, options::OPT_MM);
+ Args.AddAllArgs(CmdArgs, options::OPT_MF);
+ Args.AddLastArg(CmdArgs, options::OPT_MG);
+ Args.AddLastArg(CmdArgs, options::OPT_MP);
+ Args.AddAllArgs(CmdArgs, options::OPT_MQ);
+ Args.AddAllArgs(CmdArgs, options::OPT_MT);
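+ // When dependency output is only a side effect (-MD/-MMD without -M/-MM),
+ // also add the -o value as a quoted dependency target via -MQ.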
+ if (!Args.hasArg(options::OPT_M) && !Args.hasArg(options::OPT_MM) &&
+ (Args.hasArg(options::OPT_MD) || Args.hasArg(options::OPT_MMD))) {
+ if (Arg *OutputOpt = Args.getLastArg(options::OPT_o)) {
+ CmdArgs.push_back("-MQ");
+ CmdArgs.push_back(OutputOpt->getValue(Args));
+ }
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_remap);
+ if (Args.hasArg(options::OPT_g3))
+ CmdArgs.push_back("-dD");
+ Args.AddLastArg(CmdArgs, options::OPT_H);
+
+ AddCPPArgs(Args, CmdArgs);
+
+ Args.AddAllArgs(CmdArgs, options::OPT_D, options::OPT_U, options::OPT_A);
+ Args.AddAllArgs(CmdArgs, options::OPT_i_Group);
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wp_COMMA,
+ options::OPT_Xpreprocessor);
+
+ if (Args.hasArg(options::OPT_fmudflap)) {
+ CmdArgs.push_back("-D_MUDFLAP");
+ CmdArgs.push_back("-include");
+ CmdArgs.push_back("mf-runtime.h");
+ }
+
+ if (Args.hasArg(options::OPT_fmudflapth)) {
+ CmdArgs.push_back("-D_MUDFLAP");
+ CmdArgs.push_back("-D_MUDFLAPTH");
+ CmdArgs.push_back("-include");
+ CmdArgs.push_back("mf-runtime.h");
+ }
+}
+
+void darwin::CC1::AddCPPArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ // Derived from cpp spec.
+
+ if (Args.hasArg(options::OPT_static)) {
+ // The gcc spec is broken here; it refers to dynamic, but
+ // that has been translated. Start by being bug-compatible.
+
+ // if (!Args.hasArg(arglist.parser.dynamicOption))
+ CmdArgs.push_back("-D__STATIC__");
+ } else
+ CmdArgs.push_back("-D__DYNAMIC__");
+
+ if (Args.hasArg(options::OPT_pthread))
+ CmdArgs.push_back("-D_REENTRANT");
+}
+
+void darwin::Preprocess::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ assert(Inputs.size() == 1 && "Unexpected number of inputs!");
+
+ CmdArgs.push_back("-E");
+
+ if (Args.hasArg(options::OPT_traditional) ||
+ Args.hasArg(options::OPT_traditional_cpp))
+ CmdArgs.push_back("-traditional-cpp");
+
+ ArgStringList OutputArgs;
+ assert(Output.isFilename() && "Unexpected CC1 output.");
+ OutputArgs.push_back("-o");
+ OutputArgs.push_back(Output.getFilename());
+
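+ // In -E / cpp mode the output arguments are passed along with the cpp
+ // options; otherwise they are appended at the end.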
+ if (Args.hasArg(options::OPT_E) || getToolChain().getDriver().CCCIsCPP) {
+ AddCPPOptionsArgs(Args, CmdArgs, Inputs, OutputArgs);
+ } else {
+ AddCPPOptionsArgs(Args, CmdArgs, Inputs, ArgStringList());
+ CmdArgs.append(OutputArgs.begin(), OutputArgs.end());
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_d_Group);
+
+ RemoveCC1UnsupportedArgs(CmdArgs);
+
+ const char *CC1Name = getCC1Name(Inputs[0].getType());
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(CC1Name));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void darwin::Compile::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ assert(Inputs.size() == 1 && "Unexpected number of inputs!");
+
+ // Silence warning about unused --serialize-diagnostics
+ Args.ClaimAllArgs(options::OPT__serialize_diags);
+
+ types::ID InputType = Inputs[0].getType();
+ if (const Arg *A = Args.getLastArg(options::OPT_traditional))
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << A->getAsString(Args) << "-E";
+
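+ // Translate the job's output type into the matching cc1 flag, rejecting
+ // output types this tool cannot produce.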
+ if (JA.getType() == types::TY_LLVM_IR ||
+ JA.getType() == types::TY_LTO_IR)
+ CmdArgs.push_back("-emit-llvm");
+ else if (JA.getType() == types::TY_LLVM_BC ||
+ JA.getType() == types::TY_LTO_BC)
+ CmdArgs.push_back("-emit-llvm-bc");
+ else if (Output.getType() == types::TY_AST)
+ D.Diag(diag::err_drv_no_ast_support)
+ << getToolChain().getTripleString();
+ else if (JA.getType() != types::TY_PP_Asm &&
+ JA.getType() != types::TY_PCH)
+ D.Diag(diag::err_drv_invalid_gcc_output_type)
+ << getTypeName(JA.getType());
+
+ ArgStringList OutputArgs;
+ if (Output.getType() != types::TY_PCH) {
+ OutputArgs.push_back("-o");
+ if (Output.isNothing())
+ OutputArgs.push_back("/dev/null");
+ else
+ OutputArgs.push_back(Output.getFilename());
+ }
+
+ // There is no need for this level of compatibility, but it makes
+ // diffing easier.
+ bool OutputArgsEarly = (Args.hasArg(options::OPT_fsyntax_only) ||
+ Args.hasArg(options::OPT_S));
+
+ if (types::getPreprocessedType(InputType) != types::TY_INVALID) {
+ AddCPPUniqueOptionsArgs(Args, CmdArgs, Inputs);
+ if (OutputArgsEarly) {
+ AddCC1OptionsArgs(Args, CmdArgs, Inputs, OutputArgs);
+ } else {
+ AddCC1OptionsArgs(Args, CmdArgs, Inputs, ArgStringList());
+ CmdArgs.append(OutputArgs.begin(), OutputArgs.end());
+ }
+ } else {
+ CmdArgs.push_back("-fpreprocessed");
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+
+ // Reject AST inputs.
+ if (II.getType() == types::TY_AST) {
+ D.Diag(diag::err_drv_no_ast_support)
+ << getToolChain().getTripleString();
+ return;
+ }
+
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ if (OutputArgsEarly) {
+ AddCC1OptionsArgs(Args, CmdArgs, Inputs, OutputArgs);
+ } else {
+ AddCC1OptionsArgs(Args, CmdArgs, Inputs, ArgStringList());
+ CmdArgs.append(OutputArgs.begin(), OutputArgs.end());
+ }
+ }
+
+ if (Output.getType() == types::TY_PCH) {
+ assert(Output.isFilename() && "Invalid PCH output.");
+
+ CmdArgs.push_back("-o");
+ // NOTE: gcc uses a temp .s file for this, but there doesn't seem
+ // to be a good reason.
+ const char *TmpPath = C.getArgs().MakeArgString(
+ D.GetTemporaryPath("cc", "s"));
+ C.addTempFile(TmpPath);
+ CmdArgs.push_back(TmpPath);
+
+ // If the PCH output filename ends in ".pth" and we are falling back
+ // to llvm-gcc, use a ".gch" suffix instead.
+ std::string OutputFile(Output.getFilename());
+ size_t loc = OutputFile.rfind(".pth");
+ if (loc != std::string::npos)
+ OutputFile.replace(loc, 4, ".gch");
+ const char *Tmp = C.getArgs().MakeArgString("--output-pch="+OutputFile);
+ CmdArgs.push_back(Tmp);
+ }
+
+ RemoveCC1UnsupportedArgs(CmdArgs);
+
+ const char *CC1Name = getCC1Name(Inputs[0].getType());
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(CC1Name));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void darwin::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ assert(Inputs.size() == 1 && "Unexpected number of inputs.");
+ const InputInfo &Input = Inputs[0];
+
+ // Determine the original source input.
+ const Action *SourceAction = &JA;
+ while (SourceAction->getKind() != Action::InputClass) {
+ assert(!SourceAction->getInputs().empty() && "unexpected root action!");
+ SourceAction = SourceAction->getInputs()[0];
+ }
+
+ // Forward -g, assuming we are dealing with an actual assembly file.
+ if (SourceAction->getType() == types::TY_Asm ||
+ SourceAction->getType() == types::TY_PP_Asm) {
+ if (Args.hasArg(options::OPT_gstabs))
+ CmdArgs.push_back("--gstabs");
+ else if (Args.hasArg(options::OPT_g_Group))
+ CmdArgs.push_back("-g");
+ }
+
+ // Derived from asm spec.
+ AddDarwinArch(Args, CmdArgs);
+
+ // Use -force_cpusubtype_ALL on x86 by default.
+ if (getToolChain().getTriple().getArch() == llvm::Triple::x86 ||
+ getToolChain().getTriple().getArch() == llvm::Triple::x86_64 ||
+ Args.hasArg(options::OPT_force__cpusubtype__ALL))
+ CmdArgs.push_back("-force_cpusubtype_ALL");
+
+ if (getToolChain().getTriple().getArch() != llvm::Triple::x86_64 &&
+ (Args.hasArg(options::OPT_mkernel) ||
+ Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_fapple_kext)))
+ CmdArgs.push_back("-static");
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ assert(Output.isFilename() && "Unexpected lipo output.");
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ assert(Input.isFilename() && "Invalid input.");
+ CmdArgs.push_back(Input.getFilename());
+
+ // asm_final spec is empty.
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void darwin::DarwinTool::anchor() {}
+
+void darwin::DarwinTool::AddDarwinArch(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ StringRef ArchName = getDarwinToolChain().getDarwinArchName(Args);
+
+ // Derived from darwin_arch spec.
+ CmdArgs.push_back("-arch");
+ CmdArgs.push_back(Args.MakeArgString(ArchName));
+
+ // FIXME: Is this needed anymore?
+ if (ArchName == "arm")
+ CmdArgs.push_back("-force_cpusubtype_ALL");
+}
+
+void darwin::Link::AddLinkArgs(Compilation &C,
+ const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const Driver &D = getToolChain().getDriver();
+ const toolchains::Darwin &DarwinTC = getDarwinToolChain();
+
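+ // Parse -mlinker-version= if given; the major version gates newer linker
+ // features below.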
+ unsigned Version[3] = { 0, 0, 0 };
+ if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ)) {
+ bool HadExtra;
+ if (!Driver::GetReleaseVersion(A->getValue(Args), Version[0],
+ Version[1], Version[2], HadExtra) ||
+ HadExtra)
+ D.Diag(diag::err_drv_invalid_version_number)
+ << A->getAsString(Args);
+ }
+
+ // Newer linkers support -demangle; pass it if it is supported and not
+ // disabled by the user.
+ if (Version[0] >= 100 && !Args.hasArg(options::OPT_Z_Xlinker__no_demangle)) {
+ // Don't pass -demangle to ld_classic.
+ //
+ // FIXME: This is a temporary workaround; ld should be handling this.
+ bool UsesLdClassic = (getToolChain().getArch() == llvm::Triple::x86 &&
+ Args.hasArg(options::OPT_static));
+ if (getToolChain().getArch() == llvm::Triple::x86) {
+ for (arg_iterator it = Args.filtered_begin(options::OPT_Xlinker,
+ options::OPT_Wl_COMMA),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+ for (unsigned i = 0, e = A->getNumValues(); i != e; ++i)
+ if (StringRef(A->getValue(Args, i)) == "-kext")
+ UsesLdClassic = true;
+ }
+ }
+ if (!UsesLdClassic)
+ CmdArgs.push_back("-demangle");
+ }
+
+ // If we are using LTO, then automatically create a temporary file path for
+ // the linker to use, so that its lifetime will extend past a possible
+ // dsymutil step.
+ if (Version[0] >= 116 && D.IsUsingLTO(Args)) {
+ const char *TmpPath = C.getArgs().MakeArgString(
+ D.GetTemporaryPath("cc", types::getTypeTempSuffix(types::TY_Object)));
+ C.addTempFile(TmpPath);
+ CmdArgs.push_back("-object_path_lto");
+ CmdArgs.push_back(TmpPath);
+ }
+
+ // Derived from the "link" spec.
+ Args.AddAllArgs(CmdArgs, options::OPT_static);
+ if (!Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("-dynamic");
+ if (Args.hasArg(options::OPT_fgnu_runtime)) {
+ // FIXME: gcc replaces -lobjc in forward args with -lobjc-gnu
+ // here. How do we wish to handle such things?
+ }
+
+ if (!Args.hasArg(options::OPT_dynamiclib)) {
+ AddDarwinArch(Args, CmdArgs);
+ // FIXME: Why do this only on this path?
+ Args.AddLastArg(CmdArgs, options::OPT_force__cpusubtype__ALL);
+
+ Args.AddLastArg(CmdArgs, options::OPT_bundle);
+ Args.AddAllArgs(CmdArgs, options::OPT_bundle__loader);
+ Args.AddAllArgs(CmdArgs, options::OPT_client__name);
+
+ Arg *A;
+ if ((A = Args.getLastArg(options::OPT_compatibility__version)) ||
+ (A = Args.getLastArg(options::OPT_current__version)) ||
+ (A = Args.getLastArg(options::OPT_install__name)))
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << A->getAsString(Args) << "-dynamiclib";
+
+ Args.AddLastArg(CmdArgs, options::OPT_force__flat__namespace);
+ Args.AddLastArg(CmdArgs, options::OPT_keep__private__externs);
+ Args.AddLastArg(CmdArgs, options::OPT_private__bundle);
+ } else {
+ CmdArgs.push_back("-dylib");
+
+ Arg *A;
+ if ((A = Args.getLastArg(options::OPT_bundle)) ||
+ (A = Args.getLastArg(options::OPT_bundle__loader)) ||
+ (A = Args.getLastArg(options::OPT_client__name)) ||
+ (A = Args.getLastArg(options::OPT_force__flat__namespace)) ||
+ (A = Args.getLastArg(options::OPT_keep__private__externs)) ||
+ (A = Args.getLastArg(options::OPT_private__bundle)))
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "-dynamiclib";
+
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_compatibility__version,
+ "-dylib_compatibility_version");
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_current__version,
+ "-dylib_current_version");
+
+ AddDarwinArch(Args, CmdArgs);
+
+ Args.AddAllArgsTranslated(CmdArgs, options::OPT_install__name,
+ "-dylib_install_name");
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_all__load);
+ Args.AddAllArgs(CmdArgs, options::OPT_allowable__client);
+ Args.AddLastArg(CmdArgs, options::OPT_bind__at__load);
+ if (DarwinTC.isTargetIPhoneOS())
+ Args.AddLastArg(CmdArgs, options::OPT_arch__errors__fatal);
+ Args.AddLastArg(CmdArgs, options::OPT_dead__strip);
+ Args.AddLastArg(CmdArgs, options::OPT_no__dead__strip__inits__and__terms);
+ Args.AddAllArgs(CmdArgs, options::OPT_dylib__file);
+ Args.AddLastArg(CmdArgs, options::OPT_dynamic);
+ Args.AddAllArgs(CmdArgs, options::OPT_exported__symbols__list);
+ Args.AddLastArg(CmdArgs, options::OPT_flat__namespace);
+ Args.AddAllArgs(CmdArgs, options::OPT_force__load);
+ Args.AddAllArgs(CmdArgs, options::OPT_headerpad__max__install__names);
+ Args.AddAllArgs(CmdArgs, options::OPT_image__base);
+ Args.AddAllArgs(CmdArgs, options::OPT_init);
+
+ // Add the deployment target.
+ VersionTuple TargetVersion = DarwinTC.getTargetVersion();
+
+ // If we had an explicit -mios-simulator-version-min argument, honor that,
+ // otherwise use the traditional deployment targets. We can't just check the
+ // is-sim attribute because existing code follows this path, and the linker
+ // may not handle the argument.
+ //
+ // FIXME: We may be able to remove this, once we can verify no one depends on
+ // it.
+ if (Args.hasArg(options::OPT_mios_simulator_version_min_EQ))
+ CmdArgs.push_back("-ios_simulator_version_min");
+ else if (DarwinTC.isTargetIPhoneOS())
+ CmdArgs.push_back("-iphoneos_version_min");
+ else
+ CmdArgs.push_back("-macosx_version_min");
+ CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
+
+ Args.AddLastArg(CmdArgs, options::OPT_nomultidefs);
+ Args.AddLastArg(CmdArgs, options::OPT_multi__module);
+ Args.AddLastArg(CmdArgs, options::OPT_single__module);
+ Args.AddAllArgs(CmdArgs, options::OPT_multiply__defined);
+ Args.AddAllArgs(CmdArgs, options::OPT_multiply__defined__unused);
+
+ if (const Arg *A = Args.getLastArg(options::OPT_fpie, options::OPT_fPIE,
+ options::OPT_fno_pie,
+ options::OPT_fno_PIE)) {
+ if (A->getOption().matches(options::OPT_fpie) ||
+ A->getOption().matches(options::OPT_fPIE))
+ CmdArgs.push_back("-pie");
+ else
+ CmdArgs.push_back("-no_pie");
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_prebind);
+ Args.AddLastArg(CmdArgs, options::OPT_noprebind);
+ Args.AddLastArg(CmdArgs, options::OPT_nofixprebinding);
+ Args.AddLastArg(CmdArgs, options::OPT_prebind__all__twolevel__modules);
+ Args.AddLastArg(CmdArgs, options::OPT_read__only__relocs);
+ Args.AddAllArgs(CmdArgs, options::OPT_sectcreate);
+ Args.AddAllArgs(CmdArgs, options::OPT_sectorder);
+ Args.AddAllArgs(CmdArgs, options::OPT_seg1addr);
+ Args.AddAllArgs(CmdArgs, options::OPT_segprot);
+ Args.AddAllArgs(CmdArgs, options::OPT_segaddr);
+ Args.AddAllArgs(CmdArgs, options::OPT_segs__read__only__addr);
+ Args.AddAllArgs(CmdArgs, options::OPT_segs__read__write__addr);
+ Args.AddAllArgs(CmdArgs, options::OPT_seg__addr__table);
+ Args.AddAllArgs(CmdArgs, options::OPT_seg__addr__table__filename);
+ Args.AddAllArgs(CmdArgs, options::OPT_sub__library);
+ Args.AddAllArgs(CmdArgs, options::OPT_sub__umbrella);
+
+ // Give --sysroot= preference over the Apple-specific behavior of also
+ // using --isysroot as the syslibroot.
+ if (const Arg *A = Args.getLastArg(options::OPT__sysroot_EQ)) {
+ CmdArgs.push_back("-syslibroot");
+ CmdArgs.push_back(A->getValue(Args));
+ } else if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
+ CmdArgs.push_back("-syslibroot");
+ CmdArgs.push_back(A->getValue(Args));
+ } else if (getDarwinToolChain().isTargetIPhoneOS()) {
+ CmdArgs.push_back("-syslibroot");
+ CmdArgs.push_back("/Developer/SDKs/Extra");
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_twolevel__namespace);
+ Args.AddLastArg(CmdArgs, options::OPT_twolevel__namespace__hints);
+ Args.AddAllArgs(CmdArgs, options::OPT_umbrella);
+ Args.AddAllArgs(CmdArgs, options::OPT_undefined);
+ Args.AddAllArgs(CmdArgs, options::OPT_unexported__symbols__list);
+ Args.AddAllArgs(CmdArgs, options::OPT_weak__reference__mismatches);
+ Args.AddLastArg(CmdArgs, options::OPT_X_Flag);
+ Args.AddAllArgs(CmdArgs, options::OPT_y);
+ Args.AddLastArg(CmdArgs, options::OPT_w);
+ Args.AddAllArgs(CmdArgs, options::OPT_pagezero__size);
+ Args.AddAllArgs(CmdArgs, options::OPT_segs__read__);
+ Args.AddLastArg(CmdArgs, options::OPT_seglinkedit);
+ Args.AddLastArg(CmdArgs, options::OPT_noseglinkedit);
+ Args.AddAllArgs(CmdArgs, options::OPT_sectalign);
+ Args.AddAllArgs(CmdArgs, options::OPT_sectobjectsymbols);
+ Args.AddAllArgs(CmdArgs, options::OPT_segcreate);
+ Args.AddLastArg(CmdArgs, options::OPT_whyload);
+ Args.AddLastArg(CmdArgs, options::OPT_whatsloaded);
+ Args.AddAllArgs(CmdArgs, options::OPT_dylinker__install__name);
+ Args.AddLastArg(CmdArgs, options::OPT_dylinker);
+ Args.AddLastArg(CmdArgs, options::OPT_Mach);
+}
+
+void darwin::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ assert(Output.getType() == types::TY_Image && "Invalid linker output type.");
+
+ // The logic here is derived from gcc's behavior, most of which
+ // comes from specs (starting with link_command). Consult gcc for
+ // more information.
+ ArgStringList CmdArgs;
+
+ // Hack(tm) to ignore linking errors when we are doing ARC migration.
+ if (Args.hasArg(options::OPT_ccc_arcmt_check,
+ options::OPT_ccc_arcmt_migrate)) {
+ for (ArgList::const_iterator I = Args.begin(), E = Args.end(); I != E; ++I)
+ (*I)->claim();
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("touch"));
+ CmdArgs.push_back(Output.getFilename());
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+ return;
+ }
+
+ // I'm not sure why this particular decomposition exists in gcc, but
+ // we follow suit for ease of comparison.
+ AddLinkArgs(C, Args, CmdArgs);
+
+ Args.AddAllArgs(CmdArgs, options::OPT_d_Flag);
+ Args.AddAllArgs(CmdArgs, options::OPT_s);
+ Args.AddAllArgs(CmdArgs, options::OPT_t);
+ Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
+ Args.AddAllArgs(CmdArgs, options::OPT_u_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_A);
+ Args.AddLastArg(CmdArgs, options::OPT_e);
+ Args.AddAllArgs(CmdArgs, options::OPT_m_Separate);
+ Args.AddAllArgs(CmdArgs, options::OPT_r);
+
+ // Forward -ObjC when either -ObjC or -ObjC++ is used, to force loading
+ // members of static archive libraries which implement Objective-C classes or
+ // categories.
+ if (Args.hasArg(options::OPT_ObjC) || Args.hasArg(options::OPT_ObjCXX))
+ CmdArgs.push_back("-ObjC");
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ if (!Args.hasArg(options::OPT_A) &&
+ !Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ // Derived from startfile spec.
+ if (Args.hasArg(options::OPT_dynamiclib)) {
+ // Derived from darwin_dylib1 spec.
+ if (getDarwinToolChain().isTargetIOSSimulator()) {
+ // The simulator doesn't have a versioned dylib1 file.
+ CmdArgs.push_back("-ldylib1.o");
+ } else if (getDarwinToolChain().isTargetIPhoneOS()) {
+ if (getDarwinToolChain().isIPhoneOSVersionLT(3, 1))
+ CmdArgs.push_back("-ldylib1.o");
+ } else {
+ if (getDarwinToolChain().isMacosxVersionLT(10, 5))
+ CmdArgs.push_back("-ldylib1.o");
+ else if (getDarwinToolChain().isMacosxVersionLT(10, 6))
+ CmdArgs.push_back("-ldylib1.10.5.o");
+ }
+ } else {
+ if (Args.hasArg(options::OPT_bundle)) {
+ if (!Args.hasArg(options::OPT_static)) {
+ // Derived from darwin_bundle1 spec.
+ if (getDarwinToolChain().isTargetIOSSimulator()) {
+ // The simulator doesn't have a versioned bundle1 file.
+ CmdArgs.push_back("-lbundle1.o");
+ } else if (getDarwinToolChain().isTargetIPhoneOS()) {
+ if (getDarwinToolChain().isIPhoneOSVersionLT(3, 1))
+ CmdArgs.push_back("-lbundle1.o");
+ } else {
+ if (getDarwinToolChain().isMacosxVersionLT(10, 6))
+ CmdArgs.push_back("-lbundle1.o");
+ }
+ }
+ } else {
+ if (Args.hasArg(options::OPT_pg) &&
+ getToolChain().SupportsProfiling()) {
+ if (Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_object) ||
+ Args.hasArg(options::OPT_preload)) {
+ CmdArgs.push_back("-lgcrt0.o");
+ } else {
+ CmdArgs.push_back("-lgcrt1.o");
+
+ // darwin_crt2 spec is empty.
+ }
+ } else {
+ if (Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_object) ||
+ Args.hasArg(options::OPT_preload)) {
+ CmdArgs.push_back("-lcrt0.o");
+ } else {
+ // Derived from darwin_crt1 spec.
+ if (getDarwinToolChain().isTargetIOSSimulator()) {
+ // The simulator doesn't have a versioned crt1 file.
+ CmdArgs.push_back("-lcrt1.o");
+ } else if (getDarwinToolChain().isTargetIPhoneOS()) {
+ if (getDarwinToolChain().isIPhoneOSVersionLT(3, 1))
+ CmdArgs.push_back("-lcrt1.o");
+ else
+ CmdArgs.push_back("-lcrt1.3.1.o");
+ } else {
+ if (getDarwinToolChain().isMacosxVersionLT(10, 5))
+ CmdArgs.push_back("-lcrt1.o");
+ else if (getDarwinToolChain().isMacosxVersionLT(10, 6))
+ CmdArgs.push_back("-lcrt1.10.5.o");
+ else if (getDarwinToolChain().isMacosxVersionLT(10, 8))
+ CmdArgs.push_back("-lcrt1.10.6.o");
+
+ // darwin_crt2 spec is empty.
+ }
+ }
+ }
+ }
+ }
+
+ if (!getDarwinToolChain().isTargetIPhoneOS() &&
+ Args.hasArg(options::OPT_shared_libgcc) &&
+ getDarwinToolChain().isMacosxVersionLT(10, 5)) {
+ const char *Str =
+ Args.MakeArgString(getToolChain().GetFilePath("crt3.o"));
+ CmdArgs.push_back(Str);
+ }
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+
+ // If we're building a dynamic lib with -faddress-sanitizer, unresolved
+ // symbols may appear. Mark all of them as dynamic_lookup.
+ // Linking executables is handled in lib/Driver/ToolChains.cpp.
+ if (Args.hasFlag(options::OPT_faddress_sanitizer,
+ options::OPT_fno_address_sanitizer, false)) {
+ if (Args.hasArg(options::OPT_dynamiclib) ||
+ Args.hasArg(options::OPT_bundle)) {
+ CmdArgs.push_back("-undefined");
+ CmdArgs.push_back("dynamic_lookup");
+ }
+ }
+
+ if (Args.hasArg(options::OPT_fopenmp))
+ // This is more complicated in gcc...
+ CmdArgs.push_back("-lgomp");
+
+ getDarwinToolChain().AddLinkSearchPathArgs(Args, CmdArgs);
+
+ if (isObjCRuntimeLinked(Args)) {
+ // Avoid linking compatibility stubs on i386 mac.
+ if (!getDarwinToolChain().isTargetMacOS() ||
+ getDarwinToolChain().getArchName() != "i386") {
+ // If we don't have ARC or subscripting runtime support, link in the
+ // runtime stubs. We have to do this *before* adding any of the normal
+ // linker inputs so that its initializer gets run first.
+ ObjCRuntime runtime;
+ getDarwinToolChain().configureObjCRuntime(runtime);
+ // We use arclite library for both ARC and subscripting support.
+ if ((!runtime.HasARC && isObjCAutoRefCount(Args)) ||
+ !runtime.HasSubscripting)
+ getDarwinToolChain().AddLinkARCArgs(Args, CmdArgs);
+ CmdArgs.push_back("-framework");
+ CmdArgs.push_back("Foundation");
+ }
+ // Link libobjc.
+ CmdArgs.push_back("-lobjc");
+ }
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (LinkingOutput) {
+ CmdArgs.push_back("-arch_multiple");
+ CmdArgs.push_back("-final_output");
+ CmdArgs.push_back(LinkingOutput);
+ }
+
+ if (Args.hasArg(options::OPT_fnested_functions))
+ CmdArgs.push_back("-allow_stack_execute");
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (getToolChain().getDriver().CCCIsCXX)
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+
+ // link_ssp spec is empty.
+
+ // Let the tool chain choose which runtime library to link.
+ getDarwinToolChain().AddLinkRuntimeLibArgs(Args, CmdArgs);
+ }
+
+ if (!Args.hasArg(options::OPT_A) &&
+ !Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ // endfile_spec is empty.
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_F);
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("ld"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void darwin::Lipo::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ CmdArgs.push_back("-create");
+ assert(Output.isFilename() && "Unexpected lipo output.");
+
+ CmdArgs.push_back("-output");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ assert(II.isFilename() && "Unexpected lipo input.");
+ CmdArgs.push_back(II.getFilename());
+ }
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("lipo"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void darwin::Dsymutil::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ assert(Inputs.size() == 1 && "Unable to handle multiple inputs.");
+ const InputInfo &Input = Inputs[0];
+ assert(Input.isFilename() && "Unexpected dsymutil input.");
+ CmdArgs.push_back(Input.getFilename());
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("dsymutil"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void darwin::VerifyDebug::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+ CmdArgs.push_back("--verify");
+ CmdArgs.push_back("--debug-info");
+ CmdArgs.push_back("--eh-frame");
+ CmdArgs.push_back("--quiet");
+
+ assert(Inputs.size() == 1 && "Unable to handle multiple inputs.");
+ const InputInfo &Input = Inputs[0];
+ assert(Input.isFilename() && "Unexpected verify input");
+
+ // Grab the output of the earlier dsymutil run.
+ CmdArgs.push_back(Input.getFilename());
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("dwarfdump"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void solaris::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+
+void solaris::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ // FIXME: Find a real GCC; don't hard-code versions here.
+ std::string GCCLibPath = "/usr/gcc/4.5/lib/gcc/";
+ const llvm::Triple &T = getToolChain().getTriple();
+ std::string LibPath = "/usr/lib/";
+ llvm::Triple::ArchType Arch = T.getArch();
+ switch (Arch) {
+ case llvm::Triple::x86:
+ GCCLibPath += ("i386-" + T.getVendorName() + "-" +
+ T.getOSName()).str() + "/4.5.2/";
+ break;
+ case llvm::Triple::x86_64:
+ GCCLibPath += ("i386-" + T.getVendorName() + "-" +
+ T.getOSName()).str();
+ GCCLibPath += "/4.5.2/amd64/";
+ LibPath += "amd64/";
+ break;
+ default:
+ assert(0 && "Unsupported architecture");
+ }
+
+ ArgStringList CmdArgs;
+
+ // Demangle C++ names in errors
+ CmdArgs.push_back("-C");
+
+ if ((!Args.hasArg(options::OPT_nostdlib)) &&
+ (!Args.hasArg(options::OPT_shared))) {
+ CmdArgs.push_back("-e");
+ CmdArgs.push_back("_start");
+ }
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ CmdArgs.push_back("-dn");
+ } else {
+ CmdArgs.push_back("-Bdynamic");
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-shared");
+ } else {
+ CmdArgs.push_back("--dynamic-linker");
+ CmdArgs.push_back(Args.MakeArgString(LibPath + "ld.so.1"));
+ }
+ }
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back(Args.MakeArgString(LibPath + "crt1.o"));
+ CmdArgs.push_back(Args.MakeArgString(LibPath + "crti.o"));
+ CmdArgs.push_back(Args.MakeArgString(LibPath + "values-Xa.o"));
+ CmdArgs.push_back(Args.MakeArgString(GCCLibPath + "crtbegin.o"));
+ } else {
+ CmdArgs.push_back(Args.MakeArgString(LibPath + "crti.o"));
+ CmdArgs.push_back(Args.MakeArgString(LibPath + "values-Xa.o"));
+ CmdArgs.push_back(Args.MakeArgString(GCCLibPath + "crtbegin.o"));
+ }
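+ // C++ links additionally pull in cxa_finalize.o.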
+ if (getToolChain().getDriver().CCCIsCXX)
+ CmdArgs.push_back(Args.MakeArgString(LibPath + "cxa_finalize.o"));
+ }
+
+ CmdArgs.push_back(Args.MakeArgString("-L" + GCCLibPath));
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+ Args.AddAllArgs(CmdArgs, options::OPT_r);
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (getToolChain().getDriver().CCCIsCXX)
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("-lgcc_s");
+ if (!Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-lgcc");
+ CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lm");
+ }
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ CmdArgs.push_back(Args.MakeArgString(GCCLibPath + "crtend.o"));
+ }
+ CmdArgs.push_back(Args.MakeArgString(LibPath + "crtn.o"));
+
+ addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("ld"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void auroraux::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("gas"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void auroraux::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ if ((!Args.hasArg(options::OPT_nostdlib)) &&
+ (!Args.hasArg(options::OPT_shared))) {
+ CmdArgs.push_back("-e");
+ CmdArgs.push_back("_start");
+ }
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ CmdArgs.push_back("-dn");
+ } else {
+// CmdArgs.push_back("--eh-frame-hdr");
+ CmdArgs.push_back("-Bdynamic");
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-shared");
+ } else {
+ CmdArgs.push_back("--dynamic-linker");
+ CmdArgs.push_back("/lib/ld.so.1"); // 64-bit path: /lib/amd64/ld.so.1
+ }
+ }
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crt1.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtbegin.o")));
+ } else {
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crti.o")));
+ }
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtn.o")));
+ }
+
+ CmdArgs.push_back(Args.MakeArgString("-L/opt/gcc4/lib/gcc/"
+ + getToolChain().getTripleString()
+ + "/4.2.4"));
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ // FIXME: For some reason GCC passes -lgcc before adding
+ // the default system libraries. Just mimic this for now.
+ CmdArgs.push_back("-lgcc");
+
+ if (Args.hasArg(options::OPT_pthread))
+ CmdArgs.push_back("-pthread");
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lgcc");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtend.o")));
+ }
+
+ addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("ld"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void openbsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void openbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ if ((!Args.hasArg(options::OPT_nostdlib)) &&
+ (!Args.hasArg(options::OPT_shared))) {
+ CmdArgs.push_back("-e");
+ CmdArgs.push_back("__start");
+ }
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ } else {
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ CmdArgs.push_back("--eh-frame-hdr");
+ CmdArgs.push_back("-Bdynamic");
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-shared");
+ } else {
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back("/usr/libexec/ld.so");
+ }
+ }
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("gcrt0.o")));
+ else
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crt0.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtbegin.o")));
+ } else {
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtbeginS.o")));
+ }
+ }
+
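+ // The gcc-lib directory uses 'amd64' in place of 'x86_64', so rewrite the
+ // triple accordingly.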
+ std::string Triple = getToolChain().getTripleString();
+ if (Triple.substr(0, 6) == "x86_64")
+ Triple.replace(0, 6, "amd64");
+ CmdArgs.push_back(Args.MakeArgString("-L/usr/lib/gcc-lib/" + Triple +
+ "/4.2.1"));
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
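+ // With -pg, link the profiled variants of the system libraries.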
+ if (D.CCCIsCXX) {
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lm_p");
+ else
+ CmdArgs.push_back("-lm");
+ }
+
+ // FIXME: For some reason GCC passes -lgcc before adding
+ // the default system libraries. Just mimic this for now.
+ CmdArgs.push_back("-lgcc");
+
+ if (Args.hasArg(options::OPT_pthread))
+ CmdArgs.push_back("-lpthread");
+ if (!Args.hasArg(options::OPT_shared)) {
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lc_p");
+ else
+ CmdArgs.push_back("-lc");
+ }
+ CmdArgs.push_back("-lgcc");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtend.o")));
+ else
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtendS.o")));
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("ld"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void freebsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ // When building 32-bit code on FreeBSD/amd64, we have to explicitly
+ // instruct the base system's as(1) to assemble 32-bit code.
+ if (getToolChain().getArchName() == "i386")
+ CmdArgs.push_back("--32");
+
+ if (getToolChain().getArchName() == "powerpc")
+ CmdArgs.push_back("-a32");
+
+ // Set byte order explicitly
+ if (getToolChain().getArchName() == "mips")
+ CmdArgs.push_back("-EB");
+ else if (getToolChain().getArchName() == "mipsel")
+ CmdArgs.push_back("-EL");
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ } else {
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ CmdArgs.push_back("--eh-frame-hdr");
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-Bshareable");
+ } else {
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back("/libexec/ld-elf.so.1");
+ }
+ }
+
+ // When building 32-bit code on FreeBSD/amd64, we have to explicitly
+ // instruct ld in the base system to link 32-bit code.
+ if (getToolChain().getArchName() == "i386") {
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf_i386_fbsd");
+ }
+
+ if (getToolChain().getArchName() == "powerpc") {
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf32ppc_fbsd");
+ }
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("gcrt1.o")));
+ else {
+ const char *crt = Args.hasArg(options::OPT_pie) ? "Scrt1.o" : "crt1.o";
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath(crt)));
+ }
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtbegin.o")));
+ } else {
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtbeginS.o")));
+ }
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ const ToolChain::path_list Paths = getToolChain().getFilePaths();
+ for (ToolChain::path_list::const_iterator i = Paths.begin(), e = Paths.end();
+ i != e; ++i)
+ CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + *i));
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+ Args.AddAllArgs(CmdArgs, options::OPT_s);
+ Args.AddAllArgs(CmdArgs, options::OPT_t);
+ Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
+ Args.AddAllArgs(CmdArgs, options::OPT_r);
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (D.CCCIsCXX) {
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lm_p");
+ else
+ CmdArgs.push_back("-lm");
+ }
+ // FIXME: For some reason GCC passes -lgcc and -lgcc_s before adding
+ // the default system libraries. Just mimic this for now.
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lgcc_p");
+ else
+ CmdArgs.push_back("-lgcc");
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-lgcc_eh");
+ } else if (Args.hasArg(options::OPT_pg)) {
+ CmdArgs.push_back("-lgcc_eh_p");
+ } else {
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("--no-as-needed");
+ }
+
+ if (Args.hasArg(options::OPT_pthread)) {
+ if (Args.hasArg(options::OPT_pg))
+ CmdArgs.push_back("-lpthread_p");
+ else
+ CmdArgs.push_back("-lpthread");
+ }
+
+ if (Args.hasArg(options::OPT_pg)) {
+ if (Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back("-lc");
+ else
+ CmdArgs.push_back("-lc_p");
+ CmdArgs.push_back("-lgcc_p");
+ } else {
+ CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lgcc");
+ }
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-lgcc_eh");
+ } else if (Args.hasArg(options::OPT_pg)) {
+ CmdArgs.push_back("-lgcc_eh_p");
+ } else {
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("--no-as-needed");
+ }
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
+ "crtend.o")));
+ else
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
+ "crtendS.o")));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
+ "crtn.o")));
+ }
+
+ addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("ld"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void netbsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ // When building 32-bit code on NetBSD/amd64, we have to explicitly
+ // instruct as in the base system to assemble 32-bit code.
+ if (getToolChain().getArch() == llvm::Triple::x86)
+ CmdArgs.push_back("--32");
+
+ // Set byte order explicitly
+ if (getToolChain().getArchName() == "mips")
+ CmdArgs.push_back("-EB");
+ else if (getToolChain().getArchName() == "mipsel")
+ CmdArgs.push_back("-EL");
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ const char *Exec = Args.MakeArgString((getToolChain().GetProgramPath("as")));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void netbsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ } else {
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+ CmdArgs.push_back("--eh-frame-hdr");
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-Bshareable");
+ } else {
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back("/libexec/ld.elf_so");
+ }
+ }
+
+ // When building 32-bit code on NetBSD/amd64, we have to explicitly
+ // instruct ld in the base system to link 32-bit code.
+ if (getToolChain().getArch() == llvm::Triple::x86) {
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf_i386");
+ }
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crt0.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtbegin.o")));
+ } else {
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtbeginS.o")));
+ }
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+ Args.AddAllArgs(CmdArgs, options::OPT_s);
+ Args.AddAllArgs(CmdArgs, options::OPT_t);
+ Args.AddAllArgs(CmdArgs, options::OPT_Z_Flag);
+ Args.AddAllArgs(CmdArgs, options::OPT_r);
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (D.CCCIsCXX) {
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("-lm");
+ }
+ // FIXME: For some reason GCC passes -lgcc and -lgcc_s before adding
+ // the default system libraries. Just mimic this for now.
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-lgcc_eh");
+ } else {
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("--no-as-needed");
+ }
+ CmdArgs.push_back("-lgcc");
+
+ if (Args.hasArg(options::OPT_pthread))
+ CmdArgs.push_back("-lpthread");
+ CmdArgs.push_back("-lc");
+
+ CmdArgs.push_back("-lgcc");
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-lgcc_eh");
+ } else {
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ CmdArgs.push_back("--no-as-needed");
+ }
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
+ "crtend.o")));
+ else
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
+ "crtendS.o")));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath(
+ "crtn.o")));
+ }
+
+ addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("ld"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void linuxtools::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ // Add --32/--64 to make sure we get the format we want.
+ // This is incomplete
+ if (getToolChain().getArch() == llvm::Triple::x86) {
+ CmdArgs.push_back("--32");
+ } else if (getToolChain().getArch() == llvm::Triple::x86_64) {
+ CmdArgs.push_back("--64");
+ } else if (getToolChain().getArch() == llvm::Triple::ppc) {
+ CmdArgs.push_back("-a32");
+ CmdArgs.push_back("-mppc");
+ CmdArgs.push_back("-many");
+ } else if (getToolChain().getArch() == llvm::Triple::ppc64) {
+ CmdArgs.push_back("-a64");
+ CmdArgs.push_back("-mppc64");
+ CmdArgs.push_back("-many");
+ } else if (getToolChain().getArch() == llvm::Triple::arm) {
+ StringRef MArch = getToolChain().getArchName();
+ if (MArch == "armv7" || MArch == "armv7a" || MArch == "armv7-a")
+ CmdArgs.push_back("-mfpu=neon");
+ } else if (getToolChain().getArch() == llvm::Triple::mips ||
+ getToolChain().getArch() == llvm::Triple::mipsel ||
+ getToolChain().getArch() == llvm::Triple::mips64 ||
+ getToolChain().getArch() == llvm::Triple::mips64el) {
+ StringRef CPUName;
+ StringRef ABIName;
+ getMipsCPUAndABI(Args, getToolChain(), CPUName, ABIName);
+
+ CmdArgs.push_back("-march");
+ CmdArgs.push_back(CPUName.data());
+
+ // Convert ABI name to the GNU tools acceptable variant.
+ if (ABIName == "o32")
+ ABIName = "32";
+ else if (ABIName == "n64")
+ ABIName = "64";
+
+ CmdArgs.push_back("-mabi");
+ CmdArgs.push_back(ABIName.data());
+
+ if (getToolChain().getArch() == llvm::Triple::mips ||
+ getToolChain().getArch() == llvm::Triple::mips64)
+ CmdArgs.push_back("-EB");
+ else
+ CmdArgs.push_back("-EL");
+ }
+
+ Args.AddLastArg(CmdArgs, options::OPT_march_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_mcpu_EQ);
+ Args.AddLastArg(CmdArgs, options::OPT_mfpu_EQ);
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
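+// Mirror GCC's libgcc handling on Linux: with -static or -static-libgcc the
+// static -lgcc/-lgcc_eh pair is linked; otherwise the shared -lgcc_s is added,
+// wrapped in --as-needed/--no-as-needed for plain C links so the DT_NEEDED
+// entry is only emitted when something actually uses it.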
+static void AddLibgcc(const Driver &D, ArgStringList &CmdArgs,
+ const ArgList &Args) {
+ bool StaticLibgcc = Args.hasArg(options::OPT_static) ||
+ Args.hasArg(options::OPT_static_libgcc);
+ if (!D.CCCIsCXX)
+ CmdArgs.push_back("-lgcc");
+
+ if (StaticLibgcc) {
+ if (D.CCCIsCXX)
+ CmdArgs.push_back("-lgcc");
+ } else {
+ if (!D.CCCIsCXX)
+ CmdArgs.push_back("--as-needed");
+ CmdArgs.push_back("-lgcc_s");
+ if (!D.CCCIsCXX)
+ CmdArgs.push_back("--no-as-needed");
+ }
+
+ if (StaticLibgcc)
+ CmdArgs.push_back("-lgcc_eh");
+ else if (!Args.hasArg(options::OPT_shared) && D.CCCIsCXX)
+ CmdArgs.push_back("-lgcc");
+}
+
+void linuxtools::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const toolchains::Linux& ToolChain =
+ static_cast<const toolchains::Linux&>(getToolChain());
+ const Driver &D = ToolChain.getDriver();
+ ArgStringList CmdArgs;
+
+ // Silence warning for "clang -g foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_g_Group);
+ // and "clang -emit-llvm foo.o -o foo"
+ Args.ClaimAllArgs(options::OPT_emit_llvm);
+  // and for "clang -w foo.o -o foo". Other warning options are already
+  // handled somewhere else.
+ Args.ClaimAllArgs(options::OPT_w);
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (Args.hasArg(options::OPT_pie))
+ CmdArgs.push_back("-pie");
+
+ if (Args.hasArg(options::OPT_rdynamic))
+ CmdArgs.push_back("-export-dynamic");
+
+ if (Args.hasArg(options::OPT_s))
+ CmdArgs.push_back("-s");
+
+ for (std::vector<std::string>::const_iterator i = ToolChain.ExtraOpts.begin(),
+ e = ToolChain.ExtraOpts.end();
+ i != e; ++i)
+ CmdArgs.push_back(i->c_str());
+
+ if (!Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("--eh-frame-hdr");
+ }
+
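+  // Select the ld emulation (-m) that matches the target architecture.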
+ CmdArgs.push_back("-m");
+ if (ToolChain.getArch() == llvm::Triple::x86)
+ CmdArgs.push_back("elf_i386");
+ else if (ToolChain.getArch() == llvm::Triple::arm
+ || ToolChain.getArch() == llvm::Triple::thumb)
+ CmdArgs.push_back("armelf_linux_eabi");
+ else if (ToolChain.getArch() == llvm::Triple::ppc)
+ CmdArgs.push_back("elf32ppclinux");
+ else if (ToolChain.getArch() == llvm::Triple::ppc64)
+ CmdArgs.push_back("elf64ppc");
+ else if (ToolChain.getArch() == llvm::Triple::mips)
+ CmdArgs.push_back("elf32btsmip");
+ else if (ToolChain.getArch() == llvm::Triple::mipsel)
+ CmdArgs.push_back("elf32ltsmip");
+ else if (ToolChain.getArch() == llvm::Triple::mips64)
+ CmdArgs.push_back("elf64btsmip");
+ else if (ToolChain.getArch() == llvm::Triple::mips64el)
+ CmdArgs.push_back("elf64ltsmip");
+ else
+ CmdArgs.push_back("elf_x86_64");
+
+ if (Args.hasArg(options::OPT_static)) {
+ if (ToolChain.getArch() == llvm::Triple::arm
+ || ToolChain.getArch() == llvm::Triple::thumb)
+ CmdArgs.push_back("-Bstatic");
+ else
+ CmdArgs.push_back("-static");
+ } else if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-shared");
+ }
+
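+  // Choose the dynamic loader path for the target. Note that ARM/Thumb get
+  // -dynamic-linker even for -static and -shared links here.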
+ if (ToolChain.getArch() == llvm::Triple::arm ||
+ ToolChain.getArch() == llvm::Triple::thumb ||
+ (!Args.hasArg(options::OPT_static) &&
+ !Args.hasArg(options::OPT_shared))) {
+ CmdArgs.push_back("-dynamic-linker");
+ if (ToolChain.getArch() == llvm::Triple::x86)
+ CmdArgs.push_back("/lib/ld-linux.so.2");
+ else if (ToolChain.getArch() == llvm::Triple::arm ||
+ ToolChain.getArch() == llvm::Triple::thumb)
+ CmdArgs.push_back("/lib/ld-linux.so.3");
+ else if (ToolChain.getArch() == llvm::Triple::mips ||
+ ToolChain.getArch() == llvm::Triple::mipsel)
+ CmdArgs.push_back("/lib/ld.so.1");
+ else if (ToolChain.getArch() == llvm::Triple::mips64 ||
+ ToolChain.getArch() == llvm::Triple::mips64el)
+ CmdArgs.push_back("/lib64/ld.so.1");
+ else if (ToolChain.getArch() == llvm::Triple::ppc)
+ CmdArgs.push_back("/lib/ld.so.1");
+ else if (ToolChain.getArch() == llvm::Triple::ppc64)
+ CmdArgs.push_back("/lib64/ld64.so.1");
+ else
+ CmdArgs.push_back("/lib64/ld-linux-x86-64.so.2");
+ }
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
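+  // Startup objects: Scrt1.o (PIE) or crt1.o for executables, then crti.o and
+  // the crtbegin variant matching -static, -shared/-pie, or the default.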
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ const char *crt1 = NULL;
+ if (!Args.hasArg(options::OPT_shared)){
+ if (Args.hasArg(options::OPT_pie))
+ crt1 = "Scrt1.o";
+ else
+ crt1 = "crt1.o";
+ }
+ if (crt1)
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crt1)));
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
+
+ const char *crtbegin;
+ if (Args.hasArg(options::OPT_static))
+ crtbegin = "crtbeginT.o";
+ else if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
+ crtbegin = "crtbeginS.o";
+ else
+ crtbegin = "crtbegin.o";
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+
+ const ToolChain::path_list Paths = ToolChain.getFilePaths();
+
+ for (ToolChain::path_list::const_iterator i = Paths.begin(), e = Paths.end();
+ i != e; ++i)
+ CmdArgs.push_back(Args.MakeArgString(StringRef("-L") + *i));
+
+ // Tell the linker to load the plugin. This has to come before AddLinkerInputs
+ // as gold requires -plugin to come before any -plugin-opt that -Wl might
+ // forward.
+ if (D.IsUsingLTO(Args) || Args.hasArg(options::OPT_use_gold_plugin)) {
+ CmdArgs.push_back("-plugin");
+ std::string Plugin = ToolChain.getDriver().Dir + "/../lib/LLVMgold.so";
+ CmdArgs.push_back(Args.MakeArgString(Plugin));
+ }
+
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs);
+
+ if (D.CCCIsCXX && !Args.hasArg(options::OPT_nostdlib)) {
+ bool OnlyLibstdcxxStatic = Args.hasArg(options::OPT_static_libstdcxx) &&
+ !Args.hasArg(options::OPT_static);
+ if (OnlyLibstdcxxStatic)
+ CmdArgs.push_back("-Bstatic");
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ if (OnlyLibstdcxxStatic)
+ CmdArgs.push_back("-Bdynamic");
+ CmdArgs.push_back("-lm");
+ }
+
+ // Call this before we add the C run-time.
+ addAsanRTLinux(getToolChain(), Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib)) {
+ if (Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("--start-group");
+
+ AddLibgcc(D, CmdArgs, Args);
+
+ if (Args.hasArg(options::OPT_pthread) ||
+ Args.hasArg(options::OPT_pthreads))
+ CmdArgs.push_back("-lpthread");
+
+ CmdArgs.push_back("-lc");
+
+ if (Args.hasArg(options::OPT_static))
+ CmdArgs.push_back("--end-group");
+ else
+ AddLibgcc(D, CmdArgs, Args);
+
+
+ if (!Args.hasArg(options::OPT_nostartfiles)) {
+ const char *crtend;
+ if (Args.hasArg(options::OPT_shared) || Args.hasArg(options::OPT_pie))
+ crtend = "crtendS.o";
+ else
+ crtend = "crtend.o";
+
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
+ }
+ }
+
+ addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
+
+ C.addCommand(new Command(JA, *this, ToolChain.Linker.c_str(), CmdArgs));
+}
+
+void minix::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void minix::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crt1.o")));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
+ CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crtn.o")));
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (D.CCCIsCXX) {
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("-lm");
+ }
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (Args.hasArg(options::OPT_pthread))
+ CmdArgs.push_back("-lpthread");
+ CmdArgs.push_back("-lc");
+ CmdArgs.push_back("-lCompilerRT-Generic");
+ CmdArgs.push_back("-L/usr/pkg/compiler-rt/lib");
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtend.o")));
+ }
+
+ const char *Exec = Args.MakeArgString(getToolChain().GetProgramPath("ld"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+/// DragonFly Tools
+
+// For now, DragonFly Assemble does just about the same as for
+// FreeBSD, but this may change soon.
+void dragonfly::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ // When building 32-bit code on DragonFly/pc64, we have to explicitly
+ // instruct as in the base system to assemble 32-bit code.
+ if (getToolChain().getArchName() == "i386")
+ CmdArgs.push_back("--32");
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ for (InputInfoList::const_iterator
+ it = Inputs.begin(), ie = Inputs.end(); it != ie; ++it) {
+ const InputInfo &II = *it;
+ CmdArgs.push_back(II.getFilename());
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("as"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void dragonfly::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-Bstatic");
+ } else {
+ if (Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back("-Bshareable");
+ else {
+ CmdArgs.push_back("-dynamic-linker");
+ CmdArgs.push_back("/usr/libexec/ld-elf.so.2");
+ }
+ }
+
+ // When building 32-bit code on DragonFly/pc64, we have to explicitly
+ // instruct ld in the base system to link 32-bit code.
+ if (getToolChain().getArchName() == "i386") {
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("elf_i386");
+ }
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crt1.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtbegin.o")));
+ } else {
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
+ CmdArgs.push_back(
+ Args.MakeArgString(getToolChain().GetFilePath("crtbeginS.o")));
+ }
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
+ Args.AddAllArgs(CmdArgs, options::OPT_e);
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ // FIXME: GCC passes on -lgcc, -lgcc_pic and a whole lot of
+ // rpaths
+ CmdArgs.push_back("-L/usr/lib/gcc41");
+
+ if (!Args.hasArg(options::OPT_static)) {
+ CmdArgs.push_back("-rpath");
+ CmdArgs.push_back("/usr/lib/gcc41");
+
+ CmdArgs.push_back("-rpath-link");
+ CmdArgs.push_back("/usr/lib/gcc41");
+
+ CmdArgs.push_back("-rpath");
+ CmdArgs.push_back("/usr/lib");
+
+ CmdArgs.push_back("-rpath-link");
+ CmdArgs.push_back("/usr/lib");
+ }
+
+ if (D.CCCIsCXX) {
+ getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("-lm");
+ }
+
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-lgcc_pic");
+ } else {
+ CmdArgs.push_back("-lgcc");
+ }
+
+
+ if (Args.hasArg(options::OPT_pthread))
+ CmdArgs.push_back("-lpthread");
+
+ if (!Args.hasArg(options::OPT_nolibc)) {
+ CmdArgs.push_back("-lc");
+ }
+
+ if (Args.hasArg(options::OPT_shared)) {
+ CmdArgs.push_back("-lgcc_pic");
+ } else {
+ CmdArgs.push_back("-lgcc");
+ }
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_shared))
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtend.o")));
+ else
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtendS.o")));
+ CmdArgs.push_back(Args.MakeArgString(
+ getToolChain().GetFilePath("crtn.o")));
+ }
+
+ addProfileRT(getToolChain(), Args, CmdArgs, getToolChain().getTriple());
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("ld"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
+void visualstudio::Link::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ if (Output.isFilename()) {
+ CmdArgs.push_back(Args.MakeArgString(std::string("-out:") +
+ Output.getFilename()));
+ } else {
+ assert(Output.isNothing() && "Invalid output.");
+ }
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nostartfiles)) {
+ CmdArgs.push_back("-defaultlib:libcmt");
+ }
+
+ CmdArgs.push_back("-nologo");
+
+ AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs);
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath("link.exe"));
+ C.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/Tools.h b/contrib/llvm/tools/clang/lib/Driver/Tools.h
new file mode 100644
index 0000000..651a8f2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Tools.h
@@ -0,0 +1,605 @@
+//===--- Tools.h - Tool Implementations -------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_LIB_DRIVER_TOOLS_H_
+#define CLANG_LIB_DRIVER_TOOLS_H_
+
+#include "clang/Driver/Tool.h"
+#include "clang/Driver/Types.h"
+#include "clang/Driver/Util.h"
+
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+namespace driver {
+ class Driver;
+
+namespace toolchains {
+ class Darwin;
+}
+
+namespace tools {
+
+ /// \brief Clang compiler tool.
+ class LLVM_LIBRARY_VISIBILITY Clang : public Tool {
+ void AddPreprocessingOptions(Compilation &C,
+ const Driver &D,
+ const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const InputInfo &Output,
+ const InputInfoList &Inputs) const;
+
+ void AddARMTargetArgs(const ArgList &Args, ArgStringList &CmdArgs,
+ bool KernelOrKext) const;
+ void AddMIPSTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
+ void AddSparcTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
+ void AddX86TargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
+ void AddHexagonTargetArgs (const ArgList &Args, ArgStringList &CmdArgs) const;
+
+ public:
+ Clang(const ToolChain &TC) : Tool("clang", "clang frontend", TC) {}
+
+ virtual bool hasGoodDiagnostics() const { return true; }
+ virtual bool hasIntegratedAssembler() const { return true; }
+ virtual bool hasIntegratedCPP() const { return true; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+
+ /// \brief Clang integrated assembler tool.
+ class LLVM_LIBRARY_VISIBILITY ClangAs : public Tool {
+ void AddARMTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
+ public:
+ ClangAs(const ToolChain &TC) : Tool("clang::as",
+ "clang integrated assembler", TC) {}
+
+ virtual bool hasGoodDiagnostics() const { return true; }
+ virtual bool hasIntegratedAssembler() const { return false; }
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+
+ /// gcc - Generic GCC tool implementations.
+namespace gcc {
+ class LLVM_LIBRARY_VISIBILITY Common : public Tool {
+ public:
+ Common(const char *Name, const char *ShortName,
+ const ToolChain &TC) : Tool(Name, ShortName, TC) {}
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+
+ /// RenderExtraToolArgs - Render any arguments necessary to force
+ /// the particular tool mode.
+ virtual void RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const = 0;
+ };
+
+
+ class LLVM_LIBRARY_VISIBILITY Preprocess : public Common {
+ public:
+ Preprocess(const ToolChain &TC) : Common("gcc::Preprocess",
+ "gcc preprocessor", TC) {}
+
+ virtual bool hasGoodDiagnostics() const { return true; }
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const;
+ };
+
+ class LLVM_LIBRARY_VISIBILITY Precompile : public Common {
+ public:
+ Precompile(const ToolChain &TC) : Common("gcc::Precompile",
+ "gcc precompile", TC) {}
+
+ virtual bool hasGoodDiagnostics() const { return true; }
+ virtual bool hasIntegratedCPP() const { return true; }
+
+ virtual void RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const;
+ };
+
+ class LLVM_LIBRARY_VISIBILITY Compile : public Common {
+ public:
+ Compile(const ToolChain &TC) : Common("gcc::Compile",
+ "gcc frontend", TC) {}
+
+ virtual bool hasGoodDiagnostics() const { return true; }
+ virtual bool hasIntegratedCPP() const { return true; }
+
+ virtual void RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const;
+ };
+
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Common {
+ public:
+ Assemble(const ToolChain &TC) : Common("gcc::Assemble",
+ "assembler (via gcc)", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const;
+ };
+
+ class LLVM_LIBRARY_VISIBILITY Link : public Common {
+ public:
+ Link(const ToolChain &TC) : Common("gcc::Link",
+ "linker (via gcc)", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
+
+ virtual void RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const;
+ };
+} // end namespace gcc
+
+namespace hexagon {
+ // For Hexagon, we do not need to instantiate tools for PreProcess, PreCompile and Compile.
+ // We simply use "clang -cc1" for those actions.
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ public:
+ Assemble(const ToolChain &TC) : Tool("hexagon::Assemble",
+ "hexagon-as", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const;
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ public:
+ Link(const ToolChain &TC) : Tool("hexagon::Link",
+ "hexagon-ld", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
+
+ virtual void RenderExtraToolArgs(const JobAction &JA,
+ ArgStringList &CmdArgs) const;
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+} // end namespace hexagon.
+
+
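+ /// darwin -- Darwin tool implementations (cc1 wrappers, assembler, linker,
+ /// lipo, dsymutil and dwarfdump verification).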
+namespace darwin {
+ class LLVM_LIBRARY_VISIBILITY DarwinTool : public Tool {
+ virtual void anchor();
+ protected:
+ void AddDarwinArch(const ArgList &Args, ArgStringList &CmdArgs) const;
+
+ const toolchains::Darwin &getDarwinToolChain() const {
+ return reinterpret_cast<const toolchains::Darwin&>(getToolChain());
+ }
+
+ public:
+ DarwinTool(const char *Name, const char *ShortName,
+ const ToolChain &TC) : Tool(Name, ShortName, TC) {}
+ };
+
+ class LLVM_LIBRARY_VISIBILITY CC1 : public DarwinTool {
+ virtual void anchor();
+ public:
+ static const char *getBaseInputName(const ArgList &Args,
+ const InputInfoList &Input);
+ static const char *getBaseInputStem(const ArgList &Args,
+ const InputInfoList &Input);
+ static const char *getDependencyFileName(const ArgList &Args,
+ const InputInfoList &Inputs);
+
+ protected:
+ const char *getCC1Name(types::ID Type) const;
+
+ void AddCC1Args(const ArgList &Args, ArgStringList &CmdArgs) const;
+ void RemoveCC1UnsupportedArgs(ArgStringList &CmdArgs) const;
+ void AddCC1OptionsArgs(const ArgList &Args, ArgStringList &CmdArgs,
+ const InputInfoList &Inputs,
+ const ArgStringList &OutputArgs) const;
+ void AddCPPOptionsArgs(const ArgList &Args, ArgStringList &CmdArgs,
+ const InputInfoList &Inputs,
+ const ArgStringList &OutputArgs) const;
+ void AddCPPUniqueOptionsArgs(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const InputInfoList &Inputs) const;
+ void AddCPPArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
+
+ public:
+ CC1(const char *Name, const char *ShortName,
+ const ToolChain &TC) : DarwinTool(Name, ShortName, TC) {}
+
+ virtual bool hasGoodDiagnostics() const { return true; }
+ virtual bool hasIntegratedCPP() const { return true; }
+ };
+
+ class LLVM_LIBRARY_VISIBILITY Preprocess : public CC1 {
+ public:
+ Preprocess(const ToolChain &TC) : CC1("darwin::Preprocess",
+ "gcc preprocessor", TC) {}
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+
+ class LLVM_LIBRARY_VISIBILITY Compile : public CC1 {
+ public:
+ Compile(const ToolChain &TC) : CC1("darwin::Compile", "gcc frontend", TC) {}
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+
+ class LLVM_LIBRARY_VISIBILITY Assemble : public DarwinTool {
+ public:
+ Assemble(const ToolChain &TC) : DarwinTool("darwin::Assemble",
+ "assembler", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+
+ class LLVM_LIBRARY_VISIBILITY Link : public DarwinTool {
+ void AddLinkArgs(Compilation &C, const ArgList &Args,
+ ArgStringList &CmdArgs) const;
+
+ public:
+ Link(const ToolChain &TC) : DarwinTool("darwin::Link", "linker", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+
+ class LLVM_LIBRARY_VISIBILITY Lipo : public DarwinTool {
+ public:
+ Lipo(const ToolChain &TC) : DarwinTool("darwin::Lipo", "lipo", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+
+ class LLVM_LIBRARY_VISIBILITY Dsymutil : public DarwinTool {
+ public:
+ Dsymutil(const ToolChain &TC) : DarwinTool("darwin::Dsymutil",
+ "dsymutil", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+
+ class LLVM_LIBRARY_VISIBILITY VerifyDebug : public DarwinTool {
+ public:
+ VerifyDebug(const ToolChain &TC) : DarwinTool("darwin::VerifyDebug",
+ "dwarfdump", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+
+} // end namespace darwin
+
+ /// openbsd -- Directly call GNU Binutils assembler and linker
+namespace openbsd {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ public:
+ Assemble(const ToolChain &TC) : Tool("openbsd::Assemble", "assembler",
+ TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ public:
+ Link(const ToolChain &TC) : Tool("openbsd::Link", "linker", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+} // end namespace openbsd
+
+ /// freebsd -- Directly call GNU Binutils assembler and linker
+namespace freebsd {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ public:
+ Assemble(const ToolChain &TC) : Tool("freebsd::Assemble", "assembler",
+ TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ public:
+ Link(const ToolChain &TC) : Tool("freebsd::Link", "linker", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+} // end namespace freebsd
+
+ /// netbsd -- Directly call GNU Binutils assembler and linker
+namespace netbsd {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+
+ public:
+ Assemble(const ToolChain &TC)
+ : Tool("netbsd::Assemble", "assembler", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+
+ public:
+ Link(const ToolChain &TC)
+ : Tool("netbsd::Link", "linker", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+} // end namespace netbsd
+
+ /// linux -- Directly call GNU Binutils assembler and linker
+namespace linuxtools {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ public:
+ Assemble(const ToolChain &TC) : Tool("linux::Assemble", "assembler",
+ TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ public:
+ Link(const ToolChain &TC) : Tool("linux::Link", "linker", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+} // end namespace linuxtools
+
+ /// minix -- Directly call GNU Binutils assembler and linker
+namespace minix {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ public:
+ Assemble(const ToolChain &TC) : Tool("minix::Assemble", "assembler",
+ TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ public:
+ Link(const ToolChain &TC) : Tool("minix::Link", "linker", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+} // end namespace minix
+
+ /// solaris -- Directly call Solaris assembler and linker
+namespace solaris {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ public:
+ Assemble(const ToolChain &TC) : Tool("solaris::Assemble", "assembler",
+ TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ public:
+ Link(const ToolChain &TC) : Tool("solaris::Link", "linker", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+} // end namespace solaris
+
+ /// auroraux -- Directly call GNU Binutils assembler and linker
+namespace auroraux {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ public:
+ Assemble(const ToolChain &TC) : Tool("auroraux::Assemble", "assembler",
+ TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ public:
+ Link(const ToolChain &TC) : Tool("auroraux::Link", "linker", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+} // end namespace auroraux
+
+ /// dragonfly -- Directly call GNU Binutils assembler and linker
+namespace dragonfly {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+ public:
+ Assemble(const ToolChain &TC) : Tool("dragonfly::Assemble", "assembler",
+ TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ public:
+ Link(const ToolChain &TC) : Tool("dragonfly::Link", "linker", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+} // end namespace dragonfly
+
+ /// Visual studio tools.
+namespace visualstudio {
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+ public:
+ Link(const ToolChain &TC) : Tool("visualstudio::Link", "linker", TC) {}
+
+ virtual bool hasIntegratedCPP() const { return false; }
+ virtual bool isLinkJob() const { return true; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+} // end namespace visualstudio
+
+} // end namespace tools
+} // end namespace driver
+} // end namespace clang
+
+#endif // CLANG_LIB_DRIVER_TOOLS_H_
diff --git a/contrib/llvm/tools/clang/lib/Driver/Types.cpp b/contrib/llvm/tools/clang/lib/Driver/Types.cpp
new file mode 100644
index 0000000..50742fe
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/Types.cpp
@@ -0,0 +1,254 @@
+//===--- Types.cpp - Driver input & temporary type information ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Types.h"
+
+#include "llvm/ADT/StringSwitch.h"
+#include <string.h>
+#include <cassert>
+
+using namespace clang::driver;
+using namespace clang::driver::types;
+
+struct TypeInfo {
+ const char *Name;
+ const char *Flags;
+ const char *TempSuffix;
+ ID PreprocessedType;
+};
+
+static const TypeInfo TypeInfos[] = {
+#define TYPE(NAME, ID, PP_TYPE, TEMP_SUFFIX, FLAGS) \
+ { NAME, FLAGS, TEMP_SUFFIX, TY_##PP_TYPE, },
+#include "clang/Driver/Types.def"
+#undef TYPE
+};
+static const unsigned numTypes = sizeof(TypeInfos) / sizeof(TypeInfos[0]);
+
+static const TypeInfo &getInfo(unsigned id) {
+ assert(id > 0 && id - 1 < numTypes && "Invalid Type ID.");
+ return TypeInfos[id - 1];
+}
+
+const char *types::getTypeName(ID Id) {
+ return getInfo(Id).Name;
+}
+
+types::ID types::getPreprocessedType(ID Id) {
+ return getInfo(Id).PreprocessedType;
+}
+
+const char *types::getTypeTempSuffix(ID Id) {
+ return getInfo(Id).TempSuffix;
+}
+
+bool types::onlyAssembleType(ID Id) {
+ return strchr(getInfo(Id).Flags, 'a');
+}
+
+bool types::onlyPrecompileType(ID Id) {
+ return strchr(getInfo(Id).Flags, 'p');
+}
+
+bool types::canTypeBeUserSpecified(ID Id) {
+ return strchr(getInfo(Id).Flags, 'u');
+}
+
+bool types::appendSuffixForType(ID Id) {
+ return strchr(getInfo(Id).Flags, 'A');
+}
+
+bool types::canLipoType(ID Id) {
+ return (Id == TY_Nothing ||
+ Id == TY_Image ||
+ Id == TY_Object);
+}
+
+bool types::isAcceptedByClang(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_Asm:
+ case TY_C: case TY_PP_C:
+ case TY_CL:
+ case TY_CUDA:
+ case TY_ObjC: case TY_PP_ObjC: case TY_PP_ObjC_Alias:
+ case TY_CXX: case TY_PP_CXX:
+ case TY_ObjCXX: case TY_PP_ObjCXX: case TY_PP_ObjCXX_Alias:
+ case TY_CHeader: case TY_PP_CHeader:
+ case TY_CLHeader:
+ case TY_ObjCHeader: case TY_PP_ObjCHeader:
+ case TY_CXXHeader: case TY_PP_CXXHeader:
+ case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader:
+ case TY_AST:
+ case TY_LLVM_IR: case TY_LLVM_BC:
+ return true;
+ }
+}
+
+bool types::isOnlyAcceptedByClang(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_AST:
+ case TY_LLVM_IR:
+ case TY_LLVM_BC:
+ case TY_RewrittenObjC:
+ case TY_RewrittenLegacyObjC:
+ return true;
+ }
+}
+
+bool types::isObjC(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_ObjC: case TY_PP_ObjC: case TY_PP_ObjC_Alias:
+ case TY_ObjCXX: case TY_PP_ObjCXX:
+ case TY_ObjCHeader: case TY_PP_ObjCHeader:
+ case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader: case TY_PP_ObjCXX_Alias:
+ return true;
+ }
+}
+
+bool types::isCXX(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_CXX: case TY_PP_CXX:
+ case TY_ObjCXX: case TY_PP_ObjCXX:
+ case TY_CXXHeader: case TY_PP_CXXHeader:
+ case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader:
+ return true;
+ }
+}
+
+types::ID types::lookupTypeForExtension(const char *Ext) {
+ return llvm::StringSwitch<types::ID>(Ext)
+ .Case("c", TY_C)
+ .Case("i", TY_PP_C)
+ .Case("m", TY_ObjC)
+ .Case("M", TY_ObjCXX)
+ .Case("h", TY_CHeader)
+ .Case("C", TY_CXX)
+ .Case("H", TY_CXXHeader)
+ .Case("f", TY_PP_Fortran)
+ .Case("F", TY_Fortran)
+ .Case("s", TY_PP_Asm)
+ .Case("S", TY_Asm)
+ .Case("o", TY_Object)
+ .Case("ii", TY_PP_CXX)
+ .Case("mi", TY_PP_ObjC)
+ .Case("mm", TY_ObjCXX)
+ .Case("bc", TY_LLVM_BC)
+ .Case("cc", TY_CXX)
+ .Case("CC", TY_CXX)
+ .Case("cl", TY_CL)
+ .Case("cp", TY_CXX)
+ .Case("cu", TY_CUDA)
+ .Case("hh", TY_CXXHeader)
+ .Case("ll", TY_LLVM_IR)
+ .Case("hpp", TY_CXXHeader)
+ .Case("ads", TY_Ada)
+ .Case("adb", TY_Ada)
+ .Case("ast", TY_AST)
+ .Case("c++", TY_CXX)
+ .Case("C++", TY_CXX)
+ .Case("cxx", TY_CXX)
+ .Case("cpp", TY_CXX)
+ .Case("CPP", TY_CXX)
+ .Case("CXX", TY_CXX)
+ .Case("for", TY_PP_Fortran)
+ .Case("FOR", TY_PP_Fortran)
+ .Case("fpp", TY_Fortran)
+ .Case("FPP", TY_Fortran)
+ .Case("f90", TY_PP_Fortran)
+ .Case("f95", TY_PP_Fortran)
+ .Case("F90", TY_Fortran)
+ .Case("F95", TY_Fortran)
+ .Case("mii", TY_PP_ObjCXX)
+ .Default(TY_INVALID);
+}
+
+types::ID types::lookupTypeForTypeSpecifier(const char *Name) {
+ unsigned N = strlen(Name);
+
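+  // Compare N + 1 bytes so the terminating NUL must match too, i.e. only an
+  // exact name match (not a prefix) selects the type.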
+ for (unsigned i=0; i<numTypes; ++i) {
+ types::ID Id = (types::ID) (i + 1);
+ if (canTypeBeUserSpecified(Id) &&
+ memcmp(Name, getInfo(Id).Name, N + 1) == 0)
+ return Id;
+ }
+
+ return TY_INVALID;
+}
+
+// FIXME: Why don't we just put this list in the defs file, eh.
+
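+// For example, a .c file has four phases (preprocess, compile, assemble,
+// link), a .S file has three (preprocess, assemble, link) and a .s file only
+// two (assemble, link).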
+unsigned types::getNumCompilationPhases(ID Id) {
+ if (Id == TY_Object)
+ return 1;
+
+ unsigned N = 0;
+ if (getPreprocessedType(Id) != TY_INVALID)
+ N += 1;
+
+ if (onlyAssembleType(Id))
+ return N + 2; // assemble, link
+ if (onlyPrecompileType(Id))
+ return N + 1; // precompile
+
+ return N + 3; // compile, assemble, link
+}
+
+phases::ID types::getCompilationPhase(ID Id, unsigned N) {
+ assert(N < getNumCompilationPhases(Id) && "Invalid index.");
+
+ if (Id == TY_Object)
+ return phases::Link;
+
+ if (getPreprocessedType(Id) != TY_INVALID) {
+ if (N == 0)
+ return phases::Preprocess;
+ --N;
+ }
+
+ if (onlyAssembleType(Id))
+ return N == 0 ? phases::Assemble : phases::Link;
+
+ if (onlyPrecompileType(Id))
+ return phases::Precompile;
+
+ if (N == 0)
+ return phases::Compile;
+ if (N == 1)
+ return phases::Assemble;
+
+ return phases::Link;
+}
+
+ID types::lookupCXXTypeForCType(ID Id) {
+ switch (Id) {
+ default:
+ return Id;
+
+ case types::TY_C:
+ return types::TY_CXX;
+ case types::TY_PP_C:
+ return types::TY_PP_CXX;
+ case types::TY_CHeader:
+ return types::TY_CXXHeader;
+ case types::TY_PP_CHeader:
+ return types::TY_PP_CXXHeader;
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Driver/WindowsToolChain.cpp b/contrib/llvm/tools/clang/lib/Driver/WindowsToolChain.cpp
new file mode 100644
index 0000000..6827034
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Driver/WindowsToolChain.cpp
@@ -0,0 +1,368 @@
+//===--- WindowsToolChain.cpp - Windows ToolChain Implementation ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ToolChains.h"
+
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Options.h"
+#include "clang/Basic/Version.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Path.h"
+
+// Include the necessary headers to interface with the Windows registry and
+// environment.
+#ifdef _MSC_VER
+ #define WIN32_LEAN_AND_MEAN
+ #define NOGDI
+ #define NOMINMAX
+ #include <Windows.h>
+#endif
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang;
+
+Windows::Windows(const Driver &D, const llvm::Triple& Triple)
+ : ToolChain(D, Triple) {
+}
+
+Tool &Windows::SelectTool(const Compilation &C, const JobAction &JA,
+ const ActionList &Inputs) const {
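+  // When the Clang frontend handles this action, collapse every
+  // compilation-style job onto a single cached tool keyed by AnalyzeJobClass.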
+ Action::ActionClass Key;
+ if (getDriver().ShouldUseClangCompiler(C, JA, getTriple()))
+ Key = Action::AnalyzeJobClass;
+ else
+ Key = JA.getKind();
+
+ bool UseIntegratedAs = C.getArgs().hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ IsIntegratedAssemblerDefault());
+
+ Tool *&T = Tools[Key];
+ if (!T) {
+ switch (Key) {
+ case Action::InputClass:
+ case Action::BindArchClass:
+ case Action::LipoJobClass:
+ case Action::DsymutilJobClass:
+ case Action::VerifyJobClass:
+ llvm_unreachable("Invalid tool kind.");
+ case Action::PreprocessJobClass:
+ case Action::PrecompileJobClass:
+ case Action::AnalyzeJobClass:
+ case Action::MigrateJobClass:
+ case Action::CompileJobClass:
+ T = new tools::Clang(*this); break;
+ case Action::AssembleJobClass:
+ if (!UseIntegratedAs && getTriple().getEnvironment() == llvm::Triple::MachO)
+ T = new tools::darwin::Assemble(*this);
+ else
+ T = new tools::ClangAs(*this);
+ break;
+ case Action::LinkJobClass:
+ T = new tools::visualstudio::Link(*this); break;
+ }
+ }
+
+ return *T;
+}
+
+bool Windows::IsIntegratedAssemblerDefault() const {
+ return true;
+}
+
+bool Windows::IsUnwindTablesDefault() const {
+ // FIXME: Gross; we should probably have some separate target
+ // definition, possibly even reusing the one in clang.
+ return getArchName() == "x86_64";
+}
+
+const char *Windows::GetDefaultRelocationModel() const {
+ return "static";
+}
+
+const char *Windows::GetForcedPicModel() const {
+ if (getArchName() == "x86_64")
+ return "pic";
+ return 0;
+}
+
+// FIXME: This probably should go to some platform utils place.
+#ifdef _MSC_VER
+
+/// \brief Read registry string.
+/// This also supports a means to look for high-versioned keys by use
+/// of a $VERSION placeholder in the key path.
+/// $VERSION in the key path is a placeholder for the version number,
+/// causing the highest value path to be searched for and used.
+/// I.e. "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\$VERSION".
+/// There can be additional characters in the component. Only the numeric
+/// characters are compared.
+static bool getSystemRegistryString(const char *keyPath, const char *valueName,
+ char *value, size_t maxLength) {
+ HKEY hRootKey = NULL;
+ HKEY hKey = NULL;
+ const char* subKey = NULL;
+ DWORD valueType;
+ DWORD valueSize = maxLength - 1;
+ long lResult;
+ bool returnValue = false;
+
+ if (strncmp(keyPath, "HKEY_CLASSES_ROOT\\", 18) == 0) {
+ hRootKey = HKEY_CLASSES_ROOT;
+ subKey = keyPath + 18;
+ } else if (strncmp(keyPath, "HKEY_USERS\\", 11) == 0) {
+ hRootKey = HKEY_USERS;
+ subKey = keyPath + 11;
+ } else if (strncmp(keyPath, "HKEY_LOCAL_MACHINE\\", 19) == 0) {
+ hRootKey = HKEY_LOCAL_MACHINE;
+ subKey = keyPath + 19;
+ } else if (strncmp(keyPath, "HKEY_CURRENT_USER\\", 18) == 0) {
+ hRootKey = HKEY_CURRENT_USER;
+ subKey = keyPath + 18;
+ } else {
+ return false;
+ }
+
+ const char *placeHolder = strstr(subKey, "$VERSION");
+ char bestName[256];
+ bestName[0] = '\0';
+ // If we have a $VERSION placeholder, do the highest-version search.
+ if (placeHolder) {
+ const char *keyEnd = placeHolder - 1;
+ const char *nextKey = placeHolder;
+ // Find end of previous key.
+ while ((keyEnd > subKey) && (*keyEnd != '\\'))
+ keyEnd--;
+ // Find end of key containing $VERSION.
+ while (*nextKey && (*nextKey != '\\'))
+ nextKey++;
+ size_t partialKeyLength = keyEnd - subKey;
+ char partialKey[256];
+    // Clamp to leave room for the terminating NUL written below.
+    if (partialKeyLength >= sizeof(partialKey))
+      partialKeyLength = sizeof(partialKey) - 1;
+ strncpy(partialKey, subKey, partialKeyLength);
+ partialKey[partialKeyLength] = '\0';
+ HKEY hTopKey = NULL;
+ lResult = RegOpenKeyEx(hRootKey, partialKey, 0, KEY_READ, &hTopKey);
+ if (lResult == ERROR_SUCCESS) {
+ char keyName[256];
+ int bestIndex = -1;
+ double bestValue = 0.0;
+ DWORD index, size = sizeof(keyName) - 1;
+ for (index = 0; RegEnumKeyEx(hTopKey, index, keyName, &size, NULL,
+ NULL, NULL, NULL) == ERROR_SUCCESS; index++) {
+ const char *sp = keyName;
+ while (*sp && !isdigit(*sp))
+ sp++;
+ if (!*sp)
+ continue;
+ const char *ep = sp + 1;
+ while (*ep && (isdigit(*ep) || (*ep == '.')))
+ ep++;
+ char numBuf[32];
+ strncpy(numBuf, sp, sizeof(numBuf) - 1);
+ numBuf[sizeof(numBuf) - 1] = '\0';
+ double value = strtod(numBuf, NULL);
+ if (value > bestValue) {
+ bestIndex = (int)index;
+ bestValue = value;
+ strcpy(bestName, keyName);
+ }
+ size = sizeof(keyName) - 1;
+ }
+ // If we found the highest versioned key, open the key and get the value.
+ if (bestIndex != -1) {
+ // Append rest of key.
+ strncat(bestName, nextKey, sizeof(bestName) - strlen(bestName) - 1);
+ bestName[sizeof(bestName) - 1] = '\0';
+ // Open the chosen key path remainder.
+ lResult = RegOpenKeyEx(hTopKey, bestName, 0, KEY_READ, &hKey);
+ if (lResult == ERROR_SUCCESS) {
+ lResult = RegQueryValueEx(hKey, valueName, NULL, &valueType,
+ (LPBYTE)value, &valueSize);
+ if (lResult == ERROR_SUCCESS)
+ returnValue = true;
+ RegCloseKey(hKey);
+ }
+ }
+ RegCloseKey(hTopKey);
+ }
+ } else {
+ lResult = RegOpenKeyEx(hRootKey, subKey, 0, KEY_READ, &hKey);
+ if (lResult == ERROR_SUCCESS) {
+ lResult = RegQueryValueEx(hKey, valueName, NULL, &valueType,
+ (LPBYTE)value, &valueSize);
+ if (lResult == ERROR_SUCCESS)
+ returnValue = true;
+ RegCloseKey(hKey);
+ }
+ }
+ return returnValue;
+}
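+
+// Illustrative only (not part of the original code): a minimal sketch of how
+// the $VERSION lookup above is meant to be called. The key path matches one
+// used later in this file; 'vsDir' is a hypothetical local buffer.
+//   char vsDir[256];
+//   if (getSystemRegistryString(
+//       "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\$VERSION",
+//       "InstallDir", vsDir, sizeof(vsDir) - 1))
+//     ; // vsDir now holds InstallDir of the highest installed version.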
+
+/// \brief Get Windows SDK installation directory.
+static bool getWindowsSDKDir(std::string &path) {
+ char windowsSDKInstallDir[256];
+ // Try the Windows registry.
+ bool hasSDKDir = getSystemRegistryString(
+ "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\$VERSION",
+ "InstallationFolder",
+ windowsSDKInstallDir,
+ sizeof(windowsSDKInstallDir) - 1);
+ // Use the installation folder reported by the registry, if any.
+ if (hasSDKDir && windowsSDKInstallDir[0]) {
+ path = windowsSDKInstallDir;
+ return true;
+ }
+ return false;
+}
+
+/// \brief Get Visual Studio installation directory.
+static bool getVisualStudioDir(std::string &path) {
+ // First check the environment variables that vsvars32.bat sets.
+ const char* vcinstalldir = getenv("VCINSTALLDIR");
+ if (vcinstalldir) {
+ char *p = const_cast<char *>(strstr(vcinstalldir, "\\VC"));
+ if (p)
+ *p = '\0';
+ path = vcinstalldir;
+ return true;
+ }
+
+ char vsIDEInstallDir[256];
+ char vsExpressIDEInstallDir[256];
+ // Then try the windows registry.
+ bool hasVCDir = getSystemRegistryString(
+ "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VisualStudio\\$VERSION",
+ "InstallDir", vsIDEInstallDir, sizeof(vsIDEInstallDir) - 1);
+ bool hasVCExpressDir = getSystemRegistryString(
+ "HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\VCExpress\\$VERSION",
+ "InstallDir", vsExpressIDEInstallDir, sizeof(vsExpressIDEInstallDir) - 1);
+ // Prefer the full Visual Studio IDE install over the Express edition.
+ if (hasVCDir && vsIDEInstallDir[0]) {
+ char *p = (char*)strstr(vsIDEInstallDir, "\\Common7\\IDE");
+ if (p)
+ *p = '\0';
+ path = vsIDEInstallDir;
+ return true;
+ }
+
+ if (hasVCExpressDir && vsExpressIDEInstallDir[0]) {
+ char *p = (char*)strstr(vsExpressIDEInstallDir, "\\Common7\\IDE");
+ if (p)
+ *p = '\0';
+ path = vsExpressIDEInstallDir;
+ return true;
+ }
+
+ // Try the environment.
+ const char *vs100comntools = getenv("VS100COMNTOOLS");
+ const char *vs90comntools = getenv("VS90COMNTOOLS");
+ const char *vs80comntools = getenv("VS80COMNTOOLS");
+ const char *vscomntools = NULL;
+
+ // Try to find the version that we were compiled with
+ if (false) {}
+ #if (_MSC_VER >= 1600) // VC100
+ else if (vs100comntools) {
+ vscomntools = vs100comntools;
+ }
+ #elif (_MSC_VER == 1500) // VC90
+ else if (vs90comntools) {
+ vscomntools = vs90comntools;
+ }
+ #elif (_MSC_VER == 1400) // VC80
+ else if (vs80comntools) {
+ vscomntools = vs80comntools;
+ }
+ #endif
+ // Otherwise find any version we can
+ else if (vs100comntools)
+ vscomntools = vs100comntools;
+ else if (vs90comntools)
+ vscomntools = vs90comntools;
+ else if (vs80comntools)
+ vscomntools = vs80comntools;
+
+ if (vscomntools && *vscomntools) {
+ const char *p = strstr(vscomntools, "\\Common7\\Tools");
+ path = p ? std::string(vscomntools, p) : vscomntools;
+ return true;
+ }
+ return false;
+}
+
+#endif // _MSC_VER
+
+void Windows::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ llvm::sys::Path P(getDriver().ResourceDir);
+ P.appendComponent("include");
+ addSystemInclude(DriverArgs, CC1Args, P.str());
+ }
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc))
+ return;
+
+#ifdef _MSC_VER
+ // Honor %INCLUDE%; vcvarsall.bat sets it to the essential search paths.
+ if (const char *cl_include_dir = getenv("INCLUDE")) {
+ SmallVector<StringRef, 8> Dirs;
+ StringRef(cl_include_dir).split(Dirs, ";");
+ int n = 0;
+ for (SmallVectorImpl<StringRef>::iterator I = Dirs.begin(), E = Dirs.end();
+ I != E; ++I) {
+ StringRef d = *I;
+ if (d.size() == 0)
+ continue;
+ ++n;
+ addSystemInclude(DriverArgs, CC1Args, d);
+ }
+ if (n) return;
+ }
+
+ std::string VSDir;
+ std::string WindowsSDKDir;
+
+ // When built with access to the proper Windows APIs, try to actually find
+ // the correct include paths first.
+ if (getVisualStudioDir(VSDir)) {
+ addSystemInclude(DriverArgs, CC1Args, VSDir + "\\VC\\include");
+ if (getWindowsSDKDir(WindowsSDKDir))
+ addSystemInclude(DriverArgs, CC1Args, WindowsSDKDir + "\\include");
+ else
+ addSystemInclude(DriverArgs, CC1Args,
+ VSDir + "\\VC\\PlatformSDK\\Include");
+ return;
+ }
+#endif // _MSC_VER
+
+ // As a fallback, select default install paths.
+ const StringRef Paths[] = {
+ "C:/Program Files/Microsoft Visual Studio 10.0/VC/include",
+ "C:/Program Files/Microsoft Visual Studio 9.0/VC/include",
+ "C:/Program Files/Microsoft Visual Studio 9.0/VC/PlatformSDK/Include",
+ "C:/Program Files/Microsoft Visual Studio 8/VC/include",
+ "C:/Program Files/Microsoft Visual Studio 8/VC/PlatformSDK/Include"
+ };
+ addSystemIncludes(DriverArgs, CC1Args, Paths);
+}
+
+void Windows::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ // FIXME: There should probably be logic here to find libc++ on Windows.
+}
diff --git a/contrib/llvm/tools/clang/lib/Edit/Commit.cpp b/contrib/llvm/tools/clang/lib/Edit/Commit.cpp
new file mode 100644
index 0000000..c45ee1f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Edit/Commit.cpp
@@ -0,0 +1,345 @@
+//===----- Commit.cpp - A unit of edits -----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Edit/Commit.h"
+#include "clang/Edit/EditedSource.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/PreprocessingRecord.h"
+#include "clang/Basic/SourceManager.h"
+
+using namespace clang;
+using namespace edit;
+
+SourceLocation Commit::Edit::getFileLocation(SourceManager &SM) const {
+ SourceLocation Loc = SM.getLocForStartOfFile(Offset.getFID());
+ Loc = Loc.getLocWithOffset(Offset.getOffset());
+ assert(Loc.isFileID());
+ return Loc;
+}
+
+CharSourceRange Commit::Edit::getFileRange(SourceManager &SM) const {
+ SourceLocation Loc = getFileLocation(SM);
+ return CharSourceRange::getCharRange(Loc, Loc.getLocWithOffset(Length));
+}
+
+CharSourceRange Commit::Edit::getInsertFromRange(SourceManager &SM) const {
+ SourceLocation Loc = SM.getLocForStartOfFile(InsertFromRangeOffs.getFID());
+ Loc = Loc.getLocWithOffset(InsertFromRangeOffs.getOffset());
+ assert(Loc.isFileID());
+ return CharSourceRange::getCharRange(Loc, Loc.getLocWithOffset(Length));
+}
+
+Commit::Commit(EditedSource &Editor)
+ : SourceMgr(Editor.getSourceManager()), LangOpts(Editor.getLangOpts()),
+ PPRec(Editor.getPreprocessingRecord()),
+ Editor(&Editor), IsCommitable(true) { }
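+
+// Rough usage sketch (illustrative only; 'Editor', 'Receiver', and 'Loc' are
+// hypothetical names, not defined in this file):
+//   edit::Commit C(Editor);         // Editor is an edit::EditedSource.
+//   C.insert(Loc, "text");
+//   if (C.isCommitable())
+//     Editor.commit(C);             // Later: Editor.applyRewrites(Receiver).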
+
+bool Commit::insert(SourceLocation loc, StringRef text,
+ bool afterToken, bool beforePreviousInsertions) {
+ if (text.empty())
+ return true;
+
+ FileOffset Offs;
+ if ((!afterToken && !canInsert(loc, Offs)) ||
+ ( afterToken && !canInsertAfterToken(loc, Offs, loc))) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addInsert(loc, Offs, text, beforePreviousInsertions);
+ return true;
+}
+
+bool Commit::insertFromRange(SourceLocation loc,
+ CharSourceRange range,
+ bool afterToken, bool beforePreviousInsertions) {
+ FileOffset RangeOffs;
+ unsigned RangeLen;
+ if (!canRemoveRange(range, RangeOffs, RangeLen)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ FileOffset Offs;
+ if ((!afterToken && !canInsert(loc, Offs)) ||
+ ( afterToken && !canInsertAfterToken(loc, Offs, loc))) {
+ IsCommitable = false;
+ return false;
+ }
+
+ if (PPRec &&
+ PPRec->areInDifferentConditionalDirectiveRegion(loc, range.getBegin())) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addInsertFromRange(loc, Offs, RangeOffs, RangeLen, beforePreviousInsertions);
+ return true;
+}
+
+bool Commit::remove(CharSourceRange range) {
+ FileOffset Offs;
+ unsigned Len;
+ if (!canRemoveRange(range, Offs, Len)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addRemove(range.getBegin(), Offs, Len);
+ return true;
+}
+
+bool Commit::insertWrap(StringRef before, CharSourceRange range,
+ StringRef after) {
+ bool commitableBefore = insert(range.getBegin(), before, /*afterToken=*/false,
+ /*beforePreviousInsertions=*/true);
+ bool commitableAfter;
+ if (range.isTokenRange())
+ commitableAfter = insertAfterToken(range.getEnd(), after);
+ else
+ commitableAfter = insert(range.getEnd(), after);
+
+ return commitableBefore && commitableAfter;
+}
+
+bool Commit::replace(CharSourceRange range, StringRef text) {
+ if (text.empty())
+ return remove(range);
+
+ FileOffset Offs;
+ unsigned Len;
+ if (!canInsert(range.getBegin(), Offs) || !canRemoveRange(range, Offs, Len)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addRemove(range.getBegin(), Offs, Len);
+ addInsert(range.getBegin(), Offs, text, false);
+ return true;
+}
+
+bool Commit::replaceWithInner(CharSourceRange range,
+ CharSourceRange replacementRange) {
+ FileOffset OuterBegin;
+ unsigned OuterLen;
+ if (!canRemoveRange(range, OuterBegin, OuterLen)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ FileOffset InnerBegin;
+ unsigned InnerLen;
+ if (!canRemoveRange(replacementRange, InnerBegin, InnerLen)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ FileOffset OuterEnd = OuterBegin.getWithOffset(OuterLen);
+ FileOffset InnerEnd = InnerBegin.getWithOffset(InnerLen);
+ if (OuterBegin.getFID() != InnerBegin.getFID() ||
+ InnerBegin < OuterBegin ||
+ InnerBegin > OuterEnd ||
+ InnerEnd > OuterEnd) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addRemove(range.getBegin(),
+ OuterBegin, InnerBegin.getOffset() - OuterBegin.getOffset());
+ addRemove(replacementRange.getEnd(),
+ InnerEnd, OuterEnd.getOffset() - InnerEnd.getOffset());
+ return true;
+}
+
+bool Commit::replaceText(SourceLocation loc, StringRef text,
+ StringRef replacementText) {
+ if (text.empty() || replacementText.empty())
+ return true;
+
+ FileOffset Offs;
+ unsigned Len;
+ if (!canReplaceText(loc, replacementText, Offs, Len)) {
+ IsCommitable = false;
+ return false;
+ }
+
+ addRemove(loc, Offs, Len);
+ addInsert(loc, Offs, text, false);
+ return true;
+}
+
+void Commit::addInsert(SourceLocation OrigLoc, FileOffset Offs, StringRef text,
+ bool beforePreviousInsertions) {
+ if (text.empty())
+ return;
+
+ Edit data;
+ data.Kind = Act_Insert;
+ data.OrigLoc = OrigLoc;
+ data.Offset = Offs;
+ data.Text = text;
+ data.BeforePrev = beforePreviousInsertions;
+ CachedEdits.push_back(data);
+}
+
+void Commit::addInsertFromRange(SourceLocation OrigLoc, FileOffset Offs,
+ FileOffset RangeOffs, unsigned RangeLen,
+ bool beforePreviousInsertions) {
+ if (RangeLen == 0)
+ return;
+
+ Edit data;
+ data.Kind = Act_InsertFromRange;
+ data.OrigLoc = OrigLoc;
+ data.Offset = Offs;
+ data.InsertFromRangeOffs = RangeOffs;
+ data.Length = RangeLen;
+ data.BeforePrev = beforePreviousInsertions;
+ CachedEdits.push_back(data);
+}
+
+void Commit::addRemove(SourceLocation OrigLoc,
+ FileOffset Offs, unsigned Len) {
+ if (Len == 0)
+ return;
+
+ Edit data;
+ data.Kind = Act_Remove;
+ data.OrigLoc = OrigLoc;
+ data.Offset = Offs;
+ data.Length = Len;
+ CachedEdits.push_back(data);
+}
+
+bool Commit::canInsert(SourceLocation loc, FileOffset &offs) {
+ if (loc.isInvalid())
+ return false;
+
+ if (loc.isMacroID())
+ isAtStartOfMacroExpansion(loc, &loc);
+
+ const SourceManager &SM = SourceMgr;
+ while (SM.isMacroArgExpansion(loc))
+ loc = SM.getImmediateSpellingLoc(loc);
+
+ if (loc.isMacroID())
+ if (!isAtStartOfMacroExpansion(loc, &loc))
+ return false;
+
+ if (SM.isInSystemHeader(loc))
+ return false;
+
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);
+ if (locInfo.first.isInvalid())
+ return false;
+ offs = FileOffset(locInfo.first, locInfo.second);
+ return canInsertInOffset(loc, offs);
+}
+
+bool Commit::canInsertAfterToken(SourceLocation loc, FileOffset &offs,
+ SourceLocation &AfterLoc) {
+ if (loc.isInvalid())
+ return false;
+
+ SourceLocation spellLoc = SourceMgr.getSpellingLoc(loc);
+ unsigned tokLen = Lexer::MeasureTokenLength(spellLoc, SourceMgr, LangOpts);
+ AfterLoc = loc.getLocWithOffset(tokLen);
+
+ if (loc.isMacroID())
+ isAtEndOfMacroExpansion(loc, &loc);
+
+ const SourceManager &SM = SourceMgr;
+ while (SM.isMacroArgExpansion(loc))
+ loc = SM.getImmediateSpellingLoc(loc);
+
+ if (loc.isMacroID())
+ if (!isAtEndOfMacroExpansion(loc, &loc))
+ return false;
+
+ if (SM.isInSystemHeader(loc))
+ return false;
+
+ loc = Lexer::getLocForEndOfToken(loc, 0, SourceMgr, LangOpts);
+ if (loc.isInvalid())
+ return false;
+
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);
+ if (locInfo.first.isInvalid())
+ return false;
+ offs = FileOffset(locInfo.first, locInfo.second);
+ return canInsertInOffset(loc, offs);
+}
+
+bool Commit::canInsertInOffset(SourceLocation OrigLoc, FileOffset Offs) {
+ for (unsigned i = 0, e = CachedEdits.size(); i != e; ++i) {
+ Edit &act = CachedEdits[i];
+ if (act.Kind == Act_Remove) {
+ if (act.Offset.getFID() == Offs.getFID() &&
+ Offs > act.Offset && Offs < act.Offset.getWithOffset(act.Length))
+ return false; // position has been removed.
+ }
+ }
+
+ if (!Editor)
+ return true;
+ return Editor->canInsertInOffset(OrigLoc, Offs);
+}
+
+bool Commit::canRemoveRange(CharSourceRange range,
+ FileOffset &Offs, unsigned &Len) {
+ const SourceManager &SM = SourceMgr;
+ range = Lexer::makeFileCharRange(range, SM, LangOpts);
+ if (range.isInvalid())
+ return false;
+
+ if (range.getBegin().isMacroID() || range.getEnd().isMacroID())
+ return false;
+ if (SM.isInSystemHeader(range.getBegin()) ||
+ SM.isInSystemHeader(range.getEnd()))
+ return false;
+
+ if (PPRec && PPRec->rangeIntersectsConditionalDirective(range.getAsRange()))
+ return false;
+
+ std::pair<FileID, unsigned> beginInfo = SM.getDecomposedLoc(range.getBegin());
+ std::pair<FileID, unsigned> endInfo = SM.getDecomposedLoc(range.getEnd());
+ if (beginInfo.first != endInfo.first ||
+ beginInfo.second > endInfo.second)
+ return false;
+
+ Offs = FileOffset(beginInfo.first, beginInfo.second);
+ Len = endInfo.second - beginInfo.second;
+ return true;
+}
+
+bool Commit::canReplaceText(SourceLocation loc, StringRef text,
+ FileOffset &Offs, unsigned &Len) {
+ assert(!text.empty());
+
+ if (!canInsert(loc, Offs))
+ return false;
+
+ // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SourceMgr.getBufferData(Offs.getFID(), &invalidTemp);
+ if (invalidTemp)
+ return false;
+
+ return file.substr(Offs.getOffset()).startswith(text);
+}
+
+bool Commit::isAtStartOfMacroExpansion(SourceLocation loc,
+ SourceLocation *MacroBegin) const {
+ return Lexer::isAtStartOfMacroExpansion(loc, SourceMgr, LangOpts, MacroBegin);
+}
+bool Commit::isAtEndOfMacroExpansion(SourceLocation loc,
+ SourceLocation *MacroEnd) const {
+ return Lexer::isAtEndOfMacroExpansion(loc, SourceMgr, LangOpts, MacroEnd);
+}
diff --git a/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp b/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp
new file mode 100644
index 0000000..5b7fa4a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Edit/EditedSource.cpp
@@ -0,0 +1,329 @@
+//===----- EditedSource.cpp - Collection of source edits ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Edit/EditedSource.h"
+#include "clang/Edit/Commit.h"
+#include "clang/Edit/EditsReceiver.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Twine.h"
+
+using namespace clang;
+using namespace edit;
+
+void EditsReceiver::remove(CharSourceRange range) {
+ replace(range, StringRef());
+}
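+
+// Rough sketch of a receiver (illustrative only; a real receiver would forward
+// these edits to whatever rewriting facility the client uses). The signatures
+// mirror the calls made by applyRewrite() below:
+//   class MyReceiver : public edit::EditsReceiver {
+//     virtual void insert(SourceLocation loc, StringRef text) { /* ... */ }
+//     virtual void replace(CharSourceRange range, StringRef text) { /* ... */ }
+//   };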
+
+StringRef EditedSource::copyString(const Twine &twine) {
+ llvm::SmallString<128> Data;
+ return copyString(twine.toStringRef(Data));
+}
+
+bool EditedSource::canInsertInOffset(SourceLocation OrigLoc, FileOffset Offs) {
+ FileEditsTy::iterator FA = getActionForOffset(Offs);
+ if (FA != FileEdits.end()) {
+ if (FA->first != Offs)
+ return false; // position has been removed.
+ }
+
+ if (SourceMgr.isMacroArgExpansion(OrigLoc)) {
+ SourceLocation
+ DefArgLoc = SourceMgr.getImmediateExpansionRange(OrigLoc).first;
+ SourceLocation
+ ExpLoc = SourceMgr.getImmediateExpansionRange(DefArgLoc).first;
+ llvm::DenseMap<unsigned, SourceLocation>::iterator
+ I = ExpansionToArgMap.find(ExpLoc.getRawEncoding());
+ if (I != ExpansionToArgMap.end() && I->second != DefArgLoc)
+ return false; // Trying to write in a macro argument input that has
+ // already been written for another argument of the same macro.
+ }
+
+ return true;
+}
+
+bool EditedSource::commitInsert(SourceLocation OrigLoc,
+ FileOffset Offs, StringRef text,
+ bool beforePreviousInsertions) {
+ if (!canInsertInOffset(OrigLoc, Offs))
+ return false;
+ if (text.empty())
+ return true;
+
+ if (SourceMgr.isMacroArgExpansion(OrigLoc)) {
+ SourceLocation
+ DefArgLoc = SourceMgr.getImmediateExpansionRange(OrigLoc).first;
+ SourceLocation
+ ExpLoc = SourceMgr.getImmediateExpansionRange(DefArgLoc).first;
+ ExpansionToArgMap[ExpLoc.getRawEncoding()] = DefArgLoc;
+ }
+
+ FileEdit &FA = FileEdits[Offs];
+ if (FA.Text.empty()) {
+ FA.Text = copyString(text);
+ return true;
+ }
+
+ Twine concat;
+ if (beforePreviousInsertions)
+ concat = Twine(text) + FA.Text;
+ else
+ concat = Twine(FA.Text) + text;
+
+ FA.Text = copyString(concat);
+ return true;
+}
+
+bool EditedSource::commitInsertFromRange(SourceLocation OrigLoc,
+ FileOffset Offs,
+ FileOffset InsertFromRangeOffs, unsigned Len,
+ bool beforePreviousInsertions) {
+ if (Len == 0)
+ return true;
+
+ llvm::SmallString<128> StrVec;
+ FileOffset BeginOffs = InsertFromRangeOffs;
+ FileOffset EndOffs = BeginOffs.getWithOffset(Len);
+ FileEditsTy::iterator I = FileEdits.upper_bound(BeginOffs);
+ if (I != FileEdits.begin())
+ --I;
+
+ for (; I != FileEdits.end(); ++I) {
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+
+ if (BeginOffs < E) {
+ if (BeginOffs >= B) {
+ BeginOffs = E;
+ ++I;
+ }
+ break;
+ }
+ }
+
+ for (; I != FileEdits.end() && EndOffs > I->first; ++I) {
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+
+ if (BeginOffs < B) {
+ bool Invalid = false;
+ StringRef text = getSourceText(BeginOffs, B, Invalid);
+ if (Invalid)
+ return false;
+ StrVec += text;
+ }
+ StrVec += FA.Text;
+ BeginOffs = E;
+ }
+
+ if (BeginOffs < EndOffs) {
+ bool Invalid = false;
+ StringRef text = getSourceText(BeginOffs, EndOffs, Invalid);
+ if (Invalid)
+ return false;
+ StrVec += text;
+ }
+
+ return commitInsert(OrigLoc, Offs, StrVec.str(), beforePreviousInsertions);
+}
+
+void EditedSource::commitRemove(SourceLocation OrigLoc,
+ FileOffset BeginOffs, unsigned Len) {
+ if (Len == 0)
+ return;
+
+ FileOffset EndOffs = BeginOffs.getWithOffset(Len);
+ FileEditsTy::iterator I = FileEdits.upper_bound(BeginOffs);
+ if (I != FileEdits.begin())
+ --I;
+
+ for (; I != FileEdits.end(); ++I) {
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+
+ if (BeginOffs < E)
+ break;
+ }
+
+ FileOffset TopBegin, TopEnd;
+ FileEdit *TopFA = 0;
+
+ if (I == FileEdits.end()) {
+ FileEditsTy::iterator
+ NewI = FileEdits.insert(I, std::make_pair(BeginOffs, FileEdit()));
+ NewI->second.RemoveLen = Len;
+ return;
+ }
+
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+ if (BeginOffs < B) {
+ FileEditsTy::iterator
+ NewI = FileEdits.insert(I, std::make_pair(BeginOffs, FileEdit()));
+ TopBegin = BeginOffs;
+ TopEnd = EndOffs;
+ TopFA = &NewI->second;
+ TopFA->RemoveLen = Len;
+ } else {
+ TopBegin = B;
+ TopEnd = E;
+ TopFA = &I->second;
+ if (TopEnd >= EndOffs)
+ return;
+ unsigned diff = EndOffs.getOffset() - TopEnd.getOffset();
+ TopEnd = EndOffs;
+ TopFA->RemoveLen += diff;
+ ++I;
+ }
+
+ while (I != FileEdits.end()) {
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+
+ if (B >= TopEnd)
+ break;
+
+ if (E <= TopEnd) {
+ FileEdits.erase(I++);
+ continue;
+ }
+
+ if (B < TopEnd) {
+ unsigned diff = E.getOffset() - TopEnd.getOffset();
+ TopEnd = E;
+ TopFA->RemoveLen += diff;
+ FileEdits.erase(I);
+ }
+
+ break;
+ }
+}
+
+bool EditedSource::commit(const Commit &commit) {
+ if (!commit.isCommitable())
+ return false;
+
+ for (edit::Commit::edit_iterator
+ I = commit.edit_begin(), E = commit.edit_end(); I != E; ++I) {
+ const edit::Commit::Edit &edit = *I;
+ switch (edit.Kind) {
+ case edit::Commit::Act_Insert:
+ commitInsert(edit.OrigLoc, edit.Offset, edit.Text, edit.BeforePrev);
+ break;
+ case edit::Commit::Act_InsertFromRange:
+ commitInsertFromRange(edit.OrigLoc, edit.Offset,
+ edit.InsertFromRangeOffs, edit.Length,
+ edit.BeforePrev);
+ break;
+ case edit::Commit::Act_Remove:
+ commitRemove(edit.OrigLoc, edit.Offset, edit.Length);
+ break;
+ }
+ }
+
+ return true;
+}
+
+static void applyRewrite(EditsReceiver &receiver,
+ StringRef text, FileOffset offs, unsigned len,
+ const SourceManager &SM) {
+ assert(!offs.getFID().isInvalid());
+ SourceLocation Loc = SM.getLocForStartOfFile(offs.getFID());
+ Loc = Loc.getLocWithOffset(offs.getOffset());
+ assert(Loc.isFileID());
+ CharSourceRange range = CharSourceRange::getCharRange(Loc,
+ Loc.getLocWithOffset(len));
+
+ if (text.empty()) {
+ assert(len);
+ receiver.remove(range);
+ return;
+ }
+
+ if (len)
+ receiver.replace(range, text);
+ else
+ receiver.insert(Loc, text);
+}
+
+void EditedSource::applyRewrites(EditsReceiver &receiver) {
+ llvm::SmallString<128> StrVec;
+ FileOffset CurOffs, CurEnd;
+ unsigned CurLen;
+
+ if (FileEdits.empty())
+ return;
+
+ FileEditsTy::iterator I = FileEdits.begin();
+ CurOffs = I->first;
+ StrVec = I->second.Text;
+ CurLen = I->second.RemoveLen;
+ CurEnd = CurOffs.getWithOffset(CurLen);
+ ++I;
+
+ for (FileEditsTy::iterator E = FileEdits.end(); I != E; ++I) {
+ FileOffset offs = I->first;
+ FileEdit act = I->second;
+ assert(offs >= CurEnd);
+
+ if (offs == CurEnd) {
+ StrVec += act.Text;
+ CurLen += act.RemoveLen;
+ CurEnd = CurEnd.getWithOffset(act.RemoveLen);
+ continue;
+ }
+
+ applyRewrite(receiver, StrVec.str(), CurOffs, CurLen, SourceMgr);
+ CurOffs = offs;
+ StrVec = act.Text;
+ CurLen = act.RemoveLen;
+ CurEnd = CurOffs.getWithOffset(CurLen);
+ }
+
+ applyRewrite(receiver, StrVec.str(), CurOffs, CurLen, SourceMgr);
+}
+
+void EditedSource::clearRewrites() {
+ FileEdits.clear();
+ StrAlloc.Reset();
+}
+
+StringRef EditedSource::getSourceText(FileOffset BeginOffs, FileOffset EndOffs,
+ bool &Invalid) {
+ assert(BeginOffs.getFID() == EndOffs.getFID());
+ assert(BeginOffs <= EndOffs);
+ SourceLocation BLoc = SourceMgr.getLocForStartOfFile(BeginOffs.getFID());
+ BLoc = BLoc.getLocWithOffset(BeginOffs.getOffset());
+ assert(BLoc.isFileID());
+ SourceLocation
+ ELoc = BLoc.getLocWithOffset(EndOffs.getOffset() - BeginOffs.getOffset());
+ return Lexer::getSourceText(CharSourceRange::getCharRange(BLoc, ELoc),
+ SourceMgr, LangOpts, &Invalid);
+}
+
+EditedSource::FileEditsTy::iterator
+EditedSource::getActionForOffset(FileOffset Offs) {
+ FileEditsTy::iterator I = FileEdits.upper_bound(Offs);
+ if (I == FileEdits.begin())
+ return FileEdits.end();
+ --I;
+ FileEdit &FA = I->second;
+ FileOffset B = I->first;
+ FileOffset E = B.getWithOffset(FA.RemoveLen);
+ if (Offs >= B && Offs < E)
+ return I;
+
+ return FileEdits.end();
+}
diff --git a/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp b/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
new file mode 100644
index 0000000..24a0db1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -0,0 +1,587 @@
+//===--- RewriteObjCFoundationAPI.cpp - Foundation API Rewriter -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Rewrites legacy method calls to modern syntax.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Edit/Rewriters.h"
+#include "clang/Edit/Commit.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/NSAPI.h"
+
+using namespace clang;
+using namespace edit;
+
+static bool checkForLiteralCreation(const ObjCMessageExpr *Msg,
+ IdentifierInfo *&ClassId) {
+ if (!Msg || Msg->isImplicit() || !Msg->getMethodDecl())
+ return false;
+
+ const ObjCInterfaceDecl *Receiver = Msg->getReceiverInterface();
+ if (!Receiver)
+ return false;
+ ClassId = Receiver->getIdentifier();
+
+ if (Msg->getReceiverKind() == ObjCMessageExpr::Class)
+ return true;
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// rewriteObjCRedundantCallWithLiteral.
+//===----------------------------------------------------------------------===//
+
+bool edit::rewriteObjCRedundantCallWithLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ IdentifierInfo *II = 0;
+ if (!checkForLiteralCreation(Msg, II))
+ return false;
+ if (Msg->getNumArgs() != 1)
+ return false;
+
+ const Expr *Arg = Msg->getArg(0)->IgnoreParenImpCasts();
+ Selector Sel = Msg->getSelector();
+
+ if ((isa<ObjCStringLiteral>(Arg) &&
+ NS.getNSClassId(NSAPI::ClassId_NSString) == II &&
+ NS.getNSStringSelector(NSAPI::NSStr_stringWithString) == Sel) ||
+
+ (isa<ObjCArrayLiteral>(Arg) &&
+ NS.getNSClassId(NSAPI::ClassId_NSArray) == II &&
+ NS.getNSArraySelector(NSAPI::NSArr_arrayWithArray) == Sel) ||
+
+ (isa<ObjCDictionaryLiteral>(Arg) &&
+ NS.getNSClassId(NSAPI::ClassId_NSDictionary) == II &&
+ NS.getNSDictionarySelector(
+ NSAPI::NSDict_dictionaryWithDictionary) == Sel)) {
+
+ commit.replaceWithInner(Msg->getSourceRange(),
+ Msg->getArg(0)->getSourceRange());
+ return true;
+ }
+
+ return false;
+}
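+
+// For illustration (hypothetical inputs, not from the original change), the
+// rewrite above drops the redundant wrapper call and keeps the literal:
+//   [NSString stringWithString:@"abc"]               -->  @"abc"
+//   [NSArray arrayWithArray:@[a, b]]                 -->  @[a, b]
+//   [NSDictionary dictionaryWithDictionary:@{k: v}]  -->  @{k: v}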
+
+//===----------------------------------------------------------------------===//
+// rewriteToObjCSubscriptSyntax.
+//===----------------------------------------------------------------------===//
+
+static void maybePutParensOnReceiver(const Expr *Receiver, Commit &commit) {
+ Receiver = Receiver->IgnoreImpCasts();
+ if (isa<BinaryOperator>(Receiver) || isa<UnaryOperator>(Receiver)) {
+ SourceRange RecRange = Receiver->getSourceRange();
+ commit.insertWrap("(", RecRange, ")");
+ }
+}
+
+static bool rewriteToSubscriptGet(const ObjCMessageExpr *Msg, Commit &commit) {
+ if (Msg->getNumArgs() != 1)
+ return false;
+ const Expr *Rec = Msg->getInstanceReceiver();
+ if (!Rec)
+ return false;
+
+ SourceRange MsgRange = Msg->getSourceRange();
+ SourceRange RecRange = Rec->getSourceRange();
+ SourceRange ArgRange = Msg->getArg(0)->getSourceRange();
+
+ commit.replaceWithInner(CharSourceRange::getCharRange(MsgRange.getBegin(),
+ ArgRange.getBegin()),
+ CharSourceRange::getTokenRange(RecRange));
+ commit.replaceWithInner(SourceRange(ArgRange.getBegin(), MsgRange.getEnd()),
+ ArgRange);
+ commit.insertWrap("[", ArgRange, "]");
+ maybePutParensOnReceiver(Rec, commit);
+ return true;
+}
+
+static bool rewriteToArraySubscriptSet(const ObjCMessageExpr *Msg,
+ Commit &commit) {
+ if (Msg->getNumArgs() != 2)
+ return false;
+ const Expr *Rec = Msg->getInstanceReceiver();
+ if (!Rec)
+ return false;
+
+ SourceRange MsgRange = Msg->getSourceRange();
+ SourceRange RecRange = Rec->getSourceRange();
+ SourceRange Arg0Range = Msg->getArg(0)->getSourceRange();
+ SourceRange Arg1Range = Msg->getArg(1)->getSourceRange();
+
+ commit.replaceWithInner(CharSourceRange::getCharRange(MsgRange.getBegin(),
+ Arg0Range.getBegin()),
+ CharSourceRange::getTokenRange(RecRange));
+ commit.replaceWithInner(CharSourceRange::getCharRange(Arg0Range.getBegin(),
+ Arg1Range.getBegin()),
+ CharSourceRange::getTokenRange(Arg0Range));
+ commit.replaceWithInner(SourceRange(Arg1Range.getBegin(), MsgRange.getEnd()),
+ Arg1Range);
+ commit.insertWrap("[", CharSourceRange::getCharRange(Arg0Range.getBegin(),
+ Arg1Range.getBegin()),
+ "] = ");
+ maybePutParensOnReceiver(Rec, commit);
+ return true;
+}
+
+static bool rewriteToDictionarySubscriptSet(const ObjCMessageExpr *Msg,
+ Commit &commit) {
+ if (Msg->getNumArgs() != 2)
+ return false;
+ const Expr *Rec = Msg->getInstanceReceiver();
+ if (!Rec)
+ return false;
+
+ SourceRange MsgRange = Msg->getSourceRange();
+ SourceRange RecRange = Rec->getSourceRange();
+ SourceRange Arg0Range = Msg->getArg(0)->getSourceRange();
+ SourceRange Arg1Range = Msg->getArg(1)->getSourceRange();
+
+ SourceLocation LocBeforeVal = Arg0Range.getBegin();
+ commit.insertBefore(LocBeforeVal, "] = ");
+ commit.insertFromRange(LocBeforeVal, Arg1Range, /*afterToken=*/false,
+ /*beforePreviousInsertions=*/true);
+ commit.insertBefore(LocBeforeVal, "[");
+ commit.replaceWithInner(CharSourceRange::getCharRange(MsgRange.getBegin(),
+ Arg0Range.getBegin()),
+ CharSourceRange::getTokenRange(RecRange));
+ commit.replaceWithInner(SourceRange(Arg0Range.getBegin(), MsgRange.getEnd()),
+ Arg0Range);
+ maybePutParensOnReceiver(Rec, commit);
+ return true;
+}
+
+bool edit::rewriteToObjCSubscriptSyntax(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ if (!Msg || Msg->isImplicit() ||
+ Msg->getReceiverKind() != ObjCMessageExpr::Instance)
+ return false;
+ const ObjCMethodDecl *Method = Msg->getMethodDecl();
+ if (!Method)
+ return false;
+
+ const ObjCInterfaceDecl *
+ IFace = NS.getASTContext().getObjContainingInterface(
+ const_cast<ObjCMethodDecl *>(Method));
+ if (!IFace)
+ return false;
+ IdentifierInfo *II = IFace->getIdentifier();
+ Selector Sel = Msg->getSelector();
+
+ if ((II == NS.getNSClassId(NSAPI::ClassId_NSArray) &&
+ Sel == NS.getNSArraySelector(NSAPI::NSArr_objectAtIndex)) ||
+ (II == NS.getNSClassId(NSAPI::ClassId_NSDictionary) &&
+ Sel == NS.getNSDictionarySelector(NSAPI::NSDict_objectForKey)))
+ return rewriteToSubscriptGet(Msg, commit);
+
+ if (Msg->getNumArgs() != 2)
+ return false;
+
+ if (II == NS.getNSClassId(NSAPI::ClassId_NSMutableArray) &&
+ Sel == NS.getNSArraySelector(NSAPI::NSMutableArr_replaceObjectAtIndex))
+ return rewriteToArraySubscriptSet(Msg, commit);
+
+ if (II == NS.getNSClassId(NSAPI::ClassId_NSMutableDictionary) &&
+ Sel == NS.getNSDictionarySelector(NSAPI::NSMutableDict_setObjectForKey))
+ return rewriteToDictionarySubscriptSet(Msg, commit);
+
+ return false;
+}
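+
+// For illustration (hypothetical receivers and arguments), the subscript
+// rewrites above produce:
+//   [array objectAtIndex:idx]                         -->  array[idx]
+//   [dict objectForKey:key]                           -->  dict[key]
+//   [marray replaceObjectAtIndex:idx withObject:obj]  -->  marray[idx] = obj
+//   [mdict setObject:obj forKey:key]                  -->  mdict[key] = obj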
+
+//===----------------------------------------------------------------------===//
+// rewriteToObjCLiteralSyntax.
+//===----------------------------------------------------------------------===//
+
+static bool rewriteToArrayLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
+static bool rewriteToDictionaryLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
+static bool rewriteToNumberLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit);
+
+bool edit::rewriteToObjCLiteralSyntax(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ IdentifierInfo *II = 0;
+ if (!checkForLiteralCreation(Msg, II))
+ return false;
+
+ if (II == NS.getNSClassId(NSAPI::ClassId_NSArray))
+ return rewriteToArrayLiteral(Msg, NS, commit);
+ if (II == NS.getNSClassId(NSAPI::ClassId_NSDictionary))
+ return rewriteToDictionaryLiteral(Msg, NS, commit);
+ if (II == NS.getNSClassId(NSAPI::ClassId_NSNumber))
+ return rewriteToNumberLiteral(Msg, NS, commit);
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// rewriteToArrayLiteral.
+//===----------------------------------------------------------------------===//
+
+static bool rewriteToArrayLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ Selector Sel = Msg->getSelector();
+ SourceRange MsgRange = Msg->getSourceRange();
+
+ if (Sel == NS.getNSArraySelector(NSAPI::NSArr_array)) {
+ if (Msg->getNumArgs() != 0)
+ return false;
+ commit.replace(MsgRange, "@[]");
+ return true;
+ }
+
+ if (Sel == NS.getNSArraySelector(NSAPI::NSArr_arrayWithObject)) {
+ if (Msg->getNumArgs() != 1)
+ return false;
+ SourceRange ArgRange = Msg->getArg(0)->getSourceRange();
+ commit.replaceWithInner(MsgRange, ArgRange);
+ commit.insertWrap("@[", ArgRange, "]");
+ return true;
+ }
+
+ if (Sel == NS.getNSArraySelector(NSAPI::NSArr_arrayWithObjects)) {
+ if (Msg->getNumArgs() == 0)
+ return false;
+ const Expr *SentinelExpr = Msg->getArg(Msg->getNumArgs() - 1);
+ if (!NS.getASTContext().isSentinelNullExpr(SentinelExpr))
+ return false;
+
+ if (Msg->getNumArgs() == 1) {
+ commit.replace(MsgRange, "@[]");
+ return true;
+ }
+ SourceRange ArgRange(Msg->getArg(0)->getLocStart(),
+ Msg->getArg(Msg->getNumArgs()-2)->getLocEnd());
+ commit.replaceWithInner(MsgRange, ArgRange);
+ commit.insertWrap("@[", ArgRange, "]");
+ return true;
+ }
+
+ return false;
+}
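+
+// For illustration (hypothetical arguments), the array rewrites above produce:
+//   [NSArray array]                       -->  @[]
+//   [NSArray arrayWithObject:a]           -->  @[a]
+//   [NSArray arrayWithObjects:a, b, nil]  -->  @[a, b]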
+
+//===----------------------------------------------------------------------===//
+// rewriteToDictionaryLiteral.
+//===----------------------------------------------------------------------===//
+
+static bool rewriteToDictionaryLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ Selector Sel = Msg->getSelector();
+ SourceRange MsgRange = Msg->getSourceRange();
+
+ if (Sel == NS.getNSDictionarySelector(NSAPI::NSDict_dictionary)) {
+ if (Msg->getNumArgs() != 0)
+ return false;
+ commit.replace(MsgRange, "@{}");
+ return true;
+ }
+
+ if (Sel == NS.getNSDictionarySelector(
+ NSAPI::NSDict_dictionaryWithObjectForKey)) {
+ if (Msg->getNumArgs() != 2)
+ return false;
+ SourceRange ValRange = Msg->getArg(0)->getSourceRange();
+ SourceRange KeyRange = Msg->getArg(1)->getSourceRange();
+ // Insert key before the value.
+ commit.insertBefore(ValRange.getBegin(), ": ");
+ commit.insertFromRange(ValRange.getBegin(),
+ CharSourceRange::getTokenRange(KeyRange),
+ /*afterToken=*/false, /*beforePreviousInsertions=*/true);
+ commit.insertBefore(ValRange.getBegin(), "@{");
+ commit.insertAfterToken(ValRange.getEnd(), "}");
+ commit.replaceWithInner(MsgRange, ValRange);
+ return true;
+ }
+
+ if (Sel == NS.getNSDictionarySelector(
+ NSAPI::NSDict_dictionaryWithObjectsAndKeys)) {
+ if (Msg->getNumArgs() % 2 != 1)
+ return false;
+ unsigned SentinelIdx = Msg->getNumArgs() - 1;
+ const Expr *SentinelExpr = Msg->getArg(SentinelIdx);
+ if (!NS.getASTContext().isSentinelNullExpr(SentinelExpr))
+ return false;
+
+ if (Msg->getNumArgs() == 1) {
+ commit.replace(MsgRange, "@{}");
+ return true;
+ }
+
+ for (unsigned i = 0; i < SentinelIdx; i += 2) {
+ SourceRange ValRange = Msg->getArg(i)->getSourceRange();
+ SourceRange KeyRange = Msg->getArg(i+1)->getSourceRange();
+ // Insert value after key.
+ commit.insertAfterToken(KeyRange.getEnd(), ": ");
+ commit.insertFromRange(KeyRange.getEnd(), ValRange, /*afterToken=*/true);
+ commit.remove(CharSourceRange::getCharRange(ValRange.getBegin(),
+ KeyRange.getBegin()));
+ }
+ // Range of arguments up until and including the last key.
+ // The sentinel and first value are cut off, the value will move after the
+ // key.
+ SourceRange ArgRange(Msg->getArg(1)->getLocStart(),
+ Msg->getArg(SentinelIdx-1)->getLocEnd());
+ commit.insertWrap("@{", ArgRange, "}");
+ commit.replaceWithInner(MsgRange, ArgRange);
+ return true;
+ }
+
+ return false;
+}
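+
+// For illustration (hypothetical arguments), the dictionary rewrites above
+// produce:
+//   [NSDictionary dictionary]                                       -->  @{}
+//   [NSDictionary dictionaryWithObject:v forKey:k]                  -->  @{k: v}
+//   [NSDictionary dictionaryWithObjectsAndKeys:v1, k1, v2, k2, nil] -->  @{k1: v1, k2: v2}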
+
+//===----------------------------------------------------------------------===//
+// rewriteToNumberLiteral.
+//===----------------------------------------------------------------------===//
+
+static bool rewriteToCharLiteral(const ObjCMessageExpr *Msg,
+ const CharacterLiteral *Arg,
+ const NSAPI &NS, Commit &commit) {
+ if (Arg->getKind() != CharacterLiteral::Ascii)
+ return false;
+ if (NS.isNSNumberLiteralSelector(NSAPI::NSNumberWithChar,
+ Msg->getSelector())) {
+ SourceRange ArgRange = Arg->getSourceRange();
+ commit.replaceWithInner(Msg->getSourceRange(), ArgRange);
+ commit.insert(ArgRange.getBegin(), "@");
+ return true;
+ }
+
+ return false;
+}
+
+static bool rewriteToBoolLiteral(const ObjCMessageExpr *Msg,
+ const Expr *Arg,
+ const NSAPI &NS, Commit &commit) {
+ if (NS.isNSNumberLiteralSelector(NSAPI::NSNumberWithBool,
+ Msg->getSelector())) {
+ SourceRange ArgRange = Arg->getSourceRange();
+ commit.replaceWithInner(Msg->getSourceRange(), ArgRange);
+ commit.insert(ArgRange.getBegin(), "@");
+ return true;
+ }
+
+ return false;
+}
+
+namespace {
+
+struct LiteralInfo {
+ bool Hex, Octal;
+ StringRef U, F, L, LL;
+ CharSourceRange WithoutSuffRange;
+};
+
+}
+
+static bool getLiteralInfo(SourceRange literalRange,
+ bool isFloat, bool isIntZero,
+ ASTContext &Ctx, LiteralInfo &Info) {
+ if (literalRange.getBegin().isMacroID() ||
+ literalRange.getEnd().isMacroID())
+ return false;
+ StringRef text = Lexer::getSourceText(
+ CharSourceRange::getTokenRange(literalRange),
+ Ctx.getSourceManager(), Ctx.getLangOpts());
+ if (text.empty())
+ return false;
+
+ llvm::Optional<bool> UpperU, UpperL;
+ bool UpperF = false;
+
+ struct Suff {
+ static bool has(StringRef suff, StringRef &text) {
+ if (text.endswith(suff)) {
+ text = text.substr(0, text.size()-suff.size());
+ return true;
+ }
+ return false;
+ }
+ };
+
+ while (1) {
+ if (Suff::has("u", text)) {
+ UpperU = false;
+ } else if (Suff::has("U", text)) {
+ UpperU = true;
+ } else if (Suff::has("ll", text)) {
+ UpperL = false;
+ } else if (Suff::has("LL", text)) {
+ UpperL = true;
+ } else if (Suff::has("l", text)) {
+ UpperL = false;
+ } else if (Suff::has("L", text)) {
+ UpperL = true;
+ } else if (isFloat && Suff::has("f", text)) {
+ UpperF = false;
+ } else if (isFloat && Suff::has("F", text)) {
+ UpperF = true;
+ } else
+ break;
+ }
+
+ if (!UpperU.hasValue() && !UpperL.hasValue())
+ UpperU = UpperL = true;
+ else if (UpperU.hasValue() && !UpperL.hasValue())
+ UpperL = UpperU;
+ else if (UpperL.hasValue() && !UpperU.hasValue())
+ UpperU = UpperL;
+
+ Info.U = *UpperU ? "U" : "u";
+ Info.L = *UpperL ? "L" : "l";
+ Info.LL = *UpperL ? "LL" : "ll";
+ Info.F = UpperF ? "F" : "f";
+
+ Info.Hex = Info.Octal = false;
+ if (text.startswith("0x"))
+ Info.Hex = true;
+ else if (!isFloat && !isIntZero && text.startswith("0"))
+ Info.Octal = true;
+
+ SourceLocation B = literalRange.getBegin();
+ Info.WithoutSuffRange =
+ CharSourceRange::getCharRange(B, B.getLocWithOffset(text.size()));
+ return true;
+}
+
+static bool rewriteToNumberLiteral(const ObjCMessageExpr *Msg,
+ const NSAPI &NS, Commit &commit) {
+ if (Msg->getNumArgs() != 1)
+ return false;
+
+ const Expr *Arg = Msg->getArg(0)->IgnoreParenImpCasts();
+ if (const CharacterLiteral *CharE = dyn_cast<CharacterLiteral>(Arg))
+ return rewriteToCharLiteral(Msg, CharE, NS, commit);
+ if (const ObjCBoolLiteralExpr *BE = dyn_cast<ObjCBoolLiteralExpr>(Arg))
+ return rewriteToBoolLiteral(Msg, BE, NS, commit);
+ if (const CXXBoolLiteralExpr *BE = dyn_cast<CXXBoolLiteralExpr>(Arg))
+ return rewriteToBoolLiteral(Msg, BE, NS, commit);
+
+ const Expr *literalE = Arg;
+ if (const UnaryOperator *UOE = dyn_cast<UnaryOperator>(literalE)) {
+ if (UOE->getOpcode() == UO_Plus || UOE->getOpcode() == UO_Minus)
+ literalE = UOE->getSubExpr();
+ }
+
+ // Only integer and floating literals; non-literals or imaginary literals
+ // cannot be rewritten.
+ if (!isa<IntegerLiteral>(literalE) && !isa<FloatingLiteral>(literalE))
+ return false;
+
+ ASTContext &Ctx = NS.getASTContext();
+ Selector Sel = Msg->getSelector();
+ llvm::Optional<NSAPI::NSNumberLiteralMethodKind>
+ MKOpt = NS.getNSNumberLiteralMethodKind(Sel);
+ if (!MKOpt)
+ return false;
+ NSAPI::NSNumberLiteralMethodKind MK = *MKOpt;
+
+ bool CallIsUnsigned = false, CallIsLong = false, CallIsLongLong = false;
+ bool CallIsFloating = false, CallIsDouble = false;
+
+ switch (MK) {
+ // We cannot have these calls with int/float literals.
+ case NSAPI::NSNumberWithChar:
+ case NSAPI::NSNumberWithUnsignedChar:
+ case NSAPI::NSNumberWithShort:
+ case NSAPI::NSNumberWithUnsignedShort:
+ case NSAPI::NSNumberWithBool:
+ return false;
+
+ case NSAPI::NSNumberWithUnsignedInt:
+ case NSAPI::NSNumberWithUnsignedInteger:
+ CallIsUnsigned = true;
+ case NSAPI::NSNumberWithInt:
+ case NSAPI::NSNumberWithInteger:
+ break;
+
+ case NSAPI::NSNumberWithUnsignedLong:
+ CallIsUnsigned = true;
+ case NSAPI::NSNumberWithLong:
+ CallIsLong = true;
+ break;
+
+ case NSAPI::NSNumberWithUnsignedLongLong:
+ CallIsUnsigned = true;
+ case NSAPI::NSNumberWithLongLong:
+ CallIsLongLong = true;
+ break;
+
+ case NSAPI::NSNumberWithDouble:
+ CallIsDouble = true;
+ case NSAPI::NSNumberWithFloat:
+ CallIsFloating = true;
+ break;
+ }
+
+ SourceRange ArgRange = Arg->getSourceRange();
+ QualType ArgTy = Arg->getType();
+ QualType CallTy = Msg->getArg(0)->getType();
+
+ // Check for the easy case, the literal maps directly to the call.
+ if (Ctx.hasSameType(ArgTy, CallTy)) {
+ commit.replaceWithInner(Msg->getSourceRange(), ArgRange);
+ commit.insert(ArgRange.getBegin(), "@");
+ return true;
+ }
+
+ // We will need to modify the literal suffix to get the same type as the call.
+ // Don't even try if it came from a macro.
+ if (ArgRange.getBegin().isMacroID())
+ return false;
+
+ bool LitIsFloat = ArgTy->isFloatingType();
+ // For a float passed to an integer call, don't try rewriting. It is difficult
+ // and a very uncommon case anyway.
+ if (LitIsFloat && !CallIsFloating)
+ return false;
+
+ // Try to modify the literal to make it the same type as the method call.
+ // -Modify the suffix, and/or
+ // -Change integer to float
+
+ LiteralInfo LitInfo;
+ bool isIntZero = false;
+ if (const IntegerLiteral *IntE = dyn_cast<IntegerLiteral>(literalE))
+ isIntZero = !IntE->getValue().getBoolValue();
+ if (!getLiteralInfo(ArgRange, LitIsFloat, isIntZero, Ctx, LitInfo))
+ return false;
+
+ // Not easy to do int -> float with hex/octal and uncommon anyway.
+ if (!LitIsFloat && CallIsFloating && (LitInfo.Hex || LitInfo.Octal))
+ return false;
+
+ SourceLocation LitB = LitInfo.WithoutSuffRange.getBegin();
+ SourceLocation LitE = LitInfo.WithoutSuffRange.getEnd();
+
+ commit.replaceWithInner(CharSourceRange::getTokenRange(Msg->getSourceRange()),
+ LitInfo.WithoutSuffRange);
+ commit.insert(LitB, "@");
+
+ if (!LitIsFloat && CallIsFloating)
+ commit.insert(LitE, ".0");
+
+ if (CallIsFloating) {
+ if (!CallIsDouble)
+ commit.insert(LitE, LitInfo.F);
+ } else {
+ if (CallIsUnsigned)
+ commit.insert(LitE, LitInfo.U);
+
+ if (CallIsLong)
+ commit.insert(LitE, LitInfo.L);
+ else if (CallIsLongLong)
+ commit.insert(LitE, LitInfo.LL);
+ }
+ return true;
+}
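+
+// For illustration (hypothetical arguments), the NSNumber rewrites above
+// produce, adjusting the literal suffix to the type of the call when needed:
+//   [NSNumber numberWithInt:42]          -->  @42
+//   [NSNumber numberWithUnsignedInt:42]  -->  @42U
+//   [NSNumber numberWithDouble:1.5]      -->  @1.5
+//   [NSNumber numberWithChar:'a']        -->  @'a'
+//   [NSNumber numberWithBool:YES]        -->  @YES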
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
new file mode 100644
index 0000000..390ae09
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTConsumers.cpp
@@ -0,0 +1,422 @@
+//===--- ASTConsumers.cpp - ASTConsumer implementations -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// AST Consumer Implementations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/ASTConsumers.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/AST/AST.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "llvm/Module.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Path.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+/// ASTPrinter - Pretty-printer and dumper of ASTs
+
+namespace {
+ class ASTPrinter : public ASTConsumer {
+ raw_ostream &Out;
+ bool Dump;
+
+ public:
+ ASTPrinter(raw_ostream* o = NULL, bool Dump = false)
+ : Out(o? *o : llvm::outs()), Dump(Dump) { }
+
+ virtual void HandleTranslationUnit(ASTContext &Context) {
+ PrintingPolicy Policy = Context.getPrintingPolicy();
+ Policy.Dump = Dump;
+ Context.getTranslationUnitDecl()->print(Out, Policy, /*Indentation=*/0,
+ /*PrintInstantiation=*/true);
+ }
+ };
+} // end anonymous namespace
+
+ASTConsumer *clang::CreateASTPrinter(raw_ostream* out) {
+ return new ASTPrinter(out);
+}
+
+ASTConsumer *clang::CreateASTDumper() {
+ return new ASTPrinter(0, true);
+}
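+
+// Minimal sketch of direct use (illustrative only; in practice these consumers
+// are created by the frontend, e.g. for the -ast-print and -ast-dump actions):
+//   ASTConsumer *Printer = clang::CreateASTPrinter(&llvm::outs());
+//   ASTConsumer *Dumper = clang::CreateASTDumper();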
+
+//===----------------------------------------------------------------------===//
+/// ASTViewer - AST Visualization
+
+namespace {
+ class ASTViewer : public ASTConsumer {
+ ASTContext *Context;
+ public:
+ void Initialize(ASTContext &Context) {
+ this->Context = &Context;
+ }
+
+ virtual bool HandleTopLevelDecl(DeclGroupRef D) {
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I)
+ HandleTopLevelSingleDecl(*I);
+ return true;
+ }
+
+ void HandleTopLevelSingleDecl(Decl *D);
+ };
+}
+
+void ASTViewer::HandleTopLevelSingleDecl(Decl *D) {
+ if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
+ D->print(llvm::errs());
+
+ if (Stmt *Body = D->getBody()) {
+ llvm::errs() << '\n';
+ Body->viewAST();
+ llvm::errs() << '\n';
+ }
+ }
+}
+
+
+ASTConsumer *clang::CreateASTViewer() { return new ASTViewer(); }
+
+//===----------------------------------------------------------------------===//
+/// DeclContextPrinter - Decl and DeclContext Visualization
+
+namespace {
+
+class DeclContextPrinter : public ASTConsumer {
+ raw_ostream& Out;
+public:
+ DeclContextPrinter() : Out(llvm::errs()) {}
+
+ void HandleTranslationUnit(ASTContext &C) {
+ PrintDeclContext(C.getTranslationUnitDecl(), 4);
+ }
+
+ void PrintDeclContext(const DeclContext* DC, unsigned Indentation);
+};
+} // end anonymous namespace
+
+void DeclContextPrinter::PrintDeclContext(const DeclContext* DC,
+ unsigned Indentation) {
+ // Print DeclContext name.
+ switch (DC->getDeclKind()) {
+ case Decl::TranslationUnit:
+ Out << "[translation unit] " << DC;
+ break;
+ case Decl::Namespace: {
+ Out << "[namespace] ";
+ const NamespaceDecl* ND = cast<NamespaceDecl>(DC);
+ Out << *ND;
+ break;
+ }
+ case Decl::Enum: {
+ const EnumDecl* ED = cast<EnumDecl>(DC);
+ if (ED->isCompleteDefinition())
+ Out << "[enum] ";
+ else
+ Out << "<enum> ";
+ Out << *ED;
+ break;
+ }
+ case Decl::Record: {
+ const RecordDecl* RD = cast<RecordDecl>(DC);
+ if (RD->isCompleteDefinition())
+ Out << "[struct] ";
+ else
+ Out << "<struct> ";
+ Out << *RD;
+ break;
+ }
+ case Decl::CXXRecord: {
+ const CXXRecordDecl* RD = cast<CXXRecordDecl>(DC);
+ if (RD->isCompleteDefinition())
+ Out << "[class] ";
+ else
+ Out << "<class> ";
+ Out << *RD << ' ' << DC;
+ break;
+ }
+ case Decl::ObjCMethod:
+ Out << "[objc method]";
+ break;
+ case Decl::ObjCInterface:
+ Out << "[objc interface]";
+ break;
+ case Decl::ObjCCategory:
+ Out << "[objc category]";
+ break;
+ case Decl::ObjCProtocol:
+ Out << "[objc protocol]";
+ break;
+ case Decl::ObjCImplementation:
+ Out << "[objc implementation]";
+ break;
+ case Decl::ObjCCategoryImpl:
+ Out << "[objc categoryimpl]";
+ break;
+ case Decl::LinkageSpec:
+ Out << "[linkage spec]";
+ break;
+ case Decl::Block:
+ Out << "[block]";
+ break;
+ case Decl::Function: {
+ const FunctionDecl* FD = cast<FunctionDecl>(DC);
+ if (FD->doesThisDeclarationHaveABody())
+ Out << "[function] ";
+ else
+ Out << "<function> ";
+ Out << *FD;
+ // Print the parameters.
+ Out << "(";
+ bool PrintComma = false;
+ for (FunctionDecl::param_const_iterator I = FD->param_begin(),
+ E = FD->param_end(); I != E; ++I) {
+ if (PrintComma)
+ Out << ", ";
+ else
+ PrintComma = true;
+ Out << **I;
+ }
+ Out << ")";
+ break;
+ }
+ case Decl::CXXMethod: {
+ const CXXMethodDecl* D = cast<CXXMethodDecl>(DC);
+ if (D->isOutOfLine())
+ Out << "[c++ method] ";
+ else if (D->isImplicit())
+ Out << "(c++ method) ";
+ else
+ Out << "<c++ method> ";
+ Out << *D;
+ // Print the parameters.
+ Out << "(";
+ bool PrintComma = false;
+ for (FunctionDecl::param_const_iterator I = D->param_begin(),
+ E = D->param_end(); I != E; ++I) {
+ if (PrintComma)
+ Out << ", ";
+ else
+ PrintComma = true;
+ Out << **I;
+ }
+ Out << ")";
+
+ // Check the semantic DeclContext.
+ const DeclContext* SemaDC = D->getDeclContext();
+ const DeclContext* LexicalDC = D->getLexicalDeclContext();
+ if (SemaDC != LexicalDC)
+ Out << " [[" << SemaDC << "]]";
+
+ break;
+ }
+ case Decl::CXXConstructor: {
+ const CXXConstructorDecl* D = cast<CXXConstructorDecl>(DC);
+ if (D->isOutOfLine())
+ Out << "[c++ ctor] ";
+ else if (D->isImplicit())
+ Out << "(c++ ctor) ";
+ else
+ Out << "<c++ ctor> ";
+ Out << *D;
+ // Print the parameters.
+ Out << "(";
+ bool PrintComma = false;
+ for (FunctionDecl::param_const_iterator I = D->param_begin(),
+ E = D->param_end(); I != E; ++I) {
+ if (PrintComma)
+ Out << ", ";
+ else
+ PrintComma = true;
+ Out << **I;
+ }
+ Out << ")";
+
+ // Check the semantic DC.
+ const DeclContext* SemaDC = D->getDeclContext();
+ const DeclContext* LexicalDC = D->getLexicalDeclContext();
+ if (SemaDC != LexicalDC)
+ Out << " [[" << SemaDC << "]]";
+ break;
+ }
+ case Decl::CXXDestructor: {
+ const CXXDestructorDecl* D = cast<CXXDestructorDecl>(DC);
+ if (D->isOutOfLine())
+ Out << "[c++ dtor] ";
+ else if (D->isImplicit())
+ Out << "(c++ dtor) ";
+ else
+ Out << "<c++ dtor> ";
+ Out << *D;
+ // Check the semantic DC.
+ const DeclContext* SemaDC = D->getDeclContext();
+ const DeclContext* LexicalDC = D->getLexicalDeclContext();
+ if (SemaDC != LexicalDC)
+ Out << " [[" << SemaDC << "]]";
+ break;
+ }
+ case Decl::CXXConversion: {
+ const CXXConversionDecl* D = cast<CXXConversionDecl>(DC);
+ if (D->isOutOfLine())
+ Out << "[c++ conversion] ";
+ else if (D->isImplicit())
+ Out << "(c++ conversion) ";
+ else
+ Out << "<c++ conversion> ";
+ Out << *D;
+ // Check the semantic DC.
+ const DeclContext* SemaDC = D->getDeclContext();
+ const DeclContext* LexicalDC = D->getLexicalDeclContext();
+ if (SemaDC != LexicalDC)
+ Out << " [[" << SemaDC << "]]";
+ break;
+ }
+
+ default:
+ llvm_unreachable("a decl that inherits DeclContext isn't handled");
+ }
+
+ Out << "\n";
+
+ // Print decls in the DeclContext.
+ for (DeclContext::decl_iterator I = DC->decls_begin(), E = DC->decls_end();
+ I != E; ++I) {
+ for (unsigned i = 0; i < Indentation; ++i)
+ Out << " ";
+
+ Decl::Kind DK = I->getKind();
+ switch (DK) {
+ case Decl::Namespace:
+ case Decl::Enum:
+ case Decl::Record:
+ case Decl::CXXRecord:
+ case Decl::ObjCMethod:
+ case Decl::ObjCInterface:
+ case Decl::ObjCCategory:
+ case Decl::ObjCProtocol:
+ case Decl::ObjCImplementation:
+ case Decl::ObjCCategoryImpl:
+ case Decl::LinkageSpec:
+ case Decl::Block:
+ case Decl::Function:
+ case Decl::CXXMethod:
+ case Decl::CXXConstructor:
+ case Decl::CXXDestructor:
+ case Decl::CXXConversion:
+ {
+ DeclContext* DC = cast<DeclContext>(*I);
+ PrintDeclContext(DC, Indentation+2);
+ break;
+ }
+ case Decl::IndirectField: {
+ IndirectFieldDecl* IFD = cast<IndirectFieldDecl>(*I);
+ Out << "<IndirectField> " << *IFD << '\n';
+ break;
+ }
+ case Decl::Label: {
+ LabelDecl *LD = cast<LabelDecl>(*I);
+ Out << "<Label> " << *LD << '\n';
+ break;
+ }
+ case Decl::Field: {
+ FieldDecl *FD = cast<FieldDecl>(*I);
+ Out << "<field> " << *FD << '\n';
+ break;
+ }
+ case Decl::Typedef:
+ case Decl::TypeAlias: {
+ TypedefNameDecl* TD = cast<TypedefNameDecl>(*I);
+ Out << "<typedef> " << *TD << '\n';
+ break;
+ }
+ case Decl::EnumConstant: {
+ EnumConstantDecl* ECD = cast<EnumConstantDecl>(*I);
+ Out << "<enum constant> " << *ECD << '\n';
+ break;
+ }
+ case Decl::Var: {
+ VarDecl* VD = cast<VarDecl>(*I);
+ Out << "<var> " << *VD << '\n';
+ break;
+ }
+ case Decl::ImplicitParam: {
+ ImplicitParamDecl* IPD = cast<ImplicitParamDecl>(*I);
+ Out << "<implicit parameter> " << *IPD << '\n';
+ break;
+ }
+ case Decl::ParmVar: {
+ ParmVarDecl* PVD = cast<ParmVarDecl>(*I);
+ Out << "<parameter> " << *PVD << '\n';
+ break;
+ }
+ case Decl::ObjCProperty: {
+ ObjCPropertyDecl* OPD = cast<ObjCPropertyDecl>(*I);
+ Out << "<objc property> " << *OPD << '\n';
+ break;
+ }
+ case Decl::FunctionTemplate: {
+ FunctionTemplateDecl* FTD = cast<FunctionTemplateDecl>(*I);
+ Out << "<function template> " << *FTD << '\n';
+ break;
+ }
+ case Decl::FileScopeAsm: {
+ Out << "<file-scope asm>\n";
+ break;
+ }
+ case Decl::UsingDirective: {
+ Out << "<using directive>\n";
+ break;
+ }
+ case Decl::NamespaceAlias: {
+ NamespaceAliasDecl* NAD = cast<NamespaceAliasDecl>(*I);
+ Out << "<namespace alias> " << *NAD << '\n';
+ break;
+ }
+ case Decl::ClassTemplate: {
+ ClassTemplateDecl *CTD = cast<ClassTemplateDecl>(*I);
+ Out << "<class template> " << *CTD << '\n';
+ break;
+ }
+ default:
+ Out << "DeclKind: " << DK << '"' << *I << "\"\n";
+ llvm_unreachable("decl unhandled");
+ }
+ }
+}
+ASTConsumer *clang::CreateDeclContextPrinter() {
+ return new DeclContextPrinter();
+}
+
+//===----------------------------------------------------------------------===//
+/// ASTDumperXML - In-depth XML dumping.
+
+namespace {
+class ASTDumpXML : public ASTConsumer {
+ raw_ostream &OS;
+
+public:
+ ASTDumpXML(raw_ostream &OS) : OS(OS) {}
+
+ void HandleTranslationUnit(ASTContext &C) {
+ C.getTranslationUnitDecl()->dumpXML(OS);
+ }
+};
+}
+
+ASTConsumer *clang::CreateASTDumperXML(raw_ostream &OS) {
+ return new ASTDumpXML(OS);
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp
new file mode 100644
index 0000000..9feb3de
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTMerge.cpp
@@ -0,0 +1,109 @@
+//===-- ASTMerge.cpp - AST Merging Frontend Action -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/ASTImporter.h"
+#include "clang/Basic/Diagnostic.h"
+
+using namespace clang;
+
+ASTConsumer *ASTMergeAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return AdaptedAction->CreateASTConsumer(CI, InFile);
+}
+
+bool ASTMergeAction::BeginSourceFileAction(CompilerInstance &CI,
+ StringRef Filename) {
+ // FIXME: This is a hack. We need a better way to communicate the
+ // AST file, compiler instance, and file name than member variables
+ // of FrontendAction.
+ AdaptedAction->setCurrentInput(getCurrentInput(), takeCurrentASTUnit());
+ AdaptedAction->setCompilerInstance(&CI);
+ return AdaptedAction->BeginSourceFileAction(CI, Filename);
+}
+
+void ASTMergeAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ CI.getDiagnostics().getClient()->BeginSourceFile(
+ CI.getASTContext().getLangOpts());
+ CI.getDiagnostics().SetArgToStringFn(&FormatASTNodeDiagnosticArgument,
+ &CI.getASTContext());
+ IntrusiveRefCntPtr<DiagnosticIDs>
+ DiagIDs(CI.getDiagnostics().getDiagnosticIDs());
+ for (unsigned I = 0, N = ASTFiles.size(); I != N; ++I) {
+ IntrusiveRefCntPtr<DiagnosticsEngine>
+ Diags(new DiagnosticsEngine(DiagIDs, CI.getDiagnostics().getClient(),
+ /*ShouldOwnClient=*/false));
+ ASTUnit *Unit = ASTUnit::LoadFromASTFile(ASTFiles[I], Diags,
+ CI.getFileSystemOpts(), false);
+ if (!Unit)
+ continue;
+
+ ASTImporter Importer(CI.getASTContext(),
+ CI.getFileManager(),
+ Unit->getASTContext(),
+ Unit->getFileManager(),
+ /*MinimalImport=*/false);
+
+ TranslationUnitDecl *TU = Unit->getASTContext().getTranslationUnitDecl();
+ for (DeclContext::decl_iterator D = TU->decls_begin(),
+ DEnd = TU->decls_end();
+ D != DEnd; ++D) {
+ // Don't re-import __va_list_tag, __builtin_va_list.
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(*D))
+ if (IdentifierInfo *II = ND->getIdentifier())
+ if (II->isStr("__va_list_tag") || II->isStr("__builtin_va_list"))
+ continue;
+
+ Importer.Import(*D);
+ }
+
+ delete Unit;
+ }
+
+ AdaptedAction->ExecuteAction();
+ CI.getDiagnostics().getClient()->EndSourceFile();
+}
+
+void ASTMergeAction::EndSourceFileAction() {
+ return AdaptedAction->EndSourceFileAction();
+}
+
+ASTMergeAction::ASTMergeAction(FrontendAction *AdaptedAction,
+ ArrayRef<std::string> ASTFiles)
+ : AdaptedAction(AdaptedAction), ASTFiles(ASTFiles.begin(), ASTFiles.end()) {
+ assert(AdaptedAction && "ASTMergeAction needs an action to adapt");
+}
+
+ASTMergeAction::~ASTMergeAction() {
+ delete AdaptedAction;
+}
+
+bool ASTMergeAction::usesPreprocessorOnly() const {
+ return AdaptedAction->usesPreprocessorOnly();
+}
+
+TranslationUnitKind ASTMergeAction::getTranslationUnitKind() {
+ return AdaptedAction->getTranslationUnitKind();
+}
+
+bool ASTMergeAction::hasPCHSupport() const {
+ return AdaptedAction->hasPCHSupport();
+}
+
+bool ASTMergeAction::hasASTFileSupport() const {
+ return AdaptedAction->hasASTFileSupport();
+}
+
+bool ASTMergeAction::hasCodeCompletionSupport() const {
+ return AdaptedAction->hasCodeCompletionSupport();
+}
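+
+// A minimal usage sketch (compiled out with #if 0): ASTMergeAction wraps
+// another FrontendAction, imports every AST file in the list into that
+// action's context, and then runs it.  SyntaxOnlyAction and the .ast file
+// names below are stand-ins chosen for illustration.
+#if 0
+static void runASTMergeExample(CompilerInstance &CI) {
+  std::vector<std::string> ASTFiles;
+  ASTFiles.push_back("a.ast");
+  ASTFiles.push_back("b.ast");
+  // ASTMergeAction owns (and later deletes) the adapted action.
+  ASTMergeAction Merge(new SyntaxOnlyAction(), ASTFiles);
+  CI.ExecuteAction(Merge);
+}
+#endif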
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
new file mode 100644
index 0000000..e32fa63
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/ASTUnit.cpp
@@ -0,0 +1,2773 @@
+//===--- ASTUnit.cpp - ASTUnit utility ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// ASTUnit Implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/TypeOrdering.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Job.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/FrontendOptions.h"
+#include "clang/Frontend/MultiplexConsumer.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/ASTWriter.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/TargetOptions.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/Atomic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Mutex.h"
+#include "llvm/Support/MutexGuard.h"
+#include "llvm/Support/CrashRecoveryContext.h"
+#include <cstdlib>
+#include <cstdio>
+#include <sys/stat.h>
+using namespace clang;
+
+using llvm::TimeRecord;
+
+namespace {
+ class SimpleTimer {
+ bool WantTiming;
+ TimeRecord Start;
+ std::string Output;
+
+ public:
+ explicit SimpleTimer(bool WantTiming) : WantTiming(WantTiming) {
+ if (WantTiming)
+ Start = TimeRecord::getCurrentTime();
+ }
+
+ void setOutput(const Twine &Output) {
+ if (WantTiming)
+ this->Output = Output.str();
+ }
+
+ ~SimpleTimer() {
+ if (WantTiming) {
+ TimeRecord Elapsed = TimeRecord::getCurrentTime();
+ Elapsed -= Start;
+ llvm::errs() << Output << ':';
+ Elapsed.print(Elapsed, llvm::errs());
+ llvm::errs() << '\n';
+ }
+ }
+ };
+
+ struct OnDiskData {
+ /// \brief The file in which the precompiled preamble is stored.
+ std::string PreambleFile;
+
+ /// \brief Temporary files that should be removed when the ASTUnit is
+ /// destroyed.
+ SmallVector<llvm::sys::Path, 4> TemporaryFiles;
+
+ /// \brief Erase temporary files.
+ void CleanTemporaryFiles();
+
+ /// \brief Erase the preamble file.
+ void CleanPreambleFile();
+
+ /// \brief Erase temporary files and the preamble file.
+ void Cleanup();
+ };
+}
+
+static llvm::sys::SmartMutex<false> &getOnDiskMutex() {
+ static llvm::sys::SmartMutex<false> M(/* recursive = */ true);
+ return M;
+}
+
+static void cleanupOnDiskMapAtExit(void);
+
+typedef llvm::DenseMap<const ASTUnit *, OnDiskData *> OnDiskDataMap;
+static OnDiskDataMap &getOnDiskDataMap() {
+ static OnDiskDataMap M;
+ static bool hasRegisteredAtExit = false;
+ if (!hasRegisteredAtExit) {
+ hasRegisteredAtExit = true;
+ atexit(cleanupOnDiskMapAtExit);
+ }
+ return M;
+}
+
+static void cleanupOnDiskMapAtExit(void) {
+ // No mutex required here since we are leaving the program.
+ OnDiskDataMap &M = getOnDiskDataMap();
+ for (OnDiskDataMap::iterator I = M.begin(), E = M.end(); I != E; ++I) {
+ // We don't worry about freeing the memory associated with OnDiskDataMap.
+ // All we care about is erasing stale files.
+ I->second->Cleanup();
+ }
+}
+
+static OnDiskData &getOnDiskData(const ASTUnit *AU) {
+ // We require the mutex since we are modifying the structure of the
+ // DenseMap.
+ llvm::MutexGuard Guard(getOnDiskMutex());
+ OnDiskDataMap &M = getOnDiskDataMap();
+ OnDiskData *&D = M[AU];
+ if (!D)
+ D = new OnDiskData();
+ return *D;
+}
+
+static void erasePreambleFile(const ASTUnit *AU) {
+ getOnDiskData(AU).CleanPreambleFile();
+}
+
+static void removeOnDiskEntry(const ASTUnit *AU) {
+ // We require the mutex since we are modifying the structure of the
+ // DenseMap.
+ llvm::MutexGuard Guard(getOnDiskMutex());
+ OnDiskDataMap &M = getOnDiskDataMap();
+ OnDiskDataMap::iterator I = M.find(AU);
+ if (I != M.end()) {
+ I->second->Cleanup();
+ delete I->second;
+ M.erase(AU);
+ }
+}
+
+static void setPreambleFile(const ASTUnit *AU, llvm::StringRef preambleFile) {
+ getOnDiskData(AU).PreambleFile = preambleFile;
+}
+
+static const std::string &getPreambleFile(const ASTUnit *AU) {
+ return getOnDiskData(AU).PreambleFile;
+}
+
+void OnDiskData::CleanTemporaryFiles() {
+ for (unsigned I = 0, N = TemporaryFiles.size(); I != N; ++I)
+ TemporaryFiles[I].eraseFromDisk();
+ TemporaryFiles.clear();
+}
+
+void OnDiskData::CleanPreambleFile() {
+ if (!PreambleFile.empty()) {
+ llvm::sys::Path(PreambleFile).eraseFromDisk();
+ PreambleFile.clear();
+ }
+}
+
+void OnDiskData::Cleanup() {
+ CleanTemporaryFiles();
+ CleanPreambleFile();
+}
+
+void ASTUnit::clearFileLevelDecls() {
+ for (FileDeclsTy::iterator
+ I = FileDecls.begin(), E = FileDecls.end(); I != E; ++I)
+ delete I->second;
+ FileDecls.clear();
+}
+
+void ASTUnit::CleanTemporaryFiles() {
+ getOnDiskData(this).CleanTemporaryFiles();
+}
+
+void ASTUnit::addTemporaryFile(const llvm::sys::Path &TempFile) {
+ getOnDiskData(this).TemporaryFiles.push_back(TempFile);
+}
+
+/// \brief After failing to build a precompiled preamble (due to
+/// errors in the source that occur in the preamble), the number of
+/// reparses during which we'll skip even trying to precompile the
+/// preamble.
+const unsigned DefaultPreambleRebuildInterval = 5;
+
+/// \brief Tracks the number of ASTUnit objects that are currently active.
+///
+/// Used for debugging purposes only.
+static llvm::sys::cas_flag ActiveASTUnitObjects;
+
+ASTUnit::ASTUnit(bool _MainFileIsAST)
+ : Reader(0), OnlyLocalDecls(false), CaptureDiagnostics(false),
+ MainFileIsAST(_MainFileIsAST),
+ TUKind(TU_Complete), WantTiming(getenv("LIBCLANG_TIMING")),
+ OwnsRemappedFileBuffers(true),
+ NumStoredDiagnosticsFromDriver(0),
+ PreambleRebuildCounter(0), SavedMainFileBuffer(0), PreambleBuffer(0),
+ NumWarningsInPreamble(0),
+ ShouldCacheCodeCompletionResults(false),
+ CompletionCacheTopLevelHashValue(0),
+ PreambleTopLevelHashValue(0),
+ CurrentTopLevelHashValue(0),
+ UnsafeToFree(false) {
+ if (getenv("LIBCLANG_OBJTRACKING")) {
+ llvm::sys::AtomicIncrement(&ActiveASTUnitObjects);
+ fprintf(stderr, "+++ %d translation units\n", ActiveASTUnitObjects);
+ }
+}
+
+ASTUnit::~ASTUnit() {
+ clearFileLevelDecls();
+
+ // Clean up the temporary files and the preamble file.
+ removeOnDiskEntry(this);
+
+ // Free the buffers associated with remapped files. We are required to
+ // perform this operation here because we explicitly request that the
+ // compiler instance *not* free these buffers for each invocation of the
+ // parser.
+ if (Invocation.getPtr() && OwnsRemappedFileBuffers) {
+ PreprocessorOptions &PPOpts = Invocation->getPreprocessorOpts();
+ for (PreprocessorOptions::remapped_file_buffer_iterator
+ FB = PPOpts.remapped_file_buffer_begin(),
+ FBEnd = PPOpts.remapped_file_buffer_end();
+ FB != FBEnd;
+ ++FB)
+ delete FB->second;
+ }
+
+ delete SavedMainFileBuffer;
+ delete PreambleBuffer;
+
+ ClearCachedCompletionResults();
+
+ if (getenv("LIBCLANG_OBJTRACKING")) {
+ llvm::sys::AtomicDecrement(&ActiveASTUnitObjects);
+ fprintf(stderr, "--- %d translation units\n", ActiveASTUnitObjects);
+ }
+}
+
+void ASTUnit::setPreprocessor(Preprocessor *pp) { PP = pp; }
+
+/// \brief Determine the set of code-completion contexts in which this
+/// declaration should be shown.
+static unsigned getDeclShowContexts(NamedDecl *ND,
+ const LangOptions &LangOpts,
+ bool &IsNestedNameSpecifier) {
+ IsNestedNameSpecifier = false;
+
+ if (isa<UsingShadowDecl>(ND))
+ ND = dyn_cast<NamedDecl>(ND->getUnderlyingDecl());
+ if (!ND)
+ return 0;
+
+ unsigned Contexts = 0;
+ if (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND) ||
+ isa<ClassTemplateDecl>(ND) || isa<TemplateTemplateParmDecl>(ND)) {
+ // Types can appear in these contexts.
+ if (LangOpts.CPlusPlus || !isa<TagDecl>(ND))
+ Contexts |= (1 << (CodeCompletionContext::CCC_TopLevel - 1))
+ | (1 << (CodeCompletionContext::CCC_ObjCIvarList - 1))
+ | (1 << (CodeCompletionContext::CCC_ClassStructUnion - 1))
+ | (1 << (CodeCompletionContext::CCC_Statement - 1))
+ | (1 << (CodeCompletionContext::CCC_Type - 1))
+ | (1 << (CodeCompletionContext::CCC_ParenthesizedExpression - 1));
+
+ // In C++, types can appear in expressions contexts (for functional casts).
+ if (LangOpts.CPlusPlus)
+ Contexts |= (1 << (CodeCompletionContext::CCC_Expression - 1));
+
+    // In Objective-C, interface names can appear as message receivers. In
+    // Objective-C++, all types are available due to functional casts.
+ if (LangOpts.CPlusPlus || isa<ObjCInterfaceDecl>(ND))
+ Contexts |= (1 << (CodeCompletionContext::CCC_ObjCMessageReceiver - 1));
+
+ // In Objective-C, you can only be a subclass of another Objective-C class
+ if (isa<ObjCInterfaceDecl>(ND))
+ Contexts |= (1 << (CodeCompletionContext::CCC_ObjCInterfaceName - 1));
+
+ // Deal with tag names.
+ if (isa<EnumDecl>(ND)) {
+ Contexts |= (1 << (CodeCompletionContext::CCC_EnumTag - 1));
+
+ // Part of the nested-name-specifier in C++0x.
+ if (LangOpts.CPlusPlus0x)
+ IsNestedNameSpecifier = true;
+ } else if (RecordDecl *Record = dyn_cast<RecordDecl>(ND)) {
+ if (Record->isUnion())
+ Contexts |= (1 << (CodeCompletionContext::CCC_UnionTag - 1));
+ else
+ Contexts |= (1 << (CodeCompletionContext::CCC_ClassOrStructTag - 1));
+
+ if (LangOpts.CPlusPlus)
+ IsNestedNameSpecifier = true;
+ } else if (isa<ClassTemplateDecl>(ND))
+ IsNestedNameSpecifier = true;
+ } else if (isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND)) {
+ // Values can appear in these contexts.
+ Contexts = (1 << (CodeCompletionContext::CCC_Statement - 1))
+ | (1 << (CodeCompletionContext::CCC_Expression - 1))
+ | (1 << (CodeCompletionContext::CCC_ParenthesizedExpression - 1))
+ | (1 << (CodeCompletionContext::CCC_ObjCMessageReceiver - 1));
+ } else if (isa<ObjCProtocolDecl>(ND)) {
+ Contexts = (1 << (CodeCompletionContext::CCC_ObjCProtocolName - 1));
+ } else if (isa<ObjCCategoryDecl>(ND)) {
+ Contexts = (1 << (CodeCompletionContext::CCC_ObjCCategoryName - 1));
+ } else if (isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND)) {
+ Contexts = (1 << (CodeCompletionContext::CCC_Namespace - 1));
+
+ // Part of the nested-name-specifier.
+ IsNestedNameSpecifier = true;
+ }
+
+ return Contexts;
+}
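+
+// A minimal sketch (compiled out with #if 0) of how the bitmask returned by
+// getDeclShowContexts() is meant to be consumed: bit (CCC_Foo - 1) is set
+// when the declaration may be shown in completion context CCC_Foo.  The
+// helper name "declShownInContext" is made up for illustration.
+#if 0
+static bool declShownInContext(NamedDecl *ND, const LangOptions &LangOpts,
+                               CodeCompletionContext::Kind K) {
+  // K is one of the CCC_* contexts tracked above (not CCC_Other).
+  bool IsNestedNameSpecifier = false;
+  unsigned Contexts = getDeclShowContexts(ND, LangOpts, IsNestedNameSpecifier);
+  return (Contexts & (1 << (K - 1))) != 0;
+}
+#endif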
+
+void ASTUnit::CacheCodeCompletionResults() {
+ if (!TheSema)
+ return;
+
+ SimpleTimer Timer(WantTiming);
+ Timer.setOutput("Cache global code completions for " + getMainFileName());
+
+ // Clear out the previous results.
+ ClearCachedCompletionResults();
+
+ // Gather the set of global code completions.
+ typedef CodeCompletionResult Result;
+ SmallVector<Result, 8> Results;
+ CachedCompletionAllocator = new GlobalCodeCompletionAllocator;
+ TheSema->GatherGlobalCodeCompletions(*CachedCompletionAllocator,
+ getCodeCompletionTUInfo(), Results);
+
+ // Translate global code completions into cached completions.
+ llvm::DenseMap<CanQualType, unsigned> CompletionTypes;
+
+ for (unsigned I = 0, N = Results.size(); I != N; ++I) {
+ switch (Results[I].Kind) {
+ case Result::RK_Declaration: {
+ bool IsNestedNameSpecifier = false;
+ CachedCodeCompletionResult CachedResult;
+ CachedResult.Completion = Results[I].CreateCodeCompletionString(*TheSema,
+ *CachedCompletionAllocator,
+ getCodeCompletionTUInfo());
+ CachedResult.ShowInContexts = getDeclShowContexts(Results[I].Declaration,
+ Ctx->getLangOpts(),
+ IsNestedNameSpecifier);
+ CachedResult.Priority = Results[I].Priority;
+ CachedResult.Kind = Results[I].CursorKind;
+ CachedResult.Availability = Results[I].Availability;
+
+ // Keep track of the type of this completion in an ASTContext-agnostic
+ // way.
+ QualType UsageType = getDeclUsageType(*Ctx, Results[I].Declaration);
+ if (UsageType.isNull()) {
+ CachedResult.TypeClass = STC_Void;
+ CachedResult.Type = 0;
+ } else {
+ CanQualType CanUsageType
+ = Ctx->getCanonicalType(UsageType.getUnqualifiedType());
+ CachedResult.TypeClass = getSimplifiedTypeClass(CanUsageType);
+
+ // Determine whether we have already seen this type. If so, we save
+ // ourselves the work of formatting the type string by using the
+ // temporary, CanQualType-based hash table to find the associated value.
+ unsigned &TypeValue = CompletionTypes[CanUsageType];
+ if (TypeValue == 0) {
+ TypeValue = CompletionTypes.size();
+ CachedCompletionTypes[QualType(CanUsageType).getAsString()]
+ = TypeValue;
+ }
+
+ CachedResult.Type = TypeValue;
+ }
+
+ CachedCompletionResults.push_back(CachedResult);
+
+ /// Handle nested-name-specifiers in C++.
+ if (TheSema->Context.getLangOpts().CPlusPlus &&
+ IsNestedNameSpecifier && !Results[I].StartsNestedNameSpecifier) {
+ // The contexts in which a nested-name-specifier can appear in C++.
+ unsigned NNSContexts
+ = (1 << (CodeCompletionContext::CCC_TopLevel - 1))
+ | (1 << (CodeCompletionContext::CCC_ObjCIvarList - 1))
+ | (1 << (CodeCompletionContext::CCC_ClassStructUnion - 1))
+ | (1 << (CodeCompletionContext::CCC_Statement - 1))
+ | (1 << (CodeCompletionContext::CCC_Expression - 1))
+ | (1 << (CodeCompletionContext::CCC_ObjCMessageReceiver - 1))
+ | (1 << (CodeCompletionContext::CCC_EnumTag - 1))
+ | (1 << (CodeCompletionContext::CCC_UnionTag - 1))
+ | (1 << (CodeCompletionContext::CCC_ClassOrStructTag - 1))
+ | (1 << (CodeCompletionContext::CCC_Type - 1))
+ | (1 << (CodeCompletionContext::CCC_PotentiallyQualifiedName - 1))
+ | (1 << (CodeCompletionContext::CCC_ParenthesizedExpression - 1));
+
+ if (isa<NamespaceDecl>(Results[I].Declaration) ||
+ isa<NamespaceAliasDecl>(Results[I].Declaration))
+ NNSContexts |= (1 << (CodeCompletionContext::CCC_Namespace - 1));
+
+ if (unsigned RemainingContexts
+ = NNSContexts & ~CachedResult.ShowInContexts) {
+          // If there are any contexts where this completion can be a
+ // nested-name-specifier but isn't already an option, create a
+ // nested-name-specifier completion.
+ Results[I].StartsNestedNameSpecifier = true;
+ CachedResult.Completion
+ = Results[I].CreateCodeCompletionString(*TheSema,
+ *CachedCompletionAllocator,
+ getCodeCompletionTUInfo());
+ CachedResult.ShowInContexts = RemainingContexts;
+ CachedResult.Priority = CCP_NestedNameSpecifier;
+ CachedResult.TypeClass = STC_Void;
+ CachedResult.Type = 0;
+ CachedCompletionResults.push_back(CachedResult);
+ }
+ }
+ break;
+ }
+
+ case Result::RK_Keyword:
+ case Result::RK_Pattern:
+ // Ignore keywords and patterns; we don't care, since they are so
+ // easily regenerated.
+ break;
+
+ case Result::RK_Macro: {
+ CachedCodeCompletionResult CachedResult;
+ CachedResult.Completion
+ = Results[I].CreateCodeCompletionString(*TheSema,
+ *CachedCompletionAllocator,
+ getCodeCompletionTUInfo());
+ CachedResult.ShowInContexts
+ = (1 << (CodeCompletionContext::CCC_TopLevel - 1))
+ | (1 << (CodeCompletionContext::CCC_ObjCInterface - 1))
+ | (1 << (CodeCompletionContext::CCC_ObjCImplementation - 1))
+ | (1 << (CodeCompletionContext::CCC_ObjCIvarList - 1))
+ | (1 << (CodeCompletionContext::CCC_ClassStructUnion - 1))
+ | (1 << (CodeCompletionContext::CCC_Statement - 1))
+ | (1 << (CodeCompletionContext::CCC_Expression - 1))
+ | (1 << (CodeCompletionContext::CCC_ObjCMessageReceiver - 1))
+ | (1 << (CodeCompletionContext::CCC_MacroNameUse - 1))
+ | (1 << (CodeCompletionContext::CCC_PreprocessorExpression - 1))
+ | (1 << (CodeCompletionContext::CCC_ParenthesizedExpression - 1))
+ | (1 << (CodeCompletionContext::CCC_OtherWithMacros - 1));
+
+ CachedResult.Priority = Results[I].Priority;
+ CachedResult.Kind = Results[I].CursorKind;
+ CachedResult.Availability = Results[I].Availability;
+ CachedResult.TypeClass = STC_Void;
+ CachedResult.Type = 0;
+ CachedCompletionResults.push_back(CachedResult);
+ break;
+ }
+ }
+ }
+
+ // Save the current top-level hash value.
+ CompletionCacheTopLevelHashValue = CurrentTopLevelHashValue;
+}
+
+void ASTUnit::ClearCachedCompletionResults() {
+ CachedCompletionResults.clear();
+ CachedCompletionTypes.clear();
+ CachedCompletionAllocator = 0;
+}
+
+namespace {
+
+/// \brief Gathers information from ASTReader that will be used to initialize
+/// a Preprocessor.
+class ASTInfoCollector : public ASTReaderListener {
+ Preprocessor &PP;
+ ASTContext &Context;
+ LangOptions &LangOpt;
+ HeaderSearch &HSI;
+ IntrusiveRefCntPtr<TargetInfo> &Target;
+ std::string &Predefines;
+ unsigned &Counter;
+
+ unsigned NumHeaderInfos;
+
+ bool InitializedLanguage;
+public:
+ ASTInfoCollector(Preprocessor &PP, ASTContext &Context, LangOptions &LangOpt,
+ HeaderSearch &HSI,
+ IntrusiveRefCntPtr<TargetInfo> &Target,
+ std::string &Predefines,
+ unsigned &Counter)
+ : PP(PP), Context(Context), LangOpt(LangOpt), HSI(HSI), Target(Target),
+ Predefines(Predefines), Counter(Counter), NumHeaderInfos(0),
+ InitializedLanguage(false) {}
+
+ virtual bool ReadLanguageOptions(const LangOptions &LangOpts) {
+ if (InitializedLanguage)
+ return false;
+
+ LangOpt = LangOpts;
+
+ // Initialize the preprocessor.
+ PP.Initialize(*Target);
+
+ // Initialize the ASTContext
+ Context.InitBuiltinTypes(*Target);
+
+ InitializedLanguage = true;
+ return false;
+ }
+
+ virtual bool ReadTargetTriple(StringRef Triple) {
+ // If we've already initialized the target, don't do it again.
+ if (Target)
+ return false;
+
+ // FIXME: This is broken, we should store the TargetOptions in the AST file.
+ TargetOptions TargetOpts;
+ TargetOpts.ABI = "";
+ TargetOpts.CXXABI = "";
+ TargetOpts.CPU = "";
+ TargetOpts.Features.clear();
+ TargetOpts.Triple = Triple;
+ Target = TargetInfo::CreateTargetInfo(PP.getDiagnostics(), TargetOpts);
+ return false;
+ }
+
+ virtual bool ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers,
+ StringRef OriginalFileName,
+ std::string &SuggestedPredefines,
+ FileManager &FileMgr) {
+ Predefines = Buffers[0].Data;
+ for (unsigned I = 1, N = Buffers.size(); I != N; ++I) {
+ Predefines += Buffers[I].Data;
+ }
+ return false;
+ }
+
+ virtual void ReadHeaderFileInfo(const HeaderFileInfo &HFI, unsigned ID) {
+ HSI.setHeaderFileInfoForUID(HFI, NumHeaderInfos++);
+ }
+
+ virtual void ReadCounter(unsigned Value) {
+ Counter = Value;
+ }
+};
+
+class StoredDiagnosticConsumer : public DiagnosticConsumer {
+ SmallVectorImpl<StoredDiagnostic> &StoredDiags;
+
+public:
+ explicit StoredDiagnosticConsumer(
+ SmallVectorImpl<StoredDiagnostic> &StoredDiags)
+ : StoredDiags(StoredDiags) { }
+
+ virtual void HandleDiagnostic(DiagnosticsEngine::Level Level,
+ const Diagnostic &Info);
+
+ DiagnosticConsumer *clone(DiagnosticsEngine &Diags) const {
+ // Just drop any diagnostics that come from cloned consumers; they'll
+ // have different source managers anyway.
+ // FIXME: We'd like to be able to capture these somehow, even if it's just
+ // file/line/column, because they could occur when parsing module maps or
+ // building modules on-demand.
+ return new IgnoringDiagConsumer();
+ }
+};
+
+/// \brief RAII object that optionally captures diagnostics, if
+/// there is no diagnostic client to capture them already.
+class CaptureDroppedDiagnostics {
+ DiagnosticsEngine &Diags;
+ StoredDiagnosticConsumer Client;
+ DiagnosticConsumer *PreviousClient;
+
+public:
+ CaptureDroppedDiagnostics(bool RequestCapture, DiagnosticsEngine &Diags,
+ SmallVectorImpl<StoredDiagnostic> &StoredDiags)
+ : Diags(Diags), Client(StoredDiags), PreviousClient(0)
+ {
+ if (RequestCapture || Diags.getClient() == 0) {
+ PreviousClient = Diags.takeClient();
+ Diags.setClient(&Client);
+ }
+ }
+
+ ~CaptureDroppedDiagnostics() {
+ if (Diags.getClient() == &Client) {
+ Diags.takeClient();
+ Diags.setClient(PreviousClient);
+ }
+ }
+};
+
+} // anonymous namespace
+
+void StoredDiagnosticConsumer::HandleDiagnostic(DiagnosticsEngine::Level Level,
+ const Diagnostic &Info) {
+ // Default implementation (Warnings/errors count).
+ DiagnosticConsumer::HandleDiagnostic(Level, Info);
+
+ StoredDiags.push_back(StoredDiagnostic(Level, Info));
+}
+
+const std::string &ASTUnit::getOriginalSourceFileName() {
+ return OriginalSourceFile;
+}
+
+llvm::MemoryBuffer *ASTUnit::getBufferForFile(StringRef Filename,
+ std::string *ErrorStr) {
+ assert(FileMgr);
+ return FileMgr->getBufferForFile(Filename, ErrorStr);
+}
+
+/// \brief Configure the diagnostics object for use with ASTUnit.
+void ASTUnit::ConfigureDiags(IntrusiveRefCntPtr<DiagnosticsEngine> &Diags,
+ const char **ArgBegin, const char **ArgEnd,
+ ASTUnit &AST, bool CaptureDiagnostics) {
+ if (!Diags.getPtr()) {
+ // No diagnostics engine was provided, so create our own diagnostics object
+ // with the default options.
+ DiagnosticOptions DiagOpts;
+ DiagnosticConsumer *Client = 0;
+ if (CaptureDiagnostics)
+ Client = new StoredDiagnosticConsumer(AST.StoredDiagnostics);
+    Diags = CompilerInstance::createDiagnostics(DiagOpts, ArgEnd - ArgBegin,
+ ArgBegin, Client);
+ } else if (CaptureDiagnostics) {
+ Diags->setClient(new StoredDiagnosticConsumer(AST.StoredDiagnostics));
+ }
+}
+
+ASTUnit *ASTUnit::LoadFromASTFile(const std::string &Filename,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ const FileSystemOptions &FileSystemOpts,
+ bool OnlyLocalDecls,
+ RemappedFile *RemappedFiles,
+ unsigned NumRemappedFiles,
+ bool CaptureDiagnostics,
+ bool AllowPCHWithCompilerErrors) {
+ OwningPtr<ASTUnit> AST(new ASTUnit(true));
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
+ ASTUnitCleanup(AST.get());
+ llvm::CrashRecoveryContextCleanupRegistrar<DiagnosticsEngine,
+ llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine> >
+ DiagCleanup(Diags.getPtr());
+
+ ConfigureDiags(Diags, 0, 0, *AST, CaptureDiagnostics);
+
+ AST->OnlyLocalDecls = OnlyLocalDecls;
+ AST->CaptureDiagnostics = CaptureDiagnostics;
+ AST->Diagnostics = Diags;
+ AST->FileMgr = new FileManager(FileSystemOpts);
+ AST->SourceMgr = new SourceManager(AST->getDiagnostics(),
+ AST->getFileManager());
+ AST->HeaderInfo.reset(new HeaderSearch(AST->getFileManager(),
+ AST->getDiagnostics(),
+ AST->ASTFileLangOpts,
+ /*Target=*/0));
+
+ for (unsigned I = 0; I != NumRemappedFiles; ++I) {
+ FilenameOrMemBuf fileOrBuf = RemappedFiles[I].second;
+ if (const llvm::MemoryBuffer *
+ memBuf = fileOrBuf.dyn_cast<const llvm::MemoryBuffer *>()) {
+ // Create the file entry for the file that we're mapping from.
+ const FileEntry *FromFile
+ = AST->getFileManager().getVirtualFile(RemappedFiles[I].first,
+ memBuf->getBufferSize(),
+ 0);
+ if (!FromFile) {
+ AST->getDiagnostics().Report(diag::err_fe_remap_missing_from_file)
+ << RemappedFiles[I].first;
+ delete memBuf;
+ continue;
+ }
+
+ // Override the contents of the "from" file with the contents of
+ // the "to" file.
+ AST->getSourceManager().overrideFileContents(FromFile, memBuf);
+
+ } else {
+ const char *fname = fileOrBuf.get<const char *>();
+ const FileEntry *ToFile = AST->FileMgr->getFile(fname);
+ if (!ToFile) {
+ AST->getDiagnostics().Report(diag::err_fe_remap_missing_to_file)
+ << RemappedFiles[I].first << fname;
+ continue;
+ }
+
+ // Create the file entry for the file that we're mapping from.
+ const FileEntry *FromFile
+ = AST->getFileManager().getVirtualFile(RemappedFiles[I].first,
+ ToFile->getSize(),
+ 0);
+ if (!FromFile) {
+ AST->getDiagnostics().Report(diag::err_fe_remap_missing_from_file)
+ << RemappedFiles[I].first;
+ continue;
+ }
+
+ // Override the contents of the "from" file with the contents of
+ // the "to" file.
+ AST->getSourceManager().overrideFileContents(FromFile, ToFile);
+ }
+ }
+
+ // Gather Info for preprocessor construction later on.
+
+ HeaderSearch &HeaderInfo = *AST->HeaderInfo.get();
+ std::string Predefines;
+ unsigned Counter;
+
+ OwningPtr<ASTReader> Reader;
+
+ AST->PP = new Preprocessor(AST->getDiagnostics(), AST->ASTFileLangOpts,
+ /*Target=*/0, AST->getSourceManager(), HeaderInfo,
+ *AST,
+ /*IILookup=*/0,
+ /*OwnsHeaderSearch=*/false,
+ /*DelayInitialization=*/true);
+ Preprocessor &PP = *AST->PP;
+
+ AST->Ctx = new ASTContext(AST->ASTFileLangOpts,
+ AST->getSourceManager(),
+ /*Target=*/0,
+ PP.getIdentifierTable(),
+ PP.getSelectorTable(),
+ PP.getBuiltinInfo(),
+ /* size_reserve = */0,
+ /*DelayInitialization=*/true);
+ ASTContext &Context = *AST->Ctx;
+
+ Reader.reset(new ASTReader(PP, Context,
+ /*isysroot=*/"",
+ /*DisableValidation=*/false,
+ /*DisableStatCache=*/false,
+ AllowPCHWithCompilerErrors));
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<ASTReader>
+ ReaderCleanup(Reader.get());
+
+ Reader->setListener(new ASTInfoCollector(*AST->PP, Context,
+ AST->ASTFileLangOpts, HeaderInfo,
+ AST->Target, Predefines, Counter));
+
+ switch (Reader->ReadAST(Filename, serialization::MK_MainFile)) {
+ case ASTReader::Success:
+ break;
+
+ case ASTReader::Failure:
+ case ASTReader::IgnorePCH:
+ AST->getDiagnostics().Report(diag::err_fe_unable_to_load_pch);
+ return NULL;
+ }
+
+ AST->OriginalSourceFile = Reader->getOriginalSourceFile();
+
+ PP.setPredefines(Reader->getSuggestedPredefines());
+ PP.setCounterValue(Counter);
+
+ // Attach the AST reader to the AST context as an external AST
+ // source, so that declarations will be deserialized from the
+ // AST file as needed.
+ ASTReader *ReaderPtr = Reader.get();
+ OwningPtr<ExternalASTSource> Source(Reader.take());
+
+ // Unregister the cleanup for ASTReader. It will get cleaned up
+ // by the ASTUnit cleanup.
+ ReaderCleanup.unregister();
+
+ Context.setExternalSource(Source);
+
+ // Create an AST consumer, even though it isn't used.
+ AST->Consumer.reset(new ASTConsumer);
+
+ // Create a semantic analysis object and tell the AST reader about it.
+ AST->TheSema.reset(new Sema(PP, Context, *AST->Consumer));
+ AST->TheSema->Initialize();
+ ReaderPtr->InitializeSema(*AST->TheSema);
+ AST->Reader = ReaderPtr;
+
+ return AST.take();
+}
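+
+// A minimal usage sketch (compiled out with #if 0): loading a serialized AST
+// such as one produced with "clang -emit-ast".  Passing an empty
+// DiagnosticsEngine pointer lets ConfigureDiags() create a default one; the
+// file name "example.ast" is a placeholder.
+#if 0
+static void loadASTExample() {
+  FileSystemOptions FSOpts;
+  OwningPtr<ASTUnit> AST(
+      ASTUnit::LoadFromASTFile("example.ast",
+                               IntrusiveRefCntPtr<DiagnosticsEngine>(),
+                               FSOpts,
+                               /*OnlyLocalDecls=*/false,
+                               /*RemappedFiles=*/0,
+                               /*NumRemappedFiles=*/0,
+                               /*CaptureDiagnostics=*/false,
+                               /*AllowPCHWithCompilerErrors=*/false));
+  if (AST)
+    llvm::errs() << AST->getOriginalSourceFileName() << '\n';
+}
+#endif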
+
+namespace {
+
+/// \brief Preprocessor callback class that updates a hash value with the names
+/// of all macros that have been defined by the translation unit.
+class MacroDefinitionTrackerPPCallbacks : public PPCallbacks {
+ unsigned &Hash;
+
+public:
+ explicit MacroDefinitionTrackerPPCallbacks(unsigned &Hash) : Hash(Hash) { }
+
+ virtual void MacroDefined(const Token &MacroNameTok, const MacroInfo *MI) {
+ Hash = llvm::HashString(MacroNameTok.getIdentifierInfo()->getName(), Hash);
+ }
+};
+
+/// \brief Add the given declaration to the hash of all top-level entities.
+void AddTopLevelDeclarationToHash(Decl *D, unsigned &Hash) {
+ if (!D)
+ return;
+
+ DeclContext *DC = D->getDeclContext();
+ if (!DC)
+ return;
+
+ if (!(DC->isTranslationUnit() || DC->getLookupParent()->isTranslationUnit()))
+ return;
+
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
+ if (ND->getIdentifier())
+ Hash = llvm::HashString(ND->getIdentifier()->getName(), Hash);
+ else if (DeclarationName Name = ND->getDeclName()) {
+ std::string NameStr = Name.getAsString();
+ Hash = llvm::HashString(NameStr, Hash);
+ }
+ return;
+ }
+}
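+
+// A minimal sketch (compiled out with #if 0): the top-level hash is a chained
+// llvm::HashString over declaration names (and, via the PPCallbacks above,
+// macro names), so adding, removing, or renaming a top-level entity changes
+// CurrentTopLevelHashValue.  The string literals below are placeholders.
+#if 0
+static unsigned topLevelHashExample() {
+  unsigned Hash = 0;
+  Hash = llvm::HashString("MyClass", Hash);   // a top-level declaration name
+  Hash = llvm::HashString("MY_MACRO", Hash);  // a macro definition name
+  return Hash;
+}
+#endif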
+
+class TopLevelDeclTrackerConsumer : public ASTConsumer {
+ ASTUnit &Unit;
+ unsigned &Hash;
+
+public:
+ TopLevelDeclTrackerConsumer(ASTUnit &_Unit, unsigned &Hash)
+ : Unit(_Unit), Hash(Hash) {
+ Hash = 0;
+ }
+
+ void handleTopLevelDecl(Decl *D) {
+ if (!D)
+ return;
+
+ // FIXME: Currently ObjC method declarations are incorrectly being
+ // reported as top-level declarations, even though their DeclContext
+ // is the containing ObjC @interface/@implementation. This is a
+ // fundamental problem in the parser right now.
+ if (isa<ObjCMethodDecl>(D))
+ return;
+
+ AddTopLevelDeclarationToHash(D, Hash);
+ Unit.addTopLevelDecl(D);
+
+ handleFileLevelDecl(D);
+ }
+
+ void handleFileLevelDecl(Decl *D) {
+ Unit.addFileLevelDecl(D);
+ if (NamespaceDecl *NSD = dyn_cast<NamespaceDecl>(D)) {
+ for (NamespaceDecl::decl_iterator
+ I = NSD->decls_begin(), E = NSD->decls_end(); I != E; ++I)
+ handleFileLevelDecl(*I);
+ }
+ }
+
+ bool HandleTopLevelDecl(DeclGroupRef D) {
+ for (DeclGroupRef::iterator it = D.begin(), ie = D.end(); it != ie; ++it)
+ handleTopLevelDecl(*it);
+ return true;
+ }
+
+ // We're not interested in "interesting" decls.
+ void HandleInterestingDecl(DeclGroupRef) {}
+
+ void HandleTopLevelDeclInObjCContainer(DeclGroupRef D) {
+ for (DeclGroupRef::iterator it = D.begin(), ie = D.end(); it != ie; ++it)
+ handleTopLevelDecl(*it);
+ }
+};
+
+class TopLevelDeclTrackerAction : public ASTFrontendAction {
+public:
+ ASTUnit &Unit;
+
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ CI.getPreprocessor().addPPCallbacks(
+ new MacroDefinitionTrackerPPCallbacks(Unit.getCurrentTopLevelHashValue()));
+ return new TopLevelDeclTrackerConsumer(Unit,
+ Unit.getCurrentTopLevelHashValue());
+ }
+
+public:
+ TopLevelDeclTrackerAction(ASTUnit &_Unit) : Unit(_Unit) {}
+
+ virtual bool hasCodeCompletionSupport() const { return false; }
+ virtual TranslationUnitKind getTranslationUnitKind() {
+ return Unit.getTranslationUnitKind();
+ }
+};
+
+class PrecompilePreambleConsumer : public PCHGenerator {
+ ASTUnit &Unit;
+ unsigned &Hash;
+ std::vector<Decl *> TopLevelDecls;
+
+public:
+ PrecompilePreambleConsumer(ASTUnit &Unit, const Preprocessor &PP,
+ StringRef isysroot, raw_ostream *Out)
+ : PCHGenerator(PP, "", 0, isysroot, Out), Unit(Unit),
+ Hash(Unit.getCurrentTopLevelHashValue()) {
+ Hash = 0;
+ }
+
+ virtual bool HandleTopLevelDecl(DeclGroupRef D) {
+ for (DeclGroupRef::iterator it = D.begin(), ie = D.end(); it != ie; ++it) {
+ Decl *D = *it;
+ // FIXME: Currently ObjC method declarations are incorrectly being
+ // reported as top-level declarations, even though their DeclContext
+ // is the containing ObjC @interface/@implementation. This is a
+ // fundamental problem in the parser right now.
+ if (isa<ObjCMethodDecl>(D))
+ continue;
+ AddTopLevelDeclarationToHash(D, Hash);
+ TopLevelDecls.push_back(D);
+ }
+ return true;
+ }
+
+ virtual void HandleTranslationUnit(ASTContext &Ctx) {
+ PCHGenerator::HandleTranslationUnit(Ctx);
+ if (!Unit.getDiagnostics().hasErrorOccurred()) {
+ // Translate the top-level declarations we captured during
+ // parsing into declaration IDs in the precompiled
+ // preamble. This will allow us to deserialize those top-level
+ // declarations when requested.
+ for (unsigned I = 0, N = TopLevelDecls.size(); I != N; ++I)
+ Unit.addTopLevelDeclFromPreamble(
+ getWriter().getDeclID(TopLevelDecls[I]));
+ }
+ }
+};
+
+class PrecompilePreambleAction : public ASTFrontendAction {
+ ASTUnit &Unit;
+
+public:
+ explicit PrecompilePreambleAction(ASTUnit &Unit) : Unit(Unit) {}
+
+ virtual ASTConsumer *CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ std::string Sysroot;
+ std::string OutputFile;
+ raw_ostream *OS = 0;
+ if (GeneratePCHAction::ComputeASTConsumerArguments(CI, InFile, Sysroot,
+ OutputFile,
+ OS))
+ return 0;
+
+ if (!CI.getFrontendOpts().RelocatablePCH)
+ Sysroot.clear();
+
+ CI.getPreprocessor().addPPCallbacks(
+ new MacroDefinitionTrackerPPCallbacks(Unit.getCurrentTopLevelHashValue()));
+ return new PrecompilePreambleConsumer(Unit, CI.getPreprocessor(), Sysroot,
+ OS);
+ }
+
+ virtual bool hasCodeCompletionSupport() const { return false; }
+ virtual bool hasASTFileSupport() const { return false; }
+ virtual TranslationUnitKind getTranslationUnitKind() { return TU_Prefix; }
+};
+
+}
+
+static void checkAndRemoveNonDriverDiags(SmallVectorImpl<StoredDiagnostic> &
+ StoredDiagnostics) {
+ // Get rid of stored diagnostics except the ones from the driver which do not
+ // have a source location.
+ for (unsigned I = 0; I < StoredDiagnostics.size(); ++I) {
+ if (StoredDiagnostics[I].getLocation().isValid()) {
+ StoredDiagnostics.erase(StoredDiagnostics.begin()+I);
+ --I;
+ }
+ }
+}
+
+static void checkAndSanitizeDiags(SmallVectorImpl<StoredDiagnostic> &
+ StoredDiagnostics,
+ SourceManager &SM) {
+  // The stored diagnostics refer to the old source manager; update their
+  // locations to refer into the new source manager. Since we've been careful
+  // to keep the source manager's state identical before and after, the
+  // source locations themselves can be reused.
+ for (unsigned I = 0, N = StoredDiagnostics.size(); I < N; ++I) {
+ if (StoredDiagnostics[I].getLocation().isValid()) {
+ FullSourceLoc Loc(StoredDiagnostics[I].getLocation(), SM);
+ StoredDiagnostics[I].setLocation(Loc);
+ }
+ }
+}
+
+/// Parse the source file into a translation unit using the given compiler
+/// invocation, replacing the current translation unit.
+///
+/// \returns True if a failure occurred that causes the ASTUnit not to
+/// contain any translation-unit information, false otherwise.
+bool ASTUnit::Parse(llvm::MemoryBuffer *OverrideMainBuffer) {
+ delete SavedMainFileBuffer;
+ SavedMainFileBuffer = 0;
+
+ if (!Invocation) {
+ delete OverrideMainBuffer;
+ return true;
+ }
+
+ // Create the compiler instance to use for building the AST.
+ OwningPtr<CompilerInstance> Clang(new CompilerInstance());
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
+ CICleanup(Clang.get());
+
+ IntrusiveRefCntPtr<CompilerInvocation>
+ CCInvocation(new CompilerInvocation(*Invocation));
+
+ Clang->setInvocation(CCInvocation.getPtr());
+ OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].File;
+
+ // Set up diagnostics, capturing any diagnostics that would
+ // otherwise be dropped.
+ Clang->setDiagnostics(&getDiagnostics());
+
+ // Create the target instance.
+ Clang->getTargetOpts().Features = TargetFeatures;
+ Clang->setTarget(TargetInfo::CreateTargetInfo(Clang->getDiagnostics(),
+ Clang->getTargetOpts()));
+ if (!Clang->hasTarget()) {
+ delete OverrideMainBuffer;
+ return true;
+ }
+
+ // Inform the target of the language options.
+ //
+ // FIXME: We shouldn't need to do this, the target should be immutable once
+ // created. This complexity should be lifted elsewhere.
+ Clang->getTarget().setForcedLangOptions(Clang->getLangOpts());
+
+ assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
+ "Invocation must have exactly one source file!");
+ assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_AST &&
+ "FIXME: AST inputs not yet supported here!");
+ assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_LLVM_IR &&
+         "IR inputs not supported here!");
+
+ // Configure the various subsystems.
+ // FIXME: Should we retain the previous file manager?
+ LangOpts = &Clang->getLangOpts();
+ FileSystemOpts = Clang->getFileSystemOpts();
+ FileMgr = new FileManager(FileSystemOpts);
+ SourceMgr = new SourceManager(getDiagnostics(), *FileMgr);
+ TheSema.reset();
+ Ctx = 0;
+ PP = 0;
+ Reader = 0;
+
+ // Clear out old caches and data.
+ TopLevelDecls.clear();
+ clearFileLevelDecls();
+ CleanTemporaryFiles();
+
+ if (!OverrideMainBuffer) {
+ checkAndRemoveNonDriverDiags(StoredDiagnostics);
+ TopLevelDeclsInPreamble.clear();
+ }
+
+ // Create a file manager object to provide access to and cache the filesystem.
+ Clang->setFileManager(&getFileManager());
+
+ // Create the source manager.
+ Clang->setSourceManager(&getSourceManager());
+
+ // If the main file has been overridden due to the use of a preamble,
+ // make that override happen and introduce the preamble.
+ PreprocessorOptions &PreprocessorOpts = Clang->getPreprocessorOpts();
+ if (OverrideMainBuffer) {
+ PreprocessorOpts.addRemappedFile(OriginalSourceFile, OverrideMainBuffer);
+ PreprocessorOpts.PrecompiledPreambleBytes.first = Preamble.size();
+ PreprocessorOpts.PrecompiledPreambleBytes.second
+ = PreambleEndsAtStartOfLine;
+ PreprocessorOpts.ImplicitPCHInclude = getPreambleFile(this);
+ PreprocessorOpts.DisablePCHValidation = true;
+
+    // The stored diagnostics refer to the old source manager; update their
+    // locations to refer into the new source manager. Since we've been
+    // careful to keep the source manager's state identical before and after,
+    // the source locations themselves can be reused.
+ checkAndSanitizeDiags(StoredDiagnostics, getSourceManager());
+
+    // Keep track of the override buffer.
+ SavedMainFileBuffer = OverrideMainBuffer;
+ }
+
+ OwningPtr<TopLevelDeclTrackerAction> Act(
+ new TopLevelDeclTrackerAction(*this));
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<TopLevelDeclTrackerAction>
+ ActCleanup(Act.get());
+
+ if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0]))
+ goto error;
+
+ if (OverrideMainBuffer) {
+ std::string ModName = getPreambleFile(this);
+ TranslateStoredDiagnostics(Clang->getModuleManager(), ModName,
+ getSourceManager(), PreambleDiagnostics,
+ StoredDiagnostics);
+ }
+
+ Act->Execute();
+
+ transferASTDataFromCompilerInstance(*Clang);
+
+ Act->EndSourceFile();
+
+ FailedParseDiagnostics.clear();
+
+ return false;
+
+error:
+ // Remove the overridden buffer we used for the preamble.
+ if (OverrideMainBuffer) {
+ delete OverrideMainBuffer;
+ SavedMainFileBuffer = 0;
+ }
+
+ // Keep the ownership of the data in the ASTUnit because the client may
+ // want to see the diagnostics.
+ transferASTDataFromCompilerInstance(*Clang);
+ FailedParseDiagnostics.swap(StoredDiagnostics);
+ StoredDiagnostics.clear();
+ NumStoredDiagnosticsFromDriver = 0;
+ return true;
+}
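+
+// A minimal sketch (compiled out with #if 0) of the intended calling pattern
+// for Parse(): optionally obtain a preamble-backed override buffer first and
+// hand it to Parse(), which takes ownership of it on both the success and the
+// failure paths.  "reparseExample" is a made-up name for illustration.
+#if 0
+bool ASTUnit::reparseExample() {
+  llvm::MemoryBuffer *OverrideMainBuffer = 0;
+  if (Invocation)
+    OverrideMainBuffer = getMainBufferWithPrecompiledPreamble(*Invocation,
+                                                              /*AllowRebuild=*/true,
+                                                              /*MaxLines=*/0);
+  // Returns true when the ASTUnit ends up with no translation-unit data.
+  return Parse(OverrideMainBuffer);
+}
+#endif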
+
+/// \brief Simple function to retrieve a path for a preamble precompiled header.
+static std::string GetPreamblePCHPath() {
+ // FIXME: This is lame; sys::Path should provide this function (in particular,
+ // it should know how to find the temporary files dir).
+ // FIXME: This is really lame. I copied this code from the Driver!
+ // FIXME: This is a hack so that we can override the preamble file during
+ // crash-recovery testing, which is the only case where the preamble files
+ // are not necessarily cleaned up.
+ const char *TmpFile = ::getenv("CINDEXTEST_PREAMBLE_FILE");
+ if (TmpFile)
+ return TmpFile;
+
+ std::string Error;
+ const char *TmpDir = ::getenv("TMPDIR");
+ if (!TmpDir)
+ TmpDir = ::getenv("TEMP");
+ if (!TmpDir)
+ TmpDir = ::getenv("TMP");
+#ifdef LLVM_ON_WIN32
+ if (!TmpDir)
+ TmpDir = ::getenv("USERPROFILE");
+#endif
+ if (!TmpDir)
+ TmpDir = "/tmp";
+ llvm::sys::Path P(TmpDir);
+ P.createDirectoryOnDisk(true);
+ P.appendComponent("preamble");
+ P.appendSuffix("pch");
+ if (P.makeUnique(/*reuse_current=*/false, /*ErrMsg*/0))
+ return std::string();
+
+ return P.str();
+}
+
+/// \brief Compute the preamble for the main file, providing the source buffer
+/// that corresponds to the main file along with a pair (bytes, start-of-line)
+/// that describes the preamble.
+std::pair<llvm::MemoryBuffer *, std::pair<unsigned, bool> >
+ASTUnit::ComputePreamble(CompilerInvocation &Invocation,
+ unsigned MaxLines, bool &CreatedBuffer) {
+ FrontendOptions &FrontendOpts = Invocation.getFrontendOpts();
+ PreprocessorOptions &PreprocessorOpts = Invocation.getPreprocessorOpts();
+ CreatedBuffer = false;
+
+ // Try to determine if the main file has been remapped, either from the
+ // command line (to another file) or directly through the compiler invocation
+ // (to a memory buffer).
+ llvm::MemoryBuffer *Buffer = 0;
+ llvm::sys::PathWithStatus MainFilePath(FrontendOpts.Inputs[0].File);
+ if (const llvm::sys::FileStatus *MainFileStatus = MainFilePath.getFileStatus()) {
+ // Check whether there is a file-file remapping of the main file
+ for (PreprocessorOptions::remapped_file_iterator
+ M = PreprocessorOpts.remapped_file_begin(),
+ E = PreprocessorOpts.remapped_file_end();
+ M != E;
+ ++M) {
+ llvm::sys::PathWithStatus MPath(M->first);
+ if (const llvm::sys::FileStatus *MStatus = MPath.getFileStatus()) {
+ if (MainFileStatus->uniqueID == MStatus->uniqueID) {
+ // We found a remapping. Try to load the resulting, remapped source.
+ if (CreatedBuffer) {
+ delete Buffer;
+ CreatedBuffer = false;
+ }
+
+ Buffer = getBufferForFile(M->second);
+ if (!Buffer)
+ return std::make_pair((llvm::MemoryBuffer*)0,
+ std::make_pair(0, true));
+ CreatedBuffer = true;
+ }
+ }
+ }
+
+    // Check whether there is a file-buffer remapping. It supersedes the
+ // file-file remapping.
+ for (PreprocessorOptions::remapped_file_buffer_iterator
+ M = PreprocessorOpts.remapped_file_buffer_begin(),
+ E = PreprocessorOpts.remapped_file_buffer_end();
+ M != E;
+ ++M) {
+ llvm::sys::PathWithStatus MPath(M->first);
+ if (const llvm::sys::FileStatus *MStatus = MPath.getFileStatus()) {
+ if (MainFileStatus->uniqueID == MStatus->uniqueID) {
+ // We found a remapping.
+ if (CreatedBuffer) {
+ delete Buffer;
+ CreatedBuffer = false;
+ }
+
+ Buffer = const_cast<llvm::MemoryBuffer *>(M->second);
+ }
+ }
+ }
+ }
+
+ // If the main source file was not remapped, load it now.
+ if (!Buffer) {
+ Buffer = getBufferForFile(FrontendOpts.Inputs[0].File);
+ if (!Buffer)
+ return std::make_pair((llvm::MemoryBuffer*)0, std::make_pair(0, true));
+
+ CreatedBuffer = true;
+ }
+
+ return std::make_pair(Buffer, Lexer::ComputePreamble(Buffer,
+ *Invocation.getLangOpts(),
+ MaxLines));
+}
+
+static llvm::MemoryBuffer *CreatePaddedMainFileBuffer(llvm::MemoryBuffer *Old,
+ unsigned NewSize,
+ StringRef NewName) {
+ llvm::MemoryBuffer *Result
+ = llvm::MemoryBuffer::getNewUninitMemBuffer(NewSize, NewName);
+ memcpy(const_cast<char*>(Result->getBufferStart()),
+ Old->getBufferStart(), Old->getBufferSize());
+ memset(const_cast<char*>(Result->getBufferStart()) + Old->getBufferSize(),
+ ' ', NewSize - Old->getBufferSize() - 1);
+ const_cast<char*>(Result->getBufferEnd())[-1] = '\n';
+
+ return Result;
+}
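+
+// A minimal sketch (compiled out with #if 0) of what CreatePaddedMainFileBuffer
+// produces: the old contents, space padding, and a final newline, so the
+// remapped main file keeps exactly the size that was reserved when the
+// preamble PCH was built.  "main.c" and the 8192-byte size are placeholders,
+// and Old is assumed to be smaller than that size.
+//
+//   [ old contents | ' ' ' ' ... ' ' | '\n' ]
+#if 0
+static void paddedBufferExample(llvm::MemoryBuffer *Old) {
+  llvm::MemoryBuffer *Padded =
+      CreatePaddedMainFileBuffer(Old, /*NewSize=*/8192, "main.c");
+  assert(Padded->getBufferSize() == 8192);
+  assert(Padded->getBufferEnd()[-1] == '\n');
+  delete Padded;
+}
+#endif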
+
+/// \brief Attempt to build or re-use a precompiled preamble when (re-)parsing
+/// the source file.
+///
+/// This routine will compute the preamble of the main source file. If a
+/// non-trivial preamble is found, it will precompile that preamble into a
+/// precompiled header so that the precompiled preamble can be used to reduce
+/// reparsing time. If a precompiled preamble has already been constructed,
+/// this routine will determine if it is still valid and, if so, avoid
+/// rebuilding the precompiled preamble.
+///
+/// \param AllowRebuild When true (the default), this routine is
+/// allowed to rebuild the precompiled preamble if it is found to be
+/// out-of-date.
+///
+/// \param MaxLines When non-zero, the maximum number of lines that
+/// can occur within the preamble.
+///
+/// \returns If the precompiled preamble can be used, returns a newly-allocated
+/// buffer that should be used in place of the main file when doing so.
+/// Otherwise, returns a NULL pointer.
+llvm::MemoryBuffer *ASTUnit::getMainBufferWithPrecompiledPreamble(
+ const CompilerInvocation &PreambleInvocationIn,
+ bool AllowRebuild,
+ unsigned MaxLines) {
+
+ IntrusiveRefCntPtr<CompilerInvocation>
+ PreambleInvocation(new CompilerInvocation(PreambleInvocationIn));
+ FrontendOptions &FrontendOpts = PreambleInvocation->getFrontendOpts();
+ PreprocessorOptions &PreprocessorOpts
+ = PreambleInvocation->getPreprocessorOpts();
+
+ bool CreatedPreambleBuffer = false;
+ std::pair<llvm::MemoryBuffer *, std::pair<unsigned, bool> > NewPreamble
+ = ComputePreamble(*PreambleInvocation, MaxLines, CreatedPreambleBuffer);
+
+  // If ComputePreamble() created the preamble buffer, take ownership of it.
+ OwningPtr<llvm::MemoryBuffer> OwnedPreambleBuffer;
+ if (CreatedPreambleBuffer)
+ OwnedPreambleBuffer.reset(NewPreamble.first);
+
+ if (!NewPreamble.second.first) {
+ // We couldn't find a preamble in the main source. Clear out the current
+ // preamble, if we have one. It's obviously no good any more.
+ Preamble.clear();
+ erasePreambleFile(this);
+
+ // The next time we actually see a preamble, precompile it.
+ PreambleRebuildCounter = 1;
+ return 0;
+ }
+
+ if (!Preamble.empty()) {
+ // We've previously computed a preamble. Check whether we have the same
+    // preamble now as we did before, and that there's enough space in
+ // the main-file buffer within the precompiled preamble to fit the
+ // new main file.
+ if (Preamble.size() == NewPreamble.second.first &&
+ PreambleEndsAtStartOfLine == NewPreamble.second.second &&
+ NewPreamble.first->getBufferSize() < PreambleReservedSize-2 &&
+ memcmp(Preamble.getBufferStart(), NewPreamble.first->getBufferStart(),
+ NewPreamble.second.first) == 0) {
+ // The preamble has not changed. We may be able to re-use the precompiled
+ // preamble.
+
+ // Check that none of the files used by the preamble have changed.
+ bool AnyFileChanged = false;
+
+ // First, make a record of those files that have been overridden via
+ // remapping or unsaved_files.
+ llvm::StringMap<std::pair<off_t, time_t> > OverriddenFiles;
+ for (PreprocessorOptions::remapped_file_iterator
+ R = PreprocessorOpts.remapped_file_begin(),
+ REnd = PreprocessorOpts.remapped_file_end();
+ !AnyFileChanged && R != REnd;
+ ++R) {
+ struct stat StatBuf;
+ if (FileMgr->getNoncachedStatValue(R->second, StatBuf)) {
+ // If we can't stat the file we're remapping to, assume that something
+ // horrible happened.
+ AnyFileChanged = true;
+ break;
+ }
+
+ OverriddenFiles[R->first] = std::make_pair(StatBuf.st_size,
+ StatBuf.st_mtime);
+ }
+ for (PreprocessorOptions::remapped_file_buffer_iterator
+ R = PreprocessorOpts.remapped_file_buffer_begin(),
+ REnd = PreprocessorOpts.remapped_file_buffer_end();
+ !AnyFileChanged && R != REnd;
+ ++R) {
+ // FIXME: Should we actually compare the contents of file->buffer
+ // remappings?
+ OverriddenFiles[R->first] = std::make_pair(R->second->getBufferSize(),
+ 0);
+ }
+
+ // Check whether anything has changed.
+ for (llvm::StringMap<std::pair<off_t, time_t> >::iterator
+ F = FilesInPreamble.begin(), FEnd = FilesInPreamble.end();
+ !AnyFileChanged && F != FEnd;
+ ++F) {
+ llvm::StringMap<std::pair<off_t, time_t> >::iterator Overridden
+ = OverriddenFiles.find(F->first());
+ if (Overridden != OverriddenFiles.end()) {
+ // This file was remapped; check whether the newly-mapped file
+ // matches up with the previous mapping.
+ if (Overridden->second != F->second)
+ AnyFileChanged = true;
+ continue;
+ }
+
+ // The file was not remapped; check whether it has changed on disk.
+ struct stat StatBuf;
+ if (FileMgr->getNoncachedStatValue(F->first(), StatBuf)) {
+ // If we can't stat the file, assume that something horrible happened.
+ AnyFileChanged = true;
+ } else if (StatBuf.st_size != F->second.first ||
+ StatBuf.st_mtime != F->second.second)
+ AnyFileChanged = true;
+ }
+
+ if (!AnyFileChanged) {
+ // Okay! We can re-use the precompiled preamble.
+
+ // Set the state of the diagnostic object to mimic its state
+ // after parsing the preamble.
+ getDiagnostics().Reset();
+ ProcessWarningOptions(getDiagnostics(),
+ PreambleInvocation->getDiagnosticOpts());
+ getDiagnostics().setNumWarnings(NumWarningsInPreamble);
+
+      // Create a version of the main file buffer that is padded to the
+ // buffer size we reserved when creating the preamble.
+ return CreatePaddedMainFileBuffer(NewPreamble.first,
+ PreambleReservedSize,
+ FrontendOpts.Inputs[0].File);
+ }
+ }
+
+ // If we aren't allowed to rebuild the precompiled preamble, just
+ // return now.
+ if (!AllowRebuild)
+ return 0;
+
+ // We can't reuse the previously-computed preamble. Build a new one.
+ Preamble.clear();
+ PreambleDiagnostics.clear();
+ erasePreambleFile(this);
+ PreambleRebuildCounter = 1;
+ } else if (!AllowRebuild) {
+ // We aren't allowed to rebuild the precompiled preamble; just
+ // return now.
+ return 0;
+ }
+
+ // If the preamble rebuild counter > 1, it's because we previously
+ // failed to build a preamble and we're not yet ready to try
+ // again. Decrement the counter and return a failure.
+ if (PreambleRebuildCounter > 1) {
+ --PreambleRebuildCounter;
+ return 0;
+ }
+
+ // Create a temporary file for the precompiled preamble. In rare
+ // circumstances, this can fail.
+ std::string PreamblePCHPath = GetPreamblePCHPath();
+ if (PreamblePCHPath.empty()) {
+ // Try again next time.
+ PreambleRebuildCounter = 1;
+ return 0;
+ }
+
+ // We did not previously compute a preamble, or it can't be reused anyway.
+ SimpleTimer PreambleTimer(WantTiming);
+ PreambleTimer.setOutput("Precompiling preamble");
+
+ // Create a new buffer that stores the preamble. The buffer also contains
+ // extra space for the original contents of the file (which will be present
+ // when we actually parse the file) along with more room in case the file
+ // grows.
+ PreambleReservedSize = NewPreamble.first->getBufferSize();
+ if (PreambleReservedSize < 4096)
+ PreambleReservedSize = 8191;
+ else
+ PreambleReservedSize *= 2;
+
+ // Save the preamble text for later; we'll need to compare against it for
+ // subsequent reparses.
+ StringRef MainFilename = PreambleInvocation->getFrontendOpts().Inputs[0].File;
+ Preamble.assign(FileMgr->getFile(MainFilename),
+ NewPreamble.first->getBufferStart(),
+ NewPreamble.first->getBufferStart()
+ + NewPreamble.second.first);
+ PreambleEndsAtStartOfLine = NewPreamble.second.second;
+
+ delete PreambleBuffer;
+ PreambleBuffer
+ = llvm::MemoryBuffer::getNewUninitMemBuffer(PreambleReservedSize,
+ FrontendOpts.Inputs[0].File);
+ memcpy(const_cast<char*>(PreambleBuffer->getBufferStart()),
+ NewPreamble.first->getBufferStart(), Preamble.size());
+ memset(const_cast<char*>(PreambleBuffer->getBufferStart()) + Preamble.size(),
+ ' ', PreambleReservedSize - Preamble.size() - 1);
+ const_cast<char*>(PreambleBuffer->getBufferEnd())[-1] = '\n';
+
+ // Remap the main source file to the preamble buffer.
+ llvm::sys::PathWithStatus MainFilePath(FrontendOpts.Inputs[0].File);
+ PreprocessorOpts.addRemappedFile(MainFilePath.str(), PreambleBuffer);
+
+ // Tell the compiler invocation to generate a temporary precompiled header.
+ FrontendOpts.ProgramAction = frontend::GeneratePCH;
+ // FIXME: Generate the precompiled header into memory?
+ FrontendOpts.OutputFile = PreamblePCHPath;
+ PreprocessorOpts.PrecompiledPreambleBytes.first = 0;
+ PreprocessorOpts.PrecompiledPreambleBytes.second = false;
+
+ // Create the compiler instance to use for building the precompiled preamble.
+ OwningPtr<CompilerInstance> Clang(new CompilerInstance());
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
+ CICleanup(Clang.get());
+
+ Clang->setInvocation(&*PreambleInvocation);
+ OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].File;
+
+ // Set up diagnostics, capturing all of the diagnostics produced.
+ Clang->setDiagnostics(&getDiagnostics());
+
+ // Create the target instance.
+ Clang->getTargetOpts().Features = TargetFeatures;
+ Clang->setTarget(TargetInfo::CreateTargetInfo(Clang->getDiagnostics(),
+ Clang->getTargetOpts()));
+ if (!Clang->hasTarget()) {
+ llvm::sys::Path(FrontendOpts.OutputFile).eraseFromDisk();
+ Preamble.clear();
+ PreambleRebuildCounter = DefaultPreambleRebuildInterval;
+ PreprocessorOpts.eraseRemappedFile(
+ PreprocessorOpts.remapped_file_buffer_end() - 1);
+ return 0;
+ }
+
+ // Inform the target of the language options.
+ //
+ // FIXME: We shouldn't need to do this, the target should be immutable once
+ // created. This complexity should be lifted elsewhere.
+ Clang->getTarget().setForcedLangOptions(Clang->getLangOpts());
+
+ assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
+ "Invocation must have exactly one source file!");
+ assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_AST &&
+ "FIXME: AST inputs not yet supported here!");
+ assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_LLVM_IR &&
+ "IR inputs not support here!");
+
+ // Clear out old caches and data.
+ getDiagnostics().Reset();
+ ProcessWarningOptions(getDiagnostics(), Clang->getDiagnosticOpts());
+ checkAndRemoveNonDriverDiags(StoredDiagnostics);
+ TopLevelDecls.clear();
+ TopLevelDeclsInPreamble.clear();
+
+ // Create a file manager object to provide access to and cache the filesystem.
+ Clang->setFileManager(new FileManager(Clang->getFileSystemOpts()));
+
+ // Create the source manager.
+ Clang->setSourceManager(new SourceManager(getDiagnostics(),
+ Clang->getFileManager()));
+
+ OwningPtr<PrecompilePreambleAction> Act;
+ Act.reset(new PrecompilePreambleAction(*this));
+ if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0])) {
+ llvm::sys::Path(FrontendOpts.OutputFile).eraseFromDisk();
+ Preamble.clear();
+ PreambleRebuildCounter = DefaultPreambleRebuildInterval;
+ PreprocessorOpts.eraseRemappedFile(
+ PreprocessorOpts.remapped_file_buffer_end() - 1);
+ return 0;
+ }
+
+ Act->Execute();
+ Act->EndSourceFile();
+
+ if (Diagnostics->hasErrorOccurred()) {
+ // There were errors parsing the preamble, so no precompiled header was
+ // generated. Forget that we even tried.
+ // FIXME: Should we leave a note for ourselves to try again?
+ llvm::sys::Path(FrontendOpts.OutputFile).eraseFromDisk();
+ Preamble.clear();
+ TopLevelDeclsInPreamble.clear();
+ PreambleRebuildCounter = DefaultPreambleRebuildInterval;
+ PreprocessorOpts.eraseRemappedFile(
+ PreprocessorOpts.remapped_file_buffer_end() - 1);
+ return 0;
+ }
+
+ // Transfer any diagnostics generated when parsing the preamble into the set
+ // of preamble diagnostics.
+ PreambleDiagnostics.clear();
+ PreambleDiagnostics.insert(PreambleDiagnostics.end(),
+ stored_diag_afterDriver_begin(), stored_diag_end());
+ checkAndRemoveNonDriverDiags(StoredDiagnostics);
+
+ // Keep track of the preamble we precompiled.
+ setPreambleFile(this, FrontendOpts.OutputFile);
+ NumWarningsInPreamble = getDiagnostics().getNumWarnings();
+
+ // Keep track of all of the files that the source manager knows about,
+ // so we can verify whether they have changed or not.
+ FilesInPreamble.clear();
+ SourceManager &SourceMgr = Clang->getSourceManager();
+ const llvm::MemoryBuffer *MainFileBuffer
+ = SourceMgr.getBuffer(SourceMgr.getMainFileID());
+ for (SourceManager::fileinfo_iterator F = SourceMgr.fileinfo_begin(),
+ FEnd = SourceMgr.fileinfo_end();
+ F != FEnd;
+ ++F) {
+ const FileEntry *File = F->second->OrigEntry;
+ if (!File || F->second->getRawBuffer() == MainFileBuffer)
+ continue;
+
+ FilesInPreamble[File->getName()]
+ = std::make_pair(F->second->getSize(), File->getModificationTime());
+ }
+
+ PreambleRebuildCounter = 1;
+ PreprocessorOpts.eraseRemappedFile(
+ PreprocessorOpts.remapped_file_buffer_end() - 1);
+
+ // If the hash of top-level entities differs from the hash of the top-level
+ // entities the last time we rebuilt the preamble, clear out the completion
+ // cache.
+ if (CurrentTopLevelHashValue != PreambleTopLevelHashValue) {
+ CompletionCacheTopLevelHashValue = 0;
+ PreambleTopLevelHashValue = CurrentTopLevelHashValue;
+ }
+
+ return CreatePaddedMainFileBuffer(NewPreamble.first,
+ PreambleReservedSize,
+ FrontendOpts.Inputs[0].File);
+}
+
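+/// \brief Resolve the declaration IDs recorded for the precompiled preamble
+/// into actual declarations via the external AST source, and prepend them to
+/// this translation unit's list of top-level declarations.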
+void ASTUnit::RealizeTopLevelDeclsFromPreamble() {
+ std::vector<Decl *> Resolved;
+ Resolved.reserve(TopLevelDeclsInPreamble.size());
+ ExternalASTSource &Source = *getASTContext().getExternalSource();
+ for (unsigned I = 0, N = TopLevelDeclsInPreamble.size(); I != N; ++I) {
+ // Resolve the declaration ID to an actual declaration, possibly
+ // deserializing the declaration in the process.
+ Decl *D = Source.GetExternalDecl(TopLevelDeclsInPreamble[I]);
+ if (D)
+ Resolved.push_back(D);
+ }
+ TopLevelDeclsInPreamble.clear();
+ TopLevelDecls.insert(TopLevelDecls.begin(), Resolved.begin(), Resolved.end());
+}
+
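+/// \brief Take ownership of the AST state (Sema, AST consumer, ASTContext,
+/// Preprocessor, target, and module manager) built by the given
+/// CompilerInstance, detaching its source and file managers.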
+void ASTUnit::transferASTDataFromCompilerInstance(CompilerInstance &CI) {
+ // Steal the created target, context, and preprocessor.
+ TheSema.reset(CI.takeSema());
+ Consumer.reset(CI.takeASTConsumer());
+ Ctx = &CI.getASTContext();
+ PP = &CI.getPreprocessor();
+ CI.setSourceManager(0);
+ CI.setFileManager(0);
+ Target = &CI.getTarget();
+ Reader = CI.getModuleManager();
+}
+
+StringRef ASTUnit::getMainFileName() const {
+ return Invocation->getFrontendOpts().Inputs[0].File;
+}
+
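+/// \brief Create an empty ASTUnit that owns the given invocation and has its
+/// own file and source managers, but has not parsed anything yet.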
+ASTUnit *ASTUnit::create(CompilerInvocation *CI,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ bool CaptureDiagnostics) {
+ OwningPtr<ASTUnit> AST;
+ AST.reset(new ASTUnit(false));
+ ConfigureDiags(Diags, 0, 0, *AST, CaptureDiagnostics);
+ AST->Diagnostics = Diags;
+ AST->Invocation = CI;
+ AST->FileSystemOpts = CI->getFileSystemOpts();
+ AST->FileMgr = new FileManager(AST->FileSystemOpts);
+ AST->SourceMgr = new SourceManager(AST->getDiagnostics(), *AST->FileMgr);
+
+ return AST.take();
+}
+
+ASTUnit *ASTUnit::LoadFromCompilerInvocationAction(CompilerInvocation *CI,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ ASTFrontendAction *Action,
+ ASTUnit *Unit,
+ bool Persistent,
+ StringRef ResourceFilesPath,
+ bool OnlyLocalDecls,
+ bool CaptureDiagnostics,
+ bool PrecompilePreamble,
+ bool CacheCodeCompletionResults,
+ OwningPtr<ASTUnit> *ErrAST) {
+ assert(CI && "A CompilerInvocation is required");
+
+ OwningPtr<ASTUnit> OwnAST;
+ ASTUnit *AST = Unit;
+ if (!AST) {
+ // Create the AST unit.
+ OwnAST.reset(create(CI, Diags, CaptureDiagnostics));
+ AST = OwnAST.get();
+ }
+
+ if (!ResourceFilesPath.empty()) {
+ // Override the resources path.
+ CI->getHeaderSearchOpts().ResourceDir = ResourceFilesPath;
+ }
+ AST->OnlyLocalDecls = OnlyLocalDecls;
+ AST->CaptureDiagnostics = CaptureDiagnostics;
+ if (PrecompilePreamble)
+ AST->PreambleRebuildCounter = 2;
+ AST->TUKind = Action ? Action->getTranslationUnitKind() : TU_Complete;
+ AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
+ ASTUnitCleanup(OwnAST.get());
+ llvm::CrashRecoveryContextCleanupRegistrar<DiagnosticsEngine,
+ llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine> >
+ DiagCleanup(Diags.getPtr());
+
+ // We'll manage file buffers ourselves.
+ CI->getPreprocessorOpts().RetainRemappedFileBuffers = true;
+ CI->getFrontendOpts().DisableFree = false;
+ ProcessWarningOptions(AST->getDiagnostics(), CI->getDiagnosticOpts());
+
+ // Save the target features.
+ AST->TargetFeatures = CI->getTargetOpts().Features;
+
+ // Create the compiler instance to use for building the AST.
+ OwningPtr<CompilerInstance> Clang(new CompilerInstance());
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
+ CICleanup(Clang.get());
+
+ Clang->setInvocation(CI);
+ AST->OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].File;
+
+ // Set up diagnostics, capturing any diagnostics that would
+ // otherwise be dropped.
+ Clang->setDiagnostics(&AST->getDiagnostics());
+
+ // Create the target instance.
+ Clang->getTargetOpts().Features = AST->TargetFeatures;
+ Clang->setTarget(TargetInfo::CreateTargetInfo(Clang->getDiagnostics(),
+ Clang->getTargetOpts()));
+ if (!Clang->hasTarget())
+ return 0;
+
+ // Inform the target of the language options.
+ //
+ // FIXME: We shouldn't need to do this, the target should be immutable once
+ // created. This complexity should be lifted elsewhere.
+ Clang->getTarget().setForcedLangOptions(Clang->getLangOpts());
+
+ assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
+ "Invocation must have exactly one source file!");
+ assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_AST &&
+ "FIXME: AST inputs not yet supported here!");
+ assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_LLVM_IR &&
+ "IR inputs not supported here!");
+
+ // Configure the various subsystems.
+ AST->TheSema.reset();
+ AST->Ctx = 0;
+ AST->PP = 0;
+ AST->Reader = 0;
+
+ // Create a file manager object to provide access to and cache the filesystem.
+ Clang->setFileManager(&AST->getFileManager());
+
+ // Create the source manager.
+ Clang->setSourceManager(&AST->getSourceManager());
+
+ ASTFrontendAction *Act = Action;
+
+ OwningPtr<TopLevelDeclTrackerAction> TrackerAct;
+ if (!Act) {
+ TrackerAct.reset(new TopLevelDeclTrackerAction(*AST));
+ Act = TrackerAct.get();
+ }
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<TopLevelDeclTrackerAction>
+ ActCleanup(TrackerAct.get());
+
+ if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0])) {
+ AST->transferASTDataFromCompilerInstance(*Clang);
+ if (OwnAST && ErrAST)
+ ErrAST->swap(OwnAST);
+
+ return 0;
+ }
+
+ if (Persistent && !TrackerAct) {
+ Clang->getPreprocessor().addPPCallbacks(
+ new MacroDefinitionTrackerPPCallbacks(AST->getCurrentTopLevelHashValue()));
+ std::vector<ASTConsumer*> Consumers;
+ if (Clang->hasASTConsumer())
+ Consumers.push_back(Clang->takeASTConsumer());
+ Consumers.push_back(new TopLevelDeclTrackerConsumer(*AST,
+ AST->getCurrentTopLevelHashValue()));
+ Clang->setASTConsumer(new MultiplexConsumer(Consumers));
+ }
+ Act->Execute();
+
+ // Steal the created target, context, and preprocessor.
+ AST->transferASTDataFromCompilerInstance(*Clang);
+
+ Act->EndSourceFile();
+
+ if (OwnAST)
+ return OwnAST.take();
+ else
+ return AST;
+}
+
+bool ASTUnit::LoadFromCompilerInvocation(bool PrecompilePreamble) {
+ if (!Invocation)
+ return true;
+
+ // We'll manage file buffers ourselves.
+ Invocation->getPreprocessorOpts().RetainRemappedFileBuffers = true;
+ Invocation->getFrontendOpts().DisableFree = false;
+ ProcessWarningOptions(getDiagnostics(), Invocation->getDiagnosticOpts());
+
+ // Save the target features.
+ TargetFeatures = Invocation->getTargetOpts().Features;
+
+ llvm::MemoryBuffer *OverrideMainBuffer = 0;
+ if (PrecompilePreamble) {
+ PreambleRebuildCounter = 2;
+ OverrideMainBuffer
+ = getMainBufferWithPrecompiledPreamble(*Invocation);
+ }
+
+ SimpleTimer ParsingTimer(WantTiming);
+ ParsingTimer.setOutput("Parsing " + getMainFileName());
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<llvm::MemoryBuffer>
+ MemBufferCleanup(OverrideMainBuffer);
+
+ return Parse(OverrideMainBuffer);
+}
+
+ASTUnit *ASTUnit::LoadFromCompilerInvocation(CompilerInvocation *CI,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ bool OnlyLocalDecls,
+ bool CaptureDiagnostics,
+ bool PrecompilePreamble,
+ TranslationUnitKind TUKind,
+ bool CacheCodeCompletionResults) {
+ // Create the AST unit.
+ OwningPtr<ASTUnit> AST;
+ AST.reset(new ASTUnit(false));
+ ConfigureDiags(Diags, 0, 0, *AST, CaptureDiagnostics);
+ AST->Diagnostics = Diags;
+ AST->OnlyLocalDecls = OnlyLocalDecls;
+ AST->CaptureDiagnostics = CaptureDiagnostics;
+ AST->TUKind = TUKind;
+ AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
+ AST->Invocation = CI;
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
+ ASTUnitCleanup(AST.get());
+ llvm::CrashRecoveryContextCleanupRegistrar<DiagnosticsEngine,
+ llvm::CrashRecoveryContextReleaseRefCleanup<DiagnosticsEngine> >
+ DiagCleanup(Diags.getPtr());
+
+ return AST->LoadFromCompilerInvocation(PrecompilePreamble)? 0 : AST.take();
+}
+
+ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
+ const char **ArgEnd,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
+ StringRef ResourceFilesPath,
+ bool OnlyLocalDecls,
+ bool CaptureDiagnostics,
+ RemappedFile *RemappedFiles,
+ unsigned NumRemappedFiles,
+ bool RemappedFilesKeepOriginalName,
+ bool PrecompilePreamble,
+ TranslationUnitKind TUKind,
+ bool CacheCodeCompletionResults,
+ bool AllowPCHWithCompilerErrors,
+ bool SkipFunctionBodies,
+ OwningPtr<ASTUnit> *ErrAST) {
+ if (!Diags.getPtr()) {
+ // No diagnostics engine was provided, so create our own diagnostics object
+ // with the default options.
+ DiagnosticOptions DiagOpts;
+ Diags = CompilerInstance::createDiagnostics(DiagOpts, ArgEnd - ArgBegin,
+ ArgBegin);
+ }
+
+ SmallVector<StoredDiagnostic, 4> StoredDiagnostics;
+
+ IntrusiveRefCntPtr<CompilerInvocation> CI;
+
+ {
+
+ CaptureDroppedDiagnostics Capture(CaptureDiagnostics, *Diags,
+ StoredDiagnostics);
+
+ CI = clang::createInvocationFromCommandLine(
+ llvm::makeArrayRef(ArgBegin, ArgEnd),
+ Diags);
+ if (!CI)
+ return 0;
+ }
+
+ // Override any files that need remapping
+ for (unsigned I = 0; I != NumRemappedFiles; ++I) {
+ FilenameOrMemBuf fileOrBuf = RemappedFiles[I].second;
+ if (const llvm::MemoryBuffer *
+ memBuf = fileOrBuf.dyn_cast<const llvm::MemoryBuffer *>()) {
+ CI->getPreprocessorOpts().addRemappedFile(RemappedFiles[I].first, memBuf);
+ } else {
+ const char *fname = fileOrBuf.get<const char *>();
+ CI->getPreprocessorOpts().addRemappedFile(RemappedFiles[I].first, fname);
+ }
+ }
+ PreprocessorOptions &PPOpts = CI->getPreprocessorOpts();
+ PPOpts.RemappedFilesKeepOriginalName = RemappedFilesKeepOriginalName;
+ PPOpts.AllowPCHWithCompilerErrors = AllowPCHWithCompilerErrors;
+
+ // Override the resources path.
+ CI->getHeaderSearchOpts().ResourceDir = ResourceFilesPath;
+
+ CI->getFrontendOpts().SkipFunctionBodies = SkipFunctionBodies;
+
+ // Create the AST unit.
+ OwningPtr<ASTUnit> AST;
+ AST.reset(new ASTUnit(false));
+ ConfigureDiags(Diags, ArgBegin, ArgEnd, *AST, CaptureDiagnostics);
+ AST->Diagnostics = Diags;
+ Diags = 0; // Zero out now to ease cleanup during crash recovery.
+ AST->FileSystemOpts = CI->getFileSystemOpts();
+ AST->FileMgr = new FileManager(AST->FileSystemOpts);
+ AST->OnlyLocalDecls = OnlyLocalDecls;
+ AST->CaptureDiagnostics = CaptureDiagnostics;
+ AST->TUKind = TUKind;
+ AST->ShouldCacheCodeCompletionResults = CacheCodeCompletionResults;
+ AST->NumStoredDiagnosticsFromDriver = StoredDiagnostics.size();
+ AST->StoredDiagnostics.swap(StoredDiagnostics);
+ AST->Invocation = CI;
+ CI = 0; // Zero out now to ease cleanup during crash recovery.
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<ASTUnit>
+ ASTUnitCleanup(AST.get());
+
+ if (AST->LoadFromCompilerInvocation(PrecompilePreamble)) {
+ // Some error occurred. If the caller wants to examine diagnostics, pass it
+ // the ASTUnit.
+ if (ErrAST) {
+ AST->StoredDiagnostics.swap(AST->FailedParseDiagnostics);
+ ErrAST->swap(AST);
+ }
+ return 0;
+ }
+
+ return AST.take();
+}
+
+bool ASTUnit::Reparse(RemappedFile *RemappedFiles, unsigned NumRemappedFiles) {
+ if (!Invocation)
+ return true;
+
+ clearFileLevelDecls();
+
+ SimpleTimer ParsingTimer(WantTiming);
+ ParsingTimer.setOutput("Reparsing " + getMainFileName());
+
+ // Remap files.
+ PreprocessorOptions &PPOpts = Invocation->getPreprocessorOpts();
+ PPOpts.DisableStatCache = true;
+ for (PreprocessorOptions::remapped_file_buffer_iterator
+ R = PPOpts.remapped_file_buffer_begin(),
+ REnd = PPOpts.remapped_file_buffer_end();
+ R != REnd;
+ ++R) {
+ delete R->second;
+ }
+ Invocation->getPreprocessorOpts().clearRemappedFiles();
+ for (unsigned I = 0; I != NumRemappedFiles; ++I) {
+ FilenameOrMemBuf fileOrBuf = RemappedFiles[I].second;
+ if (const llvm::MemoryBuffer *
+ memBuf = fileOrBuf.dyn_cast<const llvm::MemoryBuffer *>()) {
+ Invocation->getPreprocessorOpts().addRemappedFile(RemappedFiles[I].first,
+ memBuf);
+ } else {
+ const char *fname = fileOrBuf.get<const char *>();
+ Invocation->getPreprocessorOpts().addRemappedFile(RemappedFiles[I].first,
+ fname);
+ }
+ }
+
+ // If we have a preamble file lying around, or if we might try to
+ // build a precompiled preamble, do so now.
+ llvm::MemoryBuffer *OverrideMainBuffer = 0;
+ if (!getPreambleFile(this).empty() || PreambleRebuildCounter > 0)
+ OverrideMainBuffer = getMainBufferWithPrecompiledPreamble(*Invocation);
+
+ // Clear out the diagnostics state.
+ getDiagnostics().Reset();
+ ProcessWarningOptions(getDiagnostics(), Invocation->getDiagnosticOpts());
+ if (OverrideMainBuffer)
+ getDiagnostics().setNumWarnings(NumWarningsInPreamble);
+
+ // Parse the sources
+ bool Result = Parse(OverrideMainBuffer);
+
+ // If we're caching global code-completion results, and the top-level
+ // declarations have changed, clear out the code-completion cache.
+ if (!Result && ShouldCacheCodeCompletionResults &&
+ CurrentTopLevelHashValue != CompletionCacheTopLevelHashValue)
+ CacheCodeCompletionResults();
+
+ // We now need to clear out the completion info related to this translation
+ // unit; it'll be recreated if necessary.
+ CCTUInfo.reset();
+
+ return Result;
+}
+
+//----------------------------------------------------------------------------//
+// Code completion
+//----------------------------------------------------------------------------//
+
+namespace {
+ /// \brief Code completion consumer that combines the cached code-completion
+ /// results from an ASTUnit with the code-completion results provided to it,
+ /// then passes the result on to the next consumer.
+ class AugmentedCodeCompleteConsumer : public CodeCompleteConsumer {
+ unsigned long long NormalContexts;
+ ASTUnit &AST;
+ CodeCompleteConsumer &Next;
+
+ public:
+ AugmentedCodeCompleteConsumer(ASTUnit &AST, CodeCompleteConsumer &Next,
+ bool IncludeMacros, bool IncludeCodePatterns,
+ bool IncludeGlobals)
+ : CodeCompleteConsumer(IncludeMacros, IncludeCodePatterns, IncludeGlobals,
+ Next.isOutputBinary()), AST(AST), Next(Next)
+ {
+ // Compute the set of contexts in which we will look when we don't have
+ // any information about the specific context.
+ NormalContexts
+ = (1LL << (CodeCompletionContext::CCC_TopLevel - 1))
+ | (1LL << (CodeCompletionContext::CCC_ObjCInterface - 1))
+ | (1LL << (CodeCompletionContext::CCC_ObjCImplementation - 1))
+ | (1LL << (CodeCompletionContext::CCC_ObjCIvarList - 1))
+ | (1LL << (CodeCompletionContext::CCC_Statement - 1))
+ | (1LL << (CodeCompletionContext::CCC_Expression - 1))
+ | (1LL << (CodeCompletionContext::CCC_ObjCMessageReceiver - 1))
+ | (1LL << (CodeCompletionContext::CCC_DotMemberAccess - 1))
+ | (1LL << (CodeCompletionContext::CCC_ArrowMemberAccess - 1))
+ | (1LL << (CodeCompletionContext::CCC_ObjCPropertyAccess - 1))
+ | (1LL << (CodeCompletionContext::CCC_ObjCProtocolName - 1))
+ | (1LL << (CodeCompletionContext::CCC_ParenthesizedExpression - 1))
+ | (1LL << (CodeCompletionContext::CCC_Recovery - 1));
+
+ if (AST.getASTContext().getLangOpts().CPlusPlus)
+ NormalContexts |= (1LL << (CodeCompletionContext::CCC_EnumTag - 1))
+ | (1LL << (CodeCompletionContext::CCC_UnionTag - 1))
+ | (1LL << (CodeCompletionContext::CCC_ClassOrStructTag - 1));
+ }
+
+ virtual void ProcessCodeCompleteResults(Sema &S,
+ CodeCompletionContext Context,
+ CodeCompletionResult *Results,
+ unsigned NumResults);
+
+ virtual void ProcessOverloadCandidates(Sema &S, unsigned CurrentArg,
+ OverloadCandidate *Candidates,
+ unsigned NumCandidates) {
+ Next.ProcessOverloadCandidates(S, CurrentArg, Candidates, NumCandidates);
+ }
+
+ virtual CodeCompletionAllocator &getAllocator() {
+ return Next.getAllocator();
+ }
+
+ virtual CodeCompletionTUInfo &getCodeCompletionTUInfo() {
+ return Next.getCodeCompletionTUInfo();
+ }
+ };
+}
+
+/// \brief Helper function that computes which global names are hidden by the
+/// local code-completion results.
+static void CalculateHiddenNames(const CodeCompletionContext &Context,
+ CodeCompletionResult *Results,
+ unsigned NumResults,
+ ASTContext &Ctx,
+ llvm::StringSet<llvm::BumpPtrAllocator> &HiddenNames){
+ bool OnlyTagNames = false;
+ switch (Context.getKind()) {
+ case CodeCompletionContext::CCC_Recovery:
+ case CodeCompletionContext::CCC_TopLevel:
+ case CodeCompletionContext::CCC_ObjCInterface:
+ case CodeCompletionContext::CCC_ObjCImplementation:
+ case CodeCompletionContext::CCC_ObjCIvarList:
+ case CodeCompletionContext::CCC_ClassStructUnion:
+ case CodeCompletionContext::CCC_Statement:
+ case CodeCompletionContext::CCC_Expression:
+ case CodeCompletionContext::CCC_ObjCMessageReceiver:
+ case CodeCompletionContext::CCC_DotMemberAccess:
+ case CodeCompletionContext::CCC_ArrowMemberAccess:
+ case CodeCompletionContext::CCC_ObjCPropertyAccess:
+ case CodeCompletionContext::CCC_Namespace:
+ case CodeCompletionContext::CCC_Type:
+ case CodeCompletionContext::CCC_Name:
+ case CodeCompletionContext::CCC_PotentiallyQualifiedName:
+ case CodeCompletionContext::CCC_ParenthesizedExpression:
+ case CodeCompletionContext::CCC_ObjCInterfaceName:
+ break;
+
+ case CodeCompletionContext::CCC_EnumTag:
+ case CodeCompletionContext::CCC_UnionTag:
+ case CodeCompletionContext::CCC_ClassOrStructTag:
+ OnlyTagNames = true;
+ break;
+
+ case CodeCompletionContext::CCC_ObjCProtocolName:
+ case CodeCompletionContext::CCC_MacroName:
+ case CodeCompletionContext::CCC_MacroNameUse:
+ case CodeCompletionContext::CCC_PreprocessorExpression:
+ case CodeCompletionContext::CCC_PreprocessorDirective:
+ case CodeCompletionContext::CCC_NaturalLanguage:
+ case CodeCompletionContext::CCC_SelectorName:
+ case CodeCompletionContext::CCC_TypeQualifiers:
+ case CodeCompletionContext::CCC_Other:
+ case CodeCompletionContext::CCC_OtherWithMacros:
+ case CodeCompletionContext::CCC_ObjCInstanceMessage:
+ case CodeCompletionContext::CCC_ObjCClassMessage:
+ case CodeCompletionContext::CCC_ObjCCategoryName:
+ // We're looking for nothing, or we're looking for names that cannot
+ // be hidden.
+ return;
+ }
+
+ typedef CodeCompletionResult Result;
+ for (unsigned I = 0; I != NumResults; ++I) {
+ if (Results[I].Kind != Result::RK_Declaration)
+ continue;
+
+ unsigned IDNS
+ = Results[I].Declaration->getUnderlyingDecl()->getIdentifierNamespace();
+
+ bool Hiding = false;
+ if (OnlyTagNames)
+ Hiding = (IDNS & Decl::IDNS_Tag);
+ else {
+ unsigned HiddenIDNS = (Decl::IDNS_Type | Decl::IDNS_Member |
+ Decl::IDNS_Namespace | Decl::IDNS_Ordinary |
+ Decl::IDNS_NonMemberOperator);
+ if (Ctx.getLangOpts().CPlusPlus)
+ HiddenIDNS |= Decl::IDNS_Tag;
+ Hiding = (IDNS & HiddenIDNS);
+ }
+
+ if (!Hiding)
+ continue;
+
+ DeclarationName Name = Results[I].Declaration->getDeclName();
+ if (IdentifierInfo *Identifier = Name.getAsIdentifierInfo())
+ HiddenNames.insert(Identifier->getName());
+ else
+ HiddenNames.insert(Name.getAsString());
+ }
+}
+
+
+void AugmentedCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &S,
+ CodeCompletionContext Context,
+ CodeCompletionResult *Results,
+ unsigned NumResults) {
+ // Merge the results we were given with the results we cached.
+ bool AddedResult = false;
+ unsigned InContexts
+ = (Context.getKind() == CodeCompletionContext::CCC_Recovery? NormalContexts
+ : (1ULL << (Context.getKind() - 1)));
+ // Contains the set of names that are hidden by "local" completion results.
+ llvm::StringSet<llvm::BumpPtrAllocator> HiddenNames;
+ typedef CodeCompletionResult Result;
+ SmallVector<Result, 8> AllResults;
+ for (ASTUnit::cached_completion_iterator
+ C = AST.cached_completion_begin(),
+ CEnd = AST.cached_completion_end();
+ C != CEnd; ++C) {
+ // If the context we are in matches any of the contexts we are
+ // interested in, we'll add this result.
+ if ((C->ShowInContexts & InContexts) == 0)
+ continue;
+
+ // If we haven't added any results previously, do so now.
+ if (!AddedResult) {
+ CalculateHiddenNames(Context, Results, NumResults, S.Context,
+ HiddenNames);
+ AllResults.insert(AllResults.end(), Results, Results + NumResults);
+ AddedResult = true;
+ }
+
+ // Determine whether this global completion result is hidden by a local
+ // completion result. If so, skip it.
+ if (C->Kind != CXCursor_MacroDefinition &&
+ HiddenNames.count(C->Completion->getTypedText()))
+ continue;
+
+ // Adjust priority based on similar type classes.
+ unsigned Priority = C->Priority;
+ CXCursorKind CursorKind = C->Kind;
+ CodeCompletionString *Completion = C->Completion;
+ if (!Context.getPreferredType().isNull()) {
+ if (C->Kind == CXCursor_MacroDefinition) {
+ Priority = getMacroUsagePriority(C->Completion->getTypedText(),
+ S.getLangOpts(),
+ Context.getPreferredType()->isAnyPointerType());
+ } else if (C->Type) {
+ CanQualType Expected
+ = S.Context.getCanonicalType(
+ Context.getPreferredType().getUnqualifiedType());
+ SimplifiedTypeClass ExpectedSTC = getSimplifiedTypeClass(Expected);
+ if (ExpectedSTC == C->TypeClass) {
+ // We know this type is similar; check for an exact match.
+ llvm::StringMap<unsigned> &CachedCompletionTypes
+ = AST.getCachedCompletionTypes();
+ llvm::StringMap<unsigned>::iterator Pos
+ = CachedCompletionTypes.find(QualType(Expected).getAsString());
+ if (Pos != CachedCompletionTypes.end() && Pos->second == C->Type)
+ Priority /= CCF_ExactTypeMatch;
+ else
+ Priority /= CCF_SimilarTypeMatch;
+ }
+ }
+ }
+
+ // Adjust the completion string, if required.
+ if (C->Kind == CXCursor_MacroDefinition &&
+ Context.getKind() == CodeCompletionContext::CCC_MacroNameUse) {
+ // Create a new code-completion string that just contains the
+ // macro name, without its arguments.
+ CodeCompletionBuilder Builder(getAllocator(), getCodeCompletionTUInfo(),
+ CCP_CodePattern, C->Availability);
+ Builder.AddTypedTextChunk(C->Completion->getTypedText());
+ CursorKind = CXCursor_NotImplemented;
+ Priority = CCP_CodePattern;
+ Completion = Builder.TakeString();
+ }
+
+ AllResults.push_back(Result(Completion, Priority, CursorKind,
+ C->Availability));
+ }
+
+ // If we did not add any cached completion results, just forward the
+ // results we were given to the next consumer.
+ if (!AddedResult) {
+ Next.ProcessCodeCompleteResults(S, Context, Results, NumResults);
+ return;
+ }
+
+ Next.ProcessCodeCompleteResults(S, Context, AllResults.data(),
+ AllResults.size());
+}
+
+
+
+void ASTUnit::CodeComplete(StringRef File, unsigned Line, unsigned Column,
+ RemappedFile *RemappedFiles,
+ unsigned NumRemappedFiles,
+ bool IncludeMacros,
+ bool IncludeCodePatterns,
+ CodeCompleteConsumer &Consumer,
+ DiagnosticsEngine &Diag, LangOptions &LangOpts,
+ SourceManager &SourceMgr, FileManager &FileMgr,
+ SmallVectorImpl<StoredDiagnostic> &StoredDiagnostics,
+ SmallVectorImpl<const llvm::MemoryBuffer *> &OwnedBuffers) {
+ if (!Invocation)
+ return;
+
+ SimpleTimer CompletionTimer(WantTiming);
+ CompletionTimer.setOutput("Code completion @ " + File + ":" +
+ Twine(Line) + ":" + Twine(Column));
+
+ IntrusiveRefCntPtr<CompilerInvocation>
+ CCInvocation(new CompilerInvocation(*Invocation));
+
+ FrontendOptions &FrontendOpts = CCInvocation->getFrontendOpts();
+ PreprocessorOptions &PreprocessorOpts = CCInvocation->getPreprocessorOpts();
+
+ FrontendOpts.ShowMacrosInCodeCompletion
+ = IncludeMacros && CachedCompletionResults.empty();
+ FrontendOpts.ShowCodePatternsInCodeCompletion = IncludeCodePatterns;
+ FrontendOpts.ShowGlobalSymbolsInCodeCompletion
+ = CachedCompletionResults.empty();
+ FrontendOpts.CodeCompletionAt.FileName = File;
+ FrontendOpts.CodeCompletionAt.Line = Line;
+ FrontendOpts.CodeCompletionAt.Column = Column;
+
+ // Set the language options appropriately.
+ LangOpts = *CCInvocation->getLangOpts();
+
+ OwningPtr<CompilerInstance> Clang(new CompilerInstance());
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<CompilerInstance>
+ CICleanup(Clang.get());
+
+ Clang->setInvocation(&*CCInvocation);
+ OriginalSourceFile = Clang->getFrontendOpts().Inputs[0].File;
+
+ // Set up diagnostics, capturing any diagnostics produced.
+ Clang->setDiagnostics(&Diag);
+ ProcessWarningOptions(Diag, CCInvocation->getDiagnosticOpts());
+ CaptureDroppedDiagnostics Capture(true,
+ Clang->getDiagnostics(),
+ StoredDiagnostics);
+
+ // Create the target instance.
+ Clang->getTargetOpts().Features = TargetFeatures;
+ Clang->setTarget(TargetInfo::CreateTargetInfo(Clang->getDiagnostics(),
+ Clang->getTargetOpts()));
+ if (!Clang->hasTarget()) {
+ Clang->setInvocation(0);
+ return;
+ }
+
+ // Inform the target of the language options.
+ //
+ // FIXME: We shouldn't need to do this, the target should be immutable once
+ // created. This complexity should be lifted elsewhere.
+ Clang->getTarget().setForcedLangOptions(Clang->getLangOpts());
+
+ assert(Clang->getFrontendOpts().Inputs.size() == 1 &&
+ "Invocation must have exactly one source file!");
+ assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_AST &&
+ "FIXME: AST inputs not yet supported here!");
+ assert(Clang->getFrontendOpts().Inputs[0].Kind != IK_LLVM_IR &&
+ "IR inputs not supported here!");
+
+
+ // Use the source and file managers that we were given.
+ Clang->setFileManager(&FileMgr);
+ Clang->setSourceManager(&SourceMgr);
+
+ // Remap files.
+ PreprocessorOpts.clearRemappedFiles();
+ PreprocessorOpts.RetainRemappedFileBuffers = true;
+ for (unsigned I = 0; I != NumRemappedFiles; ++I) {
+ FilenameOrMemBuf fileOrBuf = RemappedFiles[I].second;
+ if (const llvm::MemoryBuffer *
+ memBuf = fileOrBuf.dyn_cast<const llvm::MemoryBuffer *>()) {
+ PreprocessorOpts.addRemappedFile(RemappedFiles[I].first, memBuf);
+ OwnedBuffers.push_back(memBuf);
+ } else {
+ const char *fname = fileOrBuf.get<const char *>();
+ PreprocessorOpts.addRemappedFile(RemappedFiles[I].first, fname);
+ }
+ }
+
+ // Use the code completion consumer we were given, but adding any cached
+ // code-completion results.
+ AugmentedCodeCompleteConsumer *AugmentedConsumer
+ = new AugmentedCodeCompleteConsumer(*this, Consumer,
+ FrontendOpts.ShowMacrosInCodeCompletion,
+ FrontendOpts.ShowCodePatternsInCodeCompletion,
+ FrontendOpts.ShowGlobalSymbolsInCodeCompletion);
+ Clang->setCodeCompletionConsumer(AugmentedConsumer);
+
+ Clang->getFrontendOpts().SkipFunctionBodies = true;
+
+ // If we have a precompiled preamble, try to use it. We only allow
+ // the use of the precompiled preamble if the completion point is
+ // within the main file, after the end of the precompiled preamble.
+ llvm::MemoryBuffer *OverrideMainBuffer = 0;
+ if (!getPreambleFile(this).empty()) {
+ using llvm::sys::FileStatus;
+ llvm::sys::PathWithStatus CompleteFilePath(File);
+ llvm::sys::PathWithStatus MainPath(OriginalSourceFile);
+ if (const FileStatus *CompleteFileStatus = CompleteFilePath.getFileStatus())
+ if (const FileStatus *MainStatus = MainPath.getFileStatus())
+ if (CompleteFileStatus->getUniqueID() == MainStatus->getUniqueID() &&
+ Line > 1)
+ OverrideMainBuffer
+ = getMainBufferWithPrecompiledPreamble(*CCInvocation, false,
+ Line - 1);
+ }
+
+ // If the main file has been overridden due to the use of a preamble,
+ // make that override happen and introduce the preamble.
+ PreprocessorOpts.DisableStatCache = true;
+ StoredDiagnostics.insert(StoredDiagnostics.end(),
+ stored_diag_begin(),
+ stored_diag_afterDriver_begin());
+ if (OverrideMainBuffer) {
+ PreprocessorOpts.addRemappedFile(OriginalSourceFile, OverrideMainBuffer);
+ PreprocessorOpts.PrecompiledPreambleBytes.first = Preamble.size();
+ PreprocessorOpts.PrecompiledPreambleBytes.second
+ = PreambleEndsAtStartOfLine;
+ PreprocessorOpts.ImplicitPCHInclude = getPreambleFile(this);
+ PreprocessorOpts.DisablePCHValidation = true;
+
+ OwnedBuffers.push_back(OverrideMainBuffer);
+ } else {
+ PreprocessorOpts.PrecompiledPreambleBytes.first = 0;
+ PreprocessorOpts.PrecompiledPreambleBytes.second = false;
+ }
+
+ // Disable the preprocessing record
+ PreprocessorOpts.DetailedRecord = false;
+
+ OwningPtr<SyntaxOnlyAction> Act;
+ Act.reset(new SyntaxOnlyAction);
+ if (Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0])) {
+ if (OverrideMainBuffer) {
+ std::string ModName = getPreambleFile(this);
+ TranslateStoredDiagnostics(Clang->getModuleManager(), ModName,
+ getSourceManager(), PreambleDiagnostics,
+ StoredDiagnostics);
+ }
+ Act->Execute();
+ Act->EndSourceFile();
+ }
+
+ checkAndSanitizeDiags(StoredDiagnostics, getSourceManager());
+}
+
+CXSaveError ASTUnit::Save(StringRef File) {
+ // Write to a temporary file and later rename it to the actual file, to avoid
+ // possible race conditions.
+ SmallString<128> TempPath;
+ TempPath = File;
+ TempPath += "-%%%%%%%%";
+ int fd;
+ if (llvm::sys::fs::unique_file(TempPath.str(), fd, TempPath,
+ /*makeAbsolute=*/false))
+ return CXSaveError_Unknown;
+
+ // FIXME: Can we somehow regenerate the stat cache here, or do we need to
+ // unconditionally create a stat cache when we parse the file?
+ llvm::raw_fd_ostream Out(fd, /*shouldClose=*/true);
+
+ serialize(Out);
+ Out.close();
+ if (Out.has_error()) {
+ Out.clear_error();
+ return CXSaveError_Unknown;
+ }
+
+ if (llvm::sys::fs::rename(TempPath.str(), File)) {
+ bool exists;
+ llvm::sys::fs::remove(TempPath.str(), exists);
+ return CXSaveError_Unknown;
+ }
+
+ return CXSaveError_None;
+}
+
+bool ASTUnit::serialize(raw_ostream &OS) {
+ bool hasErrors = getDiagnostics().hasErrorOccurred();
+
+ SmallString<128> Buffer;
+ llvm::BitstreamWriter Stream(Buffer);
+ ASTWriter Writer(Stream);
+ // FIXME: Handle modules
+ Writer.WriteAST(getSema(), 0, std::string(), 0, "", hasErrors);
+
+ // Write the generated bitstream to "Out".
+ if (!Buffer.empty())
+ OS.write((char *)&Buffer.front(), Buffer.size());
+
+ return false;
+}
+
+typedef ContinuousRangeMap<unsigned, int, 2> SLocRemap;
+
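+/// \brief Remap the raw offset of a source location using the given SLoc
+/// remapping table, preserving the macro bit of the encoding.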
+static void TranslateSLoc(SourceLocation &L, SLocRemap &Remap) {
+ unsigned Raw = L.getRawEncoding();
+ const unsigned MacroBit = 1U << 31;
+ L = SourceLocation::getFromRawEncoding((Raw & MacroBit) |
+ ((Raw & ~MacroBit) + Remap.find(Raw & ~MacroBit)->second));
+}
+
+void ASTUnit::TranslateStoredDiagnostics(
+ ASTReader *MMan,
+ StringRef ModName,
+ SourceManager &SrcMgr,
+ const SmallVectorImpl<StoredDiagnostic> &Diags,
+ SmallVectorImpl<StoredDiagnostic> &Out) {
+ // The stored diagnostic has the old source manager in it; update
+ // the locations to refer into the new source manager. We also need to remap
+ // all the locations to the new view. This includes the diag location, any
+ // associated source ranges, and the source ranges of associated fix-its.
+ // FIXME: There should be a cleaner way to do this.
+
+ SmallVector<StoredDiagnostic, 4> Result;
+ Result.reserve(Diags.size());
+ assert(MMan && "Don't have a module manager");
+ serialization::ModuleFile *Mod = MMan->ModuleMgr.lookup(ModName);
+ assert(Mod && "Don't have preamble module");
+ SLocRemap &Remap = Mod->SLocRemap;
+ for (unsigned I = 0, N = Diags.size(); I != N; ++I) {
+ // Rebuild the StoredDiagnostic.
+ const StoredDiagnostic &SD = Diags[I];
+ SourceLocation L = SD.getLocation();
+ TranslateSLoc(L, Remap);
+ FullSourceLoc Loc(L, SrcMgr);
+
+ SmallVector<CharSourceRange, 4> Ranges;
+ Ranges.reserve(SD.range_size());
+ for (StoredDiagnostic::range_iterator I = SD.range_begin(),
+ E = SD.range_end();
+ I != E; ++I) {
+ SourceLocation BL = I->getBegin();
+ TranslateSLoc(BL, Remap);
+ SourceLocation EL = I->getEnd();
+ TranslateSLoc(EL, Remap);
+ Ranges.push_back(CharSourceRange(SourceRange(BL, EL), I->isTokenRange()));
+ }
+
+ SmallVector<FixItHint, 2> FixIts;
+ FixIts.reserve(SD.fixit_size());
+ for (StoredDiagnostic::fixit_iterator I = SD.fixit_begin(),
+ E = SD.fixit_end();
+ I != E; ++I) {
+ FixIts.push_back(FixItHint());
+ FixItHint &FH = FixIts.back();
+ FH.CodeToInsert = I->CodeToInsert;
+ SourceLocation BL = I->RemoveRange.getBegin();
+ TranslateSLoc(BL, Remap);
+ SourceLocation EL = I->RemoveRange.getEnd();
+ TranslateSLoc(EL, Remap);
+ FH.RemoveRange = CharSourceRange(SourceRange(BL, EL),
+ I->RemoveRange.isTokenRange());
+ }
+
+ Result.push_back(StoredDiagnostic(SD.getLevel(), SD.getID(),
+ SD.getMessage(), Loc, Ranges, FixIts));
+ }
+ Result.swap(Out);
+}
+
+static inline bool compLocDecl(std::pair<unsigned, Decl *> L,
+ std::pair<unsigned, Decl *> R) {
+ return L.first < R.first;
+}
+
+void ASTUnit::addFileLevelDecl(Decl *D) {
+ assert(D);
+
+ // We only care about local declarations.
+ if (D->isFromASTFile())
+ return;
+
+ SourceManager &SM = *SourceMgr;
+ SourceLocation Loc = D->getLocation();
+ if (Loc.isInvalid() || !SM.isLocalSourceLocation(Loc))
+ return;
+
+ // We only keep track of the file-level declarations of each file.
+ if (!D->getLexicalDeclContext()->isFileContext())
+ return;
+
+ SourceLocation FileLoc = SM.getFileLoc(Loc);
+ assert(SM.isLocalSourceLocation(FileLoc));
+ FileID FID;
+ unsigned Offset;
+ llvm::tie(FID, Offset) = SM.getDecomposedLoc(FileLoc);
+ if (FID.isInvalid())
+ return;
+
+ LocDeclsTy *&Decls = FileDecls[FID];
+ if (!Decls)
+ Decls = new LocDeclsTy();
+
+ std::pair<unsigned, Decl *> LocDecl(Offset, D);
+
+ if (Decls->empty() || Decls->back().first <= Offset) {
+ Decls->push_back(LocDecl);
+ return;
+ }
+
+ LocDeclsTy::iterator
+ I = std::upper_bound(Decls->begin(), Decls->end(), LocDecl, compLocDecl);
+
+ Decls->insert(I, LocDecl);
+}
+
+void ASTUnit::findFileRegionDecls(FileID File, unsigned Offset, unsigned Length,
+ SmallVectorImpl<Decl *> &Decls) {
+ if (File.isInvalid())
+ return;
+
+ if (SourceMgr->isLoadedFileID(File)) {
+ assert(Ctx->getExternalSource() && "No external source!");
+ return Ctx->getExternalSource()->FindFileRegionDecls(File, Offset, Length,
+ Decls);
+ }
+
+ FileDeclsTy::iterator I = FileDecls.find(File);
+ if (I == FileDecls.end())
+ return;
+
+ LocDeclsTy &LocDecls = *I->second;
+ if (LocDecls.empty())
+ return;
+
+ LocDeclsTy::iterator
+ BeginIt = std::lower_bound(LocDecls.begin(), LocDecls.end(),
+ std::make_pair(Offset, (Decl*)0), compLocDecl);
+ if (BeginIt != LocDecls.begin())
+ --BeginIt;
+
+ // If we are pointing at a top-level decl inside an objc container, we need
+ // to backtrack until we find it; otherwise we will fail to report that the
+ // region overlaps with an objc container.
+ while (BeginIt != LocDecls.begin() &&
+ BeginIt->second->isTopLevelDeclInObjCContainer())
+ --BeginIt;
+
+ LocDeclsTy::iterator
+ EndIt = std::upper_bound(LocDecls.begin(), LocDecls.end(),
+ std::make_pair(Offset+Length, (Decl*)0),
+ compLocDecl);
+ if (EndIt != LocDecls.end())
+ ++EndIt;
+
+ for (LocDeclsTy::iterator DIt = BeginIt; DIt != EndIt; ++DIt)
+ Decls.push_back(DIt->second);
+}
+
+SourceLocation ASTUnit::getLocation(const FileEntry *File,
+ unsigned Line, unsigned Col) const {
+ const SourceManager &SM = getSourceManager();
+ SourceLocation Loc = SM.translateFileLineCol(File, Line, Col);
+ return SM.getMacroArgExpandedLocation(Loc);
+}
+
+SourceLocation ASTUnit::getLocation(const FileEntry *File,
+ unsigned Offset) const {
+ const SourceManager &SM = getSourceManager();
+ SourceLocation FileLoc = SM.translateFileLineCol(File, 1, 1);
+ return SM.getMacroArgExpandedLocation(FileLoc.getLocWithOffset(Offset));
+}
+
+/// \brief If \arg Loc is a loaded location from the preamble, returns
+/// the corresponding local location of the main file, otherwise it returns
+/// \arg Loc.
+SourceLocation ASTUnit::mapLocationFromPreamble(SourceLocation Loc) {
+ FileID PreambleID;
+ if (SourceMgr)
+ PreambleID = SourceMgr->getPreambleFileID();
+
+ if (Loc.isInvalid() || Preamble.empty() || PreambleID.isInvalid())
+ return Loc;
+
+ unsigned Offs;
+ if (SourceMgr->isInFileID(Loc, PreambleID, &Offs) && Offs < Preamble.size()) {
+ SourceLocation FileLoc
+ = SourceMgr->getLocForStartOfFile(SourceMgr->getMainFileID());
+ return FileLoc.getLocWithOffset(Offs);
+ }
+
+ return Loc;
+}
+
+/// \brief If \arg Loc is a local location of the main file but inside the
+/// preamble chunk, returns the corresponding loaded location from the
+/// preamble, otherwise it returns \arg Loc.
+SourceLocation ASTUnit::mapLocationToPreamble(SourceLocation Loc) {
+ FileID PreambleID;
+ if (SourceMgr)
+ PreambleID = SourceMgr->getPreambleFileID();
+
+ if (Loc.isInvalid() || Preamble.empty() || PreambleID.isInvalid())
+ return Loc;
+
+ unsigned Offs;
+ if (SourceMgr->isInFileID(Loc, SourceMgr->getMainFileID(), &Offs) &&
+ Offs < Preamble.size()) {
+ SourceLocation FileLoc = SourceMgr->getLocForStartOfFile(PreambleID);
+ return FileLoc.getLocWithOffset(Offs);
+ }
+
+ return Loc;
+}
+
+bool ASTUnit::isInPreambleFileID(SourceLocation Loc) {
+ FileID FID;
+ if (SourceMgr)
+ FID = SourceMgr->getPreambleFileID();
+
+ if (Loc.isInvalid() || FID.isInvalid())
+ return false;
+
+ return SourceMgr->isInFileID(Loc, FID);
+}
+
+bool ASTUnit::isInMainFileID(SourceLocation Loc) {
+ FileID FID;
+ if (SourceMgr)
+ FID = SourceMgr->getMainFileID();
+
+ if (Loc.isInvalid() || FID.isInvalid())
+ return false;
+
+ return SourceMgr->isInFileID(Loc, FID);
+}
+
+SourceLocation ASTUnit::getEndOfPreambleFileID() {
+ FileID FID;
+ if (SourceMgr)
+ FID = SourceMgr->getPreambleFileID();
+
+ if (FID.isInvalid())
+ return SourceLocation();
+
+ return SourceMgr->getLocForEndOfFile(FID);
+}
+
+SourceLocation ASTUnit::getStartOfMainFileID() {
+ FileID FID;
+ if (SourceMgr)
+ FID = SourceMgr->getMainFileID();
+
+ if (FID.isInvalid())
+ return SourceLocation();
+
+ return SourceMgr->getLocForStartOfFile(FID);
+}
+
+void ASTUnit::PreambleData::countLines() const {
+ NumLines = 0;
+ if (empty())
+ return;
+
+ for (std::vector<char>::const_iterator
+ I = Buffer.begin(), E = Buffer.end(); I != E; ++I) {
+ if (*I == '\n')
+ ++NumLines;
+ }
+ if (Buffer.back() != '\n')
+ ++NumLines;
+}
+
+#ifndef NDEBUG
+ASTUnit::ConcurrencyState::ConcurrencyState() {
+ Mutex = new llvm::sys::MutexImpl(/*recursive=*/true);
+}
+
+ASTUnit::ConcurrencyState::~ConcurrencyState() {
+ delete static_cast<llvm::sys::MutexImpl *>(Mutex);
+}
+
+void ASTUnit::ConcurrencyState::start() {
+ bool acquired = static_cast<llvm::sys::MutexImpl *>(Mutex)->tryacquire();
+ assert(acquired && "Concurrent access to ASTUnit!");
+}
+
+void ASTUnit::ConcurrencyState::finish() {
+ static_cast<llvm::sys::MutexImpl *>(Mutex)->release();
+}
+
+#else // NDEBUG
+
+ASTUnit::ConcurrencyState::ConcurrencyState() {}
+ASTUnit::ConcurrencyState::~ConcurrencyState() {}
+void ASTUnit::ConcurrencyState::start() {}
+void ASTUnit::ConcurrencyState::finish() {}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp b/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp
new file mode 100644
index 0000000..58a6b8d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/CacheTokens.cpp
@@ -0,0 +1,653 @@
+//===--- CacheTokens.cpp - Caching of lexer tokens for PTH support --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides a possible implementation of PTH support for Clang that is
+// based on caching lexed tokens and identifiers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/OnDiskHashTable.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Path.h"
+
+// FIXME: put this somewhere else?
+#ifndef S_ISDIR
+#define S_ISDIR(x) (((x)&_S_IFDIR)!=0)
+#endif
+
+using namespace clang;
+using namespace clang::io;
+
+//===----------------------------------------------------------------------===//
+// PTH-specific stuff.
+//===----------------------------------------------------------------------===//
+
+namespace {
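+/// PTHEntry - Records the offsets within the PTH file of the cached token
+/// data and the preprocessor conditional table for a single file.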
+class PTHEntry {
+ Offset TokenData, PPCondData;
+
+public:
+ PTHEntry() {}
+
+ PTHEntry(Offset td, Offset ppcd)
+ : TokenData(td), PPCondData(ppcd) {}
+
+ Offset getTokenOffset() const { return TokenData; }
+ Offset getPPCondTableOffset() const { return PPCondData; }
+};
+
+
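+/// PTHEntryKeyVariant - Key used in the PTH file table. It refers either to a
+/// FileEntry, to a directory described by a stat buffer, or to a path that
+/// does not exist, and knows how to emit the associated stat information.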
+class PTHEntryKeyVariant {
+ union { const FileEntry* FE; const char* Path; };
+ enum { IsFE = 0x1, IsDE = 0x2, IsNoExist = 0x0 } Kind;
+ struct stat *StatBuf;
+public:
+ PTHEntryKeyVariant(const FileEntry *fe)
+ : FE(fe), Kind(IsFE), StatBuf(0) {}
+
+ PTHEntryKeyVariant(struct stat* statbuf, const char* path)
+ : Path(path), Kind(IsDE), StatBuf(new struct stat(*statbuf)) {}
+
+ explicit PTHEntryKeyVariant(const char* path)
+ : Path(path), Kind(IsNoExist), StatBuf(0) {}
+
+ bool isFile() const { return Kind == IsFE; }
+
+ StringRef getString() const {
+ return Kind == IsFE ? FE->getName() : Path;
+ }
+
+ unsigned getKind() const { return (unsigned) Kind; }
+
+ void EmitData(raw_ostream& Out) {
+ switch (Kind) {
+ case IsFE:
+ // Emit stat information.
+ ::Emit32(Out, FE->getInode());
+ ::Emit32(Out, FE->getDevice());
+ ::Emit16(Out, FE->getFileMode());
+ ::Emit64(Out, FE->getModificationTime());
+ ::Emit64(Out, FE->getSize());
+ break;
+ case IsDE:
+ // Emit stat information.
+ ::Emit32(Out, (uint32_t) StatBuf->st_ino);
+ ::Emit32(Out, (uint32_t) StatBuf->st_dev);
+ ::Emit16(Out, (uint16_t) StatBuf->st_mode);
+ ::Emit64(Out, (uint64_t) StatBuf->st_mtime);
+ ::Emit64(Out, (uint64_t) StatBuf->st_size);
+ delete StatBuf;
+ break;
+ default:
+ break;
+ }
+ }
+
+ unsigned getRepresentationLength() const {
+ return Kind == IsNoExist ? 0 : 4 + 4 + 2 + 8 + 8;
+ }
+};
+
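+/// FileEntryPTHEntryInfo - Trait used by the on-disk chained hash table
+/// generator to emit (PTHEntryKeyVariant, PTHEntry) pairs for the file table.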
+class FileEntryPTHEntryInfo {
+public:
+ typedef PTHEntryKeyVariant key_type;
+ typedef key_type key_type_ref;
+
+ typedef PTHEntry data_type;
+ typedef const PTHEntry& data_type_ref;
+
+ static unsigned ComputeHash(PTHEntryKeyVariant V) {
+ return llvm::HashString(V.getString());
+ }
+
+ static std::pair<unsigned,unsigned>
+ EmitKeyDataLength(raw_ostream& Out, PTHEntryKeyVariant V,
+ const PTHEntry& E) {
+
+ unsigned n = V.getString().size() + 1 + 1;
+ ::Emit16(Out, n);
+
+ unsigned m = V.getRepresentationLength() + (V.isFile() ? 4 + 4 : 0);
+ ::Emit8(Out, m);
+
+ return std::make_pair(n, m);
+ }
+
+ static void EmitKey(raw_ostream& Out, PTHEntryKeyVariant V, unsigned n){
+ // Emit the entry kind.
+ ::Emit8(Out, (unsigned) V.getKind());
+ // Emit the string.
+ Out.write(V.getString().data(), n - 1);
+ }
+
+ static void EmitData(raw_ostream& Out, PTHEntryKeyVariant V,
+ const PTHEntry& E, unsigned) {
+
+
+ // For file entries emit the offsets into the PTH file for token data
+ // and the preprocessor blocks table.
+ if (V.isFile()) {
+ ::Emit32(Out, E.getTokenOffset());
+ ::Emit32(Out, E.getPPCondTableOffset());
+ }
+
+ // Emit any other data associated with the key (i.e., stat information).
+ V.EmitData(Out);
+ }
+};
+
+class OffsetOpt {
+ bool valid;
+ Offset off;
+public:
+ OffsetOpt() : valid(false) {}
+ bool hasOffset() const { return valid; }
+ Offset getOffset() const { assert(valid); return off; }
+ void setOffset(Offset o) { off = o; valid = true; }
+};
+} // end anonymous namespace
+
+typedef OnDiskChainedHashTableGenerator<FileEntryPTHEntryInfo> PTHMap;
+
+namespace {
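+/// PTHWriter - Drives emission of the PTH file: it lexes and caches the
+/// tokens of each file, then writes out the identifier table, the cached
+/// spelling strings, and the file table.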
+class PTHWriter {
+ typedef llvm::DenseMap<const IdentifierInfo*,uint32_t> IDMap;
+ typedef llvm::StringMap<OffsetOpt, llvm::BumpPtrAllocator> CachedStrsTy;
+
+ IDMap IM;
+ llvm::raw_fd_ostream& Out;
+ Preprocessor& PP;
+ uint32_t idcount;
+ PTHMap PM;
+ CachedStrsTy CachedStrs;
+ Offset CurStrOffset;
+ std::vector<llvm::StringMapEntry<OffsetOpt>*> StrEntries;
+
+ /// Get the persistent id for the given IdentifierInfo*.
+ uint32_t ResolveID(const IdentifierInfo* II);
+
+ /// Emit a token to the PTH file.
+ void EmitToken(const Token& T);
+
+ void Emit8(uint32_t V) { ::Emit8(Out, V); }
+
+ void Emit16(uint32_t V) { ::Emit16(Out, V); }
+
+ void Emit32(uint32_t V) { ::Emit32(Out, V); }
+
+ void EmitBuf(const char *Ptr, unsigned NumBytes) {
+ Out.write(Ptr, NumBytes);
+ }
+
+ void EmitString(StringRef V) {
+ ::Emit16(Out, V.size());
+ EmitBuf(V.data(), V.size());
+ }
+
+ /// EmitIdentifierTable - Emits two tables to the PTH file. The first is
+ /// a hashtable mapping from identifier strings to persistent IDs.
+ /// The second is a straight table mapping from persistent IDs to string data
+ /// (the keys of the first table).
+ std::pair<Offset, Offset> EmitIdentifierTable();
+
+ /// EmitFileTable - Emit a table mapping from file name strings to PTH
+ /// token data.
+ Offset EmitFileTable() { return PM.Emit(Out); }
+
+ PTHEntry LexTokens(Lexer& L);
+ Offset EmitCachedSpellings();
+
+public:
+ PTHWriter(llvm::raw_fd_ostream& out, Preprocessor& pp)
+ : Out(out), PP(pp), idcount(0), CurStrOffset(0) {}
+
+ PTHMap &getPM() { return PM; }
+ void GeneratePTH(const std::string &MainFile);
+};
+} // end anonymous namespace
+
+uint32_t PTHWriter::ResolveID(const IdentifierInfo* II) {
+ // Null IdentifierInfo's map to the persistent ID 0.
+ if (!II)
+ return 0;
+
+ IDMap::iterator I = IM.find(II);
+ if (I != IM.end())
+ return I->second; // We've already added 1.
+
+ IM[II] = ++idcount; // Pre-increment since '0' is reserved for NULL.
+ return idcount;
+}
+
+void PTHWriter::EmitToken(const Token& T) {
+ // Emit the token kind, flags, and length.
+ Emit32(((uint32_t) T.getKind()) | ((((uint32_t) T.getFlags())) << 8)|
+ (((uint32_t) T.getLength()) << 16));
+
+ if (!T.isLiteral()) {
+ Emit32(ResolveID(T.getIdentifierInfo()));
+ } else {
+ // We cache *un-cleaned* spellings. This gives us 100% fidelity with the
+ // source code.
+ StringRef s(T.getLiteralData(), T.getLength());
+
+ // Get the string entry.
+ llvm::StringMapEntry<OffsetOpt> *E = &CachedStrs.GetOrCreateValue(s);
+
+ // If this is a new string entry, bump the PTH offset.
+ if (!E->getValue().hasOffset()) {
+ E->getValue().setOffset(CurStrOffset);
+ StrEntries.push_back(E);
+ CurStrOffset += s.size() + 1;
+ }
+
+ // Emit the relative offset into the PTH file for the spelling string.
+ Emit32(E->getValue().getOffset());
+ }
+
+ // Emit the offset into the original source file of this token so that we
+ // can reconstruct its SourceLocation.
+ Emit32(PP.getSourceManager().getFileOffset(T.getLocation()));
+}
+
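+/// LexTokens - Lex the given file in raw mode, emitting each token to the PTH
+/// file and recording a table of matching '#if'/'#else'/'#endif' offsets that
+/// is backpatched as conditional blocks are closed.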
+PTHEntry PTHWriter::LexTokens(Lexer& L) {
+ // Pad 0's so that we emit tokens to a 4-byte alignment.
+ // This speeds up reading them back in.
+ Pad(Out, 4);
+ Offset TokenOff = (Offset) Out.tell();
+
+ // Keep track of matching '#if' ... '#endif'.
+ typedef std::vector<std::pair<Offset, unsigned> > PPCondTable;
+ PPCondTable PPCond;
+ std::vector<unsigned> PPStartCond;
+ bool ParsingPreprocessorDirective = false;
+ Token Tok;
+
+ do {
+ L.LexFromRawLexer(Tok);
+ NextToken:
+
+ if ((Tok.isAtStartOfLine() || Tok.is(tok::eof)) &&
+ ParsingPreprocessorDirective) {
+ // Insert an eod token into the token cache. It has the same
+ // position as the next token that is not on the same line as the
+ // preprocessor directive. Observe that we continue processing
+ // 'Tok' when we exit this branch.
+ Token Tmp = Tok;
+ Tmp.setKind(tok::eod);
+ Tmp.clearFlag(Token::StartOfLine);
+ Tmp.setIdentifierInfo(0);
+ EmitToken(Tmp);
+ ParsingPreprocessorDirective = false;
+ }
+
+ if (Tok.is(tok::raw_identifier)) {
+ PP.LookUpIdentifierInfo(Tok);
+ EmitToken(Tok);
+ continue;
+ }
+
+ if (Tok.is(tok::hash) && Tok.isAtStartOfLine()) {
+ // Special processing for #include. Store the '#' token and lex
+ // the next token.
+ assert(!ParsingPreprocessorDirective);
+ Offset HashOff = (Offset) Out.tell();
+
+ // Get the next token.
+ Token NextTok;
+ L.LexFromRawLexer(NextTok);
+
+ // If we see the start of line, then we had a null directive "#". In
+ // this case, discard both tokens.
+ if (NextTok.isAtStartOfLine())
+ goto NextToken;
+
+ // The token is the start of a directive. Emit it.
+ EmitToken(Tok);
+ Tok = NextTok;
+
+ // Did we see 'include'/'import'/'include_next'?
+ if (Tok.isNot(tok::raw_identifier)) {
+ EmitToken(Tok);
+ continue;
+ }
+
+ IdentifierInfo* II = PP.LookUpIdentifierInfo(Tok);
+ tok::PPKeywordKind K = II->getPPKeywordID();
+
+ ParsingPreprocessorDirective = true;
+
+ switch (K) {
+ case tok::pp_not_keyword:
+ // Invalid directives "#foo" can occur in #if 0 blocks etc, just pass
+ // them through.
+ default:
+ break;
+
+ case tok::pp_include:
+ case tok::pp_import:
+ case tok::pp_include_next: {
+ // Save the 'include' token.
+ EmitToken(Tok);
+ // Lex the next token as an include string.
+ L.setParsingPreprocessorDirective(true);
+ L.LexIncludeFilename(Tok);
+ L.setParsingPreprocessorDirective(false);
+ assert(!Tok.isAtStartOfLine());
+ if (Tok.is(tok::raw_identifier))
+ PP.LookUpIdentifierInfo(Tok);
+
+ break;
+ }
+ case tok::pp_if:
+ case tok::pp_ifdef:
+ case tok::pp_ifndef: {
+ // Add an entry for '#if' and friends. We initially set the target
+ // index to 0. This will get backpatched when we hit #endif.
+ PPStartCond.push_back(PPCond.size());
+ PPCond.push_back(std::make_pair(HashOff, 0U));
+ break;
+ }
+ case tok::pp_endif: {
+ // Add an entry for '#endif'. We set the target table index to itself.
+ // This will later be set to zero when emitting to the PTH file. We
+ // use 0 for uninitialized indices because that is easier to debug.
+ unsigned index = PPCond.size();
+ // Backpatch the opening '#if' entry.
+ assert(!PPStartCond.empty());
+ assert(PPCond.size() > PPStartCond.back());
+ assert(PPCond[PPStartCond.back()].second == 0);
+ PPCond[PPStartCond.back()].second = index;
+ PPStartCond.pop_back();
+ // Add the new entry to PPCond.
+ PPCond.push_back(std::make_pair(HashOff, index));
+ EmitToken(Tok);
+
+ // Some files have gibberish on the same line as '#endif'.
+ // Discard these tokens.
+ do
+ L.LexFromRawLexer(Tok);
+ while (Tok.isNot(tok::eof) && !Tok.isAtStartOfLine());
+ // We have the next token in hand.
+ // Don't immediately lex the next one.
+ goto NextToken;
+ }
+ case tok::pp_elif:
+ case tok::pp_else: {
+ // Add an entry for #elif or #else.
+ // This serves as both a closing and opening of a conditional block.
+ // This means that its entry will get backpatched later.
+ unsigned index = PPCond.size();
+ // Backpatch the previous '#if' entry.
+ assert(!PPStartCond.empty());
+ assert(PPCond.size() > PPStartCond.back());
+ assert(PPCond[PPStartCond.back()].second == 0);
+ PPCond[PPStartCond.back()].second = index;
+ PPStartCond.pop_back();
+ // Now add '#elif' as a new block opening.
+ PPCond.push_back(std::make_pair(HashOff, 0U));
+ PPStartCond.push_back(index);
+ break;
+ }
+ }
+ }
+
+ EmitToken(Tok);
+ }
+ while (Tok.isNot(tok::eof));
+
+ assert(PPStartCond.empty() && "Error: imbalanced preprocessor conditionals.");
+
+ // Next write out PPCond.
+ Offset PPCondOff = (Offset) Out.tell();
+
+ // Write out the size of PPCond so that clients can identify empty tables.
+ Emit32(PPCond.size());
+
+ for (unsigned i = 0, e = PPCond.size(); i!=e; ++i) {
+ Emit32(PPCond[i].first - TokenOff);
+ uint32_t x = PPCond[i].second;
+ assert(x != 0 && "PPCond entry not backpatched.");
+ // Emit zero for #endifs. This allows us to do checking when
+ // we read the PTH file back in.
+ Emit32(x == i ? 0 : x);
+ }
+
+ return PTHEntry(TokenOff, PPCondOff);
+}
+
+Offset PTHWriter::EmitCachedSpellings() {
+ // Write each cached string to the PTH file.
+ Offset SpellingsOff = Out.tell();
+
+ for (std::vector<llvm::StringMapEntry<OffsetOpt>*>::iterator
+ I = StrEntries.begin(), E = StrEntries.end(); I!=E; ++I)
+ EmitBuf((*I)->getKeyData(), (*I)->getKeyLength()+1 /*nul included*/);
+
+ return SpellingsOff;
+}
+
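+/// GeneratePTH - Emit the complete PTH file: the "cfe-pth" magic and version,
+/// a four-word prologue of table offsets that is backpatched at the end, the
+/// main file name, the per-file token caches, the identifier table, the
+/// cached spellings, and the file table.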
+void PTHWriter::GeneratePTH(const std::string &MainFile) {
+ // Generate the prologue.
+ Out << "cfe-pth";
+ Emit32(PTHManager::Version);
+
+ // Leave 4 words for the prologue.
+ Offset PrologueOffset = Out.tell();
+ for (unsigned i = 0; i < 4; ++i)
+ Emit32(0);
+
+ // Write the name of the MainFile.
+ if (!MainFile.empty()) {
+ EmitString(MainFile);
+ } else {
+ // String with 0 bytes.
+ Emit16(0);
+ }
+ Emit8(0);
+
+ // Iterate over all the files in SourceManager. Create a lexer
+ // for each file and cache the tokens.
+ SourceManager &SM = PP.getSourceManager();
+ const LangOptions &LOpts = PP.getLangOpts();
+
+ for (SourceManager::fileinfo_iterator I = SM.fileinfo_begin(),
+ E = SM.fileinfo_end(); I != E; ++I) {
+ const SrcMgr::ContentCache &C = *I->second;
+ const FileEntry *FE = C.OrigEntry;
+
+ // FIXME: Handle files with non-absolute paths.
+ if (llvm::sys::path::is_relative(FE->getName()))
+ continue;
+
+ const llvm::MemoryBuffer *B = C.getBuffer(PP.getDiagnostics(), SM);
+ if (!B) continue;
+
+ FileID FID = SM.createFileID(FE, SourceLocation(), SrcMgr::C_User);
+ const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+ Lexer L(FID, FromFile, SM, LOpts);
+ PM.insert(FE, LexTokens(L));
+ }
+
+ // Write out the identifier table.
+ const std::pair<Offset,Offset> &IdTableOff = EmitIdentifierTable();
+
+ // Write out the cached strings table.
+ Offset SpellingOff = EmitCachedSpellings();
+
+ // Write out the file table.
+ Offset FileTableOff = EmitFileTable();
+
+ // Finally, write the prologue.
+ Out.seek(PrologueOffset);
+ Emit32(IdTableOff.first);
+ Emit32(IdTableOff.second);
+ Emit32(FileTableOff);
+ Emit32(SpellingOff);
+}
+
+namespace {
+/// StatListener - A simple "interpose" object used to monitor stat calls
+/// invoked by FileManager while processing the original sources used
+/// as input to PTH generation. StatListener populates the PTHWriter's
+/// file map with stat information for directories as well as negative stats.
+/// Stat information for files is populated elsewhere.
+class StatListener : public FileSystemStatCache {
+ PTHMap &PM;
+public:
+ StatListener(PTHMap &pm) : PM(pm) {}
+ ~StatListener() {}
+
+ LookupResult getStat(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor) {
+ LookupResult Result = statChained(Path, StatBuf, FileDescriptor);
+
+ if (Result == CacheMissing) // Failed 'stat'.
+ PM.insert(PTHEntryKeyVariant(Path), PTHEntry());
+ else if (S_ISDIR(StatBuf.st_mode)) {
+ // Only cache directories with absolute paths.
+ if (llvm::sys::path::is_relative(Path))
+ return Result;
+
+ PM.insert(PTHEntryKeyVariant(&StatBuf, Path), PTHEntry());
+ }
+
+ return Result;
+ }
+};
+} // end anonymous namespace
+
+
+void clang::CacheTokens(Preprocessor &PP, llvm::raw_fd_ostream* OS) {
+ // Get the name of the main file.
+ const SourceManager &SrcMgr = PP.getSourceManager();
+ const FileEntry *MainFile = SrcMgr.getFileEntryForID(SrcMgr.getMainFileID());
+ SmallString<128> MainFilePath(MainFile->getName());
+
+ llvm::sys::fs::make_absolute(MainFilePath);
+
+ // Create the PTHWriter.
+ PTHWriter PW(*OS, PP);
+
+ // Install the 'stat' system call listener in the FileManager.
+ StatListener *StatCache = new StatListener(PW.getPM());
+ PP.getFileManager().addStatCache(StatCache, /*AtBeginning=*/true);
+
+ // Lex through the entire file. This will populate SourceManager with
+ // all of the header information.
+ Token Tok;
+ PP.EnterMainSourceFile();
+ do { PP.Lex(Tok); } while (Tok.isNot(tok::eof));
+
+ // Generate the PTH file.
+ PP.getFileManager().removeStatCache(StatCache);
+ PW.GeneratePTH(MainFilePath.str());
+}
+
+//===----------------------------------------------------------------------===//
+
+namespace {
+class PTHIdKey {
+public:
+ const IdentifierInfo* II;
+ uint32_t FileOffset;
+};
+
+class PTHIdentifierTableTrait {
+public:
+ typedef PTHIdKey* key_type;
+ typedef key_type key_type_ref;
+
+ typedef uint32_t data_type;
+ typedef data_type data_type_ref;
+
+ static unsigned ComputeHash(PTHIdKey* key) {
+ return llvm::HashString(key->II->getName());
+ }
+
+ static std::pair<unsigned,unsigned>
+ EmitKeyDataLength(raw_ostream& Out, const PTHIdKey* key, uint32_t) {
+ unsigned n = key->II->getLength() + 1;
+ ::Emit16(Out, n);
+ return std::make_pair(n, sizeof(uint32_t));
+ }
+
+ static void EmitKey(raw_ostream& Out, PTHIdKey* key, unsigned n) {
+ // Record the location of the key data. This is used when generating
+ // the mapping from persistent IDs to strings.
+ key->FileOffset = Out.tell();
+ Out.write(key->II->getNameStart(), n);
+ }
+
+ static void EmitData(raw_ostream& Out, PTHIdKey*, uint32_t pID,
+ unsigned) {
+ ::Emit32(Out, pID);
+ }
+};
+} // end anonymous namespace
+
+/// EmitIdentifierTable - Emits two tables to the PTH file. The first is
+/// a hashtable mapping from identifier strings to persistent IDs. The second
+/// is a straight table mapping from persistent IDs to string data (the
+/// keys of the first table).
+///
+std::pair<Offset,Offset> PTHWriter::EmitIdentifierTable() {
+ // Build two maps:
+ // (1) an inverse map from persistent IDs -> (IdentifierInfo*,Offset)
+ // (2) a map from (IdentifierInfo*, Offset)* -> persistent IDs
+
+ // Note that we use 'calloc', so all the bytes are 0.
+ PTHIdKey *IIDMap = (PTHIdKey*)calloc(idcount, sizeof(PTHIdKey));
+
+ // Create the hashtable.
+ OnDiskChainedHashTableGenerator<PTHIdentifierTableTrait> IIOffMap;
+
+ // Generate mapping from persistent IDs -> IdentifierInfo*.
+ for (IDMap::iterator I = IM.begin(), E = IM.end(); I != E; ++I) {
+ // Decrement by 1 because we are using a vector for the lookup and
+ // 0 is reserved for NULL.
+ assert(I->second > 0);
+ assert(I->second-1 < idcount);
+ unsigned idx = I->second-1;
+
+ // Store the mapping from persistent ID to IdentifierInfo*
+ IIDMap[idx].II = I->first;
+
+ // Store the reverse mapping in a hashtable.
+ IIOffMap.insert(&IIDMap[idx], I->second);
+ }
+
+ // Write out the inverse map first. This causes the PTHIdKey entries to
+ // record PTH file offsets for the string data. This is used to write
+ // the second table.
+ Offset StringTableOffset = IIOffMap.Emit(Out);
+
+ // Now emit the table mapping from persistent IDs to PTH file offsets.
+ Offset IDOff = Out.tell();
+ Emit32(idcount); // Emit the number of identifiers.
+ for (unsigned i = 0 ; i < idcount; ++i)
+ Emit32(IIDMap[i].FileOffset);
+
+ // Finally, release the inverse map.
+ free(IIDMap);
+
+ return std::make_pair(IDOff, StringTableOffset);
+}
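For reference, the second table written by EmitIdentifierTable above has a simple layout: a 32-bit count followed by one 32-bit spelling offset per persistent ID, with IDs numbered from 1. The reader-side sketch below is illustrative only (it is not the actual PTHManager code); it assumes the little-endian byte order produced by the writer's Emit32, and the helper names are invented for the example.

#include <cassert>
#include <cstdint>

// Read a little-endian 32-bit value (assumption: matches Emit32 above).
static uint32_t ReadLE32(const unsigned char *P) {
  return (uint32_t)P[0] | ((uint32_t)P[1] << 8) |
         ((uint32_t)P[2] << 16) | ((uint32_t)P[3] << 24);
}

// Given the ID -> offset table emitted at IDOff, return the file offset of
// the spelling for PersistentID. IDs are 1-based; 0 is reserved for "none".
static uint32_t spellingOffsetForID(const unsigned char *PTHFile,
                                    uint32_t IDOff, uint32_t PersistentID) {
  const unsigned char *Table = PTHFile + IDOff;
  uint32_t NumIDs = ReadLE32(Table);
  assert(PersistentID != 0 && PersistentID <= NumIDs && "invalid persistent ID");
  return ReadLE32(Table + 4 + 4 * (PersistentID - 1));
}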
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp b/contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp
new file mode 100644
index 0000000..c1d3db8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/ChainedDiagnosticConsumer.cpp
@@ -0,0 +1,14 @@
+//===- ChainedDiagnosticConsumer.cpp - Chain Diagnostic Clients -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/ChainedDiagnosticConsumer.h"
+
+using namespace clang;
+
+void ChainedDiagnosticConsumer::anchor() { }
diff --git a/contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp b/contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp
new file mode 100644
index 0000000..dbb06bd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/ChainedIncludesSource.cpp
@@ -0,0 +1,240 @@
+//===- ChainedIncludesSource.cpp - Chained PCHs in Memory -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ChainedIncludesSource class, which converts headers
+// to chained PCHs in memory, mainly used for testing.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/ChainedIncludesSource.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/ASTWriter.h"
+#include "clang/Parse/ParseAST.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+using namespace clang;
+
+static ASTReader *createASTReader(CompilerInstance &CI,
+ StringRef pchFile,
+ SmallVector<llvm::MemoryBuffer *, 4> &memBufs,
+ SmallVector<std::string, 4> &bufNames,
+ ASTDeserializationListener *deserialListener = 0) {
+ Preprocessor &PP = CI.getPreprocessor();
+ OwningPtr<ASTReader> Reader;
+ Reader.reset(new ASTReader(PP, CI.getASTContext(), /*isysroot=*/"",
+ /*DisableValidation=*/true));
+ for (unsigned ti = 0; ti < bufNames.size(); ++ti) {
+ StringRef sr(bufNames[ti]);
+ Reader->addInMemoryBuffer(sr, memBufs[ti]);
+ }
+ Reader->setDeserializationListener(deserialListener);
+ switch (Reader->ReadAST(pchFile, serialization::MK_PCH)) {
+ case ASTReader::Success:
+ // Set the predefines buffer as suggested by the PCH reader.
+ PP.setPredefines(Reader->getSuggestedPredefines());
+ return Reader.take();
+
+ case ASTReader::Failure:
+ case ASTReader::IgnorePCH:
+ break;
+ }
+ return 0;
+}
+
+ChainedIncludesSource::~ChainedIncludesSource() {
+ for (unsigned i = 0, e = CIs.size(); i != e; ++i)
+ delete CIs[i];
+}
+
+ChainedIncludesSource *ChainedIncludesSource::create(CompilerInstance &CI) {
+
+ std::vector<std::string> &includes = CI.getPreprocessorOpts().ChainedIncludes;
+ assert(!includes.empty() && "No '-chain-include' in options!");
+
+ OwningPtr<ChainedIncludesSource> source(new ChainedIncludesSource());
+ InputKind IK = CI.getFrontendOpts().Inputs[0].Kind;
+
+ SmallVector<llvm::MemoryBuffer *, 4> serialBufs;
+ SmallVector<std::string, 4> serialBufNames;
+
+ for (unsigned i = 0, e = includes.size(); i != e; ++i) {
+ bool firstInclude = (i == 0);
+ OwningPtr<CompilerInvocation> CInvok;
+ CInvok.reset(new CompilerInvocation(CI.getInvocation()));
+
+ CInvok->getPreprocessorOpts().ChainedIncludes.clear();
+ CInvok->getPreprocessorOpts().ImplicitPCHInclude.clear();
+ CInvok->getPreprocessorOpts().ImplicitPTHInclude.clear();
+ CInvok->getPreprocessorOpts().DisablePCHValidation = true;
+ CInvok->getPreprocessorOpts().Includes.clear();
+ CInvok->getPreprocessorOpts().MacroIncludes.clear();
+ CInvok->getPreprocessorOpts().Macros.clear();
+
+ CInvok->getFrontendOpts().Inputs.clear();
+ CInvok->getFrontendOpts().Inputs.push_back(FrontendInputFile(includes[i],
+ IK));
+
+ TextDiagnosticPrinter *DiagClient =
+ new TextDiagnosticPrinter(llvm::errs(), DiagnosticOptions());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(
+ new DiagnosticsEngine(DiagID, DiagClient));
+
+ OwningPtr<CompilerInstance> Clang(new CompilerInstance());
+ Clang->setInvocation(CInvok.take());
+ Clang->setDiagnostics(Diags.getPtr());
+ Clang->setTarget(TargetInfo::CreateTargetInfo(Clang->getDiagnostics(),
+ Clang->getTargetOpts()));
+ Clang->createFileManager();
+ Clang->createSourceManager(Clang->getFileManager());
+ Clang->createPreprocessor();
+ Clang->getDiagnosticClient().BeginSourceFile(Clang->getLangOpts(),
+ &Clang->getPreprocessor());
+ Clang->createASTContext();
+
+ SmallVector<char, 256> serialAST;
+ llvm::raw_svector_ostream OS(serialAST);
+ OwningPtr<ASTConsumer> consumer;
+ consumer.reset(new PCHGenerator(Clang->getPreprocessor(), "-", 0,
+ /*isysroot=*/"", &OS));
+ Clang->getASTContext().setASTMutationListener(
+ consumer->GetASTMutationListener());
+ Clang->setASTConsumer(consumer.take());
+ Clang->createSema(TU_Prefix, 0);
+
+ if (firstInclude) {
+ Preprocessor &PP = Clang->getPreprocessor();
+ PP.getBuiltinInfo().InitializeBuiltins(PP.getIdentifierTable(),
+ PP.getLangOpts());
+ } else {
+ assert(!serialBufs.empty());
+ SmallVector<llvm::MemoryBuffer *, 4> bufs;
+ for (unsigned si = 0, se = serialBufs.size(); si != se; ++si) {
+ bufs.push_back(llvm::MemoryBuffer::getMemBufferCopy(
+ StringRef(serialBufs[si]->getBufferStart(),
+ serialBufs[si]->getBufferSize())));
+ }
+ std::string pchName = includes[i-1];
+ llvm::raw_string_ostream os(pchName);
+ os << ".pch" << i-1;
+ os.flush();
+
+ serialBufNames.push_back(pchName);
+
+ OwningPtr<ExternalASTSource> Reader;
+
+ Reader.reset(createASTReader(*Clang, pchName, bufs, serialBufNames,
+ Clang->getASTConsumer().GetASTDeserializationListener()));
+ if (!Reader)
+ return 0;
+ Clang->getASTContext().setExternalSource(Reader);
+ }
+
+ if (!Clang->InitializeSourceManager(includes[i]))
+ return 0;
+
+ ParseAST(Clang->getSema());
+ OS.flush();
+ Clang->getDiagnosticClient().EndSourceFile();
+ serialBufs.push_back(
+ llvm::MemoryBuffer::getMemBufferCopy(StringRef(serialAST.data(),
+ serialAST.size())));
+ source->CIs.push_back(Clang.take());
+ }
+
+ assert(!serialBufs.empty());
+ std::string pchName = includes.back() + ".pch-final";
+ serialBufNames.push_back(pchName);
+ OwningPtr<ASTReader> Reader;
+ Reader.reset(createASTReader(CI, pchName, serialBufs, serialBufNames));
+ if (!Reader)
+ return 0;
+
+ source->FinalReader.reset(Reader.take());
+ return source.take();
+}
+
+//===----------------------------------------------------------------------===//
+// ExternalASTSource interface.
+//===----------------------------------------------------------------------===//
+
+Decl *ChainedIncludesSource::GetExternalDecl(uint32_t ID) {
+ return getFinalReader().GetExternalDecl(ID);
+}
+Selector ChainedIncludesSource::GetExternalSelector(uint32_t ID) {
+ return getFinalReader().GetExternalSelector(ID);
+}
+uint32_t ChainedIncludesSource::GetNumExternalSelectors() {
+ return getFinalReader().GetNumExternalSelectors();
+}
+Stmt *ChainedIncludesSource::GetExternalDeclStmt(uint64_t Offset) {
+ return getFinalReader().GetExternalDeclStmt(Offset);
+}
+CXXBaseSpecifier *
+ChainedIncludesSource::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
+ return getFinalReader().GetExternalCXXBaseSpecifiers(Offset);
+}
+DeclContextLookupResult
+ChainedIncludesSource::FindExternalVisibleDeclsByName(const DeclContext *DC,
+ DeclarationName Name) {
+ return getFinalReader().FindExternalVisibleDeclsByName(DC, Name);
+}
+ExternalLoadResult
+ChainedIncludesSource::FindExternalLexicalDecls(const DeclContext *DC,
+ bool (*isKindWeWant)(Decl::Kind),
+ SmallVectorImpl<Decl*> &Result) {
+ return getFinalReader().FindExternalLexicalDecls(DC, isKindWeWant, Result);
+}
+void ChainedIncludesSource::CompleteType(TagDecl *Tag) {
+ return getFinalReader().CompleteType(Tag);
+}
+void ChainedIncludesSource::CompleteType(ObjCInterfaceDecl *Class) {
+ return getFinalReader().CompleteType(Class);
+}
+void ChainedIncludesSource::StartedDeserializing() {
+ return getFinalReader().StartedDeserializing();
+}
+void ChainedIncludesSource::FinishedDeserializing() {
+ return getFinalReader().FinishedDeserializing();
+}
+void ChainedIncludesSource::StartTranslationUnit(ASTConsumer *Consumer) {
+ return getFinalReader().StartTranslationUnit(Consumer);
+}
+void ChainedIncludesSource::PrintStats() {
+ return getFinalReader().PrintStats();
+}
+void ChainedIncludesSource::getMemoryBufferSizes(MemoryBufferSizes &sizes)const{
+ for (unsigned i = 0, e = CIs.size(); i != e; ++i) {
+ if (const ExternalASTSource *eSrc =
+ CIs[i]->getASTContext().getExternalSource()) {
+ eSrc->getMemoryBufferSizes(sizes);
+ }
+ }
+
+ getFinalReader().getMemoryBufferSizes(sizes);
+}
+
+void ChainedIncludesSource::InitializeSema(Sema &S) {
+ return getFinalReader().InitializeSema(S);
+}
+void ChainedIncludesSource::ForgetSema() {
+ return getFinalReader().ForgetSema();
+}
+void ChainedIncludesSource::ReadMethodPool(Selector Sel) {
+ getFinalReader().ReadMethodPool(Sel);
+}
+bool ChainedIncludesSource::LookupUnqualified(LookupResult &R, Scope *S) {
+ return getFinalReader().LookupUnqualified(R, S);
+}
+
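As the assert above indicates, ChainedIncludesSource is driven by the -cc1 option -chain-include: each listed header is parsed into an in-memory PCH that the next header chains onto, and the final ASTReader is handed back to the importing compiler instance. The sketch below shows how a frontend could install the source once that option list is populated; it mirrors the OwningPtr-plus-setExternalSource pattern used elsewhere in this diff rather than reproducing the real call site, and the helper name is invented.

#include "clang/Frontend/ChainedIncludesSource.h"
#include "clang/Frontend/CompilerInstance.h"
#include "llvm/ADT/OwningPtr.h"

// Illustrative helper: returns false if the PCH chain could not be built
// (ChainedIncludesSource::create has already emitted diagnostics in that case).
static bool maybeInstallChainedIncludes(clang::CompilerInstance &CI) {
  if (CI.getPreprocessorOpts().ChainedIncludes.empty())
    return true;                                   // no -chain-include given
  llvm::OwningPtr<clang::ExternalASTSource> Source(
      clang::ChainedIncludesSource::create(CI));   // builds the chained PCHs
  if (!Source)
    return false;
  CI.getASTContext().setExternalSource(Source);    // context takes ownership
  return true;
}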
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
new file mode 100644
index 0000000..cab6b90
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInstance.cpp
@@ -0,0 +1,1097 @@
+//===--- CompilerInstance.cpp ---------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Sema/Sema.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/Version.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PTHManager.h"
+#include "clang/Frontend/ChainedDiagnosticConsumer.h"
+#include "clang/Frontend/FrontendAction.h"
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/LogDiagnosticPrinter.h"
+#include "clang/Frontend/SerializedDiagnosticPrinter.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Frontend/VerifyDiagnosticConsumer.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/Serialization/ASTReader.h"
+#include "clang/Sema/CodeCompleteConsumer.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/LockFileManager.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/system_error.h"
+#include "llvm/Support/CrashRecoveryContext.h"
+#include "llvm/Config/config.h"
+
+using namespace clang;
+
+CompilerInstance::CompilerInstance()
+ : Invocation(new CompilerInvocation()), ModuleManager(0) {
+}
+
+CompilerInstance::~CompilerInstance() {
+}
+
+void CompilerInstance::setInvocation(CompilerInvocation *Value) {
+ Invocation = Value;
+}
+
+void CompilerInstance::setDiagnostics(DiagnosticsEngine *Value) {
+ Diagnostics = Value;
+}
+
+void CompilerInstance::setTarget(TargetInfo *Value) {
+ Target = Value;
+}
+
+void CompilerInstance::setFileManager(FileManager *Value) {
+ FileMgr = Value;
+}
+
+void CompilerInstance::setSourceManager(SourceManager *Value) {
+ SourceMgr = Value;
+}
+
+void CompilerInstance::setPreprocessor(Preprocessor *Value) { PP = Value; }
+
+void CompilerInstance::setASTContext(ASTContext *Value) { Context = Value; }
+
+void CompilerInstance::setSema(Sema *S) {
+ TheSema.reset(S);
+}
+
+void CompilerInstance::setASTConsumer(ASTConsumer *Value) {
+ Consumer.reset(Value);
+}
+
+void CompilerInstance::setCodeCompletionConsumer(CodeCompleteConsumer *Value) {
+ CompletionConsumer.reset(Value);
+ getFrontendOpts().SkipFunctionBodies = Value != 0;
+}
+
+// Diagnostics
+static void SetUpBuildDumpLog(const DiagnosticOptions &DiagOpts,
+ unsigned argc, const char* const *argv,
+ DiagnosticsEngine &Diags) {
+ std::string ErrorInfo;
+ OwningPtr<raw_ostream> OS(
+ new llvm::raw_fd_ostream(DiagOpts.DumpBuildInformation.c_str(), ErrorInfo));
+ if (!ErrorInfo.empty()) {
+ Diags.Report(diag::err_fe_unable_to_open_logfile)
+ << DiagOpts.DumpBuildInformation << ErrorInfo;
+ return;
+ }
+
+ (*OS) << "clang -cc1 command line arguments: ";
+ for (unsigned i = 0; i != argc; ++i)
+ (*OS) << argv[i] << ' ';
+ (*OS) << '\n';
+
+ // Chain in a diagnostic client which will log the diagnostics.
+ DiagnosticConsumer *Logger =
+ new TextDiagnosticPrinter(*OS.take(), DiagOpts, /*OwnsOutputStream=*/true);
+ Diags.setClient(new ChainedDiagnosticConsumer(Diags.takeClient(), Logger));
+}
+
+static void SetUpDiagnosticLog(const DiagnosticOptions &DiagOpts,
+ const CodeGenOptions *CodeGenOpts,
+ DiagnosticsEngine &Diags) {
+ std::string ErrorInfo;
+ bool OwnsStream = false;
+ raw_ostream *OS = &llvm::errs();
+ if (DiagOpts.DiagnosticLogFile != "-") {
+ // Create the output stream.
+ llvm::raw_fd_ostream *FileOS(
+ new llvm::raw_fd_ostream(DiagOpts.DiagnosticLogFile.c_str(),
+ ErrorInfo, llvm::raw_fd_ostream::F_Append));
+ if (!ErrorInfo.empty()) {
+ Diags.Report(diag::warn_fe_cc_log_diagnostics_failure)
+ << DiagOpts.DiagnosticLogFile << ErrorInfo;
+ } else {
+ FileOS->SetUnbuffered();
+ FileOS->SetUseAtomicWrites(true);
+ OS = FileOS;
+ OwnsStream = true;
+ }
+ }
+
+ // Chain in the diagnostic client which will log the diagnostics.
+ LogDiagnosticPrinter *Logger = new LogDiagnosticPrinter(*OS, DiagOpts,
+ OwnsStream);
+ if (CodeGenOpts)
+ Logger->setDwarfDebugFlags(CodeGenOpts->DwarfDebugFlags);
+ Diags.setClient(new ChainedDiagnosticConsumer(Diags.takeClient(), Logger));
+}
+
+static void SetupSerializedDiagnostics(const DiagnosticOptions &DiagOpts,
+ DiagnosticsEngine &Diags,
+ StringRef OutputFile) {
+ std::string ErrorInfo;
+ OwningPtr<llvm::raw_fd_ostream> OS;
+ OS.reset(new llvm::raw_fd_ostream(OutputFile.str().c_str(), ErrorInfo,
+ llvm::raw_fd_ostream::F_Binary));
+
+ if (!ErrorInfo.empty()) {
+ Diags.Report(diag::warn_fe_serialized_diag_failure)
+ << OutputFile << ErrorInfo;
+ return;
+ }
+
+ DiagnosticConsumer *SerializedConsumer =
+ clang::serialized_diags::create(OS.take(), DiagOpts);
+
+
+ Diags.setClient(new ChainedDiagnosticConsumer(Diags.takeClient(),
+ SerializedConsumer));
+}
+
+void CompilerInstance::createDiagnostics(int Argc, const char* const *Argv,
+ DiagnosticConsumer *Client,
+ bool ShouldOwnClient,
+ bool ShouldCloneClient) {
+ Diagnostics = createDiagnostics(getDiagnosticOpts(), Argc, Argv, Client,
+ ShouldOwnClient, ShouldCloneClient,
+ &getCodeGenOpts());
+}
+
+IntrusiveRefCntPtr<DiagnosticsEngine>
+CompilerInstance::createDiagnostics(const DiagnosticOptions &Opts,
+ int Argc, const char* const *Argv,
+ DiagnosticConsumer *Client,
+ bool ShouldOwnClient,
+ bool ShouldCloneClient,
+ const CodeGenOptions *CodeGenOpts) {
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ IntrusiveRefCntPtr<DiagnosticsEngine>
+ Diags(new DiagnosticsEngine(DiagID));
+
+ // Create the diagnostic client for reporting errors or for
+ // implementing -verify.
+ if (Client) {
+ if (ShouldCloneClient)
+ Diags->setClient(Client->clone(*Diags), ShouldOwnClient);
+ else
+ Diags->setClient(Client, ShouldOwnClient);
+ } else
+ Diags->setClient(new TextDiagnosticPrinter(llvm::errs(), Opts));
+
+ // Chain in -verify checker, if requested.
+ if (Opts.VerifyDiagnostics)
+ Diags->setClient(new VerifyDiagnosticConsumer(*Diags));
+
+ // Chain in -diagnostic-log-file dumper, if requested.
+ if (!Opts.DiagnosticLogFile.empty())
+ SetUpDiagnosticLog(Opts, CodeGenOpts, *Diags);
+
+ if (!Opts.DumpBuildInformation.empty())
+ SetUpBuildDumpLog(Opts, Argc, Argv, *Diags);
+
+ if (!Opts.DiagnosticSerializationFile.empty())
+ SetupSerializedDiagnostics(Opts, *Diags,
+ Opts.DiagnosticSerializationFile);
+
+ // Configure our handling of diagnostics.
+ ProcessWarningOptions(*Diags, Opts);
+
+ return Diags;
+}
+
+// File Manager
+
+void CompilerInstance::createFileManager() {
+ FileMgr = new FileManager(getFileSystemOpts());
+}
+
+// Source Manager
+
+void CompilerInstance::createSourceManager(FileManager &FileMgr) {
+ SourceMgr = new SourceManager(getDiagnostics(), FileMgr);
+}
+
+// Preprocessor
+
+void CompilerInstance::createPreprocessor() {
+ const PreprocessorOptions &PPOpts = getPreprocessorOpts();
+
+ // Create a PTH manager if we are using some form of a token cache.
+ PTHManager *PTHMgr = 0;
+ if (!PPOpts.TokenCache.empty())
+ PTHMgr = PTHManager::Create(PPOpts.TokenCache, getDiagnostics());
+
+ // Create the Preprocessor.
+ HeaderSearch *HeaderInfo = new HeaderSearch(getFileManager(),
+ getDiagnostics(),
+ getLangOpts(),
+ &getTarget());
+ PP = new Preprocessor(getDiagnostics(), getLangOpts(), &getTarget(),
+ getSourceManager(), *HeaderInfo, *this, PTHMgr,
+ /*OwnsHeaderSearch=*/true);
+
+ // Note that this is different from passing PTHMgr to Preprocessor's ctor.
+ // That argument is used as the IdentifierInfoLookup argument to
+ // IdentifierTable's ctor.
+ if (PTHMgr) {
+ PTHMgr->setPreprocessor(&*PP);
+ PP->setPTHManager(PTHMgr);
+ }
+
+ if (PPOpts.DetailedRecord)
+ PP->createPreprocessingRecord(PPOpts.DetailedRecordConditionalDirectives);
+
+ InitializePreprocessor(*PP, PPOpts, getHeaderSearchOpts(), getFrontendOpts());
+
+ // Set up the module path, including the hash for the
+ // module-creation options.
+ SmallString<256> SpecificModuleCache(
+ getHeaderSearchOpts().ModuleCachePath);
+ if (!getHeaderSearchOpts().DisableModuleHash)
+ llvm::sys::path::append(SpecificModuleCache,
+ getInvocation().getModuleHash());
+ PP->getHeaderSearchInfo().setModuleCachePath(SpecificModuleCache);
+
+ // Handle generating dependencies, if requested.
+ const DependencyOutputOptions &DepOpts = getDependencyOutputOpts();
+ if (!DepOpts.OutputFile.empty())
+ AttachDependencyFileGen(*PP, DepOpts);
+ if (!DepOpts.DOTOutputFile.empty())
+ AttachDependencyGraphGen(*PP, DepOpts.DOTOutputFile,
+ getHeaderSearchOpts().Sysroot);
+
+
+ // Handle generating header include information, if requested.
+ if (DepOpts.ShowHeaderIncludes)
+ AttachHeaderIncludeGen(*PP);
+ if (!DepOpts.HeaderIncludeOutputFile.empty()) {
+ StringRef OutputPath = DepOpts.HeaderIncludeOutputFile;
+ if (OutputPath == "-")
+ OutputPath = "";
+ AttachHeaderIncludeGen(*PP, /*ShowAllHeaders=*/true, OutputPath,
+ /*ShowDepth=*/false);
+ }
+}
+
+// ASTContext
+
+void CompilerInstance::createASTContext() {
+ Preprocessor &PP = getPreprocessor();
+ Context = new ASTContext(getLangOpts(), PP.getSourceManager(),
+ &getTarget(), PP.getIdentifierTable(),
+ PP.getSelectorTable(), PP.getBuiltinInfo(),
+ /*size_reserve=*/ 0);
+}
+
+// ExternalASTSource
+
+void CompilerInstance::createPCHExternalASTSource(StringRef Path,
+ bool DisablePCHValidation,
+ bool DisableStatCache,
+ bool AllowPCHWithCompilerErrors,
+ void *DeserializationListener){
+ OwningPtr<ExternalASTSource> Source;
+ bool Preamble = getPreprocessorOpts().PrecompiledPreambleBytes.first != 0;
+ Source.reset(createPCHExternalASTSource(Path, getHeaderSearchOpts().Sysroot,
+ DisablePCHValidation,
+ DisableStatCache,
+ AllowPCHWithCompilerErrors,
+ getPreprocessor(), getASTContext(),
+ DeserializationListener,
+ Preamble));
+ ModuleManager = static_cast<ASTReader*>(Source.get());
+ getASTContext().setExternalSource(Source);
+}
+
+ExternalASTSource *
+CompilerInstance::createPCHExternalASTSource(StringRef Path,
+ const std::string &Sysroot,
+ bool DisablePCHValidation,
+ bool DisableStatCache,
+ bool AllowPCHWithCompilerErrors,
+ Preprocessor &PP,
+ ASTContext &Context,
+ void *DeserializationListener,
+ bool Preamble) {
+ OwningPtr<ASTReader> Reader;
+ Reader.reset(new ASTReader(PP, Context,
+ Sysroot.empty() ? "" : Sysroot.c_str(),
+ DisablePCHValidation, DisableStatCache,
+ AllowPCHWithCompilerErrors));
+
+ Reader->setDeserializationListener(
+ static_cast<ASTDeserializationListener *>(DeserializationListener));
+ switch (Reader->ReadAST(Path,
+ Preamble ? serialization::MK_Preamble
+ : serialization::MK_PCH)) {
+ case ASTReader::Success:
+ // Set the predefines buffer as suggested by the PCH reader. Typically, the
+ // predefines buffer will be empty.
+ PP.setPredefines(Reader->getSuggestedPredefines());
+ return Reader.take();
+
+ case ASTReader::Failure:
+ // Unrecoverable failure: don't even try to process the input file.
+ break;
+
+ case ASTReader::IgnorePCH:
+ // No suitable PCH file could be found. Return an error.
+ break;
+ }
+
+ return 0;
+}
+
+// Code Completion
+
+static bool EnableCodeCompletion(Preprocessor &PP,
+ const std::string &Filename,
+ unsigned Line,
+ unsigned Column) {
+ // Tell the source manager to chop off the given file at a specific
+ // line and column.
+ const FileEntry *Entry = PP.getFileManager().getFile(Filename);
+ if (!Entry) {
+ PP.getDiagnostics().Report(diag::err_fe_invalid_code_complete_file)
+ << Filename;
+ return true;
+ }
+
+ // Truncate the named file at the given line/column.
+ PP.SetCodeCompletionPoint(Entry, Line, Column);
+ return false;
+}
+
+void CompilerInstance::createCodeCompletionConsumer() {
+ const ParsedSourceLocation &Loc = getFrontendOpts().CodeCompletionAt;
+ if (!CompletionConsumer) {
+ setCodeCompletionConsumer(
+ createCodeCompletionConsumer(getPreprocessor(),
+ Loc.FileName, Loc.Line, Loc.Column,
+ getFrontendOpts().ShowMacrosInCodeCompletion,
+ getFrontendOpts().ShowCodePatternsInCodeCompletion,
+ getFrontendOpts().ShowGlobalSymbolsInCodeCompletion,
+ llvm::outs()));
+ if (!CompletionConsumer)
+ return;
+ } else if (EnableCodeCompletion(getPreprocessor(), Loc.FileName,
+ Loc.Line, Loc.Column)) {
+ setCodeCompletionConsumer(0);
+ return;
+ }
+
+ if (CompletionConsumer->isOutputBinary() &&
+ llvm::sys::Program::ChangeStdoutToBinary()) {
+ getPreprocessor().getDiagnostics().Report(diag::err_fe_stdout_binary);
+ setCodeCompletionConsumer(0);
+ }
+}
+
+void CompilerInstance::createFrontendTimer() {
+ FrontendTimer.reset(new llvm::Timer("Clang front-end timer"));
+}
+
+CodeCompleteConsumer *
+CompilerInstance::createCodeCompletionConsumer(Preprocessor &PP,
+ const std::string &Filename,
+ unsigned Line,
+ unsigned Column,
+ bool ShowMacros,
+ bool ShowCodePatterns,
+ bool ShowGlobals,
+ raw_ostream &OS) {
+ if (EnableCodeCompletion(PP, Filename, Line, Column))
+ return 0;
+
+ // Set up the creation routine for code-completion.
+ return new PrintingCodeCompleteConsumer(ShowMacros, ShowCodePatterns,
+ ShowGlobals, OS);
+}
+
+void CompilerInstance::createSema(TranslationUnitKind TUKind,
+ CodeCompleteConsumer *CompletionConsumer) {
+ TheSema.reset(new Sema(getPreprocessor(), getASTContext(), getASTConsumer(),
+ TUKind, CompletionConsumer));
+}
+
+// Output Files
+
+void CompilerInstance::addOutputFile(const OutputFile &OutFile) {
+ assert(OutFile.OS && "Attempt to add empty stream to output list!");
+ OutputFiles.push_back(OutFile);
+}
+
+void CompilerInstance::clearOutputFiles(bool EraseFiles) {
+ for (std::list<OutputFile>::iterator
+ it = OutputFiles.begin(), ie = OutputFiles.end(); it != ie; ++it) {
+ delete it->OS;
+ if (!it->TempFilename.empty()) {
+ if (EraseFiles) {
+ bool existed;
+ llvm::sys::fs::remove(it->TempFilename, existed);
+ } else {
+ SmallString<128> NewOutFile(it->Filename);
+
+ // If '-working-directory' was passed, the output filename should be
+ // relative to that.
+ FileMgr->FixupRelativePath(NewOutFile);
+ if (llvm::error_code ec = llvm::sys::fs::rename(it->TempFilename,
+ NewOutFile.str())) {
+ getDiagnostics().Report(diag::err_fe_unable_to_rename_temp)
+ << it->TempFilename << it->Filename << ec.message();
+
+ bool existed;
+ llvm::sys::fs::remove(it->TempFilename, existed);
+ }
+ }
+ } else if (!it->Filename.empty() && EraseFiles)
+ llvm::sys::Path(it->Filename).eraseFromDisk();
+
+ }
+ OutputFiles.clear();
+}
+
+llvm::raw_fd_ostream *
+CompilerInstance::createDefaultOutputFile(bool Binary,
+ StringRef InFile,
+ StringRef Extension) {
+ return createOutputFile(getFrontendOpts().OutputFile, Binary,
+ /*RemoveFileOnSignal=*/true, InFile, Extension,
+ /*UseTemporary=*/true);
+}
+
+llvm::raw_fd_ostream *
+CompilerInstance::createOutputFile(StringRef OutputPath,
+ bool Binary, bool RemoveFileOnSignal,
+ StringRef InFile,
+ StringRef Extension,
+ bool UseTemporary,
+ bool CreateMissingDirectories) {
+ std::string Error, OutputPathName, TempPathName;
+ llvm::raw_fd_ostream *OS = createOutputFile(OutputPath, Error, Binary,
+ RemoveFileOnSignal,
+ InFile, Extension,
+ UseTemporary,
+ CreateMissingDirectories,
+ &OutputPathName,
+ &TempPathName);
+ if (!OS) {
+ getDiagnostics().Report(diag::err_fe_unable_to_open_output)
+ << OutputPath << Error;
+ return 0;
+ }
+
+ // Add the output file -- but don't try to remove "-", since this means we are
+ // writing to stdout.
+ addOutputFile(OutputFile((OutputPathName != "-") ? OutputPathName : "",
+ TempPathName, OS));
+
+ return OS;
+}
+
+llvm::raw_fd_ostream *
+CompilerInstance::createOutputFile(StringRef OutputPath,
+ std::string &Error,
+ bool Binary,
+ bool RemoveFileOnSignal,
+ StringRef InFile,
+ StringRef Extension,
+ bool UseTemporary,
+ bool CreateMissingDirectories,
+ std::string *ResultPathName,
+ std::string *TempPathName) {
+ assert((!CreateMissingDirectories || UseTemporary) &&
+ "CreateMissingDirectories is only allowed when using temporary files");
+
+ std::string OutFile, TempFile;
+ if (!OutputPath.empty()) {
+ OutFile = OutputPath;
+ } else if (InFile == "-") {
+ OutFile = "-";
+ } else if (!Extension.empty()) {
+ llvm::sys::Path Path(InFile);
+ Path.eraseSuffix();
+ Path.appendSuffix(Extension);
+ OutFile = Path.str();
+ } else {
+ OutFile = "-";
+ }
+
+ OwningPtr<llvm::raw_fd_ostream> OS;
+ std::string OSFile;
+
+ if (UseTemporary && OutFile != "-") {
+ // Only create the temporary if the parent directory exists (or create
+ // missing directories is true) and we can actually write to OutPath,
+ // otherwise we want to fail early.
+ SmallString<256> AbsPath(OutputPath);
+ llvm::sys::fs::make_absolute(AbsPath);
+ llvm::sys::Path OutPath(AbsPath);
+ bool ParentExists = false;
+ if (llvm::sys::fs::exists(llvm::sys::path::parent_path(AbsPath.str()),
+ ParentExists))
+ ParentExists = false;
+ bool Exists;
+ if ((CreateMissingDirectories || ParentExists) &&
+ ((llvm::sys::fs::exists(AbsPath.str(), Exists) || !Exists) ||
+ (OutPath.isRegularFile() && OutPath.canWrite()))) {
+ // Create a temporary file.
+ SmallString<128> TempPath;
+ TempPath = OutFile;
+ TempPath += "-%%%%%%%%";
+ int fd;
+ if (llvm::sys::fs::unique_file(TempPath.str(), fd, TempPath,
+ /*makeAbsolute=*/false) == llvm::errc::success) {
+ OS.reset(new llvm::raw_fd_ostream(fd, /*shouldClose=*/true));
+ OSFile = TempFile = TempPath.str();
+ }
+ }
+ }
+
+ if (!OS) {
+ OSFile = OutFile;
+ OS.reset(
+ new llvm::raw_fd_ostream(OSFile.c_str(), Error,
+ (Binary ? llvm::raw_fd_ostream::F_Binary : 0)));
+ if (!Error.empty())
+ return 0;
+ }
+
+ // Make sure the out stream file gets removed if we crash.
+ if (RemoveFileOnSignal)
+ llvm::sys::RemoveFileOnSignal(llvm::sys::Path(OSFile));
+
+ if (ResultPathName)
+ *ResultPathName = OutFile;
+ if (TempPathName)
+ *TempPathName = TempFile;
+
+ return OS.take();
+}
+
+// Initialization Utilities
+
+bool CompilerInstance::InitializeSourceManager(StringRef InputFile,
+ SrcMgr::CharacteristicKind Kind){
+ return InitializeSourceManager(InputFile, Kind, getDiagnostics(),
+ getFileManager(), getSourceManager(),
+ getFrontendOpts());
+}
+
+bool CompilerInstance::InitializeSourceManager(StringRef InputFile,
+ SrcMgr::CharacteristicKind Kind,
+ DiagnosticsEngine &Diags,
+ FileManager &FileMgr,
+ SourceManager &SourceMgr,
+ const FrontendOptions &Opts) {
+ // Figure out where to get and map in the main file.
+ if (InputFile != "-") {
+ const FileEntry *File = FileMgr.getFile(InputFile);
+ if (!File) {
+ Diags.Report(diag::err_fe_error_reading) << InputFile;
+ return false;
+ }
+ SourceMgr.createMainFileID(File, Kind);
+ } else {
+ OwningPtr<llvm::MemoryBuffer> SB;
+ if (llvm::MemoryBuffer::getSTDIN(SB)) {
+ // FIXME: Give ec.message() in this diag.
+ Diags.Report(diag::err_fe_error_reading_stdin);
+ return false;
+ }
+ const FileEntry *File = FileMgr.getVirtualFile(SB->getBufferIdentifier(),
+ SB->getBufferSize(), 0);
+ SourceMgr.createMainFileID(File, Kind);
+ SourceMgr.overrideFileContents(File, SB.take());
+ }
+
+ assert(!SourceMgr.getMainFileID().isInvalid() &&
+ "Couldn't establish MainFileID!");
+ return true;
+}
+
+// High-Level Operations
+
+bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
+ assert(hasDiagnostics() && "Diagnostics engine is not initialized!");
+ assert(!getFrontendOpts().ShowHelp && "Client must handle '-help'!");
+ assert(!getFrontendOpts().ShowVersion && "Client must handle '-version'!");
+
+ // FIXME: Take this as an argument, once all the APIs we used have moved to
+ // taking it as an input instead of hard-coding llvm::errs.
+ raw_ostream &OS = llvm::errs();
+
+ // Create the target instance.
+ setTarget(TargetInfo::CreateTargetInfo(getDiagnostics(), getTargetOpts()));
+ if (!hasTarget())
+ return false;
+
+ // Inform the target of the language options.
+ //
+ // FIXME: We shouldn't need to do this, the target should be immutable once
+ // created. This complexity should be lifted elsewhere.
+ getTarget().setForcedLangOptions(getLangOpts());
+
+ // Validate/process some options.
+ if (getHeaderSearchOpts().Verbose)
+ OS << "clang -cc1 version " CLANG_VERSION_STRING
+ << " based upon " << PACKAGE_STRING
+ << " default target " << llvm::sys::getDefaultTargetTriple() << "\n";
+
+ if (getFrontendOpts().ShowTimers)
+ createFrontendTimer();
+
+ if (getFrontendOpts().ShowStats)
+ llvm::EnableStatistics();
+
+ for (unsigned i = 0, e = getFrontendOpts().Inputs.size(); i != e; ++i) {
+ // Reset the ID tables if we are reusing the SourceManager.
+ if (hasSourceManager())
+ getSourceManager().clearIDTables();
+
+ if (Act.BeginSourceFile(*this, getFrontendOpts().Inputs[i])) {
+ Act.Execute();
+ Act.EndSourceFile();
+ }
+ }
+
+ // Notify the diagnostic client that all files were processed.
+ getDiagnostics().getClient()->finish();
+
+ if (getDiagnosticOpts().ShowCarets) {
+ // We can have multiple diagnostics sharing one diagnostic client.
+ // Get the total number of warnings/errors from the client.
+ unsigned NumWarnings = getDiagnostics().getClient()->getNumWarnings();
+ unsigned NumErrors = getDiagnostics().getClient()->getNumErrors();
+
+ if (NumWarnings)
+ OS << NumWarnings << " warning" << (NumWarnings == 1 ? "" : "s");
+ if (NumWarnings && NumErrors)
+ OS << " and ";
+ if (NumErrors)
+ OS << NumErrors << " error" << (NumErrors == 1 ? "" : "s");
+ if (NumWarnings || NumErrors)
+ OS << " generated.\n";
+ }
+
+ if (getFrontendOpts().ShowStats && hasFileManager()) {
+ getFileManager().PrintStats();
+ OS << "\n";
+ }
+
+ return !getDiagnostics().getClient()->getNumErrors();
+}
+
+/// \brief Determine the appropriate source input kind based on language
+/// options.
+static InputKind getSourceInputKindFromOptions(const LangOptions &LangOpts) {
+ if (LangOpts.OpenCL)
+ return IK_OpenCL;
+ if (LangOpts.CUDA)
+ return IK_CUDA;
+ if (LangOpts.ObjC1)
+ return LangOpts.CPlusPlus? IK_ObjCXX : IK_ObjC;
+ return LangOpts.CPlusPlus? IK_CXX : IK_C;
+}
+
+namespace {
+ struct CompileModuleMapData {
+ CompilerInstance &Instance;
+ GenerateModuleAction &CreateModuleAction;
+ };
+}
+
+/// \brief Helper function that executes the module-generating action under
+/// a crash recovery context.
+static void doCompileMapModule(void *UserData) {
+ CompileModuleMapData &Data
+ = *reinterpret_cast<CompileModuleMapData *>(UserData);
+ Data.Instance.ExecuteAction(Data.CreateModuleAction);
+}
+
+/// \brief Compile a module file for the given module, using the options
+/// provided by the importing compiler instance.
+static void compileModule(CompilerInstance &ImportingInstance,
+ Module *Module,
+ StringRef ModuleFileName) {
+ llvm::LockFileManager Locked(ModuleFileName);
+ switch (Locked) {
+ case llvm::LockFileManager::LFS_Error:
+ return;
+
+ case llvm::LockFileManager::LFS_Owned:
+ // We're responsible for building the module ourselves. Do so below.
+ break;
+
+ case llvm::LockFileManager::LFS_Shared:
+ // Someone else is responsible for building the module. Wait for them to
+ // finish.
+ Locked.waitForUnlock();
+ break;
+ }
+
+ ModuleMap &ModMap
+ = ImportingInstance.getPreprocessor().getHeaderSearchInfo().getModuleMap();
+
+ // Construct a compiler invocation for creating this module.
+ IntrusiveRefCntPtr<CompilerInvocation> Invocation
+ (new CompilerInvocation(ImportingInstance.getInvocation()));
+
+ PreprocessorOptions &PPOpts = Invocation->getPreprocessorOpts();
+
+ // For any options that aren't intended to affect how a module is built,
+ // reset them to their default values.
+ Invocation->getLangOpts()->resetNonModularOptions();
+ PPOpts.resetNonModularOptions();
+
+ // Note the name of the module we're building.
+ Invocation->getLangOpts()->CurrentModule = Module->getTopLevelModuleName();
+
+ // Note that this module is part of the module build path, so that we
+ // can detect cycles in the module graph.
+ PPOpts.ModuleBuildPath.push_back(Module->getTopLevelModuleName());
+
+ // If there is a module map file, build the module using the module map.
+ // Set up the inputs/outputs so that we build the module from its umbrella
+ // header.
+ FrontendOptions &FrontendOpts = Invocation->getFrontendOpts();
+ FrontendOpts.OutputFile = ModuleFileName.str();
+ FrontendOpts.DisableFree = false;
+ FrontendOpts.Inputs.clear();
+ InputKind IK = getSourceInputKindFromOptions(*Invocation->getLangOpts());
+
+ // Get or create the module map that we'll use to build this module.
+ SmallString<128> TempModuleMapFileName;
+ if (const FileEntry *ModuleMapFile
+ = ModMap.getContainingModuleMapFile(Module)) {
+ // Use the module map where this module resides.
+ FrontendOpts.Inputs.push_back(FrontendInputFile(ModuleMapFile->getName(),
+ IK));
+ } else {
+ // Create a temporary module map file.
+ TempModuleMapFileName = Module->Name;
+ TempModuleMapFileName += "-%%%%%%%%.map";
+ int FD;
+ if (llvm::sys::fs::unique_file(TempModuleMapFileName.str(), FD,
+ TempModuleMapFileName,
+ /*makeAbsolute=*/true)
+ != llvm::errc::success) {
+ ImportingInstance.getDiagnostics().Report(diag::err_module_map_temp_file)
+ << TempModuleMapFileName;
+ return;
+ }
+ // Print the module map to this file.
+ llvm::raw_fd_ostream OS(FD, /*shouldClose=*/true);
+ Module->print(OS);
+ FrontendOpts.Inputs.push_back(
+ FrontendInputFile(TempModuleMapFileName.str().str(), IK));
+ }
+
+ // Don't free the remapped file buffers; they are owned by our caller.
+ PPOpts.RetainRemappedFileBuffers = true;
+
+ Invocation->getDiagnosticOpts().VerifyDiagnostics = 0;
+ assert(ImportingInstance.getInvocation().getModuleHash() ==
+ Invocation->getModuleHash() && "Module hash mismatch!");
+
+ // Construct a compiler instance that will be used to actually create the
+ // module.
+ CompilerInstance Instance;
+ Instance.setInvocation(&*Invocation);
+ Instance.createDiagnostics(/*argc=*/0, /*argv=*/0,
+ &ImportingInstance.getDiagnosticClient(),
+ /*ShouldOwnClient=*/true,
+ /*ShouldCloneClient=*/true);
+
+ // Construct a module-generating action.
+ GenerateModuleAction CreateModuleAction;
+
+ // Execute the action to actually build the module in-place. Use a separate
+ // thread so that we get a large enough stack.
+ const unsigned ThreadStackSize = 8 << 20;
+ llvm::CrashRecoveryContext CRC;
+ CompileModuleMapData Data = { Instance, CreateModuleAction };
+ CRC.RunSafelyOnThread(&doCompileMapModule, &Data, ThreadStackSize);
+
+ // Delete the temporary module map file.
+ // FIXME: Even though we're executing under crash protection, it would still
+ // be nice to do this with RemoveFileOnSignal when we can. However, that
+ // doesn't make sense for all clients, so clean this up manually.
+ if (!TempModuleMapFileName.empty())
+ llvm::sys::Path(TempModuleMapFileName).eraseFromDisk();
+}
+
+Module *CompilerInstance::loadModule(SourceLocation ImportLoc,
+ ModuleIdPath Path,
+ Module::NameVisibilityKind Visibility,
+ bool IsInclusionDirective) {
+ // If we've already handled this import, just return the cached result.
+ // This one-element cache is important to eliminate redundant diagnostics
+ // when both the preprocessor and parser see the same import declaration.
+ if (!ImportLoc.isInvalid() && LastModuleImportLoc == ImportLoc) {
+ // Make the named module visible.
+ if (LastModuleImportResult)
+ ModuleManager->makeModuleVisible(LastModuleImportResult, Visibility);
+ return LastModuleImportResult;
+ }
+
+ // Determine what file we're searching from.
+ SourceManager &SourceMgr = getSourceManager();
+ SourceLocation ExpandedImportLoc = SourceMgr.getExpansionLoc(ImportLoc);
+ const FileEntry *CurFile
+ = SourceMgr.getFileEntryForID(SourceMgr.getFileID(ExpandedImportLoc));
+ if (!CurFile)
+ CurFile = SourceMgr.getFileEntryForID(SourceMgr.getMainFileID());
+
+ StringRef ModuleName = Path[0].first->getName();
+ SourceLocation ModuleNameLoc = Path[0].second;
+
+ clang::Module *Module = 0;
+
+ // If we don't already have information on this module, load the module now.
+ llvm::DenseMap<const IdentifierInfo *, clang::Module *>::iterator Known
+ = KnownModules.find(Path[0].first);
+ if (Known != KnownModules.end()) {
+ // Retrieve the cached top-level module.
+ Module = Known->second;
+ } else if (ModuleName == getLangOpts().CurrentModule) {
+ // This is the module we're building.
+ Module = PP->getHeaderSearchInfo().getModuleMap().findModule(ModuleName);
+ Known = KnownModules.insert(std::make_pair(Path[0].first, Module)).first;
+ } else {
+ // Search for a module with the given name.
+ Module = PP->getHeaderSearchInfo().lookupModule(ModuleName);
+ std::string ModuleFileName;
+ if (Module)
+ ModuleFileName = PP->getHeaderSearchInfo().getModuleFileName(Module);
+ else
+ ModuleFileName = PP->getHeaderSearchInfo().getModuleFileName(ModuleName);
+
+ if (ModuleFileName.empty()) {
+ getDiagnostics().Report(ModuleNameLoc, diag::err_module_not_found)
+ << ModuleName
+ << SourceRange(ImportLoc, ModuleNameLoc);
+ LastModuleImportLoc = ImportLoc;
+ LastModuleImportResult = 0;
+ return 0;
+ }
+
+ const FileEntry *ModuleFile
+ = getFileManager().getFile(ModuleFileName, /*OpenFile=*/false,
+ /*CacheFailure=*/false);
+ bool BuildingModule = false;
+ if (!ModuleFile && Module) {
+ // The module is not cached, but we have a module map from which we can
+ // build the module.
+
+ // Check whether there is a cycle in the module graph.
+ SmallVectorImpl<std::string> &ModuleBuildPath
+ = getPreprocessorOpts().ModuleBuildPath;
+ SmallVectorImpl<std::string>::iterator Pos
+ = std::find(ModuleBuildPath.begin(), ModuleBuildPath.end(), ModuleName);
+ if (Pos != ModuleBuildPath.end()) {
+ SmallString<256> CyclePath;
+ for (; Pos != ModuleBuildPath.end(); ++Pos) {
+ CyclePath += *Pos;
+ CyclePath += " -> ";
+ }
+ CyclePath += ModuleName;
+
+ getDiagnostics().Report(ModuleNameLoc, diag::err_module_cycle)
+ << ModuleName << CyclePath;
+ return 0;
+ }
+
+ getDiagnostics().Report(ModuleNameLoc, diag::warn_module_build)
+ << ModuleName;
+ BuildingModule = true;
+ compileModule(*this, Module, ModuleFileName);
+ ModuleFile = FileMgr->getFile(ModuleFileName);
+ }
+
+ if (!ModuleFile) {
+ getDiagnostics().Report(ModuleNameLoc,
+ BuildingModule? diag::err_module_not_built
+ : diag::err_module_not_found)
+ << ModuleName
+ << SourceRange(ImportLoc, ModuleNameLoc);
+ return 0;
+ }
+
+ // If we don't already have an ASTReader, create one now.
+ if (!ModuleManager) {
+ if (!hasASTContext())
+ createASTContext();
+
+ std::string Sysroot = getHeaderSearchOpts().Sysroot;
+ const PreprocessorOptions &PPOpts = getPreprocessorOpts();
+ ModuleManager = new ASTReader(getPreprocessor(), *Context,
+ Sysroot.empty() ? "" : Sysroot.c_str(),
+ PPOpts.DisablePCHValidation,
+ PPOpts.DisableStatCache);
+ if (hasASTConsumer()) {
+ ModuleManager->setDeserializationListener(
+ getASTConsumer().GetASTDeserializationListener());
+ getASTContext().setASTMutationListener(
+ getASTConsumer().GetASTMutationListener());
+ }
+ OwningPtr<ExternalASTSource> Source;
+ Source.reset(ModuleManager);
+ getASTContext().setExternalSource(Source);
+ if (hasSema())
+ ModuleManager->InitializeSema(getSema());
+ if (hasASTConsumer())
+ ModuleManager->StartTranslationUnit(&getASTConsumer());
+ }
+
+ // Try to load the module we found.
+ switch (ModuleManager->ReadAST(ModuleFile->getName(),
+ serialization::MK_Module)) {
+ case ASTReader::Success:
+ break;
+
+ case ASTReader::IgnorePCH:
+ // FIXME: The ASTReader will already have complained, but can we shoehorn
+ // that diagnostic information into a more useful form?
+ KnownModules[Path[0].first] = 0;
+ return 0;
+
+ case ASTReader::Failure:
+ // Already complained, but note now that we failed.
+ KnownModules[Path[0].first] = 0;
+ return 0;
+ }
+
+ if (!Module) {
+ // If we loaded the module directly, without finding a module map first,
+ // we'll have loaded the module's information from the module itself.
+ Module = PP->getHeaderSearchInfo().getModuleMap()
+ .findModule((Path[0].first->getName()));
+ }
+
+ // Cache the result of this top-level module lookup for later.
+ Known = KnownModules.insert(std::make_pair(Path[0].first, Module)).first;
+ }
+
+ // If we never found the module, fail.
+ if (!Module)
+ return 0;
+
+ // Verify that the rest of the module path actually corresponds to
+ // a submodule.
+ if (Path.size() > 1) {
+ for (unsigned I = 1, N = Path.size(); I != N; ++I) {
+ StringRef Name = Path[I].first->getName();
+ clang::Module *Sub = Module->findSubmodule(Name);
+
+ if (!Sub) {
+ // Attempt to perform typo correction to find a module name that works.
+ llvm::SmallVector<StringRef, 2> Best;
+ unsigned BestEditDistance = (std::numeric_limits<unsigned>::max)();
+
+ for (clang::Module::submodule_iterator J = Module->submodule_begin(),
+ JEnd = Module->submodule_end();
+ J != JEnd; ++J) {
+ unsigned ED = Name.edit_distance((*J)->Name,
+ /*AllowReplacements=*/true,
+ BestEditDistance);
+ if (ED <= BestEditDistance) {
+ if (ED < BestEditDistance) {
+ Best.clear();
+ BestEditDistance = ED;
+ }
+
+ Best.push_back((*J)->Name);
+ }
+ }
+
+ // If there was a clear winner, use it.
+ if (Best.size() == 1) {
+ getDiagnostics().Report(Path[I].second,
+ diag::err_no_submodule_suggest)
+ << Path[I].first << Module->getFullModuleName() << Best[0]
+ << SourceRange(Path[0].second, Path[I-1].second)
+ << FixItHint::CreateReplacement(SourceRange(Path[I].second),
+ Best[0]);
+
+ Sub = Module->findSubmodule(Best[0]);
+ }
+ }
+
+ if (!Sub) {
+ // No submodule by this name. Complain, and don't look for further
+ // submodules.
+ getDiagnostics().Report(Path[I].second, diag::err_no_submodule)
+ << Path[I].first << Module->getFullModuleName()
+ << SourceRange(Path[0].second, Path[I-1].second);
+ break;
+ }
+
+ Module = Sub;
+ }
+ }
+
+ // Make the named module visible, if it's not already part of the module
+ // we are parsing.
+ if (ModuleName != getLangOpts().CurrentModule) {
+ if (!Module->IsFromModuleFile) {
+ // We have an umbrella header or directory that doesn't actually include
+ // all of the headers within the directory it covers. Complain about
+ // this missing submodule and recover by forgetting that we ever saw
+ // this submodule.
+ // FIXME: Should we detect this at module load time? It seems fairly
+ // expensive (and rare).
+ getDiagnostics().Report(ImportLoc, diag::warn_missing_submodule)
+ << Module->getFullModuleName()
+ << SourceRange(Path.front().second, Path.back().second);
+
+ return 0;
+ }
+
+ // Check whether this module is available.
+ StringRef Feature;
+ if (!Module->isAvailable(getLangOpts(), getTarget(), Feature)) {
+ getDiagnostics().Report(ImportLoc, diag::err_module_unavailable)
+ << Module->getFullModuleName()
+ << Feature
+ << SourceRange(Path.front().second, Path.back().second);
+ LastModuleImportLoc = ImportLoc;
+ LastModuleImportResult = 0;
+ return 0;
+ }
+
+ ModuleManager->makeModuleVisible(Module, Visibility);
+ }
+
+ // If this module import was due to an inclusion directive, create an
+ // implicit import declaration to capture it in the AST.
+ if (IsInclusionDirective && hasASTContext()) {
+ TranslationUnitDecl *TU = getASTContext().getTranslationUnitDecl();
+ TU->addDecl(ImportDecl::CreateImplicit(getASTContext(), TU,
+ ImportLoc, Module,
+ Path.back().second));
+ }
+
+ LastModuleImportLoc = ImportLoc;
+ LastModuleImportResult = Module;
+ return Module;
+}
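The CompilerInstance pieces above (setInvocation, createDiagnostics, ExecuteAction) are all a small tool needs in order to run a frontend action over the inputs described by an invocation. A minimal, hypothetical driver might look like the following sketch; the invocation is assumed to have been built elsewhere (for example from -cc1 arguments), and error handling is omitted.

#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendActions.h"

// Sketch only: run a syntax-only pass using the given invocation. The
// instance keeps its own reference to Invok via setInvocation.
static bool runSyntaxOnly(clang::CompilerInvocation *Invok,
                          int argc, const char *const *argv) {
  clang::CompilerInstance Clang;
  Clang.setInvocation(Invok);
  // With a null client, createDiagnostics installs a TextDiagnosticPrinter
  // writing to llvm::errs() (see the implementation above).
  Clang.createDiagnostics(argc, argv, /*Client=*/0,
                          /*ShouldOwnClient=*/true, /*ShouldCloneClient=*/true);
  clang::SyntaxOnlyAction Act;        // any FrontendAction would do here
  return Clang.ExecuteAction(Act);    // creates the target, sources, and runs Act
}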
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
new file mode 100644
index 0000000..612a0d8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/CompilerInvocation.cpp
@@ -0,0 +1,2242 @@
+//===--- CompilerInvocation.cpp -------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/CompilerInvocation.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/Version.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/CC1Options.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/OptTable.h"
+#include "clang/Driver/Option.h"
+#include "clang/Frontend/CompilerInvocation.h"
+#include "clang/Frontend/LangStandard.h"
+#include "clang/Serialization/ASTReader.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Path.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Initialization.
+//===----------------------------------------------------------------------===//
+
+CompilerInvocationBase::CompilerInvocationBase()
+ : LangOpts(new LangOptions()) {}
+
+CompilerInvocationBase::CompilerInvocationBase(const CompilerInvocationBase &X)
+ : RefCountedBase<CompilerInvocation>(),
+ LangOpts(new LangOptions(*X.getLangOpts())) {}
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+static const char *getAnalysisStoreName(AnalysisStores Kind) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown analysis store!");
+#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN) \
+ case NAME##Model: return CMDFLAG;
+#include "clang/Frontend/Analyses.def"
+ }
+}
+
+static const char *getAnalysisConstraintName(AnalysisConstraints Kind) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown analysis constraints!");
+#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATFN) \
+ case NAME##Model: return CMDFLAG;
+#include "clang/Frontend/Analyses.def"
+ }
+}
+
+static const char *getAnalysisDiagClientName(AnalysisDiagClients Kind) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown analysis client!");
+#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATFN, AUTOCREATE) \
+ case PD_##NAME: return CMDFLAG;
+#include "clang/Frontend/Analyses.def"
+ }
+}
+
+static const char *getAnalysisPurgeModeName(AnalysisPurgeMode Kind) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown analysis purge mode!");
+#define ANALYSIS_PURGE(NAME, CMDFLAG, DESC) \
+ case NAME: return CMDFLAG;
+#include "clang/Frontend/Analyses.def"
+ }
+}
+
+static const char *getAnalysisIPAModeName(AnalysisIPAMode Kind) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown analysis ipa mode!");
+#define ANALYSIS_IPA(NAME, CMDFLAG, DESC) \
+ case NAME: return CMDFLAG;
+#include "clang/Frontend/Analyses.def"
+ }
+}
+
+static const char *
+ getAnalysisInliningModeName(AnalysisInliningMode Kind) {
+ switch (Kind) {
+ default:
+ llvm_unreachable("Unknown analysis inlining mode!");
+#define ANALYSIS_INLINE_SELECTION(NAME, CMDFLAG, DESC) \
+ case NAME: return CMDFLAG;
+#include "clang/Frontend/Analyses.def"
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Serialization (to args)
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// ToArgsList - Helper class to create a list of std::strings.
+ class ToArgsList {
+ std::vector<std::string> &Res;
+ public:
+ explicit ToArgsList(std::vector<std::string> &Res) : Res(Res) {}
+
+ void push_back(StringRef Str) {
+ // Avoid creating a temporary string.
+ Res.push_back(std::string());
+ Res.back().assign(Str.data(), Str.size());
+ }
+
+ void push_back(StringRef Str1, StringRef Str2) {
+ push_back(Str1);
+ push_back(Str2);
+ }
+ };
+}
+
+static void AnalyzerOptsToArgs(const AnalyzerOptions &Opts, ToArgsList &Res) {
+ if (Opts.ShowCheckerHelp)
+ Res.push_back("-analyzer-checker-help");
+ if (Opts.AnalysisStoreOpt != RegionStoreModel)
+ Res.push_back("-analyzer-store",
+ getAnalysisStoreName(Opts.AnalysisStoreOpt));
+ if (Opts.AnalysisConstraintsOpt != RangeConstraintsModel)
+ Res.push_back("-analyzer-constraints",
+ getAnalysisConstraintName(Opts.AnalysisConstraintsOpt));
+ if (Opts.AnalysisDiagOpt != PD_HTML)
+ Res.push_back("-analyzer-output",
+ getAnalysisDiagClientName(Opts.AnalysisDiagOpt));
+ if (Opts.AnalysisPurgeOpt != PurgeStmt)
+ Res.push_back("-analyzer-purge",
+ getAnalysisPurgeModeName(Opts.AnalysisPurgeOpt));
+ if (!Opts.AnalyzeSpecificFunction.empty())
+ Res.push_back("-analyze-function", Opts.AnalyzeSpecificFunction);
+ if (Opts.IPAMode != Inlining)
+ Res.push_back("-analyzer-ipa", getAnalysisIPAModeName(Opts.IPAMode));
+ if (Opts.InliningMode != NoRedundancy)
+ Res.push_back("-analyzer-inlining-mode",
+ getAnalysisInliningModeName(Opts.InliningMode));
+
+ if (Opts.AnalyzeAll)
+ Res.push_back("-analyzer-opt-analyze-headers");
+ if (Opts.AnalyzerDisplayProgress)
+ Res.push_back("-analyzer-display-progress");
+ if (Opts.AnalyzeNestedBlocks)
+ Res.push_back("-analyzer-opt-analyze-nested-blocks");
+ if (Opts.EagerlyAssume)
+ Res.push_back("-analyzer-eagerly-assume");
+ if (Opts.TrimGraph)
+ Res.push_back("-trim-egraph");
+ if (Opts.VisualizeEGDot)
+ Res.push_back("-analyzer-viz-egraph-graphviz");
+ if (Opts.VisualizeEGUbi)
+ Res.push_back("-analyzer-viz-egraph-ubigraph");
+ if (Opts.NoRetryExhausted)
+ Res.push_back("-analyzer-disable-retry-exhausted");
+
+ for (unsigned i = 0, e = Opts.CheckersControlList.size(); i != e; ++i) {
+ const std::pair<std::string, bool> &opt = Opts.CheckersControlList[i];
+ if (opt.second)
+ Res.push_back("-analyzer-disable-checker");
+ else
+ Res.push_back("-analyzer-checker");
+ Res.push_back(opt.first);
+ }
+}
+
+static void CodeGenOptsToArgs(const CodeGenOptions &Opts, ToArgsList &Res) {
+ if (Opts.DebugInfo)
+ Res.push_back("-g");
+ if (Opts.DisableLLVMOpts)
+ Res.push_back("-disable-llvm-optzns");
+ if (Opts.DisableRedZone)
+ Res.push_back("-disable-red-zone");
+ if (Opts.DisableTailCalls)
+ Res.push_back("-mdisable-tail-calls");
+ if (!Opts.DebugCompilationDir.empty())
+ Res.push_back("-fdebug-compilation-dir", Opts.DebugCompilationDir);
+ if (!Opts.DwarfDebugFlags.empty())
+ Res.push_back("-dwarf-debug-flags", Opts.DwarfDebugFlags);
+ if (Opts.ObjCRuntimeHasARC)
+ Res.push_back("-fobjc-runtime-has-arc");
+ if (Opts.ObjCRuntimeHasTerminate)
+ Res.push_back("-fobjc-runtime-has-terminate");
+ if (Opts.EmitGcovArcs)
+ Res.push_back("-femit-coverage-data");
+ if (Opts.EmitGcovNotes)
+ Res.push_back("-femit-coverage-notes");
+ if (!Opts.MergeAllConstants)
+ Res.push_back("-fno-merge-all-constants");
+ if (Opts.NoCommon)
+ Res.push_back("-fno-common");
+ if (Opts.ForbidGuardVariables)
+ Res.push_back("-fforbid-guard-variables");
+ if (Opts.UseRegisterSizedBitfieldAccess)
+ Res.push_back("-fuse-register-sized-bitfield-access");
+ if (Opts.NoImplicitFloat)
+ Res.push_back("-no-implicit-float");
+ if (Opts.OmitLeafFramePointer)
+ Res.push_back("-momit-leaf-frame-pointer");
+ if (Opts.OptimizeSize) {
+ assert(Opts.OptimizationLevel == 2 && "Invalid options!");
+ Opts.OptimizeSize == 1 ? Res.push_back("-Os") : Res.push_back("-Oz");
+ } else if (Opts.OptimizationLevel != 0)
+ Res.push_back("-O" + llvm::utostr(Opts.OptimizationLevel));
+ if (!Opts.MainFileName.empty())
+ Res.push_back("-main-file-name", Opts.MainFileName);
+ if (Opts.NoInfsFPMath)
+ Res.push_back("-menable-no-infinities");
+ if (Opts.NoNaNsFPMath)
+ Res.push_back("-menable-no-nans");
+ // SimplifyLibCalls is only derived.
+ // TimePasses is only derived.
+ // UnitAtATime is unused.
+ // Inlining is only derived.
+
+ // UnrollLoops is derived, but also accepts an option; no
+ // harm in pushing it back here.
+ if (Opts.UnrollLoops)
+ Res.push_back("-funroll-loops");
+ if (Opts.DataSections)
+ Res.push_back("-fdata-sections");
+ if (Opts.FunctionSections)
+ Res.push_back("-ffunction-sections");
+ if (Opts.AsmVerbose)
+ Res.push_back("-masm-verbose");
+ if (!Opts.CodeModel.empty())
+ Res.push_back("-mcode-model", Opts.CodeModel);
+ if (Opts.CUDAIsDevice)
+ Res.push_back("-fcuda-is-device");
+ if (!Opts.CXAAtExit)
+ Res.push_back("-fno-use-cxa-atexit");
+ if (Opts.CXXCtorDtorAliases)
+ Res.push_back("-mconstructor-aliases");
+ if (Opts.ObjCAutoRefCountExceptions)
+ Res.push_back("-fobjc-arc-eh");
+ if (!Opts.DebugPass.empty()) {
+ Res.push_back("-mdebug-pass", Opts.DebugPass);
+ }
+ if (Opts.DisableFPElim)
+ Res.push_back("-mdisable-fp-elim");
+ if (!Opts.FloatABI.empty())
+ Res.push_back("-mfloat-abi", Opts.FloatABI);
+ if (!Opts.LimitFloatPrecision.empty())
+ Res.push_back("-mlimit-float-precision", Opts.LimitFloatPrecision);
+ if (Opts.NoZeroInitializedInBSS)
+ Res.push_back("-mno-zero-initialized-bss");
+ switch (Opts.getObjCDispatchMethod()) {
+ case CodeGenOptions::Legacy:
+ break;
+ case CodeGenOptions::Mixed:
+ Res.push_back("-fobjc-dispatch-method=mixed");
+ break;
+ case CodeGenOptions::NonLegacy:
+ Res.push_back("-fobjc-dispatch-method=non-legacy");
+ break;
+ }
+ if (Opts.NumRegisterParameters)
+ Res.push_back("-mregparm", llvm::utostr(Opts.NumRegisterParameters));
+ if (Opts.NoGlobalMerge)
+ Res.push_back("-mno-global-merge");
+ if (Opts.NoExecStack)
+ Res.push_back("-mnoexecstack");
+ if (Opts.RelaxAll)
+ Res.push_back("-mrelax-all");
+ if (Opts.SaveTempLabels)
+ Res.push_back("-msave-temp-labels");
+ if (Opts.NoDwarf2CFIAsm)
+ Res.push_back("-fno-dwarf2-cfi-asm");
+ if (Opts.NoDwarfDirectoryAsm)
+ Res.push_back("-fno-dwarf-directory-asm");
+ if (Opts.SoftFloat)
+ Res.push_back("-msoft-float");
+ if (Opts.StrictEnums)
+ Res.push_back("-fstrict-enums");
+ if (Opts.UnwindTables)
+ Res.push_back("-munwind-tables");
+ if (Opts.RelocationModel != "pic")
+ Res.push_back("-mrelocation-model", Opts.RelocationModel);
+ if (!Opts.VerifyModule)
+ Res.push_back("-disable-llvm-verifier");
+ for (unsigned i = 0, e = Opts.BackendOptions.size(); i != e; ++i)
+ Res.push_back("-backend-option", Opts.BackendOptions[i]);
+}
+
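+// Serialize the dependency output options (-dependency-file, -MT, -MP, etc.).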
+static void DependencyOutputOptsToArgs(const DependencyOutputOptions &Opts,
+ ToArgsList &Res) {
+ if (Opts.IncludeSystemHeaders)
+ Res.push_back("-sys-header-deps");
+ if (Opts.ShowHeaderIncludes)
+ Res.push_back("-H");
+ if (!Opts.HeaderIncludeOutputFile.empty())
+ Res.push_back("-header-include-file", Opts.HeaderIncludeOutputFile);
+ if (Opts.UsePhonyTargets)
+ Res.push_back("-MP");
+ if (!Opts.OutputFile.empty())
+ Res.push_back("-dependency-file", Opts.OutputFile);
+ for (unsigned i = 0, e = Opts.Targets.size(); i != e; ++i)
+ Res.push_back("-MT", Opts.Targets[i]);
+}
+
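+// Serialize the diagnostic options: warning flags, limits, and output format.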
+static void DiagnosticOptsToArgs(const DiagnosticOptions &Opts,
+ ToArgsList &Res) {
+ if (Opts.IgnoreWarnings)
+ Res.push_back("-w");
+ if (Opts.NoRewriteMacros)
+ Res.push_back("-Wno-rewrite-macros");
+ if (Opts.Pedantic)
+ Res.push_back("-pedantic");
+ if (Opts.PedanticErrors)
+ Res.push_back("-pedantic-errors");
+ if (!Opts.ShowColumn)
+ Res.push_back("-fno-show-column");
+ if (!Opts.ShowLocation)
+ Res.push_back("-fno-show-source-location");
+ if (!Opts.ShowCarets)
+ Res.push_back("-fno-caret-diagnostics");
+ if (!Opts.ShowFixits)
+ Res.push_back("-fno-diagnostics-fixit-info");
+ if (Opts.ShowSourceRanges)
+ Res.push_back("-fdiagnostics-print-source-range-info");
+ if (Opts.ShowParseableFixits)
+ Res.push_back("-fdiagnostics-parseable-fixits");
+ if (Opts.ShowColors)
+ Res.push_back("-fcolor-diagnostics");
+ if (Opts.VerifyDiagnostics)
+ Res.push_back("-verify");
+ if (Opts.ShowOptionNames)
+ Res.push_back("-fdiagnostics-show-option");
+ if (Opts.ShowCategories == 1)
+ Res.push_back("-fdiagnostics-show-category=id");
+ else if (Opts.ShowCategories == 2)
+ Res.push_back("-fdiagnostics-show-category=name");
+ switch (Opts.Format) {
+ case DiagnosticOptions::Clang:
+ Res.push_back("-fdiagnostics-format=clang"); break;
+ case DiagnosticOptions::Msvc:
+ Res.push_back("-fdiagnostics-format=msvc"); break;
+ case DiagnosticOptions::Vi:
+ Res.push_back("-fdiagnostics-format=vi"); break;
+ }
+ if (Opts.ErrorLimit)
+ Res.push_back("-ferror-limit", llvm::utostr(Opts.ErrorLimit));
+ if (!Opts.DiagnosticLogFile.empty())
+ Res.push_back("-diagnostic-log-file", Opts.DiagnosticLogFile);
+ if (Opts.MacroBacktraceLimit
+ != DiagnosticOptions::DefaultMacroBacktraceLimit)
+ Res.push_back("-fmacro-backtrace-limit",
+ llvm::utostr(Opts.MacroBacktraceLimit));
+ if (Opts.TemplateBacktraceLimit
+ != DiagnosticOptions::DefaultTemplateBacktraceLimit)
+ Res.push_back("-ftemplate-backtrace-limit",
+ llvm::utostr(Opts.TemplateBacktraceLimit));
+ if (Opts.ConstexprBacktraceLimit
+ != DiagnosticOptions::DefaultConstexprBacktraceLimit)
+ Res.push_back("-fconstexpr-backtrace-limit",
+ llvm::utostr(Opts.ConstexprBacktraceLimit));
+
+ if (Opts.TabStop != DiagnosticOptions::DefaultTabStop)
+ Res.push_back("-ftabstop", llvm::utostr(Opts.TabStop));
+ if (Opts.MessageLength)
+ Res.push_back("-fmessage-length", llvm::utostr(Opts.MessageLength));
+ if (!Opts.DumpBuildInformation.empty())
+ Res.push_back("-dump-build-information", Opts.DumpBuildInformation);
+ for (unsigned i = 0, e = Opts.Warnings.size(); i != e; ++i)
+ Res.push_back("-W" + Opts.Warnings[i]);
+}
+
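+// Map an input kind back to the language name accepted by "-x".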
+static const char *getInputKindName(InputKind Kind) {
+ switch (Kind) {
+ case IK_None: break;
+ case IK_AST: return "ast";
+ case IK_Asm: return "assembler-with-cpp";
+ case IK_C: return "c";
+ case IK_CXX: return "c++";
+ case IK_LLVM_IR: return "ir";
+ case IK_ObjC: return "objective-c";
+ case IK_ObjCXX: return "objective-c++";
+ case IK_OpenCL: return "cl";
+ case IK_CUDA: return "cuda";
+ case IK_PreprocessedC: return "cpp-output";
+ case IK_PreprocessedCXX: return "c++-cpp-output";
+ case IK_PreprocessedObjC: return "objective-c-cpp-output";
+ case IK_PreprocessedObjCXX:return "objective-c++-cpp-output";
+ }
+
+ llvm_unreachable("Unexpected language kind!");
+}
+
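+// Map a frontend action kind back to the "-cc1" flag that requests it.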
+static const char *getActionName(frontend::ActionKind Kind) {
+ switch (Kind) {
+ case frontend::PluginAction:
+ llvm_unreachable("Invalid kind!");
+
+ case frontend::ASTDump: return "-ast-dump";
+ case frontend::ASTDumpXML: return "-ast-dump-xml";
+ case frontend::ASTPrint: return "-ast-print";
+ case frontend::ASTView: return "-ast-view";
+ case frontend::DumpRawTokens: return "-dump-raw-tokens";
+ case frontend::DumpTokens: return "-dump-tokens";
+ case frontend::EmitAssembly: return "-S";
+ case frontend::EmitBC: return "-emit-llvm-bc";
+ case frontend::EmitHTML: return "-emit-html";
+ case frontend::EmitLLVM: return "-emit-llvm";
+ case frontend::EmitLLVMOnly: return "-emit-llvm-only";
+ case frontend::EmitCodeGenOnly: return "-emit-codegen-only";
+ case frontend::EmitObj: return "-emit-obj";
+ case frontend::FixIt: return "-fixit";
+ case frontend::GenerateModule: return "-emit-module";
+ case frontend::GeneratePCH: return "-emit-pch";
+ case frontend::GeneratePTH: return "-emit-pth";
+ case frontend::InitOnly: return "-init-only";
+ case frontend::ParseSyntaxOnly: return "-fsyntax-only";
+ case frontend::PrintDeclContext: return "-print-decl-contexts";
+ case frontend::PrintPreamble: return "-print-preamble";
+ case frontend::PrintPreprocessedInput: return "-E";
+ case frontend::PubnamesDump: return "-pubnames-dump";
+ case frontend::RewriteMacros: return "-rewrite-macros";
+ case frontend::RewriteObjC: return "-rewrite-objc";
+ case frontend::RewriteTest: return "-rewrite-test";
+ case frontend::RunAnalysis: return "-analyze";
+ case frontend::MigrateSource: return "-migrate";
+ case frontend::RunPreprocessorOnly: return "-Eonly";
+ }
+
+ llvm_unreachable("Unexpected language kind!");
+}
+
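+// Serialize the file system options (currently just -working-directory).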
+static void FileSystemOptsToArgs(const FileSystemOptions &Opts, ToArgsList &Res) {
+ if (!Opts.WorkingDir.empty())
+ Res.push_back("-working-directory", Opts.WorkingDir);
+}
+
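+// Serialize the frontend options: action, inputs, output, and plugin arguments.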
+static void FrontendOptsToArgs(const FrontendOptions &Opts, ToArgsList &Res) {
+ if (Opts.DisableFree)
+ Res.push_back("-disable-free");
+ if (Opts.RelocatablePCH)
+ Res.push_back("-relocatable-pch");
+ if (Opts.ShowHelp)
+ Res.push_back("-help");
+ if (Opts.ShowMacrosInCodeCompletion)
+ Res.push_back("-code-completion-macros");
+ if (Opts.ShowCodePatternsInCodeCompletion)
+ Res.push_back("-code-completion-patterns");
+ if (!Opts.ShowGlobalSymbolsInCodeCompletion)
+ Res.push_back("-no-code-completion-globals");
+ if (Opts.ShowStats)
+ Res.push_back("-print-stats");
+ if (Opts.ShowTimers)
+ Res.push_back("-ftime-report");
+ if (Opts.ShowVersion)
+ Res.push_back("-version");
+ if (Opts.FixWhatYouCan)
+ Res.push_back("-fix-what-you-can");
+ if (Opts.FixOnlyWarnings)
+ Res.push_back("-fix-only-warnings");
+ if (Opts.FixAndRecompile)
+ Res.push_back("-fixit-recompile");
+ if (Opts.FixToTemporaries)
+ Res.push_back("-fixit-to-temporary");
+ switch (Opts.ARCMTAction) {
+ case FrontendOptions::ARCMT_None:
+ break;
+ case FrontendOptions::ARCMT_Check:
+ Res.push_back("-arcmt-check");
+ break;
+ case FrontendOptions::ARCMT_Modify:
+ Res.push_back("-arcmt-modify");
+ break;
+ case FrontendOptions::ARCMT_Migrate:
+ Res.push_back("-arcmt-migrate");
+ break;
+ }
+ if (!Opts.MTMigrateDir.empty())
+ Res.push_back("-mt-migrate-directory", Opts.MTMigrateDir);
+ if (!Opts.ARCMTMigrateReportOut.empty())
+ Res.push_back("-arcmt-migrate-report-output", Opts.ARCMTMigrateReportOut);
+ if (Opts.ARCMTMigrateEmitARCErrors)
+ Res.push_back("-arcmt-migrate-emit-errors");
+
+ if (Opts.ObjCMTAction & FrontendOptions::ObjCMT_Literals)
+ Res.push_back("-objcmt-migrate-literals");
+ if (Opts.ObjCMTAction & FrontendOptions::ObjCMT_Subscripting)
+ Res.push_back("-objcmt-migrate-subscripting");
+
+ bool NeedLang = false;
+ for (unsigned i = 0, e = Opts.Inputs.size(); i != e; ++i)
+ if (FrontendOptions::getInputKindForExtension(Opts.Inputs[i].File) !=
+ Opts.Inputs[i].Kind)
+ NeedLang = true;
+ if (NeedLang)
+ Res.push_back("-x", getInputKindName(Opts.Inputs[0].Kind));
+ for (unsigned i = 0, e = Opts.Inputs.size(); i != e; ++i) {
+ assert((!NeedLang || Opts.Inputs[i].Kind == Opts.Inputs[0].Kind) &&
+ "Unable to represent this input vector!");
+ Res.push_back(Opts.Inputs[i].File);
+ }
+
+ if (!Opts.OutputFile.empty())
+ Res.push_back("-o", Opts.OutputFile);
+ if (!Opts.CodeCompletionAt.FileName.empty())
+ Res.push_back("-code-completion-at",
+ Opts.CodeCompletionAt.FileName + ":" +
+ llvm::utostr(Opts.CodeCompletionAt.Line) + ":" +
+ llvm::utostr(Opts.CodeCompletionAt.Column));
+ if (Opts.ProgramAction != frontend::PluginAction)
+ Res.push_back(getActionName(Opts.ProgramAction));
+ if (!Opts.ActionName.empty()) {
+ Res.push_back("-plugin", Opts.ActionName);
+ for(unsigned i = 0, e = Opts.PluginArgs.size(); i != e; ++i)
+ Res.push_back("-plugin-arg-" + Opts.ActionName, Opts.PluginArgs[i]);
+ }
+ for (unsigned i = 0, e = Opts.Plugins.size(); i != e; ++i)
+ Res.push_back("-load", Opts.Plugins[i]);
+ for (unsigned i = 0, e = Opts.AddPluginActions.size(); i != e; ++i) {
+ Res.push_back("-add-plugin", Opts.AddPluginActions[i]);
+ for(unsigned ai = 0, ae = Opts.AddPluginArgs.size(); ai != ae; ++ai)
+ Res.push_back("-plugin-arg-" + Opts.AddPluginActions[i],
+ Opts.AddPluginArgs[i][ai]);
+ }
+ for (unsigned i = 0, e = Opts.ASTMergeFiles.size(); i != e; ++i)
+ Res.push_back("-ast-merge", Opts.ASTMergeFiles[i]);
+ for (unsigned i = 0, e = Opts.LLVMArgs.size(); i != e; ++i)
+ Res.push_back("-mllvm", Opts.LLVMArgs[i]);
+ if (!Opts.OverrideRecordLayoutsFile.empty())
+ Res.push_back("-foverride-record-layout=" + Opts.OverrideRecordLayoutsFile);
+}
+
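+// Serialize the header search options, reconstructing the include path flags in order.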
+static void HeaderSearchOptsToArgs(const HeaderSearchOptions &Opts,
+ ToArgsList &Res) {
+ if (Opts.Sysroot != "/") {
+ Res.push_back("-isysroot");
+ Res.push_back(Opts.Sysroot);
+ }
+
+ // Add the user-specified include entries.
+ for (unsigned i = 0, e = Opts.UserEntries.size(); i != e; ++i) {
+ const HeaderSearchOptions::Entry &E = Opts.UserEntries[i];
+ if (E.IsFramework && (E.Group != frontend::Angled || !E.IsUserSupplied))
+ llvm::report_fatal_error("Invalid option set!");
+ if (E.IsUserSupplied) {
+ switch (E.Group) {
+ case frontend::After:
+ Res.push_back("-idirafter");
+ break;
+
+ case frontend::Quoted:
+ Res.push_back("-iquote");
+ break;
+
+ case frontend::System:
+ Res.push_back("-isystem");
+ break;
+
+ case frontend::IndexHeaderMap:
+ Res.push_back("-index-header-map");
+ Res.push_back(E.IsFramework? "-F" : "-I");
+ break;
+
+ case frontend::CSystem:
+ Res.push_back("-c-isystem");
+ break;
+
+ case frontend::CXXSystem:
+ Res.push_back("-cxx-isystem");
+ break;
+
+ case frontend::ObjCSystem:
+ Res.push_back("-objc-isystem");
+ break;
+
+ case frontend::ObjCXXSystem:
+ Res.push_back("-objcxx-isystem");
+ break;
+
+ case frontend::Angled:
+ Res.push_back(E.IsFramework ? "-F" : "-I");
+ break;
+ }
+ } else {
+ if (E.IsInternal) {
+ assert(E.Group == frontend::System && "Unexpected header search group");
+ if (E.ImplicitExternC)
+ Res.push_back("-internal-externc-isystem");
+ else
+ Res.push_back("-internal-isystem");
+ } else {
+ if (E.Group != frontend::Angled && E.Group != frontend::System)
+ llvm::report_fatal_error("Invalid option set!");
+ Res.push_back(E.Group == frontend::Angled ? "-iwithprefixbefore" :
+ "-iwithprefix");
+ }
+ }
+ Res.push_back(E.Path);
+ }
+
+ if (!Opts.ResourceDir.empty())
+ Res.push_back("-resource-dir", Opts.ResourceDir);
+ if (!Opts.ModuleCachePath.empty())
+ Res.push_back("-fmodule-cache-path", Opts.ModuleCachePath);
+ if (!Opts.UseStandardSystemIncludes)
+ Res.push_back("-nostdsysteminc");
+ if (!Opts.UseStandardCXXIncludes)
+ Res.push_back("-nostdinc++");
+ if (Opts.UseLibcxx)
+ Res.push_back("-stdlib=libc++");
+ if (Opts.Verbose)
+ Res.push_back("-v");
+}
+
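+// Serialize the language options back into "-cc1" flags.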
+static void LangOptsToArgs(const LangOptions &Opts, ToArgsList &Res) {
+ LangOptions DefaultLangOpts;
+
+ // FIXME: Need to set -std to get all the implicit options.
+
+ // FIXME: We want to only pass options relative to the defaults, which
+ // requires constructing a target. :(
+ //
+ // It would be better to push all the target-specific choices into the
+ // driver, so that everything below it would be more uniform.
+
+ if (Opts.Trigraphs)
+ Res.push_back("-trigraphs");
+ // Implicit based on the input kind:
+ // AsmPreprocessor, CPlusPlus, ObjC1, ObjC2, OpenCL
+ // Implicit based on the input language standard:
+ // BCPLComment, C99, CPlusPlus0x, Digraphs, GNUInline, ImplicitInt, GNUMode
+ if (Opts.DollarIdents)
+ Res.push_back("-fdollars-in-identifiers");
+ if (Opts.GNUMode && !Opts.GNUKeywords)
+ Res.push_back("-fno-gnu-keywords");
+ if (!Opts.GNUMode && Opts.GNUKeywords)
+ Res.push_back("-fgnu-keywords");
+ if (Opts.MicrosoftExt)
+ Res.push_back("-fms-extensions");
+ if (Opts.MicrosoftMode)
+ Res.push_back("-fms-compatibility");
+ if (Opts.MSCVersion != 0)
+ Res.push_back("-fmsc-version=" + llvm::utostr(Opts.MSCVersion));
+ if (Opts.Borland)
+ Res.push_back("-fborland-extensions");
+ if (!Opts.ObjCNonFragileABI)
+ Res.push_back("-fobjc-fragile-abi");
+ if (Opts.ObjCDefaultSynthProperties)
+ Res.push_back("-fobjc-default-synthesize-properties");
+ // NoInline is implicit.
+ if (!Opts.CXXOperatorNames)
+ Res.push_back("-fno-operator-names");
+ if (Opts.PascalStrings)
+ Res.push_back("-fpascal-strings");
+ if (Opts.CatchUndefined)
+ Res.push_back("-fcatch-undefined-behavior");
+ if (Opts.AddressSanitizer)
+ Res.push_back("-faddress-sanitizer");
+ if (Opts.ThreadSanitizer)
+ Res.push_back("-fthread-sanitizer");
+ if (Opts.WritableStrings)
+ Res.push_back("-fwritable-strings");
+ if (Opts.ConstStrings)
+ Res.push_back("-fconst-strings");
+ if (!Opts.LaxVectorConversions)
+ Res.push_back("-fno-lax-vector-conversions");
+ if (Opts.AltiVec)
+ Res.push_back("-faltivec");
+ if (Opts.Exceptions)
+ Res.push_back("-fexceptions");
+ if (Opts.ObjCExceptions)
+ Res.push_back("-fobjc-exceptions");
+ if (Opts.CXXExceptions)
+ Res.push_back("-fcxx-exceptions");
+ if (Opts.SjLjExceptions)
+ Res.push_back("-fsjlj-exceptions");
+ if (Opts.TraditionalCPP)
+ Res.push_back("-traditional-cpp");
+ if (!Opts.RTTI)
+ Res.push_back("-fno-rtti");
+ if (Opts.MSBitfields)
+ Res.push_back("-mms-bitfields");
+ if (!Opts.NeXTRuntime)
+ Res.push_back("-fgnu-runtime");
+ if (Opts.Freestanding)
+ Res.push_back("-ffreestanding");
+ if (Opts.FormatExtensions)
+ Res.push_back("-fformat-extensions");
+ if (Opts.NoBuiltin)
+ Res.push_back("-fno-builtin");
+ if (!Opts.AssumeSaneOperatorNew)
+ Res.push_back("-fno-assume-sane-operator-new");
+ if (!Opts.ThreadsafeStatics)
+ Res.push_back("-fno-threadsafe-statics");
+ if (Opts.POSIXThreads)
+ Res.push_back("-pthread");
+ if (Opts.Blocks)
+ Res.push_back("-fblocks");
+ if (Opts.BlocksRuntimeOptional)
+ Res.push_back("-fblocks-runtime-optional");
+ if (Opts.Modules)
+ Res.push_back("-fmodules");
+ if (Opts.EmitAllDecls)
+ Res.push_back("-femit-all-decls");
+ if (Opts.MathErrno)
+ Res.push_back("-fmath-errno");
+ switch (Opts.getSignedOverflowBehavior()) {
+ case LangOptions::SOB_Undefined: break;
+ case LangOptions::SOB_Defined: Res.push_back("-fwrapv"); break;
+ case LangOptions::SOB_Trapping:
+ Res.push_back("-ftrapv");
+ if (!Opts.OverflowHandler.empty())
+ Res.push_back("-ftrapv-handler", Opts.OverflowHandler);
+ break;
+ }
+ if (Opts.HeinousExtensions)
+ Res.push_back("-fheinous-gnu-extensions");
+ // Optimize is implicit.
+ // OptimizeSize is implicit.
+ if (Opts.FastMath)
+ Res.push_back("-ffast-math");
+ if (Opts.Static)
+ Res.push_back("-static-define");
+ if (Opts.DumpRecordLayoutsSimple)
+ Res.push_back("-fdump-record-layouts-simple");
+ else if (Opts.DumpRecordLayouts)
+ Res.push_back("-fdump-record-layouts");
+ if (Opts.DumpVTableLayouts)
+ Res.push_back("-fdump-vtable-layouts");
+ if (Opts.NoBitFieldTypeAlign)
+ Res.push_back("-fno-bitfield-type-alignment");
+ if (Opts.PICLevel)
+ Res.push_back("-pic-level", llvm::utostr(Opts.PICLevel));
+ if (Opts.PIELevel)
+ Res.push_back("-pie-level", llvm::utostr(Opts.PIELevel));
+ if (Opts.ObjCGCBitmapPrint)
+ Res.push_back("-print-ivar-layout");
+ if (Opts.NoConstantCFStrings)
+ Res.push_back("-fno-constant-cfstrings");
+ if (!Opts.AccessControl)
+ Res.push_back("-fno-access-control");
+ if (!Opts.CharIsSigned)
+ Res.push_back("-fno-signed-char");
+ if (Opts.ShortWChar)
+ Res.push_back("-fshort-wchar");
+ if (!Opts.ElideConstructors)
+ Res.push_back("-fno-elide-constructors");
+ if (Opts.getGC() != LangOptions::NonGC) {
+ if (Opts.getGC() == LangOptions::HybridGC) {
+ Res.push_back("-fobjc-gc");
+ } else {
+ assert(Opts.getGC() == LangOptions::GCOnly && "Invalid GC mode!");
+ Res.push_back("-fobjc-gc-only");
+ }
+ }
+ if (Opts.ObjCAutoRefCount)
+ Res.push_back("-fobjc-arc");
+ if (Opts.ObjCRuntimeHasWeak)
+ Res.push_back("-fobjc-runtime-has-weak");
+ if (!Opts.ObjCInferRelatedResultType)
+ Res.push_back("-fno-objc-infer-related-result-type");
+
+ if (Opts.AppleKext)
+ Res.push_back("-fapple-kext");
+
+ if (Opts.getVisibilityMode() != DefaultVisibility) {
+ Res.push_back("-fvisibility");
+ if (Opts.getVisibilityMode() == HiddenVisibility) {
+ Res.push_back("hidden");
+ } else {
+ assert(Opts.getVisibilityMode() == ProtectedVisibility &&
+ "Invalid visibility!");
+ Res.push_back("protected");
+ }
+ }
+ if (Opts.InlineVisibilityHidden)
+ Res.push_back("-fvisibility-inlines-hidden");
+
+ if (Opts.getStackProtector() != 0)
+ Res.push_back("-stack-protector", llvm::utostr(Opts.getStackProtector()));
+ if (Opts.InstantiationDepth != DefaultLangOpts.InstantiationDepth)
+ Res.push_back("-ftemplate-depth", llvm::utostr(Opts.InstantiationDepth));
+ if (Opts.ConstexprCallDepth != DefaultLangOpts.ConstexprCallDepth)
+ Res.push_back("-fconstexpr-depth", llvm::utostr(Opts.ConstexprCallDepth));
+ if (!Opts.ObjCConstantStringClass.empty())
+ Res.push_back("-fconstant-string-class", Opts.ObjCConstantStringClass);
+ if (Opts.FakeAddressSpaceMap)
+ Res.push_back("-ffake-address-space-map");
+ if (Opts.ParseUnknownAnytype)
+ Res.push_back("-funknown-anytype");
+ if (Opts.DebuggerSupport)
+ Res.push_back("-fdebugger-support");
+ if (Opts.DebuggerCastResultToId)
+ Res.push_back("-fdebugger-cast-result-to-id");
+ if (Opts.DebuggerObjCLiteral)
+ Res.push_back("-fdebugger-objc-literal");
+ if (Opts.DelayedTemplateParsing)
+ Res.push_back("-fdelayed-template-parsing");
+ if (Opts.Deprecated)
+ Res.push_back("-fdeprecated-macro");
+ if (Opts.ApplePragmaPack)
+ Res.push_back("-fapple-pragma-pack");
+ if (!Opts.CurrentModule.empty())
+ Res.push_back("-fmodule-name=" + Opts.CurrentModule);
+}
+
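+// Serialize the preprocessor options: macros, includes, PCH/PTH, and remapped files.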
+static void PreprocessorOptsToArgs(const PreprocessorOptions &Opts,
+ ToArgsList &Res) {
+ for (unsigned i = 0, e = Opts.Macros.size(); i != e; ++i)
+ Res.push_back(std::string(Opts.Macros[i].second ? "-U" : "-D") +
+ Opts.Macros[i].first);
+ for (unsigned i = 0, e = Opts.Includes.size(); i != e; ++i) {
+ // FIXME: We need to avoid reincluding the implicit PCH and PTH includes.
+ Res.push_back("-include", Opts.Includes[i]);
+ }
+ for (unsigned i = 0, e = Opts.MacroIncludes.size(); i != e; ++i)
+ Res.push_back("-imacros", Opts.MacroIncludes[i]);
+ if (!Opts.UsePredefines)
+ Res.push_back("-undef");
+ if (Opts.DetailedRecord)
+ Res.push_back("-detailed-preprocessing-record");
+ if (!Opts.ImplicitPCHInclude.empty())
+ Res.push_back("-include-pch", Opts.ImplicitPCHInclude);
+ if (!Opts.ImplicitPTHInclude.empty())
+ Res.push_back("-include-pth", Opts.ImplicitPTHInclude);
+ if (!Opts.TokenCache.empty()) {
+ if (Opts.ImplicitPTHInclude.empty())
+ Res.push_back("-token-cache", Opts.TokenCache);
+ else
+ assert(Opts.ImplicitPTHInclude == Opts.TokenCache &&
+ "Unsupported option combination!");
+ }
+ for (unsigned i = 0, e = Opts.ChainedIncludes.size(); i != e; ++i)
+ Res.push_back("-chain-include", Opts.ChainedIncludes[i]);
+ for (unsigned i = 0, e = Opts.RemappedFiles.size(); i != e; ++i) {
+ Res.push_back("-remap-file", Opts.RemappedFiles[i].first + ";" +
+ Opts.RemappedFiles[i].second);
+ }
+}
+
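+// Serialize the preprocessed-output options (-dD, -dM, -P, -C, -CC).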
+static void PreprocessorOutputOptsToArgs(const PreprocessorOutputOptions &Opts,
+ ToArgsList &Res) {
+ if (!Opts.ShowCPP && !Opts.ShowMacros)
+ llvm::report_fatal_error("Invalid option combination!");
+
+ if (Opts.ShowCPP && Opts.ShowMacros)
+ Res.push_back("-dD");
+ else if (!Opts.ShowCPP && Opts.ShowMacros)
+ Res.push_back("-dM");
+
+ if (!Opts.ShowLineMarkers)
+ Res.push_back("-P");
+ if (Opts.ShowComments)
+ Res.push_back("-C");
+ if (Opts.ShowMacroComments)
+ Res.push_back("-CC");
+}
+
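+// Serialize the target options: triple, CPU, ABI, linker version, and features.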
+static void TargetOptsToArgs(const TargetOptions &Opts,
+ ToArgsList &Res) {
+ Res.push_back("-triple");
+ Res.push_back(Opts.Triple);
+ if (!Opts.CPU.empty())
+ Res.push_back("-target-cpu", Opts.CPU);
+ if (!Opts.ABI.empty())
+ Res.push_back("-target-abi", Opts.ABI);
+ if (!Opts.LinkerVersion.empty())
+ Res.push_back("-target-linker-version", Opts.LinkerVersion);
+ if (!Opts.CXXABI.empty())
+ Res.push_back("-cxx-abi", Opts.CXXABI);
+ for (unsigned i = 0, e = Opts.Features.size(); i != e; ++i)
+ Res.push_back("-target-feature", Opts.Features[i]);
+}
+
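+// Serialize the entire invocation into a flat "-cc1" argument vector by
+// delegating to the per-option-group helpers above. A minimal usage sketch
+// (hypothetical caller, not part of this file):
+//   std::vector<std::string> Argv;
+//   Invocation.toArgs(Argv);
+//   for (unsigned i = 0, e = Argv.size(); i != e; ++i)
+//     llvm::errs() << Argv[i] << "\n";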
+void CompilerInvocation::toArgs(std::vector<std::string> &Res) {
+ ToArgsList List(Res);
+ AnalyzerOptsToArgs(getAnalyzerOpts(), List);
+ CodeGenOptsToArgs(getCodeGenOpts(), List);
+ DependencyOutputOptsToArgs(getDependencyOutputOpts(), List);
+ DiagnosticOptsToArgs(getDiagnosticOpts(), List);
+ FileSystemOptsToArgs(getFileSystemOpts(), List);
+ FrontendOptsToArgs(getFrontendOpts(), List);
+ HeaderSearchOptsToArgs(getHeaderSearchOpts(), List);
+ LangOptsToArgs(*getLangOpts(), List);
+ PreprocessorOptsToArgs(getPreprocessorOpts(), List);
+ PreprocessorOutputOptsToArgs(getPreprocessorOutputOpts(), List);
+ TargetOptsToArgs(getTargetOpts(), List);
+}
+
+//===----------------------------------------------------------------------===//
+// Deserialization (from args)
+//===----------------------------------------------------------------------===//
+
+using namespace clang::driver;
+using namespace clang::driver::cc1options;
+
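+// Compute the effective optimization level; OpenCL defaults to -O2, and
+// -Os/-Oz imply -O2.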
+static unsigned getOptimizationLevel(ArgList &Args, InputKind IK,
+ DiagnosticsEngine &Diags) {
+ unsigned DefaultOpt = 0;
+ if (IK == IK_OpenCL && !Args.hasArg(OPT_cl_opt_disable))
+ DefaultOpt = 2;
+ // -Os/-Oz implies -O2
+ return (Args.hasArg(OPT_Os) || Args.hasArg(OPT_Oz)) ? 2 :
+ Args.getLastArgIntValue(OPT_O, DefaultOpt, Diags);
+}
+
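+// Parse the static analyzer arguments into AnalyzerOptions, reporting invalid values.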
+static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
+ DiagnosticsEngine &Diags) {
+ using namespace cc1options;
+ bool Success = true;
+ if (Arg *A = Args.getLastArg(OPT_analyzer_store)) {
+ StringRef Name = A->getValue(Args);
+ AnalysisStores Value = llvm::StringSwitch<AnalysisStores>(Name)
+#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN) \
+ .Case(CMDFLAG, NAME##Model)
+#include "clang/Frontend/Analyses.def"
+ .Default(NumStores);
+ if (Value == NumStores) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << Name;
+ Success = false;
+ } else {
+ Opts.AnalysisStoreOpt = Value;
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_analyzer_constraints)) {
+ StringRef Name = A->getValue(Args);
+ AnalysisConstraints Value = llvm::StringSwitch<AnalysisConstraints>(Name)
+#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATFN) \
+ .Case(CMDFLAG, NAME##Model)
+#include "clang/Frontend/Analyses.def"
+ .Default(NumConstraints);
+ if (Value == NumConstraints) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << Name;
+ Success = false;
+ } else {
+ Opts.AnalysisConstraintsOpt = Value;
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_analyzer_output)) {
+ StringRef Name = A->getValue(Args);
+ AnalysisDiagClients Value = llvm::StringSwitch<AnalysisDiagClients>(Name)
+#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATFN, AUTOCREAT) \
+ .Case(CMDFLAG, PD_##NAME)
+#include "clang/Frontend/Analyses.def"
+ .Default(NUM_ANALYSIS_DIAG_CLIENTS);
+ if (Value == NUM_ANALYSIS_DIAG_CLIENTS) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << Name;
+ Success = false;
+ } else {
+ Opts.AnalysisDiagOpt = Value;
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_analyzer_purge)) {
+ StringRef Name = A->getValue(Args);
+ AnalysisPurgeMode Value = llvm::StringSwitch<AnalysisPurgeMode>(Name)
+#define ANALYSIS_PURGE(NAME, CMDFLAG, DESC) \
+ .Case(CMDFLAG, NAME)
+#include "clang/Frontend/Analyses.def"
+ .Default(NumPurgeModes);
+ if (Value == NumPurgeModes) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << Name;
+ Success = false;
+ } else {
+ Opts.AnalysisPurgeOpt = Value;
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_analyzer_ipa)) {
+ StringRef Name = A->getValue(Args);
+ AnalysisIPAMode Value = llvm::StringSwitch<AnalysisIPAMode>(Name)
+#define ANALYSIS_IPA(NAME, CMDFLAG, DESC) \
+ .Case(CMDFLAG, NAME)
+#include "clang/Frontend/Analyses.def"
+ .Default(NumIPAModes);
+ if (Value == NumIPAModes) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << Name;
+ Success = false;
+ } else {
+ Opts.IPAMode = Value;
+ }
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_analyzer_inlining_mode)) {
+ StringRef Name = A->getValue(Args);
+ AnalysisInliningMode Value = llvm::StringSwitch<AnalysisInliningMode>(Name)
+#define ANALYSIS_INLINING_MODE(NAME, CMDFLAG, DESC) \
+ .Case(CMDFLAG, NAME)
+#include "clang/Frontend/Analyses.def"
+ .Default(NumInliningModes);
+ if (Value == NumInliningModes) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << Name;
+ Success = false;
+ } else {
+ Opts.InliningMode = Value;
+ }
+ }
+
+ Opts.ShowCheckerHelp = Args.hasArg(OPT_analyzer_checker_help);
+ Opts.VisualizeEGDot = Args.hasArg(OPT_analyzer_viz_egraph_graphviz);
+ Opts.VisualizeEGUbi = Args.hasArg(OPT_analyzer_viz_egraph_ubigraph);
+ Opts.NoRetryExhausted = Args.hasArg(OPT_analyzer_disable_retry_exhausted);
+ Opts.AnalyzeAll = Args.hasArg(OPT_analyzer_opt_analyze_headers);
+ Opts.AnalyzerDisplayProgress = Args.hasArg(OPT_analyzer_display_progress);
+ Opts.AnalyzeNestedBlocks =
+ Args.hasArg(OPT_analyzer_opt_analyze_nested_blocks);
+ Opts.EagerlyAssume = Args.hasArg(OPT_analyzer_eagerly_assume);
+ Opts.AnalyzeSpecificFunction = Args.getLastArgValue(OPT_analyze_function);
+ Opts.UnoptimizedCFG = Args.hasArg(OPT_analysis_UnoptimizedCFG);
+ Opts.CFGAddImplicitDtors = Args.hasArg(OPT_analysis_CFGAddImplicitDtors);
+ Opts.CFGAddInitializers = Args.hasArg(OPT_analysis_CFGAddInitializers);
+ Opts.TrimGraph = Args.hasArg(OPT_trim_egraph);
+ Opts.MaxNodes = Args.getLastArgIntValue(OPT_analyzer_max_nodes, 150000,Diags);
+ Opts.MaxLoop = Args.getLastArgIntValue(OPT_analyzer_max_loop, 4, Diags);
+ Opts.EagerlyTrimEGraph = !Args.hasArg(OPT_analyzer_no_eagerly_trim_egraph);
+ Opts.PrintStats = Args.hasArg(OPT_analyzer_stats);
+ Opts.InlineMaxStackDepth =
+ Args.getLastArgIntValue(OPT_analyzer_inline_max_stack_depth,
+ Opts.InlineMaxStackDepth, Diags);
+ Opts.InlineMaxFunctionSize =
+ Args.getLastArgIntValue(OPT_analyzer_inline_max_function_size,
+ Opts.InlineMaxFunctionSize, Diags);
+
+ Opts.CheckersControlList.clear();
+ for (arg_iterator it = Args.filtered_begin(OPT_analyzer_checker,
+ OPT_analyzer_disable_checker),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+ A->claim();
+ bool enable = (A->getOption().getID() == OPT_analyzer_checker);
+ // We can have a list of comma-separated checker names, e.g.:
+ // '-analyzer-checker=cocoa,unix'
+ StringRef checkerList = A->getValue(Args);
+ SmallVector<StringRef, 4> checkers;
+ checkerList.split(checkers, ",");
+ for (unsigned i = 0, e = checkers.size(); i != e; ++i)
+ Opts.CheckersControlList.push_back(std::make_pair(checkers[i], enable));
+ }
+
+ return Success;
+}
+
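+// Parse the migrator arguments into MigratorOptions.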
+static bool ParseMigratorArgs(MigratorOptions &Opts, ArgList &Args) {
+ Opts.NoNSAllocReallocError = Args.hasArg(OPT_migrator_no_nsalloc_error);
+ Opts.NoFinalizeRemoval = Args.hasArg(OPT_migrator_no_finalize_removal);
+ return true;
+}
+
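+// Parse the code generation arguments into CodeGenOptions.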
+static bool ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args, InputKind IK,
+ DiagnosticsEngine &Diags) {
+ using namespace cc1options;
+ bool Success = true;
+
+ unsigned OptLevel = getOptimizationLevel(Args, IK, Diags);
+ if (OptLevel > 3) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << Args.getLastArg(OPT_O)->getAsString(Args) << OptLevel;
+ OptLevel = 3;
+ Success = false;
+ }
+ Opts.OptimizationLevel = OptLevel;
+
+ // We must always run at least the always inlining pass.
+ Opts.Inlining = (Opts.OptimizationLevel > 1) ? CodeGenOptions::NormalInlining
+ : CodeGenOptions::OnlyAlwaysInlining;
+ // -fno-inline-functions overrides OptimizationLevel > 1.
+ Opts.NoInline = Args.hasArg(OPT_fno_inline);
+ Opts.Inlining = Args.hasArg(OPT_fno_inline_functions) ?
+ CodeGenOptions::OnlyAlwaysInlining : Opts.Inlining;
+
+ Opts.DebugInfo = Args.hasArg(OPT_g);
+ Opts.LimitDebugInfo = !Args.hasArg(OPT_fno_limit_debug_info)
+ || Args.hasArg(OPT_flimit_debug_info);
+ Opts.DisableLLVMOpts = Args.hasArg(OPT_disable_llvm_optzns);
+ Opts.DisableRedZone = Args.hasArg(OPT_disable_red_zone);
+ Opts.ForbidGuardVariables = Args.hasArg(OPT_fforbid_guard_variables);
+ Opts.UseRegisterSizedBitfieldAccess = Args.hasArg(
+ OPT_fuse_register_sized_bitfield_access);
+ Opts.RelaxedAliasing = Args.hasArg(OPT_relaxed_aliasing);
+ Opts.DwarfDebugFlags = Args.getLastArgValue(OPT_dwarf_debug_flags);
+ Opts.MergeAllConstants = !Args.hasArg(OPT_fno_merge_all_constants);
+ Opts.NoCommon = Args.hasArg(OPT_fno_common);
+ Opts.NoImplicitFloat = Args.hasArg(OPT_no_implicit_float);
+ Opts.OptimizeSize = Args.hasArg(OPT_Os);
+ Opts.OptimizeSize = Args.hasArg(OPT_Oz) ? 2 : Opts.OptimizeSize;
+ Opts.SimplifyLibCalls = !(Args.hasArg(OPT_fno_builtin) ||
+ Args.hasArg(OPT_ffreestanding));
+ Opts.UnrollLoops = Args.hasArg(OPT_funroll_loops) ||
+ (Opts.OptimizationLevel > 1 && !Opts.OptimizeSize);
+
+ Opts.AsmVerbose = Args.hasArg(OPT_masm_verbose);
+ Opts.ObjCAutoRefCountExceptions = Args.hasArg(OPT_fobjc_arc_exceptions);
+ Opts.ObjCRuntimeHasARC = Args.hasArg(OPT_fobjc_runtime_has_arc);
+ Opts.ObjCRuntimeHasTerminate = Args.hasArg(OPT_fobjc_runtime_has_terminate);
+ Opts.CUDAIsDevice = Args.hasArg(OPT_fcuda_is_device);
+ Opts.CXAAtExit = !Args.hasArg(OPT_fno_use_cxa_atexit);
+ Opts.CXXCtorDtorAliases = Args.hasArg(OPT_mconstructor_aliases);
+ Opts.CodeModel = Args.getLastArgValue(OPT_mcode_model);
+ Opts.DebugPass = Args.getLastArgValue(OPT_mdebug_pass);
+ Opts.DisableFPElim = Args.hasArg(OPT_mdisable_fp_elim);
+ Opts.DisableTailCalls = Args.hasArg(OPT_mdisable_tail_calls);
+ Opts.FloatABI = Args.getLastArgValue(OPT_mfloat_abi);
+ Opts.HiddenWeakVTables = Args.hasArg(OPT_fhidden_weak_vtables);
+ Opts.LessPreciseFPMAD = Args.hasArg(OPT_cl_mad_enable);
+ Opts.LimitFloatPrecision = Args.getLastArgValue(OPT_mlimit_float_precision);
+ Opts.NoInfsFPMath = (Args.hasArg(OPT_menable_no_infinities) ||
+ Args.hasArg(OPT_cl_finite_math_only)||
+ Args.hasArg(OPT_cl_fast_relaxed_math));
+ Opts.NoNaNsFPMath = (Args.hasArg(OPT_menable_no_nans) ||
+ Args.hasArg(OPT_cl_finite_math_only)||
+ Args.hasArg(OPT_cl_fast_relaxed_math));
+ Opts.NoZeroInitializedInBSS = Args.hasArg(OPT_mno_zero_initialized_in_bss);
+ Opts.BackendOptions = Args.getAllArgValues(OPT_backend_option);
+ Opts.NumRegisterParameters = Args.getLastArgIntValue(OPT_mregparm, 0, Diags);
+ Opts.NoGlobalMerge = Args.hasArg(OPT_mno_global_merge);
+ Opts.NoExecStack = Args.hasArg(OPT_mno_exec_stack);
+ Opts.RelaxAll = Args.hasArg(OPT_mrelax_all);
+ Opts.OmitLeafFramePointer = Args.hasArg(OPT_momit_leaf_frame_pointer);
+ Opts.SaveTempLabels = Args.hasArg(OPT_msave_temp_labels);
+ Opts.NoDwarf2CFIAsm = Args.hasArg(OPT_fno_dwarf2_cfi_asm);
+ Opts.NoDwarfDirectoryAsm = Args.hasArg(OPT_fno_dwarf_directory_asm);
+ Opts.SoftFloat = Args.hasArg(OPT_msoft_float);
+ Opts.StrictEnums = Args.hasArg(OPT_fstrict_enums);
+ Opts.UnsafeFPMath = Args.hasArg(OPT_menable_unsafe_fp_math) ||
+ Args.hasArg(OPT_cl_unsafe_math_optimizations) ||
+ Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.UnwindTables = Args.hasArg(OPT_munwind_tables);
+ Opts.RelocationModel = Args.getLastArgValue(OPT_mrelocation_model, "pic");
+ Opts.TrapFuncName = Args.getLastArgValue(OPT_ftrap_function_EQ);
+
+ Opts.FunctionSections = Args.hasArg(OPT_ffunction_sections);
+ Opts.DataSections = Args.hasArg(OPT_fdata_sections);
+
+ Opts.MainFileName = Args.getLastArgValue(OPT_main_file_name);
+ Opts.VerifyModule = !Args.hasArg(OPT_disable_llvm_verifier);
+
+ Opts.InstrumentFunctions = Args.hasArg(OPT_finstrument_functions);
+ Opts.InstrumentForProfiling = Args.hasArg(OPT_pg);
+ Opts.EmitGcovArcs = Args.hasArg(OPT_femit_coverage_data);
+ Opts.EmitGcovNotes = Args.hasArg(OPT_femit_coverage_notes);
+ Opts.CoverageFile = Args.getLastArgValue(OPT_coverage_file);
+ Opts.DebugCompilationDir = Args.getLastArgValue(OPT_fdebug_compilation_dir);
+ Opts.LinkBitcodeFile = Args.getLastArgValue(OPT_mlink_bitcode_file);
+ Opts.StackRealignment = Args.hasArg(OPT_mstackrealign);
+ if (Arg *A = Args.getLastArg(OPT_mstack_alignment)) {
+ StringRef Val = A->getValue(Args);
+ Val.getAsInteger(10, Opts.StackAlignment);
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_fobjc_dispatch_method_EQ)) {
+ StringRef Name = A->getValue(Args);
+ unsigned Method = llvm::StringSwitch<unsigned>(Name)
+ .Case("legacy", CodeGenOptions::Legacy)
+ .Case("non-legacy", CodeGenOptions::NonLegacy)
+ .Case("mixed", CodeGenOptions::Mixed)
+ .Default(~0U);
+ if (Method == ~0U) {
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
+ Success = false;
+ } else {
+ Opts.ObjCDispatchMethod = Method;
+ }
+ }
+
+ return Success;
+}
+
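+// Parse the dependency output arguments into DependencyOutputOptions.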
+static void ParseDependencyOutputArgs(DependencyOutputOptions &Opts,
+ ArgList &Args) {
+ using namespace cc1options;
+ Opts.OutputFile = Args.getLastArgValue(OPT_dependency_file);
+ Opts.Targets = Args.getAllArgValues(OPT_MT);
+ Opts.IncludeSystemHeaders = Args.hasArg(OPT_sys_header_deps);
+ Opts.UsePhonyTargets = Args.hasArg(OPT_MP);
+ Opts.ShowHeaderIncludes = Args.hasArg(OPT_H);
+ Opts.HeaderIncludeOutputFile = Args.getLastArgValue(OPT_header_include_file);
+ Opts.AddMissingHeaderDeps = Args.hasArg(OPT_MG);
+ Opts.DOTOutputFile = Args.getLastArgValue(OPT_dependency_dot);
+}
+
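+// Parse the diagnostic arguments into DiagnosticOptions; Diags may be null.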
+bool clang::ParseDiagnosticArgs(DiagnosticOptions &Opts, ArgList &Args,
+ DiagnosticsEngine *Diags) {
+ using namespace cc1options;
+ bool Success = true;
+
+ Opts.DiagnosticLogFile = Args.getLastArgValue(OPT_diagnostic_log_file);
+ Opts.DiagnosticSerializationFile =
+ Args.getLastArgValue(OPT_diagnostic_serialized_file);
+ Opts.IgnoreWarnings = Args.hasArg(OPT_w);
+ Opts.NoRewriteMacros = Args.hasArg(OPT_Wno_rewrite_macros);
+ Opts.Pedantic = Args.hasArg(OPT_pedantic);
+ Opts.PedanticErrors = Args.hasArg(OPT_pedantic_errors);
+ Opts.ShowCarets = !Args.hasArg(OPT_fno_caret_diagnostics);
+ Opts.ShowColors = Args.hasArg(OPT_fcolor_diagnostics);
+ Opts.ShowColumn = Args.hasFlag(OPT_fshow_column,
+ OPT_fno_show_column,
+ /*Default=*/true);
+ Opts.ShowFixits = !Args.hasArg(OPT_fno_diagnostics_fixit_info);
+ Opts.ShowLocation = !Args.hasArg(OPT_fno_show_source_location);
+ Opts.ShowOptionNames = Args.hasArg(OPT_fdiagnostics_show_option);
+
+ // Default behavior is not to show note include stacks.
+ Opts.ShowNoteIncludeStack = false;
+ if (Arg *A = Args.getLastArg(OPT_fdiagnostics_show_note_include_stack,
+ OPT_fno_diagnostics_show_note_include_stack))
+ if (A->getOption().matches(OPT_fdiagnostics_show_note_include_stack))
+ Opts.ShowNoteIncludeStack = true;
+
+ StringRef ShowOverloads =
+ Args.getLastArgValue(OPT_fshow_overloads_EQ, "all");
+ if (ShowOverloads == "best")
+ Opts.ShowOverloads = DiagnosticsEngine::Ovl_Best;
+ else if (ShowOverloads == "all")
+ Opts.ShowOverloads = DiagnosticsEngine::Ovl_All;
+ else {
+ Success = false;
+ if (Diags)
+ Diags->Report(diag::err_drv_invalid_value)
+ << Args.getLastArg(OPT_fshow_overloads_EQ)->getAsString(Args)
+ << ShowOverloads;
+ }
+
+ StringRef ShowCategory =
+ Args.getLastArgValue(OPT_fdiagnostics_show_category, "none");
+ if (ShowCategory == "none")
+ Opts.ShowCategories = 0;
+ else if (ShowCategory == "id")
+ Opts.ShowCategories = 1;
+ else if (ShowCategory == "name")
+ Opts.ShowCategories = 2;
+ else {
+ Success = false;
+ if (Diags)
+ Diags->Report(diag::err_drv_invalid_value)
+ << Args.getLastArg(OPT_fdiagnostics_show_category)->getAsString(Args)
+ << ShowCategory;
+ }
+
+ StringRef Format =
+ Args.getLastArgValue(OPT_fdiagnostics_format, "clang");
+ if (Format == "clang")
+ Opts.Format = DiagnosticOptions::Clang;
+ else if (Format == "msvc")
+ Opts.Format = DiagnosticOptions::Msvc;
+ else if (Format == "vi")
+ Opts.Format = DiagnosticOptions::Vi;
+ else {
+ Success = false;
+ if (Diags)
+ Diags->Report(diag::err_drv_invalid_value)
+ << Args.getLastArg(OPT_fdiagnostics_format)->getAsString(Args)
+ << Format;
+ }
+
+ Opts.ShowSourceRanges = Args.hasArg(OPT_fdiagnostics_print_source_range_info);
+ Opts.ShowParseableFixits = Args.hasArg(OPT_fdiagnostics_parseable_fixits);
+ Opts.VerifyDiagnostics = Args.hasArg(OPT_verify);
+ Opts.ErrorLimit = Args.getLastArgIntValue(OPT_ferror_limit, 0, Diags);
+ Opts.MacroBacktraceLimit
+ = Args.getLastArgIntValue(OPT_fmacro_backtrace_limit,
+ DiagnosticOptions::DefaultMacroBacktraceLimit, Diags);
+ Opts.TemplateBacktraceLimit
+ = Args.getLastArgIntValue(OPT_ftemplate_backtrace_limit,
+ DiagnosticOptions::DefaultTemplateBacktraceLimit,
+ Diags);
+ Opts.ConstexprBacktraceLimit
+ = Args.getLastArgIntValue(OPT_fconstexpr_backtrace_limit,
+ DiagnosticOptions::DefaultConstexprBacktraceLimit,
+ Diags);
+ Opts.TabStop = Args.getLastArgIntValue(OPT_ftabstop,
+ DiagnosticOptions::DefaultTabStop, Diags);
+ if (Opts.TabStop == 0 || Opts.TabStop > DiagnosticOptions::MaxTabStop) {
+ Opts.TabStop = DiagnosticOptions::DefaultTabStop;
+ if (Diags)
+ Diags->Report(diag::warn_ignoring_ftabstop_value)
+ << Opts.TabStop << DiagnosticOptions::DefaultTabStop;
+ }
+ Opts.MessageLength = Args.getLastArgIntValue(OPT_fmessage_length, 0, Diags);
+ Opts.DumpBuildInformation = Args.getLastArgValue(OPT_dump_build_information);
+
+ for (arg_iterator it = Args.filtered_begin(OPT_W),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ StringRef V = (*it)->getValue(Args);
+ // "-Wl," and such are not warnings options.
+ if (V.startswith("l,") || V.startswith("a,") || V.startswith("p,"))
+ continue;
+
+ Opts.Warnings.push_back(V);
+ }
+
+ return Success;
+}
+
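+// Parse the file system arguments (currently just -working-directory).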
+static void ParseFileSystemArgs(FileSystemOptions &Opts, ArgList &Args) {
+ Opts.WorkingDir = Args.getLastArgValue(OPT_working_directory);
+}
+
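+// Parse the frontend arguments into FrontendOptions and return the input kind
+// selected by -x (or inferred from the first input).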
+static InputKind ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
+ DiagnosticsEngine &Diags) {
+ using namespace cc1options;
+ Opts.ProgramAction = frontend::ParseSyntaxOnly;
+ if (const Arg *A = Args.getLastArg(OPT_Action_Group)) {
+ switch (A->getOption().getID()) {
+ default:
+ llvm_unreachable("Invalid option in group!");
+ case OPT_ast_dump:
+ Opts.ProgramAction = frontend::ASTDump; break;
+ case OPT_ast_dump_xml:
+ Opts.ProgramAction = frontend::ASTDumpXML; break;
+ case OPT_ast_print:
+ Opts.ProgramAction = frontend::ASTPrint; break;
+ case OPT_ast_view:
+ Opts.ProgramAction = frontend::ASTView; break;
+ case OPT_dump_raw_tokens:
+ Opts.ProgramAction = frontend::DumpRawTokens; break;
+ case OPT_dump_tokens:
+ Opts.ProgramAction = frontend::DumpTokens; break;
+ case OPT_S:
+ Opts.ProgramAction = frontend::EmitAssembly; break;
+ case OPT_emit_llvm_bc:
+ Opts.ProgramAction = frontend::EmitBC; break;
+ case OPT_emit_html:
+ Opts.ProgramAction = frontend::EmitHTML; break;
+ case OPT_emit_llvm:
+ Opts.ProgramAction = frontend::EmitLLVM; break;
+ case OPT_emit_llvm_only:
+ Opts.ProgramAction = frontend::EmitLLVMOnly; break;
+ case OPT_emit_codegen_only:
+ Opts.ProgramAction = frontend::EmitCodeGenOnly; break;
+ case OPT_emit_obj:
+ Opts.ProgramAction = frontend::EmitObj; break;
+ case OPT_fixit_EQ:
+ Opts.FixItSuffix = A->getValue(Args);
+ // fall-through!
+ case OPT_fixit:
+ Opts.ProgramAction = frontend::FixIt; break;
+ case OPT_emit_module:
+ Opts.ProgramAction = frontend::GenerateModule; break;
+ case OPT_emit_pch:
+ Opts.ProgramAction = frontend::GeneratePCH; break;
+ case OPT_emit_pth:
+ Opts.ProgramAction = frontend::GeneratePTH; break;
+ case OPT_init_only:
+ Opts.ProgramAction = frontend::InitOnly; break;
+ case OPT_fsyntax_only:
+ Opts.ProgramAction = frontend::ParseSyntaxOnly; break;
+ case OPT_print_decl_contexts:
+ Opts.ProgramAction = frontend::PrintDeclContext; break;
+ case OPT_print_preamble:
+ Opts.ProgramAction = frontend::PrintPreamble; break;
+ case OPT_E:
+ Opts.ProgramAction = frontend::PrintPreprocessedInput; break;
+ case OPT_pubnames_dump:
+ Opts.ProgramAction = frontend::PubnamesDump; break;
+ case OPT_rewrite_macros:
+ Opts.ProgramAction = frontend::RewriteMacros; break;
+ case OPT_rewrite_objc:
+ Opts.ProgramAction = frontend::RewriteObjC; break;
+ case OPT_rewrite_test:
+ Opts.ProgramAction = frontend::RewriteTest; break;
+ case OPT_analyze:
+ Opts.ProgramAction = frontend::RunAnalysis; break;
+ case OPT_migrate:
+ Opts.ProgramAction = frontend::MigrateSource; break;
+ case OPT_Eonly:
+ Opts.ProgramAction = frontend::RunPreprocessorOnly; break;
+ }
+ }
+
+ if (const Arg* A = Args.getLastArg(OPT_plugin)) {
+ Opts.Plugins.push_back(A->getValue(Args,0));
+ Opts.ProgramAction = frontend::PluginAction;
+ Opts.ActionName = A->getValue(Args);
+
+ for (arg_iterator it = Args.filtered_begin(OPT_plugin_arg),
+ end = Args.filtered_end(); it != end; ++it) {
+ if ((*it)->getValue(Args, 0) == Opts.ActionName)
+ Opts.PluginArgs.push_back((*it)->getValue(Args, 1));
+ }
+ }
+
+ Opts.AddPluginActions = Args.getAllArgValues(OPT_add_plugin);
+ Opts.AddPluginArgs.resize(Opts.AddPluginActions.size());
+ for (unsigned i = 0, e = Opts.AddPluginActions.size(); i != e; ++i) {
+ for (arg_iterator it = Args.filtered_begin(OPT_plugin_arg),
+ end = Args.filtered_end(); it != end; ++it) {
+ if ((*it)->getValue(Args, 0) == Opts.AddPluginActions[i])
+ Opts.AddPluginArgs[i].push_back((*it)->getValue(Args, 1));
+ }
+ }
+
+ if (const Arg *A = Args.getLastArg(OPT_code_completion_at)) {
+ Opts.CodeCompletionAt =
+ ParsedSourceLocation::FromString(A->getValue(Args));
+ if (Opts.CodeCompletionAt.FileName.empty())
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue(Args);
+ }
+ Opts.DisableFree = Args.hasArg(OPT_disable_free);
+
+ Opts.OutputFile = Args.getLastArgValue(OPT_o);
+ Opts.Plugins = Args.getAllArgValues(OPT_load);
+ Opts.RelocatablePCH = Args.hasArg(OPT_relocatable_pch);
+ Opts.ShowHelp = Args.hasArg(OPT_help);
+ Opts.ShowMacrosInCodeCompletion = Args.hasArg(OPT_code_completion_macros);
+ Opts.ShowCodePatternsInCodeCompletion
+ = Args.hasArg(OPT_code_completion_patterns);
+ Opts.ShowGlobalSymbolsInCodeCompletion
+ = !Args.hasArg(OPT_no_code_completion_globals);
+ Opts.ShowStats = Args.hasArg(OPT_print_stats);
+ Opts.ShowTimers = Args.hasArg(OPT_ftime_report);
+ Opts.ShowVersion = Args.hasArg(OPT_version);
+ Opts.ASTMergeFiles = Args.getAllArgValues(OPT_ast_merge);
+ Opts.LLVMArgs = Args.getAllArgValues(OPT_mllvm);
+ Opts.FixWhatYouCan = Args.hasArg(OPT_fix_what_you_can);
+ Opts.FixOnlyWarnings = Args.hasArg(OPT_fix_only_warnings);
+ Opts.FixAndRecompile = Args.hasArg(OPT_fixit_recompile);
+ Opts.FixToTemporaries = Args.hasArg(OPT_fixit_to_temp);
+ Opts.OverrideRecordLayoutsFile
+ = Args.getLastArgValue(OPT_foverride_record_layout_EQ);
+ if (const Arg *A = Args.getLastArg(OPT_arcmt_check,
+ OPT_arcmt_modify,
+ OPT_arcmt_migrate)) {
+ switch (A->getOption().getID()) {
+ default:
+ llvm_unreachable("missed a case");
+ case OPT_arcmt_check:
+ Opts.ARCMTAction = FrontendOptions::ARCMT_Check;
+ break;
+ case OPT_arcmt_modify:
+ Opts.ARCMTAction = FrontendOptions::ARCMT_Modify;
+ break;
+ case OPT_arcmt_migrate:
+ Opts.ARCMTAction = FrontendOptions::ARCMT_Migrate;
+ break;
+ }
+ }
+ Opts.MTMigrateDir = Args.getLastArgValue(OPT_mt_migrate_directory);
+ Opts.ARCMTMigrateReportOut
+ = Args.getLastArgValue(OPT_arcmt_migrate_report_output);
+ Opts.ARCMTMigrateEmitARCErrors
+ = Args.hasArg(OPT_arcmt_migrate_emit_arc_errors);
+
+ if (Args.hasArg(OPT_objcmt_migrate_literals))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_Literals;
+ if (Args.hasArg(OPT_objcmt_migrate_subscripting))
+ Opts.ObjCMTAction |= FrontendOptions::ObjCMT_Subscripting;
+
+ if (Opts.ARCMTAction != FrontendOptions::ARCMT_None &&
+ Opts.ObjCMTAction != FrontendOptions::ObjCMT_None) {
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << "ARC migration" << "ObjC migration";
+ }
+
+ InputKind DashX = IK_None;
+ if (const Arg *A = Args.getLastArg(OPT_x)) {
+ DashX = llvm::StringSwitch<InputKind>(A->getValue(Args))
+ .Case("c", IK_C)
+ .Case("cl", IK_OpenCL)
+ .Case("cuda", IK_CUDA)
+ .Case("c++", IK_CXX)
+ .Case("objective-c", IK_ObjC)
+ .Case("objective-c++", IK_ObjCXX)
+ .Case("cpp-output", IK_PreprocessedC)
+ .Case("assembler-with-cpp", IK_Asm)
+ .Case("c++-cpp-output", IK_PreprocessedCXX)
+ .Case("objective-c-cpp-output", IK_PreprocessedObjC)
+ .Case("objc-cpp-output", IK_PreprocessedObjC)
+ .Case("objective-c++-cpp-output", IK_PreprocessedObjCXX)
+ .Case("objc++-cpp-output", IK_PreprocessedObjCXX)
+ .Case("c-header", IK_C)
+ .Case("cl-header", IK_OpenCL)
+ .Case("objective-c-header", IK_ObjC)
+ .Case("c++-header", IK_CXX)
+ .Case("objective-c++-header", IK_ObjCXX)
+ .Case("ast", IK_AST)
+ .Case("ir", IK_LLVM_IR)
+ .Default(IK_None);
+ if (DashX == IK_None)
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue(Args);
+ }
+
+ // '-' is the default input if none is given.
+ std::vector<std::string> Inputs = Args.getAllArgValues(OPT_INPUT);
+ Opts.Inputs.clear();
+ if (Inputs.empty())
+ Inputs.push_back("-");
+ for (unsigned i = 0, e = Inputs.size(); i != e; ++i) {
+ InputKind IK = DashX;
+ if (IK == IK_None) {
+ IK = FrontendOptions::getInputKindForExtension(
+ StringRef(Inputs[i]).rsplit('.').second);
+ // FIXME: Remove this hack.
+ if (i == 0)
+ DashX = IK;
+ }
+ Opts.Inputs.push_back(FrontendInputFile(Inputs[i], IK));
+ }
+
+ return DashX;
+}
+
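+// Compute the clang resource directory (<prefix>/lib/clang/<version>) from the
+// path of the running executable.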
+std::string CompilerInvocation::GetResourcesPath(const char *Argv0,
+ void *MainAddr) {
+ llvm::sys::Path P = llvm::sys::Path::GetMainExecutable(Argv0, MainAddr);
+
+ if (!P.isEmpty()) {
+ P.eraseComponent(); // Remove /clang from foo/bin/clang
+ P.eraseComponent(); // Remove /bin from foo/bin
+
+ // Get foo/lib/clang/<version>
+ P.appendComponent("lib");
+ P.appendComponent("clang");
+ P.appendComponent(CLANG_VERSION_STRING);
+ }
+
+ return P.str();
+}
+
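+// Parse the header search arguments, rebuilding the include search path in order.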
+static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
+ using namespace cc1options;
+ Opts.Sysroot = Args.getLastArgValue(OPT_isysroot, "/");
+ Opts.Verbose = Args.hasArg(OPT_v);
+ Opts.UseBuiltinIncludes = !Args.hasArg(OPT_nobuiltininc);
+ Opts.UseStandardSystemIncludes = !Args.hasArg(OPT_nostdsysteminc);
+ Opts.UseStandardCXXIncludes = !Args.hasArg(OPT_nostdincxx);
+ if (const Arg *A = Args.getLastArg(OPT_stdlib_EQ))
+ Opts.UseLibcxx = (strcmp(A->getValue(Args), "libc++") == 0);
+ Opts.ResourceDir = Args.getLastArgValue(OPT_resource_dir);
+ Opts.ModuleCachePath = Args.getLastArgValue(OPT_fmodule_cache_path);
+ Opts.DisableModuleHash = Args.hasArg(OPT_fdisable_module_hash);
+
+ // Add -I..., -F..., and -index-header-map options in order.
+ bool IsIndexHeaderMap = false;
+ for (arg_iterator it = Args.filtered_begin(OPT_I, OPT_F,
+ OPT_index_header_map),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ if ((*it)->getOption().matches(OPT_index_header_map)) {
+ // -index-header-map applies to the next -I or -F.
+ IsIndexHeaderMap = true;
+ continue;
+ }
+
+ frontend::IncludeDirGroup Group
+ = IsIndexHeaderMap? frontend::IndexHeaderMap : frontend::Angled;
+
+ Opts.AddPath((*it)->getValue(Args), Group, true,
+ /*IsFramework=*/ (*it)->getOption().matches(OPT_F), false);
+ IsIndexHeaderMap = false;
+ }
+
+ // Add -iprefix/-iwith-prefix/-iwithprefixbefore options.
+ StringRef Prefix = ""; // FIXME: This isn't the correct default prefix.
+ for (arg_iterator it = Args.filtered_begin(OPT_iprefix, OPT_iwithprefix,
+ OPT_iwithprefixbefore),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+ if (A->getOption().matches(OPT_iprefix))
+ Prefix = A->getValue(Args);
+ else if (A->getOption().matches(OPT_iwithprefix))
+ Opts.AddPath(Prefix.str() + A->getValue(Args),
+ frontend::System, false, false, false);
+ else
+ Opts.AddPath(Prefix.str() + A->getValue(Args),
+ frontend::Angled, false, false, false);
+ }
+
+ for (arg_iterator it = Args.filtered_begin(OPT_idirafter),
+ ie = Args.filtered_end(); it != ie; ++it)
+ Opts.AddPath((*it)->getValue(Args), frontend::After, true, false, false);
+ for (arg_iterator it = Args.filtered_begin(OPT_iquote),
+ ie = Args.filtered_end(); it != ie; ++it)
+ Opts.AddPath((*it)->getValue(Args), frontend::Quoted, true, false, false);
+ for (arg_iterator it = Args.filtered_begin(OPT_isystem,
+ OPT_iwithsysroot), ie = Args.filtered_end(); it != ie; ++it)
+ Opts.AddPath((*it)->getValue(Args), frontend::System, true, false,
+ !(*it)->getOption().matches(OPT_iwithsysroot));
+ for (arg_iterator it = Args.filtered_begin(OPT_iframework),
+ ie = Args.filtered_end(); it != ie; ++it)
+ Opts.AddPath((*it)->getValue(Args), frontend::System, true, true,
+ true);
+
+ // Add the paths for the various language specific isystem flags.
+ for (arg_iterator it = Args.filtered_begin(OPT_c_isystem),
+ ie = Args.filtered_end(); it != ie; ++it)
+ Opts.AddPath((*it)->getValue(Args), frontend::CSystem, true, false, true);
+ for (arg_iterator it = Args.filtered_begin(OPT_cxx_isystem),
+ ie = Args.filtered_end(); it != ie; ++it)
+ Opts.AddPath((*it)->getValue(Args), frontend::CXXSystem, true, false, true);
+ for (arg_iterator it = Args.filtered_begin(OPT_objc_isystem),
+ ie = Args.filtered_end(); it != ie; ++it)
+ Opts.AddPath((*it)->getValue(Args), frontend::ObjCSystem, true, false,true);
+ for (arg_iterator it = Args.filtered_begin(OPT_objcxx_isystem),
+ ie = Args.filtered_end(); it != ie; ++it)
+ Opts.AddPath((*it)->getValue(Args), frontend::ObjCXXSystem, true, false,
+ true);
+
+ // Add the internal paths from a driver that detects standard include paths.
+ for (arg_iterator I = Args.filtered_begin(OPT_internal_isystem,
+ OPT_internal_externc_isystem),
+ E = Args.filtered_end();
+ I != E; ++I)
+ Opts.AddPath((*I)->getValue(Args), frontend::System,
+ false, false, /*IgnoreSysRoot=*/true, /*IsInternal=*/true,
+ (*I)->getOption().matches(OPT_internal_externc_isystem));
+}
+
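+// Set the default language options for the given input kind and language standard.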
+void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
+ LangStandard::Kind LangStd) {
+ // Set some properties which depend solely on the input kind; it would be nice
+ // to move these to the language standard, and have the driver resolve the
+ // input kind + language standard.
+ if (IK == IK_Asm) {
+ Opts.AsmPreprocessor = 1;
+ } else if (IK == IK_ObjC ||
+ IK == IK_ObjCXX ||
+ IK == IK_PreprocessedObjC ||
+ IK == IK_PreprocessedObjCXX) {
+ Opts.ObjC1 = Opts.ObjC2 = 1;
+ }
+
+ if (LangStd == LangStandard::lang_unspecified) {
+ // Based on the base language, pick one.
+ switch (IK) {
+ case IK_None:
+ case IK_AST:
+ case IK_LLVM_IR:
+ llvm_unreachable("Invalid input kind!");
+ case IK_OpenCL:
+ LangStd = LangStandard::lang_opencl;
+ break;
+ case IK_CUDA:
+ LangStd = LangStandard::lang_cuda;
+ break;
+ case IK_Asm:
+ case IK_C:
+ case IK_PreprocessedC:
+ case IK_ObjC:
+ case IK_PreprocessedObjC:
+ LangStd = LangStandard::lang_gnu99;
+ break;
+ case IK_CXX:
+ case IK_PreprocessedCXX:
+ case IK_ObjCXX:
+ case IK_PreprocessedObjCXX:
+ LangStd = LangStandard::lang_gnucxx98;
+ break;
+ }
+ }
+
+ const LangStandard &Std = LangStandard::getLangStandardForKind(LangStd);
+ Opts.BCPLComment = Std.hasBCPLComments();
+ Opts.C99 = Std.isC99();
+ Opts.C11 = Std.isC11();
+ Opts.CPlusPlus = Std.isCPlusPlus();
+ Opts.CPlusPlus0x = Std.isCPlusPlus0x();
+ Opts.Digraphs = Std.hasDigraphs();
+ Opts.GNUMode = Std.isGNUMode();
+ Opts.GNUInline = !Std.isC99();
+ Opts.HexFloats = Std.hasHexFloats();
+ Opts.ImplicitInt = Std.hasImplicitInt();
+
+ // OpenCL has some additional defaults.
+ if (LangStd == LangStandard::lang_opencl) {
+ Opts.OpenCL = 1;
+ Opts.AltiVec = 0;
+ Opts.CXXOperatorNames = 1;
+ Opts.LaxVectorConversions = 0;
+ Opts.DefaultFPContract = 1;
+ }
+
+ if (LangStd == LangStandard::lang_cuda)
+ Opts.CUDA = 1;
+
+ // OpenCL and C++ both have bool, true, false keywords.
+ Opts.Bool = Opts.OpenCL || Opts.CPlusPlus;
+
+ Opts.GNUKeywords = Opts.GNUMode;
+ Opts.CXXOperatorNames = Opts.CPlusPlus;
+
+ // To mimic gcc's behavior, trigraphs are only enabled if -trigraphs
+ // is specified, or -std is set to a conforming mode.
+ Opts.Trigraphs = !Opts.GNUMode;
+
+ Opts.DollarIdents = !Opts.AsmPreprocessor;
+}
+
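+// Parse the language arguments into LangOptions, starting from the standard's defaults.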
+static void ParseLangArgs(LangOptions &Opts, ArgList &Args, InputKind IK,
+ DiagnosticsEngine &Diags) {
+ // FIXME: Cleanup per-file based stuff.
+ LangStandard::Kind LangStd = LangStandard::lang_unspecified;
+ if (const Arg *A = Args.getLastArg(OPT_std_EQ)) {
+ LangStd = llvm::StringSwitch<LangStandard::Kind>(A->getValue(Args))
+#define LANGSTANDARD(id, name, desc, features) \
+ .Case(name, LangStandard::lang_##id)
+#include "clang/Frontend/LangStandards.def"
+ .Default(LangStandard::lang_unspecified);
+ if (LangStd == LangStandard::lang_unspecified)
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue(Args);
+ else {
+ // Valid standard; check that the language and standard are compatible.
+ const LangStandard &Std = LangStandard::getLangStandardForKind(LangStd);
+ switch (IK) {
+ case IK_C:
+ case IK_ObjC:
+ case IK_PreprocessedC:
+ case IK_PreprocessedObjC:
+ if (!(Std.isC89() || Std.isC99()))
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "C/ObjC";
+ break;
+ case IK_CXX:
+ case IK_ObjCXX:
+ case IK_PreprocessedCXX:
+ case IK_PreprocessedObjCXX:
+ if (!Std.isCPlusPlus())
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "C++/ObjC++";
+ break;
+ case IK_OpenCL:
+ if (!Std.isC99())
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "OpenCL";
+ break;
+ case IK_CUDA:
+ if (!Std.isCPlusPlus())
+ Diags.Report(diag::err_drv_argument_not_allowed_with)
+ << A->getAsString(Args) << "CUDA";
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+ if (const Arg *A = Args.getLastArg(OPT_cl_std_EQ)) {
+ if (strcmp(A->getValue(Args), "CL1.1") != 0) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue(Args);
+ }
+ }
+
+ CompilerInvocation::setLangDefaults(Opts, IK, LangStd);
+
+ // We abuse '-f[no-]gnu-keywords' to force overriding all GNU-extension
+ // keywords. This behavior is provided by GCC's poorly named '-fasm' flag,
+ // while a subset (the non-C++ GNU keywords) is provided by GCC's
+ // '-fgnu-keywords'. Clang conflates the two for simplicity under the single
+ // name, as it doesn't seem a useful distinction.
+ Opts.GNUKeywords = Args.hasFlag(OPT_fgnu_keywords, OPT_fno_gnu_keywords,
+ Opts.GNUKeywords);
+
+ if (Args.hasArg(OPT_fno_operator_names))
+ Opts.CXXOperatorNames = 0;
+
+ if (Opts.ObjC1) {
+ if (Args.hasArg(OPT_fobjc_gc_only))
+ Opts.setGC(LangOptions::GCOnly);
+ else if (Args.hasArg(OPT_fobjc_gc))
+ Opts.setGC(LangOptions::HybridGC);
+ else if (Args.hasArg(OPT_fobjc_arc)) {
+ Opts.ObjCAutoRefCount = 1;
+ if (Args.hasArg(OPT_fobjc_fragile_abi))
+ Diags.Report(diag::err_arc_nonfragile_abi);
+ }
+
+ if (Args.hasArg(OPT_fobjc_runtime_has_weak))
+ Opts.ObjCRuntimeHasWeak = 1;
+
+ if (Args.hasArg(OPT_fno_objc_infer_related_result_type))
+ Opts.ObjCInferRelatedResultType = 0;
+ }
+
+ if (Args.hasArg(OPT_fgnu89_inline))
+ Opts.GNUInline = 1;
+
+ if (Args.hasArg(OPT_fapple_kext)) {
+ if (!Opts.CPlusPlus)
+ Diags.Report(diag::warn_c_kext);
+ else
+ Opts.AppleKext = 1;
+ }
+
+ if (Args.hasArg(OPT_print_ivar_layout))
+ Opts.ObjCGCBitmapPrint = 1;
+ if (Args.hasArg(OPT_fno_constant_cfstrings))
+ Opts.NoConstantCFStrings = 1;
+
+ if (Args.hasArg(OPT_faltivec))
+ Opts.AltiVec = 1;
+
+ if (Args.hasArg(OPT_pthread))
+ Opts.POSIXThreads = 1;
+
+ if (Args.hasArg(OPT_fdelayed_template_parsing))
+ Opts.DelayedTemplateParsing = 1;
+
+ StringRef Vis = Args.getLastArgValue(OPT_fvisibility, "default");
+ if (Vis == "default")
+ Opts.setVisibilityMode(DefaultVisibility);
+ else if (Vis == "hidden")
+ Opts.setVisibilityMode(HiddenVisibility);
+ else if (Vis == "protected")
+ // FIXME: diagnose if target does not support protected visibility
+ Opts.setVisibilityMode(ProtectedVisibility);
+ else
+ Diags.Report(diag::err_drv_invalid_value)
+ << Args.getLastArg(OPT_fvisibility)->getAsString(Args) << Vis;
+
+ if (Args.hasArg(OPT_fvisibility_inlines_hidden))
+ Opts.InlineVisibilityHidden = 1;
+
+ if (Args.hasArg(OPT_ftrapv)) {
+ Opts.setSignedOverflowBehavior(LangOptions::SOB_Trapping);
+ // Set the handler, if one is specified.
+ Opts.OverflowHandler =
+ Args.getLastArgValue(OPT_ftrapv_handler);
+ }
+ else if (Args.hasArg(OPT_fwrapv))
+ Opts.setSignedOverflowBehavior(LangOptions::SOB_Defined);
+
+ if (Args.hasArg(OPT_trigraphs))
+ Opts.Trigraphs = 1;
+
+ Opts.DollarIdents = Args.hasFlag(OPT_fdollars_in_identifiers,
+ OPT_fno_dollars_in_identifiers,
+ Opts.DollarIdents);
+ Opts.PascalStrings = Args.hasArg(OPT_fpascal_strings);
+ Opts.MicrosoftExt
+ = Args.hasArg(OPT_fms_extensions) || Args.hasArg(OPT_fms_compatibility);
+ Opts.MicrosoftMode = Args.hasArg(OPT_fms_compatibility);
+ Opts.MSCVersion = Args.getLastArgIntValue(OPT_fmsc_version, 0, Diags);
+ Opts.Borland = Args.hasArg(OPT_fborland_extensions);
+ Opts.WritableStrings = Args.hasArg(OPT_fwritable_strings);
+ Opts.ConstStrings = Args.hasFlag(OPT_fconst_strings, OPT_fno_const_strings,
+ Opts.ConstStrings);
+ if (Args.hasArg(OPT_fno_lax_vector_conversions))
+ Opts.LaxVectorConversions = 0;
+ if (Args.hasArg(OPT_fno_threadsafe_statics))
+ Opts.ThreadsafeStatics = 0;
+ Opts.Exceptions = Args.hasArg(OPT_fexceptions);
+ Opts.ObjCExceptions = Args.hasArg(OPT_fobjc_exceptions);
+ Opts.CXXExceptions = Args.hasArg(OPT_fcxx_exceptions);
+ Opts.SjLjExceptions = Args.hasArg(OPT_fsjlj_exceptions);
+ Opts.TraditionalCPP = Args.hasArg(OPT_traditional_cpp);
+
+ Opts.RTTI = !Args.hasArg(OPT_fno_rtti);
+ Opts.Blocks = Args.hasArg(OPT_fblocks);
+ Opts.BlocksRuntimeOptional = Args.hasArg(OPT_fblocks_runtime_optional);
+ Opts.Modules = Args.hasArg(OPT_fmodules);
+ Opts.CharIsSigned = !Args.hasArg(OPT_fno_signed_char);
+ Opts.ShortWChar = Args.hasArg(OPT_fshort_wchar);
+ Opts.ShortEnums = Args.hasArg(OPT_fshort_enums);
+ Opts.Freestanding = Args.hasArg(OPT_ffreestanding);
+ Opts.FormatExtensions = Args.hasArg(OPT_fformat_extensions);
+ Opts.NoBuiltin = Args.hasArg(OPT_fno_builtin) || Opts.Freestanding;
+ Opts.AssumeSaneOperatorNew = !Args.hasArg(OPT_fno_assume_sane_operator_new);
+ Opts.HeinousExtensions = Args.hasArg(OPT_fheinous_gnu_extensions);
+ Opts.AccessControl = !Args.hasArg(OPT_fno_access_control);
+ Opts.ElideConstructors = !Args.hasArg(OPT_fno_elide_constructors);
+ Opts.MathErrno = Args.hasArg(OPT_fmath_errno);
+ Opts.InstantiationDepth = Args.getLastArgIntValue(OPT_ftemplate_depth, 1024,
+ Diags);
+ Opts.ConstexprCallDepth = Args.getLastArgIntValue(OPT_fconstexpr_depth, 512,
+ Diags);
+ Opts.DelayedTemplateParsing = Args.hasArg(OPT_fdelayed_template_parsing);
+ Opts.NumLargeByValueCopy = Args.getLastArgIntValue(OPT_Wlarge_by_value_copy,
+ 0, Diags);
+ Opts.MSBitfields = Args.hasArg(OPT_mms_bitfields);
+ Opts.NeXTRuntime = !Args.hasArg(OPT_fgnu_runtime);
+ Opts.ObjCConstantStringClass =
+ Args.getLastArgValue(OPT_fconstant_string_class);
+ Opts.ObjCNonFragileABI = !Args.hasArg(OPT_fobjc_fragile_abi);
+ if (Opts.ObjCNonFragileABI)
+ Opts.ObjCNonFragileABI2 = true;
+ Opts.ObjCDefaultSynthProperties =
+ Args.hasArg(OPT_fobjc_default_synthesize_properties);
+ Opts.CatchUndefined = Args.hasArg(OPT_fcatch_undefined_behavior);
+ Opts.EmitAllDecls = Args.hasArg(OPT_femit_all_decls);
+ Opts.PackStruct = Args.getLastArgIntValue(OPT_fpack_struct, 0, Diags);
+ Opts.PICLevel = Args.getLastArgIntValue(OPT_pic_level, 0, Diags);
+ Opts.PIELevel = Args.getLastArgIntValue(OPT_pie_level, 0, Diags);
+ Opts.Static = Args.hasArg(OPT_static_define);
+ Opts.DumpRecordLayoutsSimple = Args.hasArg(OPT_fdump_record_layouts_simple);
+ Opts.DumpRecordLayouts = Opts.DumpRecordLayoutsSimple
+ || Args.hasArg(OPT_fdump_record_layouts);
+ Opts.DumpVTableLayouts = Args.hasArg(OPT_fdump_vtable_layouts);
+ Opts.SpellChecking = !Args.hasArg(OPT_fno_spell_checking);
+ Opts.NoBitFieldTypeAlign = Args.hasArg(OPT_fno_bitfield_type_align);
+ Opts.SinglePrecisionConstants = Args.hasArg(OPT_cl_single_precision_constant);
+ Opts.FastRelaxedMath = Args.hasArg(OPT_cl_fast_relaxed_math);
+ Opts.MRTD = Args.hasArg(OPT_mrtd);
+ Opts.HexagonQdsp6Compat = Args.hasArg(OPT_mqdsp6_compat);
+ Opts.FakeAddressSpaceMap = Args.hasArg(OPT_ffake_address_space_map);
+ Opts.ParseUnknownAnytype = Args.hasArg(OPT_funknown_anytype);
+ Opts.DebuggerSupport = Args.hasArg(OPT_fdebugger_support);
+ Opts.DebuggerCastResultToId = Args.hasArg(OPT_fdebugger_cast_result_to_id);
+ Opts.DebuggerObjCLiteral = Args.hasArg(OPT_fdebugger_objc_literal);
+ Opts.AddressSanitizer = Args.hasArg(OPT_faddress_sanitizer);
+ Opts.ThreadSanitizer = Args.hasArg(OPT_fthread_sanitizer);
+ Opts.ApplePragmaPack = Args.hasArg(OPT_fapple_pragma_pack);
+ Opts.CurrentModule = Args.getLastArgValue(OPT_fmodule_name);
+
+ // Record whether the __DEPRECATED define was requested.
+ Opts.Deprecated = Args.hasFlag(OPT_fdeprecated_macro,
+ OPT_fno_deprecated_macro,
+ Opts.Deprecated);
+
+ // FIXME: Eliminate this dependency.
+ unsigned Opt = getOptimizationLevel(Args, IK, Diags);
+ Opts.Optimize = Opt != 0;
+ Opts.OptimizeSize = Args.hasArg(OPT_Os) || Args.hasArg(OPT_Oz);
+
+ // This is the __NO_INLINE__ define, which just depends on things like the
+ // optimization level and -fno-inline, not actually whether the backend has
+ // inlining enabled.
+ Opts.NoInlineDefine = !Opt || Args.hasArg(OPT_fno_inline);
+
+ Opts.FastMath = Args.hasArg(OPT_ffast_math);
+
+ unsigned SSP = Args.getLastArgIntValue(OPT_stack_protector, 0, Diags);
+ switch (SSP) {
+ default:
+ Diags.Report(diag::err_drv_invalid_value)
+ << Args.getLastArg(OPT_stack_protector)->getAsString(Args) << SSP;
+ break;
+ case 0: Opts.setStackProtector(LangOptions::SSPOff); break;
+ case 1: Opts.setStackProtector(LangOptions::SSPOn); break;
+ case 2: Opts.setStackProtector(LangOptions::SSPReq); break;
+ }
+}
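The '-std=' handling above turns the LANGSTANDARD entries of clang/Frontend/LangStandards.def into StringSwitch cases via an X-macro. Below is a minimal standalone sketch of that pattern; the two-entry standards list and the plain if-chain (standing in for llvm::StringSwitch) are illustrative only, not the real definitions.

// Hypothetical, trimmed-down sketch of the LANGSTANDARD X-macro pattern used
// above; the real entries live in clang/Frontend/LangStandards.def and the
// real code dispatches through llvm::StringSwitch.
#include <cstring>
#include <iostream>

// Stand-in for the .def file: one entry per supported standard.
#define MY_LANGSTANDARDS(X) \
  X(c99, "c99")             \
  X(cxx98, "c++98")

enum Kind {
#define LANGSTANDARD(id, name) lang_##id,
  MY_LANGSTANDARDS(LANGSTANDARD)
#undef LANGSTANDARD
  lang_unspecified
};

static Kind parseStd(const char *Arg) {
#define LANGSTANDARD(id, name) \
  if (std::strcmp(Arg, name) == 0) return lang_##id;
  MY_LANGSTANDARDS(LANGSTANDARD)
#undef LANGSTANDARD
  return lang_unspecified; // mirrors .Default(lang_unspecified) above
}

int main() {
  std::cout << (parseStd("c99") == lang_c99) << '\n';           // prints 1
  std::cout << (parseStd("bogus") == lang_unspecified) << '\n'; // prints 1
}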
+
+static void ParsePreprocessorArgs(PreprocessorOptions &Opts, ArgList &Args,
+ FileManager &FileMgr,
+ DiagnosticsEngine &Diags) {
+ using namespace cc1options;
+ Opts.ImplicitPCHInclude = Args.getLastArgValue(OPT_include_pch);
+ Opts.ImplicitPTHInclude = Args.getLastArgValue(OPT_include_pth);
+ if (const Arg *A = Args.getLastArg(OPT_token_cache))
+ Opts.TokenCache = A->getValue(Args);
+ else
+ Opts.TokenCache = Opts.ImplicitPTHInclude;
+ Opts.UsePredefines = !Args.hasArg(OPT_undef);
+ Opts.DetailedRecord = Args.hasArg(OPT_detailed_preprocessing_record);
+ Opts.DisablePCHValidation = Args.hasArg(OPT_fno_validate_pch);
+
+ Opts.DumpDeserializedPCHDecls = Args.hasArg(OPT_dump_deserialized_pch_decls);
+ for (arg_iterator it = Args.filtered_begin(OPT_error_on_deserialized_pch_decl),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+ Opts.DeserializedPCHDeclsToErrorOn.insert(A->getValue(Args));
+ }
+
+ if (const Arg *A = Args.getLastArg(OPT_preamble_bytes_EQ)) {
+ StringRef Value(A->getValue(Args));
+ size_t Comma = Value.find(',');
+ unsigned Bytes = 0;
+ unsigned EndOfLine = 0;
+
+ if (Comma == StringRef::npos ||
+ Value.substr(0, Comma).getAsInteger(10, Bytes) ||
+ Value.substr(Comma + 1).getAsInteger(10, EndOfLine))
+ Diags.Report(diag::err_drv_preamble_format);
+ else {
+ Opts.PrecompiledPreambleBytes.first = Bytes;
+ Opts.PrecompiledPreambleBytes.second = (EndOfLine != 0);
+ }
+ }
+
+ // Add macros from the command line.
+ for (arg_iterator it = Args.filtered_begin(OPT_D, OPT_U),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ if ((*it)->getOption().matches(OPT_D))
+ Opts.addMacroDef((*it)->getValue(Args));
+ else
+ Opts.addMacroUndef((*it)->getValue(Args));
+ }
+
+ Opts.MacroIncludes = Args.getAllArgValues(OPT_imacros);
+
+ // Add the ordered list of -includes.
+ for (arg_iterator it = Args.filtered_begin(OPT_include, OPT_include_pch,
+ OPT_include_pth),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+    // PCH is handled specially; we need to extract the original include path.
+ if (A->getOption().matches(OPT_include_pch)) {
+ std::string OriginalFile =
+ ASTReader::getOriginalSourceFile(A->getValue(Args), FileMgr, Diags);
+ if (OriginalFile.empty())
+ continue;
+
+ Opts.Includes.push_back(OriginalFile);
+ } else
+ Opts.Includes.push_back(A->getValue(Args));
+ }
+
+ for (arg_iterator it = Args.filtered_begin(OPT_chain_include),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+ Opts.ChainedIncludes.push_back(A->getValue(Args));
+ }
+
+  // Include 'altivec.h' if the -faltivec option is present.
+ if (Args.hasArg(OPT_faltivec))
+ Opts.Includes.push_back("altivec.h");
+
+ for (arg_iterator it = Args.filtered_begin(OPT_remap_file),
+ ie = Args.filtered_end(); it != ie; ++it) {
+ const Arg *A = *it;
+ std::pair<StringRef,StringRef> Split =
+ StringRef(A->getValue(Args)).split(';');
+
+ if (Split.second.empty()) {
+ Diags.Report(diag::err_drv_invalid_remap_file) << A->getAsString(Args);
+ continue;
+ }
+
+ Opts.addRemappedFile(Split.first, Split.second);
+ }
+
+ if (Arg *A = Args.getLastArg(OPT_fobjc_arc_cxxlib_EQ)) {
+ StringRef Name = A->getValue(Args);
+ unsigned Library = llvm::StringSwitch<unsigned>(Name)
+ .Case("libc++", ARCXX_libcxx)
+ .Case("libstdc++", ARCXX_libstdcxx)
+ .Case("none", ARCXX_nolib)
+ .Default(~0U);
+ if (Library == ~0U)
+ Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Name;
+ else
+ Opts.ObjCXXARCStandardLibrary = (ObjCXXARCStandardLibraryKind)Library;
+ }
+}
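The -remap-file loop above expects each value to be an "original;replacement" pair and rejects values whose second half is empty. Here is a small standalone sketch of that split, using std::string in place of llvm::StringRef::split; the file names are invented.

// Standalone illustration of the "original;replacement" split performed above.
#include <iostream>
#include <string>

static bool splitRemap(const std::string &Value,
                       std::string &From, std::string &To) {
  std::string::size_type Semi = Value.find(';');
  if (Semi == std::string::npos || Semi + 1 == Value.size())
    return false;               // mirrors the err_drv_invalid_remap_file case
  From = Value.substr(0, Semi);
  To = Value.substr(Semi + 1);
  return true;
}

int main() {
  std::string From, To;
  if (splitRemap("foo.h;foo_fixed.h", From, To))
    std::cout << From << " -> " << To << '\n';   // foo.h -> foo_fixed.h
}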
+
+static void ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
+ ArgList &Args) {
+ using namespace cc1options;
+ Opts.ShowCPP = !Args.hasArg(OPT_dM);
+ Opts.ShowComments = Args.hasArg(OPT_C);
+ Opts.ShowLineMarkers = !Args.hasArg(OPT_P);
+ Opts.ShowMacroComments = Args.hasArg(OPT_CC);
+ Opts.ShowMacros = Args.hasArg(OPT_dM) || Args.hasArg(OPT_dD);
+}
+
+static void ParseTargetArgs(TargetOptions &Opts, ArgList &Args) {
+ using namespace cc1options;
+ Opts.ABI = Args.getLastArgValue(OPT_target_abi);
+ Opts.CXXABI = Args.getLastArgValue(OPT_cxx_abi);
+ Opts.CPU = Args.getLastArgValue(OPT_target_cpu);
+ Opts.Features = Args.getAllArgValues(OPT_target_feature);
+ Opts.LinkerVersion = Args.getLastArgValue(OPT_target_linker_version);
+ Opts.Triple = llvm::Triple::normalize(Args.getLastArgValue(OPT_triple));
+
+ // Use the default target triple if unspecified.
+ if (Opts.Triple.empty())
+ Opts.Triple = llvm::sys::getDefaultTargetTriple();
+}
+
+//
+
+bool CompilerInvocation::CreateFromArgs(CompilerInvocation &Res,
+ const char *const *ArgBegin,
+ const char *const *ArgEnd,
+ DiagnosticsEngine &Diags) {
+ bool Success = true;
+
+ // Parse the arguments.
+ OwningPtr<OptTable> Opts(createCC1OptTable());
+ unsigned MissingArgIndex, MissingArgCount;
+ OwningPtr<InputArgList> Args(
+ Opts->ParseArgs(ArgBegin, ArgEnd,MissingArgIndex, MissingArgCount));
+
+ // Check for missing argument error.
+ if (MissingArgCount) {
+ Diags.Report(diag::err_drv_missing_argument)
+ << Args->getArgString(MissingArgIndex) << MissingArgCount;
+ Success = false;
+ }
+
+ // Issue errors on unknown arguments.
+ for (arg_iterator it = Args->filtered_begin(OPT_UNKNOWN),
+ ie = Args->filtered_end(); it != ie; ++it) {
+ Diags.Report(diag::err_drv_unknown_argument) << (*it)->getAsString(*Args);
+ Success = false;
+ }
+
+ Success = ParseAnalyzerArgs(Res.getAnalyzerOpts(), *Args, Diags) && Success;
+ Success = ParseMigratorArgs(Res.getMigratorOpts(), *Args) && Success;
+ ParseDependencyOutputArgs(Res.getDependencyOutputOpts(), *Args);
+ Success = ParseDiagnosticArgs(Res.getDiagnosticOpts(), *Args, &Diags)
+ && Success;
+ ParseFileSystemArgs(Res.getFileSystemOpts(), *Args);
+ // FIXME: We shouldn't have to pass the DashX option around here
+ InputKind DashX = ParseFrontendArgs(Res.getFrontendOpts(), *Args, Diags);
+ Success = ParseCodeGenArgs(Res.getCodeGenOpts(), *Args, DashX, Diags)
+ && Success;
+ ParseHeaderSearchArgs(Res.getHeaderSearchOpts(), *Args);
+ if (DashX != IK_AST && DashX != IK_LLVM_IR) {
+ ParseLangArgs(*Res.getLangOpts(), *Args, DashX, Diags);
+ if (Res.getFrontendOpts().ProgramAction == frontend::RewriteObjC)
+ Res.getLangOpts()->ObjCExceptions = 1;
+ }
+  // FIXME: ParsePreprocessorArgs uses the FileManager to read the contents of
+  // the PCH file and find the original header name. Remove the need to do that
+  // in ParsePreprocessorArgs, and remove the FileManager parameter from the
+  // function and the "FileManager.h" #include.
+ FileManager FileMgr(Res.getFileSystemOpts());
+ ParsePreprocessorArgs(Res.getPreprocessorOpts(), *Args, FileMgr, Diags);
+ ParsePreprocessorOutputArgs(Res.getPreprocessorOutputOpts(), *Args);
+ ParseTargetArgs(Res.getTargetOpts(), *Args);
+
+ return Success;
+}
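For context, a caller hands CreateFromArgs a -cc1-style argument vector plus a DiagnosticsEngine. The hedged sketch below shows one plausible way to do that, reusing CompilerInstance::createDiagnostics the same way CreateInvocationFromCommandLine.cpp (later in this diff) does; the argument spellings, triple and file name are made up.

// Hedged usage sketch: the argument list and file name are invented.
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/DiagnosticOptions.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"

static bool buildInvocation(clang::CompilerInvocation &Inv) {
  const char *Args[] = { "-triple", "x86_64-unknown-freebsd10.0",
                         "-std=c99", "input.c" };
  clang::DiagnosticOptions DiagOpts;
  llvm::IntrusiveRefCntPtr<clang::DiagnosticsEngine> Diags =
      clang::CompilerInstance::createDiagnostics(DiagOpts, 4, Args);
  // Returns false if any of the Parse*Args helpers above reported an error.
  return clang::CompilerInvocation::CreateFromArgs(Inv, Args, Args + 4, *Diags);
}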
+
+namespace {
+
+ class ModuleSignature {
+ llvm::SmallVector<uint64_t, 16> Data;
+ unsigned CurBit;
+ uint64_t CurValue;
+
+ public:
+ ModuleSignature() : CurBit(0), CurValue(0) { }
+
+ void add(uint64_t Value, unsigned Bits);
+ void add(StringRef Value);
+ void flush();
+
+ llvm::APInt getAsInteger() const;
+ };
+}
+
+void ModuleSignature::add(uint64_t Value, unsigned int NumBits) {
+ CurValue |= Value << CurBit;
+ if (CurBit + NumBits < 64) {
+ CurBit += NumBits;
+ return;
+ }
+
+ // Add the current word.
+ Data.push_back(CurValue);
+
+ if (CurBit)
+ CurValue = Value >> (64-CurBit);
+ else
+ CurValue = 0;
+ CurBit = (CurBit+NumBits) & 63;
+}
+
+void ModuleSignature::flush() {
+ if (CurBit == 0)
+ return;
+
+ Data.push_back(CurValue);
+ CurBit = 0;
+ CurValue = 0;
+}
+
+void ModuleSignature::add(StringRef Value) {
+ for (StringRef::iterator I = Value.begin(), IEnd = Value.end(); I != IEnd;++I)
+ add(*I, 8);
+}
+
+llvm::APInt ModuleSignature::getAsInteger() const {
+ return llvm::APInt(Data.size() * 64, Data);
+}
+
+std::string CompilerInvocation::getModuleHash() const {
+ ModuleSignature Signature;
+
+ // Start the signature with the compiler version.
+ // FIXME: The full version string can be quite long. Omit it from the
+ // module hash for now to avoid failures where the path name becomes too
+ // long. An MD5 or similar checksum would work well here.
+ // Signature.add(getClangFullRepositoryVersion());
+
+ // Extend the signature with the language options
+#define LANGOPT(Name, Bits, Default, Description) \
+ Signature.add(LangOpts->Name, Bits);
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ Signature.add(static_cast<unsigned>(LangOpts->get##Name()), Bits);
+#define BENIGN_LANGOPT(Name, Bits, Default, Description)
+#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description)
+#include "clang/Basic/LangOptions.def"
+
+ // Extend the signature with the target triple
+ llvm::Triple T(TargetOpts.Triple);
+ Signature.add((unsigned)T.getArch(), 5);
+ Signature.add((unsigned)T.getVendor(), 4);
+ Signature.add((unsigned)T.getOS(), 5);
+ Signature.add((unsigned)T.getEnvironment(), 4);
+
+ // Extend the signature with preprocessor options.
+ Signature.add(getPreprocessorOpts().UsePredefines, 1);
+ Signature.add(getPreprocessorOpts().DetailedRecord, 1);
+
+ // Hash the preprocessor defines.
+ // FIXME: This is terrible. Use an MD5 sum of the preprocessor defines.
+ std::vector<StringRef> MacroDefs;
+ for (std::vector<std::pair<std::string, bool/*isUndef*/> >::const_iterator
+ I = getPreprocessorOpts().Macros.begin(),
+ IEnd = getPreprocessorOpts().Macros.end();
+ I != IEnd; ++I) {
+ if (!I->second)
+ MacroDefs.push_back(I->first);
+ }
+ llvm::array_pod_sort(MacroDefs.begin(), MacroDefs.end());
+
+ unsigned PPHashResult = 0;
+ for (unsigned I = 0, N = MacroDefs.size(); I != N; ++I)
+ PPHashResult = llvm::HashString(MacroDefs[I], PPHashResult);
+ Signature.add(PPHashResult, 32);
+
+ // We've generated the signature. Treat it as one large APInt that we'll
+ // encode in base-36 and return.
+ Signature.flush();
+ return Signature.getAsInteger().toString(36, /*Signed=*/false);
+}
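ModuleSignature above packs fixed-width option fields LSB-first into 64-bit words before rendering them as one large base-36 integer. The standalone sketch below reproduces just the word-packing step without llvm::APInt; the field values are invented.

// Standalone sketch of the word-packing scheme used by ModuleSignature above;
// the base-36 rendering step is omitted.
#include <iostream>
#include <stdint.h>
#include <vector>

struct BitPacker {
  std::vector<uint64_t> Words;
  unsigned CurBit;
  uint64_t CurValue;

  BitPacker() : CurBit(0), CurValue(0) {}

  void add(uint64_t Value, unsigned Bits) {
    CurValue |= Value << CurBit;
    if (CurBit + Bits < 64) { CurBit += Bits; return; }
    Words.push_back(CurValue);                       // the current word is full
    CurValue = CurBit ? Value >> (64 - CurBit) : 0;  // carry the overflow bits
    CurBit = (CurBit + Bits) & 63;
  }

  void flush() {
    if (CurBit) { Words.push_back(CurValue); CurBit = 0; CurValue = 0; }
  }
};

int main() {
  BitPacker P;
  P.add(3, 5);  // e.g. a 5-bit architecture code
  P.add(1, 4);  // e.g. a 4-bit vendor code
  P.flush();
  std::cout << std::hex << P.Words[0] << '\n';  // prints 23 (0x23 = 1 << 5 | 3)
}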
diff --git a/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp b/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
new file mode 100644
index 0000000..b477ade
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -0,0 +1,91 @@
+//===--- CreateInvocationFromCommandLine.cpp - CompilerInvocation from Args ==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Construct a compiler invocation object for command line driver arguments
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/DiagnosticOptions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Options.h"
+#include "clang/Driver/Tool.h"
+#include "llvm/Support/Host.h"
+using namespace clang;
+
+/// createInvocationFromCommandLine - Construct a compiler invocation object for
+/// a command line argument vector.
+///
+/// \return A CompilerInvocation, or 0 if none was built for the given
+/// argument vector.
+CompilerInvocation *
+clang::createInvocationFromCommandLine(ArrayRef<const char *> ArgList,
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags) {
+ if (!Diags.getPtr()) {
+ // No diagnostics engine was provided, so create our own diagnostics object
+ // with the default options.
+ DiagnosticOptions DiagOpts;
+ Diags = CompilerInstance::createDiagnostics(DiagOpts, ArgList.size(),
+ ArgList.begin());
+ }
+
+ SmallVector<const char *, 16> Args;
+ Args.push_back("<clang>"); // FIXME: Remove dummy argument.
+ Args.insert(Args.end(), ArgList.begin(), ArgList.end());
+
+ // FIXME: Find a cleaner way to force the driver into restricted modes. We
+ // also want to force it to use clang.
+ Args.push_back("-fsyntax-only");
+
+ // FIXME: We shouldn't have to pass in the path info.
+ driver::Driver TheDriver("clang", llvm::sys::getDefaultTargetTriple(),
+ "a.out", false, *Diags);
+
+  // Don't check that inputs exist; they may have been remapped.
+ TheDriver.setCheckInputsExist(false);
+
+ OwningPtr<driver::Compilation> C(TheDriver.BuildCompilation(Args));
+
+ // Just print the cc1 options if -### was present.
+ if (C->getArgs().hasArg(driver::options::OPT__HASH_HASH_HASH)) {
+ C->PrintJob(llvm::errs(), C->getJobs(), "\n", true);
+ return 0;
+ }
+
+  // We expect to get back exactly one command job; if we didn't, something
+  // failed.
+ const driver::JobList &Jobs = C->getJobs();
+ if (Jobs.size() != 1 || !isa<driver::Command>(*Jobs.begin())) {
+ SmallString<256> Msg;
+ llvm::raw_svector_ostream OS(Msg);
+ C->PrintJob(OS, C->getJobs(), "; ", true);
+ Diags->Report(diag::err_fe_expected_compiler_job) << OS.str();
+ return 0;
+ }
+
+ const driver::Command *Cmd = cast<driver::Command>(*Jobs.begin());
+ if (StringRef(Cmd->getCreator().getName()) != "clang") {
+ Diags->Report(diag::err_fe_expected_clang_command);
+ return 0;
+ }
+
+ const driver::ArgStringList &CCArgs = Cmd->getArguments();
+ OwningPtr<CompilerInvocation> CI(new CompilerInvocation());
+ if (!CompilerInvocation::CreateFromArgs(*CI,
+ const_cast<const char **>(CCArgs.data()),
+ const_cast<const char **>(CCArgs.data()) +
+ CCArgs.size(),
+ *Diags))
+ return 0;
+ return CI.take();
+}
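A hedged sketch of calling the helper above from a tool: the arguments are driver-style (not -cc1 style), the diagnostics pointer is left empty so the function creates its own engine, and the file name is invented.

// Hypothetical caller; a null result means the driver could not be reduced to
// a single 'clang -cc1' job for these arguments.
#include "clang/Basic/Diagnostic.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/Utils.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/IntrusiveRefCntPtr.h"
#include "llvm/ADT/OwningPtr.h"

static bool haveInvocation() {
  const char *Args[] = { "-x", "c++", "-std=c++98", "t.cpp" };
  llvm::OwningPtr<clang::CompilerInvocation> CI(
      clang::createInvocationFromCommandLine(
          llvm::ArrayRef<const char *>(Args, 4),
          llvm::IntrusiveRefCntPtr<clang::DiagnosticsEngine>()));
  return CI.get() != 0;
}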
diff --git a/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp b/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp
new file mode 100644
index 0000000..21f5daa
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/DependencyFile.cpp
@@ -0,0 +1,231 @@
+//===--- DependencyFile.cpp - Generate dependency file --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This code generates dependency files.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/DependencyOutputOptions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Lex/DirectoryLookup.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+namespace {
+class DependencyFileCallback : public PPCallbacks {
+ std::vector<std::string> Files;
+ llvm::StringSet<> FilesSet;
+ const Preprocessor *PP;
+ std::string OutputFile;
+ std::vector<std::string> Targets;
+ bool IncludeSystemHeaders;
+ bool PhonyTarget;
+ bool AddMissingHeaderDeps;
+ bool SeenMissingHeader;
+private:
+ bool FileMatchesDepCriteria(const char *Filename,
+ SrcMgr::CharacteristicKind FileType);
+ void AddFilename(StringRef Filename);
+ void OutputDependencyFile();
+
+public:
+ DependencyFileCallback(const Preprocessor *_PP,
+ const DependencyOutputOptions &Opts)
+ : PP(_PP), OutputFile(Opts.OutputFile), Targets(Opts.Targets),
+ IncludeSystemHeaders(Opts.IncludeSystemHeaders),
+ PhonyTarget(Opts.UsePhonyTargets),
+ AddMissingHeaderDeps(Opts.AddMissingHeaderDeps),
+ SeenMissingHeader(false) {}
+
+ virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID);
+ virtual void InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ StringRef FileName,
+ bool IsAngled,
+ const FileEntry *File,
+ SourceLocation EndLoc,
+ StringRef SearchPath,
+ StringRef RelativePath);
+
+ virtual void EndOfMainFile() {
+ OutputDependencyFile();
+ }
+};
+}
+
+void clang::AttachDependencyFileGen(Preprocessor &PP,
+ const DependencyOutputOptions &Opts) {
+ if (Opts.Targets.empty()) {
+ PP.getDiagnostics().Report(diag::err_fe_dependency_file_requires_MT);
+ return;
+ }
+
+ // Disable the "file not found" diagnostic if the -MG option was given.
+ if (Opts.AddMissingHeaderDeps)
+ PP.SetSuppressIncludeNotFoundError(true);
+
+ PP.addPPCallbacks(new DependencyFileCallback(&PP, Opts));
+}
+
+/// FileMatchesDepCriteria - Determine whether the given Filename should be
+/// considered as a dependency.
+bool DependencyFileCallback::FileMatchesDepCriteria(const char *Filename,
+ SrcMgr::CharacteristicKind FileType) {
+ if (strcmp("<built-in>", Filename) == 0)
+ return false;
+
+ if (IncludeSystemHeaders)
+ return true;
+
+ return FileType == SrcMgr::C_User;
+}
+
+void DependencyFileCallback::FileChanged(SourceLocation Loc,
+ FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID) {
+ if (Reason != PPCallbacks::EnterFile)
+ return;
+
+ // Dependency generation really does want to go all the way to the
+ // file entry for a source location to find out what is depended on.
+ // We do not want #line markers to affect dependency generation!
+ SourceManager &SM = PP->getSourceManager();
+
+ const FileEntry *FE =
+ SM.getFileEntryForID(SM.getFileID(SM.getExpansionLoc(Loc)));
+ if (FE == 0) return;
+
+ StringRef Filename = FE->getName();
+ if (!FileMatchesDepCriteria(Filename.data(), FileType))
+ return;
+
+ // Remove leading "./" (or ".//" or "././" etc.)
+ while (Filename.size() > 2 && Filename[0] == '.' &&
+ llvm::sys::path::is_separator(Filename[1])) {
+ Filename = Filename.substr(1);
+ while (llvm::sys::path::is_separator(Filename[0]))
+ Filename = Filename.substr(1);
+ }
+
+ AddFilename(Filename);
+}
+
+void DependencyFileCallback::InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ StringRef FileName,
+ bool IsAngled,
+ const FileEntry *File,
+ SourceLocation EndLoc,
+ StringRef SearchPath,
+ StringRef RelativePath) {
+ if (!File) {
+ if (AddMissingHeaderDeps)
+ AddFilename(FileName);
+ else
+ SeenMissingHeader = true;
+ }
+}
+
+void DependencyFileCallback::AddFilename(StringRef Filename) {
+ if (FilesSet.insert(Filename))
+ Files.push_back(Filename);
+}
+
+/// PrintFilename - GCC escapes spaces, but apparently not ' or " or other
+/// scary characters.
+static void PrintFilename(raw_ostream &OS, StringRef Filename) {
+ for (unsigned i = 0, e = Filename.size(); i != e; ++i) {
+ if (Filename[i] == ' ')
+ OS << '\\';
+ OS << Filename[i];
+ }
+}
+
+void DependencyFileCallback::OutputDependencyFile() {
+ if (SeenMissingHeader) {
+ llvm::sys::Path(OutputFile).eraseFromDisk();
+ return;
+ }
+
+ std::string Err;
+ llvm::raw_fd_ostream OS(OutputFile.c_str(), Err);
+ if (!Err.empty()) {
+ PP->getDiagnostics().Report(diag::err_fe_error_opening)
+ << OutputFile << Err;
+ return;
+ }
+
+ // Write out the dependency targets, trying to avoid overly long
+ // lines when possible. We try our best to emit exactly the same
+ // dependency file as GCC (4.2), assuming the included files are the
+ // same.
+ const unsigned MaxColumns = 75;
+ unsigned Columns = 0;
+
+ for (std::vector<std::string>::iterator
+ I = Targets.begin(), E = Targets.end(); I != E; ++I) {
+ unsigned N = I->length();
+ if (Columns == 0) {
+ Columns += N;
+ } else if (Columns + N + 2 > MaxColumns) {
+ Columns = N + 2;
+ OS << " \\\n ";
+ } else {
+ Columns += N + 1;
+ OS << ' ';
+ }
+ // Targets already quoted as needed.
+ OS << *I;
+ }
+
+ OS << ':';
+ Columns += 1;
+
+ // Now add each dependency in the order it was seen, but avoiding
+ // duplicates.
+ for (std::vector<std::string>::iterator I = Files.begin(),
+ E = Files.end(); I != E; ++I) {
+ // Start a new line if this would exceed the column limit. Make
+ // sure to leave space for a trailing " \" in case we need to
+ // break the line on the next iteration.
+ unsigned N = I->length();
+ if (Columns + (N + 1) + 2 > MaxColumns) {
+ OS << " \\\n ";
+ Columns = 2;
+ }
+ OS << ' ';
+ PrintFilename(OS, *I);
+ Columns += N + 1;
+ }
+ OS << '\n';
+
+ // Create phony targets if requested.
+ if (PhonyTarget && !Files.empty()) {
+ // Skip the first entry, this is always the input file itself.
+ for (std::vector<std::string>::iterator I = Files.begin() + 1,
+ E = Files.end(); I != E; ++I) {
+ OS << '\n';
+ PrintFilename(OS, *I);
+ OS << ":\n";
+ }
+ }
+}
+
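OutputDependencyFile() above wraps its make-style output near 75 columns, always leaving room for a trailing " \". Below is a standalone sketch of just that wrapping logic over invented file names.

// Standalone sketch of the 75-column wrapping used above; no escaping is done.
#include <cstring>
#include <iostream>

int main() {
  const unsigned MaxColumns = 75;
  const char *Deps[] = { "main.c", "a_rather_long_header_name.h",
                         "another_rather_long_header_name.h", "config.h" };
  std::cout << "main.o:";
  unsigned Columns = 7;                       // strlen("main.o:")
  for (unsigned i = 0; i != 4; ++i) {
    unsigned N = std::strlen(Deps[i]);
    // Break early enough to leave room for a trailing " \" on this line.
    if (Columns + (N + 1) + 2 > MaxColumns) {
      std::cout << " \\\n ";
      Columns = 2;
    }
    std::cout << ' ' << Deps[i];
    Columns += N + 1;
  }
  std::cout << '\n';
}

With these invented names the first output line ends after a_rather_long_header_name.h with " \" and the remaining two dependencies continue on an indented second line, matching the behaviour of the loop above.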
diff --git a/contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp b/contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp
new file mode 100644
index 0000000..eebaf0c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/DependencyGraph.cpp
@@ -0,0 +1,140 @@
+//===--- DependencyGraph.cpp - Generate dependency graph ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This code generates a header dependency graph in DOT format, for use
+// with, e.g., GraphViz.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/GraphWriter.h"
+
+using namespace clang;
+namespace DOT = llvm::DOT;
+
+namespace {
+class DependencyGraphCallback : public PPCallbacks {
+ const Preprocessor *PP;
+ std::string OutputFile;
+ std::string SysRoot;
+ llvm::SetVector<const FileEntry *> AllFiles;
+ typedef llvm::DenseMap<const FileEntry *,
+ llvm::SmallVector<const FileEntry *, 2> >
+ DependencyMap;
+
+ DependencyMap Dependencies;
+
+private:
+ llvm::raw_ostream &writeNodeReference(llvm::raw_ostream &OS,
+ const FileEntry *Node);
+ void OutputGraphFile();
+
+public:
+ DependencyGraphCallback(const Preprocessor *_PP, StringRef OutputFile,
+ StringRef SysRoot)
+ : PP(_PP), OutputFile(OutputFile.str()), SysRoot(SysRoot.str()) { }
+
+ virtual void InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ StringRef FileName,
+ bool IsAngled,
+ const FileEntry *File,
+ SourceLocation EndLoc,
+ StringRef SearchPath,
+ StringRef RelativePath);
+
+ virtual void EndOfMainFile() {
+ OutputGraphFile();
+ }
+
+};
+}
+
+void clang::AttachDependencyGraphGen(Preprocessor &PP, StringRef OutputFile,
+ StringRef SysRoot) {
+ PP.addPPCallbacks(new DependencyGraphCallback(&PP, OutputFile, SysRoot));
+}
+
+void DependencyGraphCallback::InclusionDirective(SourceLocation HashLoc,
+ const Token &IncludeTok,
+ StringRef FileName,
+ bool IsAngled,
+ const FileEntry *File,
+ SourceLocation EndLoc,
+ StringRef SearchPath,
+ StringRef RelativePath) {
+ if (!File)
+ return;
+
+ SourceManager &SM = PP->getSourceManager();
+ const FileEntry *FromFile
+ = SM.getFileEntryForID(SM.getFileID(SM.getExpansionLoc(HashLoc)));
+ if (FromFile == 0)
+ return;
+
+ Dependencies[FromFile].push_back(File);
+
+ AllFiles.insert(File);
+ AllFiles.insert(FromFile);
+}
+
+llvm::raw_ostream &
+DependencyGraphCallback::writeNodeReference(llvm::raw_ostream &OS,
+ const FileEntry *Node) {
+ OS << "header_" << Node->getUID();
+ return OS;
+}
+
+void DependencyGraphCallback::OutputGraphFile() {
+ std::string Err;
+ llvm::raw_fd_ostream OS(OutputFile.c_str(), Err);
+ if (!Err.empty()) {
+ PP->getDiagnostics().Report(diag::err_fe_error_opening)
+ << OutputFile << Err;
+ return;
+ }
+
+ OS << "digraph \"dependencies\" {\n";
+
+ // Write the nodes
+ for (unsigned I = 0, N = AllFiles.size(); I != N; ++I) {
+ // Write the node itself.
+ OS.indent(2);
+ writeNodeReference(OS, AllFiles[I]);
+ OS << " [ shape=\"box\", label=\"";
+ StringRef FileName = AllFiles[I]->getName();
+ if (FileName.startswith(SysRoot))
+ FileName = FileName.substr(SysRoot.size());
+
+ OS << DOT::EscapeString(FileName)
+ << "\"];\n";
+ }
+
+ // Write the edges
+ for (DependencyMap::iterator F = Dependencies.begin(),
+ FEnd = Dependencies.end();
+ F != FEnd; ++F) {
+ for (unsigned I = 0, N = F->second.size(); I != N; ++I) {
+ OS.indent(2);
+ writeNodeReference(OS, F->first);
+ OS << " -> ";
+ writeNodeReference(OS, F->second[I]);
+ OS << ";\n";
+ }
+ }
+ OS << "}\n";
+}
+
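For reference, the graph written by OutputGraphFile() above has the DOT shape printed by this tiny standalone sketch; the node ids, labels and the single edge are invented.

// Illustrative only: the shape of the emitted DOT output.
#include <iostream>

int main() {
  std::cout << "digraph \"dependencies\" {\n";
  std::cout << "  header_0 [ shape=\"box\", label=\"main.c\"];\n";
  std::cout << "  header_1 [ shape=\"box\", label=\"util.h\"];\n";
  std::cout << "  header_0 -> header_1;\n";
  std::cout << "}\n";
}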
diff --git a/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp b/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp
new file mode 100644
index 0000000..6c3bb1d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/DiagnosticRenderer.cpp
@@ -0,0 +1,386 @@
+//===--- DiagnosticRenderer.cpp - Diagnostic Pretty-Printing --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/DiagnosticRenderer.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/DiagnosticOptions.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Edit/EditedSource.h"
+#include "clang/Edit/Commit.h"
+#include "clang/Edit/EditsReceiver.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/SmallString.h"
+#include <algorithm>
+using namespace clang;
+
+/// Look through spelling locations for a macro argument expansion, and
+/// if found skip to it so that we can trace the argument rather than the macros
+/// in which that argument is used. If no macro argument expansion is found,
+/// don't skip anything and return the starting location.
+static SourceLocation skipToMacroArgExpansion(const SourceManager &SM,
+ SourceLocation StartLoc) {
+ for (SourceLocation L = StartLoc; L.isMacroID();
+ L = SM.getImmediateSpellingLoc(L)) {
+ if (SM.isMacroArgExpansion(L))
+ return L;
+ }
+
+  // Otherwise just return the initial location; there's nothing to skip.
+ return StartLoc;
+}
+
+/// Gets the location of the immediate macro caller, one level up the stack
+/// toward the initial macro typed into the source.
+static SourceLocation getImmediateMacroCallerLoc(const SourceManager &SM,
+ SourceLocation Loc) {
+ if (!Loc.isMacroID()) return Loc;
+
+ // When we have the location of (part of) an expanded parameter, its spelling
+ // location points to the argument as typed into the macro call, and
+ // therefore is used to locate the macro caller.
+ if (SM.isMacroArgExpansion(Loc))
+ return SM.getImmediateSpellingLoc(Loc);
+
+ // Otherwise, the caller of the macro is located where this macro is
+ // expanded (while the spelling is part of the macro definition).
+ return SM.getImmediateExpansionRange(Loc).first;
+}
+
+/// Gets the location of the immediate macro callee, one level down the stack
+/// toward the leaf macro.
+static SourceLocation getImmediateMacroCalleeLoc(const SourceManager &SM,
+ SourceLocation Loc) {
+ if (!Loc.isMacroID()) return Loc;
+
+ // When we have the location of (part of) an expanded parameter, its
+  // expansion location points to the unexpanded parameter reference within
+ // the macro definition (or callee).
+ if (SM.isMacroArgExpansion(Loc))
+ return SM.getImmediateExpansionRange(Loc).first;
+
+ // Otherwise, the callee of the macro is located where this location was
+ // spelled inside the macro definition.
+ return SM.getImmediateSpellingLoc(Loc);
+}
+
+/// \brief Retrieve the name of the immediate macro expansion.
+///
+/// This routine starts from a source location, and finds the name of the macro
+/// responsible for its immediate expansion. It looks through any intervening
+/// macro argument expansions to compute this. It returns a StringRef which
+/// refers to the SourceManager-owned buffer of the source where that macro
+/// name is spelled. Thus, the result shouldn't out-live that SourceManager.
+///
+/// This differs from Lexer::getImmediateMacroName in that any macro argument
+/// location will result in the topmost function macro that accepted it.
+/// e.g.
+/// \code
+/// MAC1( MAC2(foo) )
+/// \endcode
+/// for location of 'foo' token, this function will return "MAC1" while
+/// Lexer::getImmediateMacroName will return "MAC2".
+static StringRef getImmediateMacroName(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+  assert(Loc.isMacroID() && "Only reasonable to call this on macros");
+  // Walk past macro argument expansions.
+ while (SM.isMacroArgExpansion(Loc))
+ Loc = SM.getImmediateExpansionRange(Loc).first;
+
+ // Find the spelling location of the start of the non-argument expansion
+ // range. This is where the macro name was spelled in order to begin
+ // expanding this macro.
+ Loc = SM.getSpellingLoc(SM.getImmediateExpansionRange(Loc).first);
+
+ // Dig out the buffer where the macro name was spelled and the extents of the
+ // name so that we can render it into the expansion note.
+ std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
+ unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
+ StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
+ return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
+}
+
+/// Get the presumed location of a diagnostic message. This computes the
+/// presumed location for the top of any macro backtrace when present.
+static PresumedLoc getDiagnosticPresumedLoc(const SourceManager &SM,
+ SourceLocation Loc) {
+ // This is a condensed form of the algorithm used by emitCaretDiagnostic to
+ // walk to the top of the macro call stack.
+ while (Loc.isMacroID()) {
+ Loc = skipToMacroArgExpansion(SM, Loc);
+ Loc = getImmediateMacroCallerLoc(SM, Loc);
+ }
+
+ return SM.getPresumedLoc(Loc);
+}
+
+DiagnosticRenderer::DiagnosticRenderer(const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const DiagnosticOptions &DiagOpts)
+: SM(SM), LangOpts(LangOpts), DiagOpts(DiagOpts), LastLevel() {}
+
+DiagnosticRenderer::~DiagnosticRenderer() {}
+
+namespace {
+
+class FixitReceiver : public edit::EditsReceiver {
+ SmallVectorImpl<FixItHint> &MergedFixits;
+
+public:
+ FixitReceiver(SmallVectorImpl<FixItHint> &MergedFixits)
+ : MergedFixits(MergedFixits) { }
+ virtual void insert(SourceLocation loc, StringRef text) {
+ MergedFixits.push_back(FixItHint::CreateInsertion(loc, text));
+ }
+ virtual void replace(CharSourceRange range, StringRef text) {
+ MergedFixits.push_back(FixItHint::CreateReplacement(range, text));
+ }
+};
+
+}
+
+static void mergeFixits(ArrayRef<FixItHint> FixItHints,
+ const SourceManager &SM, const LangOptions &LangOpts,
+ SmallVectorImpl<FixItHint> &MergedFixits) {
+ edit::Commit commit(SM, LangOpts);
+ for (ArrayRef<FixItHint>::const_iterator
+ I = FixItHints.begin(), E = FixItHints.end(); I != E; ++I) {
+ const FixItHint &Hint = *I;
+ if (Hint.CodeToInsert.empty()) {
+ if (Hint.InsertFromRange.isValid())
+ commit.insertFromRange(Hint.RemoveRange.getBegin(),
+ Hint.InsertFromRange, /*afterToken=*/false,
+ Hint.BeforePreviousInsertions);
+ else
+ commit.remove(Hint.RemoveRange);
+ } else {
+ if (Hint.RemoveRange.isTokenRange() ||
+ Hint.RemoveRange.getBegin() != Hint.RemoveRange.getEnd())
+ commit.replace(Hint.RemoveRange, Hint.CodeToInsert);
+ else
+ commit.insert(Hint.RemoveRange.getBegin(), Hint.CodeToInsert,
+ /*afterToken=*/false, Hint.BeforePreviousInsertions);
+ }
+ }
+
+ edit::EditedSource Editor(SM, LangOpts);
+ if (Editor.commit(commit)) {
+ FixitReceiver Rec(MergedFixits);
+ Editor.applyRewrites(Rec);
+ }
+}
+
+void DiagnosticRenderer::emitDiagnostic(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ ArrayRef<CharSourceRange> Ranges,
+ ArrayRef<FixItHint> FixItHints,
+ DiagOrStoredDiag D) {
+
+ beginDiagnostic(D, Level);
+
+ PresumedLoc PLoc = getDiagnosticPresumedLoc(SM, Loc);
+
+ // First, if this diagnostic is not in the main file, print out the
+ // "included from" lines.
+ emitIncludeStack(PLoc.getIncludeLoc(), Level);
+
+ // Next, emit the actual diagnostic message.
+ emitDiagnosticMessage(Loc, PLoc, Level, Message, Ranges, D);
+
+ // Only recurse if we have a valid location.
+ if (Loc.isValid()) {
+ // Get the ranges into a local array we can hack on.
+ SmallVector<CharSourceRange, 20> MutableRanges(Ranges.begin(),
+ Ranges.end());
+
+ llvm::SmallVector<FixItHint, 8> MergedFixits;
+ if (!FixItHints.empty()) {
+ mergeFixits(FixItHints, SM, LangOpts, MergedFixits);
+ FixItHints = MergedFixits;
+ }
+
+ for (ArrayRef<FixItHint>::const_iterator I = FixItHints.begin(),
+ E = FixItHints.end();
+ I != E; ++I)
+ if (I->RemoveRange.isValid())
+ MutableRanges.push_back(I->RemoveRange);
+
+ unsigned MacroDepth = 0;
+ emitMacroExpansionsAndCarets(Loc, Level, MutableRanges, FixItHints,
+ MacroDepth);
+ }
+
+ LastLoc = Loc;
+ LastLevel = Level;
+
+ endDiagnostic(D, Level);
+}
+
+
+void DiagnosticRenderer::emitStoredDiagnostic(StoredDiagnostic &Diag) {
+ emitDiagnostic(Diag.getLocation(), Diag.getLevel(), Diag.getMessage(),
+ Diag.getRanges(), Diag.getFixIts(),
+ &Diag);
+}
+
+/// \brief Prints an include stack when appropriate for a particular
+/// diagnostic level and location.
+///
+/// This routine handles all the logic of suppressing particular include
+/// stacks (such as those for notes) and duplicate include stacks when
+/// repeated warnings occur within the same file. It also handles the logic
+/// of customizing the formatting and display of the include stack.
+///
+/// \param Level The diagnostic level of the message this stack pertains to.
+/// \param Loc The include location of the current file (not the diagnostic
+/// location).
+void DiagnosticRenderer::emitIncludeStack(SourceLocation Loc,
+ DiagnosticsEngine::Level Level) {
+ // Skip redundant include stacks altogether.
+ if (LastIncludeLoc == Loc)
+ return;
+ LastIncludeLoc = Loc;
+
+ if (!DiagOpts.ShowNoteIncludeStack && Level == DiagnosticsEngine::Note)
+ return;
+
+ emitIncludeStackRecursively(Loc);
+}
+
+/// \brief Helper to recursively walk up the include stack and print each layer
+/// on the way back down.
+void DiagnosticRenderer::emitIncludeStackRecursively(SourceLocation Loc) {
+ if (Loc.isInvalid())
+ return;
+
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ if (PLoc.isInvalid())
+ return;
+
+ // Emit the other include frames first.
+ emitIncludeStackRecursively(PLoc.getIncludeLoc());
+
+ // Emit the inclusion text/note.
+ emitIncludeLocation(Loc, PLoc);
+}
+
+/// \brief Recursively emit notes for each macro expansion and caret
+/// diagnostics where appropriate.
+///
+/// Walks up the macro expansion stack printing expansion notes, the code
+/// snippet, caret, underlines and FixItHint display as appropriate at each
+/// level.
+///
+/// \param Loc The location for this caret.
+/// \param Level The diagnostic level currently being emitted.
+/// \param Ranges The underlined ranges for this code snippet.
+/// \param Hints The FixIt hints active for this diagnostic.
+/// \param MacroDepth Filled in with the depth of the macro expansion stack
+///        once the walk reaches a file location, and then used to decide
+///        which intermediate expansion notes to skip.
+/// \param OnMacroInst The current depth of the macro expansion stack.
+void DiagnosticRenderer::emitMacroExpansionsAndCarets(
+ SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange>& Ranges,
+ ArrayRef<FixItHint> Hints,
+ unsigned &MacroDepth,
+ unsigned OnMacroInst)
+{
+ assert(!Loc.isInvalid() && "must have a valid source location here");
+
+ // If this is a file source location, directly emit the source snippet and
+ // caret line. Also record the macro depth reached.
+ if (Loc.isFileID()) {
+ assert(MacroDepth == 0 && "We shouldn't hit a leaf node twice!");
+ MacroDepth = OnMacroInst;
+ emitCodeContext(Loc, Level, Ranges, Hints);
+ return;
+ }
+ // Otherwise recurse through each macro expansion layer.
+
+ // When processing macros, skip over the expansions leading up to
+ // a macro argument, and trace the argument's expansion stack instead.
+ Loc = skipToMacroArgExpansion(SM, Loc);
+
+ SourceLocation OneLevelUp = getImmediateMacroCallerLoc(SM, Loc);
+
+ // FIXME: Map ranges?
+ emitMacroExpansionsAndCarets(OneLevelUp, Level, Ranges, Hints, MacroDepth,
+ OnMacroInst + 1);
+
+ // Save the original location so we can find the spelling of the macro call.
+ SourceLocation MacroLoc = Loc;
+
+ // Map the location.
+ Loc = getImmediateMacroCalleeLoc(SM, Loc);
+
+ unsigned MacroSkipStart = 0, MacroSkipEnd = 0;
+ if (MacroDepth > DiagOpts.MacroBacktraceLimit &&
+ DiagOpts.MacroBacktraceLimit != 0) {
+ MacroSkipStart = DiagOpts.MacroBacktraceLimit / 2 +
+ DiagOpts.MacroBacktraceLimit % 2;
+ MacroSkipEnd = MacroDepth - DiagOpts.MacroBacktraceLimit / 2;
+ }
+
+ // Whether to suppress printing this macro expansion.
+ bool Suppressed = (OnMacroInst >= MacroSkipStart &&
+ OnMacroInst < MacroSkipEnd);
+
+ // Map the ranges.
+ for (SmallVectorImpl<CharSourceRange>::iterator I = Ranges.begin(),
+ E = Ranges.end();
+ I != E; ++I) {
+ SourceLocation Start = I->getBegin(), End = I->getEnd();
+ if (Start.isMacroID())
+ I->setBegin(getImmediateMacroCalleeLoc(SM, Start));
+ if (End.isMacroID())
+ I->setEnd(getImmediateMacroCalleeLoc(SM, End));
+ }
+
+ if (Suppressed) {
+ // Tell the user that we've skipped contexts.
+ if (OnMacroInst == MacroSkipStart) {
+ SmallString<200> MessageStorage;
+ llvm::raw_svector_ostream Message(MessageStorage);
+ Message << "(skipping " << (MacroSkipEnd - MacroSkipStart)
+ << " expansions in backtrace; use -fmacro-backtrace-limit=0 to "
+ "see all)";
+ emitBasicNote(Message.str());
+ }
+ return;
+ }
+
+ SmallString<100> MessageStorage;
+ llvm::raw_svector_ostream Message(MessageStorage);
+ Message << "expanded from macro '"
+ << getImmediateMacroName(MacroLoc, SM, LangOpts) << "'";
+ emitDiagnostic(SM.getSpellingLoc(Loc), DiagnosticsEngine::Note,
+ Message.str(),
+ Ranges, ArrayRef<FixItHint>());
+}
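To make the skip-window arithmetic above concrete, here is a tiny numeric sketch with invented values: a backtrace 10 expansions deep with -fmacro-backtrace-limit=4 keeps roughly the two innermost and two outermost expansion notes and summarizes the rest in one note.

// Numeric sketch of the MacroSkipStart/MacroSkipEnd window computed above;
// the depth and limit are invented.
#include <iostream>

int main() {
  unsigned MacroDepth = 10, Limit = 4;        // e.g. -fmacro-backtrace-limit=4
  unsigned SkipStart = Limit / 2 + Limit % 2; // 2: innermost frames kept
  unsigned SkipEnd = MacroDepth - Limit / 2;  // 8: outermost frames kept
  // Frames with SkipStart <= OnMacroInst < SkipEnd are collapsed into one note.
  std::cout << "skipping " << (SkipEnd - SkipStart) << " expansions\n"; // 6
}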
+
+DiagnosticNoteRenderer::~DiagnosticNoteRenderer() {}
+
+void DiagnosticNoteRenderer::emitIncludeLocation(SourceLocation Loc,
+ PresumedLoc PLoc) {
+ // Generate a note indicating the include location.
+ SmallString<200> MessageStorage;
+ llvm::raw_svector_ostream Message(MessageStorage);
+ Message << "in file included from " << PLoc.getFilename() << ':'
+ << PLoc.getLine() << ":";
+ emitNote(Loc, Message.str());
+}
+
+void DiagnosticNoteRenderer::emitBasicNote(StringRef Message) {
+ emitNote(SourceLocation(), Message);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
new file mode 100644
index 0000000..da4bdfa
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendAction.cpp
@@ -0,0 +1,468 @@
+//===--- FrontendAction.cpp -----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/FrontendAction.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/Frontend/ChainedIncludesSource.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/FrontendPluginRegistry.h"
+#include "clang/Frontend/LayoutOverrideSource.h"
+#include "clang/Frontend/MultiplexConsumer.h"
+#include "clang/Parse/ParseAST.h"
+#include "clang/Serialization/ASTDeserializationListener.h"
+#include "clang/Serialization/ASTReader.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace {
+
+class DelegatingDeserializationListener : public ASTDeserializationListener {
+ ASTDeserializationListener *Previous;
+
+public:
+ explicit DelegatingDeserializationListener(
+ ASTDeserializationListener *Previous)
+ : Previous(Previous) { }
+
+ virtual void ReaderInitialized(ASTReader *Reader) {
+ if (Previous)
+ Previous->ReaderInitialized(Reader);
+ }
+ virtual void IdentifierRead(serialization::IdentID ID,
+ IdentifierInfo *II) {
+ if (Previous)
+ Previous->IdentifierRead(ID, II);
+ }
+ virtual void TypeRead(serialization::TypeIdx Idx, QualType T) {
+ if (Previous)
+ Previous->TypeRead(Idx, T);
+ }
+ virtual void DeclRead(serialization::DeclID ID, const Decl *D) {
+ if (Previous)
+ Previous->DeclRead(ID, D);
+ }
+ virtual void SelectorRead(serialization::SelectorID ID, Selector Sel) {
+ if (Previous)
+ Previous->SelectorRead(ID, Sel);
+ }
+ virtual void MacroDefinitionRead(serialization::PreprocessedEntityID PPID,
+ MacroDefinition *MD) {
+ if (Previous)
+ Previous->MacroDefinitionRead(PPID, MD);
+ }
+};
+
+/// \brief Dumps deserialized declarations.
+class DeserializedDeclsDumper : public DelegatingDeserializationListener {
+public:
+ explicit DeserializedDeclsDumper(ASTDeserializationListener *Previous)
+ : DelegatingDeserializationListener(Previous) { }
+
+ virtual void DeclRead(serialization::DeclID ID, const Decl *D) {
+ llvm::outs() << "PCH DECL: " << D->getDeclKindName();
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ llvm::outs() << " - " << *ND;
+ llvm::outs() << "\n";
+
+ DelegatingDeserializationListener::DeclRead(ID, D);
+ }
+};
+
+  /// \brief Checks deserialized declarations and emits an error if a name
+  /// matches one given on the command line via -error-on-deserialized-decl.
+ class DeserializedDeclsChecker : public DelegatingDeserializationListener {
+ ASTContext &Ctx;
+ std::set<std::string> NamesToCheck;
+
+ public:
+ DeserializedDeclsChecker(ASTContext &Ctx,
+ const std::set<std::string> &NamesToCheck,
+ ASTDeserializationListener *Previous)
+ : DelegatingDeserializationListener(Previous),
+ Ctx(Ctx), NamesToCheck(NamesToCheck) { }
+
+ virtual void DeclRead(serialization::DeclID ID, const Decl *D) {
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ if (NamesToCheck.find(ND->getNameAsString()) != NamesToCheck.end()) {
+ unsigned DiagID
+ = Ctx.getDiagnostics().getCustomDiagID(DiagnosticsEngine::Error,
+ "%0 was deserialized");
+ Ctx.getDiagnostics().Report(Ctx.getFullLoc(D->getLocation()), DiagID)
+ << ND->getNameAsString();
+ }
+
+ DelegatingDeserializationListener::DeclRead(ID, D);
+ }
+};
+
+} // end anonymous namespace
+
+FrontendAction::FrontendAction() : Instance(0) {}
+
+FrontendAction::~FrontendAction() {}
+
+void FrontendAction::setCurrentInput(const FrontendInputFile &CurrentInput,
+ ASTUnit *AST) {
+ this->CurrentInput = CurrentInput;
+ CurrentASTUnit.reset(AST);
+}
+
+ASTConsumer* FrontendAction::CreateWrappedASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ ASTConsumer* Consumer = CreateASTConsumer(CI, InFile);
+ if (!Consumer)
+ return 0;
+
+ if (CI.getFrontendOpts().AddPluginActions.size() == 0)
+ return Consumer;
+
+ // Make sure the non-plugin consumer is first, so that plugins can't
+  // modify the AST.
+ std::vector<ASTConsumer*> Consumers(1, Consumer);
+
+ for (size_t i = 0, e = CI.getFrontendOpts().AddPluginActions.size();
+ i != e; ++i) {
+ // This is O(|plugins| * |add_plugins|), but since both numbers are
+ // way below 50 in practice, that's ok.
+ for (FrontendPluginRegistry::iterator
+ it = FrontendPluginRegistry::begin(),
+ ie = FrontendPluginRegistry::end();
+ it != ie; ++it) {
+ if (it->getName() == CI.getFrontendOpts().AddPluginActions[i]) {
+ OwningPtr<PluginASTAction> P(it->instantiate());
+ FrontendAction* c = P.get();
+ if (P->ParseArgs(CI, CI.getFrontendOpts().AddPluginArgs[i]))
+ Consumers.push_back(c->CreateASTConsumer(CI, InFile));
+ }
+ }
+ }
+
+ return new MultiplexConsumer(Consumers);
+}
+
+bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
+ const FrontendInputFile &Input) {
+ assert(!Instance && "Already processing a source file!");
+ assert(!Input.File.empty() && "Unexpected empty filename!");
+ setCurrentInput(Input);
+ setCompilerInstance(&CI);
+
+ if (!BeginInvocation(CI))
+ goto failure;
+
+ // AST files follow a very different path, since they share objects via the
+ // AST unit.
+ if (Input.Kind == IK_AST) {
+ assert(!usesPreprocessorOnly() &&
+ "Attempt to pass AST file to preprocessor only action!");
+ assert(hasASTFileSupport() &&
+ "This action does not have AST file support!");
+
+ IntrusiveRefCntPtr<DiagnosticsEngine> Diags(&CI.getDiagnostics());
+ std::string Error;
+ ASTUnit *AST = ASTUnit::LoadFromASTFile(Input.File, Diags,
+ CI.getFileSystemOpts());
+ if (!AST)
+ goto failure;
+
+ setCurrentInput(Input, AST);
+
+    // Set the shared objects; these are reset when we finish processing the
+    // file, since otherwise the CompilerInstance would happily destroy them.
+ CI.setFileManager(&AST->getFileManager());
+ CI.setSourceManager(&AST->getSourceManager());
+ CI.setPreprocessor(&AST->getPreprocessor());
+ CI.setASTContext(&AST->getASTContext());
+
+ // Initialize the action.
+ if (!BeginSourceFileAction(CI, Input.File))
+ goto failure;
+
+ /// Create the AST consumer.
+ CI.setASTConsumer(CreateWrappedASTConsumer(CI, Input.File));
+ if (!CI.hasASTConsumer())
+ goto failure;
+
+ return true;
+ }
+
+ // Set up the file and source managers, if needed.
+ if (!CI.hasFileManager())
+ CI.createFileManager();
+ if (!CI.hasSourceManager())
+ CI.createSourceManager(CI.getFileManager());
+
+ // IR files bypass the rest of initialization.
+ if (Input.Kind == IK_LLVM_IR) {
+ assert(hasIRSupport() &&
+ "This action does not have IR file support!");
+
+ // Inform the diagnostic client we are processing a source file.
+ CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(), 0);
+
+ // Initialize the action.
+ if (!BeginSourceFileAction(CI, Input.File))
+ goto failure;
+
+ return true;
+ }
+
+ // Set up the preprocessor.
+ CI.createPreprocessor();
+
+ // Inform the diagnostic client we are processing a source file.
+ CI.getDiagnosticClient().BeginSourceFile(CI.getLangOpts(),
+ &CI.getPreprocessor());
+
+ // Initialize the action.
+ if (!BeginSourceFileAction(CI, Input.File))
+ goto failure;
+
+ /// Create the AST context and consumer unless this is a preprocessor only
+ /// action.
+ if (!usesPreprocessorOnly()) {
+ CI.createASTContext();
+
+ OwningPtr<ASTConsumer> Consumer(
+ CreateWrappedASTConsumer(CI, Input.File));
+ if (!Consumer)
+ goto failure;
+
+ CI.getASTContext().setASTMutationListener(Consumer->GetASTMutationListener());
+
+ if (!CI.getPreprocessorOpts().ChainedIncludes.empty()) {
+ // Convert headers to PCH and chain them.
+ OwningPtr<ExternalASTSource> source;
+ source.reset(ChainedIncludesSource::create(CI));
+ if (!source)
+ goto failure;
+ CI.getASTContext().setExternalSource(source);
+
+ } else if (!CI.getPreprocessorOpts().ImplicitPCHInclude.empty()) {
+ // Use PCH.
+ assert(hasPCHSupport() && "This action does not have PCH support!");
+ ASTDeserializationListener *DeserialListener =
+ Consumer->GetASTDeserializationListener();
+ if (CI.getPreprocessorOpts().DumpDeserializedPCHDecls)
+ DeserialListener = new DeserializedDeclsDumper(DeserialListener);
+ if (!CI.getPreprocessorOpts().DeserializedPCHDeclsToErrorOn.empty())
+ DeserialListener = new DeserializedDeclsChecker(CI.getASTContext(),
+ CI.getPreprocessorOpts().DeserializedPCHDeclsToErrorOn,
+ DeserialListener);
+ CI.createPCHExternalASTSource(
+ CI.getPreprocessorOpts().ImplicitPCHInclude,
+ CI.getPreprocessorOpts().DisablePCHValidation,
+ CI.getPreprocessorOpts().DisableStatCache,
+ CI.getPreprocessorOpts().AllowPCHWithCompilerErrors,
+ DeserialListener);
+ if (!CI.getASTContext().getExternalSource())
+ goto failure;
+ }
+
+ CI.setASTConsumer(Consumer.take());
+ if (!CI.hasASTConsumer())
+ goto failure;
+ }
+
+ // Initialize built-in info as long as we aren't using an external AST
+ // source.
+ if (!CI.hasASTContext() || !CI.getASTContext().getExternalSource()) {
+ Preprocessor &PP = CI.getPreprocessor();
+ PP.getBuiltinInfo().InitializeBuiltins(PP.getIdentifierTable(),
+ PP.getLangOpts());
+ }
+
+ // If there is a layout overrides file, attach an external AST source that
+ // provides the layouts from that file.
+ if (!CI.getFrontendOpts().OverrideRecordLayoutsFile.empty() &&
+ CI.hasASTContext() && !CI.getASTContext().getExternalSource()) {
+ OwningPtr<ExternalASTSource>
+ Override(new LayoutOverrideSource(
+ CI.getFrontendOpts().OverrideRecordLayoutsFile));
+ CI.getASTContext().setExternalSource(Override);
+ }
+
+ return true;
+
+ // If we failed, reset state since the client will not end up calling the
+ // matching EndSourceFile().
+ failure:
+ if (isCurrentFileAST()) {
+ CI.setASTContext(0);
+ CI.setPreprocessor(0);
+ CI.setSourceManager(0);
+ CI.setFileManager(0);
+ }
+
+ CI.getDiagnosticClient().EndSourceFile();
+ setCurrentInput(FrontendInputFile());
+ setCompilerInstance(0);
+ return false;
+}
+
+void FrontendAction::Execute() {
+ CompilerInstance &CI = getCompilerInstance();
+
+ // Initialize the main file entry. This needs to be delayed until after PCH
+ // has loaded.
+ if (!isCurrentFileAST()) {
+ if (!CI.InitializeSourceManager(getCurrentFile(),
+ getCurrentInput().IsSystem
+ ? SrcMgr::C_System
+ : SrcMgr::C_User))
+ return;
+ }
+
+ if (CI.hasFrontendTimer()) {
+ llvm::TimeRegion Timer(CI.getFrontendTimer());
+ ExecuteAction();
+ }
+ else ExecuteAction();
+}
+
+void FrontendAction::EndSourceFile() {
+ CompilerInstance &CI = getCompilerInstance();
+
+ // Inform the diagnostic client we are done with this source file.
+ CI.getDiagnosticClient().EndSourceFile();
+
+ // Finalize the action.
+ EndSourceFileAction();
+
+ // Release the consumer and the AST, in that order since the consumer may
+ // perform actions in its destructor which require the context.
+ //
+ // FIXME: There is more per-file stuff we could just drop here?
+ if (CI.getFrontendOpts().DisableFree) {
+ CI.takeASTConsumer();
+ if (!isCurrentFileAST()) {
+ CI.takeSema();
+ CI.resetAndLeakASTContext();
+ }
+ } else {
+ if (!isCurrentFileAST()) {
+ CI.setSema(0);
+ CI.setASTContext(0);
+ }
+ CI.setASTConsumer(0);
+ }
+
+ // Inform the preprocessor we are done.
+ if (CI.hasPreprocessor())
+ CI.getPreprocessor().EndSourceFile();
+
+ if (CI.getFrontendOpts().ShowStats) {
+ llvm::errs() << "\nSTATISTICS FOR '" << getCurrentFile() << "':\n";
+ CI.getPreprocessor().PrintStats();
+ CI.getPreprocessor().getIdentifierTable().PrintStats();
+ CI.getPreprocessor().getHeaderSearchInfo().PrintStats();
+ CI.getSourceManager().PrintStats();
+ llvm::errs() << "\n";
+ }
+
+  // Clean up the output streams, and erase the output files if we encountered
+ // an error.
+ CI.clearOutputFiles(/*EraseFiles=*/CI.getDiagnostics().hasErrorOccurred());
+
+ if (isCurrentFileAST()) {
+ CI.takeSema();
+ CI.resetAndLeakASTContext();
+ CI.resetAndLeakPreprocessor();
+ CI.resetAndLeakSourceManager();
+ CI.resetAndLeakFileManager();
+ }
+
+ setCompilerInstance(0);
+ setCurrentInput(FrontendInputFile());
+}
+
+//===----------------------------------------------------------------------===//
+// Utility Actions
+//===----------------------------------------------------------------------===//
+
+void ASTFrontendAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+
+ // FIXME: Move the truncation aspect of this into Sema; we delayed it until
+ // here so that the source manager would be initialized.
+ if (hasCodeCompletionSupport() &&
+ !CI.getFrontendOpts().CodeCompletionAt.FileName.empty())
+ CI.createCodeCompletionConsumer();
+
+ // Use a code completion consumer?
+ CodeCompleteConsumer *CompletionConsumer = 0;
+ if (CI.hasCodeCompletionConsumer())
+ CompletionConsumer = &CI.getCodeCompletionConsumer();
+
+ if (!CI.hasSema())
+ CI.createSema(getTranslationUnitKind(), CompletionConsumer);
+
+ ParseAST(CI.getSema(), CI.getFrontendOpts().ShowStats,
+ CI.getFrontendOpts().SkipFunctionBodies);
+}
+
+void PluginASTAction::anchor() { }
+
+ASTConsumer *
+PreprocessorFrontendAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ llvm_unreachable("Invalid CreateASTConsumer on preprocessor action!");
+}
+
+ASTConsumer *WrapperFrontendAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return WrappedAction->CreateASTConsumer(CI, InFile);
+}
+bool WrapperFrontendAction::BeginInvocation(CompilerInstance &CI) {
+ return WrappedAction->BeginInvocation(CI);
+}
+bool WrapperFrontendAction::BeginSourceFileAction(CompilerInstance &CI,
+ StringRef Filename) {
+ WrappedAction->setCurrentInput(getCurrentInput());
+ WrappedAction->setCompilerInstance(&CI);
+ return WrappedAction->BeginSourceFileAction(CI, Filename);
+}
+void WrapperFrontendAction::ExecuteAction() {
+ WrappedAction->ExecuteAction();
+}
+void WrapperFrontendAction::EndSourceFileAction() {
+ WrappedAction->EndSourceFileAction();
+}
+
+bool WrapperFrontendAction::usesPreprocessorOnly() const {
+ return WrappedAction->usesPreprocessorOnly();
+}
+TranslationUnitKind WrapperFrontendAction::getTranslationUnitKind() {
+ return WrappedAction->getTranslationUnitKind();
+}
+bool WrapperFrontendAction::hasPCHSupport() const {
+ return WrappedAction->hasPCHSupport();
+}
+bool WrapperFrontendAction::hasASTFileSupport() const {
+ return WrappedAction->hasASTFileSupport();
+}
+bool WrapperFrontendAction::hasIRSupport() const {
+ return WrappedAction->hasIRSupport();
+}
+bool WrapperFrontendAction::hasCodeCompletionSupport() const {
+ return WrappedAction->hasCodeCompletionSupport();
+}
+
+WrapperFrontendAction::WrapperFrontendAction(FrontendAction *WrappedAction)
+ : WrappedAction(WrappedAction) {}
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
new file mode 100644
index 0000000..b4a439d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendActions.cpp
@@ -0,0 +1,572 @@
+//===--- FrontendActions.cpp ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/Pragma.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Frontend/ASTConsumers.h"
+#include "clang/Frontend/ASTUnit.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/Serialization/ASTWriter.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
+#include <set>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Custom Actions
+//===----------------------------------------------------------------------===//
+
+ASTConsumer *InitOnlyAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return new ASTConsumer();
+}
+
+void InitOnlyAction::ExecuteAction() {
+}
+
+//===----------------------------------------------------------------------===//
+// AST Consumer Actions
+//===----------------------------------------------------------------------===//
+
+ASTConsumer *ASTPrintAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile))
+ return CreateASTPrinter(OS);
+ return 0;
+}
+
+ASTConsumer *ASTDumpAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return CreateASTDumper();
+}
+
+ASTConsumer *ASTDumpXMLAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ raw_ostream *OS;
+ if (CI.getFrontendOpts().OutputFile.empty())
+ OS = &llvm::outs();
+ else
+ OS = CI.createDefaultOutputFile(false, InFile);
+ if (!OS) return 0;
+ return CreateASTDumperXML(*OS);
+}
+
+ASTConsumer *ASTViewAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return CreateASTViewer();
+}
+
+ASTConsumer *DeclContextPrintAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return CreateDeclContextPrinter();
+}
+
+ASTConsumer *GeneratePCHAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ std::string Sysroot;
+ std::string OutputFile;
+ raw_ostream *OS = 0;
+ if (ComputeASTConsumerArguments(CI, InFile, Sysroot, OutputFile, OS))
+ return 0;
+
+ if (!CI.getFrontendOpts().RelocatablePCH)
+ Sysroot.clear();
+ return new PCHGenerator(CI.getPreprocessor(), OutputFile, 0, Sysroot, OS);
+}
+
+bool GeneratePCHAction::ComputeASTConsumerArguments(CompilerInstance &CI,
+ StringRef InFile,
+ std::string &Sysroot,
+ std::string &OutputFile,
+ raw_ostream *&OS) {
+ Sysroot = CI.getHeaderSearchOpts().Sysroot;
+ if (CI.getFrontendOpts().RelocatablePCH && Sysroot.empty()) {
+ CI.getDiagnostics().Report(diag::err_relocatable_without_isysroot);
+ return true;
+ }
+
+ // We use createOutputFile here because this is exposed via libclang, and we
+ // must disable the RemoveFileOnSignal behavior.
+ // We use a temporary to avoid race conditions.
+ OS = CI.createOutputFile(CI.getFrontendOpts().OutputFile, /*Binary=*/true,
+ /*RemoveFileOnSignal=*/false, InFile,
+ /*Extension=*/"", /*useTemporary=*/true);
+ if (!OS)
+ return true;
+
+ OutputFile = CI.getFrontendOpts().OutputFile;
+ return false;
+}
+
+ASTConsumer *GenerateModuleAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ std::string Sysroot;
+ std::string OutputFile;
+ raw_ostream *OS = 0;
+ if (ComputeASTConsumerArguments(CI, InFile, Sysroot, OutputFile, OS))
+ return 0;
+
+ return new PCHGenerator(CI.getPreprocessor(), OutputFile, Module,
+ Sysroot, OS);
+}
+
+/// \brief Collect the set of header includes needed to construct the given
+/// module.
+///
+/// \param Module The module we're collecting includes from.
+///
+/// \param Includes Will be augmented with the set of #includes or #imports
+/// needed to load all of the named headers.
+static void collectModuleHeaderIncludes(const LangOptions &LangOpts,
+ FileManager &FileMgr,
+ ModuleMap &ModMap,
+ clang::Module *Module,
+ SmallString<256> &Includes) {
+ // Don't collect any headers for unavailable modules.
+ if (!Module->isAvailable())
+ return;
+
+ // Add includes for each of these headers.
+ for (unsigned I = 0, N = Module->Headers.size(); I != N; ++I) {
+ if (LangOpts.ObjC1)
+ Includes += "#import \"";
+ else
+ Includes += "#include \"";
+ Includes += Module->Headers[I]->getName();
+ Includes += "\"\n";
+ }
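+ // For example, with ObjC enabled, headers "A.h" and "B.h" contribute:
+ //   #import "A.h"
+ //   #import "B.h"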
+
+ if (const FileEntry *UmbrellaHeader = Module->getUmbrellaHeader()) {
+ if (Module->Parent) {
+ // Include the umbrella header for submodules.
+ if (LangOpts.ObjC1)
+ Includes += "#import \"";
+ else
+ Includes += "#include \"";
+ Includes += UmbrellaHeader->getName();
+ Includes += "\"\n";
+ }
+ } else if (const DirectoryEntry *UmbrellaDir = Module->getUmbrellaDir()) {
+ // Add all of the headers we find in this subdirectory.
+ llvm::error_code EC;
+ SmallString<128> DirNative;
+ llvm::sys::path::native(UmbrellaDir->getName(), DirNative);
+ for (llvm::sys::fs::recursive_directory_iterator Dir(DirNative.str(), EC),
+ DirEnd;
+ Dir != DirEnd && !EC; Dir.increment(EC)) {
+ // Check whether this entry has an extension typically associated with
+ // headers.
+ if (!llvm::StringSwitch<bool>(llvm::sys::path::extension(Dir->path()))
+ .Cases(".h", ".H", ".hh", ".hpp", true)
+ .Default(false))
+ continue;
+
+ // If this header is marked 'unavailable' in this module, don't include
+ // it.
+ if (const FileEntry *Header = FileMgr.getFile(Dir->path()))
+ if (ModMap.isHeaderInUnavailableModule(Header))
+ continue;
+
+ // Include this header as part of the umbrella directory.
+ if (LangOpts.ObjC1)
+ Includes += "#import \"";
+ else
+ Includes += "#include \"";
+ Includes += Dir->path();
+ Includes += "\"\n";
+ }
+ }
+
+ // Recurse into submodules.
+ for (clang::Module::submodule_iterator Sub = Module->submodule_begin(),
+ SubEnd = Module->submodule_end();
+ Sub != SubEnd; ++Sub)
+ collectModuleHeaderIncludes(LangOpts, FileMgr, ModMap, *Sub, Includes);
+}
+
+bool GenerateModuleAction::BeginSourceFileAction(CompilerInstance &CI,
+ StringRef Filename) {
+ // Find the module map file.
+ const FileEntry *ModuleMap = CI.getFileManager().getFile(Filename);
+ if (!ModuleMap) {
+ CI.getDiagnostics().Report(diag::err_module_map_not_found)
+ << Filename;
+ return false;
+ }
+
+ // Parse the module map file.
+ HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
+ if (HS.loadModuleMapFile(ModuleMap))
+ return false;
+
+ if (CI.getLangOpts().CurrentModule.empty()) {
+ CI.getDiagnostics().Report(diag::err_missing_module_name);
+
+ // FIXME: Eventually, we could consider asking whether there was just
+ // a single module described in the module map, and use that as a
+ // default. Then it would be fairly trivial to just "compile" a module
+ // map with a single module (the common case).
+ return false;
+ }
+
+ // Dig out the module definition.
+ Module = HS.lookupModule(CI.getLangOpts().CurrentModule,
+ /*AllowSearch=*/false);
+ if (!Module) {
+ CI.getDiagnostics().Report(diag::err_missing_module)
+ << CI.getLangOpts().CurrentModule << Filename;
+
+ return false;
+ }
+
+ // Check whether we can build this module at all.
+ StringRef Feature;
+ if (!Module->isAvailable(CI.getLangOpts(), CI.getTarget(), Feature)) {
+ CI.getDiagnostics().Report(diag::err_module_unavailable)
+ << Module->getFullModuleName()
+ << Feature;
+
+ return false;
+ }
+
+ // Do we have an umbrella header for this module?
+ const FileEntry *UmbrellaHeader = Module->getUmbrellaHeader();
+
+ // Collect the set of #includes we need to build the module.
+ SmallString<256> HeaderContents;
+ collectModuleHeaderIncludes(CI.getLangOpts(), CI.getFileManager(),
+ CI.getPreprocessor().getHeaderSearchInfo().getModuleMap(),
+ Module, HeaderContents);
+ if (UmbrellaHeader && HeaderContents.empty()) {
+ // Simple case: we have an umbrella header and there are no additional
+ // includes, so we can just parse the umbrella header directly.
+ setCurrentInput(FrontendInputFile(UmbrellaHeader->getName(),
+ getCurrentFileKind(),
+ Module->IsSystem));
+ return true;
+ }
+
+ FileManager &FileMgr = CI.getFileManager();
+ SmallString<128> HeaderName;
+ time_t ModTime;
+ if (UmbrellaHeader) {
+ // Read in the umbrella header.
+ // FIXME: Go through the source manager; the umbrella header may have
+ // been overridden.
+ std::string ErrorStr;
+ llvm::MemoryBuffer *UmbrellaContents
+ = FileMgr.getBufferForFile(UmbrellaHeader, &ErrorStr);
+ if (!UmbrellaContents) {
+ CI.getDiagnostics().Report(diag::err_missing_umbrella_header)
+ << UmbrellaHeader->getName() << ErrorStr;
+ return false;
+ }
+
+ // Combine the contents of the umbrella header with the automatically-
+ // generated includes.
+ SmallString<256> OldContents = HeaderContents;
+ HeaderContents = UmbrellaContents->getBuffer();
+ HeaderContents += "\n\n";
+ HeaderContents += "/* Module includes */\n";
+ HeaderContents += OldContents;
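+ // The synthesized buffer now looks roughly like:
+ //   <original umbrella header contents>
+ //
+ //   /* Module includes */
+ //   <#include/#import lines collected above>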
+
+ // Pretend that we're parsing the umbrella header.
+ HeaderName = UmbrellaHeader->getName();
+ ModTime = UmbrellaHeader->getModificationTime();
+
+ delete UmbrellaContents;
+ } else {
+ // Pick an innocuous-sounding name for the synthesized header.
+ HeaderName = Module->Name + ".h";
+ if (FileMgr.getFile(HeaderName, /*OpenFile=*/false,
+ /*CacheFailure=*/false)) {
+ // Try again!
+ HeaderName = Module->Name + "-module.h";
+ if (FileMgr.getFile(HeaderName, /*OpenFile=*/false,
+ /*CacheFailure=*/false)) {
+ // Pick something ridiculous and go with it.
+ HeaderName = Module->Name + "-module.hmod";
+ }
+ }
+ ModTime = time(0);
+ }
+
+ // Register a virtual file for the header name we're using, and remap its
+ // contents to our synthesized buffer.
+ const FileEntry *HeaderFile = FileMgr.getVirtualFile(HeaderName,
+ HeaderContents.size(),
+ ModTime);
+ llvm::MemoryBuffer *HeaderContentsBuf
+ = llvm::MemoryBuffer::getMemBufferCopy(HeaderContents);
+ CI.getSourceManager().overrideFileContents(HeaderFile, HeaderContentsBuf);
+ setCurrentInput(FrontendInputFile(HeaderName, getCurrentFileKind(),
+ Module->IsSystem));
+ return true;
+}
+
+bool GenerateModuleAction::ComputeASTConsumerArguments(CompilerInstance &CI,
+ StringRef InFile,
+ std::string &Sysroot,
+ std::string &OutputFile,
+ raw_ostream *&OS) {
+ // If no output file was provided, figure out where this module would go
+ // in the module cache.
+ if (CI.getFrontendOpts().OutputFile.empty()) {
+ HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
+ SmallString<256> ModuleFileName(HS.getModuleCachePath());
+ llvm::sys::path::append(ModuleFileName,
+ CI.getLangOpts().CurrentModule + ".pcm");
+ CI.getFrontendOpts().OutputFile = ModuleFileName.str();
+ }
+
+ // We use createOutputFile here because this is exposed via libclang, and we
+ // must disable the RemoveFileOnSignal behavior.
+ // We use a temporary to avoid race conditions.
+ OS = CI.createOutputFile(CI.getFrontendOpts().OutputFile, /*Binary=*/true,
+ /*RemoveFileOnSignal=*/false, InFile,
+ /*Extension=*/"", /*useTemporary=*/true,
+ /*CreateMissingDirectories=*/true);
+ if (!OS)
+ return true;
+
+ OutputFile = CI.getFrontendOpts().OutputFile;
+ return false;
+}
+
+ASTConsumer *SyntaxOnlyAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return new ASTConsumer();
+}
+
+namespace {
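+ /// Dumps the set of "public" names in the translation unit (names of
+ /// top-level declarations and of non-builtin macros defined in a real
+ /// source file) to stdout, one per line, in lexicographical order.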
+ class PubnamesDumpConsumer : public ASTConsumer {
+ Preprocessor &PP;
+
+ /// \brief Determine whether the given identifier provides a 'public' name.
+ bool isPublicName(IdentifierInfo *II) {
+ // If there are any top-level declarations associated with this
+ // identifier, it is a public name.
+ if (II->getFETokenInfo<void>())
+ return true;
+
+ // If this identifier is the name of a non-builtin macro that isn't
+ // defined on the command line or implicitly by the front end, it is a
+ // public name.
+ if (II->hasMacroDefinition()) {
+ if (MacroInfo *M = PP.getMacroInfo(II))
+ if (!M->isBuiltinMacro()) {
+ SourceLocation Loc = M->getDefinitionLoc();
+ FileID File = PP.getSourceManager().getFileID(Loc);
+ if (PP.getSourceManager().getFileEntryForID(File))
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ public:
+ PubnamesDumpConsumer(Preprocessor &PP) : PP(PP) { }
+
+ virtual void HandleTranslationUnit(ASTContext &Ctx) {
+ std::set<StringRef> Pubnames;
+
+ // Add the names of any non-builtin macros.
+ for (IdentifierTable::iterator I = Ctx.Idents.begin(),
+ IEnd = Ctx.Idents.end();
+ I != IEnd; ++I) {
+ if (isPublicName(I->second))
+ Pubnames.insert(I->first());
+ }
+
+ // If there is an external identifier lookup source, consider those
+ // identifiers as well.
+ if (IdentifierInfoLookup *External
+ = Ctx.Idents.getExternalIdentifierLookup()) {
+ OwningPtr<IdentifierIterator> Iter(External->getIdentifiers());
+ do {
+ StringRef Name = Iter->Next();
+ if (Name.empty())
+ break;
+
+ if (isPublicName(PP.getIdentifierInfo(Name)))
+ Pubnames.insert(Name);
+ } while (true);
+ }
+
+ // Print the names, in lexicographical order.
+ for (std::set<StringRef>::iterator N = Pubnames.begin(),
+ NEnd = Pubnames.end();
+ N != NEnd; ++N) {
+ llvm::outs() << *N << '\n';
+ }
+ }
+ };
+}
+
+ASTConsumer *PubnamesDumpAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return new PubnamesDumpConsumer(CI.getPreprocessor());
+}
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Actions
+//===----------------------------------------------------------------------===//
+
+void DumpRawTokensAction::ExecuteAction() {
+ Preprocessor &PP = getCompilerInstance().getPreprocessor();
+ SourceManager &SM = PP.getSourceManager();
+
+ // Start lexing the specified input file.
+ const llvm::MemoryBuffer *FromFile = SM.getBuffer(SM.getMainFileID());
+ Lexer RawLex(SM.getMainFileID(), FromFile, SM, PP.getLangOpts());
+ RawLex.SetKeepWhitespaceMode(true);
+
+ Token RawTok;
+ RawLex.LexFromRawLexer(RawTok);
+ while (RawTok.isNot(tok::eof)) {
+ PP.DumpToken(RawTok, true);
+ llvm::errs() << "\n";
+ RawLex.LexFromRawLexer(RawTok);
+ }
+}
+
+void DumpTokensAction::ExecuteAction() {
+ Preprocessor &PP = getCompilerInstance().getPreprocessor();
+ // Start preprocessing the specified input file.
+ Token Tok;
+ PP.EnterMainSourceFile();
+ do {
+ PP.Lex(Tok);
+ PP.DumpToken(Tok, true);
+ llvm::errs() << "\n";
+ } while (Tok.isNot(tok::eof));
+}
+
+void GeneratePTHAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ if (CI.getFrontendOpts().OutputFile.empty() ||
+ CI.getFrontendOpts().OutputFile == "-") {
+ // FIXME: Don't fail this way.
+ // FIXME: Verify that we can actually seek in the given file.
+ llvm::report_fatal_error("PTH requires a seekable file for output!");
+ }
+ llvm::raw_fd_ostream *OS =
+ CI.createDefaultOutputFile(true, getCurrentFile());
+ if (!OS) return;
+
+ CacheTokens(CI.getPreprocessor(), OS);
+}
+
+void PreprocessOnlyAction::ExecuteAction() {
+ Preprocessor &PP = getCompilerInstance().getPreprocessor();
+
+ // Ignore unknown pragmas.
+ PP.AddPragmaHandler(new EmptyPragmaHandler());
+
+ Token Tok;
+ // Start parsing the specified input file.
+ PP.EnterMainSourceFile();
+ do {
+ PP.Lex(Tok);
+ } while (Tok.isNot(tok::eof));
+}
+
+void PrintPreprocessedAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ // The output file may need to be set to 'Binary' to avoid converting
+ // Unix-style line endings (<LF>) to Windows-style line endings (<CR><LF>).
+ //
+ // Look to see what type of line endings the file uses. If there's a
+ // CRLF, then we won't open the file up in binary mode. If there is
+ // just an LF or CR, then we will open the file up in binary mode.
+ // In this fashion, the output format should match the input format, unless
+ // the input format has inconsistent line endings.
+ //
+ // This should be a relatively fast operation since most files won't have
+ // all of their source code on a single line. However, that is still a
+ // concern, so if we scan for too long, we'll just assume the file should
+ // be opened in binary mode.
+ bool BinaryMode = true;
+ bool InvalidFile = false;
+ const SourceManager& SM = CI.getSourceManager();
+ const llvm::MemoryBuffer *Buffer = SM.getBuffer(SM.getMainFileID(),
+ &InvalidFile);
+ if (!InvalidFile) {
+ const char *cur = Buffer->getBufferStart();
+ const char *end = Buffer->getBufferEnd();
+ const char *next = (cur != end) ? cur + 1 : end;
+
+ // Limit ourselves to only scanning 256 characters into the source
+ // file. This is mostly a sanity check in case the file has no
+ // newlines whatsoever.
+ if (end - cur > 256) end = cur + 256;
+
+ while (next < end) {
+ if (*cur == 0x0D) { // CR
+ if (*next == 0x0A) // CRLF
+ BinaryMode = false;
+
+ break;
+ } else if (*cur == 0x0A) // LF
+ break;
+
+ ++cur, ++next;
+ }
+ }
+
+ raw_ostream *OS = CI.createDefaultOutputFile(BinaryMode, getCurrentFile());
+ if (!OS) return;
+
+ DoPrintPreprocessedInput(CI.getPreprocessor(), OS,
+ CI.getPreprocessorOutputOpts());
+}
+
+void PrintPreambleAction::ExecuteAction() {
+ switch (getCurrentFileKind()) {
+ case IK_C:
+ case IK_CXX:
+ case IK_ObjC:
+ case IK_ObjCXX:
+ case IK_OpenCL:
+ case IK_CUDA:
+ break;
+
+ case IK_None:
+ case IK_Asm:
+ case IK_PreprocessedC:
+ case IK_PreprocessedCXX:
+ case IK_PreprocessedObjC:
+ case IK_PreprocessedObjCXX:
+ case IK_AST:
+ case IK_LLVM_IR:
+ // We can't do anything with these.
+ return;
+ }
+
+ CompilerInstance &CI = getCompilerInstance();
+ llvm::MemoryBuffer *Buffer
+ = CI.getFileManager().getBufferForFile(getCurrentFile());
+ if (Buffer) {
+ unsigned Preamble = Lexer::ComputePreamble(Buffer, CI.getLangOpts()).first;
+ llvm::outs().write(Buffer->getBufferStart(), Preamble);
+ delete Buffer;
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp b/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp
new file mode 100644
index 0000000..ea4005f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/FrontendOptions.cpp
@@ -0,0 +1,32 @@
+//===--- FrontendOptions.cpp ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/FrontendOptions.h"
+#include "llvm/ADT/StringSwitch.h"
+using namespace clang;
+
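+/// Map a source-file extension to the corresponding InputKind; for example,
+/// "cpp" and "cxx" map to IK_CXX, "m" to IK_ObjC, and "ll"/"bc" to IK_LLVM_IR.
+/// Unrecognized extensions fall back to IK_C.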
+InputKind FrontendOptions::getInputKindForExtension(StringRef Extension) {
+ return llvm::StringSwitch<InputKind>(Extension)
+ .Case("ast", IK_AST)
+ .Case("c", IK_C)
+ .Cases("S", "s", IK_Asm)
+ .Case("i", IK_PreprocessedC)
+ .Case("ii", IK_PreprocessedCXX)
+ .Case("m", IK_ObjC)
+ .Case("mi", IK_PreprocessedObjC)
+ .Cases("mm", "M", IK_ObjCXX)
+ .Case("mii", IK_PreprocessedObjCXX)
+ .Case("C", IK_CXX)
+ .Cases("C", "cc", "cp", IK_CXX)
+ .Cases("cpp", "CPP", "c++", "cxx", "hpp", IK_CXX)
+ .Case("cl", IK_OpenCL)
+ .Case("cu", IK_CUDA)
+ .Cases("ll", "bc", IK_LLVM_IR)
+ .Default(IK_C);
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp b/contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp
new file mode 100644
index 0000000..79920df
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/HeaderIncludeGen.cpp
@@ -0,0 +1,126 @@
+//===--- HeaderIncludeGen.cpp - Generate Header Includes ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+namespace {
+class HeaderIncludesCallback : public PPCallbacks {
+ SourceManager &SM;
+ raw_ostream *OutputFile;
+ unsigned CurrentIncludeDepth;
+ bool HasProcessedPredefines;
+ bool OwnsOutputFile;
+ bool ShowAllHeaders;
+ bool ShowDepth;
+
+public:
+ HeaderIncludesCallback(const Preprocessor *PP, bool ShowAllHeaders_,
+ raw_ostream *OutputFile_, bool OwnsOutputFile_,
+ bool ShowDepth_)
+ : SM(PP->getSourceManager()), OutputFile(OutputFile_),
+ CurrentIncludeDepth(0), HasProcessedPredefines(false),
+ OwnsOutputFile(OwnsOutputFile_), ShowAllHeaders(ShowAllHeaders_),
+ ShowDepth(ShowDepth_) {}
+
+ ~HeaderIncludesCallback() {
+ if (OwnsOutputFile)
+ delete OutputFile;
+ }
+
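+ // Emits one line for each header entered (once we are past the predefines,
+ // or for all headers when ShowAllHeaders is set). With ShowDepth enabled,
+ // the line is prefixed by one dot per level of include nesting.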
+ virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID);
+};
+}
+
+void clang::AttachHeaderIncludeGen(Preprocessor &PP, bool ShowAllHeaders,
+ StringRef OutputPath, bool ShowDepth) {
+ raw_ostream *OutputFile = &llvm::errs();
+ bool OwnsOutputFile = false;
+
+ // Open the output file, if used.
+ if (!OutputPath.empty()) {
+ std::string Error;
+ llvm::raw_fd_ostream *OS = new llvm::raw_fd_ostream(
+ OutputPath.str().c_str(), Error, llvm::raw_fd_ostream::F_Append);
+ if (!Error.empty()) {
+ PP.getDiagnostics().Report(
+ clang::diag::warn_fe_cc_print_header_failure) << Error;
+ delete OS;
+ } else {
+ OS->SetUnbuffered();
+ OS->SetUseAtomicWrites(true);
+ OutputFile = OS;
+ OwnsOutputFile = true;
+ }
+ }
+
+ PP.addPPCallbacks(new HeaderIncludesCallback(&PP, ShowAllHeaders,
+ OutputFile, OwnsOutputFile,
+ ShowDepth));
+}
+
+void HeaderIncludesCallback::FileChanged(SourceLocation Loc,
+ FileChangeReason Reason,
+ SrcMgr::CharacteristicKind NewFileType,
+ FileID PrevFID) {
+ // Compute the presumed (user-visible) location; if it is invalid, there is
+ // nothing to report.
+ PresumedLoc UserLoc = SM.getPresumedLoc(Loc);
+ if (UserLoc.isInvalid())
+ return;
+
+ // Adjust the current include depth.
+ if (Reason == PPCallbacks::EnterFile) {
+ ++CurrentIncludeDepth;
+ } else if (Reason == PPCallbacks::ExitFile) {
+ if (CurrentIncludeDepth)
+ --CurrentIncludeDepth;
+
+ // We track when we are done with the predefines by watching for the first
+ // place where we drop back to a nesting depth of 1.
+ if (CurrentIncludeDepth == 1 && !HasProcessedPredefines)
+ HasProcessedPredefines = true;
+
+ return;
+ } else
+ return;
+
+ // Show the header if we are (a) past the predefines, or (b) showing all
+ // headers and in the predefines at a depth past the initial file and command
+ // line buffers.
+ bool ShowHeader = (HasProcessedPredefines ||
+ (ShowAllHeaders && CurrentIncludeDepth > 2));
+
+ // Dump the header include information, if we are past the predefines buffer
+ // or are showing all headers.
+ if (ShowHeader && Reason == PPCallbacks::EnterFile) {
+ // Write to a temporary string to avoid unnecessary flushing on errs().
+ SmallString<512> Filename(UserLoc.getFilename());
+ Lexer::Stringify(Filename);
+
+ SmallString<256> Msg;
+ if (ShowDepth) {
+ // The main source file is at depth 1, so skip one dot.
+ for (unsigned i = 1; i != CurrentIncludeDepth; ++i)
+ Msg += '.';
+ Msg += ' ';
+ }
+ Msg += Filename;
+ Msg += '\n';
+
+ OutputFile->write(Msg.data(), Msg.size());
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
new file mode 100644
index 0000000..7f01cd9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/InitHeaderSearch.cpp
@@ -0,0 +1,683 @@
+//===--- InitHeaderSearch.cpp - Initialize header search paths ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the InitHeaderSearch class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/Version.h"
+#include "clang/Frontend/HeaderSearchOptions.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Path.h"
+
+#include "clang/Config/config.h" // C_INCLUDE_DIRS
+
+#ifndef CLANG_PREFIX
+#define CLANG_PREFIX
+#endif
+using namespace clang;
+using namespace clang::frontend;
+
+namespace {
+
+/// InitHeaderSearch - This class makes it easier to set the search paths of
+/// a HeaderSearch object. InitHeaderSearch stores several search path lists
+/// internally, which can be sent to a HeaderSearch object in one swoop.
+class InitHeaderSearch {
+ std::vector<std::pair<IncludeDirGroup, DirectoryLookup> > IncludePath;
+ typedef std::vector<std::pair<IncludeDirGroup,
+ DirectoryLookup> >::const_iterator path_iterator;
+ HeaderSearch &Headers;
+ bool Verbose;
+ std::string IncludeSysroot;
+ bool IsNotEmptyOrRoot;
+
+public:
+
+ InitHeaderSearch(HeaderSearch &HS, bool verbose, StringRef sysroot)
+ : Headers(HS), Verbose(verbose), IncludeSysroot(sysroot),
+ IsNotEmptyOrRoot(!(sysroot.empty() || sysroot == "/")) {
+ }
+
+ /// AddPath - Add the specified path to the specified group list.
+ void AddPath(const Twine &Path, IncludeDirGroup Group,
+ bool isCXXAware, bool isUserSupplied,
+ bool isFramework, bool IgnoreSysRoot = false);
+
+ /// AddGnuCPlusPlusIncludePaths - Add the necessary paths to support a gnu
+ /// libstdc++.
+ void AddGnuCPlusPlusIncludePaths(StringRef Base,
+ StringRef ArchDir,
+ StringRef Dir32,
+ StringRef Dir64,
+ const llvm::Triple &triple);
+
+ /// AddMinGWCPlusPlusIncludePaths - Add the necessary paths to support a MinGW
+ /// libstdc++.
+ void AddMinGWCPlusPlusIncludePaths(StringRef Base,
+ StringRef Arch,
+ StringRef Version);
+
+ /// AddMinGW64CXXPaths - Add the necessary paths to support
+ /// libstdc++ of x86_64-w64-mingw32 aka mingw-w64.
+ void AddMinGW64CXXPaths(StringRef Base,
+ StringRef Version);
+
+ // AddDefaultCIncludePaths - Add paths that should always be searched.
+ void AddDefaultCIncludePaths(const llvm::Triple &triple,
+ const HeaderSearchOptions &HSOpts);
+
+ // AddDefaultCPlusPlusIncludePaths - Add paths that should be searched when
+ // compiling c++.
+ void AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple,
+ const HeaderSearchOptions &HSOpts);
+
+ /// AddDefaultIncludePaths - Adds the default system include paths so that
+ /// e.g. stdio.h is found.
+ void AddDefaultIncludePaths(const LangOptions &Lang,
+ const llvm::Triple &triple,
+ const HeaderSearchOptions &HSOpts);
+
+ /// Realize - Merges all search path lists into one list and sends it to
+ /// HeaderSearch.
+ void Realize(const LangOptions &Lang);
+};
+
+} // end anonymous namespace.
+
+void InitHeaderSearch::AddPath(const Twine &Path,
+ IncludeDirGroup Group, bool isCXXAware,
+ bool isUserSupplied, bool isFramework,
+ bool IgnoreSysRoot) {
+ assert(!Path.isTriviallyEmpty() && "can't handle empty path here");
+ FileManager &FM = Headers.getFileMgr();
+
+ // Compute the actual path, taking into consideration -isysroot.
+ SmallString<256> MappedPathStorage;
+ StringRef MappedPathStr = Path.toStringRef(MappedPathStorage);
+
+ // Handle isysroot.
+ if ((Group == System || Group == CXXSystem) && !IgnoreSysRoot &&
+#if defined(_WIN32)
+ !MappedPathStr.empty() &&
+ llvm::sys::path::is_separator(MappedPathStr[0]) &&
+#else
+ llvm::sys::path::is_absolute(MappedPathStr) &&
+#endif
+ IsNotEmptyOrRoot) {
+ MappedPathStorage.clear();
+ MappedPathStr =
+ (IncludeSysroot + Path).toStringRef(MappedPathStorage);
+ }
+
+ // Compute the DirectoryLookup type.
+ SrcMgr::CharacteristicKind Type;
+ if (Group == Quoted || Group == Angled || Group == IndexHeaderMap)
+ Type = SrcMgr::C_User;
+ else if (isCXXAware)
+ Type = SrcMgr::C_System;
+ else
+ Type = SrcMgr::C_ExternCSystem;
+
+
+ // If the directory exists, add it.
+ if (const DirectoryEntry *DE = FM.getDirectory(MappedPathStr)) {
+ IncludePath.push_back(std::make_pair(Group, DirectoryLookup(DE, Type,
+ isUserSupplied, isFramework)));
+ return;
+ }
+
+ // Check to see if this is an apple-style headermap (which are not allowed to
+ // be frameworks).
+ if (!isFramework) {
+ if (const FileEntry *FE = FM.getFile(MappedPathStr)) {
+ if (const HeaderMap *HM = Headers.CreateHeaderMap(FE)) {
+ // It is a headermap, add it to the search path.
+ IncludePath.push_back(std::make_pair(Group, DirectoryLookup(HM, Type,
+ isUserSupplied, Group == IndexHeaderMap)));
+ return;
+ }
+ }
+ }
+
+ if (Verbose)
+ llvm::errs() << "ignoring nonexistent directory \""
+ << MappedPathStr << "\"\n";
+}
+
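+// For a Base of "/usr/include/c++/4.2.1" with ArchDir "i686-apple-darwin10"
+// and Dir64 "x86_64", this adds, on a 64-bit target:
+//   /usr/include/c++/4.2.1
+//   /usr/include/c++/4.2.1/i686-apple-darwin10/x86_64
+//   /usr/include/c++/4.2.1/backward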
+void InitHeaderSearch::AddGnuCPlusPlusIncludePaths(StringRef Base,
+ StringRef ArchDir,
+ StringRef Dir32,
+ StringRef Dir64,
+ const llvm::Triple &triple) {
+ // Add the base dir
+ AddPath(Base, CXXSystem, true, false, false);
+
+ // Add the multilib dirs
+ llvm::Triple::ArchType arch = triple.getArch();
+ bool is64bit = arch == llvm::Triple::ppc64 || arch == llvm::Triple::x86_64;
+ if (is64bit)
+ AddPath(Base + "/" + ArchDir + "/" + Dir64, CXXSystem, true, false, false);
+ else
+ AddPath(Base + "/" + ArchDir + "/" + Dir32, CXXSystem, true, false, false);
+
+ // Add the backward dir
+ AddPath(Base + "/backward", CXXSystem, true, false, false);
+}
+
+void InitHeaderSearch::AddMinGWCPlusPlusIncludePaths(StringRef Base,
+ StringRef Arch,
+ StringRef Version) {
+ AddPath(Base + "/" + Arch + "/" + Version + "/include/c++",
+ CXXSystem, true, false, false);
+ AddPath(Base + "/" + Arch + "/" + Version + "/include/c++/" + Arch,
+ CXXSystem, true, false, false);
+ AddPath(Base + "/" + Arch + "/" + Version + "/include/c++/backward",
+ CXXSystem, true, false, false);
+}
+
+void InitHeaderSearch::AddMinGW64CXXPaths(StringRef Base,
+ StringRef Version) {
+ // Assumes Base is HeaderSearchOpts' ResourceDir
+ AddPath(Base + "/../../../include/c++/" + Version,
+ CXXSystem, true, false, false);
+ AddPath(Base + "/../../../include/c++/" + Version + "/x86_64-w64-mingw32",
+ CXXSystem, true, false, false);
+ AddPath(Base + "/../../../include/c++/" + Version + "/i686-w64-mingw32",
+ CXXSystem, true, false, false);
+ AddPath(Base + "/../../../include/c++/" + Version + "/backward",
+ CXXSystem, true, false, false);
+}
+
+void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
+ const HeaderSearchOptions &HSOpts) {
+ llvm::Triple::OSType os = triple.getOS();
+
+ if (HSOpts.UseStandardSystemIncludes) {
+ switch (os) {
+ case llvm::Triple::FreeBSD:
+ case llvm::Triple::NetBSD:
+ break;
+ default:
+ // FIXME: temporary hack: hard-coded paths.
+ AddPath("/usr/local/include", System, true, false, false);
+ break;
+ }
+ }
+
+ // Builtin includes use #include_next directives and should be positioned
+ // just prior to the C include dirs.
+ if (HSOpts.UseBuiltinIncludes) {
+ // Ignore the sysroot; we *always* look for clang headers relative to the
+ // supplied path.
+ llvm::sys::Path P(HSOpts.ResourceDir);
+ P.appendComponent("include");
+ AddPath(P.str(), System, false, false, false, /*IgnoreSysRoot=*/ true);
+ }
+
+ // All remaining additions are for system include directories; exit early if
+ // we aren't using them.
+ if (!HSOpts.UseStandardSystemIncludes)
+ return;
+
+ // Add dirs specified via 'configure --with-c-include-dirs'.
+ StringRef CIncludeDirs(C_INCLUDE_DIRS);
+ if (CIncludeDirs != "") {
+ SmallVector<StringRef, 5> dirs;
+ CIncludeDirs.split(dirs, ":");
+ for (SmallVectorImpl<StringRef>::iterator i = dirs.begin();
+ i != dirs.end();
+ ++i)
+ AddPath(*i, System, false, false, false);
+ return;
+ }
+
+ switch (os) {
+ case llvm::Triple::Linux:
+ case llvm::Triple::Win32:
+ llvm_unreachable("Include management is handled in the driver.");
+
+ case llvm::Triple::Haiku:
+ AddPath("/boot/common/include", System, true, false, false);
+ AddPath("/boot/develop/headers/os", System, true, false, false);
+ AddPath("/boot/develop/headers/os/app", System, true, false, false);
+ AddPath("/boot/develop/headers/os/arch", System, true, false, false);
+ AddPath("/boot/develop/headers/os/device", System, true, false, false);
+ AddPath("/boot/develop/headers/os/drivers", System, true, false, false);
+ AddPath("/boot/develop/headers/os/game", System, true, false, false);
+ AddPath("/boot/develop/headers/os/interface", System, true, false, false);
+ AddPath("/boot/develop/headers/os/kernel", System, true, false, false);
+ AddPath("/boot/develop/headers/os/locale", System, true, false, false);
+ AddPath("/boot/develop/headers/os/mail", System, true, false, false);
+ AddPath("/boot/develop/headers/os/media", System, true, false, false);
+ AddPath("/boot/develop/headers/os/midi", System, true, false, false);
+ AddPath("/boot/develop/headers/os/midi2", System, true, false, false);
+ AddPath("/boot/develop/headers/os/net", System, true, false, false);
+ AddPath("/boot/develop/headers/os/storage", System, true, false, false);
+ AddPath("/boot/develop/headers/os/support", System, true, false, false);
+ AddPath("/boot/develop/headers/os/translation",
+ System, true, false, false);
+ AddPath("/boot/develop/headers/os/add-ons/graphics",
+ System, true, false, false);
+ AddPath("/boot/develop/headers/os/add-ons/input_server",
+ System, true, false, false);
+ AddPath("/boot/develop/headers/os/add-ons/screen_saver",
+ System, true, false, false);
+ AddPath("/boot/develop/headers/os/add-ons/tracker",
+ System, true, false, false);
+ AddPath("/boot/develop/headers/os/be_apps/Deskbar",
+ System, true, false, false);
+ AddPath("/boot/develop/headers/os/be_apps/NetPositive",
+ System, true, false, false);
+ AddPath("/boot/develop/headers/os/be_apps/Tracker",
+ System, true, false, false);
+ AddPath("/boot/develop/headers/cpp", System, true, false, false);
+ AddPath("/boot/develop/headers/cpp/i586-pc-haiku",
+ System, true, false, false);
+ AddPath("/boot/develop/headers/3rdparty", System, true, false, false);
+ AddPath("/boot/develop/headers/bsd", System, true, false, false);
+ AddPath("/boot/develop/headers/glibc", System, true, false, false);
+ AddPath("/boot/develop/headers/posix", System, true, false, false);
+ AddPath("/boot/develop/headers", System, true, false, false);
+ break;
+ case llvm::Triple::RTEMS:
+ break;
+ case llvm::Triple::Cygwin:
+ AddPath("/usr/include/w32api", System, true, false, false);
+ break;
+ case llvm::Triple::MinGW32: {
+ // mingw-w64 crt include paths
+ llvm::sys::Path P(HSOpts.ResourceDir);
+ P.appendComponent("../../../i686-w64-mingw32/include"); // <sysroot>/i686-w64-mingw32/include
+ AddPath(P.str(), System, true, false, false);
+ P = llvm::sys::Path(HSOpts.ResourceDir);
+ P.appendComponent("../../../x86_64-w64-mingw32/include"); // <sysroot>/x86_64-w64-mingw32/include
+ AddPath(P.str(), System, true, false, false);
+ // mingw.org crt include paths
+ P = llvm::sys::Path(HSOpts.ResourceDir);
+ P.appendComponent("../../../include"); // <sysroot>/include
+ AddPath(P.str(), System, true, false, false);
+ AddPath("/mingw/include", System, true, false, false);
+ AddPath("c:/mingw/include", System, true, false, false);
+ }
+ break;
+ case llvm::Triple::FreeBSD:
+ AddPath(CLANG_PREFIX "/usr/include/clang/" CLANG_VERSION_STRING,
+ System, false, false, false);
+ break;
+
+ default:
+ break;
+ }
+
+ if (os != llvm::Triple::RTEMS)
+ AddPath(CLANG_PREFIX "/usr/include", System, false, false, false);
+}
+
+void InitHeaderSearch::
+AddDefaultCPlusPlusIncludePaths(const llvm::Triple &triple, const HeaderSearchOptions &HSOpts) {
+ llvm::Triple::OSType os = triple.getOS();
+ // FIXME: temporary hack: hard-coded paths.
+
+ if (triple.isOSDarwin()) {
+ switch (triple.getArch()) {
+ default: break;
+
+ case llvm::Triple::ppc:
+ case llvm::Triple::ppc64:
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
+ "powerpc-apple-darwin10", "", "ppc64",
+ triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.0.0",
+ "powerpc-apple-darwin10", "", "ppc64",
+ triple);
+ break;
+
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
+ "i686-apple-darwin10", "", "x86_64", triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.0.0",
+ "i686-apple-darwin8", "", "", triple);
+ break;
+
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
+ "arm-apple-darwin10", "v7", "", triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.2.1",
+ "arm-apple-darwin10", "v6", "", triple);
+ break;
+ }
+ return;
+ }
+
+ switch (os) {
+ case llvm::Triple::Linux:
+ case llvm::Triple::Win32:
+ llvm_unreachable("Include management is handled in the driver.");
+
+ case llvm::Triple::Cygwin:
+ // Cygwin-1.7
+ AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.5.3");
+ AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.3.4");
+ // g++-4 / Cygwin-1.5
+ AddMinGWCPlusPlusIncludePaths("/usr/lib/gcc", "i686-pc-cygwin", "4.3.2");
+ break;
+ case llvm::Triple::MinGW32:
+ // mingw-w64 C++ include paths (i686-w64-mingw32 and x86_64-w64-mingw32)
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.0");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.1");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.2");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.3");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.5.4");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.6.0");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.6.1");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.6.2");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.6.3");
+ AddMinGW64CXXPaths(HSOpts.ResourceDir, "4.7.0");
+ // mingw.org C++ include paths
+ AddMinGWCPlusPlusIncludePaths("/mingw/lib/gcc", "mingw32", "4.5.2"); //MSYS
+ AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.6.2");
+ AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.6.1");
+ AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.5.2");
+ AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.5.0");
+ AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.4.0");
+ AddMinGWCPlusPlusIncludePaths("c:/MinGW/lib/gcc", "mingw32", "4.3.0");
+ break;
+ case llvm::Triple::DragonFly:
+ AddPath("/usr/include/c++/4.1", CXXSystem, true, false, false);
+ break;
+ case llvm::Triple::FreeBSD:
+ // FreeBSD 8.0
+ // FreeBSD 7.3
+ AddGnuCPlusPlusIncludePaths(CLANG_PREFIX "/usr/include/c++/4.2",
+ "", "", "", triple);
+ AddGnuCPlusPlusIncludePaths(CLANG_PREFIX "/usr/include/c++/4.2/backward",
+ "", "", "", triple);
+ break;
+ case llvm::Triple::NetBSD:
+ AddGnuCPlusPlusIncludePaths("/usr/include/g++", "", "", "", triple);
+ break;
+ case llvm::Triple::OpenBSD: {
+ std::string t = triple.getTriple();
+ if (t.substr(0, 6) == "x86_64")
+ t.replace(0, 6, "amd64");
+ AddGnuCPlusPlusIncludePaths("/usr/include/g++",
+ t, "", "", triple);
+ break;
+ }
+ case llvm::Triple::Minix:
+ AddGnuCPlusPlusIncludePaths("/usr/gnu/include/c++/4.4.3",
+ "", "", "", triple);
+ break;
+ case llvm::Triple::Solaris:
+ AddGnuCPlusPlusIncludePaths("/usr/gcc/4.5/include/c++/4.5.2/",
+ "i386-pc-solaris2.11", "", "", triple);
+ // Solaris - Fall through.
+ case llvm::Triple::AuroraUX:
+ // AuroraUX
+ AddGnuCPlusPlusIncludePaths("/opt/gcc4/include/c++/4.2.4",
+ "i386-pc-solaris2.11", "", "", triple);
+ break;
+ default:
+ break;
+ }
+}
+
+void InitHeaderSearch::AddDefaultIncludePaths(const LangOptions &Lang,
+ const llvm::Triple &triple,
+ const HeaderSearchOptions &HSOpts) {
+ // NB: This code path is going away. All of the logic is moving into the
+ // driver which has the information necessary to do target-specific
+ // selections of default include paths. Each target which moves there will be
+ // exempted from this logic here until we can delete the entire pile of code.
+ switch (triple.getOS()) {
+ default:
+ break; // Everything else continues to use this routine's logic.
+
+ case llvm::Triple::Linux:
+ case llvm::Triple::Win32:
+ return;
+ }
+
+ if (Lang.CPlusPlus && HSOpts.UseStandardCXXIncludes &&
+ HSOpts.UseStandardSystemIncludes) {
+ if (HSOpts.UseLibcxx) {
+ if (triple.isOSDarwin()) {
+ // On Darwin, libc++ may be installed alongside the compiler in
+ // lib/c++/v1.
+ llvm::sys::Path P(HSOpts.ResourceDir);
+ if (!P.isEmpty()) {
+ P.eraseComponent(); // Remove version from foo/lib/clang/version
+ P.eraseComponent(); // Remove clang from foo/lib/clang
+
+ // Get foo/lib/c++/v1
+ P.appendComponent("c++");
+ P.appendComponent("v1");
+ AddPath(P.str(), CXXSystem, true, false, false, true);
+ }
+ }
+ // On Solaris, include the support directory for things like xlocale and
+ // fudged system headers.
+ if (triple.getOS() == llvm::Triple::Solaris)
+ AddPath("/usr/include/c++/v1/support/solaris", CXXSystem, true, false,
+ false);
+
+ AddPath("/usr/include/c++/v1", CXXSystem, true, false, false);
+ } else {
+ AddDefaultCPlusPlusIncludePaths(triple, HSOpts);
+ }
+ }
+
+ AddDefaultCIncludePaths(triple, HSOpts);
+
+ // Add the default framework include paths on Darwin.
+ if (HSOpts.UseStandardSystemIncludes) {
+ if (triple.isOSDarwin()) {
+ AddPath("/System/Library/Frameworks", System, true, false, true);
+ AddPath("/Library/Frameworks", System, true, false, true);
+ }
+ }
+}
+
+/// RemoveDuplicates - If there are duplicate directory entries in the specified
+/// search list, remove the later (dead) ones. Returns the number of non-system
+/// directories removed, which is used to update NumAngled.
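+///
+/// For example, given the list {A (user), B (system), A (system)}, the later
+/// A is a duplicate; because it is a system entry shadowing a user entry, the
+/// earlier user entry is the one removed, and the function returns 1.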
+static unsigned RemoveDuplicates(std::vector<DirectoryLookup> &SearchList,
+ unsigned First, bool Verbose) {
+ llvm::SmallPtrSet<const DirectoryEntry *, 8> SeenDirs;
+ llvm::SmallPtrSet<const DirectoryEntry *, 8> SeenFrameworkDirs;
+ llvm::SmallPtrSet<const HeaderMap *, 8> SeenHeaderMaps;
+ unsigned NonSystemRemoved = 0;
+ for (unsigned i = First; i != SearchList.size(); ++i) {
+ unsigned DirToRemove = i;
+
+ const DirectoryLookup &CurEntry = SearchList[i];
+
+ if (CurEntry.isNormalDir()) {
+ // If this isn't the first time we've seen this dir, remove it.
+ if (SeenDirs.insert(CurEntry.getDir()))
+ continue;
+ } else if (CurEntry.isFramework()) {
+ // If this isn't the first time we've seen this framework dir, remove it.
+ if (SeenFrameworkDirs.insert(CurEntry.getFrameworkDir()))
+ continue;
+ } else {
+ assert(CurEntry.isHeaderMap() && "Not a headermap or normal dir?");
+ // If this isn't the first time we've seen this headermap, remove it.
+ if (SeenHeaderMaps.insert(CurEntry.getHeaderMap()))
+ continue;
+ }
+
+ // If we have a normal #include dir/framework/headermap that is shadowed
+ // later in the chain by a system include location, we actually want to
+ // ignore the user's request and drop the user dir... keeping the system
+ // dir. This is weird, but required to emulate GCC's search path correctly.
+ //
+ // Since dupes of system dirs are rare, just rescan to find the original
+ // that we're nuking instead of using a DenseMap.
+ if (CurEntry.getDirCharacteristic() != SrcMgr::C_User) {
+ // Find the earlier dir that this one is a duplicate of.
+ unsigned FirstDir;
+ for (FirstDir = 0; ; ++FirstDir) {
+ assert(FirstDir != i && "Didn't find dupe?");
+
+ const DirectoryLookup &SearchEntry = SearchList[FirstDir];
+
+ // If these are different lookup types, then they can't be the dupe.
+ if (SearchEntry.getLookupType() != CurEntry.getLookupType())
+ continue;
+
+ bool isSame;
+ if (CurEntry.isNormalDir())
+ isSame = SearchEntry.getDir() == CurEntry.getDir();
+ else if (CurEntry.isFramework())
+ isSame = SearchEntry.getFrameworkDir() == CurEntry.getFrameworkDir();
+ else {
+ assert(CurEntry.isHeaderMap() && "Not a headermap or normal dir?");
+ isSame = SearchEntry.getHeaderMap() == CurEntry.getHeaderMap();
+ }
+
+ if (isSame)
+ break;
+ }
+
+ // If the first dir in the search path is a non-system dir, zap it
+ // instead of the system one.
+ if (SearchList[FirstDir].getDirCharacteristic() == SrcMgr::C_User)
+ DirToRemove = FirstDir;
+ }
+
+ if (Verbose) {
+ llvm::errs() << "ignoring duplicate directory \""
+ << CurEntry.getName() << "\"\n";
+ if (DirToRemove != i)
+ llvm::errs() << " as it is a non-system directory that duplicates "
+ << "a system directory\n";
+ }
+ if (DirToRemove != i)
+ ++NonSystemRemoved;
+
+ // This is reached if the current entry is a duplicate. Remove the
+ // DirToRemove (usually the current dir).
+ SearchList.erase(SearchList.begin()+DirToRemove);
+ --i;
+ }
+ return NonSystemRemoved;
+}
+
+
+void InitHeaderSearch::Realize(const LangOptions &Lang) {
+ // Concatenate the Quoted, Angled, System, and After chains together into
+ // SearchList.
+ std::vector<DirectoryLookup> SearchList;
+ SearchList.reserve(IncludePath.size());
+
+ // Quoted arguments go first.
+ for (path_iterator it = IncludePath.begin(), ie = IncludePath.end();
+ it != ie; ++it) {
+ if (it->first == Quoted)
+ SearchList.push_back(it->second);
+ }
+ // Deduplicate and remember index.
+ RemoveDuplicates(SearchList, 0, Verbose);
+ unsigned NumQuoted = SearchList.size();
+
+ for (path_iterator it = IncludePath.begin(), ie = IncludePath.end();
+ it != ie; ++it) {
+ if (it->first == Angled || it->first == IndexHeaderMap)
+ SearchList.push_back(it->second);
+ }
+
+ RemoveDuplicates(SearchList, NumQuoted, Verbose);
+ unsigned NumAngled = SearchList.size();
+
+ for (path_iterator it = IncludePath.begin(), ie = IncludePath.end();
+ it != ie; ++it) {
+ if (it->first == System ||
+ (!Lang.ObjC1 && !Lang.CPlusPlus && it->first == CSystem) ||
+ (/*FIXME !Lang.ObjC1 && */Lang.CPlusPlus && it->first == CXXSystem) ||
+ (Lang.ObjC1 && !Lang.CPlusPlus && it->first == ObjCSystem) ||
+ (Lang.ObjC1 && Lang.CPlusPlus && it->first == ObjCXXSystem))
+ SearchList.push_back(it->second);
+ }
+
+ for (path_iterator it = IncludePath.begin(), ie = IncludePath.end();
+ it != ie; ++it) {
+ if (it->first == After)
+ SearchList.push_back(it->second);
+ }
+
+ // Remove duplicates across both the Angled and System directories. GCC does
+ // this and failing to remove duplicates across these two groups breaks
+ // #include_next.
+ unsigned NonSystemRemoved = RemoveDuplicates(SearchList, NumQuoted, Verbose);
+ NumAngled -= NonSystemRemoved;
+
+ bool DontSearchCurDir = false; // TODO: set to true if -I- is set?
+ Headers.SetSearchPaths(SearchList, NumQuoted, NumAngled, DontSearchCurDir);
+
+ // If verbose, print the list of directories that will be searched.
+ if (Verbose) {
+ llvm::errs() << "#include \"...\" search starts here:\n";
+ for (unsigned i = 0, e = SearchList.size(); i != e; ++i) {
+ if (i == NumQuoted)
+ llvm::errs() << "#include <...> search starts here:\n";
+ const char *Name = SearchList[i].getName();
+ const char *Suffix;
+ if (SearchList[i].isNormalDir())
+ Suffix = "";
+ else if (SearchList[i].isFramework())
+ Suffix = " (framework directory)";
+ else {
+ assert(SearchList[i].isHeaderMap() && "Unknown DirectoryLookup");
+ Suffix = " (headermap)";
+ }
+ llvm::errs() << " " << Name << Suffix << "\n";
+ }
+ llvm::errs() << "End of search list.\n";
+ }
+}
+
+void clang::ApplyHeaderSearchOptions(HeaderSearch &HS,
+ const HeaderSearchOptions &HSOpts,
+ const LangOptions &Lang,
+ const llvm::Triple &Triple) {
+ InitHeaderSearch Init(HS, HSOpts.Verbose, HSOpts.Sysroot);
+
+ // Add the user defined entries.
+ for (unsigned i = 0, e = HSOpts.UserEntries.size(); i != e; ++i) {
+ const HeaderSearchOptions::Entry &E = HSOpts.UserEntries[i];
+ Init.AddPath(E.Path, E.Group, !E.ImplicitExternC, E.IsUserSupplied,
+ E.IsFramework, E.IgnoreSysRoot);
+ }
+
+ Init.AddDefaultIncludePaths(Lang, Triple, HSOpts);
+
+ if (HSOpts.UseBuiltinIncludes) {
+ // Set up the builtin include directory in the module map.
+ llvm::sys::Path P(HSOpts.ResourceDir);
+ P.appendComponent("include");
+ if (const DirectoryEntry *Dir = HS.getFileMgr().getDirectory(P.str()))
+ HS.getModuleMap().setBuiltinIncludeDir(Dir);
+ }
+
+ Init.Realize(Lang);
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
new file mode 100644
index 0000000..93d49b0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/InitPreprocessor.cpp
@@ -0,0 +1,763 @@
+//===--- InitPreprocessor.cpp - PP initialization code. ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the clang::InitializePreprocessor function.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Version.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/MacroBuilder.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/FrontendOptions.h"
+#include "clang/Frontend/PreprocessorOptions.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+using namespace clang;
+
+// Append a #define line to Builder for Macro. Macro should be of the form
+// "XXX", in which case we emit "#define XXX 1", or "XXX=Y z W", in which case
+// we emit "#define XXX Y z W". To get a #define with no value, use "XXX=".
+static void DefineBuiltinMacro(MacroBuilder &Builder, StringRef Macro,
+ DiagnosticsEngine &Diags) {
+ std::pair<StringRef, StringRef> MacroPair = Macro.split('=');
+ StringRef MacroName = MacroPair.first;
+ StringRef MacroBody = MacroPair.second;
+ if (MacroName.size() != Macro.size()) {
+ // Per GCC -D semantics, the macro ends at \n if it exists.
+ StringRef::size_type End = MacroBody.find_first_of("\n\r");
+ if (End != StringRef::npos)
+ Diags.Report(diag::warn_fe_macro_contains_embedded_newline)
+ << MacroName;
+ Builder.defineMacro(MacroName, MacroBody.substr(0, End));
+ } else {
+ // Push "macroname 1".
+ Builder.defineMacro(Macro);
+ }
+}
+
+/// AddImplicitInclude - Add an implicit #include of the specified file to the
+/// predefines buffer.
+static void AddImplicitInclude(MacroBuilder &Builder, StringRef File,
+ FileManager &FileMgr) {
+ Builder.append(Twine("#include \"") +
+ HeaderSearch::NormalizeDashIncludePath(File, FileMgr) + "\"");
+}
+
+static void AddImplicitIncludeMacros(MacroBuilder &Builder,
+ StringRef File,
+ FileManager &FileMgr) {
+ Builder.append(Twine("#__include_macros \"") +
+ HeaderSearch::NormalizeDashIncludePath(File, FileMgr) + "\"");
+ // Marker token to stop the __include_macros fetch loop.
+ Builder.append("##"); // ##?
+}
+
+/// AddImplicitIncludePTH - Add an implicit #include using the original file
+/// used to generate a PTH cache.
+static void AddImplicitIncludePTH(MacroBuilder &Builder, Preprocessor &PP,
+ StringRef ImplicitIncludePTH) {
+ PTHManager *P = PP.getPTHManager();
+ // Null check 'P' in the corner case where it couldn't be created.
+ const char *OriginalFile = P ? P->getOriginalSourceFile() : 0;
+
+ if (!OriginalFile) {
+ PP.getDiagnostics().Report(diag::err_fe_pth_file_has_no_source_header)
+ << ImplicitIncludePTH;
+ return;
+ }
+
+ AddImplicitInclude(Builder, OriginalFile, PP.getFileManager());
+}
+
+/// PickFP - This is used to pick a value based on the FP semantics of the
+/// specified FP model.
+template <typename T>
+static T PickFP(const llvm::fltSemantics *Sem, T IEEESingleVal,
+ T IEEEDoubleVal, T X87DoubleExtendedVal, T PPCDoubleDoubleVal,
+ T IEEEQuadVal) {
+ if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::IEEEsingle)
+ return IEEESingleVal;
+ if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::IEEEdouble)
+ return IEEEDoubleVal;
+ if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::x87DoubleExtended)
+ return X87DoubleExtendedVal;
+ if (Sem == (const llvm::fltSemantics*)&llvm::APFloat::PPCDoubleDouble)
+ return PPCDoubleDoubleVal;
+ assert(Sem == (const llvm::fltSemantics*)&llvm::APFloat::IEEEquad);
+ return IEEEQuadVal;
+}
+
+static void DefineFloatMacros(MacroBuilder &Builder, StringRef Prefix,
+ const llvm::fltSemantics *Sem) {
+ const char *DenormMin, *Epsilon, *Max, *Min;
+ DenormMin = PickFP(Sem, "1.40129846e-45F", "4.9406564584124654e-324",
+ "3.64519953188247460253e-4951L",
+ "4.94065645841246544176568792868221e-324L",
+ "6.47517511943802511092443895822764655e-4966L");
+ int Digits = PickFP(Sem, 6, 15, 18, 31, 33);
+ Epsilon = PickFP(Sem, "1.19209290e-7F", "2.2204460492503131e-16",
+ "1.08420217248550443401e-19L",
+ "4.94065645841246544176568792868221e-324L",
+ "1.92592994438723585305597794258492732e-34L");
+ int MantissaDigits = PickFP(Sem, 24, 53, 64, 106, 113);
+ int Min10Exp = PickFP(Sem, -37, -307, -4931, -291, -4931);
+ int Max10Exp = PickFP(Sem, 38, 308, 4932, 308, 4932);
+ int MinExp = PickFP(Sem, -125, -1021, -16381, -968, -16381);
+ int MaxExp = PickFP(Sem, 128, 1024, 16384, 1024, 16384);
+ Min = PickFP(Sem, "1.17549435e-38F", "2.2250738585072014e-308",
+ "3.36210314311209350626e-4932L",
+ "2.00416836000897277799610805135016e-292L",
+ "3.36210314311209350626267781732175260e-4932L");
+ Max = PickFP(Sem, "3.40282347e+38F", "1.7976931348623157e+308",
+ "1.18973149535723176502e+4932L",
+ "1.79769313486231580793728971405301e+308L",
+ "1.18973149535723176508575932662800702e+4932L");
+
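+  // Build the common macro prefix, e.g. "__FLT_" when Prefix is "FLT".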
+ SmallString<32> DefPrefix;
+ DefPrefix = "__";
+ DefPrefix += Prefix;
+ DefPrefix += "_";
+
+ Builder.defineMacro(DefPrefix + "DENORM_MIN__", DenormMin);
+ Builder.defineMacro(DefPrefix + "HAS_DENORM__");
+ Builder.defineMacro(DefPrefix + "DIG__", Twine(Digits));
+ Builder.defineMacro(DefPrefix + "EPSILON__", Twine(Epsilon));
+ Builder.defineMacro(DefPrefix + "HAS_INFINITY__");
+ Builder.defineMacro(DefPrefix + "HAS_QUIET_NAN__");
+ Builder.defineMacro(DefPrefix + "MANT_DIG__", Twine(MantissaDigits));
+
+ Builder.defineMacro(DefPrefix + "MAX_10_EXP__", Twine(Max10Exp));
+ Builder.defineMacro(DefPrefix + "MAX_EXP__", Twine(MaxExp));
+ Builder.defineMacro(DefPrefix + "MAX__", Twine(Max));
+
+ Builder.defineMacro(DefPrefix + "MIN_10_EXP__","("+Twine(Min10Exp)+")");
+ Builder.defineMacro(DefPrefix + "MIN_EXP__", "("+Twine(MinExp)+")");
+ Builder.defineMacro(DefPrefix + "MIN__", Twine(Min));
+}
+
+
+/// DefineTypeSize - Emit a macro to the predefines buffer that declares a macro
+/// named MacroName with the max value for a type with width 'TypeWidth', a
+/// signedness of 'isSigned', and a value suffix of 'ValSuffix' (e.g. LL).
+static void DefineTypeSize(StringRef MacroName, unsigned TypeWidth,
+ StringRef ValSuffix, bool isSigned,
+ MacroBuilder &Builder) {
+ llvm::APInt MaxVal = isSigned ? llvm::APInt::getSignedMaxValue(TypeWidth)
+ : llvm::APInt::getMaxValue(TypeWidth);
+ Builder.defineMacro(MacroName, MaxVal.toString(10, isSigned) + ValSuffix);
+}
+
+/// DefineTypeSize - An overloaded helper that uses TargetInfo to determine
+/// the width, suffix, and signedness of the given type.
+static void DefineTypeSize(StringRef MacroName, TargetInfo::IntType Ty,
+ const TargetInfo &TI, MacroBuilder &Builder) {
+ DefineTypeSize(MacroName, TI.getTypeWidth(Ty), TI.getTypeConstantSuffix(Ty),
+ TI.isTypeSigned(Ty), Builder);
+}
+
+static void DefineType(const Twine &MacroName, TargetInfo::IntType Ty,
+ MacroBuilder &Builder) {
+ Builder.defineMacro(MacroName, TargetInfo::getTypeName(Ty));
+}
+
+static void DefineTypeWidth(StringRef MacroName, TargetInfo::IntType Ty,
+ const TargetInfo &TI, MacroBuilder &Builder) {
+ Builder.defineMacro(MacroName, Twine(TI.getTypeWidth(Ty)));
+}
+
+static void DefineTypeSizeof(StringRef MacroName, unsigned BitWidth,
+ const TargetInfo &TI, MacroBuilder &Builder) {
+ Builder.defineMacro(MacroName,
+ Twine(BitWidth / TI.getCharWidth()));
+}
+
+static void DefineExactWidthIntType(TargetInfo::IntType Ty,
+ const TargetInfo &TI, MacroBuilder &Builder) {
+ int TypeWidth = TI.getTypeWidth(Ty);
+
+ // Use the target specified int64 type, when appropriate, so that [u]int64_t
+ // ends up being defined in terms of the correct type.
+ if (TypeWidth == 64)
+ Ty = TI.getInt64Type();
+
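+  // For a 32-bit type, for example, this defines __INT32_TYPE__ and, when the
+  // constant suffix is non-empty, __INT32_C_SUFFIX__.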
+ DefineType("__INT" + Twine(TypeWidth) + "_TYPE__", Ty, Builder);
+
+ StringRef ConstSuffix(TargetInfo::getTypeConstantSuffix(Ty));
+ if (!ConstSuffix.empty())
+ Builder.defineMacro("__INT" + Twine(TypeWidth) + "_C_SUFFIX__",
+ ConstSuffix);
+}
+
+/// Get the value the ATOMIC_*_LOCK_FREE macro should have for a type with
+/// the specified properties.
+static const char *getLockFreeValue(unsigned TypeWidth, unsigned TypeAlign,
+ unsigned InlineWidth) {
+ // Fully-aligned, power-of-2 sizes no larger than the inline
+ // width will be inlined as lock-free operations.
+ if (TypeWidth == TypeAlign && (TypeWidth & (TypeWidth - 1)) == 0 &&
+ TypeWidth <= InlineWidth)
+ return "2"; // "always lock free"
+ // We cannot be certain what operations the lib calls might be
+ // able to implement as lock-free on future processors.
+ return "1"; // "sometimes lock free"
+}
+
+/// \brief Add definitions required for a smooth interaction between
+/// Objective-C++ automatic reference counting (ARC) and libstdc++ (4.2).
+static void AddObjCXXARCLibstdcxxDefines(const LangOptions &LangOpts,
+ MacroBuilder &Builder) {
+ Builder.defineMacro("_GLIBCXX_PREDEFINED_OBJC_ARC_IS_SCALAR");
+
+ std::string Result;
+ {
+ // Provide specializations for the __is_scalar type trait so that
+ // lifetime-qualified objects are not considered "scalar" types, which
+ // libstdc++ uses as an indicator of the presence of trivial copy, assign,
+ // default-construct, and destruct semantics (none of which hold for
+ // lifetime-qualified objects in ARC).
+ llvm::raw_string_ostream Out(Result);
+
+ Out << "namespace std {\n"
+ << "\n"
+ << "struct __true_type;\n"
+ << "struct __false_type;\n"
+ << "\n";
+
+ Out << "template<typename _Tp> struct __is_scalar;\n"
+ << "\n";
+
+ Out << "template<typename _Tp>\n"
+ << "struct __is_scalar<__attribute__((objc_ownership(strong))) _Tp> {\n"
+ << " enum { __value = 0 };\n"
+ << " typedef __false_type __type;\n"
+ << "};\n"
+ << "\n";
+
+ if (LangOpts.ObjCRuntimeHasWeak) {
+ Out << "template<typename _Tp>\n"
+ << "struct __is_scalar<__attribute__((objc_ownership(weak))) _Tp> {\n"
+ << " enum { __value = 0 };\n"
+ << " typedef __false_type __type;\n"
+ << "};\n"
+ << "\n";
+ }
+
+ Out << "template<typename _Tp>\n"
+ << "struct __is_scalar<__attribute__((objc_ownership(autoreleasing)))"
+ << " _Tp> {\n"
+ << " enum { __value = 0 };\n"
+ << " typedef __false_type __type;\n"
+ << "};\n"
+ << "\n";
+
+ Out << "}\n";
+ }
+ Builder.append(Result);
+}
+
+static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
+ const LangOptions &LangOpts,
+ const FrontendOptions &FEOpts,
+ MacroBuilder &Builder) {
+ if (!LangOpts.MicrosoftMode && !LangOpts.TraditionalCPP)
+ Builder.defineMacro("__STDC__");
+ if (LangOpts.Freestanding)
+ Builder.defineMacro("__STDC_HOSTED__", "0");
+ else
+ Builder.defineMacro("__STDC_HOSTED__");
+
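+  // __STDC_VERSION__ reflects the selected C standard: 201112L for C11,
+  // 199901L for C99, and 199409L for C94 (Amendment 1).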
+ if (!LangOpts.CPlusPlus) {
+ if (LangOpts.C11)
+ Builder.defineMacro("__STDC_VERSION__", "201112L");
+ else if (LangOpts.C99)
+ Builder.defineMacro("__STDC_VERSION__", "199901L");
+ else if (!LangOpts.GNUMode && LangOpts.Digraphs)
+ Builder.defineMacro("__STDC_VERSION__", "199409L");
+ } else {
+ if (LangOpts.GNUMode)
+ Builder.defineMacro("__cplusplus");
+ else {
+ // C++0x [cpp.predefined]p1:
+      //   The name __cplusplus is defined to the value 201103L when compiling a
+ // C++ translation unit.
+ if (LangOpts.CPlusPlus0x)
+ Builder.defineMacro("__cplusplus", "201103L");
+ // C++03 [cpp.predefined]p1:
+      //   The name __cplusplus is defined to the value 199711L when compiling a
+ // C++ translation unit.
+ else
+ Builder.defineMacro("__cplusplus", "199711L");
+ }
+ }
+
+ if (LangOpts.ObjC1)
+ Builder.defineMacro("__OBJC__");
+
+ // Not "standard" per se, but available even with the -undef flag.
+ if (LangOpts.AsmPreprocessor)
+ Builder.defineMacro("__ASSEMBLER__");
+}
+
+static void InitializePredefinedMacros(const TargetInfo &TI,
+ const LangOptions &LangOpts,
+ const FrontendOptions &FEOpts,
+ MacroBuilder &Builder) {
+ // Compiler version introspection macros.
+ Builder.defineMacro("__llvm__"); // LLVM Backend
+ Builder.defineMacro("__clang__"); // Clang Frontend
+#define TOSTR2(X) #X
+#define TOSTR(X) TOSTR2(X)
+ Builder.defineMacro("__clang_major__", TOSTR(CLANG_VERSION_MAJOR));
+ Builder.defineMacro("__clang_minor__", TOSTR(CLANG_VERSION_MINOR));
+#ifdef CLANG_VERSION_PATCHLEVEL
+ Builder.defineMacro("__clang_patchlevel__", TOSTR(CLANG_VERSION_PATCHLEVEL));
+#else
+ Builder.defineMacro("__clang_patchlevel__", "0");
+#endif
+ Builder.defineMacro("__clang_version__",
+ "\"" CLANG_VERSION_STRING " ("
+ + getClangFullRepositoryVersion() + ")\"");
+#undef TOSTR
+#undef TOSTR2
+ if (!LangOpts.MicrosoftMode) {
+    // Currently claim to be compatible with GCC 4.2.1-5621, but only if we're
+    // not compiling for MSVC compatibility.
+ Builder.defineMacro("__GNUC_MINOR__", "2");
+ Builder.defineMacro("__GNUC_PATCHLEVEL__", "1");
+ Builder.defineMacro("__GNUC__", "4");
+ Builder.defineMacro("__GXX_ABI_VERSION", "1002");
+ }
+
+ // Define macros for the C11 / C++11 memory orderings
+ Builder.defineMacro("__ATOMIC_RELAXED", "0");
+ Builder.defineMacro("__ATOMIC_CONSUME", "1");
+ Builder.defineMacro("__ATOMIC_ACQUIRE", "2");
+ Builder.defineMacro("__ATOMIC_RELEASE", "3");
+ Builder.defineMacro("__ATOMIC_ACQ_REL", "4");
+ Builder.defineMacro("__ATOMIC_SEQ_CST", "5");
+
+ // Support for #pragma redefine_extname (Sun compatibility)
+ Builder.defineMacro("__PRAGMA_REDEFINE_EXTNAME", "1");
+
+  // As sad as it is, enough software depends on __VERSION__ for version
+  // checks that it is necessary to report 4.2.1 (the base GCC version we claim
+  // compatibility with) first.
+ Builder.defineMacro("__VERSION__", "\"4.2.1 Compatible " +
+ Twine(getClangFullCPPVersion()) + "\"");
+
+ // Initialize language-specific preprocessor defines.
+
+ // Standard conforming mode?
+ if (!LangOpts.GNUMode)
+ Builder.defineMacro("__STRICT_ANSI__");
+
+ if (LangOpts.CPlusPlus0x)
+ Builder.defineMacro("__GXX_EXPERIMENTAL_CXX0X__");
+
+ if (LangOpts.ObjC1) {
+ if (LangOpts.ObjCNonFragileABI) {
+ Builder.defineMacro("__OBJC2__");
+
+ if (LangOpts.ObjCExceptions)
+ Builder.defineMacro("OBJC_ZEROCOST_EXCEPTIONS");
+ }
+
+ if (LangOpts.getGC() != LangOptions::NonGC)
+ Builder.defineMacro("__OBJC_GC__");
+
+ if (LangOpts.NeXTRuntime)
+ Builder.defineMacro("__NEXT_RUNTIME__");
+ }
+
+  // darwin_constant_cfstrings controls this. It also appears to depend on
+  // other things, such as the runtime. This macro is defined even for C code.
+ if (!LangOpts.NoConstantCFStrings)
+ Builder.defineMacro("__CONSTANT_CFSTRINGS__");
+
+ if (LangOpts.ObjC2)
+ Builder.defineMacro("OBJC_NEW_PROPERTIES");
+
+ if (LangOpts.PascalStrings)
+ Builder.defineMacro("__PASCAL_STRINGS__");
+
+ if (LangOpts.Blocks) {
+ Builder.defineMacro("__block", "__attribute__((__blocks__(byref)))");
+ Builder.defineMacro("__BLOCKS__");
+ }
+
+ if (LangOpts.CXXExceptions)
+ Builder.defineMacro("__EXCEPTIONS");
+ if (LangOpts.RTTI)
+ Builder.defineMacro("__GXX_RTTI");
+ if (LangOpts.SjLjExceptions)
+ Builder.defineMacro("__USING_SJLJ_EXCEPTIONS__");
+
+ if (LangOpts.Deprecated)
+ Builder.defineMacro("__DEPRECATED");
+
+ if (LangOpts.CPlusPlus) {
+ Builder.defineMacro("__GNUG__", "4");
+ Builder.defineMacro("__GXX_WEAK__");
+ Builder.defineMacro("__private_extern__", "extern");
+ }
+
+ if (LangOpts.MicrosoftExt) {
+ // Both __PRETTY_FUNCTION__ and __FUNCTION__ are GCC extensions, however
+ // VC++ appears to only like __FUNCTION__.
+ Builder.defineMacro("__PRETTY_FUNCTION__", "__FUNCTION__");
+    // Work around some issues with Visual C++ headers.
+ if (LangOpts.CPlusPlus) {
+ // Since we define wchar_t in C++ mode.
+ Builder.defineMacro("_WCHAR_T_DEFINED");
+ Builder.defineMacro("_NATIVE_WCHAR_T_DEFINED");
+ // FIXME: Support Microsoft's __identifier extension in the lexer.
+ Builder.append("#define __identifier(x) x");
+ Builder.append("class type_info;");
+ }
+
+ if (LangOpts.CPlusPlus0x) {
+ Builder.defineMacro("_HAS_CHAR16_T_LANGUAGE_SUPPORT", "1");
+ }
+ }
+
+ if (LangOpts.Optimize)
+ Builder.defineMacro("__OPTIMIZE__");
+ if (LangOpts.OptimizeSize)
+ Builder.defineMacro("__OPTIMIZE_SIZE__");
+
+ if (LangOpts.FastMath)
+ Builder.defineMacro("__FAST_MATH__");
+
+ // Initialize target-specific preprocessor defines.
+
+ // Define type sizing macros based on the target properties.
+ assert(TI.getCharWidth() == 8 && "Only support 8-bit char so far");
+ Builder.defineMacro("__CHAR_BIT__", "8");
+
+ DefineTypeSize("__SCHAR_MAX__", TI.getCharWidth(), "", true, Builder);
+ DefineTypeSize("__SHRT_MAX__", TargetInfo::SignedShort, TI, Builder);
+ DefineTypeSize("__INT_MAX__", TargetInfo::SignedInt, TI, Builder);
+ DefineTypeSize("__LONG_MAX__", TargetInfo::SignedLong, TI, Builder);
+ DefineTypeSize("__LONG_LONG_MAX__", TargetInfo::SignedLongLong, TI, Builder);
+ DefineTypeSize("__WCHAR_MAX__", TI.getWCharType(), TI, Builder);
+ DefineTypeSize("__INTMAX_MAX__", TI.getIntMaxType(), TI, Builder);
+
+ DefineTypeSizeof("__SIZEOF_DOUBLE__", TI.getDoubleWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_FLOAT__", TI.getFloatWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_INT__", TI.getIntWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_LONG__", TI.getLongWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_LONG_DOUBLE__",TI.getLongDoubleWidth(),TI,Builder);
+ DefineTypeSizeof("__SIZEOF_LONG_LONG__", TI.getLongLongWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_POINTER__", TI.getPointerWidth(0), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_SHORT__", TI.getShortWidth(), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_PTRDIFF_T__",
+ TI.getTypeWidth(TI.getPtrDiffType(0)), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_SIZE_T__",
+ TI.getTypeWidth(TI.getSizeType()), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_WCHAR_T__",
+ TI.getTypeWidth(TI.getWCharType()), TI, Builder);
+ DefineTypeSizeof("__SIZEOF_WINT_T__",
+ TI.getTypeWidth(TI.getWIntType()), TI, Builder);
+
+ DefineType("__INTMAX_TYPE__", TI.getIntMaxType(), Builder);
+ DefineType("__UINTMAX_TYPE__", TI.getUIntMaxType(), Builder);
+ DefineTypeWidth("__INTMAX_WIDTH__", TI.getIntMaxType(), TI, Builder);
+ DefineType("__PTRDIFF_TYPE__", TI.getPtrDiffType(0), Builder);
+ DefineTypeWidth("__PTRDIFF_WIDTH__", TI.getPtrDiffType(0), TI, Builder);
+ DefineType("__INTPTR_TYPE__", TI.getIntPtrType(), Builder);
+ DefineTypeWidth("__INTPTR_WIDTH__", TI.getIntPtrType(), TI, Builder);
+ DefineType("__SIZE_TYPE__", TI.getSizeType(), Builder);
+ DefineTypeWidth("__SIZE_WIDTH__", TI.getSizeType(), TI, Builder);
+ DefineType("__WCHAR_TYPE__", TI.getWCharType(), Builder);
+ DefineTypeWidth("__WCHAR_WIDTH__", TI.getWCharType(), TI, Builder);
+ DefineType("__WINT_TYPE__", TI.getWIntType(), Builder);
+ DefineTypeWidth("__WINT_WIDTH__", TI.getWIntType(), TI, Builder);
+ DefineTypeWidth("__SIG_ATOMIC_WIDTH__", TI.getSigAtomicType(), TI, Builder);
+ DefineType("__CHAR16_TYPE__", TI.getChar16Type(), Builder);
+ DefineType("__CHAR32_TYPE__", TI.getChar32Type(), Builder);
+
+ DefineFloatMacros(Builder, "FLT", &TI.getFloatFormat());
+ DefineFloatMacros(Builder, "DBL", &TI.getDoubleFormat());
+ DefineFloatMacros(Builder, "LDBL", &TI.getLongDoubleFormat());
+
+ // Define a __POINTER_WIDTH__ macro for stdint.h.
+ Builder.defineMacro("__POINTER_WIDTH__",
+ Twine((int)TI.getPointerWidth(0)));
+
+ if (!LangOpts.CharIsSigned)
+ Builder.defineMacro("__CHAR_UNSIGNED__");
+
+ if (!TargetInfo::isTypeSigned(TI.getWIntType()))
+ Builder.defineMacro("__WINT_UNSIGNED__");
+
+ // Define exact-width integer types for stdint.h
+ Builder.defineMacro("__INT" + Twine(TI.getCharWidth()) + "_TYPE__",
+ "char");
+
+ if (TI.getShortWidth() > TI.getCharWidth())
+ DefineExactWidthIntType(TargetInfo::SignedShort, TI, Builder);
+
+ if (TI.getIntWidth() > TI.getShortWidth())
+ DefineExactWidthIntType(TargetInfo::SignedInt, TI, Builder);
+
+ if (TI.getLongWidth() > TI.getIntWidth())
+ DefineExactWidthIntType(TargetInfo::SignedLong, TI, Builder);
+
+ if (TI.getLongLongWidth() > TI.getLongWidth())
+ DefineExactWidthIntType(TargetInfo::SignedLongLong, TI, Builder);
+
+ // Add __builtin_va_list typedef.
+ Builder.append(TI.getVAListDeclaration());
+
+ if (const char *Prefix = TI.getUserLabelPrefix())
+ Builder.defineMacro("__USER_LABEL_PREFIX__", Prefix);
+
+ // Build configuration options. FIXME: these should be controlled by
+ // command line options or something.
+ Builder.defineMacro("__FINITE_MATH_ONLY__", "0");
+
+ if (LangOpts.GNUInline)
+ Builder.defineMacro("__GNUC_GNU_INLINE__");
+ else
+ Builder.defineMacro("__GNUC_STDC_INLINE__");
+
+ // The value written by __atomic_test_and_set.
+ // FIXME: This is target-dependent.
+ Builder.defineMacro("__GCC_ATOMIC_TEST_AND_SET_TRUEVAL", "1");
+
+ // Used by libstdc++ to implement ATOMIC_<foo>_LOCK_FREE.
+ unsigned InlineWidthBits = TI.getMaxAtomicInlineWidth();
+#define DEFINE_LOCK_FREE_MACRO(TYPE, Type) \
+ Builder.defineMacro("__GCC_ATOMIC_" #TYPE "_LOCK_FREE", \
+ getLockFreeValue(TI.get##Type##Width(), \
+ TI.get##Type##Align(), \
+ InlineWidthBits));
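+  // Each expansion defines, e.g., __GCC_ATOMIC_INT_LOCK_FREE based on that
+  // type's width and alignment.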
+ DEFINE_LOCK_FREE_MACRO(BOOL, Bool);
+ DEFINE_LOCK_FREE_MACRO(CHAR, Char);
+ DEFINE_LOCK_FREE_MACRO(CHAR16_T, Char16);
+ DEFINE_LOCK_FREE_MACRO(CHAR32_T, Char32);
+ DEFINE_LOCK_FREE_MACRO(WCHAR_T, WChar);
+ DEFINE_LOCK_FREE_MACRO(SHORT, Short);
+ DEFINE_LOCK_FREE_MACRO(INT, Int);
+ DEFINE_LOCK_FREE_MACRO(LONG, Long);
+ DEFINE_LOCK_FREE_MACRO(LLONG, LongLong);
+ Builder.defineMacro("__GCC_ATOMIC_POINTER_LOCK_FREE",
+ getLockFreeValue(TI.getPointerWidth(0),
+ TI.getPointerAlign(0),
+ InlineWidthBits));
+#undef DEFINE_LOCK_FREE_MACRO
+
+ if (LangOpts.NoInlineDefine)
+ Builder.defineMacro("__NO_INLINE__");
+
+ if (unsigned PICLevel = LangOpts.PICLevel) {
+ Builder.defineMacro("__PIC__", Twine(PICLevel));
+ Builder.defineMacro("__pic__", Twine(PICLevel));
+ }
+ if (unsigned PIELevel = LangOpts.PIELevel) {
+ Builder.defineMacro("__PIE__", Twine(PIELevel));
+ Builder.defineMacro("__pie__", Twine(PIELevel));
+ }
+
+ // Macros to control C99 numerics and <float.h>
+ Builder.defineMacro("__FLT_EVAL_METHOD__", Twine(TI.getFloatEvalMethod()));
+ Builder.defineMacro("__FLT_RADIX__", "2");
+ int Dig = PickFP(&TI.getLongDoubleFormat(), -1/*FIXME*/, 17, 21, 33, 36);
+ Builder.defineMacro("__DECIMAL_DIG__", Twine(Dig));
+
+ if (LangOpts.getStackProtector() == LangOptions::SSPOn)
+ Builder.defineMacro("__SSP__");
+ else if (LangOpts.getStackProtector() == LangOptions::SSPReq)
+ Builder.defineMacro("__SSP_ALL__", "2");
+
+ if (FEOpts.ProgramAction == frontend::RewriteObjC)
+ Builder.defineMacro("__weak", "__attribute__((objc_gc(weak)))");
+
+ // Define a macro that exists only when using the static analyzer.
+ if (FEOpts.ProgramAction == frontend::RunAnalysis)
+ Builder.defineMacro("__clang_analyzer__");
+
+ if (LangOpts.FastRelaxedMath)
+ Builder.defineMacro("__FAST_RELAXED_MATH__");
+
+ if (LangOpts.ObjCAutoRefCount) {
+ Builder.defineMacro("__weak", "__attribute__((objc_ownership(weak)))");
+ Builder.defineMacro("__strong", "__attribute__((objc_ownership(strong)))");
+ Builder.defineMacro("__autoreleasing",
+ "__attribute__((objc_ownership(autoreleasing)))");
+ Builder.defineMacro("__unsafe_unretained",
+ "__attribute__((objc_ownership(none)))");
+ }
+
+ // Get other target #defines.
+ TI.getTargetDefines(LangOpts, Builder);
+}
+
+// Initialize the remapping of files to alternative contents, e.g.,
+// those specified through other files.
+static void InitializeFileRemapping(DiagnosticsEngine &Diags,
+ SourceManager &SourceMgr,
+ FileManager &FileMgr,
+ const PreprocessorOptions &InitOpts) {
+ // Remap files in the source manager (with buffers).
+ for (PreprocessorOptions::const_remapped_file_buffer_iterator
+ Remap = InitOpts.remapped_file_buffer_begin(),
+ RemapEnd = InitOpts.remapped_file_buffer_end();
+ Remap != RemapEnd;
+ ++Remap) {
+ // Create the file entry for the file that we're mapping from.
+ const FileEntry *FromFile = FileMgr.getVirtualFile(Remap->first,
+ Remap->second->getBufferSize(),
+ 0);
+ if (!FromFile) {
+ Diags.Report(diag::err_fe_remap_missing_from_file)
+ << Remap->first;
+ if (!InitOpts.RetainRemappedFileBuffers)
+ delete Remap->second;
+ continue;
+ }
+
+ // Override the contents of the "from" file with the contents of
+ // the "to" file.
+ SourceMgr.overrideFileContents(FromFile, Remap->second,
+ InitOpts.RetainRemappedFileBuffers);
+ }
+
+ // Remap files in the source manager (with other files).
+ for (PreprocessorOptions::const_remapped_file_iterator
+ Remap = InitOpts.remapped_file_begin(),
+ RemapEnd = InitOpts.remapped_file_end();
+ Remap != RemapEnd;
+ ++Remap) {
+ // Find the file that we're mapping to.
+ const FileEntry *ToFile = FileMgr.getFile(Remap->second);
+ if (!ToFile) {
+ Diags.Report(diag::err_fe_remap_missing_to_file)
+ << Remap->first << Remap->second;
+ continue;
+ }
+
+ // Create the file entry for the file that we're mapping from.
+ const FileEntry *FromFile = FileMgr.getVirtualFile(Remap->first,
+ ToFile->getSize(), 0);
+ if (!FromFile) {
+ Diags.Report(diag::err_fe_remap_missing_from_file)
+ << Remap->first;
+ continue;
+ }
+
+ // Override the contents of the "from" file with the contents of
+ // the "to" file.
+ SourceMgr.overrideFileContents(FromFile, ToFile);
+ }
+
+ SourceMgr.setOverridenFilesKeepOriginalName(
+ InitOpts.RemappedFilesKeepOriginalName);
+}
+
+/// InitializePreprocessor - Initialize the preprocessor, getting it and the
+/// environment ready to process a single file.
+///
+void clang::InitializePreprocessor(Preprocessor &PP,
+ const PreprocessorOptions &InitOpts,
+ const HeaderSearchOptions &HSOpts,
+ const FrontendOptions &FEOpts) {
+ const LangOptions &LangOpts = PP.getLangOpts();
+ std::string PredefineBuffer;
+ PredefineBuffer.reserve(4080);
+ llvm::raw_string_ostream Predefines(PredefineBuffer);
+ MacroBuilder Builder(Predefines);
+
+ InitializeFileRemapping(PP.getDiagnostics(), PP.getSourceManager(),
+ PP.getFileManager(), InitOpts);
+
+ // Emit line markers for various builtin sections of the file. We don't do
+ // this in asm preprocessor mode, because "# 4" is not a line marker directive
+ // in this mode.
+ if (!PP.getLangOpts().AsmPreprocessor)
+ Builder.append("# 1 \"<built-in>\" 3");
+
+ // Install things like __POWERPC__, __GNUC__, etc into the macro table.
+ if (InitOpts.UsePredefines) {
+ InitializePredefinedMacros(PP.getTargetInfo(), LangOpts, FEOpts, Builder);
+
+ // Install definitions to make Objective-C++ ARC work well with various
+ // C++ Standard Library implementations.
+ if (LangOpts.ObjC1 && LangOpts.CPlusPlus && LangOpts.ObjCAutoRefCount) {
+ switch (InitOpts.ObjCXXARCStandardLibrary) {
+ case ARCXX_nolib:
+ case ARCXX_libcxx:
+ break;
+
+ case ARCXX_libstdcxx:
+ AddObjCXXARCLibstdcxxDefines(LangOpts, Builder);
+ break;
+ }
+ }
+ }
+
+ // Even with predefines off, some macros are still predefined.
+ // These should all be defined in the preprocessor according to the
+ // current language configuration.
+ InitializeStandardPredefinedMacros(PP.getTargetInfo(), PP.getLangOpts(),
+ FEOpts, Builder);
+
+ // Add on the predefines from the driver. Wrap in a #line directive to report
+ // that they come from the command line.
+ if (!PP.getLangOpts().AsmPreprocessor)
+ Builder.append("# 1 \"<command line>\" 1");
+
+ // Process #define's and #undef's in the order they are given.
+ for (unsigned i = 0, e = InitOpts.Macros.size(); i != e; ++i) {
+ if (InitOpts.Macros[i].second) // isUndef
+ Builder.undefineMacro(InitOpts.Macros[i].first);
+ else
+ DefineBuiltinMacro(Builder, InitOpts.Macros[i].first,
+ PP.getDiagnostics());
+ }
+
+ // If -imacros are specified, include them now. These are processed before
+ // any -include directives.
+ for (unsigned i = 0, e = InitOpts.MacroIncludes.size(); i != e; ++i)
+ AddImplicitIncludeMacros(Builder, InitOpts.MacroIncludes[i],
+ PP.getFileManager());
+
+ // Process -include directives.
+ for (unsigned i = 0, e = InitOpts.Includes.size(); i != e; ++i) {
+ const std::string &Path = InitOpts.Includes[i];
+ if (Path == InitOpts.ImplicitPTHInclude)
+ AddImplicitIncludePTH(Builder, PP, Path);
+ else
+ AddImplicitInclude(Builder, Path, PP.getFileManager());
+ }
+
+ // Exit the command line and go back to <built-in> (2 is LC_LEAVE).
+ if (!PP.getLangOpts().AsmPreprocessor)
+ Builder.append("# 1 \"<built-in>\" 2");
+
+ // Instruct the preprocessor to skip the preamble.
+ PP.setSkipMainFilePreamble(InitOpts.PrecompiledPreambleBytes.first,
+ InitOpts.PrecompiledPreambleBytes.second);
+
+  // Copy PredefineBuffer into the Preprocessor.
+ PP.setPredefines(Predefines.str());
+
+ // Initialize the header search object.
+ ApplyHeaderSearchOptions(PP.getHeaderSearchInfo(), HSOpts,
+ PP.getLangOpts(),
+ PP.getTargetInfo().getTriple());
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/LangStandards.cpp b/contrib/llvm/tools/clang/lib/Frontend/LangStandards.cpp
new file mode 100644
index 0000000..f86a574
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/LangStandards.cpp
@@ -0,0 +1,43 @@
+//===--- LangStandards.cpp - Language Standard Definitions ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/LangStandard.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+using namespace clang::frontend;
+
+#define LANGSTANDARD(id, name, desc, features) \
+ static const LangStandard Lang_##id = { name, desc, features };
+#include "clang/Frontend/LangStandards.def"
+
+const LangStandard &LangStandard::getLangStandardForKind(Kind K) {
+ switch (K) {
+ case lang_unspecified:
+ llvm::report_fatal_error("getLangStandardForKind() on unspecified kind");
+#define LANGSTANDARD(id, name, desc, features) \
+ case lang_##id: return Lang_##id;
+#include "clang/Frontend/LangStandards.def"
+ }
+ llvm_unreachable("Invalid language kind!");
+}
+
+const LangStandard *LangStandard::getLangStandardForName(StringRef Name) {
+ Kind K = llvm::StringSwitch<Kind>(Name)
+#define LANGSTANDARD(id, name, desc, features) \
+ .Case(name, lang_##id)
+#include "clang/Frontend/LangStandards.def"
+ .Default(lang_unspecified);
+ if (K == lang_unspecified)
+ return 0;
+
+ return &getLangStandardForKind(K);
+}
+
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp b/contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp
new file mode 100644
index 0000000..eb7865e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/LayoutOverrideSource.cpp
@@ -0,0 +1,206 @@
+//===--- LayoutOverrideSource.cpp --Override Record Layouts ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Frontend/LayoutOverrideSource.h"
+#include "clang/AST/Decl.h"
+#include "llvm/Support/raw_ostream.h"
+#include <fstream>
+#include <string>
+
+using namespace clang;
+
+/// \brief Parse a simple identifier.
+static std::string parseName(StringRef S) {
+ unsigned Offset = 0;
+ while (Offset < S.size() &&
+ (isalpha(S[Offset]) || S[Offset] == '_' ||
+ (Offset > 0 && isdigit(S[Offset]))))
+ ++Offset;
+
+ return S.substr(0, Offset).str();
+}
+
+LayoutOverrideSource::LayoutOverrideSource(llvm::StringRef Filename) {
+ std::ifstream Input(Filename.str().c_str());
+ if (!Input.is_open())
+ return;
+
+ // Parse the output of -fdump-record-layouts.
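+  // Each record begins with a "*** Dumping AST Record Layout" line, followed
+  // by the type name, its size and alignment (either "Size:"/"Alignment:" or
+  // "sizeof=.../align=..."), and an optional "FieldOffsets: [...]" list.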
+ std::string CurrentType;
+ Layout CurrentLayout;
+ bool ExpectingType = false;
+
+ while (Input.good()) {
+ std::string Line;
+ getline(Input, Line);
+
+ StringRef LineStr(Line);
+
+    // Determine whether this line starts the dump of a new record layout.
+ if (LineStr.find("*** Dumping AST Record Layout") != StringRef::npos) {
+ // Flush the last type/layout, if there is one.
+ if (!CurrentType.empty())
+ Layouts[CurrentType] = CurrentLayout;
+ CurrentLayout = Layout();
+
+ ExpectingType = true;
+ continue;
+ }
+
+ // If we're expecting a type, grab it.
+ if (ExpectingType) {
+ ExpectingType = false;
+
+ StringRef::size_type Pos;
+ if ((Pos = LineStr.find("struct ")) != StringRef::npos)
+ LineStr = LineStr.substr(Pos + strlen("struct "));
+ else if ((Pos = LineStr.find("class ")) != StringRef::npos)
+ LineStr = LineStr.substr(Pos + strlen("class "));
+ else if ((Pos = LineStr.find("union ")) != StringRef::npos)
+ LineStr = LineStr.substr(Pos + strlen("union "));
+ else
+ continue;
+
+ // Find the name of the type.
+ CurrentType = parseName(LineStr);
+ CurrentLayout = Layout();
+ continue;
+ }
+
+ // Check for the size of the type.
+ StringRef::size_type Pos = LineStr.find(" Size:");
+ if (Pos != StringRef::npos) {
+ // Skip past the " Size:" prefix.
+ LineStr = LineStr.substr(Pos + strlen(" Size:"));
+
+ unsigned long long Size = 0;
+ (void)LineStr.getAsInteger(10, Size);
+ CurrentLayout.Size = Size;
+ continue;
+ }
+
+ // Check for the alignment of the type.
+ Pos = LineStr.find("Alignment:");
+ if (Pos != StringRef::npos) {
+ // Skip past the "Alignment:" prefix.
+ LineStr = LineStr.substr(Pos + strlen("Alignment:"));
+
+ unsigned long long Alignment = 0;
+ (void)LineStr.getAsInteger(10, Alignment);
+ CurrentLayout.Align = Alignment;
+ continue;
+ }
+
+ // Check for the size/alignment of the type.
+ Pos = LineStr.find("sizeof=");
+ if (Pos != StringRef::npos) {
+      // Skip past the "sizeof=" prefix.
+ LineStr = LineStr.substr(Pos + strlen("sizeof="));
+
+ // Parse size.
+ unsigned long long Size = 0;
+ (void)LineStr.getAsInteger(10, Size);
+ CurrentLayout.Size = Size;
+
+ Pos = LineStr.find("align=");
+ if (Pos != StringRef::npos) {
+        // Skip past the "align=" prefix.
+ LineStr = LineStr.substr(Pos + strlen("align="));
+
+ // Parse alignment.
+ unsigned long long Alignment = 0;
+ (void)LineStr.getAsInteger(10, Alignment);
+ CurrentLayout.Align = Alignment;
+ }
+
+ continue;
+ }
+
+ // Check for the field offsets of the type.
+ Pos = LineStr.find("FieldOffsets: [");
+ if (Pos == StringRef::npos)
+ continue;
+
+ LineStr = LineStr.substr(Pos + strlen("FieldOffsets: ["));
+ while (!LineStr.empty() && isdigit(LineStr[0])) {
+ // Parse this offset.
+ unsigned Idx = 1;
+ while (Idx < LineStr.size() && isdigit(LineStr[Idx]))
+ ++Idx;
+
+ unsigned long long Offset = 0;
+ (void)LineStr.substr(0, Idx).getAsInteger(10, Offset);
+
+ CurrentLayout.FieldOffsets.push_back(Offset);
+
+ // Skip over this offset, the following comma, and any spaces.
+ LineStr = LineStr.substr(Idx + 1);
+ while (!LineStr.empty() && isspace(LineStr[0]))
+ LineStr = LineStr.substr(1);
+ }
+ }
+
+ // Flush the last type/layout, if there is one.
+ if (!CurrentType.empty())
+ Layouts[CurrentType] = CurrentLayout;
+}
+
+bool
+LayoutOverrideSource::layoutRecordType(const RecordDecl *Record,
+ uint64_t &Size, uint64_t &Alignment,
+ llvm::DenseMap<const FieldDecl *, uint64_t> &FieldOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &BaseOffsets,
+ llvm::DenseMap<const CXXRecordDecl *, CharUnits> &VirtualBaseOffsets)
+{
+ // We can't override unnamed declarations.
+ if (!Record->getIdentifier())
+ return false;
+
+ // Check whether we have a layout for this record.
+ llvm::StringMap<Layout>::iterator Known = Layouts.find(Record->getName());
+ if (Known == Layouts.end())
+ return false;
+
+ // Provide field layouts.
+ unsigned NumFields = 0;
+ for (RecordDecl::field_iterator F = Record->field_begin(),
+ FEnd = Record->field_end();
+ F != FEnd; ++F, ++NumFields) {
+ if (NumFields >= Known->second.FieldOffsets.size())
+ continue;
+
+ FieldOffsets[*F] = Known->second.FieldOffsets[NumFields];
+ }
+
+ // Wrong number of fields.
+ if (NumFields != Known->second.FieldOffsets.size())
+ return false;
+
+ Size = Known->second.Size;
+ Alignment = Known->second.Align;
+ return true;
+}
+
+void LayoutOverrideSource::dump() {
+ llvm::raw_ostream &OS = llvm::errs();
+ for (llvm::StringMap<Layout>::iterator L = Layouts.begin(),
+ LEnd = Layouts.end();
+ L != LEnd; ++L) {
+    OS << "Type: " << L->first() << '\n';
+ OS << " Size:" << L->second.Size << '\n';
+ OS << " Alignment:" << L->second.Align << '\n';
+ OS << " FieldOffsets: [";
+ for (unsigned I = 0, N = L->second.FieldOffsets.size(); I != N; ++I) {
+ if (I)
+ OS << ", ";
+ OS << L->second.FieldOffsets[I];
+ }
+ OS << "]\n";
+ }
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp
new file mode 100644
index 0000000..3fee957
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/LogDiagnosticPrinter.cpp
@@ -0,0 +1,177 @@
+//===--- LogDiagnosticPrinter.cpp - Log Diagnostic Printer ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/LogDiagnosticPrinter.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+
+LogDiagnosticPrinter::LogDiagnosticPrinter(raw_ostream &os,
+ const DiagnosticOptions &diags,
+ bool _OwnsOutputStream)
+ : OS(os), LangOpts(0), DiagOpts(&diags),
+ OwnsOutputStream(_OwnsOutputStream) {
+}
+
+LogDiagnosticPrinter::~LogDiagnosticPrinter() {
+ if (OwnsOutputStream)
+ delete &OS;
+}
+
+static StringRef getLevelName(DiagnosticsEngine::Level Level) {
+ switch (Level) {
+ case DiagnosticsEngine::Ignored: return "ignored";
+ case DiagnosticsEngine::Note: return "note";
+ case DiagnosticsEngine::Warning: return "warning";
+ case DiagnosticsEngine::Error: return "error";
+ case DiagnosticsEngine::Fatal: return "fatal error";
+ }
+ llvm_unreachable("Invalid DiagnosticsEngine level!");
+}
+
+// Escape XML characters inside the raw string.
+static void emitString(llvm::raw_svector_ostream &OS, const StringRef Raw) {
+ for (StringRef::iterator I = Raw.begin(), E = Raw.end(); I != E; ++I) {
+ char c = *I;
+ switch (c) {
+ default: OS << c; break;
+ case '&': OS << "&amp;"; break;
+ case '<': OS << "&lt;"; break;
+ case '>': OS << "&gt;"; break;
+ case '\'': OS << "&apos;"; break;
+ case '\"': OS << "&quot;"; break;
+ }
+ }
+}
+
+void LogDiagnosticPrinter::EndSourceFile() {
+ // We emit all the diagnostics in EndSourceFile. However, we don't emit any
+ // entry if no diagnostics were present.
+ //
+ // Note that DiagnosticConsumer has no "end-of-compilation" callback, so we
+ // will miss any diagnostics which are emitted after and outside the
+ // translation unit processing.
+ if (Entries.empty())
+ return;
+
+ // Write to a temporary string to ensure atomic write of diagnostic object.
+ SmallString<512> Msg;
+ llvm::raw_svector_ostream OS(Msg);
+
+ OS << "<dict>\n";
+ if (!MainFilename.empty()) {
+ OS << " <key>main-file</key>\n"
+ << " <string>";
+ emitString(OS, MainFilename);
+ OS << "</string>\n";
+ }
+ if (!DwarfDebugFlags.empty()) {
+ OS << " <key>dwarf-debug-flags</key>\n"
+ << " <string>";
+ emitString(OS, DwarfDebugFlags);
+ OS << "</string>\n";
+ }
+ OS << " <key>diagnostics</key>\n";
+ OS << " <array>\n";
+ for (unsigned i = 0, e = Entries.size(); i != e; ++i) {
+ DiagEntry &DE = Entries[i];
+
+ OS << " <dict>\n";
+ OS << " <key>level</key>\n"
+ << " <string>";
+ emitString(OS, getLevelName(DE.DiagnosticLevel));
+ OS << "</string>\n";
+ if (!DE.Filename.empty()) {
+ OS << " <key>filename</key>\n"
+ << " <string>";
+ emitString(OS, DE.Filename);
+ OS << "</string>\n";
+ }
+ if (DE.Line != 0) {
+ OS << " <key>line</key>\n"
+ << " <integer>" << DE.Line << "</integer>\n";
+ }
+ if (DE.Column != 0) {
+ OS << " <key>column</key>\n"
+ << " <integer>" << DE.Column << "</integer>\n";
+ }
+ if (!DE.Message.empty()) {
+ OS << " <key>message</key>\n"
+ << " <string>";
+ emitString(OS, DE.Message);
+ OS << "</string>\n";
+ }
+ OS << " </dict>\n";
+ }
+ OS << " </array>\n";
+ OS << "</dict>\n";
+
+ this->OS << OS.str();
+}
+
+void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
+ const Diagnostic &Info) {
+ // Default implementation (Warnings/errors count).
+ DiagnosticConsumer::HandleDiagnostic(Level, Info);
+
+ // Initialize the main file name, if we haven't already fetched it.
+ if (MainFilename.empty() && Info.hasSourceManager()) {
+ const SourceManager &SM = Info.getSourceManager();
+ FileID FID = SM.getMainFileID();
+ if (!FID.isInvalid()) {
+ const FileEntry *FE = SM.getFileEntryForID(FID);
+ if (FE && FE->getName())
+ MainFilename = FE->getName();
+ }
+ }
+
+ // Create the diag entry.
+ DiagEntry DE;
+ DE.DiagnosticID = Info.getID();
+ DE.DiagnosticLevel = Level;
+
+ // Format the message.
+ SmallString<100> MessageStr;
+ Info.FormatDiagnostic(MessageStr);
+ DE.Message = MessageStr.str();
+
+ // Set the location information.
+ DE.Filename = "";
+ DE.Line = DE.Column = 0;
+ if (Info.getLocation().isValid() && Info.hasSourceManager()) {
+ const SourceManager &SM = Info.getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Info.getLocation());
+
+ if (PLoc.isInvalid()) {
+ // At least print the file name if available:
+ FileID FID = SM.getFileID(Info.getLocation());
+ if (!FID.isInvalid()) {
+ const FileEntry *FE = SM.getFileEntryForID(FID);
+ if (FE && FE->getName())
+ DE.Filename = FE->getName();
+ }
+ } else {
+ DE.Filename = PLoc.getFilename();
+ DE.Line = PLoc.getLine();
+ DE.Column = PLoc.getColumn();
+ }
+ }
+
+ // Record the diagnostic entry.
+ Entries.push_back(DE);
+}
+
+DiagnosticConsumer *
+LogDiagnosticPrinter::clone(DiagnosticsEngine &Diags) const {
+ return new LogDiagnosticPrinter(OS, *DiagOpts, /*OwnsOutputStream=*/false);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp b/contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp
new file mode 100644
index 0000000..992eeb0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/MultiplexConsumer.cpp
@@ -0,0 +1,276 @@
+//===- MultiplexConsumer.cpp - AST Consumer for PCH Generation --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MultiplexConsumer class. It also declares and defines
+// MultiplexASTDeserializationListener and MultiplexASTMutationListener, which
+// are implementation details of MultiplexConsumer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/MultiplexConsumer.h"
+
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/Serialization/ASTDeserializationListener.h"
+
+using namespace clang;
+
+namespace clang {
+
+// This ASTDeserializationListener forwards its notifications to a set of
+// child listeners.
+class MultiplexASTDeserializationListener
+ : public ASTDeserializationListener {
+public:
+ // Does NOT take ownership of the elements in L.
+ MultiplexASTDeserializationListener(
+ const std::vector<ASTDeserializationListener*>& L);
+ virtual void ReaderInitialized(ASTReader *Reader);
+ virtual void IdentifierRead(serialization::IdentID ID,
+ IdentifierInfo *II);
+ virtual void TypeRead(serialization::TypeIdx Idx, QualType T);
+ virtual void DeclRead(serialization::DeclID ID, const Decl *D);
+ virtual void SelectorRead(serialization::SelectorID iD, Selector Sel);
+ virtual void MacroDefinitionRead(serialization::PreprocessedEntityID,
+ MacroDefinition *MD);
+private:
+ std::vector<ASTDeserializationListener*> Listeners;
+};
+
+MultiplexASTDeserializationListener::MultiplexASTDeserializationListener(
+ const std::vector<ASTDeserializationListener*>& L)
+ : Listeners(L) {
+}
+
+void MultiplexASTDeserializationListener::ReaderInitialized(
+ ASTReader *Reader) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->ReaderInitialized(Reader);
+}
+
+void MultiplexASTDeserializationListener::IdentifierRead(
+ serialization::IdentID ID, IdentifierInfo *II) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->IdentifierRead(ID, II);
+}
+
+void MultiplexASTDeserializationListener::TypeRead(
+ serialization::TypeIdx Idx, QualType T) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->TypeRead(Idx, T);
+}
+
+void MultiplexASTDeserializationListener::DeclRead(
+ serialization::DeclID ID, const Decl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->DeclRead(ID, D);
+}
+
+void MultiplexASTDeserializationListener::SelectorRead(
+ serialization::SelectorID ID, Selector Sel) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->SelectorRead(ID, Sel);
+}
+
+void MultiplexASTDeserializationListener::MacroDefinitionRead(
+ serialization::PreprocessedEntityID ID, MacroDefinition *MD) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->MacroDefinitionRead(ID, MD);
+}
+
+// This ASTMutationListener forwards its notifications to a set of
+// child listeners.
+class MultiplexASTMutationListener : public ASTMutationListener {
+public:
+ // Does NOT take ownership of the elements in L.
+ MultiplexASTMutationListener(ArrayRef<ASTMutationListener*> L);
+ virtual void CompletedTagDefinition(const TagDecl *D);
+ virtual void AddedVisibleDecl(const DeclContext *DC, const Decl *D);
+ virtual void AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D);
+ virtual void AddedCXXTemplateSpecialization(const ClassTemplateDecl *TD,
+ const ClassTemplateSpecializationDecl *D);
+ virtual void AddedCXXTemplateSpecialization(const FunctionTemplateDecl *TD,
+ const FunctionDecl *D);
+ virtual void CompletedImplicitDefinition(const FunctionDecl *D);
+ virtual void StaticDataMemberInstantiated(const VarDecl *D);
+ virtual void AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
+ const ObjCInterfaceDecl *IFD);
+ virtual void AddedObjCPropertyInClassExtension(const ObjCPropertyDecl *Prop,
+ const ObjCPropertyDecl *OrigProp,
+ const ObjCCategoryDecl *ClassExt);
+private:
+ std::vector<ASTMutationListener*> Listeners;
+};
+
+MultiplexASTMutationListener::MultiplexASTMutationListener(
+ ArrayRef<ASTMutationListener*> L)
+ : Listeners(L.begin(), L.end()) {
+}
+
+void MultiplexASTMutationListener::CompletedTagDefinition(const TagDecl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->CompletedTagDefinition(D);
+}
+
+void MultiplexASTMutationListener::AddedVisibleDecl(
+ const DeclContext *DC, const Decl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->AddedVisibleDecl(DC, D);
+}
+
+void MultiplexASTMutationListener::AddedCXXImplicitMember(
+ const CXXRecordDecl *RD, const Decl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->AddedCXXImplicitMember(RD, D);
+}
+void MultiplexASTMutationListener::AddedCXXTemplateSpecialization(
+ const ClassTemplateDecl *TD, const ClassTemplateSpecializationDecl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->AddedCXXTemplateSpecialization(TD, D);
+}
+void MultiplexASTMutationListener::AddedCXXTemplateSpecialization(
+ const FunctionTemplateDecl *TD, const FunctionDecl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->AddedCXXTemplateSpecialization(TD, D);
+}
+void MultiplexASTMutationListener::CompletedImplicitDefinition(
+ const FunctionDecl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->CompletedImplicitDefinition(D);
+}
+void MultiplexASTMutationListener::StaticDataMemberInstantiated(
+ const VarDecl *D) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->StaticDataMemberInstantiated(D);
+}
+void MultiplexASTMutationListener::AddedObjCCategoryToInterface(
+ const ObjCCategoryDecl *CatD,
+ const ObjCInterfaceDecl *IFD) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->AddedObjCCategoryToInterface(CatD, IFD);
+}
+void MultiplexASTMutationListener::AddedObjCPropertyInClassExtension(
+ const ObjCPropertyDecl *Prop,
+ const ObjCPropertyDecl *OrigProp,
+ const ObjCCategoryDecl *ClassExt) {
+ for (size_t i = 0, e = Listeners.size(); i != e; ++i)
+ Listeners[i]->AddedObjCPropertyInClassExtension(Prop, OrigProp, ClassExt);
+}
+
+} // end namespace clang
+
+
+MultiplexConsumer::MultiplexConsumer(ArrayRef<ASTConsumer*> C)
+ : Consumers(C.begin(), C.end()),
+ MutationListener(0), DeserializationListener(0) {
+  // Collect the mutation listeners and deserialization listeners of all
+  // children, and create a multiplex listener for each kind if any exist.
+ std::vector<ASTMutationListener*> mutationListeners;
+ std::vector<ASTDeserializationListener*> serializationListeners;
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i) {
+ ASTMutationListener* mutationListener =
+ Consumers[i]->GetASTMutationListener();
+ if (mutationListener)
+ mutationListeners.push_back(mutationListener);
+ ASTDeserializationListener* serializationListener =
+ Consumers[i]->GetASTDeserializationListener();
+ if (serializationListener)
+ serializationListeners.push_back(serializationListener);
+ }
+ if (mutationListeners.size()) {
+ MutationListener.reset(new MultiplexASTMutationListener(mutationListeners));
+ }
+ if (serializationListeners.size()) {
+ DeserializationListener.reset(
+ new MultiplexASTDeserializationListener(serializationListeners));
+ }
+}
+
+MultiplexConsumer::~MultiplexConsumer() {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ delete Consumers[i];
+}
+
+void MultiplexConsumer::Initialize(ASTContext &Context) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->Initialize(Context);
+}
+
+bool MultiplexConsumer::HandleTopLevelDecl(DeclGroupRef D) {
+ bool Continue = true;
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Continue = Continue && Consumers[i]->HandleTopLevelDecl(D);
+ return Continue;
+}
+
+void MultiplexConsumer::HandleCXXStaticMemberVarInstantiation(VarDecl *VD) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->HandleCXXStaticMemberVarInstantiation(VD);
+}
+
+void MultiplexConsumer::HandleInterestingDecl(DeclGroupRef D) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->HandleInterestingDecl(D);
+}
+
+void MultiplexConsumer::HandleTranslationUnit(ASTContext &Ctx) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->HandleTranslationUnit(Ctx);
+}
+
+void MultiplexConsumer::HandleTagDeclDefinition(TagDecl *D) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->HandleTagDeclDefinition(D);
+}
+
+void MultiplexConsumer::HandleCXXImplicitFunctionInstantiation(FunctionDecl *D){
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->HandleCXXImplicitFunctionInstantiation(D);
+}
+
+void MultiplexConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef D) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->HandleTopLevelDeclInObjCContainer(D);
+}
+
+void MultiplexConsumer::CompleteTentativeDefinition(VarDecl *D) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->CompleteTentativeDefinition(D);
+}
+
+void MultiplexConsumer::HandleVTable(
+ CXXRecordDecl *RD, bool DefinitionRequired) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->HandleVTable(RD, DefinitionRequired);
+}
+
+ASTMutationListener *MultiplexConsumer::GetASTMutationListener() {
+ return MutationListener.get();
+}
+
+ASTDeserializationListener *MultiplexConsumer::GetASTDeserializationListener() {
+ return DeserializationListener.get();
+}
+
+void MultiplexConsumer::PrintStats() {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ Consumers[i]->PrintStats();
+}
+
+void MultiplexConsumer::InitializeSema(Sema &S) {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Consumers[i]))
+ SC->InitializeSema(S);
+}
+
+void MultiplexConsumer::ForgetSema() {
+ for (size_t i = 0, e = Consumers.size(); i != e; ++i)
+ if (SemaConsumer *SC = dyn_cast<SemaConsumer>(Consumers[i]))
+ SC->ForgetSema();
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
new file mode 100644
index 0000000..9e1587c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -0,0 +1,628 @@
+//===--- PrintPreprocessedOutput.cpp - Implement the -E mode --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This code simply runs the preprocessor on the input file and prints out the
+// result. This is the traditional behavior of the -E option.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/PreprocessorOutputOptions.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/Pragma.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/TokenConcatenation.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cstdio>
+using namespace clang;
+
+/// PrintMacroDefinition - Print a macro definition in a form that will be
+/// properly accepted back as a definition.
+static void PrintMacroDefinition(const IdentifierInfo &II, const MacroInfo &MI,
+ Preprocessor &PP, raw_ostream &OS) {
+ OS << "#define " << II.getName();
+
+ if (MI.isFunctionLike()) {
+ OS << '(';
+ if (!MI.arg_empty()) {
+ MacroInfo::arg_iterator AI = MI.arg_begin(), E = MI.arg_end();
+ for (; AI+1 != E; ++AI) {
+ OS << (*AI)->getName();
+ OS << ',';
+ }
+
+ // Last argument.
+ if ((*AI)->getName() == "__VA_ARGS__")
+ OS << "...";
+ else
+ OS << (*AI)->getName();
+ }
+
+ if (MI.isGNUVarargs())
+ OS << "..."; // #define foo(x...)
+
+ OS << ')';
+ }
+
+  // GCC always emits a space, even if the macro body is empty. However, we do
+  // not want to emit two spaces if the first token has a leading space.
+ if (MI.tokens_empty() || !MI.tokens_begin()->hasLeadingSpace())
+ OS << ' ';
+
+ SmallString<128> SpellingBuffer;
+ for (MacroInfo::tokens_iterator I = MI.tokens_begin(), E = MI.tokens_end();
+ I != E; ++I) {
+ if (I->hasLeadingSpace())
+ OS << ' ';
+
+ OS << PP.getSpelling(*I, SpellingBuffer);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Preprocessed token printer
+//===----------------------------------------------------------------------===//
+
+namespace {
+class PrintPPOutputPPCallbacks : public PPCallbacks {
+ Preprocessor &PP;
+ SourceManager &SM;
+ TokenConcatenation ConcatInfo;
+public:
+ raw_ostream &OS;
+private:
+ unsigned CurLine;
+
+ bool EmittedTokensOnThisLine;
+ bool EmittedMacroOnThisLine;
+ SrcMgr::CharacteristicKind FileType;
+ SmallString<512> CurFilename;
+ bool Initialized;
+ bool DisableLineMarkers;
+ bool DumpDefines;
+ bool UseLineDirective;
+public:
+ PrintPPOutputPPCallbacks(Preprocessor &pp, raw_ostream &os,
+ bool lineMarkers, bool defines)
+ : PP(pp), SM(PP.getSourceManager()),
+ ConcatInfo(PP), OS(os), DisableLineMarkers(lineMarkers),
+ DumpDefines(defines) {
+ CurLine = 0;
+ CurFilename += "<uninit>";
+ EmittedTokensOnThisLine = false;
+ EmittedMacroOnThisLine = false;
+ FileType = SrcMgr::C_User;
+ Initialized = false;
+
+ // If we're in microsoft mode, use normal #line instead of line markers.
+ UseLineDirective = PP.getLangOpts().MicrosoftExt;
+ }
+
+ void SetEmittedTokensOnThisLine() { EmittedTokensOnThisLine = true; }
+ bool hasEmittedTokensOnThisLine() const { return EmittedTokensOnThisLine; }
+
+ bool StartNewLineIfNeeded();
+
+ virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType,
+ FileID PrevFID);
+ virtual void Ident(SourceLocation Loc, const std::string &str);
+ virtual void PragmaComment(SourceLocation Loc, const IdentifierInfo *Kind,
+ const std::string &Str);
+ virtual void PragmaMessage(SourceLocation Loc, StringRef Str);
+ virtual void PragmaDiagnosticPush(SourceLocation Loc,
+ StringRef Namespace);
+ virtual void PragmaDiagnosticPop(SourceLocation Loc,
+ StringRef Namespace);
+ virtual void PragmaDiagnostic(SourceLocation Loc, StringRef Namespace,
+ diag::Mapping Map, StringRef Str);
+
+ bool HandleFirstTokOnLine(Token &Tok);
+ bool MoveToLine(SourceLocation Loc) {
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+ if (PLoc.isInvalid())
+ return false;
+ return MoveToLine(PLoc.getLine());
+ }
+ bool MoveToLine(unsigned LineNo);
+
+ bool AvoidConcat(const Token &PrevPrevTok, const Token &PrevTok,
+ const Token &Tok) {
+ return ConcatInfo.AvoidConcat(PrevPrevTok, PrevTok, Tok);
+ }
+ void WriteLineInfo(unsigned LineNo, const char *Extra=0, unsigned ExtraLen=0);
+ bool LineMarkersAreDisabled() const { return DisableLineMarkers; }
+ void HandleNewlinesInToken(const char *TokStr, unsigned Len);
+
+ /// MacroDefined - This hook is called whenever a macro definition is seen.
+ void MacroDefined(const Token &MacroNameTok, const MacroInfo *MI);
+
+ /// MacroUndefined - This hook is called whenever a macro #undef is seen.
+ void MacroUndefined(const Token &MacroNameTok, const MacroInfo *MI);
+};
+} // end anonymous namespace
+
+void PrintPPOutputPPCallbacks::WriteLineInfo(unsigned LineNo,
+ const char *Extra,
+ unsigned ExtraLen) {
+ if (EmittedTokensOnThisLine || EmittedMacroOnThisLine) {
+ OS << '\n';
+ EmittedTokensOnThisLine = false;
+ EmittedMacroOnThisLine = false;
+ }
+
+ // Emit #line directives or GNU line markers depending on what mode we're in.
+ if (UseLineDirective) {
+ OS << "#line" << ' ' << LineNo << ' ' << '"';
+ OS.write(CurFilename.data(), CurFilename.size());
+ OS << '"';
+ } else {
+ OS << '#' << ' ' << LineNo << ' ' << '"';
+ OS.write(CurFilename.data(), CurFilename.size());
+ OS << '"';
+
+ if (ExtraLen)
+ OS.write(Extra, ExtraLen);
+
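+    // GNU line marker flags: 3 marks a system header, and "3 4" marks a
+    // system header that is implicitly extern "C".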
+ if (FileType == SrcMgr::C_System)
+ OS.write(" 3", 2);
+ else if (FileType == SrcMgr::C_ExternCSystem)
+ OS.write(" 3 4", 4);
+ }
+ OS << '\n';
+}
+
+/// MoveToLine - Move the output to the source line specified by the location
+/// object. We can do this by emitting some number of \n's, or by emitting a
+/// #line directive. This returns false if already at the specified line, true
+/// if some newlines were emitted.
+bool PrintPPOutputPPCallbacks::MoveToLine(unsigned LineNo) {
+ // If this line is "close enough" to the original line, just print newlines,
+ // otherwise print a #line directive.
+ if (LineNo-CurLine <= 8) {
+ if (LineNo-CurLine == 1)
+ OS << '\n';
+ else if (LineNo == CurLine)
+ return false; // Spelling line moved, but expansion line didn't.
+ else {
+ const char *NewLines = "\n\n\n\n\n\n\n\n";
+ OS.write(NewLines, LineNo-CurLine);
+ }
+ } else if (!DisableLineMarkers) {
+ // Emit a #line or line marker.
+ WriteLineInfo(LineNo, 0, 0);
+ } else {
+ // Okay, we're in -P mode, which turns off line markers. However, we still
+ // need to emit a newline between tokens on different lines.
+ if (EmittedTokensOnThisLine || EmittedMacroOnThisLine) {
+ OS << '\n';
+ EmittedTokensOnThisLine = false;
+ EmittedMacroOnThisLine = false;
+ }
+ }
+
+ CurLine = LineNo;
+ return true;
+}
+
+bool PrintPPOutputPPCallbacks::StartNewLineIfNeeded() {
+ if (EmittedTokensOnThisLine || EmittedMacroOnThisLine) {
+ OS << '\n';
+ EmittedTokensOnThisLine = false;
+ EmittedMacroOnThisLine = false;
+ ++CurLine;
+ return true;
+ }
+
+ return false;
+}
+
+/// FileChanged - Whenever the preprocessor enters or exits a #include file
+/// it invokes this handler. Update our conception of the current source
+/// position.
+void PrintPPOutputPPCallbacks::FileChanged(SourceLocation Loc,
+ FileChangeReason Reason,
+ SrcMgr::CharacteristicKind NewFileType,
+ FileID PrevFID) {
+ // Unless we are exiting a #include, make sure to skip ahead to the line the
+ // #include directive was at.
+ SourceManager &SourceMgr = SM;
+
+ PresumedLoc UserLoc = SourceMgr.getPresumedLoc(Loc);
+ if (UserLoc.isInvalid())
+ return;
+
+ unsigned NewLine = UserLoc.getLine();
+
+ if (Reason == PPCallbacks::EnterFile) {
+ SourceLocation IncludeLoc = UserLoc.getIncludeLoc();
+ if (IncludeLoc.isValid())
+ MoveToLine(IncludeLoc);
+ } else if (Reason == PPCallbacks::SystemHeaderPragma) {
+ MoveToLine(NewLine);
+
+    // TODO: GCC emits the # line marker for this pragma on the line AFTER the
+    // directive and emits a bunch of spaces that aren't needed. Emulate this
+    // strange behavior.
+ }
+
+ CurLine = NewLine;
+
+ CurFilename.clear();
+ CurFilename += UserLoc.getFilename();
+ Lexer::Stringify(CurFilename);
+ FileType = NewFileType;
+
+ if (DisableLineMarkers) return;
+
+ if (!Initialized) {
+ WriteLineInfo(CurLine);
+ Initialized = true;
+ }
+
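+  // GNU line marker flags: 1 marks entry into a new file, 2 marks a return to
+  // a file.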
+ switch (Reason) {
+ case PPCallbacks::EnterFile:
+ WriteLineInfo(CurLine, " 1", 2);
+ break;
+ case PPCallbacks::ExitFile:
+ WriteLineInfo(CurLine, " 2", 2);
+ break;
+ case PPCallbacks::SystemHeaderPragma:
+ case PPCallbacks::RenameFile:
+ WriteLineInfo(CurLine);
+ break;
+ }
+}
+
+/// Ident - Handle #ident directives when read by the preprocessor.
+///
+void PrintPPOutputPPCallbacks::Ident(SourceLocation Loc, const std::string &S) {
+ MoveToLine(Loc);
+
+ OS.write("#ident ", strlen("#ident "));
+ OS.write(&S[0], S.size());
+ EmittedTokensOnThisLine = true;
+}
+
+/// MacroDefined - This hook is called whenever a macro definition is seen.
+void PrintPPOutputPPCallbacks::MacroDefined(const Token &MacroNameTok,
+ const MacroInfo *MI) {
+ // Only print out macro definitions in -dD mode.
+ if (!DumpDefines ||
+ // Ignore __FILE__ etc.
+ MI->isBuiltinMacro()) return;
+
+ MoveToLine(MI->getDefinitionLoc());
+ PrintMacroDefinition(*MacroNameTok.getIdentifierInfo(), *MI, PP, OS);
+ EmittedMacroOnThisLine = true;
+}
+
+void PrintPPOutputPPCallbacks::MacroUndefined(const Token &MacroNameTok,
+ const MacroInfo *MI) {
+  // Only print out #undef directives in -dD mode.
+ if (!DumpDefines) return;
+
+ MoveToLine(MacroNameTok.getLocation());
+ OS << "#undef " << MacroNameTok.getIdentifierInfo()->getName();
+ EmittedMacroOnThisLine = true;
+}
+
+void PrintPPOutputPPCallbacks::PragmaComment(SourceLocation Loc,
+ const IdentifierInfo *Kind,
+ const std::string &Str) {
+ MoveToLine(Loc);
+ OS << "#pragma comment(" << Kind->getName();
+
+ if (!Str.empty()) {
+ OS << ", \"";
+
+ for (unsigned i = 0, e = Str.size(); i != e; ++i) {
+ unsigned char Char = Str[i];
+ if (isprint(Char) && Char != '\\' && Char != '"')
+ OS << (char)Char;
+ else // Output anything hard as an octal escape.
+ OS << '\\'
+ << (char)('0'+ ((Char >> 6) & 7))
+ << (char)('0'+ ((Char >> 3) & 7))
+ << (char)('0'+ ((Char >> 0) & 7));
+ }
+ OS << '"';
+ }
+
+ OS << ')';
+ EmittedTokensOnThisLine = true;
+}
+
+void PrintPPOutputPPCallbacks::PragmaMessage(SourceLocation Loc,
+ StringRef Str) {
+ MoveToLine(Loc);
+ OS << "#pragma message(";
+
+ OS << '"';
+
+ for (unsigned i = 0, e = Str.size(); i != e; ++i) {
+ unsigned char Char = Str[i];
+ if (isprint(Char) && Char != '\\' && Char != '"')
+ OS << (char)Char;
+ else // Output anything hard as an octal escape.
+ OS << '\\'
+ << (char)('0'+ ((Char >> 6) & 7))
+ << (char)('0'+ ((Char >> 3) & 7))
+ << (char)('0'+ ((Char >> 0) & 7));
+ }
+ OS << '"';
+
+ OS << ')';
+ EmittedTokensOnThisLine = true;
+}
+
+void PrintPPOutputPPCallbacks::
+PragmaDiagnosticPush(SourceLocation Loc, StringRef Namespace) {
+ MoveToLine(Loc);
+ OS << "#pragma " << Namespace << " diagnostic push";
+ EmittedTokensOnThisLine = true;
+}
+
+void PrintPPOutputPPCallbacks::
+PragmaDiagnosticPop(SourceLocation Loc, StringRef Namespace) {
+ MoveToLine(Loc);
+ OS << "#pragma " << Namespace << " diagnostic pop";
+ EmittedTokensOnThisLine = true;
+}
+
+void PrintPPOutputPPCallbacks::
+PragmaDiagnostic(SourceLocation Loc, StringRef Namespace,
+ diag::Mapping Map, StringRef Str) {
+ MoveToLine(Loc);
+ OS << "#pragma " << Namespace << " diagnostic ";
+ switch (Map) {
+ case diag::MAP_WARNING:
+ OS << "warning";
+ break;
+ case diag::MAP_ERROR:
+ OS << "error";
+ break;
+ case diag::MAP_IGNORE:
+ OS << "ignored";
+ break;
+ case diag::MAP_FATAL:
+ OS << "fatal";
+ break;
+ }
+ OS << " \"" << Str << '"';
+ EmittedTokensOnThisLine = true;
+}
+
+/// HandleFirstTokOnLine - When emitting a preprocessed file in -E mode, this
+/// is called for the first token on each new line. If this really is the start
+/// of a new logical line, handle it and return true, otherwise return false.
+/// This may not be the start of a logical line because the "start of line"
+/// marker is set for spelling lines, not expansion ones.
+bool PrintPPOutputPPCallbacks::HandleFirstTokOnLine(Token &Tok) {
+ // Figure out what line we went to and insert the appropriate number of
+ // newline characters.
+ if (!MoveToLine(Tok.getLocation()))
+ return false;
+
+ // Print out space characters so that the first token on a line is
+ // indented for easy reading.
+ unsigned ColNo = SM.getExpansionColumnNumber(Tok.getLocation());
+
+ // This hack prevents stuff like:
+ // #define HASH #
+ // HASH define foo bar
+ // From having the # character end up at column 1, which makes it so it
+ // is not handled as a #define next time through the preprocessor if in
+ // -fpreprocessed mode.
+ if (ColNo <= 1 && Tok.is(tok::hash))
+ OS << ' ';
+
+ // Otherwise, indent the appropriate number of spaces.
+ for (; ColNo > 1; --ColNo)
+ OS << ' ';
+
+ return true;
+}
+
+void PrintPPOutputPPCallbacks::HandleNewlinesInToken(const char *TokStr,
+ unsigned Len) {
+ unsigned NumNewlines = 0;
+ for (; Len; --Len, ++TokStr) {
+ if (*TokStr != '\n' &&
+ *TokStr != '\r')
+ continue;
+
+ ++NumNewlines;
+
+ // If we have \n\r or \r\n, skip both and count as one line.
+ if (Len != 1 &&
+ (TokStr[1] == '\n' || TokStr[1] == '\r') &&
+ TokStr[0] != TokStr[1])
+ ++TokStr, --Len;
+ }
+
+ if (NumNewlines == 0) return;
+
+ CurLine += NumNewlines;
+}
+
+
+namespace {
+struct UnknownPragmaHandler : public PragmaHandler {
+ const char *Prefix;
+ PrintPPOutputPPCallbacks *Callbacks;
+
+ UnknownPragmaHandler(const char *prefix, PrintPPOutputPPCallbacks *callbacks)
+ : Prefix(prefix), Callbacks(callbacks) {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &PragmaTok) {
+ // Figure out what line we went to and insert the appropriate number of
+ // newline characters.
+ Callbacks->StartNewLineIfNeeded();
+ Callbacks->MoveToLine(PragmaTok.getLocation());
+ Callbacks->OS.write(Prefix, strlen(Prefix));
+ Callbacks->SetEmittedTokensOnThisLine();
+ // Read and print all of the pragma tokens.
+ while (PragmaTok.isNot(tok::eod)) {
+ if (PragmaTok.hasLeadingSpace())
+ Callbacks->OS << ' ';
+ std::string TokSpell = PP.getSpelling(PragmaTok);
+ Callbacks->OS.write(&TokSpell[0], TokSpell.size());
+ PP.LexUnexpandedToken(PragmaTok);
+ }
+ Callbacks->StartNewLineIfNeeded();
+ }
+};
+} // end anonymous namespace
+
+
+static void PrintPreprocessedTokens(Preprocessor &PP, Token &Tok,
+ PrintPPOutputPPCallbacks *Callbacks,
+ raw_ostream &OS) {
+ char Buffer[256];
+ Token PrevPrevTok, PrevTok;
+ PrevPrevTok.startToken();
+ PrevTok.startToken();
+ while (1) {
+
+ // If this token is at the start of a line, emit newlines if needed.
+ if (Tok.isAtStartOfLine() && Callbacks->HandleFirstTokOnLine(Tok)) {
+ // done.
+ } else if (Tok.hasLeadingSpace() ||
+ // If we haven't emitted a token on this line yet, PrevTok isn't
+ // useful to look at and no concatenation could happen anyway.
+ (Callbacks->hasEmittedTokensOnThisLine() &&
+ // Don't print "-" next to "-", it would form "--".
+ Callbacks->AvoidConcat(PrevPrevTok, PrevTok, Tok))) {
+ OS << ' ';
+ }
+
+ if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
+ OS << II->getName();
+ } else if (Tok.isLiteral() && !Tok.needsCleaning() &&
+ Tok.getLiteralData()) {
+ OS.write(Tok.getLiteralData(), Tok.getLength());
+ } else if (Tok.getLength() < 256) {
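+      // Spell short tokens through the stack buffer to avoid heap-allocating
+      // a std::string for every token.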
+ const char *TokPtr = Buffer;
+ unsigned Len = PP.getSpelling(Tok, TokPtr);
+ OS.write(TokPtr, Len);
+
+ // Tokens that can contain embedded newlines need to adjust our current
+ // line number.
+ if (Tok.getKind() == tok::comment)
+ Callbacks->HandleNewlinesInToken(TokPtr, Len);
+ } else {
+ std::string S = PP.getSpelling(Tok);
+ OS.write(&S[0], S.size());
+
+ // Tokens that can contain embedded newlines need to adjust our current
+ // line number.
+ if (Tok.getKind() == tok::comment)
+ Callbacks->HandleNewlinesInToken(&S[0], S.size());
+ }
+ Callbacks->SetEmittedTokensOnThisLine();
+
+ if (Tok.is(tok::eof)) break;
+
+ PrevPrevTok = PrevTok;
+ PrevTok = Tok;
+ PP.Lex(Tok);
+ }
+}
+
+typedef std::pair<IdentifierInfo*, MacroInfo*> id_macro_pair;
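+// Comparator for llvm::array_pod_sort: order macros alphabetically by name so
+// the -dM output below is deterministic.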
+static int MacroIDCompare(const void* a, const void* b) {
+ const id_macro_pair *LHS = static_cast<const id_macro_pair*>(a);
+ const id_macro_pair *RHS = static_cast<const id_macro_pair*>(b);
+ return LHS->first->getName().compare(RHS->first->getName());
+}
+
+static void DoPrintMacros(Preprocessor &PP, raw_ostream *OS) {
+ // Ignore unknown pragmas.
+ PP.AddPragmaHandler(new EmptyPragmaHandler());
+
+ // -dM mode just scans and ignores all tokens in the files, then dumps out
+ // the macro table at the end.
+ PP.EnterMainSourceFile();
+
+ Token Tok;
+ do PP.Lex(Tok);
+ while (Tok.isNot(tok::eof));
+
+ SmallVector<id_macro_pair, 128>
+ MacrosByID(PP.macro_begin(), PP.macro_end());
+ llvm::array_pod_sort(MacrosByID.begin(), MacrosByID.end(), MacroIDCompare);
+
+ for (unsigned i = 0, e = MacrosByID.size(); i != e; ++i) {
+ MacroInfo &MI = *MacrosByID[i].second;
+ // Ignore computed macros like __LINE__ and friends.
+ if (MI.isBuiltinMacro()) continue;
+
+ PrintMacroDefinition(*MacrosByID[i].first, MI, PP, *OS);
+ *OS << '\n';
+ }
+}
+
+/// DoPrintPreprocessedInput - This implements -E mode.
+///
+void clang::DoPrintPreprocessedInput(Preprocessor &PP, raw_ostream *OS,
+ const PreprocessorOutputOptions &Opts) {
+  // Showing macros with no preprocessed output is handled specially.
+ if (!Opts.ShowCPP) {
+ assert(Opts.ShowMacros && "Not yet implemented!");
+ DoPrintMacros(PP, OS);
+ return;
+ }
+
+ // Inform the preprocessor whether we want it to retain comments or not, due
+ // to -C or -CC.
+ PP.SetCommentRetentionState(Opts.ShowComments, Opts.ShowMacroComments);
+
+ PrintPPOutputPPCallbacks *Callbacks =
+ new PrintPPOutputPPCallbacks(PP, *OS, !Opts.ShowLineMarkers,
+ Opts.ShowMacros);
+ PP.AddPragmaHandler(new UnknownPragmaHandler("#pragma", Callbacks));
+ PP.AddPragmaHandler("GCC", new UnknownPragmaHandler("#pragma GCC",Callbacks));
+ PP.AddPragmaHandler("clang",
+ new UnknownPragmaHandler("#pragma clang", Callbacks));
+
+ PP.addPPCallbacks(Callbacks);
+
+ // After we have configured the preprocessor, enter the main file.
+ PP.EnterMainSourceFile();
+
+ // Consume all of the tokens that come from the predefines buffer. Those
+ // should not be emitted into the output and are guaranteed to be at the
+ // start.
+ const SourceManager &SourceMgr = PP.getSourceManager();
+ Token Tok;
+ do {
+ PP.Lex(Tok);
+ if (Tok.is(tok::eof) || !Tok.getLocation().isFileID())
+ break;
+
+ PresumedLoc PLoc = SourceMgr.getPresumedLoc(Tok.getLocation());
+ if (PLoc.isInvalid())
+ break;
+
+ if (strcmp(PLoc.getFilename(), "<built-in>"))
+ break;
+ } while (true);
+
+ // Read all the preprocessed tokens, printing them out to the stream.
+ PrintPreprocessedTokens(PP, Tok, Callbacks, *OS);
+ *OS << '\n';
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
new file mode 100644
index 0000000..7bf8742
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/SerializedDiagnosticPrinter.cpp
@@ -0,0 +1,592 @@
+//===--- SerializedDiagnosticPrinter.cpp - Serializer for diagnostics -----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include <vector>
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/DenseSet.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/Version.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Frontend/SerializedDiagnosticPrinter.h"
+#include "clang/Frontend/DiagnosticRenderer.h"
+
+using namespace clang;
+using namespace clang::serialized_diags;
+
+namespace {
+
+class AbbreviationMap {
+ llvm::DenseMap<unsigned, unsigned> Abbrevs;
+public:
+ AbbreviationMap() {}
+
+ void set(unsigned recordID, unsigned abbrevID) {
+ assert(Abbrevs.find(recordID) == Abbrevs.end()
+ && "Abbreviation already set.");
+ Abbrevs[recordID] = abbrevID;
+ }
+
+ unsigned get(unsigned recordID) {
+ assert(Abbrevs.find(recordID) != Abbrevs.end() &&
+ "Abbreviation not set.");
+ return Abbrevs[recordID];
+ }
+};
+
+typedef llvm::SmallVector<uint64_t, 64> RecordData;
+typedef llvm::SmallVectorImpl<uint64_t> RecordDataImpl;
+
+class SDiagsWriter;
+
+class SDiagsRenderer : public DiagnosticNoteRenderer {
+ SDiagsWriter &Writer;
+ RecordData &Record;
+public:
+ SDiagsRenderer(SDiagsWriter &Writer, RecordData &Record,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const DiagnosticOptions &DiagOpts)
+ : DiagnosticNoteRenderer(SM, LangOpts, DiagOpts),
+ Writer(Writer), Record(Record){}
+
+ virtual ~SDiagsRenderer() {}
+
+protected:
+ virtual void emitDiagnosticMessage(SourceLocation Loc,
+ PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ ArrayRef<CharSourceRange> Ranges,
+ DiagOrStoredDiag D);
+
+ virtual void emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ ArrayRef<CharSourceRange> Ranges) {}
+
+ void emitNote(SourceLocation Loc, StringRef Message);
+
+ virtual void emitCodeContext(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange>& Ranges,
+ ArrayRef<FixItHint> Hints);
+
+ virtual void beginDiagnostic(DiagOrStoredDiag D,
+ DiagnosticsEngine::Level Level);
+ virtual void endDiagnostic(DiagOrStoredDiag D,
+ DiagnosticsEngine::Level Level);
+};
+
+class SDiagsWriter : public DiagnosticConsumer {
+ friend class SDiagsRenderer;
+public:
+ explicit SDiagsWriter(llvm::raw_ostream *os, const DiagnosticOptions &diags)
+ : LangOpts(0), DiagOpts(diags),
+ Stream(Buffer), OS(os), inNonNoteDiagnostic(false)
+ {
+ EmitPreamble();
+ }
+
+ ~SDiagsWriter() {}
+
+ void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info);
+
+ void BeginSourceFile(const LangOptions &LO,
+ const Preprocessor *PP) {
+ LangOpts = &LO;
+ }
+
+ virtual void finish();
+
+ DiagnosticConsumer *clone(DiagnosticsEngine &Diags) const {
+ // It makes no sense to clone this.
+ return 0;
+ }
+
+private:
+ /// \brief Emit the preamble for the serialized diagnostics.
+ void EmitPreamble();
+
+ /// \brief Emit the BLOCKINFO block.
+ void EmitBlockInfoBlock();
+
+ /// \brief Emit the META data block.
+ void EmitMetaBlock();
+
+ /// \brief Emit a record for a CharSourceRange.
+ void EmitCharSourceRange(CharSourceRange R, const SourceManager &SM);
+
+ /// \brief Emit the string information for the category.
+ unsigned getEmitCategory(unsigned category = 0);
+
+ /// \brief Emit the string information for diagnostic flags.
+ unsigned getEmitDiagnosticFlag(DiagnosticsEngine::Level DiagLevel,
+ unsigned DiagID = 0);
+
+  /// \brief Emit (lazily) the file string and retrieve the file identifier.
+ unsigned getEmitFile(const char *Filename);
+
+  /// \brief Add SourceLocation information to the specified record.
+ void AddLocToRecord(SourceLocation Loc, const SourceManager &SM,
+ PresumedLoc PLoc, RecordDataImpl &Record,
+ unsigned TokSize = 0);
+
+  /// \brief Add SourceLocation information to the specified record.
+ void AddLocToRecord(SourceLocation Loc, RecordDataImpl &Record,
+ const SourceManager &SM,
+ unsigned TokSize = 0) {
+ AddLocToRecord(Loc, SM, SM.getPresumedLoc(Loc), Record, TokSize);
+ }
+
+  /// \brief Add CharSourceRange information to the specified record.
+ void AddCharSourceRangeToRecord(CharSourceRange R, RecordDataImpl &Record,
+ const SourceManager &SM);
+
+ /// \brief The version of the diagnostics file.
+ enum { Version = 1 };
+
+ const LangOptions *LangOpts;
+ const DiagnosticOptions &DiagOpts;
+
+ /// \brief The byte buffer for the serialized content.
+ SmallString<1024> Buffer;
+
+  /// \brief The BitstreamWriter for the serialized diagnostics.
+ llvm::BitstreamWriter Stream;
+
+  /// \brief The output stream for the serialized diagnostics file.
+ OwningPtr<llvm::raw_ostream> OS;
+
+ /// \brief The set of constructed record abbreviations.
+ AbbreviationMap Abbrevs;
+
+ /// \brief A utility buffer for constructing record content.
+ RecordData Record;
+
+ /// \brief A text buffer for rendering diagnostic text.
+ SmallString<256> diagBuf;
+
+ /// \brief The collection of diagnostic categories used.
+ llvm::DenseSet<unsigned> Categories;
+
+ /// \brief The collection of files used.
+ llvm::DenseMap<const char *, unsigned> Files;
+
+ typedef llvm::DenseMap<const void *, std::pair<unsigned, llvm::StringRef> >
+ DiagFlagsTy;
+
+ /// \brief Map for uniquing strings.
+ DiagFlagsTy DiagFlags;
+
+ /// \brief Flag indicating whether or not we are in the process of
+ /// emitting a non-note diagnostic.
+ bool inNonNoteDiagnostic;
+};
+} // end anonymous namespace
+
+namespace clang {
+namespace serialized_diags {
+DiagnosticConsumer *create(llvm::raw_ostream *OS,
+ const DiagnosticOptions &diags) {
+ return new SDiagsWriter(OS, diags);
+}
+} // end namespace serialized_diags
+} // end namespace clang
+
+//===----------------------------------------------------------------------===//
+// Serialization methods.
+//===----------------------------------------------------------------------===//
+
+/// \brief Emits a block ID in the BLOCKINFO block.
+static void EmitBlockID(unsigned ID, const char *Name,
+ llvm::BitstreamWriter &Stream,
+ RecordDataImpl &Record) {
+ Record.clear();
+ Record.push_back(ID);
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_SETBID, Record);
+
+ // Emit the block name if present.
+ if (Name == 0 || Name[0] == 0)
+ return;
+
+ Record.clear();
+
+ while (*Name)
+ Record.push_back(*Name++);
+
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_BLOCKNAME, Record);
+}
+
+/// \brief Emits a record ID in the BLOCKINFO block.
+static void EmitRecordID(unsigned ID, const char *Name,
+ llvm::BitstreamWriter &Stream,
+ RecordDataImpl &Record){
+ Record.clear();
+ Record.push_back(ID);
+
+ while (*Name)
+ Record.push_back(*Name++);
+
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_SETRECORDNAME, Record);
+}
+
+void SDiagsWriter::AddLocToRecord(SourceLocation Loc,
+ const SourceManager &SM,
+ PresumedLoc PLoc,
+ RecordDataImpl &Record,
+ unsigned TokSize) {
+ if (PLoc.isInvalid()) {
+ // Emit a "sentinel" location.
+ Record.push_back((unsigned)0); // File.
+ Record.push_back((unsigned)0); // Line.
+ Record.push_back((unsigned)0); // Column.
+ Record.push_back((unsigned)0); // Offset.
+ return;
+ }
+
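+  // TokSize is non-zero only for the end location of a token range; it
+  // extends the column so the location covers the whole final token.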
+ Record.push_back(getEmitFile(PLoc.getFilename()));
+ Record.push_back(PLoc.getLine());
+ Record.push_back(PLoc.getColumn()+TokSize);
+ Record.push_back(SM.getFileOffset(Loc));
+}
+
+void SDiagsWriter::AddCharSourceRangeToRecord(CharSourceRange Range,
+ RecordDataImpl &Record,
+ const SourceManager &SM) {
+ AddLocToRecord(Range.getBegin(), Record, SM);
+ unsigned TokSize = 0;
+ if (Range.isTokenRange())
+ TokSize = Lexer::MeasureTokenLength(Range.getEnd(),
+ SM, *LangOpts);
+
+ AddLocToRecord(Range.getEnd(), Record, SM, TokSize);
+}
+
+unsigned SDiagsWriter::getEmitFile(const char *FileName){
+ if (!FileName)
+ return 0;
+
+ unsigned &entry = Files[FileName];
+ if (entry)
+ return entry;
+
+ // Lazily generate the record for the file.
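+  // Files[FileName] above default-inserted a zero entry, so Files.size()
+  // already counts it; file IDs are therefore 1-based, leaving 0 to mean
+  // "no file".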
+ entry = Files.size();
+ RecordData Record;
+ Record.push_back(RECORD_FILENAME);
+ Record.push_back(entry);
+ Record.push_back(0); // For legacy.
+ Record.push_back(0); // For legacy.
+ StringRef Name(FileName);
+ Record.push_back(Name.size());
+ Stream.EmitRecordWithBlob(Abbrevs.get(RECORD_FILENAME), Record, Name);
+
+ return entry;
+}
+
+void SDiagsWriter::EmitCharSourceRange(CharSourceRange R,
+ const SourceManager &SM) {
+ Record.clear();
+ Record.push_back(RECORD_SOURCE_RANGE);
+ AddCharSourceRangeToRecord(R, Record, SM);
+ Stream.EmitRecordWithAbbrev(Abbrevs.get(RECORD_SOURCE_RANGE), Record);
+}
+
+/// \brief Emits the preamble of the diagnostics file.
+void SDiagsWriter::EmitPreamble() {
+ // Emit the file header.
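+  // The serialized diagnostics file begins with the four magic bytes 'DIAG'.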
+ Stream.Emit((unsigned)'D', 8);
+ Stream.Emit((unsigned)'I', 8);
+ Stream.Emit((unsigned)'A', 8);
+ Stream.Emit((unsigned)'G', 8);
+
+ EmitBlockInfoBlock();
+ EmitMetaBlock();
+}
+
+static void AddSourceLocationAbbrev(llvm::BitCodeAbbrev *Abbrev) {
+ using namespace llvm;
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10)); // File ID.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Line.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Column.
+  Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Offset.
+}
+
+static void AddRangeLocationAbbrev(llvm::BitCodeAbbrev *Abbrev) {
+ AddSourceLocationAbbrev(Abbrev);
+ AddSourceLocationAbbrev(Abbrev);
+}
+
+void SDiagsWriter::EmitBlockInfoBlock() {
+ Stream.EnterBlockInfoBlock(3);
+
+ using namespace llvm;
+
+ // ==---------------------------------------------------------------------==//
+ // The subsequent records and Abbrevs are for the "Meta" block.
+ // ==---------------------------------------------------------------------==//
+
+ EmitBlockID(BLOCK_META, "Meta", Stream, Record);
+ EmitRecordID(RECORD_VERSION, "Version", Stream, Record);
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_VERSION));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrevs.set(RECORD_VERSION, Stream.EmitBlockInfoAbbrev(BLOCK_META, Abbrev));
+
+ // ==---------------------------------------------------------------------==//
+ // The subsequent records and Abbrevs are for the "Diagnostic" block.
+ // ==---------------------------------------------------------------------==//
+
+ EmitBlockID(BLOCK_DIAG, "Diag", Stream, Record);
+ EmitRecordID(RECORD_DIAG, "DiagInfo", Stream, Record);
+ EmitRecordID(RECORD_SOURCE_RANGE, "SrcRange", Stream, Record);
+ EmitRecordID(RECORD_CATEGORY, "CatName", Stream, Record);
+ EmitRecordID(RECORD_DIAG_FLAG, "DiagFlag", Stream, Record);
+ EmitRecordID(RECORD_FILENAME, "FileName", Stream, Record);
+ EmitRecordID(RECORD_FIXIT, "FixIt", Stream, Record);
+
+ // Emit abbreviation for RECORD_DIAG.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_DIAG));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Diag level.
+ AddSourceLocationAbbrev(Abbrev);
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10)); // Category.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10)); // Mapped Diag ID.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Text size.
+  Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Diagnostic text.
+ Abbrevs.set(RECORD_DIAG, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG, Abbrev));
+
+  // Emit abbreviation for RECORD_CATEGORY.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_CATEGORY));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Category ID.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 8)); // Text size.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Category text.
+ Abbrevs.set(RECORD_CATEGORY, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG, Abbrev));
+
+  // Emit abbreviation for RECORD_SOURCE_RANGE.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_SOURCE_RANGE));
+ AddRangeLocationAbbrev(Abbrev);
+ Abbrevs.set(RECORD_SOURCE_RANGE,
+ Stream.EmitBlockInfoAbbrev(BLOCK_DIAG, Abbrev));
+
+ // Emit the abbreviation for RECORD_DIAG_FLAG.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_DIAG_FLAG));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10)); // Mapped Diag ID.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Text size.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Flag name text.
+ Abbrevs.set(RECORD_DIAG_FLAG, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG,
+ Abbrev));
+
+ // Emit the abbreviation for RECORD_FILENAME.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_FILENAME));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 10)); // Mapped file ID.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Size.
+  Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // Modification time.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Text size.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name text.
+ Abbrevs.set(RECORD_FILENAME, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG,
+ Abbrev));
+
+ // Emit the abbreviation for RECORD_FIXIT.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(RECORD_FIXIT));
+ AddRangeLocationAbbrev(Abbrev);
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Text size.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // FixIt text.
+ Abbrevs.set(RECORD_FIXIT, Stream.EmitBlockInfoAbbrev(BLOCK_DIAG,
+ Abbrev));
+
+ Stream.ExitBlock();
+}
+
+void SDiagsWriter::EmitMetaBlock() {
+ Stream.EnterSubblock(BLOCK_META, 3);
+ Record.clear();
+ Record.push_back(RECORD_VERSION);
+ Record.push_back(Version);
+ Stream.EmitRecordWithAbbrev(Abbrevs.get(RECORD_VERSION), Record);
+ Stream.ExitBlock();
+}
+
+unsigned SDiagsWriter::getEmitCategory(unsigned int category) {
+ if (Categories.count(category))
+ return category;
+
+ Categories.insert(category);
+
+  // We use a local version of 'Record' so that we don't clobber a record
+  // that may already be under construction when we lazily emit the category
+  // entry.
+ RecordData Record;
+ Record.push_back(RECORD_CATEGORY);
+ Record.push_back(category);
+ StringRef catName = DiagnosticIDs::getCategoryNameFromID(category);
+ Record.push_back(catName.size());
+ Stream.EmitRecordWithBlob(Abbrevs.get(RECORD_CATEGORY), Record, catName);
+
+ return category;
+}
+
+unsigned SDiagsWriter::getEmitDiagnosticFlag(DiagnosticsEngine::Level DiagLevel,
+ unsigned DiagID) {
+ if (DiagLevel == DiagnosticsEngine::Note)
+ return 0; // No flag for notes.
+
+ StringRef FlagName = DiagnosticIDs::getWarningOptionForDiag(DiagID);
+ if (FlagName.empty())
+ return 0;
+
+ // Here we assume that FlagName points to static data whose pointer
+ // value is fixed. This allows us to unique by diagnostic groups.
+ const void *data = FlagName.data();
+ std::pair<unsigned, StringRef> &entry = DiagFlags[data];
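+  // The lookup above default-inserts {0, ""}; assigning DiagFlags.size()
+  // below gives 1-based IDs, leaving 0 to mean "no flag".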
+ if (entry.first == 0) {
+ entry.first = DiagFlags.size();
+ entry.second = FlagName;
+
+ // Lazily emit the string in a separate record.
+ RecordData Record;
+ Record.push_back(RECORD_DIAG_FLAG);
+ Record.push_back(entry.first);
+ Record.push_back(FlagName.size());
+ Stream.EmitRecordWithBlob(Abbrevs.get(RECORD_DIAG_FLAG),
+ Record, FlagName);
+ }
+
+ return entry.first;
+}
+
+void SDiagsWriter::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info) {
+ if (DiagLevel != DiagnosticsEngine::Note) {
+ if (inNonNoteDiagnostic) {
+ // We have encountered a non-note diagnostic. Finish up the previous
+ // diagnostic block before starting a new one.
+ Stream.ExitBlock();
+ }
+ inNonNoteDiagnostic = true;
+ }
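+  // Note: the BLOCK_DIAG subblock for this diagnostic is opened later in
+  // beginDiagnostic() and deliberately left open so that trailing notes nest
+  // inside it; it is closed above on the next non-note diagnostic or in
+  // finish().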
+
+ // Compute the diagnostic text.
+ diagBuf.clear();
+ Info.FormatDiagnostic(diagBuf);
+
+ SourceManager &SM = Info.getSourceManager();
+ SDiagsRenderer Renderer(*this, Record, SM, *LangOpts, DiagOpts);
+ Renderer.emitDiagnostic(Info.getLocation(), DiagLevel,
+ diagBuf.str(),
+ Info.getRanges(),
+ llvm::makeArrayRef(Info.getFixItHints(),
+ Info.getNumFixItHints()),
+ &Info);
+}
+
+void
+SDiagsRenderer::emitDiagnosticMessage(SourceLocation Loc,
+ PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ ArrayRef<clang::CharSourceRange> Ranges,
+ DiagOrStoredDiag D) {
+ // Emit the RECORD_DIAG record.
+ Writer.Record.clear();
+ Writer.Record.push_back(RECORD_DIAG);
+ Writer.Record.push_back(Level);
+ Writer.AddLocToRecord(Loc, SM, PLoc, Record);
+
+ if (const Diagnostic *Info = D.dyn_cast<const Diagnostic*>()) {
+ // Emit the category string lazily and get the category ID.
+ unsigned DiagID = DiagnosticIDs::getCategoryNumberForDiag(Info->getID());
+ Writer.Record.push_back(Writer.getEmitCategory(DiagID));
+ // Emit the diagnostic flag string lazily and get the mapped ID.
+ Writer.Record.push_back(Writer.getEmitDiagnosticFlag(Level, Info->getID()));
+ }
+ else {
+ Writer.Record.push_back(Writer.getEmitCategory());
+ Writer.Record.push_back(Writer.getEmitDiagnosticFlag(Level));
+ }
+
+ Writer.Record.push_back(Message.size());
+ Writer.Stream.EmitRecordWithBlob(Writer.Abbrevs.get(RECORD_DIAG),
+ Writer.Record, Message);
+}
+
+void SDiagsRenderer::beginDiagnostic(DiagOrStoredDiag D,
+ DiagnosticsEngine::Level Level) {
+ Writer.Stream.EnterSubblock(BLOCK_DIAG, 4);
+}
+
+void SDiagsRenderer::endDiagnostic(DiagOrStoredDiag D,
+ DiagnosticsEngine::Level Level) {
+ if (D && Level != DiagnosticsEngine::Note)
+ return;
+ Writer.Stream.ExitBlock();
+}
+
+void SDiagsRenderer::emitCodeContext(SourceLocation Loc,
+ DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange> &Ranges,
+ ArrayRef<FixItHint> Hints) {
+ // Emit Source Ranges.
+ for (ArrayRef<CharSourceRange>::iterator it=Ranges.begin(), ei=Ranges.end();
+ it != ei; ++it) {
+ if (it->isValid())
+ Writer.EmitCharSourceRange(*it, SM);
+ }
+
+ // Emit FixIts.
+ for (ArrayRef<FixItHint>::iterator it = Hints.begin(), et = Hints.end();
+ it != et; ++it) {
+ const FixItHint &fix = *it;
+ if (fix.isNull())
+ continue;
+ Writer.Record.clear();
+ Writer.Record.push_back(RECORD_FIXIT);
+ Writer.AddCharSourceRangeToRecord(fix.RemoveRange, Record, SM);
+ Writer.Record.push_back(fix.CodeToInsert.size());
+ Writer.Stream.EmitRecordWithBlob(Writer.Abbrevs.get(RECORD_FIXIT), Record,
+ fix.CodeToInsert);
+ }
+}
+
+void SDiagsRenderer::emitNote(SourceLocation Loc, StringRef Message) {
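+  // Each note gets its own BLOCK_DIAG subblock, nested inside the block of
+  // the diagnostic it annotates.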
+ Writer.Stream.EnterSubblock(BLOCK_DIAG, 4);
+ RecordData Record;
+ Record.push_back(RECORD_DIAG);
+ Record.push_back(DiagnosticsEngine::Note);
+ Writer.AddLocToRecord(Loc, Record, SM);
+ Record.push_back(Writer.getEmitCategory());
+ Record.push_back(Writer.getEmitDiagnosticFlag(DiagnosticsEngine::Note));
+ Record.push_back(Message.size());
+ Writer.Stream.EmitRecordWithBlob(Writer.Abbrevs.get(RECORD_DIAG),
+ Record, Message);
+ Writer.Stream.ExitBlock();
+}
+
+void SDiagsWriter::finish() {
+ if (inNonNoteDiagnostic) {
+ // Finish off any diagnostics we were in the process of emitting.
+ Stream.ExitBlock();
+ inNonNoteDiagnostic = false;
+ }
+
+  // Write the generated bitstream to the output stream.
+ OS->write((char *)&Buffer.front(), Buffer.size());
+ OS->flush();
+
+ OS.reset(0);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp
new file mode 100644
index 0000000..9f5dcb4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnostic.cpp
@@ -0,0 +1,881 @@
+//===--- TextDiagnostic.cpp - Text Diagnostic Pretty-Printing -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/TextDiagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/DiagnosticOptions.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/SmallString.h"
+#include <algorithm>
+using namespace clang;
+
+static const enum raw_ostream::Colors noteColor =
+ raw_ostream::BLACK;
+static const enum raw_ostream::Colors fixitColor =
+ raw_ostream::GREEN;
+static const enum raw_ostream::Colors caretColor =
+ raw_ostream::GREEN;
+static const enum raw_ostream::Colors warningColor =
+ raw_ostream::MAGENTA;
+static const enum raw_ostream::Colors errorColor = raw_ostream::RED;
+static const enum raw_ostream::Colors fatalColor = raw_ostream::RED;
+// Used for changing only the bold attribute.
+static const enum raw_ostream::Colors savedColor =
+ raw_ostream::SAVEDCOLOR;
+
+/// \brief Number of spaces to indent when word-wrapping.
+const unsigned WordWrapIndentation = 6;
+
+/// \brief When the source code line we want to print is too long for
+/// the terminal, select the "interesting" region.
+static void selectInterestingSourceRegion(std::string &SourceLine,
+ std::string &CaretLine,
+ std::string &FixItInsertionLine,
+ unsigned EndOfCaretToken,
+ unsigned Columns) {
+ unsigned MaxSize = std::max(SourceLine.size(),
+ std::max(CaretLine.size(),
+ FixItInsertionLine.size()));
+ if (MaxSize > SourceLine.size())
+ SourceLine.resize(MaxSize, ' ');
+ if (MaxSize > CaretLine.size())
+ CaretLine.resize(MaxSize, ' ');
+ if (!FixItInsertionLine.empty() && MaxSize > FixItInsertionLine.size())
+ FixItInsertionLine.resize(MaxSize, ' ');
+
+ // Find the slice that we need to display the full caret line
+ // correctly.
+ unsigned CaretStart = 0, CaretEnd = CaretLine.size();
+ for (; CaretStart != CaretEnd; ++CaretStart)
+ if (!isspace(CaretLine[CaretStart]))
+ break;
+
+ for (; CaretEnd != CaretStart; --CaretEnd)
+ if (!isspace(CaretLine[CaretEnd - 1]))
+ break;
+
+ // Make sure we don't chop the string shorter than the caret token
+ // itself.
+ if (CaretEnd < EndOfCaretToken)
+ CaretEnd = EndOfCaretToken;
+
+ // If we have a fix-it line, make sure the slice includes all of the
+ // fix-it information.
+ if (!FixItInsertionLine.empty()) {
+ unsigned FixItStart = 0, FixItEnd = FixItInsertionLine.size();
+ for (; FixItStart != FixItEnd; ++FixItStart)
+ if (!isspace(FixItInsertionLine[FixItStart]))
+ break;
+
+ for (; FixItEnd != FixItStart; --FixItEnd)
+ if (!isspace(FixItInsertionLine[FixItEnd - 1]))
+ break;
+
+ if (FixItStart < CaretStart)
+ CaretStart = FixItStart;
+ if (FixItEnd > CaretEnd)
+ CaretEnd = FixItEnd;
+ }
+
+ // CaretLine[CaretStart, CaretEnd) contains all of the interesting
+ // parts of the caret line. While this slice is smaller than the
+ // number of columns we have, try to grow the slice to encompass
+ // more context.
+
+ // If the end of the interesting region comes before we run out of
+ // space in the terminal, start at the beginning of the line.
+ if (Columns > 3 && CaretEnd < Columns - 3)
+ CaretStart = 0;
+
+ unsigned TargetColumns = Columns;
+ if (TargetColumns > 8)
+ TargetColumns -= 8; // Give us extra room for the ellipses.
+ unsigned SourceLength = SourceLine.size();
+ while ((CaretEnd - CaretStart) < TargetColumns) {
+ bool ExpandedRegion = false;
+ // Move the start of the interesting region left until we've
+ // pulled in something else interesting.
+ if (CaretStart == 1)
+ CaretStart = 0;
+ else if (CaretStart > 1) {
+ unsigned NewStart = CaretStart - 1;
+
+ // Skip over any whitespace we see here; we're looking for
+ // another bit of interesting text.
+ while (NewStart && isspace(SourceLine[NewStart]))
+ --NewStart;
+
+ // Skip over this bit of "interesting" text.
+ while (NewStart && !isspace(SourceLine[NewStart]))
+ --NewStart;
+
+ // Move up to the non-whitespace character we just saw.
+ if (NewStart)
+ ++NewStart;
+
+ // If we're still within our limit, update the starting
+ // position within the source/caret line.
+ if (CaretEnd - NewStart <= TargetColumns) {
+ CaretStart = NewStart;
+ ExpandedRegion = true;
+ }
+ }
+
+ // Move the end of the interesting region right until we've
+ // pulled in something else interesting.
+ if (CaretEnd != SourceLength) {
+ assert(CaretEnd < SourceLength && "Unexpected caret position!");
+ unsigned NewEnd = CaretEnd;
+
+ // Skip over any whitespace we see here; we're looking for
+ // another bit of interesting text.
+ while (NewEnd != SourceLength && isspace(SourceLine[NewEnd - 1]))
+ ++NewEnd;
+
+ // Skip over this bit of "interesting" text.
+ while (NewEnd != SourceLength && !isspace(SourceLine[NewEnd - 1]))
+ ++NewEnd;
+
+ if (NewEnd - CaretStart <= TargetColumns) {
+ CaretEnd = NewEnd;
+ ExpandedRegion = true;
+ }
+ }
+
+ if (!ExpandedRegion)
+ break;
+ }
+
+ // [CaretStart, CaretEnd) is the slice we want. Update the various
+ // output lines to show only this slice, with two-space padding
+ // before the lines so that it looks nicer.
+ if (CaretEnd < SourceLine.size())
+ SourceLine.replace(CaretEnd, std::string::npos, "...");
+ if (CaretEnd < CaretLine.size())
+ CaretLine.erase(CaretEnd, std::string::npos);
+ if (FixItInsertionLine.size() > CaretEnd)
+ FixItInsertionLine.erase(CaretEnd, std::string::npos);
+
+ if (CaretStart > 2) {
+ SourceLine.replace(0, CaretStart, " ...");
+ CaretLine.replace(0, CaretStart, " ");
+ if (FixItInsertionLine.size() >= CaretStart)
+ FixItInsertionLine.replace(0, CaretStart, " ");
+ }
+}
+
+/// \brief Skip over whitespace in the string, starting at the given
+/// index.
+///
+/// \returns The index of the first non-whitespace character that is
+/// greater than or equal to Idx or, if no such character exists,
+/// returns the end of the string.
+static unsigned skipWhitespace(unsigned Idx, StringRef Str, unsigned Length) {
+ while (Idx < Length && isspace(Str[Idx]))
+ ++Idx;
+ return Idx;
+}
+
+/// \brief If the given character is the start of some kind of
+/// balanced punctuation (e.g., quotes or parentheses), return the
+/// character that will terminate the punctuation.
+///
+/// \returns The ending punctuation character, if any, or the NULL
+/// character if the input character does not start any punctuation.
+static inline char findMatchingPunctuation(char c) {
+ switch (c) {
+ case '\'': return '\'';
+ case '`': return '\'';
+ case '"': return '"';
+ case '(': return ')';
+ case '[': return ']';
+ case '{': return '}';
+ default: break;
+ }
+
+ return 0;
+}
+
+/// \brief Find the end of the word starting at the given offset
+/// within a string.
+///
+/// \returns the index pointing one character past the end of the
+/// word.
+static unsigned findEndOfWord(unsigned Start, StringRef Str,
+ unsigned Length, unsigned Column,
+ unsigned Columns) {
+ assert(Start < Str.size() && "Invalid start position!");
+ unsigned End = Start + 1;
+
+ // If we are already at the end of the string, take that as the word.
+ if (End == Str.size())
+ return End;
+
+ // Determine if the start of the string is actually opening
+ // punctuation, e.g., a quote or parentheses.
+ char EndPunct = findMatchingPunctuation(Str[Start]);
+ if (!EndPunct) {
+ // This is a normal word. Just find the first space character.
+ while (End < Length && !isspace(Str[End]))
+ ++End;
+ return End;
+ }
+
+ // We have the start of a balanced punctuation sequence (quotes,
+  // parentheses, etc.). Determine where the full sequence ends.
+ SmallString<16> PunctuationEndStack;
+ PunctuationEndStack.push_back(EndPunct);
+ while (End < Length && !PunctuationEndStack.empty()) {
+ if (Str[End] == PunctuationEndStack.back())
+ PunctuationEndStack.pop_back();
+ else if (char SubEndPunct = findMatchingPunctuation(Str[End]))
+ PunctuationEndStack.push_back(SubEndPunct);
+
+ ++End;
+ }
+
+ // Find the first space character after the punctuation ended.
+ while (End < Length && !isspace(Str[End]))
+ ++End;
+
+ unsigned PunctWordLength = End - Start;
+ if (// If the word fits on this line
+ Column + PunctWordLength <= Columns ||
+ // ... or the word is "short enough" to take up the next line
+ // without too much ugly white space
+ PunctWordLength < Columns/3)
+ return End; // Take the whole thing as a single "word".
+
+ // The whole quoted/parenthesized string is too long to print as a
+ // single "word". Instead, find the "word" that starts just after
+ // the punctuation and use that end-point instead. This will recurse
+ // until it finds something small enough to consider a word.
+ return findEndOfWord(Start + 1, Str, Length, Column + 1, Columns);
+}
+
+/// \brief Print the given string to a stream, word-wrapping it to
+/// some number of columns in the process.
+///
+/// \param OS the stream to which the word-wrapping string will be
+/// emitted.
+/// \param Str the string to word-wrap and output.
+/// \param Columns the number of columns to word-wrap to.
+/// \param Column the column number at which the first character of \p
+/// Str will be printed. This will be non-zero when part of the first
+/// line has already been printed.
+/// \param Indentation the number of spaces to indent any lines beyond
+/// the first line.
+/// \returns true if word-wrapping was required, or false if the
+/// string fit on the first line.
+static bool printWordWrapped(raw_ostream &OS, StringRef Str,
+ unsigned Columns,
+ unsigned Column = 0,
+ unsigned Indentation = WordWrapIndentation) {
+ const unsigned Length = std::min(Str.find('\n'), Str.size());
+
+ // The string used to indent each line.
+ SmallString<16> IndentStr;
+ IndentStr.assign(Indentation, ' ');
+ bool Wrapped = false;
+ for (unsigned WordStart = 0, WordEnd; WordStart < Length;
+ WordStart = WordEnd) {
+ // Find the beginning of the next word.
+ WordStart = skipWhitespace(WordStart, Str, Length);
+ if (WordStart == Length)
+ break;
+
+ // Find the end of this word.
+ WordEnd = findEndOfWord(WordStart, Str, Length, Column, Columns);
+
+ // Does this word fit on the current line?
+ unsigned WordLength = WordEnd - WordStart;
+ if (Column + WordLength < Columns) {
+ // This word fits on the current line; print it there.
+ if (WordStart) {
+ OS << ' ';
+ Column += 1;
+ }
+ OS << Str.substr(WordStart, WordLength);
+ Column += WordLength;
+ continue;
+ }
+
+ // This word does not fit on the current line, so wrap to the next
+ // line.
+ OS << '\n';
+ OS.write(&IndentStr[0], Indentation);
+ OS << Str.substr(WordStart, WordLength);
+ Column = Indentation + WordLength;
+ Wrapped = true;
+ }
+
+  // Append any remaining text from the message with its existing formatting.
+ OS << Str.substr(Length);
+
+ return Wrapped;
+}
+
+TextDiagnostic::TextDiagnostic(raw_ostream &OS,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const DiagnosticOptions &DiagOpts)
+ : DiagnosticRenderer(SM, LangOpts, DiagOpts), OS(OS) {}
+
+TextDiagnostic::~TextDiagnostic() {}
+
+void
+TextDiagnostic::emitDiagnosticMessage(SourceLocation Loc,
+ PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ ArrayRef<clang::CharSourceRange> Ranges,
+ DiagOrStoredDiag D) {
+ uint64_t StartOfLocationInfo = OS.tell();
+
+ // Emit the location of this particular diagnostic.
+ emitDiagnosticLoc(Loc, PLoc, Level, Ranges);
+
+ if (DiagOpts.ShowColors)
+ OS.resetColor();
+
+ printDiagnosticLevel(OS, Level, DiagOpts.ShowColors);
+ printDiagnosticMessage(OS, Level, Message,
+ OS.tell() - StartOfLocationInfo,
+ DiagOpts.MessageLength, DiagOpts.ShowColors);
+}
+
+/*static*/ void
+TextDiagnostic::printDiagnosticLevel(raw_ostream &OS,
+ DiagnosticsEngine::Level Level,
+ bool ShowColors) {
+ if (ShowColors) {
+    // Print the diagnostic level in bold and color.
+ switch (Level) {
+ case DiagnosticsEngine::Ignored:
+ llvm_unreachable("Invalid diagnostic type");
+ case DiagnosticsEngine::Note: OS.changeColor(noteColor, true); break;
+ case DiagnosticsEngine::Warning: OS.changeColor(warningColor, true); break;
+ case DiagnosticsEngine::Error: OS.changeColor(errorColor, true); break;
+ case DiagnosticsEngine::Fatal: OS.changeColor(fatalColor, true); break;
+ }
+ }
+
+ switch (Level) {
+ case DiagnosticsEngine::Ignored:
+ llvm_unreachable("Invalid diagnostic type");
+ case DiagnosticsEngine::Note: OS << "note: "; break;
+ case DiagnosticsEngine::Warning: OS << "warning: "; break;
+ case DiagnosticsEngine::Error: OS << "error: "; break;
+ case DiagnosticsEngine::Fatal: OS << "fatal error: "; break;
+ }
+
+ if (ShowColors)
+ OS.resetColor();
+}
+
+/*static*/ void
+TextDiagnostic::printDiagnosticMessage(raw_ostream &OS,
+ DiagnosticsEngine::Level Level,
+ StringRef Message,
+ unsigned CurrentColumn, unsigned Columns,
+ bool ShowColors) {
+ if (ShowColors) {
+ // Print warnings, errors and fatal errors in bold, no color
+ switch (Level) {
+ case DiagnosticsEngine::Warning: OS.changeColor(savedColor, true); break;
+ case DiagnosticsEngine::Error: OS.changeColor(savedColor, true); break;
+ case DiagnosticsEngine::Fatal: OS.changeColor(savedColor, true); break;
+ default: break; //don't bold notes
+ }
+ }
+
+ if (Columns)
+ printWordWrapped(OS, Message, Columns, CurrentColumn);
+ else
+ OS << Message;
+
+ if (ShowColors)
+ OS.resetColor();
+ OS << '\n';
+}
+
+/// \brief Print out the file/line/column information and include trace.
+///
+/// This method handles the emission of the diagnostic location information.
+/// This includes extracting as much location information as is present for
+/// the diagnostic and printing it, as well as any include stack or source
+/// ranges necessary.
+void TextDiagnostic::emitDiagnosticLoc(SourceLocation Loc, PresumedLoc PLoc,
+ DiagnosticsEngine::Level Level,
+ ArrayRef<CharSourceRange> Ranges) {
+ if (PLoc.isInvalid()) {
+ // At least print the file name if available:
+ FileID FID = SM.getFileID(Loc);
+ if (!FID.isInvalid()) {
+ const FileEntry* FE = SM.getFileEntryForID(FID);
+ if (FE && FE->getName()) {
+ OS << FE->getName();
+ if (FE->getDevice() == 0 && FE->getInode() == 0
+ && FE->getFileMode() == 0) {
+          // Saying "(in PCH)" is a guess, but a good one:
+ OS << " (in PCH)";
+ }
+ OS << ": ";
+ }
+ }
+ return;
+ }
+ unsigned LineNo = PLoc.getLine();
+
+ if (!DiagOpts.ShowLocation)
+ return;
+
+ if (DiagOpts.ShowColors)
+ OS.changeColor(savedColor, true);
+
+ OS << PLoc.getFilename();
+ switch (DiagOpts.Format) {
+ case DiagnosticOptions::Clang: OS << ':' << LineNo; break;
+ case DiagnosticOptions::Msvc: OS << '(' << LineNo; break;
+ case DiagnosticOptions::Vi: OS << " +" << LineNo; break;
+ }
+
+ if (DiagOpts.ShowColumn)
+ // Compute the column number.
+ if (unsigned ColNo = PLoc.getColumn()) {
+ if (DiagOpts.Format == DiagnosticOptions::Msvc) {
+ OS << ',';
+ ColNo--;
+ } else
+ OS << ':';
+ OS << ColNo;
+ }
+ switch (DiagOpts.Format) {
+ case DiagnosticOptions::Clang:
+ case DiagnosticOptions::Vi: OS << ':'; break;
+ case DiagnosticOptions::Msvc: OS << ") : "; break;
+ }
+
+ if (DiagOpts.ShowSourceRanges && !Ranges.empty()) {
+ FileID CaretFileID =
+ SM.getFileID(SM.getExpansionLoc(Loc));
+ bool PrintedRange = false;
+
+ for (ArrayRef<CharSourceRange>::const_iterator RI = Ranges.begin(),
+ RE = Ranges.end();
+ RI != RE; ++RI) {
+ // Ignore invalid ranges.
+ if (!RI->isValid()) continue;
+
+ SourceLocation B = SM.getExpansionLoc(RI->getBegin());
+ SourceLocation E = SM.getExpansionLoc(RI->getEnd());
+
+ // If the End location and the start location are the same and are a
+ // macro location, then the range was something that came from a
+ // macro expansion or _Pragma. If this is an object-like macro, the
+ // best we can do is to highlight the range. If this is a
+ // function-like macro, we'd also like to highlight the arguments.
+ if (B == E && RI->getEnd().isMacroID())
+ E = SM.getExpansionRange(RI->getEnd()).second;
+
+ std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(B);
+ std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(E);
+
+ // If the start or end of the range is in another file, just discard
+ // it.
+ if (BInfo.first != CaretFileID || EInfo.first != CaretFileID)
+ continue;
+
+ // Add in the length of the token, so that we cover multi-char
+ // tokens.
+ unsigned TokSize = 0;
+ if (RI->isTokenRange())
+ TokSize = Lexer::MeasureTokenLength(E, SM, LangOpts);
+
+ OS << '{' << SM.getLineNumber(BInfo.first, BInfo.second) << ':'
+ << SM.getColumnNumber(BInfo.first, BInfo.second) << '-'
+ << SM.getLineNumber(EInfo.first, EInfo.second) << ':'
+ << (SM.getColumnNumber(EInfo.first, EInfo.second)+TokSize)
+ << '}';
+ PrintedRange = true;
+ }
+
+ if (PrintedRange)
+ OS << ':';
+ }
+ OS << ' ';
+}
+
+void TextDiagnostic::emitBasicNote(StringRef Message) {
+ // FIXME: Emit this as a real note diagnostic.
+ // FIXME: Format an actual diagnostic rather than a hard coded string.
+ OS << "note: " << Message << "\n";
+}
+
+void TextDiagnostic::emitIncludeLocation(SourceLocation Loc,
+ PresumedLoc PLoc) {
+ if (DiagOpts.ShowLocation)
+ OS << "In file included from " << PLoc.getFilename() << ':'
+ << PLoc.getLine() << ":\n";
+ else
+ OS << "In included file:\n";
+}
+
+/// \brief Emit a code snippet and caret line.
+///
+/// This routine emits a single line's code snippet and caret line.
+///
+/// \param Loc The location for the caret.
+/// \param Ranges The underlined ranges for this code snippet.
+/// \param Hints The FixIt hints active for this diagnostic.
+void TextDiagnostic::emitSnippetAndCaret(
+ SourceLocation Loc, DiagnosticsEngine::Level Level,
+ SmallVectorImpl<CharSourceRange>& Ranges,
+ ArrayRef<FixItHint> Hints) {
+ assert(!Loc.isInvalid() && "must have a valid source location here");
+ assert(Loc.isFileID() && "must have a file location here");
+
+ // If caret diagnostics are enabled and we have location, we want to
+ // emit the caret. However, we only do this if the location moved
+ // from the last diagnostic, if the last diagnostic was a note that
+ // was part of a different warning or error diagnostic, or if the
+ // diagnostic has ranges. We don't want to emit the same caret
+ // multiple times if one loc has multiple diagnostics.
+ if (!DiagOpts.ShowCarets)
+ return;
+ if (Loc == LastLoc && Ranges.empty() && Hints.empty() &&
+ (LastLevel != DiagnosticsEngine::Note || Level == LastLevel))
+ return;
+
+ // Decompose the location into a FID/Offset pair.
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+ FileID FID = LocInfo.first;
+ unsigned FileOffset = LocInfo.second;
+
+ // Get information about the buffer it points into.
+ bool Invalid = false;
+ const char *BufStart = SM.getBufferData(FID, &Invalid).data();
+ if (Invalid)
+ return;
+
+ unsigned LineNo = SM.getLineNumber(FID, FileOffset);
+ unsigned ColNo = SM.getColumnNumber(FID, FileOffset);
+ unsigned CaretEndColNo
+ = ColNo + Lexer::MeasureTokenLength(Loc, SM, LangOpts);
+
+ // Rewind from the current position to the start of the line.
+ const char *TokPtr = BufStart+FileOffset;
+ const char *LineStart = TokPtr-ColNo+1; // Column # is 1-based.
+
+
+ // Compute the line end. Scan forward from the error position to the end of
+ // the line.
+ const char *LineEnd = TokPtr;
+ while (*LineEnd != '\n' && *LineEnd != '\r' && *LineEnd != '\0')
+ ++LineEnd;
+
+ // FIXME: This shouldn't be necessary, but the CaretEndColNo can extend past
+ // the source line length as currently being computed. See
+ // test/Misc/message-length.c.
+ CaretEndColNo = std::min(CaretEndColNo, unsigned(LineEnd - LineStart));
+
+ // Copy the line of code into an std::string for ease of manipulation.
+ std::string SourceLine(LineStart, LineEnd);
+
+ // Create a line for the caret that is filled with spaces that is the same
+ // length as the line of source code.
+ std::string CaretLine(LineEnd-LineStart, ' ');
+
+ // Highlight all of the characters covered by Ranges with ~ characters.
+ for (SmallVectorImpl<CharSourceRange>::iterator I = Ranges.begin(),
+ E = Ranges.end();
+ I != E; ++I)
+ highlightRange(*I, LineNo, FID, SourceLine, CaretLine);
+
+ // Next, insert the caret itself.
+ if (ColNo-1 < CaretLine.size())
+ CaretLine[ColNo-1] = '^';
+ else
+ CaretLine.push_back('^');
+
+ expandTabs(SourceLine, CaretLine);
+
+ // If we are in -fdiagnostics-print-source-range-info mode, we are trying
+ // to produce easily machine parsable output. Add a space before the
+ // source line and the caret to make it trivial to tell the main diagnostic
+ // line from what the user is intended to see.
+ if (DiagOpts.ShowSourceRanges) {
+ SourceLine = ' ' + SourceLine;
+ CaretLine = ' ' + CaretLine;
+ }
+
+ std::string FixItInsertionLine = buildFixItInsertionLine(LineNo,
+ LineStart, LineEnd,
+ Hints);
+
+ // If the source line is too long for our terminal, select only the
+ // "interesting" source region within that line.
+ unsigned Columns = DiagOpts.MessageLength;
+ if (Columns && SourceLine.size() > Columns)
+ selectInterestingSourceRegion(SourceLine, CaretLine, FixItInsertionLine,
+ CaretEndColNo, Columns);
+
+ // Finally, remove any blank spaces from the end of CaretLine.
+ while (CaretLine[CaretLine.size()-1] == ' ')
+ CaretLine.erase(CaretLine.end()-1);
+
+ // Emit what we have computed.
+ OS << SourceLine << '\n';
+
+ if (DiagOpts.ShowColors)
+ OS.changeColor(caretColor, true);
+ OS << CaretLine << '\n';
+ if (DiagOpts.ShowColors)
+ OS.resetColor();
+
+ if (!FixItInsertionLine.empty()) {
+ if (DiagOpts.ShowColors)
+ // Print fixit line in color
+ OS.changeColor(fixitColor, false);
+ if (DiagOpts.ShowSourceRanges)
+ OS << ' ';
+ OS << FixItInsertionLine << '\n';
+ if (DiagOpts.ShowColors)
+ OS.resetColor();
+ }
+
+ // Print out any parseable fixit information requested by the options.
+ emitParseableFixits(Hints);
+}
+
+/// \brief Highlight a SourceRange (with ~'s) for any characters on LineNo.
+void TextDiagnostic::highlightRange(const CharSourceRange &R,
+ unsigned LineNo, FileID FID,
+ const std::string &SourceLine,
+ std::string &CaretLine) {
+ assert(CaretLine.size() == SourceLine.size() &&
+ "Expect a correspondence between source and caret line!");
+ if (!R.isValid()) return;
+
+ SourceLocation Begin = SM.getExpansionLoc(R.getBegin());
+ SourceLocation End = SM.getExpansionLoc(R.getEnd());
+
+ // If the End location and the start location are the same and are a macro
+ // location, then the range was something that came from a macro expansion
+ // or _Pragma. If this is an object-like macro, the best we can do is to
+ // highlight the range. If this is a function-like macro, we'd also like to
+ // highlight the arguments.
+ if (Begin == End && R.getEnd().isMacroID())
+ End = SM.getExpansionRange(R.getEnd()).second;
+
+ unsigned StartLineNo = SM.getExpansionLineNumber(Begin);
+ if (StartLineNo > LineNo || SM.getFileID(Begin) != FID)
+ return; // No intersection.
+
+ unsigned EndLineNo = SM.getExpansionLineNumber(End);
+ if (EndLineNo < LineNo || SM.getFileID(End) != FID)
+ return; // No intersection.
+
+ // Compute the column number of the start.
+ unsigned StartColNo = 0;
+ if (StartLineNo == LineNo) {
+ StartColNo = SM.getExpansionColumnNumber(Begin);
+ if (StartColNo) --StartColNo; // Zero base the col #.
+ }
+
+ // Compute the column number of the end.
+ unsigned EndColNo = CaretLine.size();
+ if (EndLineNo == LineNo) {
+ EndColNo = SM.getExpansionColumnNumber(End);
+ if (EndColNo) {
+ --EndColNo; // Zero base the col #.
+
+ // Add in the length of the token, so that we cover multi-char tokens if
+ // this is a token range.
+ if (R.isTokenRange())
+ EndColNo += Lexer::MeasureTokenLength(End, SM, LangOpts);
+ } else {
+ EndColNo = CaretLine.size();
+ }
+ }
+
+ assert(StartColNo <= EndColNo && "Invalid range!");
+
+ // Check that a token range does not highlight only whitespace.
+ if (R.isTokenRange()) {
+ // Pick the first non-whitespace column.
+ while (StartColNo < SourceLine.size() &&
+ (SourceLine[StartColNo] == ' ' || SourceLine[StartColNo] == '\t'))
+ ++StartColNo;
+
+ // Pick the last non-whitespace column.
+ if (EndColNo > SourceLine.size())
+ EndColNo = SourceLine.size();
+ while (EndColNo-1 &&
+ (SourceLine[EndColNo-1] == ' ' || SourceLine[EndColNo-1] == '\t'))
+ --EndColNo;
+
+ // If the start/end passed each other, then we are trying to highlight a
+ // range that just exists in whitespace, which must be some sort of other
+ // bug.
+ assert(StartColNo <= EndColNo && "Trying to highlight whitespace??");
+ }
+
+ // Fill the range with ~'s.
+ for (unsigned i = StartColNo; i < EndColNo; ++i)
+ CaretLine[i] = '~';
+}
+
+std::string TextDiagnostic::buildFixItInsertionLine(unsigned LineNo,
+ const char *LineStart,
+ const char *LineEnd,
+ ArrayRef<FixItHint> Hints) {
+ std::string FixItInsertionLine;
+ if (Hints.empty() || !DiagOpts.ShowFixits)
+ return FixItInsertionLine;
+
+ for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
+ I != E; ++I) {
+ if (!I->CodeToInsert.empty()) {
+ // We have an insertion hint. Determine whether the inserted
+ // code is on the same line as the caret.
+ std::pair<FileID, unsigned> HintLocInfo
+ = SM.getDecomposedExpansionLoc(I->RemoveRange.getBegin());
+ if (LineNo == SM.getLineNumber(HintLocInfo.first, HintLocInfo.second)) {
+ // Insert the new code into the line just below the code
+ // that the user wrote.
+ unsigned HintColNo
+ = SM.getColumnNumber(HintLocInfo.first, HintLocInfo.second);
+ unsigned LastColumnModified
+ = HintColNo - 1 + I->CodeToInsert.size();
+ if (LastColumnModified > FixItInsertionLine.size())
+ FixItInsertionLine.resize(LastColumnModified, ' ');
+ std::copy(I->CodeToInsert.begin(), I->CodeToInsert.end(),
+ FixItInsertionLine.begin() + HintColNo - 1);
+ } else {
+ FixItInsertionLine.clear();
+ break;
+ }
+ }
+ }
+
+ if (FixItInsertionLine.empty())
+ return FixItInsertionLine;
+
+ // Now that we have the entire fixit line, expand the tabs in it.
+ // Since we don't want to insert spaces in the middle of a word,
+ // find each word and the column it should line up with and insert
+ // spaces until they match.
+ unsigned FixItPos = 0;
+ unsigned LinePos = 0;
+ unsigned TabExpandedCol = 0;
+ unsigned LineLength = LineEnd - LineStart;
+
+ while (FixItPos < FixItInsertionLine.size() && LinePos < LineLength) {
+ // Find the next word in the FixIt line.
+ while (FixItPos < FixItInsertionLine.size() &&
+ FixItInsertionLine[FixItPos] == ' ')
+ ++FixItPos;
+ unsigned CharDistance = FixItPos - TabExpandedCol;
+
+ // Walk forward in the source line, keeping track of
+ // the tab-expanded column.
+ for (unsigned I = 0; I < CharDistance; ++I, ++LinePos)
+ if (LinePos >= LineLength || LineStart[LinePos] != '\t')
+ ++TabExpandedCol;
+ else
+ TabExpandedCol =
+ (TabExpandedCol/DiagOpts.TabStop + 1) * DiagOpts.TabStop;
+
+ // Adjust the fixit line to match this column.
+ FixItInsertionLine.insert(FixItPos, TabExpandedCol-FixItPos, ' ');
+ FixItPos = TabExpandedCol;
+
+ // Walk to the end of the word.
+ while (FixItPos < FixItInsertionLine.size() &&
+ FixItInsertionLine[FixItPos] != ' ')
+ ++FixItPos;
+ }
+
+ return FixItInsertionLine;
+}
+
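+// Illustrative sketch of the line built above: for a hypothetical fix-it hint
+// that inserts ";" immediately after "return 0", the insertion line is
+// printed beneath the source and caret lines, roughly as:
+//
+//   return 0
+//           ^
+//           ;
+//
+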
+void TextDiagnostic::expandTabs(std::string &SourceLine,
+ std::string &CaretLine) {
+ // Scan the source line, looking for tabs. If we find any, manually expand
+ // them to spaces and update the CaretLine to match.
+ for (unsigned i = 0; i != SourceLine.size(); ++i) {
+ if (SourceLine[i] != '\t') continue;
+
+ // Replace this tab with at least one space.
+ SourceLine[i] = ' ';
+
+ // Compute the number of spaces we need to insert.
+ unsigned TabStop = DiagOpts.TabStop;
+ assert(0 < TabStop && TabStop <= DiagnosticOptions::MaxTabStop &&
+ "Invalid -ftabstop value");
+ unsigned NumSpaces = ((i+TabStop)/TabStop * TabStop) - (i+1);
+ assert(NumSpaces < TabStop && "Invalid computation of space amt");
+
+ // Insert spaces into the SourceLine.
+ SourceLine.insert(i+1, NumSpaces, ' ');
+
+ // Insert spaces or ~'s into CaretLine.
+ CaretLine.insert(i+1, NumSpaces, CaretLine[i] == '~' ? '~' : ' ');
+ }
+}
+
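+// Worked example of the arithmetic above, assuming a tab stop of 8 (the
+// -ftabstop default): a tab at zero-based column i = 3 becomes one space plus
+// NumSpaces = ((3+8)/8 * 8) - (3+1) = 8 - 4 = 4 extra spaces, so the next
+// character lands on column 8, the next multiple of the tab stop.
+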
+void TextDiagnostic::emitParseableFixits(ArrayRef<FixItHint> Hints) {
+ if (!DiagOpts.ShowParseableFixits)
+ return;
+
+ // We follow FixItRewriter's example in not (yet) handling
+ // fix-its in macros.
+ for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
+ I != E; ++I) {
+ if (I->RemoveRange.isInvalid() ||
+ I->RemoveRange.getBegin().isMacroID() ||
+ I->RemoveRange.getEnd().isMacroID())
+ return;
+ }
+
+ for (ArrayRef<FixItHint>::iterator I = Hints.begin(), E = Hints.end();
+ I != E; ++I) {
+ SourceLocation BLoc = I->RemoveRange.getBegin();
+ SourceLocation ELoc = I->RemoveRange.getEnd();
+
+ std::pair<FileID, unsigned> BInfo = SM.getDecomposedLoc(BLoc);
+ std::pair<FileID, unsigned> EInfo = SM.getDecomposedLoc(ELoc);
+
+ // Adjust for token ranges.
+ if (I->RemoveRange.isTokenRange())
+ EInfo.second += Lexer::MeasureTokenLength(ELoc, SM, LangOpts);
+
+ // We specifically do not do word-wrapping or tab-expansion here,
+ // because this is supposed to be easy to parse.
+ PresumedLoc PLoc = SM.getPresumedLoc(BLoc);
+ if (PLoc.isInvalid())
+ break;
+
+ OS << "fix-it:\"";
+ OS.write_escaped(PLoc.getFilename());
+ OS << "\":{" << SM.getLineNumber(BInfo.first, BInfo.second)
+ << ':' << SM.getColumnNumber(BInfo.first, BInfo.second)
+ << '-' << SM.getLineNumber(EInfo.first, EInfo.second)
+ << ':' << SM.getColumnNumber(EInfo.first, EInfo.second)
+ << "}:\"";
+ OS.write_escaped(I->CodeToInsert);
+ OS << "\"\n";
+ }
+}
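+
+// Sketch of the machine-readable format emitted above (enabled by
+// -fdiagnostics-parseable-fixits) for a hypothetical hint that replaces a
+// three-character token starting at line 10, column 5 of foo.c with "bar":
+//
+//   fix-it:"foo.c":{10:5-10:8}:"bar"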
+
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp
new file mode 100644
index 0000000..57105f1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticBuffer.cpp
@@ -0,0 +1,60 @@
+//===--- TextDiagnosticBuffer.cpp - Buffer Text Diagnostics ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a concrete diagnostic client, which buffers the diagnostic messages.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/TextDiagnosticBuffer.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+
+/// HandleDiagnostic - Store the errors, warnings, and notes that are
+/// reported.
+///
+void TextDiagnosticBuffer::HandleDiagnostic(DiagnosticsEngine::Level Level,
+ const Diagnostic &Info) {
+ // Default implementation (Warnings/errors count).
+ DiagnosticConsumer::HandleDiagnostic(Level, Info);
+
+ SmallString<100> Buf;
+ Info.FormatDiagnostic(Buf);
+ switch (Level) {
+ default: llvm_unreachable(
+ "Diagnostic not handled during diagnostic buffering!");
+ case DiagnosticsEngine::Note:
+ Notes.push_back(std::make_pair(Info.getLocation(), Buf.str()));
+ break;
+ case DiagnosticsEngine::Warning:
+ Warnings.push_back(std::make_pair(Info.getLocation(), Buf.str()));
+ break;
+ case DiagnosticsEngine::Error:
+ case DiagnosticsEngine::Fatal:
+ Errors.push_back(std::make_pair(Info.getLocation(), Buf.str()));
+ break;
+ }
+}
+
+void TextDiagnosticBuffer::FlushDiagnostics(DiagnosticsEngine &Diags) const {
+ // FIXME: Flush the diagnostics in order.
+ for (const_iterator it = err_begin(), ie = err_end(); it != ie; ++it)
+ Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ it->second.c_str()));
+ for (const_iterator it = warn_begin(), ie = warn_end(); it != ie; ++it)
+ Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Warning,
+ it->second.c_str()));
+ for (const_iterator it = note_begin(), ie = note_end(); it != ie; ++it)
+ Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Note,
+ it->second.c_str()));
+}
+
+DiagnosticConsumer *TextDiagnosticBuffer::clone(DiagnosticsEngine &) const {
+ return new TextDiagnosticBuffer();
+}
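+
+// Minimal usage sketch (names other than the class itself are assumptions):
+// install the buffer as the diagnostic client, run the compilation, and then
+// replay the buffered diagnostics into another engine:
+//
+//   TextDiagnosticBuffer *Buffer = new TextDiagnosticBuffer();
+//   Diags.setClient(Buffer);        // DiagnosticsEngine takes ownership.
+//   ...                             // compile something
+//   Buffer->FlushDiagnostics(OtherDiags);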
diff --git a/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
new file mode 100644
index 0000000..6445a0c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -0,0 +1,178 @@
+//===--- TextDiagnosticPrinter.cpp - Diagnostic Printer -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This diagnostic client prints out the diagnostic messages it receives.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/DiagnosticOptions.h"
+#include "clang/Frontend/TextDiagnostic.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/ADT/SmallString.h"
+#include <algorithm>
+using namespace clang;
+
+TextDiagnosticPrinter::TextDiagnosticPrinter(raw_ostream &os,
+ const DiagnosticOptions &diags,
+ bool _OwnsOutputStream)
+ : OS(os), LangOpts(0), DiagOpts(&diags), SM(0),
+ OwnsOutputStream(_OwnsOutputStream) {
+}
+
+TextDiagnosticPrinter::~TextDiagnosticPrinter() {
+ if (OwnsOutputStream)
+ delete &OS;
+}
+
+void TextDiagnosticPrinter::BeginSourceFile(const LangOptions &LO,
+ const Preprocessor *PP) {
+ LangOpts = &LO;
+}
+
+void TextDiagnosticPrinter::EndSourceFile() {
+ LangOpts = 0;
+ TextDiag.reset(0);
+}
+
+/// \brief Print any diagnostic option information to a raw_ostream.
+///
+/// This implements all of the logic for adding diagnostic options to a message
+/// (via OS). Each relevant option is comma separated and all are enclosed in
+/// the standard bracketing: " [...]".
+static void printDiagnosticOptions(raw_ostream &OS,
+ DiagnosticsEngine::Level Level,
+ const Diagnostic &Info,
+ const DiagnosticOptions &DiagOpts) {
+ bool Started = false;
+ if (DiagOpts.ShowOptionNames) {
+ // Handle special cases for non-warnings early.
+ if (Info.getID() == diag::fatal_too_many_errors) {
+ OS << " [-ferror-limit=]";
+ return;
+ }
+
+ // The code below is somewhat fragile because we are essentially trying to
+ // report to the user what happened by inferring what the diagnostic engine
+ // did. Eventually it might make more sense to have the diagnostic engine
+ // include some "why" information in the diagnostic.
+
+ // If this is a warning which has been mapped to an error by the user (as
+ // inferred by checking whether the default mapping is to an error) then
+ // flag it as such. Note that diagnostics could also have been mapped by a
+ // pragma, but we don't currently have a way to distinguish this.
+ if (Level == DiagnosticsEngine::Error &&
+ DiagnosticIDs::isBuiltinWarningOrExtension(Info.getID()) &&
+ !DiagnosticIDs::isDefaultMappingAsError(Info.getID())) {
+ OS << " [-Werror";
+ Started = true;
+ }
+
+ // If the diagnostic is an extension diagnostic and not enabled by default
+ // then it must have been turned on with -pedantic.
+ bool EnabledByDefault;
+ if (DiagnosticIDs::isBuiltinExtensionDiag(Info.getID(),
+ EnabledByDefault) &&
+ !EnabledByDefault) {
+ OS << (Started ? "," : " [") << "-pedantic";
+ Started = true;
+ }
+
+ StringRef Opt = DiagnosticIDs::getWarningOptionForDiag(Info.getID());
+ if (!Opt.empty()) {
+ OS << (Started ? "," : " [") << "-W" << Opt;
+ Started = true;
+ }
+ }
+
+ // If the user wants to see category information, include it too.
+ if (DiagOpts.ShowCategories) {
+ unsigned DiagCategory =
+ DiagnosticIDs::getCategoryNumberForDiag(Info.getID());
+ if (DiagCategory) {
+ OS << (Started ? "," : " [");
+ Started = true;
+ if (DiagOpts.ShowCategories == 1)
+ OS << DiagCategory;
+ else {
+ assert(DiagOpts.ShowCategories == 2 && "Invalid ShowCategories value");
+ OS << DiagnosticIDs::getCategoryNameFromID(DiagCategory);
+ }
+ }
+ }
+ if (Started)
+ OS << ']';
+}
+
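+// Illustrative sketch of the suffix appended above: a diagnostic in the
+// -Wunused-variable group that has been upgraded to an error via -Werror and
+// printed with category names enabled would end in something like
+// " [-Werror,-Wunused-variable,Semantic Issue]".
+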
+void TextDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
+ const Diagnostic &Info) {
+ // Default implementation (Warnings/errors count).
+ DiagnosticConsumer::HandleDiagnostic(Level, Info);
+
+ // Render the diagnostic message into a temporary buffer eagerly. We'll use
+ // this later as we print out the diagnostic to the terminal.
+ SmallString<100> OutStr;
+ Info.FormatDiagnostic(OutStr);
+
+ llvm::raw_svector_ostream DiagMessageStream(OutStr);
+ printDiagnosticOptions(DiagMessageStream, Level, Info, *DiagOpts);
+
+ // Keeps track of the starting position of the location
+ // information (e.g., "foo.c:10:4:") that precedes the error
+ // message. We use this information to determine how long the
+ // file+line+column number prefix is.
+ uint64_t StartOfLocationInfo = OS.tell();
+
+ if (!Prefix.empty())
+ OS << Prefix << ": ";
+
+ // Use a dedicated, simpler path for diagnostics without a valid location.
+ // This is important because, if the location is missing, we may be emitting
+ // diagnostics in a context that lacks language options, a source manager, or
+ // other infrastructure necessary for emitting richer diagnostics.
+ if (!Info.getLocation().isValid()) {
+ TextDiagnostic::printDiagnosticLevel(OS, Level, DiagOpts->ShowColors);
+ TextDiagnostic::printDiagnosticMessage(OS, Level, DiagMessageStream.str(),
+ OS.tell() - StartOfLocationInfo,
+ DiagOpts->MessageLength,
+ DiagOpts->ShowColors);
+ OS.flush();
+ return;
+ }
+
+ // Assert that the rest of our infrastructure is setup properly.
+ assert(LangOpts && "Unexpected diagnostic outside source file processing");
+ assert(DiagOpts && "Unexpected diagnostic without options set");
+ assert(Info.hasSourceManager() &&
+ "Unexpected diagnostic with no source manager");
+
+ // Rebuild the TextDiagnostic utility if missing or the source manager has
+ // changed.
+ if (!TextDiag || SM != &Info.getSourceManager()) {
+ SM = &Info.getSourceManager();
+ TextDiag.reset(new TextDiagnostic(OS, *SM, *LangOpts, *DiagOpts));
+ }
+
+ TextDiag->emitDiagnostic(Info.getLocation(), Level, DiagMessageStream.str(),
+ Info.getRanges(),
+ llvm::makeArrayRef(Info.getFixItHints(),
+ Info.getNumFixItHints()));
+
+ OS.flush();
+}
+
+DiagnosticConsumer *
+TextDiagnosticPrinter::clone(DiagnosticsEngine &Diags) const {
+ return new TextDiagnosticPrinter(OS, *DiagOpts, /*OwnsOutputStream=*/false);
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp b/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
new file mode 100644
index 0000000..552282d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/VerifyDiagnosticConsumer.cpp
@@ -0,0 +1,557 @@
+//===---- VerifyDiagnosticConsumer.cpp - Verifying Diagnostic Client ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a concrete diagnostic client, which checks the emitted diagnostics
+// against the expected-error/warning/note directives embedded in the source.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Frontend/VerifyDiagnosticConsumer.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/TextDiagnosticBuffer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/raw_ostream.h"
+#include <climits>
+
+using namespace clang;
+
+VerifyDiagnosticConsumer::VerifyDiagnosticConsumer(DiagnosticsEngine &_Diags)
+ : Diags(_Diags), PrimaryClient(Diags.getClient()),
+ OwnsPrimaryClient(Diags.ownsClient()),
+ Buffer(new TextDiagnosticBuffer()), CurrentPreprocessor(0)
+{
+ Diags.takeClient();
+}
+
+VerifyDiagnosticConsumer::~VerifyDiagnosticConsumer() {
+ CheckDiagnostics();
+ Diags.takeClient();
+ if (OwnsPrimaryClient)
+ delete PrimaryClient;
+}
+
+// DiagnosticConsumer interface.
+
+void VerifyDiagnosticConsumer::BeginSourceFile(const LangOptions &LangOpts,
+ const Preprocessor *PP) {
+ // FIXME: Const hack; we screw up the preprocessor, but in practice it's ok
+ // because it doesn't get reused. It would be better if we could make a copy,
+ // though.
+ CurrentPreprocessor = const_cast<Preprocessor*>(PP);
+
+ PrimaryClient->BeginSourceFile(LangOpts, PP);
+}
+
+void VerifyDiagnosticConsumer::EndSourceFile() {
+ CheckDiagnostics();
+
+ PrimaryClient->EndSourceFile();
+
+ CurrentPreprocessor = 0;
+}
+
+void VerifyDiagnosticConsumer::HandleDiagnostic(
+ DiagnosticsEngine::Level DiagLevel, const Diagnostic &Info) {
+ if (FirstErrorFID.isInvalid() && Info.hasSourceManager()) {
+ const SourceManager &SM = Info.getSourceManager();
+ FirstErrorFID = SM.getFileID(Info.getLocation());
+ }
+ // Send the diagnostic to the buffer, we will check it once we reach the end
+ // of the source file (or are destructed).
+ Buffer->HandleDiagnostic(DiagLevel, Info);
+}
+
+//===----------------------------------------------------------------------===//
+// Checking diagnostics implementation.
+//===----------------------------------------------------------------------===//
+
+typedef TextDiagnosticBuffer::DiagList DiagList;
+typedef TextDiagnosticBuffer::const_iterator const_diag_iterator;
+
+namespace {
+
+/// Directive - Abstract class representing a parsed verify directive.
+///
+class Directive {
+public:
+ static Directive* Create(bool RegexKind, const SourceLocation &Location,
+ const std::string &Text, unsigned Count);
+public:
+ /// Constant representing one or more matches aka regex "+".
+ static const unsigned OneOrMoreCount = UINT_MAX;
+
+ SourceLocation Location;
+ const std::string Text;
+ unsigned Count;
+
+ virtual ~Directive() { }
+
+ // Returns true if directive text is valid.
+ // Otherwise returns false and populates Error.
+ virtual bool isValid(std::string &Error) = 0;
+
+ // Returns true on match.
+ virtual bool Match(const std::string &S) = 0;
+
+protected:
+ Directive(const SourceLocation &Location, const std::string &Text,
+ unsigned Count)
+ : Location(Location), Text(Text), Count(Count) { }
+
+private:
+ Directive(const Directive&); // DO NOT IMPLEMENT
+ void operator=(const Directive&); // DO NOT IMPLEMENT
+};
+
+/// StandardDirective - Directive with string matching.
+///
+class StandardDirective : public Directive {
+public:
+ StandardDirective(const SourceLocation &Location, const std::string &Text,
+ unsigned Count)
+ : Directive(Location, Text, Count) { }
+
+ virtual bool isValid(std::string &Error) {
+ // all strings are considered valid; even empty ones
+ return true;
+ }
+
+ virtual bool Match(const std::string &S) {
+ return S.find(Text) != std::string::npos;
+ }
+};
+
+/// RegexDirective - Directive with regular-expression matching.
+///
+class RegexDirective : public Directive {
+public:
+ RegexDirective(const SourceLocation &Location, const std::string &Text,
+ unsigned Count)
+ : Directive(Location, Text, Count), Regex(Text) { }
+
+ virtual bool isValid(std::string &Error) {
+ if (Regex.isValid(Error))
+ return true;
+ return false;
+ }
+
+ virtual bool Match(const std::string &S) {
+ return Regex.match(S);
+ }
+
+private:
+ llvm::Regex Regex;
+};
+
+typedef std::vector<Directive*> DirectiveList;
+
+/// ExpectedData - owns directive objects and deletes them in its destructor.
+///
+struct ExpectedData {
+ DirectiveList Errors;
+ DirectiveList Warnings;
+ DirectiveList Notes;
+
+ ~ExpectedData() {
+ DirectiveList* Lists[] = { &Errors, &Warnings, &Notes, 0 };
+ for (DirectiveList **PL = Lists; *PL; ++PL) {
+ DirectiveList * const L = *PL;
+ for (DirectiveList::iterator I = L->begin(), E = L->end(); I != E; ++I)
+ delete *I;
+ }
+ }
+};
+
+class ParseHelper
+{
+public:
+ ParseHelper(const char *Begin, const char *End)
+ : Begin(Begin), End(End), C(Begin), P(Begin), PEnd(NULL) { }
+
+ // Return true if string literal is next.
+ bool Next(StringRef S) {
+ P = C;
+ PEnd = C + S.size();
+ if (PEnd > End)
+ return false;
+ return !memcmp(P, S.data(), S.size());
+ }
+
+ // Return true if number is next.
+ // Output N only if number is next.
+ bool Next(unsigned &N) {
+ unsigned TMP = 0;
+ P = C;
+ for (; P < End && P[0] >= '0' && P[0] <= '9'; ++P) {
+ TMP *= 10;
+ TMP += P[0] - '0';
+ }
+ if (P == C)
+ return false;
+ PEnd = P;
+ N = TMP;
+ return true;
+ }
+
+ // Return true if string literal is found.
+ // When true, P marks begin-position of S in content.
+ bool Search(StringRef S) {
+ P = std::search(C, End, S.begin(), S.end());
+ PEnd = P + S.size();
+ return P != End;
+ }
+
+ // Advance 1-past previous next/search.
+ // Behavior is undefined if previous next/search failed.
+ bool Advance() {
+ C = PEnd;
+ return C < End;
+ }
+
+ // Skip zero or more whitespace.
+ void SkipWhitespace() {
+ for (; C < End && isspace(*C); ++C)
+ ;
+ }
+
+ // Return true if EOF reached.
+ bool Done() {
+ return !(C < End);
+ }
+
+ const char * const Begin; // beginning of expected content
+ const char * const End; // end of expected content (1-past)
+ const char *C; // position of next char in content
+ const char *P; // position of last Next/Search subject begin (see Search)
+
+private:
+ const char *PEnd; // previous next/search subject end (1-past)
+};
+
+} // end anonymous namespace
+
+/// ParseDirective - Go through the comment and see if it indicates expected
+/// diagnostics. If so, then put them in the appropriate directive list.
+///
+static void ParseDirective(const char *CommentStart, unsigned CommentLen,
+ ExpectedData &ED, Preprocessor &PP,
+ SourceLocation Pos) {
+ // A single comment may contain multiple directives.
+ for (ParseHelper PH(CommentStart, CommentStart+CommentLen); !PH.Done();) {
+ // search for token: expected
+ if (!PH.Search("expected"))
+ break;
+ PH.Advance();
+
+ // next token: -
+ if (!PH.Next("-"))
+ continue;
+ PH.Advance();
+
+ // next token: { error | warning | note }
+ DirectiveList* DL = NULL;
+ if (PH.Next("error"))
+ DL = &ED.Errors;
+ else if (PH.Next("warning"))
+ DL = &ED.Warnings;
+ else if (PH.Next("note"))
+ DL = &ED.Notes;
+ else
+ continue;
+ PH.Advance();
+
+ // default directive kind
+ bool RegexKind = false;
+ const char* KindStr = "string";
+
+ // next optional token: -re
+ if (PH.Next("-re")) {
+ PH.Advance();
+ RegexKind = true;
+ KindStr = "regex";
+ }
+
+ // skip optional whitespace
+ PH.SkipWhitespace();
+
+ // next optional token: positive integer or a '+'.
+ unsigned Count = 1;
+ if (PH.Next(Count))
+ PH.Advance();
+ else if (PH.Next("+")) {
+ Count = Directive::OneOrMoreCount;
+ PH.Advance();
+ }
+
+ // skip optional whitespace
+ PH.SkipWhitespace();
+
+ // next token: {{
+ if (!PH.Next("{{")) {
+ PP.Diag(Pos.getLocWithOffset(PH.C-PH.Begin),
+ diag::err_verify_missing_start) << KindStr;
+ continue;
+ }
+ PH.Advance();
+ const char* const ContentBegin = PH.C; // mark content begin
+
+ // search for token: }}
+ if (!PH.Search("}}")) {
+ PP.Diag(Pos.getLocWithOffset(PH.C-PH.Begin),
+ diag::err_verify_missing_end) << KindStr;
+ continue;
+ }
+ const char* const ContentEnd = PH.P; // mark content end
+ PH.Advance();
+
+ // build directive text; convert \n to newlines
+ std::string Text;
+ StringRef NewlineStr = "\\n";
+ StringRef Content(ContentBegin, ContentEnd-ContentBegin);
+ size_t CPos = 0;
+ size_t FPos;
+ while ((FPos = Content.find(NewlineStr, CPos)) != StringRef::npos) {
+ Text += Content.substr(CPos, FPos-CPos);
+ Text += '\n';
+ CPos = FPos + NewlineStr.size();
+ }
+ if (Text.empty())
+ Text.assign(ContentBegin, ContentEnd);
+
+ // construct new directive
+ Directive *D = Directive::Create(RegexKind, Pos, Text, Count);
+ std::string Error;
+ if (D->isValid(Error))
+ DL->push_back(D);
+ else {
+ PP.Diag(Pos.getLocWithOffset(ContentBegin-PH.Begin),
+ diag::err_verify_invalid_content)
+ << KindStr << Error;
+ }
+ }
+}
+
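+// Examples of the comment syntax ParseDirective accepts, derived from the
+// parsing steps above ("2" is a count, "+" means one-or-more, and "-re"
+// selects regex matching; the message texts are placeholders):
+//
+//   // expected-error {{use of undeclared identifier}}
+//   // expected-warning 2 {{unused variable}}
+//   // expected-note-re + {{candidate function .*}}
+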
+/// FindExpectedDiags - Lex the main source file to find all of the
+/// expected errors and warnings.
+static void FindExpectedDiags(Preprocessor &PP, ExpectedData &ED, FileID FID) {
+ // Create a raw lexer to pull all the comments out of FID.
+ if (FID.isInvalid())
+ return;
+
+ SourceManager& SM = PP.getSourceManager();
+ // Create a lexer to lex all the tokens of the main file in raw mode.
+ const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+ Lexer RawLex(FID, FromFile, SM, PP.getLangOpts());
+
+ // Return comments as tokens; this is how we find expected diagnostics.
+ RawLex.SetCommentRetentionState(true);
+
+ Token Tok;
+ Tok.setKind(tok::comment);
+ while (Tok.isNot(tok::eof)) {
+ RawLex.Lex(Tok);
+ if (!Tok.is(tok::comment)) continue;
+
+ std::string Comment = PP.getSpelling(Tok);
+ if (Comment.empty()) continue;
+
+ // Find all expected errors/warnings/notes.
+ ParseDirective(&Comment[0], Comment.size(), ED, PP, Tok.getLocation());
+ }
+}
+
+/// PrintProblem - This takes a diagnostic map of the delta between expected and
+/// seen diagnostics. If there's anything in it, then something unexpected
+/// happened. Print the map out in a nice format and return the number of
+/// problems it contains. If the map is empty, nothing is printed and 0 is
+/// returned.
+///
+static unsigned PrintProblem(DiagnosticsEngine &Diags, SourceManager *SourceMgr,
+ const_diag_iterator diag_begin,
+ const_diag_iterator diag_end,
+ const char *Kind, bool Expected) {
+ if (diag_begin == diag_end) return 0;
+
+ SmallString<256> Fmt;
+ llvm::raw_svector_ostream OS(Fmt);
+ for (const_diag_iterator I = diag_begin, E = diag_end; I != E; ++I) {
+ if (I->first.isInvalid() || !SourceMgr)
+ OS << "\n (frontend)";
+ else
+ OS << "\n Line " << SourceMgr->getPresumedLineNumber(I->first);
+ OS << ": " << I->second;
+ }
+
+ Diags.Report(diag::err_verify_inconsistent_diags)
+ << Kind << !Expected << OS.str();
+ return std::distance(diag_begin, diag_end);
+}
+
+static unsigned PrintProblem(DiagnosticsEngine &Diags, SourceManager *SourceMgr,
+ DirectiveList &DL, const char *Kind,
+ bool Expected) {
+ if (DL.empty())
+ return 0;
+
+ SmallString<256> Fmt;
+ llvm::raw_svector_ostream OS(Fmt);
+ for (DirectiveList::iterator I = DL.begin(), E = DL.end(); I != E; ++I) {
+ Directive& D = **I;
+ if (D.Location.isInvalid() || !SourceMgr)
+ OS << "\n (frontend)";
+ else
+ OS << "\n Line " << SourceMgr->getPresumedLineNumber(D.Location);
+ OS << ": " << D.Text;
+ }
+
+ Diags.Report(diag::err_verify_inconsistent_diags)
+ << Kind << !Expected << OS.str();
+ return DL.size();
+}
+
+/// CheckLists - Compare expected to seen diagnostic lists and return the
+/// number of discrepancies between them.
+///
+static unsigned CheckLists(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
+ const char *Label,
+ DirectiveList &Left,
+ const_diag_iterator d2_begin,
+ const_diag_iterator d2_end) {
+ DirectiveList LeftOnly;
+ DiagList Right(d2_begin, d2_end);
+
+ for (DirectiveList::iterator I = Left.begin(), E = Left.end(); I != E; ++I) {
+ Directive& D = **I;
+ unsigned LineNo1 = SourceMgr.getPresumedLineNumber(D.Location);
+ bool FoundOnce = false;
+
+ for (unsigned i = 0; i < D.Count; ++i) {
+ DiagList::iterator II, IE;
+ for (II = Right.begin(), IE = Right.end(); II != IE; ++II) {
+ unsigned LineNo2 = SourceMgr.getPresumedLineNumber(II->first);
+ if (LineNo1 != LineNo2)
+ continue;
+
+ const std::string &RightText = II->second;
+ if (D.Match(RightText))
+ break;
+ }
+ if (II == IE) {
+ if (D.Count == D.OneOrMoreCount) {
+ if (!FoundOnce)
+ LeftOnly.push_back(*I);
+ // We are only interested in at least one match, so exit the loop.
+ break;
+ }
+ // Not found.
+ LeftOnly.push_back(*I);
+ } else {
+ // Found. The same cannot be found twice.
+ Right.erase(II);
+ FoundOnce = true;
+ }
+ }
+ }
+ // Now all that's left in Right are those that were not matched.
+ unsigned num = PrintProblem(Diags, &SourceMgr, LeftOnly, Label, true);
+ num += PrintProblem(Diags, &SourceMgr, Right.begin(), Right.end(),
+ Label, false);
+ return num;
+}
+
+/// CheckResults - This compares the expected results to those that
+/// were actually reported. It emits any discrepancies and returns the number
+/// of problems found.
+///
+static unsigned CheckResults(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
+ const TextDiagnosticBuffer &Buffer,
+ ExpectedData &ED) {
+ // We want to capture the delta between what was expected and what was
+ // seen.
+ //
+ // Expected \ Seen - set expected but not seen
+ // Seen \ Expected - set seen but not expected
+ unsigned NumProblems = 0;
+
+ // See if there are error mismatches.
+ NumProblems += CheckLists(Diags, SourceMgr, "error", ED.Errors,
+ Buffer.err_begin(), Buffer.err_end());
+
+ // See if there are warning mismatches.
+ NumProblems += CheckLists(Diags, SourceMgr, "warning", ED.Warnings,
+ Buffer.warn_begin(), Buffer.warn_end());
+
+ // See if there are note mismatches.
+ NumProblems += CheckLists(Diags, SourceMgr, "note", ED.Notes,
+ Buffer.note_begin(), Buffer.note_end());
+
+ return NumProblems;
+}
+
+void VerifyDiagnosticConsumer::CheckDiagnostics() {
+ ExpectedData ED;
+
+ // Ensure any diagnostics go to the primary client.
+ bool OwnsCurClient = Diags.ownsClient();
+ DiagnosticConsumer *CurClient = Diags.takeClient();
+ Diags.setClient(PrimaryClient, false);
+
+ // If we have a preprocessor, scan the source for expected diagnostic
+ // markers. If not then any diagnostics are unexpected.
+ if (CurrentPreprocessor) {
+ SourceManager &SM = CurrentPreprocessor->getSourceManager();
+ // Extract expected-error strings from main file.
+ FindExpectedDiags(*CurrentPreprocessor, ED, SM.getMainFileID());
+ // Only check for expectations in other diagnostic locations
+ // if they are not the main file (via ID or FileEntry) - the main
+ // file has already been looked at, and its expectations must not
+ // be added twice.
+ if (!FirstErrorFID.isInvalid() && FirstErrorFID != SM.getMainFileID()
+ && (!SM.getFileEntryForID(FirstErrorFID)
+ || (SM.getFileEntryForID(FirstErrorFID) !=
+ SM.getFileEntryForID(SM.getMainFileID())))) {
+ FindExpectedDiags(*CurrentPreprocessor, ED, FirstErrorFID);
+ FirstErrorFID = FileID();
+ }
+
+ // Check that the expected diagnostics occurred.
+ NumErrors += CheckResults(Diags, SM, *Buffer, ED);
+ } else {
+ NumErrors += (PrintProblem(Diags, 0,
+ Buffer->err_begin(), Buffer->err_end(),
+ "error", false) +
+ PrintProblem(Diags, 0,
+ Buffer->warn_begin(), Buffer->warn_end(),
+ "warn", false) +
+ PrintProblem(Diags, 0,
+ Buffer->note_begin(), Buffer->note_end(),
+ "note", false));
+ }
+
+ Diags.takeClient();
+ Diags.setClient(CurClient, OwnsCurClient);
+
+ // Reset the buffer, we have processed all the diagnostics in it.
+ Buffer.reset(new TextDiagnosticBuffer());
+}
+
+DiagnosticConsumer *
+VerifyDiagnosticConsumer::clone(DiagnosticsEngine &Diags) const {
+ if (!Diags.getClient())
+ Diags.setClient(PrimaryClient->clone(Diags));
+
+ return new VerifyDiagnosticConsumer(Diags);
+}
+
+Directive* Directive::Create(bool RegexKind, const SourceLocation &Location,
+ const std::string &Text, unsigned Count) {
+ if (RegexKind)
+ return new RegexDirective(Location, Text, Count);
+ return new StandardDirective(Location, Text, Count);
+}
diff --git a/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp b/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp
new file mode 100644
index 0000000..ec5fde0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Frontend/Warnings.cpp
@@ -0,0 +1,191 @@
+//===--- Warnings.cpp - C-Language Front-end ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Command line warning options handler.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is responsible for handling all warning options. This includes
+// a number of -Wfoo options and their variants, which are driven by TableGen-
+// generated data, and the special cases -pedantic, -pedantic-errors, -w,
+// -Werror and -Wfatal-errors.
+//
+// Each warning option controls any number of actual warnings.
+// Given a warning option 'foo', the following are valid:
+// -Wfoo, -Wno-foo, -Werror=foo, -Wfatal-errors=foo
+//
+#include "clang/Frontend/Utils.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Frontend/DiagnosticOptions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include <cstring>
+#include <utility>
+#include <algorithm>
+using namespace clang;
+
+// EmitUnknownDiagWarning - Emit a warning and typo hint for an unknown
+// warning option.
+static void EmitUnknownDiagWarning(DiagnosticsEngine &Diags,
+ StringRef Prefix, StringRef Opt,
+ bool isPositive) {
+ StringRef Suggestion = DiagnosticIDs::getNearestWarningOption(Opt);
+ if (!Suggestion.empty())
+ Diags.Report(isPositive? diag::warn_unknown_warning_option_suggest :
+ diag::warn_unknown_negative_warning_option_suggest)
+ << (Prefix.str() += Opt) << (Prefix.str() += Suggestion);
+ else
+ Diags.Report(isPositive? diag::warn_unknown_warning_option :
+ diag::warn_unknown_negative_warning_option)
+ << (Prefix.str() += Opt);
+}
+
+void clang::ProcessWarningOptions(DiagnosticsEngine &Diags,
+ const DiagnosticOptions &Opts) {
+ Diags.setSuppressSystemWarnings(true); // Default to -Wno-system-headers
+ Diags.setIgnoreAllWarnings(Opts.IgnoreWarnings);
+ Diags.setShowOverloads(
+ static_cast<DiagnosticsEngine::OverloadsShown>(Opts.ShowOverloads));
+
+ // Handle -ferror-limit
+ if (Opts.ErrorLimit)
+ Diags.setErrorLimit(Opts.ErrorLimit);
+ if (Opts.TemplateBacktraceLimit)
+ Diags.setTemplateBacktraceLimit(Opts.TemplateBacktraceLimit);
+ if (Opts.ConstexprBacktraceLimit)
+ Diags.setConstexprBacktraceLimit(Opts.ConstexprBacktraceLimit);
+
+ // If -pedantic or -pedantic-errors was specified, then we want to map all
+ // extension diagnostics onto WARNING or ERROR unless the user has futz'd
+ // around with them explicitly.
+ if (Opts.PedanticErrors)
+ Diags.setExtensionHandlingBehavior(DiagnosticsEngine::Ext_Error);
+ else if (Opts.Pedantic)
+ Diags.setExtensionHandlingBehavior(DiagnosticsEngine::Ext_Warn);
+ else
+ Diags.setExtensionHandlingBehavior(DiagnosticsEngine::Ext_Ignore);
+
+ llvm::SmallVector<diag::kind, 10> _Diags;
+ const IntrusiveRefCntPtr< DiagnosticIDs > DiagIDs =
+ Diags.getDiagnosticIDs();
+ // We parse the warning options twice. The first pass sets diagnostic state,
+ // while the second pass reports warnings/errors. This has the effect that
+ // we follow the more canonical "last option wins" paradigm when there are
+ // conflicting options.
+ for (unsigned Report = 0, ReportEnd = 2; Report != ReportEnd; ++Report) {
+ bool SetDiagnostic = (Report == 0);
+ for (unsigned i = 0, e = Opts.Warnings.size(); i != e; ++i) {
+ StringRef Opt = Opts.Warnings[i];
+
+ // Treat -Wformat=0 as an alias for -Wno-format.
+ if (Opt == "format=0")
+ Opt = "no-format";
+
+ // Check to see if this warning starts with "no-", if so, this is a
+ // negative form of the option.
+ bool isPositive = true;
+ if (Opt.startswith("no-")) {
+ isPositive = false;
+ Opt = Opt.substr(3);
+ }
+
+ // Figure out how this option affects the warning. If -Wfoo, map the
+ // diagnostic to a warning, if -Wno-foo, map it to ignore.
+ diag::Mapping Mapping = isPositive ? diag::MAP_WARNING : diag::MAP_IGNORE;
+
+ // -Wsystem-headers is a special case, not driven by the option table. It
+ // cannot be controlled with -Werror.
+ if (Opt == "system-headers") {
+ if (SetDiagnostic)
+ Diags.setSuppressSystemWarnings(!isPositive);
+ continue;
+ }
+
+ // -Weverything is a special case as well. It implicitly enables all
+ // warnings, including ones not explicitly in a warning group.
+ if (Opt == "everything") {
+ if (SetDiagnostic) {
+ if (isPositive) {
+ Diags.setEnableAllWarnings(true);
+ } else {
+ Diags.setEnableAllWarnings(false);
+ Diags.setMappingToAllDiagnostics(diag::MAP_IGNORE);
+ }
+ }
+ continue;
+ }
+
+ // -Werror/-Wno-error is a special case, not controlled by the option
+ // table. It also has the "specifier" form of -Werror=foo and -Werror-foo.
+ if (Opt.startswith("error")) {
+ StringRef Specifier;
+ if (Opt.size() > 5) { // Specifier must be present.
+ if ((Opt[5] != '=' && Opt[5] != '-') || Opt.size() == 6) {
+ if (Report)
+ Diags.Report(diag::warn_unknown_warning_specifier)
+ << "-Werror" << ("-W" + Opt.str());
+ continue;
+ }
+ Specifier = Opt.substr(6);
+ }
+
+ if (Specifier.empty()) {
+ if (SetDiagnostic)
+ Diags.setWarningsAsErrors(isPositive);
+ continue;
+ }
+
+ if (SetDiagnostic) {
+ // Set the warning as error flag for this specifier.
+ Diags.setDiagnosticGroupWarningAsError(Specifier, isPositive);
+ } else if (DiagIDs->getDiagnosticsInGroup(Specifier, _Diags)) {
+ EmitUnknownDiagWarning(Diags, "-Werror=", Specifier, isPositive);
+ }
+ continue;
+ }
+
+ // -Wfatal-errors is yet another special case.
+ if (Opt.startswith("fatal-errors")) {
+ StringRef Specifier;
+ if (Opt.size() != 12) {
+ if ((Opt[12] != '=' && Opt[12] != '-') || Opt.size() == 13) {
+ if (Report)
+ Diags.Report(diag::warn_unknown_warning_specifier)
+ << "-Wfatal-errors" << ("-W" + Opt.str());
+ continue;
+ }
+ Specifier = Opt.substr(13);
+ }
+
+ if (Specifier.empty()) {
+ if (SetDiagnostic)
+ Diags.setErrorsAsFatal(isPositive);
+ continue;
+ }
+
+ if (SetDiagnostic) {
+ // Set the error as fatal flag for this specifier.
+ Diags.setDiagnosticGroupErrorAsFatal(Specifier, isPositive);
+ } else if (DiagIDs->getDiagnosticsInGroup(Specifier, _Diags)) {
+ EmitUnknownDiagWarning(Diags, "-Wfatal-errors=", Specifier,
+ isPositive);
+ }
+ continue;
+ }
+
+ if (Report) {
+ if (DiagIDs->getDiagnosticsInGroup(Opt, _Diags))
+ EmitUnknownDiagWarning(Diags, "-W", Opt, isPositive);
+ } else {
+ Diags.setDiagnosticGroupMapping(Opt, Mapping);
+ }
+ }
+ }
+}
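+
+// Sketch of how a sample option list flows through the loop above, assuming
+// the named groups exist in the TableGen-generated tables:
+//
+//   -Wall -Wno-unused -Werror=format -Wfatal-errors=return-type
+//
+// maps the "all" group to warnings, the "unused" group to ignored, upgrades
+// the "format" group to errors, and makes errors in the "return-type" group
+// fatal. Because both passes walk the options in order, a later conflicting
+// option wins over an earlier one.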
diff --git a/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
new file mode 100644
index 0000000..2066505
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -0,0 +1,191 @@
+//===--- ExecuteCompilerInvocation.cpp ------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file holds ExecuteCompilerInvocation(). It is split into its own file to
+// minimize the impact of pulling in essentially everything else in Clang.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/FrontendTool/Utils.h"
+#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
+#include "clang/ARCMigrate/ARCMTActions.h"
+#include "clang/CodeGen/CodeGenAction.h"
+#include "clang/Driver/CC1Options.h"
+#include "clang/Driver/OptTable.h"
+#include "clang/Frontend/CompilerInvocation.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/FrontendPluginRegistry.h"
+#include "clang/Rewrite/FrontendActions.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/DynamicLibrary.h"
+using namespace clang;
+
+static FrontendAction *CreateFrontendBaseAction(CompilerInstance &CI) {
+ using namespace clang::frontend;
+
+ switch (CI.getFrontendOpts().ProgramAction) {
+ case ASTDump: return new ASTDumpAction();
+ case ASTDumpXML: return new ASTDumpXMLAction();
+ case ASTPrint: return new ASTPrintAction();
+ case ASTView: return new ASTViewAction();
+ case DumpRawTokens: return new DumpRawTokensAction();
+ case DumpTokens: return new DumpTokensAction();
+ case EmitAssembly: return new EmitAssemblyAction();
+ case EmitBC: return new EmitBCAction();
+ case EmitHTML: return new HTMLPrintAction();
+ case EmitLLVM: return new EmitLLVMAction();
+ case EmitLLVMOnly: return new EmitLLVMOnlyAction();
+ case EmitCodeGenOnly: return new EmitCodeGenOnlyAction();
+ case EmitObj: return new EmitObjAction();
+ case FixIt: return new FixItAction();
+ case GenerateModule: return new GenerateModuleAction;
+ case GeneratePCH: return new GeneratePCHAction;
+ case GeneratePTH: return new GeneratePTHAction();
+ case InitOnly: return new InitOnlyAction();
+ case ParseSyntaxOnly: return new SyntaxOnlyAction();
+
+ case PluginAction: {
+ for (FrontendPluginRegistry::iterator it =
+ FrontendPluginRegistry::begin(), ie = FrontendPluginRegistry::end();
+ it != ie; ++it) {
+ if (it->getName() == CI.getFrontendOpts().ActionName) {
+ OwningPtr<PluginASTAction> P(it->instantiate());
+ if (!P->ParseArgs(CI, CI.getFrontendOpts().PluginArgs))
+ return 0;
+ return P.take();
+ }
+ }
+
+ CI.getDiagnostics().Report(diag::err_fe_invalid_plugin_name)
+ << CI.getFrontendOpts().ActionName;
+ return 0;
+ }
+
+ case PrintDeclContext: return new DeclContextPrintAction();
+ case PrintPreamble: return new PrintPreambleAction();
+ case PrintPreprocessedInput: return new PrintPreprocessedAction();
+ case PubnamesDump: return new PubnamesDumpAction();
+ case RewriteMacros: return new RewriteMacrosAction();
+ case RewriteObjC: return new RewriteObjCAction();
+ case RewriteTest: return new RewriteTestAction();
+ case RunAnalysis: return new ento::AnalysisAction();
+ case MigrateSource: return new arcmt::MigrateSourceAction();
+ case RunPreprocessorOnly: return new PreprocessOnlyAction();
+ }
+ llvm_unreachable("Invalid program action!");
+}
+
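+// Sketch of how the PluginAction case above is typically driven from the
+// command line (library path, plugin name, and the extra argument are
+// placeholders):
+//
+//   clang -cc1 -load libMyPlugin.so -plugin my-plugin-name \
+//         -plugin-arg-my-plugin-name -some-arg input.c
+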
+static FrontendAction *CreateFrontendAction(CompilerInstance &CI) {
+ // Create the underlying action.
+ FrontendAction *Act = CreateFrontendBaseAction(CI);
+ if (!Act)
+ return 0;
+
+ const FrontendOptions &FEOpts = CI.getFrontendOpts();
+
+ if (FEOpts.FixAndRecompile) {
+ Act = new FixItRecompile(Act);
+ }
+
+ // Potentially wrap the base FE action in an ARC Migrate Tool action.
+ switch (FEOpts.ARCMTAction) {
+ case FrontendOptions::ARCMT_None:
+ break;
+ case FrontendOptions::ARCMT_Check:
+ Act = new arcmt::CheckAction(Act);
+ break;
+ case FrontendOptions::ARCMT_Modify:
+ Act = new arcmt::ModifyAction(Act);
+ break;
+ case FrontendOptions::ARCMT_Migrate:
+ Act = new arcmt::MigrateAction(Act,
+ FEOpts.MTMigrateDir,
+ FEOpts.ARCMTMigrateReportOut,
+ FEOpts.ARCMTMigrateEmitARCErrors);
+ break;
+ }
+
+ if (FEOpts.ObjCMTAction != FrontendOptions::ObjCMT_None) {
+ Act = new arcmt::ObjCMigrateAction(Act, FEOpts.MTMigrateDir,
+ FEOpts.ObjCMTAction & ~FrontendOptions::ObjCMT_Literals,
+ FEOpts.ObjCMTAction & ~FrontendOptions::ObjCMT_Subscripting);
+ }
+
+ // If there are any AST files to merge, create a frontend action
+ // adaptor to perform the merge.
+ if (!FEOpts.ASTMergeFiles.empty())
+ Act = new ASTMergeAction(Act, FEOpts.ASTMergeFiles);
+
+ return Act;
+}
+
+bool clang::ExecuteCompilerInvocation(CompilerInstance *Clang) {
+ // Honor -help.
+ if (Clang->getFrontendOpts().ShowHelp) {
+ OwningPtr<driver::OptTable> Opts(driver::createCC1OptTable());
+ Opts->PrintHelp(llvm::outs(), "clang -cc1",
+ "LLVM 'Clang' Compiler: http://clang.llvm.org");
+ return 0;
+ }
+
+ // Honor -version.
+ //
+ // FIXME: Use a better -version message?
+ if (Clang->getFrontendOpts().ShowVersion) {
+ llvm::cl::PrintVersionMessage();
+ return 0;
+ }
+
+ // Load any requested plugins.
+ for (unsigned i = 0,
+ e = Clang->getFrontendOpts().Plugins.size(); i != e; ++i) {
+ const std::string &Path = Clang->getFrontendOpts().Plugins[i];
+ std::string Error;
+ if (llvm::sys::DynamicLibrary::LoadLibraryPermanently(Path.c_str(), &Error))
+ Clang->getDiagnostics().Report(diag::err_fe_unable_to_load_plugin)
+ << Path << Error;
+ }
+
+ // Honor -mllvm.
+ //
+ // FIXME: Remove this, one day.
+ // This should happen AFTER plugins have been loaded!
+ if (!Clang->getFrontendOpts().LLVMArgs.empty()) {
+ unsigned NumArgs = Clang->getFrontendOpts().LLVMArgs.size();
+ const char **Args = new const char*[NumArgs + 2];
+ Args[0] = "clang (LLVM option parsing)";
+ for (unsigned i = 0; i != NumArgs; ++i)
+ Args[i + 1] = Clang->getFrontendOpts().LLVMArgs[i].c_str();
+ Args[NumArgs + 1] = 0;
+ llvm::cl::ParseCommandLineOptions(NumArgs + 1, Args);
+ }
+
+ // Honor -analyzer-checker-help.
+ // This should happen AFTER plugins have been loaded!
+ if (Clang->getAnalyzerOpts().ShowCheckerHelp) {
+ ento::printCheckerHelp(llvm::outs(), Clang->getFrontendOpts().Plugins);
+ return 0;
+ }
+
+ // If there were errors in processing arguments, don't do anything else.
+ bool Success = false;
+ if (!Clang->getDiagnostics().hasErrorOccurred()) {
+ // Create and execute the frontend action.
+ OwningPtr<FrontendAction> Act(CreateFrontendAction(*Clang));
+ if (Act) {
+ Success = Clang->ExecuteAction(*Act);
+ if (Clang->getFrontendOpts().DisableFree)
+ Act.take();
+ }
+ }
+
+ return Success;
+}
diff --git a/contrib/llvm/tools/clang/lib/Headers/altivec.h b/contrib/llvm/tools/clang/lib/Headers/altivec.h
new file mode 100644
index 0000000..a225378
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/altivec.h
@@ -0,0 +1,11856 @@
+/*===---- altivec.h - Standard header for AltiVec intrinsics --------------===*\
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __ALTIVEC_H
+#define __ALTIVEC_H
+
+#ifndef __ALTIVEC__
+#error "AltiVec support not enabled"
+#endif
+
+/* constants for mapping CR6 bits to predicate result. */
+
+#define __CR6_EQ 0
+#define __CR6_EQ_REV 1
+#define __CR6_LT 2
+#define __CR6_LT_REV 3
+
+#define __ATTRS_o_ai __attribute__((__overloadable__, __always_inline__))
+
+static vector signed char __ATTRS_o_ai
+vec_perm(vector signed char a, vector signed char b, vector unsigned char c);
+
+static vector unsigned char __ATTRS_o_ai
+vec_perm(vector unsigned char a,
+ vector unsigned char b,
+ vector unsigned char c);
+
+static vector bool char __ATTRS_o_ai
+vec_perm(vector bool char a, vector bool char b, vector unsigned char c);
+
+static vector short __ATTRS_o_ai
+vec_perm(vector short a, vector short b, vector unsigned char c);
+
+static vector unsigned short __ATTRS_o_ai
+vec_perm(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned char c);
+
+static vector bool short __ATTRS_o_ai
+vec_perm(vector bool short a, vector bool short b, vector unsigned char c);
+
+static vector pixel __ATTRS_o_ai
+vec_perm(vector pixel a, vector pixel b, vector unsigned char c);
+
+static vector int __ATTRS_o_ai
+vec_perm(vector int a, vector int b, vector unsigned char c);
+
+static vector unsigned int __ATTRS_o_ai
+vec_perm(vector unsigned int a, vector unsigned int b, vector unsigned char c);
+
+static vector bool int __ATTRS_o_ai
+vec_perm(vector bool int a, vector bool int b, vector unsigned char c);
+
+static vector float __ATTRS_o_ai
+vec_perm(vector float a, vector float b, vector unsigned char c);
+
+/* vec_abs */
+
+#define __builtin_altivec_abs_v16qi vec_abs
+#define __builtin_altivec_abs_v8hi vec_abs
+#define __builtin_altivec_abs_v4si vec_abs
+
+static vector signed char __ATTRS_o_ai
+vec_abs(vector signed char a)
+{
+ return __builtin_altivec_vmaxsb(a, -a);
+}
+
+static vector signed short __ATTRS_o_ai
+vec_abs(vector signed short a)
+{
+ return __builtin_altivec_vmaxsh(a, -a);
+}
+
+static vector signed int __ATTRS_o_ai
+vec_abs(vector signed int a)
+{
+ return __builtin_altivec_vmaxsw(a, -a);
+}
+
+static vector float __ATTRS_o_ai
+vec_abs(vector float a)
+{
+ vector unsigned int res = (vector unsigned int)a
+ & (vector unsigned int)(0x7FFFFFFF);
+ return (vector float)res;
+}
+
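+/* Illustrative usage sketch for the vec_abs overloads above (assumes the
+   AltiVec vector-literal syntax available with -maltivec):
+
+     vector signed int v = (vector signed int)(-1, 2, -3, 4);
+     vector signed int a = vec_abs(v);    // yields (1, 2, 3, 4)
+*/
+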
+/* vec_abss */
+
+#define __builtin_altivec_abss_v16qi vec_abss
+#define __builtin_altivec_abss_v8hi vec_abss
+#define __builtin_altivec_abss_v4si vec_abss
+
+static vector signed char __ATTRS_o_ai
+vec_abss(vector signed char a)
+{
+ return __builtin_altivec_vmaxsb
+ (a, __builtin_altivec_vsubsbs((vector signed char)(0), a));
+}
+
+static vector signed short __ATTRS_o_ai
+vec_abss(vector signed short a)
+{
+ return __builtin_altivec_vmaxsh
+ (a, __builtin_altivec_vsubshs((vector signed short)(0), a));
+}
+
+static vector signed int __ATTRS_o_ai
+vec_abss(vector signed int a)
+{
+ return __builtin_altivec_vmaxsw
+ (a, __builtin_altivec_vsubsws((vector signed int)(0), a));
+}
+
+/* vec_add */
+
+static vector signed char __ATTRS_o_ai
+vec_add(vector signed char a, vector signed char b)
+{
+ return a + b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_add(vector bool char a, vector signed char b)
+{
+ return (vector signed char)a + b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_add(vector signed char a, vector bool char b)
+{
+ return a + (vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_add(vector unsigned char a, vector unsigned char b)
+{
+ return a + b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_add(vector bool char a, vector unsigned char b)
+{
+ return (vector unsigned char)a + b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_add(vector unsigned char a, vector bool char b)
+{
+ return a + (vector unsigned char)b;
+}
+
+static vector short __ATTRS_o_ai
+vec_add(vector short a, vector short b)
+{
+ return a + b;
+}
+
+static vector short __ATTRS_o_ai
+vec_add(vector bool short a, vector short b)
+{
+ return (vector short)a + b;
+}
+
+static vector short __ATTRS_o_ai
+vec_add(vector short a, vector bool short b)
+{
+ return a + (vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_add(vector unsigned short a, vector unsigned short b)
+{
+ return a + b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_add(vector bool short a, vector unsigned short b)
+{
+ return (vector unsigned short)a + b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_add(vector unsigned short a, vector bool short b)
+{
+ return a + (vector unsigned short)b;
+}
+
+static vector int __ATTRS_o_ai
+vec_add(vector int a, vector int b)
+{
+ return a + b;
+}
+
+static vector int __ATTRS_o_ai
+vec_add(vector bool int a, vector int b)
+{
+ return (vector int)a + b;
+}
+
+static vector int __ATTRS_o_ai
+vec_add(vector int a, vector bool int b)
+{
+ return a + (vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_add(vector unsigned int a, vector unsigned int b)
+{
+ return a + b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_add(vector bool int a, vector unsigned int b)
+{
+ return (vector unsigned int)a + b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_add(vector unsigned int a, vector bool int b)
+{
+ return a + (vector unsigned int)b;
+}
+
+static vector float __ATTRS_o_ai
+vec_add(vector float a, vector float b)
+{
+ return a + b;
+}
+
+/* vec_vaddubm */
+
+#define __builtin_altivec_vaddubm vec_vaddubm
+
+static vector signed char __ATTRS_o_ai
+vec_vaddubm(vector signed char a, vector signed char b)
+{
+ return a + b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vaddubm(vector bool char a, vector signed char b)
+{
+ return (vector signed char)a + b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vaddubm(vector signed char a, vector bool char b)
+{
+ return a + (vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vaddubm(vector unsigned char a, vector unsigned char b)
+{
+ return a + b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vaddubm(vector bool char a, vector unsigned char b)
+{
+ return (vector unsigned char)a + b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vaddubm(vector unsigned char a, vector bool char b)
+{
+ return a + (vector unsigned char)b;
+}
+
+/* vec_vadduhm */
+
+#define __builtin_altivec_vadduhm vec_vadduhm
+
+static vector short __ATTRS_o_ai
+vec_vadduhm(vector short a, vector short b)
+{
+ return a + b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vadduhm(vector bool short a, vector short b)
+{
+ return (vector short)a + b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vadduhm(vector short a, vector bool short b)
+{
+ return a + (vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vadduhm(vector unsigned short a, vector unsigned short b)
+{
+ return a + b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vadduhm(vector bool short a, vector unsigned short b)
+{
+ return (vector unsigned short)a + b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vadduhm(vector unsigned short a, vector bool short b)
+{
+ return a + (vector unsigned short)b;
+}
+
+/* vec_vadduwm */
+
+#define __builtin_altivec_vadduwm vec_vadduwm
+
+static vector int __ATTRS_o_ai
+vec_vadduwm(vector int a, vector int b)
+{
+ return a + b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vadduwm(vector bool int a, vector int b)
+{
+ return (vector int)a + b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vadduwm(vector int a, vector bool int b)
+{
+ return a + (vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vadduwm(vector unsigned int a, vector unsigned int b)
+{
+ return a + b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vadduwm(vector bool int a, vector unsigned int b)
+{
+ return (vector unsigned int)a + b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vadduwm(vector unsigned int a, vector bool int b)
+{
+ return a + (vector unsigned int)b;
+}
+
+/* vec_vaddfp */
+
+#define __builtin_altivec_vaddfp vec_vaddfp
+
+static vector float __attribute__((__always_inline__))
+vec_vaddfp(vector float a, vector float b)
+{
+ return a + b;
+}
+
+/* vec_addc */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_addc(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vaddcuw(a, b);
+}
+
+/* vec_vaddcuw */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vaddcuw(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vaddcuw(a, b);
+}
+
+/* vec_adds */
+
+static vector signed char __ATTRS_o_ai
+vec_adds(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vaddsbs(a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_adds(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vaddsbs((vector signed char)a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_adds(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vaddsbs(a, (vector signed char)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_adds(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vaddubs(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_adds(vector bool char a, vector unsigned char b)
+{
+ return __builtin_altivec_vaddubs((vector unsigned char)a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_adds(vector unsigned char a, vector bool char b)
+{
+ return __builtin_altivec_vaddubs(a, (vector unsigned char)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_adds(vector short a, vector short b)
+{
+ return __builtin_altivec_vaddshs(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_adds(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vaddshs((vector short)a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_adds(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vaddshs(a, (vector short)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_adds(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vadduhs(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_adds(vector bool short a, vector unsigned short b)
+{
+ return __builtin_altivec_vadduhs((vector unsigned short)a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_adds(vector unsigned short a, vector bool short b)
+{
+ return __builtin_altivec_vadduhs(a, (vector unsigned short)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_adds(vector int a, vector int b)
+{
+ return __builtin_altivec_vaddsws(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_adds(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vaddsws((vector int)a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_adds(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vaddsws(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_adds(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vadduws(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_adds(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vadduws((vector unsigned int)a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_adds(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vadduws(a, (vector unsigned int)b);
+}
+
+/* vec_vaddsbs */
+
+static vector signed char __ATTRS_o_ai
+vec_vaddsbs(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vaddsbs(a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vaddsbs(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vaddsbs((vector signed char)a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vaddsbs(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vaddsbs(a, (vector signed char)b);
+}
+
+/* vec_vaddubs */
+
+static vector unsigned char __ATTRS_o_ai
+vec_vaddubs(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vaddubs(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vaddubs(vector bool char a, vector unsigned char b)
+{
+ return __builtin_altivec_vaddubs((vector unsigned char)a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vaddubs(vector unsigned char a, vector bool char b)
+{
+ return __builtin_altivec_vaddubs(a, (vector unsigned char)b);
+}
+
+/* vec_vaddshs */
+
+static vector short __ATTRS_o_ai
+vec_vaddshs(vector short a, vector short b)
+{
+ return __builtin_altivec_vaddshs(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vaddshs(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vaddshs((vector short)a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vaddshs(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vaddshs(a, (vector short)b);
+}
+
+/* vec_vadduhs */
+
+static vector unsigned short __ATTRS_o_ai
+vec_vadduhs(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vadduhs(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vadduhs(vector bool short a, vector unsigned short b)
+{
+ return __builtin_altivec_vadduhs((vector unsigned short)a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vadduhs(vector unsigned short a, vector bool short b)
+{
+ return __builtin_altivec_vadduhs(a, (vector unsigned short)b);
+}
+
+/* vec_vaddsws */
+
+static vector int __ATTRS_o_ai
+vec_vaddsws(vector int a, vector int b)
+{
+ return __builtin_altivec_vaddsws(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vaddsws(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vaddsws((vector int)a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vaddsws(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vaddsws(a, (vector int)b);
+}
+
+/* vec_vadduws */
+
+static vector unsigned int __ATTRS_o_ai
+vec_vadduws(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vadduws(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vadduws(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vadduws((vector unsigned int)a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vadduws(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vadduws(a, (vector unsigned int)b);
+}
+
+/* vec_and */
+
+#define __builtin_altivec_vand vec_and
+
+static vector signed char __ATTRS_o_ai
+vec_and(vector signed char a, vector signed char b)
+{
+ return a & b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_and(vector bool char a, vector signed char b)
+{
+ return (vector signed char)a & b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_and(vector signed char a, vector bool char b)
+{
+ return a & (vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_and(vector unsigned char a, vector unsigned char b)
+{
+ return a & b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_and(vector bool char a, vector unsigned char b)
+{
+ return (vector unsigned char)a & b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_and(vector unsigned char a, vector bool char b)
+{
+ return a & (vector unsigned char)b;
+}
+
+static vector bool char __ATTRS_o_ai
+vec_and(vector bool char a, vector bool char b)
+{
+ return a & b;
+}
+
+static vector short __ATTRS_o_ai
+vec_and(vector short a, vector short b)
+{
+ return a & b;
+}
+
+static vector short __ATTRS_o_ai
+vec_and(vector bool short a, vector short b)
+{
+ return (vector short)a & b;
+}
+
+static vector short __ATTRS_o_ai
+vec_and(vector short a, vector bool short b)
+{
+ return a & (vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_and(vector unsigned short a, vector unsigned short b)
+{
+ return a & b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_and(vector bool short a, vector unsigned short b)
+{
+ return (vector unsigned short)a & b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_and(vector unsigned short a, vector bool short b)
+{
+ return a & (vector unsigned short)b;
+}
+
+static vector bool short __ATTRS_o_ai
+vec_and(vector bool short a, vector bool short b)
+{
+ return a & b;
+}
+
+static vector int __ATTRS_o_ai
+vec_and(vector int a, vector int b)
+{
+ return a & b;
+}
+
+static vector int __ATTRS_o_ai
+vec_and(vector bool int a, vector int b)
+{
+ return (vector int)a & b;
+}
+
+static vector int __ATTRS_o_ai
+vec_and(vector int a, vector bool int b)
+{
+ return a & (vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_and(vector unsigned int a, vector unsigned int b)
+{
+ return a & b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_and(vector bool int a, vector unsigned int b)
+{
+ return (vector unsigned int)a & b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_and(vector unsigned int a, vector bool int b)
+{
+ return a & (vector unsigned int)b;
+}
+
+static vector bool int __ATTRS_o_ai
+vec_and(vector bool int a, vector bool int b)
+{
+ return a & b;
+}
+
+static vector float __ATTRS_o_ai
+vec_and(vector float a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a & (vector unsigned int)b;
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_and(vector bool int a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a & (vector unsigned int)b;
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_and(vector float a, vector bool int b)
+{
+ vector unsigned int res = (vector unsigned int)a & (vector unsigned int)b;
+ return (vector float)res;
+}
+
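vec_and is a plain element-wise bitwise AND; the float overloads above simply operate on the unsigned-int bit patterns. A small sketch (hypothetical helper and values):

    static vector unsigned int example_mask_low_byte(vector unsigned int v)
    {
      vector unsigned int mask = (vector unsigned int)(0xff, 0xff, 0xff, 0xff);
      return vec_and(v, mask);  /* keep only the low byte of each 32-bit element */
    }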
+/* vec_vand */
+
+static vector signed char __ATTRS_o_ai
+vec_vand(vector signed char a, vector signed char b)
+{
+ return a & b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vand(vector bool char a, vector signed char b)
+{
+ return (vector signed char)a & b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vand(vector signed char a, vector bool char b)
+{
+ return a & (vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vand(vector unsigned char a, vector unsigned char b)
+{
+ return a & b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vand(vector bool char a, vector unsigned char b)
+{
+ return (vector unsigned char)a & b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vand(vector unsigned char a, vector bool char b)
+{
+ return a & (vector unsigned char)b;
+}
+
+static vector bool char __ATTRS_o_ai
+vec_vand(vector bool char a, vector bool char b)
+{
+ return a & b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vand(vector short a, vector short b)
+{
+ return a & b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vand(vector bool short a, vector short b)
+{
+ return (vector short)a & b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vand(vector short a, vector bool short b)
+{
+ return a & (vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vand(vector unsigned short a, vector unsigned short b)
+{
+ return a & b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vand(vector bool short a, vector unsigned short b)
+{
+ return (vector unsigned short)a & b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vand(vector unsigned short a, vector bool short b)
+{
+ return a & (vector unsigned short)b;
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vand(vector bool short a, vector bool short b)
+{
+ return a & b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vand(vector int a, vector int b)
+{
+ return a & b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vand(vector bool int a, vector int b)
+{
+ return (vector int)a & b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vand(vector int a, vector bool int b)
+{
+ return a & (vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vand(vector unsigned int a, vector unsigned int b)
+{
+ return a & b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vand(vector bool int a, vector unsigned int b)
+{
+ return (vector unsigned int)a & b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vand(vector unsigned int a, vector bool int b)
+{
+ return a & (vector unsigned int)b;
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vand(vector bool int a, vector bool int b)
+{
+ return a & b;
+}
+
+static vector float __ATTRS_o_ai
+vec_vand(vector float a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a & (vector unsigned int)b;
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_vand(vector bool int a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a & (vector unsigned int)b;
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_vand(vector float a, vector bool int b)
+{
+ vector unsigned int res = (vector unsigned int)a & (vector unsigned int)b;
+ return (vector float)res;
+}
+
+/* vec_andc */
+
+#define __builtin_altivec_vandc vec_andc
+
+static vector signed char __ATTRS_o_ai
+vec_andc(vector signed char a, vector signed char b)
+{
+ return a & ~b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_andc(vector bool char a, vector signed char b)
+{
+ return (vector signed char)a & ~b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_andc(vector signed char a, vector bool char b)
+{
+ return a & ~(vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_andc(vector unsigned char a, vector unsigned char b)
+{
+ return a & ~b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_andc(vector bool char a, vector unsigned char b)
+{
+ return (vector unsigned char)a & ~b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_andc(vector unsigned char a, vector bool char b)
+{
+ return a & ~(vector unsigned char)b;
+}
+
+static vector bool char __ATTRS_o_ai
+vec_andc(vector bool char a, vector bool char b)
+{
+ return a & ~b;
+}
+
+static vector short __ATTRS_o_ai
+vec_andc(vector short a, vector short b)
+{
+ return a & ~b;
+}
+
+static vector short __ATTRS_o_ai
+vec_andc(vector bool short a, vector short b)
+{
+ return (vector short)a & ~b;
+}
+
+static vector short __ATTRS_o_ai
+vec_andc(vector short a, vector bool short b)
+{
+ return a & ~(vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_andc(vector unsigned short a, vector unsigned short b)
+{
+ return a & ~b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_andc(vector bool short a, vector unsigned short b)
+{
+ return (vector unsigned short)a & ~b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_andc(vector unsigned short a, vector bool short b)
+{
+ return a & ~(vector unsigned short)b;
+}
+
+static vector bool short __ATTRS_o_ai
+vec_andc(vector bool short a, vector bool short b)
+{
+ return a & ~b;
+}
+
+static vector int __ATTRS_o_ai
+vec_andc(vector int a, vector int b)
+{
+ return a & ~b;
+}
+
+static vector int __ATTRS_o_ai
+vec_andc(vector bool int a, vector int b)
+{
+ return (vector int)a & ~b;
+}
+
+static vector int __ATTRS_o_ai
+vec_andc(vector int a, vector bool int b)
+{
+ return a & ~(vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_andc(vector unsigned int a, vector unsigned int b)
+{
+ return a & ~b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_andc(vector bool int a, vector unsigned int b)
+{
+ return (vector unsigned int)a & ~b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_andc(vector unsigned int a, vector bool int b)
+{
+ return a & ~(vector unsigned int)b;
+}
+
+static vector bool int __ATTRS_o_ai
+vec_andc(vector bool int a, vector bool int b)
+{
+ return a & ~b;
+}
+
+static vector float __ATTRS_o_ai
+vec_andc(vector float a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a & ~(vector unsigned int)b;
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_andc(vector bool int a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a & ~(vector unsigned int)b;
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_andc(vector float a, vector bool int b)
+{
+ vector unsigned int res = (vector unsigned int)a & ~(vector unsigned int)b;
+ return (vector float)res;
+}
+
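vec_andc computes a & ~b, i.e. it clears in a whatever bits are set in b. A sketch with a hypothetical helper:

    static vector unsigned int example_clear_bits(vector unsigned int v)
    {
      vector unsigned int clear = (vector unsigned int)(0x1, 0x1, 0x1, 0x1);
      return vec_andc(v, clear);  /* force the low bit of every element to zero */
    }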
+/* vec_vandc */
+
+static vector signed char __ATTRS_o_ai
+vec_vandc(vector signed char a, vector signed char b)
+{
+ return a & ~b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vandc(vector bool char a, vector signed char b)
+{
+ return (vector signed char)a & ~b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vandc(vector signed char a, vector bool char b)
+{
+ return a & ~(vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vandc(vector unsigned char a, vector unsigned char b)
+{
+ return a & ~b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vandc(vector bool char a, vector unsigned char b)
+{
+ return (vector unsigned char)a & ~b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vandc(vector unsigned char a, vector bool char b)
+{
+ return a & ~(vector unsigned char)b;
+}
+
+static vector bool char __ATTRS_o_ai
+vec_vandc(vector bool char a, vector bool char b)
+{
+ return a & ~b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vandc(vector short a, vector short b)
+{
+ return a & ~b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vandc(vector bool short a, vector short b)
+{
+ return (vector short)a & ~b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vandc(vector short a, vector bool short b)
+{
+ return a & ~(vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vandc(vector unsigned short a, vector unsigned short b)
+{
+ return a & ~b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vandc(vector bool short a, vector unsigned short b)
+{
+ return (vector unsigned short)a & ~b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vandc(vector unsigned short a, vector bool short b)
+{
+ return a & ~(vector unsigned short)b;
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vandc(vector bool short a, vector bool short b)
+{
+ return a & ~b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vandc(vector int a, vector int b)
+{
+ return a & ~b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vandc(vector bool int a, vector int b)
+{
+ return (vector int)a & ~b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vandc(vector int a, vector bool int b)
+{
+ return a & ~(vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vandc(vector unsigned int a, vector unsigned int b)
+{
+ return a & ~b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vandc(vector bool int a, vector unsigned int b)
+{
+ return (vector unsigned int)a & ~b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vandc(vector unsigned int a, vector bool int b)
+{
+ return a & ~(vector unsigned int)b;
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vandc(vector bool int a, vector bool int b)
+{
+ return a & ~b;
+}
+
+static vector float __ATTRS_o_ai
+vec_vandc(vector float a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a & ~(vector unsigned int)b;
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_vandc(vector bool int a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a & ~(vector unsigned int)b;
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_vandc(vector float a, vector bool int b)
+{
+ vector unsigned int res = (vector unsigned int)a & ~(vector unsigned int)b;
+ return (vector float)res;
+}
+
+/* vec_avg */
+
+static vector signed char __ATTRS_o_ai
+vec_avg(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vavgsb(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_avg(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vavgub(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_avg(vector short a, vector short b)
+{
+ return __builtin_altivec_vavgsh(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_avg(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vavguh(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_avg(vector int a, vector int b)
+{
+ return __builtin_altivec_vavgsw(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_avg(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vavguw(a, b);
+}
+
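vec_avg is a rounded average: each element becomes (a + b + 1) >> 1, computed without intermediate overflow. An illustrative sketch (hypothetical values):

    static vector unsigned char example_rounded_average(void)
    {
      vector unsigned char a = (vector unsigned char)(3, 10, 255, 0, 3, 10, 255, 0,
                                                      3, 10, 255, 0, 3, 10, 255, 0);
      vector unsigned char b = (vector unsigned char)(4, 10, 255, 1, 4, 10, 255, 1,
                                                      4, 10, 255, 1, 4, 10, 255, 1);
      return vec_avg(a, b);  /* (3+4+1)>>1 == 4, (255+255+1)>>1 == 255, no overflow */
    }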
+/* vec_vavgsb */
+
+static vector signed char __attribute__((__always_inline__))
+vec_vavgsb(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vavgsb(a, b);
+}
+
+/* vec_vavgub */
+
+static vector unsigned char __attribute__((__always_inline__))
+vec_vavgub(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vavgub(a, b);
+}
+
+/* vec_vavgsh */
+
+static vector short __attribute__((__always_inline__))
+vec_vavgsh(vector short a, vector short b)
+{
+ return __builtin_altivec_vavgsh(a, b);
+}
+
+/* vec_vavguh */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vavguh(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vavguh(a, b);
+}
+
+/* vec_vavgsw */
+
+static vector int __attribute__((__always_inline__))
+vec_vavgsw(vector int a, vector int b)
+{
+ return __builtin_altivec_vavgsw(a, b);
+}
+
+/* vec_vavguw */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vavguw(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vavguw(a, b);
+}
+
+/* vec_ceil */
+
+static vector float __attribute__((__always_inline__))
+vec_ceil(vector float a)
+{
+ return __builtin_altivec_vrfip(a);
+}
+
+/* vec_vrfip */
+
+static vector float __attribute__((__always_inline__))
+vec_vrfip(vector float a)
+{
+ return __builtin_altivec_vrfip(a);
+}
+
+/* vec_cmpb */
+
+static vector int __attribute__((__always_inline__))
+vec_cmpb(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpbfp(a, b);
+}
+
+/* vec_vcmpbfp */
+
+static vector int __attribute__((__always_inline__))
+vec_vcmpbfp(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpbfp(a, b);
+}
+
+/* vec_cmpeq */
+
+static vector bool char __ATTRS_o_ai
+vec_cmpeq(vector signed char a, vector signed char b)
+{
+ return (vector bool char)
+ __builtin_altivec_vcmpequb((vector char)a, (vector char)b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_cmpeq(vector unsigned char a, vector unsigned char b)
+{
+ return (vector bool char)
+ __builtin_altivec_vcmpequb((vector char)a, (vector char)b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmpeq(vector short a, vector short b)
+{
+ return (vector bool short)__builtin_altivec_vcmpequh(a, b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmpeq(vector unsigned short a, vector unsigned short b)
+{
+ return (vector bool short)
+ __builtin_altivec_vcmpequh((vector short)a, (vector short)b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpeq(vector int a, vector int b)
+{
+ return (vector bool int)__builtin_altivec_vcmpequw(a, b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpeq(vector unsigned int a, vector unsigned int b)
+{
+ return (vector bool int)
+ __builtin_altivec_vcmpequw((vector int)a, (vector int)b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpeq(vector float a, vector float b)
+{
+ return (vector bool int)__builtin_altivec_vcmpeqfp(a, b);
+}
+
+/* vec_cmpge */
+
+static vector bool int __attribute__((__always_inline__))
+vec_cmpge(vector float a, vector float b)
+{
+ return (vector bool int)__builtin_altivec_vcmpgefp(a, b);
+}
+
+/* vec_vcmpgefp */
+
+static vector bool int __attribute__((__always_inline__))
+vec_vcmpgefp(vector float a, vector float b)
+{
+ return (vector bool int)__builtin_altivec_vcmpgefp(a, b);
+}
+
+/* vec_cmpgt */
+
+static vector bool char __ATTRS_o_ai
+vec_cmpgt(vector signed char a, vector signed char b)
+{
+ return (vector bool char)__builtin_altivec_vcmpgtsb(a, b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_cmpgt(vector unsigned char a, vector unsigned char b)
+{
+ return (vector bool char)__builtin_altivec_vcmpgtub(a, b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmpgt(vector short a, vector short b)
+{
+ return (vector bool short)__builtin_altivec_vcmpgtsh(a, b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmpgt(vector unsigned short a, vector unsigned short b)
+{
+ return (vector bool short)__builtin_altivec_vcmpgtuh(a, b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpgt(vector int a, vector int b)
+{
+ return (vector bool int)__builtin_altivec_vcmpgtsw(a, b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpgt(vector unsigned int a, vector unsigned int b)
+{
+ return (vector bool int)__builtin_altivec_vcmpgtuw(a, b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmpgt(vector float a, vector float b)
+{
+ return (vector bool int)__builtin_altivec_vcmpgtfp(a, b);
+}
+
+/* vec_vcmpgtsb */
+
+static vector bool char __attribute__((__always_inline__))
+vec_vcmpgtsb(vector signed char a, vector signed char b)
+{
+ return (vector bool char)__builtin_altivec_vcmpgtsb(a, b);
+}
+
+/* vec_vcmpgtub */
+
+static vector bool char __attribute__((__always_inline__))
+vec_vcmpgtub(vector unsigned char a, vector unsigned char b)
+{
+ return (vector bool char)__builtin_altivec_vcmpgtub(a, b);
+}
+
+/* vec_vcmpgtsh */
+
+static vector bool short __attribute__((__always_inline__))
+vec_vcmpgtsh(vector short a, vector short b)
+{
+ return (vector bool short)__builtin_altivec_vcmpgtsh(a, b);
+}
+
+/* vec_vcmpgtuh */
+
+static vector bool short __attribute__((__always_inline__))
+vec_vcmpgtuh(vector unsigned short a, vector unsigned short b)
+{
+ return (vector bool short)__builtin_altivec_vcmpgtuh(a, b);
+}
+
+/* vec_vcmpgtsw */
+
+static vector bool int __attribute__((__always_inline__))
+vec_vcmpgtsw(vector int a, vector int b)
+{
+ return (vector bool int)__builtin_altivec_vcmpgtsw(a, b);
+}
+
+/* vec_vcmpgtuw */
+
+static vector bool int __attribute__((__always_inline__))
+vec_vcmpgtuw(vector unsigned int a, vector unsigned int b)
+{
+ return (vector bool int)__builtin_altivec_vcmpgtuw(a, b);
+}
+
+/* vec_vcmpgtfp */
+
+static vector bool int __attribute__((__always_inline__))
+vec_vcmpgtfp(vector float a, vector float b)
+{
+ return (vector bool int)__builtin_altivec_vcmpgtfp(a, b);
+}
+
+/* vec_cmple */
+
+static vector bool int __attribute__((__always_inline__))
+vec_cmple(vector float a, vector float b)
+{
+ return (vector bool int)__builtin_altivec_vcmpgefp(b, a);
+}
+
+/* vec_cmplt */
+
+static vector bool char __ATTRS_o_ai
+vec_cmplt(vector signed char a, vector signed char b)
+{
+ return (vector bool char)__builtin_altivec_vcmpgtsb(b, a);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_cmplt(vector unsigned char a, vector unsigned char b)
+{
+ return (vector bool char)__builtin_altivec_vcmpgtub(b, a);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmplt(vector short a, vector short b)
+{
+ return (vector bool short)__builtin_altivec_vcmpgtsh(b, a);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_cmplt(vector unsigned short a, vector unsigned short b)
+{
+ return (vector bool short)__builtin_altivec_vcmpgtuh(b, a);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmplt(vector int a, vector int b)
+{
+ return (vector bool int)__builtin_altivec_vcmpgtsw(b, a);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmplt(vector unsigned int a, vector unsigned int b)
+{
+ return (vector bool int)__builtin_altivec_vcmpgtuw(b, a);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_cmplt(vector float a, vector float b)
+{
+ return (vector bool int)__builtin_altivec_vcmpgtfp(b, a);
+}
+
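All of the comparison intrinsics above return "bool" vectors whose elements are all-ones where the predicate holds and all-zeros elsewhere, so the result can be used directly as a bit mask. A sketch combining vec_cmpgt with the vec_and overloads defined earlier (hypothetical helper and values):

    static vector float example_keep_positive(vector float x)
    {
      vector float zero = (vector float)(0.0f, 0.0f, 0.0f, 0.0f);
      vector bool int m = vec_cmpgt(x, zero);  /* all-ones lanes where x > 0 */
      return vec_and(x, m);                    /* negative/zero lanes become +0.0f */
    }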
+/* vec_ctf */
+
+static vector float __ATTRS_o_ai
+vec_ctf(vector int a, int b)
+{
+ return __builtin_altivec_vcfsx(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_ctf(vector unsigned int a, int b)
+{
+ return __builtin_altivec_vcfux((vector int)a, b);
+}
+
+/* vec_vcfsx */
+
+static vector float __attribute__((__always_inline__))
+vec_vcfsx(vector int a, int b)
+{
+ return __builtin_altivec_vcfsx(a, b);
+}
+
+/* vec_vcfux */
+
+static vector float __attribute__((__always_inline__))
+vec_vcfux(vector unsigned int a, int b)
+{
+ return __builtin_altivec_vcfux((vector int)a, b);
+}
+
+/* vec_cts */
+
+static vector int __attribute__((__always_inline__))
+vec_cts(vector float a, int b)
+{
+ return __builtin_altivec_vctsxs(a, b);
+}
+
+/* vec_vctsxs */
+
+static vector int __attribute__((__always_inline__))
+vec_vctsxs(vector float a, int b)
+{
+ return __builtin_altivec_vctsxs(a, b);
+}
+
+/* vec_ctu */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_ctu(vector float a, int b)
+{
+ return __builtin_altivec_vctuxs(a, b);
+}
+
+/* vec_vctuxs */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vctuxs(vector float a, int b)
+{
+ return __builtin_altivec_vctuxs(a, b);
+}
+
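vec_ctf, vec_cts and vec_ctu are fixed-point conversions: the second argument is a power-of-two scale and must be a compile-time constant in the 0-31 range. vec_ctf(a, n) converts to float and divides by 2^n; vec_cts/vec_ctu multiply by 2^n, then truncate with saturation. A sketch with hypothetical values:

    static vector int example_fixed_point_roundtrip(void)
    {
      vector int q8   = (vector int)(256, 512, -384, 0);  /* Q24.8 fixed point       */
      vector float f  = vec_ctf(q8, 8);                   /* 1.0, 2.0, -1.5, 0.0     */
      return vec_cts(f, 8);                               /* back to 256, 512, -384, 0 */
    }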
+/* vec_dss */
+
+static void __attribute__((__always_inline__))
+vec_dss(int a)
+{
+ __builtin_altivec_dss(a);
+}
+
+/* vec_dssall */
+
+static void __attribute__((__always_inline__))
+vec_dssall(void)
+{
+ __builtin_altivec_dssall();
+}
+
+/* vec_dst */
+
+static void __attribute__((__always_inline__))
+vec_dst(const void *a, int b, int c)
+{
+ __builtin_altivec_dst(a, b, c);
+}
+
+/* vec_dstst */
+
+static void __attribute__((__always_inline__))
+vec_dstst(const void *a, int b, int c)
+{
+ __builtin_altivec_dstst(a, b, c);
+}
+
+/* vec_dststt */
+
+static void __attribute__((__always_inline__))
+vec_dststt(const void *a, int b, int c)
+{
+ __builtin_altivec_dststt(a, b, c);
+}
+
+/* vec_dstt */
+
+static void __attribute__((__always_inline__))
+vec_dstt(const void *a, int b, int c)
+{
+ __builtin_altivec_dstt(a, b, c);
+}
+
+/* vec_expte */
+
+static vector float __attribute__((__always_inline__))
+vec_expte(vector float a)
+{
+ return __builtin_altivec_vexptefp(a);
+}
+
+/* vec_vexptefp */
+
+static vector float __attribute__((__always_inline__))
+vec_vexptefp(vector float a)
+{
+ return __builtin_altivec_vexptefp(a);
+}
+
+/* vec_floor */
+
+static vector float __attribute__((__always_inline__))
+vec_floor(vector float a)
+{
+ return __builtin_altivec_vrfim(a);
+}
+
+/* vec_vrfim */
+
+static vector float __attribute__((__always_inline__))
+vec_vrfim(vector float a)
+{
+ return __builtin_altivec_vrfim(a);
+}
+
+/* vec_ld */
+
+static vector signed char __ATTRS_o_ai
+vec_ld(int a, const vector signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvx(a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_ld(int a, const signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_ld(int a, const vector unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_ld(int a, const unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvx(a, b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_ld(int a, const vector bool char *b)
+{
+ return (vector bool char)__builtin_altivec_lvx(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_ld(int a, const vector short *b)
+{
+ return (vector short)__builtin_altivec_lvx(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_ld(int a, const short *b)
+{
+ return (vector short)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_ld(int a, const vector unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_ld(int a, const unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvx(a, b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_ld(int a, const vector bool short *b)
+{
+ return (vector bool short)__builtin_altivec_lvx(a, b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_ld(int a, const vector pixel *b)
+{
+ return (vector pixel)__builtin_altivec_lvx(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_ld(int a, const vector int *b)
+{
+ return (vector int)__builtin_altivec_lvx(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_ld(int a, const int *b)
+{
+ return (vector int)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_ld(int a, const vector unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_ld(int a, const unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvx(a, b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_ld(int a, const vector bool int *b)
+{
+ return (vector bool int)__builtin_altivec_lvx(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_ld(int a, const vector float *b)
+{
+ return (vector float)__builtin_altivec_lvx(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_ld(int a, const float *b)
+{
+ return (vector float)__builtin_altivec_lvx(a, b);
+}
+
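vec_ld (lvx) loads one 16-byte quadword: the effective address a + b is truncated to a 16-byte boundary, so the low four address bits are simply ignored and the data should already be aligned. A sketch assuming a 16-byte-aligned buffer (hypothetical helper):

    static vector float example_aligned_load(void)
    {
      static float buf[4] __attribute__((aligned(16))) = { 1.0f, 2.0f, 3.0f, 4.0f };
      return vec_ld(0, buf);  /* loads the whole 16-byte quadword at buf */
    }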
+/* vec_lvx */
+
+static vector signed char __ATTRS_o_ai
+vec_lvx(int a, const vector signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvx(a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_lvx(int a, const signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvx(int a, const vector unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvx(int a, const unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvx(a, b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_lvx(int a, const vector bool char *b)
+{
+ return (vector bool char)__builtin_altivec_lvx(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_lvx(int a, const vector short *b)
+{
+ return (vector short)__builtin_altivec_lvx(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_lvx(int a, const short *b)
+{
+ return (vector short)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvx(int a, const vector unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvx(int a, const unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvx(a, b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_lvx(int a, const vector bool short *b)
+{
+ return (vector bool short)__builtin_altivec_lvx(a, b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_lvx(int a, const vector pixel *b)
+{
+ return (vector pixel)__builtin_altivec_lvx(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_lvx(int a, const vector int *b)
+{
+ return (vector int)__builtin_altivec_lvx(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_lvx(int a, const int *b)
+{
+ return (vector int)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvx(int a, const vector unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvx(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvx(int a, const unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvx(a, b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_lvx(int a, const vector bool int *b)
+{
+ return (vector bool int)__builtin_altivec_lvx(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_lvx(int a, const vector float *b)
+{
+ return (vector float)__builtin_altivec_lvx(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_lvx(int a, const float *b)
+{
+ return (vector float)__builtin_altivec_lvx(a, b);
+}
+
+/* vec_lde */
+
+static vector signed char __ATTRS_o_ai
+vec_lde(int a, const vector signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvebx(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lde(int a, const vector unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvebx(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_lde(int a, const vector short *b)
+{
+ return (vector short)__builtin_altivec_lvehx(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lde(int a, const vector unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvehx(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_lde(int a, const vector int *b)
+{
+ return (vector int)__builtin_altivec_lvewx(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lde(int a, const vector unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvewx(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_lde(int a, const vector float *b)
+{
+ return (vector float)__builtin_altivec_lvewx(a, b);
+}
+
+/* vec_lvebx */
+
+static vector signed char __ATTRS_o_ai
+vec_lvebx(int a, const vector signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvebx(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvebx(int a, const vector unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvebx(a, b);
+}
+
+/* vec_lvehx */
+
+static vector short __ATTRS_o_ai
+vec_lvehx(int a, const vector short *b)
+{
+ return (vector short)__builtin_altivec_lvehx(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvehx(int a, const vector unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvehx(a, b);
+}
+
+/* vec_lvewx */
+
+static vector int __ATTRS_o_ai
+vec_lvewx(int a, const vector int *b)
+{
+ return (vector int)__builtin_altivec_lvewx(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvewx(int a, const vector unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvewx(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_lvewx(int a, const vector float *b)
+{
+ return (vector float)__builtin_altivec_lvewx(a, b);
+}
+
+/* vec_ldl */
+
+static vector signed char __ATTRS_o_ai
+vec_ldl(int a, const vector signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_ldl(int a, const signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_ldl(int a, const vector unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_ldl(int a, const unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_ldl(int a, const vector bool char *b)
+{
+ return (vector bool char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_ldl(int a, const vector short *b)
+{
+ return (vector short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_ldl(int a, const short *b)
+{
+ return (vector short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_ldl(int a, const vector unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_ldl(int a, const unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_ldl(int a, const vector bool short *b)
+{
+ return (vector bool short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_ldl(int a, const vector pixel *b)
+{
+ return (vector pixel)__builtin_altivec_lvxl(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_ldl(int a, const vector int *b)
+{
+ return (vector int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_ldl(int a, const int *b)
+{
+ return (vector int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_ldl(int a, const vector unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_ldl(int a, const unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_ldl(int a, const vector bool int *b)
+{
+ return (vector bool int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_ldl(int a, const vector float *b)
+{
+ return (vector float)__builtin_altivec_lvxl(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_ldl(int a, const float *b)
+{
+ return (vector float)__builtin_altivec_lvxl(a, b);
+}
+
+/* vec_lvxl */
+
+static vector signed char __ATTRS_o_ai
+vec_lvxl(int a, const vector signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_lvxl(int a, const signed char *b)
+{
+ return (vector signed char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvxl(int a, const vector unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvxl(int a, const unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_lvxl(int a, const vector bool char *b)
+{
+ return (vector bool char)__builtin_altivec_lvxl(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_lvxl(int a, const vector short *b)
+{
+ return (vector short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_lvxl(int a, const short *b)
+{
+ return (vector short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvxl(int a, const vector unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvxl(int a, const unsigned short *b)
+{
+ return (vector unsigned short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_lvxl(int a, const vector bool short *b)
+{
+ return (vector bool short)__builtin_altivec_lvxl(a, b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_lvxl(int a, const vector pixel *b)
+{
+ return (vector pixel)__builtin_altivec_lvxl(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_lvxl(int a, const vector int *b)
+{
+ return (vector int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_lvxl(int a, const int *b)
+{
+ return (vector int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvxl(int a, const vector unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvxl(int a, const unsigned int *b)
+{
+ return (vector unsigned int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_lvxl(int a, const vector bool int *b)
+{
+ return (vector bool int)__builtin_altivec_lvxl(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_lvxl(int a, const vector float *b)
+{
+ return (vector float)__builtin_altivec_lvxl(a, b);
+}
+
+static vector float __ATTRS_o_ai
+vec_lvxl(int a, const float *b)
+{
+ return (vector float)__builtin_altivec_lvxl(a, b);
+}
+
+/* vec_loge */
+
+static vector float __attribute__((__always_inline__))
+vec_loge(vector float a)
+{
+ return __builtin_altivec_vlogefp(a);
+}
+
+/* vec_vlogefp */
+
+static vector float __attribute__((__always_inline__))
+vec_vlogefp(vector float a)
+{
+ return __builtin_altivec_vlogefp(a);
+}
+
+/* vec_lvsl */
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsl(int a, const signed char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsl(int a, const unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsl(int a, const short *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsl(int a, const unsigned short *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsl(int a, const int *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsl(int a, const unsigned int *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsl(int a, const float *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsl(a, b);
+}
+
+/* vec_lvsr */
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsr(int a, const signed char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsr(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsr(int a, const unsigned char *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsr(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsr(int a, const short *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsr(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsr(int a, const unsigned short *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsr(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsr(int a, const int *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsr(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsr(int a, const unsigned int *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsr(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvsr(int a, const float *b)
+{
+ return (vector unsigned char)__builtin_altivec_lvsr(a, b);
+}
+
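vec_lvsl and vec_lvsr produce permute control vectors from the low four bits of an address; combined with two aligned loads and vec_perm (declared elsewhere in this header) they implement the classic misaligned-load idiom. A sketch with a hypothetical helper (the caller must ensure the quadword following p is readable):

    static vector unsigned char example_misaligned_load(const unsigned char *p)
    {
      /* p may be unaligned; both vec_ld calls round down to 16-byte boundaries */
      vector unsigned char lo  = vec_ld(0, p);    /* quadword containing p     */
      vector unsigned char hi  = vec_ld(15, p);   /* next quadword             */
      vector unsigned char sel = vec_lvsl(0, p);  /* shift amount from p & 0xF */
      return vec_perm(lo, hi, sel);               /* 16 bytes starting at p    */
    }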
+/* vec_madd */
+
+static vector float __attribute__((__always_inline__))
+vec_madd(vector float a, vector float b, vector float c)
+{
+ return __builtin_altivec_vmaddfp(a, b, c);
+}
+
+/* vec_vmaddfp */
+
+static vector float __attribute__((__always_inline__))
+vec_vmaddfp(vector float a, vector float b, vector float c)
+{
+ return __builtin_altivec_vmaddfp(a, b, c);
+}
+
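vec_madd is the element-wise fused multiply-add a*b + c on floats; AltiVec has no standalone float multiply, so a plain multiply is traditionally spelled as vec_madd with a zero addend. A sketch (hypothetical helper):

    static vector float example_multiply(vector float a, vector float b)
    {
      vector float zero = (vector float)(-0.0f, -0.0f, -0.0f, -0.0f);
      return vec_madd(a, b, zero);  /* a*b; -0.0f preserves the sign of exact-zero products */
    }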
+/* vec_madds */
+
+static vector signed short __attribute__((__always_inline__))
+vec_madds(vector signed short a, vector signed short b, vector signed short c)
+{
+ return __builtin_altivec_vmhaddshs(a, b, c);
+}
+
+/* vec_vmhaddshs */
+static vector signed short __attribute__((__always_inline__))
+vec_vmhaddshs(vector signed short a,
+ vector signed short b,
+ vector signed short c)
+{
+ return __builtin_altivec_vmhaddshs(a, b, c);
+}
+
+/* vec_max */
+
+static vector signed char __ATTRS_o_ai
+vec_max(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vmaxsb(a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_max(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vmaxsb((vector signed char)a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_max(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vmaxsb(a, (vector signed char)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_max(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vmaxub(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_max(vector bool char a, vector unsigned char b)
+{
+ return __builtin_altivec_vmaxub((vector unsigned char)a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_max(vector unsigned char a, vector bool char b)
+{
+ return __builtin_altivec_vmaxub(a, (vector unsigned char)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_max(vector short a, vector short b)
+{
+ return __builtin_altivec_vmaxsh(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_max(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vmaxsh((vector short)a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_max(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vmaxsh(a, (vector short)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_max(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vmaxuh(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_max(vector bool short a, vector unsigned short b)
+{
+ return __builtin_altivec_vmaxuh((vector unsigned short)a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_max(vector unsigned short a, vector bool short b)
+{
+ return __builtin_altivec_vmaxuh(a, (vector unsigned short)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_max(vector int a, vector int b)
+{
+ return __builtin_altivec_vmaxsw(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_max(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vmaxsw((vector int)a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_max(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vmaxsw(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_max(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vmaxuw(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_max(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vmaxuw((vector unsigned int)a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_max(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vmaxuw(a, (vector unsigned int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_max(vector float a, vector float b)
+{
+ return __builtin_altivec_vmaxfp(a, b);
+}
+
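vec_max picks the larger element from each lane (vec_min, further below, is the mirror image), which gives a branch-free clamp. A sketch with a hypothetical helper:

    static vector short example_clamp_to_zero(vector short x)
    {
      vector short zero = (vector short)(0, 0, 0, 0, 0, 0, 0, 0);
      return vec_max(x, zero);  /* every negative element becomes 0 */
    }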
+/* vec_vmaxsb */
+
+static vector signed char __ATTRS_o_ai
+vec_vmaxsb(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vmaxsb(a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vmaxsb(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vmaxsb((vector signed char)a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vmaxsb(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vmaxsb(a, (vector signed char)b);
+}
+
+/* vec_vmaxub */
+
+static vector unsigned char __ATTRS_o_ai
+vec_vmaxub(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vmaxub(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vmaxub(vector bool char a, vector unsigned char b)
+{
+ return __builtin_altivec_vmaxub((vector unsigned char)a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vmaxub(vector unsigned char a, vector bool char b)
+{
+ return __builtin_altivec_vmaxub(a, (vector unsigned char)b);
+}
+
+/* vec_vmaxsh */
+
+static vector short __ATTRS_o_ai
+vec_vmaxsh(vector short a, vector short b)
+{
+ return __builtin_altivec_vmaxsh(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vmaxsh(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vmaxsh((vector short)a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vmaxsh(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vmaxsh(a, (vector short)b);
+}
+
+/* vec_vmaxuh */
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmaxuh(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vmaxuh(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmaxuh(vector bool short a, vector unsigned short b)
+{
+ return __builtin_altivec_vmaxuh((vector unsigned short)a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmaxuh(vector unsigned short a, vector bool short b)
+{
+ return __builtin_altivec_vmaxuh(a, (vector unsigned short)b);
+}
+
+/* vec_vmaxsw */
+
+static vector int __ATTRS_o_ai
+vec_vmaxsw(vector int a, vector int b)
+{
+ return __builtin_altivec_vmaxsw(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vmaxsw(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vmaxsw((vector int)a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vmaxsw(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vmaxsw(a, (vector int)b);
+}
+
+/* vec_vmaxuw */
+
+static vector unsigned int __ATTRS_o_ai
+vec_vmaxuw(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vmaxuw(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vmaxuw(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vmaxuw((vector unsigned int)a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vmaxuw(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vmaxuw(a, (vector unsigned int)b);
+}
+
+/* vec_vmaxfp */
+
+static vector float __attribute__((__always_inline__))
+vec_vmaxfp(vector float a, vector float b)
+{
+ return __builtin_altivec_vmaxfp(a, b);
+}
+
+/* vec_mergeh */
+
+static vector signed char __ATTRS_o_ai
+vec_mergeh(vector signed char a, vector signed char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13,
+ 0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_mergeh(vector unsigned char a, vector unsigned char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13,
+ 0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_mergeh(vector bool char a, vector bool char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13,
+ 0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector short __ATTRS_o_ai
+vec_mergeh(vector short a, vector short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x10, 0x11, 0x02, 0x03, 0x12, 0x13,
+ 0x04, 0x05, 0x14, 0x15, 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_mergeh(vector unsigned short a, vector unsigned short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x10, 0x11, 0x02, 0x03, 0x12, 0x13,
+ 0x04, 0x05, 0x14, 0x15, 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_mergeh(vector bool short a, vector bool short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x10, 0x11, 0x02, 0x03, 0x12, 0x13,
+ 0x04, 0x05, 0x14, 0x15, 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector pixel __ATTRS_o_ai
+vec_mergeh(vector pixel a, vector pixel b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x10, 0x11, 0x02, 0x03, 0x12, 0x13,
+ 0x04, 0x05, 0x14, 0x15, 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector int __ATTRS_o_ai
+vec_mergeh(vector int a, vector int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_mergeh(vector unsigned int a, vector unsigned int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_mergeh(vector bool int a, vector bool int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector float __ATTRS_o_ai
+vec_mergeh(vector float a, vector float b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17));
+}
+
+/* vec_vmrghb */
+
+#define __builtin_altivec_vmrghb vec_vmrghb
+
+static vector signed char __ATTRS_o_ai
+vec_vmrghb(vector signed char a, vector signed char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13,
+ 0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vmrghb(vector unsigned char a, vector unsigned char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13,
+ 0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_vmrghb(vector bool char a, vector bool char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13,
+ 0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17));
+}
+
+/* vec_vmrghh */
+
+#define __builtin_altivec_vmrghh vec_vmrghh
+
+static vector short __ATTRS_o_ai
+vec_vmrghh(vector short a, vector short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x10, 0x11, 0x02, 0x03, 0x12, 0x13,
+ 0x04, 0x05, 0x14, 0x15, 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmrghh(vector unsigned short a, vector unsigned short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x10, 0x11, 0x02, 0x03, 0x12, 0x13,
+ 0x04, 0x05, 0x14, 0x15, 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vmrghh(vector bool short a, vector bool short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x10, 0x11, 0x02, 0x03, 0x12, 0x13,
+ 0x04, 0x05, 0x14, 0x15, 0x06, 0x07, 0x16, 0x17));
+}
+
+static vector pixel __ATTRS_o_ai
+vec_vmrghh(vector pixel a, vector pixel b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x10, 0x11, 0x02, 0x03, 0x12, 0x13,
+ 0x04, 0x05, 0x14, 0x15, 0x06, 0x07, 0x16, 0x17));
+}
+
+/* vec_vmrghw */
+
+#define __builtin_altivec_vmrghw vec_vmrghw
+
+static vector int __ATTRS_o_ai
+vec_vmrghw(vector int a, vector int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vmrghw(vector unsigned int a, vector unsigned int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vmrghw(vector bool int a, vector bool int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17));
+}
+
+static vector float __ATTRS_o_ai
+vec_vmrghw(vector float a, vector float b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x00, 0x01, 0x02, 0x03, 0x10, 0x11, 0x12, 0x13,
+ 0x04, 0x05, 0x06, 0x07, 0x14, 0x15, 0x16, 0x17));
+}
+
+/* vec_mergel */
+
+static vector signed char __ATTRS_o_ai
+vec_mergel(vector signed char a, vector signed char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, 0x0B, 0x1B,
+ 0x0C, 0x1C, 0x0D, 0x1D, 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_mergel(vector unsigned char a, vector unsigned char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, 0x0B, 0x1B,
+ 0x0C, 0x1C, 0x0D, 0x1D, 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_mergel(vector bool char a, vector bool char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, 0x0B, 0x1B,
+ 0x0C, 0x1C, 0x0D, 0x1D, 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector short __ATTRS_o_ai
+vec_mergel(vector short a, vector short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x1C, 0x1D, 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_mergel(vector unsigned short a, vector unsigned short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x1C, 0x1D, 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_mergel(vector bool short a, vector bool short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x1C, 0x1D, 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector pixel __ATTRS_o_ai
+vec_mergel(vector pixel a, vector pixel b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x1C, 0x1D, 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector int __ATTRS_o_ai
+vec_mergel(vector int a, vector int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_mergel(vector unsigned int a, vector unsigned int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_mergel(vector bool int a, vector bool int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector float __ATTRS_o_ai
+vec_mergel(vector float a, vector float b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
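vec_mergeh interleaves the high (first) halves of two vectors element by element, and vec_mergel does the same for the low halves; together they zip two vectors completely. A sketch with hypothetical values:

    static vector short example_interleave_high(void)
    {
      vector short even = (vector short)(0, 2, 4, 6, 8, 10, 12, 14);
      vector short odd  = (vector short)(1, 3, 5, 7, 9, 11, 13, 15);
      return vec_mergeh(even, odd);  /* 0, 1, 2, 3, 4, 5, 6, 7 */
    }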
+/* vec_vmrglb */
+
+#define __builtin_altivec_vmrglb vec_vmrglb
+
+static vector signed char __ATTRS_o_ai
+vec_vmrglb(vector signed char a, vector signed char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, 0x0B, 0x1B,
+ 0x0C, 0x1C, 0x0D, 0x1D, 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vmrglb(vector unsigned char a, vector unsigned char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, 0x0B, 0x1B,
+ 0x0C, 0x1C, 0x0D, 0x1D, 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_vmrglb(vector bool char a, vector bool char b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x18, 0x09, 0x19, 0x0A, 0x1A, 0x0B, 0x1B,
+ 0x0C, 0x1C, 0x0D, 0x1D, 0x0E, 0x1E, 0x0F, 0x1F));
+}
+
+/* vec_vmrglh */
+
+#define __builtin_altivec_vmrglh vec_vmrglh
+
+static vector short __ATTRS_o_ai
+vec_vmrglh(vector short a, vector short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x1C, 0x1D, 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmrglh(vector unsigned short a, vector unsigned short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x1C, 0x1D, 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vmrglh(vector bool short a, vector bool short b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x1C, 0x1D, 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+static vector pixel __ATTRS_o_ai
+vec_vmrglh(vector pixel a, vector pixel b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x18, 0x19, 0x0A, 0x0B, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x1C, 0x1D, 0x0E, 0x0F, 0x1E, 0x1F));
+}
+
+/* vec_vmrglw */
+
+#define __builtin_altivec_vmrglw vec_vmrglw
+
+static vector int __ATTRS_o_ai
+vec_vmrglw(vector int a, vector int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vmrglw(vector unsigned int a, vector unsigned int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vmrglw(vector bool int a, vector bool int b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+static vector float __ATTRS_o_ai
+vec_vmrglw(vector float a, vector float b)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (0x08, 0x09, 0x0A, 0x0B, 0x18, 0x19, 0x1A, 0x1B,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x1C, 0x1D, 0x1E, 0x1F));
+}
+
+/* vec_mfvscr */
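+/* Move From VSCR: per the AltiVec semantics of mfvscr, the 32-bit Vector
+   Status and Control Register is returned in the low-order word of the
+   result, with the remaining bits zero. */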
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_mfvscr(void)
+{
+ return __builtin_altivec_mfvscr();
+}
+
+/* vec_min */
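+/* Element-wise minimum.  Overloads taking a bool vector first convert it to
+   the element type of the other operand; the float form maps to vminfp. */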
+
+static vector signed char __ATTRS_o_ai
+vec_min(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vminsb(a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_min(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vminsb((vector signed char)a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_min(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vminsb(a, (vector signed char)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_min(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vminub(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_min(vector bool char a, vector unsigned char b)
+{
+ return __builtin_altivec_vminub((vector unsigned char)a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_min(vector unsigned char a, vector bool char b)
+{
+ return __builtin_altivec_vminub(a, (vector unsigned char)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_min(vector short a, vector short b)
+{
+ return __builtin_altivec_vminsh(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_min(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vminsh((vector short)a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_min(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vminsh(a, (vector short)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_min(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vminuh(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_min(vector bool short a, vector unsigned short b)
+{
+ return __builtin_altivec_vminuh((vector unsigned short)a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_min(vector unsigned short a, vector bool short b)
+{
+ return __builtin_altivec_vminuh(a, (vector unsigned short)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_min(vector int a, vector int b)
+{
+ return __builtin_altivec_vminsw(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_min(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vminsw((vector int)a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_min(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vminsw(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_min(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vminuw(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_min(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vminuw((vector unsigned int)a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_min(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vminuw(a, (vector unsigned int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_min(vector float a, vector float b)
+{
+ return __builtin_altivec_vminfp(a, b);
+}
+
+/* vec_vminsb */
+
+static vector signed char __ATTRS_o_ai
+vec_vminsb(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vminsb(a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vminsb(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vminsb((vector signed char)a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vminsb(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vminsb(a, (vector signed char)b);
+}
+
+/* vec_vminub */
+
+static vector unsigned char __ATTRS_o_ai
+vec_vminub(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vminub(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vminub(vector bool char a, vector unsigned char b)
+{
+ return __builtin_altivec_vminub((vector unsigned char)a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vminub(vector unsigned char a, vector bool char b)
+{
+ return __builtin_altivec_vminub(a, (vector unsigned char)b);
+}
+
+/* vec_vminsh */
+
+static vector short __ATTRS_o_ai
+vec_vminsh(vector short a, vector short b)
+{
+ return __builtin_altivec_vminsh(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vminsh(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vminsh((vector short)a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vminsh(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vminsh(a, (vector short)b);
+}
+
+/* vec_vminuh */
+
+static vector unsigned short __ATTRS_o_ai
+vec_vminuh(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vminuh(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vminuh(vector bool short a, vector unsigned short b)
+{
+ return __builtin_altivec_vminuh((vector unsigned short)a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vminuh(vector unsigned short a, vector bool short b)
+{
+ return __builtin_altivec_vminuh(a, (vector unsigned short)b);
+}
+
+/* vec_vminsw */
+
+static vector int __ATTRS_o_ai
+vec_vminsw(vector int a, vector int b)
+{
+ return __builtin_altivec_vminsw(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vminsw(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vminsw((vector int)a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vminsw(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vminsw(a, (vector int)b);
+}
+
+/* vec_vminuw */
+
+static vector unsigned int __ATTRS_o_ai
+vec_vminuw(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vminuw(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vminuw(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vminuw((vector unsigned int)a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vminuw(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vminuw(a, (vector unsigned int)b);
+}
+
+/* vec_vminfp */
+
+static vector float __attribute__((__always_inline__))
+vec_vminfp(vector float a, vector float b)
+{
+ return __builtin_altivec_vminfp(a, b);
+}
+
+/* vec_mladd */
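+/* Halfword multiply-low and add: each element is a*b+c modulo 2^16
+   (vmladduhm).  For example, with first elements 2, 3 and 4 the first
+   element of the result is 10. */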
+
+#define __builtin_altivec_vmladduhm vec_mladd
+
+static vector short __ATTRS_o_ai
+vec_mladd(vector short a, vector short b, vector short c)
+{
+ return a * b + c;
+}
+
+static vector short __ATTRS_o_ai
+vec_mladd(vector short a, vector unsigned short b, vector unsigned short c)
+{
+ return a * (vector short)b + (vector short)c;
+}
+
+static vector short __ATTRS_o_ai
+vec_mladd(vector unsigned short a, vector short b, vector short c)
+{
+ return (vector short)a * b + c;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_mladd(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned short c)
+{
+ return a * b + c;
+}
+
+/* vec_vmladduhm */
+
+static vector short __ATTRS_o_ai
+vec_vmladduhm(vector short a, vector short b, vector short c)
+{
+ return a * b + c;
+}
+
+static vector short __ATTRS_o_ai
+vec_vmladduhm(vector short a, vector unsigned short b, vector unsigned short c)
+{
+ return a * (vector short)b + (vector short)c;
+}
+
+static vector short __ATTRS_o_ai
+vec_vmladduhm(vector unsigned short a, vector short b, vector short c)
+{
+ return (vector short)a * b + c;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vmladduhm(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned short c)
+{
+ return a * b + c;
+}
+
+/* vec_mradds */
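+/* Multiply-high round and add saturated (vmhraddshs): per the AltiVec
+   definition, each element is ((a*b + 0x4000) >> 15) + c with a
+   signed-saturating final addition. */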
+
+static vector short __attribute__((__always_inline__))
+vec_mradds(vector short a, vector short b, vector short c)
+{
+ return __builtin_altivec_vmhraddshs(a, b, c);
+}
+
+/* vec_vmhraddshs */
+
+static vector short __attribute__((__always_inline__))
+vec_vmhraddshs(vector short a, vector short b, vector short c)
+{
+ return __builtin_altivec_vmhraddshs(a, b, c);
+}
+
+/* vec_msum */
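+/* Multiply-sum: the products of corresponding byte (or halfword) elements
+   are summed within each 32-bit word and added to the matching word of c,
+   using modular (wrapping) arithmetic. */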
+
+static vector int __ATTRS_o_ai
+vec_msum(vector signed char a, vector unsigned char b, vector int c)
+{
+ return __builtin_altivec_vmsummbm(a, b, c);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_msum(vector unsigned char a, vector unsigned char b, vector unsigned int c)
+{
+ return __builtin_altivec_vmsumubm(a, b, c);
+}
+
+static vector int __ATTRS_o_ai
+vec_msum(vector short a, vector short b, vector int c)
+{
+ return __builtin_altivec_vmsumshm(a, b, c);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_msum(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned int c)
+{
+ return __builtin_altivec_vmsumuhm(a, b, c);
+}
+
+/* vec_vmsummbm */
+
+static vector int __attribute__((__always_inline__))
+vec_vmsummbm(vector signed char a, vector unsigned char b, vector int c)
+{
+ return __builtin_altivec_vmsummbm(a, b, c);
+}
+
+/* vec_vmsumubm */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmsumubm(vector unsigned char a,
+ vector unsigned char b,
+ vector unsigned int c)
+{
+ return __builtin_altivec_vmsumubm(a, b, c);
+}
+
+/* vec_vmsumshm */
+
+static vector int __attribute__((__always_inline__))
+vec_vmsumshm(vector short a, vector short b, vector int c)
+{
+ return __builtin_altivec_vmsumshm(a, b, c);
+}
+
+/* vec_vmsumuhm */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmsumuhm(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned int c)
+{
+ return __builtin_altivec_vmsumuhm(a, b, c);
+}
+
+/* vec_msums */
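+/* Saturating variant of the halfword multiply-sum (vmsumshs/vmsumuhs). */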
+
+static vector int __ATTRS_o_ai
+vec_msums(vector short a, vector short b, vector int c)
+{
+ return __builtin_altivec_vmsumshs(a, b, c);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_msums(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned int c)
+{
+ return __builtin_altivec_vmsumuhs(a, b, c);
+}
+
+/* vec_vmsumshs */
+
+static vector int __attribute__((__always_inline__))
+vec_vmsumshs(vector short a, vector short b, vector int c)
+{
+ return __builtin_altivec_vmsumshs(a, b, c);
+}
+
+/* vec_vmsumuhs */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmsumuhs(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned int c)
+{
+ return __builtin_altivec_vmsumuhs(a, b, c);
+}
+
+/* vec_mtvscr */
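+/* Move To VSCR: per mtvscr semantics, the low-order word of a is copied
+   into the Vector Status and Control Register; every element type funnels
+   through the same builtin. */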
+
+static void __ATTRS_o_ai
+vec_mtvscr(vector signed char a)
+{
+ __builtin_altivec_mtvscr((vector int)a);
+}
+
+static void __ATTRS_o_ai
+vec_mtvscr(vector unsigned char a)
+{
+ __builtin_altivec_mtvscr((vector int)a);
+}
+
+static void __ATTRS_o_ai
+vec_mtvscr(vector bool char a)
+{
+ __builtin_altivec_mtvscr((vector int)a);
+}
+
+static void __ATTRS_o_ai
+vec_mtvscr(vector short a)
+{
+ __builtin_altivec_mtvscr((vector int)a);
+}
+
+static void __ATTRS_o_ai
+vec_mtvscr(vector unsigned short a)
+{
+ __builtin_altivec_mtvscr((vector int)a);
+}
+
+static void __ATTRS_o_ai
+vec_mtvscr(vector bool short a)
+{
+ __builtin_altivec_mtvscr((vector int)a);
+}
+
+static void __ATTRS_o_ai
+vec_mtvscr(vector pixel a)
+{
+ __builtin_altivec_mtvscr((vector int)a);
+}
+
+static void __ATTRS_o_ai
+vec_mtvscr(vector int a)
+{
+ __builtin_altivec_mtvscr((vector int)a);
+}
+
+static void __ATTRS_o_ai
+vec_mtvscr(vector unsigned int a)
+{
+ __builtin_altivec_mtvscr((vector int)a);
+}
+
+static void __ATTRS_o_ai
+vec_mtvscr(vector bool int a)
+{
+ __builtin_altivec_mtvscr((vector int)a);
+}
+
+static void __ATTRS_o_ai
+vec_mtvscr(vector float a)
+{
+ __builtin_altivec_mtvscr((vector int)a);
+}
+
+/* vec_mule */
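+/* Multiply even elements: the even-numbered elements of a and b are
+   multiplied, producing results of twice the element width. */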
+
+static vector short __ATTRS_o_ai
+vec_mule(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vmulesb(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_mule(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vmuleub(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_mule(vector short a, vector short b)
+{
+ return __builtin_altivec_vmulesh(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_mule(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vmuleuh(a, b);
+}
+
+/* vec_vmulesb */
+
+static vector short __attribute__((__always_inline__))
+vec_vmulesb(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vmulesb(a, b);
+}
+
+/* vec_vmuleub */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vmuleub(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vmuleub(a, b);
+}
+
+/* vec_vmulesh */
+
+static vector int __attribute__((__always_inline__))
+vec_vmulesh(vector short a, vector short b)
+{
+ return __builtin_altivec_vmulesh(a, b);
+}
+
+/* vec_vmuleuh */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmuleuh(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vmuleuh(a, b);
+}
+
+/* vec_mulo */
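+/* Multiply odd elements: the odd-numbered counterpart of vec_mule. */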
+
+static vector short __ATTRS_o_ai
+vec_mulo(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vmulosb(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_mulo(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vmuloub(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_mulo(vector short a, vector short b)
+{
+ return __builtin_altivec_vmulosh(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_mulo(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vmulouh(a, b);
+}
+
+/* vec_vmulosb */
+
+static vector short __attribute__((__always_inline__))
+vec_vmulosb(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vmulosb(a, b);
+}
+
+/* vec_vmuloub */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vmuloub(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vmuloub(a, b);
+}
+
+/* vec_vmulosh */
+
+static vector int __attribute__((__always_inline__))
+vec_vmulosh(vector short a, vector short b)
+{
+ return __builtin_altivec_vmulosh(a, b);
+}
+
+/* vec_vmulouh */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vmulouh(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vmulouh(a, b);
+}
+
+/* vec_nmsub */
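+/* Negative multiply-subtract: each element is -(a*b - c) (vnmsubfp). */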
+
+static vector float __attribute__((__always_inline__))
+vec_nmsub(vector float a, vector float b, vector float c)
+{
+ return __builtin_altivec_vnmsubfp(a, b, c);
+}
+
+/* vec_vnmsubfp */
+
+static vector float __attribute__((__always_inline__))
+vec_vnmsubfp(vector float a, vector float b, vector float c)
+{
+ return __builtin_altivec_vnmsubfp(a, b, c);
+}
+
+/* vec_nor */
+
+#define __builtin_altivec_vnor vec_nor
+
+static vector signed char __ATTRS_o_ai
+vec_nor(vector signed char a, vector signed char b)
+{
+ return ~(a | b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_nor(vector unsigned char a, vector unsigned char b)
+{
+ return ~(a | b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_nor(vector bool char a, vector bool char b)
+{
+ return ~(a | b);
+}
+
+static vector short __ATTRS_o_ai
+vec_nor(vector short a, vector short b)
+{
+ return ~(a | b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_nor(vector unsigned short a, vector unsigned short b)
+{
+ return ~(a | b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_nor(vector bool short a, vector bool short b)
+{
+ return ~(a | b);
+}
+
+static vector int __ATTRS_o_ai
+vec_nor(vector int a, vector int b)
+{
+ return ~(a | b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_nor(vector unsigned int a, vector unsigned int b)
+{
+ return ~(a | b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_nor(vector bool int a, vector bool int b)
+{
+ return ~(a | b);
+}
+
+static vector float __ATTRS_o_ai
+vec_nor(vector float a, vector float b)
+{
+ vector unsigned int res = ~((vector unsigned int)a | (vector unsigned int)b);
+ return (vector float)res;
+}
+
+/* vec_vnor */
+
+static vector signed char __ATTRS_o_ai
+vec_vnor(vector signed char a, vector signed char b)
+{
+ return ~(a | b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vnor(vector unsigned char a, vector unsigned char b)
+{
+ return ~(a | b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_vnor(vector bool char a, vector bool char b)
+{
+ return ~(a | b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vnor(vector short a, vector short b)
+{
+ return ~(a | b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vnor(vector unsigned short a, vector unsigned short b)
+{
+ return ~(a | b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vnor(vector bool short a, vector bool short b)
+{
+ return ~(a | b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vnor(vector int a, vector int b)
+{
+ return ~(a | b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vnor(vector unsigned int a, vector unsigned int b)
+{
+ return ~(a | b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vnor(vector bool int a, vector bool int b)
+{
+ return ~(a | b);
+}
+
+static vector float __ATTRS_o_ai
+vec_vnor(vector float a, vector float b)
+{
+ vector unsigned int res = ~((vector unsigned int)a | (vector unsigned int)b);
+ return (vector float)res;
+}
+
+/* vec_or */
+
+#define __builtin_altivec_vor vec_or
+
+static vector signed char __ATTRS_o_ai
+vec_or(vector signed char a, vector signed char b)
+{
+ return a | b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_or(vector bool char a, vector signed char b)
+{
+ return (vector signed char)a | b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_or(vector signed char a, vector bool char b)
+{
+ return a | (vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_or(vector unsigned char a, vector unsigned char b)
+{
+ return a | b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_or(vector bool char a, vector unsigned char b)
+{
+ return (vector unsigned char)a | b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_or(vector unsigned char a, vector bool char b)
+{
+ return a | (vector unsigned char)b;
+}
+
+static vector bool char __ATTRS_o_ai
+vec_or(vector bool char a, vector bool char b)
+{
+ return a | b;
+}
+
+static vector short __ATTRS_o_ai
+vec_or(vector short a, vector short b)
+{
+ return a | b;
+}
+
+static vector short __ATTRS_o_ai
+vec_or(vector bool short a, vector short b)
+{
+ return (vector short)a | b;
+}
+
+static vector short __ATTRS_o_ai
+vec_or(vector short a, vector bool short b)
+{
+ return a | (vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_or(vector unsigned short a, vector unsigned short b)
+{
+ return a | b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_or(vector bool short a, vector unsigned short b)
+{
+ return (vector unsigned short)a | b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_or(vector unsigned short a, vector bool short b)
+{
+ return a | (vector unsigned short)b;
+}
+
+static vector bool short __ATTRS_o_ai
+vec_or(vector bool short a, vector bool short b)
+{
+ return a | b;
+}
+
+static vector int __ATTRS_o_ai
+vec_or(vector int a, vector int b)
+{
+ return a | b;
+}
+
+static vector int __ATTRS_o_ai
+vec_or(vector bool int a, vector int b)
+{
+ return (vector int)a | b;
+}
+
+static vector int __ATTRS_o_ai
+vec_or(vector int a, vector bool int b)
+{
+ return a | (vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_or(vector unsigned int a, vector unsigned int b)
+{
+ return a | b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_or(vector bool int a, vector unsigned int b)
+{
+ return (vector unsigned int)a | b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_or(vector unsigned int a, vector bool int b)
+{
+ return a | (vector unsigned int)b;
+}
+
+static vector bool int __ATTRS_o_ai
+vec_or(vector bool int a, vector bool int b)
+{
+ return a | b;
+}
+
+static vector float __ATTRS_o_ai
+vec_or(vector float a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a | (vector unsigned int)b;
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_or(vector bool int a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a | (vector unsigned int)b;
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_or(vector float a, vector bool int b)
+{
+ vector unsigned int res = (vector unsigned int)a | (vector unsigned int)b;
+ return (vector float)res;
+}
+
+/* vec_vor */
+
+static vector signed char __ATTRS_o_ai
+vec_vor(vector signed char a, vector signed char b)
+{
+ return a | b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vor(vector bool char a, vector signed char b)
+{
+ return (vector signed char)a | b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vor(vector signed char a, vector bool char b)
+{
+ return a | (vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vor(vector unsigned char a, vector unsigned char b)
+{
+ return a | b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vor(vector bool char a, vector unsigned char b)
+{
+ return (vector unsigned char)a | b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vor(vector unsigned char a, vector bool char b)
+{
+ return a | (vector unsigned char)b;
+}
+
+static vector bool char __ATTRS_o_ai
+vec_vor(vector bool char a, vector bool char b)
+{
+ return a | b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vor(vector short a, vector short b)
+{
+ return a | b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vor(vector bool short a, vector short b)
+{
+ return (vector short)a | b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vor(vector short a, vector bool short b)
+{
+ return a | (vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vor(vector unsigned short a, vector unsigned short b)
+{
+ return a | b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vor(vector bool short a, vector unsigned short b)
+{
+ return (vector unsigned short)a | b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vor(vector unsigned short a, vector bool short b)
+{
+ return a | (vector unsigned short)b;
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vor(vector bool short a, vector bool short b)
+{
+ return a | b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vor(vector int a, vector int b)
+{
+ return a | b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vor(vector bool int a, vector int b)
+{
+ return (vector int)a | b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vor(vector int a, vector bool int b)
+{
+ return a | (vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vor(vector unsigned int a, vector unsigned int b)
+{
+ return a | b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vor(vector bool int a, vector unsigned int b)
+{
+ return (vector unsigned int)a | b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vor(vector unsigned int a, vector bool int b)
+{
+ return a | (vector unsigned int)b;
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vor(vector bool int a, vector bool int b)
+{
+ return a | b;
+}
+
+static vector float __ATTRS_o_ai
+vec_vor(vector float a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a | (vector unsigned int)b;
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_vor(vector bool int a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a | (vector unsigned int)b;
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_vor(vector float a, vector bool int b)
+{
+ vector unsigned int res = (vector unsigned int)a | (vector unsigned int)b;
+ return (vector float)res;
+}
+
+/* vec_pack */
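+/* Truncating pack: the low-order half of every element of a and b is kept
+   and the results are concatenated into one vector of half-width elements
+   (the vpkuhum/vpkuwum permute patterns used below). */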
+
+static vector signed char __ATTRS_o_ai
+vec_pack(vector signed short a, vector signed short b)
+{
+ return (vector signed char)vec_perm(a, b, (vector unsigned char)
+ (0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_pack(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned char)vec_perm(a, b, (vector unsigned char)
+ (0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_pack(vector bool short a, vector bool short b)
+{
+ return (vector bool char)vec_perm(a, b, (vector unsigned char)
+ (0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+}
+
+static vector short __ATTRS_o_ai
+vec_pack(vector int a, vector int b)
+{
+ return (vector short)vec_perm(a, b, (vector unsigned char)
+ (0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_pack(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned short)vec_perm(a, b, (vector unsigned char)
+ (0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_pack(vector bool int a, vector bool int b)
+{
+ return (vector bool short)vec_perm(a, b, (vector unsigned char)
+ (0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+}
+
+/* vec_vpkuhum */
+
+#define __builtin_altivec_vpkuhum vec_vpkuhum
+
+static vector signed char __ATTRS_o_ai
+vec_vpkuhum(vector signed short a, vector signed short b)
+{
+ return (vector signed char)vec_perm(a, b, (vector unsigned char)
+ (0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vpkuhum(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned char)vec_perm(a, b, (vector unsigned char)
+ (0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_vpkuhum(vector bool short a, vector bool short b)
+{
+ return (vector bool char)vec_perm(a, b, (vector unsigned char)
+ (0x01, 0x03, 0x05, 0x07, 0x09, 0x0B, 0x0D, 0x0F,
+ 0x11, 0x13, 0x15, 0x17, 0x19, 0x1B, 0x1D, 0x1F));
+}
+
+/* vec_vpkuwum */
+
+#define __builtin_altivec_vpkuwum vec_vpkuwum
+
+static vector short __ATTRS_o_ai
+vec_vpkuwum(vector int a, vector int b)
+{
+ return (vector short)vec_perm(a, b, (vector unsigned char)
+ (0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vpkuwum(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned short)vec_perm(a, b, (vector unsigned char)
+ (0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vpkuwum(vector bool int a, vector bool int b)
+{
+ return (vector bool short)vec_perm(a, b, (vector unsigned char)
+ (0x02, 0x03, 0x06, 0x07, 0x0A, 0x0B, 0x0E, 0x0F,
+ 0x12, 0x13, 0x16, 0x17, 0x1A, 0x1B, 0x1E, 0x1F));
+}
+
+/* vec_packpx */
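+/* Packs eight 8/8/8/8 32-bit pixels into 1/5/5/5 16-bit pixels (vpkpx). */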
+
+static vector pixel __attribute__((__always_inline__))
+vec_packpx(vector unsigned int a, vector unsigned int b)
+{
+ return (vector pixel)__builtin_altivec_vpkpx(a, b);
+}
+
+/* vec_vpkpx */
+
+static vector pixel __attribute__((__always_inline__))
+vec_vpkpx(vector unsigned int a, vector unsigned int b)
+{
+ return (vector pixel)__builtin_altivec_vpkpx(a, b);
+}
+
+/* vec_packs */
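+/* Pack with saturation: for example, packing the shorts 300 and -200 into
+   signed chars yields 127 and -128 (vpkshss). */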
+
+static vector signed char __ATTRS_o_ai
+vec_packs(vector short a, vector short b)
+{
+ return __builtin_altivec_vpkshss(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_packs(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vpkuhus(a, b);
+}
+
+static vector signed short __ATTRS_o_ai
+vec_packs(vector int a, vector int b)
+{
+ return __builtin_altivec_vpkswss(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_packs(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vpkuwus(a, b);
+}
+
+/* vec_vpkshss */
+
+static vector signed char __attribute__((__always_inline__))
+vec_vpkshss(vector short a, vector short b)
+{
+ return __builtin_altivec_vpkshss(a, b);
+}
+
+/* vec_vpkuhus */
+
+static vector unsigned char __attribute__((__always_inline__))
+vec_vpkuhus(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vpkuhus(a, b);
+}
+
+/* vec_vpkswss */
+
+static vector signed short __attribute__((__always_inline__))
+vec_vpkswss(vector int a, vector int b)
+{
+ return __builtin_altivec_vpkswss(a, b);
+}
+
+/* vec_vpkuwus */
+
+static vector unsigned short __attribute__((__always_inline__))
+vec_vpkuwus(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vpkuwus(a, b);
+}
+
+/* vec_packsu */
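+/* Pack with unsigned saturation: signed inputs are clamped to the unsigned
+   range of the narrower type, so negative elements become 0. */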
+
+static vector unsigned char __ATTRS_o_ai
+vec_packsu(vector short a, vector short b)
+{
+ return __builtin_altivec_vpkshus(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_packsu(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vpkuhus(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_packsu(vector int a, vector int b)
+{
+ return __builtin_altivec_vpkswus(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_packsu(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vpkuwus(a, b);
+}
+
+/* vec_vpkshus */
+
+static vector unsigned char __ATTRS_o_ai
+vec_vpkshus(vector short a, vector short b)
+{
+ return __builtin_altivec_vpkshus(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vpkshus(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vpkuhus(a, b);
+}
+
+/* vec_vpkswus */
+
+static vector unsigned short __ATTRS_o_ai
+vec_vpkswus(vector int a, vector int b)
+{
+ return __builtin_altivec_vpkswus(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vpkswus(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vpkuwus(a, b);
+}
+
+/* vec_perm */
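+/* Byte permute (vperm): result byte i is byte (c[i] & 0x1F) of the 32-byte
+   concatenation of a and b, so c = {0x00, 0x01, ..., 0x0F} returns a. */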
+
+vector signed char __ATTRS_o_ai
+vec_perm(vector signed char a, vector signed char b, vector unsigned char c)
+{
+ return (vector signed char)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector unsigned char __ATTRS_o_ai
+vec_perm(vector unsigned char a,
+ vector unsigned char b,
+ vector unsigned char c)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector bool char __ATTRS_o_ai
+vec_perm(vector bool char a, vector bool char b, vector unsigned char c)
+{
+ return (vector bool char)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector short __ATTRS_o_ai
+vec_perm(vector short a, vector short b, vector unsigned char c)
+{
+ return (vector short)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector unsigned short __ATTRS_o_ai
+vec_perm(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned char c)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector bool short __ATTRS_o_ai
+vec_perm(vector bool short a, vector bool short b, vector unsigned char c)
+{
+ return (vector bool short)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector pixel __ATTRS_o_ai
+vec_perm(vector pixel a, vector pixel b, vector unsigned char c)
+{
+ return (vector pixel)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector int __ATTRS_o_ai
+vec_perm(vector int a, vector int b, vector unsigned char c)
+{
+ return (vector int)__builtin_altivec_vperm_4si(a, b, c);
+}
+
+vector unsigned int __ATTRS_o_ai
+vec_perm(vector unsigned int a, vector unsigned int b, vector unsigned char c)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector bool int __ATTRS_o_ai
+vec_perm(vector bool int a, vector bool int b, vector unsigned char c)
+{
+ return (vector bool int)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector float __ATTRS_o_ai
+vec_perm(vector float a, vector float b, vector unsigned char c)
+{
+ return (vector float)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+/* vec_vperm */
+
+vector signed char __ATTRS_o_ai
+vec_vperm(vector signed char a, vector signed char b, vector unsigned char c)
+{
+ return (vector signed char)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector unsigned char __ATTRS_o_ai
+vec_vperm(vector unsigned char a,
+ vector unsigned char b,
+ vector unsigned char c)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector bool char __ATTRS_o_ai
+vec_vperm(vector bool char a, vector bool char b, vector unsigned char c)
+{
+ return (vector bool char)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector short __ATTRS_o_ai
+vec_vperm(vector short a, vector short b, vector unsigned char c)
+{
+ return (vector short)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector unsigned short __ATTRS_o_ai
+vec_vperm(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned char c)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector bool short __ATTRS_o_ai
+vec_vperm(vector bool short a, vector bool short b, vector unsigned char c)
+{
+ return (vector bool short)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector pixel __ATTRS_o_ai
+vec_vperm(vector pixel a, vector pixel b, vector unsigned char c)
+{
+ return (vector pixel)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector int __ATTRS_o_ai
+vec_vperm(vector int a, vector int b, vector unsigned char c)
+{
+ return (vector int)__builtin_altivec_vperm_4si(a, b, c);
+}
+
+vector unsigned int __ATTRS_o_ai
+vec_vperm(vector unsigned int a, vector unsigned int b, vector unsigned char c)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector bool int __ATTRS_o_ai
+vec_vperm(vector bool int a, vector bool int b, vector unsigned char c)
+{
+ return (vector bool int)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+vector float __ATTRS_o_ai
+vec_vperm(vector float a, vector float b, vector unsigned char c)
+{
+ return (vector float)
+ __builtin_altivec_vperm_4si((vector int)a, (vector int)b, c);
+}
+
+/* vec_re */
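+/* Per-element reciprocal estimate (vrefp): a low-precision estimate that is
+   typically refined with Newton-Raphson steps when full accuracy is
+   needed. */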
+
+vector float __attribute__((__always_inline__))
+vec_re(vector float a)
+{
+ return __builtin_altivec_vrefp(a);
+}
+
+/* vec_vrefp */
+
+vector float __attribute__((__always_inline__))
+vec_vrefp(vector float a)
+{
+ return __builtin_altivec_vrefp(a);
+}
+
+/* vec_rl */
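+/* Rotate left: each element of a is rotated left by the corresponding
+   element of b, taken modulo the element width. */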
+
+static vector signed char __ATTRS_o_ai
+vec_rl(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)__builtin_altivec_vrlb((vector char)a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_rl(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)__builtin_altivec_vrlb((vector char)a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_rl(vector short a, vector unsigned short b)
+{
+ return __builtin_altivec_vrlh(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_rl(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned short)__builtin_altivec_vrlh((vector short)a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_rl(vector int a, vector unsigned int b)
+{
+ return __builtin_altivec_vrlw(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_rl(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned int)__builtin_altivec_vrlw((vector int)a, b);
+}
+
+/* vec_vrlb */
+
+static vector signed char __ATTRS_o_ai
+vec_vrlb(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)__builtin_altivec_vrlb((vector char)a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vrlb(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)__builtin_altivec_vrlb((vector char)a, b);
+}
+
+/* vec_vrlh */
+
+static vector short __ATTRS_o_ai
+vec_vrlh(vector short a, vector unsigned short b)
+{
+ return __builtin_altivec_vrlh(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vrlh(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned short)__builtin_altivec_vrlh((vector short)a, b);
+}
+
+/* vec_vrlw */
+
+static vector int __ATTRS_o_ai
+vec_vrlw(vector int a, vector unsigned int b)
+{
+ return __builtin_altivec_vrlw(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vrlw(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned int)__builtin_altivec_vrlw((vector int)a, b);
+}
+
+/* vec_round */
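+/* Rounds each element to the nearest floating-point integer (vrfin). */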
+
+static vector float __attribute__((__always_inline__))
+vec_round(vector float a)
+{
+ return __builtin_altivec_vrfin(a);
+}
+
+/* vec_vrfin */
+
+static vector float __attribute__((__always_inline__))
+vec_vrfin(vector float a)
+{
+ return __builtin_altivec_vrfin(a);
+}
+
+/* vec_rsqrte */
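+/* Per-element reciprocal square-root estimate (vrsqrtefp). */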
+
+static __vector float __attribute__((__always_inline__))
+vec_rsqrte(vector float a)
+{
+ return __builtin_altivec_vrsqrtefp(a);
+}
+
+/* vec_vrsqrtefp */
+
+static __vector float __attribute__((__always_inline__))
+vec_vrsqrtefp(vector float a)
+{
+ return __builtin_altivec_vrsqrtefp(a);
+}
+
+/* vec_sel */
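+/* Bitwise select: result bits are taken from b where the corresponding bit
+   of c is 1 and from a where it is 0, i.e. (a & ~c) | (b & c). */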
+
+#define __builtin_altivec_vsel_4si vec_sel
+
+static vector signed char __ATTRS_o_ai
+vec_sel(vector signed char a, vector signed char b, vector unsigned char c)
+{
+ return (a & ~(vector signed char)c) | (b & (vector signed char)c);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_sel(vector signed char a, vector signed char b, vector bool char c)
+{
+ return (a & ~(vector signed char)c) | (b & (vector signed char)c);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sel(vector unsigned char a, vector unsigned char b, vector unsigned char c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sel(vector unsigned char a, vector unsigned char b, vector bool char c)
+{
+ return (a & ~(vector unsigned char)c) | (b & (vector unsigned char)c);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_sel(vector bool char a, vector bool char b, vector unsigned char c)
+{
+ return (a & ~(vector bool char)c) | (b & (vector bool char)c);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_sel(vector bool char a, vector bool char b, vector bool char c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector short __ATTRS_o_ai
+vec_sel(vector short a, vector short b, vector unsigned short c)
+{
+ return (a & ~(vector short)c) | (b & (vector short)c);
+}
+
+static vector short __ATTRS_o_ai
+vec_sel(vector short a, vector short b, vector bool short c)
+{
+ return (a & ~(vector short)c) | (b & (vector short)c);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sel(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned short c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sel(vector unsigned short a, vector unsigned short b, vector bool short c)
+{
+ return (a & ~(vector unsigned short)c) | (b & (vector unsigned short)c);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_sel(vector bool short a, vector bool short b, vector unsigned short c)
+{
+ return (a & ~(vector bool short)c) | (b & (vector bool short)c);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_sel(vector bool short a, vector bool short b, vector bool short c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector int __ATTRS_o_ai
+vec_sel(vector int a, vector int b, vector unsigned int c)
+{
+ return (a & ~(vector int)c) | (b & (vector int)c);
+}
+
+static vector int __ATTRS_o_ai
+vec_sel(vector int a, vector int b, vector bool int c)
+{
+ return (a & ~(vector int)c) | (b & (vector int)c);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sel(vector unsigned int a, vector unsigned int b, vector unsigned int c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sel(vector unsigned int a, vector unsigned int b, vector bool int c)
+{
+ return (a & ~(vector unsigned int)c) | (b & (vector unsigned int)c);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_sel(vector bool int a, vector bool int b, vector unsigned int c)
+{
+ return (a & ~(vector bool int)c) | (b & (vector bool int)c);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_sel(vector bool int a, vector bool int b, vector bool int c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector float __ATTRS_o_ai
+vec_sel(vector float a, vector float b, vector unsigned int c)
+{
+ vector int res = ((vector int)a & ~(vector int)c)
+ | ((vector int)b & (vector int)c);
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_sel(vector float a, vector float b, vector bool int c)
+{
+ vector int res = ((vector int)a & ~(vector int)c)
+ | ((vector int)b & (vector int)c);
+ return (vector float)res;
+}
+
+/* vec_vsel */
+
+static vector signed char __ATTRS_o_ai
+vec_vsel(vector signed char a, vector signed char b, vector unsigned char c)
+{
+ return (a & ~(vector signed char)c) | (b & (vector signed char)c);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vsel(vector signed char a, vector signed char b, vector bool char c)
+{
+ return (a & ~(vector signed char)c) | (b & (vector signed char)c);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsel(vector unsigned char a, vector unsigned char b, vector unsigned char c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsel(vector unsigned char a, vector unsigned char b, vector bool char c)
+{
+ return (a & ~(vector unsigned char)c) | (b & (vector unsigned char)c);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_vsel(vector bool char a, vector bool char b, vector unsigned char c)
+{
+ return (a & ~(vector bool char)c) | (b & (vector bool char)c);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_vsel(vector bool char a, vector bool char b, vector bool char c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsel(vector short a, vector short b, vector unsigned short c)
+{
+ return (a & ~(vector short)c) | (b & (vector short)c);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsel(vector short a, vector short b, vector bool short c)
+{
+ return (a & ~(vector short)c) | (b & (vector short)c);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsel(vector unsigned short a,
+ vector unsigned short b,
+ vector unsigned short c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsel(vector unsigned short a, vector unsigned short b, vector bool short c)
+{
+ return (a & ~(vector unsigned short)c) | (b & (vector unsigned short)c);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vsel(vector bool short a, vector bool short b, vector unsigned short c)
+{
+ return (a & ~(vector bool short)c) | (b & (vector bool short)c);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vsel(vector bool short a, vector bool short b, vector bool short c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsel(vector int a, vector int b, vector unsigned int c)
+{
+ return (a & ~(vector int)c) | (b & (vector int)c);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsel(vector int a, vector int b, vector bool int c)
+{
+ return (a & ~(vector int)c) | (b & (vector int)c);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsel(vector unsigned int a, vector unsigned int b, vector unsigned int c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsel(vector unsigned int a, vector unsigned int b, vector bool int c)
+{
+ return (a & ~(vector unsigned int)c) | (b & (vector unsigned int)c);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vsel(vector bool int a, vector bool int b, vector unsigned int c)
+{
+ return (a & ~(vector bool int)c) | (b & (vector bool int)c);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vsel(vector bool int a, vector bool int b, vector bool int c)
+{
+ return (a & ~c) | (b & c);
+}
+
+static vector float __ATTRS_o_ai
+vec_vsel(vector float a, vector float b, vector unsigned int c)
+{
+ vector int res = ((vector int)a & ~(vector int)c)
+ | ((vector int)b & (vector int)c);
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_vsel(vector float a, vector float b, vector bool int c)
+{
+ vector int res = ((vector int)a & ~(vector int)c)
+ | ((vector int)b & (vector int)c);
+ return (vector float)res;
+}
+
+/* vec_sl */
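+/* Element-wise shift left; as with vslb/vslh/vslw, only the low-order
+   log2(element width) bits of each shift count are used. */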
+
+static vector signed char __ATTRS_o_ai
+vec_sl(vector signed char a, vector unsigned char b)
+{
+ return a << (vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sl(vector unsigned char a, vector unsigned char b)
+{
+ return a << b;
+}
+
+static vector short __ATTRS_o_ai
+vec_sl(vector short a, vector unsigned short b)
+{
+ return a << (vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sl(vector unsigned short a, vector unsigned short b)
+{
+ return a << b;
+}
+
+static vector int __ATTRS_o_ai
+vec_sl(vector int a, vector unsigned int b)
+{
+ return a << (vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sl(vector unsigned int a, vector unsigned int b)
+{
+ return a << b;
+}
+
+/* vec_vslb */
+
+#define __builtin_altivec_vslb vec_vslb
+
+static vector signed char __ATTRS_o_ai
+vec_vslb(vector signed char a, vector unsigned char b)
+{
+ return vec_sl(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vslb(vector unsigned char a, vector unsigned char b)
+{
+ return vec_sl(a, b);
+}
+
+/* vec_vslh */
+
+#define __builtin_altivec_vslh vec_vslh
+
+static vector short __ATTRS_o_ai
+vec_vslh(vector short a, vector unsigned short b)
+{
+ return vec_sl(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vslh(vector unsigned short a, vector unsigned short b)
+{
+ return vec_sl(a, b);
+}
+
+/* vec_vslw */
+
+#define __builtin_altivec_vslw vec_vslw
+
+static vector int __ATTRS_o_ai
+vec_vslw(vector int a, vector unsigned int b)
+{
+ return vec_sl(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vslw(vector unsigned int a, vector unsigned int b)
+{
+ return vec_sl(a, b);
+}
+
+/* vec_sld */
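+/* Shift left double by octets (vsldoi): returns 16 consecutive bytes of the
+   concatenation a||b starting at byte offset c, which must be in 0..15.
+   For int vectors, vec_sld(a, b, 4) yields { a[1], a[2], a[3], b[0] }. */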
+
+#define __builtin_altivec_vsldoi_4si vec_sld
+
+static vector signed char __ATTRS_o_ai
+vec_sld(vector signed char a, vector signed char b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sld(vector unsigned char a, vector unsigned char b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector short __ATTRS_o_ai
+vec_sld(vector short a, vector short b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sld(vector unsigned short a, vector unsigned short b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector pixel __ATTRS_o_ai
+vec_sld(vector pixel a, vector pixel b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector int __ATTRS_o_ai
+vec_sld(vector int a, vector int b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sld(vector unsigned int a, vector unsigned int b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector float __ATTRS_o_ai
+vec_sld(vector float a, vector float b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+/* vec_vsldoi */
+
+static vector signed char __ATTRS_o_ai
+vec_vsldoi(vector signed char a, vector signed char b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsldoi(vector unsigned char a, vector unsigned char b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector short __ATTRS_o_ai
+vec_vsldoi(vector short a, vector short b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsldoi(vector unsigned short a, vector unsigned short b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector pixel __ATTRS_o_ai
+vec_vsldoi(vector pixel a, vector pixel b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector int __ATTRS_o_ai
+vec_vsldoi(vector int a, vector int b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsldoi(vector unsigned int a, vector unsigned int b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+static vector float __ATTRS_o_ai
+vec_vsldoi(vector float a, vector float b, unsigned char c)
+{
+ return vec_perm(a, b, (vector unsigned char)
+ (c, c+1, c+2, c+3, c+4, c+5, c+6, c+7,
+ c+8, c+9, c+10, c+11, c+12, c+13, c+14, c+15));
+}
+
+/* vec_sll */
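+/* Shift left long (vsl): shifts the entire 128-bit value of a left by the
+   bit count held in the low-order three bits of b; the architecture expects
+   that count to be replicated in every byte of b. */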
+
+static vector signed char __ATTRS_o_ai
+vec_sll(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_sll(vector signed char a, vector unsigned short b)
+{
+ return (vector signed char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_sll(vector signed char a, vector unsigned int b)
+{
+ return (vector signed char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sll(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sll(vector unsigned char a, vector unsigned short b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sll(vector unsigned char a, vector unsigned int b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_sll(vector bool char a, vector unsigned char b)
+{
+ return (vector bool char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_sll(vector bool char a, vector unsigned short b)
+{
+ return (vector bool char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_sll(vector bool char a, vector unsigned int b)
+{
+ return (vector bool char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_sll(vector short a, vector unsigned char b)
+{
+ return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_sll(vector short a, vector unsigned short b)
+{
+ return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_sll(vector short a, vector unsigned int b)
+{
+ return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sll(vector unsigned short a, vector unsigned char b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sll(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sll(vector unsigned short a, vector unsigned int b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_sll(vector bool short a, vector unsigned char b)
+{
+ return (vector bool short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_sll(vector bool short a, vector unsigned short b)
+{
+ return (vector bool short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_sll(vector bool short a, vector unsigned int b)
+{
+ return (vector bool short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_sll(vector pixel a, vector unsigned char b)
+{
+ return (vector pixel)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_sll(vector pixel a, vector unsigned short b)
+{
+ return (vector pixel)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_sll(vector pixel a, vector unsigned int b)
+{
+ return (vector pixel)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_sll(vector int a, vector unsigned char b)
+{
+ return (vector int)__builtin_altivec_vsl(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_sll(vector int a, vector unsigned short b)
+{
+ return (vector int)__builtin_altivec_vsl(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_sll(vector int a, vector unsigned int b)
+{
+ return (vector int)__builtin_altivec_vsl(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sll(vector unsigned int a, vector unsigned char b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sll(vector unsigned int a, vector unsigned short b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sll(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_sll(vector bool int a, vector unsigned char b)
+{
+ return (vector bool int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_sll(vector bool int a, vector unsigned short b)
+{
+ return (vector bool int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_sll(vector bool int a, vector unsigned int b)
+{
+ return (vector bool int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+/* vec_vsl */
+
+static vector signed char __ATTRS_o_ai
+vec_vsl(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vsl(vector signed char a, vector unsigned short b)
+{
+ return (vector signed char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vsl(vector signed char a, vector unsigned int b)
+{
+ return (vector signed char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsl(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsl(vector unsigned char a, vector unsigned short b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsl(vector unsigned char a, vector unsigned int b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_vsl(vector bool char a, vector unsigned char b)
+{
+ return (vector bool char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_vsl(vector bool char a, vector unsigned short b)
+{
+ return (vector bool char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_vsl(vector bool char a, vector unsigned int b)
+{
+ return (vector bool char)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsl(vector short a, vector unsigned char b)
+{
+ return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsl(vector short a, vector unsigned short b)
+{
+ return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsl(vector short a, vector unsigned int b)
+{
+ return (vector short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsl(vector unsigned short a, vector unsigned char b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsl(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsl(vector unsigned short a, vector unsigned int b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vsl(vector bool short a, vector unsigned char b)
+{
+ return (vector bool short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vsl(vector bool short a, vector unsigned short b)
+{
+ return (vector bool short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vsl(vector bool short a, vector unsigned int b)
+{
+ return (vector bool short)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_vsl(vector pixel a, vector unsigned char b)
+{
+ return (vector pixel)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_vsl(vector pixel a, vector unsigned short b)
+{
+ return (vector pixel)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_vsl(vector pixel a, vector unsigned int b)
+{
+ return (vector pixel)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsl(vector int a, vector unsigned char b)
+{
+ return (vector int)__builtin_altivec_vsl(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsl(vector int a, vector unsigned short b)
+{
+ return (vector int)__builtin_altivec_vsl(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsl(vector int a, vector unsigned int b)
+{
+ return (vector int)__builtin_altivec_vsl(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsl(vector unsigned int a, vector unsigned char b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsl(vector unsigned int a, vector unsigned short b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsl(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vsl(vector bool int a, vector unsigned char b)
+{
+ return (vector bool int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vsl(vector bool int a, vector unsigned short b)
+{
+ return (vector bool int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vsl(vector bool int a, vector unsigned int b)
+{
+ return (vector bool int)__builtin_altivec_vsl((vector int)a, (vector int)b);
+}
+
+/* vec_slo */
+
+static vector signed char __ATTRS_o_ai
+vec_slo(vector signed char a, vector signed char b)
+{
+ return (vector signed char)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_slo(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_slo(vector unsigned char a, vector signed char b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_slo(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_slo(vector short a, vector signed char b)
+{
+ return (vector short)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_slo(vector short a, vector unsigned char b)
+{
+ return (vector short)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_slo(vector unsigned short a, vector signed char b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_slo(vector unsigned short a, vector unsigned char b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_slo(vector pixel a, vector signed char b)
+{
+ return (vector pixel)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_slo(vector pixel a, vector unsigned char b)
+{
+ return (vector pixel)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_slo(vector int a, vector signed char b)
+{
+ return (vector int)__builtin_altivec_vslo(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_slo(vector int a, vector unsigned char b)
+{
+ return (vector int)__builtin_altivec_vslo(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_slo(vector unsigned int a, vector signed char b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_slo(vector unsigned int a, vector unsigned char b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_slo(vector float a, vector signed char b)
+{
+ return (vector float)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_slo(vector float a, vector unsigned char b)
+{
+ return (vector float)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
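+/* A minimal usage sketch for the vec_slo overloads above: the quadword is
+ * shifted left by whole bytes, with the count taken from bits 121:124 of the
+ * second operand, so a splatted value of n << 3 shifts by n bytes. Names are
+ * illustrative:
+ *
+ *   vector unsigned char v = (vector unsigned char)(0xFF);
+ *   vector unsigned char sh = (vector unsigned char)(2 << 3);
+ *   vector unsigned char r = vec_slo(v, sh);   // shifted left two bytes
+ */
+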
+/* vec_vslo */
+
+static vector signed char __ATTRS_o_ai
+vec_vslo(vector signed char a, vector signed char b)
+{
+ return (vector signed char)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vslo(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vslo(vector unsigned char a, vector signed char b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vslo(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vslo(vector short a, vector signed char b)
+{
+ return (vector short)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vslo(vector short a, vector unsigned char b)
+{
+ return (vector short)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vslo(vector unsigned short a, vector signed char b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vslo(vector unsigned short a, vector unsigned char b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_vslo(vector pixel a, vector signed char b)
+{
+ return (vector pixel)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_vslo(vector pixel a, vector unsigned char b)
+{
+ return (vector pixel)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vslo(vector int a, vector signed char b)
+{
+ return (vector int)__builtin_altivec_vslo(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vslo(vector int a, vector unsigned char b)
+{
+ return (vector int)__builtin_altivec_vslo(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vslo(vector unsigned int a, vector signed char b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vslo(vector unsigned int a, vector unsigned char b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_vslo(vector float a, vector signed char b)
+{
+ return (vector float)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_vslo(vector float a, vector unsigned char b)
+{
+ return (vector float)__builtin_altivec_vslo((vector int)a, (vector int)b);
+}
+
+/* vec_splat */
+
+static vector signed char __ATTRS_o_ai
+vec_splat(vector signed char a, unsigned char b)
+{
+ return vec_perm(a, a, (vector unsigned char)(b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_splat(vector unsigned char a, unsigned char b)
+{
+ return vec_perm(a, a, (vector unsigned char)(b));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_splat(vector bool char a, unsigned char b)
+{
+ return vec_perm(a, a, (vector unsigned char)(b));
+}
+
+static vector short __ATTRS_o_ai
+vec_splat(vector short a, unsigned char b)
+{
+ b *= 2;
+ unsigned char b1=b+1;
+ return vec_perm(a, a, (vector unsigned char)
+ (b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_splat(vector unsigned short a, unsigned char b)
+{
+ b *= 2;
+ unsigned char b1=b+1;
+ return vec_perm(a, a, (vector unsigned char)
+ (b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_splat(vector bool short a, unsigned char b)
+{
+ b *= 2;
+ unsigned char b1=b+1;
+ return vec_perm(a, a, (vector unsigned char)
+ (b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1));
+}
+
+static vector pixel __ATTRS_o_ai
+vec_splat(vector pixel a, unsigned char b)
+{
+ b *= 2;
+ unsigned char b1=b+1;
+ return vec_perm(a, a, (vector unsigned char)
+ (b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1));
+}
+
+static vector int __ATTRS_o_ai
+vec_splat(vector int a, unsigned char b)
+{
+ b *= 4;
+ unsigned char b1=b+1, b2=b+2, b3=b+3;
+ return vec_perm(a, a, (vector unsigned char)
+ (b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_splat(vector unsigned int a, unsigned char b)
+{
+ b *= 4;
+ unsigned char b1=b+1, b2=b+2, b3=b+3;
+ return vec_perm(a, a, (vector unsigned char)
+ (b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_splat(vector bool int a, unsigned char b)
+{
+ b *= 4;
+ unsigned char b1=b+1, b2=b+2, b3=b+3;
+ return vec_perm(a, a, (vector unsigned char)
+ (b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3));
+}
+
+static vector float __ATTRS_o_ai
+vec_splat(vector float a, unsigned char b)
+{
+ b *= 4;
+ unsigned char b1=b+1, b2=b+2, b3=b+3;
+ return vec_perm(a, a, (vector unsigned char)
+ (b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3));
+}
+
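+/* A minimal usage sketch for the vec_splat overloads above: element b of the
+ * input is replicated into every element of the result. The index and names
+ * are illustrative:
+ *
+ *   vector int x = (vector int)(10, 20, 30, 40);
+ *   vector int y = vec_splat(x, 2);   // (30, 30, 30, 30)
+ */
+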
+/* vec_vspltb */
+
+#define __builtin_altivec_vspltb vec_vspltb
+
+static vector signed char __ATTRS_o_ai
+vec_vspltb(vector signed char a, unsigned char b)
+{
+ return vec_perm(a, a, (vector unsigned char)(b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vspltb(vector unsigned char a, unsigned char b)
+{
+ return vec_perm(a, a, (vector unsigned char)(b));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_vspltb(vector bool char a, unsigned char b)
+{
+ return vec_perm(a, a, (vector unsigned char)(b));
+}
+
+/* vec_vsplth */
+
+#define __builtin_altivec_vsplth vec_vsplth
+
+static vector short __ATTRS_o_ai
+vec_vsplth(vector short a, unsigned char b)
+{
+ b *= 2;
+ unsigned char b1=b+1;
+ return vec_perm(a, a, (vector unsigned char)
+ (b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsplth(vector unsigned short a, unsigned char b)
+{
+ b *= 2;
+ unsigned char b1=b+1;
+ return vec_perm(a, a, (vector unsigned char)
+ (b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vsplth(vector bool short a, unsigned char b)
+{
+ b *= 2;
+ unsigned char b1=b+1;
+ return vec_perm(a, a, (vector unsigned char)
+ (b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1));
+}
+
+static vector pixel __ATTRS_o_ai
+vec_vsplth(vector pixel a, unsigned char b)
+{
+ b *= 2;
+ unsigned char b1=b+1;
+ return vec_perm(a, a, (vector unsigned char)
+ (b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1, b, b1));
+}
+
+/* vec_vspltw */
+
+#define __builtin_altivec_vspltw vec_vspltw
+
+static vector int __ATTRS_o_ai
+vec_vspltw(vector int a, unsigned char b)
+{
+ b *= 4;
+ unsigned char b1=b+1, b2=b+2, b3=b+3;
+ return vec_perm(a, a, (vector unsigned char)
+ (b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vspltw(vector unsigned int a, unsigned char b)
+{
+ b *= 4;
+ unsigned char b1=b+1, b2=b+2, b3=b+3;
+ return vec_perm(a, a, (vector unsigned char)
+ (b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vspltw(vector bool int a, unsigned char b)
+{
+ b *= 4;
+ unsigned char b1=b+1, b2=b+2, b3=b+3;
+ return vec_perm(a, a, (vector unsigned char)
+ (b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3));
+}
+
+static vector float __ATTRS_o_ai
+vec_vspltw(vector float a, unsigned char b)
+{
+ b *= 4;
+ unsigned char b1=b+1, b2=b+2, b3=b+3;
+ return vec_perm(a, a, (vector unsigned char)
+ (b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3, b, b1, b2, b3));
+}
+
+/* vec_splat_s8 */
+
+#define __builtin_altivec_vspltisb vec_splat_s8
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector signed char __ATTRS_o_ai
+vec_splat_s8(signed char a)
+{
+ return (vector signed char)(a);
+}
+
+/* vec_vspltisb */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector signed char __ATTRS_o_ai
+vec_vspltisb(signed char a)
+{
+ return (vector signed char)(a);
+}
+
+/* vec_splat_s16 */
+
+#define __builtin_altivec_vspltish vec_splat_s16
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector short __ATTRS_o_ai
+vec_splat_s16(signed char a)
+{
+ return (vector short)(a);
+}
+
+/* vec_vspltish */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector short __ATTRS_o_ai
+vec_vspltish(signed char a)
+{
+ return (vector short)(a);
+}
+
+/* vec_splat_s32 */
+
+#define __builtin_altivec_vspltisw vec_splat_s32
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector int __ATTRS_o_ai
+vec_splat_s32(signed char a)
+{
+ return (vector int)(a);
+}
+
+/* vec_vspltisw */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector int __ATTRS_o_ai
+vec_vspltisw(signed char a)
+{
+ return (vector int)(a);
+}
+
+/* vec_splat_u8 */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector unsigned char __ATTRS_o_ai
+vec_splat_u8(unsigned char a)
+{
+ return (vector unsigned char)(a);
+}
+
+/* vec_splat_u16 */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector unsigned short __ATTRS_o_ai
+vec_splat_u16(signed char a)
+{
+ return (vector unsigned short)(a);
+}
+
+/* vec_splat_u32 */
+
+// FIXME: parameter should be treated as 5-bit signed literal
+static vector unsigned int __ATTRS_o_ai
+vec_splat_u32(signed char a)
+{
+ return (vector unsigned int)(a);
+}
+
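+/* A minimal usage sketch for the vec_splat_s8/s16/s32 and u8/u16/u32 helpers
+ * above: the argument, intended to be a 5-bit signed literal (-16..15, per
+ * the FIXMEs), is replicated into every element. Names are illustrative:
+ *
+ *   vector short ones = vec_splat_s16(1);            // eight 1s
+ *   vector unsigned char threes = vec_splat_u8(3);   // sixteen 3s
+ */
+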
+/* vec_sr */
+
+static vector signed char __ATTRS_o_ai
+vec_sr(vector signed char a, vector unsigned char b)
+{
+ return a >> (vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sr(vector unsigned char a, vector unsigned char b)
+{
+ return a >> b;
+}
+
+static vector short __ATTRS_o_ai
+vec_sr(vector short a, vector unsigned short b)
+{
+ return a >> (vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sr(vector unsigned short a, vector unsigned short b)
+{
+ return a >> b;
+}
+
+static vector int __ATTRS_o_ai
+vec_sr(vector int a, vector unsigned int b)
+{
+ return a >> (vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sr(vector unsigned int a, vector unsigned int b)
+{
+ return a >> b;
+}
+
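+/* A minimal usage sketch for the vec_sr overloads above: each element is
+ * logically shifted right by the corresponding element of the second
+ * operand. Names are illustrative:
+ *
+ *   vector unsigned int v = (vector unsigned int)(0x80);
+ *   vector unsigned int r = vec_sr(v, vec_splat_u32(4));   // each 0x8
+ */
+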
+/* vec_vsrb */
+
+#define __builtin_altivec_vsrb vec_vsrb
+
+static vector signed char __ATTRS_o_ai
+vec_vsrb(vector signed char a, vector unsigned char b)
+{
+ return a >> (vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsrb(vector unsigned char a, vector unsigned char b)
+{
+ return a >> b;
+}
+
+/* vec_vsrh */
+
+#define __builtin_altivec_vsrh vec_vsrh
+
+static vector short __ATTRS_o_ai
+vec_vsrh(vector short a, vector unsigned short b)
+{
+ return a >> (vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsrh(vector unsigned short a, vector unsigned short b)
+{
+ return a >> b;
+}
+
+/* vec_vsrw */
+
+#define __builtin_altivec_vsrw vec_vsrw
+
+static vector int __ATTRS_o_ai
+vec_vsrw(vector int a, vector unsigned int b)
+{
+ return a >> (vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsrw(vector unsigned int a, vector unsigned int b)
+{
+ return a >> b;
+}
+
+/* vec_sra */
+
+static vector signed char __ATTRS_o_ai
+vec_sra(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)__builtin_altivec_vsrab((vector char)a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sra(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)__builtin_altivec_vsrab((vector char)a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_sra(vector short a, vector unsigned short b)
+{
+ return __builtin_altivec_vsrah(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sra(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned short)__builtin_altivec_vsrah((vector short)a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_sra(vector int a, vector unsigned int b)
+{
+ return __builtin_altivec_vsraw(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sra(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned int)__builtin_altivec_vsraw((vector int)a, b);
+}
+
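+/* A minimal usage sketch for the vec_sra overloads above: each element is
+ * arithmetically shifted right, replicating the sign bit. Names are
+ * illustrative:
+ *
+ *   vector int v = (vector int)(-32);
+ *   vector int r = vec_sra(v, vec_splat_u32(2));   // each element is -8
+ */
+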
+/* vec_vsrab */
+
+static vector signed char __ATTRS_o_ai
+vec_vsrab(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)__builtin_altivec_vsrab((vector char)a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsrab(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)__builtin_altivec_vsrab((vector char)a, b);
+}
+
+/* vec_vsrah */
+
+static vector short __ATTRS_o_ai
+vec_vsrah(vector short a, vector unsigned short b)
+{
+ return __builtin_altivec_vsrah(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsrah(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned short)__builtin_altivec_vsrah((vector short)a, b);
+}
+
+/* vec_vsraw */
+
+static vector int __ATTRS_o_ai
+vec_vsraw(vector int a, vector unsigned int b)
+{
+ return __builtin_altivec_vsraw(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsraw(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned int)__builtin_altivec_vsraw((vector int)a, b);
+}
+
+/* vec_srl */
+
+static vector signed char __ATTRS_o_ai
+vec_srl(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_srl(vector signed char a, vector unsigned short b)
+{
+ return (vector signed char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_srl(vector signed char a, vector unsigned int b)
+{
+ return (vector signed char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_srl(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_srl(vector unsigned char a, vector unsigned short b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_srl(vector unsigned char a, vector unsigned int b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_srl(vector bool char a, vector unsigned char b)
+{
+ return (vector bool char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_srl(vector bool char a, vector unsigned short b)
+{
+ return (vector bool char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_srl(vector bool char a, vector unsigned int b)
+{
+ return (vector bool char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_srl(vector short a, vector unsigned char b)
+{
+ return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_srl(vector short a, vector unsigned short b)
+{
+ return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_srl(vector short a, vector unsigned int b)
+{
+ return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_srl(vector unsigned short a, vector unsigned char b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_srl(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_srl(vector unsigned short a, vector unsigned int b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_srl(vector bool short a, vector unsigned char b)
+{
+ return (vector bool short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_srl(vector bool short a, vector unsigned short b)
+{
+ return (vector bool short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_srl(vector bool short a, vector unsigned int b)
+{
+ return (vector bool short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_srl(vector pixel a, vector unsigned char b)
+{
+ return (vector pixel)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_srl(vector pixel a, vector unsigned short b)
+{
+ return (vector pixel)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_srl(vector pixel a, vector unsigned int b)
+{
+ return (vector pixel)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_srl(vector int a, vector unsigned char b)
+{
+ return (vector int)__builtin_altivec_vsr(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_srl(vector int a, vector unsigned short b)
+{
+ return (vector int)__builtin_altivec_vsr(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_srl(vector int a, vector unsigned int b)
+{
+ return (vector int)__builtin_altivec_vsr(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_srl(vector unsigned int a, vector unsigned char b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_srl(vector unsigned int a, vector unsigned short b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_srl(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_srl(vector bool int a, vector unsigned char b)
+{
+ return (vector bool int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_srl(vector bool int a, vector unsigned short b)
+{
+ return (vector bool int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_srl(vector bool int a, vector unsigned int b)
+{
+ return (vector bool int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
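+/* A minimal usage sketch for the vec_srl overloads above: the mirror image of
+ * vec_sll, shifting the whole quadword right by the bit count in the low-order
+ * three bits of each byte of the second operand (all bytes must agree). Names
+ * are illustrative:
+ *
+ *   vector unsigned int v = (vector unsigned int)(8);
+ *   vector unsigned int r = vec_srl(v, vec_splat_u8(3));   // quadword >> 3
+ */
+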
+/* vec_vsr */
+
+static vector signed char __ATTRS_o_ai
+vec_vsr(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vsr(vector signed char a, vector unsigned short b)
+{
+ return (vector signed char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vsr(vector signed char a, vector unsigned int b)
+{
+ return (vector signed char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsr(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsr(vector unsigned char a, vector unsigned short b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsr(vector unsigned char a, vector unsigned int b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_vsr(vector bool char a, vector unsigned char b)
+{
+ return (vector bool char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_vsr(vector bool char a, vector unsigned short b)
+{
+ return (vector bool char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool char __ATTRS_o_ai
+vec_vsr(vector bool char a, vector unsigned int b)
+{
+ return (vector bool char)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsr(vector short a, vector unsigned char b)
+{
+ return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsr(vector short a, vector unsigned short b)
+{
+ return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsr(vector short a, vector unsigned int b)
+{
+ return (vector short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsr(vector unsigned short a, vector unsigned char b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsr(vector unsigned short a, vector unsigned short b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsr(vector unsigned short a, vector unsigned int b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vsr(vector bool short a, vector unsigned char b)
+{
+ return (vector bool short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vsr(vector bool short a, vector unsigned short b)
+{
+ return (vector bool short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vsr(vector bool short a, vector unsigned int b)
+{
+ return (vector bool short)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_vsr(vector pixel a, vector unsigned char b)
+{
+ return (vector pixel)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_vsr(vector pixel a, vector unsigned short b)
+{
+ return (vector pixel)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_vsr(vector pixel a, vector unsigned int b)
+{
+ return (vector pixel)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsr(vector int a, vector unsigned char b)
+{
+ return (vector int)__builtin_altivec_vsr(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsr(vector int a, vector unsigned short b)
+{
+ return (vector int)__builtin_altivec_vsr(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsr(vector int a, vector unsigned int b)
+{
+ return (vector int)__builtin_altivec_vsr(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsr(vector unsigned int a, vector unsigned char b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsr(vector unsigned int a, vector unsigned short b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsr(vector unsigned int a, vector unsigned int b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vsr(vector bool int a, vector unsigned char b)
+{
+ return (vector bool int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vsr(vector bool int a, vector unsigned short b)
+{
+ return (vector bool int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vsr(vector bool int a, vector unsigned int b)
+{
+ return (vector bool int)__builtin_altivec_vsr((vector int)a, (vector int)b);
+}
+
+/* vec_sro */
+
+static vector signed char __ATTRS_o_ai
+vec_sro(vector signed char a, vector signed char b)
+{
+ return (vector signed char)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_sro(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sro(vector unsigned char a, vector signed char b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sro(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_sro(vector short a, vector signed char b)
+{
+ return (vector short)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_sro(vector short a, vector unsigned char b)
+{
+ return (vector short)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sro(vector unsigned short a, vector signed char b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sro(vector unsigned short a, vector unsigned char b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_sro(vector pixel a, vector signed char b)
+{
+ return (vector pixel)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_sro(vector pixel a, vector unsigned char b)
+{
+ return (vector pixel)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_sro(vector int a, vector signed char b)
+{
+ return (vector int)__builtin_altivec_vsro(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_sro(vector int a, vector unsigned char b)
+{
+ return (vector int)__builtin_altivec_vsro(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sro(vector unsigned int a, vector signed char b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sro(vector unsigned int a, vector unsigned char b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_sro(vector float a, vector signed char b)
+{
+ return (vector float)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_sro(vector float a, vector unsigned char b)
+{
+ return (vector float)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
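+/* A minimal usage sketch for the vec_sro overloads above: the quadword is
+ * shifted right by whole bytes, with the count taken from bits 121:124 of the
+ * second operand, so a splatted value of n << 3 shifts by n bytes. Names are
+ * illustrative:
+ *
+ *   vector unsigned char v = (vector unsigned char)(0xFF);
+ *   vector unsigned char r = vec_sro(v, (vector unsigned char)(1 << 3));
+ */
+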
+/* vec_vsro */
+
+static vector signed char __ATTRS_o_ai
+vec_vsro(vector signed char a, vector signed char b)
+{
+ return (vector signed char)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vsro(vector signed char a, vector unsigned char b)
+{
+ return (vector signed char)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsro(vector unsigned char a, vector signed char b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsro(vector unsigned char a, vector unsigned char b)
+{
+ return (vector unsigned char)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsro(vector short a, vector signed char b)
+{
+ return (vector short)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsro(vector short a, vector unsigned char b)
+{
+ return (vector short)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsro(vector unsigned short a, vector signed char b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsro(vector unsigned short a, vector unsigned char b)
+{
+ return (vector unsigned short)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_vsro(vector pixel a, vector signed char b)
+{
+ return (vector pixel)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector pixel __ATTRS_o_ai
+vec_vsro(vector pixel a, vector unsigned char b)
+{
+ return (vector pixel)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsro(vector int a, vector signed char b)
+{
+ return (vector int)__builtin_altivec_vsro(a, (vector int)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsro(vector int a, vector unsigned char b)
+{
+ return (vector int)__builtin_altivec_vsro(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsro(vector unsigned int a, vector signed char b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsro(vector unsigned int a, vector unsigned char b)
+{
+ return (vector unsigned int)
+ __builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_vsro(vector float a, vector signed char b)
+{
+ return (vector float)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+static vector float __ATTRS_o_ai
+vec_vsro(vector float a, vector unsigned char b)
+{
+ return (vector float)__builtin_altivec_vsro((vector int)a, (vector int)b);
+}
+
+/* vec_st */
+
+static void __ATTRS_o_ai
+vec_st(vector signed char a, int b, vector signed char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector signed char a, int b, signed char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector unsigned char a, int b, vector unsigned char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector unsigned char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector bool char a, int b, signed char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector bool char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector bool char a, int b, vector bool char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector short a, int b, vector short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector short a, int b, short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector unsigned short a, int b, vector unsigned short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector unsigned short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector bool short a, int b, short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector bool short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector bool short a, int b, vector bool short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector pixel a, int b, short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector pixel a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector pixel a, int b, vector pixel *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector int a, int b, vector int *c)
+{
+ __builtin_altivec_stvx(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector int a, int b, int *c)
+{
+ __builtin_altivec_stvx(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector unsigned int a, int b, vector unsigned int *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector unsigned int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector bool int a, int b, int *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector bool int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector bool int a, int b, vector bool int *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector float a, int b, vector float *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_st(vector float a, int b, float *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
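+/* A minimal usage sketch for the vec_st overloads above: the 16-byte vector
+ * is stored to the 16-byte-aligned address formed from the byte offset b and
+ * the pointer c (the low four bits of the effective address are ignored).
+ * The buffer name is illustrative:
+ *
+ *   int buf[4] __attribute__((aligned(16)));
+ *   vec_st((vector int)(1, 2, 3, 4), 0, buf);
+ */
+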
+/* vec_stvx */
+
+static void __ATTRS_o_ai
+vec_stvx(vector signed char a, int b, vector signed char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector signed char a, int b, signed char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector unsigned char a, int b, vector unsigned char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector unsigned char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector bool char a, int b, signed char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector bool char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector bool char a, int b, vector bool char *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector short a, int b, vector short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector short a, int b, short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector unsigned short a, int b, vector unsigned short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector unsigned short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector bool short a, int b, short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector bool short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector bool short a, int b, vector bool short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector pixel a, int b, short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector pixel a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector pixel a, int b, vector pixel *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector int a, int b, vector int *c)
+{
+ __builtin_altivec_stvx(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector int a, int b, int *c)
+{
+ __builtin_altivec_stvx(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector unsigned int a, int b, vector unsigned int *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector unsigned int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector bool int a, int b, int *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector bool int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector bool int a, int b, vector bool int *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector float a, int b, vector float *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvx(vector float a, int b, float *c)
+{
+ __builtin_altivec_stvx((vector int)a, b, c);
+}
+
+/* vec_ste */
+
+static void __ATTRS_o_ai
+vec_ste(vector signed char a, int b, signed char *c)
+{
+ __builtin_altivec_stvebx((vector char)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector unsigned char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvebx((vector char)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector bool char a, int b, signed char *c)
+{
+ __builtin_altivec_stvebx((vector char)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector bool char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvebx((vector char)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector short a, int b, short *c)
+{
+ __builtin_altivec_stvehx(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector unsigned short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvehx((vector short)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector bool short a, int b, short *c)
+{
+ __builtin_altivec_stvehx((vector short)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector bool short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvehx((vector short)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector pixel a, int b, short *c)
+{
+ __builtin_altivec_stvehx((vector short)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector pixel a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvehx((vector short)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector int a, int b, int *c)
+{
+ __builtin_altivec_stvewx(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector unsigned int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvewx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector bool int a, int b, int *c)
+{
+ __builtin_altivec_stvewx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector bool int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvewx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_ste(vector float a, int b, float *c)
+{
+ __builtin_altivec_stvewx((vector int)a, b, c);
+}
+
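+/* A minimal usage sketch for the vec_ste overloads above: a single element is
+ * stored to the effective address b + c; which element is selected depends on
+ * that address's offset within its 16-byte block. Names are illustrative:
+ *
+ *   unsigned int out;
+ *   vec_ste((vector unsigned int)(7), 0, &out);   // stores one 32-bit 7
+ */
+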
+/* vec_stvebx */
+
+static void __ATTRS_o_ai
+vec_stvebx(vector signed char a, int b, signed char *c)
+{
+ __builtin_altivec_stvebx((vector char)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvebx(vector unsigned char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvebx((vector char)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvebx(vector bool char a, int b, signed char *c)
+{
+ __builtin_altivec_stvebx((vector char)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvebx(vector bool char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvebx((vector char)a, b, c);
+}
+
+/* vec_stvehx */
+
+static void __ATTRS_o_ai
+vec_stvehx(vector short a, int b, short *c)
+{
+ __builtin_altivec_stvehx(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvehx(vector unsigned short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvehx((vector short)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvehx(vector bool short a, int b, short *c)
+{
+ __builtin_altivec_stvehx((vector short)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvehx(vector bool short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvehx((vector short)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvehx(vector pixel a, int b, short *c)
+{
+ __builtin_altivec_stvehx((vector short)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvehx(vector pixel a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvehx((vector short)a, b, c);
+}
+
+/* vec_stvewx */
+
+static void __ATTRS_o_ai
+vec_stvewx(vector int a, int b, int *c)
+{
+ __builtin_altivec_stvewx(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvewx(vector unsigned int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvewx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvewx(vector bool int a, int b, int *c)
+{
+ __builtin_altivec_stvewx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvewx(vector bool int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvewx((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvewx(vector float a, int b, float *c)
+{
+ __builtin_altivec_stvewx((vector int)a, b, c);
+}
+
+/* vec_stl */
+
+static void __ATTRS_o_ai
+vec_stl(vector signed char a, int b, vector signed char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector signed char a, int b, signed char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector unsigned char a, int b, vector unsigned char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector unsigned char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector bool char a, int b, signed char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector bool char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector bool char a, int b, vector bool char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector short a, int b, vector short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector short a, int b, short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector unsigned short a, int b, vector unsigned short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector unsigned short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector bool short a, int b, short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector bool short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector bool short a, int b, vector bool short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector pixel a, int b, short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector pixel a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector pixel a, int b, vector pixel *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector int a, int b, vector int *c)
+{
+ __builtin_altivec_stvxl(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector int a, int b, int *c)
+{
+ __builtin_altivec_stvxl(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector unsigned int a, int b, vector unsigned int *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector unsigned int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector bool int a, int b, int *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector bool int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector bool int a, int b, vector bool int *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector float a, int b, vector float *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stl(vector float a, int b, float *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
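+/* A minimal usage sketch for the vec_stl overloads above: the effect matches
+ * vec_st, but the store (stvxl) marks the cache block least recently used,
+ * hinting that the data will not be reused soon. The buffer name is
+ * illustrative:
+ *
+ *   float buf[4] __attribute__((aligned(16)));
+ *   vec_stl((vector float)(0.0f, 0.0f, 0.0f, 0.0f), 0, buf);
+ */
+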
+/* vec_stvxl */
+
+static void __ATTRS_o_ai
+vec_stvxl(vector signed char a, int b, vector signed char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector signed char a, int b, signed char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector unsigned char a, int b, vector unsigned char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector unsigned char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector bool char a, int b, signed char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector bool char a, int b, unsigned char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector bool char a, int b, vector bool char *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector short a, int b, vector short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector short a, int b, short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector unsigned short a, int b, vector unsigned short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector unsigned short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector bool short a, int b, short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector bool short a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector bool short a, int b, vector bool short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector pixel a, int b, short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector pixel a, int b, unsigned short *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector pixel a, int b, vector pixel *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector int a, int b, vector int *c)
+{
+ __builtin_altivec_stvxl(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector int a, int b, int *c)
+{
+ __builtin_altivec_stvxl(a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector unsigned int a, int b, vector unsigned int *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector unsigned int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector bool int a, int b, int *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector bool int a, int b, unsigned int *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector bool int a, int b, vector bool int *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector float a, int b, vector float *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvxl(vector float a, int b, float *c)
+{
+ __builtin_altivec_stvxl((vector int)a, b, c);
+}
+
+/* vec_sub */
+
+static vector signed char __ATTRS_o_ai
+vec_sub(vector signed char a, vector signed char b)
+{
+ return a - b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_sub(vector bool char a, vector signed char b)
+{
+ return (vector signed char)a - b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_sub(vector signed char a, vector bool char b)
+{
+ return a - (vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sub(vector unsigned char a, vector unsigned char b)
+{
+ return a - b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sub(vector bool char a, vector unsigned char b)
+{
+ return (vector unsigned char)a - b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_sub(vector unsigned char a, vector bool char b)
+{
+ return a - (vector unsigned char)b;
+}
+
+static vector short __ATTRS_o_ai
+vec_sub(vector short a, vector short b)
+{
+ return a - b;
+}
+
+static vector short __ATTRS_o_ai
+vec_sub(vector bool short a, vector short b)
+{
+ return (vector short)a - b;
+}
+
+static vector short __ATTRS_o_ai
+vec_sub(vector short a, vector bool short b)
+{
+ return a - (vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sub(vector unsigned short a, vector unsigned short b)
+{
+ return a - b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sub(vector bool short a, vector unsigned short b)
+{
+ return (vector unsigned short)a - b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_sub(vector unsigned short a, vector bool short b)
+{
+ return a - (vector unsigned short)b;
+}
+
+static vector int __ATTRS_o_ai
+vec_sub(vector int a, vector int b)
+{
+ return a - b;
+}
+
+static vector int __ATTRS_o_ai
+vec_sub(vector bool int a, vector int b)
+{
+ return (vector int)a - b;
+}
+
+static vector int __ATTRS_o_ai
+vec_sub(vector int a, vector bool int b)
+{
+ return a - (vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sub(vector unsigned int a, vector unsigned int b)
+{
+ return a - b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sub(vector bool int a, vector unsigned int b)
+{
+ return (vector unsigned int)a - b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sub(vector unsigned int a, vector bool int b)
+{
+ return a - (vector unsigned int)b;
+}
+
+static vector float __ATTRS_o_ai
+vec_sub(vector float a, vector float b)
+{
+ return a - b;
+}
+
+/* vec_vsububm */
+
+#define __builtin_altivec_vsububm vec_vsububm
+
+static vector signed char __ATTRS_o_ai
+vec_vsububm(vector signed char a, vector signed char b)
+{
+ return a - b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vsububm(vector bool char a, vector signed char b)
+{
+ return (vector signed char)a - b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vsububm(vector signed char a, vector bool char b)
+{
+ return a - (vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsububm(vector unsigned char a, vector unsigned char b)
+{
+ return a - b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsububm(vector bool char a, vector unsigned char b)
+{
+ return (vector unsigned char)a - b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsububm(vector unsigned char a, vector bool char b)
+{
+ return a - (vector unsigned char)b;
+}
+
+/* vec_vsubuhm */
+
+#define __builtin_altivec_vsubuhm vec_vsubuhm
+
+static vector short __ATTRS_o_ai
+vec_vsubuhm(vector short a, vector short b)
+{
+ return a - b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vsubuhm(vector bool short a, vector short b)
+{
+ return (vector short)a - b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vsubuhm(vector short a, vector bool short b)
+{
+ return a - (vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsubuhm(vector unsigned short a, vector unsigned short b)
+{
+ return a - b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsubuhm(vector bool short a, vector unsigned short b)
+{
+ return (vector unsigned short)a - b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsubuhm(vector unsigned short a, vector bool short b)
+{
+ return a - (vector unsigned short)b;
+}
+
+/* vec_vsubuwm */
+
+#define __builtin_altivec_vsubuwm vec_vsubuwm
+
+static vector int __ATTRS_o_ai
+vec_vsubuwm(vector int a, vector int b)
+{
+ return a - b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vsubuwm(vector bool int a, vector int b)
+{
+ return (vector int)a - b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vsubuwm(vector int a, vector bool int b)
+{
+ return a - (vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsubuwm(vector unsigned int a, vector unsigned int b)
+{
+ return a - b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsubuwm(vector bool int a, vector unsigned int b)
+{
+ return (vector unsigned int)a - b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsubuwm(vector unsigned int a, vector bool int b)
+{
+ return a - (vector unsigned int)b;
+}
+
+/* vec_vsubfp */
+
+#define __builtin_altivec_vsubfp vec_vsubfp
+
+static vector float __attribute__((__always_inline__))
+vec_vsubfp(vector float a, vector float b)
+{
+ return a - b;
+}
+
+/* vec_subc */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_subc(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vsubcuw(a, b);
+}
+
+/* vec_vsubcuw */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vsubcuw(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vsubcuw(a, b);
+}
+
+/* vec_subs */
+
+static vector signed char __ATTRS_o_ai
+vec_subs(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vsubsbs(a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_subs(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vsubsbs((vector signed char)a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_subs(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vsubsbs(a, (vector signed char)b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_subs(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vsububs(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_subs(vector bool char a, vector unsigned char b)
+{
+ return __builtin_altivec_vsububs((vector unsigned char)a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_subs(vector unsigned char a, vector bool char b)
+{
+ return __builtin_altivec_vsububs(a, (vector unsigned char)b);
+}
+
+static vector short __ATTRS_o_ai
+vec_subs(vector short a, vector short b)
+{
+ return __builtin_altivec_vsubshs(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_subs(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vsubshs((vector short)a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_subs(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vsubshs(a, (vector short)b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_subs(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vsubuhs(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_subs(vector bool short a, vector unsigned short b)
+{
+ return __builtin_altivec_vsubuhs((vector unsigned short)a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_subs(vector unsigned short a, vector bool short b)
+{
+ return __builtin_altivec_vsubuhs(a, (vector unsigned short)b);
+}
+
+static vector int __ATTRS_o_ai
+vec_subs(vector int a, vector int b)
+{
+ return __builtin_altivec_vsubsws(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_subs(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vsubsws((vector int)a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_subs(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vsubsws(a, (vector int)b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_subs(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vsubuws(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_subs(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vsubuws((vector unsigned int)a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_subs(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vsubuws(a, (vector unsigned int)b);
+}
+
+/* vec_vsubsbs */
+
+static vector signed char __ATTRS_o_ai
+vec_vsubsbs(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vsubsbs(a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vsubsbs(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vsubsbs((vector signed char)a, b);
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vsubsbs(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vsubsbs(a, (vector signed char)b);
+}
+
+/* vec_vsububs */
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsububs(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vsububs(a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsububs(vector bool char a, vector unsigned char b)
+{
+ return __builtin_altivec_vsububs((vector unsigned char)a, b);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vsububs(vector unsigned char a, vector bool char b)
+{
+ return __builtin_altivec_vsububs(a, (vector unsigned char)b);
+}
+
+/* vec_vsubshs */
+
+static vector short __ATTRS_o_ai
+vec_vsubshs(vector short a, vector short b)
+{
+ return __builtin_altivec_vsubshs(a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsubshs(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vsubshs((vector short)a, b);
+}
+
+static vector short __ATTRS_o_ai
+vec_vsubshs(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vsubshs(a, (vector short)b);
+}
+
+/* vec_vsubuhs */
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsubuhs(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vsubuhs(a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsubuhs(vector bool short a, vector unsigned short b)
+{
+ return __builtin_altivec_vsubuhs((vector unsigned short)a, b);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vsubuhs(vector unsigned short a, vector bool short b)
+{
+ return __builtin_altivec_vsubuhs(a, (vector unsigned short)b);
+}
+
+/* vec_vsubsws */
+
+static vector int __ATTRS_o_ai
+vec_vsubsws(vector int a, vector int b)
+{
+ return __builtin_altivec_vsubsws(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsubsws(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vsubsws((vector int)a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_vsubsws(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vsubsws(a, (vector int)b);
+}
+
+/* vec_vsubuws */
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsubuws(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vsubuws(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsubuws(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vsubuws((vector unsigned int)a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vsubuws(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vsubuws(a, (vector unsigned int)b);
+}
+
+/* vec_sum4s */
+
+static vector int __ATTRS_o_ai
+vec_sum4s(vector signed char a, vector int b)
+{
+ return __builtin_altivec_vsum4sbs(a, b);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_sum4s(vector unsigned char a, vector unsigned int b)
+{
+ return __builtin_altivec_vsum4ubs(a, b);
+}
+
+static vector int __ATTRS_o_ai
+vec_sum4s(vector signed short a, vector int b)
+{
+ return __builtin_altivec_vsum4shs(a, b);
+}
+
+/* vec_vsum4sbs */
+
+static vector int __attribute__((__always_inline__))
+vec_vsum4sbs(vector signed char a, vector int b)
+{
+ return __builtin_altivec_vsum4sbs(a, b);
+}
+
+/* vec_vsum4ubs */
+
+static vector unsigned int __attribute__((__always_inline__))
+vec_vsum4ubs(vector unsigned char a, vector unsigned int b)
+{
+ return __builtin_altivec_vsum4ubs(a, b);
+}
+
+/* vec_vsum4shs */
+
+static vector int __attribute__((__always_inline__))
+vec_vsum4shs(vector signed short a, vector int b)
+{
+ return __builtin_altivec_vsum4shs(a, b);
+}
+
+/* vec_sum2s */
+
+static vector signed int __attribute__((__always_inline__))
+vec_sum2s(vector int a, vector int b)
+{
+ return __builtin_altivec_vsum2sws(a, b);
+}
+
+/* vec_vsum2sws */
+
+static vector signed int __attribute__((__always_inline__))
+vec_vsum2sws(vector int a, vector int b)
+{
+ return __builtin_altivec_vsum2sws(a, b);
+}
+
+/* vec_sums */
+
+static vector signed int __attribute__((__always_inline__))
+vec_sums(vector signed int a, vector signed int b)
+{
+ return __builtin_altivec_vsumsws(a, b);
+}
+
+/* vec_vsumsws */
+
+static vector signed int __attribute__((__always_inline__))
+vec_vsumsws(vector signed int a, vector signed int b)
+{
+ return __builtin_altivec_vsumsws(a, b);
+}
+
+/* vec_trunc */
+
+static vector float __attribute__((__always_inline__))
+vec_trunc(vector float a)
+{
+ return __builtin_altivec_vrfiz(a);
+}
+
+/* vec_vrfiz */
+
+static vector float __attribute__((__always_inline__))
+vec_vrfiz(vector float a)
+{
+ return __builtin_altivec_vrfiz(a);
+}
+
+/* vec_unpackh */
+
+static vector short __ATTRS_o_ai
+vec_unpackh(vector signed char a)
+{
+ return __builtin_altivec_vupkhsb((vector char)a);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_unpackh(vector bool char a)
+{
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)a);
+}
+
+static vector int __ATTRS_o_ai
+vec_unpackh(vector short a)
+{
+ return __builtin_altivec_vupkhsh(a);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_unpackh(vector bool short a)
+{
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)a);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_unpackh(vector pixel a)
+{
+ return (vector unsigned int)__builtin_altivec_vupkhsh((vector short)a);
+}
+
+/* vec_vupkhsb */
+
+static vector short __ATTRS_o_ai
+vec_vupkhsb(vector signed char a)
+{
+ return __builtin_altivec_vupkhsb((vector char)a);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vupkhsb(vector bool char a)
+{
+ return (vector bool short)__builtin_altivec_vupkhsb((vector char)a);
+}
+
+/* vec_vupkhsh */
+
+static vector int __ATTRS_o_ai
+vec_vupkhsh(vector short a)
+{
+ return __builtin_altivec_vupkhsh(a);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vupkhsh(vector bool short a)
+{
+ return (vector bool int)__builtin_altivec_vupkhsh((vector short)a);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vupkhsh(vector pixel a)
+{
+ return (vector unsigned int)__builtin_altivec_vupkhsh((vector short)a);
+}
+
+/* vec_unpackl */
+
+static vector short __ATTRS_o_ai
+vec_unpackl(vector signed char a)
+{
+ return __builtin_altivec_vupklsb((vector char)a);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_unpackl(vector bool char a)
+{
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)a);
+}
+
+static vector int __ATTRS_o_ai
+vec_unpackl(vector short a)
+{
+ return __builtin_altivec_vupklsh(a);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_unpackl(vector bool short a)
+{
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)a);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_unpackl(vector pixel a)
+{
+ return (vector unsigned int)__builtin_altivec_vupklsh((vector short)a);
+}
+
+/* vec_vupklsb */
+
+static vector short __ATTRS_o_ai
+vec_vupklsb(vector signed char a)
+{
+ return __builtin_altivec_vupklsb((vector char)a);
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vupklsb(vector bool char a)
+{
+ return (vector bool short)__builtin_altivec_vupklsb((vector char)a);
+}
+
+/* vec_vupklsh */
+
+static vector int __ATTRS_o_ai
+vec_vupklsh(vector short a)
+{
+ return __builtin_altivec_vupklsh(a);
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vupklsh(vector bool short a)
+{
+ return (vector bool int)__builtin_altivec_vupklsh((vector short)a);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vupklsh(vector pixel a)
+{
+ return (vector unsigned int)__builtin_altivec_vupklsh((vector short)a);
+}
+
+/* vec_xor */
+
+#define __builtin_altivec_vxor vec_xor
+
+static vector signed char __ATTRS_o_ai
+vec_xor(vector signed char a, vector signed char b)
+{
+ return a ^ b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_xor(vector bool char a, vector signed char b)
+{
+ return (vector signed char)a ^ b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_xor(vector signed char a, vector bool char b)
+{
+ return a ^ (vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_xor(vector unsigned char a, vector unsigned char b)
+{
+ return a ^ b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_xor(vector bool char a, vector unsigned char b)
+{
+ return (vector unsigned char)a ^ b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_xor(vector unsigned char a, vector bool char b)
+{
+ return a ^ (vector unsigned char)b;
+}
+
+static vector bool char __ATTRS_o_ai
+vec_xor(vector bool char a, vector bool char b)
+{
+ return a ^ b;
+}
+
+static vector short __ATTRS_o_ai
+vec_xor(vector short a, vector short b)
+{
+ return a ^ b;
+}
+
+static vector short __ATTRS_o_ai
+vec_xor(vector bool short a, vector short b)
+{
+ return (vector short)a ^ b;
+}
+
+static vector short __ATTRS_o_ai
+vec_xor(vector short a, vector bool short b)
+{
+ return a ^ (vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_xor(vector unsigned short a, vector unsigned short b)
+{
+ return a ^ b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_xor(vector bool short a, vector unsigned short b)
+{
+ return (vector unsigned short)a ^ b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_xor(vector unsigned short a, vector bool short b)
+{
+ return a ^ (vector unsigned short)b;
+}
+
+static vector bool short __ATTRS_o_ai
+vec_xor(vector bool short a, vector bool short b)
+{
+ return a ^ b;
+}
+
+static vector int __ATTRS_o_ai
+vec_xor(vector int a, vector int b)
+{
+ return a ^ b;
+}
+
+static vector int __ATTRS_o_ai
+vec_xor(vector bool int a, vector int b)
+{
+ return (vector int)a ^ b;
+}
+
+static vector int __ATTRS_o_ai
+vec_xor(vector int a, vector bool int b)
+{
+ return a ^ (vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_xor(vector unsigned int a, vector unsigned int b)
+{
+ return a ^ b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_xor(vector bool int a, vector unsigned int b)
+{
+ return (vector unsigned int)a ^ b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_xor(vector unsigned int a, vector bool int b)
+{
+ return a ^ (vector unsigned int)b;
+}
+
+static vector bool int __ATTRS_o_ai
+vec_xor(vector bool int a, vector bool int b)
+{
+ return a ^ b;
+}
+
+static vector float __ATTRS_o_ai
+vec_xor(vector float a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a ^ (vector unsigned int)b;
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_xor(vector bool int a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a ^ (vector unsigned int)b;
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_xor(vector float a, vector bool int b)
+{
+ vector unsigned int res = (vector unsigned int)a ^ (vector unsigned int)b;
+ return (vector float)res;
+}
+
+/* vec_vxor */
+
+static vector signed char __ATTRS_o_ai
+vec_vxor(vector signed char a, vector signed char b)
+{
+ return a ^ b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vxor(vector bool char a, vector signed char b)
+{
+ return (vector signed char)a ^ b;
+}
+
+static vector signed char __ATTRS_o_ai
+vec_vxor(vector signed char a, vector bool char b)
+{
+ return a ^ (vector signed char)b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vxor(vector unsigned char a, vector unsigned char b)
+{
+ return a ^ b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vxor(vector bool char a, vector unsigned char b)
+{
+ return (vector unsigned char)a ^ b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_vxor(vector unsigned char a, vector bool char b)
+{
+ return a ^ (vector unsigned char)b;
+}
+
+static vector bool char __ATTRS_o_ai
+vec_vxor(vector bool char a, vector bool char b)
+{
+ return a ^ b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vxor(vector short a, vector short b)
+{
+ return a ^ b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vxor(vector bool short a, vector short b)
+{
+ return (vector short)a ^ b;
+}
+
+static vector short __ATTRS_o_ai
+vec_vxor(vector short a, vector bool short b)
+{
+ return a ^ (vector short)b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vxor(vector unsigned short a, vector unsigned short b)
+{
+ return a ^ b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vxor(vector bool short a, vector unsigned short b)
+{
+ return (vector unsigned short)a ^ b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_vxor(vector unsigned short a, vector bool short b)
+{
+ return a ^ (vector unsigned short)b;
+}
+
+static vector bool short __ATTRS_o_ai
+vec_vxor(vector bool short a, vector bool short b)
+{
+ return a ^ b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vxor(vector int a, vector int b)
+{
+ return a ^ b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vxor(vector bool int a, vector int b)
+{
+ return (vector int)a ^ b;
+}
+
+static vector int __ATTRS_o_ai
+vec_vxor(vector int a, vector bool int b)
+{
+ return a ^ (vector int)b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vxor(vector unsigned int a, vector unsigned int b)
+{
+ return a ^ b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vxor(vector bool int a, vector unsigned int b)
+{
+ return (vector unsigned int)a ^ b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_vxor(vector unsigned int a, vector bool int b)
+{
+ return a ^ (vector unsigned int)b;
+}
+
+static vector bool int __ATTRS_o_ai
+vec_vxor(vector bool int a, vector bool int b)
+{
+ return a ^ b;
+}
+
+static vector float __ATTRS_o_ai
+vec_vxor(vector float a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a ^ (vector unsigned int)b;
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_vxor(vector bool int a, vector float b)
+{
+ vector unsigned int res = (vector unsigned int)a ^ (vector unsigned int)b;
+ return (vector float)res;
+}
+
+static vector float __ATTRS_o_ai
+vec_vxor(vector float a, vector bool int b)
+{
+ vector unsigned int res = (vector unsigned int)a ^ (vector unsigned int)b;
+ return (vector float)res;
+}
+
+/* ------------------------ extensions for CBEA ----------------------------- */
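+/*
+ * Cell Broadband Engine Architecture (CBEA) additions: scalar element
+ * access (vec_extract, vec_insert), the unaligned left/right load and
+ * store families (vec_lvlx/vec_lvrx, vec_stvlx/vec_stvrx and their
+ * cache-hint "l" variants), and the scalar-to-vector helpers
+ * (vec_promote, vec_splats).
+ */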
+
+/* vec_extract */
+
+static signed char __ATTRS_o_ai
+vec_extract(vector signed char a, int b)
+{
+ return a[b];
+}
+
+static unsigned char __ATTRS_o_ai
+vec_extract(vector unsigned char a, int b)
+{
+ return a[b];
+}
+
+static short __ATTRS_o_ai
+vec_extract(vector short a, int b)
+{
+ return a[b];
+}
+
+static unsigned short __ATTRS_o_ai
+vec_extract(vector unsigned short a, int b)
+{
+ return a[b];
+}
+
+static int __ATTRS_o_ai
+vec_extract(vector int a, int b)
+{
+ return a[b];
+}
+
+static unsigned int __ATTRS_o_ai
+vec_extract(vector unsigned int a, int b)
+{
+ return a[b];
+}
+
+static float __ATTRS_o_ai
+vec_extract(vector float a, int b)
+{
+ return a[b];
+}
+
+/* vec_insert */
+
+static vector signed char __ATTRS_o_ai
+vec_insert(signed char a, vector signed char b, int c)
+{
+ b[c] = a;
+ return b;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_insert(unsigned char a, vector unsigned char b, int c)
+{
+ b[c] = a;
+ return b;
+}
+
+static vector short __ATTRS_o_ai
+vec_insert(short a, vector short b, int c)
+{
+ b[c] = a;
+ return b;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_insert(unsigned short a, vector unsigned short b, int c)
+{
+ b[c] = a;
+ return b;
+}
+
+static vector int __ATTRS_o_ai
+vec_insert(int a, vector int b, int c)
+{
+ b[c] = a;
+ return b;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_insert(unsigned int a, vector unsigned int b, int c)
+{
+ b[c] = a;
+ return b;
+}
+
+static vector float __ATTRS_o_ai
+vec_insert(float a, vector float b, int c)
+{
+ b[c] = a;
+ return b;
+}
+
+/* vec_lvlx */
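+/*
+ * Unaligned "load left": vec_ld fetches the aligned quadword containing
+ * the effective address, and vec_perm shifts it against a zero vector
+ * using the alignment mask from vec_lvsl, leaving the bytes at and after
+ * the address left-justified (and zero-padded) in the result.
+ */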
+
+static vector signed char __ATTRS_o_ai
+vec_lvlx(int a, const signed char *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector signed char)(0),
+ vec_lvsl(a, b));
+}
+
+static vector signed char __ATTRS_o_ai
+vec_lvlx(int a, const vector signed char *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector signed char)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvlx(int a, const unsigned char *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector unsigned char)(0),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvlx(int a, const vector unsigned char *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector unsigned char)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_lvlx(int a, const vector bool char *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector bool char)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvlx(int a, const short *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector short)(0),
+ vec_lvsl(a, b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvlx(int a, const vector short *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector short)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvlx(int a, const unsigned short *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector unsigned short)(0),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvlx(int a, const vector unsigned short *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector unsigned short)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_lvlx(int a, const vector bool short *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector bool short)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector pixel __ATTRS_o_ai
+vec_lvlx(int a, const vector pixel *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector pixel)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector int __ATTRS_o_ai
+vec_lvlx(int a, const int *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector int)(0),
+ vec_lvsl(a, b));
+}
+
+static vector int __ATTRS_o_ai
+vec_lvlx(int a, const vector int *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector int)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvlx(int a, const unsigned int *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector unsigned int)(0),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvlx(int a, const vector unsigned int *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector unsigned int)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_lvlx(int a, const vector bool int *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector bool int)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector float __ATTRS_o_ai
+vec_lvlx(int a, const float *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector float)(0),
+ vec_lvsl(a, b));
+}
+
+static vector float __ATTRS_o_ai
+vec_lvlx(int a, const vector float *b)
+{
+ return vec_perm(vec_ld(a, b),
+ (vector float)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+/* vec_lvlxl */
+
+static vector signed char __ATTRS_o_ai
+vec_lvlxl(int a, const signed char *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector signed char)(0),
+ vec_lvsl(a, b));
+}
+
+static vector signed char __ATTRS_o_ai
+vec_lvlxl(int a, const vector signed char *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector signed char)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvlxl(int a, const unsigned char *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector unsigned char)(0),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvlxl(int a, const vector unsigned char *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector unsigned char)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_lvlxl(int a, const vector bool char *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector bool char)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvlxl(int a, const short *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector short)(0),
+ vec_lvsl(a, b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvlxl(int a, const vector short *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector short)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvlxl(int a, const unsigned short *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector unsigned short)(0),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvlxl(int a, const vector unsigned short *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector unsigned short)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_lvlxl(int a, const vector bool short *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector bool short)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector pixel __ATTRS_o_ai
+vec_lvlxl(int a, const vector pixel *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector pixel)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector int __ATTRS_o_ai
+vec_lvlxl(int a, const int *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector int)(0),
+ vec_lvsl(a, b));
+}
+
+static vector int __ATTRS_o_ai
+vec_lvlxl(int a, const vector int *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector int)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvlxl(int a, const unsigned int *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector unsigned int)(0),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvlxl(int a, const vector unsigned int *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector unsigned int)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_lvlxl(int a, const vector bool int *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector bool int)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector float __ATTRS_o_ai
+vec_lvlxl(int a, const float *b)
+{
+ return vec_perm(vec_ldl(a, b),
+ (vector float)(0),
+ vec_lvsl(a, b));
+}
+
+static vector float __ATTRS_o_ai
+vec_lvlxl(int a, const vector float *b)

+{
+ return vec_perm(vec_ldl(a, b),
+ (vector float)(0),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+/* vec_lvrx */
+
+static vector signed char __ATTRS_o_ai
+vec_lvrx(int a, const signed char *b)
+{
+ return vec_perm((vector signed char)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector signed char __ATTRS_o_ai
+vec_lvrx(int a, const vector signed char *b)
+{
+ return vec_perm((vector signed char)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvrx(int a, const unsigned char *b)
+{
+ return vec_perm((vector unsigned char)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvrx(int a, const vector unsigned char *b)
+{
+ return vec_perm((vector unsigned char)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_lvrx(int a, const vector bool char *b)
+{
+ return vec_perm((vector bool char)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvrx(int a, const short *b)
+{
+ return vec_perm((vector short)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvrx(int a, const vector short *b)
+{
+ return vec_perm((vector short)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvrx(int a, const unsigned short *b)
+{
+ return vec_perm((vector unsigned short)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvrx(int a, const vector unsigned short *b)
+{
+ return vec_perm((vector unsigned short)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_lvrx(int a, const vector bool short *b)
+{
+ return vec_perm((vector bool short)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector pixel __ATTRS_o_ai
+vec_lvrx(int a, const vector pixel *b)
+{
+ return vec_perm((vector pixel)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector int __ATTRS_o_ai
+vec_lvrx(int a, const int *b)
+{
+ return vec_perm((vector int)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector int __ATTRS_o_ai
+vec_lvrx(int a, const vector int *b)
+{
+ return vec_perm((vector int)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvrx(int a, const unsigned int *b)
+{
+ return vec_perm((vector unsigned int)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvrx(int a, const vector unsigned int *b)
+{
+ return vec_perm((vector unsigned int)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_lvrx(int a, const vector bool int *b)
+{
+ return vec_perm((vector bool int)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector float __ATTRS_o_ai
+vec_lvrx(int a, const float *b)
+{
+ return vec_perm((vector float)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector float __ATTRS_o_ai
+vec_lvrx(int a, const vector float *b)
+{
+ return vec_perm((vector float)(0),
+ vec_ld(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+/* vec_lvrxl */
+
+static vector signed char __ATTRS_o_ai
+vec_lvrxl(int a, const signed char *b)
+{
+ return vec_perm((vector signed char)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector signed char __ATTRS_o_ai
+vec_lvrxl(int a, const vector signed char *b)
+{
+ return vec_perm((vector signed char)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvrxl(int a, const unsigned char *b)
+{
+ return vec_perm((vector unsigned char)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_lvrxl(int a, const vector unsigned char *b)
+{
+ return vec_perm((vector unsigned char)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool char __ATTRS_o_ai
+vec_lvrxl(int a, const vector bool char *b)
+{
+ return vec_perm((vector bool char)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvrxl(int a, const short *b)
+{
+ return vec_perm((vector short)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector short __ATTRS_o_ai
+vec_lvrxl(int a, const vector short *b)
+{
+ return vec_perm((vector short)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvrxl(int a, const unsigned short *b)
+{
+ return vec_perm((vector unsigned short)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_lvrxl(int a, const vector unsigned short *b)
+{
+ return vec_perm((vector unsigned short)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool short __ATTRS_o_ai
+vec_lvrxl(int a, const vector bool short *b)
+{
+ return vec_perm((vector bool short)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector pixel __ATTRS_o_ai
+vec_lvrxl(int a, const vector pixel *b)
+{
+ return vec_perm((vector pixel)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector int __ATTRS_o_ai
+vec_lvrxl(int a, const int *b)
+{
+ return vec_perm((vector int)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector int __ATTRS_o_ai
+vec_lvrxl(int a, const vector int *b)
+{
+ return vec_perm((vector int)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvrxl(int a, const unsigned int *b)
+{
+ return vec_perm((vector unsigned int)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_lvrxl(int a, const vector unsigned int *b)
+{
+ return vec_perm((vector unsigned int)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector bool int __ATTRS_o_ai
+vec_lvrxl(int a, const vector bool int *b)
+{
+ return vec_perm((vector bool int)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+static vector float __ATTRS_o_ai
+vec_lvrxl(int a, const float *b)
+{
+ return vec_perm((vector float)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, b));
+}
+
+static vector float __ATTRS_o_ai
+vec_lvrxl(int a, const vector float *b)
+{
+ return vec_perm((vector float)(0),
+ vec_ldl(a, b),
+ vec_lvsl(a, (unsigned char *)b));
+}
+
+/* vec_stvlx */
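+/*
+ * Unaligned "store left" is a read-modify-write sequence: vec_lvrx
+ * reloads the bytes of the target quadword that must be preserved,
+ * vec_perm merges them with the new data using the vec_lvsr mask, and
+ * vec_st writes the combined quadword back.
+ */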
+
+static void __ATTRS_o_ai
+vec_stvlx(vector signed char a, int b, signed char *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector signed char a, int b, vector signed char *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector unsigned char a, int b, unsigned char *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector unsigned char a, int b, vector unsigned char *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector bool char a, int b, vector bool char *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector short a, int b, short *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector short a, int b, vector short *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector unsigned short a, int b, unsigned short *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector unsigned short a, int b, vector unsigned short *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector bool short a, int b, vector bool short *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector pixel a, int b, vector pixel *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector int a, int b, int *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector int a, int b, vector int *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector unsigned int a, int b, unsigned int *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector unsigned int a, int b, vector unsigned int *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector bool int a, int b, vector bool int *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlx(vector float a, int b, vector float *c)
+{
+ return vec_st(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+/* vec_stvlxl */
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector signed char a, int b, signed char *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector signed char a, int b, vector signed char *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector unsigned char a, int b, unsigned char *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector unsigned char a, int b, vector unsigned char *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector bool char a, int b, vector bool char *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector short a, int b, short *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector short a, int b, vector short *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector unsigned short a, int b, unsigned short *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector unsigned short a, int b, vector unsigned short *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector bool short a, int b, vector bool short *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector pixel a, int b, vector pixel *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector int a, int b, int *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector int a, int b, vector int *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector unsigned int a, int b, unsigned int *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector unsigned int a, int b, vector unsigned int *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector bool int a, int b, vector bool int *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvlxl(vector float a, int b, vector float *c)
+{
+ return vec_stl(vec_perm(vec_lvrx(b, c),
+ a,
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+/* vec_stvrx */
+
+static void __ATTRS_o_ai
+vec_stvrx(vector signed char a, int b, signed char *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector signed char a, int b, vector signed char *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector unsigned char a, int b, unsigned char *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector unsigned char a, int b, vector unsigned char *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector bool char a, int b, vector bool char *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector short a, int b, short *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector short a, int b, vector short *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector unsigned short a, int b, unsigned short *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector unsigned short a, int b, vector unsigned short *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector bool short a, int b, vector bool short *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector pixel a, int b, vector pixel *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector int a, int b, int *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector int a, int b, vector int *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector unsigned int a, int b, unsigned int *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector unsigned int a, int b, vector unsigned int *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector bool int a, int b, vector bool int *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrx(vector float a, int b, vector float *c)
+{
+ return vec_st(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+/* vec_stvrxl */
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector signed char a, int b, signed char *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector signed char a, int b, vector signed char *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector unsigned char a, int b, unsigned char *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector unsigned char a, int b, vector unsigned char *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector bool char a, int b, vector bool char *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector short a, int b, short *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector short a, int b, vector short *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector unsigned short a, int b, unsigned short *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector unsigned short a, int b, vector unsigned short *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector bool short a, int b, vector bool short *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector pixel a, int b, vector pixel *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector int a, int b, int *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector int a, int b, vector int *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector unsigned int a, int b, unsigned int *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector unsigned int a, int b, vector unsigned int *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector bool int a, int b, vector bool int *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+static void __ATTRS_o_ai
+vec_stvrxl(vector float a, int b, vector float *c)
+{
+ return vec_stl(vec_perm(a,
+ vec_lvlx(b, c),
+ vec_lvsr(b, (unsigned char *)c)),
+ b, c);
+}
+
+/* vec_promote */
+
+static vector signed char __ATTRS_o_ai
+vec_promote(signed char a, int b)
+{
+ vector signed char res = (vector signed char)(0);
+ res[b] = a;
+ return res;
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_promote(unsigned char a, int b)
+{
+ vector unsigned char res = (vector unsigned char)(0);
+ res[b] = a;
+ return res;
+}
+
+static vector short __ATTRS_o_ai
+vec_promote(short a, int b)
+{
+ vector short res = (vector short)(0);
+ res[b] = a;
+ return res;
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_promote(unsigned short a, int b)
+{
+ vector unsigned short res = (vector unsigned short)(0);
+ res[b] = a;
+ return res;
+}
+
+static vector int __ATTRS_o_ai
+vec_promote(int a, int b)
+{
+ vector int res = (vector int)(0);
+ res[b] = a;
+ return res;
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_promote(unsigned int a, int b)
+{
+ vector unsigned int res = (vector unsigned int)(0);
+ res[b] = a;
+ return res;
+}
+
+static vector float __ATTRS_o_ai
+vec_promote(float a, int b)
+{
+ vector float res = (vector float)(0);
+ res[b] = a;
+ return res;
+}
+
+/* vec_splats */
+
+static vector signed char __ATTRS_o_ai
+vec_splats(signed char a)
+{
+ return (vector signed char)(a);
+}
+
+static vector unsigned char __ATTRS_o_ai
+vec_splats(unsigned char a)
+{
+ return (vector unsigned char)(a);
+}
+
+static vector short __ATTRS_o_ai
+vec_splats(short a)
+{
+ return (vector short)(a);
+}
+
+static vector unsigned short __ATTRS_o_ai
+vec_splats(unsigned short a)
+{
+ return (vector unsigned short)(a);
+}
+
+static vector int __ATTRS_o_ai
+vec_splats(int a)
+{
+ return (vector int)(a);
+}
+
+static vector unsigned int __ATTRS_o_ai
+vec_splats(unsigned int a)
+{
+ return (vector unsigned int)(a);
+}
+
+static vector float __ATTRS_o_ai
+vec_splats(float a)
+{
+ return (vector float)(a);
+}
+
+/* ----------------------------- predicates --------------------------------- */
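+/*
+ * The vcmp*_p builtins return a bit of the CR6 field set by the recording
+ * form of the corresponding vector compare: __CR6_LT reports "comparison
+ * true for all elements", __CR6_EQ reports "true for no elements".  Hence
+ * vec_all_eq tests with __CR6_LT, while vec_all_ge(a, b) asks whether no
+ * element of b is greater than a.
+ */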
+
+/* vec_all_eq */
+
+static int __ATTRS_o_ai
+vec_all_eq(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector unsigned char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector bool char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector bool char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpequb_p(__CR6_LT, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector short a, vector short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_LT, a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector unsigned short a, vector unsigned short b)
+{
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector unsigned short a, vector bool short b)
+{
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector bool short a, vector short b)
+{
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector bool short a, vector unsigned short b)
+{
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector bool short a, vector bool short b)
+{
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector pixel a, vector pixel b)
+{
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_LT, (vector short)a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector int a, vector int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector bool int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_LT, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_eq(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpeqfp_p(__CR6_LT, a, b);
+}
+
+/* vec_all_ge */
+
+static int __ATTRS_o_ai
+vec_all_ge(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, (vector signed char)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector unsigned char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ,
+ (vector unsigned char)b,
+ (vector unsigned char)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector bool char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, b, (vector unsigned char)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector bool char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ,
+ (vector unsigned char)b,
+ (vector unsigned char)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector short a, vector short b)
+{
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, (vector short)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector unsigned short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ,
+ (vector unsigned short)b,
+ (vector unsigned short)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector bool short a, vector unsigned short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, b, (vector unsigned short)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector bool short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ,
+ (vector unsigned short)b,
+ (vector unsigned short)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector int a, vector int b)
+{
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, (vector int)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ,
+ (vector unsigned int)b,
+ (vector unsigned int)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, b, (vector unsigned int)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector bool int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ,
+ (vector unsigned int)b,
+ (vector unsigned int)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_ge(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT, a, b);
+}
+
+/* vec_all_gt */
+
+static int __ATTRS_o_ai
+vec_all_gt(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, a, (vector signed char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector unsigned char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, a, (vector unsigned char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT,
+ (vector unsigned char)a,
+ (vector unsigned char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector bool char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector bool char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT,
+ (vector unsigned char)a,
+ (vector unsigned char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector short a, vector short b)
+{
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector unsigned short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, a, (vector unsigned short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT,
+ (vector unsigned short)a,
+ (vector unsigned short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector bool short a, vector unsigned short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector bool short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT,
+ (vector unsigned short)a,
+ (vector unsigned short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector int a, vector int b)
+{
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, a, (vector unsigned int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT,
+ (vector unsigned int)a,
+ (vector unsigned int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector bool int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT,
+ (vector unsigned int)a,
+ (vector unsigned int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_gt(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgtfp_p(__CR6_LT, a, b);
+}
+
+/* vec_all_in */
+
+static int __attribute__((__always_inline__))
+vec_all_in(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpbfp_p(__CR6_EQ, a, b);
+}
+
+/* vec_all_le */
+
+static int __ATTRS_o_ai
+vec_all_le(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ, a, (vector signed char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector unsigned char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, a, (vector unsigned char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ,
+ (vector unsigned char)a,
+ (vector unsigned char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector bool char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ, (vector unsigned char)a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector bool char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ,
+ (vector unsigned char)a,
+ (vector unsigned char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector short a, vector short b)
+{
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ, a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector unsigned short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, a, (vector unsigned short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ,
+ (vector unsigned short)a,
+ (vector unsigned short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector bool short a, vector unsigned short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ, (vector unsigned short)a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector bool short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ,
+ (vector unsigned short)a,
+ (vector unsigned short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector int a, vector int b)
+{
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ, a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, a, (vector unsigned int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ,
+ (vector unsigned int)a,
+ (vector unsigned int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ, (vector unsigned int)a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector bool int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ,
+ (vector unsigned int)a,
+ (vector unsigned int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_le(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT, b, a);
+}
+
+/* vec_all_lt */
+
+static int __ATTRS_o_ai
+vec_all_lt(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT, (vector signed char)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector unsigned char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, (vector unsigned char)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT,
+ (vector unsigned char)b,
+ (vector unsigned char)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector bool char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT, b, (vector unsigned char)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector bool char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT,
+ (vector unsigned char)b,
+ (vector unsigned char)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector short a, vector short b)
+{
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT, (vector short)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector unsigned short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, (vector unsigned short)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT,
+ (vector unsigned short)b,
+ (vector unsigned short)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector bool short a, vector unsigned short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT, b, (vector unsigned short)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector bool short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT,
+ (vector unsigned short)b,
+ (vector unsigned short)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector int a, vector int b)
+{
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT, (vector int)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, (vector unsigned int)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT,
+ (vector unsigned int)b,
+ (vector unsigned int)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT, b, (vector unsigned int)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector bool int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT,
+ (vector unsigned int)b,
+ (vector unsigned int)a);
+}
+
+static int __ATTRS_o_ai
+vec_all_lt(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgtfp_p(__CR6_LT, b, a);
+}
+
+/* vec_all_nan */
+
+static int __attribute__((__always_inline__))
+vec_all_nan(vector float a)
+{
+ return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, a, a);
+}
+
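vec_all_nan works because a NaN compares unequal to itself: vcmpeqfp(a, a) is all-false exactly when every lane is a NaN, and vec_all_numeric / vec_any_nan elsewhere in this header reuse the same self-comparison with different CR6 codes. A small sketch, assuming a compiler that accepts AltiVec vector literals:

    vector float v = (vector float){__builtin_nanf(""), __builtin_nanf(""),
                                    __builtin_nanf(""), __builtin_nanf("")};
    int every_lane_is_nan = vec_all_nan(v);      /* 1 */
    int some_lane_is_nan  = vec_any_nan(v);      /* 1: same compare, __CR6_LT_REV */
    int all_numeric       = vec_all_numeric(v);  /* 0: needs vcmpeqfp all-true    */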
+/* vec_all_ne */
+
+static int __ATTRS_o_ai
+vec_all_ne(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector unsigned char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector bool char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector bool char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpequb_p(__CR6_EQ, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector short a, vector short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ, a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector unsigned short a, vector unsigned short b)
+{
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector unsigned short a, vector bool short b)
+{
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector bool short a, vector short b)
+{
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector bool short a, vector unsigned short b)
+{
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector bool short a, vector bool short b)
+{
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector pixel a, vector pixel b)
+{
+ return
+ __builtin_altivec_vcmpequh_p(__CR6_EQ, (vector short)a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector int a, vector int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector bool int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_all_ne(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpeqfp_p(__CR6_EQ, a, b);
+}
+
+/* vec_all_nge */
+
+static int __attribute__((__always_inline__))
+vec_all_nge(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ, a, b);
+}
+
+/* vec_all_ngt */
+
+static int __attribute__((__always_inline__))
+vec_all_ngt(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, a, b);
+}
+
+/* vec_all_nle */
+
+static int __attribute__((__always_inline__))
+vec_all_nle(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ, b, a);
+}
+
+/* vec_all_nlt */
+
+static int __attribute__((__always_inline__))
+vec_all_nlt(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgtfp_p(__CR6_EQ, b, a);
+}
+
+/* vec_all_numeric */
+
+static int __attribute__((__always_inline__))
+vec_all_numeric(vector float a)
+{
+ return __builtin_altivec_vcmpeqfp_p(__CR6_LT, a, a);
+}
+
+/* vec_any_eq */
+
+static int __ATTRS_o_ai
+vec_any_eq(vector signed char a, vector signed char b)
+{
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector signed char a, vector bool char b)
+{
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector unsigned char a, vector unsigned char b)
+{
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector unsigned char a, vector bool char b)
+{
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector bool char a, vector signed char b)
+{
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector bool char a, vector unsigned char b)
+{
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector bool char a, vector bool char b)
+{
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_EQ_REV, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector short a, vector short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV, a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV,
+ (vector short)a,
+ (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector unsigned short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV,
+ (vector short)a,
+ (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV,
+ (vector short)a,
+ (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector bool short a, vector unsigned short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV,
+ (vector short)a,
+ (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector bool short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV,
+ (vector short)a,
+ (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector pixel a, vector pixel b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_EQ_REV,
+ (vector short)a,
+ (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector int a, vector int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector unsigned int a, vector unsigned int b)
+{
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector unsigned int a, vector bool int b)
+{
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector bool int a, vector int b)
+{
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector bool int a, vector unsigned int b)
+{
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector bool int a, vector bool int b)
+{
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_EQ_REV, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_eq(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, a, b);
+}
+
+/* vec_any_ge */
+
+static int __ATTRS_o_ai
+vec_any_ge(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, (vector signed char)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector unsigned char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV,
+ (vector unsigned char)b,
+ (vector unsigned char)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector bool char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, b, (vector unsigned char)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector bool char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV,
+ (vector unsigned char)b,
+ (vector unsigned char)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector short a, vector short b)
+{
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, (vector short)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector unsigned short a, vector bool short b)
+{
+ return
+ __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV,
+ (vector unsigned short)b,
+ (vector unsigned short)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector bool short a, vector unsigned short b)
+{
+ return
+ __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, b, (vector unsigned short)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector bool short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV,
+ (vector unsigned short)b,
+ (vector unsigned short)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector int a, vector int b)
+{
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, (vector int)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV,
+ (vector unsigned int)b,
+ (vector unsigned int)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, b, (vector unsigned int)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector bool int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV,
+ (vector unsigned int)b,
+ (vector unsigned int)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_ge(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, a, b);
+}
+
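The vec_any_* overloads mirror the vec_all_* set one for one; the only change is the *_REV CR6 code, which the predicate builtins report as the complement of the corresponding non-REV bit. Each "any" predicate is therefore the negation of the matching "all" predicate on the same underlying compare, e.g. vec_any_ge(a, b) == !vec_all_lt(a, b). A brief sketch, assuming an AltiVec target:

    vector signed int a = vec_splat_s32(2);
    vector signed int b = vec_splat_s32(5);
    int any_ge = vec_any_ge(a, b);   /* 0: every lane of a is below b        */
    int all_lt = vec_all_lt(a, b);   /* 1: the complementary predicate holds */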
+/* vec_any_gt */
+
+static int __ATTRS_o_ai
+vec_any_gt(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, a, (vector signed char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector unsigned char a, vector bool char b)
+{
+ return
+ __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, a, (vector unsigned char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV,
+ (vector unsigned char)a,
+ (vector unsigned char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector bool char a, vector unsigned char b)
+{
+ return
+ __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector bool char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV,
+ (vector unsigned char)a,
+ (vector unsigned char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector short a, vector short b)
+{
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector unsigned short a, vector bool short b)
+{
+ return
+ __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, a, (vector unsigned short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV,
+ (vector unsigned short)a,
+ (vector unsigned short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector bool short a, vector unsigned short b)
+{
+ return
+ __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector bool short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV,
+ (vector unsigned short)a,
+ (vector unsigned short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector int a, vector int b)
+{
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, a, (vector unsigned int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV,
+ (vector unsigned int)a,
+ (vector unsigned int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector bool int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV,
+ (vector unsigned int)a,
+ (vector unsigned int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_gt(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, a, b);
+}
+
+/* vec_any_le */
+
+static int __ATTRS_o_ai
+vec_any_le(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtsb_p(__CR6_LT_REV, a, (vector signed char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector unsigned char a, vector bool char b)
+{
+ return
+ __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, a, (vector unsigned char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV,
+ (vector unsigned char)a,
+ (vector unsigned char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector bool char a, vector unsigned char b)
+{
+ return
+ __builtin_altivec_vcmpgtub_p(__CR6_LT_REV, (vector unsigned char)a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector bool char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_LT_REV,
+ (vector unsigned char)a,
+ (vector unsigned char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector short a, vector short b)
+{
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtsh_p(__CR6_LT_REV, a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector unsigned short a, vector bool short b)
+{
+ return
+ __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, a, (vector unsigned short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV,
+ (vector unsigned short)a,
+ (vector unsigned short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector bool short a, vector unsigned short b)
+{
+ return
+ __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV, (vector unsigned short)a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector bool short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_LT_REV,
+ (vector unsigned short)a,
+ (vector unsigned short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector int a, vector int b)
+{
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtsw_p(__CR6_LT_REV, a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, a, (vector unsigned int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV,
+ (vector unsigned int)a,
+ (vector unsigned int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV, (vector unsigned int)a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector bool int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_LT_REV,
+ (vector unsigned int)a,
+ (vector unsigned int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_le(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgefp_p(__CR6_EQ_REV, b, a);
+}
+
+/* vec_any_lt */
+
+static int __ATTRS_o_ai
+vec_any_lt(vector signed char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector signed char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtsb_p(__CR6_EQ_REV, (vector signed char)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector unsigned char a, vector unsigned char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector unsigned char a, vector bool char b)
+{
+ return
+ __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, (vector unsigned char)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector bool char a, vector signed char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV,
+ (vector unsigned char)b,
+ (vector unsigned char)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector bool char a, vector unsigned char b)
+{
+ return
+ __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV, b, (vector unsigned char)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector bool char a, vector bool char b)
+{
+ return __builtin_altivec_vcmpgtub_p(__CR6_EQ_REV,
+ (vector unsigned char)b,
+ (vector unsigned char)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector short a, vector short b)
+{
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtsh_p(__CR6_EQ_REV, (vector short)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector unsigned short a, vector bool short b)
+{
+ return
+ __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, (vector unsigned short)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV,
+ (vector unsigned short)b,
+ (vector unsigned short)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector bool short a, vector unsigned short b)
+{
+ return
+ __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV, b, (vector unsigned short)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector bool short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpgtuh_p(__CR6_EQ_REV,
+ (vector unsigned short)b,
+ (vector unsigned short)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector int a, vector int b)
+{
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtsw_p(__CR6_EQ_REV, (vector int)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector unsigned int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector unsigned int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, (vector unsigned int)b, a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector bool int a, vector int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV,
+ (vector unsigned int)b,
+ (vector unsigned int)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector bool int a, vector unsigned int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV, b, (vector unsigned int)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector bool int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpgtuw_p(__CR6_EQ_REV,
+ (vector unsigned int)b,
+ (vector unsigned int)a);
+}
+
+static int __ATTRS_o_ai
+vec_any_lt(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgtfp_p(__CR6_EQ_REV, b, a);
+}
+
+/* vec_any_nan */
+
+static int __attribute__((__always_inline__))
+vec_any_nan(vector float a)
+{
+ return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, a, a);
+}
+
+/* vec_any_ne */
+
+static int __ATTRS_o_ai
+vec_any_ne(vector signed char a, vector signed char b)
+{
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector signed char a, vector bool char b)
+{
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector unsigned char a, vector unsigned char b)
+{
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector unsigned char a, vector bool char b)
+{
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector bool char a, vector signed char b)
+{
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector bool char a, vector unsigned char b)
+{
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector bool char a, vector bool char b)
+{
+ return
+ __builtin_altivec_vcmpequb_p(__CR6_LT_REV, (vector char)a, (vector char)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector short a, vector short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV, a, (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector unsigned short a, vector unsigned short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV,
+ (vector short)a,
+ (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector unsigned short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV,
+ (vector short)a,
+ (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector bool short a, vector short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV,
+ (vector short)a,
+ (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector bool short a, vector unsigned short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV,
+ (vector short)a,
+ (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector bool short a, vector bool short b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV,
+ (vector short)a,
+ (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector pixel a, vector pixel b)
+{
+ return __builtin_altivec_vcmpequh_p(__CR6_LT_REV,
+ (vector short)a,
+ (vector short)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector int a, vector int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, a, b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector int a, vector bool int b)
+{
+ return __builtin_altivec_vcmpequw_p(__CR6_LT_REV, a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector unsigned int a, vector unsigned int b)
+{
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector unsigned int a, vector bool int b)
+{
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector bool int a, vector int b)
+{
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector bool int a, vector unsigned int b)
+{
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector bool int a, vector bool int b)
+{
+ return
+ __builtin_altivec_vcmpequw_p(__CR6_LT_REV, (vector int)a, (vector int)b);
+}
+
+static int __ATTRS_o_ai
+vec_any_ne(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpeqfp_p(__CR6_LT_REV, a, b);
+}
+
+/* vec_any_nge */
+
+static int __attribute__((__always_inline__))
+vec_any_nge(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, a, b);
+}
+
+/* vec_any_ngt */
+
+static int __attribute__((__always_inline__))
+vec_any_ngt(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, a, b);
+}
+
+/* vec_any_nle */
+
+static int __attribute__((__always_inline__))
+vec_any_nle(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgefp_p(__CR6_LT_REV, b, a);
+}
+
+/* vec_any_nlt */
+
+static int __attribute__((__always_inline__))
+vec_any_nlt(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpgtfp_p(__CR6_LT_REV, b, a);
+}
+
+/* vec_any_numeric */
+
+static int __attribute__((__always_inline__))
+vec_any_numeric(vector float a)
+{
+ return __builtin_altivec_vcmpeqfp_p(__CR6_EQ_REV, a, a);
+}
+
+/* vec_any_out */
+
+static int __attribute__((__always_inline__))
+vec_any_out(vector float a, vector float b)
+{
+ return __builtin_altivec_vcmpbfp_p(__CR6_EQ_REV, a, b);
+}
+
+#undef __ATTRS_o_ai
+
+#endif /* __ALTIVEC_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h b/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h
new file mode 100644
index 0000000..d165f1f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/avx2intrin.h
@@ -0,0 +1,961 @@
+/*===---- avx2intrin.h - AVX2 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avx2intrin.h> directly; include <immintrin.h> instead."
+#endif
+
+/* SSE4 Multiple Packed Sums of Absolute Difference. */
+/* (This is the 256-bit AVX2 form of the SSE4 MPSADBW operation.) */
+#define _mm256_mpsadbw_epu8(X, Y, M) __builtin_ia32_mpsadbw256((X), (Y), (M))
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_abs_epi8(__m256i a)
+{
+ return (__m256i)__builtin_ia32_pabsb256((__v32qi)a);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_abs_epi16(__m256i a)
+{
+ return (__m256i)__builtin_ia32_pabsw256((__v16hi)a);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_abs_epi32(__m256i a)
+{
+ return (__m256i)__builtin_ia32_pabsd256((__v8si)a);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packs_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_packsswb256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packs_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_packssdw256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packus_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_packuswb256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_packus_epi32(__m256i __V1, __m256i __V2)
+{
+ return (__m256i) __builtin_ia32_packusdw256((__v8si)__V1, (__v8si)__V2);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_add_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)((__v32qi)a + (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_add_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)((__v16hi)a + (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_add_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)((__v8si)a + (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_add_epi64(__m256i a, __m256i b)
+{
+ return a + b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_adds_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_paddsb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_adds_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_paddsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_adds_epu8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_paddusb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_adds_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_paddusw256((__v16hi)a, (__v16hi)b);
+}
+
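The plain add forms above compile to ordinary vector '+', which wraps modulo the element width, while the adds* forms map to the saturating builtins. A short sketch of the difference, assuming <immintrin.h> is included and AVX2 is enabled:

    __m256i x = _mm256_set1_epi8((char)200);
    __m256i y = _mm256_set1_epi8((char)100);
    __m256i wrapped   = _mm256_add_epi8(x, y);    /* each byte: (200+100) mod 256 = 44 */
    __m256i saturated = _mm256_adds_epu8(x, y);   /* each byte clamps to 255           */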
+#define _mm256_alignr_epi8(a, b, n) __extension__ ({ \
+ __m256i __a = (a); \
+ __m256i __b = (b); \
+ (__m256i)__builtin_ia32_palignr256((__v32qi)__a, (__v32qi)__b, (n)); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_and_si256(__m256i a, __m256i b)
+{
+ return a & b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_andnot_si256(__m256i a, __m256i b)
+{
+ return ~a & b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_avg_epu8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pavgb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_avg_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pavgw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_blendv_epi8(__m256i __V1, __m256i __V2, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_pblendvb256((__v32qi)__V1, (__v32qi)__V2,
+ (__v32qi)__M);
+}
+
+#define _mm256_blend_epi16(V1, V2, M) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m256i __V2 = (V2); \
+ (__m256i)__builtin_ia32_pblendw256((__v16hi)__V1, (__v16hi)__V2, (M)); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)((__v32qi)a == (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)((__v16hi)a == (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)((__v8si)a == (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpeq_epi64(__m256i a, __m256i b)
+{
+ return (__m256i)(a == b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpgt_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)((__v32qi)a > (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpgt_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)((__v16hi)a > (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpgt_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)((__v8si)a > (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cmpgt_epi64(__m256i a, __m256i b)
+{
+ return (__m256i)(a > b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hadd_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phaddw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hadd_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phaddd256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hadds_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phaddsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hsub_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phsubw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hsub_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phsubd256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_hsubs_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_phsubsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_maddubs_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaddubsw256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_madd_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaddwd256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxsb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxsd256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epu8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxub256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxuw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_max_epu32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmaxud256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminsb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminsd256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epu8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminub256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminuw256 ((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_min_epu32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pminud256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm256_movemask_epi8(__m256i a)
+{
+ return __builtin_ia32_pmovmskb256((__v32qi)a);
+}
+
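movemask collects the sign (top) bit of each of the 32 bytes into the low bits of an ordinary int, which is the usual way to move a byte-wise comparison result out of the vector domain. A sketch, assuming <immintrin.h> and an AVX2 target:

    __m256i haystack = _mm256_set1_epi8('a');
    __m256i needle   = _mm256_set1_epi8('a');
    __m256i eq    = _mm256_cmpeq_epi8(needle, haystack);
    int     mask  = _mm256_movemask_epi8(eq);          /* all 32 bits set here   */
    int     first = mask ? __builtin_ctz(mask) : -1;   /* 0: first matching byte */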
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi8_epi16(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxbw256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi8_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxbd256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi8_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxbq256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi16_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxwd256((__v8hi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi16_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxwq256((__v8hi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi32_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovsxdq256((__v4si)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu8_epi16(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxbw256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu8_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxbd256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu8_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxbq256((__v16qi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu16_epi32(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxwd256((__v8hi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu16_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxwq256((__v8hi)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepu32_epi64(__m128i __V)
+{
+ return (__m256i)__builtin_ia32_pmovzxdq256((__v4si)__V);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mul_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmuldq256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mulhrs_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmulhrsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mulhi_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmulhuw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mulhi_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pmulhw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mullo_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)((__v16hi)a * (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mullo_epi32 (__m256i a, __m256i b)
+{
+ return (__m256i)((__v8si)a * (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_mul_epu32(__m256i a, __m256i b)
+{
+ return __builtin_ia32_pmuludq256((__v8si)a, (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_or_si256(__m256i a, __m256i b)
+{
+ return a | b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sad_epu8(__m256i a, __m256i b)
+{
+ return __builtin_ia32_psadbw256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_shuffle_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_pshufb256((__v32qi)a, (__v32qi)b);
+}
+
+#define _mm256_shuffle_epi32(a, imm) __extension__ ({ \
+ __m256i __a = (a); \
+ (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)_mm256_set1_epi32(0), \
+ (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+ 4 + (((imm) & 0x03) >> 0), \
+ 4 + (((imm) & 0x0c) >> 2), \
+ 4 + (((imm) & 0x30) >> 4), \
+ 4 + (((imm) & 0xc0) >> 6)); })
+
+#define _mm256_shufflehi_epi16(a, imm) __extension__ ({ \
+ __m256i __a = (a); \
+ (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)_mm256_set1_epi16(0), \
+ 0, 1, 2, 3, \
+ 4 + (((imm) & 0x03) >> 0), \
+ 4 + (((imm) & 0x0c) >> 2), \
+ 4 + (((imm) & 0x30) >> 4), \
+ 4 + (((imm) & 0xc0) >> 6), \
+ 8, 9, 10, 11, \
+ 12 + (((imm) & 0x03) >> 0), \
+ 12 + (((imm) & 0x0c) >> 2), \
+ 12 + (((imm) & 0x30) >> 4), \
+ 12 + (((imm) & 0xc0) >> 6)); })
+
+#define _mm256_shufflelo_epi16(a, imm) __extension__ ({ \
+ __m256i __a = (a); \
+ (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)_mm256_set1_epi16(0), \
+ (imm) & 0x3,((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+ 4, 5, 6, 7, \
+ 8 + (((imm) & 0x03) >> 0), \
+ 8 + (((imm) & 0x0c) >> 2), \
+ 8 + (((imm) & 0x30) >> 4), \
+ 8 + (((imm) & 0xc0) >> 6), \
+ 12, 13, 14, 15); })
+
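In the three shuffle macros the 8-bit immediate is decoded two bits per destination element and applied to each 128-bit lane independently, which is why the index lists are built from (imm) & 0x3, ((imm) & 0xc) >> 2, and so on. For example, the immediate 0x1B reverses the four dwords inside each lane; a sketch, assuming <immintrin.h> and AVX2:

    __m256i v   = _mm256_set_epi32(7, 6, 5, 4, 3, 2, 1, 0);
    __m256i rev = _mm256_shuffle_epi32(v, 0x1B);
    /* rev holds 3,2,1,0, 7,6,5,4 in element order (low 128-bit lane first). */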
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sign_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psignb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sign_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psignw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sign_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psignd256((__v8si)a, (__v8si)b);
+}
+
+#define _mm256_slli_si256(a, count) __extension__ ({ \
+ __m256i __a = (a); \
+ (__m256i)__builtin_ia32_pslldqi256(__a, (count)*8); })
+
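The _mm256_slli_si256 macro scales the byte count to a bit count for the underlying builtin, and, matching vpslldq, the shift is applied to each 128-bit lane separately rather than across the whole 256-bit register; the _mm256_srli_si256 macro further down behaves the same way. A minimal sketch, assuming <immintrin.h> and AVX2:

    __m256i v       = _mm256_set1_epi8(1);
    __m256i shifted = _mm256_slli_si256(v, 4);  /* the low 4 bytes of EACH lane become 0 */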
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_slli_epi16(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_psllwi256((__v16hi)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sll_epi16(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_psllw256((__v16hi)a, (__v8hi)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_slli_epi32(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_pslldi256((__v8si)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sll_epi32(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_pslld256((__v8si)a, (__v4si)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_slli_epi64(__m256i a, int count)
+{
+ return __builtin_ia32_psllqi256(a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sll_epi64(__m256i a, __m128i count)
+{
+ return __builtin_ia32_psllq256(a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srai_epi16(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_psrawi256((__v16hi)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sra_epi16(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_psraw256((__v16hi)a, (__v8hi)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srai_epi32(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_psradi256((__v8si)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sra_epi32(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_psrad256((__v8si)a, (__v4si)count);
+}
+
+#define _mm256_srli_si256(a, count) __extension__ ({ \
+ __m256i __a = (a); \
+ (__m256i)__builtin_ia32_psrldqi256(__a, (count)*8); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srli_epi16(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_psrlwi256((__v16hi)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srl_epi16(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_psrlw256((__v16hi)a, (__v8hi)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srli_epi32(__m256i a, int count)
+{
+ return (__m256i)__builtin_ia32_psrldi256((__v8si)a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srl_epi32(__m256i a, __m128i count)
+{
+ return (__m256i)__builtin_ia32_psrld256((__v8si)a, (__v4si)count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srli_epi64(__m256i a, int count)
+{
+ return __builtin_ia32_psrlqi256(a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srl_epi64(__m256i a, __m128i count)
+{
+ return __builtin_ia32_psrlq256(a, count);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)((__v32qi)a - (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)((__v16hi)a - (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)((__v8si)a - (__v8si)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_epi64(__m256i a, __m256i b)
+{
+ return a - b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_subs_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psubsb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_subs_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psubsw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_subs_epu8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psubusb256((__v32qi)a, (__v32qi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_subs_epu16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_psubusw256((__v16hi)a, (__v16hi)b);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v32qi)a, (__v32qi)b, 8, 32+8, 9, 32+9, 10, 32+10, 11, 32+11, 12, 32+12, 13, 32+13, 14, 32+14, 15, 32+15, 24, 32+24, 25, 32+25, 26, 32+26, 27, 32+27, 28, 32+28, 29, 32+29, 30, 32+30, 31, 32+31);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v16hi)a, (__v16hi)b, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v8si)a, (__v8si)b, 2, 8+2, 3, 8+3, 6, 8+6, 7, 8+7);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_epi64(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector(a, b, 1, 4+1, 3, 4+3);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_epi8(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v32qi)a, (__v32qi)b, 0, 32+0, 1, 32+1, 2, 32+2, 3, 32+3, 4, 32+4, 5, 32+5, 6, 32+6, 7, 32+7, 16, 32+16, 17, 32+17, 18, 32+18, 19, 32+19, 20, 32+20, 21, 32+21, 22, 32+22, 23, 32+23);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_epi16(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v16hi)a, (__v16hi)b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector((__v8si)a, (__v8si)b, 0, 8+0, 1, 8+1, 4, 8+4, 5, 8+5);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_epi64(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_shufflevector(a, b, 0, 4+0, 2, 4+2);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_xor_si256(__m256i a, __m256i b)
+{
+ return a ^ b;
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_stream_load_si256(__m256i *__V)
+{
+ return (__m256i)__builtin_ia32_movntdqa256((__v4di *)__V);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastss_ps(__m128 __X)
+{
+ return (__m128)__builtin_ia32_vbroadcastss_ps((__v4sf)__X);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastss_ps(__m128 __X)
+{
+ return (__m256)__builtin_ia32_vbroadcastss_ps256((__v4sf)__X);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastsd_pd(__m128d __X)
+{
+ return (__m256d)__builtin_ia32_vbroadcastsd_pd256((__v2df)__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastsi128_si256(__m128i const *a)
+{
+ return (__m256i)__builtin_ia32_vbroadcastsi256(a);
+}
+
+#define _mm_blend_epi32(V1, V2, M) __extension__ ({ \
+ __m128i __V1 = (V1); \
+ __m128i __V2 = (V2); \
+ (__m128i)__builtin_ia32_pblendd128((__v4si)__V1, (__v4si)__V2, (M)); })
+
+#define _mm256_blend_epi32(V1, V2, M) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m256i __V2 = (V2); \
+ (__m256i)__builtin_ia32_pblendd256((__v8si)__V1, (__v8si)__V2, (M)); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastb_epi8(__m128i __X)
+{
+ return (__m256i)__builtin_ia32_pbroadcastb256((__v16qi)__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastw_epi16(__m128i __X)
+{
+ return (__m256i)__builtin_ia32_pbroadcastw256((__v8hi)__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastd_epi32(__m128i __X)
+{
+ return (__m256i)__builtin_ia32_pbroadcastd256((__v4si)__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcastq_epi64(__m128i __X)
+{
+ return (__m256i)__builtin_ia32_pbroadcastq256(__X);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastb_epi8(__m128i __X)
+{
+ return (__m128i)__builtin_ia32_pbroadcastb128((__v16qi)__X);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastw_epi16(__m128i __X)
+{
+ return (__m128i)__builtin_ia32_pbroadcastw128((__v8hi)__X);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastd_epi32(__m128i __X)
+{
+ return (__m128i)__builtin_ia32_pbroadcastd128((__v4si)__X);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_broadcastq_epi64(__m128i __X)
+{
+ return (__m128i)__builtin_ia32_pbroadcastq128(__X);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_permutevar8x32_epi32(__m256i a, __m256i b)
+{
+ return (__m256i)__builtin_ia32_permvarsi256((__v8si)a, (__v8si)b);
+}
+
+#define _mm256_permute4x64_pd(V, M) __extension__ ({ \
+ __m256d __V = (V); \
+ (__m256d)__builtin_ia32_permdf256((__v4df)__V, (M)); })
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_permutevar8x32_ps(__m256 a, __m256 b)
+{
+ return (__m256)__builtin_ia32_permvarsf256((__v8sf)a, (__v8sf)b);
+}
+
+#define _mm256_permute4x64_epi64(V, M) __extension__ ({ \
+ __m256i __V = (V); \
+ (__m256i)__builtin_ia32_permdi256(__V, (M)); })
+
+#define _mm256_permute2x128_si256(V1, V2, M) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m256i __V2 = (V2); \
+ __builtin_shufflevector(__V1, __V2, \
+ ((M) & 0x3) * 2, \
+ ((M) & 0x3) * 2 + 1, \
+ (((M) & 0x30) >> 4) * 2, \
+ (((M) & 0x30) >> 4) * 2 + 1); })
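+
+/* This expansion consumes only the lane-select fields of M: bits 1:0 choose
+ * one of the four 128-bit lanes of the V2:V1 pair for the result's low half
+ * (0 and 1 name the lanes of V1, 2 and 3 the lanes of V2), and bits 5:4 do
+ * the same for the high half. Illustrative use:
+ *
+ *   __m256i lo = _mm256_setr_epi64x(0, 1, 2, 3);
+ *   __m256i hi = _mm256_setr_epi64x(4, 5, 6, 7);
+ *   __m256i r  = _mm256_permute2x128_si256(lo, hi, 0x20);  // { 0, 1, 4, 5 }
+ */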
+
+#define _mm256_extracti128_si256(A, O) __extension__ ({ \
+ __m256i __A = (A); \
+ (__m128i)__builtin_ia32_extract128i256(__A, (O)); })
+
+#define _mm256_inserti128_si256(V1, V2, O) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m128i __V2 = (V2); \
+ (__m256i)__builtin_ia32_insert128i256(__V1, __V2, (O)); })
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_maskload_epi32(int const *__X, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_maskloadd256((const __v8si *)__X, (__v8si)__M);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_maskload_epi64(long long const *__X, __m256i __M)
+{
+ return (__m256i)__builtin_ia32_maskloadq256((const __v4di *)__X, __M);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maskload_epi32(int const *__X, __m128i __M)
+{
+ return (__m128i)__builtin_ia32_maskloadd((const __v4si *)__X, (__v4si)__M);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maskload_epi64(long long const *__X, __m128i __M)
+{
+ return (__m128i)__builtin_ia32_maskloadq((const __v2di *)__X, (__v2di)__M);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm256_maskstore_epi32(int *__X, __m256i __M, __m256i __Y)
+{
+ __builtin_ia32_maskstored256((__v8si *)__X, (__v8si)__M, (__v8si)__Y);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm256_maskstore_epi64(long long *__X, __m256i __M, __m256i __Y)
+{
+ __builtin_ia32_maskstoreq256((__v4di *)__X, __M, __Y);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_maskstore_epi32(int *__X, __m128i __M, __m128i __Y)
+{
+ __builtin_ia32_maskstored((__v4si *)__X, (__v4si)__M, (__v4si)__Y);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_maskstore_epi64(long long *__X, __m128i __M, __m128i __Y)
+{
+ __builtin_ia32_maskstoreq((__v2di *)__X, __M, __Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sllv_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psllv8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sllv_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psllv4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_sllv_epi64(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psllv4di(__X, __Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sllv_epi64(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psllv2di(__X, __Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srav_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrav8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srav_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrav4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srlv_epi32(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrlv8si((__v8si)__X, (__v8si)__Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srlv_epi32(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrlv4si((__v4si)__X, (__v4si)__Y);
+}
+
+static __inline__ __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_srlv_epi64(__m256i __X, __m256i __Y)
+{
+ return (__m256i)__builtin_ia32_psrlv4di(__X, __Y);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srlv_epi64(__m128i __X, __m128i __Y)
+{
+ return (__m128i)__builtin_ia32_psrlv2di(__X, __Y);
+}
diff --git a/contrib/llvm/tools/clang/lib/Headers/avxintrin.h b/contrib/llvm/tools/clang/lib/Headers/avxintrin.h
new file mode 100644
index 0000000..7a0ec3f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/avxintrin.h
@@ -0,0 +1,1235 @@
+/*===---- avxintrin.h - AVX intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#error "Never use <avxintrin.h> directly; include <immintrin.h> instead."
+#endif
+
+typedef double __v4df __attribute__ ((__vector_size__ (32)));
+typedef float __v8sf __attribute__ ((__vector_size__ (32)));
+typedef long long __v4di __attribute__ ((__vector_size__ (32)));
+typedef int __v8si __attribute__ ((__vector_size__ (32)));
+typedef short __v16hi __attribute__ ((__vector_size__ (32)));
+typedef char __v32qi __attribute__ ((__vector_size__ (32)));
+
+typedef float __m256 __attribute__ ((__vector_size__ (32)));
+typedef double __m256d __attribute__((__vector_size__(32)));
+typedef long long __m256i __attribute__((__vector_size__(32)));
+
+/* Arithmetic */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_add_pd(__m256d a, __m256d b)
+{
+ return a+b;
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_add_ps(__m256 a, __m256 b)
+{
+ return a+b;
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_pd(__m256d a, __m256d b)
+{
+ return a-b;
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_sub_ps(__m256 a, __m256 b)
+{
+ return a-b;
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_addsub_pd(__m256d a, __m256d b)
+{
+ return (__m256d)__builtin_ia32_addsubpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_addsub_ps(__m256 a, __m256 b)
+{
+ return (__m256)__builtin_ia32_addsubps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_div_pd(__m256d a, __m256d b)
+{
+ return a / b;
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_div_ps(__m256 a, __m256 b)
+{
+ return a / b;
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_max_pd(__m256d a, __m256d b)
+{
+ return (__m256d)__builtin_ia32_maxpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_max_ps(__m256 a, __m256 b)
+{
+ return (__m256)__builtin_ia32_maxps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_min_pd(__m256d a, __m256d b)
+{
+ return (__m256d)__builtin_ia32_minpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_min_ps(__m256 a, __m256 b)
+{
+ return (__m256)__builtin_ia32_minps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_mul_pd(__m256d a, __m256d b)
+{
+ return a * b;
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_mul_ps(__m256 a, __m256 b)
+{
+ return a * b;
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_sqrt_pd(__m256d a)
+{
+ return (__m256d)__builtin_ia32_sqrtpd256((__v4df)a);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_sqrt_ps(__m256 a)
+{
+ return (__m256)__builtin_ia32_sqrtps256((__v8sf)a);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_rsqrt_ps(__m256 a)
+{
+ return (__m256)__builtin_ia32_rsqrtps256((__v8sf)a);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_rcp_ps(__m256 a)
+{
+ return (__m256)__builtin_ia32_rcpps256((__v8sf)a);
+}
+
+#define _mm256_round_pd(V, M) __extension__ ({ \
+ __m256d __V = (V); \
+ (__m256d)__builtin_ia32_roundpd256((__v4df)__V, (M)); })
+
+#define _mm256_round_ps(V, M) __extension__ ({ \
+ __m256 __V = (V); \
+ (__m256)__builtin_ia32_roundps256((__v8sf)__V, (M)); })
+
+#define _mm256_ceil_pd(V) _mm256_round_pd((V), _MM_FROUND_CEIL)
+#define _mm256_floor_pd(V) _mm256_round_pd((V), _MM_FROUND_FLOOR)
+#define _mm256_ceil_ps(V) _mm256_round_ps((V), _MM_FROUND_CEIL)
+#define _mm256_floor_ps(V) _mm256_round_ps((V), _MM_FROUND_FLOOR)
+
+/* Logical */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_and_pd(__m256d a, __m256d b)
+{
+ return (__m256d)((__v4di)a & (__v4di)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_and_ps(__m256 a, __m256 b)
+{
+ return (__m256)((__v8si)a & (__v8si)b);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_andnot_pd(__m256d a, __m256d b)
+{
+ return (__m256d)(~(__v4di)a & (__v4di)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_andnot_ps(__m256 a, __m256 b)
+{
+ return (__m256)(~(__v8si)a & (__v8si)b);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_or_pd(__m256d a, __m256d b)
+{
+ return (__m256d)((__v4di)a | (__v4di)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_or_ps(__m256 a, __m256 b)
+{
+ return (__m256)((__v8si)a | (__v8si)b);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_xor_pd(__m256d a, __m256d b)
+{
+ return (__m256d)((__v4di)a ^ (__v4di)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_xor_ps(__m256 a, __m256 b)
+{
+ return (__m256)((__v8si)a ^ (__v8si)b);
+}
+
+/* Horizontal arithmetic */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_hadd_pd(__m256d a, __m256d b)
+{
+ return (__m256d)__builtin_ia32_haddpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_hadd_ps(__m256 a, __m256 b)
+{
+ return (__m256)__builtin_ia32_haddps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_hsub_pd(__m256d a, __m256d b)
+{
+ return (__m256d)__builtin_ia32_hsubpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_hsub_ps(__m256 a, __m256 b)
+{
+ return (__m256)__builtin_ia32_hsubps256((__v8sf)a, (__v8sf)b);
+}
+
+/* Vector permutations */
+static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_permutevar_pd(__m128d a, __m128i c)
+{
+ return (__m128d)__builtin_ia32_vpermilvarpd((__v2df)a, (__v2di)c);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_permutevar_pd(__m256d a, __m256i c)
+{
+ return (__m256d)__builtin_ia32_vpermilvarpd256((__v4df)a, (__v4di)c);
+}
+
+static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_permutevar_ps(__m128 a, __m128i c)
+{
+ return (__m128)__builtin_ia32_vpermilvarps((__v4sf)a, (__v4si)c);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_permutevar_ps(__m256 a, __m256i c)
+{
+ return (__m256)__builtin_ia32_vpermilvarps256((__v8sf)a, (__v8si)c);
+}
+
+#define _mm_permute_pd(A, C) __extension__ ({ \
+ __m128d __A = (A); \
+ (__m128d)__builtin_shufflevector((__v2df)__A, (__v2df) _mm_setzero_pd(), \
+ (C) & 0x1, ((C) & 0x2) >> 1); })
+
+#define _mm256_permute_pd(A, C) __extension__ ({ \
+ __m256d __A = (A); \
+ (__m256d)__builtin_shufflevector((__v4df)__A, (__v4df) _mm256_setzero_pd(), \
+ (C) & 0x1, ((C) & 0x2) >> 1, \
+ 2 + (((C) & 0x4) >> 2), \
+ 2 + (((C) & 0x8) >> 3)); })
+
+#define _mm_permute_ps(A, C) __extension__ ({ \
+ __m128 __A = (A); \
+ (__m128)__builtin_shufflevector((__v4sf)__A, (__v4sf) _mm_setzero_ps(), \
+ (C) & 0x3, ((C) & 0xc) >> 2, \
+ ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6); })
+
+#define _mm256_permute_ps(A, C) __extension__ ({ \
+ __m256 __A = (A); \
+ (__m256)__builtin_shufflevector((__v8sf)__A, (__v8sf) _mm256_setzero_ps(), \
+ (C) & 0x3, ((C) & 0xc) >> 2, \
+ ((C) & 0x30) >> 4, ((C) & 0xc0) >> 6, \
+ 4 + (((C) & 0x03) >> 0), \
+ 4 + (((C) & 0x0c) >> 2), \
+ 4 + (((C) & 0x30) >> 4), \
+ 4 + (((C) & 0xc0) >> 6)); })
+
+#define _mm256_permute2f128_pd(V1, V2, M) __extension__ ({ \
+ __m256d __V1 = (V1); \
+ __m256d __V2 = (V2); \
+ (__m256d)__builtin_shufflevector((__v4df)__V1, (__v4df)__V2, \
+ ((M) & 0x3) * 2, \
+ ((M) & 0x3) * 2 + 1, \
+ (((M) & 0x30) >> 4) * 2, \
+ (((M) & 0x30) >> 4) * 2 + 1); })
+
+#define _mm256_permute2f128_ps(V1, V2, M) __extension__ ({ \
+ __m256 __V1 = (V1); \
+ __m256 __V2 = (V2); \
+ (__m256)__builtin_shufflevector((__v8sf)__V1, (__v8sf)__V2, \
+ ((M) & 0x3) * 4, \
+ ((M) & 0x3) * 4 + 1, \
+ ((M) & 0x3) * 4 + 2, \
+ ((M) & 0x3) * 4 + 3, \
+ (((M) & 0x30) >> 4) * 4, \
+ (((M) & 0x30) >> 4) * 4 + 1, \
+ (((M) & 0x30) >> 4) * 4 + 2, \
+ (((M) & 0x30) >> 4) * 4 + 3); })
+
+#define _mm256_permute2f128_si256(V1, V2, M) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m256i __V2 = (V2); \
+ (__m256i)__builtin_shufflevector((__v8si)__V1, (__v8si)__V2, \
+ ((M) & 0x3) * 4, \
+ ((M) & 0x3) * 4 + 1, \
+ ((M) & 0x3) * 4 + 2, \
+ ((M) & 0x3) * 4 + 3, \
+ (((M) & 0x30) >> 4) * 4, \
+ (((M) & 0x30) >> 4) * 4 + 1, \
+ (((M) & 0x30) >> 4) * 4 + 2, \
+ (((M) & 0x30) >> 4) * 4 + 3); })
+
+/* Vector Blend */
+#define _mm256_blend_pd(V1, V2, M) __extension__ ({ \
+ __m256d __V1 = (V1); \
+ __m256d __V2 = (V2); \
+ (__m256d)__builtin_ia32_blendpd256((__v4df)__V1, (__v4df)__V2, (M)); })
+
+#define _mm256_blend_ps(V1, V2, M) __extension__ ({ \
+ __m256 __V1 = (V1); \
+ __m256 __V2 = (V2); \
+ (__m256)__builtin_ia32_blendps256((__v8sf)__V1, (__v8sf)__V2, (M)); })
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_blendv_pd(__m256d a, __m256d b, __m256d c)
+{
+ return (__m256d)__builtin_ia32_blendvpd256((__v4df)a, (__v4df)b, (__v4df)c);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_blendv_ps(__m256 a, __m256 b, __m256 c)
+{
+ return (__m256)__builtin_ia32_blendvps256((__v8sf)a, (__v8sf)b, (__v8sf)c);
+}
+
+/* Vector Dot Product */
+#define _mm256_dp_ps(V1, V2, M) __extension__ ({ \
+ __m256 __V1 = (V1); \
+ __m256 __V2 = (V2); \
+ (__m256)__builtin_ia32_dpps256((__v8sf)__V1, (__v8sf)__V2, (M)); })
+
+/* Vector shuffle */
+#define _mm256_shuffle_ps(a, b, mask) __extension__ ({ \
+ __m256 __a = (a); \
+ __m256 __b = (b); \
+ (__m256)__builtin_shufflevector((__v8sf)__a, (__v8sf)__b, \
+ (mask) & 0x3, ((mask) & 0xc) >> 2, \
+ (((mask) & 0x30) >> 4) + 8, (((mask) & 0xc0) >> 6) + 8, \
+ ((mask) & 0x3) + 4, (((mask) & 0xc) >> 2) + 4, \
+ (((mask) & 0x30) >> 4) + 12, (((mask) & 0xc0) >> 6) + 12); })
+
+#define _mm256_shuffle_pd(a, b, mask) __extension__ ({ \
+ __m256d __a = (a); \
+ __m256d __b = (b); \
+ (__m256d)__builtin_shufflevector((__v4df)__a, (__v4df)__b, \
+ (mask) & 0x1, \
+ (((mask) & 0x2) >> 1) + 4, \
+ (((mask) & 0x4) >> 2) + 2, \
+ (((mask) & 0x8) >> 3) + 6); })
+
+/* Compare */
+#define _CMP_EQ_OQ 0x00 /* Equal (ordered, non-signaling) */
+#define _CMP_LT_OS 0x01 /* Less-than (ordered, signaling) */
+#define _CMP_LE_OS 0x02 /* Less-than-or-equal (ordered, signaling) */
+#define _CMP_UNORD_Q 0x03 /* Unordered (non-signaling) */
+#define _CMP_NEQ_UQ 0x04 /* Not-equal (unordered, non-signaling) */
+#define _CMP_NLT_US 0x05 /* Not-less-than (unordered, signaling) */
+#define _CMP_NLE_US 0x06 /* Not-less-than-or-equal (unordered, signaling) */
+#define _CMP_ORD_Q 0x07 /* Ordered (non-signaling) */
+#define _CMP_EQ_UQ 0x08 /* Equal (unordered, non-signaling) */
+#define _CMP_NGE_US 0x09 /* Not-greater-than-or-equal (unord, signaling) */
+#define _CMP_NGT_US 0x0a /* Not-greater-than (unordered, signaling) */
+#define _CMP_FALSE_OQ 0x0b /* False (ordered, non-signaling) */
+#define _CMP_NEQ_OQ 0x0c /* Not-equal (ordered, non-signaling) */
+#define _CMP_GE_OS 0x0d /* Greater-than-or-equal (ordered, signaling) */
+#define _CMP_GT_OS 0x0e /* Greater-than (ordered, signaling) */
+#define _CMP_TRUE_UQ 0x0f /* True (unordered, non-signaling) */
+#define _CMP_EQ_OS 0x10 /* Equal (ordered, signaling) */
+#define _CMP_LT_OQ 0x11 /* Less-than (ordered, non-signaling) */
+#define _CMP_LE_OQ 0x12 /* Less-than-or-equal (ordered, non-signaling) */
+#define _CMP_UNORD_S 0x13 /* Unordered (signaling) */
+#define _CMP_NEQ_US 0x14 /* Not-equal (unordered, signaling) */
+#define _CMP_NLT_UQ 0x15 /* Not-less-than (unordered, non-signaling) */
+#define _CMP_NLE_UQ 0x16 /* Not-less-than-or-equal (unord, non-signaling) */
+#define _CMP_ORD_S 0x17 /* Ordered (signaling) */
+#define _CMP_EQ_US 0x18 /* Equal (unordered, signaling) */
+#define _CMP_NGE_UQ 0x19 /* Not-greater-than-or-equal (unord, non-sign) */
+#define _CMP_NGT_UQ 0x1a /* Not-greater-than (unordered, non-signaling) */
+#define _CMP_FALSE_OS 0x1b /* False (ordered, signaling) */
+#define _CMP_NEQ_OS 0x1c /* Not-equal (ordered, signaling) */
+#define _CMP_GE_OQ 0x1d /* Greater-than-or-equal (ordered, non-signaling) */
+#define _CMP_GT_OQ 0x1e /* Greater-than (ordered, non-signaling) */
+#define _CMP_TRUE_US 0x1f /* True (unordered, signaling) */
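+
+/* A minimal sketch of how the predicate macros combine with the compare
+ * intrinsics defined below:
+ *
+ *   __m256 x = _mm256_setr_ps(1, 2, 3, 4, 5, 6, 7, 8);
+ *   __m256 y = _mm256_set1_ps(4.5f);
+ *   __m256 m = _mm256_cmp_ps(x, y, _CMP_LT_OS);  // all-ones where x < y
+ *   int bits = _mm256_movemask_ps(m);            // here: 0x0f
+ */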
+
+#define _mm_cmp_pd(a, b, c) __extension__ ({ \
+ __m128d __a = (a); \
+ __m128d __b = (b); \
+ (__m128d)__builtin_ia32_cmppd((__v2df)__a, (__v2df)__b, (c)); })
+
+#define _mm_cmp_ps(a, b, c) __extension__ ({ \
+ __m128 __a = (a); \
+ __m128 __b = (b); \
+ (__m128)__builtin_ia32_cmpps((__v4sf)__a, (__v4sf)__b, (c)); })
+
+#define _mm256_cmp_pd(a, b, c) __extension__ ({ \
+ __m256d __a = (a); \
+ __m256d __b = (b); \
+ (__m256d)__builtin_ia32_cmppd256((__v4df)__a, (__v4df)__b, (c)); })
+
+#define _mm256_cmp_ps(a, b, c) __extension__ ({ \
+ __m256 __a = (a); \
+ __m256 __b = (b); \
+ (__m256)__builtin_ia32_cmpps256((__v8sf)__a, (__v8sf)__b, (c)); })
+
+#define _mm_cmp_sd(a, b, c) __extension__ ({ \
+ __m128d __a = (a); \
+ __m128d __b = (b); \
+ (__m128d)__builtin_ia32_cmpsd((__v2df)__a, (__v2df)__b, (c)); })
+
+#define _mm_cmp_ss(a, b, c) __extension__ ({ \
+ __m128 __a = (a); \
+ __m128 __b = (b); \
+ (__m128)__builtin_ia32_cmpss((__v4sf)__a, (__v4sf)__b, (c)); })
+
+/* Vector extract */
+#define _mm256_extractf128_pd(A, O) __extension__ ({ \
+ __m256d __A = (A); \
+ (__m128d)__builtin_ia32_vextractf128_pd256((__v4df)__A, (O)); })
+
+#define _mm256_extractf128_ps(A, O) __extension__ ({ \
+ __m256 __A = (A); \
+ (__m128)__builtin_ia32_vextractf128_ps256((__v8sf)__A, (O)); })
+
+#define _mm256_extractf128_si256(A, O) __extension__ ({ \
+ __m256i __A = (A); \
+ (__m128i)__builtin_ia32_vextractf128_si256((__v8si)__A, (O)); })
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_extract_epi32(__m256i a, int const imm)
+{
+ __v8si b = (__v8si)a;
+ return b[imm];
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_extract_epi16(__m256i a, int const imm)
+{
+ __v16hi b = (__v16hi)a;
+ return b[imm];
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_extract_epi8(__m256i a, int const imm)
+{
+ __v32qi b = (__v32qi)a;
+ return b[imm];
+}
+
+#ifdef __x86_64__
+static __inline long long __attribute__((__always_inline__, __nodebug__))
+_mm256_extract_epi64(__m256i a, const int imm)
+{
+ __v4di b = (__v4di)a;
+ return b[imm];
+}
+#endif
+
+/* Vector insert */
+#define _mm256_insertf128_pd(V1, V2, O) __extension__ ({ \
+ __m256d __V1 = (V1); \
+ __m128d __V2 = (V2); \
+ (__m256d)__builtin_ia32_vinsertf128_pd256((__v4df)__V1, (__v2df)__V2, (O)); })
+
+#define _mm256_insertf128_ps(V1, V2, O) __extension__ ({ \
+ __m256 __V1 = (V1); \
+ __m128 __V2 = (V2); \
+ (__m256)__builtin_ia32_vinsertf128_ps256((__v8sf)__V1, (__v4sf)__V2, (O)); })
+
+#define _mm256_insertf128_si256(V1, V2, O) __extension__ ({ \
+ __m256i __V1 = (V1); \
+ __m128i __V2 = (V2); \
+ (__m256i)__builtin_ia32_vinsertf128_si256((__v8si)__V1, (__v4si)__V2, (O)); })
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_insert_epi32(__m256i a, int b, int const imm)
+{
+ __v8si c = (__v8si)a;
+ c[imm & 7] = b;
+ return (__m256i)c;
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_insert_epi16(__m256i a, int b, int const imm)
+{
+ __v16hi c = (__v16hi)a;
+ c[imm & 15] = b;
+ return (__m256i)c;
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_insert_epi8(__m256i a, int b, int const imm)
+{
+ __v32qi c = (__v32qi)a;
+ c[imm & 31] = b;
+ return (__m256i)c;
+}
+
+#ifdef __x86_64__
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_insert_epi64(__m256i a, long long b, int const imm)
+{
+ __v4di c = (__v4di)a;
+ c[imm & 3] = b;
+ return (__m256i)c;
+}
+#endif
+
+/* Conversion */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi32_pd(__m128i a)
+{
+ return (__m256d)__builtin_ia32_cvtdq2pd256((__v4si) a);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtepi32_ps(__m256i a)
+{
+ return (__m256)__builtin_ia32_cvtdq2ps256((__v8si) a);
+}
+
+static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtpd_ps(__m256d a)
+{
+ return (__m128)__builtin_ia32_cvtpd2ps256((__v4df) a);
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtps_epi32(__m256 a)
+{
+ return (__m256i)__builtin_ia32_cvtps2dq256((__v8sf) a);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtps_pd(__m128 a)
+{
+ return (__m256d)__builtin_ia32_cvtps2pd256((__v4sf) a);
+}
+
+static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvttpd_epi32(__m256d a)
+{
+ return (__m128i)__builtin_ia32_cvttpd2dq256((__v4df) a);
+}
+
+static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvtpd_epi32(__m256d a)
+{
+ return (__m128i)__builtin_ia32_cvtpd2dq256((__v4df) a);
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_cvttps_epi32(__m256 a)
+{
+ return (__m256i)__builtin_ia32_cvttps2dq256((__v8sf) a);
+}
+
+/* Vector replicate */
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_movehdup_ps(__m256 a)
+{
+ return __builtin_shufflevector(a, a, 1, 1, 3, 3, 5, 5, 7, 7);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_moveldup_ps(__m256 a)
+{
+ return __builtin_shufflevector(a, a, 0, 0, 2, 2, 4, 4, 6, 6);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_movedup_pd(__m256d a)
+{
+ return __builtin_shufflevector(a, a, 0, 0, 2, 2);
+}
+
+/* Unpack and Interleave */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_pd(__m256d a, __m256d b)
+{
+ return __builtin_shufflevector(a, b, 1, 5, 1+2, 5+2);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_pd(__m256d a, __m256d b)
+{
+ return __builtin_shufflevector(a, b, 0, 4, 0+2, 4+2);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_unpackhi_ps(__m256 a, __m256 b)
+{
+ return __builtin_shufflevector(a, b, 2, 10, 2+1, 10+1, 6, 14, 6+1, 14+1);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_unpacklo_ps(__m256 a, __m256 b)
+{
+ return __builtin_shufflevector(a, b, 0, 8, 0+1, 8+1, 4, 12, 4+1, 12+1);
+}
+
+/* Bit Test */
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm_testz_pd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_vtestzpd((__v2df)a, (__v2df)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm_testc_pd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_vtestcpd((__v2df)a, (__v2df)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm_testnzc_pd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_vtestnzcpd((__v2df)a, (__v2df)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm_testz_ps(__m128 a, __m128 b)
+{
+ return __builtin_ia32_vtestzps((__v4sf)a, (__v4sf)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm_testc_ps(__m128 a, __m128 b)
+{
+ return __builtin_ia32_vtestcps((__v4sf)a, (__v4sf)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm_testnzc_ps(__m128 a, __m128 b)
+{
+ return __builtin_ia32_vtestnzcps((__v4sf)a, (__v4sf)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testz_pd(__m256d a, __m256d b)
+{
+ return __builtin_ia32_vtestzpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testc_pd(__m256d a, __m256d b)
+{
+ return __builtin_ia32_vtestcpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testnzc_pd(__m256d a, __m256d b)
+{
+ return __builtin_ia32_vtestnzcpd256((__v4df)a, (__v4df)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testz_ps(__m256 a, __m256 b)
+{
+ return __builtin_ia32_vtestzps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testc_ps(__m256 a, __m256 b)
+{
+ return __builtin_ia32_vtestcps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testnzc_ps(__m256 a, __m256 b)
+{
+ return __builtin_ia32_vtestnzcps256((__v8sf)a, (__v8sf)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testz_si256(__m256i a, __m256i b)
+{
+ return __builtin_ia32_ptestz256((__v4di)a, (__v4di)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testc_si256(__m256i a, __m256i b)
+{
+ return __builtin_ia32_ptestc256((__v4di)a, (__v4di)b);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_testnzc_si256(__m256i a, __m256i b)
+{
+ return __builtin_ia32_ptestnzc256((__v4di)a, (__v4di)b);
+}
+
+/* Vector extract sign mask */
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_movemask_pd(__m256d a)
+{
+ return __builtin_ia32_movmskpd256((__v4df)a);
+}
+
+static __inline int __attribute__((__always_inline__, __nodebug__))
+_mm256_movemask_ps(__m256 a)
+{
+ return __builtin_ia32_movmskps256((__v8sf)a);
+}
+
+/* Vector zero */
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_zeroall(void)
+{
+ __builtin_ia32_vzeroall();
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_zeroupper(void)
+{
+ __builtin_ia32_vzeroupper();
+}
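+
+/* Typical use (sketch): execute _mm256_zeroupper() before transferring
+ * control to code compiled without AVX, so dirty upper halves of the YMM
+ * registers do not incur the AVX/SSE transition penalty. */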
+
+/* Vector load with broadcast */
+static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_broadcast_ss(float const *a)
+{
+ return (__m128)__builtin_ia32_vbroadcastss(a);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcast_sd(double const *a)
+{
+ return (__m256d)__builtin_ia32_vbroadcastsd256(a);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcast_ss(float const *a)
+{
+ return (__m256)__builtin_ia32_vbroadcastss256(a);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcast_pd(__m128d const *a)
+{
+ return (__m256d)__builtin_ia32_vbroadcastf128_pd256(a);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_broadcast_ps(__m128 const *a)
+{
+ return (__m256)__builtin_ia32_vbroadcastf128_ps256(a);
+}
+
+/* SIMD load ops */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_load_pd(double const *p)
+{
+ return *(__m256d *)p;
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_load_ps(float const *p)
+{
+ return *(__m256 *)p;
+}
+
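+/* The unaligned load helpers below read through a one-member struct marked
+ * packed (so the compiler assumes no particular alignment and emits an
+ * unaligned load) and may_alias (so the access is exempt from the
+ * strict-aliasing rules). */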
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu_pd(double const *p)
+{
+ struct __loadu_pd {
+ __m256d v;
+ } __attribute__((packed, may_alias));
+ return ((struct __loadu_pd*)p)->v;
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu_ps(float const *p)
+{
+ struct __loadu_ps {
+ __m256 v;
+ } __attribute__((packed, may_alias));
+ return ((struct __loadu_ps*)p)->v;
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_load_si256(__m256i const *p)
+{
+ return *p;
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu_si256(__m256i const *p)
+{
+ struct __loadu_si256 {
+ __m256i v;
+ } __attribute__((packed, may_alias));
+ return ((struct __loadu_si256*)p)->v;
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_lddqu_si256(__m256i const *p)
+{
+ return (__m256i)__builtin_ia32_lddqu256((char const *)p);
+}
+
+/* SIMD store ops */
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_store_pd(double *p, __m256d a)
+{
+ *(__m256d *)p = a;
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_store_ps(float *p, __m256 a)
+{
+ *(__m256 *)p = a;
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu_pd(double *p, __m256d a)
+{
+ __builtin_ia32_storeupd256(p, (__v4df)a);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu_ps(float *p, __m256 a)
+{
+ __builtin_ia32_storeups256(p, (__v8sf)a);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_store_si256(__m256i *p, __m256i a)
+{
+ *p = a;
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu_si256(__m256i *p, __m256i a)
+{
+ __builtin_ia32_storedqu256((char *)p, (__v32qi)a);
+}
+
+/* Conditional load ops */
+static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_maskload_pd(double const *p, __m128d m)
+{
+ return (__m128d)__builtin_ia32_maskloadpd((const __v2df *)p, (__v2df)m);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_maskload_pd(double const *p, __m256d m)
+{
+ return (__m256d)__builtin_ia32_maskloadpd256((const __v4df *)p, (__v4df)m);
+}
+
+static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_maskload_ps(float const *p, __m128 m)
+{
+ return (__m128)__builtin_ia32_maskloadps((const __v4sf *)p, (__v4sf)m);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_maskload_ps(float const *p, __m256 m)
+{
+ return (__m256)__builtin_ia32_maskloadps256((const __v8sf *)p, (__v8sf)m);
+}
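+
+/* Mask semantics, sketched: an element is loaded only where the matching
+ * mask lane has its most-significant bit set; the remaining result lanes
+ * are zeroed and their memory is not touched.
+ *
+ *   float data[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
+ *   __m256 m = _mm256_castsi256_ps(_mm256_setr_epi32(-1, 0, -1, 0, -1, 0, -1, 0));
+ *   __m256 v = _mm256_maskload_ps(data, m);   // { 1, 0, 3, 0, 5, 0, 7, 0 }
+ */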
+
+/* Conditional store ops */
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_maskstore_ps(float *p, __m256 m, __m256 a)
+{
+ __builtin_ia32_maskstoreps256((__v8sf *)p, (__v8sf)m, (__v8sf)a);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm_maskstore_pd(double *p, __m128d m, __m128d a)
+{
+ __builtin_ia32_maskstorepd((__v2df *)p, (__v2df)m, (__v2df)a);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_maskstore_pd(double *p, __m256d m, __m256d a)
+{
+ __builtin_ia32_maskstorepd256((__v4df *)p, (__v4df)m, (__v4df)a);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm_maskstore_ps(float *p, __m128 m, __m128 a)
+{
+ __builtin_ia32_maskstoreps((__v4sf *)p, (__v4sf)m, (__v4sf)a);
+}
+
+/* Cacheability support ops */
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_stream_si256(__m256i *a, __m256i b)
+{
+ __builtin_ia32_movntdq256((__v4di *)a, (__v4di)b);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_stream_pd(double *a, __m256d b)
+{
+ __builtin_ia32_movntpd256(a, (__v4df)b);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_stream_ps(float *p, __m256 a)
+{
+ __builtin_ia32_movntps256(p, (__v8sf)a);
+}
+
+/* Create vectors */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_set_pd(double a, double b, double c, double d)
+{
+ return (__m256d){ d, c, b, a };
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_set_ps(float a, float b, float c, float d,
+ float e, float f, float g, float h)
+{
+ return (__m256){ h, g, f, e, d, c, b, a };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_set_epi32(int i0, int i1, int i2, int i3,
+ int i4, int i5, int i6, int i7)
+{
+ return (__m256i)(__v8si){ i7, i6, i5, i4, i3, i2, i1, i0 };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_set_epi16(short w15, short w14, short w13, short w12,
+ short w11, short w10, short w09, short w08,
+ short w07, short w06, short w05, short w04,
+ short w03, short w02, short w01, short w00)
+{
+ return (__m256i)(__v16hi){ w00, w01, w02, w03, w04, w05, w06, w07,
+ w08, w09, w10, w11, w12, w13, w14, w15 };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_set_epi8(char b31, char b30, char b29, char b28,
+ char b27, char b26, char b25, char b24,
+ char b23, char b22, char b21, char b20,
+ char b19, char b18, char b17, char b16,
+ char b15, char b14, char b13, char b12,
+ char b11, char b10, char b09, char b08,
+ char b07, char b06, char b05, char b04,
+ char b03, char b02, char b01, char b00)
+{
+ return (__m256i)(__v32qi){
+ b00, b01, b02, b03, b04, b05, b06, b07,
+ b08, b09, b10, b11, b12, b13, b14, b15,
+ b16, b17, b18, b19, b20, b21, b22, b23,
+ b24, b25, b26, b27, b28, b29, b30, b31
+ };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_set_epi64x(long long a, long long b, long long c, long long d)
+{
+ return (__m256i)(__v4di){ d, c, b, a };
+}
+
+/* Create vectors with elements in reverse order */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_setr_pd(double a, double b, double c, double d)
+{
+ return (__m256d){ a, b, c, d };
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_setr_ps(float a, float b, float c, float d,
+ float e, float f, float g, float h)
+{
+ return (__m256){ a, b, c, d, e, f, g, h };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_setr_epi32(int i0, int i1, int i2, int i3,
+ int i4, int i5, int i6, int i7)
+{
+ return (__m256i)(__v8si){ i0, i1, i2, i3, i4, i5, i6, i7 };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_setr_epi16(short w15, short w14, short w13, short w12,
+ short w11, short w10, short w09, short w08,
+ short w07, short w06, short w05, short w04,
+ short w03, short w02, short w01, short w00)
+{
+ return (__m256i)(__v16hi){ w15, w14, w13, w12, w11, w10, w09, w08,
+ w07, w06, w05, w04, w03, w02, w01, w00 };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_setr_epi8(char b31, char b30, char b29, char b28,
+ char b27, char b26, char b25, char b24,
+ char b23, char b22, char b21, char b20,
+ char b19, char b18, char b17, char b16,
+ char b15, char b14, char b13, char b12,
+ char b11, char b10, char b09, char b08,
+ char b07, char b06, char b05, char b04,
+ char b03, char b02, char b01, char b00)
+{
+ return (__m256i)(__v32qi){
+ b31, b30, b29, b28, b27, b26, b25, b24,
+ b23, b22, b21, b20, b19, b18, b17, b16,
+ b15, b14, b13, b12, b11, b10, b09, b08,
+ b07, b06, b05, b04, b03, b02, b01, b00 };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_setr_epi64x(long long a, long long b, long long c, long long d)
+{
+ return (__m256i)(__v4di){ a, b, c, d };
+}
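+
+/* Element-order sketch: the _mm256_set_* constructors take arguments from
+ * the highest element down to element 0, while the _mm256_setr_* variants
+ * take them in memory (low-to-high) order, so these produce equal vectors:
+ *
+ *   __m256i a = _mm256_set_epi32 (7, 6, 5, 4, 3, 2, 1, 0);
+ *   __m256i b = _mm256_setr_epi32(0, 1, 2, 3, 4, 5, 6, 7);
+ */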
+
+/* Create vectors with repeated elements */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_set1_pd(double w)
+{
+ return (__m256d){ w, w, w, w };
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_set1_ps(float w)
+{
+ return (__m256){ w, w, w, w, w, w, w, w };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_set1_epi32(int i)
+{
+ return (__m256i)(__v8si){ i, i, i, i, i, i, i, i };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_set1_epi16(short w)
+{
+ return (__m256i)(__v16hi){ w, w, w, w, w, w, w, w, w, w, w, w, w, w, w, w };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_set1_epi8(char b)
+{
+ return (__m256i)(__v32qi){ b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b,
+ b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_set1_epi64x(long long q)
+{
+ return (__m256i)(__v4di){ q, q, q, q };
+}
+
+/* Create zeroed vectors */
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_setzero_pd(void)
+{
+ return (__m256d){ 0, 0, 0, 0 };
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_setzero_ps(void)
+{
+ return (__m256){ 0, 0, 0, 0, 0, 0, 0, 0 };
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_setzero_si256(void)
+{
+ return (__m256i){ 0LL, 0LL, 0LL, 0LL };
+}
+
+/* Cast between vector types */
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_castpd_ps(__m256d in)
+{
+ return (__m256)in;
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_castpd_si256(__m256d in)
+{
+ return (__m256i)in;
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_castps_pd(__m256 in)
+{
+ return (__m256d)in;
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_castps_si256(__m256 in)
+{
+ return (__m256i)in;
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_castsi256_ps(__m256i in)
+{
+ return (__m256)in;
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_castsi256_pd(__m256i in)
+{
+ return (__m256d)in;
+}
+
+static __inline __m128d __attribute__((__always_inline__, __nodebug__))
+_mm256_castpd256_pd128(__m256d in)
+{
+ return __builtin_shufflevector(in, in, 0, 1);
+}
+
+static __inline __m128 __attribute__((__always_inline__, __nodebug__))
+_mm256_castps256_ps128(__m256 in)
+{
+ return __builtin_shufflevector(in, in, 0, 1, 2, 3);
+}
+
+static __inline __m128i __attribute__((__always_inline__, __nodebug__))
+_mm256_castsi256_si128(__m256i in)
+{
+ return __builtin_shufflevector(in, in, 0, 1);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_castpd128_pd256(__m128d in)
+{
+ __m128d zero = _mm_setzero_pd();
+ return __builtin_shufflevector(in, zero, 0, 1, 2, 2);
+}
+
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_castps128_ps256(__m128 in)
+{
+ __m128 zero = _mm_setzero_ps();
+ return __builtin_shufflevector(in, zero, 0, 1, 2, 3, 4, 4, 4, 4);
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_castsi128_si256(__m128i in)
+{
+ __m128i zero = _mm_setzero_si128();
+ return __builtin_shufflevector(in, zero, 0, 1, 2, 2);
+}
+
+/* SIMD load ops (unaligned) */
+static __inline __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu2_m128(float const *addr_hi, float const *addr_lo)
+{
+ struct __loadu_ps {
+ __m128 v;
+ } __attribute__((__packed__, __may_alias__));
+
+ __m256 v256 = _mm256_castps128_ps256(((struct __loadu_ps*)addr_lo)->v);
+ return _mm256_insertf128_ps(v256, ((struct __loadu_ps*)addr_hi)->v, 1);
+}
+
+static __inline __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu2_m128d(double const *addr_hi, double const *addr_lo)
+{
+ struct __loadu_pd {
+ __m128d v;
+ } __attribute__((__packed__, __may_alias__));
+
+ __m256d v256 = _mm256_castpd128_pd256(((struct __loadu_pd*)addr_lo)->v);
+ return _mm256_insertf128_pd(v256, ((struct __loadu_pd*)addr_hi)->v, 1);
+}
+
+static __inline __m256i __attribute__((__always_inline__, __nodebug__))
+_mm256_loadu2_m128i(__m128i const *addr_hi, __m128i const *addr_lo)
+{
+ struct __loadu_si128 {
+ __m128i v;
+ } __attribute__((packed, may_alias));
+ __m256i v256 = _mm256_castsi128_si256(((struct __loadu_si128*)addr_lo)->v);
+ return _mm256_insertf128_si256(v256, ((struct __loadu_si128*)addr_hi)->v, 1);
+}
+
+/* SIMD store ops (unaligned) */
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu2_m128(float *addr_hi, float *addr_lo, __m256 a)
+{
+ __m128 v128;
+
+ v128 = _mm256_castps256_ps128(a);
+ __builtin_ia32_storeups(addr_lo, v128);
+ v128 = _mm256_extractf128_ps(a, 1);
+ __builtin_ia32_storeups(addr_hi, v128);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu2_m128d(double *addr_hi, double *addr_lo, __m256d a)
+{
+ __m128d v128;
+
+ v128 = _mm256_castpd256_pd128(a);
+ __builtin_ia32_storeupd(addr_lo, v128);
+ v128 = _mm256_extractf128_pd(a, 1);
+ __builtin_ia32_storeupd(addr_hi, v128);
+}
+
+static __inline void __attribute__((__always_inline__, __nodebug__))
+_mm256_storeu2_m128i(__m128i *addr_hi, __m128i *addr_lo, __m256i a)
+{
+ __m128i v128;
+
+ v128 = _mm256_castsi256_si128(a);
+ __builtin_ia32_storedqu((char *)addr_lo, (__v16qi)v128);
+ v128 = _mm256_extractf128_si256(a, 1);
+ __builtin_ia32_storedqu((char *)addr_hi, (__v16qi)v128);
+}
diff --git a/contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h b/contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h
new file mode 100644
index 0000000..c60b0c4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/bmi2intrin.h
@@ -0,0 +1,75 @@
+/*===---- bmi2intrin.h - BMI2 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <bmi2intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __BMI2__
+# error "BMI2 instruction set not enabled"
+#endif /* __BMI2__ */
+
+#ifndef __BMI2INTRIN_H
+#define __BMI2INTRIN_H
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_bzhi_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_bzhi_si(__X, __Y);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_pdep_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_pdep_si(__X, __Y);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_pext_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_pext_si(__X, __Y);
+}
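+
+/* Worked example of the deposit/extract semantics (values are illustrative):
+ *
+ *   _pext_u32(0xabcd1234, 0x0000ff00) == 0x12
+ *     (the source bits selected by the mask are packed into the low-order
+ *      bits of the result)
+ *   _pdep_u32(0x12, 0x0000ff00) == 0x00001200
+ *     (the low-order source bits are scattered to the mask positions)
+ */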
+
+#ifdef __x86_64__
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_bzhi_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_bzhi_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_pdep_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_pdep_di(__X, __Y);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_pext_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_pext_di(__X, __Y);
+}
+
+#endif /* __x86_64__ */
+
+#endif /* __BMI2INTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/bmiintrin.h b/contrib/llvm/tools/clang/lib/Headers/bmiintrin.h
new file mode 100644
index 0000000..2f7db73
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/bmiintrin.h
@@ -0,0 +1,115 @@
+/*===---- bmiintrin.h - BMI intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <bmiintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __BMI__
+# error "BMI instruction set not enabled"
+#endif /* __BMI__ */
+
+#ifndef __BMIINTRIN_H
+#define __BMIINTRIN_H
+
+static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+__tzcnt16(unsigned short __X)
+{
+ return __builtin_ctzs(__X);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__andn_u32(unsigned int __X, unsigned int __Y)
+{
+ return ~__X & __Y;
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__bextr_u32(unsigned int __X, unsigned int __Y)
+{
+ return __builtin_ia32_bextr_u32(__X, __Y);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blsi_u32(unsigned int __X)
+{
+ return __X & -__X;
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blsmsk_u32(unsigned int __X)
+{
+ return __X ^ (__X - 1);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__blsr_u32(unsigned int __X)
+{
+ return __X & (__X - 1);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__tzcnt32(unsigned int __X)
+{
+ return __builtin_ctz(__X);
+}
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__andn_u64 (unsigned long long __X, unsigned long long __Y)
+{
+ return ~__X & __Y;
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__bextr_u64(unsigned long long __X, unsigned long long __Y)
+{
+ return __builtin_ia32_bextr_u64(__X, __Y);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__blsi_u64(unsigned long long __X)
+{
+ return __X & -__X;
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__blsmsk_u64(unsigned long long __X)
+{
+ return __X ^ (__X - 1);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__blsr_u64(unsigned long long __X)
+{
+ return __X & (__X - 1);
+}
+
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__tzcnt64(unsigned long long __X)
+{
+ return __builtin_ctzll(__X);
+}
+#endif /* __x86_64__ */
+
+#endif /* __BMIINTRIN_H */
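For illustration only, a minimal sketch exercising the BMI1 wrappers above, assuming -mbmi and inclusion via <x86intrin.h>; the input value is arbitrary:

#include <x86intrin.h>
#include <stdio.h>

int main(void)
{
  unsigned int x = 0x28u;                 /* binary 101000 */
  printf("%u\n",  __tzcnt32(x));          /* 3    - trailing zero count      */
  printf("%#x\n", __blsi_u32(x));         /* 0x8  - isolate lowest set bit   */
  printf("%#x\n", __blsr_u32(x));         /* 0x20 - clear lowest set bit     */
  printf("%#x\n", __blsmsk_u32(x));       /* 0xf  - mask up to lowest bit    */
  printf("%#x\n", __andn_u32(x, 0xFFu));  /* 0xd7 - ~x & 0xFF                */
  return 0;
}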
diff --git a/contrib/llvm/tools/clang/lib/Headers/cpuid.h b/contrib/llvm/tools/clang/lib/Headers/cpuid.h
new file mode 100644
index 0000000..05c293f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/cpuid.h
@@ -0,0 +1,33 @@
+/*===---- cpuid.h - X86 cpu model detection --------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !(__x86_64__ || __i386__)
+#error "This header is for x86 only"
+#endif
+
+static inline int __get_cpuid (unsigned int level, unsigned int *eax,
+ unsigned int *ebx, unsigned int *ecx,
+ unsigned int *edx) {
+ asm("cpuid" : "=a"(*eax), "=b" (*ebx), "=c"(*ecx), "=d"(*edx) : "0"(level));
+ return 1;
+}
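Note that this minimal __get_cpuid always returns 1 and does not validate the requested level against the maximum supported leaf (unlike GCC's richer cpuid.h interface). A small usage sketch, assuming an x86 target:

#include <cpuid.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
  unsigned int eax, ebx, ecx, edx;
  char vendor[13];

  __get_cpuid(0, &eax, &ebx, &ecx, &edx);
  /* Leaf 0: EAX holds the highest basic leaf; EBX/EDX/ECX hold the vendor string. */
  memcpy(vendor + 0, &ebx, 4);
  memcpy(vendor + 4, &edx, 4);
  memcpy(vendor + 8, &ecx, 4);
  vendor[12] = '\0';
  printf("max basic leaf %u, vendor %s\n", eax, vendor);
  return 0;
}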
diff --git a/contrib/llvm/tools/clang/lib/Headers/emmintrin.h b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
new file mode 100644
index 0000000..e10b77d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/emmintrin.h
@@ -0,0 +1,1424 @@
+/*===---- emmintrin.h - SSE2 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __EMMINTRIN_H
+#define __EMMINTRIN_H
+
+#ifndef __SSE2__
+#error "SSE2 instruction set not enabled"
+#else
+
+#include <xmmintrin.h>
+
+typedef double __m128d __attribute__((__vector_size__(16)));
+typedef long long __m128i __attribute__((__vector_size__(16)));
+
+/* Type defines. */
+typedef double __v2df __attribute__ ((__vector_size__ (16)));
+typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+typedef short __v8hi __attribute__((__vector_size__(16)));
+typedef char __v16qi __attribute__((__vector_size__(16)));
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_add_sd(__m128d a, __m128d b)
+{
+ a[0] += b[0];
+ return a;
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_add_pd(__m128d a, __m128d b)
+{
+ return a + b;
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_sub_sd(__m128d a, __m128d b)
+{
+ a[0] -= b[0];
+ return a;
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_sub_pd(__m128d a, __m128d b)
+{
+ return a - b;
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_mul_sd(__m128d a, __m128d b)
+{
+ a[0] *= b[0];
+ return a;
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_mul_pd(__m128d a, __m128d b)
+{
+ return a * b;
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_div_sd(__m128d a, __m128d b)
+{
+ a[0] /= b[0];
+ return a;
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_div_pd(__m128d a, __m128d b)
+{
+ return a / b;
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_sqrt_sd(__m128d a, __m128d b)
+{
+ __m128d c = __builtin_ia32_sqrtsd(b);
+ return (__m128d) { c[0], a[1] };
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_sqrt_pd(__m128d a)
+{
+ return __builtin_ia32_sqrtpd(a);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_min_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_minsd(a, b);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_min_pd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_minpd(a, b);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_max_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_maxsd(a, b);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_max_pd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_maxpd(a, b);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_and_pd(__m128d a, __m128d b)
+{
+ return (__m128d)((__v4si)a & (__v4si)b);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_andnot_pd(__m128d a, __m128d b)
+{
+ return (__m128d)(~(__v4si)a & (__v4si)b);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_or_pd(__m128d a, __m128d b)
+{
+ return (__m128d)((__v4si)a | (__v4si)b);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_xor_pd(__m128d a, __m128d b)
+{
+ return (__m128d)((__v4si)a ^ (__v4si)b);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(a, b, 0);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmplt_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(a, b, 1);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmple_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(a, b, 2);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(b, a, 1);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpge_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(b, a, 2);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpord_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(a, b, 7);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpunord_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(a, b, 3);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpneq_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(a, b, 4);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnlt_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(a, b, 5);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnle_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(a, b, 6);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpngt_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(b, a, 5);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnge_pd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmppd(b, a, 6);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(a, b, 0);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmplt_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(a, b, 1);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmple_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(a, b, 2);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(b, a, 1);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpge_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(b, a, 2);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpord_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(a, b, 7);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpunord_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(a, b, 3);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpneq_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(a, b, 4);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnlt_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(a, b, 5);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnle_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(a, b, 6);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpngt_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(b, a, 5);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnge_sd(__m128d a, __m128d b)
+{
+ return (__m128d)__builtin_ia32_cmpsd(b, a, 6);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_comieq_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_comisdeq(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_comilt_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_comisdlt(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_comile_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_comisdle(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_comigt_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_comisdgt(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_comige_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_comisdge(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_comineq_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_comisdneq(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomieq_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_ucomisdeq(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomilt_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_ucomisdlt(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomile_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_ucomisdle(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomigt_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_ucomisdgt(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomige_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_ucomisdge(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomineq_sd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_ucomisdneq(a, b);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpd_ps(__m128d a)
+{
+ return __builtin_ia32_cvtpd2ps(a);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cvtps_pd(__m128 a)
+{
+ return __builtin_ia32_cvtps2pd(a);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cvtepi32_pd(__m128i a)
+{
+ return __builtin_ia32_cvtdq2pd((__v4si)a);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpd_epi32(__m128d a)
+{
+ return __builtin_ia32_cvtpd2dq(a);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsd_si32(__m128d a)
+{
+ return __builtin_ia32_cvtsd2si(a);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsd_ss(__m128 a, __m128d b)
+{
+ a[0] = b[0];
+ return a;
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi32_sd(__m128d a, int b)
+{
+ a[0] = b;
+ return a;
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cvtss_sd(__m128d a, __m128 b)
+{
+ a[0] = b[0];
+ return a;
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvttpd_epi32(__m128d a)
+{
+ return (__m128i)__builtin_ia32_cvttpd2dq(a);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_cvttsd_si32(__m128d a)
+{
+ return a[0];
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpd_pi32(__m128d a)
+{
+ return (__m64)__builtin_ia32_cvtpd2pi(a);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvttpd_pi32(__m128d a)
+{
+ return (__m64)__builtin_ia32_cvttpd2pi(a);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpi32_pd(__m64 a)
+{
+ return __builtin_ia32_cvtpi2pd((__v2si)a);
+}
+
+static __inline__ double __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsd_f64(__m128d a)
+{
+ return a[0];
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_load_pd(double const *dp)
+{
+ return *(__m128d*)dp;
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_load1_pd(double const *dp)
+{
+ struct __mm_load1_pd_struct {
+ double u;
+ } __attribute__((__packed__, __may_alias__));
+ double u = ((struct __mm_load1_pd_struct*)dp)->u;
+ return (__m128d){ u, u };
+}
+
+#define _mm_load_pd1(dp) _mm_load1_pd(dp)
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_loadr_pd(double const *dp)
+{
+ __m128d u = *(__m128d*)dp;
+ return __builtin_shufflevector(u, u, 1, 0);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_loadu_pd(double const *dp)
+{
+ struct __loadu_pd {
+ __m128d v;
+ } __attribute__((packed, may_alias));
+ return ((struct __loadu_pd*)dp)->v;
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_load_sd(double const *dp)
+{
+ struct __mm_load_sd_struct {
+ double u;
+ } __attribute__((__packed__, __may_alias__));
+ double u = ((struct __mm_load_sd_struct*)dp)->u;
+ return (__m128d){ u, 0 };
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_loadh_pd(__m128d a, double const *dp)
+{
+ struct __mm_loadh_pd_struct {
+ double u;
+ } __attribute__((__packed__, __may_alias__));
+ double u = ((struct __mm_loadh_pd_struct*)dp)->u;
+ return (__m128d){ a[0], u };
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_loadl_pd(__m128d a, double const *dp)
+{
+ struct __mm_loadl_pd_struct {
+ double u;
+ } __attribute__((__packed__, __may_alias__));
+ double u = ((struct __mm_loadl_pd_struct*)dp)->u;
+ return (__m128d){ u, a[1] };
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_set_sd(double w)
+{
+ return (__m128d){ w, 0 };
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_set1_pd(double w)
+{
+ return (__m128d){ w, w };
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_set_pd(double w, double x)
+{
+ return (__m128d){ x, w };
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_setr_pd(double w, double x)
+{
+ return (__m128d){ w, x };
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_setzero_pd(void)
+{
+ return (__m128d){ 0, 0 };
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_move_sd(__m128d a, __m128d b)
+{
+ return (__m128d){ b[0], a[1] };
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_store_sd(double *dp, __m128d a)
+{
+ struct __mm_store_sd_struct {
+ double u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_store_sd_struct*)dp)->u = a[0];
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_store1_pd(double *dp, __m128d a)
+{
+ struct __mm_store1_pd_struct {
+ double u[2];
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_store1_pd_struct*)dp)->u[0] = a[0];
+ ((struct __mm_store1_pd_struct*)dp)->u[1] = a[0];
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_store_pd(double *dp, __m128d a)
+{
+ *(__m128d *)dp = a;
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_storeu_pd(double *dp, __m128d a)
+{
+ __builtin_ia32_storeupd(dp, a);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_storer_pd(double *dp, __m128d a)
+{
+ a = __builtin_shufflevector(a, a, 1, 0);
+ *(__m128d *)dp = a;
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_storeh_pd(double *dp, __m128d a)
+{
+ struct __mm_storeh_pd_struct {
+ double u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_storeh_pd_struct*)dp)->u = a[1];
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_storel_pd(double *dp, __m128d a)
+{
+ struct __mm_storeh_pd_struct {
+ double u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_storeh_pd_struct*)dp)->u = a[0];
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_add_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)((__v16qi)a + (__v16qi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_add_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)((__v8hi)a + (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_add_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)((__v4si)a + (__v4si)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_add_si64(__m64 a, __m64 b)
+{
+ return a + b;
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_add_epi64(__m128i a, __m128i b)
+{
+ return a + b;
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_adds_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_paddsb128((__v16qi)a, (__v16qi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_adds_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_paddsw128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_adds_epu8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_paddusb128((__v16qi)a, (__v16qi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_adds_epu16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_paddusw128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_avg_epu8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pavgb128((__v16qi)a, (__v16qi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_avg_epu16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pavgw128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_madd_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_max_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_max_epu8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pmaxub128((__v16qi)a, (__v16qi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_min_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pminsw128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_min_epu8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pminub128((__v16qi)a, (__v16qi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_mulhi_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pmulhw128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_mulhi_epu16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_mullo_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)((__v8hi)a * (__v8hi)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_mul_su32(__m64 a, __m64 b)
+{
+ return __builtin_ia32_pmuludq((__v2si)a, (__v2si)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_mul_epu32(__m128i a, __m128i b)
+{
+ return __builtin_ia32_pmuludq128((__v4si)a, (__v4si)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sad_epu8(__m128i a, __m128i b)
+{
+ return __builtin_ia32_psadbw128((__v16qi)a, (__v16qi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sub_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)((__v16qi)a - (__v16qi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sub_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)((__v8hi)a - (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sub_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)((__v4si)a - (__v4si)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sub_si64(__m64 a, __m64 b)
+{
+ return a - b;
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sub_epi64(__m128i a, __m128i b)
+{
+ return a - b;
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_subs_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_psubsb128((__v16qi)a, (__v16qi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_subs_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_psubsw128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_subs_epu8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_psubusb128((__v16qi)a, (__v16qi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_subs_epu16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_psubusw128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_and_si128(__m128i a, __m128i b)
+{
+ return a & b;
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_andnot_si128(__m128i a, __m128i b)
+{
+ return ~a & b;
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_or_si128(__m128i a, __m128i b)
+{
+ return a | b;
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_xor_si128(__m128i a, __m128i b)
+{
+ return a ^ b;
+}
+
+#define _mm_slli_si128(a, count) __extension__ ({ \
+ __m128i __a = (a); \
+ (__m128i)__builtin_ia32_pslldqi128(__a, (count)*8); })
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_slli_epi16(__m128i a, int count)
+{
+ return (__m128i)__builtin_ia32_psllwi128((__v8hi)a, count);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sll_epi16(__m128i a, __m128i count)
+{
+ return (__m128i)__builtin_ia32_psllw128((__v8hi)a, (__v8hi)count);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_slli_epi32(__m128i a, int count)
+{
+ return (__m128i)__builtin_ia32_pslldi128((__v4si)a, count);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sll_epi32(__m128i a, __m128i count)
+{
+ return (__m128i)__builtin_ia32_pslld128((__v4si)a, (__v4si)count);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_slli_epi64(__m128i a, int count)
+{
+ return __builtin_ia32_psllqi128(a, count);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sll_epi64(__m128i a, __m128i count)
+{
+ return __builtin_ia32_psllq128(a, count);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srai_epi16(__m128i a, int count)
+{
+ return (__m128i)__builtin_ia32_psrawi128((__v8hi)a, count);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sra_epi16(__m128i a, __m128i count)
+{
+ return (__m128i)__builtin_ia32_psraw128((__v8hi)a, (__v8hi)count);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srai_epi32(__m128i a, int count)
+{
+ return (__m128i)__builtin_ia32_psradi128((__v4si)a, count);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sra_epi32(__m128i a, __m128i count)
+{
+ return (__m128i)__builtin_ia32_psrad128((__v4si)a, (__v4si)count);
+}
+
+#define _mm_srli_si128(a, count) __extension__ ({ \
+ __m128i __a = (a); \
+ (__m128i)__builtin_ia32_psrldqi128(__a, (count)*8); })
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srli_epi16(__m128i a, int count)
+{
+ return (__m128i)__builtin_ia32_psrlwi128((__v8hi)a, count);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srl_epi16(__m128i a, __m128i count)
+{
+ return (__m128i)__builtin_ia32_psrlw128((__v8hi)a, (__v8hi)count);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srli_epi32(__m128i a, int count)
+{
+ return (__m128i)__builtin_ia32_psrldi128((__v4si)a, count);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srl_epi32(__m128i a, __m128i count)
+{
+ return (__m128i)__builtin_ia32_psrld128((__v4si)a, (__v4si)count);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srli_epi64(__m128i a, int count)
+{
+ return __builtin_ia32_psrlqi128(a, count);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_srl_epi64(__m128i a, __m128i count)
+{
+ return __builtin_ia32_psrlq128(a, count);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)((__v16qi)a == (__v16qi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)((__v8hi)a == (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)((__v4si)a == (__v4si)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_epi8(__m128i a, __m128i b)
+{
+ /* This function always performs a signed comparison, but __v16qi is a
+    vector of plain char, which may be signed or unsigned depending on the
+    target, so use an explicitly signed element type. */
+ typedef signed char __v16qs __attribute__((__vector_size__(16)));
+ return (__m128i)((__v16qs)a > (__v16qs)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)((__v8hi)a > (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)((__v4si)a > (__v4si)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmplt_epi8(__m128i a, __m128i b)
+{
+ return _mm_cmpgt_epi8(b,a);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmplt_epi16(__m128i a, __m128i b)
+{
+ return _mm_cmpgt_epi16(b,a);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmplt_epi32(__m128i a, __m128i b)
+{
+ return _mm_cmpgt_epi32(b,a);
+}
+
+#ifdef __x86_64__
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi64_sd(__m128d a, long long b)
+{
+ a[0] = b;
+ return a;
+}
+
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsd_si64(__m128d a)
+{
+ return __builtin_ia32_cvtsd2si64(a);
+}
+
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+_mm_cvttsd_si64(__m128d a)
+{
+ return a[0];
+}
+#endif
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtepi32_ps(__m128i a)
+{
+ return __builtin_ia32_cvtdq2ps((__v4si)a);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtps_epi32(__m128 a)
+{
+ return (__m128i)__builtin_ia32_cvtps2dq(a);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvttps_epi32(__m128 a)
+{
+ return (__m128i)__builtin_ia32_cvttps2dq(a);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi32_si128(int a)
+{
+ return (__m128i)(__v4si){ a, 0, 0, 0 };
+}
+
+#ifdef __x86_64__
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi64_si128(long long a)
+{
+ return (__m128i){ a, 0 };
+}
+#endif
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi128_si32(__m128i a)
+{
+ __v4si b = (__v4si)a;
+ return b[0];
+}
+
+#ifdef __x86_64__
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi128_si64(__m128i a)
+{
+ return a[0];
+}
+#endif
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_load_si128(__m128i const *p)
+{
+ return *p;
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_loadu_si128(__m128i const *p)
+{
+ struct __loadu_si128 {
+ __m128i v;
+ } __attribute__((packed, may_alias));
+ return ((struct __loadu_si128*)p)->v;
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_loadl_epi64(__m128i const *p)
+{
+ struct __mm_loadl_epi64_struct {
+ long long u;
+ } __attribute__((__packed__, __may_alias__));
+ return (__m128i) { ((struct __mm_loadl_epi64_struct*)p)->u, 0};
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set_epi64x(long long q1, long long q0)
+{
+ return (__m128i){ q0, q1 };
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set_epi64(__m64 q1, __m64 q0)
+{
+ return (__m128i){ (long long)q0, (long long)q1 };
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set_epi32(int i3, int i2, int i1, int i0)
+{
+ return (__m128i)(__v4si){ i0, i1, i2, i3};
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set_epi16(short w7, short w6, short w5, short w4, short w3, short w2, short w1, short w0)
+{
+ return (__m128i)(__v8hi){ w0, w1, w2, w3, w4, w5, w6, w7 };
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set_epi8(char b15, char b14, char b13, char b12, char b11, char b10, char b9, char b8, char b7, char b6, char b5, char b4, char b3, char b2, char b1, char b0)
+{
+ return (__m128i)(__v16qi){ b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15 };
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set1_epi64x(long long q)
+{
+ return (__m128i){ q, q };
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set1_epi64(__m64 q)
+{
+ return (__m128i){ (long long)q, (long long)q };
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set1_epi32(int i)
+{
+ return (__m128i)(__v4si){ i, i, i, i };
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set1_epi16(short w)
+{
+ return (__m128i)(__v8hi){ w, w, w, w, w, w, w, w };
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_set1_epi8(char b)
+{
+ return (__m128i)(__v16qi){ b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b };
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_setr_epi64(__m64 q0, __m64 q1)
+{
+ return (__m128i){ (long long)q0, (long long)q1 };
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_setr_epi32(int i0, int i1, int i2, int i3)
+{
+ return (__m128i)(__v4si){ i0, i1, i2, i3};
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_setr_epi16(short w0, short w1, short w2, short w3, short w4, short w5, short w6, short w7)
+{
+ return (__m128i)(__v8hi){ w0, w1, w2, w3, w4, w5, w6, w7 };
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_setr_epi8(char b0, char b1, char b2, char b3, char b4, char b5, char b6, char b7, char b8, char b9, char b10, char b11, char b12, char b13, char b14, char b15)
+{
+ return (__m128i)(__v16qi){ b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15 };
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_setzero_si128(void)
+{
+ return (__m128i){ 0LL, 0LL };
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_store_si128(__m128i *p, __m128i b)
+{
+ *p = b;
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_storeu_si128(__m128i *p, __m128i b)
+{
+ __builtin_ia32_storedqu((char *)p, (__v16qi)b);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_maskmoveu_si128(__m128i d, __m128i n, char *p)
+{
+ __builtin_ia32_maskmovdqu((__v16qi)d, (__v16qi)n, p);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_storel_epi64(__m128i *p, __m128i a)
+{
+ __builtin_ia32_storelv4si((__v2si *)p, a);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_stream_pd(double *p, __m128d a)
+{
+ __builtin_ia32_movntpd(p, a);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_stream_si128(__m128i *p, __m128i a)
+{
+ __builtin_ia32_movntdq(p, a);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_stream_si32(int *p, int a)
+{
+ __builtin_ia32_movnti(p, a);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_clflush(void const *p)
+{
+ __builtin_ia32_clflush(p);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_lfence(void)
+{
+ __builtin_ia32_lfence();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_mfence(void)
+{
+ __builtin_ia32_mfence();
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_packs_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_packsswb128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_packs_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_packssdw128((__v4si)a, (__v4si)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_packus_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_packuswb128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_extract_epi16(__m128i a, int imm)
+{
+ __v8hi b = (__v8hi)a;
+ return (unsigned short)b[imm];
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_insert_epi16(__m128i a, int b, int imm)
+{
+ __v8hi c = (__v8hi)a;
+ c[imm & 7] = b;
+ return (__m128i)c;
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_movemask_epi8(__m128i a)
+{
+ return __builtin_ia32_pmovmskb128((__v16qi)a);
+}
+
+#define _mm_shuffle_epi32(a, imm) __extension__ ({ \
+ __m128i __a = (a); \
+ (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si) _mm_set1_epi32(0), \
+ (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6); })
+
+#define _mm_shufflelo_epi16(a, imm) __extension__ ({ \
+ __m128i __a = (a); \
+ (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi) _mm_set1_epi16(0), \
+ (imm) & 0x3, ((imm) & 0xc) >> 2, \
+ ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
+ 4, 5, 6, 7); })
+
+#define _mm_shufflehi_epi16(a, imm) __extension__ ({ \
+ __m128i __a = (a); \
+ (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi) _mm_set1_epi16(0), \
+ 0, 1, 2, 3, \
+ 4 + (((imm) & 0x03) >> 0), \
+ 4 + (((imm) & 0x0c) >> 2), \
+ 4 + (((imm) & 0x30) >> 4), \
+ 4 + (((imm) & 0xc0) >> 6)); })
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_shufflevector((__v16qi)a, (__v16qi)b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_shufflevector((__v8hi)a, (__v8hi)b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_shufflevector((__v4si)a, (__v4si)b, 2, 4+2, 3, 4+3);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_epi64(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_shufflevector(a, b, 1, 2+1);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_shufflevector((__v16qi)a, (__v16qi)b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_shufflevector((__v8hi)a, (__v8hi)b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_shufflevector((__v4si)a, (__v4si)b, 0, 4+0, 1, 4+1);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_epi64(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_shufflevector(a, b, 0, 2+0);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_movepi64_pi64(__m128i a)
+{
+ return (__m64)a[0];
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_movpi64_pi64(__m64 a)
+{
+ return (__m128i){ (long long)a, 0 };
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_move_epi64(__m128i a)
+{
+ return __builtin_shufflevector(a, (__m128i){ 0 }, 0, 2);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_pd(__m128d a, __m128d b)
+{
+ return __builtin_shufflevector(a, b, 1, 2+1);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_pd(__m128d a, __m128d b)
+{
+ return __builtin_shufflevector(a, b, 0, 2+0);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_movemask_pd(__m128d a)
+{
+ return __builtin_ia32_movmskpd(a);
+}
+
+#define _mm_shuffle_pd(a, b, i) __extension__ ({ \
+ __m128d __a = (a); \
+ __m128d __b = (b); \
+ __builtin_shufflevector(__a, __b, (i) & 1, (((i) & 2) >> 1) + 2); })
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_castpd_ps(__m128d in)
+{
+ return (__m128)in;
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_castpd_si128(__m128d in)
+{
+ return (__m128i)in;
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_castps_pd(__m128 in)
+{
+ return (__m128d)in;
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_castps_si128(__m128 in)
+{
+ return (__m128i)in;
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_castsi128_ps(__m128i in)
+{
+ return (__m128)in;
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_castsi128_pd(__m128i in)
+{
+ return (__m128d)in;
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_pause(void)
+{
+ __asm__ volatile ("pause");
+}
+
+#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
+
+#endif /* __SSE2__ */
+
+#endif /* __EMMINTRIN_H */
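For illustration only, a short sketch using a few of the SSE2 intrinsics above, assuming -msse2; the values are arbitrary:

#include <emmintrin.h>
#include <stdio.h>

int main(void)
{
  __m128d a = _mm_set_pd(4.0, 2.0);   /* element 0 = 2.0, element 1 = 4.0 (last argument is the low lane) */
  __m128d b = _mm_set1_pd(0.5);
  __m128d c = _mm_mul_pd(a, b);       /* {1.0, 2.0} */
  double out[2];
  _mm_storeu_pd(out, c);
  printf("%f %f\n", out[0], out[1]);  /* 1.000000 2.000000 */

  __m128i v = _mm_set1_epi16(7);
  __m128i w = _mm_add_epi16(v, v);    /* eight lanes of 14 */
  printf("%d\n", _mm_extract_epi16(w, 0));
  return 0;
}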
diff --git a/contrib/llvm/tools/clang/lib/Headers/float.h b/contrib/llvm/tools/clang/lib/Headers/float.h
new file mode 100644
index 0000000..65b517d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/float.h
@@ -0,0 +1,124 @@
+/*===---- float.h - Characteristics of floating point types ----------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __FLOAT_H
+#define __FLOAT_H
+
+/* If we're on MinGW, fall back to the system's float.h, which might have
+ * additional definitions provided for Windows.
+ * For more details see http://msdn.microsoft.com/en-us/library/y0ybw9fy.aspx
+ */
+#if defined(__MINGW32__) && \
+ defined(__has_include_next) && __has_include_next(<float.h>)
+# include_next <float.h>
+
+/* Undefine anything that we'll be redefining below. */
+# undef FLT_EVAL_METHOD
+# undef FLT_ROUNDS
+# undef FLT_RADIX
+# undef FLT_MANT_DIG
+# undef DBL_MANT_DIG
+# undef LDBL_MANT_DIG
+# undef DECIMAL_DIG
+# undef FLT_DIG
+# undef DBL_DIG
+# undef LDBL_DIG
+# undef FLT_MIN_EXP
+# undef DBL_MIN_EXP
+# undef LDBL_MIN_EXP
+# undef FLT_MIN_10_EXP
+# undef DBL_MIN_10_EXP
+# undef LDBL_MIN_10_EXP
+# undef FLT_MAX_EXP
+# undef DBL_MAX_EXP
+# undef LDBL_MAX_EXP
+# undef FLT_MAX_10_EXP
+# undef DBL_MAX_10_EXP
+# undef LDBL_MAX_10_EXP
+# undef FLT_MAX
+# undef DBL_MAX
+# undef LDBL_MAX
+# undef FLT_EPSILON
+# undef DBL_EPSILON
+# undef LDBL_EPSILON
+# undef FLT_MIN
+# undef DBL_MIN
+# undef LDBL_MIN
+# if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
+# undef FLT_TRUE_MIN
+# undef DBL_TRUE_MIN
+# undef LDBL_TRUE_MIN
+# endif
+#endif
+
+/* Characteristics of floating point types, C99 5.2.4.2.2 */
+
+#define FLT_EVAL_METHOD __FLT_EVAL_METHOD__
+#define FLT_ROUNDS (__builtin_flt_rounds())
+#define FLT_RADIX __FLT_RADIX__
+
+#define FLT_MANT_DIG __FLT_MANT_DIG__
+#define DBL_MANT_DIG __DBL_MANT_DIG__
+#define LDBL_MANT_DIG __LDBL_MANT_DIG__
+
+#define DECIMAL_DIG __DECIMAL_DIG__
+
+#define FLT_DIG __FLT_DIG__
+#define DBL_DIG __DBL_DIG__
+#define LDBL_DIG __LDBL_DIG__
+
+#define FLT_MIN_EXP __FLT_MIN_EXP__
+#define DBL_MIN_EXP __DBL_MIN_EXP__
+#define LDBL_MIN_EXP __LDBL_MIN_EXP__
+
+#define FLT_MIN_10_EXP __FLT_MIN_10_EXP__
+#define DBL_MIN_10_EXP __DBL_MIN_10_EXP__
+#define LDBL_MIN_10_EXP __LDBL_MIN_10_EXP__
+
+#define FLT_MAX_EXP __FLT_MAX_EXP__
+#define DBL_MAX_EXP __DBL_MAX_EXP__
+#define LDBL_MAX_EXP __LDBL_MAX_EXP__
+
+#define FLT_MAX_10_EXP __FLT_MAX_10_EXP__
+#define DBL_MAX_10_EXP __DBL_MAX_10_EXP__
+#define LDBL_MAX_10_EXP __LDBL_MAX_10_EXP__
+
+#define FLT_MAX __FLT_MAX__
+#define DBL_MAX __DBL_MAX__
+#define LDBL_MAX __LDBL_MAX__
+
+#define FLT_EPSILON __FLT_EPSILON__
+#define DBL_EPSILON __DBL_EPSILON__
+#define LDBL_EPSILON __LDBL_EPSILON__
+
+#define FLT_MIN __FLT_MIN__
+#define DBL_MIN __DBL_MIN__
+#define LDBL_MIN __LDBL_MIN__
+
+#if __STDC_VERSION__ >= 201112L || !defined(__STRICT_ANSI__)
+# define FLT_TRUE_MIN __FLT_DENORM_MIN__
+# define DBL_TRUE_MIN __DBL_DENORM_MIN__
+# define LDBL_TRUE_MIN __LDBL_DENORM_MIN__
+#endif
+
+#endif /* __FLOAT_H */
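A small sketch printing a few of these characteristics (the values are target-dependent; FLT_ROUNDS is evaluated at run time via __builtin_flt_rounds):

#include <float.h>
#include <stdio.h>

int main(void)
{
  printf("FLT_RADIX    = %d\n", FLT_RADIX);
  printf("DBL_MANT_DIG = %d\n", DBL_MANT_DIG);
  printf("DBL_EPSILON  = %g\n", DBL_EPSILON);
  printf("FLT_ROUNDS   = %d\n", FLT_ROUNDS);
  return 0;
}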
diff --git a/contrib/llvm/tools/clang/lib/Headers/fma4intrin.h b/contrib/llvm/tools/clang/lib/Headers/fma4intrin.h
new file mode 100644
index 0000000..c30920d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/fma4intrin.h
@@ -0,0 +1,231 @@
+/*===---- fma4intrin.h - FMA4 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#error "Never use <fma4intrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __FMA4INTRIN_H
+#define __FMA4INTRIN_H
+
+#ifndef __FMA4__
+# error "FMA4 instruction set is not enabled"
+#else
+
+#include <pmmintrin.h>
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_macc_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_msub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_nmacc_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_nmsub_ss(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_nmsub_sd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_maddsub_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_maddsub_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_msubadd_ps(__m128 __A, __m128 __B, __m128 __C)
+{
+ return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C)
+{
+ return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_maddsub_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_maddsub_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C);
+}
+
+static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
+_mm256_msubadd_ps(__m256 __A, __m256 __B, __m256 __C)
+{
+ return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C);
+}
+
+static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
+_mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C)
+{
+ return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C);
+}
+
+#endif /* __FMA4__ */
+
+#endif /* __FMA4INTRIN_H */
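For illustration only, a minimal sketch of the FMA4 multiply-add wrappers above, assuming -mfma4 on a CPU with AMD FMA4 support; each lane computes a*b + c:

#include <x86intrin.h>
#include <stdio.h>

int main(void)
{
  __m128 a = _mm_set1_ps(2.0f);
  __m128 b = _mm_set1_ps(3.0f);
  __m128 c = _mm_set1_ps(1.0f);
  __m128 r = _mm_macc_ps(a, b, c);   /* every lane: 2*3 + 1 = 7.0f */
  float out[4];
  _mm_storeu_ps(out, r);
  printf("%f\n", out[0]);
  return 0;
}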
diff --git a/contrib/llvm/tools/clang/lib/Headers/immintrin.h b/contrib/llvm/tools/clang/lib/Headers/immintrin.h
new file mode 100644
index 0000000..1605525
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/immintrin.h
@@ -0,0 +1,75 @@
+/*===---- immintrin.h - Intel intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __IMMINTRIN_H
+#define __IMMINTRIN_H
+
+#ifdef __MMX__
+#include <mmintrin.h>
+#endif
+
+#ifdef __SSE__
+#include <xmmintrin.h>
+#endif
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+#endif
+
+#ifdef __SSE3__
+#include <pmmintrin.h>
+#endif
+
+#ifdef __SSSE3__
+#include <tmmintrin.h>
+#endif
+
+#if defined (__SSE4_2__) || defined (__SSE4_1__)
+#include <smmintrin.h>
+#endif
+
+#if defined (__AES__)
+#include <wmmintrin.h>
+#endif
+
+#ifdef __AVX__
+#include <avxintrin.h>
+#endif
+
+#ifdef __AVX2__
+#include <avx2intrin.h>
+#endif
+
+#ifdef __BMI__
+#include <bmiintrin.h>
+#endif
+
+#ifdef __BMI2__
+#include <bmi2intrin.h>
+#endif
+
+#ifdef __LZCNT__
+#include <lzcntintrin.h>
+#endif
+
+#endif /* __IMMINTRIN_H */
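immintrin.h itself defines nothing; it only includes the per-feature headers gated on the corresponding predefined macros. A small hedged sketch, assuming compilation with -mavx so __AVX__ (and the SSE macros) are defined:

#include <immintrin.h>   /* umbrella: pulls in xmmintrin.h ... avxintrin.h per feature macros */
#include <stdio.h>

int main(void)
{
    float a[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    float b[8] = {8, 7, 6, 5, 4, 3, 2, 1};
    float r[8];

    /* __AVX__ is defined under -mavx, so avxintrin.h was included above. */
    __m256 va = _mm256_loadu_ps(a);
    __m256 vb = _mm256_loadu_ps(b);
    _mm256_storeu_ps(r, _mm256_add_ps(va, vb));

    for (int i = 0; i < 8; i++)
        printf("%.0f ", r[i]);   /* prints 9 eight times */
    printf("\n");
    return 0;
}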
diff --git a/contrib/llvm/tools/clang/lib/Headers/iso646.h b/contrib/llvm/tools/clang/lib/Headers/iso646.h
new file mode 100644
index 0000000..dca13c5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/iso646.h
@@ -0,0 +1,43 @@
+/*===---- iso646.h - Standard header for alternate spellings of operators---===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __ISO646_H
+#define __ISO646_H
+
+#ifndef __cplusplus
+#define and &&
+#define and_eq &=
+#define bitand &
+#define bitor |
+#define compl ~
+#define not !
+#define not_eq !=
+#define or ||
+#define or_eq |=
+#define xor ^
+#define xor_eq ^=
+#endif
+
+#endif /* __ISO646_H */
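iso646.h only supplies the alternative operator spellings for C (in C++ they are keywords, hence the !__cplusplus guard). A short hedged example:

#include <iso646.h>
#include <stdio.h>

int main(void)
{
    int x = 6, y = 0;
    if (x and not y)            /* same as: if (x && !y) */
        puts("x is nonzero and y is zero");
    x or_eq 1;                  /* same as: x |= 1; */
    printf("%d\n", x);          /* prints 7 */
    return 0;
}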
diff --git a/contrib/llvm/tools/clang/lib/Headers/limits.h b/contrib/llvm/tools/clang/lib/Headers/limits.h
new file mode 100644
index 0000000..ecd09a4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/limits.h
@@ -0,0 +1,117 @@
+/*===---- limits.h - Standard header for integer sizes --------------------===*\
+ *
+ * Copyright (c) 2009 Chris Lattner
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __CLANG_LIMITS_H
+#define __CLANG_LIMITS_H
+
+/* The system's limits.h may, in turn, try to #include_next GCC's limits.h.
+ Avert this #include_next madness. */
+#if defined __GNUC__ && !defined _GCC_LIMITS_H_
+#define _GCC_LIMITS_H_
+#endif
+
+/* System headers include a number of constants from POSIX in <limits.h>.
+ Include it if we're hosted. */
+#if __STDC_HOSTED__ && \
+ defined(__has_include_next) && __has_include_next(<limits.h>)
+#include_next <limits.h>
+#endif
+
+/* Many system headers try to "help us out" by defining these. No really, we
+ know how big each datatype is. */
+#undef SCHAR_MIN
+#undef SCHAR_MAX
+#undef UCHAR_MAX
+#undef SHRT_MIN
+#undef SHRT_MAX
+#undef USHRT_MAX
+#undef INT_MIN
+#undef INT_MAX
+#undef UINT_MAX
+#undef LONG_MIN
+#undef LONG_MAX
+#undef ULONG_MAX
+
+#undef CHAR_BIT
+#undef CHAR_MIN
+#undef CHAR_MAX
+
+/* C90/99 5.2.4.2.1 */
+#define SCHAR_MAX __SCHAR_MAX__
+#define SHRT_MAX __SHRT_MAX__
+#define INT_MAX __INT_MAX__
+#define LONG_MAX __LONG_MAX__
+
+#define SCHAR_MIN (-__SCHAR_MAX__-1)
+#define SHRT_MIN (-__SHRT_MAX__ -1)
+#define INT_MIN (-__INT_MAX__ -1)
+#define LONG_MIN (-__LONG_MAX__ -1L)
+
+#define UCHAR_MAX (__SCHAR_MAX__*2 +1)
+#define USHRT_MAX (__SHRT_MAX__ *2 +1)
+#define UINT_MAX (__INT_MAX__ *2U +1U)
+#define ULONG_MAX (__LONG_MAX__ *2UL+1UL)
+
+#ifndef MB_LEN_MAX
+#define MB_LEN_MAX 1
+#endif
+
+#define CHAR_BIT __CHAR_BIT__
+
+#ifdef __CHAR_UNSIGNED__ /* -funsigned-char */
+#define CHAR_MIN 0
+#define CHAR_MAX UCHAR_MAX
+#else
+#define CHAR_MIN SCHAR_MIN
+#define CHAR_MAX __SCHAR_MAX__
+#endif
+
+/* C99 5.2.4.2.1: Added long long. */
+#if __STDC_VERSION__ >= 199901
+
+#undef LLONG_MIN
+#undef LLONG_MAX
+#undef ULLONG_MAX
+
+#define LLONG_MAX __LONG_LONG_MAX__
+#define LLONG_MIN (-__LONG_LONG_MAX__-1LL)
+#define ULLONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
+#endif
+
+/* LONG_LONG_MIN/LONG_LONG_MAX/ULONG_LONG_MAX are a GNU extension. It's too bad
+ that we don't have something like #pragma poison that could be used to
+ deprecate a macro - the code should just use LLONG_MAX and friends.
+ */
+#if defined(__GNU_LIBRARY__) ? defined(__USE_GNU) : !defined(__STRICT_ANSI__)
+
+#undef LONG_LONG_MIN
+#undef LONG_LONG_MAX
+#undef ULONG_LONG_MAX
+
+#define LONG_LONG_MAX __LONG_LONG_MAX__
+#define LONG_LONG_MIN (-__LONG_LONG_MAX__-1LL)
+#define ULONG_LONG_MAX (__LONG_LONG_MAX__*2ULL+1ULL)
+#endif
+
+#endif /* __CLANG_LIMITS_H */
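The signed minima above are spelled as (-MAX - 1) because the compiler only predefines the __*_MAX__ macros. A hedged example of the resulting values, assuming a typical target with 32-bit int:

#include <limits.h>
#include <stdio.h>

int main(void)
{
    /* INT_MIN expands to (-__INT_MAX__ -1); with 32-bit int that is -2147483648. */
    printf("CHAR_BIT  = %d\n", CHAR_BIT);
    printf("INT_MAX   = %d, INT_MIN = %d\n", INT_MAX, INT_MIN);
    printf("UCHAR_MAX = %u (i.e. SCHAR_MAX*2+1 = %d)\n",
           (unsigned)UCHAR_MAX, SCHAR_MAX * 2 + 1);
    return 0;
}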
diff --git a/contrib/llvm/tools/clang/lib/Headers/lzcntintrin.h b/contrib/llvm/tools/clang/lib/Headers/lzcntintrin.h
new file mode 100644
index 0000000..62ab5ca
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/lzcntintrin.h
@@ -0,0 +1,55 @@
+/*===---- lzcntintrin.h - LZCNT intrinsics ---------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined __X86INTRIN_H && !defined __IMMINTRIN_H
+#error "Never use <lzcntintrin.h> directly; include <x86intrin.h> instead."
+#endif
+
+#ifndef __LZCNT__
+# error "LZCNT instruction is not enabled"
+#endif /* __LZCNT__ */
+
+#ifndef __LZCNTINTRIN_H
+#define __LZCNTINTRIN_H
+
+static __inline__ unsigned short __attribute__((__always_inline__, __nodebug__))
+__lzcnt16(unsigned short __X)
+{
+ return __builtin_clzs(__X);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+__lzcnt32(unsigned int __X)
+{
+ return __builtin_clz(__X);
+}
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+__lzcnt64(unsigned long long __X)
+{
+ return __builtin_clzll(__X);
+}
+#endif
+
+#endif /* __LZCNTINTRIN_H */
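A hedged usage sketch, assuming -mlzcnt. Because the wrappers map onto __builtin_clz*, the sketch avoids a zero argument (the generic builtin is documented as undefined for 0, even though the LZCNT instruction itself handles it):

#include <x86intrin.h>   /* exposes lzcntintrin.h when __LZCNT__ is defined */
#include <stdio.h>

int main(void)
{
    unsigned int v = 0x00010000u;                 /* bit 16 set */
    printf("%u\n", __lzcnt32(v));                 /* 15 leading zeros in a 32-bit value */
    printf("%u\n", (unsigned)__lzcnt16((unsigned short)0x0001));  /* 15 */
    return 0;
}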
diff --git a/contrib/llvm/tools/clang/lib/Headers/mm3dnow.h b/contrib/llvm/tools/clang/lib/Headers/mm3dnow.h
new file mode 100644
index 0000000..d5236f8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/mm3dnow.h
@@ -0,0 +1,161 @@
+/*===---- mm3dnow.h - 3DNow! intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _MM3DNOW_H_INCLUDED
+#define _MM3DNOW_H_INCLUDED
+
+#include <mmintrin.h>
+
+typedef float __v2sf __attribute__((__vector_size__(8)));
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_m_femms() {
+ __builtin_ia32_femms();
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pavgusb(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pavgusb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pf2id(__m64 __m) {
+ return (__m64)__builtin_ia32_pf2id((__v2sf)__m);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pfacc(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfacc((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pfadd(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfadd((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pfcmpeq(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfcmpeq((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pfcmpge(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfcmpge((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pfcmpgt(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfcmpgt((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pfmax(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfmax((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pfmin(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfmin((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pfmul(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfmul((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pfrcp(__m64 __m) {
+ return (__m64)__builtin_ia32_pfrcp((__v2sf)__m);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pfrcpit1(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfrcpit1((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pfrcpit2(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfrcpit2((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pfrsqrt(__m64 __m) {
+ return (__m64)__builtin_ia32_pfrsqrt((__v2sf)__m);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pfrsqrtit1(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfrsqit1((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pfsub(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfsub((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pfsubr(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfsubr((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pi2fd(__m64 __m) {
+ return (__m64)__builtin_ia32_pi2fd((__v2si)__m);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pmulhrw(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pmulhrw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pf2iw(__m64 __m) {
+ return (__m64)__builtin_ia32_pf2iw((__v2sf)__m);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pfnacc(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfnacc((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pfpnacc(__m64 __m1, __m64 __m2) {
+ return (__m64)__builtin_ia32_pfpnacc((__v2sf)__m1, (__v2sf)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pi2fw(__m64 __m) {
+ return (__m64)__builtin_ia32_pi2fw((__v2si)__m);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pswapdsf(__m64 __m) {
+ return (__m64)__builtin_ia32_pswapdsf((__v2sf)__m);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_m_pswapdsi(__m64 __m) {
+ return (__m64)__builtin_ia32_pswapdsi((__v2si)__m);
+}
+
+#endif
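A hedged sketch of the 3DNow! wrappers, assuming a target/compiler configuration that still accepts -m3dnow; _m_femms() is called before returning because the MMX/3DNow! registers alias the x87 floating-point stack:

#include <mm3dnow.h>
#include <string.h>

/* Adds two pairs of packed floats and returns the low element. */
float add_pair_low(__m64 a, __m64 b)
{
    __m64 r = _m_pfadd(a, b);      /* {a0+b0, a1+b1} */
    float out[2];
    memcpy(out, &r, sizeof out);
    _m_femms();                    /* leave MMX/3DNow! state before any x87 FP use */
    return out[0];
}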
diff --git a/contrib/llvm/tools/clang/lib/Headers/mm_malloc.h b/contrib/llvm/tools/clang/lib/Headers/mm_malloc.h
new file mode 100644
index 0000000..5fa1761
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/mm_malloc.h
@@ -0,0 +1,75 @@
+/*===---- mm_malloc.h - Allocating and Freeing Aligned Memory Blocks -------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __MM_MALLOC_H
+#define __MM_MALLOC_H
+
+#include <stdlib.h>
+
+#ifdef _WIN32
+#include <malloc.h>
+#else
+#ifndef __cplusplus
+extern int posix_memalign(void **memptr, size_t alignment, size_t size);
+#else
+// Some systems (e.g. those with GNU libc) declare posix_memalign with an
+// exception specifier. Via an "egregious workaround" in
+// Sema::CheckEquivalentExceptionSpec, Clang accepts the following as a valid
+// redeclaration of glibc's declaration.
+extern "C" int posix_memalign(void **memptr, size_t alignment, size_t size);
+#endif
+#endif
+
+#if !(defined(_WIN32) && defined(_mm_malloc))
+static __inline__ void *__attribute__((__always_inline__, __nodebug__,
+ __malloc__))
+_mm_malloc(size_t size, size_t align)
+{
+ if (align == 1) {
+ return malloc(size);
+ }
+
+ if (!(align & (align - 1)) && align < sizeof(void *))
+ align = sizeof(void *);
+
+ void *mallocedMemory;
+#if defined(__MINGW32__)
+ mallocedMemory = __mingw_aligned_malloc(size, align);
+#elif defined(_WIN32)
+ mallocedMemory = _aligned_malloc(size, align);
+#else
+ if (posix_memalign(&mallocedMemory, align, size))
+ return 0;
+#endif
+
+ return mallocedMemory;
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_free(void *p)
+{
+ free(p);
+}
+#endif
+
+#endif /* __MM_MALLOC_H */
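A hedged usage sketch of the aligned-allocation pair defined above:

#include <mm_malloc.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Request 1024 bytes aligned to a 32-byte boundary (e.g. for aligned AVX loads). */
    float *buf = _mm_malloc(1024, 32);
    if (!buf)
        return 1;
    printf("aligned: %d\n", ((uintptr_t)buf % 32) == 0);
    _mm_free(buf);   /* memory from _mm_malloc must be released with _mm_free */
    return 0;
}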
diff --git a/contrib/llvm/tools/clang/lib/Headers/mmintrin.h b/contrib/llvm/tools/clang/lib/Headers/mmintrin.h
new file mode 100644
index 0000000..986870a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/mmintrin.h
@@ -0,0 +1,503 @@
+/*===---- mmintrin.h - MMX intrinsics --------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __MMINTRIN_H
+#define __MMINTRIN_H
+
+#ifndef __MMX__
+#error "MMX instruction set not enabled"
+#else
+
+typedef long long __m64 __attribute__((__vector_size__(8)));
+
+typedef int __v2si __attribute__((__vector_size__(8)));
+typedef short __v4hi __attribute__((__vector_size__(8)));
+typedef char __v8qi __attribute__((__vector_size__(8)));
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_empty(void)
+{
+ __builtin_ia32_emms();
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi32_si64(int __i)
+{
+ return (__m64)__builtin_ia32_vec_init_v2si(__i, 0);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi64_si32(__m64 __m)
+{
+ return __builtin_ia32_vec_ext_v2si((__v2si)__m, 0);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi64_m64(long long __i)
+{
+ return (__m64)__i;
+}
+
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+_mm_cvtm64_si64(__m64 __m)
+{
+ return (long long)__m;
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_packs_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_packs_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_packs_pu16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_packuswb((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpckhbw((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpckhwd((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpckhdq((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpcklbw((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpcklwd((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_punpckldq((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_add_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_add_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_add_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddd((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_adds_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_adds_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_adds_pu8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_adds_pu16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_paddusw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sub_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sub_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sub_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubd((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_subs_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_subs_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubsw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_subs_pu8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubusb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_subs_pu16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_psubusw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_madd_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pmaddwd((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_mulhi_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pmulhw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_mullo_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pmullw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sll_pi16(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psllw((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_slli_pi16(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sll_pi32(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_pslld((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_slli_pi32(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_pslldi((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sll_si64(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psllq(__m, __count);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_slli_si64(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psllqi(__m, __count);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sra_pi16(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psraw((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_srai_pi16(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrawi((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sra_pi32(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrad((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_srai_pi32(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psradi((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_srl_pi16(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrlw((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_srli_pi16(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_srl_pi32(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrld((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_srli_pi32(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrldi((__v2si)__m, __count);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_srl_si64(__m64 __m, __m64 __count)
+{
+ return (__m64)__builtin_ia32_psrlq(__m, __count);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_srli_si64(__m64 __m, int __count)
+{
+ return (__m64)__builtin_ia32_psrlqi(__m, __count);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_and_si64(__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_pand(__m1, __m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_andnot_si64(__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_pandn(__m1, __m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_or_si64(__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_por(__m1, __m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_xor_si64(__m64 __m1, __m64 __m2)
+{
+ return __builtin_ia32_pxor(__m1, __m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpeqb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpeqw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpeqd((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpgtb((__v8qi)__m1, (__v8qi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpgtw((__v4hi)__m1, (__v4hi)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_pi32(__m64 __m1, __m64 __m2)
+{
+ return (__m64)__builtin_ia32_pcmpgtd((__v2si)__m1, (__v2si)__m2);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_setzero_si64(void)
+{
+ return (__m64){ 0LL };
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_set_pi32(int __i1, int __i0)
+{
+ return (__m64)__builtin_ia32_vec_init_v2si(__i0, __i1);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_set_pi16(short __s3, short __s2, short __s1, short __s0)
+{
+ return (__m64)__builtin_ia32_vec_init_v4hi(__s0, __s1, __s2, __s3);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2,
+ char __b1, char __b0)
+{
+ return (__m64)__builtin_ia32_vec_init_v8qi(__b0, __b1, __b2, __b3,
+ __b4, __b5, __b6, __b7);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_set1_pi32(int __i)
+{
+ return _mm_set_pi32(__i, __i);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_set1_pi16(short __w)
+{
+ return _mm_set_pi16(__w, __w, __w, __w);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_set1_pi8(char __b)
+{
+ return _mm_set_pi8(__b, __b, __b, __b, __b, __b, __b, __b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_setr_pi32(int __i0, int __i1)
+{
+ return _mm_set_pi32(__i1, __i0);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_setr_pi16(short __w0, short __w1, short __w2, short __w3)
+{
+ return _mm_set_pi16(__w3, __w2, __w1, __w0);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_setr_pi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
+ char __b6, char __b7)
+{
+ return _mm_set_pi8(__b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
+}
+
+
+/* Aliases for compatibility. */
+#define _m_empty _mm_empty
+#define _m_from_int _mm_cvtsi32_si64
+#define _m_to_int _mm_cvtsi64_si32
+#define _m_packsswb _mm_packs_pi16
+#define _m_packssdw _mm_packs_pi32
+#define _m_packuswb _mm_packs_pu16
+#define _m_punpckhbw _mm_unpackhi_pi8
+#define _m_punpckhwd _mm_unpackhi_pi16
+#define _m_punpckhdq _mm_unpackhi_pi32
+#define _m_punpcklbw _mm_unpacklo_pi8
+#define _m_punpcklwd _mm_unpacklo_pi16
+#define _m_punpckldq _mm_unpacklo_pi32
+#define _m_paddb _mm_add_pi8
+#define _m_paddw _mm_add_pi16
+#define _m_paddd _mm_add_pi32
+#define _m_paddsb _mm_adds_pi8
+#define _m_paddsw _mm_adds_pi16
+#define _m_paddusb _mm_adds_pu8
+#define _m_paddusw _mm_adds_pu16
+#define _m_psubb _mm_sub_pi8
+#define _m_psubw _mm_sub_pi16
+#define _m_psubd _mm_sub_pi32
+#define _m_psubsb _mm_subs_pi8
+#define _m_psubsw _mm_subs_pi16
+#define _m_psubusb _mm_subs_pu8
+#define _m_psubusw _mm_subs_pu16
+#define _m_pmaddwd _mm_madd_pi16
+#define _m_pmulhw _mm_mulhi_pi16
+#define _m_pmullw _mm_mullo_pi16
+#define _m_psllw _mm_sll_pi16
+#define _m_psllwi _mm_slli_pi16
+#define _m_pslld _mm_sll_pi32
+#define _m_pslldi _mm_slli_pi32
+#define _m_psllq _mm_sll_si64
+#define _m_psllqi _mm_slli_si64
+#define _m_psraw _mm_sra_pi16
+#define _m_psrawi _mm_srai_pi16
+#define _m_psrad _mm_sra_pi32
+#define _m_psradi _mm_srai_pi32
+#define _m_psrlw _mm_srl_pi16
+#define _m_psrlwi _mm_srli_pi16
+#define _m_psrld _mm_srl_pi32
+#define _m_psrldi _mm_srli_pi32
+#define _m_psrlq _mm_srl_si64
+#define _m_psrlqi _mm_srli_si64
+#define _m_pand _mm_and_si64
+#define _m_pandn _mm_andnot_si64
+#define _m_por _mm_or_si64
+#define _m_pxor _mm_xor_si64
+#define _m_pcmpeqb _mm_cmpeq_pi8
+#define _m_pcmpeqw _mm_cmpeq_pi16
+#define _m_pcmpeqd _mm_cmpeq_pi32
+#define _m_pcmpgtb _mm_cmpgt_pi8
+#define _m_pcmpgtw _mm_cmpgt_pi16
+#define _m_pcmpgtd _mm_cmpgt_pi32
+
+#endif /* __MMX__ */
+
+#endif /* __MMINTRIN_H */
+
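A hedged MMX sketch, assuming -mmmx; _mm_empty() (EMMS) is issued before any later x87 use, since the MMX registers alias the FPU stack:

#include <mmintrin.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
    __m64 a = _mm_set_pi16(4, 3, 2, 1);   /* arguments are given high-to-low */
    __m64 b = _mm_set1_pi16(10);
    __m64 s = _mm_add_pi16(a, b);         /* low-to-high: {11, 12, 13, 14} */

    short out[4];
    memcpy(out, &s, sizeof out);
    _mm_empty();                          /* EMMS: clear MMX state */
    printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]);
    return 0;
}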
diff --git a/contrib/llvm/tools/clang/lib/Headers/module.map b/contrib/llvm/tools/clang/lib/Headers/module.map
new file mode 100644
index 0000000..418ba50
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/module.map
@@ -0,0 +1,108 @@
+module _Builtin_intrinsics [system] {
+ explicit module altivec {
+ requires altivec
+ header "altivec.h"
+ }
+
+ explicit module intel {
+ requires x86
+ export *
+
+ header "immintrin.h"
+ header "x86intrin.h"
+
+ explicit module mm_malloc {
+ header "mm_malloc.h"
+ export * // note: for <stdlib.h> dependency
+ }
+
+ explicit module cpuid {
+ header "cpuid.h"
+ }
+
+ explicit module mmx {
+ requires mmx
+ header "mmintrin.h"
+ }
+
+ explicit module sse {
+ requires sse
+ export mmx
+ export * // note: for hackish <emmintrin.h> dependency
+ header "xmmintrin.h"
+ }
+
+ explicit module sse2 {
+ requires sse2
+ export sse
+ header "emmintrin.h"
+ }
+
+ explicit module sse3 {
+ requires sse3
+ export sse2
+ header "pmmintrin.h"
+ }
+
+ explicit module ssse3 {
+ requires ssse3
+ export sse3
+ header "tmmintrin.h"
+ }
+
+ explicit module sse4_1 {
+ requires sse41
+ export ssse3
+ header "smmintrin.h"
+ }
+
+ explicit module sse4_2 {
+ requires sse42
+ export sse4_1
+ header "nmmintrin.h"
+ }
+
+ explicit module avx {
+ requires avx
+ export sse4_2
+ header "avxintrin.h"
+ }
+
+ explicit module avx2 {
+ requires avx2
+ export avx
+ header "avx2intrin.h"
+ }
+
+ explicit module bmi {
+ requires bmi
+ header "bmiintrin.h"
+ }
+
+ explicit module bmi2 {
+ requires bmi2
+ header "bmi2intrin.h"
+ }
+
+ explicit module fma4 {
+ requires fma4
+ export sse3
+ header "fma4intrin.h"
+ }
+
+ explicit module lzcnt {
+ requires lzcnt
+ header "lzcntintrin.h"
+ }
+
+ explicit module popcnt {
+ requires popcnt
+ header "popcntintrin.h"
+ }
+
+ explicit module mm3dnow {
+ requires mm3dnow
+ header "mm3dnow.h"
+ }
+ }
+}
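A hedged sketch of how this map is exercised: when modules are enabled (e.g. clang -fmodules -msse2), a textual include of one of the listed headers is treated as an import of the corresponding submodule, here _Builtin_intrinsics.intel.sse2; the function below is only an illustration:

/* With -fmodules -msse2 this include maps to _Builtin_intrinsics.intel.sse2. */
#include <emmintrin.h>

double sum2(__m128d v)
{
    double out[2];
    _mm_storeu_pd(out, v);
    return out[0] + out[1];
}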
diff --git a/contrib/llvm/tools/clang/lib/Headers/nmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/nmmintrin.h
new file mode 100644
index 0000000..f12622d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/nmmintrin.h
@@ -0,0 +1,35 @@
+/*===---- nmmintrin.h - SSE4 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _NMMINTRIN_H
+#define _NMMINTRIN_H
+
+#ifndef __SSE4_2__
+#error "SSE4.2 instruction set not enabled"
+#else
+
+/* To match gcc's expectations, the SSE4.2 definitions live in smmintrin.h,
+ so just include that header here. */
+#include <smmintrin.h>
+#endif /* __SSE4_2__ */
+#endif /* _NMMINTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/pmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/pmmintrin.h
new file mode 100644
index 0000000..5f9b097
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/pmmintrin.h
@@ -0,0 +1,117 @@
+/*===---- pmmintrin.h - SSE3 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __PMMINTRIN_H
+#define __PMMINTRIN_H
+
+#ifndef __SSE3__
+#error "SSE3 instruction set not enabled"
+#else
+
+#include <emmintrin.h>
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_lddqu_si128(__m128i const *p)
+{
+ return (__m128i)__builtin_ia32_lddqu((char const *)p);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_addsub_ps(__m128 a, __m128 b)
+{
+ return __builtin_ia32_addsubps(a, b);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_hadd_ps(__m128 a, __m128 b)
+{
+ return __builtin_ia32_haddps(a, b);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_hsub_ps(__m128 a, __m128 b)
+{
+ return __builtin_ia32_hsubps(a, b);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_movehdup_ps(__m128 a)
+{
+ return __builtin_shufflevector(a, a, 1, 1, 3, 3);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_moveldup_ps(__m128 a)
+{
+ return __builtin_shufflevector(a, a, 0, 0, 2, 2);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_addsub_pd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_addsubpd(a, b);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_hadd_pd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_haddpd(a, b);
+}
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_hsub_pd(__m128d a, __m128d b)
+{
+ return __builtin_ia32_hsubpd(a, b);
+}
+
+#define _mm_loaddup_pd(dp) _mm_load1_pd(dp)
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_movedup_pd(__m128d a)
+{
+ return __builtin_shufflevector(a, a, 0, 0);
+}
+
+#define _MM_DENORMALS_ZERO_ON (0x0040)
+#define _MM_DENORMALS_ZERO_OFF (0x0000)
+
+#define _MM_DENORMALS_ZERO_MASK (0x0040)
+
+#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
+#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_monitor(void const *p, unsigned extensions, unsigned hints)
+{
+ __builtin_ia32_monitor((void *)p, extensions, hints);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_mwait(unsigned extensions, unsigned hints)
+{
+ __builtin_ia32_mwait(extensions, hints);
+}
+
+#endif /* __SSE3__ */
+
+#endif /* __PMMINTRIN_H */
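A hedged SSE3 sketch, assuming -msse3; it shows the horizontal add and the denormals-are-zero control macro defined above:

#include <pmmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);   /* low-to-high: {1, 2, 3, 4} */
    __m128 h = _mm_hadd_ps(a, a);                    /* pairwise sums: {3, 7, 3, 7} */

    float out[4];
    _mm_storeu_ps(out, h);
    printf("%.1f %.1f\n", out[0], out[1]);           /* 3.0 7.0 */

    /* Treat denormal inputs as zero for subsequent SSE arithmetic. */
    _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
    return 0;
}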
diff --git a/contrib/llvm/tools/clang/lib/Headers/popcntintrin.h b/contrib/llvm/tools/clang/lib/Headers/popcntintrin.h
new file mode 100644
index 0000000..d439daa
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/popcntintrin.h
@@ -0,0 +1,45 @@
+/*===---- popcntintrin.h - POPCNT intrinsics -------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __POPCNT__
+#error "POPCNT instruction set not enabled"
+#endif
+
+#ifndef _POPCNTINTRIN_H
+#define _POPCNTINTRIN_H
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_popcnt_u32(unsigned int __A)
+{
+ return __builtin_popcount(__A);
+}
+
+#ifdef __x86_64__
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+_mm_popcnt_u64(unsigned long long __A)
+{
+ return __builtin_popcountll(__A);
+}
+#endif /* __x86_64__ */
+
+#endif /* _POPCNTINTRIN_H */
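A hedged sketch, assuming -mpopcnt (the header can also arrive via <smmintrin.h> or <immintrin.h> when __POPCNT__ is defined):

#include <popcntintrin.h>
#include <stdio.h>

int main(void)
{
    printf("%d\n", _mm_popcnt_u32(0xF0F0u));          /* 8 set bits */
#ifdef __x86_64__
    printf("%lld\n", _mm_popcnt_u64(0xFFFFFFFFull));  /* 32 set bits */
#endif
    return 0;
}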
diff --git a/contrib/llvm/tools/clang/lib/Headers/smmintrin.h b/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
new file mode 100644
index 0000000..2fab50e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/smmintrin.h
@@ -0,0 +1,467 @@
+/*===---- smmintrin.h - SSE4 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _SMMINTRIN_H
+#define _SMMINTRIN_H
+
+#ifndef __SSE4_1__
+#error "SSE4.1 instruction set not enabled"
+#else
+
+#include <tmmintrin.h>
+
+/* SSE4 Rounding macros. */
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+
+#define _MM_FROUND_RAISE_EXC 0x00
+#define _MM_FROUND_NO_EXC 0x08
+
+#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
+#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
+#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
+#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
+#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
+#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)
+
+#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)
+#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)
+#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
+#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
+
+#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)
+#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)
+#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
+#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
+
+#define _mm_round_ps(X, M) __extension__ ({ \
+ __m128 __X = (X); \
+ (__m128) __builtin_ia32_roundps((__v4sf)__X, (M)); })
+
+#define _mm_round_ss(X, Y, M) __extension__ ({ \
+ __m128 __X = (X); \
+ __m128 __Y = (Y); \
+ (__m128) __builtin_ia32_roundss((__v4sf)__X, (__v4sf)__Y, (M)); })
+
+#define _mm_round_pd(X, M) __extension__ ({ \
+ __m128d __X = (X); \
+ (__m128d) __builtin_ia32_roundpd((__v2df)__X, (M)); })
+
+#define _mm_round_sd(X, Y, M) __extension__ ({ \
+ __m128d __X = (X); \
+ __m128d __Y = (Y); \
+ (__m128d) __builtin_ia32_roundsd((__v2df)__X, (__v2df)__Y, (M)); })
+
+/* SSE4 Packed Blending Intrinsics. */
+#define _mm_blend_pd(V1, V2, M) __extension__ ({ \
+ __m128d __V1 = (V1); \
+ __m128d __V2 = (V2); \
+ (__m128d) __builtin_ia32_blendpd ((__v2df)__V1, (__v2df)__V2, (M)); })
+
+#define _mm_blend_ps(V1, V2, M) __extension__ ({ \
+ __m128 __V1 = (V1); \
+ __m128 __V2 = (V2); \
+ (__m128) __builtin_ia32_blendps ((__v4sf)__V1, (__v4sf)__V2, (M)); })
+
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
+_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
+{
+ return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
+ (__v2df)__M);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
+{
+ return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
+ (__v4sf)__M);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
+{
+ return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
+ (__v16qi)__M);
+}
+
+#define _mm_blend_epi16(V1, V2, M) __extension__ ({ \
+ __m128i __V1 = (V1); \
+ __m128i __V2 = (V2); \
+ (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__V1, (__v8hi)__V2, (M)); })
+
+/* SSE4 Dword Multiply Instructions. */
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_mullo_epi32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) ((__v4si)__V1 * (__v4si)__V2);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_mul_epi32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
+}
+
+/* SSE4 Floating Point Dot Product Instructions. */
+#define _mm_dp_ps(X, Y, M) __extension__ ({ \
+ __m128 __X = (X); \
+ __m128 __Y = (Y); \
+ (__m128) __builtin_ia32_dpps((__v4sf)__X, (__v4sf)__Y, (M)); })
+
+#define _mm_dp_pd(X, Y, M) __extension__ ({\
+ __m128d __X = (X); \
+ __m128d __Y = (Y); \
+ (__m128d) __builtin_ia32_dppd((__v2df)__X, (__v2df)__Y, (M)); })
+
+/* SSE4 Streaming Load Hint Instruction. */
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_stream_load_si128 (__m128i *__V)
+{
+ return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __V);
+}
+
+/* SSE4 Packed Integer Min/Max Instructions. */
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_min_epi8 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_max_epi8 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_min_epu16 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_max_epu16 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_min_epi32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_max_epi32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_min_epu32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_max_epu32 (__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
+}
+
+/* SSE4 Insertion and Extraction from XMM Register Instructions. */
+#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))
+#define _mm_extract_ps(X, N) (__extension__ \
+ ({ union { int i; float f; } __t; \
+ __v4sf __a = (__v4sf)(X); \
+ __t.f = __a[N]; \
+ __t.i;}))
+
+/* Miscellaneous insert and extract macros. */
+/* Extract a single-precision float from X at index N into D. */
+#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \
+ (D) = __a[N]; }))
+
+/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
+ an index suitable for _mm_insert_ps. */
+#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))
+
+/* Extract a float from X at index N into the first index of the return. */
+#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X), \
+ _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
+
+/* Insert int into packed integer array at index. */
+#define _mm_insert_epi8(X, I, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
+ __a[(N)] = (I); \
+ __a;}))
+#define _mm_insert_epi32(X, I, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
+ __a[(N)] = (I); \
+ __a;}))
+#ifdef __x86_64__
+#define _mm_insert_epi64(X, I, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
+ __a[(N)] = (I); \
+ __a;}))
+#endif /* __x86_64__ */
+
+/* Extract int from packed integer array at index. This returns the element
+ * as a zero-extended value, so it is unsigned.
+ */
+#define _mm_extract_epi8(X, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
+ (unsigned char)__a[(N)];}))
+#define _mm_extract_epi32(X, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
+ (unsigned)__a[(N)];}))
+#ifdef __x86_64__
+#define _mm_extract_epi64(X, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
+ __a[(N)];}))
+#endif /* __x86_64 */
+
+/* SSE4 128-bit Packed Integer Comparisons. */
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_testz_si128(__m128i __M, __m128i __V)
+{
+ return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_testc_si128(__m128i __M, __m128i __V)
+{
+ return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_testnzc_si128(__m128i __M, __m128i __V)
+{
+ return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
+}
+
+#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
+#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
+#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
+
+/* SSE4 64-bit Packed Integer Comparisons. */
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
+{
+ return (__m128i)((__v2di)__V1 == (__v2di)__V2);
+}
+
+/* SSE4 Packed Integer Sign-Extension. */
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtepi8_epi16(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovsxbw128((__v16qi) __V);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtepi8_epi32(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovsxbd128((__v16qi) __V);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtepi8_epi64(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovsxbq128((__v16qi) __V);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtepi16_epi32(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovsxwd128((__v8hi) __V);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtepi16_epi64(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovsxwq128((__v8hi)__V);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtepi32_epi64(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovsxdq128((__v4si)__V);
+}
+
+/* SSE4 Packed Integer Zero-Extension. */
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtepu8_epi16(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxbw128((__v16qi) __V);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtepu8_epi32(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxbd128((__v16qi)__V);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtepu8_epi64(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxbq128((__v16qi)__V);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtepu16_epi32(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxwd128((__v8hi)__V);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtepu16_epi64(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxwq128((__v8hi)__V);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cvtepu32_epi64(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_pmovzxdq128((__v4si)__V);
+}
+
+/* SSE4 Pack with Unsigned Saturation. */
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_packus_epi32(__m128i __V1, __m128i __V2)
+{
+ return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
+}
+
+/* SSE4 Multiple Packed Sums of Absolute Difference. */
+#define _mm_mpsadbw_epu8(X, Y, M) __extension__ ({ \
+ __m128i __X = (X); \
+ __m128i __Y = (Y); \
+ (__m128i) __builtin_ia32_mpsadbw128((__v16qi)__X, (__v16qi)__Y, (M)); })
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_minpos_epu16(__m128i __V)
+{
+ return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
+}
+
+/* These definitions are normally in nmmintrin.h, but gcc puts them in here
+ so we'll do the same. */
+#ifdef __SSE4_2__
+
+/* These specify the type of data that we're comparing. */
+#define _SIDD_UBYTE_OPS 0x00
+#define _SIDD_UWORD_OPS 0x01
+#define _SIDD_SBYTE_OPS 0x02
+#define _SIDD_SWORD_OPS 0x03
+
+/* These specify the type of comparison operation. */
+#define _SIDD_CMP_EQUAL_ANY 0x00
+#define _SIDD_CMP_RANGES 0x04
+#define _SIDD_CMP_EQUAL_EACH 0x08
+#define _SIDD_CMP_EQUAL_ORDERED 0x0c
+
+/* These macros specify the polarity of the operation. */
+#define _SIDD_POSITIVE_POLARITY 0x00
+#define _SIDD_NEGATIVE_POLARITY 0x10
+#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
+#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30
+
+/* These macros are used in _mm_cmpXstri() to specify the return. */
+#define _SIDD_LEAST_SIGNIFICANT 0x00
+#define _SIDD_MOST_SIGNIFICANT 0x40
+
+/* These macros are used in _mm_cmpXstrm() to specify the return. */
+#define _SIDD_BIT_MASK 0x00
+#define _SIDD_UNIT_MASK 0x40
+
+/* SSE4.2 Packed Comparison Intrinsics. */
+#define _mm_cmpistrm(A, B, M) __builtin_ia32_pcmpistrm128((A), (B), (M))
+#define _mm_cmpistri(A, B, M) __builtin_ia32_pcmpistri128((A), (B), (M))
+
+#define _mm_cmpestrm(A, LA, B, LB, M) \
+ __builtin_ia32_pcmpestrm128((A), (LA), (B), (LB), (M))
+#define _mm_cmpestri(A, LA, B, LB, M) \
+ __builtin_ia32_pcmpestri128((A), (LA), (B), (LB), (M))
+
+/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
+#define _mm_cmpistra(A, B, M) \
+ __builtin_ia32_pcmpistria128((A), (B), (M))
+#define _mm_cmpistrc(A, B, M) \
+ __builtin_ia32_pcmpistric128((A), (B), (M))
+#define _mm_cmpistro(A, B, M) \
+ __builtin_ia32_pcmpistrio128((A), (B), (M))
+#define _mm_cmpistrs(A, B, M) \
+ __builtin_ia32_pcmpistris128((A), (B), (M))
+#define _mm_cmpistrz(A, B, M) \
+ __builtin_ia32_pcmpistriz128((A), (B), (M))
+
+#define _mm_cmpestra(A, LA, B, LB, M) \
+ __builtin_ia32_pcmpestria128((A), (LA), (B), (LB), (M))
+#define _mm_cmpestrc(A, LA, B, LB, M) \
+ __builtin_ia32_pcmpestric128((A), (LA), (B), (LB), (M))
+#define _mm_cmpestro(A, LA, B, LB, M) \
+ __builtin_ia32_pcmpestrio128((A), (LA), (B), (LB), (M))
+#define _mm_cmpestrs(A, LA, B, LB, M) \
+ __builtin_ia32_pcmpestris128((A), (LA), (B), (LB), (M))
+#define _mm_cmpestrz(A, LA, B, LB, M) \
+ __builtin_ia32_pcmpestriz128((A), (LA), (B), (LB), (M))
+
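A sketch of how the flag macros combine with _mm_cmpistri (illustrative only; requires -msse4.2 so that __SSE4_2__ is defined): with _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY, the intrinsic returns the index of the first byte of the second operand that matches any valid byte of the first, or 16 when there is no match.

    #include <smmintrin.h>

    /* Return the position of the first delimiter in a 16-byte chunk, or 16. */
    static int find_first_delimiter(__m128i chunk)
    {
        const __m128i delims = _mm_setr_epi8(' ', ',', ';', '\t', 0, 0, 0, 0,
                                             0, 0, 0, 0, 0, 0, 0, 0);
        return _mm_cmpistri(delims, chunk,
                            _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
                            _SIDD_LEAST_SIGNIFICANT);
    }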
+/* SSE4.2 Compare Packed Data -- Greater Than. */
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
+{
+ return (__m128i)((__v2di)__V1 > (__v2di)__V2);
+}
+
+/* SSE4.2 Accumulate CRC32. */
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_mm_crc32_u8(unsigned int __C, unsigned char __D)
+{
+ return __builtin_ia32_crc32qi(__C, __D);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_mm_crc32_u16(unsigned int __C, unsigned short __D)
+{
+ return __builtin_ia32_crc32hi(__C, __D);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_mm_crc32_u32(unsigned int __C, unsigned int __D)
+{
+ return __builtin_ia32_crc32si(__C, __D);
+}
+
+#ifdef __x86_64__
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
+_mm_crc32_u64(unsigned long long __C, unsigned long long __D)
+{
+ return __builtin_ia32_crc32di(__C, __D);
+}
+#endif /* __x86_64__ */
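These intrinsics accumulate a CRC-32C (Castagnoli polynomial) checksum one chunk at a time. A minimal sketch, using the conventional all-ones initial value and final inversion (the helper name is the editor's):

    #include <smmintrin.h>
    #include <stddef.h>

    static unsigned int crc32c(const unsigned char *buf, size_t len)
    {
        unsigned int crc = 0xffffffffu;
        for (size_t i = 0; i < len; i++)
            crc = _mm_crc32_u8(crc, buf[i]);     /* fold in one byte at a time */
        return crc ^ 0xffffffffu;
    }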
+
+#ifdef __POPCNT__
+#include <popcntintrin.h>
+#endif
+
+#endif /* __SSE4_2__ */
+#endif /* __SSE4_1__ */
+
+#endif /* _SMMINTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/stdalign.h b/contrib/llvm/tools/clang/lib/Headers/stdalign.h
new file mode 100644
index 0000000..e7fbfa0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/stdalign.h
@@ -0,0 +1,30 @@
+/*===---- stdalign.h - Standard header for alignment ------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDALIGN_H
+#define __STDALIGN_H
+
+#define alignas _Alignas
+#define __alignas_is_defined 1
+
+#endif /* __STDALIGN_H */
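A one-line usage sketch (assumes a C11 compiler, since alignas expands to _Alignas):

    #include <stdalign.h>

    static alignas(16) char simd_buffer[64];   /* storage aligned to a 16-byte boundary */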
diff --git a/contrib/llvm/tools/clang/lib/Headers/stdarg.h b/contrib/llvm/tools/clang/lib/Headers/stdarg.h
new file mode 100644
index 0000000..2957bf0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/stdarg.h
@@ -0,0 +1,50 @@
+/*===---- stdarg.h - Variable argument handling ----------------------------===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDARG_H
+#define __STDARG_H
+
+#ifndef _VA_LIST
+typedef __builtin_va_list va_list;
+#define _VA_LIST
+#endif
+#define va_start(ap, param) __builtin_va_start(ap, param)
+#define va_end(ap) __builtin_va_end(ap)
+#define va_arg(ap, type) __builtin_va_arg(ap, type)
+
+/* GCC always defines __va_copy, but only defines va_copy when in C99 mode or
+ * when -ansi is not specified, since it was not part of C90.
+ */
+#define __va_copy(d,s) __builtin_va_copy(d,s)
+
+#if __STDC_VERSION__ >= 199900L || __cplusplus >= 201103L || !defined(__STRICT_ANSI__)
+#define va_copy(dest, src) __builtin_va_copy(dest, src)
+#endif
+
+/* Hack required to make standard headers work, at least on Ubuntu */
+#define __GNUC_VA_LIST 1
+typedef __builtin_va_list __gnuc_va_list;
+
+#endif /* __STDARG_H */
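A minimal sketch of the macros above in use, a variadic sum (illustrative only):

    #include <stdarg.h>

    static int sum_ints(int count, ...)
    {
        va_list ap;
        int total = 0;

        va_start(ap, count);              /* count is the last named parameter */
        for (int i = 0; i < count; i++)
            total += va_arg(ap, int);     /* fetch each variadic argument as int */
        va_end(ap);
        return total;                     /* sum_ints(3, 1, 2, 3) == 6 */
    }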
diff --git a/contrib/llvm/tools/clang/lib/Headers/stdbool.h b/contrib/llvm/tools/clang/lib/Headers/stdbool.h
new file mode 100644
index 0000000..0467893
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/stdbool.h
@@ -0,0 +1,44 @@
+/*===---- stdbool.h - Standard header for booleans -------------------------===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDBOOL_H
+#define __STDBOOL_H
+
+/* Don't define bool, true, and false in C++, except as a GNU extension. */
+#ifndef __cplusplus
+#define bool _Bool
+#define true 1
+#define false 0
+#elif defined(__GNUC__) && !defined(__STRICT_ANSI__)
+/* Define _Bool, bool, false, true as a GNU extension. */
+#define _Bool bool
+#define bool bool
+#define false false
+#define true true
+#endif
+
+#define __bool_true_false_are_defined 1
+
+#endif /* __STDBOOL_H */
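A trivial usage sketch in C, where bool, true and false expand as defined above:

    #include <stdbool.h>

    static bool is_even(int n)
    {
        return n % 2 == 0;    /* the int result converts to _Bool */
    }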
diff --git a/contrib/llvm/tools/clang/lib/Headers/stddef.h b/contrib/llvm/tools/clang/lib/Headers/stddef.h
new file mode 100644
index 0000000..9e87ee89
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/stddef.h
@@ -0,0 +1,64 @@
+/*===---- stddef.h - Basic type definitions --------------------------------===
+ *
+ * Copyright (c) 2008 Eli Friedman
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __STDDEF_H
+#define __STDDEF_H
+
+#ifndef _PTRDIFF_T
+#define _PTRDIFF_T
+typedef __typeof__(((int*)0)-((int*)0)) ptrdiff_t;
+#endif
+#ifndef _SIZE_T
+#define _SIZE_T
+typedef __typeof__(sizeof(int)) size_t;
+#endif
+#ifndef __cplusplus
+#ifndef _WCHAR_T
+#define _WCHAR_T
+typedef __WCHAR_TYPE__ wchar_t;
+#endif
+#endif
+
+#undef NULL
+#ifdef __cplusplus
+#undef __null // VC++ hack.
+#define NULL __null
+#else
+#define NULL ((void*)0)
+#endif
+
+#define offsetof(t, d) __builtin_offsetof(t, d)
+
+#endif /* __STDDEF_H */
+
+/* Some C libraries expect to see a wint_t here. Others (notably MinGW) will use
+__WINT_TYPE__ directly; accommodate both by requiring __need_wint_t */
+#if defined(__need_wint_t)
+#if !defined(_WINT_T)
+#define _WINT_T
+typedef __WINT_TYPE__ wint_t;
+#endif /* _WINT_T */
+#undef __need_wint_t
+#endif /* __need_wint_t */
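A short sketch exercising the definitions above (the names are illustrative):

    #include <stddef.h>

    struct packet {
        char   tag;
        double payload;
    };

    /* offsetof yields an integer constant expression of type size_t. */
    static const size_t payload_offset = offsetof(struct packet, payload);

    /* Pointer subtraction has type ptrdiff_t; NULL is usable as a sentinel. */
    static ptrdiff_t distance(const char *end, const char *begin)
    {
        return begin == NULL ? 0 : end - begin;
    }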
diff --git a/contrib/llvm/tools/clang/lib/Headers/stdint.h b/contrib/llvm/tools/clang/lib/Headers/stdint.h
new file mode 100644
index 0000000..6f1a876
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/stdint.h
@@ -0,0 +1,661 @@
+/*===---- stdint.h - Standard header for sized integer types --------------===*\
+ *
+ * Copyright (c) 2009 Chris Lattner
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __CLANG_STDINT_H
+#define __CLANG_STDINT_H
+
+/* If we're hosted, fall back to the system's stdint.h, which might have
+ * additional definitions.
+ */
+#if __STDC_HOSTED__ && \
+ defined(__has_include_next) && __has_include_next(<stdint.h>)
+# include_next <stdint.h>
+#else
+
+/* C99 7.18.1.1 Exact-width integer types.
+ * C99 7.18.1.2 Minimum-width integer types.
+ * C99 7.18.1.3 Fastest minimum-width integer types.
+ *
+ * The standard requires that exact-width types be defined for 8-, 16-, 32-, and
+ * 64-bit types if they are implemented. Other exact-width types are optional.
+ * This implementation defines an exact-width type for every integer width
+ * that is represented in the standard integer types.
+ *
+ * The standard also requires minimum-width types be defined for 8-, 16-, 32-,
+ * and 64-bit widths regardless of whether there are corresponding exact-width
+ * types.
+ *
+ * To accommodate targets that are missing types that are exactly 8, 16, 32, or
+ * 64 bits wide, this implementation takes an approach of cascading
+ * redefinitions, redefining __int_leastN_t to successively smaller exact-width
+ * types. It is therefore important that the types are defined in order of
+ * descending widths.
+ *
+ * We currently assume that the minimum-width types and the fastest
+ * minimum-width types are the same. This is allowed by the standard, but is
+ * suboptimal.
+ *
+ * In violation of the standard, some targets do not implement a type that is
+ * wide enough to represent all of the required widths (8-, 16-, 32-, 64-bit).
+ * To accommodate these targets, a required minimum-width type is only
+ * defined if there exists an exact-width type of equal or greater width.
+ */
+
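An illustrative consequence of the cascade (a sketch, not normative): on a target with no exact 16-bit type, int_least16_t simply falls back to the next wider exact-width type, so portable code keeps compiling.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        int_least16_t n = 1234;        /* guaranteed to be at least 16 bits wide */
        printf("%zu\n", sizeof n);     /* 2 on most targets, larger where int16_t is absent */
        return 0;
    }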
+#ifdef __INT64_TYPE__
+# ifndef __int8_t_defined /* glibc sys/types.h also defines int64_t*/
+typedef signed __INT64_TYPE__ int64_t;
+# endif /* __int8_t_defined */
+typedef unsigned __INT64_TYPE__ uint64_t;
+# define __int_least64_t int64_t
+# define __uint_least64_t uint64_t
+# define __int_least32_t int64_t
+# define __uint_least32_t uint64_t
+# define __int_least16_t int64_t
+# define __uint_least16_t uint64_t
+# define __int_least8_t int64_t
+# define __uint_least8_t uint64_t
+#endif /* __INT64_TYPE__ */
+
+#ifdef __int_least64_t
+typedef __int_least64_t int_least64_t;
+typedef __uint_least64_t uint_least64_t;
+typedef __int_least64_t int_fast64_t;
+typedef __uint_least64_t uint_fast64_t;
+#endif /* __int_least64_t */
+
+#ifdef __INT56_TYPE__
+typedef signed __INT56_TYPE__ int56_t;
+typedef unsigned __INT56_TYPE__ uint56_t;
+typedef int56_t int_least56_t;
+typedef uint56_t uint_least56_t;
+typedef int56_t int_fast56_t;
+typedef uint56_t uint_fast56_t;
+# define __int_least32_t int56_t
+# define __uint_least32_t uint56_t
+# define __int_least16_t int56_t
+# define __uint_least16_t uint56_t
+# define __int_least8_t int56_t
+# define __uint_least8_t uint56_t
+#endif /* __INT56_TYPE__ */
+
+
+#ifdef __INT48_TYPE__
+typedef signed __INT48_TYPE__ int48_t;
+typedef unsigned __INT48_TYPE__ uint48_t;
+typedef int48_t int_least48_t;
+typedef uint48_t uint_least48_t;
+typedef int48_t int_fast48_t;
+typedef uint48_t uint_fast48_t;
+# define __int_least32_t int48_t
+# define __uint_least32_t uint48_t
+# define __int_least16_t int48_t
+# define __uint_least16_t uint48_t
+# define __int_least8_t int48_t
+# define __uint_least8_t uint48_t
+#endif /* __INT48_TYPE__ */
+
+
+#ifdef __INT40_TYPE__
+typedef signed __INT40_TYPE__ int40_t;
+typedef unsigned __INT40_TYPE__ uint40_t;
+typedef int40_t int_least40_t;
+typedef uint40_t uint_least40_t;
+typedef int40_t int_fast40_t;
+typedef uint40_t uint_fast40_t;
+# define __int_least32_t int40_t
+# define __uint_least32_t uint40_t
+# define __int_least16_t int40_t
+# define __uint_least16_t uint40_t
+# define __int_least8_t int40_t
+# define __uint_least8_t uint40_t
+#endif /* __INT40_TYPE__ */
+
+
+#ifdef __INT32_TYPE__
+
+# ifndef __int8_t_defined /* glibc sys/types.h also defines int32_t*/
+typedef signed __INT32_TYPE__ int32_t;
+# endif /* __int8_t_defined */
+
+# ifndef __uint32_t_defined /* more glibc compatibility */
+# define __uint32_t_defined
+typedef unsigned __INT32_TYPE__ uint32_t;
+# endif /* __uint32_t_defined */
+
+# define __int_least32_t int32_t
+# define __uint_least32_t uint32_t
+# define __int_least16_t int32_t
+# define __uint_least16_t uint32_t
+# define __int_least8_t int32_t
+# define __uint_least8_t uint32_t
+#endif /* __INT32_TYPE__ */
+
+#ifdef __int_least32_t
+typedef __int_least32_t int_least32_t;
+typedef __uint_least32_t uint_least32_t;
+typedef __int_least32_t int_fast32_t;
+typedef __uint_least32_t uint_fast32_t;
+#endif /* __int_least32_t */
+
+#ifdef __INT24_TYPE__
+typedef signed __INT24_TYPE__ int24_t;
+typedef unsigned __INT24_TYPE__ uint24_t;
+typedef int24_t int_least24_t;
+typedef uint24_t uint_least24_t;
+typedef int24_t int_fast24_t;
+typedef uint24_t uint_fast24_t;
+# define __int_least16_t int24_t
+# define __uint_least16_t uint24_t
+# define __int_least8_t int24_t
+# define __uint_least8_t uint24_t
+#endif /* __INT24_TYPE__ */
+
+#ifdef __INT16_TYPE__
+#ifndef __int8_t_defined /* glibc sys/types.h also defines int16_t*/
+typedef signed __INT16_TYPE__ int16_t;
+#endif /* __int8_t_defined */
+typedef unsigned __INT16_TYPE__ uint16_t;
+# define __int_least16_t int16_t
+# define __uint_least16_t uint16_t
+# define __int_least8_t int16_t
+# define __uint_least8_t uint16_t
+#endif /* __INT16_TYPE__ */
+
+#ifdef __int_least16_t
+typedef __int_least16_t int_least16_t;
+typedef __uint_least16_t uint_least16_t;
+typedef __int_least16_t int_fast16_t;
+typedef __uint_least16_t uint_fast16_t;
+#endif /* __int_least16_t */
+
+
+#ifdef __INT8_TYPE__
+#ifndef __int8_t_defined /* glibc sys/types.h also defines int8_t*/
+typedef signed __INT8_TYPE__ int8_t;
+#endif /* __int8_t_defined */
+typedef unsigned __INT8_TYPE__ uint8_t;
+# define __int_least8_t int8_t
+# define __uint_least8_t uint8_t
+#endif /* __INT8_TYPE__ */
+
+#ifdef __int_least8_t
+typedef __int_least8_t int_least8_t;
+typedef __uint_least8_t uint_least8_t;
+typedef __int_least8_t int_fast8_t;
+typedef __uint_least8_t uint_fast8_t;
+#endif /* __int_least8_t */
+
+/* prevent glibc sys/types.h from defining conflicting types */
+#ifndef __int8_t_defined
+# define __int8_t_defined
+#endif /* __int8_t_defined */
+
+/* C99 7.18.1.4 Integer types capable of holding object pointers.
+ */
+#define __stdint_join3(a,b,c) a ## b ## c
+
+#define __intn_t(n) __stdint_join3( int, n, _t)
+#define __uintn_t(n) __stdint_join3(uint, n, _t)
+
+#ifndef _INTPTR_T
+#ifndef __intptr_t_defined
+typedef __intn_t(__INTPTR_WIDTH__) intptr_t;
+#define __intptr_t_defined
+#define _INTPTR_T
+#endif
+#endif
+
+#ifndef _UINTPTR_T
+typedef __uintn_t(__INTPTR_WIDTH__) uintptr_t;
+#define _UINTPTR_T
+#endif
+
+/* C99 7.18.1.5 Greatest-width integer types.
+ */
+typedef __INTMAX_TYPE__ intmax_t;
+typedef __UINTMAX_TYPE__ uintmax_t;
+
+/* C99 7.18.4 Macros for minimum-width integer constants.
+ *
+ * The standard requires that integer constant macros be defined for all the
+ * minimum-width types defined above. As 8-, 16-, 32-, and 64-bit minimum-width
+ * types are required, the corresponding integer constant macros are defined
+ * here. This implementation also defines minimum-width types for every other
+ * integer width that the target implements, so corresponding macros are
+ * defined below, too.
+ *
+ * These macros are defined using the same successive-shrinking approach as
+ * the type definitions above. It is likewise important that macros are defined
+ * in order of descending width.
+ *
+ * Note that C++ should not check __STDC_CONSTANT_MACROS here, contrary to the
+ * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
+ */
+
+#define __int_c_join(a, b) a ## b
+#define __int_c(v, suffix) __int_c_join(v, suffix)
+#define __uint_c(v, suffix) __int_c_join(v##U, suffix)
+
+
+#ifdef __INT64_TYPE__
+# ifdef __INT64_C_SUFFIX__
+# define __int64_c_suffix __INT64_C_SUFFIX__
+# define __int32_c_suffix __INT64_C_SUFFIX__
+# define __int16_c_suffix __INT64_C_SUFFIX__
+# define __int8_c_suffix __INT64_C_SUFFIX__
+# else
+# undef __int64_c_suffix
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT64_C_SUFFIX__ */
+#endif /* __INT64_TYPE__ */
+
+#ifdef __int_least64_t
+# ifdef __int64_c_suffix
+# define INT64_C(v) __int_c(v, __int64_c_suffix)
+# define UINT64_C(v) __uint_c(v, __int64_c_suffix)
+# else
+# define INT64_C(v) v
+# define UINT64_C(v) v ## U
+# endif /* __int64_c_suffix */
+#endif /* __int_least64_t */
+
+
+#ifdef __INT56_TYPE__
+# ifdef __INT56_C_SUFFIX__
+# define INT56_C(v) __int_c(v, __INT56_C_SUFFIX__)
+# define UINT56_C(v) __uint_c(v, __INT56_C_SUFFIX__)
+# define __int32_c_suffix __INT56_C_SUFFIX__
+# define __int16_c_suffix __INT56_C_SUFFIX__
+# define __int8_c_suffix __INT56_C_SUFFIX__
+# else
+# define INT56_C(v) v
+# define UINT56_C(v) v ## U
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT56_C_SUFFIX__ */
+#endif /* __INT56_TYPE__ */
+
+
+#ifdef __INT48_TYPE__
+# ifdef __INT48_C_SUFFIX__
+# define INT48_C(v) __int_c(v, __INT48_C_SUFFIX__)
+# define UINT48_C(v) __uint_c(v, __INT48_C_SUFFIX__)
+# define __int32_c_suffix __INT48_C_SUFFIX__
+# define __int16_c_suffix __INT48_C_SUFFIX__
+# define __int8_c_suffix __INT48_C_SUFFIX__
+# else
+# define INT48_C(v) v
+# define UINT48_C(v) v ## U
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT48_C_SUFFIX__ */
+#endif /* __INT48_TYPE__ */
+
+
+#ifdef __INT40_TYPE__
+# ifdef __INT40_C_SUFFIX__
+# define INT40_C(v) __int_c(v, __INT40_C_SUFFIX__)
+# define UINT40_C(v) __uint_c(v, __INT40_C_SUFFIX__)
+# define __int32_c_suffix __INT40_C_SUFFIX__
+# define __int16_c_suffix __INT40_C_SUFFIX__
+# define __int8_c_suffix __INT40_C_SUFFIX__
+# else
+# define INT40_C(v) v
+# define UINT40_C(v) v ## U
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT40_C_SUFFIX__ */
+#endif /* __INT40_TYPE__ */
+
+
+#ifdef __INT32_TYPE__
+# ifdef __INT32_C_SUFFIX__
+# define __int32_c_suffix __INT32_C_SUFFIX__
+# define __int16_c_suffix __INT32_C_SUFFIX__
+# define __int8_c_suffix __INT32_C_SUFFIX__
+#else
+# undef __int32_c_suffix
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT32_C_SUFFIX__ */
+#endif /* __INT32_TYPE__ */
+
+#ifdef __int_least32_t
+# ifdef __int32_c_suffix
+# define INT32_C(v) __int_c(v, __int32_c_suffix)
+# define UINT32_C(v) __uint_c(v, __int32_c_suffix)
+# else
+# define INT32_C(v) v
+# define UINT32_C(v) v ## U
+# endif /* __int32_c_suffix */
+#endif /* __int_least32_t */
+
+
+#ifdef __INT24_TYPE__
+# ifdef __INT24_C_SUFFIX__
+# define INT24_C(v) __int_c(v, __INT24_C_SUFFIX__)
+# define UINT24_C(v) __uint_c(v, __INT24_C_SUFFIX__)
+# define __int16_c_suffix __INT24_C_SUFFIX__
+# define __int8_c_suffix __INT24_C_SUFFIX__
+# else
+# define INT24_C(v) v
+# define UINT24_C(v) v ## U
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT24_C_SUFFIX__ */
+#endif /* __INT24_TYPE__ */
+
+
+#ifdef __INT16_TYPE__
+# ifdef __INT16_C_SUFFIX__
+# define __int16_c_suffix __INT16_C_SUFFIX__
+# define __int8_c_suffix __INT16_C_SUFFIX__
+#else
+# undef __int16_c_suffix
+# undef __int8_c_suffix
+# endif /* __INT16_C_SUFFIX__ */
+#endif /* __INT16_TYPE__ */
+
+#ifdef __int_least16_t
+# ifdef __int16_c_suffix
+# define INT16_C(v) __int_c(v, __int16_c_suffix)
+# define UINT16_C(v) __uint_c(v, __int16_c_suffix)
+# else
+# define INT16_C(v) v
+# define UINT16_C(v) v ## U
+# endif /* __int16_c_suffix */
+#endif /* __int_least16_t */
+
+
+#ifdef __INT8_TYPE__
+# ifdef __INT8_C_SUFFIX__
+# define __int8_c_suffix __INT8_C_SUFFIX__
+#else
+# undef __int8_c_suffix
+# endif /* __INT8_C_SUFFIX__ */
+#endif /* __INT8_TYPE__ */
+
+#ifdef __int_least8_t
+# ifdef __int8_c_suffix
+# define INT8_C(v) __int_c(v, __int8_c_suffix)
+# define UINT8_C(v) __uint_c(v, __int8_c_suffix)
+# else
+# define INT8_C(v) v
+# define UINT8_C(v) v ## U
+# endif /* __int8_c_suffix */
+#endif /* __int_least8_t */
+
+
+/* C99 7.18.2.1 Limits of exact-width integer types.
+ * C99 7.18.2.2 Limits of minimum-width integer types.
+ * C99 7.18.2.3 Limits of fastest minimum-width integer types.
+ *
+ * The presence of limit macros is completely optional in C99. This
+ * implementation defines limits for all of the types (exact- and
+ * minimum-width) that it defines above, using the limits of the minimum-width
+ * type for any types that do not have exact-width representations.
+ *
+ * As in the type definitions, this section takes an approach of
+ * successive-shrinking to determine which limits to use for the standard (8,
+ * 16, 32, 64) bit widths when they don't have exact representations. It is
+ * therefore important that the definitions be kept in order of descending
+ * widths.
+ *
+ * Note that C++ should not check __STDC_LIMIT_MACROS here, contrary to the
+ * claims of the C standard (see C++ 18.3.1p2, [cstdint.syn]).
+ */
+
+#ifdef __INT64_TYPE__
+# define INT64_MAX INT64_C( 9223372036854775807)
+# define INT64_MIN (-INT64_C( 9223372036854775807)-1)
+# define UINT64_MAX UINT64_C(18446744073709551615)
+# define __INT_LEAST64_MIN INT64_MIN
+# define __INT_LEAST64_MAX INT64_MAX
+# define __UINT_LEAST64_MAX UINT64_MAX
+# define __INT_LEAST32_MIN INT64_MIN
+# define __INT_LEAST32_MAX INT64_MAX
+# define __UINT_LEAST32_MAX UINT64_MAX
+# define __INT_LEAST16_MIN INT64_MIN
+# define __INT_LEAST16_MAX INT64_MAX
+# define __UINT_LEAST16_MAX UINT64_MAX
+# define __INT_LEAST8_MIN INT64_MIN
+# define __INT_LEAST8_MAX INT64_MAX
+# define __UINT_LEAST8_MAX UINT64_MAX
+#endif /* __INT64_TYPE__ */
+
+#ifdef __INT_LEAST64_MIN
+# define INT_LEAST64_MIN __INT_LEAST64_MIN
+# define INT_LEAST64_MAX __INT_LEAST64_MAX
+# define UINT_LEAST64_MAX __UINT_LEAST64_MAX
+# define INT_FAST64_MIN __INT_LEAST64_MIN
+# define INT_FAST64_MAX __INT_LEAST64_MAX
+# define UINT_FAST64_MAX __UINT_LEAST64_MAX
+#endif /* __INT_LEAST64_MIN */
+
+
+#ifdef __INT56_TYPE__
+# define INT56_MAX INT56_C(36028797018963967)
+# define INT56_MIN (-INT56_C(36028797018963967)-1)
+# define UINT56_MAX UINT56_C(72057594037927935)
+# define INT_LEAST56_MIN INT56_MIN
+# define INT_LEAST56_MAX INT56_MAX
+# define UINT_LEAST56_MAX UINT56_MAX
+# define INT_FAST56_MIN INT56_MIN
+# define INT_FAST56_MAX INT56_MAX
+# define UINT_FAST56_MAX UINT56_MAX
+# define __INT_LEAST32_MIN INT56_MIN
+# define __INT_LEAST32_MAX INT56_MAX
+# define __UINT_LEAST32_MAX UINT56_MAX
+# define __INT_LEAST16_MIN INT56_MIN
+# define __INT_LEAST16_MAX INT56_MAX
+# define __UINT_LEAST16_MAX UINT56_MAX
+# define __INT_LEAST8_MIN INT56_MIN
+# define __INT_LEAST8_MAX INT56_MAX
+# define __UINT_LEAST8_MAX UINT56_MAX
+#endif /* __INT56_TYPE__ */
+
+
+#ifdef __INT48_TYPE__
+# define INT48_MAX INT48_C(140737488355327)
+# define INT48_MIN (-INT48_C(140737488355327)-1)
+# define UINT48_MAX UINT48_C(281474976710655)
+# define INT_LEAST48_MIN INT48_MIN
+# define INT_LEAST48_MAX INT48_MAX
+# define UINT_LEAST48_MAX UINT48_MAX
+# define INT_FAST48_MIN INT48_MIN
+# define INT_FAST48_MAX INT48_MAX
+# define UINT_FAST48_MAX UINT48_MAX
+# define __INT_LEAST32_MIN INT48_MIN
+# define __INT_LEAST32_MAX INT48_MAX
+# define __UINT_LEAST32_MAX UINT48_MAX
+# define __INT_LEAST16_MIN INT48_MIN
+# define __INT_LEAST16_MAX INT48_MAX
+# define __UINT_LEAST16_MAX UINT48_MAX
+# define __INT_LEAST8_MIN INT48_MIN
+# define __INT_LEAST8_MAX INT48_MAX
+# define __UINT_LEAST8_MAX UINT48_MAX
+#endif /* __INT48_TYPE__ */
+
+
+#ifdef __INT40_TYPE__
+# define INT40_MAX INT40_C(549755813887)
+# define INT40_MIN (-INT40_C(549755813887)-1)
+# define UINT40_MAX UINT40_C(1099511627775)
+# define INT_LEAST40_MIN INT40_MIN
+# define INT_LEAST40_MAX INT40_MAX
+# define UINT_LEAST40_MAX UINT40_MAX
+# define INT_FAST40_MIN INT40_MIN
+# define INT_FAST40_MAX INT40_MAX
+# define UINT_FAST40_MAX UINT40_MAX
+# define __INT_LEAST32_MIN INT40_MIN
+# define __INT_LEAST32_MAX INT40_MAX
+# define __UINT_LEAST32_MAX UINT40_MAX
+# define __INT_LEAST16_MIN INT40_MIN
+# define __INT_LEAST16_MAX INT40_MAX
+# define __UINT_LEAST16_MAX UINT40_MAX
+# define __INT_LEAST8_MIN INT40_MIN
+# define __INT_LEAST8_MAX INT40_MAX
+# define __UINT_LEAST8_MAX UINT40_MAX
+#endif /* __INT40_TYPE__ */
+
+
+#ifdef __INT32_TYPE__
+# define INT32_MAX INT32_C(2147483647)
+# define INT32_MIN (-INT32_C(2147483647)-1)
+# define UINT32_MAX UINT32_C(4294967295)
+# define __INT_LEAST32_MIN INT32_MIN
+# define __INT_LEAST32_MAX INT32_MAX
+# define __UINT_LEAST32_MAX UINT32_MAX
+# define __INT_LEAST16_MIN INT32_MIN
+# define __INT_LEAST16_MAX INT32_MAX
+# define __UINT_LEAST16_MAX UINT32_MAX
+# define __INT_LEAST8_MIN INT32_MIN
+# define __INT_LEAST8_MAX INT32_MAX
+# define __UINT_LEAST8_MAX UINT32_MAX
+#endif /* __INT32_TYPE__ */
+
+#ifdef __INT_LEAST32_MIN
+# define INT_LEAST32_MIN __INT_LEAST32_MIN
+# define INT_LEAST32_MAX __INT_LEAST32_MAX
+# define UINT_LEAST32_MAX __UINT_LEAST32_MAX
+# define INT_FAST32_MIN __INT_LEAST32_MIN
+# define INT_FAST32_MAX __INT_LEAST32_MAX
+# define UINT_FAST32_MAX __UINT_LEAST32_MAX
+#endif /* __INT_LEAST32_MIN */
+
+
+#ifdef __INT24_TYPE__
+# define INT24_MAX INT24_C(8388607)
+# define INT24_MIN (-INT24_C(8388607)-1)
+# define UINT24_MAX UINT24_C(16777215)
+# define INT_LEAST24_MIN INT24_MIN
+# define INT_LEAST24_MAX INT24_MAX
+# define UINT_LEAST24_MAX UINT24_MAX
+# define INT_FAST24_MIN INT24_MIN
+# define INT_FAST24_MAX INT24_MAX
+# define UINT_FAST24_MAX UINT24_MAX
+# define __INT_LEAST16_MIN INT24_MIN
+# define __INT_LEAST16_MAX INT24_MAX
+# define __UINT_LEAST16_MAX UINT24_MAX
+# define __INT_LEAST8_MIN INT24_MIN
+# define __INT_LEAST8_MAX INT24_MAX
+# define __UINT_LEAST8_MAX UINT24_MAX
+#endif /* __INT24_TYPE__ */
+
+
+#ifdef __INT16_TYPE__
+#define INT16_MAX INT16_C(32767)
+#define INT16_MIN (-INT16_C(32767)-1)
+#define UINT16_MAX UINT16_C(65535)
+# define __INT_LEAST16_MIN INT16_MIN
+# define __INT_LEAST16_MAX INT16_MAX
+# define __UINT_LEAST16_MAX UINT16_MAX
+# define __INT_LEAST8_MIN INT16_MIN
+# define __INT_LEAST8_MAX INT16_MAX
+# define __UINT_LEAST8_MAX UINT16_MAX
+#endif /* __INT16_TYPE__ */
+
+#ifdef __INT_LEAST16_MIN
+# define INT_LEAST16_MIN __INT_LEAST16_MIN
+# define INT_LEAST16_MAX __INT_LEAST16_MAX
+# define UINT_LEAST16_MAX __UINT_LEAST16_MAX
+# define INT_FAST16_MIN __INT_LEAST16_MIN
+# define INT_FAST16_MAX __INT_LEAST16_MAX
+# define UINT_FAST16_MAX __UINT_LEAST16_MAX
+#endif /* __INT_LEAST16_MIN */
+
+
+#ifdef __INT8_TYPE__
+# define INT8_MAX INT8_C(127)
+# define INT8_MIN (-INT8_C(127)-1)
+# define UINT8_MAX UINT8_C(255)
+# define __INT_LEAST8_MIN INT8_MIN
+# define __INT_LEAST8_MAX INT8_MAX
+# define __UINT_LEAST8_MAX UINT8_MAX
+#endif /* __INT8_TYPE__ */
+
+#ifdef __INT_LEAST8_MIN
+# define INT_LEAST8_MIN __INT_LEAST8_MIN
+# define INT_LEAST8_MAX __INT_LEAST8_MAX
+# define UINT_LEAST8_MAX __UINT_LEAST8_MAX
+# define INT_FAST8_MIN __INT_LEAST8_MIN
+# define INT_FAST8_MAX __INT_LEAST8_MAX
+# define UINT_FAST8_MAX __UINT_LEAST8_MAX
+#endif /* __INT_LEAST8_MIN */
+
+/* Some utility macros */
+#define __INTN_MIN(n) __stdint_join3( INT, n, _MIN)
+#define __INTN_MAX(n) __stdint_join3( INT, n, _MAX)
+#define __UINTN_MAX(n) __stdint_join3(UINT, n, _MAX)
+#define __INTN_C(n, v) __stdint_join3( INT, n, _C(v))
+#define __UINTN_C(n, v) __stdint_join3(UINT, n, _C(v))
+
+/* C99 7.18.2.4 Limits of integer types capable of holding object pointers. */
+/* C99 7.18.3 Limits of other integer types. */
+
+#define INTPTR_MIN __INTN_MIN(__INTPTR_WIDTH__)
+#define INTPTR_MAX __INTN_MAX(__INTPTR_WIDTH__)
+#define UINTPTR_MAX __UINTN_MAX(__INTPTR_WIDTH__)
+#define PTRDIFF_MIN __INTN_MIN(__PTRDIFF_WIDTH__)
+#define PTRDIFF_MAX __INTN_MAX(__PTRDIFF_WIDTH__)
+#define SIZE_MAX __UINTN_MAX(__SIZE_WIDTH__)
+
+/* C99 7.18.2.5 Limits of greatest-width integer types. */
+#define INTMAX_MIN __INTN_MIN(__INTMAX_WIDTH__)
+#define INTMAX_MAX __INTN_MAX(__INTMAX_WIDTH__)
+#define UINTMAX_MAX __UINTN_MAX(__INTMAX_WIDTH__)
+
+/* C99 7.18.3 Limits of other integer types. */
+#define SIG_ATOMIC_MIN __INTN_MIN(__SIG_ATOMIC_WIDTH__)
+#define SIG_ATOMIC_MAX __INTN_MAX(__SIG_ATOMIC_WIDTH__)
+#ifdef __WINT_UNSIGNED__
+# define WINT_MIN __UINTN_C(__WINT_WIDTH__, 0)
+# define WINT_MAX __UINTN_MAX(__WINT_WIDTH__)
+#else
+# define WINT_MIN __INTN_MIN(__WINT_WIDTH__)
+# define WINT_MAX __INTN_MAX(__WINT_WIDTH__)
+#endif
+
+#ifndef WCHAR_MAX
+# define WCHAR_MAX __WCHAR_MAX__
+#endif
+#ifndef WCHAR_MIN
+# if __WCHAR_MAX__ == __INTN_MAX(__WCHAR_WIDTH__)
+# define WCHAR_MIN __INTN_MIN(__WCHAR_WIDTH__)
+# else
+# define WCHAR_MIN __UINTN_C(__WCHAR_WIDTH__, 0)
+# endif
+#endif
+
+/* 7.18.4.2 Macros for greatest-width integer constants. */
+#define INTMAX_C(v) __INTN_C(__INTMAX_WIDTH__, v)
+#define UINTMAX_C(v) __UINTN_C(__INTMAX_WIDTH__, v)
+
+#endif /* __STDC_HOSTED__ */
+#endif /* __CLANG_STDINT_H */
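A small usage sketch pulling together the exact-width types, constant macros and limits defined above (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t bit40 = UINT64_C(1) << 40;     /* the constant macro forces a 64-bit type */
        int32_t  low   = INT32_MIN;             /* exact-width type with its limit macro */
        uintptr_t where = (uintptr_t)&low;      /* a data pointer round-trips through uintptr_t */

        printf("%d %d\n", bit40 > UINT32_MAX, where != 0);   /* both comparisons are true */
        return 0;
    }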
diff --git a/contrib/llvm/tools/clang/lib/Headers/tgmath.h b/contrib/llvm/tools/clang/lib/Headers/tgmath.h
new file mode 100644
index 0000000..4fa1cf7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/tgmath.h
@@ -0,0 +1,1374 @@
+/*===---- tgmath.h - Standard header for type generic math ----------------===*\
+ *
+ * Copyright (c) 2009 Howard Hinnant
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+\*===----------------------------------------------------------------------===*/
+
+#ifndef __TGMATH_H
+#define __TGMATH_H
+
+/* C99 7.22 Type-generic math <tgmath.h>. */
+#include <math.h>
+
+/* C++ handles type genericity with overloading in math.h. */
+#ifndef __cplusplus
+#include <complex.h>
+
+#define _TG_ATTRSp __attribute__((__overloadable__))
+#define _TG_ATTRS __attribute__((__overloadable__, __always_inline__))
+
+// promotion
+
+typedef void _Argument_type_is_not_arithmetic;
+static _Argument_type_is_not_arithmetic __tg_promote(...)
+ __attribute__((__unavailable__,__overloadable__));
+static double _TG_ATTRSp __tg_promote(int);
+static double _TG_ATTRSp __tg_promote(unsigned int);
+static double _TG_ATTRSp __tg_promote(long);
+static double _TG_ATTRSp __tg_promote(unsigned long);
+static double _TG_ATTRSp __tg_promote(long long);
+static double _TG_ATTRSp __tg_promote(unsigned long long);
+static float _TG_ATTRSp __tg_promote(float);
+static double _TG_ATTRSp __tg_promote(double);
+static long double _TG_ATTRSp __tg_promote(long double);
+static float _Complex _TG_ATTRSp __tg_promote(float _Complex);
+static double _Complex _TG_ATTRSp __tg_promote(double _Complex);
+static long double _Complex _TG_ATTRSp __tg_promote(long double _Complex);
+
+#define __tg_promote1(__x) (__typeof__(__tg_promote(__x)))
+#define __tg_promote2(__x, __y) (__typeof__(__tg_promote(__x) + \
+ __tg_promote(__y)))
+#define __tg_promote3(__x, __y, __z) (__typeof__(__tg_promote(__x) + \
+ __tg_promote(__y) + \
+ __tg_promote(__z)))
+
+// acos
+
+static float
+ _TG_ATTRS
+ __tg_acos(float __x) {return acosf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_acos(double __x) {return acos(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_acos(long double __x) {return acosl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_acos(float _Complex __x) {return cacosf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_acos(double _Complex __x) {return cacos(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_acos(long double _Complex __x) {return cacosl(__x);}
+
+#undef acos
+#define acos(__x) __tg_acos(__tg_promote1((__x))(__x))
+
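A sketch of the type-generic dispatch this machinery produces (not part of the header; assumes a hosted C99 environment and, on typical Unix systems, linking with -lm):

    #include <tgmath.h>

    void acos_demo(void)
    {
        float           a = acos(0.5f);          /* dispatches to acosf */
        double          b = acos(0.5);           /* dispatches to acos */
        long double     c = acos(0.5L);          /* dispatches to acosl */
        double _Complex d = acos(2.0 + 0.0*I);   /* complex argument: cacos */
        (void)a; (void)b; (void)c; (void)d;
    }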
+// asin
+
+static float
+ _TG_ATTRS
+ __tg_asin(float __x) {return asinf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_asin(double __x) {return asin(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_asin(long double __x) {return asinl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_asin(float _Complex __x) {return casinf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_asin(double _Complex __x) {return casin(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_asin(long double _Complex __x) {return casinl(__x);}
+
+#undef asin
+#define asin(__x) __tg_asin(__tg_promote1((__x))(__x))
+
+// atan
+
+static float
+ _TG_ATTRS
+ __tg_atan(float __x) {return atanf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_atan(double __x) {return atan(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_atan(long double __x) {return atanl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_atan(float _Complex __x) {return catanf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_atan(double _Complex __x) {return catan(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_atan(long double _Complex __x) {return catanl(__x);}
+
+#undef atan
+#define atan(__x) __tg_atan(__tg_promote1((__x))(__x))
+
+// acosh
+
+static float
+ _TG_ATTRS
+ __tg_acosh(float __x) {return acoshf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_acosh(double __x) {return acosh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_acosh(long double __x) {return acoshl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_acosh(float _Complex __x) {return cacoshf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_acosh(double _Complex __x) {return cacosh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_acosh(long double _Complex __x) {return cacoshl(__x);}
+
+#undef acosh
+#define acosh(__x) __tg_acosh(__tg_promote1((__x))(__x))
+
+// asinh
+
+static float
+ _TG_ATTRS
+ __tg_asinh(float __x) {return asinhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_asinh(double __x) {return asinh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_asinh(long double __x) {return asinhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_asinh(float _Complex __x) {return casinhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_asinh(double _Complex __x) {return casinh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_asinh(long double _Complex __x) {return casinhl(__x);}
+
+#undef asinh
+#define asinh(__x) __tg_asinh(__tg_promote1((__x))(__x))
+
+// atanh
+
+static float
+ _TG_ATTRS
+ __tg_atanh(float __x) {return atanhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_atanh(double __x) {return atanh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_atanh(long double __x) {return atanhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_atanh(float _Complex __x) {return catanhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_atanh(double _Complex __x) {return catanh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_atanh(long double _Complex __x) {return catanhl(__x);}
+
+#undef atanh
+#define atanh(__x) __tg_atanh(__tg_promote1((__x))(__x))
+
+// cos
+
+static float
+ _TG_ATTRS
+ __tg_cos(float __x) {return cosf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cos(double __x) {return cos(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cos(long double __x) {return cosl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cos(float _Complex __x) {return ccosf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cos(double _Complex __x) {return ccos(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cos(long double _Complex __x) {return ccosl(__x);}
+
+#undef cos
+#define cos(__x) __tg_cos(__tg_promote1((__x))(__x))
+
+// sin
+
+static float
+ _TG_ATTRS
+ __tg_sin(float __x) {return sinf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_sin(double __x) {return sin(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_sin(long double __x) {return sinl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_sin(float _Complex __x) {return csinf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_sin(double _Complex __x) {return csin(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_sin(long double _Complex __x) {return csinl(__x);}
+
+#undef sin
+#define sin(__x) __tg_sin(__tg_promote1((__x))(__x))
+
+// tan
+
+static float
+ _TG_ATTRS
+ __tg_tan(float __x) {return tanf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_tan(double __x) {return tan(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_tan(long double __x) {return tanl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_tan(float _Complex __x) {return ctanf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_tan(double _Complex __x) {return ctan(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_tan(long double _Complex __x) {return ctanl(__x);}
+
+#undef tan
+#define tan(__x) __tg_tan(__tg_promote1((__x))(__x))
+
+// cosh
+
+static float
+ _TG_ATTRS
+ __tg_cosh(float __x) {return coshf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cosh(double __x) {return cosh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cosh(long double __x) {return coshl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cosh(float _Complex __x) {return ccoshf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cosh(double _Complex __x) {return ccosh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cosh(long double _Complex __x) {return ccoshl(__x);}
+
+#undef cosh
+#define cosh(__x) __tg_cosh(__tg_promote1((__x))(__x))
+
+// sinh
+
+static float
+ _TG_ATTRS
+ __tg_sinh(float __x) {return sinhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_sinh(double __x) {return sinh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_sinh(long double __x) {return sinhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_sinh(float _Complex __x) {return csinhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_sinh(double _Complex __x) {return csinh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_sinh(long double _Complex __x) {return csinhl(__x);}
+
+#undef sinh
+#define sinh(__x) __tg_sinh(__tg_promote1((__x))(__x))
+
+// tanh
+
+static float
+ _TG_ATTRS
+ __tg_tanh(float __x) {return tanhf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_tanh(double __x) {return tanh(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_tanh(long double __x) {return tanhl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_tanh(float _Complex __x) {return ctanhf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_tanh(double _Complex __x) {return ctanh(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_tanh(long double _Complex __x) {return ctanhl(__x);}
+
+#undef tanh
+#define tanh(__x) __tg_tanh(__tg_promote1((__x))(__x))
+
+// exp
+
+static float
+ _TG_ATTRS
+ __tg_exp(float __x) {return expf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_exp(double __x) {return exp(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_exp(long double __x) {return expl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_exp(float _Complex __x) {return cexpf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_exp(double _Complex __x) {return cexp(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_exp(long double _Complex __x) {return cexpl(__x);}
+
+#undef exp
+#define exp(__x) __tg_exp(__tg_promote1((__x))(__x))
+
+// log
+
+static float
+ _TG_ATTRS
+ __tg_log(float __x) {return logf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log(double __x) {return log(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log(long double __x) {return logl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_log(float _Complex __x) {return clogf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_log(double _Complex __x) {return clog(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_log(long double _Complex __x) {return clogl(__x);}
+
+#undef log
+#define log(__x) __tg_log(__tg_promote1((__x))(__x))
+
+// pow
+
+static float
+ _TG_ATTRS
+ __tg_pow(float __x, float __y) {return powf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_pow(double __x, double __y) {return pow(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_pow(long double __x, long double __y) {return powl(__x, __y);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_pow(float _Complex __x, float _Complex __y) {return cpowf(__x, __y);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_pow(double _Complex __x, double _Complex __y) {return cpow(__x, __y);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_pow(long double _Complex __x, long double _Complex __y)
+ {return cpowl(__x, __y);}
+
+#undef pow
+#define pow(__x, __y) __tg_pow(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
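For the two-argument functions, __tg_promote2 selects the common promoted type of both operands before the overload is chosen; a brief sketch (illustrative values only):

    #include <tgmath.h>

    void pow_demo(void)
    {
        double          x = pow(2, 0.5f);        /* int and float promote to double: pow */
        long double     y = pow(2.0L, 3);        /* long double dominates: powl */
        double _Complex z = pow(1.0*I, 2.0f);    /* complex operand: cpow */
        (void)x; (void)y; (void)z;
    }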
+// sqrt
+
+static float
+ _TG_ATTRS
+ __tg_sqrt(float __x) {return sqrtf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_sqrt(double __x) {return sqrt(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_sqrt(long double __x) {return sqrtl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_sqrt(float _Complex __x) {return csqrtf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_sqrt(double _Complex __x) {return csqrt(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_sqrt(long double _Complex __x) {return csqrtl(__x);}
+
+#undef sqrt
+#define sqrt(__x) __tg_sqrt(__tg_promote1((__x))(__x))
+
+// fabs
+
+static float
+ _TG_ATTRS
+ __tg_fabs(float __x) {return fabsf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_fabs(double __x) {return fabs(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_fabs(long double __x) {return fabsl(__x);}
+
+static float
+ _TG_ATTRS
+ __tg_fabs(float _Complex __x) {return cabsf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_fabs(double _Complex __x) {return cabs(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_fabs(long double _Complex __x) {return cabsl(__x);}
+
+#undef fabs
+#define fabs(__x) __tg_fabs(__tg_promote1((__x))(__x))
+
+// atan2
+
+static float
+ _TG_ATTRS
+ __tg_atan2(float __x, float __y) {return atan2f(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_atan2(double __x, double __y) {return atan2(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_atan2(long double __x, long double __y) {return atan2l(__x, __y);}
+
+#undef atan2
+#define atan2(__x, __y) __tg_atan2(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// cbrt
+
+static float
+ _TG_ATTRS
+ __tg_cbrt(float __x) {return cbrtf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cbrt(double __x) {return cbrt(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cbrt(long double __x) {return cbrtl(__x);}
+
+#undef cbrt
+#define cbrt(__x) __tg_cbrt(__tg_promote1((__x))(__x))
+
+// ceil
+
+static float
+ _TG_ATTRS
+ __tg_ceil(float __x) {return ceilf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_ceil(double __x) {return ceil(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_ceil(long double __x) {return ceill(__x);}
+
+#undef ceil
+#define ceil(__x) __tg_ceil(__tg_promote1((__x))(__x))
+
+// copysign
+
+static float
+ _TG_ATTRS
+ __tg_copysign(float __x, float __y) {return copysignf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_copysign(double __x, double __y) {return copysign(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_copysign(long double __x, long double __y) {return copysignl(__x, __y);}
+
+#undef copysign
+#define copysign(__x, __y) __tg_copysign(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// erf
+
+static float
+ _TG_ATTRS
+ __tg_erf(float __x) {return erff(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_erf(double __x) {return erf(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_erf(long double __x) {return erfl(__x);}
+
+#undef erf
+#define erf(__x) __tg_erf(__tg_promote1((__x))(__x))
+
+// erfc
+
+static float
+ _TG_ATTRS
+ __tg_erfc(float __x) {return erfcf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_erfc(double __x) {return erfc(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_erfc(long double __x) {return erfcl(__x);}
+
+#undef erfc
+#define erfc(__x) __tg_erfc(__tg_promote1((__x))(__x))
+
+// exp2
+
+static float
+ _TG_ATTRS
+ __tg_exp2(float __x) {return exp2f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_exp2(double __x) {return exp2(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_exp2(long double __x) {return exp2l(__x);}
+
+#undef exp2
+#define exp2(__x) __tg_exp2(__tg_promote1((__x))(__x))
+
+// expm1
+
+static float
+ _TG_ATTRS
+ __tg_expm1(float __x) {return expm1f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_expm1(double __x) {return expm1(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_expm1(long double __x) {return expm1l(__x);}
+
+#undef expm1
+#define expm1(__x) __tg_expm1(__tg_promote1((__x))(__x))
+
+// fdim
+
+static float
+ _TG_ATTRS
+ __tg_fdim(float __x, float __y) {return fdimf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fdim(double __x, double __y) {return fdim(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fdim(long double __x, long double __y) {return fdiml(__x, __y);}
+
+#undef fdim
+#define fdim(__x, __y) __tg_fdim(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// floor
+
+static float
+ _TG_ATTRS
+ __tg_floor(float __x) {return floorf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_floor(double __x) {return floor(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_floor(long double __x) {return floorl(__x);}
+
+#undef floor
+#define floor(__x) __tg_floor(__tg_promote1((__x))(__x))
+
+// fma
+
+static float
+ _TG_ATTRS
+ __tg_fma(float __x, float __y, float __z)
+ {return fmaf(__x, __y, __z);}
+
+static double
+ _TG_ATTRS
+ __tg_fma(double __x, double __y, double __z)
+ {return fma(__x, __y, __z);}
+
+static long double
+ _TG_ATTRS
+ __tg_fma(long double __x,long double __y, long double __z)
+ {return fmal(__x, __y, __z);}
+
+#undef fma
+#define fma(__x, __y, __z) \
+ __tg_fma(__tg_promote3((__x), (__y), (__z))(__x), \
+ __tg_promote3((__x), (__y), (__z))(__y), \
+ __tg_promote3((__x), (__y), (__z))(__z))
+
+// fmax
+
+static float
+ _TG_ATTRS
+ __tg_fmax(float __x, float __y) {return fmaxf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fmax(double __x, double __y) {return fmax(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fmax(long double __x, long double __y) {return fmaxl(__x, __y);}
+
+#undef fmax
+#define fmax(__x, __y) __tg_fmax(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// fmin
+
+static float
+ _TG_ATTRS
+ __tg_fmin(float __x, float __y) {return fminf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fmin(double __x, double __y) {return fmin(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fmin(long double __x, long double __y) {return fminl(__x, __y);}
+
+#undef fmin
+#define fmin(__x, __y) __tg_fmin(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// fmod
+
+static float
+ _TG_ATTRS
+ __tg_fmod(float __x, float __y) {return fmodf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_fmod(double __x, double __y) {return fmod(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_fmod(long double __x, long double __y) {return fmodl(__x, __y);}
+
+#undef fmod
+#define fmod(__x, __y) __tg_fmod(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// frexp
+
+static float
+ _TG_ATTRS
+ __tg_frexp(float __x, int* __y) {return frexpf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_frexp(double __x, int* __y) {return frexp(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_frexp(long double __x, int* __y) {return frexpl(__x, __y);}
+
+#undef frexp
+#define frexp(__x, __y) __tg_frexp(__tg_promote1((__x))(__x), __y)
+
+// hypot
+
+static float
+ _TG_ATTRS
+ __tg_hypot(float __x, float __y) {return hypotf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_hypot(double __x, double __y) {return hypot(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_hypot(long double __x, long double __y) {return hypotl(__x, __y);}
+
+#undef hypot
+#define hypot(__x, __y) __tg_hypot(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// ilogb
+
+static int
+ _TG_ATTRS
+ __tg_ilogb(float __x) {return ilogbf(__x);}
+
+static int
+ _TG_ATTRS
+ __tg_ilogb(double __x) {return ilogb(__x);}
+
+static int
+ _TG_ATTRS
+ __tg_ilogb(long double __x) {return ilogbl(__x);}
+
+#undef ilogb
+#define ilogb(__x) __tg_ilogb(__tg_promote1((__x))(__x))
+
+// ldexp
+
+static float
+ _TG_ATTRS
+ __tg_ldexp(float __x, int __y) {return ldexpf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_ldexp(double __x, int __y) {return ldexp(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_ldexp(long double __x, int __y) {return ldexpl(__x, __y);}
+
+#undef ldexp
+#define ldexp(__x, __y) __tg_ldexp(__tg_promote1((__x))(__x), __y)
+
+// lgamma
+
+static float
+ _TG_ATTRS
+ __tg_lgamma(float __x) {return lgammaf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_lgamma(double __x) {return lgamma(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_lgamma(long double __x) {return lgammal(__x);}
+
+#undef lgamma
+#define lgamma(__x) __tg_lgamma(__tg_promote1((__x))(__x))
+
+// llrint
+
+static long long
+ _TG_ATTRS
+ __tg_llrint(float __x) {return llrintf(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llrint(double __x) {return llrint(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llrint(long double __x) {return llrintl(__x);}
+
+#undef llrint
+#define llrint(__x) __tg_llrint(__tg_promote1((__x))(__x))
+
+// llround
+
+static long long
+ _TG_ATTRS
+ __tg_llround(float __x) {return llroundf(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llround(double __x) {return llround(__x);}
+
+static long long
+ _TG_ATTRS
+ __tg_llround(long double __x) {return llroundl(__x);}
+
+#undef llround
+#define llround(__x) __tg_llround(__tg_promote1((__x))(__x))
+
+// log10
+
+static float
+ _TG_ATTRS
+ __tg_log10(float __x) {return log10f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log10(double __x) {return log10(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log10(long double __x) {return log10l(__x);}
+
+#undef log10
+#define log10(__x) __tg_log10(__tg_promote1((__x))(__x))
+
+// log1p
+
+static float
+ _TG_ATTRS
+ __tg_log1p(float __x) {return log1pf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log1p(double __x) {return log1p(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log1p(long double __x) {return log1pl(__x);}
+
+#undef log1p
+#define log1p(__x) __tg_log1p(__tg_promote1((__x))(__x))
+
+// log2
+
+static float
+ _TG_ATTRS
+ __tg_log2(float __x) {return log2f(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_log2(double __x) {return log2(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_log2(long double __x) {return log2l(__x);}
+
+#undef log2
+#define log2(__x) __tg_log2(__tg_promote1((__x))(__x))
+
+// logb
+
+static float
+ _TG_ATTRS
+ __tg_logb(float __x) {return logbf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_logb(double __x) {return logb(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_logb(long double __x) {return logbl(__x);}
+
+#undef logb
+#define logb(__x) __tg_logb(__tg_promote1((__x))(__x))
+
+// lrint
+
+static long
+ _TG_ATTRS
+ __tg_lrint(float __x) {return lrintf(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lrint(double __x) {return lrint(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lrint(long double __x) {return lrintl(__x);}
+
+#undef lrint
+#define lrint(__x) __tg_lrint(__tg_promote1((__x))(__x))
+
+// lround
+
+static long
+ _TG_ATTRS
+ __tg_lround(float __x) {return lroundf(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lround(double __x) {return lround(__x);}
+
+static long
+ _TG_ATTRS
+ __tg_lround(long double __x) {return lroundl(__x);}
+
+#undef lround
+#define lround(__x) __tg_lround(__tg_promote1((__x))(__x))
+
+// nearbyint
+
+static float
+ _TG_ATTRS
+ __tg_nearbyint(float __x) {return nearbyintf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_nearbyint(double __x) {return nearbyint(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_nearbyint(long double __x) {return nearbyintl(__x);}
+
+#undef nearbyint
+#define nearbyint(__x) __tg_nearbyint(__tg_promote1((__x))(__x))
+
+// nextafter
+
+static float
+ _TG_ATTRS
+ __tg_nextafter(float __x, float __y) {return nextafterf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_nextafter(double __x, double __y) {return nextafter(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_nextafter(long double __x, long double __y) {return nextafterl(__x, __y);}
+
+#undef nextafter
+#define nextafter(__x, __y) __tg_nextafter(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// nexttoward
+
+static float
+ _TG_ATTRS
+ __tg_nexttoward(float __x, long double __y) {return nexttowardf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_nexttoward(double __x, long double __y) {return nexttoward(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_nexttoward(long double __x, long double __y) {return nexttowardl(__x, __y);}
+
+#undef nexttoward
+#define nexttoward(__x, __y) __tg_nexttoward(__tg_promote1((__x))(__x), (__y))
+
+// remainder
+
+static float
+ _TG_ATTRS
+ __tg_remainder(float __x, float __y) {return remainderf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_remainder(double __x, double __y) {return remainder(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_remainder(long double __x, long double __y) {return remainderl(__x, __y);}
+
+#undef remainder
+#define remainder(__x, __y) __tg_remainder(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y))
+
+// remquo
+
+static float
+ _TG_ATTRS
+ __tg_remquo(float __x, float __y, int* __z)
+ {return remquof(__x, __y, __z);}
+
+static double
+ _TG_ATTRS
+ __tg_remquo(double __x, double __y, int* __z)
+ {return remquo(__x, __y, __z);}
+
+static long double
+ _TG_ATTRS
+    __tg_remquo(long double __x, long double __y, int* __z)
+ {return remquol(__x, __y, __z);}
+
+#undef remquo
+#define remquo(__x, __y, __z) \
+ __tg_remquo(__tg_promote2((__x), (__y))(__x), \
+ __tg_promote2((__x), (__y))(__y), \
+ (__z))
+
+// rint
+
+static float
+ _TG_ATTRS
+ __tg_rint(float __x) {return rintf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_rint(double __x) {return rint(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_rint(long double __x) {return rintl(__x);}
+
+#undef rint
+#define rint(__x) __tg_rint(__tg_promote1((__x))(__x))
+
+// round
+
+static float
+ _TG_ATTRS
+ __tg_round(float __x) {return roundf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_round(double __x) {return round(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_round(long double __x) {return roundl(__x);}
+
+#undef round
+#define round(__x) __tg_round(__tg_promote1((__x))(__x))
+
+// scalbn
+
+static float
+ _TG_ATTRS
+ __tg_scalbn(float __x, int __y) {return scalbnf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_scalbn(double __x, int __y) {return scalbn(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_scalbn(long double __x, int __y) {return scalbnl(__x, __y);}
+
+#undef scalbn
+#define scalbn(__x, __y) __tg_scalbn(__tg_promote1((__x))(__x), __y)
+
+// scalbln
+
+static float
+ _TG_ATTRS
+ __tg_scalbln(float __x, long __y) {return scalblnf(__x, __y);}
+
+static double
+ _TG_ATTRS
+ __tg_scalbln(double __x, long __y) {return scalbln(__x, __y);}
+
+static long double
+ _TG_ATTRS
+ __tg_scalbln(long double __x, long __y) {return scalblnl(__x, __y);}
+
+#undef scalbln
+#define scalbln(__x, __y) __tg_scalbln(__tg_promote1((__x))(__x), __y)
+
+// tgamma
+
+static float
+ _TG_ATTRS
+ __tg_tgamma(float __x) {return tgammaf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_tgamma(double __x) {return tgamma(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_tgamma(long double __x) {return tgammal(__x);}
+
+#undef tgamma
+#define tgamma(__x) __tg_tgamma(__tg_promote1((__x))(__x))
+
+// trunc
+
+static float
+ _TG_ATTRS
+ __tg_trunc(float __x) {return truncf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_trunc(double __x) {return trunc(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_trunc(long double __x) {return truncl(__x);}
+
+#undef trunc
+#define trunc(__x) __tg_trunc(__tg_promote1((__x))(__x))
+
+// carg
+
+static float
+ _TG_ATTRS
+ __tg_carg(float __x) {return atan2f(0.F, __x);}
+
+static double
+ _TG_ATTRS
+ __tg_carg(double __x) {return atan2(0., __x);}
+
+static long double
+ _TG_ATTRS
+ __tg_carg(long double __x) {return atan2l(0.L, __x);}
+
+static float
+ _TG_ATTRS
+ __tg_carg(float _Complex __x) {return cargf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_carg(double _Complex __x) {return carg(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_carg(long double _Complex __x) {return cargl(__x);}
+
+#undef carg
+#define carg(__x) __tg_carg(__tg_promote1((__x))(__x))
+
+// cimag
+
+static float
+ _TG_ATTRS
+ __tg_cimag(float __x) {return 0;}
+
+static double
+ _TG_ATTRS
+ __tg_cimag(double __x) {return 0;}
+
+static long double
+ _TG_ATTRS
+ __tg_cimag(long double __x) {return 0;}
+
+static float
+ _TG_ATTRS
+ __tg_cimag(float _Complex __x) {return cimagf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_cimag(double _Complex __x) {return cimag(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_cimag(long double _Complex __x) {return cimagl(__x);}
+
+#undef cimag
+#define cimag(__x) __tg_cimag(__tg_promote1((__x))(__x))
+
+// conj
+
+static float _Complex
+ _TG_ATTRS
+ __tg_conj(float __x) {return __x;}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_conj(double __x) {return __x;}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_conj(long double __x) {return __x;}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_conj(float _Complex __x) {return conjf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_conj(double _Complex __x) {return conj(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_conj(long double _Complex __x) {return conjl(__x);}
+
+#undef conj
+#define conj(__x) __tg_conj(__tg_promote1((__x))(__x))
+
+// cproj
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cproj(float __x) {return cprojf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cproj(double __x) {return cproj(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cproj(long double __x) {return cprojl(__x);}
+
+static float _Complex
+ _TG_ATTRS
+ __tg_cproj(float _Complex __x) {return cprojf(__x);}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_cproj(double _Complex __x) {return cproj(__x);}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_cproj(long double _Complex __x) {return cprojl(__x);}
+
+#undef cproj
+#define cproj(__x) __tg_cproj(__tg_promote1((__x))(__x))
+
+// creal
+
+static float _Complex
+ _TG_ATTRS
+ __tg_creal(float __x) {return __x;}
+
+static double _Complex
+ _TG_ATTRS
+ __tg_creal(double __x) {return __x;}
+
+static long double _Complex
+ _TG_ATTRS
+ __tg_creal(long double __x) {return __x;}
+
+static float
+ _TG_ATTRS
+ __tg_creal(float _Complex __x) {return crealf(__x);}
+
+static double
+ _TG_ATTRS
+ __tg_creal(double _Complex __x) {return creal(__x);}
+
+static long double
+ _TG_ATTRS
+ __tg_creal(long double _Complex __x) {return creall(__x);}
+
+#undef creal
+#define creal(__x) __tg_creal(__tg_promote1((__x))(__x))
+
+#undef _TG_ATTRSp
+#undef _TG_ATTRS
+
+#endif /* __cplusplus */
+#endif /* __TGMATH_H */
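
A minimal usage sketch of the type-generic dispatch implemented above (illustrative only, not part of the imported header): hypot applied to two float arguments expands through __tg_promote2 to hypotf, mixing float and double promotes both operands to double and selects hypot, and ilogb returns int regardless of the argument's floating type.

    /* Sketch: assumes a C99 hosted environment; link with -lm. */
    #include <tgmath.h>
    #include <stdio.h>

    int main(void) {
        float  f = 3.0f;
        double d = 4.0;
        printf("%f\n", (double)hypot(f, f)); /* dispatches to hypotf -> ~4.242641 */
        printf("%f\n", hypot(f, d));         /* promoted to double   -> 5.000000  */
        printf("%d\n", ilogb(d));            /* dispatches to ilogb  -> 2         */
        return 0;
    }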
diff --git a/contrib/llvm/tools/clang/lib/Headers/tmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/tmmintrin.h
new file mode 100644
index 0000000..a62c6cc
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/tmmintrin.h
@@ -0,0 +1,225 @@
+/*===---- tmmintrin.h - SSSE3 intrinsics -----------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __TMMINTRIN_H
+#define __TMMINTRIN_H
+
+#ifndef __SSSE3__
+#error "SSSE3 instruction set not enabled"
+#else
+
+#include <pmmintrin.h>
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_abs_pi8(__m64 a)
+{
+ return (__m64)__builtin_ia32_pabsb((__v8qi)a);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_abs_epi8(__m128i a)
+{
+ return (__m128i)__builtin_ia32_pabsb128((__v16qi)a);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_abs_pi16(__m64 a)
+{
+ return (__m64)__builtin_ia32_pabsw((__v4hi)a);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_abs_epi16(__m128i a)
+{
+ return (__m128i)__builtin_ia32_pabsw128((__v8hi)a);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_abs_pi32(__m64 a)
+{
+ return (__m64)__builtin_ia32_pabsd((__v2si)a);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_abs_epi32(__m128i a)
+{
+ return (__m128i)__builtin_ia32_pabsd128((__v4si)a);
+}
+
+#define _mm_alignr_epi8(a, b, n) __extension__ ({ \
+ __m128i __a = (a); \
+ __m128i __b = (b); \
+ (__m128i)__builtin_ia32_palignr128((__v16qi)__a, (__v16qi)__b, (n)); })
+
+#define _mm_alignr_pi8(a, b, n) __extension__ ({ \
+ __m64 __a = (a); \
+ __m64 __b = (b); \
+ (__m64)__builtin_ia32_palignr((__v8qi)__a, (__v8qi)__b, (n)); })
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hadd_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_phaddw128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hadd_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_phaddd128((__v4si)a, (__v4si)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_hadd_pi16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_phaddw((__v4hi)a, (__v4hi)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_hadd_pi32(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_phaddd((__v2si)a, (__v2si)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hadds_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_phaddsw128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_hadds_pi16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_phaddsw((__v4hi)a, (__v4hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hsub_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_phsubw128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hsub_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_phsubd128((__v4si)a, (__v4si)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_hsub_pi16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_phsubw((__v4hi)a, (__v4hi)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_hsub_pi32(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_phsubd((__v2si)a, (__v2si)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_hsubs_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_phsubsw128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_hsubs_pi16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_phsubsw((__v4hi)a, (__v4hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_maddubs_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pmaddubsw128((__v16qi)a, (__v16qi)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_maddubs_pi16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pmaddubsw((__v8qi)a, (__v8qi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_mulhrs_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_mulhrs_pi16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pmulhrsw((__v4hi)a, (__v4hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_shuffle_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_pshufb128((__v16qi)a, (__v16qi)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_shuffle_pi8(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pshufb((__v8qi)a, (__v8qi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sign_epi8(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_psignb128((__v16qi)a, (__v16qi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sign_epi16(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_psignw128((__v8hi)a, (__v8hi)b);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_sign_epi32(__m128i a, __m128i b)
+{
+ return (__m128i)__builtin_ia32_psignd128((__v4si)a, (__v4si)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sign_pi8(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_psignb((__v8qi)a, (__v8qi)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sign_pi16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_psignw((__v4hi)a, (__v4hi)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sign_pi32(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_psignd((__v2si)a, (__v2si)b);
+}
+
+#endif /* __SSSE3__ */
+
+#endif /* __TMMINTRIN_H */
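
A short, hedged sketch of how the SSSE3 intrinsics above are used (illustrative only, not part of the imported header). It assumes a build with SSSE3 enabled, e.g. cc -mssse3, and relies on SSE2 helpers such as _mm_set_epi8 and _mm_storeu_si128 that are pulled in through pmmintrin.h.

    /* Sketch: byte-wise absolute value (PABSB) followed by a PSHUFB broadcast. */
    #include <tmmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128i v   = _mm_set_epi8(-1, -2, -3, -4, -5, -6, -7, -8,
                                    1,  2,  3,  4,  5,  6,  7,  8);
        __m128i av  = _mm_abs_epi8(v);            /* |each byte|             */
        __m128i sel = _mm_set1_epi8(0);           /* control: pick byte 0    */
        __m128i bc  = _mm_shuffle_epi8(av, sel);  /* broadcast |byte 0| == 8 */

        unsigned char out[16];
        _mm_storeu_si128((__m128i *)out, bc);
        printf("%d\n", out[0]);                   /* prints 8 */
        return 0;
    }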
diff --git a/contrib/llvm/tools/clang/lib/Headers/unwind.h b/contrib/llvm/tools/clang/lib/Headers/unwind.h
new file mode 100644
index 0000000..a065920
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/unwind.h
@@ -0,0 +1,124 @@
+/*===---- unwind.h - Stack unwinding ----------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+/* See "Data Definitions for libgcc_s" in the Linux Standard Base.*/
+
+#if __has_include_next(<unwind.h>)
+/* Darwin and libunwind provide an unwind.h. If that's available, use
+ * it. libunwind wraps some of its definitions in #ifdef _GNU_SOURCE,
+ * so define that around the include.*/
+# ifndef _GNU_SOURCE
+# define _SHOULD_UNDEFINE_GNU_SOURCE
+# define _GNU_SOURCE
+# endif
+// libunwind's unwind.h reflects the current visibility. However, Mozilla
+// builds with -fvisibility=hidden and relies on gcc's unwind.h to reset the
+// visibility to default and export its contents. gcc also allows users to
+// override its override by #defining HIDE_EXPORTS (but note, this only obeys
+// the user's -fvisibility setting; it doesn't hide any exports on its own). We
+// imitate gcc's header here:
+# ifdef HIDE_EXPORTS
+# include_next <unwind.h>
+# else
+# pragma GCC visibility push(default)
+# include_next <unwind.h>
+# pragma GCC visibility pop
+# endif
+# ifdef _SHOULD_UNDEFINE_GNU_SOURCE
+# undef _GNU_SOURCE
+# undef _SHOULD_UNDEFINE_GNU_SOURCE
+# endif
+#else
+
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* It is a bit strange for a header to play with the visibility of the
+ symbols it declares, but this matches gcc's behavior and some programs
+ depend on it */
+#pragma GCC visibility push(default)
+
+struct _Unwind_Context;
+typedef enum {
+ _URC_NO_REASON = 0,
+ _URC_FOREIGN_EXCEPTION_CAUGHT = 1,
+
+ _URC_FATAL_PHASE2_ERROR = 2,
+ _URC_FATAL_PHASE1_ERROR = 3,
+ _URC_NORMAL_STOP = 4,
+
+ _URC_END_OF_STACK = 5,
+ _URC_HANDLER_FOUND = 6,
+ _URC_INSTALL_CONTEXT = 7,
+ _URC_CONTINUE_UNWIND = 8
+} _Unwind_Reason_Code;
+
+
+#ifdef __arm__
+
+typedef enum {
+ _UVRSC_CORE = 0, /* integer register */
+ _UVRSC_VFP = 1, /* vfp */
+ _UVRSC_WMMXD = 3, /* Intel WMMX data register */
+ _UVRSC_WMMXC = 4 /* Intel WMMX control register */
+} _Unwind_VRS_RegClass;
+
+typedef enum {
+ _UVRSD_UINT32 = 0,
+ _UVRSD_VFPX = 1,
+ _UVRSD_UINT64 = 3,
+ _UVRSD_FLOAT = 4,
+ _UVRSD_DOUBLE = 5
+} _Unwind_VRS_DataRepresentation;
+
+typedef enum {
+ _UVRSR_OK = 0,
+ _UVRSR_NOT_IMPLEMENTED = 1,
+ _UVRSR_FAILED = 2
+} _Unwind_VRS_Result;
+
+_Unwind_VRS_Result _Unwind_VRS_Get(struct _Unwind_Context *context,
+ _Unwind_VRS_RegClass regclass,
+ uint32_t regno,
+ _Unwind_VRS_DataRepresentation representation,
+ void *valuep);
+
+#else
+
+uintptr_t _Unwind_GetIP(struct _Unwind_Context* context);
+
+#endif
+
+typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context*, void*);
+_Unwind_Reason_Code _Unwind_Backtrace(_Unwind_Trace_Fn, void*);
+
+#pragma GCC visibility pop
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
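
A hedged usage sketch for the interface declared above (illustrative only, not part of the imported header): _Unwind_Backtrace walks the current call stack and invokes the trace callback once per frame, and on non-ARM targets the instruction pointer of each frame can be read with _Unwind_GetIP.

    /* Sketch: assumes a non-ARM target with a working unwinder (libgcc_s or libunwind). */
    #include <unwind.h>
    #include <stdio.h>

    static _Unwind_Reason_Code trace_cb(struct _Unwind_Context *ctx, void *arg) {
        int *depth = (int *)arg;
        printf("frame %2d: ip=%p\n", (*depth)++, (void *)_Unwind_GetIP(ctx));
        return _URC_NO_REASON;   /* continue unwinding */
    }

    int main(void) {
        int depth = 0;
        _Unwind_Backtrace(trace_cb, &depth);
        return 0;
    }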
diff --git a/contrib/llvm/tools/clang/lib/Headers/varargs.h b/contrib/llvm/tools/clang/lib/Headers/varargs.h
new file mode 100644
index 0000000..b5477d0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/varargs.h
@@ -0,0 +1,26 @@
+/*===---- varargs.h - Variable argument handling --------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __VARARGS_H
+#define __VARARGS_H
+ #error "Please use <stdarg.h> instead of <varargs.h>"
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h
new file mode 100644
index 0000000..8f58850
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/wmmintrin.h
@@ -0,0 +1,67 @@
+/*===---- wmmintrin.h - AES intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _WMMINTRIN_H
+#define _WMMINTRIN_H
+
+#if !defined (__AES__)
+# error "AES instructions not enabled"
+#else
+
+#include <xmmintrin.h>
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_aesenc_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesenc128(__V, __R);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_aesenclast_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesenclast128(__V, __R);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_aesdec_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesdec128(__V, __R);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_aesdeclast_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesdeclast128(__V, __R);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_aesimc_si128(__m128i __V)
+{
+ return (__m128i)__builtin_ia32_aesimc128(__V);
+}
+
+#define _mm_aeskeygenassist_si128(C, R) \
+ __builtin_ia32_aeskeygenassist128((C), (R))
+
+#endif /* __AES__ */
+#endif /* _WMMINTRIN_H */
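
A hedged sketch of the AES-NI intrinsics declared above (illustrative only, not part of the imported header). It assumes a build with AES enabled, e.g. cc -maes, and that the round keys come from a key schedule produced elsewhere (for example with _mm_aeskeygenassist_si128).

    /* Sketch: one middle round and one final round of AES encryption. */
    #include <wmmintrin.h>

    __m128i aes_round(__m128i state, __m128i round_key) {
        return _mm_aesenc_si128(state, round_key);      /* AESENC     */
    }

    __m128i aes_last_round(__m128i state, __m128i round_key) {
        return _mm_aesenclast_si128(state, round_key);  /* AESENCLAST */
    }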
diff --git a/contrib/llvm/tools/clang/lib/Headers/x86intrin.h b/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
new file mode 100644
index 0000000..f5e4d88
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/x86intrin.h
@@ -0,0 +1,55 @@
+/*===---- x86intrin.h - X86 intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __X86INTRIN_H
+#define __X86INTRIN_H
+
+#include <immintrin.h>
+
+#ifdef __3dNOW__
+#include <mm3dnow.h>
+#endif
+
+#ifdef __BMI__
+#include <bmiintrin.h>
+#endif
+
+#ifdef __BMI2__
+#include <bmi2intrin.h>
+#endif
+
+#ifdef __LZCNT__
+#include <lzcntintrin.h>
+#endif
+
+#ifdef __POPCNT__
+#include <popcntintrin.h>
+#endif
+
+#ifdef __FMA4__
+#include <fma4intrin.h>
+#endif
+
+// FIXME: SSE4A, XOP, LWP, ABM
+
+#endif /* __X86INTRIN_H */
diff --git a/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
new file mode 100644
index 0000000..e616157
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Headers/xmmintrin.h
@@ -0,0 +1,990 @@
+/*===---- xmmintrin.h - SSE intrinsics -------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef __XMMINTRIN_H
+#define __XMMINTRIN_H
+
+#ifndef __SSE__
+#error "SSE instruction set not enabled"
+#else
+
+#include <mmintrin.h>
+
+typedef int __v4si __attribute__((__vector_size__(16)));
+typedef float __v4sf __attribute__((__vector_size__(16)));
+typedef float __m128 __attribute__((__vector_size__(16)));
+
+// This header should only be included in a hosted environment as it depends on
+// a standard library to provide allocation routines.
+#if __STDC_HOSTED__
+#include <mm_malloc.h>
+#endif
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_add_ss(__m128 a, __m128 b)
+{
+ a[0] += b[0];
+ return a;
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_add_ps(__m128 a, __m128 b)
+{
+ return a + b;
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_sub_ss(__m128 a, __m128 b)
+{
+ a[0] -= b[0];
+ return a;
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_sub_ps(__m128 a, __m128 b)
+{
+ return a - b;
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_mul_ss(__m128 a, __m128 b)
+{
+ a[0] *= b[0];
+ return a;
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_mul_ps(__m128 a, __m128 b)
+{
+ return a * b;
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_div_ss(__m128 a, __m128 b)
+{
+ a[0] /= b[0];
+ return a;
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_div_ps(__m128 a, __m128 b)
+{
+ return a / b;
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_sqrt_ss(__m128 a)
+{
+ return __builtin_ia32_sqrtss(a);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_sqrt_ps(__m128 a)
+{
+ return __builtin_ia32_sqrtps(a);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_rcp_ss(__m128 a)
+{
+ return __builtin_ia32_rcpss(a);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_rcp_ps(__m128 a)
+{
+ return __builtin_ia32_rcpps(a);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_rsqrt_ss(__m128 a)
+{
+ return __builtin_ia32_rsqrtss(a);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_rsqrt_ps(__m128 a)
+{
+ return __builtin_ia32_rsqrtps(a);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_min_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_minss(a, b);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_min_ps(__m128 a, __m128 b)
+{
+ return __builtin_ia32_minps(a, b);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_max_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_maxss(a, b);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_max_ps(__m128 a, __m128 b)
+{
+ return __builtin_ia32_maxps(a, b);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_and_ps(__m128 a, __m128 b)
+{
+ return (__m128)((__v4si)a & (__v4si)b);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_andnot_ps(__m128 a, __m128 b)
+{
+ return (__m128)(~(__v4si)a & (__v4si)b);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_or_ps(__m128 a, __m128 b)
+{
+ return (__m128)((__v4si)a | (__v4si)b);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_xor_ps(__m128 a, __m128 b)
+{
+ return (__m128)((__v4si)a ^ (__v4si)b);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(a, b, 0);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpeq_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(a, b, 0);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmplt_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(a, b, 1);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmplt_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(a, b, 1);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmple_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(a, b, 2);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmple_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(a, b, 2);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(b, a, 1);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpgt_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(b, a, 1);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpge_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(b, a, 2);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpge_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(b, a, 2);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpneq_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(a, b, 4);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpneq_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(a, b, 4);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnlt_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(a, b, 5);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnlt_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(a, b, 5);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnle_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(a, b, 6);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnle_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(a, b, 6);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpngt_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(b, a, 5);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpngt_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(b, a, 5);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnge_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(b, a, 6);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpnge_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(b, a, 6);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpord_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(a, b, 7);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpord_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(a, b, 7);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpunord_ss(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpss(a, b, 3);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cmpunord_ps(__m128 a, __m128 b)
+{
+ return (__m128)__builtin_ia32_cmpps(a, b, 3);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_comieq_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_comieq(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_comilt_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_comilt(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_comile_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_comile(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_comigt_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_comigt(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_comige_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_comige(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_comineq_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_comineq(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomieq_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_ucomieq(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomilt_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_ucomilt(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomile_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_ucomile(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomigt_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_ucomigt(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomige_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_ucomige(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_ucomineq_ss(__m128 a, __m128 b)
+{
+ return __builtin_ia32_ucomineq(a, b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_cvtss_si32(__m128 a)
+{
+ return __builtin_ia32_cvtss2si(a);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_cvt_ss2si(__m128 a)
+{
+ return _mm_cvtss_si32(a);
+}
+
+#ifdef __x86_64__
+
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+_mm_cvtss_si64(__m128 a)
+{
+ return __builtin_ia32_cvtss2si64(a);
+}
+
+#endif
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtps_pi32(__m128 a)
+{
+ return (__m64)__builtin_ia32_cvtps2pi(a);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvt_ps2pi(__m128 a)
+{
+ return _mm_cvtps_pi32(a);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_cvttss_si32(__m128 a)
+{
+ return a[0];
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_cvtt_ss2si(__m128 a)
+{
+ return _mm_cvttss_si32(a);
+}
+
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
+_mm_cvttss_si64(__m128 a)
+{
+ return a[0];
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvttps_pi32(__m128 a)
+{
+ return (__m64)__builtin_ia32_cvttps2pi(a);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtt_ps2pi(__m128 a)
+{
+ return _mm_cvttps_pi32(a);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi32_ss(__m128 a, int b)
+{
+ a[0] = b;
+ return a;
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvt_si2ss(__m128 a, int b)
+{
+ return _mm_cvtsi32_ss(a, b);
+}
+
+#ifdef __x86_64__
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtsi64_ss(__m128 a, long long b)
+{
+ a[0] = b;
+ return a;
+}
+
+#endif
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpi32_ps(__m128 a, __m64 b)
+{
+ return __builtin_ia32_cvtpi2ps(a, (__v2si)b);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvt_pi2ps(__m128 a, __m64 b)
+{
+ return _mm_cvtpi32_ps(a, b);
+}
+
+static __inline__ float __attribute__((__always_inline__, __nodebug__))
+_mm_cvtss_f32(__m128 a)
+{
+ return a[0];
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_loadh_pi(__m128 a, const __m64 *p)
+{
+ typedef float __mm_loadh_pi_v2f32 __attribute__((__vector_size__(8)));
+ struct __mm_loadh_pi_struct {
+ __mm_loadh_pi_v2f32 u;
+ } __attribute__((__packed__, __may_alias__));
+ __mm_loadh_pi_v2f32 b = ((struct __mm_loadh_pi_struct*)p)->u;
+ __m128 bb = __builtin_shufflevector(b, b, 0, 1, 0, 1);
+ return __builtin_shufflevector(a, bb, 0, 1, 4, 5);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_loadl_pi(__m128 a, const __m64 *p)
+{
+ typedef float __mm_loadl_pi_v2f32 __attribute__((__vector_size__(8)));
+ struct __mm_loadl_pi_struct {
+ __mm_loadl_pi_v2f32 u;
+ } __attribute__((__packed__, __may_alias__));
+ __mm_loadl_pi_v2f32 b = ((struct __mm_loadl_pi_struct*)p)->u;
+ __m128 bb = __builtin_shufflevector(b, b, 0, 1, 0, 1);
+ return __builtin_shufflevector(a, bb, 4, 5, 2, 3);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_load_ss(const float *p)
+{
+ struct __mm_load_ss_struct {
+ float u;
+ } __attribute__((__packed__, __may_alias__));
+ float u = ((struct __mm_load_ss_struct*)p)->u;
+ return (__m128){ u, 0, 0, 0 };
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_load1_ps(const float *p)
+{
+ struct __mm_load1_ps_struct {
+ float u;
+ } __attribute__((__packed__, __may_alias__));
+ float u = ((struct __mm_load1_ps_struct*)p)->u;
+ return (__m128){ u, u, u, u };
+}
+
+#define _mm_load_ps1(p) _mm_load1_ps(p)
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_load_ps(const float *p)
+{
+ return *(__m128*)p;
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_loadu_ps(const float *p)
+{
+ struct __loadu_ps {
+ __m128 v;
+ } __attribute__((__packed__, __may_alias__));
+ return ((struct __loadu_ps*)p)->v;
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_loadr_ps(const float *p)
+{
+ __m128 a = _mm_load_ps(p);
+ return __builtin_shufflevector(a, a, 3, 2, 1, 0);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_set_ss(float w)
+{
+ return (__m128){ w, 0, 0, 0 };
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_set1_ps(float w)
+{
+ return (__m128){ w, w, w, w };
+}
+
+// Microsoft specific.
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_set_ps1(float w)
+{
+ return _mm_set1_ps(w);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_set_ps(float z, float y, float x, float w)
+{
+ return (__m128){ w, x, y, z };
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_setr_ps(float z, float y, float x, float w)
+{
+ return (__m128){ z, y, x, w };
+}
+
+static __inline__ __m128 __attribute__((__always_inline__))
+_mm_setzero_ps(void)
+{
+ return (__m128){ 0, 0, 0, 0 };
+}
+
+static __inline__ void __attribute__((__always_inline__))
+_mm_storeh_pi(__m64 *p, __m128 a)
+{
+ __builtin_ia32_storehps((__v2si *)p, a);
+}
+
+static __inline__ void __attribute__((__always_inline__))
+_mm_storel_pi(__m64 *p, __m128 a)
+{
+ __builtin_ia32_storelps((__v2si *)p, a);
+}
+
+static __inline__ void __attribute__((__always_inline__))
+_mm_store_ss(float *p, __m128 a)
+{
+ struct __mm_store_ss_struct {
+ float u;
+ } __attribute__((__packed__, __may_alias__));
+ ((struct __mm_store_ss_struct*)p)->u = a[0];
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_storeu_ps(float *p, __m128 a)
+{
+ __builtin_ia32_storeups(p, a);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_store1_ps(float *p, __m128 a)
+{
+ a = __builtin_shufflevector(a, a, 0, 0, 0, 0);
+ _mm_storeu_ps(p, a);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_store_ps1(float *p, __m128 a)
+{
+ return _mm_store1_ps(p, a);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_store_ps(float *p, __m128 a)
+{
+ *(__m128 *)p = a;
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_storer_ps(float *p, __m128 a)
+{
+ a = __builtin_shufflevector(a, a, 3, 2, 1, 0);
+ _mm_store_ps(p, a);
+}
+
+#define _MM_HINT_T0 3
+#define _MM_HINT_T1 2
+#define _MM_HINT_T2 1
+#define _MM_HINT_NTA 0
+
+/* FIXME: We have to #define this because "sel" must be a constant integer, and
+ Sema doesn't do any form of constant propagation yet. */
+
+#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)(a), 0, (sel)))
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_stream_pi(__m64 *p, __m64 a)
+{
+ __builtin_ia32_movntq(p, a);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_stream_ps(float *p, __m128 a)
+{
+ __builtin_ia32_movntps(p, a);
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_sfence(void)
+{
+ __builtin_ia32_sfence();
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_extract_pi16(__m64 a, int n)
+{
+ __v4hi b = (__v4hi)a;
+ return (unsigned short)b[n & 3];
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_insert_pi16(__m64 a, int d, int n)
+{
+ __v4hi b = (__v4hi)a;
+ b[n & 3] = d;
+ return (__m64)b;
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_max_pi16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pmaxsw((__v4hi)a, (__v4hi)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_max_pu8(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pmaxub((__v8qi)a, (__v8qi)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_min_pi16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pminsw((__v4hi)a, (__v4hi)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_min_pu8(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pminub((__v8qi)a, (__v8qi)b);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_movemask_pi8(__m64 a)
+{
+ return __builtin_ia32_pmovmskb((__v8qi)a);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_mulhi_pu16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pmulhuw((__v4hi)a, (__v4hi)b);
+}
+
+#define _mm_shuffle_pi16(a, n) __extension__ ({ \
+ __m64 __a = (a); \
+ (__m64)__builtin_ia32_pshufw((__v4hi)__a, (n)); })
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_maskmove_si64(__m64 d, __m64 n, char *p)
+{
+ __builtin_ia32_maskmovq((__v8qi)d, (__v8qi)n, p);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_avg_pu8(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pavgb((__v8qi)a, (__v8qi)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_avg_pu16(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_pavgw((__v4hi)a, (__v4hi)b);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_sad_pu8(__m64 a, __m64 b)
+{
+ return (__m64)__builtin_ia32_psadbw((__v8qi)a, (__v8qi)b);
+}
+
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
+_mm_getcsr(void)
+{
+ return __builtin_ia32_stmxcsr();
+}
+
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_setcsr(unsigned int i)
+{
+ __builtin_ia32_ldmxcsr(i);
+}
+
+#define _mm_shuffle_ps(a, b, mask) __extension__ ({ \
+ __m128 __a = (a); \
+ __m128 __b = (b); \
+ (__m128)__builtin_shufflevector((__v4sf)__a, (__v4sf)__b, \
+ (mask) & 0x3, ((mask) & 0xc) >> 2, \
+ (((mask) & 0x30) >> 4) + 4, \
+ (((mask) & 0xc0) >> 6) + 4); })
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_unpackhi_ps(__m128 a, __m128 b)
+{
+ return __builtin_shufflevector(a, b, 2, 6, 3, 7);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_unpacklo_ps(__m128 a, __m128 b)
+{
+ return __builtin_shufflevector(a, b, 0, 4, 1, 5);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_move_ss(__m128 a, __m128 b)
+{
+ return __builtin_shufflevector(a, b, 4, 1, 2, 3);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_movehl_ps(__m128 a, __m128 b)
+{
+ return __builtin_shufflevector(a, b, 6, 7, 2, 3);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_movelh_ps(__m128 a, __m128 b)
+{
+ return __builtin_shufflevector(a, b, 0, 1, 4, 5);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpi16_ps(__m64 a)
+{
+ __m64 b, c;
+ __m128 r;
+
+ b = _mm_setzero_si64();
+ b = _mm_cmpgt_pi16(b, a);
+ c = _mm_unpackhi_pi16(a, b);
+ r = _mm_setzero_ps();
+ r = _mm_cvtpi32_ps(r, c);
+ r = _mm_movelh_ps(r, r);
+ c = _mm_unpacklo_pi16(a, b);
+ r = _mm_cvtpi32_ps(r, c);
+
+ return r;
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpu16_ps(__m64 a)
+{
+ __m64 b, c;
+ __m128 r;
+
+ b = _mm_setzero_si64();
+ c = _mm_unpackhi_pi16(a, b);
+ r = _mm_setzero_ps();
+ r = _mm_cvtpi32_ps(r, c);
+ r = _mm_movelh_ps(r, r);
+ c = _mm_unpacklo_pi16(a, b);
+ r = _mm_cvtpi32_ps(r, c);
+
+ return r;
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpi8_ps(__m64 a)
+{
+ __m64 b;
+
+ b = _mm_setzero_si64();
+ b = _mm_cmpgt_pi8(b, a);
+ b = _mm_unpacklo_pi8(a, b);
+
+ return _mm_cvtpi16_ps(b);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpu8_ps(__m64 a)
+{
+ __m64 b;
+
+ b = _mm_setzero_si64();
+ b = _mm_unpacklo_pi8(a, b);
+
+ return _mm_cvtpi16_ps(b);
+}
+
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtpi32x2_ps(__m64 a, __m64 b)
+{
+ __m128 c;
+
+ c = _mm_setzero_ps();
+ c = _mm_cvtpi32_ps(c, b);
+ c = _mm_movelh_ps(c, c);
+
+ return _mm_cvtpi32_ps(c, a);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtps_pi16(__m128 a)
+{
+ __m64 b, c;
+
+ b = _mm_cvtps_pi32(a);
+ a = _mm_movehl_ps(a, a);
+ c = _mm_cvtps_pi32(a);
+
+ return _mm_packs_pi16(b, c);
+}
+
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
+_mm_cvtps_pi8(__m128 a)
+{
+ __m64 b, c;
+
+ b = _mm_cvtps_pi16(a);
+ c = _mm_setzero_si64();
+
+ return _mm_packs_pi16(b, c);
+}
+
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
+_mm_movemask_ps(__m128 a)
+{
+ return __builtin_ia32_movmskps(a);
+}
+
+#define _MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
+
+#define _MM_EXCEPT_INVALID (0x0001)
+#define _MM_EXCEPT_DENORM (0x0002)
+#define _MM_EXCEPT_DIV_ZERO (0x0004)
+#define _MM_EXCEPT_OVERFLOW (0x0008)
+#define _MM_EXCEPT_UNDERFLOW (0x0010)
+#define _MM_EXCEPT_INEXACT (0x0020)
+#define _MM_EXCEPT_MASK (0x003f)
+
+#define _MM_MASK_INVALID (0x0080)
+#define _MM_MASK_DENORM (0x0100)
+#define _MM_MASK_DIV_ZERO (0x0200)
+#define _MM_MASK_OVERFLOW (0x0400)
+#define _MM_MASK_UNDERFLOW (0x0800)
+#define _MM_MASK_INEXACT (0x1000)
+#define _MM_MASK_MASK (0x1f80)
+
+#define _MM_ROUND_NEAREST (0x0000)
+#define _MM_ROUND_DOWN (0x2000)
+#define _MM_ROUND_UP (0x4000)
+#define _MM_ROUND_TOWARD_ZERO (0x6000)
+#define _MM_ROUND_MASK (0x6000)
+
+#define _MM_FLUSH_ZERO_MASK (0x8000)
+#define _MM_FLUSH_ZERO_ON (0x8000)
+#define _MM_FLUSH_ZERO_OFF (0x0000)
+
+#define _MM_GET_EXCEPTION_MASK() (_mm_getcsr() & _MM_MASK_MASK)
+#define _MM_GET_EXCEPTION_STATE() (_mm_getcsr() & _MM_EXCEPT_MASK)
+#define _MM_GET_FLUSH_ZERO_MODE() (_mm_getcsr() & _MM_FLUSH_ZERO_MASK)
+#define _MM_GET_ROUNDING_MODE() (_mm_getcsr() & _MM_ROUND_MASK)
+
+#define _MM_SET_EXCEPTION_MASK(x) (_mm_setcsr((_mm_getcsr() & ~_MM_MASK_MASK) | (x)))
+#define _MM_SET_EXCEPTION_STATE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_EXCEPT_MASK) | (x)))
+#define _MM_SET_FLUSH_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_FLUSH_ZERO_MASK) | (x)))
+#define _MM_SET_ROUNDING_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_ROUND_MASK) | (x)))
+
+#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
+do { \
+ __m128 tmp3, tmp2, tmp1, tmp0; \
+ tmp0 = _mm_unpacklo_ps((row0), (row1)); \
+ tmp2 = _mm_unpacklo_ps((row2), (row3)); \
+ tmp1 = _mm_unpackhi_ps((row0), (row1)); \
+ tmp3 = _mm_unpackhi_ps((row2), (row3)); \
+ (row0) = _mm_movelh_ps(tmp0, tmp2); \
+ (row1) = _mm_movehl_ps(tmp2, tmp0); \
+ (row2) = _mm_movelh_ps(tmp1, tmp3); \
+ (row3) = _mm_movehl_ps(tmp3, tmp1); \
+} while (0)
+
+/* Aliases for compatibility. */
+#define _m_pextrw _mm_extract_pi16
+#define _m_pinsrw _mm_insert_pi16
+#define _m_pmaxsw _mm_max_pi16
+#define _m_pmaxub _mm_max_pu8
+#define _m_pminsw _mm_min_pi16
+#define _m_pminub _mm_min_pu8
+#define _m_pmovmskb _mm_movemask_pi8
+#define _m_pmulhuw _mm_mulhi_pu16
+#define _m_pshufw _mm_shuffle_pi16
+#define _m_maskmovq _mm_maskmove_si64
+#define _m_pavgb _mm_avg_pu8
+#define _m_pavgw _mm_avg_pu16
+#define _m_psadbw _mm_sad_pu8
+#define _m_ _mm_
+
+/* Ugly hack for backwards-compatibility (compatible with gcc) */
+#ifdef __SSE2__
+#include <emmintrin.h>
+#endif
+
+#endif /* __SSE__ */
+
+#endif /* __XMMINTRIN_H */
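
A hedged usage sketch of the packed single-precision intrinsics defined above (illustrative only, not part of the imported header); it assumes a build with SSE enabled, e.g. cc -msse.

    /* Sketch: (a + 0.5) * 0.5 across four float lanes. */
    #include <xmmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);  /* lanes low..high: 1,2,3,4 */
        __m128 b = _mm_set1_ps(0.5f);
        __m128 c = _mm_mul_ps(_mm_add_ps(a, b), b);

        float out[4];
        _mm_storeu_ps(out, c);
        printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]); /* 0.75 1.25 1.75 2.25 */
        return 0;
    }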
diff --git a/contrib/llvm/tools/clang/lib/Index/ASTLocation.cpp b/contrib/llvm/tools/clang/lib/Index/ASTLocation.cpp
new file mode 100644
index 0000000..fce6099
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Index/ASTLocation.cpp
@@ -0,0 +1,114 @@
+//===--- ASTLocation.cpp - A <Decl, Stmt> pair ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An ASTLocation is a Decl or a Stmt together with its immediate parent Decl.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Index/ASTLocation.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+using namespace clang;
+using namespace idx;
+
+static Decl *getDeclFromExpr(Stmt *E) {
+ if (DeclRefExpr *RefExpr = dyn_cast<DeclRefExpr>(E))
+ return RefExpr->getDecl();
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(E))
+ return ME->getMemberDecl();
+ if (ObjCIvarRefExpr *RE = dyn_cast<ObjCIvarRefExpr>(E))
+ return RE->getDecl();
+
+ if (CallExpr *CE = dyn_cast<CallExpr>(E))
+ return getDeclFromExpr(CE->getCallee());
+ if (CastExpr *CE = dyn_cast<CastExpr>(E))
+ return getDeclFromExpr(CE->getSubExpr());
+
+ return 0;
+}
+
+Decl *ASTLocation::getReferencedDecl() {
+ if (isInvalid())
+ return 0;
+
+ switch (getKind()) {
+ case N_Type:
+ return 0;
+ case N_Decl:
+ return D;
+ case N_NamedRef:
+ return NDRef.ND;
+ case N_Stmt:
+ return getDeclFromExpr(Stm);
+ }
+
+ llvm_unreachable("Invalid ASTLocation Kind!");
+}
+
+SourceRange ASTLocation::getSourceRange() const {
+ if (isInvalid())
+ return SourceRange();
+
+ switch (getKind()) {
+ case N_Decl:
+ return D->getSourceRange();
+ case N_Stmt:
+ return Stm->getSourceRange();
+ case N_NamedRef:
+ return SourceRange(AsNamedRef().Loc, AsNamedRef().Loc);
+ case N_Type:
+ return AsTypeLoc().getLocalSourceRange();
+ }
+
+ llvm_unreachable("Invalid ASTLocation Kind!");
+}
+
+void ASTLocation::print(raw_ostream &OS) const {
+ if (isInvalid()) {
+ OS << "<< Invalid ASTLocation >>\n";
+ return;
+ }
+
+ ASTContext &Ctx = getParentDecl()->getASTContext();
+
+ switch (getKind()) {
+ case N_Decl:
+ OS << "[Decl: " << AsDecl()->getDeclKindName() << " ";
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(AsDecl()))
+ OS << *ND;
+ break;
+
+ case N_Stmt:
+ OS << "[Stmt: " << AsStmt()->getStmtClassName() << " ";
+ AsStmt()->printPretty(OS, Ctx, 0, PrintingPolicy(Ctx.getLangOpts()));
+ break;
+
+ case N_NamedRef:
+ OS << "[NamedRef: " << AsNamedRef().ND->getDeclKindName() << " ";
+ OS << *AsNamedRef().ND;
+ break;
+
+ case N_Type: {
+ QualType T = AsTypeLoc().getType();
+ OS << "[Type: " << T->getTypeClassName() << " " << T.getAsString();
+ }
+ }
+
+ OS << "] <";
+
+ SourceRange Range = getSourceRange();
+ SourceManager &SourceMgr = Ctx.getSourceManager();
+ Range.getBegin().print(OS, SourceMgr);
+ OS << ", ";
+ Range.getEnd().print(OS, SourceMgr);
+ OS << ">\n";
+}
diff --git a/contrib/llvm/tools/clang/lib/Index/ASTVisitor.h b/contrib/llvm/tools/clang/lib/Index/ASTVisitor.h
new file mode 100644
index 0000000..0b8425b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Index/ASTVisitor.h
@@ -0,0 +1,143 @@
+//===--- ASTVisitor.h - Visitor for an ASTContext ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTVisitor interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INDEX_ASTVISITOR_H
+#define LLVM_CLANG_INDEX_ASTVISITOR_H
+
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeLocVisitor.h"
+
+namespace clang {
+
+namespace idx {
+
+/// \brief Traverses the full AST, both Decls and Stmts.
+template<typename ImplClass>
+class ASTVisitor : public DeclVisitor<ImplClass>,
+ public StmtVisitor<ImplClass>,
+ public TypeLocVisitor<ImplClass> {
+public:
+ ASTVisitor() : CurrentDecl(0) { }
+
+ Decl *CurrentDecl;
+
+ typedef ASTVisitor<ImplClass> Base;
+ typedef DeclVisitor<ImplClass> BaseDeclVisitor;
+ typedef StmtVisitor<ImplClass> BaseStmtVisitor;
+ typedef TypeLocVisitor<ImplClass> BaseTypeLocVisitor;
+
+ using BaseStmtVisitor::Visit;
+
+ //===--------------------------------------------------------------------===//
+ // DeclVisitor
+ //===--------------------------------------------------------------------===//
+
+ void Visit(Decl *D) {
+ Decl *PrevDecl = CurrentDecl;
+ CurrentDecl = D;
+ BaseDeclVisitor::Visit(D);
+ CurrentDecl = PrevDecl;
+ }
+
+ void VisitDeclaratorDecl(DeclaratorDecl *D) {
+ BaseDeclVisitor::VisitDeclaratorDecl(D);
+ if (TypeSourceInfo *TInfo = D->getTypeSourceInfo())
+ Visit(TInfo->getTypeLoc());
+ }
+
+ void VisitFunctionDecl(FunctionDecl *D) {
+ BaseDeclVisitor::VisitFunctionDecl(D);
+ if (D->isThisDeclarationADefinition())
+ Visit(D->getBody());
+ }
+
+ void VisitObjCMethodDecl(ObjCMethodDecl *D) {
+ BaseDeclVisitor::VisitObjCMethodDecl(D);
+ if (D->getBody())
+ Visit(D->getBody());
+ }
+
+ void VisitBlockDecl(BlockDecl *D) {
+ BaseDeclVisitor::VisitBlockDecl(D);
+ Visit(D->getBody());
+ }
+
+ void VisitVarDecl(VarDecl *D) {
+ BaseDeclVisitor::VisitVarDecl(D);
+ if (Expr *Init = D->getInit())
+ Visit(Init);
+ }
+
+ void VisitDecl(Decl *D) {
+ if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D) || isa<BlockDecl>(D))
+ return;
+
+ if (DeclContext *DC = dyn_cast<DeclContext>(D))
+ static_cast<ImplClass*>(this)->VisitDeclContext(DC);
+ }
+
+ void VisitDeclContext(DeclContext *DC) {
+ for (DeclContext::decl_iterator
+ I = DC->decls_begin(), E = DC->decls_end(); I != E; ++I)
+ Visit(*I);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // StmtVisitor
+ //===--------------------------------------------------------------------===//
+
+ void VisitDeclStmt(DeclStmt *Node) {
+ for (DeclStmt::decl_iterator
+ I = Node->decl_begin(), E = Node->decl_end(); I != E; ++I)
+ Visit(*I);
+ }
+
+ void VisitBlockExpr(BlockExpr *Node) {
+    // The BlockDecl is also visited by 'VisitDeclContext()'. No need to visit
+    // it twice.
+ }
+
+ void VisitStmt(Stmt *Node) {
+ for (Stmt::child_range I = Node->children(); I; ++I)
+ if (*I)
+ Visit(*I);
+ }
+
+ //===--------------------------------------------------------------------===//
+ // TypeLocVisitor
+ //===--------------------------------------------------------------------===//
+
+ void Visit(TypeLoc TL) {
+ for (; TL; TL = TL.getNextTypeLoc())
+ BaseTypeLocVisitor::Visit(TL);
+ }
+
+  void VisitArrayTypeLoc(ArrayTypeLoc TL) {
+ BaseTypeLocVisitor::VisitArrayTypeLoc(TL);
+ if (TL.getSizeExpr())
+ Visit(TL.getSizeExpr());
+ }
+
+ void VisitFunctionTypeLoc(FunctionTypeLoc TL) {
+ BaseTypeLocVisitor::VisitFunctionTypeLoc(TL);
+ for (unsigned i = 0; i != TL.getNumArgs(); ++i)
+ Visit(TL.getArg(i));
+ }
+
+};
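+
+// Illustrative sketch (not part of the upstream header): clients use
+// ASTVisitor through CRTP, deriving from ASTVisitor<Derived> and overriding
+// only the Visit* methods they care about, while the base class keeps the
+// traversal going across Decls, Stmts and TypeLocs. The class name
+// 'MsgCounter' below is hypothetical:
+//
+//   class MsgCounter : public ASTVisitor<MsgCounter> {
+//   public:
+//     unsigned NumMsgs;
+//     MsgCounter() : NumMsgs(0) { }
+//     void VisitObjCMessageExpr(ObjCMessageExpr *Node) {
+//       ++NumMsgs;
+//       Base::VisitObjCMessageExpr(Node); // keep visiting sub-expressions
+//     }
+//   };
+//
+//   MsgCounter Counter;
+//   Counter.Visit(Ctx.getTranslationUnitDecl());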
+
+} // namespace idx
+
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Index/Analyzer.cpp b/contrib/llvm/tools/clang/lib/Index/Analyzer.cpp
new file mode 100644
index 0000000..f77e6ef
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Index/Analyzer.cpp
@@ -0,0 +1,470 @@
+//===--- Analyzer.cpp - Analysis for indexing information -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Analyzer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Index/Analyzer.h"
+#include "clang/Index/Entity.h"
+#include "clang/Index/TranslationUnit.h"
+#include "clang/Index/Handlers.h"
+#include "clang/Index/ASTLocation.h"
+#include "clang/Index/GlobalSelector.h"
+#include "clang/Index/DeclReferenceMap.h"
+#include "clang/Index/SelectorMap.h"
+#include "clang/Index/IndexProvider.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprObjC.h"
+#include "llvm/ADT/SmallSet.h"
+using namespace clang;
+using namespace idx;
+
+namespace {
+
+//===----------------------------------------------------------------------===//
+// DeclEntityAnalyzer Implementation
+//===----------------------------------------------------------------------===//
+
+class DeclEntityAnalyzer : public TranslationUnitHandler {
+ Entity Ent;
+ TULocationHandler &TULocHandler;
+
+public:
+ DeclEntityAnalyzer(Entity ent, TULocationHandler &handler)
+ : Ent(ent), TULocHandler(handler) { }
+
+ virtual void Handle(TranslationUnit *TU) {
+ assert(TU && "Passed null translation unit");
+
+ Decl *D = Ent.getDecl(TU->getASTContext());
+ assert(D && "Couldn't resolve Entity");
+
+ for (Decl::redecl_iterator I = D->redecls_begin(),
+ E = D->redecls_end(); I != E; ++I)
+ TULocHandler.Handle(TULocation(TU, ASTLocation(*I)));
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// RefEntityAnalyzer Implementation
+//===----------------------------------------------------------------------===//
+
+class RefEntityAnalyzer : public TranslationUnitHandler {
+ Entity Ent;
+ TULocationHandler &TULocHandler;
+
+public:
+ RefEntityAnalyzer(Entity ent, TULocationHandler &handler)
+ : Ent(ent), TULocHandler(handler) { }
+
+ virtual void Handle(TranslationUnit *TU) {
+ assert(TU && "Passed null translation unit");
+
+ Decl *D = Ent.getDecl(TU->getASTContext());
+ assert(D && "Couldn't resolve Entity");
+ NamedDecl *ND = dyn_cast<NamedDecl>(D);
+ if (!ND)
+ return;
+
+ DeclReferenceMap &RefMap = TU->getDeclReferenceMap();
+ for (DeclReferenceMap::astlocation_iterator
+ I = RefMap.refs_begin(ND), E = RefMap.refs_end(ND); I != E; ++I)
+ TULocHandler.Handle(TULocation(TU, *I));
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// RefSelectorAnalyzer Implementation
+//===----------------------------------------------------------------------===//
+
+/// \brief Accepts an ObjC method and finds all message expressions that this
+/// method may respond to.
+class RefSelectorAnalyzer : public TranslationUnitHandler {
+ Program &Prog;
+ TULocationHandler &TULocHandler;
+
+ // The original ObjCInterface associated with the method.
+ Entity IFaceEnt;
+ GlobalSelector GlobSel;
+ bool IsInstanceMethod;
+
+ /// \brief Super classes of the ObjCInterface.
+ typedef llvm::SmallSet<Entity, 16> EntitiesSetTy;
+ EntitiesSetTy HierarchyEntities;
+
+public:
+ RefSelectorAnalyzer(ObjCMethodDecl *MD,
+ Program &prog, TULocationHandler &handler)
+ : Prog(prog), TULocHandler(handler) {
+ assert(MD);
+
+ // FIXME: Protocol methods.
+ assert(!isa<ObjCProtocolDecl>(MD->getDeclContext()) &&
+ "Protocol methods not supported yet");
+
+ ObjCInterfaceDecl *IFD = MD->getClassInterface();
+ assert(IFD);
+ IFaceEnt = Entity::get(IFD, Prog);
+ GlobSel = GlobalSelector::get(MD->getSelector(), Prog);
+ IsInstanceMethod = MD->isInstanceMethod();
+
+ for (ObjCInterfaceDecl *Cls = IFD->getSuperClass();
+ Cls; Cls = Cls->getSuperClass())
+ HierarchyEntities.insert(Entity::get(Cls, Prog));
+ }
+
+ virtual void Handle(TranslationUnit *TU) {
+ assert(TU && "Passed null translation unit");
+
+ ASTContext &Ctx = TU->getASTContext();
+ // Null means it doesn't exist in this translation unit.
+ ObjCInterfaceDecl *IFace =
+ cast_or_null<ObjCInterfaceDecl>(IFaceEnt.getDecl(Ctx));
+ Selector Sel = GlobSel.getSelector(Ctx);
+
+ SelectorMap &SelMap = TU->getSelectorMap();
+ for (SelectorMap::astlocation_iterator
+ I = SelMap.refs_begin(Sel), E = SelMap.refs_end(Sel); I != E; ++I) {
+ if (ValidReference(*I, IFace))
+ TULocHandler.Handle(TULocation(TU, *I));
+ }
+ }
+
+ /// \brief Determines whether the given message expression is likely to end
+ /// up at the given interface decl.
+ ///
+ /// It returns true "eagerly", meaning it will return false only if it can
+ /// "prove" statically that the interface cannot accept this message.
+ bool ValidReference(ASTLocation ASTLoc, ObjCInterfaceDecl *IFace) {
+ assert(ASTLoc.isStmt());
+
+ // FIXME: Finding @selector references should be through another Analyzer
+ // method, like FindSelectors.
+ if (isa<ObjCSelectorExpr>(ASTLoc.AsStmt()))
+ return false;
+
+ ObjCInterfaceDecl *MsgD = 0;
+ ObjCMessageExpr *Msg = cast<ObjCMessageExpr>(ASTLoc.AsStmt());
+
+ switch (Msg->getReceiverKind()) {
+ case ObjCMessageExpr::Instance: {
+ const ObjCObjectPointerType *OPT =
+ Msg->getInstanceReceiver()->getType()->getAsObjCInterfacePointerType();
+
+ // Can be anything! Accept it as a possibility..
+ if (!OPT || OPT->isObjCIdType() || OPT->isObjCQualifiedIdType())
+ return true;
+
+ // Expecting class method.
+ if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType())
+ return !IsInstanceMethod;
+
+ MsgD = OPT->getInterfaceDecl();
+ assert(MsgD);
+
+ // Should be an instance method.
+ if (!IsInstanceMethod)
+ return false;
+ break;
+ }
+
+ case ObjCMessageExpr::Class: {
+ // Expecting class method.
+ if (IsInstanceMethod)
+ return false;
+
+ MsgD = Msg->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
+ break;
+ }
+
+ case ObjCMessageExpr::SuperClass:
+ // Expecting class method.
+ if (IsInstanceMethod)
+ return false;
+
+ MsgD = Msg->getSuperType()->getAs<ObjCObjectType>()->getInterface();
+ break;
+
+ case ObjCMessageExpr::SuperInstance:
+ // Expecting instance method.
+ if (!IsInstanceMethod)
+ return false;
+
+ MsgD = Msg->getSuperType()->getAs<ObjCObjectPointerType>()
+ ->getInterfaceDecl();
+ break;
+ }
+
+ assert(MsgD);
+
+ // Same interface ? We have a winner!
+ if (declaresSameEntity(MsgD, IFace))
+ return true;
+
+ // If the message interface is a superclass of the original interface,
+ // accept this message as a possibility.
+ if (HierarchyEntities.count(Entity::get(MsgD, Prog)))
+ return true;
+
+ // If the message interface is a subclass of the original interface, accept
+ // the message unless there is a subclass in the hierarchy that will
+ // "steal" the message (thus the message "will go" to the subclass and not
+    // the original interface).
+ if (IFace) {
+ Selector Sel = Msg->getSelector();
+ for (ObjCInterfaceDecl *Cls = MsgD; Cls; Cls = Cls->getSuperClass()) {
+ if (declaresSameEntity(Cls, IFace))
+ return true;
+ if (Cls->getMethod(Sel, IsInstanceMethod))
+ return false;
+ }
+ }
+
+ // The interfaces are unrelated, don't accept the message.
+ return false;
+ }
+};
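+
+// Illustrative note (not part of the upstream file): given the hypothetical
+// hierarchy  @interface A  /  @interface B : A  and the method -[A foo],
+// ValidReference() accepts a message  [b foo]  sent through a B*, since the
+// lookup may still end up at A's implementation; but it rejects the message as
+// soon as B itself declares -foo, because B would then "steal" the message.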
+
+//===----------------------------------------------------------------------===//
+// MessageAnalyzer Implementation
+//===----------------------------------------------------------------------===//
+
+/// \brief Accepts an ObjC message expression and finds all methods that may
+/// respond to it.
+class MessageAnalyzer : public TranslationUnitHandler {
+ Program &Prog;
+ TULocationHandler &TULocHandler;
+
+ // The ObjCInterface associated with the message. Can be null/invalid.
+ Entity MsgIFaceEnt;
+ GlobalSelector GlobSel;
+ bool CanBeInstanceMethod;
+ bool CanBeClassMethod;
+
+ /// \brief Super classes of the ObjCInterface.
+ typedef llvm::SmallSet<Entity, 16> EntitiesSetTy;
+ EntitiesSetTy HierarchyEntities;
+
+ /// \brief The interface in the message interface hierarchy that "intercepts"
+ /// the selector.
+ Entity ReceiverIFaceEnt;
+
+public:
+ MessageAnalyzer(ObjCMessageExpr *Msg,
+ Program &prog, TULocationHandler &handler)
+ : Prog(prog), TULocHandler(handler),
+ CanBeInstanceMethod(false),
+ CanBeClassMethod(false) {
+
+ assert(Msg);
+
+ ObjCInterfaceDecl *MsgD = 0;
+
+    switch (Msg->getReceiverKind()) {
+ case ObjCMessageExpr::Instance: {
+ const ObjCObjectPointerType *OPT =
+ Msg->getInstanceReceiver()->getType()
+ ->getAsObjCInterfacePointerType();
+
+ if (!OPT || OPT->isObjCIdType() || OPT->isObjCQualifiedIdType()) {
+ CanBeInstanceMethod = CanBeClassMethod = true;
+ break;
+ }
+
+ if (OPT->isObjCClassType() || OPT->isObjCQualifiedClassType()) {
+ CanBeClassMethod = true;
+ break;
+ }
+
+ MsgD = OPT->getInterfaceDecl();
+ assert(MsgD);
+ CanBeInstanceMethod = true;
+ break;
+ }
+
+ case ObjCMessageExpr::Class:
+ CanBeClassMethod = true;
+ MsgD = Msg->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ CanBeClassMethod = true;
+ MsgD = Msg->getSuperType()->getAs<ObjCObjectType>()->getInterface();
+ break;
+
+ case ObjCMessageExpr::SuperInstance:
+ CanBeInstanceMethod = true;
+ MsgD = Msg->getSuperType()->getAs<ObjCObjectPointerType>()
+ ->getInterfaceDecl();
+ break;
+    }
+
+ assert(CanBeInstanceMethod || CanBeClassMethod);
+
+ Selector sel = Msg->getSelector();
+ assert(!sel.isNull());
+
+ MsgIFaceEnt = Entity::get(MsgD, Prog);
+ GlobSel = GlobalSelector::get(sel, Prog);
+
+ if (MsgD) {
+ for (ObjCInterfaceDecl *Cls = MsgD->getSuperClass();
+ Cls; Cls = Cls->getSuperClass())
+ HierarchyEntities.insert(Entity::get(Cls, Prog));
+
+ // Find the interface in the hierarchy that "receives" the message.
+ for (ObjCInterfaceDecl *Cls = MsgD; Cls; Cls = Cls->getSuperClass()) {
+ bool isReceiver = false;
+
+ ObjCInterfaceDecl::lookup_const_iterator Meth, MethEnd;
+ for (llvm::tie(Meth, MethEnd) = Cls->lookup(sel);
+ Meth != MethEnd; ++Meth) {
+ if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(*Meth))
+ if ((MD->isInstanceMethod() && CanBeInstanceMethod) ||
+ (MD->isClassMethod() && CanBeClassMethod)) {
+ isReceiver = true;
+ break;
+ }
+ }
+
+ if (isReceiver) {
+ ReceiverIFaceEnt = Entity::get(Cls, Prog);
+ break;
+ }
+ }
+ }
+ }
+
+ virtual void Handle(TranslationUnit *TU) {
+ assert(TU && "Passed null translation unit");
+ ASTContext &Ctx = TU->getASTContext();
+
+ // Null means it doesn't exist in this translation unit or there was no
+ // interface that was determined to receive the original message.
+ ObjCInterfaceDecl *ReceiverIFace =
+ cast_or_null<ObjCInterfaceDecl>(ReceiverIFaceEnt.getDecl(Ctx));
+
+ // No subclass for the original receiver interface, so it remains the
+ // receiver.
+ if (ReceiverIFaceEnt.isValid() && ReceiverIFace == 0)
+ return;
+
+ // Null means it doesn't exist in this translation unit or there was no
+ // interface associated with the message in the first place.
+ ObjCInterfaceDecl *MsgIFace =
+ cast_or_null<ObjCInterfaceDecl>(MsgIFaceEnt.getDecl(Ctx));
+
+ Selector Sel = GlobSel.getSelector(Ctx);
+ SelectorMap &SelMap = TU->getSelectorMap();
+ for (SelectorMap::method_iterator
+ I = SelMap.methods_begin(Sel), E = SelMap.methods_end(Sel);
+ I != E; ++I) {
+ ObjCMethodDecl *D = *I;
+ if (ValidMethod(D, MsgIFace, ReceiverIFace)) {
+ for (ObjCMethodDecl::redecl_iterator
+ RI = D->redecls_begin(), RE = D->redecls_end(); RI != RE; ++RI)
+ TULocHandler.Handle(TULocation(TU, ASTLocation(*RI)));
+ }
+ }
+ }
+
+ /// \brief Determines whether the given method is likely to accept the
+ /// original message.
+ ///
+ /// It returns true "eagerly", meaning it will return false only if it can
+ /// "prove" statically that the method cannot accept the original message.
+ bool ValidMethod(ObjCMethodDecl *D, ObjCInterfaceDecl *MsgIFace,
+ ObjCInterfaceDecl *ReceiverIFace) {
+ assert(D);
+
+ // FIXME: Protocol methods ?
+ if (isa<ObjCProtocolDecl>(D->getDeclContext()))
+ return false;
+
+ // No specific interface associated with the message. Can be anything.
+ if (MsgIFaceEnt.isInvalid())
+ return true;
+
+ if ((!CanBeInstanceMethod && D->isInstanceMethod()) ||
+ (!CanBeClassMethod && D->isClassMethod()))
+ return false;
+
+ ObjCInterfaceDecl *IFace = D->getClassInterface();
+ assert(IFace);
+
+ // If the original message interface is the same or a superclass of the
+ // given interface, accept the method as a possibility.
+ if (MsgIFace && MsgIFace->isSuperClassOf(IFace))
+ return true;
+
+ if (ReceiverIFace) {
+      // The given interface "overrides" the receiver.
+ if (ReceiverIFace->isSuperClassOf(IFace))
+ return true;
+ } else {
+ // No receiver was found for the original message.
+ assert(ReceiverIFaceEnt.isInvalid());
+
+ // If the original message interface is a subclass of the given interface,
+ // accept the message.
+ if (HierarchyEntities.count(Entity::get(IFace, Prog)))
+ return true;
+ }
+
+ // The interfaces are unrelated, or the receiver interface wasn't
+    // "overridden".
+ return false;
+ }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Analyzer Implementation
+//===----------------------------------------------------------------------===//
+
+void Analyzer::FindDeclarations(Decl *D, TULocationHandler &Handler) {
+ assert(D && "Passed null declaration");
+ Entity Ent = Entity::get(D, Prog);
+ if (Ent.isInvalid())
+ return;
+
+ DeclEntityAnalyzer DEA(Ent, Handler);
+ Idxer.GetTranslationUnitsFor(Ent, DEA);
+}
+
+void Analyzer::FindReferences(Decl *D, TULocationHandler &Handler) {
+ assert(D && "Passed null declaration");
+ if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ RefSelectorAnalyzer RSA(MD, Prog, Handler);
+ GlobalSelector Sel = GlobalSelector::get(MD->getSelector(), Prog);
+ Idxer.GetTranslationUnitsFor(Sel, RSA);
+ return;
+ }
+
+ Entity Ent = Entity::get(D, Prog);
+ if (Ent.isInvalid())
+ return;
+
+ RefEntityAnalyzer REA(Ent, Handler);
+ Idxer.GetTranslationUnitsFor(Ent, REA);
+}
+
+/// \brief Find methods that may respond to the given message and pass them
+/// to Handler.
+void Analyzer::FindObjCMethods(ObjCMessageExpr *Msg,
+ TULocationHandler &Handler) {
+ assert(Msg);
+ MessageAnalyzer MsgAnalyz(Msg, Prog, Handler);
+ GlobalSelector GlobSel = GlobalSelector::get(Msg->getSelector(), Prog);
+ Idxer.GetTranslationUnitsFor(GlobSel, MsgAnalyz);
+}
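+
+// Illustrative usage sketch (not part of the upstream file): a client owns a
+// Program and an Indexer that has been fed every TranslationUnit through
+// Indexer::IndexAST(), and then queries the Analyzer with a handler. The
+// Analyzer(Program&, Indexer&) constructor, the by-value Handle() parameter,
+// and all names below are assumptions for illustration only:
+//
+//   class CountRefs : public TULocationHandler {
+//   public:
+//     unsigned NumRefs;
+//     CountRefs() : NumRefs(0) { }
+//     virtual void Handle(TULocation Loc) { ++NumRefs; }
+//   };
+//
+//   Analyzer Analyz(Prog, Idxer);
+//   CountRefs Counter;
+//   Analyz.FindReferences(SomeDecl, Counter);   // references across all TUs
+//   Analyz.FindDeclarations(SomeDecl, Counter); // all redeclarations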
diff --git a/contrib/llvm/tools/clang/lib/Index/DeclReferenceMap.cpp b/contrib/llvm/tools/clang/lib/Index/DeclReferenceMap.cpp
new file mode 100644
index 0000000..3fd4336
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Index/DeclReferenceMap.cpp
@@ -0,0 +1,90 @@
+//===--- DeclReferenceMap.cpp - Map Decls to their references -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// DeclReferenceMap creates a mapping from Decls to the ASTLocations that
+// reference them.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Index/DeclReferenceMap.h"
+#include "clang/Index/ASTLocation.h"
+#include "ASTVisitor.h"
+using namespace clang;
+using namespace idx;
+
+namespace {
+
+class RefMapper : public ASTVisitor<RefMapper> {
+ DeclReferenceMap::MapTy &Map;
+
+public:
+ RefMapper(DeclReferenceMap::MapTy &map) : Map(map) { }
+
+ void VisitDeclRefExpr(DeclRefExpr *Node);
+ void VisitMemberExpr(MemberExpr *Node);
+ void VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node);
+
+ void VisitTypedefTypeLoc(TypedefTypeLoc TL);
+ void VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL);
+};
+
+} // anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// RefMapper Implementation
+//===----------------------------------------------------------------------===//
+
+void RefMapper::VisitDeclRefExpr(DeclRefExpr *Node) {
+ NamedDecl *PrimD = cast<NamedDecl>(Node->getDecl()->getCanonicalDecl());
+ Map.insert(std::make_pair(PrimD, ASTLocation(CurrentDecl, Node)));
+}
+
+void RefMapper::VisitMemberExpr(MemberExpr *Node) {
+ NamedDecl *PrimD = cast<NamedDecl>(Node->getMemberDecl()->getCanonicalDecl());
+ Map.insert(std::make_pair(PrimD, ASTLocation(CurrentDecl, Node)));
+}
+
+void RefMapper::VisitObjCIvarRefExpr(ObjCIvarRefExpr *Node) {
+ Map.insert(std::make_pair(Node->getDecl(), ASTLocation(CurrentDecl, Node)));
+}
+
+void RefMapper::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
+ NamedDecl *ND = TL.getTypedefNameDecl();
+ Map.insert(std::make_pair(ND, ASTLocation(CurrentDecl, ND, TL.getNameLoc())));
+}
+
+void RefMapper::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
+ NamedDecl *ND = TL.getIFaceDecl();
+ Map.insert(std::make_pair(ND, ASTLocation(CurrentDecl, ND, TL.getNameLoc())));
+}
+
+//===----------------------------------------------------------------------===//
+// DeclReferenceMap Implementation
+//===----------------------------------------------------------------------===//
+
+DeclReferenceMap::DeclReferenceMap(ASTContext &Ctx) {
+ RefMapper(Map).Visit(Ctx.getTranslationUnitDecl());
+}
+
+DeclReferenceMap::astlocation_iterator
+DeclReferenceMap::refs_begin(NamedDecl *D) const {
+ NamedDecl *Prim = cast<NamedDecl>(D->getCanonicalDecl());
+ return astlocation_iterator(Map.lower_bound(Prim));
+}
+
+DeclReferenceMap::astlocation_iterator
+DeclReferenceMap::refs_end(NamedDecl *D) const {
+ NamedDecl *Prim = cast<NamedDecl>(D->getCanonicalDecl());
+ return astlocation_iterator(Map.upper_bound(Prim));
+}
+
+bool DeclReferenceMap::refs_empty(NamedDecl *D) const {
+ NamedDecl *Prim = cast<NamedDecl>(D->getCanonicalDecl());
+ return refs_begin(Prim) == refs_end(Prim);
+}
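+
+// Illustrative usage sketch (not part of the upstream file): the map is built
+// once per ASTContext and then queried per declaration, mirroring how
+// RefEntityAnalyzer uses it in Analyzer.cpp. The names 'Ctx' and 'ND' are
+// hypothetical:
+//
+//   DeclReferenceMap RefMap(Ctx);
+//   for (DeclReferenceMap::astlocation_iterator
+//          I = RefMap.refs_begin(ND), E = RefMap.refs_end(ND); I != E; ++I)
+//     (*I).print(llvm::errs());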
diff --git a/contrib/llvm/tools/clang/lib/Index/Entity.cpp b/contrib/llvm/tools/clang/lib/Index/Entity.cpp
new file mode 100644
index 0000000..fbab6d8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Index/Entity.cpp
@@ -0,0 +1,270 @@
+//===--- Entity.cpp - Cross-translation-unit "token" for decls ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Entity is an ASTContext-independent way to refer to declarations that are
+// visible across translation units.
+//
+//===----------------------------------------------------------------------===//
+
+#include "EntityImpl.h"
+#include "ProgramImpl.h"
+#include "clang/Index/Program.h"
+#include "clang/Index/GlobalSelector.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclVisitor.h"
+using namespace clang;
+using namespace idx;
+
+// FIXME: Entity is really really basic currently, mostly written to work
+// on variables and functions. Should support types and other decls eventually..
+
+
+//===----------------------------------------------------------------------===//
+// EntityGetter
+//===----------------------------------------------------------------------===//
+
+namespace clang {
+namespace idx {
+
+/// \brief Gets the Entity associated with a Decl.
+class EntityGetter : public DeclVisitor<EntityGetter, Entity> {
+ Program &Prog;
+ ProgramImpl &ProgImpl;
+
+public:
+ EntityGetter(Program &prog, ProgramImpl &progImpl)
+ : Prog(prog), ProgImpl(progImpl) { }
+
+ // Get an Entity.
+ Entity getEntity(Entity Parent, DeclarationName Name,
+ unsigned IdNS, bool isObjCInstanceMethod);
+
+ // Get an Entity associated with the name in the global namespace.
+ Entity getGlobalEntity(StringRef Name);
+
+ Entity VisitNamedDecl(NamedDecl *D);
+ Entity VisitVarDecl(VarDecl *D);
+ Entity VisitFieldDecl(FieldDecl *D);
+ Entity VisitFunctionDecl(FunctionDecl *D);
+ Entity VisitTypeDecl(TypeDecl *D);
+};
+
+}
+}
+
+Entity EntityGetter::getEntity(Entity Parent, DeclarationName Name,
+ unsigned IdNS, bool isObjCInstanceMethod) {
+ llvm::FoldingSetNodeID ID;
+ EntityImpl::Profile(ID, Parent, Name, IdNS, isObjCInstanceMethod);
+
+ ProgramImpl::EntitySetTy &Entities = ProgImpl.getEntities();
+ void *InsertPos = 0;
+ if (EntityImpl *Ent = Entities.FindNodeOrInsertPos(ID, InsertPos))
+ return Entity(Ent);
+
+ void *Buf = ProgImpl.Allocate(sizeof(EntityImpl));
+ EntityImpl *New =
+ new (Buf) EntityImpl(Parent, Name, IdNS, isObjCInstanceMethod);
+ Entities.InsertNode(New, InsertPos);
+
+ return Entity(New);
+}
+
+Entity EntityGetter::getGlobalEntity(StringRef Name) {
+ IdentifierInfo *II = &ProgImpl.getIdents().get(Name);
+ DeclarationName GlobName(II);
+ unsigned IdNS = Decl::IDNS_Ordinary;
+ return getEntity(Entity(), GlobName, IdNS, false);
+}
+
+Entity EntityGetter::VisitNamedDecl(NamedDecl *D) {
+ Entity Parent;
+ if (!D->getDeclContext()->isTranslationUnit()) {
+ Parent = Visit(cast<Decl>(D->getDeclContext()));
+ // FIXME: Anonymous structs ?
+ if (Parent.isInvalid())
+ return Entity();
+ }
+ if (Parent.isValid() && Parent.isInternalToTU())
+ return Entity(D);
+
+ // FIXME: Only works for DeclarationNames that are identifiers and selectors.
+ // Treats other DeclarationNames as internal Decls for now..
+
+ DeclarationName LocalName = D->getDeclName();
+ if (!LocalName)
+ return Entity(D);
+
+ DeclarationName GlobName;
+
+ if (IdentifierInfo *II = LocalName.getAsIdentifierInfo()) {
+ IdentifierInfo *GlobII = &ProgImpl.getIdents().get(II->getName());
+ GlobName = DeclarationName(GlobII);
+ } else {
+ Selector LocalSel = LocalName.getObjCSelector();
+
+ // Treats other DeclarationNames as internal Decls for now..
+ if (LocalSel.isNull())
+ return Entity(D);
+
+ Selector GlobSel =
+ (uintptr_t)GlobalSelector::get(LocalSel, Prog).getAsOpaquePtr();
+ GlobName = DeclarationName(GlobSel);
+ }
+
+ assert(GlobName);
+
+ unsigned IdNS = D->getIdentifierNamespace();
+
+ ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D);
+ bool isObjCInstanceMethod = MD && MD->isInstanceMethod();
+ return getEntity(Parent, GlobName, IdNS, isObjCInstanceMethod);
+}
+
+Entity EntityGetter::VisitVarDecl(VarDecl *D) {
+ // Local variables have no linkage, make invalid Entities.
+ if (D->hasLocalStorage())
+ return Entity();
+
+ // If it's static it cannot be referred to by another translation unit.
+ if (D->getStorageClass() == SC_Static)
+ return Entity(D);
+
+ return VisitNamedDecl(D);
+}
+
+Entity EntityGetter::VisitFunctionDecl(FunctionDecl *D) {
+ // If it's static it cannot be referred to by another translation unit.
+ if (D->getStorageClass() == SC_Static)
+ return Entity(D);
+
+ return VisitNamedDecl(D);
+}
+
+Entity EntityGetter::VisitFieldDecl(FieldDecl *D) {
+ // Make FieldDecl an invalid Entity since it has no linkage.
+ return Entity();
+}
+
+Entity EntityGetter::VisitTypeDecl(TypeDecl *D) {
+  // Although a C++ class name has external linkage, the definition of the
+  // class is usually available in the same translation unit where it is
+  // needed, so we make all TypeDecls invalid Entities.
+ return Entity();
+}
+
+//===----------------------------------------------------------------------===//
+// EntityImpl Implementation
+//===----------------------------------------------------------------------===//
+
+Decl *EntityImpl::getDecl(ASTContext &AST) {
+ DeclContext *DC =
+ Parent.isInvalid() ? AST.getTranslationUnitDecl()
+ : cast<DeclContext>(Parent.getDecl(AST));
+ if (!DC)
+ return 0; // Couldn't get the parent context.
+
+ DeclarationName LocalName;
+
+ if (IdentifierInfo *GlobII = Name.getAsIdentifierInfo()) {
+ IdentifierInfo &II = AST.Idents.get(GlobII->getName());
+ LocalName = DeclarationName(&II);
+ } else {
+ Selector GlobSel = Name.getObjCSelector();
+    assert(!GlobSel.isNull() && "Unhandled declaration name kind");
+ GlobalSelector GSel =
+ GlobalSelector::getFromOpaquePtr(GlobSel.getAsOpaquePtr());
+ LocalName = GSel.getSelector(AST);
+ }
+
+ assert(LocalName);
+
+ DeclContext::lookup_result Res = DC->lookup(LocalName);
+ for (DeclContext::lookup_iterator I = Res.first, E = Res.second; I!=E; ++I) {
+ Decl *D = *I;
+ if (D->getIdentifierNamespace() == IdNS) {
+ if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (MD->isInstanceMethod() == IsObjCInstanceMethod)
+ return MD;
+ } else
+ return D;
+ }
+ }
+
+ return 0; // Failed to find a decl using this Entity.
+}
+
+/// \brief Get an Entity associated with the given Decl.
+/// \returns Null if an Entity cannot refer to this Decl.
+Entity EntityImpl::get(Decl *D, Program &Prog, ProgramImpl &ProgImpl) {
+ assert(D && "Passed null Decl");
+ return EntityGetter(Prog, ProgImpl).Visit(D);
+}
+
+/// \brief Get an Entity associated with a global name.
+Entity EntityImpl::get(StringRef Name, Program &Prog,
+ ProgramImpl &ProgImpl) {
+ return EntityGetter(Prog, ProgImpl).getGlobalEntity(Name);
+}
+
+std::string EntityImpl::getPrintableName() {
+ return Name.getAsString();
+}
+
+//===----------------------------------------------------------------------===//
+// Entity Implementation
+//===----------------------------------------------------------------------===//
+
+Entity::Entity(Decl *D) : Val(D->getCanonicalDecl()) { }
+
+/// \brief Find the Decl that can be referred to by this entity.
+Decl *Entity::getDecl(ASTContext &AST) const {
+ if (isInvalid())
+ return 0;
+
+ if (Decl *D = Val.dyn_cast<Decl *>())
+ // Check that the passed AST is actually the one that this Decl belongs to.
+ return (&D->getASTContext() == &AST) ? D : 0;
+
+ return Val.get<EntityImpl *>()->getDecl(AST);
+}
+
+std::string Entity::getPrintableName() const {
+ if (isInvalid())
+ return "<< Invalid >>";
+
+ if (Decl *D = Val.dyn_cast<Decl *>()) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D))
+ return ND->getNameAsString();
+ else
+ return std::string();
+ }
+
+ return Val.get<EntityImpl *>()->getPrintableName();
+}
+
+/// \brief Get an Entity associated with the given Decl.
+/// \returns Null if an Entity cannot refer to this Decl.
+Entity Entity::get(Decl *D, Program &Prog) {
+ if (D == 0)
+ return Entity();
+ ProgramImpl &ProgImpl = *static_cast<ProgramImpl*>(Prog.Impl);
+ return EntityImpl::get(D, Prog, ProgImpl);
+}
+
+Entity Entity::get(StringRef Name, Program &Prog) {
+ ProgramImpl &ProgImpl = *static_cast<ProgramImpl*>(Prog.Impl);
+ return EntityImpl::get(Name, Prog, ProgImpl);
+}
+
+unsigned
+llvm::DenseMapInfo<Entity>::getHashValue(Entity E) {
+ return DenseMapInfo<void*>::getHashValue(E.getAsOpaquePtr());
+}
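+
+// Illustrative usage sketch (not part of the upstream file): an Entity is
+// created from a Decl in one ASTContext and later resolved in another, which
+// is what makes it useful as a cross-translation-unit handle. The names 'D',
+// 'Prog', 'Ctx2' below are hypothetical:
+//
+//   Entity Ent = Entity::get(D, Prog);          // D lives in some ASTContext
+//   if (Ent.isValid() && !Ent.isInternalToTU())
+//     if (Decl *Other = Ent.getDecl(Ctx2))      // same entity, seen by Ctx2
+//       llvm::errs() << Ent.getPrintableName() << "\n";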
diff --git a/contrib/llvm/tools/clang/lib/Index/EntityImpl.h b/contrib/llvm/tools/clang/lib/Index/EntityImpl.h
new file mode 100644
index 0000000..6d6a0c6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Index/EntityImpl.h
@@ -0,0 +1,71 @@
+//===--- EntityImpl.h - Internal Entity implementation ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Internal implementation for the Entity class
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INDEX_ENTITYIMPL_H
+#define LLVM_CLANG_INDEX_ENTITYIMPL_H
+
+#include "clang/Index/Entity.h"
+#include "clang/AST/DeclarationName.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/StringSet.h"
+
+namespace clang {
+
+namespace idx {
+ class ProgramImpl;
+
+class EntityImpl : public llvm::FoldingSetNode {
+ Entity Parent;
+ DeclarationName Name;
+
+ /// \brief Identifier namespace.
+ unsigned IdNS;
+
+ /// \brief If Name is a selector, this keeps track whether it's for an
+ /// instance method.
+ bool IsObjCInstanceMethod;
+
+public:
+ EntityImpl(Entity parent, DeclarationName name, unsigned idNS,
+ bool isObjCInstanceMethod)
+ : Parent(parent), Name(name), IdNS(idNS),
+ IsObjCInstanceMethod(isObjCInstanceMethod) { }
+
+ /// \brief Find the Decl that can be referred to by this entity.
+ Decl *getDecl(ASTContext &AST);
+
+ /// \brief Get an Entity associated with the given Decl.
+ /// \returns Null if an Entity cannot refer to this Decl.
+ static Entity get(Decl *D, Program &Prog, ProgramImpl &ProgImpl);
+ static Entity get(StringRef Name, Program &Prog, ProgramImpl &ProgImpl);
+
+ std::string getPrintableName();
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID, Parent, Name, IdNS, IsObjCInstanceMethod);
+ }
+ static void Profile(llvm::FoldingSetNodeID &ID, Entity Parent,
+ DeclarationName Name, unsigned IdNS,
+ bool isObjCInstanceMethod) {
+ ID.AddPointer(Parent.getAsOpaquePtr());
+ ID.AddPointer(Name.getAsOpaquePtr());
+ ID.AddInteger(IdNS);
+ ID.AddBoolean(isObjCInstanceMethod);
+ }
+};
+
+} // namespace idx
+
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Index/GlobalCallGraph.cpp b/contrib/llvm/tools/clang/lib/Index/GlobalCallGraph.cpp
new file mode 100644
index 0000000..a21b52a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Index/GlobalCallGraph.cpp
@@ -0,0 +1,152 @@
+//== GlobalCallGraph.cpp - Call graph building ------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the CallGraph and CGBuilder classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Index/GlobalCallGraph.h"
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/StmtVisitor.h"
+
+#include "llvm/Support/GraphWriter.h"
+
+using namespace clang::idx;
+using clang::FunctionDecl;
+using clang::DeclContext;
+using clang::ASTContext;
+
+namespace {
+class CGBuilder : public StmtVisitor<CGBuilder> {
+
+ CallGraph &G;
+ FunctionDecl *FD;
+
+ Entity CallerEnt;
+
+ CallGraphNode *CallerNode;
+
+public:
+ CGBuilder(CallGraph &g, FunctionDecl *fd, Entity E, CallGraphNode *N)
+ : G(g), FD(fd), CallerEnt(E), CallerNode(N) {}
+
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+
+ void VisitCallExpr(CallExpr *CE);
+
+ void VisitChildren(Stmt *S) {
+ for (Stmt::child_range I = S->children(); I; ++I)
+ if (*I)
+ static_cast<CGBuilder*>(this)->Visit(*I);
+ }
+};
+}
+
+void CGBuilder::VisitCallExpr(CallExpr *CE) {
+ if (FunctionDecl *CalleeDecl = CE->getDirectCallee()) {
+ Entity Ent = Entity::get(CalleeDecl, G.getProgram());
+ CallGraphNode *CalleeNode = G.getOrInsertFunction(Ent);
+ CallerNode->addCallee(ASTLocation(FD, CE), CalleeNode);
+ }
+}
+
+CallGraph::CallGraph(Program &P) : Prog(P), Root(0) {
+ ExternalCallingNode = getOrInsertFunction(Entity());
+}
+
+CallGraph::~CallGraph() {
+ if (!FunctionMap.empty()) {
+ for (FunctionMapTy::iterator I = FunctionMap.begin(), E = FunctionMap.end();
+ I != E; ++I)
+ delete I->second;
+ FunctionMap.clear();
+ }
+}
+
+void CallGraph::addTU(ASTContext& Ctx) {
+ DeclContext *DC = Ctx.getTranslationUnitDecl();
+ for (DeclContext::decl_iterator I = DC->decls_begin(), E = DC->decls_end();
+ I != E; ++I) {
+
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
+ if (FD->doesThisDeclarationHaveABody()) {
+ // Set caller's ASTContext.
+ Entity Ent = Entity::get(FD, Prog);
+ CallGraphNode *Node = getOrInsertFunction(Ent);
+ CallerCtx[Node] = &Ctx;
+
+ // If this function has external linkage, anything could call it.
+ if (FD->isGlobal())
+ ExternalCallingNode->addCallee(idx::ASTLocation(), Node);
+
+ // Set root node to 'main' function.
+ if (FD->getNameAsString() == "main")
+ Root = Node;
+
+ CGBuilder builder(*this, FD, Ent, Node);
+ builder.Visit(FD->getBody());
+ }
+ }
+ }
+}
+
+CallGraphNode *CallGraph::getOrInsertFunction(Entity F) {
+ CallGraphNode *&Node = FunctionMap[F];
+ if (Node)
+ return Node;
+
+ return Node = new CallGraphNode(F);
+}
+
+Decl *CallGraph::getDecl(CallGraphNode *Node) {
+ // Get the function's context.
+ ASTContext *Ctx = CallerCtx[Node];
+
+ return Node->getDecl(*Ctx);
+}
+
+void CallGraph::print(raw_ostream &os) {
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ if (I->second->hasCallee()) {
+ os << "function: " << I->first.getPrintableName()
+ << " calls:\n";
+ for (CallGraphNode::iterator CI = I->second->begin(),
+ CE = I->second->end(); CI != CE; ++CI) {
+ os << " " << CI->second->getName();
+ }
+ os << '\n';
+ }
+ }
+}
+
+void CallGraph::dump() {
+ print(llvm::errs());
+}
+
+void CallGraph::ViewCallGraph() const {
+ llvm::ViewGraph(*this, "CallGraph");
+}
+
+namespace llvm {
+
+template <>
+struct DOTGraphTraits<CallGraph> : public DefaultDOTGraphTraits {
+
+ DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+
+ static std::string getNodeLabel(const CallGraphNode *Node,
+ const CallGraph &CG) {
+ return Node->getName();
+
+ }
+
+};
+
+}
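+
+// Illustrative usage sketch (not part of the upstream file): the graph is
+// built per translation unit and then inspected as a whole. The names 'Ctx1'
+// and 'Ctx2' are hypothetical:
+//
+//   Program Prog;
+//   CallGraph CG(Prog);
+//   CG.addTU(Ctx1);          // one ASTContext per translation unit
+//   CG.addTU(Ctx2);
+//   CG.print(llvm::errs());  // "function: foo calls: ..."
+//   // CG.ViewCallGraph();   // render through GraphViz, if available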
diff --git a/contrib/llvm/tools/clang/lib/Index/GlobalSelector.cpp b/contrib/llvm/tools/clang/lib/Index/GlobalSelector.cpp
new file mode 100644
index 0000000..2fe6f95
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Index/GlobalSelector.cpp
@@ -0,0 +1,71 @@
+//===-- GlobalSelector.cpp - Cross-translation-unit "token" for selectors -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// GlobalSelector is an ASTContext-independent way to refer to selectors.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Index/GlobalSelector.h"
+#include "ProgramImpl.h"
+#include "clang/Index/Program.h"
+#include "clang/AST/ASTContext.h"
+using namespace clang;
+using namespace idx;
+
+/// \brief Get the ASTContext-specific selector.
+Selector GlobalSelector::getSelector(ASTContext &AST) const {
+ if (isInvalid())
+ return Selector();
+
+ Selector GlobSel = Selector(reinterpret_cast<uintptr_t>(Val));
+
+ SmallVector<IdentifierInfo *, 8> Ids;
+ for (unsigned i = 0, e = GlobSel.isUnarySelector() ? 1 : GlobSel.getNumArgs();
+ i != e; ++i) {
+ IdentifierInfo *GlobII = GlobSel.getIdentifierInfoForSlot(i);
+ IdentifierInfo *II = &AST.Idents.get(GlobII->getName());
+ Ids.push_back(II);
+ }
+
+ return AST.Selectors.getSelector(GlobSel.getNumArgs(), Ids.data());
+}
+
+/// \brief Get a printable name for debugging purpose.
+std::string GlobalSelector::getPrintableName() const {
+ if (isInvalid())
+ return "<< Invalid >>";
+
+ Selector GlobSel = Selector(reinterpret_cast<uintptr_t>(Val));
+ return GlobSel.getAsString();
+}
+
+/// \brief Get a GlobalSelector for the ASTContext-specific selector.
+GlobalSelector GlobalSelector::get(Selector Sel, Program &Prog) {
+ if (Sel.isNull())
+ return GlobalSelector();
+
+ ProgramImpl &ProgImpl = *static_cast<ProgramImpl*>(Prog.Impl);
+
+ SmallVector<IdentifierInfo *, 8> Ids;
+ for (unsigned i = 0, e = Sel.isUnarySelector() ? 1 : Sel.getNumArgs();
+ i != e; ++i) {
+ IdentifierInfo *II = Sel.getIdentifierInfoForSlot(i);
+ IdentifierInfo *GlobII = &ProgImpl.getIdents().get(II->getName());
+ Ids.push_back(GlobII);
+ }
+
+ Selector GlobSel = ProgImpl.getSelectors().getSelector(Sel.getNumArgs(),
+ Ids.data());
+ return GlobalSelector(GlobSel.getAsOpaquePtr());
+}
+
+unsigned
+llvm::DenseMapInfo<GlobalSelector>::getHashValue(GlobalSelector Sel) {
+ return DenseMapInfo<void*>::getHashValue(Sel.getAsOpaquePtr());
+}
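+
+// Illustrative usage sketch (not part of the upstream file): like Entity, a
+// GlobalSelector is created against one ASTContext and resolved in another.
+// The names 'MD', 'Prog', 'OtherCtx' are hypothetical:
+//
+//   GlobalSelector GS = GlobalSelector::get(MD->getSelector(), Prog);
+//   Selector LocalSel = GS.getSelector(OtherCtx);
+//   llvm::errs() << GS.getPrintableName() << "\n";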
diff --git a/contrib/llvm/tools/clang/lib/Index/Handlers.cpp b/contrib/llvm/tools/clang/lib/Index/Handlers.cpp
new file mode 100644
index 0000000..1e9a27d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Index/Handlers.cpp
@@ -0,0 +1,22 @@
+//===--- Handlers.cpp - Interfaces for receiving information ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Abstract interfaces for receiving information.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Index/Handlers.h"
+#include "clang/Index/Entity.h"
+using namespace clang;
+using namespace idx;
+
+// Out-of-line to give the virtual tables a home.
+EntityHandler::~EntityHandler() { }
+TranslationUnitHandler::~TranslationUnitHandler() { }
+TULocationHandler::~TULocationHandler() { }
diff --git a/contrib/llvm/tools/clang/lib/Index/IndexProvider.cpp b/contrib/llvm/tools/clang/lib/Index/IndexProvider.cpp
new file mode 100644
index 0000000..eea0988
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Index/IndexProvider.cpp
@@ -0,0 +1,20 @@
+//===- IndexProvider.cpp - Maps information to translation units -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Maps information to TranslationUnits.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Index/IndexProvider.h"
+#include "clang/Index/Entity.h"
+using namespace clang;
+using namespace idx;
+
+// Out-of-line to give the virtual table a home.
+IndexProvider::~IndexProvider() { }
diff --git a/contrib/llvm/tools/clang/lib/Index/Indexer.cpp b/contrib/llvm/tools/clang/lib/Index/Indexer.cpp
new file mode 100644
index 0000000..ebba43c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Index/Indexer.cpp
@@ -0,0 +1,121 @@
+//===--- Indexer.cpp - IndexProvider implementation -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// IndexProvider implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Index/Indexer.h"
+#include "clang/Index/Program.h"
+#include "clang/Index/Handlers.h"
+#include "clang/Index/TranslationUnit.h"
+#include "ASTVisitor.h"
+#include "clang/AST/DeclBase.h"
+using namespace clang;
+using namespace idx;
+
+namespace {
+
+class EntityIndexer : public EntityHandler {
+ TranslationUnit *TU;
+ Indexer::MapTy &Map;
+ Indexer::DefMapTy &DefMap;
+
+public:
+ EntityIndexer(TranslationUnit *tu, Indexer::MapTy &map,
+ Indexer::DefMapTy &defmap)
+ : TU(tu), Map(map), DefMap(defmap) { }
+
+ virtual void Handle(Entity Ent) {
+ if (Ent.isInternalToTU())
+ return;
+ Map[Ent].insert(TU);
+
+ Decl *D = Ent.getDecl(TU->getASTContext());
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->doesThisDeclarationHaveABody())
+ DefMap[Ent] = std::make_pair(FD, TU);
+ }
+};
+
+class SelectorIndexer : public ASTVisitor<SelectorIndexer> {
+ Program &Prog;
+ TranslationUnit *TU;
+ Indexer::SelMapTy &Map;
+
+public:
+ SelectorIndexer(Program &prog, TranslationUnit *tu, Indexer::SelMapTy &map)
+ : Prog(prog), TU(tu), Map(map) { }
+
+ void VisitObjCMethodDecl(ObjCMethodDecl *D) {
+ Map[GlobalSelector::get(D->getSelector(), Prog)].insert(TU);
+ Base::VisitObjCMethodDecl(D);
+ }
+
+ void VisitObjCMessageExpr(ObjCMessageExpr *Node) {
+ Map[GlobalSelector::get(Node->getSelector(), Prog)].insert(TU);
+ Base::VisitObjCMessageExpr(Node);
+ }
+};
+
+} // anonymous namespace
+
+void Indexer::IndexAST(TranslationUnit *TU) {
+ assert(TU && "Passed null TranslationUnit");
+ ASTContext &Ctx = TU->getASTContext();
+ CtxTUMap[&Ctx] = TU;
+ EntityIndexer Idx(TU, Map, DefMap);
+ Prog.FindEntities(Ctx, Idx);
+
+ SelectorIndexer SelIdx(Prog, TU, SelMap);
+ SelIdx.Visit(Ctx.getTranslationUnitDecl());
+}
+
+void Indexer::GetTranslationUnitsFor(Entity Ent,
+ TranslationUnitHandler &Handler) {
+ assert(Ent.isValid() && "Expected valid Entity");
+
+ if (Ent.isInternalToTU()) {
+ Decl *D = Ent.getInternalDecl();
+ CtxTUMapTy::iterator I = CtxTUMap.find(&D->getASTContext());
+ if (I != CtxTUMap.end())
+ Handler.Handle(I->second);
+ return;
+ }
+
+ MapTy::iterator I = Map.find(Ent);
+ if (I == Map.end())
+ return;
+
+ TUSetTy &Set = I->second;
+ for (TUSetTy::iterator I = Set.begin(), E = Set.end(); I != E; ++I)
+ Handler.Handle(*I);
+}
+
+void Indexer::GetTranslationUnitsFor(GlobalSelector Sel,
+ TranslationUnitHandler &Handler) {
+ assert(Sel.isValid() && "Expected valid GlobalSelector");
+
+ SelMapTy::iterator I = SelMap.find(Sel);
+ if (I == SelMap.end())
+ return;
+
+ TUSetTy &Set = I->second;
+ for (TUSetTy::iterator I = Set.begin(), E = Set.end(); I != E; ++I)
+ Handler.Handle(*I);
+}
+
+std::pair<FunctionDecl *, TranslationUnit *>
+Indexer::getDefinitionFor(Entity Ent) {
+ DefMapTy::iterator I = DefMap.find(Ent);
+ if (I == DefMap.end())
+ return std::make_pair((FunctionDecl *)0, (TranslationUnit *)0);
+ else
+ return I->second;
+}
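+
+// Illustrative usage sketch (not part of the upstream file): an Indexer is fed
+// one TranslationUnit at a time and then answers queries about where an Entity
+// is known and where a function is defined. The Indexer(Program&) constructor
+// and the names below are assumptions for illustration only:
+//
+//   Indexer Idxer(Prog);
+//   Idxer.IndexAST(TU1);
+//   Idxer.IndexAST(TU2);
+//
+//   std::pair<FunctionDecl *, TranslationUnit *> Def =
+//       Idxer.getDefinitionFor(Ent);
+//   if (Def.first)
+//     // Def.second is the translation unit that holds the body of Def.first;
+//     // HandleDefinition is a hypothetical client callback.
+//     HandleDefinition(Def.first, Def.second);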
diff --git a/contrib/llvm/tools/clang/lib/Index/Program.cpp b/contrib/llvm/tools/clang/lib/Index/Program.cpp
new file mode 100644
index 0000000..4efad2c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Index/Program.cpp
@@ -0,0 +1,50 @@
+//===--- Program.cpp - Entity originator and misc -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Storage for Entities and utility functions
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Index/Program.h"
+#include "ProgramImpl.h"
+#include "clang/Index/Handlers.h"
+#include "clang/Index/TranslationUnit.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/ASTContext.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace idx;
+
+// Out-of-line to give the virtual tables a home.
+TranslationUnit::~TranslationUnit() { }
+
+Program::Program() : Impl(new ProgramImpl()) { }
+
+Program::~Program() {
+ delete static_cast<ProgramImpl *>(Impl);
+}
+
+static void FindEntitiesInDC(DeclContext *DC, Program &Prog,
+ EntityHandler &Handler) {
+ for (DeclContext::decl_iterator
+ I = DC->decls_begin(), E = DC->decls_end(); I != E; ++I) {
+ if (I->getLocation().isInvalid())
+ continue;
+ Entity Ent = Entity::get(*I, Prog);
+ if (Ent.isValid())
+ Handler.Handle(Ent);
+ if (DeclContext *SubDC = dyn_cast<DeclContext>(*I))
+ FindEntitiesInDC(SubDC, Prog, Handler);
+ }
+}
+
+/// \brief Traverses the AST and passes all the entities to the Handler.
+void Program::FindEntities(ASTContext &Ctx, EntityHandler &Handler) {
+ FindEntitiesInDC(Ctx.getTranslationUnitDecl(), *this, Handler);
+}
diff --git a/contrib/llvm/tools/clang/lib/Index/ProgramImpl.h b/contrib/llvm/tools/clang/lib/Index/ProgramImpl.h
new file mode 100644
index 0000000..57b9ce3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Index/ProgramImpl.h
@@ -0,0 +1,56 @@
+//===--- ProgramImpl.h - Internal Program implementation---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Internal implementation for the Program class
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_INDEX_PROGRAMIMPL_H
+#define LLVM_CLANG_INDEX_PROGRAMIMPL_H
+
+#include "EntityImpl.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/LangOptions.h"
+
+namespace clang {
+
+namespace idx {
+ class EntityListener;
+
+class ProgramImpl {
+public:
+ typedef llvm::FoldingSet<EntityImpl> EntitySetTy;
+
+private:
+ EntitySetTy Entities;
+ llvm::BumpPtrAllocator BumpAlloc;
+
+ IdentifierTable Identifiers;
+ SelectorTable Selectors;
+
+ ProgramImpl(const ProgramImpl&); // do not implement
+ ProgramImpl &operator=(const ProgramImpl &); // do not implement
+
+public:
+ ProgramImpl() : Identifiers(LangOptions()) { }
+
+ EntitySetTy &getEntities() { return Entities; }
+ IdentifierTable &getIdents() { return Identifiers; }
+ SelectorTable &getSelectors() { return Selectors; }
+
+ void *Allocate(unsigned Size, unsigned Align = 8) {
+ return BumpAlloc.Allocate(Size, Align);
+ }
+};
+
+} // namespace idx
+
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Index/SelectorMap.cpp b/contrib/llvm/tools/clang/lib/Index/SelectorMap.cpp
new file mode 100644
index 0000000..0f11e31
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Index/SelectorMap.cpp
@@ -0,0 +1,84 @@
+//===- SelectorMap.cpp - Maps selectors to methods and messages -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// SelectorMap creates a mapping from selectors to ObjC method declarations
+// and ObjC message expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Index/SelectorMap.h"
+#include "ASTVisitor.h"
+using namespace clang;
+using namespace idx;
+
+namespace {
+
+class SelMapper : public ASTVisitor<SelMapper> {
+ SelectorMap::SelMethMapTy &SelMethMap;
+ SelectorMap::SelRefMapTy &SelRefMap;
+
+public:
+ SelMapper(SelectorMap::SelMethMapTy &MethMap,
+ SelectorMap::SelRefMapTy &RefMap)
+ : SelMethMap(MethMap), SelRefMap(RefMap) { }
+
+ void VisitObjCMethodDecl(ObjCMethodDecl *D);
+ void VisitObjCMessageExpr(ObjCMessageExpr *Node);
+ void VisitObjCSelectorExpr(ObjCSelectorExpr *Node);
+};
+
+} // anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// SelMapper Implementation
+//===----------------------------------------------------------------------===//
+
+void SelMapper::VisitObjCMethodDecl(ObjCMethodDecl *D) {
+ if (D->getCanonicalDecl() == D)
+ SelMethMap.insert(std::make_pair(D->getSelector(), D));
+ Base::VisitObjCMethodDecl(D);
+}
+
+void SelMapper::VisitObjCMessageExpr(ObjCMessageExpr *Node) {
+ ASTLocation ASTLoc(CurrentDecl, Node);
+ SelRefMap.insert(std::make_pair(Node->getSelector(), ASTLoc));
+}
+
+void SelMapper::VisitObjCSelectorExpr(ObjCSelectorExpr *Node) {
+ ASTLocation ASTLoc(CurrentDecl, Node);
+ SelRefMap.insert(std::make_pair(Node->getSelector(), ASTLoc));
+}
+
+//===----------------------------------------------------------------------===//
+// SelectorMap Implementation
+//===----------------------------------------------------------------------===//
+
+SelectorMap::SelectorMap(ASTContext &Ctx) {
+ SelMapper(SelMethMap, SelRefMap).Visit(Ctx.getTranslationUnitDecl());
+}
+
+SelectorMap::method_iterator
+SelectorMap::methods_begin(Selector Sel) const {
+ return method_iterator(SelMethMap.lower_bound(Sel));
+}
+
+SelectorMap::method_iterator
+SelectorMap::methods_end(Selector Sel) const {
+ return method_iterator(SelMethMap.upper_bound(Sel));
+}
+
+SelectorMap::astlocation_iterator
+SelectorMap::refs_begin(Selector Sel) const {
+ return astlocation_iterator(SelRefMap.lower_bound(Sel));
+}
+
+SelectorMap::astlocation_iterator
+SelectorMap::refs_end(Selector Sel) const {
+ return astlocation_iterator(SelRefMap.upper_bound(Sel));
+}
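+
+// Illustrative usage sketch (not part of the upstream file): built once per
+// ASTContext; methods_begin/methods_end and refs_begin/refs_end then give the
+// ObjC methods and message/selector expressions for a given Selector, which is
+// how RefSelectorAnalyzer and MessageAnalyzer consume it. The names 'Ctx' and
+// 'Sel' are hypothetical:
+//
+//   SelectorMap SelMap(Ctx);
+//   for (SelectorMap::method_iterator
+//          I = SelMap.methods_begin(Sel), E = SelMap.methods_end(Sel);
+//        I != E; ++I)
+//     llvm::errs() << (*I)->getSelector().getAsString() << "\n";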
diff --git a/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp b/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp
new file mode 100644
index 0000000..bbfc1df
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/HeaderMap.cpp
@@ -0,0 +1,228 @@
+//===--- HeaderMap.cpp - A file that acts like dir of symlinks ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the HeaderMap interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/HeaderMap.h"
+#include "clang/Basic/FileManager.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/MathExtras.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cctype>
+#include <cstdio>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Data Structures and Manifest Constants
+//===----------------------------------------------------------------------===//
+
+enum {
+ HMAP_HeaderMagicNumber = ('h' << 24) | ('m' << 16) | ('a' << 8) | 'p',
+ HMAP_HeaderVersion = 1,
+
+ HMAP_EmptyBucketKey = 0
+};
+
+namespace clang {
+struct HMapBucket {
+ uint32_t Key; // Offset (into strings) of key.
+
+ uint32_t Prefix; // Offset (into strings) of value prefix.
+ uint32_t Suffix; // Offset (into strings) of value suffix.
+};
+
+struct HMapHeader {
+ uint32_t Magic; // Magic word, also indicates byte order.
+ uint16_t Version; // Version number -- currently 1.
+ uint16_t Reserved; // Reserved for future use - zero for now.
+ uint32_t StringsOffset; // Offset to start of string pool.
+ uint32_t NumEntries; // Number of entries in the string table.
+ uint32_t NumBuckets; // Number of buckets (always a power of 2).
+ uint32_t MaxValueLength; // Length of longest result path (excluding nul).
+ // An array of 'NumBuckets' HMapBucket objects follows this header.
+ // Strings follow the buckets, at StringsOffset.
+};
+} // end namespace clang.
+
+/// HashHMapKey - This is the 'well known' hash function required by the file
+/// format, used to look up keys in the hash table. The hash table uses simple
+/// linear probing based on this function.
+static inline unsigned HashHMapKey(StringRef Str) {
+ unsigned Result = 0;
+ const char *S = Str.begin(), *End = Str.end();
+
+ for (; S != End; S++)
+ Result += tolower(*S) * 13;
+ return Result;
+}
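+
+// Worked example (illustrative, not part of the upstream file): for the key
+// "Foo.h" the characters are lowercased to "foo.h", so the hash is
+//   (102 + 111 + 111 + 46 + 104) * 13 = 474 * 13 = 6162,
+// and LookupFile() starts probing at bucket (6162 & (NumBuckets-1)).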
+
+
+
+//===----------------------------------------------------------------------===//
+// Verification and Construction
+//===----------------------------------------------------------------------===//
+
+/// HeaderMap::Create - This attempts to load the specified file as a header
+/// map. If it doesn't look like a HeaderMap, it gives up and returns null.
+/// If it looks like a HeaderMap but is obviously corrupted, it also gives up
+/// and returns null.
+const HeaderMap *HeaderMap::Create(const FileEntry *FE, FileManager &FM) {
+ // If the file is too small to be a header map, ignore it.
+ unsigned FileSize = FE->getSize();
+ if (FileSize <= sizeof(HMapHeader)) return 0;
+
+ OwningPtr<const llvm::MemoryBuffer> FileBuffer(FM.getBufferForFile(FE));
+ if (FileBuffer == 0) return 0; // Unreadable file?
+ const char *FileStart = FileBuffer->getBufferStart();
+
+ // We know the file is at least as big as the header, check it now.
+ const HMapHeader *Header = reinterpret_cast<const HMapHeader*>(FileStart);
+
+ // Sniff it to see if it's a headermap by checking the magic number and
+ // version.
+ bool NeedsByteSwap;
+ if (Header->Magic == HMAP_HeaderMagicNumber &&
+ Header->Version == HMAP_HeaderVersion)
+ NeedsByteSwap = false;
+ else if (Header->Magic == llvm::ByteSwap_32(HMAP_HeaderMagicNumber) &&
+ Header->Version == llvm::ByteSwap_16(HMAP_HeaderVersion))
+ NeedsByteSwap = true; // Mixed endianness headermap.
+ else
+ return 0; // Not a header map.
+
+ if (Header->Reserved != 0) return 0;
+
+ // Okay, everything looks good, create the header map.
+ return new HeaderMap(FileBuffer.take(), NeedsByteSwap);
+}
+
+HeaderMap::~HeaderMap() {
+ delete FileBuffer;
+}
+
+//===----------------------------------------------------------------------===//
+// Utility Methods
+//===----------------------------------------------------------------------===//
+
+
+/// getFileName - Return the filename of the headermap.
+const char *HeaderMap::getFileName() const {
+ return FileBuffer->getBufferIdentifier();
+}
+
+unsigned HeaderMap::getEndianAdjustedWord(unsigned X) const {
+ if (!NeedsBSwap) return X;
+ return llvm::ByteSwap_32(X);
+}
+
+/// getHeader - Return a reference to the file header, with no byte-swapping
+/// applied to its fields. This method cannot fail.
+const HMapHeader &HeaderMap::getHeader() const {
+ // We know the file is at least as big as the header. Return it.
+ return *reinterpret_cast<const HMapHeader*>(FileBuffer->getBufferStart());
+}
+
+/// getBucket - Return the specified hash table bucket from the header map,
+/// bswap'ing its fields as appropriate. If the bucket number is not valid,
+/// this returns a bucket with an empty key (0).
+HMapBucket HeaderMap::getBucket(unsigned BucketNo) const {
+ HMapBucket Result;
+ Result.Key = HMAP_EmptyBucketKey;
+
+ const HMapBucket *BucketArray =
+ reinterpret_cast<const HMapBucket*>(FileBuffer->getBufferStart() +
+ sizeof(HMapHeader));
+
+ const HMapBucket *BucketPtr = BucketArray+BucketNo;
+ if ((char*)(BucketPtr+1) > FileBuffer->getBufferEnd()) {
+ Result.Prefix = 0;
+ Result.Suffix = 0;
+ return Result; // Invalid buffer, corrupt hmap.
+ }
+
+ // Otherwise, the bucket is valid. Load the values, bswapping as needed.
+ Result.Key = getEndianAdjustedWord(BucketPtr->Key);
+ Result.Prefix = getEndianAdjustedWord(BucketPtr->Prefix);
+ Result.Suffix = getEndianAdjustedWord(BucketPtr->Suffix);
+ return Result;
+}
+
+/// getString - Look up the specified string in the string table. If the string
+/// index is not valid, it returns a null pointer.
+const char *HeaderMap::getString(unsigned StrTabIdx) const {
+ // Add the start of the string table to the idx.
+ StrTabIdx += getEndianAdjustedWord(getHeader().StringsOffset);
+
+ // Check for invalid index.
+ if (StrTabIdx >= FileBuffer->getBufferSize())
+ return 0;
+
+ // Otherwise, we have a valid pointer into the file. Just return it. We know
+ // that the "string" can not overrun the end of the file, because the buffer
+ // is nul terminated by virtue of being a MemoryBuffer.
+ return FileBuffer->getBufferStart()+StrTabIdx;
+}
+
+//===----------------------------------------------------------------------===//
+// The Main Drivers
+//===----------------------------------------------------------------------===//
+
+/// dump - Print the contents of this headermap to stderr.
+void HeaderMap::dump() const {
+ const HMapHeader &Hdr = getHeader();
+ unsigned NumBuckets = getEndianAdjustedWord(Hdr.NumBuckets);
+
+ fprintf(stderr, "Header Map %s:\n %d buckets, %d entries\n",
+ getFileName(), NumBuckets,
+ getEndianAdjustedWord(Hdr.NumEntries));
+
+ for (unsigned i = 0; i != NumBuckets; ++i) {
+ HMapBucket B = getBucket(i);
+ if (B.Key == HMAP_EmptyBucketKey) continue;
+
+ const char *Key = getString(B.Key);
+ const char *Prefix = getString(B.Prefix);
+ const char *Suffix = getString(B.Suffix);
+ fprintf(stderr, " %d. %s -> '%s' '%s'\n", i, Key, Prefix, Suffix);
+ }
+}
+
+/// LookupFile - Check to see if the specified relative filename is located in
+/// this HeaderMap. If so, open it and return its FileEntry.
+const FileEntry *HeaderMap::LookupFile(
+ StringRef Filename, FileManager &FM) const {
+ const HMapHeader &Hdr = getHeader();
+ unsigned NumBuckets = getEndianAdjustedWord(Hdr.NumBuckets);
+
+ // If the number of buckets is not a power of two, the headermap is corrupt.
+ // Don't probe infinitely.
+ if (NumBuckets & (NumBuckets-1))
+ return 0;
+
+ // Linearly probe the hash table.
+ for (unsigned Bucket = HashHMapKey(Filename);; ++Bucket) {
+ HMapBucket B = getBucket(Bucket & (NumBuckets-1));
+ if (B.Key == HMAP_EmptyBucketKey) return 0; // Hash miss.
+
+ // See if the key matches. If not, probe on.
+ if (!Filename.equals_lower(getString(B.Key)))
+ continue;
+
+ // If so, we have a match in the hash table. Construct the destination
+ // path.
+ SmallString<1024> DestPath;
+ DestPath += getString(B.Prefix);
+ DestPath += getString(B.Suffix);
+ return FM.getFile(DestPath.str());
+ }
+}
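+
+// A minimal usage sketch (hypothetical caller; 'FE' is assumed to be the
+// FileEntry of a .hmap file and 'FM' the FileManager it came from):
+//
+//   if (const HeaderMap *HM = HeaderMap::Create(FE, FM))
+//     if (const FileEntry *Hdr = HM->LookupFile("Foo/Bar.h", FM)) {
+//       // "Foo/Bar.h" hashed to a bucket; Prefix + Suffix gave the real path.
+//     }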
diff --git a/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
new file mode 100644
index 0000000..d688e23
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/HeaderSearch.cpp
@@ -0,0 +1,1035 @@
+//===--- HeaderSearch.cpp - Resolve Header File Locations ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the DirectoryLookup and HeaderSearch interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/HeaderMap.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/Capacity.h"
+#include <cstdio>
+using namespace clang;
+
+const IdentifierInfo *
+HeaderFileInfo::getControllingMacro(ExternalIdentifierLookup *External) {
+ if (ControllingMacro)
+ return ControllingMacro;
+
+ if (!ControllingMacroID || !External)
+ return 0;
+
+ ControllingMacro = External->GetIdentifier(ControllingMacroID);
+ return ControllingMacro;
+}
+
+ExternalHeaderFileInfoSource::~ExternalHeaderFileInfoSource() {}
+
+HeaderSearch::HeaderSearch(FileManager &FM, DiagnosticsEngine &Diags,
+ const LangOptions &LangOpts,
+ const TargetInfo *Target)
+ : FileMgr(FM), Diags(Diags), FrameworkMap(64),
+ ModMap(FileMgr, *Diags.getClient(), LangOpts, Target)
+{
+ AngledDirIdx = 0;
+ SystemDirIdx = 0;
+ NoCurDirSearch = false;
+
+ ExternalLookup = 0;
+ ExternalSource = 0;
+ NumIncluded = 0;
+ NumMultiIncludeFileOptzn = 0;
+ NumFrameworkLookups = NumSubFrameworkLookups = 0;
+}
+
+HeaderSearch::~HeaderSearch() {
+ // Delete headermaps.
+ for (unsigned i = 0, e = HeaderMaps.size(); i != e; ++i)
+ delete HeaderMaps[i].second;
+}
+
+void HeaderSearch::PrintStats() {
+ fprintf(stderr, "\n*** HeaderSearch Stats:\n");
+ fprintf(stderr, "%d files tracked.\n", (int)FileInfo.size());
+ unsigned NumOnceOnlyFiles = 0, MaxNumIncludes = 0, NumSingleIncludedFiles = 0;
+ for (unsigned i = 0, e = FileInfo.size(); i != e; ++i) {
+ NumOnceOnlyFiles += FileInfo[i].isImport;
+ if (MaxNumIncludes < FileInfo[i].NumIncludes)
+ MaxNumIncludes = FileInfo[i].NumIncludes;
+ NumSingleIncludedFiles += FileInfo[i].NumIncludes == 1;
+ }
+ fprintf(stderr, " %d #import/#pragma once files.\n", NumOnceOnlyFiles);
+ fprintf(stderr, " %d included exactly once.\n", NumSingleIncludedFiles);
+ fprintf(stderr, " %d max times a file is included.\n", MaxNumIncludes);
+
+ fprintf(stderr, " %d #include/#include_next/#import.\n", NumIncluded);
+ fprintf(stderr, " %d #includes skipped due to"
+ " the multi-include optimization.\n", NumMultiIncludeFileOptzn);
+
+ fprintf(stderr, "%d framework lookups.\n", NumFrameworkLookups);
+ fprintf(stderr, "%d subframework lookups.\n", NumSubFrameworkLookups);
+}
+
+/// CreateHeaderMap - This method returns a HeaderMap for the specified
+/// FileEntry, uniquing them through the 'HeaderMaps' data structure.
+const HeaderMap *HeaderSearch::CreateHeaderMap(const FileEntry *FE) {
+ // We expect the number of headermaps to be small, and almost always empty.
+ // If it ever grows, use of a linear search should be re-evaluated.
+ if (!HeaderMaps.empty()) {
+ for (unsigned i = 0, e = HeaderMaps.size(); i != e; ++i)
+ // Pointer equality comparison of FileEntries works because they are
+ // already uniqued by inode.
+ if (HeaderMaps[i].first == FE)
+ return HeaderMaps[i].second;
+ }
+
+ if (const HeaderMap *HM = HeaderMap::Create(FE, FileMgr)) {
+ HeaderMaps.push_back(std::make_pair(FE, HM));
+ return HM;
+ }
+
+ return 0;
+}
+
+std::string HeaderSearch::getModuleFileName(Module *Module) {
+ // If we don't have a module cache path, we can't do anything.
+ if (ModuleCachePath.empty())
+ return std::string();
+
+
+ SmallString<256> Result(ModuleCachePath);
+ llvm::sys::path::append(Result, Module->getTopLevelModule()->Name + ".pcm");
+ return Result.str().str();
+}
+
+std::string HeaderSearch::getModuleFileName(StringRef ModuleName) {
+ // If we don't have a module cache path, we can't do anything.
+ if (ModuleCachePath.empty())
+ return std::string();
+
+
+ SmallString<256> Result(ModuleCachePath);
+ llvm::sys::path::append(Result, ModuleName + ".pcm");
+ return Result.str().str();
+}
+
+Module *HeaderSearch::lookupModule(StringRef ModuleName, bool AllowSearch) {
+ // Look in the module map to determine if there is a module by this name.
+ Module *Module = ModMap.findModule(ModuleName);
+ if (Module || !AllowSearch)
+ return Module;
+
+ // Look through the various header search paths to load any available module
+ // maps, searching for a module map that describes this module.
+ for (unsigned Idx = 0, N = SearchDirs.size(); Idx != N; ++Idx) {
+ if (SearchDirs[Idx].isFramework()) {
+ // Search for or infer a module map for a framework.
+ SmallString<128> FrameworkDirName;
+ FrameworkDirName += SearchDirs[Idx].getFrameworkDir()->getName();
+ llvm::sys::path::append(FrameworkDirName, ModuleName + ".framework");
+ if (const DirectoryEntry *FrameworkDir
+ = FileMgr.getDirectory(FrameworkDirName)) {
+ bool IsSystem
+ = SearchDirs[Idx].getDirCharacteristic() != SrcMgr::C_User;
+ Module = loadFrameworkModule(ModuleName, FrameworkDir, IsSystem);
+ if (Module)
+ break;
+ }
+ }
+
+ // FIXME: Figure out how header maps and module maps will work together.
+
+ // Only deal with normal search directories.
+ if (!SearchDirs[Idx].isNormalDir())
+ continue;
+
+ // Search for a module map file in this directory.
+ if (loadModuleMapFile(SearchDirs[Idx].getDir()) == LMM_NewlyLoaded) {
+ // We just loaded a module map file; check whether the module is
+ // available now.
+ Module = ModMap.findModule(ModuleName);
+ if (Module)
+ break;
+ }
+
+ // Search for a module map in a subdirectory with the same name as the
+ // module.
+ SmallString<128> NestedModuleMapDirName;
+ NestedModuleMapDirName = SearchDirs[Idx].getDir()->getName();
+ llvm::sys::path::append(NestedModuleMapDirName, ModuleName);
+ if (loadModuleMapFile(NestedModuleMapDirName) == LMM_NewlyLoaded) {
+ // If we just loaded a module map file, look for the module again.
+ Module = ModMap.findModule(ModuleName);
+ if (Module)
+ break;
+ }
+ }
+
+ return Module;
+}
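+
+// In other words, a call such as lookupModule("Foo", /*AllowSearch=*/true)
+// tries each search directory in up to three ways: a Foo.framework inside a
+// framework directory, a module map directly in a normal directory, and a
+// module map inside a "Foo" subdirectory of it; the first module map that
+// actually defines 'Foo' wins.  ("Foo" is just an illustrative name.)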
+
+//===----------------------------------------------------------------------===//
+// File lookup within a DirectoryLookup scope
+//===----------------------------------------------------------------------===//
+
+/// getName - Return the directory or filename corresponding to this lookup
+/// object.
+const char *DirectoryLookup::getName() const {
+ if (isNormalDir())
+ return getDir()->getName();
+ if (isFramework())
+ return getFrameworkDir()->getName();
+ assert(isHeaderMap() && "Unknown DirectoryLookup");
+ return getHeaderMap()->getFileName();
+}
+
+
+/// LookupFile - Lookup the specified file in this search path, returning it
+/// if it exists or returning null if not.
+const FileEntry *DirectoryLookup::LookupFile(
+ StringRef Filename,
+ HeaderSearch &HS,
+ SmallVectorImpl<char> *SearchPath,
+ SmallVectorImpl<char> *RelativePath,
+ Module **SuggestedModule,
+ bool &InUserSpecifiedSystemFramework) const {
+ InUserSpecifiedSystemFramework = false;
+
+ SmallString<1024> TmpDir;
+ if (isNormalDir()) {
+ // Concatenate the requested file onto the directory.
+ TmpDir = getDir()->getName();
+ llvm::sys::path::append(TmpDir, Filename);
+ if (SearchPath != NULL) {
+ StringRef SearchPathRef(getDir()->getName());
+ SearchPath->clear();
+ SearchPath->append(SearchPathRef.begin(), SearchPathRef.end());
+ }
+ if (RelativePath != NULL) {
+ RelativePath->clear();
+ RelativePath->append(Filename.begin(), Filename.end());
+ }
+
+ // If we have a module map that might map this header, load it and
+ // check whether we'll have a suggestion for a module.
+ if (SuggestedModule && HS.hasModuleMap(TmpDir, getDir())) {
+ const FileEntry *File = HS.getFileMgr().getFile(TmpDir.str(),
+ /*openFile=*/false);
+ if (!File)
+ return File;
+
+ // If there is a module that corresponds to this header,
+ // suggest it.
+ *SuggestedModule = HS.findModuleForHeader(File);
+ return File;
+ }
+
+ return HS.getFileMgr().getFile(TmpDir.str(), /*openFile=*/true);
+ }
+
+ if (isFramework())
+ return DoFrameworkLookup(Filename, HS, SearchPath, RelativePath,
+ SuggestedModule, InUserSpecifiedSystemFramework);
+
+ assert(isHeaderMap() && "Unknown directory lookup");
+ const FileEntry * const Result = getHeaderMap()->LookupFile(
+ Filename, HS.getFileMgr());
+ if (Result) {
+ if (SearchPath != NULL) {
+ StringRef SearchPathRef(getName());
+ SearchPath->clear();
+ SearchPath->append(SearchPathRef.begin(), SearchPathRef.end());
+ }
+ if (RelativePath != NULL) {
+ RelativePath->clear();
+ RelativePath->append(Filename.begin(), Filename.end());
+ }
+ }
+ return Result;
+}
+
+
+/// DoFrameworkLookup - Do a lookup of the specified file in the current
+/// DirectoryLookup, which is a framework directory.
+const FileEntry *DirectoryLookup::DoFrameworkLookup(
+ StringRef Filename,
+ HeaderSearch &HS,
+ SmallVectorImpl<char> *SearchPath,
+ SmallVectorImpl<char> *RelativePath,
+ Module **SuggestedModule,
+ bool &InUserSpecifiedSystemFramework) const
+{
+ FileManager &FileMgr = HS.getFileMgr();
+
+ // Framework names must have a '/' in the filename.
+ size_t SlashPos = Filename.find('/');
+ if (SlashPos == StringRef::npos) return 0;
+
+ // Find out if this is the home for the specified framework, by checking
+ // HeaderSearch. Possible answers are yes/no and unknown.
+ HeaderSearch::FrameworkCacheEntry &CacheEntry =
+ HS.LookupFrameworkCache(Filename.substr(0, SlashPos));
+
+ // If it is known and in some other directory, fail.
+ if (CacheEntry.Directory && CacheEntry.Directory != getFrameworkDir())
+ return 0;
+
+ // Otherwise, construct the path to this framework dir.
+
+ // FrameworkName = "/System/Library/Frameworks/"
+ SmallString<1024> FrameworkName;
+ FrameworkName += getFrameworkDir()->getName();
+ if (FrameworkName.empty() || FrameworkName.back() != '/')
+ FrameworkName.push_back('/');
+
+ // FrameworkName = "/System/Library/Frameworks/Cocoa"
+ StringRef ModuleName(Filename.begin(), SlashPos);
+ FrameworkName += ModuleName;
+
+ // FrameworkName = "/System/Library/Frameworks/Cocoa.framework/"
+ FrameworkName += ".framework/";
+
+ // If the cache entry was unresolved, populate it now.
+ if (CacheEntry.Directory == 0) {
+ HS.IncrementFrameworkLookupCount();
+
+ // If the framework dir doesn't exist, we fail.
+ const DirectoryEntry *Dir = FileMgr.getDirectory(FrameworkName.str());
+ if (Dir == 0) return 0;
+
+ // Otherwise, if it does, remember that this is the right direntry for this
+ // framework.
+ CacheEntry.Directory = getFrameworkDir();
+
+ // If this is a user search directory, check if the framework has been
+ // user-specified as a system framework.
+ if (getDirCharacteristic() == SrcMgr::C_User) {
+ SmallString<1024> SystemFrameworkMarker(FrameworkName);
+ SystemFrameworkMarker += ".system_framework";
+ if (llvm::sys::fs::exists(SystemFrameworkMarker.str())) {
+ CacheEntry.IsUserSpecifiedSystemFramework = true;
+ }
+ }
+ }
+
+ // Set the 'user-specified system framework' flag.
+ InUserSpecifiedSystemFramework = CacheEntry.IsUserSpecifiedSystemFramework;
+
+ if (RelativePath != NULL) {
+ RelativePath->clear();
+ RelativePath->append(Filename.begin()+SlashPos+1, Filename.end());
+ }
+
+ // If we're allowed to look for modules, try to load or create the module
+ // corresponding to this framework.
+ Module *Module = 0;
+ if (SuggestedModule) {
+ if (const DirectoryEntry *FrameworkDir
+ = FileMgr.getDirectory(FrameworkName)) {
+ bool IsSystem = getDirCharacteristic() != SrcMgr::C_User;
+ Module = HS.loadFrameworkModule(ModuleName, FrameworkDir, IsSystem);
+ }
+ }
+
+ // Check "/System/Library/Frameworks/Cocoa.framework/Headers/file.h"
+ unsigned OrigSize = FrameworkName.size();
+
+ FrameworkName += "Headers/";
+
+ if (SearchPath != NULL) {
+ SearchPath->clear();
+ // Without trailing '/'.
+ SearchPath->append(FrameworkName.begin(), FrameworkName.end()-1);
+ }
+
+ // Determine whether this is the module we're building or not.
+ bool AutomaticImport = Module;
+ FrameworkName.append(Filename.begin()+SlashPos+1, Filename.end());
+ if (const FileEntry *FE = FileMgr.getFile(FrameworkName.str(),
+ /*openFile=*/!AutomaticImport)) {
+ if (AutomaticImport)
+ *SuggestedModule = HS.findModuleForHeader(FE);
+ return FE;
+ }
+
+ // Check "/System/Library/Frameworks/Cocoa.framework/PrivateHeaders/file.h"
+ const char *Private = "Private";
+ FrameworkName.insert(FrameworkName.begin()+OrigSize, Private,
+ Private+strlen(Private));
+ if (SearchPath != NULL)
+ SearchPath->insert(SearchPath->begin()+OrigSize, Private,
+ Private+strlen(Private));
+
+ const FileEntry *FE = FileMgr.getFile(FrameworkName.str(),
+ /*openFile=*/!AutomaticImport);
+ if (FE && AutomaticImport)
+ *SuggestedModule = HS.findModuleForHeader(FE);
+ return FE;
+}
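+
+// Putting the path construction above together, a directive such as
+//
+//   #include <Cocoa/Cocoa.h>
+//
+// searched against the framework dir "/System/Library/Frameworks" is tried as
+//
+//   /System/Library/Frameworks/Cocoa.framework/Headers/Cocoa.h
+//
+// and, if that fails, as
+//
+//   /System/Library/Frameworks/Cocoa.framework/PrivateHeaders/Cocoa.h
+//
+// (The paths are only an example; any framework search dir works the same way.)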
+
+void HeaderSearch::setTarget(const TargetInfo &Target) {
+ ModMap.setTarget(Target);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Header File Location.
+//===----------------------------------------------------------------------===//
+
+
+/// LookupFile - Given a "foo" or <foo> reference, look up the indicated file,
+/// return null on failure. isAngled indicates whether the file reference is
+/// for system #include's or not (i.e. using <> instead of ""). CurFileEnt, if
+/// non-null, indicates where the #including file is, in case a relative search
+/// is needed.
+const FileEntry *HeaderSearch::LookupFile(
+ StringRef Filename,
+ bool isAngled,
+ const DirectoryLookup *FromDir,
+ const DirectoryLookup *&CurDir,
+ const FileEntry *CurFileEnt,
+ SmallVectorImpl<char> *SearchPath,
+ SmallVectorImpl<char> *RelativePath,
+ Module **SuggestedModule,
+ bool SkipCache)
+{
+ if (SuggestedModule)
+ *SuggestedModule = 0;
+
+ // If 'Filename' is absolute, check to see if it exists and no searching.
+ if (llvm::sys::path::is_absolute(Filename)) {
+ CurDir = 0;
+
+ // If this was an #include_next "/absolute/file", fail.
+ if (FromDir) return 0;
+
+ if (SearchPath != NULL)
+ SearchPath->clear();
+ if (RelativePath != NULL) {
+ RelativePath->clear();
+ RelativePath->append(Filename.begin(), Filename.end());
+ }
+ // Otherwise, just return the file.
+ return FileMgr.getFile(Filename, /*openFile=*/true);
+ }
+
+ // Unless disabled, check to see if the file is in the #includer's
+ // directory. This has to be based on CurFileEnt, not CurDir, because
+ // CurFileEnt could be a #include of a subdirectory (#include "foo/bar.h") and
+ // a subsequent include of "baz.h" should resolve to "whatever/foo/baz.h".
+ // This search is not done for <> headers.
+ if (CurFileEnt && !isAngled && !NoCurDirSearch) {
+ SmallString<1024> TmpDir;
+ // Concatenate the requested file onto the directory.
+ // FIXME: Portability. Filename concatenation should be in sys::Path.
+ TmpDir += CurFileEnt->getDir()->getName();
+ TmpDir.push_back('/');
+ TmpDir.append(Filename.begin(), Filename.end());
+ if (const FileEntry *FE = FileMgr.getFile(TmpDir.str(),/*openFile=*/true)) {
+ // Leave CurDir unset.
+ // This file is a system header or C++ unfriendly if the old file is.
+ //
+ // Note that the temporary 'DirInfo' is required here, as either call to
+ // getFileInfo could resize the vector and we don't want to rely on order
+ // of evaluation.
+ unsigned DirInfo = getFileInfo(CurFileEnt).DirInfo;
+ getFileInfo(FE).DirInfo = DirInfo;
+ if (SearchPath != NULL) {
+ StringRef SearchPathRef(CurFileEnt->getDir()->getName());
+ SearchPath->clear();
+ SearchPath->append(SearchPathRef.begin(), SearchPathRef.end());
+ }
+ if (RelativePath != NULL) {
+ RelativePath->clear();
+ RelativePath->append(Filename.begin(), Filename.end());
+ }
+ return FE;
+ }
+ }
+
+ CurDir = 0;
+
+ // If this is a system #include, ignore the user #include locs.
+ unsigned i = isAngled ? AngledDirIdx : 0;
+
+ // If this is a #include_next request, start searching after the directory the
+ // file was found in.
+ if (FromDir)
+ i = FromDir-&SearchDirs[0];
+
+ // Cache all of the lookups performed by this method. Many headers are
+ // multiply included, and the "pragma once" optimization prevents them from
+ // being re-lexed and re-preprocessed, but each repeated #include would still
+ // have to search through a (potentially huge) series of SearchDirs to find
+ // the file.
+ std::pair<unsigned, unsigned> &CacheLookup =
+ LookupFileCache.GetOrCreateValue(Filename).getValue();
+
+ // If the entry has been previously looked up, the first value will be
+ // non-zero. If the value is equal to i (the start point of our search), then
+ // this is a matching hit.
+ if (!SkipCache && CacheLookup.first == i+1) {
+ // Skip querying potentially lots of directories for this lookup.
+ i = CacheLookup.second;
+ } else {
+ // Otherwise, this is the first query, or the previous query didn't match
+ // our search start. We will fill in our found location below, so prime the
+ // start point value.
+ CacheLookup.first = i+1;
+ }
+
+ // Check each directory in sequence to see if it contains this file.
+ for (; i != SearchDirs.size(); ++i) {
+ bool InUserSpecifiedSystemFramework = false;
+ const FileEntry *FE =
+ SearchDirs[i].LookupFile(Filename, *this, SearchPath, RelativePath,
+ SuggestedModule, InUserSpecifiedSystemFramework);
+ if (!FE) continue;
+
+ CurDir = &SearchDirs[i];
+
+ // This file is a system header or C++ unfriendly if the dir is.
+ HeaderFileInfo &HFI = getFileInfo(FE);
+ HFI.DirInfo = CurDir->getDirCharacteristic();
+
+ // If the directory characteristic is User but this framework was
+ // user-specified to be treated as a system framework, promote the
+ // characteristic.
+ if (HFI.DirInfo == SrcMgr::C_User && InUserSpecifiedSystemFramework)
+ HFI.DirInfo = SrcMgr::C_System;
+
+ // If this file is found in a header map and uses the framework style of
+ // includes, then this header is part of a framework we're building.
+ if (CurDir->isIndexHeaderMap()) {
+ size_t SlashPos = Filename.find('/');
+ if (SlashPos != StringRef::npos) {
+ HFI.IndexHeaderMapHeader = 1;
+ HFI.Framework = getUniqueFrameworkName(StringRef(Filename.begin(),
+ SlashPos));
+ }
+ }
+
+ // Remember this location for the next lookup we do.
+ CacheLookup.second = i;
+ return FE;
+ }
+
+ // If we are including a file with a quoted include "foo.h" from inside
+ // a header in a framework that is currently being built, and we couldn't
+ // resolve "foo.h" any other way, change the include to <Foo/foo.h>, where
+ // "Foo" is the name of the framework in which the including header was found.
+ if (CurFileEnt && !isAngled && Filename.find('/') == StringRef::npos) {
+ HeaderFileInfo &IncludingHFI = getFileInfo(CurFileEnt);
+ if (IncludingHFI.IndexHeaderMapHeader) {
+ SmallString<128> ScratchFilename;
+ ScratchFilename += IncludingHFI.Framework;
+ ScratchFilename += '/';
+ ScratchFilename += Filename;
+
+ const FileEntry *Result = LookupFile(ScratchFilename, /*isAngled=*/true,
+ FromDir, CurDir, CurFileEnt,
+ SearchPath, RelativePath,
+ SuggestedModule);
+ std::pair<unsigned, unsigned> &CacheLookup
+ = LookupFileCache.GetOrCreateValue(Filename).getValue();
+ CacheLookup.second
+ = LookupFileCache.GetOrCreateValue(ScratchFilename).getValue().second;
+ return Result;
+ }
+ }
+
+ // Otherwise, didn't find it. Remember we didn't find this.
+ CacheLookup.second = SearchDirs.size();
+ return 0;
+}
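+
+// Cache behaviour above, as a small worked example: if a lookup of "foo.h"
+// that started at directory index 2 found it in SearchDirs[5], then
+// LookupFileCache["foo.h"] ends up as (first = 3, second = 5).  A later
+// lookup that also starts at index 2 sees first == i+1 and jumps straight to
+// SearchDirs[5]; one starting elsewhere (e.g. an #include_next) re-primes the
+// entry and scans again.  (Name and indices are purely illustrative.)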
+
+/// LookupSubframeworkHeader - Look up a subframework for the specified
+/// #include file. For example, if #include'ing <HIToolbox/HIToolbox.h> from
+/// within ".../Carbon.framework/Headers/Carbon.h", check to see if HIToolbox
+/// is a subframework within Carbon.framework. If so, return the FileEntry
+/// for the designated file, otherwise return null.
+const FileEntry *HeaderSearch::
+LookupSubframeworkHeader(StringRef Filename,
+ const FileEntry *ContextFileEnt,
+ SmallVectorImpl<char> *SearchPath,
+ SmallVectorImpl<char> *RelativePath) {
+ assert(ContextFileEnt && "No context file?");
+
+ // Framework names must have a '/' in the filename. Find it.
+ // FIXME: Should we permit '\' on Windows?
+ size_t SlashPos = Filename.find('/');
+ if (SlashPos == StringRef::npos) return 0;
+
+ // Look up the base framework name of the ContextFileEnt.
+ const char *ContextName = ContextFileEnt->getName();
+
+ // If the context info wasn't a framework, couldn't be a subframework.
+ const unsigned DotFrameworkLen = 10;
+ const char *FrameworkPos = strstr(ContextName, ".framework");
+ if (FrameworkPos == 0 ||
+ (FrameworkPos[DotFrameworkLen] != '/' &&
+ FrameworkPos[DotFrameworkLen] != '\\'))
+ return 0;
+
+ SmallString<1024> FrameworkName(ContextName, FrameworkPos+DotFrameworkLen+1);
+
+ // Append Frameworks/HIToolbox.framework/
+ FrameworkName += "Frameworks/";
+ FrameworkName.append(Filename.begin(), Filename.begin()+SlashPos);
+ FrameworkName += ".framework/";
+
+ llvm::StringMapEntry<FrameworkCacheEntry> &CacheLookup =
+ FrameworkMap.GetOrCreateValue(Filename.substr(0, SlashPos));
+
+ // Some other location?
+ if (CacheLookup.getValue().Directory &&
+ CacheLookup.getKeyLength() == FrameworkName.size() &&
+ memcmp(CacheLookup.getKeyData(), &FrameworkName[0],
+ CacheLookup.getKeyLength()) != 0)
+ return 0;
+
+ // Cache subframework.
+ if (CacheLookup.getValue().Directory == 0) {
+ ++NumSubFrameworkLookups;
+
+ // If the framework dir doesn't exist, we fail.
+ const DirectoryEntry *Dir = FileMgr.getDirectory(FrameworkName.str());
+ if (Dir == 0) return 0;
+
+ // Otherwise, if it does, remember that this is the right direntry for this
+ // framework.
+ CacheLookup.getValue().Directory = Dir;
+ }
+
+ const FileEntry *FE = 0;
+
+ if (RelativePath != NULL) {
+ RelativePath->clear();
+ RelativePath->append(Filename.begin()+SlashPos+1, Filename.end());
+ }
+
+ // Check ".../Frameworks/HIToolbox.framework/Headers/HIToolbox.h"
+ SmallString<1024> HeadersFilename(FrameworkName);
+ HeadersFilename += "Headers/";
+ if (SearchPath != NULL) {
+ SearchPath->clear();
+ // Without trailing '/'.
+ SearchPath->append(HeadersFilename.begin(), HeadersFilename.end()-1);
+ }
+
+ HeadersFilename.append(Filename.begin()+SlashPos+1, Filename.end());
+ if (!(FE = FileMgr.getFile(HeadersFilename.str(), /*openFile=*/true))) {
+
+ // Check ".../Frameworks/HIToolbox.framework/PrivateHeaders/HIToolbox.h"
+ HeadersFilename = FrameworkName;
+ HeadersFilename += "PrivateHeaders/";
+ if (SearchPath != NULL) {
+ SearchPath->clear();
+ // Without trailing '/'.
+ SearchPath->append(HeadersFilename.begin(), HeadersFilename.end()-1);
+ }
+
+ HeadersFilename.append(Filename.begin()+SlashPos+1, Filename.end());
+ if (!(FE = FileMgr.getFile(HeadersFilename.str(), /*openFile=*/true)))
+ return 0;
+ }
+
+ // This file is a system header or C++ unfriendly if the old file is.
+ //
+ // Note that the temporary 'DirInfo' is required here, as either call to
+ // getFileInfo could resize the vector and we don't want to rely on order
+ // of evaluation.
+ unsigned DirInfo = getFileInfo(ContextFileEnt).DirInfo;
+ getFileInfo(FE).DirInfo = DirInfo;
+ return FE;
+}
+
+/// \brief Helper static function to normalize a path for injection into
+/// a synthetic header.
+/*static*/ std::string
+HeaderSearch::NormalizeDashIncludePath(StringRef File, FileManager &FileMgr) {
+ // Implicit include paths should be resolved relative to the current
+ // working directory first, and then use the regular header search
+ // mechanism. The proper way to handle this is to have the
+ // predefines buffer located at the current working directory, but
+ // it has no file entry. For now, workaround this by using an
+ // absolute path if we find the file here, and otherwise letting
+ // header search handle it.
+ SmallString<128> Path(File);
+ llvm::sys::fs::make_absolute(Path);
+ bool exists;
+ if (llvm::sys::fs::exists(Path.str(), exists) || !exists)
+ Path = File;
+ else if (exists)
+ FileMgr.getFile(File);
+
+ return Lexer::Stringify(Path.str());
+}
+
+//===----------------------------------------------------------------------===//
+// File Info Management.
+//===----------------------------------------------------------------------===//
+
+/// \brief Merge the header file info provided by \p OtherHFI into the current
+/// header file info (\p HFI)
+static void mergeHeaderFileInfo(HeaderFileInfo &HFI,
+ const HeaderFileInfo &OtherHFI) {
+ HFI.isImport |= OtherHFI.isImport;
+ HFI.isPragmaOnce |= OtherHFI.isPragmaOnce;
+ HFI.NumIncludes += OtherHFI.NumIncludes;
+
+ if (!HFI.ControllingMacro && !HFI.ControllingMacroID) {
+ HFI.ControllingMacro = OtherHFI.ControllingMacro;
+ HFI.ControllingMacroID = OtherHFI.ControllingMacroID;
+ }
+
+ if (OtherHFI.External) {
+ HFI.DirInfo = OtherHFI.DirInfo;
+ HFI.External = OtherHFI.External;
+ HFI.IndexHeaderMapHeader = OtherHFI.IndexHeaderMapHeader;
+ }
+
+ if (HFI.Framework.empty())
+ HFI.Framework = OtherHFI.Framework;
+
+ HFI.Resolved = true;
+}
+
+/// getFileInfo - Return the HeaderFileInfo structure for the specified
+/// FileEntry.
+HeaderFileInfo &HeaderSearch::getFileInfo(const FileEntry *FE) {
+ if (FE->getUID() >= FileInfo.size())
+ FileInfo.resize(FE->getUID()+1);
+
+ HeaderFileInfo &HFI = FileInfo[FE->getUID()];
+ if (ExternalSource && !HFI.Resolved)
+ mergeHeaderFileInfo(HFI, ExternalSource->GetHeaderFileInfo(FE));
+ return HFI;
+}
+
+bool HeaderSearch::isFileMultipleIncludeGuarded(const FileEntry *File) {
+ // Check if we've ever seen this file as a header.
+ if (File->getUID() >= FileInfo.size())
+ return false;
+
+ // Resolve header file info from the external source, if needed.
+ HeaderFileInfo &HFI = FileInfo[File->getUID()];
+ if (ExternalSource && !HFI.Resolved)
+ mergeHeaderFileInfo(HFI, ExternalSource->GetHeaderFileInfo(File));
+
+ return HFI.isPragmaOnce || HFI.ControllingMacro || HFI.ControllingMacroID;
+}
+
+void HeaderSearch::setHeaderFileInfoForUID(HeaderFileInfo HFI, unsigned UID) {
+ if (UID >= FileInfo.size())
+ FileInfo.resize(UID+1);
+ HFI.Resolved = true;
+ FileInfo[UID] = HFI;
+}
+
+/// ShouldEnterIncludeFile - Mark the specified file as a target of a
+/// #include, #include_next, or #import directive. Return false if #including
+/// the file will have no effect or true if we should include it.
+bool HeaderSearch::ShouldEnterIncludeFile(const FileEntry *File, bool isImport){
+ ++NumIncluded; // Count # of attempted #includes.
+
+ // Get information about this file.
+ HeaderFileInfo &FileInfo = getFileInfo(File);
+
+ // If this is a #import directive, check that we have not already imported
+ // this header.
+ if (isImport) {
+ // If this has already been imported, don't import it again.
+ FileInfo.isImport = true;
+
+ // Has this already been #import'ed or #include'd?
+ if (FileInfo.NumIncludes) return false;
+ } else {
+ // Otherwise, if this is a #include of a file that was previously #import'd
+ // or if this is the second #include of a #pragma once file, ignore it.
+ if (FileInfo.isImport)
+ return false;
+ }
+
+ // Next, check to see if the file is wrapped with #ifndef guards. If so, and
+ // if the macro that guards it is defined, we know the #include has no effect.
+ if (const IdentifierInfo *ControllingMacro
+ = FileInfo.getControllingMacro(ExternalLookup))
+ if (ControllingMacro->hasMacroDefinition()) {
+ ++NumMultiIncludeFileOptzn;
+ return false;
+ }
+
+ // Increment the number of times this file has been included.
+ ++FileInfo.NumIncludes;
+
+ return true;
+}
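+
+// Example of the controlling-macro check above: for a header guarded as
+//
+//   #ifndef FOO_H
+//   #define FOO_H
+//   ...
+//   #endif
+//
+// the first #include enters the file; on a later #include the controlling
+// macro FOO_H is already defined, so this returns false and the file is never
+// reopened.  (FOO_H is a placeholder guard name.)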
+
+size_t HeaderSearch::getTotalMemory() const {
+ return SearchDirs.capacity()
+ + llvm::capacity_in_bytes(FileInfo)
+ + llvm::capacity_in_bytes(HeaderMaps)
+ + LookupFileCache.getAllocator().getTotalMemory()
+ + FrameworkMap.getAllocator().getTotalMemory();
+}
+
+StringRef HeaderSearch::getUniqueFrameworkName(StringRef Framework) {
+ return FrameworkNames.GetOrCreateValue(Framework).getKey();
+}
+
+bool HeaderSearch::hasModuleMap(StringRef FileName,
+ const DirectoryEntry *Root) {
+ llvm::SmallVector<const DirectoryEntry *, 2> FixUpDirectories;
+
+ StringRef DirName = FileName;
+ do {
+ // Get the parent directory name.
+ DirName = llvm::sys::path::parent_path(DirName);
+ if (DirName.empty())
+ return false;
+
+ // Determine whether this directory exists.
+ const DirectoryEntry *Dir = FileMgr.getDirectory(DirName);
+ if (!Dir)
+ return false;
+
+ // Try to load the module map file in this directory.
+ switch (loadModuleMapFile(Dir)) {
+ case LMM_NewlyLoaded:
+ case LMM_AlreadyLoaded:
+ // Success. All of the directories we stepped through inherit this module
+ // map file.
+ for (unsigned I = 0, N = FixUpDirectories.size(); I != N; ++I)
+ DirectoryHasModuleMap[FixUpDirectories[I]] = true;
+
+ return true;
+
+ case LMM_NoDirectory:
+ case LMM_InvalidModuleMap:
+ break;
+ }
+
+ // If we hit the top of our search, we're done.
+ if (Dir == Root)
+ return false;
+
+ // Keep track of all of the directories we checked, so we can mark them as
+ // having module maps if we eventually do find a module map.
+ FixUpDirectories.push_back(Dir);
+ } while (true);
+}
+
+Module *HeaderSearch::findModuleForHeader(const FileEntry *File) {
+ if (Module *Mod = ModMap.findModuleForHeader(File))
+ return Mod;
+
+ return 0;
+}
+
+bool HeaderSearch::loadModuleMapFile(const FileEntry *File) {
+ const DirectoryEntry *Dir = File->getDir();
+
+ llvm::DenseMap<const DirectoryEntry *, bool>::iterator KnownDir
+ = DirectoryHasModuleMap.find(Dir);
+ if (KnownDir != DirectoryHasModuleMap.end())
+ return !KnownDir->second;
+
+ bool Result = ModMap.parseModuleMapFile(File);
+ if (!Result && llvm::sys::path::filename(File->getName()) == "module.map") {
+ // If the file we loaded was a module.map, look for the corresponding
+ // module_private.map.
+ SmallString<128> PrivateFilename(Dir->getName());
+ llvm::sys::path::append(PrivateFilename, "module_private.map");
+ if (const FileEntry *PrivateFile = FileMgr.getFile(PrivateFilename))
+ Result = ModMap.parseModuleMapFile(PrivateFile);
+ }
+
+ DirectoryHasModuleMap[Dir] = !Result;
+ return Result;
+}
+
+Module *HeaderSearch::loadFrameworkModule(StringRef Name,
+ const DirectoryEntry *Dir,
+ bool IsSystem) {
+ if (Module *Module = ModMap.findModule(Name))
+ return Module;
+
+ // Try to load a module map file.
+ switch (loadModuleMapFile(Dir)) {
+ case LMM_InvalidModuleMap:
+ break;
+
+ case LMM_AlreadyLoaded:
+ case LMM_NoDirectory:
+ return 0;
+
+ case LMM_NewlyLoaded:
+ return ModMap.findModule(Name);
+ }
+
+ // The top-level framework directory, from which we'll infer a framework
+ // module.
+ const DirectoryEntry *TopFrameworkDir = Dir;
+
+ // The path from the module we're actually looking for back to the top-level
+ // framework name.
+ llvm::SmallVector<StringRef, 2> SubmodulePath;
+ SubmodulePath.push_back(Name);
+
+ // Walk the directory structure to find any enclosing frameworks.
+ StringRef DirName = Dir->getName();
+ do {
+ // Get the parent directory name.
+ DirName = llvm::sys::path::parent_path(DirName);
+ if (DirName.empty())
+ break;
+
+ // Determine whether this directory exists.
+ Dir = FileMgr.getDirectory(DirName);
+ if (!Dir)
+ break;
+
+ // If this is a framework directory, then we're a subframework of this
+ // framework.
+ if (llvm::sys::path::extension(DirName) == ".framework") {
+ SubmodulePath.push_back(llvm::sys::path::stem(DirName));
+ TopFrameworkDir = Dir;
+ }
+ } while (true);
+
+ // Try to infer a module map from the top-level framework directory.
+ Module *Result = ModMap.inferFrameworkModule(SubmodulePath.back(),
+ TopFrameworkDir,
+ IsSystem,
+ /*Parent=*/0);
+
+ // Follow the submodule path to find the requested (sub)framework module
+ // within the top-level framework module.
+ SubmodulePath.pop_back();
+ while (!SubmodulePath.empty() && Result) {
+ Result = ModMap.lookupModuleQualified(SubmodulePath.back(), Result);
+ SubmodulePath.pop_back();
+ }
+ return Result;
+}
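+
+// Worked example of the walk above: asked for "HIToolbox" with Dir pointing at
+// ".../Carbon.framework/Frameworks/HIToolbox.framework", SubmodulePath becomes
+// ["HIToolbox", "Carbon"] and TopFrameworkDir moves up to Carbon.framework.
+// A framework module is inferred for Carbon, and the result is the qualified
+// lookup of HIToolbox inside it.  (The Carbon/HIToolbox names are only the
+// usual illustration; any nested framework behaves the same way.)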
+
+
+HeaderSearch::LoadModuleMapResult
+HeaderSearch::loadModuleMapFile(StringRef DirName) {
+ if (const DirectoryEntry *Dir = FileMgr.getDirectory(DirName))
+ return loadModuleMapFile(Dir);
+
+ return LMM_NoDirectory;
+}
+
+HeaderSearch::LoadModuleMapResult
+HeaderSearch::loadModuleMapFile(const DirectoryEntry *Dir) {
+ llvm::DenseMap<const DirectoryEntry *, bool>::iterator KnownDir
+ = DirectoryHasModuleMap.find(Dir);
+ if (KnownDir != DirectoryHasModuleMap.end())
+ return KnownDir->second? LMM_AlreadyLoaded : LMM_InvalidModuleMap;
+
+ SmallString<128> ModuleMapFileName;
+ ModuleMapFileName += Dir->getName();
+ unsigned ModuleMapDirNameLen = ModuleMapFileName.size();
+ llvm::sys::path::append(ModuleMapFileName, "module.map");
+ if (const FileEntry *ModuleMapFile = FileMgr.getFile(ModuleMapFileName)) {
+ // We have found a module map file. Try to parse it.
+ if (ModMap.parseModuleMapFile(ModuleMapFile)) {
+ // No suitable module map.
+ DirectoryHasModuleMap[Dir] = false;
+ return LMM_InvalidModuleMap;
+ }
+
+ // This directory has a module map.
+ DirectoryHasModuleMap[Dir] = true;
+
+ // Check whether there is a private module map that we need to load as well.
+ ModuleMapFileName.erase(ModuleMapFileName.begin() + ModuleMapDirNameLen,
+ ModuleMapFileName.end());
+ llvm::sys::path::append(ModuleMapFileName, "module_private.map");
+ if (const FileEntry *PrivateModuleMapFile
+ = FileMgr.getFile(ModuleMapFileName)) {
+ if (ModMap.parseModuleMapFile(PrivateModuleMapFile)) {
+ // No suitable module map.
+ DirectoryHasModuleMap[Dir] = false;
+ return LMM_InvalidModuleMap;
+ }
+ }
+
+ return LMM_NewlyLoaded;
+ }
+
+ // No suitable module map.
+ DirectoryHasModuleMap[Dir] = false;
+ return LMM_InvalidModuleMap;
+}
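+
+// By way of example, the directory layout this method expects for a search
+// directory "include/" (an illustrative path) is:
+//
+//   include/module.map            <- required; parsed first
+//   include/module_private.map    <- optional; parsed if present
+//
+// Either outcome is recorded in DirectoryHasModuleMap so the same directory is
+// never parsed twice.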
+
+void HeaderSearch::collectAllModules(llvm::SmallVectorImpl<Module *> &Modules) {
+ Modules.clear();
+
+ // Load module maps for each of the header search directories.
+ for (unsigned Idx = 0, N = SearchDirs.size(); Idx != N; ++Idx) {
+ if (SearchDirs[Idx].isFramework()) {
+ llvm::error_code EC;
+ SmallString<128> DirNative;
+ llvm::sys::path::native(SearchDirs[Idx].getFrameworkDir()->getName(),
+ DirNative);
+
+ // Search each of the ".framework" directories to load them as modules.
+ bool IsSystem = SearchDirs[Idx].getDirCharacteristic() != SrcMgr::C_User;
+ for (llvm::sys::fs::directory_iterator Dir(DirNative.str(), EC), DirEnd;
+ Dir != DirEnd && !EC; Dir.increment(EC)) {
+ if (llvm::sys::path::extension(Dir->path()) != ".framework")
+ continue;
+
+ const DirectoryEntry *FrameworkDir = FileMgr.getDirectory(Dir->path());
+ if (!FrameworkDir)
+ continue;
+
+ // Load this framework module.
+ loadFrameworkModule(llvm::sys::path::stem(Dir->path()), FrameworkDir,
+ IsSystem);
+ }
+ continue;
+ }
+
+ // FIXME: Deal with header maps.
+ if (SearchDirs[Idx].isHeaderMap())
+ continue;
+
+ // Try to load a module map file for the search directory.
+ loadModuleMapFile(SearchDirs[Idx].getDir());
+
+ // Try to load module map files for immediate subdirectories of this search
+ // directory.
+ llvm::error_code EC;
+ SmallString<128> DirNative;
+ llvm::sys::path::native(SearchDirs[Idx].getDir()->getName(), DirNative);
+ for (llvm::sys::fs::directory_iterator Dir(DirNative.str(), EC), DirEnd;
+ Dir != DirEnd && !EC; Dir.increment(EC)) {
+ loadModuleMapFile(Dir->path());
+ }
+ }
+
+ // Populate the list of modules.
+ for (ModuleMap::module_iterator M = ModMap.module_begin(),
+ MEnd = ModMap.module_end();
+ M != MEnd; ++M) {
+ Modules.push_back(M->getValue());
+ }
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
new file mode 100644
index 0000000..535a852
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/Lexer.cpp
@@ -0,0 +1,3234 @@
+//===--- Lexer.cpp - C Language Family Lexer ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Lexer and Token interfaces.
+//
+//===----------------------------------------------------------------------===//
+//
+// TODO: GCC Diagnostics emitted by the lexer:
+// PEDWARN: (form feed|vertical tab) in preprocessing directive
+//
+// Universal characters, unicode, char mapping:
+// WARNING: `%.*s' is not in NFKC
+// WARNING: `%.*s' is not in NFC
+//
+// Other:
+// TODO: Options to support:
+// -fexec-charset,-fwide-exec-charset
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/CodeCompletionHandler.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cstring>
+using namespace clang;
+
+static void InitCharacterInfo();
+
+//===----------------------------------------------------------------------===//
+// Token Class Implementation
+//===----------------------------------------------------------------------===//
+
+/// isObjCAtKeyword - Return true if we have an ObjC keyword identifier.
+bool Token::isObjCAtKeyword(tok::ObjCKeywordKind objcKey) const {
+ if (IdentifierInfo *II = getIdentifierInfo())
+ return II->getObjCKeywordID() == objcKey;
+ return false;
+}
+
+/// getObjCKeywordID - Return the ObjC keyword kind.
+tok::ObjCKeywordKind Token::getObjCKeywordID() const {
+ IdentifierInfo *specId = getIdentifierInfo();
+ return specId ? specId->getObjCKeywordID() : tok::objc_not_keyword;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Lexer Class Implementation
+//===----------------------------------------------------------------------===//
+
+void Lexer::anchor() { }
+
+void Lexer::InitLexer(const char *BufStart, const char *BufPtr,
+ const char *BufEnd) {
+ InitCharacterInfo();
+
+ BufferStart = BufStart;
+ BufferPtr = BufPtr;
+ BufferEnd = BufEnd;
+
+ assert(BufEnd[0] == 0 &&
+ "We assume that the input buffer has a null character at the end"
+ " to simplify lexing!");
+
+ // Check whether there is a BOM at the beginning of the buffer and, if so,
+ // act accordingly. Right now we only support UTF-8 (with or without a BOM),
+ // so just skip the UTF-8 BOM if it's present.
+ if (BufferStart == BufferPtr) {
+ // Determine the size of the BOM.
+ StringRef Buf(BufferStart, BufferEnd - BufferStart);
+ size_t BOMLength = llvm::StringSwitch<size_t>(Buf)
+ .StartsWith("\xEF\xBB\xBF", 3) // UTF-8 BOM
+ .Default(0);
+
+ // Skip the BOM.
+ BufferPtr += BOMLength;
+ }
+
+ Is_PragmaLexer = false;
+ CurrentConflictMarkerState = CMK_None;
+
+ // Start of the file is a start of line.
+ IsAtStartOfLine = true;
+
+ // We are not after parsing a #.
+ ParsingPreprocessorDirective = false;
+
+ // We are not after parsing #include.
+ ParsingFilename = false;
+
+ // We are not in raw mode. Raw mode disables diagnostics and interpretation
+ // of tokens (e.g. identifiers, thus disabling macro expansion). It is used
+ // to quickly lex the tokens of the buffer, e.g. when handling a "#if 0" block
+ // or otherwise skipping over tokens.
+ LexingRawMode = false;
+
+ // Default to not keeping comments.
+ ExtendedTokenMode = 0;
+}
+
+/// Lexer constructor - Create a new lexer object for the specified buffer
+/// with the specified preprocessor managing the lexing process. This lexer
+/// assumes that the associated file buffer and Preprocessor objects will
+/// outlive it, so it doesn't take ownership of either of them.
+Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *InputFile, Preprocessor &PP)
+ : PreprocessorLexer(&PP, FID),
+ FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
+ LangOpts(PP.getLangOpts()) {
+
+ InitLexer(InputFile->getBufferStart(), InputFile->getBufferStart(),
+ InputFile->getBufferEnd());
+
+ // Default to keeping comments if the preprocessor wants them.
+ SetCommentRetentionState(PP.getCommentRetentionState());
+}
+
+/// Lexer constructor - Create a new raw lexer object. This object is only
+/// suitable for calls to 'LexRawToken'. This lexer assumes that the text
+/// range will outlive it, so it doesn't take ownership of it.
+Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts,
+ const char *BufStart, const char *BufPtr, const char *BufEnd)
+ : FileLoc(fileloc), LangOpts(langOpts) {
+
+ InitLexer(BufStart, BufPtr, BufEnd);
+
+ // We *are* in raw mode.
+ LexingRawMode = true;
+}
+
+/// Lexer constructor - Create a new raw lexer object. This object is only
+/// suitable for calls to 'LexRawToken'. This lexer assumes that the text
+/// range will outlive it, so it doesn't take ownership of it.
+Lexer::Lexer(FileID FID, const llvm::MemoryBuffer *FromFile,
+ const SourceManager &SM, const LangOptions &langOpts)
+ : FileLoc(SM.getLocForStartOfFile(FID)), LangOpts(langOpts) {
+
+ InitLexer(FromFile->getBufferStart(), FromFile->getBufferStart(),
+ FromFile->getBufferEnd());
+
+ // We *are* in raw mode.
+ LexingRawMode = true;
+}
+
+/// Create_PragmaLexer: Lexer constructor - Create a new lexer object for
+/// _Pragma expansion. This has a variety of magic semantics that this method
+/// sets up. It returns a new'd Lexer that must be delete'd when done.
+///
+/// On entrance to this routine, TokStartLoc is a macro location which has a
+/// spelling loc that indicates the bytes to be lexed for the token and an
+/// expansion location that indicates where all lexed tokens should be
+/// "expanded from".
+///
+/// FIXME: It would really be nice to make _Pragma just be a wrapper around a
+/// normal lexer that remaps tokens as they fly by. This would require making
+/// Preprocessor::Lex virtual. Given that, we could just dump in a magic lexer
+/// interface that could handle this stuff. This would pull GetMappedTokenLoc
+/// out of the critical path of the lexer!
+///
+Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
+ SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd,
+ unsigned TokLen, Preprocessor &PP) {
+ SourceManager &SM = PP.getSourceManager();
+
+ // Create the lexer as if we were going to lex the file normally.
+ FileID SpellingFID = SM.getFileID(SpellingLoc);
+ const llvm::MemoryBuffer *InputFile = SM.getBuffer(SpellingFID);
+ Lexer *L = new Lexer(SpellingFID, InputFile, PP);
+
+ // Now that the lexer is created, change the start/end locations so that we
+ // just lex the subsection of the file that we want. This is lexing from a
+ // scratch buffer.
+ const char *StrData = SM.getCharacterData(SpellingLoc);
+
+ L->BufferPtr = StrData;
+ L->BufferEnd = StrData+TokLen;
+ assert(L->BufferEnd[0] == 0 && "Buffer is not nul terminated!");
+
+ // Set the SourceLocation with the remapping information. This ensures that
+ // GetMappedTokenLoc will remap the tokens as they are lexed.
+ L->FileLoc = SM.createExpansionLoc(SM.getLocForStartOfFile(SpellingFID),
+ ExpansionLocStart,
+ ExpansionLocEnd, TokLen);
+
+ // Ensure that the lexer thinks it is inside a directive, so that end \n will
+ // return an EOD token.
+ L->ParsingPreprocessorDirective = true;
+
+ // This lexer really is for _Pragma.
+ L->Is_PragmaLexer = true;
+ return L;
+}
+
+
+/// Stringify - Convert the specified string into a form that can be embedded
+/// in a C string or character literal, escaping '\' and the quote character.
+/// Note that no surrounding quotes are added.
+std::string Lexer::Stringify(const std::string &Str, bool Charify) {
+ std::string Result = Str;
+ char Quote = Charify ? '\'' : '"';
+ for (unsigned i = 0, e = Result.size(); i != e; ++i) {
+ if (Result[i] == '\\' || Result[i] == Quote) {
+ Result.insert(Result.begin()+i, '\\');
+ ++i; ++e;
+ }
+ }
+ return Result;
+}
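+
+// For example, the three-character input  a\"  (an 'a', a backslash and a
+// double quote) becomes  a\\\"  so it can be spliced into a C string literal;
+// with Charify=true the character escaped alongside '\' is ' rather than ".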
+
+/// Stringify - Convert the specified string into a C string by escaping '\'
+/// and " characters. This does not add surrounding ""'s to the string.
+void Lexer::Stringify(SmallVectorImpl<char> &Str) {
+ for (unsigned i = 0, e = Str.size(); i != e; ++i) {
+ if (Str[i] == '\\' || Str[i] == '"') {
+ Str.insert(Str.begin()+i, '\\');
+ ++i; ++e;
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Token Spelling
+//===----------------------------------------------------------------------===//
+
+/// getSpelling() - Return the 'spelling' of this token. The spelling of a
+/// token is the sequence of characters used to represent it in the source
+/// file after trigraph expansion and escaped-newline folding. In particular,
+/// this wants to get the true, uncanonicalized spelling of things like
+/// digraphs, UCNs, etc.
+StringRef Lexer::getSpelling(SourceLocation loc,
+ SmallVectorImpl<char> &buffer,
+ const SourceManager &SM,
+ const LangOptions &options,
+ bool *invalid) {
+ // Break down the source location.
+ std::pair<FileID, unsigned> locInfo = SM.getDecomposedLoc(loc);
+
+ // Try to the load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SM.getBufferData(locInfo.first, &invalidTemp);
+ if (invalidTemp) {
+ if (invalid) *invalid = true;
+ return StringRef();
+ }
+
+ const char *tokenBegin = file.data() + locInfo.second;
+
+ // Lex from the start of the given location.
+ Lexer lexer(SM.getLocForStartOfFile(locInfo.first), options,
+ file.begin(), tokenBegin, file.end());
+ Token token;
+ lexer.LexFromRawLexer(token);
+
+ unsigned length = token.getLength();
+
+ // Common case: no need for cleaning.
+ if (!token.needsCleaning())
+ return StringRef(tokenBegin, length);
+
+ // Hard case, we need to relex the characters into the string.
+ buffer.clear();
+ buffer.reserve(length);
+
+ for (const char *ti = tokenBegin, *te = ti + length; ti != te; ) {
+ unsigned charSize;
+ buffer.push_back(Lexer::getCharAndSizeNoWarn(ti, charSize, options));
+ ti += charSize;
+ }
+
+ return StringRef(buffer.data(), buffer.size());
+}
+
+/// getSpelling() - Return the 'spelling' of this token. The spelling of a
+/// token is the sequence of characters used to represent it in the source
+/// file after trigraph expansion and escaped-newline folding. In particular,
+/// this wants to get the true, uncanonicalized spelling of things like
+/// digraphs, UCNs, etc.
+std::string Lexer::getSpelling(const Token &Tok, const SourceManager &SourceMgr,
+ const LangOptions &LangOpts, bool *Invalid) {
+ assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
+
+ // If this token contains nothing interesting, return it directly.
+ bool CharDataInvalid = false;
+ const char* TokStart = SourceMgr.getCharacterData(Tok.getLocation(),
+ &CharDataInvalid);
+ if (Invalid)
+ *Invalid = CharDataInvalid;
+ if (CharDataInvalid)
+ return std::string();
+
+ if (!Tok.needsCleaning())
+ return std::string(TokStart, TokStart+Tok.getLength());
+
+ std::string Result;
+ Result.reserve(Tok.getLength());
+
+ // Otherwise, hard case, relex the characters into the string.
+ for (const char *Ptr = TokStart, *End = TokStart+Tok.getLength();
+ Ptr != End; ) {
+ unsigned CharSize;
+ Result.push_back(Lexer::getCharAndSizeNoWarn(Ptr, CharSize, LangOpts));
+ Ptr += CharSize;
+ }
+ assert(Result.size() != unsigned(Tok.getLength()) &&
+ "NeedsCleaning flag set on something that didn't need cleaning!");
+ return Result;
+}
+
+/// getSpelling - This method is used to get the spelling of a token into a
+/// preallocated buffer, instead of as an std::string. The caller is required
+/// to allocate enough space for the token, which is guaranteed to be at least
+/// Tok.getLength() bytes long. The actual length of the token is returned.
+///
+/// Note that this method may do two possible things: it may either fill in
+/// the buffer specified with characters, or it may *change the input pointer*
+/// to point to a constant buffer with the data already in it (avoiding a
+/// copy). The caller is not allowed to modify the returned buffer pointer
+/// if an internal buffer is returned.
+unsigned Lexer::getSpelling(const Token &Tok, const char *&Buffer,
+ const SourceManager &SourceMgr,
+ const LangOptions &LangOpts, bool *Invalid) {
+ assert((int)Tok.getLength() >= 0 && "Token character range is bogus!");
+
+ const char *TokStart = 0;
+ // NOTE: this has to be checked *before* testing for an IdentifierInfo.
+ if (Tok.is(tok::raw_identifier))
+ TokStart = Tok.getRawIdentifierData();
+ else if (const IdentifierInfo *II = Tok.getIdentifierInfo()) {
+ // Just return the string from the identifier table, which is very quick.
+ Buffer = II->getNameStart();
+ return II->getLength();
+ }
+
+ // NOTE: this can be checked even after testing for an IdentifierInfo.
+ if (Tok.isLiteral())
+ TokStart = Tok.getLiteralData();
+
+ if (TokStart == 0) {
+ // Compute the start of the token in the input lexer buffer.
+ bool CharDataInvalid = false;
+ TokStart = SourceMgr.getCharacterData(Tok.getLocation(), &CharDataInvalid);
+ if (Invalid)
+ *Invalid = CharDataInvalid;
+ if (CharDataInvalid) {
+ Buffer = "";
+ return 0;
+ }
+ }
+
+ // If this token contains nothing interesting, return it directly.
+ if (!Tok.needsCleaning()) {
+ Buffer = TokStart;
+ return Tok.getLength();
+ }
+
+ // Otherwise, hard case, relex the characters into the string.
+ char *OutBuf = const_cast<char*>(Buffer);
+ for (const char *Ptr = TokStart, *End = TokStart+Tok.getLength();
+ Ptr != End; ) {
+ unsigned CharSize;
+ *OutBuf++ = Lexer::getCharAndSizeNoWarn(Ptr, CharSize, LangOpts);
+ Ptr += CharSize;
+ }
+ assert(unsigned(OutBuf-Buffer) != Tok.getLength() &&
+ "NeedsCleaning flag set on something that didn't need cleaning!");
+
+ return OutBuf-Buffer;
+}
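+
+// A usage sketch for this buffer-based overload (hypothetical caller; Tok, SM
+// and LangOpts come from the surrounding lexing context):
+//
+//   SmallString<64> Storage;
+//   Storage.resize(Tok.getLength());          // caller provides the space
+//   const char *Ptr = Storage.data();
+//   unsigned Len = Lexer::getSpelling(Tok, Ptr, SM, LangOpts);
+//   StringRef Spelling(Ptr, Len);  // Ptr may now point into an internal buffer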
+
+
+
+static bool isWhitespace(unsigned char c);
+
+/// MeasureTokenLength - Relex the token at the specified location and return
+/// its length in bytes in the input file. If the token needs cleaning (e.g.
+/// includes a trigraph or an escaped newline) then this count includes bytes
+/// that are part of that.
+unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ // TODO: this could be special cased for common tokens like identifiers, ')',
+ // etc to make this faster, if it mattered. Just look at StrData[0] to handle
+ // all obviously single-char tokens. This could use
+ // Lexer::isObviouslySimpleCharacter for example to handle identifiers or
+ // something.
+
+ // If this comes from a macro expansion, we really do want the macro name, not
+ // the token this macro expanded to.
+ Loc = SM.getExpansionLoc(Loc);
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+ bool Invalid = false;
+ StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
+ if (Invalid)
+ return 0;
+
+ const char *StrData = Buffer.data()+LocInfo.second;
+
+ if (isWhitespace(StrData[0]))
+ return 0;
+
+ // Create a lexer starting at the beginning of this token.
+ Lexer TheLexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts,
+ Buffer.begin(), StrData, Buffer.end());
+ TheLexer.SetCommentRetentionState(true);
+ Token TheTok;
+ TheLexer.LexFromRawLexer(TheTok);
+ return TheTok.getLength();
+}
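+
+// For instance, with Loc at the start of the identifier 'foobar' this returns
+// 6; if the identifier were written with an escaped newline in the middle,
+// the backslash-newline bytes would be counted as well.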
+
+static SourceLocation getBeginningOfFileToken(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ assert(Loc.isFileID());
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+ if (LocInfo.first.isInvalid())
+ return Loc;
+
+ bool Invalid = false;
+ StringRef Buffer = SM.getBufferData(LocInfo.first, &Invalid);
+ if (Invalid)
+ return Loc;
+
+ // Back up from the current location until we hit the beginning of a line
+ // (or the buffer). We'll relex from that point.
+ const char *BufStart = Buffer.data();
+ if (LocInfo.second >= Buffer.size())
+ return Loc;
+
+ const char *StrData = BufStart+LocInfo.second;
+ if (StrData[0] == '\n' || StrData[0] == '\r')
+ return Loc;
+
+ const char *LexStart = StrData;
+ while (LexStart != BufStart) {
+ if (LexStart[0] == '\n' || LexStart[0] == '\r') {
+ ++LexStart;
+ break;
+ }
+
+ --LexStart;
+ }
+
+ // Create a lexer starting at the beginning of this token.
+ SourceLocation LexerStartLoc = Loc.getLocWithOffset(-LocInfo.second);
+ Lexer TheLexer(LexerStartLoc, LangOpts, BufStart, LexStart, Buffer.end());
+ TheLexer.SetCommentRetentionState(true);
+
+ // Lex tokens until we find the token that contains the source location.
+ Token TheTok;
+ do {
+ TheLexer.LexFromRawLexer(TheTok);
+
+ if (TheLexer.getBufferLocation() > StrData) {
+ // Lexing this token has taken the lexer past the source location we're
+ // looking for. If the current token encompasses our source location,
+ // return the beginning of that token.
+ if (TheLexer.getBufferLocation() - TheTok.getLength() <= StrData)
+ return TheTok.getLocation();
+
+ // We ended up skipping over the source location entirely, which means
+ // that it points into whitespace. We're done here.
+ break;
+ }
+ } while (TheTok.getKind() != tok::eof);
+
+ // We've passed our source location; just return the original source location.
+ return Loc;
+}
+
+SourceLocation Lexer::GetBeginningOfToken(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ if (Loc.isFileID())
+ return getBeginningOfFileToken(Loc, SM, LangOpts);
+
+ if (!SM.isMacroArgExpansion(Loc))
+ return Loc;
+
+ SourceLocation FileLoc = SM.getSpellingLoc(Loc);
+ SourceLocation BeginFileLoc = getBeginningOfFileToken(FileLoc, SM, LangOpts);
+ std::pair<FileID, unsigned> FileLocInfo = SM.getDecomposedLoc(FileLoc);
+ std::pair<FileID, unsigned> BeginFileLocInfo
+ = SM.getDecomposedLoc(BeginFileLoc);
+ assert(FileLocInfo.first == BeginFileLocInfo.first &&
+ FileLocInfo.second >= BeginFileLocInfo.second);
+ return Loc.getLocWithOffset(BeginFileLocInfo.second - FileLocInfo.second);
+}
+
+namespace {
+ enum PreambleDirectiveKind {
+ PDK_Skipped,
+ PDK_StartIf,
+ PDK_EndIf,
+ PDK_Unknown
+ };
+}
+
+std::pair<unsigned, bool>
+Lexer::ComputePreamble(const llvm::MemoryBuffer *Buffer,
+ const LangOptions &LangOpts, unsigned MaxLines) {
+ // Create a lexer starting at the beginning of the file. Note that we use a
+ // "fake" file source location at offset 1 so that the lexer will track our
+ // position within the file.
+ const unsigned StartOffset = 1;
+ SourceLocation StartLoc = SourceLocation::getFromRawEncoding(StartOffset);
+ Lexer TheLexer(StartLoc, LangOpts, Buffer->getBufferStart(),
+ Buffer->getBufferStart(), Buffer->getBufferEnd());
+
+ bool InPreprocessorDirective = false;
+ Token TheTok;
+ Token IfStartTok;
+ unsigned IfCount = 0;
+
+ unsigned MaxLineOffset = 0;
+ if (MaxLines) {
+ const char *CurPtr = Buffer->getBufferStart();
+ unsigned CurLine = 0;
+ while (CurPtr != Buffer->getBufferEnd()) {
+ char ch = *CurPtr++;
+ if (ch == '\n') {
+ ++CurLine;
+ if (CurLine == MaxLines)
+ break;
+ }
+ }
+ if (CurPtr != Buffer->getBufferEnd())
+ MaxLineOffset = CurPtr - Buffer->getBufferStart();
+ }
+
+ do {
+ TheLexer.LexFromRawLexer(TheTok);
+
+ if (InPreprocessorDirective) {
+ // If we've hit the end of the file, we're done.
+ if (TheTok.getKind() == tok::eof) {
+ InPreprocessorDirective = false;
+ break;
+ }
+
+ // If we haven't hit the end of the preprocessor directive, skip this
+ // token.
+ if (!TheTok.isAtStartOfLine())
+ continue;
+
+ // We've passed the end of the preprocessor directive, and will look
+ // at this token again below.
+ InPreprocessorDirective = false;
+ }
+
+ // Keep track of the # of lines in the preamble.
+ if (TheTok.isAtStartOfLine()) {
+ unsigned TokOffset = TheTok.getLocation().getRawEncoding() - StartOffset;
+
+ // If we were asked to limit the number of lines in the preamble,
+ // and we're about to exceed that limit, we're done.
+ if (MaxLineOffset && TokOffset >= MaxLineOffset)
+ break;
+ }
+
+ // Comments are okay; skip over them.
+ if (TheTok.getKind() == tok::comment)
+ continue;
+
+ if (TheTok.isAtStartOfLine() && TheTok.getKind() == tok::hash) {
+ // This is the start of a preprocessor directive.
+ Token HashTok = TheTok;
+ InPreprocessorDirective = true;
+
+ // Figure out which directive this is. Since we're lexing raw tokens,
+ // we don't have an identifier table available. Instead, just look at
+ // the raw identifier to recognize and categorize preprocessor directives.
+ TheLexer.LexFromRawLexer(TheTok);
+ if (TheTok.getKind() == tok::raw_identifier && !TheTok.needsCleaning()) {
+ StringRef Keyword(TheTok.getRawIdentifierData(),
+ TheTok.getLength());
+ PreambleDirectiveKind PDK
+ = llvm::StringSwitch<PreambleDirectiveKind>(Keyword)
+ .Case("include", PDK_Skipped)
+ .Case("__include_macros", PDK_Skipped)
+ .Case("define", PDK_Skipped)
+ .Case("undef", PDK_Skipped)
+ .Case("line", PDK_Skipped)
+ .Case("error", PDK_Skipped)
+ .Case("pragma", PDK_Skipped)
+ .Case("import", PDK_Skipped)
+ .Case("include_next", PDK_Skipped)
+ .Case("warning", PDK_Skipped)
+ .Case("ident", PDK_Skipped)
+ .Case("sccs", PDK_Skipped)
+ .Case("assert", PDK_Skipped)
+ .Case("unassert", PDK_Skipped)
+ .Case("if", PDK_StartIf)
+ .Case("ifdef", PDK_StartIf)
+ .Case("ifndef", PDK_StartIf)
+ .Case("elif", PDK_Skipped)
+ .Case("else", PDK_Skipped)
+ .Case("endif", PDK_EndIf)
+ .Default(PDK_Unknown);
+
+ switch (PDK) {
+ case PDK_Skipped:
+ continue;
+
+ case PDK_StartIf:
+ if (IfCount == 0)
+ IfStartTok = HashTok;
+
+ ++IfCount;
+ continue;
+
+ case PDK_EndIf:
+ // Mismatched #endif. The preamble ends here.
+ if (IfCount == 0)
+ break;
+
+ --IfCount;
+ continue;
+
+ case PDK_Unknown:
+ // We don't know what this directive is; stop at the '#'.
+ break;
+ }
+ }
+
+ // We only end up here if we didn't recognize the preprocessor
+ // directive or it was one that can't occur in the preamble at this
+ // point. Roll back the current token to the location of the '#'.
+ InPreprocessorDirective = false;
+ TheTok = HashTok;
+ }
+
+ // We hit a token that we don't recognize as being in the
+ // "preprocessing only" part of the file, so we're no longer in
+ // the preamble.
+ break;
+ } while (true);
+
+ SourceLocation End = IfCount? IfStartTok.getLocation() : TheTok.getLocation();
+ return std::make_pair(End.getRawEncoding() - StartLoc.getRawEncoding(),
+ IfCount? IfStartTok.isAtStartOfLine()
+ : TheTok.isAtStartOfLine());
+}
+
+
+/// AdvanceToTokenCharacter - Given a location that specifies the start of a
+/// token, return a new location that specifies a character within the token.
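+///
+/// For example (a sketch, assuming a SourceManager SM, LangOptions LO, and a
+/// token start location TokStart are in scope), the location of the third
+/// character of the token is:
+/// \code
+///   SourceLocation ThirdChar = Lexer::AdvanceToTokenCharacter(TokStart, 2,
+///                                                             SM, LO);
+/// \endcode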
+SourceLocation Lexer::AdvanceToTokenCharacter(SourceLocation TokStart,
+ unsigned CharNo,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ // Figure out how many physical characters away the specified expansion
+ // character is. This needs to take into consideration newlines and
+ // trigraphs.
+ bool Invalid = false;
+ const char *TokPtr = SM.getCharacterData(TokStart, &Invalid);
+
+ // If they request the first char of the token, we're trivially done.
+ if (Invalid || (CharNo == 0 && Lexer::isObviouslySimpleCharacter(*TokPtr)))
+ return TokStart;
+
+ unsigned PhysOffset = 0;
+
+ // The usual case is that tokens don't contain anything interesting. Skip
+ // over the uninteresting characters. If a token only consists of simple
+ // chars, this method is extremely fast.
+ while (Lexer::isObviouslySimpleCharacter(*TokPtr)) {
+ if (CharNo == 0)
+ return TokStart.getLocWithOffset(PhysOffset);
+ ++TokPtr, --CharNo, ++PhysOffset;
+ }
+
+ // If we have a character that may be a trigraph or escaped newline, use a
+ // lexer to parse it correctly.
+ for (; CharNo; --CharNo) {
+ unsigned Size;
+ Lexer::getCharAndSizeNoWarn(TokPtr, Size, LangOpts);
+ TokPtr += Size;
+ PhysOffset += Size;
+ }
+
+ // Final detail: if we end up on an escaped newline, we want to return the
+ // location of the actual byte of the token. For example foo\<newline>bar
+ // advanced by 3 should return the location of b, not of \\. One compounding
+ // detail of this is that the escape may be made by a trigraph.
+ if (!Lexer::isObviouslySimpleCharacter(*TokPtr))
+ PhysOffset += Lexer::SkipEscapedNewLines(TokPtr)-TokPtr;
+
+ return TokStart.getLocWithOffset(PhysOffset);
+}
+
+/// \brief Computes the source location just past the end of the
+/// token at this source location.
+///
+/// This routine can be used to produce a source location that
+/// points just past the end of the token referenced by \p Loc, and
+/// is generally used when a diagnostic needs to point just after a
+/// token where it expected something different than it received. If
+/// the returned source location would not be meaningful (e.g., if
+/// it points into a macro), this routine returns an invalid
+/// source location.
+///
+/// \param Offset an offset from the end of the token, where the source
+/// location should refer to. The default offset (0) produces a source
+/// location pointing just past the end of the token; an offset of 1 produces
+/// a source location pointing to the last character in the token, etc.
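+///
+/// For example (a sketch, assuming a SourceManager SM, LangOptions LO, and a
+/// Token Tok are in scope), a location just past the token is obtained with:
+/// \code
+///   SourceLocation AfterTok =
+///       Lexer::getLocForEndOfToken(Tok.getLocation(), 0, SM, LO);
+/// \endcode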
+SourceLocation Lexer::getLocForEndOfToken(SourceLocation Loc, unsigned Offset,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ if (Loc.isInvalid())
+ return SourceLocation();
+
+ if (Loc.isMacroID()) {
+ if (Offset > 0 || !isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
+ return SourceLocation(); // Points inside the macro expansion.
+ }
+
+ unsigned Len = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
+ if (Len > Offset)
+ Len = Len - Offset;
+ else
+ return Loc;
+
+ return Loc.getLocWithOffset(Len);
+}
+
+/// \brief Returns true if the given MacroID location points at the first
+/// token of the macro expansion.
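+///
+/// For example (a sketch, assuming a SourceManager SM, LangOptions LO, and a
+/// macro location Loc are in scope):
+/// \code
+///   SourceLocation MacroBegin;
+///   if (Lexer::isAtStartOfMacroExpansion(Loc, SM, LO, &MacroBegin)) {
+///     // MacroBegin is the file location where the macro expansion began.
+///   }
+/// \endcode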
+bool Lexer::isAtStartOfMacroExpansion(SourceLocation loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ SourceLocation *MacroBegin) {
+ assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");
+
+ std::pair<FileID, unsigned> infoLoc = SM.getDecomposedLoc(loc);
+  // FIXME: If the token comes from the macro token paste operator ('##'),
+  // this function will always return false.
+ if (infoLoc.second > 0)
+ return false; // Does not point at the start of token.
+
+ SourceLocation expansionLoc =
+ SM.getSLocEntry(infoLoc.first).getExpansion().getExpansionLocStart();
+ if (expansionLoc.isFileID()) {
+ // No other macro expansions, this is the first.
+ if (MacroBegin)
+ *MacroBegin = expansionLoc;
+ return true;
+ }
+
+ return isAtStartOfMacroExpansion(expansionLoc, SM, LangOpts, MacroBegin);
+}
+
+/// \brief Returns true if the given MacroID location points at the last
+/// token of the macro expansion.
+bool Lexer::isAtEndOfMacroExpansion(SourceLocation loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ SourceLocation *MacroEnd) {
+ assert(loc.isValid() && loc.isMacroID() && "Expected a valid macro loc");
+
+ SourceLocation spellLoc = SM.getSpellingLoc(loc);
+ unsigned tokLen = MeasureTokenLength(spellLoc, SM, LangOpts);
+ if (tokLen == 0)
+ return false;
+
+ FileID FID = SM.getFileID(loc);
+ SourceLocation afterLoc = loc.getLocWithOffset(tokLen+1);
+ if (SM.isInFileID(afterLoc, FID))
+ return false; // Still in the same FileID, does not point to the last token.
+
+  // FIXME: If the token comes from the macro token paste operator ('##')
+  // or the stringify operator ('#'), this function will always return false.
+
+ SourceLocation expansionLoc =
+ SM.getSLocEntry(FID).getExpansion().getExpansionLocEnd();
+ if (expansionLoc.isFileID()) {
+ // No other macro expansions.
+ if (MacroEnd)
+ *MacroEnd = expansionLoc;
+ return true;
+ }
+
+ return isAtEndOfMacroExpansion(expansionLoc, SM, LangOpts, MacroEnd);
+}
+
+static CharSourceRange makeRangeFromFileLocs(CharSourceRange Range,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ SourceLocation Begin = Range.getBegin();
+ SourceLocation End = Range.getEnd();
+ assert(Begin.isFileID() && End.isFileID());
+ if (Range.isTokenRange()) {
+    End = Lexer::getLocForEndOfToken(End, 0, SM, LangOpts);
+ if (End.isInvalid())
+ return CharSourceRange();
+ }
+
+ // Break down the source locations.
+ FileID FID;
+ unsigned BeginOffs;
+ llvm::tie(FID, BeginOffs) = SM.getDecomposedLoc(Begin);
+ if (FID.isInvalid())
+ return CharSourceRange();
+
+ unsigned EndOffs;
+ if (!SM.isInFileID(End, FID, &EndOffs) ||
+ BeginOffs > EndOffs)
+ return CharSourceRange();
+
+ return CharSourceRange::getCharRange(Begin, End);
+}
+
+/// \brief Accepts a range and returns a character range with file locations.
+///
+/// Returns a null range if a part of the range resides inside a macro
+/// expansion or the range does not reside on the same FileID.
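+///
+/// For example (a sketch, assuming a SourceManager SM, LangOptions LO, and a
+/// SourceRange R over some tokens are in scope), a token range can be turned
+/// into a plain character range within a single file:
+/// \code
+///   CharSourceRange FileRange =
+///       Lexer::makeFileCharRange(CharSourceRange::getTokenRange(R), SM, LO);
+///   if (FileRange.isInvalid()) {
+///     // The range could not be mapped onto a contiguous region of one file.
+///   }
+/// \endcode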
+CharSourceRange Lexer::makeFileCharRange(CharSourceRange Range,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ SourceLocation Begin = Range.getBegin();
+ SourceLocation End = Range.getEnd();
+ if (Begin.isInvalid() || End.isInvalid())
+ return CharSourceRange();
+
+ if (Begin.isFileID() && End.isFileID())
+ return makeRangeFromFileLocs(Range, SM, LangOpts);
+
+ if (Begin.isMacroID() && End.isFileID()) {
+ if (!isAtStartOfMacroExpansion(Begin, SM, LangOpts, &Begin))
+ return CharSourceRange();
+ Range.setBegin(Begin);
+ return makeRangeFromFileLocs(Range, SM, LangOpts);
+ }
+
+ if (Begin.isFileID() && End.isMacroID()) {
+ if ((Range.isTokenRange() && !isAtEndOfMacroExpansion(End, SM, LangOpts,
+ &End)) ||
+ (Range.isCharRange() && !isAtStartOfMacroExpansion(End, SM, LangOpts,
+ &End)))
+ return CharSourceRange();
+ Range.setEnd(End);
+ return makeRangeFromFileLocs(Range, SM, LangOpts);
+ }
+
+ assert(Begin.isMacroID() && End.isMacroID());
+ SourceLocation MacroBegin, MacroEnd;
+ if (isAtStartOfMacroExpansion(Begin, SM, LangOpts, &MacroBegin) &&
+ ((Range.isTokenRange() && isAtEndOfMacroExpansion(End, SM, LangOpts,
+ &MacroEnd)) ||
+ (Range.isCharRange() && isAtStartOfMacroExpansion(End, SM, LangOpts,
+ &MacroEnd)))) {
+ Range.setBegin(MacroBegin);
+ Range.setEnd(MacroEnd);
+ return makeRangeFromFileLocs(Range, SM, LangOpts);
+ }
+
+ FileID FID;
+ unsigned BeginOffs;
+ llvm::tie(FID, BeginOffs) = SM.getDecomposedLoc(Begin);
+ if (FID.isInvalid())
+ return CharSourceRange();
+
+ unsigned EndOffs;
+ if (!SM.isInFileID(End, FID, &EndOffs) ||
+ BeginOffs > EndOffs)
+ return CharSourceRange();
+
+ const SrcMgr::SLocEntry *E = &SM.getSLocEntry(FID);
+ const SrcMgr::ExpansionInfo &Expansion = E->getExpansion();
+ if (Expansion.isMacroArgExpansion() &&
+ Expansion.getSpellingLoc().isFileID()) {
+ SourceLocation SpellLoc = Expansion.getSpellingLoc();
+ Range.setBegin(SpellLoc.getLocWithOffset(BeginOffs));
+ Range.setEnd(SpellLoc.getLocWithOffset(EndOffs));
+ return makeRangeFromFileLocs(Range, SM, LangOpts);
+ }
+
+ return CharSourceRange();
+}
+
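+/// getSourceText - Return the text covered by the given range, or an empty
+/// StringRef (setting *Invalid when provided) if the range cannot be mapped
+/// onto a single file. A minimal usage sketch (assuming a SourceManager SM,
+/// LangOptions LO, and a Token Tok are in scope):
+/// \code
+///   bool Invalid = false;
+///   StringRef Spelling = Lexer::getSourceText(
+///       CharSourceRange::getTokenRange(Tok.getLocation(), Tok.getLocation()),
+///       SM, LO, &Invalid);
+/// \endcode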
+StringRef Lexer::getSourceText(CharSourceRange Range,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ bool *Invalid) {
+ Range = makeFileCharRange(Range, SM, LangOpts);
+ if (Range.isInvalid()) {
+ if (Invalid) *Invalid = true;
+ return StringRef();
+ }
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> beginInfo = SM.getDecomposedLoc(Range.getBegin());
+ if (beginInfo.first.isInvalid()) {
+ if (Invalid) *Invalid = true;
+ return StringRef();
+ }
+
+ unsigned EndOffs;
+ if (!SM.isInFileID(Range.getEnd(), beginInfo.first, &EndOffs) ||
+ beginInfo.second > EndOffs) {
+ if (Invalid) *Invalid = true;
+ return StringRef();
+ }
+
+  // Try to load the file buffer.
+ bool invalidTemp = false;
+ StringRef file = SM.getBufferData(beginInfo.first, &invalidTemp);
+ if (invalidTemp) {
+ if (Invalid) *Invalid = true;
+ return StringRef();
+ }
+
+ if (Invalid) *Invalid = false;
+ return file.substr(beginInfo.second, EndOffs - beginInfo.second);
+}
+
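+/// getImmediateMacroName - Retrieve the name of the macro responsible for the
+/// immediate expansion of the given macro location, looking through macro
+/// argument expansions as needed. A minimal usage sketch (assuming a
+/// SourceManager SM, LangOptions LO, and a macro location MacroLoc are in
+/// scope):
+/// \code
+///   if (MacroLoc.isMacroID()) {
+///     StringRef MacroName = Lexer::getImmediateMacroName(MacroLoc, SM, LO);
+///     // e.g. "MAC1" when MacroLoc comes from an expansion of MAC1(...).
+///   }
+/// \endcode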
+StringRef Lexer::getImmediateMacroName(SourceLocation Loc,
+ const SourceManager &SM,
+ const LangOptions &LangOpts) {
+  assert(Loc.isMacroID() && "Only reasonable to call this on macros");
+
+ // Find the location of the immediate macro expansion.
+ while (1) {
+ FileID FID = SM.getFileID(Loc);
+ const SrcMgr::SLocEntry *E = &SM.getSLocEntry(FID);
+ const SrcMgr::ExpansionInfo &Expansion = E->getExpansion();
+ Loc = Expansion.getExpansionLocStart();
+ if (!Expansion.isMacroArgExpansion())
+ break;
+
+ // For macro arguments we need to check that the argument did not come
+ // from an inner macro, e.g: "MAC1( MAC2(foo) )"
+
+ // Loc points to the argument id of the macro definition, move to the
+ // macro expansion.
+ Loc = SM.getImmediateExpansionRange(Loc).first;
+ SourceLocation SpellLoc = Expansion.getSpellingLoc();
+ if (SpellLoc.isFileID())
+ break; // No inner macro.
+
+ // If spelling location resides in the same FileID as macro expansion
+ // location, it means there is no inner macro.
+ FileID MacroFID = SM.getFileID(Loc);
+ if (SM.isInFileID(SpellLoc, MacroFID))
+ break;
+
+ // Argument came from inner macro.
+ Loc = SpellLoc;
+ }
+
+ // Find the spelling location of the start of the non-argument expansion
+ // range. This is where the macro name was spelled in order to begin
+ // expanding this macro.
+ Loc = SM.getSpellingLoc(Loc);
+
+ // Dig out the buffer where the macro name was spelled and the extents of the
+ // name so that we can render it into the expansion note.
+ std::pair<FileID, unsigned> ExpansionInfo = SM.getDecomposedLoc(Loc);
+ unsigned MacroTokenLength = Lexer::MeasureTokenLength(Loc, SM, LangOpts);
+ StringRef ExpansionBuffer = SM.getBufferData(ExpansionInfo.first);
+ return ExpansionBuffer.substr(ExpansionInfo.second, MacroTokenLength);
+}
+
+//===----------------------------------------------------------------------===//
+// Character information.
+//===----------------------------------------------------------------------===//
+
+enum {
+ CHAR_HORZ_WS = 0x01, // ' ', '\t', '\f', '\v'. Note, no '\0'
+ CHAR_VERT_WS = 0x02, // '\r', '\n'
+ CHAR_LETTER = 0x04, // a-z,A-Z
+ CHAR_NUMBER = 0x08, // 0-9
+ CHAR_UNDER = 0x10, // _
+ CHAR_PERIOD = 0x20, // .
+ CHAR_RAWDEL = 0x40 // {}[]#<>%:;?*+-/^&|~!=,"'
+};
+
+// Statically initialize CharInfo table based on ASCII character set
+// Reference: FreeBSD 7.2 /usr/share/misc/ascii
+static const unsigned char CharInfo[256] =
+{
+// 0 NUL 1 SOH 2 STX 3 ETX
+// 4 EOT 5 ENQ 6 ACK 7 BEL
+ 0 , 0 , 0 , 0 ,
+ 0 , 0 , 0 , 0 ,
+// 8 BS 9 HT 10 NL 11 VT
+//12 NP 13 CR 14 SO 15 SI
+ 0 , CHAR_HORZ_WS, CHAR_VERT_WS, CHAR_HORZ_WS,
+ CHAR_HORZ_WS, CHAR_VERT_WS, 0 , 0 ,
+//16 DLE 17 DC1 18 DC2 19 DC3
+//20 DC4 21 NAK 22 SYN 23 ETB
+ 0 , 0 , 0 , 0 ,
+ 0 , 0 , 0 , 0 ,
+//24 CAN 25 EM 26 SUB 27 ESC
+//28 FS 29 GS 30 RS 31 US
+ 0 , 0 , 0 , 0 ,
+ 0 , 0 , 0 , 0 ,
+//32 SP 33 ! 34 " 35 #
+//36 $ 37 % 38 & 39 '
+ CHAR_HORZ_WS, CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL ,
+ 0 , CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL ,
+//40 ( 41 ) 42 * 43 +
+//44 , 45 - 46 . 47 /
+ 0 , 0 , CHAR_RAWDEL , CHAR_RAWDEL ,
+ CHAR_RAWDEL , CHAR_RAWDEL , CHAR_PERIOD , CHAR_RAWDEL ,
+//48 0 49 1 50 2 51 3
+//52 4 53 5 54 6 55 7
+ CHAR_NUMBER , CHAR_NUMBER , CHAR_NUMBER , CHAR_NUMBER ,
+ CHAR_NUMBER , CHAR_NUMBER , CHAR_NUMBER , CHAR_NUMBER ,
+//56 8 57 9 58 : 59 ;
+//60 < 61 = 62 > 63 ?
+ CHAR_NUMBER , CHAR_NUMBER , CHAR_RAWDEL , CHAR_RAWDEL ,
+ CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL ,
+//64 @ 65 A 66 B 67 C
+//68 D 69 E 70 F 71 G
+ 0 , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
+ CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
+//72 H 73 I 74 J 75 K
+//76 L 77 M 78 N 79 O
+ CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
+ CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
+//80 P 81 Q 82 R 83 S
+//84 T 85 U 86 V 87 W
+ CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
+ CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
+//88 X 89 Y 90 Z 91 [
+//92 \ 93 ] 94 ^ 95 _
+ CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_RAWDEL ,
+ 0 , CHAR_RAWDEL , CHAR_RAWDEL , CHAR_UNDER ,
+//96 ` 97 a 98 b 99 c
+//100 d 101 e 102 f 103 g
+ 0 , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
+ CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
+//104 h 105 i 106 j 107 k
+//108 l 109 m 110 n 111 o
+ CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
+ CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
+//112 p 113 q 114 r 115 s
+//116 t 117 u 118 v 119 w
+ CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
+ CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_LETTER ,
+//120 x 121 y 122 z 123 {
+//124 | 125 } 126 ~ 127 DEL
+ CHAR_LETTER , CHAR_LETTER , CHAR_LETTER , CHAR_RAWDEL ,
+ CHAR_RAWDEL , CHAR_RAWDEL , CHAR_RAWDEL , 0
+};
+
+static void InitCharacterInfo() {
+ static bool isInited = false;
+ if (isInited) return;
+ // check the statically-initialized CharInfo table
+ assert(CHAR_HORZ_WS == CharInfo[(int)' ']);
+ assert(CHAR_HORZ_WS == CharInfo[(int)'\t']);
+ assert(CHAR_HORZ_WS == CharInfo[(int)'\f']);
+ assert(CHAR_HORZ_WS == CharInfo[(int)'\v']);
+ assert(CHAR_VERT_WS == CharInfo[(int)'\n']);
+ assert(CHAR_VERT_WS == CharInfo[(int)'\r']);
+ assert(CHAR_UNDER == CharInfo[(int)'_']);
+ assert(CHAR_PERIOD == CharInfo[(int)'.']);
+ for (unsigned i = 'a'; i <= 'z'; ++i) {
+ assert(CHAR_LETTER == CharInfo[i]);
+ assert(CHAR_LETTER == CharInfo[i+'A'-'a']);
+ }
+ for (unsigned i = '0'; i <= '9'; ++i)
+ assert(CHAR_NUMBER == CharInfo[i]);
+
+ isInited = true;
+}
+
+
+/// isIdentifierHead - Return true if this is the first character of an
+/// identifier, which is [a-zA-Z_].
+static inline bool isIdentifierHead(unsigned char c) {
+ return (CharInfo[c] & (CHAR_LETTER|CHAR_UNDER)) ? true : false;
+}
+
+/// isIdentifierBody - Return true if this is the body character of an
+/// identifier, which is [a-zA-Z0-9_].
+static inline bool isIdentifierBody(unsigned char c) {
+ return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER)) ? true : false;
+}
+
+/// isHorizontalWhitespace - Return true if this character is horizontal
+/// whitespace: ' ', '\t', '\f', '\v'. Note that this returns false for '\0'.
+static inline bool isHorizontalWhitespace(unsigned char c) {
+ return (CharInfo[c] & CHAR_HORZ_WS) ? true : false;
+}
+
+/// isVerticalWhitespace - Return true if this character is vertical
+/// whitespace: '\n', '\r'. Note that this returns false for '\0'.
+static inline bool isVerticalWhitespace(unsigned char c) {
+ return (CharInfo[c] & CHAR_VERT_WS) ? true : false;
+}
+
+/// isWhitespace - Return true if this character is horizontal or vertical
+/// whitespace: ' ', '\t', '\f', '\v', '\n', '\r'. Note that this returns false
+/// for '\0'.
+static inline bool isWhitespace(unsigned char c) {
+ return (CharInfo[c] & (CHAR_HORZ_WS|CHAR_VERT_WS)) ? true : false;
+}
+
+/// isNumberBody - Return true if this is the body character of a
+/// preprocessing number, which is [a-zA-Z0-9_.].
+static inline bool isNumberBody(unsigned char c) {
+ return (CharInfo[c] & (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER|CHAR_PERIOD)) ?
+ true : false;
+}
+
+/// isRawStringDelimBody - Return true if this is the body character of a
+/// raw string delimiter.
+static inline bool isRawStringDelimBody(unsigned char c) {
+ return (CharInfo[c] &
+ (CHAR_LETTER|CHAR_NUMBER|CHAR_UNDER|CHAR_PERIOD|CHAR_RAWDEL)) ?
+ true : false;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Diagnostics forwarding code.
+//===----------------------------------------------------------------------===//
+
+/// GetMappedTokenLoc - If lexing out of a 'mapped buffer', where we pretend the
+/// lexer buffer was all expanded at a single point, perform the mapping.
+/// This is currently only used for _Pragma implementation, so it is the slow
+/// path of the hot getSourceLocation method. Do not allow it to be inlined.
+static LLVM_ATTRIBUTE_NOINLINE SourceLocation GetMappedTokenLoc(
+ Preprocessor &PP, SourceLocation FileLoc, unsigned CharNo, unsigned TokLen);
+static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
+ SourceLocation FileLoc,
+ unsigned CharNo, unsigned TokLen) {
+ assert(FileLoc.isMacroID() && "Must be a macro expansion");
+
+ // Otherwise, we're lexing "mapped tokens". This is used for things like
+ // _Pragma handling. Combine the expansion location of FileLoc with the
+ // spelling location.
+ SourceManager &SM = PP.getSourceManager();
+
+ // Create a new SLoc which is expanded from Expansion(FileLoc) but whose
+ // characters come from spelling(FileLoc)+Offset.
+ SourceLocation SpellingLoc = SM.getSpellingLoc(FileLoc);
+ SpellingLoc = SpellingLoc.getLocWithOffset(CharNo);
+
+ // Figure out the expansion loc range, which is the range covered by the
+ // original _Pragma(...) sequence.
+ std::pair<SourceLocation,SourceLocation> II =
+ SM.getImmediateExpansionRange(FileLoc);
+
+ return SM.createExpansionLoc(SpellingLoc, II.first, II.second, TokLen);
+}
+
+/// getSourceLocation - Return a source location identifier for the specified
+/// offset in the current file.
+SourceLocation Lexer::getSourceLocation(const char *Loc,
+ unsigned TokLen) const {
+ assert(Loc >= BufferStart && Loc <= BufferEnd &&
+ "Location out of range for this buffer!");
+
+ // In the normal case, we're just lexing from a simple file buffer, return
+ // the file id from FileLoc with the offset specified.
+ unsigned CharNo = Loc-BufferStart;
+ if (FileLoc.isFileID())
+ return FileLoc.getLocWithOffset(CharNo);
+
+ // Otherwise, this is the _Pragma lexer case, which pretends that all of the
+ // tokens are lexed from where the _Pragma was defined.
+ assert(PP && "This doesn't work on raw lexers");
+ return GetMappedTokenLoc(*PP, FileLoc, CharNo, TokLen);
+}
+
+/// Diag - Forwarding function for diagnostics. This translate a source
+/// position in the current buffer into a SourceLocation object for rendering.
+DiagnosticBuilder Lexer::Diag(const char *Loc, unsigned DiagID) const {
+ return PP->Diag(getSourceLocation(Loc), DiagID);
+}
+
+//===----------------------------------------------------------------------===//
+// Trigraph and Escaped Newline Handling Code.
+//===----------------------------------------------------------------------===//
+
+/// GetTrigraphCharForLetter - Given a character that occurs after a ?? pair,
+/// return the decoded trigraph letter it corresponds to, or '\0' if nothing.
+static char GetTrigraphCharForLetter(char Letter) {
+ switch (Letter) {
+ default: return 0;
+ case '=': return '#';
+ case ')': return ']';
+ case '(': return '[';
+ case '!': return '|';
+ case '\'': return '^';
+ case '>': return '}';
+ case '/': return '\\';
+ case '<': return '{';
+ case '-': return '~';
+ }
+}
+
+/// DecodeTrigraphChar - If the specified character is a legal trigraph when
+/// prefixed with ??, emit a warning about the trigraph use (whether trigraphs
+/// are enabled or not) and return the decoded character if trigraphs are
+/// enabled, or 0 if they are not.
+static char DecodeTrigraphChar(const char *CP, Lexer *L) {
+ char Res = GetTrigraphCharForLetter(*CP);
+ if (!Res || !L) return Res;
+
+ if (!L->getLangOpts().Trigraphs) {
+ if (!L->isLexingRawMode())
+ L->Diag(CP-2, diag::trigraph_ignored);
+ return 0;
+ }
+
+ if (!L->isLexingRawMode())
+ L->Diag(CP-2, diag::trigraph_converted) << StringRef(&Res, 1);
+ return Res;
+}
+
+/// getEscapedNewLineSize - Return the size of the specified escaped newline,
+/// or 0 if it is not an escaped newline. P[-1] is known to be a "\" or a
+/// trigraph equivalent on entry to this function.
+unsigned Lexer::getEscapedNewLineSize(const char *Ptr) {
+ unsigned Size = 0;
+ while (isWhitespace(Ptr[Size])) {
+ ++Size;
+
+ if (Ptr[Size-1] != '\n' && Ptr[Size-1] != '\r')
+ continue;
+
+ // If this is a \r\n or \n\r, skip the other half.
+ if ((Ptr[Size] == '\r' || Ptr[Size] == '\n') &&
+ Ptr[Size-1] != Ptr[Size])
+ ++Size;
+
+ return Size;
+ }
+
+ // Not an escaped newline, must be a \t or something else.
+ return 0;
+}
+
+/// SkipEscapedNewLines - If P points to an escaped newline (or a series of
+/// them), skip over them and return the first non-escaped-newline found;
+/// otherwise return P.
+const char *Lexer::SkipEscapedNewLines(const char *P) {
+ while (1) {
+ const char *AfterEscape;
+ if (*P == '\\') {
+ AfterEscape = P+1;
+ } else if (*P == '?') {
+ // If not a trigraph for escape, bail out.
+ if (P[1] != '?' || P[2] != '/')
+ return P;
+ AfterEscape = P+3;
+ } else {
+ return P;
+ }
+
+ unsigned NewLineSize = Lexer::getEscapedNewLineSize(AfterEscape);
+ if (NewLineSize == 0) return P;
+ P = AfterEscape+NewLineSize;
+ }
+}
+
+/// \brief Checks that the given token is the first token that occurs after the
+/// given location (this excludes comments and whitespace). Returns the location
+/// immediately after the specified token. If the token is not found or the
+/// location is inside a macro, the returned source location will be invalid.
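+///
+/// For example (a sketch, assuming a SourceManager SM, LangOptions LO, and the
+/// end location of a statement StmtEnd are in scope), the location just past a
+/// trailing semicolon can be found with:
+/// \code
+///   SourceLocation AfterSemi = Lexer::findLocationAfterToken(
+///       StmtEnd, tok::semi, SM, LO,
+///       /*SkipTrailingWhitespaceAndNewLine=*/true);
+/// \endcode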
+SourceLocation Lexer::findLocationAfterToken(SourceLocation Loc,
+ tok::TokenKind TKind,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ bool SkipTrailingWhitespaceAndNewLine) {
+ if (Loc.isMacroID()) {
+ if (!Lexer::isAtEndOfMacroExpansion(Loc, SM, LangOpts, &Loc))
+ return SourceLocation();
+ }
+ Loc = Lexer::getLocForEndOfToken(Loc, 0, SM, LangOpts);
+
+ // Break down the source location.
+ std::pair<FileID, unsigned> LocInfo = SM.getDecomposedLoc(Loc);
+
+ // Try to load the file buffer.
+ bool InvalidTemp = false;
+ llvm::StringRef File = SM.getBufferData(LocInfo.first, &InvalidTemp);
+ if (InvalidTemp)
+ return SourceLocation();
+
+ const char *TokenBegin = File.data() + LocInfo.second;
+
+ // Lex from the start of the given location.
+ Lexer lexer(SM.getLocForStartOfFile(LocInfo.first), LangOpts, File.begin(),
+ TokenBegin, File.end());
+ // Find the token.
+ Token Tok;
+ lexer.LexFromRawLexer(Tok);
+ if (Tok.isNot(TKind))
+ return SourceLocation();
+ SourceLocation TokenLoc = Tok.getLocation();
+
+ // Calculate how much whitespace needs to be skipped if any.
+ unsigned NumWhitespaceChars = 0;
+ if (SkipTrailingWhitespaceAndNewLine) {
+ const char *TokenEnd = SM.getCharacterData(TokenLoc) +
+ Tok.getLength();
+ unsigned char C = *TokenEnd;
+ while (isHorizontalWhitespace(C)) {
+ C = *(++TokenEnd);
+ NumWhitespaceChars++;
+ }
+ if (isVerticalWhitespace(C))
+ NumWhitespaceChars++;
+ }
+
+ return TokenLoc.getLocWithOffset(Tok.getLength() + NumWhitespaceChars);
+}
+
+/// getCharAndSizeSlow - Peek a single 'character' from the specified buffer,
+/// get its size, and return it. This is tricky in several cases:
+/// 1. If currently at the start of a trigraph, we warn about the trigraph,
+/// then either return the trigraph (skipping 3 chars) or the '?',
+/// depending on whether trigraphs are enabled or not.
+/// 2. If this is an escaped newline (potentially with whitespace between
+/// the backslash and newline), implicitly skip the newline and return
+/// the char after it.
+/// 3. If this is a UCN, return it. FIXME: C++ UCN's?
+///
+/// This handles the slow/uncommon case of the getCharAndSize method. Here we
+/// know that we can accumulate into Size, and that we have already incremented
+/// Ptr by Size bytes.
+///
+/// NOTE: When this method is updated, getCharAndSizeSlowNoWarn (below) should
+/// be updated to match.
+///
+char Lexer::getCharAndSizeSlow(const char *Ptr, unsigned &Size,
+ Token *Tok) {
+ // If we have a slash, look for an escaped newline.
+ if (Ptr[0] == '\\') {
+ ++Size;
+ ++Ptr;
+Slash:
+ // Common case, backslash-char where the char is not whitespace.
+ if (!isWhitespace(Ptr[0])) return '\\';
+
+ // See if we have optional whitespace characters between the slash and
+ // newline.
+ if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
+ // Remember that this token needs to be cleaned.
+ if (Tok) Tok->setFlag(Token::NeedsCleaning);
+
+ // Warn if there was whitespace between the backslash and newline.
+ if (Ptr[0] != '\n' && Ptr[0] != '\r' && Tok && !isLexingRawMode())
+ Diag(Ptr, diag::backslash_newline_space);
+
+ // Found backslash<whitespace><newline>. Parse the char after it.
+ Size += EscapedNewLineSize;
+ Ptr += EscapedNewLineSize;
+
+ // If the char that we finally got was a \n, then we must have had
+ // something like \<newline><newline>. We don't want to consume the
+ // second newline.
+ if (*Ptr == '\n' || *Ptr == '\r' || *Ptr == '\0')
+ return ' ';
+
+ // Use slow version to accumulate a correct size field.
+ return getCharAndSizeSlow(Ptr, Size, Tok);
+ }
+
+ // Otherwise, this is not an escaped newline, just return the slash.
+ return '\\';
+ }
+
+ // If this is a trigraph, process it.
+ if (Ptr[0] == '?' && Ptr[1] == '?') {
+ // If this is actually a legal trigraph (not something like "??x"), emit
+ // a trigraph warning. If so, and if trigraphs are enabled, return it.
+ if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : 0)) {
+ // Remember that this token needs to be cleaned.
+ if (Tok) Tok->setFlag(Token::NeedsCleaning);
+
+ Ptr += 3;
+ Size += 3;
+ if (C == '\\') goto Slash;
+ return C;
+ }
+ }
+
+ // If this is neither, return a single character.
+ ++Size;
+ return *Ptr;
+}
+
+
+/// getCharAndSizeSlowNoWarn - Handle the slow/uncommon case of the
+/// getCharAndSizeNoWarn method. Here we know that we can accumulate into Size,
+/// and that we have already incremented Ptr by Size bytes.
+///
+/// NOTE: When this method is updated, getCharAndSizeSlow (above) should
+/// be updated to match.
+char Lexer::getCharAndSizeSlowNoWarn(const char *Ptr, unsigned &Size,
+ const LangOptions &LangOpts) {
+ // If we have a slash, look for an escaped newline.
+ if (Ptr[0] == '\\') {
+ ++Size;
+ ++Ptr;
+Slash:
+ // Common case, backslash-char where the char is not whitespace.
+ if (!isWhitespace(Ptr[0])) return '\\';
+
+ // See if we have optional whitespace characters followed by a newline.
+ if (unsigned EscapedNewLineSize = getEscapedNewLineSize(Ptr)) {
+ // Found backslash<whitespace><newline>. Parse the char after it.
+ Size += EscapedNewLineSize;
+ Ptr += EscapedNewLineSize;
+
+ // If the char that we finally got was a \n, then we must have had
+ // something like \<newline><newline>. We don't want to consume the
+ // second newline.
+ if (*Ptr == '\n' || *Ptr == '\r' || *Ptr == '\0')
+ return ' ';
+
+ // Use slow version to accumulate a correct size field.
+ return getCharAndSizeSlowNoWarn(Ptr, Size, LangOpts);
+ }
+
+ // Otherwise, this is not an escaped newline, just return the slash.
+ return '\\';
+ }
+
+ // If this is a trigraph, process it.
+ if (LangOpts.Trigraphs && Ptr[0] == '?' && Ptr[1] == '?') {
+ // If this is actually a legal trigraph (not something like "??x"), return
+ // it.
+ if (char C = GetTrigraphCharForLetter(Ptr[2])) {
+ Ptr += 3;
+ Size += 3;
+ if (C == '\\') goto Slash;
+ return C;
+ }
+ }
+
+ // If this is neither, return a single character.
+ ++Size;
+ return *Ptr;
+}
+
+//===----------------------------------------------------------------------===//
+// Helper methods for lexing.
+//===----------------------------------------------------------------------===//
+
+/// \brief Routine that indiscriminately skips bytes in the source file.
+void Lexer::SkipBytes(unsigned Bytes, bool StartOfLine) {
+ BufferPtr += Bytes;
+ if (BufferPtr > BufferEnd)
+ BufferPtr = BufferEnd;
+ IsAtStartOfLine = StartOfLine;
+}
+
+void Lexer::LexIdentifier(Token &Result, const char *CurPtr) {
+  // Match [_A-Za-z0-9]*; we have already matched [_A-Za-z$].
+ unsigned Size;
+ unsigned char C = *CurPtr++;
+ while (isIdentifierBody(C))
+ C = *CurPtr++;
+
+ --CurPtr; // Back up over the skipped character.
+
+ // Fast path, no $,\,? in identifier found. '\' might be an escaped newline
+ // or UCN, and ? might be a trigraph for '\', an escaped newline or UCN.
+ // FIXME: UCNs.
+ //
+ // TODO: Could merge these checks into a CharInfo flag to make the comparison
+ // cheaper
+ if (C != '\\' && C != '?' && (C != '$' || !LangOpts.DollarIdents)) {
+FinishIdentifier:
+ const char *IdStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr, tok::raw_identifier);
+ Result.setRawIdentifierData(IdStart);
+
+ // If we are in raw mode, return this identifier raw. There is no need to
+ // look up identifier information or attempt to macro expand it.
+ if (LexingRawMode)
+ return;
+
+ // Fill in Result.IdentifierInfo and update the token kind,
+ // looking up the identifier in the identifier table.
+ IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);
+
+ // Finally, now that we know we have an identifier, pass this off to the
+ // preprocessor, which may macro expand it or something.
+ if (II->isHandleIdentifierCase())
+ PP->HandleIdentifier(Result);
+
+ return;
+ }
+
+ // Otherwise, $,\,? in identifier found. Enter slower path.
+
+ C = getCharAndSize(CurPtr, Size);
+ while (1) {
+ if (C == '$') {
+ // If we hit a $ and they are not supported in identifiers, we are done.
+ if (!LangOpts.DollarIdents) goto FinishIdentifier;
+
+ // Otherwise, emit a diagnostic and continue.
+ if (!isLexingRawMode())
+ Diag(CurPtr, diag::ext_dollar_in_identifier);
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+ C = getCharAndSize(CurPtr, Size);
+ continue;
+ } else if (!isIdentifierBody(C)) { // FIXME: UCNs.
+ // Found end of identifier.
+ goto FinishIdentifier;
+ }
+
+ // Otherwise, this character is good, consume it.
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+
+ C = getCharAndSize(CurPtr, Size);
+ while (isIdentifierBody(C)) { // FIXME: UCNs.
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+ C = getCharAndSize(CurPtr, Size);
+ }
+ }
+}
+
+/// isHexaLiteral - Return true if Start points to a hex constant, in
+/// Microsoft mode (where this is supposed to be several different tokens).
+static bool isHexaLiteral(const char *Start, const LangOptions &LangOpts) {
+ unsigned Size;
+ char C1 = Lexer::getCharAndSizeNoWarn(Start, Size, LangOpts);
+ if (C1 != '0')
+ return false;
+ char C2 = Lexer::getCharAndSizeNoWarn(Start + Size, Size, LangOpts);
+ return (C2 == 'x' || C2 == 'X');
+}
+
+/// LexNumericConstant - Lex the remainder of an integer or floating point
+/// constant. From[-1] is the first character lexed. Return the end of the
+/// constant.
+void Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
+ unsigned Size;
+ char C = getCharAndSize(CurPtr, Size);
+ char PrevCh = 0;
+ while (isNumberBody(C)) { // FIXME: UCNs.
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+ PrevCh = C;
+ C = getCharAndSize(CurPtr, Size);
+ }
+
+ // If we fell out, check for a sign, due to 1e+12. If we have one, continue.
+ if ((C == '-' || C == '+') && (PrevCh == 'E' || PrevCh == 'e')) {
+ // If we are in Microsoft mode, don't continue if the constant is hex.
+ // For example, MSVC will accept the following as 3 tokens: 0x1234567e+1
+ if (!LangOpts.MicrosoftExt || !isHexaLiteral(BufferPtr, LangOpts))
+ return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
+ }
+
+ // If we have a hex FP constant, continue.
+ if ((C == '-' || C == '+') && (PrevCh == 'P' || PrevCh == 'p'))
+ return LexNumericConstant(Result, ConsumeChar(CurPtr, Size, Result));
+
+ // Update the location of token as well as BufferPtr.
+ const char *TokStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr, tok::numeric_constant);
+ Result.setLiteralData(TokStart);
+}
+
+/// LexUDSuffix - Lex the ud-suffix production for user-defined literal suffixes
+/// in C++11, or warn on a ud-suffix in C++98.
+const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr) {
+ assert(getLangOpts().CPlusPlus);
+
+ // Maximally munch an identifier. FIXME: UCNs.
+ unsigned Size;
+ char C = getCharAndSize(CurPtr, Size);
+ if (isIdentifierHead(C)) {
+ if (!getLangOpts().CPlusPlus0x) {
+ if (!isLexingRawMode())
+ Diag(CurPtr,
+ C == '_' ? diag::warn_cxx11_compat_user_defined_literal
+ : diag::warn_cxx11_compat_reserved_user_defined_literal)
+ << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
+ return CurPtr;
+ }
+
+ // C++11 [lex.ext]p10, [usrlit.suffix]p1: A program containing a ud-suffix
+ // that does not start with an underscore is ill-formed. As a conforming
+ // extension, we treat all such suffixes as if they had whitespace before
+ // them.
+ if (C != '_') {
+ if (!isLexingRawMode())
+ Diag(CurPtr, getLangOpts().MicrosoftMode ?
+ diag::ext_ms_reserved_user_defined_literal :
+ diag::ext_reserved_user_defined_literal)
+ << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
+ return CurPtr;
+ }
+
+ Result.setFlag(Token::HasUDSuffix);
+ do {
+ CurPtr = ConsumeChar(CurPtr, Size, Result);
+ C = getCharAndSize(CurPtr, Size);
+ } while (isIdentifierBody(C));
+ }
+ return CurPtr;
+}
+
+/// LexStringLiteral - Lex the remainder of a string literal, after having lexed
+/// either " or L" or u8" or u" or U".
+void Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
+ tok::TokenKind Kind) {
+ const char *NulCharacter = 0; // Does this string contain the \0 character?
+
+ if (!isLexingRawMode() &&
+ (Kind == tok::utf8_string_literal ||
+ Kind == tok::utf16_string_literal ||
+ Kind == tok::utf32_string_literal))
+ Diag(BufferPtr, diag::warn_cxx98_compat_unicode_literal);
+
+ char C = getAndAdvanceChar(CurPtr, Result);
+ while (C != '"') {
+ // Skip escaped characters. Escaped newlines will already be processed by
+ // getAndAdvanceChar.
+ if (C == '\\')
+ C = getAndAdvanceChar(CurPtr, Result);
+
+ if (C == '\n' || C == '\r' || // Newline.
+ (C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
+ if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
+ Diag(BufferPtr, diag::warn_unterminated_string);
+ FormTokenWithChars(Result, CurPtr-1, tok::unknown);
+ return;
+ }
+
+ if (C == 0) {
+ if (isCodeCompletionPoint(CurPtr-1)) {
+ PP->CodeCompleteNaturalLanguage();
+ FormTokenWithChars(Result, CurPtr-1, tok::unknown);
+ return cutOffLexing();
+ }
+
+ NulCharacter = CurPtr-1;
+ }
+ C = getAndAdvanceChar(CurPtr, Result);
+ }
+
+ // If we are in C++11, lex the optional ud-suffix.
+ if (getLangOpts().CPlusPlus)
+ CurPtr = LexUDSuffix(Result, CurPtr);
+
+ // If a nul character existed in the string, warn about it.
+ if (NulCharacter && !isLexingRawMode())
+ Diag(NulCharacter, diag::null_in_string);
+
+ // Update the location of the token as well as the BufferPtr instance var.
+ const char *TokStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr, Kind);
+ Result.setLiteralData(TokStart);
+}
+
+/// LexRawStringLiteral - Lex the remainder of a raw string literal, after
+/// having lexed R", LR", u8R", uR", or UR".
+void Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr,
+ tok::TokenKind Kind) {
+ // This function doesn't use getAndAdvanceChar because C++0x [lex.pptoken]p3:
+ // Between the initial and final double quote characters of the raw string,
+ // any transformations performed in phases 1 and 2 (trigraphs,
+ // universal-character-names, and line splicing) are reverted.
+
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::warn_cxx98_compat_raw_string_literal);
+
+ unsigned PrefixLen = 0;
+
+ while (PrefixLen != 16 && isRawStringDelimBody(CurPtr[PrefixLen]))
+ ++PrefixLen;
+
+ // If the last character was not a '(', then we didn't lex a valid delimiter.
+ if (CurPtr[PrefixLen] != '(') {
+ if (!isLexingRawMode()) {
+ const char *PrefixEnd = &CurPtr[PrefixLen];
+ if (PrefixLen == 16) {
+ Diag(PrefixEnd, diag::err_raw_delim_too_long);
+ } else {
+ Diag(PrefixEnd, diag::err_invalid_char_raw_delim)
+ << StringRef(PrefixEnd, 1);
+ }
+ }
+
+ // Search for the next '"' in hopes of salvaging the lexer. Unfortunately,
+ // it's possible the '"' was intended to be part of the raw string, but
+ // there's not much we can do about that.
+ while (1) {
+ char C = *CurPtr++;
+
+ if (C == '"')
+ break;
+ if (C == 0 && CurPtr-1 == BufferEnd) {
+ --CurPtr;
+ break;
+ }
+ }
+
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ return;
+ }
+
+ // Save prefix and move CurPtr past it
+ const char *Prefix = CurPtr;
+ CurPtr += PrefixLen + 1; // skip over prefix and '('
+
+ while (1) {
+ char C = *CurPtr++;
+
+ if (C == ')') {
+ // Check for prefix match and closing quote.
+ if (strncmp(CurPtr, Prefix, PrefixLen) == 0 && CurPtr[PrefixLen] == '"') {
+ CurPtr += PrefixLen + 1; // skip over prefix and '"'
+ break;
+ }
+ } else if (C == 0 && CurPtr-1 == BufferEnd) { // End of file.
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::err_unterminated_raw_string)
+ << StringRef(Prefix, PrefixLen);
+ FormTokenWithChars(Result, CurPtr-1, tok::unknown);
+ return;
+ }
+ }
+
+ // If we are in C++11, lex the optional ud-suffix.
+ if (getLangOpts().CPlusPlus)
+ CurPtr = LexUDSuffix(Result, CurPtr);
+
+ // Update the location of token as well as BufferPtr.
+ const char *TokStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr, Kind);
+ Result.setLiteralData(TokStart);
+}
+
+/// LexAngledStringLiteral - Lex the remainder of an angled string literal,
+/// after having lexed the '<' character. This is used for #include filenames.
+void Lexer::LexAngledStringLiteral(Token &Result, const char *CurPtr) {
+ const char *NulCharacter = 0; // Does this string contain the \0 character?
+ const char *AfterLessPos = CurPtr;
+ char C = getAndAdvanceChar(CurPtr, Result);
+ while (C != '>') {
+ // Skip escaped characters.
+ if (C == '\\') {
+ // Skip the escaped character.
+ C = getAndAdvanceChar(CurPtr, Result);
+ } else if (C == '\n' || C == '\r' || // Newline.
+ (C == 0 && (CurPtr-1 == BufferEnd || // End of file.
+ isCodeCompletionPoint(CurPtr-1)))) {
+ // If the filename is unterminated, then it must just be a lone <
+ // character. Return this as such.
+ FormTokenWithChars(Result, AfterLessPos, tok::less);
+ return;
+ } else if (C == 0) {
+ NulCharacter = CurPtr-1;
+ }
+ C = getAndAdvanceChar(CurPtr, Result);
+ }
+
+ // If a nul character existed in the string, warn about it.
+ if (NulCharacter && !isLexingRawMode())
+ Diag(NulCharacter, diag::null_in_string);
+
+ // Update the location of token as well as BufferPtr.
+ const char *TokStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr, tok::angle_string_literal);
+ Result.setLiteralData(TokStart);
+}
+
+
+/// LexCharConstant - Lex the remainder of a character constant, after having
+/// lexed either ' or L' or u' or U'.
+void Lexer::LexCharConstant(Token &Result, const char *CurPtr,
+ tok::TokenKind Kind) {
+ const char *NulCharacter = 0; // Does this character contain the \0 character?
+
+ if (!isLexingRawMode() &&
+ (Kind == tok::utf16_char_constant || Kind == tok::utf32_char_constant))
+ Diag(BufferPtr, diag::warn_cxx98_compat_unicode_literal);
+
+ char C = getAndAdvanceChar(CurPtr, Result);
+ if (C == '\'') {
+ if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
+ Diag(BufferPtr, diag::err_empty_character);
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ return;
+ }
+
+ while (C != '\'') {
+ // Skip escaped characters.
+ if (C == '\\') {
+ // Skip the escaped character.
+ // FIXME: UCN's
+ C = getAndAdvanceChar(CurPtr, Result);
+ } else if (C == '\n' || C == '\r' || // Newline.
+ (C == 0 && CurPtr-1 == BufferEnd)) { // End of file.
+ if (!isLexingRawMode() && !LangOpts.AsmPreprocessor)
+ Diag(BufferPtr, diag::warn_unterminated_char);
+ FormTokenWithChars(Result, CurPtr-1, tok::unknown);
+ return;
+ } else if (C == 0) {
+ if (isCodeCompletionPoint(CurPtr-1)) {
+ PP->CodeCompleteNaturalLanguage();
+ FormTokenWithChars(Result, CurPtr-1, tok::unknown);
+ return cutOffLexing();
+ }
+
+ NulCharacter = CurPtr-1;
+ }
+ C = getAndAdvanceChar(CurPtr, Result);
+ }
+
+ // If we are in C++11, lex the optional ud-suffix.
+ if (getLangOpts().CPlusPlus)
+ CurPtr = LexUDSuffix(Result, CurPtr);
+
+ // If a nul character existed in the character, warn about it.
+ if (NulCharacter && !isLexingRawMode())
+ Diag(NulCharacter, diag::null_in_char);
+
+ // Update the location of token as well as BufferPtr.
+ const char *TokStart = BufferPtr;
+ FormTokenWithChars(Result, CurPtr, Kind);
+ Result.setLiteralData(TokStart);
+}
+
+/// SkipWhitespace - Efficiently skip over a series of whitespace characters.
+/// Update BufferPtr to point to the next non-whitespace character and return.
+///
+/// This method forms a token and returns true if KeepWhitespaceMode is enabled.
+///
+bool Lexer::SkipWhitespace(Token &Result, const char *CurPtr) {
+ // Whitespace - Skip it, then return the token after the whitespace.
+  unsigned char Char = *CurPtr;  // Skip consecutive spaces efficiently.
+ while (1) {
+ // Skip horizontal whitespace very aggressively.
+ while (isHorizontalWhitespace(Char))
+ Char = *++CurPtr;
+
+ // Otherwise if we have something other than whitespace, we're done.
+ if (Char != '\n' && Char != '\r')
+ break;
+
+ if (ParsingPreprocessorDirective) {
+ // End of preprocessor directive line, let LexTokenInternal handle this.
+ BufferPtr = CurPtr;
+ return false;
+ }
+
+ // ok, but handle newline.
+ // The returned token is at the start of the line.
+ Result.setFlag(Token::StartOfLine);
+ // No leading whitespace seen so far.
+ Result.clearFlag(Token::LeadingSpace);
+ Char = *++CurPtr;
+ }
+
+ // If this isn't immediately after a newline, there is leading space.
+ char PrevChar = CurPtr[-1];
+ if (PrevChar != '\n' && PrevChar != '\r')
+ Result.setFlag(Token::LeadingSpace);
+
+ // If the client wants us to return whitespace, return it now.
+ if (isKeepWhitespaceMode()) {
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ return true;
+ }
+
+ BufferPtr = CurPtr;
+ return false;
+}
+
+/// SkipBCPLComment - We have just read the // characters from input. Skip
+/// until we find the newline character that terminates the comment. Then
+/// update BufferPtr and return.
+///
+/// If we're in KeepCommentMode or any CommentHandler has inserted
+/// some tokens, this will store the first token and return true.
+bool Lexer::SkipBCPLComment(Token &Result, const char *CurPtr) {
+ // If BCPL comments aren't explicitly enabled for this language, emit an
+ // extension warning.
+ if (!LangOpts.BCPLComment && !isLexingRawMode()) {
+ Diag(BufferPtr, diag::ext_bcpl_comment);
+
+ // Mark them enabled so we only emit one warning for this translation
+ // unit.
+ LangOpts.BCPLComment = true;
+ }
+
+ // Scan over the body of the comment. The common case, when scanning, is that
+ // the comment contains normal ascii characters with nothing interesting in
+ // them. As such, optimize for this case with the inner loop.
+ char C;
+ do {
+ C = *CurPtr;
+ // Skip over characters in the fast loop.
+ while (C != 0 && // Potentially EOF.
+ C != '\n' && C != '\r') // Newline or DOS-style newline.
+ C = *++CurPtr;
+
+ const char *NextLine = CurPtr;
+ if (C != 0) {
+ // We found a newline, see if it's escaped.
+ const char *EscapePtr = CurPtr-1;
+ while (isHorizontalWhitespace(*EscapePtr)) // Skip whitespace.
+ --EscapePtr;
+
+ if (*EscapePtr == '\\') // Escaped newline.
+ CurPtr = EscapePtr;
+ else if (EscapePtr[0] == '/' && EscapePtr[-1] == '?' &&
+ EscapePtr[-2] == '?') // Trigraph-escaped newline.
+ CurPtr = EscapePtr-2;
+ else
+ break; // This is a newline, we're done.
+
+ C = *CurPtr;
+ }
+
+ // Otherwise, this is a hard case. Fall back on getAndAdvanceChar to
+ // properly decode the character. Read it in raw mode to avoid emitting
+ // diagnostics about things like trigraphs. If we see an escaped newline,
+ // we'll handle it below.
+ const char *OldPtr = CurPtr;
+ bool OldRawMode = isLexingRawMode();
+ LexingRawMode = true;
+ C = getAndAdvanceChar(CurPtr, Result);
+ LexingRawMode = OldRawMode;
+
+    // If we read only one character, then no special handling is needed.
+ // We're done and can skip forward to the newline.
+ if (C != 0 && CurPtr == OldPtr+1) {
+ CurPtr = NextLine;
+ break;
+ }
+
+ // If we read multiple characters, and one of those characters was a \r or
+ // \n, then we had an escaped newline within the comment. Emit diagnostic
+ // unless the next line is also a // comment.
+ if (CurPtr != OldPtr+1 && C != '/' && CurPtr[0] != '/') {
+ for (; OldPtr != CurPtr; ++OldPtr)
+ if (OldPtr[0] == '\n' || OldPtr[0] == '\r') {
+ // Okay, we found a // comment that ends in a newline, if the next
+ // line is also a // comment, but has spaces, don't emit a diagnostic.
+ if (isWhitespace(C)) {
+ const char *ForwardPtr = CurPtr;
+ while (isWhitespace(*ForwardPtr)) // Skip whitespace.
+ ++ForwardPtr;
+ if (ForwardPtr[0] == '/' && ForwardPtr[1] == '/')
+ break;
+ }
+
+ if (!isLexingRawMode())
+ Diag(OldPtr-1, diag::ext_multi_line_bcpl_comment);
+ break;
+ }
+ }
+
+ if (CurPtr == BufferEnd+1) {
+ --CurPtr;
+ break;
+ }
+
+ if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
+ PP->CodeCompleteNaturalLanguage();
+ cutOffLexing();
+ return false;
+ }
+
+ } while (C != '\n' && C != '\r');
+
+ // Found but did not consume the newline. Notify comment handlers about the
+ // comment unless we're in a #if 0 block.
+ if (PP && !isLexingRawMode() &&
+ PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
+ getSourceLocation(CurPtr)))) {
+ BufferPtr = CurPtr;
+ return true; // A token has to be returned.
+ }
+
+ // If we are returning comments as tokens, return this comment as a token.
+ if (inKeepCommentMode())
+ return SaveBCPLComment(Result, CurPtr);
+
+ // If we are inside a preprocessor directive and we see the end of line,
+ // return immediately, so that the lexer can return this as an EOD token.
+ if (ParsingPreprocessorDirective || CurPtr == BufferEnd) {
+ BufferPtr = CurPtr;
+ return false;
+ }
+
+ // Otherwise, eat the \n character. We don't care if this is a \n\r or
+ // \r\n sequence. This is an efficiency hack (because we know the \n can't
+  // contribute to another token); it isn't needed for correctness. Note that
+  // this is ok even in KeepWhitespaceMode, because we would have returned the
+  // comment above in that mode.
+ ++CurPtr;
+
+ // The next returned token is at the start of the line.
+ Result.setFlag(Token::StartOfLine);
+ // No leading whitespace seen so far.
+ Result.clearFlag(Token::LeadingSpace);
+ BufferPtr = CurPtr;
+ return false;
+}
+
+/// SaveBCPLComment - If in save-comment mode, package up this BCPL comment in
+/// an appropriate way and return it.
+bool Lexer::SaveBCPLComment(Token &Result, const char *CurPtr) {
+ // If we're not in a preprocessor directive, just return the // comment
+ // directly.
+ FormTokenWithChars(Result, CurPtr, tok::comment);
+
+ if (!ParsingPreprocessorDirective)
+ return true;
+
+ // If this BCPL-style comment is in a macro definition, transmogrify it into
+ // a C-style block comment.
+ bool Invalid = false;
+ std::string Spelling = PP->getSpelling(Result, &Invalid);
+ if (Invalid)
+ return true;
+
+ assert(Spelling[0] == '/' && Spelling[1] == '/' && "Not bcpl comment?");
+ Spelling[1] = '*'; // Change prefix to "/*".
+ Spelling += "*/"; // add suffix.
+
+ Result.setKind(tok::comment);
+ PP->CreateString(&Spelling[0], Spelling.size(), Result,
+ Result.getLocation(), Result.getLocation());
+ return true;
+}
+
+/// isEndOfBlockCommentWithEscapedNewLine - Return true if the specified
+/// newline character (either \n or \r) is part of an escaped newline sequence.
+/// Issue a diagnostic if so. We know that the newline is inside of a block
+/// comment.
+static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
+ Lexer *L) {
+ assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');
+
+ // Back up off the newline.
+ --CurPtr;
+
+ // If this is a two-character newline sequence, skip the other character.
+ if (CurPtr[0] == '\n' || CurPtr[0] == '\r') {
+ // \n\n or \r\r -> not escaped newline.
+ if (CurPtr[0] == CurPtr[1])
+ return false;
+ // \n\r or \r\n -> skip the newline.
+ --CurPtr;
+ }
+
+ // If we have horizontal whitespace, skip over it. We allow whitespace
+ // between the slash and newline.
+ bool HasSpace = false;
+ while (isHorizontalWhitespace(*CurPtr) || *CurPtr == 0) {
+ --CurPtr;
+ HasSpace = true;
+ }
+
+ // If we have a slash, we know this is an escaped newline.
+ if (*CurPtr == '\\') {
+ if (CurPtr[-1] != '*') return false;
+ } else {
+ // It isn't a slash, is it the ?? / trigraph?
+ if (CurPtr[0] != '/' || CurPtr[-1] != '?' || CurPtr[-2] != '?' ||
+ CurPtr[-3] != '*')
+ return false;
+
+ // This is the trigraph ending the comment. Emit a stern warning!
+ CurPtr -= 2;
+
+ // If no trigraphs are enabled, warn that we ignored this trigraph and
+ // ignore this * character.
+ if (!L->getLangOpts().Trigraphs) {
+ if (!L->isLexingRawMode())
+ L->Diag(CurPtr, diag::trigraph_ignored_block_comment);
+ return false;
+ }
+ if (!L->isLexingRawMode())
+ L->Diag(CurPtr, diag::trigraph_ends_block_comment);
+ }
+
+ // Warn about having an escaped newline between the */ characters.
+ if (!L->isLexingRawMode())
+ L->Diag(CurPtr, diag::escaped_newline_block_comment_end);
+
+ // If there was space between the backslash and newline, warn about it.
+ if (HasSpace && !L->isLexingRawMode())
+ L->Diag(CurPtr, diag::backslash_newline_space);
+
+ return true;
+}
+
+#ifdef __SSE2__
+#include <emmintrin.h>
+#elif __ALTIVEC__
+#include <altivec.h>
+#undef bool
+#endif
+
+/// SkipBlockComment - We have just read the /* characters from input. Read
+/// until we find the */ characters that terminate the comment. Note that we
+/// don't bother decoding trigraphs or escaped newlines in block comments,
+/// because they cannot cause the comment to end. The only thing that can
+/// happen is that the comment could end with an escaped newline between the
+/// * and / that end the comment.
+///
+/// If we're in KeepCommentMode or any CommentHandler has inserted
+/// some tokens, this will store the first token and return true.
+bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr) {
+ // Scan one character past where we should, looking for a '/' character. Once
+ // we find it, check to see if it was preceded by a *. This common
+ // optimization helps people who like to put a lot of * characters in their
+ // comments.
+
+  // The first character is read with newlines and trigraphs skipped, so that
+  // the degenerate /*/ case below is handled correctly if the * has an
+  // escaped newline after it.
+ unsigned CharSize;
+ unsigned char C = getCharAndSize(CurPtr, CharSize);
+ CurPtr += CharSize;
+ if (C == 0 && CurPtr == BufferEnd+1) {
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::err_unterminated_block_comment);
+ --CurPtr;
+
+ // KeepWhitespaceMode should return this broken comment as a token. Since
+ // it isn't a well formed comment, just return it as an 'unknown' token.
+ if (isKeepWhitespaceMode()) {
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ return true;
+ }
+
+ BufferPtr = CurPtr;
+ return false;
+ }
+
+ // Check to see if the first character after the '/*' is another /. If so,
+ // then this slash does not end the block comment, it is part of it.
+ if (C == '/')
+ C = *CurPtr++;
+
+ while (1) {
+ // Skip over all non-interesting characters until we find end of buffer or a
+ // (probably ending) '/' character.
+ if (CurPtr + 24 < BufferEnd &&
+ // If there is a code-completion point avoid the fast scan because it
+ // doesn't check for '\0'.
+ !(PP && PP->getCodeCompletionFileLoc() == FileLoc)) {
+ // While not aligned to a 16-byte boundary.
+ while (C != '/' && ((intptr_t)CurPtr & 0x0F) != 0)
+ C = *CurPtr++;
+
+ if (C == '/') goto FoundSlash;
+
+#ifdef __SSE2__
+ __m128i Slashes = _mm_set1_epi8('/');
+ while (CurPtr+16 <= BufferEnd) {
+ int cmp = _mm_movemask_epi8(_mm_cmpeq_epi8(*(__m128i*)CurPtr, Slashes));
+ if (cmp != 0) {
+ // Adjust the pointer to point directly after the first slash. It's
+ // not necessary to set C here, it will be overwritten at the end of
+ // the outer loop.
+ CurPtr += llvm::CountTrailingZeros_32(cmp) + 1;
+ goto FoundSlash;
+ }
+ CurPtr += 16;
+ }
+#elif __ALTIVEC__
+ __vector unsigned char Slashes = {
+ '/', '/', '/', '/', '/', '/', '/', '/',
+ '/', '/', '/', '/', '/', '/', '/', '/'
+ };
+ while (CurPtr+16 <= BufferEnd &&
+ !vec_any_eq(*(vector unsigned char*)CurPtr, Slashes))
+ CurPtr += 16;
+#else
+ // Scan for '/' quickly. Many block comments are very large.
+ while (CurPtr[0] != '/' &&
+ CurPtr[1] != '/' &&
+ CurPtr[2] != '/' &&
+ CurPtr[3] != '/' &&
+ CurPtr+4 < BufferEnd) {
+ CurPtr += 4;
+ }
+#endif
+
+ // It has to be one of the bytes scanned, increment to it and read one.
+ C = *CurPtr++;
+ }
+
+ // Loop to scan the remainder.
+ while (C != '/' && C != '\0')
+ C = *CurPtr++;
+
+ if (C == '/') {
+ FoundSlash:
+ if (CurPtr[-2] == '*') // We found the final */. We're done!
+ break;
+
+ if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
+ if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) {
+ // We found the final */, though it had an escaped newline between the
+ // * and /. We're done!
+ break;
+ }
+ }
+ if (CurPtr[0] == '*' && CurPtr[1] != '/') {
+ // If this is a /* inside of the comment, emit a warning. Don't do this
+ // if this is a /*/, which will end the comment. This misses cases with
+ // embedded escaped newlines, but oh well.
+ if (!isLexingRawMode())
+ Diag(CurPtr-1, diag::warn_nested_block_comment);
+ }
+ } else if (C == 0 && CurPtr == BufferEnd+1) {
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::err_unterminated_block_comment);
+ // Note: the user probably forgot a */. We could continue immediately
+ // after the /*, but this would involve lexing a lot of what really is the
+ // comment, which surely would confuse the parser.
+ --CurPtr;
+
+ // KeepWhitespaceMode should return this broken comment as a token. Since
+ // it isn't a well formed comment, just return it as an 'unknown' token.
+ if (isKeepWhitespaceMode()) {
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ return true;
+ }
+
+ BufferPtr = CurPtr;
+ return false;
+ } else if (C == '\0' && isCodeCompletionPoint(CurPtr-1)) {
+ PP->CodeCompleteNaturalLanguage();
+ cutOffLexing();
+ return false;
+ }
+
+ C = *CurPtr++;
+ }
+
+ // Notify comment handlers about the comment unless we're in a #if 0 block.
+ if (PP && !isLexingRawMode() &&
+ PP->HandleComment(Result, SourceRange(getSourceLocation(BufferPtr),
+ getSourceLocation(CurPtr)))) {
+ BufferPtr = CurPtr;
+ return true; // A token has to be returned.
+ }
+
+ // If we are returning comments as tokens, return this comment as a token.
+ if (inKeepCommentMode()) {
+ FormTokenWithChars(Result, CurPtr, tok::comment);
+ return true;
+ }
+
+ // It is common for the tokens immediately after a /**/ comment to be
+ // whitespace. Instead of going through the big switch, handle it
+ // efficiently now. This is safe even in KeepWhitespaceMode because we would
+ // have already returned above with the comment as a token.
+ if (isHorizontalWhitespace(*CurPtr)) {
+ Result.setFlag(Token::LeadingSpace);
+ SkipWhitespace(Result, CurPtr+1);
+ return false;
+ }
+
+ // Otherwise, just return so that the next character will be lexed as a token.
+ BufferPtr = CurPtr;
+ Result.setFlag(Token::LeadingSpace);
+ return false;
+}
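+
+#ifdef __SSE2__
+// A minimal sketch of the 16-bytes-at-a-time scan used in SkipBlockComment
+// above, shown in isolation. scanForSlash is a hypothetical helper that is not
+// called by the lexer; it relies on the <emmintrin.h> include at the top of
+// this file.
+static inline const char *scanForSlash(const char *Buf, const char *End) {
+  __m128i Slashes = _mm_set1_epi8('/');
+  while (Buf + 16 <= End) {
+    // Compare 16 bytes at once against '/' and collect a bitmask of matches.
+    __m128i Chunk = _mm_loadu_si128(reinterpret_cast<const __m128i *>(Buf));
+    int Mask = _mm_movemask_epi8(_mm_cmpeq_epi8(Chunk, Slashes));
+    if (Mask != 0)
+      return Buf + __builtin_ctz(Mask); // First '/' within this chunk.
+    Buf += 16;
+  }
+  while (Buf != End && *Buf != '/') // Scalar tail for the final <16 bytes.
+    ++Buf;
+  return Buf;
+}
+#endif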
+
+//===----------------------------------------------------------------------===//
+// Primary Lexing Entry Points
+//===----------------------------------------------------------------------===//
+
+/// ReadToEndOfLine - Read the rest of the current preprocessor line as an
+/// uninterpreted string. This switches the lexer out of directive mode.
+std::string Lexer::ReadToEndOfLine() {
+ assert(ParsingPreprocessorDirective && ParsingFilename == false &&
+ "Must be in a preprocessing directive!");
+ std::string Result;
+ Token Tmp;
+
+ // CurPtr - Cache BufferPtr in an automatic variable.
+ const char *CurPtr = BufferPtr;
+ while (1) {
+ char Char = getAndAdvanceChar(CurPtr, Tmp);
+ switch (Char) {
+ default:
+ Result += Char;
+ break;
+ case 0: // Null.
+ // Found end of file?
+ if (CurPtr-1 != BufferEnd) {
+ if (isCodeCompletionPoint(CurPtr-1)) {
+ PP->CodeCompleteNaturalLanguage();
+ cutOffLexing();
+ return Result;
+ }
+
+ // Nope, normal character, continue.
+ Result += Char;
+ break;
+ }
+ // FALL THROUGH.
+ case '\r':
+ case '\n':
+ // Okay, we found the end of the line. First, back up past the \0, \r, \n.
+ assert(CurPtr[-1] == Char && "Trigraphs for newline?");
+ BufferPtr = CurPtr-1;
+
+ // Next, lex the character, which should handle the EOD transition.
+ Lex(Tmp);
+ if (Tmp.is(tok::code_completion)) {
+ if (PP)
+ PP->CodeCompleteNaturalLanguage();
+ Lex(Tmp);
+ }
+ assert(Tmp.is(tok::eod) && "Unexpected token!");
+
+ // Finally, we're done, return the string we found.
+ return Result;
+ }
+ }
+}
+
+/// LexEndOfFile - CurPtr points to the end of this file. Handle this
+/// condition, reporting diagnostics and handling other edge cases as required.
+/// This returns true if Result contains a token, false if PP.Lex should be
+/// called again.
+bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
+ // If we hit the end of the file while parsing a preprocessor directive,
+ // end the preprocessor directive first. The next token returned will
+ // then be the end of file.
+ if (ParsingPreprocessorDirective) {
+ // Done parsing the "line".
+ ParsingPreprocessorDirective = false;
+ // Update the location of token as well as BufferPtr.
+ FormTokenWithChars(Result, CurPtr, tok::eod);
+
+ // Restore comment saving mode, in case it was disabled for directive.
+ SetCommentRetentionState(PP->getCommentRetentionState());
+ return true; // Have a token.
+ }
+
+ // If we are in raw mode, return this event as an EOF token. Let the caller
+ // that put us in raw mode handle the event.
+ if (isLexingRawMode()) {
+ Result.startToken();
+ BufferPtr = BufferEnd;
+ FormTokenWithChars(Result, BufferEnd, tok::eof);
+ return true;
+ }
+
+ // Issue diagnostics for unterminated #if and missing newline.
+
+ // If we are in a #if directive, emit an error.
+ while (!ConditionalStack.empty()) {
+ if (PP->getCodeCompletionFileLoc() != FileLoc)
+ PP->Diag(ConditionalStack.back().IfLoc,
+ diag::err_pp_unterminated_conditional);
+ ConditionalStack.pop_back();
+ }
+
+ // C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
+ // a pedwarn.
+ if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r'))
+ Diag(BufferEnd, LangOpts.CPlusPlus0x ? // C++11 [lex.phases] 2.2 p2
+ diag::warn_cxx98_compat_no_newline_eof : diag::ext_no_newline_eof)
+ << FixItHint::CreateInsertion(getSourceLocation(BufferEnd), "\n");
+
+ BufferPtr = CurPtr;
+
+ // Finally, let the preprocessor handle this.
+ return PP->HandleEndOfFile(Result);
+}
+
+/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
+/// the specified lexer will return a tok::l_paren token, 0 if it is something
+/// else and 2 if there are no more tokens in the buffer controlled by the
+/// lexer.
+unsigned Lexer::isNextPPTokenLParen() {
+ assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");
+
+ // Switch to 'skipping' mode. This will ensure that we can lex a token
+ // without emitting diagnostics, disables macro expansion, and will cause EOF
+ // to return an EOF token instead of popping the include stack.
+ LexingRawMode = true;
+
+ // Save state that can be changed while lexing so that we can restore it.
+ const char *TmpBufferPtr = BufferPtr;
+ bool inPPDirectiveMode = ParsingPreprocessorDirective;
+
+ Token Tok;
+ Tok.startToken();
+ LexTokenInternal(Tok);
+
+ // Restore state that may have changed.
+ BufferPtr = TmpBufferPtr;
+ ParsingPreprocessorDirective = inPPDirectiveMode;
+
+ // Restore the lexer back to non-skipping mode.
+ LexingRawMode = false;
+
+ if (Tok.is(tok::eof))
+ return 2;
+ return Tok.is(tok::l_paren);
+}
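+
+// For illustration: given a hypothetical "#define F(x) x", the preprocessor
+// only expands F when the next unexpanded token is '(' -- "F(1)" expands but a
+// bare "F + 1" does not -- and isNextPPTokenLParen answers exactly that
+// question without emitting diagnostics or consuming any input.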
+
+/// FindConflictEnd - Find the end of a version control conflict marker.
+static const char *FindConflictEnd(const char *CurPtr, const char *BufferEnd,
+ ConflictMarkerKind CMK) {
+ const char *Terminator = CMK == CMK_Perforce ? "<<<<\n" : ">>>>>>>";
+ size_t TermLen = CMK == CMK_Perforce ? 5 : 7;
+ StringRef RestOfBuffer(CurPtr+TermLen, BufferEnd-CurPtr-TermLen);
+ size_t Pos = RestOfBuffer.find(Terminator);
+ while (Pos != StringRef::npos) {
+ // Must occur at start of line.
+ if (RestOfBuffer[Pos-1] != '\r' &&
+ RestOfBuffer[Pos-1] != '\n') {
+ RestOfBuffer = RestOfBuffer.substr(Pos+TermLen);
+ Pos = RestOfBuffer.find(Terminator);
+ continue;
+ }
+ return RestOfBuffer.data()+Pos;
+ }
+ return 0;
+}
+
+/// IsStartOfConflictMarker - If the specified pointer is the start of a version
+/// control conflict marker like '<<<<<<<', recognize it as such, emit an error
+/// and recover nicely. This returns true if it is a conflict marker and false
+/// if not.
+bool Lexer::IsStartOfConflictMarker(const char *CurPtr) {
+ // Only a conflict marker if it starts at the beginning of a line.
+ if (CurPtr != BufferStart &&
+ CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
+ return false;
+
+ // Check to see if we have <<<<<<< or >>>>.
+ if ((BufferEnd-CurPtr < 8 || StringRef(CurPtr, 7) != "<<<<<<<") &&
+ (BufferEnd-CurPtr < 6 || StringRef(CurPtr, 5) != ">>>> "))
+ return false;
+
+ // If we have a situation where we don't care about conflict markers, ignore
+ // it.
+ if (CurrentConflictMarkerState || isLexingRawMode())
+ return false;
+
+ ConflictMarkerKind Kind = *CurPtr == '<' ? CMK_Normal : CMK_Perforce;
+
+ // Check to see if there is an ending marker somewhere in the buffer at the
+ // start of a line to terminate this conflict marker.
+ if (FindConflictEnd(CurPtr, BufferEnd, Kind)) {
+ // We found a match. We are really in a conflict marker.
+ // Diagnose this, and ignore to the end of line.
+ Diag(CurPtr, diag::err_conflict_marker);
+ CurrentConflictMarkerState = Kind;
+
+ // Skip ahead to the end of line. We know this exists because the
+ // end-of-conflict marker starts with \r or \n.
+ while (*CurPtr != '\r' && *CurPtr != '\n') {
+ assert(CurPtr != BufferEnd && "Didn't find end of line");
+ ++CurPtr;
+ }
+ BufferPtr = CurPtr;
+ return true;
+ }
+
+ // No end of conflict marker found.
+ return false;
+}
+
+
+/// HandleEndOfConflictMarker - If this is a '====' or '||||' or '>>>>', or if
+/// it is '<<<<' and the conflict marker started with a '>>>>' marker, then it
+/// is the end of a conflict marker. Handle it by ignoring up until the end of
+/// the line. This returns true if it is a conflict marker and false if not.
+bool Lexer::HandleEndOfConflictMarker(const char *CurPtr) {
+ // Only a conflict marker if it starts at the beginning of a line.
+ if (CurPtr != BufferStart &&
+ CurPtr[-1] != '\n' && CurPtr[-1] != '\r')
+ return false;
+
+ // If we have a situation where we don't care about conflict markers, ignore
+ // it.
+ if (!CurrentConflictMarkerState || isLexingRawMode())
+ return false;
+
+ // Check to see if we have the marker (4 characters in a row).
+ for (unsigned i = 1; i != 4; ++i)
+ if (CurPtr[i] != CurPtr[0])
+ return false;
+
+ // If we do have it, search for the end of the conflict marker. This could
+ // fail if it got skipped with a '#if 0' or something. Note that CurPtr might
+ // be the end of conflict marker.
+ if (const char *End = FindConflictEnd(CurPtr, BufferEnd,
+ CurrentConflictMarkerState)) {
+ CurPtr = End;
+
+ // Skip ahead to the end of line.
+ while (CurPtr != BufferEnd && *CurPtr != '\r' && *CurPtr != '\n')
+ ++CurPtr;
+
+ BufferPtr = CurPtr;
+
+ // No longer in the conflict marker.
+ CurrentConflictMarkerState = CMK_None;
+ return true;
+ }
+
+ return false;
+}
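+
+// The kind of input the two routines above detect and skip, using a
+// hypothetical git-style merge leftover (the Perforce-style variant starts
+// with '>>>> ' and ends with '<<<<'):
+//
+//   <<<<<<< HEAD
+//   int x = 1;
+//   =======
+//   int x = 2;
+//   >>>>>>> feature-branch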
+
+bool Lexer::isCodeCompletionPoint(const char *CurPtr) const {
+ if (PP && PP->isCodeCompletionEnabled()) {
+ SourceLocation Loc = FileLoc.getLocWithOffset(CurPtr-BufferStart);
+ return Loc == PP->getCodeCompletionLoc();
+ }
+
+ return false;
+}
+
+
+/// LexTokenInternal - This implements a simple C family lexer. It is an
+/// extremely performance critical piece of code. This assumes that the buffer
+/// has a null character at the end of the file. This returns a preprocessing
+/// token, not a normal token, as such, it is an internal interface. It assumes
+/// that the Flags of result have been cleared before calling this.
+void Lexer::LexTokenInternal(Token &Result) {
+LexNextToken:
+ // New token, can't need cleaning yet.
+ Result.clearFlag(Token::NeedsCleaning);
+ Result.setIdentifierInfo(0);
+
+ // CurPtr - Cache BufferPtr in an automatic variable.
+ const char *CurPtr = BufferPtr;
+
+ // Small amounts of horizontal whitespace are very common between tokens.
+ if ((*CurPtr == ' ') || (*CurPtr == '\t')) {
+ ++CurPtr;
+ while ((*CurPtr == ' ') || (*CurPtr == '\t'))
+ ++CurPtr;
+
+ // If we are keeping whitespace and other tokens, just return what we just
+ // skipped. The next lexer invocation will return the token after the
+ // whitespace.
+ if (isKeepWhitespaceMode()) {
+ FormTokenWithChars(Result, CurPtr, tok::unknown);
+ return;
+ }
+
+ BufferPtr = CurPtr;
+ Result.setFlag(Token::LeadingSpace);
+ }
+
+ unsigned SizeTmp, SizeTmp2; // Temporaries for use in cases below.
+
+ // Read a character, advancing over it.
+ char Char = getAndAdvanceChar(CurPtr, Result);
+ tok::TokenKind Kind;
+
+ switch (Char) {
+ case 0: // Null.
+ // Found end of file?
+ if (CurPtr-1 == BufferEnd) {
+ // Read the PP instance variable into an automatic variable, because
+ // LexEndOfFile will often delete 'this'.
+ Preprocessor *PPCache = PP;
+ if (LexEndOfFile(Result, CurPtr-1)) // Retreat back into the file.
+ return; // Got a token to return.
+ assert(PPCache && "Raw buffer::LexEndOfFile should return a token");
+ return PPCache->Lex(Result);
+ }
+
+ // Check if we are performing code completion.
+ if (isCodeCompletionPoint(CurPtr-1)) {
+ // Return the code-completion token.
+ Result.startToken();
+ FormTokenWithChars(Result, CurPtr, tok::code_completion);
+ return;
+ }
+
+ if (!isLexingRawMode())
+ Diag(CurPtr-1, diag::null_in_file);
+ Result.setFlag(Token::LeadingSpace);
+ if (SkipWhitespace(Result, CurPtr))
+ return; // KeepWhitespaceMode
+
+ goto LexNextToken; // GCC isn't tail call eliminating.
+
+ case 26: // DOS & CP/M EOF: "^Z".
+ // If we're in Microsoft extensions mode, treat this as end of file.
+ if (LangOpts.MicrosoftExt) {
+ // Read the PP instance variable into an automatic variable, because
+ // LexEndOfFile will often delete 'this'.
+ Preprocessor *PPCache = PP;
+ if (LexEndOfFile(Result, CurPtr-1)) // Retreat back into the file.
+ return; // Got a token to return.
+ assert(PPCache && "Raw buffer::LexEndOfFile should return a token");
+ return PPCache->Lex(Result);
+ }
+ // If Microsoft extensions are disabled, this is just random garbage.
+ Kind = tok::unknown;
+ break;
+
+ case '\n':
+ case '\r':
+ // If we are inside a preprocessor directive and we see the end of line,
+ // we know we are done with the directive, so return an EOD token.
+ if (ParsingPreprocessorDirective) {
+ // Done parsing the "line".
+ ParsingPreprocessorDirective = false;
+
+ // Restore comment saving mode, in case it was disabled for directive.
+ SetCommentRetentionState(PP->getCommentRetentionState());
+
+ // Since we consumed a newline, we are back at the start of a line.
+ IsAtStartOfLine = true;
+
+ Kind = tok::eod;
+ break;
+ }
+ // The returned token is at the start of the line.
+ Result.setFlag(Token::StartOfLine);
+ // No leading whitespace seen so far.
+ Result.clearFlag(Token::LeadingSpace);
+
+ if (SkipWhitespace(Result, CurPtr))
+ return; // KeepWhitespaceMode
+ goto LexNextToken; // GCC isn't tail call eliminating.
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\v':
+ SkipHorizontalWhitespace:
+ Result.setFlag(Token::LeadingSpace);
+ if (SkipWhitespace(Result, CurPtr))
+ return; // KeepWhitespaceMode
+
+ SkipIgnoredUnits:
+ CurPtr = BufferPtr;
+
+ // If the next token is obviously a // or /* */ comment, skip it efficiently
+ // too (without going through the big switch stmt).
+ if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() &&
+ LangOpts.BCPLComment && !LangOpts.TraditionalCPP) {
+ if (SkipBCPLComment(Result, CurPtr+2))
+ return; // There is a token to return.
+ goto SkipIgnoredUnits;
+ } else if (CurPtr[0] == '/' && CurPtr[1] == '*' && !inKeepCommentMode()) {
+ if (SkipBlockComment(Result, CurPtr+2))
+ return; // There is a token to return.
+ goto SkipIgnoredUnits;
+ } else if (isHorizontalWhitespace(*CurPtr)) {
+ goto SkipHorizontalWhitespace;
+ }
+ goto LexNextToken; // GCC isn't tail call eliminating.
+
+ // C99 6.4.4.1: Integer Constants.
+ // C99 6.4.4.2: Floating Constants.
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ return LexNumericConstant(Result, CurPtr);
+
+ case 'u': // Identifier (uber) or C++0x UTF-8 or UTF-16 string literal
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+
+ if (LangOpts.CPlusPlus0x) {
+ Char = getCharAndSize(CurPtr, SizeTmp);
+
+ // UTF-16 string literal
+ if (Char == '"')
+ return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
+ tok::utf16_string_literal);
+
+ // UTF-16 character constant
+ if (Char == '\'')
+ return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
+ tok::utf16_char_constant);
+
+ // UTF-16 raw string literal
+ if (Char == 'R' && getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
+ return LexRawStringLiteral(Result,
+ ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result),
+ tok::utf16_string_literal);
+
+ if (Char == '8') {
+ char Char2 = getCharAndSize(CurPtr + SizeTmp, SizeTmp2);
+
+ // UTF-8 string literal
+ if (Char2 == '"')
+ return LexStringLiteral(Result,
+ ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result),
+ tok::utf8_string_literal);
+
+ if (Char2 == 'R') {
+ unsigned SizeTmp3;
+ char Char3 = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3);
+ // UTF-8 raw string literal
+ if (Char3 == '"') {
+ return LexRawStringLiteral(Result,
+ ConsumeChar(ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result),
+ SizeTmp3, Result),
+ tok::utf8_string_literal);
+ }
+ }
+ }
+ }
+
+ // treat u like the start of an identifier.
+ return LexIdentifier(Result, CurPtr);
+
+ case 'U': // Identifier (Uber) or C++0x UTF-32 string literal
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+
+ if (LangOpts.CPlusPlus0x) {
+ Char = getCharAndSize(CurPtr, SizeTmp);
+
+ // UTF-32 string literal
+ if (Char == '"')
+ return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
+ tok::utf32_string_literal);
+
+ // UTF-32 character constant
+ if (Char == '\'')
+ return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
+ tok::utf32_char_constant);
+
+ // UTF-32 raw string literal
+ if (Char == 'R' && getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
+ return LexRawStringLiteral(Result,
+ ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result),
+ tok::utf32_string_literal);
+ }
+
+ // treat U like the start of an identifier.
+ return LexIdentifier(Result, CurPtr);
+
+ case 'R': // Identifier or C++0x raw string literal
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+
+ if (LangOpts.CPlusPlus0x) {
+ Char = getCharAndSize(CurPtr, SizeTmp);
+
+ if (Char == '"')
+ return LexRawStringLiteral(Result,
+ ConsumeChar(CurPtr, SizeTmp, Result),
+ tok::string_literal);
+ }
+
+ // treat R like the start of an identifier.
+ return LexIdentifier(Result, CurPtr);
+
+ case 'L': // Identifier (Loony) or wide literal (L'x' or L"xyz").
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ Char = getCharAndSize(CurPtr, SizeTmp);
+
+ // Wide string literal.
+ if (Char == '"')
+ return LexStringLiteral(Result, ConsumeChar(CurPtr, SizeTmp, Result),
+ tok::wide_string_literal);
+
+ // Wide raw string literal.
+ if (LangOpts.CPlusPlus0x && Char == 'R' &&
+ getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == '"')
+ return LexRawStringLiteral(Result,
+ ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result),
+ tok::wide_string_literal);
+
+ // Wide character constant.
+ if (Char == '\'')
+ return LexCharConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result),
+ tok::wide_char_constant);
+ // FALL THROUGH, treating L like the start of an identifier.
+
+ // C99 6.4.2: Identifiers.
+ case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': case 'G':
+ case 'H': case 'I': case 'J': case 'K': /*'L'*/case 'M': case 'N':
+ case 'O': case 'P': case 'Q': /*'R'*/case 'S': case 'T': /*'U'*/
+ case 'V': case 'W': case 'X': case 'Y': case 'Z':
+ case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': case 'g':
+ case 'h': case 'i': case 'j': case 'k': case 'l': case 'm': case 'n':
+ case 'o': case 'p': case 'q': case 'r': case 's': case 't': /*'u'*/
+ case 'v': case 'w': case 'x': case 'y': case 'z':
+ case '_':
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ return LexIdentifier(Result, CurPtr);
+
+ case '$': // $ in identifiers.
+ if (LangOpts.DollarIdents) {
+ if (!isLexingRawMode())
+ Diag(CurPtr-1, diag::ext_dollar_in_identifier);
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ return LexIdentifier(Result, CurPtr);
+ }
+
+ Kind = tok::unknown;
+ break;
+
+ // C99 6.4.4: Character Constants.
+ case '\'':
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ return LexCharConstant(Result, CurPtr, tok::char_constant);
+
+ // C99 6.4.5: String Literals.
+ case '"':
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+ return LexStringLiteral(Result, CurPtr, tok::string_literal);
+
+ // C99 6.4.6: Punctuators.
+ case '?':
+ Kind = tok::question;
+ break;
+ case '[':
+ Kind = tok::l_square;
+ break;
+ case ']':
+ Kind = tok::r_square;
+ break;
+ case '(':
+ Kind = tok::l_paren;
+ break;
+ case ')':
+ Kind = tok::r_paren;
+ break;
+ case '{':
+ Kind = tok::l_brace;
+ break;
+ case '}':
+ Kind = tok::r_brace;
+ break;
+ case '.':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char >= '0' && Char <= '9') {
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+
+ return LexNumericConstant(Result, ConsumeChar(CurPtr, SizeTmp, Result));
+ } else if (LangOpts.CPlusPlus && Char == '*') {
+ Kind = tok::periodstar;
+ CurPtr += SizeTmp;
+ } else if (Char == '.' &&
+ getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '.') {
+ Kind = tok::ellipsis;
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ } else {
+ Kind = tok::period;
+ }
+ break;
+ case '&':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '&') {
+ Kind = tok::ampamp;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (Char == '=') {
+ Kind = tok::ampequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::amp;
+ }
+ break;
+ case '*':
+ if (getCharAndSize(CurPtr, SizeTmp) == '=') {
+ Kind = tok::starequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::star;
+ }
+ break;
+ case '+':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '+') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::plusplus;
+ } else if (Char == '=') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::plusequal;
+ } else {
+ Kind = tok::plus;
+ }
+ break;
+ case '-':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '-') { // --
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::minusminus;
+ } else if (Char == '>' && LangOpts.CPlusPlus &&
+ getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == '*') { // C++ ->*
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ Kind = tok::arrowstar;
+ } else if (Char == '>') { // ->
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::arrow;
+ } else if (Char == '=') { // -=
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::minusequal;
+ } else {
+ Kind = tok::minus;
+ }
+ break;
+ case '~':
+ Kind = tok::tilde;
+ break;
+ case '!':
+ if (getCharAndSize(CurPtr, SizeTmp) == '=') {
+ Kind = tok::exclaimequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::exclaim;
+ }
+ break;
+ case '/':
+ // 6.4.9: Comments
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '/') { // BCPL comment.
+ // Even if BCPL comments are disabled (e.g. in C89 mode), we generally
+ // want to lex this as a comment. There is one problem with this, though:
+ // in one particular corner case, this can change the behavior of the
+ // resultant program. For example, in "foo //**/ bar", C89 would lex
+ // this as "foo / bar" and languages with BCPL comments would lex it as
+ // "foo". Check to see if the character after the second slash is a '*'.
+ // If so, we will lex that as a "/" instead of the start of a comment.
+ // However, we never do this in -traditional-cpp mode.
+ if ((LangOpts.BCPLComment ||
+ getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*') &&
+ !LangOpts.TraditionalCPP) {
+ if (SkipBCPLComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
+ return; // There is a token to return.
+
+ // It is common for the tokens immediately after a // comment to be
+ // whitespace (indentation for the next line). Instead of going through
+ // the big switch, handle it efficiently now.
+ goto SkipIgnoredUnits;
+ }
+ }
+
+ if (Char == '*') { // /**/ comment.
+ if (SkipBlockComment(Result, ConsumeChar(CurPtr, SizeTmp, Result)))
+ return; // There is a token to return.
+ goto LexNextToken; // GCC isn't tail call eliminating.
+ }
+
+ if (Char == '=') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::slashequal;
+ } else {
+ Kind = tok::slash;
+ }
+ break;
+ case '%':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '=') {
+ Kind = tok::percentequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (LangOpts.Digraphs && Char == '>') {
+ Kind = tok::r_brace; // '%>' -> '}'
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (LangOpts.Digraphs && Char == ':') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '%' && getCharAndSize(CurPtr+SizeTmp, SizeTmp2) == ':') {
+ Kind = tok::hashhash; // '%:%:' -> '##'
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ } else if (Char == '@' && LangOpts.MicrosoftExt) {// %:@ -> #@ -> Charize
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::ext_charize_microsoft);
+ Kind = tok::hashat;
+ } else { // '%:' -> '#'
+ // We parsed a # character. If this occurs at the start of the line,
+ // it's actually the start of a preprocessing directive. Callback to
+ // the preprocessor to handle it.
+ // FIXME: -fpreprocessed mode??
+ if (Result.isAtStartOfLine() && !LexingRawMode && !Is_PragmaLexer) {
+ FormTokenWithChars(Result, CurPtr, tok::hash);
+ PP->HandleDirective(Result);
+
+ // As an optimization, if the preprocessor didn't switch lexers, tail
+ // recurse.
+ if (PP->isCurrentLexer(this)) {
+ // Start a new token. If this is a #include or something, the PP may
+ // want us starting at the beginning of the line again. If so, set
+ // the StartOfLine flag and clear LeadingSpace.
+ if (IsAtStartOfLine) {
+ Result.setFlag(Token::StartOfLine);
+ Result.clearFlag(Token::LeadingSpace);
+ IsAtStartOfLine = false;
+ }
+ goto LexNextToken; // GCC isn't tail call eliminating.
+ }
+
+ return PP->Lex(Result);
+ }
+
+ Kind = tok::hash;
+ }
+ } else {
+ Kind = tok::percent;
+ }
+ break;
+ case '<':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (ParsingFilename) {
+ return LexAngledStringLiteral(Result, CurPtr);
+ } else if (Char == '<') {
+ char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
+ if (After == '=') {
+ Kind = tok::lesslessequal;
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ } else if (After == '<' && IsStartOfConflictMarker(CurPtr-1)) {
+ // If this is actually a '<<<<<<<' version control conflict marker,
+ // recognize it as such and recover nicely.
+ goto LexNextToken;
+ } else if (After == '<' && HandleEndOfConflictMarker(CurPtr-1)) {
+ // If this is '<<<<' and we're in a Perforce-style conflict marker,
+ // ignore it.
+ goto LexNextToken;
+ } else if (LangOpts.CUDA && After == '<') {
+ Kind = tok::lesslessless;
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ } else {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::lessless;
+ }
+ } else if (Char == '=') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::lessequal;
+ } else if (LangOpts.Digraphs && Char == ':') { // '<:' -> '['
+ if (LangOpts.CPlusPlus0x &&
+ getCharAndSize(CurPtr + SizeTmp, SizeTmp2) == ':') {
+ // C++0x [lex.pptoken]p3:
+ // Otherwise, if the next three characters are <:: and the subsequent
+ // character is neither : nor >, the < is treated as a preprocessor
+ // token by itself and not as the first character of the alternative
+ // token <:.
+ unsigned SizeTmp3;
+ char After = getCharAndSize(CurPtr + SizeTmp + SizeTmp2, SizeTmp3);
+ if (After != ':' && After != '>') {
+ Kind = tok::less;
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::warn_cxx98_compat_less_colon_colon);
+ break;
+ }
+ }
+
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::l_square;
+ } else if (LangOpts.Digraphs && Char == '%') { // '<%' -> '{'
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::l_brace;
+ } else {
+ Kind = tok::less;
+ }
+ break;
+ case '>':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '=') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::greaterequal;
+ } else if (Char == '>') {
+ char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
+ if (After == '=') {
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ Kind = tok::greatergreaterequal;
+ } else if (After == '>' && IsStartOfConflictMarker(CurPtr-1)) {
+ // If this is actually a '>>>>' conflict marker, recognize it as such
+ // and recover nicely.
+ goto LexNextToken;
+ } else if (After == '>' && HandleEndOfConflictMarker(CurPtr-1)) {
+ // If this is '>>>>>>>' and we're in a conflict marker, ignore it.
+ goto LexNextToken;
+ } else if (LangOpts.CUDA && After == '>') {
+ Kind = tok::greatergreatergreater;
+ CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
+ SizeTmp2, Result);
+ } else {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::greatergreater;
+ }
+
+ } else {
+ Kind = tok::greater;
+ }
+ break;
+ case '^':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '=') {
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ Kind = tok::caretequal;
+ } else {
+ Kind = tok::caret;
+ }
+ break;
+ case '|':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '=') {
+ Kind = tok::pipeequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (Char == '|') {
+ // If this is '|||||||' and we're in a conflict marker, ignore it.
+ if (CurPtr[1] == '|' && HandleEndOfConflictMarker(CurPtr-1))
+ goto LexNextToken;
+ Kind = tok::pipepipe;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::pipe;
+ }
+ break;
+ case ':':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (LangOpts.Digraphs && Char == '>') {
+ Kind = tok::r_square; // ':>' -> ']'
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (LangOpts.CPlusPlus && Char == ':') {
+ Kind = tok::coloncolon;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::colon;
+ }
+ break;
+ case ';':
+ Kind = tok::semi;
+ break;
+ case '=':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '=') {
+ // If this is '====' and we're in a conflict marker, ignore it.
+ if (CurPtr[1] == '=' && HandleEndOfConflictMarker(CurPtr-1))
+ goto LexNextToken;
+
+ Kind = tok::equalequal;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ Kind = tok::equal;
+ }
+ break;
+ case ',':
+ Kind = tok::comma;
+ break;
+ case '#':
+ Char = getCharAndSize(CurPtr, SizeTmp);
+ if (Char == '#') {
+ Kind = tok::hashhash;
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else if (Char == '@' && LangOpts.MicrosoftExt) { // #@ -> Charize
+ Kind = tok::hashat;
+ if (!isLexingRawMode())
+ Diag(BufferPtr, diag::ext_charize_microsoft);
+ CurPtr = ConsumeChar(CurPtr, SizeTmp, Result);
+ } else {
+ // We parsed a # character. If this occurs at the start of the line,
+ // it's actually the start of a preprocessing directive. Callback to
+ // the preprocessor to handle it.
+ // FIXME: -fpreprocessed mode??
+ if (Result.isAtStartOfLine() && !LexingRawMode && !Is_PragmaLexer) {
+ FormTokenWithChars(Result, CurPtr, tok::hash);
+ PP->HandleDirective(Result);
+
+ // As an optimization, if the preprocessor didn't switch lexers, tail
+ // recurse.
+ if (PP->isCurrentLexer(this)) {
+ // Start a new token. If this is a #include or something, the PP may
+ // want us starting at the beginning of the line again. If so, set
+ // the StartOfLine flag and clear LeadingSpace.
+ if (IsAtStartOfLine) {
+ Result.setFlag(Token::StartOfLine);
+ Result.clearFlag(Token::LeadingSpace);
+ IsAtStartOfLine = false;
+ }
+ goto LexNextToken; // GCC isn't tail call eliminating.
+ }
+ return PP->Lex(Result);
+ }
+
+ Kind = tok::hash;
+ }
+ break;
+
+ case '@':
+ // Objective C support.
+ if (CurPtr[-1] == '@' && LangOpts.ObjC1)
+ Kind = tok::at;
+ else
+ Kind = tok::unknown;
+ break;
+
+ case '\\':
+ // FIXME: UCN's.
+ // FALL THROUGH.
+ default:
+ Kind = tok::unknown;
+ break;
+ }
+
+ // Notify MIOpt that we read a non-whitespace/non-comment token.
+ MIOpt.ReadToken();
+
+ // Update the location of token as well as BufferPtr.
+ FormTokenWithChars(Result, CurPtr, Kind);
+}
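+
+// Illustration of the C++11 <:: rule handled in the '<' case above: in a
+// hypothetical "A<::B> a;", the '<' is lexed as tok::less rather than as the
+// digraph '<:' (which would mean '['), so the template argument ::B parses as
+// intended. If the character after "<::" is ':' or '>', the digraph
+// interpretation is kept.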
diff --git a/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
new file mode 100644
index 0000000..c1d228b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/LiteralSupport.cpp
@@ -0,0 +1,1400 @@
+//===--- LiteralSupport.cpp - Code to parse and process literals ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the NumericLiteralParser, CharLiteralParser, and
+// StringLiteralParser interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/ConvertUTF.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+
+/// HexDigitValue - Return the value of the specified hex digit, or -1 if it's
+/// not valid.
+static int HexDigitValue(char C) {
+ if (C >= '0' && C <= '9') return C-'0';
+ if (C >= 'a' && C <= 'f') return C-'a'+10;
+ if (C >= 'A' && C <= 'F') return C-'A'+10;
+ return -1;
+}
+
+static unsigned getCharWidth(tok::TokenKind kind, const TargetInfo &Target) {
+ switch (kind) {
+ default: llvm_unreachable("Unknown token type!");
+ case tok::char_constant:
+ case tok::string_literal:
+ case tok::utf8_string_literal:
+ return Target.getCharWidth();
+ case tok::wide_char_constant:
+ case tok::wide_string_literal:
+ return Target.getWCharWidth();
+ case tok::utf16_char_constant:
+ case tok::utf16_string_literal:
+ return Target.getChar16Width();
+ case tok::utf32_char_constant:
+ case tok::utf32_string_literal:
+ return Target.getChar32Width();
+ }
+}
+
+/// ProcessCharEscape - Parse a standard C escape sequence, which can occur in
+/// either a character or a string literal.
+static unsigned ProcessCharEscape(const char *&ThisTokBuf,
+ const char *ThisTokEnd, bool &HadError,
+ FullSourceLoc Loc, unsigned CharWidth,
+ DiagnosticsEngine *Diags) {
+ // Skip the '\' char.
+ ++ThisTokBuf;
+
+ // We know that this character can't be off the end of the buffer, because
+ // that would have been \", which would not have been the end of string.
+ unsigned ResultChar = *ThisTokBuf++;
+ switch (ResultChar) {
+ // These map to themselves.
+ case '\\': case '\'': case '"': case '?': break;
+
+ // These have fixed mappings.
+ case 'a':
+ // TODO: K&R: the meaning of '\\a' is different in traditional C
+ ResultChar = 7;
+ break;
+ case 'b':
+ ResultChar = 8;
+ break;
+ case 'e':
+ if (Diags)
+ Diags->Report(Loc, diag::ext_nonstandard_escape) << "e";
+ ResultChar = 27;
+ break;
+ case 'E':
+ if (Diags)
+ Diags->Report(Loc, diag::ext_nonstandard_escape) << "E";
+ ResultChar = 27;
+ break;
+ case 'f':
+ ResultChar = 12;
+ break;
+ case 'n':
+ ResultChar = 10;
+ break;
+ case 'r':
+ ResultChar = 13;
+ break;
+ case 't':
+ ResultChar = 9;
+ break;
+ case 'v':
+ ResultChar = 11;
+ break;
+ case 'x': { // Hex escape.
+ ResultChar = 0;
+ if (ThisTokBuf == ThisTokEnd || !isxdigit(*ThisTokBuf)) {
+ if (Diags)
+ Diags->Report(Loc, diag::err_hex_escape_no_digits);
+ HadError = 1;
+ break;
+ }
+
+ // Hex escapes are a maximal series of hex digits.
+ bool Overflow = false;
+ for (; ThisTokBuf != ThisTokEnd; ++ThisTokBuf) {
+ int CharVal = HexDigitValue(ThisTokBuf[0]);
+ if (CharVal == -1) break;
+ // About to shift out a digit?
+ Overflow |= (ResultChar & 0xF0000000) ? true : false;
+ ResultChar <<= 4;
+ ResultChar |= CharVal;
+ }
+
+ // See if any bits will be truncated when evaluated as a character.
+ if (CharWidth != 32 && (ResultChar >> CharWidth) != 0) {
+ Overflow = true;
+ ResultChar &= ~0U >> (32-CharWidth);
+ }
+
+ // Check for overflow.
+ if (Overflow && Diags) // Too many digits to fit in
+ Diags->Report(Loc, diag::warn_hex_escape_too_large);
+ break;
+ }
+ case '0': case '1': case '2': case '3':
+ case '4': case '5': case '6': case '7': {
+ // Octal escapes.
+ --ThisTokBuf;
+ ResultChar = 0;
+
+ // Octal escapes are a series of octal digits with maximum length 3.
+ // "\0123" is a two digit sequence equal to "\012" "3".
+ unsigned NumDigits = 0;
+ do {
+ ResultChar <<= 3;
+ ResultChar |= *ThisTokBuf++ - '0';
+ ++NumDigits;
+ } while (ThisTokBuf != ThisTokEnd && NumDigits < 3 &&
+ ThisTokBuf[0] >= '0' && ThisTokBuf[0] <= '7');
+
+ // Check for overflow. Reject '\777', but not L'\777'.
+ if (CharWidth != 32 && (ResultChar >> CharWidth) != 0) {
+ if (Diags)
+ Diags->Report(Loc, diag::warn_octal_escape_too_large);
+ ResultChar &= ~0U >> (32-CharWidth);
+ }
+ break;
+ }
+
+ // Otherwise, these are not valid escapes.
+ case '(': case '{': case '[': case '%':
+ // GCC accepts these as extensions. We warn about them as such though.
+ if (Diags)
+ Diags->Report(Loc, diag::ext_nonstandard_escape)
+ << std::string()+(char)ResultChar;
+ break;
+ default:
+ if (Diags == 0)
+ break;
+
+ if (isgraph(ResultChar))
+ Diags->Report(Loc, diag::ext_unknown_escape)
+ << std::string()+(char)ResultChar;
+ else
+ Diags->Report(Loc, diag::ext_unknown_escape)
+ << "x"+llvm::utohexstr(ResultChar);
+ break;
+ }
+
+ return ResultChar;
+}
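+
+// Worked examples of the escape rules above, assuming an 8-bit char target:
+//   "\n"   -> 10
+//   "\x41" -> 0x41 ('A'); "\x441" overflows 8 bits and draws a warning.
+//   "\101" -> 0101 ('A'); "\0123" is two characters, '\012' followed by '3'.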
+
+/// ProcessUCNEscape - Read the Universal Character Name, check constraints and
+/// return the UTF32.
+static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
+ const char *ThisTokEnd,
+ uint32_t &UcnVal, unsigned short &UcnLen,
+ FullSourceLoc Loc, DiagnosticsEngine *Diags,
+ const LangOptions &Features,
+ bool in_char_string_literal = false) {
+ if (!Features.CPlusPlus && !Features.C99 && Diags)
+ Diags->Report(Loc, diag::warn_ucn_not_valid_in_c89);
+
+ const char *UcnBegin = ThisTokBuf;
+
+ // Skip the '\u' or '\U' characters.
+ ThisTokBuf += 2;
+
+ if (ThisTokBuf == ThisTokEnd || !isxdigit(*ThisTokBuf)) {
+ if (Diags)
+ Diags->Report(Loc, diag::err_ucn_escape_no_digits);
+ return false;
+ }
+ UcnLen = (ThisTokBuf[-1] == 'u' ? 4 : 8);
+ unsigned short UcnLenSave = UcnLen;
+ for (; ThisTokBuf != ThisTokEnd && UcnLenSave; ++ThisTokBuf, UcnLenSave--) {
+ int CharVal = HexDigitValue(ThisTokBuf[0]);
+ if (CharVal == -1) break;
+ UcnVal <<= 4;
+ UcnVal |= CharVal;
+ }
+ // If we didn't consume the proper number of digits, there is a problem.
+ if (UcnLenSave) {
+ if (Diags) {
+ SourceLocation L =
+ Lexer::AdvanceToTokenCharacter(Loc, UcnBegin - ThisTokBegin,
+ Loc.getManager(), Features);
+ Diags->Report(L, diag::err_ucn_escape_incomplete);
+ }
+ return false;
+ }
+
+ // Check UCN constraints (C99 6.4.3p2) [C++11 lex.charset p2]
+ if ((0xD800 <= UcnVal && UcnVal <= 0xDFFF) || // surrogate codepoints
+ UcnVal > 0x10FFFF) { // maximum legal UTF32 value
+ if (Diags)
+ Diags->Report(Loc, diag::err_ucn_escape_invalid);
+ return false;
+ }
+
+ // C++11 allows UCNs that refer to control characters and basic source
+ // characters inside character and string literals
+ if (UcnVal < 0xa0 &&
+ (UcnVal != 0x24 && UcnVal != 0x40 && UcnVal != 0x60)) { // $, @, `
+ bool IsError = (!Features.CPlusPlus0x || !in_char_string_literal);
+ if (Diags) {
+ SourceLocation UcnBeginLoc =
+ Lexer::AdvanceToTokenCharacter(Loc, UcnBegin - ThisTokBegin,
+ Loc.getManager(), Features);
+ char BasicSCSChar = UcnVal;
+ if (UcnVal >= 0x20 && UcnVal < 0x7f)
+ Diags->Report(UcnBeginLoc, IsError ? diag::err_ucn_escape_basic_scs :
+ diag::warn_cxx98_compat_literal_ucn_escape_basic_scs)
+ << StringRef(&BasicSCSChar, 1);
+ else
+ Diags->Report(UcnBeginLoc, IsError ? diag::err_ucn_control_character :
+ diag::warn_cxx98_compat_literal_ucn_control_character);
+ }
+ if (IsError)
+ return false;
+ }
+
+ return true;
+}
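+
+// Constraint examples for the checks above:
+//   \u00E9 -> OK (U+00E9)
+//   \uD800 -> error: surrogate code points are never valid UCNs
+//   \u0041 -> basic source character 'A': an error, except inside a C++11
+//             character or string literal, where only a compatibility warning
+//             is emitted
+//   \u00A  -> error: incomplete UCN ('\u' requires exactly four hex digits)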
+
+/// EncodeUCNEscape - Read the Universal Character Name, check constraints and
+/// convert the UTF32 to UTF8 or UTF16. This is a subroutine of
+/// StringLiteralParser. When we decide to implement UCN's for identifiers,
+/// we will likely rework our support for UCN's.
+static void EncodeUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
+ const char *ThisTokEnd,
+ char *&ResultBuf, bool &HadError,
+ FullSourceLoc Loc, unsigned CharByteWidth,
+ DiagnosticsEngine *Diags,
+ const LangOptions &Features) {
+ typedef uint32_t UTF32;
+ UTF32 UcnVal = 0;
+ unsigned short UcnLen = 0;
+ if (!ProcessUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd, UcnVal, UcnLen,
+ Loc, Diags, Features, true)) {
+ HadError = 1;
+ return;
+ }
+
+ assert((CharByteWidth == 1 || CharByteWidth == 2 || CharByteWidth == 4) &&
+ "only character widths of 1, 2, or 4 bytes supported");
+
+ (void)UcnLen;
+ assert((UcnLen == 4 || UcnLen == 8) && "only ucn length of 4 or 8 supported");
+
+ if (CharByteWidth == 4) {
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF32 *ResultPtr = reinterpret_cast<UTF32*>(ResultBuf);
+ *ResultPtr = UcnVal;
+ ResultBuf += 4;
+ return;
+ }
+
+ if (CharByteWidth == 2) {
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF16 *ResultPtr = reinterpret_cast<UTF16*>(ResultBuf);
+
+ if (UcnVal <= (UTF32)0xFFFF) {
+ *ResultPtr = UcnVal;
+ ResultBuf += 2;
+ return;
+ }
+
+ // Convert to UTF16.
+ UcnVal -= 0x10000;
+ *ResultPtr = 0xD800 + (UcnVal >> 10);
+ *(ResultPtr+1) = 0xDC00 + (UcnVal & 0x3FF);
+ ResultBuf += 4;
+ return;
+ }
+
+ assert(CharByteWidth == 1 && "UTF-8 encoding is only for 1 byte characters");
+
+ // Now that we've parsed/checked the UCN, we convert from UTF32->UTF8.
+ // The conversion below was inspired by:
+ // http://www.unicode.org/Public/PROGRAMS/CVTUTF/ConvertUTF.c
+ // First, we determine how many bytes the result will require.
+ typedef uint8_t UTF8;
+
+ unsigned short bytesToWrite = 0;
+ if (UcnVal < (UTF32)0x80)
+ bytesToWrite = 1;
+ else if (UcnVal < (UTF32)0x800)
+ bytesToWrite = 2;
+ else if (UcnVal < (UTF32)0x10000)
+ bytesToWrite = 3;
+ else
+ bytesToWrite = 4;
+
+ const unsigned byteMask = 0xBF;
+ const unsigned byteMark = 0x80;
+
+ // Once the bits are split out into bytes of UTF8, this is a mask OR-ed
+ // into the first byte, depending on how many bytes follow.
+ static const UTF8 firstByteMark[5] = {
+ 0x00, 0x00, 0xC0, 0xE0, 0xF0
+ };
+ // Finally, we write the bytes into ResultBuf.
+ ResultBuf += bytesToWrite;
+ switch (bytesToWrite) { // note: everything falls through.
+ case 4: *--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
+ case 3: *--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
+ case 2: *--ResultBuf = (UTF8)((UcnVal | byteMark) & byteMask); UcnVal >>= 6;
+ case 1: *--ResultBuf = (UTF8) (UcnVal | firstByteMark[bytesToWrite]);
+ }
+ // Update the buffer.
+ ResultBuf += bytesToWrite;
+}
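+
+// Worked examples for the encoders above (encoded code units, in order):
+//   \u00E9     UTF-8: 0xC3 0xA9            UTF-16: 0x00E9
+//   \U0001F600 UTF-8: 0xF0 0x9F 0x98 0x80  UTF-16: 0xD83D 0xDE00 (surrogates)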
+
+
+/// integer-constant: [C99 6.4.4.1]
+/// decimal-constant integer-suffix
+/// octal-constant integer-suffix
+/// hexadecimal-constant integer-suffix
+/// user-defined-integer-literal: [C++11 lex.ext]
+/// decimal-literal ud-suffix
+/// octal-literal ud-suffix
+/// hexadecimal-literal ud-suffix
+/// decimal-constant:
+/// nonzero-digit
+/// decimal-constant digit
+/// octal-constant:
+/// 0
+/// octal-constant octal-digit
+/// hexadecimal-constant:
+/// hexadecimal-prefix hexadecimal-digit
+/// hexadecimal-constant hexadecimal-digit
+/// hexadecimal-prefix: one of
+/// 0x 0X
+/// integer-suffix:
+/// unsigned-suffix [long-suffix]
+/// unsigned-suffix [long-long-suffix]
+/// long-suffix [unsigned-suffix]
+/// long-long-suffix [unsigned-suffix]
+/// nonzero-digit:
+/// 1 2 3 4 5 6 7 8 9
+/// octal-digit:
+/// 0 1 2 3 4 5 6 7
+/// hexadecimal-digit:
+/// 0 1 2 3 4 5 6 7 8 9
+/// a b c d e f
+/// A B C D E F
+/// unsigned-suffix: one of
+/// u U
+/// long-suffix: one of
+/// l L
+/// long-long-suffix: one of
+/// ll LL
+///
+/// floating-constant: [C99 6.4.4.2]
+/// TODO: add rules...
+///
+NumericLiteralParser::
+NumericLiteralParser(const char *begin, const char *end,
+ SourceLocation TokLoc, Preprocessor &pp)
+ : PP(pp), ThisTokBegin(begin), ThisTokEnd(end) {
+
+ // This routine assumes that the range begin/end matches the regex for integer
+ // and FP constants (specifically, the 'pp-number' regex), and assumes that
+ // the byte at "*end" is both valid and not part of the regex. Because of
+ // this, it doesn't have to check for 'overscan' in various places.
+ assert(!isalnum(*end) && *end != '.' && *end != '_' &&
+ "Lexer didn't maximally munch?");
+
+ s = DigitsBegin = begin;
+ saw_exponent = false;
+ saw_period = false;
+ saw_ud_suffix = false;
+ isLong = false;
+ isUnsigned = false;
+ isLongLong = false;
+ isFloat = false;
+ isImaginary = false;
+ isMicrosoftInteger = false;
+ hadError = false;
+
+ if (*s == '0') { // parse radix
+ ParseNumberStartingWithZero(TokLoc);
+ if (hadError)
+ return;
+ } else { // the first digit is non-zero
+ radix = 10;
+ s = SkipDigits(s);
+ if (s == ThisTokEnd) {
+ // Done.
+ } else if (isxdigit(*s) && !(*s == 'e' || *s == 'E')) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-begin),
+ diag::err_invalid_decimal_digit) << StringRef(s, 1);
+ hadError = true;
+ return;
+ } else if (*s == '.') {
+ s++;
+ saw_period = true;
+ s = SkipDigits(s);
+ }
+ if ((*s == 'e' || *s == 'E')) { // exponent
+ const char *Exponent = s;
+ s++;
+ saw_exponent = true;
+ if (*s == '+' || *s == '-') s++; // sign
+ const char *first_non_digit = SkipDigits(s);
+ if (first_non_digit != s) {
+ s = first_non_digit;
+ } else {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-begin),
+ diag::err_exponent_has_no_digits);
+ hadError = true;
+ return;
+ }
+ }
+ }
+
+ SuffixBegin = s;
+
+ // Parse the suffix. At this point we can classify whether we have an FP or
+ // integer constant.
+ bool isFPConstant = isFloatingLiteral();
+
+ // Loop over all of the characters of the suffix. If we see something bad,
+ // we break out of the loop.
+ for (; s != ThisTokEnd; ++s) {
+ switch (*s) {
+ case 'f': // FP Suffix for "float"
+ case 'F':
+ if (!isFPConstant) break; // Error for integer constant.
+ if (isFloat || isLong) break; // FF, LF invalid.
+ isFloat = true;
+ continue; // Success.
+ case 'u':
+ case 'U':
+ if (isFPConstant) break; // Error for floating constant.
+ if (isUnsigned) break; // Cannot be repeated.
+ isUnsigned = true;
+ continue; // Success.
+ case 'l':
+ case 'L':
+ if (isLong || isLongLong) break; // Cannot be repeated.
+ if (isFloat) break; // LF invalid.
+
+ // Check for long long. The L's need to be adjacent and the same case.
+ if (s+1 != ThisTokEnd && s[1] == s[0]) {
+ if (isFPConstant) break; // long long invalid for floats.
+ isLongLong = true;
+ ++s; // Eat both of them.
+ } else {
+ isLong = true;
+ }
+ continue; // Success.
+ case 'i':
+ case 'I':
+ if (PP.getLangOpts().MicrosoftExt) {
+ if (isFPConstant || isLong || isLongLong) break;
+
+ // Allow i8, i16, i32, i64, and i128.
+ if (s + 1 != ThisTokEnd) {
+ switch (s[1]) {
+ case '8':
+ s += 2; // i8 suffix
+ isMicrosoftInteger = true;
+ break;
+ case '1':
+ if (s + 2 == ThisTokEnd) break;
+ if (s[2] == '6') {
+ s += 3; // i16 suffix
+ isMicrosoftInteger = true;
+ }
+ else if (s[2] == '2') {
+ if (s + 3 == ThisTokEnd) break;
+ if (s[3] == '8') {
+ s += 4; // i128 suffix
+ isMicrosoftInteger = true;
+ }
+ }
+ break;
+ case '3':
+ if (s + 2 == ThisTokEnd) break;
+ if (s[2] == '2') {
+ s += 3; // i32 suffix
+ isLong = true;
+ isMicrosoftInteger = true;
+ }
+ break;
+ case '6':
+ if (s + 2 == ThisTokEnd) break;
+ if (s[2] == '4') {
+ s += 3; // i64 suffix
+ isLongLong = true;
+ isMicrosoftInteger = true;
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ }
+ // fall through.
+ case 'j':
+ case 'J':
+ if (isImaginary) break; // Cannot be repeated.
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-begin),
+ diag::ext_imaginary_constant);
+ isImaginary = true;
+ continue; // Success.
+ }
+ // If we reached here, there was an error or a ud-suffix.
+ break;
+ }
+
+ if (s != ThisTokEnd) {
+ if (PP.getLangOpts().CPlusPlus0x && s == SuffixBegin && *s == '_') {
+ // We have a ud-suffix! By C++11 [lex.ext]p10, ud-suffixes not starting
+ // with an '_' are ill-formed.
+ saw_ud_suffix = true;
+ return;
+ }
+
+ // Report an error if there are any.
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, SuffixBegin-begin),
+ isFPConstant ? diag::err_invalid_suffix_float_constant :
+ diag::err_invalid_suffix_integer_constant)
+ << StringRef(SuffixBegin, ThisTokEnd-SuffixBegin);
+ hadError = true;
+ return;
+ }
+}
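+
+// Examples of what the suffix loop above accepts (illustrative inputs only):
+//   123u    -> isUnsigned
+//   0x10ULL -> isUnsigned, isLongLong
+//   1.5f    -> isFloat
+//   42i64   -> isLongLong, isMicrosoftInteger (Microsoft extension)
+//   3.0j    -> isImaginary (GNU extension, diagnosed as such)
+//   40_km   -> saw_ud_suffix (a C++11 user-defined literal suffix)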
+
+/// ParseNumberStartingWithZero - This method is called when the first character
+/// of the number is found to be a zero. This means it is either an octal
+/// number (like '04') or a hex number ('0x123a') a binary number ('0b1010') or
+/// a floating point number (01239.123e4). Eat the prefix, determining the
+/// radix etc.
+void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
+ assert(s[0] == '0' && "Invalid method call");
+ s++;
+
+ // Handle a hex number like 0x1234.
+ if ((*s == 'x' || *s == 'X') && (isxdigit(s[1]) || s[1] == '.')) {
+ s++;
+ radix = 16;
+ DigitsBegin = s;
+ s = SkipHexDigits(s);
+ bool noSignificand = (s == DigitsBegin);
+ if (s == ThisTokEnd) {
+ // Done.
+ } else if (*s == '.') {
+ s++;
+ saw_period = true;
+ const char *floatDigitsBegin = s;
+ s = SkipHexDigits(s);
+ noSignificand &= (floatDigitsBegin == s);
+ }
+
+ if (noSignificand) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
+ diag::err_hexconstant_requires_digits);
+ hadError = true;
+ return;
+ }
+
+ // A binary exponent can appear with or without a '.'. If dotted, the
+ // binary exponent is required.
+ if (*s == 'p' || *s == 'P') {
+ const char *Exponent = s;
+ s++;
+ saw_exponent = true;
+ if (*s == '+' || *s == '-') s++; // sign
+ const char *first_non_digit = SkipDigits(s);
+ if (first_non_digit == s) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
+ diag::err_exponent_has_no_digits);
+ hadError = true;
+ return;
+ }
+ s = first_non_digit;
+
+ if (!PP.getLangOpts().HexFloats)
+ PP.Diag(TokLoc, diag::ext_hexconstant_invalid);
+ } else if (saw_period) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
+ diag::err_hexconstant_requires_exponent);
+ hadError = true;
+ }
+ return;
+ }
+
+ // Handle simple binary numbers 0b01010
+ if (*s == 'b' || *s == 'B') {
+ // 0b101010 is a GCC extension.
+ PP.Diag(TokLoc, diag::ext_binary_literal);
+ ++s;
+ radix = 2;
+ DigitsBegin = s;
+ s = SkipBinaryDigits(s);
+ if (s == ThisTokEnd) {
+ // Done.
+ } else if (isxdigit(*s)) {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
+ diag::err_invalid_binary_digit) << StringRef(s, 1);
+ hadError = true;
+ }
+ // Other suffixes will be diagnosed by the caller.
+ return;
+ }
+
+ // For now, the radix is set to 8. If we discover that we have a
+ // floating point constant, the radix will change to 10. Octal floating
+ // point constants are not permitted (only decimal and hexadecimal).
+ radix = 8;
+ DigitsBegin = s;
+ s = SkipOctalDigits(s);
+ if (s == ThisTokEnd)
+ return; // Done, simple octal number like 01234
+
+ // If we have some other non-octal digit that *is* a decimal digit, see if
+ // this is part of a floating point number like 094.123 or 09e1.
+ if (isdigit(*s)) {
+ const char *EndDecimal = SkipDigits(s);
+ if (EndDecimal[0] == '.' || EndDecimal[0] == 'e' || EndDecimal[0] == 'E') {
+ s = EndDecimal;
+ radix = 10;
+ }
+ }
+
+ // If we have a hex digit other than 'e' (which denotes a FP exponent) then
+ // the code is using an incorrect base.
+ if (isxdigit(*s) && *s != 'e' && *s != 'E') {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, s-ThisTokBegin),
+ diag::err_invalid_octal_digit) << StringRef(s, 1);
+ hadError = true;
+ return;
+ }
+
+ if (*s == '.') {
+ s++;
+ radix = 10;
+ saw_period = true;
+ s = SkipDigits(s); // Skip suffix.
+ }
+ if (*s == 'e' || *s == 'E') { // exponent
+ const char *Exponent = s;
+ s++;
+ radix = 10;
+ saw_exponent = true;
+ if (*s == '+' || *s == '-') s++; // sign
+ const char *first_non_digit = SkipDigits(s);
+ if (first_non_digit != s) {
+ s = first_non_digit;
+ } else {
+ PP.Diag(PP.AdvanceToTokenCharacter(TokLoc, Exponent-ThisTokBegin),
+ diag::err_exponent_has_no_digits);
+ hadError = true;
+ return;
+ }
+ }
+}
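+
+// Classification examples for the zero-prefix handling above:
+//   0x1fp3 -> radix 16, hexadecimal float with a binary exponent
+//   0b1010 -> radix 2 (GCC extension, diagnosed as such)
+//   0755   -> radix 8
+//   09.5   -> radix 10, since the '.' makes this a decimal floating constant
+//   09     -> error: invalid digit '9' in octal constant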
+
+
+/// GetIntegerValue - Convert this numeric literal value to an APInt that
+/// matches Val's input width. If there is an overflow, set Val to the low bits
+/// of the result and return true. Otherwise, return false.
+bool NumericLiteralParser::GetIntegerValue(llvm::APInt &Val) {
+ // Fast path: Compute a conservative bound on the maximum number of
+ // bits per digit in this radix. If we can't possibly overflow a
+ // uint64 based on that bound then do the simple conversion to
+ // integer. This avoids the expensive overflow checking below, and
+ // handles the common cases that matter (small decimal integers and
+ // hex/octal values which don't overflow).
+ unsigned MaxBitsPerDigit = 1;
+ while ((1U << MaxBitsPerDigit) < radix)
+ MaxBitsPerDigit += 1;
+ if ((SuffixBegin - DigitsBegin) * MaxBitsPerDigit <= 64) {
+ uint64_t N = 0;
+ for (s = DigitsBegin; s != SuffixBegin; ++s)
+ N = N*radix + HexDigitValue(*s);
+
+ // This will truncate the value to Val's input width. Simply check
+ // for overflow by comparing.
+ Val = N;
+ return Val.getZExtValue() != N;
+ }
+
+ Val = 0;
+ s = DigitsBegin;
+
+ llvm::APInt RadixVal(Val.getBitWidth(), radix);
+ llvm::APInt CharVal(Val.getBitWidth(), 0);
+ llvm::APInt OldVal = Val;
+
+ bool OverflowOccurred = false;
+ while (s < SuffixBegin) {
+ unsigned C = HexDigitValue(*s++);
+
+ // If this letter is out of bound for this radix, reject it.
+ assert(C < radix && "NumericLiteralParser ctor should have rejected this");
+
+ CharVal = C;
+
+ // Add the digit to the value in the appropriate radix. If adding in digits
+ // made the value smaller, then this overflowed.
+ OldVal = Val;
+
+ // Multiply by radix, did overflow occur on the multiply?
+ Val *= RadixVal;
+ OverflowOccurred |= Val.udiv(RadixVal) != OldVal;
+
+ // Add value, did overflow occur on the value?
+ // (a + b) ult b <=> overflow
+ Val += CharVal;
+ OverflowOccurred |= Val.ult(CharVal);
+ }
+ return OverflowOccurred;
+}
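+
+// The conservative fast-path bound above works out to these digit counts
+// (digits * bits-per-digit <= 64):
+//   radix 2  -> 1 bit per digit  -> up to 64 digits
+//   radix 8  -> 3 bits per digit -> up to 21 digits
+//   radix 10 -> 4 bits per digit -> up to 16 digits
+//   radix 16 -> 4 bits per digit -> up to 16 digits
+// Anything longer falls back to the overflow-checked APInt loop.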
+
+llvm::APFloat::opStatus
+NumericLiteralParser::GetFloatValue(llvm::APFloat &Result) {
+ using llvm::APFloat;
+
+ unsigned n = std::min(SuffixBegin - ThisTokBegin, ThisTokEnd - ThisTokBegin);
+ return Result.convertFromString(StringRef(ThisTokBegin, n),
+ APFloat::rmNearestTiesToEven);
+}
+
+
+/// user-defined-character-literal: [C++11 lex.ext]
+/// character-literal ud-suffix
+/// ud-suffix:
+/// identifier
+/// character-literal: [C++11 lex.ccon]
+/// ' c-char-sequence '
+/// u' c-char-sequence '
+/// U' c-char-sequence '
+/// L' c-char-sequence '
+/// c-char-sequence:
+/// c-char
+/// c-char-sequence c-char
+/// c-char:
+/// any member of the source character set except the single-quote ',
+/// backslash \, or new-line character
+/// escape-sequence
+/// universal-character-name
+/// escape-sequence:
+/// simple-escape-sequence
+/// octal-escape-sequence
+/// hexadecimal-escape-sequence
+/// simple-escape-sequence:
+/// one of \' \" \? \\ \a \b \f \n \r \t \v
+/// octal-escape-sequence:
+/// \ octal-digit
+/// \ octal-digit octal-digit
+/// \ octal-digit octal-digit octal-digit
+/// hexadecimal-escape-sequence:
+/// \x hexadecimal-digit
+/// hexadecimal-escape-sequence hexadecimal-digit
+/// universal-character-name: [C++11 lex.charset]
+/// \u hex-quad
+/// \U hex-quad hex-quad
+/// hex-quad:
+/// hex-digit hex-digit hex-digit hex-digit
+///
+CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
+ SourceLocation Loc, Preprocessor &PP,
+ tok::TokenKind kind) {
+ // At this point we know that the character matches the regex "(L|u|U)?'.*'".
+ HadError = false;
+
+ Kind = kind;
+
+ const char *TokBegin = begin;
+
+ // Skip over wide character determinant.
+ if (Kind != tok::char_constant) {
+ ++begin;
+ }
+
+  // Skip over the opening quote.
+ assert(begin[0] == '\'' && "Invalid token lexed");
+ ++begin;
+
+ // Remove an optional ud-suffix.
+ if (end[-1] != '\'') {
+ const char *UDSuffixEnd = end;
+ do {
+ --end;
+ } while (end[-1] != '\'');
+ UDSuffixBuf.assign(end, UDSuffixEnd);
+ UDSuffixOffset = end - TokBegin;
+ }
+
+ // Trim the ending quote.
+ assert(end != begin && "Invalid token lexed");
+ --end;
+
+  // FIXME: The "Value" is a uint64_t so we can handle char literals of
+  // up to 64 bits.
+  // FIXME: This extensively assumes that 'char' is 8 bits.
+ assert(PP.getTargetInfo().getCharWidth() == 8 &&
+ "Assumes char is 8 bits");
+ assert(PP.getTargetInfo().getIntWidth() <= 64 &&
+ (PP.getTargetInfo().getIntWidth() & 7) == 0 &&
+ "Assumes sizeof(int) on target is <= 64 and a multiple of char");
+ assert(PP.getTargetInfo().getWCharWidth() <= 64 &&
+ "Assumes sizeof(wchar) on target is <= 64");
+
+ SmallVector<uint32_t,4> codepoint_buffer;
+ codepoint_buffer.resize(end-begin);
+ uint32_t *buffer_begin = &codepoint_buffer.front();
+ uint32_t *buffer_end = buffer_begin + codepoint_buffer.size();
+
+ // Unicode escapes representing characters that cannot be correctly
+ // represented in a single code unit are disallowed in character literals
+ // by this implementation.
+ uint32_t largest_character_for_kind;
+ if (tok::wide_char_constant == Kind) {
+ largest_character_for_kind = 0xFFFFFFFFu >> (32-PP.getTargetInfo().getWCharWidth());
+ } else if (tok::utf16_char_constant == Kind) {
+ largest_character_for_kind = 0xFFFF;
+ } else if (tok::utf32_char_constant == Kind) {
+ largest_character_for_kind = 0x10FFFF;
+ } else {
+ largest_character_for_kind = 0x7Fu;
+ }
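+  // Note that char16_t literals are capped at 0xFFFF rather than 0x10FFFF
+  // because a code point above 0xFFFF cannot fit in a single UTF-16 code unit.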
+
+ while (begin!=end) {
+ // Is this a span of non-escape characters?
+ if (begin[0] != '\\') {
+ char const *start = begin;
+ do {
+ ++begin;
+ } while (begin != end && *begin != '\\');
+
+ char const *tmp_in_start = start;
+ uint32_t *tmp_out_start = buffer_begin;
+ ConversionResult res =
+ ConvertUTF8toUTF32(reinterpret_cast<UTF8 const **>(&start),
+ reinterpret_cast<UTF8 const *>(begin),
+ &buffer_begin,buffer_end,strictConversion);
+ if (res!=conversionOK) {
+ // If we see bad encoding for unprefixed character literals, warn and
+ // simply copy the byte values, for compatibility with gcc and
+ // older versions of clang.
+ bool NoErrorOnBadEncoding = isAscii();
+ unsigned Msg = diag::err_bad_character_encoding;
+ if (NoErrorOnBadEncoding)
+ Msg = diag::warn_bad_character_encoding;
+ PP.Diag(Loc, Msg);
+ if (NoErrorOnBadEncoding) {
+ start = tmp_in_start;
+ buffer_begin = tmp_out_start;
+ for ( ; start != begin; ++start, ++buffer_begin)
+ *buffer_begin = static_cast<uint8_t>(*start);
+ } else {
+ HadError = true;
+ }
+ } else {
+ for (; tmp_out_start <buffer_begin; ++tmp_out_start) {
+ if (*tmp_out_start > largest_character_for_kind) {
+ HadError = true;
+ PP.Diag(Loc, diag::err_character_too_large);
+ }
+ }
+ }
+
+ continue;
+ }
+    // Is this a Universal Character Name escape?
+ if (begin[1] == 'u' || begin[1] == 'U') {
+ unsigned short UcnLen = 0;
+ if (!ProcessUCNEscape(TokBegin, begin, end, *buffer_begin, UcnLen,
+ FullSourceLoc(Loc, PP.getSourceManager()),
+ &PP.getDiagnostics(), PP.getLangOpts(),
+ true))
+ {
+ HadError = true;
+ } else if (*buffer_begin > largest_character_for_kind) {
+ HadError = true;
+ PP.Diag(Loc,diag::err_character_too_large);
+ }
+
+ ++buffer_begin;
+ continue;
+ }
+ unsigned CharWidth = getCharWidth(Kind, PP.getTargetInfo());
+ uint64_t result =
+ ProcessCharEscape(begin, end, HadError,
+ FullSourceLoc(Loc,PP.getSourceManager()),
+ CharWidth, &PP.getDiagnostics());
+ *buffer_begin++ = result;
+ }
+
+ unsigned NumCharsSoFar = buffer_begin-&codepoint_buffer.front();
+
+ if (NumCharsSoFar > 1) {
+ if (isWide())
+ PP.Diag(Loc, diag::warn_extraneous_char_constant);
+ else if (isAscii() && NumCharsSoFar == 4)
+ PP.Diag(Loc, diag::ext_four_char_character_literal);
+ else if (isAscii())
+ PP.Diag(Loc, diag::ext_multichar_character_literal);
+ else
+ PP.Diag(Loc, diag::err_multichar_utf_character_literal);
+ IsMultiChar = true;
+ } else
+ IsMultiChar = false;
+
+ llvm::APInt LitVal(PP.getTargetInfo().getIntWidth(), 0);
+
+ // Narrow character literals act as though their value is concatenated
+ // in this implementation, but warn on overflow.
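+  // For example, 'ab' evaluates to ('a' << 8) | 'b'.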
+ bool multi_char_too_long = false;
+ if (isAscii() && isMultiChar()) {
+ LitVal = 0;
+ for (size_t i=0;i<NumCharsSoFar;++i) {
+ // check for enough leading zeros to shift into
+ multi_char_too_long |= (LitVal.countLeadingZeros() < 8);
+ LitVal <<= 8;
+ LitVal = LitVal + (codepoint_buffer[i] & 0xFF);
+ }
+ } else if (NumCharsSoFar > 0) {
+ // otherwise just take the last character
+ LitVal = buffer_begin[-1];
+ }
+
+ if (!HadError && multi_char_too_long) {
+ PP.Diag(Loc,diag::warn_char_constant_too_large);
+ }
+
+ // Transfer the value from APInt to uint64_t
+ Value = LitVal.getZExtValue();
+
+  // If this is a single narrow character, sign extend it (e.g. '\xFF' is "-1")
+  // if 'char' is signed for this target (C99 6.4.4.4p10). Note that multiple
+  // character constants are not sign extended in this implementation:
+  // '\xFF\xFF' = 65535 and '\x0\xFF' = 255, which matches GCC.
+ if (isAscii() && NumCharsSoFar == 1 && (Value & 128) &&
+ PP.getLangOpts().CharIsSigned)
+ Value = (signed char)Value;
+}
+
+
+/// string-literal: [C++0x lex.string]
+/// encoding-prefix " [s-char-sequence] "
+/// encoding-prefix R raw-string
+/// encoding-prefix:
+/// u8
+/// u
+/// U
+/// L
+/// s-char-sequence:
+/// s-char
+/// s-char-sequence s-char
+/// s-char:
+/// any member of the source character set except the double-quote ",
+/// backslash \, or new-line character
+/// escape-sequence
+/// universal-character-name
+/// raw-string:
+/// " d-char-sequence ( r-char-sequence ) d-char-sequence "
+/// r-char-sequence:
+/// r-char
+/// r-char-sequence r-char
+/// r-char:
+/// any member of the source character set, except a right parenthesis )
+/// followed by the initial d-char-sequence (which may be empty)
+/// followed by a double quote ".
+/// d-char-sequence:
+/// d-char
+/// d-char-sequence d-char
+/// d-char:
+/// any member of the basic source character set except:
+/// space, the left parenthesis (, the right parenthesis ),
+/// the backslash \, and the control characters representing horizontal
+/// tab, vertical tab, form feed, and newline.
+/// escape-sequence: [C++0x lex.ccon]
+/// simple-escape-sequence
+/// octal-escape-sequence
+/// hexadecimal-escape-sequence
+/// simple-escape-sequence:
+/// one of \' \" \? \\ \a \b \f \n \r \t \v
+/// octal-escape-sequence:
+/// \ octal-digit
+/// \ octal-digit octal-digit
+/// \ octal-digit octal-digit octal-digit
+/// hexadecimal-escape-sequence:
+/// \x hexadecimal-digit
+/// hexadecimal-escape-sequence hexadecimal-digit
+/// universal-character-name:
+/// \u hex-quad
+/// \U hex-quad hex-quad
+/// hex-quad:
+/// hex-digit hex-digit hex-digit hex-digit
+///
+StringLiteralParser::
+StringLiteralParser(const Token *StringToks, unsigned NumStringToks,
+ Preprocessor &PP, bool Complain)
+ : SM(PP.getSourceManager()), Features(PP.getLangOpts()),
+ Target(PP.getTargetInfo()), Diags(Complain ? &PP.getDiagnostics() : 0),
+ MaxTokenLength(0), SizeBound(0), CharByteWidth(0), Kind(tok::unknown),
+ ResultPtr(ResultBuf.data()), hadError(false), Pascal(false) {
+ init(StringToks, NumStringToks);
+}
+
+void StringLiteralParser::init(const Token *StringToks, unsigned NumStringToks){
+ // The literal token may have come from an invalid source location (e.g. due
+ // to a PCH error), in which case the token length will be 0.
+ if (NumStringToks == 0 || StringToks[0].getLength() < 2) {
+ hadError = true;
+ return;
+ }
+
+  // Scan all of the string portions, remembering the max individual token
+  // length, computing a bound on the concatenated string length, and checking
+  // whether any piece is a wide string. If any of the string portions is a
+  // wide-string literal, the result is a wide-string literal [C99 6.4.5p4].
+ assert(NumStringToks && "expected at least one token");
+ MaxTokenLength = StringToks[0].getLength();
+ assert(StringToks[0].getLength() >= 2 && "literal token is invalid!");
+ SizeBound = StringToks[0].getLength()-2; // -2 for "".
+ Kind = StringToks[0].getKind();
+
+ hadError = false;
+
+ // Implement Translation Phase #6: concatenation of string literals
+  // (C99 5.1.1.2p1). The common case is only one string fragment.
+ for (unsigned i = 1; i != NumStringToks; ++i) {
+ if (StringToks[i].getLength() < 2) {
+ hadError = true;
+ return;
+ }
+
+ // The string could be shorter than this if it needs cleaning, but this is a
+ // reasonable bound, which is all we need.
+ assert(StringToks[i].getLength() >= 2 && "literal token is invalid!");
+ SizeBound += StringToks[i].getLength()-2; // -2 for "".
+
+ // Remember maximum string piece length.
+ if (StringToks[i].getLength() > MaxTokenLength)
+ MaxTokenLength = StringToks[i].getLength();
+
+ // Remember if we see any wide or utf-8/16/32 strings.
+ // Also check for illegal concatenations.
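+    // For example, "a" L"b" becomes a single wide string literal, while
+    // L"a" u"b" is rejected as an unsupported concatenation.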
+ if (StringToks[i].isNot(Kind) && StringToks[i].isNot(tok::string_literal)) {
+ if (isAscii()) {
+ Kind = StringToks[i].getKind();
+ } else {
+ if (Diags)
+ Diags->Report(FullSourceLoc(StringToks[i].getLocation(), SM),
+ diag::err_unsupported_string_concat);
+ hadError = true;
+ }
+ }
+ }
+
+ // Include space for the null terminator.
+ ++SizeBound;
+
+ // TODO: K&R warning: "traditional C rejects string constant concatenation"
+
+ // Get the width in bytes of char/wchar_t/char16_t/char32_t
+ CharByteWidth = getCharWidth(Kind, Target);
+ assert((CharByteWidth & 7) == 0 && "Assumes character size is byte multiple");
+ CharByteWidth /= 8;
+
+ // The output buffer size needs to be large enough to hold wide characters.
+ // This is a worst-case assumption which basically corresponds to L"" "long".
+ SizeBound *= CharByteWidth;
+
+ // Size the temporary buffer to hold the result string data.
+ ResultBuf.resize(SizeBound);
+
+ // Likewise, but for each string piece.
+ SmallString<512> TokenBuf;
+ TokenBuf.resize(MaxTokenLength);
+
+ // Loop over all the strings, getting their spelling, and expanding them to
+ // wide strings as appropriate.
+ ResultPtr = &ResultBuf[0]; // Next byte to fill in.
+
+ Pascal = false;
+
+ SourceLocation UDSuffixTokLoc;
+
+ for (unsigned i = 0, e = NumStringToks; i != e; ++i) {
+ const char *ThisTokBuf = &TokenBuf[0];
+ // Get the spelling of the token, which eliminates trigraphs, etc. We know
+ // that ThisTokBuf points to a buffer that is big enough for the whole token
+ // and 'spelled' tokens can only shrink.
+ bool StringInvalid = false;
+ unsigned ThisTokLen =
+ Lexer::getSpelling(StringToks[i], ThisTokBuf, SM, Features,
+ &StringInvalid);
+ if (StringInvalid) {
+ hadError = true;
+ continue;
+ }
+
+ const char *ThisTokBegin = ThisTokBuf;
+ const char *ThisTokEnd = ThisTokBuf+ThisTokLen;
+
+ // Remove an optional ud-suffix.
+ if (ThisTokEnd[-1] != '"') {
+ const char *UDSuffixEnd = ThisTokEnd;
+ do {
+ --ThisTokEnd;
+ } while (ThisTokEnd[-1] != '"');
+
+ StringRef UDSuffix(ThisTokEnd, UDSuffixEnd - ThisTokEnd);
+
+ if (UDSuffixBuf.empty()) {
+ UDSuffixBuf.assign(UDSuffix);
+ UDSuffixToken = i;
+ UDSuffixOffset = ThisTokEnd - ThisTokBuf;
+ UDSuffixTokLoc = StringToks[i].getLocation();
+ } else if (!UDSuffixBuf.equals(UDSuffix)) {
+ // C++11 [lex.ext]p8: At the end of phase 6, if a string literal is the
+ // result of a concatenation involving at least one user-defined-string-
+ // literal, all the participating user-defined-string-literals shall
+ // have the same ud-suffix.
+ if (Diags) {
+ SourceLocation TokLoc = StringToks[i].getLocation();
+ Diags->Report(TokLoc, diag::err_string_concat_mixed_suffix)
+ << UDSuffixBuf << UDSuffix
+ << SourceRange(UDSuffixTokLoc, UDSuffixTokLoc)
+ << SourceRange(TokLoc, TokLoc);
+ }
+ hadError = true;
+ }
+ }
+
+ // Strip the end quote.
+ --ThisTokEnd;
+
+ // TODO: Input character set mapping support.
+
+ // Skip marker for wide or unicode strings.
+ if (ThisTokBuf[0] == 'L' || ThisTokBuf[0] == 'u' || ThisTokBuf[0] == 'U') {
+ ++ThisTokBuf;
+ // Skip 8 of u8 marker for utf8 strings.
+ if (ThisTokBuf[0] == '8')
+ ++ThisTokBuf;
+ }
+
+ // Check for raw string
+ if (ThisTokBuf[0] == 'R') {
+ ThisTokBuf += 2; // skip R"
+
+ const char *Prefix = ThisTokBuf;
+ while (ThisTokBuf[0] != '(')
+ ++ThisTokBuf;
+ ++ThisTokBuf; // skip '('
+
+ // Remove same number of characters from the end
+ ThisTokEnd -= ThisTokBuf - Prefix;
+ assert(ThisTokEnd >= ThisTokBuf && "malformed raw string literal");
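+      // For example, for R"xyz(text)xyz" this skips the leading 'xyz(' and
+      // drops the trailing ')xyz', leaving only the raw contents in between.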
+
+ // Copy the string over
+ if (CopyStringFragment(StringRef(ThisTokBuf, ThisTokEnd - ThisTokBuf)))
+ if (DiagnoseBadString(StringToks[i]))
+ hadError = true;
+ } else {
+ assert(ThisTokBuf[0] == '"' && "Expected quote, lexer broken?");
+ ++ThisTokBuf; // skip "
+
+ // Check if this is a pascal string
+ if (Features.PascalStrings && ThisTokBuf + 1 != ThisTokEnd &&
+ ThisTokBuf[0] == '\\' && ThisTokBuf[1] == 'p') {
+
+ // If the \p sequence is found in the first token, we have a pascal string
+ // Otherwise, if we already have a pascal string, ignore the first \p
+ if (i == 0) {
+ ++ThisTokBuf;
+ Pascal = true;
+ } else if (Pascal)
+ ThisTokBuf += 2;
+ }
+
+ while (ThisTokBuf != ThisTokEnd) {
+ // Is this a span of non-escape characters?
+ if (ThisTokBuf[0] != '\\') {
+ const char *InStart = ThisTokBuf;
+ do {
+ ++ThisTokBuf;
+ } while (ThisTokBuf != ThisTokEnd && ThisTokBuf[0] != '\\');
+
+ // Copy the character span over.
+ if (CopyStringFragment(StringRef(InStart, ThisTokBuf - InStart)))
+ if (DiagnoseBadString(StringToks[i]))
+ hadError = true;
+ continue;
+ }
+ // Is this a Universal Character Name escape?
+ if (ThisTokBuf[1] == 'u' || ThisTokBuf[1] == 'U') {
+ EncodeUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd,
+ ResultPtr, hadError,
+ FullSourceLoc(StringToks[i].getLocation(), SM),
+ CharByteWidth, Diags, Features);
+ continue;
+ }
+ // Otherwise, this is a non-UCN escape character. Process it.
+ unsigned ResultChar =
+ ProcessCharEscape(ThisTokBuf, ThisTokEnd, hadError,
+ FullSourceLoc(StringToks[i].getLocation(), SM),
+ CharByteWidth*8, Diags);
+
+ if (CharByteWidth == 4) {
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF32 *ResultWidePtr = reinterpret_cast<UTF32*>(ResultPtr);
+ *ResultWidePtr = ResultChar;
+ ResultPtr += 4;
+ } else if (CharByteWidth == 2) {
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF16 *ResultWidePtr = reinterpret_cast<UTF16*>(ResultPtr);
+ *ResultWidePtr = ResultChar & 0xFFFF;
+ ResultPtr += 2;
+ } else {
+ assert(CharByteWidth == 1 && "Unexpected char width");
+ *ResultPtr++ = ResultChar & 0xFF;
+ }
+ }
+ }
+ }
+
+ if (Pascal) {
+ if (CharByteWidth == 4) {
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF32 *ResultWidePtr = reinterpret_cast<UTF32*>(ResultBuf.data());
+ ResultWidePtr[0] = GetNumStringChars() - 1;
+ } else if (CharByteWidth == 2) {
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF16 *ResultWidePtr = reinterpret_cast<UTF16*>(ResultBuf.data());
+ ResultWidePtr[0] = GetNumStringChars() - 1;
+ } else {
+ assert(CharByteWidth == 1 && "Unexpected char width");
+ ResultBuf[0] = GetNumStringChars() - 1;
+ }
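+    // The first code unit of the buffer now holds the character count,
+    // excluding the length element itself, as Pascal strings require.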
+
+ // Verify that pascal strings aren't too large.
+ if (GetStringLength() > 256) {
+ if (Diags)
+ Diags->Report(FullSourceLoc(StringToks[0].getLocation(), SM),
+ diag::err_pascal_string_too_long)
+ << SourceRange(StringToks[0].getLocation(),
+ StringToks[NumStringToks-1].getLocation());
+ hadError = true;
+ return;
+ }
+ } else if (Diags) {
+ // Complain if this string literal has too many characters.
+ unsigned MaxChars = Features.CPlusPlus? 65536 : Features.C99 ? 4095 : 509;
+
+ if (GetNumStringChars() > MaxChars)
+ Diags->Report(FullSourceLoc(StringToks[0].getLocation(), SM),
+ diag::ext_string_too_long)
+ << GetNumStringChars() << MaxChars
+ << (Features.CPlusPlus ? 2 : Features.C99 ? 1 : 0)
+ << SourceRange(StringToks[0].getLocation(),
+ StringToks[NumStringToks-1].getLocation());
+ }
+}
+
+
+/// CopyStringFragment - This function copies the given Fragment into ResultPtr,
+/// performing widening for multi-byte characters.
+bool StringLiteralParser::CopyStringFragment(StringRef Fragment) {
+ assert(CharByteWidth==1 || CharByteWidth==2 || CharByteWidth==4);
+ ConversionResult result = conversionOK;
+ // Copy the character span over.
+ if (CharByteWidth == 1) {
+ if (!isLegalUTF8String(reinterpret_cast<const UTF8*>(Fragment.begin()),
+ reinterpret_cast<const UTF8*>(Fragment.end())))
+ result = sourceIllegal;
+ memcpy(ResultPtr, Fragment.data(), Fragment.size());
+ ResultPtr += Fragment.size();
+ } else if (CharByteWidth == 2) {
+ UTF8 const *sourceStart = (UTF8 const *)Fragment.data();
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF16 *targetStart = reinterpret_cast<UTF16*>(ResultPtr);
+ ConversionFlags flags = strictConversion;
+ result = ConvertUTF8toUTF16(
+ &sourceStart,sourceStart + Fragment.size(),
+ &targetStart,targetStart + 2*Fragment.size(),flags);
+ if (result==conversionOK)
+ ResultPtr = reinterpret_cast<char*>(targetStart);
+ } else if (CharByteWidth == 4) {
+ UTF8 const *sourceStart = (UTF8 const *)Fragment.data();
+ // FIXME: Make the type of the result buffer correct instead of
+ // using reinterpret_cast.
+ UTF32 *targetStart = reinterpret_cast<UTF32*>(ResultPtr);
+ ConversionFlags flags = strictConversion;
+ result = ConvertUTF8toUTF32(
+ &sourceStart,sourceStart + Fragment.size(),
+ &targetStart,targetStart + 4*Fragment.size(),flags);
+ if (result==conversionOK)
+ ResultPtr = reinterpret_cast<char*>(targetStart);
+ }
+ assert((result != targetExhausted)
+ && "ConvertUTF8toUTFXX exhausted target buffer");
+ return result != conversionOK;
+}
+
+bool StringLiteralParser::DiagnoseBadString(const Token &Tok) {
+ // If we see bad encoding for unprefixed string literals, warn and
+ // simply copy the byte values, for compatibility with gcc and older
+ // versions of clang.
+ bool NoErrorOnBadEncoding = isAscii();
+ unsigned Msg = NoErrorOnBadEncoding ? diag::warn_bad_string_encoding :
+ diag::err_bad_string_encoding;
+ if (Diags)
+ Diags->Report(FullSourceLoc(Tok.getLocation(), SM), Msg);
+ return !NoErrorOnBadEncoding;
+}
+
+/// getOffsetOfStringByte - This function returns the offset of the
+/// specified byte of the string data represented by Token. This handles
+/// advancing over escape sequences in the string.
+unsigned StringLiteralParser::getOffsetOfStringByte(const Token &Tok,
+ unsigned ByteNo) const {
+ // Get the spelling of the token.
+ SmallString<32> SpellingBuffer;
+ SpellingBuffer.resize(Tok.getLength());
+
+ bool StringInvalid = false;
+ const char *SpellingPtr = &SpellingBuffer[0];
+ unsigned TokLen = Lexer::getSpelling(Tok, SpellingPtr, SM, Features,
+ &StringInvalid);
+ if (StringInvalid)
+ return 0;
+
+ assert(SpellingPtr[0] != 'L' && SpellingPtr[0] != 'u' &&
+ SpellingPtr[0] != 'U' && "Doesn't handle wide or utf strings yet");
+
+
+ const char *SpellingStart = SpellingPtr;
+ const char *SpellingEnd = SpellingPtr+TokLen;
+
+ // Skip over the leading quote.
+ assert(SpellingPtr[0] == '"' && "Should be a string literal!");
+ ++SpellingPtr;
+
+ // Skip over bytes until we find the offset we're looking for.
+ while (ByteNo) {
+ assert(SpellingPtr < SpellingEnd && "Didn't find byte offset!");
+
+ // Step over non-escapes simply.
+ if (*SpellingPtr != '\\') {
+ ++SpellingPtr;
+ --ByteNo;
+ continue;
+ }
+
+ // Otherwise, this is an escape character. Advance over it.
+ bool HadError = false;
+ ProcessCharEscape(SpellingPtr, SpellingEnd, HadError,
+ FullSourceLoc(Tok.getLocation(), SM),
+ CharByteWidth*8, Diags);
+ assert(!HadError && "This method isn't valid on erroneous strings");
+ --ByteNo;
+ }
+
+ return SpellingPtr-SpellingStart;
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp b/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp
new file mode 100644
index 0000000..e2b251a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/MacroArgs.cpp
@@ -0,0 +1,317 @@
+//===--- MacroArgs.cpp - Formal argument info for Macros ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the MacroArgs interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MacroArgs.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include <algorithm>
+
+using namespace clang;
+
+/// MacroArgs ctor function - Create a new MacroArgs object with the specified
+/// macro and argument info.
+MacroArgs *MacroArgs::create(const MacroInfo *MI,
+ llvm::ArrayRef<Token> UnexpArgTokens,
+ bool VarargsElided, Preprocessor &PP) {
+ assert(MI->isFunctionLike() &&
+ "Can't have args for an object-like macro!");
+ MacroArgs **ResultEnt = 0;
+ unsigned ClosestMatch = ~0U;
+
+ // See if we have an entry with a big enough argument list to reuse on the
+ // free list. If so, reuse it.
+ for (MacroArgs **Entry = &PP.MacroArgCache; *Entry;
+ Entry = &(*Entry)->ArgCache)
+ if ((*Entry)->NumUnexpArgTokens >= UnexpArgTokens.size() &&
+ (*Entry)->NumUnexpArgTokens < ClosestMatch) {
+ ResultEnt = Entry;
+
+ // If we have an exact match, use it.
+ if ((*Entry)->NumUnexpArgTokens == UnexpArgTokens.size())
+ break;
+ // Otherwise, use the best fit.
+ ClosestMatch = (*Entry)->NumUnexpArgTokens;
+ }
+
+ MacroArgs *Result;
+ if (ResultEnt == 0) {
+ // Allocate memory for a MacroArgs object with the lexer tokens at the end.
+ Result = (MacroArgs*)malloc(sizeof(MacroArgs) +
+ UnexpArgTokens.size() * sizeof(Token));
+ // Construct the MacroArgs object.
+ new (Result) MacroArgs(UnexpArgTokens.size(), VarargsElided);
+ } else {
+ Result = *ResultEnt;
+    // Unlink this node from the preprocessor's singly linked list.
+ *ResultEnt = Result->ArgCache;
+ Result->NumUnexpArgTokens = UnexpArgTokens.size();
+ Result->VarargsElided = VarargsElided;
+ }
+
+ // Copy the actual unexpanded tokens to immediately after the result ptr.
+ if (!UnexpArgTokens.empty())
+ std::copy(UnexpArgTokens.begin(), UnexpArgTokens.end(),
+ const_cast<Token*>(Result->getUnexpArgument(0)));
+
+ return Result;
+}
+
+/// destroy - Destroy and deallocate the memory for this object.
+///
+void MacroArgs::destroy(Preprocessor &PP) {
+ StringifiedArgs.clear();
+
+ // Don't clear PreExpArgTokens, just clear the entries. Clearing the entries
+ // would deallocate the element vectors.
+ for (unsigned i = 0, e = PreExpArgTokens.size(); i != e; ++i)
+ PreExpArgTokens[i].clear();
+
+ // Add this to the preprocessor's free list.
+ ArgCache = PP.MacroArgCache;
+ PP.MacroArgCache = this;
+}
+
+/// deallocate - This should only be called by the Preprocessor when managing
+/// its freelist.
+MacroArgs *MacroArgs::deallocate() {
+ MacroArgs *Next = ArgCache;
+
+ // Run the dtor to deallocate the vectors.
+ this->~MacroArgs();
+ // Release the memory for the object.
+ free(this);
+
+ return Next;
+}
+
+
+/// getArgLength - Given a pointer to an expanded or unexpanded argument,
+/// return the number of tokens, not counting the EOF, that make up the
+/// argument.
+unsigned MacroArgs::getArgLength(const Token *ArgPtr) {
+ unsigned NumArgTokens = 0;
+ for (; ArgPtr->isNot(tok::eof); ++ArgPtr)
+ ++NumArgTokens;
+ return NumArgTokens;
+}
+
+
+/// getUnexpArgument - Return the unexpanded tokens for the specified formal.
+///
+const Token *MacroArgs::getUnexpArgument(unsigned Arg) const {
+ // The unexpanded argument tokens start immediately after the MacroArgs object
+ // in memory.
+ const Token *Start = (const Token *)(this+1);
+ const Token *Result = Start;
+ // Scan to find Arg.
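+  // Arguments are stored back to back, each terminated by an eof token, so
+  // skipping 'Arg' eof markers lands on the first token of the requested one.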
+ for (; Arg; ++Result) {
+ assert(Result < Start+NumUnexpArgTokens && "Invalid arg #");
+ if (Result->is(tok::eof))
+ --Arg;
+ }
+ assert(Result < Start+NumUnexpArgTokens && "Invalid arg #");
+ return Result;
+}
+
+
+/// ArgNeedsPreexpansion - If we can prove that the argument won't be affected
+/// by pre-expansion, return false. Otherwise, conservatively return true.
+bool MacroArgs::ArgNeedsPreexpansion(const Token *ArgTok,
+ Preprocessor &PP) const {
+ // If there are no identifiers in the argument list, or if the identifiers are
+ // known to not be macros, pre-expansion won't modify it.
+ for (; ArgTok->isNot(tok::eof); ++ArgTok)
+ if (IdentifierInfo *II = ArgTok->getIdentifierInfo()) {
+ if (II->hasMacroDefinition() && PP.getMacroInfo(II)->isEnabled())
+ // Return true even though the macro could be a function-like macro
+ // without a following '(' token.
+ return true;
+ }
+ return false;
+}
+
+/// getPreExpArgument - Return the pre-expanded form of the specified
+/// argument.
+const std::vector<Token> &
+MacroArgs::getPreExpArgument(unsigned Arg, const MacroInfo *MI,
+ Preprocessor &PP) {
+ assert(Arg < MI->getNumArgs() && "Invalid argument number!");
+
+  // Make sure we have space for this argument's pre-expanded tokens.
+ if (PreExpArgTokens.size() < MI->getNumArgs())
+ PreExpArgTokens.resize(MI->getNumArgs());
+
+ std::vector<Token> &Result = PreExpArgTokens[Arg];
+ if (!Result.empty()) return Result;
+
+ SaveAndRestore<bool> PreExpandingMacroArgs(PP.InMacroArgPreExpansion, true);
+
+ const Token *AT = getUnexpArgument(Arg);
+ unsigned NumToks = getArgLength(AT)+1; // Include the EOF.
+
+ // Otherwise, we have to pre-expand this argument, populating Result. To do
+ // this, we set up a fake TokenLexer to lex from the unexpanded argument
+ // list. With this installed, we lex expanded tokens until we hit the EOF
+ // token at the end of the unexp list.
+ PP.EnterTokenStream(AT, NumToks, false /*disable expand*/,
+ false /*owns tokens*/);
+
+ // Lex all of the macro-expanded tokens into Result.
+ do {
+ Result.push_back(Token());
+ Token &Tok = Result.back();
+ PP.Lex(Tok);
+ } while (Result.back().isNot(tok::eof));
+
+ // Pop the token stream off the top of the stack. We know that the internal
+ // pointer inside of it is to the "end" of the token stream, but the stack
+ // will not otherwise be popped until the next token is lexed. The problem is
+ // that the token may be lexed sometime after the vector of tokens itself is
+ // destroyed, which would be badness.
+ if (PP.InCachingLexMode())
+ PP.ExitCachingLexMode();
+ PP.RemoveTopOfLexerStack();
+ return Result;
+}
+
+
+/// StringifyArgument - Implement C99 6.10.3.2p2, converting a sequence of
+/// tokens into the literal string token that should be produced by the C #
+/// preprocessor operator. If Charify is true, then it should be turned into
+/// a character literal for the Microsoft charize (#@) extension.
+///
+Token MacroArgs::StringifyArgument(const Token *ArgToks,
+ Preprocessor &PP, bool Charify,
+ SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd) {
+ Token Tok;
+ Tok.startToken();
+ Tok.setKind(Charify ? tok::char_constant : tok::string_literal);
+
+ const Token *ArgTokStart = ArgToks;
+
+ // Stringify all the tokens.
+ SmallString<128> Result;
+ Result += "\"";
+
+ bool isFirst = true;
+ for (; ArgToks->isNot(tok::eof); ++ArgToks) {
+ const Token &Tok = *ArgToks;
+ if (!isFirst && (Tok.hasLeadingSpace() || Tok.isAtStartOfLine()))
+ Result += ' ';
+ isFirst = false;
+
+ // If this is a string or character constant, escape the token as specified
+ // by 6.10.3.2p2.
+ if (Tok.is(tok::string_literal) || // "foo"
+ Tok.is(tok::wide_string_literal) || // L"foo"
+ Tok.is(tok::utf8_string_literal) || // u8"foo"
+ Tok.is(tok::utf16_string_literal) || // u"foo"
+ Tok.is(tok::utf32_string_literal) || // U"foo"
+ Tok.is(tok::char_constant) || // 'x'
+ Tok.is(tok::wide_char_constant) || // L'x'.
+ Tok.is(tok::utf16_char_constant) || // u'x'.
+ Tok.is(tok::utf32_char_constant)) { // U'x'.
+ bool Invalid = false;
+ std::string TokStr = PP.getSpelling(Tok, &Invalid);
+ if (!Invalid) {
+ std::string Str = Lexer::Stringify(TokStr);
+ Result.append(Str.begin(), Str.end());
+ }
+ } else if (Tok.is(tok::code_completion)) {
+ PP.CodeCompleteNaturalLanguage();
+ } else {
+ // Otherwise, just append the token. Do some gymnastics to get the token
+ // in place and avoid copies where possible.
+ unsigned CurStrLen = Result.size();
+ Result.resize(CurStrLen+Tok.getLength());
+ const char *BufPtr = &Result[CurStrLen];
+ bool Invalid = false;
+ unsigned ActualTokLen = PP.getSpelling(Tok, BufPtr, &Invalid);
+
+ if (!Invalid) {
+ // If getSpelling returned a pointer to an already uniqued version of
+ // the string instead of filling in BufPtr, memcpy it onto our string.
+ if (BufPtr != &Result[CurStrLen])
+ memcpy(&Result[CurStrLen], BufPtr, ActualTokLen);
+
+ // If the token was dirty, the spelling may be shorter than the token.
+ if (ActualTokLen != Tok.getLength())
+ Result.resize(CurStrLen+ActualTokLen);
+ }
+ }
+ }
+
+ // If the last character of the string is a \, and if it isn't escaped, this
+ // is an invalid string literal, diagnose it as specified in C99.
+ if (Result.back() == '\\') {
+    // Count the number of consecutive \ characters. If even, then they are
+ // just escaped backslashes, otherwise it's an error.
+ unsigned FirstNonSlash = Result.size()-2;
+ // Guaranteed to find the starting " if nothing else.
+ while (Result[FirstNonSlash] == '\\')
+ --FirstNonSlash;
+ if ((Result.size()-1-FirstNonSlash) & 1) {
+ // Diagnose errors for things like: #define F(X) #X / F(\)
+ PP.Diag(ArgToks[-1], diag::pp_invalid_string_literal);
+ Result.pop_back(); // remove one of the \'s.
+ }
+ }
+ Result += '"';
+
+ // If this is the charify operation and the result is not a legal character
+ // constant, diagnose it.
+ if (Charify) {
+ // First step, turn double quotes into single quotes:
+ Result[0] = '\'';
+ Result[Result.size()-1] = '\'';
+
+ // Check for bogus character.
+ bool isBad = false;
+ if (Result.size() == 3)
+ isBad = Result[1] == '\''; // ''' is not legal. '\' already fixed above.
+ else
+ isBad = (Result.size() != 4 || Result[1] != '\\'); // Not '\x'
+
+ if (isBad) {
+ PP.Diag(ArgTokStart[0], diag::err_invalid_character_to_charify);
+ Result = "' '"; // Use something arbitrary, but legal.
+ }
+ }
+
+ PP.CreateString(&Result[0], Result.size(), Tok,
+ ExpansionLocStart, ExpansionLocEnd);
+ return Tok;
+}
+
+/// getStringifiedArgument - Compute, cache, and return the specified argument
+/// that has been 'stringified' as required by the # operator.
+const Token &MacroArgs::getStringifiedArgument(unsigned ArgNo,
+ Preprocessor &PP,
+ SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd) {
+ assert(ArgNo < NumUnexpArgTokens && "Invalid argument number!");
+ if (StringifiedArgs.empty()) {
+ StringifiedArgs.resize(getNumArguments());
+ memset((void*)&StringifiedArgs[0], 0,
+ sizeof(StringifiedArgs[0])*getNumArguments());
+ }
+ if (StringifiedArgs[ArgNo].isNot(tok::string_literal))
+ StringifiedArgs[ArgNo] = StringifyArgument(getUnexpArgument(ArgNo), PP,
+ /*Charify=*/false,
+ ExpansionLocStart,
+ ExpansionLocEnd);
+ return StringifiedArgs[ArgNo];
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/MacroArgs.h b/contrib/llvm/tools/clang/lib/Lex/MacroArgs.h
new file mode 100644
index 0000000..cf86d71
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/MacroArgs.h
@@ -0,0 +1,125 @@
+//===--- MacroArgs.h - Formal argument info for Macros ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MacroArgs interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_MACROARGS_H
+#define LLVM_CLANG_MACROARGS_H
+
+#include "llvm/ADT/ArrayRef.h"
+
+#include <vector>
+
+namespace clang {
+ class MacroInfo;
+ class Preprocessor;
+ class Token;
+ class SourceLocation;
+
+/// MacroArgs - An instance of this class captures information about
+/// the formal arguments specified to a function-like macro invocation.
+class MacroArgs {
+ /// NumUnexpArgTokens - The number of raw, unexpanded tokens for the
+ /// arguments. All of the actual argument tokens are allocated immediately
+ /// after the MacroArgs object in memory. This is all of the arguments
+ /// concatenated together, with 'EOF' markers at the end of each argument.
+ unsigned NumUnexpArgTokens;
+
+ /// VarargsElided - True if this is a C99 style varargs macro invocation and
+ /// there was no argument specified for the "..." argument. If the argument
+ /// was specified (even empty) or this isn't a C99 style varargs function, or
+ /// if in strict mode and the C99 varargs macro had only a ... argument, this
+ /// is false.
+ bool VarargsElided;
+
+ /// PreExpArgTokens - Pre-expanded tokens for arguments that need them. Empty
+ /// if not yet computed. This includes the EOF marker at the end of the
+ /// stream.
+ std::vector<std::vector<Token> > PreExpArgTokens;
+
+ /// StringifiedArgs - This contains arguments in 'stringified' form. If the
+ /// stringified form of an argument has not yet been computed, this is empty.
+ std::vector<Token> StringifiedArgs;
+
+ /// ArgCache - This is a linked list of MacroArgs objects that the
+ /// Preprocessor owns which we use to avoid thrashing malloc/free.
+ MacroArgs *ArgCache;
+
+ MacroArgs(unsigned NumToks, bool varargsElided)
+ : NumUnexpArgTokens(NumToks), VarargsElided(varargsElided), ArgCache(0) {}
+ ~MacroArgs() {}
+public:
+ /// MacroArgs ctor function - Create a new MacroArgs object with the specified
+ /// macro and argument info.
+ static MacroArgs *create(const MacroInfo *MI,
+ llvm::ArrayRef<Token> UnexpArgTokens,
+ bool VarargsElided, Preprocessor &PP);
+
+ /// destroy - Destroy and deallocate the memory for this object.
+ ///
+ void destroy(Preprocessor &PP);
+
+ /// ArgNeedsPreexpansion - If we can prove that the argument won't be affected
+ /// by pre-expansion, return false. Otherwise, conservatively return true.
+ bool ArgNeedsPreexpansion(const Token *ArgTok, Preprocessor &PP) const;
+
+ /// getUnexpArgument - Return a pointer to the first token of the unexpanded
+ /// token list for the specified formal.
+ ///
+ const Token *getUnexpArgument(unsigned Arg) const;
+
+ /// getArgLength - Given a pointer to an expanded or unexpanded argument,
+ /// return the number of tokens, not counting the EOF, that make up the
+ /// argument.
+ static unsigned getArgLength(const Token *ArgPtr);
+
+ /// getPreExpArgument - Return the pre-expanded form of the specified
+ /// argument.
+ const std::vector<Token> &
+ getPreExpArgument(unsigned Arg, const MacroInfo *MI, Preprocessor &PP);
+
+ /// getStringifiedArgument - Compute, cache, and return the specified argument
+ /// that has been 'stringified' as required by the # operator.
+ const Token &getStringifiedArgument(unsigned ArgNo, Preprocessor &PP,
+ SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd);
+
+ /// getNumArguments - Return the number of arguments passed into this macro
+ /// invocation.
+ unsigned getNumArguments() const { return NumUnexpArgTokens; }
+
+
+ /// isVarargsElidedUse - Return true if this is a C99 style varargs macro
+ /// invocation and there was no argument specified for the "..." argument. If
+ /// the argument was specified (even empty) or this isn't a C99 style varargs
+ /// function, or if in strict mode and the C99 varargs macro had only a ...
+ /// argument, this returns false.
+ bool isVarargsElidedUse() const { return VarargsElided; }
+
+ /// StringifyArgument - Implement C99 6.10.3.2p2, converting a sequence of
+ /// tokens into the literal string token that should be produced by the C #
+ /// preprocessor operator. If Charify is true, then it should be turned into
+ /// a character literal for the Microsoft charize (#@) extension.
+ ///
+ static Token StringifyArgument(const Token *ArgToks,
+ Preprocessor &PP, bool Charify,
+ SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd);
+
+
+ /// deallocate - This should only be called by the Preprocessor when managing
+ /// its freelist.
+ MacroArgs *deallocate();
+};
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp b/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp
new file mode 100644
index 0000000..3d0c9a1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/MacroInfo.cpp
@@ -0,0 +1,133 @@
+//===--- MacroInfo.cpp - Information about #defined identifiers -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the MacroInfo interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Preprocessor.h"
+using namespace clang;
+
+MacroInfo::MacroInfo(SourceLocation DefLoc) : Location(DefLoc) {
+ IsFunctionLike = false;
+ IsC99Varargs = false;
+ IsGNUVarargs = false;
+ IsBuiltinMacro = false;
+ IsFromAST = false;
+ ChangedAfterLoad = false;
+ IsDisabled = false;
+ IsUsed = false;
+ IsAllowRedefinitionsWithoutWarning = false;
+ IsWarnIfUnused = false;
+ IsDefinitionLengthCached = false;
+ IsPublic = true;
+
+ ArgumentList = 0;
+ NumArguments = 0;
+}
+
+MacroInfo::MacroInfo(const MacroInfo &MI, llvm::BumpPtrAllocator &PPAllocator) {
+ Location = MI.Location;
+ EndLocation = MI.EndLocation;
+ ReplacementTokens = MI.ReplacementTokens;
+ IsFunctionLike = MI.IsFunctionLike;
+ IsC99Varargs = MI.IsC99Varargs;
+ IsGNUVarargs = MI.IsGNUVarargs;
+ IsBuiltinMacro = MI.IsBuiltinMacro;
+ IsFromAST = MI.IsFromAST;
+ ChangedAfterLoad = MI.ChangedAfterLoad;
+ IsDisabled = MI.IsDisabled;
+ IsUsed = MI.IsUsed;
+ IsAllowRedefinitionsWithoutWarning = MI.IsAllowRedefinitionsWithoutWarning;
+ IsWarnIfUnused = MI.IsWarnIfUnused;
+ IsDefinitionLengthCached = MI.IsDefinitionLengthCached;
+ DefinitionLength = MI.DefinitionLength;
+ IsPublic = MI.IsPublic;
+
+ ArgumentList = 0;
+ NumArguments = 0;
+ setArgumentList(MI.ArgumentList, MI.NumArguments, PPAllocator);
+}
+
+unsigned MacroInfo::getDefinitionLengthSlow(SourceManager &SM) const {
+ assert(!IsDefinitionLengthCached);
+ IsDefinitionLengthCached = true;
+
+ if (ReplacementTokens.empty())
+ return (DefinitionLength = 0);
+
+ const Token &firstToken = ReplacementTokens.front();
+ const Token &lastToken = ReplacementTokens.back();
+ SourceLocation macroStart = firstToken.getLocation();
+ SourceLocation macroEnd = lastToken.getLocation();
+ assert(macroStart.isValid() && macroEnd.isValid());
+ assert((macroStart.isFileID() || firstToken.is(tok::comment)) &&
+ "Macro defined in macro?");
+ assert((macroEnd.isFileID() || lastToken.is(tok::comment)) &&
+ "Macro defined in macro?");
+ std::pair<FileID, unsigned>
+ startInfo = SM.getDecomposedExpansionLoc(macroStart);
+ std::pair<FileID, unsigned>
+ endInfo = SM.getDecomposedExpansionLoc(macroEnd);
+ assert(startInfo.first == endInfo.first &&
+ "Macro definition spanning multiple FileIDs ?");
+ assert(startInfo.second <= endInfo.second);
+ DefinitionLength = endInfo.second - startInfo.second;
+ DefinitionLength += lastToken.getLength();
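+  // The cached length covers the span from the start of the first replacement
+  // token to the end of the last one, measured in decomposed file offsets.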
+
+ return DefinitionLength;
+}
+
+/// isIdenticalTo - Return true if the specified macro definition is equal to
+/// this macro in spelling, arguments, and whitespace. This is used to emit
+/// duplicate definition warnings. This implements the rules in C99 6.10.3.
+///
+bool MacroInfo::isIdenticalTo(const MacroInfo &Other, Preprocessor &PP) const {
+ // Check # tokens in replacement, number of args, and various flags all match.
+ if (ReplacementTokens.size() != Other.ReplacementTokens.size() ||
+ getNumArgs() != Other.getNumArgs() ||
+ isFunctionLike() != Other.isFunctionLike() ||
+ isC99Varargs() != Other.isC99Varargs() ||
+ isGNUVarargs() != Other.isGNUVarargs())
+ return false;
+
+ // Check arguments.
+ for (arg_iterator I = arg_begin(), OI = Other.arg_begin(), E = arg_end();
+ I != E; ++I, ++OI)
+ if (*I != *OI) return false;
+
+ // Check all the tokens.
+ for (unsigned i = 0, e = ReplacementTokens.size(); i != e; ++i) {
+ const Token &A = ReplacementTokens[i];
+ const Token &B = Other.ReplacementTokens[i];
+ if (A.getKind() != B.getKind())
+ return false;
+
+    // If this isn't the first token, check that the whitespace and
+ // start-of-line characteristics match.
+ if (i != 0 &&
+ (A.isAtStartOfLine() != B.isAtStartOfLine() ||
+ A.hasLeadingSpace() != B.hasLeadingSpace()))
+ return false;
+
+ // If this is an identifier, it is easy.
+ if (A.getIdentifierInfo() || B.getIdentifierInfo()) {
+ if (A.getIdentifierInfo() != B.getIdentifierInfo())
+ return false;
+ continue;
+ }
+
+ // Otherwise, check the spelling.
+ if (PP.getSpelling(A) != PP.getSpelling(B))
+ return false;
+ }
+
+ return true;
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp b/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
new file mode 100644
index 0000000..5304311
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/ModuleMap.cpp
@@ -0,0 +1,1437 @@
+//===--- ModuleMap.cpp - Describe the layout of modules ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ModuleMap implementation, which describes the layout
+// of a module as it relates to headers.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Lex/ModuleMap.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/PathV2.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
+using namespace clang;
+
+Module::ExportDecl
+ModuleMap::resolveExport(Module *Mod,
+ const Module::UnresolvedExportDecl &Unresolved,
+ bool Complain) {
+ // We may have just a wildcard.
+ if (Unresolved.Id.empty()) {
+ assert(Unresolved.Wildcard && "Invalid unresolved export");
+ return Module::ExportDecl(0, true);
+ }
+
+ // Find the starting module.
+ Module *Context = lookupModuleUnqualified(Unresolved.Id[0].first, Mod);
+ if (!Context) {
+ if (Complain)
+ Diags->Report(Unresolved.Id[0].second,
+ diag::err_mmap_missing_module_unqualified)
+ << Unresolved.Id[0].first << Mod->getFullModuleName();
+
+ return Module::ExportDecl();
+ }
+
+ // Dig into the module path.
+ for (unsigned I = 1, N = Unresolved.Id.size(); I != N; ++I) {
+ Module *Sub = lookupModuleQualified(Unresolved.Id[I].first,
+ Context);
+ if (!Sub) {
+ if (Complain)
+ Diags->Report(Unresolved.Id[I].second,
+ diag::err_mmap_missing_module_qualified)
+ << Unresolved.Id[I].first << Context->getFullModuleName()
+ << SourceRange(Unresolved.Id[0].second, Unresolved.Id[I-1].second);
+
+ return Module::ExportDecl();
+ }
+
+ Context = Sub;
+ }
+
+ return Module::ExportDecl(Context, Unresolved.Wildcard);
+}
+
+ModuleMap::ModuleMap(FileManager &FileMgr, const DiagnosticConsumer &DC,
+ const LangOptions &LangOpts, const TargetInfo *Target)
+ : LangOpts(LangOpts), Target(Target), BuiltinIncludeDir(0)
+{
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagIDs(new DiagnosticIDs);
+ Diags = IntrusiveRefCntPtr<DiagnosticsEngine>(
+ new DiagnosticsEngine(DiagIDs));
+ Diags->setClient(DC.clone(*Diags), /*ShouldOwnClient=*/true);
+ SourceMgr = new SourceManager(*Diags, FileMgr);
+}
+
+ModuleMap::~ModuleMap() {
+ for (llvm::StringMap<Module *>::iterator I = Modules.begin(),
+ IEnd = Modules.end();
+ I != IEnd; ++I) {
+ delete I->getValue();
+ }
+
+ delete SourceMgr;
+}
+
+void ModuleMap::setTarget(const TargetInfo &Target) {
+ assert((!this->Target || this->Target == &Target) &&
+ "Improper target override");
+ this->Target = &Target;
+}
+
+Module *ModuleMap::findModuleForHeader(const FileEntry *File) {
+ llvm::DenseMap<const FileEntry *, Module *>::iterator Known
+ = Headers.find(File);
+ if (Known != Headers.end()) {
+ // If a header corresponds to an unavailable module, don't report
+ // that it maps to anything.
+ if (!Known->second->isAvailable())
+ return 0;
+
+ return Known->second;
+ }
+
+ const DirectoryEntry *Dir = File->getDir();
+ llvm::SmallVector<const DirectoryEntry *, 2> SkippedDirs;
+ StringRef DirName = Dir->getName();
+
+ // Keep walking up the directory hierarchy, looking for a directory with
+ // an umbrella header.
+ do {
+ llvm::DenseMap<const DirectoryEntry *, Module *>::iterator KnownDir
+ = UmbrellaDirs.find(Dir);
+ if (KnownDir != UmbrellaDirs.end()) {
+ Module *Result = KnownDir->second;
+
+ // Search up the module stack until we find a module with an umbrella
+ // directory.
+ Module *UmbrellaModule = Result;
+ while (!UmbrellaModule->getUmbrellaDir() && UmbrellaModule->Parent)
+ UmbrellaModule = UmbrellaModule->Parent;
+
+ if (UmbrellaModule->InferSubmodules) {
+ // Infer submodules for each of the directories we found between
+ // the directory of the umbrella header and the directory where
+ // the actual header is located.
+ bool Explicit = UmbrellaModule->InferExplicitSubmodules;
+
+ for (unsigned I = SkippedDirs.size(); I != 0; --I) {
+ // Find or create the module that corresponds to this directory name.
+ StringRef Name = llvm::sys::path::stem(SkippedDirs[I-1]->getName());
+ Result = findOrCreateModule(Name, Result, /*IsFramework=*/false,
+ Explicit).first;
+
+ // Associate the module and the directory.
+ UmbrellaDirs[SkippedDirs[I-1]] = Result;
+
+ // If inferred submodules export everything they import, add a
+ // wildcard to the set of exports.
+ if (UmbrellaModule->InferExportWildcard && Result->Exports.empty())
+ Result->Exports.push_back(Module::ExportDecl(0, true));
+ }
+
+ // Infer a submodule with the same name as this header file.
+ StringRef Name = llvm::sys::path::stem(File->getName());
+ Result = findOrCreateModule(Name, Result, /*IsFramework=*/false,
+ Explicit).first;
+
+ // If inferred submodules export everything they import, add a
+ // wildcard to the set of exports.
+ if (UmbrellaModule->InferExportWildcard && Result->Exports.empty())
+ Result->Exports.push_back(Module::ExportDecl(0, true));
+ } else {
+ // Record each of the directories we stepped through as being part of
+ // the module we found, since the umbrella header covers them all.
+ for (unsigned I = 0, N = SkippedDirs.size(); I != N; ++I)
+ UmbrellaDirs[SkippedDirs[I]] = Result;
+ }
+
+ Headers[File] = Result;
+
+ // If a header corresponds to an unavailable module, don't report
+ // that it maps to anything.
+ if (!Result->isAvailable())
+ return 0;
+
+ return Result;
+ }
+
+ SkippedDirs.push_back(Dir);
+
+ // Retrieve our parent path.
+ DirName = llvm::sys::path::parent_path(DirName);
+ if (DirName.empty())
+ break;
+
+ // Resolve the parent path to a directory entry.
+ Dir = SourceMgr->getFileManager().getDirectory(DirName);
+ } while (Dir);
+
+ return 0;
+}
+
+bool ModuleMap::isHeaderInUnavailableModule(const FileEntry *Header) {
+ llvm::DenseMap<const FileEntry *, Module *>::iterator Known
+ = Headers.find(Header);
+ if (Known != Headers.end())
+ return !Known->second->isAvailable();
+
+ const DirectoryEntry *Dir = Header->getDir();
+ llvm::SmallVector<const DirectoryEntry *, 2> SkippedDirs;
+ StringRef DirName = Dir->getName();
+
+ // Keep walking up the directory hierarchy, looking for a directory with
+ // an umbrella header.
+ do {
+ llvm::DenseMap<const DirectoryEntry *, Module *>::iterator KnownDir
+ = UmbrellaDirs.find(Dir);
+ if (KnownDir != UmbrellaDirs.end()) {
+ Module *Found = KnownDir->second;
+ if (!Found->isAvailable())
+ return true;
+
+ // Search up the module stack until we find a module with an umbrella
+ // directory.
+ Module *UmbrellaModule = Found;
+ while (!UmbrellaModule->getUmbrellaDir() && UmbrellaModule->Parent)
+ UmbrellaModule = UmbrellaModule->Parent;
+
+ if (UmbrellaModule->InferSubmodules) {
+ for (unsigned I = SkippedDirs.size(); I != 0; --I) {
+ // Find or create the module that corresponds to this directory name.
+ StringRef Name = llvm::sys::path::stem(SkippedDirs[I-1]->getName());
+ Found = lookupModuleQualified(Name, Found);
+ if (!Found)
+ return false;
+ if (!Found->isAvailable())
+ return true;
+ }
+
+ // Infer a submodule with the same name as this header file.
+ StringRef Name = llvm::sys::path::stem(Header->getName());
+ Found = lookupModuleQualified(Name, Found);
+ if (!Found)
+ return false;
+ }
+
+ return !Found->isAvailable();
+ }
+
+ SkippedDirs.push_back(Dir);
+
+ // Retrieve our parent path.
+ DirName = llvm::sys::path::parent_path(DirName);
+ if (DirName.empty())
+ break;
+
+ // Resolve the parent path to a directory entry.
+ Dir = SourceMgr->getFileManager().getDirectory(DirName);
+ } while (Dir);
+
+ return false;
+}
+
+Module *ModuleMap::findModule(StringRef Name) {
+ llvm::StringMap<Module *>::iterator Known = Modules.find(Name);
+ if (Known != Modules.end())
+ return Known->getValue();
+
+ return 0;
+}
+
+Module *ModuleMap::lookupModuleUnqualified(StringRef Name, Module *Context) {
+ for(; Context; Context = Context->Parent) {
+ if (Module *Sub = lookupModuleQualified(Name, Context))
+ return Sub;
+ }
+
+ return findModule(Name);
+}
+
+Module *ModuleMap::lookupModuleQualified(StringRef Name, Module *Context) {
+ if (!Context)
+ return findModule(Name);
+
+ return Context->findSubmodule(Name);
+}
+
+std::pair<Module *, bool>
+ModuleMap::findOrCreateModule(StringRef Name, Module *Parent, bool IsFramework,
+ bool IsExplicit) {
+ // Try to find an existing module with this name.
+ if (Module *Sub = lookupModuleQualified(Name, Parent))
+ return std::make_pair(Sub, false);
+
+ // Create a new module with this name.
+ Module *Result = new Module(Name, SourceLocation(), Parent, IsFramework,
+ IsExplicit);
+ if (!Parent)
+ Modules[Name] = Result;
+ return std::make_pair(Result, true);
+}
+
+Module *
+ModuleMap::inferFrameworkModule(StringRef ModuleName,
+ const DirectoryEntry *FrameworkDir,
+ bool IsSystem,
+ Module *Parent) {
+ // Check whether we've already found this module.
+ if (Module *Mod = lookupModuleQualified(ModuleName, Parent))
+ return Mod;
+
+ FileManager &FileMgr = SourceMgr->getFileManager();
+
+ // Look for an umbrella header.
+ SmallString<128> UmbrellaName = StringRef(FrameworkDir->getName());
+ llvm::sys::path::append(UmbrellaName, "Headers");
+ llvm::sys::path::append(UmbrellaName, ModuleName + ".h");
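+  // i.e. <FrameworkDir>/Headers/<ModuleName>.h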
+ const FileEntry *UmbrellaHeader = FileMgr.getFile(UmbrellaName);
+
+ // FIXME: If there's no umbrella header, we could probably scan the
+ // framework to load *everything*. But, it's not clear that this is a good
+ // idea.
+ if (!UmbrellaHeader)
+ return 0;
+
+ Module *Result = new Module(ModuleName, SourceLocation(), Parent,
+ /*IsFramework=*/true, /*IsExplicit=*/false);
+ if (IsSystem)
+ Result->IsSystem = IsSystem;
+
+ if (!Parent)
+ Modules[ModuleName] = Result;
+
+ // umbrella header "umbrella-header-name"
+ Result->Umbrella = UmbrellaHeader;
+ Headers[UmbrellaHeader] = Result;
+ UmbrellaDirs[UmbrellaHeader->getDir()] = Result;
+
+ // export *
+ Result->Exports.push_back(Module::ExportDecl(0, true));
+
+ // module * { export * }
+ Result->InferSubmodules = true;
+ Result->InferExportWildcard = true;
+
+ // Look for subframeworks.
+ llvm::error_code EC;
+ SmallString<128> SubframeworksDirName
+ = StringRef(FrameworkDir->getName());
+ llvm::sys::path::append(SubframeworksDirName, "Frameworks");
+ SmallString<128> SubframeworksDirNameNative;
+ llvm::sys::path::native(SubframeworksDirName.str(),
+ SubframeworksDirNameNative);
+ for (llvm::sys::fs::directory_iterator
+ Dir(SubframeworksDirNameNative.str(), EC), DirEnd;
+ Dir != DirEnd && !EC; Dir.increment(EC)) {
+ if (!StringRef(Dir->path()).endswith(".framework"))
+ continue;
+
+ if (const DirectoryEntry *SubframeworkDir
+ = FileMgr.getDirectory(Dir->path())) {
+ // FIXME: Do we want to warn about subframeworks without umbrella headers?
+ inferFrameworkModule(llvm::sys::path::stem(Dir->path()), SubframeworkDir,
+ IsSystem, Result);
+ }
+ }
+
+ return Result;
+}
+
+void ModuleMap::setUmbrellaHeader(Module *Mod, const FileEntry *UmbrellaHeader){
+ Headers[UmbrellaHeader] = Mod;
+ Mod->Umbrella = UmbrellaHeader;
+ UmbrellaDirs[UmbrellaHeader->getDir()] = Mod;
+}
+
+void ModuleMap::setUmbrellaDir(Module *Mod, const DirectoryEntry *UmbrellaDir) {
+ Mod->Umbrella = UmbrellaDir;
+ UmbrellaDirs[UmbrellaDir] = Mod;
+}
+
+void ModuleMap::addHeader(Module *Mod, const FileEntry *Header) {
+ Mod->Headers.push_back(Header);
+ Headers[Header] = Mod;
+}
+
+const FileEntry *
+ModuleMap::getContainingModuleMapFile(Module *Module) {
+ if (Module->DefinitionLoc.isInvalid() || !SourceMgr)
+ return 0;
+
+ return SourceMgr->getFileEntryForID(
+ SourceMgr->getFileID(Module->DefinitionLoc));
+}
+
+void ModuleMap::dump() {
+ llvm::errs() << "Modules:";
+ for (llvm::StringMap<Module *>::iterator M = Modules.begin(),
+ MEnd = Modules.end();
+ M != MEnd; ++M)
+ M->getValue()->print(llvm::errs(), 2);
+
+ llvm::errs() << "Headers:";
+ for (llvm::DenseMap<const FileEntry *, Module *>::iterator
+ H = Headers.begin(),
+ HEnd = Headers.end();
+ H != HEnd; ++H) {
+ llvm::errs() << " \"" << H->first->getName() << "\" -> "
+ << H->second->getFullModuleName() << "\n";
+ }
+}
+
+bool ModuleMap::resolveExports(Module *Mod, bool Complain) {
+ bool HadError = false;
+ for (unsigned I = 0, N = Mod->UnresolvedExports.size(); I != N; ++I) {
+ Module::ExportDecl Export = resolveExport(Mod, Mod->UnresolvedExports[I],
+ Complain);
+ if (Export.getPointer() || Export.getInt())
+ Mod->Exports.push_back(Export);
+ else
+ HadError = true;
+ }
+ Mod->UnresolvedExports.clear();
+ return HadError;
+}
+
+Module *ModuleMap::inferModuleFromLocation(FullSourceLoc Loc) {
+ if (Loc.isInvalid())
+ return 0;
+
+ // Use the expansion location to determine which module we're in.
+ FullSourceLoc ExpansionLoc = Loc.getExpansionLoc();
+ if (!ExpansionLoc.isFileID())
+ return 0;
+
+
+ const SourceManager &SrcMgr = Loc.getManager();
+ FileID ExpansionFileID = ExpansionLoc.getFileID();
+
+ while (const FileEntry *ExpansionFile
+ = SrcMgr.getFileEntryForID(ExpansionFileID)) {
+ // Find the module that owns this header (if any).
+ if (Module *Mod = findModuleForHeader(ExpansionFile))
+ return Mod;
+
+ // No module owns this header, so look up the inclusion chain to see if
+ // any included header has an associated module.
+ SourceLocation IncludeLoc = SrcMgr.getIncludeLoc(ExpansionFileID);
+ if (IncludeLoc.isInvalid())
+ return 0;
+
+ ExpansionFileID = SrcMgr.getFileID(IncludeLoc);
+ }
+
+ return 0;
+}
+
+//----------------------------------------------------------------------------//
+// Module map file parser
+//----------------------------------------------------------------------------//
+
+namespace clang {
+ /// \brief A token in a module map file.
+ struct MMToken {
+ enum TokenKind {
+ Comma,
+ EndOfFile,
+ HeaderKeyword,
+ Identifier,
+ ExplicitKeyword,
+ ExportKeyword,
+ FrameworkKeyword,
+ ModuleKeyword,
+ Period,
+ UmbrellaKeyword,
+ RequiresKeyword,
+ Star,
+ StringLiteral,
+ LBrace,
+ RBrace,
+ LSquare,
+ RSquare
+ } Kind;
+
+ unsigned Location;
+ unsigned StringLength;
+ const char *StringData;
+
+ void clear() {
+ Kind = EndOfFile;
+ Location = 0;
+ StringLength = 0;
+ StringData = 0;
+ }
+
+ bool is(TokenKind K) const { return Kind == K; }
+
+ SourceLocation getLocation() const {
+ return SourceLocation::getFromRawEncoding(Location);
+ }
+
+ StringRef getString() const {
+ return StringRef(StringData, StringLength);
+ }
+ };
+
+ class ModuleMapParser {
+ Lexer &L;
+ SourceManager &SourceMgr;
+ DiagnosticsEngine &Diags;
+ ModuleMap &Map;
+
+ /// \brief The directory that this module map resides in.
+ const DirectoryEntry *Directory;
+
+ /// \brief The directory containing Clang-supplied headers.
+ const DirectoryEntry *BuiltinIncludeDir;
+
+ /// \brief Whether an error occurred.
+ bool HadError;
+
+ /// \brief Default target information, used only for string literal
+ /// parsing.
+ OwningPtr<TargetInfo> Target;
+
+ /// \brief Stores string data for the various string literals referenced
+ /// during parsing.
+ llvm::BumpPtrAllocator StringData;
+
+ /// \brief The current token.
+ MMToken Tok;
+
+ /// \brief The active module.
+ Module *ActiveModule;
+
+ /// \brief Consume the current token and return its location.
+ SourceLocation consumeToken();
+
+ /// \brief Skip tokens until we reach a token with the given kind
+ /// (or the end of the file).
+ void skipUntil(MMToken::TokenKind K);
+
+ typedef llvm::SmallVector<std::pair<std::string, SourceLocation>, 2>
+ ModuleId;
+ bool parseModuleId(ModuleId &Id);
+ void parseModuleDecl();
+ void parseRequiresDecl();
+ void parseHeaderDecl(SourceLocation UmbrellaLoc);
+ void parseUmbrellaDirDecl(SourceLocation UmbrellaLoc);
+ void parseExportDecl();
+ void parseInferredSubmoduleDecl(bool Explicit);
+
+ const DirectoryEntry *getOverriddenHeaderSearchDir();
+
+ public:
+ explicit ModuleMapParser(Lexer &L, SourceManager &SourceMgr,
+ DiagnosticsEngine &Diags,
+ ModuleMap &Map,
+ const DirectoryEntry *Directory,
+ const DirectoryEntry *BuiltinIncludeDir)
+ : L(L), SourceMgr(SourceMgr), Diags(Diags), Map(Map),
+ Directory(Directory), BuiltinIncludeDir(BuiltinIncludeDir),
+ HadError(false), ActiveModule(0)
+ {
+ TargetOptions TargetOpts;
+ TargetOpts.Triple = llvm::sys::getDefaultTargetTriple();
+ Target.reset(TargetInfo::CreateTargetInfo(Diags, TargetOpts));
+
+ Tok.clear();
+ consumeToken();
+ }
+
+ bool parseModuleMapFile();
+ };
+}
+
+SourceLocation ModuleMapParser::consumeToken() {
+retry:
+ SourceLocation Result = Tok.getLocation();
+ Tok.clear();
+
+ Token LToken;
+ L.LexFromRawLexer(LToken);
+ Tok.Location = LToken.getLocation().getRawEncoding();
+ switch (LToken.getKind()) {
+ case tok::raw_identifier:
+ Tok.StringData = LToken.getRawIdentifierData();
+ Tok.StringLength = LToken.getLength();
+ Tok.Kind = llvm::StringSwitch<MMToken::TokenKind>(Tok.getString())
+ .Case("header", MMToken::HeaderKeyword)
+ .Case("explicit", MMToken::ExplicitKeyword)
+ .Case("export", MMToken::ExportKeyword)
+ .Case("framework", MMToken::FrameworkKeyword)
+ .Case("module", MMToken::ModuleKeyword)
+ .Case("requires", MMToken::RequiresKeyword)
+ .Case("umbrella", MMToken::UmbrellaKeyword)
+ .Default(MMToken::Identifier);
+ break;
+
+ case tok::comma:
+ Tok.Kind = MMToken::Comma;
+ break;
+
+ case tok::eof:
+ Tok.Kind = MMToken::EndOfFile;
+ break;
+
+ case tok::l_brace:
+ Tok.Kind = MMToken::LBrace;
+ break;
+
+ case tok::l_square:
+ Tok.Kind = MMToken::LSquare;
+ break;
+
+ case tok::period:
+ Tok.Kind = MMToken::Period;
+ break;
+
+ case tok::r_brace:
+ Tok.Kind = MMToken::RBrace;
+ break;
+
+ case tok::r_square:
+ Tok.Kind = MMToken::RSquare;
+ break;
+
+ case tok::star:
+ Tok.Kind = MMToken::Star;
+ break;
+
+ case tok::string_literal: {
+ if (LToken.hasUDSuffix()) {
+ Diags.Report(LToken.getLocation(), diag::err_invalid_string_udl);
+ HadError = true;
+ goto retry;
+ }
+
+ // Parse the string literal.
+ LangOptions LangOpts;
+ StringLiteralParser StringLiteral(&LToken, 1, SourceMgr, LangOpts, *Target);
+ if (StringLiteral.hadError)
+ goto retry;
+
+ // Copy the string literal into our string data allocator.
+ unsigned Length = StringLiteral.GetStringLength();
+ char *Saved = StringData.Allocate<char>(Length + 1);
+ memcpy(Saved, StringLiteral.GetString().data(), Length);
+ Saved[Length] = 0;
+
+ // Form the token.
+ Tok.Kind = MMToken::StringLiteral;
+ Tok.StringData = Saved;
+ Tok.StringLength = Length;
+ break;
+ }
+
+ case tok::comment:
+ goto retry;
+
+ default:
+ Diags.Report(LToken.getLocation(), diag::err_mmap_unknown_token);
+ HadError = true;
+ goto retry;
+ }
+
+ return Result;
+}
+
+void ModuleMapParser::skipUntil(MMToken::TokenKind K) {
+ unsigned braceDepth = 0;
+ unsigned squareDepth = 0;
+ do {
+ switch (Tok.Kind) {
+ case MMToken::EndOfFile:
+ return;
+
+ case MMToken::LBrace:
+ if (Tok.is(K) && braceDepth == 0 && squareDepth == 0)
+ return;
+
+ ++braceDepth;
+ break;
+
+ case MMToken::LSquare:
+ if (Tok.is(K) && braceDepth == 0 && squareDepth == 0)
+ return;
+
+ ++squareDepth;
+ break;
+
+ case MMToken::RBrace:
+ if (braceDepth > 0)
+ --braceDepth;
+ else if (Tok.is(K))
+ return;
+ break;
+
+ case MMToken::RSquare:
+ if (squareDepth > 0)
+ --squareDepth;
+ else if (Tok.is(K))
+ return;
+ break;
+
+ default:
+ if (braceDepth == 0 && squareDepth == 0 && Tok.is(K))
+ return;
+ break;
+ }
+
+ consumeToken();
+ } while (true);
+}
+
+/// \brief Parse a module-id.
+///
+/// module-id:
+/// identifier
+/// identifier '.' module-id
+///
+/// \returns true if an error occurred, false otherwise.
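+///
+/// For example (illustrative names), both "std" and "std.vector" are
+/// well-formed module-ids under this grammar.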
+bool ModuleMapParser::parseModuleId(ModuleId &Id) {
+ Id.clear();
+ do {
+ if (Tok.is(MMToken::Identifier)) {
+ Id.push_back(std::make_pair(Tok.getString(), Tok.getLocation()));
+ consumeToken();
+ } else {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_module_name);
+ return true;
+ }
+
+ if (!Tok.is(MMToken::Period))
+ break;
+
+ consumeToken();
+ } while (true);
+
+ return false;
+}
+
+namespace {
+ /// \brief Enumerates the known attributes.
+ enum AttributeKind {
+ /// \brief An unknown attribute.
+ AT_unknown,
+ /// \brief The 'system' attribute.
+ AT_system
+ };
+}
+
+/// \brief Parse a module declaration.
+///
+/// module-declaration:
+/// 'explicit'[opt] 'framework'[opt] 'module' module-id attributes[opt]
+/// { module-member* }
+///
+/// attributes:
+/// attribute attributes
+/// attribute
+///
+/// attribute:
+/// [ identifier ]
+///
+/// module-member:
+/// requires-declaration
+/// header-declaration
+/// submodule-declaration
+/// export-declaration
+///
+/// submodule-declaration:
+/// module-declaration
+/// inferred-submodule-declaration
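+///
+/// For illustration only (the module, header, and attribute names below are
+/// hypothetical, but the shape follows the grammar accepted by this parser):
+/// \code
+///   framework module MyKit [system] {
+///     umbrella header "MyKit.h"
+///     explicit module Internal { header "MyKitInternal.h" }
+///     module * { export * }
+///     export *
+///   }
+/// \endcode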
+void ModuleMapParser::parseModuleDecl() {
+ assert(Tok.is(MMToken::ExplicitKeyword) || Tok.is(MMToken::ModuleKeyword) ||
+ Tok.is(MMToken::FrameworkKeyword));
+ // Parse 'explicit' or 'framework' keyword, if present.
+ SourceLocation ExplicitLoc;
+ bool Explicit = false;
+ bool Framework = false;
+
+ // Parse 'explicit' keyword, if present.
+ if (Tok.is(MMToken::ExplicitKeyword)) {
+ ExplicitLoc = consumeToken();
+ Explicit = true;
+ }
+
+ // Parse 'framework' keyword, if present.
+ if (Tok.is(MMToken::FrameworkKeyword)) {
+ consumeToken();
+ Framework = true;
+ }
+
+ // Parse 'module' keyword.
+ if (!Tok.is(MMToken::ModuleKeyword)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_module);
+ consumeToken();
+ HadError = true;
+ return;
+ }
+ consumeToken(); // 'module' keyword
+
+ // If we have a wildcard for the module name, this is an inferred submodule.
+ // Parse it.
+ if (Tok.is(MMToken::Star))
+ return parseInferredSubmoduleDecl(Explicit);
+
+ // Parse the module name.
+ ModuleId Id;
+ if (parseModuleId(Id)) {
+ HadError = true;
+ return;
+ }
+
+ if (ActiveModule) {
+ if (Id.size() > 1) {
+ Diags.Report(Id.front().second, diag::err_mmap_nested_submodule_id)
+ << SourceRange(Id.front().second, Id.back().second);
+
+ HadError = true;
+ return;
+ }
+ } else if (Id.size() == 1 && Explicit) {
+ // Top-level modules can't be explicit.
+ Diags.Report(ExplicitLoc, diag::err_mmap_explicit_top_level);
+ Explicit = false;
+ ExplicitLoc = SourceLocation();
+ HadError = true;
+ }
+
+ Module *PreviousActiveModule = ActiveModule;
+ if (Id.size() > 1) {
+ // This module map defines a submodule. Go find the module of which it
+ // is a submodule.
+ ActiveModule = 0;
+ for (unsigned I = 0, N = Id.size() - 1; I != N; ++I) {
+ if (Module *Next = Map.lookupModuleQualified(Id[I].first, ActiveModule)) {
+ ActiveModule = Next;
+ continue;
+ }
+
+ if (ActiveModule) {
+ Diags.Report(Id[I].second, diag::err_mmap_missing_module_qualified)
+ << Id[I].first << ActiveModule->getTopLevelModule();
+ } else {
+ Diags.Report(Id[I].second, diag::err_mmap_expected_module_name);
+ }
+ HadError = true;
+ return;
+ }
+ }
+
+ StringRef ModuleName = Id.back().first;
+ SourceLocation ModuleNameLoc = Id.back().second;
+
+ // Parse the optional attribute list.
+ bool IsSystem = false;
+ while (Tok.is(MMToken::LSquare)) {
+ // Consume the '['.
+ SourceLocation LSquareLoc = consumeToken();
+
+ // Check whether we have an attribute name here.
+ if (!Tok.is(MMToken::Identifier)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_attribute);
+ skipUntil(MMToken::RSquare);
+ if (Tok.is(MMToken::RSquare))
+ consumeToken();
+ continue;
+ }
+
+ // Decode the attribute name.
+ AttributeKind Attribute
+ = llvm::StringSwitch<AttributeKind>(Tok.getString())
+ .Case("system", AT_system)
+ .Default(AT_unknown);
+ switch (Attribute) {
+ case AT_unknown:
+ Diags.Report(Tok.getLocation(), diag::warn_mmap_unknown_attribute)
+ << Tok.getString();
+ break;
+
+ case AT_system:
+ IsSystem = true;
+ break;
+ }
+ consumeToken();
+
+ // Consume the ']'.
+ if (!Tok.is(MMToken::RSquare)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_rsquare);
+ Diags.Report(LSquareLoc, diag::note_mmap_lsquare_match);
+ skipUntil(MMToken::RSquare);
+ }
+
+ if (Tok.is(MMToken::RSquare))
+ consumeToken();
+ }
+
+ // Parse the opening brace.
+ if (!Tok.is(MMToken::LBrace)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_lbrace)
+ << ModuleName;
+ HadError = true;
+ return;
+ }
+ SourceLocation LBraceLoc = consumeToken();
+
+ // Determine whether this (sub)module has already been defined.
+ if (Module *Existing = Map.lookupModuleQualified(ModuleName, ActiveModule)) {
+ if (Existing->DefinitionLoc.isInvalid() && !ActiveModule) {
+ // Skip the module definition.
+ skipUntil(MMToken::RBrace);
+ if (Tok.is(MMToken::RBrace))
+ consumeToken();
+ else {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_rbrace);
+ Diags.Report(LBraceLoc, diag::note_mmap_lbrace_match);
+ HadError = true;
+ }
+ return;
+ }
+
+ Diags.Report(ModuleNameLoc, diag::err_mmap_module_redefinition)
+ << ModuleName;
+ Diags.Report(Existing->DefinitionLoc, diag::note_mmap_prev_definition);
+
+ // Skip the module definition.
+ skipUntil(MMToken::RBrace);
+ if (Tok.is(MMToken::RBrace))
+ consumeToken();
+
+ HadError = true;
+ return;
+ }
+
+ // Start defining this module.
+ ActiveModule = Map.findOrCreateModule(ModuleName, ActiveModule, Framework,
+ Explicit).first;
+ ActiveModule->DefinitionLoc = ModuleNameLoc;
+ if (IsSystem)
+ ActiveModule->IsSystem = true;
+
+ bool Done = false;
+ do {
+ switch (Tok.Kind) {
+ case MMToken::EndOfFile:
+ case MMToken::RBrace:
+ Done = true;
+ break;
+
+ case MMToken::ExplicitKeyword:
+ case MMToken::FrameworkKeyword:
+ case MMToken::ModuleKeyword:
+ parseModuleDecl();
+ break;
+
+ case MMToken::ExportKeyword:
+ parseExportDecl();
+ break;
+
+ case MMToken::RequiresKeyword:
+ parseRequiresDecl();
+ break;
+
+ case MMToken::UmbrellaKeyword: {
+ SourceLocation UmbrellaLoc = consumeToken();
+ if (Tok.is(MMToken::HeaderKeyword))
+ parseHeaderDecl(UmbrellaLoc);
+ else
+ parseUmbrellaDirDecl(UmbrellaLoc);
+ break;
+ }
+
+ case MMToken::HeaderKeyword:
+ parseHeaderDecl(SourceLocation());
+ break;
+
+ default:
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_member);
+ consumeToken();
+ break;
+ }
+ } while (!Done);
+
+ if (Tok.is(MMToken::RBrace))
+ consumeToken();
+ else {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_rbrace);
+ Diags.Report(LBraceLoc, diag::note_mmap_lbrace_match);
+ HadError = true;
+ }
+
+ // We're done parsing this module. Pop back to the previous module.
+ ActiveModule = PreviousActiveModule;
+}
+
+/// \brief Parse a requires declaration.
+///
+/// requires-declaration:
+/// 'requires' feature-list
+///
+/// feature-list:
+/// identifier ',' feature-list
+/// identifier
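+///
+/// For example (illustrative feature names):
+/// \code
+///   requires cplusplus, blocks
+/// \endcode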
+void ModuleMapParser::parseRequiresDecl() {
+ assert(Tok.is(MMToken::RequiresKeyword));
+
+ // Parse 'requires' keyword.
+ consumeToken();
+
+ // Parse the feature-list.
+ do {
+ if (!Tok.is(MMToken::Identifier)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_feature);
+ HadError = true;
+ return;
+ }
+
+ // Consume the feature name.
+ std::string Feature = Tok.getString();
+ consumeToken();
+
+ // Add this feature.
+ ActiveModule->addRequirement(Feature, Map.LangOpts, *Map.Target);
+
+ if (!Tok.is(MMToken::Comma))
+ break;
+
+ // Consume the comma.
+ consumeToken();
+ } while (true);
+}
+
+/// \brief Append to \p Path the set of paths needed to get to the
+/// subframework in which the given module lives.
+static void appendSubframeworkPaths(Module *Mod,
+ llvm::SmallVectorImpl<char> &Path) {
+ // Collect the framework names from the given module to the top-level module.
+ llvm::SmallVector<StringRef, 2> Paths;
+ for (; Mod; Mod = Mod->Parent) {
+ if (Mod->IsFramework)
+ Paths.push_back(Mod->Name);
+ }
+
+ if (Paths.empty())
+ return;
+
+ // Add Frameworks/Name.framework for each subframework.
+ for (unsigned I = Paths.size() - 1; I != 0; --I) {
+ llvm::sys::path::append(Path, "Frameworks");
+ llvm::sys::path::append(Path, Paths[I-1] + ".framework");
+ }
+}
+
+/// \brief Determine whether the given file name is the name of a builtin
+/// header, supplied by Clang to replace, override, or augment existing system
+/// headers.
+static bool isBuiltinHeader(StringRef FileName) {
+ return llvm::StringSwitch<bool>(FileName)
+ .Case("float.h", true)
+ .Case("iso646.h", true)
+ .Case("limits.h", true)
+ .Case("stdalign.h", true)
+ .Case("stdarg.h", true)
+ .Case("stdbool.h", true)
+ .Case("stddef.h", true)
+ .Case("stdint.h", true)
+ .Case("tgmath.h", true)
+ .Case("unwind.h", true)
+ .Default(false);
+}
+
+/// \brief Parse a header declaration.
+///
+/// header-declaration:
+/// 'umbrella'[opt] 'header' string-literal
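+///
+/// For example (illustrative file names):
+/// \code
+///   umbrella header "MyKit.h"
+///   header "MyKitExtras.h"
+/// \endcode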
+void ModuleMapParser::parseHeaderDecl(SourceLocation UmbrellaLoc) {
+ assert(Tok.is(MMToken::HeaderKeyword));
+ consumeToken();
+
+ bool Umbrella = UmbrellaLoc.isValid();
+
+ // Parse the header name.
+ if (!Tok.is(MMToken::StringLiteral)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_header)
+ << "header";
+ HadError = true;
+ return;
+ }
+ std::string FileName = Tok.getString();
+ SourceLocation FileNameLoc = consumeToken();
+
+ // Check whether we already have an umbrella.
+ if (Umbrella && ActiveModule->Umbrella) {
+ Diags.Report(FileNameLoc, diag::err_mmap_umbrella_clash)
+ << ActiveModule->getFullModuleName();
+ HadError = true;
+ return;
+ }
+
+ // Look for this file.
+ const FileEntry *File = 0;
+ const FileEntry *BuiltinFile = 0;
+ SmallString<128> PathName;
+ if (llvm::sys::path::is_absolute(FileName)) {
+ PathName = FileName;
+ File = SourceMgr.getFileManager().getFile(PathName);
+ } else if (const DirectoryEntry *Dir = getOverriddenHeaderSearchDir()) {
+ PathName = Dir->getName();
+ llvm::sys::path::append(PathName, FileName);
+ File = SourceMgr.getFileManager().getFile(PathName);
+ } else {
+ // Search for the header file within the search directory.
+ PathName = Directory->getName();
+ unsigned PathLength = PathName.size();
+
+ if (ActiveModule->isPartOfFramework()) {
+ appendSubframeworkPaths(ActiveModule, PathName);
+
+ // Check whether this file is in the public headers.
+ llvm::sys::path::append(PathName, "Headers");
+ llvm::sys::path::append(PathName, FileName);
+ File = SourceMgr.getFileManager().getFile(PathName);
+
+ if (!File) {
+ // Check whether this file is in the private headers.
+ PathName.resize(PathLength);
+ llvm::sys::path::append(PathName, "PrivateHeaders");
+ llvm::sys::path::append(PathName, FileName);
+ File = SourceMgr.getFileManager().getFile(PathName);
+ }
+ } else {
+ // Look up normal headers.
+ llvm::sys::path::append(PathName, FileName);
+ File = SourceMgr.getFileManager().getFile(PathName);
+
+ // If this is a system module with a top-level header, this header
+ // may have a counterpart (or replacement) in the set of headers
+ // supplied by Clang. Find that builtin header.
+ if (ActiveModule->IsSystem && !Umbrella && BuiltinIncludeDir &&
+ BuiltinIncludeDir != Directory && isBuiltinHeader(FileName)) {
+ SmallString<128> BuiltinPathName(BuiltinIncludeDir->getName());
+ llvm::sys::path::append(BuiltinPathName, FileName);
+ BuiltinFile = SourceMgr.getFileManager().getFile(BuiltinPathName);
+
+ // If Clang supplies this header but the underlying system does not,
+ // just silently swap in our builtin version. Otherwise, we'll end
+ // up adding both (later).
+ if (!File && BuiltinFile) {
+ File = BuiltinFile;
+ BuiltinFile = 0;
+ }
+ }
+ }
+ }
+
+ // FIXME: We shouldn't be eagerly stat'ing every file named in a module map.
+ // Come up with a lazy way to do this.
+ if (File) {
+ if (const Module *OwningModule = Map.Headers[File]) {
+ Diags.Report(FileNameLoc, diag::err_mmap_header_conflict)
+ << FileName << OwningModule->getFullModuleName();
+ HadError = true;
+ } else if (Umbrella) {
+ const DirectoryEntry *UmbrellaDir = File->getDir();
+ if ((OwningModule = Map.UmbrellaDirs[UmbrellaDir])) {
+ Diags.Report(UmbrellaLoc, diag::err_mmap_umbrella_clash)
+ << OwningModule->getFullModuleName();
+ HadError = true;
+ } else {
+ // Record this umbrella header.
+ Map.setUmbrellaHeader(ActiveModule, File);
+ }
+ } else {
+ // Record this header.
+ Map.addHeader(ActiveModule, File);
+
+ // If there is a builtin counterpart to this file, add it now.
+ if (BuiltinFile)
+ Map.addHeader(ActiveModule, BuiltinFile);
+ }
+ } else {
+ Diags.Report(FileNameLoc, diag::err_mmap_header_not_found)
+ << Umbrella << FileName;
+ HadError = true;
+ }
+}
+
+/// \brief Parse an umbrella directory declaration.
+///
+/// umbrella-dir-declaration:
+/// umbrella string-literal
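+///
+/// For example (illustrative directory name):
+/// \code
+///   umbrella "Headers"
+/// \endcode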
+void ModuleMapParser::parseUmbrellaDirDecl(SourceLocation UmbrellaLoc) {
+ // Parse the directory name.
+ if (!Tok.is(MMToken::StringLiteral)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_header)
+ << "umbrella";
+ HadError = true;
+ return;
+ }
+
+ std::string DirName = Tok.getString();
+ SourceLocation DirNameLoc = consumeToken();
+
+ // Check whether we already have an umbrella.
+ if (ActiveModule->Umbrella) {
+ Diags.Report(DirNameLoc, diag::err_mmap_umbrella_clash)
+ << ActiveModule->getFullModuleName();
+ HadError = true;
+ return;
+ }
+
+ // Look for this file.
+ const DirectoryEntry *Dir = 0;
+ if (llvm::sys::path::is_absolute(DirName))
+ Dir = SourceMgr.getFileManager().getDirectory(DirName);
+ else {
+ SmallString<128> PathName;
+ PathName = Directory->getName();
+ llvm::sys::path::append(PathName, DirName);
+ Dir = SourceMgr.getFileManager().getDirectory(PathName);
+ }
+
+ if (!Dir) {
+ Diags.Report(DirNameLoc, diag::err_mmap_umbrella_dir_not_found)
+ << DirName;
+ HadError = true;
+ return;
+ }
+
+ if (Module *OwningModule = Map.UmbrellaDirs[Dir]) {
+ Diags.Report(UmbrellaLoc, diag::err_mmap_umbrella_clash)
+ << OwningModule->getFullModuleName();
+ HadError = true;
+ return;
+ }
+
+ // Record this umbrella directory.
+ Map.setUmbrellaDir(ActiveModule, Dir);
+}
+
+/// \brief Parse a module export declaration.
+///
+/// export-declaration:
+/// 'export' wildcard-module-id
+///
+/// wildcard-module-id:
+/// identifier
+/// '*'
+/// identifier '.' wildcard-module-id
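+///
+/// For example (illustrative module names):
+/// \code
+///   export MyKit.Internal
+///   export Other.*
+///   export *
+/// \endcode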
+void ModuleMapParser::parseExportDecl() {
+ assert(Tok.is(MMToken::ExportKeyword));
+ SourceLocation ExportLoc = consumeToken();
+
+ // Parse the module-id with an optional wildcard at the end.
+ ModuleId ParsedModuleId;
+ bool Wildcard = false;
+ do {
+ if (Tok.is(MMToken::Identifier)) {
+ ParsedModuleId.push_back(std::make_pair(Tok.getString(),
+ Tok.getLocation()));
+ consumeToken();
+
+ if (Tok.is(MMToken::Period)) {
+ consumeToken();
+ continue;
+ }
+
+ break;
+ }
+
+ if (Tok.is(MMToken::Star)) {
+ Wildcard = true;
+ consumeToken();
+ break;
+ }
+
+ Diags.Report(Tok.getLocation(), diag::err_mmap_export_module_id);
+ HadError = true;
+ return;
+ } while (true);
+
+ Module::UnresolvedExportDecl Unresolved = {
+ ExportLoc, ParsedModuleId, Wildcard
+ };
+ ActiveModule->UnresolvedExports.push_back(Unresolved);
+}
+
+void ModuleMapParser::parseInferredSubmoduleDecl(bool Explicit) {
+ assert(Tok.is(MMToken::Star));
+ SourceLocation StarLoc = consumeToken();
+ bool Failed = false;
+
+ // Inferred modules must be submodules.
+ if (!ActiveModule) {
+ Diags.Report(StarLoc, diag::err_mmap_top_level_inferred_submodule);
+ Failed = true;
+ }
+
+ // Inferred modules must have umbrella directories.
+ if (!Failed && !ActiveModule->getUmbrellaDir()) {
+ Diags.Report(StarLoc, diag::err_mmap_inferred_no_umbrella);
+ Failed = true;
+ }
+
+ // Check for redefinition of an inferred module.
+ if (!Failed && ActiveModule->InferSubmodules) {
+ Diags.Report(StarLoc, diag::err_mmap_inferred_redef);
+ if (ActiveModule->InferredSubmoduleLoc.isValid())
+ Diags.Report(ActiveModule->InferredSubmoduleLoc,
+ diag::note_mmap_prev_definition);
+ Failed = true;
+ }
+
+ // If there were any problems with this inferred submodule, skip its body.
+ if (Failed) {
+ if (Tok.is(MMToken::LBrace)) {
+ consumeToken();
+ skipUntil(MMToken::RBrace);
+ if (Tok.is(MMToken::RBrace))
+ consumeToken();
+ }
+ HadError = true;
+ return;
+ }
+
+ // Note that we have an inferred submodule.
+ ActiveModule->InferSubmodules = true;
+ ActiveModule->InferredSubmoduleLoc = StarLoc;
+ ActiveModule->InferExplicitSubmodules = Explicit;
+
+ // Parse the opening brace.
+ if (!Tok.is(MMToken::LBrace)) {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_lbrace_wildcard);
+ HadError = true;
+ return;
+ }
+ SourceLocation LBraceLoc = consumeToken();
+
+ // Parse the body of the inferred submodule.
+ bool Done = false;
+ do {
+ switch (Tok.Kind) {
+ case MMToken::EndOfFile:
+ case MMToken::RBrace:
+ Done = true;
+ break;
+
+ case MMToken::ExportKeyword: {
+ consumeToken();
+ if (Tok.is(MMToken::Star))
+ ActiveModule->InferExportWildcard = true;
+ else
+ Diags.Report(Tok.getLocation(),
+ diag::err_mmap_expected_export_wildcard);
+ consumeToken();
+ break;
+ }
+
+ case MMToken::ExplicitKeyword:
+ case MMToken::ModuleKeyword:
+ case MMToken::HeaderKeyword:
+ case MMToken::UmbrellaKeyword:
+ default:
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_wildcard_member);
+ consumeToken();
+ break;
+ }
+ } while (!Done);
+
+ if (Tok.is(MMToken::RBrace))
+ consumeToken();
+ else {
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_rbrace);
+ Diags.Report(LBraceLoc, diag::note_mmap_lbrace_match);
+ HadError = true;
+ }
+}
+
+/// \brief If there is a specific header search directory due to the presence
+/// of an umbrella directory, retrieve that directory. Otherwise, return null.
+const DirectoryEntry *ModuleMapParser::getOverriddenHeaderSearchDir() {
+ for (Module *Mod = ActiveModule; Mod; Mod = Mod->Parent) {
+ // If we have an umbrella directory, use that.
+ if (Mod->hasUmbrellaDir())
+ return Mod->getUmbrellaDir();
+
+ // If we have a framework directory, stop looking.
+ if (Mod->IsFramework)
+ return 0;
+ }
+
+ return 0;
+}
+
+/// \brief Parse a module map file.
+///
+/// module-map-file:
+/// module-declaration*
+bool ModuleMapParser::parseModuleMapFile() {
+ do {
+ switch (Tok.Kind) {
+ case MMToken::EndOfFile:
+ return HadError;
+
+ case MMToken::ExplicitKeyword:
+ case MMToken::ModuleKeyword:
+ case MMToken::FrameworkKeyword:
+ parseModuleDecl();
+ break;
+
+ case MMToken::Comma:
+ case MMToken::ExportKeyword:
+ case MMToken::HeaderKeyword:
+ case MMToken::Identifier:
+ case MMToken::LBrace:
+ case MMToken::LSquare:
+ case MMToken::Period:
+ case MMToken::RBrace:
+ case MMToken::RSquare:
+ case MMToken::RequiresKeyword:
+ case MMToken::Star:
+ case MMToken::StringLiteral:
+ case MMToken::UmbrellaKeyword:
+ Diags.Report(Tok.getLocation(), diag::err_mmap_expected_module);
+ HadError = true;
+ consumeToken();
+ break;
+ }
+ } while (true);
+}
+
+bool ModuleMap::parseModuleMapFile(const FileEntry *File) {
+ assert(Target != 0 && "Missing target information");
+ FileID ID = SourceMgr->createFileID(File, SourceLocation(), SrcMgr::C_User);
+ const llvm::MemoryBuffer *Buffer = SourceMgr->getBuffer(ID);
+ if (!Buffer)
+ return true;
+
+ // Parse this module map file.
+ Lexer L(ID, SourceMgr->getBuffer(ID), *SourceMgr, MMapLangOpts);
+ Diags->getClient()->BeginSourceFile(MMapLangOpts);
+ ModuleMapParser Parser(L, *SourceMgr, *Diags, *this, File->getDir(),
+ BuiltinIncludeDir);
+ bool Result = Parser.parseModuleMapFile();
+ Diags->getClient()->EndSourceFile();
+
+ return Result;
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp b/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp
new file mode 100644
index 0000000..6f4c189
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PPCaching.cpp
@@ -0,0 +1,118 @@
+//===--- PPCaching.cpp - Handle caching lexed tokens ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements pieces of the Preprocessor interface that manage the
+// caching of lexed tokens.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+using namespace clang;
+
+/// EnableBacktrackAtThisPos - From the point that this method is called, and
+/// until CommitBacktrackedTokens() or Backtrack() is called, the Preprocessor
+/// keeps track of the lexed tokens so that a subsequent Backtrack() call will
+/// make the Preprocessor re-lex the same tokens.
+///
+/// Nested backtracks are allowed, meaning that EnableBacktrackAtThisPos can
+/// be called multiple times and CommitBacktrackedTokens/Backtrack calls will
+/// be combined with the EnableBacktrackAtThisPos calls in reverse order.
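+///
+/// A typical use, sketched for illustration (the surrounding parse logic and
+/// the LooksLikeWhatWeWanted flag are hypothetical):
+/// \code
+///   PP.EnableBacktrackAtThisPos();
+///   // ... speculatively lex tokens with PP.Lex(Tok) ...
+///   if (LooksLikeWhatWeWanted)
+///     PP.CommitBacktrackedTokens(); // keep the tokens we consumed
+///   else
+///     PP.Backtrack();               // re-lex the same tokens later
+/// \endcode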
+void Preprocessor::EnableBacktrackAtThisPos() {
+ BacktrackPositions.push_back(CachedLexPos);
+ EnterCachingLexMode();
+}
+
+/// CommitBacktrackedTokens - Disable the last EnableBacktrackAtThisPos call.
+void Preprocessor::CommitBacktrackedTokens() {
+ assert(!BacktrackPositions.empty()
+ && "EnableBacktrackAtThisPos was not called!");
+ BacktrackPositions.pop_back();
+}
+
+/// Backtrack - Make Preprocessor re-lex the tokens that were lexed since
+/// EnableBacktrackAtThisPos() was previously called.
+void Preprocessor::Backtrack() {
+ assert(!BacktrackPositions.empty()
+ && "EnableBacktrackAtThisPos was not called!");
+ CachedLexPos = BacktrackPositions.back();
+ BacktrackPositions.pop_back();
+ recomputeCurLexerKind();
+}
+
+void Preprocessor::CachingLex(Token &Result) {
+ if (!InCachingLexMode())
+ return;
+
+ if (CachedLexPos < CachedTokens.size()) {
+ Result = CachedTokens[CachedLexPos++];
+ return;
+ }
+
+ ExitCachingLexMode();
+ Lex(Result);
+
+ if (isBacktrackEnabled()) {
+ // Cache the lexed token.
+ EnterCachingLexMode();
+ CachedTokens.push_back(Result);
+ ++CachedLexPos;
+ return;
+ }
+
+ if (CachedLexPos < CachedTokens.size()) {
+ EnterCachingLexMode();
+ } else {
+ // All cached tokens were consumed.
+ CachedTokens.clear();
+ CachedLexPos = 0;
+ }
+}
+
+void Preprocessor::EnterCachingLexMode() {
+ if (InCachingLexMode())
+ return;
+
+ PushIncludeMacroStack();
+ CurLexerKind = CLK_CachingLexer;
+}
+
+const Token &Preprocessor::PeekAhead(unsigned N) {
+ assert(CachedLexPos + N > CachedTokens.size() && "Confused caching.");
+ ExitCachingLexMode();
+ for (unsigned C = CachedLexPos + N - CachedTokens.size(); C > 0; --C) {
+ CachedTokens.push_back(Token());
+ Lex(CachedTokens.back());
+ }
+ EnterCachingLexMode();
+ return CachedTokens.back();
+}
+
+void Preprocessor::AnnotatePreviousCachedTokens(const Token &Tok) {
+ assert(Tok.isAnnotation() && "Expected annotation token");
+ assert(CachedLexPos != 0 && "Expected to have some cached tokens");
+ assert(CachedTokens[CachedLexPos-1].getLastLoc() == Tok.getAnnotationEndLoc()
+ && "The annotation should be until the most recent cached token");
+
+ // Start from the end of the cached tokens list and look for the token
+ // that is the beginning of the annotation token.
+ for (CachedTokensTy::size_type i = CachedLexPos; i != 0; --i) {
+ CachedTokensTy::iterator AnnotBegin = CachedTokens.begin() + i-1;
+ if (AnnotBegin->getLocation() == Tok.getLocation()) {
+ assert((BacktrackPositions.empty() || BacktrackPositions.back() < i) &&
+ "The backtrack pos points inside the annotated tokens!");
+ // Replace the cached tokens with the single annotation token.
+ if (i < CachedLexPos)
+ CachedTokens.erase(AnnotBegin + 1, CachedTokens.begin() + CachedLexPos);
+ *AnnotBegin = Tok;
+ CachedLexPos = i;
+ return;
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPCallbacks.cpp b/contrib/llvm/tools/clang/lib/Lex/PPCallbacks.cpp
new file mode 100644
index 0000000..952b926
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PPCallbacks.cpp
@@ -0,0 +1,14 @@
+//===--- PPCallbacks.cpp - Callbacks for Preprocessor actions ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/PPCallbacks.h"
+
+using namespace clang;
+
+void PPChainedCallbacks::anchor() { }
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
new file mode 100644
index 0000000..625a204
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PPDirectives.cpp
@@ -0,0 +1,2075 @@
+//===--- PPDirectives.cpp - Directive Handling for Preprocessor -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements # directive processing for the Preprocessor.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/CodeCompletionHandler.h"
+#include "clang/Lex/ModuleLoader.h"
+#include "clang/Lex/Pragma.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Utility Methods for Preprocessor Directive Handling.
+//===----------------------------------------------------------------------===//
+
+MacroInfo *Preprocessor::AllocateMacroInfo() {
+ MacroInfoChain *MIChain;
+
+ if (MICache) {
+ MIChain = MICache;
+ MICache = MICache->Next;
+ }
+ else {
+ MIChain = BP.Allocate<MacroInfoChain>();
+ }
+
+ MIChain->Next = MIChainHead;
+ MIChain->Prev = 0;
+ if (MIChainHead)
+ MIChainHead->Prev = MIChain;
+ MIChainHead = MIChain;
+
+ return &(MIChain->MI);
+}
+
+MacroInfo *Preprocessor::AllocateMacroInfo(SourceLocation L) {
+ MacroInfo *MI = AllocateMacroInfo();
+ new (MI) MacroInfo(L);
+ return MI;
+}
+
+MacroInfo *Preprocessor::CloneMacroInfo(const MacroInfo &MacroToClone) {
+ MacroInfo *MI = AllocateMacroInfo();
+ new (MI) MacroInfo(MacroToClone, BP);
+ return MI;
+}
+
+/// ReleaseMacroInfo - Release the specified MacroInfo. This memory will
+/// be reused for allocating new MacroInfo objects.
+void Preprocessor::ReleaseMacroInfo(MacroInfo *MI) {
+ MacroInfoChain *MIChain = (MacroInfoChain*) MI;
+ if (MacroInfoChain *Prev = MIChain->Prev) {
+ MacroInfoChain *Next = MIChain->Next;
+ Prev->Next = Next;
+ if (Next)
+ Next->Prev = Prev;
+ }
+ else {
+ assert(MIChainHead == MIChain);
+ MIChainHead = MIChain->Next;
+ MIChainHead->Prev = 0;
+ }
+ MIChain->Next = MICache;
+ MICache = MIChain;
+
+ MI->Destroy();
+}
+
+/// DiscardUntilEndOfDirective - Read and discard all tokens remaining on the
+/// current line until the tok::eod token is found.
+void Preprocessor::DiscardUntilEndOfDirective() {
+ Token Tmp;
+ do {
+ LexUnexpandedToken(Tmp);
+ assert(Tmp.isNot(tok::eof) && "EOF seen while discarding directive tokens");
+ } while (Tmp.isNot(tok::eod));
+}
+
+/// ReadMacroName - Lex and validate a macro name, which occurs after a
+/// #define or #undef. This sets the token kind to eod and discards the rest
+/// of the macro line if the macro name is invalid. isDefineUndef is 1 if
+/// this is due to a #define, 2 if it is due to a #undef directive, and 0 if
+/// it is something else (e.g. #ifdef).
+void Preprocessor::ReadMacroName(Token &MacroNameTok, char isDefineUndef) {
+ // Read the token, don't allow macro expansion on it.
+ LexUnexpandedToken(MacroNameTok);
+
+ if (MacroNameTok.is(tok::code_completion)) {
+ if (CodeComplete)
+ CodeComplete->CodeCompleteMacroName(isDefineUndef == 1);
+ setCodeCompletionReached();
+ LexUnexpandedToken(MacroNameTok);
+ }
+
+ // Missing macro name?
+ if (MacroNameTok.is(tok::eod)) {
+ Diag(MacroNameTok, diag::err_pp_missing_macro_name);
+ return;
+ }
+
+ IdentifierInfo *II = MacroNameTok.getIdentifierInfo();
+ if (II == 0) {
+ bool Invalid = false;
+ std::string Spelling = getSpelling(MacroNameTok, &Invalid);
+ if (Invalid)
+ return;
+
+ const IdentifierInfo &Info = Identifiers.get(Spelling);
+
+ // Allow #defining |and| and friends in microsoft mode.
+ if (Info.isCPlusPlusOperatorKeyword() && getLangOpts().MicrosoftMode) {
+ MacroNameTok.setIdentifierInfo(getIdentifierInfo(Spelling));
+ return;
+ }
+
+ if (Info.isCPlusPlusOperatorKeyword())
+ // C++ 2.5p2: Alternative tokens behave the same as their primary tokens
+ // except for their spelling.
+ Diag(MacroNameTok, diag::err_pp_operator_used_as_macro_name) << Spelling;
+ else
+ Diag(MacroNameTok, diag::err_pp_macro_not_identifier);
+ // Fall through on error.
+ } else if (isDefineUndef && II->getPPKeywordID() == tok::pp_defined) {
+ // Error if defining "defined": C99 6.10.8.4.
+ Diag(MacroNameTok, diag::err_defined_macro_name);
+ } else if (isDefineUndef && II->hasMacroDefinition() &&
+ getMacroInfo(II)->isBuiltinMacro()) {
+ // Error if defining "__LINE__" and other builtins: C99 6.10.8.4.
+ if (isDefineUndef == 1)
+ Diag(MacroNameTok, diag::pp_redef_builtin_macro);
+ else
+ Diag(MacroNameTok, diag::pp_undef_builtin_macro);
+ } else {
+ // Okay, we got a good identifier node. Return it.
+ return;
+ }
+
+ // Invalid macro name, read and discard the rest of the line. Then set the
+ // token kind to tok::eod.
+ MacroNameTok.setKind(tok::eod);
+ return DiscardUntilEndOfDirective();
+}
+
+/// CheckEndOfDirective - Ensure that the next token is a tok::eod token. If
+/// not, emit a diagnostic and consume up until the eod. If EnableMacros is
+/// true, then we consider macros that expand to zero tokens as being ok.
+void Preprocessor::CheckEndOfDirective(const char *DirType, bool EnableMacros) {
+ Token Tmp;
+ // Lex unexpanded tokens for most directives: macros might expand to zero
+ // tokens, causing us to miss diagnosing invalid lines. Some directives (like
+ // #line) allow empty macros.
+ if (EnableMacros)
+ Lex(Tmp);
+ else
+ LexUnexpandedToken(Tmp);
+
+ // There should be no tokens after the directive, but we allow them as an
+ // extension.
+ while (Tmp.is(tok::comment)) // Skip comments in -C mode.
+ LexUnexpandedToken(Tmp);
+
+ if (Tmp.isNot(tok::eod)) {
+ // Add a fixit in GNU/C99/C++ mode. Don't offer a fixit for strict-C89,
+ // or if this is a macro-style preprocessing directive, because it is more
+ // trouble than it is worth to insert /**/ and check that there is no /**/
+ // in the range also.
+ FixItHint Hint;
+ if ((LangOpts.GNUMode || LangOpts.C99 || LangOpts.CPlusPlus) &&
+ !CurTokenLexer)
+ Hint = FixItHint::CreateInsertion(Tmp.getLocation(),"//");
+ Diag(Tmp, diag::ext_pp_extra_tokens_at_eol) << DirType << Hint;
+ DiscardUntilEndOfDirective();
+ }
+}
+
+/// SkipExcludedConditionalBlock - We just read a #if or related directive and
+/// decided that the subsequent tokens are in the #if'd out portion of the
+/// file. Lex the rest of the file, until we see an #endif. If
+/// FoundNonSkipPortion is true, then we have already emitted code for part of
+/// this #if directive, so #else/#elif blocks should never be entered. If
+/// FoundElse is true, then we have already seen a #else for this conditional,
+/// so any further #else directive is a duplicate. When this returns, the
+/// caller can lex the first valid token.
+void Preprocessor::SkipExcludedConditionalBlock(SourceLocation IfTokenLoc,
+ bool FoundNonSkipPortion,
+ bool FoundElse,
+ SourceLocation ElseLoc) {
+ ++NumSkipped;
+ assert(CurTokenLexer == 0 && CurPPLexer && "Lexing a macro, not a file?");
+
+ CurPPLexer->pushConditionalLevel(IfTokenLoc, /*isSkipping*/false,
+ FoundNonSkipPortion, FoundElse);
+
+ if (CurPTHLexer) {
+ PTHSkipExcludedConditionalBlock();
+ return;
+ }
+
+ // Enter raw mode to disable identifier lookup (and thus macro expansion),
+ // disabling warnings, etc.
+ CurPPLexer->LexingRawMode = true;
+ Token Tok;
+ while (1) {
+ CurLexer->Lex(Tok);
+
+ if (Tok.is(tok::code_completion)) {
+ if (CodeComplete)
+ CodeComplete->CodeCompleteInConditionalExclusion();
+ setCodeCompletionReached();
+ continue;
+ }
+
+ // If this is the end of the buffer, we have an error.
+ if (Tok.is(tok::eof)) {
+ // Emit errors for each unterminated conditional on the stack, including
+ // the current one.
+ while (!CurPPLexer->ConditionalStack.empty()) {
+ if (CurLexer->getFileLoc() != CodeCompletionFileLoc)
+ Diag(CurPPLexer->ConditionalStack.back().IfLoc,
+ diag::err_pp_unterminated_conditional);
+ CurPPLexer->ConditionalStack.pop_back();
+ }
+
+ // Just return and let the caller lex after this #include.
+ break;
+ }
+
+ // If this token is not a preprocessor directive, just skip it.
+ if (Tok.isNot(tok::hash) || !Tok.isAtStartOfLine())
+ continue;
+
+ // We just parsed a # character at the start of a line, so we're in
+ // directive mode. Tell the lexer this so any newlines we see will be
+ // converted into an EOD token (this terminates the macro).
+ CurPPLexer->ParsingPreprocessorDirective = true;
+ if (CurLexer) CurLexer->SetCommentRetentionState(false);
+
+ // Read the next token, the directive flavor.
+ LexUnexpandedToken(Tok);
+
+ // If this isn't an identifier directive (e.g. is "# 1\n" or "#\n", or
+ // something bogus), skip it.
+ if (Tok.isNot(tok::raw_identifier)) {
+ CurPPLexer->ParsingPreprocessorDirective = false;
+ // Restore comment saving mode.
+ if (CurLexer) CurLexer->SetCommentRetentionState(KeepComments);
+ continue;
+ }
+
+ // If the first letter isn't i or e, it isn't interesting to us. We know that
+ // this is safe in the face of spelling differences, because there is no way
+ // to spell an i/e in a strange way that is another letter. Skipping this
+ // allows us to avoid looking up the identifier info for #define/#undef and
+ // other common directives.
+ const char *RawCharData = Tok.getRawIdentifierData();
+
+ char FirstChar = RawCharData[0];
+ if (FirstChar >= 'a' && FirstChar <= 'z' &&
+ FirstChar != 'i' && FirstChar != 'e') {
+ CurPPLexer->ParsingPreprocessorDirective = false;
+ // Restore comment saving mode.
+ if (CurLexer) CurLexer->SetCommentRetentionState(KeepComments);
+ continue;
+ }
+
+ // Get the identifier name without trigraphs or embedded newlines. Note
+ // that we can't use Tok.getIdentifierInfo() because its lookup is disabled
+ // when skipping.
+ char DirectiveBuf[20];
+ StringRef Directive;
+ if (!Tok.needsCleaning() && Tok.getLength() < 20) {
+ Directive = StringRef(RawCharData, Tok.getLength());
+ } else {
+ std::string DirectiveStr = getSpelling(Tok);
+ unsigned IdLen = DirectiveStr.size();
+ if (IdLen >= 20) {
+ CurPPLexer->ParsingPreprocessorDirective = false;
+ // Restore comment saving mode.
+ if (CurLexer) CurLexer->SetCommentRetentionState(KeepComments);
+ continue;
+ }
+ memcpy(DirectiveBuf, &DirectiveStr[0], IdLen);
+ Directive = StringRef(DirectiveBuf, IdLen);
+ }
+
+ if (Directive.startswith("if")) {
+ StringRef Sub = Directive.substr(2);
+ if (Sub.empty() || // "if"
+ Sub == "def" || // "ifdef"
+ Sub == "ndef") { // "ifndef"
+ // We know the entire #if/#ifdef/#ifndef block will be skipped, don't
+ // bother parsing the condition.
+ DiscardUntilEndOfDirective();
+ CurPPLexer->pushConditionalLevel(Tok.getLocation(), /*wasskipping*/true,
+ /*foundnonskip*/false,
+ /*foundelse*/false);
+ }
+ } else if (Directive[0] == 'e') {
+ StringRef Sub = Directive.substr(1);
+ if (Sub == "ndif") { // "endif"
+ CheckEndOfDirective("endif");
+ PPConditionalInfo CondInfo;
+ CondInfo.WasSkipping = true; // Silence bogus warning.
+ bool InCond = CurPPLexer->popConditionalLevel(CondInfo);
+ (void)InCond; // Silence warning in no-asserts mode.
+ assert(!InCond && "Can't be skipping if not in a conditional!");
+
+ // If we popped the outermost skipping block, we're done skipping!
+ if (!CondInfo.WasSkipping) {
+ if (Callbacks)
+ Callbacks->Endif(Tok.getLocation(), CondInfo.IfLoc);
+ break;
+ }
+ } else if (Sub == "lse") { // "else".
+ // #else directive in a skipping conditional. If not in some other
+ // skipping conditional, and if #else hasn't already been seen, enter it
+ // as a non-skipping conditional.
+ PPConditionalInfo &CondInfo = CurPPLexer->peekConditionalLevel();
+
+ // If this is a #else with a #else before it, report the error.
+ if (CondInfo.FoundElse) Diag(Tok, diag::pp_err_else_after_else);
+
+ // Note that we've seen a #else in this conditional.
+ CondInfo.FoundElse = true;
+
+ // If the conditional is at the top level, and the #if block wasn't
+ // entered, enter the #else block now.
+ if (!CondInfo.WasSkipping && !CondInfo.FoundNonSkip) {
+ CondInfo.FoundNonSkip = true;
+ CheckEndOfDirective("else");
+ if (Callbacks)
+ Callbacks->Else(Tok.getLocation(), CondInfo.IfLoc);
+ break;
+ } else {
+ DiscardUntilEndOfDirective(); // C99 6.10p4.
+ }
+ } else if (Sub == "lif") { // "elif".
+ PPConditionalInfo &CondInfo = CurPPLexer->peekConditionalLevel();
+
+ bool ShouldEnter;
+ const SourceLocation ConditionalBegin = CurPPLexer->getSourceLocation();
+ // If this is in a skipping block or if we've already handled this #if
+ // block, don't bother parsing the condition.
+ if (CondInfo.WasSkipping || CondInfo.FoundNonSkip) {
+ DiscardUntilEndOfDirective();
+ ShouldEnter = false;
+ } else {
+ // Restore the value of LexingRawMode so that identifiers are
+ // looked up, etc, inside the #elif expression.
+ assert(CurPPLexer->LexingRawMode && "We have to be skipping here!");
+ CurPPLexer->LexingRawMode = false;
+ IdentifierInfo *IfNDefMacro = 0;
+ ShouldEnter = EvaluateDirectiveExpression(IfNDefMacro);
+ CurPPLexer->LexingRawMode = true;
+ }
+ const SourceLocation ConditionalEnd = CurPPLexer->getSourceLocation();
+
+ // If this is a #elif with a #else before it, report the error.
+ if (CondInfo.FoundElse) Diag(Tok, diag::pp_err_elif_after_else);
+
+ // If this condition is true, enter it!
+ if (ShouldEnter) {
+ CondInfo.FoundNonSkip = true;
+ if (Callbacks)
+ Callbacks->Elif(Tok.getLocation(),
+ SourceRange(ConditionalBegin, ConditionalEnd),
+ CondInfo.IfLoc);
+ break;
+ }
+ }
+ }
+
+ CurPPLexer->ParsingPreprocessorDirective = false;
+ // Restore comment saving mode.
+ if (CurLexer) CurLexer->SetCommentRetentionState(KeepComments);
+ }
+
+ // Finally, if we are out of the conditional (we saw an #endif or ran off
+ // the end of the file), just stop skipping and return to lexing whatever
+ // came after the #if block.
+ CurPPLexer->LexingRawMode = false;
+
+ if (Callbacks) {
+ SourceLocation BeginLoc = ElseLoc.isValid() ? ElseLoc : IfTokenLoc;
+ Callbacks->SourceRangeSkipped(SourceRange(BeginLoc, Tok.getLocation()));
+ }
+}
+
+void Preprocessor::PTHSkipExcludedConditionalBlock() {
+
+ while (1) {
+ assert(CurPTHLexer);
+ assert(CurPTHLexer->LexingRawMode == false);
+
+ // Skip to the next '#else', '#elif', or #endif.
+ if (CurPTHLexer->SkipBlock()) {
+ // We have reached an #endif. Both the '#' and 'endif' tokens
+ // have been consumed by the PTHLexer. Just pop off the condition level.
+ PPConditionalInfo CondInfo;
+ bool InCond = CurPTHLexer->popConditionalLevel(CondInfo);
+ (void)InCond; // Silence warning in no-asserts mode.
+ assert(!InCond && "Can't be skipping if not in a conditional!");
+ break;
+ }
+
+ // We have reached a '#else' or '#elif'. Lex the next token to get
+ // the directive flavor.
+ Token Tok;
+ LexUnexpandedToken(Tok);
+
+ // We can actually look up the IdentifierInfo here since we aren't in
+ // raw mode.
+ tok::PPKeywordKind K = Tok.getIdentifierInfo()->getPPKeywordID();
+
+ if (K == tok::pp_else) {
+ // #else: Enter the else condition. We aren't in a nested condition
+ // since we skip those. We're always in the one matching the last
+ // block we skipped.
+ PPConditionalInfo &CondInfo = CurPTHLexer->peekConditionalLevel();
+ // Note that we've seen a #else in this conditional.
+ CondInfo.FoundElse = true;
+
+ // If the #if block wasn't entered then enter the #else block now.
+ if (!CondInfo.FoundNonSkip) {
+ CondInfo.FoundNonSkip = true;
+
+ // Scan until the eod token.
+ CurPTHLexer->ParsingPreprocessorDirective = true;
+ DiscardUntilEndOfDirective();
+ CurPTHLexer->ParsingPreprocessorDirective = false;
+
+ break;
+ }
+
+ // Otherwise skip this block.
+ continue;
+ }
+
+ assert(K == tok::pp_elif);
+ PPConditionalInfo &CondInfo = CurPTHLexer->peekConditionalLevel();
+
+ // If this is a #elif with a #else before it, report the error.
+ if (CondInfo.FoundElse)
+ Diag(Tok, diag::pp_err_elif_after_else);
+
+ // If this is in a skipping block or if we've already handled this #if
+ // block, don't bother parsing the condition. We just skip this block.
+ if (CondInfo.FoundNonSkip)
+ continue;
+
+ // Evaluate the condition of the #elif.
+ IdentifierInfo *IfNDefMacro = 0;
+ CurPTHLexer->ParsingPreprocessorDirective = true;
+ bool ShouldEnter = EvaluateDirectiveExpression(IfNDefMacro);
+ CurPTHLexer->ParsingPreprocessorDirective = false;
+
+ // If this condition is true, enter it!
+ if (ShouldEnter) {
+ CondInfo.FoundNonSkip = true;
+ break;
+ }
+
+ // Otherwise, skip this block and go to the next one.
+ continue;
+ }
+}
+
+/// LookupFile - Given a "foo" or <foo> reference, look up the indicated file,
+/// return null on failure. isAngled indicates whether the file reference is
+/// for system #include's or not (i.e. using <> instead of "").
+const FileEntry *Preprocessor::LookupFile(
+ StringRef Filename,
+ bool isAngled,
+ const DirectoryLookup *FromDir,
+ const DirectoryLookup *&CurDir,
+ SmallVectorImpl<char> *SearchPath,
+ SmallVectorImpl<char> *RelativePath,
+ Module **SuggestedModule,
+ bool SkipCache) {
+ // If the header lookup mechanism may be relative to the current file, pass in
+ // info about where the current file is.
+ const FileEntry *CurFileEnt = 0;
+ if (!FromDir) {
+ FileID FID = getCurrentFileLexer()->getFileID();
+ CurFileEnt = SourceMgr.getFileEntryForID(FID);
+
+ // If there is no file entry associated with this file, it must be the
+ // predefines buffer. Any other file is not lexed with a normal lexer, so
+ // it won't be scanned for preprocessor directives. If we have the
+ // predefines buffer, resolve #include references (which come from the
+ // -include command line argument) as if they came from the main file, this
+ // affects file lookup etc.
+ if (CurFileEnt == 0) {
+ FID = SourceMgr.getMainFileID();
+ CurFileEnt = SourceMgr.getFileEntryForID(FID);
+ }
+ }
+
+ // Do a standard file entry lookup.
+ CurDir = CurDirLookup;
+ const FileEntry *FE = HeaderInfo.LookupFile(
+ Filename, isAngled, FromDir, CurDir, CurFileEnt,
+ SearchPath, RelativePath, SuggestedModule, SkipCache);
+ if (FE) return FE;
+
+ // Otherwise, see if this is a subframework header. If so, this is relative
+ // to one of the headers on the #include stack. Walk the list of the current
+ // headers on the #include stack and pass them to HeaderInfo.
+ // FIXME: SuggestedModule!
+ if (IsFileLexer()) {
+ if ((CurFileEnt = SourceMgr.getFileEntryForID(CurPPLexer->getFileID())))
+ if ((FE = HeaderInfo.LookupSubframeworkHeader(Filename, CurFileEnt,
+ SearchPath, RelativePath)))
+ return FE;
+ }
+
+ for (unsigned i = 0, e = IncludeMacroStack.size(); i != e; ++i) {
+ IncludeStackInfo &ISEntry = IncludeMacroStack[e-i-1];
+ if (IsFileLexer(ISEntry)) {
+ if ((CurFileEnt =
+ SourceMgr.getFileEntryForID(ISEntry.ThePPLexer->getFileID())))
+ if ((FE = HeaderInfo.LookupSubframeworkHeader(
+ Filename, CurFileEnt, SearchPath, RelativePath)))
+ return FE;
+ }
+ }
+
+ // Otherwise, we really couldn't find the file.
+ return 0;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Directive Handling.
+//===----------------------------------------------------------------------===//
+
+/// HandleDirective - This callback is invoked when the lexer sees a # token
+/// at the start of a line. This consumes the directive, modifies the
+/// lexer/preprocessor state, and advances the lexer(s) so that the next token
+/// read is the correct one.
+void Preprocessor::HandleDirective(Token &Result) {
+ // FIXME: Traditional: # with whitespace before it not recognized by K&R?
+
+ // We just parsed a # character at the start of a line, so we're in directive
+ // mode. Tell the lexer this so any newlines we see will be converted into an
+ // EOD token (which terminates the directive).
+ CurPPLexer->ParsingPreprocessorDirective = true;
+
+ ++NumDirectives;
+
+ // We are about to read a token. For the multiple-include optimization FA to
+ // work, we have to remember if we had read any tokens *before* this
+ // pp-directive.
+ bool ReadAnyTokensBeforeDirective =CurPPLexer->MIOpt.getHasReadAnyTokensVal();
+
+ // Save the '#' token in case we need to return it later.
+ Token SavedHash = Result;
+
+ // Read the next token, the directive flavor. This isn't expanded due to
+ // C99 6.10.3p8.
+ LexUnexpandedToken(Result);
+
+ // C99 6.10.3p11: Is this preprocessor directive in a macro invocation? e.g.:
+ // #define A(x) #x
+ // A(abc
+ // #warning blah
+ // def)
+ // If so, the user is relying on undefined behavior; emit a diagnostic. Do
+ // not support this for #include-like directives, since that can result in
+ // terrible diagnostics, and does not work in GCC.
+ if (InMacroArgs) {
+ if (IdentifierInfo *II = Result.getIdentifierInfo()) {
+ switch (II->getPPKeywordID()) {
+ case tok::pp_include:
+ case tok::pp_import:
+ case tok::pp_include_next:
+ case tok::pp___include_macros:
+ Diag(Result, diag::err_embedded_include) << II->getName();
+ DiscardUntilEndOfDirective();
+ return;
+ default:
+ break;
+ }
+ }
+ Diag(Result, diag::ext_embedded_directive);
+ }
+
+TryAgain:
+ switch (Result.getKind()) {
+ case tok::eod:
+ return; // null directive.
+ case tok::comment:
+ // Handle stuff like "# /*foo*/ define X" in -E -C mode.
+ LexUnexpandedToken(Result);
+ goto TryAgain;
+ case tok::code_completion:
+ if (CodeComplete)
+ CodeComplete->CodeCompleteDirective(
+ CurPPLexer->getConditionalStackDepth() > 0);
+ setCodeCompletionReached();
+ return;
+ case tok::numeric_constant: // # 7 GNU line marker directive.
+ if (getLangOpts().AsmPreprocessor)
+ break; // # 4 is not a preprocessor directive in .S files.
+ return HandleDigitDirective(Result);
+ default:
+ IdentifierInfo *II = Result.getIdentifierInfo();
+ if (II == 0) break; // Not an identifier.
+
+ // Ask what the preprocessor keyword ID is.
+ switch (II->getPPKeywordID()) {
+ default: break;
+ // C99 6.10.1 - Conditional Inclusion.
+ case tok::pp_if:
+ return HandleIfDirective(Result, ReadAnyTokensBeforeDirective);
+ case tok::pp_ifdef:
+ return HandleIfdefDirective(Result, false, true/*not valid for miopt*/);
+ case tok::pp_ifndef:
+ return HandleIfdefDirective(Result, true, ReadAnyTokensBeforeDirective);
+ case tok::pp_elif:
+ return HandleElifDirective(Result);
+ case tok::pp_else:
+ return HandleElseDirective(Result);
+ case tok::pp_endif:
+ return HandleEndifDirective(Result);
+
+ // C99 6.10.2 - Source File Inclusion.
+ case tok::pp_include:
+ // Handle #include.
+ return HandleIncludeDirective(SavedHash.getLocation(), Result);
+ case tok::pp___include_macros:
+ // Handle -imacros.
+ return HandleIncludeMacrosDirective(SavedHash.getLocation(), Result);
+
+ // C99 6.10.3 - Macro Replacement.
+ case tok::pp_define:
+ return HandleDefineDirective(Result);
+ case tok::pp_undef:
+ return HandleUndefDirective(Result);
+
+ // C99 6.10.4 - Line Control.
+ case tok::pp_line:
+ return HandleLineDirective(Result);
+
+ // C99 6.10.5 - Error Directive.
+ case tok::pp_error:
+ return HandleUserDiagnosticDirective(Result, false);
+
+ // C99 6.10.6 - Pragma Directive.
+ case tok::pp_pragma:
+ return HandlePragmaDirective(PIK_HashPragma);
+
+ // GNU Extensions.
+ case tok::pp_import:
+ return HandleImportDirective(SavedHash.getLocation(), Result);
+ case tok::pp_include_next:
+ return HandleIncludeNextDirective(SavedHash.getLocation(), Result);
+
+ case tok::pp_warning:
+ Diag(Result, diag::ext_pp_warning_directive);
+ return HandleUserDiagnosticDirective(Result, true);
+ case tok::pp_ident:
+ return HandleIdentSCCSDirective(Result);
+ case tok::pp_sccs:
+ return HandleIdentSCCSDirective(Result);
+ case tok::pp_assert:
+ //isExtension = true; // FIXME: implement #assert
+ break;
+ case tok::pp_unassert:
+ //isExtension = true; // FIXME: implement #unassert
+ break;
+
+ case tok::pp___public_macro:
+ if (getLangOpts().Modules)
+ return HandleMacroPublicDirective(Result);
+ break;
+
+ case tok::pp___private_macro:
+ if (getLangOpts().Modules)
+ return HandleMacroPrivateDirective(Result);
+ break;
+ }
+ break;
+ }
+
+ // If this is a .S file, treat unknown # directives as non-preprocessor
+ // directives. This is important because # may be a comment or introduce
+ // various pseudo-ops. Just return the # token and push back the following
+ // token to be lexed next time.
+ if (getLangOpts().AsmPreprocessor) {
+ Token *Toks = new Token[2];
+ // Return the # and the token after it.
+ Toks[0] = SavedHash;
+ Toks[1] = Result;
+
+ // If the second token is a hashhash token, then we need to translate it to
+ // unknown so the token lexer doesn't try to perform token pasting.
+ if (Result.is(tok::hashhash))
+ Toks[1].setKind(tok::unknown);
+
+ // Enter this token stream so that we re-lex the tokens. Make sure to
+ // enable macro expansion, in case the token after the # is an identifier
+ // that is expanded.
+ EnterTokenStream(Toks, 2, false, true);
+ return;
+ }
+
+ // If we reached here, the preprocessing token is not valid!
+ Diag(Result, diag::err_pp_invalid_directive);
+
+ // Read the rest of the PP line.
+ DiscardUntilEndOfDirective();
+
+ // Okay, we're done parsing the directive.
+}
+
+/// GetLineValue - Convert a numeric token into an unsigned value, emitting
+/// Diagnostic DiagID if it is invalid, and returning the value in Val.
+static bool GetLineValue(Token &DigitTok, unsigned &Val,
+ unsigned DiagID, Preprocessor &PP) {
+ if (DigitTok.isNot(tok::numeric_constant)) {
+ PP.Diag(DigitTok, DiagID);
+
+ if (DigitTok.isNot(tok::eod))
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+
+ SmallString<64> IntegerBuffer;
+ IntegerBuffer.resize(DigitTok.getLength());
+ const char *DigitTokBegin = &IntegerBuffer[0];
+ bool Invalid = false;
+ unsigned ActualLength = PP.getSpelling(DigitTok, DigitTokBegin, &Invalid);
+ if (Invalid)
+ return true;
+
+ // Verify that we have a simple digit-sequence, and compute the value. This
+ // is always a simple digit string computed in decimal, so we do this manually
+ // here.
+ Val = 0;
+ for (unsigned i = 0; i != ActualLength; ++i) {
+ if (!isdigit(DigitTokBegin[i])) {
+ PP.Diag(PP.AdvanceToTokenCharacter(DigitTok.getLocation(), i),
+ diag::err_pp_line_digit_sequence);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+
+ unsigned NextVal = Val*10+(DigitTokBegin[i]-'0');
+ if (NextVal < Val) { // overflow.
+ PP.Diag(DigitTok, DiagID);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+ Val = NextVal;
+ }
+
+ // Reject 0; this is needed both by #line numbers and flags.
+ if (Val == 0) {
+ PP.Diag(DigitTok, DiagID);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+
+ if (DigitTokBegin[0] == '0')
+ PP.Diag(DigitTok.getLocation(), diag::warn_pp_line_decimal);
+
+ return false;
+}
+
+/// HandleLineDirective - Handle #line directive: C99 6.10.4. The two
+/// acceptable forms are:
+/// # line digit-sequence
+/// # line digit-sequence "s-char-sequence"
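+///
+/// For example (illustrative file name):
+/// \code
+///   #line 42
+///   #line 42 "renamed.c"
+/// \endcode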
+void Preprocessor::HandleLineDirective(Token &Tok) {
+ // Read the line # and string argument. Per C99 6.10.4p5, these tokens are
+ // expanded.
+ Token DigitTok;
+ Lex(DigitTok);
+
+ // Validate the number and convert it to an unsigned.
+ unsigned LineNo;
+ if (GetLineValue(DigitTok, LineNo, diag::err_pp_line_requires_integer,*this))
+ return;
+
+ // Enforce C99 6.10.4p3: "The digit sequence shall not specify ... a
+ // number greater than 2147483647". C90 requires that the line # be <= 32767.
+ unsigned LineLimit = 32768U;
+ if (LangOpts.C99 || LangOpts.CPlusPlus0x)
+ LineLimit = 2147483648U;
+ if (LineNo >= LineLimit)
+ Diag(DigitTok, diag::ext_pp_line_too_big) << LineLimit;
+ else if (LangOpts.CPlusPlus0x && LineNo >= 32768U)
+ Diag(DigitTok, diag::warn_cxx98_compat_pp_line_too_big);
+
+ int FilenameID = -1;
+ Token StrTok;
+ Lex(StrTok);
+
+ // If the StrTok is "eod", then it wasn't present. Otherwise, it must be a
+ // string followed by eod.
+ if (StrTok.is(tok::eod))
+ ; // ok
+ else if (StrTok.isNot(tok::string_literal)) {
+ Diag(StrTok, diag::err_pp_line_invalid_filename);
+ return DiscardUntilEndOfDirective();
+ } else if (StrTok.hasUDSuffix()) {
+ Diag(StrTok, diag::err_invalid_string_udl);
+ return DiscardUntilEndOfDirective();
+ } else {
+ // Parse and validate the string, converting it into a unique ID.
+ StringLiteralParser Literal(&StrTok, 1, *this);
+ assert(Literal.isAscii() && "Didn't allow wide strings in");
+ if (Literal.hadError)
+ return DiscardUntilEndOfDirective();
+ if (Literal.Pascal) {
+ Diag(StrTok, diag::err_pp_linemarker_invalid_filename);
+ return DiscardUntilEndOfDirective();
+ }
+ FilenameID = SourceMgr.getLineTableFilenameID(Literal.GetString());
+
+ // Verify that there is nothing after the string, other than EOD. Because
+ // of C99 6.10.4p5, macros that expand to empty tokens are ok.
+ CheckEndOfDirective("line", true);
+ }
+
+ SourceMgr.AddLineNote(DigitTok.getLocation(), LineNo, FilenameID);
+
+ if (Callbacks)
+ Callbacks->FileChanged(CurPPLexer->getSourceLocation(),
+ PPCallbacks::RenameFile,
+ SrcMgr::C_User);
+}
+
+/// ReadLineMarkerFlags - Parse and validate any flags at the end of a GNU line
+/// marker directive.
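+/// The optional flags are, in order: '1' (entering a new file), '2' (returning
+/// to a file after an include), '3' (the file is a system header) and '4' (the
+/// file is treated as wrapped in an implicit extern "C" block).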
+static bool ReadLineMarkerFlags(bool &IsFileEntry, bool &IsFileExit,
+ bool &IsSystemHeader, bool &IsExternCHeader,
+ Preprocessor &PP) {
+ unsigned FlagVal;
+ Token FlagTok;
+ PP.Lex(FlagTok);
+ if (FlagTok.is(tok::eod)) return false;
+ if (GetLineValue(FlagTok, FlagVal, diag::err_pp_linemarker_invalid_flag, PP))
+ return true;
+
+ if (FlagVal == 1) {
+ IsFileEntry = true;
+
+ PP.Lex(FlagTok);
+ if (FlagTok.is(tok::eod)) return false;
+ if (GetLineValue(FlagTok, FlagVal, diag::err_pp_linemarker_invalid_flag,PP))
+ return true;
+ } else if (FlagVal == 2) {
+ IsFileExit = true;
+
+ SourceManager &SM = PP.getSourceManager();
+ // If we are leaving the current presumed file, check to make sure the
+ // presumed include stack isn't empty!
+ FileID CurFileID =
+ SM.getDecomposedExpansionLoc(FlagTok.getLocation()).first;
+ PresumedLoc PLoc = SM.getPresumedLoc(FlagTok.getLocation());
+ if (PLoc.isInvalid())
+ return true;
+
+ // If there is no include loc (main file) or if the include loc is in a
+ // different physical file, then we aren't in a "1" line marker flag region.
+ SourceLocation IncLoc = PLoc.getIncludeLoc();
+ if (IncLoc.isInvalid() ||
+ SM.getDecomposedExpansionLoc(IncLoc).first != CurFileID) {
+ PP.Diag(FlagTok, diag::err_pp_linemarker_invalid_pop);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+
+ PP.Lex(FlagTok);
+ if (FlagTok.is(tok::eod)) return false;
+ if (GetLineValue(FlagTok, FlagVal, diag::err_pp_linemarker_invalid_flag,PP))
+ return true;
+ }
+
+ // We must have 3 if there are still flags.
+ if (FlagVal != 3) {
+ PP.Diag(FlagTok, diag::err_pp_linemarker_invalid_flag);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+
+ IsSystemHeader = true;
+
+ PP.Lex(FlagTok);
+ if (FlagTok.is(tok::eod)) return false;
+ if (GetLineValue(FlagTok, FlagVal, diag::err_pp_linemarker_invalid_flag, PP))
+ return true;
+
+ // We must have 4 if there is yet another flag.
+ if (FlagVal != 4) {
+ PP.Diag(FlagTok, diag::err_pp_linemarker_invalid_flag);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+ }
+
+ IsExternCHeader = true;
+
+ PP.Lex(FlagTok);
+ if (FlagTok.is(tok::eod)) return false;
+
+ // There are no more valid flags here.
+ PP.Diag(FlagTok, diag::err_pp_linemarker_invalid_flag);
+ PP.DiscardUntilEndOfDirective();
+ return true;
+}
+
+/// HandleDigitDirective - Handle a GNU line marker directive, whose syntax is
+/// one of the following forms:
+///
+/// # 42
+/// # 42 "file" ('1' | '2')?
+/// # 42 "file" ('1' | '2')? '3' '4'?
+///
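+/// For example, a marker such as:
+///   # 1 "header.h" 1 3
+/// enters "header.h" as a system header ("header.h" is an illustrative name).
+///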
+void Preprocessor::HandleDigitDirective(Token &DigitTok) {
+ // Validate the number and convert it to an unsigned. GNU does not have a
+ // line # limit other than that it fit in 32 bits.
+ unsigned LineNo;
+ if (GetLineValue(DigitTok, LineNo, diag::err_pp_linemarker_requires_integer,
+ *this))
+ return;
+
+ Token StrTok;
+ Lex(StrTok);
+
+ bool IsFileEntry = false, IsFileExit = false;
+ bool IsSystemHeader = false, IsExternCHeader = false;
+ int FilenameID = -1;
+
+ // If the StrTok is "eod", then it wasn't present. Otherwise, it must be a
+ // string followed by eod.
+ if (StrTok.is(tok::eod))
+ ; // ok
+ else if (StrTok.isNot(tok::string_literal)) {
+ Diag(StrTok, diag::err_pp_linemarker_invalid_filename);
+ return DiscardUntilEndOfDirective();
+ } else if (StrTok.hasUDSuffix()) {
+ Diag(StrTok, diag::err_invalid_string_udl);
+ return DiscardUntilEndOfDirective();
+ } else {
+ // Parse and validate the string, converting it into a unique ID.
+ StringLiteralParser Literal(&StrTok, 1, *this);
+ assert(Literal.isAscii() && "Didn't allow wide strings in");
+ if (Literal.hadError)
+ return DiscardUntilEndOfDirective();
+ if (Literal.Pascal) {
+ Diag(StrTok, diag::err_pp_linemarker_invalid_filename);
+ return DiscardUntilEndOfDirective();
+ }
+ FilenameID = SourceMgr.getLineTableFilenameID(Literal.GetString());
+
+ // If a filename was present, read any flags that are present.
+ if (ReadLineMarkerFlags(IsFileEntry, IsFileExit,
+ IsSystemHeader, IsExternCHeader, *this))
+ return;
+ }
+
+ // Create a line note with this information.
+ SourceMgr.AddLineNote(DigitTok.getLocation(), LineNo, FilenameID,
+ IsFileEntry, IsFileExit,
+ IsSystemHeader, IsExternCHeader);
+
+ // If the preprocessor has callbacks installed, notify them of the #line
+ // change. This is used so that the line marker comes out in -E mode for
+ // example.
+ if (Callbacks) {
+ PPCallbacks::FileChangeReason Reason = PPCallbacks::RenameFile;
+ if (IsFileEntry)
+ Reason = PPCallbacks::EnterFile;
+ else if (IsFileExit)
+ Reason = PPCallbacks::ExitFile;
+ SrcMgr::CharacteristicKind FileKind = SrcMgr::C_User;
+ if (IsExternCHeader)
+ FileKind = SrcMgr::C_ExternCSystem;
+ else if (IsSystemHeader)
+ FileKind = SrcMgr::C_System;
+
+ Callbacks->FileChanged(CurPPLexer->getSourceLocation(), Reason, FileKind);
+ }
+}
+
+
+/// HandleUserDiagnosticDirective - Handle a #warning or #error directive.
+///
+void Preprocessor::HandleUserDiagnosticDirective(Token &Tok,
+ bool isWarning) {
+ // PTH doesn't emit #warning or #error directives.
+ if (CurPTHLexer)
+ return CurPTHLexer->DiscardToEndOfLine();
+
+ // Read the rest of the line raw. We do this because we don't want macros
+ // to be expanded and we don't require that the tokens be valid preprocessing
+ // tokens. For example, this is allowed: "#warning ` 'foo". GCC does
+ // collapse consecutive whitespace between tokens, but this isn't
+ // specified by the standard.
+ std::string Message = CurLexer->ReadToEndOfLine();
+
+ // Find the first non-whitespace character, so that we can make the
+ // diagnostic more succinct.
+ StringRef Msg(Message);
+ size_t i = Msg.find_first_not_of(' ');
+ if (i < Msg.size())
+ Msg = Msg.substr(i);
+
+ if (isWarning)
+ Diag(Tok, diag::pp_hash_warning) << Msg;
+ else
+ Diag(Tok, diag::err_pp_hash_error) << Msg;
+}
+
+/// HandleIdentSCCSDirective - Handle a #ident/#sccs directive.
+///
+void Preprocessor::HandleIdentSCCSDirective(Token &Tok) {
+ // Yes, this directive is an extension.
+ Diag(Tok, diag::ext_pp_ident_directive);
+
+ // Read the string argument.
+ Token StrTok;
+ Lex(StrTok);
+
+ // If the token kind isn't a string, it's a malformed directive.
+ if (StrTok.isNot(tok::string_literal) &&
+ StrTok.isNot(tok::wide_string_literal)) {
+ Diag(StrTok, diag::err_pp_malformed_ident);
+ if (StrTok.isNot(tok::eod))
+ DiscardUntilEndOfDirective();
+ return;
+ }
+
+ if (StrTok.hasUDSuffix()) {
+ Diag(StrTok, diag::err_invalid_string_udl);
+ return DiscardUntilEndOfDirective();
+ }
+
+ // Verify that there is nothing after the string, other than EOD.
+ CheckEndOfDirective("ident");
+
+ if (Callbacks) {
+ bool Invalid = false;
+ std::string Str = getSpelling(StrTok, &Invalid);
+ if (!Invalid)
+ Callbacks->Ident(Tok.getLocation(), Str);
+ }
+}
+
+/// \brief Handle a #public directive.
+void Preprocessor::HandleMacroPublicDirective(Token &Tok) {
+ Token MacroNameTok;
+ ReadMacroName(MacroNameTok, 2);
+
+ // Error reading macro name? If so, diagnostic already issued.
+ if (MacroNameTok.is(tok::eod))
+ return;
+
+ // Check to see if this is the last token on the #__public_macro line.
+ CheckEndOfDirective("__public_macro");
+
+ // Okay, we finally have a valid identifier to undef.
+ MacroInfo *MI = getMacroInfo(MacroNameTok.getIdentifierInfo());
+
+ // If the macro is not defined, this is an error.
+ if (MI == 0) {
+ Diag(MacroNameTok, diag::err_pp_visibility_non_macro)
+ << MacroNameTok.getIdentifierInfo();
+ return;
+ }
+
+ // Note that this macro has now been exported.
+ MI->setVisibility(/*IsPublic=*/true, MacroNameTok.getLocation());
+
+ // If this macro definition came from a PCH file, mark it
+ // as having changed since serialization.
+ if (MI->isFromAST())
+ MI->setChangedAfterLoad();
+}
+
+/// \brief Handle a #private directive.
+void Preprocessor::HandleMacroPrivateDirective(Token &Tok) {
+ Token MacroNameTok;
+ ReadMacroName(MacroNameTok, 2);
+
+ // Error reading macro name? If so, diagnostic already issued.
+ if (MacroNameTok.is(tok::eod))
+ return;
+
+ // Check to see if this is the last token on the #__private_macro line.
+ CheckEndOfDirective("__private_macro");
+
+ // Okay, we finally have a valid identifier to undef.
+ MacroInfo *MI = getMacroInfo(MacroNameTok.getIdentifierInfo());
+
+ // If the macro is not defined, this is an error.
+ if (MI == 0) {
+ Diag(MacroNameTok, diag::err_pp_visibility_non_macro)
+ << MacroNameTok.getIdentifierInfo();
+ return;
+ }
+
+ // Note that this macro has now been marked private.
+ MI->setVisibility(/*IsPublic=*/false, MacroNameTok.getLocation());
+
+ // If this macro definition came from a PCH file, mark it
+ // as having changed since serialization.
+ if (MI->isFromAST())
+ MI->setChangedAfterLoad();
+}
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Include Directive Handling.
+//===----------------------------------------------------------------------===//
+
+/// GetIncludeFilenameSpelling - Turn the specified lexer token into a fully
+/// checked and spelled filename, e.g. as an operand of #include. This returns
+/// true if the input filename was in <>'s, or false if it was in ""'s. The
+/// caller is expected to provide a buffer that is large enough to hold the
+/// spelling of the filename, but is also expected to handle the case when
+/// this method decides to use a different buffer.
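+/// For example, given the spelling <stdio.h> this returns true and narrows
+/// Buffer to stdio.h; given "foo.h" it returns false and narrows Buffer to
+/// foo.h. An empty name such as "" is diagnosed and rejected.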
+bool Preprocessor::GetIncludeFilenameSpelling(SourceLocation Loc,
+ StringRef &Buffer) {
+ // Get the text form of the filename.
+ assert(!Buffer.empty() && "Can't have tokens with empty spellings!");
+
+ // Make sure the filename is <x> or "x".
+ bool isAngled;
+ if (Buffer[0] == '<') {
+ if (Buffer.back() != '>') {
+ Diag(Loc, diag::err_pp_expects_filename);
+ Buffer = StringRef();
+ return true;
+ }
+ isAngled = true;
+ } else if (Buffer[0] == '"') {
+ if (Buffer.back() != '"') {
+ Diag(Loc, diag::err_pp_expects_filename);
+ Buffer = StringRef();
+ return true;
+ }
+ isAngled = false;
+ } else {
+ Diag(Loc, diag::err_pp_expects_filename);
+ Buffer = StringRef();
+ return true;
+ }
+
+ // Diagnose #include "" as invalid.
+ if (Buffer.size() <= 2) {
+ Diag(Loc, diag::err_pp_empty_filename);
+ Buffer = StringRef();
+ return true;
+ }
+
+ // Skip the brackets.
+ Buffer = Buffer.substr(1, Buffer.size()-2);
+ return isAngled;
+}
+
+/// ConcatenateIncludeName - Handle cases where the #include name is expanded
+/// from a macro as multiple tokens, which need to be glued together. This
+/// occurs for code like:
+/// #define FOO <a/b.h>
+/// #include FOO
+/// because in this case, "<a/b.h>" is returned as 7 tokens, not one.
+///
+/// This code concatenates and consumes tokens up to the '>' token. It returns
+/// false if the '>' was found; otherwise it finds and consumes the EOD marker
+/// and returns true.
+bool Preprocessor::ConcatenateIncludeName(
+ SmallString<128> &FilenameBuffer,
+ SourceLocation &End) {
+ Token CurTok;
+
+ Lex(CurTok);
+ while (CurTok.isNot(tok::eod)) {
+ End = CurTok.getLocation();
+
+ // FIXME: Provide code completion for #includes.
+ if (CurTok.is(tok::code_completion)) {
+ setCodeCompletionReached();
+ Lex(CurTok);
+ continue;
+ }
+
+ // Append the spelling of this token to the buffer. If there was a space
+ // before it, add it now.
+ if (CurTok.hasLeadingSpace())
+ FilenameBuffer.push_back(' ');
+
+ // Get the spelling of the token, directly into FilenameBuffer if possible.
+ unsigned PreAppendSize = FilenameBuffer.size();
+ FilenameBuffer.resize(PreAppendSize+CurTok.getLength());
+
+ const char *BufPtr = &FilenameBuffer[PreAppendSize];
+ unsigned ActualLen = getSpelling(CurTok, BufPtr);
+
+ // If the token was spelled somewhere else, copy it into FilenameBuffer.
+ if (BufPtr != &FilenameBuffer[PreAppendSize])
+ memcpy(&FilenameBuffer[PreAppendSize], BufPtr, ActualLen);
+
+ // Resize FilenameBuffer to the correct size.
+ if (CurTok.getLength() != ActualLen)
+ FilenameBuffer.resize(PreAppendSize+ActualLen);
+
+ // If we found the '>' marker, return success.
+ if (CurTok.is(tok::greater))
+ return false;
+
+ Lex(CurTok);
+ }
+
+ // If we hit the eod marker, emit an error and return true so that the caller
+ // knows the EOD has been read.
+ Diag(CurTok.getLocation(), diag::err_pp_expects_filename);
+ return true;
+}
+
+/// HandleIncludeDirective - The "#include" tokens have just been read, read the
+/// file to be included from the lexer, then include it! This is a common
+/// routine with functionality shared between #include, #include_next and
+/// #import. LookupFrom is set when this is a #include_next directive, it
+/// specifies the file to start searching from.
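+/// For illustration, all of these forms funnel through this routine:
+///   #include <vector>         - angled search
+///   #include "local.h"        - quoted search
+///   #import <Cocoa/Cocoa.h>   - isImport is true
+///   #include_next <limits.h>  - LookupFrom is set by the caller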
+void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
+ Token &IncludeTok,
+ const DirectoryLookup *LookupFrom,
+ bool isImport) {
+
+ Token FilenameTok;
+ CurPPLexer->LexIncludeFilename(FilenameTok);
+
+ // Reserve a buffer to get the spelling.
+ SmallString<128> FilenameBuffer;
+ StringRef Filename;
+ SourceLocation End;
+ SourceLocation CharEnd; // the end of this directive, in characters
+
+ switch (FilenameTok.getKind()) {
+ case tok::eod:
+ // If the token kind is EOD, the error has already been diagnosed.
+ return;
+
+ case tok::angle_string_literal:
+ case tok::string_literal:
+ Filename = getSpelling(FilenameTok, FilenameBuffer);
+ End = FilenameTok.getLocation();
+ CharEnd = End.getLocWithOffset(Filename.size());
+ break;
+
+ case tok::less:
+ // This could be a <foo/bar.h> file coming from a macro expansion. In this
+ // case, glue the tokens together into FilenameBuffer and interpret those.
+ FilenameBuffer.push_back('<');
+ if (ConcatenateIncludeName(FilenameBuffer, End))
+ return; // Found <eod> but no ">"? Diagnostic already emitted.
+ Filename = FilenameBuffer.str();
+ CharEnd = getLocForEndOfToken(End);
+ break;
+ default:
+ Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
+ DiscardUntilEndOfDirective();
+ return;
+ }
+
+ StringRef OriginalFilename = Filename;
+ bool isAngled =
+ GetIncludeFilenameSpelling(FilenameTok.getLocation(), Filename);
+ // If GetIncludeFilenameSpelling set the start ptr to null, there was an
+ // error.
+ if (Filename.empty()) {
+ DiscardUntilEndOfDirective();
+ return;
+ }
+
+ // Verify that there is nothing after the filename, other than EOD. Note that
+ // we allow macros that expand to nothing after the filename, because this
+ // falls into the category of "#include pp-tokens new-line" specified in
+ // C99 6.10.2p4.
+ CheckEndOfDirective(IncludeTok.getIdentifierInfo()->getNameStart(), true);
+
+ // Check that we don't have infinite #include recursion.
+ if (IncludeMacroStack.size() == MaxAllowedIncludeStackDepth-1) {
+ Diag(FilenameTok, diag::err_pp_include_too_deep);
+ return;
+ }
+
+ // Complain about attempts to #include files in an audit pragma.
+ if (PragmaARCCFCodeAuditedLoc.isValid()) {
+ Diag(HashLoc, diag::err_pp_include_in_arc_cf_code_audited);
+ Diag(PragmaARCCFCodeAuditedLoc, diag::note_pragma_entered_here);
+
+ // Immediately leave the pragma.
+ PragmaARCCFCodeAuditedLoc = SourceLocation();
+ }
+
+ if (HeaderInfo.HasIncludeAliasMap()) {
+ // Map the filename with the brackets still attached. If the name doesn't
+ // map to anything, fall back on the filename we've already gotten the
+ // spelling for.
+ StringRef NewName = HeaderInfo.MapHeaderToIncludeAlias(OriginalFilename);
+ if (!NewName.empty())
+ Filename = NewName;
+ }
+
+ // Search include directories.
+ const DirectoryLookup *CurDir;
+ SmallString<1024> SearchPath;
+ SmallString<1024> RelativePath;
+ // We get the raw path only if we have 'Callbacks' to which we later pass
+ // the path.
+ Module *SuggestedModule = 0;
+ const FileEntry *File = LookupFile(
+ Filename, isAngled, LookupFrom, CurDir,
+ Callbacks ? &SearchPath : NULL, Callbacks ? &RelativePath : NULL,
+ getLangOpts().Modules? &SuggestedModule : 0);
+
+ if (Callbacks) {
+ if (!File) {
+ // Give the clients a chance to recover.
+ SmallString<128> RecoveryPath;
+ if (Callbacks->FileNotFound(Filename, RecoveryPath)) {
+ if (const DirectoryEntry *DE = FileMgr.getDirectory(RecoveryPath)) {
+ // Add the recovery path to the list of search paths.
+ DirectoryLookup DL(DE, SrcMgr::C_User, true, false);
+ HeaderInfo.AddSearchPath(DL, isAngled);
+
+ // Try the lookup again, skipping the cache.
+ File = LookupFile(Filename, isAngled, LookupFrom, CurDir, 0, 0,
+ getLangOpts().Modules? &SuggestedModule : 0,
+ /*SkipCache*/true);
+ }
+ }
+ }
+
+ // Notify the callback object that we've seen an inclusion directive.
+ Callbacks->InclusionDirective(HashLoc, IncludeTok, Filename, isAngled, File,
+ End, SearchPath, RelativePath);
+ }
+
+ if (File == 0) {
+ if (!SuppressIncludeNotFoundError)
+ Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
+ return;
+ }
+
+ // If we are supposed to import a module rather than including the header,
+ // do so now.
+ if (SuggestedModule) {
+ // Compute the module access path corresponding to this module.
+ // FIXME: Should we have a second loadModule() overload to avoid this
+ // extra lookup step?
+ llvm::SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
+ for (Module *Mod = SuggestedModule; Mod; Mod = Mod->Parent)
+ Path.push_back(std::make_pair(getIdentifierInfo(Mod->Name),
+ FilenameTok.getLocation()));
+ std::reverse(Path.begin(), Path.end());
+
+ // Warn that we're replacing the include/import with a module import.
+ SmallString<128> PathString;
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ if (I)
+ PathString += '.';
+ PathString += Path[I].first->getName();
+ }
+ int IncludeKind = 0;
+
+ switch (IncludeTok.getIdentifierInfo()->getPPKeywordID()) {
+ case tok::pp_include:
+ IncludeKind = 0;
+ break;
+
+ case tok::pp_import:
+ IncludeKind = 1;
+ break;
+
+ case tok::pp_include_next:
+ IncludeKind = 2;
+ break;
+
+ case tok::pp___include_macros:
+ IncludeKind = 3;
+ break;
+
+ default:
+ llvm_unreachable("unknown include directive kind");
+ }
+
+ // Determine whether we are actually building the module that this
+ // include directive maps to.
+ bool BuildingImportedModule
+ = Path[0].first->getName() == getLangOpts().CurrentModule;
+
+ if (!BuildingImportedModule && getLangOpts().ObjC2) {
+ // If we're not building the imported module, warn that we're going
+ // to automatically turn this inclusion directive into a module import.
+ // We only do this in Objective-C, where we have a module-import syntax.
+ CharSourceRange ReplaceRange(SourceRange(HashLoc, CharEnd),
+ /*IsTokenRange=*/false);
+ Diag(HashLoc, diag::warn_auto_module_import)
+ << IncludeKind << PathString
+ << FixItHint::CreateReplacement(ReplaceRange,
+ "@__experimental_modules_import " + PathString.str().str() + ";");
+ }
+
+ // Load the module.
+ // If this was an #__include_macros directive, only make macros visible.
+ Module::NameVisibilityKind Visibility
+ = (IncludeKind == 3)? Module::MacrosVisible : Module::AllVisible;
+ Module *Imported
+ = TheModuleLoader.loadModule(IncludeTok.getLocation(), Path, Visibility,
+ /*IsIncludeDirective=*/true);
+
+ // If this header isn't part of the module we're building, we're done.
+ if (!BuildingImportedModule && Imported)
+ return;
+ }
+
+ // The #included file will be considered a system header if either it is in a
+ // system include directory or the #includer is itself a system header.
+ SrcMgr::CharacteristicKind FileCharacter =
+ std::max(HeaderInfo.getFileDirFlavor(File),
+ SourceMgr.getFileCharacteristic(FilenameTok.getLocation()));
+
+ // Ask HeaderInfo if we should enter this #include file. If not, #including
+ // this file will have no effect.
+ if (!HeaderInfo.ShouldEnterIncludeFile(File, isImport)) {
+ if (Callbacks)
+ Callbacks->FileSkipped(*File, FilenameTok, FileCharacter);
+ return;
+ }
+
+ // Look up the file, create a File ID for it.
+ SourceLocation IncludePos = End;
+ // If the filename string was the result of macro expansions, set the include
+ // position on the file where it will be included and after the expansions.
+ if (IncludePos.isMacroID())
+ IncludePos = SourceMgr.getExpansionRange(IncludePos).second;
+ FileID FID = SourceMgr.createFileID(File, IncludePos, FileCharacter);
+ assert(!FID.isInvalid() && "Expected valid file ID");
+
+ // Finally, if all is good, enter the new file!
+ EnterSourceFile(FID, CurDir, FilenameTok.getLocation());
+}
+
+/// HandleIncludeNextDirective - Implements #include_next.
+///
+void Preprocessor::HandleIncludeNextDirective(SourceLocation HashLoc,
+ Token &IncludeNextTok) {
+ Diag(IncludeNextTok, diag::ext_pp_include_next_directive);
+
+ // #include_next is like #include, except that we start searching after
+ // the current found directory. If we can't do this, issue a
+ // diagnostic.
+ const DirectoryLookup *Lookup = CurDirLookup;
+ if (isInPrimaryFile()) {
+ Lookup = 0;
+ Diag(IncludeNextTok, diag::pp_include_next_in_primary);
+ } else if (Lookup == 0) {
+ Diag(IncludeNextTok, diag::pp_include_next_absolute_path);
+ } else {
+ // Start looking up in the next directory.
+ ++Lookup;
+ }
+
+ return HandleIncludeDirective(HashLoc, IncludeNextTok, Lookup);
+}
+
+/// HandleMicrosoftImportDirective - Implements #import for Microsoft Mode
+void Preprocessor::HandleMicrosoftImportDirective(Token &Tok) {
+ // The Microsoft #import directive takes a type library and generates header
+ // files from it, and includes those. This is beyond the scope of what clang
+ // does, so we ignore it and error out. However, #import can optionally have
+ // trailing attributes that span multiple lines. We're going to eat those
+ // so we can continue processing from there.
+ Diag(Tok, diag::err_pp_import_directive_ms);
+
+ // Read tokens until we get to the end of the directive. Note that the
+ // directive can be split over multiple lines using the backslash character.
+ DiscardUntilEndOfDirective();
+}
+
+/// HandleImportDirective - Implements #import.
+///
+void Preprocessor::HandleImportDirective(SourceLocation HashLoc,
+ Token &ImportTok) {
+ if (!LangOpts.ObjC1) { // #import is standard for ObjC.
+ if (LangOpts.MicrosoftMode)
+ return HandleMicrosoftImportDirective(ImportTok);
+ Diag(ImportTok, diag::ext_pp_import_directive);
+ }
+ return HandleIncludeDirective(HashLoc, ImportTok, 0, true);
+}
+
+/// HandleIncludeMacrosDirective - The -imacros command line option turns into a
+/// pseudo directive in the predefines buffer. This handles it by sucking all
+/// tokens through the preprocessor and discarding them (only keeping the side
+/// effects on the preprocessor).
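+/// For example, with 'clang -imacros defs.h foo.c' the macros defined in
+/// defs.h become visible in foo.c, while every other token of defs.h is
+/// discarded (file names here are illustrative).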
+void Preprocessor::HandleIncludeMacrosDirective(SourceLocation HashLoc,
+ Token &IncludeMacrosTok) {
+ // This directive should only occur in the predefines buffer. If not, emit an
+ // error and reject it.
+ SourceLocation Loc = IncludeMacrosTok.getLocation();
+ if (strcmp(SourceMgr.getBufferName(Loc), "<built-in>") != 0) {
+ Diag(IncludeMacrosTok.getLocation(),
+ diag::pp_include_macros_out_of_predefines);
+ DiscardUntilEndOfDirective();
+ return;
+ }
+
+ // Treat this as a normal #include for checking purposes. If this is
+ // successful, it will push a new lexer onto the include stack.
+ HandleIncludeDirective(HashLoc, IncludeMacrosTok, 0, false);
+
+ Token TmpTok;
+ do {
+ Lex(TmpTok);
+ assert(TmpTok.isNot(tok::eof) && "Didn't find end of -imacros!");
+ } while (TmpTok.isNot(tok::hashhash));
+}
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Macro Directive Handling.
+//===----------------------------------------------------------------------===//
+
+/// ReadMacroDefinitionArgList - The ( starting an argument list of a macro
+/// definition has just been read. Lex the rest of the arguments and the
+/// closing ), updating MI with what we learn. Return true if an error occurs
+/// parsing the arg list.
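+/// For illustration, the argument lists accepted here include:
+///   #define F()           - empty list
+///   #define F(a, b)       - named parameters
+///   #define F(...)        - C99 variadic, exposes __VA_ARGS__
+///   #define F(args...)    - GNU named-variadic extension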
+bool Preprocessor::ReadMacroDefinitionArgList(MacroInfo *MI, Token &Tok) {
+ SmallVector<IdentifierInfo*, 32> Arguments;
+
+ while (1) {
+ LexUnexpandedToken(Tok);
+ switch (Tok.getKind()) {
+ case tok::r_paren:
+ // Found the end of the argument list.
+ if (Arguments.empty()) // #define FOO()
+ return false;
+ // Otherwise we have #define FOO(A,)
+ Diag(Tok, diag::err_pp_expected_ident_in_arg_list);
+ return true;
+ case tok::ellipsis: // #define X(... -> C99 varargs
+ if (!LangOpts.C99)
+ Diag(Tok, LangOpts.CPlusPlus0x ?
+ diag::warn_cxx98_compat_variadic_macro :
+ diag::ext_variadic_macro);
+
+ // Lex the token after the identifier.
+ LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
+ return true;
+ }
+ // Add the __VA_ARGS__ identifier as an argument.
+ Arguments.push_back(Ident__VA_ARGS__);
+ MI->setIsC99Varargs();
+ MI->setArgumentList(&Arguments[0], Arguments.size(), BP);
+ return false;
+ case tok::eod: // #define X(
+ Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
+ return true;
+ default:
+ // Handle keywords and identifiers here to accept things like
+ // #define Foo(for) for.
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (II == 0) {
+ // #define X(1
+ Diag(Tok, diag::err_pp_invalid_tok_in_arg_list);
+ return true;
+ }
+
+ // If this is already used as an argument, it is used multiple times (e.g.
+ // #define X(A,A.
+ if (std::find(Arguments.begin(), Arguments.end(), II) !=
+ Arguments.end()) { // C99 6.10.3p6
+ Diag(Tok, diag::err_pp_duplicate_name_in_arg_list) << II;
+ return true;
+ }
+
+ // Add the argument to the macro info.
+ Arguments.push_back(II);
+
+ // Lex the token after the identifier.
+ LexUnexpandedToken(Tok);
+
+ switch (Tok.getKind()) {
+ default: // #define X(A B
+ Diag(Tok, diag::err_pp_expected_comma_in_arg_list);
+ return true;
+ case tok::r_paren: // #define X(A)
+ MI->setArgumentList(&Arguments[0], Arguments.size(), BP);
+ return false;
+ case tok::comma: // #define X(A,
+ break;
+ case tok::ellipsis: // #define X(A... -> GCC extension
+ // Diagnose extension.
+ Diag(Tok, diag::ext_named_variadic_macro);
+
+ // Lex the token after the identifier.
+ LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_pp_missing_rparen_in_macro_def);
+ return true;
+ }
+
+ MI->setIsGNUVarargs();
+ MI->setArgumentList(&Arguments[0], Arguments.size(), BP);
+ return false;
+ }
+ }
+ }
+}
+
+/// HandleDefineDirective - Implements #define. This consumes the entire macro
+/// line then lets the caller lex the next real token.
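+/// Both object-like and function-like definitions are handled here, e.g.:
+///   #define PI 3.14159
+///   #define MIN(a, b) ((a) < (b) ? (a) : (b))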
+void Preprocessor::HandleDefineDirective(Token &DefineTok) {
+ ++NumDefined;
+
+ Token MacroNameTok;
+ ReadMacroName(MacroNameTok, 1);
+
+ // Error reading macro name? If so, diagnostic already issued.
+ if (MacroNameTok.is(tok::eod))
+ return;
+
+ Token LastTok = MacroNameTok;
+
+ // If we are supposed to keep comments in #defines, reenable comment saving
+ // mode.
+ if (CurLexer) CurLexer->SetCommentRetentionState(KeepMacroComments);
+
+ // Create the new macro.
+ MacroInfo *MI = AllocateMacroInfo(MacroNameTok.getLocation());
+
+ Token Tok;
+ LexUnexpandedToken(Tok);
+
+ // If this is a function-like macro definition, parse the argument list,
+ // marking each of the identifiers as being used as macro arguments. Also,
+ // check other constraints on the first token of the macro body.
+ if (Tok.is(tok::eod)) {
+ // If there is no body to this macro, we have no special handling here.
+ } else if (Tok.hasLeadingSpace()) {
+ // This is a normal token with leading space. Clear the leading space
+ // marker on the first token to get proper expansion.
+ Tok.clearFlag(Token::LeadingSpace);
+ } else if (Tok.is(tok::l_paren)) {
+ // This is a function-like macro definition. Read the argument list.
+ MI->setIsFunctionLike();
+ if (ReadMacroDefinitionArgList(MI, LastTok)) {
+ // Forget about MI.
+ ReleaseMacroInfo(MI);
+ // Throw away the rest of the line.
+ if (CurPPLexer->ParsingPreprocessorDirective)
+ DiscardUntilEndOfDirective();
+ return;
+ }
+
+ // If this is a definition of a variadic C99 function-like macro (not using
+ // the GNU named varargs extension), enable __VA_ARGS__.
+
+ // "Poison" __VA_ARGS__, which can only appear in the expansion of a macro.
+ // This gets unpoisoned where it is allowed.
+ assert(Ident__VA_ARGS__->isPoisoned() && "__VA_ARGS__ should be poisoned!");
+ if (MI->isC99Varargs())
+ Ident__VA_ARGS__->setIsPoisoned(false);
+
+ // Read the first token after the arg list for down below.
+ LexUnexpandedToken(Tok);
+ } else if (LangOpts.C99 || LangOpts.CPlusPlus0x) {
+ // C99 requires whitespace between the macro definition and the body. Emit
+ // a diagnostic for something like "#define X+".
+ Diag(Tok, diag::ext_c99_whitespace_required_after_macro_name);
+ } else {
+ // C90 6.8 TC1 says: "In the definition of an object-like macro, if the
+ // first character of a replacement list is not a character required by
+ // subclause 5.2.1, then there shall be white-space separation between the
+ // identifier and the replacement list.". 5.2.1 lists this set:
+ // "A-Za-z0-9!"#%&'()*+,_./:;<=>?[\]^_{|}~" as well as whitespace, which
+ // is irrelevant here.
+ bool isInvalid = false;
+ if (Tok.is(tok::at)) // @ is not in the list above.
+ isInvalid = true;
+ else if (Tok.is(tok::unknown)) {
+ // If we have an unknown token, it is something strange like "`". Since
+ // all of the valid characters would have lexed into a single-character
+ // token of some sort, we know this is not a valid case.
+ isInvalid = true;
+ }
+ if (isInvalid)
+ Diag(Tok, diag::ext_missing_whitespace_after_macro_name);
+ else
+ Diag(Tok, diag::warn_missing_whitespace_after_macro_name);
+ }
+
+ if (!Tok.is(tok::eod))
+ LastTok = Tok;
+
+ // Read the rest of the macro body.
+ if (MI->isObjectLike()) {
+ // Object-like macros are very simple, just read their body.
+ while (Tok.isNot(tok::eod)) {
+ LastTok = Tok;
+ MI->AddTokenToBody(Tok);
+ // Get the next token of the macro.
+ LexUnexpandedToken(Tok);
+ }
+
+ } else {
+ // Otherwise, read the body of a function-like macro. While we are at it,
+ // check C99 6.10.3.2p1: ensure that # operators are followed by macro
+ // parameters in function-like macro expansions.
+ while (Tok.isNot(tok::eod)) {
+ LastTok = Tok;
+
+ if (Tok.isNot(tok::hash)) {
+ MI->AddTokenToBody(Tok);
+
+ // Get the next token of the macro.
+ LexUnexpandedToken(Tok);
+ continue;
+ }
+
+ // Get the next token of the macro.
+ LexUnexpandedToken(Tok);
+
+ // Check for a valid macro arg identifier.
+ if (Tok.getIdentifierInfo() == 0 ||
+ MI->getArgumentNum(Tok.getIdentifierInfo()) == -1) {
+
+ // If this is assembler-with-cpp mode, we accept random gibberish after
+ // the '#' because '#' is often a comment character. However, change
+ // the kind of the token to tok::unknown so that the preprocessor isn't
+ // confused.
+ if (getLangOpts().AsmPreprocessor && Tok.isNot(tok::eod)) {
+ LastTok.setKind(tok::unknown);
+ } else {
+ Diag(Tok, diag::err_pp_stringize_not_parameter);
+ ReleaseMacroInfo(MI);
+
+ // Disable __VA_ARGS__ again.
+ Ident__VA_ARGS__->setIsPoisoned(true);
+ return;
+ }
+ }
+
+ // Things look ok, add the '#' and param name tokens to the macro.
+ MI->AddTokenToBody(LastTok);
+ MI->AddTokenToBody(Tok);
+ LastTok = Tok;
+
+ // Get the next token of the macro.
+ LexUnexpandedToken(Tok);
+ }
+ }
+
+
+ // Disable __VA_ARGS__ again.
+ Ident__VA_ARGS__->setIsPoisoned(true);
+
+ // Check that there is no paste (##) operator at the beginning or end of the
+ // replacement list.
+ unsigned NumTokens = MI->getNumTokens();
+ if (NumTokens != 0) {
+ if (MI->getReplacementToken(0).is(tok::hashhash)) {
+ Diag(MI->getReplacementToken(0), diag::err_paste_at_start);
+ ReleaseMacroInfo(MI);
+ return;
+ }
+ if (MI->getReplacementToken(NumTokens-1).is(tok::hashhash)) {
+ Diag(MI->getReplacementToken(NumTokens-1), diag::err_paste_at_end);
+ ReleaseMacroInfo(MI);
+ return;
+ }
+ }
+
+ MI->setDefinitionEndLoc(LastTok.getLocation());
+
+ // Finally, if this identifier already had a macro defined for it, verify that
+ // the macro bodies are identical and free the old definition.
+ if (MacroInfo *OtherMI = getMacroInfo(MacroNameTok.getIdentifierInfo())) {
+ // It is very common for system headers to have tons of macro redefinitions
+ // and for warnings to be disabled in system headers. If this is the case,
+ // then don't bother calling MacroInfo::isIdenticalTo.
+ if (!getDiagnostics().getSuppressSystemWarnings() ||
+ !SourceMgr.isInSystemHeader(DefineTok.getLocation())) {
+ if (!OtherMI->isUsed() && OtherMI->isWarnIfUnused())
+ Diag(OtherMI->getDefinitionLoc(), diag::pp_macro_not_used);
+
+ // Macros must be identical. This means all tokens and whitespace
+ // separation must be the same. C99 6.10.3.2.
+ if (!OtherMI->isAllowRedefinitionsWithoutWarning() &&
+ !MI->isIdenticalTo(*OtherMI, *this)) {
+ Diag(MI->getDefinitionLoc(), diag::ext_pp_macro_redef)
+ << MacroNameTok.getIdentifierInfo();
+ Diag(OtherMI->getDefinitionLoc(), diag::note_previous_definition);
+ }
+ }
+ if (OtherMI->isWarnIfUnused())
+ WarnUnusedMacroLocs.erase(OtherMI->getDefinitionLoc());
+ ReleaseMacroInfo(OtherMI);
+ }
+
+ setMacroInfo(MacroNameTok.getIdentifierInfo(), MI);
+
+ assert(!MI->isUsed());
+ // If we need a warning for an unused macro, add its location to the
+ // warn-because-unused-macro set. If the macro gets used, it will be removed
+ // from the set.
+ if (isInPrimaryFile() && // don't warn for include'd macros.
+ Diags->getDiagnosticLevel(diag::pp_macro_not_used,
+ MI->getDefinitionLoc()) != DiagnosticsEngine::Ignored) {
+ MI->setIsWarnIfUnused(true);
+ WarnUnusedMacroLocs.insert(MI->getDefinitionLoc());
+ }
+
+ // If the callbacks want to know, tell them about the macro definition.
+ if (Callbacks)
+ Callbacks->MacroDefined(MacroNameTok, MI);
+}
+
+/// HandleUndefDirective - Implements #undef.
+///
+void Preprocessor::HandleUndefDirective(Token &UndefTok) {
+ ++NumUndefined;
+
+ Token MacroNameTok;
+ ReadMacroName(MacroNameTok, 2);
+
+ // Error reading macro name? If so, diagnostic already issued.
+ if (MacroNameTok.is(tok::eod))
+ return;
+
+ // Check to see if this is the last token on the #undef line.
+ CheckEndOfDirective("undef");
+
+ // Okay, we finally have a valid identifier to undef.
+ MacroInfo *MI = getMacroInfo(MacroNameTok.getIdentifierInfo());
+
+ // If the macro is not defined, this is a noop undef, just return.
+ if (MI == 0) return;
+
+ if (!MI->isUsed() && MI->isWarnIfUnused())
+ Diag(MI->getDefinitionLoc(), diag::pp_macro_not_used);
+
+ // If the callbacks want to know, tell them about the macro #undef.
+ if (Callbacks)
+ Callbacks->MacroUndefined(MacroNameTok, MI);
+
+ if (MI->isWarnIfUnused())
+ WarnUnusedMacroLocs.erase(MI->getDefinitionLoc());
+
+ // Free macro definition.
+ ReleaseMacroInfo(MI);
+ setMacroInfo(MacroNameTok.getIdentifierInfo(), 0);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Conditional Directive Handling.
+//===----------------------------------------------------------------------===//
+
+/// HandleIfdefDirective - Implements the #ifdef/#ifndef directive. isIfndef is
+/// true when this is a #ifndef directive. ReadAnyTokensBeforeDirective is true
+/// if any tokens have been returned or pp-directives activated before this
+/// #ifndef has been lexed.
+///
+void Preprocessor::HandleIfdefDirective(Token &Result, bool isIfndef,
+ bool ReadAnyTokensBeforeDirective) {
+ ++NumIf;
+ Token DirectiveTok = Result;
+
+ Token MacroNameTok;
+ ReadMacroName(MacroNameTok);
+
+ // Error reading macro name? If so, diagnostic already issued.
+ if (MacroNameTok.is(tok::eod)) {
+ // Skip code until we get to #endif. This helps with recovery by not
+ // emitting an error when the #endif is reached.
+ SkipExcludedConditionalBlock(DirectiveTok.getLocation(),
+ /*Foundnonskip*/false, /*FoundElse*/false);
+ return;
+ }
+
+ // Check to see if this is the last token on the #if[n]def line.
+ CheckEndOfDirective(isIfndef ? "ifndef" : "ifdef");
+
+ IdentifierInfo *MII = MacroNameTok.getIdentifierInfo();
+ MacroInfo *MI = getMacroInfo(MII);
+
+ if (CurPPLexer->getConditionalStackDepth() == 0) {
+ // If this is the start of a top-level #ifndef and the macro is not defined,
+ // inform MIOpt that this might be the start of a proper include guard.
+ // Otherwise it is some other form of unknown conditional which we can't
+ // handle.
+ if (!ReadAnyTokensBeforeDirective && MI == 0) {
+ assert(isIfndef && "#ifdef shouldn't reach here");
+ CurPPLexer->MIOpt.EnterTopLevelIFNDEF(MII);
+ } else
+ CurPPLexer->MIOpt.EnterTopLevelConditional();
+ }
+
+ // If there is a macro, process it.
+ if (MI) // Mark it used.
+ markMacroAsUsed(MI);
+
+ if (Callbacks) {
+ if (isIfndef)
+ Callbacks->Ifndef(DirectiveTok.getLocation(), MacroNameTok);
+ else
+ Callbacks->Ifdef(DirectiveTok.getLocation(), MacroNameTok);
+ }
+
+ // Should we include the stuff contained by this directive?
+ if (!MI == isIfndef) {
+ // Yes, remember that we are inside a conditional, then lex the next token.
+ CurPPLexer->pushConditionalLevel(DirectiveTok.getLocation(),
+ /*wasskip*/false, /*foundnonskip*/true,
+ /*foundelse*/false);
+ } else {
+ // No, skip the contents of this block.
+ SkipExcludedConditionalBlock(DirectiveTok.getLocation(),
+ /*Foundnonskip*/false,
+ /*FoundElse*/false);
+ }
+}
+
+/// HandleIfDirective - Implements the #if directive.
+///
+void Preprocessor::HandleIfDirective(Token &IfToken,
+ bool ReadAnyTokensBeforeDirective) {
+ ++NumIf;
+
+ // Parse and evaluate the conditional expression.
+ IdentifierInfo *IfNDefMacro = 0;
+ const SourceLocation ConditionalBegin = CurPPLexer->getSourceLocation();
+ const bool ConditionalTrue = EvaluateDirectiveExpression(IfNDefMacro);
+ const SourceLocation ConditionalEnd = CurPPLexer->getSourceLocation();
+
+ // If this condition is equivalent to #ifndef X, and if this is the first
+ // directive seen, handle it for the multiple-include optimization.
+ if (CurPPLexer->getConditionalStackDepth() == 0) {
+ if (!ReadAnyTokensBeforeDirective && IfNDefMacro && ConditionalTrue)
+ CurPPLexer->MIOpt.EnterTopLevelIFNDEF(IfNDefMacro);
+ else
+ CurPPLexer->MIOpt.EnterTopLevelConditional();
+ }
+
+ if (Callbacks)
+ Callbacks->If(IfToken.getLocation(),
+ SourceRange(ConditionalBegin, ConditionalEnd));
+
+ // Should we include the stuff contained by this directive?
+ if (ConditionalTrue) {
+ // Yes, remember that we are inside a conditional, then lex the next token.
+ CurPPLexer->pushConditionalLevel(IfToken.getLocation(), /*wasskip*/false,
+ /*foundnonskip*/true, /*foundelse*/false);
+ } else {
+ // No, skip the contents of this block.
+ SkipExcludedConditionalBlock(IfToken.getLocation(), /*Foundnonskip*/false,
+ /*FoundElse*/false);
+ }
+}
+
+/// HandleEndifDirective - Implements the #endif directive.
+///
+void Preprocessor::HandleEndifDirective(Token &EndifToken) {
+ ++NumEndif;
+
+ // Check that this is the whole directive.
+ CheckEndOfDirective("endif");
+
+ PPConditionalInfo CondInfo;
+ if (CurPPLexer->popConditionalLevel(CondInfo)) {
+ // No conditionals on the stack: this is an #endif without an #if.
+ Diag(EndifToken, diag::err_pp_endif_without_if);
+ return;
+ }
+
+ // If this #endif ends a top-level conditional, inform MIOpt.
+ if (CurPPLexer->getConditionalStackDepth() == 0)
+ CurPPLexer->MIOpt.ExitTopLevelConditional();
+
+ assert(!CondInfo.WasSkipping && !CurPPLexer->LexingRawMode &&
+ "This code should only be reachable in the non-skipping case!");
+
+ if (Callbacks)
+ Callbacks->Endif(EndifToken.getLocation(), CondInfo.IfLoc);
+}
+
+/// HandleElseDirective - Implements the #else directive.
+///
+void Preprocessor::HandleElseDirective(Token &Result) {
+ ++NumElse;
+
+ // #else directive in a non-skipping conditional... start skipping.
+ CheckEndOfDirective("else");
+
+ PPConditionalInfo CI;
+ if (CurPPLexer->popConditionalLevel(CI)) {
+ Diag(Result, diag::pp_err_else_without_if);
+ return;
+ }
+
+ // If this is a top-level #else, inform the MIOpt.
+ if (CurPPLexer->getConditionalStackDepth() == 0)
+ CurPPLexer->MIOpt.EnterTopLevelConditional();
+
+ // If this is a #else with a #else before it, report the error.
+ if (CI.FoundElse) Diag(Result, diag::pp_err_else_after_else);
+
+ if (Callbacks)
+ Callbacks->Else(Result.getLocation(), CI.IfLoc);
+
+ // Finally, skip the rest of the contents of this block.
+ SkipExcludedConditionalBlock(CI.IfLoc, /*Foundnonskip*/true,
+ /*FoundElse*/true, Result.getLocation());
+}
+
+/// HandleElifDirective - Implements the #elif directive.
+///
+void Preprocessor::HandleElifDirective(Token &ElifToken) {
+ ++NumElse;
+
+ // #elif directive in a non-skipping conditional... start skipping.
+ // We don't care what the condition is, because we will always skip it (since
+ // the block immediately before it was included).
+ const SourceLocation ConditionalBegin = CurPPLexer->getSourceLocation();
+ DiscardUntilEndOfDirective();
+ const SourceLocation ConditionalEnd = CurPPLexer->getSourceLocation();
+
+ PPConditionalInfo CI;
+ if (CurPPLexer->popConditionalLevel(CI)) {
+ Diag(ElifToken, diag::pp_err_elif_without_if);
+ return;
+ }
+
+ // If this is a top-level #elif, inform the MIOpt.
+ if (CurPPLexer->getConditionalStackDepth() == 0)
+ CurPPLexer->MIOpt.EnterTopLevelConditional();
+
+ // If this is a #elif with a #else before it, report the error.
+ if (CI.FoundElse) Diag(ElifToken, diag::pp_err_elif_after_else);
+
+ if (Callbacks)
+ Callbacks->Elif(ElifToken.getLocation(),
+ SourceRange(ConditionalBegin, ConditionalEnd), CI.IfLoc);
+
+ // Finally, skip the rest of the contents of this block.
+ SkipExcludedConditionalBlock(CI.IfLoc, /*Foundnonskip*/true,
+ /*FoundElse*/CI.FoundElse,
+ ElifToken.getLocation());
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp b/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp
new file mode 100644
index 0000000..7cac63e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PPExpressions.cpp
@@ -0,0 +1,786 @@
+//===--- PPExpressions.cpp - Preprocessor Expression Evaluation -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Preprocessor::EvaluateDirectiveExpression method,
+// which parses and evaluates integer constant expressions for #if directives.
+//
+//===----------------------------------------------------------------------===//
+//
+// FIXME: implement testing for #assert's.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/CodeCompletionHandler.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "llvm/ADT/APSInt.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+
+namespace {
+
+/// PPValue - Represents the value of a subexpression of a preprocessor
+/// conditional and the source range covered by it.
+class PPValue {
+ SourceRange Range;
+public:
+ llvm::APSInt Val;
+
+ // Default ctor - Construct an 'invalid' PPValue.
+ PPValue(unsigned BitWidth) : Val(BitWidth) {}
+
+ unsigned getBitWidth() const { return Val.getBitWidth(); }
+ bool isUnsigned() const { return Val.isUnsigned(); }
+
+ const SourceRange &getRange() const { return Range; }
+
+ void setRange(SourceLocation L) { Range.setBegin(L); Range.setEnd(L); }
+ void setRange(SourceLocation B, SourceLocation E) {
+ Range.setBegin(B); Range.setEnd(E);
+ }
+ void setBegin(SourceLocation L) { Range.setBegin(L); }
+ void setEnd(SourceLocation L) { Range.setEnd(L); }
+};
+
+}
+
+static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
+ Token &PeekTok, bool ValueLive,
+ Preprocessor &PP);
+
+/// DefinedTracker - This struct is used while parsing expressions to keep track
+/// of whether !defined(X) has been seen.
+///
+/// With this simple scheme, we handle the basic forms:
+/// !defined(X) and !defined X
+/// but we also trivially handle (silly) stuff like:
+/// !!!defined(X) and +!defined(X) and !+!+!defined(X) and !(defined(X)).
+struct DefinedTracker {
+ /// Each time a Value is evaluated, it returns information about whether the
+ /// parsed value is of the form defined(X), !defined(X) or is something else.
+ enum TrackerState {
+ DefinedMacro, // defined(X)
+ NotDefinedMacro, // !defined(X)
+ Unknown // Something else.
+ } State;
+ /// TheMacro - When the state is DefinedMacro or NotDefinedMacro, this
+ /// indicates the macro that was checked.
+ IdentifierInfo *TheMacro;
+};
+
+/// EvaluateDefined - Process a 'defined(sym)' expression.
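+/// Both 'defined NAME' and 'defined(NAME)' are accepted; the operand is lexed
+/// without macro expansion.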
+static bool EvaluateDefined(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
+ bool ValueLive, Preprocessor &PP) {
+ IdentifierInfo *II;
+ Result.setBegin(PeekTok.getLocation());
+
+ // Get the next token, don't expand it.
+ PP.LexUnexpandedNonComment(PeekTok);
+
+ // Two options, it can either be a pp-identifier or a (.
+ SourceLocation LParenLoc;
+ if (PeekTok.is(tok::l_paren)) {
+ // Found a paren, remember we saw it and skip it.
+ LParenLoc = PeekTok.getLocation();
+ PP.LexUnexpandedNonComment(PeekTok);
+ }
+
+ if (PeekTok.is(tok::code_completion)) {
+ if (PP.getCodeCompletionHandler())
+ PP.getCodeCompletionHandler()->CodeCompleteMacroName(false);
+ PP.setCodeCompletionReached();
+ PP.LexUnexpandedNonComment(PeekTok);
+ }
+
+ // If we don't have a pp-identifier now, this is an error.
+ if ((II = PeekTok.getIdentifierInfo()) == 0) {
+ PP.Diag(PeekTok, diag::err_pp_defined_requires_identifier);
+ return true;
+ }
+
+ // Otherwise, we got an identifier, is it defined to something?
+ Result.Val = II->hasMacroDefinition();
+ Result.Val.setIsUnsigned(false); // Result is signed intmax_t.
+
+ // If there is a macro, mark it used.
+ if (Result.Val != 0 && ValueLive) {
+ MacroInfo *Macro = PP.getMacroInfo(II);
+ PP.markMacroAsUsed(Macro);
+ }
+
+ // Invoke the 'defined' callback.
+ if (PPCallbacks *Callbacks = PP.getPPCallbacks())
+ Callbacks->Defined(PeekTok);
+
+ // If we are in parens, ensure we have a trailing ).
+ if (LParenLoc.isValid()) {
+ // Consume identifier.
+ Result.setEnd(PeekTok.getLocation());
+ PP.LexUnexpandedNonComment(PeekTok);
+
+ if (PeekTok.isNot(tok::r_paren)) {
+ PP.Diag(PeekTok.getLocation(), diag::err_pp_missing_rparen) << "defined";
+ PP.Diag(LParenLoc, diag::note_matching) << "(";
+ return true;
+ }
+ // Consume the ).
+ Result.setEnd(PeekTok.getLocation());
+ PP.LexNonComment(PeekTok);
+ } else {
+ // Consume identifier.
+ Result.setEnd(PeekTok.getLocation());
+ PP.LexNonComment(PeekTok);
+ }
+
+ // Success, remember that we saw defined(X).
+ DT.State = DefinedTracker::DefinedMacro;
+ DT.TheMacro = II;
+ return false;
+}
+
+/// EvaluateValue - Evaluate the token PeekTok (and any others needed) and
+/// return the computed value in Result. Return true if there was an error
+/// parsing. This function also returns information about the form of the
+/// expression in DT. See above for information on what DT means.
+///
+/// If ValueLive is false, then this value is being evaluated in a context where
+/// the result is not used. As such, avoid diagnostics that relate to
+/// evaluation.
+static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
+ bool ValueLive, Preprocessor &PP) {
+ DT.State = DefinedTracker::Unknown;
+
+ if (PeekTok.is(tok::code_completion)) {
+ if (PP.getCodeCompletionHandler())
+ PP.getCodeCompletionHandler()->CodeCompletePreprocessorExpression();
+ PP.setCodeCompletionReached();
+ PP.LexNonComment(PeekTok);
+ }
+
+ // If this token's spelling is a pp-identifier, check to see if it is
+ // 'defined' or if it is a macro. Note that we check here because many
+ // keywords are pp-identifiers, so we can't check the kind.
+ if (IdentifierInfo *II = PeekTok.getIdentifierInfo()) {
+ // Handle "defined X" and "defined(X)".
+ if (II->isStr("defined"))
+ return(EvaluateDefined(Result, PeekTok, DT, ValueLive, PP));
+
+ // If this identifier isn't 'defined' or one of the special
+ // preprocessor keywords and it wasn't macro expanded, it turns
+ // into a simple 0, unless it is the C++ keyword "true", in which case it
+ // turns into "1".
+ if (ValueLive)
+ PP.Diag(PeekTok, diag::warn_pp_undef_identifier) << II;
+ Result.Val = II->getTokenID() == tok::kw_true;
+ Result.Val.setIsUnsigned(false); // "0" is signed intmax_t 0.
+ Result.setRange(PeekTok.getLocation());
+ PP.LexNonComment(PeekTok);
+ return false;
+ }
+
+ switch (PeekTok.getKind()) {
+ default: // Non-value token.
+ PP.Diag(PeekTok, diag::err_pp_expr_bad_token_start_expr);
+ return true;
+ case tok::eod:
+ case tok::r_paren:
+ // If there is no expression, report and exit.
+ PP.Diag(PeekTok, diag::err_pp_expected_value_in_expr);
+ return true;
+ case tok::numeric_constant: {
+ SmallString<64> IntegerBuffer;
+ bool NumberInvalid = false;
+ StringRef Spelling = PP.getSpelling(PeekTok, IntegerBuffer,
+ &NumberInvalid);
+ if (NumberInvalid)
+ return true; // a diagnostic was already reported
+
+ NumericLiteralParser Literal(Spelling.begin(), Spelling.end(),
+ PeekTok.getLocation(), PP);
+ if (Literal.hadError)
+ return true; // a diagnostic was already reported.
+
+ if (Literal.isFloatingLiteral() || Literal.isImaginary) {
+ PP.Diag(PeekTok, diag::err_pp_illegal_floating_literal);
+ return true;
+ }
+ assert(Literal.isIntegerLiteral() && "Unknown ppnumber");
+
+ // Complain about, and drop, any ud-suffix.
+ if (Literal.hasUDSuffix())
+ PP.Diag(PeekTok, diag::err_pp_invalid_udl) << /*integer*/1;
+
+ // long long is a C99 feature.
+ if (!PP.getLangOpts().C99 && Literal.isLongLong)
+ PP.Diag(PeekTok, PP.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_longlong : diag::ext_longlong);
+
+ // Parse the integer literal into Result.
+ if (Literal.GetIntegerValue(Result.Val)) {
+ // Overflow parsing integer literal.
+ if (ValueLive) PP.Diag(PeekTok, diag::warn_integer_too_large);
+ Result.Val.setIsUnsigned(true);
+ } else {
+ // Set the signedness of the result to match whether there was a U suffix
+ // or not.
+ Result.Val.setIsUnsigned(Literal.isUnsigned);
+
+ // Detect overflow based on whether the value is signed. If signed
+ // and if the value is too large, emit a warning "integer constant is so
+ // large that it is unsigned" e.g. on 12345678901234567890 where intmax_t
+ // is 64-bits.
+ if (!Literal.isUnsigned && Result.Val.isNegative()) {
+ // Don't warn for a hex literal: 0x8000..0 shouldn't warn.
+ if (ValueLive && Literal.getRadix() != 16)
+ PP.Diag(PeekTok, diag::warn_integer_too_large_for_signed);
+ Result.Val.setIsUnsigned(true);
+ }
+ }
+
+ // Consume the token.
+ Result.setRange(PeekTok.getLocation());
+ PP.LexNonComment(PeekTok);
+ return false;
+ }
+ case tok::char_constant: // 'x'
+ case tok::wide_char_constant: { // L'x'
+ case tok::utf16_char_constant: // u'x'
+ case tok::utf32_char_constant: // U'x'
+ // Complain about, and drop, any ud-suffix.
+ if (PeekTok.hasUDSuffix())
+ PP.Diag(PeekTok, diag::err_pp_invalid_udl) << /*character*/0;
+
+ SmallString<32> CharBuffer;
+ bool CharInvalid = false;
+ StringRef ThisTok = PP.getSpelling(PeekTok, CharBuffer, &CharInvalid);
+ if (CharInvalid)
+ return true;
+
+ CharLiteralParser Literal(ThisTok.begin(), ThisTok.end(),
+ PeekTok.getLocation(), PP, PeekTok.getKind());
+ if (Literal.hadError())
+ return true; // A diagnostic was already emitted.
+
+ // Character literals are always int or wchar_t, expand to intmax_t.
+ const TargetInfo &TI = PP.getTargetInfo();
+ unsigned NumBits;
+ if (Literal.isMultiChar())
+ NumBits = TI.getIntWidth();
+ else if (Literal.isWide())
+ NumBits = TI.getWCharWidth();
+ else if (Literal.isUTF16())
+ NumBits = TI.getChar16Width();
+ else if (Literal.isUTF32())
+ NumBits = TI.getChar32Width();
+ else
+ NumBits = TI.getCharWidth();
+
+ // Set the width.
+ llvm::APSInt Val(NumBits);
+ // Set the value.
+ Val = Literal.getValue();
+ // Set the signedness. UTF-16 and UTF-32 are always unsigned
+ if (!Literal.isUTF16() && !Literal.isUTF32())
+ Val.setIsUnsigned(!PP.getLangOpts().CharIsSigned);
+
+ if (Result.Val.getBitWidth() > Val.getBitWidth()) {
+ Result.Val = Val.extend(Result.Val.getBitWidth());
+ } else {
+ assert(Result.Val.getBitWidth() == Val.getBitWidth() &&
+ "intmax_t smaller than char/wchar_t?");
+ Result.Val = Val;
+ }
+
+ // Consume the token.
+ Result.setRange(PeekTok.getLocation());
+ PP.LexNonComment(PeekTok);
+ return false;
+ }
+ case tok::l_paren: {
+ SourceLocation Start = PeekTok.getLocation();
+ PP.LexNonComment(PeekTok); // Eat the (.
+ // Parse the value and if there are any binary operators involved, parse
+ // them.
+ if (EvaluateValue(Result, PeekTok, DT, ValueLive, PP)) return true;
+
+ // If this is a silly value like (X), which doesn't need parens, check for
+ // !(defined X).
+ if (PeekTok.is(tok::r_paren)) {
+ // Just use DT unmodified as our result.
+ } else {
+ // Otherwise, we have something like (x+y), and we consumed '(x'.
+ if (EvaluateDirectiveSubExpr(Result, 1, PeekTok, ValueLive, PP))
+ return true;
+
+ if (PeekTok.isNot(tok::r_paren)) {
+ PP.Diag(PeekTok.getLocation(), diag::err_pp_expected_rparen)
+ << Result.getRange();
+ PP.Diag(Start, diag::note_matching) << "(";
+ return true;
+ }
+ DT.State = DefinedTracker::Unknown;
+ }
+ Result.setRange(Start, PeekTok.getLocation());
+ PP.LexNonComment(PeekTok); // Eat the ).
+ return false;
+ }
+ case tok::plus: {
+ SourceLocation Start = PeekTok.getLocation();
+ // Unary plus doesn't modify the value.
+ PP.LexNonComment(PeekTok);
+ if (EvaluateValue(Result, PeekTok, DT, ValueLive, PP)) return true;
+ Result.setBegin(Start);
+ return false;
+ }
+ case tok::minus: {
+ SourceLocation Loc = PeekTok.getLocation();
+ PP.LexNonComment(PeekTok);
+ if (EvaluateValue(Result, PeekTok, DT, ValueLive, PP)) return true;
+ Result.setBegin(Loc);
+
+ // C99 6.5.3.3p3: The sign of the result matches the sign of the operand.
+ Result.Val = -Result.Val;
+
+ // -MININT is the only thing that overflows. Unsigned never overflows.
+ bool Overflow = !Result.isUnsigned() && Result.Val.isMinSignedValue();
+
+ // If this operator is live and overflowed, report the issue.
+ if (Overflow && ValueLive)
+ PP.Diag(Loc, diag::warn_pp_expr_overflow) << Result.getRange();
+
+ DT.State = DefinedTracker::Unknown;
+ return false;
+ }
+
+ case tok::tilde: {
+ SourceLocation Start = PeekTok.getLocation();
+ PP.LexNonComment(PeekTok);
+ if (EvaluateValue(Result, PeekTok, DT, ValueLive, PP)) return true;
+ Result.setBegin(Start);
+
+ // C99 6.5.3.3p4: The sign of the result matches the sign of the operand.
+ Result.Val = ~Result.Val;
+ DT.State = DefinedTracker::Unknown;
+ return false;
+ }
+
+ case tok::exclaim: {
+ SourceLocation Start = PeekTok.getLocation();
+ PP.LexNonComment(PeekTok);
+ if (EvaluateValue(Result, PeekTok, DT, ValueLive, PP)) return true;
+ Result.setBegin(Start);
+ Result.Val = !Result.Val;
+ // C99 6.5.3.3p5: The result has type 'int', i.e. it is signed.
+ Result.Val.setIsUnsigned(false);
+
+ if (DT.State == DefinedTracker::DefinedMacro)
+ DT.State = DefinedTracker::NotDefinedMacro;
+ else if (DT.State == DefinedTracker::NotDefinedMacro)
+ DT.State = DefinedTracker::DefinedMacro;
+ return false;
+ }
+
+ // FIXME: Handle #assert
+ }
+}
+
+
+
+/// getPrecedence - Return the precedence of the specified binary operator
+/// token. This returns:
+/// ~0 - Invalid token.
+///  14 -> 2 - various operators.
+/// 0 - 'eod' or ')'
+static unsigned getPrecedence(tok::TokenKind Kind) {
+ switch (Kind) {
+ default: return ~0U;
+ case tok::percent:
+ case tok::slash:
+ case tok::star: return 14;
+ case tok::plus:
+ case tok::minus: return 13;
+ case tok::lessless:
+ case tok::greatergreater: return 12;
+ case tok::lessequal:
+ case tok::less:
+ case tok::greaterequal:
+ case tok::greater: return 11;
+ case tok::exclaimequal:
+ case tok::equalequal: return 10;
+ case tok::amp: return 9;
+ case tok::caret: return 8;
+ case tok::pipe: return 7;
+ case tok::ampamp: return 6;
+ case tok::pipepipe: return 5;
+ case tok::question: return 4;
+ case tok::comma: return 3;
+ case tok::colon: return 2;
+ case tok::r_paren: return 0;// Lowest priority, end of expr.
+ case tok::eod: return 0;// Lowest priority, end of directive.
+ }
+}
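+// Illustrative sketch (not from the original source): these precedence values
+// drive the operator-precedence parse in EvaluateDirectiveSubExpr below.  For
+// a directive such as
+//
+//   #if 1 + 2 * 3 == 7
+//
+// '*' (precedence 14) binds tighter than '+' (13), which binds tighter than
+// '==' (10), so the expression groups as ((1 + (2 * 3)) == 7), evaluates to
+// true, and the #if branch is taken.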
+
+
+/// EvaluateDirectiveSubExpr - Evaluate the subexpression whose first token is
+/// PeekTok, and whose precedence is PeekPrec. This returns the result in LHS.
+///
+/// If ValueLive is false, then this value is being evaluated in a context where
+/// the result is not used. As such, avoid diagnostics that relate to
+/// evaluation, such as division by zero warnings.
+static bool EvaluateDirectiveSubExpr(PPValue &LHS, unsigned MinPrec,
+ Token &PeekTok, bool ValueLive,
+ Preprocessor &PP) {
+ unsigned PeekPrec = getPrecedence(PeekTok.getKind());
+ // If this token isn't valid, report the error.
+ if (PeekPrec == ~0U) {
+ PP.Diag(PeekTok.getLocation(), diag::err_pp_expr_bad_token_binop)
+ << LHS.getRange();
+ return true;
+ }
+
+ while (1) {
+ // If this token has a lower precedence than we are allowed to parse, return
+ // it so that higher levels of the recursion can parse it.
+ if (PeekPrec < MinPrec)
+ return false;
+
+ tok::TokenKind Operator = PeekTok.getKind();
+
+ // If this is a short-circuiting operator, see if the RHS of the operator is
+ // dead. Note that this cannot just clobber ValueLive. Consider
+ // "0 && 1 ? 4 : 1 / 0", which is parsed as "(0 && 1) ? 4 : (1 / 0)". In
+ // this example, the RHS of the && being dead does not make the rest of the
+ // expr dead.
+ bool RHSIsLive;
+ if (Operator == tok::ampamp && LHS.Val == 0)
+ RHSIsLive = false; // RHS of "0 && x" is dead.
+ else if (Operator == tok::pipepipe && LHS.Val != 0)
+ RHSIsLive = false; // RHS of "1 || x" is dead.
+ else if (Operator == tok::question && LHS.Val == 0)
+ RHSIsLive = false; // RHS (x) of "0 ? x : y" is dead.
+ else
+ RHSIsLive = ValueLive;
+
+ // Consume the operator, remembering the operator's location for reporting.
+ SourceLocation OpLoc = PeekTok.getLocation();
+ PP.LexNonComment(PeekTok);
+
+ PPValue RHS(LHS.getBitWidth());
+ // Parse the RHS of the operator.
+ DefinedTracker DT;
+ if (EvaluateValue(RHS, PeekTok, DT, RHSIsLive, PP)) return true;
+
+ // Remember the precedence of this operator and get the precedence of the
+ // operator immediately to the right of the RHS.
+ unsigned ThisPrec = PeekPrec;
+ PeekPrec = getPrecedence(PeekTok.getKind());
+
+ // If this token isn't valid, report the error.
+ if (PeekPrec == ~0U) {
+ PP.Diag(PeekTok.getLocation(), diag::err_pp_expr_bad_token_binop)
+ << RHS.getRange();
+ return true;
+ }
+
+ // Decide whether to include the next binop in this subexpression. For
+ // example, when parsing x+y*z and looking at '*', we want to recursively
+ // handle y*z as a single subexpression. We do this because the precedence
+ // of * is higher than that of +. The only strange case we have to handle
+ // here is for the ?: operator, where the precedence is actually lower than
+ // the LHS of the '?'. The grammar rule is:
+ //
+ // conditional-expression ::=
+ // logical-OR-expression ? expression : conditional-expression
+ // where 'expression' is actually comma-expression.
+ unsigned RHSPrec;
+ if (Operator == tok::question)
+ // The RHS of "?" should be maximally consumed as an expression.
+ RHSPrec = getPrecedence(tok::comma);
+ else // All others should munch while higher precedence.
+ RHSPrec = ThisPrec+1;
+
+ if (PeekPrec >= RHSPrec) {
+ if (EvaluateDirectiveSubExpr(RHS, RHSPrec, PeekTok, RHSIsLive, PP))
+ return true;
+ PeekPrec = getPrecedence(PeekTok.getKind());
+ }
+ assert(PeekPrec <= ThisPrec && "Recursion didn't work!");
+
+ // Usual arithmetic conversions (C99 6.3.1.8p1): result is unsigned if
+ // either operand is unsigned.
+ llvm::APSInt Res(LHS.getBitWidth());
+ switch (Operator) {
+ case tok::question: // No UAC for x and y in "x ? y : z".
+ case tok::lessless: // Shift amount doesn't UAC with shift value.
+ case tok::greatergreater: // Shift amount doesn't UAC with shift value.
+ case tok::comma: // Comma operands are not subject to UACs.
+ case tok::pipepipe: // Logical || does not do UACs.
+ case tok::ampamp: // Logical && does not do UACs.
+ break; // No UAC
+ default:
+ Res.setIsUnsigned(LHS.isUnsigned()|RHS.isUnsigned());
+ // If this just promoted something from signed to unsigned, and if the
+ // value was negative, warn about it.
+ if (ValueLive && Res.isUnsigned()) {
+ if (!LHS.isUnsigned() && LHS.Val.isNegative())
+ PP.Diag(OpLoc, diag::warn_pp_convert_lhs_to_positive)
+ << LHS.Val.toString(10, true) + " to " +
+ LHS.Val.toString(10, false)
+ << LHS.getRange() << RHS.getRange();
+ if (!RHS.isUnsigned() && RHS.Val.isNegative())
+ PP.Diag(OpLoc, diag::warn_pp_convert_rhs_to_positive)
+ << RHS.Val.toString(10, true) + " to " +
+ RHS.Val.toString(10, false)
+ << LHS.getRange() << RHS.getRange();
+ }
+ LHS.Val.setIsUnsigned(Res.isUnsigned());
+ RHS.Val.setIsUnsigned(Res.isUnsigned());
+ }
+
+ bool Overflow = false;
+ switch (Operator) {
+ default: llvm_unreachable("Unknown operator token!");
+ case tok::percent:
+ if (RHS.Val != 0)
+ Res = LHS.Val % RHS.Val;
+ else if (ValueLive) {
+ PP.Diag(OpLoc, diag::err_pp_remainder_by_zero)
+ << LHS.getRange() << RHS.getRange();
+ return true;
+ }
+ break;
+ case tok::slash:
+ if (RHS.Val != 0) {
+ if (LHS.Val.isSigned())
+ Res = llvm::APSInt(LHS.Val.sdiv_ov(RHS.Val, Overflow), false);
+ else
+ Res = LHS.Val / RHS.Val;
+ } else if (ValueLive) {
+ PP.Diag(OpLoc, diag::err_pp_division_by_zero)
+ << LHS.getRange() << RHS.getRange();
+ return true;
+ }
+ break;
+
+ case tok::star:
+ if (Res.isSigned())
+ Res = llvm::APSInt(LHS.Val.smul_ov(RHS.Val, Overflow), false);
+ else
+ Res = LHS.Val * RHS.Val;
+ break;
+ case tok::lessless: {
+ // Determine whether overflow is about to happen.
+ unsigned ShAmt = static_cast<unsigned>(RHS.Val.getLimitedValue());
+ if (LHS.isUnsigned()) {
+ Overflow = ShAmt >= LHS.Val.getBitWidth();
+ if (Overflow)
+ ShAmt = LHS.Val.getBitWidth()-1;
+ Res = LHS.Val << ShAmt;
+ } else {
+ Res = llvm::APSInt(LHS.Val.sshl_ov(ShAmt, Overflow), false);
+ }
+ break;
+ }
+ case tok::greatergreater: {
+ // Determine whether overflow is about to happen.
+ unsigned ShAmt = static_cast<unsigned>(RHS.Val.getLimitedValue());
+ if (ShAmt >= LHS.getBitWidth())
+ Overflow = true, ShAmt = LHS.getBitWidth()-1;
+ Res = LHS.Val >> ShAmt;
+ break;
+ }
+ case tok::plus:
+ if (LHS.isUnsigned())
+ Res = LHS.Val + RHS.Val;
+ else
+ Res = llvm::APSInt(LHS.Val.sadd_ov(RHS.Val, Overflow), false);
+ break;
+ case tok::minus:
+ if (LHS.isUnsigned())
+ Res = LHS.Val - RHS.Val;
+ else
+ Res = llvm::APSInt(LHS.Val.ssub_ov(RHS.Val, Overflow), false);
+ break;
+ case tok::lessequal:
+ Res = LHS.Val <= RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.8p6, result is always int (signed)
+ break;
+ case tok::less:
+ Res = LHS.Val < RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.8p6, result is always int (signed)
+ break;
+ case tok::greaterequal:
+ Res = LHS.Val >= RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.8p6, result is always int (signed)
+ break;
+ case tok::greater:
+ Res = LHS.Val > RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.8p6, result is always int (signed)
+ break;
+ case tok::exclaimequal:
+ Res = LHS.Val != RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.9p3, result is always int (signed)
+ break;
+ case tok::equalequal:
+ Res = LHS.Val == RHS.Val;
+ Res.setIsUnsigned(false); // C99 6.5.9p3, result is always int (signed)
+ break;
+ case tok::amp:
+ Res = LHS.Val & RHS.Val;
+ break;
+ case tok::caret:
+ Res = LHS.Val ^ RHS.Val;
+ break;
+ case tok::pipe:
+ Res = LHS.Val | RHS.Val;
+ break;
+ case tok::ampamp:
+ Res = (LHS.Val != 0 && RHS.Val != 0);
+ Res.setIsUnsigned(false); // C99 6.5.13p3, result is always int (signed)
+ break;
+ case tok::pipepipe:
+ Res = (LHS.Val != 0 || RHS.Val != 0);
+ Res.setIsUnsigned(false); // C99 6.5.14p3, result is always int (signed)
+ break;
+ case tok::comma:
+ // Comma is invalid in pp expressions in c89/c++ mode, but is valid in C99
+ // if not being evaluated.
+ if (!PP.getLangOpts().C99 || ValueLive)
+ PP.Diag(OpLoc, diag::ext_pp_comma_expr)
+ << LHS.getRange() << RHS.getRange();
+ Res = RHS.Val; // LHS = LHS,RHS -> RHS.
+ break;
+ case tok::question: {
+ // Parse the : part of the expression.
+ if (PeekTok.isNot(tok::colon)) {
+ PP.Diag(PeekTok.getLocation(), diag::err_expected_colon)
+        << LHS.getRange() << RHS.getRange();
+ PP.Diag(OpLoc, diag::note_matching) << "?";
+ return true;
+ }
+ // Consume the :.
+ PP.LexNonComment(PeekTok);
+
+ // Evaluate the value after the :.
+ bool AfterColonLive = ValueLive && LHS.Val == 0;
+ PPValue AfterColonVal(LHS.getBitWidth());
+ DefinedTracker DT;
+ if (EvaluateValue(AfterColonVal, PeekTok, DT, AfterColonLive, PP))
+ return true;
+
+ // Parse anything after the : with the same precedence as ?. We allow
+ // things of equal precedence because ?: is right associative.
+ if (EvaluateDirectiveSubExpr(AfterColonVal, ThisPrec,
+ PeekTok, AfterColonLive, PP))
+ return true;
+
+ // Now that we have the condition, the LHS and the RHS of the :, evaluate.
+ Res = LHS.Val != 0 ? RHS.Val : AfterColonVal.Val;
+ RHS.setEnd(AfterColonVal.getRange().getEnd());
+
+ // Usual arithmetic conversions (C99 6.3.1.8p1): result is unsigned if
+ // either operand is unsigned.
+ Res.setIsUnsigned(RHS.isUnsigned() | AfterColonVal.isUnsigned());
+
+ // Figure out the precedence of the token after the : part.
+ PeekPrec = getPrecedence(PeekTok.getKind());
+ break;
+ }
+ case tok::colon:
+ // Don't allow :'s to float around without being part of ?: exprs.
+ PP.Diag(OpLoc, diag::err_pp_colon_without_question)
+ << LHS.getRange() << RHS.getRange();
+ return true;
+ }
+
+ // If this operator is live and overflowed, report the issue.
+ if (Overflow && ValueLive)
+ PP.Diag(OpLoc, diag::warn_pp_expr_overflow)
+ << LHS.getRange() << RHS.getRange();
+
+ // Put the result back into 'LHS' for our next iteration.
+ LHS.Val = Res;
+ LHS.setEnd(RHS.getRange().getEnd());
+ }
+}
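+// Rough worked example (illustrative only, not part of the original source):
+// for "#if 0 && 10 / 0", EvaluateValue parses the leading '0', this routine
+// then sees '&&' with LHS == 0 and marks the right-hand side dead
+// (RHSIsLive == false), so "10 / 0" is parsed but the division-by-zero error
+// is suppressed and the whole expression quietly evaluates to false.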
+
+/// EvaluateDirectiveExpression - Evaluate an integer constant expression that
+/// may occur after a #if or #elif directive. If the expression is equivalent
+/// to "!defined(X)" return X in IfNDefMacro.
+bool Preprocessor::
+EvaluateDirectiveExpression(IdentifierInfo *&IfNDefMacro) {
+ // Save the current state of 'DisableMacroExpansion' and reset it to false. If
+ // 'DisableMacroExpansion' is true, then we must be in a macro argument list
+ // in which case a directive is undefined behavior. We want macros to be able
+  // to recursively expand in order to get more gcc-like behavior, so we force
+ // DisableMacroExpansion to false and restore it when we're done parsing the
+ // expression.
+ bool DisableMacroExpansionAtStartOfDirective = DisableMacroExpansion;
+ DisableMacroExpansion = false;
+
+ // Peek ahead one token.
+ Token Tok;
+ LexNonComment(Tok);
+
+ // C99 6.10.1p3 - All expressions are evaluated as intmax_t or uintmax_t.
+ unsigned BitWidth = getTargetInfo().getIntMaxTWidth();
+
+ PPValue ResVal(BitWidth);
+ DefinedTracker DT;
+ if (EvaluateValue(ResVal, Tok, DT, true, *this)) {
+ // Parse error, skip the rest of the macro line.
+ if (Tok.isNot(tok::eod))
+ DiscardUntilEndOfDirective();
+
+ // Restore 'DisableMacroExpansion'.
+ DisableMacroExpansion = DisableMacroExpansionAtStartOfDirective;
+ return false;
+ }
+
+ // If we are at the end of the expression after just parsing a value, there
+ // must be no (unparenthesized) binary operators involved, so we can exit
+ // directly.
+ if (Tok.is(tok::eod)) {
+ // If the expression we parsed was of the form !defined(macro), return the
+ // macro in IfNDefMacro.
+ if (DT.State == DefinedTracker::NotDefinedMacro)
+ IfNDefMacro = DT.TheMacro;
+
+ // Restore 'DisableMacroExpansion'.
+ DisableMacroExpansion = DisableMacroExpansionAtStartOfDirective;
+ return ResVal.Val != 0;
+ }
+
+ // Otherwise, we must have a binary operator (e.g. "#if 1 < 2"), so parse the
+ // operator and the stuff after it.
+ if (EvaluateDirectiveSubExpr(ResVal, getPrecedence(tok::question),
+ Tok, true, *this)) {
+ // Parse error, skip the rest of the macro line.
+ if (Tok.isNot(tok::eod))
+ DiscardUntilEndOfDirective();
+
+ // Restore 'DisableMacroExpansion'.
+ DisableMacroExpansion = DisableMacroExpansionAtStartOfDirective;
+ return false;
+ }
+
+ // If we aren't at the tok::eod token, something bad happened, like an extra
+ // ')' token.
+ if (Tok.isNot(tok::eod)) {
+ Diag(Tok, diag::err_pp_expected_eol);
+ DiscardUntilEndOfDirective();
+ }
+
+ // Restore 'DisableMacroExpansion'.
+ DisableMacroExpansion = DisableMacroExpansionAtStartOfDirective;
+ return ResVal.Val != 0;
+}
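+// Usage sketch (illustrative, hypothetical macro name): the header-guard
+// pattern this method recognizes is
+//
+//   #if !defined(MY_GUARD_H)
+//
+// EvaluateValue tracks that the expression has the form !defined(X) via
+// DefinedTracker, and if the directive ends right there, IfNDefMacro is set
+// to MY_GUARD_H so the caller can treat the #if like an #ifndef guard for the
+// multiple-include optimization.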
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
new file mode 100644
index 0000000..b6689df
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PPLexerChange.cpp
@@ -0,0 +1,494 @@
+//===--- PPLexerChange.cpp - Handle changing lexers in the preprocessor ---===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements pieces of the Preprocessor interface that manage the
+// current lexer stack.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/PathV2.h"
+#include "llvm/ADT/StringSwitch.h"
+using namespace clang;
+
+PPCallbacks::~PPCallbacks() {}
+
+//===----------------------------------------------------------------------===//
+// Miscellaneous Methods.
+//===----------------------------------------------------------------------===//
+
+/// isInPrimaryFile - Return true if we're in the top-level file, not in a
+/// #include. This looks through macro expansions and active _Pragma lexers.
+bool Preprocessor::isInPrimaryFile() const {
+ if (IsFileLexer())
+ return IncludeMacroStack.empty();
+
+ // If there are any stacked lexers, we're in a #include.
+ assert(IsFileLexer(IncludeMacroStack[0]) &&
+ "Top level include stack isn't our primary lexer?");
+ for (unsigned i = 1, e = IncludeMacroStack.size(); i != e; ++i)
+ if (IsFileLexer(IncludeMacroStack[i]))
+ return false;
+ return true;
+}
+
+/// getCurrentFileLexer - Return the current file lexer being lexed from.  Note
+/// that this ignores any potentially active macro expansions and _Pragma
+/// expansions going on at the time.
+PreprocessorLexer *Preprocessor::getCurrentFileLexer() const {
+ if (IsFileLexer())
+ return CurPPLexer;
+
+ // Look for a stacked lexer.
+ for (unsigned i = IncludeMacroStack.size(); i != 0; --i) {
+ const IncludeStackInfo& ISI = IncludeMacroStack[i-1];
+ if (IsFileLexer(ISI))
+ return ISI.ThePPLexer;
+ }
+ return 0;
+}
+
+
+//===----------------------------------------------------------------------===//
+// Methods for Entering and Callbacks for leaving various contexts
+//===----------------------------------------------------------------------===//
+
+/// EnterSourceFile - Add a source file to the top of the include stack and
+/// start lexing tokens from it instead of the current buffer.
+void Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
+ SourceLocation Loc) {
+ assert(CurTokenLexer == 0 && "Cannot #include a file inside a macro!");
+ ++NumEnteredSourceFiles;
+
+ if (MaxIncludeStackDepth < IncludeMacroStack.size())
+ MaxIncludeStackDepth = IncludeMacroStack.size();
+
+ if (PTH) {
+ if (PTHLexer *PL = PTH->CreateLexer(FID)) {
+ EnterSourceFileWithPTH(PL, CurDir);
+ return;
+ }
+ }
+
+ // Get the MemoryBuffer for this FID, if it fails, we fail.
+ bool Invalid = false;
+ const llvm::MemoryBuffer *InputFile =
+ getSourceManager().getBuffer(FID, Loc, &Invalid);
+ if (Invalid) {
+ SourceLocation FileStart = SourceMgr.getLocForStartOfFile(FID);
+ Diag(Loc, diag::err_pp_error_opening_file)
+ << std::string(SourceMgr.getBufferName(FileStart)) << "";
+ return;
+ }
+
+ if (isCodeCompletionEnabled() &&
+ SourceMgr.getFileEntryForID(FID) == CodeCompletionFile) {
+ CodeCompletionFileLoc = SourceMgr.getLocForStartOfFile(FID);
+ CodeCompletionLoc =
+ CodeCompletionFileLoc.getLocWithOffset(CodeCompletionOffset);
+ }
+
+ EnterSourceFileWithLexer(new Lexer(FID, InputFile, *this), CurDir);
+ return;
+}
+
+/// EnterSourceFileWithLexer - Add a source file to the top of the include stack
+/// and start lexing tokens from it instead of the current buffer.
+void Preprocessor::EnterSourceFileWithLexer(Lexer *TheLexer,
+ const DirectoryLookup *CurDir) {
+
+ // Add the current lexer to the include stack.
+ if (CurPPLexer || CurTokenLexer)
+ PushIncludeMacroStack();
+
+ CurLexer.reset(TheLexer);
+ CurPPLexer = TheLexer;
+ CurDirLookup = CurDir;
+ if (CurLexerKind != CLK_LexAfterModuleImport)
+ CurLexerKind = CLK_Lexer;
+
+ // Notify the client, if desired, that we are in a new source file.
+ if (Callbacks && !CurLexer->Is_PragmaLexer) {
+ SrcMgr::CharacteristicKind FileType =
+ SourceMgr.getFileCharacteristic(CurLexer->getFileLoc());
+
+ Callbacks->FileChanged(CurLexer->getFileLoc(),
+ PPCallbacks::EnterFile, FileType);
+ }
+}
+
+/// EnterSourceFileWithPTH - Add a source file to the top of the include stack
+/// and start getting tokens from it using the PTH cache.
+void Preprocessor::EnterSourceFileWithPTH(PTHLexer *PL,
+ const DirectoryLookup *CurDir) {
+
+ if (CurPPLexer || CurTokenLexer)
+ PushIncludeMacroStack();
+
+ CurDirLookup = CurDir;
+ CurPTHLexer.reset(PL);
+ CurPPLexer = CurPTHLexer.get();
+ if (CurLexerKind != CLK_LexAfterModuleImport)
+ CurLexerKind = CLK_PTHLexer;
+
+ // Notify the client, if desired, that we are in a new source file.
+ if (Callbacks) {
+ FileID FID = CurPPLexer->getFileID();
+ SourceLocation EnterLoc = SourceMgr.getLocForStartOfFile(FID);
+ SrcMgr::CharacteristicKind FileType =
+ SourceMgr.getFileCharacteristic(EnterLoc);
+ Callbacks->FileChanged(EnterLoc, PPCallbacks::EnterFile, FileType);
+ }
+}
+
+/// EnterMacro - Add a Macro to the top of the include stack and start lexing
+/// tokens from it instead of the current buffer.
+void Preprocessor::EnterMacro(Token &Tok, SourceLocation ILEnd,
+ MacroArgs *Args) {
+ PushIncludeMacroStack();
+ CurDirLookup = 0;
+
+ if (NumCachedTokenLexers == 0) {
+ CurTokenLexer.reset(new TokenLexer(Tok, ILEnd, Args, *this));
+ } else {
+ CurTokenLexer.reset(TokenLexerCache[--NumCachedTokenLexers]);
+ CurTokenLexer->Init(Tok, ILEnd, Args);
+ }
+ if (CurLexerKind != CLK_LexAfterModuleImport)
+ CurLexerKind = CLK_TokenLexer;
+}
+
+/// EnterTokenStream - Add a "macro" context to the top of the include stack,
+/// which will cause the lexer to start returning the specified tokens.
+///
+/// If DisableMacroExpansion is true, tokens lexed from the token stream will
+/// not be subject to further macro expansion. Otherwise, these tokens will
+/// be re-macro-expanded when/if expansion is enabled.
+///
+/// If OwnsTokens is false, this method assumes that the specified stream of
+/// tokens has a permanent owner somewhere, so they do not need to be copied.
+/// If it is true, it assumes the array of tokens is allocated with new[] and
+/// must be freed.
+///
+void Preprocessor::EnterTokenStream(const Token *Toks, unsigned NumToks,
+ bool DisableMacroExpansion,
+ bool OwnsTokens) {
+ // Save our current state.
+ PushIncludeMacroStack();
+ CurDirLookup = 0;
+
+ // Create a macro expander to expand from the specified token stream.
+ if (NumCachedTokenLexers == 0) {
+ CurTokenLexer.reset(new TokenLexer(Toks, NumToks, DisableMacroExpansion,
+ OwnsTokens, *this));
+ } else {
+ CurTokenLexer.reset(TokenLexerCache[--NumCachedTokenLexers]);
+ CurTokenLexer->Init(Toks, NumToks, DisableMacroExpansion, OwnsTokens);
+ }
+ if (CurLexerKind != CLK_LexAfterModuleImport)
+ CurLexerKind = CLK_TokenLexer;
+}
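+// Brief usage sketch (illustrative; the variable names are made up): replaying
+// a previously recorded sequence of tokens without re-expanding them would
+// look roughly like
+//
+//   PP.EnterTokenStream(CachedToks, NumCachedToks,
+//                       /*DisableMacroExpansion=*/true, /*OwnsTokens=*/false);
+//
+// after which subsequent PP.Lex() calls return those tokens in order before
+// falling back to the previous lexer.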
+
+/// \brief Compute the relative path that names the given file relative to
+/// the given directory.
+static void computeRelativePath(FileManager &FM, const DirectoryEntry *Dir,
+ const FileEntry *File,
+ SmallString<128> &Result) {
+ Result.clear();
+
+ StringRef FilePath = File->getDir()->getName();
+ StringRef Path = FilePath;
+ while (!Path.empty()) {
+ if (const DirectoryEntry *CurDir = FM.getDirectory(Path)) {
+ if (CurDir == Dir) {
+ Result = FilePath.substr(Path.size());
+ llvm::sys::path::append(Result,
+ llvm::sys::path::filename(File->getName()));
+ return;
+ }
+ }
+
+ Path = llvm::sys::path::parent_path(Path);
+ }
+
+ Result = File->getName();
+}
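+// Example (illustrative, made-up paths): with Dir = "/usr/include/Foo" and a
+// file at "/usr/include/Foo/Bar/Baz.h", the loop walks the file's directory
+// upward until it matches Dir, keeps the trailing "/Bar" component, and
+// appends the file name "Baz.h"; if no ancestor matches, Result falls back to
+// the file's full name.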
+
+/// HandleEndOfFile - This callback is invoked when the lexer hits the end of
+/// the current file. This either returns the EOF token or pops a level off
+/// the include stack and keeps going.
+bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
+ assert(!CurTokenLexer &&
+ "Ending a file when currently in a macro!");
+
+ // See if this file had a controlling macro.
+ if (CurPPLexer) { // Not ending a macro, ignore it.
+ if (const IdentifierInfo *ControllingMacro =
+ CurPPLexer->MIOpt.GetControllingMacroAtEndOfFile()) {
+ // Okay, this has a controlling macro, remember in HeaderFileInfo.
+ if (const FileEntry *FE =
+ SourceMgr.getFileEntryForID(CurPPLexer->getFileID()))
+ HeaderInfo.SetFileControllingMacro(FE, ControllingMacro);
+ }
+ }
+
+ // Complain about reaching a true EOF within arc_cf_code_audited.
+ // We don't want to complain about reaching the end of a macro
+ // instantiation or a _Pragma.
+ if (PragmaARCCFCodeAuditedLoc.isValid() &&
+ !isEndOfMacro && !(CurLexer && CurLexer->Is_PragmaLexer)) {
+ Diag(PragmaARCCFCodeAuditedLoc, diag::err_pp_eof_in_arc_cf_code_audited);
+
+ // Recover by leaving immediately.
+ PragmaARCCFCodeAuditedLoc = SourceLocation();
+ }
+
+ // If this is a #include'd file, pop it off the include stack and continue
+ // lexing the #includer file.
+ if (!IncludeMacroStack.empty()) {
+
+ // If we lexed the code-completion file, act as if we reached EOF.
+ if (isCodeCompletionEnabled() && CurPPLexer &&
+ SourceMgr.getLocForStartOfFile(CurPPLexer->getFileID()) ==
+ CodeCompletionFileLoc) {
+ if (CurLexer) {
+ Result.startToken();
+ CurLexer->FormTokenWithChars(Result, CurLexer->BufferEnd, tok::eof);
+ CurLexer.reset();
+ } else {
+ assert(CurPTHLexer && "Got EOF but no current lexer set!");
+ CurPTHLexer->getEOF(Result);
+ CurPTHLexer.reset();
+ }
+
+ CurPPLexer = 0;
+ return true;
+ }
+
+ if (!isEndOfMacro && CurPPLexer &&
+ SourceMgr.getIncludeLoc(CurPPLexer->getFileID()).isValid()) {
+ // Notify SourceManager to record the number of FileIDs that were created
+ // during lexing of the #include'd file.
+ unsigned NumFIDs =
+ SourceMgr.local_sloc_entry_size() -
+ CurPPLexer->getInitialNumSLocEntries() + 1/*#include'd file*/;
+ SourceMgr.setNumCreatedFIDsForFileID(CurPPLexer->getFileID(), NumFIDs);
+ }
+
+ FileID ExitedFID;
+ if (Callbacks && !isEndOfMacro && CurPPLexer)
+ ExitedFID = CurPPLexer->getFileID();
+
+ // We're done with the #included file.
+ RemoveTopOfLexerStack();
+
+ // Notify the client, if desired, that we are in a new source file.
+ if (Callbacks && !isEndOfMacro && CurPPLexer) {
+ SrcMgr::CharacteristicKind FileType =
+ SourceMgr.getFileCharacteristic(CurPPLexer->getSourceLocation());
+ Callbacks->FileChanged(CurPPLexer->getSourceLocation(),
+ PPCallbacks::ExitFile, FileType, ExitedFID);
+ }
+
+ // Client should lex another token.
+ return false;
+ }
+
+ // If the file ends with a newline, form the EOF token on the newline itself,
+ // rather than "on the line following it", which doesn't exist. This makes
+  // diagnostics relating to the end of file include the last line that the user
+ // actually typed, which is goodness.
+ if (CurLexer) {
+ const char *EndPos = CurLexer->BufferEnd;
+ if (EndPos != CurLexer->BufferStart &&
+ (EndPos[-1] == '\n' || EndPos[-1] == '\r')) {
+ --EndPos;
+
+ // Handle \n\r and \r\n:
+ if (EndPos != CurLexer->BufferStart &&
+ (EndPos[-1] == '\n' || EndPos[-1] == '\r') &&
+ EndPos[-1] != EndPos[0])
+ --EndPos;
+ }
+
+ Result.startToken();
+ CurLexer->BufferPtr = EndPos;
+ CurLexer->FormTokenWithChars(Result, EndPos, tok::eof);
+
+ if (!isIncrementalProcessingEnabled())
+ // We're done with lexing.
+ CurLexer.reset();
+ } else {
+ assert(CurPTHLexer && "Got EOF but no current lexer set!");
+ CurPTHLexer->getEOF(Result);
+ CurPTHLexer.reset();
+ }
+
+ if (!isIncrementalProcessingEnabled())
+ CurPPLexer = 0;
+
+ // This is the end of the top-level file. 'WarnUnusedMacroLocs' has collected
+ // all macro locations that we need to warn because they are not used.
+ for (WarnUnusedMacroLocsTy::iterator
+ I=WarnUnusedMacroLocs.begin(), E=WarnUnusedMacroLocs.end(); I!=E; ++I)
+ Diag(*I, diag::pp_macro_not_used);
+
+ // If we are building a module that has an umbrella header, make sure that
+ // each of the headers within the directory covered by the umbrella header
+ // was actually included by the umbrella header.
+ if (Module *Mod = getCurrentModule()) {
+ if (Mod->getUmbrellaHeader()) {
+ SourceLocation StartLoc
+ = SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
+
+ if (getDiagnostics().getDiagnosticLevel(
+ diag::warn_uncovered_module_header,
+ StartLoc) != DiagnosticsEngine::Ignored) {
+ ModuleMap &ModMap = getHeaderSearchInfo().getModuleMap();
+ typedef llvm::sys::fs::recursive_directory_iterator
+ recursive_directory_iterator;
+ const DirectoryEntry *Dir = Mod->getUmbrellaDir();
+ llvm::error_code EC;
+ for (recursive_directory_iterator Entry(Dir->getName(), EC), End;
+ Entry != End && !EC; Entry.increment(EC)) {
+ using llvm::StringSwitch;
+
+ // Check whether this entry has an extension typically associated with
+ // headers.
+ if (!StringSwitch<bool>(llvm::sys::path::extension(Entry->path()))
+ .Cases(".h", ".H", ".hh", ".hpp", true)
+ .Default(false))
+ continue;
+
+ if (const FileEntry *Header = getFileManager().getFile(Entry->path()))
+ if (!getSourceManager().hasFileInfo(Header)) {
+ if (!ModMap.isHeaderInUnavailableModule(Header)) {
+ // Find the relative path that would access this header.
+ SmallString<128> RelativePath;
+ computeRelativePath(FileMgr, Dir, Header, RelativePath);
+ Diag(StartLoc, diag::warn_uncovered_module_header)
+ << RelativePath;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ return true;
+}
+
+/// HandleEndOfTokenLexer - This callback is invoked when the current TokenLexer
+/// hits the end of its token stream.
+bool Preprocessor::HandleEndOfTokenLexer(Token &Result) {
+ assert(CurTokenLexer && !CurPPLexer &&
+ "Ending a macro when currently in a #include file!");
+
+ if (!MacroExpandingLexersStack.empty() &&
+ MacroExpandingLexersStack.back().first == CurTokenLexer.get())
+ removeCachedMacroExpandedTokensOfLastLexer();
+
+ // Delete or cache the now-dead macro expander.
+ if (NumCachedTokenLexers == TokenLexerCacheSize)
+ CurTokenLexer.reset();
+ else
+ TokenLexerCache[NumCachedTokenLexers++] = CurTokenLexer.take();
+
+ // Handle this like a #include file being popped off the stack.
+ return HandleEndOfFile(Result, true);
+}
+
+/// RemoveTopOfLexerStack - Pop the current lexer/macro exp off the top of the
+/// lexer stack. This should only be used in situations where the current
+/// state of the top-of-stack lexer is unknown.
+void Preprocessor::RemoveTopOfLexerStack() {
+ assert(!IncludeMacroStack.empty() && "Ran out of stack entries to load");
+
+ if (CurTokenLexer) {
+ // Delete or cache the now-dead macro expander.
+ if (NumCachedTokenLexers == TokenLexerCacheSize)
+ CurTokenLexer.reset();
+ else
+ TokenLexerCache[NumCachedTokenLexers++] = CurTokenLexer.take();
+ }
+
+ PopIncludeMacroStack();
+}
+
+/// HandleMicrosoftCommentPaste - When the macro expander pastes together a
+/// comment (/##/) in microsoft mode, this method handles updating the current
+/// state, returning the token on the next source line.
+void Preprocessor::HandleMicrosoftCommentPaste(Token &Tok) {
+ assert(CurTokenLexer && !CurPPLexer &&
+ "Pasted comment can only be formed from macro");
+
+ // We handle this by scanning for the closest real lexer, switching it to
+ // raw mode and preprocessor mode. This will cause it to return \n as an
+ // explicit EOD token.
+ PreprocessorLexer *FoundLexer = 0;
+ bool LexerWasInPPMode = false;
+ for (unsigned i = 0, e = IncludeMacroStack.size(); i != e; ++i) {
+ IncludeStackInfo &ISI = *(IncludeMacroStack.end()-i-1);
+ if (ISI.ThePPLexer == 0) continue; // Scan for a real lexer.
+
+ // Once we find a real lexer, mark it as raw mode (disabling macro
+ // expansions) and preprocessor mode (return EOD). We know that the lexer
+ // was *not* in raw mode before, because the macro that the comment came
+ // from was expanded. However, it could have already been in preprocessor
+ // mode (#if COMMENT) in which case we have to return it to that mode and
+ // return EOD.
+ FoundLexer = ISI.ThePPLexer;
+ FoundLexer->LexingRawMode = true;
+ LexerWasInPPMode = FoundLexer->ParsingPreprocessorDirective;
+ FoundLexer->ParsingPreprocessorDirective = true;
+ break;
+ }
+
+ // Okay, we either found and switched over the lexer, or we didn't find a
+ // lexer. In either case, finish off the macro the comment came from, getting
+ // the next token.
+ if (!HandleEndOfTokenLexer(Tok)) Lex(Tok);
+
+ // Discarding comments as long as we don't have EOF or EOD. This 'comments
+ // out' the rest of the line, including any tokens that came from other macros
+ // that were active, as in:
+ // #define submacro a COMMENT b
+ // submacro c
+ // which should lex to 'a' only: 'b' and 'c' should be removed.
+ while (Tok.isNot(tok::eod) && Tok.isNot(tok::eof))
+ Lex(Tok);
+
+ // If we got an eod token, then we successfully found the end of the line.
+ if (Tok.is(tok::eod)) {
+ assert(FoundLexer && "Can't get end of line without an active lexer");
+ // Restore the lexer back to normal mode instead of raw mode.
+ FoundLexer->LexingRawMode = false;
+
+ // If the lexer was already in preprocessor mode, just return the EOD token
+ // to finish the preprocessor line.
+ if (LexerWasInPPMode) return;
+
+ // Otherwise, switch out of PP mode and return the next lexed token.
+ FoundLexer->ParsingPreprocessorDirective = false;
+ return Lex(Tok);
+ }
+
+ // If we got an EOF token, then we reached the end of the token stream but
+ // didn't find an explicit \n. This can only happen if there was no lexer
+ // active (an active lexer would return EOD at EOF if there was no \n in
+ // preprocessor directive mode), so just return EOF as our token.
+ assert(!FoundLexer && "Lexer should return EOD before EOF in PP mode");
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
new file mode 100644
index 0000000..fe70585
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PPMacroExpansion.cpp
@@ -0,0 +1,1156 @@
+//===--- PPMacroExpansion.cpp - Top level Macro Expansion -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the top level handling of macro expansion for the
+// preprocessor.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+#include "MacroArgs.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/CodeCompletionHandler.h"
+#include "clang/Lex/ExternalPreprocessorSource.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Config/llvm-config.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cstdio>
+#include <ctime>
+using namespace clang;
+
+MacroInfo *Preprocessor::getInfoForMacro(IdentifierInfo *II) const {
+ assert(II->hasMacroDefinition() && "Identifier is not a macro!");
+
+ llvm::DenseMap<IdentifierInfo*, MacroInfo*>::const_iterator Pos
+ = Macros.find(II);
+ if (Pos == Macros.end()) {
+ // Load this macro from the external source.
+ getExternalSource()->LoadMacroDefinition(II);
+ Pos = Macros.find(II);
+ }
+ assert(Pos != Macros.end() && "Identifier macro info is missing!");
+ return Pos->second;
+}
+
+/// setMacroInfo - Specify a macro for this identifier.
+///
+void Preprocessor::setMacroInfo(IdentifierInfo *II, MacroInfo *MI,
+ bool LoadedFromAST) {
+ if (MI) {
+ Macros[II] = MI;
+ II->setHasMacroDefinition(true);
+ if (II->isFromAST() && !LoadedFromAST)
+ II->setChangedSinceDeserialization();
+ } else if (II->hasMacroDefinition()) {
+ Macros.erase(II);
+ II->setHasMacroDefinition(false);
+ if (II->isFromAST() && !LoadedFromAST)
+ II->setChangedSinceDeserialization();
+ }
+}
+
+/// RegisterBuiltinMacro - Register the specified identifier in the identifier
+/// table and mark it as a builtin macro to be expanded.
+static IdentifierInfo *RegisterBuiltinMacro(Preprocessor &PP, const char *Name){
+ // Get the identifier.
+ IdentifierInfo *Id = PP.getIdentifierInfo(Name);
+
+ // Mark it as being a macro that is builtin.
+ MacroInfo *MI = PP.AllocateMacroInfo(SourceLocation());
+ MI->setIsBuiltinMacro();
+ PP.setMacroInfo(Id, MI);
+ return Id;
+}
+
+
+/// RegisterBuiltinMacros - Register builtin macros, such as __LINE__ with the
+/// identifier table.
+void Preprocessor::RegisterBuiltinMacros() {
+ Ident__LINE__ = RegisterBuiltinMacro(*this, "__LINE__");
+ Ident__FILE__ = RegisterBuiltinMacro(*this, "__FILE__");
+ Ident__DATE__ = RegisterBuiltinMacro(*this, "__DATE__");
+ Ident__TIME__ = RegisterBuiltinMacro(*this, "__TIME__");
+ Ident__COUNTER__ = RegisterBuiltinMacro(*this, "__COUNTER__");
+ Ident_Pragma = RegisterBuiltinMacro(*this, "_Pragma");
+
+ // GCC Extensions.
+ Ident__BASE_FILE__ = RegisterBuiltinMacro(*this, "__BASE_FILE__");
+ Ident__INCLUDE_LEVEL__ = RegisterBuiltinMacro(*this, "__INCLUDE_LEVEL__");
+ Ident__TIMESTAMP__ = RegisterBuiltinMacro(*this, "__TIMESTAMP__");
+
+ // Clang Extensions.
+ Ident__has_feature = RegisterBuiltinMacro(*this, "__has_feature");
+ Ident__has_extension = RegisterBuiltinMacro(*this, "__has_extension");
+ Ident__has_builtin = RegisterBuiltinMacro(*this, "__has_builtin");
+ Ident__has_attribute = RegisterBuiltinMacro(*this, "__has_attribute");
+ Ident__has_include = RegisterBuiltinMacro(*this, "__has_include");
+ Ident__has_include_next = RegisterBuiltinMacro(*this, "__has_include_next");
+ Ident__has_warning = RegisterBuiltinMacro(*this, "__has_warning");
+
+ // Microsoft Extensions.
+ if (LangOpts.MicrosoftExt)
+ Ident__pragma = RegisterBuiltinMacro(*this, "__pragma");
+ else
+ Ident__pragma = 0;
+}
+
+/// isTrivialSingleTokenExpansion - Return true if MI, which has a single token
+/// in its expansion, currently expands to that token literally.
+static bool isTrivialSingleTokenExpansion(const MacroInfo *MI,
+ const IdentifierInfo *MacroIdent,
+ Preprocessor &PP) {
+ IdentifierInfo *II = MI->getReplacementToken(0).getIdentifierInfo();
+
+ // If the token isn't an identifier, it's always literally expanded.
+ if (II == 0) return true;
+
+ // If the information about this identifier is out of date, update it from
+ // the external source.
+ if (II->isOutOfDate())
+ PP.getExternalSource()->updateOutOfDateIdentifier(*II);
+
+ // If the identifier is a macro, and if that macro is enabled, it may be
+ // expanded so it's not a trivial expansion.
+ if (II->hasMacroDefinition() && PP.getMacroInfo(II)->isEnabled() &&
+ // Fast expanding "#define X X" is ok, because X would be disabled.
+ II != MacroIdent)
+ return false;
+
+ // If this is an object-like macro invocation, it is safe to trivially expand
+ // it.
+ if (MI->isObjectLike()) return true;
+
+ // If this is a function-like macro invocation, it's safe to trivially expand
+ // as long as the identifier is not a macro argument.
+ for (MacroInfo::arg_iterator I = MI->arg_begin(), E = MI->arg_end();
+ I != E; ++I)
+ if (*I == II)
+ return false; // Identifier is a macro argument.
+
+ return true;
+}
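+// Illustrative cases (not from the original source), assuming these macros:
+//
+//   #define VAL 42     // trivial: expands to a non-identifier token
+//   #define X X        // trivial: X is disabled while X itself is expanding
+//   #define Y Z        // not trivial if Z is itself an enabled macro
+//   #define F(a) a     // not trivial: the single token is a macro argument
+//
+// Only the trivial cases can take the single-token fast path in
+// HandleMacroExpandedIdentifier below.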
+
+
+/// isNextPPTokenLParen - Determine whether the next preprocessor token to be
+/// lexed is a '('. If so, consume the token and return true, if not, this
+/// method should have no observable side-effect on the lexed tokens.
+bool Preprocessor::isNextPPTokenLParen() {
+ // Do some quick tests for rejection cases.
+ unsigned Val;
+ if (CurLexer)
+ Val = CurLexer->isNextPPTokenLParen();
+ else if (CurPTHLexer)
+ Val = CurPTHLexer->isNextPPTokenLParen();
+ else
+ Val = CurTokenLexer->isNextTokenLParen();
+
+ if (Val == 2) {
+ // We have run off the end. If it's a source file we don't
+ // examine enclosing ones (C99 5.1.1.2p4). Otherwise walk up the
+ // macro stack.
+ if (CurPPLexer)
+ return false;
+ for (unsigned i = IncludeMacroStack.size(); i != 0; --i) {
+ IncludeStackInfo &Entry = IncludeMacroStack[i-1];
+ if (Entry.TheLexer)
+ Val = Entry.TheLexer->isNextPPTokenLParen();
+ else if (Entry.ThePTHLexer)
+ Val = Entry.ThePTHLexer->isNextPPTokenLParen();
+ else
+ Val = Entry.TheTokenLexer->isNextTokenLParen();
+
+ if (Val != 2)
+ break;
+
+ // Ran off the end of a source file?
+ if (Entry.ThePPLexer)
+ return false;
+ }
+ }
+
+ // Okay, if we know that the token is a '(', lex it and return. Otherwise we
+ // have found something that isn't a '(' or we found the end of the
+ // translation unit. In either case, return false.
+ return Val == 1;
+}
+
+/// HandleMacroExpandedIdentifier - If an identifier token is read that is to be
+/// expanded as a macro, handle it and return the next token as 'Identifier'.
+bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
+ MacroInfo *MI) {
+ // If this is a macro expansion in the "#if !defined(x)" line for the file,
+ // then the macro could expand to different things in other contexts, we need
+ // to disable the optimization in this case.
+ if (CurPPLexer) CurPPLexer->MIOpt.ExpandedMacro();
+
+ // If this is a builtin macro, like __LINE__ or _Pragma, handle it specially.
+ if (MI->isBuiltinMacro()) {
+ if (Callbacks) Callbacks->MacroExpands(Identifier, MI,
+ Identifier.getLocation());
+ ExpandBuiltinMacro(Identifier);
+ return false;
+ }
+
+ /// Args - If this is a function-like macro expansion, this contains,
+ /// for each macro argument, the list of tokens that were provided to the
+ /// invocation.
+ MacroArgs *Args = 0;
+
+ // Remember where the end of the expansion occurred. For an object-like
+ // macro, this is the identifier. For a function-like macro, this is the ')'.
+ SourceLocation ExpansionEnd = Identifier.getLocation();
+
+ // If this is a function-like macro, read the arguments.
+ if (MI->isFunctionLike()) {
+    // C99 6.10.3p10: If the preprocessing token immediately after the macro
+ // name isn't a '(', this macro should not be expanded.
+ if (!isNextPPTokenLParen())
+ return true;
+
+ // Remember that we are now parsing the arguments to a macro invocation.
+ // Preprocessor directives used inside macro arguments are not portable, and
+ // this enables the warning.
+ InMacroArgs = true;
+ Args = ReadFunctionLikeMacroArgs(Identifier, MI, ExpansionEnd);
+
+ // Finished parsing args.
+ InMacroArgs = false;
+
+ // If there was an error parsing the arguments, bail out.
+ if (Args == 0) return false;
+
+ ++NumFnMacroExpanded;
+ } else {
+ ++NumMacroExpanded;
+ }
+
+ // Notice that this macro has been used.
+ markMacroAsUsed(MI);
+
+ // Remember where the token is expanded.
+ SourceLocation ExpandLoc = Identifier.getLocation();
+
+ if (Callbacks) Callbacks->MacroExpands(Identifier, MI,
+ SourceRange(ExpandLoc, ExpansionEnd));
+
+ // If we started lexing a macro, enter the macro expansion body.
+
+ // If this macro expands to no tokens, don't bother to push it onto the
+ // expansion stack, only to take it right back off.
+ if (MI->getNumTokens() == 0) {
+ // No need for arg info.
+ if (Args) Args->destroy(*this);
+
+ // Ignore this macro use, just return the next token in the current
+ // buffer.
+ bool HadLeadingSpace = Identifier.hasLeadingSpace();
+ bool IsAtStartOfLine = Identifier.isAtStartOfLine();
+
+ Lex(Identifier);
+
+ // If the identifier isn't on some OTHER line, inherit the leading
+ // whitespace/first-on-a-line property of this token. This handles
+ // stuff like "! XX," -> "! ," and " XX," -> " ,", when XX is
+ // empty.
+ if (!Identifier.isAtStartOfLine()) {
+ if (IsAtStartOfLine) Identifier.setFlag(Token::StartOfLine);
+ if (HadLeadingSpace) Identifier.setFlag(Token::LeadingSpace);
+ }
+ Identifier.setFlag(Token::LeadingEmptyMacro);
+ ++NumFastMacroExpanded;
+ return false;
+
+ } else if (MI->getNumTokens() == 1 &&
+ isTrivialSingleTokenExpansion(MI, Identifier.getIdentifierInfo(),
+ *this)) {
+ // Otherwise, if this macro expands into a single trivially-expanded
+ // token: expand it now. This handles common cases like
+ // "#define VAL 42".
+
+ // No need for arg info.
+ if (Args) Args->destroy(*this);
+
+ // Propagate the isAtStartOfLine/hasLeadingSpace markers of the macro
+ // identifier to the expanded token.
+ bool isAtStartOfLine = Identifier.isAtStartOfLine();
+ bool hasLeadingSpace = Identifier.hasLeadingSpace();
+
+ // Replace the result token.
+ Identifier = MI->getReplacementToken(0);
+
+ // Restore the StartOfLine/LeadingSpace markers.
+ Identifier.setFlagValue(Token::StartOfLine , isAtStartOfLine);
+ Identifier.setFlagValue(Token::LeadingSpace, hasLeadingSpace);
+
+ // Update the tokens location to include both its expansion and physical
+ // locations.
+ SourceLocation Loc =
+ SourceMgr.createExpansionLoc(Identifier.getLocation(), ExpandLoc,
+ ExpansionEnd,Identifier.getLength());
+ Identifier.setLocation(Loc);
+
+ // If this is a disabled macro or #define X X, we must mark the result as
+ // unexpandable.
+ if (IdentifierInfo *NewII = Identifier.getIdentifierInfo()) {
+ if (MacroInfo *NewMI = getMacroInfo(NewII))
+ if (!NewMI->isEnabled() || NewMI == MI) {
+ Identifier.setFlag(Token::DisableExpand);
+ Diag(Identifier, diag::pp_disabled_macro_expansion);
+ }
+ }
+
+ // Since this is not an identifier token, it can't be macro expanded, so
+ // we're done.
+ ++NumFastMacroExpanded;
+ return false;
+ }
+
+ // Start expanding the macro.
+ EnterMacro(Identifier, ExpansionEnd, Args);
+
+ // Now that the macro is at the top of the include stack, ask the
+ // preprocessor to read the next token from it.
+ Lex(Identifier);
+ return false;
+}
+
+/// ReadFunctionLikeMacroArgs - After reading "MACRO" and knowing that the next
+/// token is the '(' of the macro, this method is invoked to read all of the
+/// actual arguments specified for the macro invocation. This returns null on
+/// error.
+MacroArgs *Preprocessor::ReadFunctionLikeMacroArgs(Token &MacroName,
+ MacroInfo *MI,
+ SourceLocation &MacroEnd) {
+ // The number of fixed arguments to parse.
+ unsigned NumFixedArgsLeft = MI->getNumArgs();
+ bool isVariadic = MI->isVariadic();
+
+ // Outer loop, while there are more arguments, keep reading them.
+ Token Tok;
+
+ // Read arguments as unexpanded tokens. This avoids issues, e.g., where
+ // an argument value in a macro could expand to ',' or '(' or ')'.
+ LexUnexpandedToken(Tok);
+ assert(Tok.is(tok::l_paren) && "Error computing l-paren-ness?");
+
+ // ArgTokens - Build up a list of tokens that make up each argument. Each
+ // argument is separated by an EOF token. Use a SmallVector so we can avoid
+ // heap allocations in the common case.
+ SmallVector<Token, 64> ArgTokens;
+
+ unsigned NumActuals = 0;
+ while (Tok.isNot(tok::r_paren)) {
+ assert((Tok.is(tok::l_paren) || Tok.is(tok::comma)) &&
+ "only expect argument separators here");
+
+ unsigned ArgTokenStart = ArgTokens.size();
+ SourceLocation ArgStartLoc = Tok.getLocation();
+
+ // C99 6.10.3p11: Keep track of the number of l_parens we have seen. Note
+ // that we already consumed the first one.
+ unsigned NumParens = 0;
+
+ while (1) {
+ // Read arguments as unexpanded tokens. This avoids issues, e.g., where
+ // an argument value in a macro could expand to ',' or '(' or ')'.
+ LexUnexpandedToken(Tok);
+
+ if (Tok.is(tok::eof) || Tok.is(tok::eod)) { // "#if f(<eof>" & "#if f(\n"
+ Diag(MacroName, diag::err_unterm_macro_invoc);
+ // Do not lose the EOF/EOD. Return it to the client.
+ MacroName = Tok;
+ return 0;
+ } else if (Tok.is(tok::r_paren)) {
+ // If we found the ) token, the macro arg list is done.
+ if (NumParens-- == 0) {
+ MacroEnd = Tok.getLocation();
+ break;
+ }
+ } else if (Tok.is(tok::l_paren)) {
+ ++NumParens;
+ } else if (Tok.is(tok::comma) && NumParens == 0) {
+ // Comma ends this argument if there are more fixed arguments expected.
+ // However, if this is a variadic macro, and this is part of the
+ // variadic part, then the comma is just an argument token.
+ if (!isVariadic) break;
+ if (NumFixedArgsLeft > 1)
+ break;
+ } else if (Tok.is(tok::comment) && !KeepMacroComments) {
+ // If this is a comment token in the argument list and we're just in
+ // -C mode (not -CC mode), discard the comment.
+ continue;
+ } else if (Tok.getIdentifierInfo() != 0) {
+ // Reading macro arguments can cause macros that we are currently
+ // expanding from to be popped off the expansion stack. Doing so causes
+ // them to be reenabled for expansion. Here we record whether any
+ // identifiers we lex as macro arguments correspond to disabled macros.
+ // If so, we mark the token as noexpand. This is a subtle aspect of
+ // C99 6.10.3.4p2.
+ if (MacroInfo *MI = getMacroInfo(Tok.getIdentifierInfo()))
+ if (!MI->isEnabled())
+ Tok.setFlag(Token::DisableExpand);
+ } else if (Tok.is(tok::code_completion)) {
+ if (CodeComplete)
+ CodeComplete->CodeCompleteMacroArgument(MacroName.getIdentifierInfo(),
+ MI, NumActuals);
+ // Don't mark that we reached the code-completion point because the
+ // parser is going to handle the token and there will be another
+ // code-completion callback.
+ }
+
+ ArgTokens.push_back(Tok);
+ }
+
+ // If this was an empty argument list foo(), don't add this as an empty
+ // argument.
+ if (ArgTokens.empty() && Tok.getKind() == tok::r_paren)
+ break;
+
+ // If this is not a variadic macro, and too many args were specified, emit
+ // an error.
+ if (!isVariadic && NumFixedArgsLeft == 0) {
+ if (ArgTokens.size() != ArgTokenStart)
+ ArgStartLoc = ArgTokens[ArgTokenStart].getLocation();
+
+ // Emit the diagnostic at the macro name in case there is a missing ).
+ // Emitting it at the , could be far away from the macro name.
+ Diag(ArgStartLoc, diag::err_too_many_args_in_macro_invoc);
+ return 0;
+ }
+
+    // Empty arguments are standard in C99 and C++0x, and are supported as an
+    // extension in other modes.
+ if (ArgTokens.size() == ArgTokenStart && !LangOpts.C99)
+ Diag(Tok, LangOpts.CPlusPlus0x ?
+ diag::warn_cxx98_compat_empty_fnmacro_arg :
+ diag::ext_empty_fnmacro_arg);
+
+ // Add a marker EOF token to the end of the token list for this argument.
+ Token EOFTok;
+ EOFTok.startToken();
+ EOFTok.setKind(tok::eof);
+ EOFTok.setLocation(Tok.getLocation());
+ EOFTok.setLength(0);
+ ArgTokens.push_back(EOFTok);
+ ++NumActuals;
+ assert(NumFixedArgsLeft != 0 && "Too many arguments parsed");
+ --NumFixedArgsLeft;
+ }
+
+  // Okay, we found the r_paren.  Check to see if we parsed too few
+ // arguments.
+ unsigned MinArgsExpected = MI->getNumArgs();
+
+ // See MacroArgs instance var for description of this.
+ bool isVarargsElided = false;
+
+ if (NumActuals < MinArgsExpected) {
+ // There are several cases where too few arguments is ok, handle them now.
+ if (NumActuals == 0 && MinArgsExpected == 1) {
+ // #define A(X) or #define A(...) ---> A()
+
+ // If there is exactly one argument, and that argument is missing,
+      // then we have an empty "()" argument list. This is fine, even if
+ // the macro expects one argument (the argument is just empty).
+ isVarargsElided = MI->isVariadic();
+ } else if (MI->isVariadic() &&
+ (NumActuals+1 == MinArgsExpected || // A(x, ...) -> A(X)
+ (NumActuals == 0 && MinArgsExpected == 2))) {// A(x,...) -> A()
+ // Varargs where the named vararg parameter is missing: ok as extension.
+ // #define A(x, ...)
+ // A("blah")
+ Diag(Tok, diag::ext_missing_varargs_arg);
+
+ // Remember this occurred, allowing us to elide the comma when used for
+ // cases like:
+ // #define A(x, foo...) blah(a, ## foo)
+ // #define B(x, ...) blah(a, ## __VA_ARGS__)
+ // #define C(...) blah(a, ## __VA_ARGS__)
+ // A(x) B(x) C()
+ isVarargsElided = true;
+ } else {
+ // Otherwise, emit the error.
+ Diag(Tok, diag::err_too_few_args_in_macro_invoc);
+ return 0;
+ }
+
+ // Add a marker EOF token to the end of the token list for this argument.
+ SourceLocation EndLoc = Tok.getLocation();
+ Tok.startToken();
+ Tok.setKind(tok::eof);
+ Tok.setLocation(EndLoc);
+ Tok.setLength(0);
+ ArgTokens.push_back(Tok);
+
+ // If we expect two arguments, add both as empty.
+ if (NumActuals == 0 && MinArgsExpected == 2)
+ ArgTokens.push_back(Tok);
+
+ } else if (NumActuals > MinArgsExpected && !MI->isVariadic()) {
+ // Emit the diagnostic at the macro name in case there is a missing ).
+ // Emitting it at the , could be far away from the macro name.
+ Diag(MacroName, diag::err_too_many_args_in_macro_invoc);
+ return 0;
+ }
+
+ return MacroArgs::create(MI, ArgTokens, isVarargsElided, *this);
+}
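+// Worked example (illustrative, hypothetical macro): for
+//
+//   #define ADD(a, b) ((a) + (b))
+//   ADD(f(x, y), 2)
+//
+// the first argument is the token sequence "f ( x , y )" because the comma
+// inside the nested parentheses does not separate arguments (NumParens > 0),
+// and each completed argument is terminated in ArgTokens by a marker tok::eof
+// token.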
+
+/// \brief Keeps macro expanded tokens for TokenLexers.
+///
+/// Works like a stack; a TokenLexer adds the macro-expanded tokens that it is
+/// going to lex to the cache, and when it finishes, the tokens are removed
+/// from the end of the cache.
+Token *Preprocessor::cacheMacroExpandedTokens(TokenLexer *tokLexer,
+ ArrayRef<Token> tokens) {
+ assert(tokLexer);
+ if (tokens.empty())
+ return 0;
+
+ size_t newIndex = MacroExpandedTokens.size();
+ bool cacheNeedsToGrow = tokens.size() >
+ MacroExpandedTokens.capacity()-MacroExpandedTokens.size();
+ MacroExpandedTokens.append(tokens.begin(), tokens.end());
+
+ if (cacheNeedsToGrow) {
+ // Go through all the TokenLexers whose 'Tokens' pointer points in the
+ // buffer and update the pointers to the (potential) new buffer array.
+ for (unsigned i = 0, e = MacroExpandingLexersStack.size(); i != e; ++i) {
+ TokenLexer *prevLexer;
+ size_t tokIndex;
+ llvm::tie(prevLexer, tokIndex) = MacroExpandingLexersStack[i];
+ prevLexer->Tokens = MacroExpandedTokens.data() + tokIndex;
+ }
+ }
+
+ MacroExpandingLexersStack.push_back(std::make_pair(tokLexer, newIndex));
+ return MacroExpandedTokens.data() + newIndex;
+}
+
+void Preprocessor::removeCachedMacroExpandedTokensOfLastLexer() {
+ assert(!MacroExpandingLexersStack.empty());
+ size_t tokIndex = MacroExpandingLexersStack.back().second;
+ assert(tokIndex < MacroExpandedTokens.size());
+ // Pop the cached macro expanded tokens from the end.
+ MacroExpandedTokens.resize(tokIndex);
+ MacroExpandingLexersStack.pop_back();
+}
+
+/// ComputeDATE_TIME - Compute the current time, enter it into the specified
+/// scratch buffer, then return DATELoc/TIMELoc locations with the position of
+/// the identifier tokens inserted.
+static void ComputeDATE_TIME(SourceLocation &DATELoc, SourceLocation &TIMELoc,
+ Preprocessor &PP) {
+ time_t TT = time(0);
+ struct tm *TM = localtime(&TT);
+
+ static const char * const Months[] = {
+ "Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"
+ };
+
+ char TmpBuffer[32];
+#ifdef LLVM_ON_WIN32
+ sprintf(TmpBuffer, "\"%s %2d %4d\"", Months[TM->tm_mon], TM->tm_mday,
+ TM->tm_year+1900);
+#else
+ snprintf(TmpBuffer, sizeof(TmpBuffer), "\"%s %2d %4d\"", Months[TM->tm_mon], TM->tm_mday,
+ TM->tm_year+1900);
+#endif
+
+ Token TmpTok;
+ TmpTok.startToken();
+ PP.CreateString(TmpBuffer, strlen(TmpBuffer), TmpTok);
+ DATELoc = TmpTok.getLocation();
+
+#ifdef LLVM_ON_WIN32
+ sprintf(TmpBuffer, "\"%02d:%02d:%02d\"", TM->tm_hour, TM->tm_min, TM->tm_sec);
+#else
+ snprintf(TmpBuffer, sizeof(TmpBuffer), "\"%02d:%02d:%02d\"", TM->tm_hour, TM->tm_min, TM->tm_sec);
+#endif
+ PP.CreateString(TmpBuffer, strlen(TmpBuffer), TmpTok);
+ TIMELoc = TmpTok.getLocation();
+}
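+// For illustration (times are made up): the scratch-buffer strings built above
+// have the same shapes as the standard __DATE__ and __TIME__ expansions, e.g.
+// "\"Apr 21 2012\"" and "\"09:30:00\"", and DATELoc/TIMELoc remember where
+// they live so later expansions of __DATE__/__TIME__ can reuse the exact same
+// strings for the whole translation unit.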
+
+
+/// HasFeature - Return true if we recognize and implement the feature
+/// specified by the identifier as a standard language feature.
+static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
+ const LangOptions &LangOpts = PP.getLangOpts();
+ StringRef Feature = II->getName();
+
+ // Normalize the feature name, __foo__ becomes foo.
+ if (Feature.startswith("__") && Feature.endswith("__") && Feature.size() >= 4)
+ Feature = Feature.substr(2, Feature.size() - 4);
+
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("address_sanitizer", LangOpts.AddressSanitizer)
+ .Case("attribute_analyzer_noreturn", true)
+ .Case("attribute_availability", true)
+ .Case("attribute_cf_returns_not_retained", true)
+ .Case("attribute_cf_returns_retained", true)
+ .Case("attribute_deprecated_with_message", true)
+ .Case("attribute_ext_vector_type", true)
+ .Case("attribute_ns_returns_not_retained", true)
+ .Case("attribute_ns_returns_retained", true)
+ .Case("attribute_ns_consumes_self", true)
+ .Case("attribute_ns_consumed", true)
+ .Case("attribute_cf_consumed", true)
+ .Case("attribute_objc_ivar_unused", true)
+ .Case("attribute_objc_method_family", true)
+ .Case("attribute_overloadable", true)
+ .Case("attribute_unavailable_with_message", true)
+ .Case("blocks", LangOpts.Blocks)
+ .Case("cxx_exceptions", LangOpts.Exceptions)
+ .Case("cxx_rtti", LangOpts.RTTI)
+ .Case("enumerator_attributes", true)
+ // Objective-C features
+ .Case("objc_arr", LangOpts.ObjCAutoRefCount) // FIXME: REMOVE?
+ .Case("objc_arc", LangOpts.ObjCAutoRefCount)
+ .Case("objc_arc_weak", LangOpts.ObjCAutoRefCount &&
+ LangOpts.ObjCRuntimeHasWeak)
+ .Case("objc_default_synthesize_properties", LangOpts.ObjC2)
+ .Case("objc_fixed_enum", LangOpts.ObjC2)
+ .Case("objc_instancetype", LangOpts.ObjC2)
+ .Case("objc_modules", LangOpts.ObjC2 && LangOpts.Modules)
+ .Case("objc_nonfragile_abi", LangOpts.ObjCNonFragileABI)
+ .Case("objc_weak_class", LangOpts.ObjCNonFragileABI)
+ .Case("ownership_holds", true)
+ .Case("ownership_returns", true)
+ .Case("ownership_takes", true)
+ .Case("objc_bool", true)
+ .Case("objc_subscripting", LangOpts.ObjCNonFragileABI)
+ .Case("objc_array_literals", LangOpts.ObjC2)
+ .Case("objc_dictionary_literals", LangOpts.ObjC2)
+ .Case("arc_cf_code_audited", true)
+ // C11 features
+ .Case("c_alignas", LangOpts.C11)
+ .Case("c_atomic", LangOpts.C11)
+ .Case("c_generic_selections", LangOpts.C11)
+ .Case("c_static_assert", LangOpts.C11)
+ // C++11 features
+ .Case("cxx_access_control_sfinae", LangOpts.CPlusPlus0x)
+ .Case("cxx_alias_templates", LangOpts.CPlusPlus0x)
+ .Case("cxx_alignas", LangOpts.CPlusPlus0x)
+ .Case("cxx_atomic", LangOpts.CPlusPlus0x)
+ .Case("cxx_attributes", LangOpts.CPlusPlus0x)
+ .Case("cxx_auto_type", LangOpts.CPlusPlus0x)
+ .Case("cxx_constexpr", LangOpts.CPlusPlus0x)
+ .Case("cxx_decltype", LangOpts.CPlusPlus0x)
+ .Case("cxx_decltype_incomplete_return_types", LangOpts.CPlusPlus0x)
+ .Case("cxx_default_function_template_args", LangOpts.CPlusPlus0x)
+ .Case("cxx_defaulted_functions", LangOpts.CPlusPlus0x)
+ .Case("cxx_delegating_constructors", LangOpts.CPlusPlus0x)
+ .Case("cxx_deleted_functions", LangOpts.CPlusPlus0x)
+ .Case("cxx_explicit_conversions", LangOpts.CPlusPlus0x)
+ .Case("cxx_generalized_initializers", LangOpts.CPlusPlus0x)
+ .Case("cxx_implicit_moves", LangOpts.CPlusPlus0x)
+ //.Case("cxx_inheriting_constructors", false)
+ .Case("cxx_inline_namespaces", LangOpts.CPlusPlus0x)
+ .Case("cxx_lambdas", LangOpts.CPlusPlus0x)
+ .Case("cxx_local_type_template_args", LangOpts.CPlusPlus0x)
+ .Case("cxx_nonstatic_member_init", LangOpts.CPlusPlus0x)
+ .Case("cxx_noexcept", LangOpts.CPlusPlus0x)
+ .Case("cxx_nullptr", LangOpts.CPlusPlus0x)
+ .Case("cxx_override_control", LangOpts.CPlusPlus0x)
+ .Case("cxx_range_for", LangOpts.CPlusPlus0x)
+ .Case("cxx_raw_string_literals", LangOpts.CPlusPlus0x)
+ .Case("cxx_reference_qualified_functions", LangOpts.CPlusPlus0x)
+ .Case("cxx_rvalue_references", LangOpts.CPlusPlus0x)
+ .Case("cxx_strong_enums", LangOpts.CPlusPlus0x)
+ .Case("cxx_static_assert", LangOpts.CPlusPlus0x)
+ .Case("cxx_trailing_return", LangOpts.CPlusPlus0x)
+ .Case("cxx_unicode_literals", LangOpts.CPlusPlus0x)
+ .Case("cxx_unrestricted_unions", LangOpts.CPlusPlus0x)
+ .Case("cxx_user_literals", LangOpts.CPlusPlus0x)
+ .Case("cxx_variadic_templates", LangOpts.CPlusPlus0x)
+ // Type traits
+ .Case("has_nothrow_assign", LangOpts.CPlusPlus)
+ .Case("has_nothrow_copy", LangOpts.CPlusPlus)
+ .Case("has_nothrow_constructor", LangOpts.CPlusPlus)
+ .Case("has_trivial_assign", LangOpts.CPlusPlus)
+ .Case("has_trivial_copy", LangOpts.CPlusPlus)
+ .Case("has_trivial_constructor", LangOpts.CPlusPlus)
+ .Case("has_trivial_destructor", LangOpts.CPlusPlus)
+ .Case("has_virtual_destructor", LangOpts.CPlusPlus)
+ .Case("is_abstract", LangOpts.CPlusPlus)
+ .Case("is_base_of", LangOpts.CPlusPlus)
+ .Case("is_class", LangOpts.CPlusPlus)
+ .Case("is_convertible_to", LangOpts.CPlusPlus)
+ // __is_empty is available only if the horrible
+ // "struct __is_empty" parsing hack hasn't been needed in this
+ // translation unit. If it has, __is_empty reverts to a normal
+ // identifier and __has_feature(is_empty) evaluates false.
+ .Case("is_empty",
+ LangOpts.CPlusPlus &&
+ PP.getIdentifierInfo("__is_empty")->getTokenID()
+ != tok::identifier)
+ .Case("is_enum", LangOpts.CPlusPlus)
+ .Case("is_final", LangOpts.CPlusPlus)
+ .Case("is_literal", LangOpts.CPlusPlus)
+ .Case("is_standard_layout", LangOpts.CPlusPlus)
+ // __is_pod is available only if the horrible
+ // "struct __is_pod" parsing hack hasn't been needed in this
+ // translation unit. If it has, __is_pod reverts to a normal
+ // identifier and __has_feature(is_pod) evaluates false.
+ .Case("is_pod",
+ LangOpts.CPlusPlus &&
+ PP.getIdentifierInfo("__is_pod")->getTokenID()
+ != tok::identifier)
+ .Case("is_polymorphic", LangOpts.CPlusPlus)
+ .Case("is_trivial", LangOpts.CPlusPlus)
+ .Case("is_trivially_assignable", LangOpts.CPlusPlus)
+ .Case("is_trivially_constructible", LangOpts.CPlusPlus)
+ .Case("is_trivially_copyable", LangOpts.CPlusPlus)
+ .Case("is_union", LangOpts.CPlusPlus)
+ .Case("modules", LangOpts.Modules)
+ .Case("tls", PP.getTargetInfo().isTLSSupported())
+ .Case("underlying_type", LangOpts.CPlusPlus)
+ .Default(false);
+}
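+
+// Illustrative usage only (not part of this change): translation units
+// typically query the feature table above through the builtin macro and
+// guard the probe for compilers that lack __has_feature, e.g.
+//
+//   #ifndef __has_feature
+//   #define __has_feature(x) 0   // degrade gracefully elsewhere
+//   #endif
+//   #if __has_feature(cxx_rvalue_references)
+//     // C++11 rvalue references are available here.
+//   #endif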
+
+/// HasExtension - Return true if we recognize and implement the feature
+/// specified by the identifier, either as an extension or a standard language
+/// feature.
+static bool HasExtension(const Preprocessor &PP, const IdentifierInfo *II) {
+ if (HasFeature(PP, II))
+ return true;
+
+ // If the use of an extension results in an error diagnostic, extensions are
+ // effectively unavailable, so just return false here.
+ if (PP.getDiagnostics().getExtensionHandlingBehavior() ==
+ DiagnosticsEngine::Ext_Error)
+ return false;
+
+ const LangOptions &LangOpts = PP.getLangOpts();
+ StringRef Extension = II->getName();
+
+ // Normalize the extension name, __foo__ becomes foo.
+ if (Extension.startswith("__") && Extension.endswith("__") &&
+ Extension.size() >= 4)
+ Extension = Extension.substr(2, Extension.size() - 4);
+
+ // Because we inherit the feature list from HasFeature, this string switch
+ // must be less restrictive than HasFeature's.
+ return llvm::StringSwitch<bool>(Extension)
+ // C11 features supported by other languages as extensions.
+ .Case("c_alignas", true)
+ .Case("c_atomic", true)
+ .Case("c_generic_selections", true)
+ .Case("c_static_assert", true)
+ // C++0x features supported by other languages as extensions.
+ .Case("cxx_atomic", LangOpts.CPlusPlus)
+ .Case("cxx_deleted_functions", LangOpts.CPlusPlus)
+ .Case("cxx_explicit_conversions", LangOpts.CPlusPlus)
+ .Case("cxx_inline_namespaces", LangOpts.CPlusPlus)
+ .Case("cxx_local_type_template_args", LangOpts.CPlusPlus)
+ .Case("cxx_nonstatic_member_init", LangOpts.CPlusPlus)
+ .Case("cxx_override_control", LangOpts.CPlusPlus)
+ .Case("cxx_range_for", LangOpts.CPlusPlus)
+ .Case("cxx_reference_qualified_functions", LangOpts.CPlusPlus)
+ .Case("cxx_rvalue_references", LangOpts.CPlusPlus)
+ .Default(false);
+}
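+
+// Illustrative usage only (not part of this change): unlike __has_feature,
+// __has_extension also returns true when the construct is merely accepted
+// as an extension in the current language mode, e.g.
+//
+//   #if __has_extension(cxx_deleted_functions)
+//     // Deleted functions are usable here (possibly with an extension
+//     // warning under -pedantic) even outside of C++11 mode.
+//   #endif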
+
+/// HasAttribute - Return true if we recognize and implement the attribute
+/// specified by the given identifier.
+static bool HasAttribute(const IdentifierInfo *II) {
+ StringRef Name = II->getName();
+ // Normalize the attribute name, __foo__ becomes foo.
+ if (Name.startswith("__") && Name.endswith("__") && Name.size() >= 4)
+ Name = Name.substr(2, Name.size() - 4);
+
+ return llvm::StringSwitch<bool>(Name)
+#include "clang/Lex/AttrSpellings.inc"
+ .Default(false);
+}
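+
+// Illustrative usage only (not part of this change): the names accepted
+// above come from AttrSpellings.inc, and because of the normalization step
+// both the plain and the __name__ spellings are recognized, e.g.
+//
+//   #if __has_attribute(__noreturn__)   // same as __has_attribute(noreturn)
+//   #define NORETURN __attribute__((noreturn))
+//   #else
+//   #define NORETURN
+//   #endif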
+
+/// EvaluateHasIncludeCommon - Process a '__has_include("path")'
+/// or '__has_include_next("path")' expression.
+/// Returns true if successful.
+static bool EvaluateHasIncludeCommon(Token &Tok,
+ IdentifierInfo *II, Preprocessor &PP,
+ const DirectoryLookup *LookupFrom) {
+ SourceLocation LParenLoc;
+
+ // Get '('.
+ PP.LexNonComment(Tok);
+
+ // Ensure we have a '('.
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_pp_missing_lparen) << II->getName();
+ return false;
+ }
+
+ // Save '(' location for possible missing ')' message.
+ LParenLoc = Tok.getLocation();
+
+ // Get the file name.
+ PP.getCurrentLexer()->LexIncludeFilename(Tok);
+
+ // Reserve a buffer to get the spelling.
+ SmallString<128> FilenameBuffer;
+ StringRef Filename;
+ SourceLocation EndLoc;
+
+ switch (Tok.getKind()) {
+ case tok::eod:
+ // If the token kind is EOD, the error has already been diagnosed.
+ return false;
+
+ case tok::angle_string_literal:
+ case tok::string_literal: {
+ bool Invalid = false;
+ Filename = PP.getSpelling(Tok, FilenameBuffer, &Invalid);
+ if (Invalid)
+ return false;
+ break;
+ }
+
+ case tok::less:
+ // This could be a <foo/bar.h> file coming from a macro expansion. In this
+ // case, glue the tokens together into FilenameBuffer and interpret those.
+ FilenameBuffer.push_back('<');
+ if (PP.ConcatenateIncludeName(FilenameBuffer, EndLoc))
+ return false; // Found <eod> but no ">"? Diagnostic already emitted.
+ Filename = FilenameBuffer.str();
+ break;
+ default:
+ PP.Diag(Tok.getLocation(), diag::err_pp_expects_filename);
+ return false;
+ }
+
+ // Get ')'.
+ PP.LexNonComment(Tok);
+
+ // Ensure we have a trailing ).
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::err_pp_missing_rparen) << II->getName();
+ PP.Diag(LParenLoc, diag::note_matching) << "(";
+ return false;
+ }
+
+ bool isAngled = PP.GetIncludeFilenameSpelling(Tok.getLocation(), Filename);
+ // If GetIncludeFilenameSpelling set the start ptr to null, there was an
+ // error.
+ if (Filename.empty())
+ return false;
+
+ // Search include directories.
+ const DirectoryLookup *CurDir;
+ const FileEntry *File =
+ PP.LookupFile(Filename, isAngled, LookupFrom, CurDir, NULL, NULL, NULL);
+
+ // Get the result value. A result of true means the file exists.
+ return File != 0;
+}
+
+/// EvaluateHasInclude - Process a '__has_include("path")' expression.
+/// Returns true if successful.
+static bool EvaluateHasInclude(Token &Tok, IdentifierInfo *II,
+ Preprocessor &PP) {
+ return EvaluateHasIncludeCommon(Tok, II, PP, NULL);
+}
+
+/// EvaluateHasIncludeNext - Process '__has_include_next("path")' expression.
+/// Returns true if successful.
+static bool EvaluateHasIncludeNext(Token &Tok,
+ IdentifierInfo *II, Preprocessor &PP) {
+ // __has_include_next is like __has_include, except that we start
+ // searching after the current found directory. If we can't do this,
+ // issue a diagnostic.
+ const DirectoryLookup *Lookup = PP.GetCurDirLookup();
+ if (PP.isInPrimaryFile()) {
+ Lookup = 0;
+ PP.Diag(Tok, diag::pp_include_next_in_primary);
+ } else if (Lookup == 0) {
+ PP.Diag(Tok, diag::pp_include_next_absolute_path);
+ } else {
+ // Start looking up in the next directory.
+ ++Lookup;
+ }
+
+ return EvaluateHasIncludeCommon(Tok, II, PP, Lookup);
+}
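+
+// Illustrative usage only (not part of this change): __has_include takes a
+// header-name in quoted or angled form, and __has_include_next continues the
+// search after the directory of the current header, e.g.
+//
+//   #if __has_include(<unistd.h>)
+//   #include <unistd.h>
+//   #endif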
+
+/// ExpandBuiltinMacro - If an identifier token is read that is to be expanded
+/// as a builtin macro, handle it and return the next token as 'Tok'.
+void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
+ // Figure out which token this is.
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ assert(II && "Can't be a macro without id info!");
+
+ // If this is an _Pragma or Microsoft __pragma directive, expand it,
+ // invoke the pragma handler, then lex the token after it.
+ if (II == Ident_Pragma)
+ return Handle_Pragma(Tok);
+ else if (II == Ident__pragma) // in non-MS mode this is null
+ return HandleMicrosoft__pragma(Tok);
+
+ ++NumBuiltinMacroExpanded;
+
+ SmallString<128> TmpBuffer;
+ llvm::raw_svector_ostream OS(TmpBuffer);
+
+ // Set up the return result.
+ Tok.setIdentifierInfo(0);
+ Tok.clearFlag(Token::NeedsCleaning);
+
+ if (II == Ident__LINE__) {
+ // C99 6.10.8: "__LINE__: The presumed line number (within the current
+ // source file) of the current source line (an integer constant)". This can
+ // be affected by #line.
+ SourceLocation Loc = Tok.getLocation();
+
+ // Advance to the location of the first '_'; this might not be the first byte
+ // of the token if it starts with an escaped newline.
+ Loc = AdvanceToTokenCharacter(Loc, 0);
+
+ // One wrinkle here is that GCC expands __LINE__ to the location of the *end* of
+ // a macro expansion. This doesn't matter for object-like macros, but
+ // can matter for a function-like macro that expands to contain __LINE__.
+ // Skip down through expansion points until we find a file loc for the
+ // end of the expansion history.
+ Loc = SourceMgr.getExpansionRange(Loc).second;
+ PresumedLoc PLoc = SourceMgr.getPresumedLoc(Loc);
+
+ // __LINE__ expands to a simple numeric value.
+ OS << (PLoc.isValid()? PLoc.getLine() : 1);
+ Tok.setKind(tok::numeric_constant);
+ } else if (II == Ident__FILE__ || II == Ident__BASE_FILE__) {
+ // C99 6.10.8: "__FILE__: The presumed name of the current source file (a
+ // character string literal)". This can be affected by #line.
+ PresumedLoc PLoc = SourceMgr.getPresumedLoc(Tok.getLocation());
+
+ // __BASE_FILE__ is a GNU extension that returns the top of the presumed
+ // #include stack instead of the current file.
+ if (II == Ident__BASE_FILE__ && PLoc.isValid()) {
+ SourceLocation NextLoc = PLoc.getIncludeLoc();
+ while (NextLoc.isValid()) {
+ PLoc = SourceMgr.getPresumedLoc(NextLoc);
+ if (PLoc.isInvalid())
+ break;
+
+ NextLoc = PLoc.getIncludeLoc();
+ }
+ }
+
+ // Escape this filename. Turn '\' -> '\\' '"' -> '\"'
+ SmallString<128> FN;
+ if (PLoc.isValid()) {
+ FN += PLoc.getFilename();
+ Lexer::Stringify(FN);
+ OS << '"' << FN.str() << '"';
+ }
+ Tok.setKind(tok::string_literal);
+ } else if (II == Ident__DATE__) {
+ if (!DATELoc.isValid())
+ ComputeDATE_TIME(DATELoc, TIMELoc, *this);
+ Tok.setKind(tok::string_literal);
+ Tok.setLength(strlen("\"Mmm dd yyyy\""));
+ Tok.setLocation(SourceMgr.createExpansionLoc(DATELoc, Tok.getLocation(),
+ Tok.getLocation(),
+ Tok.getLength()));
+ return;
+ } else if (II == Ident__TIME__) {
+ if (!TIMELoc.isValid())
+ ComputeDATE_TIME(DATELoc, TIMELoc, *this);
+ Tok.setKind(tok::string_literal);
+ Tok.setLength(strlen("\"hh:mm:ss\""));
+ Tok.setLocation(SourceMgr.createExpansionLoc(TIMELoc, Tok.getLocation(),
+ Tok.getLocation(),
+ Tok.getLength()));
+ return;
+ } else if (II == Ident__INCLUDE_LEVEL__) {
+ // Compute the presumed include depth of this token. This can be affected
+ // by GNU line markers.
+ unsigned Depth = 0;
+
+ PresumedLoc PLoc = SourceMgr.getPresumedLoc(Tok.getLocation());
+ if (PLoc.isValid()) {
+ PLoc = SourceMgr.getPresumedLoc(PLoc.getIncludeLoc());
+ for (; PLoc.isValid(); ++Depth)
+ PLoc = SourceMgr.getPresumedLoc(PLoc.getIncludeLoc());
+ }
+
+ // __INCLUDE_LEVEL__ expands to a simple numeric value.
+ OS << Depth;
+ Tok.setKind(tok::numeric_constant);
+ } else if (II == Ident__TIMESTAMP__) {
+ // MSVC, ICC, GCC, VisualAge C++ extension. The generated string should be
+ // of the form "Ddd Mmm dd hh:mm:ss yyyy", which is returned by asctime.
+
+ // Get the file that we are lexing out of. If we're currently lexing from
+ // a macro, dig into the include stack.
+ const FileEntry *CurFile = 0;
+ PreprocessorLexer *TheLexer = getCurrentFileLexer();
+
+ if (TheLexer)
+ CurFile = SourceMgr.getFileEntryForID(TheLexer->getFileID());
+
+ const char *Result;
+ if (CurFile) {
+ time_t TT = CurFile->getModificationTime();
+ struct tm *TM = localtime(&TT);
+ Result = asctime(TM);
+ } else {
+ Result = "??? ??? ?? ??:??:?? ????\n";
+ }
+ // Surround the string with " and strip the trailing newline.
+ OS << '"' << StringRef(Result, strlen(Result)-1) << '"';
+ Tok.setKind(tok::string_literal);
+ } else if (II == Ident__COUNTER__) {
+ // __COUNTER__ expands to a simple numeric value.
+ OS << CounterValue++;
+ Tok.setKind(tok::numeric_constant);
+ } else if (II == Ident__has_feature ||
+ II == Ident__has_extension ||
+ II == Ident__has_builtin ||
+ II == Ident__has_attribute) {
+ // The argument to these builtins should be a parenthesized identifier.
+ SourceLocation StartLoc = Tok.getLocation();
+
+ bool IsValid = false;
+ IdentifierInfo *FeatureII = 0;
+
+ // Read the '('.
+ Lex(Tok);
+ if (Tok.is(tok::l_paren)) {
+ // Read the identifier
+ Lex(Tok);
+ if (Tok.is(tok::identifier)) {
+ FeatureII = Tok.getIdentifierInfo();
+
+ // Read the ')'.
+ Lex(Tok);
+ if (Tok.is(tok::r_paren))
+ IsValid = true;
+ }
+ }
+
+ bool Value = false;
+ if (!IsValid)
+ Diag(StartLoc, diag::err_feature_check_malformed);
+ else if (II == Ident__has_builtin) {
+ // Check for a builtin is trivial.
+ Value = FeatureII->getBuiltinID() != 0;
+ } else if (II == Ident__has_attribute)
+ Value = HasAttribute(FeatureII);
+ else if (II == Ident__has_extension)
+ Value = HasExtension(*this, FeatureII);
+ else {
+ assert(II == Ident__has_feature && "Must be feature check");
+ Value = HasFeature(*this, FeatureII);
+ }
+
+ OS << (int)Value;
+ if (IsValid)
+ Tok.setKind(tok::numeric_constant);
+ } else if (II == Ident__has_include ||
+ II == Ident__has_include_next) {
+ // The argument to these two builtins should be a parenthesized
+ // file name string literal using angle brackets (<>) or
+ // double-quotes ("").
+ bool Value;
+ if (II == Ident__has_include)
+ Value = EvaluateHasInclude(Tok, II, *this);
+ else
+ Value = EvaluateHasIncludeNext(Tok, II, *this);
+ OS << (int)Value;
+ Tok.setKind(tok::numeric_constant);
+ } else if (II == Ident__has_warning) {
+ // The argument should be a parenthesized string literal.
+ SourceLocation StartLoc = Tok.getLocation();
+ bool IsValid = false;
+ bool Value = false;
+ // Read the '('.
+ Lex(Tok);
+ do {
+ if (Tok.is(tok::l_paren)) {
+ // Read the string.
+ Lex(Tok);
+
+ // We need at least one string literal.
+ if (!Tok.is(tok::string_literal)) {
+ StartLoc = Tok.getLocation();
+ IsValid = false;
+ // Eat tokens until ')'.
+ do Lex(Tok); while (!(Tok.is(tok::r_paren) || Tok.is(tok::eod)));
+ break;
+ }
+
+ // String concatenation allows multiple strings, which can even come
+ // from macro expansion.
+ SmallVector<Token, 4> StrToks;
+ while (Tok.is(tok::string_literal)) {
+ // Complain about, and drop, any ud-suffix.
+ if (Tok.hasUDSuffix())
+ Diag(Tok, diag::err_invalid_string_udl);
+ StrToks.push_back(Tok);
+ LexUnexpandedToken(Tok);
+ }
+
+ // Is the end a ')'?
+ if (!(IsValid = Tok.is(tok::r_paren)))
+ break;
+
+ // Concatenate and parse the strings.
+ StringLiteralParser Literal(&StrToks[0], StrToks.size(), *this);
+ assert(Literal.isAscii() && "Didn't allow wide strings in");
+ if (Literal.hadError)
+ break;
+ if (Literal.Pascal) {
+ Diag(Tok, diag::warn_pragma_diagnostic_invalid);
+ break;
+ }
+
+ StringRef WarningName(Literal.GetString());
+
+ if (WarningName.size() < 3 || WarningName[0] != '-' ||
+ WarningName[1] != 'W') {
+ Diag(StrToks[0].getLocation(), diag::warn_has_warning_invalid_option);
+ break;
+ }
+
+ // Finally, check if the warning flag maps to a diagnostic group.
+ // We construct a SmallVector here to talk to getDiagnosticIDs().
+ // Although we don't use the result, this isn't a hot path, and not
+ // worth special casing.
+ llvm::SmallVector<diag::kind, 10> Diags;
+ Value = !getDiagnostics().getDiagnosticIDs()->
+ getDiagnosticsInGroup(WarningName.substr(2), Diags);
+ }
+ } while (false);
+
+ if (!IsValid)
+ Diag(StartLoc, diag::err_warning_check_malformed);
+
+ OS << (int)Value;
+ Tok.setKind(tok::numeric_constant);
+ } else {
+ llvm_unreachable("Unknown identifier!");
+ }
+ CreateString(OS.str().data(), OS.str().size(), Tok,
+ Tok.getLocation(), Tok.getLocation());
+}
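+
+// Illustrative usage only (not part of this change): __has_warning, handled
+// above, takes a parenthesized "-W..." string and evaluates to 1 when the
+// flag names a known diagnostic group, e.g.
+//
+//   #if __has_warning("-Wconversion")
+//   #pragma clang diagnostic ignored "-Wconversion"
+//   #endif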
+
+void Preprocessor::markMacroAsUsed(MacroInfo *MI) {
+ // If the 'used' status changed, and the macro requires 'unused' warning,
+ // remove its SourceLocation from the warn-for-unused-macro locations.
+ if (MI->isWarnIfUnused() && !MI->isUsed())
+ WarnUnusedMacroLocs.erase(MI->getDefinitionLoc());
+ MI->setIsUsed(true);
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
new file mode 100644
index 0000000..f104f96
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PTHLexer.cpp
@@ -0,0 +1,710 @@
+//===--- PTHLexer.cpp - Lex from a token stream ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PTHLexer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/TokenKinds.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/OnDiskHashTable.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/PTHLexer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PTHManager.h"
+#include "clang/Lex/Token.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/system_error.h"
+using namespace clang;
+using namespace clang::io;
+
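+// Each token is stored on disk as a fixed-size record (see PTHLexer::Lex()):
+// 1 byte of token kind, 1 byte of token flags and 2 bytes of token length
+// packed into one little-endian word, followed by a 4-byte identifier /
+// literal-spelling index and a 4-byte offset from the start of the file.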
+#define DISK_TOKEN_SIZE (1+1+2+4+4)
+
+//===----------------------------------------------------------------------===//
+// PTHLexer methods.
+//===----------------------------------------------------------------------===//
+
+PTHLexer::PTHLexer(Preprocessor &PP, FileID FID, const unsigned char *D,
+ const unsigned char *ppcond, PTHManager &PM)
+ : PreprocessorLexer(&PP, FID), TokBuf(D), CurPtr(D), LastHashTokPtr(0),
+ PPCond(ppcond), CurPPCondPtr(ppcond), PTHMgr(PM) {
+
+ FileStartLoc = PP.getSourceManager().getLocForStartOfFile(FID);
+}
+
+void PTHLexer::Lex(Token& Tok) {
+LexNextToken:
+
+ //===--------------------------------------==//
+ // Read the raw token data.
+ //===--------------------------------------==//
+
+ // Shadow CurPtr into an automatic variable.
+ const unsigned char *CurPtrShadow = CurPtr;
+
+ // Read in the data for the token.
+ unsigned Word0 = ReadLE32(CurPtrShadow);
+ uint32_t IdentifierID = ReadLE32(CurPtrShadow);
+ uint32_t FileOffset = ReadLE32(CurPtrShadow);
+
+ tok::TokenKind TKind = (tok::TokenKind) (Word0 & 0xFF);
+ Token::TokenFlags TFlags = (Token::TokenFlags) ((Word0 >> 8) & 0xFF);
+ uint32_t Len = Word0 >> 16;
+
+ CurPtr = CurPtrShadow;
+
+ //===--------------------------------------==//
+ // Construct the token itself.
+ //===--------------------------------------==//
+
+ Tok.startToken();
+ Tok.setKind(TKind);
+ Tok.setFlag(TFlags);
+ assert(!LexingRawMode);
+ Tok.setLocation(FileStartLoc.getLocWithOffset(FileOffset));
+ Tok.setLength(Len);
+
+ // Handle identifiers.
+ if (Tok.isLiteral()) {
+ Tok.setLiteralData((const char*) (PTHMgr.SpellingBase + IdentifierID));
+ }
+ else if (IdentifierID) {
+ MIOpt.ReadToken();
+ IdentifierInfo *II = PTHMgr.GetIdentifierInfo(IdentifierID-1);
+
+ Tok.setIdentifierInfo(II);
+
+ // Change the kind of this identifier to the appropriate token kind, e.g.
+ // turning "for" into a keyword.
+ Tok.setKind(II->getTokenID());
+
+ if (II->isHandleIdentifierCase())
+ PP->HandleIdentifier(Tok);
+ return;
+ }
+
+ //===--------------------------------------==//
+ // Process the token.
+ //===--------------------------------------==//
+ if (TKind == tok::eof) {
+ // Save the end-of-file token.
+ EofToken = Tok;
+
+ // Save 'PP' to 'PPCache' as LexEndOfFile can delete 'this'.
+ Preprocessor *PPCache = PP;
+
+ assert(!ParsingPreprocessorDirective);
+ assert(!LexingRawMode);
+
+ if (LexEndOfFile(Tok))
+ return;
+
+ return PPCache->Lex(Tok);
+ }
+
+ if (TKind == tok::hash && Tok.isAtStartOfLine()) {
+ LastHashTokPtr = CurPtr - DISK_TOKEN_SIZE;
+ assert(!LexingRawMode);
+ PP->HandleDirective(Tok);
+
+ if (PP->isCurrentLexer(this))
+ goto LexNextToken;
+
+ return PP->Lex(Tok);
+ }
+
+ if (TKind == tok::eod) {
+ assert(ParsingPreprocessorDirective);
+ ParsingPreprocessorDirective = false;
+ return;
+ }
+
+ MIOpt.ReadToken();
+}
+
+bool PTHLexer::LexEndOfFile(Token &Result) {
+ // If we hit the end of the file while parsing a preprocessor directive,
+ // end the preprocessor directive first. The next token returned will
+ // then be the end of file.
+ if (ParsingPreprocessorDirective) {
+ ParsingPreprocessorDirective = false; // Done parsing the "line".
+ return true; // Have a token.
+ }
+
+ assert(!LexingRawMode);
+
+ // If we are in a #if directive, emit an error.
+ while (!ConditionalStack.empty()) {
+ if (PP->getCodeCompletionFileLoc() != FileStartLoc)
+ PP->Diag(ConditionalStack.back().IfLoc,
+ diag::err_pp_unterminated_conditional);
+ ConditionalStack.pop_back();
+ }
+
+ // Finally, let the preprocessor handle this.
+ return PP->HandleEndOfFile(Result);
+}
+
+// FIXME: We can just grab the last token instead of storing a copy
+// into EofToken.
+void PTHLexer::getEOF(Token& Tok) {
+ assert(EofToken.is(tok::eof));
+ Tok = EofToken;
+}
+
+void PTHLexer::DiscardToEndOfLine() {
+ assert(ParsingPreprocessorDirective && ParsingFilename == false &&
+ "Must be in a preprocessing directive!");
+
+ // We assume that if the preprocessor wishes to discard to the end of
+ // the line that it also means to end the current preprocessor directive.
+ ParsingPreprocessorDirective = false;
+
+ // Skip tokens by only peeking at their token kind and the flags.
+ // We don't need to actually reconstruct full tokens from the token buffer.
+ // This saves some copies and it also reduces IdentifierInfo* lookup.
+ const unsigned char* p = CurPtr;
+ while (1) {
+ // Read the token kind. Are we at the end of the file?
+ tok::TokenKind x = (tok::TokenKind) (uint8_t) *p;
+ if (x == tok::eof) break;
+
+ // Read the token flags. Are we at the start of the next line?
+ Token::TokenFlags y = (Token::TokenFlags) (uint8_t) p[1];
+ if (y & Token::StartOfLine) break;
+
+ // Skip to the next token.
+ p += DISK_TOKEN_SIZE;
+ }
+
+ CurPtr = p;
+}
+
+/// SkipBlock - Used by Preprocessor to skip the current conditional block.
+bool PTHLexer::SkipBlock() {
+ assert(CurPPCondPtr && "No cached PP conditional information.");
+ assert(LastHashTokPtr && "No known '#' token.");
+
+ const unsigned char* HashEntryI = 0;
+ uint32_t Offset;
+ uint32_t TableIdx;
+
+ do {
+ // Read the token offset from the side-table.
+ Offset = ReadLE32(CurPPCondPtr);
+
+ // Read the target table index from the side-table.
+ TableIdx = ReadLE32(CurPPCondPtr);
+
+ // Compute the actual memory address of the '#' token data for this entry.
+ HashEntryI = TokBuf + Offset;
+
+ // Optimization: "Sibling jumping". #if...#else...#endif blocks can
+ // contain nested blocks. In the side-table we can jump over these
+ // nested blocks instead of doing a linear search if the next "sibling"
+ // entry is not at a location greater than LastHashTokPtr.
+ if (HashEntryI < LastHashTokPtr && TableIdx) {
+ // In the side-table we are still at an entry for a '#' token that
+ // is earlier than the last one we saw. Check if the location we would
+ // stride gets us closer.
+ const unsigned char* NextPPCondPtr =
+ PPCond + TableIdx*(sizeof(uint32_t)*2);
+ assert(NextPPCondPtr >= CurPPCondPtr);
+ // Read where we should jump to.
+ uint32_t TmpOffset = ReadLE32(NextPPCondPtr);
+ const unsigned char* HashEntryJ = TokBuf + TmpOffset;
+
+ if (HashEntryJ <= LastHashTokPtr) {
+ // Jump directly to the next entry in the side table.
+ HashEntryI = HashEntryJ;
+ Offset = TmpOffset;
+ TableIdx = ReadLE32(NextPPCondPtr);
+ CurPPCondPtr = NextPPCondPtr;
+ }
+ }
+ }
+ while (HashEntryI < LastHashTokPtr);
+ assert(HashEntryI == LastHashTokPtr && "No PP-cond entry found for '#'");
+ assert(TableIdx && "No jumping from #endifs.");
+
+ // Update our side-table iterator.
+ const unsigned char* NextPPCondPtr = PPCond + TableIdx*(sizeof(uint32_t)*2);
+ assert(NextPPCondPtr >= CurPPCondPtr);
+ CurPPCondPtr = NextPPCondPtr;
+
+ // Read where we should jump to.
+ HashEntryI = TokBuf + ReadLE32(NextPPCondPtr);
+ uint32_t NextIdx = ReadLE32(NextPPCondPtr);
+
+ // By construction NextIdx will be zero if this is a #endif. Knowing this
+ // lets us avoid lexing another token.
+ bool isEndif = NextIdx == 0;
+
+ // This case can occur when we see something like this:
+ //
+ // #if ...
+ // /* a comment or nothing */
+ // #elif
+ //
+ // If we are skipping the first #if block it will be the case that CurPtr
+ // already points at the 'elif'. Just return.
+
+ if (CurPtr > HashEntryI) {
+ assert(CurPtr == HashEntryI + DISK_TOKEN_SIZE);
+ // Did we reach a #endif? If so, go ahead and consume that token as well.
+ if (isEndif)
+ CurPtr += DISK_TOKEN_SIZE*2;
+ else
+ LastHashTokPtr = HashEntryI;
+
+ return isEndif;
+ }
+
+ // Otherwise, we need to advance. Update CurPtr to point to the '#' token.
+ CurPtr = HashEntryI;
+
+ // Update the location of the last observed '#'. This is useful if we
+ // are skipping multiple blocks.
+ LastHashTokPtr = CurPtr;
+
+ // Skip the '#' token.
+ assert(((tok::TokenKind)*CurPtr) == tok::hash);
+ CurPtr += DISK_TOKEN_SIZE;
+
+ // Did we reach a #endif? If so, go ahead and consume that token as well.
+ if (isEndif) { CurPtr += DISK_TOKEN_SIZE*2; }
+
+ return isEndif;
+}
+
+SourceLocation PTHLexer::getSourceLocation() {
+ // getSourceLocation is not on the hot path. It is used to get the location
+ // of the next token when transitioning back to this lexer when done
+ // handling a #included file. Just read the necessary data from the token
+ // data buffer to construct the SourceLocation object.
+ // NOTE: This is a virtual function; hence it is defined out-of-line.
+ const unsigned char *OffsetPtr = CurPtr + (DISK_TOKEN_SIZE - 4);
+ uint32_t Offset = ReadLE32(OffsetPtr);
+ return FileStartLoc.getLocWithOffset(Offset);
+}
+
+//===----------------------------------------------------------------------===//
+// PTH file lookup: map from strings to file data.
+//===----------------------------------------------------------------------===//
+
+/// PTHFileLookup - This internal data structure is used by the PTHManager
+/// to map from FileEntry objects managed by FileManager to offsets within
+/// the PTH file.
+namespace {
+class PTHFileData {
+ const uint32_t TokenOff;
+ const uint32_t PPCondOff;
+public:
+ PTHFileData(uint32_t tokenOff, uint32_t ppCondOff)
+ : TokenOff(tokenOff), PPCondOff(ppCondOff) {}
+
+ uint32_t getTokenOffset() const { return TokenOff; }
+ uint32_t getPPCondOffset() const { return PPCondOff; }
+};
+
+
+class PTHFileLookupCommonTrait {
+public:
+ typedef std::pair<unsigned char, const char*> internal_key_type;
+
+ static unsigned ComputeHash(internal_key_type x) {
+ return llvm::HashString(x.second);
+ }
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d) {
+ unsigned keyLen = (unsigned) ReadUnalignedLE16(d);
+ unsigned dataLen = (unsigned) *(d++);
+ return std::make_pair(keyLen, dataLen);
+ }
+
+ static internal_key_type ReadKey(const unsigned char* d, unsigned) {
+ unsigned char k = *(d++); // Read the entry kind.
+ return std::make_pair(k, (const char*) d);
+ }
+};
+
+class PTHFileLookupTrait : public PTHFileLookupCommonTrait {
+public:
+ typedef const FileEntry* external_key_type;
+ typedef PTHFileData data_type;
+
+ static internal_key_type GetInternalKey(const FileEntry* FE) {
+ return std::make_pair((unsigned char) 0x1, FE->getName());
+ }
+
+ static bool EqualKey(internal_key_type a, internal_key_type b) {
+ return a.first == b.first && strcmp(a.second, b.second) == 0;
+ }
+
+ static PTHFileData ReadData(const internal_key_type& k,
+ const unsigned char* d, unsigned) {
+ assert(k.first == 0x1 && "Only file lookups can match!");
+ uint32_t x = ::ReadUnalignedLE32(d);
+ uint32_t y = ::ReadUnalignedLE32(d);
+ return PTHFileData(x, y);
+ }
+};
+
+class PTHStringLookupTrait {
+public:
+ typedef uint32_t
+ data_type;
+
+ typedef const std::pair<const char*, unsigned>
+ external_key_type;
+
+ typedef external_key_type internal_key_type;
+
+ static bool EqualKey(const internal_key_type& a,
+ const internal_key_type& b) {
+ return (a.second == b.second) ? memcmp(a.first, b.first, a.second) == 0
+ : false;
+ }
+
+ static unsigned ComputeHash(const internal_key_type& a) {
+ return llvm::HashString(StringRef(a.first, a.second));
+ }
+
+ // This hopefully will just get inlined and removed by the optimizer.
+ static const internal_key_type&
+ GetInternalKey(const external_key_type& x) { return x; }
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d) {
+ return std::make_pair((unsigned) ReadUnalignedLE16(d), sizeof(uint32_t));
+ }
+
+ static std::pair<const char*, unsigned>
+ ReadKey(const unsigned char* d, unsigned n) {
+ assert(n >= 2 && d[n-1] == '\0');
+ return std::make_pair((const char*) d, n-1);
+ }
+
+ static uint32_t ReadData(const internal_key_type& k, const unsigned char* d,
+ unsigned) {
+ return ::ReadUnalignedLE32(d);
+ }
+};
+
+} // end anonymous namespace
+
+typedef OnDiskChainedHashTable<PTHFileLookupTrait> PTHFileLookup;
+typedef OnDiskChainedHashTable<PTHStringLookupTrait> PTHStringIdLookup;
+
+//===----------------------------------------------------------------------===//
+// PTHManager methods.
+//===----------------------------------------------------------------------===//
+
+PTHManager::PTHManager(const llvm::MemoryBuffer* buf, void* fileLookup,
+ const unsigned char* idDataTable,
+ IdentifierInfo** perIDCache,
+ void* stringIdLookup, unsigned numIds,
+ const unsigned char* spellingBase,
+ const char* originalSourceFile)
+: Buf(buf), PerIDCache(perIDCache), FileLookup(fileLookup),
+ IdDataTable(idDataTable), StringIdLookup(stringIdLookup),
+ NumIds(numIds), PP(0), SpellingBase(spellingBase),
+ OriginalSourceFile(originalSourceFile) {}
+
+PTHManager::~PTHManager() {
+ delete Buf;
+ delete (PTHFileLookup*) FileLookup;
+ delete (PTHStringIdLookup*) StringIdLookup;
+ free(PerIDCache);
+}
+
+static void InvalidPTH(DiagnosticsEngine &Diags, const char *Msg) {
+ Diags.Report(Diags.getCustomDiagID(DiagnosticsEngine::Error, Msg));
+}
+
+PTHManager *PTHManager::Create(const std::string &file,
+ DiagnosticsEngine &Diags) {
+ // Memory map the PTH file.
+ OwningPtr<llvm::MemoryBuffer> File;
+
+ if (llvm::MemoryBuffer::getFile(file, File)) {
+ // FIXME: Add ec.message() to this diag.
+ Diags.Report(diag::err_invalid_pth_file) << file;
+ return 0;
+ }
+
+ // Get the buffer ranges and check if there are at least three 32-bit
+ // words at the end of the file.
+ const unsigned char *BufBeg = (unsigned char*)File->getBufferStart();
+ const unsigned char *BufEnd = (unsigned char*)File->getBufferEnd();
+
+ // Check the prologue of the file.
+ if ((BufEnd - BufBeg) < (signed)(sizeof("cfe-pth") + 3 + 4) ||
+ memcmp(BufBeg, "cfe-pth", sizeof("cfe-pth") - 1) != 0) {
+ Diags.Report(diag::err_invalid_pth_file) << file;
+ return 0;
+ }
+
+ // Read the PTH version.
+ const unsigned char *p = BufBeg + (sizeof("cfe-pth") - 1);
+ unsigned Version = ReadLE32(p);
+
+ if (Version != PTHManager::Version) {
+ InvalidPTH(Diags,
+ Version < PTHManager::Version
+ ? "PTH file uses an older PTH format that is no longer supported"
+ : "PTH file uses a newer PTH format that cannot be read");
+ return 0;
+ }
+
+ // Compute the address of the index table at the end of the PTH file.
+ const unsigned char *PrologueOffset = p;
+
+ if (PrologueOffset >= BufEnd) {
+ Diags.Report(diag::err_invalid_pth_file) << file;
+ return 0;
+ }
+
+ // Construct the file lookup table. This will be used for mapping from
+ // FileEntry*'s to cached tokens.
+ const unsigned char* FileTableOffset = PrologueOffset + sizeof(uint32_t)*2;
+ const unsigned char* FileTable = BufBeg + ReadLE32(FileTableOffset);
+
+ if (!(FileTable > BufBeg && FileTable < BufEnd)) {
+ Diags.Report(diag::err_invalid_pth_file) << file;
+ return 0; // FIXME: Proper error diagnostic?
+ }
+
+ OwningPtr<PTHFileLookup> FL(PTHFileLookup::Create(FileTable, BufBeg));
+
+ // Warn if the PTH file is empty. We still want to create a PTHManager
+ // as the PTH could be used with -include-pth.
+ if (FL->isEmpty())
+ InvalidPTH(Diags, "PTH file contains no cached source data");
+
+ // Get the location of the table mapping from persistent ids to the
+ // data needed to reconstruct identifiers.
+ const unsigned char* IDTableOffset = PrologueOffset + sizeof(uint32_t)*0;
+ const unsigned char* IData = BufBeg + ReadLE32(IDTableOffset);
+
+ if (!(IData >= BufBeg && IData < BufEnd)) {
+ Diags.Report(diag::err_invalid_pth_file) << file;
+ return 0;
+ }
+
+ // Get the location of the hashtable mapping between strings and
+ // persistent IDs.
+ const unsigned char* StringIdTableOffset = PrologueOffset + sizeof(uint32_t)*1;
+ const unsigned char* StringIdTable = BufBeg + ReadLE32(StringIdTableOffset);
+ if (!(StringIdTable >= BufBeg && StringIdTable < BufEnd)) {
+ Diags.Report(diag::err_invalid_pth_file) << file;
+ return 0;
+ }
+
+ OwningPtr<PTHStringIdLookup> SL(PTHStringIdLookup::Create(StringIdTable,
+ BufBeg));
+
+ // Get the location of the spelling cache.
+ const unsigned char* spellingBaseOffset = PrologueOffset + sizeof(uint32_t)*3;
+ const unsigned char* spellingBase = BufBeg + ReadLE32(spellingBaseOffset);
+ if (!(spellingBase >= BufBeg && spellingBase < BufEnd)) {
+ Diags.Report(diag::err_invalid_pth_file) << file;
+ return 0;
+ }
+
+ // Get the number of IdentifierInfos and pre-allocate the identifier cache.
+ uint32_t NumIds = ReadLE32(IData);
+
+ // Pre-allocate the persistent ID -> IdentifierInfo* cache. We use calloc()
+ // so that, in the best case, memory is zeroed only once, when the OS hands
+ // us new pages.
+ IdentifierInfo** PerIDCache = 0;
+
+ if (NumIds) {
+ PerIDCache = (IdentifierInfo**)calloc(NumIds, sizeof(*PerIDCache));
+ if (!PerIDCache) {
+ InvalidPTH(Diags, "Could not allocate memory for processing PTH file");
+ return 0;
+ }
+ }
+
+ // Compute the address of the original source file.
+ const unsigned char* originalSourceBase = PrologueOffset + sizeof(uint32_t)*4;
+ unsigned len = ReadUnalignedLE16(originalSourceBase);
+ if (!len) originalSourceBase = 0;
+
+ // Create the new PTHManager.
+ return new PTHManager(File.take(), FL.take(), IData, PerIDCache,
+ SL.take(), NumIds, spellingBase,
+ (const char*) originalSourceBase);
+}
+
+IdentifierInfo* PTHManager::LazilyCreateIdentifierInfo(unsigned PersistentID) {
+ // Look in the PTH file for the string data for the IdentifierInfo object.
+ const unsigned char* TableEntry = IdDataTable + sizeof(uint32_t)*PersistentID;
+ const unsigned char* IDData =
+ (const unsigned char*)Buf->getBufferStart() + ReadLE32(TableEntry);
+ assert(IDData < (const unsigned char*)Buf->getBufferEnd());
+
+ // Allocate the object.
+ std::pair<IdentifierInfo,const unsigned char*> *Mem =
+ Alloc.Allocate<std::pair<IdentifierInfo,const unsigned char*> >();
+
+ Mem->second = IDData;
+ assert(IDData[0] != '\0');
+ IdentifierInfo *II = new ((void*) Mem) IdentifierInfo();
+
+ // Store the new IdentifierInfo in the cache.
+ PerIDCache[PersistentID] = II;
+ assert(II->getNameStart() && II->getNameStart()[0] != '\0');
+ return II;
+}
+
+IdentifierInfo* PTHManager::get(StringRef Name) {
+ PTHStringIdLookup& SL = *((PTHStringIdLookup*)StringIdLookup);
+ // Double check our assumption that the last character isn't '\0'.
+ assert(Name.empty() || Name.back() != '\0');
+ PTHStringIdLookup::iterator I = SL.find(std::make_pair(Name.data(),
+ Name.size()));
+ if (I == SL.end()) // No identifier found?
+ return 0;
+
+ // Match found. Return the identifier!
+ assert(*I > 0);
+ return GetIdentifierInfo(*I-1);
+}
+
+PTHLexer *PTHManager::CreateLexer(FileID FID) {
+ const FileEntry *FE = PP->getSourceManager().getFileEntryForID(FID);
+ if (!FE)
+ return 0;
+
+ // Lookup the FileEntry object in our file lookup data structure. It will
+ // return a variant that indicates whether or not there is an offset within
+ // the PTH file that contains cached tokens.
+ PTHFileLookup& PFL = *((PTHFileLookup*)FileLookup);
+ PTHFileLookup::iterator I = PFL.find(FE);
+
+ if (I == PFL.end()) // No tokens available?
+ return 0;
+
+ const PTHFileData& FileData = *I;
+
+ const unsigned char *BufStart = (const unsigned char *)Buf->getBufferStart();
+ // Compute the offset of the token data within the buffer.
+ const unsigned char* data = BufStart + FileData.getTokenOffset();
+
+ // Get the location of pp-conditional table.
+ const unsigned char* ppcond = BufStart + FileData.getPPCondOffset();
+ uint32_t Len = ReadLE32(ppcond);
+ if (Len == 0) ppcond = 0;
+
+ assert(PP && "No preprocessor set yet!");
+ return new PTHLexer(*PP, FID, data, ppcond, *this);
+}
+
+//===----------------------------------------------------------------------===//
+// 'stat' caching.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class PTHStatData {
+public:
+ const bool hasStat;
+ const ino_t ino;
+ const dev_t dev;
+ const mode_t mode;
+ const time_t mtime;
+ const off_t size;
+
+ PTHStatData(ino_t i, dev_t d, mode_t mo, time_t m, off_t s)
+ : hasStat(true), ino(i), dev(d), mode(mo), mtime(m), size(s) {}
+
+ PTHStatData()
+ : hasStat(false), ino(0), dev(0), mode(0), mtime(0), size(0) {}
+};
+
+class PTHStatLookupTrait : public PTHFileLookupCommonTrait {
+public:
+ typedef const char* external_key_type; // const char*
+ typedef PTHStatData data_type;
+
+ static internal_key_type GetInternalKey(const char *path) {
+ // The key 'kind' doesn't matter here because it is ignored in EqualKey.
+ return std::make_pair((unsigned char) 0x0, path);
+ }
+
+ static bool EqualKey(internal_key_type a, internal_key_type b) {
+ // When doing 'stat' lookups we don't care about the kind of 'a' and 'b',
+ // just the paths.
+ return strcmp(a.second, b.second) == 0;
+ }
+
+ static data_type ReadData(const internal_key_type& k, const unsigned char* d,
+ unsigned) {
+
+ if (k.first /* File or Directory */) {
+ if (k.first == 0x1 /* File */) d += 4 * 2; // Skip the first 2 words.
+ ino_t ino = (ino_t) ReadUnalignedLE32(d);
+ dev_t dev = (dev_t) ReadUnalignedLE32(d);
+ mode_t mode = (mode_t) ReadUnalignedLE16(d);
+ time_t mtime = (time_t) ReadUnalignedLE64(d);
+ return data_type(ino, dev, mode, mtime, (off_t) ReadUnalignedLE64(d));
+ }
+
+ // Negative stat. Don't read anything.
+ return data_type();
+ }
+};
+
+class PTHStatCache : public FileSystemStatCache {
+ typedef OnDiskChainedHashTable<PTHStatLookupTrait> CacheTy;
+ CacheTy Cache;
+
+public:
+ PTHStatCache(PTHFileLookup &FL) :
+ Cache(FL.getNumBuckets(), FL.getNumEntries(), FL.getBuckets(),
+ FL.getBase()) {}
+
+ ~PTHStatCache() {}
+
+ LookupResult getStat(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor) {
+ // Do the lookup for the file's data in the PTH file.
+ CacheTy::iterator I = Cache.find(Path);
+
+ // If we don't get a hit in the PTH file just forward to 'stat'.
+ if (I == Cache.end())
+ return statChained(Path, StatBuf, FileDescriptor);
+
+ const PTHStatData &Data = *I;
+
+ if (!Data.hasStat)
+ return CacheMissing;
+
+ StatBuf.st_ino = Data.ino;
+ StatBuf.st_dev = Data.dev;
+ StatBuf.st_mtime = Data.mtime;
+ StatBuf.st_mode = Data.mode;
+ StatBuf.st_size = Data.size;
+ return CacheExists;
+ }
+};
+} // end anonymous namespace
+
+FileSystemStatCache *PTHManager::createStatCache() {
+ return new PTHStatCache(*((PTHFileLookup*) FileLookup));
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
new file mode 100644
index 0000000..e2a192b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/Pragma.cpp
@@ -0,0 +1,1292 @@
+//===--- Pragma.cpp - Pragma registration and handling --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PragmaHandler/PragmaTable interfaces and implements
+// pragma related methods of the Preprocessor class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Pragma.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/CrashRecoveryContext.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+using namespace clang;
+
+// Out-of-line destructor to provide a home for the class.
+PragmaHandler::~PragmaHandler() {
+}
+
+//===----------------------------------------------------------------------===//
+// EmptyPragmaHandler Implementation.
+//===----------------------------------------------------------------------===//
+
+EmptyPragmaHandler::EmptyPragmaHandler() {}
+
+void EmptyPragmaHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &FirstToken) {}
+
+//===----------------------------------------------------------------------===//
+// PragmaNamespace Implementation.
+//===----------------------------------------------------------------------===//
+
+
+PragmaNamespace::~PragmaNamespace() {
+ for (llvm::StringMap<PragmaHandler*>::iterator
+ I = Handlers.begin(), E = Handlers.end(); I != E; ++I)
+ delete I->second;
+}
+
+/// FindHandler - Check to see if there is already a handler for the
+/// specified name. If not, return the handler for the null identifier if it
+/// exists, otherwise return null. If IgnoreNull is true (the default) then
+/// the null handler isn't returned on failure to match.
+PragmaHandler *PragmaNamespace::FindHandler(StringRef Name,
+ bool IgnoreNull) const {
+ if (PragmaHandler *Handler = Handlers.lookup(Name))
+ return Handler;
+ return IgnoreNull ? 0 : Handlers.lookup(StringRef());
+}
+
+void PragmaNamespace::AddPragma(PragmaHandler *Handler) {
+ assert(!Handlers.lookup(Handler->getName()) &&
+ "A handler with this name is already registered in this namespace");
+ llvm::StringMapEntry<PragmaHandler *> &Entry =
+ Handlers.GetOrCreateValue(Handler->getName());
+ Entry.setValue(Handler);
+}
+
+void PragmaNamespace::RemovePragmaHandler(PragmaHandler *Handler) {
+ assert(Handlers.lookup(Handler->getName()) &&
+ "Handler not registered in this namespace");
+ Handlers.erase(Handler->getName());
+}
+
+void PragmaNamespace::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ // Read the 'namespace' that the directive is in, e.g. STDC. Do not
+ // macro-expand it; the user could have a STDC #define, which should not
+ // affect this.
+ PP.LexUnexpandedToken(Tok);
+
+ // Get the handler for this token. If there is no handler, ignore the pragma.
+ PragmaHandler *Handler
+ = FindHandler(Tok.getIdentifierInfo() ? Tok.getIdentifierInfo()->getName()
+ : StringRef(),
+ /*IgnoreNull=*/false);
+ if (Handler == 0) {
+ PP.Diag(Tok, diag::warn_pragma_ignored);
+ return;
+ }
+
+ // Otherwise, pass it down.
+ Handler->HandlePragma(PP, Introducer, Tok);
+}
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Pragma Directive Handling.
+//===----------------------------------------------------------------------===//
+
+/// HandlePragmaDirective - The "#pragma" directive has been parsed. Lex the
+/// rest of the pragma, passing it to the registered pragma handlers.
+void Preprocessor::HandlePragmaDirective(unsigned Introducer) {
+ ++NumPragma;
+
+ // Invoke the first level of pragma handlers which reads the namespace id.
+ Token Tok;
+ PragmaHandlers->HandlePragma(*this, PragmaIntroducerKind(Introducer), Tok);
+
+ // If the pragma handler didn't read the rest of the line, consume it now.
+ if ((CurTokenLexer && CurTokenLexer->isParsingPreprocessorDirective())
+ || (CurPPLexer && CurPPLexer->ParsingPreprocessorDirective))
+ DiscardUntilEndOfDirective();
+}
+
+namespace {
+/// \brief Helper class for \see Preprocessor::Handle_Pragma.
+class LexingFor_PragmaRAII {
+ Preprocessor &PP;
+ bool InMacroArgPreExpansion;
+ bool Failed;
+ Token &OutTok;
+ Token PragmaTok;
+
+public:
+ LexingFor_PragmaRAII(Preprocessor &PP, bool InMacroArgPreExpansion,
+ Token &Tok)
+ : PP(PP), InMacroArgPreExpansion(InMacroArgPreExpansion),
+ Failed(false), OutTok(Tok) {
+ if (InMacroArgPreExpansion) {
+ PragmaTok = OutTok;
+ PP.EnableBacktrackAtThisPos();
+ }
+ }
+
+ ~LexingFor_PragmaRAII() {
+ if (InMacroArgPreExpansion) {
+ if (Failed) {
+ PP.CommitBacktrackedTokens();
+ } else {
+ PP.Backtrack();
+ OutTok = PragmaTok;
+ }
+ }
+ }
+
+ void failed() {
+ Failed = true;
+ }
+};
+}
+
+/// Handle_Pragma - Read a _Pragma directive, slice it up, process it, then
+/// return the first token after the directive. The _Pragma token has just
+/// been read into 'Tok'.
+void Preprocessor::Handle_Pragma(Token &Tok) {
+
+ // This works differently if we are pre-expanding a macro argument.
+ // In that case we don't actually "activate" the pragma now, we only lex it
+ // until we are sure it is lexically correct and then we backtrack so that
+ // we activate the pragma whenever we encounter the tokens again in the token
+ // stream. This ensures that we will activate it in the correct location
+ // or that we will ignore it if it never enters the token stream, e.g:
+ //
+ // #define EMPTY(x)
+ // #define INACTIVE(x) EMPTY(x)
+ // INACTIVE(_Pragma("clang diagnostic ignored \"-Wconversion\""))
+
+ LexingFor_PragmaRAII _PragmaLexing(*this, InMacroArgPreExpansion, Tok);
+
+ // Remember the pragma token location.
+ SourceLocation PragmaLoc = Tok.getLocation();
+
+ // Read the '('.
+ Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(PragmaLoc, diag::err__Pragma_malformed);
+ return _PragmaLexing.failed();
+ }
+
+ // Read the '"..."'.
+ Lex(Tok);
+ if (Tok.isNot(tok::string_literal) && Tok.isNot(tok::wide_string_literal)) {
+ Diag(PragmaLoc, diag::err__Pragma_malformed);
+ // Skip this token, and the ')', if present.
+ if (Tok.isNot(tok::r_paren))
+ Lex(Tok);
+ if (Tok.is(tok::r_paren))
+ Lex(Tok);
+ return _PragmaLexing.failed();
+ }
+
+ if (Tok.hasUDSuffix()) {
+ Diag(Tok, diag::err_invalid_string_udl);
+ // Skip this token, and the ')', if present.
+ Lex(Tok);
+ if (Tok.is(tok::r_paren))
+ Lex(Tok);
+ return _PragmaLexing.failed();
+ }
+
+ // Remember the string.
+ Token StrTok = Tok;
+
+ // Read the ')'.
+ Lex(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(PragmaLoc, diag::err__Pragma_malformed);
+ return _PragmaLexing.failed();
+ }
+
+ if (InMacroArgPreExpansion)
+ return;
+
+ SourceLocation RParenLoc = Tok.getLocation();
+ std::string StrVal = getSpelling(StrTok);
+
+ // The _Pragma is lexically sound. Destringize according to C99 6.10.9.1:
+ // "The string literal is destringized by deleting the L prefix, if present,
+ // deleting the leading and trailing double-quotes, replacing each escape
+ // sequence \" by a double-quote, and replacing each escape sequence \\ by a
+ // single backslash."
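+ //
+ // For example (illustrative):
+ //   _Pragma("GCC dependency \"parse.y\"")
+ // destringizes to the pragma text:
+ //   GCC dependency "parse.y"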
+ if (StrVal[0] == 'L') // Remove L prefix.
+ StrVal.erase(StrVal.begin());
+ assert(StrVal[0] == '"' && StrVal[StrVal.size()-1] == '"' &&
+ "Invalid string token!");
+
+ // Remove the front quote, replacing it with a space, so that the pragma
+ // contents appear to have a space before them.
+ StrVal[0] = ' ';
+
+ // Replace the terminating quote with a \n.
+ StrVal[StrVal.size()-1] = '\n';
+
+ // Remove escaped quotes and escapes.
+ for (unsigned i = 0, e = StrVal.size(); i != e-1; ++i) {
+ if (StrVal[i] == '\\' &&
+ (StrVal[i+1] == '\\' || StrVal[i+1] == '"')) {
+ // \\ -> '\' and \" -> '"'.
+ StrVal.erase(StrVal.begin()+i);
+ --e;
+ }
+ }
+
+ // Plop the string (including the newline and trailing null) into a buffer
+ // where we can lex it.
+ Token TmpTok;
+ TmpTok.startToken();
+ CreateString(&StrVal[0], StrVal.size(), TmpTok);
+ SourceLocation TokLoc = TmpTok.getLocation();
+
+ // Make and enter a lexer object so that we lex and expand the tokens just
+ // like any others.
+ Lexer *TL = Lexer::Create_PragmaLexer(TokLoc, PragmaLoc, RParenLoc,
+ StrVal.size(), *this);
+
+ EnterSourceFileWithLexer(TL, 0);
+
+ // With everything set up, lex this as a #pragma directive.
+ HandlePragmaDirective(PIK__Pragma);
+
+ // Finally, return whatever came after the pragma directive.
+ return Lex(Tok);
+}
+
+/// HandleMicrosoft__pragma - Like Handle_Pragma except the pragma text
+/// is not enclosed within a string literal.
+void Preprocessor::HandleMicrosoft__pragma(Token &Tok) {
+ // Remember the pragma token location.
+ SourceLocation PragmaLoc = Tok.getLocation();
+
+ // Read the '('.
+ Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(PragmaLoc, diag::err__Pragma_malformed);
+ return;
+ }
+
+ // Get the tokens enclosed within the __pragma(), as well as the final ')'.
+ SmallVector<Token, 32> PragmaToks;
+ int NumParens = 0;
+ Lex(Tok);
+ while (Tok.isNot(tok::eof)) {
+ PragmaToks.push_back(Tok);
+ if (Tok.is(tok::l_paren))
+ NumParens++;
+ else if (Tok.is(tok::r_paren) && NumParens-- == 0)
+ break;
+ Lex(Tok);
+ }
+
+ if (Tok.is(tok::eof)) {
+ Diag(PragmaLoc, diag::err_unterminated___pragma);
+ return;
+ }
+
+ PragmaToks.front().setFlag(Token::LeadingSpace);
+
+ // Replace the ')' with an EOD to mark the end of the pragma.
+ PragmaToks.back().setKind(tok::eod);
+
+ Token *TokArray = new Token[PragmaToks.size()];
+ std::copy(PragmaToks.begin(), PragmaToks.end(), TokArray);
+
+ // Push the tokens onto the stack.
+ EnterTokenStream(TokArray, PragmaToks.size(), true, true);
+
+ // With everything set up, lex this as a #pragma directive.
+ HandlePragmaDirective(PIK___pragma);
+
+ // Finally, return whatever came after the pragma directive.
+ return Lex(Tok);
+}
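+
+// Illustrative usage only (not part of this change): __pragma takes the
+// pragma text directly, without the string quoting that _Pragma requires,
+// e.g.
+//
+//   __pragma(warning(disable: 4996))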
+
+/// HandlePragmaOnce - Handle #pragma once. OnceTok is the 'once'.
+///
+void Preprocessor::HandlePragmaOnce(Token &OnceTok) {
+ if (isInPrimaryFile()) {
+ Diag(OnceTok, diag::pp_pragma_once_in_main_file);
+ return;
+ }
+
+ // Get the current file lexer we're looking at. Ignore _Pragma 'files' etc.
+ // Mark the file as a once-only file now.
+ HeaderInfo.MarkFileIncludeOnce(getCurrentFileLexer()->getFileEntry());
+}
+
+void Preprocessor::HandlePragmaMark() {
+ assert(CurPPLexer && "No current lexer?");
+ if (CurLexer)
+ CurLexer->ReadToEndOfLine();
+ else
+ CurPTHLexer->DiscardToEndOfLine();
+}
+
+
+/// HandlePragmaPoison - Handle #pragma GCC poison. PoisonTok is the 'poison'.
+///
+void Preprocessor::HandlePragmaPoison(Token &PoisonTok) {
+ Token Tok;
+
+ while (1) {
+ // Read the next token to poison. While doing this, pretend that we are
+ // skipping while reading the identifier to poison.
+ // This avoids errors on code like:
+ // #pragma GCC poison X
+ // #pragma GCC poison X
+ if (CurPPLexer) CurPPLexer->LexingRawMode = true;
+ LexUnexpandedToken(Tok);
+ if (CurPPLexer) CurPPLexer->LexingRawMode = false;
+
+ // If we reached the end of line, we're done.
+ if (Tok.is(tok::eod)) return;
+
+ // Can only poison identifiers.
+ if (Tok.isNot(tok::raw_identifier)) {
+ Diag(Tok, diag::err_pp_invalid_poison);
+ return;
+ }
+
+ // Look up the identifier info for the token. We disabled identifier lookup
+ // by saying we're skipping contents, so we need to do this manually.
+ IdentifierInfo *II = LookUpIdentifierInfo(Tok);
+
+ // Already poisoned.
+ if (II->isPoisoned()) continue;
+
+ // If this is a macro identifier, emit a warning.
+ if (II->hasMacroDefinition())
+ Diag(Tok, diag::pp_poisoning_existing_macro);
+
+ // Finally, poison it!
+ II->setIsPoisoned();
+ if (II->isFromAST())
+ II->setChangedSinceDeserialization();
+ }
+}
+
+/// HandlePragmaSystemHeader - Implement #pragma GCC system_header. We know
+/// that the whole directive has been parsed.
+void Preprocessor::HandlePragmaSystemHeader(Token &SysHeaderTok) {
+ if (isInPrimaryFile()) {
+ Diag(SysHeaderTok, diag::pp_pragma_sysheader_in_main_file);
+ return;
+ }
+
+ // Get the current file lexer we're looking at. Ignore _Pragma 'files' etc.
+ PreprocessorLexer *TheLexer = getCurrentFileLexer();
+
+ // Mark the file as a system header.
+ HeaderInfo.MarkFileSystemHeader(TheLexer->getFileEntry());
+
+
+ PresumedLoc PLoc = SourceMgr.getPresumedLoc(SysHeaderTok.getLocation());
+ if (PLoc.isInvalid())
+ return;
+
+ unsigned FilenameID = SourceMgr.getLineTableFilenameID(PLoc.getFilename());
+
+ // Notify the client, if desired, that we are in a new source file.
+ if (Callbacks)
+ Callbacks->FileChanged(SysHeaderTok.getLocation(),
+ PPCallbacks::SystemHeaderPragma, SrcMgr::C_System);
+
+ // Emit a line marker. This will change any source locations from this point
+ // forward to realize they are in a system header.
+ // Create a line note with this information.
+ SourceMgr.AddLineNote(SysHeaderTok.getLocation(), PLoc.getLine(), FilenameID,
+ false, false, true, false);
+}
+
+/// HandlePragmaDependency - Handle #pragma GCC dependency "foo" blah.
+///
+void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
+ Token FilenameTok;
+ CurPPLexer->LexIncludeFilename(FilenameTok);
+
+ // If the token kind is EOD, the error has already been diagnosed.
+ if (FilenameTok.is(tok::eod))
+ return;
+
+ // Reserve a buffer to get the spelling.
+ SmallString<128> FilenameBuffer;
+ bool Invalid = false;
+ StringRef Filename = getSpelling(FilenameTok, FilenameBuffer, &Invalid);
+ if (Invalid)
+ return;
+
+ bool isAngled =
+ GetIncludeFilenameSpelling(FilenameTok.getLocation(), Filename);
+ // If GetIncludeFilenameSpelling set the start ptr to null, there was an
+ // error.
+ if (Filename.empty())
+ return;
+
+ // Search include directories for this file.
+ const DirectoryLookup *CurDir;
+ const FileEntry *File = LookupFile(Filename, isAngled, 0, CurDir, NULL, NULL,
+ NULL);
+ if (File == 0) {
+ if (!SuppressIncludeNotFoundError)
+ Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
+ return;
+ }
+
+ const FileEntry *CurFile = getCurrentFileLexer()->getFileEntry();
+
+ // If this file is older than the file it depends on, emit a diagnostic.
+ if (CurFile && CurFile->getModificationTime() < File->getModificationTime()) {
+ // Lex tokens at the end of the message and include them in the message.
+ std::string Message;
+ Lex(DependencyTok);
+ while (DependencyTok.isNot(tok::eod)) {
+ Message += getSpelling(DependencyTok) + " ";
+ Lex(DependencyTok);
+ }
+
+ // Remove the trailing ' ' if present.
+ if (!Message.empty())
+ Message.erase(Message.end()-1);
+ Diag(FilenameTok, diag::pp_out_of_date_dependency) << Message;
+ }
+}
+
+/// HandlePragmaComment - Handle the microsoft #pragma comment extension. The
+/// syntax is:
+/// #pragma comment(linker, "foo")
+/// 'linker' is one of five identifiers: compiler, exestr, lib, linker, user.
+/// "foo" is a string, which is fully macro expanded, and permits string
+/// concatenation, embedded escape characters etc. See MSDN for more details.
+void Preprocessor::HandlePragmaComment(Token &Tok) {
+ SourceLocation CommentLoc = Tok.getLocation();
+ Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(CommentLoc, diag::err_pragma_comment_malformed);
+ return;
+ }
+
+ // Read the identifier.
+ Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ Diag(CommentLoc, diag::err_pragma_comment_malformed);
+ return;
+ }
+
+ // Verify that this is one of the 5 whitelisted options.
+ // FIXME: warn that 'exestr' is deprecated.
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (!II->isStr("compiler") && !II->isStr("exestr") && !II->isStr("lib") &&
+ !II->isStr("linker") && !II->isStr("user")) {
+ Diag(Tok.getLocation(), diag::err_pragma_comment_unknown_kind);
+ return;
+ }
+
+ // Read the optional string if present.
+ Lex(Tok);
+ std::string ArgumentString;
+ if (Tok.is(tok::comma)) {
+ Lex(Tok); // eat the comma.
+
+ // We need at least one string.
+ if (Tok.isNot(tok::string_literal)) {
+ Diag(Tok.getLocation(), diag::err_pragma_comment_malformed);
+ return;
+ }
+
+ // String concatenation allows multiple strings, which can even come from
+ // macro expansion.
+ // "foo " "bar" "Baz"
+ SmallVector<Token, 4> StrToks;
+ while (Tok.is(tok::string_literal)) {
+ if (Tok.hasUDSuffix())
+ Diag(Tok, diag::err_invalid_string_udl);
+ StrToks.push_back(Tok);
+ Lex(Tok);
+ }
+
+ // Concatenate and parse the strings.
+ StringLiteralParser Literal(&StrToks[0], StrToks.size(), *this);
+ assert(Literal.isAscii() && "Didn't allow wide strings in");
+ if (Literal.hadError)
+ return;
+ if (Literal.Pascal) {
+ Diag(StrToks[0].getLocation(), diag::err_pragma_comment_malformed);
+ return;
+ }
+
+ ArgumentString = Literal.GetString();
+ }
+
+ // FIXME: If the kind is "compiler" warn if the string is present (it is
+ // ignored).
+ // FIXME: 'lib' requires a comment string.
+ // FIXME: 'linker' requires a comment string, and has a specific list of
+ // things that are allowable.
+
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok.getLocation(), diag::err_pragma_comment_malformed);
+ return;
+ }
+ Lex(Tok); // eat the r_paren.
+
+ if (Tok.isNot(tok::eod)) {
+ Diag(Tok.getLocation(), diag::err_pragma_comment_malformed);
+ return;
+ }
+
+ // If the pragma is lexically sound, notify any interested PPCallbacks.
+ if (Callbacks)
+ Callbacks->PragmaComment(CommentLoc, II, ArgumentString);
+}
+
+/// HandlePragmaMessage - Handle the microsoft and gcc #pragma message
+/// extension. The syntax is:
+/// #pragma message(string)
+/// OR, in GCC mode:
+/// #pragma message string
+/// string is a string, which is fully macro expanded, and permits string
+/// concatenation, embedded escape characters, etc... See MSDN for more details.
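+/// For example (illustrative only):
+///   #pragma message("TODO: revisit " __FILE__)
+///   #pragma message "GCC-style message"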
+void Preprocessor::HandlePragmaMessage(Token &Tok) {
+ SourceLocation MessageLoc = Tok.getLocation();
+ Lex(Tok);
+ bool ExpectClosingParen = false;
+ switch (Tok.getKind()) {
+ case tok::l_paren:
+ // We have a MSVC style pragma message.
+ ExpectClosingParen = true;
+ // Read the string.
+ Lex(Tok);
+ break;
+ case tok::string_literal:
+ // We have a GCC style pragma message, and we just read the string.
+ break;
+ default:
+ Diag(MessageLoc, diag::err_pragma_message_malformed);
+ return;
+ }
+
+ // We need at least one string.
+ if (Tok.isNot(tok::string_literal)) {
+ Diag(Tok.getLocation(), diag::err_pragma_message_malformed);
+ return;
+ }
+
+ // String concatenation allows multiple strings, which can even come from
+ // macro expansion.
+ // "foo " "bar" "Baz"
+ SmallVector<Token, 4> StrToks;
+ while (Tok.is(tok::string_literal)) {
+ if (Tok.hasUDSuffix())
+ Diag(Tok, diag::err_invalid_string_udl);
+ StrToks.push_back(Tok);
+ Lex(Tok);
+ }
+
+ // Concatenate and parse the strings.
+ StringLiteralParser Literal(&StrToks[0], StrToks.size(), *this);
+ assert(Literal.isAscii() && "Didn't allow wide strings in");
+ if (Literal.hadError)
+ return;
+ if (Literal.Pascal) {
+ Diag(StrToks[0].getLocation(), diag::err_pragma_message_malformed);
+ return;
+ }
+
+ StringRef MessageString(Literal.GetString());
+
+ if (ExpectClosingParen) {
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok.getLocation(), diag::err_pragma_message_malformed);
+ return;
+ }
+ Lex(Tok); // eat the r_paren.
+ }
+
+ if (Tok.isNot(tok::eod)) {
+ Diag(Tok.getLocation(), diag::err_pragma_message_malformed);
+ return;
+ }
+
+ // Output the message.
+ Diag(MessageLoc, diag::warn_pragma_message) << MessageString;
+
+ // If the pragma is lexically sound, notify any interested PPCallbacks.
+ if (Callbacks)
+ Callbacks->PragmaMessage(MessageLoc, MessageString);
+}
+
+/// ParsePragmaPushOrPopMacro - Handle parsing of pragma push_macro/pop_macro.
+/// Return the IdentifierInfo* associated with the macro to push or pop.
+IdentifierInfo *Preprocessor::ParsePragmaPushOrPopMacro(Token &Tok) {
+ // Remember the pragma token location.
+ Token PragmaTok = Tok;
+
+ // Read the '('.
+ Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(PragmaTok.getLocation(), diag::err_pragma_push_pop_macro_malformed)
+ << getSpelling(PragmaTok);
+ return 0;
+ }
+
+ // Read the macro name string.
+ Lex(Tok);
+ if (Tok.isNot(tok::string_literal)) {
+ Diag(PragmaTok.getLocation(), diag::err_pragma_push_pop_macro_malformed)
+ << getSpelling(PragmaTok);
+ return 0;
+ }
+
+ if (Tok.hasUDSuffix()) {
+ Diag(Tok, diag::err_invalid_string_udl);
+ return 0;
+ }
+
+ // Remember the macro string.
+ std::string StrVal = getSpelling(Tok);
+
+ // Read the ')'.
+ Lex(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(PragmaTok.getLocation(), diag::err_pragma_push_pop_macro_malformed)
+ << getSpelling(PragmaTok);
+ return 0;
+ }
+
+ assert(StrVal[0] == '"' && StrVal[StrVal.size()-1] == '"' &&
+ "Invalid string token!");
+
+ // Create a Token from the string.
+ Token MacroTok;
+ MacroTok.startToken();
+ MacroTok.setKind(tok::raw_identifier);
+ CreateString(&StrVal[1], StrVal.size() - 2, MacroTok);
+
+ // Get the IdentifierInfo for the macro name token.
+ return LookUpIdentifierInfo(MacroTok);
+}
+
+/// HandlePragmaPushMacro - Handle #pragma push_macro.
+/// The syntax is:
+/// #pragma push_macro("macro")
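+/// For example (illustrative only), paired with pop_macro:
+///   #pragma push_macro("X")  // save the current definition of X
+///   #undef X                 // use or redefine X locally
+///   #pragma pop_macro("X")   // restore the saved definition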
+void Preprocessor::HandlePragmaPushMacro(Token &PushMacroTok) {
+ // Parse the pragma directive and get the macro IdentifierInfo*.
+ IdentifierInfo *IdentInfo = ParsePragmaPushOrPopMacro(PushMacroTok);
+ if (!IdentInfo) return;
+
+ // Get the MacroInfo associated with IdentInfo.
+ MacroInfo *MI = getMacroInfo(IdentInfo);
+
+ MacroInfo *MacroCopyToPush = 0;
+ if (MI) {
+ // Make a clone of MI.
+ MacroCopyToPush = CloneMacroInfo(*MI);
+
+ // Allow the original MacroInfo to be redefined later.
+ MI->setIsAllowRedefinitionsWithoutWarning(true);
+ }
+
+ // Push the cloned MacroInfo so we can retrieve it later.
+ PragmaPushMacroInfo[IdentInfo].push_back(MacroCopyToPush);
+}
+
+/// HandlePragmaPopMacro - Handle #pragma pop_macro.
+/// The syntax is:
+/// #pragma pop_macro("macro")
+void Preprocessor::HandlePragmaPopMacro(Token &PopMacroTok) {
+ SourceLocation MessageLoc = PopMacroTok.getLocation();
+
+ // Parse the pragma directive and get the macro IdentifierInfo*.
+ IdentifierInfo *IdentInfo = ParsePragmaPushOrPopMacro(PopMacroTok);
+ if (!IdentInfo) return;
+
+ // Find the vector<MacroInfo*> associated with the macro.
+ llvm::DenseMap<IdentifierInfo*, std::vector<MacroInfo*> >::iterator iter =
+ PragmaPushMacroInfo.find(IdentInfo);
+ if (iter != PragmaPushMacroInfo.end()) {
+ // Release the MacroInfo currently associated with IdentInfo.
+ MacroInfo *CurrentMI = getMacroInfo(IdentInfo);
+ if (CurrentMI) {
+ if (CurrentMI->isWarnIfUnused())
+ WarnUnusedMacroLocs.erase(CurrentMI->getDefinitionLoc());
+ ReleaseMacroInfo(CurrentMI);
+ }
+
+ // Get the MacroInfo we want to reinstall.
+ MacroInfo *MacroToReInstall = iter->second.back();
+
+ // Reinstall the previously pushed macro.
+ setMacroInfo(IdentInfo, MacroToReInstall);
+
+ // Pop PragmaPushMacroInfo stack.
+ iter->second.pop_back();
+ if (iter->second.size() == 0)
+ PragmaPushMacroInfo.erase(iter);
+ } else {
+ Diag(MessageLoc, diag::warn_pragma_pop_macro_no_push)
+ << IdentInfo->getName();
+ }
+}
+
+void Preprocessor::HandlePragmaIncludeAlias(Token &Tok) {
+ // We will either get a quoted filename or a bracketed filename, and we
+ // have to track which we got. The first filename is the source name,
+ // and the second name is the mapped filename. If the first is quoted,
+ // the second must be as well (cannot mix and match quotes and brackets).
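+ // For example (illustrative only):
+ //   #pragma include_alias("oldname.h", "newname.h")
+ //   #pragma include_alias(<oldname.h>, <newname.h>)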
+
+ // Get the open paren
+ Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::warn_pragma_include_alias_expected) << "(";
+ return;
+ }
+
+ // We expect either a quoted string literal, or a bracketed name
+ Token SourceFilenameTok;
+ CurPPLexer->LexIncludeFilename(SourceFilenameTok);
+ if (SourceFilenameTok.is(tok::eod)) {
+ // The diagnostic has already been handled
+ return;
+ }
+
+ StringRef SourceFileName;
+ SmallString<128> FileNameBuffer;
+ if (SourceFilenameTok.is(tok::string_literal) ||
+ SourceFilenameTok.is(tok::angle_string_literal)) {
+ SourceFileName = getSpelling(SourceFilenameTok, FileNameBuffer);
+ } else if (SourceFilenameTok.is(tok::less)) {
+ // This could be a path instead of just a name
+ FileNameBuffer.push_back('<');
+ SourceLocation End;
+ if (ConcatenateIncludeName(FileNameBuffer, End))
+ return; // Diagnostic already emitted
+ SourceFileName = FileNameBuffer.str();
+ } else {
+ Diag(Tok, diag::warn_pragma_include_alias_expected_filename);
+ return;
+ }
+ FileNameBuffer.clear();
+
+ // Now we expect a comma, followed by another include name
+ Lex(Tok);
+ if (Tok.isNot(tok::comma)) {
+ Diag(Tok, diag::warn_pragma_include_alias_expected) << ",";
+ return;
+ }
+
+ Token ReplaceFilenameTok;
+ CurPPLexer->LexIncludeFilename(ReplaceFilenameTok);
+ if (ReplaceFilenameTok.is(tok::eod)) {
+ // The diagnostic has already been handled
+ return;
+ }
+
+ StringRef ReplaceFileName;
+ if (ReplaceFilenameTok.is(tok::string_literal) ||
+ ReplaceFilenameTok.is(tok::angle_string_literal)) {
+ ReplaceFileName = getSpelling(ReplaceFilenameTok, FileNameBuffer);
+ } else if (ReplaceFilenameTok.is(tok::less)) {
+ // This could be a path instead of just a name
+ FileNameBuffer.push_back('<');
+ SourceLocation End;
+ if (ConcatenateIncludeName(FileNameBuffer, End))
+ return; // Diagnostic already emitted
+ ReplaceFileName = FileNameBuffer.str();
+ } else {
+ Diag(Tok, diag::warn_pragma_include_alias_expected_filename);
+ return;
+ }
+
+ // Finally, we expect the closing paren
+ Lex(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::warn_pragma_include_alias_expected) << ")";
+ return;
+ }
+
+ // Now that we have the source and target filenames, we need to make sure
+ // they're both of the same type (angled vs non-angled)
+ StringRef OriginalSource = SourceFileName;
+
+ bool SourceIsAngled =
+ GetIncludeFilenameSpelling(SourceFilenameTok.getLocation(),
+ SourceFileName);
+ bool ReplaceIsAngled =
+ GetIncludeFilenameSpelling(ReplaceFilenameTok.getLocation(),
+ ReplaceFileName);
+ if (!SourceFileName.empty() && !ReplaceFileName.empty() &&
+ (SourceIsAngled != ReplaceIsAngled)) {
+ unsigned int DiagID;
+ if (SourceIsAngled)
+ DiagID = diag::warn_pragma_include_alias_mismatch_angle;
+ else
+ DiagID = diag::warn_pragma_include_alias_mismatch_quote;
+
+ Diag(SourceFilenameTok.getLocation(), DiagID)
+ << SourceFileName
+ << ReplaceFileName;
+
+ return;
+ }
+
+ // Now we can let the include handler know about this mapping
+ getHeaderSearchInfo().AddIncludeAlias(OriginalSource, ReplaceFileName);
+}
+
+/// AddPragmaHandler - Add the specified pragma handler to the preprocessor.
+/// If 'Namespace' is non-empty, then it is a token required to exist on the
+/// pragma line before the pragma string starts, e.g. "STDC" or "GCC".
+void Preprocessor::AddPragmaHandler(StringRef Namespace,
+ PragmaHandler *Handler) {
+ PragmaNamespace *InsertNS = PragmaHandlers;
+
+ // If this is specified to be in a namespace, step down into it.
+ if (!Namespace.empty()) {
+ // If there is already a pragma handler with the name of this namespace,
+ // we either have an error (directive with the same name as a namespace) or
+ // we already have the namespace to insert into.
+ if (PragmaHandler *Existing = PragmaHandlers->FindHandler(Namespace)) {
+ InsertNS = Existing->getIfNamespace();
+ assert(InsertNS != 0 && "Cannot have a pragma namespace and pragma"
+ " handler with the same name!");
+ } else {
+ // Otherwise, this namespace doesn't exist yet, create and insert the
+ // handler for it.
+ InsertNS = new PragmaNamespace(Namespace);
+ PragmaHandlers->AddPragma(InsertNS);
+ }
+ }
+
+ // Check to make sure we don't already have a pragma for this identifier.
+ assert(!InsertNS->FindHandler(Handler->getName()) &&
+ "Pragma handler already exists for this identifier!");
+ InsertNS->AddPragma(Handler);
+}
+
+/// RemovePragmaHandler - Remove the specific pragma handler from the
+/// preprocessor. If \arg Namespace is non-empty, then it should be the
+/// namespace that \arg Handler was added to. It is an error to remove
+/// a handler that has not been registered.
+void Preprocessor::RemovePragmaHandler(StringRef Namespace,
+ PragmaHandler *Handler) {
+ PragmaNamespace *NS = PragmaHandlers;
+
+ // If this is specified to be in a namespace, step down into it.
+ if (!Namespace.empty()) {
+ PragmaHandler *Existing = PragmaHandlers->FindHandler(Namespace);
+ assert(Existing && "Namespace containing handler does not exist!");
+
+ NS = Existing->getIfNamespace();
+ assert(NS && "Invalid namespace, registered as a regular pragma handler!");
+ }
+
+ NS->RemovePragmaHandler(Handler);
+
+ // If this is a non-default namespace and it is now empty, remove
+ // it.
+ if (NS != PragmaHandlers && NS->IsEmpty()) {
+ PragmaHandlers->RemovePragmaHandler(NS);
+ delete NS;
+ }
+}
+
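+/// LexOnOffSwitch - Lex an ON/OFF/DEFAULT argument for an STDC-style pragma
+/// (e.g. '#pragma STDC FENV_ACCESS ON') and verify it is followed by EOD.
+/// Returns true if the argument was malformed.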
+bool Preprocessor::LexOnOffSwitch(tok::OnOffSwitch &Result) {
+ Token Tok;
+ LexUnexpandedToken(Tok);
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::ext_on_off_switch_syntax);
+ return true;
+ }
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (II->isStr("ON"))
+ Result = tok::OOS_ON;
+ else if (II->isStr("OFF"))
+ Result = tok::OOS_OFF;
+ else if (II->isStr("DEFAULT"))
+ Result = tok::OOS_DEFAULT;
+ else {
+ Diag(Tok, diag::ext_on_off_switch_syntax);
+ return true;
+ }
+
+ // Verify that this is followed by EOD.
+ LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::eod))
+ Diag(Tok, diag::ext_pragma_syntax_eod);
+ return false;
+}
+
+namespace {
+/// PragmaOnceHandler - "#pragma once" marks the file as atomically included.
+struct PragmaOnceHandler : public PragmaHandler {
+ PragmaOnceHandler() : PragmaHandler("once") {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &OnceTok) {
+ PP.CheckEndOfDirective("pragma once");
+ PP.HandlePragmaOnce(OnceTok);
+ }
+};
+
+/// PragmaMarkHandler - "#pragma mark ..." is ignored by the compiler, and the
+/// rest of the line is not lexed.
+struct PragmaMarkHandler : public PragmaHandler {
+ PragmaMarkHandler() : PragmaHandler("mark") {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &MarkTok) {
+ PP.HandlePragmaMark();
+ }
+};
+
+/// PragmaPoisonHandler - "#pragma poison x" marks x as not usable.
+struct PragmaPoisonHandler : public PragmaHandler {
+ PragmaPoisonHandler() : PragmaHandler("poison") {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &PoisonTok) {
+ PP.HandlePragmaPoison(PoisonTok);
+ }
+};
+
+/// PragmaSystemHeaderHandler - "#pragma system_header" marks the current file
+/// as a system header, which silences warnings in it.
+struct PragmaSystemHeaderHandler : public PragmaHandler {
+ PragmaSystemHeaderHandler() : PragmaHandler("system_header") {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &SHToken) {
+ PP.HandlePragmaSystemHeader(SHToken);
+ PP.CheckEndOfDirective("pragma");
+ }
+};
+struct PragmaDependencyHandler : public PragmaHandler {
+ PragmaDependencyHandler() : PragmaHandler("dependency") {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &DepToken) {
+ PP.HandlePragmaDependency(DepToken);
+ }
+};
+
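+/// PragmaDebugHandler - "#pragma clang __debug <command>" deliberately
+/// triggers asserts, crashes, fatal errors or a stack overflow so that the
+/// compiler's crash handling can be exercised; see the commands below.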
+struct PragmaDebugHandler : public PragmaHandler {
+ PragmaDebugHandler() : PragmaHandler("__debug") {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &DepToken) {
+ Token Tok;
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok, diag::warn_pragma_diagnostic_invalid);
+ return;
+ }
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ if (II->isStr("assert")) {
+ llvm_unreachable("This is an assertion!");
+ } else if (II->isStr("crash")) {
+ *(volatile int*) 0x11 = 0;
+ } else if (II->isStr("llvm_fatal_error")) {
+ llvm::report_fatal_error("#pragma clang __debug llvm_fatal_error");
+ } else if (II->isStr("llvm_unreachable")) {
+ llvm_unreachable("#pragma clang __debug llvm_unreachable");
+ } else if (II->isStr("overflow_stack")) {
+ DebugOverflowStack();
+ } else if (II->isStr("handle_crash")) {
+ llvm::CrashRecoveryContext *CRC =llvm::CrashRecoveryContext::GetCurrent();
+ if (CRC)
+ CRC->HandleCrash();
+ } else {
+ PP.Diag(Tok, diag::warn_pragma_debug_unexpected_command)
+ << II->getName();
+ }
+ }
+
+// Disable MSVC warning about runtime stack overflow.
+#ifdef _MSC_VER
+ #pragma warning(disable : 4717)
+#endif
+ void DebugOverflowStack() {
+ DebugOverflowStack();
+ }
+#ifdef _MSC_VER
+ #pragma warning(default : 4717)
+#endif
+
+};
+
+/// PragmaDiagnosticHandler - e.g. '#pragma GCC diagnostic ignored "-Wformat"'
+struct PragmaDiagnosticHandler : public PragmaHandler {
+private:
+ const char *Namespace;
+public:
+ explicit PragmaDiagnosticHandler(const char *NS) :
+ PragmaHandler("diagnostic"), Namespace(NS) {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &DiagToken) {
+ SourceLocation DiagLoc = DiagToken.getLocation();
+ Token Tok;
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok, diag::warn_pragma_diagnostic_invalid);
+ return;
+ }
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ PPCallbacks *Callbacks = PP.getPPCallbacks();
+
+ diag::Mapping Map;
+ if (II->isStr("warning"))
+ Map = diag::MAP_WARNING;
+ else if (II->isStr("error"))
+ Map = diag::MAP_ERROR;
+ else if (II->isStr("ignored"))
+ Map = diag::MAP_IGNORE;
+ else if (II->isStr("fatal"))
+ Map = diag::MAP_FATAL;
+ else if (II->isStr("pop")) {
+ if (!PP.getDiagnostics().popMappings(DiagLoc))
+ PP.Diag(Tok, diag::warn_pragma_diagnostic_cannot_pop);
+ else if (Callbacks)
+ Callbacks->PragmaDiagnosticPop(DiagLoc, Namespace);
+ return;
+ } else if (II->isStr("push")) {
+ PP.getDiagnostics().pushMappings(DiagLoc);
+ if (Callbacks)
+ Callbacks->PragmaDiagnosticPush(DiagLoc, Namespace);
+ return;
+ } else {
+ PP.Diag(Tok, diag::warn_pragma_diagnostic_invalid);
+ return;
+ }
+
+ PP.LexUnexpandedToken(Tok);
+
+ // We need at least one string.
+ if (Tok.isNot(tok::string_literal)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_diagnostic_invalid_token);
+ return;
+ }
+
+ // String concatenation allows multiple strings, which can even come from
+ // macro expansion.
+ // "foo " "bar" "Baz"
+ SmallVector<Token, 4> StrToks;
+ while (Tok.is(tok::string_literal)) {
+ StrToks.push_back(Tok);
+ PP.LexUnexpandedToken(Tok);
+ }
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_diagnostic_invalid_token);
+ return;
+ }
+
+ // Concatenate and parse the strings.
+ StringLiteralParser Literal(&StrToks[0], StrToks.size(), PP);
+ assert(Literal.isAscii() && "Didn't allow wide strings in");
+ if (Literal.hadError)
+ return;
+ if (Literal.Pascal) {
+ PP.Diag(Tok, diag::warn_pragma_diagnostic_invalid);
+ return;
+ }
+
+ StringRef WarningName(Literal.GetString());
+
+ if (WarningName.size() < 3 || WarningName[0] != '-' ||
+ WarningName[1] != 'W') {
+ PP.Diag(StrToks[0].getLocation(),
+ diag::warn_pragma_diagnostic_invalid_option);
+ return;
+ }
+
+ if (PP.getDiagnostics().setDiagnosticGroupMapping(WarningName.substr(2),
+ Map, DiagLoc))
+ PP.Diag(StrToks[0].getLocation(),
+ diag::warn_pragma_diagnostic_unknown_warning) << WarningName;
+ else if (Callbacks)
+ Callbacks->PragmaDiagnostic(DiagLoc, Namespace, Map, WarningName);
+ }
+};
+
+/// PragmaCommentHandler - "#pragma comment ...".
+struct PragmaCommentHandler : public PragmaHandler {
+ PragmaCommentHandler() : PragmaHandler("comment") {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &CommentTok) {
+ PP.HandlePragmaComment(CommentTok);
+ }
+};
+
+/// PragmaIncludeAliasHandler - "#pragma include_alias("...")".
+struct PragmaIncludeAliasHandler : public PragmaHandler {
+ PragmaIncludeAliasHandler() : PragmaHandler("include_alias") {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &IncludeAliasTok) {
+ PP.HandlePragmaIncludeAlias(IncludeAliasTok);
+ }
+};
+
+/// PragmaMessageHandler - "#pragma message("...")".
+struct PragmaMessageHandler : public PragmaHandler {
+ PragmaMessageHandler() : PragmaHandler("message") {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &CommentTok) {
+ PP.HandlePragmaMessage(CommentTok);
+ }
+};
+
+/// PragmaPushMacroHandler - "#pragma push_macro" saves the value of the
+/// macro on the top of the stack.
+struct PragmaPushMacroHandler : public PragmaHandler {
+ PragmaPushMacroHandler() : PragmaHandler("push_macro") {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &PushMacroTok) {
+ PP.HandlePragmaPushMacro(PushMacroTok);
+ }
+};
+
+
+/// PragmaPopMacroHandler - "#pragma pop_macro" sets the value of the
+/// macro to the value on the top of the stack.
+struct PragmaPopMacroHandler : public PragmaHandler {
+ PragmaPopMacroHandler() : PragmaHandler("pop_macro") {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &PopMacroTok) {
+ PP.HandlePragmaPopMacro(PopMacroTok);
+ }
+};
+
+// Pragma STDC implementations.
+
+/// PragmaSTDC_FENV_ACCESSHandler - "#pragma STDC FENV_ACCESS ...".
+struct PragmaSTDC_FENV_ACCESSHandler : public PragmaHandler {
+ PragmaSTDC_FENV_ACCESSHandler() : PragmaHandler("FENV_ACCESS") {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ tok::OnOffSwitch OOS;
+ if (PP.LexOnOffSwitch(OOS))
+ return;
+ if (OOS == tok::OOS_ON)
+ PP.Diag(Tok, diag::warn_stdc_fenv_access_not_supported);
+ }
+};
+
+/// PragmaSTDC_CX_LIMITED_RANGEHandler - "#pragma STDC CX_LIMITED_RANGE ...".
+struct PragmaSTDC_CX_LIMITED_RANGEHandler : public PragmaHandler {
+ PragmaSTDC_CX_LIMITED_RANGEHandler()
+ : PragmaHandler("CX_LIMITED_RANGE") {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ tok::OnOffSwitch OOS;
+ PP.LexOnOffSwitch(OOS);
+ }
+};
+
+/// PragmaSTDC_UnknownHandler - "#pragma STDC ...".
+struct PragmaSTDC_UnknownHandler : public PragmaHandler {
+ PragmaSTDC_UnknownHandler() {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &UnknownTok) {
+ // C99 6.10.6p2, unknown forms are not allowed.
+ PP.Diag(UnknownTok, diag::ext_stdc_pragma_ignored);
+ }
+};
+
+/// PragmaARCCFCodeAuditedHandler -
+/// #pragma clang arc_cf_code_audited begin/end
+struct PragmaARCCFCodeAuditedHandler : public PragmaHandler {
+ PragmaARCCFCodeAuditedHandler() : PragmaHandler("arc_cf_code_audited") {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &NameTok) {
+ SourceLocation Loc = NameTok.getLocation();
+ bool IsBegin;
+
+ Token Tok;
+
+ // Lex the 'begin' or 'end'.
+ PP.LexUnexpandedToken(Tok);
+ const IdentifierInfo *BeginEnd = Tok.getIdentifierInfo();
+ if (BeginEnd && BeginEnd->isStr("begin")) {
+ IsBegin = true;
+ } else if (BeginEnd && BeginEnd->isStr("end")) {
+ IsBegin = false;
+ } else {
+ PP.Diag(Tok.getLocation(), diag::err_pp_arc_cf_code_audited_syntax);
+ return;
+ }
+
+ // Verify that this is followed by EOD.
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::eod))
+ PP.Diag(Tok, diag::ext_pp_extra_tokens_at_eol) << "pragma";
+
+ // The start location of the active audit.
+ SourceLocation BeginLoc = PP.getPragmaARCCFCodeAuditedLoc();
+
+ // The start location we want after processing this.
+ SourceLocation NewLoc;
+
+ if (IsBegin) {
+ // Complain about attempts to re-enter an audit.
+ if (BeginLoc.isValid()) {
+ PP.Diag(Loc, diag::err_pp_double_begin_of_arc_cf_code_audited);
+ PP.Diag(BeginLoc, diag::note_pragma_entered_here);
+ }
+ NewLoc = Loc;
+ } else {
+ // Complain about attempts to leave an audit that doesn't exist.
+ if (!BeginLoc.isValid()) {
+ PP.Diag(Loc, diag::err_pp_unmatched_end_of_arc_cf_code_audited);
+ return;
+ }
+ NewLoc = SourceLocation();
+ }
+
+ PP.setPragmaARCCFCodeAuditedLoc(NewLoc);
+ }
+};
+
+} // end anonymous namespace
+
+
+/// RegisterBuiltinPragmas - Install the standard preprocessor pragmas:
+/// #pragma once/mark/push_macro/pop_macro/message, the GCC, clang and STDC
+/// pragma namespaces, and (in Microsoft mode) #pragma comment/include_alias.
+void Preprocessor::RegisterBuiltinPragmas() {
+ AddPragmaHandler(new PragmaOnceHandler());
+ AddPragmaHandler(new PragmaMarkHandler());
+ AddPragmaHandler(new PragmaPushMacroHandler());
+ AddPragmaHandler(new PragmaPopMacroHandler());
+ AddPragmaHandler(new PragmaMessageHandler());
+
+ // #pragma GCC ...
+ AddPragmaHandler("GCC", new PragmaPoisonHandler());
+ AddPragmaHandler("GCC", new PragmaSystemHeaderHandler());
+ AddPragmaHandler("GCC", new PragmaDependencyHandler());
+ AddPragmaHandler("GCC", new PragmaDiagnosticHandler("GCC"));
+ // #pragma clang ...
+ AddPragmaHandler("clang", new PragmaPoisonHandler());
+ AddPragmaHandler("clang", new PragmaSystemHeaderHandler());
+ AddPragmaHandler("clang", new PragmaDebugHandler());
+ AddPragmaHandler("clang", new PragmaDependencyHandler());
+ AddPragmaHandler("clang", new PragmaDiagnosticHandler("clang"));
+ AddPragmaHandler("clang", new PragmaARCCFCodeAuditedHandler());
+
+ AddPragmaHandler("STDC", new PragmaSTDC_FENV_ACCESSHandler());
+ AddPragmaHandler("STDC", new PragmaSTDC_CX_LIMITED_RANGEHandler());
+ AddPragmaHandler("STDC", new PragmaSTDC_UnknownHandler());
+
+ // MS extensions.
+ if (LangOpts.MicrosoftExt) {
+ AddPragmaHandler(new PragmaCommentHandler());
+ AddPragmaHandler(new PragmaIncludeAliasHandler());
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp b/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp
new file mode 100644
index 0000000..89d19fd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PreprocessingRecord.cpp
@@ -0,0 +1,519 @@
+//===--- PreprocessingRecord.cpp - Record of Preprocessing ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PreprocessingRecord class, which maintains a record
+// of what occurred during preprocessing, and its helpers.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Lex/PreprocessingRecord.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Token.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Capacity.h"
+
+using namespace clang;
+
+ExternalPreprocessingRecordSource::~ExternalPreprocessingRecordSource() { }
+
+
+InclusionDirective::InclusionDirective(PreprocessingRecord &PPRec,
+ InclusionKind Kind,
+ StringRef FileName,
+ bool InQuotes, const FileEntry *File,
+ SourceRange Range)
+ : PreprocessingDirective(InclusionDirectiveKind, Range),
+ InQuotes(InQuotes), Kind(Kind), File(File)
+{
+ char *Memory
+ = (char*)PPRec.Allocate(FileName.size() + 1, llvm::alignOf<char>());
+ memcpy(Memory, FileName.data(), FileName.size());
+ Memory[FileName.size()] = 0;
+ this->FileName = StringRef(Memory, FileName.size());
+}
+
+PreprocessingRecord::PreprocessingRecord(SourceManager &SM,
+ bool RecordConditionalDirectives)
+ : SourceMgr(SM),
+ RecordCondDirectives(RecordConditionalDirectives), CondDirectiveNextIdx(0),
+ ExternalSource(0)
+{
+ if (RecordCondDirectives)
+ CondDirectiveStack.push_back(CondDirectiveNextIdx++);
+}
+
+/// \brief Returns a pair of [Begin, End) iterators of preprocessed entities
+/// that source range \arg Range encompasses.
+std::pair<PreprocessingRecord::iterator, PreprocessingRecord::iterator>
+PreprocessingRecord::getPreprocessedEntitiesInRange(SourceRange Range) {
+ if (Range.isInvalid())
+ return std::make_pair(iterator(), iterator());
+
+ if (CachedRangeQuery.Range == Range) {
+ return std::make_pair(iterator(this, CachedRangeQuery.Result.first),
+ iterator(this, CachedRangeQuery.Result.second));
+ }
+
+ std::pair<PPEntityID, PPEntityID>
+ Res = getPreprocessedEntitiesInRangeSlow(Range);
+
+ CachedRangeQuery.Range = Range;
+ CachedRangeQuery.Result = Res;
+
+ return std::make_pair(iterator(this, Res.first), iterator(this, Res.second));
+}
+
+static bool isPreprocessedEntityIfInFileID(PreprocessedEntity *PPE, FileID FID,
+ SourceManager &SM) {
+ assert(!FID.isInvalid());
+ if (!PPE)
+ return false;
+
+ SourceLocation Loc = PPE->getSourceRange().getBegin();
+ if (Loc.isInvalid())
+ return false;
+
+ if (SM.isInFileID(SM.getFileLoc(Loc), FID))
+ return true;
+ else
+ return false;
+}
+
+/// \brief Returns true if the preprocessed entity that \arg PPEI iterator
+/// points to is coming from the file \arg FID.
+///
+/// Can be used to avoid implicit deserializations of preallocated
+/// preprocessed entities if we only care about entities of a specific file
+/// and not from files #included in the range given at
+/// \see getPreprocessedEntitiesInRange.
+bool PreprocessingRecord::isEntityInFileID(iterator PPEI, FileID FID) {
+ if (FID.isInvalid())
+ return false;
+
+ PPEntityID PPID = PPEI.Position;
+ if (PPID < 0) {
+ assert(unsigned(-PPID-1) < LoadedPreprocessedEntities.size() &&
+ "Out-of bounds loaded preprocessed entity");
+ assert(ExternalSource && "No external source to load from");
+ unsigned LoadedIndex = LoadedPreprocessedEntities.size()+PPID;
+ if (PreprocessedEntity *PPE = LoadedPreprocessedEntities[LoadedIndex])
+ return isPreprocessedEntityIfInFileID(PPE, FID, SourceMgr);
+
+ // Ask the external source whether the entity is in the file, without
+ // deserializing it.
+ llvm::Optional<bool>
+ IsInFile = ExternalSource->isPreprocessedEntityInFileID(LoadedIndex, FID);
+ if (IsInFile.hasValue())
+ return IsInFile.getValue();
+
+ // The external source did not provide a definite answer, so deserialize
+ // the entity and check it directly.
+ return isPreprocessedEntityIfInFileID(
+ getLoadedPreprocessedEntity(LoadedIndex),
+ FID, SourceMgr);
+ }
+
+ assert(unsigned(PPID) < PreprocessedEntities.size() &&
+ "Out-of bounds local preprocessed entity");
+ return isPreprocessedEntityIfInFileID(PreprocessedEntities[PPID],
+ FID, SourceMgr);
+}
+
+/// \brief Returns a pair of [Begin, End) iterators of preprocessed entities
+/// that source range \arg Range encompasses.
+std::pair<PreprocessingRecord::PPEntityID, PreprocessingRecord::PPEntityID>
+PreprocessingRecord::getPreprocessedEntitiesInRangeSlow(SourceRange Range) {
+ assert(Range.isValid());
+ assert(!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(),Range.getBegin()));
+
+ std::pair<unsigned, unsigned>
+ Local = findLocalPreprocessedEntitiesInRange(Range);
+
+ // If there is no external source, or the range begins in a local file, it
+ // can only span local entities.
+ if (!ExternalSource || SourceMgr.isLocalSourceLocation(Range.getBegin()))
+ return std::make_pair(Local.first, Local.second);
+
+ std::pair<unsigned, unsigned>
+ Loaded = ExternalSource->findPreprocessedEntitiesInRange(Range);
+
+ // If no loaded entities are in the range, it only spans local entities.
+ if (Loaded.first == Loaded.second)
+ return std::make_pair(Local.first, Local.second);
+
+ unsigned TotalLoaded = LoadedPreprocessedEntities.size();
+
+ // Check if range spans loaded entities.
+ if (Local.first == Local.second)
+ return std::make_pair(int(Loaded.first)-TotalLoaded,
+ int(Loaded.second)-TotalLoaded);
+
+ // Range spans loaded and local entities.
+ return std::make_pair(int(Loaded.first)-TotalLoaded, Local.second);
+}
+
+std::pair<unsigned, unsigned>
+PreprocessingRecord::findLocalPreprocessedEntitiesInRange(
+ SourceRange Range) const {
+ if (Range.isInvalid())
+ return std::make_pair(0,0);
+ assert(!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(),Range.getBegin()));
+
+ unsigned Begin = findBeginLocalPreprocessedEntity(Range.getBegin());
+ unsigned End = findEndLocalPreprocessedEntity(Range.getEnd());
+ return std::make_pair(Begin, End);
+}
+
+namespace {
+
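+/// \brief Orders preprocessed entities (and raw SourceLocations) by the
+/// boundary of their source range selected by \p getRangeLoc, using
+/// SourceManager::isBeforeInTranslationUnit.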
+template <SourceLocation (SourceRange::*getRangeLoc)() const>
+struct PPEntityComp {
+ const SourceManager &SM;
+
+ explicit PPEntityComp(const SourceManager &SM) : SM(SM) { }
+
+ bool operator()(PreprocessedEntity *L, PreprocessedEntity *R) const {
+ SourceLocation LHS = getLoc(L);
+ SourceLocation RHS = getLoc(R);
+ return SM.isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ bool operator()(PreprocessedEntity *L, SourceLocation RHS) const {
+ SourceLocation LHS = getLoc(L);
+ return SM.isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ bool operator()(SourceLocation LHS, PreprocessedEntity *R) const {
+ SourceLocation RHS = getLoc(R);
+ return SM.isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ SourceLocation getLoc(PreprocessedEntity *PPE) const {
+ SourceRange Range = PPE->getSourceRange();
+ return (Range.*getRangeLoc)();
+ }
+};
+
+}
+
+unsigned PreprocessingRecord::findBeginLocalPreprocessedEntity(
+ SourceLocation Loc) const {
+ if (SourceMgr.isLoadedSourceLocation(Loc))
+ return 0;
+
+ size_t Count = PreprocessedEntities.size();
+ size_t Half;
+ std::vector<PreprocessedEntity *>::const_iterator
+ First = PreprocessedEntities.begin();
+ std::vector<PreprocessedEntity *>::const_iterator I;
+
+ // Do a binary search manually instead of using std::lower_bound because
+ // the end locations of entities may be unordered (when a macro expansion
+ // is inside another macro argument), but in that case it is not important
+ // whether we get the first macro expansion or its containing macro.
+ while (Count > 0) {
+ Half = Count/2;
+ I = First;
+ std::advance(I, Half);
+ if (SourceMgr.isBeforeInTranslationUnit((*I)->getSourceRange().getEnd(),
+ Loc)){
+ First = I;
+ ++First;
+ Count = Count - Half - 1;
+ } else
+ Count = Half;
+ }
+
+ return First - PreprocessedEntities.begin();
+}
+
+unsigned PreprocessingRecord::findEndLocalPreprocessedEntity(
+ SourceLocation Loc) const {
+ if (SourceMgr.isLoadedSourceLocation(Loc))
+ return 0;
+
+ std::vector<PreprocessedEntity *>::const_iterator
+ I = std::upper_bound(PreprocessedEntities.begin(),
+ PreprocessedEntities.end(),
+ Loc,
+ PPEntityComp<&SourceRange::getBegin>(SourceMgr));
+ return I - PreprocessedEntities.begin();
+}
+
+PreprocessingRecord::PPEntityID
+PreprocessingRecord::addPreprocessedEntity(PreprocessedEntity *Entity) {
+ assert(Entity);
+ SourceLocation BeginLoc = Entity->getSourceRange().getBegin();
+
+ if (!isa<class InclusionDirective>(Entity)) {
+ assert((PreprocessedEntities.empty() ||
+ !SourceMgr.isBeforeInTranslationUnit(BeginLoc,
+ PreprocessedEntities.back()->getSourceRange().getBegin())) &&
+ "a macro directive was encountered out-of-order");
+ PreprocessedEntities.push_back(Entity);
+ return getPPEntityID(PreprocessedEntities.size()-1, /*isLoaded=*/false);
+ }
+
+ // Check the normal case: this entity's begin location is after the previous
+ // one's.
+ if (PreprocessedEntities.empty() ||
+ !SourceMgr.isBeforeInTranslationUnit(BeginLoc,
+ PreprocessedEntities.back()->getSourceRange().getBegin())) {
+ PreprocessedEntities.push_back(Entity);
+ return getPPEntityID(PreprocessedEntities.size()-1, /*isLoaded=*/false);
+ }
+
+ // The entity's location is not after the previous one; this can happen with
+ // include directives that form the filename using macros, e.g.:
+ // "#include MACRO(STUFF)".
+
+ typedef std::vector<PreprocessedEntity *>::iterator pp_iter;
+
+ // Usually there are only a few macro expansions when forming the filename,
+ // so do a linear search over the last few entities.
+ unsigned count = 0;
+ for (pp_iter RI = PreprocessedEntities.end(),
+ Begin = PreprocessedEntities.begin();
+ RI != Begin && count < 4; --RI, ++count) {
+ pp_iter I = RI;
+ --I;
+ if (!SourceMgr.isBeforeInTranslationUnit(BeginLoc,
+ (*I)->getSourceRange().getBegin())) {
+ pp_iter insertI = PreprocessedEntities.insert(RI, Entity);
+ return getPPEntityID(insertI - PreprocessedEntities.begin(),
+ /*isLoaded=*/false);
+ }
+ }
+
+ // Linear search unsuccessful. Do a binary search.
+ pp_iter I = std::upper_bound(PreprocessedEntities.begin(),
+ PreprocessedEntities.end(),
+ BeginLoc,
+ PPEntityComp<&SourceRange::getBegin>(SourceMgr));
+ pp_iter insertI = PreprocessedEntities.insert(I, Entity);
+ return getPPEntityID(insertI - PreprocessedEntities.begin(),
+ /*isLoaded=*/false);
+}
+
+void PreprocessingRecord::SetExternalSource(
+ ExternalPreprocessingRecordSource &Source) {
+ assert(!ExternalSource &&
+ "Preprocessing record already has an external source");
+ ExternalSource = &Source;
+}
+
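+/// \brief Reserve space for NumEntities loaded preprocessed entities and
+/// return the index of the first newly-reserved slot.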
+unsigned PreprocessingRecord::allocateLoadedEntities(unsigned NumEntities) {
+ unsigned Result = LoadedPreprocessedEntities.size();
+ LoadedPreprocessedEntities.resize(LoadedPreprocessedEntities.size()
+ + NumEntities);
+ return Result;
+}
+
+void PreprocessingRecord::RegisterMacroDefinition(MacroInfo *Macro,
+ PPEntityID PPID) {
+ MacroDefinitions[Macro] = PPID;
+}
+
+/// \brief Retrieve the preprocessed entity at the given ID.
+PreprocessedEntity *PreprocessingRecord::getPreprocessedEntity(PPEntityID PPID){
+ if (PPID < 0) {
+ assert(unsigned(-PPID-1) < LoadedPreprocessedEntities.size() &&
+ "Out-of bounds loaded preprocessed entity");
+ return getLoadedPreprocessedEntity(LoadedPreprocessedEntities.size()+PPID);
+ }
+ assert(unsigned(PPID) < PreprocessedEntities.size() &&
+ "Out-of bounds local preprocessed entity");
+ return PreprocessedEntities[PPID];
+}
+
+/// \brief Retrieve the loaded preprocessed entity at the given index.
+PreprocessedEntity *
+PreprocessingRecord::getLoadedPreprocessedEntity(unsigned Index) {
+ assert(Index < LoadedPreprocessedEntities.size() &&
+ "Out-of bounds loaded preprocessed entity");
+ assert(ExternalSource && "No external source to load from");
+ PreprocessedEntity *&Entity = LoadedPreprocessedEntities[Index];
+ if (!Entity) {
+ Entity = ExternalSource->ReadPreprocessedEntity(Index);
+ if (!Entity) // Failed to load.
+ Entity = new (*this)
+ PreprocessedEntity(PreprocessedEntity::InvalidKind, SourceRange());
+ }
+ return Entity;
+}
+
+MacroDefinition *PreprocessingRecord::findMacroDefinition(const MacroInfo *MI) {
+ llvm::DenseMap<const MacroInfo *, PPEntityID>::iterator Pos
+ = MacroDefinitions.find(MI);
+ if (Pos == MacroDefinitions.end())
+ return 0;
+
+ PreprocessedEntity *Entity = getPreprocessedEntity(Pos->second);
+ if (Entity->isInvalid())
+ return 0;
+ return cast<MacroDefinition>(Entity);
+}
+
+void PreprocessingRecord::MacroExpands(const Token &Id, const MacroInfo* MI,
+ SourceRange Range) {
+ // We don't record nested macro expansions.
+ if (Id.getLocation().isMacroID())
+ return;
+
+ if (MI->isBuiltinMacro())
+ addPreprocessedEntity(
+ new (*this) MacroExpansion(Id.getIdentifierInfo(),Range));
+ else if (MacroDefinition *Def = findMacroDefinition(MI))
+ addPreprocessedEntity(
+ new (*this) MacroExpansion(Def, Range));
+}
+
+void PreprocessingRecord::MacroDefined(const Token &Id,
+ const MacroInfo *MI) {
+ SourceRange R(MI->getDefinitionLoc(), MI->getDefinitionEndLoc());
+ MacroDefinition *Def
+ = new (*this) MacroDefinition(Id.getIdentifierInfo(), R);
+ MacroDefinitions[MI] = addPreprocessedEntity(Def);
+}
+
+void PreprocessingRecord::MacroUndefined(const Token &Id,
+ const MacroInfo *MI) {
+ MacroDefinitions.erase(MI);
+}
+
+void PreprocessingRecord::InclusionDirective(
+ SourceLocation HashLoc,
+ const clang::Token &IncludeTok,
+ StringRef FileName,
+ bool IsAngled,
+ const FileEntry *File,
+ clang::SourceLocation EndLoc,
+ StringRef SearchPath,
+ StringRef RelativePath) {
+ InclusionDirective::InclusionKind Kind = InclusionDirective::Include;
+
+ switch (IncludeTok.getIdentifierInfo()->getPPKeywordID()) {
+ case tok::pp_include:
+ Kind = InclusionDirective::Include;
+ break;
+
+ case tok::pp_import:
+ Kind = InclusionDirective::Import;
+ break;
+
+ case tok::pp_include_next:
+ Kind = InclusionDirective::IncludeNext;
+ break;
+
+ case tok::pp___include_macros:
+ Kind = InclusionDirective::IncludeMacros;
+ break;
+
+ default:
+ llvm_unreachable("Unknown include directive kind");
+ }
+
+ clang::InclusionDirective *ID
+ = new (*this) clang::InclusionDirective(*this, Kind, FileName, !IsAngled,
+ File, SourceRange(HashLoc, EndLoc));
+ addPreprocessedEntity(ID);
+}
+
+bool PreprocessingRecord::rangeIntersectsConditionalDirective(
+ SourceRange Range) const {
+ if (Range.isInvalid())
+ return false;
+
+ CondDirectiveLocsTy::const_iterator
+ low = std::lower_bound(CondDirectiveLocs.begin(), CondDirectiveLocs.end(),
+ Range.getBegin(), CondDirectiveLoc::Comp(SourceMgr));
+ if (low == CondDirectiveLocs.end())
+ return false;
+
+ if (SourceMgr.isBeforeInTranslationUnit(Range.getEnd(), low->getLoc()))
+ return false;
+
+ CondDirectiveLocsTy::const_iterator
+ upp = std::upper_bound(low, CondDirectiveLocs.end(),
+ Range.getEnd(), CondDirectiveLoc::Comp(SourceMgr));
+ unsigned uppIdx;
+ if (upp != CondDirectiveLocs.end())
+ uppIdx = upp->getIdx();
+ else
+ uppIdx = 0;
+
+ return low->getIdx() != uppIdx;
+}
+
+unsigned PreprocessingRecord::findCondDirectiveIdx(SourceLocation Loc) const {
+ if (Loc.isInvalid())
+ return 0;
+
+ CondDirectiveLocsTy::const_iterator
+ low = std::lower_bound(CondDirectiveLocs.begin(), CondDirectiveLocs.end(),
+ Loc, CondDirectiveLoc::Comp(SourceMgr));
+ if (low == CondDirectiveLocs.end())
+ return 0;
+ return low->getIdx();
+}
+
+void PreprocessingRecord::addCondDirectiveLoc(CondDirectiveLoc DirLoc) {
+ // Ignore directives in system headers.
+ if (SourceMgr.isInSystemHeader(DirLoc.getLoc()))
+ return;
+
+ assert(CondDirectiveLocs.empty() ||
+ SourceMgr.isBeforeInTranslationUnit(CondDirectiveLocs.back().getLoc(),
+ DirLoc.getLoc()));
+ CondDirectiveLocs.push_back(DirLoc);
+}
+
+void PreprocessingRecord::If(SourceLocation Loc, SourceRange ConditionRange) {
+ if (RecordCondDirectives) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ CondDirectiveStack.push_back(CondDirectiveNextIdx++);
+ }
+}
+
+void PreprocessingRecord::Ifdef(SourceLocation Loc, const Token &MacroNameTok) {
+ if (RecordCondDirectives) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ CondDirectiveStack.push_back(CondDirectiveNextIdx++);
+ }
+}
+
+void PreprocessingRecord::Ifndef(SourceLocation Loc,const Token &MacroNameTok) {
+ if (RecordCondDirectives) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ CondDirectiveStack.push_back(CondDirectiveNextIdx++);
+ }
+}
+
+void PreprocessingRecord::Elif(SourceLocation Loc, SourceRange ConditionRange,
+ SourceLocation IfLoc) {
+ if (RecordCondDirectives)
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+}
+
+void PreprocessingRecord::Else(SourceLocation Loc, SourceLocation IfLoc) {
+ if (RecordCondDirectives)
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+}
+
+void PreprocessingRecord::Endif(SourceLocation Loc, SourceLocation IfLoc) {
+ if (RecordCondDirectives) {
+ addCondDirectiveLoc(CondDirectiveLoc(Loc, CondDirectiveStack.back()));
+ assert(!CondDirectiveStack.empty());
+ CondDirectiveStack.pop_back();
+ }
+}
+
+size_t PreprocessingRecord::getTotalMemory() const {
+ return BumpAlloc.getTotalMemory()
+ + llvm::capacity_in_bytes(MacroDefinitions)
+ + llvm::capacity_in_bytes(PreprocessedEntities)
+ + llvm::capacity_in_bytes(LoadedPreprocessedEntities);
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
new file mode 100644
index 0000000..06e5685
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/Preprocessor.cpp
@@ -0,0 +1,666 @@
+//===--- Preprocessor.cpp - C Language Family Preprocessor Implementation -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Preprocessor interface.
+//
+//===----------------------------------------------------------------------===//
+//
+// Options to support:
+// -H - Print the name of each header file used.
+// -d[DNI] - Dump various things.
+// -fworking-directory - #line's with preprocessor's working dir.
+// -fpreprocessed
+// -dependency-file,-M,-MM,-MF,-MG,-MP,-MT,-MQ,-MD,-MMD
+// -W*
+// -w
+//
+// Messages to emit:
+// "Multiple include guards may be useful for:\n"
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+#include "MacroArgs.h"
+#include "clang/Lex/ExternalPreprocessorSource.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Pragma.h"
+#include "clang/Lex/PreprocessingRecord.h"
+#include "clang/Lex/ScratchBuffer.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/CodeCompletionHandler.h"
+#include "clang/Lex/ModuleLoader.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Capacity.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+ExternalPreprocessorSource::~ExternalPreprocessorSource() { }
+
+Preprocessor::Preprocessor(DiagnosticsEngine &diags, LangOptions &opts,
+ const TargetInfo *target, SourceManager &SM,
+ HeaderSearch &Headers, ModuleLoader &TheModuleLoader,
+ IdentifierInfoLookup* IILookup,
+ bool OwnsHeaders,
+ bool DelayInitialization,
+ bool IncrProcessing)
+ : Diags(&diags), LangOpts(opts), Target(target),FileMgr(Headers.getFileMgr()),
+ SourceMgr(SM), HeaderInfo(Headers), TheModuleLoader(TheModuleLoader),
+ ExternalSource(0), Identifiers(opts, IILookup),
+ IncrementalProcessing(IncrProcessing), CodeComplete(0),
+ CodeCompletionFile(0), CodeCompletionOffset(0), CodeCompletionReached(0),
+ SkipMainFilePreamble(0, true), CurPPLexer(0),
+ CurDirLookup(0), CurLexerKind(CLK_Lexer), Callbacks(0), MacroArgCache(0),
+ Record(0), MIChainHead(0), MICache(0)
+{
+ OwnsHeaderSearch = OwnsHeaders;
+
+ if (!DelayInitialization) {
+ assert(Target && "Must provide target information for PP initialization");
+ Initialize(*Target);
+ }
+}
+
+Preprocessor::~Preprocessor() {
+ assert(BacktrackPositions.empty() && "EnableBacktrack/Backtrack imbalance!");
+
+ while (!IncludeMacroStack.empty()) {
+ delete IncludeMacroStack.back().TheLexer;
+ delete IncludeMacroStack.back().TheTokenLexer;
+ IncludeMacroStack.pop_back();
+ }
+
+ // Free any macro definitions.
+ for (MacroInfoChain *I = MIChainHead ; I ; I = I->Next)
+ I->MI.Destroy();
+
+ // Free any cached macro expanders.
+ for (unsigned i = 0, e = NumCachedTokenLexers; i != e; ++i)
+ delete TokenLexerCache[i];
+
+ // Free any cached MacroArgs.
+ for (MacroArgs *ArgList = MacroArgCache; ArgList; )
+ ArgList = ArgList->deallocate();
+
+ // Release pragma information.
+ delete PragmaHandlers;
+
+ // Delete the scratch buffer info.
+ delete ScratchBuf;
+
+ // Delete the header search info, if we own it.
+ if (OwnsHeaderSearch)
+ delete &HeaderInfo;
+
+ delete Callbacks;
+}
+
+void Preprocessor::Initialize(const TargetInfo &Target) {
+ assert((!this->Target || this->Target == &Target) &&
+ "Invalid override of target information");
+ this->Target = &Target;
+
+ // Initialize information about built-ins.
+ BuiltinInfo.InitializeTarget(Target);
+
+ ScratchBuf = new ScratchBuffer(SourceMgr);
+ CounterValue = 0; // __COUNTER__ starts at 0.
+
+ // Clear stats.
+ NumDirectives = NumDefined = NumUndefined = NumPragma = 0;
+ NumIf = NumElse = NumEndif = 0;
+ NumEnteredSourceFiles = 0;
+ NumMacroExpanded = NumFnMacroExpanded = NumBuiltinMacroExpanded = 0;
+ NumFastMacroExpanded = NumTokenPaste = NumFastTokenPaste = 0;
+ MaxIncludeStackDepth = 0;
+ NumSkipped = 0;
+
+ // Default to discarding comments.
+ KeepComments = false;
+ KeepMacroComments = false;
+ SuppressIncludeNotFoundError = false;
+
+ // Macro expansion is enabled.
+ DisableMacroExpansion = false;
+ InMacroArgs = false;
+ InMacroArgPreExpansion = false;
+ NumCachedTokenLexers = 0;
+
+ CachedLexPos = 0;
+
+ // We haven't read anything from the external source.
+ ReadMacrosFromExternalSource = false;
+
+ // "Poison" __VA_ARGS__, which can only appear in the expansion of a macro.
+ // This gets unpoisoned where it is allowed.
+ (Ident__VA_ARGS__ = getIdentifierInfo("__VA_ARGS__"))->setIsPoisoned();
+ SetPoisonReason(Ident__VA_ARGS__,diag::ext_pp_bad_vaargs_use);
+
+ // Initialize the pragma handlers.
+ PragmaHandlers = new PragmaNamespace(StringRef());
+ RegisterBuiltinPragmas();
+
+ // Initialize builtin macros like __LINE__ and friends.
+ RegisterBuiltinMacros();
+
+ if (LangOpts.Borland) {
+ Ident__exception_info = getIdentifierInfo("_exception_info");
+ Ident___exception_info = getIdentifierInfo("__exception_info");
+ Ident_GetExceptionInfo = getIdentifierInfo("GetExceptionInformation");
+ Ident__exception_code = getIdentifierInfo("_exception_code");
+ Ident___exception_code = getIdentifierInfo("__exception_code");
+ Ident_GetExceptionCode = getIdentifierInfo("GetExceptionCode");
+ Ident__abnormal_termination = getIdentifierInfo("_abnormal_termination");
+ Ident___abnormal_termination = getIdentifierInfo("__abnormal_termination");
+ Ident_AbnormalTermination = getIdentifierInfo("AbnormalTermination");
+ } else {
+ Ident__exception_info = Ident__exception_code = Ident__abnormal_termination = 0;
+ Ident___exception_info = Ident___exception_code = Ident___abnormal_termination = 0;
+ Ident_GetExceptionInfo = Ident_GetExceptionCode = Ident_AbnormalTermination = 0;
+ }
+
+ HeaderInfo.setTarget(Target);
+}
+
+void Preprocessor::setPTHManager(PTHManager* pm) {
+ PTH.reset(pm);
+ FileMgr.addStatCache(PTH->createStatCache());
+}
+
+void Preprocessor::DumpToken(const Token &Tok, bool DumpFlags) const {
+ llvm::errs() << tok::getTokenName(Tok.getKind()) << " '"
+ << getSpelling(Tok) << "'";
+
+ if (!DumpFlags) return;
+
+ llvm::errs() << "\t";
+ if (Tok.isAtStartOfLine())
+ llvm::errs() << " [StartOfLine]";
+ if (Tok.hasLeadingSpace())
+ llvm::errs() << " [LeadingSpace]";
+ if (Tok.isExpandDisabled())
+ llvm::errs() << " [ExpandDisabled]";
+ if (Tok.needsCleaning()) {
+ const char *Start = SourceMgr.getCharacterData(Tok.getLocation());
+ llvm::errs() << " [UnClean='" << StringRef(Start, Tok.getLength())
+ << "']";
+ }
+
+ llvm::errs() << "\tLoc=<";
+ DumpLocation(Tok.getLocation());
+ llvm::errs() << ">";
+}
+
+void Preprocessor::DumpLocation(SourceLocation Loc) const {
+ Loc.dump(SourceMgr);
+}
+
+void Preprocessor::DumpMacro(const MacroInfo &MI) const {
+ llvm::errs() << "MACRO: ";
+ for (unsigned i = 0, e = MI.getNumTokens(); i != e; ++i) {
+ DumpToken(MI.getReplacementToken(i));
+ llvm::errs() << " ";
+ }
+ llvm::errs() << "\n";
+}
+
+void Preprocessor::PrintStats() {
+ llvm::errs() << "\n*** Preprocessor Stats:\n";
+ llvm::errs() << NumDirectives << " directives found:\n";
+ llvm::errs() << " " << NumDefined << " #define.\n";
+ llvm::errs() << " " << NumUndefined << " #undef.\n";
+ llvm::errs() << " #include/#include_next/#import:\n";
+ llvm::errs() << " " << NumEnteredSourceFiles << " source files entered.\n";
+ llvm::errs() << " " << MaxIncludeStackDepth << " max include stack depth\n";
+ llvm::errs() << " " << NumIf << " #if/#ifndef/#ifdef.\n";
+ llvm::errs() << " " << NumElse << " #else/#elif.\n";
+ llvm::errs() << " " << NumEndif << " #endif.\n";
+ llvm::errs() << " " << NumPragma << " #pragma.\n";
+ llvm::errs() << NumSkipped << " #if/#ifndef/#ifdef regions skipped\n";
+
+ llvm::errs() << NumMacroExpanded << "/" << NumFnMacroExpanded << "/"
+ << NumBuiltinMacroExpanded << " obj/fn/builtin macros expanded, "
+ << NumFastMacroExpanded << " on the fast path.\n";
+ llvm::errs() << (NumFastTokenPaste+NumTokenPaste)
+ << " token paste (##) operations performed, "
+ << NumFastTokenPaste << " on the fast path.\n";
+}
+
+Preprocessor::macro_iterator
+Preprocessor::macro_begin(bool IncludeExternalMacros) const {
+ if (IncludeExternalMacros && ExternalSource &&
+ !ReadMacrosFromExternalSource) {
+ ReadMacrosFromExternalSource = true;
+ ExternalSource->ReadDefinedMacros();
+ }
+
+ return Macros.begin();
+}
+
+size_t Preprocessor::getTotalMemory() const {
+ return BP.getTotalMemory()
+ + llvm::capacity_in_bytes(MacroExpandedTokens)
+ + Predefines.capacity() /* Predefines buffer. */
+ + llvm::capacity_in_bytes(Macros)
+ + llvm::capacity_in_bytes(PragmaPushMacroInfo)
+ + llvm::capacity_in_bytes(PoisonReasons)
+ + llvm::capacity_in_bytes(CommentHandlers);
+}
+
+Preprocessor::macro_iterator
+Preprocessor::macro_end(bool IncludeExternalMacros) const {
+ if (IncludeExternalMacros && ExternalSource &&
+ !ReadMacrosFromExternalSource) {
+ ReadMacrosFromExternalSource = true;
+ ExternalSource->ReadDefinedMacros();
+ }
+
+ return Macros.end();
+}
+
+void Preprocessor::recomputeCurLexerKind() {
+ if (CurLexer)
+ CurLexerKind = CLK_Lexer;
+ else if (CurPTHLexer)
+ CurLexerKind = CLK_PTHLexer;
+ else if (CurTokenLexer)
+ CurLexerKind = CLK_TokenLexer;
+ else
+ CurLexerKind = CLK_CachingLexer;
+}
+
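+/// SetCodeCompletionPoint - Replace the contents of \p File with a copy that
+/// has a '\0' inserted at the given line/column, marking the position where
+/// code completion was requested. Returns true if the file could not be read.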
+bool Preprocessor::SetCodeCompletionPoint(const FileEntry *File,
+ unsigned CompleteLine,
+ unsigned CompleteColumn) {
+ assert(File);
+ assert(CompleteLine && CompleteColumn && "Starts from 1:1");
+ assert(!CodeCompletionFile && "Already set");
+
+ using llvm::MemoryBuffer;
+
+ // Load the actual file's contents.
+ bool Invalid = false;
+ const MemoryBuffer *Buffer = SourceMgr.getMemoryBufferForFile(File, &Invalid);
+ if (Invalid)
+ return true;
+
+ // Find the byte position of the truncation point.
+ const char *Position = Buffer->getBufferStart();
+ for (unsigned Line = 1; Line < CompleteLine; ++Line) {
+ for (; *Position; ++Position) {
+ if (*Position != '\r' && *Position != '\n')
+ continue;
+
+ // Eat \r\n or \n\r as a single line.
+ if ((Position[1] == '\r' || Position[1] == '\n') &&
+ Position[0] != Position[1])
+ ++Position;
+ ++Position;
+ break;
+ }
+ }
+
+ Position += CompleteColumn - 1;
+
+ // Insert '\0' at the code-completion point.
+ if (Position < Buffer->getBufferEnd()) {
+ CodeCompletionFile = File;
+ CodeCompletionOffset = Position - Buffer->getBufferStart();
+
+ MemoryBuffer *NewBuffer =
+ MemoryBuffer::getNewUninitMemBuffer(Buffer->getBufferSize() + 1,
+ Buffer->getBufferIdentifier());
+ char *NewBuf = const_cast<char*>(NewBuffer->getBufferStart());
+ char *NewPos = std::copy(Buffer->getBufferStart(), Position, NewBuf);
+ *NewPos = '\0';
+ std::copy(Position, Buffer->getBufferEnd(), NewPos+1);
+ SourceMgr.overrideFileContents(File, NewBuffer);
+ }
+
+ return false;
+}
+
+void Preprocessor::CodeCompleteNaturalLanguage() {
+ if (CodeComplete)
+ CodeComplete->CodeCompleteNaturalLanguage();
+ setCodeCompletionReached();
+}
+
+/// getSpelling - This method is used to get the spelling of a token into a
+/// SmallVector. Note that the returned StringRef may not point to the
+/// supplied buffer if a copy can be avoided.
+StringRef Preprocessor::getSpelling(const Token &Tok,
+ SmallVectorImpl<char> &Buffer,
+ bool *Invalid) const {
+ // NOTE: this has to be checked *before* testing for an IdentifierInfo.
+ if (Tok.isNot(tok::raw_identifier)) {
+ // Try the fast path.
+ if (const IdentifierInfo *II = Tok.getIdentifierInfo())
+ return II->getName();
+ }
+
+ // Resize the buffer if we need to copy into it.
+ if (Tok.needsCleaning())
+ Buffer.resize(Tok.getLength());
+
+ const char *Ptr = Buffer.data();
+ unsigned Len = getSpelling(Tok, Ptr, Invalid);
+ return StringRef(Ptr, Len);
+}
+
+/// CreateString - Plop the specified string into a scratch buffer and return a
+/// location for it. If specified, the source location provides a source
+/// location for the token.
+void Preprocessor::CreateString(const char *Buf, unsigned Len, Token &Tok,
+ SourceLocation ExpansionLocStart,
+ SourceLocation ExpansionLocEnd) {
+ Tok.setLength(Len);
+
+ const char *DestPtr;
+ SourceLocation Loc = ScratchBuf->getToken(Buf, Len, DestPtr);
+
+ if (ExpansionLocStart.isValid())
+ Loc = SourceMgr.createExpansionLoc(Loc, ExpansionLocStart,
+ ExpansionLocEnd, Len);
+ Tok.setLocation(Loc);
+
+ // If this is a raw identifier or a literal token, set the pointer data.
+ if (Tok.is(tok::raw_identifier))
+ Tok.setRawIdentifierData(DestPtr);
+ else if (Tok.isLiteral())
+ Tok.setLiteralData(DestPtr);
+}
+
+Module *Preprocessor::getCurrentModule() {
+ if (getLangOpts().CurrentModule.empty())
+ return 0;
+
+ return getHeaderSearchInfo().lookupModule(getLangOpts().CurrentModule);
+}
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Initialization Methods
+//===----------------------------------------------------------------------===//
+
+
+/// EnterMainSourceFile - Enter the specified FileID as the main source file,
+/// which implicitly adds the builtin defines etc.
+void Preprocessor::EnterMainSourceFile() {
+ // We do not allow the preprocessor to reenter the main file. Doing so will
+ // cause FileIDs to accumulate information from both runs (e.g. #line
+ // information), and predefined macros aren't guaranteed to be set properly.
+ assert(NumEnteredSourceFiles == 0 && "Cannot reenter the main file!");
+ FileID MainFileID = SourceMgr.getMainFileID();
+
+  // If MainFileID is loaded, it means we loaded an AST file and there is no
+  // need to enter a main file.
+ if (!SourceMgr.isLoadedFileID(MainFileID)) {
+ // Enter the main file source buffer.
+ EnterSourceFile(MainFileID, 0, SourceLocation());
+
+ // If we've been asked to skip bytes in the main file (e.g., as part of a
+ // precompiled preamble), do so now.
+ if (SkipMainFilePreamble.first > 0)
+ CurLexer->SkipBytes(SkipMainFilePreamble.first,
+ SkipMainFilePreamble.second);
+
+ // Tell the header info that the main file was entered. If the file is later
+ // #imported, it won't be re-entered.
+ if (const FileEntry *FE = SourceMgr.getFileEntryForID(MainFileID))
+ HeaderInfo.IncrementIncludeCount(FE);
+ }
+
+ // Preprocess Predefines to populate the initial preprocessor state.
+ llvm::MemoryBuffer *SB =
+ llvm::MemoryBuffer::getMemBufferCopy(Predefines, "<built-in>");
+ assert(SB && "Cannot create predefined source buffer");
+ FileID FID = SourceMgr.createFileIDForMemBuffer(SB);
+ assert(!FID.isInvalid() && "Could not create FileID for predefines?");
+
+ // Start parsing the predefines.
+ EnterSourceFile(FID, 0, SourceLocation());
+}
+
+void Preprocessor::EndSourceFile() {
+ // Notify the client that we reached the end of the source file.
+ if (Callbacks)
+ Callbacks->EndOfMainFile();
+}
+
+//===----------------------------------------------------------------------===//
+// Lexer Event Handling.
+//===----------------------------------------------------------------------===//
+
+/// LookUpIdentifierInfo - Given a tok::raw_identifier token, look up the
+/// identifier information for the token and install it into the token,
+/// updating the token kind accordingly.
+IdentifierInfo *Preprocessor::LookUpIdentifierInfo(Token &Identifier) const {
+ assert(Identifier.getRawIdentifierData() != 0 && "No raw identifier data!");
+
+ // Look up this token, see if it is a macro, or if it is a language keyword.
+ IdentifierInfo *II;
+ if (!Identifier.needsCleaning()) {
+ // No cleaning needed, just use the characters from the lexed buffer.
+ II = getIdentifierInfo(StringRef(Identifier.getRawIdentifierData(),
+ Identifier.getLength()));
+ } else {
+ // Cleaning needed, alloca a buffer, clean into it, then use the buffer.
+ SmallString<64> IdentifierBuffer;
+ StringRef CleanedStr = getSpelling(Identifier, IdentifierBuffer);
+ II = getIdentifierInfo(CleanedStr);
+ }
+
+ // Update the token info (identifier info and appropriate token kind).
+ Identifier.setIdentifierInfo(II);
+ Identifier.setKind(II->getTokenID());
+
+ return II;
+}
+
+void Preprocessor::SetPoisonReason(IdentifierInfo *II, unsigned DiagID) {
+ PoisonReasons[II] = DiagID;
+}
+
+void Preprocessor::PoisonSEHIdentifiers(bool Poison) {
+ assert(Ident__exception_code && Ident__exception_info);
+ assert(Ident___exception_code && Ident___exception_info);
+ Ident__exception_code->setIsPoisoned(Poison);
+ Ident___exception_code->setIsPoisoned(Poison);
+ Ident_GetExceptionCode->setIsPoisoned(Poison);
+ Ident__exception_info->setIsPoisoned(Poison);
+ Ident___exception_info->setIsPoisoned(Poison);
+ Ident_GetExceptionInfo->setIsPoisoned(Poison);
+ Ident__abnormal_termination->setIsPoisoned(Poison);
+ Ident___abnormal_termination->setIsPoisoned(Poison);
+ Ident_AbnormalTermination->setIsPoisoned(Poison);
+}
+
+void Preprocessor::HandlePoisonedIdentifier(Token & Identifier) {
+ assert(Identifier.getIdentifierInfo() &&
+ "Can't handle identifiers without identifier info!");
+ llvm::DenseMap<IdentifierInfo*,unsigned>::const_iterator it =
+ PoisonReasons.find(Identifier.getIdentifierInfo());
+ if(it == PoisonReasons.end())
+ Diag(Identifier, diag::err_pp_used_poisoned_id);
+ else
+ Diag(Identifier,it->second) << Identifier.getIdentifierInfo();
+}
+
+/// HandleIdentifier - This callback is invoked when the lexer reads an
+/// identifier. This callback looks up the identifier in the map and/or
+/// potentially macro expands it or turns it into a named token (like 'for').
+///
+/// Note that callers of this method are guarded by checking the
+/// IdentifierInfo's 'isHandleIdentifierCase' bit. If this method changes, the
+/// IdentifierInfo methods that compute these properties will need to change to
+/// match.
+void Preprocessor::HandleIdentifier(Token &Identifier) {
+ assert(Identifier.getIdentifierInfo() &&
+ "Can't handle identifiers without identifier info!");
+
+ IdentifierInfo &II = *Identifier.getIdentifierInfo();
+
+ // If the information about this identifier is out of date, update it from
+ // the external source.
+ if (II.isOutOfDate()) {
+ ExternalSource->updateOutOfDateIdentifier(II);
+ Identifier.setKind(II.getTokenID());
+ }
+
+ // If this identifier was poisoned, and if it was not produced from a macro
+ // expansion, emit an error.
+ if (II.isPoisoned() && CurPPLexer) {
+ HandlePoisonedIdentifier(Identifier);
+ }
+
+ // If this is a macro to be expanded, do it.
+ if (MacroInfo *MI = getMacroInfo(&II)) {
+ if (!DisableMacroExpansion) {
+ if (Identifier.isExpandDisabled()) {
+ Diag(Identifier, diag::pp_disabled_macro_expansion);
+ } else if (MI->isEnabled()) {
+ if (!HandleMacroExpandedIdentifier(Identifier, MI))
+ return;
+ } else {
+ // C99 6.10.3.4p2 says that a disabled macro may never again be
+ // expanded, even if it's in a context where it could be expanded in the
+ // future.
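+        // e.g. in a hypothetical "#define f(x) f(x)", the inner 'f' produced
+        // by expanding f(y) is marked here and can never be expanded again.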
+ Identifier.setFlag(Token::DisableExpand);
+ Diag(Identifier, diag::pp_disabled_macro_expansion);
+ }
+ }
+ }
+
+ // If this identifier is a keyword in C++11, produce a warning. Don't warn if
+ // we're not considering macro expansion, since this identifier might be the
+ // name of a macro.
+ // FIXME: This warning is disabled in cases where it shouldn't be, like
+ // "#define constexpr constexpr", "int constexpr;"
+  if (II.isCXX11CompatKeyword() && !DisableMacroExpansion) {
+ Diag(Identifier, diag::warn_cxx11_keyword) << II.getName();
+ // Don't diagnose this keyword again in this translation unit.
+ II.setIsCXX11CompatKeyword(false);
+ }
+
+ // C++ 2.11p2: If this is an alternative representation of a C++ operator,
+ // then we act as if it is the actual operator and not the textual
+ // representation of it.
+ if (II.isCPlusPlusOperatorKeyword())
+ Identifier.setIdentifierInfo(0);
+
+ // If this is an extension token, diagnose its use.
+ // We avoid diagnosing tokens that originate from macro definitions.
+ // FIXME: This warning is disabled in cases where it shouldn't be,
+ // like "#define TY typeof", "TY(1) x".
+ if (II.isExtensionToken() && !DisableMacroExpansion)
+ Diag(Identifier, diag::ext_token_used);
+
+ // If this is the '__experimental_modules_import' contextual keyword, note
+ // that the next token indicates a module name.
+ //
+ // Note that we do not treat '__experimental_modules_import' as a contextual
+ // keyword when we're in a caching lexer, because caching lexers only get
+ // used in contexts where import declarations are disallowed.
+ if (II.isModulesImport() && !InMacroArgs && !DisableMacroExpansion &&
+ getLangOpts().Modules && CurLexerKind != CLK_CachingLexer) {
+ ModuleImportLoc = Identifier.getLocation();
+ ModuleImportPath.clear();
+ ModuleImportExpectsIdentifier = true;
+ CurLexerKind = CLK_LexAfterModuleImport;
+ }
+}
+
+/// \brief Lex a token following the 'import' contextual keyword.
+///
+void Preprocessor::LexAfterModuleImport(Token &Result) {
+ // Figure out what kind of lexer we actually have.
+ recomputeCurLexerKind();
+
+ // Lex the next token.
+ Lex(Result);
+
+ // The token sequence
+ //
+ // import identifier (. identifier)*
+ //
+ // indicates a module import directive. We already saw the 'import'
+ // contextual keyword, so now we're looking for the identifiers.
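+  //
+  // e.g. a hypothetical "__experimental_modules_import std.vector;" collects
+  // the path [std, vector] in ModuleImportPath before the loader is invoked.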
+ if (ModuleImportExpectsIdentifier && Result.getKind() == tok::identifier) {
+ // We expected to see an identifier here, and we did; continue handling
+ // identifiers.
+ ModuleImportPath.push_back(std::make_pair(Result.getIdentifierInfo(),
+ Result.getLocation()));
+ ModuleImportExpectsIdentifier = false;
+ CurLexerKind = CLK_LexAfterModuleImport;
+ return;
+ }
+
+ // If we're expecting a '.' or a ';', and we got a '.', then wait until we
+ // see the next identifier.
+ if (!ModuleImportExpectsIdentifier && Result.getKind() == tok::period) {
+ ModuleImportExpectsIdentifier = true;
+ CurLexerKind = CLK_LexAfterModuleImport;
+ return;
+ }
+
+ // If we have a non-empty module path, load the named module.
+ if (!ModuleImportPath.empty())
+ (void)TheModuleLoader.loadModule(ModuleImportLoc, ModuleImportPath,
+ Module::MacrosVisible,
+ /*IsIncludeDirective=*/false);
+}
+
+void Preprocessor::AddCommentHandler(CommentHandler *Handler) {
+ assert(Handler && "NULL comment handler");
+ assert(std::find(CommentHandlers.begin(), CommentHandlers.end(), Handler) ==
+ CommentHandlers.end() && "Comment handler already registered");
+ CommentHandlers.push_back(Handler);
+}
+
+void Preprocessor::RemoveCommentHandler(CommentHandler *Handler) {
+ std::vector<CommentHandler *>::iterator Pos
+ = std::find(CommentHandlers.begin(), CommentHandlers.end(), Handler);
+ assert(Pos != CommentHandlers.end() && "Comment handler not registered");
+ CommentHandlers.erase(Pos);
+}
+
+bool Preprocessor::HandleComment(Token &result, SourceRange Comment) {
+ bool AnyPendingTokens = false;
+ for (std::vector<CommentHandler *>::iterator H = CommentHandlers.begin(),
+ HEnd = CommentHandlers.end();
+ H != HEnd; ++H) {
+ if ((*H)->HandleComment(*this, Comment))
+ AnyPendingTokens = true;
+ }
+ if (!AnyPendingTokens || getCommentRetentionState())
+ return false;
+ Lex(result);
+ return true;
+}
+
+ModuleLoader::~ModuleLoader() { }
+
+CommentHandler::~CommentHandler() { }
+
+CodeCompletionHandler::~CodeCompletionHandler() { }
+
+void Preprocessor::createPreprocessingRecord(bool RecordConditionalDirectives) {
+ if (Record)
+ return;
+
+ Record = new PreprocessingRecord(getSourceManager(),
+ RecordConditionalDirectives);
+ addPPCallbacks(Record);
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp
new file mode 100644
index 0000000..a72bbca
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/PreprocessorLexer.cpp
@@ -0,0 +1,55 @@
+//===--- PreprocessorLexer.cpp - C Language Family Lexer ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the PreprocessorLexer and Token interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/PreprocessorLexer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+using namespace clang;
+
+void PreprocessorLexer::anchor() { }
+
+PreprocessorLexer::PreprocessorLexer(Preprocessor *pp, FileID fid)
+ : PP(pp), FID(fid), InitialNumSLocEntries(0),
+ ParsingPreprocessorDirective(false),
+ ParsingFilename(false), LexingRawMode(false) {
+ if (pp)
+ InitialNumSLocEntries = pp->getSourceManager().local_sloc_entry_size();
+}
+
+/// LexIncludeFilename - After the preprocessor has parsed a #include, lex and
+/// (potentially) macro expand the filename.
+void PreprocessorLexer::LexIncludeFilename(Token &FilenameTok) {
+ assert(ParsingPreprocessorDirective &&
+ ParsingFilename == false &&
+ "Must be in a preprocessing directive!");
+
+ // We are now parsing a filename!
+ ParsingFilename = true;
+
+ // Lex the filename.
+ IndirectLex(FilenameTok);
+
+ // We should have obtained the filename now.
+ ParsingFilename = false;
+
+ // No filename?
+ if (FilenameTok.is(tok::eod))
+ PP->Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
+}
+
+/// getFileEntry - Return the FileEntry corresponding to this FileID. Like
+/// getFileID(), this only works for lexers with attached preprocessors.
+const FileEntry *PreprocessorLexer::getFileEntry() const {
+ return PP->getSourceManager().getFileEntryForID(getFileID());
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/ScratchBuffer.cpp b/contrib/llvm/tools/clang/lib/Lex/ScratchBuffer.cpp
new file mode 100644
index 0000000..3d363fa
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/ScratchBuffer.cpp
@@ -0,0 +1,73 @@
+//===--- ScratchBuffer.cpp - Scratch space for forming tokens -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ScratchBuffer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/ScratchBuffer.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include <cstring>
+using namespace clang;
+
+// ScratchBufSize - The size of each chunk of scratch memory. Slightly less
+// than a page, almost certainly enough for anything. :)
+static const unsigned ScratchBufSize = 4060;
+
+ScratchBuffer::ScratchBuffer(SourceManager &SM) : SourceMgr(SM), CurBuffer(0) {
+ // Set BytesUsed so that the first call to getToken will require an alloc.
+ BytesUsed = ScratchBufSize;
+}
+
+/// getToken - Splat the specified text into a temporary MemoryBuffer and
+/// return a SourceLocation that refers to the token's physical location in
+/// the scratch buffer.  DestPtr is set to point at the copied token text.
+SourceLocation ScratchBuffer::getToken(const char *Buf, unsigned Len,
+ const char *&DestPtr) {
+ if (BytesUsed+Len+2 > ScratchBufSize)
+ AllocScratchBuffer(Len+2);
+
+ // Prefix the token with a \n, so that it looks like it is the first thing on
+ // its own virtual line in caret diagnostics.
+ CurBuffer[BytesUsed++] = '\n';
+
+ // Return a pointer to the character data.
+ DestPtr = CurBuffer+BytesUsed;
+
+ // Copy the token data into the buffer.
+ memcpy(CurBuffer+BytesUsed, Buf, Len);
+
+ // Remember that we used these bytes.
+ BytesUsed += Len+1;
+
+ // Add a NUL terminator to the token. This keeps the tokens separated, in
+ // case they get relexed, and puts them on their own virtual lines in case a
+ // diagnostic points to one.
+ CurBuffer[BytesUsed-1] = '\0';
+
+ return BufferStartLoc.getLocWithOffset(BytesUsed-Len-1);
+}
+
+void ScratchBuffer::AllocScratchBuffer(unsigned RequestLen) {
+ // Only pay attention to the requested length if it is larger than our default
+ // page size. If it is, we allocate an entire chunk for it. This is to
+ // support gigantic tokens, which almost certainly won't happen. :)
+ if (RequestLen < ScratchBufSize)
+ RequestLen = ScratchBufSize;
+
+ llvm::MemoryBuffer *Buf =
+ llvm::MemoryBuffer::getNewMemBuffer(RequestLen, "<scratch space>");
+ FileID FID = SourceMgr.createFileIDForMemBuffer(Buf);
+ BufferStartLoc = SourceMgr.getLocForStartOfFile(FID);
+ CurBuffer = const_cast<char*>(Buf->getBufferStart());
+ BytesUsed = 1;
+  CurBuffer[0] = '\0'; // Start out with a \0 for cleanliness.
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp b/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp
new file mode 100644
index 0000000..84a46ed
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/TokenConcatenation.cpp
@@ -0,0 +1,272 @@
+//===--- TokenConcatenation.cpp - Token Concatenation Avoidance -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TokenConcatenation class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/TokenConcatenation.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+
+
+/// IsStringPrefix - Return true if Str is one of the string-literal prefixes
+/// 'L', 'u', 'U', or 'u8', including the raw-string variants.
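+/// For example, an identifier spelled "L" followed by the string "foo" must
+/// be printed as L "foo", since L"foo" would re-lex as a single wide string
+/// literal.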
+static bool IsStringPrefix(StringRef Str, bool CPlusPlus0x) {
+
+ if (Str[0] == 'L' ||
+ (CPlusPlus0x && (Str[0] == 'u' || Str[0] == 'U' || Str[0] == 'R'))) {
+
+ if (Str.size() == 1)
+ return true; // "L", "u", "U", and "R"
+
+ // Check for raw flavors. Need to make sure the first character wasn't
+ // already R. Need CPlusPlus0x check for "LR".
+ if (Str[1] == 'R' && Str[0] != 'R' && Str.size() == 2 && CPlusPlus0x)
+ return true; // "LR", "uR", "UR"
+
+ // Check for "u8" and "u8R"
+ if (Str[0] == 'u' && Str[1] == '8') {
+ if (Str.size() == 2) return true; // "u8"
+ if (Str.size() == 3 && Str[2] == 'R') return true; // "u8R"
+ }
+ }
+
+ return false;
+}
+
+/// IsIdentifierStringPrefix - Return true if the spelling of the token
+/// is literally 'L', 'u', 'U', or 'u8', including the raw-string variants.
+bool TokenConcatenation::IsIdentifierStringPrefix(const Token &Tok) const {
+ const LangOptions &LangOpts = PP.getLangOpts();
+
+ if (!Tok.needsCleaning()) {
+ if (Tok.getLength() < 1 || Tok.getLength() > 3)
+ return false;
+ SourceManager &SM = PP.getSourceManager();
+ const char *Ptr = SM.getCharacterData(SM.getSpellingLoc(Tok.getLocation()));
+ return IsStringPrefix(StringRef(Ptr, Tok.getLength()),
+ LangOpts.CPlusPlus0x);
+ }
+
+ if (Tok.getLength() < 256) {
+ char Buffer[256];
+ const char *TokPtr = Buffer;
+ unsigned length = PP.getSpelling(Tok, TokPtr);
+ return IsStringPrefix(StringRef(TokPtr, length), LangOpts.CPlusPlus0x);
+ }
+
+ return IsStringPrefix(StringRef(PP.getSpelling(Tok)), LangOpts.CPlusPlus0x);
+}
+
+TokenConcatenation::TokenConcatenation(Preprocessor &pp) : PP(pp) {
+ memset(TokenInfo, 0, sizeof(TokenInfo));
+
+ // These tokens have custom code in AvoidConcat.
+ TokenInfo[tok::identifier ] |= aci_custom;
+ TokenInfo[tok::numeric_constant] |= aci_custom_firstchar;
+ TokenInfo[tok::period ] |= aci_custom_firstchar;
+ TokenInfo[tok::amp ] |= aci_custom_firstchar;
+ TokenInfo[tok::plus ] |= aci_custom_firstchar;
+ TokenInfo[tok::minus ] |= aci_custom_firstchar;
+ TokenInfo[tok::slash ] |= aci_custom_firstchar;
+ TokenInfo[tok::less ] |= aci_custom_firstchar;
+ TokenInfo[tok::greater ] |= aci_custom_firstchar;
+ TokenInfo[tok::pipe ] |= aci_custom_firstchar;
+ TokenInfo[tok::percent ] |= aci_custom_firstchar;
+ TokenInfo[tok::colon ] |= aci_custom_firstchar;
+ TokenInfo[tok::hash ] |= aci_custom_firstchar;
+ TokenInfo[tok::arrow ] |= aci_custom_firstchar;
+
+ // These tokens have custom code in C++11 mode.
+ if (PP.getLangOpts().CPlusPlus0x) {
+ TokenInfo[tok::string_literal ] |= aci_custom;
+ TokenInfo[tok::wide_string_literal ] |= aci_custom;
+ TokenInfo[tok::utf8_string_literal ] |= aci_custom;
+ TokenInfo[tok::utf16_string_literal] |= aci_custom;
+ TokenInfo[tok::utf32_string_literal] |= aci_custom;
+ TokenInfo[tok::char_constant ] |= aci_custom;
+ TokenInfo[tok::wide_char_constant ] |= aci_custom;
+ TokenInfo[tok::utf16_char_constant ] |= aci_custom;
+ TokenInfo[tok::utf32_char_constant ] |= aci_custom;
+ }
+
+ // These tokens change behavior if followed by an '='.
+ TokenInfo[tok::amp ] |= aci_avoid_equal; // &=
+ TokenInfo[tok::plus ] |= aci_avoid_equal; // +=
+ TokenInfo[tok::minus ] |= aci_avoid_equal; // -=
+ TokenInfo[tok::slash ] |= aci_avoid_equal; // /=
+ TokenInfo[tok::less ] |= aci_avoid_equal; // <=
+ TokenInfo[tok::greater ] |= aci_avoid_equal; // >=
+ TokenInfo[tok::pipe ] |= aci_avoid_equal; // |=
+ TokenInfo[tok::percent ] |= aci_avoid_equal; // %=
+ TokenInfo[tok::star ] |= aci_avoid_equal; // *=
+ TokenInfo[tok::exclaim ] |= aci_avoid_equal; // !=
+ TokenInfo[tok::lessless ] |= aci_avoid_equal; // <<=
+ TokenInfo[tok::greatergreater] |= aci_avoid_equal; // >>=
+ TokenInfo[tok::caret ] |= aci_avoid_equal; // ^=
+ TokenInfo[tok::equal ] |= aci_avoid_equal; // ==
+}
+
+/// GetFirstChar - Get the first character of the token \arg Tok,
+/// avoiding calls to getSpelling where possible.
+static char GetFirstChar(Preprocessor &PP, const Token &Tok) {
+ if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
+ // Avoid spelling identifiers, the most common form of token.
+ return II->getNameStart()[0];
+ } else if (!Tok.needsCleaning()) {
+ if (Tok.isLiteral() && Tok.getLiteralData()) {
+ return *Tok.getLiteralData();
+ } else {
+ SourceManager &SM = PP.getSourceManager();
+ return *SM.getCharacterData(SM.getSpellingLoc(Tok.getLocation()));
+ }
+ } else if (Tok.getLength() < 256) {
+ char Buffer[256];
+ const char *TokPtr = Buffer;
+ PP.getSpelling(Tok, TokPtr);
+ return TokPtr[0];
+ } else {
+ return PP.getSpelling(Tok)[0];
+ }
+}
+
+/// AvoidConcat - If printing PrevTok immediately followed by Tok would cause
+/// the two individual tokens to be lexed as a single token, return true
+/// (which causes a space to be printed between them). This allows the output
+/// of -E mode to be re-lexed into the same token stream that lexing the input
+/// directly would produce.
+///
+/// This code must conservatively return true if it doesn't want to be 100%
+/// accurate. This will cause the output to include extra space characters,
+/// but the resulting output won't have incorrect concatenations going on.
+/// Examples include "..", which we print with a space between, because we
+/// don't want to track enough to tell "x.." from "...".
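+/// Another example: with a hypothetical "#define PLUS +", printing the
+/// expansion of "a PLUS +b" without a space would yield "a ++b", which
+/// re-lexes as a '++' token; emitting "a + +b" preserves the original stream.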
+bool TokenConcatenation::AvoidConcat(const Token &PrevPrevTok,
+ const Token &PrevTok,
+ const Token &Tok) const {
+ // First, check to see if the tokens were directly adjacent in the original
+ // source. If they were, it must be okay to stick them together: if there
+ // were an issue, the tokens would have been lexed differently.
+ if (PrevTok.getLocation().isFileID() && Tok.getLocation().isFileID() &&
+ PrevTok.getLocation().getLocWithOffset(PrevTok.getLength()) ==
+ Tok.getLocation())
+ return false;
+
+ tok::TokenKind PrevKind = PrevTok.getKind();
+ if (PrevTok.getIdentifierInfo()) // Language keyword or named operator.
+ PrevKind = tok::identifier;
+
+ // Look up information on when we should avoid concatenation with prevtok.
+ unsigned ConcatInfo = TokenInfo[PrevKind];
+
+ // If prevtok never causes a problem for anything after it, return quickly.
+ if (ConcatInfo == 0) return false;
+
+ if (ConcatInfo & aci_avoid_equal) {
+ // If the next token is '=' or '==', avoid concatenation.
+ if (Tok.is(tok::equal) || Tok.is(tok::equalequal))
+ return true;
+ ConcatInfo &= ~aci_avoid_equal;
+ }
+
+ if (ConcatInfo == 0) return false;
+
+ // Basic algorithm: we look at the first character of the second token, and
+ // determine whether it, if appended to the first token, would form (or
+ // would contribute) to a larger token if concatenated.
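+  // e.g. a '-' token followed by a token whose first character is '>' would
+  // re-lex as '->', so a space must be printed between them.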
+ char FirstChar = 0;
+ if (ConcatInfo & aci_custom) {
+ // If the token does not need to know the first character, don't get it.
+ } else {
+ FirstChar = GetFirstChar(PP, Tok);
+ }
+
+ switch (PrevKind) {
+ default:
+ llvm_unreachable("InitAvoidConcatTokenInfo built wrong");
+
+ case tok::raw_identifier:
+ llvm_unreachable("tok::raw_identifier in non-raw lexing mode!");
+
+ case tok::string_literal:
+ case tok::wide_string_literal:
+ case tok::utf8_string_literal:
+ case tok::utf16_string_literal:
+ case tok::utf32_string_literal:
+ case tok::char_constant:
+ case tok::wide_char_constant:
+ case tok::utf16_char_constant:
+ case tok::utf32_char_constant:
+ if (!PP.getLangOpts().CPlusPlus0x)
+ return false;
+
+ // In C++11, a string or character literal followed by an identifier is a
+ // single token.
+ if (Tok.getIdentifierInfo())
+ return true;
+
+ // A ud-suffix is an identifier. If the previous token ends with one, treat
+ // it as an identifier.
+ if (!PrevTok.hasUDSuffix())
+ return false;
+ // FALL THROUGH.
+ case tok::identifier: // id+id or id+number or id+L"foo".
+ // id+'.'... will not append.
+ if (Tok.is(tok::numeric_constant))
+ return GetFirstChar(PP, Tok) != '.';
+
+ if (Tok.getIdentifierInfo() || Tok.is(tok::wide_string_literal) ||
+ Tok.is(tok::utf8_string_literal) || Tok.is(tok::utf16_string_literal) ||
+ Tok.is(tok::utf32_string_literal) || Tok.is(tok::wide_char_constant) ||
+ Tok.is(tok::utf16_char_constant) || Tok.is(tok::utf32_char_constant))
+ return true;
+
+ // If this isn't identifier + string, we're done.
+ if (Tok.isNot(tok::char_constant) && Tok.isNot(tok::string_literal))
+ return false;
+
+ // Otherwise, this is a narrow character or string. If the *identifier*
+ // is a literal 'L', 'u8', 'u' or 'U', avoid pasting L "foo" -> L"foo".
+ return IsIdentifierStringPrefix(PrevTok);
+
+ case tok::numeric_constant:
+ return isalnum(FirstChar) || Tok.is(tok::numeric_constant) ||
+ FirstChar == '+' || FirstChar == '-' || FirstChar == '.' ||
+ (PP.getLangOpts().CPlusPlus0x && FirstChar == '_');
+ case tok::period: // ..., .*, .1234
+ return (FirstChar == '.' && PrevPrevTok.is(tok::period)) ||
+ isdigit(FirstChar) ||
+ (PP.getLangOpts().CPlusPlus && FirstChar == '*');
+ case tok::amp: // &&
+ return FirstChar == '&';
+ case tok::plus: // ++
+ return FirstChar == '+';
+ case tok::minus: // --, ->, ->*
+ return FirstChar == '-' || FirstChar == '>';
+  case tok::slash:          // /*, //
+ return FirstChar == '*' || FirstChar == '/';
+ case tok::less: // <<, <<=, <:, <%
+ return FirstChar == '<' || FirstChar == ':' || FirstChar == '%';
+ case tok::greater: // >>, >>=
+ return FirstChar == '>';
+ case tok::pipe: // ||
+ return FirstChar == '|';
+ case tok::percent: // %>, %:
+ return FirstChar == '>' || FirstChar == ':';
+ case tok::colon: // ::, :>
+ return FirstChar == '>' ||
+ (PP.getLangOpts().CPlusPlus && FirstChar == ':');
+ case tok::hash: // ##, #@, %:%:
+ return FirstChar == '#' || FirstChar == '@' || FirstChar == '%';
+ case tok::arrow: // ->*
+ return PP.getLangOpts().CPlusPlus && FirstChar == '*';
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp b/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
new file mode 100644
index 0000000..696754c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Lex/TokenLexer.cpp
@@ -0,0 +1,756 @@
+//===--- TokenLexer.cpp - Lex from a token stream -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TokenLexer interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/TokenLexer.h"
+#include "MacroArgs.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+
+
+/// Create a TokenLexer for the specified macro with the specified actual
+/// arguments. Note that this ctor takes ownership of the ActualArgs pointer.
+void TokenLexer::Init(Token &Tok, SourceLocation ELEnd, MacroArgs *Actuals) {
+ // If the client is reusing a TokenLexer, make sure to free any memory
+ // associated with it.
+ destroy();
+
+ Macro = PP.getMacroInfo(Tok.getIdentifierInfo());
+ ActualArgs = Actuals;
+ CurToken = 0;
+
+ ExpandLocStart = Tok.getLocation();
+ ExpandLocEnd = ELEnd;
+ AtStartOfLine = Tok.isAtStartOfLine();
+ HasLeadingSpace = Tok.hasLeadingSpace();
+ Tokens = &*Macro->tokens_begin();
+ OwnsTokens = false;
+ DisableMacroExpansion = false;
+ NumTokens = Macro->tokens_end()-Macro->tokens_begin();
+ MacroExpansionStart = SourceLocation();
+
+ SourceManager &SM = PP.getSourceManager();
+ MacroStartSLocOffset = SM.getNextLocalOffset();
+
+ if (NumTokens > 0) {
+ assert(Tokens[0].getLocation().isValid());
+ assert((Tokens[0].getLocation().isFileID() || Tokens[0].is(tok::comment)) &&
+ "Macro defined in macro?");
+ assert(ExpandLocStart.isValid());
+
+ // Reserve a source location entry chunk for the length of the macro
+ // definition. Tokens that get lexed directly from the definition will
+ // have their locations pointing inside this chunk. This is to avoid
+ // creating separate source location entries for each token.
+ MacroDefStart = SM.getExpansionLoc(Tokens[0].getLocation());
+ MacroDefLength = Macro->getDefinitionLength(SM);
+ MacroExpansionStart = SM.createExpansionLoc(MacroDefStart,
+ ExpandLocStart,
+ ExpandLocEnd,
+ MacroDefLength);
+ }
+
+ // If this is a function-like macro, expand the arguments and change
+ // Tokens to point to the expanded tokens.
+ if (Macro->isFunctionLike() && Macro->getNumArgs())
+ ExpandFunctionArguments();
+
+ // Mark the macro as currently disabled, so that it is not recursively
+ // expanded. The macro must be disabled only after argument pre-expansion of
+ // function-like macro arguments occurs.
+ Macro->DisableMacro();
+}
+
+
+
+/// Create a TokenLexer for the specified token stream. This does not
+/// take ownership of the specified token vector.
+void TokenLexer::Init(const Token *TokArray, unsigned NumToks,
+ bool disableMacroExpansion, bool ownsTokens) {
+ // If the client is reusing a TokenLexer, make sure to free any memory
+ // associated with it.
+ destroy();
+
+ Macro = 0;
+ ActualArgs = 0;
+ Tokens = TokArray;
+ OwnsTokens = ownsTokens;
+ DisableMacroExpansion = disableMacroExpansion;
+ NumTokens = NumToks;
+ CurToken = 0;
+ ExpandLocStart = ExpandLocEnd = SourceLocation();
+ AtStartOfLine = false;
+ HasLeadingSpace = false;
+ MacroExpansionStart = SourceLocation();
+
+ // Set HasLeadingSpace/AtStartOfLine so that the first token will be
+ // returned unmodified.
+ if (NumToks != 0) {
+ AtStartOfLine = TokArray[0].isAtStartOfLine();
+ HasLeadingSpace = TokArray[0].hasLeadingSpace();
+ }
+}
+
+
+void TokenLexer::destroy() {
+ // If this was a function-like macro that actually uses its arguments, delete
+ // the expanded tokens.
+ if (OwnsTokens) {
+ delete [] Tokens;
+ Tokens = 0;
+ OwnsTokens = false;
+ }
+
+ // TokenLexer owns its formal arguments.
+ if (ActualArgs) ActualArgs->destroy(PP);
+}
+
+/// Expand the arguments of a function-like macro so that we can quickly
+/// return preexpanded tokens from Tokens.
+void TokenLexer::ExpandFunctionArguments() {
+
+ SmallVector<Token, 128> ResultToks;
+
+ // Loop through 'Tokens', expanding them into ResultToks. Keep
+ // track of whether we change anything. If not, no need to keep them. If so,
+ // we install the newly expanded sequence as the new 'Tokens' list.
+ bool MadeChange = false;
+
+ // NextTokGetsSpace - When this is true, the next token appended to the
+ // output list will get a leading space, regardless of whether it had one to
+ // begin with or not. This is used for placemarker support.
+ bool NextTokGetsSpace = false;
+
+ for (unsigned i = 0, e = NumTokens; i != e; ++i) {
+ // If we found the stringify operator, get the argument stringified. The
+ // preprocessor already verified that the following token is a macro name
+ // when the #define was parsed.
+ const Token &CurTok = Tokens[i];
+ if (CurTok.is(tok::hash) || CurTok.is(tok::hashat)) {
+ int ArgNo = Macro->getArgumentNum(Tokens[i+1].getIdentifierInfo());
+ assert(ArgNo != -1 && "Token following # is not an argument?");
+
+ SourceLocation ExpansionLocStart =
+ getExpansionLocForMacroDefLoc(CurTok.getLocation());
+ SourceLocation ExpansionLocEnd =
+ getExpansionLocForMacroDefLoc(Tokens[i+1].getLocation());
+
+ Token Res;
+ if (CurTok.is(tok::hash)) // Stringify
+ Res = ActualArgs->getStringifiedArgument(ArgNo, PP,
+ ExpansionLocStart,
+ ExpansionLocEnd);
+ else {
+ // 'charify': don't bother caching these.
+ Res = MacroArgs::StringifyArgument(ActualArgs->getUnexpArgument(ArgNo),
+ PP, true,
+ ExpansionLocStart,
+ ExpansionLocEnd);
+ }
+
+ // The stringified/charified string leading space flag gets set to match
+ // the #/#@ operator.
+ if (CurTok.hasLeadingSpace() || NextTokGetsSpace)
+ Res.setFlag(Token::LeadingSpace);
+
+ ResultToks.push_back(Res);
+ MadeChange = true;
+ ++i; // Skip arg name.
+ NextTokGetsSpace = false;
+ continue;
+ }
+
+ // Otherwise, if this is not an argument token, just add the token to the
+ // output buffer.
+ IdentifierInfo *II = CurTok.getIdentifierInfo();
+ int ArgNo = II ? Macro->getArgumentNum(II) : -1;
+ if (ArgNo == -1) {
+ // This isn't an argument, just add it.
+ ResultToks.push_back(CurTok);
+
+ if (NextTokGetsSpace) {
+ ResultToks.back().setFlag(Token::LeadingSpace);
+ NextTokGetsSpace = false;
+ }
+ continue;
+ }
+
+ // An argument is expanded somehow, the result is different than the
+ // input.
+ MadeChange = true;
+
+ // Otherwise, this is a use of the argument. Find out if there is a paste
+ // (##) operator before or after the argument.
+ bool PasteBefore =
+ !ResultToks.empty() && ResultToks.back().is(tok::hashhash);
+ bool PasteAfter = i+1 != e && Tokens[i+1].is(tok::hashhash);
+
+ // If it is not the LHS/RHS of a ## operator, we must pre-expand the
+ // argument and substitute the expanded tokens into the result. This is
+ // C99 6.10.3.1p1.
+ if (!PasteBefore && !PasteAfter) {
+ const Token *ResultArgToks;
+
+ // Only preexpand the argument if it could possibly need it. This
+ // avoids some work in common cases.
+ const Token *ArgTok = ActualArgs->getUnexpArgument(ArgNo);
+ if (ActualArgs->ArgNeedsPreexpansion(ArgTok, PP))
+ ResultArgToks = &ActualArgs->getPreExpArgument(ArgNo, Macro, PP)[0];
+ else
+ ResultArgToks = ArgTok; // Use non-preexpanded tokens.
+
+ // If the arg token expanded into anything, append it.
+ if (ResultArgToks->isNot(tok::eof)) {
+ unsigned FirstResult = ResultToks.size();
+ unsigned NumToks = MacroArgs::getArgLength(ResultArgToks);
+ ResultToks.append(ResultArgToks, ResultArgToks+NumToks);
+
+ // If the '##' came from expanding an argument, turn it into 'unknown'
+ // to avoid pasting.
+ for (unsigned i = FirstResult, e = ResultToks.size(); i != e; ++i) {
+ Token &Tok = ResultToks[i];
+ if (Tok.is(tok::hashhash))
+ Tok.setKind(tok::unknown);
+ }
+
+ if(ExpandLocStart.isValid()) {
+ updateLocForMacroArgTokens(CurTok.getLocation(),
+ ResultToks.begin()+FirstResult,
+ ResultToks.end());
+ }
+
+ // If any tokens were substituted from the argument, the whitespace
+ // before the first token should match the whitespace of the arg
+ // identifier.
+ ResultToks[FirstResult].setFlagValue(Token::LeadingSpace,
+ CurTok.hasLeadingSpace() ||
+ NextTokGetsSpace);
+ NextTokGetsSpace = false;
+ } else {
+ // If this is an empty argument, and if there was whitespace before the
+ // formal token, make sure the next token gets whitespace before it.
+ NextTokGetsSpace = CurTok.hasLeadingSpace();
+ }
+ continue;
+ }
+
+ // Okay, we have a token that is either the LHS or RHS of a paste (##)
+ // argument. It gets substituted as its non-pre-expanded tokens.
+ const Token *ArgToks = ActualArgs->getUnexpArgument(ArgNo);
+ unsigned NumToks = MacroArgs::getArgLength(ArgToks);
+ if (NumToks) { // Not an empty argument?
+      // If this is the GNU ", ## __VA_ARGS__" extension, and we just learned
+      // that __VA_ARGS__ expands to multiple tokens, avoid a pasting error
+      // when the expander tries to paste ',' with the first token of the
+      // __VA_ARGS__ expansion.
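+      //
+      // e.g. with a hypothetical "#define LOG(fmt, ...) printf(fmt, ## __VA_ARGS__)",
+      // LOG("%d %d", 1, 2) expands __VA_ARGS__ to "1, 2"; the ',' after "fmt"
+      // is kept and the '##' is dropped rather than pasting ',' with '1'.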
+ if (PasteBefore && ResultToks.size() >= 2 &&
+ ResultToks[ResultToks.size()-2].is(tok::comma) &&
+ (unsigned)ArgNo == Macro->getNumArgs()-1 &&
+ Macro->isVariadic()) {
+ // Remove the paste operator, report use of the extension.
+ PP.Diag(ResultToks.back().getLocation(), diag::ext_paste_comma);
+ ResultToks.pop_back();
+ }
+
+ ResultToks.append(ArgToks, ArgToks+NumToks);
+
+ // If the '##' came from expanding an argument, turn it into 'unknown'
+ // to avoid pasting.
+ for (unsigned i = ResultToks.size() - NumToks, e = ResultToks.size();
+ i != e; ++i) {
+ Token &Tok = ResultToks[i];
+ if (Tok.is(tok::hashhash))
+ Tok.setKind(tok::unknown);
+ }
+
+ if (ExpandLocStart.isValid()) {
+ updateLocForMacroArgTokens(CurTok.getLocation(),
+ ResultToks.end()-NumToks, ResultToks.end());
+ }
+
+ // If this token (the macro argument) was supposed to get leading
+ // whitespace, transfer this information onto the first token of the
+ // expansion.
+ //
+ // Do not do this if the paste operator occurs before the macro argument,
+ // as in "A ## MACROARG". In valid code, the first token will get
+ // smooshed onto the preceding one anyway (forming AMACROARG). In
+ // assembler-with-cpp mode, invalid pastes are allowed through: in this
+ // case, we do not want the extra whitespace to be added. For example,
+ // we want ". ## foo" -> ".foo" not ". foo".
+ if ((CurTok.hasLeadingSpace() || NextTokGetsSpace) &&
+ !PasteBefore)
+ ResultToks[ResultToks.size()-NumToks].setFlag(Token::LeadingSpace);
+
+ NextTokGetsSpace = false;
+ continue;
+ }
+
+ // If an empty argument is on the LHS or RHS of a paste, the standard (C99
+ // 6.10.3.3p2,3) calls for a bunch of placemarker stuff to occur. We
+ // implement this by eating ## operators when a LHS or RHS expands to
+ // empty.
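+    //
+    // e.g. with a hypothetical "#define F(x, y) x ## y", F(, b) expands to
+    // just "b": the empty 'x' acts as a placemarker and the '##' is dropped.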
+ NextTokGetsSpace |= CurTok.hasLeadingSpace();
+ if (PasteAfter) {
+ // Discard the argument token and skip (don't copy to the expansion
+ // buffer) the paste operator after it.
+ NextTokGetsSpace |= Tokens[i+1].hasLeadingSpace();
+ ++i;
+ continue;
+ }
+
+ // If this is on the RHS of a paste operator, we've already copied the
+ // paste operator to the ResultToks list. Remove it.
+ assert(PasteBefore && ResultToks.back().is(tok::hashhash));
+ NextTokGetsSpace |= ResultToks.back().hasLeadingSpace();
+ ResultToks.pop_back();
+
+ // If this is the __VA_ARGS__ token, and if the argument wasn't provided,
+ // and if the macro had at least one real argument, and if the token before
+ // the ## was a comma, remove the comma.
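+    //
+    // e.g. for a hypothetical "#define LOG(fmt, ...) printf(fmt, ## __VA_ARGS__)",
+    // LOG("hi") elides __VA_ARGS__, so the ',' after "fmt" is removed here.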
+ if ((unsigned)ArgNo == Macro->getNumArgs()-1 && // is __VA_ARGS__
+ ActualArgs->isVarargsElidedUse() && // Argument elided.
+ !ResultToks.empty() && ResultToks.back().is(tok::comma)) {
+ // Never add a space, even if the comma, ##, or arg had a space.
+ NextTokGetsSpace = false;
+ // Remove the paste operator, report use of the extension.
+ PP.Diag(ResultToks.back().getLocation(), diag::ext_paste_comma);
+ ResultToks.pop_back();
+
+ // If the comma was right after another paste (e.g. "X##,##__VA_ARGS__"),
+ // then removal of the comma should produce a placemarker token (in C99
+ // terms) which we model by popping off the previous ##, giving us a plain
+ // "X" when __VA_ARGS__ is empty.
+ if (!ResultToks.empty() && ResultToks.back().is(tok::hashhash))
+ ResultToks.pop_back();
+ }
+ continue;
+ }
+
+ // If anything changed, install this as the new Tokens list.
+ if (MadeChange) {
+ assert(!OwnsTokens && "This would leak if we already own the token list");
+ // This is deleted in the dtor.
+ NumTokens = ResultToks.size();
+ // The tokens will be added to Preprocessor's cache and will be removed
+ // when this TokenLexer finishes lexing them.
+ Tokens = PP.cacheMacroExpandedTokens(this, ResultToks);
+
+    // The preprocessor cache of macro-expanded tokens owns these tokens, not us.
+ OwnsTokens = false;
+ }
+}
+
+/// Lex - Lex and return a token from this macro stream.
+///
+void TokenLexer::Lex(Token &Tok) {
+ // Lexing off the end of the macro, pop this macro off the expansion stack.
+ if (isAtEnd()) {
+ // If this is a macro (not a token stream), mark the macro enabled now
+ // that it is no longer being expanded.
+ if (Macro) Macro->EnableMacro();
+
+ // Pop this context off the preprocessors lexer stack and get the next
+ // token. This will delete "this" so remember the PP instance var.
+ Preprocessor &PPCache = PP;
+ if (PP.HandleEndOfTokenLexer(Tok))
+ return;
+
+ // HandleEndOfTokenLexer may not return a token. If it doesn't, lex
+ // whatever is next.
+ return PPCache.Lex(Tok);
+ }
+
+ SourceManager &SM = PP.getSourceManager();
+
+ // If this is the first token of the expanded result, we inherit spacing
+ // properties later.
+ bool isFirstToken = CurToken == 0;
+
+ // Get the next token to return.
+ Tok = Tokens[CurToken++];
+
+ bool TokenIsFromPaste = false;
+
+ // If this token is followed by a token paste (##) operator, paste the tokens!
+ // Note that ## is a normal token when not expanding a macro.
+ if (!isAtEnd() && Tokens[CurToken].is(tok::hashhash) && Macro) {
+ // When handling the microsoft /##/ extension, the final token is
+ // returned by PasteTokens, not the pasted token.
+ if (PasteTokens(Tok))
+ return;
+
+ TokenIsFromPaste = true;
+ }
+
+  // The token's current location indicates where the token was lexed from.  We
+ // need this information to compute the spelling of the token, but any
+ // diagnostics for the expanded token should appear as if they came from
+ // ExpansionLoc. Pull this information together into a new SourceLocation
+ // that captures all of this.
+ if (ExpandLocStart.isValid() && // Don't do this for token streams.
+ // Check that the token's location was not already set properly.
+ SM.isBeforeInSLocAddrSpace(Tok.getLocation(), MacroStartSLocOffset)) {
+ SourceLocation instLoc;
+ if (Tok.is(tok::comment)) {
+ instLoc = SM.createExpansionLoc(Tok.getLocation(),
+ ExpandLocStart,
+ ExpandLocEnd,
+ Tok.getLength());
+ } else {
+ instLoc = getExpansionLocForMacroDefLoc(Tok.getLocation());
+ }
+
+ Tok.setLocation(instLoc);
+ }
+
+ // If this is the first token, set the lexical properties of the token to
+ // match the lexical properties of the macro identifier.
+ if (isFirstToken) {
+ Tok.setFlagValue(Token::StartOfLine , AtStartOfLine);
+ Tok.setFlagValue(Token::LeadingSpace, HasLeadingSpace);
+ }
+
+ // Handle recursive expansion!
+ if (!Tok.isAnnotation() && Tok.getIdentifierInfo() != 0) {
+ // Change the kind of this identifier to the appropriate token kind, e.g.
+ // turning "for" into a keyword.
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ Tok.setKind(II->getTokenID());
+
+ // If this identifier was poisoned and from a paste, emit an error. This
+ // won't be handled by Preprocessor::HandleIdentifier because this is coming
+ // from a macro expansion.
+ if (II->isPoisoned() && TokenIsFromPaste) {
+ PP.HandlePoisonedIdentifier(Tok);
+ }
+
+ if (!DisableMacroExpansion && II->isHandleIdentifierCase())
+ PP.HandleIdentifier(Tok);
+ }
+
+ // Otherwise, return a normal token.
+}
+
+/// PasteTokens - Tok is the LHS of a ## operator, and CurToken is the ##
+/// operator. Read the ## and RHS, and paste the LHS/RHS together. If there
+/// are more ## after it, chomp them iteratively. Return the result as Tok.
+/// If this returns true, the caller should immediately return the token.
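+/// For example, with a hypothetical "#define CAT(a,b) a ## b", CAT(foo, 1)
+/// copies the spellings "foo" and "1" into the scratch buffer and re-lexes
+/// the result as the single identifier token "foo1".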
+bool TokenLexer::PasteTokens(Token &Tok) {
+ SmallString<128> Buffer;
+ const char *ResultTokStrPtr = 0;
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation PasteOpLoc;
+ do {
+ // Consume the ## operator.
+ PasteOpLoc = Tokens[CurToken].getLocation();
+ ++CurToken;
+ assert(!isAtEnd() && "No token on the RHS of a paste operator!");
+
+ // Get the RHS token.
+ const Token &RHS = Tokens[CurToken];
+
+ // Allocate space for the result token. This is guaranteed to be enough for
+ // the two tokens.
+ Buffer.resize(Tok.getLength() + RHS.getLength());
+
+ // Get the spelling of the LHS token in Buffer.
+ const char *BufPtr = &Buffer[0];
+ bool Invalid = false;
+ unsigned LHSLen = PP.getSpelling(Tok, BufPtr, &Invalid);
+ if (BufPtr != &Buffer[0]) // Really, we want the chars in Buffer!
+ memcpy(&Buffer[0], BufPtr, LHSLen);
+ if (Invalid)
+ return true;
+
+ BufPtr = &Buffer[LHSLen];
+ unsigned RHSLen = PP.getSpelling(RHS, BufPtr, &Invalid);
+ if (Invalid)
+ return true;
+ if (BufPtr != &Buffer[LHSLen]) // Really, we want the chars in Buffer!
+ memcpy(&Buffer[LHSLen], BufPtr, RHSLen);
+
+ // Trim excess space.
+ Buffer.resize(LHSLen+RHSLen);
+
+ // Plop the pasted result (including the trailing newline and null) into a
+ // scratch buffer where we can lex it.
+ Token ResultTokTmp;
+ ResultTokTmp.startToken();
+
+ // Claim that the tmp token is a string_literal so that we can get the
+ // character pointer back from CreateString in getLiteralData().
+ ResultTokTmp.setKind(tok::string_literal);
+ PP.CreateString(&Buffer[0], Buffer.size(), ResultTokTmp);
+ SourceLocation ResultTokLoc = ResultTokTmp.getLocation();
+ ResultTokStrPtr = ResultTokTmp.getLiteralData();
+
+ // Lex the resultant pasted token into Result.
+ Token Result;
+
+ if (Tok.isAnyIdentifier() && RHS.isAnyIdentifier()) {
+ // Common paste case: identifier+identifier = identifier. Avoid creating
+ // a lexer and other overhead.
+ PP.IncrementPasteCounter(true);
+ Result.startToken();
+ Result.setKind(tok::raw_identifier);
+ Result.setRawIdentifierData(ResultTokStrPtr);
+ Result.setLocation(ResultTokLoc);
+ Result.setLength(LHSLen+RHSLen);
+ } else {
+ PP.IncrementPasteCounter(false);
+
+ assert(ResultTokLoc.isFileID() &&
+ "Should be a raw location into scratch buffer");
+ SourceManager &SourceMgr = PP.getSourceManager();
+ FileID LocFileID = SourceMgr.getFileID(ResultTokLoc);
+
+ bool Invalid = false;
+ const char *ScratchBufStart
+ = SourceMgr.getBufferData(LocFileID, &Invalid).data();
+ if (Invalid)
+ return false;
+
+ // Make a lexer to lex this string from. Lex just this one token.
+ // Make a lexer object so that we lex and expand the paste result.
+ Lexer TL(SourceMgr.getLocForStartOfFile(LocFileID),
+ PP.getLangOpts(), ScratchBufStart,
+ ResultTokStrPtr, ResultTokStrPtr+LHSLen+RHSLen);
+
+ // Lex a token in raw mode. This way it won't look up identifiers
+ // automatically, lexing off the end will return an eof token, and
+ // warnings are disabled. This returns true if the result token is the
+ // entire buffer.
+ bool isInvalid = !TL.LexFromRawLexer(Result);
+
+ // If we got an EOF token, we didn't form even ONE token. For example, we
+ // did "/ ## /" to get "//".
+ isInvalid |= Result.is(tok::eof);
+
+ // If pasting the two tokens didn't form a full new token, this is an
+ // error. This occurs with "x ## +" and other stuff. Return with Tok
+ // unmodified and with RHS as the next token to lex.
+ if (isInvalid) {
+ // Test for the Microsoft extension of /##/ turning into // here on the
+ // error path.
+ if (PP.getLangOpts().MicrosoftExt && Tok.is(tok::slash) &&
+ RHS.is(tok::slash)) {
+ HandleMicrosoftCommentPaste(Tok);
+ return true;
+ }
+
+ // Do not emit the error when preprocessing assembler code.
+ if (!PP.getLangOpts().AsmPreprocessor) {
+ // Explicitly convert the token location to have proper expansion
+ // information so that the user knows where it came from.
+ SourceManager &SM = PP.getSourceManager();
+ SourceLocation Loc =
+ SM.createExpansionLoc(PasteOpLoc, ExpandLocStart, ExpandLocEnd, 2);
+ // If we're in microsoft extensions mode, downgrade this from a hard
+ // error to a warning that defaults to an error. This allows
+ // disabling it.
+ PP.Diag(Loc,
+ PP.getLangOpts().MicrosoftExt ? diag::err_pp_bad_paste_ms
+ : diag::err_pp_bad_paste)
+ << Buffer.str();
+ }
+
+ // Do not consume the RHS.
+ --CurToken;
+ }
+
+ // Turn ## into 'unknown' to avoid # ## # from looking like a paste
+ // operator.
+ if (Result.is(tok::hashhash))
+ Result.setKind(tok::unknown);
+ }
+
+    // Transfer properties of the LHS over to the Result.
+ Result.setFlagValue(Token::StartOfLine , Tok.isAtStartOfLine());
+ Result.setFlagValue(Token::LeadingSpace, Tok.hasLeadingSpace());
+
+ // Finally, replace LHS with the result, consume the RHS, and iterate.
+ ++CurToken;
+ Tok = Result;
+ } while (!isAtEnd() && Tokens[CurToken].is(tok::hashhash));
+
+ SourceLocation EndLoc = Tokens[CurToken - 1].getLocation();
+
+  // The token's current location indicates where the token was lexed from.  We
+ // need this information to compute the spelling of the token, but any
+ // diagnostics for the expanded token should appear as if the token was
+ // expanded from the full ## expression. Pull this information together into
+ // a new SourceLocation that captures all of this.
+ SourceManager &SM = PP.getSourceManager();
+ if (StartLoc.isFileID())
+ StartLoc = getExpansionLocForMacroDefLoc(StartLoc);
+ if (EndLoc.isFileID())
+ EndLoc = getExpansionLocForMacroDefLoc(EndLoc);
+ Tok.setLocation(SM.createExpansionLoc(Tok.getLocation(), StartLoc, EndLoc,
+ Tok.getLength()));
+
+ // Now that we got the result token, it will be subject to expansion. Since
+ // token pasting re-lexes the result token in raw mode, identifier information
+ // isn't looked up. As such, if the result is an identifier, look up id info.
+ if (Tok.is(tok::raw_identifier)) {
+ // Look up the identifier info for the token. We disabled identifier lookup
+ // by saying we're skipping contents, so we need to do this manually.
+ PP.LookUpIdentifierInfo(Tok);
+ }
+ return false;
+}
+
+/// isNextTokenLParen - If the next token lexed will pop this macro off the
+/// expansion stack, return 2. If the next unexpanded token is a '(', return
+/// 1, otherwise return 0.
+unsigned TokenLexer::isNextTokenLParen() const {
+ // Out of tokens?
+ if (isAtEnd())
+ return 2;
+ return Tokens[CurToken].is(tok::l_paren);
+}
+
+/// isParsingPreprocessorDirective - Return true if we are in the middle of a
+/// preprocessor directive.
+bool TokenLexer::isParsingPreprocessorDirective() const {
+ return Tokens[NumTokens-1].is(tok::eod) && !isAtEnd();
+}
+
+/// HandleMicrosoftCommentPaste - In microsoft compatibility mode, /##/ pastes
+/// together to form a comment that comments out everything in the current
+/// macro, other active macros, and anything left on the current physical
+/// source line of the expanded buffer. Handle this by returning the
+/// first token on the next line.
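+/// For example, with a hypothetical "#define DBG /##/", an expansion of DBG
+/// comments out the remainder of the physical line in Microsoft mode.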
+void TokenLexer::HandleMicrosoftCommentPaste(Token &Tok) {
+ // We 'comment out' the rest of this macro by just ignoring the rest of the
+ // tokens that have not been lexed yet, if any.
+
+ // Since this must be a macro, mark the macro enabled now that it is no longer
+ // being expanded.
+ assert(Macro && "Token streams can't paste comments");
+ Macro->EnableMacro();
+
+ PP.HandleMicrosoftCommentPaste(Tok);
+}
+
+/// \brief If \arg loc is a file ID and points inside the current macro
+/// definition, returns the appropriate source location pointing at the
+/// macro expansion source location entry, otherwise it returns an invalid
+/// SourceLocation.
+SourceLocation
+TokenLexer::getExpansionLocForMacroDefLoc(SourceLocation loc) const {
+ assert(ExpandLocStart.isValid() && MacroExpansionStart.isValid() &&
+ "Not appropriate for token streams");
+ assert(loc.isValid() && loc.isFileID());
+
+ SourceManager &SM = PP.getSourceManager();
+ assert(SM.isInSLocAddrSpace(loc, MacroDefStart, MacroDefLength) &&
+ "Expected loc to come from the macro definition");
+
+ unsigned relativeOffset = 0;
+ SM.isInSLocAddrSpace(loc, MacroDefStart, MacroDefLength, &relativeOffset);
+ return MacroExpansionStart.getLocWithOffset(relativeOffset);
+}
+
+/// \brief Finds the tokens that are consecutive (from the same FileID),
+/// creates a single SLocEntry, and assigns to each token a SourceLocation
+/// that points into that SLocEntry, e.g. for
+/// assert(foo == bar);
+/// There will be a single SLocEntry for the "foo == bar" chunk and locations
+/// for the 'foo', '==', 'bar' tokens will point inside that chunk.
+///
+/// \arg begin_tokens will be updated to a position past all the found
+/// consecutive tokens.
+static void updateConsecutiveMacroArgTokens(SourceManager &SM,
+ SourceLocation InstLoc,
+ Token *&begin_tokens,
+ Token * end_tokens) {
+ assert(begin_tokens < end_tokens);
+
+ SourceLocation FirstLoc = begin_tokens->getLocation();
+ SourceLocation CurLoc = FirstLoc;
+
+ // Compare the source location offset of tokens and group together tokens that
+ // are close, even if their locations point to different FileIDs. e.g.
+ //
+ // |bar | foo | cake | (3 tokens from 3 consecutive FileIDs)
+ // ^ ^
+ // |bar foo cake| (one SLocEntry chunk for all tokens)
+ //
+ // we can perform this "merge" since the token's spelling location depends
+ // on the relative offset.
+
+ Token *NextTok = begin_tokens + 1;
+ for (; NextTok < end_tokens; ++NextTok) {
+ int RelOffs;
+ if (!SM.isInSameSLocAddrSpace(CurLoc, NextTok->getLocation(), &RelOffs))
+ break; // Token from different local/loaded location.
+ // Check that token is not before the previous token or more than 50
+ // "characters" away.
+ if (RelOffs < 0 || RelOffs > 50)
+ break;
+ CurLoc = NextTok->getLocation();
+ }
+
+ // For the consecutive tokens, find the length of the SLocEntry to contain
+ // all of them.
+ Token &LastConsecutiveTok = *(NextTok-1);
+ int LastRelOffs = 0;
+ SM.isInSameSLocAddrSpace(FirstLoc, LastConsecutiveTok.getLocation(),
+ &LastRelOffs);
+ unsigned FullLength = LastRelOffs + LastConsecutiveTok.getLength();
+
+ // Create a macro expansion SLocEntry that will "contain" all of the tokens.
+ SourceLocation Expansion =
+ SM.createMacroArgExpansionLoc(FirstLoc, InstLoc,FullLength);
+
+ // Change the location of the tokens from the spelling location to the new
+ // expanded location.
+ for (; begin_tokens < NextTok; ++begin_tokens) {
+ Token &Tok = *begin_tokens;
+ int RelOffs = 0;
+ SM.isInSameSLocAddrSpace(FirstLoc, Tok.getLocation(), &RelOffs);
+ Tok.setLocation(Expansion.getLocWithOffset(RelOffs));
+ }
+}
+
+/// \brief Creates SLocEntries and updates the locations of macro argument
+/// tokens to their new expanded locations.
+///
+/// \param ArgIdSpellLoc the spelling location of the macro argument id inside
+/// the macro definition.
+/// \param Tokens the macro argument tokens to update.
+void TokenLexer::updateLocForMacroArgTokens(SourceLocation ArgIdSpellLoc,
+ Token *begin_tokens,
+ Token *end_tokens) {
+ SourceManager &SM = PP.getSourceManager();
+
+ SourceLocation InstLoc =
+ getExpansionLocForMacroDefLoc(ArgIdSpellLoc);
+
+ while (begin_tokens < end_tokens) {
+ // If there's only one token just create a SLocEntry for it.
+ if (end_tokens - begin_tokens == 1) {
+ Token &Tok = *begin_tokens;
+ Tok.setLocation(SM.createMacroArgExpansionLoc(Tok.getLocation(),
+ InstLoc,
+ Tok.getLength()));
+ return;
+ }
+
+ updateConsecutiveMacroArgTokens(SM, InstLoc, begin_tokens, end_tokens);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp
new file mode 100644
index 0000000..d1c2624
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseAST.cpp
@@ -0,0 +1,119 @@
+//===--- ParseAST.cpp - Provide the clang::ParseAST method ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the clang::ParseAST method.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/ParseAST.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/CodeCompleteConsumer.h"
+#include "clang/Sema/SemaConsumer.h"
+#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExternalASTSource.h"
+#include "clang/AST/Stmt.h"
+#include "clang/Parse/Parser.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/CrashRecoveryContext.h"
+#include <cstdio>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Public interface to the file
+//===----------------------------------------------------------------------===//
+
+/// ParseAST - Parse the entire file specified, notifying the ASTConsumer as
+/// the file is parsed. This inserts the parsed decls into the translation unit
+/// held by Ctx.
+///
+void clang::ParseAST(Preprocessor &PP, ASTConsumer *Consumer,
+ ASTContext &Ctx, bool PrintStats,
+ TranslationUnitKind TUKind,
+ CodeCompleteConsumer *CompletionConsumer,
+ bool SkipFunctionBodies) {
+
+ OwningPtr<Sema> S(new Sema(PP, Ctx, *Consumer,
+ TUKind,
+ CompletionConsumer));
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<Sema> CleanupSema(S.get());
+
+ ParseAST(*S.get(), PrintStats, SkipFunctionBodies);
+}
+
+void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
+ // Collect global stats on Decls/Stmts (until we have a module streamer).
+ if (PrintStats) {
+ Decl::EnableStatistics();
+ Stmt::EnableStatistics();
+ }
+
+ // Also turn on collection of stats inside of the Sema object.
+ bool OldCollectStats = PrintStats;
+ std::swap(OldCollectStats, S.CollectStats);
+
+ ASTConsumer *Consumer = &S.getASTConsumer();
+
+ OwningPtr<Parser> ParseOP(new Parser(S.getPreprocessor(), S,
+ SkipFunctionBodies));
+ Parser &P = *ParseOP.get();
+
+ PrettyStackTraceParserEntry CrashInfo(P);
+
+ // Recover resources if we crash before exiting this method.
+ llvm::CrashRecoveryContextCleanupRegistrar<Parser>
+ CleanupParser(ParseOP.get());
+
+ S.getPreprocessor().EnterMainSourceFile();
+ P.Initialize();
+ S.Initialize();
+
+ if (ExternalASTSource *External = S.getASTContext().getExternalSource())
+ External->StartTranslationUnit(Consumer);
+
+ bool Abort = false;
+ Parser::DeclGroupPtrTy ADecl;
+
+ while (!P.ParseTopLevelDecl(ADecl)) { // Not end of file.
+ // If we got a null return and something *was* parsed, ignore it. This
+ // is due to a top-level semicolon, an action override, or a parse error
+ // skipping something.
+ if (ADecl) {
+ if (!Consumer->HandleTopLevelDecl(ADecl.get())) {
+ Abort = true;
+ break;
+ }
+ }
+ }
+
+ if (Abort)
+ return;
+
+ // Process any TopLevelDecls generated by #pragma weak.
+ for (SmallVector<Decl*,2>::iterator
+ I = S.WeakTopLevelDecls().begin(),
+ E = S.WeakTopLevelDecls().end(); I != E; ++I)
+ Consumer->HandleTopLevelDecl(DeclGroupRef(*I));
+
+ Consumer->HandleTranslationUnit(S.getASTContext());
+
+ std::swap(OldCollectStats, S.CollectStats);
+ if (PrintStats) {
+ llvm::errs() << "\nSTATISTICS:\n";
+ P.getActions().PrintStats();
+ S.getASTContext().PrintStats();
+ Decl::PrintStats();
+ Stmt::PrintStats();
+ Consumer->PrintStats();
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
new file mode 100644
index 0000000..c000f69
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -0,0 +1,685 @@
+//===--- ParseCXXInlineMethods.cpp - C++ class inline methods parsing------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements parsing for C++ class inline methods.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Scope.h"
+#include "clang/AST/DeclTemplate.h"
+using namespace clang;
+
+/// ParseCXXInlineMethodDef - We parsed and verified that the specified
+/// Declarator is a well-formed C++ inline method definition. Now lex its body
+/// and store its tokens for parsing after the C++ class is complete.
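+///
+/// For example, given
+///   struct S { int f(int x) { return x + n; } int n; };
+/// the body '{ return x + n; }' is only tokenized here; it is parsed once the
+/// definition of S is complete, so the reference to 'n' resolves correctly.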
+Decl *Parser::ParseCXXInlineMethodDef(AccessSpecifier AS,
+ AttributeList *AccessAttrs,
+ ParsingDeclarator &D,
+ const ParsedTemplateInfo &TemplateInfo,
+ const VirtSpecifiers& VS,
+ FunctionDefinitionKind DefinitionKind,
+ ExprResult& Init) {
+ assert(D.isFunctionDeclarator() && "This isn't a function declarator!");
+ assert((Tok.is(tok::l_brace) || Tok.is(tok::colon) || Tok.is(tok::kw_try) ||
+ Tok.is(tok::equal)) &&
+ "Current token not a '{', ':', '=', or 'try'!");
+
+ MultiTemplateParamsArg TemplateParams(Actions,
+ TemplateInfo.TemplateParams ? TemplateInfo.TemplateParams->data() : 0,
+ TemplateInfo.TemplateParams ? TemplateInfo.TemplateParams->size() : 0);
+
+ Decl *FnD;
+ D.setFunctionDefinitionKind(DefinitionKind);
+ if (D.getDeclSpec().isFriendSpecified())
+ FnD = Actions.ActOnFriendFunctionDecl(getCurScope(), D,
+ move(TemplateParams));
+ else {
+ FnD = Actions.ActOnCXXMemberDeclarator(getCurScope(), AS, D,
+ move(TemplateParams), 0,
+ VS, /*HasDeferredInit=*/false);
+ if (FnD) {
+ Actions.ProcessDeclAttributeList(getCurScope(), FnD, AccessAttrs,
+ false, true);
+ bool TypeSpecContainsAuto
+ = D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto;
+ if (Init.isUsable())
+ Actions.AddInitializerToDecl(FnD, Init.get(), false,
+ TypeSpecContainsAuto);
+ else
+ Actions.ActOnUninitializedDecl(FnD, TypeSpecContainsAuto);
+ }
+ }
+
+ HandleMemberFunctionDefaultArgs(D, FnD);
+
+ D.complete(FnD);
+
+ if (Tok.is(tok::equal)) {
+ ConsumeToken();
+
+ if (!FnD) {
+ SkipUntil(tok::semi);
+ return 0;
+ }
+
+ bool Delete = false;
+ SourceLocation KWLoc;
+ if (Tok.is(tok::kw_delete)) {
+ Diag(Tok, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_deleted_function :
+ diag::ext_deleted_function);
+
+ KWLoc = ConsumeToken();
+ Actions.SetDeclDeleted(FnD, KWLoc);
+ Delete = true;
+ } else if (Tok.is(tok::kw_default)) {
+ Diag(Tok, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_defaulted_function :
+ diag::ext_defaulted_function);
+
+ KWLoc = ConsumeToken();
+ Actions.SetDeclDefaulted(FnD, KWLoc);
+ } else {
+ llvm_unreachable("function definition after = not 'delete' or 'default'");
+ }
+
+ if (Tok.is(tok::comma)) {
+ Diag(KWLoc, diag::err_default_delete_in_multiple_declaration)
+ << Delete;
+ SkipUntil(tok::semi);
+ } else {
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
+ Delete ? "delete" : "default", tok::semi);
+ }
+
+ return FnD;
+ }
+
+ // In delayed template parsing mode, if we are within a class template
+ // or if we are about to parse function member template then consume
+ // the tokens and store them for parsing at the end of the translation unit.
+ if (getLangOpts().DelayedTemplateParsing &&
+ ((Actions.CurContext->isDependentContext() ||
+ TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate) &&
+ !Actions.IsInsideALocalClassWithinATemplateFunction())) {
+
+ if (FnD) {
+ LateParsedTemplatedFunction *LPT = new LateParsedTemplatedFunction(FnD);
+
+ FunctionDecl *FD = 0;
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(FnD))
+ FD = FunTmpl->getTemplatedDecl();
+ else
+ FD = cast<FunctionDecl>(FnD);
+ Actions.CheckForFunctionRedefinition(FD);
+
+ LateParsedTemplateMap[FD] = LPT;
+ Actions.MarkAsLateParsedTemplate(FD);
+ LexTemplateFunctionForLateParsing(LPT->Toks);
+ } else {
+ CachedTokens Toks;
+ LexTemplateFunctionForLateParsing(Toks);
+ }
+
+ return FnD;
+ }
+
+ // Consume the tokens and store them for later parsing.
+
+ LexedMethod* LM = new LexedMethod(this, FnD);
+ getCurrentClass().LateParsedDeclarations.push_back(LM);
+ LM->TemplateScope = getCurScope()->isTemplateParamScope();
+ CachedTokens &Toks = LM->Toks;
+
+ tok::TokenKind kind = Tok.getKind();
+ // Consume everything up to (and including) the left brace of the
+ // function body.
+ if (ConsumeAndStoreFunctionPrologue(Toks)) {
+ // We didn't find the left-brace we expected after the
+ // constructor initializer; we already printed an error, and it's likely
+ // impossible to recover, so don't try to parse this method later.
+ // If we stopped at a semicolon, consume it to avoid an extra warning.
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ delete getCurrentClass().LateParsedDeclarations.back();
+ getCurrentClass().LateParsedDeclarations.pop_back();
+ return FnD;
+ } else {
+ // Consume everything up to (and including) the matching right brace.
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+ }
+
+ // If we're in a function-try-block, we need to store all the catch blocks.
+ if (kind == tok::kw_try) {
+ while (Tok.is(tok::kw_catch)) {
+ ConsumeAndStoreUntil(tok::l_brace, Toks, /*StopAtSemi=*/false);
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+ }
+ }
+
+ if (!FnD) {
+ // If semantic analysis could not build a function declaration,
+ // just throw away the late-parsed declaration.
+ delete getCurrentClass().LateParsedDeclarations.back();
+ getCurrentClass().LateParsedDeclarations.pop_back();
+ }
+
+ return FnD;
+}
+
+/// ParseCXXNonStaticMemberInitializer - We parsed and verified that the
+/// specified Declarator is a well-formed C++ non-static data member
+/// declaration. Now lex its initializer and store its tokens for parsing
+/// after the class is complete.
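+///
+/// For example, in
+///   struct S { int a = square(2); static int square(int x) { return x*x; } };
+/// the tokens 'square(2)' are cached here and parsed only after S is
+/// complete, so the call to the later-declared member is valid.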
+void Parser::ParseCXXNonStaticMemberInitializer(Decl *VarD) {
+ assert((Tok.is(tok::l_brace) || Tok.is(tok::equal)) &&
+ "Current token not a '{' or '='!");
+
+ LateParsedMemberInitializer *MI =
+ new LateParsedMemberInitializer(this, VarD);
+ getCurrentClass().LateParsedDeclarations.push_back(MI);
+ CachedTokens &Toks = MI->Toks;
+
+ tok::TokenKind kind = Tok.getKind();
+ if (kind == tok::equal) {
+ Toks.push_back(Tok);
+ ConsumeToken();
+ }
+
+ if (kind == tok::l_brace) {
+ // Begin by storing the '{' token.
+ Toks.push_back(Tok);
+ ConsumeBrace();
+
+ // Consume everything up to (and including) the matching right brace.
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/true);
+ } else {
+ // Consume everything up to (but excluding) the comma or semicolon.
+ ConsumeAndStoreUntil(tok::comma, Toks, /*StopAtSemi=*/true,
+ /*ConsumeFinalToken=*/false);
+ }
+
+ // Store an artificial EOF token to ensure that we don't run off the end of
+ // the initializer when we come to parse it.
+ Token Eof;
+ Eof.startToken();
+ Eof.setKind(tok::eof);
+ Eof.setLocation(Tok.getLocation());
+ Toks.push_back(Eof);
+}
+
+Parser::LateParsedDeclaration::~LateParsedDeclaration() {}
+void Parser::LateParsedDeclaration::ParseLexedMethodDeclarations() {}
+void Parser::LateParsedDeclaration::ParseLexedMemberInitializers() {}
+void Parser::LateParsedDeclaration::ParseLexedMethodDefs() {}
+
+Parser::LateParsedClass::LateParsedClass(Parser *P, ParsingClass *C)
+ : Self(P), Class(C) {}
+
+Parser::LateParsedClass::~LateParsedClass() {
+ Self->DeallocateParsedClasses(Class);
+}
+
+void Parser::LateParsedClass::ParseLexedMethodDeclarations() {
+ Self->ParseLexedMethodDeclarations(*Class);
+}
+
+void Parser::LateParsedClass::ParseLexedMemberInitializers() {
+ Self->ParseLexedMemberInitializers(*Class);
+}
+
+void Parser::LateParsedClass::ParseLexedMethodDefs() {
+ Self->ParseLexedMethodDefs(*Class);
+}
+
+void Parser::LateParsedMethodDeclaration::ParseLexedMethodDeclarations() {
+ Self->ParseLexedMethodDeclaration(*this);
+}
+
+void Parser::LexedMethod::ParseLexedMethodDefs() {
+ Self->ParseLexedMethodDef(*this);
+}
+
+void Parser::LateParsedMemberInitializer::ParseLexedMemberInitializers() {
+ Self->ParseLexedMemberInitializer(*this);
+}
+
+/// ParseLexedMethodDeclarations - We finished parsing the member
+/// specification of a top (non-nested) C++ class. Now go over the
+/// stack of method declarations with some parts for which parsing was
+/// delayed (such as default arguments) and parse them.
+void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) {
+ bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
+ ParseScope ClassTemplateScope(this, Scope::TemplateParamScope, HasTemplateScope);
+ if (HasTemplateScope)
+ Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
+
+ // The current scope is still active if we're the top-level class.
+ // Otherwise we'll need to push and enter a new scope.
+ bool HasClassScope = !Class.TopLevelClass;
+ ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope,
+ HasClassScope);
+ if (HasClassScope)
+ Actions.ActOnStartDelayedMemberDeclarations(getCurScope(), Class.TagOrTemplate);
+
+ for (size_t i = 0; i < Class.LateParsedDeclarations.size(); ++i) {
+ Class.LateParsedDeclarations[i]->ParseLexedMethodDeclarations();
+ }
+
+ if (HasClassScope)
+ Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(), Class.TagOrTemplate);
+}
+
+void Parser::ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM) {
+ // If this is a member template, introduce the template parameter scope.
+ ParseScope TemplateScope(this, Scope::TemplateParamScope, LM.TemplateScope);
+ if (LM.TemplateScope)
+ Actions.ActOnReenterTemplateScope(getCurScope(), LM.Method);
+
+ // Start the delayed C++ method declaration
+ Actions.ActOnStartDelayedCXXMethodDeclaration(getCurScope(), LM.Method);
+
+ // Introduce the parameters into scope and parse their default
+ // arguments.
+ ParseScope PrototypeScope(this,
+ Scope::FunctionPrototypeScope|Scope::DeclScope);
+ for (unsigned I = 0, N = LM.DefaultArgs.size(); I != N; ++I) {
+ // Introduce the parameter into scope.
+ Actions.ActOnDelayedCXXMethodParameter(getCurScope(),
+ LM.DefaultArgs[I].Param);
+
+ if (CachedTokens *Toks = LM.DefaultArgs[I].Toks) {
+ // Save the current token position.
+ SourceLocation origLoc = Tok.getLocation();
+
+ // Parse the default argument from its saved token stream.
+ Toks->push_back(Tok); // So that the current token doesn't get lost
+ PP.EnterTokenStream(&Toks->front(), Toks->size(), true, false);
+
+ // Consume the previously-pushed token.
+ ConsumeAnyToken();
+
+ // Consume the '='.
+ assert(Tok.is(tok::equal) && "Default argument not starting with '='");
+ SourceLocation EqualLoc = ConsumeToken();
+
+ // The argument isn't actually potentially evaluated unless it is
+ // used.
+ EnterExpressionEvaluationContext Eval(Actions,
+ Sema::PotentiallyEvaluatedIfUsed,
+ LM.DefaultArgs[I].Param);
+
+ ExprResult DefArgResult;
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+ DefArgResult = ParseBraceInitializer();
+ } else
+ DefArgResult = ParseAssignmentExpression();
+ if (DefArgResult.isInvalid())
+ Actions.ActOnParamDefaultArgumentError(LM.DefaultArgs[I].Param);
+ else {
+ if (Tok.is(tok::cxx_defaultarg_end))
+ ConsumeToken();
+ else
+ Diag(Tok.getLocation(), diag::err_default_arg_unparsed);
+ Actions.ActOnParamDefaultArgument(LM.DefaultArgs[I].Param, EqualLoc,
+ DefArgResult.take());
+ }
+
+ assert(!PP.getSourceManager().isBeforeInTranslationUnit(origLoc,
+ Tok.getLocation()) &&
+ "ParseAssignmentExpression went over the default arg tokens!");
+ // There could be leftover tokens (e.g. because of an error).
+ // Skip through until we reach the original token position.
+ while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+
+ delete Toks;
+ LM.DefaultArgs[I].Toks = 0;
+ }
+ }
+ PrototypeScope.Exit();
+
+ // Finish the delayed C++ method declaration.
+ Actions.ActOnFinishDelayedCXXMethodDeclaration(getCurScope(), LM.Method);
+}
+
+/// ParseLexedMethodDefs - We finished parsing the member specification of a top
+/// (non-nested) C++ class. Now go over the stack of lexed methods that were
+/// collected during its parsing and parse them all.
+void Parser::ParseLexedMethodDefs(ParsingClass &Class) {
+ bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
+ ParseScope ClassTemplateScope(this, Scope::TemplateParamScope, HasTemplateScope);
+ if (HasTemplateScope)
+ Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
+
+ bool HasClassScope = !Class.TopLevelClass;
+ ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope,
+ HasClassScope);
+
+ for (size_t i = 0; i < Class.LateParsedDeclarations.size(); ++i) {
+ Class.LateParsedDeclarations[i]->ParseLexedMethodDefs();
+ }
+}
+
+void Parser::ParseLexedMethodDef(LexedMethod &LM) {
+ // If this is a member template, introduce the template parameter scope.
+ ParseScope TemplateScope(this, Scope::TemplateParamScope, LM.TemplateScope);
+ if (LM.TemplateScope)
+ Actions.ActOnReenterTemplateScope(getCurScope(), LM.D);
+
+ // Save the current token position.
+ SourceLocation origLoc = Tok.getLocation();
+
+ assert(!LM.Toks.empty() && "Empty body!");
+ // Append the current token at the end of the new token stream so that it
+ // doesn't get lost.
+ LM.Toks.push_back(Tok);
+ PP.EnterTokenStream(LM.Toks.data(), LM.Toks.size(), true, false);
+
+ // Consume the previously pushed token.
+ ConsumeAnyToken();
+ assert((Tok.is(tok::l_brace) || Tok.is(tok::colon) || Tok.is(tok::kw_try))
+ && "Inline method not starting with '{', ':' or 'try'");
+
+ // Parse the method body. Function body parsing code is similar enough
+ // to be re-used for method bodies as well.
+ ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope);
+ Actions.ActOnStartOfFunctionDef(getCurScope(), LM.D);
+
+ if (Tok.is(tok::kw_try)) {
+ ParseFunctionTryBlock(LM.D, FnScope);
+ assert(!PP.getSourceManager().isBeforeInTranslationUnit(origLoc,
+ Tok.getLocation()) &&
+ "ParseFunctionTryBlock went over the cached tokens!");
+ // There could be leftover tokens (e.g. because of an error).
+ // Skip through until we reach the original token position.
+ while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+ return;
+ }
+ if (Tok.is(tok::colon)) {
+ ParseConstructorInitializer(LM.D);
+
+ // Error recovery.
+ if (!Tok.is(tok::l_brace)) {
+ FnScope.Exit();
+ Actions.ActOnFinishFunctionBody(LM.D, 0);
+ while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+ return;
+ }
+ } else
+ Actions.ActOnDefaultCtorInitializers(LM.D);
+
+ ParseFunctionStatementBody(LM.D, FnScope);
+
+ if (Tok.getLocation() != origLoc) {
+ // Due to a parsing error, we either went over the cached tokens or
+ // there are still cached tokens left. If it's the latter case, skip the
+ // leftover tokens.
+ // Since this is an uncommon situation that should be avoided, use the
+ // expensive isBeforeInTranslationUnit call.
+ if (PP.getSourceManager().isBeforeInTranslationUnit(Tok.getLocation(),
+ origLoc))
+ while (Tok.getLocation() != origLoc && Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+ }
+}
+
+/// ParseLexedMemberInitializers - We finished parsing the member specification
+/// of a top (non-nested) C++ class. Now go over the stack of lexed data member
+/// initializers that were collected during its parsing and parse them all.
+void Parser::ParseLexedMemberInitializers(ParsingClass &Class) {
+ bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
+ ParseScope ClassTemplateScope(this, Scope::TemplateParamScope,
+ HasTemplateScope);
+ if (HasTemplateScope)
+ Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
+
+ // Set or update the scope flags to include Scope::ThisScope.
+ bool AlreadyHasClassScope = Class.TopLevelClass;
+ unsigned ScopeFlags = Scope::ClassScope|Scope::DeclScope|Scope::ThisScope;
+ ParseScope ClassScope(this, ScopeFlags, !AlreadyHasClassScope);
+ ParseScopeFlags ClassScopeFlags(this, ScopeFlags, AlreadyHasClassScope);
+
+ if (!AlreadyHasClassScope)
+ Actions.ActOnStartDelayedMemberDeclarations(getCurScope(),
+ Class.TagOrTemplate);
+
+ for (size_t i = 0; i < Class.LateParsedDeclarations.size(); ++i) {
+ Class.LateParsedDeclarations[i]->ParseLexedMemberInitializers();
+ }
+
+ if (!AlreadyHasClassScope)
+ Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(),
+ Class.TagOrTemplate);
+
+ Actions.ActOnFinishDelayedMemberInitializers(Class.TagOrTemplate);
+}
+
+void Parser::ParseLexedMemberInitializer(LateParsedMemberInitializer &MI) {
+ if (!MI.Field || MI.Field->isInvalidDecl())
+ return;
+
+ // Append the current token at the end of the new token stream so that it
+ // doesn't get lost.
+ MI.Toks.push_back(Tok);
+ PP.EnterTokenStream(MI.Toks.data(), MI.Toks.size(), true, false);
+
+ // Consume the previously pushed token.
+ ConsumeAnyToken();
+
+ SourceLocation EqualLoc;
+ ExprResult Init = ParseCXXMemberInitializer(MI.Field, /*IsFunction=*/false,
+ EqualLoc);
+
+ Actions.ActOnCXXInClassMemberInitializer(MI.Field, EqualLoc, Init.release());
+
+ // The next token should be our artificial terminating EOF token.
+ if (Tok.isNot(tok::eof)) {
+ SourceLocation EndLoc = PP.getLocForEndOfToken(PrevTokLocation);
+ if (!EndLoc.isValid())
+ EndLoc = Tok.getLocation();
+ // No fixit; we can't recover as if there were a semicolon here.
+ Diag(EndLoc, diag::err_expected_semi_decl_list);
+
+ // Consume tokens until we hit the artificial EOF.
+ while (Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+ }
+ ConsumeAnyToken();
+}
+
+/// ConsumeAndStoreUntil - Consume tokens and store them in the passed token
+/// container until one of the tokens 'T1' or 'T2' is reached (which also
+/// gets consumed/stored, if ConsumeFinalToken).
+/// If StopAtSemi is true, then we will stop early at a ';' character.
+/// Returns true if token 'T1' or 'T2' was found.
+/// NOTE: This is a specialized version of Parser::SkipUntil.
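+/// For example, ParseCXXNonStaticMemberInitializer (above) uses this to cache
+/// the initializer tokens of 'a' in 'int a = x, b;' up to, but not including,
+/// the ',' that starts the next declarator.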
+bool Parser::ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
+ CachedTokens &Toks,
+ bool StopAtSemi, bool ConsumeFinalToken) {
+ // We always want this function to consume at least one token if the first
+ // token isn't T and if not at EOF.
+ bool isFirstTokenConsumed = true;
+ while (1) {
+ // If we found one of the tokens, stop and return true.
+ if (Tok.is(T1) || Tok.is(T2)) {
+ if (ConsumeFinalToken) {
+ Toks.push_back(Tok);
+ ConsumeAnyToken();
+ }
+ return true;
+ }
+
+ switch (Tok.getKind()) {
+ case tok::eof:
+ // Ran out of tokens.
+ return false;
+
+ case tok::l_paren:
+ // Recursively consume properly-nested parens.
+ Toks.push_back(Tok);
+ ConsumeParen();
+ ConsumeAndStoreUntil(tok::r_paren, Toks, /*StopAtSemi=*/false);
+ break;
+ case tok::l_square:
+ // Recursively consume properly-nested square brackets.
+ Toks.push_back(Tok);
+ ConsumeBracket();
+ ConsumeAndStoreUntil(tok::r_square, Toks, /*StopAtSemi=*/false);
+ break;
+ case tok::l_brace:
+ // Recursively consume properly-nested braces.
+ Toks.push_back(Tok);
+ ConsumeBrace();
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+ break;
+
+ // Okay, we found a ']' or '}' or ')', which we think should be balanced.
+ // Since the user wasn't looking for this token (if they were, it would
+ // already be handled), this isn't balanced. If there is a LHS token at a
+ // higher level, we will assume that this matches the unbalanced token
+ // and return it. Otherwise, this is a spurious RHS token, which we skip.
+ case tok::r_paren:
+ if (ParenCount && !isFirstTokenConsumed)
+ return false; // Matches something.
+ Toks.push_back(Tok);
+ ConsumeParen();
+ break;
+ case tok::r_square:
+ if (BracketCount && !isFirstTokenConsumed)
+ return false; // Matches something.
+ Toks.push_back(Tok);
+ ConsumeBracket();
+ break;
+ case tok::r_brace:
+ if (BraceCount && !isFirstTokenConsumed)
+ return false; // Matches something.
+ Toks.push_back(Tok);
+ ConsumeBrace();
+ break;
+
+ case tok::code_completion:
+ Toks.push_back(Tok);
+ ConsumeCodeCompletionToken();
+ break;
+
+ case tok::string_literal:
+ case tok::wide_string_literal:
+ case tok::utf8_string_literal:
+ case tok::utf16_string_literal:
+ case tok::utf32_string_literal:
+ Toks.push_back(Tok);
+ ConsumeStringToken();
+ break;
+ case tok::semi:
+ if (StopAtSemi)
+ return false;
+ // FALL THROUGH.
+ default:
+ // consume this token.
+ Toks.push_back(Tok);
+ ConsumeToken();
+ break;
+ }
+ isFirstTokenConsumed = false;
+ }
+}
+
+/// \brief Consume tokens and store them in the passed token container until
+/// we've passed the try keyword and constructor initializers and have consumed
+/// the opening brace of the function body. The opening brace will be consumed
+/// if and only if there was no error.
+///
+/// \return True on error.
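+///
+/// For example, for the inline definition
+///   S() try : a(1), b{2} {
+/// everything up to and including the final '{' is stored, including the
+/// 'try' keyword and the mem-initializers.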
+bool Parser::ConsumeAndStoreFunctionPrologue(CachedTokens &Toks) {
+ if (Tok.is(tok::kw_try)) {
+ Toks.push_back(Tok);
+ ConsumeToken();
+ }
+ bool ReadInitializer = false;
+ if (Tok.is(tok::colon)) {
+ // Initializers can contain braces too.
+ Toks.push_back(Tok);
+ ConsumeToken();
+
+ while (Tok.is(tok::identifier) || Tok.is(tok::coloncolon)) {
+ if (Tok.is(tok::eof) || Tok.is(tok::semi))
+ return Diag(Tok.getLocation(), diag::err_expected_lbrace);
+
+ // Grab the identifier.
+ if (!ConsumeAndStoreUntil(tok::l_paren, tok::l_brace, Toks,
+ /*StopAtSemi=*/true,
+ /*ConsumeFinalToken=*/false))
+ return Diag(Tok.getLocation(), diag::err_expected_lparen);
+
+ tok::TokenKind kind = Tok.getKind();
+ Toks.push_back(Tok);
+ bool IsLParen = (kind == tok::l_paren);
+ SourceLocation LOpen = Tok.getLocation();
+
+ if (IsLParen) {
+ ConsumeParen();
+ } else {
+ assert(kind == tok::l_brace && "Must be left paren or brace here.");
+ ConsumeBrace();
+ // In C++03, this has to be the start of the function body, which
+ // means the initializer is malformed; we'll diagnose it later.
+ if (!getLangOpts().CPlusPlus0x)
+ return false;
+ }
+
+ // Grab the initializer
+ if (!ConsumeAndStoreUntil(IsLParen ? tok::r_paren : tok::r_brace,
+ Toks, /*StopAtSemi=*/true)) {
+ Diag(Tok, IsLParen ? diag::err_expected_rparen :
+ diag::err_expected_rbrace);
+ Diag(LOpen, diag::note_matching) << (IsLParen ? "(" : "{");
+ return true;
+ }
+
+ // Grab pack ellipsis, if present
+ if (Tok.is(tok::ellipsis)) {
+ Toks.push_back(Tok);
+ ConsumeToken();
+ }
+
+ // Grab the separating comma, if any.
+ if (Tok.is(tok::comma)) {
+ Toks.push_back(Tok);
+ ConsumeToken();
+ } else if (Tok.isNot(tok::l_brace)) {
+ ReadInitializer = true;
+ break;
+ }
+ }
+ }
+
+ // Grab any remaining garbage to be diagnosed later. We stop when we reach a
+ // brace: an opening one is the function body, while a closing one probably
+ // means we've reached the end of the class.
+ ConsumeAndStoreUntil(tok::l_brace, tok::r_brace, Toks,
+ /*StopAtSemi=*/true,
+ /*ConsumeFinalToken=*/false);
+ if (Tok.isNot(tok::l_brace)) {
+ if (ReadInitializer)
+ return Diag(Tok.getLocation(), diag::err_expected_lbrace_or_comma);
+ return Diag(Tok.getLocation(), diag::err_expected_lbrace);
+ }
+
+ Toks.push_back(Tok);
+ ConsumeBrace();
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
new file mode 100644
index 0000000..cf3dca2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseDecl.cpp
@@ -0,0 +1,4838 @@
+//===--- ParseDecl.cpp - Declaration Parsing ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Declaration portions of the Parser interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Basic/OpenCL.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/PrettyDeclStackTrace.h"
+#include "RAIIObjectsForParser.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// C99 6.7: Declarations.
+//===----------------------------------------------------------------------===//
+
+/// ParseTypeName
+/// type-name: [C99 6.7.6]
+/// specifier-qualifier-list abstract-declarator[opt]
+///
+/// Called type-id in C++.
+TypeResult Parser::ParseTypeName(SourceRange *Range,
+ Declarator::TheContext Context,
+ AccessSpecifier AS,
+ Decl **OwnedType) {
+ DeclSpecContext DSC = getDeclSpecContextFromDeclaratorContext(Context);
+
+ // Parse the common declaration-specifiers piece.
+ DeclSpec DS(AttrFactory);
+ ParseSpecifierQualifierList(DS, AS, DSC);
+ if (OwnedType)
+ *OwnedType = DS.isTypeSpecOwned() ? DS.getRepAsDecl() : 0;
+
+ // Parse the abstract-declarator, if present.
+ Declarator DeclaratorInfo(DS, Context);
+ ParseDeclarator(DeclaratorInfo);
+ if (Range)
+ *Range = DeclaratorInfo.getSourceRange();
+
+ if (DeclaratorInfo.isInvalidType())
+ return true;
+
+ return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+}
+
+
+/// isAttributeLateParsed - Return true if the attribute has arguments that
+/// require late parsing.
+static bool isAttributeLateParsed(const IdentifierInfo &II) {
+ return llvm::StringSwitch<bool>(II.getName())
+#include "clang/Parse/AttrLateParsed.inc"
+ .Default(false);
+}
+
+
+/// ParseGNUAttributes - Parse a non-empty attributes list.
+///
+/// [GNU] attributes:
+/// attribute
+/// attributes attribute
+///
+/// [GNU] attribute:
+/// '__attribute__' '(' '(' attribute-list ')' ')'
+///
+/// [GNU] attribute-list:
+/// attrib
+/// attribute_list ',' attrib
+///
+/// [GNU] attrib:
+/// empty
+/// attrib-name
+/// attrib-name '(' identifier ')'
+/// attrib-name '(' identifier ',' nonempty-expr-list ')'
+/// attrib-name '(' argument-expression-list [C99 6.5.2] ')'
+///
+/// [GNU] attrib-name:
+/// identifier
+/// typespec
+/// typequal
+/// storageclass
+///
+/// FIXME: The GCC grammar/code for this construct implies we need two
+/// token lookahead. Comment from gcc: "If they start with an identifier
+/// which is followed by a comma or close parenthesis, then the arguments
+/// start with that identifier; otherwise they are an expression list."
+///
+/// GCC does not require the ',' between attribs in an attribute-list.
+///
+/// At the moment, I am not doing 2 token lookahead. I am also unaware of
+/// any attributes that don't work (based on my limited testing). Most
+/// attributes are very simple in practice. Until we find a bug, I don't see
+/// a pressing need to implement the 2 token lookahead.
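+///
+/// For example:
+///   __attribute__((noreturn, format(printf, 1, 2), aligned(16)))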
+
+void Parser::ParseGNUAttributes(ParsedAttributes &attrs,
+ SourceLocation *endLoc,
+ LateParsedAttrList *LateAttrs) {
+ assert(Tok.is(tok::kw___attribute) && "Not a GNU attribute list!");
+
+ while (Tok.is(tok::kw___attribute)) {
+ ConsumeToken();
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after,
+ "attribute")) {
+ SkipUntil(tok::r_paren, true); // skip until ) or ;
+ return;
+ }
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after, "(")) {
+ SkipUntil(tok::r_paren, true); // skip until ) or ;
+ return;
+ }
+ // Parse the attribute-list. e.g. __attribute__(( weak, alias("__f") ))
+ while (Tok.is(tok::identifier) || isDeclarationSpecifier() ||
+ Tok.is(tok::comma)) {
+ if (Tok.is(tok::comma)) {
+ // allows for empty/non-empty attributes. ((__vector_size__(16),,,,))
+ ConsumeToken();
+ continue;
+ }
+ // we have an identifier or declaration specifier (const, int, etc.)
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+
+ if (Tok.is(tok::l_paren)) {
+ // handle "parameterized" attributes
+ if (LateAttrs && isAttributeLateParsed(*AttrName)) {
+ LateParsedAttribute *LA =
+ new LateParsedAttribute(this, *AttrName, AttrNameLoc);
+ LateAttrs->push_back(LA);
+
+ // Attributes in a class are parsed at the end of the class, along
+ // with other late-parsed declarations.
+ if (!ClassStack.empty())
+ getCurrentClass().LateParsedDeclarations.push_back(LA);
+
+ // consume everything up to and including the matching right parens
+ ConsumeAndStoreUntil(tok::r_paren, LA->Toks, true, false);
+
+ Token Eof;
+ Eof.startToken();
+ Eof.setLocation(Tok.getLocation());
+ LA->Toks.push_back(Eof);
+ } else {
+ ParseGNUAttributeArgs(AttrName, AttrNameLoc, attrs, endLoc);
+ }
+ } else {
+ attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc,
+ 0, SourceLocation(), 0, 0);
+ }
+ }
+ if (ExpectAndConsume(tok::r_paren, diag::err_expected_rparen))
+ SkipUntil(tok::r_paren, false);
+ SourceLocation Loc = Tok.getLocation();
+ if (ExpectAndConsume(tok::r_paren, diag::err_expected_rparen)) {
+ SkipUntil(tok::r_paren, false);
+ }
+ if (endLoc)
+ *endLoc = Loc;
+ }
+}
+
+
+/// Parse the arguments to a parameterized GNU attribute
+void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
+ SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs,
+ SourceLocation *EndLoc) {
+
+ assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
+
+ // Availability attributes have their own grammar.
+ if (AttrName->isStr("availability")) {
+ ParseAvailabilityAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc);
+ return;
+ }
+ // Thread safety attributes fit into the FIXME case above, so we
+ // just parse the arguments as a list of expressions
+ if (IsThreadSafetyAttribute(AttrName->getName())) {
+ ParseThreadSafetyAttribute(*AttrName, AttrNameLoc, Attrs, EndLoc);
+ return;
+ }
+
+ ConsumeParen(); // ignore the left paren loc for now
+
+ IdentifierInfo *ParmName = 0;
+ SourceLocation ParmLoc;
+ bool BuiltinType = false;
+
+ switch (Tok.getKind()) {
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_bool:
+ case tok::kw_short:
+ case tok::kw_int:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_void:
+ case tok::kw_typeof:
+ // __attribute__(( vec_type_hint(char) ))
+ // FIXME: Don't just discard the builtin type token.
+ ConsumeToken();
+ BuiltinType = true;
+ break;
+
+ case tok::identifier:
+ ParmName = Tok.getIdentifierInfo();
+ ParmLoc = ConsumeToken();
+ break;
+
+ default:
+ break;
+ }
+
+ ExprVector ArgExprs(Actions);
+
+ if (!BuiltinType &&
+ (ParmLoc.isValid() ? Tok.is(tok::comma) : Tok.isNot(tok::r_paren))) {
+ // Eat the comma.
+ if (ParmLoc.isValid())
+ ConsumeToken();
+
+ // Parse the non-empty comma-separated list of expressions.
+ while (1) {
+ ExprResult ArgExpr(ParseAssignmentExpression());
+ if (ArgExpr.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return;
+ }
+ ArgExprs.push_back(ArgExpr.release());
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken(); // Eat the comma, move to the next argument
+ }
+ }
+ else if (Tok.is(tok::less) && AttrName->isStr("iboutletcollection")) {
+ if (!ExpectAndConsume(tok::less, diag::err_expected_less_after, "<",
+ tok::greater)) {
+ while (Tok.is(tok::identifier)) {
+ ConsumeToken();
+ if (Tok.is(tok::greater))
+ break;
+ if (Tok.is(tok::comma)) {
+ ConsumeToken();
+ continue;
+ }
+ }
+ if (Tok.isNot(tok::greater))
+ Diag(Tok, diag::err_iboutletcollection_with_protocol);
+ SkipUntil(tok::r_paren, false, true); // skip until ')'
+ }
+ }
+
+ SourceLocation RParen = Tok.getLocation();
+ if (!ExpectAndConsume(tok::r_paren, diag::err_expected_rparen)) {
+ AttributeList *attr =
+ Attrs.addNew(AttrName, SourceRange(AttrNameLoc, RParen), 0, AttrNameLoc,
+ ParmName, ParmLoc, ArgExprs.take(), ArgExprs.size());
+ if (BuiltinType && attr->getKind() == AttributeList::AT_iboutletcollection)
+ Diag(Tok, diag::err_iboutletcollection_builtintype);
+ }
+}
+
+
+/// ParseMicrosoftDeclSpec - Parse an __declspec construct
+///
+/// [MS] decl-specifier:
+/// __declspec ( extended-decl-modifier-seq )
+///
+/// [MS] extended-decl-modifier-seq:
+/// extended-decl-modifier[opt]
+/// extended-decl-modifier extended-decl-modifier-seq
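+///
+/// For example:
+///   __declspec(dllexport) __declspec(align(16)) __declspec(deprecated("msg"))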
+
+void Parser::ParseMicrosoftDeclSpec(ParsedAttributes &attrs) {
+ assert(Tok.is(tok::kw___declspec) && "Not a declspec!");
+
+ ConsumeToken();
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen_after,
+ "declspec")) {
+ SkipUntil(tok::r_paren, true); // skip until ) or ;
+ return;
+ }
+
+ while (Tok.getIdentifierInfo()) {
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+
+ // FIXME: Remove this when we have proper __declspec(property()) support.
+ // Just skip everything inside property().
+ if (AttrName->getName() == "property") {
+ ConsumeParen();
+ SkipUntil(tok::r_paren);
+ }
+ if (Tok.is(tok::l_paren)) {
+ ConsumeParen();
+ // FIXME: This doesn't parse __declspec(property(get=get_func_name))
+ // correctly.
+ ExprResult ArgExpr(ParseAssignmentExpression());
+ if (!ArgExpr.isInvalid()) {
+ Expr *ExprList = ArgExpr.take();
+ attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc, 0,
+ SourceLocation(), &ExprList, 1, true);
+ }
+ if (ExpectAndConsume(tok::r_paren, diag::err_expected_rparen))
+ SkipUntil(tok::r_paren, false);
+ } else {
+ attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc,
+ 0, SourceLocation(), 0, 0, true);
+ }
+ }
+ if (ExpectAndConsume(tok::r_paren, diag::err_expected_rparen))
+ SkipUntil(tok::r_paren, false);
+ return;
+}
+
+void Parser::ParseMicrosoftTypeAttributes(ParsedAttributes &attrs) {
+ // Treat these like attributes
+ // FIXME: Allow Sema to distinguish between these and real attributes!
+ while (Tok.is(tok::kw___fastcall) || Tok.is(tok::kw___stdcall) ||
+ Tok.is(tok::kw___thiscall) || Tok.is(tok::kw___cdecl) ||
+ Tok.is(tok::kw___ptr64) || Tok.is(tok::kw___w64) ||
+ Tok.is(tok::kw___ptr32) ||
+ Tok.is(tok::kw___unaligned)) {
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+ if (Tok.is(tok::kw___ptr64) || Tok.is(tok::kw___w64) ||
+ Tok.is(tok::kw___ptr32))
+ // FIXME: Support these properly!
+ continue;
+ attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc, 0,
+ SourceLocation(), 0, 0, true);
+ }
+}
+
+void Parser::ParseBorlandTypeAttributes(ParsedAttributes &attrs) {
+ // Treat these like attributes
+ while (Tok.is(tok::kw___pascal)) {
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+ attrs.addNew(AttrName, AttrNameLoc, 0, AttrNameLoc, 0,
+ SourceLocation(), 0, 0, true);
+ }
+}
+
+void Parser::ParseOpenCLAttributes(ParsedAttributes &attrs) {
+ // Treat these like attributes
+ while (Tok.is(tok::kw___kernel)) {
+ SourceLocation AttrNameLoc = ConsumeToken();
+ attrs.addNew(PP.getIdentifierInfo("opencl_kernel_function"),
+ AttrNameLoc, 0, AttrNameLoc, 0,
+ SourceLocation(), 0, 0, false);
+ }
+}
+
+void Parser::ParseOpenCLQualifiers(DeclSpec &DS) {
+ SourceLocation Loc = Tok.getLocation();
+ switch(Tok.getKind()) {
+ // OpenCL qualifiers:
+ case tok::kw___private:
+ case tok::kw_private:
+ DS.getAttributes().addNewInteger(
+ Actions.getASTContext(),
+ PP.getIdentifierInfo("address_space"), Loc, 0);
+ break;
+
+ case tok::kw___global:
+ DS.getAttributes().addNewInteger(
+ Actions.getASTContext(),
+ PP.getIdentifierInfo("address_space"), Loc, LangAS::opencl_global);
+ break;
+
+ case tok::kw___local:
+ DS.getAttributes().addNewInteger(
+ Actions.getASTContext(),
+ PP.getIdentifierInfo("address_space"), Loc, LangAS::opencl_local);
+ break;
+
+ case tok::kw___constant:
+ DS.getAttributes().addNewInteger(
+ Actions.getASTContext(),
+ PP.getIdentifierInfo("address_space"), Loc, LangAS::opencl_constant);
+ break;
+
+ case tok::kw___read_only:
+ DS.getAttributes().addNewInteger(
+ Actions.getASTContext(),
+ PP.getIdentifierInfo("opencl_image_access"), Loc, CLIA_read_only);
+ break;
+
+ case tok::kw___write_only:
+ DS.getAttributes().addNewInteger(
+ Actions.getASTContext(),
+ PP.getIdentifierInfo("opencl_image_access"), Loc, CLIA_write_only);
+ break;
+
+ case tok::kw___read_write:
+ DS.getAttributes().addNewInteger(
+ Actions.getASTContext(),
+ PP.getIdentifierInfo("opencl_image_access"), Loc, CLIA_read_write);
+ break;
+ default: break;
+ }
+}
+
+/// \brief Parse a version number.
+///
+/// version:
+/// simple-integer
+/// simple-integer ',' simple-integer
+/// simple-integer ',' simple-integer ',' simple-integer
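+///
+/// For example, '10.6.8' is lexed as a single numeric constant and is parsed
+/// here into the version tuple (10, 6, 8).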
+VersionTuple Parser::ParseVersionTuple(SourceRange &Range) {
+ Range = Tok.getLocation();
+
+ if (!Tok.is(tok::numeric_constant)) {
+ Diag(Tok, diag::err_expected_version);
+ SkipUntil(tok::comma, tok::r_paren, true, true, true);
+ return VersionTuple();
+ }
+
+ // Parse the major (and possibly minor and subminor) versions, which
+ // are stored in the numeric constant. We utilize a quirk of the
+ // lexer, which is that it handles something like 1.2.3 as a single
+ // numeric constant, rather than two separate tokens.
+ SmallString<512> Buffer;
+ Buffer.resize(Tok.getLength()+1);
+ const char *ThisTokBegin = &Buffer[0];
+
+ // Get the spelling of the token, which eliminates trigraphs, etc.
+ bool Invalid = false;
+ unsigned ActualLength = PP.getSpelling(Tok, ThisTokBegin, &Invalid);
+ if (Invalid)
+ return VersionTuple();
+
+ // Parse the major version.
+ unsigned AfterMajor = 0;
+ unsigned Major = 0;
+ while (AfterMajor < ActualLength && isdigit(ThisTokBegin[AfterMajor])) {
+ Major = Major * 10 + ThisTokBegin[AfterMajor] - '0';
+ ++AfterMajor;
+ }
+
+ if (AfterMajor == 0) {
+ Diag(Tok, diag::err_expected_version);
+ SkipUntil(tok::comma, tok::r_paren, true, true, true);
+ return VersionTuple();
+ }
+
+ if (AfterMajor == ActualLength) {
+ ConsumeToken();
+
+ // We only had a single version component.
+ if (Major == 0) {
+ Diag(Tok, diag::err_zero_version);
+ return VersionTuple();
+ }
+
+ return VersionTuple(Major);
+ }
+
+ if (ThisTokBegin[AfterMajor] != '.' || (AfterMajor + 1 == ActualLength)) {
+ Diag(Tok, diag::err_expected_version);
+ SkipUntil(tok::comma, tok::r_paren, true, true, true);
+ return VersionTuple();
+ }
+
+ // Parse the minor version.
+ unsigned AfterMinor = AfterMajor + 1;
+ unsigned Minor = 0;
+ while (AfterMinor < ActualLength && isdigit(ThisTokBegin[AfterMinor])) {
+ Minor = Minor * 10 + ThisTokBegin[AfterMinor] - '0';
+ ++AfterMinor;
+ }
+
+ if (AfterMinor == ActualLength) {
+ ConsumeToken();
+
+ // We had major.minor.
+ if (Major == 0 && Minor == 0) {
+ Diag(Tok, diag::err_zero_version);
+ return VersionTuple();
+ }
+
+ return VersionTuple(Major, Minor);
+ }
+
+ // If what follows is not a '.', we have a problem.
+ if (ThisTokBegin[AfterMinor] != '.') {
+ Diag(Tok, diag::err_expected_version);
+ SkipUntil(tok::comma, tok::r_paren, true, true, true);
+ return VersionTuple();
+ }
+
+ // Parse the subminor version.
+ unsigned AfterSubminor = AfterMinor + 1;
+ unsigned Subminor = 0;
+ while (AfterSubminor < ActualLength && isdigit(ThisTokBegin[AfterSubminor])) {
+ Subminor = Subminor * 10 + ThisTokBegin[AfterSubminor] - '0';
+ ++AfterSubminor;
+ }
+
+ if (AfterSubminor != ActualLength) {
+ Diag(Tok, diag::err_expected_version);
+ SkipUntil(tok::comma, tok::r_paren, true, true, true);
+ return VersionTuple();
+ }
+ ConsumeToken();
+ return VersionTuple(Major, Minor, Subminor);
+}
+
+/// \brief Parse the contents of the "availability" attribute.
+///
+/// availability-attribute:
+/// 'availability' '(' platform ',' version-arg-list ',' opt-message ')'
+///
+/// platform:
+/// identifier
+///
+/// version-arg-list:
+/// version-arg
+/// version-arg ',' version-arg-list
+///
+/// version-arg:
+/// 'introduced' '=' version
+/// 'deprecated' '=' version
+/// 'obsoleted' '=' version
+/// 'unavailable'
+///
+/// opt-message:
+/// 'message' '=' <string>
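+///
+/// For example:
+///   availability(macosx, introduced=10.4, deprecated=10.6, obsoleted=10.7,
+///                message="use the replacement API instead")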
+void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
+ SourceLocation AvailabilityLoc,
+ ParsedAttributes &attrs,
+ SourceLocation *endLoc) {
+ SourceLocation PlatformLoc;
+ IdentifierInfo *Platform = 0;
+
+ enum { Introduced, Deprecated, Obsoleted, Unknown };
+ AvailabilityChange Changes[Unknown];
+ ExprResult MessageExpr;
+
+ // Opening '('.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected_lparen);
+ return;
+ }
+
+ // Parse the platform name.
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_availability_expected_platform);
+ SkipUntil(tok::r_paren);
+ return;
+ }
+ Platform = Tok.getIdentifierInfo();
+ PlatformLoc = ConsumeToken();
+
+ // Parse the ',' following the platform name.
+ if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "", tok::r_paren))
+ return;
+
+ // If we haven't grabbed the pointers for the identifiers
+ // "introduced", "deprecated", and "obsoleted", do so now.
+ if (!Ident_introduced) {
+ Ident_introduced = PP.getIdentifierInfo("introduced");
+ Ident_deprecated = PP.getIdentifierInfo("deprecated");
+ Ident_obsoleted = PP.getIdentifierInfo("obsoleted");
+ Ident_unavailable = PP.getIdentifierInfo("unavailable");
+ Ident_message = PP.getIdentifierInfo("message");
+ }
+
+ // Parse the set of introductions/deprecations/removals.
+ SourceLocation UnavailableLoc;
+ do {
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_availability_expected_change);
+ SkipUntil(tok::r_paren);
+ return;
+ }
+ IdentifierInfo *Keyword = Tok.getIdentifierInfo();
+ SourceLocation KeywordLoc = ConsumeToken();
+
+ if (Keyword == Ident_unavailable) {
+ if (UnavailableLoc.isValid()) {
+ Diag(KeywordLoc, diag::err_availability_redundant)
+ << Keyword << SourceRange(UnavailableLoc);
+ }
+ UnavailableLoc = KeywordLoc;
+
+ if (Tok.isNot(tok::comma))
+ break;
+
+ ConsumeToken();
+ continue;
+ }
+
+ if (Tok.isNot(tok::equal)) {
+ Diag(Tok, diag::err_expected_equal_after)
+ << Keyword;
+ SkipUntil(tok::r_paren);
+ return;
+ }
+ ConsumeToken();
+ if (Keyword == Ident_message) {
+ if (!isTokenStringLiteral()) {
+ Diag(Tok, diag::err_expected_string_literal);
+ SkipUntil(tok::r_paren);
+ return;
+ }
+ MessageExpr = ParseStringLiteralExpression();
+ break;
+ }
+
+ SourceRange VersionRange;
+ VersionTuple Version = ParseVersionTuple(VersionRange);
+
+ if (Version.empty()) {
+ SkipUntil(tok::r_paren);
+ return;
+ }
+
+ unsigned Index;
+ if (Keyword == Ident_introduced)
+ Index = Introduced;
+ else if (Keyword == Ident_deprecated)
+ Index = Deprecated;
+ else if (Keyword == Ident_obsoleted)
+ Index = Obsoleted;
+ else
+ Index = Unknown;
+
+ if (Index < Unknown) {
+ if (!Changes[Index].KeywordLoc.isInvalid()) {
+ Diag(KeywordLoc, diag::err_availability_redundant)
+ << Keyword
+ << SourceRange(Changes[Index].KeywordLoc,
+ Changes[Index].VersionRange.getEnd());
+ }
+
+ Changes[Index].KeywordLoc = KeywordLoc;
+ Changes[Index].Version = Version;
+ Changes[Index].VersionRange = VersionRange;
+ } else {
+ Diag(KeywordLoc, diag::err_availability_unknown_change)
+ << Keyword << VersionRange;
+ }
+
+ if (Tok.isNot(tok::comma))
+ break;
+
+ ConsumeToken();
+ } while (true);
+
+ // Closing ')'.
+ if (T.consumeClose())
+ return;
+
+ if (endLoc)
+ *endLoc = T.getCloseLocation();
+
+ // The 'unavailable' availability cannot be combined with any other
+ // availability changes. Make sure that hasn't happened.
+ if (UnavailableLoc.isValid()) {
+ bool Complained = false;
+ for (unsigned Index = Introduced; Index != Unknown; ++Index) {
+ if (Changes[Index].KeywordLoc.isValid()) {
+ if (!Complained) {
+ Diag(UnavailableLoc, diag::warn_availability_and_unavailable)
+ << SourceRange(Changes[Index].KeywordLoc,
+ Changes[Index].VersionRange.getEnd());
+ Complained = true;
+ }
+
+ // Clear out the availability.
+ Changes[Index] = AvailabilityChange();
+ }
+ }
+ }
+
+ // Record this attribute
+ attrs.addNew(&Availability,
+ SourceRange(AvailabilityLoc, T.getCloseLocation()),
+ 0, AvailabilityLoc,
+ Platform, PlatformLoc,
+ Changes[Introduced],
+ Changes[Deprecated],
+ Changes[Obsoleted],
+ UnavailableLoc, MessageExpr.take(),
+ false, false);
+}
+
+
+// Late Parsed Attributes:
+// See other examples of late parsing in lib/Parse/ParseCXXInlineMethods
+
+void Parser::LateParsedDeclaration::ParseLexedAttributes() {}
+
+void Parser::LateParsedClass::ParseLexedAttributes() {
+ Self->ParseLexedAttributes(*Class);
+}
+
+void Parser::LateParsedAttribute::ParseLexedAttributes() {
+ Self->ParseLexedAttribute(*this, true, false);
+}
+
+/// Wrapper class which calls ParseLexedAttribute, after setting up the
+/// scope appropriately.
+void Parser::ParseLexedAttributes(ParsingClass &Class) {
+ // Deal with templates
+ // FIXME: Test cases to make sure this does the right thing for templates.
+ bool HasTemplateScope = !Class.TopLevelClass && Class.TemplateScope;
+ ParseScope ClassTemplateScope(this, Scope::TemplateParamScope,
+ HasTemplateScope);
+ if (HasTemplateScope)
+ Actions.ActOnReenterTemplateScope(getCurScope(), Class.TagOrTemplate);
+
+ // Set or update the scope flags to include Scope::ThisScope.
+ bool AlreadyHasClassScope = Class.TopLevelClass;
+ unsigned ScopeFlags = Scope::ClassScope|Scope::DeclScope|Scope::ThisScope;
+ ParseScope ClassScope(this, ScopeFlags, !AlreadyHasClassScope);
+ ParseScopeFlags ClassScopeFlags(this, ScopeFlags, AlreadyHasClassScope);
+
+ // Enter the scope of nested classes
+ if (!AlreadyHasClassScope)
+ Actions.ActOnStartDelayedMemberDeclarations(getCurScope(),
+ Class.TagOrTemplate);
+
+ for (unsigned i = 0, ni = Class.LateParsedDeclarations.size(); i < ni; ++i) {
+ Class.LateParsedDeclarations[i]->ParseLexedAttributes();
+ }
+
+ if (!AlreadyHasClassScope)
+ Actions.ActOnFinishDelayedMemberDeclarations(getCurScope(),
+ Class.TagOrTemplate);
+}
+
+
+/// \brief Parse all attributes in LAs, and attach them to Decl D.
+void Parser::ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
+ bool EnterScope, bool OnDefinition) {
+ for (unsigned i = 0, ni = LAs.size(); i < ni; ++i) {
+ LAs[i]->addDecl(D);
+ ParseLexedAttribute(*LAs[i], EnterScope, OnDefinition);
+ }
+ LAs.clear();
+}
+
+
+/// \brief Finish parsing an attribute for which parsing was delayed.
+/// This will be called at the end of parsing a class declaration
+/// for each LateParsedAttribute. We consume the saved tokens and
+/// create an attribute with the arguments filled in. We add this
+/// to the Attribute list for the decl.
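+///
+/// For example, in
+///   struct Account { int balance __attribute__((guarded_by(mu))); Mutex mu; };
+/// the argument 'mu' names a member declared after the attribute, so the
+/// attribute tokens are cached and only parsed here, once the class is
+/// complete. (The 'Mutex' type is purely illustrative.)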
+void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
+ bool EnterScope, bool OnDefinition) {
+ // Save the current token position.
+ SourceLocation OrigLoc = Tok.getLocation();
+
+ // Append the current token at the end of the new token stream so that it
+ // doesn't get lost.
+ LA.Toks.push_back(Tok);
+ PP.EnterTokenStream(LA.Toks.data(), LA.Toks.size(), true, false);
+ // Consume the previously pushed token.
+ ConsumeAnyToken();
+
+ if (OnDefinition && !IsThreadSafetyAttribute(LA.AttrName.getName())) {
+ Diag(Tok, diag::warn_attribute_on_function_definition)
+ << LA.AttrName.getName();
+ }
+
+ ParsedAttributes Attrs(AttrFactory);
+ SourceLocation endLoc;
+
+ if (LA.Decls.size() == 1) {
+ Decl *D = LA.Decls[0];
+
+ // If the Decl is templatized, add template parameters to scope.
+ bool HasTemplateScope = EnterScope && D->isTemplateDecl();
+ ParseScope TempScope(this, Scope::TemplateParamScope, HasTemplateScope);
+ if (HasTemplateScope)
+ Actions.ActOnReenterTemplateScope(Actions.CurScope, D);
+
+ // If the Decl is on a function, add function parameters to the scope.
+ bool HasFunctionScope = EnterScope && D->isFunctionOrFunctionTemplate();
+ ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope, HasFunctionScope);
+ if (HasFunctionScope)
+ Actions.ActOnReenterFunctionContext(Actions.CurScope, D);
+
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc);
+
+ if (HasFunctionScope) {
+ Actions.ActOnExitFunctionContext();
+ FnScope.Exit(); // Pop scope, and remove Decls from IdResolver
+ }
+ if (HasTemplateScope) {
+ TempScope.Exit();
+ }
+ } else if (LA.Decls.size() > 0) {
+ // If there are multiple decls, then the decl cannot be within the
+ // function scope.
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc);
+ } else {
+ Diag(Tok, diag::warn_attribute_no_decl) << LA.AttrName.getName();
+ }
+
+ for (unsigned i = 0, ni = LA.Decls.size(); i < ni; ++i) {
+ Actions.ActOnFinishDelayedAttribute(getCurScope(), LA.Decls[i], Attrs);
+ }
+
+ if (Tok.getLocation() != OrigLoc) {
+ // Due to a parsing error, we either went over the cached tokens or
+ // there are still cached tokens left, so we skip the leftover tokens.
+ // Since this is an uncommon situation that should be avoided, use the
+ // expensive isBeforeInTranslationUnit call.
+ if (PP.getSourceManager().isBeforeInTranslationUnit(Tok.getLocation(),
+ OrigLoc))
+ while (Tok.getLocation() != OrigLoc && Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+ }
+}
+
+/// \brief Determine whether AttrName names one of the thread safety
+/// attributes.
+bool Parser::IsThreadSafetyAttribute(llvm::StringRef AttrName){
+ return llvm::StringSwitch<bool>(AttrName)
+ .Case("guarded_by", true)
+ .Case("guarded_var", true)
+ .Case("pt_guarded_by", true)
+ .Case("pt_guarded_var", true)
+ .Case("lockable", true)
+ .Case("scoped_lockable", true)
+ .Case("no_thread_safety_analysis", true)
+ .Case("acquired_after", true)
+ .Case("acquired_before", true)
+ .Case("exclusive_lock_function", true)
+ .Case("shared_lock_function", true)
+ .Case("exclusive_trylock_function", true)
+ .Case("shared_trylock_function", true)
+ .Case("unlock_function", true)
+ .Case("lock_returned", true)
+ .Case("locks_excluded", true)
+ .Case("exclusive_locks_required", true)
+ .Case("shared_locks_required", true)
+ .Default(false);
+}
+
+/// \brief Parse the contents of thread safety attributes. These
+/// should always be parsed as an expression list.
+///
+/// We need to special-case the parsing here: if the first token of the first
+/// argument is an identifier, the main parse loop would store that token as a
+/// "parameter" and add only the remaining arguments to the list of
+/// "arguments", losing any subsequent tokens of the first argument. We
+/// instead parse each argument as an expression and add all arguments to the
+/// list of "arguments". In the future, we will take advantage of this special
+/// case to also
+/// deal with some argument scoping issues here (for example, referring to a
+/// function parameter in the attribute on that function).
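+///
+/// For example, both arguments of
+///   __attribute__((exclusive_locks_required(mu1, other->mu)))
+/// are parsed as expressions and added to the argument list.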
+void Parser::ParseThreadSafetyAttribute(IdentifierInfo &AttrName,
+ SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs,
+ SourceLocation *EndLoc) {
+ assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ ExprVector ArgExprs(Actions);
+ bool ArgExprsOk = true;
+
+ // now parse the list of expressions
+ while (Tok.isNot(tok::r_paren)) {
+ ExprResult ArgExpr(ParseAssignmentExpression());
+ if (ArgExpr.isInvalid()) {
+ ArgExprsOk = false;
+ T.consumeClose();
+ break;
+ } else {
+ ArgExprs.push_back(ArgExpr.release());
+ }
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken(); // Eat the comma, move to the next argument
+ }
+ // Match the ')'.
+ if (ArgExprsOk && !T.consumeClose()) {
+ Attrs.addNew(&AttrName, AttrNameLoc, 0, AttrNameLoc, 0, SourceLocation(),
+ ArgExprs.take(), ArgExprs.size());
+ }
+ if (EndLoc)
+ *EndLoc = T.getCloseLocation();
+}
+
+/// DiagnoseProhibitedCXX11Attribute - We have found the opening square brackets
+/// of a C++11 attribute-specifier in a location where an attribute is not
+/// permitted. By C++11 [dcl.attr.grammar]p6, this is ill-formed. Diagnose this
+/// situation.
+///
+/// \return \c true if we skipped an attribute-like chunk of tokens, \c false if
+/// this doesn't appear to actually be an attribute-specifier, and the caller
+/// should try to parse it.
+bool Parser::DiagnoseProhibitedCXX11Attribute() {
+ assert(Tok.is(tok::l_square) && NextToken().is(tok::l_square));
+
+ switch (isCXX11AttributeSpecifier(/*Disambiguate*/true)) {
+ case CAK_NotAttributeSpecifier:
+ // No diagnostic: we're in Obj-C++11 and this is not actually an attribute.
+ return false;
+
+ case CAK_InvalidAttributeSpecifier:
+ Diag(Tok.getLocation(), diag::err_l_square_l_square_not_attribute);
+ return false;
+
+ case CAK_AttributeSpecifier:
+ // Parse and discard the attributes.
+ SourceLocation BeginLoc = ConsumeBracket();
+ ConsumeBracket();
+ SkipUntil(tok::r_square, /*StopAtSemi*/ false);
+ assert(Tok.is(tok::r_square) && "isCXX11AttributeSpecifier lied");
+ SourceLocation EndLoc = ConsumeBracket();
+ Diag(BeginLoc, diag::err_attributes_not_allowed)
+ << SourceRange(BeginLoc, EndLoc);
+ return true;
+ }
+ llvm_unreachable("All cases handled above.");
+}
+
+void Parser::DiagnoseProhibitedAttributes(ParsedAttributesWithRange &attrs) {
+ Diag(attrs.Range.getBegin(), diag::err_attributes_not_allowed)
+ << attrs.Range;
+}
+
+/// ParseDeclaration - Parse a full 'declaration', which consists of
+/// declaration-specifiers, some number of declarators, and a semicolon.
+/// 'Context' should be a Declarator::TheContext value. This returns the
+/// location of the semicolon in DeclEnd.
+///
+/// declaration: [C99 6.7]
+/// block-declaration ->
+/// simple-declaration
+/// others [FIXME]
+/// [C++] template-declaration
+/// [C++] namespace-definition
+/// [C++] using-directive
+/// [C++] using-declaration
+/// [C++0x/C11] static_assert-declaration
+/// others... [FIXME]
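+///
+/// For example, 'namespace N { }', 'using namespace N;', and
+/// 'static_assert(sizeof(int) >= 2, "int too small");' all enter through here.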
+///
+Parser::DeclGroupPtrTy Parser::ParseDeclaration(StmtVector &Stmts,
+ unsigned Context,
+ SourceLocation &DeclEnd,
+ ParsedAttributesWithRange &attrs) {
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
+ // Must temporarily exit the Objective-C container scope in order to
+ // parse non-Objective-C declarations.
+ ObjCDeclContextSwitch ObjCDC(*this);
+
+ Decl *SingleDecl = 0;
+ Decl *OwnedType = 0;
+ switch (Tok.getKind()) {
+ case tok::kw_template:
+ case tok::kw_export:
+ ProhibitAttributes(attrs);
+ SingleDecl = ParseDeclarationStartingWithTemplate(Context, DeclEnd);
+ break;
+ case tok::kw_inline:
+ // Could be the start of an inline namespace. Allowed as an ext in C++03.
+ if (getLangOpts().CPlusPlus && NextToken().is(tok::kw_namespace)) {
+ ProhibitAttributes(attrs);
+ SourceLocation InlineLoc = ConsumeToken();
+ SingleDecl = ParseNamespace(Context, DeclEnd, InlineLoc);
+ break;
+ }
+ return ParseSimpleDeclaration(Stmts, Context, DeclEnd, attrs,
+ true);
+ case tok::kw_namespace:
+ ProhibitAttributes(attrs);
+ SingleDecl = ParseNamespace(Context, DeclEnd);
+ break;
+ case tok::kw_using:
+ SingleDecl = ParseUsingDirectiveOrDeclaration(Context, ParsedTemplateInfo(),
+ DeclEnd, attrs, &OwnedType);
+ break;
+ case tok::kw_static_assert:
+ case tok::kw__Static_assert:
+ ProhibitAttributes(attrs);
+ SingleDecl = ParseStaticAssertDeclaration(DeclEnd);
+ break;
+ default:
+ return ParseSimpleDeclaration(Stmts, Context, DeclEnd, attrs, true);
+ }
+
+ // This routine returns a DeclGroup, if the thing we parsed only contains a
+ // single decl, convert it now. Alias declarations can also declare a type;
+ // include that too if it is present.
+ return Actions.ConvertDeclToDeclGroup(SingleDecl, OwnedType);
+}
+
+/// simple-declaration: [C99 6.7: declaration] [C++ 7p1: dcl.dcl]
+/// declaration-specifiers init-declarator-list[opt] ';'
+/// [C90/C++] init-declarator-list ';' [TODO]
+/// [OMP] threadprivate-directive [TODO]
+///
+/// for-range-declaration: [C++0x 6.5p1: stmt.ranged]
+/// attribute-specifier-seq[opt] type-specifier-seq declarator
+///
+/// If RequireSemi is false, this does not check for a ';' at the end of the
+/// declaration. If it is true, it checks for and eats it.
+///
+/// If FRI is non-null, we might be parsing a for-range-declaration instead
+/// of a simple-declaration. If we find that we are, we also parse the
+/// for-range-initializer, and place it here.
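+///
+/// For example (illustrative; 'vec' is a placeholder), when parsing
+///   for (int x : vec) { }
+/// 'int x' is the for-range-declaration and 'vec' is the
+/// for-range-initializer that gets stored into FRI.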
+Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(StmtVector &Stmts,
+ unsigned Context,
+ SourceLocation &DeclEnd,
+ ParsedAttributes &attrs,
+ bool RequireSemi,
+ ForRangeInit *FRI) {
+ // Parse the common declaration-specifiers piece.
+ ParsingDeclSpec DS(*this);
+ DS.takeAttributesFrom(attrs);
+
+ ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS_none,
+ getDeclSpecContextFromDeclaratorContext(Context));
+
+ // C99 6.7.2.3p6: Handle "struct-or-union identifier;", "enum { X };"
+ // declaration-specifiers init-declarator-list[opt] ';'
+ if (Tok.is(tok::semi)) {
+ if (RequireSemi) ConsumeToken();
+ Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none,
+ DS);
+ DS.complete(TheDecl);
+ return Actions.ConvertDeclToDeclGroup(TheDecl);
+ }
+
+ return ParseDeclGroup(DS, Context, /*FunctionDefs=*/ false, &DeclEnd, FRI);
+}
+
+/// Returns true if this might be the start of a declarator, or a common typo
+/// for a declarator.
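+///
+/// For example (illustrative), after 'int a,' a following 'b' or '*p' might
+/// begin another declarator, whereas a following 'return' could not, which
+/// suggests the ',' was really meant to be ';'.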
+bool Parser::MightBeDeclarator(unsigned Context) {
+ switch (Tok.getKind()) {
+ case tok::annot_cxxscope:
+ case tok::annot_template_id:
+ case tok::caret:
+ case tok::code_completion:
+ case tok::coloncolon:
+ case tok::ellipsis:
+ case tok::kw___attribute:
+ case tok::kw_operator:
+ case tok::l_paren:
+ case tok::star:
+ return true;
+
+ case tok::amp:
+ case tok::ampamp:
+ return getLangOpts().CPlusPlus;
+
+ case tok::l_square: // Might be an attribute on an unnamed bit-field.
+ return Context == Declarator::MemberContext && getLangOpts().CPlusPlus0x &&
+ NextToken().is(tok::l_square);
+
+ case tok::colon: // Might be a typo for '::' or an unnamed bit-field.
+ return Context == Declarator::MemberContext || getLangOpts().CPlusPlus;
+
+ case tok::identifier:
+ switch (NextToken().getKind()) {
+ case tok::code_completion:
+ case tok::coloncolon:
+ case tok::comma:
+ case tok::equal:
+ case tok::equalequal: // Might be a typo for '='.
+ case tok::kw_alignas:
+ case tok::kw_asm:
+ case tok::kw___attribute:
+ case tok::l_brace:
+ case tok::l_paren:
+ case tok::l_square:
+ case tok::less:
+ case tok::r_brace:
+ case tok::r_paren:
+ case tok::r_square:
+ case tok::semi:
+ return true;
+
+ case tok::colon:
+ // At namespace scope, 'identifier:' is probably a typo for 'identifier::'
+ // and in block scope it's probably a label. Inside a class definition,
+ // this is a bit-field.
+ return Context == Declarator::MemberContext ||
+ (getLangOpts().CPlusPlus && Context == Declarator::FileContext);
+
+ case tok::identifier: // Possible virt-specifier.
+ return getLangOpts().CPlusPlus0x && isCXX0XVirtSpecifier(NextToken());
+
+ default:
+ return false;
+ }
+
+ default:
+ return false;
+ }
+}
+
+/// Skip until we reach something which seems like a sensible place to pick
+/// up parsing after a malformed declaration. This will sometimes stop sooner
+/// than SkipUntil(tok::r_brace) would, but will never stop later.
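+///
+/// For example (illustrative), recovery stops before a line that begins with
+/// 'namespace', since that is very likely the start of the next well-formed
+/// declaration.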
+void Parser::SkipMalformedDecl() {
+ while (true) {
+ switch (Tok.getKind()) {
+ case tok::l_brace:
+ // Skip until matching }, then stop. We've probably skipped over
+ // a malformed class or function definition or similar.
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, /*StopAtSemi*/false);
+ if (Tok.is(tok::comma) || Tok.is(tok::l_brace) || Tok.is(tok::kw_try)) {
+ // This declaration isn't over yet. Keep skipping.
+ continue;
+ }
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return;
+
+ case tok::l_square:
+ ConsumeBracket();
+ SkipUntil(tok::r_square, /*StopAtSemi*/false);
+ continue;
+
+ case tok::l_paren:
+ ConsumeParen();
+ SkipUntil(tok::r_paren, /*StopAtSemi*/false);
+ continue;
+
+ case tok::r_brace:
+ return;
+
+ case tok::semi:
+ ConsumeToken();
+ return;
+
+ case tok::kw_inline:
+ // 'inline namespace' at the start of a line is almost certainly
+ // a good place to pick back up parsing.
+ if (Tok.isAtStartOfLine() && NextToken().is(tok::kw_namespace))
+ return;
+ break;
+
+ case tok::kw_namespace:
+ // 'namespace' at the start of a line is almost certainly a good
+ // place to pick back up parsing.
+ if (Tok.isAtStartOfLine())
+ return;
+ break;
+
+ case tok::eof:
+ return;
+
+ default:
+ break;
+ }
+
+ ConsumeAnyToken();
+ }
+}
+
+/// ParseDeclGroup - Having concluded that this is either a function
+/// definition or a group of object declarations, actually parse the
+/// result.
+Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
+ unsigned Context,
+ bool AllowFunctionDefinitions,
+ SourceLocation *DeclEnd,
+ ForRangeInit *FRI) {
+ // Parse the first declarator.
+ ParsingDeclarator D(*this, DS, static_cast<Declarator::TheContext>(Context));
+ ParseDeclarator(D);
+
+ // Bail out if the first declarator didn't seem well-formed.
+ if (!D.hasName() && !D.mayOmitIdentifier()) {
+ SkipMalformedDecl();
+ return DeclGroupPtrTy();
+ }
+
+ // Save late-parsed attributes for now; they need to be parsed in the
+ // appropriate function scope after the function Decl has been constructed.
+ LateParsedAttrList LateParsedAttrs;
+ if (D.isFunctionDeclarator())
+ MaybeParseGNUAttributes(D, &LateParsedAttrs);
+
+ // Check to see if we have a function *definition* which must have a body.
+ if (AllowFunctionDefinitions && D.isFunctionDeclarator() &&
+ // Look at the next token to make sure that this isn't a function
+ // declaration. We have to check this because __attribute__ might be the
+ // start of a function definition in GCC-extended K&R C.
+ !isDeclarationAfterDeclarator()) {
+
+ if (isStartOfFunctionDefinition(D)) {
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ Diag(Tok, diag::err_function_declared_typedef);
+
+ // Recover by treating the 'typedef' as spurious.
+ DS.ClearStorageClassSpecs();
+ }
+
+ Decl *TheDecl =
+ ParseFunctionDefinition(D, ParsedTemplateInfo(), &LateParsedAttrs);
+ return Actions.ConvertDeclToDeclGroup(TheDecl);
+ }
+
+ if (isDeclarationSpecifier()) {
+ // If there is an invalid declaration specifier right after the function
+ // prototype, then we must be in a missing semicolon case where this isn't
+ // actually a body. Just fall through into the code that handles it as a
+ // prototype, and let the top-level code handle the erroneous declspec
+ // where it would otherwise expect a comma or semicolon.
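+ //
+ // For example (illustrative):
+ //   void f()   // note the missing ';'
+ //   int x;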
+ } else {
+ Diag(Tok, diag::err_expected_fn_body);
+ SkipUntil(tok::semi);
+ return DeclGroupPtrTy();
+ }
+ }
+
+ if (ParseAsmAttributesAfterDeclarator(D))
+ return DeclGroupPtrTy();
+
+ // C++0x [stmt.iter]p1: Check if we have a for-range-declarator. If so, we
+ // must parse and analyze the for-range-initializer before the declaration is
+ // analyzed.
+ if (FRI && Tok.is(tok::colon)) {
+ FRI->ColonLoc = ConsumeToken();
+ if (Tok.is(tok::l_brace))
+ FRI->RangeExpr = ParseBraceInitializer();
+ else
+ FRI->RangeExpr = ParseExpression();
+ Decl *ThisDecl = Actions.ActOnDeclarator(getCurScope(), D);
+ Actions.ActOnCXXForRangeDecl(ThisDecl);
+ Actions.FinalizeDeclaration(ThisDecl);
+ D.complete(ThisDecl);
+ return Actions.FinalizeDeclaratorGroup(getCurScope(), DS, &ThisDecl, 1);
+ }
+
+ SmallVector<Decl *, 8> DeclsInGroup;
+ Decl *FirstDecl = ParseDeclarationAfterDeclaratorAndAttributes(D);
+ if (LateParsedAttrs.size() > 0)
+ ParseLexedAttributeList(LateParsedAttrs, FirstDecl, true, false);
+ D.complete(FirstDecl);
+ if (FirstDecl)
+ DeclsInGroup.push_back(FirstDecl);
+
+ bool ExpectSemi = Context != Declarator::ForContext;
+
+ // If we don't have a comma, it is either the end of the list (a ';') or an
+ // error, bail out.
+ while (Tok.is(tok::comma)) {
+ SourceLocation CommaLoc = ConsumeToken();
+
+ if (Tok.isAtStartOfLine() && ExpectSemi && !MightBeDeclarator(Context)) {
+ // This comma was followed by a line-break and something which can't be
+ // the start of a declarator. The comma was probably a typo for a
+ // semicolon.
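+ // For example (illustrative; 'getValue' is a placeholder):
+ //   int x = getValue(),   // ',' here was probably meant to be ';'
+ //   return x;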
+ Diag(CommaLoc, diag::err_expected_semi_declaration)
+ << FixItHint::CreateReplacement(CommaLoc, ";");
+ ExpectSemi = false;
+ break;
+ }
+
+ // Parse the next declarator.
+ D.clear();
+ D.setCommaLoc(CommaLoc);
+
+ // Accept attributes in an init-declarator. In the first declarator in a
+ // declaration, these would be part of the declspec. In subsequent
+ // declarators, they become part of the declarator itself, so that they
+ // don't apply to declarators after *this* one. Examples:
+ // short __attribute__((common)) var; -> declspec
+ // short var __attribute__((common)); -> declarator
+ // short x, __attribute__((common)) var; -> declarator
+ MaybeParseGNUAttributes(D);
+
+ ParseDeclarator(D);
+ if (!D.isInvalidType()) {
+ Decl *ThisDecl = ParseDeclarationAfterDeclarator(D);
+ D.complete(ThisDecl);
+ if (ThisDecl)
+ DeclsInGroup.push_back(ThisDecl);
+ }
+ }
+
+ if (DeclEnd)
+ *DeclEnd = Tok.getLocation();
+
+ if (ExpectSemi &&
+ ExpectAndConsume(tok::semi,
+ Context == Declarator::FileContext
+ ? diag::err_invalid_token_after_toplevel_declarator
+ : diag::err_expected_semi_declaration)) {
+ // Okay, there was no semicolon and one was expected. If we see a
+ // declaration specifier, just assume it was missing and continue parsing.
+ // Otherwise things are very confused and we skip to recover.
+ if (!isDeclarationSpecifier()) {
+ SkipUntil(tok::r_brace, true, true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ }
+ }
+
+ return Actions.FinalizeDeclaratorGroup(getCurScope(), DS,
+ DeclsInGroup.data(),
+ DeclsInGroup.size());
+}
+
+/// Parse an optional simple-asm-expr and attributes, and attach them to a
+/// declarator. Returns true on an error.
+bool Parser::ParseAsmAttributesAfterDeclarator(Declarator &D) {
+ // If a simple-asm-expr is present, parse it.
+ if (Tok.is(tok::kw_asm)) {
+ SourceLocation Loc;
+ ExprResult AsmLabel(ParseSimpleAsm(&Loc));
+ if (AsmLabel.isInvalid()) {
+ SkipUntil(tok::semi, true, true);
+ return true;
+ }
+
+ D.setAsmLabel(AsmLabel.release());
+ D.SetRangeEnd(Loc);
+ }
+
+ MaybeParseGNUAttributes(D);
+ return false;
+}
+
+/// \brief Parse 'declaration' after parsing 'declaration-specifiers
+/// declarator'. This method parses the remainder of the declaration
+/// (including any attributes or initializer, among other things) and
+/// finalizes the declaration.
+///
+/// init-declarator: [C99 6.7]
+/// declarator
+/// declarator '=' initializer
+/// [GNU] declarator simple-asm-expr[opt] attributes[opt]
+/// [GNU] declarator simple-asm-expr[opt] attributes[opt] '=' initializer
+/// [C++] declarator initializer[opt]
+///
+/// [C++] initializer:
+/// [C++] '=' initializer-clause
+/// [C++] '(' expression-list ')'
+/// [C++0x] '=' 'default' [TODO]
+/// [C++0x] '=' 'delete'
+/// [C++0x] braced-init-list
+///
+/// According to the standard grammar, =default and =delete are function
+/// definitions, but that definitely doesn't fit with the parser here.
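+///
+/// For example (illustrative), under the standard grammar
+///   S::S() = default;
+/// is a function-definition rather than an init-declarator with an
+/// initializer.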
+///
+Decl *Parser::ParseDeclarationAfterDeclarator(Declarator &D,
+ const ParsedTemplateInfo &TemplateInfo) {
+ if (ParseAsmAttributesAfterDeclarator(D))
+ return 0;
+
+ return ParseDeclarationAfterDeclaratorAndAttributes(D, TemplateInfo);
+}
+
+Decl *Parser::ParseDeclarationAfterDeclaratorAndAttributes(Declarator &D,
+ const ParsedTemplateInfo &TemplateInfo) {
+ // Inform the current actions module that we just parsed this declarator.
+ Decl *ThisDecl = 0;
+ switch (TemplateInfo.Kind) {
+ case ParsedTemplateInfo::NonTemplate:
+ ThisDecl = Actions.ActOnDeclarator(getCurScope(), D);
+ break;
+
+ case ParsedTemplateInfo::Template:
+ case ParsedTemplateInfo::ExplicitSpecialization:
+ ThisDecl = Actions.ActOnTemplateDeclarator(getCurScope(),
+ MultiTemplateParamsArg(Actions,
+ TemplateInfo.TemplateParams->data(),
+ TemplateInfo.TemplateParams->size()),
+ D);
+ break;
+
+ case ParsedTemplateInfo::ExplicitInstantiation: {
+ DeclResult ThisRes
+ = Actions.ActOnExplicitInstantiation(getCurScope(),
+ TemplateInfo.ExternLoc,
+ TemplateInfo.TemplateLoc,
+ D);
+ if (ThisRes.isInvalid()) {
+ SkipUntil(tok::semi, true, true);
+ return 0;
+ }
+
+ ThisDecl = ThisRes.get();
+ break;
+ }
+ }
+
+ bool TypeContainsAuto =
+ D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto;
+
+ // Parse declarator '=' initializer.
+ // If a '==' or '+=' is found, suggest a fixit to '='.
+ if (isTokenEqualOrEqualTypo()) {
+ ConsumeToken();
+ if (Tok.is(tok::kw_delete)) {
+ if (D.isFunctionDeclarator())
+ Diag(ConsumeToken(), diag::err_default_delete_in_multiple_declaration)
+ << 1 /* delete */;
+ else
+ Diag(ConsumeToken(), diag::err_deleted_non_function);
+ } else if (Tok.is(tok::kw_default)) {
+ if (D.isFunctionDeclarator())
+ Diag(ConsumeToken(), diag::err_default_delete_in_multiple_declaration)
+ << 0 /* default */;
+ else
+ Diag(ConsumeToken(), diag::err_default_special_members);
+ } else {
+ if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
+ EnterScope(0);
+ Actions.ActOnCXXEnterDeclInitializer(getCurScope(), ThisDecl);
+ }
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteInitializer(getCurScope(), ThisDecl);
+ cutOffParsing();
+ return 0;
+ }
+
+ ExprResult Init(ParseInitializer());
+
+ if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
+ Actions.ActOnCXXExitDeclInitializer(getCurScope(), ThisDecl);
+ ExitScope();
+ }
+
+ if (Init.isInvalid()) {
+ SkipUntil(tok::comma, true, true);
+ Actions.ActOnInitializerError(ThisDecl);
+ } else
+ Actions.AddInitializerToDecl(ThisDecl, Init.take(),
+ /*DirectInit=*/false, TypeContainsAuto);
+ }
+ } else if (Tok.is(tok::l_paren)) {
+ // Parse C++ direct initializer: '(' expression-list ')'
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ ExprVector Exprs(Actions);
+ CommaLocsTy CommaLocs;
+
+ if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
+ EnterScope(0);
+ Actions.ActOnCXXEnterDeclInitializer(getCurScope(), ThisDecl);
+ }
+
+ if (ParseExpressionList(Exprs, CommaLocs)) {
+ SkipUntil(tok::r_paren);
+
+ if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
+ Actions.ActOnCXXExitDeclInitializer(getCurScope(), ThisDecl);
+ ExitScope();
+ }
+ } else {
+ // Match the ')'.
+ T.consumeClose();
+
+ assert(!Exprs.empty() && Exprs.size()-1 == CommaLocs.size() &&
+ "Unexpected number of commas!");
+
+ if (getLangOpts().CPlusPlus && D.getCXXScopeSpec().isSet()) {
+ Actions.ActOnCXXExitDeclInitializer(getCurScope(), ThisDecl);
+ ExitScope();
+ }
+
+ ExprResult Initializer = Actions.ActOnParenListExpr(T.getOpenLocation(),
+ T.getCloseLocation(),
+ move_arg(Exprs));
+ Actions.AddInitializerToDecl(ThisDecl, Initializer.take(),
+ /*DirectInit=*/true, TypeContainsAuto);
+ }
+ } else if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ // Parse C++0x braced-init-list.
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+
+ if (D.getCXXScopeSpec().isSet()) {
+ EnterScope(0);
+ Actions.ActOnCXXEnterDeclInitializer(getCurScope(), ThisDecl);
+ }
+
+ ExprResult Init(ParseBraceInitializer());
+
+ if (D.getCXXScopeSpec().isSet()) {
+ Actions.ActOnCXXExitDeclInitializer(getCurScope(), ThisDecl);
+ ExitScope();
+ }
+
+ if (Init.isInvalid()) {
+ Actions.ActOnInitializerError(ThisDecl);
+ } else
+ Actions.AddInitializerToDecl(ThisDecl, Init.take(),
+ /*DirectInit=*/true, TypeContainsAuto);
+
+ } else {
+ Actions.ActOnUninitializedDecl(ThisDecl, TypeContainsAuto);
+ }
+
+ Actions.FinalizeDeclaration(ThisDecl);
+
+ return ThisDecl;
+}
+
+/// ParseSpecifierQualifierList
+/// specifier-qualifier-list:
+/// type-specifier specifier-qualifier-list[opt]
+/// type-qualifier specifier-qualifier-list[opt]
+/// [GNU] attributes specifier-qualifier-list[opt]
+///
+void Parser::ParseSpecifierQualifierList(DeclSpec &DS, AccessSpecifier AS,
+ DeclSpecContext DSC) {
+ /// specifier-qualifier-list is a subset of declaration-specifiers. Just
+ /// parse declaration-specifiers and complain about extra stuff.
+ /// TODO: diagnose attribute-specifiers and alignment-specifiers.
+ ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS, DSC);
+
+ // Validate declspec for type-name.
+ unsigned Specs = DS.getParsedSpecifiers();
+ if (DSC == DSC_type_specifier && !DS.hasTypeSpecifier()) {
+ Diag(Tok, diag::err_expected_type);
+ DS.SetTypeSpecError();
+ } else if (Specs == DeclSpec::PQ_None && !DS.getNumProtocolQualifiers() &&
+ !DS.hasAttributes()) {
+ Diag(Tok, diag::err_typename_requires_specqual);
+ if (!DS.hasTypeSpecifier())
+ DS.SetTypeSpecError();
+ }
+
+ // Issue diagnostic and remove storage class if present.
+ if (Specs & DeclSpec::PQ_StorageClassSpecifier) {
+ if (DS.getStorageClassSpecLoc().isValid())
+ Diag(DS.getStorageClassSpecLoc(),diag::err_typename_invalid_storageclass);
+ else
+ Diag(DS.getThreadSpecLoc(), diag::err_typename_invalid_storageclass);
+ DS.ClearStorageClassSpecs();
+ }
+
+ // Issue diagnostic and remove function specifier if present.
+ if (Specs & DeclSpec::PQ_FunctionSpecifier) {
+ if (DS.isInlineSpecified())
+ Diag(DS.getInlineSpecLoc(), diag::err_typename_invalid_functionspec);
+ if (DS.isVirtualSpecified())
+ Diag(DS.getVirtualSpecLoc(), diag::err_typename_invalid_functionspec);
+ if (DS.isExplicitSpecified())
+ Diag(DS.getExplicitSpecLoc(), diag::err_typename_invalid_functionspec);
+ DS.ClearFunctionSpecs();
+ }
+
+ // Issue diagnostic and remove constexpr specifier if present.
+ if (DS.isConstexprSpecified()) {
+ Diag(DS.getConstexprSpecLoc(), diag::err_typename_invalid_constexpr);
+ DS.ClearConstexprSpec();
+ }
+}
+
+/// isValidAfterIdentifierInDeclaratorAfterDeclSpec - Return true if the
+/// specified token is valid after the identifier in a declarator which
+/// immediately follows the declspec. For example, these things are valid:
+///
+/// int x [ 4]; // direct-declarator
+/// int x ( int y); // direct-declarator
+/// int(int x ) // direct-declarator
+/// int x ; // simple-declaration
+/// int x = 17; // init-declarator-list
+/// int x , y; // init-declarator-list
+/// int x __asm__ ("foo"); // init-declarator-list
+/// int x : 4; // struct-declarator
+/// int x { 5}; // C++0x unified initializers
+///
+/// This is not, because 'x' does not immediately follow the declspec (though
+/// ')' happens to be valid anyway).
+/// int (x)
+///
+static bool isValidAfterIdentifierInDeclarator(const Token &T) {
+ return T.is(tok::l_square) || T.is(tok::l_paren) || T.is(tok::r_paren) ||
+ T.is(tok::semi) || T.is(tok::comma) || T.is(tok::equal) ||
+ T.is(tok::kw_asm) || T.is(tok::l_brace) || T.is(tok::colon);
+}
+
+
+/// ParseImplicitInt - This method is called when we have a non-typename
+/// identifier in a declspec (which normally terminates the decl spec) when
+/// the declspec has no type specifier. In this case, the declspec is either
+/// malformed or is "implicit int" (in K&R and C89).
+///
+/// This method handles diagnosing this prettily and returns false if the
+/// declspec is done being processed. If it recovers and thinks there may be
+/// other pieces of declspec after it, it returns true.
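+///
+/// For example (illustrative), 'static x = 4;' declares 'x' with implicit int
+/// in C89, whereas 'static foo_t x = 4;' with an undeclared 'foo_t' is
+/// treated as a declspec with an invalid type specifier.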
+///
+bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
+ const ParsedTemplateInfo &TemplateInfo,
+ AccessSpecifier AS, DeclSpecContext DSC) {
+ assert(Tok.is(tok::identifier) && "should have identifier");
+
+ SourceLocation Loc = Tok.getLocation();
+ // If we see an identifier that is not a type name, we normally would
+ // parse it as the identifier being declared. However, when a typename
+ // is typo'd or the definition is not included, this will incorrectly
+ // parse the typename as the identifier name and then fall over while
+ // misparsing later parts of the declaration.
+ //
+ // As such, we try to do some look-ahead in cases where this would
+ // otherwise be an "implicit-int" case to see if this is invalid. For
+ // example: "static foo_t x = 4;" In this case, if we parsed foo_t as
+ // an identifier with implicit int, we'd get a parse error because the
+ // next token is obviously invalid for a type. Parse these as a case
+ // with an invalid type specifier.
+ assert(!DS.hasTypeSpecifier() && "Type specifier checked above");
+
+ // Since we know that this is either implicit int (which is rare) or an
+ // error, do lookahead to try to do better recovery. This never applies within
+ // a type specifier.
+ // FIXME: Don't bail out here in languages with no implicit int (like
+ // C++ with no -fms-extensions). This is much more likely to be an undeclared
+ // type or typo than a use of implicit int.
+ if (DSC != DSC_type_specifier &&
+ isValidAfterIdentifierInDeclarator(NextToken())) {
+ // If this token is valid for implicit int, e.g. "static x = 4", then
+ // we just avoid eating the identifier, so it will be parsed as the
+ // identifier in the declarator.
+ return false;
+ }
+
+ // Otherwise, if we don't consume this token, we are going to emit an
+ // error anyway. Try to recover from various common problems. Check
+ // to see if this was a reference to a tag name without a tag specified.
+ // This is a common problem in C (saying 'foo' instead of 'struct foo').
+ //
+ // C++ doesn't need this, and isTagName doesn't take SS.
+ if (SS == 0) {
+ const char *TagName = 0, *FixitTagName = 0;
+ tok::TokenKind TagKind = tok::unknown;
+
+ switch (Actions.isTagName(*Tok.getIdentifierInfo(), getCurScope())) {
+ default: break;
+ case DeclSpec::TST_enum:
+ TagName="enum" ; FixitTagName = "enum " ; TagKind=tok::kw_enum ;break;
+ case DeclSpec::TST_union:
+ TagName="union" ; FixitTagName = "union " ;TagKind=tok::kw_union ;break;
+ case DeclSpec::TST_struct:
+ TagName="struct"; FixitTagName = "struct ";TagKind=tok::kw_struct;break;
+ case DeclSpec::TST_class:
+ TagName="class" ; FixitTagName = "class " ;TagKind=tok::kw_class ;break;
+ }
+
+ if (TagName) {
+ Diag(Loc, diag::err_use_of_tag_name_without_tag)
+ << Tok.getIdentifierInfo() << TagName << getLangOpts().CPlusPlus
+ << FixItHint::CreateInsertion(Tok.getLocation(),FixitTagName);
+
+ // Parse this as a tag as if the missing tag were present.
+ if (TagKind == tok::kw_enum)
+ ParseEnumSpecifier(Loc, DS, TemplateInfo, AS, DSC_normal);
+ else
+ ParseClassSpecifier(TagKind, Loc, DS, TemplateInfo, AS,
+ /*EnteringContext*/ false, DSC_normal);
+ return true;
+ }
+ }
+
+ // This is almost certainly an invalid type name. Let the action emit a
+ // diagnostic and attempt to recover.
+ ParsedType T;
+ if (Actions.DiagnoseUnknownTypeName(*Tok.getIdentifierInfo(), Loc,
+ getCurScope(), SS, T)) {
+ // The action emitted a diagnostic, so we don't have to.
+ if (T) {
+ // The action has suggested that the type T could be used. Set that as
+ // the type in the declaration specifiers, consume the would-be type
+ // name token, and we're done.
+ const char *PrevSpec;
+ unsigned DiagID;
+ DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec, DiagID, T);
+ DS.SetRangeEnd(Tok.getLocation());
+ ConsumeToken();
+
+ // There may be other declaration specifiers after this.
+ return true;
+ }
+
+ // Fall through; the action had no suggestion for us.
+ } else {
+ // The action did not emit a diagnostic, so emit one now.
+ SourceRange R;
+ if (SS) R = SS->getRange();
+ Diag(Loc, diag::err_unknown_typename) << Tok.getIdentifierInfo() << R;
+ }
+
+ // Mark this as an error.
+ DS.SetTypeSpecError();
+ DS.SetRangeEnd(Tok.getLocation());
+ ConsumeToken();
+
+ // TODO: Could inject an invalid typedef decl in an enclosing scope to
+ // avoid rippling error messages on subsequent uses of the same type,
+ // could be useful if #include was forgotten.
+ return false;
+}
+
+/// \brief Determine the declaration specifier context from the declarator
+/// context.
+///
+/// \param Context the declarator context, which is one of the
+/// Declarator::TheContext enumerator values.
+Parser::DeclSpecContext
+Parser::getDeclSpecContextFromDeclaratorContext(unsigned Context) {
+ if (Context == Declarator::MemberContext)
+ return DSC_class;
+ if (Context == Declarator::FileContext)
+ return DSC_top_level;
+ if (Context == Declarator::TrailingReturnContext)
+ return DSC_trailing;
+ return DSC_normal;
+}
+
+/// ParseAlignArgument - Parse the argument to an alignment-specifier.
+///
+/// FIXME: Simply returns an alignof() expression if the argument is a
+/// type. Ideally, the type should be propagated directly into Sema.
+///
+/// [C11] type-id
+/// [C11] constant-expression
+/// [C++0x] type-id ...[opt]
+/// [C++0x] assignment-expression ...[opt]
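+///
+/// For example (illustrative), both '_Alignas(double)' and '_Alignas(8)' are
+/// accepted; the type form is currently rewritten into an equivalent
+/// alignof-style expression (see the FIXME above).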
+ExprResult Parser::ParseAlignArgument(SourceLocation Start,
+ SourceLocation &EllipsisLoc) {
+ ExprResult ER;
+ if (isTypeIdInParens()) {
+ SourceLocation TypeLoc = Tok.getLocation();
+ ParsedType Ty = ParseTypeName().get();
+ SourceRange TypeRange(Start, Tok.getLocation());
+ ER = Actions.ActOnUnaryExprOrTypeTraitExpr(TypeLoc, UETT_AlignOf, true,
+ Ty.getAsOpaquePtr(), TypeRange);
+ } else
+ ER = ParseConstantExpression();
+
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::ellipsis))
+ EllipsisLoc = ConsumeToken();
+
+ return ER;
+}
+
+/// ParseAlignmentSpecifier - Parse an alignment-specifier, and add the
+/// attribute to Attrs.
+///
+/// alignment-specifier:
+/// [C11] '_Alignas' '(' type-id ')'
+/// [C11] '_Alignas' '(' constant-expression ')'
+/// [C++0x] 'alignas' '(' type-id ...[opt] ')'
+/// [C++0x] 'alignas' '(' assignment-expression ...[opt] ')'
+void Parser::ParseAlignmentSpecifier(ParsedAttributes &Attrs,
+ SourceLocation *endLoc) {
+ assert((Tok.is(tok::kw_alignas) || Tok.is(tok::kw__Alignas)) &&
+ "Not an alignment-specifier!");
+
+ SourceLocation KWLoc = Tok.getLocation();
+ ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen))
+ return;
+
+ SourceLocation EllipsisLoc;
+ ExprResult ArgExpr = ParseAlignArgument(T.getOpenLocation(), EllipsisLoc);
+ if (ArgExpr.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return;
+ }
+
+ T.consumeClose();
+ if (endLoc)
+ *endLoc = T.getCloseLocation();
+
+ // FIXME: Handle pack-expansions here.
+ if (EllipsisLoc.isValid()) {
+ Diag(EllipsisLoc, diag::err_alignas_pack_exp_unsupported);
+ return;
+ }
+
+ ExprVector ArgExprs(Actions);
+ ArgExprs.push_back(ArgExpr.release());
+ Attrs.addNew(PP.getIdentifierInfo("aligned"), KWLoc, 0, KWLoc,
+ 0, T.getOpenLocation(), ArgExprs.take(), 1, false, true);
+}
+
+/// ParseDeclarationSpecifiers
+/// declaration-specifiers: [C99 6.7]
+/// storage-class-specifier declaration-specifiers[opt]
+/// type-specifier declaration-specifiers[opt]
+/// [C99] function-specifier declaration-specifiers[opt]
+/// [C11] alignment-specifier declaration-specifiers[opt]
+/// [GNU] attributes declaration-specifiers[opt]
+/// [Clang] '__module_private__' declaration-specifiers[opt]
+///
+/// storage-class-specifier: [C99 6.7.1]
+/// 'typedef'
+/// 'extern'
+/// 'static'
+/// 'auto'
+/// 'register'
+/// [C++] 'mutable'
+/// [GNU] '__thread'
+/// function-specifier: [C99 6.7.4]
+/// [C99] 'inline'
+/// [C++] 'virtual'
+/// [C++] 'explicit'
+/// [OpenCL] '__kernel'
+/// 'friend': [C++ dcl.friend]
+/// 'constexpr': [C++0x dcl.constexpr]
+///
+void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
+ const ParsedTemplateInfo &TemplateInfo,
+ AccessSpecifier AS,
+ DeclSpecContext DSContext,
+ LateParsedAttrList *LateAttrs) {
+ if (DS.getSourceRange().isInvalid()) {
+ DS.SetRangeStart(Tok.getLocation());
+ DS.SetRangeEnd(Tok.getLocation());
+ }
+
+ bool EnteringContext = (DSContext == DSC_class || DSContext == DSC_top_level);
+ while (1) {
+ bool isInvalid = false;
+ const char *PrevSpec = 0;
+ unsigned DiagID = 0;
+
+ SourceLocation Loc = Tok.getLocation();
+
+ switch (Tok.getKind()) {
+ default:
+ DoneWithDeclSpec:
+ // [C++0x] decl-specifier-seq: decl-specifier attribute-specifier-seq[opt]
+ MaybeParseCXX0XAttributes(DS.getAttributes());
+
+ // If this is not a declaration specifier token, we're done reading decl
+ // specifiers. First verify that DeclSpec's are consistent.
+ DS.Finish(Diags, PP);
+ return;
+
+ case tok::code_completion: {
+ Sema::ParserCompletionContext CCC = Sema::PCC_Namespace;
+ if (DS.hasTypeSpecifier()) {
+ bool AllowNonIdentifiers
+ = (getCurScope()->getFlags() & (Scope::ControlScope |
+ Scope::BlockScope |
+ Scope::TemplateParamScope |
+ Scope::FunctionPrototypeScope |
+ Scope::AtCatchScope)) == 0;
+ bool AllowNestedNameSpecifiers
+ = DSContext == DSC_top_level ||
+ (DSContext == DSC_class && DS.isFriendSpecified());
+
+ Actions.CodeCompleteDeclSpec(getCurScope(), DS,
+ AllowNonIdentifiers,
+ AllowNestedNameSpecifiers);
+ return cutOffParsing();
+ }
+
+ if (getCurScope()->getFnParent() || getCurScope()->getBlockParent())
+ CCC = Sema::PCC_LocalDeclarationSpecifiers;
+ else if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate)
+ CCC = DSContext == DSC_class? Sema::PCC_MemberTemplate
+ : Sema::PCC_Template;
+ else if (DSContext == DSC_class)
+ CCC = Sema::PCC_Class;
+ else if (CurParsedObjCImpl)
+ CCC = Sema::PCC_ObjCImplementation;
+
+ Actions.CodeCompleteOrdinaryName(getCurScope(), CCC);
+ return cutOffParsing();
+ }
+
+ case tok::coloncolon: // ::foo::bar
+ // C++ scope specifier. Annotate and loop, or bail out on error.
+ if (TryAnnotateCXXScopeToken(true)) {
+ if (!DS.hasTypeSpecifier())
+ DS.SetTypeSpecError();
+ goto DoneWithDeclSpec;
+ }
+ if (Tok.is(tok::coloncolon)) // ::new or ::delete
+ goto DoneWithDeclSpec;
+ continue;
+
+ case tok::annot_cxxscope: {
+ if (DS.hasTypeSpecifier())
+ goto DoneWithDeclSpec;
+
+ CXXScopeSpec SS;
+ Actions.RestoreNestedNameSpecifierAnnotation(Tok.getAnnotationValue(),
+ Tok.getAnnotationRange(),
+ SS);
+
+ // We are looking for a qualified typename.
+ Token Next = NextToken();
+ if (Next.is(tok::annot_template_id) &&
+ static_cast<TemplateIdAnnotation *>(Next.getAnnotationValue())
+ ->Kind == TNK_Type_template) {
+ // We have a qualified template-id, e.g., N::A<int>
+
+ // C++ [class.qual]p2:
+ // In a lookup in which the constructor is an acceptable lookup
+ // result and the nested-name-specifier nominates a class C:
+ //
+ // - if the name specified after the
+ // nested-name-specifier, when looked up in C, is the
+ // injected-class-name of C (Clause 9), or
+ //
+ // - if the name specified after the nested-name-specifier
+ // is the same as the identifier or the
+ // simple-template-id's template-name in the last
+ // component of the nested-name-specifier,
+ //
+ // the name is instead considered to name the constructor of
+ // class C.
+ //
+ // Thus, if the template-name is actually the constructor
+ // name, then the code is ill-formed; this interpretation is
+ // reinforced by the NAD status of core issue 635.
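+ //
+ // For example (illustrative), in
+ //   A<int>::A<int> x;
+ // the qualified template-id nominates the constructor of A<int> rather
+ // than the type, so we diagnose and then parse it as a type as intended.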
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Next);
+ if ((DSContext == DSC_top_level ||
+ (DSContext == DSC_class && DS.isFriendSpecified())) &&
+ TemplateId->Name &&
+ Actions.isCurrentClassName(*TemplateId->Name, getCurScope(), &SS)) {
+ if (isConstructorDeclarator()) {
+ // The user meant this to be an out-of-line constructor
+ // definition, but template arguments are not allowed
+ // there. Just allow this as a constructor; we'll
+ // complain about it later.
+ goto DoneWithDeclSpec;
+ }
+
+ // The user meant this to name a type, but it actually names
+ // a constructor with some extraneous template
+ // arguments. Complain, then parse it as a type as the user
+ // intended.
+ Diag(TemplateId->TemplateNameLoc,
+ diag::err_out_of_line_template_id_names_constructor)
+ << TemplateId->Name;
+ }
+
+ DS.getTypeSpecScope() = SS;
+ ConsumeToken(); // The C++ scope.
+ assert(Tok.is(tok::annot_template_id) &&
+ "ParseOptionalCXXScopeSpecifier not working");
+ AnnotateTemplateIdTokenAsType();
+ continue;
+ }
+
+ if (Next.is(tok::annot_typename)) {
+ DS.getTypeSpecScope() = SS;
+ ConsumeToken(); // The C++ scope.
+ if (Tok.getAnnotationValue()) {
+ ParsedType T = getTypeAnnotation(Tok);
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename,
+ Tok.getAnnotationEndLoc(),
+ PrevSpec, DiagID, T);
+ }
+ else
+ DS.SetTypeSpecError();
+ DS.SetRangeEnd(Tok.getAnnotationEndLoc());
+ ConsumeToken(); // The typename
+ }
+
+ if (Next.isNot(tok::identifier))
+ goto DoneWithDeclSpec;
+
+ // If we're in a context where the identifier could be a class name,
+ // check whether this is a constructor declaration.
+ if ((DSContext == DSC_top_level ||
+ (DSContext == DSC_class && DS.isFriendSpecified())) &&
+ Actions.isCurrentClassName(*Next.getIdentifierInfo(), getCurScope(),
+ &SS)) {
+ if (isConstructorDeclarator())
+ goto DoneWithDeclSpec;
+
+ // As noted in C++ [class.qual]p2 (cited above), when the name
+ // of the class is qualified in a context where it could name
+ // a constructor, it's a constructor name. However, we've
+ // looked at the declarator, and the user probably meant this
+ // to be a type. Complain that it isn't supposed to be treated
+ // as a type, then proceed to parse it as a type.
+ Diag(Next.getLocation(), diag::err_out_of_line_type_names_constructor)
+ << Next.getIdentifierInfo();
+ }
+
+ ParsedType TypeRep = Actions.getTypeName(*Next.getIdentifierInfo(),
+ Next.getLocation(),
+ getCurScope(), &SS,
+ false, false, ParsedType(),
+ /*IsCtorOrDtorName=*/false,
+ /*NonTrivialSourceInfo=*/true);
+
+ // If the referenced identifier is not a type, then this declspec is
+ // erroneous: we already checked that it has no type specifier, and
+ // C++ doesn't have implicit int. Diagnose it as a typo w.r.t. the
+ // typename.
+ if (TypeRep == 0) {
+ ConsumeToken(); // Eat the scope spec so the identifier is current.
+ if (ParseImplicitInt(DS, &SS, TemplateInfo, AS, DSContext)) continue;
+ goto DoneWithDeclSpec;
+ }
+
+ DS.getTypeSpecScope() = SS;
+ ConsumeToken(); // The C++ scope.
+
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
+ DiagID, TypeRep);
+ if (isInvalid)
+ break;
+
+ DS.SetRangeEnd(Tok.getLocation());
+ ConsumeToken(); // The typename.
+
+ continue;
+ }
+
+ case tok::annot_typename: {
+ if (Tok.getAnnotationValue()) {
+ ParsedType T = getTypeAnnotation(Tok);
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
+ DiagID, T);
+ } else
+ DS.SetTypeSpecError();
+
+ if (isInvalid)
+ break;
+
+ DS.SetRangeEnd(Tok.getAnnotationEndLoc());
+ ConsumeToken(); // The typename
+
+ // Objective-C supports syntax of the form 'id<proto1,proto2>' where 'id'
+ // is a specific typedef and 'itf<proto1,proto2>' where 'itf' is an
+ // Objective-C interface.
+ if (Tok.is(tok::less) && getLangOpts().ObjC1)
+ ParseObjCProtocolQualifiers(DS);
+
+ continue;
+ }
+
+ case tok::kw___is_signed:
+ // GNU libstdc++ 4.4 uses __is_signed as an identifier, but Clang
+ // typically treats it as a trait. If we see __is_signed as it appears
+ // in libstdc++, e.g.,
+ //
+ // static const bool __is_signed;
+ //
+ // then treat __is_signed as an identifier rather than as a keyword.
+ if (DS.getTypeSpecType() == TST_bool &&
+ DS.getTypeQualifiers() == DeclSpec::TQ_const &&
+ DS.getStorageClassSpec() == DeclSpec::SCS_static) {
+ Tok.getIdentifierInfo()->RevertTokenIDToIdentifier();
+ Tok.setKind(tok::identifier);
+ }
+
+ // We're done with the declaration-specifiers.
+ goto DoneWithDeclSpec;
+
+ // typedef-name
+ case tok::kw_decltype:
+ case tok::identifier: {
+ // In C++, check to see if this is a scope specifier like foo::bar::, if
+ // so handle it as such. This is important for ctor parsing.
+ if (getLangOpts().CPlusPlus) {
+ if (TryAnnotateCXXScopeToken(true)) {
+ if (!DS.hasTypeSpecifier())
+ DS.SetTypeSpecError();
+ goto DoneWithDeclSpec;
+ }
+ if (!Tok.is(tok::identifier))
+ continue;
+ }
+
+ // This identifier can only be a typedef name if we haven't already seen
+ // a type-specifier. Without this check we misparse:
+ // typedef int X; struct Y { short X; }; as 'short int'.
+ if (DS.hasTypeSpecifier())
+ goto DoneWithDeclSpec;
+
+ // Check for need to substitute AltiVec keyword tokens.
+ if (TryAltiVecToken(DS, Loc, PrevSpec, DiagID, isInvalid))
+ break;
+
+ ParsedType TypeRep =
+ Actions.getTypeName(*Tok.getIdentifierInfo(),
+ Tok.getLocation(), getCurScope());
+
+ // If this is not a typedef name, don't parse it as part of the declspec,
+ // it must be an implicit int or an error.
+ if (!TypeRep) {
+ if (ParseImplicitInt(DS, 0, TemplateInfo, AS, DSContext)) continue;
+ goto DoneWithDeclSpec;
+ }
+
+ // If we're in a context where the identifier could be a class name,
+ // check whether this is a constructor declaration.
+ if (getLangOpts().CPlusPlus && DSContext == DSC_class &&
+ Actions.isCurrentClassName(*Tok.getIdentifierInfo(), getCurScope()) &&
+ isConstructorDeclarator())
+ goto DoneWithDeclSpec;
+
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec,
+ DiagID, TypeRep);
+ if (isInvalid)
+ break;
+
+ DS.SetRangeEnd(Tok.getLocation());
+ ConsumeToken(); // The identifier
+
+ // Objective-C supports syntax of the form 'id<proto1,proto2>' where 'id'
+ // is a specific typedef and 'itf<proto1,proto2>' where 'itf' is an
+ // Objective-C interface.
+ if (Tok.is(tok::less) && getLangOpts().ObjC1)
+ ParseObjCProtocolQualifiers(DS);
+
+ // Need to support trailing type qualifiers (e.g. "id<p> const").
+ // If a type specifier follows, it will be diagnosed elsewhere.
+ continue;
+ }
+
+ // type-name
+ case tok::annot_template_id: {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ if (TemplateId->Kind != TNK_Type_template) {
+ // This template-id does not refer to a type name, so we're
+ // done with the type-specifiers.
+ goto DoneWithDeclSpec;
+ }
+
+ // If we're in a context where the template-id could be a
+ // constructor name or specialization, check whether this is a
+ // constructor declaration.
+ if (getLangOpts().CPlusPlus && DSContext == DSC_class &&
+ Actions.isCurrentClassName(*TemplateId->Name, getCurScope()) &&
+ isConstructorDeclarator())
+ goto DoneWithDeclSpec;
+
+ // Turn the template-id annotation token into a type annotation
+ // token, then try again to parse it as a type-specifier.
+ AnnotateTemplateIdTokenAsType();
+ continue;
+ }
+
+ // GNU attributes support.
+ case tok::kw___attribute:
+ ParseGNUAttributes(DS.getAttributes(), 0, LateAttrs);
+ continue;
+
+ // Microsoft declspec support.
+ case tok::kw___declspec:
+ ParseMicrosoftDeclSpec(DS.getAttributes());
+ continue;
+
+ // Microsoft single token adornments.
+ case tok::kw___forceinline:
+ // FIXME: Add handling here!
+ break;
+
+ case tok::kw___ptr64:
+ case tok::kw___ptr32:
+ case tok::kw___w64:
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ case tok::kw___thiscall:
+ case tok::kw___unaligned:
+ ParseMicrosoftTypeAttributes(DS.getAttributes());
+ continue;
+
+ // Borland single token adornments.
+ case tok::kw___pascal:
+ ParseBorlandTypeAttributes(DS.getAttributes());
+ continue;
+
+ // OpenCL single token adornments.
+ case tok::kw___kernel:
+ ParseOpenCLAttributes(DS.getAttributes());
+ continue;
+
+ // storage-class-specifier
+ case tok::kw_typedef:
+ isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_typedef, Loc,
+ PrevSpec, DiagID);
+ break;
+ case tok::kw_extern:
+ if (DS.isThreadSpecified())
+ Diag(Tok, diag::ext_thread_before) << "extern";
+ isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_extern, Loc,
+ PrevSpec, DiagID);
+ break;
+ case tok::kw___private_extern__:
+ isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_private_extern,
+ Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_static:
+ if (DS.isThreadSpecified())
+ Diag(Tok, diag::ext_thread_before) << "static";
+ isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_static, Loc,
+ PrevSpec, DiagID);
+ break;
+ case tok::kw_auto:
+ if (getLangOpts().CPlusPlus0x) {
+ if (isKnownToBeTypeSpecifier(GetLookAheadToken(1))) {
+ isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_auto, Loc,
+ PrevSpec, DiagID);
+ if (!isInvalid)
+ Diag(Tok, diag::ext_auto_storage_class)
+ << FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
+ } else
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_auto, Loc, PrevSpec,
+ DiagID);
+ } else
+ isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_auto, Loc,
+ PrevSpec, DiagID);
+ break;
+ case tok::kw_register:
+ isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_register, Loc,
+ PrevSpec, DiagID);
+ break;
+ case tok::kw_mutable:
+ isInvalid = DS.SetStorageClassSpec(Actions, DeclSpec::SCS_mutable, Loc,
+ PrevSpec, DiagID);
+ break;
+ case tok::kw___thread:
+ isInvalid = DS.SetStorageClassSpecThread(Loc, PrevSpec, DiagID);
+ break;
+
+ // function-specifier
+ case tok::kw_inline:
+ isInvalid = DS.SetFunctionSpecInline(Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_virtual:
+ isInvalid = DS.SetFunctionSpecVirtual(Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_explicit:
+ isInvalid = DS.SetFunctionSpecExplicit(Loc, PrevSpec, DiagID);
+ break;
+
+ // alignment-specifier
+ case tok::kw__Alignas:
+ if (!getLangOpts().C11)
+ Diag(Tok, diag::ext_c11_alignas);
+ ParseAlignmentSpecifier(DS.getAttributes());
+ continue;
+
+ // friend
+ case tok::kw_friend:
+ if (DSContext == DSC_class)
+ isInvalid = DS.SetFriendSpec(Loc, PrevSpec, DiagID);
+ else {
+ PrevSpec = ""; // not actually used by the diagnostic
+ DiagID = diag::err_friend_invalid_in_context;
+ isInvalid = true;
+ }
+ break;
+
+ // Modules
+ case tok::kw___module_private__:
+ isInvalid = DS.setModulePrivateSpec(Loc, PrevSpec, DiagID);
+ break;
+
+ // constexpr
+ case tok::kw_constexpr:
+ isInvalid = DS.SetConstexprSpec(Loc, PrevSpec, DiagID);
+ break;
+
+ // type-specifier
+ case tok::kw_short:
+ isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_short, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw_long:
+ if (DS.getTypeSpecWidth() != DeclSpec::TSW_long)
+ isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_long, Loc, PrevSpec,
+ DiagID);
+ else
+ isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_longlong, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw___int64:
+ isInvalid = DS.SetTypeSpecWidth(DeclSpec::TSW_longlong, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw_signed:
+ isInvalid = DS.SetTypeSpecSign(DeclSpec::TSS_signed, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw_unsigned:
+ isInvalid = DS.SetTypeSpecSign(DeclSpec::TSS_unsigned, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw__Complex:
+ isInvalid = DS.SetTypeSpecComplex(DeclSpec::TSC_complex, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw__Imaginary:
+ isInvalid = DS.SetTypeSpecComplex(DeclSpec::TSC_imaginary, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw_void:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_void, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw_char:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_char, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw_int:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw___int128:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_int128, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw_half:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_half, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw_float:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_float, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw_double:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_double, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw_wchar_t:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_wchar, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw_char16_t:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_char16, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw_char32_t:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_char32, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw_bool:
+ case tok::kw__Bool:
+ if (Tok.is(tok::kw_bool) &&
+ DS.getTypeSpecType() != DeclSpec::TST_unspecified &&
+ DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ PrevSpec = ""; // Not used by the diagnostic.
+ DiagID = diag::err_bool_redeclaration;
+ // For better error recovery.
+ Tok.setKind(tok::identifier);
+ isInvalid = true;
+ } else {
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_bool, Loc, PrevSpec,
+ DiagID);
+ }
+ break;
+ case tok::kw__Decimal32:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal32, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw__Decimal64:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal64, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw__Decimal128:
+ isInvalid = DS.SetTypeSpecType(DeclSpec::TST_decimal128, Loc, PrevSpec,
+ DiagID);
+ break;
+ case tok::kw___vector:
+ isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw___pixel:
+ isInvalid = DS.SetTypeAltiVecPixel(true, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw___unknown_anytype:
+ isInvalid = DS.SetTypeSpecType(TST_unknown_anytype, Loc,
+ PrevSpec, DiagID);
+ break;
+
+ // class-specifier:
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union: {
+ tok::TokenKind Kind = Tok.getKind();
+ ConsumeToken();
+ ParseClassSpecifier(Kind, Loc, DS, TemplateInfo, AS,
+ EnteringContext, DSContext);
+ continue;
+ }
+
+ // enum-specifier:
+ case tok::kw_enum:
+ ConsumeToken();
+ ParseEnumSpecifier(Loc, DS, TemplateInfo, AS, DSContext);
+ continue;
+
+ // cv-qualifier:
+ case tok::kw_const:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_const, Loc, PrevSpec, DiagID,
+ getLangOpts());
+ break;
+ case tok::kw_volatile:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_volatile, Loc, PrevSpec, DiagID,
+ getLangOpts());
+ break;
+ case tok::kw_restrict:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_restrict, Loc, PrevSpec, DiagID,
+ getLangOpts());
+ break;
+
+ // C++ typename-specifier:
+ case tok::kw_typename:
+ if (TryAnnotateTypeOrScopeToken()) {
+ DS.SetTypeSpecError();
+ goto DoneWithDeclSpec;
+ }
+ if (!Tok.is(tok::kw_typename))
+ continue;
+ break;
+
+ // GNU typeof support.
+ case tok::kw_typeof:
+ ParseTypeofSpecifier(DS);
+ continue;
+
+ case tok::annot_decltype:
+ ParseDecltypeSpecifier(DS);
+ continue;
+
+ case tok::kw___underlying_type:
+ ParseUnderlyingTypeSpecifier(DS);
+ continue;
+
+ case tok::kw__Atomic:
+ ParseAtomicSpecifier(DS);
+ continue;
+
+ // OpenCL qualifiers:
+ case tok::kw_private:
+ if (!getLangOpts().OpenCL)
+ goto DoneWithDeclSpec;
+ case tok::kw___private:
+ case tok::kw___global:
+ case tok::kw___local:
+ case tok::kw___constant:
+ case tok::kw___read_only:
+ case tok::kw___write_only:
+ case tok::kw___read_write:
+ ParseOpenCLQualifiers(DS);
+ break;
+
+ case tok::less:
+ // GCC ObjC supports types like "<SomeProtocol>" as a synonym for
+ // "id<SomeProtocol>". This is hopelessly old fashioned and dangerous,
+ // but we support it.
+ if (DS.hasTypeSpecifier() || !getLangOpts().ObjC1)
+ goto DoneWithDeclSpec;
+
+ if (!ParseObjCProtocolQualifiers(DS))
+ Diag(Loc, diag::warn_objc_protocol_qualifier_missing_id)
+ << FixItHint::CreateInsertion(Loc, "id")
+ << SourceRange(Loc, DS.getSourceRange().getEnd());
+
+ // Need to support trailing type qualifiers (e.g. "id<p> const").
+ // If a type specifier follows, it will be diagnosed elsewhere.
+ continue;
+ }
+ // If the specifier wasn't legal, issue a diagnostic.
+ if (isInvalid) {
+ assert(PrevSpec && "Method did not return previous specifier!");
+ assert(DiagID);
+
+ if (DiagID == diag::ext_duplicate_declspec)
+ Diag(Tok, DiagID)
+ << PrevSpec << FixItHint::CreateRemoval(Tok.getLocation());
+ else
+ Diag(Tok, DiagID) << PrevSpec;
+ }
+
+ DS.SetRangeEnd(Tok.getLocation());
+ if (DiagID != diag::err_bool_redeclaration)
+ ConsumeToken();
+ }
+}
+
+/// ParseStructDeclaration - Parse a struct declaration without the terminating
+/// semicolon.
+///
+/// struct-declaration:
+/// specifier-qualifier-list struct-declarator-list
+/// [GNU] __extension__ struct-declaration
+/// [GNU] specifier-qualifier-list
+/// struct-declarator-list:
+/// struct-declarator
+/// struct-declarator-list ',' struct-declarator
+/// [GNU] struct-declarator-list ',' attributes[opt] struct-declarator
+/// struct-declarator:
+/// declarator
+/// [GNU] declarator attributes[opt]
+/// declarator[opt] ':' constant-expression
+/// [GNU] declarator[opt] ':' constant-expression attributes[opt]
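+///
+/// For example (illustrative):
+/// unsigned flags : 4; // declarator followed by a bit-field width
+/// int : 0; // unnamed bit-field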
+///
+void Parser::
+ParseStructDeclaration(DeclSpec &DS, FieldCallback &Fields) {
+
+ if (Tok.is(tok::kw___extension__)) {
+ // __extension__ silences extension warnings in the subexpression.
+ ExtensionRAIIObject O(Diags); // Use RAII to do this.
+ ConsumeToken();
+ return ParseStructDeclaration(DS, Fields);
+ }
+
+ // Parse the common specifier-qualifiers-list piece.
+ ParseSpecifierQualifierList(DS);
+
+ // If there are no declarators, this is a free-standing declaration
+ // specifier. Let the actions module cope with it.
+ if (Tok.is(tok::semi)) {
+ Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none, DS);
+ return;
+ }
+
+ // Read struct-declarators until we find the semicolon.
+ bool FirstDeclarator = true;
+ SourceLocation CommaLoc;
+ while (1) {
+ ParsingDeclRAIIObject PD(*this);
+ FieldDeclarator DeclaratorInfo(DS);
+ DeclaratorInfo.D.setCommaLoc(CommaLoc);
+
+ // Attributes are only allowed here on successive declarators.
+ if (!FirstDeclarator)
+ MaybeParseGNUAttributes(DeclaratorInfo.D);
+
+ /// struct-declarator: declarator
+ /// struct-declarator: declarator[opt] ':' constant-expression
+ if (Tok.isNot(tok::colon)) {
+ // Don't parse FOO:BAR as if it were a typo for FOO::BAR.
+ ColonProtectionRAIIObject X(*this);
+ ParseDeclarator(DeclaratorInfo.D);
+ }
+
+ if (Tok.is(tok::colon)) {
+ ConsumeToken();
+ ExprResult Res(ParseConstantExpression());
+ if (Res.isInvalid())
+ SkipUntil(tok::semi, true, true);
+ else
+ DeclaratorInfo.BitfieldSize = Res.release();
+ }
+
+ // If attributes exist after the declarator, parse them.
+ MaybeParseGNUAttributes(DeclaratorInfo.D);
+
+ // We're done with this declarator; invoke the callback.
+ Decl *D = Fields.invoke(DeclaratorInfo);
+ PD.complete(D);
+
+ // If we don't have a comma, it is either the end of the list (a ';')
+ // or an error, bail out.
+ if (Tok.isNot(tok::comma))
+ return;
+
+ // Consume the comma.
+ CommaLoc = ConsumeToken();
+
+ FirstDeclarator = false;
+ }
+}
+
+/// ParseStructUnionBody
+/// struct-contents:
+/// struct-declaration-list
+/// [EXT] empty
+/// [GNU] "struct-declaration-list" without terminating ';'
+/// struct-declaration-list:
+/// struct-declaration
+/// struct-declaration-list struct-declaration
+/// [ObjC] '@' 'defs' '(' class-name ')'
+///
+void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
+ unsigned TagType, Decl *TagDecl) {
+ PrettyDeclStackTraceEntry CrashInfo(Actions, TagDecl, RecordLoc,
+ "parsing struct/union body");
+
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ if (T.consumeOpen())
+ return;
+
+ ParseScope StructScope(this, Scope::ClassScope|Scope::DeclScope);
+ Actions.ActOnTagStartDefinition(getCurScope(), TagDecl);
+
+ // Empty structs are an extension in C (C99 6.7.2.1p7), but are allowed in
+ // C++.
+ if (Tok.is(tok::r_brace) && !getLangOpts().CPlusPlus) {
+ Diag(Tok, diag::ext_empty_struct_union) << (TagType == TST_union);
+ Diag(Tok, diag::warn_empty_struct_union_compat) << (TagType == TST_union);
+ }
+
+ SmallVector<Decl *, 32> FieldDecls;
+
+ // While we still have something to read, read the declarations in the struct.
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+ // Each iteration of this loop reads one struct-declaration.
+
+ // Check for extraneous top-level semicolon.
+ if (Tok.is(tok::semi)) {
+ Diag(Tok, diag::ext_extra_struct_semi)
+ << DeclSpec::getSpecifierName((DeclSpec::TST)TagType)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+ ConsumeToken();
+ continue;
+ }
+
+ // Parse all the comma separated declarators.
+ DeclSpec DS(AttrFactory);
+
+ if (!Tok.is(tok::at)) {
+ struct CFieldCallback : FieldCallback {
+ Parser &P;
+ Decl *TagDecl;
+ SmallVectorImpl<Decl *> &FieldDecls;
+
+ CFieldCallback(Parser &P, Decl *TagDecl,
+ SmallVectorImpl<Decl *> &FieldDecls) :
+ P(P), TagDecl(TagDecl), FieldDecls(FieldDecls) {}
+
+ virtual Decl *invoke(FieldDeclarator &FD) {
+ // Install the declarator into the current TagDecl.
+ Decl *Field = P.Actions.ActOnField(P.getCurScope(), TagDecl,
+ FD.D.getDeclSpec().getSourceRange().getBegin(),
+ FD.D, FD.BitfieldSize);
+ FieldDecls.push_back(Field);
+ return Field;
+ }
+ } Callback(*this, TagDecl, FieldDecls);
+
+ ParseStructDeclaration(DS, Callback);
+ } else { // Handle @defs
+ ConsumeToken();
+ if (!Tok.isObjCAtKeyword(tok::objc_defs)) {
+ Diag(Tok, diag::err_unexpected_at);
+ SkipUntil(tok::semi, true);
+ continue;
+ }
+ ConsumeToken();
+ ExpectAndConsume(tok::l_paren, diag::err_expected_lparen);
+ if (!Tok.is(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::semi, true);
+ continue;
+ }
+ SmallVector<Decl *, 16> Fields;
+ Actions.ActOnDefs(getCurScope(), TagDecl, Tok.getLocation(),
+ Tok.getIdentifierInfo(), Fields);
+ FieldDecls.insert(FieldDecls.end(), Fields.begin(), Fields.end());
+ ConsumeToken();
+ ExpectAndConsume(tok::r_paren, diag::err_expected_rparen);
+ }
+
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ } else if (Tok.is(tok::r_brace)) {
+ ExpectAndConsume(tok::semi, diag::ext_expected_semi_decl_list);
+ break;
+ } else {
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_decl_list);
+ // Skip to end of block or statement to avoid ext-warning on extra ';'.
+ SkipUntil(tok::r_brace, true, true);
+ // If we stopped at a ';', eat it.
+ if (Tok.is(tok::semi)) ConsumeToken();
+ }
+ }
+
+ T.consumeClose();
+
+ ParsedAttributes attrs(AttrFactory);
+ // If attributes exist after struct contents, parse them.
+ MaybeParseGNUAttributes(attrs);
+
+ Actions.ActOnFields(getCurScope(),
+ RecordLoc, TagDecl, FieldDecls,
+ T.getOpenLocation(), T.getCloseLocation(),
+ attrs.getList());
+ StructScope.Exit();
+ Actions.ActOnTagFinishDefinition(getCurScope(), TagDecl,
+ T.getCloseLocation());
+}
+
+/// ParseEnumSpecifier
+/// enum-specifier: [C99 6.7.2.2]
+/// 'enum' identifier[opt] '{' enumerator-list '}'
+/// [C99/C++] 'enum' identifier[opt] '{' enumerator-list ',' '}'
+/// [GNU] 'enum' attributes[opt] identifier[opt] '{' enumerator-list ',' [opt]
+/// '}' attributes[opt]
+/// [MS] 'enum' __declspec[opt] identifier[opt] '{' enumerator-list ',' [opt]
+/// '}'
+/// 'enum' identifier
+/// [GNU] 'enum' attributes[opt] identifier
+///
+/// [C++11] enum-head '{' enumerator-list[opt] '}'
+/// [C++11] enum-head '{' enumerator-list ',' '}'
+///
+/// enum-head: [C++11]
+/// enum-key attribute-specifier-seq[opt] identifier[opt] enum-base[opt]
+/// enum-key attribute-specifier-seq[opt] nested-name-specifier
+/// identifier enum-base[opt]
+///
+/// enum-key: [C++11]
+/// 'enum'
+/// 'enum' 'class'
+/// 'enum' 'struct'
+///
+/// enum-base: [C++11]
+/// ':' type-specifier-seq
+///
+/// [C++] elaborated-type-specifier:
+/// [C++] 'enum' '::'[opt] nested-name-specifier[opt] identifier
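+///
+/// For example (illustrative):
+/// enum Color { Red, Green }; // enum-specifier
+/// enum class State : unsigned char { Off, On }; // C++11 enum-head with enum-base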
+///
+void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
+ const ParsedTemplateInfo &TemplateInfo,
+ AccessSpecifier AS, DeclSpecContext DSC) {
+ // Parse the tag portion of this.
+ if (Tok.is(tok::code_completion)) {
+ // Code completion for an enum name.
+ Actions.CodeCompleteTag(getCurScope(), DeclSpec::TST_enum);
+ return cutOffParsing();
+ }
+
+ SourceLocation ScopedEnumKWLoc;
+ bool IsScopedUsingClassTag = false;
+
+ if (getLangOpts().CPlusPlus0x &&
+ (Tok.is(tok::kw_class) || Tok.is(tok::kw_struct))) {
+ Diag(Tok, diag::warn_cxx98_compat_scoped_enum);
+ IsScopedUsingClassTag = Tok.is(tok::kw_class);
+ ScopedEnumKWLoc = ConsumeToken();
+ }
+
+ // C++11 [temp.explicit]p12: The usual access controls do not apply to names
+ // used to specify explicit instantiations. We extend this to also cover
+ // explicit specializations.
+ Sema::SuppressAccessChecksRAII SuppressAccess(Actions,
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
+
+ // If attributes exist after tag, parse them.
+ ParsedAttributes attrs(AttrFactory);
+ MaybeParseGNUAttributes(attrs);
+
+ // If declspecs exist after tag, parse them.
+ while (Tok.is(tok::kw___declspec))
+ ParseMicrosoftDeclSpec(attrs);
+
+ // Enum definitions should not be parsed in a trailing-return-type.
+ bool AllowDeclaration = DSC != DSC_trailing;
+
+ bool AllowFixedUnderlyingType = AllowDeclaration &&
+ (getLangOpts().CPlusPlus0x || getLangOpts().MicrosoftExt ||
+ getLangOpts().ObjC2);
+
+ CXXScopeSpec &SS = DS.getTypeSpecScope();
+ if (getLangOpts().CPlusPlus) {
+ // "enum foo : bar;" is not a potential typo for "enum foo::bar;"
+ // if a fixed underlying type is allowed.
+ ColonProtectionRAIIObject X(*this, AllowFixedUnderlyingType);
+
+ if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
+ /*EnteringContext=*/false))
+ return;
+
+ if (SS.isSet() && Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ if (Tok.isNot(tok::l_brace)) {
+ // Has no name and is not a definition.
+ // Skip the rest of this declarator, up until the comma or semicolon.
+ SkipUntil(tok::comma, true);
+ return;
+ }
+ }
+ }
+
+ // Must have either 'enum name' or 'enum {...}'.
+ if (Tok.isNot(tok::identifier) && Tok.isNot(tok::l_brace) &&
+ !(AllowFixedUnderlyingType && Tok.is(tok::colon))) {
+ Diag(Tok, diag::err_expected_ident_lbrace);
+
+ // Skip the rest of this declarator, up until the comma or semicolon.
+ SkipUntil(tok::comma, true);
+ return;
+ }
+
+ // If an identifier is present, consume and remember it.
+ IdentifierInfo *Name = 0;
+ SourceLocation NameLoc;
+ if (Tok.is(tok::identifier)) {
+ Name = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+ }
+
+ if (!Name && ScopedEnumKWLoc.isValid()) {
+ // C++0x 7.2p2: The optional identifier shall not be omitted in the
+ // declaration of a scoped enumeration.
+ Diag(Tok, diag::err_scoped_enum_missing_identifier);
+ ScopedEnumKWLoc = SourceLocation();
+ IsScopedUsingClassTag = false;
+ }
+
+ // Stop suppressing access control now we've parsed the enum name.
+ SuppressAccess.done();
+
+ TypeResult BaseType;
+
+ // Parse the fixed underlying type.
+ if (AllowFixedUnderlyingType && Tok.is(tok::colon)) {
+ bool PossibleBitfield = false;
+ if (getCurScope()->getFlags() & Scope::ClassScope) {
+ // If we're in class scope, this can either be an enum declaration with
+ // an underlying type, or a declaration of a bitfield member. We try to
+ // use a simple disambiguation scheme first to catch the common cases
+ // (integer literal, sizeof); if it's still ambiguous, we then consider
+ // anything that's a simple-type-specifier followed by '(' as an
+ // expression. This suffices because function types are not valid
+ // underlying types anyway.
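+ // For illustration (hypothetical name E): inside a class, 'enum E : 3;' is an
+ // unnamed bit-field of width 3 whose type is a previously-declared enum E,
+ // while 'enum E : int { ... };' declares E with a fixed underlying type.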
+ TPResult TPR = isExpressionOrTypeSpecifierSimple(NextToken().getKind());
+ // If the next token starts an expression, we know we're parsing a
+ // bit-field. This is the common case.
+ if (TPR == TPResult::True())
+ PossibleBitfield = true;
+ // If the next token starts a type-specifier-seq, it may be either a
+ // fixed underlying type or the start of a function-style cast in C++;
+ // lookahead one more token to see if it's obvious that we have a
+ // fixed underlying type.
+ else if (TPR == TPResult::False() &&
+ GetLookAheadToken(2).getKind() == tok::semi) {
+ // Consume the ':'.
+ ConsumeToken();
+ } else {
+ // We have the start of a type-specifier-seq, so we have to perform
+ // tentative parsing to determine whether we have an expression or a
+ // type.
+ TentativeParsingAction TPA(*this);
+
+ // Consume the ':'.
+ ConsumeToken();
+
+ // If we see a type specifier followed by an open-brace, we have an
+ // ambiguity between an underlying type and a C++11 braced
+ // function-style cast. Resolve this by always treating it as an
+ // underlying type.
+ // FIXME: The standard is not entirely clear on how to disambiguate in
+ // this case.
+ if ((getLangOpts().CPlusPlus &&
+ isCXXDeclarationSpecifier(TPResult::True()) != TPResult::True()) ||
+ (!getLangOpts().CPlusPlus && !isDeclarationSpecifier(true))) {
+ // We'll parse this as a bitfield later.
+ PossibleBitfield = true;
+ TPA.Revert();
+ } else {
+ // We have a type-specifier-seq.
+ TPA.Commit();
+ }
+ }
+ } else {
+ // Consume the ':'.
+ ConsumeToken();
+ }
+
+ if (!PossibleBitfield) {
+ SourceRange Range;
+ BaseType = ParseTypeName(&Range);
+
+ if (!getLangOpts().CPlusPlus0x && !getLangOpts().ObjC2)
+ Diag(StartLoc, diag::ext_ms_enum_fixed_underlying_type)
+ << Range;
+ if (getLangOpts().CPlusPlus0x)
+ Diag(StartLoc, diag::warn_cxx98_compat_enum_fixed_underlying_type);
+ }
+ }
+
+ // There are four options here. If we have 'friend enum foo;' then this is a
+ // friend declaration, and cannot have an accompanying definition. If we have
+ // 'enum foo;', then this is a forward declaration. If we have
+ // 'enum foo {...' then this is a definition. Otherwise we have something
+ // like 'enum foo xyz', a reference.
+ //
+ // This is needed to handle stuff like this right (C99 6.7.2.3p11):
+ // enum foo {..}; void bar() { enum foo; } <- new foo in bar.
+ // enum foo {..}; void bar() { enum foo x; } <- use of old foo.
+ //
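+ // Roughly, for illustration (hypothetical name foo):
+ //   friend enum foo;    -> TUK_Friend
+ //   enum foo;           -> TUK_Declaration
+ //   enum foo { ... }    -> TUK_Definition
+ //   enum foo xyz;       -> TUK_Reference
+ //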
+ Sema::TagUseKind TUK;
+ if (DS.isFriendSpecified())
+ TUK = Sema::TUK_Friend;
+ else if (!AllowDeclaration)
+ TUK = Sema::TUK_Reference;
+ else if (Tok.is(tok::l_brace))
+ TUK = Sema::TUK_Definition;
+ else if (Tok.is(tok::semi) && DSC != DSC_type_specifier)
+ TUK = Sema::TUK_Declaration;
+ else
+ TUK = Sema::TUK_Reference;
+
+ MultiTemplateParamsArg TParams;
+ if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
+ TUK != Sema::TUK_Reference) {
+ if (!getLangOpts().CPlusPlus0x || !SS.isSet()) {
+ // Skip the rest of this declarator, up until the comma or semicolon.
+ Diag(Tok, diag::err_enum_template);
+ SkipUntil(tok::comma, true);
+ return;
+ }
+
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation) {
+ // Enumerations can't be explicitly instantiated.
+ DS.SetTypeSpecError();
+ Diag(StartLoc, diag::err_explicit_instantiation_enum);
+ return;
+ }
+
+ assert(TemplateInfo.TemplateParams && "no template parameters");
+ TParams = MultiTemplateParamsArg(TemplateInfo.TemplateParams->data(),
+ TemplateInfo.TemplateParams->size());
+ }
+
+ if (!Name && TUK != Sema::TUK_Definition) {
+ Diag(Tok, diag::err_enumerator_unnamed_no_def);
+
+ // Skip the rest of this declarator, up until the comma or semicolon.
+ SkipUntil(tok::comma, true);
+ return;
+ }
+
+ bool Owned = false;
+ bool IsDependent = false;
+ const char *PrevSpec = 0;
+ unsigned DiagID;
+ Decl *TagDecl = Actions.ActOnTag(getCurScope(), DeclSpec::TST_enum, TUK,
+ StartLoc, SS, Name, NameLoc, attrs.getList(),
+ AS, DS.getModulePrivateSpecLoc(), TParams,
+ Owned, IsDependent, ScopedEnumKWLoc,
+ IsScopedUsingClassTag, BaseType);
+
+ if (IsDependent) {
+ // This enum has a dependent nested-name-specifier. Handle it as a
+ // dependent tag.
+ if (!Name) {
+ DS.SetTypeSpecError();
+ Diag(Tok, diag::err_expected_type_name_after_typename);
+ return;
+ }
+
+ TypeResult Type = Actions.ActOnDependentTag(getCurScope(), DeclSpec::TST_enum,
+ TUK, SS, Name, StartLoc,
+ NameLoc);
+ if (Type.isInvalid()) {
+ DS.SetTypeSpecError();
+ return;
+ }
+
+ if (DS.SetTypeSpecType(DeclSpec::TST_typename, StartLoc,
+ NameLoc.isValid() ? NameLoc : StartLoc,
+ PrevSpec, DiagID, Type.get()))
+ Diag(StartLoc, DiagID) << PrevSpec;
+
+ return;
+ }
+
+ if (!TagDecl) {
+ // The action failed to produce an enumeration tag. If this is a
+ // definition, consume the entire definition.
+ if (Tok.is(tok::l_brace) && TUK != Sema::TUK_Reference) {
+ ConsumeBrace();
+ SkipUntil(tok::r_brace);
+ }
+
+ DS.SetTypeSpecError();
+ return;
+ }
+
+ if (Tok.is(tok::l_brace) && TUK != Sema::TUK_Reference) {
+ if (TUK == Sema::TUK_Friend) {
+ Diag(Tok, diag::err_friend_decl_defines_type)
+ << SourceRange(DS.getFriendSpecLoc());
+ ConsumeBrace();
+ SkipUntil(tok::r_brace);
+ } else {
+ ParseEnumBody(StartLoc, TagDecl);
+ }
+ }
+
+ if (DS.SetTypeSpecType(DeclSpec::TST_enum, StartLoc,
+ NameLoc.isValid() ? NameLoc : StartLoc,
+ PrevSpec, DiagID, TagDecl, Owned))
+ Diag(StartLoc, DiagID) << PrevSpec;
+}
+
+/// ParseEnumBody - Parse a {} enclosed enumerator-list.
+/// enumerator-list:
+/// enumerator
+/// enumerator-list ',' enumerator
+/// enumerator:
+/// enumeration-constant
+/// enumeration-constant '=' constant-expression
+/// enumeration-constant:
+/// identifier
+///
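+/// For illustration (hypothetical names), this accepts e.g.:
+///   enum E { A, B = 2, C = B + 1, };  // trailing ',' is C99/C++11, ext elsewhere
+///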
+void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
+ // Enter the scope of the enum body and start the definition.
+ ParseScope EnumScope(this, Scope::DeclScope);
+ Actions.ActOnTagStartDefinition(getCurScope(), EnumDecl);
+
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ T.consumeOpen();
+
+ // C does not allow an empty enumerator-list, C++ does [dcl.enum].
+ if (Tok.is(tok::r_brace) && !getLangOpts().CPlusPlus)
+ Diag(Tok, diag::error_empty_enum);
+
+ SmallVector<Decl *, 32> EnumConstantDecls;
+
+ Decl *LastEnumConstDecl = 0;
+
+ // Parse the enumerator-list.
+ while (Tok.is(tok::identifier)) {
+ IdentifierInfo *Ident = Tok.getIdentifierInfo();
+ SourceLocation IdentLoc = ConsumeToken();
+
+ // If attributes exist after the enumerator, parse them.
+ ParsedAttributes attrs(AttrFactory);
+ MaybeParseGNUAttributes(attrs);
+
+ SourceLocation EqualLoc;
+ ExprResult AssignedVal;
+ ParsingDeclRAIIObject PD(*this);
+
+ if (Tok.is(tok::equal)) {
+ EqualLoc = ConsumeToken();
+ AssignedVal = ParseConstantExpression();
+ if (AssignedVal.isInvalid())
+ SkipUntil(tok::comma, tok::r_brace, true, true);
+ }
+
+ // Install the enumerator constant into EnumDecl.
+ Decl *EnumConstDecl = Actions.ActOnEnumConstant(getCurScope(), EnumDecl,
+ LastEnumConstDecl,
+ IdentLoc, Ident,
+ attrs.getList(), EqualLoc,
+ AssignedVal.release());
+ PD.complete(EnumConstDecl);
+
+ EnumConstantDecls.push_back(EnumConstDecl);
+ LastEnumConstDecl = EnumConstDecl;
+
+ if (Tok.is(tok::identifier)) {
+ // We're missing a comma between enumerators.
+ SourceLocation Loc = PP.getLocForEndOfToken(PrevTokLocation);
+ Diag(Loc, diag::err_enumerator_list_missing_comma)
+ << FixItHint::CreateInsertion(Loc, ", ");
+ continue;
+ }
+
+ if (Tok.isNot(tok::comma))
+ break;
+ SourceLocation CommaLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::identifier)) {
+ if (!getLangOpts().C99 && !getLangOpts().CPlusPlus0x)
+ Diag(CommaLoc, diag::ext_enumerator_list_comma)
+ << getLangOpts().CPlusPlus
+ << FixItHint::CreateRemoval(CommaLoc);
+ else if (getLangOpts().CPlusPlus0x)
+ Diag(CommaLoc, diag::warn_cxx98_compat_enumerator_list_comma)
+ << FixItHint::CreateRemoval(CommaLoc);
+ }
+ }
+
+ // Eat the }.
+ T.consumeClose();
+
+ // If attributes exist after the identifier list, parse them.
+ ParsedAttributes attrs(AttrFactory);
+ MaybeParseGNUAttributes(attrs);
+
+ Actions.ActOnEnumBody(StartLoc, T.getOpenLocation(), T.getCloseLocation(),
+ EnumDecl, EnumConstantDecls.data(),
+ EnumConstantDecls.size(), getCurScope(),
+ attrs.getList());
+
+ EnumScope.Exit();
+ Actions.ActOnTagFinishDefinition(getCurScope(), EnumDecl,
+ T.getCloseLocation());
+}
+
+/// isTypeQualifier - Return true if the current token could be the
+/// start of a type-qualifier-list.
+bool Parser::isTypeQualifier() const {
+ switch (Tok.getKind()) {
+ default: return false;
+
+ // type-qualifier only in OpenCL
+ case tok::kw_private:
+ return getLangOpts().OpenCL;
+
+ // type-qualifier
+ case tok::kw_const:
+ case tok::kw_volatile:
+ case tok::kw_restrict:
+ case tok::kw___private:
+ case tok::kw___local:
+ case tok::kw___global:
+ case tok::kw___constant:
+ case tok::kw___read_only:
+ case tok::kw___read_write:
+ case tok::kw___write_only:
+ return true;
+ }
+}
+
+/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
+/// is definitely a type-specifier. Return false if it isn't part of a type
+/// specifier or if we're not sure.
+bool Parser::isKnownToBeTypeSpecifier(const Token &Tok) const {
+ switch (Tok.getKind()) {
+ default: return false;
+ // type-specifiers
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw__Complex:
+ case tok::kw__Imaginary:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_int:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_bool:
+ case tok::kw__Bool:
+ case tok::kw__Decimal32:
+ case tok::kw__Decimal64:
+ case tok::kw__Decimal128:
+ case tok::kw___vector:
+
+ // struct-or-union-specifier (C99) or class-specifier (C++)
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union:
+ // enum-specifier
+ case tok::kw_enum:
+
+ // typedef-name
+ case tok::annot_typename:
+ return true;
+ }
+}
+
+/// isTypeSpecifierQualifier - Return true if the current token could be the
+/// start of a specifier-qualifier-list.
+bool Parser::isTypeSpecifierQualifier() {
+ switch (Tok.getKind()) {
+ default: return false;
+
+ case tok::identifier: // foo::bar
+ if (TryAltiVecVectorToken())
+ return true;
+ // Fall through.
+ case tok::kw_typename: // typename T::type
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return true;
+ if (Tok.is(tok::identifier))
+ return false;
+ return isTypeSpecifierQualifier();
+
+ case tok::coloncolon: // ::foo::bar
+ if (NextToken().is(tok::kw_new) || // ::new
+ NextToken().is(tok::kw_delete)) // ::delete
+ return false;
+
+ if (TryAnnotateTypeOrScopeToken())
+ return true;
+ return isTypeSpecifierQualifier();
+
+ // GNU attributes support.
+ case tok::kw___attribute:
+ // GNU typeof support.
+ case tok::kw_typeof:
+
+ // type-specifiers
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw__Complex:
+ case tok::kw__Imaginary:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_int:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_bool:
+ case tok::kw__Bool:
+ case tok::kw__Decimal32:
+ case tok::kw__Decimal64:
+ case tok::kw__Decimal128:
+ case tok::kw___vector:
+
+ // struct-or-union-specifier (C99) or class-specifier (C++)
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union:
+ // enum-specifier
+ case tok::kw_enum:
+
+ // type-qualifier
+ case tok::kw_const:
+ case tok::kw_volatile:
+ case tok::kw_restrict:
+
+ // typedef-name
+ case tok::annot_typename:
+ return true;
+
+ // GNU ObjC bizarre protocol extension: <proto1,proto2> with implicit 'id'.
+ case tok::less:
+ return getLangOpts().ObjC1;
+
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ case tok::kw___thiscall:
+ case tok::kw___w64:
+ case tok::kw___ptr64:
+ case tok::kw___ptr32:
+ case tok::kw___pascal:
+ case tok::kw___unaligned:
+
+ case tok::kw___private:
+ case tok::kw___local:
+ case tok::kw___global:
+ case tok::kw___constant:
+ case tok::kw___read_only:
+ case tok::kw___read_write:
+ case tok::kw___write_only:
+
+ return true;
+
+ case tok::kw_private:
+ return getLangOpts().OpenCL;
+
+ // C11 _Atomic()
+ case tok::kw__Atomic:
+ return true;
+ }
+}
+
+/// isDeclarationSpecifier() - Return true if the current token is part of a
+/// declaration specifier.
+///
+/// \param DisambiguatingWithExpression True to indicate that the purpose of
+/// this check is to disambiguate between an expression and a declaration.
+bool Parser::isDeclarationSpecifier(bool DisambiguatingWithExpression) {
+ switch (Tok.getKind()) {
+ default: return false;
+
+ case tok::kw_private:
+ return getLangOpts().OpenCL;
+
+ case tok::identifier: // foo::bar
+ // Unfortunate hack to support "Class.factoryMethod" notation.
+ if (getLangOpts().ObjC1 && NextToken().is(tok::period))
+ return false;
+ if (TryAltiVecVectorToken())
+ return true;
+ // Fall through.
+ case tok::kw_decltype: // decltype(T())::type
+ case tok::kw_typename: // typename T::type
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return true;
+ if (Tok.is(tok::identifier))
+ return false;
+
+ // If we're in Objective-C and we have an Objective-C class type followed
+ // by an identifier and then either ':' or ']', in a place where an
+ // expression is permitted, then this is probably a class message send
+ // missing the initial '['. In this case, we won't consider this to be
+ // the start of a declaration.
+ if (DisambiguatingWithExpression &&
+ isStartOfObjCClassMessageMissingOpenBracket())
+ return false;
+
+ return isDeclarationSpecifier();
+
+ case tok::coloncolon: // ::foo::bar
+ if (NextToken().is(tok::kw_new) || // ::new
+ NextToken().is(tok::kw_delete)) // ::delete
+ return false;
+
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return true;
+ return isDeclarationSpecifier();
+
+ // storage-class-specifier
+ case tok::kw_typedef:
+ case tok::kw_extern:
+ case tok::kw___private_extern__:
+ case tok::kw_static:
+ case tok::kw_auto:
+ case tok::kw_register:
+ case tok::kw___thread:
+
+ // Modules
+ case tok::kw___module_private__:
+
+ // type-specifiers
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw__Complex:
+ case tok::kw__Imaginary:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+
+ case tok::kw_int:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_bool:
+ case tok::kw__Bool:
+ case tok::kw__Decimal32:
+ case tok::kw__Decimal64:
+ case tok::kw__Decimal128:
+ case tok::kw___vector:
+
+ // struct-or-union-specifier (C99) or class-specifier (C++)
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union:
+ // enum-specifier
+ case tok::kw_enum:
+
+ // type-qualifier
+ case tok::kw_const:
+ case tok::kw_volatile:
+ case tok::kw_restrict:
+
+ // function-specifier
+ case tok::kw_inline:
+ case tok::kw_virtual:
+ case tok::kw_explicit:
+
+ // static_assert-declaration
+ case tok::kw__Static_assert:
+
+ // GNU typeof support.
+ case tok::kw_typeof:
+
+ // GNU attributes.
+ case tok::kw___attribute:
+ return true;
+
+ // C++0x decltype.
+ case tok::annot_decltype:
+ return true;
+
+ // C11 _Atomic()
+ case tok::kw__Atomic:
+ return true;
+
+ // GNU ObjC bizarre protocol extension: <proto1,proto2> with implicit 'id'.
+ case tok::less:
+ return getLangOpts().ObjC1;
+
+ // typedef-name
+ case tok::annot_typename:
+ return !DisambiguatingWithExpression ||
+ !isStartOfObjCClassMessageMissingOpenBracket();
+
+ case tok::kw___declspec:
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ case tok::kw___thiscall:
+ case tok::kw___w64:
+ case tok::kw___ptr64:
+ case tok::kw___ptr32:
+ case tok::kw___forceinline:
+ case tok::kw___pascal:
+ case tok::kw___unaligned:
+
+ case tok::kw___private:
+ case tok::kw___local:
+ case tok::kw___global:
+ case tok::kw___constant:
+ case tok::kw___read_only:
+ case tok::kw___read_write:
+ case tok::kw___write_only:
+
+ return true;
+ }
+}
+
+bool Parser::isConstructorDeclarator() {
+ TentativeParsingAction TPA(*this);
+
+ // Parse the C++ scope specifier.
+ CXXScopeSpec SS;
+ if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
+ /*EnteringContext=*/true)) {
+ TPA.Revert();
+ return false;
+ }
+
+ // Parse the constructor name.
+ if (Tok.is(tok::identifier) || Tok.is(tok::annot_template_id)) {
+ // We already know that we have a constructor name; just consume
+ // the token.
+ ConsumeToken();
+ } else {
+ TPA.Revert();
+ return false;
+ }
+
+ // Current class name must be followed by a left parenthesis.
+ if (Tok.isNot(tok::l_paren)) {
+ TPA.Revert();
+ return false;
+ }
+ ConsumeParen();
+
+ // A right parenthesis, or ellipsis followed by a right parenthesis signals
+ // that we have a constructor.
+ if (Tok.is(tok::r_paren) ||
+ (Tok.is(tok::ellipsis) && NextToken().is(tok::r_paren))) {
+ TPA.Revert();
+ return true;
+ }
+
+ // If we need to, enter the specified scope.
+ DeclaratorScopeObj DeclScopeObj(*this, SS);
+ if (SS.isSet() && Actions.ShouldEnterDeclaratorScope(getCurScope(), SS))
+ DeclScopeObj.EnterDeclaratorScope();
+
+ // Optionally skip Microsoft attributes.
+ ParsedAttributes Attrs(AttrFactory);
+ MaybeParseMicrosoftAttributes(Attrs);
+
+ // Check whether the next token(s) are part of a declaration
+ // specifier, in which case we have the start of a parameter and,
+ // therefore, we know that this is a constructor.
+ bool IsConstructor = false;
+ if (isDeclarationSpecifier())
+ IsConstructor = true;
+ else if (Tok.is(tok::identifier) ||
+ (Tok.is(tok::annot_cxxscope) && NextToken().is(tok::identifier))) {
+ // We've seen "C ( X" or "C ( X::Y", but "X" / "X::Y" is not a type.
+ // This might be a parenthesized member name, but is more likely to
+ // be a constructor declaration with an invalid argument type. Keep
+ // looking.
+ if (Tok.is(tok::annot_cxxscope))
+ ConsumeToken();
+ ConsumeToken();
+
+ // If this is not a constructor, we must be parsing a declarator,
+ // which must have one of the following syntactic forms (see the
+ // grammar extract at the start of ParseDirectDeclarator):
+ switch (Tok.getKind()) {
+ case tok::l_paren:
+ // C(X ( int));
+ case tok::l_square:
+ // C(X [ 5]);
+ // C(X [ [attribute]]);
+ case tok::coloncolon:
+ // C(X :: Y);
+ // C(X :: *p);
+ case tok::r_paren:
+ // C(X )
+ // Assume this isn't a constructor, rather than assuming it's a
+ // constructor with an unnamed parameter of an ill-formed type.
+ break;
+
+ default:
+ IsConstructor = true;
+ break;
+ }
+ }
+
+ TPA.Revert();
+ return IsConstructor;
+}
+
+/// ParseTypeQualifierListOpt
+/// type-qualifier-list: [C99 6.7.5]
+/// type-qualifier
+/// [vendor] attributes
+/// [ only if VendorAttributesAllowed=true ]
+/// type-qualifier-list type-qualifier
+/// [vendor] type-qualifier-list attributes
+/// [ only if VendorAttributesAllowed=true ]
+/// [C++0x] attribute-specifier[opt] is allowed before cv-qualifier-seq
+/// [ only if CXX11AttributesAllowed=true ]
+/// Note: vendor can be GNU, MS, etc.
+///
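+/// For illustration (hypothetical declaration): in 'int *const volatile p;'
+/// this consumes 'const volatile' after the '*'; with VendorAttributesAllowed
+/// it also accepts e.g. '__attribute__((aligned(8)))' or '__ptr64' there.
+///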
+void Parser::ParseTypeQualifierListOpt(DeclSpec &DS,
+ bool VendorAttributesAllowed,
+ bool CXX11AttributesAllowed) {
+ if (getLangOpts().CPlusPlus0x && CXX11AttributesAllowed &&
+ isCXX11AttributeSpecifier()) {
+ ParsedAttributesWithRange attrs(AttrFactory);
+ ParseCXX11Attributes(attrs);
+ DS.takeAttributesFrom(attrs);
+ }
+
+ SourceLocation EndLoc;
+
+ while (1) {
+ bool isInvalid = false;
+ const char *PrevSpec = 0;
+ unsigned DiagID = 0;
+ SourceLocation Loc = Tok.getLocation();
+
+ switch (Tok.getKind()) {
+ case tok::code_completion:
+ Actions.CodeCompleteTypeQualifiers(DS);
+ return cutOffParsing();
+
+ case tok::kw_const:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_const , Loc, PrevSpec, DiagID,
+ getLangOpts());
+ break;
+ case tok::kw_volatile:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_volatile, Loc, PrevSpec, DiagID,
+ getLangOpts());
+ break;
+ case tok::kw_restrict:
+ isInvalid = DS.SetTypeQual(DeclSpec::TQ_restrict, Loc, PrevSpec, DiagID,
+ getLangOpts());
+ break;
+
+ // OpenCL qualifiers:
+ case tok::kw_private:
+ if (!getLangOpts().OpenCL)
+ goto DoneWithTypeQuals;
+ case tok::kw___private:
+ case tok::kw___global:
+ case tok::kw___local:
+ case tok::kw___constant:
+ case tok::kw___read_only:
+ case tok::kw___write_only:
+ case tok::kw___read_write:
+ ParseOpenCLQualifiers(DS);
+ break;
+
+ case tok::kw___w64:
+ case tok::kw___ptr64:
+ case tok::kw___ptr32:
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ case tok::kw___thiscall:
+ case tok::kw___unaligned:
+ if (VendorAttributesAllowed) {
+ ParseMicrosoftTypeAttributes(DS.getAttributes());
+ continue;
+ }
+ goto DoneWithTypeQuals;
+ case tok::kw___pascal:
+ if (VendorAttributesAllowed) {
+ ParseBorlandTypeAttributes(DS.getAttributes());
+ continue;
+ }
+ goto DoneWithTypeQuals;
+ case tok::kw___attribute:
+ if (VendorAttributesAllowed) {
+ ParseGNUAttributes(DS.getAttributes());
+ continue; // do *not* consume the next token!
+ }
+ // otherwise, FALL THROUGH!
+ default:
+ DoneWithTypeQuals:
+ // If this is not a type-qualifier token, we're done reading type
+ // qualifiers. First verify that DeclSpec's are consistent.
+ DS.Finish(Diags, PP);
+ if (EndLoc.isValid())
+ DS.SetRangeEnd(EndLoc);
+ return;
+ }
+
+ // If the specifier combination wasn't legal, issue a diagnostic.
+ if (isInvalid) {
+ assert(PrevSpec && "Method did not return previous specifier!");
+ Diag(Tok, DiagID) << PrevSpec;
+ }
+ EndLoc = ConsumeToken();
+ }
+}
+
+
+/// ParseDeclarator - Parse and verify a newly-initialized declarator.
+///
+void Parser::ParseDeclarator(Declarator &D) {
+ /// This implements the 'declarator' production in the C grammar, then checks
+ /// for well-formedness and issues diagnostics.
+ ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
+}
+
+static bool isPtrOperatorToken(tok::TokenKind Kind, const LangOptions &Lang) {
+ if (Kind == tok::star || Kind == tok::caret)
+ return true;
+
+ // We parse rvalue refs in C++03, because otherwise the errors are scary.
+ if (!Lang.CPlusPlus)
+ return false;
+
+ return Kind == tok::amp || Kind == tok::ampamp;
+}
+
+/// ParseDeclaratorInternal - Parse a C or C++ declarator. The direct-declarator
+/// is parsed by the function passed to it. Pass null, and the direct-declarator
+/// isn't parsed at all, making this function effectively parse the C++
+/// ptr-operator production.
+///
+/// If the grammar of this construct is extended, matching changes must also be
+/// made to TryParseDeclarator and MightBeDeclarator, and possibly to
+/// isConstructorDeclarator.
+///
+/// declarator: [C99 6.7.5] [C++ 8p4, dcl.decl]
+/// [C] pointer[opt] direct-declarator
+/// [C++] direct-declarator
+/// [C++] ptr-operator declarator
+///
+/// pointer: [C99 6.7.5]
+/// '*' type-qualifier-list[opt]
+/// '*' type-qualifier-list[opt] pointer
+///
+/// ptr-operator:
+/// '*' cv-qualifier-seq[opt]
+/// '&'
+/// [C++0x] '&&'
+/// [GNU] '&' restrict[opt] attributes[opt]
+/// [GNU?] '&&' restrict[opt] attributes[opt]
+/// '::'[opt] nested-name-specifier '*' cv-qualifier-seq[opt]
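+///
+/// For illustration only (hypothetical names), declarator forms this parses
+/// include:
+///   int *const p;   // pointer, cv-qualified
+///   int &r;         // lvalue reference (C++)
+///   int &&rr;       // rvalue reference (C++11; diagnosed as an extension in C++03)
+///   int C::*mp;     // member pointer, via the nested-name-specifier path below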
+void Parser::ParseDeclaratorInternal(Declarator &D,
+ DirectDeclParseFunction DirectDeclParser) {
+ if (Diags.hasAllExtensionsSilenced())
+ D.setExtension();
+
+ // C++ member pointers start with a '::' or a nested-name.
+ // Member pointers get special handling, since there's no place for the
+ // scope spec in the generic path below.
+ if (getLangOpts().CPlusPlus &&
+ (Tok.is(tok::coloncolon) || Tok.is(tok::identifier) ||
+ Tok.is(tok::annot_cxxscope))) {
+ bool EnteringContext = D.getContext() == Declarator::FileContext ||
+ D.getContext() == Declarator::MemberContext;
+ CXXScopeSpec SS;
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), EnteringContext);
+
+ if (SS.isNotEmpty()) {
+ if (Tok.isNot(tok::star)) {
+ // The scope spec really belongs to the direct-declarator.
+ D.getCXXScopeSpec() = SS;
+ if (DirectDeclParser)
+ (this->*DirectDeclParser)(D);
+ return;
+ }
+
+ SourceLocation Loc = ConsumeToken();
+ D.SetRangeEnd(Loc);
+ DeclSpec DS(AttrFactory);
+ ParseTypeQualifierListOpt(DS);
+ D.ExtendWithDeclSpec(DS);
+
+ // Recurse to parse whatever is left.
+ ParseDeclaratorInternal(D, DirectDeclParser);
+
+ // Sema will have to catch (syntactically invalid) pointers into global
+ // scope. It has to catch pointers into namespace scope anyway.
+ D.AddTypeInfo(DeclaratorChunk::getMemberPointer(SS,DS.getTypeQualifiers(),
+ Loc),
+ DS.getAttributes(),
+ /* Don't replace range end. */SourceLocation());
+ return;
+ }
+ }
+
+ tok::TokenKind Kind = Tok.getKind();
+ // Not a pointer, C++ reference, or block.
+ if (!isPtrOperatorToken(Kind, getLangOpts())) {
+ if (DirectDeclParser)
+ (this->*DirectDeclParser)(D);
+ return;
+ }
+
+ // Otherwise, '*' -> pointer, '^' -> block, '&' -> lvalue reference,
+ // '&&' -> rvalue reference
+ SourceLocation Loc = ConsumeToken(); // Eat the *, ^, & or &&.
+ D.SetRangeEnd(Loc);
+
+ if (Kind == tok::star || Kind == tok::caret) {
+ // Is a pointer.
+ DeclSpec DS(AttrFactory);
+
+ // FIXME: GNU attributes are not allowed here in a new-type-id.
+ ParseTypeQualifierListOpt(DS);
+ D.ExtendWithDeclSpec(DS);
+
+ // Recursively parse the declarator.
+ ParseDeclaratorInternal(D, DirectDeclParser);
+ if (Kind == tok::star)
+ // Remember that we parsed a pointer type, and remember the type-quals.
+ D.AddTypeInfo(DeclaratorChunk::getPointer(DS.getTypeQualifiers(), Loc,
+ DS.getConstSpecLoc(),
+ DS.getVolatileSpecLoc(),
+ DS.getRestrictSpecLoc()),
+ DS.getAttributes(),
+ SourceLocation());
+ else
+ // Remember that we parsed a Block type, and remember the type-quals.
+ D.AddTypeInfo(DeclaratorChunk::getBlockPointer(DS.getTypeQualifiers(),
+ Loc),
+ DS.getAttributes(),
+ SourceLocation());
+ } else {
+ // Is a reference
+ DeclSpec DS(AttrFactory);
+
+ // Complain about rvalue references in C++03, but then go on and build
+ // the declarator.
+ if (Kind == tok::ampamp)
+ Diag(Loc, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_rvalue_reference :
+ diag::ext_rvalue_reference);
+
+ // GNU-style and C++11 attributes are allowed here, as is restrict.
+ ParseTypeQualifierListOpt(DS);
+ D.ExtendWithDeclSpec(DS);
+
+ // C++ 8.3.2p1: cv-qualified references are ill-formed except when the
+ // cv-qualifiers are introduced through the use of a typedef or of a
+ // template type argument, in which case the cv-qualifiers are ignored.
+ if (DS.getTypeQualifiers() != DeclSpec::TQ_unspecified) {
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
+ Diag(DS.getConstSpecLoc(),
+ diag::err_invalid_reference_qualifier_application) << "const";
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_volatile)
+ Diag(DS.getVolatileSpecLoc(),
+ diag::err_invalid_reference_qualifier_application) << "volatile";
+ }
+
+ // Recursively parse the declarator.
+ ParseDeclaratorInternal(D, DirectDeclParser);
+
+ if (D.getNumTypeObjects() > 0) {
+ // C++ [dcl.ref]p4: There shall be no references to references.
+ DeclaratorChunk& InnerChunk = D.getTypeObject(D.getNumTypeObjects() - 1);
+ if (InnerChunk.Kind == DeclaratorChunk::Reference) {
+ if (const IdentifierInfo *II = D.getIdentifier())
+ Diag(InnerChunk.Loc, diag::err_illegal_decl_reference_to_reference)
+ << II;
+ else
+ Diag(InnerChunk.Loc, diag::err_illegal_decl_reference_to_reference)
+ << "type name";
+
+ // Once we've complained about the reference-to-reference, we
+ // can go ahead and build the (technically ill-formed)
+ // declarator: reference collapsing will take care of it.
+ }
+ }
+
+ // Remember that we parsed a reference type. It doesn't have type-quals.
+ D.AddTypeInfo(DeclaratorChunk::getReference(DS.getTypeQualifiers(), Loc,
+ Kind == tok::amp),
+ DS.getAttributes(),
+ SourceLocation());
+ }
+}
+
+static void diagnoseMisplacedEllipsis(Parser &P, Declarator &D,
+ SourceLocation EllipsisLoc) {
+ if (EllipsisLoc.isValid()) {
+ FixItHint Insertion;
+ if (!D.getEllipsisLoc().isValid()) {
+ Insertion = FixItHint::CreateInsertion(D.getIdentifierLoc(), "...");
+ D.setEllipsisLoc(EllipsisLoc);
+ }
+ P.Diag(EllipsisLoc, diag::err_misplaced_ellipsis_in_declaration)
+ << FixItHint::CreateRemoval(EllipsisLoc) << Insertion << !D.hasName();
+ }
+}
+
+/// ParseDirectDeclarator
+/// direct-declarator: [C99 6.7.5]
+/// [C99] identifier
+/// '(' declarator ')'
+/// [GNU] '(' attributes declarator ')'
+/// [C90] direct-declarator '[' constant-expression[opt] ']'
+/// [C99] direct-declarator '[' type-qual-list[opt] assignment-expr[opt] ']'
+/// [C99] direct-declarator '[' 'static' type-qual-list[opt] assign-expr ']'
+/// [C99] direct-declarator '[' type-qual-list 'static' assignment-expr ']'
+/// [C99] direct-declarator '[' type-qual-list[opt] '*' ']'
+/// [C++11] direct-declarator '[' constant-expression[opt] ']'
+/// attribute-specifier-seq[opt]
+/// direct-declarator '(' parameter-type-list ')'
+/// direct-declarator '(' identifier-list[opt] ')'
+/// [GNU] direct-declarator '(' parameter-forward-declarations
+/// parameter-type-list[opt] ')'
+/// [C++] direct-declarator '(' parameter-declaration-clause ')'
+/// cv-qualifier-seq[opt] exception-specification[opt]
+/// [C++11] direct-declarator '(' parameter-declaration-clause ')'
+/// attribute-specifier-seq[opt] cv-qualifier-seq[opt]
+/// ref-qualifier[opt] exception-specification[opt]
+/// [C++] declarator-id
+/// [C++11] declarator-id attribute-specifier-seq[opt]
+///
+/// declarator-id: [C++ 8]
+/// '...'[opt] id-expression
+/// '::'[opt] nested-name-specifier[opt] type-name
+///
+/// id-expression: [C++ 5.1]
+/// unqualified-id
+/// qualified-id
+///
+/// unqualified-id: [C++ 5.1]
+/// identifier
+/// operator-function-id
+/// conversion-function-id
+/// '~' class-name
+/// template-id
+///
+/// Note, any additional constructs added here may need corresponding changes
+/// in isConstructorDeclarator.
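+///
+/// For illustration only (hypothetical names):
+///   int x;             // identifier
+///   int (*fp)(void);   // grouping parens followed by a function declarator
+///   int a[10];         // array declarator
+///   void C::f();       // qualified declarator-id (C++)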
+void Parser::ParseDirectDeclarator(Declarator &D) {
+ DeclaratorScopeObj DeclScopeObj(*this, D.getCXXScopeSpec());
+
+ if (getLangOpts().CPlusPlus && D.mayHaveIdentifier()) {
+ // ParseDeclaratorInternal might already have parsed the scope.
+ if (D.getCXXScopeSpec().isEmpty()) {
+ bool EnteringContext = D.getContext() == Declarator::FileContext ||
+ D.getContext() == Declarator::MemberContext;
+ ParseOptionalCXXScopeSpecifier(D.getCXXScopeSpec(), ParsedType(),
+ EnteringContext);
+ }
+
+ if (D.getCXXScopeSpec().isValid()) {
+ if (Actions.ShouldEnterDeclaratorScope(getCurScope(), D.getCXXScopeSpec()))
+ // Change the declaration context for name lookup, until this function
+ // is exited (and the declarator has been parsed).
+ DeclScopeObj.EnterDeclaratorScope();
+ }
+
+ // C++0x [dcl.fct]p14:
+ // There is a syntactic ambiguity when an ellipsis occurs at the end
+ // of a parameter-declaration-clause without a preceding comma. In
+ // this case, the ellipsis is parsed as part of the
+ // abstract-declarator if the type of the parameter names a template
+ // parameter pack that has not been expanded; otherwise, it is parsed
+ // as part of the parameter-declaration-clause.
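+ // For illustration (hypothetical names): in 'template<class ...T> void f(T...);'
+ // the '...' expands the pack T, while in 'void g(int...);' it makes g variadic.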
+ if (Tok.is(tok::ellipsis) && D.getCXXScopeSpec().isEmpty() &&
+ !((D.getContext() == Declarator::PrototypeContext ||
+ D.getContext() == Declarator::BlockLiteralContext) &&
+ NextToken().is(tok::r_paren) &&
+ !Actions.containsUnexpandedParameterPacks(D))) {
+ SourceLocation EllipsisLoc = ConsumeToken();
+ if (isPtrOperatorToken(Tok.getKind(), getLangOpts())) {
+ // The ellipsis was put in the wrong place. Recover, and explain to
+ // the user what they should have done.
+ ParseDeclarator(D);
+ diagnoseMisplacedEllipsis(*this, D, EllipsisLoc);
+ return;
+ } else
+ D.setEllipsisLoc(EllipsisLoc);
+
+ // The ellipsis can't be followed by a parenthesized declarator. We
+ // check for that in ParseParenDeclarator, after we have disambiguated
+ // the l_paren token.
+ }
+
+ if (Tok.is(tok::identifier) || Tok.is(tok::kw_operator) ||
+ Tok.is(tok::annot_template_id) || Tok.is(tok::tilde)) {
+ // We found something that indicates the start of an unqualified-id.
+ // Parse that unqualified-id.
+ bool AllowConstructorName;
+ if (D.getDeclSpec().hasTypeSpecifier())
+ AllowConstructorName = false;
+ else if (D.getCXXScopeSpec().isSet())
+ AllowConstructorName =
+ (D.getContext() == Declarator::FileContext ||
+ (D.getContext() == Declarator::MemberContext &&
+ D.getDeclSpec().isFriendSpecified()));
+ else
+ AllowConstructorName = (D.getContext() == Declarator::MemberContext);
+
+ SourceLocation TemplateKWLoc;
+ if (ParseUnqualifiedId(D.getCXXScopeSpec(),
+ /*EnteringContext=*/true,
+ /*AllowDestructorName=*/true,
+ AllowConstructorName,
+ ParsedType(),
+ TemplateKWLoc,
+ D.getName()) ||
+ // Once we're past the identifier, if the scope was bad, mark the
+ // whole declarator bad.
+ D.getCXXScopeSpec().isInvalid()) {
+ D.SetIdentifier(0, Tok.getLocation());
+ D.setInvalidType(true);
+ } else {
+ // Parsed the unqualified-id; update range information and move along.
+ if (D.getSourceRange().getBegin().isInvalid())
+ D.SetRangeBegin(D.getName().getSourceRange().getBegin());
+ D.SetRangeEnd(D.getName().getSourceRange().getEnd());
+ }
+ goto PastIdentifier;
+ }
+ } else if (Tok.is(tok::identifier) && D.mayHaveIdentifier()) {
+ assert(!getLangOpts().CPlusPlus &&
+ "There's a C++-specific check for tok::identifier above");
+ assert(Tok.getIdentifierInfo() && "Not an identifier?");
+ D.SetIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
+ ConsumeToken();
+ goto PastIdentifier;
+ }
+
+ if (Tok.is(tok::l_paren)) {
+ // direct-declarator: '(' declarator ')'
+ // direct-declarator: '(' attributes declarator ')'
+ // Example: 'char (*X)' or 'int (*XX)(void)'
+ ParseParenDeclarator(D);
+
+ // If the declarator was parenthesized, we entered the declarator
+ // scope when parsing the parenthesized declarator, then exited
+ // the scope already. Re-enter the scope, if we need to.
+ if (D.getCXXScopeSpec().isSet()) {
+ // If there was an error parsing parenthesized declarator, declarator
+ // scope may have been entered before. Don't do it again.
+ if (!D.isInvalidType() &&
+ Actions.ShouldEnterDeclaratorScope(getCurScope(), D.getCXXScopeSpec()))
+ // Change the declaration context for name lookup, until this function
+ // is exited (and the declarator has been parsed).
+ DeclScopeObj.EnterDeclaratorScope();
+ }
+ } else if (D.mayOmitIdentifier()) {
+ // This could be something simple like "int" (in which case the declarator
+ // portion is empty), if an abstract-declarator is allowed.
+ D.SetIdentifier(0, Tok.getLocation());
+ } else {
+ if (D.getContext() == Declarator::MemberContext)
+ Diag(Tok, diag::err_expected_member_name_or_semi)
+ << D.getDeclSpec().getSourceRange();
+ else if (getLangOpts().CPlusPlus)
+ Diag(Tok, diag::err_expected_unqualified_id) << getLangOpts().CPlusPlus;
+ else
+ Diag(Tok, diag::err_expected_ident_lparen);
+ D.SetIdentifier(0, Tok.getLocation());
+ D.setInvalidType(true);
+ }
+
+ PastIdentifier:
+ assert(D.isPastIdentifier() &&
+ "Haven't past the location of the identifier yet?");
+
+ // Don't parse attributes unless we have parsed an unparenthesized name.
+ if (D.hasName() && !D.getNumTypeObjects())
+ MaybeParseCXX0XAttributes(D);
+
+ while (1) {
+ if (Tok.is(tok::l_paren)) {
+ // Enter function-declaration scope, limiting any declarators to the
+ // function prototype scope, including parameter declarators.
+ ParseScope PrototypeScope(this,
+ Scope::FunctionPrototypeScope|Scope::DeclScope);
+ // The paren may be part of a C++ direct initializer, e.g. "int x(1);".
+ // In such a case, check if we actually have a function declarator; if it
+ // is not, the declarator has been fully parsed.
+ if (getLangOpts().CPlusPlus && D.mayBeFollowedByCXXDirectInit()) {
+ // When not in file scope, warn for ambiguous function declarators, just
+ // in case the author intended it as a variable definition.
+ bool warnIfAmbiguous = D.getContext() != Declarator::FileContext;
+ if (!isCXXFunctionDeclarator(warnIfAmbiguous))
+ break;
+ }
+ ParsedAttributes attrs(AttrFactory);
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ ParseFunctionDeclarator(D, attrs, T);
+ PrototypeScope.Exit();
+ } else if (Tok.is(tok::l_square)) {
+ ParseBracketDeclarator(D);
+ } else {
+ break;
+ }
+ }
+}
+
+/// ParseParenDeclarator - We parsed the declarator D up to a paren. This is
+/// only called before the identifier, so these are most likely just grouping
+/// parens for precedence. If we find that these are actually function
+/// parameter parens in an abstract-declarator, we call ParseFunctionDeclarator.
+///
+/// direct-declarator:
+/// '(' declarator ')'
+/// [GNU] '(' attributes declarator ')'
+/// direct-declarator '(' parameter-type-list ')'
+/// direct-declarator '(' identifier-list[opt] ')'
+/// [GNU] direct-declarator '(' parameter-forward-declarations
+/// parameter-type-list[opt] ')'
+///
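+/// For illustration: 'int (*X);' and 'int (X);' are grouping parens, while
+/// 'int (int);' and 'int ();' begin a parameter list; the isGrouping logic
+/// below decides which case we are in.
+///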
+void Parser::ParseParenDeclarator(Declarator &D) {
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ assert(!D.isPastIdentifier() && "Should be called before passing identifier");
+
+ // Eat any attributes before we look at whether this is a grouping or function
+ // declarator paren. If this is a grouping paren, the attribute applies to
+ // the type being built up, for example:
+ // int (__attribute__(()) *x)(long y)
+ // If this ends up not being a grouping paren, the attribute applies to the
+ // first argument, for example:
+ // int (__attribute__(()) int x)
+ // In either case, we need to eat any attributes to be able to determine what
+ // sort of paren this is.
+ //
+ ParsedAttributes attrs(AttrFactory);
+ bool RequiresArg = false;
+ if (Tok.is(tok::kw___attribute)) {
+ ParseGNUAttributes(attrs);
+
+ // We require that the argument list (if this is a non-grouping paren) be
+ // present even if the attribute list was empty.
+ RequiresArg = true;
+ }
+ // Eat any Microsoft extensions.
+ if (Tok.is(tok::kw___cdecl) || Tok.is(tok::kw___stdcall) ||
+ Tok.is(tok::kw___thiscall) || Tok.is(tok::kw___fastcall) ||
+ Tok.is(tok::kw___w64) || Tok.is(tok::kw___ptr64) ||
+ Tok.is(tok::kw___ptr32) || Tok.is(tok::kw___unaligned)) {
+ ParseMicrosoftTypeAttributes(attrs);
+ }
+ // Eat any Borland extensions.
+ if (Tok.is(tok::kw___pascal))
+ ParseBorlandTypeAttributes(attrs);
+
+ // If we haven't passed the identifier yet (or where the identifier would be
+ // stored, if this is an abstract declarator), then this is probably just
+ // grouping parens. However, if this could be an abstract-declarator, then
+ // this could also be the start of function arguments (consider 'void()').
+ bool isGrouping;
+
+ if (!D.mayOmitIdentifier()) {
+ // If this can't be an abstract-declarator, this *must* be a grouping
+ // paren, because we haven't seen the identifier yet.
+ isGrouping = true;
+ } else if (Tok.is(tok::r_paren) || // 'int()' is a function.
+ (getLangOpts().CPlusPlus && Tok.is(tok::ellipsis) &&
+ NextToken().is(tok::r_paren)) || // C++ int(...)
+ isDeclarationSpecifier() || // 'int(int)' is a function.
+ isCXX11AttributeSpecifier()) { // 'int([[]]int)' is a function.
+ // This handles C99 6.7.5.3p11: in "typedef int X; void foo(X)", X is
+ // considered to be a type, not a K&R identifier-list.
+ isGrouping = false;
+ } else {
+ // Otherwise, this is a grouping paren, e.g. 'int (*X)' or 'int(X)'.
+ isGrouping = true;
+ }
+
+ // If this is a grouping paren, handle:
+ // direct-declarator: '(' declarator ')'
+ // direct-declarator: '(' attributes declarator ')'
+ if (isGrouping) {
+ SourceLocation EllipsisLoc = D.getEllipsisLoc();
+ D.setEllipsisLoc(SourceLocation());
+
+ bool hadGroupingParens = D.hasGroupingParens();
+ D.setGroupingParens(true);
+ ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
+ // Match the ')'.
+ T.consumeClose();
+ D.AddTypeInfo(DeclaratorChunk::getParen(T.getOpenLocation(),
+ T.getCloseLocation()),
+ attrs, T.getCloseLocation());
+
+ D.setGroupingParens(hadGroupingParens);
+
+ // An ellipsis cannot be placed outside parentheses.
+ if (EllipsisLoc.isValid())
+ diagnoseMisplacedEllipsis(*this, D, EllipsisLoc);
+
+ return;
+ }
+
+ // Okay, if this wasn't a grouping paren, it must be the start of a function
+ // argument list. Recognize that this declarator will never have an
+ // identifier (and remember where it would have been), then call into
+ // ParseFunctionDeclarator to handle the argument list.
+ D.SetIdentifier(0, Tok.getLocation());
+
+ // Enter function-declaration scope, limiting any declarators to the
+ // function prototype scope, including parameter declarators.
+ ParseScope PrototypeScope(this,
+ Scope::FunctionPrototypeScope|Scope::DeclScope);
+ ParseFunctionDeclarator(D, attrs, T, RequiresArg);
+ PrototypeScope.Exit();
+}
+
+/// ParseFunctionDeclarator - We are after the identifier and have parsed the
+/// declarator D up to a paren, which indicates that we are parsing function
+/// arguments.
+///
+/// If FirstArgAttrs is non-null, then the caller parsed those attributes
+/// immediately after the open paren - they should be considered to be
+/// part of the first parameter.
+///
+/// If RequiresArg is true, then the first argument of the function is required
+/// to be present and required to not be an identifier list.
+///
+/// For C++, after the parameter-list, it also parses the cv-qualifier-seq[opt],
+/// (C++11) ref-qualifier[opt], exception-specification[opt],
+/// (C++11) attribute-specifier-seq[opt], and (C++11) trailing-return-type[opt].
+///
+/// [C++11] exception-specification:
+/// dynamic-exception-specification
+/// noexcept-specification
+///
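+/// For illustration (a hypothetical C++11 member-function declarator):
+///   auto f(int n) const & noexcept -> int;
+/// here the ')' is followed by the cv-qualifier-seq, ref-qualifier,
+/// exception-specification and trailing-return-type, in that order.
+///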
+void Parser::ParseFunctionDeclarator(Declarator &D,
+ ParsedAttributes &FirstArgAttrs,
+ BalancedDelimiterTracker &Tracker,
+ bool RequiresArg) {
+ assert(getCurScope()->isFunctionPrototypeScope() &&
+ "Should call from a Function scope");
+ // lparen is already consumed!
+ assert(D.isPastIdentifier() && "Should not call before identifier!");
+
+ // This should be true when the function has typed arguments.
+ // Otherwise, it is treated as a K&R-style function.
+ bool HasProto = false;
+ // Build up an array of information about the parsed arguments.
+ SmallVector<DeclaratorChunk::ParamInfo, 16> ParamInfo;
+ // Remember where we see an ellipsis, if any.
+ SourceLocation EllipsisLoc;
+
+ DeclSpec DS(AttrFactory);
+ bool RefQualifierIsLValueRef = true;
+ SourceLocation RefQualifierLoc;
+ SourceLocation ConstQualifierLoc;
+ SourceLocation VolatileQualifierLoc;
+ ExceptionSpecificationType ESpecType = EST_None;
+ SourceRange ESpecRange;
+ SmallVector<ParsedType, 2> DynamicExceptions;
+ SmallVector<SourceRange, 2> DynamicExceptionRanges;
+ ExprResult NoexceptExpr;
+ ParsedAttributes FnAttrs(AttrFactory);
+ ParsedType TrailingReturnType;
+
+ Actions.ActOnStartFunctionDeclarator();
+
+ SourceLocation EndLoc;
+ if (isFunctionDeclaratorIdentifierList()) {
+ if (RequiresArg)
+ Diag(Tok, diag::err_argument_required_after_attribute);
+
+ ParseFunctionDeclaratorIdentifierList(D, ParamInfo);
+
+ Tracker.consumeClose();
+ EndLoc = Tracker.getCloseLocation();
+ } else {
+ if (Tok.isNot(tok::r_paren))
+ ParseParameterDeclarationClause(D, FirstArgAttrs, ParamInfo, EllipsisLoc);
+ else if (RequiresArg)
+ Diag(Tok, diag::err_argument_required_after_attribute);
+
+ HasProto = ParamInfo.size() || getLangOpts().CPlusPlus;
+
+ // If we have the closing ')', eat it.
+ Tracker.consumeClose();
+ EndLoc = Tracker.getCloseLocation();
+
+ if (getLangOpts().CPlusPlus) {
+ // FIXME: Accept these components in any order, and produce fixits to
+ // correct the order if the user gets it wrong. Ideally we should deal
+ // with the virt-specifier-seq and pure-specifier in the same way.
+
+ // Parse cv-qualifier-seq[opt].
+ ParseTypeQualifierListOpt(DS, false /*no attributes*/, false);
+ if (!DS.getSourceRange().getEnd().isInvalid()) {
+ EndLoc = DS.getSourceRange().getEnd();
+ ConstQualifierLoc = DS.getConstSpecLoc();
+ VolatileQualifierLoc = DS.getVolatileSpecLoc();
+ }
+
+ // Parse ref-qualifier[opt].
+ if (Tok.is(tok::amp) || Tok.is(tok::ampamp)) {
+ Diag(Tok, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_ref_qualifier :
+ diag::ext_ref_qualifier);
+
+ RefQualifierIsLValueRef = Tok.is(tok::amp);
+ RefQualifierLoc = ConsumeToken();
+ EndLoc = RefQualifierLoc;
+ }
+
+ // Parse exception-specification[opt].
+ ESpecType = MaybeParseExceptionSpecification(ESpecRange,
+ DynamicExceptions,
+ DynamicExceptionRanges,
+ NoexceptExpr);
+ if (ESpecType != EST_None)
+ EndLoc = ESpecRange.getEnd();
+
+ // Parse attribute-specifier-seq[opt]. Per DR 979 and DR 1297, this goes
+ // after the exception-specification.
+ MaybeParseCXX0XAttributes(FnAttrs);
+
+ // Parse trailing-return-type[opt].
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::arrow)) {
+ Diag(Tok, diag::warn_cxx98_compat_trailing_return_type);
+ SourceRange Range;
+ TrailingReturnType = ParseTrailingReturnType(Range).get();
+ if (Range.getEnd().isValid())
+ EndLoc = Range.getEnd();
+ }
+ }
+ }
+
+ // Remember that we parsed a function type, and remember the attributes.
+ D.AddTypeInfo(DeclaratorChunk::getFunction(HasProto,
+ /*isVariadic=*/EllipsisLoc.isValid(),
+ EllipsisLoc,
+ ParamInfo.data(), ParamInfo.size(),
+ DS.getTypeQualifiers(),
+ RefQualifierIsLValueRef,
+ RefQualifierLoc, ConstQualifierLoc,
+ VolatileQualifierLoc,
+ /*MutableLoc=*/SourceLocation(),
+ ESpecType, ESpecRange.getBegin(),
+ DynamicExceptions.data(),
+ DynamicExceptionRanges.data(),
+ DynamicExceptions.size(),
+ NoexceptExpr.isUsable() ?
+ NoexceptExpr.get() : 0,
+ Tracker.getOpenLocation(),
+ EndLoc, D,
+ TrailingReturnType),
+ FnAttrs, EndLoc);
+
+ Actions.ActOnEndFunctionDeclarator();
+}
+
+/// isFunctionDeclaratorIdentifierList - This parameter list may have an
+/// identifier list form for a K&R-style function: void foo(a,b,c)
+///
+/// Note that identifier-lists are only allowed for normal declarators, not for
+/// abstract-declarators.
+bool Parser::isFunctionDeclaratorIdentifierList() {
+ return !getLangOpts().CPlusPlus
+ && Tok.is(tok::identifier)
+ && !TryAltiVecVectorToken()
+ // K&R identifier lists can't have typedefs as identifiers, per C99
+ // 6.7.5.3p11.
+ && (TryAnnotateTypeOrScopeToken() || !Tok.is(tok::annot_typename))
+ // Identifier lists follow a really simple grammar: the identifiers can
+ // be followed *only* by a ", identifier" or ")". However, K&R
+ // identifier lists are really rare in the brave new modern world, and
+ // it is very common for someone to typo a type in a non-K&R style
+ // list. If we are presented with something like: "void foo(intptr x,
+ // float y)", we don't want to start parsing the function declarator as
+ // though it is a K&R style declarator just because intptr is an
+ // invalid type.
+ //
+ // To handle this, we check to see if the token after the first
+ // identifier is a "," or ")". Only then do we parse it as an
+ // identifier list.
+ && (NextToken().is(tok::comma) || NextToken().is(tok::r_paren));
+}
+
+/// ParseFunctionDeclaratorIdentifierList - While parsing a function declarator
+/// we found a K&R-style identifier list instead of a typed parameter list.
+///
+/// After returning, ParamInfo will hold the parsed parameters.
+///
+/// identifier-list: [C99 6.7.5]
+/// identifier
+/// identifier-list ',' identifier
+///
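+/// For illustration (hypothetical K&R-style definition):
+///   int foo(a, b) int a; float b; { return a; }
+///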
+void Parser::ParseFunctionDeclaratorIdentifierList(
+ Declarator &D,
+ SmallVector<DeclaratorChunk::ParamInfo, 16> &ParamInfo) {
+ // If there was no identifier specified for the declarator, either we are in
+ // an abstract-declarator, or we are in a parameter declarator which was found
+ // to be abstract. In abstract-declarators, identifier lists are not valid:
+ // diagnose this.
+ if (!D.getIdentifier())
+ Diag(Tok, diag::ext_ident_list_in_param);
+
+ // Maintain an efficient lookup of params we have seen so far.
+ llvm::SmallSet<const IdentifierInfo*, 16> ParamsSoFar;
+
+ while (1) {
+ // If this isn't an identifier, report the error and skip until ')'.
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::r_paren, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ // Forget we parsed anything.
+ ParamInfo.clear();
+ return;
+ }
+
+ IdentifierInfo *ParmII = Tok.getIdentifierInfo();
+
+ // Reject 'typedef int y; int test(x, y)', but continue parsing.
+ if (Actions.getTypeName(*ParmII, Tok.getLocation(), getCurScope()))
+ Diag(Tok, diag::err_unexpected_typedef_ident) << ParmII;
+
+ // Verify that the argument identifier has not already been mentioned.
+ if (!ParamsSoFar.insert(ParmII)) {
+ Diag(Tok, diag::err_param_redefinition) << ParmII;
+ } else {
+ // Remember this identifier in ParamInfo.
+ ParamInfo.push_back(DeclaratorChunk::ParamInfo(ParmII,
+ Tok.getLocation(),
+ 0));
+ }
+
+ // Eat the identifier.
+ ConsumeToken();
+
+ // The list continues if we see a comma.
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken();
+ }
+}
+
+/// ParseParameterDeclarationClause - Parse a (possibly empty) parameter-list
+/// after the opening parenthesis. This function will not parse a K&R-style
+/// identifier list.
+///
+/// D is the declarator being parsed. If FirstArgAttrs is non-null, then the
+/// caller parsed those attributes immediately after the open paren - they should
+/// be considered to be part of the first parameter.
+///
+/// After returning, ParamInfo will hold the parsed parameters. EllipsisLoc will
+/// be the location of the ellipsis, if any was parsed.
+///
+/// parameter-type-list: [C99 6.7.5]
+/// parameter-list
+/// parameter-list ',' '...'
+/// [C++] parameter-list '...'
+///
+/// parameter-list: [C99 6.7.5]
+/// parameter-declaration
+/// parameter-list ',' parameter-declaration
+///
+/// parameter-declaration: [C99 6.7.5]
+/// declaration-specifiers declarator
+/// [C++] declaration-specifiers declarator '=' assignment-expression
+/// [C++11] initializer-clause
+/// [GNU] declaration-specifiers declarator attributes
+/// declaration-specifiers abstract-declarator[opt]
+/// [C++] declaration-specifiers abstract-declarator[opt]
+/// '=' assignment-expression
+/// [GNU] declaration-specifiers abstract-declarator[opt] attributes
+/// [C++11] attribute-specifier-seq parameter-declaration
+///
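+/// For illustration (hypothetical names):
+///   void f(const int *p, int n = 10, ...);  // default argument (C++), ellipsis
+///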
+void Parser::ParseParameterDeclarationClause(
+ Declarator &D,
+ ParsedAttributes &FirstArgAttrs,
+ SmallVector<DeclaratorChunk::ParamInfo, 16> &ParamInfo,
+ SourceLocation &EllipsisLoc) {
+
+ while (1) {
+ if (Tok.is(tok::ellipsis)) {
+ // FIXME: Issue a diagnostic if we parsed an attribute-specifier-seq
+ // before deciding this was a parameter-declaration-clause.
+ EllipsisLoc = ConsumeToken(); // Consume the ellipsis.
+ break;
+ }
+
+ // Parse the declaration-specifiers.
+ // Just use the ParsingDeclaration "scope" of the declarator.
+ DeclSpec DS(AttrFactory);
+
+ // Parse any C++11 attributes.
+ MaybeParseCXX0XAttributes(DS.getAttributes());
+
+ // Skip any Microsoft attributes before a param.
+ if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
+ ParseMicrosoftAttributes(DS.getAttributes());
+
+ SourceLocation DSStart = Tok.getLocation();
+
+ // If the caller parsed attributes for the first argument, add them now.
+ // Take them so that we only apply the attributes to the first parameter.
+ // FIXME: If we can leave the attributes in the token stream somehow, we can
+ // get rid of a parameter (FirstArgAttrs) and this statement. It might be
+ // too much hassle.
+ DS.takeAttributesFrom(FirstArgAttrs);
+
+ ParseDeclarationSpecifiers(DS);
+
+ // Parse the declarator. This is "PrototypeContext", because we must
+ // accept either 'declarator' or 'abstract-declarator' here.
+ Declarator ParmDecl(DS, Declarator::PrototypeContext);
+ ParseDeclarator(ParmDecl);
+
+ // Parse GNU attributes, if present.
+ MaybeParseGNUAttributes(ParmDecl);
+
+ // Remember this parsed parameter in ParamInfo.
+ IdentifierInfo *ParmII = ParmDecl.getIdentifier();
+
+ // DefArgToks is used when the parsing of default arguments needs
+ // to be delayed.
+ CachedTokens *DefArgToks = 0;
+
+ // If no parameter was specified, verify that *something* was specified,
+ // otherwise we have a missing type and identifier.
+ if (DS.isEmpty() && ParmDecl.getIdentifier() == 0 &&
+ ParmDecl.getNumTypeObjects() == 0) {
+ // Completely missing, emit error.
+ Diag(DSStart, diag::err_missing_param);
+ } else {
+ // Otherwise, we have something. Add it and let semantic analysis try
+ // to grok it and add the result to the ParamInfo we are building.
+
+ // Inform the actions module about the parameter declarator, so it gets
+ // added to the current scope.
+ Decl *Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDecl);
+
+ // Parse the default argument, if any. We parse the default
+ // arguments in all dialects; the semantic analysis in
+ // ActOnParamDefaultArgument will reject the default argument in
+ // C.
+ if (Tok.is(tok::equal)) {
+ SourceLocation EqualLoc = Tok.getLocation();
+
+ // Parse the default argument
+ if (D.getContext() == Declarator::MemberContext) {
+ // If we're inside a class definition, cache the tokens
+ // corresponding to the default argument. We'll actually parse
+ // them when we see the end of the class definition.
+ // FIXME: Templates will require something similar.
+ // FIXME: Can we use a smart pointer for Toks?
+ DefArgToks = new CachedTokens;
+
+ if (!ConsumeAndStoreUntil(tok::comma, tok::r_paren, *DefArgToks,
+ /*StopAtSemi=*/true,
+ /*ConsumeFinalToken=*/false)) {
+ delete DefArgToks;
+ DefArgToks = 0;
+ Actions.ActOnParamDefaultArgumentError(Param);
+ } else {
+ // Mark the end of the default argument so that we know when to
+ // stop when we parse it later on.
+ Token DefArgEnd;
+ DefArgEnd.startToken();
+ DefArgEnd.setKind(tok::cxx_defaultarg_end);
+ DefArgEnd.setLocation(Tok.getLocation());
+ DefArgToks->push_back(DefArgEnd);
+ Actions.ActOnParamUnparsedDefaultArgument(Param, EqualLoc,
+ (*DefArgToks)[1].getLocation());
+ }
+ } else {
+ // Consume the '='.
+ ConsumeToken();
+
+ // The argument isn't actually potentially evaluated unless it is
+ // used.
+ EnterExpressionEvaluationContext Eval(Actions,
+ Sema::PotentiallyEvaluatedIfUsed,
+ Param);
+
+ ExprResult DefArgResult;
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+ DefArgResult = ParseBraceInitializer();
+ } else
+ DefArgResult = ParseAssignmentExpression();
+ if (DefArgResult.isInvalid()) {
+ Actions.ActOnParamDefaultArgumentError(Param);
+ SkipUntil(tok::comma, tok::r_paren, true, true);
+ } else {
+ // Inform the actions module about the default argument
+ Actions.ActOnParamDefaultArgument(Param, EqualLoc,
+ DefArgResult.take());
+ }
+ }
+ }
+
+ ParamInfo.push_back(DeclaratorChunk::ParamInfo(ParmII,
+ ParmDecl.getIdentifierLoc(), Param,
+ DefArgToks));
+ }
+
+ // If the next token is not a comma, we are done with the parameter list
+ // (after checking for a trailing ellipsis); otherwise consume the comma
+ // and keep reading parameters.
+ if (Tok.isNot(tok::comma)) {
+ if (Tok.is(tok::ellipsis)) {
+ EllipsisLoc = ConsumeToken(); // Consume the ellipsis.
+
+ if (!getLangOpts().CPlusPlus) {
+ // We have ellipsis without a preceding ',', which is ill-formed
+ // in C. Complain and provide the fix.
+ Diag(EllipsisLoc, diag::err_missing_comma_before_ellipsis)
+ << FixItHint::CreateInsertion(EllipsisLoc, ", ");
+ }
+ }
+
+ break;
+ }
+
+ // Consume the comma.
+ ConsumeToken();
+ }
+
+}
+
+/// [C90] direct-declarator '[' constant-expression[opt] ']'
+/// [C99] direct-declarator '[' type-qual-list[opt] assignment-expr[opt] ']'
+/// [C99] direct-declarator '[' 'static' type-qual-list[opt] assign-expr ']'
+/// [C99] direct-declarator '[' type-qual-list 'static' assignment-expr ']'
+/// [C99] direct-declarator '[' type-qual-list[opt] '*' ']'
+/// [C++11] direct-declarator '[' constant-expression[opt] ']'
+/// attribute-specifier-seq[opt]
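+///
+/// For illustration, a few declarators that exercise these productions
+/// (the names are placeholders):
+///   extern int a[];              // empty brackets: the [] fast path below
+///   int b[4];                    // numeric-constant size: the [4] fast path
+///   void f(int p[static 4]);     // C99 'static' inside an array parameter
+///   void g(int n, int m[*]);     // C99 '[*]' (VLA of unspecified size)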
+void Parser::ParseBracketDeclarator(Declarator &D) {
+ if (CheckProhibitedCXX11Attribute())
+ return;
+
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+
+ // C array syntax has many features, but by far the most common are [] and [4].
+ // This code implements a fast path to handle some of the most obvious cases.
+ if (Tok.getKind() == tok::r_square) {
+ T.consumeClose();
+ ParsedAttributes attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(attrs);
+
+ // Remember that we parsed the empty array type.
+ ExprResult NumElements;
+ D.AddTypeInfo(DeclaratorChunk::getArray(0, false, false, 0,
+ T.getOpenLocation(),
+ T.getCloseLocation()),
+ attrs, T.getCloseLocation());
+ return;
+ } else if (Tok.getKind() == tok::numeric_constant &&
+ GetLookAheadToken(1).is(tok::r_square)) {
+ // [4] is very common. Parse the numeric constant expression.
+ ExprResult ExprRes(Actions.ActOnNumericConstant(Tok, getCurScope()));
+ ConsumeToken();
+
+ T.consumeClose();
+ ParsedAttributes attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(attrs);
+
+ // Remember that we parsed an array type, and remember its features.
+ D.AddTypeInfo(DeclaratorChunk::getArray(0, false, 0,
+ ExprRes.release(),
+ T.getOpenLocation(),
+ T.getCloseLocation()),
+ attrs, T.getCloseLocation());
+ return;
+ }
+
+ // If valid, this location is the position where we read the 'static' keyword.
+ SourceLocation StaticLoc;
+ if (Tok.is(tok::kw_static))
+ StaticLoc = ConsumeToken();
+
+ // If there is a type-qualifier-list, read it now.
+ // Type qualifiers in an array subscript are a C99 feature.
+ DeclSpec DS(AttrFactory);
+ ParseTypeQualifierListOpt(DS, false /*no attributes*/);
+
+ // If we haven't already read 'static', check to see if there is one after the
+ // type-qualifier-list.
+ if (!StaticLoc.isValid() && Tok.is(tok::kw_static))
+ StaticLoc = ConsumeToken();
+
+ // Handle "direct-declarator [ type-qual-list[opt] * ]".
+ bool isStar = false;
+ ExprResult NumElements;
+
+ // Handle the case where we have '[*]' as the array size. However, a leading
+ // star could be the start of an expression, for example 'X[*p + 4]'. Verify
+ // that the token after the star is a ']'. Since stars in arrays are
+ // infrequent, use of lookahead is not costly here.
+ if (Tok.is(tok::star) && GetLookAheadToken(1).is(tok::r_square)) {
+ ConsumeToken(); // Eat the '*'.
+
+ if (StaticLoc.isValid()) {
+ Diag(StaticLoc, diag::err_unspecified_vla_size_with_static);
+ StaticLoc = SourceLocation(); // Drop the static.
+ }
+ isStar = true;
+ } else if (Tok.isNot(tok::r_square)) {
+ // Note, in C89, this production uses the constant-expr production instead
+ // of assignment-expr. The only difference is that assignment-expr allows
+ // things like '=' and '*='. Sema rejects these in C89 mode because they
+ // are not i-c-e's, so we don't need to distinguish between the two here.
+
+ // Parse the constant-expression or assignment-expression now (depending
+ // on dialect).
+ if (getLangOpts().CPlusPlus) {
+ NumElements = ParseConstantExpression();
+ } else {
+ EnterExpressionEvaluationContext Unevaluated(Actions,
+ Sema::ConstantEvaluated);
+ NumElements = ParseAssignmentExpression();
+ }
+ }
+
+ // If there was an error parsing the assignment-expression, recover.
+ if (NumElements.isInvalid()) {
+ D.setInvalidType(true);
+ // If the expression was invalid, skip it.
+ SkipUntil(tok::r_square);
+ return;
+ }
+
+ T.consumeClose();
+
+ ParsedAttributes attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(attrs);
+
+ // Remember that we parsed an array type, and remember its features.
+ D.AddTypeInfo(DeclaratorChunk::getArray(DS.getTypeQualifiers(),
+ StaticLoc.isValid(), isStar,
+ NumElements.release(),
+ T.getOpenLocation(),
+ T.getCloseLocation()),
+ attrs, T.getCloseLocation());
+}
+
+/// [GNU] typeof-specifier:
+/// typeof ( expression )
+/// typeof ( type-name )
+/// [GNU/C++] typeof unary-expression
+///
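+/// For illustration, both forms accepted here (GNU extension):
+///   typeof (1 + 2) i = 3;        // operand is an expression
+///   typeof (int *) p = &i;       // operand is a type-name
+///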
+void Parser::ParseTypeofSpecifier(DeclSpec &DS) {
+ assert(Tok.is(tok::kw_typeof) && "Not a typeof specifier");
+ Token OpTok = Tok;
+ SourceLocation StartLoc = ConsumeToken();
+
+ const bool hasParens = Tok.is(tok::l_paren);
+
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
+
+ bool isCastExpr;
+ ParsedType CastTy;
+ SourceRange CastRange;
+ ExprResult Operand = ParseExprAfterUnaryExprOrTypeTrait(OpTok, isCastExpr,
+ CastTy, CastRange);
+ if (hasParens)
+ DS.setTypeofParensRange(CastRange);
+
+ if (CastRange.getEnd().isInvalid())
+ // FIXME: Not accurate, the range gets one token more than it should.
+ DS.SetRangeEnd(Tok.getLocation());
+ else
+ DS.SetRangeEnd(CastRange.getEnd());
+
+ if (isCastExpr) {
+ if (!CastTy) {
+ DS.SetTypeSpecError();
+ return;
+ }
+
+ const char *PrevSpec = 0;
+ unsigned DiagID;
+ // Check for duplicate type specifiers (e.g. "int typeof(int)").
+ if (DS.SetTypeSpecType(DeclSpec::TST_typeofType, StartLoc, PrevSpec,
+ DiagID, CastTy))
+ Diag(StartLoc, DiagID) << PrevSpec;
+ return;
+ }
+
+ // If we get here, the operand to the typeof was an expression.
+ if (Operand.isInvalid()) {
+ DS.SetTypeSpecError();
+ return;
+ }
+
+ // We might need to transform the operand if it is potentially evaluated.
+ Operand = Actions.HandleExprEvaluationContextForTypeof(Operand.get());
+ if (Operand.isInvalid()) {
+ DS.SetTypeSpecError();
+ return;
+ }
+
+ const char *PrevSpec = 0;
+ unsigned DiagID;
+ // Check for duplicate type specifiers (e.g. "int typeof(int)").
+ if (DS.SetTypeSpecType(DeclSpec::TST_typeofExpr, StartLoc, PrevSpec,
+ DiagID, Operand.get()))
+ Diag(StartLoc, DiagID) << PrevSpec;
+}
+
+/// [C11] atomic-specifier:
+/// _Atomic ( type-name )
+///
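+/// For illustration:
+///   _Atomic(int) counter;        // C11 atomic type specifier form
+///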
+void Parser::ParseAtomicSpecifier(DeclSpec &DS) {
+ assert(Tok.is(tok::kw__Atomic) && "Not an atomic specifier");
+
+ SourceLocation StartLoc = ConsumeToken();
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen_after, "_Atomic")) {
+ SkipUntil(tok::r_paren);
+ return;
+ }
+
+ TypeResult Result = ParseTypeName();
+ if (Result.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return;
+ }
+
+ // Match the ')'
+ T.consumeClose();
+
+ if (T.getCloseLocation().isInvalid())
+ return;
+
+ DS.setTypeofParensRange(T.getRange());
+ DS.SetRangeEnd(T.getCloseLocation());
+
+ const char *PrevSpec = 0;
+ unsigned DiagID;
+ if (DS.SetTypeSpecType(DeclSpec::TST_atomic, StartLoc, PrevSpec,
+ DiagID, Result.release()))
+ Diag(StartLoc, DiagID) << PrevSpec;
+}
+
+
+/// TryAltiVecVectorTokenOutOfLine - Out of line body that should only be called
+/// from TryAltiVecVectorToken.
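+///
+/// For illustration, with AltiVec enabled the identifiers 'vector' and 'pixel'
+/// are contextually treated as the '__vector' and '__pixel' keywords, e.g.:
+///   vector unsigned int v;       // becomes '__vector unsigned int'
+///   vector pixel px;             // becomes '__vector __pixel'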
+bool Parser::TryAltiVecVectorTokenOutOfLine() {
+ Token Next = NextToken();
+ switch (Next.getKind()) {
+ default: return false;
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_int:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_bool:
+ case tok::kw___pixel:
+ Tok.setKind(tok::kw___vector);
+ return true;
+ case tok::identifier:
+ if (Next.getIdentifierInfo() == Ident_pixel) {
+ Tok.setKind(tok::kw___vector);
+ return true;
+ }
+ return false;
+ }
+}
+
+bool Parser::TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
+ const char *&PrevSpec, unsigned &DiagID,
+ bool &isInvalid) {
+ if (Tok.getIdentifierInfo() == Ident_vector) {
+ Token Next = NextToken();
+ switch (Next.getKind()) {
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_int:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_bool:
+ case tok::kw___pixel:
+ isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID);
+ return true;
+ case tok::identifier:
+ if (Next.getIdentifierInfo() == Ident_pixel) {
+ isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID);
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+ } else if ((Tok.getIdentifierInfo() == Ident_pixel) &&
+ DS.isTypeAltiVecVector()) {
+ isInvalid = DS.SetTypeAltiVecPixel(true, Loc, PrevSpec, DiagID);
+ return true;
+ }
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
new file mode 100644
index 0000000..b2a65ff
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseDeclCXX.cpp
@@ -0,0 +1,3013 @@
+//===--- ParseDeclCXX.cpp - C++ Declaration Parsing -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the C++ Declaration portions of the Parser interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/OperatorKinds.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/PrettyDeclStackTrace.h"
+#include "llvm/ADT/SmallString.h"
+#include "RAIIObjectsForParser.h"
+using namespace clang;
+
+/// ParseNamespace - We know that the current token is a namespace keyword. This
+/// may either be a top-level namespace or a block-level namespace alias. If
+/// there was an inline keyword, it has already been parsed.
+///
+/// namespace-definition: [C++ 7.3: basic.namespace]
+/// named-namespace-definition
+/// unnamed-namespace-definition
+///
+/// unnamed-namespace-definition:
+/// 'inline'[opt] 'namespace' attributes[opt] '{' namespace-body '}'
+///
+/// named-namespace-definition:
+/// original-namespace-definition
+/// extension-namespace-definition
+///
+/// original-namespace-definition:
+/// 'inline'[opt] 'namespace' identifier attributes[opt]
+/// '{' namespace-body '}'
+///
+/// extension-namespace-definition:
+/// 'inline'[opt] 'namespace' original-namespace-name
+/// '{' namespace-body '}'
+///
+/// namespace-alias-definition: [C++ 7.3.2: namespace.alias]
+/// 'namespace' identifier '=' qualified-namespace-specifier ';'
+///
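+/// For illustration, a few forms handled here (names are placeholders):
+///   namespace A { }                  // named namespace
+///   inline namespace v1 { }          // inline namespace (C++11)
+///   namespace A { namespace B { } }  // nesting must be spelled out;
+///                                    // 'namespace A::B { }' is diagnosed
+///                                    // below with a fix-it
+///   namespace Alias = A::B;          // alias, handled by ParseNamespaceAlias
+///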
+Decl *Parser::ParseNamespace(unsigned Context,
+ SourceLocation &DeclEnd,
+ SourceLocation InlineLoc) {
+ assert(Tok.is(tok::kw_namespace) && "Not a namespace!");
+ SourceLocation NamespaceLoc = ConsumeToken(); // eat the 'namespace'.
+ ObjCDeclContextSwitch ObjCDC(*this);
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteNamespaceDecl(getCurScope());
+ cutOffParsing();
+ return 0;
+ }
+
+ SourceLocation IdentLoc;
+ IdentifierInfo *Ident = 0;
+ std::vector<SourceLocation> ExtraIdentLoc;
+ std::vector<IdentifierInfo*> ExtraIdent;
+ std::vector<SourceLocation> ExtraNamespaceLoc;
+
+ Token attrTok;
+
+ if (Tok.is(tok::identifier)) {
+ Ident = Tok.getIdentifierInfo();
+ IdentLoc = ConsumeToken(); // eat the identifier.
+ while (Tok.is(tok::coloncolon) && NextToken().is(tok::identifier)) {
+ ExtraNamespaceLoc.push_back(ConsumeToken());
+ ExtraIdent.push_back(Tok.getIdentifierInfo());
+ ExtraIdentLoc.push_back(ConsumeToken());
+ }
+ }
+
+ // Read label attributes, if present.
+ ParsedAttributes attrs(AttrFactory);
+ if (Tok.is(tok::kw___attribute)) {
+ attrTok = Tok;
+ ParseGNUAttributes(attrs);
+ }
+
+ if (Tok.is(tok::equal)) {
+ if (!attrs.empty())
+ Diag(attrTok, diag::err_unexpected_namespace_attributes_alias);
+ if (InlineLoc.isValid())
+ Diag(InlineLoc, diag::err_inline_namespace_alias)
+ << FixItHint::CreateRemoval(InlineLoc);
+ return ParseNamespaceAlias(NamespaceLoc, IdentLoc, Ident, DeclEnd);
+ }
+
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ if (T.consumeOpen()) {
+ if (!ExtraIdent.empty()) {
+ Diag(ExtraNamespaceLoc[0], diag::err_nested_namespaces_with_double_colon)
+ << SourceRange(ExtraNamespaceLoc.front(), ExtraIdentLoc.back());
+ }
+ Diag(Tok, Ident ? diag::err_expected_lbrace :
+ diag::err_expected_ident_lbrace);
+ return 0;
+ }
+
+ if (getCurScope()->isClassScope() || getCurScope()->isTemplateParamScope() ||
+ getCurScope()->isInObjcMethodScope() || getCurScope()->getBlockParent() ||
+ getCurScope()->getFnParent()) {
+ if (!ExtraIdent.empty()) {
+ Diag(ExtraNamespaceLoc[0], diag::err_nested_namespaces_with_double_colon)
+ << SourceRange(ExtraNamespaceLoc.front(), ExtraIdentLoc.back());
+ }
+ Diag(T.getOpenLocation(), diag::err_namespace_nonnamespace_scope);
+ SkipUntil(tok::r_brace, false);
+ return 0;
+ }
+
+ if (!ExtraIdent.empty()) {
+ TentativeParsingAction TPA(*this);
+ SkipUntil(tok::r_brace, /*StopAtSemi*/false, /*DontConsume*/true);
+ Token rBraceToken = Tok;
+ TPA.Revert();
+
+ if (!rBraceToken.is(tok::r_brace)) {
+ Diag(ExtraNamespaceLoc[0], diag::err_nested_namespaces_with_double_colon)
+ << SourceRange(ExtraNamespaceLoc.front(), ExtraIdentLoc.back());
+ } else {
+ std::string NamespaceFix;
+ for (std::vector<IdentifierInfo*>::iterator I = ExtraIdent.begin(),
+ E = ExtraIdent.end(); I != E; ++I) {
+ NamespaceFix += " { namespace ";
+ NamespaceFix += (*I)->getName();
+ }
+
+ std::string RBraces;
+ for (unsigned i = 0, e = ExtraIdent.size(); i != e; ++i)
+ RBraces += "} ";
+
+ Diag(ExtraNamespaceLoc[0], diag::err_nested_namespaces_with_double_colon)
+ << FixItHint::CreateReplacement(SourceRange(ExtraNamespaceLoc.front(),
+ ExtraIdentLoc.back()),
+ NamespaceFix)
+ << FixItHint::CreateInsertion(rBraceToken.getLocation(), RBraces);
+ }
+ }
+
+ // If we're still good, complain about inline namespaces in non-C++0x now.
+ if (InlineLoc.isValid())
+ Diag(InlineLoc, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_inline_namespace : diag::ext_inline_namespace);
+
+ // Enter a scope for the namespace.
+ ParseScope NamespaceScope(this, Scope::DeclScope);
+
+ Decl *NamespcDecl =
+ Actions.ActOnStartNamespaceDef(getCurScope(), InlineLoc, NamespaceLoc,
+ IdentLoc, Ident, T.getOpenLocation(),
+ attrs.getList());
+
+ PrettyDeclStackTraceEntry CrashInfo(Actions, NamespcDecl, NamespaceLoc,
+ "parsing namespace");
+
+ // Parse the contents of the namespace. This includes parsing recovery on
+ // any improperly nested namespaces.
+ ParseInnerNamespace(ExtraIdentLoc, ExtraIdent, ExtraNamespaceLoc, 0,
+ InlineLoc, attrs, T);
+
+ // Leave the namespace scope.
+ NamespaceScope.Exit();
+
+ DeclEnd = T.getCloseLocation();
+ Actions.ActOnFinishNamespaceDef(NamespcDecl, DeclEnd);
+
+ return NamespcDecl;
+}
+
+/// ParseInnerNamespace - Parse the contents of a namespace.
+void Parser::ParseInnerNamespace(std::vector<SourceLocation>& IdentLoc,
+ std::vector<IdentifierInfo*>& Ident,
+ std::vector<SourceLocation>& NamespaceLoc,
+ unsigned int index, SourceLocation& InlineLoc,
+ ParsedAttributes& attrs,
+ BalancedDelimiterTracker &Tracker) {
+ if (index == Ident.size()) {
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
+ ParseExternalDeclaration(attrs);
+ }
+
+ // The caller consumed the opening brace through the tracker -- we simply
+ // consume the matching close for it here.
+ Tracker.consumeClose();
+
+ return;
+ }
+
+ // Parse improperly nested namespaces.
+ ParseScope NamespaceScope(this, Scope::DeclScope);
+ Decl *NamespcDecl =
+ Actions.ActOnStartNamespaceDef(getCurScope(), SourceLocation(),
+ NamespaceLoc[index], IdentLoc[index],
+ Ident[index], Tracker.getOpenLocation(),
+ attrs.getList());
+
+ ParseInnerNamespace(IdentLoc, Ident, NamespaceLoc, ++index, InlineLoc,
+ attrs, Tracker);
+
+ NamespaceScope.Exit();
+
+ Actions.ActOnFinishNamespaceDef(NamespcDecl, Tracker.getCloseLocation());
+}
+
+/// ParseNamespaceAlias - Parse the part after the '=' in a namespace
+/// alias definition.
+///
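+/// For illustration (the names are placeholders):
+///   namespace io = very::nested::name;   // everything after '=' is parsed here
+///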
+Decl *Parser::ParseNamespaceAlias(SourceLocation NamespaceLoc,
+ SourceLocation AliasLoc,
+ IdentifierInfo *Alias,
+ SourceLocation &DeclEnd) {
+ assert(Tok.is(tok::equal) && "Not equal token");
+
+ ConsumeToken(); // eat the '='.
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteNamespaceAliasDecl(getCurScope());
+ cutOffParsing();
+ return 0;
+ }
+
+ CXXScopeSpec SS;
+ // Parse (optional) nested-name-specifier.
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
+
+ if (SS.isInvalid() || Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_namespace_name);
+ // Skip to end of the definition and eat the ';'.
+ SkipUntil(tok::semi);
+ return 0;
+ }
+
+ // Parse identifier.
+ IdentifierInfo *Ident = Tok.getIdentifierInfo();
+ SourceLocation IdentLoc = ConsumeToken();
+
+ // Eat the ';'.
+ DeclEnd = Tok.getLocation();
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after_namespace_name,
+ "", tok::semi);
+
+ return Actions.ActOnNamespaceAliasDef(getCurScope(), NamespaceLoc, AliasLoc, Alias,
+ SS, IdentLoc, Ident);
+}
+
+/// ParseLinkage - We know that the current token is a string_literal
+/// and that 'extern' was seen just before it.
+///
+/// linkage-specification: [C++ 7.5p2: dcl.link]
+/// 'extern' string-literal '{' declaration-seq[opt] '}'
+/// 'extern' string-literal declaration
+///
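+/// For illustration:
+///   extern "C" void f(int);          // single-declaration form
+///   extern "C" { int g(void); }      // braced form
+///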
+Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, unsigned Context) {
+ assert(Tok.is(tok::string_literal) && "Not a string literal!");
+ SmallString<8> LangBuffer;
+ bool Invalid = false;
+ StringRef Lang = PP.getSpelling(Tok, LangBuffer, &Invalid);
+ if (Invalid)
+ return 0;
+
+ // FIXME: This is incorrect: linkage-specifiers are parsed in translation
+ // phase 7, so string-literal concatenation is supposed to occur.
+ // extern "" "C" "" "+" "+" { } is legal.
+ if (Tok.hasUDSuffix())
+ Diag(Tok, diag::err_invalid_string_udl);
+ SourceLocation Loc = ConsumeStringToken();
+
+ ParseScope LinkageScope(this, Scope::DeclScope);
+ Decl *LinkageSpec
+ = Actions.ActOnStartLinkageSpecification(getCurScope(),
+ DS.getSourceRange().getBegin(),
+ Loc, Lang,
+ Tok.is(tok::l_brace) ? Tok.getLocation()
+ : SourceLocation());
+
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
+
+ if (Tok.isNot(tok::l_brace)) {
+ // Reset the source range in DS, as the leading "extern"
+ // does not really belong to the inner declaration ...
+ DS.SetRangeStart(SourceLocation());
+ DS.SetRangeEnd(SourceLocation());
+ // ... but anyway remember that such an "extern" was seen.
+ DS.setExternInLinkageSpec(true);
+ ParseExternalDeclaration(attrs, &DS);
+ return Actions.ActOnFinishLinkageSpecification(getCurScope(), LinkageSpec,
+ SourceLocation());
+ }
+
+ DS.abort();
+
+ ProhibitAttributes(attrs);
+
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ T.consumeOpen();
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
+ ParseExternalDeclaration(attrs);
+ }
+
+ T.consumeClose();
+ return Actions.ActOnFinishLinkageSpecification(getCurScope(), LinkageSpec,
+ T.getCloseLocation());
+}
+
+/// ParseUsingDirectiveOrDeclaration - Parse a C++ using-declaration or
+/// using-directive. Assumes that the current token is 'using'.
+Decl *Parser::ParseUsingDirectiveOrDeclaration(unsigned Context,
+ const ParsedTemplateInfo &TemplateInfo,
+ SourceLocation &DeclEnd,
+ ParsedAttributesWithRange &attrs,
+ Decl **OwnedType) {
+ assert(Tok.is(tok::kw_using) && "Not using token");
+ ObjCDeclContextSwitch ObjCDC(*this);
+
+ // Eat 'using'.
+ SourceLocation UsingLoc = ConsumeToken();
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteUsing(getCurScope());
+ cutOffParsing();
+ return 0;
+ }
+
+ // 'using namespace' means this is a using-directive.
+ if (Tok.is(tok::kw_namespace)) {
+ // Template parameters are always an error here.
+ if (TemplateInfo.Kind) {
+ SourceRange R = TemplateInfo.getSourceRange();
+ Diag(UsingLoc, diag::err_templated_using_directive)
+ << R << FixItHint::CreateRemoval(R);
+ }
+
+ return ParseUsingDirective(Context, UsingLoc, DeclEnd, attrs);
+ }
+
+ // Otherwise, it must be a using-declaration or an alias-declaration.
+
+ // Using declarations can't have attributes.
+ ProhibitAttributes(attrs);
+
+ return ParseUsingDeclaration(Context, TemplateInfo, UsingLoc, DeclEnd,
+ AS_none, OwnedType);
+}
+
+/// ParseUsingDirective - Parse a C++ using-directive; assumes that the
+/// current token is 'namespace' and that 'using' was already parsed.
+///
+/// using-directive: [C++ 7.3.p4: namespace.udir]
+/// 'using' 'namespace' ::[opt] nested-name-specifier[opt]
+/// namespace-name ;
+/// [GNU] using-directive:
+/// 'using' 'namespace' ::[opt] nested-name-specifier[opt]
+/// namespace-name attributes[opt] ;
+///
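+/// For illustration (names are placeholders):
+///   using namespace std;
+///   using namespace ::outer::inner;  // with a nested-name-specifier
+///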
+Decl *Parser::ParseUsingDirective(unsigned Context,
+ SourceLocation UsingLoc,
+ SourceLocation &DeclEnd,
+ ParsedAttributes &attrs) {
+ assert(Tok.is(tok::kw_namespace) && "Not 'namespace' token");
+
+ // Eat 'namespace'.
+ SourceLocation NamespcLoc = ConsumeToken();
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteUsingDirective(getCurScope());
+ cutOffParsing();
+ return 0;
+ }
+
+ CXXScopeSpec SS;
+ // Parse (optional) nested-name-specifier.
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
+
+ IdentifierInfo *NamespcName = 0;
+ SourceLocation IdentLoc = SourceLocation();
+
+ // Parse namespace-name.
+ if (SS.isInvalid() || Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_namespace_name);
+ // If there was invalid namespace name, skip to end of decl, and eat ';'.
+ SkipUntil(tok::semi);
+ // FIXME: Are there cases, when we would like to call ActOnUsingDirective?
+ return 0;
+ }
+
+ // Parse identifier.
+ NamespcName = Tok.getIdentifierInfo();
+ IdentLoc = ConsumeToken();
+
+ // Parse (optional) attributes (most likely GNU strong-using extension).
+ bool GNUAttr = false;
+ if (Tok.is(tok::kw___attribute)) {
+ GNUAttr = true;
+ ParseGNUAttributes(attrs);
+ }
+
+ // Eat ';'.
+ DeclEnd = Tok.getLocation();
+ ExpectAndConsume(tok::semi,
+ GNUAttr ? diag::err_expected_semi_after_attribute_list
+ : diag::err_expected_semi_after_namespace_name,
+ "", tok::semi);
+
+ return Actions.ActOnUsingDirective(getCurScope(), UsingLoc, NamespcLoc, SS,
+ IdentLoc, NamespcName, attrs.getList());
+}
+
+/// ParseUsingDeclaration - Parse C++ using-declaration or alias-declaration.
+/// Assumes that 'using' was already seen.
+///
+/// using-declaration: [C++ 7.3.p3: namespace.udecl]
+/// 'using' 'typename'[opt] ::[opt] nested-name-specifier
+/// unqualified-id
+/// 'using' :: unqualified-id
+///
+/// alias-declaration: C++0x [decl.typedef]p2
+/// 'using' identifier = type-id ;
+///
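+/// For illustration (names are placeholders):
+///   using std::swap;                               // using-declaration
+///   using IntVec = std::vector<int>;               // C++11 alias-declaration
+///   template<class T> using Vec = std::vector<T>;  // alias template
+///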
+Decl *Parser::ParseUsingDeclaration(unsigned Context,
+ const ParsedTemplateInfo &TemplateInfo,
+ SourceLocation UsingLoc,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS,
+ Decl **OwnedType) {
+ CXXScopeSpec SS;
+ SourceLocation TypenameLoc;
+ bool IsTypeName;
+
+ // Ignore optional 'typename'.
+ // FIXME: This is wrong; we should parse this as a typename-specifier.
+ if (Tok.is(tok::kw_typename)) {
+ TypenameLoc = Tok.getLocation();
+ ConsumeToken();
+ IsTypeName = true;
+ }
+ else
+ IsTypeName = false;
+
+ // Parse nested-name-specifier.
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
+
+ // Check nested-name specifier.
+ if (SS.isInvalid()) {
+ SkipUntil(tok::semi);
+ return 0;
+ }
+
+ // Parse the unqualified-id. We allow parsing of both constructor and
+ // destructor names and allow the action module to diagnose any semantic
+ // errors.
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId Name;
+ if (ParseUnqualifiedId(SS,
+ /*EnteringContext=*/false,
+ /*AllowDestructorName=*/true,
+ /*AllowConstructorName=*/true,
+ ParsedType(),
+ TemplateKWLoc,
+ Name)) {
+ SkipUntil(tok::semi);
+ return 0;
+ }
+
+ ParsedAttributes attrs(AttrFactory);
+
+ // Maybe this is an alias-declaration.
+ bool IsAliasDecl = Tok.is(tok::equal);
+ TypeResult TypeAlias;
+ if (IsAliasDecl) {
+ // TODO: Attribute support. C++0x attributes may appear before the equals.
+ // Where can GNU attributes appear?
+ ConsumeToken();
+
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_alias_declaration :
+ diag::ext_alias_declaration);
+
+ // Type alias templates cannot be specialized.
+ int SpecKind = -1;
+ if (TemplateInfo.Kind == ParsedTemplateInfo::Template &&
+ Name.getKind() == UnqualifiedId::IK_TemplateId)
+ SpecKind = 0;
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization)
+ SpecKind = 1;
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation)
+ SpecKind = 2;
+ if (SpecKind != -1) {
+ SourceRange Range;
+ if (SpecKind == 0)
+ Range = SourceRange(Name.TemplateId->LAngleLoc,
+ Name.TemplateId->RAngleLoc);
+ else
+ Range = TemplateInfo.getSourceRange();
+ Diag(Range.getBegin(), diag::err_alias_declaration_specialization)
+ << SpecKind << Range;
+ SkipUntil(tok::semi);
+ return 0;
+ }
+
+ // Name must be an identifier.
+ if (Name.getKind() != UnqualifiedId::IK_Identifier) {
+ Diag(Name.StartLocation, diag::err_alias_declaration_not_identifier);
+ // No removal fixit: can't recover from this.
+ SkipUntil(tok::semi);
+ return 0;
+ } else if (IsTypeName)
+ Diag(TypenameLoc, diag::err_alias_declaration_not_identifier)
+ << FixItHint::CreateRemoval(SourceRange(TypenameLoc,
+ SS.isNotEmpty() ? SS.getEndLoc() : TypenameLoc));
+ else if (SS.isNotEmpty())
+ Diag(SS.getBeginLoc(), diag::err_alias_declaration_not_identifier)
+ << FixItHint::CreateRemoval(SS.getRange());
+
+ TypeAlias = ParseTypeName(0, TemplateInfo.Kind ?
+ Declarator::AliasTemplateContext :
+ Declarator::AliasDeclContext, AS, OwnedType);
+ } else
+ // Parse (optional) attributes (most likely GNU strong-using extension).
+ MaybeParseGNUAttributes(attrs);
+
+ // Eat ';'.
+ DeclEnd = Tok.getLocation();
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
+ !attrs.empty() ? "attributes list" :
+ IsAliasDecl ? "alias declaration" : "using declaration",
+ tok::semi);
+
+ // Diagnose an attempt to declare a templated using-declaration.
+ // In C++0x, alias-declarations can be templates:
+ // template <...> using id = type;
+ if (TemplateInfo.Kind && !IsAliasDecl) {
+ SourceRange R = TemplateInfo.getSourceRange();
+ Diag(UsingLoc, diag::err_templated_using_declaration)
+ << R << FixItHint::CreateRemoval(R);
+
+ // Unfortunately, we have to bail out instead of recovering by
+ // ignoring the parameters, just in case the nested name specifier
+ // depends on the parameters.
+ return 0;
+ }
+
+ // "typename" keyword is allowed for identifiers only,
+ // because it may be a type definition.
+ if (IsTypeName && Name.getKind() != UnqualifiedId::IK_Identifier) {
+ Diag(Name.getSourceRange().getBegin(), diag::err_typename_identifiers_only)
+ << FixItHint::CreateRemoval(SourceRange(TypenameLoc));
+ // Proceed parsing, but reset the IsTypeName flag.
+ IsTypeName = false;
+ }
+
+ if (IsAliasDecl) {
+ TemplateParameterLists *TemplateParams = TemplateInfo.TemplateParams;
+ MultiTemplateParamsArg TemplateParamsArg(Actions,
+ TemplateParams ? TemplateParams->data() : 0,
+ TemplateParams ? TemplateParams->size() : 0);
+ return Actions.ActOnAliasDeclaration(getCurScope(), AS, TemplateParamsArg,
+ UsingLoc, Name, TypeAlias);
+ }
+
+ return Actions.ActOnUsingDeclaration(getCurScope(), AS, true, UsingLoc, SS,
+ Name, attrs.getList(),
+ IsTypeName, TypenameLoc);
+}
+
+/// ParseStaticAssertDeclaration - Parse C++0x or C11 static_assert-declaration.
+///
+/// [C++0x] static_assert-declaration:
+/// static_assert ( constant-expression , string-literal ) ;
+///
+/// [C11] static_assert-declaration:
+/// _Static_assert ( constant-expression , string-literal ) ;
+///
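+/// For illustration:
+///   static_assert(sizeof(void*) >= 4, "pointer too small");   // C++11
+///   _Static_assert(1, "always true");                         // C11
+///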
+Decl *Parser::ParseStaticAssertDeclaration(SourceLocation &DeclEnd){
+ assert((Tok.is(tok::kw_static_assert) || Tok.is(tok::kw__Static_assert)) &&
+ "Not a static_assert declaration");
+
+ if (Tok.is(tok::kw__Static_assert) && !getLangOpts().C11)
+ Diag(Tok, diag::ext_c11_static_assert);
+ if (Tok.is(tok::kw_static_assert))
+ Diag(Tok, diag::warn_cxx98_compat_static_assert);
+
+ SourceLocation StaticAssertLoc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected_lparen);
+ return 0;
+ }
+
+ ExprResult AssertExpr(ParseConstantExpression());
+ if (AssertExpr.isInvalid()) {
+ SkipUntil(tok::semi);
+ return 0;
+ }
+
+ if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "", tok::semi))
+ return 0;
+
+ if (!isTokenStringLiteral()) {
+ Diag(Tok, diag::err_expected_string_literal);
+ SkipUntil(tok::semi);
+ return 0;
+ }
+
+ ExprResult AssertMessage(ParseStringLiteralExpression());
+ if (AssertMessage.isInvalid()) {
+ SkipUntil(tok::semi);
+ return 0;
+ }
+
+ T.consumeClose();
+
+ DeclEnd = Tok.getLocation();
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_static_assert);
+
+ return Actions.ActOnStaticAssertDeclaration(StaticAssertLoc,
+ AssertExpr.take(),
+ AssertMessage.take(),
+ T.getCloseLocation());
+}
+
+/// ParseDecltypeSpecifier - Parse a C++0x decltype specifier.
+///
+/// 'decltype' ( expression )
+///
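+/// For illustration:
+///   int x = 0;
+///   decltype(x) y = x;      // int
+///   decltype((x)) r = x;    // int& (parenthesized id-expression)
+///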
+SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
+ assert((Tok.is(tok::kw_decltype) || Tok.is(tok::annot_decltype))
+ && "Not a decltype specifier");
+
+ ExprResult Result;
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc;
+
+ if (Tok.is(tok::annot_decltype)) {
+ Result = getExprAnnotation(Tok);
+ EndLoc = Tok.getAnnotationEndLoc();
+ ConsumeToken();
+ if (Result.isInvalid()) {
+ DS.SetTypeSpecError();
+ return EndLoc;
+ }
+ } else {
+ if (Tok.getIdentifierInfo()->isStr("decltype"))
+ Diag(Tok, diag::warn_cxx98_compat_decltype);
+
+ ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ "decltype", tok::r_paren)) {
+ DS.SetTypeSpecError();
+ return T.getOpenLocation() == Tok.getLocation() ?
+ StartLoc : T.getOpenLocation();
+ }
+
+ // Parse the expression
+
+ // C++0x [dcl.type.simple]p4:
+ // The operand of the decltype specifier is an unevaluated operand.
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated,
+ 0, /*IsDecltype=*/true);
+ Result = ParseExpression();
+ if (Result.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ DS.SetTypeSpecError();
+ return StartLoc;
+ }
+
+ // Match the ')'
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid()) {
+ DS.SetTypeSpecError();
+ // FIXME: this should return the location of the last token
+ // that was consumed (by "consumeClose()")
+ return T.getCloseLocation();
+ }
+
+ Result = Actions.ActOnDecltypeExpression(Result.take());
+ if (Result.isInvalid()) {
+ DS.SetTypeSpecError();
+ return T.getCloseLocation();
+ }
+
+ EndLoc = T.getCloseLocation();
+ }
+
+ const char *PrevSpec = 0;
+ unsigned DiagID;
+ // Check for duplicate type specifiers (e.g. "int decltype(a)").
+ if (DS.SetTypeSpecType(DeclSpec::TST_decltype, StartLoc, PrevSpec,
+ DiagID, Result.release())) {
+ Diag(StartLoc, DiagID) << PrevSpec;
+ DS.SetTypeSpecError();
+ }
+ return EndLoc;
+}
+
+void Parser::AnnotateExistingDecltypeSpecifier(const DeclSpec& DS,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ // make sure we have a token we can turn into an annotation token
+ if (PP.isBacktrackEnabled())
+ PP.RevertCachedTokens(1);
+ else
+ PP.EnterToken(Tok);
+
+ Tok.setKind(tok::annot_decltype);
+ setExprAnnotation(Tok, DS.getTypeSpecType() == TST_decltype ?
+ DS.getRepAsExpr() : ExprResult());
+ Tok.setAnnotationEndLoc(EndLoc);
+ Tok.setLocation(StartLoc);
+ PP.AnnotateCachedTokens(Tok);
+}
+
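+/// ParseUnderlyingTypeSpecifier - Parse a '__underlying_type' specifier
+/// (a compiler extension). For illustration:
+///   enum E : short { };
+///   __underlying_type(E) v = 0;   // 'v' has type 'short'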
+void Parser::ParseUnderlyingTypeSpecifier(DeclSpec &DS) {
+ assert(Tok.is(tok::kw___underlying_type) &&
+ "Not an underlying type specifier");
+
+ SourceLocation StartLoc = ConsumeToken();
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen_after,
+ "__underlying_type", tok::r_paren)) {
+ return;
+ }
+
+ TypeResult Result = ParseTypeName();
+ if (Result.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return;
+ }
+
+ // Match the ')'
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid())
+ return;
+
+ const char *PrevSpec = 0;
+ unsigned DiagID;
+ if (DS.SetTypeSpecType(DeclSpec::TST_underlyingType, StartLoc, PrevSpec,
+ DiagID, Result.release()))
+ Diag(StartLoc, DiagID) << PrevSpec;
+}
+
+/// ParseBaseTypeSpecifier - Parse a C++ base-type-specifier which is either a
+/// class name or decltype-specifier. Note that we only check that the result
+/// names a type; semantic analysis will need to verify that the type names a
+/// class. The result is either a type or null, depending on whether a type
+/// name was found.
+///
+/// base-type-specifier: [C++ 10.1]
+/// class-or-decltype
+/// class-or-decltype: [C++ 10.1]
+/// nested-name-specifier[opt] class-name
+/// decltype-specifier
+/// class-name: [C++ 9.1]
+/// identifier
+/// simple-template-id
+///
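+/// For illustration (the names are placeholders):
+///   struct D : N::Base { };                 // nested-name-specifier + class-name
+///   struct E : Templ<int> { };              // simple-template-id
+///   struct F : decltype(makeBase()) { };    // decltype-specifier (C++11)
+///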
+Parser::TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
+ SourceLocation &EndLocation) {
+ // Ignore attempts to use typename
+ if (Tok.is(tok::kw_typename)) {
+ Diag(Tok, diag::err_expected_class_name_not_template)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+ ConsumeToken();
+ }
+
+ // Parse optional nested-name-specifier
+ CXXScopeSpec SS;
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
+
+ BaseLoc = Tok.getLocation();
+
+ // Parse decltype-specifier
+ // tok == kw_decltype is just error recovery; it can only happen when SS
+ // isn't empty
+ if (Tok.is(tok::kw_decltype) || Tok.is(tok::annot_decltype)) {
+ if (SS.isNotEmpty())
+ Diag(SS.getBeginLoc(), diag::err_unexpected_scope_on_base_decltype)
+ << FixItHint::CreateRemoval(SS.getRange());
+ // Fake up a Declarator to use with ActOnTypeName.
+ DeclSpec DS(AttrFactory);
+
+ EndLocation = ParseDecltypeSpecifier(DS);
+
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ }
+
+ // Check whether we have a template-id that names a type.
+ if (Tok.is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ if (TemplateId->Kind == TNK_Type_template ||
+ TemplateId->Kind == TNK_Dependent_template_name) {
+ AnnotateTemplateIdTokenAsType();
+
+ assert(Tok.is(tok::annot_typename) && "template-id -> type failed");
+ ParsedType Type = getTypeAnnotation(Tok);
+ EndLocation = Tok.getAnnotationEndLoc();
+ ConsumeToken();
+
+ if (Type)
+ return Type;
+ return true;
+ }
+
+ // Fall through to produce an error below.
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_class_name);
+ return true;
+ }
+
+ IdentifierInfo *Id = Tok.getIdentifierInfo();
+ SourceLocation IdLoc = ConsumeToken();
+
+ if (Tok.is(tok::less)) {
+ // It looks like the user intended to write a template-id here, but the
+ // template-name was wrong. Try to fix that.
+ TemplateNameKind TNK = TNK_Type_template;
+ TemplateTy Template;
+ if (!Actions.DiagnoseUnknownTemplateName(*Id, IdLoc, getCurScope(),
+ &SS, Template, TNK)) {
+ Diag(IdLoc, diag::err_unknown_template_name)
+ << Id;
+ }
+
+ if (!Template)
+ return true;
+
+ // Form the template name
+ UnqualifiedId TemplateName;
+ TemplateName.setIdentifier(Id, IdLoc);
+
+ // Parse the full template-id, then turn it into a type.
+ if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
+ TemplateName, true))
+ return true;
+ if (TNK == TNK_Dependent_template_name)
+ AnnotateTemplateIdTokenAsType();
+
+ // If we didn't end up with a typename token, there's nothing more we
+ // can do.
+ if (Tok.isNot(tok::annot_typename))
+ return true;
+
+ // Retrieve the type from the annotation token, consume that token, and
+ // return.
+ EndLocation = Tok.getAnnotationEndLoc();
+ ParsedType Type = getTypeAnnotation(Tok);
+ ConsumeToken();
+ return Type;
+ }
+
+ // We have an identifier; check whether it is actually a type.
+ ParsedType Type = Actions.getTypeName(*Id, IdLoc, getCurScope(), &SS, true,
+ false, ParsedType(),
+ /*IsCtorOrDtorName=*/false,
+ /*NonTrivialTypeSourceInfo=*/true);
+ if (!Type) {
+ Diag(IdLoc, diag::err_expected_class_name);
+ return true;
+ }
+
+ // Consume the identifier.
+ EndLocation = IdLoc;
+
+ // Fake up a Declarator to use with ActOnTypeName.
+ DeclSpec DS(AttrFactory);
+ DS.SetRangeStart(IdLoc);
+ DS.SetRangeEnd(EndLocation);
+ DS.getTypeSpecScope() = SS;
+
+ const char *PrevSpec = 0;
+ unsigned DiagID;
+ DS.SetTypeSpecType(TST_typename, IdLoc, PrevSpec, DiagID, Type);
+
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+}
+
+/// ParseClassSpecifier - Parse a C++ class-specifier [C++ class] or
+/// elaborated-type-specifier [C++ dcl.type.elab]; we can't tell which
+/// until we reach the start of a definition or see a token that
+/// cannot start a definition.
+///
+/// class-specifier: [C++ class]
+/// class-head '{' member-specification[opt] '}'
+/// class-head '{' member-specification[opt] '}' attributes[opt]
+/// class-head:
+/// class-key identifier[opt] base-clause[opt]
+/// class-key nested-name-specifier identifier base-clause[opt]
+/// class-key nested-name-specifier[opt] simple-template-id
+/// base-clause[opt]
+/// [GNU] class-key attributes[opt] identifier[opt] base-clause[opt]
+/// [GNU] class-key attributes[opt] nested-name-specifier
+/// identifier base-clause[opt]
+/// [GNU] class-key attributes[opt] nested-name-specifier[opt]
+/// simple-template-id base-clause[opt]
+/// class-key:
+/// 'class'
+/// 'struct'
+/// 'union'
+///
+/// elaborated-type-specifier: [C++ dcl.type.elab]
+/// class-key ::[opt] nested-name-specifier[opt] identifier
+/// class-key ::[opt] nested-name-specifier[opt] 'template'[opt]
+/// simple-template-id
+///
+/// Note that the C++ class-specifier and elaborated-type-specifier,
+/// together, subsume the C99 struct-or-union-specifier:
+///
+/// struct-or-union-specifier: [C99 6.7.2.1]
+/// struct-or-union identifier[opt] '{' struct-contents '}'
+/// struct-or-union identifier
+/// [GNU] struct-or-union attributes[opt] identifier[opt] '{' struct-contents
+/// '}' attributes[opt]
+/// [GNU] struct-or-union attributes[opt] identifier
+/// struct-or-union:
+/// 'struct'
+/// 'union'
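+///
+/// For illustration, the distinction resolved by this routine:
+///   class C { int x; };    // class-specifier: a definition
+///   class C;               // elaborated-type-specifier: forward declaration
+///   class C *p;            // elaborated-type-specifier: a reference to C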
+void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
+ SourceLocation StartLoc, DeclSpec &DS,
+ const ParsedTemplateInfo &TemplateInfo,
+ AccessSpecifier AS,
+ bool EnteringContext, DeclSpecContext DSC) {
+ DeclSpec::TST TagType;
+ if (TagTokKind == tok::kw_struct)
+ TagType = DeclSpec::TST_struct;
+ else if (TagTokKind == tok::kw_class)
+ TagType = DeclSpec::TST_class;
+ else {
+ assert(TagTokKind == tok::kw_union && "Not a class specifier");
+ TagType = DeclSpec::TST_union;
+ }
+
+ if (Tok.is(tok::code_completion)) {
+ // Code completion for a struct, class, or union name.
+ Actions.CodeCompleteTag(getCurScope(), TagType);
+ return cutOffParsing();
+ }
+
+ // C++03 [temp.explicit] 14.7.2/8:
+ // The usual access checking rules do not apply to names used to specify
+ // explicit instantiations.
+ //
+ // As an extension we do not perform access checking on the names used to
+ // specify explicit specializations either. This is important to allow
+ // specializing traits classes for private types.
+ Sema::SuppressAccessChecksRAII SuppressAccess(Actions,
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation ||
+ TemplateInfo.Kind == ParsedTemplateInfo::ExplicitSpecialization);
+
+ ParsedAttributes attrs(AttrFactory);
+ // If attributes exist after tag, parse them.
+ if (Tok.is(tok::kw___attribute))
+ ParseGNUAttributes(attrs);
+
+ // If declspecs exist after tag, parse them.
+ while (Tok.is(tok::kw___declspec))
+ ParseMicrosoftDeclSpec(attrs);
+
+ // If C++0x attributes exist here, parse them.
+ // FIXME: Are we consistent with the ordering of parsing of different
+ // styles of attributes?
+ MaybeParseCXX0XAttributes(attrs);
+
+ if (TagType == DeclSpec::TST_struct &&
+ !Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo() &&
+ (Tok.is(tok::kw___is_arithmetic) ||
+ Tok.is(tok::kw___is_convertible) ||
+ Tok.is(tok::kw___is_empty) ||
+ Tok.is(tok::kw___is_floating_point) ||
+ Tok.is(tok::kw___is_function) ||
+ Tok.is(tok::kw___is_fundamental) ||
+ Tok.is(tok::kw___is_integral) ||
+ Tok.is(tok::kw___is_member_function_pointer) ||
+ Tok.is(tok::kw___is_member_pointer) ||
+ Tok.is(tok::kw___is_pod) ||
+ Tok.is(tok::kw___is_pointer) ||
+ Tok.is(tok::kw___is_same) ||
+ Tok.is(tok::kw___is_scalar) ||
+ Tok.is(tok::kw___is_signed) ||
+ Tok.is(tok::kw___is_unsigned) ||
+ Tok.is(tok::kw___is_void))) {
+ // GNU libstdc++ 4.2 and libc++ use certain intrinsic names as the
+ // names of struct templates, but some are keywords in GCC >= 4.3
+ // and Clang. Therefore, when we see the token sequence "struct
+ // X", make X into a normal identifier rather than a keyword, to
+ // allow libstdc++ 4.2 and libc++ to work properly.
+ Tok.getIdentifierInfo()->RevertTokenIDToIdentifier();
+ Tok.setKind(tok::identifier);
+ }
+
+ // Parse the (optional) nested-name-specifier.
+ CXXScopeSpec &SS = DS.getTypeSpecScope();
+ if (getLangOpts().CPlusPlus) {
+ // "FOO : BAR" is not a potential typo for "FOO::BAR".
+ ColonProtectionRAIIObject X(*this);
+
+ if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(), EnteringContext))
+ DS.SetTypeSpecError();
+ if (SS.isSet())
+ if (Tok.isNot(tok::identifier) && Tok.isNot(tok::annot_template_id))
+ Diag(Tok, diag::err_expected_ident);
+ }
+
+ TemplateParameterLists *TemplateParams = TemplateInfo.TemplateParams;
+
+ // Parse the (optional) class name or simple-template-id.
+ IdentifierInfo *Name = 0;
+ SourceLocation NameLoc;
+ TemplateIdAnnotation *TemplateId = 0;
+ if (Tok.is(tok::identifier)) {
+ Name = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+
+ if (Tok.is(tok::less) && getLangOpts().CPlusPlus) {
+ // The name was supposed to refer to a template, but didn't.
+ // Eat the template argument list and try to continue parsing this as
+ // a class (or template thereof).
+ TemplateArgList TemplateArgs;
+ SourceLocation LAngleLoc, RAngleLoc;
+ if (ParseTemplateIdAfterTemplateName(TemplateTy(), NameLoc, SS,
+ true, LAngleLoc,
+ TemplateArgs, RAngleLoc)) {
+ // We couldn't parse the template argument list at all, so don't
+ // try to give any location information for the list.
+ LAngleLoc = RAngleLoc = SourceLocation();
+ }
+
+ Diag(NameLoc, diag::err_explicit_spec_non_template)
+ << (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation)
+ << (TagType == DeclSpec::TST_class? 0
+ : TagType == DeclSpec::TST_struct? 1
+ : 2)
+ << Name
+ << SourceRange(LAngleLoc, RAngleLoc);
+
+ // Strip off the last template parameter list if it was empty, since
+ // we've removed its template argument list.
+ if (TemplateParams && TemplateInfo.LastParameterListWasEmpty) {
+ if (TemplateParams && TemplateParams->size() > 1) {
+ TemplateParams->pop_back();
+ } else {
+ TemplateParams = 0;
+ const_cast<ParsedTemplateInfo&>(TemplateInfo).Kind
+ = ParsedTemplateInfo::NonTemplate;
+ }
+ } else if (TemplateInfo.Kind
+ == ParsedTemplateInfo::ExplicitInstantiation) {
+ // Pretend this is just a forward declaration.
+ TemplateParams = 0;
+ const_cast<ParsedTemplateInfo&>(TemplateInfo).Kind
+ = ParsedTemplateInfo::NonTemplate;
+ const_cast<ParsedTemplateInfo&>(TemplateInfo).TemplateLoc
+ = SourceLocation();
+ const_cast<ParsedTemplateInfo&>(TemplateInfo).ExternLoc
+ = SourceLocation();
+ }
+ }
+ } else if (Tok.is(tok::annot_template_id)) {
+ TemplateId = takeTemplateIdAnnotation(Tok);
+ NameLoc = ConsumeToken();
+
+ if (TemplateId->Kind != TNK_Type_template &&
+ TemplateId->Kind != TNK_Dependent_template_name) {
+ // The template-name in the simple-template-id refers to
+ // something other than a class template. Give an appropriate
+ // error message and skip to the ';'.
+ SourceRange Range(NameLoc);
+ if (SS.isNotEmpty())
+ Range.setBegin(SS.getBeginLoc());
+
+ Diag(TemplateId->LAngleLoc, diag::err_template_spec_syntax_non_template)
+ << Name << static_cast<int>(TemplateId->Kind) << Range;
+
+ DS.SetTypeSpecError();
+ SkipUntil(tok::semi, false, true);
+ return;
+ }
+ }
+
+ // As soon as we're finished parsing the class's template-id, turn access
+ // checking back on.
+ SuppressAccess.done();
+
+ // There are four options here.
+ // - If we are in a trailing return type, this is always just a reference,
+ // and we must not try to parse a definition. For instance,
+ // [] () -> struct S { };
+ // does not define a type.
+ // - If we have 'struct foo {...', 'struct foo :...',
+ // 'struct foo final :' or 'struct foo final {', then this is a definition.
+ // - If we have 'struct foo;', then this is either a forward declaration
+ // or a friend declaration, which have to be treated differently.
+ // - Otherwise we have something like 'struct foo xyz', a reference.
+ // However, in type-specifier-seq's, things look like declarations but are
+ // just references, e.g.
+ // new struct s;
+ // or
+ // &T::operator struct s;
+ // For these, DSC is DSC_type_specifier.
+ Sema::TagUseKind TUK;
+ if (DSC == DSC_trailing)
+ TUK = Sema::TUK_Reference;
+ else if (Tok.is(tok::l_brace) ||
+ (getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
+ (isCXX0XFinalKeyword() &&
+ (NextToken().is(tok::l_brace) || NextToken().is(tok::colon)))) {
+ if (DS.isFriendSpecified()) {
+ // C++ [class.friend]p2:
+ // A class shall not be defined in a friend declaration.
+ Diag(Tok.getLocation(), diag::err_friend_decl_defines_type)
+ << SourceRange(DS.getFriendSpecLoc());
+
+ // Skip everything up to the semicolon, so that this looks like a proper
+ // friend class (or template thereof) declaration.
+ SkipUntil(tok::semi, true, true);
+ TUK = Sema::TUK_Friend;
+ } else {
+ // Okay, this is a class definition.
+ TUK = Sema::TUK_Definition;
+ }
+ } else if (Tok.is(tok::semi) && DSC != DSC_type_specifier)
+ TUK = DS.isFriendSpecified() ? Sema::TUK_Friend : Sema::TUK_Declaration;
+ else
+ TUK = Sema::TUK_Reference;
+
+ if (!Name && !TemplateId && (DS.getTypeSpecType() == DeclSpec::TST_error ||
+ TUK != Sema::TUK_Definition)) {
+ if (DS.getTypeSpecType() != DeclSpec::TST_error) {
+ // We have a declaration or reference to an anonymous class.
+ Diag(StartLoc, diag::err_anon_type_definition)
+ << DeclSpec::getSpecifierName(TagType);
+ }
+
+ SkipUntil(tok::comma, true);
+ return;
+ }
+
+ // Create the tag portion of the class or class template.
+ DeclResult TagOrTempResult = true; // invalid
+ TypeResult TypeResult = true; // invalid
+
+ bool Owned = false;
+ if (TemplateId) {
+ // Explicit specialization, class template partial specialization,
+ // or explicit instantiation.
+ ASTTemplateArgsPtr TemplateArgsPtr(Actions,
+ TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
+ TUK == Sema::TUK_Declaration) {
+ // This is an explicit instantiation of a class template.
+ TagOrTempResult
+ = Actions.ActOnExplicitInstantiation(getCurScope(),
+ TemplateInfo.ExternLoc,
+ TemplateInfo.TemplateLoc,
+ TagType,
+ StartLoc,
+ SS,
+ TemplateId->Template,
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->RAngleLoc,
+ attrs.getList());
+
+ // Friend template-ids are treated as references unless
+ // they have template headers, in which case they're ill-formed
+ // (FIXME: "template <class T> friend class A<T>::B<int>;").
+ // We diagnose this error in ActOnClassTemplateSpecialization.
+ } else if (TUK == Sema::TUK_Reference ||
+ (TUK == Sema::TUK_Friend &&
+ TemplateInfo.Kind == ParsedTemplateInfo::NonTemplate)) {
+ TypeResult = Actions.ActOnTagTemplateIdType(TUK, TagType, StartLoc,
+ TemplateId->SS,
+ TemplateId->TemplateKWLoc,
+ TemplateId->Template,
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->RAngleLoc);
+ } else {
+ // This is an explicit specialization or a class template
+ // partial specialization.
+ TemplateParameterLists FakedParamLists;
+
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation) {
+ // This looks like an explicit instantiation, because we have
+ // something like
+ //
+ // template class Foo<X>
+ //
+ // but it actually has a definition. Most likely, this was
+ // meant to be an explicit specialization, but the user forgot
+ // the '<>' after 'template'.
+ assert(TUK == Sema::TUK_Definition && "Expected a definition here");
+
+ SourceLocation LAngleLoc
+ = PP.getLocForEndOfToken(TemplateInfo.TemplateLoc);
+ Diag(TemplateId->TemplateNameLoc,
+ diag::err_explicit_instantiation_with_definition)
+ << SourceRange(TemplateInfo.TemplateLoc)
+ << FixItHint::CreateInsertion(LAngleLoc, "<>");
+
+ // Create a fake template parameter list that contains only
+ // "template<>", so that we treat this construct as a class
+ // template specialization.
+ FakedParamLists.push_back(
+ Actions.ActOnTemplateParameterList(0, SourceLocation(),
+ TemplateInfo.TemplateLoc,
+ LAngleLoc,
+ 0, 0,
+ LAngleLoc));
+ TemplateParams = &FakedParamLists;
+ }
+
+ // Build the class template specialization.
+ TagOrTempResult
+ = Actions.ActOnClassTemplateSpecialization(getCurScope(), TagType, TUK,
+ StartLoc, DS.getModulePrivateSpecLoc(), SS,
+ TemplateId->Template,
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->RAngleLoc,
+ attrs.getList(),
+ MultiTemplateParamsArg(Actions,
+ TemplateParams? &(*TemplateParams)[0] : 0,
+ TemplateParams? TemplateParams->size() : 0));
+ }
+ } else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
+ TUK == Sema::TUK_Declaration) {
+ // Explicit instantiation of a member of a class template
+ // specialization, e.g.,
+ //
+ // template struct Outer<int>::Inner;
+ //
+ TagOrTempResult
+ = Actions.ActOnExplicitInstantiation(getCurScope(),
+ TemplateInfo.ExternLoc,
+ TemplateInfo.TemplateLoc,
+ TagType, StartLoc, SS, Name,
+ NameLoc, attrs.getList());
+ } else if (TUK == Sema::TUK_Friend &&
+ TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate) {
+ TagOrTempResult =
+ Actions.ActOnTemplatedFriendTag(getCurScope(), DS.getFriendSpecLoc(),
+ TagType, StartLoc, SS,
+ Name, NameLoc, attrs.getList(),
+ MultiTemplateParamsArg(Actions,
+ TemplateParams? &(*TemplateParams)[0] : 0,
+ TemplateParams? TemplateParams->size() : 0));
+ } else {
+ if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
+ TUK == Sema::TUK_Definition) {
+ // FIXME: Diagnose this particular error.
+ }
+
+ bool IsDependent = false;
+
+ // Don't pass down template parameter lists if this is just a tag
+ // reference. For example, we don't need the template parameters here:
+ // template <class T> class A *makeA(T t);
+ MultiTemplateParamsArg TParams;
+ if (TUK != Sema::TUK_Reference && TemplateParams)
+ TParams =
+ MultiTemplateParamsArg(&(*TemplateParams)[0], TemplateParams->size());
+
+ // Declaration or definition of a class type
+ TagOrTempResult = Actions.ActOnTag(getCurScope(), TagType, TUK, StartLoc,
+ SS, Name, NameLoc, attrs.getList(), AS,
+ DS.getModulePrivateSpecLoc(),
+ TParams, Owned, IsDependent,
+ SourceLocation(), false,
+ clang::TypeResult());
+
+ // If ActOnTag said the type was dependent, try again with the
+ // less common call.
+ if (IsDependent) {
+ assert(TUK == Sema::TUK_Reference || TUK == Sema::TUK_Friend);
+ TypeResult = Actions.ActOnDependentTag(getCurScope(), TagType, TUK,
+ SS, Name, StartLoc, NameLoc);
+ }
+ }
+
+ // If there is a body, parse it and inform the actions module.
+ if (TUK == Sema::TUK_Definition) {
+ assert(Tok.is(tok::l_brace) ||
+ (getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
+ isCXX0XFinalKeyword());
+ if (getLangOpts().CPlusPlus)
+ ParseCXXMemberSpecification(StartLoc, TagType, TagOrTempResult.get());
+ else
+ ParseStructUnionBody(StartLoc, TagType, TagOrTempResult.get());
+ }
+
+ const char *PrevSpec = 0;
+ unsigned DiagID;
+ bool Result;
+ if (!TypeResult.isInvalid()) {
+ Result = DS.SetTypeSpecType(DeclSpec::TST_typename, StartLoc,
+ NameLoc.isValid() ? NameLoc : StartLoc,
+ PrevSpec, DiagID, TypeResult.get());
+ } else if (!TagOrTempResult.isInvalid()) {
+ Result = DS.SetTypeSpecType(TagType, StartLoc,
+ NameLoc.isValid() ? NameLoc : StartLoc,
+ PrevSpec, DiagID, TagOrTempResult.get(), Owned);
+ } else {
+ DS.SetTypeSpecError();
+ return;
+ }
+
+ if (Result)
+ Diag(StartLoc, DiagID) << PrevSpec;
+
+ // At this point, we've successfully parsed a class-specifier in 'definition'
+ // form (e.g. "struct foo { int x; }"). While we could just return here, we're
+ // going to look at what comes after it to improve error recovery. If an
+ // impossible token occurs next, we assume that the programmer forgot a ; at
+ // the end of the declaration and recover that way.
+ //
+ // This switch enumerates the valid "follow" set for definition.
+ if (TUK == Sema::TUK_Definition) {
+ bool ExpectedSemi = true;
+ switch (Tok.getKind()) {
+ default: break;
+ case tok::semi: // struct foo {...} ;
+ case tok::star: // struct foo {...} * P;
+ case tok::amp: // struct foo {...} & R = ...
+ case tok::identifier: // struct foo {...} V ;
+ case tok::r_paren: //(struct foo {...} ) {4}
+ case tok::annot_cxxscope: // struct foo {...} a:: b;
+ case tok::annot_typename: // struct foo {...} a ::b;
+ case tok::annot_template_id: // struct foo {...} a<int> ::b;
+ case tok::l_paren: // struct foo {...} ( x);
+ case tok::comma: // __builtin_offsetof(struct foo{...} ,
+ ExpectedSemi = false;
+ break;
+ // Type qualifiers
+ case tok::kw_const: // struct foo {...} const x;
+ case tok::kw_volatile: // struct foo {...} volatile x;
+ case tok::kw_restrict: // struct foo {...} restrict x;
+ case tok::kw_inline: // struct foo {...} inline foo() {};
+ // Storage-class specifiers
+ case tok::kw_static: // struct foo {...} static x;
+ case tok::kw_extern: // struct foo {...} extern x;
+ case tok::kw_typedef: // struct foo {...} typedef x;
+ case tok::kw_register: // struct foo {...} register x;
+ case tok::kw_auto: // struct foo {...} auto x;
+ case tok::kw_mutable: // struct foo {...} mutable x;
+ case tok::kw_constexpr: // struct foo {...} constexpr x;
+ // As shown above, type qualifiers and storage class specifiers absolutely
+ // can occur after class specifiers according to the grammar. However,
+ // almost no one actually writes code like this. If we see one of these,
+ // it is much more likely that someone missed a semicolon and the
+ // type/storage class specifier we're seeing is part of the *next*
+ // intended declaration, as in:
+ //
+ // struct foo { ... }
+ // typedef int X;
+ //
+ // We'd really like to emit a missing semicolon error instead of emitting
+ // an error on the 'int' saying that you can't have two type specifiers in
+ // the same declaration of X. Because of this, we look ahead past this
+ // token to see if it's a type specifier. If so, we know the code is
+ // otherwise invalid, so we can produce the expected semi error.
+ if (!isKnownToBeTypeSpecifier(NextToken()))
+ ExpectedSemi = false;
+ break;
+
+ case tok::r_brace: // struct bar { struct foo {...} }
+ // Missing ';' at end of struct is accepted as an extension in C mode.
+ if (!getLangOpts().CPlusPlus)
+ ExpectedSemi = false;
+ break;
+ }
+
+ // C++ [temp]p3 In a template-declaration which defines a class, no
+ // declarator is permitted.
+ if (TemplateInfo.Kind)
+ ExpectedSemi = true;
+
+ if (ExpectedSemi) {
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after_tagdecl,
+ TagType == DeclSpec::TST_class ? "class"
+ : TagType == DeclSpec::TST_struct? "struct" : "union");
+ // Push this token back into the preprocessor and change our current token
+ // to ';' so that the rest of the code recovers as though there were an
+ // ';' after the definition.
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::semi);
+ }
+ }
+}
+
+/// ParseBaseClause - Parse the base-clause of a C++ class [C++ class.derived].
+///
+/// base-clause : [C++ class.derived]
+/// ':' base-specifier-list
+/// base-specifier-list:
+/// base-specifier '...'[opt]
+/// base-specifier-list ',' base-specifier '...'[opt]
+void Parser::ParseBaseClause(Decl *ClassDecl) {
+ assert(Tok.is(tok::colon) && "Not a base clause");
+ ConsumeToken();
+
+ // Build up an array of parsed base specifiers.
+ SmallVector<CXXBaseSpecifier *, 8> BaseInfo;
+
+ while (true) {
+ // Parse a base-specifier.
+ BaseResult Result = ParseBaseSpecifier(ClassDecl);
+ if (Result.isInvalid()) {
+ // Skip the rest of this base specifier, up until the comma or
+ // opening brace.
+ SkipUntil(tok::comma, tok::l_brace, true, true);
+ } else {
+ // Add this to our array of base specifiers.
+ BaseInfo.push_back(Result.get());
+ }
+
+ // If the next token is a comma, consume it and keep reading
+ // base-specifiers.
+ if (Tok.isNot(tok::comma)) break;
+
+ // Consume the comma.
+ ConsumeToken();
+ }
+
+ // Attach the base specifiers
+ Actions.ActOnBaseSpecifiers(ClassDecl, BaseInfo.data(), BaseInfo.size());
+}
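+
+// Editor's note (illustrative example, not part of the original source): the
+// base-clauses accepted by the loop above include pack expansions, e.g.
+//
+//   struct A {}; struct B {};
+//   struct C : public A, virtual private B {};
+//   template <typename ...Ts> struct D : Ts... {};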
+
+/// ParseBaseSpecifier - Parse a C++ base-specifier. A base-specifier is
+/// one entry in the base class list of a class specifier, for example:
+/// class foo : public bar, virtual private baz {
+/// 'public bar' and 'virtual private baz' are each base-specifiers.
+///
+/// base-specifier: [C++ class.derived]
+/// ::[opt] nested-name-specifier[opt] class-name
+/// 'virtual' access-specifier[opt] ::[opt] nested-name-specifier[opt]
+/// base-type-specifier
+/// access-specifier 'virtual'[opt] ::[opt] nested-name-specifier[opt]
+/// base-type-specifier
+Parser::BaseResult Parser::ParseBaseSpecifier(Decl *ClassDecl) {
+ bool IsVirtual = false;
+ SourceLocation StartLoc = Tok.getLocation();
+
+ // Parse the 'virtual' keyword.
+ if (Tok.is(tok::kw_virtual)) {
+ ConsumeToken();
+ IsVirtual = true;
+ }
+
+ // Parse an (optional) access specifier.
+ AccessSpecifier Access = getAccessSpecifierIfPresent();
+ if (Access != AS_none)
+ ConsumeToken();
+
+ // Parse the 'virtual' keyword (again!), in case it came after the
+ // access specifier.
+ if (Tok.is(tok::kw_virtual)) {
+ SourceLocation VirtualLoc = ConsumeToken();
+ if (IsVirtual) {
+ // Complain about duplicate 'virtual'
+ Diag(VirtualLoc, diag::err_dup_virtual)
+ << FixItHint::CreateRemoval(VirtualLoc);
+ }
+
+ IsVirtual = true;
+ }
+
+ // Parse the class-name.
+ SourceLocation EndLocation;
+ SourceLocation BaseLoc;
+ TypeResult BaseType = ParseBaseTypeSpecifier(BaseLoc, EndLocation);
+ if (BaseType.isInvalid())
+ return true;
+
+ // Parse the optional ellipsis (for a pack expansion). The ellipsis is
+ // actually part of the base-specifier-list grammar productions, but we
+ // parse it here for convenience.
+ SourceLocation EllipsisLoc;
+ if (Tok.is(tok::ellipsis))
+ EllipsisLoc = ConsumeToken();
+
+ // Find the complete source range for the base-specifier.
+ SourceRange Range(StartLoc, EndLocation);
+
+ // Notify semantic analysis that we have parsed a complete
+ // base-specifier.
+ return Actions.ActOnBaseSpecifier(ClassDecl, Range, IsVirtual, Access,
+ BaseType.get(), BaseLoc, EllipsisLoc);
+}
+
+/// getAccessSpecifierIfPresent - Determine whether the next token is
+/// a C++ access-specifier.
+///
+/// access-specifier: [C++ class.derived]
+/// 'private'
+/// 'protected'
+/// 'public'
+AccessSpecifier Parser::getAccessSpecifierIfPresent() const {
+ switch (Tok.getKind()) {
+ default: return AS_none;
+ case tok::kw_private: return AS_private;
+ case tok::kw_protected: return AS_protected;
+ case tok::kw_public: return AS_public;
+ }
+}
+
+void Parser::HandleMemberFunctionDefaultArgs(Declarator& DeclaratorInfo,
+ Decl *ThisDecl) {
+ // We just declared a member function. If this member function
+ // has any default arguments, we'll need to parse them later.
+ LateParsedMethodDeclaration *LateMethod = 0;
+ DeclaratorChunk::FunctionTypeInfo &FTI
+ = DeclaratorInfo.getFunctionTypeInfo();
+ for (unsigned ParamIdx = 0; ParamIdx < FTI.NumArgs; ++ParamIdx) {
+ if (LateMethod || FTI.ArgInfo[ParamIdx].DefaultArgTokens) {
+ if (!LateMethod) {
+ // Push this method onto the stack of late-parsed method
+ // declarations.
+ LateMethod = new LateParsedMethodDeclaration(this, ThisDecl);
+ getCurrentClass().LateParsedDeclarations.push_back(LateMethod);
+ LateMethod->TemplateScope = getCurScope()->isTemplateParamScope();
+
+ // Add all of the parameters prior to this one (they don't
+ // have default arguments).
+ LateMethod->DefaultArgs.reserve(FTI.NumArgs);
+ for (unsigned I = 0; I < ParamIdx; ++I)
+ LateMethod->DefaultArgs.push_back(
+ LateParsedDefaultArgument(FTI.ArgInfo[I].Param));
+ }
+
+ // Add this parameter to the list of parameters (it may or may
+ // not have a default argument).
+ LateMethod->DefaultArgs.push_back(
+ LateParsedDefaultArgument(FTI.ArgInfo[ParamIdx].Param,
+ FTI.ArgInfo[ParamIdx].DefaultArgTokens));
+ }
+ }
+}
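+
+// Editor's note (illustrative example, not part of the original source):
+// because default arguments are late-parsed, they may refer to members that
+// are declared later in the same class, e.g.
+//
+//   struct S {
+//     int f(int x = g());          // 'g()' tokens are cached and parsed later
+//     static int g() { return 42; }
+//   };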
+
+/// isCXX0XVirtSpecifier - Determine whether the given token is a C++0x
+/// virt-specifier.
+///
+/// virt-specifier:
+/// override
+/// final
+VirtSpecifiers::Specifier Parser::isCXX0XVirtSpecifier(const Token &Tok) const {
+ if (!getLangOpts().CPlusPlus)
+ return VirtSpecifiers::VS_None;
+
+ if (Tok.is(tok::identifier)) {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ // Initialize the contextual keywords.
+ if (!Ident_final) {
+ Ident_final = &PP.getIdentifierTable().get("final");
+ Ident_override = &PP.getIdentifierTable().get("override");
+ }
+
+ if (II == Ident_override)
+ return VirtSpecifiers::VS_Override;
+
+ if (II == Ident_final)
+ return VirtSpecifiers::VS_Final;
+ }
+
+ return VirtSpecifiers::VS_None;
+}
+
+/// ParseOptionalCXX0XVirtSpecifierSeq - Parse a virt-specifier-seq.
+///
+/// virt-specifier-seq:
+/// virt-specifier
+/// virt-specifier-seq virt-specifier
+void Parser::ParseOptionalCXX0XVirtSpecifierSeq(VirtSpecifiers &VS) {
+ while (true) {
+ VirtSpecifiers::Specifier Specifier = isCXX0XVirtSpecifier();
+ if (Specifier == VirtSpecifiers::VS_None)
+ return;
+
+ // C++ [class.mem]p8:
+ // A virt-specifier-seq shall contain at most one of each virt-specifier.
+ const char *PrevSpec = 0;
+ if (VS.SetSpecifier(Specifier, Tok.getLocation(), PrevSpec))
+ Diag(Tok.getLocation(), diag::err_duplicate_virt_specifier)
+ << PrevSpec
+ << FixItHint::CreateRemoval(Tok.getLocation());
+
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_override_control_keyword :
+ diag::ext_override_control_keyword)
+ << VirtSpecifiers::getSpecifierName(Specifier);
+ ConsumeToken();
+ }
+}
+
+/// isCXX0XFinalKeyword - Determine whether the next token is a C++0x
+/// contextual 'final' keyword.
+bool Parser::isCXX0XFinalKeyword() const {
+ if (!getLangOpts().CPlusPlus)
+ return false;
+
+ if (!Tok.is(tok::identifier))
+ return false;
+
+ // Initialize the contextual keywords.
+ if (!Ident_final) {
+ Ident_final = &PP.getIdentifierTable().get("final");
+ Ident_override = &PP.getIdentifierTable().get("override");
+ }
+
+ return Tok.getIdentifierInfo() == Ident_final;
+}
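+
+// Editor's note (illustrative example, not part of the original source):
+// 'override' and 'final' are contextual keywords, so they are special only in
+// the positions handled above and remain usable as ordinary identifiers:
+//
+//   struct Base { virtual void f(); };
+//   struct Derived final : Base {
+//     void f() override;
+//     int final = 0;               // plain data member named 'final'
+//   };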
+
+/// ParseCXXClassMemberDeclaration - Parse a C++ class member declaration.
+///
+/// member-declaration:
+/// decl-specifier-seq[opt] member-declarator-list[opt] ';'
+/// function-definition ';'[opt]
+/// ::[opt] nested-name-specifier template[opt] unqualified-id ';'[TODO]
+/// using-declaration [TODO]
+/// [C++0x] static_assert-declaration
+/// template-declaration
+/// [GNU] '__extension__' member-declaration
+///
+/// member-declarator-list:
+/// member-declarator
+/// member-declarator-list ',' member-declarator
+///
+/// member-declarator:
+/// declarator virt-specifier-seq[opt] pure-specifier[opt]
+/// declarator constant-initializer[opt]
+/// [C++11] declarator brace-or-equal-initializer[opt]
+/// identifier[opt] ':' constant-expression
+///
+/// virt-specifier-seq:
+/// virt-specifier
+/// virt-specifier-seq virt-specifier
+///
+/// virt-specifier:
+/// override
+/// final
+///
+/// pure-specifier:
+/// '= 0'
+///
+/// constant-initializer:
+/// '=' constant-expression
+///
+void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
+ AttributeList *AccessAttrs,
+ const ParsedTemplateInfo &TemplateInfo,
+ ParsingDeclRAIIObject *TemplateDiags) {
+ if (Tok.is(tok::at)) {
+ if (getLangOpts().ObjC1 && NextToken().isObjCAtKeyword(tok::objc_defs))
+ Diag(Tok, diag::err_at_defs_cxx);
+ else
+ Diag(Tok, diag::err_at_in_class);
+
+ ConsumeToken();
+ SkipUntil(tok::r_brace);
+ return;
+ }
+
+ // Access declarations.
+ if (!TemplateInfo.Kind &&
+ (Tok.is(tok::identifier) || Tok.is(tok::coloncolon)) &&
+ !TryAnnotateCXXScopeToken() &&
+ Tok.is(tok::annot_cxxscope)) {
+ bool isAccessDecl = false;
+ if (NextToken().is(tok::identifier))
+ isAccessDecl = GetLookAheadToken(2).is(tok::semi);
+ else
+ isAccessDecl = NextToken().is(tok::kw_operator);
+
+ if (isAccessDecl) {
+ // Collect the scope specifier token we annotated earlier.
+ CXXScopeSpec SS;
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
+ /*EnteringContext=*/false);
+
+ // Try to parse an unqualified-id.
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId Name;
+ if (ParseUnqualifiedId(SS, false, true, true, ParsedType(),
+ TemplateKWLoc, Name)) {
+ SkipUntil(tok::semi);
+ return;
+ }
+
+ // TODO: recover from mistakenly-qualified operator declarations.
+ if (ExpectAndConsume(tok::semi,
+ diag::err_expected_semi_after,
+ "access declaration",
+ tok::semi))
+ return;
+
+ Actions.ActOnUsingDeclaration(getCurScope(), AS,
+ false, SourceLocation(),
+ SS, Name,
+ /* AttrList */ 0,
+ /* IsTypeName */ false,
+ SourceLocation());
+ return;
+ }
+ }
+
+ // static_assert-declaration
+ if (Tok.is(tok::kw_static_assert) || Tok.is(tok::kw__Static_assert)) {
+ // FIXME: Check for templates
+ SourceLocation DeclEnd;
+ ParseStaticAssertDeclaration(DeclEnd);
+ return;
+ }
+
+ if (Tok.is(tok::kw_template)) {
+ assert(!TemplateInfo.TemplateParams &&
+ "Nested template improperly parsed?");
+ SourceLocation DeclEnd;
+ ParseDeclarationStartingWithTemplate(Declarator::MemberContext, DeclEnd,
+ AS, AccessAttrs);
+ return;
+ }
+
+ // Handle: member-declaration ::= '__extension__' member-declaration
+ if (Tok.is(tok::kw___extension__)) {
+ // __extension__ silences extension warnings in the subexpression.
+ ExtensionRAIIObject O(Diags); // Use RAII to do this.
+ ConsumeToken();
+ return ParseCXXClassMemberDeclaration(AS, AccessAttrs,
+ TemplateInfo, TemplateDiags);
+ }
+
+ // Don't parse FOO:BAR as if it were a typo for FOO::BAR; in this context it
+ // is a bitfield.
+ ColonProtectionRAIIObject X(*this);
+
+ ParsedAttributesWithRange attrs(AttrFactory);
+ // Optional C++0x attribute-specifier
+ MaybeParseCXX0XAttributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
+
+ if (Tok.is(tok::kw_using)) {
+ ProhibitAttributes(attrs);
+
+ // Eat 'using'.
+ SourceLocation UsingLoc = ConsumeToken();
+
+ if (Tok.is(tok::kw_namespace)) {
+ Diag(UsingLoc, diag::err_using_namespace_in_class);
+ SkipUntil(tok::semi, true, true);
+ } else {
+ SourceLocation DeclEnd;
+ // Otherwise, it must be a using-declaration or an alias-declaration.
+ ParseUsingDeclaration(Declarator::MemberContext, TemplateInfo,
+ UsingLoc, DeclEnd, AS);
+ }
+ return;
+ }
+
+ // Hold late-parsed attributes so we can attach a Decl to them later.
+ LateParsedAttrList CommonLateParsedAttrs;
+
+ // decl-specifier-seq:
+ // Parse the common declaration-specifiers piece.
+ ParsingDeclSpec DS(*this, TemplateDiags);
+ DS.takeAttributesFrom(attrs);
+ ParseDeclarationSpecifiers(DS, TemplateInfo, AS, DSC_class,
+ &CommonLateParsedAttrs);
+
+ MultiTemplateParamsArg TemplateParams(Actions,
+ TemplateInfo.TemplateParams? TemplateInfo.TemplateParams->data() : 0,
+ TemplateInfo.TemplateParams? TemplateInfo.TemplateParams->size() : 0);
+
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ Decl *TheDecl =
+ Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS, DS, TemplateParams);
+ DS.complete(TheDecl);
+ return;
+ }
+
+ ParsingDeclarator DeclaratorInfo(*this, DS, Declarator::MemberContext);
+ VirtSpecifiers VS;
+
+ // Hold late-parsed attributes so we can attach a Decl to them later.
+ LateParsedAttrList LateParsedAttrs;
+
+ SourceLocation EqualLoc;
+ bool HasInitializer = false;
+ ExprResult Init;
+ if (Tok.isNot(tok::colon)) {
+ // Don't parse FOO:BAR as if it were a typo for FOO::BAR.
+ ColonProtectionRAIIObject X(*this);
+
+ // Parse the first declarator.
+ ParseDeclarator(DeclaratorInfo);
+ // Error parsing the declarator?
+ if (!DeclaratorInfo.hasName()) {
+ // If so, skip until the semi-colon or a }.
+ SkipUntil(tok::r_brace, true, true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return;
+ }
+
+ ParseOptionalCXX0XVirtSpecifierSeq(VS);
+
+ // If attributes exist after the declarator, but before an '{', parse them.
+ MaybeParseGNUAttributes(DeclaratorInfo, &LateParsedAttrs);
+
+ // MSVC permits pure specifier on inline functions declared at class scope.
+ // Hence check for =0 before checking for function definition.
+ if (getLangOpts().MicrosoftExt && Tok.is(tok::equal) &&
+ DeclaratorInfo.isFunctionDeclarator() &&
+ NextToken().is(tok::numeric_constant)) {
+ EqualLoc = ConsumeToken();
+ Init = ParseInitializer();
+ if (Init.isInvalid())
+ SkipUntil(tok::comma, true, true);
+ else
+ HasInitializer = true;
+ }
+
+ FunctionDefinitionKind DefinitionKind = FDK_Declaration;
+ // function-definition:
+ //
+ // In C++11, a non-function declarator followed by an open brace is a
+ // braced-init-list for an in-class member initialization, not an
+ // erroneous function definition.
+ if (Tok.is(tok::l_brace) && !getLangOpts().CPlusPlus0x) {
+ DefinitionKind = FDK_Definition;
+ } else if (DeclaratorInfo.isFunctionDeclarator()) {
+ if (Tok.is(tok::l_brace) || Tok.is(tok::colon) || Tok.is(tok::kw_try)) {
+ DefinitionKind = FDK_Definition;
+ } else if (Tok.is(tok::equal)) {
+ const Token &KW = NextToken();
+ if (KW.is(tok::kw_default))
+ DefinitionKind = FDK_Defaulted;
+ else if (KW.is(tok::kw_delete))
+ DefinitionKind = FDK_Deleted;
+ }
+ }
+
+ if (DefinitionKind) {
+ if (!DeclaratorInfo.isFunctionDeclarator()) {
+ Diag(DeclaratorInfo.getIdentifierLoc(), diag::err_func_def_no_params);
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, /*StopAtSemi*/false);
+
+ // Consume the optional ';'
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return;
+ }
+
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ Diag(DeclaratorInfo.getIdentifierLoc(),
+ diag::err_function_declared_typedef);
+ // This recovery skips the entire function body. It would be nice
+ // to simply call ParseCXXInlineMethodDef() below; however, Sema
+ // assumes the declarator represents a function, not a typedef.
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, /*StopAtSemi*/false);
+
+ // Consume the optional ';'
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return;
+ }
+
+ Decl *FunDecl =
+ ParseCXXInlineMethodDef(AS, AccessAttrs, DeclaratorInfo, TemplateInfo,
+ VS, DefinitionKind, Init);
+
+ for (unsigned i = 0, ni = CommonLateParsedAttrs.size(); i < ni; ++i) {
+ CommonLateParsedAttrs[i]->addDecl(FunDecl);
+ }
+ for (unsigned i = 0, ni = LateParsedAttrs.size(); i < ni; ++i) {
+ LateParsedAttrs[i]->addDecl(FunDecl);
+ }
+ LateParsedAttrs.clear();
+
+ // Consume the ';' - it's optional unless we have a delete or default
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ }
+
+ return;
+ }
+ }
+
+ // member-declarator-list:
+ // member-declarator
+ // member-declarator-list ',' member-declarator
+
+ SmallVector<Decl *, 8> DeclsInGroup;
+ ExprResult BitfieldSize;
+ bool ExpectSemi = true;
+
+ while (1) {
+ // member-declarator:
+ // declarator pure-specifier[opt]
+ // declarator brace-or-equal-initializer[opt]
+ // identifier[opt] ':' constant-expression
+ if (Tok.is(tok::colon)) {
+ ConsumeToken();
+ BitfieldSize = ParseConstantExpression();
+ if (BitfieldSize.isInvalid())
+ SkipUntil(tok::comma, true, true);
+ }
+
+ // If a simple-asm-expr is present, parse it.
+ if (Tok.is(tok::kw_asm)) {
+ SourceLocation Loc;
+ ExprResult AsmLabel(ParseSimpleAsm(&Loc));
+ if (AsmLabel.isInvalid())
+ SkipUntil(tok::comma, true, true);
+
+ DeclaratorInfo.setAsmLabel(AsmLabel.release());
+ DeclaratorInfo.SetRangeEnd(Loc);
+ }
+
+ // If attributes exist after the declarator, parse them.
+ MaybeParseGNUAttributes(DeclaratorInfo, &LateParsedAttrs);
+
+ // FIXME: When g++ adds support for this, we'll need to check whether it
+ // goes before or after the GNU attributes and __asm__.
+ ParseOptionalCXX0XVirtSpecifierSeq(VS);
+
+ bool HasDeferredInitializer = false;
+ if ((Tok.is(tok::equal) || Tok.is(tok::l_brace)) && !HasInitializer) {
+ if (BitfieldSize.get()) {
+ Diag(Tok, diag::err_bitfield_member_init);
+ SkipUntil(tok::comma, true, true);
+ } else {
+ HasInitializer = true;
+ HasDeferredInitializer = !DeclaratorInfo.isDeclarationOfFunction() &&
+ DeclaratorInfo.getDeclSpec().getStorageClassSpec()
+ != DeclSpec::SCS_static &&
+ DeclaratorInfo.getDeclSpec().getStorageClassSpec()
+ != DeclSpec::SCS_typedef;
+ }
+ }
+
+ // NOTE: If Sema is the Action module and declarator is an instance field,
+ // this call will *not* return the created decl; it will return null.
+ // See Sema::ActOnCXXMemberDeclarator for details.
+
+ Decl *ThisDecl = 0;
+ if (DS.isFriendSpecified()) {
+ // TODO: handle initializers, bitfields, 'delete'
+ ThisDecl = Actions.ActOnFriendFunctionDecl(getCurScope(), DeclaratorInfo,
+ move(TemplateParams));
+ } else {
+ ThisDecl = Actions.ActOnCXXMemberDeclarator(getCurScope(), AS,
+ DeclaratorInfo,
+ move(TemplateParams),
+ BitfieldSize.release(),
+ VS, HasDeferredInitializer);
+ if (AccessAttrs)
+ Actions.ProcessDeclAttributeList(getCurScope(), ThisDecl, AccessAttrs,
+ false, true);
+ }
+
+ // Set the Decl for any late parsed attributes
+ for (unsigned i = 0, ni = CommonLateParsedAttrs.size(); i < ni; ++i) {
+ CommonLateParsedAttrs[i]->addDecl(ThisDecl);
+ }
+ for (unsigned i = 0, ni = LateParsedAttrs.size(); i < ni; ++i) {
+ LateParsedAttrs[i]->addDecl(ThisDecl);
+ }
+ LateParsedAttrs.clear();
+
+ // Handle the initializer.
+ if (HasDeferredInitializer) {
+ // The initializer was deferred; parse it and cache the tokens.
+ Diag(Tok, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_nonstatic_member_init :
+ diag::ext_nonstatic_member_init);
+
+ if (DeclaratorInfo.isArrayOfUnknownBound()) {
+ // C++0x [dcl.array]p3: An array bound may also be omitted when the
+ // declarator is followed by an initializer.
+ //
+ // A brace-or-equal-initializer for a member-declarator is not an
+ // initializer in the grammar, so this is ill-formed.
+ Diag(Tok, diag::err_incomplete_array_member_init);
+ SkipUntil(tok::comma, true, true);
+ if (ThisDecl)
+ // Avoid later warnings about a class member of incomplete type.
+ ThisDecl->setInvalidDecl();
+ } else
+ ParseCXXNonStaticMemberInitializer(ThisDecl);
+ } else if (HasInitializer) {
+ // Normal initializer.
+ if (!Init.isUsable())
+ Init = ParseCXXMemberInitializer(ThisDecl,
+ DeclaratorInfo.isDeclarationOfFunction(), EqualLoc);
+
+ if (Init.isInvalid())
+ SkipUntil(tok::comma, true, true);
+ else if (ThisDecl)
+ Actions.AddInitializerToDecl(ThisDecl, Init.get(), EqualLoc.isInvalid(),
+ DS.getTypeSpecType() == DeclSpec::TST_auto);
+ } else if (ThisDecl && DS.getStorageClassSpec() == DeclSpec::SCS_static) {
+ // No initializer.
+ Actions.ActOnUninitializedDecl(ThisDecl,
+ DS.getTypeSpecType() == DeclSpec::TST_auto);
+ }
+
+ if (ThisDecl) {
+ Actions.FinalizeDeclaration(ThisDecl);
+ DeclsInGroup.push_back(ThisDecl);
+ }
+
+ if (DeclaratorInfo.isFunctionDeclarator() &&
+ DeclaratorInfo.getDeclSpec().getStorageClassSpec()
+ != DeclSpec::SCS_typedef) {
+ HandleMemberFunctionDefaultArgs(DeclaratorInfo, ThisDecl);
+ }
+
+ DeclaratorInfo.complete(ThisDecl);
+
+ // If we don't have a comma, it is either the end of the list (a ';')
+ // or an error, bail out.
+ if (Tok.isNot(tok::comma))
+ break;
+
+ // Consume the comma.
+ SourceLocation CommaLoc = ConsumeToken();
+
+ if (Tok.isAtStartOfLine() &&
+ !MightBeDeclarator(Declarator::MemberContext)) {
+ // This comma was followed by a line-break and something which can't be
+ // the start of a declarator. The comma was probably a typo for a
+ // semicolon.
+ Diag(CommaLoc, diag::err_expected_semi_declaration)
+ << FixItHint::CreateReplacement(CommaLoc, ";");
+ ExpectSemi = false;
+ break;
+ }
+
+ // Parse the next declarator.
+ DeclaratorInfo.clear();
+ VS.clear();
+ BitfieldSize = true;
+ Init = true;
+ HasInitializer = false;
+ DeclaratorInfo.setCommaLoc(CommaLoc);
+
+ // Attributes are only allowed on the second declarator.
+ MaybeParseGNUAttributes(DeclaratorInfo);
+
+ if (Tok.isNot(tok::colon))
+ ParseDeclarator(DeclaratorInfo);
+ }
+
+ if (ExpectSemi &&
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_decl_list)) {
+ // Skip to end of block or statement.
+ SkipUntil(tok::r_brace, true, true);
+ // If we stopped at a ';', eat it.
+ if (Tok.is(tok::semi)) ConsumeToken();
+ return;
+ }
+
+ Actions.FinalizeDeclaratorGroup(getCurScope(), DS, DeclsInGroup.data(),
+ DeclsInGroup.size());
+}
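+
+// Editor's note (illustrative example, not part of the original source): the
+// member-declarations dispatched by the routine above include, for instance,
+//
+//   struct M {
+//     int a, b : 4;                // declarator list with a bit-field
+//     static const int c = 1;      // static data member with an initializer
+//     void g() {}                  // inline member function definition
+//     using size_type = unsigned;  // C++11 alias-declaration
+//   };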
+
+/// ParseCXXMemberInitializer - Parse the brace-or-equal-initializer or
+/// pure-specifier. Also detect and reject any attempted defaulted/deleted
+/// function definition. The location of the '=', if any, will be placed in
+/// EqualLoc.
+///
+/// pure-specifier:
+/// '= 0'
+///
+/// brace-or-equal-initializer:
+/// '=' initializer-expression
+/// braced-init-list
+///
+/// initializer-clause:
+/// assignment-expression
+/// braced-init-list
+///
+/// defaulted/deleted function-definition:
+/// '=' 'default'
+/// '=' 'delete'
+///
+/// Prior to C++0x, the assignment-expression in an initializer-clause must
+/// be a constant-expression.
+ExprResult Parser::ParseCXXMemberInitializer(Decl *D, bool IsFunction,
+ SourceLocation &EqualLoc) {
+ assert((Tok.is(tok::equal) || Tok.is(tok::l_brace))
+ && "Data member initializer not starting with '=' or '{'");
+
+ EnterExpressionEvaluationContext Context(Actions,
+ Sema::PotentiallyEvaluated,
+ D);
+ if (Tok.is(tok::equal)) {
+ EqualLoc = ConsumeToken();
+ if (Tok.is(tok::kw_delete)) {
+ // In principle, an initializer of '= delete p;' is legal, but it will
+ // never type-check. It's better to diagnose it as an ill-formed expression
+ // than as an ill-formed deleted non-function member.
+ // An initializer of '= delete p, foo' will never be parsed, because
+ // a top-level comma always ends the initializer expression.
+ const Token &Next = NextToken();
+ if (IsFunction || Next.is(tok::semi) || Next.is(tok::comma) ||
+ Next.is(tok::eof)) {
+ if (IsFunction)
+ Diag(ConsumeToken(), diag::err_default_delete_in_multiple_declaration)
+ << 1 /* delete */;
+ else
+ Diag(ConsumeToken(), diag::err_deleted_non_function);
+ return ExprResult();
+ }
+ } else if (Tok.is(tok::kw_default)) {
+ if (IsFunction)
+ Diag(Tok, diag::err_default_delete_in_multiple_declaration)
+ << 0 /* default */;
+ else
+ Diag(ConsumeToken(), diag::err_default_special_members);
+ return ExprResult();
+ }
+
+ }
+ return ParseInitializer();
+}
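+
+// Editor's note (illustrative example, not part of the original source): the
+// initializer forms distinguished above look like
+//
+//   struct T {
+//     virtual void f() = 0;        // pure-specifier
+//     T() = default;               // defaulted definition
+//     T(const T &) = delete;       // deleted definition
+//     int x = 5;                   // brace-or-equal-initializer, '=' form
+//     int y{7};                    // brace-or-equal-initializer, braced form
+//   };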
+
+/// ParseCXXMemberSpecification - Parse the class definition.
+///
+/// member-specification:
+/// member-declaration member-specification[opt]
+/// access-specifier ':' member-specification[opt]
+///
+void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
+ unsigned TagType, Decl *TagDecl) {
+ assert((TagType == DeclSpec::TST_struct ||
+ TagType == DeclSpec::TST_union ||
+ TagType == DeclSpec::TST_class) && "Invalid TagType!");
+
+ PrettyDeclStackTraceEntry CrashInfo(Actions, TagDecl, RecordLoc,
+ "parsing struct/union/class body");
+
+ // Determine whether this is a non-nested class. Note that local
+ // classes are *not* considered to be nested classes.
+ bool NonNestedClass = true;
+ if (!ClassStack.empty()) {
+ for (const Scope *S = getCurScope(); S; S = S->getParent()) {
+ if (S->isClassScope()) {
+ // We're inside a class scope, so this is a nested class.
+ NonNestedClass = false;
+ break;
+ }
+
+ if ((S->getFlags() & Scope::FnScope)) {
+ // If we're in a function or function template declared in the
+ // body of a class, then this is a local class rather than a
+ // nested class.
+ const Scope *Parent = S->getParent();
+ if (Parent->isTemplateParamScope())
+ Parent = Parent->getParent();
+ if (Parent->isClassScope())
+ break;
+ }
+ }
+ }
+
+ // Enter a scope for the class.
+ ParseScope ClassScope(this, Scope::ClassScope|Scope::DeclScope);
+
+ // Note that we are parsing a new (potentially-nested) class definition.
+ ParsingClassDefinition ParsingDef(*this, TagDecl, NonNestedClass);
+
+ if (TagDecl)
+ Actions.ActOnTagStartDefinition(getCurScope(), TagDecl);
+
+ SourceLocation FinalLoc;
+
+ // Parse the optional 'final' keyword.
+ if (getLangOpts().CPlusPlus && Tok.is(tok::identifier)) {
+ assert(isCXX0XFinalKeyword() && "not a class definition");
+ FinalLoc = ConsumeToken();
+
+ Diag(FinalLoc, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_override_control_keyword :
+ diag::ext_override_control_keyword) << "final";
+ }
+
+ if (Tok.is(tok::colon)) {
+ ParseBaseClause(TagDecl);
+
+ if (!Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::err_expected_lbrace_after_base_specifiers);
+
+ if (TagDecl)
+ Actions.ActOnTagDefinitionError(getCurScope(), TagDecl);
+ return;
+ }
+ }
+
+ assert(Tok.is(tok::l_brace));
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ T.consumeOpen();
+
+ if (TagDecl)
+ Actions.ActOnStartCXXMemberDeclarations(getCurScope(), TagDecl, FinalLoc,
+ T.getOpenLocation());
+
+ // C++ 11p3: Members of a class defined with the keyword class are private
+ // by default. Members of a class defined with the keywords struct or union
+ // are public by default.
+ AccessSpecifier CurAS;
+ if (TagType == DeclSpec::TST_class)
+ CurAS = AS_private;
+ else
+ CurAS = AS_public;
+ ParsedAttributes AccessAttrs(AttrFactory);
+
+ if (TagDecl) {
+ // While we still have something to read, read the member-declarations.
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+ // Each iteration of this loop reads one member-declaration.
+
+ if (getLangOpts().MicrosoftExt && (Tok.is(tok::kw___if_exists) ||
+ Tok.is(tok::kw___if_not_exists))) {
+ ParseMicrosoftIfExistsClassDeclaration((DeclSpec::TST)TagType, CurAS);
+ continue;
+ }
+
+ // Check for extraneous top-level semicolon.
+ if (Tok.is(tok::semi)) {
+ Diag(Tok, diag::ext_extra_struct_semi)
+ << DeclSpec::getSpecifierName((DeclSpec::TST)TagType)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+ ConsumeToken();
+ continue;
+ }
+
+ if (Tok.is(tok::annot_pragma_vis)) {
+ HandlePragmaVisibility();
+ continue;
+ }
+
+ if (Tok.is(tok::annot_pragma_pack)) {
+ HandlePragmaPack();
+ continue;
+ }
+
+ AccessSpecifier AS = getAccessSpecifierIfPresent();
+ if (AS != AS_none) {
+ // Current token is a C++ access specifier.
+ CurAS = AS;
+ SourceLocation ASLoc = Tok.getLocation();
+ unsigned TokLength = Tok.getLength();
+ ConsumeToken();
+ AccessAttrs.clear();
+ MaybeParseGNUAttributes(AccessAttrs);
+
+ SourceLocation EndLoc;
+ if (Tok.is(tok::colon)) {
+ EndLoc = Tok.getLocation();
+ ConsumeToken();
+ } else if (Tok.is(tok::semi)) {
+ EndLoc = Tok.getLocation();
+ ConsumeToken();
+ Diag(EndLoc, diag::err_expected_colon)
+ << FixItHint::CreateReplacement(EndLoc, ":");
+ } else {
+ EndLoc = ASLoc.getLocWithOffset(TokLength);
+ Diag(EndLoc, diag::err_expected_colon)
+ << FixItHint::CreateInsertion(EndLoc, ":");
+ }
+
+ if (Actions.ActOnAccessSpecifier(AS, ASLoc, EndLoc,
+ AccessAttrs.getList())) {
+ // Attributes other than annotations were found; discard them.
+ AccessAttrs.clear();
+ }
+
+ continue;
+ }
+
+ // FIXME: Make sure we don't have a template here.
+
+ // Parse all the comma separated declarators.
+ ParseCXXClassMemberDeclaration(CurAS, AccessAttrs.getList());
+ }
+
+ T.consumeClose();
+ } else {
+ SkipUntil(tok::r_brace, false, false);
+ }
+
+ // If attributes exist after class contents, parse them.
+ ParsedAttributes attrs(AttrFactory);
+ MaybeParseGNUAttributes(attrs);
+
+ if (TagDecl)
+ Actions.ActOnFinishCXXMemberSpecification(getCurScope(), RecordLoc, TagDecl,
+ T.getOpenLocation(),
+ T.getCloseLocation(),
+ attrs.getList());
+
+ // C++0x [class.mem]p2: Within the class member-specification, the class is
+ // regarded as complete within function bodies, default arguments, exception-
+ // specifications, and brace-or-equal-initializers for non-static data
+ // members (including such things in nested classes).
+ //
+ // FIXME: Only function bodies and brace-or-equal-initializers are currently
+ // handled. Fix the others!
+ if (TagDecl && NonNestedClass) {
+ // We are not inside a nested class. This class and its nested classes
+ // are complete and we can parse the delayed portions of method
+ // declarations and the lexed inline method definitions, along with any
+ // delayed attributes.
+ SourceLocation SavedPrevTokLocation = PrevTokLocation;
+ ParseLexedAttributes(getCurrentClass());
+ ParseLexedMethodDeclarations(getCurrentClass());
+ ParseLexedMemberInitializers(getCurrentClass());
+ ParseLexedMethodDefs(getCurrentClass());
+ PrevTokLocation = SavedPrevTokLocation;
+ }
+
+ if (TagDecl)
+ Actions.ActOnTagFinishDefinition(getCurScope(), TagDecl,
+ T.getCloseLocation());
+
+ // Leave the class scope.
+ ParsingDef.Pop();
+ ClassScope.Exit();
+}
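+
+// Editor's note (illustrative example, not part of the original source): a
+// member-specification with access-specifier labels and the optional
+// contextual 'final' handled above:
+//
+//   class Widget final {
+//   public:
+//     Widget();
+//   private:
+//     int Size;
+//   };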
+
+/// ParseConstructorInitializer - Parse a C++ constructor initializer,
+/// which explicitly initializes the members or base classes of a
+/// class (C++ [class.base.init]). For example, the three initializers
+/// after the ':' in the Derived constructor below:
+///
+/// @code
+/// class Base { };
+/// class Derived : Base {
+/// int x;
+/// float f;
+/// public:
+/// Derived(float f) : Base(), x(17), f(f) { }
+/// };
+/// @endcode
+///
+/// [C++] ctor-initializer:
+/// ':' mem-initializer-list
+///
+/// [C++] mem-initializer-list:
+/// mem-initializer ...[opt]
+/// mem-initializer ...[opt] , mem-initializer-list
+void Parser::ParseConstructorInitializer(Decl *ConstructorDecl) {
+ assert(Tok.is(tok::colon) && "Constructor initializer always starts with ':'");
+
+ // Poison the SEH identifiers so they are flagged as illegal in constructor initializers
+ PoisonSEHIdentifiersRAIIObject PoisonSEHIdentifiers(*this, true);
+ SourceLocation ColonLoc = ConsumeToken();
+
+ SmallVector<CXXCtorInitializer*, 4> MemInitializers;
+ bool AnyErrors = false;
+
+ do {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteConstructorInitializer(ConstructorDecl,
+ MemInitializers.data(),
+ MemInitializers.size());
+ return cutOffParsing();
+ } else {
+ MemInitResult MemInit = ParseMemInitializer(ConstructorDecl);
+ if (!MemInit.isInvalid())
+ MemInitializers.push_back(MemInit.get());
+ else
+ AnyErrors = true;
+ }
+
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ else if (Tok.is(tok::l_brace))
+ break;
+ // If the next token looks like a base or member initializer, assume that
+ // we're just missing a comma.
+ else if (Tok.is(tok::identifier) || Tok.is(tok::coloncolon)) {
+ SourceLocation Loc = PP.getLocForEndOfToken(PrevTokLocation);
+ Diag(Loc, diag::err_ctor_init_missing_comma)
+ << FixItHint::CreateInsertion(Loc, ", ");
+ } else {
+ // Skip over garbage, until we get to '{'. Don't eat the '{'.
+ Diag(Tok.getLocation(), diag::err_expected_lbrace_or_comma);
+ SkipUntil(tok::l_brace, true, true);
+ break;
+ }
+ } while (true);
+
+ Actions.ActOnMemInitializers(ConstructorDecl, ColonLoc,
+ MemInitializers.data(), MemInitializers.size(),
+ AnyErrors);
+}
+
+/// ParseMemInitializer - Parse a C++ member initializer, which is
+/// part of a constructor initializer that explicitly initializes one
+/// member or base class (C++ [class.base.init]). See
+/// ParseConstructorInitializer for an example.
+///
+/// [C++] mem-initializer:
+/// mem-initializer-id '(' expression-list[opt] ')'
+/// [C++0x] mem-initializer-id braced-init-list
+///
+/// [C++] mem-initializer-id:
+/// '::'[opt] nested-name-specifier[opt] class-name
+/// identifier
+Parser::MemInitResult Parser::ParseMemInitializer(Decl *ConstructorDecl) {
+ // parse '::'[opt] nested-name-specifier[opt]
+ CXXScopeSpec SS;
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
+ ParsedType TemplateTypeTy;
+ if (Tok.is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ if (TemplateId->Kind == TNK_Type_template ||
+ TemplateId->Kind == TNK_Dependent_template_name) {
+ AnnotateTemplateIdTokenAsType();
+ assert(Tok.is(tok::annot_typename) && "template-id -> type failed");
+ TemplateTypeTy = getTypeAnnotation(Tok);
+ }
+ }
+ // Uses of decltype will already have been converted to annot_decltype by
+ // ParseOptionalCXXScopeSpecifier at this point.
+ if (!TemplateTypeTy && Tok.isNot(tok::identifier)
+ && Tok.isNot(tok::annot_decltype)) {
+ Diag(Tok, diag::err_expected_member_or_base_name);
+ return true;
+ }
+
+ IdentifierInfo *II = 0;
+ DeclSpec DS(AttrFactory);
+ SourceLocation IdLoc = Tok.getLocation();
+ if (Tok.is(tok::annot_decltype)) {
+ // Get the decltype expression, if there is one.
+ ParseDecltypeSpecifier(DS);
+ } else {
+ if (Tok.is(tok::identifier))
+ // Get the identifier. This may be a member name or a class name,
+ // but we'll let the semantic analysis determine which it is.
+ II = Tok.getIdentifierInfo();
+ ConsumeToken();
+ }
+
+
+ // Parse the '('.
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+
+ ExprResult InitList = ParseBraceInitializer();
+ if (InitList.isInvalid())
+ return true;
+
+ SourceLocation EllipsisLoc;
+ if (Tok.is(tok::ellipsis))
+ EllipsisLoc = ConsumeToken();
+
+ return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
+ TemplateTypeTy, DS, IdLoc,
+ InitList.take(), EllipsisLoc);
+ } else if(Tok.is(tok::l_paren)) {
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ // Parse the optional expression-list.
+ ExprVector ArgExprs(Actions);
+ CommaLocsTy CommaLocs;
+ if (Tok.isNot(tok::r_paren) && ParseExpressionList(ArgExprs, CommaLocs)) {
+ SkipUntil(tok::r_paren);
+ return true;
+ }
+
+ T.consumeClose();
+
+ SourceLocation EllipsisLoc;
+ if (Tok.is(tok::ellipsis))
+ EllipsisLoc = ConsumeToken();
+
+ return Actions.ActOnMemInitializer(ConstructorDecl, getCurScope(), SS, II,
+ TemplateTypeTy, DS, IdLoc,
+ T.getOpenLocation(), ArgExprs.take(),
+ ArgExprs.size(), T.getCloseLocation(),
+ EllipsisLoc);
+ }
+
+ Diag(Tok, getLangOpts().CPlusPlus0x ? diag::err_expected_lparen_or_lbrace
+ : diag::err_expected_lparen);
+ return true;
+}
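+
+// Editor's note (illustrative example, not part of the original source): both
+// mem-initializer forms parsed above:
+//
+//   struct B { B(int); };
+//   struct E : B {
+//     int n;
+//     E() : B(1), n{0} {}          // parenthesized and C++11 braced forms
+//   };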
+
+/// \brief Parse a C++ exception-specification if present (C++0x [except.spec]).
+///
+/// exception-specification:
+/// dynamic-exception-specification
+/// noexcept-specification
+///
+/// noexcept-specification:
+/// 'noexcept'
+/// 'noexcept' '(' constant-expression ')'
+ExceptionSpecificationType
+Parser::MaybeParseExceptionSpecification(SourceRange &SpecificationRange,
+ SmallVectorImpl<ParsedType> &DynamicExceptions,
+ SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
+ ExprResult &NoexceptExpr) {
+ ExceptionSpecificationType Result = EST_None;
+
+ // See if there's a dynamic specification.
+ if (Tok.is(tok::kw_throw)) {
+ Result = ParseDynamicExceptionSpecification(SpecificationRange,
+ DynamicExceptions,
+ DynamicExceptionRanges);
+ assert(DynamicExceptions.size() == DynamicExceptionRanges.size() &&
+ "Produced different number of exception types and ranges.");
+ }
+
+ // If there's no noexcept specification, we're done.
+ if (Tok.isNot(tok::kw_noexcept))
+ return Result;
+
+ Diag(Tok, diag::warn_cxx98_compat_noexcept_decl);
+
+ // If we already had a dynamic specification, parse the noexcept for
+ // recovery, but emit a diagnostic and don't store the results.
+ SourceRange NoexceptRange;
+ ExceptionSpecificationType NoexceptType = EST_None;
+
+ SourceLocation KeywordLoc = ConsumeToken();
+ if (Tok.is(tok::l_paren)) {
+ // There is an argument.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ NoexceptType = EST_ComputedNoexcept;
+ NoexceptExpr = ParseConstantExpression();
+ // The argument must be contextually convertible to bool. We use
+ // ActOnBooleanCondition for this purpose.
+ if (!NoexceptExpr.isInvalid())
+ NoexceptExpr = Actions.ActOnBooleanCondition(getCurScope(), KeywordLoc,
+ NoexceptExpr.get());
+ T.consumeClose();
+ NoexceptRange = SourceRange(KeywordLoc, T.getCloseLocation());
+ } else {
+ // There is no argument.
+ NoexceptType = EST_BasicNoexcept;
+ NoexceptRange = SourceRange(KeywordLoc, KeywordLoc);
+ }
+
+ if (Result == EST_None) {
+ SpecificationRange = NoexceptRange;
+ Result = NoexceptType;
+
+ // If there's a dynamic specification after a noexcept specification,
+ // parse that and ignore the results.
+ if (Tok.is(tok::kw_throw)) {
+ Diag(Tok.getLocation(), diag::err_dynamic_and_noexcept_specification);
+ ParseDynamicExceptionSpecification(NoexceptRange, DynamicExceptions,
+ DynamicExceptionRanges);
+ }
+ } else {
+ Diag(Tok.getLocation(), diag::err_dynamic_and_noexcept_specification);
+ }
+
+ return Result;
+}
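+
+// Editor's note (illustrative example, not part of the original source):
+//
+//   void f() noexcept;                    // EST_BasicNoexcept
+//   void g() noexcept(sizeof(int) == 4);  // EST_ComputedNoexcept
+//   void h() throw(int) noexcept;         // both kinds given: diagnosed above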
+
+/// ParseDynamicExceptionSpecification - Parse a C++
+/// dynamic-exception-specification (C++ [except.spec]).
+///
+/// dynamic-exception-specification:
+/// 'throw' '(' type-id-list [opt] ')'
+/// [MS] 'throw' '(' '...' ')'
+///
+/// type-id-list:
+/// type-id ... [opt]
+/// type-id-list ',' type-id ... [opt]
+///
+ExceptionSpecificationType Parser::ParseDynamicExceptionSpecification(
+ SourceRange &SpecificationRange,
+ SmallVectorImpl<ParsedType> &Exceptions,
+ SmallVectorImpl<SourceRange> &Ranges) {
+ assert(Tok.is(tok::kw_throw) && "expected throw");
+
+ SpecificationRange.setBegin(ConsumeToken());
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected_lparen_after) << "throw";
+ SpecificationRange.setEnd(SpecificationRange.getBegin());
+ return EST_DynamicNone;
+ }
+
+ // Parse throw(...), a Microsoft extension that means "this function
+ // can throw anything".
+ if (Tok.is(tok::ellipsis)) {
+ SourceLocation EllipsisLoc = ConsumeToken();
+ if (!getLangOpts().MicrosoftExt)
+ Diag(EllipsisLoc, diag::ext_ellipsis_exception_spec);
+ T.consumeClose();
+ SpecificationRange.setEnd(T.getCloseLocation());
+ return EST_MSAny;
+ }
+
+ // Parse the sequence of type-ids.
+ SourceRange Range;
+ while (Tok.isNot(tok::r_paren)) {
+ TypeResult Res(ParseTypeName(&Range));
+
+ if (Tok.is(tok::ellipsis)) {
+ // C++0x [temp.variadic]p5:
+ // - In a dynamic-exception-specification (15.4); the pattern is a
+ // type-id.
+ SourceLocation Ellipsis = ConsumeToken();
+ Range.setEnd(Ellipsis);
+ if (!Res.isInvalid())
+ Res = Actions.ActOnPackExpansion(Res.get(), Ellipsis);
+ }
+
+ if (!Res.isInvalid()) {
+ Exceptions.push_back(Res.get());
+ Ranges.push_back(Range);
+ }
+
+ if (Tok.is(tok::comma))
+ ConsumeToken();
+ else
+ break;
+ }
+
+ T.consumeClose();
+ SpecificationRange.setEnd(T.getCloseLocation());
+ return Exceptions.empty() ? EST_DynamicNone : EST_Dynamic;
+}
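+
+// Editor's note (illustrative example, not part of the original source):
+//
+//   struct A {}; struct B {};
+//   void f() throw();              // EST_DynamicNone
+//   void g() throw(A, B);          // EST_Dynamic
+//   template <typename ...Ts>
+//   void h() throw(Ts...);         // pack expansion in the type-id-list
+//   void i() throw(...);           // Microsoft extension, EST_MSAny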
+
+/// ParseTrailingReturnType - Parse a trailing return type on a new-style
+/// function declaration.
+TypeResult Parser::ParseTrailingReturnType(SourceRange &Range) {
+ assert(Tok.is(tok::arrow) && "expected arrow");
+
+ ConsumeToken();
+
+ return ParseTypeName(&Range, Declarator::TrailingReturnContext);
+}
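+
+// Editor's note (illustrative example, not part of the original source): a
+// trailing return type as parsed above:
+//
+//   auto add(int a, int b) -> int { return a + b; }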
+
+/// \brief We have just started parsing the definition of a new class,
+/// so push that class onto our stack of classes that is currently
+/// being parsed.
+Sema::ParsingClassState
+Parser::PushParsingClass(Decl *ClassDecl, bool NonNestedClass) {
+ assert((NonNestedClass || !ClassStack.empty()) &&
+ "Nested class without outer class");
+ ClassStack.push(new ParsingClass(ClassDecl, NonNestedClass));
+ return Actions.PushParsingClass();
+}
+
+/// \brief Deallocate the given parsed class and all of its nested
+/// classes.
+void Parser::DeallocateParsedClasses(Parser::ParsingClass *Class) {
+ for (unsigned I = 0, N = Class->LateParsedDeclarations.size(); I != N; ++I)
+ delete Class->LateParsedDeclarations[I];
+ delete Class;
+}
+
+/// \brief Pop the top class of the stack of classes that are
+/// currently being parsed.
+///
+/// This routine should be called when we have finished parsing the
+/// definition of a class, but have not yet popped the Scope
+/// associated with the class's definition.
+void Parser::PopParsingClass(Sema::ParsingClassState state) {
+ assert(!ClassStack.empty() && "Mismatched push/pop for class parsing");
+
+ Actions.PopParsingClass(state);
+
+ ParsingClass *Victim = ClassStack.top();
+ ClassStack.pop();
+ if (Victim->TopLevelClass) {
+ // Deallocate all of the nested classes of this class,
+ // recursively: we don't need to keep any of this information.
+ DeallocateParsedClasses(Victim);
+ return;
+ }
+ assert(!ClassStack.empty() && "Missing top-level class?");
+
+ if (Victim->LateParsedDeclarations.empty()) {
+ // The victim is a nested class, but we will not need to perform
+ // any processing after the definition of this class since it has
+ // no members whose handling was delayed. Therefore, we can just
+ // remove this nested class.
+ DeallocateParsedClasses(Victim);
+ return;
+ }
+
+ // This nested class has some members that will need to be processed
+ // after the top-level class is completely defined. Therefore, add
+ // it to the list of nested classes within its parent.
+ assert(getCurScope()->isClassScope() && "Nested class outside of class scope?");
+ ClassStack.top()->LateParsedDeclarations.push_back(new LateParsedClass(this, Victim));
+ Victim->TemplateScope = getCurScope()->getParent()->isTemplateParamScope();
+}
+
+/// \brief Try to parse an 'identifier' which appears within an attribute-token.
+///
+/// \return the parsed identifier on success, and 0 if the next token is not an
+/// attribute-token.
+///
+/// C++11 [dcl.attr.grammar]p3:
+/// If a keyword or an alternative token that satisfies the syntactic
+/// requirements of an identifier is contained in an attribute-token,
+/// it is considered an identifier.
+IdentifierInfo *Parser::TryParseCXX11AttributeIdentifier(SourceLocation &Loc) {
+ switch (Tok.getKind()) {
+ default:
+ // Identifiers and keywords have identifier info attached.
+ if (IdentifierInfo *II = Tok.getIdentifierInfo()) {
+ Loc = ConsumeToken();
+ return II;
+ }
+ return 0;
+
+ case tok::ampamp: // 'and'
+ case tok::pipe: // 'bitor'
+ case tok::pipepipe: // 'or'
+ case tok::caret: // 'xor'
+ case tok::tilde: // 'compl'
+ case tok::amp: // 'bitand'
+ case tok::ampequal: // 'and_eq'
+ case tok::pipeequal: // 'or_eq'
+ case tok::caretequal: // 'xor_eq'
+ case tok::exclaim: // 'not'
+ case tok::exclaimequal: // 'not_eq'
+ // Alternative tokens do not have identifier info, but their spelling
+ // starts with an alphabetical character.
+ llvm::SmallString<8> SpellingBuf;
+ StringRef Spelling = PP.getSpelling(Tok.getLocation(), SpellingBuf);
+ if (std::isalpha(Spelling[0])) {
+ Loc = ConsumeToken();
+ return &PP.getIdentifierTable().get(Spelling.data());
+ }
+ return 0;
+ }
+}
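+
+// Editor's note (illustrative example, not part of the original source): per
+// [dcl.attr.grammar]p3, an alternative token is accepted here, e.g.
+//
+//   [[and]] int x;   // 'and' ('&&') is treated as an identifier; the unknown
+//                    // attribute is then ignored by the code below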
+
+/// ParseCXX11AttributeSpecifier - Parse a C++11 attribute-specifier. Currently
+/// only parses standard attributes.
+///
+/// [C++11] attribute-specifier:
+/// '[' '[' attribute-list ']' ']'
+/// alignment-specifier
+///
+/// [C++11] attribute-list:
+/// attribute[opt]
+/// attribute-list ',' attribute[opt]
+/// attribute '...'
+/// attribute-list ',' attribute '...'
+///
+/// [C++11] attribute:
+/// attribute-token attribute-argument-clause[opt]
+///
+/// [C++11] attribute-token:
+/// identifier
+/// attribute-scoped-token
+///
+/// [C++11] attribute-scoped-token:
+/// attribute-namespace '::' identifier
+///
+/// [C++11] attribute-namespace:
+/// identifier
+///
+/// [C++11] attribute-argument-clause:
+/// '(' balanced-token-seq ')'
+///
+/// [C++11] balanced-token-seq:
+/// balanced-token
+/// balanced-token-seq balanced-token
+///
+/// [C++11] balanced-token:
+/// '(' balanced-token-seq ')'
+/// '[' balanced-token-seq ']'
+/// '{' balanced-token-seq '}'
+/// any token but '(', ')', '[', ']', '{', or '}'
+void Parser::ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
+ SourceLocation *endLoc) {
+ if (Tok.is(tok::kw_alignas)) {
+ Diag(Tok.getLocation(), diag::warn_cxx98_compat_alignas);
+ ParseAlignmentSpecifier(attrs, endLoc);
+ return;
+ }
+
+ assert(Tok.is(tok::l_square) && NextToken().is(tok::l_square)
+ && "Not a C++11 attribute list");
+
+ Diag(Tok.getLocation(), diag::warn_cxx98_compat_attribute);
+
+ ConsumeBracket();
+ ConsumeBracket();
+
+ while (Tok.isNot(tok::r_square)) {
+ // attribute not present
+ if (Tok.is(tok::comma)) {
+ ConsumeToken();
+ continue;
+ }
+
+ SourceLocation ScopeLoc, AttrLoc;
+ IdentifierInfo *ScopeName = 0, *AttrName = 0;
+
+ AttrName = TryParseCXX11AttributeIdentifier(AttrLoc);
+ if (!AttrName)
+ // Break out to the "expected ']'" diagnostic.
+ break;
+
+ // scoped attribute
+ if (Tok.is(tok::coloncolon)) {
+ ConsumeToken();
+
+ ScopeName = AttrName;
+ ScopeLoc = AttrLoc;
+
+ AttrName = TryParseCXX11AttributeIdentifier(AttrLoc);
+ if (!AttrName) {
+ Diag(Tok.getLocation(), diag::err_expected_ident);
+ SkipUntil(tok::r_square, tok::comma, true, true);
+ continue;
+ }
+ }
+
+ bool AttrParsed = false;
+ // No scoped names are supported; ideally we could put all non-standard
+ // attributes into namespaces.
+ if (!ScopeName) {
+ switch (AttributeList::getKind(AttrName)) {
+ // No arguments
+ case AttributeList::AT_carries_dependency:
+ case AttributeList::AT_noreturn: {
+ if (Tok.is(tok::l_paren)) {
+ Diag(Tok.getLocation(), diag::err_cxx11_attribute_forbids_arguments)
+ << AttrName->getName();
+ break;
+ }
+
+ attrs.addNew(AttrName, AttrLoc, 0, AttrLoc, 0,
+ SourceLocation(), 0, 0, false, true);
+ AttrParsed = true;
+ break;
+ }
+
+ // Silence warnings
+ default: break;
+ }
+ }
+
+ // Skip the entire parameter clause, if any
+ if (!AttrParsed && Tok.is(tok::l_paren)) {
+ ConsumeParen();
+ // SkipUntil maintains the balancedness of tokens.
+ SkipUntil(tok::r_paren, false);
+ }
+
+ if (Tok.is(tok::ellipsis)) {
+ if (AttrParsed)
+ Diag(Tok, diag::err_cxx11_attribute_forbids_ellipsis)
+ << AttrName->getName();
+ ConsumeToken();
+ }
+ }
+
+ if (ExpectAndConsume(tok::r_square, diag::err_expected_rsquare))
+ SkipUntil(tok::r_square, false);
+ if (endLoc)
+ *endLoc = Tok.getLocation();
+ if (ExpectAndConsume(tok::r_square, diag::err_expected_rsquare))
+ SkipUntil(tok::r_square, false);
+}
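+
+// Editor's note (illustrative example, not part of the original source): only
+// [[noreturn]] and [[carries_dependency]] are recognized above; other
+// attributes (including scoped ones) are parsed and then ignored.
+//
+//   [[noreturn]] void fatal();
+//   [[carries_dependency]] int *load(int *p);
+//   [[vendor::annotate("x")]] int v;   // argument clause skipped, no effect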
+
+/// ParseCXX11Attributes - Parse a C++0x attribute-specifier-seq.
+///
+/// attribute-specifier-seq:
+/// attribute-specifier-seq[opt] attribute-specifier
+void Parser::ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
+ SourceLocation *endLoc) {
+ SourceLocation StartLoc = Tok.getLocation(), Loc;
+ if (!endLoc)
+ endLoc = &Loc;
+
+ do {
+ ParseCXX11AttributeSpecifier(attrs, endLoc);
+ } while (isCXX11AttributeSpecifier());
+
+ attrs.Range = SourceRange(StartLoc, *endLoc);
+}
+
+/// ParseMicrosoftAttributes - Parse a Microsoft attribute [Attr]
+///
+/// [MS] ms-attribute:
+/// '[' token-seq ']'
+///
+/// [MS] ms-attribute-seq:
+/// ms-attribute[opt]
+/// ms-attribute ms-attribute-seq
+void Parser::ParseMicrosoftAttributes(ParsedAttributes &attrs,
+ SourceLocation *endLoc) {
+ assert(Tok.is(tok::l_square) && "Not a Microsoft attribute list");
+
+ while (Tok.is(tok::l_square)) {
+ // FIXME: If this is actually a C++11 attribute, parse it as one.
+ ConsumeBracket();
+ SkipUntil(tok::r_square, true, true);
+ if (endLoc) *endLoc = Tok.getLocation();
+ ExpectAndConsume(tok::r_square, diag::err_expected_rsquare);
+ }
+}
+
+void Parser::ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
+ AccessSpecifier& CurAS) {
+ IfExistsCondition Result;
+ if (ParseMicrosoftIfExistsCondition(Result))
+ return;
+
+ BalancedDelimiterTracker Braces(*this, tok::l_brace);
+ if (Braces.consumeOpen()) {
+ Diag(Tok, diag::err_expected_lbrace);
+ return;
+ }
+
+ switch (Result.Behavior) {
+ case IEB_Parse:
+ // Parse the declarations below.
+ break;
+
+ case IEB_Dependent:
+ Diag(Result.KeywordLoc, diag::warn_microsoft_dependent_exists)
+ << Result.IsIfExists;
+ // Fall through to skip.
+
+ case IEB_Skip:
+ Braces.skipToEnd();
+ return;
+ }
+
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+ // __if_exists, __if_not_exists can nest.
+ if ((Tok.is(tok::kw___if_exists) || Tok.is(tok::kw___if_not_exists))) {
+ ParseMicrosoftIfExistsClassDeclaration((DeclSpec::TST)TagType, CurAS);
+ continue;
+ }
+
+ // Check for extraneous top-level semicolon.
+ if (Tok.is(tok::semi)) {
+ Diag(Tok, diag::ext_extra_struct_semi)
+ << DeclSpec::getSpecifierName((DeclSpec::TST)TagType)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+ ConsumeToken();
+ continue;
+ }
+
+ AccessSpecifier AS = getAccessSpecifierIfPresent();
+ if (AS != AS_none) {
+ // Current token is a C++ access specifier.
+ CurAS = AS;
+ SourceLocation ASLoc = Tok.getLocation();
+ ConsumeToken();
+ if (Tok.is(tok::colon))
+ Actions.ActOnAccessSpecifier(AS, ASLoc, Tok.getLocation());
+ else
+ Diag(Tok, diag::err_expected_colon);
+ ConsumeToken();
+ continue;
+ }
+
+ // Parse all the comma separated declarators.
+ ParseCXXClassMemberDeclaration(CurAS, 0);
+ }
+
+ Braces.consumeClose();
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
new file mode 100644
index 0000000..7f3a815
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseExpr.cpp
@@ -0,0 +1,2433 @@
+//===--- ParseExpr.cpp - Expression Parsing -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Expression parsing implementation. Expressions in
+// C99 basically consist of a bunch of binary operators with unary operators and
+// other random stuff at the leaves.
+//
+// In the C99 grammar, these unary operators bind tightest and are represented
+// as the 'cast-expression' production. Everything else is either a binary
+// operator (e.g. '/') or a ternary operator ("?:"). The unary leaves are
+// handled by ParseCastExpression, the higher level pieces are handled by
+// ParseBinaryExpression.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/TypoCorrection.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "RAIIObjectsForParser.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+
+/// getBinOpPrecedence - Return the precedence of the specified binary operator
+/// token.
+static prec::Level getBinOpPrecedence(tok::TokenKind Kind,
+ bool GreaterThanIsOperator,
+ bool CPlusPlus0x) {
+ switch (Kind) {
+ case tok::greater:
+ // C++ [temp.names]p3:
+ // [...] When parsing a template-argument-list, the first
+ // non-nested > is taken as the ending delimiter rather than a
+ // greater-than operator. [...]
+ if (GreaterThanIsOperator)
+ return prec::Relational;
+ return prec::Unknown;
+
+ case tok::greatergreater:
+ // C++0x [temp.names]p3:
+ //
+ // [...] Similarly, the first non-nested >> is treated as two
+ // consecutive but distinct > tokens, the first of which is
+ // taken as the end of the template-argument-list and completes
+ // the template-id. [...]
+ if (GreaterThanIsOperator || !CPlusPlus0x)
+ return prec::Shift;
+ return prec::Unknown;
+
+ default: return prec::Unknown;
+ case tok::comma: return prec::Comma;
+ case tok::equal:
+ case tok::starequal:
+ case tok::slashequal:
+ case tok::percentequal:
+ case tok::plusequal:
+ case tok::minusequal:
+ case tok::lesslessequal:
+ case tok::greatergreaterequal:
+ case tok::ampequal:
+ case tok::caretequal:
+ case tok::pipeequal: return prec::Assignment;
+ case tok::question: return prec::Conditional;
+ case tok::pipepipe: return prec::LogicalOr;
+ case tok::ampamp: return prec::LogicalAnd;
+ case tok::pipe: return prec::InclusiveOr;
+ case tok::caret: return prec::ExclusiveOr;
+ case tok::amp: return prec::And;
+ case tok::exclaimequal:
+ case tok::equalequal: return prec::Equality;
+ case tok::lessequal:
+ case tok::less:
+ case tok::greaterequal: return prec::Relational;
+ case tok::lessless: return prec::Shift;
+ case tok::plus:
+ case tok::minus: return prec::Additive;
+ case tok::percent:
+ case tok::slash:
+ case tok::star: return prec::Multiplicative;
+ case tok::periodstar:
+ case tok::arrowstar: return prec::PointerToMember;
+ }
+}
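+
+// Editor's note (illustrative example, not part of the original source): the
+// GreaterThanIsOperator / CPlusPlus0x handling above lets '>>' close two
+// template argument lists in C++11 while staying a shift operator elsewhere:
+//
+//   template <typename T> struct Box {};
+//   Box<Box<int>> b;                 // '>>' ends both lists in C++11
+//   int shifted = 256 >> 2;          // ordinary right-shift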
+
+
+/// ParseExpression - Simple precedence-based parser for binary/ternary
+/// operators.
+///
+/// Note: we diverge from the C99 grammar when parsing the assignment-expression
+/// production. C99 specifies that the LHS of an assignment operator should be
+/// parsed as a unary-expression, but consistency dictates that it be a
+/// conditional-expression. In practice, the important thing here is that the
+/// LHS of an assignment has to be an l-value, which productions between
+/// unary-expression and conditional-expression don't produce. Because we want
+/// consistency, we parse the LHS as a conditional-expression, then check for
+/// l-value-ness in semantic analysis stages.
+///
+/// pm-expression: [C++ 5.5]
+/// cast-expression
+/// pm-expression '.*' cast-expression
+/// pm-expression '->*' cast-expression
+///
+/// multiplicative-expression: [C99 6.5.5]
+/// Note: in C++, apply pm-expression instead of cast-expression
+/// cast-expression
+/// multiplicative-expression '*' cast-expression
+/// multiplicative-expression '/' cast-expression
+/// multiplicative-expression '%' cast-expression
+///
+/// additive-expression: [C99 6.5.6]
+/// multiplicative-expression
+/// additive-expression '+' multiplicative-expression
+/// additive-expression '-' multiplicative-expression
+///
+/// shift-expression: [C99 6.5.7]
+/// additive-expression
+/// shift-expression '<<' additive-expression
+/// shift-expression '>>' additive-expression
+///
+/// relational-expression: [C99 6.5.8]
+/// shift-expression
+/// relational-expression '<' shift-expression
+/// relational-expression '>' shift-expression
+/// relational-expression '<=' shift-expression
+/// relational-expression '>=' shift-expression
+///
+/// equality-expression: [C99 6.5.9]
+/// relational-expression
+/// equality-expression '==' relational-expression
+/// equality-expression '!=' relational-expression
+///
+/// AND-expression: [C99 6.5.10]
+/// equality-expression
+/// AND-expression '&' equality-expression
+///
+/// exclusive-OR-expression: [C99 6.5.11]
+/// AND-expression
+/// exclusive-OR-expression '^' AND-expression
+///
+/// inclusive-OR-expression: [C99 6.5.12]
+/// exclusive-OR-expression
+/// inclusive-OR-expression '|' exclusive-OR-expression
+///
+/// logical-AND-expression: [C99 6.5.13]
+/// inclusive-OR-expression
+/// logical-AND-expression '&&' inclusive-OR-expression
+///
+/// logical-OR-expression: [C99 6.5.14]
+/// logical-AND-expression
+/// logical-OR-expression '||' logical-AND-expression
+///
+/// conditional-expression: [C99 6.5.15]
+/// logical-OR-expression
+/// logical-OR-expression '?' expression ':' conditional-expression
+/// [GNU] logical-OR-expression '?' ':' conditional-expression
+/// [C++] the third operand is an assignment-expression
+///
+/// assignment-expression: [C99 6.5.16]
+/// conditional-expression
+/// unary-expression assignment-operator assignment-expression
+/// [C++] throw-expression [C++ 15]
+///
+/// assignment-operator: one of
+/// = *= /= %= += -= <<= >>= &= ^= |=
+///
+/// expression: [C99 6.5.17]
+/// assignment-expression ...[opt]
+/// expression ',' assignment-expression ...[opt]
+ExprResult Parser::ParseExpression(TypeCastState isTypeCast) {
+ ExprResult LHS(ParseAssignmentExpression(isTypeCast));
+ return ParseRHSOfBinaryExpression(move(LHS), prec::Comma);
+}
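+
+// Editor's note (illustrative example, not part of the original source): the
+// divergence described above means code like
+//
+//   int x = 0;
+//   x + 1 = 5;    // parsed as an assignment; Sema rejects the non-l-value LHS
+//
+// is accepted syntactically and rejected during semantic analysis.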
+
+/// This routine is called when the '@' is seen and consumed.
+/// Current token is an Identifier and is not a 'try'. This
+/// routine is necessary to disambiguate @try-statement from,
+/// for example, @encode-expression.
+///
+ExprResult
+Parser::ParseExpressionWithLeadingAt(SourceLocation AtLoc) {
+ ExprResult LHS(ParseObjCAtExpression(AtLoc));
+ return ParseRHSOfBinaryExpression(move(LHS), prec::Comma);
+}
+
+/// This routine is called when a leading '__extension__' is seen and
+/// consumed. This is necessary because the token gets consumed in the
+/// process of disambiguating between an expression and a declaration.
+ExprResult
+Parser::ParseExpressionWithLeadingExtension(SourceLocation ExtLoc) {
+ ExprResult LHS(true);
+ {
+ // Silence extension warnings in the sub-expression
+ ExtensionRAIIObject O(Diags);
+
+ LHS = ParseCastExpression(false);
+ }
+
+ if (!LHS.isInvalid())
+ LHS = Actions.ActOnUnaryOp(getCurScope(), ExtLoc, tok::kw___extension__,
+ LHS.take());
+
+ return ParseRHSOfBinaryExpression(move(LHS), prec::Comma);
+}
+
+/// ParseAssignmentExpression - Parse an expr that doesn't include commas.
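+/// For example, in 'a[i] = b = c, d' this parses 'a[i] = b = c' (grouping it
+/// right-associatively as 'a[i] = (b = c)') and leaves the ',' to the caller.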
+ExprResult Parser::ParseAssignmentExpression(TypeCastState isTypeCast) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Expression);
+ cutOffParsing();
+ return ExprError();
+ }
+
+ if (Tok.is(tok::kw_throw))
+ return ParseThrowExpression();
+
+ ExprResult LHS = ParseCastExpression(/*isUnaryExpression=*/false,
+ /*isAddressOfOperand=*/false,
+ isTypeCast);
+ return ParseRHSOfBinaryExpression(move(LHS), prec::Assignment);
+}
+
+/// ParseAssignmentExprWithObjCMessageExprStart - Parse an assignment expression
+/// where part of an objc message send has already been parsed. In this case
+/// LBracLoc indicates the location of the '[' of the message send, and either
+/// ReceiverName or ReceiverExpr is non-null indicating the receiver of the
+/// message.
+///
+/// Since this handles full assignment-expressions, it handles postfix
+/// expressions and other binary operators for these expressions as well.
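+///
+/// For example, when the caller has already consumed the '[' and the receiver
+/// in '[arr count] > 0', this routine finishes the message send, parses any
+/// postfix suffixes, and then handles the trailing '> 0'.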
+ExprResult
+Parser::ParseAssignmentExprWithObjCMessageExprStart(SourceLocation LBracLoc,
+ SourceLocation SuperLoc,
+ ParsedType ReceiverType,
+ Expr *ReceiverExpr) {
+ ExprResult R
+ = ParseObjCMessageExpressionBody(LBracLoc, SuperLoc,
+ ReceiverType, ReceiverExpr);
+ R = ParsePostfixExpressionSuffix(R);
+ return ParseRHSOfBinaryExpression(R, prec::Assignment);
+}
+
+
+ExprResult Parser::ParseConstantExpression(TypeCastState isTypeCast) {
+ // C++03 [basic.def.odr]p2:
+ // An expression is potentially evaluated unless it appears where an
+ // integral constant expression is required (see 5.19) [...].
+ // C++98 and C++11 have no such rule, but this is only a defect in C++98.
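+  // (Used, for example, when parsing enumerator values, case labels, and
+  // bit-field widths, all of which require a constant expression.)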
+ EnterExpressionEvaluationContext Unevaluated(Actions,
+ Sema::ConstantEvaluated);
+
+ ExprResult LHS(ParseCastExpression(false, false, isTypeCast));
+ ExprResult Res(ParseRHSOfBinaryExpression(LHS, prec::Conditional));
+ return Actions.ActOnConstantExpression(Res);
+}
+
+/// ParseRHSOfBinaryExpression - Parse a binary expression that starts with
+/// LHS and has a precedence of at least MinPrec.
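+///
+/// For example, starting with LHS 'a' and MinPrec prec::Assignment on the
+/// input 'a + b * c - d', this consumes '+', notices that '*' binds more
+/// tightly, recurses to build 'b * c', and then folds leftward to produce
+/// '((a + (b * c)) - d)'.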
+ExprResult
+Parser::ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec) {
+ prec::Level NextTokPrec = getBinOpPrecedence(Tok.getKind(),
+ GreaterThanIsOperator,
+ getLangOpts().CPlusPlus0x);
+ SourceLocation ColonLoc;
+
+ while (1) {
+ // If this token has a lower precedence than we are allowed to parse (e.g.
+ // because we are called recursively, or because the token is not a binop),
+ // then we are done!
+ if (NextTokPrec < MinPrec)
+ return move(LHS);
+
+ // Consume the operator, saving the operator token for error reporting.
+ Token OpToken = Tok;
+ ConsumeToken();
+
+ // Special case handling for the ternary operator.
+ ExprResult TernaryMiddle(true);
+ if (NextTokPrec == prec::Conditional) {
+ if (Tok.isNot(tok::colon)) {
+ // Don't parse FOO:BAR as if it were a typo for FOO::BAR.
+ ColonProtectionRAIIObject X(*this);
+
+ // Handle this production specially:
+ // logical-OR-expression '?' expression ':' conditional-expression
+ // In particular, the RHS of the '?' is 'expression', not
+ // 'logical-OR-expression' as we might expect.
+ TernaryMiddle = ParseExpression();
+ if (TernaryMiddle.isInvalid()) {
+ LHS = ExprError();
+ TernaryMiddle = 0;
+ }
+ } else {
+ // Special case handling of "X ? Y : Z" where Y is empty:
+ // logical-OR-expression '?' ':' conditional-expression [GNU]
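+        // For example, 'p ?: q' behaves like 'p ? p : q' except that 'p' is
+        // evaluated only once.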
+ TernaryMiddle = 0;
+ Diag(Tok, diag::ext_gnu_conditional_expr);
+ }
+
+ if (Tok.is(tok::colon)) {
+ // Eat the colon.
+ ColonLoc = ConsumeToken();
+ } else {
+        // Otherwise, we're missing a ':'. Assume the user simply forgot it.
+        // If we're not in a macro expansion, we can suggest a fixit hint: if
+        // there were two spaces before the current token, suggest inserting
+        // the colon between them, otherwise insert ": ".
+ SourceLocation FILoc = Tok.getLocation();
+ const char *FIText = ": ";
+ const SourceManager &SM = PP.getSourceManager();
+ if (FILoc.isFileID() || PP.isAtStartOfMacroExpansion(FILoc, &FILoc)) {
+ assert(FILoc.isFileID());
+ bool IsInvalid = false;
+ const char *SourcePtr =
+ SM.getCharacterData(FILoc.getLocWithOffset(-1), &IsInvalid);
+ if (!IsInvalid && *SourcePtr == ' ') {
+ SourcePtr =
+ SM.getCharacterData(FILoc.getLocWithOffset(-2), &IsInvalid);
+ if (!IsInvalid && *SourcePtr == ' ') {
+ FILoc = FILoc.getLocWithOffset(-1);
+ FIText = ":";
+ }
+ }
+ }
+
+ Diag(Tok, diag::err_expected_colon)
+ << FixItHint::CreateInsertion(FILoc, FIText);
+ Diag(OpToken, diag::note_matching) << "?";
+ ColonLoc = Tok.getLocation();
+ }
+ }
+
+ // Code completion for the right-hand side of an assignment expression
+ // goes through a special hook that takes the left-hand side into account.
+ if (Tok.is(tok::code_completion) && NextTokPrec == prec::Assignment) {
+ Actions.CodeCompleteAssignmentRHS(getCurScope(), LHS.get());
+ cutOffParsing();
+ return ExprError();
+ }
+
+ // Parse another leaf here for the RHS of the operator.
+ // ParseCastExpression works here because all RHS expressions in C have it
+ // as a prefix, at least. However, in C++, an assignment-expression could
+ // be a throw-expression, which is not a valid cast-expression.
+ // Therefore we need some special-casing here.
+ // Also note that the third operand of the conditional operator is
+ // an assignment-expression in C++, and in C++11, we can have a
+ // braced-init-list on the RHS of an assignment. For better diagnostics,
+ // parse as if we were allowed braced-init-lists everywhere, and check that
+ // they only appear on the RHS of assignments later.
+ ExprResult RHS;
+ bool RHSIsInitList = false;
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ RHS = ParseBraceInitializer();
+ RHSIsInitList = true;
+ } else if (getLangOpts().CPlusPlus && NextTokPrec <= prec::Conditional)
+ RHS = ParseAssignmentExpression();
+ else
+ RHS = ParseCastExpression(false);
+
+ if (RHS.isInvalid())
+ LHS = ExprError();
+
+ // Remember the precedence of this operator and get the precedence of the
+ // operator immediately to the right of the RHS.
+ prec::Level ThisPrec = NextTokPrec;
+ NextTokPrec = getBinOpPrecedence(Tok.getKind(), GreaterThanIsOperator,
+ getLangOpts().CPlusPlus0x);
+
+ // Assignment and conditional expressions are right-associative.
+ bool isRightAssoc = ThisPrec == prec::Conditional ||
+ ThisPrec == prec::Assignment;
+
+ // Get the precedence of the operator to the right of the RHS. If it binds
+ // more tightly with RHS than we do, evaluate it completely first.
+ if (ThisPrec < NextTokPrec ||
+ (ThisPrec == NextTokPrec && isRightAssoc)) {
+ if (!RHS.isInvalid() && RHSIsInitList) {
+ Diag(Tok, diag::err_init_list_bin_op)
+ << /*LHS*/0 << PP.getSpelling(Tok) << Actions.getExprRange(RHS.get());
+ RHS = ExprError();
+ }
+      // If this is left-associative, only parse things on the RHS that bind
+      // more tightly than the current operator. If it is right-associative,
+      // it is okay to bind exactly as tightly. For example, compile A=B=C=D
+      // as A=(B=(C=D)), where each paren is a level of recursion here.
+ // The function takes ownership of the RHS.
+ RHS = ParseRHSOfBinaryExpression(RHS,
+ static_cast<prec::Level>(ThisPrec + !isRightAssoc));
+ RHSIsInitList = false;
+
+ if (RHS.isInvalid())
+ LHS = ExprError();
+
+ NextTokPrec = getBinOpPrecedence(Tok.getKind(), GreaterThanIsOperator,
+ getLangOpts().CPlusPlus0x);
+ }
+ assert(NextTokPrec <= ThisPrec && "Recursion didn't work!");
+
+ if (!RHS.isInvalid() && RHSIsInitList) {
+ if (ThisPrec == prec::Assignment) {
+ Diag(OpToken, diag::warn_cxx98_compat_generalized_initializer_lists)
+ << Actions.getExprRange(RHS.get());
+ } else {
+ Diag(OpToken, diag::err_init_list_bin_op)
+ << /*RHS*/1 << PP.getSpelling(OpToken)
+ << Actions.getExprRange(RHS.get());
+ LHS = ExprError();
+ }
+ }
+
+ if (!LHS.isInvalid()) {
+ // Combine the LHS and RHS into the LHS (e.g. build AST).
+ if (TernaryMiddle.isInvalid()) {
+ // If we're using '>>' as an operator within a template
+ // argument list (in C++98), suggest the addition of
+ // parentheses so that the code remains well-formed in C++0x.
+ if (!GreaterThanIsOperator && OpToken.is(tok::greatergreater))
+ SuggestParentheses(OpToken.getLocation(),
+ diag::warn_cxx0x_right_shift_in_template_arg,
+ SourceRange(Actions.getExprRange(LHS.get()).getBegin(),
+ Actions.getExprRange(RHS.get()).getEnd()));
+
+ LHS = Actions.ActOnBinOp(getCurScope(), OpToken.getLocation(),
+ OpToken.getKind(), LHS.take(), RHS.take());
+ } else
+ LHS = Actions.ActOnConditionalOp(OpToken.getLocation(), ColonLoc,
+ LHS.take(), TernaryMiddle.take(),
+ RHS.take());
+ }
+ }
+}
+
+/// ParseCastExpression - Parse a cast-expression, or, if isUnaryExpression is
+/// true, parse a unary-expression. isAddressOfOperand exists because an
+/// id-expression that is the operand of address-of gets special treatment
+/// due to member pointers.
+///
+ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
+ bool isAddressOfOperand,
+ TypeCastState isTypeCast) {
+ bool NotCastExpr;
+ ExprResult Res = ParseCastExpression(isUnaryExpression,
+ isAddressOfOperand,
+ NotCastExpr,
+ isTypeCast);
+ if (NotCastExpr)
+ Diag(Tok, diag::err_expected_expression);
+ return move(Res);
+}
+
+namespace {
+class CastExpressionIdValidator : public CorrectionCandidateCallback {
+ public:
+ CastExpressionIdValidator(bool AllowTypes, bool AllowNonTypes)
+ : AllowNonTypes(AllowNonTypes) {
+ WantTypeSpecifiers = AllowTypes;
+ }
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ NamedDecl *ND = candidate.getCorrectionDecl();
+ if (!ND)
+ return candidate.isKeyword();
+
+ if (isa<TypeDecl>(ND))
+ return WantTypeSpecifiers;
+ return AllowNonTypes;
+ }
+
+ private:
+ bool AllowNonTypes;
+};
+}
+
+/// ParseCastExpression - Parse a cast-expression, or, if isUnaryExpression is
+/// true, parse a unary-expression. isAddressOfOperand exists because an
+/// id-expression that is the operand of address-of gets special treatment
+/// due to member pointers. NotCastExpr is set to true if the token is not the
+/// start of a cast-expression, and no diagnostic is emitted in this case.
+///
+/// cast-expression: [C99 6.5.4]
+/// unary-expression
+/// '(' type-name ')' cast-expression
+///
+/// unary-expression: [C99 6.5.3]
+/// postfix-expression
+/// '++' unary-expression
+/// '--' unary-expression
+/// unary-operator cast-expression
+/// 'sizeof' unary-expression
+/// 'sizeof' '(' type-name ')'
+/// [C++11] 'sizeof' '...' '(' identifier ')'
+/// [GNU] '__alignof' unary-expression
+/// [GNU] '__alignof' '(' type-name ')'
+/// [C++11] 'alignof' '(' type-id ')'
+/// [GNU] '&&' identifier
+/// [C++11] 'noexcept' '(' expression ')' [C++11 5.3.7]
+/// [C++] new-expression
+/// [C++] delete-expression
+///
+/// unary-operator: one of
+/// '&' '*' '+' '-' '~' '!'
+/// [GNU] '__extension__' '__real' '__imag'
+///
+/// primary-expression: [C99 6.5.1]
+/// [C99] identifier
+/// [C++] id-expression
+/// constant
+/// string-literal
+/// [C++] boolean-literal [C++ 2.13.5]
+/// [C++11] 'nullptr' [C++11 2.14.7]
+/// [C++11] user-defined-literal
+/// '(' expression ')'
+/// [C11] generic-selection
+/// '__func__' [C99 6.4.2.2]
+/// [GNU] '__FUNCTION__'
+/// [GNU] '__PRETTY_FUNCTION__'
+/// [GNU] '(' compound-statement ')'
+/// [GNU] '__builtin_va_arg' '(' assignment-expression ',' type-name ')'
+/// [GNU] '__builtin_offsetof' '(' type-name ',' offsetof-member-designator')'
+/// [GNU] '__builtin_choose_expr' '(' assign-expr ',' assign-expr ','
+/// assign-expr ')'
+/// [GNU] '__builtin_types_compatible_p' '(' type-name ',' type-name ')'
+/// [GNU] '__null'
+/// [OBJC] '[' objc-message-expr ']'
+/// [OBJC] '@selector' '(' objc-selector-arg ')'
+/// [OBJC] '@protocol' '(' identifier ')'
+/// [OBJC] '@encode' '(' type-name ')'
+/// [OBJC] objc-string-literal
+/// [C++] simple-type-specifier '(' expression-list[opt] ')' [C++ 5.2.3]
+/// [C++11] simple-type-specifier braced-init-list [C++11 5.2.3]
+/// [C++] typename-specifier '(' expression-list[opt] ')' [C++ 5.2.3]
+/// [C++11] typename-specifier braced-init-list [C++11 5.2.3]
+/// [C++] 'const_cast' '<' type-name '>' '(' expression ')' [C++ 5.2p1]
+/// [C++] 'dynamic_cast' '<' type-name '>' '(' expression ')' [C++ 5.2p1]
+/// [C++] 'reinterpret_cast' '<' type-name '>' '(' expression ')' [C++ 5.2p1]
+/// [C++] 'static_cast' '<' type-name '>' '(' expression ')' [C++ 5.2p1]
+/// [C++] 'typeid' '(' expression ')' [C++ 5.2p1]
+/// [C++] 'typeid' '(' type-id ')' [C++ 5.2p1]
+/// [C++] 'this' [C++ 9.3.2]
+/// [G++] unary-type-trait '(' type-id ')'
+/// [G++] binary-type-trait '(' type-id ',' type-id ')' [TODO]
+/// [EMBT] array-type-trait '(' type-id ',' integer ')'
+/// [clang] '^' block-literal
+///
+/// constant: [C99 6.4.4]
+/// integer-constant
+/// floating-constant
+/// enumeration-constant -> identifier
+/// character-constant
+///
+/// id-expression: [C++ 5.1]
+/// unqualified-id
+/// qualified-id
+///
+/// unqualified-id: [C++ 5.1]
+/// identifier
+/// operator-function-id
+/// conversion-function-id
+/// '~' class-name
+/// template-id
+///
+/// new-expression: [C++ 5.3.4]
+/// '::'[opt] 'new' new-placement[opt] new-type-id
+/// new-initializer[opt]
+/// '::'[opt] 'new' new-placement[opt] '(' type-id ')'
+/// new-initializer[opt]
+///
+/// delete-expression: [C++ 5.3.5]
+/// '::'[opt] 'delete' cast-expression
+/// '::'[opt] 'delete' '[' ']' cast-expression
+///
+/// [GNU/Embarcadero] unary-type-trait:
+/// '__is_arithmetic'
+/// '__is_floating_point'
+/// '__is_integral'
+/// '__is_lvalue_expr'
+/// '__is_rvalue_expr'
+/// '__is_complete_type'
+/// '__is_void'
+/// '__is_array'
+/// '__is_function'
+/// '__is_reference'
+/// '__is_lvalue_reference'
+/// '__is_rvalue_reference'
+/// '__is_fundamental'
+/// '__is_object'
+/// '__is_scalar'
+/// '__is_compound'
+/// '__is_pointer'
+/// '__is_member_object_pointer'
+/// '__is_member_function_pointer'
+/// '__is_member_pointer'
+/// '__is_const'
+/// '__is_volatile'
+/// '__is_trivial'
+/// '__is_standard_layout'
+/// '__is_signed'
+/// '__is_unsigned'
+///
+/// [GNU] unary-type-trait:
+/// '__has_nothrow_assign'
+/// '__has_nothrow_copy'
+/// '__has_nothrow_constructor'
+/// '__has_trivial_assign' [TODO]
+/// '__has_trivial_copy' [TODO]
+/// '__has_trivial_constructor'
+/// '__has_trivial_destructor'
+/// '__has_virtual_destructor'
+/// '__is_abstract' [TODO]
+/// '__is_class'
+/// '__is_empty' [TODO]
+/// '__is_enum'
+/// '__is_final'
+/// '__is_pod'
+/// '__is_polymorphic'
+/// '__is_trivial'
+/// '__is_union'
+///
+/// [Clang] unary-type-trait:
+/// '__trivially_copyable'
+///
+/// binary-type-trait:
+/// [GNU] '__is_base_of'
+/// [MS] '__is_convertible_to'
+/// '__is_convertible'
+/// '__is_same'
+///
+/// [Embarcadero] array-type-trait:
+/// '__array_rank'
+/// '__array_extent'
+///
+/// [Embarcadero] expression-trait:
+/// '__is_lvalue_expr'
+/// '__is_rvalue_expr'
+///
+ExprResult Parser::ParseCastExpression(bool isUnaryExpression,
+ bool isAddressOfOperand,
+ bool &NotCastExpr,
+ TypeCastState isTypeCast) {
+ ExprResult Res;
+ tok::TokenKind SavedKind = Tok.getKind();
+ NotCastExpr = false;
+
+ // This handles all of cast-expression, unary-expression, postfix-expression,
+ // and primary-expression. We handle them together like this for efficiency
+  // and to simplify handling of an expression starting with a '(' token, which
+  // may be a parenthesized expression, a cast-expression, a compound literal
+  // expression, or a statement expression.
+ //
+ // If the parsed tokens consist of a primary-expression, the cases below
+ // break out of the switch; at the end we call ParsePostfixExpressionSuffix
+ // to handle the postfix expression suffixes. Cases that cannot be followed
+ // by postfix exprs should return without invoking
+ // ParsePostfixExpressionSuffix.
+ switch (SavedKind) {
+ case tok::l_paren: {
+    // If this expression is limited to being a unary-expression, the paren
+    // cannot start a cast expression.
+ ParenParseOption ParenExprType =
+ (isUnaryExpression && !getLangOpts().CPlusPlus)? CompoundLiteral : CastExpr;
+ ParsedType CastTy;
+ SourceLocation RParenLoc;
+
+ {
+      // The inside of the parens doesn't need to be a colon-protected scope,
+      // and isn't immediately a message send.
+ ColonProtectionRAIIObject X(*this, false);
+
+      Res = ParseParenExpression(ParenExprType, false/*stopIfCastExpr*/,
+ isTypeCast == IsTypeCast, CastTy, RParenLoc);
+ }
+
+ switch (ParenExprType) {
+ case SimpleExpr: break; // Nothing else to do.
+ case CompoundStmt: break; // Nothing else to do.
+ case CompoundLiteral:
+ // We parsed '(' type-name ')' '{' ... '}'. If any suffixes of
+ // postfix-expression exist, parse them now.
+ break;
+ case CastExpr:
+ // We have parsed the cast-expression and no postfix-expr pieces are
+ // following.
+ return move(Res);
+ }
+
+ break;
+ }
+
+ // primary-expression
+ case tok::numeric_constant:
+ // constant: integer-constant
+ // constant: floating-constant
+
+ Res = Actions.ActOnNumericConstant(Tok, /*UDLScope*/getCurScope());
+ ConsumeToken();
+ break;
+
+ case tok::kw_true:
+ case tok::kw_false:
+ return ParseCXXBoolLiteral();
+
+ case tok::kw___objc_yes:
+ case tok::kw___objc_no:
+ return ParseObjCBoolLiteral();
+
+ case tok::kw_nullptr:
+ Diag(Tok, diag::warn_cxx98_compat_nullptr);
+ return Actions.ActOnCXXNullPtrLiteral(ConsumeToken());
+
+ case tok::annot_primary_expr:
+ assert(Res.get() == 0 && "Stray primary-expression annotation?");
+ Res = getExprAnnotation(Tok);
+ ConsumeToken();
+ break;
+
+ case tok::kw_decltype:
+ case tok::identifier: { // primary-expression: identifier
+ // unqualified-id: identifier
+ // constant: enumeration-constant
+    // Turn a potentially qualified name into an annot_typename or
+ // annot_cxxscope if it would be valid. This handles things like x::y, etc.
+ if (getLangOpts().CPlusPlus) {
+ // Avoid the unnecessary parse-time lookup in the common case
+ // where the syntax forbids a type.
+ const Token &Next = NextToken();
+ if (Next.is(tok::coloncolon) ||
+ (!ColonIsSacred && Next.is(tok::colon)) ||
+ Next.is(tok::less) ||
+ Next.is(tok::l_paren) ||
+ Next.is(tok::l_brace)) {
+ // If TryAnnotateTypeOrScopeToken annotates the token, tail recurse.
+ if (TryAnnotateTypeOrScopeToken())
+ return ExprError();
+ if (!Tok.is(tok::identifier))
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand);
+ }
+ }
+
+ // Consume the identifier so that we can see if it is followed by a '(' or
+ // '.'.
+ IdentifierInfo &II = *Tok.getIdentifierInfo();
+ SourceLocation ILoc = ConsumeToken();
+
+ // Support 'Class.property' and 'super.property' notation.
+ if (getLangOpts().ObjC1 && Tok.is(tok::period) &&
+ (Actions.getTypeName(II, ILoc, getCurScope()) ||
+ // Allow the base to be 'super' if in an objc-method.
+ (&II == Ident_super && getCurScope()->isInObjcMethodScope()))) {
+ ConsumeToken();
+
+ // Allow either an identifier or the keyword 'class' (in C++).
+ if (Tok.isNot(tok::identifier) &&
+ !(getLangOpts().CPlusPlus && Tok.is(tok::kw_class))) {
+ Diag(Tok, diag::err_expected_property_name);
+ return ExprError();
+ }
+ IdentifierInfo &PropertyName = *Tok.getIdentifierInfo();
+ SourceLocation PropertyLoc = ConsumeToken();
+
+ Res = Actions.ActOnClassPropertyRefExpr(II, PropertyName,
+ ILoc, PropertyLoc);
+ break;
+ }
+
+ // In an Objective-C method, if we have "super" followed by an identifier,
+ // the token sequence is ill-formed. However, if there's a ':' or ']' after
+ // that identifier, this is probably a message send with a missing open
+ // bracket. Treat it as such.
+ if (getLangOpts().ObjC1 && &II == Ident_super && !InMessageExpression &&
+ getCurScope()->isInObjcMethodScope() &&
+ ((Tok.is(tok::identifier) &&
+ (NextToken().is(tok::colon) || NextToken().is(tok::r_square))) ||
+ Tok.is(tok::code_completion))) {
+ Res = ParseObjCMessageExpressionBody(SourceLocation(), ILoc, ParsedType(),
+ 0);
+ break;
+ }
+
+ // If we have an Objective-C class name followed by an identifier
+ // and either ':' or ']', this is an Objective-C class message
+    // send that's missing the opening '['. Recover
+    // appropriately. Also take this path if we're performing code
+ // completion after an Objective-C class name.
+ if (getLangOpts().ObjC1 &&
+ ((Tok.is(tok::identifier) && !InMessageExpression) ||
+ Tok.is(tok::code_completion))) {
+ const Token& Next = NextToken();
+ if (Tok.is(tok::code_completion) ||
+ Next.is(tok::colon) || Next.is(tok::r_square))
+ if (ParsedType Typ = Actions.getTypeName(II, ILoc, getCurScope()))
+ if (Typ.get()->isObjCObjectOrInterfaceType()) {
+ // Fake up a Declarator to use with ActOnTypeName.
+ DeclSpec DS(AttrFactory);
+ DS.SetRangeStart(ILoc);
+ DS.SetRangeEnd(ILoc);
+ const char *PrevSpec = 0;
+ unsigned DiagID;
+ DS.SetTypeSpecType(TST_typename, ILoc, PrevSpec, DiagID, Typ);
+
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ TypeResult Ty = Actions.ActOnTypeName(getCurScope(),
+ DeclaratorInfo);
+ if (Ty.isInvalid())
+ break;
+
+ Res = ParseObjCMessageExpressionBody(SourceLocation(),
+ SourceLocation(),
+ Ty.get(), 0);
+ break;
+ }
+ }
+
+ // Make sure to pass down the right value for isAddressOfOperand.
+ if (isAddressOfOperand && isPostfixExpressionSuffixStart())
+ isAddressOfOperand = false;
+
+ // Function designators are allowed to be undeclared (C99 6.5.1p2), so we
+    // need to know whether or not this identifier is a function designator.
+ UnqualifiedId Name;
+ CXXScopeSpec ScopeSpec;
+ SourceLocation TemplateKWLoc;
+ CastExpressionIdValidator Validator(isTypeCast != NotTypeCast,
+ isTypeCast != IsTypeCast);
+ Name.setIdentifier(&II, ILoc);
+ Res = Actions.ActOnIdExpression(getCurScope(), ScopeSpec, TemplateKWLoc,
+ Name, Tok.is(tok::l_paren),
+ isAddressOfOperand, &Validator);
+ break;
+ }
+ case tok::char_constant: // constant: character-constant
+ case tok::wide_char_constant:
+ case tok::utf16_char_constant:
+ case tok::utf32_char_constant:
+ Res = Actions.ActOnCharacterConstant(Tok, /*UDLScope*/getCurScope());
+ ConsumeToken();
+ break;
+ case tok::kw___func__: // primary-expression: __func__ [C99 6.4.2.2]
+ case tok::kw___FUNCTION__: // primary-expression: __FUNCTION__ [GNU]
+ case tok::kw___PRETTY_FUNCTION__: // primary-expression: __P..Y_F..N__ [GNU]
+ Res = Actions.ActOnPredefinedExpr(Tok.getLocation(), SavedKind);
+ ConsumeToken();
+ break;
+ case tok::string_literal: // primary-expression: string-literal
+ case tok::wide_string_literal:
+ case tok::utf8_string_literal:
+ case tok::utf16_string_literal:
+ case tok::utf32_string_literal:
+ Res = ParseStringLiteralExpression(true);
+ break;
+ case tok::kw__Generic: // primary-expression: generic-selection [C11 6.5.1]
+ Res = ParseGenericSelectionExpression();
+ break;
+ case tok::kw___builtin_va_arg:
+ case tok::kw___builtin_offsetof:
+ case tok::kw___builtin_choose_expr:
+ case tok::kw___builtin_astype: // primary-expression: [OCL] as_type()
+ return ParseBuiltinPrimaryExpression();
+ case tok::kw___null:
+ return Actions.ActOnGNUNullExpr(ConsumeToken());
+
+ case tok::plusplus: // unary-expression: '++' unary-expression [C99]
+ case tok::minusminus: { // unary-expression: '--' unary-expression [C99]
+ // C++ [expr.unary] has:
+ // unary-expression:
+ // ++ cast-expression
+ // -- cast-expression
+ SourceLocation SavedLoc = ConsumeToken();
+ Res = ParseCastExpression(!getLangOpts().CPlusPlus);
+ if (!Res.isInvalid())
+ Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
+ return move(Res);
+ }
+ case tok::amp: { // unary-expression: '&' cast-expression
+ // Special treatment because of member pointers
+ SourceLocation SavedLoc = ConsumeToken();
+ Res = ParseCastExpression(false, true);
+ if (!Res.isInvalid())
+ Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
+ return move(Res);
+ }
+
+ case tok::star: // unary-expression: '*' cast-expression
+ case tok::plus: // unary-expression: '+' cast-expression
+ case tok::minus: // unary-expression: '-' cast-expression
+ case tok::tilde: // unary-expression: '~' cast-expression
+ case tok::exclaim: // unary-expression: '!' cast-expression
+ case tok::kw___real: // unary-expression: '__real' cast-expression [GNU]
+ case tok::kw___imag: { // unary-expression: '__imag' cast-expression [GNU]
+ SourceLocation SavedLoc = ConsumeToken();
+ Res = ParseCastExpression(false);
+ if (!Res.isInvalid())
+ Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
+ return move(Res);
+ }
+
+ case tok::kw___extension__:{//unary-expression:'__extension__' cast-expr [GNU]
+ // __extension__ silences extension warnings in the subexpression.
+ ExtensionRAIIObject O(Diags); // Use RAII to do this.
+ SourceLocation SavedLoc = ConsumeToken();
+ Res = ParseCastExpression(false);
+ if (!Res.isInvalid())
+ Res = Actions.ActOnUnaryOp(getCurScope(), SavedLoc, SavedKind, Res.get());
+ return move(Res);
+ }
+ case tok::kw_sizeof: // unary-expression: 'sizeof' unary-expression
+ // unary-expression: 'sizeof' '(' type-name ')'
+ case tok::kw_alignof:
+ case tok::kw___alignof: // unary-expression: '__alignof' unary-expression
+ // unary-expression: '__alignof' '(' type-name ')'
+ // unary-expression: 'alignof' '(' type-id ')'
+ case tok::kw_vec_step: // unary-expression: OpenCL 'vec_step' expression
+ return ParseUnaryExprOrTypeTraitExpression();
+ case tok::ampamp: { // unary-expression: '&&' identifier
+ SourceLocation AmpAmpLoc = ConsumeToken();
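+    // This is the GNU address-of-label extension, e.g. '&&retry', typically
+    // stored in a 'void *' and used with a computed goto ('goto *p;').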
+ if (Tok.isNot(tok::identifier))
+ return ExprError(Diag(Tok, diag::err_expected_ident));
+
+ if (getCurScope()->getFnParent() == 0)
+ return ExprError(Diag(Tok, diag::err_address_of_label_outside_fn));
+
+ Diag(AmpAmpLoc, diag::ext_gnu_address_of_label);
+ LabelDecl *LD = Actions.LookupOrCreateLabel(Tok.getIdentifierInfo(),
+ Tok.getLocation());
+ Res = Actions.ActOnAddrLabel(AmpAmpLoc, Tok.getLocation(), LD);
+ ConsumeToken();
+ return move(Res);
+ }
+ case tok::kw_const_cast:
+ case tok::kw_dynamic_cast:
+ case tok::kw_reinterpret_cast:
+ case tok::kw_static_cast:
+ Res = ParseCXXCasts();
+ break;
+ case tok::kw_typeid:
+ Res = ParseCXXTypeid();
+ break;
+ case tok::kw___uuidof:
+ Res = ParseCXXUuidof();
+ break;
+ case tok::kw_this:
+ Res = ParseCXXThis();
+ break;
+
+ case tok::annot_typename:
+ if (isStartOfObjCClassMessageMissingOpenBracket()) {
+ ParsedType Type = getTypeAnnotation(Tok);
+
+ // Fake up a Declarator to use with ActOnTypeName.
+ DeclSpec DS(AttrFactory);
+ DS.SetRangeStart(Tok.getLocation());
+ DS.SetRangeEnd(Tok.getLastLoc());
+
+ const char *PrevSpec = 0;
+ unsigned DiagID;
+ DS.SetTypeSpecType(TST_typename, Tok.getAnnotationEndLoc(),
+ PrevSpec, DiagID, Type);
+
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ TypeResult Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ if (Ty.isInvalid())
+ break;
+
+ ConsumeToken();
+ Res = ParseObjCMessageExpressionBody(SourceLocation(), SourceLocation(),
+ Ty.get(), 0);
+ break;
+ }
+ // Fall through
+
+ case tok::annot_decltype:
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_bool:
+ case tok::kw_short:
+ case tok::kw_int:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_void:
+ case tok::kw_typename:
+ case tok::kw_typeof:
+ case tok::kw___vector: {
+ if (!getLangOpts().CPlusPlus) {
+ Diag(Tok, diag::err_expected_expression);
+ return ExprError();
+ }
+
+ if (SavedKind == tok::kw_typename) {
+ // postfix-expression: typename-specifier '(' expression-list[opt] ')'
+ // typename-specifier braced-init-list
+ if (TryAnnotateTypeOrScopeToken())
+ return ExprError();
+ }
+
+ // postfix-expression: simple-type-specifier '(' expression-list[opt] ')'
+ // simple-type-specifier braced-init-list
+ //
+ DeclSpec DS(AttrFactory);
+ ParseCXXSimpleTypeSpecifier(DS);
+ if (Tok.isNot(tok::l_paren) &&
+ (!getLangOpts().CPlusPlus0x || Tok.isNot(tok::l_brace)))
+ return ExprError(Diag(Tok, diag::err_expected_lparen_after_type)
+ << DS.getSourceRange());
+
+ if (Tok.is(tok::l_brace))
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+
+ Res = ParseCXXTypeConstructExpression(DS);
+ break;
+ }
+
+ case tok::annot_cxxscope: { // [C++] id-expression: qualified-id
+ // If TryAnnotateTypeOrScopeToken annotates the token, tail recurse.
+ // (We can end up in this situation after tentative parsing.)
+ if (TryAnnotateTypeOrScopeToken())
+ return ExprError();
+ if (!Tok.is(tok::annot_cxxscope))
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
+ NotCastExpr, isTypeCast);
+
+ Token Next = NextToken();
+ if (Next.is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Next);
+ if (TemplateId->Kind == TNK_Type_template) {
+ // We have a qualified template-id that we know refers to a
+ // type, translate it into a type and continue parsing as a
+ // cast expression.
+ CXXScopeSpec SS;
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
+ /*EnteringContext=*/false);
+ AnnotateTemplateIdTokenAsType();
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
+ NotCastExpr, isTypeCast);
+ }
+ }
+
+ // Parse as an id-expression.
+ Res = ParseCXXIdExpression(isAddressOfOperand);
+ break;
+ }
+
+ case tok::annot_template_id: { // [C++] template-id
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ if (TemplateId->Kind == TNK_Type_template) {
+ // We have a template-id that we know refers to a type,
+ // translate it into a type and continue parsing as a cast
+ // expression.
+ AnnotateTemplateIdTokenAsType();
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand,
+ NotCastExpr, isTypeCast);
+ }
+
+ // Fall through to treat the template-id as an id-expression.
+ }
+
+ case tok::kw_operator: // [C++] id-expression: operator/conversion-function-id
+ Res = ParseCXXIdExpression(isAddressOfOperand);
+ break;
+
+ case tok::coloncolon: {
+ // ::foo::bar -> global qualified name etc. If TryAnnotateTypeOrScopeToken
+ // annotates the token, tail recurse.
+ if (TryAnnotateTypeOrScopeToken())
+ return ExprError();
+ if (!Tok.is(tok::coloncolon))
+ return ParseCastExpression(isUnaryExpression, isAddressOfOperand);
+
+ // ::new -> [C++] new-expression
+ // ::delete -> [C++] delete-expression
+ SourceLocation CCLoc = ConsumeToken();
+ if (Tok.is(tok::kw_new))
+ return ParseCXXNewExpression(true, CCLoc);
+ if (Tok.is(tok::kw_delete))
+ return ParseCXXDeleteExpression(true, CCLoc);
+
+    // This is not a type name or scope specifier; it is an invalid expression.
+ Diag(CCLoc, diag::err_expected_expression);
+ return ExprError();
+ }
+
+ case tok::kw_new: // [C++] new-expression
+ return ParseCXXNewExpression(false, Tok.getLocation());
+
+ case tok::kw_delete: // [C++] delete-expression
+ return ParseCXXDeleteExpression(false, Tok.getLocation());
+
+ case tok::kw_noexcept: { // [C++0x] 'noexcept' '(' expression ')'
+ Diag(Tok, diag::warn_cxx98_compat_noexcept_expr);
+ SourceLocation KeyLoc = ConsumeToken();
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+
+ if (T.expectAndConsume(diag::err_expected_lparen_after, "noexcept"))
+ return ExprError();
+ // C++11 [expr.unary.noexcept]p1:
+ // The noexcept operator determines whether the evaluation of its operand,
+ // which is an unevaluated operand, can throw an exception.
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
+ ExprResult Result = ParseExpression();
+
+ T.consumeClose();
+
+ if (!Result.isInvalid())
+ Result = Actions.ActOnNoexceptExpr(KeyLoc, T.getOpenLocation(),
+ Result.take(), T.getCloseLocation());
+ return move(Result);
+ }
+
+ case tok::kw___is_abstract: // [GNU] unary-type-trait
+ case tok::kw___is_class:
+ case tok::kw___is_empty:
+ case tok::kw___is_enum:
+ case tok::kw___is_literal:
+ case tok::kw___is_arithmetic:
+ case tok::kw___is_integral:
+ case tok::kw___is_floating_point:
+ case tok::kw___is_complete_type:
+ case tok::kw___is_void:
+ case tok::kw___is_array:
+ case tok::kw___is_function:
+ case tok::kw___is_reference:
+ case tok::kw___is_lvalue_reference:
+ case tok::kw___is_rvalue_reference:
+ case tok::kw___is_fundamental:
+ case tok::kw___is_object:
+ case tok::kw___is_scalar:
+ case tok::kw___is_compound:
+ case tok::kw___is_pointer:
+ case tok::kw___is_member_object_pointer:
+ case tok::kw___is_member_function_pointer:
+ case tok::kw___is_member_pointer:
+ case tok::kw___is_const:
+ case tok::kw___is_volatile:
+ case tok::kw___is_standard_layout:
+ case tok::kw___is_signed:
+ case tok::kw___is_unsigned:
+ case tok::kw___is_literal_type:
+ case tok::kw___is_pod:
+ case tok::kw___is_polymorphic:
+ case tok::kw___is_trivial:
+ case tok::kw___is_trivially_copyable:
+ case tok::kw___is_union:
+ case tok::kw___is_final:
+ case tok::kw___has_trivial_constructor:
+ case tok::kw___has_trivial_copy:
+ case tok::kw___has_trivial_assign:
+ case tok::kw___has_trivial_destructor:
+ case tok::kw___has_nothrow_assign:
+ case tok::kw___has_nothrow_copy:
+ case tok::kw___has_nothrow_constructor:
+ case tok::kw___has_virtual_destructor:
+ return ParseUnaryTypeTrait();
+
+ case tok::kw___builtin_types_compatible_p:
+ case tok::kw___is_base_of:
+ case tok::kw___is_same:
+ case tok::kw___is_convertible:
+ case tok::kw___is_convertible_to:
+ case tok::kw___is_trivially_assignable:
+ return ParseBinaryTypeTrait();
+
+ case tok::kw___is_trivially_constructible:
+ return ParseTypeTrait();
+
+ case tok::kw___array_rank:
+ case tok::kw___array_extent:
+ return ParseArrayTypeTrait();
+
+ case tok::kw___is_lvalue_expr:
+ case tok::kw___is_rvalue_expr:
+ return ParseExpressionTrait();
+
+ case tok::at: {
+ SourceLocation AtLoc = ConsumeToken();
+ return ParseObjCAtExpression(AtLoc);
+ }
+ case tok::caret:
+ Res = ParseBlockLiteralExpression();
+ break;
+ case tok::code_completion: {
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Expression);
+ cutOffParsing();
+ return ExprError();
+ }
+ case tok::l_square:
+ if (getLangOpts().CPlusPlus0x) {
+ if (getLangOpts().ObjC1) {
+ // C++11 lambda expressions and Objective-C message sends both start with a
+ // square bracket. There are three possibilities here:
+ // we have a valid lambda expression, we have an invalid lambda
+ // expression, or we have something that doesn't appear to be a lambda.
+ // If we're in the last case, we fall back to ParseObjCMessageExpression.
+ Res = TryParseLambdaExpression();
+ if (!Res.isInvalid() && !Res.get())
+ Res = ParseObjCMessageExpression();
+ break;
+ }
+ Res = ParseLambdaExpression();
+ break;
+ }
+ if (getLangOpts().ObjC1) {
+ Res = ParseObjCMessageExpression();
+ break;
+ }
+ // FALL THROUGH.
+ default:
+ NotCastExpr = true;
+ return ExprError();
+ }
+
+ // These can be followed by postfix-expr pieces.
+ return ParsePostfixExpressionSuffix(Res);
+}
+
+/// ParsePostfixExpressionSuffix - Once the leading part of a postfix-expression
+/// is parsed, this method parses any suffixes that apply.
+///
+/// postfix-expression: [C99 6.5.2]
+/// primary-expression
+/// postfix-expression '[' expression ']'
+/// postfix-expression '[' braced-init-list ']'
+/// postfix-expression '(' argument-expression-list[opt] ')'
+/// postfix-expression '.' identifier
+/// postfix-expression '->' identifier
+/// postfix-expression '++'
+/// postfix-expression '--'
+/// '(' type-name ')' '{' initializer-list '}'
+/// '(' type-name ')' '{' initializer-list ',' '}'
+///
+/// argument-expression-list: [C99 6.5.2]
+/// argument-expression ...[opt]
+/// argument-expression-list ',' assignment-expression ...[opt]
+///
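+/// For example, after the primary-expression 'base' in
+/// 'base.buf[i]->next(arg)++', this routine consumes the '.', '[...]', '->',
+/// call, and '++' suffixes one at a time, folding each one into LHS.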
+ExprResult
+Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
+ // Now that the primary-expression piece of the postfix-expression has been
+ // parsed, see if there are any postfix-expression pieces here.
+ SourceLocation Loc;
+ while (1) {
+ switch (Tok.getKind()) {
+ case tok::code_completion:
+ if (InMessageExpression)
+ return move(LHS);
+
+ Actions.CodeCompletePostfixExpression(getCurScope(), LHS);
+ cutOffParsing();
+ return ExprError();
+
+ case tok::identifier:
+ // If we see identifier: after an expression, and we're not already in a
+ // message send, then this is probably a message send with a missing
+ // opening bracket '['.
+ if (getLangOpts().ObjC1 && !InMessageExpression &&
+ (NextToken().is(tok::colon) || NextToken().is(tok::r_square))) {
+ LHS = ParseObjCMessageExpressionBody(SourceLocation(), SourceLocation(),
+ ParsedType(), LHS.get());
+ break;
+ }
+
+ // Fall through; this isn't a message send.
+
+ default: // Not a postfix-expression suffix.
+ return move(LHS);
+ case tok::l_square: { // postfix-expression: p-e '[' expression ']'
+ // If we have a array postfix expression that starts on a new line and
+ // Objective-C is enabled, it is highly likely that the user forgot a
+ // semicolon after the base expression and that the array postfix-expr is
+ // actually another message send. In this case, do some look-ahead to see
+ // if the contents of the square brackets are obviously not a valid
+ // expression and recover by pretending there is no suffix.
+ if (getLangOpts().ObjC1 && Tok.isAtStartOfLine() &&
+ isSimpleObjCMessageExpression())
+ return move(LHS);
+
+ // Reject array indices starting with a lambda-expression. '[[' is
+ // reserved for attributes.
+ if (CheckProhibitedCXX11Attribute())
+ return ExprError();
+
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+ Loc = T.getOpenLocation();
+ ExprResult Idx;
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+ Idx = ParseBraceInitializer();
+ } else
+ Idx = ParseExpression();
+
+ SourceLocation RLoc = Tok.getLocation();
+
+ if (!LHS.isInvalid() && !Idx.isInvalid() && Tok.is(tok::r_square)) {
+ LHS = Actions.ActOnArraySubscriptExpr(getCurScope(), LHS.take(), Loc,
+ Idx.take(), RLoc);
+ } else
+ LHS = ExprError();
+
+ // Match the ']'.
+ T.consumeClose();
+ break;
+ }
+
+ case tok::l_paren: // p-e: p-e '(' argument-expression-list[opt] ')'
+ case tok::lesslessless: { // p-e: p-e '<<<' argument-expression-list '>>>'
+ // '(' argument-expression-list[opt] ')'
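+      // The '<<<'...'>>>' form is a CUDA kernel-launch configuration, e.g.
+      // 'kernel<<<numBlocks, threadsPerBlock>>>(args)'.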
+ tok::TokenKind OpKind = Tok.getKind();
+ InMessageExpressionRAIIObject InMessage(*this, false);
+
+ Expr *ExecConfig = 0;
+
+ BalancedDelimiterTracker PT(*this, tok::l_paren);
+
+ if (OpKind == tok::lesslessless) {
+ ExprVector ExecConfigExprs(Actions);
+ CommaLocsTy ExecConfigCommaLocs;
+ SourceLocation OpenLoc = ConsumeToken();
+
+ if (ParseExpressionList(ExecConfigExprs, ExecConfigCommaLocs)) {
+ LHS = ExprError();
+ }
+
+ SourceLocation CloseLoc = Tok.getLocation();
+ if (Tok.is(tok::greatergreatergreater)) {
+ ConsumeToken();
+ } else if (LHS.isInvalid()) {
+ SkipUntil(tok::greatergreatergreater);
+ } else {
+ // There was an error closing the brackets
+ Diag(Tok, diag::err_expected_ggg);
+ Diag(OpenLoc, diag::note_matching) << "<<<";
+ SkipUntil(tok::greatergreatergreater);
+ LHS = ExprError();
+ }
+
+ if (!LHS.isInvalid()) {
+ if (ExpectAndConsume(tok::l_paren, diag::err_expected_lparen, ""))
+ LHS = ExprError();
+ else
+ Loc = PrevTokLocation;
+ }
+
+ if (!LHS.isInvalid()) {
+ ExprResult ECResult = Actions.ActOnCUDAExecConfigExpr(getCurScope(),
+ OpenLoc,
+ move_arg(ExecConfigExprs),
+ CloseLoc);
+ if (ECResult.isInvalid())
+ LHS = ExprError();
+ else
+ ExecConfig = ECResult.get();
+ }
+ } else {
+ PT.consumeOpen();
+ Loc = PT.getOpenLocation();
+ }
+
+ ExprVector ArgExprs(Actions);
+ CommaLocsTy CommaLocs;
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteCall(getCurScope(), LHS.get(),
+ llvm::ArrayRef<Expr *>());
+ cutOffParsing();
+ return ExprError();
+ }
+
+ if (OpKind == tok::l_paren || !LHS.isInvalid()) {
+ if (Tok.isNot(tok::r_paren)) {
+ if (ParseExpressionList(ArgExprs, CommaLocs, &Sema::CodeCompleteCall,
+ LHS.get())) {
+ LHS = ExprError();
+ }
+ }
+ }
+
+ // Match the ')'.
+ if (LHS.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ } else if (Tok.isNot(tok::r_paren)) {
+ PT.consumeClose();
+ LHS = ExprError();
+ } else {
+ assert((ArgExprs.size() == 0 ||
+ ArgExprs.size()-1 == CommaLocs.size())&&
+ "Unexpected number of commas!");
+ LHS = Actions.ActOnCallExpr(getCurScope(), LHS.take(), Loc,
+ move_arg(ArgExprs), Tok.getLocation(),
+ ExecConfig);
+ PT.consumeClose();
+ }
+
+ break;
+ }
+ case tok::arrow:
+ case tok::period: {
+ // postfix-expression: p-e '->' template[opt] id-expression
+ // postfix-expression: p-e '.' template[opt] id-expression
+ tok::TokenKind OpKind = Tok.getKind();
+ SourceLocation OpLoc = ConsumeToken(); // Eat the "." or "->" token.
+
+ CXXScopeSpec SS;
+ ParsedType ObjectType;
+ bool MayBePseudoDestructor = false;
+ if (getLangOpts().CPlusPlus && !LHS.isInvalid()) {
+ LHS = Actions.ActOnStartCXXMemberReference(getCurScope(), LHS.take(),
+ OpLoc, OpKind, ObjectType,
+ MayBePseudoDestructor);
+ if (LHS.isInvalid())
+ break;
+
+ ParseOptionalCXXScopeSpecifier(SS, ObjectType,
+ /*EnteringContext=*/false,
+ &MayBePseudoDestructor);
+ if (SS.isNotEmpty())
+ ObjectType = ParsedType();
+ }
+
+ if (Tok.is(tok::code_completion)) {
+ // Code completion for a member access expression.
+ Actions.CodeCompleteMemberReferenceExpr(getCurScope(), LHS.get(),
+ OpLoc, OpKind == tok::arrow);
+
+ cutOffParsing();
+ return ExprError();
+ }
+
+ if (MayBePseudoDestructor && !LHS.isInvalid()) {
+ LHS = ParseCXXPseudoDestructor(LHS.take(), OpLoc, OpKind, SS,
+ ObjectType);
+ break;
+ }
+
+      // Either the action has told us that this cannot be a
+      // pseudo-destructor expression (based on the type of the base
+ // expression), or we didn't see a '~' in the right place. We
+ // can still parse a destructor name here, but in that case it
+ // names a real destructor.
+ // Allow explicit constructor calls in Microsoft mode.
+ // FIXME: Add support for explicit call of template constructor.
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId Name;
+ if (getLangOpts().ObjC2 && OpKind == tok::period && Tok.is(tok::kw_class)) {
+ // Objective-C++:
+ // After a '.' in a member access expression, treat the keyword
+ // 'class' as if it were an identifier.
+ //
+ // This hack allows property access to the 'class' method because it is
+ // such a common method name. For other C++ keywords that are
+ // Objective-C method names, one must use the message send syntax.
+ IdentifierInfo *Id = Tok.getIdentifierInfo();
+ SourceLocation Loc = ConsumeToken();
+ Name.setIdentifier(Id, Loc);
+ } else if (ParseUnqualifiedId(SS,
+ /*EnteringContext=*/false,
+ /*AllowDestructorName=*/true,
+ /*AllowConstructorName=*/
+ getLangOpts().MicrosoftExt,
+ ObjectType, TemplateKWLoc, Name))
+ LHS = ExprError();
+
+ if (!LHS.isInvalid())
+ LHS = Actions.ActOnMemberAccessExpr(getCurScope(), LHS.take(), OpLoc,
+ OpKind, SS, TemplateKWLoc, Name,
+ CurParsedObjCImpl ? CurParsedObjCImpl->Dcl : 0,
+ Tok.is(tok::l_paren));
+ break;
+ }
+ case tok::plusplus: // postfix-expression: postfix-expression '++'
+ case tok::minusminus: // postfix-expression: postfix-expression '--'
+ if (!LHS.isInvalid()) {
+ LHS = Actions.ActOnPostfixUnaryOp(getCurScope(), Tok.getLocation(),
+ Tok.getKind(), LHS.take());
+ }
+ ConsumeToken();
+ break;
+ }
+ }
+}
+
+/// ParseExprAfterUnaryExprOrTypeTrait - We parsed a typeof/sizeof/alignof/
+/// vec_step and we are at the start of an expression or a parenthesized
+/// type-id. OpTok is the operator token (typeof/sizeof/alignof). Returns the
+/// expression (isCastExpr == false) or the type (isCastExpr == true).
+///
+/// unary-expression: [C99 6.5.3]
+/// 'sizeof' unary-expression
+/// 'sizeof' '(' type-name ')'
+/// [GNU] '__alignof' unary-expression
+/// [GNU] '__alignof' '(' type-name ')'
+/// [C++0x] 'alignof' '(' type-id ')'
+///
+/// [GNU] typeof-specifier:
+///           typeof ( expression )
+/// typeof ( type-name )
+/// [GNU/C++] typeof unary-expression
+///
+/// [OpenCL 1.1 6.11.12] vec_step built-in function:
+///           vec_step ( expression )
+/// vec_step ( type-name )
+///
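+/// For example, with OpTok 'sizeof', the operand '(int)' sets isCastExpr to
+/// true and returns the type in CastTy, while 'x' or '(x + 1)[0]' is parsed
+/// as an expression (including postfix suffixes) and isCastExpr is set to
+/// false.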
+ExprResult
+Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
+ bool &isCastExpr,
+ ParsedType &CastTy,
+ SourceRange &CastRange) {
+
+ assert((OpTok.is(tok::kw_typeof) || OpTok.is(tok::kw_sizeof) ||
+ OpTok.is(tok::kw___alignof) || OpTok.is(tok::kw_alignof) ||
+ OpTok.is(tok::kw_vec_step)) &&
+ "Not a typeof/sizeof/alignof/vec_step expression!");
+
+ ExprResult Operand;
+
+ // If the operand doesn't start with an '(', it must be an expression.
+ if (Tok.isNot(tok::l_paren)) {
+ isCastExpr = false;
+ if (OpTok.is(tok::kw_typeof) && !getLangOpts().CPlusPlus) {
+ Diag(Tok,diag::err_expected_lparen_after_id) << OpTok.getIdentifierInfo();
+ return ExprError();
+ }
+
+ Operand = ParseCastExpression(true/*isUnaryExpression*/);
+ } else {
+ // If it starts with a '(', we know that it is either a parenthesized
+ // type-name, or it is a unary-expression that starts with a compound
+ // literal, or starts with a primary-expression that is a parenthesized
+ // expression.
+ ParenParseOption ExprType = CastExpr;
+ SourceLocation LParenLoc = Tok.getLocation(), RParenLoc;
+
+ Operand = ParseParenExpression(ExprType, true/*stopIfCastExpr*/,
+ false, CastTy, RParenLoc);
+ CastRange = SourceRange(LParenLoc, RParenLoc);
+
+ // If ParseParenExpression parsed a '(typename)' sequence only, then this is
+ // a type.
+ if (ExprType == CastExpr) {
+ isCastExpr = true;
+ return ExprEmpty();
+ }
+
+ if (getLangOpts().CPlusPlus || OpTok.isNot(tok::kw_typeof)) {
+ // GNU typeof in C requires the expression to be parenthesized. Not so for
+ // sizeof/alignof or in C++. Therefore, the parenthesized expression is
+ // the start of a unary-expression, but doesn't include any postfix
+ // pieces. Parse these now if present.
+ if (!Operand.isInvalid())
+ Operand = ParsePostfixExpressionSuffix(Operand.get());
+ }
+ }
+
+  // If we get here, the operand to the typeof/sizeof/alignof was an expression.
+ isCastExpr = false;
+ return move(Operand);
+}
+
+
+/// ParseUnaryExprOrTypeTraitExpression - Parse a sizeof or alignof expression.
+/// unary-expression: [C99 6.5.3]
+/// 'sizeof' unary-expression
+/// 'sizeof' '(' type-name ')'
+/// [C++0x] 'sizeof' '...' '(' identifier ')'
+/// [GNU] '__alignof' unary-expression
+/// [GNU] '__alignof' '(' type-name ')'
+/// [C++0x] 'alignof' '(' type-id ')'
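+///
+/// For example, 'sizeof buf', 'sizeof(struct stat)', and (inside a variadic
+/// template) 'sizeof...(Args)' all enter through this function; a
+/// sizeof/alignof operand is parsed in an unevaluated context.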
+ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
+ assert((Tok.is(tok::kw_sizeof) || Tok.is(tok::kw___alignof)
+ || Tok.is(tok::kw_alignof) || Tok.is(tok::kw_vec_step)) &&
+ "Not a sizeof/alignof/vec_step expression!");
+ Token OpTok = Tok;
+ ConsumeToken();
+
+ // [C++0x] 'sizeof' '...' '(' identifier ')'
+ if (Tok.is(tok::ellipsis) && OpTok.is(tok::kw_sizeof)) {
+ SourceLocation EllipsisLoc = ConsumeToken();
+ SourceLocation LParenLoc, RParenLoc;
+ IdentifierInfo *Name = 0;
+ SourceLocation NameLoc;
+ if (Tok.is(tok::l_paren)) {
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ LParenLoc = T.getOpenLocation();
+ if (Tok.is(tok::identifier)) {
+ Name = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+ T.consumeClose();
+ RParenLoc = T.getCloseLocation();
+ if (RParenLoc.isInvalid())
+ RParenLoc = PP.getLocForEndOfToken(NameLoc);
+ } else {
+ Diag(Tok, diag::err_expected_parameter_pack);
+ SkipUntil(tok::r_paren);
+ }
+ } else if (Tok.is(tok::identifier)) {
+ Name = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+ LParenLoc = PP.getLocForEndOfToken(EllipsisLoc);
+ RParenLoc = PP.getLocForEndOfToken(NameLoc);
+ Diag(LParenLoc, diag::err_paren_sizeof_parameter_pack)
+ << Name
+ << FixItHint::CreateInsertion(LParenLoc, "(")
+ << FixItHint::CreateInsertion(RParenLoc, ")");
+ } else {
+ Diag(Tok, diag::err_sizeof_parameter_pack);
+ }
+
+ if (!Name)
+ return ExprError();
+
+ return Actions.ActOnSizeofParameterPackExpr(getCurScope(),
+ OpTok.getLocation(),
+ *Name, NameLoc,
+ RParenLoc);
+ }
+
+ if (OpTok.is(tok::kw_alignof))
+ Diag(OpTok, diag::warn_cxx98_compat_alignof);
+
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
+
+ bool isCastExpr;
+ ParsedType CastTy;
+ SourceRange CastRange;
+ ExprResult Operand = ParseExprAfterUnaryExprOrTypeTrait(OpTok,
+ isCastExpr,
+ CastTy,
+ CastRange);
+
+ UnaryExprOrTypeTrait ExprKind = UETT_SizeOf;
+ if (OpTok.is(tok::kw_alignof) || OpTok.is(tok::kw___alignof))
+ ExprKind = UETT_AlignOf;
+ else if (OpTok.is(tok::kw_vec_step))
+ ExprKind = UETT_VecStep;
+
+ if (isCastExpr)
+ return Actions.ActOnUnaryExprOrTypeTraitExpr(OpTok.getLocation(),
+ ExprKind,
+ /*isType=*/true,
+ CastTy.getAsOpaquePtr(),
+ CastRange);
+
+  // If we get here, the operand to the sizeof/alignof was an expression.
+ if (!Operand.isInvalid())
+ Operand = Actions.ActOnUnaryExprOrTypeTraitExpr(OpTok.getLocation(),
+ ExprKind,
+ /*isType=*/false,
+ Operand.release(),
+ CastRange);
+ return move(Operand);
+}
+
+/// ParseBuiltinPrimaryExpression
+///
+/// primary-expression: [C99 6.5.1]
+/// [GNU] '__builtin_va_arg' '(' assignment-expression ',' type-name ')'
+/// [GNU] '__builtin_offsetof' '(' type-name ',' offsetof-member-designator')'
+/// [GNU] '__builtin_choose_expr' '(' assign-expr ',' assign-expr ','
+/// assign-expr ')'
+/// [GNU] '__builtin_types_compatible_p' '(' type-name ',' type-name ')'
+/// [OCL] '__builtin_astype' '(' assignment-expression ',' type-name ')'
+///
+/// [GNU] offsetof-member-designator:
+/// [GNU] identifier
+/// [GNU] offsetof-member-designator '.' identifier
+/// [GNU] offsetof-member-designator '[' expression ']'
+///
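+/// For example, '__builtin_offsetof(struct S, a.b[2].c)' is parsed below by
+/// collecting the '.'/'[ ]' designator components one at a time, and
+/// '__builtin_va_arg(ap, int)' takes an assignment-expression followed by a
+/// type-name.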
+ExprResult Parser::ParseBuiltinPrimaryExpression() {
+ ExprResult Res;
+ const IdentifierInfo *BuiltinII = Tok.getIdentifierInfo();
+
+ tok::TokenKind T = Tok.getKind();
+ SourceLocation StartLoc = ConsumeToken(); // Eat the builtin identifier.
+
+ // All of these start with an open paren.
+ if (Tok.isNot(tok::l_paren))
+ return ExprError(Diag(Tok, diag::err_expected_lparen_after_id)
+ << BuiltinII);
+
+ BalancedDelimiterTracker PT(*this, tok::l_paren);
+ PT.consumeOpen();
+
+ // TODO: Build AST.
+
+ switch (T) {
+ default: llvm_unreachable("Not a builtin primary expression!");
+ case tok::kw___builtin_va_arg: {
+ ExprResult Expr(ParseAssignmentExpression());
+
+ if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",tok::r_paren))
+ Expr = ExprError();
+
+ TypeResult Ty = ParseTypeName();
+
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected_rparen);
+ Expr = ExprError();
+ }
+
+ if (Expr.isInvalid() || Ty.isInvalid())
+ Res = ExprError();
+ else
+ Res = Actions.ActOnVAArg(StartLoc, Expr.take(), Ty.get(), ConsumeParen());
+ break;
+ }
+ case tok::kw___builtin_offsetof: {
+ SourceLocation TypeLoc = Tok.getLocation();
+ TypeResult Ty = ParseTypeName();
+ if (Ty.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",tok::r_paren))
+ return ExprError();
+
+ // We must have at least one identifier here.
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ // Keep track of the various subcomponents we see.
+ SmallVector<Sema::OffsetOfComponent, 4> Comps;
+
+ Comps.push_back(Sema::OffsetOfComponent());
+ Comps.back().isBrackets = false;
+ Comps.back().U.IdentInfo = Tok.getIdentifierInfo();
+ Comps.back().LocStart = Comps.back().LocEnd = ConsumeToken();
+
+ // FIXME: This loop leaks the index expressions on error.
+ while (1) {
+ if (Tok.is(tok::period)) {
+ // offsetof-member-designator: offsetof-member-designator '.' identifier
+ Comps.push_back(Sema::OffsetOfComponent());
+ Comps.back().isBrackets = false;
+ Comps.back().LocStart = ConsumeToken();
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+ Comps.back().U.IdentInfo = Tok.getIdentifierInfo();
+ Comps.back().LocEnd = ConsumeToken();
+
+ } else if (Tok.is(tok::l_square)) {
+ if (CheckProhibitedCXX11Attribute())
+ return ExprError();
+
+ // offsetof-member-designator: offsetof-member-design '[' expression ']'
+ Comps.push_back(Sema::OffsetOfComponent());
+ Comps.back().isBrackets = true;
+ BalancedDelimiterTracker ST(*this, tok::l_square);
+ ST.consumeOpen();
+ Comps.back().LocStart = ST.getOpenLocation();
+ Res = ParseExpression();
+ if (Res.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return move(Res);
+ }
+ Comps.back().U.E = Res.release();
+
+ ST.consumeClose();
+ Comps.back().LocEnd = ST.getCloseLocation();
+ } else {
+ if (Tok.isNot(tok::r_paren)) {
+ PT.consumeClose();
+ Res = ExprError();
+ } else if (Ty.isInvalid()) {
+ Res = ExprError();
+ } else {
+ PT.consumeClose();
+ Res = Actions.ActOnBuiltinOffsetOf(getCurScope(), StartLoc, TypeLoc,
+ Ty.get(), &Comps[0], Comps.size(),
+ PT.getCloseLocation());
+ }
+ break;
+ }
+ }
+ break;
+ }
+ case tok::kw___builtin_choose_expr: {
+ ExprResult Cond(ParseAssignmentExpression());
+ if (Cond.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return move(Cond);
+ }
+ if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",tok::r_paren))
+ return ExprError();
+
+ ExprResult Expr1(ParseAssignmentExpression());
+ if (Expr1.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return move(Expr1);
+ }
+ if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",tok::r_paren))
+ return ExprError();
+
+ ExprResult Expr2(ParseAssignmentExpression());
+ if (Expr2.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return move(Expr2);
+ }
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected_rparen);
+ return ExprError();
+ }
+ Res = Actions.ActOnChooseExpr(StartLoc, Cond.take(), Expr1.take(),
+ Expr2.take(), ConsumeParen());
+ break;
+ }
+ case tok::kw___builtin_astype: {
+ // The first argument is an expression to be converted, followed by a comma.
+ ExprResult Expr(ParseAssignmentExpression());
+ if (Expr.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "",
+ tok::r_paren))
+ return ExprError();
+
+ // Second argument is the type to bitcast to.
+ TypeResult DestTy = ParseTypeName();
+ if (DestTy.isInvalid())
+ return ExprError();
+
+ // Attempt to consume the r-paren.
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected_rparen);
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ Res = Actions.ActOnAsTypeExpr(Expr.take(), DestTy.get(), StartLoc,
+ ConsumeParen());
+ break;
+ }
+ }
+
+ if (Res.isInvalid())
+ return ExprError();
+
+ // These can be followed by postfix-expr pieces because they are
+ // primary-expressions.
+ return ParsePostfixExpressionSuffix(Res.take());
+}
+
+/// ParseParenExpression - This parses the unit that starts with a '(' token,
+/// based on what is allowed by ExprType. The actual thing parsed is returned
+/// in ExprType. If stopIfCastExpr is true, it will only return the parsed type,
+/// not the parsed cast-expression.
+///
+/// primary-expression: [C99 6.5.1]
+/// '(' expression ')'
+/// [GNU] '(' compound-statement ')' (if !ParenExprOnly)
+/// postfix-expression: [C99 6.5.2]
+/// '(' type-name ')' '{' initializer-list '}'
+/// '(' type-name ')' '{' initializer-list ',' '}'
+/// cast-expression: [C99 6.5.4]
+/// '(' type-name ')' cast-expression
+/// [ARC] bridged-cast-expression
+///
+/// [ARC] bridged-cast-expression:
+/// (__bridge type-name) cast-expression
+/// (__bridge_transfer type-name) cast-expression
+/// (__bridge_retained type-name) cast-expression
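+///
+/// For example, under ARC '(__bridge NSString *)cfStr' is parsed here as a
+/// bridged cast, '(struct point){ .x = 1 }' as a compound literal, and
+/// '(x + y)' as a plain parenthesized expression.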
+ExprResult
+Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
+ bool isTypeCast, ParsedType &CastTy,
+ SourceLocation &RParenLoc) {
+ assert(Tok.is(tok::l_paren) && "Not a paren expr!");
+ GreaterThanIsOperatorScope G(GreaterThanIsOperator, true);
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.consumeOpen())
+ return ExprError();
+ SourceLocation OpenLoc = T.getOpenLocation();
+
+ ExprResult Result(true);
+ bool isAmbiguousTypeId;
+ CastTy = ParsedType();
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(),
+ ExprType >= CompoundLiteral? Sema::PCC_ParenthesizedExpression
+ : Sema::PCC_Expression);
+ cutOffParsing();
+ return ExprError();
+ }
+
+  // Diagnose use of bridge casts in non-ARC mode.
+ bool BridgeCast = (getLangOpts().ObjC2 &&
+ (Tok.is(tok::kw___bridge) ||
+ Tok.is(tok::kw___bridge_transfer) ||
+ Tok.is(tok::kw___bridge_retained) ||
+ Tok.is(tok::kw___bridge_retain)));
+ if (BridgeCast && !getLangOpts().ObjCAutoRefCount) {
+ StringRef BridgeCastName = Tok.getName();
+ SourceLocation BridgeKeywordLoc = ConsumeToken();
+ if (!PP.getSourceManager().isInSystemHeader(BridgeKeywordLoc))
+ Diag(BridgeKeywordLoc, diag::warn_arc_bridge_cast_nonarc)
+ << BridgeCastName
+ << FixItHint::CreateReplacement(BridgeKeywordLoc, "");
+ BridgeCast = false;
+ }
+
+ // None of these cases should fall through with an invalid Result
+ // unless they've already reported an error.
+ if (ExprType >= CompoundStmt && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::ext_gnu_statement_expr);
+
+ Actions.ActOnStartStmtExpr();
+
+ ParsedAttributes attrs(AttrFactory);
+ StmtResult Stmt(ParseCompoundStatement(attrs, true));
+ ExprType = CompoundStmt;
+
+ // If the substmt parsed correctly, build the AST node.
+ if (!Stmt.isInvalid()) {
+ Result = Actions.ActOnStmtExpr(OpenLoc, Stmt.take(), Tok.getLocation());
+ } else {
+ Actions.ActOnStmtExprError();
+ }
+ } else if (ExprType >= CompoundLiteral && BridgeCast) {
+ tok::TokenKind tokenKind = Tok.getKind();
+ SourceLocation BridgeKeywordLoc = ConsumeToken();
+
+ // Parse an Objective-C ARC ownership cast expression.
+ ObjCBridgeCastKind Kind;
+ if (tokenKind == tok::kw___bridge)
+ Kind = OBC_Bridge;
+ else if (tokenKind == tok::kw___bridge_transfer)
+ Kind = OBC_BridgeTransfer;
+ else if (tokenKind == tok::kw___bridge_retained)
+ Kind = OBC_BridgeRetained;
+ else {
+ // As a hopefully temporary workaround, allow __bridge_retain as
+ // a synonym for __bridge_retained, but only in system headers.
+ assert(tokenKind == tok::kw___bridge_retain);
+ Kind = OBC_BridgeRetained;
+ if (!PP.getSourceManager().isInSystemHeader(BridgeKeywordLoc))
+ Diag(BridgeKeywordLoc, diag::err_arc_bridge_retain)
+ << FixItHint::CreateReplacement(BridgeKeywordLoc,
+ "__bridge_retained");
+ }
+
+ TypeResult Ty = ParseTypeName();
+ T.consumeClose();
+ RParenLoc = T.getCloseLocation();
+ ExprResult SubExpr = ParseCastExpression(/*isUnaryExpression=*/false);
+
+ if (Ty.isInvalid() || SubExpr.isInvalid())
+ return ExprError();
+
+ return Actions.ActOnObjCBridgedCast(getCurScope(), OpenLoc, Kind,
+ BridgeKeywordLoc, Ty.get(),
+ RParenLoc, SubExpr.get());
+ } else if (ExprType >= CompoundLiteral &&
+ isTypeIdInParens(isAmbiguousTypeId)) {
+
+ // Otherwise, this is a compound literal expression or cast expression.
+
+ // In C++, if the type-id is ambiguous we disambiguate based on context.
+    // If stopIfCastExpr is true, the context is a typeof/sizeof/alignof,
+    // in which case we should treat it as a type-id.
+    // If stopIfCastExpr is false, we need to determine the context past the
+    // parens, so we defer to ParseCXXAmbiguousParenExpression for that.
+ if (isAmbiguousTypeId && !stopIfCastExpr) {
+ ExprResult res = ParseCXXAmbiguousParenExpression(ExprType, CastTy, T);
+ RParenLoc = T.getCloseLocation();
+ return res;
+ }
+
+ // Parse the type declarator.
+ DeclSpec DS(AttrFactory);
+ ParseSpecifierQualifierList(DS);
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ ParseDeclarator(DeclaratorInfo);
+
+ // If our type is followed by an identifier and either ':' or ']', then
+ // this is probably an Objective-C message send where the leading '[' is
+ // missing. Recover as if that were the case.
+ if (!DeclaratorInfo.isInvalidType() && Tok.is(tok::identifier) &&
+ !InMessageExpression && getLangOpts().ObjC1 &&
+ (NextToken().is(tok::colon) || NextToken().is(tok::r_square))) {
+ TypeResult Ty;
+ {
+ InMessageExpressionRAIIObject InMessage(*this, false);
+ Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ }
+ Result = ParseObjCMessageExpressionBody(SourceLocation(),
+ SourceLocation(),
+ Ty.get(), 0);
+ } else {
+ // Match the ')'.
+ T.consumeClose();
+ RParenLoc = T.getCloseLocation();
+ if (Tok.is(tok::l_brace)) {
+ ExprType = CompoundLiteral;
+ TypeResult Ty;
+ {
+ InMessageExpressionRAIIObject InMessage(*this, false);
+ Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ }
+ return ParseCompoundLiteralExpression(Ty.get(), OpenLoc, RParenLoc);
+ }
+
+ if (ExprType == CastExpr) {
+ // We parsed '(' type-name ')' and the thing after it wasn't a '{'.
+
+ if (DeclaratorInfo.isInvalidType())
+ return ExprError();
+
+ // Note that this doesn't parse the subsequent cast-expression, it just
+ // returns the parsed type to the callee.
+ if (stopIfCastExpr) {
+ TypeResult Ty;
+ {
+ InMessageExpressionRAIIObject InMessage(*this, false);
+ Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ }
+ CastTy = Ty.get();
+ return ExprResult();
+ }
+
+ // Reject the cast of super idiom in ObjC.
+ if (Tok.is(tok::identifier) && getLangOpts().ObjC1 &&
+ Tok.getIdentifierInfo() == Ident_super &&
+ getCurScope()->isInObjcMethodScope() &&
+ GetLookAheadToken(1).isNot(tok::period)) {
+ Diag(Tok.getLocation(), diag::err_illegal_super_cast)
+ << SourceRange(OpenLoc, RParenLoc);
+ return ExprError();
+ }
+
+ // Parse the cast-expression that follows it next.
+ // TODO: For cast expression with CastTy.
+ Result = ParseCastExpression(/*isUnaryExpression=*/false,
+ /*isAddressOfOperand=*/false,
+ /*isTypeCast=*/IsTypeCast);
+ if (!Result.isInvalid()) {
+ Result = Actions.ActOnCastExpr(getCurScope(), OpenLoc,
+ DeclaratorInfo, CastTy,
+ RParenLoc, Result.take());
+ }
+ return move(Result);
+ }
+
+ Diag(Tok, diag::err_expected_lbrace_in_compound_literal);
+ return ExprError();
+ }
+ } else if (isTypeCast) {
+ // Parse the expression-list.
+ InMessageExpressionRAIIObject InMessage(*this, false);
+
+ ExprVector ArgExprs(Actions);
+ CommaLocsTy CommaLocs;
+
+ if (!ParseExpressionList(ArgExprs, CommaLocs)) {
+ ExprType = SimpleExpr;
+ Result = Actions.ActOnParenListExpr(OpenLoc, Tok.getLocation(),
+ move_arg(ArgExprs));
+ }
+ } else {
+ InMessageExpressionRAIIObject InMessage(*this, false);
+
+ Result = ParseExpression(MaybeTypeCast);
+ ExprType = SimpleExpr;
+
+ // Don't build a paren expression unless we actually match a ')'.
+ if (!Result.isInvalid() && Tok.is(tok::r_paren))
+ Result = Actions.ActOnParenExpr(OpenLoc, Tok.getLocation(), Result.take());
+ }
+
+ // Match the ')'.
+ if (Result.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ T.consumeClose();
+ RParenLoc = T.getCloseLocation();
+ return move(Result);
+}
+
+/// ParseCompoundLiteralExpression - We have parsed the parenthesized type-name
+/// and we are at the left brace.
+///
+/// postfix-expression: [C99 6.5.2]
+/// '(' type-name ')' '{' initializer-list '}'
+/// '(' type-name ')' '{' initializer-list ',' '}'
+///
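+/// For illustration only (not part of the original comment), assuming a
+/// hypothetical 'struct point' and 'draw()':
+///   draw((struct point){ .x = 1, .y = 2 });
+///   int *arr = (int[]){ 1, 2, 3 };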
+ExprResult
+Parser::ParseCompoundLiteralExpression(ParsedType Ty,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
+ assert(Tok.is(tok::l_brace) && "Not a compound literal!");
+ if (!getLangOpts().C99) // Compound literals don't exist in C90.
+ Diag(LParenLoc, diag::ext_c99_compound_literal);
+ ExprResult Result = ParseInitializer();
+ if (!Result.isInvalid() && Ty)
+ return Actions.ActOnCompoundLiteral(LParenLoc, Ty, RParenLoc, Result.take());
+ return move(Result);
+}
+
+/// ParseStringLiteralExpression - This handles the various token types that
+/// form string literals, and also handles string concatenation [C99 5.1.1.2,
+/// translation phase #6].
+///
+/// primary-expression: [C99 6.5.1]
+/// string-literal
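+///
+/// For illustration only (not part of the original comment): adjacent string
+/// literals are concatenated in translation phase 6, so
+///   const char *p = "Hello, " "world";   // same as "Hello, world"
+/// is parsed here as a single string-literal expression.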
+ExprResult Parser::ParseStringLiteralExpression(bool AllowUserDefinedLiteral) {
+ assert(isTokenStringLiteral() && "Not a string literal!");
+
+ // String concat. Note that keywords like __func__ and __FUNCTION__ are not
+ // considered to be strings for concatenation purposes.
+ SmallVector<Token, 4> StringToks;
+
+ do {
+ StringToks.push_back(Tok);
+ ConsumeStringToken();
+ } while (isTokenStringLiteral());
+
+ // Pass the set of string tokens, ready for concatenation, to the actions.
+ return Actions.ActOnStringLiteral(&StringToks[0], StringToks.size(),
+ AllowUserDefinedLiteral ? getCurScope() : 0);
+}
+
+/// ParseGenericSelectionExpression - Parse a C11 generic-selection
+/// [C11 6.5.1.1].
+///
+/// generic-selection:
+/// _Generic ( assignment-expression , generic-assoc-list )
+/// generic-assoc-list:
+/// generic-association
+/// generic-assoc-list , generic-association
+/// generic-association:
+/// type-name : assignment-expression
+/// default : assignment-expression
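+///
+/// For illustration only (not part of the original comment):
+///   _Generic((x), int: "int", float: "float", default: "other")
+/// selects one of the assignment-expressions based on the type of '(x)'.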
+ExprResult Parser::ParseGenericSelectionExpression() {
+ assert(Tok.is(tok::kw__Generic) && "_Generic keyword expected");
+ SourceLocation KeyLoc = ConsumeToken();
+
+ if (!getLangOpts().C11)
+ Diag(KeyLoc, diag::ext_c11_generic_selection);
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen))
+ return ExprError();
+
+ ExprResult ControllingExpr;
+ {
+ // C11 6.5.1.1p3 "The controlling expression of a generic selection is
+ // not evaluated."
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
+ ControllingExpr = ParseAssignmentExpression();
+ if (ControllingExpr.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+ }
+
+ if (ExpectAndConsume(tok::comma, diag::err_expected_comma, "")) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ SourceLocation DefaultLoc;
+ TypeVector Types(Actions);
+ ExprVector Exprs(Actions);
+ while (1) {
+ ParsedType Ty;
+ if (Tok.is(tok::kw_default)) {
+ // C11 6.5.1.1p2 "A generic selection shall have no more than one default
+ // generic association."
+ if (!DefaultLoc.isInvalid()) {
+ Diag(Tok, diag::err_duplicate_default_assoc);
+ Diag(DefaultLoc, diag::note_previous_default_assoc);
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+ DefaultLoc = ConsumeToken();
+ Ty = ParsedType();
+ } else {
+ ColonProtectionRAIIObject X(*this);
+ TypeResult TR = ParseTypeName();
+ if (TR.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+ Ty = TR.release();
+ }
+ Types.push_back(Ty);
+
+ if (ExpectAndConsume(tok::colon, diag::err_expected_colon, "")) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+    // FIXME: These expressions should be parsed in a potentially evaluated
+    // context.
+ ExprResult ER(ParseAssignmentExpression());
+ if (ER.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+ Exprs.push_back(ER.release());
+
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken();
+ }
+
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid())
+ return ExprError();
+
+ return Actions.ActOnGenericSelectionExpr(KeyLoc, DefaultLoc,
+ T.getCloseLocation(),
+ ControllingExpr.release(),
+ move_arg(Types), move_arg(Exprs));
+}
+
+/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
+///
+/// argument-expression-list:
+/// assignment-expression
+/// argument-expression-list , assignment-expression
+///
+/// [C++] expression-list:
+/// [C++] assignment-expression
+/// [C++] expression-list , assignment-expression
+///
+/// [C++0x] expression-list:
+/// [C++0x] initializer-list
+///
+/// [C++0x] initializer-list
+/// [C++0x] initializer-clause ...[opt]
+/// [C++0x] initializer-list , initializer-clause ...[opt]
+///
+/// [C++0x] initializer-clause:
+/// [C++0x] assignment-expression
+/// [C++0x] braced-init-list
+///
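+/// For illustration only (not part of the original comment): this is the list
+/// parsed between the parentheses of a call such as
+///   f(a, b + 1, g(c));
+/// and, in C++11 mode, an argument may itself be a braced-init-list:
+///   f(a, {1, 2});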
+bool Parser::ParseExpressionList(SmallVectorImpl<Expr*> &Exprs,
+ SmallVectorImpl<SourceLocation> &CommaLocs,
+ void (Sema::*Completer)(Scope *S,
+ Expr *Data,
+ llvm::ArrayRef<Expr *> Args),
+ Expr *Data) {
+ while (1) {
+ if (Tok.is(tok::code_completion)) {
+ if (Completer)
+ (Actions.*Completer)(getCurScope(), Data, Exprs);
+ else
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Expression);
+ cutOffParsing();
+ return true;
+ }
+
+ ExprResult Expr;
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+ Expr = ParseBraceInitializer();
+ } else
+ Expr = ParseAssignmentExpression();
+
+ if (Tok.is(tok::ellipsis))
+ Expr = Actions.ActOnPackExpansion(Expr.get(), ConsumeToken());
+ if (Expr.isInvalid())
+ return true;
+
+ Exprs.push_back(Expr.release());
+
+ if (Tok.isNot(tok::comma))
+ return false;
+ // Move to the next argument, remember where the comma was.
+ CommaLocs.push_back(ConsumeToken());
+ }
+}
+
+/// ParseBlockId - Parse a block-id, which roughly looks like int (int x).
+///
+/// [clang] block-id:
+/// [clang] specifier-qualifier-list block-declarator
+///
+void Parser::ParseBlockId() {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Type);
+ return cutOffParsing();
+ }
+
+ // Parse the specifier-qualifier-list piece.
+ DeclSpec DS(AttrFactory);
+ ParseSpecifierQualifierList(DS);
+
+ // Parse the block-declarator.
+ Declarator DeclaratorInfo(DS, Declarator::BlockLiteralContext);
+ ParseDeclarator(DeclaratorInfo);
+
+ // We do this for: ^ __attribute__((noreturn)) {, as DS has the attributes.
+ DeclaratorInfo.takeAttributes(DS.getAttributes(), SourceLocation());
+
+ MaybeParseGNUAttributes(DeclaratorInfo);
+
+ // Inform sema that we are starting a block.
+ Actions.ActOnBlockArguments(DeclaratorInfo, getCurScope());
+}
+
+/// ParseBlockLiteralExpression - Parse a block literal, which roughly looks
+/// like ^(int x){ return x+1; }
+///
+/// block-literal:
+/// [clang] '^' block-args[opt] compound-statement
+/// [clang] '^' block-id compound-statement
+/// [clang] block-args:
+/// [clang] '(' parameter-list ')'
+///
+ExprResult Parser::ParseBlockLiteralExpression() {
+ assert(Tok.is(tok::caret) && "block literal starts with ^");
+ SourceLocation CaretLoc = ConsumeToken();
+
+ PrettyStackTraceLoc CrashInfo(PP.getSourceManager(), CaretLoc,
+ "block literal parsing");
+
+ // Enter a scope to hold everything within the block. This includes the
+ // argument decls, decls within the compound expression, etc. This also
+ // allows determining whether a variable reference inside the block is
+ // within or outside of the block.
+ ParseScope BlockScope(this, Scope::BlockScope | Scope::FnScope |
+ Scope::DeclScope);
+
+ // Inform sema that we are starting a block.
+ Actions.ActOnBlockStart(CaretLoc, getCurScope());
+
+ // Parse the return type if present.
+ DeclSpec DS(AttrFactory);
+ Declarator ParamInfo(DS, Declarator::BlockLiteralContext);
+ // FIXME: Since the return type isn't actually parsed, it can't be used to
+ // fill ParamInfo with an initial valid range, so do it manually.
+ ParamInfo.SetSourceRange(SourceRange(Tok.getLocation(), Tok.getLocation()));
+
+ // If this block has arguments, parse them. There is no ambiguity here with
+ // the expression case, because the expression case requires a parameter list.
+ if (Tok.is(tok::l_paren)) {
+ ParseParenDeclarator(ParamInfo);
+ // Parse the pieces after the identifier as if we had "int(...)".
+ // SetIdentifier sets the source range end, but in this case we're past
+ // that location.
+ SourceLocation Tmp = ParamInfo.getSourceRange().getEnd();
+ ParamInfo.SetIdentifier(0, CaretLoc);
+ ParamInfo.SetRangeEnd(Tmp);
+ if (ParamInfo.isInvalidType()) {
+ // If there was an error parsing the arguments, they may have
+ // tried to use ^(x+y) which requires an argument list. Just
+ // skip the whole block literal.
+ Actions.ActOnBlockError(CaretLoc, getCurScope());
+ return ExprError();
+ }
+
+ MaybeParseGNUAttributes(ParamInfo);
+
+ // Inform sema that we are starting a block.
+ Actions.ActOnBlockArguments(ParamInfo, getCurScope());
+ } else if (!Tok.is(tok::l_brace)) {
+ ParseBlockId();
+ } else {
+ // Otherwise, pretend we saw (void).
+ ParsedAttributes attrs(AttrFactory);
+ ParamInfo.AddTypeInfo(DeclaratorChunk::getFunction(true, false,
+ SourceLocation(),
+ 0, 0, 0,
+ true, SourceLocation(),
+ SourceLocation(),
+ SourceLocation(),
+ SourceLocation(),
+ EST_None,
+ SourceLocation(),
+ 0, 0, 0, 0,
+ CaretLoc, CaretLoc,
+ ParamInfo),
+ attrs, CaretLoc);
+
+ MaybeParseGNUAttributes(ParamInfo);
+
+ // Inform sema that we are starting a block.
+ Actions.ActOnBlockArguments(ParamInfo, getCurScope());
+ }
+
+
+ ExprResult Result(true);
+ if (!Tok.is(tok::l_brace)) {
+ // Saw something like: ^expr
+ Diag(Tok, diag::err_expected_expression);
+ Actions.ActOnBlockError(CaretLoc, getCurScope());
+ return ExprError();
+ }
+
+ StmtResult Stmt(ParseCompoundStatementBody());
+ BlockScope.Exit();
+ if (!Stmt.isInvalid())
+ Result = Actions.ActOnBlockStmtExpr(CaretLoc, Stmt.take(), getCurScope());
+ else
+ Actions.ActOnBlockError(CaretLoc, getCurScope());
+ return move(Result);
+}
+
+/// ParseObjCBoolLiteral - This handles the objective-c Boolean literals.
+///
+/// '__objc_yes'
+/// '__objc_no'
+ExprResult Parser::ParseObjCBoolLiteral() {
+ tok::TokenKind Kind = Tok.getKind();
+ return Actions.ActOnObjCBoolLiteral(ConsumeToken(), Kind);
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
new file mode 100644
index 0000000..2af7482
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseExprCXX.cpp
@@ -0,0 +1,2848 @@
+//===--- ParseExprCXX.cpp - C++ Expression Parsing ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Expression parsing implementation for C++.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
+#include "RAIIObjectsForParser.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace clang;
+
+static int SelectDigraphErrorMessage(tok::TokenKind Kind) {
+ switch (Kind) {
+ case tok::kw_template: return 0;
+ case tok::kw_const_cast: return 1;
+ case tok::kw_dynamic_cast: return 2;
+ case tok::kw_reinterpret_cast: return 3;
+ case tok::kw_static_cast: return 4;
+ default:
+ llvm_unreachable("Unknown type for digraph error message.");
+ }
+}
+
+// Are the two tokens adjacent in the same source file?
+static bool AreTokensAdjacent(Preprocessor &PP, Token &First, Token &Second) {
+ SourceManager &SM = PP.getSourceManager();
+ SourceLocation FirstLoc = SM.getSpellingLoc(First.getLocation());
+ SourceLocation FirstEnd = FirstLoc.getLocWithOffset(First.getLength());
+ return FirstEnd == SM.getSpellingLoc(Second.getLocation());
+}
+
+// Suggest fixit for "<::" after a cast.
+static void FixDigraph(Parser &P, Preprocessor &PP, Token &DigraphToken,
+ Token &ColonToken, tok::TokenKind Kind, bool AtDigraph) {
+ // Pull '<:' and ':' off token stream.
+ if (!AtDigraph)
+ PP.Lex(DigraphToken);
+ PP.Lex(ColonToken);
+
+ SourceRange Range;
+ Range.setBegin(DigraphToken.getLocation());
+ Range.setEnd(ColonToken.getLocation());
+ P.Diag(DigraphToken.getLocation(), diag::err_missing_whitespace_digraph)
+ << SelectDigraphErrorMessage(Kind)
+ << FixItHint::CreateReplacement(Range, "< ::");
+
+ // Update token information to reflect their change in token type.
+ ColonToken.setKind(tok::coloncolon);
+ ColonToken.setLocation(ColonToken.getLocation().getLocWithOffset(-1));
+ ColonToken.setLength(2);
+ DigraphToken.setKind(tok::less);
+ DigraphToken.setLength(1);
+
+ // Push new tokens back to token stream.
+ PP.EnterToken(ColonToken);
+ if (!AtDigraph)
+ PP.EnterToken(DigraphToken);
+}
+
+// Check for '<::', which is parsed as the digraph '[:' but should be '< ::'
+// when following a template name.
+void Parser::CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectType,
+ bool EnteringContext,
+ IdentifierInfo &II, CXXScopeSpec &SS) {
+ if (!Next.is(tok::l_square) || Next.getLength() != 2)
+ return;
+
+ Token SecondToken = GetLookAheadToken(2);
+ if (!SecondToken.is(tok::colon) || !AreTokensAdjacent(PP, Next, SecondToken))
+ return;
+
+ TemplateTy Template;
+ UnqualifiedId TemplateName;
+ TemplateName.setIdentifier(&II, Tok.getLocation());
+ bool MemberOfUnknownSpecialization;
+ if (!Actions.isTemplateName(getCurScope(), SS, /*hasTemplateKeyword=*/false,
+ TemplateName, ObjectType, EnteringContext,
+ Template, MemberOfUnknownSpecialization))
+ return;
+
+ FixDigraph(*this, PP, Next, SecondToken, tok::kw_template,
+ /*AtDigraph*/false);
+}
+
+/// \brief Parse global scope or nested-name-specifier if present.
+///
+/// Parses a C++ global scope specifier ('::') or nested-name-specifier (which
+/// may be preceded by '::'). Note that this routine will not parse ::new or
+/// ::delete; it will just leave them in the token stream.
+///
+/// '::'[opt] nested-name-specifier
+/// '::'
+///
+/// nested-name-specifier:
+/// type-name '::'
+/// namespace-name '::'
+/// nested-name-specifier identifier '::'
+/// nested-name-specifier 'template'[opt] simple-template-id '::'
+///
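+/// For illustration only (not part of the original comment): examples of
+/// nested-name-specifiers this routine consumes include
+///   std::        ::std::        std::vector<int>::
+///   Outer::Inner::        T::template apply<U>::
+/// (all names here are hypothetical placeholders apart from std::vector).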
+///
+/// \param SS the scope specifier that will be set to the parsed
+/// nested-name-specifier (or empty)
+///
+/// \param ObjectType if this nested-name-specifier is being parsed following
+/// the "." or "->" of a member access expression, this parameter provides the
+/// type of the object whose members are being accessed.
+///
+/// \param EnteringContext whether we will be entering into the context of
+/// the nested-name-specifier after parsing it.
+///
+/// \param MayBePseudoDestructor When non-NULL, points to a flag that
+/// indicates whether this nested-name-specifier may be part of a
+/// pseudo-destructor name. In this case, the flag will be set false
+/// if we don't actually end up parsing a destructor name. Moreover,
+/// if we do end up determining that we are parsing a destructor name,
+/// the last component of the nested-name-specifier is not parsed as
+/// part of the scope specifier.
+
+/// member access expression, e.g., the \p T:: in \p p->T::m.
+///
+/// \returns true if there was an error parsing a scope specifier
+bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
+ ParsedType ObjectType,
+ bool EnteringContext,
+ bool *MayBePseudoDestructor,
+ bool IsTypename) {
+ assert(getLangOpts().CPlusPlus &&
+ "Call sites of this function should be guarded by checking for C++");
+
+ if (Tok.is(tok::annot_cxxscope)) {
+ Actions.RestoreNestedNameSpecifierAnnotation(Tok.getAnnotationValue(),
+ Tok.getAnnotationRange(),
+ SS);
+ ConsumeToken();
+ return false;
+ }
+
+ bool HasScopeSpecifier = false;
+
+ if (Tok.is(tok::coloncolon)) {
+ // ::new and ::delete aren't nested-name-specifiers.
+ tok::TokenKind NextKind = NextToken().getKind();
+ if (NextKind == tok::kw_new || NextKind == tok::kw_delete)
+ return false;
+
+ // '::' - Global scope qualifier.
+ if (Actions.ActOnCXXGlobalScopeSpecifier(getCurScope(), ConsumeToken(), SS))
+ return true;
+
+ HasScopeSpecifier = true;
+ }
+
+ bool CheckForDestructor = false;
+ if (MayBePseudoDestructor && *MayBePseudoDestructor) {
+ CheckForDestructor = true;
+ *MayBePseudoDestructor = false;
+ }
+
+ if (Tok.is(tok::kw_decltype) || Tok.is(tok::annot_decltype)) {
+ DeclSpec DS(AttrFactory);
+ SourceLocation DeclLoc = Tok.getLocation();
+ SourceLocation EndLoc = ParseDecltypeSpecifier(DS);
+ if (Tok.isNot(tok::coloncolon)) {
+ AnnotateExistingDecltypeSpecifier(DS, DeclLoc, EndLoc);
+ return false;
+ }
+
+ SourceLocation CCLoc = ConsumeToken();
+ if (Actions.ActOnCXXNestedNameSpecifierDecltype(SS, DS, CCLoc))
+ SS.SetInvalid(SourceRange(DeclLoc, CCLoc));
+
+ HasScopeSpecifier = true;
+ }
+
+ while (true) {
+ if (HasScopeSpecifier) {
+ // C++ [basic.lookup.classref]p5:
+ // If the qualified-id has the form
+ //
+ // ::class-name-or-namespace-name::...
+ //
+ // the class-name-or-namespace-name is looked up in global scope as a
+ // class-name or namespace-name.
+ //
+ // To implement this, we clear out the object type as soon as we've
+ // seen a leading '::' or part of a nested-name-specifier.
+ ObjectType = ParsedType();
+
+ if (Tok.is(tok::code_completion)) {
+ // Code completion for a nested-name-specifier, where the code
+        // completion token follows the '::'.
+ Actions.CodeCompleteQualifiedId(getCurScope(), SS, EnteringContext);
+ // Include code completion token into the range of the scope otherwise
+ // when we try to annotate the scope tokens the dangling code completion
+ // token will cause assertion in
+ // Preprocessor::AnnotatePreviousCachedTokens.
+ SS.setEndLoc(Tok.getLocation());
+ cutOffParsing();
+ return true;
+ }
+ }
+
+ // nested-name-specifier:
+ // nested-name-specifier 'template'[opt] simple-template-id '::'
+
+ // Parse the optional 'template' keyword, then make sure we have
+ // 'identifier <' after it.
+ if (Tok.is(tok::kw_template)) {
+ // If we don't have a scope specifier or an object type, this isn't a
+ // nested-name-specifier, since they aren't allowed to start with
+ // 'template'.
+ if (!HasScopeSpecifier && !ObjectType)
+ break;
+
+ TentativeParsingAction TPA(*this);
+ SourceLocation TemplateKWLoc = ConsumeToken();
+
+ UnqualifiedId TemplateName;
+ if (Tok.is(tok::identifier)) {
+ // Consume the identifier.
+ TemplateName.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
+ ConsumeToken();
+ } else if (Tok.is(tok::kw_operator)) {
+ if (ParseUnqualifiedIdOperator(SS, EnteringContext, ObjectType,
+ TemplateName)) {
+ TPA.Commit();
+ break;
+ }
+
+ if (TemplateName.getKind() != UnqualifiedId::IK_OperatorFunctionId &&
+ TemplateName.getKind() != UnqualifiedId::IK_LiteralOperatorId) {
+ Diag(TemplateName.getSourceRange().getBegin(),
+ diag::err_id_after_template_in_nested_name_spec)
+ << TemplateName.getSourceRange();
+ TPA.Commit();
+ break;
+ }
+ } else {
+ TPA.Revert();
+ break;
+ }
+
+ // If the next token is not '<', we have a qualified-id that refers
+ // to a template name, such as T::template apply, but is not a
+ // template-id.
+ if (Tok.isNot(tok::less)) {
+ TPA.Revert();
+ break;
+ }
+
+ // Commit to parsing the template-id.
+ TPA.Commit();
+ TemplateTy Template;
+ if (TemplateNameKind TNK
+ = Actions.ActOnDependentTemplateName(getCurScope(),
+ SS, TemplateKWLoc, TemplateName,
+ ObjectType, EnteringContext,
+ Template)) {
+ if (AnnotateTemplateIdToken(Template, TNK, SS, TemplateKWLoc,
+ TemplateName, false))
+ return true;
+ } else
+ return true;
+
+ continue;
+ }
+
+ if (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) {
+ // We have
+ //
+ // simple-template-id '::'
+ //
+ // So we need to check whether the simple-template-id is of the
+ // right kind (it should name a type or be dependent), and then
+ // convert it into a type within the nested-name-specifier.
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ if (CheckForDestructor && GetLookAheadToken(2).is(tok::tilde)) {
+ *MayBePseudoDestructor = true;
+ return false;
+ }
+
+ // Consume the template-id token.
+ ConsumeToken();
+
+ assert(Tok.is(tok::coloncolon) && "NextToken() not working properly!");
+ SourceLocation CCLoc = ConsumeToken();
+
+ HasScopeSpecifier = true;
+
+ ASTTemplateArgsPtr TemplateArgsPtr(Actions,
+ TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+
+ if (Actions.ActOnCXXNestedNameSpecifier(getCurScope(),
+ SS,
+ TemplateId->TemplateKWLoc,
+ TemplateId->Template,
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->RAngleLoc,
+ CCLoc,
+ EnteringContext)) {
+ SourceLocation StartLoc
+ = SS.getBeginLoc().isValid()? SS.getBeginLoc()
+ : TemplateId->TemplateNameLoc;
+ SS.SetInvalid(SourceRange(StartLoc, CCLoc));
+ }
+
+ continue;
+ }
+
+
+ // The rest of the nested-name-specifier possibilities start with
+ // tok::identifier.
+ if (Tok.isNot(tok::identifier))
+ break;
+
+ IdentifierInfo &II = *Tok.getIdentifierInfo();
+
+ // nested-name-specifier:
+ // type-name '::'
+ // namespace-name '::'
+ // nested-name-specifier identifier '::'
+ Token Next = NextToken();
+
+ // If we get foo:bar, this is almost certainly a typo for foo::bar. Recover
+ // and emit a fixit hint for it.
+ if (Next.is(tok::colon) && !ColonIsSacred) {
+ if (Actions.IsInvalidUnlessNestedName(getCurScope(), SS, II,
+ Tok.getLocation(),
+ Next.getLocation(), ObjectType,
+ EnteringContext) &&
+ // If the token after the colon isn't an identifier, it's still an
+ // error, but they probably meant something else strange so don't
+ // recover like this.
+ PP.LookAhead(1).is(tok::identifier)) {
+ Diag(Next, diag::err_unexected_colon_in_nested_name_spec)
+ << FixItHint::CreateReplacement(Next.getLocation(), "::");
+
+ // Recover as if the user wrote '::'.
+ Next.setKind(tok::coloncolon);
+ }
+ }
+
+ if (Next.is(tok::coloncolon)) {
+ if (CheckForDestructor && GetLookAheadToken(2).is(tok::tilde) &&
+ !Actions.isNonTypeNestedNameSpecifier(getCurScope(), SS, Tok.getLocation(),
+ II, ObjectType)) {
+ *MayBePseudoDestructor = true;
+ return false;
+ }
+
+ // We have an identifier followed by a '::'. Lookup this name
+ // as the name in a nested-name-specifier.
+ SourceLocation IdLoc = ConsumeToken();
+ assert((Tok.is(tok::coloncolon) || Tok.is(tok::colon)) &&
+ "NextToken() not working properly!");
+ SourceLocation CCLoc = ConsumeToken();
+
+ HasScopeSpecifier = true;
+ if (Actions.ActOnCXXNestedNameSpecifier(getCurScope(), II, IdLoc, CCLoc,
+ ObjectType, EnteringContext, SS))
+ SS.SetInvalid(SourceRange(IdLoc, CCLoc));
+
+ continue;
+ }
+
+ CheckForTemplateAndDigraph(Next, ObjectType, EnteringContext, II, SS);
+
+ // nested-name-specifier:
+ // type-name '<'
+ if (Next.is(tok::less)) {
+ TemplateTy Template;
+ UnqualifiedId TemplateName;
+ TemplateName.setIdentifier(&II, Tok.getLocation());
+ bool MemberOfUnknownSpecialization;
+ if (TemplateNameKind TNK = Actions.isTemplateName(getCurScope(), SS,
+ /*hasTemplateKeyword=*/false,
+ TemplateName,
+ ObjectType,
+ EnteringContext,
+ Template,
+ MemberOfUnknownSpecialization)) {
+ // We have found a template name, so annotate this token
+ // with a template-id annotation. We do not permit the
+ // template-id to be translated into a type annotation,
+ // because some clients (e.g., the parsing of class template
+ // specializations) still want to see the original template-id
+ // token.
+ ConsumeToken();
+ if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
+ TemplateName, false))
+ return true;
+ continue;
+ }
+
+ if (MemberOfUnknownSpecialization && (ObjectType || SS.isSet()) &&
+ (IsTypename || IsTemplateArgumentList(1))) {
+ // We have something like t::getAs<T>, where getAs is a
+ // member of an unknown specialization. However, this will only
+ // parse correctly as a template, so suggest the keyword 'template'
+ // before 'getAs' and treat this as a dependent template name.
+ unsigned DiagID = diag::err_missing_dependent_template_keyword;
+ if (getLangOpts().MicrosoftExt)
+ DiagID = diag::warn_missing_dependent_template_keyword;
+
+ Diag(Tok.getLocation(), DiagID)
+ << II.getName()
+ << FixItHint::CreateInsertion(Tok.getLocation(), "template ");
+
+ if (TemplateNameKind TNK
+ = Actions.ActOnDependentTemplateName(getCurScope(),
+ SS, SourceLocation(),
+ TemplateName, ObjectType,
+ EnteringContext, Template)) {
+ // Consume the identifier.
+ ConsumeToken();
+ if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
+ TemplateName, false))
+ return true;
+ }
+ else
+ return true;
+
+ continue;
+ }
+ }
+
+ // We don't have any tokens that form the beginning of a
+ // nested-name-specifier, so we're done.
+ break;
+ }
+
+ // Even if we didn't see any pieces of a nested-name-specifier, we
+ // still check whether there is a tilde in this position, which
+ // indicates a potential pseudo-destructor.
+ if (CheckForDestructor && Tok.is(tok::tilde))
+ *MayBePseudoDestructor = true;
+
+ return false;
+}
+
+/// ParseCXXIdExpression - Handle id-expression.
+///
+/// id-expression:
+/// unqualified-id
+/// qualified-id
+///
+/// qualified-id:
+/// '::'[opt] nested-name-specifier 'template'[opt] unqualified-id
+/// '::' identifier
+/// '::' operator-function-id
+/// '::' template-id
+///
+/// NOTE: The standard specifies that, for qualified-id, the parser does not
+/// expect:
+///
+/// '::' conversion-function-id
+/// '::' '~' class-name
+///
+/// This may cause a slight inconsistency on diagnostics:
+///
+/// class C {};
+/// namespace A {}
+/// void f() {
+/// :: A :: ~ C(); // Some Sema error about using destructor with a
+/// // namespace.
+/// :: ~ C(); // Some Parser error like 'unexpected ~'.
+/// }
+///
+/// We simplify the parser a bit and make it work like:
+///
+/// qualified-id:
+/// '::'[opt] nested-name-specifier 'template'[opt] unqualified-id
+/// '::' unqualified-id
+///
+/// That way Sema can handle and report similar errors for namespaces and the
+/// global scope.
+///
+/// The isAddressOfOperand parameter indicates that this id-expression is a
+/// direct operand of the address-of operator. This is, besides member contexts,
+/// the only place where a qualified-id naming a non-static class member may
+/// appear.
+///
+ExprResult Parser::ParseCXXIdExpression(bool isAddressOfOperand) {
+ // qualified-id:
+ // '::'[opt] nested-name-specifier 'template'[opt] unqualified-id
+ // '::' unqualified-id
+ //
+ CXXScopeSpec SS;
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(), /*EnteringContext=*/false);
+
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId Name;
+ if (ParseUnqualifiedId(SS,
+ /*EnteringContext=*/false,
+ /*AllowDestructorName=*/false,
+ /*AllowConstructorName=*/false,
+ /*ObjectType=*/ ParsedType(),
+ TemplateKWLoc,
+ Name))
+ return ExprError();
+
+ // This is only the direct operand of an & operator if it is not
+ // followed by a postfix-expression suffix.
+ if (isAddressOfOperand && isPostfixExpressionSuffixStart())
+ isAddressOfOperand = false;
+
+ return Actions.ActOnIdExpression(getCurScope(), SS, TemplateKWLoc, Name,
+ Tok.is(tok::l_paren), isAddressOfOperand);
+}
+
+/// ParseLambdaExpression - Parse a C++0x lambda expression.
+///
+/// lambda-expression:
+/// lambda-introducer lambda-declarator[opt] compound-statement
+///
+/// lambda-introducer:
+/// '[' lambda-capture[opt] ']'
+///
+/// lambda-capture:
+/// capture-default
+/// capture-list
+/// capture-default ',' capture-list
+///
+/// capture-default:
+/// '&'
+/// '='
+///
+/// capture-list:
+/// capture
+/// capture-list ',' capture
+///
+/// capture:
+/// identifier
+/// '&' identifier
+/// 'this'
+///
+/// lambda-declarator:
+/// '(' parameter-declaration-clause ')' attribute-specifier[opt]
+/// 'mutable'[opt] exception-specification[opt]
+/// trailing-return-type[opt]
+///
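+/// For illustration only (not part of the original comment): a lambda using
+/// most of these pieces looks like
+///   auto f = [&total, x](int v) mutable -> int { return total += v + x; };
+/// where 'total' and 'x' are hypothetical captured variables.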
+ExprResult Parser::ParseLambdaExpression() {
+ // Parse lambda-introducer.
+ LambdaIntroducer Intro;
+
+ llvm::Optional<unsigned> DiagID(ParseLambdaIntroducer(Intro));
+ if (DiagID) {
+ Diag(Tok, DiagID.getValue());
+ SkipUntil(tok::r_square);
+ SkipUntil(tok::l_brace);
+ SkipUntil(tok::r_brace);
+ return ExprError();
+ }
+
+ return ParseLambdaExpressionAfterIntroducer(Intro);
+}
+
+/// TryParseLambdaExpression - Use lookahead and potentially tentative
+/// parsing to determine if we are looking at a C++0x lambda expression, and parse
+/// it if we are.
+///
+/// If we are not looking at a lambda expression, returns ExprError().
+ExprResult Parser::TryParseLambdaExpression() {
+ assert(getLangOpts().CPlusPlus0x
+ && Tok.is(tok::l_square)
+ && "Not at the start of a possible lambda expression.");
+
+ const Token Next = NextToken(), After = GetLookAheadToken(2);
+
+ // If lookahead indicates this is a lambda...
+ if (Next.is(tok::r_square) || // []
+ Next.is(tok::equal) || // [=
+ (Next.is(tok::amp) && // [&] or [&,
+ (After.is(tok::r_square) ||
+ After.is(tok::comma))) ||
+ (Next.is(tok::identifier) && // [identifier]
+ After.is(tok::r_square))) {
+ return ParseLambdaExpression();
+ }
+
+ // If lookahead indicates an ObjC message send...
+ // [identifier identifier
+ if (Next.is(tok::identifier) && After.is(tok::identifier)) {
+ return ExprEmpty();
+ }
+
+  // Here, we're stuck: lambda introducers and Objective-C message sends are
+  // unambiguous, but telling them apart requires arbitrary lookahead.
+  // [a,b,c,d,e,f,g] is a lambda, and [a,b,c,d,e,f,g h] is an Objective-C
+  // message send.  Instead of
+ // writing two routines to parse a lambda introducer, just try to parse
+ // a lambda introducer first, and fall back if that fails.
+ // (TryParseLambdaIntroducer never produces any diagnostic output.)
+ LambdaIntroducer Intro;
+ if (TryParseLambdaIntroducer(Intro))
+ return ExprEmpty();
+ return ParseLambdaExpressionAfterIntroducer(Intro);
+}
+
+/// ParseLambdaIntroducer - Parse a lambda introducer.
+///
+/// Returns a DiagnosticID if it hit something unexpected.
+llvm::Optional<unsigned> Parser::ParseLambdaIntroducer(LambdaIntroducer &Intro){
+ typedef llvm::Optional<unsigned> DiagResult;
+
+ assert(Tok.is(tok::l_square) && "Lambda expressions begin with '['.");
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+
+ Intro.Range.setBegin(T.getOpenLocation());
+
+ bool first = true;
+
+ // Parse capture-default.
+ if (Tok.is(tok::amp) &&
+ (NextToken().is(tok::comma) || NextToken().is(tok::r_square))) {
+ Intro.Default = LCD_ByRef;
+ Intro.DefaultLoc = ConsumeToken();
+ first = false;
+ } else if (Tok.is(tok::equal)) {
+ Intro.Default = LCD_ByCopy;
+ Intro.DefaultLoc = ConsumeToken();
+ first = false;
+ }
+
+ while (Tok.isNot(tok::r_square)) {
+ if (!first) {
+ if (Tok.isNot(tok::comma)) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
+ /*AfterAmpersand=*/false);
+ ConsumeCodeCompletionToken();
+ break;
+ }
+
+ return DiagResult(diag::err_expected_comma_or_rsquare);
+ }
+ ConsumeToken();
+ }
+
+ if (Tok.is(tok::code_completion)) {
+ // If we're in Objective-C++ and we have a bare '[', then this is more
+ // likely to be a message receiver.
+ if (getLangOpts().ObjC1 && first)
+ Actions.CodeCompleteObjCMessageReceiver(getCurScope());
+ else
+ Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
+ /*AfterAmpersand=*/false);
+ ConsumeCodeCompletionToken();
+ break;
+ }
+
+ first = false;
+
+ // Parse capture.
+ LambdaCaptureKind Kind = LCK_ByCopy;
+ SourceLocation Loc;
+ IdentifierInfo* Id = 0;
+ SourceLocation EllipsisLoc;
+
+ if (Tok.is(tok::kw_this)) {
+ Kind = LCK_This;
+ Loc = ConsumeToken();
+ } else {
+ if (Tok.is(tok::amp)) {
+ Kind = LCK_ByRef;
+ ConsumeToken();
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteLambdaIntroducer(getCurScope(), Intro,
+ /*AfterAmpersand=*/true);
+ ConsumeCodeCompletionToken();
+ break;
+ }
+ }
+
+ if (Tok.is(tok::identifier)) {
+ Id = Tok.getIdentifierInfo();
+ Loc = ConsumeToken();
+
+ if (Tok.is(tok::ellipsis))
+ EllipsisLoc = ConsumeToken();
+ } else if (Tok.is(tok::kw_this)) {
+ // FIXME: If we want to suggest a fixit here, will need to return more
+ // than just DiagnosticID. Perhaps full DiagnosticBuilder that can be
+ // Clear()ed to prevent emission in case of tentative parsing?
+ return DiagResult(diag::err_this_captured_by_reference);
+ } else {
+ return DiagResult(diag::err_expected_capture);
+ }
+ }
+
+ Intro.addCapture(Kind, Loc, Id, EllipsisLoc);
+ }
+
+ T.consumeClose();
+ Intro.Range.setEnd(T.getCloseLocation());
+
+ return DiagResult();
+}
+
+/// TryParseLambdaIntroducer - Tentatively parse a lambda introducer.
+///
+/// Returns true if it hit something unexpected.
+bool Parser::TryParseLambdaIntroducer(LambdaIntroducer &Intro) {
+ TentativeParsingAction PA(*this);
+
+ llvm::Optional<unsigned> DiagID(ParseLambdaIntroducer(Intro));
+
+ if (DiagID) {
+ PA.Revert();
+ return true;
+ }
+
+ PA.Commit();
+ return false;
+}
+
+/// ParseLambdaExpressionAfterIntroducer - Parse the rest of a lambda
+/// expression.
+ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
+ LambdaIntroducer &Intro) {
+ SourceLocation LambdaBeginLoc = Intro.Range.getBegin();
+ Diag(LambdaBeginLoc, diag::warn_cxx98_compat_lambda);
+
+ PrettyStackTraceLoc CrashInfo(PP.getSourceManager(), LambdaBeginLoc,
+ "lambda expression parsing");
+
+ // Parse lambda-declarator[opt].
+ DeclSpec DS(AttrFactory);
+ Declarator D(DS, Declarator::LambdaExprContext);
+
+ if (Tok.is(tok::l_paren)) {
+ ParseScope PrototypeScope(this,
+ Scope::FunctionPrototypeScope |
+ Scope::DeclScope);
+
+ SourceLocation DeclLoc, DeclEndLoc;
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ DeclLoc = T.getOpenLocation();
+
+ // Parse parameter-declaration-clause.
+ ParsedAttributes Attr(AttrFactory);
+ llvm::SmallVector<DeclaratorChunk::ParamInfo, 16> ParamInfo;
+ SourceLocation EllipsisLoc;
+
+ if (Tok.isNot(tok::r_paren))
+ ParseParameterDeclarationClause(D, Attr, ParamInfo, EllipsisLoc);
+
+ T.consumeClose();
+ DeclEndLoc = T.getCloseLocation();
+
+ // Parse 'mutable'[opt].
+ SourceLocation MutableLoc;
+ if (Tok.is(tok::kw_mutable)) {
+ MutableLoc = ConsumeToken();
+ DeclEndLoc = MutableLoc;
+ }
+
+ // Parse exception-specification[opt].
+ ExceptionSpecificationType ESpecType = EST_None;
+ SourceRange ESpecRange;
+ llvm::SmallVector<ParsedType, 2> DynamicExceptions;
+ llvm::SmallVector<SourceRange, 2> DynamicExceptionRanges;
+ ExprResult NoexceptExpr;
+ ESpecType = MaybeParseExceptionSpecification(ESpecRange,
+ DynamicExceptions,
+ DynamicExceptionRanges,
+ NoexceptExpr);
+
+ if (ESpecType != EST_None)
+ DeclEndLoc = ESpecRange.getEnd();
+
+ // Parse attribute-specifier[opt].
+ MaybeParseCXX0XAttributes(Attr, &DeclEndLoc);
+
+ // Parse trailing-return-type[opt].
+ ParsedType TrailingReturnType;
+ if (Tok.is(tok::arrow)) {
+ SourceRange Range;
+ TrailingReturnType = ParseTrailingReturnType(Range).get();
+ if (Range.getEnd().isValid())
+ DeclEndLoc = Range.getEnd();
+ }
+
+ PrototypeScope.Exit();
+
+ D.AddTypeInfo(DeclaratorChunk::getFunction(/*hasProto=*/true,
+ /*isVariadic=*/EllipsisLoc.isValid(),
+ EllipsisLoc,
+ ParamInfo.data(), ParamInfo.size(),
+ DS.getTypeQualifiers(),
+ /*RefQualifierIsLValueRef=*/true,
+ /*RefQualifierLoc=*/SourceLocation(),
+ /*ConstQualifierLoc=*/SourceLocation(),
+ /*VolatileQualifierLoc=*/SourceLocation(),
+ MutableLoc,
+ ESpecType, ESpecRange.getBegin(),
+ DynamicExceptions.data(),
+ DynamicExceptionRanges.data(),
+ DynamicExceptions.size(),
+ NoexceptExpr.isUsable() ?
+ NoexceptExpr.get() : 0,
+ DeclLoc, DeclEndLoc, D,
+ TrailingReturnType),
+ Attr, DeclEndLoc);
+ } else if (Tok.is(tok::kw_mutable) || Tok.is(tok::arrow)) {
+ // It's common to forget that one needs '()' before 'mutable' or the
+ // result type. Deal with this.
+ Diag(Tok, diag::err_lambda_missing_parens)
+ << Tok.is(tok::arrow)
+ << FixItHint::CreateInsertion(Tok.getLocation(), "() ");
+ SourceLocation DeclLoc = Tok.getLocation();
+ SourceLocation DeclEndLoc = DeclLoc;
+
+ // Parse 'mutable', if it's there.
+ SourceLocation MutableLoc;
+ if (Tok.is(tok::kw_mutable)) {
+ MutableLoc = ConsumeToken();
+ DeclEndLoc = MutableLoc;
+ }
+
+ // Parse the return type, if there is one.
+ ParsedType TrailingReturnType;
+ if (Tok.is(tok::arrow)) {
+ SourceRange Range;
+ TrailingReturnType = ParseTrailingReturnType(Range).get();
+ if (Range.getEnd().isValid())
+ DeclEndLoc = Range.getEnd();
+ }
+
+ ParsedAttributes Attr(AttrFactory);
+ D.AddTypeInfo(DeclaratorChunk::getFunction(/*hasProto=*/true,
+ /*isVariadic=*/false,
+ /*EllipsisLoc=*/SourceLocation(),
+ /*Params=*/0, /*NumParams=*/0,
+ /*TypeQuals=*/0,
+ /*RefQualifierIsLValueRef=*/true,
+ /*RefQualifierLoc=*/SourceLocation(),
+ /*ConstQualifierLoc=*/SourceLocation(),
+ /*VolatileQualifierLoc=*/SourceLocation(),
+ MutableLoc,
+ EST_None,
+ /*ESpecLoc=*/SourceLocation(),
+ /*Exceptions=*/0,
+ /*ExceptionRanges=*/0,
+ /*NumExceptions=*/0,
+ /*NoexceptExpr=*/0,
+ DeclLoc, DeclEndLoc, D,
+ TrailingReturnType),
+ Attr, DeclEndLoc);
+ }
+
+
+ // FIXME: Rename BlockScope -> ClosureScope if we decide to continue using
+ // it.
+ unsigned ScopeFlags = Scope::BlockScope | Scope::FnScope | Scope::DeclScope;
+ if (getCurScope()->getFlags() & Scope::ThisScope)
+ ScopeFlags |= Scope::ThisScope;
+ ParseScope BodyScope(this, ScopeFlags);
+
+ Actions.ActOnStartOfLambdaDefinition(Intro, D, getCurScope());
+
+ // Parse compound-statement.
+ if (!Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::err_expected_lambda_body);
+ Actions.ActOnLambdaError(LambdaBeginLoc, getCurScope());
+ return ExprError();
+ }
+
+ StmtResult Stmt(ParseCompoundStatementBody());
+ BodyScope.Exit();
+
+ if (!Stmt.isInvalid())
+ return Actions.ActOnLambdaExpr(LambdaBeginLoc, Stmt.take(), getCurScope());
+
+ Actions.ActOnLambdaError(LambdaBeginLoc, getCurScope());
+ return ExprError();
+}
+
+/// ParseCXXCasts - This handles the various ways to cast expressions to another
+/// type.
+///
+/// postfix-expression: [C++ 5.2p1]
+/// 'dynamic_cast' '<' type-name '>' '(' expression ')'
+/// 'static_cast' '<' type-name '>' '(' expression ')'
+/// 'reinterpret_cast' '<' type-name '>' '(' expression ')'
+/// 'const_cast' '<' type-name '>' '(' expression ')'
+///
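+/// For illustration only (not part of the original comment), with hypothetical
+/// Base/Derived classes:
+///   Derived *d = static_cast<Derived *>(basePtr);
+///   char *s = const_cast<char *>(constStr);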
+ExprResult Parser::ParseCXXCasts() {
+ tok::TokenKind Kind = Tok.getKind();
+ const char *CastName = 0; // For error messages
+
+ switch (Kind) {
+ default: llvm_unreachable("Unknown C++ cast!");
+ case tok::kw_const_cast: CastName = "const_cast"; break;
+ case tok::kw_dynamic_cast: CastName = "dynamic_cast"; break;
+ case tok::kw_reinterpret_cast: CastName = "reinterpret_cast"; break;
+ case tok::kw_static_cast: CastName = "static_cast"; break;
+ }
+
+ SourceLocation OpLoc = ConsumeToken();
+ SourceLocation LAngleBracketLoc = Tok.getLocation();
+
+ // Check for "<::" which is parsed as "[:". If found, fix token stream,
+ // diagnose error, suggest fix, and recover parsing.
+ Token Next = NextToken();
+ if (Tok.is(tok::l_square) && Tok.getLength() == 2 && Next.is(tok::colon) &&
+ AreTokensAdjacent(PP, Tok, Next))
+ FixDigraph(*this, PP, Tok, Next, Kind, /*AtDigraph*/true);
+
+ if (ExpectAndConsume(tok::less, diag::err_expected_less_after, CastName))
+ return ExprError();
+
+ // Parse the common declaration-specifiers piece.
+ DeclSpec DS(AttrFactory);
+ ParseSpecifierQualifierList(DS);
+
+ // Parse the abstract-declarator, if present.
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ ParseDeclarator(DeclaratorInfo);
+
+ SourceLocation RAngleBracketLoc = Tok.getLocation();
+
+ if (ExpectAndConsume(tok::greater, diag::err_expected_greater))
+ return ExprError(Diag(LAngleBracketLoc, diag::note_matching) << "<");
+
+ SourceLocation LParenLoc, RParenLoc;
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+
+ if (T.expectAndConsume(diag::err_expected_lparen_after, CastName))
+ return ExprError();
+
+ ExprResult Result = ParseExpression();
+
+ // Match the ')'.
+ T.consumeClose();
+
+ if (!Result.isInvalid() && !DeclaratorInfo.isInvalidType())
+ Result = Actions.ActOnCXXNamedCast(OpLoc, Kind,
+ LAngleBracketLoc, DeclaratorInfo,
+ RAngleBracketLoc,
+ T.getOpenLocation(), Result.take(),
+ T.getCloseLocation());
+
+ return move(Result);
+}
+
+/// ParseCXXTypeid - This handles the C++ typeid expression.
+///
+/// postfix-expression: [C++ 5.2p1]
+/// 'typeid' '(' expression ')'
+/// 'typeid' '(' type-id ')'
+///
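+/// For illustration only (not part of the original comment):
+///   typeid(int)    // type-id form
+///   typeid(*ptr)   // expression form; the operand is evaluated only if it
+///                  // is a glvalue of polymorphic class type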
+ExprResult Parser::ParseCXXTypeid() {
+ assert(Tok.is(tok::kw_typeid) && "Not 'typeid'!");
+
+ SourceLocation OpLoc = ConsumeToken();
+ SourceLocation LParenLoc, RParenLoc;
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+
+ // typeid expressions are always parenthesized.
+ if (T.expectAndConsume(diag::err_expected_lparen_after, "typeid"))
+ return ExprError();
+ LParenLoc = T.getOpenLocation();
+
+ ExprResult Result;
+
+ if (isTypeIdInParens()) {
+ TypeResult Ty = ParseTypeName();
+
+ // Match the ')'.
+ T.consumeClose();
+ RParenLoc = T.getCloseLocation();
+ if (Ty.isInvalid() || RParenLoc.isInvalid())
+ return ExprError();
+
+ Result = Actions.ActOnCXXTypeid(OpLoc, LParenLoc, /*isType=*/true,
+ Ty.get().getAsOpaquePtr(), RParenLoc);
+ } else {
+ // C++0x [expr.typeid]p3:
+ // When typeid is applied to an expression other than an lvalue of a
+ // polymorphic class type [...] The expression is an unevaluated
+ // operand (Clause 5).
+ //
+ // Note that we can't tell whether the expression is an lvalue of a
+ // polymorphic class type until after we've parsed the expression; we
+ // speculatively assume the subexpression is unevaluated, and fix it up
+ // later.
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
+ Result = ParseExpression();
+
+ // Match the ')'.
+ if (Result.isInvalid())
+ SkipUntil(tok::r_paren);
+ else {
+ T.consumeClose();
+ RParenLoc = T.getCloseLocation();
+ if (RParenLoc.isInvalid())
+ return ExprError();
+
+ Result = Actions.ActOnCXXTypeid(OpLoc, LParenLoc, /*isType=*/false,
+ Result.release(), RParenLoc);
+ }
+ }
+
+ return move(Result);
+}
+
+/// ParseCXXUuidof - This handles the Microsoft C++ __uuidof expression.
+///
+/// '__uuidof' '(' expression ')'
+/// '__uuidof' '(' type-id ')'
+///
+ExprResult Parser::ParseCXXUuidof() {
+ assert(Tok.is(tok::kw___uuidof) && "Not '__uuidof'!");
+
+ SourceLocation OpLoc = ConsumeToken();
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+
+ // __uuidof expressions are always parenthesized.
+ if (T.expectAndConsume(diag::err_expected_lparen_after, "__uuidof"))
+ return ExprError();
+
+ ExprResult Result;
+
+ if (isTypeIdInParens()) {
+ TypeResult Ty = ParseTypeName();
+
+ // Match the ')'.
+ T.consumeClose();
+
+ if (Ty.isInvalid())
+ return ExprError();
+
+ Result = Actions.ActOnCXXUuidof(OpLoc, T.getOpenLocation(), /*isType=*/true,
+ Ty.get().getAsOpaquePtr(),
+ T.getCloseLocation());
+ } else {
+ EnterExpressionEvaluationContext Unevaluated(Actions, Sema::Unevaluated);
+ Result = ParseExpression();
+
+ // Match the ')'.
+ if (Result.isInvalid())
+ SkipUntil(tok::r_paren);
+ else {
+ T.consumeClose();
+
+ Result = Actions.ActOnCXXUuidof(OpLoc, T.getOpenLocation(),
+ /*isType=*/false,
+ Result.release(), T.getCloseLocation());
+ }
+ }
+
+ return move(Result);
+}
+
+/// \brief Parse a C++ pseudo-destructor expression after the base,
+/// . or -> operator, and nested-name-specifier have already been
+/// parsed.
+///
+/// postfix-expression: [C++ 5.2]
+/// postfix-expression . pseudo-destructor-name
+/// postfix-expression -> pseudo-destructor-name
+///
+/// pseudo-destructor-name:
+/// ::[opt] nested-name-specifier[opt] type-name :: ~type-name
+/// ::[opt] nested-name-specifier template simple-template-id ::
+/// ~type-name
+/// ::[opt] nested-name-specifier[opt] ~type-name
+///
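+/// For illustration only (not part of the original comment): given a scalar
+/// typedef such as
+///   typedef int T;  T *p;
+/// both  p->~T()  and  p->T::~T()  are parsed here as pseudo-destructor
+/// expressions.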
+ExprResult
+Parser::ParseCXXPseudoDestructor(ExprArg Base, SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ CXXScopeSpec &SS,
+ ParsedType ObjectType) {
+ // We're parsing either a pseudo-destructor-name or a dependent
+ // member access that has the same form as a
+ // pseudo-destructor-name. We parse both in the same way and let
+ // the action model sort them out.
+ //
+ // Note that the ::[opt] nested-name-specifier[opt] has already
+ // been parsed, and if there was a simple-template-id, it has
+ // been coalesced into a template-id annotation token.
+ UnqualifiedId FirstTypeName;
+ SourceLocation CCLoc;
+ if (Tok.is(tok::identifier)) {
+ FirstTypeName.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
+ ConsumeToken();
+    assert(Tok.is(tok::coloncolon) && "ParseOptionalCXXScopeSpecifier fail");
+ CCLoc = ConsumeToken();
+ } else if (Tok.is(tok::annot_template_id)) {
+ // FIXME: retrieve TemplateKWLoc from template-id annotation and
+ // store it in the pseudo-dtor node (to be used when instantiating it).
+ FirstTypeName.setTemplateId(
+ (TemplateIdAnnotation *)Tok.getAnnotationValue());
+ ConsumeToken();
+    assert(Tok.is(tok::coloncolon) && "ParseOptionalCXXScopeSpecifier fail");
+ CCLoc = ConsumeToken();
+ } else {
+ FirstTypeName.setIdentifier(0, SourceLocation());
+ }
+
+ // Parse the tilde.
+ assert(Tok.is(tok::tilde) && "ParseOptionalCXXScopeSpecifier fail");
+ SourceLocation TildeLoc = ConsumeToken();
+
+ if (Tok.is(tok::kw_decltype) && !FirstTypeName.isValid() && SS.isEmpty()) {
+ DeclSpec DS(AttrFactory);
+ ParseDecltypeSpecifier(DS);
+ if (DS.getTypeSpecType() == TST_error)
+ return ExprError();
+ return Actions.ActOnPseudoDestructorExpr(getCurScope(), Base, OpLoc,
+ OpKind, TildeLoc, DS,
+ Tok.is(tok::l_paren));
+ }
+
+ if (!Tok.is(tok::identifier)) {
+ Diag(Tok, diag::err_destructor_tilde_identifier);
+ return ExprError();
+ }
+
+ // Parse the second type.
+ UnqualifiedId SecondTypeName;
+ IdentifierInfo *Name = Tok.getIdentifierInfo();
+ SourceLocation NameLoc = ConsumeToken();
+ SecondTypeName.setIdentifier(Name, NameLoc);
+
+ // If there is a '<', the second type name is a template-id. Parse
+ // it as such.
+ if (Tok.is(tok::less) &&
+ ParseUnqualifiedIdTemplateId(SS, SourceLocation(),
+ Name, NameLoc,
+ false, ObjectType, SecondTypeName,
+ /*AssumeTemplateName=*/true))
+ return ExprError();
+
+ return Actions.ActOnPseudoDestructorExpr(getCurScope(), Base,
+ OpLoc, OpKind,
+ SS, FirstTypeName, CCLoc,
+ TildeLoc, SecondTypeName,
+ Tok.is(tok::l_paren));
+}
+
+/// ParseCXXBoolLiteral - This handles the C++ Boolean literals.
+///
+/// boolean-literal: [C++ 2.13.5]
+/// 'true'
+/// 'false'
+ExprResult Parser::ParseCXXBoolLiteral() {
+ tok::TokenKind Kind = Tok.getKind();
+ return Actions.ActOnCXXBoolLiteral(ConsumeToken(), Kind);
+}
+
+/// ParseThrowExpression - This handles the C++ throw expression.
+///
+/// throw-expression: [C++ 15]
+/// 'throw' assignment-expression[opt]
+ExprResult Parser::ParseThrowExpression() {
+ assert(Tok.is(tok::kw_throw) && "Not throw!");
+ SourceLocation ThrowLoc = ConsumeToken(); // Eat the throw token.
+
+ // If the current token isn't the start of an assignment-expression,
+ // then the expression is not present. This handles things like:
+ // "C ? throw : (void)42", which is crazy but legal.
+ switch (Tok.getKind()) { // FIXME: move this predicate somewhere common.
+ case tok::semi:
+ case tok::r_paren:
+ case tok::r_square:
+ case tok::r_brace:
+ case tok::colon:
+ case tok::comma:
+ return Actions.ActOnCXXThrow(getCurScope(), ThrowLoc, 0);
+
+ default:
+ ExprResult Expr(ParseAssignmentExpression());
+ if (Expr.isInvalid()) return move(Expr);
+ return Actions.ActOnCXXThrow(getCurScope(), ThrowLoc, Expr.take());
+ }
+}
+
+/// ParseCXXThis - This handles the C++ 'this' pointer.
+///
+/// C++ 9.3.2: In the body of a non-static member function, the keyword this is
+/// a non-lvalue expression whose value is the address of the object for which
+/// the function is called.
+ExprResult Parser::ParseCXXThis() {
+ assert(Tok.is(tok::kw_this) && "Not 'this'!");
+ SourceLocation ThisLoc = ConsumeToken();
+ return Actions.ActOnCXXThis(ThisLoc);
+}
+
+/// ParseCXXTypeConstructExpression - Parse construction of a specified type.
+/// Can be interpreted either as function-style casting ("int(x)")
+/// or class type construction ("ClassType(x,y,z)")
+/// or creation of a value-initialized type ("int()").
+/// See [C++ 5.2.3].
+///
+/// postfix-expression: [C++ 5.2p1]
+/// simple-type-specifier '(' expression-list[opt] ')'
+/// [C++0x] simple-type-specifier braced-init-list
+/// typename-specifier '(' expression-list[opt] ')'
+/// [C++0x] typename-specifier braced-init-list
+///
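+/// For illustration only (not part of the original comment):
+///   int(3.5)              // function-style cast
+///   std::string(5, 'x')   // class type construction
+///   int()                 // value-initialization
+///   T{1, 2}               // C++11 braced form (T is a hypothetical type)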
+ExprResult
+Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ ParsedType TypeRep = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get();
+
+ assert((Tok.is(tok::l_paren) ||
+ (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)))
+ && "Expected '(' or '{'!");
+
+ if (Tok.is(tok::l_brace)) {
+ ExprResult Init = ParseBraceInitializer();
+ if (Init.isInvalid())
+ return Init;
+ Expr *InitList = Init.take();
+ return Actions.ActOnCXXTypeConstructExpr(TypeRep, SourceLocation(),
+ MultiExprArg(&InitList, 1),
+ SourceLocation());
+ } else {
+ GreaterThanIsOperatorScope G(GreaterThanIsOperator, true);
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ ExprVector Exprs(Actions);
+ CommaLocsTy CommaLocs;
+
+ if (Tok.isNot(tok::r_paren)) {
+ if (ParseExpressionList(Exprs, CommaLocs)) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+ }
+
+ // Match the ')'.
+ T.consumeClose();
+
+ // TypeRep could be null, if it references an invalid typedef.
+ if (!TypeRep)
+ return ExprError();
+
+ assert((Exprs.size() == 0 || Exprs.size()-1 == CommaLocs.size())&&
+ "Unexpected number of commas!");
+ return Actions.ActOnCXXTypeConstructExpr(TypeRep, T.getOpenLocation(),
+ move_arg(Exprs),
+ T.getCloseLocation());
+ }
+}
+
+/// ParseCXXCondition - if/switch/while condition expression.
+///
+/// condition:
+/// expression
+/// type-specifier-seq declarator '=' assignment-expression
+/// [C++11] type-specifier-seq declarator '=' initializer-clause
+/// [C++11] type-specifier-seq declarator braced-init-list
+/// [GNU] type-specifier-seq declarator simple-asm-expr[opt] attributes[opt]
+/// '=' assignment-expression
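+///
+/// For illustration only (not part of the original comment): both forms occur
+/// in code such as
+///   if (x > 0) ...                // expression condition
+///   while (T *p = next()) ...     // declaration condition (hypothetical
+///                                 // T and next())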
+///
+/// \param ExprOut if the condition was parsed as an expression, the
+/// parsed expression.
+///
+/// \param DeclOut if the condition was parsed as a declaration, the
+/// parsed declaration.
+///
+/// \param Loc The location of the start of the statement that requires this
+/// condition, e.g., the "for" in a for loop.
+///
+/// \param ConvertToBoolean Whether the condition expression should be
+/// converted to a boolean value.
+///
+/// \returns true if there was a parsing error, false otherwise.
+bool Parser::ParseCXXCondition(ExprResult &ExprOut,
+ Decl *&DeclOut,
+ SourceLocation Loc,
+ bool ConvertToBoolean) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Condition);
+ cutOffParsing();
+ return true;
+ }
+
+ if (!isCXXConditionDeclaration()) {
+ // Parse the expression.
+ ExprOut = ParseExpression(); // expression
+ DeclOut = 0;
+ if (ExprOut.isInvalid())
+ return true;
+
+ // If required, convert to a boolean value.
+ if (ConvertToBoolean)
+ ExprOut
+ = Actions.ActOnBooleanCondition(getCurScope(), Loc, ExprOut.get());
+ return ExprOut.isInvalid();
+ }
+
+ // type-specifier-seq
+ DeclSpec DS(AttrFactory);
+ ParseSpecifierQualifierList(DS);
+
+ // declarator
+ Declarator DeclaratorInfo(DS, Declarator::ConditionContext);
+ ParseDeclarator(DeclaratorInfo);
+
+ // simple-asm-expr[opt]
+ if (Tok.is(tok::kw_asm)) {
+ SourceLocation Loc;
+ ExprResult AsmLabel(ParseSimpleAsm(&Loc));
+ if (AsmLabel.isInvalid()) {
+ SkipUntil(tok::semi);
+ return true;
+ }
+ DeclaratorInfo.setAsmLabel(AsmLabel.release());
+ DeclaratorInfo.SetRangeEnd(Loc);
+ }
+
+ // If attributes are present, parse them.
+ MaybeParseGNUAttributes(DeclaratorInfo);
+
+ // Type-check the declaration itself.
+ DeclResult Dcl = Actions.ActOnCXXConditionDeclaration(getCurScope(),
+ DeclaratorInfo);
+ DeclOut = Dcl.get();
+ ExprOut = ExprError();
+
+ // '=' assignment-expression
+ // If a '==' or '+=' is found, suggest a fixit to '='.
+ bool CopyInitialization = isTokenEqualOrEqualTypo();
+ if (CopyInitialization)
+ ConsumeToken();
+
+ ExprResult InitExpr = ExprError();
+ if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace)) {
+ Diag(Tok.getLocation(),
+ diag::warn_cxx98_compat_generalized_initializer_lists);
+ InitExpr = ParseBraceInitializer();
+ } else if (CopyInitialization) {
+ InitExpr = ParseAssignmentExpression();
+ } else if (Tok.is(tok::l_paren)) {
+ // This was probably an attempt to initialize the variable.
+ SourceLocation LParen = ConsumeParen(), RParen = LParen;
+ if (SkipUntil(tok::r_paren, true, /*DontConsume=*/true))
+ RParen = ConsumeParen();
+ Diag(DeclOut ? DeclOut->getLocation() : LParen,
+ diag::err_expected_init_in_condition_lparen)
+ << SourceRange(LParen, RParen);
+ } else {
+ Diag(DeclOut ? DeclOut->getLocation() : Tok.getLocation(),
+ diag::err_expected_init_in_condition);
+ }
+
+ if (!InitExpr.isInvalid())
+ Actions.AddInitializerToDecl(DeclOut, InitExpr.take(), !CopyInitialization,
+ DS.getTypeSpecType() == DeclSpec::TST_auto);
+
+ // FIXME: Build a reference to this declaration? Convert it to bool?
+ // (This is currently handled by Sema).
+
+ Actions.FinalizeDeclaration(DeclOut);
+
+ return false;
+}
+
+/// \brief Determine whether the current token starts a C++
+/// simple-type-specifier.
+bool Parser::isCXXSimpleTypeSpecifier() const {
+ switch (Tok.getKind()) {
+ case tok::annot_typename:
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_int:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_bool:
+ case tok::kw_decltype:
+ case tok::kw_typeof:
+ case tok::kw___underlying_type:
+ return true;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
+/// This should only be called when the current token is known to be part of
+/// a simple-type-specifier.
+///
+/// simple-type-specifier:
+/// '::'[opt] nested-name-specifier[opt] type-name
+/// '::'[opt] nested-name-specifier 'template' simple-template-id [TODO]
+/// char
+/// wchar_t
+/// bool
+/// short
+/// int
+/// long
+/// signed
+/// unsigned
+/// float
+/// double
+/// void
+/// [GNU] typeof-specifier
+/// [C++0x] auto [TODO]
+///
+/// type-name:
+/// class-name
+/// enum-name
+/// typedef-name
+///
+void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
+ DS.SetRangeStart(Tok.getLocation());
+ const char *PrevSpec;
+ unsigned DiagID;
+ SourceLocation Loc = Tok.getLocation();
+
+ switch (Tok.getKind()) {
+ case tok::identifier: // foo::bar
+ case tok::coloncolon: // ::foo::bar
+ llvm_unreachable("Annotation token should already be formed!");
+ default:
+ llvm_unreachable("Not a simple-type-specifier token!");
+
+ // type-name
+ case tok::annot_typename: {
+ if (getTypeAnnotation(Tok))
+ DS.SetTypeSpecType(DeclSpec::TST_typename, Loc, PrevSpec, DiagID,
+ getTypeAnnotation(Tok));
+ else
+ DS.SetTypeSpecError();
+
+ DS.SetRangeEnd(Tok.getAnnotationEndLoc());
+ ConsumeToken();
+
+ // Objective-C supports syntax of the form 'id<proto1,proto2>' where 'id'
+ // is a specific typedef and 'itf<proto1,proto2>' where 'itf' is an
+ // Objective-C interface. If we don't have Objective-C or a '<', this is
+ // just a normal reference to a typedef name.
+ if (Tok.is(tok::less) && getLangOpts().ObjC1)
+ ParseObjCProtocolQualifiers(DS);
+
+ DS.Finish(Diags, PP);
+ return;
+ }
+
+ // builtin types
+ case tok::kw_short:
+ DS.SetTypeSpecWidth(DeclSpec::TSW_short, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_long:
+ DS.SetTypeSpecWidth(DeclSpec::TSW_long, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw___int64:
+ DS.SetTypeSpecWidth(DeclSpec::TSW_longlong, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_signed:
+ DS.SetTypeSpecSign(DeclSpec::TSS_signed, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_unsigned:
+ DS.SetTypeSpecSign(DeclSpec::TSS_unsigned, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_void:
+ DS.SetTypeSpecType(DeclSpec::TST_void, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_char:
+ DS.SetTypeSpecType(DeclSpec::TST_char, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_int:
+ DS.SetTypeSpecType(DeclSpec::TST_int, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw___int128:
+ DS.SetTypeSpecType(DeclSpec::TST_int128, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_half:
+ DS.SetTypeSpecType(DeclSpec::TST_half, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_float:
+ DS.SetTypeSpecType(DeclSpec::TST_float, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_double:
+ DS.SetTypeSpecType(DeclSpec::TST_double, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_wchar_t:
+ DS.SetTypeSpecType(DeclSpec::TST_wchar, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_char16_t:
+ DS.SetTypeSpecType(DeclSpec::TST_char16, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_char32_t:
+ DS.SetTypeSpecType(DeclSpec::TST_char32, Loc, PrevSpec, DiagID);
+ break;
+ case tok::kw_bool:
+ DS.SetTypeSpecType(DeclSpec::TST_bool, Loc, PrevSpec, DiagID);
+ break;
+ case tok::annot_decltype:
+ case tok::kw_decltype:
+ DS.SetRangeEnd(ParseDecltypeSpecifier(DS));
+ return DS.Finish(Diags, PP);
+
+ // GNU typeof support.
+ case tok::kw_typeof:
+ ParseTypeofSpecifier(DS);
+ DS.Finish(Diags, PP);
+ return;
+ }
+ if (Tok.is(tok::annot_typename))
+ DS.SetRangeEnd(Tok.getAnnotationEndLoc());
+ else
+ DS.SetRangeEnd(Tok.getLocation());
+ ConsumeToken();
+ DS.Finish(Diags, PP);
+}
+
+/// ParseCXXTypeSpecifierSeq - Parse a C++ type-specifier-seq (C++
+/// [dcl.name]), which is a non-empty sequence of type-specifiers,
+/// e.g., "const short int". Note that the DeclSpec is *not* finished
+/// by parsing the type-specifier-seq, because these sequences are
+/// typically followed by some form of declarator. Returns true and
+/// emits diagnostics if this is not a type-specifier-seq, false
+/// otherwise.
+///
+/// type-specifier-seq: [C++ 8.1]
+/// type-specifier type-specifier-seq[opt]
+///
+bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
+ ParseSpecifierQualifierList(DS, AS_none, DSC_type_specifier);
+ DS.Finish(Diags, PP);
+ return false;
+}
+
+/// \brief Finish parsing a C++ unqualified-id that is a template-id of
+/// some form.
+///
+/// This routine is invoked when a '<' is encountered after an identifier or
+/// operator-function-id is parsed by \c ParseUnqualifiedId() to determine
+/// whether the unqualified-id is actually a template-id. This routine will
+/// then parse the template arguments and form the appropriate template-id to
+/// return to the caller.
+///
+/// \param SS the nested-name-specifier that precedes this template-id, if
+/// we're actually parsing a qualified-id.
+///
+/// \param Name for constructor and destructor names, this is the actual
+/// identifier that may be a template-name.
+///
+/// \param NameLoc the location of the class-name in a constructor or
+/// destructor.
+///
+/// \param EnteringContext whether we're entering the scope of the
+/// nested-name-specifier.
+///
+/// \param ObjectType if this unqualified-id occurs within a member access
+/// expression, the type of the base object whose member is being accessed.
+///
+/// \param Id as input, describes the template-name or operator-function-id
+/// that precedes the '<'. If template arguments were parsed successfully,
+/// will be updated with the template-id.
+///
+/// \param AssumeTemplateId When true, this routine will assume that the name
+/// refers to a template without performing name lookup to verify.
+///
+/// \returns true if a parse error occurred, false otherwise.
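+///
+/// For illustration, the template-id forms completed here (template names
+/// are placeholders):
+/// \code
+///   set<int>          // identifier followed by '<'
+///   operator+<int>    // operator-function-id followed by '<'
+///   ~Wrapper<int>     // destructor name that is a simple-template-id
+/// \endcode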
+bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ bool EnteringContext,
+ ParsedType ObjectType,
+ UnqualifiedId &Id,
+ bool AssumeTemplateId) {
+ assert((AssumeTemplateId || Tok.is(tok::less)) &&
+ "Expected '<' to finish parsing a template-id");
+
+ TemplateTy Template;
+ TemplateNameKind TNK = TNK_Non_template;
+ switch (Id.getKind()) {
+ case UnqualifiedId::IK_Identifier:
+ case UnqualifiedId::IK_OperatorFunctionId:
+ case UnqualifiedId::IK_LiteralOperatorId:
+ if (AssumeTemplateId) {
+ TNK = Actions.ActOnDependentTemplateName(getCurScope(), SS, TemplateKWLoc,
+ Id, ObjectType, EnteringContext,
+ Template);
+ if (TNK == TNK_Non_template)
+ return true;
+ } else {
+ bool MemberOfUnknownSpecialization;
+ TNK = Actions.isTemplateName(getCurScope(), SS,
+ TemplateKWLoc.isValid(), Id,
+ ObjectType, EnteringContext, Template,
+ MemberOfUnknownSpecialization);
+
+ if (TNK == TNK_Non_template && MemberOfUnknownSpecialization &&
+ ObjectType && IsTemplateArgumentList()) {
+ // We have something like t->getAs<T>(), where getAs is a
+ // member of an unknown specialization. However, this will only
+ // parse correctly as a template, so suggest the keyword 'template'
+ // before 'getAs' and treat this as a dependent template name.
+ std::string Name;
+ if (Id.getKind() == UnqualifiedId::IK_Identifier)
+ Name = Id.Identifier->getName();
+ else {
+ Name = "operator ";
+ if (Id.getKind() == UnqualifiedId::IK_OperatorFunctionId)
+ Name += getOperatorSpelling(Id.OperatorFunctionId.Operator);
+ else
+ Name += Id.Identifier->getName();
+ }
+ Diag(Id.StartLocation, diag::err_missing_dependent_template_keyword)
+ << Name
+ << FixItHint::CreateInsertion(Id.StartLocation, "template ");
+ TNK = Actions.ActOnDependentTemplateName(getCurScope(),
+ SS, TemplateKWLoc, Id,
+ ObjectType, EnteringContext,
+ Template);
+ if (TNK == TNK_Non_template)
+ return true;
+ }
+ }
+ break;
+
+ case UnqualifiedId::IK_ConstructorName: {
+ UnqualifiedId TemplateName;
+ bool MemberOfUnknownSpecialization;
+ TemplateName.setIdentifier(Name, NameLoc);
+ TNK = Actions.isTemplateName(getCurScope(), SS, TemplateKWLoc.isValid(),
+ TemplateName, ObjectType,
+ EnteringContext, Template,
+ MemberOfUnknownSpecialization);
+ break;
+ }
+
+ case UnqualifiedId::IK_DestructorName: {
+ UnqualifiedId TemplateName;
+ bool MemberOfUnknownSpecialization;
+ TemplateName.setIdentifier(Name, NameLoc);
+ if (ObjectType) {
+ TNK = Actions.ActOnDependentTemplateName(getCurScope(),
+ SS, TemplateKWLoc, TemplateName,
+ ObjectType, EnteringContext,
+ Template);
+ if (TNK == TNK_Non_template)
+ return true;
+ } else {
+ TNK = Actions.isTemplateName(getCurScope(), SS, TemplateKWLoc.isValid(),
+ TemplateName, ObjectType,
+ EnteringContext, Template,
+ MemberOfUnknownSpecialization);
+
+ if (TNK == TNK_Non_template && !Id.DestructorName.get()) {
+ Diag(NameLoc, diag::err_destructor_template_id)
+ << Name << SS.getRange();
+ return true;
+ }
+ }
+ break;
+ }
+
+ default:
+ return false;
+ }
+
+ if (TNK == TNK_Non_template)
+ return false;
+
+ // Parse the enclosed template argument list.
+ SourceLocation LAngleLoc, RAngleLoc;
+ TemplateArgList TemplateArgs;
+ if (Tok.is(tok::less) &&
+ ParseTemplateIdAfterTemplateName(Template, Id.StartLocation,
+ SS, true, LAngleLoc,
+ TemplateArgs,
+ RAngleLoc))
+ return true;
+
+ if (Id.getKind() == UnqualifiedId::IK_Identifier ||
+ Id.getKind() == UnqualifiedId::IK_OperatorFunctionId ||
+ Id.getKind() == UnqualifiedId::IK_LiteralOperatorId) {
+ // Form a parsed representation of the template-id to be stored in the
+ // UnqualifiedId.
+ TemplateIdAnnotation *TemplateId
+ = TemplateIdAnnotation::Allocate(TemplateArgs.size());
+
+ if (Id.getKind() == UnqualifiedId::IK_Identifier) {
+ TemplateId->Name = Id.Identifier;
+ TemplateId->Operator = OO_None;
+ TemplateId->TemplateNameLoc = Id.StartLocation;
+ } else {
+ TemplateId->Name = 0;
+ TemplateId->Operator = Id.OperatorFunctionId.Operator;
+ TemplateId->TemplateNameLoc = Id.StartLocation;
+ }
+
+ TemplateId->SS = SS;
+ TemplateId->TemplateKWLoc = TemplateKWLoc;
+ TemplateId->Template = Template;
+ TemplateId->Kind = TNK;
+ TemplateId->LAngleLoc = LAngleLoc;
+ TemplateId->RAngleLoc = RAngleLoc;
+ ParsedTemplateArgument *Args = TemplateId->getTemplateArgs();
+ for (unsigned Arg = 0, ArgEnd = TemplateArgs.size();
+ Arg != ArgEnd; ++Arg)
+ Args[Arg] = TemplateArgs[Arg];
+
+ Id.setTemplateId(TemplateId);
+ return false;
+ }
+
+ // Bundle the template arguments together.
+ ASTTemplateArgsPtr TemplateArgsPtr(Actions, TemplateArgs.data(),
+ TemplateArgs.size());
+
+ // Constructor and destructor names.
+ TypeResult Type
+ = Actions.ActOnTemplateIdType(SS, TemplateKWLoc,
+ Template, NameLoc,
+ LAngleLoc, TemplateArgsPtr, RAngleLoc,
+ /*IsCtorOrDtorName=*/true);
+ if (Type.isInvalid())
+ return true;
+
+ if (Id.getKind() == UnqualifiedId::IK_ConstructorName)
+ Id.setConstructorName(Type.get(), NameLoc, RAngleLoc);
+ else
+ Id.setDestructorName(Id.StartLocation, Type.get(), RAngleLoc);
+
+ return false;
+}
+
+/// \brief Parse an operator-function-id or conversion-function-id as part
+/// of a C++ unqualified-id.
+///
+/// This routine is responsible only for parsing the operator-function-id or
+/// conversion-function-id; it does not handle template arguments in any way.
+///
+/// \code
+/// operator-function-id: [C++ 13.5]
+/// 'operator' operator
+///
+/// operator: one of
+/// new delete new[] delete[]
+/// + - * / % ^ & | ~
+/// ! = < > += -= *= /= %=
+/// ^= &= |= << >> >>= <<= == !=
+/// <= >= && || ++ -- , ->* ->
+/// () []
+///
+/// conversion-function-id: [C++ 12.3.2]
+/// operator conversion-type-id
+///
+/// conversion-type-id:
+/// type-specifier-seq conversion-declarator[opt]
+///
+/// conversion-declarator:
+/// ptr-operator conversion-declarator[opt]
+/// \endcode
+///
+/// \param SS The nested-name-specifier that preceded this unqualified-id.
+/// If non-empty, then we are parsing the unqualified-id of a qualified-id.
+///
+/// \param EnteringContext whether we are entering the scope of the
+/// nested-name-specifier.
+///
+/// \param ObjectType if this unqualified-id occurs within a member access
+/// expression, the type of the base object whose member is being accessed.
+///
+/// \param Result on a successful parse, contains the parsed unqualified-id.
+///
+/// \returns true if parsing fails, false otherwise.
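+///
+/// Illustrative operator names handled here (types and suffixes are
+/// placeholders):
+/// \code
+///   operator new[]     // array form of operator new
+///   operator+=         // one of the overloadable operator tokens
+///   operator()         // function-call operator
+///   operator "" _km    // C++11 literal-operator-id
+///   operator const T*  // conversion-function-id with a ptr-operator
+/// \endcode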
+bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
+ ParsedType ObjectType,
+ UnqualifiedId &Result) {
+ assert(Tok.is(tok::kw_operator) && "Expected 'operator' keyword");
+
+ // Consume the 'operator' keyword.
+ SourceLocation KeywordLoc = ConsumeToken();
+
+ // Determine what kind of operator name we have.
+ unsigned SymbolIdx = 0;
+ SourceLocation SymbolLocations[3];
+ OverloadedOperatorKind Op = OO_None;
+ switch (Tok.getKind()) {
+ case tok::kw_new:
+ case tok::kw_delete: {
+ bool isNew = Tok.getKind() == tok::kw_new;
+ // Consume the 'new' or 'delete'.
+ SymbolLocations[SymbolIdx++] = ConsumeToken();
+ // Check for array new/delete.
+ if (Tok.is(tok::l_square) &&
+ (!getLangOpts().CPlusPlus0x || NextToken().isNot(tok::l_square))) {
+ // Consume the '[' and ']'.
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid())
+ return true;
+
+ SymbolLocations[SymbolIdx++] = T.getOpenLocation();
+ SymbolLocations[SymbolIdx++] = T.getCloseLocation();
+ Op = isNew? OO_Array_New : OO_Array_Delete;
+ } else {
+ Op = isNew? OO_New : OO_Delete;
+ }
+ break;
+ }
+
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ case tok::Token: \
+ SymbolLocations[SymbolIdx++] = ConsumeToken(); \
+ Op = OO_##Name; \
+ break;
+#define OVERLOADED_OPERATOR_MULTI(Name,Spelling,Unary,Binary,MemberOnly)
+#include "clang/Basic/OperatorKinds.def"
+
+ case tok::l_paren: {
+ // Consume the '(' and ')'.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid())
+ return true;
+
+ SymbolLocations[SymbolIdx++] = T.getOpenLocation();
+ SymbolLocations[SymbolIdx++] = T.getCloseLocation();
+ Op = OO_Call;
+ break;
+ }
+
+ case tok::l_square: {
+ // Consume the '[' and ']'.
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid())
+ return true;
+
+ SymbolLocations[SymbolIdx++] = T.getOpenLocation();
+ SymbolLocations[SymbolIdx++] = T.getCloseLocation();
+ Op = OO_Subscript;
+ break;
+ }
+
+ case tok::code_completion: {
+ // Code completion for the operator name.
+ Actions.CodeCompleteOperatorName(getCurScope());
+ cutOffParsing();
+ // Don't try to parse any further.
+ return true;
+ }
+
+ default:
+ break;
+ }
+
+ if (Op != OO_None) {
+ // We have parsed an operator-function-id.
+ Result.setOperatorFunctionId(KeywordLoc, Op, SymbolLocations);
+ return false;
+ }
+
+ // Parse a literal-operator-id.
+ //
+ // literal-operator-id: [C++0x 13.5.8]
+ // operator "" identifier
+
+ if (getLangOpts().CPlusPlus0x && isTokenStringLiteral()) {
+ Diag(Tok.getLocation(), diag::warn_cxx98_compat_literal_operator);
+
+ SourceLocation DiagLoc;
+ unsigned DiagId = 0;
+
+ // We're past translation phase 6, so perform string literal concatenation
+ // before checking for "".
+ llvm::SmallVector<Token, 4> Toks;
+ llvm::SmallVector<SourceLocation, 4> TokLocs;
+ while (isTokenStringLiteral()) {
+ if (!Tok.is(tok::string_literal) && !DiagId) {
+ DiagLoc = Tok.getLocation();
+ DiagId = diag::err_literal_operator_string_prefix;
+ }
+ Toks.push_back(Tok);
+ TokLocs.push_back(ConsumeStringToken());
+ }
+
+ StringLiteralParser Literal(Toks.data(), Toks.size(), PP);
+ if (Literal.hadError)
+ return true;
+
+ // Grab the literal operator's suffix, which will be either the next token
+ // or a ud-suffix from the string literal.
+ IdentifierInfo *II = 0;
+ SourceLocation SuffixLoc;
+ if (!Literal.getUDSuffix().empty()) {
+ II = &PP.getIdentifierTable().get(Literal.getUDSuffix());
+ SuffixLoc =
+ Lexer::AdvanceToTokenCharacter(TokLocs[Literal.getUDSuffixToken()],
+ Literal.getUDSuffixOffset(),
+ PP.getSourceManager(), getLangOpts());
+ // This form is not permitted by the standard (yet).
+ DiagLoc = SuffixLoc;
+ DiagId = diag::err_literal_operator_missing_space;
+ } else if (Tok.is(tok::identifier)) {
+ II = Tok.getIdentifierInfo();
+ SuffixLoc = ConsumeToken();
+ TokLocs.push_back(SuffixLoc);
+ } else {
+ Diag(Tok.getLocation(), diag::err_expected_ident);
+ return true;
+ }
+
+ // The string literal must be empty.
+ if (!Literal.GetString().empty() || Literal.Pascal) {
+ DiagLoc = TokLocs.front();
+ DiagId = diag::err_literal_operator_string_not_empty;
+ }
+
+ if (DiagId) {
+ // This isn't a valid literal-operator-id, but we think we know
+ // what the user meant. Tell them what they should have written.
+ llvm::SmallString<32> Str;
+ Str += "\"\" ";
+ Str += II->getName();
+ Diag(DiagLoc, DiagId) << FixItHint::CreateReplacement(
+ SourceRange(TokLocs.front(), TokLocs.back()), Str);
+ }
+
+ Result.setLiteralOperatorId(II, KeywordLoc, SuffixLoc);
+ return false;
+ }
+
+ // Parse a conversion-function-id.
+ //
+ // conversion-function-id: [C++ 12.3.2]
+ // operator conversion-type-id
+ //
+ // conversion-type-id:
+ // type-specifier-seq conversion-declarator[opt]
+ //
+ // conversion-declarator:
+ // ptr-operator conversion-declarator[opt]
+
+ // Parse the type-specifier-seq.
+ DeclSpec DS(AttrFactory);
+ if (ParseCXXTypeSpecifierSeq(DS)) // FIXME: ObjectType?
+ return true;
+
+ // Parse the conversion-declarator, which is merely a sequence of
+ // ptr-operators.
+ Declarator D(DS, Declarator::TypeNameContext);
+ ParseDeclaratorInternal(D, /*DirectDeclParser=*/0);
+
+ // Finish up the type.
+ TypeResult Ty = Actions.ActOnTypeName(getCurScope(), D);
+ if (Ty.isInvalid())
+ return true;
+
+ // Note that this is a conversion-function-id.
+ Result.setConversionFunctionId(KeywordLoc, Ty.get(),
+ D.getSourceRange().getEnd());
+ return false;
+}
+
+/// \brief Parse a C++ unqualified-id (or a C identifier), which describes the
+/// name of an entity.
+///
+/// \code
+/// unqualified-id: [C++ expr.prim.general]
+/// identifier
+/// operator-function-id
+/// conversion-function-id
+/// [C++0x] literal-operator-id [TODO]
+/// ~ class-name
+/// template-id
+///
+/// \endcode
+///
+/// \param SS The nested-name-specifier that preceded this unqualified-id.
+/// If non-empty, then we are parsing the unqualified-id of a qualified-id.
+///
+/// \param EnteringContext whether we are entering the scope of the
+/// nested-name-specifier.
+///
+/// \param AllowDestructorName whether we allow parsing of a destructor name.
+///
+/// \param AllowConstructorName whether we allow parsing a constructor name.
+///
+/// \param ObjectType if this unqualified-id occurs within a member access
+/// expression, the type of the base object whose member is being accessed.
+///
+/// \param Result on a successful parse, contains the parsed unqualified-id.
+///
+/// \returns true if parsing fails, false otherwise.
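+///
+/// One example of each alternative, with placeholder names:
+/// \code
+///   foo            // identifier
+///   operator<<     // operator-function-id
+///   operator bool  // conversion-function-id
+///   ~Widget        // ~ class-name
+///   swap<int>      // template-id
+/// \endcode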
+bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
+ bool AllowDestructorName,
+ bool AllowConstructorName,
+ ParsedType ObjectType,
+ SourceLocation& TemplateKWLoc,
+ UnqualifiedId &Result) {
+
+ // Handle 'A::template B'. This is for template-ids which have not
+ // already been annotated by ParseOptionalCXXScopeSpecifier().
+ bool TemplateSpecified = false;
+ if (getLangOpts().CPlusPlus && Tok.is(tok::kw_template) &&
+ (ObjectType || SS.isSet())) {
+ TemplateSpecified = true;
+ TemplateKWLoc = ConsumeToken();
+ }
+
+ // unqualified-id:
+ // identifier
+ // template-id (when it hasn't already been annotated)
+ if (Tok.is(tok::identifier)) {
+ // Consume the identifier.
+ IdentifierInfo *Id = Tok.getIdentifierInfo();
+ SourceLocation IdLoc = ConsumeToken();
+
+ if (!getLangOpts().CPlusPlus) {
+ // If we're not in C++, only identifiers matter. Record the
+ // identifier and return.
+ Result.setIdentifier(Id, IdLoc);
+ return false;
+ }
+
+ if (AllowConstructorName &&
+ Actions.isCurrentClassName(*Id, getCurScope(), &SS)) {
+ // We have parsed a constructor name.
+ ParsedType Ty = Actions.getTypeName(*Id, IdLoc, getCurScope(),
+ &SS, false, false,
+ ParsedType(),
+ /*IsCtorOrDtorName=*/true,
+ /*NonTrivialTypeSourceInfo=*/true);
+ Result.setConstructorName(Ty, IdLoc, IdLoc);
+ } else {
+ // We have parsed an identifier.
+ Result.setIdentifier(Id, IdLoc);
+ }
+
+ // If the next token is a '<', we may have a template.
+ if (TemplateSpecified || Tok.is(tok::less))
+ return ParseUnqualifiedIdTemplateId(SS, TemplateKWLoc, Id, IdLoc,
+ EnteringContext, ObjectType,
+ Result, TemplateSpecified);
+
+ return false;
+ }
+
+ // unqualified-id:
+ // template-id (already parsed and annotated)
+ if (Tok.is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+
+ // If the template-name names the current class, then this is a constructor
+ if (AllowConstructorName && TemplateId->Name &&
+ Actions.isCurrentClassName(*TemplateId->Name, getCurScope(), &SS)) {
+ if (SS.isSet()) {
+ // C++ [class.qual]p2 specifies that a qualified template-name
+ // is taken as the constructor name where a constructor can be
+ // declared. Thus, the template arguments are extraneous, so
+ // complain about them and remove them entirely.
+ Diag(TemplateId->TemplateNameLoc,
+ diag::err_out_of_line_constructor_template_id)
+ << TemplateId->Name
+ << FixItHint::CreateRemoval(
+ SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc));
+ ParsedType Ty = Actions.getTypeName(*TemplateId->Name,
+ TemplateId->TemplateNameLoc,
+ getCurScope(),
+ &SS, false, false,
+ ParsedType(),
+ /*IsCtorOrDtorName=*/true,
+ /*NontrivialTypeSourceInfo=*/true);
+ Result.setConstructorName(Ty, TemplateId->TemplateNameLoc,
+ TemplateId->RAngleLoc);
+ ConsumeToken();
+ return false;
+ }
+
+ Result.setConstructorTemplateId(TemplateId);
+ ConsumeToken();
+ return false;
+ }
+
+ // We have already parsed a template-id; consume the annotation token as
+ // our unqualified-id.
+ Result.setTemplateId(TemplateId);
+ TemplateKWLoc = TemplateId->TemplateKWLoc;
+ ConsumeToken();
+ return false;
+ }
+
+ // unqualified-id:
+ // operator-function-id
+ // conversion-function-id
+ if (Tok.is(tok::kw_operator)) {
+ if (ParseUnqualifiedIdOperator(SS, EnteringContext, ObjectType, Result))
+ return true;
+
+ // If we have an operator-function-id or a literal-operator-id and the next
+ // token is a '<', we may have a
+ //
+ // template-id:
+ // operator-function-id < template-argument-list[opt] >
+ if ((Result.getKind() == UnqualifiedId::IK_OperatorFunctionId ||
+ Result.getKind() == UnqualifiedId::IK_LiteralOperatorId) &&
+ (TemplateSpecified || Tok.is(tok::less)))
+ return ParseUnqualifiedIdTemplateId(SS, TemplateKWLoc,
+ 0, SourceLocation(),
+ EnteringContext, ObjectType,
+ Result, TemplateSpecified);
+
+ return false;
+ }
+
+ if (getLangOpts().CPlusPlus &&
+ (AllowDestructorName || SS.isSet()) && Tok.is(tok::tilde)) {
+ // C++ [expr.unary.op]p10:
+ // There is an ambiguity in the unary-expression ~X(), where X is a
+ // class-name. The ambiguity is resolved in favor of treating ~ as a
+ // unary complement rather than treating ~X as referring to a destructor.
+
+ // Parse the '~'.
+ SourceLocation TildeLoc = ConsumeToken();
+
+ if (SS.isEmpty() && Tok.is(tok::kw_decltype)) {
+ DeclSpec DS(AttrFactory);
+ SourceLocation EndLoc = ParseDecltypeSpecifier(DS);
+ if (ParsedType Type = Actions.getDestructorType(DS, ObjectType)) {
+ Result.setDestructorName(TildeLoc, Type, EndLoc);
+ return false;
+ }
+ return true;
+ }
+
+ // Parse the class-name.
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_destructor_tilde_identifier);
+ return true;
+ }
+
+ // Parse the class-name (or template-name in a simple-template-id).
+ IdentifierInfo *ClassName = Tok.getIdentifierInfo();
+ SourceLocation ClassNameLoc = ConsumeToken();
+
+ if (TemplateSpecified || Tok.is(tok::less)) {
+ Result.setDestructorName(TildeLoc, ParsedType(), ClassNameLoc);
+ return ParseUnqualifiedIdTemplateId(SS, TemplateKWLoc,
+ ClassName, ClassNameLoc,
+ EnteringContext, ObjectType,
+ Result, TemplateSpecified);
+ }
+
+ // Note that this is a destructor name.
+ ParsedType Ty = Actions.getDestructorName(TildeLoc, *ClassName,
+ ClassNameLoc, getCurScope(),
+ SS, ObjectType,
+ EnteringContext);
+ if (!Ty)
+ return true;
+
+ Result.setDestructorName(TildeLoc, Ty, ClassNameLoc);
+ return false;
+ }
+
+ Diag(Tok, diag::err_expected_unqualified_id)
+ << getLangOpts().CPlusPlus;
+ return true;
+}
+
+/// ParseCXXNewExpression - Parse a C++ new-expression. New is used to allocate
+/// memory in a typesafe manner and call constructors.
+///
+/// This method is called to parse the new expression after the optional :: has
+/// already been parsed. If the :: was present, "UseGlobal" is true and "Start"
+/// is its location. Otherwise, "Start" is the location of the 'new' token.
+///
+/// new-expression:
+/// '::'[opt] 'new' new-placement[opt] new-type-id
+/// new-initializer[opt]
+/// '::'[opt] 'new' new-placement[opt] '(' type-id ')'
+/// new-initializer[opt]
+///
+/// new-placement:
+/// '(' expression-list ')'
+///
+/// new-type-id:
+/// type-specifier-seq new-declarator[opt]
+/// [GNU] attributes type-specifier-seq new-declarator[opt]
+///
+/// new-declarator:
+/// ptr-operator new-declarator[opt]
+/// direct-new-declarator
+///
+/// new-initializer:
+/// '(' expression-list[opt] ')'
+/// [C++0x] braced-init-list
+///
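+/// Illustrative new-expressions exercising the forms above ('buf', 'n' and
+/// 'Widget' are placeholders):
+/// \code
+///   new int                   // new-type-id, no initializer
+///   ::new (buf) Widget(1, 2)  // placement arguments plus an initializer
+///   new (int*)()              // '(' type-id ')' form, value-initialized
+///   new int[n]{1, 2, 3}       // new-declarator with a braced-init-list
+/// \endcode
+///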
+ExprResult
+Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
+ assert(Tok.is(tok::kw_new) && "expected 'new' token");
+ ConsumeToken(); // Consume 'new'
+
+ // A '(' now can be a new-placement or the '(' wrapping the type-id in the
+ // second form of new-expression. It can't be a new-type-id.
+
+ ExprVector PlacementArgs(Actions);
+ SourceLocation PlacementLParen, PlacementRParen;
+
+ SourceRange TypeIdParens;
+ DeclSpec DS(AttrFactory);
+ Declarator DeclaratorInfo(DS, Declarator::CXXNewContext);
+ if (Tok.is(tok::l_paren)) {
+ // If it turns out to be a placement, we change the type location.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ PlacementLParen = T.getOpenLocation();
+ if (ParseExpressionListOrTypeId(PlacementArgs, DeclaratorInfo)) {
+ SkipUntil(tok::semi, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ return ExprError();
+ }
+
+ T.consumeClose();
+ PlacementRParen = T.getCloseLocation();
+ if (PlacementRParen.isInvalid()) {
+ SkipUntil(tok::semi, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ return ExprError();
+ }
+
+ if (PlacementArgs.empty()) {
+ // Reset the placement locations. There was no placement.
+ TypeIdParens = T.getRange();
+ PlacementLParen = PlacementRParen = SourceLocation();
+ } else {
+ // We still need the type.
+ if (Tok.is(tok::l_paren)) {
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ MaybeParseGNUAttributes(DeclaratorInfo);
+ ParseSpecifierQualifierList(DS);
+ DeclaratorInfo.SetSourceRange(DS.getSourceRange());
+ ParseDeclarator(DeclaratorInfo);
+ T.consumeClose();
+ TypeIdParens = T.getRange();
+ } else {
+ MaybeParseGNUAttributes(DeclaratorInfo);
+ if (ParseCXXTypeSpecifierSeq(DS))
+ DeclaratorInfo.setInvalidType(true);
+ else {
+ DeclaratorInfo.SetSourceRange(DS.getSourceRange());
+ ParseDeclaratorInternal(DeclaratorInfo,
+ &Parser::ParseDirectNewDeclarator);
+ }
+ }
+ }
+ } else {
+ // A new-type-id is a simplified type-id, where essentially the
+ // direct-declarator is replaced by a direct-new-declarator.
+ MaybeParseGNUAttributes(DeclaratorInfo);
+ if (ParseCXXTypeSpecifierSeq(DS))
+ DeclaratorInfo.setInvalidType(true);
+ else {
+ DeclaratorInfo.SetSourceRange(DS.getSourceRange());
+ ParseDeclaratorInternal(DeclaratorInfo,
+ &Parser::ParseDirectNewDeclarator);
+ }
+ }
+ if (DeclaratorInfo.isInvalidType()) {
+ SkipUntil(tok::semi, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ return ExprError();
+ }
+
+ ExprResult Initializer;
+
+ if (Tok.is(tok::l_paren)) {
+ SourceLocation ConstructorLParen, ConstructorRParen;
+ ExprVector ConstructorArgs(Actions);
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ ConstructorLParen = T.getOpenLocation();
+ if (Tok.isNot(tok::r_paren)) {
+ CommaLocsTy CommaLocs;
+ if (ParseExpressionList(ConstructorArgs, CommaLocs)) {
+ SkipUntil(tok::semi, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ return ExprError();
+ }
+ }
+ T.consumeClose();
+ ConstructorRParen = T.getCloseLocation();
+ if (ConstructorRParen.isInvalid()) {
+ SkipUntil(tok::semi, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ return ExprError();
+ }
+ Initializer = Actions.ActOnParenListExpr(ConstructorLParen,
+ ConstructorRParen,
+ move_arg(ConstructorArgs));
+ } else if (Tok.is(tok::l_brace) && getLangOpts().CPlusPlus0x) {
+ Diag(Tok.getLocation(),
+ diag::warn_cxx98_compat_generalized_initializer_lists);
+ Initializer = ParseBraceInitializer();
+ }
+ if (Initializer.isInvalid())
+ return Initializer;
+
+ return Actions.ActOnCXXNew(Start, UseGlobal, PlacementLParen,
+ move_arg(PlacementArgs), PlacementRParen,
+ TypeIdParens, DeclaratorInfo, Initializer.take());
+}
+
+/// ParseDirectNewDeclarator - Parses a direct-new-declarator. Intended to be
+/// passed to ParseDeclaratorInternal.
+///
+/// direct-new-declarator:
+/// '[' expression ']'
+/// direct-new-declarator '[' constant-expression ']'
+///
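+/// For example, in 'new int[n][4]' (with 'n' a placeholder variable) the
+/// first bound '[n]' is parsed as an expression while each subsequent
+/// bound, such as '[4]', must be a constant-expression.
+///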
+void Parser::ParseDirectNewDeclarator(Declarator &D) {
+ // Parse the array dimensions.
+ bool first = true;
+ while (Tok.is(tok::l_square)) {
+ // An array-size expression can't start with a lambda.
+ if (CheckProhibitedCXX11Attribute())
+ continue;
+
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+
+ ExprResult Size(first ? ParseExpression()
+ : ParseConstantExpression());
+ if (Size.isInvalid()) {
+ // Recover
+ SkipUntil(tok::r_square);
+ return;
+ }
+ first = false;
+
+ T.consumeClose();
+
+ // Attributes here appertain to the array type. C++11 [expr.new]p5.
+ ParsedAttributes Attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(Attrs);
+
+ D.AddTypeInfo(DeclaratorChunk::getArray(0,
+ /*static=*/false, /*star=*/false,
+ Size.release(),
+ T.getOpenLocation(),
+ T.getCloseLocation()),
+ Attrs, T.getCloseLocation());
+
+ if (T.getCloseLocation().isInvalid())
+ return;
+ }
+}
+
+/// ParseExpressionListOrTypeId - Parse either an expression-list or a type-id.
+/// This ambiguity appears in the syntax of the C++ new operator.
+///
+/// new-expression:
+/// '::'[opt] 'new' new-placement[opt] '(' type-id ')'
+/// new-initializer[opt]
+///
+/// new-placement:
+/// '(' expression-list ')'
+///
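+/// For example, after the already-consumed '(' in 'new (a, b) int' the
+/// tokens form an expression-list (a placement), whereas in 'new (int*)()'
+/// they form a type-id; isTypeIdInParens() below makes that decision
+/// ('a' and 'b' are placeholder expressions).
+///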
+bool Parser::ParseExpressionListOrTypeId(
+ SmallVectorImpl<Expr*> &PlacementArgs,
+ Declarator &D) {
+ // The '(' was already consumed.
+ if (isTypeIdInParens()) {
+ ParseSpecifierQualifierList(D.getMutableDeclSpec());
+ D.SetSourceRange(D.getDeclSpec().getSourceRange());
+ ParseDeclarator(D);
+ return D.isInvalidType();
+ }
+
+ // It's not a type, it has to be an expression list.
+ // Discard the comma locations - ActOnCXXNew has enough parameters.
+ CommaLocsTy CommaLocs;
+ return ParseExpressionList(PlacementArgs, CommaLocs);
+}
+
+/// ParseCXXDeleteExpression - Parse a C++ delete-expression. Delete is used
+/// to free memory allocated by new.
+///
+/// This method is called to parse the 'delete' expression after the optional
+/// '::' has already been parsed. If the '::' was present, "UseGlobal" is true
+/// and "Start" is its location. Otherwise, "Start" is the location of the
+/// 'delete' token.
+///
+/// delete-expression:
+/// '::'[opt] 'delete' cast-expression
+/// '::'[opt] 'delete' '[' ']' cast-expression
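+///
+/// For illustration ('p' and 'arr' are placeholder pointers):
+/// \code
+///   delete p       // single-object form
+///   delete [] arr  // array form
+///   ::delete p     // the '::' is consumed by the caller before this runs
+/// \endcode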
+ExprResult
+Parser::ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start) {
+ assert(Tok.is(tok::kw_delete) && "Expected 'delete' keyword");
+ ConsumeToken(); // Consume 'delete'
+
+ // Array delete?
+ bool ArrayDelete = false;
+ if (Tok.is(tok::l_square) && NextToken().is(tok::r_square)) {
+ // FIXME: This could be the start of a lambda-expression. We should
+ // disambiguate this, but that will require arbitrary lookahead if
+ // the next token is '(':
+ // delete [](int*){ /* ... */
+ ArrayDelete = true;
+ BalancedDelimiterTracker T(*this, tok::l_square);
+
+ T.consumeOpen();
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid())
+ return ExprError();
+ }
+
+ ExprResult Operand(ParseCastExpression(false));
+ if (Operand.isInvalid())
+ return move(Operand);
+
+ return Actions.ActOnCXXDelete(Start, UseGlobal, ArrayDelete, Operand.take());
+}
+
+static UnaryTypeTrait UnaryTypeTraitFromTokKind(tok::TokenKind kind) {
+ switch(kind) {
+ default: llvm_unreachable("Not a known unary type trait.");
+ case tok::kw___has_nothrow_assign: return UTT_HasNothrowAssign;
+ case tok::kw___has_nothrow_constructor: return UTT_HasNothrowConstructor;
+ case tok::kw___has_nothrow_copy: return UTT_HasNothrowCopy;
+ case tok::kw___has_trivial_assign: return UTT_HasTrivialAssign;
+ case tok::kw___has_trivial_constructor:
+ return UTT_HasTrivialDefaultConstructor;
+ case tok::kw___has_trivial_copy: return UTT_HasTrivialCopy;
+ case tok::kw___has_trivial_destructor: return UTT_HasTrivialDestructor;
+ case tok::kw___has_virtual_destructor: return UTT_HasVirtualDestructor;
+ case tok::kw___is_abstract: return UTT_IsAbstract;
+ case tok::kw___is_arithmetic: return UTT_IsArithmetic;
+ case tok::kw___is_array: return UTT_IsArray;
+ case tok::kw___is_class: return UTT_IsClass;
+ case tok::kw___is_complete_type: return UTT_IsCompleteType;
+ case tok::kw___is_compound: return UTT_IsCompound;
+ case tok::kw___is_const: return UTT_IsConst;
+ case tok::kw___is_empty: return UTT_IsEmpty;
+ case tok::kw___is_enum: return UTT_IsEnum;
+ case tok::kw___is_final: return UTT_IsFinal;
+ case tok::kw___is_floating_point: return UTT_IsFloatingPoint;
+ case tok::kw___is_function: return UTT_IsFunction;
+ case tok::kw___is_fundamental: return UTT_IsFundamental;
+ case tok::kw___is_integral: return UTT_IsIntegral;
+ case tok::kw___is_lvalue_reference: return UTT_IsLvalueReference;
+ case tok::kw___is_member_function_pointer: return UTT_IsMemberFunctionPointer;
+ case tok::kw___is_member_object_pointer: return UTT_IsMemberObjectPointer;
+ case tok::kw___is_member_pointer: return UTT_IsMemberPointer;
+ case tok::kw___is_object: return UTT_IsObject;
+ case tok::kw___is_literal: return UTT_IsLiteral;
+ case tok::kw___is_literal_type: return UTT_IsLiteral;
+ case tok::kw___is_pod: return UTT_IsPOD;
+ case tok::kw___is_pointer: return UTT_IsPointer;
+ case tok::kw___is_polymorphic: return UTT_IsPolymorphic;
+ case tok::kw___is_reference: return UTT_IsReference;
+ case tok::kw___is_rvalue_reference: return UTT_IsRvalueReference;
+ case tok::kw___is_scalar: return UTT_IsScalar;
+ case tok::kw___is_signed: return UTT_IsSigned;
+ case tok::kw___is_standard_layout: return UTT_IsStandardLayout;
+ case tok::kw___is_trivial: return UTT_IsTrivial;
+ case tok::kw___is_trivially_copyable: return UTT_IsTriviallyCopyable;
+ case tok::kw___is_union: return UTT_IsUnion;
+ case tok::kw___is_unsigned: return UTT_IsUnsigned;
+ case tok::kw___is_void: return UTT_IsVoid;
+ case tok::kw___is_volatile: return UTT_IsVolatile;
+ }
+}
+
+static BinaryTypeTrait BinaryTypeTraitFromTokKind(tok::TokenKind kind) {
+ switch(kind) {
+ default: llvm_unreachable("Not a known binary type trait");
+ case tok::kw___is_base_of: return BTT_IsBaseOf;
+ case tok::kw___is_convertible: return BTT_IsConvertible;
+ case tok::kw___is_same: return BTT_IsSame;
+ case tok::kw___builtin_types_compatible_p: return BTT_TypeCompatible;
+ case tok::kw___is_convertible_to: return BTT_IsConvertibleTo;
+ case tok::kw___is_trivially_assignable: return BTT_IsTriviallyAssignable;
+ }
+}
+
+static TypeTrait TypeTraitFromTokKind(tok::TokenKind kind) {
+ switch (kind) {
+ default: llvm_unreachable("Not a known type trait");
+ case tok::kw___is_trivially_constructible:
+ return TT_IsTriviallyConstructible;
+ }
+}
+
+static ArrayTypeTrait ArrayTypeTraitFromTokKind(tok::TokenKind kind) {
+ switch(kind) {
+ default: llvm_unreachable("Not a known binary type trait");
+ case tok::kw___array_rank: return ATT_ArrayRank;
+ case tok::kw___array_extent: return ATT_ArrayExtent;
+ }
+}
+
+static ExpressionTrait ExpressionTraitFromTokKind(tok::TokenKind kind) {
+ switch(kind) {
+ default: llvm_unreachable("Not a known unary expression trait.");
+ case tok::kw___is_lvalue_expr: return ET_IsLValueExpr;
+ case tok::kw___is_rvalue_expr: return ET_IsRValueExpr;
+ }
+}
+
+/// ParseUnaryTypeTrait - Parse the built-in unary type-trait
+/// pseudo-functions that allow implementation of the TR1/C++0x type traits
+/// templates.
+///
+/// primary-expression:
+/// [GNU] unary-type-trait '(' type-id ')'
+///
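+/// For example, '__is_pod(SomeType)' parses a single parenthesized type-id
+/// ('SomeType' is a placeholder).
+///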
+ExprResult Parser::ParseUnaryTypeTrait() {
+ UnaryTypeTrait UTT = UnaryTypeTraitFromTokKind(Tok.getKind());
+ SourceLocation Loc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen))
+ return ExprError();
+
+  // FIXME: Error reporting absolutely sucks! If this fails to parse a type,
+ // there will be cryptic errors about mismatched parentheses and missing
+ // specifiers.
+ TypeResult Ty = ParseTypeName();
+
+ T.consumeClose();
+
+ if (Ty.isInvalid())
+ return ExprError();
+
+ return Actions.ActOnUnaryTypeTrait(UTT, Loc, Ty.get(), T.getCloseLocation());
+}
+
+/// ParseBinaryTypeTrait - Parse the built-in binary type-trait
+/// pseudo-functions that allow implementation of the TR1/C++0x type traits
+/// templates.
+///
+/// primary-expression:
+/// [GNU] binary-type-trait '(' type-id ',' type-id ')'
+///
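+/// For example, '__is_base_of(Base, Derived)' or '__is_same(int, int)':
+/// two comma-separated type-ids inside the parentheses ('Base' and
+/// 'Derived' are placeholders).
+///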
+ExprResult Parser::ParseBinaryTypeTrait() {
+ BinaryTypeTrait BTT = BinaryTypeTraitFromTokKind(Tok.getKind());
+ SourceLocation Loc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen))
+ return ExprError();
+
+ TypeResult LhsTy = ParseTypeName();
+ if (LhsTy.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ if (ExpectAndConsume(tok::comma, diag::err_expected_comma)) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ TypeResult RhsTy = ParseTypeName();
+ if (RhsTy.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ T.consumeClose();
+
+ return Actions.ActOnBinaryTypeTrait(BTT, Loc, LhsTy.get(), RhsTy.get(),
+ T.getCloseLocation());
+}
+
+/// \brief Parse the built-in type-trait pseudo-functions that allow
+/// implementation of the TR1/C++11 type traits templates.
+///
+/// primary-expression:
+/// type-trait '(' type-id-seq ')'
+///
+/// type-id-seq:
+/// type-id ...[opt] type-id-seq[opt]
+///
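+/// For example, '__is_trivially_constructible(T, Args...)' takes a
+/// comma-separated sequence of type-ids in which any argument may be a
+/// pack expansion ('T' and 'Args' are placeholders).
+///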
+ExprResult Parser::ParseTypeTrait() {
+ TypeTrait Kind = TypeTraitFromTokKind(Tok.getKind());
+ SourceLocation Loc = ConsumeToken();
+
+ BalancedDelimiterTracker Parens(*this, tok::l_paren);
+ if (Parens.expectAndConsume(diag::err_expected_lparen))
+ return ExprError();
+
+ llvm::SmallVector<ParsedType, 2> Args;
+ do {
+ // Parse the next type.
+ TypeResult Ty = ParseTypeName();
+ if (Ty.isInvalid()) {
+ Parens.skipToEnd();
+ return ExprError();
+ }
+
+ // Parse the ellipsis, if present.
+ if (Tok.is(tok::ellipsis)) {
+ Ty = Actions.ActOnPackExpansion(Ty.get(), ConsumeToken());
+ if (Ty.isInvalid()) {
+ Parens.skipToEnd();
+ return ExprError();
+ }
+ }
+
+ // Add this type to the list of arguments.
+ Args.push_back(Ty.get());
+
+ if (Tok.is(tok::comma)) {
+ ConsumeToken();
+ continue;
+ }
+
+ break;
+ } while (true);
+
+ if (Parens.consumeClose())
+ return ExprError();
+
+ return Actions.ActOnTypeTrait(Kind, Loc, Args, Parens.getCloseLocation());
+}
+
+/// ParseArrayTypeTrait - Parse the built-in array type-trait
+/// pseudo-functions.
+///
+/// primary-expression:
+/// [Embarcadero] '__array_rank' '(' type-id ')'
+/// [Embarcadero] '__array_extent' '(' type-id ',' expression ')'
+///
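+/// For example, '__array_rank(int[2][3])' takes a single type-id, while
+/// '__array_extent(int[2][3], 1)' additionally takes a dimension
+/// expression after a comma.
+///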
+ExprResult Parser::ParseArrayTypeTrait() {
+ ArrayTypeTrait ATT = ArrayTypeTraitFromTokKind(Tok.getKind());
+ SourceLocation Loc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen))
+ return ExprError();
+
+ TypeResult Ty = ParseTypeName();
+ if (Ty.isInvalid()) {
+ SkipUntil(tok::comma);
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ switch (ATT) {
+ case ATT_ArrayRank: {
+ T.consumeClose();
+ return Actions.ActOnArrayTypeTrait(ATT, Loc, Ty.get(), NULL,
+ T.getCloseLocation());
+ }
+ case ATT_ArrayExtent: {
+ if (ExpectAndConsume(tok::comma, diag::err_expected_comma)) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ ExprResult DimExpr = ParseExpression();
+ T.consumeClose();
+
+ return Actions.ActOnArrayTypeTrait(ATT, Loc, Ty.get(), DimExpr.get(),
+ T.getCloseLocation());
+ }
+ }
+ llvm_unreachable("Invalid ArrayTypeTrait!");
+}
+
+/// ParseExpressionTrait - Parse built-in expression-trait
+/// pseudo-functions like __is_lvalue_expr( xxx ).
+///
+/// primary-expression:
+/// [Embarcadero] expression-trait '(' expression ')'
+///
+ExprResult Parser::ParseExpressionTrait() {
+ ExpressionTrait ET = ExpressionTraitFromTokKind(Tok.getKind());
+ SourceLocation Loc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen))
+ return ExprError();
+
+ ExprResult Expr = ParseExpression();
+
+ T.consumeClose();
+
+ return Actions.ActOnExpressionTrait(ET, Loc, Expr.get(),
+ T.getCloseLocation());
+}
+
+
+/// ParseCXXAmbiguousParenExpression - We have parsed the left paren of a
+/// parenthesized ambiguous type-id. This uses tentative parsing to disambiguate
+/// based on the context past the parens.
+ExprResult
+Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
+ ParsedType &CastTy,
+ BalancedDelimiterTracker &Tracker) {
+ assert(getLangOpts().CPlusPlus && "Should only be called for C++!");
+ assert(ExprType == CastExpr && "Compound literals are not ambiguous!");
+ assert(isTypeIdInParens() && "Not a type-id!");
+
+ ExprResult Result(true);
+ CastTy = ParsedType();
+
+ // We need to disambiguate a very ugly part of the C++ syntax:
+ //
+ // (T())x; - type-id
+ // (T())*x; - type-id
+ // (T())/x; - expression
+ // (T()); - expression
+ //
+ // The bad news is that we cannot use the specialized tentative parser, since
+ // it can only verify that the thing inside the parens can be parsed as
+ // type-id, it is not useful for determining the context past the parens.
+ //
+ // The good news is that the parser can disambiguate this part without
+ // making any unnecessary Action calls.
+ //
+ // It uses a scheme similar to parsing inline methods. The parenthesized
+ // tokens are cached, the context that follows is determined (possibly by
+ // parsing a cast-expression), and then we re-introduce the cached tokens
+ // into the token stream and parse them appropriately.
+
+ ParenParseOption ParseAs;
+ CachedTokens Toks;
+
+ // Store the tokens of the parentheses. We will parse them after we determine
+ // the context that follows them.
+ if (!ConsumeAndStoreUntil(tok::r_paren, Toks)) {
+ // We didn't find the ')' we expected.
+ Tracker.consumeClose();
+ return ExprError();
+ }
+
+ if (Tok.is(tok::l_brace)) {
+ ParseAs = CompoundLiteral;
+ } else {
+ bool NotCastExpr;
+ // FIXME: Special-case ++ and --: "(S())++;" is not a cast-expression
+ if (Tok.is(tok::l_paren) && NextToken().is(tok::r_paren)) {
+ NotCastExpr = true;
+ } else {
+ // Try parsing the cast-expression that may follow.
+ // If it is not a cast-expression, NotCastExpr will be true and no token
+ // will be consumed.
+ Result = ParseCastExpression(false/*isUnaryExpression*/,
+ false/*isAddressofOperand*/,
+ NotCastExpr,
+ // type-id has priority.
+ IsTypeCast);
+ }
+
+ // If we parsed a cast-expression, it's really a type-id, otherwise it's
+ // an expression.
+ ParseAs = NotCastExpr ? SimpleExpr : CastExpr;
+ }
+
+ // The current token should go after the cached tokens.
+ Toks.push_back(Tok);
+ // Re-enter the stored parenthesized tokens into the token stream, so we may
+ // parse them now.
+ PP.EnterTokenStream(Toks.data(), Toks.size(),
+ true/*DisableMacroExpansion*/, false/*OwnsTokens*/);
+ // Drop the current token and bring the first cached one. It's the same token
+ // as when we entered this function.
+ ConsumeAnyToken();
+
+ if (ParseAs >= CompoundLiteral) {
+ // Parse the type declarator.
+ DeclSpec DS(AttrFactory);
+ ParseSpecifierQualifierList(DS);
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ ParseDeclarator(DeclaratorInfo);
+
+ // Match the ')'.
+ Tracker.consumeClose();
+
+ if (ParseAs == CompoundLiteral) {
+ ExprType = CompoundLiteral;
+ TypeResult Ty = ParseTypeName();
+ return ParseCompoundLiteralExpression(Ty.get(),
+ Tracker.getOpenLocation(),
+ Tracker.getCloseLocation());
+ }
+
+ // We parsed '(' type-id ')' and the thing after it wasn't a '{'.
+ assert(ParseAs == CastExpr);
+
+ if (DeclaratorInfo.isInvalidType())
+ return ExprError();
+
+ // Result is what ParseCastExpression returned earlier.
+ if (!Result.isInvalid())
+ Result = Actions.ActOnCastExpr(getCurScope(), Tracker.getOpenLocation(),
+ DeclaratorInfo, CastTy,
+ Tracker.getCloseLocation(), Result.take());
+ return move(Result);
+ }
+
+ // Not a compound literal, and not followed by a cast-expression.
+ assert(ParseAs == SimpleExpr);
+
+ ExprType = SimpleExpr;
+ Result = ParseExpression();
+ if (!Result.isInvalid() && Tok.is(tok::r_paren))
+ Result = Actions.ActOnParenExpr(Tracker.getOpenLocation(),
+ Tok.getLocation(), Result.take());
+
+ // Match the ')'.
+ if (Result.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return ExprError();
+ }
+
+ Tracker.consumeClose();
+ return move(Result);
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp
new file mode 100644
index 0000000..1c349fd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseInit.cpp
@@ -0,0 +1,547 @@
+//===--- ParseInit.cpp - Initializer Parsing ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements initializer parsing as specified by C99 6.7.8.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "RAIIObjectsForParser.h"
+#include "clang/Sema/Designator.h"
+#include "clang/Sema/Scope.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+
+/// MayBeDesignationStart - Return true if the current token might be the start
+/// of a designator. If we can tell it is impossible that it is a designator,
+/// return false.
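+///
+/// In C++11 the interesting case is '[': it may begin either an
+/// array-designator such as '[0] = 1' or a lambda-introducer such as
+/// '[&x]' ('x' is a placeholder), and some cases can only be resolved by
+/// looking past the closing ']'.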
+bool Parser::MayBeDesignationStart() {
+ switch (Tok.getKind()) {
+ default:
+ return false;
+
+ case tok::period: // designator: '.' identifier
+ return true;
+
+ case tok::l_square: { // designator: array-designator
+ if (!PP.getLangOpts().CPlusPlus0x)
+ return true;
+
+ // C++11 lambda expressions and C99 designators can be ambiguous all the
+ // way through the closing ']' and to the next character. Handle the easy
+ // cases here, and fall back to tentative parsing if those fail.
+ switch (PP.LookAhead(0).getKind()) {
+ case tok::equal:
+ case tok::r_square:
+ // Definitely starts a lambda expression.
+ return false;
+
+ case tok::amp:
+ case tok::kw_this:
+ case tok::identifier:
+ // We have to do additional analysis, because these could be the
+ // start of a constant expression or a lambda capture list.
+ break;
+
+ default:
+ // Anything not mentioned above cannot occur following a '[' in a
+ // lambda expression.
+ return true;
+ }
+
+ // Handle the complicated case below.
+ break;
+ }
+ case tok::identifier: // designation: identifier ':'
+ return PP.LookAhead(0).is(tok::colon);
+ }
+
+ // Parse up to (at most) the token after the closing ']' to determine
+ // whether this is a C99 designator or a lambda.
+ TentativeParsingAction Tentative(*this);
+ ConsumeBracket();
+ while (true) {
+ switch (Tok.getKind()) {
+ case tok::equal:
+ case tok::amp:
+ case tok::identifier:
+ case tok::kw_this:
+ // These tokens can occur in a capture list or a constant-expression.
+ // Keep looking.
+ ConsumeToken();
+ continue;
+
+ case tok::comma:
+ // Since a comma cannot occur in a constant-expression, this must
+ // be a lambda.
+ Tentative.Revert();
+ return false;
+
+ case tok::r_square: {
+ // Once we hit the closing square bracket, we look at the next
+ // token. If it's an '=', this is a designator. Otherwise, it's a
+ // lambda expression. This decision favors lambdas over the older
+ // GNU designator syntax, which allows one to omit the '=', but is
+ // consistent with GCC.
+ ConsumeBracket();
+ tok::TokenKind Kind = Tok.getKind();
+ Tentative.Revert();
+ return Kind == tok::equal;
+ }
+
+ default:
+ // Anything else cannot occur in a lambda capture list, so it
+ // must be a designator.
+ Tentative.Revert();
+ return true;
+ }
+ }
+
+ return true;
+}
+
+static void CheckArrayDesignatorSyntax(Parser &P, SourceLocation Loc,
+ Designation &Desig) {
+ // If we have exactly one array designator, this used the GNU
+ // 'designation: array-designator' extension, otherwise there should be no
+ // designators at all!
+ if (Desig.getNumDesignators() == 1 &&
+ (Desig.getDesignator(0).isArrayDesignator() ||
+ Desig.getDesignator(0).isArrayRangeDesignator()))
+ P.Diag(Loc, diag::ext_gnu_missing_equal_designator);
+ else if (Desig.getNumDesignators() > 0)
+ P.Diag(Loc, diag::err_expected_equal_designator);
+}
+
+/// ParseInitializerWithPotentialDesignator - Parse the 'initializer' production
+/// checking to see if the token stream starts with a designator.
+///
+/// designation:
+/// designator-list '='
+/// [GNU] array-designator
+/// [GNU] identifier ':'
+///
+/// designator-list:
+/// designator
+/// designator-list designator
+///
+/// designator:
+/// array-designator
+/// '.' identifier
+///
+/// array-designator:
+/// '[' constant-expression ']'
+/// [GNU] '[' constant-expression '...' constant-expression ']'
+///
+/// NOTE: [OBC] allows '[ objc-receiver objc-message-args ]' as an
+/// initializer (because it is an expression). We need to consider this case
+/// when parsing array designators.
+///
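+/// A few illustrative designations (field names are placeholders):
+/// \code
+///   .x = 1         // '.' identifier
+///   [2] = 3        // array-designator
+///   [0 ... 9] = 0  // GNU array-range designator
+///   x: 1           // old-style GNU field designator (fixit suggests '.x =')
+/// \endcode
+///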
+ExprResult Parser::ParseInitializerWithPotentialDesignator() {
+
+ // If this is the old-style GNU extension:
+ // designation ::= identifier ':'
+ // Handle it as a field designator. Otherwise, this must be the start of a
+ // normal expression.
+ if (Tok.is(tok::identifier)) {
+ const IdentifierInfo *FieldName = Tok.getIdentifierInfo();
+
+ SmallString<256> NewSyntax;
+ llvm::raw_svector_ostream(NewSyntax) << '.' << FieldName->getName()
+ << " = ";
+
+ SourceLocation NameLoc = ConsumeToken(); // Eat the identifier.
+
+ assert(Tok.is(tok::colon) && "MayBeDesignationStart not working properly!");
+ SourceLocation ColonLoc = ConsumeToken();
+
+ Diag(NameLoc, diag::ext_gnu_old_style_field_designator)
+ << FixItHint::CreateReplacement(SourceRange(NameLoc, ColonLoc),
+ NewSyntax.str());
+
+ Designation D;
+ D.AddDesignator(Designator::getField(FieldName, SourceLocation(), NameLoc));
+ return Actions.ActOnDesignatedInitializer(D, ColonLoc, true,
+ ParseInitializer());
+ }
+
+ // Desig - This is initialized when we see our first designator. We may have
+ // an objc message send with no designator, so we don't want to create this
+ // eagerly.
+ Designation Desig;
+
+ // Parse each designator in the designator list until we find an initializer.
+ while (Tok.is(tok::period) || Tok.is(tok::l_square)) {
+ if (Tok.is(tok::period)) {
+ // designator: '.' identifier
+ SourceLocation DotLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok.getLocation(), diag::err_expected_field_designator);
+ return ExprError();
+ }
+
+ Desig.AddDesignator(Designator::getField(Tok.getIdentifierInfo(), DotLoc,
+ Tok.getLocation()));
+ ConsumeToken(); // Eat the identifier.
+ continue;
+ }
+
+ // We must have either an array designator now or an objc message send.
+ assert(Tok.is(tok::l_square) && "Unexpected token!");
+
+ // Handle the two forms of array designator:
+ // array-designator: '[' constant-expression ']'
+ // array-designator: '[' constant-expression '...' constant-expression ']'
+ //
+ // Also, we have to handle the case where the expression after the
+    // designator is an objc message send: '[' objc-message-expr ']'.
+ // Interesting cases are:
+ // [foo bar] -> objc message send
+ // [foo] -> array designator
+ // [foo ... bar] -> array designator
+ // [4][foo bar] -> obsolete GNU designation with objc message send.
+ //
+ // We do not need to check for an expression starting with [[ here. If it
+ // contains an Objective-C message send, then it is not an ill-formed
+ // attribute. If it is a lambda-expression within an array-designator, then
+ // it will be rejected because a constant-expression cannot begin with a
+ // lambda-expression.
+ InMessageExpressionRAIIObject InMessage(*this, true);
+
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+ SourceLocation StartLoc = T.getOpenLocation();
+
+ ExprResult Idx;
+
+ // If Objective-C is enabled and this is a typename (class message
+ // send) or send to 'super', parse this as a message send
+ // expression. We handle C++ and C separately, since C++ requires
+ // much more complicated parsing.
+ if (getLangOpts().ObjC1 && getLangOpts().CPlusPlus) {
+ // Send to 'super'.
+ if (Tok.is(tok::identifier) && Tok.getIdentifierInfo() == Ident_super &&
+ NextToken().isNot(tok::period) &&
+ getCurScope()->isInObjcMethodScope()) {
+ CheckArrayDesignatorSyntax(*this, StartLoc, Desig);
+ return ParseAssignmentExprWithObjCMessageExprStart(StartLoc,
+ ConsumeToken(),
+ ParsedType(),
+ 0);
+ }
+
+ // Parse the receiver, which is either a type or an expression.
+ bool IsExpr;
+ void *TypeOrExpr;
+ if (ParseObjCXXMessageReceiver(IsExpr, TypeOrExpr)) {
+ SkipUntil(tok::r_square);
+ return ExprError();
+ }
+
+ // If the receiver was a type, we have a class message; parse
+ // the rest of it.
+ if (!IsExpr) {
+ CheckArrayDesignatorSyntax(*this, StartLoc, Desig);
+ return ParseAssignmentExprWithObjCMessageExprStart(StartLoc,
+ SourceLocation(),
+ ParsedType::getFromOpaquePtr(TypeOrExpr),
+ 0);
+ }
+
+ // If the receiver was an expression, we still don't know
+ // whether we have a message send or an array designator; just
+ // adopt the expression for further analysis below.
+ // FIXME: potentially-potentially evaluated expression above?
+ Idx = ExprResult(static_cast<Expr*>(TypeOrExpr));
+ } else if (getLangOpts().ObjC1 && Tok.is(tok::identifier)) {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ SourceLocation IILoc = Tok.getLocation();
+ ParsedType ReceiverType;
+ // Three cases. This is a message send to a type: [type foo]
+ // This is a message send to super: [super foo]
+ // This is a message sent to an expr: [super.bar foo]
+ switch (Sema::ObjCMessageKind Kind
+ = Actions.getObjCMessageKind(getCurScope(), II, IILoc,
+ II == Ident_super,
+ NextToken().is(tok::period),
+ ReceiverType)) {
+ case Sema::ObjCSuperMessage:
+ case Sema::ObjCClassMessage:
+ CheckArrayDesignatorSyntax(*this, StartLoc, Desig);
+ if (Kind == Sema::ObjCSuperMessage)
+ return ParseAssignmentExprWithObjCMessageExprStart(StartLoc,
+ ConsumeToken(),
+ ParsedType(),
+ 0);
+ ConsumeToken(); // the identifier
+ if (!ReceiverType) {
+ SkipUntil(tok::r_square);
+ return ExprError();
+ }
+
+ return ParseAssignmentExprWithObjCMessageExprStart(StartLoc,
+ SourceLocation(),
+ ReceiverType,
+ 0);
+
+ case Sema::ObjCInstanceMessage:
+ // Fall through; we'll just parse the expression and
+ // (possibly) treat this like an Objective-C message send
+ // later.
+ break;
+ }
+ }
+
+ // Parse the index expression, if we haven't already gotten one
+ // above (which can only happen in Objective-C++).
+ // Note that we parse this as an assignment expression, not a constant
+ // expression (allowing *=, =, etc) to handle the objc case. Sema needs
+ // to validate that the expression is a constant.
+ // FIXME: We also need to tell Sema that we're in a
+ // potentially-potentially evaluated context.
+ if (!Idx.get()) {
+ Idx = ParseAssignmentExpression();
+ if (Idx.isInvalid()) {
+ SkipUntil(tok::r_square);
+ return move(Idx);
+ }
+ }
+
+ // Given an expression, we could either have a designator (if the next
+    // tokens are '...' or ']') or an objc message send. If this is an objc
+ // message send, handle it now. An objc-message send is the start of
+ // an assignment-expression production.
+ if (getLangOpts().ObjC1 && Tok.isNot(tok::ellipsis) &&
+ Tok.isNot(tok::r_square)) {
+ CheckArrayDesignatorSyntax(*this, Tok.getLocation(), Desig);
+ return ParseAssignmentExprWithObjCMessageExprStart(StartLoc,
+ SourceLocation(),
+ ParsedType(),
+ Idx.take());
+ }
+
+ // If this is a normal array designator, remember it.
+ if (Tok.isNot(tok::ellipsis)) {
+ Desig.AddDesignator(Designator::getArray(Idx.release(), StartLoc));
+ } else {
+ // Handle the gnu array range extension.
+ Diag(Tok, diag::ext_gnu_array_range);
+ SourceLocation EllipsisLoc = ConsumeToken();
+
+ ExprResult RHS(ParseConstantExpression());
+ if (RHS.isInvalid()) {
+ SkipUntil(tok::r_square);
+ return move(RHS);
+ }
+ Desig.AddDesignator(Designator::getArrayRange(Idx.release(),
+ RHS.release(),
+ StartLoc, EllipsisLoc));
+ }
+
+ T.consumeClose();
+ Desig.getDesignator(Desig.getNumDesignators() - 1).setRBracketLoc(
+ T.getCloseLocation());
+ }
+
+ // Okay, we're done with the designator sequence. We know that there must be
+ // at least one designator, because the only case we can get into this method
+ // without a designator is when we have an objc message send. That case is
+ // handled and returned from above.
+ assert(!Desig.empty() && "Designator is empty?");
+
+ // Handle a normal designator sequence end, which is an equal.
+ if (Tok.is(tok::equal)) {
+ SourceLocation EqualLoc = ConsumeToken();
+ return Actions.ActOnDesignatedInitializer(Desig, EqualLoc, false,
+ ParseInitializer());
+ }
+
+ // We read some number of designators and found something that isn't an = or
+ // an initializer. If we have exactly one array designator, this
+ // is the GNU 'designation: array-designator' extension. Otherwise, it is a
+ // parse error.
+ if (Desig.getNumDesignators() == 1 &&
+ (Desig.getDesignator(0).isArrayDesignator() ||
+ Desig.getDesignator(0).isArrayRangeDesignator())) {
+ Diag(Tok, diag::ext_gnu_missing_equal_designator)
+ << FixItHint::CreateInsertion(Tok.getLocation(), "= ");
+ return Actions.ActOnDesignatedInitializer(Desig, Tok.getLocation(),
+ true, ParseInitializer());
+ }
+
+ Diag(Tok, diag::err_expected_equal_designator);
+ return ExprError();
+}
+
+
+/// ParseBraceInitializer - Called when parsing an initializer that has a
+/// leading open brace.
+///
+/// initializer: [C99 6.7.8]
+/// '{' initializer-list '}'
+/// '{' initializer-list ',' '}'
+/// [GNU] '{' '}'
+///
+/// initializer-list:
+/// designation[opt] initializer ...[opt]
+/// initializer-list ',' designation[opt] initializer ...[opt]
+///
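+/// For example, all of the following are accepted here (names are
+/// illustrative):
+///   int a[3] = { 1, 2, 3 };     // initializer-list
+///   int b[3] = { 1, 2, 3, };    // trailing comma
+///   struct empty e = { };       // [GNU] empty initializer
+///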
+ExprResult Parser::ParseBraceInitializer() {
+ InMessageExpressionRAIIObject InMessage(*this, false);
+
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ T.consumeOpen();
+ SourceLocation LBraceLoc = T.getOpenLocation();
+
+ /// InitExprs - This is the actual list of expressions contained in the
+ /// initializer.
+ ExprVector InitExprs(Actions);
+
+ if (Tok.is(tok::r_brace)) {
+ // Empty initializers are a C++ feature and a GNU extension to C.
+ if (!getLangOpts().CPlusPlus)
+ Diag(LBraceLoc, diag::ext_gnu_empty_initializer);
+ // Match the '}'.
+ return Actions.ActOnInitList(LBraceLoc, MultiExprArg(Actions),
+ ConsumeBrace());
+ }
+
+ bool InitExprsOk = true;
+
+ while (1) {
+ // Handle Microsoft __if_exists/if_not_exists if necessary.
+ if (getLangOpts().MicrosoftExt && (Tok.is(tok::kw___if_exists) ||
+ Tok.is(tok::kw___if_not_exists))) {
+ if (ParseMicrosoftIfExistsBraceInitializer(InitExprs, InitExprsOk)) {
+ if (Tok.isNot(tok::comma)) break;
+ ConsumeToken();
+ }
+ if (Tok.is(tok::r_brace)) break;
+ continue;
+ }
+
+ // Parse: designation[opt] initializer
+
+ // If we know that this cannot be a designation, just parse the nested
+ // initializer directly.
+ ExprResult SubElt;
+ if (MayBeDesignationStart())
+ SubElt = ParseInitializerWithPotentialDesignator();
+ else
+ SubElt = ParseInitializer();
+
+ if (Tok.is(tok::ellipsis))
+ SubElt = Actions.ActOnPackExpansion(SubElt.get(), ConsumeToken());
+
+ // If we couldn't parse the subelement, bail out.
+ if (!SubElt.isInvalid()) {
+ InitExprs.push_back(SubElt.release());
+ } else {
+ InitExprsOk = false;
+
+ // We have two ways to try to recover from this error: if the code looks
+ // grammatically ok (i.e. we have a comma coming up) try to continue
+ // parsing the rest of the initializer. This allows us to emit
+ // diagnostics for later elements that we find. If we don't see a comma,
+ // assume there is a parse error, and just skip to recover.
+ // FIXME: This comment doesn't sound right. If there is a r_brace
+ // immediately, it can't be an error, since there is no other way of
+ // leaving this loop except through this if.
+ if (Tok.isNot(tok::comma)) {
+ SkipUntil(tok::r_brace, false, true);
+ break;
+ }
+ }
+
+ // If we don't have a comma continued list, we're done.
+ if (Tok.isNot(tok::comma)) break;
+
+ // TODO: save comma locations if some client cares.
+ ConsumeToken();
+
+ // Handle trailing comma.
+ if (Tok.is(tok::r_brace)) break;
+ }
+
+ bool closed = !T.consumeClose();
+
+ if (InitExprsOk && closed)
+ return Actions.ActOnInitList(LBraceLoc, move_arg(InitExprs),
+ T.getCloseLocation());
+
+ return ExprError(); // an error occurred.
+}
+
+
+// Return true if a comma (or closing brace) is necessary after the
+// __if_exists/if_not_exists statement.
+bool Parser::ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
+ bool &InitExprsOk) {
+ bool trailingComma = false;
+ IfExistsCondition Result;
+ if (ParseMicrosoftIfExistsCondition(Result))
+ return false;
+
+ BalancedDelimiterTracker Braces(*this, tok::l_brace);
+ if (Braces.consumeOpen()) {
+ Diag(Tok, diag::err_expected_lbrace);
+ return false;
+ }
+
+ switch (Result.Behavior) {
+ case IEB_Parse:
+ // Parse the declarations below.
+ break;
+
+ case IEB_Dependent:
+ Diag(Result.KeywordLoc, diag::warn_microsoft_dependent_exists)
+ << Result.IsIfExists;
+ // Fall through to skip.
+
+ case IEB_Skip:
+ Braces.skipToEnd();
+ return false;
+ }
+
+ while (Tok.isNot(tok::eof)) {
+ trailingComma = false;
+ // If we know that this cannot be a designation, just parse the nested
+ // initializer directly.
+ ExprResult SubElt;
+ if (MayBeDesignationStart())
+ SubElt = ParseInitializerWithPotentialDesignator();
+ else
+ SubElt = ParseInitializer();
+
+ if (Tok.is(tok::ellipsis))
+ SubElt = Actions.ActOnPackExpansion(SubElt.get(), ConsumeToken());
+
+ // If we couldn't parse the subelement, bail out.
+ if (!SubElt.isInvalid())
+ InitExprs.push_back(SubElt.release());
+ else
+ InitExprsOk = false;
+
+ if (Tok.is(tok::comma)) {
+ ConsumeToken();
+ trailingComma = true;
+ }
+
+ if (Tok.is(tok::r_brace))
+ break;
+ }
+
+ Braces.consumeClose();
+
+ return !trailingComma;
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
new file mode 100644
index 0000000..789a8ae
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseObjc.cpp
@@ -0,0 +1,2846 @@
+//===--- ParseObjC.cpp - Objective C Parsing ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Objective-C portions of the Parser interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
+#include "RAIIObjectsForParser.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/PrettyDeclStackTrace.h"
+#include "clang/Sema/Scope.h"
+#include "llvm/ADT/SmallVector.h"
+using namespace clang;
+
+
+/// ParseObjCAtDirectives - Handle parts of the external-declaration production:
+/// external-declaration: [C99 6.9]
+/// [OBJC] objc-class-definition
+/// [OBJC] objc-class-declaration
+/// [OBJC] objc-alias-declaration
+/// [OBJC] objc-protocol-definition
+/// [OBJC] objc-method-definition
+/// [OBJC] '@' 'end'
+Parser::DeclGroupPtrTy Parser::ParseObjCAtDirectives() {
+ SourceLocation AtLoc = ConsumeToken(); // the "@"
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCAtDirective(getCurScope());
+ cutOffParsing();
+ return DeclGroupPtrTy();
+ }
+
+ Decl *SingleDecl = 0;
+ switch (Tok.getObjCKeywordID()) {
+ case tok::objc_class:
+ return ParseObjCAtClassDeclaration(AtLoc);
+ case tok::objc_interface: {
+ ParsedAttributes attrs(AttrFactory);
+ SingleDecl = ParseObjCAtInterfaceDeclaration(AtLoc, attrs);
+ break;
+ }
+ case tok::objc_protocol: {
+ ParsedAttributes attrs(AttrFactory);
+ return ParseObjCAtProtocolDeclaration(AtLoc, attrs);
+ }
+ case tok::objc_implementation:
+ return ParseObjCAtImplementationDeclaration(AtLoc);
+ case tok::objc_end:
+ return ParseObjCAtEndDeclaration(AtLoc);
+ case tok::objc_compatibility_alias:
+ SingleDecl = ParseObjCAtAliasDeclaration(AtLoc);
+ break;
+ case tok::objc_synthesize:
+ SingleDecl = ParseObjCPropertySynthesize(AtLoc);
+ break;
+ case tok::objc_dynamic:
+ SingleDecl = ParseObjCPropertyDynamic(AtLoc);
+ break;
+ case tok::objc___experimental_modules_import:
+ if (getLangOpts().Modules)
+ return ParseModuleImport(AtLoc);
+
+ // Fall through
+
+ default:
+ Diag(AtLoc, diag::err_unexpected_at);
+ SkipUntil(tok::semi);
+ SingleDecl = 0;
+ break;
+ }
+ return Actions.ConvertDeclToDeclGroup(SingleDecl);
+}
+
+///
+/// objc-class-declaration:
+/// '@' 'class' identifier-list ';'
+///
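+/// For example (class names are illustrative):
+///   @class Foo, Bar;
+///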
+Parser::DeclGroupPtrTy
+Parser::ParseObjCAtClassDeclaration(SourceLocation atLoc) {
+ ConsumeToken(); // the identifier "class"
+ SmallVector<IdentifierInfo *, 8> ClassNames;
+ SmallVector<SourceLocation, 8> ClassLocs;
+
+
+ while (1) {
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::semi);
+ return Actions.ConvertDeclToDeclGroup(0);
+ }
+ ClassNames.push_back(Tok.getIdentifierInfo());
+ ClassLocs.push_back(Tok.getLocation());
+ ConsumeToken();
+
+ if (Tok.isNot(tok::comma))
+ break;
+
+ ConsumeToken();
+ }
+
+ // Consume the ';'.
+ if (ExpectAndConsume(tok::semi, diag::err_expected_semi_after, "@class"))
+ return Actions.ConvertDeclToDeclGroup(0);
+
+ return Actions.ActOnForwardClassDeclaration(atLoc, ClassNames.data(),
+ ClassLocs.data(),
+ ClassNames.size());
+}
+
+void Parser::CheckNestedObjCContexts(SourceLocation AtLoc)
+{
+ Sema::ObjCContainerKind ock = Actions.getObjCContainerKind();
+ if (ock == Sema::OCK_None)
+ return;
+
+ Decl *Decl = Actions.getObjCDeclContext();
+ if (CurParsedObjCImpl) {
+ CurParsedObjCImpl->finish(AtLoc);
+ } else {
+ Actions.ActOnAtEnd(getCurScope(), AtLoc);
+ }
+ Diag(AtLoc, diag::err_objc_missing_end)
+ << FixItHint::CreateInsertion(AtLoc, "@end\n");
+ if (Decl)
+ Diag(Decl->getLocStart(), diag::note_objc_container_start)
+ << (int) ock;
+}
+
+///
+/// objc-interface:
+/// objc-class-interface-attributes[opt] objc-class-interface
+/// objc-category-interface
+///
+/// objc-class-interface:
+/// '@' 'interface' identifier objc-superclass[opt]
+/// objc-protocol-refs[opt]
+/// objc-class-instance-variables[opt]
+/// objc-interface-decl-list
+/// @end
+///
+/// objc-category-interface:
+/// '@' 'interface' identifier '(' identifier[opt] ')'
+/// objc-protocol-refs[opt]
+/// objc-interface-decl-list
+/// @end
+///
+/// objc-superclass:
+/// ':' identifier
+///
+/// objc-class-interface-attributes:
+/// __attribute__((visibility("default")))
+/// __attribute__((visibility("hidden")))
+/// __attribute__((deprecated))
+/// __attribute__((unavailable))
+/// __attribute__((objc_exception)) - used by NSException on 64-bit
+/// __attribute__((objc_root_class))
+///
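+/// For example (all names are illustrative):
+///   @interface Foo : Bar <Proto1, Proto2>   // class interface
+///   @end
+///   @interface Foo (Extras) <Proto1>        // category interface
+///   @end
+///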
+Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
+ ParsedAttributes &attrs) {
+ assert(Tok.isObjCAtKeyword(tok::objc_interface) &&
+ "ParseObjCAtInterfaceDeclaration(): Expected @interface");
+ CheckNestedObjCContexts(AtLoc);
+ ConsumeToken(); // the "interface" identifier
+
+ // Code completion after '@interface'.
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCInterfaceDecl(getCurScope());
+ cutOffParsing();
+ return 0;
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident); // missing class or category name.
+ return 0;
+ }
+
+ // We have a class or category name - consume it.
+ IdentifierInfo *nameId = Tok.getIdentifierInfo();
+ SourceLocation nameLoc = ConsumeToken();
+ if (Tok.is(tok::l_paren) &&
+ !isKnownToBeTypeSpecifier(GetLookAheadToken(1))) { // we have a category.
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ SourceLocation categoryLoc;
+ IdentifierInfo *categoryId = 0;
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCInterfaceCategory(getCurScope(), nameId, nameLoc);
+ cutOffParsing();
+ return 0;
+ }
+
+ // For ObjC2, the category name is optional (not an error).
+ if (Tok.is(tok::identifier)) {
+ categoryId = Tok.getIdentifierInfo();
+ categoryLoc = ConsumeToken();
+ }
+ else if (!getLangOpts().ObjC2) {
+ Diag(Tok, diag::err_expected_ident); // missing category name.
+ return 0;
+ }
+
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid())
+ return 0;
+
+ if (!attrs.empty()) { // categories don't support attributes.
+ Diag(nameLoc, diag::err_objc_no_attributes_on_category);
+ attrs.clear();
+ }
+
+ // Next, we need to check for any protocol references.
+ SourceLocation LAngleLoc, EndProtoLoc;
+ SmallVector<Decl *, 8> ProtocolRefs;
+ SmallVector<SourceLocation, 8> ProtocolLocs;
+ if (Tok.is(tok::less) &&
+ ParseObjCProtocolReferences(ProtocolRefs, ProtocolLocs, true,
+ LAngleLoc, EndProtoLoc))
+ return 0;
+
+ Decl *CategoryType =
+ Actions.ActOnStartCategoryInterface(AtLoc,
+ nameId, nameLoc,
+ categoryId, categoryLoc,
+ ProtocolRefs.data(),
+ ProtocolRefs.size(),
+ ProtocolLocs.data(),
+ EndProtoLoc);
+
+ if (Tok.is(tok::l_brace))
+ ParseObjCClassInstanceVariables(CategoryType, tok::objc_private, AtLoc);
+
+ ParseObjCInterfaceDeclList(tok::objc_not_keyword, CategoryType);
+ return CategoryType;
+ }
+ // Parse a class interface.
+ IdentifierInfo *superClassId = 0;
+ SourceLocation superClassLoc;
+
+ if (Tok.is(tok::colon)) { // a super class is specified.
+ ConsumeToken();
+
+ // Code completion of superclass names.
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCSuperclass(getCurScope(), nameId, nameLoc);
+ cutOffParsing();
+ return 0;
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident); // missing super class name.
+ return 0;
+ }
+ superClassId = Tok.getIdentifierInfo();
+ superClassLoc = ConsumeToken();
+ }
+ // Next, we need to check for any protocol references.
+ SmallVector<Decl *, 8> ProtocolRefs;
+ SmallVector<SourceLocation, 8> ProtocolLocs;
+ SourceLocation LAngleLoc, EndProtoLoc;
+ if (Tok.is(tok::less) &&
+ ParseObjCProtocolReferences(ProtocolRefs, ProtocolLocs, true,
+ LAngleLoc, EndProtoLoc))
+ return 0;
+
+ Decl *ClsType =
+ Actions.ActOnStartClassInterface(AtLoc, nameId, nameLoc,
+ superClassId, superClassLoc,
+ ProtocolRefs.data(), ProtocolRefs.size(),
+ ProtocolLocs.data(),
+ EndProtoLoc, attrs.getList());
+
+ if (Tok.is(tok::l_brace))
+ ParseObjCClassInstanceVariables(ClsType, tok::objc_protected, AtLoc);
+
+ ParseObjCInterfaceDeclList(tok::objc_interface, ClsType);
+ return ClsType;
+}
+
+/// The Objective-C property callback. This should be defined where
+/// it's used, but instead it's been lifted to here to support VS2005.
+struct Parser::ObjCPropertyCallback : FieldCallback {
+private:
+ virtual void anchor();
+public:
+ Parser &P;
+ SmallVectorImpl<Decl *> &Props;
+ ObjCDeclSpec &OCDS;
+ SourceLocation AtLoc;
+ SourceLocation LParenLoc;
+ tok::ObjCKeywordKind MethodImplKind;
+
+ ObjCPropertyCallback(Parser &P,
+ SmallVectorImpl<Decl *> &Props,
+ ObjCDeclSpec &OCDS, SourceLocation AtLoc,
+ SourceLocation LParenLoc,
+ tok::ObjCKeywordKind MethodImplKind) :
+ P(P), Props(Props), OCDS(OCDS), AtLoc(AtLoc), LParenLoc(LParenLoc),
+ MethodImplKind(MethodImplKind) {
+ }
+
+ Decl *invoke(FieldDeclarator &FD) {
+ if (FD.D.getIdentifier() == 0) {
+ P.Diag(AtLoc, diag::err_objc_property_requires_field_name)
+ << FD.D.getSourceRange();
+ return 0;
+ }
+ if (FD.BitfieldSize) {
+ P.Diag(AtLoc, diag::err_objc_property_bitfield)
+ << FD.D.getSourceRange();
+ return 0;
+ }
+
+ // Install the property declarator into interfaceDecl.
+ IdentifierInfo *SelName =
+ OCDS.getGetterName() ? OCDS.getGetterName() : FD.D.getIdentifier();
+
+ Selector GetterSel =
+ P.PP.getSelectorTable().getNullarySelector(SelName);
+ IdentifierInfo *SetterName = OCDS.getSetterName();
+ Selector SetterSel;
+ if (SetterName)
+ SetterSel = P.PP.getSelectorTable().getSelector(1, &SetterName);
+ else
+ SetterSel = SelectorTable::constructSetterName(P.PP.getIdentifierTable(),
+ P.PP.getSelectorTable(),
+ FD.D.getIdentifier());
+ bool isOverridingProperty = false;
+ Decl *Property =
+ P.Actions.ActOnProperty(P.getCurScope(), AtLoc, LParenLoc,
+ FD, OCDS,
+ GetterSel, SetterSel,
+ &isOverridingProperty,
+ MethodImplKind);
+ if (!isOverridingProperty)
+ Props.push_back(Property);
+
+ return Property;
+ }
+};
+
+void Parser::ObjCPropertyCallback::anchor() {
+}
+
+/// objc-interface-decl-list:
+/// empty
+/// objc-interface-decl-list objc-property-decl [OBJC2]
+/// objc-interface-decl-list objc-method-requirement [OBJC2]
+/// objc-interface-decl-list objc-method-proto ';'
+/// objc-interface-decl-list declaration
+/// objc-interface-decl-list ';'
+///
+/// objc-method-requirement: [OBJC2]
+/// @required
+/// @optional
+///
+void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
+ Decl *CDecl) {
+ SmallVector<Decl *, 32> allMethods;
+ SmallVector<Decl *, 16> allProperties;
+ SmallVector<DeclGroupPtrTy, 8> allTUVariables;
+ tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword;
+
+ SourceRange AtEnd;
+
+ while (1) {
+ // If this is a method prototype, parse it.
+ if (Tok.is(tok::minus) || Tok.is(tok::plus)) {
+ Decl *methodPrototype =
+ ParseObjCMethodPrototype(MethodImplKind, false);
+ allMethods.push_back(methodPrototype);
+ // Consume the ';' here, since ParseObjCMethodPrototype() is re-used for
+ // method definitions.
+ if (ExpectAndConsumeSemi(diag::err_expected_semi_after_method_proto)) {
+        // We didn't find a semi and we errored out. Skip until a ';' or '@'.
+ SkipUntil(tok::at, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ }
+ continue;
+ }
+ if (Tok.is(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_minus_or_plus);
+ ParseObjCMethodDecl(Tok.getLocation(),
+ tok::minus,
+ MethodImplKind, false);
+ continue;
+ }
+ // Ignore excess semicolons.
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ continue;
+ }
+
+ // If we got to the end of the file, exit the loop.
+ if (Tok.is(tok::eof))
+ break;
+
+ // Code completion within an Objective-C interface.
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(),
+ CurParsedObjCImpl? Sema::PCC_ObjCImplementation
+ : Sema::PCC_ObjCInterface);
+ return cutOffParsing();
+ }
+
+ // If we don't have an @ directive, parse it as a function definition.
+ if (Tok.isNot(tok::at)) {
+ // The code below does not consume '}'s because it is afraid of eating the
+ // end of a namespace. Because of the way this code is structured, an
+ // erroneous r_brace would cause an infinite loop if not handled here.
+ if (Tok.is(tok::r_brace))
+ break;
+ ParsedAttributes attrs(AttrFactory);
+ allTUVariables.push_back(ParseDeclarationOrFunctionDefinition(attrs));
+ continue;
+ }
+
+ // Otherwise, we have an @ directive, eat the @.
+ SourceLocation AtLoc = ConsumeToken(); // the "@"
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCAtDirective(getCurScope());
+ return cutOffParsing();
+ }
+
+ tok::ObjCKeywordKind DirectiveKind = Tok.getObjCKeywordID();
+
+ if (DirectiveKind == tok::objc_end) { // @end -> terminate list
+ AtEnd.setBegin(AtLoc);
+ AtEnd.setEnd(Tok.getLocation());
+ break;
+ } else if (DirectiveKind == tok::objc_not_keyword) {
+ Diag(Tok, diag::err_objc_unknown_at);
+ SkipUntil(tok::semi);
+ continue;
+ }
+
+ // Eat the identifier.
+ ConsumeToken();
+
+ switch (DirectiveKind) {
+ default:
+ // FIXME: If someone forgets an @end on a protocol, this loop will
+ // continue to eat up tons of stuff and spew lots of nonsense errors. It
+ // would probably be better to bail out if we saw an @class or @interface
+ // or something like that.
+ Diag(AtLoc, diag::err_objc_illegal_interface_qual);
+ // Skip until we see an '@' or '}' or ';'.
+ SkipUntil(tok::r_brace, tok::at);
+ break;
+
+ case tok::objc_implementation:
+ case tok::objc_interface:
+ Diag(AtLoc, diag::err_objc_missing_end)
+ << FixItHint::CreateInsertion(AtLoc, "@end\n");
+ Diag(CDecl->getLocStart(), diag::note_objc_container_start)
+ << (int) Actions.getObjCContainerKind();
+ ConsumeToken();
+ break;
+
+ case tok::objc_required:
+ case tok::objc_optional:
+ // This is only valid on protocols.
+ // FIXME: Should this check for ObjC2 being enabled?
+ if (contextKey != tok::objc_protocol)
+ Diag(AtLoc, diag::err_objc_directive_only_in_protocol);
+ else
+ MethodImplKind = DirectiveKind;
+ break;
+
+ case tok::objc_property:
+ if (!getLangOpts().ObjC2)
+ Diag(AtLoc, diag::err_objc_properties_require_objc2);
+
+ ObjCDeclSpec OCDS;
+ SourceLocation LParenLoc;
+ // Parse property attribute list, if any.
+ if (Tok.is(tok::l_paren)) {
+ LParenLoc = Tok.getLocation();
+ ParseObjCPropertyAttribute(OCDS);
+ }
+
+ ObjCPropertyCallback Callback(*this, allProperties,
+ OCDS, AtLoc, LParenLoc, MethodImplKind);
+
+ // Parse all the comma separated declarators.
+ DeclSpec DS(AttrFactory);
+ ParseStructDeclaration(DS, Callback);
+
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_decl_list);
+ break;
+ }
+ }
+
+ // We break out of the big loop in two cases: when we see @end or when we see
+  // EOF. In the former case, eat the @end. In the latter case, emit an error.
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCAtDirective(getCurScope());
+ return cutOffParsing();
+ } else if (Tok.isObjCAtKeyword(tok::objc_end)) {
+ ConsumeToken(); // the "end" identifier
+ } else {
+ Diag(Tok, diag::err_objc_missing_end)
+ << FixItHint::CreateInsertion(Tok.getLocation(), "\n@end\n");
+ Diag(CDecl->getLocStart(), diag::note_objc_container_start)
+ << (int) Actions.getObjCContainerKind();
+ AtEnd.setBegin(Tok.getLocation());
+ AtEnd.setEnd(Tok.getLocation());
+ }
+
+ // Insert collected methods declarations into the @interface object.
+ // This passes in an invalid SourceLocation for AtEndLoc when EOF is hit.
+ Actions.ActOnAtEnd(getCurScope(), AtEnd,
+ allMethods.data(), allMethods.size(),
+ allProperties.data(), allProperties.size(),
+ allTUVariables.data(), allTUVariables.size());
+}
+
+/// Parse property attribute declarations.
+///
+/// property-attr-decl: '(' property-attrlist ')'
+/// property-attrlist:
+/// property-attribute
+/// property-attrlist ',' property-attribute
+/// property-attribute:
+/// getter '=' identifier
+/// setter '=' identifier ':'
+/// readonly
+/// readwrite
+/// assign
+/// retain
+/// copy
+/// nonatomic
+/// atomic
+/// strong
+/// weak
+/// unsafe_unretained
+///
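+/// For example (property and selector names are illustrative):
+///   @property (nonatomic, copy) NSString *title;
+///   @property (retain, getter=theDelegate, setter=setTheDelegate:) id delegate;
+///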
+void Parser::ParseObjCPropertyAttribute(ObjCDeclSpec &DS) {
+ assert(Tok.getKind() == tok::l_paren);
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ while (1) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCPropertyFlags(getCurScope(), DS);
+ return cutOffParsing();
+ }
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+
+ // If this is not an identifier at all, bail out early.
+ if (II == 0) {
+ T.consumeClose();
+ return;
+ }
+
+ SourceLocation AttrName = ConsumeToken(); // consume last attribute name
+
+ if (II->isStr("readonly"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_readonly);
+ else if (II->isStr("assign"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_assign);
+ else if (II->isStr("unsafe_unretained"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_unsafe_unretained);
+ else if (II->isStr("readwrite"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_readwrite);
+ else if (II->isStr("retain"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_retain);
+ else if (II->isStr("strong"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_strong);
+ else if (II->isStr("copy"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_copy);
+ else if (II->isStr("nonatomic"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_nonatomic);
+ else if (II->isStr("atomic"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_atomic);
+ else if (II->isStr("weak"))
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_weak);
+ else if (II->isStr("getter") || II->isStr("setter")) {
+ bool IsSetter = II->getNameStart()[0] == 's';
+
+ // getter/setter require extra treatment.
+ unsigned DiagID = IsSetter ? diag::err_objc_expected_equal_for_setter :
+ diag::err_objc_expected_equal_for_getter;
+
+ if (ExpectAndConsume(tok::equal, DiagID, "", tok::r_paren))
+ return;
+
+ if (Tok.is(tok::code_completion)) {
+ if (IsSetter)
+ Actions.CodeCompleteObjCPropertySetter(getCurScope());
+ else
+ Actions.CodeCompleteObjCPropertyGetter(getCurScope());
+ return cutOffParsing();
+ }
+
+
+ SourceLocation SelLoc;
+ IdentifierInfo *SelIdent = ParseObjCSelectorPiece(SelLoc);
+
+ if (!SelIdent) {
+ Diag(Tok, diag::err_objc_expected_selector_for_getter_setter)
+ << IsSetter;
+ SkipUntil(tok::r_paren);
+ return;
+ }
+
+ if (IsSetter) {
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_setter);
+ DS.setSetterName(SelIdent);
+
+ if (ExpectAndConsume(tok::colon,
+ diag::err_expected_colon_after_setter_name, "",
+ tok::r_paren))
+ return;
+ } else {
+ DS.setPropertyAttributes(ObjCDeclSpec::DQ_PR_getter);
+ DS.setGetterName(SelIdent);
+ }
+ } else {
+ Diag(AttrName, diag::err_objc_expected_property_attr) << II;
+ SkipUntil(tok::r_paren);
+ return;
+ }
+
+ if (Tok.isNot(tok::comma))
+ break;
+
+ ConsumeToken();
+ }
+
+ T.consumeClose();
+}
+
+/// objc-method-proto:
+/// objc-instance-method objc-method-decl objc-method-attributes[opt]
+/// objc-class-method objc-method-decl objc-method-attributes[opt]
+///
+/// objc-instance-method: '-'
+/// objc-class-method: '+'
+///
+/// objc-method-attributes: [OBJC2]
+/// __attribute__((deprecated))
+///
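+/// For example (selector names are illustrative):
+///   - (void)refresh;                                   // instance method
+///   + (id)sharedInstance __attribute__((deprecated));  // class method [OBJC2]
+///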
+Decl *Parser::ParseObjCMethodPrototype(tok::ObjCKeywordKind MethodImplKind,
+ bool MethodDefinition) {
+ assert((Tok.is(tok::minus) || Tok.is(tok::plus)) && "expected +/-");
+
+ tok::TokenKind methodType = Tok.getKind();
+ SourceLocation mLoc = ConsumeToken();
+ Decl *MDecl = ParseObjCMethodDecl(mLoc, methodType, MethodImplKind,
+ MethodDefinition);
+ // Since this rule is used for both method declarations and definitions,
+ // the caller is (optionally) responsible for consuming the ';'.
+ return MDecl;
+}
+
+/// objc-selector:
+/// identifier
+/// one of
+/// enum struct union if else while do for switch case default
+/// break continue return goto asm sizeof typeof __alignof
+/// unsigned long const short volatile signed restrict _Complex
+/// in out inout bycopy byref oneway int char float double void _Bool
+///
+IdentifierInfo *Parser::ParseObjCSelectorPiece(SourceLocation &SelectorLoc) {
+
+ switch (Tok.getKind()) {
+ default:
+ return 0;
+ case tok::ampamp:
+ case tok::ampequal:
+ case tok::amp:
+ case tok::pipe:
+ case tok::tilde:
+ case tok::exclaim:
+ case tok::exclaimequal:
+ case tok::pipepipe:
+ case tok::pipeequal:
+ case tok::caret:
+ case tok::caretequal: {
+ std::string ThisTok(PP.getSpelling(Tok));
+ if (isalpha(ThisTok[0])) {
+ IdentifierInfo *II = &PP.getIdentifierTable().get(ThisTok.data());
+ Tok.setKind(tok::identifier);
+ SelectorLoc = ConsumeToken();
+ return II;
+ }
+ return 0;
+ }
+
+ case tok::identifier:
+ case tok::kw_asm:
+ case tok::kw_auto:
+ case tok::kw_bool:
+ case tok::kw_break:
+ case tok::kw_case:
+ case tok::kw_catch:
+ case tok::kw_char:
+ case tok::kw_class:
+ case tok::kw_const:
+ case tok::kw_const_cast:
+ case tok::kw_continue:
+ case tok::kw_default:
+ case tok::kw_delete:
+ case tok::kw_do:
+ case tok::kw_double:
+ case tok::kw_dynamic_cast:
+ case tok::kw_else:
+ case tok::kw_enum:
+ case tok::kw_explicit:
+ case tok::kw_export:
+ case tok::kw_extern:
+ case tok::kw_false:
+ case tok::kw_float:
+ case tok::kw_for:
+ case tok::kw_friend:
+ case tok::kw_goto:
+ case tok::kw_if:
+ case tok::kw_inline:
+ case tok::kw_int:
+ case tok::kw_long:
+ case tok::kw_mutable:
+ case tok::kw_namespace:
+ case tok::kw_new:
+ case tok::kw_operator:
+ case tok::kw_private:
+ case tok::kw_protected:
+ case tok::kw_public:
+ case tok::kw_register:
+ case tok::kw_reinterpret_cast:
+ case tok::kw_restrict:
+ case tok::kw_return:
+ case tok::kw_short:
+ case tok::kw_signed:
+ case tok::kw_sizeof:
+ case tok::kw_static:
+ case tok::kw_static_cast:
+ case tok::kw_struct:
+ case tok::kw_switch:
+ case tok::kw_template:
+ case tok::kw_this:
+ case tok::kw_throw:
+ case tok::kw_true:
+ case tok::kw_try:
+ case tok::kw_typedef:
+ case tok::kw_typeid:
+ case tok::kw_typename:
+ case tok::kw_typeof:
+ case tok::kw_union:
+ case tok::kw_unsigned:
+ case tok::kw_using:
+ case tok::kw_virtual:
+ case tok::kw_void:
+ case tok::kw_volatile:
+ case tok::kw_wchar_t:
+ case tok::kw_while:
+ case tok::kw__Bool:
+ case tok::kw__Complex:
+ case tok::kw___alignof:
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ SelectorLoc = ConsumeToken();
+ return II;
+ }
+}
+
+/// objc-for-collection-in: 'in'
+///
+bool Parser::isTokIdentifier_in() const {
+ // FIXME: May have to do additional look-ahead to only allow for
+ // valid tokens following an 'in'; such as an identifier, unary operators,
+ // '[' etc.
+ return (getLangOpts().ObjC2 && Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo() == ObjCTypeQuals[objc_in]);
+}
+
+/// ParseObjCTypeQualifierList - This routine parses the objective-c's type
+/// qualifier list and builds their bitmask representation in the input
+/// argument.
+///
+/// objc-type-qualifiers:
+/// objc-type-qualifier
+/// objc-type-qualifiers objc-type-qualifier
+///
+void Parser::ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
+ Declarator::TheContext Context) {
+ assert(Context == Declarator::ObjCParameterContext ||
+ Context == Declarator::ObjCResultContext);
+
+ while (1) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCPassingType(getCurScope(), DS,
+ Context == Declarator::ObjCParameterContext);
+ return cutOffParsing();
+ }
+
+ if (Tok.isNot(tok::identifier))
+ return;
+
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ for (unsigned i = 0; i != objc_NumQuals; ++i) {
+ if (II != ObjCTypeQuals[i])
+ continue;
+
+ ObjCDeclSpec::ObjCDeclQualifier Qual;
+ switch (i) {
+ default: llvm_unreachable("Unknown decl qualifier");
+ case objc_in: Qual = ObjCDeclSpec::DQ_In; break;
+ case objc_out: Qual = ObjCDeclSpec::DQ_Out; break;
+ case objc_inout: Qual = ObjCDeclSpec::DQ_Inout; break;
+ case objc_oneway: Qual = ObjCDeclSpec::DQ_Oneway; break;
+ case objc_bycopy: Qual = ObjCDeclSpec::DQ_Bycopy; break;
+ case objc_byref: Qual = ObjCDeclSpec::DQ_Byref; break;
+ }
+ DS.setObjCDeclQualifier(Qual);
+ ConsumeToken();
+ II = 0;
+ break;
+ }
+
+ // If this wasn't a recognized qualifier, bail out.
+ if (II) return;
+ }
+}
+
+/// Take all the decl attributes out of the given list and add
+/// them to the given attribute set.
+static void takeDeclAttributes(ParsedAttributes &attrs,
+ AttributeList *list) {
+ while (list) {
+ AttributeList *cur = list;
+ list = cur->getNext();
+
+ if (!cur->isUsedAsTypeAttr()) {
+ // Clear out the next pointer. We're really completely
+ // destroying the internal invariants of the declarator here,
+ // but it doesn't matter because we're done with it.
+ cur->setNext(0);
+ attrs.add(cur);
+ }
+ }
+}
+
+/// takeDeclAttributes - Take all the decl attributes from the given
+/// declarator and add them to the given list.
+static void takeDeclAttributes(ParsedAttributes &attrs,
+ Declarator &D) {
+ // First, take ownership of all attributes.
+ attrs.getPool().takeAllFrom(D.getAttributePool());
+ attrs.getPool().takeAllFrom(D.getDeclSpec().getAttributePool());
+
+ // Now actually move the attributes over.
+ takeDeclAttributes(attrs, D.getDeclSpec().getAttributes().getList());
+ takeDeclAttributes(attrs, D.getAttributes());
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i)
+ takeDeclAttributes(attrs,
+ const_cast<AttributeList*>(D.getTypeObject(i).getAttrs()));
+}
+
+/// objc-type-name:
+/// '(' objc-type-qualifiers[opt] type-name ')'
+/// '(' objc-type-qualifiers[opt] ')'
+///
+ParsedType Parser::ParseObjCTypeName(ObjCDeclSpec &DS,
+ Declarator::TheContext context,
+ ParsedAttributes *paramAttrs) {
+ assert(context == Declarator::ObjCParameterContext ||
+ context == Declarator::ObjCResultContext);
+ assert((paramAttrs != 0) == (context == Declarator::ObjCParameterContext));
+
+ assert(Tok.is(tok::l_paren) && "expected (");
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ SourceLocation TypeStartLoc = Tok.getLocation();
+ ObjCDeclContextSwitch ObjCDC(*this);
+
+ // Parse type qualifiers, in, inout, etc.
+ ParseObjCTypeQualifierList(DS, context);
+
+ ParsedType Ty;
+ if (isTypeSpecifierQualifier()) {
+ // Parse an abstract declarator.
+ DeclSpec declSpec(AttrFactory);
+ declSpec.setObjCQualifiers(&DS);
+ ParseSpecifierQualifierList(declSpec);
+ Declarator declarator(declSpec, context);
+ ParseDeclarator(declarator);
+
+ // If that's not invalid, extract a type.
+ if (!declarator.isInvalidType()) {
+ TypeResult type = Actions.ActOnTypeName(getCurScope(), declarator);
+ if (!type.isInvalid())
+ Ty = type.get();
+
+ // If we're parsing a parameter, steal all the decl attributes
+ // and add them to the decl spec.
+ if (context == Declarator::ObjCParameterContext)
+ takeDeclAttributes(*paramAttrs, declarator);
+ }
+ } else if (context == Declarator::ObjCResultContext &&
+ Tok.is(tok::identifier)) {
+ if (!Ident_instancetype)
+ Ident_instancetype = PP.getIdentifierInfo("instancetype");
+
+ if (Tok.getIdentifierInfo() == Ident_instancetype) {
+ Ty = Actions.ActOnObjCInstanceType(Tok.getLocation());
+ ConsumeToken();
+ }
+ }
+
+ if (Tok.is(tok::r_paren))
+ T.consumeClose();
+ else if (Tok.getLocation() == TypeStartLoc) {
+ // If we didn't eat any tokens, then this isn't a type.
+ Diag(Tok, diag::err_expected_type);
+ SkipUntil(tok::r_paren);
+ } else {
+ // Otherwise, we found *something*, but didn't get a ')' in the right
+ // place. Emit an error then return what we have as the type.
+ T.consumeClose();
+ }
+ return Ty;
+}
+
+/// objc-method-decl:
+/// objc-selector
+/// objc-keyword-selector objc-parmlist[opt]
+/// objc-type-name objc-selector
+/// objc-type-name objc-keyword-selector objc-parmlist[opt]
+///
+/// objc-keyword-selector:
+/// objc-keyword-decl
+/// objc-keyword-selector objc-keyword-decl
+///
+/// objc-keyword-decl:
+/// objc-selector ':' objc-type-name objc-keyword-attributes[opt] identifier
+/// objc-selector ':' objc-keyword-attributes[opt] identifier
+/// ':' objc-type-name objc-keyword-attributes[opt] identifier
+/// ':' objc-keyword-attributes[opt] identifier
+///
+/// objc-parmlist:
+/// objc-parms objc-ellipsis[opt]
+///
+/// objc-parms:
+/// objc-parms , parameter-declaration
+///
+/// objc-ellipsis:
+/// , ...
+///
+/// objc-keyword-attributes: [OBJC2]
+/// __attribute__((unused))
+///
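+/// For example (all names are illustrative):
+///   - (id)initWithName:(NSString *)name age:(int)age;  // objc-keyword-selector
+///   - (void)logFormat:(NSString *)fmt, ...;            // objc-parmlist + ellipsis
+///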
+Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
+ tok::TokenKind mType,
+ tok::ObjCKeywordKind MethodImplKind,
+ bool MethodDefinition) {
+ ParsingDeclRAIIObject PD(*this);
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCMethodDecl(getCurScope(), mType == tok::minus,
+ /*ReturnType=*/ ParsedType());
+ cutOffParsing();
+ return 0;
+ }
+
+ // Parse the return type if present.
+ ParsedType ReturnType;
+ ObjCDeclSpec DSRet;
+ if (Tok.is(tok::l_paren))
+ ReturnType = ParseObjCTypeName(DSRet, Declarator::ObjCResultContext, 0);
+
+ // If attributes exist before the method, parse them.
+ ParsedAttributes methodAttrs(AttrFactory);
+ if (getLangOpts().ObjC2)
+ MaybeParseGNUAttributes(methodAttrs);
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCMethodDecl(getCurScope(), mType == tok::minus,
+ ReturnType);
+ cutOffParsing();
+ return 0;
+ }
+
+ // Now parse the selector.
+ SourceLocation selLoc;
+ IdentifierInfo *SelIdent = ParseObjCSelectorPiece(selLoc);
+
+ // An unnamed colon is valid.
+ if (!SelIdent && Tok.isNot(tok::colon)) { // missing selector name.
+ Diag(Tok, diag::err_expected_selector_for_method)
+ << SourceRange(mLoc, Tok.getLocation());
+ // Skip until we get a ; or {}.
+ SkipUntil(tok::r_brace);
+ return 0;
+ }
+
+ SmallVector<DeclaratorChunk::ParamInfo, 8> CParamInfo;
+ if (Tok.isNot(tok::colon)) {
+ // If attributes exist after the method, parse them.
+ if (getLangOpts().ObjC2)
+ MaybeParseGNUAttributes(methodAttrs);
+
+ Selector Sel = PP.getSelectorTable().getNullarySelector(SelIdent);
+ Decl *Result
+ = Actions.ActOnMethodDeclaration(getCurScope(), mLoc, Tok.getLocation(),
+ mType, DSRet, ReturnType,
+ selLoc, Sel, 0,
+ CParamInfo.data(), CParamInfo.size(),
+ methodAttrs.getList(), MethodImplKind,
+ false, MethodDefinition);
+ PD.complete(Result);
+ return Result;
+ }
+
+ SmallVector<IdentifierInfo *, 12> KeyIdents;
+ SmallVector<SourceLocation, 12> KeyLocs;
+ SmallVector<Sema::ObjCArgInfo, 12> ArgInfos;
+ ParseScope PrototypeScope(this,
+ Scope::FunctionPrototypeScope|Scope::DeclScope);
+
+ AttributePool allParamAttrs(AttrFactory);
+
+ while (1) {
+ ParsedAttributes paramAttrs(AttrFactory);
+ Sema::ObjCArgInfo ArgInfo;
+
+ // Each iteration parses a single keyword argument.
+ if (Tok.isNot(tok::colon)) {
+ Diag(Tok, diag::err_expected_colon);
+ break;
+ }
+ ConsumeToken(); // Eat the ':'.
+
+ ArgInfo.Type = ParsedType();
+ if (Tok.is(tok::l_paren)) // Parse the argument type if present.
+ ArgInfo.Type = ParseObjCTypeName(ArgInfo.DeclSpec,
+ Declarator::ObjCParameterContext,
+ &paramAttrs);
+
+ // If attributes exist before the argument name, parse them.
+ // Regardless, collect all the attributes we've parsed so far.
+ ArgInfo.ArgAttrs = 0;
+ if (getLangOpts().ObjC2) {
+ MaybeParseGNUAttributes(paramAttrs);
+ ArgInfo.ArgAttrs = paramAttrs.getList();
+ }
+
+ // Code completion for the next piece of the selector.
+ if (Tok.is(tok::code_completion)) {
+ KeyIdents.push_back(SelIdent);
+ Actions.CodeCompleteObjCMethodDeclSelector(getCurScope(),
+ mType == tok::minus,
+ /*AtParameterName=*/true,
+ ReturnType,
+ KeyIdents.data(),
+ KeyIdents.size());
+ cutOffParsing();
+ return 0;
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident); // missing argument name.
+ break;
+ }
+
+ ArgInfo.Name = Tok.getIdentifierInfo();
+ ArgInfo.NameLoc = Tok.getLocation();
+ ConsumeToken(); // Eat the identifier.
+
+ ArgInfos.push_back(ArgInfo);
+ KeyIdents.push_back(SelIdent);
+ KeyLocs.push_back(selLoc);
+
+ // Make sure the attributes persist.
+ allParamAttrs.takeAllFrom(paramAttrs.getPool());
+
+ // Code completion for the next piece of the selector.
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCMethodDeclSelector(getCurScope(),
+ mType == tok::minus,
+ /*AtParameterName=*/false,
+ ReturnType,
+ KeyIdents.data(),
+ KeyIdents.size());
+ cutOffParsing();
+ return 0;
+ }
+
+ // Check for another keyword selector.
+ SelIdent = ParseObjCSelectorPiece(selLoc);
+ if (!SelIdent && Tok.isNot(tok::colon))
+ break;
+ // We have a selector or a colon, continue parsing.
+ }
+
+ bool isVariadic = false;
+
+ // Parse the (optional) parameter list.
+ while (Tok.is(tok::comma)) {
+ ConsumeToken();
+ if (Tok.is(tok::ellipsis)) {
+ isVariadic = true;
+ ConsumeToken();
+ break;
+ }
+ DeclSpec DS(AttrFactory);
+ ParseDeclarationSpecifiers(DS);
+ // Parse the declarator.
+ Declarator ParmDecl(DS, Declarator::PrototypeContext);
+ ParseDeclarator(ParmDecl);
+ IdentifierInfo *ParmII = ParmDecl.getIdentifier();
+ Decl *Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDecl);
+ CParamInfo.push_back(DeclaratorChunk::ParamInfo(ParmII,
+ ParmDecl.getIdentifierLoc(),
+ Param,
+ 0));
+
+ }
+
+ // FIXME: Add support for optional parameter list...
+ // If attributes exist after the method, parse them.
+ if (getLangOpts().ObjC2)
+ MaybeParseGNUAttributes(methodAttrs);
+
+ if (KeyIdents.size() == 0)
+ return 0;
+
+ Selector Sel = PP.getSelectorTable().getSelector(KeyIdents.size(),
+ &KeyIdents[0]);
+ Decl *Result
+ = Actions.ActOnMethodDeclaration(getCurScope(), mLoc, Tok.getLocation(),
+ mType, DSRet, ReturnType,
+ KeyLocs, Sel, &ArgInfos[0],
+ CParamInfo.data(), CParamInfo.size(),
+ methodAttrs.getList(),
+ MethodImplKind, isVariadic, MethodDefinition);
+
+ PD.complete(Result);
+ return Result;
+}
+
+/// objc-protocol-refs:
+/// '<' identifier-list '>'
+///
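+/// For example (protocol names are illustrative):
+///   @interface Foo : Bar <Copying, Coding>
+///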
+bool Parser::
+ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &Protocols,
+ SmallVectorImpl<SourceLocation> &ProtocolLocs,
+ bool WarnOnDeclarations,
+ SourceLocation &LAngleLoc, SourceLocation &EndLoc) {
+ assert(Tok.is(tok::less) && "expected <");
+
+ LAngleLoc = ConsumeToken(); // the "<"
+
+ SmallVector<IdentifierLocPair, 8> ProtocolIdents;
+
+ while (1) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCProtocolReferences(ProtocolIdents.data(),
+ ProtocolIdents.size());
+ cutOffParsing();
+ return true;
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::greater);
+ return true;
+ }
+ ProtocolIdents.push_back(std::make_pair(Tok.getIdentifierInfo(),
+ Tok.getLocation()));
+ ProtocolLocs.push_back(Tok.getLocation());
+ ConsumeToken();
+
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken();
+ }
+
+ // Consume the '>'.
+ if (Tok.isNot(tok::greater)) {
+ Diag(Tok, diag::err_expected_greater);
+ return true;
+ }
+
+ EndLoc = ConsumeToken();
+
+ // Convert the list of protocols identifiers into a list of protocol decls.
+ Actions.FindProtocolDeclaration(WarnOnDeclarations,
+ &ProtocolIdents[0], ProtocolIdents.size(),
+ Protocols);
+ return false;
+}
+
+/// \brief Parse the Objective-C protocol qualifiers that follow a typename
+/// in a decl-specifier-seq, starting at the '<'.
+bool Parser::ParseObjCProtocolQualifiers(DeclSpec &DS) {
+ assert(Tok.is(tok::less) && "Protocol qualifiers start with '<'");
+ assert(getLangOpts().ObjC1 && "Protocol qualifiers only exist in Objective-C");
+ SourceLocation LAngleLoc, EndProtoLoc;
+ SmallVector<Decl *, 8> ProtocolDecl;
+ SmallVector<SourceLocation, 8> ProtocolLocs;
+ bool Result = ParseObjCProtocolReferences(ProtocolDecl, ProtocolLocs, false,
+ LAngleLoc, EndProtoLoc);
+ DS.setProtocolQualifiers(ProtocolDecl.data(), ProtocolDecl.size(),
+ ProtocolLocs.data(), LAngleLoc);
+ if (EndProtoLoc.isValid())
+ DS.SetRangeEnd(EndProtoLoc);
+ return Result;
+}
+
+
+/// objc-class-instance-variables:
+/// '{' objc-instance-variable-decl-list[opt] '}'
+///
+/// objc-instance-variable-decl-list:
+/// objc-visibility-spec
+/// objc-instance-variable-decl ';'
+/// ';'
+/// objc-instance-variable-decl-list objc-visibility-spec
+/// objc-instance-variable-decl-list objc-instance-variable-decl ';'
+/// objc-instance-variable-decl-list ';'
+///
+/// objc-visibility-spec:
+/// @private
+/// @protected
+/// @public
+/// @package [OBJC2]
+///
+/// objc-instance-variable-decl:
+/// struct-declaration
+///
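+/// For example (names are illustrative):
+///   @interface Foo : Bar {
+///   @private
+///     int count;
+///   @public
+///     id delegate;
+///   }
+///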
+void Parser::ParseObjCClassInstanceVariables(Decl *interfaceDecl,
+ tok::ObjCKeywordKind visibility,
+ SourceLocation atLoc) {
+ assert(Tok.is(tok::l_brace) && "expected {");
+ SmallVector<Decl *, 32> AllIvarDecls;
+
+ ParseScope ClassScope(this, Scope::DeclScope|Scope::ClassScope);
+ ObjCDeclContextSwitch ObjCDC(*this);
+
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ T.consumeOpen();
+
+ // While we still have something to read, read the instance variables.
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+ // Each iteration of this loop reads one objc-instance-variable-decl.
+
+ // Check for extraneous top-level semicolon.
+ if (Tok.is(tok::semi)) {
+ Diag(Tok, diag::ext_extra_ivar_semi)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+ ConsumeToken();
+ continue;
+ }
+
+ // Set the default visibility to private.
+ if (Tok.is(tok::at)) { // parse objc-visibility-spec
+ ConsumeToken(); // eat the @ sign
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCAtVisibility(getCurScope());
+ return cutOffParsing();
+ }
+
+ switch (Tok.getObjCKeywordID()) {
+ case tok::objc_private:
+ case tok::objc_public:
+ case tok::objc_protected:
+ case tok::objc_package:
+ visibility = Tok.getObjCKeywordID();
+ ConsumeToken();
+ continue;
+ default:
+ Diag(Tok, diag::err_objc_illegal_visibility_spec);
+ continue;
+ }
+ }
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(),
+ Sema::PCC_ObjCInstanceVariableList);
+ return cutOffParsing();
+ }
+
+ struct ObjCIvarCallback : FieldCallback {
+ Parser &P;
+ Decl *IDecl;
+ tok::ObjCKeywordKind visibility;
+ SmallVectorImpl<Decl *> &AllIvarDecls;
+
+ ObjCIvarCallback(Parser &P, Decl *IDecl, tok::ObjCKeywordKind V,
+ SmallVectorImpl<Decl *> &AllIvarDecls) :
+ P(P), IDecl(IDecl), visibility(V), AllIvarDecls(AllIvarDecls) {
+ }
+
+ Decl *invoke(FieldDeclarator &FD) {
+ P.Actions.ActOnObjCContainerStartDefinition(IDecl);
+ // Install the declarator into the interface decl.
+ Decl *Field
+ = P.Actions.ActOnIvar(P.getCurScope(),
+ FD.D.getDeclSpec().getSourceRange().getBegin(),
+ FD.D, FD.BitfieldSize, visibility);
+ P.Actions.ActOnObjCContainerFinishDefinition();
+ if (Field)
+ AllIvarDecls.push_back(Field);
+ return Field;
+ }
+ } Callback(*this, interfaceDecl, visibility, AllIvarDecls);
+
+ // Parse all the comma separated declarators.
+ DeclSpec DS(AttrFactory);
+ ParseStructDeclaration(DS, Callback);
+
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ } else {
+ Diag(Tok, diag::err_expected_semi_decl_list);
+ // Skip to end of block or statement
+ SkipUntil(tok::r_brace, true, true);
+ }
+ }
+ T.consumeClose();
+
+ Actions.ActOnObjCContainerStartDefinition(interfaceDecl);
+ Actions.ActOnLastBitfield(T.getCloseLocation(), AllIvarDecls);
+ Actions.ActOnObjCContainerFinishDefinition();
+ // Call ActOnFields() even if we don't have any decls. This is useful
+ // for code rewriting tools that need to be aware of the empty list.
+ Actions.ActOnFields(getCurScope(), atLoc, interfaceDecl,
+ AllIvarDecls,
+ T.getOpenLocation(), T.getCloseLocation(), 0);
+ return;
+}
+
+/// objc-protocol-declaration:
+/// objc-protocol-definition
+/// objc-protocol-forward-reference
+///
+/// objc-protocol-definition:
+/// @protocol identifier
+/// objc-protocol-refs[opt]
+/// objc-interface-decl-list
+/// @end
+///
+/// objc-protocol-forward-reference:
+/// @protocol identifier-list ';'
+///
+/// "@protocol identifier ;" should be resolved as "@protocol
+/// identifier-list ;": objc-interface-decl-list may not start with a
+/// semicolon in the first alternative if objc-protocol-refs are omitted.
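+///
+/// For example (protocol names are illustrative):
+///   @protocol Logging <BaseProto>      // protocol definition
+///   - (void)log;
+///   @end
+///   @protocol A, B;                    // forward references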
+Parser::DeclGroupPtrTy
+Parser::ParseObjCAtProtocolDeclaration(SourceLocation AtLoc,
+ ParsedAttributes &attrs) {
+ assert(Tok.isObjCAtKeyword(tok::objc_protocol) &&
+ "ParseObjCAtProtocolDeclaration(): Expected @protocol");
+ ConsumeToken(); // the "protocol" identifier
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCProtocolDecl(getCurScope());
+ cutOffParsing();
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident); // missing protocol name.
+ return DeclGroupPtrTy();
+ }
+ // Save the protocol name, then consume it.
+ IdentifierInfo *protocolName = Tok.getIdentifierInfo();
+ SourceLocation nameLoc = ConsumeToken();
+
+ if (Tok.is(tok::semi)) { // forward declaration of one protocol.
+ IdentifierLocPair ProtoInfo(protocolName, nameLoc);
+ ConsumeToken();
+ return Actions.ActOnForwardProtocolDeclaration(AtLoc, &ProtoInfo, 1,
+ attrs.getList());
+ }
+
+ CheckNestedObjCContexts(AtLoc);
+
+ if (Tok.is(tok::comma)) { // list of forward declarations.
+ SmallVector<IdentifierLocPair, 8> ProtocolRefs;
+ ProtocolRefs.push_back(std::make_pair(protocolName, nameLoc));
+
+ // Parse the list of forward declarations.
+ while (1) {
+ ConsumeToken(); // the ','
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::semi);
+ return DeclGroupPtrTy();
+ }
+ ProtocolRefs.push_back(IdentifierLocPair(Tok.getIdentifierInfo(),
+ Tok.getLocation()));
+ ConsumeToken(); // the identifier
+
+ if (Tok.isNot(tok::comma))
+ break;
+ }
+ // Consume the ';'.
+ if (ExpectAndConsume(tok::semi, diag::err_expected_semi_after, "@protocol"))
+ return DeclGroupPtrTy();
+
+ return Actions.ActOnForwardProtocolDeclaration(AtLoc,
+ &ProtocolRefs[0],
+ ProtocolRefs.size(),
+ attrs.getList());
+ }
+
+ // Last, and definitely not least, parse a protocol declaration.
+ SourceLocation LAngleLoc, EndProtoLoc;
+
+ SmallVector<Decl *, 8> ProtocolRefs;
+ SmallVector<SourceLocation, 8> ProtocolLocs;
+ if (Tok.is(tok::less) &&
+ ParseObjCProtocolReferences(ProtocolRefs, ProtocolLocs, false,
+ LAngleLoc, EndProtoLoc))
+ return DeclGroupPtrTy();
+
+ Decl *ProtoType =
+ Actions.ActOnStartProtocolInterface(AtLoc, protocolName, nameLoc,
+ ProtocolRefs.data(),
+ ProtocolRefs.size(),
+ ProtocolLocs.data(),
+ EndProtoLoc, attrs.getList());
+
+ ParseObjCInterfaceDeclList(tok::objc_protocol, ProtoType);
+ return Actions.ConvertDeclToDeclGroup(ProtoType);
+}
+
+/// objc-implementation:
+/// objc-class-implementation-prologue
+/// objc-category-implementation-prologue
+///
+/// objc-class-implementation-prologue:
+/// @implementation identifier objc-superclass[opt]
+/// objc-class-instance-variables[opt]
+///
+/// objc-category-implementation-prologue:
+/// @implementation identifier ( identifier )
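+///
+/// For example (names are illustrative):
+///   @implementation Foo                // class implementation
+///   @implementation Foo (Extras)       // category implementation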
+Parser::DeclGroupPtrTy
+Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc_implementation) &&
+ "ParseObjCAtImplementationDeclaration(): Expected @implementation");
+ CheckNestedObjCContexts(AtLoc);
+ ConsumeToken(); // the "implementation" identifier
+
+ // Code completion after '@implementation'.
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCImplementationDecl(getCurScope());
+ cutOffParsing();
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident); // missing class or category name.
+ return DeclGroupPtrTy();
+ }
+ // We have a class or category name - consume it.
+ IdentifierInfo *nameId = Tok.getIdentifierInfo();
+ SourceLocation nameLoc = ConsumeToken(); // consume class or category name
+ Decl *ObjCImpDecl = 0;
+
+ if (Tok.is(tok::l_paren)) {
+ // we have a category implementation.
+ ConsumeParen();
+ SourceLocation categoryLoc, rparenLoc;
+ IdentifierInfo *categoryId = 0;
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCImplementationCategory(getCurScope(), nameId, nameLoc);
+ cutOffParsing();
+ return DeclGroupPtrTy();
+ }
+
+ if (Tok.is(tok::identifier)) {
+ categoryId = Tok.getIdentifierInfo();
+ categoryLoc = ConsumeToken();
+ } else {
+ Diag(Tok, diag::err_expected_ident); // missing category name.
+ return DeclGroupPtrTy();
+ }
+ if (Tok.isNot(tok::r_paren)) {
+ Diag(Tok, diag::err_expected_rparen);
+ SkipUntil(tok::r_paren, false); // don't stop at ';'
+ return DeclGroupPtrTy();
+ }
+ rparenLoc = ConsumeParen();
+ ObjCImpDecl = Actions.ActOnStartCategoryImplementation(
+ AtLoc, nameId, nameLoc, categoryId,
+ categoryLoc);
+
+ } else {
+ // We have a class implementation
+ SourceLocation superClassLoc;
+ IdentifierInfo *superClassId = 0;
+ if (Tok.is(tok::colon)) {
+ // We have a super class
+ ConsumeToken();
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident); // missing super class name.
+ return DeclGroupPtrTy();
+ }
+ superClassId = Tok.getIdentifierInfo();
+ superClassLoc = ConsumeToken(); // Consume super class name
+ }
+ ObjCImpDecl = Actions.ActOnStartClassImplementation(
+ AtLoc, nameId, nameLoc,
+ superClassId, superClassLoc);
+
+ if (Tok.is(tok::l_brace)) // we have ivars
+ ParseObjCClassInstanceVariables(ObjCImpDecl, tok::objc_private, AtLoc);
+ }
+ assert(ObjCImpDecl);
+
+ SmallVector<Decl *, 8> DeclsInGroup;
+
+ {
+ ObjCImplParsingDataRAII ObjCImplParsing(*this, ObjCImpDecl);
+ while (!ObjCImplParsing.isFinished() && Tok.isNot(tok::eof)) {
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
+ if (DeclGroupPtrTy DGP = ParseExternalDeclaration(attrs)) {
+ DeclGroupRef DG = DGP.get();
+ DeclsInGroup.append(DG.begin(), DG.end());
+ }
+ }
+ }
+
+ return Actions.ActOnFinishObjCImplementation(ObjCImpDecl, DeclsInGroup);
+}
+
+Parser::DeclGroupPtrTy
+Parser::ParseObjCAtEndDeclaration(SourceRange atEnd) {
+ assert(Tok.isObjCAtKeyword(tok::objc_end) &&
+ "ParseObjCAtEndDeclaration(): Expected @end");
+ ConsumeToken(); // the "end" identifier
+ if (CurParsedObjCImpl)
+ CurParsedObjCImpl->finish(atEnd);
+ else
+ // missing @implementation
+ Diag(atEnd.getBegin(), diag::err_expected_objc_container);
+ return DeclGroupPtrTy();
+}
+
+Parser::ObjCImplParsingDataRAII::~ObjCImplParsingDataRAII() {
+ if (!Finished) {
+ finish(P.Tok.getLocation());
+ if (P.Tok.is(tok::eof)) {
+ P.Diag(P.Tok, diag::err_objc_missing_end)
+ << FixItHint::CreateInsertion(P.Tok.getLocation(), "\n@end\n");
+ P.Diag(Dcl->getLocStart(), diag::note_objc_container_start)
+ << Sema::OCK_Implementation;
+ }
+ }
+ P.CurParsedObjCImpl = 0;
+ assert(LateParsedObjCMethods.empty());
+}
+
+void Parser::ObjCImplParsingDataRAII::finish(SourceRange AtEnd) {
+ assert(!Finished);
+ P.Actions.DefaultSynthesizeProperties(P.getCurScope(), Dcl);
+ for (size_t i = 0; i < LateParsedObjCMethods.size(); ++i)
+ P.ParseLexedObjCMethodDefs(*LateParsedObjCMethods[i]);
+
+ P.Actions.ActOnAtEnd(P.getCurScope(), AtEnd);
+
+  // Clear and free the cached objc methods.
+ for (LateParsedObjCMethodContainer::iterator
+ I = LateParsedObjCMethods.begin(),
+ E = LateParsedObjCMethods.end(); I != E; ++I)
+ delete *I;
+ LateParsedObjCMethods.clear();
+
+ Finished = true;
+}
+
+/// compatibility-alias-decl:
+/// @compatibility_alias alias-name class-name ';'
+///
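+/// For example, '@compatibility_alias NewName ExistingClassName;' declares
+/// NewName as an alias for the already-declared class ExistingClassName.
+///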
+Decl *Parser::ParseObjCAtAliasDeclaration(SourceLocation atLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc_compatibility_alias) &&
+ "ParseObjCAtAliasDeclaration(): Expected @compatibility_alias");
+ ConsumeToken(); // consume compatibility_alias
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ return 0;
+ }
+ IdentifierInfo *aliasId = Tok.getIdentifierInfo();
+ SourceLocation aliasLoc = ConsumeToken(); // consume alias-name
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ return 0;
+ }
+ IdentifierInfo *classId = Tok.getIdentifierInfo();
+ SourceLocation classLoc = ConsumeToken(); // consume class-name;
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
+ "@compatibility_alias");
+ return Actions.ActOnCompatiblityAlias(atLoc, aliasId, aliasLoc,
+ classId, classLoc);
+}
+
+/// property-synthesis:
+/// @synthesize property-ivar-list ';'
+///
+/// property-ivar-list:
+/// property-ivar
+/// property-ivar-list ',' property-ivar
+///
+/// property-ivar:
+/// identifier
+/// identifier '=' identifier
+///
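+/// For example, '@synthesize firstName = _firstName, lastName;' backs the
+/// firstName property with the _firstName ivar and lastName with an ivar of
+/// the same name.
+///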
+Decl *Parser::ParseObjCPropertySynthesize(SourceLocation atLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc_synthesize) &&
+ "ParseObjCPropertyDynamic(): Expected '@synthesize'");
+ ConsumeToken(); // consume synthesize
+
+ while (true) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCPropertyDefinition(getCurScope());
+ cutOffParsing();
+ return 0;
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_synthesized_property_name);
+ SkipUntil(tok::semi);
+ return 0;
+ }
+
+ IdentifierInfo *propertyIvar = 0;
+ IdentifierInfo *propertyId = Tok.getIdentifierInfo();
+ SourceLocation propertyLoc = ConsumeToken(); // consume property name
+ SourceLocation propertyIvarLoc;
+ if (Tok.is(tok::equal)) {
+ // property '=' ivar-name
+ ConsumeToken(); // consume '='
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCPropertySynthesizeIvar(getCurScope(), propertyId);
+ cutOffParsing();
+ return 0;
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ break;
+ }
+ propertyIvar = Tok.getIdentifierInfo();
+ propertyIvarLoc = ConsumeToken(); // consume ivar-name
+ }
+ Actions.ActOnPropertyImplDecl(getCurScope(), atLoc, propertyLoc, true,
+ propertyId, propertyIvar, propertyIvarLoc);
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken(); // consume ','
+ }
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after, "@synthesize");
+ return 0;
+}
+
+/// property-dynamic:
+/// @dynamic property-list
+///
+/// property-list:
+/// identifier
+/// property-list ',' identifier
+///
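+/// For example, '@dynamic title, subtitle;' tells the compiler that the
+/// accessors for these properties are provided elsewhere (e.g. at runtime).
+///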
+Decl *Parser::ParseObjCPropertyDynamic(SourceLocation atLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc_dynamic) &&
+ "ParseObjCPropertyDynamic(): Expected '@dynamic'");
+ ConsumeToken(); // consume dynamic
+ while (true) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCPropertyDefinition(getCurScope());
+ cutOffParsing();
+ return 0;
+ }
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::semi);
+ return 0;
+ }
+
+ IdentifierInfo *propertyId = Tok.getIdentifierInfo();
+ SourceLocation propertyLoc = ConsumeToken(); // consume property name
+ Actions.ActOnPropertyImplDecl(getCurScope(), atLoc, propertyLoc, false,
+ propertyId, 0, SourceLocation());
+
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken(); // consume ','
+ }
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after, "@dynamic");
+ return 0;
+}
+
+/// objc-throw-statement:
+/// throw expression[opt];
+///
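+/// For example, '@throw myException;' raises an exception object, while a
+/// bare '@throw;' inside an @catch block rethrows the current exception.
+///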
+StmtResult Parser::ParseObjCThrowStmt(SourceLocation atLoc) {
+ ExprResult Res;
+ ConsumeToken(); // consume throw
+ if (Tok.isNot(tok::semi)) {
+ Res = ParseExpression();
+ if (Res.isInvalid()) {
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+ }
+ // consume ';'
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after, "@throw");
+ return Actions.ActOnObjCAtThrowStmt(atLoc, Res.take(), getCurScope());
+}
+
+/// objc-synchronized-statement:
+/// @synchronized '(' expression ')' compound-statement
+///
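+/// For example, '@synchronized (self) { ... }' holds a lock associated with
+/// the receiver for the duration of the compound statement.
+///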
+StmtResult
+Parser::ParseObjCSynchronizedStmt(SourceLocation atLoc) {
+ ConsumeToken(); // consume synchronized
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "@synchronized";
+ return StmtError();
+ }
+
+ // The operand is surrounded with parentheses.
+ ConsumeParen(); // '('
+ ExprResult operand(ParseExpression());
+
+ if (Tok.is(tok::r_paren)) {
+ ConsumeParen(); // ')'
+ } else {
+ if (!operand.isInvalid())
+ Diag(Tok, diag::err_expected_rparen);
+
+ // Skip forward until we see a left brace, but don't consume it.
+ SkipUntil(tok::l_brace, true, true);
+ }
+
+ // Require a compound statement.
+ if (Tok.isNot(tok::l_brace)) {
+ if (!operand.isInvalid())
+ Diag(Tok, diag::err_expected_lbrace);
+ return StmtError();
+ }
+
+ // Check the @synchronized operand now.
+ if (!operand.isInvalid())
+ operand = Actions.ActOnObjCAtSynchronizedOperand(atLoc, operand.take());
+
+ // Parse the compound statement within a new scope.
+ ParseScope bodyScope(this, Scope::DeclScope);
+ StmtResult body(ParseCompoundStatementBody());
+ bodyScope.Exit();
+
+ // If there was a semantic or parse error earlier with the
+ // operand, fail now.
+ if (operand.isInvalid())
+ return StmtError();
+
+ if (body.isInvalid())
+ body = Actions.ActOnNullStmt(Tok.getLocation());
+
+ return Actions.ActOnObjCAtSynchronizedStmt(atLoc, operand.get(), body.get());
+}
+
+/// objc-try-catch-statement:
+/// @try compound-statement objc-catch-list[opt]
+/// @try compound-statement objc-catch-list[opt] @finally compound-statement
+///
+/// objc-catch-list:
+/// @catch ( parameter-declaration ) compound-statement
+/// objc-catch-list @catch ( catch-parameter-declaration ) compound-statement
+/// catch-parameter-declaration:
+/// parameter-declaration
+/// '...' [OBJC2]
+///
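+/// For example:
+/// @try { ... } @catch (NSException *e) { ... } @finally { ... }
+///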
+StmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
+ bool catch_or_finally_seen = false;
+
+ ConsumeToken(); // consume try
+ if (Tok.isNot(tok::l_brace)) {
+ Diag(Tok, diag::err_expected_lbrace);
+ return StmtError();
+ }
+ StmtVector CatchStmts(Actions);
+ StmtResult FinallyStmt;
+ ParseScope TryScope(this, Scope::DeclScope);
+ StmtResult TryBody(ParseCompoundStatementBody());
+ TryScope.Exit();
+ if (TryBody.isInvalid())
+ TryBody = Actions.ActOnNullStmt(Tok.getLocation());
+
+ while (Tok.is(tok::at)) {
+ // At this point, we need to lookahead to determine if this @ is the start
+ // of an @catch or @finally. We don't want to consume the @ token if this
+ // is an @try or @encode or something else.
+ Token AfterAt = GetLookAheadToken(1);
+ if (!AfterAt.isObjCAtKeyword(tok::objc_catch) &&
+ !AfterAt.isObjCAtKeyword(tok::objc_finally))
+ break;
+
+ SourceLocation AtCatchFinallyLoc = ConsumeToken();
+ if (Tok.isObjCAtKeyword(tok::objc_catch)) {
+ Decl *FirstPart = 0;
+ ConsumeToken(); // consume catch
+ if (Tok.is(tok::l_paren)) {
+ ConsumeParen();
+ ParseScope CatchScope(this, Scope::DeclScope|Scope::AtCatchScope);
+ if (Tok.isNot(tok::ellipsis)) {
+ DeclSpec DS(AttrFactory);
+ ParseDeclarationSpecifiers(DS);
+ Declarator ParmDecl(DS, Declarator::ObjCCatchContext);
+ ParseDeclarator(ParmDecl);
+
+ // Inform the actions module about the declarator, so it
+ // gets added to the current scope.
+ FirstPart = Actions.ActOnObjCExceptionDecl(getCurScope(), ParmDecl);
+ } else
+ ConsumeToken(); // consume '...'
+
+ SourceLocation RParenLoc;
+
+ if (Tok.is(tok::r_paren))
+ RParenLoc = ConsumeParen();
+ else // Skip over garbage, until we get to ')'. Eat the ')'.
+ SkipUntil(tok::r_paren, true, false);
+
+ StmtResult CatchBody(true);
+ if (Tok.is(tok::l_brace))
+ CatchBody = ParseCompoundStatementBody();
+ else
+ Diag(Tok, diag::err_expected_lbrace);
+ if (CatchBody.isInvalid())
+ CatchBody = Actions.ActOnNullStmt(Tok.getLocation());
+
+ StmtResult Catch = Actions.ActOnObjCAtCatchStmt(AtCatchFinallyLoc,
+ RParenLoc,
+ FirstPart,
+ CatchBody.take());
+ if (!Catch.isInvalid())
+ CatchStmts.push_back(Catch.release());
+
+ } else {
+ Diag(AtCatchFinallyLoc, diag::err_expected_lparen_after)
+ << "@catch clause";
+ return StmtError();
+ }
+ catch_or_finally_seen = true;
+ } else {
+ assert(Tok.isObjCAtKeyword(tok::objc_finally) && "Lookahead confused?");
+ ConsumeToken(); // consume finally
+ ParseScope FinallyScope(this, Scope::DeclScope);
+
+ StmtResult FinallyBody(true);
+ if (Tok.is(tok::l_brace))
+ FinallyBody = ParseCompoundStatementBody();
+ else
+ Diag(Tok, diag::err_expected_lbrace);
+ if (FinallyBody.isInvalid())
+ FinallyBody = Actions.ActOnNullStmt(Tok.getLocation());
+ FinallyStmt = Actions.ActOnObjCAtFinallyStmt(AtCatchFinallyLoc,
+ FinallyBody.take());
+ catch_or_finally_seen = true;
+ break;
+ }
+ }
+ if (!catch_or_finally_seen) {
+ Diag(atLoc, diag::err_missing_catch_finally);
+ return StmtError();
+ }
+
+ return Actions.ActOnObjCAtTryStmt(atLoc, TryBody.take(),
+ move_arg(CatchStmts),
+ FinallyStmt.take());
+}
+
+/// objc-autoreleasepool-statement:
+/// @autoreleasepool compound-statement
+///
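+/// For example, '@autoreleasepool { ... }' drains the pool when control
+/// leaves the compound statement.
+///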
+StmtResult
+Parser::ParseObjCAutoreleasePoolStmt(SourceLocation atLoc) {
+ ConsumeToken(); // consume autoreleasepool
+ if (Tok.isNot(tok::l_brace)) {
+ Diag(Tok, diag::err_expected_lbrace);
+ return StmtError();
+ }
+ // Enter a scope to hold everything within the compound stmt. Compound
+ // statements can always hold declarations.
+ ParseScope BodyScope(this, Scope::DeclScope);
+
+ StmtResult AutoreleasePoolBody(ParseCompoundStatementBody());
+
+ BodyScope.Exit();
+ if (AutoreleasePoolBody.isInvalid())
+ AutoreleasePoolBody = Actions.ActOnNullStmt(Tok.getLocation());
+ return Actions.ActOnObjCAutoreleasePoolStmt(atLoc,
+ AutoreleasePoolBody.take());
+}
+
+/// objc-method-def: objc-method-proto ';'[opt] '{' body '}'
+///
+Decl *Parser::ParseObjCMethodDefinition() {
+ Decl *MDecl = ParseObjCMethodPrototype();
+
+ PrettyDeclStackTraceEntry CrashInfo(Actions, MDecl, Tok.getLocation(),
+ "parsing Objective-C method");
+
+ // parse optional ';'
+ if (Tok.is(tok::semi)) {
+ if (CurParsedObjCImpl) {
+ Diag(Tok, diag::warn_semicolon_before_method_body)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+ }
+ ConsumeToken();
+ }
+
+ // We should have an opening brace now.
+ if (Tok.isNot(tok::l_brace)) {
+ Diag(Tok, diag::err_expected_method_body);
+
+ // Skip over garbage, until we get to '{'. Don't eat the '{'.
+ SkipUntil(tok::l_brace, true, true);
+
+ // If we didn't find the '{', bail out.
+ if (Tok.isNot(tok::l_brace))
+ return 0;
+ }
+
+ if (!MDecl) {
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, /*StopAtSemi=*/false);
+ return 0;
+ }
+
+ // Allow the rest of sema to find private method decl implementations.
+ Actions.AddAnyMethodToGlobalPool(MDecl);
+
+ if (CurParsedObjCImpl) {
+ // Consume the tokens and store them for later parsing.
+ LexedMethod* LM = new LexedMethod(this, MDecl);
+ CurParsedObjCImpl->LateParsedObjCMethods.push_back(LM);
+ CachedTokens &Toks = LM->Toks;
+ // Begin by storing the '{' token.
+ Toks.push_back(Tok);
+ ConsumeBrace();
+ // Consume everything up to (and including) the matching right brace.
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+
+ } else {
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, /*StopAtSemi=*/false);
+ }
+
+ return MDecl;
+}
+
+StmtResult Parser::ParseObjCAtStatement(SourceLocation AtLoc) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCAtStatement(getCurScope());
+ cutOffParsing();
+ return StmtError();
+ }
+
+ if (Tok.isObjCAtKeyword(tok::objc_try))
+ return ParseObjCTryStmt(AtLoc);
+
+ if (Tok.isObjCAtKeyword(tok::objc_throw))
+ return ParseObjCThrowStmt(AtLoc);
+
+ if (Tok.isObjCAtKeyword(tok::objc_synchronized))
+ return ParseObjCSynchronizedStmt(AtLoc);
+
+ if (Tok.isObjCAtKeyword(tok::objc_autoreleasepool))
+ return ParseObjCAutoreleasePoolStmt(AtLoc);
+
+ ExprResult Res(ParseExpressionWithLeadingAt(AtLoc));
+ if (Res.isInvalid()) {
+ // If the expression is invalid, skip ahead to the next semicolon. Not
+ // doing this opens us up to the possibility of infinite loops if
+ // ParseExpression does not consume any tokens.
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+
+ // Otherwise, eat the semicolon.
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
+ return Actions.ActOnExprStmt(Actions.MakeFullExpr(Res.take()));
+}
+
+ExprResult Parser::ParseObjCAtExpression(SourceLocation AtLoc) {
+ switch (Tok.getKind()) {
+ case tok::code_completion:
+ Actions.CodeCompleteObjCAtExpression(getCurScope());
+ cutOffParsing();
+ return ExprError();
+
+ case tok::minus:
+ case tok::plus: {
+ tok::TokenKind Kind = Tok.getKind();
+ SourceLocation OpLoc = ConsumeToken();
+
+ if (!Tok.is(tok::numeric_constant)) {
+ const char *Symbol = 0;
+ switch (Kind) {
+ case tok::minus: Symbol = "-"; break;
+ case tok::plus: Symbol = "+"; break;
+ default: llvm_unreachable("missing unary operator case");
+ }
+ Diag(Tok, diag::err_nsnumber_nonliteral_unary)
+ << Symbol;
+ return ExprError();
+ }
+
+ ExprResult Lit(Actions.ActOnNumericConstant(Tok));
+ if (Lit.isInvalid()) {
+ return move(Lit);
+ }
+ ConsumeToken(); // Consume the literal token.
+
+ Lit = Actions.ActOnUnaryOp(getCurScope(), OpLoc, Kind, Lit.take());
+ if (Lit.isInvalid())
+ return move(Lit);
+
+ return ParsePostfixExpressionSuffix(
+ Actions.BuildObjCNumericLiteral(AtLoc, Lit.take()));
+ }
+
+ case tok::string_literal: // primary-expression: string-literal
+ case tok::wide_string_literal:
+ return ParsePostfixExpressionSuffix(ParseObjCStringLiteral(AtLoc));
+
+ case tok::char_constant:
+ return ParsePostfixExpressionSuffix(ParseObjCCharacterLiteral(AtLoc));
+
+ case tok::numeric_constant:
+ return ParsePostfixExpressionSuffix(ParseObjCNumericLiteral(AtLoc));
+
+ case tok::kw_true: // Objective-C++, etc.
+ case tok::kw___objc_yes: // c/c++/objc/objc++ __objc_yes
+ return ParsePostfixExpressionSuffix(ParseObjCBooleanLiteral(AtLoc, true));
+ case tok::kw_false: // Objective-C++, etc.
+ case tok::kw___objc_no: // c/c++/objc/objc++ __objc_no
+ return ParsePostfixExpressionSuffix(ParseObjCBooleanLiteral(AtLoc, false));
+
+ case tok::l_square:
+ // Objective-C array literal
+ return ParsePostfixExpressionSuffix(ParseObjCArrayLiteral(AtLoc));
+
+ case tok::l_brace:
+ // Objective-C dictionary literal
+ return ParsePostfixExpressionSuffix(ParseObjCDictionaryLiteral(AtLoc));
+
+ default:
+ if (Tok.getIdentifierInfo() == 0)
+ return ExprError(Diag(AtLoc, diag::err_unexpected_at));
+
+ switch (Tok.getIdentifierInfo()->getObjCKeywordID()) {
+ case tok::objc_encode:
+ return ParsePostfixExpressionSuffix(ParseObjCEncodeExpression(AtLoc));
+ case tok::objc_protocol:
+ return ParsePostfixExpressionSuffix(ParseObjCProtocolExpression(AtLoc));
+ case tok::objc_selector:
+ return ParsePostfixExpressionSuffix(ParseObjCSelectorExpression(AtLoc));
+ default:
+ return ExprError(Diag(AtLoc, diag::err_unexpected_at));
+ }
+ }
+}
+
+/// \brief Parse the receiver of an Objective-C++ message send.
+///
+/// This routine parses the receiver of a message send in
+/// Objective-C++ either as a type or as an expression. Note that this
+/// routine must not be called to parse a send to 'super', since it
+/// has no way to return such a result.
+///
+/// \param IsExpr Whether the receiver was parsed as an expression.
+///
+/// \param TypeOrExpr If the receiver was parsed as an expression (\c
+/// IsExpr is true), the parsed expression. If the receiver was parsed
+/// as a type (\c IsExpr is false), the parsed type.
+///
+/// \returns True if an error occurred during parsing or semantic
+/// analysis, in which case the arguments do not have valid
+/// values. Otherwise, returns false for a successful parse.
+///
+/// objc-receiver: [C++]
+/// 'super' [not parsed here]
+/// expression
+/// simple-type-specifier
+/// typename-specifier
+bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) {
+ InMessageExpressionRAIIObject InMessage(*this, true);
+
+ if (Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
+ Tok.is(tok::kw_typename) || Tok.is(tok::annot_cxxscope))
+ TryAnnotateTypeOrScopeToken();
+
+ if (!isCXXSimpleTypeSpecifier()) {
+ // objc-receiver:
+ // expression
+ ExprResult Receiver = ParseExpression();
+ if (Receiver.isInvalid())
+ return true;
+
+ IsExpr = true;
+ TypeOrExpr = Receiver.take();
+ return false;
+ }
+
+ // objc-receiver:
+ // typename-specifier
+ // simple-type-specifier
+ // expression (that starts with one of the above)
+ DeclSpec DS(AttrFactory);
+ ParseCXXSimpleTypeSpecifier(DS);
+
+ if (Tok.is(tok::l_paren)) {
+ // If we see an opening parentheses at this point, we are
+ // actually parsing an expression that starts with a
+ // function-style cast, e.g.,
+ //
+ // postfix-expression:
+ // simple-type-specifier ( expression-list [opt] )
+ // typename-specifier ( expression-list [opt] )
+ //
+ // Parse the remainder of this case, then the (optional)
+ // postfix-expression suffix, followed by the (optional)
+ // right-hand side of the binary expression. We have an
+ // instance method.
+ ExprResult Receiver = ParseCXXTypeConstructExpression(DS);
+ if (!Receiver.isInvalid())
+ Receiver = ParsePostfixExpressionSuffix(Receiver.take());
+ if (!Receiver.isInvalid())
+ Receiver = ParseRHSOfBinaryExpression(Receiver.take(), prec::Comma);
+ if (Receiver.isInvalid())
+ return true;
+
+ IsExpr = true;
+ TypeOrExpr = Receiver.take();
+ return false;
+ }
+
+ // We have a class message. Turn the simple-type-specifier or
+ // typename-specifier we parsed into a type and parse the
+ // remainder of the class message.
+ Declarator DeclaratorInfo(DS, Declarator::TypeNameContext);
+ TypeResult Type = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
+ if (Type.isInvalid())
+ return true;
+
+ IsExpr = false;
+ TypeOrExpr = Type.get().getAsOpaquePtr();
+ return false;
+}
+
+/// \brief Determine whether the parser is currently referring to an
+/// Objective-C message send, using a simplified heuristic to avoid overhead.
+///
+/// This routine will only return true for a subset of valid message-send
+/// expressions.
+bool Parser::isSimpleObjCMessageExpression() {
+ assert(Tok.is(tok::l_square) && getLangOpts().ObjC1 &&
+ "Incorrect start for isSimpleObjCMessageExpression");
+ return GetLookAheadToken(1).is(tok::identifier) &&
+ GetLookAheadToken(2).is(tok::identifier);
+}
+
+bool Parser::isStartOfObjCClassMessageMissingOpenBracket() {
+ if (!getLangOpts().ObjC1 || !NextToken().is(tok::identifier) ||
+ InMessageExpression)
+ return false;
+
+ ParsedType Type;
+
+ if (Tok.is(tok::annot_typename))
+ Type = getTypeAnnotation(Tok);
+ else if (Tok.is(tok::identifier))
+ Type = Actions.getTypeName(*Tok.getIdentifierInfo(), Tok.getLocation(),
+ getCurScope());
+ else
+ return false;
+
+ if (!Type.get().isNull() && Type.get()->isObjCObjectOrInterfaceType()) {
+ const Token &AfterNext = GetLookAheadToken(2);
+ if (AfterNext.is(tok::colon) || AfterNext.is(tok::r_square)) {
+ if (Tok.is(tok::identifier))
+ TryAnnotateTypeOrScopeToken();
+
+ return Tok.is(tok::annot_typename);
+ }
+ }
+
+ return false;
+}
+
+/// objc-message-expr:
+/// '[' objc-receiver objc-message-args ']'
+///
+/// objc-receiver: [C]
+/// 'super'
+/// expression
+/// class-name
+/// type-name
+///
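+/// For example, '[obj description]' is an instance message, '[NSObject alloc]'
+/// is a class message, and '[super init]' targets the superclass
+/// implementation of the enclosing method.
+///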
+ExprResult Parser::ParseObjCMessageExpression() {
+ assert(Tok.is(tok::l_square) && "'[' expected");
+ SourceLocation LBracLoc = ConsumeBracket(); // consume '['
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCMessageReceiver(getCurScope());
+ cutOffParsing();
+ return ExprError();
+ }
+
+ InMessageExpressionRAIIObject InMessage(*this, true);
+
+ if (getLangOpts().CPlusPlus) {
+ // We completely separate the C and C++ cases because C++ requires
+ // more complicated (read: slower) parsing.
+
+ // Handle send to super.
+ // FIXME: This doesn't benefit from the same typo-correction we
+ // get in Objective-C.
+ if (Tok.is(tok::identifier) && Tok.getIdentifierInfo() == Ident_super &&
+ NextToken().isNot(tok::period) && getCurScope()->isInObjcMethodScope())
+ return ParseObjCMessageExpressionBody(LBracLoc, ConsumeToken(),
+ ParsedType(), 0);
+
+ // Parse the receiver, which is either a type or an expression.
+ bool IsExpr;
+ void *TypeOrExpr = NULL;
+ if (ParseObjCXXMessageReceiver(IsExpr, TypeOrExpr)) {
+ SkipUntil(tok::r_square);
+ return ExprError();
+ }
+
+ if (IsExpr)
+ return ParseObjCMessageExpressionBody(LBracLoc, SourceLocation(),
+ ParsedType(),
+ static_cast<Expr*>(TypeOrExpr));
+
+ return ParseObjCMessageExpressionBody(LBracLoc, SourceLocation(),
+ ParsedType::getFromOpaquePtr(TypeOrExpr),
+ 0);
+ }
+
+ if (Tok.is(tok::identifier)) {
+ IdentifierInfo *Name = Tok.getIdentifierInfo();
+ SourceLocation NameLoc = Tok.getLocation();
+ ParsedType ReceiverType;
+ switch (Actions.getObjCMessageKind(getCurScope(), Name, NameLoc,
+ Name == Ident_super,
+ NextToken().is(tok::period),
+ ReceiverType)) {
+ case Sema::ObjCSuperMessage:
+ return ParseObjCMessageExpressionBody(LBracLoc, ConsumeToken(),
+ ParsedType(), 0);
+
+ case Sema::ObjCClassMessage:
+ if (!ReceiverType) {
+ SkipUntil(tok::r_square);
+ return ExprError();
+ }
+
+ ConsumeToken(); // the type name
+
+ return ParseObjCMessageExpressionBody(LBracLoc, SourceLocation(),
+ ReceiverType, 0);
+
+ case Sema::ObjCInstanceMessage:
+ // Fall through to parse an expression.
+ break;
+ }
+ }
+
+ // Otherwise, an arbitrary expression can be the receiver of a send.
+ ExprResult Res(ParseExpression());
+ if (Res.isInvalid()) {
+ SkipUntil(tok::r_square);
+ return move(Res);
+ }
+
+ return ParseObjCMessageExpressionBody(LBracLoc, SourceLocation(),
+ ParsedType(), Res.take());
+}
+
+/// \brief Parse the remainder of an Objective-C message following the
+/// '[' objc-receiver.
+///
+/// This routine handles sends to super, class messages (sent to a
+/// class name), and instance messages (sent to an object), and the
+/// target is represented by \p SuperLoc, \p ReceiverType, or \p
+/// ReceiverExpr, respectively. Only one of these parameters may have
+/// a valid value.
+///
+/// \param LBracLoc The location of the opening '['.
+///
+/// \param SuperLoc If this is a send to 'super', the location of the
+/// 'super' keyword that indicates a send to the superclass.
+///
+/// \param ReceiverType If this is a class message, the type of the
+/// class we are sending a message to.
+///
+/// \param ReceiverExpr If this is an instance message, the expression
+/// used to compute the receiver object.
+///
+/// objc-message-args:
+/// objc-selector
+/// objc-keywordarg-list
+///
+/// objc-keywordarg-list:
+/// objc-keywordarg
+/// objc-keywordarg-list objc-keywordarg
+///
+/// objc-keywordarg:
+/// selector-name[opt] ':' objc-keywordexpr
+///
+/// objc-keywordexpr:
+/// nonempty-expr-list
+///
+/// nonempty-expr-list:
+/// assignment-expression
+/// nonempty-expr-list , assignment-expression
+///
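+/// For example, in '[dict setObject:value forKey:key]' the selector is
+/// setObject:forKey: and the keyword arguments are 'value' and 'key'.
+///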
+ExprResult
+Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
+ SourceLocation SuperLoc,
+ ParsedType ReceiverType,
+ ExprArg ReceiverExpr) {
+ InMessageExpressionRAIIObject InMessage(*this, true);
+
+ if (Tok.is(tok::code_completion)) {
+ if (SuperLoc.isValid())
+ Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc, 0, 0,
+ false);
+ else if (ReceiverType)
+ Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType, 0, 0,
+ false);
+ else
+ Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr,
+ 0, 0, false);
+ cutOffParsing();
+ return ExprError();
+ }
+
+ // Parse objc-selector
+ SourceLocation Loc;
+ IdentifierInfo *selIdent = ParseObjCSelectorPiece(Loc);
+
+ SmallVector<IdentifierInfo *, 12> KeyIdents;
+ SmallVector<SourceLocation, 12> KeyLocs;
+ ExprVector KeyExprs(Actions);
+
+ if (Tok.is(tok::colon)) {
+ while (1) {
+ // Each iteration parses a single keyword argument.
+ KeyIdents.push_back(selIdent);
+ KeyLocs.push_back(Loc);
+
+ if (Tok.isNot(tok::colon)) {
+ Diag(Tok, diag::err_expected_colon);
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square);
+ return ExprError();
+ }
+
+ ConsumeToken(); // Eat the ':'.
+ /// Parse the expression after ':'
+
+ if (Tok.is(tok::code_completion)) {
+ if (SuperLoc.isValid())
+ Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc,
+ KeyIdents.data(),
+ KeyIdents.size(),
+                                               /*AtArgumentExpression=*/true);
+ else if (ReceiverType)
+ Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType,
+ KeyIdents.data(),
+ KeyIdents.size(),
+                                               /*AtArgumentExpression=*/true);
+ else
+ Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr,
+ KeyIdents.data(),
+ KeyIdents.size(),
+                                                  /*AtArgumentExpression=*/true);
+
+ cutOffParsing();
+ return ExprError();
+ }
+
+ ExprResult Res(ParseAssignmentExpression());
+ if (Res.isInvalid()) {
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square);
+ return move(Res);
+ }
+
+ // We have a valid expression.
+ KeyExprs.push_back(Res.release());
+
+ // Code completion after each argument.
+ if (Tok.is(tok::code_completion)) {
+ if (SuperLoc.isValid())
+ Actions.CodeCompleteObjCSuperMessage(getCurScope(), SuperLoc,
+ KeyIdents.data(),
+ KeyIdents.size(),
+                                               /*AtArgumentExpression=*/false);
+ else if (ReceiverType)
+ Actions.CodeCompleteObjCClassMessage(getCurScope(), ReceiverType,
+ KeyIdents.data(),
+ KeyIdents.size(),
+                                               /*AtArgumentExpression=*/false);
+ else
+ Actions.CodeCompleteObjCInstanceMessage(getCurScope(), ReceiverExpr,
+ KeyIdents.data(),
+ KeyIdents.size(),
+                                                  /*AtArgumentExpression=*/false);
+ cutOffParsing();
+ return ExprError();
+ }
+
+ // Check for another keyword selector.
+ selIdent = ParseObjCSelectorPiece(Loc);
+ if (!selIdent && Tok.isNot(tok::colon))
+ break;
+ // We have a selector or a colon, continue parsing.
+ }
+ // Parse the, optional, argument list, comma separated.
+ while (Tok.is(tok::comma)) {
+ ConsumeToken(); // Eat the ','.
+ /// Parse the expression after ','
+ ExprResult Res(ParseAssignmentExpression());
+ if (Res.isInvalid()) {
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square);
+ return move(Res);
+ }
+
+ // We have a valid expression.
+ KeyExprs.push_back(Res.release());
+ }
+ } else if (!selIdent) {
+ Diag(Tok, diag::err_expected_ident); // missing selector name.
+
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square);
+ return ExprError();
+ }
+
+ if (Tok.isNot(tok::r_square)) {
+ if (Tok.is(tok::identifier))
+ Diag(Tok, diag::err_expected_colon);
+ else
+ Diag(Tok, diag::err_expected_rsquare);
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square);
+ return ExprError();
+ }
+
+ SourceLocation RBracLoc = ConsumeBracket(); // consume ']'
+
+ unsigned nKeys = KeyIdents.size();
+ if (nKeys == 0) {
+ KeyIdents.push_back(selIdent);
+ KeyLocs.push_back(Loc);
+ }
+ Selector Sel = PP.getSelectorTable().getSelector(nKeys, &KeyIdents[0]);
+
+ if (SuperLoc.isValid())
+ return Actions.ActOnSuperMessage(getCurScope(), SuperLoc, Sel,
+ LBracLoc, KeyLocs, RBracLoc,
+ MultiExprArg(Actions,
+ KeyExprs.take(),
+ KeyExprs.size()));
+ else if (ReceiverType)
+ return Actions.ActOnClassMessage(getCurScope(), ReceiverType, Sel,
+ LBracLoc, KeyLocs, RBracLoc,
+ MultiExprArg(Actions,
+ KeyExprs.take(),
+ KeyExprs.size()));
+ return Actions.ActOnInstanceMessage(getCurScope(), ReceiverExpr, Sel,
+ LBracLoc, KeyLocs, RBracLoc,
+ MultiExprArg(Actions,
+ KeyExprs.take(),
+ KeyExprs.size()));
+}
+
+ExprResult Parser::ParseObjCStringLiteral(SourceLocation AtLoc) {
+ ExprResult Res(ParseStringLiteralExpression());
+ if (Res.isInvalid()) return move(Res);
+
+ // @"foo" @"bar" is a valid concatenated string. Eat any subsequent string
+ // expressions. At this point, we know that the only valid thing that starts
+ // with '@' is an @"".
+ SmallVector<SourceLocation, 4> AtLocs;
+ ExprVector AtStrings(Actions);
+ AtLocs.push_back(AtLoc);
+ AtStrings.push_back(Res.release());
+
+ while (Tok.is(tok::at)) {
+ AtLocs.push_back(ConsumeToken()); // eat the @.
+
+ // Invalid unless there is a string literal.
+ if (!isTokenStringLiteral())
+ return ExprError(Diag(Tok, diag::err_objc_concat_string));
+
+ ExprResult Lit(ParseStringLiteralExpression());
+ if (Lit.isInvalid())
+ return move(Lit);
+
+ AtStrings.push_back(Lit.release());
+ }
+
+ return Owned(Actions.ParseObjCStringLiteral(&AtLocs[0], AtStrings.take(),
+ AtStrings.size()));
+}
+
+/// ParseObjCBooleanLiteral -
+/// objc-scalar-literal : '@' boolean-keyword
+/// ;
+/// boolean-keyword: 'true' | 'false' | '__objc_yes' | '__objc_no'
+/// ;
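+/// For example, '@true' (Objective-C++) or '@__objc_yes' (what '@YES'
+/// typically expands to when object literals are available) builds a
+/// boolean NSNumber literal.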
+ExprResult Parser::ParseObjCBooleanLiteral(SourceLocation AtLoc,
+ bool ArgValue) {
+ SourceLocation EndLoc = ConsumeToken(); // consume the keyword.
+ return Actions.ActOnObjCBoolLiteral(AtLoc, EndLoc, ArgValue);
+}
+
+/// ParseObjCCharacterLiteral -
+/// objc-scalar-literal : '@' character-literal
+/// ;
+ExprResult Parser::ParseObjCCharacterLiteral(SourceLocation AtLoc) {
+ ExprResult Lit(Actions.ActOnCharacterConstant(Tok));
+ if (Lit.isInvalid()) {
+ return move(Lit);
+ }
+ ConsumeToken(); // Consume the literal token.
+ return Owned(Actions.BuildObjCNumericLiteral(AtLoc, Lit.take()));
+}
+
+/// ParseObjCNumericLiteral -
+/// objc-scalar-literal : '@' scalar-literal
+/// ;
+/// scalar-literal : numeric-constant /* any numeric constant. */
+/// ;
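+/// For example, '@42' or '@3.14' wraps the constant in an NSNumber object.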
+ExprResult Parser::ParseObjCNumericLiteral(SourceLocation AtLoc) {
+ ExprResult Lit(Actions.ActOnNumericConstant(Tok));
+ if (Lit.isInvalid()) {
+ return move(Lit);
+ }
+ ConsumeToken(); // Consume the literal token.
+ return Owned(Actions.BuildObjCNumericLiteral(AtLoc, Lit.take()));
+}
+
+ExprResult Parser::ParseObjCArrayLiteral(SourceLocation AtLoc) {
+ ExprVector ElementExprs(Actions); // array elements.
+ ConsumeBracket(); // consume the l_square.
+
+ while (Tok.isNot(tok::r_square)) {
+ // Parse list of array element expressions (all must be id types).
+ ExprResult Res(ParseAssignmentExpression());
+ if (Res.isInvalid()) {
+ // We must manually skip to a ']', otherwise the expression skipper will
+ // stop at the ']' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_square);
+ return move(Res);
+ }
+
+ // Parse the ellipsis that indicates a pack expansion.
+ if (Tok.is(tok::ellipsis))
+ Res = Actions.ActOnPackExpansion(Res.get(), ConsumeToken());
+ if (Res.isInvalid())
+ return true;
+
+ ElementExprs.push_back(Res.release());
+
+ if (Tok.is(tok::comma))
+ ConsumeToken(); // Eat the ','.
+ else if (Tok.isNot(tok::r_square))
+ return ExprError(Diag(Tok, diag::err_expected_rsquare_or_comma));
+ }
+ SourceLocation EndLoc = ConsumeBracket(); // location of ']'
+ MultiExprArg Args(Actions, ElementExprs.take(), ElementExprs.size());
+ return Owned(Actions.BuildObjCArrayLiteral(SourceRange(AtLoc, EndLoc), Args));
+}
+
+ExprResult Parser::ParseObjCDictionaryLiteral(SourceLocation AtLoc) {
+ SmallVector<ObjCDictionaryElement, 4> Elements; // dictionary elements.
+  ConsumeBrace(); // consume the l_brace.
+ while (Tok.isNot(tok::r_brace)) {
+ // Parse the comma separated key : value expressions.
+ ExprResult KeyExpr;
+ {
+ ColonProtectionRAIIObject X(*this);
+ KeyExpr = ParseAssignmentExpression();
+ if (KeyExpr.isInvalid()) {
+ // We must manually skip to a '}', otherwise the expression skipper will
+ // stop at the '}' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_brace);
+ return move(KeyExpr);
+ }
+ }
+
+ if (Tok.is(tok::colon)) {
+ ConsumeToken();
+ } else {
+ return ExprError(Diag(Tok, diag::err_expected_colon));
+ }
+
+ ExprResult ValueExpr(ParseAssignmentExpression());
+ if (ValueExpr.isInvalid()) {
+ // We must manually skip to a '}', otherwise the expression skipper will
+ // stop at the '}' when it skips to the ';'. We want it to skip beyond
+ // the enclosing expression.
+ SkipUntil(tok::r_brace);
+ return move(ValueExpr);
+ }
+
+ // Parse the ellipsis that designates this as a pack expansion.
+ SourceLocation EllipsisLoc;
+ if (Tok.is(tok::ellipsis) && getLangOpts().CPlusPlus)
+ EllipsisLoc = ConsumeToken();
+
+ // We have a valid expression. Collect it in a vector so we can
+ // build the argument list.
+ ObjCDictionaryElement Element = {
+ KeyExpr.get(), ValueExpr.get(), EllipsisLoc, llvm::Optional<unsigned>()
+ };
+ Elements.push_back(Element);
+
+ if (Tok.is(tok::comma))
+ ConsumeToken(); // Eat the ','.
+ else if (Tok.isNot(tok::r_brace))
+ return ExprError(Diag(Tok, diag::err_expected_rbrace_or_comma));
+ }
+ SourceLocation EndLoc = ConsumeBrace();
+
+ // Create the ObjCDictionaryLiteral.
+ return Owned(Actions.BuildObjCDictionaryLiteral(SourceRange(AtLoc, EndLoc),
+ Elements.data(),
+ Elements.size()));
+}
+
+/// objc-encode-expression:
+/// @encode ( type-name )
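+///
+/// For example, '@encode(int *)' evaluates to a C string containing the
+/// Objective-C type encoding of the type, here "^i".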
+ExprResult
+Parser::ParseObjCEncodeExpression(SourceLocation AtLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc_encode) && "Not an @encode expression!");
+
+ SourceLocation EncLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren))
+ return ExprError(Diag(Tok, diag::err_expected_lparen_after) << "@encode");
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ TypeResult Ty = ParseTypeName();
+
+ T.consumeClose();
+
+ if (Ty.isInvalid())
+ return ExprError();
+
+ return Owned(Actions.ParseObjCEncodeExpression(AtLoc, EncLoc,
+ T.getOpenLocation(), Ty.get(),
+ T.getCloseLocation()));
+}
+
+/// objc-protocol-expression
+/// @protocol ( protocol-name )
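+///
+/// For example, '@protocol(NSCopying)' yields the Protocol object for the
+/// NSCopying protocol.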
+ExprResult
+Parser::ParseObjCProtocolExpression(SourceLocation AtLoc) {
+ SourceLocation ProtoLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren))
+ return ExprError(Diag(Tok, diag::err_expected_lparen_after) << "@protocol");
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ if (Tok.isNot(tok::identifier))
+ return ExprError(Diag(Tok, diag::err_expected_ident));
+
+ IdentifierInfo *protocolId = Tok.getIdentifierInfo();
+ ConsumeToken();
+
+ T.consumeClose();
+
+ return Owned(Actions.ParseObjCProtocolExpression(protocolId, AtLoc, ProtoLoc,
+ T.getOpenLocation(),
+ T.getCloseLocation()));
+}
+
+/// objc-selector-expression
+/// @selector '(' objc-keyword-selector ')'
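+///
+/// For example, '@selector(setValue:forKey:)' yields the SEL for the
+/// two-argument selector setValue:forKey:.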
+ExprResult Parser::ParseObjCSelectorExpression(SourceLocation AtLoc) {
+ SourceLocation SelectorLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren))
+ return ExprError(Diag(Tok, diag::err_expected_lparen_after) << "@selector");
+
+ SmallVector<IdentifierInfo *, 12> KeyIdents;
+ SourceLocation sLoc;
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCSelector(getCurScope(), KeyIdents.data(),
+ KeyIdents.size());
+ cutOffParsing();
+ return ExprError();
+ }
+
+ IdentifierInfo *SelIdent = ParseObjCSelectorPiece(sLoc);
+ if (!SelIdent && // missing selector name.
+ Tok.isNot(tok::colon) && Tok.isNot(tok::coloncolon))
+ return ExprError(Diag(Tok, diag::err_expected_ident));
+
+ KeyIdents.push_back(SelIdent);
+ unsigned nColons = 0;
+ if (Tok.isNot(tok::r_paren)) {
+ while (1) {
+ if (Tok.is(tok::coloncolon)) { // Handle :: in C++.
+ ++nColons;
+ KeyIdents.push_back(0);
+ } else if (Tok.isNot(tok::colon))
+ return ExprError(Diag(Tok, diag::err_expected_colon));
+
+ ++nColons;
+ ConsumeToken(); // Eat the ':' or '::'.
+ if (Tok.is(tok::r_paren))
+ break;
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCSelector(getCurScope(), KeyIdents.data(),
+ KeyIdents.size());
+ cutOffParsing();
+ return ExprError();
+ }
+
+ // Check for another keyword selector.
+ SourceLocation Loc;
+ SelIdent = ParseObjCSelectorPiece(Loc);
+ KeyIdents.push_back(SelIdent);
+ if (!SelIdent && Tok.isNot(tok::colon) && Tok.isNot(tok::coloncolon))
+ break;
+ }
+ }
+ T.consumeClose();
+ Selector Sel = PP.getSelectorTable().getSelector(nColons, &KeyIdents[0]);
+ return Owned(Actions.ParseObjCSelectorExpression(Sel, AtLoc, SelectorLoc,
+ T.getOpenLocation(),
+ T.getCloseLocation()));
+}
+
+Decl *Parser::ParseLexedObjCMethodDefs(LexedMethod &LM) {
+
+ // Save the current token position.
+ SourceLocation OrigLoc = Tok.getLocation();
+
+  assert(!LM.Toks.empty() && "ParseLexedObjCMethodDefs - Empty body!");
+ // Append the current token at the end of the new token stream so that it
+ // doesn't get lost.
+ LM.Toks.push_back(Tok);
+ PP.EnterTokenStream(LM.Toks.data(), LM.Toks.size(), true, false);
+
+ // MDecl might be null due to error in method prototype, etc.
+ Decl *MDecl = LM.D;
+ // Consume the previously pushed token.
+ ConsumeAnyToken();
+
+ assert(Tok.is(tok::l_brace) && "Inline objective-c method not starting with '{'");
+ SourceLocation BraceLoc = Tok.getLocation();
+ // Enter a scope for the method body.
+ ParseScope BodyScope(this,
+ Scope::ObjCMethodScope|Scope::FnScope|Scope::DeclScope);
+
+ // Tell the actions module that we have entered a method definition with the
+ // specified Declarator for the method.
+ Actions.ActOnStartOfObjCMethodDef(getCurScope(), MDecl);
+
+ if (SkipFunctionBodies && trySkippingFunctionBody()) {
+ BodyScope.Exit();
+ return Actions.ActOnFinishFunctionBody(MDecl, 0);
+ }
+
+ StmtResult FnBody(ParseCompoundStatementBody());
+
+ // If the function body could not be parsed, make a bogus compoundstmt.
+ if (FnBody.isInvalid()) {
+ Sema::CompoundScopeRAII CompoundScope(Actions);
+ FnBody = Actions.ActOnCompoundStmt(BraceLoc, BraceLoc,
+ MultiStmtArg(Actions), false);
+ }
+
+ // Leave the function body scope.
+ BodyScope.Exit();
+
+ MDecl = Actions.ActOnFinishFunctionBody(MDecl, FnBody.take());
+
+ if (Tok.getLocation() != OrigLoc) {
+ // Due to parsing error, we either went over the cached tokens or
+ // there are still cached tokens left. If it's the latter case skip the
+ // leftover tokens.
+ // Since this is an uncommon situation that should be avoided, use the
+ // expensive isBeforeInTranslationUnit call.
+ if (PP.getSourceManager().isBeforeInTranslationUnit(Tok.getLocation(),
+ OrigLoc))
+ while (Tok.getLocation() != OrigLoc && Tok.isNot(tok::eof))
+ ConsumeAnyToken();
+ }
+
+ return MDecl;
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
new file mode 100644
index 0000000..eb13e0d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.cpp
@@ -0,0 +1,568 @@
+//===--- ParsePragma.cpp - Language specific pragma parsing ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the language specific #pragma handlers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ParsePragma.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Lex/Preprocessor.h"
+using namespace clang;
+
+/// \brief Handle the annotation token produced for #pragma unused(...)
+///
+/// Each annot_pragma_unused is followed by the argument token so e.g.
+/// "#pragma unused(x,y)" becomes:
+/// annot_pragma_unused 'x' annot_pragma_unused 'y'
+void Parser::HandlePragmaUnused() {
+ assert(Tok.is(tok::annot_pragma_unused));
+ SourceLocation UnusedLoc = ConsumeToken();
+ Actions.ActOnPragmaUnused(Tok, getCurScope(), UnusedLoc);
+ ConsumeToken(); // The argument token.
+}
+
+void Parser::HandlePragmaVisibility() {
+ assert(Tok.is(tok::annot_pragma_vis));
+ const IdentifierInfo *VisType =
+ static_cast<IdentifierInfo *>(Tok.getAnnotationValue());
+ SourceLocation VisLoc = ConsumeToken();
+ Actions.ActOnPragmaVisibility(VisType, VisLoc);
+}
+
+struct PragmaPackInfo {
+ Sema::PragmaPackKind Kind;
+ IdentifierInfo *Name;
+ Expr *Alignment;
+ SourceLocation LParenLoc;
+ SourceLocation RParenLoc;
+};
+
+void Parser::HandlePragmaPack() {
+ assert(Tok.is(tok::annot_pragma_pack));
+ PragmaPackInfo *Info =
+ static_cast<PragmaPackInfo *>(Tok.getAnnotationValue());
+ SourceLocation PragmaLoc = ConsumeToken();
+ Actions.ActOnPragmaPack(Info->Kind, Info->Name, Info->Alignment, PragmaLoc,
+ Info->LParenLoc, Info->RParenLoc);
+}
+
+// #pragma GCC visibility comes in two variants:
+// 'push' '(' [visibility] ')'
+// 'pop'
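+// e.g. '#pragma GCC visibility push(hidden)' ... '#pragma GCC visibility pop'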
+void PragmaGCCVisibilityHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &VisTok) {
+ SourceLocation VisLoc = VisTok.getLocation();
+
+ Token Tok;
+ PP.LexUnexpandedToken(Tok);
+
+ const IdentifierInfo *PushPop = Tok.getIdentifierInfo();
+
+ const IdentifierInfo *VisType;
+ if (PushPop && PushPop->isStr("pop")) {
+ VisType = 0;
+ } else if (PushPop && PushPop->isStr("push")) {
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_lparen)
+ << "visibility";
+ return;
+ }
+ PP.LexUnexpandedToken(Tok);
+ VisType = Tok.getIdentifierInfo();
+ if (!VisType) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
+ << "visibility";
+ return;
+ }
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_rparen)
+ << "visibility";
+ return;
+ }
+ } else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
+ << "visibility";
+ return;
+ }
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "visibility";
+ return;
+ }
+
+ Token *Toks = new Token[1];
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_vis);
+ Toks[0].setLocation(VisLoc);
+ Toks[0].setAnnotationValue(
+ const_cast<void*>(static_cast<const void*>(VisType)));
+ PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
+ /*OwnsTokens=*/true);
+}
+
+// #pragma pack(...) comes in the following delicious flavors:
+// pack '(' [integer] ')'
+// pack '(' 'show' ')'
+// pack '(' ('push' | 'pop') [',' identifier] [, integer] ')'
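+// e.g. '#pragma pack(push, 4)' saves the current alignment and sets it to 4,
+// and '#pragma pack(pop)' restores the saved value.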
+void PragmaPackHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &PackTok) {
+ SourceLocation PackLoc = PackTok.getLocation();
+
+ Token Tok;
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_lparen) << "pack";
+ return;
+ }
+
+ Sema::PragmaPackKind Kind = Sema::PPK_Default;
+ IdentifierInfo *Name = 0;
+ ExprResult Alignment;
+ SourceLocation LParenLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ if (Tok.is(tok::numeric_constant)) {
+ Alignment = Actions.ActOnNumericConstant(Tok);
+ if (Alignment.isInvalid())
+ return;
+
+ PP.Lex(Tok);
+
+ // In MSVC/gcc, #pragma pack(4) sets the alignment without affecting
+ // the push/pop stack.
+ // In Apple gcc, #pragma pack(4) is equivalent to #pragma pack(push, 4)
+ if (PP.getLangOpts().ApplePragmaPack)
+ Kind = Sema::PPK_Push;
+ } else if (Tok.is(tok::identifier)) {
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (II->isStr("show")) {
+ Kind = Sema::PPK_Show;
+ PP.Lex(Tok);
+ } else {
+ if (II->isStr("push")) {
+ Kind = Sema::PPK_Push;
+ } else if (II->isStr("pop")) {
+ Kind = Sema::PPK_Pop;
+ } else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_pack_invalid_action);
+ return;
+ }
+ PP.Lex(Tok);
+
+ if (Tok.is(tok::comma)) {
+ PP.Lex(Tok);
+
+ if (Tok.is(tok::numeric_constant)) {
+ Alignment = Actions.ActOnNumericConstant(Tok);
+ if (Alignment.isInvalid())
+ return;
+
+ PP.Lex(Tok);
+ } else if (Tok.is(tok::identifier)) {
+ Name = Tok.getIdentifierInfo();
+ PP.Lex(Tok);
+
+ if (Tok.is(tok::comma)) {
+ PP.Lex(Tok);
+
+ if (Tok.isNot(tok::numeric_constant)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_pack_malformed);
+ return;
+ }
+
+ Alignment = Actions.ActOnNumericConstant(Tok);
+ if (Alignment.isInvalid())
+ return;
+
+ PP.Lex(Tok);
+ }
+ } else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_pack_malformed);
+ return;
+ }
+ }
+ }
+ } else if (PP.getLangOpts().ApplePragmaPack) {
+ // In MSVC/gcc, #pragma pack() resets the alignment without affecting
+ // the push/pop stack.
+ // In Apple gcc #pragma pack() is equivalent to #pragma pack(pop).
+ Kind = Sema::PPK_Pop;
+ }
+
+ if (Tok.isNot(tok::r_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_rparen) << "pack";
+ return;
+ }
+
+ SourceLocation RParenLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) << "pack";
+ return;
+ }
+
+ PragmaPackInfo *Info =
+ (PragmaPackInfo*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(PragmaPackInfo), llvm::alignOf<PragmaPackInfo>());
+ new (Info) PragmaPackInfo();
+ Info->Kind = Kind;
+ Info->Name = Name;
+ Info->Alignment = Alignment.release();
+ Info->LParenLoc = LParenLoc;
+ Info->RParenLoc = RParenLoc;
+
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 1, llvm::alignOf<Token>());
+ new (Toks) Token();
+ Toks[0].startToken();
+ Toks[0].setKind(tok::annot_pragma_pack);
+ Toks[0].setLocation(PackLoc);
+ Toks[0].setAnnotationValue(static_cast<void*>(Info));
+ PP.EnterTokenStream(Toks, 1, /*DisableMacroExpansion=*/true,
+ /*OwnsTokens=*/false);
+}
+
+// #pragma ms_struct on
+// #pragma ms_struct off
+void PragmaMSStructHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &MSStructTok) {
+ Sema::PragmaMSStructKind Kind = Sema::PMSST_OFF;
+
+ Token Tok;
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_ms_struct);
+ return;
+ }
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (II->isStr("on")) {
+ Kind = Sema::PMSST_ON;
+ PP.Lex(Tok);
+ }
+ else if (II->isStr("off") || II->isStr("reset"))
+ PP.Lex(Tok);
+ else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_ms_struct);
+ return;
+ }
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << "ms_struct";
+ return;
+ }
+ Actions.ActOnPragmaMSStruct(Kind);
+}
+
+// #pragma 'align' '=' {'native','natural','packed','mac68k','power','reset'}
+// #pragma 'options' 'align' '=' {'native','natural','packed','mac68k','power','reset'}
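+// e.g. '#pragma align=mac68k' or '#pragma options align=natural'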
+static void ParseAlignPragma(Sema &Actions, Preprocessor &PP, Token &FirstTok,
+ bool IsOptions) {
+ Token Tok;
+
+ if (IsOptions) {
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier) ||
+ !Tok.getIdentifierInfo()->isStr("align")) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_options_expected_align);
+ return;
+ }
+ }
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::equal)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_align_expected_equal)
+ << IsOptions;
+ return;
+ }
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
+ << (IsOptions ? "options" : "align");
+ return;
+ }
+
+ Sema::PragmaOptionsAlignKind Kind = Sema::POAK_Natural;
+ const IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (II->isStr("native"))
+ Kind = Sema::POAK_Native;
+ else if (II->isStr("natural"))
+ Kind = Sema::POAK_Natural;
+ else if (II->isStr("packed"))
+ Kind = Sema::POAK_Packed;
+ else if (II->isStr("power"))
+ Kind = Sema::POAK_Power;
+ else if (II->isStr("mac68k"))
+ Kind = Sema::POAK_Mac68k;
+ else if (II->isStr("reset"))
+ Kind = Sema::POAK_Reset;
+ else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_align_invalid_option)
+ << IsOptions;
+ return;
+ }
+
+ SourceLocation KindLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
+ << (IsOptions ? "options" : "align");
+ return;
+ }
+
+ Actions.ActOnPragmaOptionsAlign(Kind, FirstTok.getLocation(), KindLoc);
+}
+
+void PragmaAlignHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &AlignTok) {
+ ParseAlignPragma(Actions, PP, AlignTok, /*IsOptions=*/false);
+}
+
+void PragmaOptionsHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &OptionsTok) {
+ ParseAlignPragma(Actions, PP, OptionsTok, /*IsOptions=*/true);
+}
+
+// #pragma unused(identifier)
+void PragmaUnusedHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &UnusedTok) {
+ // FIXME: Should we be expanding macros here? My guess is no.
+ SourceLocation UnusedLoc = UnusedTok.getLocation();
+
+ // Lex the left '('.
+ Token Tok;
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::l_paren)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_lparen) << "unused";
+ return;
+ }
+
+ // Lex the declaration reference(s).
+ SmallVector<Token, 5> Identifiers;
+ SourceLocation RParenLoc;
+ bool LexID = true;
+
+ while (true) {
+ PP.Lex(Tok);
+
+ if (LexID) {
+ if (Tok.is(tok::identifier)) {
+ Identifiers.push_back(Tok);
+ LexID = false;
+ continue;
+ }
+
+ // Illegal token!
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_unused_expected_var);
+ return;
+ }
+
+    // We are expecting a ')' or a ','.
+ if (Tok.is(tok::comma)) {
+ LexID = true;
+ continue;
+ }
+
+ if (Tok.is(tok::r_paren)) {
+ RParenLoc = Tok.getLocation();
+ break;
+ }
+
+ // Illegal token!
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_unused_expected_punc);
+ return;
+ }
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) <<
+ "unused";
+ return;
+ }
+
+ // Verify that we have a location for the right parenthesis.
+ assert(RParenLoc.isValid() && "Valid '#pragma unused' must have ')'");
+ assert(!Identifiers.empty() && "Valid '#pragma unused' must have arguments");
+
+ // For each identifier token, insert into the token stream a
+ // annot_pragma_unused token followed by the identifier token.
+ // This allows us to cache a "#pragma unused" that occurs inside an inline
+ // C++ member function.
+
+ Token *Toks =
+ (Token*) PP.getPreprocessorAllocator().Allocate(
+ sizeof(Token) * 2 * Identifiers.size(), llvm::alignOf<Token>());
+ for (unsigned i=0; i != Identifiers.size(); i++) {
+ Token &pragmaUnusedTok = Toks[2*i], &idTok = Toks[2*i+1];
+ pragmaUnusedTok.startToken();
+ pragmaUnusedTok.setKind(tok::annot_pragma_unused);
+ pragmaUnusedTok.setLocation(UnusedLoc);
+ idTok = Identifiers[i];
+ }
+ PP.EnterTokenStream(Toks, 2*Identifiers.size(),
+ /*DisableMacroExpansion=*/true, /*OwnsTokens=*/false);
+}
+
+// #pragma weak identifier
+// #pragma weak identifier '=' identifier
+void PragmaWeakHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &WeakTok) {
+ // FIXME: Should we be expanding macros here? My guess is no.
+ SourceLocation WeakLoc = WeakTok.getLocation();
+
+ Token Tok;
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier) << "weak";
+ return;
+ }
+
+ IdentifierInfo *WeakName = Tok.getIdentifierInfo(), *AliasName = 0;
+ SourceLocation WeakNameLoc = Tok.getLocation(), AliasNameLoc;
+
+ PP.Lex(Tok);
+ if (Tok.is(tok::equal)) {
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
+ << "weak";
+ return;
+ }
+ AliasName = Tok.getIdentifierInfo();
+ AliasNameLoc = Tok.getLocation();
+ PP.Lex(Tok);
+ }
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) << "weak";
+ return;
+ }
+
+ if (AliasName) {
+ Actions.ActOnPragmaWeakAlias(WeakName, AliasName, WeakLoc, WeakNameLoc,
+ AliasNameLoc);
+ } else {
+ Actions.ActOnPragmaWeakID(WeakName, WeakLoc, WeakNameLoc);
+ }
+}
+
+// #pragma redefine_extname identifier identifier
+void PragmaRedefineExtnameHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &RedefToken) {
+ SourceLocation RedefLoc = RedefToken.getLocation();
+
+ Token Tok;
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier) <<
+ "redefine_extname";
+ return;
+ }
+
+ IdentifierInfo *RedefName = Tok.getIdentifierInfo(), *AliasName = 0;
+ SourceLocation RedefNameLoc = Tok.getLocation(), AliasNameLoc;
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
+ << "redefine_extname";
+ return;
+ }
+ AliasName = Tok.getIdentifierInfo();
+ AliasNameLoc = Tok.getLocation();
+ PP.Lex(Tok);
+
+ if (Tok.isNot(tok::eod)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol) <<
+ "redefine_extname";
+ return;
+ }
+
+ Actions.ActOnPragmaRedefineExtname(RedefName, AliasName, RedefLoc,
+ RedefNameLoc, AliasNameLoc);
+}
+
+
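+// #pragma STDC FP_CONTRACT on-off-switch, e.g.:
+// #pragma STDC FP_CONTRACT ON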
+void
+PragmaFPContractHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ tok::OnOffSwitch OOS;
+ if (PP.LexOnOffSwitch(OOS))
+ return;
+
+ Actions.ActOnPragmaFPContract(OOS);
+}
+
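+// #pragma OPENCL EXTENSION extension-name : behavior, e.g.:
+// #pragma OPENCL EXTENSION cl_khr_fp64 : enable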
+void
+PragmaOpenCLExtensionHandler::HandlePragma(Preprocessor &PP,
+ PragmaIntroducerKind Introducer,
+ Token &Tok) {
+ PP.LexUnexpandedToken(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier) <<
+ "OPENCL";
+ return;
+ }
+ IdentifierInfo *ename = Tok.getIdentifierInfo();
+ SourceLocation NameLoc = Tok.getLocation();
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::colon)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_colon) << ename;
+ return;
+ }
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_enable_disable);
+ return;
+ }
+ IdentifierInfo *op = Tok.getIdentifierInfo();
+
+ unsigned state;
+ if (op->isStr("enable")) {
+ state = 1;
+ } else if (op->isStr("disable")) {
+ state = 0;
+ } else {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_enable_disable);
+ return;
+ }
+
+ OpenCLOptions &f = Actions.getOpenCLOptions();
+ // OpenCL 1.1 9.1: "The all variant sets the behavior for all extensions,
+ // overriding all previously issued extension directives, but only if the
+ // behavior is set to disable."
+ if (state == 0 && ename->isStr("all")) {
+#define OPENCLEXT(nm) f.nm = 0;
+#include "clang/Basic/OpenCLExtensions.def"
+ }
+#define OPENCLEXT(nm) else if (ename->isStr(#nm)) { f.nm = state; }
+#include "clang/Basic/OpenCLExtensions.def"
+ else {
+ PP.Diag(NameLoc, diag::warn_pragma_unknown_extension) << ename;
+ return;
+ }
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h
new file mode 100644
index 0000000..ebb185a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParsePragma.h
@@ -0,0 +1,127 @@
+//===---- ParsePragma.h - Language specific pragmas -------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines #pragma handlers for language specific pragmas.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_PARSE_PARSEPRAGMA_H
+#define LLVM_CLANG_PARSE_PARSEPRAGMA_H
+
+#include "clang/Lex/Pragma.h"
+
+namespace clang {
+ class Sema;
+ class Parser;
+
+class PragmaAlignHandler : public PragmaHandler {
+ Sema &Actions;
+public:
+ explicit PragmaAlignHandler(Sema &A) : PragmaHandler("align"), Actions(A) {}
+
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
+};
+
+class PragmaGCCVisibilityHandler : public PragmaHandler {
+ Sema &Actions;
+public:
+ explicit PragmaGCCVisibilityHandler(Sema &A) : PragmaHandler("visibility"),
+ Actions(A) {}
+
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
+};
+
+class PragmaOptionsHandler : public PragmaHandler {
+ Sema &Actions;
+public:
+ explicit PragmaOptionsHandler(Sema &A) : PragmaHandler("options"),
+ Actions(A) {}
+
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
+};
+
+class PragmaPackHandler : public PragmaHandler {
+ Sema &Actions;
+public:
+ explicit PragmaPackHandler(Sema &A) : PragmaHandler("pack"),
+ Actions(A) {}
+
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
+};
+
+class PragmaMSStructHandler : public PragmaHandler {
+ Sema &Actions;
+public:
+ explicit PragmaMSStructHandler(Sema &A) : PragmaHandler("ms_struct"),
+ Actions(A) {}
+
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
+};
+
+class PragmaUnusedHandler : public PragmaHandler {
+ Sema &Actions;
+ Parser &parser;
+public:
+ PragmaUnusedHandler(Sema &A, Parser& p)
+ : PragmaHandler("unused"), Actions(A), parser(p) {}
+
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
+};
+
+class PragmaWeakHandler : public PragmaHandler {
+ Sema &Actions;
+public:
+ explicit PragmaWeakHandler(Sema &A)
+ : PragmaHandler("weak"), Actions(A) {}
+
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
+};
+
+class PragmaRedefineExtnameHandler : public PragmaHandler {
+ Sema &Actions;
+public:
+ explicit PragmaRedefineExtnameHandler(Sema &A)
+ : PragmaHandler("redefine_extname"), Actions(A) {}
+
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
+};
+
+class PragmaOpenCLExtensionHandler : public PragmaHandler {
+ Sema &Actions;
+ Parser &parser;
+public:
+ PragmaOpenCLExtensionHandler(Sema &S, Parser& p) :
+ PragmaHandler("EXTENSION"), Actions(S), parser(p) {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
+};
+
+
+class PragmaFPContractHandler : public PragmaHandler {
+ Sema &Actions;
+ Parser &parser;
+public:
+ PragmaFPContractHandler(Sema &S, Parser& p) :
+ PragmaHandler("FP_CONTRACT"), Actions(S), parser(p) {}
+ virtual void HandlePragma(Preprocessor &PP, PragmaIntroducerKind Introducer,
+ Token &FirstToken);
+};
+
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
new file mode 100644
index 0000000..fdb9788
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseStmt.cpp
@@ -0,0 +1,2235 @@
+//===--- ParseStmt.cpp - Statement and Block Parser -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Statement and Block portions of the Parser
+// interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "RAIIObjectsForParser.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/PrettyDeclStackTrace.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "clang/Basic/SourceManager.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// C99 6.8: Statements and Blocks.
+//===----------------------------------------------------------------------===//
+
+/// ParseStatementOrDeclaration - Read 'statement' or 'declaration'.
+/// StatementOrDeclaration:
+/// statement
+/// declaration
+///
+/// statement:
+/// labeled-statement
+/// compound-statement
+/// expression-statement
+/// selection-statement
+/// iteration-statement
+/// jump-statement
+/// [C++] declaration-statement
+/// [C++] try-block
+/// [MS] seh-try-block
+/// [OBC] objc-throw-statement
+/// [OBC] objc-try-catch-statement
+/// [OBC] objc-synchronized-statement
+/// [GNU] asm-statement
+/// [OMP] openmp-construct [TODO]
+///
+/// labeled-statement:
+/// identifier ':' statement
+/// 'case' constant-expression ':' statement
+/// 'default' ':' statement
+///
+/// selection-statement:
+/// if-statement
+/// switch-statement
+///
+/// iteration-statement:
+/// while-statement
+/// do-statement
+/// for-statement
+///
+/// expression-statement:
+/// expression[opt] ';'
+///
+/// jump-statement:
+/// 'goto' identifier ';'
+/// 'continue' ';'
+/// 'break' ';'
+/// 'return' expression[opt] ';'
+/// [GNU] 'goto' '*' expression ';'
+///
+/// [OBC] objc-throw-statement:
+/// [OBC] '@' 'throw' expression ';'
+/// [OBC] '@' 'throw' ';'
+///
+StmtResult
+Parser::ParseStatementOrDeclaration(StmtVector &Stmts, bool OnlyStatement,
+ SourceLocation *TrailingElseLoc) {
+ const char *SemiError = 0;
+ StmtResult Res;
+
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
+
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(attrs, 0, /*MightBeObjCMessageSend*/ true);
+
+  // Cases in this switch statement should fall through if the parser expects
+  // the statement to end in a semicolon (in which case SemiError should be
+  // set), or they should directly 'return;' if not.
+Retry:
+ tok::TokenKind Kind = Tok.getKind();
+ SourceLocation AtLoc;
+ switch (Kind) {
+ case tok::at: // May be a @try or @throw statement
+ {
+ AtLoc = ConsumeToken(); // consume @
+ return ParseObjCAtStatement(AtLoc);
+ }
+
+ case tok::code_completion:
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Statement);
+ cutOffParsing();
+ return StmtError();
+
+ case tok::identifier: {
+ Token Next = NextToken();
+ if (Next.is(tok::colon)) { // C99 6.8.1: labeled-statement
+ // identifier ':' statement
+ return ParseLabeledStatement(attrs);
+ }
+
+ if (Next.isNot(tok::coloncolon)) {
+ CXXScopeSpec SS;
+ IdentifierInfo *Name = Tok.getIdentifierInfo();
+ SourceLocation NameLoc = Tok.getLocation();
+
+ if (getLangOpts().CPlusPlus)
+ CheckForTemplateAndDigraph(Next, ParsedType(),
+ /*EnteringContext=*/false, *Name, SS);
+
+ Sema::NameClassification Classification
+ = Actions.ClassifyName(getCurScope(), SS, Name, NameLoc, Next);
+ switch (Classification.getKind()) {
+ case Sema::NC_Keyword:
+ // The identifier was corrected to a keyword. Update the token
+ // to this keyword, and try again.
+ if (Name->getTokenID() != tok::identifier) {
+ Tok.setIdentifierInfo(Name);
+ Tok.setKind(Name->getTokenID());
+ goto Retry;
+ }
+
+ // Fall through via the normal error path.
+ // FIXME: This seems like it could only happen for context-sensitive
+ // keywords.
+
+ case Sema::NC_Error:
+ // Handle errors here by skipping up to the next semicolon or '}', and
+ // eat the semicolon if that's what stopped us.
+ SkipUntil(tok::r_brace, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return StmtError();
+
+ case Sema::NC_Unknown:
+ // Either we don't know anything about this identifier, or we know that
+ // we're in a syntactic context we haven't handled yet.
+ break;
+
+ case Sema::NC_Type:
+ Tok.setKind(tok::annot_typename);
+ setTypeAnnotation(Tok, Classification.getType());
+ Tok.setAnnotationEndLoc(NameLoc);
+ PP.AnnotateCachedTokens(Tok);
+ break;
+
+ case Sema::NC_Expression:
+ Tok.setKind(tok::annot_primary_expr);
+ setExprAnnotation(Tok, Classification.getExpression());
+ Tok.setAnnotationEndLoc(NameLoc);
+ PP.AnnotateCachedTokens(Tok);
+ break;
+
+ case Sema::NC_TypeTemplate:
+ case Sema::NC_FunctionTemplate: {
+ ConsumeToken(); // the identifier
+ UnqualifiedId Id;
+ Id.setIdentifier(Name, NameLoc);
+ if (AnnotateTemplateIdToken(
+ TemplateTy::make(Classification.getTemplateName()),
+ Classification.getTemplateNameKind(),
+ SS, SourceLocation(), Id,
+ /*AllowTypeAnnotation=*/false)) {
+ // Handle errors here by skipping up to the next semicolon or '}', and
+ // eat the semicolon if that's what stopped us.
+ SkipUntil(tok::r_brace, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return StmtError();
+ }
+
+ // If the next token is '::', jump right into parsing a
+ // nested-name-specifier. We don't want to leave the template-id
+ // hanging.
+ if (NextToken().is(tok::coloncolon) && TryAnnotateCXXScopeToken(false)){
+ // Handle errors here by skipping up to the next semicolon or '}', and
+ // eat the semicolon if that's what stopped us.
+ SkipUntil(tok::r_brace, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return StmtError();
+ }
+
+ // We've annotated a template-id, so try again now.
+ goto Retry;
+ }
+
+ case Sema::NC_NestedNameSpecifier:
+ // FIXME: Implement this!
+ break;
+ }
+ }
+
+ // Fall through
+ }
+
+ default: {
+ if ((getLangOpts().CPlusPlus || !OnlyStatement) && isDeclarationStatement()) {
+ SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
+ DeclGroupPtrTy Decl = ParseDeclaration(Stmts, Declarator::BlockContext,
+ DeclEnd, attrs);
+ return Actions.ActOnDeclStmt(Decl, DeclStart, DeclEnd);
+ }
+
+ if (Tok.is(tok::r_brace)) {
+ Diag(Tok, diag::err_expected_statement);
+ return StmtError();
+ }
+
+ return ParseExprStatement(attrs);
+ }
+
+ case tok::kw_case: // C99 6.8.1: labeled-statement
+ return ParseCaseStatement(attrs);
+ case tok::kw_default: // C99 6.8.1: labeled-statement
+ return ParseDefaultStatement(attrs);
+
+ case tok::l_brace: // C99 6.8.2: compound-statement
+ return ParseCompoundStatement(attrs);
+ case tok::semi: { // C99 6.8.3p3: expression[opt] ';'
+ bool HasLeadingEmptyMacro = Tok.hasLeadingEmptyMacro();
+ return Actions.ActOnNullStmt(ConsumeToken(), HasLeadingEmptyMacro);
+ }
+
+ case tok::kw_if: // C99 6.8.4.1: if-statement
+ return ParseIfStatement(attrs, TrailingElseLoc);
+ case tok::kw_switch: // C99 6.8.4.2: switch-statement
+ return ParseSwitchStatement(attrs, TrailingElseLoc);
+
+ case tok::kw_while: // C99 6.8.5.1: while-statement
+ return ParseWhileStatement(attrs, TrailingElseLoc);
+ case tok::kw_do: // C99 6.8.5.2: do-statement
+ Res = ParseDoStatement(attrs);
+ SemiError = "do/while";
+ break;
+ case tok::kw_for: // C99 6.8.5.3: for-statement
+ return ParseForStatement(attrs, TrailingElseLoc);
+
+ case tok::kw_goto: // C99 6.8.6.1: goto-statement
+ Res = ParseGotoStatement(attrs);
+ SemiError = "goto";
+ break;
+ case tok::kw_continue: // C99 6.8.6.2: continue-statement
+ Res = ParseContinueStatement(attrs);
+ SemiError = "continue";
+ break;
+ case tok::kw_break: // C99 6.8.6.3: break-statement
+ Res = ParseBreakStatement(attrs);
+ SemiError = "break";
+ break;
+ case tok::kw_return: // C99 6.8.6.4: return-statement
+ Res = ParseReturnStatement(attrs);
+ SemiError = "return";
+ break;
+
+ case tok::kw_asm: {
+ ProhibitAttributes(attrs);
+ bool msAsm = false;
+ Res = ParseAsmStatement(msAsm);
+ Res = Actions.ActOnFinishFullStmt(Res.get());
+ if (msAsm) return move(Res);
+ SemiError = "asm";
+ break;
+ }
+
+ case tok::kw_try: // C++ 15: try-block
+ return ParseCXXTryBlock(attrs);
+
+ case tok::kw___try:
+ return ParseSEHTryBlock(attrs);
+
+ case tok::annot_pragma_vis:
+ HandlePragmaVisibility();
+ return StmtEmpty();
+
+ case tok::annot_pragma_pack:
+ HandlePragmaPack();
+ return StmtEmpty();
+ }
+
+ // If we reached this code, the statement must end in a semicolon.
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ } else if (!Res.isInvalid()) {
+ // If the result was valid, then we do want to diagnose this. Use
+ // ExpectAndConsume to emit the diagnostic, even though we know it won't
+ // succeed.
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after_stmt, SemiError);
+ // Skip until we see a } or ;, but don't eat it.
+ SkipUntil(tok::r_brace, true, true);
+ }
+
+ return move(Res);
+}
+
+/// \brief Parse an expression statement.
+StmtResult Parser::ParseExprStatement(ParsedAttributes &Attrs) {
+ // If a case keyword is missing, this is where it should be inserted.
+ Token OldToken = Tok;
+
+ // FIXME: Use the attributes
+ // expression[opt] ';'
+ ExprResult Expr(ParseExpression());
+ if (Expr.isInvalid()) {
+ // If the expression is invalid, skip ahead to the next semicolon or '}'.
+ // Not doing this opens us up to the possibility of infinite loops if
+ // ParseExpression does not consume any tokens.
+ SkipUntil(tok::r_brace, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return StmtError();
+ }
+
+ if (Tok.is(tok::colon) && getCurScope()->isSwitchScope() &&
+ Actions.CheckCaseExpression(Expr.get())) {
+ // If a constant expression is followed by a colon inside a switch block,
+ // suggest a missing case keyword.
+ Diag(OldToken, diag::err_expected_case_before_expression)
+ << FixItHint::CreateInsertion(OldToken.getLocation(), "case ");
+
+ // Recover parsing as a case statement.
+ return ParseCaseStatement(Attrs, /*MissingCase=*/true, Expr);
+ }
+
+ // Otherwise, eat the semicolon.
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
+ return Actions.ActOnExprStmt(Actions.MakeFullExpr(Expr.get()));
+}
+
+StmtResult Parser::ParseSEHTryBlock(ParsedAttributes & Attrs) {
+ assert(Tok.is(tok::kw___try) && "Expected '__try'");
+ SourceLocation Loc = ConsumeToken();
+ return ParseSEHTryBlockCommon(Loc);
+}
+
+/// ParseSEHTryBlockCommon
+///
+/// seh-try-block:
+/// '__try' compound-statement seh-handler
+///
+/// seh-handler:
+/// seh-except-block
+/// seh-finally-block
+///
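+/// For illustration, under -fms-extensions this parses code such as:
+///   __try {
+///     *p = 0;
+///   } __except (EXCEPTION_EXECUTE_HANDLER) {
+///     puts("caught");
+///   }
+///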
+StmtResult Parser::ParseSEHTryBlockCommon(SourceLocation TryLoc) {
+ if(Tok.isNot(tok::l_brace))
+ return StmtError(Diag(Tok,diag::err_expected_lbrace));
+
+ ParsedAttributesWithRange attrs(AttrFactory);
+ StmtResult TryBlock(ParseCompoundStatement(attrs));
+ if(TryBlock.isInvalid())
+ return move(TryBlock);
+
+ StmtResult Handler;
+ if (Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo() == getSEHExceptKeyword()) {
+ SourceLocation Loc = ConsumeToken();
+ Handler = ParseSEHExceptBlock(Loc);
+ } else if (Tok.is(tok::kw___finally)) {
+ SourceLocation Loc = ConsumeToken();
+ Handler = ParseSEHFinallyBlock(Loc);
+ } else {
+ return StmtError(Diag(Tok,diag::err_seh_expected_handler));
+ }
+
+ if(Handler.isInvalid())
+ return move(Handler);
+
+ return Actions.ActOnSEHTryBlock(false /* IsCXXTry */,
+ TryLoc,
+ TryBlock.take(),
+ Handler.take());
+}
+
+/// ParseSEHExceptBlock - Handle __except
+///
+/// seh-except-block:
+/// '__except' '(' seh-filter-expression ')' compound-statement
+///
+StmtResult Parser::ParseSEHExceptBlock(SourceLocation ExceptLoc) {
+ PoisonIdentifierRAIIObject raii(Ident__exception_code, false),
+ raii2(Ident___exception_code, false),
+ raii3(Ident_GetExceptionCode, false);
+
+ if(ExpectAndConsume(tok::l_paren,diag::err_expected_lparen))
+ return StmtError();
+
+ ParseScope ExpectScope(this, Scope::DeclScope | Scope::ControlScope);
+
+ if (getLangOpts().Borland) {
+ Ident__exception_info->setIsPoisoned(false);
+ Ident___exception_info->setIsPoisoned(false);
+ Ident_GetExceptionInfo->setIsPoisoned(false);
+ }
+ ExprResult FilterExpr(ParseExpression());
+
+ if (getLangOpts().Borland) {
+ Ident__exception_info->setIsPoisoned(true);
+ Ident___exception_info->setIsPoisoned(true);
+ Ident_GetExceptionInfo->setIsPoisoned(true);
+ }
+
+ if(FilterExpr.isInvalid())
+ return StmtError();
+
+ if(ExpectAndConsume(tok::r_paren,diag::err_expected_rparen))
+ return StmtError();
+
+ ParsedAttributesWithRange attrs(AttrFactory);
+ StmtResult Block(ParseCompoundStatement(attrs));
+
+ if(Block.isInvalid())
+ return move(Block);
+
+ return Actions.ActOnSEHExceptBlock(ExceptLoc, FilterExpr.take(), Block.take());
+}
+
+/// ParseSEHFinallyBlock - Handle __finally
+///
+/// seh-finally-block:
+/// '__finally' compound-statement
+///
+StmtResult Parser::ParseSEHFinallyBlock(SourceLocation FinallyBlock) {
+ PoisonIdentifierRAIIObject raii(Ident__abnormal_termination, false),
+ raii2(Ident___abnormal_termination, false),
+ raii3(Ident_AbnormalTermination, false);
+
+ ParsedAttributesWithRange attrs(AttrFactory);
+ StmtResult Block(ParseCompoundStatement(attrs));
+ if(Block.isInvalid())
+ return move(Block);
+
+ return Actions.ActOnSEHFinallyBlock(FinallyBlock,Block.take());
+}
+
+/// ParseLabeledStatement - We have an identifier and a ':' after it.
+///
+/// labeled-statement:
+/// identifier ':' statement
+/// [GNU] identifier ':' attributes[opt] statement
+///
+StmtResult Parser::ParseLabeledStatement(ParsedAttributes &attrs) {
+ assert(Tok.is(tok::identifier) && Tok.getIdentifierInfo() &&
+ "Not an identifier!");
+
+ Token IdentTok = Tok; // Save the whole token.
+ ConsumeToken(); // eat the identifier.
+
+ assert(Tok.is(tok::colon) && "Not a label!");
+
+ // identifier ':' statement
+ SourceLocation ColonLoc = ConsumeToken();
+
+ // Read label attributes, if present.
+ MaybeParseGNUAttributes(attrs);
+
+ StmtResult SubStmt(ParseStatement());
+
+ // Broken substmt shouldn't prevent the label from being added to the AST.
+ if (SubStmt.isInvalid())
+ SubStmt = Actions.ActOnNullStmt(ColonLoc);
+
+ LabelDecl *LD = Actions.LookupOrCreateLabel(IdentTok.getIdentifierInfo(),
+ IdentTok.getLocation());
+ if (AttributeList *Attrs = attrs.getList())
+ Actions.ProcessDeclAttributeList(Actions.CurScope, LD, Attrs);
+
+ return Actions.ActOnLabelStmt(IdentTok.getLocation(), LD, ColonLoc,
+ SubStmt.get());
+}
+
+/// ParseCaseStatement
+/// labeled-statement:
+/// 'case' constant-expression ':' statement
+/// [GNU] 'case' constant-expression '...' constant-expression ':' statement
+///
+StmtResult Parser::ParseCaseStatement(ParsedAttributes &attrs, bool MissingCase,
+ ExprResult Expr) {
+ assert((MissingCase || Tok.is(tok::kw_case)) && "Not a case stmt!");
+ // FIXME: Use attributes?
+
+  // It is very common for code to contain many case statements recursively
+  // nested, as in (but usually without indentation):
+  //  case 1:
+  //    case 2:
+  //      case 3:
+  //        case 4:
+  //          case 5: etc.
+  //
+  // Parsing this naively works, but is inefficient and can cause us to run
+  // out of stack space in our recursive descent parser. As a special case,
+  // flatten this recursion into an iterative loop. This is complex and gross,
+  // but all the grossness is constrained to ParseCaseStatement (and some
+  // weirdness in the actions), so this is just local grossness :).
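+  //
+  // For illustration, given "case 1: case 2: case 3: body;", TopLevelCase ends
+  // up holding 'case 1', DeepestParsedCaseStmt ends at 'case 3', and the loop
+  // below attaches each new case as the body of the previous one instead of
+  // recursing once per label.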
+
+ // TopLevelCase - This is the highest level we have parsed. 'case 1' in the
+ // example above.
+ StmtResult TopLevelCase(true);
+
+ // DeepestParsedCaseStmt - This is the deepest statement we have parsed, which
+ // gets updated each time a new case is parsed, and whose body is unset so
+ // far. When parsing 'case 4', this is the 'case 3' node.
+ Stmt *DeepestParsedCaseStmt = 0;
+
+ // While we have case statements, eat and stack them.
+ SourceLocation ColonLoc;
+ do {
+ SourceLocation CaseLoc = MissingCase ? Expr.get()->getExprLoc() :
+ ConsumeToken(); // eat the 'case'.
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteCase(getCurScope());
+ cutOffParsing();
+ return StmtError();
+ }
+
+ /// We don't want to treat 'case x : y' as a potential typo for 'case x::y'.
+ /// Disable this form of error recovery while we're parsing the case
+ /// expression.
+ ColonProtectionRAIIObject ColonProtection(*this);
+
+ ExprResult LHS(MissingCase ? Expr : ParseConstantExpression());
+ MissingCase = false;
+ if (LHS.isInvalid()) {
+ SkipUntil(tok::colon);
+ return StmtError();
+ }
+
+ // GNU case range extension.
+ SourceLocation DotDotDotLoc;
+ ExprResult RHS;
+ if (Tok.is(tok::ellipsis)) {
+ Diag(Tok, diag::ext_gnu_case_range);
+ DotDotDotLoc = ConsumeToken();
+
+ RHS = ParseConstantExpression();
+ if (RHS.isInvalid()) {
+ SkipUntil(tok::colon);
+ return StmtError();
+ }
+ }
+
+ ColonProtection.restore();
+
+ if (Tok.is(tok::colon)) {
+ ColonLoc = ConsumeToken();
+
+ // Treat "case blah;" as a typo for "case blah:".
+ } else if (Tok.is(tok::semi)) {
+ ColonLoc = ConsumeToken();
+ Diag(ColonLoc, diag::err_expected_colon_after) << "'case'"
+ << FixItHint::CreateReplacement(ColonLoc, ":");
+ } else {
+ SourceLocation ExpectedLoc = PP.getLocForEndOfToken(PrevTokLocation);
+ Diag(ExpectedLoc, diag::err_expected_colon_after) << "'case'"
+ << FixItHint::CreateInsertion(ExpectedLoc, ":");
+ ColonLoc = ExpectedLoc;
+ }
+
+ StmtResult Case =
+ Actions.ActOnCaseStmt(CaseLoc, LHS.get(), DotDotDotLoc,
+ RHS.get(), ColonLoc);
+
+ // If we had a sema error parsing this case, then just ignore it and
+ // continue parsing the sub-stmt.
+ if (Case.isInvalid()) {
+ if (TopLevelCase.isInvalid()) // No parsed case stmts.
+ return ParseStatement();
+ // Otherwise, just don't add it as a nested case.
+ } else {
+ // If this is the first case statement we parsed, it becomes TopLevelCase.
+ // Otherwise we link it into the current chain.
+ Stmt *NextDeepest = Case.get();
+ if (TopLevelCase.isInvalid())
+ TopLevelCase = move(Case);
+ else
+ Actions.ActOnCaseStmtBody(DeepestParsedCaseStmt, Case.get());
+ DeepestParsedCaseStmt = NextDeepest;
+ }
+
+ // Handle all case statements.
+ } while (Tok.is(tok::kw_case));
+
+ assert(!TopLevelCase.isInvalid() && "Should have parsed at least one case!");
+
+ // If we found a non-case statement, start by parsing it.
+ StmtResult SubStmt;
+
+ if (Tok.isNot(tok::r_brace)) {
+ SubStmt = ParseStatement();
+ } else {
+ // Nicely diagnose the common error "switch (X) { case 4: }", which is
+ // not valid.
+ SourceLocation AfterColonLoc = PP.getLocForEndOfToken(ColonLoc);
+ Diag(AfterColonLoc, diag::err_label_end_of_compound_statement)
+ << FixItHint::CreateInsertion(AfterColonLoc, " ;");
+ SubStmt = true;
+ }
+
+ // Broken sub-stmt shouldn't prevent forming the case statement properly.
+ if (SubStmt.isInvalid())
+ SubStmt = Actions.ActOnNullStmt(SourceLocation());
+
+ // Install the body into the most deeply-nested case.
+ Actions.ActOnCaseStmtBody(DeepestParsedCaseStmt, SubStmt.get());
+
+ // Return the top level parsed statement tree.
+ return move(TopLevelCase);
+}
+
+/// ParseDefaultStatement
+/// labeled-statement:
+/// 'default' ':' statement
+/// Note that the trailing 'statement' is parsed as the label's substatement.
+///
+StmtResult Parser::ParseDefaultStatement(ParsedAttributes &attrs) {
+ //FIXME: Use attributes?
+
+ assert(Tok.is(tok::kw_default) && "Not a default stmt!");
+ SourceLocation DefaultLoc = ConsumeToken(); // eat the 'default'.
+
+ SourceLocation ColonLoc;
+ if (Tok.is(tok::colon)) {
+ ColonLoc = ConsumeToken();
+
+ // Treat "default;" as a typo for "default:".
+ } else if (Tok.is(tok::semi)) {
+ ColonLoc = ConsumeToken();
+ Diag(ColonLoc, diag::err_expected_colon_after) << "'default'"
+ << FixItHint::CreateReplacement(ColonLoc, ":");
+ } else {
+ SourceLocation ExpectedLoc = PP.getLocForEndOfToken(PrevTokLocation);
+ Diag(ExpectedLoc, diag::err_expected_colon_after) << "'default'"
+ << FixItHint::CreateInsertion(ExpectedLoc, ":");
+ ColonLoc = ExpectedLoc;
+ }
+
+ StmtResult SubStmt;
+
+ if (Tok.isNot(tok::r_brace)) {
+ SubStmt = ParseStatement();
+ } else {
+ // Diagnose the common error "switch (X) {... default: }", which is
+ // not valid.
+ SourceLocation AfterColonLoc = PP.getLocForEndOfToken(ColonLoc);
+ Diag(AfterColonLoc, diag::err_label_end_of_compound_statement)
+ << FixItHint::CreateInsertion(AfterColonLoc, " ;");
+ SubStmt = true;
+ }
+
+  // Broken sub-stmt shouldn't prevent forming the default statement properly.
+ if (SubStmt.isInvalid())
+ SubStmt = Actions.ActOnNullStmt(ColonLoc);
+
+ return Actions.ActOnDefaultStmt(DefaultLoc, ColonLoc,
+ SubStmt.get(), getCurScope());
+}
+
+StmtResult Parser::ParseCompoundStatement(ParsedAttributes &Attr,
+ bool isStmtExpr) {
+ return ParseCompoundStatement(Attr, isStmtExpr, Scope::DeclScope);
+}
+
+/// ParseCompoundStatement - Parse a "{}" block.
+///
+/// compound-statement: [C99 6.8.2]
+/// { block-item-list[opt] }
+/// [GNU] { label-declarations block-item-list } [TODO]
+///
+/// block-item-list:
+/// block-item
+/// block-item-list block-item
+///
+/// block-item:
+/// declaration
+/// [GNU] '__extension__' declaration
+/// statement
+/// [OMP] openmp-directive [TODO]
+///
+/// [GNU] label-declarations:
+/// [GNU] label-declaration
+/// [GNU] label-declarations label-declaration
+///
+/// [GNU] label-declaration:
+/// [GNU] '__label__' identifier-list ';'
+///
+/// [OMP] openmp-directive: [TODO]
+/// [OMP] barrier-directive
+/// [OMP] flush-directive
+///
+StmtResult Parser::ParseCompoundStatement(ParsedAttributes &attrs,
+ bool isStmtExpr,
+ unsigned ScopeFlags) {
+ //FIXME: Use attributes?
+
+  assert(Tok.is(tok::l_brace) && "Not a compound stmt!");
+
+ // Enter a scope to hold everything within the compound stmt. Compound
+ // statements can always hold declarations.
+ ParseScope CompoundScope(this, ScopeFlags);
+
+ // Parse the statements in the body.
+ return ParseCompoundStatementBody(isStmtExpr);
+}
+
+/// ParseCompoundStatementBody - Parse a sequence of statements and invoke the
+/// ActOnCompoundStmt action. This expects the '{' to be the current token and
+/// consumes the '}' at the end of the block. It does not manipulate the scope
+/// stack.
+StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
+ PrettyStackTraceLoc CrashInfo(PP.getSourceManager(),
+ Tok.getLocation(),
+ "in compound statement ('{}')");
+ InMessageExpressionRAIIObject InMessage(*this, false);
+ BalancedDelimiterTracker T(*this, tok::l_brace);
+ if (T.consumeOpen())
+ return StmtError();
+
+ Sema::CompoundScopeRAII CompoundScope(Actions);
+
+ StmtVector Stmts(Actions);
+
+ // "__label__ X, Y, Z;" is the GNU "Local Label" extension. These are
+ // only allowed at the start of a compound stmt regardless of the language.
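+  //
+  // For illustration, the extension looks like:
+  //   { __label__ retry; retry: if (again()) goto retry; }
+  // where the label 'retry' is local to the enclosing block ('again' is just
+  // a placeholder).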
+ while (Tok.is(tok::kw___label__)) {
+ SourceLocation LabelLoc = ConsumeToken();
+ Diag(LabelLoc, diag::ext_gnu_local_label);
+
+ SmallVector<Decl *, 8> DeclsInGroup;
+ while (1) {
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ break;
+ }
+
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ SourceLocation IdLoc = ConsumeToken();
+ DeclsInGroup.push_back(Actions.LookupOrCreateLabel(II, IdLoc, LabelLoc));
+
+ if (!Tok.is(tok::comma))
+ break;
+ ConsumeToken();
+ }
+
+ DeclSpec DS(AttrFactory);
+ DeclGroupPtrTy Res = Actions.FinalizeDeclaratorGroup(getCurScope(), DS,
+ DeclsInGroup.data(), DeclsInGroup.size());
+ StmtResult R = Actions.ActOnDeclStmt(Res, LabelLoc, Tok.getLocation());
+
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_declaration);
+ if (R.isUsable())
+ Stmts.push_back(R.release());
+ }
+
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+ if (Tok.is(tok::annot_pragma_unused)) {
+ HandlePragmaUnused();
+ continue;
+ }
+
+ if (getLangOpts().MicrosoftExt && (Tok.is(tok::kw___if_exists) ||
+ Tok.is(tok::kw___if_not_exists))) {
+ ParseMicrosoftIfExistsStatement(Stmts);
+ continue;
+ }
+
+ StmtResult R;
+ if (Tok.isNot(tok::kw___extension__)) {
+ R = ParseStatementOrDeclaration(Stmts, false);
+ } else {
+ // __extension__ can start declarations and it can also be a unary
+ // operator for expressions. Consume multiple __extension__ markers here
+ // until we can determine which is which.
+ // FIXME: This loses extension expressions in the AST!
+ SourceLocation ExtLoc = ConsumeToken();
+ while (Tok.is(tok::kw___extension__))
+ ConsumeToken();
+
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(attrs, 0, /*MightBeObjCMessageSend*/ true);
+
+ // If this is the start of a declaration, parse it as such.
+ if (isDeclarationStatement()) {
+ // __extension__ silences extension warnings in the subdeclaration.
+ // FIXME: Save the __extension__ on the decl as a node somehow?
+ ExtensionRAIIObject O(Diags);
+
+ SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
+ DeclGroupPtrTy Res = ParseDeclaration(Stmts,
+ Declarator::BlockContext, DeclEnd,
+ attrs);
+ R = Actions.ActOnDeclStmt(Res, DeclStart, DeclEnd);
+ } else {
+ // Otherwise this was a unary __extension__ marker.
+ ExprResult Res(ParseExpressionWithLeadingExtension(ExtLoc));
+
+ if (Res.isInvalid()) {
+ SkipUntil(tok::semi);
+ continue;
+ }
+
+ // FIXME: Use attributes?
+ // Eat the semicolon at the end of stmt and convert the expr into a
+ // statement.
+ ExpectAndConsumeSemi(diag::err_expected_semi_after_expr);
+ R = Actions.ActOnExprStmt(Actions.MakeFullExpr(Res.get()));
+ }
+ }
+
+ if (R.isUsable())
+ Stmts.push_back(R.release());
+ }
+
+ SourceLocation CloseLoc = Tok.getLocation();
+
+ // We broke out of the while loop because we found a '}' or EOF.
+ if (Tok.isNot(tok::r_brace)) {
+ Diag(Tok, diag::err_expected_rbrace);
+ Diag(T.getOpenLocation(), diag::note_matching) << "{";
+ // Recover by creating a compound statement with what we parsed so far,
+ // instead of dropping everything and returning StmtError();
+ } else {
+ if (!T.consumeClose())
+ CloseLoc = T.getCloseLocation();
+ }
+
+ return Actions.ActOnCompoundStmt(T.getOpenLocation(), CloseLoc,
+ move_arg(Stmts), isStmtExpr);
+}
+
+/// ParseParenExprOrCondition:
+/// [C ] '(' expression ')'
+/// [C++] '(' condition ')' [not allowed if OnlyAllowCondition=true]
+///
+/// This function parses and performs error recovery on the specified condition
+/// or expression (depending on whether we're in C++ or C mode). This function
+/// goes out of its way to recover well. It returns true if there was a parser
+/// error (the right paren couldn't be found), which indicates that the caller
+/// should try to recover harder. It returns false if the condition is
+/// successfully parsed. Note that a successful parse can still have semantic
+/// errors in the condition.
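+///
+/// For illustration, in C this accepts '(x > 0)', while in C++ it also accepts
+/// a condition declaration such as '(int v = f())', which is what permits
+/// 'if (int v = f())' and 'while (int v = f())'.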
+bool Parser::ParseParenExprOrCondition(ExprResult &ExprResult,
+ Decl *&DeclResult,
+ SourceLocation Loc,
+ bool ConvertToBoolean) {
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ if (getLangOpts().CPlusPlus)
+ ParseCXXCondition(ExprResult, DeclResult, Loc, ConvertToBoolean);
+ else {
+ ExprResult = ParseExpression();
+ DeclResult = 0;
+
+ // If required, convert to a boolean value.
+ if (!ExprResult.isInvalid() && ConvertToBoolean)
+ ExprResult
+ = Actions.ActOnBooleanCondition(getCurScope(), Loc, ExprResult.get());
+ }
+
+ // If the parser was confused by the condition and we don't have a ')', try to
+ // recover by skipping ahead to a semi and bailing out. If condexp is
+ // semantically invalid but we have well formed code, keep going.
+ if (ExprResult.isInvalid() && !DeclResult && Tok.isNot(tok::r_paren)) {
+ SkipUntil(tok::semi);
+ // Skipping may have stopped if it found the containing ')'. If so, we can
+ // continue parsing the if statement.
+ if (Tok.isNot(tok::r_paren))
+ return true;
+ }
+
+ // Otherwise the condition is valid or the rparen is present.
+ T.consumeClose();
+ return false;
+}
+
+
+/// ParseIfStatement
+/// if-statement: [C99 6.8.4.1]
+/// 'if' '(' expression ')' statement
+/// 'if' '(' expression ')' statement 'else' statement
+/// [C++] 'if' '(' condition ')' statement
+/// [C++] 'if' '(' condition ')' statement 'else' statement
+///
+StmtResult Parser::ParseIfStatement(ParsedAttributes &attrs,
+ SourceLocation *TrailingElseLoc) {
+ // FIXME: Use attributes?
+
+ assert(Tok.is(tok::kw_if) && "Not an if stmt!");
+ SourceLocation IfLoc = ConsumeToken(); // eat the 'if'.
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "if";
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+
+ bool C99orCXX = getLangOpts().C99 || getLangOpts().CPlusPlus;
+
+ // C99 6.8.4p3 - In C99, the if statement is a block. This is not
+ // the case for C90.
+ //
+ // C++ 6.4p3:
+ // A name introduced by a declaration in a condition is in scope from its
+ // point of declaration until the end of the substatements controlled by the
+ // condition.
+ // C++ 3.3.2p4:
+ // Names declared in the for-init-statement, and in the condition of if,
+ // while, for, and switch statements are local to the if, while, for, or
+ // switch statement (including the controlled statement).
+ //
+ ParseScope IfScope(this, Scope::DeclScope | Scope::ControlScope, C99orCXX);
+
+ // Parse the condition.
+ ExprResult CondExp;
+ Decl *CondVar = 0;
+ if (ParseParenExprOrCondition(CondExp, CondVar, IfLoc, true))
+ return StmtError();
+
+ FullExprArg FullCondExp(Actions.MakeFullExpr(CondExp.get()));
+
+ // C99 6.8.4p3 - In C99, the body of the if statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do this
+ // if the body isn't a compound statement to avoid push/pop in common cases.
+ //
+ // C++ 6.4p1:
+ // The substatement in a selection-statement (each substatement, in the else
+ // form of the if statement) implicitly defines a local scope.
+ //
+ // For C++ we create a scope for the condition and a new scope for
+ // substatements because:
+ // -When the 'then' scope exits, we want the condition declaration to still be
+ // active for the 'else' scope too.
+ // -Sema will detect name clashes by considering declarations of a
+ // 'ControlScope' as part of its direct subscope.
+ // -If we wanted the condition and substatement to be in the same scope, we
+ // would have to notify ParseStatement not to create a new scope. It's
+ // simpler to let it create a new scope.
+ //
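+  // For illustration, in 'if (int *p = get()) use(*p); else report_null();'
+  // the condition variable 'p' must still be visible in the else branch, which
+  // is why it lives in the enclosing IfScope rather than in InnerScope below.
+  //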
+ ParseScope InnerScope(this, Scope::DeclScope,
+ C99orCXX && Tok.isNot(tok::l_brace));
+
+ // Read the 'then' stmt.
+ SourceLocation ThenStmtLoc = Tok.getLocation();
+
+ SourceLocation InnerStatementTrailingElseLoc;
+ StmtResult ThenStmt(ParseStatement(&InnerStatementTrailingElseLoc));
+
+ // Pop the 'if' scope if needed.
+ InnerScope.Exit();
+
+ // If it has an else, parse it.
+ SourceLocation ElseLoc;
+ SourceLocation ElseStmtLoc;
+ StmtResult ElseStmt;
+
+ if (Tok.is(tok::kw_else)) {
+ if (TrailingElseLoc)
+ *TrailingElseLoc = Tok.getLocation();
+
+ ElseLoc = ConsumeToken();
+ ElseStmtLoc = Tok.getLocation();
+
+ // C99 6.8.4p3 - In C99, the body of the if statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do
+ // this if the body isn't a compound statement to avoid push/pop in common
+ // cases.
+ //
+ // C++ 6.4p1:
+ // The substatement in a selection-statement (each substatement, in the else
+ // form of the if statement) implicitly defines a local scope.
+ //
+ ParseScope InnerScope(this, Scope::DeclScope,
+ C99orCXX && Tok.isNot(tok::l_brace));
+
+ ElseStmt = ParseStatement();
+
+ // Pop the 'else' scope if needed.
+ InnerScope.Exit();
+ } else if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteAfterIf(getCurScope());
+ cutOffParsing();
+ return StmtError();
+ } else if (InnerStatementTrailingElseLoc.isValid()) {
+ Diag(InnerStatementTrailingElseLoc, diag::warn_dangling_else);
+ }
+
+ IfScope.Exit();
+
+ // If the condition was invalid, discard the if statement. We could recover
+ // better by replacing it with a valid expr, but don't do that yet.
+ if (CondExp.isInvalid() && !CondVar)
+ return StmtError();
+
+ // If the then or else stmt is invalid and the other is valid (and present),
+  // turn the invalid one into a null stmt to avoid dropping the other
+ // part. If both are invalid, return error.
+ if ((ThenStmt.isInvalid() && ElseStmt.isInvalid()) ||
+ (ThenStmt.isInvalid() && ElseStmt.get() == 0) ||
+ (ThenStmt.get() == 0 && ElseStmt.isInvalid())) {
+ // Both invalid, or one is invalid and other is non-present: return error.
+ return StmtError();
+ }
+
+ // Now if either are invalid, replace with a ';'.
+ if (ThenStmt.isInvalid())
+ ThenStmt = Actions.ActOnNullStmt(ThenStmtLoc);
+ if (ElseStmt.isInvalid())
+ ElseStmt = Actions.ActOnNullStmt(ElseStmtLoc);
+
+ return Actions.ActOnIfStmt(IfLoc, FullCondExp, CondVar, ThenStmt.get(),
+ ElseLoc, ElseStmt.get());
+}
+
+/// ParseSwitchStatement
+/// switch-statement:
+/// 'switch' '(' expression ')' statement
+/// [C++] 'switch' '(' condition ')' statement
+StmtResult Parser::ParseSwitchStatement(ParsedAttributes &attrs,
+ SourceLocation *TrailingElseLoc) {
+ // FIXME: Use attributes?
+
+ assert(Tok.is(tok::kw_switch) && "Not a switch stmt!");
+ SourceLocation SwitchLoc = ConsumeToken(); // eat the 'switch'.
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "switch";
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+
+ bool C99orCXX = getLangOpts().C99 || getLangOpts().CPlusPlus;
+
+ // C99 6.8.4p3 - In C99, the switch statement is a block. This is
+ // not the case for C90. Start the switch scope.
+ //
+ // C++ 6.4p3:
+ // A name introduced by a declaration in a condition is in scope from its
+ // point of declaration until the end of the substatements controlled by the
+ // condition.
+ // C++ 3.3.2p4:
+ // Names declared in the for-init-statement, and in the condition of if,
+ // while, for, and switch statements are local to the if, while, for, or
+ // switch statement (including the controlled statement).
+ //
+ unsigned ScopeFlags = Scope::BreakScope | Scope::SwitchScope;
+ if (C99orCXX)
+ ScopeFlags |= Scope::DeclScope | Scope::ControlScope;
+ ParseScope SwitchScope(this, ScopeFlags);
+
+ // Parse the condition.
+ ExprResult Cond;
+ Decl *CondVar = 0;
+ if (ParseParenExprOrCondition(Cond, CondVar, SwitchLoc, false))
+ return StmtError();
+
+ StmtResult Switch
+ = Actions.ActOnStartOfSwitchStmt(SwitchLoc, Cond.get(), CondVar);
+
+ if (Switch.isInvalid()) {
+ // Skip the switch body.
+ // FIXME: This is not optimal recovery, but parsing the body is more
+ // dangerous due to the presence of case and default statements, which
+ // will have no place to connect back with the switch.
+ if (Tok.is(tok::l_brace)) {
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, false, false);
+ } else
+ SkipUntil(tok::semi);
+ return move(Switch);
+ }
+
+ // C99 6.8.4p3 - In C99, the body of the switch statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do this
+ // if the body isn't a compound statement to avoid push/pop in common cases.
+ //
+ // C++ 6.4p1:
+ // The substatement in a selection-statement (each substatement, in the else
+ // form of the if statement) implicitly defines a local scope.
+ //
+ // See comments in ParseIfStatement for why we create a scope for the
+ // condition and a new scope for substatement in C++.
+ //
+ ParseScope InnerScope(this, Scope::DeclScope,
+ C99orCXX && Tok.isNot(tok::l_brace));
+
+ // Read the body statement.
+ StmtResult Body(ParseStatement(TrailingElseLoc));
+
+ // Pop the scopes.
+ InnerScope.Exit();
+ SwitchScope.Exit();
+
+ if (Body.isInvalid()) {
+ // FIXME: Remove the case statement list from the Switch statement.
+
+ // Put the synthesized null statement on the same line as the end of switch
+ // condition.
+ SourceLocation SynthesizedNullStmtLocation = Cond.get()->getLocEnd();
+ Body = Actions.ActOnNullStmt(SynthesizedNullStmtLocation);
+ }
+
+ return Actions.ActOnFinishSwitchStmt(SwitchLoc, Switch.get(), Body.get());
+}
+
+/// ParseWhileStatement
+/// while-statement: [C99 6.8.5.1]
+/// 'while' '(' expression ')' statement
+/// [C++] 'while' '(' condition ')' statement
+StmtResult Parser::ParseWhileStatement(ParsedAttributes &attrs,
+ SourceLocation *TrailingElseLoc) {
+ // FIXME: Use attributes?
+
+ assert(Tok.is(tok::kw_while) && "Not a while stmt!");
+ SourceLocation WhileLoc = Tok.getLocation();
+ ConsumeToken(); // eat the 'while'.
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "while";
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+
+ bool C99orCXX = getLangOpts().C99 || getLangOpts().CPlusPlus;
+
+ // C99 6.8.5p5 - In C99, the while statement is a block. This is not
+ // the case for C90. Start the loop scope.
+ //
+ // C++ 6.4p3:
+ // A name introduced by a declaration in a condition is in scope from its
+ // point of declaration until the end of the substatements controlled by the
+ // condition.
+ // C++ 3.3.2p4:
+ // Names declared in the for-init-statement, and in the condition of if,
+ // while, for, and switch statements are local to the if, while, for, or
+ // switch statement (including the controlled statement).
+ //
+ unsigned ScopeFlags;
+ if (C99orCXX)
+ ScopeFlags = Scope::BreakScope | Scope::ContinueScope |
+ Scope::DeclScope | Scope::ControlScope;
+ else
+ ScopeFlags = Scope::BreakScope | Scope::ContinueScope;
+ ParseScope WhileScope(this, ScopeFlags);
+
+ // Parse the condition.
+ ExprResult Cond;
+ Decl *CondVar = 0;
+ if (ParseParenExprOrCondition(Cond, CondVar, WhileLoc, true))
+ return StmtError();
+
+ FullExprArg FullCond(Actions.MakeFullExpr(Cond.get()));
+
+  // C99 6.8.5p5 - In C99, the body of the while statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do this
+ // if the body isn't a compound statement to avoid push/pop in common cases.
+ //
+ // C++ 6.5p2:
+ // The substatement in an iteration-statement implicitly defines a local scope
+ // which is entered and exited each time through the loop.
+ //
+ // See comments in ParseIfStatement for why we create a scope for the
+ // condition and a new scope for substatement in C++.
+ //
+ ParseScope InnerScope(this, Scope::DeclScope,
+ C99orCXX && Tok.isNot(tok::l_brace));
+
+ // Read the body statement.
+ StmtResult Body(ParseStatement(TrailingElseLoc));
+
+ // Pop the body scope if needed.
+ InnerScope.Exit();
+ WhileScope.Exit();
+
+ if ((Cond.isInvalid() && !CondVar) || Body.isInvalid())
+ return StmtError();
+
+ return Actions.ActOnWhileStmt(WhileLoc, FullCond, CondVar, Body.get());
+}
+
+/// ParseDoStatement
+/// do-statement: [C99 6.8.5.2]
+/// 'do' statement 'while' '(' expression ')' ';'
+/// Note: this lets the caller parse the end ';'.
+StmtResult Parser::ParseDoStatement(ParsedAttributes &attrs) {
+ // FIXME: Use attributes?
+
+ assert(Tok.is(tok::kw_do) && "Not a do stmt!");
+ SourceLocation DoLoc = ConsumeToken(); // eat the 'do'.
+
+ // C99 6.8.5p5 - In C99, the do statement is a block. This is not
+ // the case for C90. Start the loop scope.
+ unsigned ScopeFlags;
+ if (getLangOpts().C99)
+ ScopeFlags = Scope::BreakScope | Scope::ContinueScope | Scope::DeclScope;
+ else
+ ScopeFlags = Scope::BreakScope | Scope::ContinueScope;
+
+ ParseScope DoScope(this, ScopeFlags);
+
+  // C99 6.8.5p5 - In C99, the body of the do statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do this
+ // if the body isn't a compound statement to avoid push/pop in common cases.
+ //
+ // C++ 6.5p2:
+ // The substatement in an iteration-statement implicitly defines a local scope
+ // which is entered and exited each time through the loop.
+ //
+ ParseScope InnerScope(this, Scope::DeclScope,
+ (getLangOpts().C99 || getLangOpts().CPlusPlus) &&
+ Tok.isNot(tok::l_brace));
+
+ // Read the body statement.
+ StmtResult Body(ParseStatement());
+
+ // Pop the body scope if needed.
+ InnerScope.Exit();
+
+ if (Tok.isNot(tok::kw_while)) {
+ if (!Body.isInvalid()) {
+ Diag(Tok, diag::err_expected_while);
+ Diag(DoLoc, diag::note_matching) << "do";
+ SkipUntil(tok::semi, false, true);
+ }
+ return StmtError();
+ }
+ SourceLocation WhileLoc = ConsumeToken();
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "do/while";
+ SkipUntil(tok::semi, false, true);
+ return StmtError();
+ }
+
+ // Parse the parenthesized condition.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ ExprResult Cond = ParseExpression();
+ T.consumeClose();
+ DoScope.Exit();
+
+ if (Cond.isInvalid() || Body.isInvalid())
+ return StmtError();
+
+ return Actions.ActOnDoStmt(DoLoc, Body.get(), WhileLoc, T.getOpenLocation(),
+ Cond.get(), T.getCloseLocation());
+}
+
+/// ParseForStatement
+/// for-statement: [C99 6.8.5.3]
+/// 'for' '(' expr[opt] ';' expr[opt] ';' expr[opt] ')' statement
+/// 'for' '(' declaration expr[opt] ';' expr[opt] ')' statement
+/// [C++] 'for' '(' for-init-statement condition[opt] ';' expression[opt] ')'
+/// [C++] statement
+/// [C++0x] 'for' '(' for-range-declaration : for-range-initializer ) statement
+/// [OBJC2] 'for' '(' declaration 'in' expr ')' statement
+/// [OBJC2] 'for' '(' expr 'in' expr ')' statement
+///
+/// [C++] for-init-statement:
+/// [C++] expression-statement
+/// [C++] simple-declaration
+///
+/// [C++0x] for-range-declaration:
+/// [C++0x] attribute-specifier-seq[opt] type-specifier-seq declarator
+/// [C++0x] for-range-initializer:
+/// [C++0x] expression
+/// [C++0x] braced-init-list [TODO]
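+///
+/// For illustration, each of these forms ends up in this function:
+///   for (int i = 0; i != n; ++i) body();    // C/C++
+///   for (int x : v) body(x);                // C++0x range-based for
+///   for (id obj in collection) { }          // Objective-C fast enumeration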
+StmtResult Parser::ParseForStatement(ParsedAttributes &attrs,
+ SourceLocation *TrailingElseLoc) {
+ // FIXME: Use attributes?
+
+ assert(Tok.is(tok::kw_for) && "Not a for stmt!");
+ SourceLocation ForLoc = ConsumeToken(); // eat the 'for'.
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "for";
+ SkipUntil(tok::semi);
+ return StmtError();
+ }
+
+ bool C99orCXXorObjC = getLangOpts().C99 || getLangOpts().CPlusPlus || getLangOpts().ObjC1;
+
+ // C99 6.8.5p5 - In C99, the for statement is a block. This is not
+ // the case for C90. Start the loop scope.
+ //
+ // C++ 6.4p3:
+ // A name introduced by a declaration in a condition is in scope from its
+ // point of declaration until the end of the substatements controlled by the
+ // condition.
+ // C++ 3.3.2p4:
+ // Names declared in the for-init-statement, and in the condition of if,
+ // while, for, and switch statements are local to the if, while, for, or
+ // switch statement (including the controlled statement).
+ // C++ 6.5.3p1:
+ // Names declared in the for-init-statement are in the same declarative-region
+ // as those declared in the condition.
+ //
+ unsigned ScopeFlags;
+ if (C99orCXXorObjC)
+ ScopeFlags = Scope::BreakScope | Scope::ContinueScope |
+ Scope::DeclScope | Scope::ControlScope;
+ else
+ ScopeFlags = Scope::BreakScope | Scope::ContinueScope;
+
+ ParseScope ForScope(this, ScopeFlags);
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ ExprResult Value;
+
+ bool ForEach = false, ForRange = false;
+ StmtResult FirstPart;
+ bool SecondPartIsInvalid = false;
+ FullExprArg SecondPart(Actions);
+ ExprResult Collection;
+ ForRangeInit ForRangeInit;
+ FullExprArg ThirdPart(Actions);
+ Decl *SecondVar = 0;
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(),
+ C99orCXXorObjC? Sema::PCC_ForInit
+ : Sema::PCC_Expression);
+ cutOffParsing();
+ return StmtError();
+ }
+
+ // Parse the first part of the for specifier.
+ if (Tok.is(tok::semi)) { // for (;
+ // no first part, eat the ';'.
+ ConsumeToken();
+ } else if (isForInitDeclaration()) { // for (int X = 4;
+ // Parse declaration, which eats the ';'.
+ if (!C99orCXXorObjC) // Use of C99-style for loops in C90 mode?
+ Diag(Tok, diag::ext_c99_variable_decl_in_for_loop);
+
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(attrs);
+
+    // In C++0x, "for (T NS:a" might not be a typo for "T NS::a": the colon
+    // may instead introduce a range-based for loop.
+ bool MightBeForRangeStmt = getLangOpts().CPlusPlus;
+ ColonProtectionRAIIObject ColonProtection(*this, MightBeForRangeStmt);
+
+ SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
+ StmtVector Stmts(Actions);
+ DeclGroupPtrTy DG = ParseSimpleDeclaration(Stmts, Declarator::ForContext,
+ DeclEnd, attrs, false,
+ MightBeForRangeStmt ?
+ &ForRangeInit : 0);
+ FirstPart = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
+
+ if (ForRangeInit.ParsedForRangeDecl()) {
+ Diag(ForRangeInit.ColonLoc, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_for_range : diag::ext_for_range);
+
+ ForRange = true;
+ } else if (Tok.is(tok::semi)) { // for (int x = 4;
+ ConsumeToken();
+ } else if ((ForEach = isTokIdentifier_in())) {
+ Actions.ActOnForEachDeclStmt(DG);
+ // ObjC: for (id x in expr)
+ ConsumeToken(); // consume 'in'
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCForCollection(getCurScope(), DG);
+ cutOffParsing();
+ return StmtError();
+ }
+ Collection = ParseExpression();
+ } else {
+ Diag(Tok, diag::err_expected_semi_for);
+ }
+ } else {
+ Value = ParseExpression();
+
+ ForEach = isTokIdentifier_in();
+
+ // Turn the expression into a stmt.
+ if (!Value.isInvalid()) {
+ if (ForEach)
+ FirstPart = Actions.ActOnForEachLValueExpr(Value.get());
+ else
+ FirstPart = Actions.ActOnExprStmt(Actions.MakeFullExpr(Value.get()));
+ }
+
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ } else if (ForEach) {
+ ConsumeToken(); // consume 'in'
+
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteObjCForCollection(getCurScope(), DeclGroupPtrTy());
+ cutOffParsing();
+ return StmtError();
+ }
+ Collection = ParseExpression();
+ } else if (getLangOpts().CPlusPlus0x && Tok.is(tok::colon) && FirstPart.get()) {
+ // User tried to write the reasonable, but ill-formed, for-range-statement
+ // for (expr : expr) { ... }
+ Diag(Tok, diag::err_for_range_expected_decl)
+ << FirstPart.get()->getSourceRange();
+ SkipUntil(tok::r_paren, false, true);
+ SecondPartIsInvalid = true;
+ } else {
+ if (!Value.isInvalid()) {
+ Diag(Tok, diag::err_expected_semi_for);
+ } else {
+ // Skip until semicolon or rparen, don't consume it.
+ SkipUntil(tok::r_paren, true, true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ }
+ }
+ }
+ if (!ForEach && !ForRange) {
+ assert(!SecondPart.get() && "Shouldn't have a second expression yet.");
+ // Parse the second part of the for specifier.
+ if (Tok.is(tok::semi)) { // for (...;;
+ // no second part.
+ } else if (Tok.is(tok::r_paren)) {
+ // missing both semicolons.
+ } else {
+ ExprResult Second;
+ if (getLangOpts().CPlusPlus)
+ ParseCXXCondition(Second, SecondVar, ForLoc, true);
+ else {
+ Second = ParseExpression();
+ if (!Second.isInvalid())
+ Second = Actions.ActOnBooleanCondition(getCurScope(), ForLoc,
+ Second.get());
+ }
+ SecondPartIsInvalid = Second.isInvalid();
+ SecondPart = Actions.MakeFullExpr(Second.get());
+ }
+
+ if (Tok.isNot(tok::semi)) {
+ if (!SecondPartIsInvalid || SecondVar)
+ Diag(Tok, diag::err_expected_semi_for);
+ else
+ // Skip until semicolon or rparen, don't consume it.
+ SkipUntil(tok::r_paren, true, true);
+ }
+
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ }
+
+ // Parse the third part of the for specifier.
+ if (Tok.isNot(tok::r_paren)) { // for (...;...;)
+ ExprResult Third = ParseExpression();
+ ThirdPart = Actions.MakeFullExpr(Third.take());
+ }
+ }
+ // Match the ')'.
+ T.consumeClose();
+
+ // We need to perform most of the semantic analysis for a C++0x for-range
+  // statement before parsing the body, in order to be able to deduce the type
+ // of an auto-typed loop variable.
+ StmtResult ForRangeStmt;
+ if (ForRange) {
+ ForRangeStmt = Actions.ActOnCXXForRangeStmt(ForLoc, T.getOpenLocation(),
+ FirstPart.take(),
+ ForRangeInit.ColonLoc,
+ ForRangeInit.RangeExpr.get(),
+ T.getCloseLocation());
+
+
+ // Similarly, we need to do the semantic analysis for a for-range
+ // statement immediately in order to close over temporaries correctly.
+ } else if (ForEach) {
+ if (!Collection.isInvalid())
+ Collection =
+ Actions.ActOnObjCForCollectionOperand(ForLoc, Collection.take());
+ }
+
+  // C99 6.8.5p5 - In C99, the body of the for statement is a scope, even if
+ // there is no compound stmt. C90 does not have this clause. We only do this
+ // if the body isn't a compound statement to avoid push/pop in common cases.
+ //
+ // C++ 6.5p2:
+ // The substatement in an iteration-statement implicitly defines a local scope
+ // which is entered and exited each time through the loop.
+ //
+ // See comments in ParseIfStatement for why we create a scope for
+ // for-init-statement/condition and a new scope for substatement in C++.
+ //
+ ParseScope InnerScope(this, Scope::DeclScope,
+ C99orCXXorObjC && Tok.isNot(tok::l_brace));
+
+ // Read the body statement.
+ StmtResult Body(ParseStatement(TrailingElseLoc));
+
+ // Pop the body scope if needed.
+ InnerScope.Exit();
+
+ // Leave the for-scope.
+ ForScope.Exit();
+
+ if (Body.isInvalid())
+ return StmtError();
+
+ if (ForEach)
+ return Actions.ActOnObjCForCollectionStmt(ForLoc, T.getOpenLocation(),
+ FirstPart.take(),
+ Collection.take(),
+ T.getCloseLocation(),
+ Body.take());
+
+ if (ForRange)
+ return Actions.FinishCXXForRangeStmt(ForRangeStmt.take(), Body.take());
+
+ return Actions.ActOnForStmt(ForLoc, T.getOpenLocation(), FirstPart.take(),
+ SecondPart, SecondVar, ThirdPart,
+ T.getCloseLocation(), Body.take());
+}
+
+/// ParseGotoStatement
+/// jump-statement:
+/// 'goto' identifier ';'
+/// [GNU] 'goto' '*' expression ';'
+///
+/// Note: this lets the caller parse the end ';'.
+///
+StmtResult Parser::ParseGotoStatement(ParsedAttributes &attrs) {
+ // FIXME: Use attributes?
+
+ assert(Tok.is(tok::kw_goto) && "Not a goto stmt!");
+ SourceLocation GotoLoc = ConsumeToken(); // eat the 'goto'.
+
+ StmtResult Res;
+ if (Tok.is(tok::identifier)) {
+ LabelDecl *LD = Actions.LookupOrCreateLabel(Tok.getIdentifierInfo(),
+ Tok.getLocation());
+ Res = Actions.ActOnGotoStmt(GotoLoc, Tok.getLocation(), LD);
+ ConsumeToken();
+ } else if (Tok.is(tok::star)) {
+ // GNU indirect goto extension.
+ Diag(Tok, diag::ext_gnu_indirect_goto);
+ SourceLocation StarLoc = ConsumeToken();
+ ExprResult R(ParseExpression());
+ if (R.isInvalid()) { // Skip to the semicolon, but don't consume it.
+ SkipUntil(tok::semi, false, true);
+ return StmtError();
+ }
+ Res = Actions.ActOnIndirectGotoStmt(GotoLoc, StarLoc, R.take());
+ } else {
+ Diag(Tok, diag::err_expected_ident);
+ return StmtError();
+ }
+
+ return move(Res);
+}
+
+/// ParseContinueStatement
+/// jump-statement:
+/// 'continue' ';'
+///
+/// Note: this lets the caller parse the end ';'.
+///
+StmtResult Parser::ParseContinueStatement(ParsedAttributes &attrs) {
+ // FIXME: Use attributes?
+
+ SourceLocation ContinueLoc = ConsumeToken(); // eat the 'continue'.
+ return Actions.ActOnContinueStmt(ContinueLoc, getCurScope());
+}
+
+/// ParseBreakStatement
+/// jump-statement:
+/// 'break' ';'
+///
+/// Note: this lets the caller parse the end ';'.
+///
+StmtResult Parser::ParseBreakStatement(ParsedAttributes &attrs) {
+ // FIXME: Use attributes?
+
+ SourceLocation BreakLoc = ConsumeToken(); // eat the 'break'.
+ return Actions.ActOnBreakStmt(BreakLoc, getCurScope());
+}
+
+/// ParseReturnStatement
+/// jump-statement:
+/// 'return' expression[opt] ';'
+StmtResult Parser::ParseReturnStatement(ParsedAttributes &attrs) {
+ // FIXME: Use attributes?
+
+ assert(Tok.is(tok::kw_return) && "Not a return stmt!");
+ SourceLocation ReturnLoc = ConsumeToken(); // eat the 'return'.
+
+ ExprResult R;
+ if (Tok.isNot(tok::semi)) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteReturn(getCurScope());
+ cutOffParsing();
+ return StmtError();
+ }
+
+ if (Tok.is(tok::l_brace) && getLangOpts().CPlusPlus) {
+ R = ParseInitializer();
+ if (R.isUsable())
+ Diag(R.get()->getLocStart(), getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_generalized_initializer_lists :
+ diag::ext_generalized_initializer_lists)
+ << R.get()->getSourceRange();
+ } else
+ R = ParseExpression();
+ if (R.isInvalid()) { // Skip to the semicolon, but don't consume it.
+ SkipUntil(tok::semi, false, true);
+ return StmtError();
+ }
+ }
+ return Actions.ActOnReturnStmt(ReturnLoc, R.take());
+}
+
+/// ParseMicrosoftAsmStatement. When -fms-extensions/-fasm-blocks is enabled,
+/// this routine is called to collect the tokens for an MS asm statement.
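+///
+/// For illustration, both forms below are collected token-by-token here:
+///   __asm mov eax, ebx   ; from the ';' on, this is an asm comment
+///   __asm { mov eax, ebx
+///           add eax, 4 }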
+StmtResult Parser::ParseMicrosoftAsmStatement(SourceLocation AsmLoc) {
+ SourceManager &SrcMgr = PP.getSourceManager();
+ SourceLocation EndLoc = AsmLoc;
+ do {
+ bool InBraces = false;
+ unsigned short savedBraceCount = 0;
+ bool InAsmComment = false;
+ FileID FID;
+ unsigned LineNo = 0;
+ unsigned NumTokensRead = 0;
+ SourceLocation LBraceLoc;
+
+ if (Tok.is(tok::l_brace)) {
+ // Braced inline asm: consume the opening brace.
+ InBraces = true;
+ savedBraceCount = BraceCount;
+ EndLoc = LBraceLoc = ConsumeBrace();
+ ++NumTokensRead;
+ } else {
+ // Single-line inline asm; compute which line it is on.
+ std::pair<FileID, unsigned> ExpAsmLoc =
+ SrcMgr.getDecomposedExpansionLoc(EndLoc);
+ FID = ExpAsmLoc.first;
+ LineNo = SrcMgr.getLineNumber(FID, ExpAsmLoc.second);
+ }
+
+ SourceLocation TokLoc = Tok.getLocation();
+ do {
+ // If we hit EOF, we're done, period.
+ if (Tok.is(tok::eof))
+ break;
+ // When we consume the closing brace, we're done.
+ if (InBraces && BraceCount == savedBraceCount)
+ break;
+
+ if (!InAsmComment && Tok.is(tok::semi)) {
+ // A semicolon in an asm is the start of a comment.
+ InAsmComment = true;
+ if (InBraces) {
+ // Compute which line the comment is on.
+ std::pair<FileID, unsigned> ExpSemiLoc =
+ SrcMgr.getDecomposedExpansionLoc(TokLoc);
+ FID = ExpSemiLoc.first;
+ LineNo = SrcMgr.getLineNumber(FID, ExpSemiLoc.second);
+ }
+ } else if (!InBraces || InAsmComment) {
+ // If end-of-line is significant, check whether this token is on a
+ // new line.
+ std::pair<FileID, unsigned> ExpLoc =
+ SrcMgr.getDecomposedExpansionLoc(TokLoc);
+ if (ExpLoc.first != FID ||
+ SrcMgr.getLineNumber(ExpLoc.first, ExpLoc.second) != LineNo) {
+ // If this is a single-line __asm, we're done.
+ if (!InBraces)
+ break;
+ // We're no longer in a comment.
+ InAsmComment = false;
+ } else if (!InAsmComment && Tok.is(tok::r_brace)) {
+ // Single-line asm always ends when a closing brace is seen.
+ // FIXME: This is compatible with Apple gcc's -fasm-blocks; what
+ // does MSVC do here?
+ break;
+ }
+ }
+
+ // Consume the next token; make sure we don't modify the brace count etc.
+ // if we are in a comment.
+ EndLoc = TokLoc;
+ if (InAsmComment)
+ PP.Lex(Tok);
+ else
+ ConsumeAnyToken();
+ TokLoc = Tok.getLocation();
+ ++NumTokensRead;
+ } while (1);
+
+ if (InBraces && BraceCount != savedBraceCount) {
+ // __asm without closing brace (this can happen at EOF).
+ Diag(Tok, diag::err_expected_rbrace);
+ Diag(LBraceLoc, diag::note_matching) << "{";
+ return StmtError();
+ } else if (NumTokensRead == 0) {
+ // Empty __asm.
+ Diag(Tok, diag::err_expected_lbrace);
+ return StmtError();
+ }
+    // Multiple adjacent asm statements are merged into a single asm statement
+    // in the AST.
+ if (!Tok.is(tok::kw_asm))
+ break;
+ EndLoc = ConsumeToken();
+ } while (1);
+ // FIXME: Need to actually grab the data and pass it on to Sema. Ideally,
+ // what Sema wants is a string of the entire inline asm, with one instruction
+ // per line and all the __asm keywords stripped out, and a way of mapping
+ // from any character of that string to its location in the original source
+ // code. I'm not entirely sure how to go about that, though.
+ Token t;
+ t.setKind(tok::string_literal);
+ t.setLiteralData("\"/*FIXME: not done*/\"");
+ t.clearFlag(Token::NeedsCleaning);
+ t.setLength(21);
+ ExprResult AsmString(Actions.ActOnStringLiteral(&t, 1));
+ ExprVector Constraints(Actions);
+ ExprVector Exprs(Actions);
+ ExprVector Clobbers(Actions);
+ return Actions.ActOnAsmStmt(AsmLoc, true, true, 0, 0, 0,
+ move_arg(Constraints), move_arg(Exprs),
+ AsmString.take(), move_arg(Clobbers),
+ EndLoc, true);
+}
+
+/// ParseAsmStatement - Parse a GNU extended asm statement.
+/// asm-statement:
+/// gnu-asm-statement
+/// ms-asm-statement
+///
+/// [GNU] gnu-asm-statement:
+/// 'asm' type-qualifier[opt] '(' asm-argument ')' ';'
+///
+/// [GNU] asm-argument:
+/// asm-string-literal
+/// asm-string-literal ':' asm-operands[opt]
+/// asm-string-literal ':' asm-operands[opt] ':' asm-operands[opt]
+/// asm-string-literal ':' asm-operands[opt] ':' asm-operands[opt]
+/// ':' asm-clobbers
+///
+/// [GNU] asm-clobbers:
+/// asm-string-literal
+/// asm-clobbers ',' asm-string-literal
+///
+/// [MS] ms-asm-statement:
+/// ms-asm-block
+/// ms-asm-block ms-asm-statement
+///
+/// [MS] ms-asm-block:
+/// '__asm' ms-asm-line '\n'
+/// '__asm' '{' ms-asm-instruction-block[opt] '}' ';'[opt]
+///
+/// [MS] ms-asm-instruction-block:
+/// ms-asm-line
+/// ms-asm-line '\n' ms-asm-instruction-block
+///
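+/// Illustrative examples (added for clarity, not from the original source;
+/// 'Sum' and 'X' are hypothetical variables):
+///   asm volatile ("addl %1, %0" : "+r"(Sum) : "r"(X) : "cc");  // GNU form
+///   __asm { mov eax, X }                                       // MS form
+///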
+StmtResult Parser::ParseAsmStatement(bool &msAsm) {
+ assert(Tok.is(tok::kw_asm) && "Not an asm stmt");
+ SourceLocation AsmLoc = ConsumeToken();
+
+ if (getLangOpts().MicrosoftExt && Tok.isNot(tok::l_paren) && !isTypeQualifier()) {
+ msAsm = true;
+ return ParseMicrosoftAsmStatement(AsmLoc);
+ }
+ DeclSpec DS(AttrFactory);
+ SourceLocation Loc = Tok.getLocation();
+ ParseTypeQualifierListOpt(DS, true, false);
+
+  // GNU asms accept, but warn about, type-qualifiers other than volatile.
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
+ Diag(Loc, diag::w_asm_qualifier_ignored) << "const";
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_restrict)
+ Diag(Loc, diag::w_asm_qualifier_ignored) << "restrict";
+
+ // Remember if this was a volatile asm.
+ bool isVolatile = DS.getTypeQualifiers() & DeclSpec::TQ_volatile;
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "asm";
+ SkipUntil(tok::r_paren);
+ return StmtError();
+ }
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+
+ ExprResult AsmString(ParseAsmStringLiteral());
+ if (AsmString.isInvalid()) {
+ // Consume up to and including the closing paren.
+ T.skipToEnd();
+ return StmtError();
+ }
+
+ SmallVector<IdentifierInfo *, 4> Names;
+ ExprVector Constraints(Actions);
+ ExprVector Exprs(Actions);
+ ExprVector Clobbers(Actions);
+
+ if (Tok.is(tok::r_paren)) {
+ // We have a simple asm expression like 'asm("foo")'.
+ T.consumeClose();
+ return Actions.ActOnAsmStmt(AsmLoc, /*isSimple*/ true, isVolatile,
+ /*NumOutputs*/ 0, /*NumInputs*/ 0, 0,
+ move_arg(Constraints), move_arg(Exprs),
+ AsmString.take(), move_arg(Clobbers),
+ T.getCloseLocation());
+ }
+
+ // Parse Outputs, if present.
+ bool AteExtraColon = false;
+ if (Tok.is(tok::colon) || Tok.is(tok::coloncolon)) {
+ // In C++ mode, parse "::" like ": :".
+ AteExtraColon = Tok.is(tok::coloncolon);
+ ConsumeToken();
+
+ if (!AteExtraColon &&
+ ParseAsmOperandsOpt(Names, Constraints, Exprs))
+ return StmtError();
+ }
+
+ unsigned NumOutputs = Names.size();
+
+ // Parse Inputs, if present.
+ if (AteExtraColon ||
+ Tok.is(tok::colon) || Tok.is(tok::coloncolon)) {
+ // In C++ mode, parse "::" like ": :".
+ if (AteExtraColon)
+ AteExtraColon = false;
+ else {
+ AteExtraColon = Tok.is(tok::coloncolon);
+ ConsumeToken();
+ }
+
+ if (!AteExtraColon &&
+ ParseAsmOperandsOpt(Names, Constraints, Exprs))
+ return StmtError();
+ }
+
+ assert(Names.size() == Constraints.size() &&
+ Constraints.size() == Exprs.size() &&
+ "Input operand size mismatch!");
+
+ unsigned NumInputs = Names.size() - NumOutputs;
+
+ // Parse the clobbers, if present.
+ if (AteExtraColon || Tok.is(tok::colon)) {
+ if (!AteExtraColon)
+ ConsumeToken();
+
+ // Parse the asm-string list for clobbers if present.
+ if (Tok.isNot(tok::r_paren)) {
+ while (1) {
+ ExprResult Clobber(ParseAsmStringLiteral());
+
+ if (Clobber.isInvalid())
+ break;
+
+ Clobbers.push_back(Clobber.release());
+
+ if (Tok.isNot(tok::comma)) break;
+ ConsumeToken();
+ }
+ }
+ }
+
+ T.consumeClose();
+ return Actions.ActOnAsmStmt(AsmLoc, false, isVolatile,
+ NumOutputs, NumInputs, Names.data(),
+ move_arg(Constraints), move_arg(Exprs),
+ AsmString.take(), move_arg(Clobbers),
+ T.getCloseLocation());
+}
+
+/// ParseAsmOperandsOpt - Parse the asm-operands production, if present, as
+/// used by asm-statement; assumes the leading ':' token has been eaten.
+///
+/// [GNU] asm-operands:
+/// asm-operand
+/// asm-operands ',' asm-operand
+///
+/// [GNU] asm-operand:
+/// asm-string-literal '(' expression ')'
+/// '[' identifier ']' asm-string-literal '(' expression ')'
+///
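+/// Illustrative operands (added for clarity; 'Result' and 'Total' are
+/// hypothetical variables):
+///   "=r" (Result)          // asm-operand
+///   [sum] "+r" (Total)     // asm-operand with a symbolic [identifier]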
+//
+// FIXME: Avoid unnecessary std::string trashing.
+bool Parser::ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
+ SmallVectorImpl<Expr *> &Constraints,
+ SmallVectorImpl<Expr *> &Exprs) {
+ // 'asm-operands' isn't present?
+ if (!isTokenStringLiteral() && Tok.isNot(tok::l_square))
+ return false;
+
+ while (1) {
+ // Read the [id] if present.
+ if (Tok.is(tok::l_square)) {
+ BalancedDelimiterTracker T(*this, tok::l_square);
+ T.consumeOpen();
+
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_expected_ident);
+ SkipUntil(tok::r_paren);
+ return true;
+ }
+
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ ConsumeToken();
+
+ Names.push_back(II);
+ T.consumeClose();
+ } else
+ Names.push_back(0);
+
+ ExprResult Constraint(ParseAsmStringLiteral());
+ if (Constraint.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return true;
+ }
+ Constraints.push_back(Constraint.release());
+
+ if (Tok.isNot(tok::l_paren)) {
+ Diag(Tok, diag::err_expected_lparen_after) << "asm operand";
+ SkipUntil(tok::r_paren);
+ return true;
+ }
+
+ // Read the parenthesized expression.
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ T.consumeOpen();
+ ExprResult Res(ParseExpression());
+ T.consumeClose();
+ if (Res.isInvalid()) {
+ SkipUntil(tok::r_paren);
+ return true;
+ }
+ Exprs.push_back(Res.release());
+ // Eat the comma and continue parsing if it exists.
+ if (Tok.isNot(tok::comma)) return false;
+ ConsumeToken();
+ }
+}
+
+Decl *Parser::ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope) {
+ assert(Tok.is(tok::l_brace));
+ SourceLocation LBraceLoc = Tok.getLocation();
+
+ if (SkipFunctionBodies && trySkippingFunctionBody()) {
+ BodyScope.Exit();
+ return Actions.ActOnFinishFunctionBody(Decl, 0);
+ }
+
+ PrettyDeclStackTraceEntry CrashInfo(Actions, Decl, LBraceLoc,
+ "parsing function body");
+
+ // Do not enter a scope for the brace, as the arguments are in the same scope
+ // (the function body) as the body itself. Instead, just read the statement
+ // list and put it into a CompoundStmt for safe keeping.
+ StmtResult FnBody(ParseCompoundStatementBody());
+
+ // If the function body could not be parsed, make a bogus compoundstmt.
+ if (FnBody.isInvalid()) {
+ Sema::CompoundScopeRAII CompoundScope(Actions);
+ FnBody = Actions.ActOnCompoundStmt(LBraceLoc, LBraceLoc,
+ MultiStmtArg(Actions), false);
+ }
+
+ BodyScope.Exit();
+ return Actions.ActOnFinishFunctionBody(Decl, FnBody.take());
+}
+
+/// ParseFunctionTryBlock - Parse a C++ function-try-block.
+///
+/// function-try-block:
+/// 'try' ctor-initializer[opt] compound-statement handler-seq
+///
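+/// Illustrative use (added for clarity; 'Widget' and 'Data' are hypothetical):
+///   Widget::Widget(int N) try : Data(N) { } catch (...) { throw; }
+///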
+Decl *Parser::ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope) {
+ assert(Tok.is(tok::kw_try) && "Expected 'try'");
+ SourceLocation TryLoc = ConsumeToken();
+
+ PrettyDeclStackTraceEntry CrashInfo(Actions, Decl, TryLoc,
+ "parsing function try block");
+
+ // Constructor initializer list?
+ if (Tok.is(tok::colon))
+ ParseConstructorInitializer(Decl);
+ else
+ Actions.ActOnDefaultCtorInitializers(Decl);
+
+ if (SkipFunctionBodies && trySkippingFunctionBody()) {
+ BodyScope.Exit();
+ return Actions.ActOnFinishFunctionBody(Decl, 0);
+ }
+
+ SourceLocation LBraceLoc = Tok.getLocation();
+ StmtResult FnBody(ParseCXXTryBlockCommon(TryLoc));
+ // If we failed to parse the try-catch, we just give the function an empty
+ // compound statement as the body.
+ if (FnBody.isInvalid()) {
+ Sema::CompoundScopeRAII CompoundScope(Actions);
+ FnBody = Actions.ActOnCompoundStmt(LBraceLoc, LBraceLoc,
+ MultiStmtArg(Actions), false);
+ }
+
+ BodyScope.Exit();
+ return Actions.ActOnFinishFunctionBody(Decl, FnBody.take());
+}
+
+bool Parser::trySkippingFunctionBody() {
+ assert(Tok.is(tok::l_brace));
+ assert(SkipFunctionBodies &&
+ "Should only be called when SkipFunctionBodies is enabled");
+
+ // We're in code-completion mode. Skip parsing for all function bodies unless
+ // the body contains the code-completion point.
+ TentativeParsingAction PA(*this);
+ ConsumeBrace();
+ if (SkipUntil(tok::r_brace, /*StopAtSemi=*/false, /*DontConsume=*/false,
+ /*StopAtCodeCompletion=*/PP.isCodeCompletionEnabled())) {
+ PA.Commit();
+ return true;
+ }
+
+ PA.Revert();
+ return false;
+}
+
+/// ParseCXXTryBlock - Parse a C++ try-block.
+///
+/// try-block:
+/// 'try' compound-statement handler-seq
+///
+StmtResult Parser::ParseCXXTryBlock(ParsedAttributes &attrs) {
+ // FIXME: Add attributes?
+
+ assert(Tok.is(tok::kw_try) && "Expected 'try'");
+
+ SourceLocation TryLoc = ConsumeToken();
+ return ParseCXXTryBlockCommon(TryLoc);
+}
+
+/// ParseCXXTryBlockCommon - Parse the common part of try-block and
+/// function-try-block.
+///
+/// try-block:
+/// 'try' compound-statement handler-seq
+///
+/// function-try-block:
+/// 'try' ctor-initializer[opt] compound-statement handler-seq
+///
+/// handler-seq:
+/// handler handler-seq[opt]
+///
+/// [Borland] try-block:
+/// 'try' compound-statement seh-except-block
+/// 'try' compound-statement seh-finally-block
+///
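+/// Illustrative forms (added for clarity; the identifiers are hypothetical):
+///   try { Run(); } catch (const Error &E) { Handle(E); }   // ISO C++
+///   try { Run(); } __except(1) { Recover(); }              // Borland SEH
+///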
+StmtResult Parser::ParseCXXTryBlockCommon(SourceLocation TryLoc) {
+ if (Tok.isNot(tok::l_brace))
+ return StmtError(Diag(Tok, diag::err_expected_lbrace));
+ // FIXME: Possible draft standard bug: attribute-specifier should be allowed?
+ ParsedAttributesWithRange attrs(AttrFactory);
+ StmtResult TryBlock(ParseCompoundStatement(attrs, /*isStmtExpr=*/false,
+ Scope::DeclScope|Scope::TryScope));
+ if (TryBlock.isInvalid())
+ return move(TryBlock);
+
+ // Borland allows SEH-handlers with 'try'
+
+ if((Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo() == getSEHExceptKeyword()) ||
+ Tok.is(tok::kw___finally)) {
+ // TODO: Factor into common return ParseSEHHandlerCommon(...)
+ StmtResult Handler;
+ if(Tok.getIdentifierInfo() == getSEHExceptKeyword()) {
+ SourceLocation Loc = ConsumeToken();
+ Handler = ParseSEHExceptBlock(Loc);
+ }
+ else {
+ SourceLocation Loc = ConsumeToken();
+ Handler = ParseSEHFinallyBlock(Loc);
+ }
+ if(Handler.isInvalid())
+ return move(Handler);
+
+ return Actions.ActOnSEHTryBlock(true /* IsCXXTry */,
+ TryLoc,
+ TryBlock.take(),
+ Handler.take());
+ }
+ else {
+ StmtVector Handlers(Actions);
+ MaybeParseCXX0XAttributes(attrs);
+ ProhibitAttributes(attrs);
+
+ if (Tok.isNot(tok::kw_catch))
+ return StmtError(Diag(Tok, diag::err_expected_catch));
+ while (Tok.is(tok::kw_catch)) {
+ StmtResult Handler(ParseCXXCatchBlock());
+ if (!Handler.isInvalid())
+ Handlers.push_back(Handler.release());
+ }
+ // Don't bother creating the full statement if we don't have any usable
+ // handlers.
+ if (Handlers.empty())
+ return StmtError();
+
+ return Actions.ActOnCXXTryBlock(TryLoc, TryBlock.take(), move_arg(Handlers));
+ }
+}
+
+/// ParseCXXCatchBlock - Parse a C++ catch block, which the C++ standard calls
+/// a handler.
+///
+/// handler:
+/// 'catch' '(' exception-declaration ')' compound-statement
+///
+/// exception-declaration:
+/// type-specifier-seq declarator
+/// type-specifier-seq abstract-declarator
+/// type-specifier-seq
+/// '...'
+///
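+/// Illustrative handlers (added for clarity; the identifiers are hypothetical):
+///   catch (const std::exception &E) { Log(E.what()); }  // with declarator
+///   catch (int) { }                                      // type-specifier-seq
+///   catch (...) { }                                      // catch-all
+///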
+StmtResult Parser::ParseCXXCatchBlock() {
+ assert(Tok.is(tok::kw_catch) && "Expected 'catch'");
+
+ SourceLocation CatchLoc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.expectAndConsume(diag::err_expected_lparen))
+ return StmtError();
+
+ // C++ 3.3.2p3:
+ // The name in a catch exception-declaration is local to the handler and
+ // shall not be redeclared in the outermost block of the handler.
+ ParseScope CatchScope(this, Scope::DeclScope | Scope::ControlScope);
+
+ // exception-declaration is equivalent to '...' or a parameter-declaration
+ // without default arguments.
+ Decl *ExceptionDecl = 0;
+ if (Tok.isNot(tok::ellipsis)) {
+ DeclSpec DS(AttrFactory);
+ if (ParseCXXTypeSpecifierSeq(DS))
+ return StmtError();
+ Declarator ExDecl(DS, Declarator::CXXCatchContext);
+ ParseDeclarator(ExDecl);
+ ExceptionDecl = Actions.ActOnExceptionDeclarator(getCurScope(), ExDecl);
+ } else
+ ConsumeToken();
+
+ T.consumeClose();
+ if (T.getCloseLocation().isInvalid())
+ return StmtError();
+
+ if (Tok.isNot(tok::l_brace))
+ return StmtError(Diag(Tok, diag::err_expected_lbrace));
+
+ // FIXME: Possible draft standard bug: attribute-specifier should be allowed?
+ ParsedAttributes attrs(AttrFactory);
+ StmtResult Block(ParseCompoundStatement(attrs));
+ if (Block.isInvalid())
+ return move(Block);
+
+ return Actions.ActOnCXXCatchBlock(CatchLoc, ExceptionDecl, Block.take());
+}
+
+void Parser::ParseMicrosoftIfExistsStatement(StmtVector &Stmts) {
+ IfExistsCondition Result;
+ if (ParseMicrosoftIfExistsCondition(Result))
+ return;
+
+ // Handle dependent statements by parsing the braces as a compound statement.
+  // This is not the same behavior as Visual C++, which doesn't treat this as a
+ // compound statement, but for Clang's type checking we can't have anything
+ // inside these braces escaping to the surrounding code.
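+  //
+  // Illustrative use of the extension (added comment; the names are
+  // hypothetical):
+  //   __if_exists (Widget::Size) { return W.Size; }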
+ if (Result.Behavior == IEB_Dependent) {
+ if (!Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::err_expected_lbrace);
+ return;
+ }
+
+ ParsedAttributes Attrs(AttrFactory);
+ StmtResult Compound = ParseCompoundStatement(Attrs);
+ if (Compound.isInvalid())
+ return;
+
+ StmtResult DepResult = Actions.ActOnMSDependentExistsStmt(Result.KeywordLoc,
+ Result.IsIfExists,
+ Result.SS,
+ Result.Name,
+ Compound.get());
+ if (DepResult.isUsable())
+ Stmts.push_back(DepResult.get());
+ return;
+ }
+
+ BalancedDelimiterTracker Braces(*this, tok::l_brace);
+ if (Braces.consumeOpen()) {
+ Diag(Tok, diag::err_expected_lbrace);
+ return;
+ }
+
+ switch (Result.Behavior) {
+ case IEB_Parse:
+ // Parse the statements below.
+ break;
+
+ case IEB_Dependent:
+ llvm_unreachable("Dependent case handled above");
+
+ case IEB_Skip:
+ Braces.skipToEnd();
+ return;
+ }
+
+ // Condition is true, parse the statements.
+ while (Tok.isNot(tok::r_brace)) {
+ StmtResult R = ParseStatementOrDeclaration(Stmts, false);
+ if (R.isUsable())
+ Stmts.push_back(R.release());
+ }
+ Braces.consumeClose();
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
new file mode 100644
index 0000000..61cd9f2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseTemplate.cpp
@@ -0,0 +1,1292 @@
+//===--- ParseTemplate.cpp - Template Parsing -----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements parsing of C++ templates.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/Scope.h"
+#include "RAIIObjectsForParser.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ASTConsumer.h"
+using namespace clang;
+
+/// \brief Parse a template declaration, explicit instantiation, or
+/// explicit specialization.
+Decl *
+Parser::ParseDeclarationStartingWithTemplate(unsigned Context,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS,
+ AttributeList *AccessAttrs) {
+ ObjCDeclContextSwitch ObjCDC(*this);
+
+ if (Tok.is(tok::kw_template) && NextToken().isNot(tok::less)) {
+ return ParseExplicitInstantiation(Context,
+ SourceLocation(), ConsumeToken(),
+ DeclEnd, AS);
+ }
+ return ParseTemplateDeclarationOrSpecialization(Context, DeclEnd, AS,
+ AccessAttrs);
+}
+
+/// \brief RAII class that manages the template parameter depth.
+namespace {
+ class TemplateParameterDepthCounter {
+ unsigned &Depth;
+ unsigned AddedLevels;
+
+ public:
+ explicit TemplateParameterDepthCounter(unsigned &Depth)
+ : Depth(Depth), AddedLevels(0) { }
+
+ ~TemplateParameterDepthCounter() {
+ Depth -= AddedLevels;
+ }
+
+ void operator++() {
+ ++Depth;
+ ++AddedLevels;
+ }
+
+ operator unsigned() const { return Depth; }
+ };
+}
+
+/// \brief Parse a template declaration or an explicit specialization.
+///
+/// Template declarations include one or more template parameter lists
+/// and either the function or class template declaration. Explicit
+/// specializations contain one or more 'template < >' prefixes
+/// followed by a (possibly templated) declaration. Since the
+/// syntactic form of both features is nearly identical, we parse all
+/// of the template headers together and let semantic analysis sort
+/// the declarations from the explicit specializations.
+///
+/// template-declaration: [C++ temp]
+/// 'export'[opt] 'template' '<' template-parameter-list '>' declaration
+///
+/// explicit-specialization: [ C++ temp.expl.spec]
+/// 'template' '<' '>' declaration
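+///
+/// Illustrative forms (added for clarity; 'S' is hypothetical):
+///   template<typename T> struct S { T Value; };  // template-declaration
+///   template<> struct S<void> { };               // explicit-specialization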
+Decl *
+Parser::ParseTemplateDeclarationOrSpecialization(unsigned Context,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS,
+ AttributeList *AccessAttrs) {
+ assert((Tok.is(tok::kw_export) || Tok.is(tok::kw_template)) &&
+ "Token does not start a template declaration.");
+
+ // Enter template-parameter scope.
+ ParseScope TemplateParmScope(this, Scope::TemplateParamScope);
+
+ // Tell the action that names should be checked in the context of
+ // the declaration to come.
+ ParsingDeclRAIIObject ParsingTemplateParams(*this);
+
+ // Parse multiple levels of template headers within this template
+ // parameter scope, e.g.,
+ //
+ // template<typename T>
+ // template<typename U>
+ // class A<T>::B { ... };
+ //
+ // We parse multiple levels non-recursively so that we can build a
+ // single data structure containing all of the template parameter
+ // lists to easily differentiate between the case above and:
+ //
+ // template<typename T>
+ // class A {
+ // template<typename U> class B;
+ // };
+ //
+ // In the first case, the action for declaring A<T>::B receives
+ // both template parameter lists. In the second case, the action for
+ // defining A<T>::B receives just the inner template parameter list
+ // (and retrieves the outer template parameter list from its
+ // context).
+ bool isSpecialization = true;
+ bool LastParamListWasEmpty = false;
+ TemplateParameterLists ParamLists;
+ TemplateParameterDepthCounter Depth(TemplateParameterDepth);
+ do {
+ // Consume the 'export', if any.
+ SourceLocation ExportLoc;
+ if (Tok.is(tok::kw_export)) {
+ ExportLoc = ConsumeToken();
+ }
+
+ // Consume the 'template', which should be here.
+ SourceLocation TemplateLoc;
+ if (Tok.is(tok::kw_template)) {
+ TemplateLoc = ConsumeToken();
+ } else {
+ Diag(Tok.getLocation(), diag::err_expected_template);
+ return 0;
+ }
+
+ // Parse the '<' template-parameter-list '>'
+ SourceLocation LAngleLoc, RAngleLoc;
+ SmallVector<Decl*, 4> TemplateParams;
+ if (ParseTemplateParameters(Depth, TemplateParams, LAngleLoc,
+ RAngleLoc)) {
+ // Skip until the semi-colon or a }.
+ SkipUntil(tok::r_brace, true, true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return 0;
+ }
+
+ ParamLists.push_back(
+ Actions.ActOnTemplateParameterList(Depth, ExportLoc,
+ TemplateLoc, LAngleLoc,
+ TemplateParams.data(),
+ TemplateParams.size(), RAngleLoc));
+
+ if (!TemplateParams.empty()) {
+ isSpecialization = false;
+ ++Depth;
+ } else {
+ LastParamListWasEmpty = true;
+ }
+ } while (Tok.is(tok::kw_export) || Tok.is(tok::kw_template));
+
+ // Parse the actual template declaration.
+ return ParseSingleDeclarationAfterTemplate(Context,
+ ParsedTemplateInfo(&ParamLists,
+ isSpecialization,
+ LastParamListWasEmpty),
+ ParsingTemplateParams,
+ DeclEnd, AS, AccessAttrs);
+}
+
+/// \brief Parse a single declaration that declares a template,
+/// template specialization, or explicit instantiation of a template.
+///
+/// \param TemplateParams if non-NULL, the template parameter lists
+/// that preceded this declaration. In this case, the declaration is a
+/// template declaration, out-of-line definition of a template, or an
+/// explicit template specialization. When NULL, the declaration is an
+/// explicit template instantiation.
+///
+/// \param TemplateLoc when TemplateParams is NULL, the location of
+/// the 'template' keyword that indicates that we have an explicit
+/// template instantiation.
+///
+/// \param DeclEnd will receive the source location of the last token
+/// within this declaration.
+///
+/// \param AS the access specifier associated with this
+/// declaration. Will be AS_none for namespace-scope declarations.
+///
+/// \returns the new declaration.
+Decl *
+Parser::ParseSingleDeclarationAfterTemplate(
+ unsigned Context,
+ const ParsedTemplateInfo &TemplateInfo,
+ ParsingDeclRAIIObject &DiagsFromTParams,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS,
+ AttributeList *AccessAttrs) {
+ assert(TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
+ "Template information required");
+
+ if (Context == Declarator::MemberContext) {
+ // We are parsing a member template.
+ ParseCXXClassMemberDeclaration(AS, AccessAttrs, TemplateInfo,
+ &DiagsFromTParams);
+ return 0;
+ }
+
+ ParsedAttributesWithRange prefixAttrs(AttrFactory);
+ MaybeParseCXX0XAttributes(prefixAttrs);
+
+ if (Tok.is(tok::kw_using))
+ return ParseUsingDirectiveOrDeclaration(Context, TemplateInfo, DeclEnd,
+ prefixAttrs);
+
+ // Parse the declaration specifiers, stealing the accumulated
+ // diagnostics from the template parameters.
+ ParsingDeclSpec DS(*this, &DiagsFromTParams);
+
+ DS.takeAttributesFrom(prefixAttrs);
+
+ ParseDeclarationSpecifiers(DS, TemplateInfo, AS,
+ getDeclSpecContextFromDeclaratorContext(Context));
+
+ if (Tok.is(tok::semi)) {
+ DeclEnd = ConsumeToken();
+ Decl *Decl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS, DS);
+ DS.complete(Decl);
+ return Decl;
+ }
+
+ // Parse the declarator.
+ ParsingDeclarator DeclaratorInfo(*this, DS, (Declarator::TheContext)Context);
+ ParseDeclarator(DeclaratorInfo);
+ // Error parsing the declarator?
+ if (!DeclaratorInfo.hasName()) {
+ // If so, skip until the semi-colon or a }.
+ SkipUntil(tok::r_brace, true, true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ return 0;
+ }
+
+ LateParsedAttrList LateParsedAttrs;
+ if (DeclaratorInfo.isFunctionDeclarator())
+ MaybeParseGNUAttributes(DeclaratorInfo, &LateParsedAttrs);
+
+ // If we have a declaration or declarator list, handle it.
+ if (isDeclarationAfterDeclarator()) {
+ // Parse this declaration.
+ Decl *ThisDecl = ParseDeclarationAfterDeclarator(DeclaratorInfo,
+ TemplateInfo);
+
+ if (Tok.is(tok::comma)) {
+ Diag(Tok, diag::err_multiple_template_declarators)
+ << (int)TemplateInfo.Kind;
+ SkipUntil(tok::semi, true, false);
+ return ThisDecl;
+ }
+
+    // Eat the semicolon after the declaration.
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_declaration);
+ if (LateParsedAttrs.size() > 0)
+ ParseLexedAttributeList(LateParsedAttrs, ThisDecl, true, false);
+ DeclaratorInfo.complete(ThisDecl);
+ return ThisDecl;
+ }
+
+ if (DeclaratorInfo.isFunctionDeclarator() &&
+ isStartOfFunctionDefinition(DeclaratorInfo)) {
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ // Recover by ignoring the 'typedef'. This was probably supposed to be
+ // the 'typename' keyword, which we should have already suggested adding
+ // if it's appropriate.
+ Diag(DS.getStorageClassSpecLoc(), diag::err_function_declared_typedef)
+ << FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
+ DS.ClearStorageClassSpecs();
+ }
+ return ParseFunctionDefinition(DeclaratorInfo, TemplateInfo,
+ &LateParsedAttrs);
+ }
+
+ if (DeclaratorInfo.isFunctionDeclarator())
+ Diag(Tok, diag::err_expected_fn_body);
+ else
+ Diag(Tok, diag::err_invalid_token_after_toplevel_declarator);
+ SkipUntil(tok::semi);
+ return 0;
+}
+
+/// ParseTemplateParameters - Parses a template-parameter-list enclosed in
+/// angle brackets. Depth is the depth of this template-parameter-list, which
+/// is the number of template headers directly enclosing this template header.
+/// TemplateParams is the current list of template parameters we're building.
+/// The template parameter we parse will be added to this list. LAngleLoc and
+/// RAngleLoc will receive the positions of the '<' and '>', respectively,
+/// that enclose this template parameter list.
+///
+/// \returns true if an error occurred, false otherwise.
+bool Parser::ParseTemplateParameters(unsigned Depth,
+ SmallVectorImpl<Decl*> &TemplateParams,
+ SourceLocation &LAngleLoc,
+ SourceLocation &RAngleLoc) {
+ // Get the template parameter list.
+ if (!Tok.is(tok::less)) {
+ Diag(Tok.getLocation(), diag::err_expected_less_after) << "template";
+ return true;
+ }
+ LAngleLoc = ConsumeToken();
+
+ // Try to parse the template parameter list.
+ bool Failed = false;
+ if (!Tok.is(tok::greater) && !Tok.is(tok::greatergreater))
+ Failed = ParseTemplateParameterList(Depth, TemplateParams);
+
+ if (Tok.is(tok::greatergreater)) {
+ Tok.setKind(tok::greater);
+ RAngleLoc = Tok.getLocation();
+ Tok.setLocation(Tok.getLocation().getLocWithOffset(1));
+ } else if (Tok.is(tok::greater))
+ RAngleLoc = ConsumeToken();
+ else if (Failed) {
+ Diag(Tok.getLocation(), diag::err_expected_greater);
+ return true;
+ }
+ return false;
+}
+
+/// ParseTemplateParameterList - Parse a template parameter list. If
+/// the parsing fails badly (i.e., closing bracket was left out), this
+/// will try to put the token stream in a reasonable position (closing
+/// a statement, etc.) and return false.
+///
+/// template-parameter-list: [C++ temp]
+/// template-parameter
+/// template-parameter-list ',' template-parameter
+bool
+Parser::ParseTemplateParameterList(unsigned Depth,
+ SmallVectorImpl<Decl*> &TemplateParams) {
+ while (1) {
+ if (Decl *TmpParam
+ = ParseTemplateParameter(Depth, TemplateParams.size())) {
+ TemplateParams.push_back(TmpParam);
+ } else {
+ // If we failed to parse a template parameter, skip until we find
+ // a comma or closing brace.
+ SkipUntil(tok::comma, tok::greater, tok::greatergreater, true, true);
+ }
+
+    // Did we find a comma or the end of the template parameter list?
+ if (Tok.is(tok::comma)) {
+ ConsumeToken();
+ } else if (Tok.is(tok::greater) || Tok.is(tok::greatergreater)) {
+ // Don't consume this... that's done by template parser.
+ break;
+ } else {
+ // Somebody probably forgot to close the template. Skip ahead and
+ // try to get out of the expression. This error is currently
+ // subsumed by whatever goes on in ParseTemplateParameter.
+ Diag(Tok.getLocation(), diag::err_expected_comma_greater);
+ SkipUntil(tok::comma, tok::greater, tok::greatergreater, true, true);
+ return false;
+ }
+ }
+ return true;
+}
+
+/// \brief Determine whether the parser is at the start of a template
+/// type parameter.
+bool Parser::isStartOfTemplateTypeParameter() {
+ if (Tok.is(tok::kw_class)) {
+ // "class" may be the start of an elaborated-type-specifier or a
+ // type-parameter. Per C++ [temp.param]p3, we prefer the type-parameter.
+ switch (NextToken().getKind()) {
+ case tok::equal:
+ case tok::comma:
+ case tok::greater:
+ case tok::greatergreater:
+ case tok::ellipsis:
+ return true;
+
+ case tok::identifier:
+ // This may be either a type-parameter or an elaborated-type-specifier.
+ // We have to look further.
+ break;
+
+ default:
+ return false;
+ }
+
+ switch (GetLookAheadToken(2).getKind()) {
+ case tok::equal:
+ case tok::comma:
+ case tok::greater:
+ case tok::greatergreater:
+ return true;
+
+ default:
+ return false;
+ }
+ }
+
+ if (Tok.isNot(tok::kw_typename))
+ return false;
+
+ // C++ [temp.param]p2:
+ // There is no semantic difference between class and typename in a
+ // template-parameter. typename followed by an unqualified-id
+ // names a template type parameter. typename followed by a
+ // qualified-id denotes the type in a non-type
+ // parameter-declaration.
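+  //
+  // For illustration (added comment; the names are hypothetical):
+  //   template<typename T, typename T::type N> struct X;
+  // The first 'typename T' is a type-parameter; the second 'typename',
+  // followed by the qualified-id 'T::type', gives the type of the non-type
+  // parameter N.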
+ Token Next = NextToken();
+
+ // If we have an identifier, skip over it.
+ if (Next.getKind() == tok::identifier)
+ Next = GetLookAheadToken(2);
+
+ switch (Next.getKind()) {
+ case tok::equal:
+ case tok::comma:
+ case tok::greater:
+ case tok::greatergreater:
+ case tok::ellipsis:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+/// ParseTemplateParameter - Parse a template-parameter (C++ [temp.param]).
+///
+/// template-parameter: [C++ temp.param]
+/// type-parameter
+/// parameter-declaration
+///
+/// type-parameter: (see below)
+/// 'class' ...[opt] identifier[opt]
+/// 'class' identifier[opt] '=' type-id
+/// 'typename' ...[opt] identifier[opt]
+/// 'typename' identifier[opt] '=' type-id
+/// 'template' '<' template-parameter-list '>'
+/// 'class' ...[opt] identifier[opt]
+/// 'template' '<' template-parameter-list '>' 'class' identifier[opt]
+/// = id-expression
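+///
+/// Illustrative parameters (added for clarity):
+///   template<class T> ...                   // type-parameter
+///   template<typename... Ts> ...            // type parameter pack
+///   template<int N = 8> ...                 // parameter-declaration
+///   template<template<class> class C> ...   // template template parameter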
+Decl *Parser::ParseTemplateParameter(unsigned Depth, unsigned Position) {
+ if (isStartOfTemplateTypeParameter())
+ return ParseTypeParameter(Depth, Position);
+
+ if (Tok.is(tok::kw_template))
+ return ParseTemplateTemplateParameter(Depth, Position);
+
+ // If it's none of the above, then it must be a parameter declaration.
+  // NOTE: This will pick up errors in the closure of the template parameter
+  // list (e.g., 'template < ;'). Check here to implement >> style closures.
+ return ParseNonTypeTemplateParameter(Depth, Position);
+}
+
+/// ParseTypeParameter - Parse a template type parameter (C++ [temp.param]).
+/// Other kinds of template parameters are parsed in
+/// ParseTemplateTemplateParameter and ParseNonTypeTemplateParameter.
+///
+/// type-parameter: [C++ temp.param]
+/// 'class' ...[opt][C++0x] identifier[opt]
+/// 'class' identifier[opt] '=' type-id
+/// 'typename' ...[opt][C++0x] identifier[opt]
+/// 'typename' identifier[opt] '=' type-id
+Decl *Parser::ParseTypeParameter(unsigned Depth, unsigned Position) {
+ assert((Tok.is(tok::kw_class) || Tok.is(tok::kw_typename)) &&
+ "A type-parameter starts with 'class' or 'typename'");
+
+ // Consume the 'class' or 'typename' keyword.
+ bool TypenameKeyword = Tok.is(tok::kw_typename);
+ SourceLocation KeyLoc = ConsumeToken();
+
+ // Grab the ellipsis (if given).
+ bool Ellipsis = false;
+ SourceLocation EllipsisLoc;
+ if (Tok.is(tok::ellipsis)) {
+ Ellipsis = true;
+ EllipsisLoc = ConsumeToken();
+
+ Diag(EllipsisLoc,
+ getLangOpts().CPlusPlus0x
+ ? diag::warn_cxx98_compat_variadic_templates
+ : diag::ext_variadic_templates);
+ }
+
+ // Grab the template parameter name (if given)
+ SourceLocation NameLoc;
+ IdentifierInfo* ParamName = 0;
+ if (Tok.is(tok::identifier)) {
+ ParamName = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+ } else if (Tok.is(tok::equal) || Tok.is(tok::comma) ||
+ Tok.is(tok::greater) || Tok.is(tok::greatergreater)) {
+ // Unnamed template parameter. Don't have to do anything here, just
+ // don't consume this token.
+ } else {
+ Diag(Tok.getLocation(), diag::err_expected_ident);
+ return 0;
+ }
+
+ // Grab a default argument (if available).
+ // Per C++0x [basic.scope.pdecl]p9, we parse the default argument before
+ // we introduce the type parameter into the local scope.
+ SourceLocation EqualLoc;
+ ParsedType DefaultArg;
+ if (Tok.is(tok::equal)) {
+ EqualLoc = ConsumeToken();
+ DefaultArg = ParseTypeName(/*Range=*/0,
+ Declarator::TemplateTypeArgContext).get();
+ }
+
+ return Actions.ActOnTypeParameter(getCurScope(), TypenameKeyword, Ellipsis,
+ EllipsisLoc, KeyLoc, ParamName, NameLoc,
+ Depth, Position, EqualLoc, DefaultArg);
+}
+
+/// ParseTemplateTemplateParameter - Handle the parsing of template
+/// template parameters.
+///
+/// type-parameter: [C++ temp.param]
+/// 'template' '<' template-parameter-list '>' 'class'
+/// ...[opt] identifier[opt]
+/// 'template' '<' template-parameter-list '>' 'class' identifier[opt]
+/// = id-expression
+Decl *
+Parser::ParseTemplateTemplateParameter(unsigned Depth, unsigned Position) {
+ assert(Tok.is(tok::kw_template) && "Expected 'template' keyword");
+
+ // Handle the template <...> part.
+ SourceLocation TemplateLoc = ConsumeToken();
+ SmallVector<Decl*,8> TemplateParams;
+ SourceLocation LAngleLoc, RAngleLoc;
+ {
+ ParseScope TemplateParmScope(this, Scope::TemplateParamScope);
+ if (ParseTemplateParameters(Depth + 1, TemplateParams, LAngleLoc,
+ RAngleLoc)) {
+ return 0;
+ }
+ }
+
+  // Generate a meaningful error if the user forgot to put 'class' before the
+  // identifier, comma, or greater. Provide a fixit if the identifier, comma,
+  // or greater appears immediately, or appears after 'typename' or 'struct';
+  // in the latter case, replace that keyword with 'class'.
+ if (!Tok.is(tok::kw_class)) {
+ bool Replace = Tok.is(tok::kw_typename) || Tok.is(tok::kw_struct);
+ const Token& Next = Replace ? NextToken() : Tok;
+ if (Next.is(tok::identifier) || Next.is(tok::comma) ||
+ Next.is(tok::greater) || Next.is(tok::greatergreater) ||
+ Next.is(tok::ellipsis))
+ Diag(Tok.getLocation(), diag::err_class_on_template_template_param)
+ << (Replace ? FixItHint::CreateReplacement(Tok.getLocation(), "class")
+ : FixItHint::CreateInsertion(Tok.getLocation(), "class "));
+ else
+ Diag(Tok.getLocation(), diag::err_class_on_template_template_param);
+
+ if (Replace)
+ ConsumeToken();
+ } else
+ ConsumeToken();
+
+ // Parse the ellipsis, if given.
+ SourceLocation EllipsisLoc;
+ if (Tok.is(tok::ellipsis)) {
+ EllipsisLoc = ConsumeToken();
+
+ Diag(EllipsisLoc,
+ getLangOpts().CPlusPlus0x
+ ? diag::warn_cxx98_compat_variadic_templates
+ : diag::ext_variadic_templates);
+ }
+
+ // Get the identifier, if given.
+ SourceLocation NameLoc;
+ IdentifierInfo* ParamName = 0;
+ if (Tok.is(tok::identifier)) {
+ ParamName = Tok.getIdentifierInfo();
+ NameLoc = ConsumeToken();
+ } else if (Tok.is(tok::equal) || Tok.is(tok::comma) ||
+ Tok.is(tok::greater) || Tok.is(tok::greatergreater)) {
+ // Unnamed template parameter. Don't have to do anything here, just
+ // don't consume this token.
+ } else {
+ Diag(Tok.getLocation(), diag::err_expected_ident);
+ return 0;
+ }
+
+ TemplateParameterList *ParamList =
+ Actions.ActOnTemplateParameterList(Depth, SourceLocation(),
+ TemplateLoc, LAngleLoc,
+ TemplateParams.data(),
+ TemplateParams.size(),
+ RAngleLoc);
+
+ // Grab a default argument (if available).
+ // Per C++0x [basic.scope.pdecl]p9, we parse the default argument before
+ // we introduce the template parameter into the local scope.
+ SourceLocation EqualLoc;
+ ParsedTemplateArgument DefaultArg;
+ if (Tok.is(tok::equal)) {
+ EqualLoc = ConsumeToken();
+ DefaultArg = ParseTemplateTemplateArgument();
+ if (DefaultArg.isInvalid()) {
+ Diag(Tok.getLocation(),
+ diag::err_default_template_template_parameter_not_template);
+ SkipUntil(tok::comma, tok::greater, tok::greatergreater, true, true);
+ }
+ }
+
+ return Actions.ActOnTemplateTemplateParameter(getCurScope(), TemplateLoc,
+ ParamList, EllipsisLoc,
+ ParamName, NameLoc, Depth,
+ Position, EqualLoc, DefaultArg);
+}
+
+/// ParseNonTypeTemplateParameter - Handle the parsing of non-type
+/// template parameters (e.g., in "template<int Size> class array;").
+///
+/// template-parameter:
+/// ...
+/// parameter-declaration
+Decl *
+Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
+ // Parse the declaration-specifiers (i.e., the type).
+ // FIXME: The type should probably be restricted in some way... Not all
+ // declarators (parts of declarators?) are accepted for parameters.
+ DeclSpec DS(AttrFactory);
+ ParseDeclarationSpecifiers(DS);
+
+ // Parse this as a typename.
+ Declarator ParamDecl(DS, Declarator::TemplateParamContext);
+ ParseDeclarator(ParamDecl);
+ if (DS.getTypeSpecType() == DeclSpec::TST_unspecified) {
+ Diag(Tok.getLocation(), diag::err_expected_template_parameter);
+ return 0;
+ }
+
+ // If there is a default value, parse it.
+ // Per C++0x [basic.scope.pdecl]p9, we parse the default argument before
+ // we introduce the template parameter into the local scope.
+ SourceLocation EqualLoc;
+ ExprResult DefaultArg;
+ if (Tok.is(tok::equal)) {
+ EqualLoc = ConsumeToken();
+
+ // C++ [temp.param]p15:
+ // When parsing a default template-argument for a non-type
+ // template-parameter, the first non-nested > is taken as the
+ // end of the template-parameter-list rather than a greater-than
+ // operator.
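+    //
+    // For illustration (added comment): in
+    //   'template<int N = 1 > 2> struct X;'
+    // the '>' after '1' ends the parameter list; the comparison must be
+    // parenthesized: 'template<int N = (1 > 2)> struct X;'.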
+ GreaterThanIsOperatorScope G(GreaterThanIsOperator, false);
+
+ DefaultArg = ParseAssignmentExpression();
+ if (DefaultArg.isInvalid())
+ SkipUntil(tok::comma, tok::greater, true, true);
+ }
+
+ // Create the parameter.
+ return Actions.ActOnNonTypeTemplateParameter(getCurScope(), ParamDecl,
+ Depth, Position, EqualLoc,
+ DefaultArg.take());
+}
+
+/// \brief Parses a template-id that after the template name has
+/// already been parsed.
+///
+/// This routine takes care of parsing the enclosed template argument
+/// list ('<' template-parameter-list [opt] '>') and placing the
+/// results into a form that can be transferred to semantic analysis.
+///
+/// \param Template the template declaration produced by isTemplateName
+///
+/// \param TemplateNameLoc the source location of the template name
+///
+/// \param SS if non-NULL, the nested-name-specifier preceding the
+/// template name.
+///
+/// \param ConsumeLastToken if true, then we will consume the last
+/// token that forms the template-id. Otherwise, we will leave the
+/// last token in the stream (e.g., so that it can be replaced with an
+/// annotation token).
+bool
+Parser::ParseTemplateIdAfterTemplateName(TemplateTy Template,
+ SourceLocation TemplateNameLoc,
+ const CXXScopeSpec &SS,
+ bool ConsumeLastToken,
+ SourceLocation &LAngleLoc,
+ TemplateArgList &TemplateArgs,
+ SourceLocation &RAngleLoc) {
+ assert(Tok.is(tok::less) && "Must have already parsed the template-name");
+
+ // Consume the '<'.
+ LAngleLoc = ConsumeToken();
+
+ // Parse the optional template-argument-list.
+ bool Invalid = false;
+ {
+ GreaterThanIsOperatorScope G(GreaterThanIsOperator, false);
+ if (Tok.isNot(tok::greater) && Tok.isNot(tok::greatergreater))
+ Invalid = ParseTemplateArgumentList(TemplateArgs);
+
+ if (Invalid) {
+ // Try to find the closing '>'.
+ SkipUntil(tok::greater, true, !ConsumeLastToken);
+
+ return true;
+ }
+ }
+
+ if (Tok.isNot(tok::greater) && Tok.isNot(tok::greatergreater)) {
+ Diag(Tok.getLocation(), diag::err_expected_greater);
+ return true;
+ }
+
+ // Determine the location of the '>' or '>>'. Only consume this
+ // token if the caller asked us to.
+ RAngleLoc = Tok.getLocation();
+
+ if (Tok.is(tok::greatergreater)) {
+ const char *ReplaceStr = "> >";
+ if (NextToken().is(tok::greater) || NextToken().is(tok::greatergreater))
+ ReplaceStr = "> > ";
+
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_two_right_angle_brackets :
+ diag::err_two_right_angle_brackets_need_space)
+ << FixItHint::CreateReplacement(SourceRange(Tok.getLocation()),
+ ReplaceStr);
+
+ Tok.setKind(tok::greater);
+ if (!ConsumeLastToken) {
+ // Since we're not supposed to consume the '>>' token, we need
+ // to insert a second '>' token after the first.
+ PP.EnterToken(Tok);
+ }
+ } else if (ConsumeLastToken)
+ ConsumeToken();
+
+ return false;
+}
+
+/// \brief Replace the tokens that form a simple-template-id with an
+/// annotation token containing the complete template-id.
+///
+/// The first token in the stream must be the name of a template that
+/// is followed by a '<'. This routine will parse the complete
+/// simple-template-id and replace the tokens with a single annotation
+/// token with one of two different kinds: if the template-id names a
+/// type (and \p AllowTypeAnnotation is true), the annotation token is
+/// a type annotation that includes the optional nested-name-specifier
+/// (\p SS). Otherwise, the annotation token is a template-id
+/// annotation that does not include the optional
+/// nested-name-specifier.
+///
+/// \param Template the declaration of the template named by the first
+/// token (an identifier), as returned from \c Action::isTemplateName().
+///
+/// \param TNK the kind of template that \p Template
+/// refers to, as returned from \c Action::isTemplateName().
+///
+/// \param SS if non-NULL, the nested-name-specifier that precedes
+/// this template name.
+///
+/// \param TemplateKWLoc if valid, specifies that this template-id
+/// annotation was preceded by the 'template' keyword and gives the
+/// location of that keyword. If invalid (the default), then this
+/// template-id was not preceded by a 'template' keyword.
+///
+/// \param AllowTypeAnnotation if true (the default), then a
+/// simple-template-id that refers to a class template, template
+/// template parameter, or other template that produces a type will be
+/// replaced with a type annotation token. Otherwise, the
+/// simple-template-id is always replaced with a template-id
+/// annotation token.
+///
+/// If an unrecoverable parse error occurs and no annotation token can be
+/// formed, this function returns true.
+///
+bool Parser::AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ UnqualifiedId &TemplateName,
+ bool AllowTypeAnnotation) {
+ assert(getLangOpts().CPlusPlus && "Can only annotate template-ids in C++");
+ assert(Template && Tok.is(tok::less) &&
+ "Parser isn't at the beginning of a template-id");
+
+ // Consume the template-name.
+ SourceLocation TemplateNameLoc = TemplateName.getSourceRange().getBegin();
+
+ // Parse the enclosed template argument list.
+ SourceLocation LAngleLoc, RAngleLoc;
+ TemplateArgList TemplateArgs;
+ bool Invalid = ParseTemplateIdAfterTemplateName(Template,
+ TemplateNameLoc,
+ SS, false, LAngleLoc,
+ TemplateArgs,
+ RAngleLoc);
+
+ if (Invalid) {
+ // If we failed to parse the template ID but skipped ahead to a >, we're not
+ // going to be able to form a token annotation. Eat the '>' if present.
+ if (Tok.is(tok::greater))
+ ConsumeToken();
+ return true;
+ }
+
+ ASTTemplateArgsPtr TemplateArgsPtr(Actions, TemplateArgs.data(),
+ TemplateArgs.size());
+
+ // Build the annotation token.
+ if (TNK == TNK_Type_template && AllowTypeAnnotation) {
+ TypeResult Type
+ = Actions.ActOnTemplateIdType(SS, TemplateKWLoc,
+ Template, TemplateNameLoc,
+ LAngleLoc, TemplateArgsPtr, RAngleLoc);
+ if (Type.isInvalid()) {
+ // If we failed to parse the template ID but skipped ahead to a >, we're not
+ // going to be able to form a token annotation. Eat the '>' if present.
+ if (Tok.is(tok::greater))
+ ConsumeToken();
+ return true;
+ }
+
+ Tok.setKind(tok::annot_typename);
+ setTypeAnnotation(Tok, Type.get());
+ if (SS.isNotEmpty())
+ Tok.setLocation(SS.getBeginLoc());
+ else if (TemplateKWLoc.isValid())
+ Tok.setLocation(TemplateKWLoc);
+ else
+ Tok.setLocation(TemplateNameLoc);
+ } else {
+ // Build a template-id annotation token that can be processed
+ // later.
+ Tok.setKind(tok::annot_template_id);
+ TemplateIdAnnotation *TemplateId
+ = TemplateIdAnnotation::Allocate(TemplateArgs.size());
+ TemplateId->TemplateNameLoc = TemplateNameLoc;
+ if (TemplateName.getKind() == UnqualifiedId::IK_Identifier) {
+ TemplateId->Name = TemplateName.Identifier;
+ TemplateId->Operator = OO_None;
+ } else {
+ TemplateId->Name = 0;
+ TemplateId->Operator = TemplateName.OperatorFunctionId.Operator;
+ }
+ TemplateId->SS = SS;
+ TemplateId->TemplateKWLoc = TemplateKWLoc;
+ TemplateId->Template = Template;
+ TemplateId->Kind = TNK;
+ TemplateId->LAngleLoc = LAngleLoc;
+ TemplateId->RAngleLoc = RAngleLoc;
+ ParsedTemplateArgument *Args = TemplateId->getTemplateArgs();
+ for (unsigned Arg = 0, ArgEnd = TemplateArgs.size(); Arg != ArgEnd; ++Arg)
+ Args[Arg] = ParsedTemplateArgument(TemplateArgs[Arg]);
+ Tok.setAnnotationValue(TemplateId);
+ if (TemplateKWLoc.isValid())
+ Tok.setLocation(TemplateKWLoc);
+ else
+ Tok.setLocation(TemplateNameLoc);
+
+ TemplateArgsPtr.release();
+ }
+
+ // Common fields for the annotation token
+ Tok.setAnnotationEndLoc(RAngleLoc);
+
+ // In case the tokens were cached, have Preprocessor replace them with the
+ // annotation token.
+ PP.AnnotateCachedTokens(Tok);
+ return false;
+}
+
+/// \brief Replaces a template-id annotation token with a type
+/// annotation token.
+///
+/// If there was a failure when forming the type from the template-id,
+/// a type annotation token will still be created, but will have a
+/// NULL type pointer to signify an error.
+void Parser::AnnotateTemplateIdTokenAsType() {
+ assert(Tok.is(tok::annot_template_id) && "Requires template-id tokens");
+
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ assert((TemplateId->Kind == TNK_Type_template ||
+ TemplateId->Kind == TNK_Dependent_template_name) &&
+ "Only works for type and dependent templates");
+
+ ASTTemplateArgsPtr TemplateArgsPtr(Actions,
+ TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+
+ TypeResult Type
+ = Actions.ActOnTemplateIdType(TemplateId->SS,
+ TemplateId->TemplateKWLoc,
+ TemplateId->Template,
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->RAngleLoc);
+ // Create the new "type" annotation token.
+ Tok.setKind(tok::annot_typename);
+ setTypeAnnotation(Tok, Type.isInvalid() ? ParsedType() : Type.get());
+ if (TemplateId->SS.isNotEmpty()) // it was a C++ qualified type name.
+ Tok.setLocation(TemplateId->SS.getBeginLoc());
+ // End location stays the same
+
+  // Replace the template-id annotation token, and possibly the scope-specifier
+ // that precedes it, with the typename annotation token.
+ PP.AnnotateCachedTokens(Tok);
+}
+
+/// \brief Determine whether the given token can end a template argument.
+static bool isEndOfTemplateArgument(Token Tok) {
+ return Tok.is(tok::comma) || Tok.is(tok::greater) ||
+ Tok.is(tok::greatergreater);
+}
+
+/// \brief Parse a C++ template template argument.
+ParsedTemplateArgument Parser::ParseTemplateTemplateArgument() {
+ if (!Tok.is(tok::identifier) && !Tok.is(tok::coloncolon) &&
+ !Tok.is(tok::annot_cxxscope))
+ return ParsedTemplateArgument();
+
+ // C++0x [temp.arg.template]p1:
+ // A template-argument for a template template-parameter shall be the name
+ // of a class template or an alias template, expressed as id-expression.
+ //
+ // We parse an id-expression that refers to a class template or alias
+ // template. The grammar we parse is:
+ //
+ // nested-name-specifier[opt] template[opt] identifier ...[opt]
+ //
+ // followed by a token that terminates a template argument, such as ',',
+ // '>', or (in some cases) '>>'.
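+  //
+  // For illustration (added comment; 'Holder' and 'MyList' are hypothetical):
+  // given 'template<template<typename> class C> struct Holder;', this routine
+  // parses the argument 'MyList' in 'Holder<MyList>'.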
+ CXXScopeSpec SS; // nested-name-specifier, if present
+ ParseOptionalCXXScopeSpecifier(SS, ParsedType(),
+ /*EnteringContext=*/false);
+
+ ParsedTemplateArgument Result;
+ SourceLocation EllipsisLoc;
+ if (SS.isSet() && Tok.is(tok::kw_template)) {
+ // Parse the optional 'template' keyword following the
+ // nested-name-specifier.
+ SourceLocation TemplateKWLoc = ConsumeToken();
+
+ if (Tok.is(tok::identifier)) {
+ // We appear to have a dependent template name.
+ UnqualifiedId Name;
+ Name.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
+ ConsumeToken(); // the identifier
+
+ // Parse the ellipsis.
+ if (Tok.is(tok::ellipsis))
+ EllipsisLoc = ConsumeToken();
+
+ // If the next token signals the end of a template argument,
+ // then we have a dependent template name that could be a template
+ // template argument.
+ TemplateTy Template;
+ if (isEndOfTemplateArgument(Tok) &&
+ Actions.ActOnDependentTemplateName(getCurScope(),
+ SS, TemplateKWLoc, Name,
+ /*ObjectType=*/ ParsedType(),
+ /*EnteringContext=*/false,
+ Template))
+ Result = ParsedTemplateArgument(SS, Template, Name.StartLocation);
+ }
+ } else if (Tok.is(tok::identifier)) {
+ // We may have a (non-dependent) template name.
+ TemplateTy Template;
+ UnqualifiedId Name;
+ Name.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
+ ConsumeToken(); // the identifier
+
+ // Parse the ellipsis.
+ if (Tok.is(tok::ellipsis))
+ EllipsisLoc = ConsumeToken();
+
+ if (isEndOfTemplateArgument(Tok)) {
+ bool MemberOfUnknownSpecialization;
+ TemplateNameKind TNK = Actions.isTemplateName(getCurScope(), SS,
+ /*hasTemplateKeyword=*/false,
+ Name,
+ /*ObjectType=*/ ParsedType(),
+ /*EnteringContext=*/false,
+ Template,
+ MemberOfUnknownSpecialization);
+ if (TNK == TNK_Dependent_template_name || TNK == TNK_Type_template) {
+ // We have an id-expression that refers to a class template or
+ // (C++0x) alias template.
+ Result = ParsedTemplateArgument(SS, Template, Name.StartLocation);
+ }
+ }
+ }
+
+ // If this is a pack expansion, build it as such.
+ if (EllipsisLoc.isValid() && !Result.isInvalid())
+ Result = Actions.ActOnPackExpansion(Result, EllipsisLoc);
+
+ return Result;
+}
+
+/// ParseTemplateArgument - Parse a C++ template argument (C++ [temp.names]).
+///
+/// template-argument: [C++ 14.2]
+/// constant-expression
+/// type-id
+/// id-expression
+ParsedTemplateArgument Parser::ParseTemplateArgument() {
+ // C++ [temp.arg]p2:
+ // In a template-argument, an ambiguity between a type-id and an
+ // expression is resolved to a type-id, regardless of the form of
+ // the corresponding template-parameter.
+ //
+ // Therefore, we initially try to parse a type-id.
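+  //
+  // For illustration (added comment; 'A' is hypothetical): in 'A<int()>', the
+  // argument 'int()' is parsed as the type-id "function of () returning int"
+  // rather than as a value-initialization expression.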
+ if (isCXXTypeId(TypeIdAsTemplateArgument)) {
+ SourceLocation Loc = Tok.getLocation();
+ TypeResult TypeArg = ParseTypeName(/*Range=*/0,
+ Declarator::TemplateTypeArgContext);
+ if (TypeArg.isInvalid())
+ return ParsedTemplateArgument();
+
+ return ParsedTemplateArgument(ParsedTemplateArgument::Type,
+ TypeArg.get().getAsOpaquePtr(),
+ Loc);
+ }
+
+ // Try to parse a template template argument.
+ {
+ TentativeParsingAction TPA(*this);
+
+ ParsedTemplateArgument TemplateTemplateArgument
+ = ParseTemplateTemplateArgument();
+ if (!TemplateTemplateArgument.isInvalid()) {
+ TPA.Commit();
+ return TemplateTemplateArgument;
+ }
+
+ // Revert this tentative parse to parse a non-type template argument.
+ TPA.Revert();
+ }
+
+ // Parse a non-type template argument.
+ SourceLocation Loc = Tok.getLocation();
+ ExprResult ExprArg = ParseConstantExpression(MaybeTypeCast);
+ if (ExprArg.isInvalid() || !ExprArg.get())
+ return ParsedTemplateArgument();
+
+ return ParsedTemplateArgument(ParsedTemplateArgument::NonType,
+ ExprArg.release(), Loc);
+}
+
+/// \brief Determine whether the current tokens can only be parsed as a
+/// template argument list (starting with the '<') and never as a '<'
+/// expression.
+bool Parser::IsTemplateArgumentList(unsigned Skip) {
+ struct AlwaysRevertAction : TentativeParsingAction {
+ AlwaysRevertAction(Parser &P) : TentativeParsingAction(P) { }
+ ~AlwaysRevertAction() { Revert(); }
+ } Tentative(*this);
+
+ while (Skip) {
+ ConsumeToken();
+ --Skip;
+ }
+
+ // '<'
+ if (!Tok.is(tok::less))
+ return false;
+ ConsumeToken();
+
+ // An empty template argument list.
+ if (Tok.is(tok::greater))
+ return true;
+
+ // See whether we have declaration specifiers, which indicate a type.
+ while (isCXXDeclarationSpecifier() == TPResult::True())
+ ConsumeToken();
+
+ // If we have a '>' or a ',' then this is a template argument list.
+ return Tok.is(tok::greater) || Tok.is(tok::comma);
+}
+
+/// ParseTemplateArgumentList - Parse a C++ template-argument-list
+/// (C++ [temp.names]). Returns true if there was an error.
+///
+/// template-argument-list: [C++ 14.2]
+/// template-argument
+/// template-argument-list ',' template-argument
+bool
+Parser::ParseTemplateArgumentList(TemplateArgList &TemplateArgs) {
+ while (true) {
+ ParsedTemplateArgument Arg = ParseTemplateArgument();
+ if (Tok.is(tok::ellipsis)) {
+ SourceLocation EllipsisLoc = ConsumeToken();
+ Arg = Actions.ActOnPackExpansion(Arg, EllipsisLoc);
+ }
+
+ if (Arg.isInvalid()) {
+ SkipUntil(tok::comma, tok::greater, true, true);
+ return true;
+ }
+
+ // Save this template argument.
+ TemplateArgs.push_back(Arg);
+
+ // If the next token is a comma, consume it and keep reading
+ // arguments.
+ if (Tok.isNot(tok::comma)) break;
+
+ // Consume the comma.
+ ConsumeToken();
+ }
+
+ return false;
+}
+
+/// \brief Parse a C++ explicit template instantiation
+/// (C++ [temp.explicit]).
+///
+/// explicit-instantiation:
+/// 'extern' [opt] 'template' declaration
+///
+/// Note that the 'extern' is a GNU extension and C++0x feature.
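+///
+/// Illustrative forms (added for clarity):
+///   template class std::vector<int>;         // explicit instantiation
+///   extern template class std::vector<int>;  // 'extern' form (GNU/C++0x)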
+Decl *Parser::ParseExplicitInstantiation(unsigned Context,
+ SourceLocation ExternLoc,
+ SourceLocation TemplateLoc,
+ SourceLocation &DeclEnd,
+ AccessSpecifier AS) {
+ // This isn't really required here.
+ ParsingDeclRAIIObject ParsingTemplateParams(*this);
+
+ return ParseSingleDeclarationAfterTemplate(Context,
+ ParsedTemplateInfo(ExternLoc,
+ TemplateLoc),
+ ParsingTemplateParams,
+ DeclEnd, AS);
+}
+
+SourceRange Parser::ParsedTemplateInfo::getSourceRange() const {
+ if (TemplateParams)
+ return getTemplateParamsRange(TemplateParams->data(),
+ TemplateParams->size());
+
+ SourceRange R(TemplateLoc);
+ if (ExternLoc.isValid())
+ R.setBegin(ExternLoc);
+ return R;
+}
+
+void Parser::LateTemplateParserCallback(void *P, const FunctionDecl *FD) {
+ ((Parser*)P)->LateTemplateParser(FD);
+}
+
+
+void Parser::LateTemplateParser(const FunctionDecl *FD) {
+ LateParsedTemplatedFunction *LPT = LateParsedTemplateMap[FD];
+ if (LPT) {
+ ParseLateTemplatedFuncDef(*LPT);
+ return;
+ }
+
+ llvm_unreachable("Late templated function without associated lexed tokens");
+}
+
+/// \brief Late parse a C++ function template in Microsoft mode.
+void Parser::ParseLateTemplatedFuncDef(LateParsedTemplatedFunction &LMT) {
+ if(!LMT.D)
+ return;
+
+ // Get the FunctionDecl.
+ FunctionDecl *FD = 0;
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(LMT.D))
+ FD = FunTmpl->getTemplatedDecl();
+ else
+ FD = cast<FunctionDecl>(LMT.D);
+
+ // To restore the context after late parsing.
+ Sema::ContextRAII GlobalSavedContext(Actions, Actions.CurContext);
+
+ SmallVector<ParseScope*, 4> TemplateParamScopeStack;
+ DeclaratorDecl* Declarator = dyn_cast<DeclaratorDecl>(FD);
+ if (Declarator && Declarator->getNumTemplateParameterLists() != 0) {
+ TemplateParamScopeStack.push_back(new ParseScope(this, Scope::TemplateParamScope));
+ Actions.ActOnReenterDeclaratorTemplateScope(getCurScope(), Declarator);
+ Actions.ActOnReenterTemplateScope(getCurScope(), LMT.D);
+ } else {
+ // Get the list of DeclContext to reenter.
+ SmallVector<DeclContext*, 4> DeclContextToReenter;
+ DeclContext *DD = FD->getLexicalParent();
+ while (DD && !DD->isTranslationUnit()) {
+ DeclContextToReenter.push_back(DD);
+ DD = DD->getLexicalParent();
+ }
+
+    // Reenter template scopes from outermost to innermost.
+ SmallVector<DeclContext*, 4>::reverse_iterator II =
+ DeclContextToReenter.rbegin();
+ for (; II != DeclContextToReenter.rend(); ++II) {
+ if (ClassTemplatePartialSpecializationDecl* MD =
+ dyn_cast_or_null<ClassTemplatePartialSpecializationDecl>(*II)) {
+ TemplateParamScopeStack.push_back(new ParseScope(this,
+ Scope::TemplateParamScope));
+ Actions.ActOnReenterTemplateScope(getCurScope(), MD);
+ } else if (CXXRecordDecl* MD = dyn_cast_or_null<CXXRecordDecl>(*II)) {
+ TemplateParamScopeStack.push_back(new ParseScope(this,
+ Scope::TemplateParamScope,
+ MD->getDescribedClassTemplate() != 0 ));
+ Actions.ActOnReenterTemplateScope(getCurScope(),
+ MD->getDescribedClassTemplate());
+ }
+ TemplateParamScopeStack.push_back(new ParseScope(this, Scope::DeclScope));
+ Actions.PushDeclContext(Actions.getCurScope(), *II);
+ }
+ TemplateParamScopeStack.push_back(new ParseScope(this,
+ Scope::TemplateParamScope));
+ Actions.ActOnReenterTemplateScope(getCurScope(), LMT.D);
+ }
+
+ assert(!LMT.Toks.empty() && "Empty body!");
+
+ // Append the current token at the end of the new token stream so that it
+ // doesn't get lost.
+ LMT.Toks.push_back(Tok);
+ PP.EnterTokenStream(LMT.Toks.data(), LMT.Toks.size(), true, false);
+
+ // Consume the previously pushed token.
+ ConsumeAnyToken();
+ assert((Tok.is(tok::l_brace) || Tok.is(tok::colon) || Tok.is(tok::kw_try))
+ && "Inline method not starting with '{', ':' or 'try'");
+
+ // Parse the method body. Function body parsing code is similar enough
+ // to be re-used for method bodies as well.
+ ParseScope FnScope(this, Scope::FnScope|Scope::DeclScope);
+
+ // Recreate the containing function DeclContext.
+ Sema::ContextRAII FunctionSavedContext(Actions, Actions.getContainingDC(FD));
+
+ if (FunctionTemplateDecl *FunctionTemplate
+ = dyn_cast_or_null<FunctionTemplateDecl>(LMT.D))
+ Actions.ActOnStartOfFunctionDef(getCurScope(),
+ FunctionTemplate->getTemplatedDecl());
+ if (FunctionDecl *Function = dyn_cast_or_null<FunctionDecl>(LMT.D))
+ Actions.ActOnStartOfFunctionDef(getCurScope(), Function);
+
+
+ if (Tok.is(tok::kw_try)) {
+ ParseFunctionTryBlock(LMT.D, FnScope);
+ } else {
+ if (Tok.is(tok::colon))
+ ParseConstructorInitializer(LMT.D);
+ else
+ Actions.ActOnDefaultCtorInitializers(LMT.D);
+
+ if (Tok.is(tok::l_brace)) {
+ ParseFunctionStatementBody(LMT.D, FnScope);
+ Actions.MarkAsLateParsedTemplate(FD, false);
+ } else
+ Actions.ActOnFinishFunctionBody(LMT.D, 0);
+ }
+
+ // Exit scopes.
+ FnScope.Exit();
+ SmallVector<ParseScope*, 4>::reverse_iterator I =
+ TemplateParamScopeStack.rbegin();
+ for (; I != TemplateParamScopeStack.rend(); ++I)
+ delete *I;
+
+ DeclGroupPtrTy grp = Actions.ConvertDeclToDeclGroup(LMT.D);
+ if (grp)
+ Actions.getASTConsumer().HandleTopLevelDecl(grp.get());
+}
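+
+// For illustration, a rough sketch of what this path handles in Microsoft
+// mode (e.g. under -fdelayed-template-parsing), assuming 'Helper' is a name
+// that is only declared later in the translation unit:
+//
+//   template <typename T> struct A {
+//     void f() { Helper(T()); }   // body tokens are cached, not parsed here
+//   };
+//
+// The cached body is replayed through ParseLateTemplatedFuncDef only when
+// the function is actually needed, after the enclosing declaration and
+// template parameter scopes have been re-entered so that names resolve as
+// if the body had been parsed in place.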
+
+/// \brief Lex a delayed template function for late parsing.
+void Parser::LexTemplateFunctionForLateParsing(CachedTokens &Toks) {
+ tok::TokenKind kind = Tok.getKind();
+ if (!ConsumeAndStoreFunctionPrologue(Toks)) {
+ // Consume everything up to (and including) the matching right brace.
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+ }
+
+ // If we're in a function-try-block, we need to store all the catch blocks.
+ if (kind == tok::kw_try) {
+ while (Tok.is(tok::kw_catch)) {
+ ConsumeAndStoreUntil(tok::l_brace, Toks, /*StopAtSemi=*/false);
+ ConsumeAndStoreUntil(tok::r_brace, Toks, /*StopAtSemi=*/false);
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp b/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp
new file mode 100644
index 0000000..28c5e8b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/ParseTentative.cpp
@@ -0,0 +1,1444 @@
+//===--- ParseTentative.cpp - Ambiguity Resolution Parsing ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the tentative parsing portions of the Parser
+// interfaces, for ambiguity resolution.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Sema/ParsedTemplate.h"
+using namespace clang;
+
+/// isCXXDeclarationStatement - C++-specialized function that disambiguates
+/// between a declaration or an expression statement, when parsing function
+/// bodies. Returns true for declaration, false for expression.
+///
+/// declaration-statement:
+/// block-declaration
+///
+/// block-declaration:
+/// simple-declaration
+/// asm-definition
+/// namespace-alias-definition
+/// using-declaration
+/// using-directive
+/// [C++0x] static_assert-declaration
+///
+/// asm-definition:
+/// 'asm' '(' string-literal ')' ';'
+///
+/// namespace-alias-definition:
+/// 'namespace' identifier = qualified-namespace-specifier ';'
+///
+/// using-declaration:
+/// 'using' typename[opt] '::'[opt] nested-name-specifier
+/// unqualified-id ';'
+///         'using' '::' unqualified-id ';'
+///
+/// using-directive:
+/// 'using' 'namespace' '::'[opt] nested-name-specifier[opt]
+/// namespace-name ';'
+///
+bool Parser::isCXXDeclarationStatement() {
+ switch (Tok.getKind()) {
+ // asm-definition
+ case tok::kw_asm:
+ // namespace-alias-definition
+ case tok::kw_namespace:
+ // using-declaration
+ // using-directive
+ case tok::kw_using:
+ // static_assert-declaration
+ case tok::kw_static_assert:
+ case tok::kw__Static_assert:
+ return true;
+ // simple-declaration
+ default:
+ return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/false);
+ }
+}
+
+/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
+/// between a simple-declaration or an expression-statement.
+/// If during the disambiguation process a parsing error is encountered,
+/// the function returns true to let the declaration parsing code handle it.
+/// Returns false if the statement is disambiguated as expression.
+///
+/// simple-declaration:
+/// decl-specifier-seq init-declarator-list[opt] ';'
+///
+/// (if AllowForRangeDecl specified)
+/// for ( for-range-declaration : for-range-initializer ) statement
+/// for-range-declaration:
+///    attribute-specifier-seq[opt] type-specifier-seq declarator
+bool Parser::isCXXSimpleDeclaration(bool AllowForRangeDecl) {
+ // C++ 6.8p1:
+ // There is an ambiguity in the grammar involving expression-statements and
+ // declarations: An expression-statement with a function-style explicit type
+ // conversion (5.2.3) as its leftmost subexpression can be indistinguishable
+ // from a declaration where the first declarator starts with a '('. In those
+ // cases the statement is a declaration. [Note: To disambiguate, the whole
+ // statement might have to be examined to determine if it is an
+ // expression-statement or a declaration].
+
+ // C++ 6.8p3:
+ // The disambiguation is purely syntactic; that is, the meaning of the names
+ // occurring in such a statement, beyond whether they are type-names or not,
+ // is not generally used in or changed by the disambiguation. Class
+ // templates are instantiated as necessary to determine if a qualified name
+ // is a type-name. Disambiguation precedes parsing, and a statement
+ // disambiguated as a declaration may be an ill-formed declaration.
+
+ // We don't have to parse all of the decl-specifier-seq part. There's only
+ // an ambiguity if the first decl-specifier is
+ // simple-type-specifier/typename-specifier followed by a '(', which may
+ // indicate a function-style cast expression.
+ // isCXXDeclarationSpecifier will return TPResult::Ambiguous() only in such
+ // a case.
+
+ TPResult TPR = isCXXDeclarationSpecifier();
+ if (TPR != TPResult::Ambiguous())
+ return TPR != TPResult::False(); // Returns true for TPResult::True() or
+ // TPResult::Error().
+
+ // FIXME: Add statistics about the number of ambiguous statements encountered
+ // and how they were resolved (number of declarations+number of expressions).
+
+ // Ok, we have a simple-type-specifier/typename-specifier followed by a '('.
+ // We need tentative parsing...
+
+ TentativeParsingAction PA(*this);
+ TPR = TryParseSimpleDeclaration(AllowForRangeDecl);
+ PA.Revert();
+
+ // In case of an error, let the declaration parsing code handle it.
+ if (TPR == TPResult::Error())
+ return true;
+
+ // Declarations take precedence over expressions.
+ if (TPR == TPResult::Ambiguous())
+ TPR = TPResult::True();
+
+ assert(TPR == TPResult::True() || TPR == TPResult::False());
+ return TPR == TPResult::True();
+}
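+
+// For example, assuming 'T' names a class type, the statements below are
+// disambiguated as follows (cf. the examples in C++ [stmt.ambig]):
+//
+//   T(a);          // declaration of 'a' with redundant parentheses
+//   T(*d)(int);    // declaration of 'd' as pointer to function
+//   T(a)->m = 7;   // expression-statement (function-style cast)
+//
+// isCXXSimpleDeclaration() returns true for the first two and false for the
+// last one.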
+
+/// simple-declaration:
+/// decl-specifier-seq init-declarator-list[opt] ';'
+///
+/// (if AllowForRangeDecl specified)
+/// for ( for-range-declaration : for-range-initializer ) statement
+/// for-range-declaration:
+///    attribute-specifier-seq[opt] type-specifier-seq declarator
+///
+Parser::TPResult Parser::TryParseSimpleDeclaration(bool AllowForRangeDecl) {
+ // We know that we have a simple-type-specifier/typename-specifier followed
+ // by a '('.
+ assert(isCXXDeclarationSpecifier() == TPResult::Ambiguous());
+
+ if (Tok.is(tok::kw_typeof))
+ TryParseTypeofSpecifier();
+ else {
+ ConsumeToken();
+
+ if (getLangOpts().ObjC1 && Tok.is(tok::less))
+ TryParseProtocolQualifiers();
+ }
+
+ assert(Tok.is(tok::l_paren) && "Expected '('");
+
+ TPResult TPR = TryParseInitDeclaratorList();
+ if (TPR != TPResult::Ambiguous())
+ return TPR;
+
+ if (Tok.isNot(tok::semi) && (!AllowForRangeDecl || Tok.isNot(tok::colon)))
+ return TPResult::False();
+
+ return TPResult::Ambiguous();
+}
+
+/// init-declarator-list:
+/// init-declarator
+/// init-declarator-list ',' init-declarator
+///
+/// init-declarator:
+/// declarator initializer[opt]
+/// [GNU] declarator simple-asm-expr[opt] attributes[opt] initializer[opt]
+///
+/// initializer:
+/// '=' initializer-clause
+/// '(' expression-list ')'
+///
+/// initializer-clause:
+/// assignment-expression
+/// '{' initializer-list ','[opt] '}'
+/// '{' '}'
+///
+Parser::TPResult Parser::TryParseInitDeclaratorList() {
+ while (1) {
+ // declarator
+ TPResult TPR = TryParseDeclarator(false/*mayBeAbstract*/);
+ if (TPR != TPResult::Ambiguous())
+ return TPR;
+
+ // [GNU] simple-asm-expr[opt] attributes[opt]
+ if (Tok.is(tok::kw_asm) || Tok.is(tok::kw___attribute))
+ return TPResult::True();
+
+ // initializer[opt]
+ if (Tok.is(tok::l_paren)) {
+ // Parse through the parens.
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren))
+ return TPResult::Error();
+ } else if (Tok.is(tok::equal) || isTokIdentifier_in()) {
+ // MSVC and g++ won't examine the rest of declarators if '=' is
+ // encountered; they just conclude that we have a declaration.
+ // EDG parses the initializer completely, which is the proper behavior
+ // for this case.
+ //
+ // At present, Clang follows MSVC and g++, since the parser does not have
+ // the ability to parse an expression fully without recording the
+ // results of that parse.
+      // Also allow 'in' after an Objective-C declaration, as in:
+      //   for (int (^b)(void) in array).
+      // Ideally this should be done only while parsing the for-init-statement
+      // of a foreach statement, but in any other context 'in' is invalid after
+      // a declaration anyway, so the parser issues the error regardless of the
+      // outcome of this decision.
+      // FIXME: Change this if the above assumption does not hold.
+ return TPResult::True();
+ }
+
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken(); // the comma.
+ }
+
+ return TPResult::Ambiguous();
+}
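+
+// For example, with 'T' a class type, the tentative parse of
+//
+//   T(a) = x + y;
+//
+// concludes "declaration" as soon as the '=' after the declarator '(a)' is
+// seen; the initializer 'x + y' is never examined, matching the MSVC/g++
+// behavior described above.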
+
+/// isCXXConditionDeclaration - Disambiguates between a declaration or an
+/// expression for a condition of a if/switch/while/for statement.
+/// If during the disambiguation process a parsing error is encountered,
+/// the function returns true to let the declaration parsing code handle it.
+///
+/// condition:
+/// expression
+/// type-specifier-seq declarator '=' assignment-expression
+/// [C++11] type-specifier-seq declarator '=' initializer-clause
+/// [C++11] type-specifier-seq declarator braced-init-list
+/// [GNU] type-specifier-seq declarator simple-asm-expr[opt] attributes[opt]
+/// '=' assignment-expression
+///
+bool Parser::isCXXConditionDeclaration() {
+ TPResult TPR = isCXXDeclarationSpecifier();
+ if (TPR != TPResult::Ambiguous())
+ return TPR != TPResult::False(); // Returns true for TPResult::True() or
+ // TPResult::Error().
+
+ // FIXME: Add statistics about the number of ambiguous statements encountered
+ // and how they were resolved (number of declarations+number of expressions).
+
+ // Ok, we have a simple-type-specifier/typename-specifier followed by a '('.
+ // We need tentative parsing...
+
+ TentativeParsingAction PA(*this);
+
+ // type-specifier-seq
+ if (Tok.is(tok::kw_typeof))
+ TryParseTypeofSpecifier();
+ else {
+ ConsumeToken();
+
+ if (getLangOpts().ObjC1 && Tok.is(tok::less))
+ TryParseProtocolQualifiers();
+ }
+ assert(Tok.is(tok::l_paren) && "Expected '('");
+
+ // declarator
+ TPR = TryParseDeclarator(false/*mayBeAbstract*/);
+
+ // In case of an error, let the declaration parsing code handle it.
+ if (TPR == TPResult::Error())
+ TPR = TPResult::True();
+
+ if (TPR == TPResult::Ambiguous()) {
+ // '='
+ // [GNU] simple-asm-expr[opt] attributes[opt]
+ if (Tok.is(tok::equal) ||
+ Tok.is(tok::kw_asm) || Tok.is(tok::kw___attribute))
+ TPR = TPResult::True();
+ else if (getLangOpts().CPlusPlus0x && Tok.is(tok::l_brace))
+ TPR = TPResult::True();
+ else
+ TPR = TPResult::False();
+ }
+
+ PA.Revert();
+
+ assert(TPR == TPResult::True() || TPR == TPResult::False());
+ return TPR == TPResult::True();
+}
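+
+// For example, assuming 'T' names a class type:
+//
+//   if (T(x) = value)   // condition declaration: 'x' of type T, initialized
+//   if (T(x) == value)  // condition expression: function-style cast compared
+//
+// isCXXConditionDeclaration() returns true for the first form and false for
+// the second, since only '=' (or a GNU asm/attribute, or a C++11 braced
+// initializer) after the declarator marks a declaration.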
+
+/// \brief Determine whether the next set of tokens contains a type-id.
+///
+/// The context parameter states what context we're parsing right
+/// now, which affects how this routine copes with the token
+/// following the type-id. If the context is TypeIdInParens, we have
+/// already parsed the '(' and we will cease lookahead when we hit
+/// the corresponding ')'. If the context is
+/// TypeIdAsTemplateArgument, we've already parsed the '<' or ','
+/// before this template argument, and will cease lookahead when we
+/// hit a '>', '>>' (in C++0x), or ','. Returns true for a type-id
+/// and false for an expression. If during the disambiguation
+/// process a parsing error is encountered, the function returns
+/// true to let the declaration parsing code handle it.
+///
+/// type-id:
+///   type-specifier-seq abstract-declarator[opt]
+///
+bool Parser::isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous) {
+
+ isAmbiguous = false;
+
+ // C++ 8.2p2:
+ // The ambiguity arising from the similarity between a function-style cast and
+ // a type-id can occur in different contexts. The ambiguity appears as a
+ // choice between a function-style cast expression and a declaration of a
+ // type. The resolution is that any construct that could possibly be a type-id
+ // in its syntactic context shall be considered a type-id.
+
+ TPResult TPR = isCXXDeclarationSpecifier();
+ if (TPR != TPResult::Ambiguous())
+ return TPR != TPResult::False(); // Returns true for TPResult::True() or
+ // TPResult::Error().
+
+ // FIXME: Add statistics about the number of ambiguous statements encountered
+ // and how they were resolved (number of declarations+number of expressions).
+
+ // Ok, we have a simple-type-specifier/typename-specifier followed by a '('.
+ // We need tentative parsing...
+
+ TentativeParsingAction PA(*this);
+
+ // type-specifier-seq
+ if (Tok.is(tok::kw_typeof))
+ TryParseTypeofSpecifier();
+ else {
+ ConsumeToken();
+
+ if (getLangOpts().ObjC1 && Tok.is(tok::less))
+ TryParseProtocolQualifiers();
+ }
+
+ assert(Tok.is(tok::l_paren) && "Expected '('");
+
+ // declarator
+ TPR = TryParseDeclarator(true/*mayBeAbstract*/, false/*mayHaveIdentifier*/);
+
+ // In case of an error, let the declaration parsing code handle it.
+ if (TPR == TPResult::Error())
+ TPR = TPResult::True();
+
+ if (TPR == TPResult::Ambiguous()) {
+ // We are supposed to be inside parens, so if after the abstract declarator
+ // we encounter a ')' this is a type-id, otherwise it's an expression.
+ if (Context == TypeIdInParens && Tok.is(tok::r_paren)) {
+ TPR = TPResult::True();
+ isAmbiguous = true;
+
+ // We are supposed to be inside a template argument, so if after
+ // the abstract declarator we encounter a '>', '>>' (in C++0x), or
+ // ',', this is a type-id. Otherwise, it's an expression.
+ } else if (Context == TypeIdAsTemplateArgument &&
+ (Tok.is(tok::greater) || Tok.is(tok::comma) ||
+ (getLangOpts().CPlusPlus0x && Tok.is(tok::greatergreater)))) {
+ TPR = TPResult::True();
+ isAmbiguous = true;
+
+ } else
+ TPR = TPResult::False();
+ }
+
+ PA.Revert();
+
+ assert(TPR == TPResult::True() || TPR == TPResult::False());
+ return TPR == TPResult::True();
+}
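+
+// For example, in the TypeIdAsTemplateArgument context (cf. the examples in
+// C++ [dcl.ambig.res]):
+//
+//   X<int()>    // 'int()' is a type-id: function returning int
+//   X<int(1)>   // 'int(1)' is an expression: function-style cast
+//
+// Only in the first case does the tentative abstract-declarator parse end on
+// the '>' that closes the template argument.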
+
+/// \brief Returns true if this is a C++11 attribute-specifier. Per
+/// C++11 [dcl.attr.grammar]p6, two consecutive left square bracket tokens
+/// always introduce an attribute. In Objective-C++11, this rule does not
+/// apply if either '[' begins a message-send.
+///
+/// If Disambiguate is true, we try harder to determine whether a '[[' starts
+/// an attribute-specifier, and return CAK_InvalidAttributeSpecifier if not.
+///
+/// If OuterMightBeMessageSend is true, we assume the outer '[' is either an
+/// Obj-C message send or the start of an attribute. Otherwise, we assume it
+/// is not an Obj-C message send.
+///
+/// C++11 [dcl.attr.grammar]:
+///
+/// attribute-specifier:
+/// '[' '[' attribute-list ']' ']'
+/// alignment-specifier
+///
+/// attribute-list:
+/// attribute[opt]
+/// attribute-list ',' attribute[opt]
+/// attribute '...'
+/// attribute-list ',' attribute '...'
+///
+/// attribute:
+/// attribute-token attribute-argument-clause[opt]
+///
+/// attribute-token:
+/// identifier
+/// identifier '::' identifier
+///
+/// attribute-argument-clause:
+/// '(' balanced-token-seq ')'
+Parser::CXX11AttributeKind
+Parser::isCXX11AttributeSpecifier(bool Disambiguate,
+ bool OuterMightBeMessageSend) {
+ if (Tok.is(tok::kw_alignas))
+ return CAK_AttributeSpecifier;
+
+ if (Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square))
+ return CAK_NotAttributeSpecifier;
+
+ // No tentative parsing if we don't need to look for ']]' or a lambda.
+ if (!Disambiguate && !getLangOpts().ObjC1)
+ return CAK_AttributeSpecifier;
+
+ TentativeParsingAction PA(*this);
+
+ // Opening brackets were checked for above.
+ ConsumeBracket();
+
+ // Outside Obj-C++11, treat anything with a matching ']]' as an attribute.
+ if (!getLangOpts().ObjC1) {
+ ConsumeBracket();
+
+ bool IsAttribute = SkipUntil(tok::r_square, false);
+ IsAttribute &= Tok.is(tok::r_square);
+
+ PA.Revert();
+
+ return IsAttribute ? CAK_AttributeSpecifier : CAK_InvalidAttributeSpecifier;
+ }
+
+ // In Obj-C++11, we need to distinguish four situations:
+ // 1a) int x[[attr]]; C++11 attribute.
+ // 1b) [[attr]]; C++11 statement attribute.
+ // 2) int x[[obj](){ return 1; }()]; Lambda in array size/index.
+ // 3a) int x[[obj get]]; Message send in array size/index.
+ // 3b) [[Class alloc] init]; Message send in message send.
+ // 4) [[obj]{ return self; }() doStuff]; Lambda in message send.
+ // (1) is an attribute, (2) is ill-formed, and (3) and (4) are accepted.
+
+ // If we have a lambda-introducer, then this is definitely not a message send.
+ // FIXME: If this disambiguation is too slow, fold the tentative lambda parse
+ // into the tentative attribute parse below.
+ LambdaIntroducer Intro;
+ if (!TryParseLambdaIntroducer(Intro)) {
+ // A lambda cannot end with ']]', and an attribute must.
+ bool IsAttribute = Tok.is(tok::r_square);
+
+ PA.Revert();
+
+ if (IsAttribute)
+ // Case 1: C++11 attribute.
+ return CAK_AttributeSpecifier;
+
+ if (OuterMightBeMessageSend)
+ // Case 4: Lambda in message send.
+ return CAK_NotAttributeSpecifier;
+
+ // Case 2: Lambda in array size / index.
+ return CAK_InvalidAttributeSpecifier;
+ }
+
+ ConsumeBracket();
+
+ // If we don't have a lambda-introducer, then we have an attribute or a
+ // message-send.
+ bool IsAttribute = true;
+ while (Tok.isNot(tok::r_square)) {
+ if (Tok.is(tok::comma)) {
+ // Case 1: Stray commas can only occur in attributes.
+ PA.Revert();
+ return CAK_AttributeSpecifier;
+ }
+
+ // Parse the attribute-token, if present.
+ // C++11 [dcl.attr.grammar]:
+ // If a keyword or an alternative token that satisfies the syntactic
+ // requirements of an identifier is contained in an attribute-token,
+ // it is considered an identifier.
+ SourceLocation Loc;
+ if (!TryParseCXX11AttributeIdentifier(Loc)) {
+ IsAttribute = false;
+ break;
+ }
+ if (Tok.is(tok::coloncolon)) {
+ ConsumeToken();
+ if (!TryParseCXX11AttributeIdentifier(Loc)) {
+ IsAttribute = false;
+ break;
+ }
+ }
+
+ // Parse the attribute-argument-clause, if present.
+ if (Tok.is(tok::l_paren)) {
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren, false)) {
+ IsAttribute = false;
+ break;
+ }
+ }
+
+ if (Tok.is(tok::ellipsis))
+ ConsumeToken();
+
+ if (Tok.isNot(tok::comma))
+ break;
+
+ ConsumeToken();
+ }
+
+ // An attribute must end ']]'.
+ if (IsAttribute) {
+ if (Tok.is(tok::r_square)) {
+ ConsumeBracket();
+ IsAttribute = Tok.is(tok::r_square);
+ } else {
+ IsAttribute = false;
+ }
+ }
+
+ PA.Revert();
+
+ if (IsAttribute)
+ // Case 1: C++11 statement attribute.
+ return CAK_AttributeSpecifier;
+
+ // Case 3: Message send.
+ return CAK_NotAttributeSpecifier;
+}
+
+/// declarator:
+/// direct-declarator
+/// ptr-operator declarator
+///
+/// direct-declarator:
+/// declarator-id
+/// direct-declarator '(' parameter-declaration-clause ')'
+/// cv-qualifier-seq[opt] exception-specification[opt]
+/// direct-declarator '[' constant-expression[opt] ']'
+/// '(' declarator ')'
+/// [GNU] '(' attributes declarator ')'
+///
+/// abstract-declarator:
+/// ptr-operator abstract-declarator[opt]
+/// direct-abstract-declarator
+/// ...
+///
+/// direct-abstract-declarator:
+/// direct-abstract-declarator[opt]
+/// '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
+/// exception-specification[opt]
+/// direct-abstract-declarator[opt] '[' constant-expression[opt] ']'
+/// '(' abstract-declarator ')'
+///
+/// ptr-operator:
+/// '*' cv-qualifier-seq[opt]
+/// '&'
+/// [C++0x] '&&' [TODO]
+/// '::'[opt] nested-name-specifier '*' cv-qualifier-seq[opt]
+///
+/// cv-qualifier-seq:
+/// cv-qualifier cv-qualifier-seq[opt]
+///
+/// cv-qualifier:
+/// 'const'
+/// 'volatile'
+///
+/// declarator-id:
+/// '...'[opt] id-expression
+///
+/// id-expression:
+/// unqualified-id
+/// qualified-id [TODO]
+///
+/// unqualified-id:
+/// identifier
+/// operator-function-id [TODO]
+/// conversion-function-id [TODO]
+/// '~' class-name [TODO]
+/// template-id [TODO]
+///
+Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
+ bool mayHaveIdentifier) {
+ // declarator:
+ // direct-declarator
+ // ptr-operator declarator
+
+ while (1) {
+ if (Tok.is(tok::coloncolon) || Tok.is(tok::identifier))
+ if (TryAnnotateCXXScopeToken(true))
+ return TPResult::Error();
+
+ if (Tok.is(tok::star) || Tok.is(tok::amp) || Tok.is(tok::caret) ||
+ Tok.is(tok::ampamp) ||
+ (Tok.is(tok::annot_cxxscope) && NextToken().is(tok::star))) {
+ // ptr-operator
+ ConsumeToken();
+ while (Tok.is(tok::kw_const) ||
+ Tok.is(tok::kw_volatile) ||
+ Tok.is(tok::kw_restrict))
+ ConsumeToken();
+ } else {
+ break;
+ }
+ }
+
+ // direct-declarator:
+ // direct-abstract-declarator:
+ if (Tok.is(tok::ellipsis))
+ ConsumeToken();
+
+ if ((Tok.is(tok::identifier) ||
+ (Tok.is(tok::annot_cxxscope) && NextToken().is(tok::identifier))) &&
+ mayHaveIdentifier) {
+ // declarator-id
+ if (Tok.is(tok::annot_cxxscope))
+ ConsumeToken();
+ ConsumeToken();
+ } else if (Tok.is(tok::l_paren)) {
+ ConsumeParen();
+ if (mayBeAbstract &&
+ (Tok.is(tok::r_paren) || // 'int()' is a function.
+ // 'int(...)' is a function.
+ (Tok.is(tok::ellipsis) && NextToken().is(tok::r_paren)) ||
+ isDeclarationSpecifier())) { // 'int(int)' is a function.
+ // '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
+ // exception-specification[opt]
+ TPResult TPR = TryParseFunctionDeclarator();
+ if (TPR != TPResult::Ambiguous())
+ return TPR;
+ } else {
+ // '(' declarator ')'
+ // '(' attributes declarator ')'
+ // '(' abstract-declarator ')'
+ if (Tok.is(tok::kw___attribute) ||
+ Tok.is(tok::kw___declspec) ||
+ Tok.is(tok::kw___cdecl) ||
+ Tok.is(tok::kw___stdcall) ||
+ Tok.is(tok::kw___fastcall) ||
+ Tok.is(tok::kw___thiscall) ||
+ Tok.is(tok::kw___unaligned))
+ return TPResult::True(); // attributes indicate declaration
+ TPResult TPR = TryParseDeclarator(mayBeAbstract, mayHaveIdentifier);
+ if (TPR != TPResult::Ambiguous())
+ return TPR;
+ if (Tok.isNot(tok::r_paren))
+ return TPResult::False();
+ ConsumeParen();
+ }
+ } else if (!mayBeAbstract) {
+ return TPResult::False();
+ }
+
+ while (1) {
+ TPResult TPR(TPResult::Ambiguous());
+
+ // abstract-declarator: ...
+ if (Tok.is(tok::ellipsis))
+ ConsumeToken();
+
+ if (Tok.is(tok::l_paren)) {
+ // Check whether we have a function declarator or a possible ctor-style
+ // initializer that follows the declarator. Note that ctor-style
+ // initializers are not possible in contexts where abstract declarators
+ // are allowed.
+ if (!mayBeAbstract && !isCXXFunctionDeclarator(false/*warnIfAmbiguous*/))
+ break;
+
+ // direct-declarator '(' parameter-declaration-clause ')'
+ // cv-qualifier-seq[opt] exception-specification[opt]
+ ConsumeParen();
+ TPR = TryParseFunctionDeclarator();
+ } else if (Tok.is(tok::l_square)) {
+ // direct-declarator '[' constant-expression[opt] ']'
+ // direct-abstract-declarator[opt] '[' constant-expression[opt] ']'
+ TPR = TryParseBracketDeclarator();
+ } else {
+ break;
+ }
+
+ if (TPR != TPResult::Ambiguous())
+ return TPR;
+ }
+
+ return TPResult::Ambiguous();
+}
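+
+// For example, after a type 'T' the tentative declarator parse of
+//
+//   T (*p)[4];    // ptr-operator, parenthesized declarator, brackets
+//
+// works through each piece and returns Ambiguous(), letting the declaration
+// interpretation win, whereas for
+//
+//   T (x + 1);
+//
+// the inner parse stops at '+', no ')' follows the declarator, and the
+// routine returns False() (expression).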
+
+Parser::TPResult
+Parser::isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind) {
+ switch (Kind) {
+ // Obviously starts an expression.
+ case tok::numeric_constant:
+ case tok::char_constant:
+ case tok::wide_char_constant:
+ case tok::utf16_char_constant:
+ case tok::utf32_char_constant:
+ case tok::string_literal:
+ case tok::wide_string_literal:
+ case tok::utf8_string_literal:
+ case tok::utf16_string_literal:
+ case tok::utf32_string_literal:
+ case tok::l_square:
+ case tok::l_paren:
+ case tok::amp:
+ case tok::ampamp:
+ case tok::star:
+ case tok::plus:
+ case tok::plusplus:
+ case tok::minus:
+ case tok::minusminus:
+ case tok::tilde:
+ case tok::exclaim:
+ case tok::kw_sizeof:
+ case tok::kw___func__:
+ case tok::kw_const_cast:
+ case tok::kw_delete:
+ case tok::kw_dynamic_cast:
+ case tok::kw_false:
+ case tok::kw_new:
+ case tok::kw_operator:
+ case tok::kw_reinterpret_cast:
+ case tok::kw_static_cast:
+ case tok::kw_this:
+ case tok::kw_throw:
+ case tok::kw_true:
+ case tok::kw_typeid:
+ case tok::kw_alignof:
+ case tok::kw_noexcept:
+ case tok::kw_nullptr:
+ case tok::kw___null:
+ case tok::kw___alignof:
+ case tok::kw___builtin_choose_expr:
+ case tok::kw___builtin_offsetof:
+ case tok::kw___builtin_types_compatible_p:
+ case tok::kw___builtin_va_arg:
+ case tok::kw___imag:
+ case tok::kw___real:
+ case tok::kw___FUNCTION__:
+ case tok::kw___PRETTY_FUNCTION__:
+ case tok::kw___has_nothrow_assign:
+ case tok::kw___has_nothrow_copy:
+ case tok::kw___has_nothrow_constructor:
+ case tok::kw___has_trivial_assign:
+ case tok::kw___has_trivial_copy:
+ case tok::kw___has_trivial_constructor:
+ case tok::kw___has_trivial_destructor:
+ case tok::kw___has_virtual_destructor:
+ case tok::kw___is_abstract:
+ case tok::kw___is_base_of:
+ case tok::kw___is_class:
+ case tok::kw___is_convertible_to:
+ case tok::kw___is_empty:
+ case tok::kw___is_enum:
+ case tok::kw___is_final:
+ case tok::kw___is_literal:
+ case tok::kw___is_literal_type:
+ case tok::kw___is_pod:
+ case tok::kw___is_polymorphic:
+ case tok::kw___is_trivial:
+ case tok::kw___is_trivially_assignable:
+ case tok::kw___is_trivially_constructible:
+ case tok::kw___is_trivially_copyable:
+ case tok::kw___is_union:
+ case tok::kw___uuidof:
+ return TPResult::True();
+
+ // Obviously starts a type-specifier-seq:
+ case tok::kw_char:
+ case tok::kw_const:
+ case tok::kw_double:
+ case tok::kw_enum:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_int:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_restrict:
+ case tok::kw_short:
+ case tok::kw_signed:
+ case tok::kw_struct:
+ case tok::kw_union:
+ case tok::kw_unsigned:
+ case tok::kw_void:
+ case tok::kw_volatile:
+ case tok::kw__Bool:
+ case tok::kw__Complex:
+ case tok::kw_class:
+ case tok::kw_typename:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw___underlying_type:
+ case tok::kw_thread_local:
+ case tok::kw__Decimal32:
+ case tok::kw__Decimal64:
+ case tok::kw__Decimal128:
+ case tok::kw___thread:
+ case tok::kw_typeof:
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ case tok::kw___thiscall:
+ case tok::kw___unaligned:
+ case tok::kw___vector:
+ case tok::kw___pixel:
+ case tok::kw__Atomic:
+ return TPResult::False();
+
+ default:
+ break;
+ }
+
+ return TPResult::Ambiguous();
+}
+
+/// isCXXDeclarationSpecifier - Returns TPResult::True() if it is a declaration
+/// specifier, TPResult::False() if it is not, TPResult::Ambiguous() if it could
+/// be either a decl-specifier or a function-style cast, and TPResult::Error()
+/// if a parsing error was found and reported.
+///
+/// decl-specifier:
+/// storage-class-specifier
+/// type-specifier
+/// function-specifier
+/// 'friend'
+/// 'typedef'
+/// [C++0x] 'constexpr'
+/// [GNU] attributes declaration-specifiers[opt]
+///
+/// storage-class-specifier:
+/// 'register'
+/// 'static'
+/// 'extern'
+/// 'mutable'
+/// 'auto'
+/// [GNU] '__thread'
+///
+/// function-specifier:
+/// 'inline'
+/// 'virtual'
+/// 'explicit'
+///
+/// typedef-name:
+/// identifier
+///
+/// type-specifier:
+/// simple-type-specifier
+/// class-specifier
+/// enum-specifier
+/// elaborated-type-specifier
+/// typename-specifier
+/// cv-qualifier
+///
+/// simple-type-specifier:
+/// '::'[opt] nested-name-specifier[opt] type-name
+/// '::'[opt] nested-name-specifier 'template'
+/// simple-template-id [TODO]
+/// 'char'
+/// 'wchar_t'
+/// 'bool'
+/// 'short'
+/// 'int'
+/// 'long'
+/// 'signed'
+/// 'unsigned'
+/// 'float'
+/// 'double'
+/// 'void'
+/// [GNU] typeof-specifier
+/// [GNU] '_Complex'
+/// [C++0x] 'auto' [TODO]
+/// [C++0x] 'decltype' ( expression )
+///
+/// type-name:
+/// class-name
+/// enum-name
+/// typedef-name
+///
+/// elaborated-type-specifier:
+/// class-key '::'[opt] nested-name-specifier[opt] identifier
+/// class-key '::'[opt] nested-name-specifier[opt] 'template'[opt]
+/// simple-template-id
+/// 'enum' '::'[opt] nested-name-specifier[opt] identifier
+///
+/// enum-name:
+/// identifier
+///
+/// enum-specifier:
+/// 'enum' identifier[opt] '{' enumerator-list[opt] '}'
+/// 'enum' identifier[opt] '{' enumerator-list ',' '}'
+///
+/// class-specifier:
+/// class-head '{' member-specification[opt] '}'
+///
+/// class-head:
+/// class-key identifier[opt] base-clause[opt]
+/// class-key nested-name-specifier identifier base-clause[opt]
+/// class-key nested-name-specifier[opt] simple-template-id
+/// base-clause[opt]
+///
+/// class-key:
+/// 'class'
+/// 'struct'
+/// 'union'
+///
+/// cv-qualifier:
+/// 'const'
+/// 'volatile'
+/// [GNU] restrict
+///
+Parser::TPResult
+Parser::isCXXDeclarationSpecifier(Parser::TPResult BracedCastResult) {
+ switch (Tok.getKind()) {
+ case tok::identifier: // foo::bar
+    // Check whether we need to substitute the AltiVec __vector keyword
+    // for the "vector" identifier.
+ if (TryAltiVecVectorToken())
+ return TPResult::True();
+ // Fall through.
+ case tok::kw_typename: // typename T::type
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return TPResult::Error();
+ if (Tok.is(tok::identifier))
+ return TPResult::False();
+ return isCXXDeclarationSpecifier(BracedCastResult);
+
+ case tok::coloncolon: { // ::foo::bar
+ const Token &Next = NextToken();
+ if (Next.is(tok::kw_new) || // ::new
+ Next.is(tok::kw_delete)) // ::delete
+ return TPResult::False();
+ }
+ // Fall through.
+ case tok::kw_decltype:
+ // Annotate typenames and C++ scope specifiers. If we get one, just
+ // recurse to handle whatever we get.
+ if (TryAnnotateTypeOrScopeToken())
+ return TPResult::Error();
+ return isCXXDeclarationSpecifier(BracedCastResult);
+
+ // decl-specifier:
+ // storage-class-specifier
+ // type-specifier
+ // function-specifier
+ // 'friend'
+ // 'typedef'
+ // 'constexpr'
+ case tok::kw_friend:
+ case tok::kw_typedef:
+ case tok::kw_constexpr:
+ // storage-class-specifier
+ case tok::kw_register:
+ case tok::kw_static:
+ case tok::kw_extern:
+ case tok::kw_mutable:
+ case tok::kw_auto:
+ case tok::kw___thread:
+ // function-specifier
+ case tok::kw_inline:
+ case tok::kw_virtual:
+ case tok::kw_explicit:
+
+ // Modules
+ case tok::kw___module_private__:
+
+ // type-specifier:
+ // simple-type-specifier
+ // class-specifier
+ // enum-specifier
+ // elaborated-type-specifier
+ // typename-specifier
+ // cv-qualifier
+
+ // class-specifier
+ // elaborated-type-specifier
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union:
+ // enum-specifier
+ case tok::kw_enum:
+ // cv-qualifier
+ case tok::kw_const:
+ case tok::kw_volatile:
+
+ // GNU
+ case tok::kw_restrict:
+ case tok::kw__Complex:
+ case tok::kw___attribute:
+ return TPResult::True();
+
+ // Microsoft
+ case tok::kw___declspec:
+ case tok::kw___cdecl:
+ case tok::kw___stdcall:
+ case tok::kw___fastcall:
+ case tok::kw___thiscall:
+ case tok::kw___w64:
+ case tok::kw___ptr64:
+ case tok::kw___ptr32:
+ case tok::kw___forceinline:
+ case tok::kw___unaligned:
+ return TPResult::True();
+
+ // Borland
+ case tok::kw___pascal:
+ return TPResult::True();
+
+ // AltiVec
+ case tok::kw___vector:
+ return TPResult::True();
+
+ case tok::annot_template_id: {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ if (TemplateId->Kind != TNK_Type_template)
+ return TPResult::False();
+ CXXScopeSpec SS;
+ AnnotateTemplateIdTokenAsType();
+ assert(Tok.is(tok::annot_typename));
+ goto case_typename;
+ }
+
+ case tok::annot_cxxscope: // foo::bar or ::foo::bar, but already parsed
+ // We've already annotated a scope; try to annotate a type.
+ if (TryAnnotateTypeOrScopeToken())
+ return TPResult::Error();
+ if (!Tok.is(tok::annot_typename)) {
+ // If the next token is an identifier or a type qualifier, then this
+ // can't possibly be a valid expression either.
+ if (Tok.is(tok::annot_cxxscope) && NextToken().is(tok::identifier)) {
+ CXXScopeSpec SS;
+ Actions.RestoreNestedNameSpecifierAnnotation(Tok.getAnnotationValue(),
+ Tok.getAnnotationRange(),
+ SS);
+ if (SS.getScopeRep() && SS.getScopeRep()->isDependent()) {
+ TentativeParsingAction PA(*this);
+ ConsumeToken();
+ ConsumeToken();
+ bool isIdentifier = Tok.is(tok::identifier);
+ TPResult TPR = TPResult::False();
+ if (!isIdentifier)
+ TPR = isCXXDeclarationSpecifier(BracedCastResult);
+ PA.Revert();
+
+ if (isIdentifier ||
+ TPR == TPResult::True() || TPR == TPResult::Error())
+ return TPResult::Error();
+ }
+ }
+ return TPResult::False();
+ }
+ // If that succeeded, fallthrough into the generic simple-type-id case.
+
+ // The ambiguity resides in a simple-type-specifier/typename-specifier
+ // followed by a '('. The '(' could either be the start of:
+ //
+ // direct-declarator:
+ // '(' declarator ')'
+ //
+ // direct-abstract-declarator:
+ // '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
+ // exception-specification[opt]
+ // '(' abstract-declarator ')'
+ //
+ // or part of a function-style cast expression:
+ //
+ // simple-type-specifier '(' expression-list[opt] ')'
+ //
+
+ // simple-type-specifier:
+
+ case tok::annot_typename:
+ case_typename:
+ // In Objective-C, we might have a protocol-qualified type.
+ if (getLangOpts().ObjC1 && NextToken().is(tok::less)) {
+      // Tentatively parse the protocol qualifiers.
+ TentativeParsingAction PA(*this);
+ ConsumeToken(); // The type token
+
+ TPResult TPR = TryParseProtocolQualifiers();
+ bool isFollowedByParen = Tok.is(tok::l_paren);
+ bool isFollowedByBrace = Tok.is(tok::l_brace);
+
+ PA.Revert();
+
+ if (TPR == TPResult::Error())
+ return TPResult::Error();
+
+ if (isFollowedByParen)
+ return TPResult::Ambiguous();
+
+ if (getLangOpts().CPlusPlus0x && isFollowedByBrace)
+ return BracedCastResult;
+
+ return TPResult::True();
+ }
+
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_bool:
+ case tok::kw_short:
+ case tok::kw_int:
+ case tok::kw_long:
+ case tok::kw___int64:
+ case tok::kw___int128:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_half:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_void:
+ case tok::annot_decltype:
+ if (NextToken().is(tok::l_paren))
+ return TPResult::Ambiguous();
+
+ // This is a function-style cast in all cases we disambiguate other than
+ // one:
+ // struct S {
+ // enum E : int { a = 4 }; // enum
+ // enum E : int { 4 }; // bit-field
+ // };
+ if (getLangOpts().CPlusPlus0x && NextToken().is(tok::l_brace))
+ return BracedCastResult;
+
+ if (isStartOfObjCClassMessageMissingOpenBracket())
+ return TPResult::False();
+
+ return TPResult::True();
+
+ // GNU typeof support.
+ case tok::kw_typeof: {
+ if (NextToken().isNot(tok::l_paren))
+ return TPResult::True();
+
+ TentativeParsingAction PA(*this);
+
+ TPResult TPR = TryParseTypeofSpecifier();
+ bool isFollowedByParen = Tok.is(tok::l_paren);
+ bool isFollowedByBrace = Tok.is(tok::l_brace);
+
+ PA.Revert();
+
+ if (TPR == TPResult::Error())
+ return TPResult::Error();
+
+ if (isFollowedByParen)
+ return TPResult::Ambiguous();
+
+ if (getLangOpts().CPlusPlus0x && isFollowedByBrace)
+ return BracedCastResult;
+
+ return TPResult::True();
+ }
+
+ // C++0x type traits support
+ case tok::kw___underlying_type:
+ return TPResult::True();
+
+ // C11 _Atomic
+ case tok::kw__Atomic:
+ return TPResult::True();
+
+ default:
+ return TPResult::False();
+ }
+}
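+
+// A few representative outcomes of the classification above:
+//
+//   const ...    -> True():      'const' cannot start an expression
+//   int (...     -> Ambiguous(): possibly a function-style cast 'int(...)'
+//   name (...    -> False():     when 'name' does not annotate as a type
+//
+// Callers in this file only fall back to tentative parsing for the
+// Ambiguous() case.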
+
+/// [GNU] typeof-specifier:
+/// 'typeof' '(' expressions ')'
+/// 'typeof' '(' type-name ')'
+///
+Parser::TPResult Parser::TryParseTypeofSpecifier() {
+ assert(Tok.is(tok::kw_typeof) && "Expected 'typeof'!");
+ ConsumeToken();
+
+ assert(Tok.is(tok::l_paren) && "Expected '('");
+ // Parse through the parens after 'typeof'.
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren))
+ return TPResult::Error();
+
+ return TPResult::Ambiguous();
+}
+
+/// [ObjC] protocol-qualifiers:
+///         '<' identifier-list '>'
+Parser::TPResult Parser::TryParseProtocolQualifiers() {
+ assert(Tok.is(tok::less) && "Expected '<' for qualifier list");
+ ConsumeToken();
+ do {
+ if (Tok.isNot(tok::identifier))
+ return TPResult::Error();
+ ConsumeToken();
+
+ if (Tok.is(tok::comma)) {
+ ConsumeToken();
+ continue;
+ }
+
+ if (Tok.is(tok::greater)) {
+ ConsumeToken();
+ return TPResult::Ambiguous();
+ }
+ } while (false);
+
+ return TPResult::Error();
+}
+
+Parser::TPResult Parser::TryParseDeclarationSpecifier() {
+ TPResult TPR = isCXXDeclarationSpecifier();
+ if (TPR != TPResult::Ambiguous())
+ return TPR;
+
+ if (Tok.is(tok::kw_typeof))
+ TryParseTypeofSpecifier();
+ else {
+ ConsumeToken();
+
+ if (getLangOpts().ObjC1 && Tok.is(tok::less))
+ TryParseProtocolQualifiers();
+ }
+
+ assert(Tok.is(tok::l_paren) && "Expected '('!");
+ return TPResult::Ambiguous();
+}
+
+/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
+/// a constructor-style initializer, when parsing declaration statements.
+/// Returns true for function declarator and false for constructor-style
+/// initializer.
+/// If during the disambiguation process a parsing error is encountered,
+/// the function returns true to let the declaration parsing code handle it.
+///
+/// '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
+/// exception-specification[opt]
+///
+bool Parser::isCXXFunctionDeclarator(bool warnIfAmbiguous) {
+
+ // C++ 8.2p1:
+ // The ambiguity arising from the similarity between a function-style cast and
+ // a declaration mentioned in 6.8 can also occur in the context of a
+ // declaration. In that context, the choice is between a function declaration
+ // with a redundant set of parentheses around a parameter name and an object
+ // declaration with a function-style cast as the initializer. Just as for the
+ // ambiguities mentioned in 6.8, the resolution is to consider any construct
+ // that could possibly be a declaration a declaration.
+
+ TentativeParsingAction PA(*this);
+
+ ConsumeParen();
+ TPResult TPR = TryParseParameterDeclarationClause();
+ if (TPR == TPResult::Ambiguous() && Tok.isNot(tok::r_paren))
+ TPR = TPResult::False();
+
+ SourceLocation TPLoc = Tok.getLocation();
+ PA.Revert();
+
+ // In case of an error, let the declaration parsing code handle it.
+ if (TPR == TPResult::Error())
+ return true;
+
+ if (TPR == TPResult::Ambiguous()) {
+ // Function declarator has precedence over constructor-style initializer.
+ // Emit a warning just in case the author intended a variable definition.
+ if (warnIfAmbiguous)
+ Diag(Tok, diag::warn_parens_disambiguated_as_function_decl)
+ << SourceRange(Tok.getLocation(), TPLoc);
+ return true;
+ }
+
+ return TPR == TPResult::True();
+}
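+
+// For example, assuming 'T' names a class type,
+//
+//   T x(T(y));
+//
+// is ambiguous between a function 'x' taking a parameter 'y' of type T
+// (with redundant parentheses) and an object 'x' initialized with the
+// function-style cast 'T(y)'. The function declarator interpretation wins;
+// with warnIfAmbiguous set, the warning above points at the parenthesized
+// range in case an object definition was intended.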
+
+/// parameter-declaration-clause:
+/// parameter-declaration-list[opt] '...'[opt]
+/// parameter-declaration-list ',' '...'
+///
+/// parameter-declaration-list:
+/// parameter-declaration
+/// parameter-declaration-list ',' parameter-declaration
+///
+/// parameter-declaration:
+/// attribute-specifier-seq[opt] decl-specifier-seq declarator attributes[opt]
+/// attribute-specifier-seq[opt] decl-specifier-seq declarator attributes[opt]
+/// '=' assignment-expression
+/// attribute-specifier-seq[opt] decl-specifier-seq abstract-declarator[opt]
+/// attributes[opt]
+/// attribute-specifier-seq[opt] decl-specifier-seq abstract-declarator[opt]
+/// attributes[opt] '=' assignment-expression
+///
+Parser::TPResult Parser::TryParseParameterDeclarationClause() {
+
+ if (Tok.is(tok::r_paren))
+ return TPResult::True();
+
+ // parameter-declaration-list[opt] '...'[opt]
+ // parameter-declaration-list ',' '...'
+ //
+ // parameter-declaration-list:
+ // parameter-declaration
+ // parameter-declaration-list ',' parameter-declaration
+ //
+ while (1) {
+ // '...'[opt]
+ if (Tok.is(tok::ellipsis)) {
+ ConsumeToken();
+ if (Tok.is(tok::r_paren))
+ return TPResult::True(); // '...)' is a sign of a function declarator.
+ else
+ return TPResult::False();
+ }
+
+ // An attribute-specifier-seq here is a sign of a function declarator.
+ if (isCXX11AttributeSpecifier(/*Disambiguate*/false,
+ /*OuterMightBeMessageSend*/true))
+ return TPResult::True();
+
+ ParsedAttributes attrs(AttrFactory);
+ MaybeParseMicrosoftAttributes(attrs);
+
+ // decl-specifier-seq
+ // A parameter-declaration's initializer must be preceded by an '=', so
+ // decl-specifier-seq '{' is not a parameter in C++11.
+ TPResult TPR = TryParseDeclarationSpecifier();
+ if (TPR != TPResult::Ambiguous())
+ return TPR;
+
+ // declarator
+ // abstract-declarator[opt]
+ TPR = TryParseDeclarator(true/*mayBeAbstract*/);
+ if (TPR != TPResult::Ambiguous())
+ return TPR;
+
+ // [GNU] attributes[opt]
+ if (Tok.is(tok::kw___attribute))
+ return TPResult::True();
+
+ if (Tok.is(tok::equal)) {
+ // '=' assignment-expression
+ // Parse through assignment-expression.
+ if (!SkipUntil(tok::comma, tok::r_paren, true/*StopAtSemi*/,
+ true/*DontConsume*/))
+ return TPResult::Error();
+ }
+
+ if (Tok.is(tok::ellipsis)) {
+ ConsumeToken();
+ if (Tok.is(tok::r_paren))
+ return TPResult::True(); // '...)' is a sign of a function declarator.
+ else
+ return TPResult::False();
+ }
+
+ if (Tok.isNot(tok::comma))
+ break;
+ ConsumeToken(); // the comma.
+ }
+
+ return TPResult::Ambiguous();
+}
+
+/// TryParseFunctionDeclarator - We parsed a '(' and we want to try to continue
+/// parsing as a function declarator.
+/// If TryParseFunctionDeclarator fully parsed the function declarator, it will
+/// return TPResult::Ambiguous(), otherwise it will return either False() or
+/// Error().
+///
+/// '(' parameter-declaration-clause ')' cv-qualifier-seq[opt]
+/// exception-specification[opt]
+///
+/// exception-specification:
+/// 'throw' '(' type-id-list[opt] ')'
+///
+Parser::TPResult Parser::TryParseFunctionDeclarator() {
+
+ // The '(' is already parsed.
+
+ TPResult TPR = TryParseParameterDeclarationClause();
+ if (TPR == TPResult::Ambiguous() && Tok.isNot(tok::r_paren))
+ TPR = TPResult::False();
+
+ if (TPR == TPResult::False() || TPR == TPResult::Error())
+ return TPR;
+
+ // Parse through the parens.
+ if (!SkipUntil(tok::r_paren))
+ return TPResult::Error();
+
+ // cv-qualifier-seq
+ while (Tok.is(tok::kw_const) ||
+ Tok.is(tok::kw_volatile) ||
+ Tok.is(tok::kw_restrict) )
+ ConsumeToken();
+
+ // ref-qualifier[opt]
+ if (Tok.is(tok::amp) || Tok.is(tok::ampamp))
+ ConsumeToken();
+
+ // exception-specification
+ if (Tok.is(tok::kw_throw)) {
+ ConsumeToken();
+ if (Tok.isNot(tok::l_paren))
+ return TPResult::Error();
+
+ // Parse through the parens after 'throw'.
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren))
+ return TPResult::Error();
+ }
+ if (Tok.is(tok::kw_noexcept)) {
+ ConsumeToken();
+ // Possibly an expression as well.
+ if (Tok.is(tok::l_paren)) {
+ // Find the matching rparen.
+ ConsumeParen();
+ if (!SkipUntil(tok::r_paren))
+ return TPResult::Error();
+ }
+ }
+
+ return TPResult::Ambiguous();
+}
+
+/// '[' constant-expression[opt] ']'
+///
+Parser::TPResult Parser::TryParseBracketDeclarator() {
+ ConsumeBracket();
+ if (!SkipUntil(tok::r_square))
+ return TPResult::Error();
+
+ return TPResult::Ambiguous();
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/Parser.cpp b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
new file mode 100644
index 0000000..054a8fd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/Parser.cpp
@@ -0,0 +1,1700 @@
+//===--- Parser.cpp - C Language Family Parser ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Parser interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "llvm/Support/raw_ostream.h"
+#include "RAIIObjectsForParser.h"
+#include "ParsePragma.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ASTConsumer.h"
+using namespace clang;
+
+IdentifierInfo *Parser::getSEHExceptKeyword() {
+ // __except is accepted as a (contextual) keyword
+ if (!Ident__except && (getLangOpts().MicrosoftExt || getLangOpts().Borland))
+ Ident__except = PP.getIdentifierInfo("__except");
+
+ return Ident__except;
+}
+
+Parser::Parser(Preprocessor &pp, Sema &actions, bool SkipFunctionBodies)
+ : PP(pp), Actions(actions), Diags(PP.getDiagnostics()),
+ GreaterThanIsOperator(true), ColonIsSacred(false),
+ InMessageExpression(false), TemplateParameterDepth(0),
+ SkipFunctionBodies(SkipFunctionBodies) {
+ Tok.setKind(tok::eof);
+ Actions.CurScope = 0;
+ NumCachedScopes = 0;
+ ParenCount = BracketCount = BraceCount = 0;
+ CurParsedObjCImpl = 0;
+
+ // Add #pragma handlers. These are removed and destroyed in the
+ // destructor.
+ AlignHandler.reset(new PragmaAlignHandler(actions));
+ PP.AddPragmaHandler(AlignHandler.get());
+
+ GCCVisibilityHandler.reset(new PragmaGCCVisibilityHandler(actions));
+ PP.AddPragmaHandler("GCC", GCCVisibilityHandler.get());
+
+ OptionsHandler.reset(new PragmaOptionsHandler(actions));
+ PP.AddPragmaHandler(OptionsHandler.get());
+
+ PackHandler.reset(new PragmaPackHandler(actions));
+ PP.AddPragmaHandler(PackHandler.get());
+
+ MSStructHandler.reset(new PragmaMSStructHandler(actions));
+ PP.AddPragmaHandler(MSStructHandler.get());
+
+ UnusedHandler.reset(new PragmaUnusedHandler(actions, *this));
+ PP.AddPragmaHandler(UnusedHandler.get());
+
+ WeakHandler.reset(new PragmaWeakHandler(actions));
+ PP.AddPragmaHandler(WeakHandler.get());
+
+ RedefineExtnameHandler.reset(new PragmaRedefineExtnameHandler(actions));
+ PP.AddPragmaHandler(RedefineExtnameHandler.get());
+
+ FPContractHandler.reset(new PragmaFPContractHandler(actions, *this));
+ PP.AddPragmaHandler("STDC", FPContractHandler.get());
+
+ if (getLangOpts().OpenCL) {
+ OpenCLExtensionHandler.reset(
+ new PragmaOpenCLExtensionHandler(actions, *this));
+ PP.AddPragmaHandler("OPENCL", OpenCLExtensionHandler.get());
+
+ PP.AddPragmaHandler("OPENCL", FPContractHandler.get());
+ }
+
+ PP.setCodeCompletionHandler(*this);
+}
+
+/// If a crash happens while the parser is active, print out a line indicating
+/// what the current token is.
+void PrettyStackTraceParserEntry::print(raw_ostream &OS) const {
+ const Token &Tok = P.getCurToken();
+ if (Tok.is(tok::eof)) {
+ OS << "<eof> parser at end of file\n";
+ return;
+ }
+
+ if (Tok.getLocation().isInvalid()) {
+ OS << "<unknown> parser at unknown location\n";
+ return;
+ }
+
+ const Preprocessor &PP = P.getPreprocessor();
+ Tok.getLocation().print(OS, PP.getSourceManager());
+ if (Tok.isAnnotation())
+ OS << ": at annotation token \n";
+ else
+ OS << ": current parser token '" << PP.getSpelling(Tok) << "'\n";
+}
+
+
+DiagnosticBuilder Parser::Diag(SourceLocation Loc, unsigned DiagID) {
+ return Diags.Report(Loc, DiagID);
+}
+
+DiagnosticBuilder Parser::Diag(const Token &Tok, unsigned DiagID) {
+ return Diag(Tok.getLocation(), DiagID);
+}
+
+/// \brief Emits a diagnostic suggesting parentheses surrounding a
+/// given range.
+///
+/// \param Loc The location where we'll emit the diagnostic.
+/// \param DK The kind of diagnostic to emit.
+/// \param ParenRange Source range enclosing code that should be parenthesized.
+void Parser::SuggestParentheses(SourceLocation Loc, unsigned DK,
+ SourceRange ParenRange) {
+ SourceLocation EndLoc = PP.getLocForEndOfToken(ParenRange.getEnd());
+ if (!ParenRange.getEnd().isFileID() || EndLoc.isInvalid()) {
+    // We can't display the parentheses, so just emit the
+    // warning/error and return.
+ Diag(Loc, DK);
+ return;
+ }
+
+ Diag(Loc, DK)
+ << FixItHint::CreateInsertion(ParenRange.getBegin(), "(")
+ << FixItHint::CreateInsertion(EndLoc, ")");
+}
+
+static bool IsCommonTypo(tok::TokenKind ExpectedTok, const Token &Tok) {
+ switch (ExpectedTok) {
+ case tok::semi: return Tok.is(tok::colon); // : for ;
+ default: return false;
+ }
+}
+
+/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
+/// input. If so, it is consumed and false is returned.
+///
+/// If the input is malformed, this emits the specified diagnostic. Next, if
+/// SkipToTok is specified, it calls SkipUntil(SkipToTok). Finally, true is
+/// returned.
+bool Parser::ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned DiagID,
+ const char *Msg, tok::TokenKind SkipToTok) {
+ if (Tok.is(ExpectedTok) || Tok.is(tok::code_completion)) {
+ ConsumeAnyToken();
+ return false;
+ }
+
+ // Detect common single-character typos and resume.
+ if (IsCommonTypo(ExpectedTok, Tok)) {
+ SourceLocation Loc = Tok.getLocation();
+ Diag(Loc, DiagID)
+ << Msg
+ << FixItHint::CreateReplacement(SourceRange(Loc),
+ getTokenSimpleSpelling(ExpectedTok));
+ ConsumeAnyToken();
+
+ // Pretend there wasn't a problem.
+ return false;
+ }
+
+ const char *Spelling = 0;
+ SourceLocation EndLoc = PP.getLocForEndOfToken(PrevTokLocation);
+ if (EndLoc.isValid() &&
+ (Spelling = tok::getTokenSimpleSpelling(ExpectedTok))) {
+ // Show what code to insert to fix this problem.
+ Diag(EndLoc, DiagID)
+ << Msg
+ << FixItHint::CreateInsertion(EndLoc, Spelling);
+ } else
+ Diag(Tok, DiagID) << Msg;
+
+ if (SkipToTok != tok::unknown)
+ SkipUntil(SkipToTok);
+ return true;
+}
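+
+// For example, when a ';' is expected but the next token is ':' (the common
+// typo recognized by IsCommonTypo above), as in
+//
+//   x = f():      // fix-it: replace ':' with ';'
+//
+// the ':' is diagnosed with a replacement fix-it, consumed, and parsing
+// continues as if the ';' had been written.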
+
+bool Parser::ExpectAndConsumeSemi(unsigned DiagID) {
+ if (Tok.is(tok::semi) || Tok.is(tok::code_completion)) {
+ ConsumeAnyToken();
+ return false;
+ }
+
+ if ((Tok.is(tok::r_paren) || Tok.is(tok::r_square)) &&
+ NextToken().is(tok::semi)) {
+ Diag(Tok, diag::err_extraneous_token_before_semi)
+ << PP.getSpelling(Tok)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+ ConsumeAnyToken(); // The ')' or ']'.
+ ConsumeToken(); // The ';'.
+ return false;
+ }
+
+ return ExpectAndConsume(tok::semi, DiagID);
+}
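+
+// For example, a stray ')' or ']' sitting immediately before the expected
+// ';', as in
+//
+//   return (x));
+//
+// is diagnosed as an extraneous token with a removal fix-it, and both the
+// stray token and the ';' are consumed so parsing can continue normally.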
+
+//===----------------------------------------------------------------------===//
+// Error recovery.
+//===----------------------------------------------------------------------===//
+
+/// SkipUntil - Read tokens until we get to the specified token, then consume
+/// it (unless DontConsume is true). Because we cannot guarantee that the
+/// token will ever occur, this skips to the next token, or to some likely
+/// good stopping point. If StopAtSemi is true, skipping will stop at a ';'
+/// character.
+///
+/// If SkipUntil finds the specified token, it returns true, otherwise it
+/// returns false.
+bool Parser::SkipUntil(ArrayRef<tok::TokenKind> Toks, bool StopAtSemi,
+ bool DontConsume, bool StopAtCodeCompletion) {
+ // We always want this function to skip at least one token if the first token
+ // isn't T and if not at EOF.
+ bool isFirstTokenSkipped = true;
+ while (1) {
+ // If we found one of the tokens, stop and return true.
+ for (unsigned i = 0, NumToks = Toks.size(); i != NumToks; ++i) {
+ if (Tok.is(Toks[i])) {
+ if (DontConsume) {
+ // Noop, don't consume the token.
+ } else {
+ ConsumeAnyToken();
+ }
+ return true;
+ }
+ }
+
+ switch (Tok.getKind()) {
+ case tok::eof:
+ // Ran out of tokens.
+ return false;
+
+ case tok::code_completion:
+ if (!StopAtCodeCompletion)
+ ConsumeToken();
+ return false;
+
+ case tok::l_paren:
+ // Recursively skip properly-nested parens.
+ ConsumeParen();
+ SkipUntil(tok::r_paren, false, false, StopAtCodeCompletion);
+ break;
+ case tok::l_square:
+ // Recursively skip properly-nested square brackets.
+ ConsumeBracket();
+ SkipUntil(tok::r_square, false, false, StopAtCodeCompletion);
+ break;
+ case tok::l_brace:
+ // Recursively skip properly-nested braces.
+ ConsumeBrace();
+ SkipUntil(tok::r_brace, false, false, StopAtCodeCompletion);
+ break;
+
+ // Okay, we found a ']' or '}' or ')', which we think should be balanced.
+ // Since the user wasn't looking for this token (if they were, it would
+ // already be handled), this isn't balanced. If there is a LHS token at a
+ // higher level, we will assume that this matches the unbalanced token
+ // and return it. Otherwise, this is a spurious RHS token, which we skip.
+ case tok::r_paren:
+ if (ParenCount && !isFirstTokenSkipped)
+ return false; // Matches something.
+ ConsumeParen();
+ break;
+ case tok::r_square:
+ if (BracketCount && !isFirstTokenSkipped)
+ return false; // Matches something.
+ ConsumeBracket();
+ break;
+ case tok::r_brace:
+ if (BraceCount && !isFirstTokenSkipped)
+ return false; // Matches something.
+ ConsumeBrace();
+ break;
+
+ case tok::string_literal:
+ case tok::wide_string_literal:
+ case tok::utf8_string_literal:
+ case tok::utf16_string_literal:
+ case tok::utf32_string_literal:
+ ConsumeStringToken();
+ break;
+
+ case tok::semi:
+ if (StopAtSemi)
+ return false;
+ // FALL THROUGH.
+ default:
+ // Skip this token.
+ ConsumeToken();
+ break;
+ }
+ isFirstTokenSkipped = false;
+ }
+}
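+
+// For example, while recovering inside 'f(a, b; g();', a call such as
+// SkipUntil(tok::r_paren, /*StopAtSemi=*/true) consumes 'a', ',' and 'b',
+// then stops at the ';' and returns false, leaving the ';' for the caller;
+// any nested '(', '[' or '{' met along the way is skipped as a balanced
+// group by the recursive cases above.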
+
+//===----------------------------------------------------------------------===//
+// Scope manipulation
+//===----------------------------------------------------------------------===//
+
+/// EnterScope - Start a new scope.
+void Parser::EnterScope(unsigned ScopeFlags) {
+ if (NumCachedScopes) {
+ Scope *N = ScopeCache[--NumCachedScopes];
+ N->Init(getCurScope(), ScopeFlags);
+ Actions.CurScope = N;
+ } else {
+ Actions.CurScope = new Scope(getCurScope(), ScopeFlags, Diags);
+ }
+}
+
+/// ExitScope - Pop a scope off the scope stack.
+void Parser::ExitScope() {
+ assert(getCurScope() && "Scope imbalance!");
+
+ // Inform the actions module that this scope is going away if there are any
+ // decls in it.
+ if (!getCurScope()->decl_empty())
+ Actions.ActOnPopScope(Tok.getLocation(), getCurScope());
+
+ Scope *OldScope = getCurScope();
+ Actions.CurScope = OldScope->getParent();
+
+ if (NumCachedScopes == ScopeCacheSize)
+ delete OldScope;
+ else
+ ScopeCache[NumCachedScopes++] = OldScope;
+}
+
+/// Set the flags for the current scope to ScopeFlags. If ManageFlags is false,
+/// this object does nothing.
+Parser::ParseScopeFlags::ParseScopeFlags(Parser *Self, unsigned ScopeFlags,
+ bool ManageFlags)
+ : CurScope(ManageFlags ? Self->getCurScope() : 0) {
+ if (CurScope) {
+ OldFlags = CurScope->getFlags();
+ CurScope->setFlags(ScopeFlags);
+ }
+}
+
+/// Restore the flags for the current scope to what they were before this
+/// object overrode them.
+Parser::ParseScopeFlags::~ParseScopeFlags() {
+ if (CurScope)
+ CurScope->setFlags(OldFlags);
+}
+
+
+//===----------------------------------------------------------------------===//
+// C99 6.9: External Definitions.
+//===----------------------------------------------------------------------===//
+
+Parser::~Parser() {
+ // If we still have scopes active, delete the scope tree.
+ delete getCurScope();
+ Actions.CurScope = 0;
+
+ // Free the scope cache.
+ for (unsigned i = 0, e = NumCachedScopes; i != e; ++i)
+ delete ScopeCache[i];
+
+ // Free LateParsedTemplatedFunction nodes.
+ for (LateParsedTemplateMapT::iterator it = LateParsedTemplateMap.begin();
+ it != LateParsedTemplateMap.end(); ++it)
+ delete it->second;
+
+ // Remove the pragma handlers we installed.
+ PP.RemovePragmaHandler(AlignHandler.get());
+ AlignHandler.reset();
+ PP.RemovePragmaHandler("GCC", GCCVisibilityHandler.get());
+ GCCVisibilityHandler.reset();
+ PP.RemovePragmaHandler(OptionsHandler.get());
+ OptionsHandler.reset();
+ PP.RemovePragmaHandler(PackHandler.get());
+ PackHandler.reset();
+ PP.RemovePragmaHandler(MSStructHandler.get());
+ MSStructHandler.reset();
+ PP.RemovePragmaHandler(UnusedHandler.get());
+ UnusedHandler.reset();
+ PP.RemovePragmaHandler(WeakHandler.get());
+ WeakHandler.reset();
+ PP.RemovePragmaHandler(RedefineExtnameHandler.get());
+ RedefineExtnameHandler.reset();
+
+ if (getLangOpts().OpenCL) {
+ PP.RemovePragmaHandler("OPENCL", OpenCLExtensionHandler.get());
+ OpenCLExtensionHandler.reset();
+ PP.RemovePragmaHandler("OPENCL", FPContractHandler.get());
+ }
+
+ PP.RemovePragmaHandler("STDC", FPContractHandler.get());
+ FPContractHandler.reset();
+ PP.clearCodeCompletionHandler();
+}
+
+/// Initialize - Warm up the parser.
+///
+void Parser::Initialize() {
+ // Create the translation unit scope. Install it as the current scope.
+ assert(getCurScope() == 0 && "A scope is already active?");
+ EnterScope(Scope::DeclScope);
+ Actions.ActOnTranslationUnitScope(getCurScope());
+
+ // Prime the lexer look-ahead.
+ ConsumeToken();
+
+ if (Tok.is(tok::eof) &&
+ !getLangOpts().CPlusPlus) // Empty source file is an extension in C
+ Diag(Tok, diag::ext_empty_source_file);
+
+ // Initialization for Objective-C context-sensitive keyword recognition.
+ // Referenced in Parser::ParseObjCTypeQualifierList.
+ if (getLangOpts().ObjC1) {
+ ObjCTypeQuals[objc_in] = &PP.getIdentifierTable().get("in");
+ ObjCTypeQuals[objc_out] = &PP.getIdentifierTable().get("out");
+ ObjCTypeQuals[objc_inout] = &PP.getIdentifierTable().get("inout");
+ ObjCTypeQuals[objc_oneway] = &PP.getIdentifierTable().get("oneway");
+ ObjCTypeQuals[objc_bycopy] = &PP.getIdentifierTable().get("bycopy");
+ ObjCTypeQuals[objc_byref] = &PP.getIdentifierTable().get("byref");
+ }
+
+ Ident_instancetype = 0;
+ Ident_final = 0;
+ Ident_override = 0;
+
+ Ident_super = &PP.getIdentifierTable().get("super");
+
+ if (getLangOpts().AltiVec) {
+ Ident_vector = &PP.getIdentifierTable().get("vector");
+ Ident_pixel = &PP.getIdentifierTable().get("pixel");
+ }
+
+ Ident_introduced = 0;
+ Ident_deprecated = 0;
+ Ident_obsoleted = 0;
+ Ident_unavailable = 0;
+
+ Ident__except = 0;
+
+ Ident__exception_code = Ident__exception_info = Ident__abnormal_termination = 0;
+ Ident___exception_code = Ident___exception_info = Ident___abnormal_termination = 0;
+ Ident_GetExceptionCode = Ident_GetExceptionInfo = Ident_AbnormalTermination = 0;
+
+ if(getLangOpts().Borland) {
+ Ident__exception_info = PP.getIdentifierInfo("_exception_info");
+ Ident___exception_info = PP.getIdentifierInfo("__exception_info");
+ Ident_GetExceptionInfo = PP.getIdentifierInfo("GetExceptionInformation");
+ Ident__exception_code = PP.getIdentifierInfo("_exception_code");
+ Ident___exception_code = PP.getIdentifierInfo("__exception_code");
+ Ident_GetExceptionCode = PP.getIdentifierInfo("GetExceptionCode");
+ Ident__abnormal_termination = PP.getIdentifierInfo("_abnormal_termination");
+ Ident___abnormal_termination = PP.getIdentifierInfo("__abnormal_termination");
+ Ident_AbnormalTermination = PP.getIdentifierInfo("AbnormalTermination");
+
+ PP.SetPoisonReason(Ident__exception_code,diag::err_seh___except_block);
+ PP.SetPoisonReason(Ident___exception_code,diag::err_seh___except_block);
+ PP.SetPoisonReason(Ident_GetExceptionCode,diag::err_seh___except_block);
+ PP.SetPoisonReason(Ident__exception_info,diag::err_seh___except_filter);
+ PP.SetPoisonReason(Ident___exception_info,diag::err_seh___except_filter);
+ PP.SetPoisonReason(Ident_GetExceptionInfo,diag::err_seh___except_filter);
+ PP.SetPoisonReason(Ident__abnormal_termination,diag::err_seh___finally_block);
+ PP.SetPoisonReason(Ident___abnormal_termination,diag::err_seh___finally_block);
+ PP.SetPoisonReason(Ident_AbnormalTermination,diag::err_seh___finally_block);
+ }
+}
+
+/// ParseTopLevelDecl - Parse one top-level declaration, return whatever the
+/// action tells us to. This returns true if the EOF was encountered.
+bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result) {
+ DelayedCleanupPoint CleanupRAII(TopLevelDeclCleanupPool);
+
+ // Skip over the EOF token, flagging the end of the previous input for
+ // incremental processing.
+ if (PP.isIncrementalProcessingEnabled() && Tok.is(tok::eof))
+ ConsumeToken();
+
+ while (Tok.is(tok::annot_pragma_unused))
+ HandlePragmaUnused();
+
+ Result = DeclGroupPtrTy();
+ if (Tok.is(tok::eof)) {
+ // Late template parsing can begin.
+ if (getLangOpts().DelayedTemplateParsing)
+ Actions.SetLateTemplateParser(LateTemplateParserCallback, this);
+ if (!PP.isIncrementalProcessingEnabled())
+ Actions.ActOnEndOfTranslationUnit();
+ // else, don't tell Sema that we ended parsing: more input might come.
+
+ return true;
+ }
+
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
+
+ Result = ParseExternalDeclaration(attrs);
+ return false;
+}
+
+/// ParseTranslationUnit:
+/// translation-unit: [C99 6.9]
+/// external-declaration
+/// translation-unit external-declaration
+void Parser::ParseTranslationUnit() {
+ Initialize();
+
+ DeclGroupPtrTy Res;
+ while (!ParseTopLevelDecl(Res))
+ /*parse them all*/;
+
+ ExitScope();
+ assert(getCurScope() == 0 && "Scope imbalance!");
+}
+
+/// ParseExternalDeclaration:
+///
+/// external-declaration: [C99 6.9], declaration: [C++ dcl.dcl]
+/// function-definition
+/// declaration
+/// [C++0x] empty-declaration
+/// [GNU] asm-definition
+/// [GNU] __extension__ external-declaration
+/// [OBJC] objc-class-definition
+/// [OBJC] objc-class-declaration
+/// [OBJC] objc-alias-declaration
+/// [OBJC] objc-protocol-definition
+/// [OBJC] objc-method-definition
+/// [OBJC] @end
+/// [C++] linkage-specification
+/// [GNU] asm-definition:
+/// simple-asm-expr ';'
+///
+/// [C++0x] empty-declaration:
+/// ';'
+///
+/// [C++0x/GNU] 'extern' 'template' declaration
+Parser::DeclGroupPtrTy
+Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
+ ParsingDeclSpec *DS) {
+ DelayedCleanupPoint CleanupRAII(TopLevelDeclCleanupPool);
+ ParenBraceBracketBalancer BalancerRAIIObj(*this);
+
+ if (PP.isCodeCompletionReached()) {
+ cutOffParsing();
+ return DeclGroupPtrTy();
+ }
+
+ Decl *SingleDecl = 0;
+ switch (Tok.getKind()) {
+ case tok::annot_pragma_vis:
+ HandlePragmaVisibility();
+ return DeclGroupPtrTy();
+ case tok::annot_pragma_pack:
+ HandlePragmaPack();
+ return DeclGroupPtrTy();
+ case tok::semi:
+ Diag(Tok, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_top_level_semi : diag::ext_top_level_semi)
+ << FixItHint::CreateRemoval(Tok.getLocation());
+
+ ConsumeToken();
+ // TODO: Invoke action for top-level semicolon.
+ return DeclGroupPtrTy();
+ case tok::r_brace:
+ Diag(Tok, diag::err_extraneous_closing_brace);
+ ConsumeBrace();
+ return DeclGroupPtrTy();
+ case tok::eof:
+ Diag(Tok, diag::err_expected_external_declaration);
+ return DeclGroupPtrTy();
+ case tok::kw___extension__: {
+ // __extension__ silences extension warnings in the subexpression.
+ ExtensionRAIIObject O(Diags); // Use RAII to do this.
+ ConsumeToken();
+ return ParseExternalDeclaration(attrs);
+ }
+ case tok::kw_asm: {
+ ProhibitAttributes(attrs);
+
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc;
+ ExprResult Result(ParseSimpleAsm(&EndLoc));
+
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
+ "top-level asm block");
+
+ if (Result.isInvalid())
+ return DeclGroupPtrTy();
+ SingleDecl = Actions.ActOnFileScopeAsmDecl(Result.get(), StartLoc, EndLoc);
+ break;
+ }
+ case tok::at:
+ return ParseObjCAtDirectives();
+ case tok::minus:
+ case tok::plus:
+ if (!getLangOpts().ObjC1) {
+ Diag(Tok, diag::err_expected_external_declaration);
+ ConsumeToken();
+ return DeclGroupPtrTy();
+ }
+ SingleDecl = ParseObjCMethodDefinition();
+ break;
+ case tok::code_completion:
+ Actions.CodeCompleteOrdinaryName(getCurScope(),
+ CurParsedObjCImpl? Sema::PCC_ObjCImplementation
+ : Sema::PCC_Namespace);
+ cutOffParsing();
+ return DeclGroupPtrTy();
+ case tok::kw_using:
+ case tok::kw_namespace:
+ case tok::kw_typedef:
+ case tok::kw_template:
+ case tok::kw_export: // As in 'export template'
+ case tok::kw_static_assert:
+ case tok::kw__Static_assert:
+ // A function definition cannot start with any of these keywords.
+ {
+ SourceLocation DeclEnd;
+ StmtVector Stmts(Actions);
+ return ParseDeclaration(Stmts, Declarator::FileContext, DeclEnd, attrs);
+ }
+
+ case tok::kw_static:
+ // Parse (then ignore) 'static' prior to a template instantiation. This is
+ // a GCC extension that we intentionally do not support.
+ if (getLangOpts().CPlusPlus && NextToken().is(tok::kw_template)) {
+ Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
+ << 0;
+ SourceLocation DeclEnd;
+ StmtVector Stmts(Actions);
+ return ParseDeclaration(Stmts, Declarator::FileContext, DeclEnd, attrs);
+ }
+ goto dont_know;
+
+ case tok::kw_inline:
+ if (getLangOpts().CPlusPlus) {
+ tok::TokenKind NextKind = NextToken().getKind();
+
+ // Inline namespaces. Allowed as an extension even in C++03.
+ if (NextKind == tok::kw_namespace) {
+ SourceLocation DeclEnd;
+ StmtVector Stmts(Actions);
+ return ParseDeclaration(Stmts, Declarator::FileContext, DeclEnd, attrs);
+ }
+
+ // Parse (then ignore) 'inline' prior to a template instantiation. This is
+ // a GCC extension that we intentionally do not support.
+ if (NextKind == tok::kw_template) {
+ Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
+ << 1;
+ SourceLocation DeclEnd;
+ StmtVector Stmts(Actions);
+ return ParseDeclaration(Stmts, Declarator::FileContext, DeclEnd, attrs);
+ }
+ }
+ goto dont_know;
+
+ case tok::kw_extern:
+ if (getLangOpts().CPlusPlus && NextToken().is(tok::kw_template)) {
+ // Extern templates
+ SourceLocation ExternLoc = ConsumeToken();
+ SourceLocation TemplateLoc = ConsumeToken();
+ Diag(ExternLoc, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_extern_template :
+ diag::ext_extern_template) << SourceRange(ExternLoc, TemplateLoc);
+ SourceLocation DeclEnd;
+ return Actions.ConvertDeclToDeclGroup(
+ ParseExplicitInstantiation(Declarator::FileContext,
+ ExternLoc, TemplateLoc, DeclEnd));
+ }
+ // FIXME: Detect C++ linkage specifications here?
+ goto dont_know;
+
+ case tok::kw___if_exists:
+ case tok::kw___if_not_exists:
+ ParseMicrosoftIfExistsExternalDeclaration();
+ return DeclGroupPtrTy();
+
+ default:
+ dont_know:
+ // We can't tell whether this is a function-definition or declaration yet.
+ if (DS) {
+ DS->takeAttributesFrom(attrs);
+ return ParseDeclarationOrFunctionDefinition(*DS);
+ } else {
+ return ParseDeclarationOrFunctionDefinition(attrs);
+ }
+ }
+
+ // This routine returns a DeclGroup; if the thing we parsed only contains a
+ // single decl, convert it now.
+ return Actions.ConvertDeclToDeclGroup(SingleDecl);
+}
+
+/// \brief Determine whether the current token, if it occurs after a
+/// declarator, continues a declaration or declaration list.
+bool Parser::isDeclarationAfterDeclarator() {
+ // Check for '= delete' or '= default'
+ if (getLangOpts().CPlusPlus && Tok.is(tok::equal)) {
+ const Token &KW = NextToken();
+ if (KW.is(tok::kw_default) || KW.is(tok::kw_delete))
+ return false;
+ }
+
+ return Tok.is(tok::equal) || // int X()= -> not a function def
+ Tok.is(tok::comma) || // int X(), -> not a function def
+ Tok.is(tok::semi) || // int X(); -> not a function def
+ Tok.is(tok::kw_asm) || // int X() __asm__ -> not a function def
+ Tok.is(tok::kw___attribute) || // int X() __attr__ -> not a function def
+ (getLangOpts().CPlusPlus &&
+ Tok.is(tok::l_paren)); // int X(0) -> not a function def [C++]
+}
+
+/// \brief Determine whether the current token, if it occurs after a
+/// declarator, indicates the start of a function definition.
+bool Parser::isStartOfFunctionDefinition(const ParsingDeclarator &Declarator) {
+ assert(Declarator.isFunctionDeclarator() && "Isn't a function declarator");
+ if (Tok.is(tok::l_brace)) // int X() {}
+ return true;
+
+ // Handle K&R C argument lists: int X(f) int f; {}
+ if (!getLangOpts().CPlusPlus &&
+ Declarator.getFunctionTypeInfo().isKNRPrototype())
+ return isDeclarationSpecifier();
+
+ if (getLangOpts().CPlusPlus && Tok.is(tok::equal)) {
+ const Token &KW = NextToken();
+ return KW.is(tok::kw_default) || KW.is(tok::kw_delete);
+ }
+
+ return Tok.is(tok::colon) || // X() : Base() {} (used for ctors)
+ Tok.is(tok::kw_try); // X() try { ... }
+}
+
+/// ParseDeclarationOrFunctionDefinition - Parse either a function-definition or
+/// a declaration. We can't tell which we have until we read up to the
+/// compound-statement in function-definition. TemplateParams, if
+/// non-NULL, provides the template parameters when we're parsing a
+/// C++ template-declaration.
+///
+/// function-definition: [C99 6.9.1]
+/// decl-specs declarator declaration-list[opt] compound-statement
+/// [C90] function-definition: [C99 6.7.1] - implicit int result
+/// [C90] decl-specs[opt] declarator declaration-list[opt] compound-statement
+///
+/// declaration: [C99 6.7]
+/// declaration-specifiers init-declarator-list[opt] ';'
+/// [!C99] init-declarator-list ';' [TODO: warn in c99 mode]
+/// [OMP] threadprivate-directive [TODO]
+///
+Parser::DeclGroupPtrTy
+Parser::ParseDeclarationOrFunctionDefinition(ParsingDeclSpec &DS,
+ AccessSpecifier AS) {
+ // Parse the common declaration-specifiers piece.
+ ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS, DSC_top_level);
+
+ // C99 6.7.2.3p6: Handle "struct-or-union identifier;", "enum { X };"
+ // declaration-specifiers init-declarator-list[opt] ';'
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS, DS);
+ DS.complete(TheDecl);
+ return Actions.ConvertDeclToDeclGroup(TheDecl);
+ }
+
+ // ObjC2 allows prefix attributes on class interfaces and protocols.
+ // FIXME: This still needs better diagnostics. We should only accept
+ // attributes here, no types, etc.
+ if (getLangOpts().ObjC2 && Tok.is(tok::at)) {
+ SourceLocation AtLoc = ConsumeToken(); // the "@"
+ if (!Tok.isObjCAtKeyword(tok::objc_interface) &&
+ !Tok.isObjCAtKeyword(tok::objc_protocol)) {
+ Diag(Tok, diag::err_objc_unexpected_attr);
+ SkipUntil(tok::semi); // FIXME: better skip?
+ return DeclGroupPtrTy();
+ }
+
+ DS.abort();
+
+ const char *PrevSpec = 0;
+ unsigned DiagID;
+ if (DS.SetTypeSpecType(DeclSpec::TST_unspecified, AtLoc, PrevSpec, DiagID))
+ Diag(AtLoc, DiagID) << PrevSpec;
+
+ if (Tok.isObjCAtKeyword(tok::objc_protocol))
+ return ParseObjCAtProtocolDeclaration(AtLoc, DS.getAttributes());
+
+ return Actions.ConvertDeclToDeclGroup(
+ ParseObjCAtInterfaceDeclaration(AtLoc, DS.getAttributes()));
+ }
+
+ // If the declspec consisted only of 'extern' and we have a string
+ // literal following it, this must be a C++ linkage specifier like
+ // 'extern "C"'.
+ if (Tok.is(tok::string_literal) && getLangOpts().CPlusPlus &&
+ DS.getStorageClassSpec() == DeclSpec::SCS_extern &&
+ DS.getParsedSpecifiers() == DeclSpec::PQ_StorageClassSpecifier) {
+ Decl *TheDecl = ParseLinkage(DS, Declarator::FileContext);
+ return Actions.ConvertDeclToDeclGroup(TheDecl);
+ }
+
+ return ParseDeclGroup(DS, Declarator::FileContext, true);
+}
+
+Parser::DeclGroupPtrTy
+Parser::ParseDeclarationOrFunctionDefinition(ParsedAttributes &attrs,
+ AccessSpecifier AS) {
+ ParsingDeclSpec DS(*this);
+ DS.takeAttributesFrom(attrs);
+ // Must temporarily exit the Objective-C container scope for parsing
+ // C constructs and re-enter the Objective-C container scope afterwards.
+ ObjCDeclContextSwitch ObjCDC(*this);
+
+ return ParseDeclarationOrFunctionDefinition(DS, AS);
+}
+
+/// ParseFunctionDefinition - We parsed and verified that the specified
+/// Declarator is well formed. If this is a K&R-style function, read the
+/// parameters declaration-list, then start the compound-statement.
+///
+/// function-definition: [C99 6.9.1]
+/// decl-specs declarator declaration-list[opt] compound-statement
+/// [C90] function-definition: [C99 6.7.1] - implicit int result
+/// [C90] decl-specs[opt] declarator declaration-list[opt] compound-statement
+/// [C++] function-definition: [C++ 8.4]
+/// decl-specifier-seq[opt] declarator ctor-initializer[opt]
+/// function-body
+/// [C++] function-definition: [C++ 8.4]
+/// decl-specifier-seq[opt] declarator function-try-block
+///
+Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
+ const ParsedTemplateInfo &TemplateInfo,
+ LateParsedAttrList *LateParsedAttrs) {
+ // Poison the SEH identifiers so they are flagged as illegal in function bodies
+ PoisonSEHIdentifiersRAIIObject PoisonSEHIdentifiers(*this, true);
+ const DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+
+ // If this is C90 and the declspecs were completely missing, fudge in an
+ // implicit int. We do this here because this is the only place where
+ // declaration-specifiers are completely optional in the grammar.
+ if (getLangOpts().ImplicitInt && D.getDeclSpec().isEmpty()) {
+ const char *PrevSpec;
+ unsigned DiagID;
+ D.getMutableDeclSpec().SetTypeSpecType(DeclSpec::TST_int,
+ D.getIdentifierLoc(),
+ PrevSpec, DiagID);
+ D.SetRangeBegin(D.getDeclSpec().getSourceRange().getBegin());
+ }
+
+ // If this declaration was formed with a K&R-style identifier list for the
+ // arguments, parse declarations for all of the args next.
+ // int foo(a,b) int a; float b; {}
+ if (FTI.isKNRPrototype())
+ ParseKNRParamDeclarations(D);
+
+ // We should have either an opening brace or, in a C++ constructor,
+ // we may have a colon.
+ if (Tok.isNot(tok::l_brace) &&
+ (!getLangOpts().CPlusPlus ||
+ (Tok.isNot(tok::colon) && Tok.isNot(tok::kw_try) &&
+ Tok.isNot(tok::equal)))) {
+ Diag(Tok, diag::err_expected_fn_body);
+
+ // Skip over garbage, until we get to '{'. Don't eat the '{'.
+ SkipUntil(tok::l_brace, true, true);
+
+ // If we didn't find the '{', bail out.
+ if (Tok.isNot(tok::l_brace))
+ return 0;
+ }
+
+ // Check to make sure that any normal attributes are allowed to be on
+ // a definition. Late parsed attributes are checked at the end.
+ if (Tok.isNot(tok::equal)) {
+ AttributeList *DtorAttrs = D.getAttributes();
+ while (DtorAttrs) {
+ if (!IsThreadSafetyAttribute(DtorAttrs->getName()->getName())) {
+ Diag(DtorAttrs->getLoc(), diag::warn_attribute_on_function_definition)
+ << DtorAttrs->getName()->getName();
+ }
+ DtorAttrs = DtorAttrs->getNext();
+ }
+ }
+
+ // In delayed template parsing mode, for a function template we consume the
+ // tokens and store them for late parsing at the end of the translation unit.
+ if (getLangOpts().DelayedTemplateParsing &&
+ TemplateInfo.Kind == ParsedTemplateInfo::Template) {
+ MultiTemplateParamsArg TemplateParameterLists(Actions,
+ TemplateInfo.TemplateParams->data(),
+ TemplateInfo.TemplateParams->size());
+
+ ParseScope BodyScope(this, Scope::FnScope|Scope::DeclScope);
+ Scope *ParentScope = getCurScope()->getParent();
+
+ D.setFunctionDefinitionKind(FDK_Definition);
+ Decl *DP = Actions.HandleDeclarator(ParentScope, D,
+ move(TemplateParameterLists));
+ D.complete(DP);
+ D.getMutableDeclSpec().abort();
+
+ if (DP) {
+ LateParsedTemplatedFunction *LPT = new LateParsedTemplatedFunction(DP);
+
+ FunctionDecl *FnD = 0;
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(DP))
+ FnD = FunTmpl->getTemplatedDecl();
+ else
+ FnD = cast<FunctionDecl>(DP);
+ Actions.CheckForFunctionRedefinition(FnD);
+
+ LateParsedTemplateMap[FnD] = LPT;
+ Actions.MarkAsLateParsedTemplate(FnD);
+ LexTemplateFunctionForLateParsing(LPT->Toks);
+ } else {
+ CachedTokens Toks;
+ LexTemplateFunctionForLateParsing(Toks);
+ }
+ return DP;
+ }
+
+ // Enter a scope for the function body.
+ ParseScope BodyScope(this, Scope::FnScope|Scope::DeclScope);
+
+ // Tell the actions module that we have entered a function definition with the
+ // specified Declarator for the function.
+ Decl *Res = TemplateInfo.TemplateParams?
+ Actions.ActOnStartOfFunctionTemplateDef(getCurScope(),
+ MultiTemplateParamsArg(Actions,
+ TemplateInfo.TemplateParams->data(),
+ TemplateInfo.TemplateParams->size()),
+ D)
+ : Actions.ActOnStartOfFunctionDef(getCurScope(), D);
+
+ // Break out of the ParsingDeclarator context before we parse the body.
+ D.complete(Res);
+
+ // Break out of the ParsingDeclSpec context, too. This const_cast is
+ // safe because we're always the sole owner.
+ D.getMutableDeclSpec().abort();
+
+ if (Tok.is(tok::equal)) {
+ assert(getLangOpts().CPlusPlus && "Only C++ function definitions have '='");
+ ConsumeToken();
+
+ Actions.ActOnFinishFunctionBody(Res, 0, false);
+
+ bool Delete = false;
+ SourceLocation KWLoc;
+ if (Tok.is(tok::kw_delete)) {
+ Diag(Tok, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_deleted_function :
+ diag::ext_deleted_function);
+
+ KWLoc = ConsumeToken();
+ Actions.SetDeclDeleted(Res, KWLoc);
+ Delete = true;
+ } else if (Tok.is(tok::kw_default)) {
+ Diag(Tok, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_defaulted_function :
+ diag::ext_defaulted_function);
+
+ KWLoc = ConsumeToken();
+ Actions.SetDeclDefaulted(Res, KWLoc);
+ } else {
+ llvm_unreachable("function definition after = not 'delete' or 'default'");
+ }
+
+ if (Tok.is(tok::comma)) {
+ Diag(KWLoc, diag::err_default_delete_in_multiple_declaration)
+ << Delete;
+ SkipUntil(tok::semi);
+ } else {
+ ExpectAndConsume(tok::semi, diag::err_expected_semi_after,
+ Delete ? "delete" : "default", tok::semi);
+ }
+
+ return Res;
+ }
+
+ if (Tok.is(tok::kw_try))
+ return ParseFunctionTryBlock(Res, BodyScope);
+
+ // If we have a colon, then we're probably parsing a C++
+ // ctor-initializer.
+ if (Tok.is(tok::colon)) {
+ ParseConstructorInitializer(Res);
+
+ // Recover from error.
+ if (!Tok.is(tok::l_brace)) {
+ BodyScope.Exit();
+ Actions.ActOnFinishFunctionBody(Res, 0);
+ return Res;
+ }
+ } else
+ Actions.ActOnDefaultCtorInitializers(Res);
+
+ // Late attributes are parsed in the same scope as the function body.
+ if (LateParsedAttrs)
+ ParseLexedAttributeList(*LateParsedAttrs, Res, false, true);
+
+ return ParseFunctionStatementBody(Res, BodyScope);
+}
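+
+// Illustrative sketch (editor's note, not part of the original change): for a
+// namespace-scope definition such as
+//   S::S() = default;
+// isStartOfFunctionDefinition sees '=' followed by 'default', so the '='
+// branch above consumes the keyword, calls SetDeclDefaulted (or SetDeclDeleted
+// for '= delete') and then expects the terminating ';'; a following ',' is
+// rejected with err_default_delete_in_multiple_declaration.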
+
+/// ParseKNRParamDeclarations - Parse 'declaration-list[opt]' which provides
+/// types for a function with a K&R-style identifier list for arguments.
+void Parser::ParseKNRParamDeclarations(Declarator &D) {
+ // We know that the top-level of this declarator is a function.
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+
+ // Enter function-declaration scope, limiting any declarators to the
+ // function prototype scope, including parameter declarators.
+ ParseScope PrototypeScope(this, Scope::FunctionPrototypeScope|Scope::DeclScope);
+
+ // Read all the argument declarations.
+ while (isDeclarationSpecifier()) {
+ SourceLocation DSStart = Tok.getLocation();
+
+ // Parse the common declaration-specifiers piece.
+ DeclSpec DS(AttrFactory);
+ ParseDeclarationSpecifiers(DS);
+
+ // C99 6.9.1p6: 'each declaration in the declaration list shall have at
+ // least one declarator'.
+ // NOTE: GCC just makes this an ext-warn. It's not clear what it does with
+ // the declarations though. It's trivial to ignore them, really hard to do
+ // anything else with them.
+ if (Tok.is(tok::semi)) {
+ Diag(DSStart, diag::err_declaration_does_not_declare_param);
+ ConsumeToken();
+ continue;
+ }
+
+ // C99 6.9.1p6: Declarations shall contain no storage-class specifiers other
+ // than register.
+ if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified &&
+ DS.getStorageClassSpec() != DeclSpec::SCS_register) {
+ Diag(DS.getStorageClassSpecLoc(),
+ diag::err_invalid_storage_class_in_func_decl);
+ DS.ClearStorageClassSpecs();
+ }
+ if (DS.isThreadSpecified()) {
+ Diag(DS.getThreadSpecLoc(),
+ diag::err_invalid_storage_class_in_func_decl);
+ DS.ClearStorageClassSpecs();
+ }
+
+ // Parse the first declarator attached to this declspec.
+ Declarator ParmDeclarator(DS, Declarator::KNRTypeListContext);
+ ParseDeclarator(ParmDeclarator);
+
+ // Handle the full declarator list.
+ while (1) {
+ // If attributes are present, parse them.
+ MaybeParseGNUAttributes(ParmDeclarator);
+
+ // Ask the actions module to compute the type for this declarator.
+ Decl *Param =
+ Actions.ActOnParamDeclarator(getCurScope(), ParmDeclarator);
+
+ if (Param &&
+ // A missing identifier has already been diagnosed.
+ ParmDeclarator.getIdentifier()) {
+
+ // Scan the argument list looking for the correct param to apply this
+ // type.
+ for (unsigned i = 0; ; ++i) {
+ // C99 6.9.1p6: those declarators shall declare only identifiers from
+ // the identifier list.
+ if (i == FTI.NumArgs) {
+ Diag(ParmDeclarator.getIdentifierLoc(), diag::err_no_matching_param)
+ << ParmDeclarator.getIdentifier();
+ break;
+ }
+
+ if (FTI.ArgInfo[i].Ident == ParmDeclarator.getIdentifier()) {
+ // Reject redefinitions of parameters.
+ if (FTI.ArgInfo[i].Param) {
+ Diag(ParmDeclarator.getIdentifierLoc(),
+ diag::err_param_redefinition)
+ << ParmDeclarator.getIdentifier();
+ } else {
+ FTI.ArgInfo[i].Param = Param;
+ }
+ break;
+ }
+ }
+ }
+
+ // If we don't have a comma, it is either the end of the list (a ';') or
+ // an error; bail out either way.
+ if (Tok.isNot(tok::comma))
+ break;
+
+ ParmDeclarator.clear();
+
+ // Consume the comma.
+ ParmDeclarator.setCommaLoc(ConsumeToken());
+
+ // Parse the next declarator.
+ ParseDeclarator(ParmDeclarator);
+ }
+
+ if (Tok.is(tok::semi)) {
+ ConsumeToken();
+ } else {
+ Diag(Tok, diag::err_expected_semi_declaration);
+ // Skip to end of block or statement
+ SkipUntil(tok::semi, true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
+ }
+ }
+
+ // The actions module must verify that all arguments were declared.
+ Actions.ActOnFinishKNRParamDeclarations(getCurScope(), D, Tok.getLocation());
+}
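+
+// Illustrative sketch (editor's note, not part of the original change): for a
+// K&R-style definition such as
+//   int foo(a, b) float b; int a; { ... }
+// each declarator parsed above is matched against the identifier list by name
+// (FTI.ArgInfo[i].Ident), not by position, so 'b' and 'a' may be declared in
+// any order; a name missing from the list yields err_no_matching_param and a
+// repeated declaration yields err_param_redefinition.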
+
+
+/// ParseAsmStringLiteral - This is just a normal string-literal, but is not
+/// allowed to be a wide string, and is not subject to character translation.
+///
+/// [GNU] asm-string-literal:
+/// string-literal
+///
+Parser::ExprResult Parser::ParseAsmStringLiteral() {
+ switch (Tok.getKind()) {
+ case tok::string_literal:
+ break;
+ case tok::utf8_string_literal:
+ case tok::utf16_string_literal:
+ case tok::utf32_string_literal:
+ case tok::wide_string_literal: {
+ SourceLocation L = Tok.getLocation();
+ Diag(Tok, diag::err_asm_operand_wide_string_literal)
+ << (Tok.getKind() == tok::wide_string_literal)
+ << SourceRange(L, L);
+ return ExprError();
+ }
+ default:
+ Diag(Tok, diag::err_expected_string_literal);
+ return ExprError();
+ }
+
+ return ParseStringLiteralExpression();
+}
+
+/// ParseSimpleAsm
+///
+/// [GNU] simple-asm-expr:
+/// 'asm' '(' asm-string-literal ')'
+///
+Parser::ExprResult Parser::ParseSimpleAsm(SourceLocation *EndLoc) {
+ assert(Tok.is(tok::kw_asm) && "Not an asm!");
+ SourceLocation Loc = ConsumeToken();
+
+ if (Tok.is(tok::kw_volatile)) {
+ // Remove from the end of 'asm' to the end of 'volatile'.
+ SourceRange RemovalRange(PP.getLocForEndOfToken(Loc),
+ PP.getLocForEndOfToken(Tok.getLocation()));
+
+ Diag(Tok, diag::warn_file_asm_volatile)
+ << FixItHint::CreateRemoval(RemovalRange);
+ ConsumeToken();
+ }
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected_lparen_after) << "asm";
+ return ExprError();
+ }
+
+ ExprResult Result(ParseAsmStringLiteral());
+
+ if (Result.isInvalid()) {
+ SkipUntil(tok::r_paren, true, true);
+ if (EndLoc)
+ *EndLoc = Tok.getLocation();
+ ConsumeAnyToken();
+ } else {
+ // Close the paren and record the location of the closing ')'.
+ T.consumeClose();
+ if (EndLoc)
+ *EndLoc = T.getCloseLocation();
+ }
+
+ return move(Result);
+}
+
+/// \brief Get the TemplateIdAnnotation from the token and put it in the
+/// cleanup pool so that it gets destroyed when parsing the current top level
+/// declaration is finished.
+TemplateIdAnnotation *Parser::takeTemplateIdAnnotation(const Token &tok) {
+ assert(tok.is(tok::annot_template_id) && "Expected template-id token");
+ TemplateIdAnnotation *
+ Id = static_cast<TemplateIdAnnotation *>(tok.getAnnotationValue());
+ TopLevelDeclCleanupPool.delayMemberFunc< TemplateIdAnnotation,
+ &TemplateIdAnnotation::Destroy>(Id);
+ return Id;
+}
+
+/// TryAnnotateTypeOrScopeToken - If the current token position is on a
+/// typename (possibly qualified in C++) or a C++ scope specifier not followed
+/// by a typename, TryAnnotateTypeOrScopeToken will replace one or more tokens
+/// with a single annotation token representing the typename or C++ scope
+/// respectively.
+/// This simplifies handling of C++ scope specifiers and allows efficient
+/// backtracking without the need to re-parse and resolve nested-names and
+/// typenames.
+/// It will mainly be called when we expect to treat identifiers as typenames
+/// (if they are typenames). For example, in C we do not expect identifiers
+/// inside expressions to be treated as typenames so it will not be called
+/// for expressions in C.
+/// The benefit for C/ObjC is that a typename will be annotated and
+/// Actions.getTypeName will not need to be called again (e.g. getTypeName
+/// will not be called twice, once to check whether we have a declaration
+/// specifier, and once more to get the actual type inside
+/// ParseDeclarationSpecifiers).
+///
+/// This returns true if an error occurred.
+///
+/// Note that this routine emits an error if you call it with ::new or ::delete
+/// as the current tokens, so only call it in contexts where these are invalid.
+bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext, bool NeedType) {
+ assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon)
+ || Tok.is(tok::kw_typename) || Tok.is(tok::annot_cxxscope)
+ || Tok.is(tok::kw_decltype)) && "Cannot be a type or scope token!");
+
+ if (Tok.is(tok::kw_typename)) {
+ // Parse a C++ typename-specifier, e.g., "typename T::type".
+ //
+ // typename-specifier:
+ // 'typename' '::' [opt] nested-name-specifier identifier
+ // 'typename' '::' [opt] nested-name-specifier template [opt]
+ // simple-template-id
+ SourceLocation TypenameLoc = ConsumeToken();
+ CXXScopeSpec SS;
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/ParsedType(),
+ /*EnteringContext=*/false,
+ 0, /*IsTypename*/true))
+ return true;
+ if (!SS.isSet()) {
+ if (getLangOpts().MicrosoftExt)
+ Diag(Tok.getLocation(), diag::warn_expected_qualified_after_typename);
+ else
+ Diag(Tok.getLocation(), diag::err_expected_qualified_after_typename);
+ return true;
+ }
+
+ TypeResult Ty;
+ if (Tok.is(tok::identifier)) {
+ // FIXME: check whether the next token is '<', first!
+ Ty = Actions.ActOnTypenameType(getCurScope(), TypenameLoc, SS,
+ *Tok.getIdentifierInfo(),
+ Tok.getLocation());
+ } else if (Tok.is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ if (TemplateId->Kind == TNK_Function_template) {
+ Diag(Tok, diag::err_typename_refers_to_non_type_template)
+ << Tok.getAnnotationRange();
+ return true;
+ }
+
+ ASTTemplateArgsPtr TemplateArgsPtr(Actions,
+ TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+
+ Ty = Actions.ActOnTypenameType(getCurScope(), TypenameLoc, SS,
+ TemplateId->TemplateKWLoc,
+ TemplateId->Template,
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->RAngleLoc);
+ } else {
+ Diag(Tok, diag::err_expected_type_name_after_typename)
+ << SS.getRange();
+ return true;
+ }
+
+ SourceLocation EndLoc = Tok.getLastLoc();
+ Tok.setKind(tok::annot_typename);
+ setTypeAnnotation(Tok, Ty.isInvalid() ? ParsedType() : Ty.get());
+ Tok.setAnnotationEndLoc(EndLoc);
+ Tok.setLocation(TypenameLoc);
+ PP.AnnotateCachedTokens(Tok);
+ return false;
+ }
+
+ // Remembers whether the token was originally a scope annotation.
+ bool wasScopeAnnotation = Tok.is(tok::annot_cxxscope);
+
+ CXXScopeSpec SS;
+ if (getLangOpts().CPlusPlus)
+ if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(), EnteringContext))
+ return true;
+
+ if (Tok.is(tok::identifier)) {
+ IdentifierInfo *CorrectedII = 0;
+ // Determine whether the identifier is a type name.
+ if (ParsedType Ty = Actions.getTypeName(*Tok.getIdentifierInfo(),
+ Tok.getLocation(), getCurScope(),
+ &SS, false,
+ NextToken().is(tok::period),
+ ParsedType(),
+ /*IsCtorOrDtorName=*/false,
+ /*NonTrivialTypeSourceInfo*/true,
+ NeedType ? &CorrectedII : NULL)) {
+ // A FixIt was applied as a result of typo correction
+ if (CorrectedII)
+ Tok.setIdentifierInfo(CorrectedII);
+ // This is a typename. Replace the current token in-place with an
+ // annotation type token.
+ Tok.setKind(tok::annot_typename);
+ setTypeAnnotation(Tok, Ty);
+ Tok.setAnnotationEndLoc(Tok.getLocation());
+ if (SS.isNotEmpty()) // it was a C++ qualified type name.
+ Tok.setLocation(SS.getBeginLoc());
+
+ // In case the tokens were cached, have Preprocessor replace
+ // them with the annotation token.
+ PP.AnnotateCachedTokens(Tok);
+ return false;
+ }
+
+ if (!getLangOpts().CPlusPlus) {
+ // If we're in C, we can't have :: tokens at all (the lexer won't return
+ // them). If the identifier is not a type, then it can't be a scope
+ // specifier either, so just exit early.
+ return false;
+ }
+
+ // If this is a template-id, annotate with a template-id or type token.
+ if (NextToken().is(tok::less)) {
+ TemplateTy Template;
+ UnqualifiedId TemplateName;
+ TemplateName.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
+ bool MemberOfUnknownSpecialization;
+ if (TemplateNameKind TNK
+ = Actions.isTemplateName(getCurScope(), SS,
+ /*hasTemplateKeyword=*/false, TemplateName,
+ /*ObjectType=*/ ParsedType(),
+ EnteringContext,
+ Template, MemberOfUnknownSpecialization)) {
+ // Consume the identifier.
+ ConsumeToken();
+ if (AnnotateTemplateIdToken(Template, TNK, SS, SourceLocation(),
+ TemplateName)) {
+ // If an unrecoverable error occurred, we need to return true here,
+ // because the token stream is in a damaged state. We may not return
+ // a valid identifier.
+ return true;
+ }
+ }
+ }
+
+ // The current token, which is either an identifier or a
+ // template-id, is not part of the annotation. Fall through to
+ // push that token back into the stream and complete the C++ scope
+ // specifier annotation.
+ }
+
+ if (Tok.is(tok::annot_template_id)) {
+ TemplateIdAnnotation *TemplateId = takeTemplateIdAnnotation(Tok);
+ if (TemplateId->Kind == TNK_Type_template) {
+ // A template-id that refers to a type was parsed into a
+ // template-id annotation in a context where we weren't allowed
+ // to produce a type annotation token. Update the template-id
+ // annotation token to a type annotation token now.
+ AnnotateTemplateIdTokenAsType();
+ return false;
+ }
+ }
+
+ if (SS.isEmpty())
+ return false;
+
+ // A C++ scope specifier that isn't followed by a typename.
+ // Push the current token back into the token stream (or revert it if it is
+ // cached) and use an annotation scope token for current token.
+ if (PP.isBacktrackEnabled())
+ PP.RevertCachedTokens(1);
+ else
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::annot_cxxscope);
+ Tok.setAnnotationValue(Actions.SaveNestedNameSpecifierAnnotation(SS));
+ Tok.setAnnotationRange(SS.getRange());
+
+ // In case the tokens were cached, have Preprocessor replace them
+ // with the annotation token. We don't need to do this if we've
+ // just reverted back to the state we were in before being called.
+ if (!wasScopeAnnotation)
+ PP.AnnotateCachedTokens(Tok);
+ return false;
+}
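+
+// Illustrative sketch (editor's note, not part of the original change): given
+// the C++ token stream
+//   std :: vector < int > v ;
+// the routine above parses the 'std::' scope specifier, notices that 'vector'
+// names a type template and is followed by '<', builds a template-id
+// annotation, and then collapses the whole 'std::vector<int>' sequence into a
+// single tok::annot_typename token, leaving 'v' and ';' for the caller.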
+
+/// TryAnnotateCXXScopeToken - Like TryAnnotateTypeOrScopeToken but only
+/// annotates C++ scope specifiers and template-ids. This returns true if
+/// there was an error that could not be recovered from.
+///
+/// Note that this routine emits an error if you call it with ::new or ::delete
+/// as the current tokens, so only call it in contexts where these are invalid.
+bool Parser::TryAnnotateCXXScopeToken(bool EnteringContext) {
+ assert(getLangOpts().CPlusPlus &&
+ "Call sites of this function should be guarded by checking for C++");
+ assert((Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
+ (Tok.is(tok::annot_template_id) && NextToken().is(tok::coloncolon)) ||
+ Tok.is(tok::kw_decltype)) && "Cannot be a type or scope token!");
+
+ CXXScopeSpec SS;
+ if (ParseOptionalCXXScopeSpecifier(SS, ParsedType(), EnteringContext))
+ return true;
+ if (SS.isEmpty())
+ return false;
+
+ // Push the current token back into the token stream (or revert it if it is
+ // cached) and use an annotation scope token for current token.
+ if (PP.isBacktrackEnabled())
+ PP.RevertCachedTokens(1);
+ else
+ PP.EnterToken(Tok);
+ Tok.setKind(tok::annot_cxxscope);
+ Tok.setAnnotationValue(Actions.SaveNestedNameSpecifierAnnotation(SS));
+ Tok.setAnnotationRange(SS.getRange());
+
+ // In case the tokens were cached, have Preprocessor replace them with the
+ // annotation token.
+ PP.AnnotateCachedTokens(Tok);
+ return false;
+}
+
+bool Parser::isTokenEqualOrEqualTypo() {
+ tok::TokenKind Kind = Tok.getKind();
+ switch (Kind) {
+ default:
+ return false;
+ case tok::ampequal: // &=
+ case tok::starequal: // *=
+ case tok::plusequal: // +=
+ case tok::minusequal: // -=
+ case tok::exclaimequal: // !=
+ case tok::slashequal: // /=
+ case tok::percentequal: // %=
+ case tok::lessequal: // <=
+ case tok::lesslessequal: // <<=
+ case tok::greaterequal: // >=
+ case tok::greatergreaterequal: // >>=
+ case tok::caretequal: // ^=
+ case tok::pipeequal: // |=
+ case tok::equalequal: // ==
+ Diag(Tok, diag::err_invalid_token_after_declarator_suggest_equal)
+ << getTokenSimpleSpelling(Kind)
+ << FixItHint::CreateReplacement(SourceRange(Tok.getLocation()), "=");
+ case tok::equal:
+ return true;
+ }
+}
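+
+// Illustrative sketch (editor's note, not part of the original change): in a
+// declaration such as
+//   int x *= 5;
+// the '*=' after the declarator falls into the case list above, is diagnosed
+// with a fix-it replacing it by '=', and the function falls through to return
+// true so the caller can parse the initializer as if '=' had been written.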
+
+SourceLocation Parser::handleUnexpectedCodeCompletionToken() {
+ assert(Tok.is(tok::code_completion));
+ PrevTokLocation = Tok.getLocation();
+
+ for (Scope *S = getCurScope(); S; S = S->getParent()) {
+ if (S->getFlags() & Scope::FnScope) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_RecoveryInFunction);
+ cutOffParsing();
+ return PrevTokLocation;
+ }
+
+ if (S->getFlags() & Scope::ClassScope) {
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Class);
+ cutOffParsing();
+ return PrevTokLocation;
+ }
+ }
+
+ Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Namespace);
+ cutOffParsing();
+ return PrevTokLocation;
+}
+
+// Anchor the Parser::FieldCallback vtable to this translation unit.
+// We use a spurious method instead of the destructor because
+// destroying FieldCallbacks can actually be slightly
+// performance-sensitive.
+void Parser::FieldCallback::_anchor() {
+}
+
+// Code-completion pass-through functions
+
+void Parser::CodeCompleteDirective(bool InConditional) {
+ Actions.CodeCompletePreprocessorDirective(InConditional);
+}
+
+void Parser::CodeCompleteInConditionalExclusion() {
+ Actions.CodeCompleteInPreprocessorConditionalExclusion(getCurScope());
+}
+
+void Parser::CodeCompleteMacroName(bool IsDefinition) {
+ Actions.CodeCompletePreprocessorMacroName(IsDefinition);
+}
+
+void Parser::CodeCompletePreprocessorExpression() {
+ Actions.CodeCompletePreprocessorExpression();
+}
+
+void Parser::CodeCompleteMacroArgument(IdentifierInfo *Macro,
+ MacroInfo *MacroInfo,
+ unsigned ArgumentIndex) {
+ Actions.CodeCompletePreprocessorMacroArgument(getCurScope(), Macro, MacroInfo,
+ ArgumentIndex);
+}
+
+void Parser::CodeCompleteNaturalLanguage() {
+ Actions.CodeCompleteNaturalLanguage();
+}
+
+bool Parser::ParseMicrosoftIfExistsCondition(IfExistsCondition& Result) {
+ assert((Tok.is(tok::kw___if_exists) || Tok.is(tok::kw___if_not_exists)) &&
+ "Expected '__if_exists' or '__if_not_exists'");
+ Result.IsIfExists = Tok.is(tok::kw___if_exists);
+ Result.KeywordLoc = ConsumeToken();
+
+ BalancedDelimiterTracker T(*this, tok::l_paren);
+ if (T.consumeOpen()) {
+ Diag(Tok, diag::err_expected_lparen_after)
+ << (Result.IsIfExists? "__if_exists" : "__if_not_exists");
+ return true;
+ }
+
+ // Parse nested-name-specifier.
+ ParseOptionalCXXScopeSpecifier(Result.SS, ParsedType(),
+ /*EnteringContext=*/false);
+
+ // Check nested-name specifier.
+ if (Result.SS.isInvalid()) {
+ T.skipToEnd();
+ return true;
+ }
+
+ // Parse the unqualified-id.
+ SourceLocation TemplateKWLoc; // FIXME: parsed, but unused.
+ if (ParseUnqualifiedId(Result.SS, false, true, true, ParsedType(),
+ TemplateKWLoc, Result.Name)) {
+ T.skipToEnd();
+ return true;
+ }
+
+ if (T.consumeClose())
+ return true;
+
+ // Check if the symbol exists.
+ switch (Actions.CheckMicrosoftIfExistsSymbol(getCurScope(), Result.KeywordLoc,
+ Result.IsIfExists, Result.SS,
+ Result.Name)) {
+ case Sema::IER_Exists:
+ Result.Behavior = Result.IsIfExists ? IEB_Parse : IEB_Skip;
+ break;
+
+ case Sema::IER_DoesNotExist:
+ Result.Behavior = !Result.IsIfExists ? IEB_Parse : IEB_Skip;
+ break;
+
+ case Sema::IER_Dependent:
+ Result.Behavior = IEB_Dependent;
+ break;
+
+ case Sema::IER_Error:
+ return true;
+ }
+
+ return false;
+}
+
+void Parser::ParseMicrosoftIfExistsExternalDeclaration() {
+ IfExistsCondition Result;
+ if (ParseMicrosoftIfExistsCondition(Result))
+ return;
+
+ BalancedDelimiterTracker Braces(*this, tok::l_brace);
+ if (Braces.consumeOpen()) {
+ Diag(Tok, diag::err_expected_lbrace);
+ return;
+ }
+
+ switch (Result.Behavior) {
+ case IEB_Parse:
+ // Parse declarations below.
+ break;
+
+ case IEB_Dependent:
+ llvm_unreachable("Cannot have a dependent external declaration");
+
+ case IEB_Skip:
+ Braces.skipToEnd();
+ return;
+ }
+
+ // Parse the declarations.
+ while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
+ ParsedAttributesWithRange attrs(AttrFactory);
+ MaybeParseCXX0XAttributes(attrs);
+ MaybeParseMicrosoftAttributes(attrs);
+ DeclGroupPtrTy Result = ParseExternalDeclaration(attrs);
+ if (Result && !getCurScope()->getParent())
+ Actions.getASTConsumer().HandleTopLevelDecl(Result.get());
+ }
+ Braces.consumeClose();
+}
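+
+// Illustrative sketch (editor's note, not part of the original change): for
+//   __if_exists(N::f) { void g(); }
+// ParseMicrosoftIfExistsCondition resolves N::f via
+// CheckMicrosoftIfExistsSymbol; the braced declarations are parsed only when
+// the resulting behavior is IEB_Parse (the symbol exists for __if_exists, or
+// does not exist for __if_not_exists) and are skipped wholesale otherwise.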
+
+Parser::DeclGroupPtrTy Parser::ParseModuleImport(SourceLocation AtLoc) {
+ assert(Tok.isObjCAtKeyword(tok::objc___experimental_modules_import) &&
+ "Improper start to module import");
+ SourceLocation ImportLoc = ConsumeToken();
+
+ llvm::SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
+
+ // Parse the module path.
+ do {
+ if (!Tok.is(tok::identifier)) {
+ if (Tok.is(tok::code_completion)) {
+ Actions.CodeCompleteModuleImport(ImportLoc, Path);
+ ConsumeCodeCompletionToken();
+ SkipUntil(tok::semi);
+ return DeclGroupPtrTy();
+ }
+
+ Diag(Tok, diag::err_module_expected_ident);
+ SkipUntil(tok::semi);
+ return DeclGroupPtrTy();
+ }
+
+ // Record this part of the module path.
+ Path.push_back(std::make_pair(Tok.getIdentifierInfo(), Tok.getLocation()));
+ ConsumeToken();
+
+ if (Tok.is(tok::period)) {
+ ConsumeToken();
+ continue;
+ }
+
+ break;
+ } while (true);
+
+ DeclResult Import = Actions.ActOnModuleImport(AtLoc, ImportLoc, Path);
+ ExpectAndConsumeSemi(diag::err_module_expected_semi);
+ if (Import.isInvalid())
+ return DeclGroupPtrTy();
+
+ return Actions.ConvertDeclToDeclGroup(Import.get());
+}
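+
+// Illustrative sketch (editor's note, not part of the original change): an
+// import such as
+//   @__experimental_modules_import std.vector;
+// is parsed above into the dotted path { 'std', 'vector' } and handed to
+// ActOnModuleImport; a non-identifier path component is diagnosed with
+// err_module_expected_ident and the rest of the directive is skipped up to
+// the ';'.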
+
+bool Parser::BalancedDelimiterTracker::diagnoseOverflow() {
+ P.Diag(P.Tok, diag::err_parser_impl_limit_overflow);
+ P.SkipUntil(tok::eof);
+ return true;
+}
+
+bool Parser::BalancedDelimiterTracker::expectAndConsume(unsigned DiagID,
+ const char *Msg,
+ tok::TokenKind SkipToTok) {
+ LOpen = P.Tok.getLocation();
+ if (P.ExpectAndConsume(Kind, DiagID, Msg, SkipToTok))
+ return true;
+
+ if (getDepth() < MaxDepth)
+ return false;
+
+ return diagnoseOverflow();
+}
+
+bool Parser::BalancedDelimiterTracker::diagnoseMissingClose() {
+ assert(!P.Tok.is(Close) && "Should have consumed closing delimiter");
+
+ const char *LHSName = "unknown";
+ diag::kind DID;
+ switch (Close) {
+ default: llvm_unreachable("Unexpected balanced token");
+ case tok::r_paren : LHSName = "("; DID = diag::err_expected_rparen; break;
+ case tok::r_brace : LHSName = "{"; DID = diag::err_expected_rbrace; break;
+ case tok::r_square: LHSName = "["; DID = diag::err_expected_rsquare; break;
+ }
+ P.Diag(P.Tok, DID);
+ P.Diag(LOpen, diag::note_matching) << LHSName;
+ if (P.SkipUntil(Close))
+ LClose = P.Tok.getLocation();
+ return true;
+}
+
+void Parser::BalancedDelimiterTracker::skipToEnd() {
+ P.SkipUntil(Close, false);
+}
diff --git a/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h b/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h
new file mode 100644
index 0000000..ef17aee
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Parse/RAIIObjectsForParser.h
@@ -0,0 +1,142 @@
+//===--- RAIIObjectsForParser.h - RAII helpers for the parser ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines and implements some simple RAII objects that are used
+// by the parser to manage bits in recursion.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_PARSE_RAII_OBJECTS_FOR_PARSER_H
+#define LLVM_CLANG_PARSE_RAII_OBJECTS_FOR_PARSER_H
+
+#include "clang/Parse/ParseDiagnostic.h"
+
+namespace clang {
+ // TODO: move ParsingDeclRAIIObject here.
+ // TODO: move ParsingClassDefinition here.
+ // TODO: move TentativeParsingAction here.
+
+
+ /// ExtensionRAIIObject - This saves the state of extension warnings when
+ /// constructed and disables them. When destructed, it restores them back to
+ /// the way they used to be. This is used to handle __extension__ in the
+ /// parser.
+ class ExtensionRAIIObject {
+ void operator=(const ExtensionRAIIObject &); // DO NOT IMPLEMENT
+ ExtensionRAIIObject(const ExtensionRAIIObject&); // DO NOT IMPLEMENT
+ DiagnosticsEngine &Diags;
+ public:
+ ExtensionRAIIObject(DiagnosticsEngine &diags) : Diags(diags) {
+ Diags.IncrementAllExtensionsSilenced();
+ }
+
+ ~ExtensionRAIIObject() {
+ Diags.DecrementAllExtensionsSilenced();
+ }
+ };
+
+ /// ColonProtectionRAIIObject - This sets the Parser::ColonIsSacred bool and
+ /// restores it when destroyed. This says that "foo:" should not be
+ /// considered a possible typo for "foo::" for error recovery purposes.
+ class ColonProtectionRAIIObject {
+ Parser &P;
+ bool OldVal;
+ public:
+ ColonProtectionRAIIObject(Parser &p, bool Value = true)
+ : P(p), OldVal(P.ColonIsSacred) {
+ P.ColonIsSacred = Value;
+ }
+
+ /// restore - This can be used to restore the state early, before the dtor
+ /// is run.
+ void restore() {
+ P.ColonIsSacred = OldVal;
+ }
+
+ ~ColonProtectionRAIIObject() {
+ restore();
+ }
+ };
+
+ /// \brief RAII object that makes '>' behave either as an operator
+ /// or as the closing angle bracket for a template argument list.
+ class GreaterThanIsOperatorScope {
+ bool &GreaterThanIsOperator;
+ bool OldGreaterThanIsOperator;
+ public:
+ GreaterThanIsOperatorScope(bool &GTIO, bool Val)
+ : GreaterThanIsOperator(GTIO), OldGreaterThanIsOperator(GTIO) {
+ GreaterThanIsOperator = Val;
+ }
+
+ ~GreaterThanIsOperatorScope() {
+ GreaterThanIsOperator = OldGreaterThanIsOperator;
+ }
+ };
+
+ class InMessageExpressionRAIIObject {
+ bool &InMessageExpression;
+ bool OldValue;
+
+ public:
+ InMessageExpressionRAIIObject(Parser &P, bool Value)
+ : InMessageExpression(P.InMessageExpression),
+ OldValue(P.InMessageExpression) {
+ InMessageExpression = Value;
+ }
+
+ ~InMessageExpressionRAIIObject() {
+ InMessageExpression = OldValue;
+ }
+ };
+
+ /// \brief RAII object that makes sure paren/bracket/brace count is correct
+ /// after declaration/statement parsing, even when there's a parsing error.
+ class ParenBraceBracketBalancer {
+ Parser &P;
+ unsigned short ParenCount, BracketCount, BraceCount;
+ public:
+ ParenBraceBracketBalancer(Parser &p)
+ : P(p), ParenCount(p.ParenCount), BracketCount(p.BracketCount),
+ BraceCount(p.BraceCount) { }
+
+ ~ParenBraceBracketBalancer() {
+ P.ParenCount = ParenCount;
+ P.BracketCount = BracketCount;
+ P.BraceCount = BraceCount;
+ }
+ };
+
+ class PoisonSEHIdentifiersRAIIObject {
+ PoisonIdentifierRAIIObject Ident_AbnormalTermination;
+ PoisonIdentifierRAIIObject Ident_GetExceptionCode;
+ PoisonIdentifierRAIIObject Ident_GetExceptionInfo;
+ PoisonIdentifierRAIIObject Ident__abnormal_termination;
+ PoisonIdentifierRAIIObject Ident__exception_code;
+ PoisonIdentifierRAIIObject Ident__exception_info;
+ PoisonIdentifierRAIIObject Ident___abnormal_termination;
+ PoisonIdentifierRAIIObject Ident___exception_code;
+ PoisonIdentifierRAIIObject Ident___exception_info;
+ public:
+ PoisonSEHIdentifiersRAIIObject(Parser &Self, bool NewValue)
+ : Ident_AbnormalTermination(Self.Ident_AbnormalTermination, NewValue),
+ Ident_GetExceptionCode(Self.Ident_GetExceptionCode, NewValue),
+ Ident_GetExceptionInfo(Self.Ident_GetExceptionInfo, NewValue),
+ Ident__abnormal_termination(Self.Ident__abnormal_termination, NewValue),
+ Ident__exception_code(Self.Ident__exception_code, NewValue),
+ Ident__exception_info(Self.Ident__exception_info, NewValue),
+ Ident___abnormal_termination(Self.Ident___abnormal_termination, NewValue),
+ Ident___exception_code(Self.Ident___exception_code, NewValue),
+ Ident___exception_info(Self.Ident___exception_info, NewValue) {
+ }
+ };
+
+} // end namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/DeltaTree.cpp b/contrib/llvm/tools/clang/lib/Rewrite/DeltaTree.cpp
new file mode 100644
index 0000000..4297dc8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/DeltaTree.cpp
@@ -0,0 +1,467 @@
+//===--- DeltaTree.cpp - B-Tree for Rewrite Delta tracking ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the DeltaTree and related classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/DeltaTree.h"
+#include "clang/Basic/LLVM.h"
+#include <cstring>
+#include <cstdio>
+using namespace clang;
+
+/// The DeltaTree class is a multiway search tree (BTree) structure with some
+/// fancy features. B-Trees are generally more memory and cache efficient
+/// than binary trees, because they store multiple keys/values in each node.
+///
+/// DeltaTree implements a key/value mapping from FileIndex to Delta, allowing
+/// fast lookup by FileIndex. However, an added (important) bonus is that it
+/// can also efficiently tell us the full accumulated delta for a specific
+/// file offset, without traversing the whole tree.
+///
+/// The nodes of the tree are made up of instances of two classes:
+/// DeltaTreeNode and DeltaTreeInteriorNode. The latter subclasses the
+/// former and adds child pointers. Each node knows the full delta of all
+/// entries (recursively) contained inside of it, which allows us to get the
+/// full delta implied by a whole subtree in constant time.
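+
+// Illustrative sketch (editor's note, not part of the original change): with
+// WidthFactor = 8, a leaf holding the deltas (FileLoc=10, +2), (20, -1) and
+// (30, +5) has FullDelta = +6; an interior node's FullDelta additionally sums
+// its children's FullDelta values, so the delta accumulated up to any file
+// offset can be computed along a single root-to-leaf path instead of by
+// visiting every entry.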
+
+namespace {
+ /// SourceDelta - As code in the original input buffer is added and deleted,
+ /// SourceDelta records are used to keep track of how the input SourceLocation
+ /// object is mapped into the output buffer.
+ struct SourceDelta {
+ unsigned FileLoc;
+ int Delta;
+
+ static SourceDelta get(unsigned Loc, int D) {
+ SourceDelta Delta;
+ Delta.FileLoc = Loc;
+ Delta.Delta = D;
+ return Delta;
+ }
+ };
+
+ /// DeltaTreeNode - The common part of all nodes.
+ ///
+ class DeltaTreeNode {
+ public:
+ struct InsertResult {
+ DeltaTreeNode *LHS, *RHS;
+ SourceDelta Split;
+ };
+
+ private:
+ friend class DeltaTreeInteriorNode;
+
+ /// WidthFactor - This controls the number of K/V slots held in the BTree:
+ /// how wide it is. Each level of the BTree is guaranteed to have at least
+ /// WidthFactor-1 K/V pairs (except the root) and may have at most
+ /// 2*WidthFactor-1 K/V pairs.
+ enum { WidthFactor = 8 };
+
+ /// Values - This tracks the SourceDelta's currently in this node.
+ ///
+ SourceDelta Values[2*WidthFactor-1];
+
+ /// NumValuesUsed - This tracks the number of values this node currently
+ /// holds.
+ unsigned char NumValuesUsed;
+
+ /// IsLeaf - This is true if this is a leaf of the btree. If false, this is
+ /// an interior node, and is actually an instance of DeltaTreeInteriorNode.
+ bool IsLeaf;
+
+ /// FullDelta - This is the full delta of all the values in this node and
+ /// all children nodes.
+ int FullDelta;
+ public:
+ DeltaTreeNode(bool isLeaf = true)
+ : NumValuesUsed(0), IsLeaf(isLeaf), FullDelta(0) {}
+
+ bool isLeaf() const { return IsLeaf; }
+ int getFullDelta() const { return FullDelta; }
+ bool isFull() const { return NumValuesUsed == 2*WidthFactor-1; }
+
+ unsigned getNumValuesUsed() const { return NumValuesUsed; }
+ const SourceDelta &getValue(unsigned i) const {
+ assert(i < NumValuesUsed && "Invalid value #");
+ return Values[i];
+ }
+ SourceDelta &getValue(unsigned i) {
+ assert(i < NumValuesUsed && "Invalid value #");
+ return Values[i];
+ }
+
+ /// DoInsertion - Do an insertion of the specified FileIndex/Delta pair into
+ /// this node. If insertion is easy, do it and return false. Otherwise,
+ /// split the node, populate InsertRes with info about the split, and return
+ /// true.
+ bool DoInsertion(unsigned FileIndex, int Delta, InsertResult *InsertRes);
+
+ void DoSplit(InsertResult &InsertRes);
+
+
+ /// RecomputeFullDeltaLocally - Recompute the FullDelta field by doing a
+ /// local walk over our contained deltas.
+ void RecomputeFullDeltaLocally();
+
+ void Destroy();
+
+ //static inline bool classof(const DeltaTreeNode *) { return true; }
+ };
+} // end anonymous namespace
+
+namespace {
+ /// DeltaTreeInteriorNode - When isLeaf = false, a node has child pointers.
+ /// This class tracks them.
+ class DeltaTreeInteriorNode : public DeltaTreeNode {
+ DeltaTreeNode *Children[2*WidthFactor];
+ ~DeltaTreeInteriorNode() {
+ for (unsigned i = 0, e = NumValuesUsed+1; i != e; ++i)
+ Children[i]->Destroy();
+ }
+ friend class DeltaTreeNode;
+ public:
+ DeltaTreeInteriorNode() : DeltaTreeNode(false /*nonleaf*/) {}
+
+ DeltaTreeInteriorNode(const InsertResult &IR)
+ : DeltaTreeNode(false /*nonleaf*/) {
+ Children[0] = IR.LHS;
+ Children[1] = IR.RHS;
+ Values[0] = IR.Split;
+ FullDelta = IR.LHS->getFullDelta()+IR.RHS->getFullDelta()+IR.Split.Delta;
+ NumValuesUsed = 1;
+ }
+
+ const DeltaTreeNode *getChild(unsigned i) const {
+ assert(i < getNumValuesUsed()+1 && "Invalid child");
+ return Children[i];
+ }
+ DeltaTreeNode *getChild(unsigned i) {
+ assert(i < getNumValuesUsed()+1 && "Invalid child");
+ return Children[i];
+ }
+
+ //static inline bool classof(const DeltaTreeInteriorNode *) { return true; }
+ static inline bool classof(const DeltaTreeNode *N) { return !N->isLeaf(); }
+ };
+}
+
+
+/// Destroy - A 'virtual' destructor.
+void DeltaTreeNode::Destroy() {
+ if (isLeaf())
+ delete this;
+ else
+ delete cast<DeltaTreeInteriorNode>(this);
+}
+
+/// RecomputeFullDeltaLocally - Recompute the FullDelta field by doing a
+/// local walk over our contained deltas.
+void DeltaTreeNode::RecomputeFullDeltaLocally() {
+ int NewFullDelta = 0;
+ for (unsigned i = 0, e = getNumValuesUsed(); i != e; ++i)
+ NewFullDelta += Values[i].Delta;
+ if (DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(this))
+ for (unsigned i = 0, e = getNumValuesUsed()+1; i != e; ++i)
+ NewFullDelta += IN->getChild(i)->getFullDelta();
+ FullDelta = NewFullDelta;
+}
+
+/// DoInsertion - Do an insertion of the specified FileIndex/Delta pair into
+/// this node. If insertion is easy, do it and return false. Otherwise,
+/// split the node, populate InsertRes with info about the split, and return
+/// true.
+bool DeltaTreeNode::DoInsertion(unsigned FileIndex, int Delta,
+ InsertResult *InsertRes) {
+ // Maintain full delta for this node.
+ FullDelta += Delta;
+
+ // Find the insertion point, the first delta whose index is >= FileIndex.
+ unsigned i = 0, e = getNumValuesUsed();
+ while (i != e && FileIndex > getValue(i).FileLoc)
+ ++i;
+
+ // If we found a record for exactly this file index, just merge this
+ // value into the pre-existing record and finish early.
+ if (i != e && getValue(i).FileLoc == FileIndex) {
+ // NOTE: Delta could drop to zero here. This means that the delta entry is
+ // useless and could be removed. Supporting erases is more complex than
+ // leaving an entry with Delta=0, so we just leave an entry with Delta=0 in
+ // the tree.
+ Values[i].Delta += Delta;
+ return false;
+ }
+
+ // Otherwise, we found an insertion point, and we know that the value at the
+ // specified index is > FileIndex. Handle the leaf case first.
+ if (isLeaf()) {
+ if (!isFull()) {
+ // For an insertion into a non-full leaf node, just insert the value in
+ // its sorted position. This requires moving later values over.
+ if (i != e)
+ memmove(&Values[i+1], &Values[i], sizeof(Values[0])*(e-i));
+ Values[i] = SourceDelta::get(FileIndex, Delta);
+ ++NumValuesUsed;
+ return false;
+ }
+
+    // Otherwise, if this leaf is full, split the node at its median, insert
+ // the value into one of the children, and return the result.
+ assert(InsertRes && "No result location specified");
+ DoSplit(*InsertRes);
+
+ if (InsertRes->Split.FileLoc > FileIndex)
+ InsertRes->LHS->DoInsertion(FileIndex, Delta, 0 /*can't fail*/);
+ else
+ InsertRes->RHS->DoInsertion(FileIndex, Delta, 0 /*can't fail*/);
+ return true;
+ }
+
+ // Otherwise, this is an interior node. Send the request down the tree.
+ DeltaTreeInteriorNode *IN = cast<DeltaTreeInteriorNode>(this);
+ if (!IN->Children[i]->DoInsertion(FileIndex, Delta, InsertRes))
+ return false; // If there was space in the child, just return.
+
+ // Okay, this split the subtree, producing a new value and two children to
+ // insert here. If this node is non-full, we can just insert it directly.
+ if (!isFull()) {
+    // Now that we have two nodes and a new element, insert the percolated
+    // value into ourself by moving all the later values/children down, then
+    // inserting the new one.
+ if (i != e)
+ memmove(&IN->Children[i+2], &IN->Children[i+1],
+ (e-i)*sizeof(IN->Children[0]));
+ IN->Children[i] = InsertRes->LHS;
+ IN->Children[i+1] = InsertRes->RHS;
+
+ if (e != i)
+ memmove(&Values[i+1], &Values[i], (e-i)*sizeof(Values[0]));
+ Values[i] = InsertRes->Split;
+ ++NumValuesUsed;
+ return false;
+ }
+
+ // Finally, if this interior node was full and a node is percolated up, split
+ // ourself and return that up the chain. Start by saving all our info to
+ // avoid having the split clobber it.
+ IN->Children[i] = InsertRes->LHS;
+ DeltaTreeNode *SubRHS = InsertRes->RHS;
+ SourceDelta SubSplit = InsertRes->Split;
+
+ // Do the split.
+ DoSplit(*InsertRes);
+
+ // Figure out where to insert SubRHS/NewSplit.
+ DeltaTreeInteriorNode *InsertSide;
+ if (SubSplit.FileLoc < InsertRes->Split.FileLoc)
+ InsertSide = cast<DeltaTreeInteriorNode>(InsertRes->LHS);
+ else
+ InsertSide = cast<DeltaTreeInteriorNode>(InsertRes->RHS);
+
+ // We now have a non-empty interior node 'InsertSide' to insert
+ // SubRHS/SubSplit into. Find out where to insert SubSplit.
+
+  // Find the insertion point: the first delta whose index is > SubSplit.FileLoc.
+ i = 0; e = InsertSide->getNumValuesUsed();
+ while (i != e && SubSplit.FileLoc > InsertSide->getValue(i).FileLoc)
+ ++i;
+
+ // Now we know that i is the place to insert the split value into. Insert it
+ // and the child right after it.
+ if (i != e)
+ memmove(&InsertSide->Children[i+2], &InsertSide->Children[i+1],
+ (e-i)*sizeof(IN->Children[0]));
+ InsertSide->Children[i+1] = SubRHS;
+
+ if (e != i)
+ memmove(&InsertSide->Values[i+1], &InsertSide->Values[i],
+ (e-i)*sizeof(Values[0]));
+ InsertSide->Values[i] = SubSplit;
+ ++InsertSide->NumValuesUsed;
+ InsertSide->FullDelta += SubSplit.Delta + SubRHS->getFullDelta();
+ return true;
+}
+
+/// DoSplit - Split the currently full node (which has 2*WidthFactor-1 values)
+/// into two subtrees each with "WidthFactor-1" values and a pivot value.
+/// Return the pieces in InsertRes.
+void DeltaTreeNode::DoSplit(InsertResult &InsertRes) {
+ assert(isFull() && "Why split a non-full node?");
+
+ // Since this node is full, it contains 2*WidthFactor-1 values. We move
+ // the first 'WidthFactor-1' values to the LHS child (which we leave in this
+ // node), propagate one value up, and move the last 'WidthFactor-1' values
+ // into the RHS child.
+
+ // Create the new child node.
+ DeltaTreeNode *NewNode;
+ if (DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(this)) {
+ // If this is an interior node, also move over 'WidthFactor' children
+ // into the new node.
+ DeltaTreeInteriorNode *New = new DeltaTreeInteriorNode();
+ memcpy(&New->Children[0], &IN->Children[WidthFactor],
+ WidthFactor*sizeof(IN->Children[0]));
+ NewNode = New;
+ } else {
+ // Just create the new leaf node.
+ NewNode = new DeltaTreeNode();
+ }
+
+ // Move over the last 'WidthFactor-1' values from here to NewNode.
+ memcpy(&NewNode->Values[0], &Values[WidthFactor],
+ (WidthFactor-1)*sizeof(Values[0]));
+
+ // Decrease the number of values in the two nodes.
+ NewNode->NumValuesUsed = NumValuesUsed = WidthFactor-1;
+
+ // Recompute the two nodes' full delta.
+ NewNode->RecomputeFullDeltaLocally();
+ RecomputeFullDeltaLocally();
+
+ InsertRes.LHS = this;
+ InsertRes.RHS = NewNode;
+ InsertRes.Split = Values[WidthFactor-1];
+}
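+// Worked illustration (assuming a hypothetical WidthFactor of 4): a full node
+// holds 2*4-1 = 7 values v0..v6. DoSplit leaves v0..v2 in this node
+// (InsertRes.LHS), moves v4..v6 into the new node (InsertRes.RHS), and reports
+// v3 as InsertRes.Split so the caller can percolate it up into the parent.
+// Interior nodes also move their last WidthFactor children into the new node.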
+
+
+
+//===----------------------------------------------------------------------===//
+// DeltaTree Implementation
+//===----------------------------------------------------------------------===//
+
+//#define VERIFY_TREE
+
+#ifdef VERIFY_TREE
+/// VerifyTree - Walk the btree performing assertions on various properties to
+/// verify consistency. This is useful for debugging new changes to the tree.
+static void VerifyTree(const DeltaTreeNode *N) {
+ const DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(N);
+ if (IN == 0) {
+ // Verify leaves, just ensure that FullDelta matches up and the elements
+ // are in proper order.
+ int FullDelta = 0;
+ for (unsigned i = 0, e = N->getNumValuesUsed(); i != e; ++i) {
+ if (i)
+ assert(N->getValue(i-1).FileLoc < N->getValue(i).FileLoc);
+ FullDelta += N->getValue(i).Delta;
+ }
+ assert(FullDelta == N->getFullDelta());
+ return;
+ }
+
+ // Verify interior nodes: Ensure that FullDelta matches up and the
+ // elements are in proper order and the children are in proper order.
+ int FullDelta = 0;
+ for (unsigned i = 0, e = IN->getNumValuesUsed(); i != e; ++i) {
+ const SourceDelta &IVal = N->getValue(i);
+ const DeltaTreeNode *IChild = IN->getChild(i);
+ if (i)
+ assert(IN->getValue(i-1).FileLoc < IVal.FileLoc);
+ FullDelta += IVal.Delta;
+ FullDelta += IChild->getFullDelta();
+
+ // The largest value in child #i should be smaller than FileLoc.
+ assert(IChild->getValue(IChild->getNumValuesUsed()-1).FileLoc <
+ IVal.FileLoc);
+
+ // The smallest value in child #i+1 should be larger than FileLoc.
+ assert(IN->getChild(i+1)->getValue(0).FileLoc > IVal.FileLoc);
+ VerifyTree(IChild);
+ }
+
+ FullDelta += IN->getChild(IN->getNumValuesUsed())->getFullDelta();
+
+ assert(FullDelta == N->getFullDelta());
+}
+#endif // VERIFY_TREE
+
+static DeltaTreeNode *getRoot(void *Root) {
+ return (DeltaTreeNode*)Root;
+}
+
+DeltaTree::DeltaTree() {
+ Root = new DeltaTreeNode();
+}
+DeltaTree::DeltaTree(const DeltaTree &RHS) {
+ // Currently we only support copying when the RHS is empty.
+ assert(getRoot(RHS.Root)->getNumValuesUsed() == 0 &&
+ "Can only copy empty tree");
+ Root = new DeltaTreeNode();
+}
+
+DeltaTree::~DeltaTree() {
+ getRoot(Root)->Destroy();
+}
+
+/// getDeltaAt - Return the accumulated delta at the specified file offset.
+/// This includes all insertions or deletions that occurred *before* the
+/// specified file index.
+int DeltaTree::getDeltaAt(unsigned FileIndex) const {
+ const DeltaTreeNode *Node = getRoot(Root);
+
+ int Result = 0;
+
+ // Walk down the tree.
+ while (1) {
+ // For all nodes, include any local deltas before the specified file
+ // index by summing them up directly. Keep track of how many were
+ // included.
+ unsigned NumValsGreater = 0;
+ for (unsigned e = Node->getNumValuesUsed(); NumValsGreater != e;
+ ++NumValsGreater) {
+ const SourceDelta &Val = Node->getValue(NumValsGreater);
+
+ if (Val.FileLoc >= FileIndex)
+ break;
+ Result += Val.Delta;
+ }
+
+ // If we have an interior node, include information about children and
+ // recurse. Otherwise, if we have a leaf, we're done.
+ const DeltaTreeInteriorNode *IN = dyn_cast<DeltaTreeInteriorNode>(Node);
+ if (!IN) return Result;
+
+    // Include any children to the left of the values we skipped; all of
+ // their deltas should be included as well.
+ for (unsigned i = 0; i != NumValsGreater; ++i)
+ Result += IN->getChild(i)->getFullDelta();
+
+ // If we found exactly the value we were looking for, break off the
+ // search early. There is no need to search the RHS of the value for
+ // partial results.
+ if (NumValsGreater != Node->getNumValuesUsed() &&
+ Node->getValue(NumValsGreater).FileLoc == FileIndex)
+ return Result+IN->getChild(NumValsGreater)->getFullDelta();
+
+ // Otherwise, traverse down the tree. The selected subtree may be
+ // partially included in the range.
+ Node = IN->getChild(NumValsGreater);
+ }
+ // NOT REACHED.
+}
+
+/// AddDelta - When a change is made that shifts around the text buffer,
+/// this method is used to record that info. It inserts a delta of 'Delta'
+/// into the current DeltaTree at offset FileIndex.
+void DeltaTree::AddDelta(unsigned FileIndex, int Delta) {
+ assert(Delta && "Adding a noop?");
+ DeltaTreeNode *MyRoot = getRoot(Root);
+
+ DeltaTreeNode::InsertResult InsertRes;
+ if (MyRoot->DoInsertion(FileIndex, Delta, &InsertRes)) {
+ Root = MyRoot = new DeltaTreeInteriorNode(InsertRes);
+ }
+
+#ifdef VERIFY_TREE
+ VerifyTree(MyRoot);
+#endif
+}
+
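The DeltaTree above is the bookkeeping structure the rewriter uses to map
offsets in an original file to offsets in the edited buffer. A minimal usage
sketch (illustrative only: the helper, the offsets, and the include path are
assumptions rather than part of this change; AddDelta/getDeltaAt are the
methods defined above):

    #include "clang/Rewrite/DeltaTree.h"

    // Map an original-file offset to the corresponding offset in the
    // rewritten buffer by adding every delta recorded before that offset.
    static unsigned mapOffset(const clang::DeltaTree &Deltas, unsigned Orig) {
      return unsigned(int(Orig) + Deltas.getDeltaAt(Orig));
    }

    static void deltaTreeExample() {
      clang::DeltaTree Deltas;
      Deltas.AddDelta(10, +5);                   // 5 chars inserted at offset 10
      Deltas.AddDelta(40, -3);                   // 3 chars removed at offset 40
      unsigned NewOff = mapOffset(Deltas, 50);   // 50 + 5 - 3 == 52
      (void)NewOff;
    }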
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp
new file mode 100644
index 0000000..3863adb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/FixItRewriter.cpp
@@ -0,0 +1,205 @@
+//===--- FixItRewriter.cpp - Fix-It Rewriter Diagnostic Client --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a diagnostic client adaptor that performs rewrites as
+// suggested by code modification hints attached to diagnostics. It
+// then forwards any diagnostics to the adapted diagnostic client.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/FixItRewriter.h"
+#include "clang/Edit/Commit.h"
+#include "clang/Edit/EditsReceiver.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Path.h"
+#include "llvm/ADT/OwningPtr.h"
+#include <cstdio>
+
+using namespace clang;
+
+FixItRewriter::FixItRewriter(DiagnosticsEngine &Diags, SourceManager &SourceMgr,
+ const LangOptions &LangOpts,
+ FixItOptions *FixItOpts)
+ : Diags(Diags),
+ Editor(SourceMgr, LangOpts),
+ Rewrite(SourceMgr, LangOpts),
+ FixItOpts(FixItOpts),
+ NumFailures(0),
+ PrevDiagSilenced(false) {
+ OwnsClient = Diags.ownsClient();
+ Client = Diags.takeClient();
+ Diags.setClient(this);
+}
+
+FixItRewriter::~FixItRewriter() {
+ Diags.takeClient();
+ Diags.setClient(Client, OwnsClient);
+}
+
+bool FixItRewriter::WriteFixedFile(FileID ID, raw_ostream &OS) {
+ const RewriteBuffer *RewriteBuf = Rewrite.getRewriteBufferFor(ID);
+ if (!RewriteBuf) return true;
+ RewriteBuf->write(OS);
+ OS.flush();
+ return false;
+}
+
+namespace {
+
+class RewritesReceiver : public edit::EditsReceiver {
+ Rewriter &Rewrite;
+
+public:
+ RewritesReceiver(Rewriter &Rewrite) : Rewrite(Rewrite) { }
+
+ virtual void insert(SourceLocation loc, StringRef text) {
+ Rewrite.InsertText(loc, text);
+ }
+ virtual void replace(CharSourceRange range, StringRef text) {
+ Rewrite.ReplaceText(range.getBegin(), Rewrite.getRangeSize(range), text);
+ }
+};
+
+}
+
+bool FixItRewriter::WriteFixedFiles(
+ std::vector<std::pair<std::string, std::string> > *RewrittenFiles) {
+ if (NumFailures > 0 && !FixItOpts->FixWhatYouCan) {
+ Diag(FullSourceLoc(), diag::warn_fixit_no_changes);
+ return true;
+ }
+
+ RewritesReceiver Rec(Rewrite);
+ Editor.applyRewrites(Rec);
+
+ for (iterator I = buffer_begin(), E = buffer_end(); I != E; ++I) {
+ const FileEntry *Entry = Rewrite.getSourceMgr().getFileEntryForID(I->first);
+ int fd;
+ std::string Filename = FixItOpts->RewriteFilename(Entry->getName(), fd);
+ std::string Err;
+ OwningPtr<llvm::raw_fd_ostream> OS;
+ if (fd != -1) {
+ OS.reset(new llvm::raw_fd_ostream(fd, /*shouldClose=*/true));
+ } else {
+ OS.reset(new llvm::raw_fd_ostream(Filename.c_str(), Err,
+ llvm::raw_fd_ostream::F_Binary));
+ }
+ if (!Err.empty()) {
+ Diags.Report(clang::diag::err_fe_unable_to_open_output)
+ << Filename << Err;
+ continue;
+ }
+ RewriteBuffer &RewriteBuf = I->second;
+ RewriteBuf.write(*OS);
+ OS->flush();
+
+ if (RewrittenFiles)
+ RewrittenFiles->push_back(std::make_pair(Entry->getName(), Filename));
+ }
+
+ return false;
+}
+
+bool FixItRewriter::IncludeInDiagnosticCounts() const {
+ return Client ? Client->IncludeInDiagnosticCounts() : true;
+}
+
+void FixItRewriter::HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info) {
+ // Default implementation (Warnings/errors count).
+ DiagnosticConsumer::HandleDiagnostic(DiagLevel, Info);
+
+ if (!FixItOpts->Silent ||
+ DiagLevel >= DiagnosticsEngine::Error ||
+ (DiagLevel == DiagnosticsEngine::Note && !PrevDiagSilenced) ||
+ (DiagLevel > DiagnosticsEngine::Note && Info.getNumFixItHints())) {
+ Client->HandleDiagnostic(DiagLevel, Info);
+ PrevDiagSilenced = false;
+ } else {
+ PrevDiagSilenced = true;
+ }
+
+ // Skip over any diagnostics that are ignored or notes.
+ if (DiagLevel <= DiagnosticsEngine::Note)
+ return;
+ // Skip over errors if we are only fixing warnings.
+ if (DiagLevel >= DiagnosticsEngine::Error && FixItOpts->FixOnlyWarnings) {
+ ++NumFailures;
+ return;
+ }
+
+  // Make sure that we can perform all of the modifications suggested
+  // in this diagnostic.
+ edit::Commit commit(Editor);
+ for (unsigned Idx = 0, Last = Info.getNumFixItHints();
+ Idx < Last; ++Idx) {
+ const FixItHint &Hint = Info.getFixItHint(Idx);
+
+ if (Hint.CodeToInsert.empty()) {
+ if (Hint.InsertFromRange.isValid())
+ commit.insertFromRange(Hint.RemoveRange.getBegin(),
+ Hint.InsertFromRange, /*afterToken=*/false,
+ Hint.BeforePreviousInsertions);
+ else
+ commit.remove(Hint.RemoveRange);
+ } else {
+ if (Hint.RemoveRange.isTokenRange() ||
+ Hint.RemoveRange.getBegin() != Hint.RemoveRange.getEnd())
+ commit.replace(Hint.RemoveRange, Hint.CodeToInsert);
+ else
+ commit.insert(Hint.RemoveRange.getBegin(), Hint.CodeToInsert,
+ /*afterToken=*/false, Hint.BeforePreviousInsertions);
+ }
+ }
+ bool CanRewrite = Info.getNumFixItHints() > 0 && commit.isCommitable();
+
+ if (!CanRewrite) {
+ if (Info.getNumFixItHints() > 0)
+ Diag(Info.getLocation(), diag::note_fixit_in_macro);
+
+ // If this was an error, refuse to perform any rewriting.
+ if (DiagLevel >= DiagnosticsEngine::Error) {
+ if (++NumFailures == 1)
+ Diag(Info.getLocation(), diag::note_fixit_unfixed_error);
+ }
+ return;
+ }
+
+ if (!Editor.commit(commit)) {
+ ++NumFailures;
+ Diag(Info.getLocation(), diag::note_fixit_failed);
+ return;
+ }
+
+ Diag(Info.getLocation(), diag::note_fixit_applied);
+}
+
+/// \brief Emit a diagnostic via the adapted diagnostic client.
+void FixItRewriter::Diag(SourceLocation Loc, unsigned DiagID) {
+ // When producing this diagnostic, we temporarily bypass ourselves,
+ // clear out any current diagnostic, and let the downstream client
+ // format the diagnostic.
+ Diags.takeClient();
+ Diags.setClient(Client);
+ Diags.Clear();
+ Diags.Report(Loc, DiagID);
+ Diags.takeClient();
+ Diags.setClient(this);
+}
+
+DiagnosticConsumer *FixItRewriter::clone(DiagnosticsEngine &Diags) const {
+ return new FixItRewriter(Diags, Diags.getSourceManager(),
+ Rewrite.getLangOpts(), FixItOpts);
+}
+
+FixItOptions::~FixItOptions() {}
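A sketch of how the pieces in this file are meant to be combined outside the
stock frontend actions (illustrative only: the FixItToDotFixed policy and the
surrounding setup are assumptions; FixItRewriter, FixItOptions, and
WriteFixedFiles are the interfaces defined above):

    // A FixItOptions policy that writes each fixed file next to the original
    // with a ".fixed" suffix instead of modifying it in place.
    class FixItToDotFixed : public clang::FixItOptions {
    public:
      std::string RewriteFilename(const std::string &Filename, int &fd) {
        fd = -1;                      // no pre-opened descriptor; open by name
        return Filename + ".fixed";
      }
    };

    // Given a configured clang::CompilerInstance CI, the rewriter installs
    // itself as the diagnostic client, records the fix-it hints it can apply,
    // and WriteFixedFiles() emits the rewritten buffers (returning true on
    // failure):
    //
    //   FixItToDotFixed Opts;
    //   clang::FixItRewriter Rewriter(CI.getDiagnostics(), CI.getSourceManager(),
    //                                 CI.getLangOpts(), &Opts);
    //   ... run the compilation so diagnostics and their hints are produced ...
    //   Rewriter.WriteFixedFiles();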
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp
new file mode 100644
index 0000000..1753325
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/FrontendActions.cpp
@@ -0,0 +1,183 @@
+//===--- FrontendActions.cpp ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/FrontendActions.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/Utils.h"
+#include "clang/Rewrite/ASTConsumers.h"
+#include "clang/Rewrite/FixItRewriter.h"
+#include "clang/Rewrite/Rewriters.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/FileSystem.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// AST Consumer Actions
+//===----------------------------------------------------------------------===//
+
+ASTConsumer *HTMLPrintAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile))
+ return CreateHTMLPrinter(OS, CI.getPreprocessor());
+ return 0;
+}
+
+FixItAction::FixItAction() {}
+FixItAction::~FixItAction() {}
+
+ASTConsumer *FixItAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return new ASTConsumer();
+}
+
+namespace {
+class FixItRewriteInPlace : public FixItOptions {
+public:
+ std::string RewriteFilename(const std::string &Filename, int &fd) {
+ fd = -1;
+ return Filename;
+ }
+};
+
+class FixItActionSuffixInserter : public FixItOptions {
+ std::string NewSuffix;
+
+public:
+ FixItActionSuffixInserter(std::string NewSuffix, bool FixWhatYouCan)
+ : NewSuffix(NewSuffix) {
+ this->FixWhatYouCan = FixWhatYouCan;
+ }
+
+ std::string RewriteFilename(const std::string &Filename, int &fd) {
+ fd = -1;
+ SmallString<128> Path(Filename);
+ llvm::sys::path::replace_extension(Path,
+ NewSuffix + llvm::sys::path::extension(Path));
+ return Path.str();
+ }
+};
+
+class FixItRewriteToTemp : public FixItOptions {
+public:
+ std::string RewriteFilename(const std::string &Filename, int &fd) {
+ SmallString<128> Path;
+ Path = llvm::sys::path::filename(Filename);
+ Path += "-%%%%%%%%";
+ Path += llvm::sys::path::extension(Filename);
+ SmallString<128> NewPath;
+ llvm::sys::fs::unique_file(Path.str(), fd, NewPath);
+ return NewPath.str();
+ }
+};
+} // end anonymous namespace
+
+bool FixItAction::BeginSourceFileAction(CompilerInstance &CI,
+ StringRef Filename) {
+ const FrontendOptions &FEOpts = getCompilerInstance().getFrontendOpts();
+ if (!FEOpts.FixItSuffix.empty()) {
+ FixItOpts.reset(new FixItActionSuffixInserter(FEOpts.FixItSuffix,
+ FEOpts.FixWhatYouCan));
+ } else {
+ FixItOpts.reset(new FixItRewriteInPlace);
+ FixItOpts->FixWhatYouCan = FEOpts.FixWhatYouCan;
+ }
+ Rewriter.reset(new FixItRewriter(CI.getDiagnostics(), CI.getSourceManager(),
+ CI.getLangOpts(), FixItOpts.get()));
+ return true;
+}
+
+void FixItAction::EndSourceFileAction() {
+ // Otherwise rewrite all files.
+ Rewriter->WriteFixedFiles();
+}
+
+bool FixItRecompile::BeginInvocation(CompilerInstance &CI) {
+
+ std::vector<std::pair<std::string, std::string> > RewrittenFiles;
+ bool err = false;
+ {
+ const FrontendOptions &FEOpts = CI.getFrontendOpts();
+ OwningPtr<FrontendAction> FixAction(new SyntaxOnlyAction());
+ if (FixAction->BeginSourceFile(CI, FEOpts.Inputs[0])) {
+ OwningPtr<FixItOptions> FixItOpts;
+ if (FEOpts.FixToTemporaries)
+ FixItOpts.reset(new FixItRewriteToTemp());
+ else
+ FixItOpts.reset(new FixItRewriteInPlace());
+ FixItOpts->Silent = true;
+ FixItOpts->FixWhatYouCan = FEOpts.FixWhatYouCan;
+ FixItOpts->FixOnlyWarnings = FEOpts.FixOnlyWarnings;
+ FixItRewriter Rewriter(CI.getDiagnostics(), CI.getSourceManager(),
+ CI.getLangOpts(), FixItOpts.get());
+ FixAction->Execute();
+
+ err = Rewriter.WriteFixedFiles(&RewrittenFiles);
+
+ FixAction->EndSourceFile();
+ CI.setSourceManager(0);
+ CI.setFileManager(0);
+ } else {
+ err = true;
+ }
+ }
+ if (err)
+ return false;
+ CI.getDiagnosticClient().clear();
+ CI.getDiagnostics().Reset();
+
+ PreprocessorOptions &PPOpts = CI.getPreprocessorOpts();
+ PPOpts.RemappedFiles.insert(PPOpts.RemappedFiles.end(),
+ RewrittenFiles.begin(), RewrittenFiles.end());
+ PPOpts.RemappedFilesKeepOriginalName = false;
+
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Actions
+//===----------------------------------------------------------------------===//
+
+ASTConsumer *RewriteObjCAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ if (raw_ostream *OS = CI.createDefaultOutputFile(false, InFile, "cpp")) {
+ if (CI.getLangOpts().ObjCNonFragileABI)
+ return CreateModernObjCRewriter(InFile, OS,
+ CI.getDiagnostics(), CI.getLangOpts(),
+ CI.getDiagnosticOpts().NoRewriteMacros);
+ return CreateObjCRewriter(InFile, OS,
+ CI.getDiagnostics(), CI.getLangOpts(),
+ CI.getDiagnosticOpts().NoRewriteMacros);
+ }
+ return 0;
+}
+
+void RewriteMacrosAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ raw_ostream *OS = CI.createDefaultOutputFile(true, getCurrentFile());
+ if (!OS) return;
+
+ RewriteMacrosInInput(CI.getPreprocessor(), OS);
+}
+
+void RewriteTestAction::ExecuteAction() {
+ CompilerInstance &CI = getCompilerInstance();
+ raw_ostream *OS = CI.createDefaultOutputFile(false, getCurrentFile());
+ if (!OS) return;
+
+ DoRewriteTest(CI.getPreprocessor(), OS);
+}
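For reference, the suffix-inserting policy above splices the configured suffix
in front of the original extension, so the input file itself is never
overwritten. A hypothetical call (values illustrative; the class lives in the
anonymous namespace above):

    FixItActionSuffixInserter Opts("fixed", /*FixWhatYouCan=*/false);
    int fd;
    std::string Out = Opts.RewriteFilename("src/foo.c", fd);
    // fd == -1, Out == "src/foo.fixed.c"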
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/HTMLPrint.cpp b/contrib/llvm/tools/clang/lib/Rewrite/HTMLPrint.cpp
new file mode 100644
index 0000000..3d190ab
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/HTMLPrint.cpp
@@ -0,0 +1,94 @@
+//===--- HTMLPrint.cpp - Source code -> HTML pretty-printing --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Pretty-printing of source code to HTML.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/ASTConsumers.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Rewrite/HTMLRewrite.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Functional HTML pretty-printing.
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class HTMLPrinter : public ASTConsumer {
+ Rewriter R;
+ raw_ostream *Out;
+ Preprocessor &PP;
+ bool SyntaxHighlight, HighlightMacros;
+
+ public:
+ HTMLPrinter(raw_ostream *OS, Preprocessor &pp,
+ bool _SyntaxHighlight, bool _HighlightMacros)
+ : Out(OS), PP(pp), SyntaxHighlight(_SyntaxHighlight),
+ HighlightMacros(_HighlightMacros) {}
+
+ void Initialize(ASTContext &context);
+ void HandleTranslationUnit(ASTContext &Ctx);
+ };
+}
+
+ASTConsumer* clang::CreateHTMLPrinter(raw_ostream *OS,
+ Preprocessor &PP,
+ bool SyntaxHighlight,
+ bool HighlightMacros) {
+ return new HTMLPrinter(OS, PP, SyntaxHighlight, HighlightMacros);
+}
+
+void HTMLPrinter::Initialize(ASTContext &context) {
+ R.setSourceMgr(context.getSourceManager(), context.getLangOpts());
+}
+
+void HTMLPrinter::HandleTranslationUnit(ASTContext &Ctx) {
+ if (PP.getDiagnostics().hasErrorOccurred())
+ return;
+
+ // Format the file.
+ FileID FID = R.getSourceMgr().getMainFileID();
+ const FileEntry* Entry = R.getSourceMgr().getFileEntryForID(FID);
+ const char* Name;
+ // In some cases, in particular the case where the input is from stdin,
+ // there is no entry. Fall back to the memory buffer for a name in those
+ // cases.
+ if (Entry)
+ Name = Entry->getName();
+ else
+ Name = R.getSourceMgr().getBuffer(FID)->getBufferIdentifier();
+
+ html::AddLineNumbers(R, FID);
+ html::AddHeaderFooterInternalBuiltinCSS(R, FID, Name);
+
+ // If we have a preprocessor, relex the file and syntax highlight.
+ // We might not have a preprocessor if we come from a deserialized AST file,
+ // for example.
+
+ if (SyntaxHighlight) html::SyntaxHighlight(R, FID, PP);
+ if (HighlightMacros) html::HighlightMacros(R, FID, PP);
+ html::EscapeText(R, FID, false, true);
+
+ // Emit the HTML.
+ const RewriteBuffer &RewriteBuf = R.getEditBuffer(FID);
+ char *Buffer = (char*)malloc(RewriteBuf.size());
+ std::copy(RewriteBuf.begin(), RewriteBuf.end(), Buffer);
+ Out->write(Buffer, RewriteBuf.size());
+ free(Buffer);
+}
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp b/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp
new file mode 100644
index 0000000..dc39dde
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/HTMLRewrite.cpp
@@ -0,0 +1,576 @@
+//== HTMLRewrite.cpp - Translate source code into prettified HTML --*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the HTMLRewriter class, which is used to translate the
+// text of a source file into prettified HTML.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/Rewrite/HTMLRewrite.h"
+#include "clang/Lex/TokenConcatenation.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+
+/// HighlightRange - Highlight a range in the source code with the specified
+/// start/end tags. B/E must be in the same file. This ensures that
+/// start/end tags are placed at the start/end of each line if the range is
+/// multiline.
+void html::HighlightRange(Rewriter &R, SourceLocation B, SourceLocation E,
+ const char *StartTag, const char *EndTag) {
+ SourceManager &SM = R.getSourceMgr();
+ B = SM.getExpansionLoc(B);
+ E = SM.getExpansionLoc(E);
+ FileID FID = SM.getFileID(B);
+ assert(SM.getFileID(E) == FID && "B/E not in the same file!");
+
+ unsigned BOffset = SM.getFileOffset(B);
+ unsigned EOffset = SM.getFileOffset(E);
+
+ // Include the whole end token in the range.
+ EOffset += Lexer::MeasureTokenLength(E, R.getSourceMgr(), R.getLangOpts());
+
+ bool Invalid = false;
+ const char *BufferStart = SM.getBufferData(FID, &Invalid).data();
+ if (Invalid)
+ return;
+
+ HighlightRange(R.getEditBuffer(FID), BOffset, EOffset,
+ BufferStart, StartTag, EndTag);
+}
+
+/// HighlightRange - This is the same as the above method, but takes
+/// decomposed file locations.
+void html::HighlightRange(RewriteBuffer &RB, unsigned B, unsigned E,
+ const char *BufferStart,
+ const char *StartTag, const char *EndTag) {
+ // Insert the tag at the absolute start/end of the range.
+ RB.InsertTextAfter(B, StartTag);
+ RB.InsertTextBefore(E, EndTag);
+
+ // Scan the range to see if there is a \r or \n. If so, and if the line is
+ // not blank, insert tags on that line as well.
+ bool HadOpenTag = true;
+
+ unsigned LastNonWhiteSpace = B;
+ for (unsigned i = B; i != E; ++i) {
+ switch (BufferStart[i]) {
+ case '\r':
+ case '\n':
+ // Okay, we found a newline in the range. If we have an open tag, we need
+ // to insert a close tag at the first non-whitespace before the newline.
+ if (HadOpenTag)
+ RB.InsertTextBefore(LastNonWhiteSpace+1, EndTag);
+
+ // Instead of inserting an open tag immediately after the newline, we
+ // wait until we see a non-whitespace character. This prevents us from
+ // inserting tags around blank lines, and also allows the open tag to
+ // be put *after* whitespace on a non-blank line.
+ HadOpenTag = false;
+ break;
+ case '\0':
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\v':
+ // Ignore whitespace.
+ break;
+
+ default:
+ // If there is no tag open, do it now.
+ if (!HadOpenTag) {
+ RB.InsertTextAfter(i, StartTag);
+ HadOpenTag = true;
+ }
+
+ // Remember this character.
+ LastNonWhiteSpace = i;
+ break;
+ }
+ }
+}
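+// Illustration of the per-line tag placement above (hypothetical input and
+// tags): highlighting the range covering "foo\n  bar" with "<b>"/"</b>"
+// yields "<b>foo</b>\n  <b>bar</b>". The close tag lands after the last
+// non-whitespace character of each line, and the open tag is re-opened at
+// the first non-whitespace character of the next line, so blank lines and
+// leading indentation stay untagged.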
+
+void html::EscapeText(Rewriter &R, FileID FID,
+ bool EscapeSpaces, bool ReplaceTabs) {
+
+ const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID);
+ const char* C = Buf->getBufferStart();
+ const char* FileEnd = Buf->getBufferEnd();
+
+ assert (C <= FileEnd);
+
+ RewriteBuffer &RB = R.getEditBuffer(FID);
+
+ unsigned ColNo = 0;
+ for (unsigned FilePos = 0; C != FileEnd ; ++C, ++FilePos) {
+ switch (*C) {
+ default: ++ColNo; break;
+ case '\n':
+ case '\r':
+ ColNo = 0;
+ break;
+
+ case ' ':
+ if (EscapeSpaces)
+ RB.ReplaceText(FilePos, 1, "&nbsp;");
+ ++ColNo;
+ break;
+ case '\f':
+ RB.ReplaceText(FilePos, 1, "<hr>");
+ ColNo = 0;
+ break;
+
+ case '\t': {
+ if (!ReplaceTabs)
+ break;
+ unsigned NumSpaces = 8-(ColNo&7);
+ if (EscapeSpaces)
+ RB.ReplaceText(FilePos, 1,
+ StringRef("&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;"
+ "&nbsp;&nbsp;&nbsp;", 6*NumSpaces));
+ else
+ RB.ReplaceText(FilePos, 1, StringRef(" ", NumSpaces));
+ ColNo += NumSpaces;
+ break;
+ }
+ case '<':
+ RB.ReplaceText(FilePos, 1, "&lt;");
+ ++ColNo;
+ break;
+
+ case '>':
+ RB.ReplaceText(FilePos, 1, "&gt;");
+ ++ColNo;
+ break;
+
+ case '&':
+ RB.ReplaceText(FilePos, 1, "&amp;");
+ ++ColNo;
+ break;
+ }
+ }
+}
+
+std::string html::EscapeText(const std::string& s, bool EscapeSpaces,
+ bool ReplaceTabs) {
+
+ unsigned len = s.size();
+ std::string Str;
+ llvm::raw_string_ostream os(Str);
+
+ for (unsigned i = 0 ; i < len; ++i) {
+
+ char c = s[i];
+ switch (c) {
+ default:
+ os << c; break;
+
+ case ' ':
+ if (EscapeSpaces) os << "&nbsp;";
+ else os << ' ';
+ break;
+
+ case '\t':
+ if (ReplaceTabs) {
+ if (EscapeSpaces)
+ for (unsigned i = 0; i < 4; ++i)
+ os << "&nbsp;";
+ else
+ for (unsigned i = 0; i < 4; ++i)
+ os << " ";
+ }
+ else
+ os << c;
+
+ break;
+
+ case '<': os << "&lt;"; break;
+ case '>': os << "&gt;"; break;
+ case '&': os << "&amp;"; break;
+ }
+ }
+
+ return os.str();
+}
+
+static void AddLineNumber(RewriteBuffer &RB, unsigned LineNo,
+ unsigned B, unsigned E) {
+ SmallString<256> Str;
+ llvm::raw_svector_ostream OS(Str);
+
+ OS << "<tr><td class=\"num\" id=\"LN"
+ << LineNo << "\">"
+ << LineNo << "</td><td class=\"line\">";
+
+ if (B == E) { // Handle empty lines.
+ OS << " </td></tr>";
+ RB.InsertTextBefore(B, OS.str());
+ } else {
+ RB.InsertTextBefore(B, OS.str());
+ RB.InsertTextBefore(E, "</td></tr>");
+ }
+}
+
+void html::AddLineNumbers(Rewriter& R, FileID FID) {
+
+ const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID);
+ const char* FileBeg = Buf->getBufferStart();
+ const char* FileEnd = Buf->getBufferEnd();
+ const char* C = FileBeg;
+ RewriteBuffer &RB = R.getEditBuffer(FID);
+
+ assert (C <= FileEnd);
+
+ unsigned LineNo = 0;
+ unsigned FilePos = 0;
+
+ while (C != FileEnd) {
+
+ ++LineNo;
+ unsigned LineStartPos = FilePos;
+ unsigned LineEndPos = FileEnd - FileBeg;
+
+ assert (FilePos <= LineEndPos);
+ assert (C < FileEnd);
+
+ // Scan until the newline (or end-of-file).
+
+ while (C != FileEnd) {
+ char c = *C;
+ ++C;
+
+ if (c == '\n') {
+ LineEndPos = FilePos++;
+ break;
+ }
+
+ ++FilePos;
+ }
+
+ AddLineNumber(RB, LineNo, LineStartPos, LineEndPos);
+ }
+
+ // Add one big table tag that surrounds all of the code.
+ RB.InsertTextBefore(0, "<table class=\"code\">\n");
+ RB.InsertTextAfter(FileEnd - FileBeg, "</table>");
+}
+
+void html::AddHeaderFooterInternalBuiltinCSS(Rewriter& R, FileID FID,
+ const char *title) {
+
+ const llvm::MemoryBuffer *Buf = R.getSourceMgr().getBuffer(FID);
+ const char* FileStart = Buf->getBufferStart();
+ const char* FileEnd = Buf->getBufferEnd();
+
+ SourceLocation StartLoc = R.getSourceMgr().getLocForStartOfFile(FID);
+ SourceLocation EndLoc = StartLoc.getLocWithOffset(FileEnd-FileStart);
+
+ std::string s;
+ llvm::raw_string_ostream os(s);
+ os << "<!doctype html>\n" // Use HTML 5 doctype
+ "<html>\n<head>\n";
+
+ if (title)
+ os << "<title>" << html::EscapeText(title) << "</title>\n";
+
+ os << "<style type=\"text/css\">\n"
+ " body { color:#000000; background-color:#ffffff }\n"
+ " body { font-family:Helvetica, sans-serif; font-size:10pt }\n"
+ " h1 { font-size:14pt }\n"
+ " .code { border-collapse:collapse; width:100%; }\n"
+ " .code { font-family: \"Monospace\", monospace; font-size:10pt }\n"
+ " .code { line-height: 1.2em }\n"
+ " .comment { color: green; font-style: oblique }\n"
+ " .keyword { color: blue }\n"
+ " .string_literal { color: red }\n"
+ " .directive { color: darkmagenta }\n"
+ // Macro expansions.
+ " .expansion { display: none; }\n"
+ " .macro:hover .expansion { display: block; border: 2px solid #FF0000; "
+ "padding: 2px; background-color:#FFF0F0; font-weight: normal; "
+ " -webkit-border-radius:5px; -webkit-box-shadow:1px 1px 7px #000; "
+ "position: absolute; top: -1em; left:10em; z-index: 1 } \n"
+ " .macro { color: darkmagenta; background-color:LemonChiffon;"
+ // Macros are position: relative to provide base for expansions.
+ " position: relative }\n"
+ " .num { width:2.5em; padding-right:2ex; background-color:#eeeeee }\n"
+ " .num { text-align:right; font-size:8pt }\n"
+ " .num { color:#444444 }\n"
+ " .line { padding-left: 1ex; border-left: 3px solid #ccc }\n"
+ " .line { white-space: pre }\n"
+ " .msg { -webkit-box-shadow:1px 1px 7px #000 }\n"
+ " .msg { -webkit-border-radius:5px }\n"
+ " .msg { font-family:Helvetica, sans-serif; font-size:8pt }\n"
+ " .msg { float:left }\n"
+ " .msg { padding:0.25em 1ex 0.25em 1ex }\n"
+ " .msg { margin-top:10px; margin-bottom:10px }\n"
+ " .msg { font-weight:bold }\n"
+ " .msg { max-width:60em; word-wrap: break-word; white-space: pre-wrap }\n"
+ " .msgT { padding:0x; spacing:0x }\n"
+ " .msgEvent { background-color:#fff8b4; color:#000000 }\n"
+ " .msgControl { background-color:#bbbbbb; color:#000000 }\n"
+ " .mrange { background-color:#dfddf3 }\n"
+ " .mrange { border-bottom:1px solid #6F9DBE }\n"
+ " .PathIndex { font-weight: bold; padding:0px 5px 0px 5px; "
+ "margin-right:5px; }\n"
+ " .PathIndex { -webkit-border-radius:8px }\n"
+ " .PathIndexEvent { background-color:#bfba87 }\n"
+ " .PathIndexControl { background-color:#8c8c8c }\n"
+ " .CodeInsertionHint { font-weight: bold; background-color: #10dd10 }\n"
+ " .CodeRemovalHint { background-color:#de1010 }\n"
+ " .CodeRemovalHint { border-bottom:1px solid #6F9DBE }\n"
+ " table.simpletable {\n"
+ " padding: 5px;\n"
+ " font-size:12pt;\n"
+ " margin:20px;\n"
+ " border-collapse: collapse; border-spacing: 0px;\n"
+ " }\n"
+ " td.rowname {\n"
+ " text-align:right; font-weight:bold; color:#444444;\n"
+ " padding-right:2ex; }\n"
+ "</style>\n</head>\n<body>";
+
+ // Generate header
+ R.InsertTextBefore(StartLoc, os.str());
+ // Generate footer
+
+ R.InsertTextAfter(EndLoc, "</body></html>\n");
+}
+
+/// SyntaxHighlight - Relex the specified FileID and annotate the HTML with
+/// information about keywords, macro expansions etc. This uses the macro
+/// table state from the end of the file, so it won't be perfectly perfect,
+/// but it will be reasonably close.
+void html::SyntaxHighlight(Rewriter &R, FileID FID, const Preprocessor &PP) {
+ RewriteBuffer &RB = R.getEditBuffer(FID);
+
+ const SourceManager &SM = PP.getSourceManager();
+ const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+ Lexer L(FID, FromFile, SM, PP.getLangOpts());
+ const char *BufferStart = L.getBufferStart();
+
+  // Inform the lexer that we want to retain comments as tokens, so we
+ // can highlight them.
+ L.SetCommentRetentionState(true);
+
+ // Lex all the tokens in raw mode, to avoid entering #includes or expanding
+ // macros.
+ Token Tok;
+ L.LexFromRawLexer(Tok);
+
+ while (Tok.isNot(tok::eof)) {
+ // Since we are lexing unexpanded tokens, all tokens are from the main
+ // FileID.
+ unsigned TokOffs = SM.getFileOffset(Tok.getLocation());
+ unsigned TokLen = Tok.getLength();
+ switch (Tok.getKind()) {
+ default: break;
+ case tok::identifier:
+ llvm_unreachable("tok::identifier in raw lexing mode!");
+ case tok::raw_identifier: {
+ // Fill in Result.IdentifierInfo and update the token kind,
+ // looking up the identifier in the identifier table.
+ PP.LookUpIdentifierInfo(Tok);
+
+      // If this identifier turned out to be a keyword, highlight it as such.
+ if (Tok.isNot(tok::identifier))
+ HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
+ "<span class='keyword'>", "</span>");
+ break;
+ }
+ case tok::comment:
+ HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
+ "<span class='comment'>", "</span>");
+ break;
+ case tok::utf8_string_literal:
+ // Chop off the u part of u8 prefix
+ ++TokOffs;
+ --TokLen;
+ // FALL THROUGH to chop the 8
+ case tok::wide_string_literal:
+ case tok::utf16_string_literal:
+ case tok::utf32_string_literal:
+ // Chop off the L, u, U or 8 prefix
+ ++TokOffs;
+ --TokLen;
+ // FALL THROUGH.
+ case tok::string_literal:
+ // FIXME: Exclude the optional ud-suffix from the highlighted range.
+ HighlightRange(RB, TokOffs, TokOffs+TokLen, BufferStart,
+ "<span class='string_literal'>", "</span>");
+ break;
+ case tok::hash: {
+      // If this is a preprocessor directive, all tokens up to the end of the
+      // line are part of it too.
+ if (!Tok.isAtStartOfLine())
+ break;
+
+ // Eat all of the tokens until we get to the next one at the start of
+ // line.
+ unsigned TokEnd = TokOffs+TokLen;
+ L.LexFromRawLexer(Tok);
+ while (!Tok.isAtStartOfLine() && Tok.isNot(tok::eof)) {
+ TokEnd = SM.getFileOffset(Tok.getLocation())+Tok.getLength();
+ L.LexFromRawLexer(Tok);
+ }
+
+ // Find end of line. This is a hack.
+ HighlightRange(RB, TokOffs, TokEnd, BufferStart,
+ "<span class='directive'>", "</span>");
+
+ // Don't skip the next token.
+ continue;
+ }
+ }
+
+ L.LexFromRawLexer(Tok);
+ }
+}
+
+/// HighlightMacros - This uses the macro table state from the end of the
+/// file, to re-expand macros and insert (into the HTML) information about the
+/// macro expansions. This won't be perfectly perfect, but it will be
+/// reasonably close.
+void html::HighlightMacros(Rewriter &R, FileID FID, const Preprocessor& PP) {
+ // Re-lex the raw token stream into a token buffer.
+ const SourceManager &SM = PP.getSourceManager();
+ std::vector<Token> TokenStream;
+
+ const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+ Lexer L(FID, FromFile, SM, PP.getLangOpts());
+
+ // Lex all the tokens in raw mode, to avoid entering #includes or expanding
+ // macros.
+ while (1) {
+ Token Tok;
+ L.LexFromRawLexer(Tok);
+
+ // If this is a # at the start of a line, discard it from the token stream.
+ // We don't want the re-preprocess step to see #defines, #includes or other
+ // preprocessor directives.
+ if (Tok.is(tok::hash) && Tok.isAtStartOfLine())
+ continue;
+
+    // If this is a ## token, change its kind to unknown so that
+    // re-preprocessing it will not produce an error.
+ if (Tok.is(tok::hashhash))
+ Tok.setKind(tok::unknown);
+
+ // If this raw token is an identifier, the raw lexer won't have looked up
+ // the corresponding identifier info for it. Do this now so that it will be
+ // macro expanded when we re-preprocess it.
+ if (Tok.is(tok::raw_identifier))
+ PP.LookUpIdentifierInfo(Tok);
+
+ TokenStream.push_back(Tok);
+
+ if (Tok.is(tok::eof)) break;
+ }
+
+ // Temporarily change the diagnostics object so that we ignore any generated
+ // diagnostics from this pass.
+ DiagnosticsEngine TmpDiags(PP.getDiagnostics().getDiagnosticIDs(),
+ new IgnoringDiagConsumer);
+
+ // FIXME: This is a huge hack; we reuse the input preprocessor because we want
+ // its state, but we aren't actually changing it (we hope). This should really
+ // construct a copy of the preprocessor.
+ Preprocessor &TmpPP = const_cast<Preprocessor&>(PP);
+ DiagnosticsEngine *OldDiags = &TmpPP.getDiagnostics();
+ TmpPP.setDiagnostics(TmpDiags);
+
+ // Inform the preprocessor that we don't want comments.
+ TmpPP.SetCommentRetentionState(false, false);
+
+ // Enter the tokens we just lexed. This will cause them to be macro expanded
+ // but won't enter sub-files (because we removed #'s).
+ TmpPP.EnterTokenStream(&TokenStream[0], TokenStream.size(), false, false);
+
+ TokenConcatenation ConcatInfo(TmpPP);
+
+ // Lex all the tokens.
+ Token Tok;
+ TmpPP.Lex(Tok);
+ while (Tok.isNot(tok::eof)) {
+ // Ignore non-macro tokens.
+ if (!Tok.getLocation().isMacroID()) {
+ TmpPP.Lex(Tok);
+ continue;
+ }
+
+ // Okay, we have the first token of a macro expansion: highlight the
+ // expansion by inserting a start tag before the macro expansion and
+ // end tag after it.
+ std::pair<SourceLocation, SourceLocation> LLoc =
+ SM.getExpansionRange(Tok.getLocation());
+
+ // Ignore tokens whose instantiation location was not the main file.
+ if (SM.getFileID(LLoc.first) != FID) {
+ TmpPP.Lex(Tok);
+ continue;
+ }
+
+ assert(SM.getFileID(LLoc.second) == FID &&
+ "Start and end of expansion must be in the same ultimate file!");
+
+ std::string Expansion = EscapeText(TmpPP.getSpelling(Tok));
+ unsigned LineLen = Expansion.size();
+
+ Token PrevPrevTok;
+ Token PrevTok = Tok;
+ // Okay, eat this token, getting the next one.
+ TmpPP.Lex(Tok);
+
+ // Skip all the rest of the tokens that are part of this macro
+ // instantiation. It would be really nice to pop up a window with all the
+ // spelling of the tokens or something.
+ while (!Tok.is(tok::eof) &&
+ SM.getExpansionLoc(Tok.getLocation()) == LLoc.first) {
+ // Insert a newline if the macro expansion is getting large.
+ if (LineLen > 60) {
+ Expansion += "<br>";
+ LineLen = 0;
+ }
+
+ LineLen -= Expansion.size();
+
+ // If the tokens were already space separated, or if they must be to avoid
+ // them being implicitly pasted, add a space between them.
+ if (Tok.hasLeadingSpace() ||
+ ConcatInfo.AvoidConcat(PrevPrevTok, PrevTok, Tok))
+ Expansion += ' ';
+
+ // Escape any special characters in the token text.
+ Expansion += EscapeText(TmpPP.getSpelling(Tok));
+ LineLen += Expansion.size();
+
+ PrevPrevTok = PrevTok;
+ PrevTok = Tok;
+ TmpPP.Lex(Tok);
+ }
+
+
+ // Insert the expansion as the end tag, so that multi-line macros all get
+ // highlighted.
+ Expansion = "<span class='expansion'>" + Expansion + "</span></span>";
+
+ HighlightRange(R, LLoc.first, LLoc.second,
+ "<span class='macro'>", Expansion.c_str());
+ }
+
+ // Restore diagnostics object back to its own thing.
+ TmpPP.setDiagnostics(*OldDiags);
+}
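As a quick reference for the escaping rules implemented above, the string
overload of html::EscapeText behaves as follows (illustrative sketch; both
boolean arguments are spelled out here since their defaults live in the
header, which is not part of this hunk):

    #include "clang/Rewrite/HTMLRewrite.h"
    #include <cassert>

    static void escapeTextExamples() {
      using clang::html::EscapeText;
      // '<', '>' and '&' are always escaped; spaces only when requested.
      assert(EscapeText("a < b && c > d", false, false) ==
             "a &lt; b &amp;&amp; c &gt; d");
      assert(EscapeText("x  y", /*EscapeSpaces=*/true, /*ReplaceTabs=*/false) ==
             "x&nbsp;&nbsp;y");
    }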
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp
new file mode 100644
index 0000000..3fa0bdb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteMacros.cpp
@@ -0,0 +1,217 @@
+//===--- RewriteMacros.cpp - Rewrite macros into their expansions ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This code rewrites macro invocations into their expansions. This gives you
+// a macro expanded file that retains comments and #includes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Rewriters.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Path.h"
+#include "llvm/ADT/OwningPtr.h"
+#include <cstdio>
+
+using namespace clang;
+
+/// isSameToken - Return true if the two specified tokens have the same
+/// content.
+static bool isSameToken(Token &RawTok, Token &PPTok) {
+ // If two tokens have the same kind and the same identifier info, they are
+ // obviously the same.
+ if (PPTok.getKind() == RawTok.getKind() &&
+ PPTok.getIdentifierInfo() == RawTok.getIdentifierInfo())
+ return true;
+
+ // Otherwise, if they are different but have the same identifier info, they
+ // are also considered to be the same. This allows keywords and raw lexed
+ // identifiers with the same name to be treated the same.
+ if (PPTok.getIdentifierInfo() &&
+ PPTok.getIdentifierInfo() == RawTok.getIdentifierInfo())
+ return true;
+
+ return false;
+}
+
+
+/// GetNextRawTok - Return the next raw token in the stream, skipping over
+/// comments if ReturnComment is false.
+static const Token &GetNextRawTok(const std::vector<Token> &RawTokens,
+ unsigned &CurTok, bool ReturnComment) {
+ assert(CurTok < RawTokens.size() && "Overran eof!");
+
+ // If the client doesn't want comments and we have one, skip it.
+ if (!ReturnComment && RawTokens[CurTok].is(tok::comment))
+ ++CurTok;
+
+ return RawTokens[CurTok++];
+}
+
+
+/// LexRawTokensFromMainFile - Lex all the raw tokens from the main file into
+/// the specified vector.
+static void LexRawTokensFromMainFile(Preprocessor &PP,
+ std::vector<Token> &RawTokens) {
+ SourceManager &SM = PP.getSourceManager();
+
+ // Create a lexer to lex all the tokens of the main file in raw mode. Even
+ // though it is in raw mode, it will not return comments.
+ const llvm::MemoryBuffer *FromFile = SM.getBuffer(SM.getMainFileID());
+ Lexer RawLex(SM.getMainFileID(), FromFile, SM, PP.getLangOpts());
+
+ // Switch on comment lexing because we really do want them.
+ RawLex.SetCommentRetentionState(true);
+
+ Token RawTok;
+ do {
+ RawLex.LexFromRawLexer(RawTok);
+
+ // If we have an identifier with no identifier info for our raw token, look
+    // up the identifier info. This is important for equality comparison of
+ // identifier tokens.
+ if (RawTok.is(tok::raw_identifier))
+ PP.LookUpIdentifierInfo(RawTok);
+
+ RawTokens.push_back(RawTok);
+ } while (RawTok.isNot(tok::eof));
+}
+
+
+/// RewriteMacrosInInput - Implement -rewrite-macros mode.
+void clang::RewriteMacrosInInput(Preprocessor &PP, raw_ostream *OS) {
+ SourceManager &SM = PP.getSourceManager();
+
+ Rewriter Rewrite;
+ Rewrite.setSourceMgr(SM, PP.getLangOpts());
+ RewriteBuffer &RB = Rewrite.getEditBuffer(SM.getMainFileID());
+
+ std::vector<Token> RawTokens;
+ LexRawTokensFromMainFile(PP, RawTokens);
+ unsigned CurRawTok = 0;
+ Token RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
+
+
+ // Get the first preprocessing token.
+ PP.EnterMainSourceFile();
+ Token PPTok;
+ PP.Lex(PPTok);
+
+ // Preprocess the input file in parallel with raw lexing the main file. Ignore
+ // all tokens that are preprocessed from a file other than the main file (e.g.
+ // a header). If we see tokens that are in the preprocessed file but not the
+ // lexed file, we have a macro expansion. If we see tokens in the lexed file
+ // that aren't in the preprocessed view, we have macros that expand to no
+ // tokens, or macro arguments etc.
+ while (RawTok.isNot(tok::eof) || PPTok.isNot(tok::eof)) {
+ SourceLocation PPLoc = SM.getExpansionLoc(PPTok.getLocation());
+
+ // If PPTok is from a different source file, ignore it.
+ if (!SM.isFromMainFile(PPLoc)) {
+ PP.Lex(PPTok);
+ continue;
+ }
+
+    // If the raw file hits a preprocessor directive, there will be extra tokens
+    // in the raw file that don't exist in the preprocessed file. However, we
+ // choose to preserve them in the output file and otherwise handle them
+ // specially.
+ if (RawTok.is(tok::hash) && RawTok.isAtStartOfLine()) {
+ // If this is a #warning directive or #pragma mark (GNU extensions),
+ // comment the line out.
+ if (RawTokens[CurRawTok].is(tok::identifier)) {
+ const IdentifierInfo *II = RawTokens[CurRawTok].getIdentifierInfo();
+ if (II->getName() == "warning") {
+ // Comment out #warning.
+ RB.InsertTextAfter(SM.getFileOffset(RawTok.getLocation()), "//");
+ } else if (II->getName() == "pragma" &&
+ RawTokens[CurRawTok+1].is(tok::identifier) &&
+ (RawTokens[CurRawTok+1].getIdentifierInfo()->getName() ==
+ "mark")) {
+ // Comment out #pragma mark.
+ RB.InsertTextAfter(SM.getFileOffset(RawTok.getLocation()), "//");
+ }
+ }
+
+ // Otherwise, if this is a #include or some other directive, just leave it
+ // in the file by skipping over the line.
+ RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
+ while (!RawTok.isAtStartOfLine() && RawTok.isNot(tok::eof))
+ RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
+ continue;
+ }
+
+ // Okay, both tokens are from the same file. Get their offsets from the
+ // start of the file.
+ unsigned PPOffs = SM.getFileOffset(PPLoc);
+ unsigned RawOffs = SM.getFileOffset(RawTok.getLocation());
+
+ // If the offsets are the same and the token kind is the same, ignore them.
+ if (PPOffs == RawOffs && isSameToken(RawTok, PPTok)) {
+ RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
+ PP.Lex(PPTok);
+ continue;
+ }
+
+ // If the PP token is farther along than the raw token, something was
+ // deleted. Comment out the raw token.
+ if (RawOffs <= PPOffs) {
+ // Comment out a whole run of tokens instead of bracketing each one with
+ // comments. Add a leading space if RawTok didn't have one.
+ bool HasSpace = RawTok.hasLeadingSpace();
+ RB.InsertTextAfter(RawOffs, &" /*"[HasSpace]);
+ unsigned EndPos;
+
+ do {
+ EndPos = RawOffs+RawTok.getLength();
+
+ RawTok = GetNextRawTok(RawTokens, CurRawTok, true);
+ RawOffs = SM.getFileOffset(RawTok.getLocation());
+
+ if (RawTok.is(tok::comment)) {
+ // Skip past the comment.
+ RawTok = GetNextRawTok(RawTokens, CurRawTok, false);
+ break;
+ }
+
+ } while (RawOffs <= PPOffs && !RawTok.isAtStartOfLine() &&
+ (PPOffs != RawOffs || !isSameToken(RawTok, PPTok)));
+
+ RB.InsertTextBefore(EndPos, "*/");
+ continue;
+ }
+
+    // Otherwise, there was a replacement or an expansion. Insert the new token
+ // in the output buffer. Insert the whole run of new tokens at once to get
+ // them in the right order.
+ unsigned InsertPos = PPOffs;
+ std::string Expansion;
+ while (PPOffs < RawOffs) {
+ Expansion += ' ' + PP.getSpelling(PPTok);
+ PP.Lex(PPTok);
+ PPLoc = SM.getExpansionLoc(PPTok.getLocation());
+ PPOffs = SM.getFileOffset(PPLoc);
+ }
+ Expansion += ' ';
+ RB.InsertTextBefore(InsertPos, Expansion);
+ }
+
+ // Get the buffer corresponding to MainFileID. If we haven't changed it, then
+ // we are done.
+ if (const RewriteBuffer *RewriteBuf =
+ Rewrite.getRewriteBufferFor(SM.getMainFileID())) {
+ //printf("Changed:\n");
+ *OS << std::string(RewriteBuf->begin(), RewriteBuf->end());
+ } else {
+ fprintf(stderr, "No changes\n");
+ }
+ OS->flush();
+}
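The net effect of the pass above, on a small hypothetical input, is that raw
tokens which disappear under preprocessing are commented out and the
preprocessed spelling is inserted next to them, while directives are left in
place. Roughly (exact whitespace differs):

    // Input:
    #define N 4
    int x = N;

    // Output of -rewrite-macros (approximately):
    #define N 4
    int x =  4 /*N*/;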
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteModernObjC.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteModernObjC.cpp
new file mode 100644
index 0000000..57109de
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteModernObjC.cpp
@@ -0,0 +1,7275 @@
+//===--- RewriteModernObjC.cpp - Playground for the code rewriter ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Hacks and fun related to the code rewriter.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/ASTConsumers.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/AST/AST.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/DenseSet.h"
+
+using namespace clang;
+using llvm::utostr;
+
+namespace {
+ class RewriteModernObjC : public ASTConsumer {
+ protected:
+
+ enum {
+ BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)),
+ block, ... */
+ BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */
+ BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the
+ __block variable */
+ BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy
+ helpers */
+ BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
+ support routines */
+ BLOCK_BYREF_CURRENT_MAX = 256
+ };
+
+ enum {
+ BLOCK_NEEDS_FREE = (1 << 24),
+ BLOCK_HAS_COPY_DISPOSE = (1 << 25),
+ BLOCK_HAS_CXX_OBJ = (1 << 26),
+ BLOCK_IS_GC = (1 << 27),
+ BLOCK_IS_GLOBAL = (1 << 28),
+ BLOCK_HAS_DESCRIPTOR = (1 << 29)
+ };
+ static const int OBJC_ABI_VERSION = 7;
+
+ Rewriter Rewrite;
+ DiagnosticsEngine &Diags;
+ const LangOptions &LangOpts;
+ ASTContext *Context;
+ SourceManager *SM;
+ TranslationUnitDecl *TUDecl;
+ FileID MainFileID;
+ const char *MainFileStart, *MainFileEnd;
+ Stmt *CurrentBody;
+ ParentMap *PropParentMap; // created lazily.
+ std::string InFileName;
+ raw_ostream* OutFile;
+ std::string Preamble;
+
+ TypeDecl *ProtocolTypeDecl;
+ VarDecl *GlobalVarDecl;
+ Expr *GlobalConstructionExp;
+ unsigned RewriteFailedDiag;
+ unsigned GlobalBlockRewriteFailedDiag;
+ // ObjC string constant support.
+ unsigned NumObjCStringLiterals;
+ VarDecl *ConstantStringClassReference;
+ RecordDecl *NSStringRecord;
+
+ // ObjC foreach break/continue generation support.
+ int BcLabelCount;
+
+ unsigned TryFinallyContainsReturnDiag;
+ // Needed for super.
+ ObjCMethodDecl *CurMethodDef;
+ RecordDecl *SuperStructDecl;
+ RecordDecl *ConstantStringDecl;
+
+ FunctionDecl *MsgSendFunctionDecl;
+ FunctionDecl *MsgSendSuperFunctionDecl;
+ FunctionDecl *MsgSendStretFunctionDecl;
+ FunctionDecl *MsgSendSuperStretFunctionDecl;
+ FunctionDecl *MsgSendFpretFunctionDecl;
+ FunctionDecl *GetClassFunctionDecl;
+ FunctionDecl *GetMetaClassFunctionDecl;
+ FunctionDecl *GetSuperClassFunctionDecl;
+ FunctionDecl *SelGetUidFunctionDecl;
+ FunctionDecl *CFStringFunctionDecl;
+ FunctionDecl *SuperContructorFunctionDecl;
+ FunctionDecl *CurFunctionDef;
+ FunctionDecl *CurFunctionDeclToDeclareForBlock;
+
+ /* Misc. containers needed for meta-data rewrite. */
+ SmallVector<ObjCImplementationDecl *, 8> ClassImplementation;
+ SmallVector<ObjCCategoryImplDecl *, 8> CategoryImplementation;
+ llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCSynthesizedStructs;
+ llvm::SmallPtrSet<ObjCProtocolDecl*, 8> ObjCSynthesizedProtocols;
+ llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCWrittenInterfaces;
+ llvm::SmallPtrSet<TagDecl*, 8> TagsDefinedInIvarDecls;
+ SmallVector<ObjCInterfaceDecl*, 32> ObjCInterfacesSeen;
+ /// DefinedNonLazyClasses - List of defined "non-lazy" classes.
+ SmallVector<ObjCInterfaceDecl*, 8> DefinedNonLazyClasses;
+
+ /// DefinedNonLazyCategories - List of defined "non-lazy" categories.
+ llvm::SmallVector<ObjCCategoryDecl*, 8> DefinedNonLazyCategories;
+
+ SmallVector<Stmt *, 32> Stmts;
+ SmallVector<int, 8> ObjCBcLabelNo;
+ // Remember all the @protocol(<expr>) expressions.
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 32> ProtocolExprDecls;
+
+ llvm::DenseSet<uint64_t> CopyDestroyCache;
+
+ // Block expressions.
+ SmallVector<BlockExpr *, 32> Blocks;
+ SmallVector<int, 32> InnerDeclRefsCount;
+ SmallVector<DeclRefExpr *, 32> InnerDeclRefs;
+
+ SmallVector<DeclRefExpr *, 32> BlockDeclRefs;
+
+ // Block related declarations.
+ SmallVector<ValueDecl *, 8> BlockByCopyDecls;
+ llvm::SmallPtrSet<ValueDecl *, 8> BlockByCopyDeclsPtrSet;
+ SmallVector<ValueDecl *, 8> BlockByRefDecls;
+ llvm::SmallPtrSet<ValueDecl *, 8> BlockByRefDeclsPtrSet;
+ llvm::DenseMap<ValueDecl *, unsigned> BlockByRefDeclNo;
+ llvm::SmallPtrSet<ValueDecl *, 8> ImportedBlockDecls;
+ llvm::SmallPtrSet<VarDecl *, 8> ImportedLocalExternalDecls;
+
+ llvm::DenseMap<BlockExpr *, std::string> RewrittenBlockExprs;
+ llvm::DenseMap<ObjCInterfaceDecl *,
+ llvm::SmallPtrSet<ObjCIvarDecl *, 8> > ReferencedIvars;
+
+    // This maps an original source AST to its rewritten form. This allows
+ // us to avoid rewriting the same node twice (which is very uncommon).
+ // This is needed to support some of the exotic property rewriting.
+ llvm::DenseMap<Stmt *, Stmt *> ReplacedNodes;
+
+ // Needed for header files being rewritten
+ bool IsHeader;
+ bool SilenceRewriteMacroWarning;
+ bool objc_impl_method;
+
+ bool DisableReplaceStmt;
+ class DisableReplaceStmtScope {
+ RewriteModernObjC &R;
+ bool SavedValue;
+
+ public:
+ DisableReplaceStmtScope(RewriteModernObjC &R)
+ : R(R), SavedValue(R.DisableReplaceStmt) {
+ R.DisableReplaceStmt = true;
+ }
+ ~DisableReplaceStmtScope() {
+ R.DisableReplaceStmt = SavedValue;
+ }
+ };
+ void InitializeCommon(ASTContext &context);
+
+ public:
+ llvm::DenseMap<ObjCMethodDecl*, std::string> MethodInternalNames;
+ // Top Level Driver code.
+ virtual bool HandleTopLevelDecl(DeclGroupRef D) {
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
+ if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(*I)) {
+ if (!Class->isThisDeclarationADefinition()) {
+ RewriteForwardClassDecl(D);
+ break;
+ } else {
+ // Keep track of all interface declarations seen.
+ ObjCInterfacesSeen.push_back(Class);
+ break;
+ }
+ }
+
+ if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(*I)) {
+ if (!Proto->isThisDeclarationADefinition()) {
+ RewriteForwardProtocolDecl(D);
+ break;
+ }
+ }
+
+ HandleTopLevelSingleDecl(*I);
+ }
+ return true;
+ }
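+  // For example, a forward-declaration group such as '@class A, B, C;' reaches
+  // HandleTopLevelDecl above as a single DeclGroupRef containing no definition,
+  // and is handed to RewriteForwardClassDecl as one unit rather than decl by
+  // decl (the class names here are purely illustrative).
+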
+ void HandleTopLevelSingleDecl(Decl *D);
+ void HandleDeclInMainFile(Decl *D);
+ RewriteModernObjC(std::string inFile, raw_ostream *OS,
+ DiagnosticsEngine &D, const LangOptions &LOpts,
+ bool silenceMacroWarn);
+
+ ~RewriteModernObjC() {}
+
+ virtual void HandleTranslationUnit(ASTContext &C);
+
+ void ReplaceStmt(Stmt *Old, Stmt *New) {
+ Stmt *ReplacingStmt = ReplacedNodes[Old];
+
+ if (ReplacingStmt)
+ return; // We can't rewrite the same node twice.
+
+ if (DisableReplaceStmt)
+ return;
+
+ // If replacement succeeded or warning disabled return with no warning.
+ if (!Rewrite.ReplaceStmt(Old, New)) {
+ ReplacedNodes[Old] = New;
+ return;
+ }
+ if (SilenceRewriteMacroWarning)
+ return;
+ Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
+ << Old->getSourceRange();
+ }
+
+ void ReplaceStmtWithRange(Stmt *Old, Stmt *New, SourceRange SrcRange) {
+ if (DisableReplaceStmt)
+ return;
+
+ // Measure the old text.
+ int Size = Rewrite.getRangeSize(SrcRange);
+ if (Size == -1) {
+ Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
+ << Old->getSourceRange();
+ return;
+ }
+ // Get the new text.
+ std::string SStr;
+ llvm::raw_string_ostream S(SStr);
+ New->printPretty(S, *Context, 0, PrintingPolicy(LangOpts));
+ const std::string &Str = S.str();
+
+ // If replacement succeeded or warning disabled return with no warning.
+ if (!Rewrite.ReplaceText(SrcRange.getBegin(), Size, Str)) {
+ ReplacedNodes[Old] = New;
+ return;
+ }
+ if (SilenceRewriteMacroWarning)
+ return;
+ Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
+ << Old->getSourceRange();
+ }
+
+ void InsertText(SourceLocation Loc, StringRef Str,
+ bool InsertAfter = true) {
+ // If insertion succeeded or warning disabled return with no warning.
+ if (!Rewrite.InsertText(Loc, Str, InsertAfter) ||
+ SilenceRewriteMacroWarning)
+ return;
+
+ Diags.Report(Context->getFullLoc(Loc), RewriteFailedDiag);
+ }
+
+ void ReplaceText(SourceLocation Start, unsigned OrigLength,
+ StringRef Str) {
+ // If removal succeeded or warning disabled return with no warning.
+ if (!Rewrite.ReplaceText(Start, OrigLength, Str) ||
+ SilenceRewriteMacroWarning)
+ return;
+
+ Diags.Report(Context->getFullLoc(Start), RewriteFailedDiag);
+ }
+
+ // Syntactic Rewriting.
+ void RewriteRecordBody(RecordDecl *RD);
+ void RewriteInclude();
+ void RewriteForwardClassDecl(DeclGroupRef D);
+ void RewriteForwardClassDecl(const llvm::SmallVector<Decl*, 8> &DG);
+ void RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
+ const std::string &typedefString);
+ void RewriteImplementations();
+ void RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
+ ObjCImplementationDecl *IMD,
+ ObjCCategoryImplDecl *CID);
+ void RewriteInterfaceDecl(ObjCInterfaceDecl *Dcl);
+ void RewriteImplementationDecl(Decl *Dcl);
+ void RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl,
+ ObjCMethodDecl *MDecl, std::string &ResultStr);
+ void RewriteTypeIntoString(QualType T, std::string &ResultStr,
+ const FunctionType *&FPRetType);
+ void RewriteByRefString(std::string &ResultStr, const std::string &Name,
+ ValueDecl *VD, bool def=false);
+ void RewriteCategoryDecl(ObjCCategoryDecl *Dcl);
+ void RewriteProtocolDecl(ObjCProtocolDecl *Dcl);
+ void RewriteForwardProtocolDecl(DeclGroupRef D);
+ void RewriteForwardProtocolDecl(const llvm::SmallVector<Decl*, 8> &DG);
+ void RewriteMethodDeclaration(ObjCMethodDecl *Method);
+ void RewriteProperty(ObjCPropertyDecl *prop);
+ void RewriteFunctionDecl(FunctionDecl *FD);
+ void RewriteBlockPointerType(std::string& Str, QualType Type);
+ void RewriteBlockPointerTypeVariable(std::string& Str, ValueDecl *VD);
+ void RewriteBlockLiteralFunctionDecl(FunctionDecl *FD);
+ void RewriteObjCQualifiedInterfaceTypes(Decl *Dcl);
+ void RewriteTypeOfDecl(VarDecl *VD);
+ void RewriteObjCQualifiedInterfaceTypes(Expr *E);
+
+ // Expression Rewriting.
+ Stmt *RewriteFunctionBodyOrGlobalInitializer(Stmt *S);
+ Stmt *RewriteAtEncode(ObjCEncodeExpr *Exp);
+ Stmt *RewritePropertyOrImplicitGetter(PseudoObjectExpr *Pseudo);
+ Stmt *RewritePropertyOrImplicitSetter(PseudoObjectExpr *Pseudo);
+ Stmt *RewriteAtSelector(ObjCSelectorExpr *Exp);
+ Stmt *RewriteMessageExpr(ObjCMessageExpr *Exp);
+ Stmt *RewriteObjCStringLiteral(ObjCStringLiteral *Exp);
+ Stmt *RewriteObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Exp);
+ Stmt *RewriteObjCNumericLiteralExpr(ObjCNumericLiteral *Exp);
+ Stmt *RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp);
+ Stmt *RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral *Exp);
+ Stmt *RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp);
+ Stmt *RewriteObjCTryStmt(ObjCAtTryStmt *S);
+ Stmt *RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S);
+ Stmt *RewriteObjCThrowStmt(ObjCAtThrowStmt *S);
+ Stmt *RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
+ SourceLocation OrigEnd);
+ Stmt *RewriteBreakStmt(BreakStmt *S);
+ Stmt *RewriteContinueStmt(ContinueStmt *S);
+ void RewriteCastExpr(CStyleCastExpr *CE);
+ void RewriteImplicitCastObjCExpr(CastExpr *IE);
+ void RewriteLinkageSpec(LinkageSpecDecl *LSD);
+
+ // Block rewriting.
+ void RewriteBlocksInFunctionProtoType(QualType funcType, NamedDecl *D);
+
+ // Block specific rewrite rules.
+ void RewriteBlockPointerDecl(NamedDecl *VD);
+ void RewriteByRefVar(VarDecl *VD);
+ Stmt *RewriteBlockDeclRefExpr(DeclRefExpr *VD);
+ Stmt *RewriteLocalVariableExternalStorage(DeclRefExpr *DRE);
+ void RewriteBlockPointerFunctionArgs(FunctionDecl *FD);
+
+ void RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
+ std::string &Result);
+
+ void RewriteObjCFieldDecl(FieldDecl *fieldDecl, std::string &Result);
+
+ bool RewriteObjCFieldDeclType(QualType &Type, std::string &Result);
+
+ void RewriteIvarOffsetSymbols(ObjCInterfaceDecl *CDecl,
+ std::string &Result);
+
+ virtual void Initialize(ASTContext &context);
+
+  // Misc. AST transformation routines. Sometimes they end up calling
+ // rewriting routines on the new ASTs.
+ CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD,
+ Expr **args, unsigned nargs,
+ SourceLocation StartLoc=SourceLocation(),
+ SourceLocation EndLoc=SourceLocation());
+
+ Stmt *SynthMessageExpr(ObjCMessageExpr *Exp,
+ SourceLocation StartLoc=SourceLocation(),
+ SourceLocation EndLoc=SourceLocation());
+
+ void SynthCountByEnumWithState(std::string &buf);
+ void SynthMsgSendFunctionDecl();
+ void SynthMsgSendSuperFunctionDecl();
+ void SynthMsgSendStretFunctionDecl();
+ void SynthMsgSendFpretFunctionDecl();
+ void SynthMsgSendSuperStretFunctionDecl();
+ void SynthGetClassFunctionDecl();
+ void SynthGetMetaClassFunctionDecl();
+ void SynthGetSuperClassFunctionDecl();
+ void SynthSelGetUidFunctionDecl();
+ void SynthSuperContructorFunctionDecl();
+
+ // Rewriting metadata
+ template<typename MethodIterator>
+ void RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
+ MethodIterator MethodEnd,
+ bool IsInstanceMethod,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result);
+ void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol,
+ std::string &Result);
+ virtual void RewriteObjCProtocolListMetaData(
+ const ObjCList<ObjCProtocolDecl> &Prots,
+ StringRef prefix, StringRef ClassName, std::string &Result);
+ virtual void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ std::string &Result);
+ virtual void RewriteClassSetupInitHook(std::string &Result);
+
+ virtual void RewriteMetaDataIntoBuffer(std::string &Result);
+ virtual void WriteImageInfo(std::string &Result);
+ virtual void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
+ std::string &Result);
+ virtual void RewriteCategorySetupInitHook(std::string &Result);
+
+ // Rewriting ivar
+ virtual void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
+ std::string &Result);
+ virtual Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV);
+
+
+ std::string SynthesizeByrefCopyDestroyHelper(VarDecl *VD, int flag);
+ std::string SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
+ StringRef funcName, std::string Tag);
+ std::string SynthesizeBlockFunc(BlockExpr *CE, int i,
+ StringRef funcName, std::string Tag);
+ std::string SynthesizeBlockImpl(BlockExpr *CE,
+ std::string Tag, std::string Desc);
+ std::string SynthesizeBlockDescriptor(std::string DescTag,
+ std::string ImplTag,
+ int i, StringRef funcName,
+ unsigned hasCopy);
+ Stmt *SynthesizeBlockCall(CallExpr *Exp, const Expr* BlockExp);
+ void SynthesizeBlockLiterals(SourceLocation FunLocStart,
+ StringRef FunName);
+ FunctionDecl *SynthBlockInitFunctionDecl(StringRef name);
+ Stmt *SynthBlockInitExpr(BlockExpr *Exp,
+ const SmallVector<DeclRefExpr *, 8> &InnerBlockDeclRefs);
+
+ // Misc. helper routines.
+ QualType getProtocolType();
+ void WarnAboutReturnGotoStmts(Stmt *S);
+ void CheckFunctionPointerDecl(QualType dType, NamedDecl *ND);
+ void InsertBlockLiteralsWithinFunction(FunctionDecl *FD);
+ void InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD);
+
+ bool IsDeclStmtInForeachHeader(DeclStmt *DS);
+ void CollectBlockDeclRefInfo(BlockExpr *Exp);
+ void GetBlockDeclRefExprs(Stmt *S);
+ void GetInnerBlockDeclRefExprs(Stmt *S,
+ SmallVector<DeclRefExpr *, 8> &InnerBlockDeclRefs,
+ llvm::SmallPtrSet<const DeclContext *, 8> &InnerContexts);
+
+ // We avoid calling Type::isBlockPointerType(), since it operates on the
+ // canonical type. We only care if the top-level type is a closure pointer.
+ bool isTopLevelBlockPointerType(QualType T) {
+ return isa<BlockPointerType>(T);
+ }
+
+ /// convertBlockPointerToFunctionPointer - Converts a block-pointer type
+ /// to a function pointer type and upon success, returns true; false
+ /// otherwise.
+ bool convertBlockPointerToFunctionPointer(QualType &T) {
+ if (isTopLevelBlockPointerType(T)) {
+ const BlockPointerType *BPT = T->getAs<BlockPointerType>();
+ T = Context->getPointerType(BPT->getPointeeType());
+ return true;
+ }
+ return false;
+ }
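+  // Illustrative example (hypothetical parameter name): a parameter declared as
+  // 'int (^cb)(float)' has its block-pointer type rewritten here to the plain
+  // function-pointer type 'int (*)(float)', which is what the generated C code
+  // uses in its place.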
+
+ bool convertObjCTypeToCStyleType(QualType &T);
+
+ bool needToScanForQualifiers(QualType T);
+ QualType getSuperStructType();
+ QualType getConstantStringStructType();
+ QualType convertFunctionTypeOfBlocks(const FunctionType *FT);
+ bool BufferContainsPPDirectives(const char *startBuf, const char *endBuf);
+
+ void convertToUnqualifiedObjCType(QualType &T) {
+ if (T->isObjCQualifiedIdType()) {
+ bool isConst = T.isConstQualified();
+ T = isConst ? Context->getObjCIdType().withConst()
+ : Context->getObjCIdType();
+ }
+ else if (T->isObjCQualifiedClassType())
+ T = Context->getObjCClassType();
+ else if (T->isObjCObjectPointerType() &&
+ T->getPointeeType()->isObjCQualifiedInterfaceType()) {
+ if (const ObjCObjectPointerType * OBJPT =
+ T->getAsObjCInterfacePointerType()) {
+ const ObjCInterfaceType *IFaceT = OBJPT->getInterfaceType();
+ T = QualType(IFaceT, 0);
+ T = Context->getPointerType(T);
+ }
+ }
+ }
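+  // For example, this collapses 'id<NSCopying>' to 'id', 'Class<NSCopying>' to
+  // 'Class', and a qualified interface pointer such as 'NSObject<NSCopying> *'
+  // to 'NSObject *', since protocol qualifiers have no direct equivalent in the
+  // rewritten C code (NSCopying/NSObject are just illustrative names).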
+
+ // FIXME: This predicate seems like it would be useful to add to ASTContext.
+ bool isObjCType(QualType T) {
+ if (!LangOpts.ObjC1 && !LangOpts.ObjC2)
+ return false;
+
+ QualType OCT = Context->getCanonicalType(T).getUnqualifiedType();
+
+ if (OCT == Context->getCanonicalType(Context->getObjCIdType()) ||
+ OCT == Context->getCanonicalType(Context->getObjCClassType()))
+ return true;
+
+ if (const PointerType *PT = OCT->getAs<PointerType>()) {
+ if (isa<ObjCInterfaceType>(PT->getPointeeType()) ||
+ PT->getPointeeType()->isObjCQualifiedIdType())
+ return true;
+ }
+ return false;
+ }
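+  // isObjCType matches, e.g., 'id', 'Class', and pointer types whose pointee is
+  // an Objective-C interface or qualified-id type; plain C types such as 'int'
+  // or 'char *' do not match.
+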
+ bool PointerTypeTakesAnyBlockArguments(QualType QT);
+ bool PointerTypeTakesAnyObjCQualifiedType(QualType QT);
+ void GetExtentOfArgList(const char *Name, const char *&LParen,
+ const char *&RParen);
+
+ void QuoteDoublequotes(std::string &From, std::string &To) {
+ for (unsigned i = 0; i < From.length(); i++) {
+ if (From[i] == '"')
+ To += "\\\"";
+ else
+ To += From[i];
+ }
+ }
+
+ QualType getSimpleFunctionType(QualType result,
+ const QualType *args,
+ unsigned numArgs,
+ bool variadic = false) {
+ if (result == Context->getObjCInstanceType())
+ result = Context->getObjCIdType();
+ FunctionProtoType::ExtProtoInfo fpi;
+ fpi.Variadic = variadic;
+ return Context->getFunctionType(result, args, numArgs, fpi);
+ }
+
+ // Helper function: create a CStyleCastExpr with trivial type source info.
+ CStyleCastExpr* NoTypeInfoCStyleCastExpr(ASTContext *Ctx, QualType Ty,
+ CastKind Kind, Expr *E) {
+ TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation());
+ return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, 0, TInfo,
+ SourceLocation(), SourceLocation());
+ }
+
+ bool ImplementationIsNonLazy(const ObjCImplDecl *OD) const {
+ IdentifierInfo* II = &Context->Idents.get("load");
+ Selector LoadSel = Context->Selectors.getSelector(0, &II);
+ return OD->getClassMethod(LoadSel) != 0;
+ }
+ };
+
+}
+
+void RewriteModernObjC::RewriteBlocksInFunctionProtoType(QualType funcType,
+ NamedDecl *D) {
+ if (const FunctionProtoType *fproto
+ = dyn_cast<FunctionProtoType>(funcType.IgnoreParens())) {
+ for (FunctionProtoType::arg_type_iterator I = fproto->arg_type_begin(),
+ E = fproto->arg_type_end(); I && (I != E); ++I)
+ if (isTopLevelBlockPointerType(*I)) {
+ // All the args are checked/rewritten. Don't call twice!
+ RewriteBlockPointerDecl(D);
+ break;
+ }
+ }
+}
+
+void RewriteModernObjC::CheckFunctionPointerDecl(QualType funcType, NamedDecl *ND) {
+ const PointerType *PT = funcType->getAs<PointerType>();
+ if (PT && PointerTypeTakesAnyBlockArguments(funcType))
+ RewriteBlocksInFunctionProtoType(PT->getPointeeType(), ND);
+}
+
+static bool IsHeaderFile(const std::string &Filename) {
+ std::string::size_type DotPos = Filename.rfind('.');
+
+ if (DotPos == std::string::npos) {
+ // no file extension
+ return false;
+ }
+
+ std::string Ext = std::string(Filename.begin()+DotPos+1, Filename.end());
+ // C header: .h
+ // C++ header: .hh or .H;
+ return Ext == "h" || Ext == "hh" || Ext == "H";
+}
+
+RewriteModernObjC::RewriteModernObjC(std::string inFile, raw_ostream* OS,
+ DiagnosticsEngine &D, const LangOptions &LOpts,
+ bool silenceMacroWarn)
+ : Diags(D), LangOpts(LOpts), InFileName(inFile), OutFile(OS),
+ SilenceRewriteMacroWarning(silenceMacroWarn) {
+ IsHeader = IsHeaderFile(inFile);
+ RewriteFailedDiag = Diags.getCustomDiagID(DiagnosticsEngine::Warning,
+ "rewriting sub-expression within a macro (may not be correct)");
+  // FIXME. This should be an error, but if the block is never called it is
+  // harmless, and making it an error may break the inclusion of some headers.
+ GlobalBlockRewriteFailedDiag = Diags.getCustomDiagID(DiagnosticsEngine::Warning,
+ "rewriting block literal declared in global scope is not implemented");
+
+ TryFinallyContainsReturnDiag = Diags.getCustomDiagID(
+ DiagnosticsEngine::Warning,
+ "rewriter doesn't support user-specified control flow semantics "
+ "for @try/@finally (code may not execute properly)");
+}
+
+ASTConsumer *clang::CreateModernObjCRewriter(const std::string& InFile,
+ raw_ostream* OS,
+ DiagnosticsEngine &Diags,
+ const LangOptions &LOpts,
+ bool SilenceRewriteMacroWarning) {
+ return new RewriteModernObjC(InFile, OS, Diags, LOpts, SilenceRewriteMacroWarning);
+}
+
+void RewriteModernObjC::InitializeCommon(ASTContext &context) {
+ Context = &context;
+ SM = &Context->getSourceManager();
+ TUDecl = Context->getTranslationUnitDecl();
+ MsgSendFunctionDecl = 0;
+ MsgSendSuperFunctionDecl = 0;
+ MsgSendStretFunctionDecl = 0;
+ MsgSendSuperStretFunctionDecl = 0;
+ MsgSendFpretFunctionDecl = 0;
+ GetClassFunctionDecl = 0;
+ GetMetaClassFunctionDecl = 0;
+ GetSuperClassFunctionDecl = 0;
+ SelGetUidFunctionDecl = 0;
+ CFStringFunctionDecl = 0;
+ ConstantStringClassReference = 0;
+ NSStringRecord = 0;
+ CurMethodDef = 0;
+ CurFunctionDef = 0;
+ CurFunctionDeclToDeclareForBlock = 0;
+ GlobalVarDecl = 0;
+ GlobalConstructionExp = 0;
+ SuperStructDecl = 0;
+ ProtocolTypeDecl = 0;
+ ConstantStringDecl = 0;
+ BcLabelCount = 0;
+ SuperContructorFunctionDecl = 0;
+ NumObjCStringLiterals = 0;
+ PropParentMap = 0;
+ CurrentBody = 0;
+ DisableReplaceStmt = false;
+ objc_impl_method = false;
+
+ // Get the ID and start/end of the main file.
+ MainFileID = SM->getMainFileID();
+ const llvm::MemoryBuffer *MainBuf = SM->getBuffer(MainFileID);
+ MainFileStart = MainBuf->getBufferStart();
+ MainFileEnd = MainBuf->getBufferEnd();
+
+ Rewrite.setSourceMgr(Context->getSourceManager(), Context->getLangOpts());
+}
+
+//===----------------------------------------------------------------------===//
+// Top Level Driver Code
+//===----------------------------------------------------------------------===//
+
+void RewriteModernObjC::HandleTopLevelSingleDecl(Decl *D) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ // Two cases: either the decl could be in the main file, or it could be in a
+  // #included file. If the former, rewrite it now. If the latter, check to see
+ // if we rewrote the #include/#import.
+ SourceLocation Loc = D->getLocation();
+ Loc = SM->getExpansionLoc(Loc);
+
+ // If this is for a builtin, ignore it.
+ if (Loc.isInvalid()) return;
+
+  // Look for built-in declarations that we need to refer to during the rewrite.
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ RewriteFunctionDecl(FD);
+ } else if (VarDecl *FVD = dyn_cast<VarDecl>(D)) {
+ // declared in <Foundation/NSString.h>
+ if (FVD->getName() == "_NSConstantStringClassReference") {
+ ConstantStringClassReference = FVD;
+ return;
+ }
+ } else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(D)) {
+ RewriteCategoryDecl(CD);
+ } else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
+ if (PD->isThisDeclarationADefinition())
+ RewriteProtocolDecl(PD);
+ } else if (LinkageSpecDecl *LSD = dyn_cast<LinkageSpecDecl>(D)) {
+ // FIXME. This will not work in all situations and leaving it out
+ // is harmless.
+ // RewriteLinkageSpec(LSD);
+
+ // Recurse into linkage specifications
+ for (DeclContext::decl_iterator DI = LSD->decls_begin(),
+ DIEnd = LSD->decls_end();
+ DI != DIEnd; ) {
+ if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>((*DI))) {
+ if (!IFace->isThisDeclarationADefinition()) {
+ SmallVector<Decl *, 8> DG;
+ SourceLocation StartLoc = IFace->getLocStart();
+ do {
+ if (isa<ObjCInterfaceDecl>(*DI) &&
+ !cast<ObjCInterfaceDecl>(*DI)->isThisDeclarationADefinition() &&
+ StartLoc == (*DI)->getLocStart())
+ DG.push_back(*DI);
+ else
+ break;
+
+ ++DI;
+ } while (DI != DIEnd);
+ RewriteForwardClassDecl(DG);
+ continue;
+ }
+ else {
+ // Keep track of all interface declarations seen.
+ ObjCInterfacesSeen.push_back(IFace);
+ ++DI;
+ continue;
+ }
+ }
+
+ if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>((*DI))) {
+ if (!Proto->isThisDeclarationADefinition()) {
+ SmallVector<Decl *, 8> DG;
+ SourceLocation StartLoc = Proto->getLocStart();
+ do {
+ if (isa<ObjCProtocolDecl>(*DI) &&
+ !cast<ObjCProtocolDecl>(*DI)->isThisDeclarationADefinition() &&
+ StartLoc == (*DI)->getLocStart())
+ DG.push_back(*DI);
+ else
+ break;
+
+ ++DI;
+ } while (DI != DIEnd);
+ RewriteForwardProtocolDecl(DG);
+ continue;
+ }
+ }
+
+ HandleTopLevelSingleDecl(*DI);
+ ++DI;
+ }
+ }
+ // If we have a decl in the main file, see if we should rewrite it.
+ if (SM->isFromMainFile(Loc))
+ return HandleDeclInMainFile(D);
+}
+
+//===----------------------------------------------------------------------===//
+// Syntactic (non-AST) Rewriting Code
+//===----------------------------------------------------------------------===//
+
+void RewriteModernObjC::RewriteInclude() {
+ SourceLocation LocStart = SM->getLocForStartOfFile(MainFileID);
+ StringRef MainBuf = SM->getBufferData(MainFileID);
+ const char *MainBufStart = MainBuf.begin();
+ const char *MainBufEnd = MainBuf.end();
+ size_t ImportLen = strlen("import");
+
+ // Loop over the whole file, looking for includes.
+ for (const char *BufPtr = MainBufStart; BufPtr < MainBufEnd; ++BufPtr) {
+ if (*BufPtr == '#') {
+ if (++BufPtr == MainBufEnd)
+ return;
+ while (*BufPtr == ' ' || *BufPtr == '\t')
+ if (++BufPtr == MainBufEnd)
+ return;
+ if (!strncmp(BufPtr, "import", ImportLen)) {
+ // replace import with include
+ SourceLocation ImportLoc =
+ LocStart.getLocWithOffset(BufPtr-MainBufStart);
+ ReplaceText(ImportLoc, ImportLen, "include");
+ BufPtr += ImportLen;
+ }
+ }
+ }
+}
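+// For example, a '#import <Foundation/Foundation.h>' line in the main file is
+// rewritten to '#include <Foundation/Foundation.h>'; only the 'import' keyword
+// is replaced, the '#' and the header name are left untouched.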
+
+static std::string getIvarAccessString(ObjCIvarDecl *OID) {
+ const ObjCInterfaceDecl *ClassDecl = OID->getContainingInterface();
+ std::string S;
+ S = "((struct ";
+ S += ClassDecl->getIdentifier()->getName();
+ S += "_IMPL *)self)->";
+ S += OID->getName();
+ return S;
+}
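+// E.g. for an ivar '_name' of class 'Person' (hypothetical names),
+// getIvarAccessString produces roughly '((struct Person_IMPL *)self)->_name'.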
+
+void RewriteModernObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
+ ObjCImplementationDecl *IMD,
+ ObjCCategoryImplDecl *CID) {
+ static bool objcGetPropertyDefined = false;
+ static bool objcSetPropertyDefined = false;
+ SourceLocation startLoc = PID->getLocStart();
+ InsertText(startLoc, "// ");
+ const char *startBuf = SM->getCharacterData(startLoc);
+ assert((*startBuf == '@') && "bogus @synthesize location");
+ const char *semiBuf = strchr(startBuf, ';');
+ assert((*semiBuf == ';') && "@synthesize: can't find ';'");
+ SourceLocation onePastSemiLoc =
+ startLoc.getLocWithOffset(semiBuf-startBuf+1);
+
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ return; // FIXME: is this correct?
+
+ // Generate the 'getter' function.
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCIvarDecl *OID = PID->getPropertyIvarDecl();
+
+ if (!OID)
+ return;
+ unsigned Attributes = PD->getPropertyAttributes();
+ if (!PD->getGetterMethodDecl()->isDefined()) {
+ bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
+ (Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy));
+ std::string Getr;
+ if (GenGetProperty && !objcGetPropertyDefined) {
+ objcGetPropertyDefined = true;
+ // FIXME. Is this attribute correct in all cases?
+ Getr = "\nextern \"C\" __declspec(dllimport) "
+ "id objc_getProperty(id, SEL, long, bool);\n";
+ }
+ RewriteObjCMethodDecl(OID->getContainingInterface(),
+ PD->getGetterMethodDecl(), Getr);
+ Getr += "{ ";
+ // Synthesize an explicit cast to gain access to the ivar.
+ // See objc-act.c:objc_synthesize_new_getter() for details.
+ if (GenGetProperty) {
+ // return objc_getProperty(self, _cmd, offsetof(ClassDecl, OID), 1)
+ Getr += "typedef ";
+ const FunctionType *FPRetType = 0;
+ RewriteTypeIntoString(PD->getGetterMethodDecl()->getResultType(), Getr,
+ FPRetType);
+ Getr += " _TYPE";
+ if (FPRetType) {
+ Getr += ")"; // close the precedence "scope" for "*".
+
+ // Now, emit the argument types (if any).
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)){
+ Getr += "(";
+ for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
+ if (i) Getr += ", ";
+ std::string ParamStr = FT->getArgType(i).getAsString(
+ Context->getPrintingPolicy());
+ Getr += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (FT->getNumArgs()) Getr += ", ";
+ Getr += "...";
+ }
+ Getr += ")";
+ } else
+ Getr += "()";
+ }
+ Getr += ";\n";
+ Getr += "return (_TYPE)";
+ Getr += "objc_getProperty(self, _cmd, ";
+ RewriteIvarOffsetComputation(OID, Getr);
+ Getr += ", 1)";
+ }
+ else
+ Getr += "return " + getIvarAccessString(OID);
+ Getr += "; }";
+ InsertText(onePastSemiLoc, Getr);
+ }
+
+ if (PD->isReadOnly() || PD->getSetterMethodDecl()->isDefined())
+ return;
+
+ // Generate the 'setter' function.
+ std::string Setr;
+ bool GenSetProperty = Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy);
+ if (GenSetProperty && !objcSetPropertyDefined) {
+ objcSetPropertyDefined = true;
+ // FIXME. Is this attribute correct in all cases?
+ Setr = "\nextern \"C\" __declspec(dllimport) "
+ "void objc_setProperty (id, SEL, long, id, bool, bool);\n";
+ }
+
+ RewriteObjCMethodDecl(OID->getContainingInterface(),
+ PD->getSetterMethodDecl(), Setr);
+ Setr += "{ ";
+ // Synthesize an explicit cast to initialize the ivar.
+ // See objc-act.c:objc_synthesize_new_setter() for details.
+ if (GenSetProperty) {
+ Setr += "objc_setProperty (self, _cmd, ";
+ RewriteIvarOffsetComputation(OID, Setr);
+ Setr += ", (id)";
+ Setr += PD->getName();
+ Setr += ", ";
+ if (Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ Setr += "0, ";
+ else
+ Setr += "1, ";
+ if (Attributes & ObjCPropertyDecl::OBJC_PR_copy)
+ Setr += "1)";
+ else
+ Setr += "0)";
+ }
+ else {
+ Setr += getIvarAccessString(OID) + " = ";
+ Setr += PD->getName();
+ }
+ Setr += "; }";
+ InsertText(onePastSemiLoc, Setr);
+}
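+// Illustrative sketch of the output (hypothetical class/property names): for
+// '@synthesize name = _name;' of a simple nonatomic property in class 'Person',
+// the @synthesize is commented out and something along the lines of
+//   static NSString * _I_Person_name(Person * self, SEL _cmd) {
+//     return ((struct Person_IMPL *)self)->_name; }
+//   static void _I_Person_setName_(Person * self, SEL _cmd, NSString *name) {
+//     ((struct Person_IMPL *)self)->_name = name; }
+// is inserted after it. Atomic retain/copy properties are routed through
+// objc_getProperty and objc_setProperty instead.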
+
+static void RewriteOneForwardClassDecl(ObjCInterfaceDecl *ForwardDecl,
+ std::string &typedefString) {
+ typedefString += "#ifndef _REWRITER_typedef_";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += "\n";
+ typedefString += "#define _REWRITER_typedef_";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += "\n";
+ typedefString += "typedef struct objc_object ";
+ typedefString += ForwardDecl->getNameAsString();
+ // typedef struct { } _objc_exc_Classname;
+ typedefString += ";\ntypedef struct {} _objc_exc_";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += ";\n#endif\n";
+}
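+// For a forward declaration '@class Foo;' (hypothetical name), the generated
+// prologue looks roughly like:
+//   #ifndef _REWRITER_typedef_Foo
+//   #define _REWRITER_typedef_Foo
+//   typedef struct objc_object Foo;
+//   typedef struct {} _objc_exc_Foo;
+//   #endif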
+
+void RewriteModernObjC::RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
+ const std::string &typedefString) {
+ SourceLocation startLoc = ClassDecl->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+ const char *semiPtr = strchr(startBuf, ';');
+ // Replace the @class with typedefs corresponding to the classes.
+ ReplaceText(startLoc, semiPtr-startBuf+1, typedefString);
+}
+
+void RewriteModernObjC::RewriteForwardClassDecl(DeclGroupRef D) {
+ std::string typedefString;
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
+ ObjCInterfaceDecl *ForwardDecl = cast<ObjCInterfaceDecl>(*I);
+ if (I == D.begin()) {
+      // Translate to typedefs that forward-reference structs with the same name
+ // as the class. As a convenience, we include the original declaration
+ // as a comment.
+ typedefString += "// @class ";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += ";\n";
+ }
+ RewriteOneForwardClassDecl(ForwardDecl, typedefString);
+ }
+ DeclGroupRef::iterator I = D.begin();
+ RewriteForwardClassEpilogue(cast<ObjCInterfaceDecl>(*I), typedefString);
+}
+
+void RewriteModernObjC::RewriteForwardClassDecl(
+ const llvm::SmallVector<Decl*, 8> &D) {
+ std::string typedefString;
+ for (unsigned i = 0; i < D.size(); i++) {
+ ObjCInterfaceDecl *ForwardDecl = cast<ObjCInterfaceDecl>(D[i]);
+ if (i == 0) {
+ typedefString += "// @class ";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += ";\n";
+ }
+ RewriteOneForwardClassDecl(ForwardDecl, typedefString);
+ }
+ RewriteForwardClassEpilogue(cast<ObjCInterfaceDecl>(D[0]), typedefString);
+}
+
+void RewriteModernObjC::RewriteMethodDeclaration(ObjCMethodDecl *Method) {
+  // When the method is a synthesized one, such as a getter/setter, there is
+  // nothing to rewrite.
+ if (Method->isImplicit())
+ return;
+ SourceLocation LocStart = Method->getLocStart();
+ SourceLocation LocEnd = Method->getLocEnd();
+
+ if (SM->getExpansionLineNumber(LocEnd) >
+ SM->getExpansionLineNumber(LocStart)) {
+ InsertText(LocStart, "#if 0\n");
+ ReplaceText(LocEnd, 1, ";\n#endif\n");
+ } else {
+ InsertText(LocStart, "// ");
+ }
+}
+
+void RewriteModernObjC::RewriteProperty(ObjCPropertyDecl *prop) {
+ SourceLocation Loc = prop->getAtLoc();
+
+ ReplaceText(Loc, 0, "// ");
+ // FIXME: handle properties that are declared across multiple lines.
+}
+
+void RewriteModernObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) {
+ SourceLocation LocStart = CatDecl->getLocStart();
+
+ // FIXME: handle category headers that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+ if (CatDecl->getIvarLBraceLoc().isValid())
+ InsertText(CatDecl->getIvarLBraceLoc(), "// ");
+ for (ObjCCategoryDecl::ivar_iterator
+ I = CatDecl->ivar_begin(), E = CatDecl->ivar_end(); I != E; ++I) {
+ ObjCIvarDecl *Ivar = (*I);
+ SourceLocation LocStart = Ivar->getLocStart();
+ ReplaceText(LocStart, 0, "// ");
+ }
+ if (CatDecl->getIvarRBraceLoc().isValid())
+ InsertText(CatDecl->getIvarRBraceLoc(), "// ");
+
+ for (ObjCCategoryDecl::prop_iterator I = CatDecl->prop_begin(),
+ E = CatDecl->prop_end(); I != E; ++I)
+ RewriteProperty(*I);
+
+ for (ObjCCategoryDecl::instmeth_iterator
+ I = CatDecl->instmeth_begin(), E = CatDecl->instmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+ for (ObjCCategoryDecl::classmeth_iterator
+ I = CatDecl->classmeth_begin(), E = CatDecl->classmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+
+ // Lastly, comment out the @end.
+ ReplaceText(CatDecl->getAtEndRange().getBegin(),
+ strlen("@end"), "/* @end */");
+}
+
+void RewriteModernObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) {
+ SourceLocation LocStart = PDecl->getLocStart();
+ assert(PDecl->isThisDeclarationADefinition());
+
+ // FIXME: handle protocol headers that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+
+ for (ObjCProtocolDecl::instmeth_iterator
+ I = PDecl->instmeth_begin(), E = PDecl->instmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+ for (ObjCProtocolDecl::classmeth_iterator
+ I = PDecl->classmeth_begin(), E = PDecl->classmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+
+ for (ObjCInterfaceDecl::prop_iterator I = PDecl->prop_begin(),
+ E = PDecl->prop_end(); I != E; ++I)
+ RewriteProperty(*I);
+
+ // Lastly, comment out the @end.
+ SourceLocation LocEnd = PDecl->getAtEndRange().getBegin();
+ ReplaceText(LocEnd, strlen("@end"), "/* @end */");
+
+ // Must comment out @optional/@required
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ for (const char *p = startBuf; p < endBuf; p++) {
+ if (*p == '@' && !strncmp(p+1, "optional", strlen("optional"))) {
+ SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf);
+ ReplaceText(OptionalLoc, strlen("@optional"), "/* @optional */");
+
+ }
+ else if (*p == '@' && !strncmp(p+1, "required", strlen("required"))) {
+ SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf);
+ ReplaceText(OptionalLoc, strlen("@required"), "/* @required */");
+
+ }
+ }
+}
+
+void RewriteModernObjC::RewriteForwardProtocolDecl(DeclGroupRef D) {
+ SourceLocation LocStart = (*D.begin())->getLocStart();
+ if (LocStart.isInvalid())
+ llvm_unreachable("Invalid SourceLocation");
+  // FIXME: handle forward protocols that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+}
+
+void
+RewriteModernObjC::RewriteForwardProtocolDecl(const llvm::SmallVector<Decl*, 8> &DG) {
+ SourceLocation LocStart = DG[0]->getLocStart();
+ if (LocStart.isInvalid())
+ llvm_unreachable("Invalid SourceLocation");
+  // FIXME: handle forward protocols that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+}
+
+void
+RewriteModernObjC::RewriteLinkageSpec(LinkageSpecDecl *LSD) {
+ SourceLocation LocStart = LSD->getExternLoc();
+ if (LocStart.isInvalid())
+ llvm_unreachable("Invalid extern SourceLocation");
+
+ ReplaceText(LocStart, 0, "// ");
+ if (!LSD->hasBraces())
+ return;
+  // FIXME. We don't rewrite well if '{' is not on the same line as 'extern'.
+ SourceLocation LocRBrace = LSD->getRBraceLoc();
+ if (LocRBrace.isInvalid())
+ llvm_unreachable("Invalid rbrace SourceLocation");
+ ReplaceText(LocRBrace, 0, "// ");
+}
+
+void RewriteModernObjC::RewriteTypeIntoString(QualType T, std::string &ResultStr,
+ const FunctionType *&FPRetType) {
+ if (T->isObjCQualifiedIdType())
+ ResultStr += "id";
+ else if (T->isFunctionPointerType() ||
+ T->isBlockPointerType()) {
+ // needs special handling, since pointer-to-functions have special
+    // syntax (where a declaration models use).
+ QualType retType = T;
+ QualType PointeeTy;
+ if (const PointerType* PT = retType->getAs<PointerType>())
+ PointeeTy = PT->getPointeeType();
+ else if (const BlockPointerType *BPT = retType->getAs<BlockPointerType>())
+ PointeeTy = BPT->getPointeeType();
+ if ((FPRetType = PointeeTy->getAs<FunctionType>())) {
+ ResultStr += FPRetType->getResultType().getAsString(
+ Context->getPrintingPolicy());
+ ResultStr += "(*";
+ }
+ } else
+ ResultStr += T.getAsString(Context->getPrintingPolicy());
+}
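+// For example, for a method whose return type is the function pointer
+// 'int (*)(float)', RewriteTypeIntoString emits the leading 'int (*' and leaves
+// FPRetType set, so the caller can close the declarator and append the
+// '(float)' parameter list after the method name and arguments are printed.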
+
+void RewriteModernObjC::RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl,
+ ObjCMethodDecl *OMD,
+ std::string &ResultStr) {
+ //fprintf(stderr,"In RewriteObjCMethodDecl\n");
+ const FunctionType *FPRetType = 0;
+ ResultStr += "\nstatic ";
+ RewriteTypeIntoString(OMD->getResultType(), ResultStr, FPRetType);
+ ResultStr += " ";
+
+ // Unique method name
+ std::string NameStr;
+
+ if (OMD->isInstanceMethod())
+ NameStr += "_I_";
+ else
+ NameStr += "_C_";
+
+ NameStr += IDecl->getNameAsString();
+ NameStr += "_";
+
+ if (ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext())) {
+ NameStr += CID->getNameAsString();
+ NameStr += "_";
+ }
+ // Append selector names, replacing ':' with '_'
+ {
+ std::string selString = OMD->getSelector().getAsString();
+ int len = selString.size();
+ for (int i = 0; i < len; i++)
+ if (selString[i] == ':')
+ selString[i] = '_';
+ NameStr += selString;
+ }
+ // Remember this name for metadata emission
+ MethodInternalNames[OMD] = NameStr;
+ ResultStr += NameStr;
+
+ // Rewrite arguments
+ ResultStr += "(";
+
+ // invisible arguments
+ if (OMD->isInstanceMethod()) {
+ QualType selfTy = Context->getObjCInterfaceType(IDecl);
+ selfTy = Context->getPointerType(selfTy);
+ if (!LangOpts.MicrosoftExt) {
+ if (ObjCSynthesizedStructs.count(const_cast<ObjCInterfaceDecl*>(IDecl)))
+ ResultStr += "struct ";
+ }
+ // When rewriting for Microsoft, explicitly omit the structure name.
+ ResultStr += IDecl->getNameAsString();
+ ResultStr += " *";
+ }
+ else
+ ResultStr += Context->getObjCClassType().getAsString(
+ Context->getPrintingPolicy());
+
+ ResultStr += " self, ";
+ ResultStr += Context->getObjCSelType().getAsString(Context->getPrintingPolicy());
+ ResultStr += " _cmd";
+
+ // Method arguments.
+ for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
+ E = OMD->param_end(); PI != E; ++PI) {
+ ParmVarDecl *PDecl = *PI;
+ ResultStr += ", ";
+ if (PDecl->getType()->isObjCQualifiedIdType()) {
+ ResultStr += "id ";
+ ResultStr += PDecl->getNameAsString();
+ } else {
+ std::string Name = PDecl->getNameAsString();
+ QualType QT = PDecl->getType();
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ (void)convertBlockPointerToFunctionPointer(QT);
+ QT.getAsStringInternal(Name, Context->getPrintingPolicy());
+ ResultStr += Name;
+ }
+ }
+ if (OMD->isVariadic())
+ ResultStr += ", ...";
+ ResultStr += ") ";
+
+ if (FPRetType) {
+ ResultStr += ")"; // close the precedence "scope" for "*".
+
+ // Now, emit the argument types (if any).
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)) {
+ ResultStr += "(";
+ for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
+ if (i) ResultStr += ", ";
+ std::string ParamStr = FT->getArgType(i).getAsString(
+ Context->getPrintingPolicy());
+ ResultStr += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (FT->getNumArgs()) ResultStr += ", ";
+ ResultStr += "...";
+ }
+ ResultStr += ")";
+ } else {
+ ResultStr += "()";
+ }
+ }
+}
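+// Illustrative sketch of RewriteObjCMethodDecl's output (hypothetical
+// class/selector): the declaration of '- (void)setName:(NSString *)name
+// age:(int)age' in class 'Person' is rewritten to roughly
+//   static void _I_Person_setName_age_(Person * self, SEL _cmd,
+//                                      NSString *name, int age)
+// and the generated name is recorded in MethodInternalNames for later
+// metadata emission.
+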
+void RewriteModernObjC::RewriteImplementationDecl(Decl *OID) {
+ ObjCImplementationDecl *IMD = dyn_cast<ObjCImplementationDecl>(OID);
+ ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(OID);
+
+ if (IMD) {
+ InsertText(IMD->getLocStart(), "// ");
+ if (IMD->getIvarLBraceLoc().isValid())
+ InsertText(IMD->getIvarLBraceLoc(), "// ");
+ for (ObjCImplementationDecl::ivar_iterator
+ I = IMD->ivar_begin(), E = IMD->ivar_end(); I != E; ++I) {
+ ObjCIvarDecl *Ivar = (*I);
+ SourceLocation LocStart = Ivar->getLocStart();
+ ReplaceText(LocStart, 0, "// ");
+ }
+ if (IMD->getIvarRBraceLoc().isValid())
+ InsertText(IMD->getIvarRBraceLoc(), "// ");
+ }
+ else
+ InsertText(CID->getLocStart(), "// ");
+
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ I = IMD ? IMD->instmeth_begin() : CID->instmeth_begin(),
+ E = IMD ? IMD->instmeth_end() : CID->instmeth_end();
+ I != E; ++I) {
+ std::string ResultStr;
+ ObjCMethodDecl *OMD = *I;
+ RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
+ SourceLocation LocStart = OMD->getLocStart();
+ SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ ReplaceText(LocStart, endBuf-startBuf, ResultStr);
+ }
+
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ I = IMD ? IMD->classmeth_begin() : CID->classmeth_begin(),
+ E = IMD ? IMD->classmeth_end() : CID->classmeth_end();
+ I != E; ++I) {
+ std::string ResultStr;
+ ObjCMethodDecl *OMD = *I;
+ RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
+ SourceLocation LocStart = OMD->getLocStart();
+ SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ ReplaceText(LocStart, endBuf-startBuf, ResultStr);
+ }
+ for (ObjCCategoryImplDecl::propimpl_iterator
+ I = IMD ? IMD->propimpl_begin() : CID->propimpl_begin(),
+ E = IMD ? IMD->propimpl_end() : CID->propimpl_end();
+ I != E; ++I) {
+ RewritePropertyImplDecl(*I, IMD, CID);
+ }
+
+ InsertText(IMD ? IMD->getLocEnd() : CID->getLocEnd(), "// ");
+}
+
+void RewriteModernObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) {
+ // Do not synthesize more than once.
+ if (ObjCSynthesizedStructs.count(ClassDecl))
+ return;
+  // Make sure superclasses are written before the current class is written.
+ ObjCInterfaceDecl *SuperClass = ClassDecl->getSuperClass();
+ while (SuperClass) {
+ RewriteInterfaceDecl(SuperClass);
+ SuperClass = SuperClass->getSuperClass();
+ }
+ std::string ResultStr;
+ if (!ObjCWrittenInterfaces.count(ClassDecl->getCanonicalDecl())) {
+ // we haven't seen a forward decl - generate a typedef.
+ RewriteOneForwardClassDecl(ClassDecl, ResultStr);
+ RewriteIvarOffsetSymbols(ClassDecl, ResultStr);
+
+ RewriteObjCInternalStruct(ClassDecl, ResultStr);
+ // Mark this typedef as having been written into its c++ equivalent.
+ ObjCWrittenInterfaces.insert(ClassDecl->getCanonicalDecl());
+
+ for (ObjCInterfaceDecl::prop_iterator I = ClassDecl->prop_begin(),
+ E = ClassDecl->prop_end(); I != E; ++I)
+ RewriteProperty(*I);
+ for (ObjCInterfaceDecl::instmeth_iterator
+ I = ClassDecl->instmeth_begin(), E = ClassDecl->instmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+ for (ObjCInterfaceDecl::classmeth_iterator
+ I = ClassDecl->classmeth_begin(), E = ClassDecl->classmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+
+ // Lastly, comment out the @end.
+ ReplaceText(ClassDecl->getAtEndRange().getBegin(), strlen("@end"),
+ "/* @end */");
+ }
+}
+
+Stmt *RewriteModernObjC::RewritePropertyOrImplicitSetter(PseudoObjectExpr *PseudoOp) {
+ SourceRange OldRange = PseudoOp->getSourceRange();
+
+ // We just magically know some things about the structure of this
+ // expression.
+ ObjCMessageExpr *OldMsg =
+ cast<ObjCMessageExpr>(PseudoOp->getSemanticExpr(
+ PseudoOp->getNumSemanticExprs() - 1));
+
+ // Because the rewriter doesn't allow us to rewrite rewritten code,
+ // we need to suppress rewriting the sub-statements.
+ Expr *Base;
+ SmallVector<Expr*, 2> Args;
+ {
+ DisableReplaceStmtScope S(*this);
+
+ // Rebuild the base expression if we have one.
+ Base = 0;
+ if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) {
+ Base = OldMsg->getInstanceReceiver();
+ Base = cast<OpaqueValueExpr>(Base)->getSourceExpr();
+ Base = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Base));
+ }
+
+ unsigned numArgs = OldMsg->getNumArgs();
+ for (unsigned i = 0; i < numArgs; i++) {
+ Expr *Arg = OldMsg->getArg(i);
+ if (isa<OpaqueValueExpr>(Arg))
+ Arg = cast<OpaqueValueExpr>(Arg)->getSourceExpr();
+ Arg = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Arg));
+ Args.push_back(Arg);
+ }
+ }
+
+ // TODO: avoid this copy.
+ SmallVector<SourceLocation, 1> SelLocs;
+ OldMsg->getSelectorLocs(SelLocs);
+
+ ObjCMessageExpr *NewMsg = 0;
+ switch (OldMsg->getReceiverKind()) {
+ case ObjCMessageExpr::Class:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getClassReceiverTypeInfo(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::Instance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ Base,
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ case ObjCMessageExpr::SuperInstance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getSuperLoc(),
+ OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance,
+ OldMsg->getSuperType(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+ }
+
+ Stmt *Replacement = SynthMessageExpr(NewMsg);
+ ReplaceStmtWithRange(PseudoOp, Replacement, OldRange);
+ return Replacement;
+}
+
+Stmt *RewriteModernObjC::RewritePropertyOrImplicitGetter(PseudoObjectExpr *PseudoOp) {
+ SourceRange OldRange = PseudoOp->getSourceRange();
+
+ // We just magically know some things about the structure of this
+ // expression.
+ ObjCMessageExpr *OldMsg =
+ cast<ObjCMessageExpr>(PseudoOp->getResultExpr()->IgnoreImplicit());
+
+ // Because the rewriter doesn't allow us to rewrite rewritten code,
+ // we need to suppress rewriting the sub-statements.
+ Expr *Base = 0;
+ SmallVector<Expr*, 1> Args;
+ {
+ DisableReplaceStmtScope S(*this);
+ // Rebuild the base expression if we have one.
+ if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) {
+ Base = OldMsg->getInstanceReceiver();
+ Base = cast<OpaqueValueExpr>(Base)->getSourceExpr();
+ Base = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Base));
+ }
+ unsigned numArgs = OldMsg->getNumArgs();
+ for (unsigned i = 0; i < numArgs; i++) {
+ Expr *Arg = OldMsg->getArg(i);
+ if (isa<OpaqueValueExpr>(Arg))
+ Arg = cast<OpaqueValueExpr>(Arg)->getSourceExpr();
+ Arg = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Arg));
+ Args.push_back(Arg);
+ }
+ }
+
+ // Intentionally empty.
+ SmallVector<SourceLocation, 1> SelLocs;
+
+ ObjCMessageExpr *NewMsg = 0;
+ switch (OldMsg->getReceiverKind()) {
+ case ObjCMessageExpr::Class:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getClassReceiverTypeInfo(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::Instance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ Base,
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ case ObjCMessageExpr::SuperInstance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getSuperLoc(),
+ OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance,
+ OldMsg->getSuperType(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+ }
+
+ Stmt *Replacement = SynthMessageExpr(NewMsg);
+ ReplaceStmtWithRange(PseudoOp, Replacement, OldRange);
+ return Replacement;
+}
+
+/// SynthCountByEnumWithState - To print:
+/// ((unsigned int (*)
+/// (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int))
+/// (void *)objc_msgSend)((id)l_collection,
+/// sel_registerName(
+/// "countByEnumeratingWithState:objects:count:"),
+/// &enumState,
+/// (id *)__rw_items, (unsigned int)16)
+///
+void RewriteModernObjC::SynthCountByEnumWithState(std::string &buf) {
+ buf += "((unsigned int (*) (id, SEL, struct __objcFastEnumerationState *, "
+ "id *, unsigned int))(void *)objc_msgSend)";
+ buf += "\n\t\t";
+ buf += "((id)l_collection,\n\t\t";
+ buf += "sel_registerName(\"countByEnumeratingWithState:objects:count:\"),";
+ buf += "\n\t\t";
+ buf += "&enumState, "
+ "(id *)__rw_items, (unsigned int)16)";
+}
+
+/// RewriteBreakStmt - Rewrite for a break-stmt inside an ObjC2's foreach
+/// statement to exit to its outer synthesized loop.
+///
+Stmt *RewriteModernObjC::RewriteBreakStmt(BreakStmt *S) {
+ if (Stmts.empty() || !isa<ObjCForCollectionStmt>(Stmts.back()))
+ return S;
+ // replace break with goto __break_label
+ std::string buf;
+
+ SourceLocation startLoc = S->getLocStart();
+ buf = "goto __break_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ ReplaceText(startLoc, strlen("break"), buf);
+
+ return 0;
+}
+
+/// RewriteContinueStmt - Rewrite for a continue-stmt inside an ObjC2's foreach
+/// statement to continue with its inner synthesized loop.
+///
+Stmt *RewriteModernObjC::RewriteContinueStmt(ContinueStmt *S) {
+ if (Stmts.empty() || !isa<ObjCForCollectionStmt>(Stmts.back()))
+ return S;
+ // replace continue with goto __continue_label
+ std::string buf;
+
+ SourceLocation startLoc = S->getLocStart();
+ buf = "goto __continue_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ ReplaceText(startLoc, strlen("continue"), buf);
+
+ return 0;
+}
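+// E.g. inside the loop synthesized for 'for (id x in collection)', a 'break'
+// becomes 'goto __break_label_N' and a 'continue' becomes
+// 'goto __continue_label_N', where N is the label number pushed for the
+// enclosing foreach statement.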
+
+/// RewriteObjCForCollectionStmt - Rewriter for ObjC2's foreach statement.
+/// It rewrites:
+/// for ( type elem in collection) { stmts; }
+
+/// Into:
+/// {
+/// type elem;
+/// struct __objcFastEnumerationState enumState = { 0 };
+/// id __rw_items[16];
+/// id l_collection = (id)collection;
+/// unsigned long limit = [l_collection countByEnumeratingWithState:&enumState
+/// objects:__rw_items count:16];
+/// if (limit) {
+/// unsigned long startMutations = *enumState.mutationsPtr;
+/// do {
+/// unsigned long counter = 0;
+/// do {
+/// if (startMutations != *enumState.mutationsPtr)
+/// objc_enumerationMutation(l_collection);
+/// elem = (type)enumState.itemsPtr[counter++];
+/// stmts;
+/// __continue_label: ;
+/// } while (counter < limit);
+/// } while (limit = [l_collection countByEnumeratingWithState:&enumState
+/// objects:__rw_items count:16]);
+/// elem = nil;
+/// __break_label: ;
+/// }
+/// else
+/// elem = nil;
+/// }
+///
+Stmt *RewriteModernObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
+ SourceLocation OrigEnd) {
+ assert(!Stmts.empty() && "ObjCForCollectionStmt - Statement stack empty");
+ assert(isa<ObjCForCollectionStmt>(Stmts.back()) &&
+ "ObjCForCollectionStmt Statement stack mismatch");
+ assert(!ObjCBcLabelNo.empty() &&
+ "ObjCForCollectionStmt - Label No stack empty");
+
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+ StringRef elementName;
+ std::string elementTypeAsString;
+ std::string buf;
+ buf = "\n{\n\t";
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(S->getElement())) {
+ // type elem;
+ NamedDecl* D = cast<NamedDecl>(DS->getSingleDecl());
+ QualType ElementType = cast<ValueDecl>(D)->getType();
+ if (ElementType->isObjCQualifiedIdType() ||
+ ElementType->isObjCQualifiedInterfaceType())
+ // Simply use 'id' for all qualified types.
+ elementTypeAsString = "id";
+ else
+ elementTypeAsString = ElementType.getAsString(Context->getPrintingPolicy());
+ buf += elementTypeAsString;
+ buf += " ";
+ elementName = D->getName();
+ buf += elementName;
+ buf += ";\n\t";
+ }
+ else {
+ DeclRefExpr *DR = cast<DeclRefExpr>(S->getElement());
+ elementName = DR->getDecl()->getName();
+ ValueDecl *VD = cast<ValueDecl>(DR->getDecl());
+ if (VD->getType()->isObjCQualifiedIdType() ||
+ VD->getType()->isObjCQualifiedInterfaceType())
+ // Simply use 'id' for all qualified types.
+ elementTypeAsString = "id";
+ else
+ elementTypeAsString = VD->getType().getAsString(Context->getPrintingPolicy());
+ }
+
+ // struct __objcFastEnumerationState enumState = { 0 };
+ buf += "struct __objcFastEnumerationState enumState = { 0 };\n\t";
+ // id __rw_items[16];
+ buf += "id __rw_items[16];\n\t";
+ // id l_collection = (id)
+ buf += "id l_collection = (id)";
+ // Find start location of 'collection' the hard way!
+ const char *startCollectionBuf = startBuf;
+ startCollectionBuf += 3; // skip 'for'
+ startCollectionBuf = strchr(startCollectionBuf, '(');
+ startCollectionBuf++; // skip '('
+ // find 'in' and skip it.
+ while (*startCollectionBuf != ' ' ||
+ *(startCollectionBuf+1) != 'i' || *(startCollectionBuf+2) != 'n' ||
+ (*(startCollectionBuf+3) != ' ' &&
+ *(startCollectionBuf+3) != '[' && *(startCollectionBuf+3) != '('))
+ startCollectionBuf++;
+ startCollectionBuf += 3;
+
+ // Replace: "for (type element in" with string constructed thus far.
+ ReplaceText(startLoc, startCollectionBuf - startBuf, buf);
+ // Replace ')' in for '(' type elem in collection ')' with ';'
+ SourceLocation rightParenLoc = S->getRParenLoc();
+ const char *rparenBuf = SM->getCharacterData(rightParenLoc);
+ SourceLocation lparenLoc = startLoc.getLocWithOffset(rparenBuf-startBuf);
+ buf = ";\n\t";
+
+ // unsigned long limit = [l_collection countByEnumeratingWithState:&enumState
+ // objects:__rw_items count:16];
+ // which is synthesized into:
+ // unsigned int limit =
+ // ((unsigned int (*)
+ // (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int))
+ // (void *)objc_msgSend)((id)l_collection,
+ // sel_registerName(
+ // "countByEnumeratingWithState:objects:count:"),
+ // (struct __objcFastEnumerationState *)&state,
+ // (id *)__rw_items, (unsigned int)16);
+ buf += "unsigned long limit =\n\t\t";
+ SynthCountByEnumWithState(buf);
+ buf += ";\n\t";
+ /// if (limit) {
+ /// unsigned long startMutations = *enumState.mutationsPtr;
+ /// do {
+ /// unsigned long counter = 0;
+ /// do {
+ /// if (startMutations != *enumState.mutationsPtr)
+ /// objc_enumerationMutation(l_collection);
+ /// elem = (type)enumState.itemsPtr[counter++];
+ buf += "if (limit) {\n\t";
+ buf += "unsigned long startMutations = *enumState.mutationsPtr;\n\t";
+ buf += "do {\n\t\t";
+ buf += "unsigned long counter = 0;\n\t\t";
+ buf += "do {\n\t\t\t";
+ buf += "if (startMutations != *enumState.mutationsPtr)\n\t\t\t\t";
+ buf += "objc_enumerationMutation(l_collection);\n\t\t\t";
+ buf += elementName;
+ buf += " = (";
+ buf += elementTypeAsString;
+ buf += ")enumState.itemsPtr[counter++];";
+ // Replace ')' in for '(' type elem in collection ')' with all of these.
+ ReplaceText(lparenLoc, 1, buf);
+
+ /// __continue_label: ;
+ /// } while (counter < limit);
+ /// } while (limit = [l_collection countByEnumeratingWithState:&enumState
+ /// objects:__rw_items count:16]);
+ /// elem = nil;
+ /// __break_label: ;
+ /// }
+ /// else
+ /// elem = nil;
+ /// }
+ ///
+ buf = ";\n\t";
+ buf += "__continue_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ buf += ": ;";
+ buf += "\n\t\t";
+ buf += "} while (counter < limit);\n\t";
+ buf += "} while (limit = ";
+ SynthCountByEnumWithState(buf);
+ buf += ");\n\t";
+ buf += elementName;
+ buf += " = ((";
+ buf += elementTypeAsString;
+ buf += ")0);\n\t";
+ buf += "__break_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ buf += ": ;\n\t";
+ buf += "}\n\t";
+ buf += "else\n\t\t";
+ buf += elementName;
+ buf += " = ((";
+ buf += elementTypeAsString;
+ buf += ")0);\n\t";
+ buf += "}\n";
+
+ // Insert all these *after* the statement body.
+ // FIXME: If this should support Obj-C++, support CXXTryStmt
+ if (isa<CompoundStmt>(S->getBody())) {
+ SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(1);
+ InsertText(endBodyLoc, buf);
+ } else {
+ /* Need to treat single statements specially. For example:
+ *
+ * for (A *a in b) if (stuff()) break;
+ * for (A *a in b) xxxyy;
+ *
+ * The following code simply scans ahead to the semi to find the actual end.
+ */
+ const char *stmtBuf = SM->getCharacterData(OrigEnd);
+ const char *semiBuf = strchr(stmtBuf, ';');
+ assert(semiBuf && "Can't find ';'");
+ SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(semiBuf-stmtBuf+1);
+ InsertText(endBodyLoc, buf);
+ }
+ Stmts.pop_back();
+ ObjCBcLabelNo.pop_back();
+ return 0;
+}
+
+static void Write_RethrowObject(std::string &buf) {
+ buf += "{ struct _FIN { _FIN(id reth) : rethrow(reth) {}\n";
+ buf += "\t~_FIN() { if (rethrow) objc_exception_throw(rethrow); }\n";
+ buf += "\tid rethrow;\n";
+ buf += "\t} _fin_force_rethow(_rethrow);";
+}
+
+/// RewriteObjCSynchronizedStmt -
+/// This routine rewrites @synchronized(expr) stmt;
+/// into:
+/// objc_sync_enter(expr);
+/// @try stmt @finally { objc_sync_exit(expr); }
+///
+Stmt *RewriteModernObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @synchronized location");
+
+ std::string buf;
+ buf = "{ id _rethrow = 0; id _sync_obj = ";
+
+ const char *lparenBuf = startBuf;
+ while (*lparenBuf != '(') lparenBuf++;
+ ReplaceText(startLoc, lparenBuf-startBuf+1, buf);
+
+ buf = "; objc_sync_enter(_sync_obj);\n";
+ buf += "try {\n\tstruct _SYNC_EXIT { _SYNC_EXIT(id arg) : sync_exit(arg) {}";
+ buf += "\n\t~_SYNC_EXIT() {objc_sync_exit(sync_exit);}";
+ buf += "\n\tid sync_exit;";
+ buf += "\n\t} _sync_exit(_sync_obj);\n";
+
+ // We can't use S->getSynchExpr()->getLocEnd() to find the end location, since
+ // the sync expression is typically a message expression that's already
+  // been rewritten! (which implies the SourceLocations are invalid).
+ SourceLocation RParenExprLoc = S->getSynchBody()->getLocStart();
+ const char *RParenExprLocBuf = SM->getCharacterData(RParenExprLoc);
+ while (*RParenExprLocBuf != ')') RParenExprLocBuf--;
+ RParenExprLoc = startLoc.getLocWithOffset(RParenExprLocBuf-startBuf);
+
+  SourceLocation LBraceLoc = S->getSynchBody()->getLocStart();
+  const char *LBraceLocBuf = SM->getCharacterData(LBraceLoc);
+ assert (*LBraceLocBuf == '{');
+ ReplaceText(RParenExprLoc, (LBraceLocBuf - SM->getCharacterData(RParenExprLoc) + 1), buf);
+
+ SourceLocation startRBraceLoc = S->getSynchBody()->getLocEnd();
+ assert((*SM->getCharacterData(startRBraceLoc) == '}') &&
+ "bogus @synchronized block");
+
+ buf = "} catch (id e) {_rethrow = e;}\n";
+ Write_RethrowObject(buf);
+ buf += "}\n";
+ buf += "}\n";
+
+ ReplaceText(startRBraceLoc, 1, buf);
+
+ return 0;
+}
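+// Illustrative shape of the @synchronized rewrite above (modulo exact spacing):
+//   { id _rethrow = 0; id _sync_obj = <expr>; objc_sync_enter(_sync_obj);
+//     try { struct _SYNC_EXIT { ... } _sync_exit(_sync_obj);
+//       <original body>
+//     } catch (id e) {_rethrow = e;}
+//     { struct _FIN { ... } _fin_force_rethow(_rethrow);}
+//   }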
+
+void RewriteModernObjC::WarnAboutReturnGotoStmts(Stmt *S)
+{
+ // Perform a bottom up traversal of all children.
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
+ if (*CI)
+ WarnAboutReturnGotoStmts(*CI);
+
+ if (isa<ReturnStmt>(S) || isa<GotoStmt>(S)) {
+ Diags.Report(Context->getFullLoc(S->getLocStart()),
+ TryFinallyContainsReturnDiag);
+ }
+ return;
+}
+
+Stmt *RewriteModernObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
+ ObjCAtFinallyStmt *finalStmt = S->getFinallyStmt();
+ bool noCatch = S->getNumCatchStmts() == 0;
+ std::string buf;
+
+ if (finalStmt) {
+ if (noCatch)
+ buf = "{ id volatile _rethrow = 0;\n";
+ else {
+ buf = "{ id volatile _rethrow = 0;\ntry {\n";
+ }
+ }
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @try location");
+ if (finalStmt)
+ ReplaceText(startLoc, 1, buf);
+ else
+ // @try -> try
+ ReplaceText(startLoc, 1, "");
+
+ for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) {
+ ObjCAtCatchStmt *Catch = S->getCatchStmt(I);
+ VarDecl *catchDecl = Catch->getCatchParamDecl();
+
+ startLoc = Catch->getLocStart();
+ bool AtRemoved = false;
+ if (catchDecl) {
+ QualType t = catchDecl->getType();
+ if (const ObjCObjectPointerType *Ptr = t->getAs<ObjCObjectPointerType>()) {
+ // Should be a pointer to a class.
+ ObjCInterfaceDecl *IDecl = Ptr->getObjectType()->getInterface();
+ if (IDecl) {
+ std::string Result;
+ startBuf = SM->getCharacterData(startLoc);
+ assert((*startBuf == '@') && "bogus @catch location");
+ SourceLocation rParenLoc = Catch->getRParenLoc();
+ const char *rParenBuf = SM->getCharacterData(rParenLoc);
+
+ // _objc_exc_Foo *_e as argument to catch.
+ Result = "catch (_objc_exc_"; Result += IDecl->getNameAsString();
+ Result += " *_"; Result += catchDecl->getNameAsString();
+ Result += ")";
+ ReplaceText(startLoc, rParenBuf-startBuf+1, Result);
+ // Foo *e = (Foo *)_e;
+ Result.clear();
+ Result = "{ ";
+ Result += IDecl->getNameAsString();
+ Result += " *"; Result += catchDecl->getNameAsString();
+ Result += " = ("; Result += IDecl->getNameAsString(); Result += "*)";
+ Result += "_"; Result += catchDecl->getNameAsString();
+
+ Result += "; ";
+ SourceLocation lBraceLoc = Catch->getCatchBody()->getLocStart();
+ ReplaceText(lBraceLoc, 1, Result);
+ AtRemoved = true;
+ }
+ }
+ }
+ if (!AtRemoved)
+ // @catch -> catch
+ ReplaceText(startLoc, 1, "");
+
+ }
+ if (finalStmt) {
+ buf.clear();
+ if (noCatch)
+ buf = "catch (id e) {_rethrow = e;}\n";
+ else
+ buf = "}\ncatch (id e) {_rethrow = e;}\n";
+
+ SourceLocation startFinalLoc = finalStmt->getLocStart();
+ ReplaceText(startFinalLoc, 8, buf);
+ Stmt *body = finalStmt->getFinallyBody();
+ SourceLocation startFinalBodyLoc = body->getLocStart();
+ buf.clear();
+ Write_RethrowObject(buf);
+ ReplaceText(startFinalBodyLoc, 1, buf);
+
+ SourceLocation endFinalBodyLoc = body->getLocEnd();
+ ReplaceText(endFinalBodyLoc, 1, "}\n}");
+ // Now check for any return/goto statements within the @try.
+ WarnAboutReturnGotoStmts(S->getTryBody());
+ }
+
+ return 0;
+}
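+// Illustrative sketch (hypothetical bodies T/C/F, not verbatim output):
+//   @try { T } @catch (Foo *e) { C } @finally { F }
+// is rewritten roughly as:
+//   { id volatile _rethrow = 0;
+//   try {
+//   try { T }
+//   catch (_objc_exc_Foo *_e) { Foo *e = (Foo*)_e; C }
+//   }
+//   catch (id e) {_rethrow = e;}
+//   { struct _FIN { ... } _fin_force_rethow(_rethrow); // see Write_RethrowObject
+//   F
+//   }
+//   }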
+
+// This can't be done with ReplaceStmt(S, ThrowExpr), since
+// the throw expression is typically a message expression that has already
+// been rewritten (which implies its SourceLocations are invalid).
+Stmt *RewriteModernObjC::RewriteObjCThrowStmt(ObjCAtThrowStmt *S) {
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @throw location");
+
+ std::string buf;
+ /* void objc_exception_throw(id) __attribute__((noreturn)); */
+ if (S->getThrowExpr())
+ buf = "objc_exception_throw(";
+ else
+ buf = "throw";
+
+ // handle "@ throw" correctly.
+ const char *wBuf = strchr(startBuf, 'w');
+ assert((*wBuf == 'w') && "@throw: can't find 'w'");
+ ReplaceText(startLoc, wBuf-startBuf+1, buf);
+
+ const char *semiBuf = strchr(startBuf, ';');
+ assert((*semiBuf == ';') && "@throw: can't find ';'");
+ SourceLocation semiLoc = startLoc.getLocWithOffset(semiBuf-startBuf);
+ if (S->getThrowExpr())
+ ReplaceText(semiLoc, 1, ");");
+ return 0;
+}
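+// Illustrative sketch: "@throw e;" becomes "objc_exception_throw(e);", while a
+// bare rethrow "@throw;" becomes a plain C++ "throw;".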
+
+Stmt *RewriteModernObjC::RewriteAtEncode(ObjCEncodeExpr *Exp) {
+ // Create a new string expression.
+ QualType StrType = Context->getPointerType(Context->CharTy);
+ std::string StrEncoding;
+ Context->getObjCEncodingForType(Exp->getEncodedType(), StrEncoding);
+ Expr *Replacement = StringLiteral::Create(*Context, StrEncoding,
+ StringLiteral::Ascii, false,
+ StrType, SourceLocation());
+ ReplaceStmt(Exp, Replacement);
+
+ // Replace this subexpr in the parent.
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return Replacement;
+}
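+// Illustrative sketch: "@encode(int)" is replaced by the string literal "i",
+// i.e. the type encoding computed by getObjCEncodingForType().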
+
+Stmt *RewriteModernObjC::RewriteAtSelector(ObjCSelectorExpr *Exp) {
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ assert(SelGetUidFunctionDecl && "Can't find sel_registerName() decl");
+ // Create a call to sel_registerName("selName").
+ SmallVector<Expr*, 8> SelExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ SelExprs.push_back(StringLiteral::Create(*Context,
+ Exp->getSelector().getAsString(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ &SelExprs[0], SelExprs.size());
+ ReplaceStmt(Exp, SelExp);
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return SelExp;
+}
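+// Illustrative sketch (hypothetical selector): "@selector(foo:)" becomes the
+// call sel_registerName("foo:").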
+
+CallExpr *RewriteModernObjC::SynthesizeCallToFunctionDecl(
+ FunctionDecl *FD, Expr **args, unsigned nargs, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = FD->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE =
+ new (Context) DeclRefExpr(FD, false, msgSendType, VK_LValue, SourceLocation());
+
+ // Now, we cast the reference to a pointer to the objc_msgSend type.
+ QualType pToFunc = Context->getPointerType(msgSendType);
+ ImplicitCastExpr *ICE =
+ ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay,
+ DRE, 0, VK_RValue);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+
+ CallExpr *Exp =
+ new (Context) CallExpr(*Context, ICE, args, nargs,
+ FT->getCallResultType(*Context),
+ VK_RValue, EndLoc);
+ return Exp;
+}
+
+static bool scanForProtocolRefs(const char *startBuf, const char *endBuf,
+ const char *&startRef, const char *&endRef) {
+ while (startBuf < endBuf) {
+ if (*startBuf == '<')
+ startRef = startBuf; // mark the start.
+ if (*startBuf == '>') {
+ if (startRef && *startRef == '<') {
+ endRef = startBuf; // mark the end.
+ return true;
+ }
+ return false;
+ }
+ startBuf++;
+ }
+ return false;
+}
+
+static void scanToNextArgument(const char *&argRef) {
+ int angle = 0;
+ while (*argRef != ')' && (*argRef != ',' || angle > 0)) {
+ if (*argRef == '<')
+ angle++;
+ else if (*argRef == '>')
+ angle--;
+ argRef++;
+ }
+ assert(angle == 0 && "scanToNextArgument - bad protocol type syntax");
+}
+
+bool RewriteModernObjC::needToScanForQualifiers(QualType T) {
+ if (T->isObjCQualifiedIdType())
+ return true;
+ if (const PointerType *PT = T->getAs<PointerType>()) {
+ if (PT->getPointeeType()->isObjCQualifiedIdType())
+ return true;
+ }
+ if (T->isObjCObjectPointerType()) {
+ T = T->getPointeeType();
+ return T->isObjCQualifiedInterfaceType();
+ }
+ if (T->isArrayType()) {
+ QualType ElemTy = Context->getBaseElementType(T);
+ return needToScanForQualifiers(ElemTy);
+ }
+ return false;
+}
+
+void RewriteModernObjC::RewriteObjCQualifiedInterfaceTypes(Expr *E) {
+ QualType Type = E->getType();
+ if (needToScanForQualifiers(Type)) {
+ SourceLocation Loc, EndLoc;
+
+ if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E)) {
+ Loc = ECE->getLParenLoc();
+ EndLoc = ECE->getRParenLoc();
+ } else {
+ Loc = E->getLocStart();
+ EndLoc = E->getLocEnd();
+ }
+ // This will defend against trying to rewrite synthesized expressions.
+ if (Loc.isInvalid() || EndLoc.isInvalid())
+ return;
+
+ const char *startBuf = SM->getCharacterData(Loc);
+ const char *endBuf = SM->getCharacterData(EndLoc);
+ const char *startRef = 0, *endRef = 0;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc = Loc.getLocWithOffset(startRef-startBuf);
+ SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-startBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*");
+ InsertText(GreaterLoc, "*/");
+ }
+ }
+}
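+// Illustrative sketch (hypothetical names): a protocol-qualified cast such as
+// "(id<MyProto>)x" ends up as "(id/*<MyProto>*/)x" -- the protocol reference
+// is simply commented out.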
+
+void RewriteModernObjC::RewriteObjCQualifiedInterfaceTypes(Decl *Dcl) {
+ SourceLocation Loc;
+ QualType Type;
+ const FunctionProtoType *proto = 0;
+ if (VarDecl *VD = dyn_cast<VarDecl>(Dcl)) {
+ Loc = VD->getLocation();
+ Type = VD->getType();
+ }
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Dcl)) {
+ Loc = FD->getLocation();
+ // Check for ObjC 'id' and class types that have been adorned with protocol
+ // information (id<p>, C<p>*). The protocol references need to be rewritten!
+ const FunctionType *funcType = FD->getType()->getAs<FunctionType>();
+ assert(funcType && "missing function type");
+ proto = dyn_cast<FunctionProtoType>(funcType);
+ if (!proto)
+ return;
+ Type = proto->getResultType();
+ }
+ else if (FieldDecl *FD = dyn_cast<FieldDecl>(Dcl)) {
+ Loc = FD->getLocation();
+ Type = FD->getType();
+ }
+ else
+ return;
+
+ if (needToScanForQualifiers(Type)) {
+ // Since types are unique, we need to scan the buffer.
+
+ const char *endBuf = SM->getCharacterData(Loc);
+ const char *startBuf = endBuf;
+ while (*startBuf != ';' && *startBuf != '<' && startBuf != MainFileStart)
+ startBuf--; // scan backward (from the decl location) for return type.
+ const char *startRef = 0, *endRef = 0;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc = Loc.getLocWithOffset(startRef-endBuf);
+ SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-endBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*");
+ InsertText(GreaterLoc, "*/");
+ }
+ }
+ if (!proto)
+ return; // most likely, was a variable
+ // Now check arguments.
+ const char *startBuf = SM->getCharacterData(Loc);
+ const char *startFuncBuf = startBuf;
+ for (unsigned i = 0; i < proto->getNumArgs(); i++) {
+ if (needToScanForQualifiers(proto->getArgType(i))) {
+ // Since types are unique, we need to scan the buffer.
+
+ const char *endBuf = startBuf;
+ // scan forward (from the decl location) for argument types.
+ scanToNextArgument(endBuf);
+ const char *startRef = 0, *endRef = 0;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc =
+ Loc.getLocWithOffset(startRef-startFuncBuf);
+ SourceLocation GreaterLoc =
+ Loc.getLocWithOffset(endRef-startFuncBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*");
+ InsertText(GreaterLoc, "*/");
+ }
+ startBuf = ++endBuf;
+ }
+ else {
+ // If the function name is derived from a macro expansion, then the
+ // argument buffer will not follow the name. Need to speak with Chris.
+ while (*startBuf && *startBuf != ')' && *startBuf != ',')
+ startBuf++; // scan forward (from the decl location) for argument types.
+ startBuf++;
+ }
+ }
+}
+
+void RewriteModernObjC::RewriteTypeOfDecl(VarDecl *ND) {
+ QualType QT = ND->getType();
+ const Type* TypePtr = QT->getAs<Type>();
+ if (!isa<TypeOfExprType>(TypePtr))
+ return;
+ while (isa<TypeOfExprType>(TypePtr)) {
+ const TypeOfExprType *TypeOfExprTypePtr = cast<TypeOfExprType>(TypePtr);
+ QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType();
+ TypePtr = QT->getAs<Type>();
+ }
+ // FIXME. This will not work for multiple declarators; as in:
+ // __typeof__(a) b,c,d;
+ std::string TypeAsString(QT.getAsString(Context->getPrintingPolicy()));
+ SourceLocation DeclLoc = ND->getTypeSpecStartLoc();
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ if (ND->getInit()) {
+ std::string Name(ND->getNameAsString());
+ TypeAsString += " " + Name + " = ";
+ Expr *E = ND->getInit();
+ SourceLocation startLoc;
+ if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
+ startLoc = ECE->getLParenLoc();
+ else
+ startLoc = E->getLocStart();
+ startLoc = SM->getExpansionLoc(startLoc);
+ const char *endBuf = SM->getCharacterData(startLoc);
+ ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
+ }
+ else {
+ SourceLocation X = ND->getLocEnd();
+ X = SM->getExpansionLoc(X);
+ const char *endBuf = SM->getCharacterData(X);
+ ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
+ }
+}
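+// Illustrative sketch: assuming "int a;", the declaration "__typeof__(a) b = a;"
+// is rewritten to "int b = a;" -- the __typeof__ spelling is replaced by the
+// underlying expression's type.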
+
+// SynthSelGetUidFunctionDecl - SEL sel_registerName(const char *str);
+void RewriteModernObjC::SynthSelGetUidFunctionDecl() {
+ IdentifierInfo *SelGetUidIdent = &Context->Idents.get("sel_registerName");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
+ QualType getFuncType =
+ getSimpleFunctionType(Context->getObjCSelType(), &ArgTys[0], ArgTys.size());
+ SelGetUidFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ SelGetUidIdent, getFuncType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+void RewriteModernObjC::RewriteFunctionDecl(FunctionDecl *FD) {
+ // declared in <objc/objc.h>
+ if (FD->getIdentifier() &&
+ FD->getName() == "sel_registerName") {
+ SelGetUidFunctionDecl = FD;
+ return;
+ }
+ RewriteObjCQualifiedInterfaceTypes(FD);
+}
+
+void RewriteModernObjC::RewriteBlockPointerType(std::string& Str, QualType Type) {
+ std::string TypeString(Type.getAsString(Context->getPrintingPolicy()));
+ const char *argPtr = TypeString.c_str();
+ if (!strchr(argPtr, '^')) {
+ Str += TypeString;
+ return;
+ }
+ while (*argPtr) {
+ Str += (*argPtr == '^' ? '*' : *argPtr);
+ argPtr++;
+ }
+}
+
+// FIXME. Consolidate this routine with RewriteBlockPointerType.
+void RewriteModernObjC::RewriteBlockPointerTypeVariable(std::string& Str,
+ ValueDecl *VD) {
+ QualType Type = VD->getType();
+ std::string TypeString(Type.getAsString(Context->getPrintingPolicy()));
+ const char *argPtr = TypeString.c_str();
+ int paren = 0;
+ while (*argPtr) {
+ switch (*argPtr) {
+ case '(':
+ Str += *argPtr;
+ paren++;
+ break;
+ case ')':
+ Str += *argPtr;
+ paren--;
+ break;
+ case '^':
+ Str += '*';
+ if (paren == 1)
+ Str += VD->getNameAsString();
+ break;
+ default:
+ Str += *argPtr;
+ break;
+ }
+ argPtr++;
+ }
+}
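+// Illustrative sketch: for a hypothetical ValueDecl "blk" of type
+// "void (^)(int)", RewriteBlockPointerType emits "void (*)(int)" and
+// RewriteBlockPointerTypeVariable emits "void (*blk)(int)".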
+
+
+void RewriteModernObjC::RewriteBlockLiteralFunctionDecl(FunctionDecl *FD) {
+ SourceLocation FunLocStart = FD->getTypeSpecStartLoc();
+ const FunctionType *funcType = FD->getType()->getAs<FunctionType>();
+ const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(funcType);
+ if (!proto)
+ return;
+ QualType Type = proto->getResultType();
+ std::string FdStr = Type.getAsString(Context->getPrintingPolicy());
+ FdStr += " ";
+ FdStr += FD->getName();
+ FdStr += "(";
+ unsigned numArgs = proto->getNumArgs();
+ for (unsigned i = 0; i < numArgs; i++) {
+ QualType ArgType = proto->getArgType(i);
+ RewriteBlockPointerType(FdStr, ArgType);
+ if (i+1 < numArgs)
+ FdStr += ", ";
+ }
+ FdStr += ");\n";
+ InsertText(FunLocStart, FdStr);
+ CurFunctionDeclToDeclareForBlock = 0;
+}
+
+// SynthSuperContructorFunctionDecl - id objc_super(id obj, id super);
+void RewriteModernObjC::SynthSuperContructorFunctionDecl() {
+ if (SuperContructorFunctionDecl)
+ return;
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("__rw_objc_super");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size());
+ SuperContructorFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthMsgSendFunctionDecl - id objc_msgSend(id self, SEL op, ...);
+void RewriteModernObjC::SynthMsgSendFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
+ MsgSendFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthMsgSendSuperFunctionDecl - id objc_msgSendSuper(struct objc_super *, SEL op, ...);
+void RewriteModernObjC::SynthMsgSendSuperFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSendSuper");
+ SmallVector<QualType, 16> ArgTys;
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("objc_super"));
+ QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
+ assert(!argT.isNull() && "Can't build 'struct objc_super *' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
+ MsgSendSuperFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthMsgSendStretFunctionDecl - id objc_msgSend_stret(id self, SEL op, ...);
+void RewriteModernObjC::SynthMsgSendStretFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_stret");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
+ MsgSendStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthMsgSendSuperStretFunctionDecl -
+// id objc_msgSendSuper_stret(struct objc_super *, SEL op, ...);
+void RewriteModernObjC::SynthMsgSendSuperStretFunctionDecl() {
+ IdentifierInfo *msgSendIdent =
+ &Context->Idents.get("objc_msgSendSuper_stret");
+ SmallVector<QualType, 16> ArgTys;
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("objc_super"));
+ QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
+ assert(!argT.isNull() && "Can't build 'struct objc_super *' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
+ MsgSendSuperStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthMsgSendFpretFunctionDecl - double objc_msgSend_fpret(id self, SEL op, ...);
+void RewriteModernObjC::SynthMsgSendFpretFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_fpret");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->DoubleTy,
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
+ MsgSendFpretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthGetClassFunctionDecl - id objc_getClass(const char *name);
+void RewriteModernObjC::SynthGetClassFunctionDecl() {
+ IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getClass");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
+ QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size());
+ GetClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ getClassIdent, getClassType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthGetSuperClassFunctionDecl - Class class_getSuperclass(Class cls);
+void RewriteModernObjC::SynthGetSuperClassFunctionDecl() {
+ IdentifierInfo *getSuperClassIdent =
+ &Context->Idents.get("class_getSuperclass");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getObjCClassType());
+ QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(),
+ &ArgTys[0], ArgTys.size());
+ GetSuperClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ getSuperClassIdent,
+ getClassType, 0,
+ SC_Extern,
+ SC_None,
+ false);
+}
+
+// SynthGetMetaClassFunctionDecl - id objc_getMetaClass(const char *name);
+void RewriteModernObjC::SynthGetMetaClassFunctionDecl() {
+ IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getMetaClass");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
+ QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size());
+ GetMetaClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ getClassIdent, getClassType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+Stmt *RewriteModernObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
+ QualType strType = getConstantStringStructType();
+
+ std::string S = "__NSConstantStringImpl_";
+
+ std::string tmpName = InFileName;
+ unsigned i;
+ for (i=0; i < tmpName.length(); i++) {
+ char c = tmpName.at(i);
+ // Replace any non-alphanumeric characters with '_'.
+ if (!isalpha(c) && (c < '0' || c > '9'))
+ tmpName[i] = '_';
+ }
+ S += tmpName;
+ S += "_";
+ S += utostr(NumObjCStringLiterals++);
+
+ Preamble += "static __NSConstantStringImpl " + S;
+ Preamble += " __attribute__ ((section (\"__DATA, __cfstring\"))) = {__CFConstantStringClassReference,";
+ Preamble += "0x000007c8,"; // utf8_str
+ // The pretty printer for StringLiteral handles escape characters properly.
+ std::string prettyBufS;
+ llvm::raw_string_ostream prettyBuf(prettyBufS);
+ Exp->getString()->printPretty(prettyBuf, *Context, 0,
+ PrintingPolicy(LangOpts));
+ Preamble += prettyBuf.str();
+ Preamble += ",";
+ Preamble += utostr(Exp->getString()->getByteLength()) + "};\n";
+
+ VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), &Context->Idents.get(S),
+ strType, 0, SC_Static, SC_None);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false, strType, VK_LValue,
+ SourceLocation());
+ Expr *Unop = new (Context) UnaryOperator(DRE, UO_AddrOf,
+ Context->getPointerType(DRE->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ // cast to NSConstantString *
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
+ CK_CPointerToObjCPointerCast, Unop);
+ ReplaceStmt(Exp, cast);
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return cast;
+}
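+// Illustrative sketch (hypothetical input file "main.m", first literal in the
+// file): @"hello" adds to the preamble, roughly,
+//   static __NSConstantStringImpl __NSConstantStringImpl_main_m_0
+//     __attribute__ ((section ("__DATA, __cfstring"))) =
+//     {__CFConstantStringClassReference, 0x000007c8, "hello", 5};
+// and the literal itself is replaced by &__NSConstantStringImpl_main_m_0,
+// cast to the literal's original NSString type.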
+
+Stmt *RewriteModernObjC::RewriteObjCBoolLiteralExpr(ObjCBoolLiteralExpr *Exp) {
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+
+ Expr *FlagExp = IntegerLiteral::Create(*Context,
+ llvm::APInt(IntSize, Exp->getValue()),
+ Context->IntTy, Exp->getLocation());
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Context->ObjCBuiltinBoolTy,
+ CK_BitCast, FlagExp);
+ ParenExpr *PE = new (Context) ParenExpr(Exp->getLocation(), Exp->getExprLoc(),
+ cast);
+ ReplaceStmt(Exp, PE);
+ return PE;
+}
+
+Stmt *RewriteModernObjC::RewriteObjCNumericLiteralExpr(ObjCNumericLiteral *Exp) {
+ // synthesize declaration of helper functions needed in this routine.
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ // use objc_msgSend() for all.
+ if (!MsgSendFunctionDecl)
+ SynthMsgSendFunctionDecl();
+ if (!GetClassFunctionDecl)
+ SynthGetClassFunctionDecl();
+
+ FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
+ SourceLocation StartLoc = Exp->getLocStart();
+ SourceLocation EndLoc = Exp->getLocEnd();
+
+ // Synthesize a call to objc_msgSend().
+ SmallVector<Expr*, 4> MsgExprs;
+ SmallVector<Expr*, 4> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ QualType expType = Exp->getType();
+
+ // Create a call to objc_getClass("NSNumber"). It will be the 1st argument.
+ ObjCInterfaceDecl *Class =
+ expType->getPointeeType()->getAs<ObjCObjectType>()->getInterface();
+
+ IdentifierInfo *clsName = Class->getIdentifier();
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ clsName->getName(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size(),
+ StartLoc, EndLoc);
+ MsgExprs.push_back(Cls);
+
+ // Create a call to sel_registerName("numberWithBool:"), etc.
+ // It will be the 2nd argument.
+ SmallVector<Expr*, 4> SelExprs;
+ ObjCMethodDecl *NumericMethod = Exp->getObjCNumericLiteralMethod();
+ SelExprs.push_back(StringLiteral::Create(*Context,
+ NumericMethod->getSelector().getAsString(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ &SelExprs[0], SelExprs.size(),
+ StartLoc, EndLoc);
+ MsgExprs.push_back(SelExp);
+
+ // User provided numeric literal is the 3rd, and last, argument.
+ Expr *userExpr = Exp->getNumber();
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(userExpr)) {
+ QualType type = ICE->getType();
+ const Expr *SubExpr = ICE->IgnoreParenImpCasts();
+ CastKind CK = CK_BitCast;
+ if (SubExpr->getType()->isIntegralType(*Context) && type->isBooleanType())
+ CK = CK_IntegralToBoolean;
+ userExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, userExpr);
+ }
+ MsgExprs.push_back(userExpr);
+
+ SmallVector<QualType, 4> ArgTypes;
+ ArgTypes.push_back(Context->getObjCIdType());
+ ArgTypes.push_back(Context->getObjCSelType());
+ for (ObjCMethodDecl::param_iterator PI = NumericMethod->param_begin(),
+ E = NumericMethod->param_end(); PI != E; ++PI)
+ ArgTypes.push_back((*PI)->getType());
+
+ QualType returnType = Exp->getType();
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = MsgSendFlavor->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
+ VK_LValue, SourceLocation());
+
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, DRE);
+
+ // Now do the "normal" pointer to function cast.
+ QualType castType =
+ getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
+ NumericMethod->isVariadic());
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
+ MsgExprs.size(),
+ FT->getResultType(), VK_RValue,
+ EndLoc);
+ ReplaceStmt(Exp, CE);
+ return CE;
+}
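+// Illustrative sketch (not verbatim): a literal such as @42 is lowered to the
+// usual "funky cast" message send, roughly
+//   ((NSNumber *(*)(id, SEL, int))(void *)objc_msgSend)
+//     ((id)objc_getClass("NSNumber"), sel_registerName("numberWithInt:"), 42)
+// where the class and selector come from the literal's type and its
+// getObjCNumericLiteralMethod().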
+
+Stmt *RewriteModernObjC::RewriteObjCArrayLiteralExpr(ObjCArrayLiteral *Exp) {
+ // synthesize declaration of helper functions needed in this routine.
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ // use objc_msgSend() for all.
+ if (!MsgSendFunctionDecl)
+ SynthMsgSendFunctionDecl();
+ if (!GetClassFunctionDecl)
+ SynthGetClassFunctionDecl();
+
+ FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
+ SourceLocation StartLoc = Exp->getLocStart();
+ SourceLocation EndLoc = Exp->getLocEnd();
+
+ // Build the expression: __NSContainer_literal(int, ...).arr
+ QualType IntQT = Context->IntTy;
+ QualType NSArrayFType =
+ getSimpleFunctionType(Context->VoidTy, &IntQT, 1, true);
+ std::string NSArrayFName("__NSContainer_literal");
+ FunctionDecl *NSArrayFD = SynthBlockInitFunctionDecl(NSArrayFName);
+ DeclRefExpr *NSArrayDRE =
+ new (Context) DeclRefExpr(NSArrayFD, false, NSArrayFType, VK_RValue,
+ SourceLocation());
+
+ SmallVector<Expr*, 16> InitExprs;
+ unsigned NumElements = Exp->getNumElements();
+ unsigned UnsignedIntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
+ Expr *count = IntegerLiteral::Create(*Context,
+ llvm::APInt(UnsignedIntSize, NumElements),
+ Context->UnsignedIntTy, SourceLocation());
+ InitExprs.push_back(count);
+ for (unsigned i = 0; i < NumElements; i++)
+ InitExprs.push_back(Exp->getElement(i));
+ Expr *NSArrayCallExpr =
+ new (Context) CallExpr(*Context, NSArrayDRE, &InitExprs[0], InitExprs.size(),
+ NSArrayFType, VK_LValue, SourceLocation());
+
+ FieldDecl *ARRFD = FieldDecl::Create(*Context, 0, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("arr"),
+ Context->getPointerType(Context->VoidPtrTy), 0,
+ /*BitWidth=*/0, /*Mutable=*/true,
+ /*HasInit=*/false);
+ MemberExpr *ArrayLiteralME =
+ new (Context) MemberExpr(NSArrayCallExpr, false, ARRFD,
+ SourceLocation(),
+ ARRFD->getType(), VK_LValue,
+ OK_Ordinary);
+ QualType ConstIdT = Context->getObjCIdType().withConst();
+ CStyleCastExpr * ArrayLiteralObjects =
+ NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(ConstIdT),
+ CK_BitCast,
+ ArrayLiteralME);
+
+ // Synthesize a call to objc_msgSend().
+ SmallVector<Expr*, 32> MsgExprs;
+ SmallVector<Expr*, 4> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ QualType expType = Exp->getType();
+
+ // Create a call to objc_getClass("NSArray"). It will be the 1st argument.
+ ObjCInterfaceDecl *Class =
+ expType->getPointeeType()->getAs<ObjCObjectType>()->getInterface();
+
+ IdentifierInfo *clsName = Class->getIdentifier();
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ clsName->getName(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size(),
+ StartLoc, EndLoc);
+ MsgExprs.push_back(Cls);
+
+ // Create a call to sel_registerName("arrayWithObjects:count:").
+ // It will be the 2nd argument.
+ SmallVector<Expr*, 4> SelExprs;
+ ObjCMethodDecl *ArrayMethod = Exp->getArrayWithObjectsMethod();
+ SelExprs.push_back(StringLiteral::Create(*Context,
+ ArrayMethod->getSelector().getAsString(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ &SelExprs[0], SelExprs.size(),
+ StartLoc, EndLoc);
+ MsgExprs.push_back(SelExp);
+
+ // (const id [])objects
+ MsgExprs.push_back(ArrayLiteralObjects);
+
+ // (NSUInteger)cnt
+ Expr *cnt = IntegerLiteral::Create(*Context,
+ llvm::APInt(UnsignedIntSize, NumElements),
+ Context->UnsignedIntTy, SourceLocation());
+ MsgExprs.push_back(cnt);
+
+
+ SmallVector<QualType, 4> ArgTypes;
+ ArgTypes.push_back(Context->getObjCIdType());
+ ArgTypes.push_back(Context->getObjCSelType());
+ for (ObjCMethodDecl::param_iterator PI = ArrayMethod->param_begin(),
+ E = ArrayMethod->param_end(); PI != E; ++PI)
+ ArgTypes.push_back((*PI)->getType());
+
+ QualType returnType = Exp->getType();
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = MsgSendFlavor->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
+ VK_LValue, SourceLocation());
+
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, DRE);
+
+ // Now do the "normal" pointer to function cast.
+ QualType castType =
+ getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
+ ArrayMethod->isVariadic());
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
+ MsgExprs.size(),
+ FT->getResultType(), VK_RValue,
+ EndLoc);
+ ReplaceStmt(Exp, CE);
+ return CE;
+}
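+// Illustrative sketch (hypothetical elements a, b; not verbatim): @[ a, b ]
+// becomes, roughly,
+//   ((NSArray *(*)(id, SEL, const id *, NSUInteger))(void *)objc_msgSend)
+//     ((id)objc_getClass("NSArray"),
+//      sel_registerName("arrayWithObjects:count:"),
+//      (const id *)__NSContainer_literal(2, a, b).arr, 2)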
+
+Stmt *RewriteModernObjC::RewriteObjCDictionaryLiteralExpr(ObjCDictionaryLiteral *Exp) {
+ // synthesize declaration of helper functions needed in this routine.
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ // use objc_msgSend() for all.
+ if (!MsgSendFunctionDecl)
+ SynthMsgSendFunctionDecl();
+ if (!GetClassFunctionDecl)
+ SynthGetClassFunctionDecl();
+
+ FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
+ SourceLocation StartLoc = Exp->getLocStart();
+ SourceLocation EndLoc = Exp->getLocEnd();
+
+ // Build the expression: __NSContainer_literal(int, ...).arr
+ QualType IntQT = Context->IntTy;
+ QualType NSDictFType =
+ getSimpleFunctionType(Context->VoidTy, &IntQT, 1, true);
+ std::string NSDictFName("__NSContainer_literal");
+ FunctionDecl *NSDictFD = SynthBlockInitFunctionDecl(NSDictFName);
+ DeclRefExpr *NSDictDRE =
+ new (Context) DeclRefExpr(NSDictFD, false, NSDictFType, VK_RValue,
+ SourceLocation());
+
+ SmallVector<Expr*, 16> KeyExprs;
+ SmallVector<Expr*, 16> ValueExprs;
+
+ unsigned NumElements = Exp->getNumElements();
+ unsigned UnsignedIntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->UnsignedIntTy));
+ Expr *count = IntegerLiteral::Create(*Context,
+ llvm::APInt(UnsignedIntSize, NumElements),
+ Context->UnsignedIntTy, SourceLocation());
+ KeyExprs.push_back(count);
+ ValueExprs.push_back(count);
+ for (unsigned i = 0; i < NumElements; i++) {
+ ObjCDictionaryElement Element = Exp->getKeyValueElement(i);
+ KeyExprs.push_back(Element.Key);
+ ValueExprs.push_back(Element.Value);
+ }
+
+ // (const id [])objects
+ Expr *NSValueCallExpr =
+ new (Context) CallExpr(*Context, NSDictDRE, &ValueExprs[0], ValueExprs.size(),
+ NSDictFType, VK_LValue, SourceLocation());
+
+ FieldDecl *ARRFD = FieldDecl::Create(*Context, 0, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("arr"),
+ Context->getPointerType(Context->VoidPtrTy), 0,
+ /*BitWidth=*/0, /*Mutable=*/true,
+ /*HasInit=*/false);
+ MemberExpr *DictLiteralValueME =
+ new (Context) MemberExpr(NSValueCallExpr, false, ARRFD,
+ SourceLocation(),
+ ARRFD->getType(), VK_LValue,
+ OK_Ordinary);
+ QualType ConstIdT = Context->getObjCIdType().withConst();
+ CStyleCastExpr * DictValueObjects =
+ NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(ConstIdT),
+ CK_BitCast,
+ DictLiteralValueME);
+ // (const id <NSCopying> [])keys
+ Expr *NSKeyCallExpr =
+ new (Context) CallExpr(*Context, NSDictDRE, &KeyExprs[0], KeyExprs.size(),
+ NSDictFType, VK_LValue, SourceLocation());
+
+ MemberExpr *DictLiteralKeyME =
+ new (Context) MemberExpr(NSKeyCallExpr, false, ARRFD,
+ SourceLocation(),
+ ARRFD->getType(), VK_LValue,
+ OK_Ordinary);
+
+ CStyleCastExpr * DictKeyObjects =
+ NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(ConstIdT),
+ CK_BitCast,
+ DictLiteralKeyME);
+
+
+
+ // Synthesize a call to objc_msgSend().
+ SmallVector<Expr*, 32> MsgExprs;
+ SmallVector<Expr*, 4> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ QualType expType = Exp->getType();
+
+ // Create a call to objc_getClass("NSDictionary"). It will be the 1st argument.
+ ObjCInterfaceDecl *Class =
+ expType->getPointeeType()->getAs<ObjCObjectType>()->getInterface();
+
+ IdentifierInfo *clsName = Class->getIdentifier();
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ clsName->getName(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size(),
+ StartLoc, EndLoc);
+ MsgExprs.push_back(Cls);
+
+ // Create a call to sel_registerName("dictionaryWithObjects:forKeys:count:").
+ // It will be the 2nd argument.
+ SmallVector<Expr*, 4> SelExprs;
+ ObjCMethodDecl *DictMethod = Exp->getDictWithObjectsMethod();
+ SelExprs.push_back(StringLiteral::Create(*Context,
+ DictMethod->getSelector().getAsString(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ &SelExprs[0], SelExprs.size(),
+ StartLoc, EndLoc);
+ MsgExprs.push_back(SelExp);
+
+ // (const id [])objects
+ MsgExprs.push_back(DictValueObjects);
+
+ // (const id <NSCopying> [])keys
+ MsgExprs.push_back(DictKeyObjects);
+
+ // (NSUInteger)cnt
+ Expr *cnt = IntegerLiteral::Create(*Context,
+ llvm::APInt(UnsignedIntSize, NumElements),
+ Context->UnsignedIntTy, SourceLocation());
+ MsgExprs.push_back(cnt);
+
+
+ SmallVector<QualType, 8> ArgTypes;
+ ArgTypes.push_back(Context->getObjCIdType());
+ ArgTypes.push_back(Context->getObjCSelType());
+ for (ObjCMethodDecl::param_iterator PI = DictMethod->param_begin(),
+ E = DictMethod->param_end(); PI != E; ++PI) {
+ QualType T = (*PI)->getType();
+ if (const PointerType* PT = T->getAs<PointerType>()) {
+ QualType PointeeTy = PT->getPointeeType();
+ convertToUnqualifiedObjCType(PointeeTy);
+ T = Context->getPointerType(PointeeTy);
+ }
+ ArgTypes.push_back(T);
+ }
+
+ QualType returnType = Exp->getType();
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = MsgSendFlavor->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
+ VK_LValue, SourceLocation());
+
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, DRE);
+
+ // Now do the "normal" pointer to function cast.
+ QualType castType =
+ getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
+ DictMethod->isVariadic());
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
+ MsgExprs.size(),
+ FT->getResultType(), VK_RValue,
+ EndLoc);
+ ReplaceStmt(Exp, CE);
+ return CE;
+}
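+// Illustrative sketch (hypothetical key k and value v): @{ k : v } follows the
+// same pattern, with separate values and keys arrays, roughly
+//   ((NSDictionary *(*)(id, SEL, const id *, const id *, NSUInteger))
+//     (void *)objc_msgSend)((id)objc_getClass("NSDictionary"),
+//       sel_registerName("dictionaryWithObjects:forKeys:count:"),
+//       (const id *)__NSContainer_literal(1, v).arr,
+//       (const id *)__NSContainer_literal(1, k).arr, 1)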
+
+// struct objc_super { struct objc_object *receiver; struct objc_class *super; };
+QualType RewriteModernObjC::getSuperStructType() {
+ if (!SuperStructDecl) {
+ SuperStructDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("objc_super"));
+ QualType FieldTypes[2];
+
+ // struct objc_object *receiver;
+ FieldTypes[0] = Context->getObjCIdType();
+ // struct objc_class *super;
+ FieldTypes[1] = Context->getObjCClassType();
+
+ // Create fields
+ for (unsigned i = 0; i < 2; ++i) {
+ SuperStructDecl->addDecl(FieldDecl::Create(*Context, SuperStructDecl,
+ SourceLocation(),
+ SourceLocation(), 0,
+ FieldTypes[i], 0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ /*HasInit=*/false));
+ }
+
+ SuperStructDecl->completeDefinition();
+ }
+ return Context->getTagDeclType(SuperStructDecl);
+}
+
+QualType RewriteModernObjC::getConstantStringStructType() {
+ if (!ConstantStringDecl) {
+ ConstantStringDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__NSConstantStringImpl"));
+ QualType FieldTypes[4];
+
+ // the constant string class reference (__CFConstantStringClassReference);
+ FieldTypes[0] = Context->getObjCIdType();
+ // int flags;
+ FieldTypes[1] = Context->IntTy;
+ // char *str;
+ FieldTypes[2] = Context->getPointerType(Context->CharTy);
+ // long length;
+ FieldTypes[3] = Context->LongTy;
+
+ // Create fields
+ for (unsigned i = 0; i < 4; ++i) {
+ ConstantStringDecl->addDecl(FieldDecl::Create(*Context,
+ ConstantStringDecl,
+ SourceLocation(),
+ SourceLocation(), 0,
+ FieldTypes[i], 0,
+ /*BitWidth=*/0,
+ /*Mutable=*/true,
+ /*HasInit=*/false));
+ }
+
+ ConstantStringDecl->completeDefinition();
+ }
+ return Context->getTagDeclType(ConstantStringDecl);
+}
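+// Illustrative sketch of the record synthesized above (the FieldDecls are
+// unnamed; the names below are for readability only):
+//   struct __NSConstantStringImpl { id isa; int flags; char *str; long length; };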
+
+Stmt *RewriteModernObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ if (!MsgSendFunctionDecl)
+ SynthMsgSendFunctionDecl();
+ if (!MsgSendSuperFunctionDecl)
+ SynthMsgSendSuperFunctionDecl();
+ if (!MsgSendStretFunctionDecl)
+ SynthMsgSendStretFunctionDecl();
+ if (!MsgSendSuperStretFunctionDecl)
+ SynthMsgSendSuperStretFunctionDecl();
+ if (!MsgSendFpretFunctionDecl)
+ SynthMsgSendFpretFunctionDecl();
+ if (!GetClassFunctionDecl)
+ SynthGetClassFunctionDecl();
+ if (!GetSuperClassFunctionDecl)
+ SynthGetSuperClassFunctionDecl();
+ if (!GetMetaClassFunctionDecl)
+ SynthGetMetaClassFunctionDecl();
+
+ // default to objc_msgSend().
+ FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
+ // May need to use objc_msgSend_stret() as well.
+ FunctionDecl *MsgSendStretFlavor = 0;
+ if (ObjCMethodDecl *mDecl = Exp->getMethodDecl()) {
+ QualType resultType = mDecl->getResultType();
+ if (resultType->isRecordType())
+ MsgSendStretFlavor = MsgSendStretFunctionDecl;
+ else if (resultType->isRealFloatingType())
+ MsgSendFlavor = MsgSendFpretFunctionDecl;
+ }
+
+ // Synthesize a call to objc_msgSend().
+ SmallVector<Expr*, 8> MsgExprs;
+ switch (Exp->getReceiverKind()) {
+ case ObjCMessageExpr::SuperClass: {
+ MsgSendFlavor = MsgSendSuperFunctionDecl;
+ if (MsgSendStretFlavor)
+ MsgSendStretFlavor = MsgSendSuperStretFunctionDecl;
+ assert(MsgSendFlavor && "MsgSendFlavor is NULL!");
+
+ ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface();
+
+ SmallVector<Expr*, 4> InitExprs;
+
+ // set the receiver to self, the first argument to all methods.
+ InitExprs.push_back(
+ NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK_BitCast,
+ new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ false,
+ Context->getObjCIdType(),
+ VK_RValue,
+ SourceLocation()))
+ ); // set the 'receiver'.
+
+ // (id)class_getSuperclass((Class)objc_getMetaClass("CurrentClass"))
+ SmallVector<Expr*, 8> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ ClassDecl->getIdentifier()->getName(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size(),
+ StartLoc,
+ EndLoc);
+ // (Class)objc_getMetaClass("CurrentClass")
+ CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
+ Context->getObjCClassType(),
+ CK_BitCast, Cls);
+ ClsExprs.clear();
+ ClsExprs.push_back(ArgExpr);
+ Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
+ &ClsExprs[0], ClsExprs.size(),
+ StartLoc, EndLoc);
+
+ // (id)class_getSuperclass((Class)objc_getMetaClass("CurrentClass"))
+ // To turn off a warning, type-cast to 'id'
+ InitExprs.push_back( // set 'super class', using class_getSuperclass().
+ NoTypeInfoCStyleCastExpr(Context,
+ Context->getObjCIdType(),
+ CK_BitCast, Cls));
+ // struct objc_super
+ QualType superType = getSuperStructType();
+ Expr *SuperRep;
+
+ if (LangOpts.MicrosoftExt) {
+ SynthSuperContructorFunctionDecl();
+ // Simulate a constructor call...
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl,
+ false, superType, VK_LValue,
+ SourceLocation());
+ SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0],
+ InitExprs.size(),
+ superType, VK_LValue,
+ SourceLocation());
+ // The code for super is a little tricky, to prevent collision with
+ // the structure definition in the header. The rewriter has its own
+ // internal definition (__rw_objc_super) that it uses. This is why
+ // we need the cast below. For example:
+ // (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
+ //
+ SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ SuperRep = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(superType),
+ CK_BitCast, SuperRep);
+ } else {
+ // (struct objc_super) { <exprs from above> }
+ InitListExpr *ILE =
+ new (Context) InitListExpr(*Context, SourceLocation(),
+ &InitExprs[0], InitExprs.size(),
+ SourceLocation());
+ TypeSourceInfo *superTInfo
+ = Context->getTrivialTypeSourceInfo(superType);
+ SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
+ superType, VK_LValue,
+ ILE, false);
+ // struct objc_super *
+ SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ }
+ MsgExprs.push_back(SuperRep);
+ break;
+ }
+
+ case ObjCMessageExpr::Class: {
+ SmallVector<Expr*, 8> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ ObjCInterfaceDecl *Class
+ = Exp->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
+ IdentifierInfo *clsName = Class->getIdentifier();
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ clsName->getName(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size(),
+ StartLoc, EndLoc);
+ MsgExprs.push_back(Cls);
+ break;
+ }
+
+ case ObjCMessageExpr::SuperInstance:{
+ MsgSendFlavor = MsgSendSuperFunctionDecl;
+ if (MsgSendStretFlavor)
+ MsgSendStretFlavor = MsgSendSuperStretFunctionDecl;
+ assert(MsgSendFlavor && "MsgSendFlavor is NULL!");
+ ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface();
+ SmallVector<Expr*, 4> InitExprs;
+
+ InitExprs.push_back(
+ NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK_BitCast,
+ new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ false,
+ Context->getObjCIdType(),
+ VK_RValue, SourceLocation()))
+ ); // set the 'receiver'.
+
+ // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
+ SmallVector<Expr*, 8> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ ClassDecl->getIdentifier()->getName(),
+ StringLiteral::Ascii, false, argType,
+ SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size(),
+ StartLoc, EndLoc);
+ // (Class)objc_getClass("CurrentClass")
+ CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
+ Context->getObjCClassType(),
+ CK_BitCast, Cls);
+ ClsExprs.clear();
+ ClsExprs.push_back(ArgExpr);
+ Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
+ &ClsExprs[0], ClsExprs.size(),
+ StartLoc, EndLoc);
+
+ // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
+ // To turn off a warning, type-cast to 'id'
+ InitExprs.push_back(
+ // set 'super class', using class_getSuperclass().
+ NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK_BitCast, Cls));
+ // struct objc_super
+ QualType superType = getSuperStructType();
+ Expr *SuperRep;
+
+ if (LangOpts.MicrosoftExt) {
+ SynthSuperContructorFunctionDecl();
+ // Simulate a constructor call...
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl,
+ false, superType, VK_LValue,
+ SourceLocation());
+ SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0],
+ InitExprs.size(),
+ superType, VK_LValue, SourceLocation());
+ // The code for super is a little tricky, to prevent collision with
+ // the structure definition in the header. The rewriter has its own
+ // internal definition (__rw_objc_super) that it uses. This is why
+ // we need the cast below. For example:
+ // (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
+ //
+ SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ SuperRep = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(superType),
+ CK_BitCast, SuperRep);
+ } else {
+ // (struct objc_super) { <exprs from above> }
+ InitListExpr *ILE =
+ new (Context) InitListExpr(*Context, SourceLocation(),
+ &InitExprs[0], InitExprs.size(),
+ SourceLocation());
+ TypeSourceInfo *superTInfo
+ = Context->getTrivialTypeSourceInfo(superType);
+ SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
+ superType, VK_RValue, ILE,
+ false);
+ }
+ MsgExprs.push_back(SuperRep);
+ break;
+ }
+
+ case ObjCMessageExpr::Instance: {
+ // Remove all type-casts because they may contain Objective-C-style types;
+ // e.g. Foo<Proto> *.
+ Expr *recExpr = Exp->getInstanceReceiver();
+ while (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(recExpr))
+ recExpr = CE->getSubExpr();
+ CastKind CK = recExpr->getType()->isObjCObjectPointerType()
+ ? CK_BitCast : recExpr->getType()->isBlockPointerType()
+ ? CK_BlockPointerToObjCPointerCast
+ : CK_CPointerToObjCPointerCast;
+
+ recExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK, recExpr);
+ MsgExprs.push_back(recExpr);
+ break;
+ }
+ }
+
+ // Create a call to sel_registerName("selName"), it will be the 2nd argument.
+ SmallVector<Expr*, 8> SelExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ SelExprs.push_back(StringLiteral::Create(*Context,
+ Exp->getSelector().getAsString(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ &SelExprs[0], SelExprs.size(),
+ StartLoc,
+ EndLoc);
+ MsgExprs.push_back(SelExp);
+
+ // Now push any user supplied arguments.
+ for (unsigned i = 0; i < Exp->getNumArgs(); i++) {
+ Expr *userExpr = Exp->getArg(i);
+ // Make all implicit casts explicit...ICE comes in handy:-)
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(userExpr)) {
+ // Reuse the ICE type, it is exactly what the doctor ordered.
+ QualType type = ICE->getType();
+ if (needToScanForQualifiers(type))
+ type = Context->getObjCIdType();
+ // Make sure we convert "type (^)(...)" to "type (*)(...)".
+ (void)convertBlockPointerToFunctionPointer(type);
+ const Expr *SubExpr = ICE->IgnoreParenImpCasts();
+ CastKind CK;
+ if (SubExpr->getType()->isIntegralType(*Context) &&
+ type->isBooleanType()) {
+ CK = CK_IntegralToBoolean;
+ } else if (type->isObjCObjectPointerType()) {
+ if (SubExpr->getType()->isBlockPointerType()) {
+ CK = CK_BlockPointerToObjCPointerCast;
+ } else if (SubExpr->getType()->isPointerType()) {
+ CK = CK_CPointerToObjCPointerCast;
+ } else {
+ CK = CK_BitCast;
+ }
+ } else {
+ CK = CK_BitCast;
+ }
+
+ userExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, userExpr);
+ }
+ // Make id<P...> cast into an 'id' cast.
+ else if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(userExpr)) {
+ if (CE->getType()->isObjCQualifiedIdType()) {
+ while ((CE = dyn_cast<CStyleCastExpr>(userExpr)))
+ userExpr = CE->getSubExpr();
+ CastKind CK;
+ if (userExpr->getType()->isIntegralType(*Context)) {
+ CK = CK_IntegralToPointer;
+ } else if (userExpr->getType()->isBlockPointerType()) {
+ CK = CK_BlockPointerToObjCPointerCast;
+ } else if (userExpr->getType()->isPointerType()) {
+ CK = CK_CPointerToObjCPointerCast;
+ } else {
+ CK = CK_BitCast;
+ }
+ userExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK, userExpr);
+ }
+ }
+ MsgExprs.push_back(userExpr);
+ // We've transferred the ownership to MsgExprs. For now, we *don't* null
+ // out the argument in the original expression (since we aren't deleting
+ // the ObjCMessageExpr). See RewritePropertyOrImplicitSetter() usage for more info.
+ //Exp->setArg(i, 0);
+ }
+ // Generate the funky cast.
+ CastExpr *cast;
+ SmallVector<QualType, 8> ArgTypes;
+ QualType returnType;
+
+ // Push 'id' and 'SEL', the 2 implicit arguments.
+ if (MsgSendFlavor == MsgSendSuperFunctionDecl)
+ ArgTypes.push_back(Context->getPointerType(getSuperStructType()));
+ else
+ ArgTypes.push_back(Context->getObjCIdType());
+ ArgTypes.push_back(Context->getObjCSelType());
+ if (ObjCMethodDecl *OMD = Exp->getMethodDecl()) {
+ // Push any user argument types.
+ for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
+ E = OMD->param_end(); PI != E; ++PI) {
+ QualType t = (*PI)->getType()->isObjCQualifiedIdType()
+ ? Context->getObjCIdType()
+ : (*PI)->getType();
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ (void)convertBlockPointerToFunctionPointer(t);
+ ArgTypes.push_back(t);
+ }
+ returnType = Exp->getType();
+ convertToUnqualifiedObjCType(returnType);
+ (void)convertBlockPointerToFunctionPointer(returnType);
+ } else {
+ returnType = Context->getObjCIdType();
+ }
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = MsgSendFlavor->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
+ VK_LValue, SourceLocation());
+
+ // Need to cast objc_msgSend to "void *" (to work around a GCC band-aid).
+ // If we don't do this cast, we get the following bizarre warning/note:
+ // xx.m:13: warning: function called through a non-compatible type
+ // xx.m:13: note: if this code is reached, the program will abort
+ cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, DRE);
+
+ // Now do the "normal" pointer to function cast.
+ QualType castType =
+ getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
+ // If we don't have a method decl, force a variadic cast.
+ Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : true);
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
+ MsgExprs.size(),
+ FT->getResultType(), VK_RValue,
+ EndLoc);
+ Stmt *ReplacingStmt = CE;
+ if (MsgSendStretFlavor) {
+ // We have a method which returns a struct/union. We must also generate a
+ // call to objc_msgSend_stret and hang both varieties on a conditional
+ // expression which dictates which one to invoke, depending on the size of
+ // the method's return type.
+
+ // Create a reference to the objc_msgSend_stret() declaration.
+ DeclRefExpr *STDRE = new (Context) DeclRefExpr(MsgSendStretFlavor,
+ false, msgSendType,
+ VK_LValue, SourceLocation());
+ // Need to cast objc_msgSend_stret to "void *" (see above comment).
+ cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, STDRE);
+ // Now do the "normal" pointer to function cast.
+ castType = getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
+ Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : false);
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), cast);
+
+ FT = msgSendType->getAs<FunctionType>();
+ CallExpr *STCE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
+ MsgExprs.size(),
+ FT->getResultType(), VK_RValue,
+ SourceLocation());
+
+ // Build sizeof(returnType)
+ UnaryExprOrTypeTraitExpr *sizeofExpr =
+ new (Context) UnaryExprOrTypeTraitExpr(UETT_SizeOf,
+ Context->getTrivialTypeSourceInfo(returnType),
+ Context->getSizeType(), SourceLocation(),
+ SourceLocation());
+ // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
+ // FIXME: The value of 8 is based on the ppc32/x86 ABI for the most common
+ // cases. For x86 it is more complicated, and some kind of target-specific
+ // routine is needed to decide what to do.
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+ IntegerLiteral *limit = IntegerLiteral::Create(*Context,
+ llvm::APInt(IntSize, 8),
+ Context->IntTy,
+ SourceLocation());
+ BinaryOperator *lessThanExpr =
+ new (Context) BinaryOperator(sizeofExpr, limit, BO_LE, Context->IntTy,
+ VK_RValue, OK_Ordinary, SourceLocation());
+ // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
+ ConditionalOperator *CondExpr =
+ new (Context) ConditionalOperator(lessThanExpr,
+ SourceLocation(), CE,
+ SourceLocation(), STCE,
+ returnType, VK_RValue, OK_Ordinary);
+ ReplacingStmt = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ CondExpr);
+ }
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return ReplacingStmt;
+}
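+// Illustrative sketch (hypothetical receiver/selector, not verbatim):
+// "[obj doWork:x]" is synthesized as
+//   ((id (*)(id, SEL, id))(void *)objc_msgSend)
+//     ((id)obj, sel_registerName("doWork:"), (id)x)
+// and, for struct-returning methods, the call is additionally wrapped as
+//   (sizeof(RetTy) <= 8 ? <objc_msgSend call> : <objc_msgSend_stret call>)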
+
+Stmt *RewriteModernObjC::RewriteMessageExpr(ObjCMessageExpr *Exp) {
+ Stmt *ReplacingStmt = SynthMessageExpr(Exp, Exp->getLocStart(),
+ Exp->getLocEnd());
+
+ // Now do the actual rewrite.
+ ReplaceStmt(Exp, ReplacingStmt);
+
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return ReplacingStmt;
+}
+
+// typedef struct objc_object Protocol;
+QualType RewriteModernObjC::getProtocolType() {
+ if (!ProtocolTypeDecl) {
+ TypeSourceInfo *TInfo
+ = Context->getTrivialTypeSourceInfo(Context->getObjCIdType());
+ ProtocolTypeDecl = TypedefDecl::Create(*Context, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("Protocol"),
+ TInfo);
+ }
+ return Context->getTypeDeclType(ProtocolTypeDecl);
+}
+
+/// RewriteObjCProtocolExpr - Rewrite a protocol expression into
+/// a synthesized/forward data reference (to the protocol's metadata).
+/// The forward references (and metadata) are generated in
+/// RewriteModernObjC::HandleTranslationUnit().
+Stmt *RewriteModernObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) {
+ std::string Name = "_OBJC_PROTOCOL_REFERENCE_$_" +
+ Exp->getProtocol()->getNameAsString();
+ IdentifierInfo *ID = &Context->Idents.get(Name);
+ VarDecl *VD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), ID, getProtocolType(), 0,
+ SC_Extern, SC_None);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, false, getProtocolType(),
+ VK_LValue, SourceLocation());
+ Expr *DerefExpr = new (Context) UnaryOperator(DRE, UO_AddrOf,
+ Context->getPointerType(DRE->getType()),
+ VK_RValue, OK_Ordinary, SourceLocation());
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, DerefExpr->getType(),
+ CK_BitCast,
+ DerefExpr);
+ ReplaceStmt(Exp, castExpr);
+ ProtocolExprDecls.insert(Exp->getProtocol()->getCanonicalDecl());
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return castExpr;
+
+}
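+
+// Illustrative sketch (hypothetical protocol name "Foo"): the expression
+//   @protocol(Foo)
+// is rewritten above into roughly
+//   (Protocol *)&_OBJC_PROTOCOL_REFERENCE_$_Foo
+// where the referenced variable is emitted later in HandleTranslationUnit().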
+
+bool RewriteModernObjC::BufferContainsPPDirectives(const char *startBuf,
+ const char *endBuf) {
+ while (startBuf < endBuf) {
+ if (*startBuf == '#') {
+ // Skip whitespace.
+ for (++startBuf; startBuf[0] == ' ' || startBuf[0] == '\t'; ++startBuf)
+ ;
+ if (!strncmp(startBuf, "if", strlen("if")) ||
+ !strncmp(startBuf, "ifdef", strlen("ifdef")) ||
+ !strncmp(startBuf, "ifndef", strlen("ifndef")) ||
+ !strncmp(startBuf, "define", strlen("define")) ||
+ !strncmp(startBuf, "undef", strlen("undef")) ||
+ !strncmp(startBuf, "else", strlen("else")) ||
+ !strncmp(startBuf, "elif", strlen("elif")) ||
+ !strncmp(startBuf, "endif", strlen("endif")) ||
+ !strncmp(startBuf, "pragma", strlen("pragma")) ||
+ !strncmp(startBuf, "include", strlen("include")) ||
+ !strncmp(startBuf, "import", strlen("import")) ||
+ !strncmp(startBuf, "include_next", strlen("include_next")))
+ return true;
+ }
+ startBuf++;
+ }
+ return false;
+}
+
+/// RewriteObjCFieldDeclType - This routine rewrites a type into the buffer.
+/// It handles elaborated types, as well as enum types in the process.
+bool RewriteModernObjC::RewriteObjCFieldDeclType(QualType &Type,
+ std::string &Result) {
+ if (Type->isArrayType()) {
+ QualType ElemTy = Context->getBaseElementType(Type);
+ return RewriteObjCFieldDeclType(ElemTy, Result);
+ }
+ else if (Type->isRecordType()) {
+ RecordDecl *RD = Type->getAs<RecordType>()->getDecl();
+ if (RD->isCompleteDefinition()) {
+ if (RD->isStruct())
+ Result += "\n\tstruct ";
+ else if (RD->isUnion())
+ Result += "\n\tunion ";
+ else
+ assert(false && "class not allowed as an ivar type");
+
+ Result += RD->getName();
+ if (TagsDefinedInIvarDecls.count(RD)) {
+ // This struct is already defined. Do not write its definition again.
+ Result += " ";
+ return true;
+ }
+ TagsDefinedInIvarDecls.insert(RD);
+ Result += " {\n";
+ for (RecordDecl::field_iterator i = RD->field_begin(),
+ e = RD->field_end(); i != e; ++i) {
+ FieldDecl *FD = *i;
+ RewriteObjCFieldDecl(FD, Result);
+ }
+ Result += "\t} ";
+ return true;
+ }
+ }
+ else if (Type->isEnumeralType()) {
+ EnumDecl *ED = Type->getAs<EnumType>()->getDecl();
+ if (ED->isCompleteDefinition()) {
+ Result += "\n\tenum ";
+ Result += ED->getName();
+ if (TagsDefinedInIvarDecls.count(ED)) {
+ // This enum is already defined. Do not write its definition again.
+ Result += " ";
+ return true;
+ }
+ TagsDefinedInIvarDecls.insert(ED);
+
+ Result += " {\n";
+ for (EnumDecl::enumerator_iterator EC = ED->enumerator_begin(),
+ ECEnd = ED->enumerator_end(); EC != ECEnd; ++EC) {
+ Result += "\t"; Result += EC->getName(); Result += " = ";
+ llvm::APSInt Val = EC->getInitVal();
+ Result += Val.toString(10);
+ Result += ",\n";
+ }
+ Result += "\t} ";
+ return true;
+ }
+ }
+
+ Result += "\t";
+ convertObjCTypeToCStyleType(Type);
+ return false;
+}
+
+
+/// RewriteObjCFieldDecl - This routine rewrites a field into the buffer.
+/// It handles elaborated types, as well as enum types in the process.
+void RewriteModernObjC::RewriteObjCFieldDecl(FieldDecl *fieldDecl,
+ std::string &Result) {
+ QualType Type = fieldDecl->getType();
+ std::string Name = fieldDecl->getNameAsString();
+
+  bool ElaboratedType = RewriteObjCFieldDeclType(Type, Result);
+  if (!ElaboratedType)
+ Type.getAsStringInternal(Name, Context->getPrintingPolicy());
+ Result += Name;
+ if (fieldDecl->isBitField()) {
+ Result += " : "; Result += utostr(fieldDecl->getBitWidthValue(*Context));
+ }
+  else if (ElaboratedType && Type->isArrayType()) {
+ CanQualType CType = Context->getCanonicalType(Type);
+ while (isa<ArrayType>(CType)) {
+ if (const ConstantArrayType *CAT = Context->getAsConstantArrayType(CType)) {
+ Result += "[";
+ llvm::APInt Dim = CAT->getSize();
+ Result += utostr(Dim.getZExtValue());
+ Result += "]";
+ }
+ CType = CType->getAs<ArrayType>()->getElementType();
+ }
+ }
+
+ Result += ";\n";
+}
+
+/// RewriteObjCInternalStruct - Rewrite one internal struct corresponding to
+/// an objective-c class with ivars.
+void RewriteModernObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
+ std::string &Result) {
+ assert(CDecl && "Class missing in SynthesizeObjCInternalStruct");
+ assert(CDecl->getName() != "" &&
+ "Name missing in SynthesizeObjCInternalStruct");
+ ObjCInterfaceDecl *RCDecl = CDecl->getSuperClass();
+ SmallVector<ObjCIvarDecl *, 8> IVars;
+ for (ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar())
+ IVars.push_back(IVD);
+
+ SourceLocation LocStart = CDecl->getLocStart();
+ SourceLocation LocEnd = CDecl->getEndOfDefinitionLoc();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+
+  // If this class has no ivars (or is not a definition) and its root class,
+  // directly or indirectly, has no ivars (and thus was not synthesized),
+  // then there is no need to synthesize this class.
+ if ((!CDecl->isThisDeclarationADefinition() || IVars.size() == 0) &&
+ (!RCDecl || !ObjCSynthesizedStructs.count(RCDecl))) {
+ endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
+ ReplaceText(LocStart, endBuf-startBuf, Result);
+ return;
+ }
+
+ Result += "\nstruct ";
+ Result += CDecl->getNameAsString();
+ Result += "_IMPL {\n";
+
+ if (RCDecl && ObjCSynthesizedStructs.count(RCDecl)) {
+ Result += "\tstruct "; Result += RCDecl->getNameAsString();
+ Result += "_IMPL "; Result += RCDecl->getNameAsString();
+ Result += "_IVARS;\n";
+ }
+ TagsDefinedInIvarDecls.clear();
+ for (unsigned i = 0, e = IVars.size(); i < e; i++)
+ RewriteObjCFieldDecl(IVars[i], Result);
+
+ Result += "};\n";
+ endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
+ ReplaceText(LocStart, endBuf-startBuf, Result);
+ // Mark this struct as having been generated.
+ if (!ObjCSynthesizedStructs.insert(CDecl))
+    llvm_unreachable("struct already synthesized - RewriteObjCInternalStruct");
+}
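+
+// Illustrative sketch (hypothetical classes "Foo" : "NSObject", one ivar):
+//   @interface Foo : NSObject { int x; } @end
+// is rewritten above into roughly
+//   struct Foo_IMPL {
+//     struct NSObject_IMPL NSObject_IVARS;
+//     int x;
+//   };
+// (the superclass member is only emitted when the superclass itself has a
+// synthesized _IMPL struct).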
+
+static void WriteInternalIvarName(ObjCInterfaceDecl *IDecl,
+ ObjCIvarDecl *IvarDecl, std::string &Result) {
+ Result += "OBJC_IVAR_$_";
+ Result += IDecl->getName();
+ Result += "$";
+ Result += IvarDecl->getName();
+}
+
+/// RewriteIvarOffsetSymbols - Rewrite ivar offset symbols of those ivars which
+/// have been referenced in an ivar access expression.
+void RewriteModernObjC::RewriteIvarOffsetSymbols(ObjCInterfaceDecl *CDecl,
+ std::string &Result) {
+ // write out ivar offset symbols which have been referenced in an ivar
+ // access expression.
+ llvm::SmallPtrSet<ObjCIvarDecl *, 8> Ivars = ReferencedIvars[CDecl];
+ if (Ivars.empty())
+ return;
+ for (llvm::SmallPtrSet<ObjCIvarDecl *, 8>::iterator i = Ivars.begin(),
+ e = Ivars.end(); i != e; i++) {
+ ObjCIvarDecl *IvarDecl = (*i);
+ Result += "\n";
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_ivar$B\")) ";
+ Result += "extern \"C\" ";
+ if (LangOpts.MicrosoftExt &&
+ IvarDecl->getAccessControl() != ObjCIvarDecl::Private &&
+ IvarDecl->getAccessControl() != ObjCIvarDecl::Package)
+ Result += "__declspec(dllimport) ";
+
+ Result += "unsigned long ";
+ WriteInternalIvarName(CDecl, IvarDecl, Result);
+ Result += ";";
+ }
+}
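+
+// Illustrative sketch (hypothetical class "Foo" with referenced ivar "x"):
+// the loop above emits roughly
+//   extern "C" unsigned long OBJC_IVAR_$_Foo$x;
+// with __declspec(allocate(".objc_ivar$B")) and, for non-private/non-package
+// ivars, __declspec(dllimport) added when Microsoft extensions are enabled.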
+
+//===----------------------------------------------------------------------===//
+// Meta Data Emission
+//===----------------------------------------------------------------------===//
+
+
+/// RewriteImplementations - This routine rewrites all method implementations
+/// and emits meta-data.
+
+void RewriteModernObjC::RewriteImplementations() {
+ int ClsDefCount = ClassImplementation.size();
+ int CatDefCount = CategoryImplementation.size();
+
+ // Rewrite implemented methods
+ for (int i = 0; i < ClsDefCount; i++) {
+ ObjCImplementationDecl *OIMP = ClassImplementation[i];
+ ObjCInterfaceDecl *CDecl = OIMP->getClassInterface();
+ if (CDecl->isImplicitInterfaceDecl())
+ assert(false &&
+ "Legacy implicit interface rewriting not supported in moder abi");
+ RewriteImplementationDecl(OIMP);
+ }
+
+ for (int i = 0; i < CatDefCount; i++) {
+ ObjCCategoryImplDecl *CIMP = CategoryImplementation[i];
+ ObjCInterfaceDecl *CDecl = CIMP->getClassInterface();
+ if (CDecl->isImplicitInterfaceDecl())
+ assert(false &&
+ "Legacy implicit interface rewriting not supported in moder abi");
+ RewriteImplementationDecl(CIMP);
+ }
+}
+
+void RewriteModernObjC::RewriteByRefString(std::string &ResultStr,
+ const std::string &Name,
+ ValueDecl *VD, bool def) {
+ assert(BlockByRefDeclNo.count(VD) &&
+ "RewriteByRefString: ByRef decl missing");
+ if (def)
+ ResultStr += "struct ";
+ ResultStr += "__Block_byref_" + Name +
+ "_" + utostr(BlockByRefDeclNo[VD]) ;
+}
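+
+// Illustrative sketch: for a __block variable "x" whose byref decl number is
+// 2, this produces "__Block_byref_x_2" (with "struct " prepended when 'def'
+// is true).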
+
+static bool HasLocalVariableExternalStorage(ValueDecl *VD) {
+ if (VarDecl *Var = dyn_cast<VarDecl>(VD))
+ return (Var->isFunctionOrMethodVarDecl() && !Var->hasLocalStorage());
+ return false;
+}
+
+std::string RewriteModernObjC::SynthesizeBlockFunc(BlockExpr *CE, int i,
+ StringRef funcName,
+ std::string Tag) {
+ const FunctionType *AFT = CE->getFunctionType();
+ QualType RT = AFT->getResultType();
+ std::string StructRef = "struct " + Tag;
+ std::string S = "static " + RT.getAsString(Context->getPrintingPolicy()) + " __" +
+ funcName.str() + "_block_func_" + utostr(i);
+
+ BlockDecl *BD = CE->getBlockDecl();
+
+ if (isa<FunctionNoProtoType>(AFT)) {
+ // No user-supplied arguments. Still need to pass in a pointer to the
+ // block (to reference imported block decl refs).
+ S += "(" + StructRef + " *__cself)";
+ } else if (BD->param_empty()) {
+ S += "(" + StructRef + " *__cself)";
+ } else {
+ const FunctionProtoType *FT = cast<FunctionProtoType>(AFT);
+ assert(FT && "SynthesizeBlockFunc: No function proto");
+ S += '(';
+ // first add the implicit argument.
+ S += StructRef + " *__cself, ";
+ std::string ParamStr;
+ for (BlockDecl::param_iterator AI = BD->param_begin(),
+ E = BD->param_end(); AI != E; ++AI) {
+ if (AI != BD->param_begin()) S += ", ";
+ ParamStr = (*AI)->getNameAsString();
+ QualType QT = (*AI)->getType();
+ (void)convertBlockPointerToFunctionPointer(QT);
+ QT.getAsStringInternal(ParamStr, Context->getPrintingPolicy());
+ S += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (!BD->param_empty()) S += ", ";
+ S += "...";
+ }
+ S += ')';
+ }
+ S += " {\n";
+
+ // Create local declarations to avoid rewriting all closure decl ref exprs.
+ // First, emit a declaration for all "by ref" decls.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string Name = (*I)->getNameAsString();
+ std::string TypeString;
+ RewriteByRefString(TypeString, Name, (*I));
+ TypeString += " *";
+ Name = TypeString + Name;
+ S += Name + " = __cself->" + (*I)->getNameAsString() + "; // bound by ref\n";
+ }
+ // Next, emit a declaration for all "by copy" declarations.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ S += " ";
+ // Handle nested closure invocation. For example:
+ //
+ // void (^myImportedClosure)(void);
+ // myImportedClosure = ^(void) { setGlobalInt(x + y); };
+ //
+ // void (^anotherClosure)(void);
+ // anotherClosure = ^(void) {
+ // myImportedClosure(); // import and invoke the closure
+ // };
+ //
+ if (isTopLevelBlockPointerType((*I)->getType())) {
+ RewriteBlockPointerTypeVariable(S, (*I));
+ S += " = (";
+ RewriteBlockPointerType(S, (*I)->getType());
+ S += ")";
+ S += "__cself->" + (*I)->getNameAsString() + "; // bound by copy\n";
+ }
+ else {
+ std::string Name = (*I)->getNameAsString();
+ QualType QT = (*I)->getType();
+ if (HasLocalVariableExternalStorage(*I))
+ QT = Context->getPointerType(QT);
+ QT.getAsStringInternal(Name, Context->getPrintingPolicy());
+ S += Name + " = __cself->" +
+ (*I)->getNameAsString() + "; // bound by copy\n";
+ }
+ }
+ std::string RewrittenStr = RewrittenBlockExprs[CE];
+ const char *cstr = RewrittenStr.c_str();
+ while (*cstr++ != '{') ;
+ S += cstr;
+ S += "\n";
+ return S;
+}
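+
+// Illustrative sketch (hypothetical function "foo", block index 0, capturing
+// "c" by copy and "b" by ref):
+//   static void __foo_block_func_0(struct __foo_block_impl_0 *__cself, int arg) {
+//     __Block_byref_b_0 *b = __cself->b; // bound by ref
+//     int c = __cself->c; // bound by copy
+//     ... /* block body copied from the rewritten block literal */ ...
+//   }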
+
+std::string RewriteModernObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
+ StringRef funcName,
+ std::string Tag) {
+ std::string StructRef = "struct " + Tag;
+ std::string S = "static void __";
+
+ S += funcName;
+ S += "_block_copy_" + utostr(i);
+ S += "(" + StructRef;
+ S += "*dst, " + StructRef;
+ S += "*src) {";
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = ImportedBlockDecls.begin(),
+ E = ImportedBlockDecls.end(); I != E; ++I) {
+ ValueDecl *VD = (*I);
+ S += "_Block_object_assign((void*)&dst->";
+ S += (*I)->getNameAsString();
+ S += ", (void*)src->";
+ S += (*I)->getNameAsString();
+ if (BlockByRefDeclsPtrSet.count((*I)))
+ S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
+ else if (VD->getType()->isBlockPointerType())
+ S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
+ else
+ S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);";
+ }
+ S += "}\n";
+
+ S += "\nstatic void __";
+ S += funcName;
+ S += "_block_dispose_" + utostr(i);
+ S += "(" + StructRef;
+ S += "*src) {";
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = ImportedBlockDecls.begin(),
+ E = ImportedBlockDecls.end(); I != E; ++I) {
+ ValueDecl *VD = (*I);
+ S += "_Block_object_dispose((void*)src->";
+ S += (*I)->getNameAsString();
+ if (BlockByRefDeclsPtrSet.count((*I)))
+ S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
+ else if (VD->getType()->isBlockPointerType())
+ S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
+ else
+ S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);";
+ }
+ S += "}\n";
+ return S;
+}
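+
+// Illustrative sketch (hypothetical block 0 in "foo" importing an ObjC
+// object "obj"; the BLOCK_FIELD_* constant is emitted as its numeric value):
+//   static void __foo_block_copy_0(struct __foo_block_impl_0*dst,
+//                                  struct __foo_block_impl_0*src) {
+//     _Block_object_assign((void*)&dst->obj, (void*)src->obj,
+//                          BLOCK_FIELD_IS_OBJECT);
+//   }
+//   static void __foo_block_dispose_0(struct __foo_block_impl_0*src) {
+//     _Block_object_dispose((void*)src->obj, BLOCK_FIELD_IS_OBJECT);
+//   }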
+
+std::string RewriteModernObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag,
+ std::string Desc) {
+ std::string S = "\nstruct " + Tag;
+ std::string Constructor = " " + Tag;
+
+ S += " {\n struct __block_impl impl;\n";
+ S += " struct " + Desc;
+ S += "* Desc;\n";
+
+ Constructor += "(void *fp, "; // Invoke function pointer.
+ Constructor += "struct " + Desc; // Descriptor pointer.
+ Constructor += " *desc";
+
+ if (BlockDeclRefs.size()) {
+ // Output all "by copy" declarations.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string FieldName = (*I)->getNameAsString();
+ std::string ArgName = "_" + FieldName;
+ // Handle nested closure invocation. For example:
+ //
+ // void (^myImportedBlock)(void);
+ // myImportedBlock = ^(void) { setGlobalInt(x + y); };
+ //
+ // void (^anotherBlock)(void);
+ // anotherBlock = ^(void) {
+ // myImportedBlock(); // import and invoke the closure
+ // };
+ //
+ if (isTopLevelBlockPointerType((*I)->getType())) {
+ S += "struct __block_impl *";
+ Constructor += ", void *" + ArgName;
+ } else {
+ QualType QT = (*I)->getType();
+ if (HasLocalVariableExternalStorage(*I))
+ QT = Context->getPointerType(QT);
+ QT.getAsStringInternal(FieldName, Context->getPrintingPolicy());
+ QT.getAsStringInternal(ArgName, Context->getPrintingPolicy());
+ Constructor += ", " + ArgName;
+ }
+ S += FieldName + ";\n";
+ }
+ // Output all "by ref" declarations.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string FieldName = (*I)->getNameAsString();
+ std::string ArgName = "_" + FieldName;
+ {
+ std::string TypeString;
+ RewriteByRefString(TypeString, FieldName, (*I));
+ TypeString += " *";
+ FieldName = TypeString + FieldName;
+ ArgName = TypeString + ArgName;
+ Constructor += ", " + ArgName;
+ }
+ S += FieldName + "; // by ref\n";
+ }
+ // Finish writing the constructor.
+ Constructor += ", int flags=0)";
+ // Initialize all "by copy" arguments.
+ bool firsTime = true;
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ std::string Name = (*I)->getNameAsString();
+ if (firsTime) {
+ Constructor += " : ";
+ firsTime = false;
+ }
+ else
+ Constructor += ", ";
+ if (isTopLevelBlockPointerType((*I)->getType()))
+ Constructor += Name + "((struct __block_impl *)_" + Name + ")";
+ else
+ Constructor += Name + "(_" + Name + ")";
+ }
+ // Initialize all "by ref" arguments.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ std::string Name = (*I)->getNameAsString();
+ if (firsTime) {
+ Constructor += " : ";
+ firsTime = false;
+ }
+ else
+ Constructor += ", ";
+ Constructor += Name + "(_" + Name + "->__forwarding)";
+ }
+
+ Constructor += " {\n";
+ if (GlobalVarDecl)
+ Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n";
+ else
+ Constructor += " impl.isa = &_NSConcreteStackBlock;\n";
+ Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n";
+
+ Constructor += " Desc = desc;\n";
+ } else {
+ // Finish writing the constructor.
+ Constructor += ", int flags=0) {\n";
+ if (GlobalVarDecl)
+ Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n";
+ else
+ Constructor += " impl.isa = &_NSConcreteStackBlock;\n";
+ Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n";
+ Constructor += " Desc = desc;\n";
+ }
+ Constructor += " ";
+ Constructor += "}\n";
+ S += Constructor;
+ S += "};\n";
+ return S;
+}
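+
+// Illustrative sketch (hypothetical block 0 in "foo", capturing "c" by copy
+// and "b" by ref):
+//   struct __foo_block_impl_0 {
+//     struct __block_impl impl;
+//     struct __foo_block_desc_0* Desc;
+//     int c;
+//     __Block_byref_b_0 *b; // by ref
+//     __foo_block_impl_0(void *fp, struct __foo_block_desc_0 *desc,
+//                        int _c, __Block_byref_b_0 *_b, int flags=0)
+//       : c(_c), b(_b->__forwarding) {
+//       impl.isa = &_NSConcreteStackBlock;
+//       impl.Flags = flags;
+//       impl.FuncPtr = fp;
+//       Desc = desc;
+//     }
+//   };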
+
+std::string RewriteModernObjC::SynthesizeBlockDescriptor(std::string DescTag,
+ std::string ImplTag, int i,
+ StringRef FunName,
+ unsigned hasCopy) {
+ std::string S = "\nstatic struct " + DescTag;
+
+ S += " {\n unsigned long reserved;\n";
+ S += " unsigned long Block_size;\n";
+ if (hasCopy) {
+ S += " void (*copy)(struct ";
+ S += ImplTag; S += "*, struct ";
+ S += ImplTag; S += "*);\n";
+
+ S += " void (*dispose)(struct ";
+ S += ImplTag; S += "*);\n";
+ }
+ S += "} ";
+
+ S += DescTag + "_DATA = { 0, sizeof(struct ";
+ S += ImplTag + ")";
+ if (hasCopy) {
+ S += ", __" + FunName.str() + "_block_copy_" + utostr(i);
+ S += ", __" + FunName.str() + "_block_dispose_" + utostr(i);
+ }
+ S += "};\n";
+ return S;
+}
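+
+// Illustrative sketch (hypothetical block 0 in "foo" with copy/dispose
+// helpers):
+//   static struct __foo_block_desc_0 {
+//     unsigned long reserved;
+//     unsigned long Block_size;
+//     void (*copy)(struct __foo_block_impl_0*, struct __foo_block_impl_0*);
+//     void (*dispose)(struct __foo_block_impl_0*);
+//   } __foo_block_desc_0_DATA = { 0, sizeof(struct __foo_block_impl_0),
+//                                 __foo_block_copy_0, __foo_block_dispose_0 };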
+
+void RewriteModernObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
+ StringRef FunName) {
+ // Insert declaration for the function in which block literal is used.
+ if (CurFunctionDeclToDeclareForBlock && !Blocks.empty())
+ RewriteBlockLiteralFunctionDecl(CurFunctionDeclToDeclareForBlock);
+ bool RewriteSC = (GlobalVarDecl &&
+ !Blocks.empty() &&
+ GlobalVarDecl->getStorageClass() == SC_Static &&
+ GlobalVarDecl->getType().getCVRQualifiers());
+ if (RewriteSC) {
+ std::string SC(" void __");
+ SC += GlobalVarDecl->getNameAsString();
+ SC += "() {}";
+ InsertText(FunLocStart, SC);
+ }
+
+ // Insert closures that were part of the function.
+ for (unsigned i = 0, count=0; i < Blocks.size(); i++) {
+ CollectBlockDeclRefInfo(Blocks[i]);
+    // Need to copy in the variables that inner blocks capture but that are
+    // not actually used in this block.
+ for (int j = 0; j < InnerDeclRefsCount[i]; j++) {
+ DeclRefExpr *Exp = InnerDeclRefs[count++];
+ ValueDecl *VD = Exp->getDecl();
+ BlockDeclRefs.push_back(Exp);
+ if (!VD->hasAttr<BlocksAttr>()) {
+ if (!BlockByCopyDeclsPtrSet.count(VD)) {
+ BlockByCopyDeclsPtrSet.insert(VD);
+ BlockByCopyDecls.push_back(VD);
+ }
+ continue;
+ }
+
+ if (!BlockByRefDeclsPtrSet.count(VD)) {
+ BlockByRefDeclsPtrSet.insert(VD);
+ BlockByRefDecls.push_back(VD);
+ }
+
+ // imported objects in the inner blocks not used in the outer
+ // blocks must be copied/disposed in the outer block as well.
+ if (VD->getType()->isObjCObjectPointerType() ||
+ VD->getType()->isBlockPointerType())
+ ImportedBlockDecls.insert(VD);
+ }
+
+ std::string ImplTag = "__" + FunName.str() + "_block_impl_" + utostr(i);
+ std::string DescTag = "__" + FunName.str() + "_block_desc_" + utostr(i);
+
+ std::string CI = SynthesizeBlockImpl(Blocks[i], ImplTag, DescTag);
+
+ InsertText(FunLocStart, CI);
+
+ std::string CF = SynthesizeBlockFunc(Blocks[i], i, FunName, ImplTag);
+
+ InsertText(FunLocStart, CF);
+
+ if (ImportedBlockDecls.size()) {
+ std::string HF = SynthesizeBlockHelperFuncs(Blocks[i], i, FunName, ImplTag);
+ InsertText(FunLocStart, HF);
+ }
+ std::string BD = SynthesizeBlockDescriptor(DescTag, ImplTag, i, FunName,
+ ImportedBlockDecls.size() > 0);
+ InsertText(FunLocStart, BD);
+
+ BlockDeclRefs.clear();
+ BlockByRefDecls.clear();
+ BlockByRefDeclsPtrSet.clear();
+ BlockByCopyDecls.clear();
+ BlockByCopyDeclsPtrSet.clear();
+ ImportedBlockDecls.clear();
+ }
+ if (RewriteSC) {
+    // Must insert any 'const'/'volatile'/'static' here, since it has been
+    // removed as a result of rewriting the block literals.
+ std::string SC;
+ if (GlobalVarDecl->getStorageClass() == SC_Static)
+ SC = "static ";
+ if (GlobalVarDecl->getType().isConstQualified())
+ SC += "const ";
+ if (GlobalVarDecl->getType().isVolatileQualified())
+ SC += "volatile ";
+ if (GlobalVarDecl->getType().isRestrictQualified())
+ SC += "restrict ";
+ InsertText(FunLocStart, SC);
+ }
+ if (GlobalConstructionExp) {
+ // extra fancy dance for global literal expression.
+
+ // Always the latest block expression on the block stack.
+ std::string Tag = "__";
+ Tag += FunName;
+ Tag += "_block_impl_";
+ Tag += utostr(Blocks.size()-1);
+ std::string globalBuf = "static ";
+ globalBuf += Tag; globalBuf += " ";
+ std::string SStr;
+
+ llvm::raw_string_ostream constructorExprBuf(SStr);
+ GlobalConstructionExp->printPretty(constructorExprBuf, *Context, 0,
+ PrintingPolicy(LangOpts));
+ globalBuf += constructorExprBuf.str();
+ globalBuf += ";\n";
+ InsertText(FunLocStart, globalBuf);
+ GlobalConstructionExp = 0;
+ }
+
+ Blocks.clear();
+ InnerDeclRefsCount.clear();
+ InnerDeclRefs.clear();
+ RewrittenBlockExprs.clear();
+}
+
+void RewriteModernObjC::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) {
+ SourceLocation FunLocStart = FD->getTypeSpecStartLoc();
+ StringRef FuncName = FD->getName();
+
+ SynthesizeBlockLiterals(FunLocStart, FuncName);
+}
+
+static void BuildUniqueMethodName(std::string &Name,
+ ObjCMethodDecl *MD) {
+ ObjCInterfaceDecl *IFace = MD->getClassInterface();
+ Name = IFace->getName();
+ Name += "__" + MD->getSelector().getAsString();
+ // Convert colons to underscores.
+ std::string::size_type loc = 0;
+ while ((loc = Name.find(":", loc)) != std::string::npos)
+ Name.replace(loc, 1, "_");
+}
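+
+// Illustrative sketch: for a method -(void)doX:(int)x withY:(int)y of class
+// Foo, this produces the name "Foo__doX_withY_".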
+
+void RewriteModernObjC::InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD) {
+  //fprintf(stderr,"In InsertBlockLiteralsWithinMethod\n");
+ //SourceLocation FunLocStart = MD->getLocStart();
+ SourceLocation FunLocStart = MD->getLocStart();
+ std::string FuncName;
+ BuildUniqueMethodName(FuncName, MD);
+ SynthesizeBlockLiterals(FunLocStart, FuncName);
+}
+
+void RewriteModernObjC::GetBlockDeclRefExprs(Stmt *S) {
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
+ if (*CI) {
+ if (BlockExpr *CBE = dyn_cast<BlockExpr>(*CI))
+ GetBlockDeclRefExprs(CBE->getBody());
+ else
+ GetBlockDeclRefExprs(*CI);
+ }
+ // Handle specific things.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S))
+ if (DRE->refersToEnclosingLocal() &&
+ HasLocalVariableExternalStorage(DRE->getDecl())) {
+ BlockDeclRefs.push_back(DRE);
+ }
+
+ return;
+}
+
+void RewriteModernObjC::GetInnerBlockDeclRefExprs(Stmt *S,
+ SmallVector<DeclRefExpr *, 8> &InnerBlockDeclRefs,
+ llvm::SmallPtrSet<const DeclContext *, 8> &InnerContexts) {
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
+ if (*CI) {
+ if (BlockExpr *CBE = dyn_cast<BlockExpr>(*CI)) {
+ InnerContexts.insert(cast<DeclContext>(CBE->getBlockDecl()));
+ GetInnerBlockDeclRefExprs(CBE->getBody(),
+ InnerBlockDeclRefs,
+ InnerContexts);
+ }
+ else
+ GetInnerBlockDeclRefExprs(*CI,
+ InnerBlockDeclRefs,
+ InnerContexts);
+
+ }
+ // Handle specific things.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
+ if (DRE->refersToEnclosingLocal()) {
+ if (!isa<FunctionDecl>(DRE->getDecl()) &&
+ !InnerContexts.count(DRE->getDecl()->getDeclContext()))
+ InnerBlockDeclRefs.push_back(DRE);
+ if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (Var->isFunctionOrMethodVarDecl())
+ ImportedLocalExternalDecls.insert(Var);
+ }
+ }
+
+ return;
+}
+
+/// convertObjCTypeToCStyleType - This routine converts ObjC types, such as
+/// qualified objects and blocks, to the closest C/C++ types it can.
+/// It returns true if the input type was modified.
+bool RewriteModernObjC::convertObjCTypeToCStyleType(QualType &T) {
+ QualType oldT = T;
+ convertBlockPointerToFunctionPointer(T);
+ if (T->isFunctionPointerType()) {
+ QualType PointeeTy;
+ if (const PointerType* PT = T->getAs<PointerType>()) {
+ PointeeTy = PT->getPointeeType();
+ if (const FunctionType *FT = PointeeTy->getAs<FunctionType>()) {
+ T = convertFunctionTypeOfBlocks(FT);
+ T = Context->getPointerType(T);
+ }
+ }
+ }
+
+ convertToUnqualifiedObjCType(T);
+ return T != oldT;
+}
+
+/// convertFunctionTypeOfBlocks - This routine converts a function type
+/// whose result type may be a block pointer or whose argument type(s)
+/// might be block pointers to an equivalent function type replacing
+/// all block pointers to function pointers.
+QualType RewriteModernObjC::convertFunctionTypeOfBlocks(const FunctionType *FT) {
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
+ // FTP will be null for closures that don't take arguments.
+ // Generate a funky cast.
+ SmallVector<QualType, 8> ArgTypes;
+ QualType Res = FT->getResultType();
+ bool modified = convertObjCTypeToCStyleType(Res);
+
+ if (FTP) {
+ for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
+ E = FTP->arg_type_end(); I && (I != E); ++I) {
+ QualType t = *I;
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ if (convertObjCTypeToCStyleType(t))
+ modified = true;
+ ArgTypes.push_back(t);
+ }
+ }
+ QualType FuncType;
+ if (modified)
+ FuncType = getSimpleFunctionType(Res, &ArgTypes[0], ArgTypes.size());
+ else FuncType = QualType(FT, 0);
+ return FuncType;
+}
+
+Stmt *RewriteModernObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
+ // Navigate to relevant type information.
+ const BlockPointerType *CPT = 0;
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BlockExp)) {
+ CPT = DRE->getType()->getAs<BlockPointerType>();
+ } else if (const MemberExpr *MExpr = dyn_cast<MemberExpr>(BlockExp)) {
+ CPT = MExpr->getType()->getAs<BlockPointerType>();
+ }
+ else if (const ParenExpr *PRE = dyn_cast<ParenExpr>(BlockExp)) {
+ return SynthesizeBlockCall(Exp, PRE->getSubExpr());
+ }
+ else if (const ImplicitCastExpr *IEXPR = dyn_cast<ImplicitCastExpr>(BlockExp))
+ CPT = IEXPR->getType()->getAs<BlockPointerType>();
+ else if (const ConditionalOperator *CEXPR =
+ dyn_cast<ConditionalOperator>(BlockExp)) {
+ Expr *LHSExp = CEXPR->getLHS();
+ Stmt *LHSStmt = SynthesizeBlockCall(Exp, LHSExp);
+ Expr *RHSExp = CEXPR->getRHS();
+ Stmt *RHSStmt = SynthesizeBlockCall(Exp, RHSExp);
+ Expr *CONDExp = CEXPR->getCond();
+ ConditionalOperator *CondExpr =
+ new (Context) ConditionalOperator(CONDExp,
+ SourceLocation(), cast<Expr>(LHSStmt),
+ SourceLocation(), cast<Expr>(RHSStmt),
+ Exp->getType(), VK_RValue, OK_Ordinary);
+ return CondExpr;
+ } else if (const ObjCIvarRefExpr *IRE = dyn_cast<ObjCIvarRefExpr>(BlockExp)) {
+ CPT = IRE->getType()->getAs<BlockPointerType>();
+ } else if (const PseudoObjectExpr *POE
+ = dyn_cast<PseudoObjectExpr>(BlockExp)) {
+ CPT = POE->getType()->castAs<BlockPointerType>();
+ } else {
+    assert(false && "RewriteBlockClass: Bad type");
+ }
+ assert(CPT && "RewriteBlockClass: Bad type");
+ const FunctionType *FT = CPT->getPointeeType()->getAs<FunctionType>();
+ assert(FT && "RewriteBlockClass: Bad type");
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
+ // FTP will be null for closures that don't take arguments.
+
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__block_impl"));
+ QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD));
+
+ // Generate a funky cast.
+ SmallVector<QualType, 8> ArgTypes;
+
+ // Push the block argument type.
+ ArgTypes.push_back(PtrBlock);
+ if (FTP) {
+ for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
+ E = FTP->arg_type_end(); I && (I != E); ++I) {
+ QualType t = *I;
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ if (!convertBlockPointerToFunctionPointer(t))
+ convertToUnqualifiedObjCType(t);
+ ArgTypes.push_back(t);
+ }
+ }
+ // Now do the pointer to function cast.
+ QualType PtrToFuncCastType
+ = getSimpleFunctionType(Exp->getType(), &ArgTypes[0], ArgTypes.size());
+
+ PtrToFuncCastType = Context->getPointerType(PtrToFuncCastType);
+
+ CastExpr *BlkCast = NoTypeInfoCStyleCastExpr(Context, PtrBlock,
+ CK_BitCast,
+ const_cast<Expr*>(BlockExp));
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ BlkCast);
+ //PE->dump();
+
+ FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("FuncPtr"),
+ Context->VoidPtrTy, 0,
+ /*BitWidth=*/0, /*Mutable=*/true,
+ /*HasInit=*/false);
+ MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(),
+ FD->getType(), VK_LValue,
+ OK_Ordinary);
+
+
+ CastExpr *FunkCast = NoTypeInfoCStyleCastExpr(Context, PtrToFuncCastType,
+ CK_BitCast, ME);
+ PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), FunkCast);
+
+ SmallVector<Expr*, 8> BlkExprs;
+ // Add the implicit argument.
+ BlkExprs.push_back(BlkCast);
+ // Add the user arguments.
+ for (CallExpr::arg_iterator I = Exp->arg_begin(),
+ E = Exp->arg_end(); I != E; ++I) {
+ BlkExprs.push_back(*I);
+ }
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, &BlkExprs[0],
+ BlkExprs.size(),
+ Exp->getType(), VK_RValue,
+ SourceLocation());
+ return CE;
+}
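+
+// Illustrative sketch (hypothetical block variable "blk" of type
+// int (^)(int)): a call such as blk(5) is rewritten above into roughly
+//   ((int (*)(struct __block_impl *, int))
+//        ((struct __block_impl *)blk)->FuncPtr)((struct __block_impl *)blk, 5)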
+
+// We need to return the rewritten expression to handle cases where the
+// DeclRefExpr is embedded in another expression being rewritten.
+// For example:
+//
+// int main() {
+// __block Foo *f;
+// __block int i;
+//
+// void (^myblock)() = ^() {
+// [f test]; // f is a DeclRefExpr embedded in a message (which is being rewritten).
+// i = 77;
+// };
+//}
+Stmt *RewriteModernObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
+ // Rewrite the byref variable into BYREFVAR->__forwarding->BYREFVAR
+  // for each DeclRefExp, where BYREFVAR is the name of the variable.
+ ValueDecl *VD = DeclRefExp->getDecl();
+ bool isArrow = DeclRefExp->refersToEnclosingLocal();
+
+ FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("__forwarding"),
+ Context->VoidPtrTy, 0,
+ /*BitWidth=*/0, /*Mutable=*/true,
+ /*HasInit=*/false);
+ MemberExpr *ME = new (Context) MemberExpr(DeclRefExp, isArrow,
+ FD, SourceLocation(),
+ FD->getType(), VK_LValue,
+ OK_Ordinary);
+
+ StringRef Name = VD->getName();
+ FD = FieldDecl::Create(*Context, 0, SourceLocation(), SourceLocation(),
+ &Context->Idents.get(Name),
+ Context->VoidPtrTy, 0,
+ /*BitWidth=*/0, /*Mutable=*/true,
+ /*HasInit=*/false);
+ ME = new (Context) MemberExpr(ME, true, FD, SourceLocation(),
+ DeclRefExp->getType(), VK_LValue, OK_Ordinary);
+
+
+
+ // Need parens to enforce precedence.
+ ParenExpr *PE = new (Context) ParenExpr(DeclRefExp->getExprLoc(),
+ DeclRefExp->getExprLoc(),
+ ME);
+ ReplaceStmt(DeclRefExp, PE);
+ return PE;
+}
+
+// Rewrites the imported local variable V with external storage
+// (static, extern, etc.) as *V
+//
+Stmt *RewriteModernObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) {
+ ValueDecl *VD = DRE->getDecl();
+ if (VarDecl *Var = dyn_cast<VarDecl>(VD))
+ if (!ImportedLocalExternalDecls.count(Var))
+ return DRE;
+ Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(),
+ VK_LValue, OK_Ordinary,
+ DRE->getLocation());
+ // Need parens to enforce precedence.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ Exp);
+ ReplaceStmt(DRE, PE);
+ return PE;
+}
+
+void RewriteModernObjC::RewriteCastExpr(CStyleCastExpr *CE) {
+ SourceLocation LocStart = CE->getLParenLoc();
+ SourceLocation LocEnd = CE->getRParenLoc();
+
+ // Need to avoid trying to rewrite synthesized casts.
+ if (LocStart.isInvalid())
+ return;
+ // Need to avoid trying to rewrite casts contained in macros.
+ if (!Rewriter::isRewritable(LocStart) || !Rewriter::isRewritable(LocEnd))
+ return;
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ QualType QT = CE->getType();
+ const Type* TypePtr = QT->getAs<Type>();
+ if (isa<TypeOfExprType>(TypePtr)) {
+ const TypeOfExprType *TypeOfExprTypePtr = cast<TypeOfExprType>(TypePtr);
+ QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType();
+ std::string TypeAsString = "(";
+ RewriteBlockPointerType(TypeAsString, QT);
+ TypeAsString += ")";
+ ReplaceText(LocStart, endBuf-startBuf+1, TypeAsString);
+ return;
+ }
+  // Scan the text of the cast for block '^' tokens and rewrite them to '*'.
+ const char *argPtr = startBuf;
+
+ while (*argPtr++ && (argPtr < endBuf)) {
+ switch (*argPtr) {
+ case '^':
+ // Replace the '^' with '*'.
+ LocStart = LocStart.getLocWithOffset(argPtr-startBuf);
+ ReplaceText(LocStart, 1, "*");
+ break;
+ }
+ }
+ return;
+}
+
+void RewriteModernObjC::RewriteImplicitCastObjCExpr(CastExpr *IC) {
+ CastKind CastKind = IC->getCastKind();
+
+ if (CastKind == CK_BlockPointerToObjCPointerCast) {
+ CStyleCastExpr * CastExpr =
+ NoTypeInfoCStyleCastExpr(Context, IC->getType(), CK_BitCast, IC);
+ ReplaceStmt(IC, CastExpr);
+ }
+ else if (CastKind == CK_AnyPointerToBlockPointerCast) {
+ QualType BlockT = IC->getType();
+ (void)convertBlockPointerToFunctionPointer(BlockT);
+ CStyleCastExpr * CastExpr =
+ NoTypeInfoCStyleCastExpr(Context, BlockT, CK_BitCast, IC);
+ ReplaceStmt(IC, CastExpr);
+ }
+ return;
+}
+
+void RewriteModernObjC::RewriteBlockPointerFunctionArgs(FunctionDecl *FD) {
+ SourceLocation DeclLoc = FD->getLocation();
+ unsigned parenCount = 0;
+
+ // We have 1 or more arguments that have closure pointers.
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ const char *startArgList = strchr(startBuf, '(');
+
+ assert((*startArgList == '(') && "Rewriter fuzzy parser confused");
+
+ parenCount++;
+ // advance the location to startArgList.
+ DeclLoc = DeclLoc.getLocWithOffset(startArgList-startBuf);
+ assert((DeclLoc.isValid()) && "Invalid DeclLoc");
+
+ const char *argPtr = startArgList;
+
+ while (*argPtr++ && parenCount) {
+ switch (*argPtr) {
+ case '^':
+ // Replace the '^' with '*'.
+ DeclLoc = DeclLoc.getLocWithOffset(argPtr-startArgList);
+ ReplaceText(DeclLoc, 1, "*");
+ break;
+ case '(':
+ parenCount++;
+ break;
+ case ')':
+ parenCount--;
+ break;
+ }
+ }
+ return;
+}
+
+bool RewriteModernObjC::PointerTypeTakesAnyBlockArguments(QualType QT) {
+ const FunctionProtoType *FTP;
+ const PointerType *PT = QT->getAs<PointerType>();
+ if (PT) {
+ FTP = PT->getPointeeType()->getAs<FunctionProtoType>();
+ } else {
+ const BlockPointerType *BPT = QT->getAs<BlockPointerType>();
+    assert(BPT && "PointerTypeTakesAnyBlockArguments(): not a block pointer type");
+ FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
+ }
+ if (FTP) {
+ for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
+ E = FTP->arg_type_end(); I != E; ++I)
+ if (isTopLevelBlockPointerType(*I))
+ return true;
+ }
+ return false;
+}
+
+bool RewriteModernObjC::PointerTypeTakesAnyObjCQualifiedType(QualType QT) {
+ const FunctionProtoType *FTP;
+ const PointerType *PT = QT->getAs<PointerType>();
+ if (PT) {
+ FTP = PT->getPointeeType()->getAs<FunctionProtoType>();
+ } else {
+ const BlockPointerType *BPT = QT->getAs<BlockPointerType>();
+    assert(BPT && "PointerTypeTakesAnyObjCQualifiedType(): not a block pointer type");
+ FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
+ }
+ if (FTP) {
+ for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
+ E = FTP->arg_type_end(); I != E; ++I) {
+ if ((*I)->isObjCQualifiedIdType())
+ return true;
+ if ((*I)->isObjCObjectPointerType() &&
+ (*I)->getPointeeType()->isObjCQualifiedInterfaceType())
+ return true;
+ }
+
+ }
+ return false;
+}
+
+void RewriteModernObjC::GetExtentOfArgList(const char *Name, const char *&LParen,
+ const char *&RParen) {
+ const char *argPtr = strchr(Name, '(');
+ assert((*argPtr == '(') && "Rewriter fuzzy parser confused");
+
+ LParen = argPtr; // output the start.
+ argPtr++; // skip past the left paren.
+ unsigned parenCount = 1;
+
+ while (*argPtr && parenCount) {
+ switch (*argPtr) {
+ case '(': parenCount++; break;
+ case ')': parenCount--; break;
+ default: break;
+ }
+ if (parenCount) argPtr++;
+ }
+ assert((*argPtr == ')') && "Rewriter fuzzy parser confused");
+ RParen = argPtr; // output the end
+}
+
+void RewriteModernObjC::RewriteBlockPointerDecl(NamedDecl *ND) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ RewriteBlockPointerFunctionArgs(FD);
+ return;
+ }
+ // Handle Variables and Typedefs.
+ SourceLocation DeclLoc = ND->getLocation();
+ QualType DeclT;
+ if (VarDecl *VD = dyn_cast<VarDecl>(ND))
+ DeclT = VD->getType();
+ else if (TypedefNameDecl *TDD = dyn_cast<TypedefNameDecl>(ND))
+ DeclT = TDD->getUnderlyingType();
+ else if (FieldDecl *FD = dyn_cast<FieldDecl>(ND))
+ DeclT = FD->getType();
+ else
+ llvm_unreachable("RewriteBlockPointerDecl(): Decl type not yet handled");
+
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ const char *endBuf = startBuf;
+ // scan backward (from the decl location) for the end of the previous decl.
+ while (*startBuf != '^' && *startBuf != ';' && startBuf != MainFileStart)
+ startBuf--;
+ SourceLocation Start = DeclLoc.getLocWithOffset(startBuf-endBuf);
+ std::string buf;
+ unsigned OrigLength=0;
+ // *startBuf != '^' if we are dealing with a pointer to function that
+ // may take block argument types (which will be handled below).
+ if (*startBuf == '^') {
+ // Replace the '^' with '*', computing a negative offset.
+ buf = '*';
+ startBuf++;
+ OrigLength++;
+ }
+ while (*startBuf != ')') {
+ buf += *startBuf;
+ startBuf++;
+ OrigLength++;
+ }
+ buf += ')';
+ OrigLength++;
+
+ if (PointerTypeTakesAnyBlockArguments(DeclT) ||
+ PointerTypeTakesAnyObjCQualifiedType(DeclT)) {
+ // Replace the '^' with '*' for arguments.
+ // Replace id<P> with id/*<>*/
+ DeclLoc = ND->getLocation();
+ startBuf = SM->getCharacterData(DeclLoc);
+ const char *argListBegin, *argListEnd;
+ GetExtentOfArgList(startBuf, argListBegin, argListEnd);
+ while (argListBegin < argListEnd) {
+ if (*argListBegin == '^')
+ buf += '*';
+ else if (*argListBegin == '<') {
+ buf += "/*";
+ buf += *argListBegin++;
+        OrigLength++;
+ while (*argListBegin != '>') {
+ buf += *argListBegin++;
+ OrigLength++;
+ }
+ buf += *argListBegin;
+ buf += "*/";
+ }
+ else
+ buf += *argListBegin;
+ argListBegin++;
+ OrigLength++;
+ }
+ buf += ')';
+ OrigLength++;
+ }
+ ReplaceText(Start, OrigLength, buf);
+
+ return;
+}
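+
+// Illustrative sketch (hypothetical declaration): a declaration such as
+//   int (^blk)(int (^)(void), id<Proto>);
+// is rewritten above into roughly
+//   int (*blk)(int (*)(void), id/*<Proto>*/);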
+
+
+/// SynthesizeByrefCopyDestroyHelper - This routine synthesizes:
+/// void __Block_byref_id_object_copy(struct Block_byref_id_object *dst,
+/// struct Block_byref_id_object *src) {
+/// _Block_object_assign (&_dest->object, _src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT
+/// [|BLOCK_FIELD_IS_WEAK]) // object
+/// _Block_object_assign(&_dest->object, _src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK
+/// [|BLOCK_FIELD_IS_WEAK]) // block
+/// }
+/// And:
+/// void __Block_byref_id_object_dispose(struct Block_byref_id_object *_src) {
+/// _Block_object_dispose(_src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT
+/// [|BLOCK_FIELD_IS_WEAK]) // object
+/// _Block_object_dispose(_src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK
+/// [|BLOCK_FIELD_IS_WEAK]) // block
+/// }
+
+std::string RewriteModernObjC::SynthesizeByrefCopyDestroyHelper(VarDecl *VD,
+ int flag) {
+ std::string S;
+ if (CopyDestroyCache.count(flag))
+ return S;
+ CopyDestroyCache.insert(flag);
+ S = "static void __Block_byref_id_object_copy_";
+ S += utostr(flag);
+ S += "(void *dst, void *src) {\n";
+
+ // offset into the object pointer is computed as:
+ // void * + void* + int + int + void* + void *
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+ unsigned VoidPtrSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->VoidPtrTy));
+
+ unsigned offset = (VoidPtrSize*4 + IntSize + IntSize)/Context->getCharWidth();
+ S += " _Block_object_assign((char*)dst + ";
+ S += utostr(offset);
+ S += ", *(void * *) ((char*)src + ";
+ S += utostr(offset);
+ S += "), ";
+ S += utostr(flag);
+ S += ");\n}\n";
+
+ S += "static void __Block_byref_id_object_dispose_";
+ S += utostr(flag);
+ S += "(void *src) {\n";
+ S += " _Block_object_dispose(*(void * *) ((char*)src + ";
+ S += utostr(offset);
+ S += "), ";
+ S += utostr(flag);
+ S += ");\n}\n";
+ return S;
+}
+
+/// RewriteByRefVar - For each __block typex ND variable this routine transforms
+/// the declaration into:
+/// struct __Block_byref_ND {
+/// void *__isa; // NULL for everything except __weak pointers
+/// struct __Block_byref_ND *__forwarding;
+/// int32_t __flags;
+/// int32_t __size;
+/// void *__Block_byref_id_object_copy; // If variable is __block ObjC object
+/// void *__Block_byref_id_object_dispose; // If variable is __block ObjC object
+/// typex ND;
+/// };
+///
+/// It then replaces declaration of ND variable with:
+/// struct __Block_byref_ND ND = {__isa=0B, __forwarding=&ND, __flags=some_flag,
+/// __size=sizeof(struct __Block_byref_ND),
+/// ND=initializer-if-any};
+///
+///
+void RewriteModernObjC::RewriteByRefVar(VarDecl *ND) {
+ // Insert declaration for the function in which block literal is
+ // used.
+ if (CurFunctionDeclToDeclareForBlock)
+ RewriteBlockLiteralFunctionDecl(CurFunctionDeclToDeclareForBlock);
+ int flag = 0;
+ int isa = 0;
+ SourceLocation DeclLoc = ND->getTypeSpecStartLoc();
+ if (DeclLoc.isInvalid())
+ // If type location is missing, it is because of missing type (a warning).
+ // Use variable's location which is good for this case.
+ DeclLoc = ND->getLocation();
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ SourceLocation X = ND->getLocEnd();
+ X = SM->getExpansionLoc(X);
+ const char *endBuf = SM->getCharacterData(X);
+ std::string Name(ND->getNameAsString());
+ std::string ByrefType;
+ RewriteByRefString(ByrefType, Name, ND, true);
+ ByrefType += " {\n";
+ ByrefType += " void *__isa;\n";
+ RewriteByRefString(ByrefType, Name, ND);
+ ByrefType += " *__forwarding;\n";
+ ByrefType += " int __flags;\n";
+ ByrefType += " int __size;\n";
+ // Add void *__Block_byref_id_object_copy;
+ // void *__Block_byref_id_object_dispose; if needed.
+ QualType Ty = ND->getType();
+ bool HasCopyAndDispose = Context->BlockRequiresCopying(Ty);
+ if (HasCopyAndDispose) {
+ ByrefType += " void (*__Block_byref_id_object_copy)(void*, void*);\n";
+ ByrefType += " void (*__Block_byref_id_object_dispose)(void*);\n";
+ }
+
+ QualType T = Ty;
+ (void)convertBlockPointerToFunctionPointer(T);
+ T.getAsStringInternal(Name, Context->getPrintingPolicy());
+
+ ByrefType += " " + Name + ";\n";
+ ByrefType += "};\n";
+ // Insert this type in global scope. It is needed by helper function.
+ SourceLocation FunLocStart;
+ if (CurFunctionDef)
+ FunLocStart = CurFunctionDef->getTypeSpecStartLoc();
+ else {
+ assert(CurMethodDef && "RewriteByRefVar - CurMethodDef is null");
+ FunLocStart = CurMethodDef->getLocStart();
+ }
+ InsertText(FunLocStart, ByrefType);
+ if (Ty.isObjCGCWeak()) {
+ flag |= BLOCK_FIELD_IS_WEAK;
+ isa = 1;
+ }
+
+ if (HasCopyAndDispose) {
+ flag = BLOCK_BYREF_CALLER;
+ QualType Ty = ND->getType();
+ // FIXME. Handle __weak variable (BLOCK_FIELD_IS_WEAK) as well.
+ if (Ty->isBlockPointerType())
+ flag |= BLOCK_FIELD_IS_BLOCK;
+ else
+ flag |= BLOCK_FIELD_IS_OBJECT;
+ std::string HF = SynthesizeByrefCopyDestroyHelper(ND, flag);
+ if (!HF.empty())
+ InsertText(FunLocStart, HF);
+ }
+
+ // struct __Block_byref_ND ND =
+ // {0, &ND, some_flag, __size=sizeof(struct __Block_byref_ND),
+ // initializer-if-any};
+ bool hasInit = (ND->getInit() != 0);
+ // FIXME. rewriter does not support __block c++ objects which
+ // require construction.
+ if (hasInit && dyn_cast<CXXConstructExpr>(ND->getInit()))
+ hasInit = false;
+ unsigned flags = 0;
+ if (HasCopyAndDispose)
+ flags |= BLOCK_HAS_COPY_DISPOSE;
+ Name = ND->getNameAsString();
+ ByrefType.clear();
+ RewriteByRefString(ByrefType, Name, ND);
+ std::string ForwardingCastType("(");
+ ForwardingCastType += ByrefType + " *)";
+ if (!hasInit) {
+ ByrefType += " " + Name + " = {(void*)";
+ ByrefType += utostr(isa);
+ ByrefType += "," + ForwardingCastType + "&" + Name + ", ";
+ ByrefType += utostr(flags);
+ ByrefType += ", ";
+ ByrefType += "sizeof(";
+ RewriteByRefString(ByrefType, Name, ND);
+ ByrefType += ")";
+ if (HasCopyAndDispose) {
+ ByrefType += ", __Block_byref_id_object_copy_";
+ ByrefType += utostr(flag);
+ ByrefType += ", __Block_byref_id_object_dispose_";
+ ByrefType += utostr(flag);
+ }
+ ByrefType += "};\n";
+ unsigned nameSize = Name.size();
+    // For a block or function pointer declaration, the name is already
+    // part of the declaration.
+ if (Ty->isBlockPointerType() || Ty->isFunctionPointerType())
+ nameSize = 1;
+ ReplaceText(DeclLoc, endBuf-startBuf+nameSize, ByrefType);
+ }
+ else {
+ SourceLocation startLoc;
+ Expr *E = ND->getInit();
+ if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
+ startLoc = ECE->getLParenLoc();
+ else
+ startLoc = E->getLocStart();
+ startLoc = SM->getExpansionLoc(startLoc);
+ endBuf = SM->getCharacterData(startLoc);
+ ByrefType += " " + Name;
+ ByrefType += " = {(void*)";
+ ByrefType += utostr(isa);
+ ByrefType += "," + ForwardingCastType + "&" + Name + ", ";
+ ByrefType += utostr(flags);
+ ByrefType += ", ";
+ ByrefType += "sizeof(";
+ RewriteByRefString(ByrefType, Name, ND);
+ ByrefType += "), ";
+ if (HasCopyAndDispose) {
+ ByrefType += "__Block_byref_id_object_copy_";
+ ByrefType += utostr(flag);
+ ByrefType += ", __Block_byref_id_object_dispose_";
+ ByrefType += utostr(flag);
+ ByrefType += ", ";
+ }
+ ReplaceText(DeclLoc, endBuf-startBuf, ByrefType);
+
+ // Complete the newly synthesized compound expression by inserting a right
+ // curly brace before the end of the declaration.
+ // FIXME: This approach avoids rewriting the initializer expression. It
+ // also assumes there is only one declarator. For example, the following
+ // isn't currently supported by this routine (in general):
+ //
+ // double __block BYREFVAR = 1.34, BYREFVAR2 = 1.37;
+ //
+ const char *startInitializerBuf = SM->getCharacterData(startLoc);
+ const char *semiBuf = strchr(startInitializerBuf, ';');
+ assert((*semiBuf == ';') && "RewriteByRefVar: can't find ';'");
+ SourceLocation semiLoc =
+ startLoc.getLocWithOffset(semiBuf-startInitializerBuf);
+
+ InsertText(semiLoc, "}");
+ }
+ return;
+}
+
+void RewriteModernObjC::CollectBlockDeclRefInfo(BlockExpr *Exp) {
+ // Add initializers for any closure decl refs.
+ GetBlockDeclRefExprs(Exp->getBody());
+ if (BlockDeclRefs.size()) {
+ // Unique all "by copy" declarations.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (!BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
+ if (!BlockByCopyDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
+ BlockByCopyDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
+ BlockByCopyDecls.push_back(BlockDeclRefs[i]->getDecl());
+ }
+ }
+ // Unique all "by ref" declarations.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
+ if (!BlockByRefDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
+ BlockByRefDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
+ BlockByRefDecls.push_back(BlockDeclRefs[i]->getDecl());
+ }
+ }
+ // Find any imported blocks...they will need special attention.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>() ||
+ BlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
+ BlockDeclRefs[i]->getType()->isBlockPointerType())
+ ImportedBlockDecls.insert(BlockDeclRefs[i]->getDecl());
+ }
+}
+
+FunctionDecl *RewriteModernObjC::SynthBlockInitFunctionDecl(StringRef name) {
+ IdentifierInfo *ID = &Context->Idents.get(name);
+ QualType FType = Context->getFunctionNoProtoType(Context->VoidPtrTy);
+ return FunctionDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), ID, FType, 0, SC_Extern,
+ SC_None, false, false);
+}
+
+Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
+ const SmallVector<DeclRefExpr *, 8> &InnerBlockDeclRefs) {
+
+ const BlockDecl *block = Exp->getBlockDecl();
+
+ Blocks.push_back(Exp);
+
+ CollectBlockDeclRefInfo(Exp);
+
+ // Add inner imported variables now used in current block.
+ int countOfInnerDecls = 0;
+ if (!InnerBlockDeclRefs.empty()) {
+ for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) {
+ DeclRefExpr *Exp = InnerBlockDeclRefs[i];
+ ValueDecl *VD = Exp->getDecl();
+ if (!VD->hasAttr<BlocksAttr>() && !BlockByCopyDeclsPtrSet.count(VD)) {
+        // We need to save the copied-in variables of nested
+        // blocks because they are needed at the end for some of the API generation.
+        // See the SynthesizeBlockLiterals routine.
+ InnerDeclRefs.push_back(Exp); countOfInnerDecls++;
+ BlockDeclRefs.push_back(Exp);
+ BlockByCopyDeclsPtrSet.insert(VD);
+ BlockByCopyDecls.push_back(VD);
+ }
+ if (VD->hasAttr<BlocksAttr>() && !BlockByRefDeclsPtrSet.count(VD)) {
+ InnerDeclRefs.push_back(Exp); countOfInnerDecls++;
+ BlockDeclRefs.push_back(Exp);
+ BlockByRefDeclsPtrSet.insert(VD);
+ BlockByRefDecls.push_back(VD);
+ }
+ }
+ // Find any imported blocks...they will need special attention.
+ for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++)
+ if (InnerBlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>() ||
+ InnerBlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
+ InnerBlockDeclRefs[i]->getType()->isBlockPointerType())
+ ImportedBlockDecls.insert(InnerBlockDeclRefs[i]->getDecl());
+ }
+ InnerDeclRefsCount.push_back(countOfInnerDecls);
+
+ std::string FuncName;
+
+ if (CurFunctionDef)
+ FuncName = CurFunctionDef->getNameAsString();
+ else if (CurMethodDef)
+ BuildUniqueMethodName(FuncName, CurMethodDef);
+ else if (GlobalVarDecl)
+ FuncName = std::string(GlobalVarDecl->getNameAsString());
+
+ bool GlobalBlockExpr =
+ block->getDeclContext()->getRedeclContext()->isFileContext();
+
+ if (GlobalBlockExpr && !GlobalVarDecl) {
+ Diags.Report(block->getLocation(), GlobalBlockRewriteFailedDiag);
+ GlobalBlockExpr = false;
+ }
+
+ std::string BlockNumber = utostr(Blocks.size()-1);
+
+ std::string Func = "__" + FuncName + "_block_func_" + BlockNumber;
+
+ // Get a pointer to the function type so we can cast appropriately.
+ QualType BFT = convertFunctionTypeOfBlocks(Exp->getFunctionType());
+ QualType FType = Context->getPointerType(BFT);
+
+ FunctionDecl *FD;
+ Expr *NewRep;
+
+  // Simulate a constructor call...
+ std::string Tag;
+
+ if (GlobalBlockExpr)
+ Tag = "__global_";
+ else
+ Tag = "__";
+ Tag += FuncName + "_block_impl_" + BlockNumber;
+
+ FD = SynthBlockInitFunctionDecl(Tag);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, FType, VK_RValue,
+ SourceLocation());
+
+ SmallVector<Expr*, 4> InitExprs;
+
+ // Initialize the block function.
+ FD = SynthBlockInitFunctionDecl(Func);
+ DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
+ CK_BitCast, Arg);
+ InitExprs.push_back(castExpr);
+
+ // Initialize the block descriptor.
+ std::string DescData = "__" + FuncName + "_block_desc_" + BlockNumber + "_DATA";
+
+ VarDecl *NewVD = VarDecl::Create(*Context, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get(DescData.c_str()),
+ Context->VoidPtrTy, 0,
+ SC_Static, SC_None);
+ UnaryOperator *DescRefExpr =
+ new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD, false,
+ Context->VoidPtrTy,
+ VK_LValue,
+ SourceLocation()),
+ UO_AddrOf,
+ Context->getPointerType(Context->VoidPtrTy),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ InitExprs.push_back(DescRefExpr);
+
+ // Add initializers for any closure decl refs.
+ if (BlockDeclRefs.size()) {
+ Expr *Exp;
+ // Output all "by copy" declarations.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ if (isObjCType((*I)->getType())) {
+ // FIXME: Conform to ABI ([[obj retain] autorelease]).
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
+ if (HasLocalVariableExternalStorage(*I)) {
+ QualType QT = (*I)->getType();
+ QT = Context->getPointerType(QT);
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation());
+ }
+ } else if (isTopLevelBlockPointerType((*I)->getType())) {
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Arg = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
+ Exp = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
+ CK_BitCast, Arg);
+ } else {
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
+ if (HasLocalVariableExternalStorage(*I)) {
+ QualType QT = (*I)->getType();
+ QT = Context->getPointerType(QT);
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation());
+ }
+
+ }
+ InitExprs.push_back(Exp);
+ }
+ // Output all "by ref" declarations.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ ValueDecl *ND = (*I);
+ std::string Name(ND->getNameAsString());
+ std::string RecName;
+ RewriteByRefString(RecName, Name, ND, true);
+ IdentifierInfo *II = &Context->Idents.get(RecName.c_str()
+ + sizeof("struct"));
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ II);
+ assert(RD && "SynthBlockInitExpr(): Can't find RecordDecl");
+ QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
+ SourceLocation());
+ bool isNestedCapturedVar = false;
+ if (block)
+ for (BlockDecl::capture_const_iterator ci = block->capture_begin(),
+ ce = block->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ if (variable == ND && ci->isNested()) {
+ assert (ci->isByRef() &&
+ "SynthBlockInitExpr - captured block variable is not byref");
+ isNestedCapturedVar = true;
+ break;
+ }
+ }
+ // captured nested byref variable has its address passed. Do not take
+ // its address again.
+ if (!isNestedCapturedVar)
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf,
+ Context->getPointerType(Exp->getType()),
+ VK_RValue, OK_Ordinary, SourceLocation());
+ Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp);
+ InitExprs.push_back(Exp);
+ }
+ }
+ if (ImportedBlockDecls.size()) {
+ // generate BLOCK_HAS_COPY_DISPOSE(have helper funcs) | BLOCK_HAS_DESCRIPTOR
+ int flag = (BLOCK_HAS_COPY_DISPOSE | BLOCK_HAS_DESCRIPTOR);
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+ Expr *FlagExp = IntegerLiteral::Create(*Context, llvm::APInt(IntSize, flag),
+ Context->IntTy, SourceLocation());
+ InitExprs.push_back(FlagExp);
+ }
+ NewRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0], InitExprs.size(),
+ FType, VK_LValue, SourceLocation());
+
+ if (GlobalBlockExpr) {
+ assert (GlobalConstructionExp == 0 &&
+ "SynthBlockInitExpr - GlobalConstructionExp must be null");
+ GlobalConstructionExp = NewRep;
+ NewRep = DRE;
+ }
+
+ NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf,
+ Context->getPointerType(NewRep->getType()),
+ VK_RValue, OK_Ordinary, SourceLocation());
+ NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
+ NewRep);
+ BlockDeclRefs.clear();
+ BlockByRefDecls.clear();
+ BlockByRefDeclsPtrSet.clear();
+ BlockByCopyDecls.clear();
+ BlockByCopyDeclsPtrSet.clear();
+ ImportedBlockDecls.clear();
+ return NewRep;
+}
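+
+// For illustration only: for a simple block such as 'void (^b)(void) = ^{ ... };'
+// inside main(), the init expression synthesized above prints out roughly as
+//   ((void (*)())&__main_block_impl_0((void *)__main_block_func_0,
+//                                     &__main_block_desc_0_DATA))
+// with additional arguments appended for any captured by-copy/by-ref variables
+// and for the copy/dispose flag; the helper names depend on the enclosing
+// function and block number.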
+
+bool RewriteModernObjC::IsDeclStmtInForeachHeader(DeclStmt *DS) {
+ if (const ObjCForCollectionStmt * CS =
+ dyn_cast<ObjCForCollectionStmt>(Stmts.back()))
+ return CS->getElement() == DS;
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Function Body / Expression rewriting
+//===----------------------------------------------------------------------===//
+
+Stmt *RewriteModernObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
+ if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
+ isa<DoStmt>(S) || isa<ForStmt>(S))
+ Stmts.push_back(S);
+ else if (isa<ObjCForCollectionStmt>(S)) {
+ Stmts.push_back(S);
+ ObjCBcLabelNo.push_back(++BcLabelCount);
+ }
+
+ // Pseudo-object operations and ivar references need special
+ // treatment because we're going to recursively rewrite them.
+ if (PseudoObjectExpr *PseudoOp = dyn_cast<PseudoObjectExpr>(S)) {
+ if (isa<BinaryOperator>(PseudoOp->getSyntacticForm())) {
+ return RewritePropertyOrImplicitSetter(PseudoOp);
+ } else {
+ return RewritePropertyOrImplicitGetter(PseudoOp);
+ }
+ } else if (ObjCIvarRefExpr *IvarRefExpr = dyn_cast<ObjCIvarRefExpr>(S)) {
+ return RewriteObjCIvarRefExpr(IvarRefExpr);
+ }
+
+ SourceRange OrigStmtRange = S->getSourceRange();
+
+ // Perform a bottom up rewrite of all children.
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
+ if (*CI) {
+ Stmt *childStmt = (*CI);
+ Stmt *newStmt = RewriteFunctionBodyOrGlobalInitializer(childStmt);
+ if (newStmt) {
+ *CI = newStmt;
+ }
+ }
+
+ if (BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
+ SmallVector<DeclRefExpr *, 8> InnerBlockDeclRefs;
+ llvm::SmallPtrSet<const DeclContext *, 8> InnerContexts;
+ InnerContexts.insert(BE->getBlockDecl());
+ ImportedLocalExternalDecls.clear();
+ GetInnerBlockDeclRefExprs(BE->getBody(),
+ InnerBlockDeclRefs, InnerContexts);
+ // Rewrite the block body in place.
+ Stmt *SaveCurrentBody = CurrentBody;
+ CurrentBody = BE->getBody();
+ PropParentMap = 0;
+    // A block literal on the RHS of a property-dot-syntax assignment
+    // must be replaced by its synthesized AST so getRewrittenText
+    // works as expected. In this case, what actually ends up on the RHS
+    // is blockTranscribed, which is the helper function for the
+    // block literal; as in: self.c = ^() {[ace ARR];};
+ bool saveDisableReplaceStmt = DisableReplaceStmt;
+ DisableReplaceStmt = false;
+ RewriteFunctionBodyOrGlobalInitializer(BE->getBody());
+ DisableReplaceStmt = saveDisableReplaceStmt;
+ CurrentBody = SaveCurrentBody;
+ PropParentMap = 0;
+ ImportedLocalExternalDecls.clear();
+ // Now we snarf the rewritten text and stash it away for later use.
+ std::string Str = Rewrite.getRewrittenText(BE->getSourceRange());
+ RewrittenBlockExprs[BE] = Str;
+
+ Stmt *blockTranscribed = SynthBlockInitExpr(BE, InnerBlockDeclRefs);
+
+ //blockTranscribed->dump();
+ ReplaceStmt(S, blockTranscribed);
+ return blockTranscribed;
+ }
+ // Handle specific things.
+ if (ObjCEncodeExpr *AtEncode = dyn_cast<ObjCEncodeExpr>(S))
+ return RewriteAtEncode(AtEncode);
+
+ if (ObjCSelectorExpr *AtSelector = dyn_cast<ObjCSelectorExpr>(S))
+ return RewriteAtSelector(AtSelector);
+
+ if (ObjCStringLiteral *AtString = dyn_cast<ObjCStringLiteral>(S))
+ return RewriteObjCStringLiteral(AtString);
+
+ if (ObjCBoolLiteralExpr *BoolLitExpr = dyn_cast<ObjCBoolLiteralExpr>(S))
+ return RewriteObjCBoolLiteralExpr(BoolLitExpr);
+
+ if (ObjCNumericLiteral *NumericLitExpr = dyn_cast<ObjCNumericLiteral>(S))
+ return RewriteObjCNumericLiteralExpr(NumericLitExpr);
+
+ if (ObjCArrayLiteral *ArrayLitExpr = dyn_cast<ObjCArrayLiteral>(S))
+ return RewriteObjCArrayLiteralExpr(ArrayLitExpr);
+
+ if (ObjCDictionaryLiteral *DictionaryLitExpr =
+ dyn_cast<ObjCDictionaryLiteral>(S))
+ return RewriteObjCDictionaryLiteralExpr(DictionaryLitExpr);
+
+ if (ObjCMessageExpr *MessExpr = dyn_cast<ObjCMessageExpr>(S)) {
+#if 0
+ // Before we rewrite it, put the original message expression in a comment.
+ SourceLocation startLoc = MessExpr->getLocStart();
+ SourceLocation endLoc = MessExpr->getLocEnd();
+
+ const char *startBuf = SM->getCharacterData(startLoc);
+ const char *endBuf = SM->getCharacterData(endLoc);
+
+ std::string messString;
+ messString += "// ";
+ messString.append(startBuf, endBuf-startBuf+1);
+ messString += "\n";
+
+ // FIXME: Missing definition of
+ // InsertText(clang::SourceLocation, char const*, unsigned int).
+ // InsertText(startLoc, messString.c_str(), messString.size());
+ // Tried this, but it didn't work either...
+ // ReplaceText(startLoc, 0, messString.c_str(), messString.size());
+#endif
+ return RewriteMessageExpr(MessExpr);
+ }
+
+ if (ObjCAtTryStmt *StmtTry = dyn_cast<ObjCAtTryStmt>(S))
+ return RewriteObjCTryStmt(StmtTry);
+
+ if (ObjCAtSynchronizedStmt *StmtTry = dyn_cast<ObjCAtSynchronizedStmt>(S))
+ return RewriteObjCSynchronizedStmt(StmtTry);
+
+ if (ObjCAtThrowStmt *StmtThrow = dyn_cast<ObjCAtThrowStmt>(S))
+ return RewriteObjCThrowStmt(StmtThrow);
+
+ if (ObjCProtocolExpr *ProtocolExp = dyn_cast<ObjCProtocolExpr>(S))
+ return RewriteObjCProtocolExpr(ProtocolExp);
+
+ if (ObjCForCollectionStmt *StmtForCollection =
+ dyn_cast<ObjCForCollectionStmt>(S))
+ return RewriteObjCForCollectionStmt(StmtForCollection,
+ OrigStmtRange.getEnd());
+ if (BreakStmt *StmtBreakStmt =
+ dyn_cast<BreakStmt>(S))
+ return RewriteBreakStmt(StmtBreakStmt);
+ if (ContinueStmt *StmtContinueStmt =
+ dyn_cast<ContinueStmt>(S))
+ return RewriteContinueStmt(StmtContinueStmt);
+
+ // Need to check for protocol refs (id <P>, Foo <P> *) in variable decls
+ // and cast exprs.
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
+ // FIXME: What we're doing here is modifying the type-specifier that
+ // precedes the first Decl. In the future the DeclGroup should have
+ // a separate type-specifier that we can rewrite.
+ // NOTE: We need to avoid rewriting the DeclStmt if it is within
+ // the context of an ObjCForCollectionStmt. For example:
+ // NSArray *someArray;
+ // for (id <FooProtocol> index in someArray) ;
+ // This is because RewriteObjCForCollectionStmt() does textual rewriting
+ // and it depends on the original text locations/positions.
+ if (Stmts.empty() || !IsDeclStmtInForeachHeader(DS))
+ RewriteObjCQualifiedInterfaceTypes(*DS->decl_begin());
+
+ // Blocks rewrite rules.
+ for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end();
+ DI != DE; ++DI) {
+ Decl *SD = *DI;
+ if (ValueDecl *ND = dyn_cast<ValueDecl>(SD)) {
+ if (isTopLevelBlockPointerType(ND->getType()))
+ RewriteBlockPointerDecl(ND);
+ else if (ND->getType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(ND->getType(), ND);
+ if (VarDecl *VD = dyn_cast<VarDecl>(SD)) {
+ if (VD->hasAttr<BlocksAttr>()) {
+ static unsigned uniqueByrefDeclCount = 0;
+ assert(!BlockByRefDeclNo.count(ND) &&
+ "RewriteFunctionBodyOrGlobalInitializer: Duplicate byref decl");
+ BlockByRefDeclNo[ND] = uniqueByrefDeclCount++;
+ RewriteByRefVar(VD);
+ }
+ else
+ RewriteTypeOfDecl(VD);
+ }
+ }
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(SD)) {
+ if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
+ RewriteBlockPointerDecl(TD);
+ else if (TD->getUnderlyingType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
+ }
+ }
+ }
+
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(S))
+ RewriteObjCQualifiedInterfaceTypes(CE);
+
+ if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
+ isa<DoStmt>(S) || isa<ForStmt>(S)) {
+ assert(!Stmts.empty() && "Statement stack is empty");
+ assert ((isa<SwitchStmt>(Stmts.back()) || isa<WhileStmt>(Stmts.back()) ||
+ isa<DoStmt>(Stmts.back()) || isa<ForStmt>(Stmts.back()))
+ && "Statement stack mismatch");
+ Stmts.pop_back();
+ }
+  // Handle block rewriting.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
+ ValueDecl *VD = DRE->getDecl();
+ if (VD->hasAttr<BlocksAttr>())
+ return RewriteBlockDeclRefExpr(DRE);
+ if (HasLocalVariableExternalStorage(VD))
+ return RewriteLocalVariableExternalStorage(DRE);
+ }
+
+ if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ if (CE->getCallee()->getType()->isBlockPointerType()) {
+ Stmt *BlockCall = SynthesizeBlockCall(CE, CE->getCallee());
+ ReplaceStmt(S, BlockCall);
+ return BlockCall;
+ }
+ }
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(S)) {
+ RewriteCastExpr(CE);
+ }
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(S)) {
+ RewriteImplicitCastObjCExpr(ICE);
+ }
+#if 0
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(S)) {
+ CastExpr *Replacement = new (Context) CastExpr(ICE->getType(),
+ ICE->getSubExpr(),
+ SourceLocation());
+ // Get the new text.
+ std::string SStr;
+ llvm::raw_string_ostream Buf(SStr);
+ Replacement->printPretty(Buf, *Context);
+ const std::string &Str = Buf.str();
+
+ printf("CAST = %s\n", &Str[0]);
+ InsertText(ICE->getSubExpr()->getLocStart(), &Str[0], Str.size());
+ delete S;
+ return Replacement;
+ }
+#endif
+ // Return this stmt unmodified.
+ return S;
+}
+
+void RewriteModernObjC::RewriteRecordBody(RecordDecl *RD) {
+ for (RecordDecl::field_iterator i = RD->field_begin(),
+ e = RD->field_end(); i != e; ++i) {
+ FieldDecl *FD = *i;
+ if (isTopLevelBlockPointerType(FD->getType()))
+ RewriteBlockPointerDecl(FD);
+ if (FD->getType()->isObjCQualifiedIdType() ||
+ FD->getType()->isObjCQualifiedInterfaceType())
+ RewriteObjCQualifiedInterfaceTypes(FD);
+ }
+}
+
+/// HandleDeclInMainFile - This is called for each top-level decl defined in the
+/// main file of the input.
+void RewriteModernObjC::HandleDeclInMainFile(Decl *D) {
+ switch (D->getKind()) {
+ case Decl::Function: {
+ FunctionDecl *FD = cast<FunctionDecl>(D);
+ if (FD->isOverloadedOperator())
+ return;
+
+ // Since function prototypes don't have ParmDecl's, we check the function
+ // prototype. This enables us to rewrite function declarations and
+ // definitions using the same code.
+ RewriteBlocksInFunctionProtoType(FD->getType(), FD);
+
+ if (!FD->isThisDeclarationADefinition())
+ break;
+
+ // FIXME: If this should support Obj-C++, support CXXTryStmt
+ if (CompoundStmt *Body = dyn_cast_or_null<CompoundStmt>(FD->getBody())) {
+ CurFunctionDef = FD;
+ CurFunctionDeclToDeclareForBlock = FD;
+ CurrentBody = Body;
+ Body =
+ cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
+ FD->setBody(Body);
+ CurrentBody = 0;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = 0;
+ }
+ // This synthesizes and inserts the block "impl" struct, invoke function,
+ // and any copy/dispose helper functions.
+ InsertBlockLiteralsWithinFunction(FD);
+ CurFunctionDef = 0;
+ CurFunctionDeclToDeclareForBlock = 0;
+ }
+ break;
+ }
+ case Decl::ObjCMethod: {
+ ObjCMethodDecl *MD = cast<ObjCMethodDecl>(D);
+ if (CompoundStmt *Body = MD->getCompoundBody()) {
+ CurMethodDef = MD;
+ CurrentBody = Body;
+ Body =
+ cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
+ MD->setBody(Body);
+ CurrentBody = 0;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = 0;
+ }
+ InsertBlockLiteralsWithinMethod(MD);
+ CurMethodDef = 0;
+ }
+ break;
+ }
+ case Decl::ObjCImplementation: {
+ ObjCImplementationDecl *CI = cast<ObjCImplementationDecl>(D);
+ ClassImplementation.push_back(CI);
+ break;
+ }
+ case Decl::ObjCCategoryImpl: {
+ ObjCCategoryImplDecl *CI = cast<ObjCCategoryImplDecl>(D);
+ CategoryImplementation.push_back(CI);
+ break;
+ }
+ case Decl::Var: {
+ VarDecl *VD = cast<VarDecl>(D);
+ RewriteObjCQualifiedInterfaceTypes(VD);
+ if (isTopLevelBlockPointerType(VD->getType()))
+ RewriteBlockPointerDecl(VD);
+ else if (VD->getType()->isFunctionPointerType()) {
+ CheckFunctionPointerDecl(VD->getType(), VD);
+ if (VD->getInit()) {
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
+ RewriteCastExpr(CE);
+ }
+ }
+ } else if (VD->getType()->isRecordType()) {
+ RecordDecl *RD = VD->getType()->getAs<RecordType>()->getDecl();
+ if (RD->isCompleteDefinition())
+ RewriteRecordBody(RD);
+ }
+ if (VD->getInit()) {
+ GlobalVarDecl = VD;
+ CurrentBody = VD->getInit();
+ RewriteFunctionBodyOrGlobalInitializer(VD->getInit());
+ CurrentBody = 0;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = 0;
+ }
+ SynthesizeBlockLiterals(VD->getTypeSpecStartLoc(), VD->getName());
+ GlobalVarDecl = 0;
+
+ // This is needed for blocks.
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
+ RewriteCastExpr(CE);
+ }
+ }
+ break;
+ }
+ case Decl::TypeAlias:
+ case Decl::Typedef: {
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
+ RewriteBlockPointerDecl(TD);
+ else if (TD->getUnderlyingType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
+ }
+ break;
+ }
+ case Decl::CXXRecord:
+ case Decl::Record: {
+ RecordDecl *RD = cast<RecordDecl>(D);
+ if (RD->isCompleteDefinition())
+ RewriteRecordBody(RD);
+ break;
+ }
+ default:
+ break;
+ }
+ // Nothing yet.
+}
+
+/// Write_ProtocolExprReferencedMetadata - This routine writes out the
+/// protocol reference symbols in the form of:
+/// struct _protocol_t *PROTOCOL_REF = &PROTOCOL_METADATA.
+static void Write_ProtocolExprReferencedMetadata(ASTContext *Context,
+ ObjCProtocolDecl *PDecl,
+ std::string &Result) {
+ // Also output .objc_protorefs$B section and its meta-data.
+ if (Context->getLangOpts().MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_protorefs$B\")) ";
+ Result += "struct _protocol_t *";
+ Result += "_OBJC_PROTOCOL_REFERENCE_$_";
+ Result += PDecl->getNameAsString();
+ Result += " = &";
+ Result += "_OBJC_PROTOCOL_"; Result += PDecl->getNameAsString();
+ Result += ";\n";
+}
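+
+// For illustration: given a hypothetical protocol 'P', the routine above emits
+//   struct _protocol_t *_OBJC_PROTOCOL_REFERENCE_$_P = &_OBJC_PROTOCOL_P;
+// prefixed with __declspec(allocate(".objc_protorefs$B")) under -fms-extensions.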
+
+void RewriteModernObjC::HandleTranslationUnit(ASTContext &C) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ RewriteInclude();
+
+ // Here's a great place to add any extra declarations that may be needed.
+ // Write out meta data for each @protocol(<expr>).
+ for (llvm::SmallPtrSet<ObjCProtocolDecl *,8>::iterator I = ProtocolExprDecls.begin(),
+ E = ProtocolExprDecls.end(); I != E; ++I) {
+ RewriteObjCProtocolMetaData(*I, Preamble);
+ Write_ProtocolExprReferencedMetadata(Context, (*I), Preamble);
+ }
+
+ InsertText(SM->getLocForStartOfFile(MainFileID), Preamble, false);
+ for (unsigned i = 0, e = ObjCInterfacesSeen.size(); i < e; i++) {
+ ObjCInterfaceDecl *CDecl = ObjCInterfacesSeen[i];
+    // Write a struct declaration for the class matching its ivar declarations.
+    // Note that for the modern ABI, this is postponed until the end of the TU
+ // because class extensions and the implementation might declare their own
+ // private ivars.
+ RewriteInterfaceDecl(CDecl);
+ }
+
+ if (ClassImplementation.size() || CategoryImplementation.size())
+ RewriteImplementations();
+
+ // Get the buffer corresponding to MainFileID. If we haven't changed it, then
+ // we are done.
+ if (const RewriteBuffer *RewriteBuf =
+ Rewrite.getRewriteBufferFor(MainFileID)) {
+ //printf("Changed:\n");
+ *OutFile << std::string(RewriteBuf->begin(), RewriteBuf->end());
+ } else {
+ llvm::errs() << "No changes\n";
+ }
+
+ if (ClassImplementation.size() || CategoryImplementation.size() ||
+ ProtocolExprDecls.size()) {
+    // Rewrite Objective-C metadata.
+ std::string ResultStr;
+ RewriteMetaDataIntoBuffer(ResultStr);
+ // Emit metadata.
+ *OutFile << ResultStr;
+ }
+ // Emit ImageInfo;
+ {
+ std::string ResultStr;
+ WriteImageInfo(ResultStr);
+ *OutFile << ResultStr;
+ }
+ OutFile->flush();
+}
+
+void RewriteModernObjC::Initialize(ASTContext &context) {
+ InitializeCommon(context);
+
+ Preamble += "#ifndef __OBJC2__\n";
+ Preamble += "#define __OBJC2__\n";
+ Preamble += "#endif\n";
+
+ // declaring objc_selector outside the parameter list removes a silly
+ // scope related warning...
+ if (IsHeader)
+ Preamble = "#pragma once\n";
+ Preamble += "struct objc_selector; struct objc_class;\n";
+ Preamble += "struct __rw_objc_super { \n\tstruct objc_object *object; ";
+ Preamble += "\n\tstruct objc_object *superClass; ";
+ // Add a constructor for creating temporary objects.
+ Preamble += "\n\t__rw_objc_super(struct objc_object *o, struct objc_object *s) ";
+ Preamble += ": object(o), superClass(s) {} ";
+ Preamble += "\n};\n";
+
+ if (LangOpts.MicrosoftExt) {
+ // Define all sections using syntax that makes sense.
+ // These are currently generated.
+ Preamble += "\n#pragma section(\".objc_classlist$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_catlist$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_protolist$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_imageinfo$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_nlclslist$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_nlcatlist$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_protorefs$B\", long, read, write)\n";
+ // These are generated but not necessary for functionality.
+ Preamble += "#pragma section(\".datacoal_nt$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".cat_cls_meth$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".inst_meth$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".cls_meth$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_ivar$B\", long, read, write)\n";
+
+    // These need to be generated for performance. Currently they are not;
+    // API calls are used instead.
+ Preamble += "#pragma section(\".objc_selrefs$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_classrefs$B\", long, read, write)\n";
+ Preamble += "#pragma section(\".objc_superrefs$B\", long, read, write)\n";
+
+ }
+ Preamble += "#ifndef _REWRITER_typedef_Protocol\n";
+ Preamble += "typedef struct objc_object Protocol;\n";
+ Preamble += "#define _REWRITER_typedef_Protocol\n";
+ Preamble += "#endif\n";
+ if (LangOpts.MicrosoftExt) {
+ Preamble += "#define __OBJC_RW_DLLIMPORT extern \"C\" __declspec(dllimport)\n";
+ Preamble += "#define __OBJC_RW_STATICIMPORT extern \"C\"\n";
+ }
+ else
+ Preamble += "#define __OBJC_RW_DLLIMPORT extern\n";
+
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend(void);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSendSuper(void);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend_stret(void);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSendSuper_stret(void);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_msgSend_fpret(void);\n";
+
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getClass";
+ Preamble += "(const char *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *class_getSuperclass";
+ Preamble += "(struct objc_class *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getMetaClass";
+ Preamble += "(const char *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_throw( struct objc_object *);\n";
+ // @synchronized hooks.
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_enter( struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_exit( struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT Protocol *objc_getProtocol(const char *);\n";
+ Preamble += "#ifndef __FASTENUMERATIONSTATE\n";
+ Preamble += "struct __objcFastEnumerationState {\n\t";
+ Preamble += "unsigned long state;\n\t";
+ Preamble += "void **itemsPtr;\n\t";
+ Preamble += "unsigned long *mutationsPtr;\n\t";
+ Preamble += "unsigned long extra[5];\n};\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_enumerationMutation(struct objc_object *);\n";
+ Preamble += "#define __FASTENUMERATIONSTATE\n";
+ Preamble += "#endif\n";
+ Preamble += "#ifndef __NSCONSTANTSTRINGIMPL\n";
+ Preamble += "struct __NSConstantStringImpl {\n";
+ Preamble += " int *isa;\n";
+ Preamble += " int flags;\n";
+ Preamble += " char *str;\n";
+ Preamble += " long length;\n";
+ Preamble += "};\n";
+ Preamble += "#ifdef CF_EXPORT_CONSTANT_STRING\n";
+ Preamble += "extern \"C\" __declspec(dllexport) int __CFConstantStringClassReference[];\n";
+ Preamble += "#else\n";
+ Preamble += "__OBJC_RW_DLLIMPORT int __CFConstantStringClassReference[];\n";
+ Preamble += "#endif\n";
+ Preamble += "#define __NSCONSTANTSTRINGIMPL\n";
+ Preamble += "#endif\n";
+ // Blocks preamble.
+ Preamble += "#ifndef BLOCK_IMPL\n";
+ Preamble += "#define BLOCK_IMPL\n";
+ Preamble += "struct __block_impl {\n";
+ Preamble += " void *isa;\n";
+ Preamble += " int Flags;\n";
+ Preamble += " int Reserved;\n";
+ Preamble += " void *FuncPtr;\n";
+ Preamble += "};\n";
+ Preamble += "// Runtime copy/destroy helper functions (from Block_private.h)\n";
+ Preamble += "#ifdef __OBJC_EXPORT_BLOCKS\n";
+ Preamble += "extern \"C\" __declspec(dllexport) "
+ "void _Block_object_assign(void *, const void *, const int);\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void _Block_object_dispose(const void *, const int);\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteGlobalBlock[32];\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteStackBlock[32];\n";
+ Preamble += "#else\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_assign(void *, const void *, const int);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_dispose(const void *, const int);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteGlobalBlock[32];\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteStackBlock[32];\n";
+ Preamble += "#endif\n";
+ Preamble += "#endif\n";
+ if (LangOpts.MicrosoftExt) {
+ Preamble += "#undef __OBJC_RW_DLLIMPORT\n";
+ Preamble += "#undef __OBJC_RW_STATICIMPORT\n";
+ Preamble += "#ifndef KEEP_ATTRIBUTES\n"; // We use this for clang tests.
+ Preamble += "#define __attribute__(X)\n";
+ Preamble += "#endif\n";
+ Preamble += "#ifndef __weak\n";
+ Preamble += "#define __weak\n";
+ Preamble += "#endif\n";
+ Preamble += "#ifndef __block\n";
+ Preamble += "#define __block\n";
+ Preamble += "#endif\n";
+ }
+ else {
+ Preamble += "#define __block\n";
+ Preamble += "#define __weak\n";
+ }
+
+ // Declarations required for modern objective-c array and dictionary literals.
+ Preamble += "\n#include <stdarg.h>\n";
+ Preamble += "struct __NSContainer_literal {\n";
+ Preamble += " void * *arr;\n";
+ Preamble += " __NSContainer_literal (unsigned int count, ...) {\n";
+ Preamble += "\tva_list marker;\n";
+ Preamble += "\tva_start(marker, count);\n";
+ Preamble += "\tarr = new void *[count];\n";
+ Preamble += "\tfor (unsigned i = 0; i < count; i++)\n";
+ Preamble += "\t arr[i] = va_arg(marker, void *);\n";
+ Preamble += "\tva_end( marker );\n";
+ Preamble += " };\n";
+  Preamble += "  ~__NSContainer_literal() {\n";
+ Preamble += "\tdelete[] arr;\n";
+ Preamble += " }\n";
+ Preamble += "};\n";
+
+  // NOTE! Windows uses LLP64 for 64-bit mode. So, cast the pointer to long long,
+  // as this avoids warnings in both 64-bit and 32-bit compilation models.
+ Preamble += "\n#define __OFFSETOFIVAR__(TYPE, MEMBER) ((long long) &((TYPE *)0)->MEMBER)\n";
+}
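+
+// For illustration, the __OFFSETOFIVAR__ macro defined in the preamble above
+// expands, for a hypothetical ivar 'bar' of struct 'Foo_IMPL', as
+//   __OFFSETOFIVAR__(struct Foo_IMPL, bar) ==> ((long long) &((struct Foo_IMPL *)0)->bar)
+// i.e. the classic offsetof idiom, with the long long cast keeping both LLP64
+// (64-bit Windows) and 32-bit builds warning-free.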
+
+/// RewriteIvarOffsetComputation - This routine synthesizes the computation of
+/// an ivar's offset.
+void RewriteModernObjC::RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
+ std::string &Result) {
+ if (ivar->isBitField()) {
+ // FIXME: The hack below doesn't work for bitfields. For now, we simply
+ // place all bitfields at offset 0.
+ Result += "0";
+ } else {
+ Result += "__OFFSETOFIVAR__(struct ";
+ Result += ivar->getContainingInterface()->getNameAsString();
+ if (LangOpts.MicrosoftExt)
+ Result += "_IMPL";
+ Result += ", ";
+ Result += ivar->getNameAsString();
+ Result += ")";
+ }
+}
+
+/// WriteModernMetadataDeclarations - Writes out metadata declarations for modern ABI.
+/// struct _prop_t {
+/// const char *name;
+/// char *attributes;
+/// }
+
+/// struct _prop_list_t {
+/// uint32_t entsize; // sizeof(struct _prop_t)
+/// uint32_t count_of_properties;
+/// struct _prop_t prop_list[count_of_properties];
+/// }
+
+/// struct _protocol_t;
+
+/// struct _protocol_list_t {
+/// long protocol_count; // Note, this is 32/64 bit
+/// struct _protocol_t * protocol_list[protocol_count];
+/// }
+
+/// struct _objc_method {
+/// SEL _cmd;
+/// const char *method_type;
+/// char *_imp;
+/// }
+
+/// struct _method_list_t {
+/// uint32_t entsize; // sizeof(struct _objc_method)
+/// uint32_t method_count;
+/// struct _objc_method method_list[method_count];
+/// }
+
+/// struct _protocol_t {
+/// id isa; // NULL
+/// const char *protocol_name;
+/// const struct _protocol_list_t * protocol_list; // super protocols
+/// const struct method_list_t *instance_methods;
+/// const struct method_list_t *class_methods;
+/// const struct method_list_t *optionalInstanceMethods;
+/// const struct method_list_t *optionalClassMethods;
+/// const struct _prop_list_t * properties;
+/// const uint32_t size; // sizeof(struct _protocol_t)
+/// const uint32_t flags; // = 0
+/// const char ** extendedMethodTypes;
+/// }
+
+/// struct _ivar_t {
+/// unsigned long int *offset; // pointer to ivar offset location
+/// const char *name;
+/// const char *type;
+/// uint32_t alignment;
+/// uint32_t size;
+/// }
+
+/// struct _ivar_list_t {
+/// uint32 entsize; // sizeof(struct _ivar_t)
+/// uint32 count;
+/// struct _ivar_t list[count];
+/// }
+
+/// struct _class_ro_t {
+/// uint32_t flags;
+/// uint32_t instanceStart;
+/// uint32_t instanceSize;
+/// uint32_t reserved; // only when building for 64bit targets
+/// const uint8_t *ivarLayout;
+/// const char *name;
+/// const struct _method_list_t *baseMethods;
+/// const struct _protocol_list_t *baseProtocols;
+/// const struct _ivar_list_t *ivars;
+/// const uint8_t *weakIvarLayout;
+/// const struct _prop_list_t *properties;
+/// }
+
+/// struct _class_t {
+/// struct _class_t *isa;
+/// struct _class_t *superclass;
+/// void *cache;
+/// IMP *vtable;
+/// struct _class_ro_t *ro;
+/// }
+
+/// struct _category_t {
+/// const char *name;
+/// struct _class_t *cls;
+/// const struct _method_list_t *instance_methods;
+/// const struct _method_list_t *class_methods;
+/// const struct _protocol_list_t *protocols;
+/// const struct _prop_list_t *properties;
+/// }
+
+/// MessageRefTy - LLVM for:
+/// struct _message_ref_t {
+/// IMP messenger;
+/// SEL name;
+/// };
+
+/// SuperMessageRefTy - LLVM for:
+/// struct _super_message_ref_t {
+/// SUPER_IMP messenger;
+/// SEL name;
+/// };
+
+static void WriteModernMetadataDeclarations(ASTContext *Context, std::string &Result) {
+ static bool meta_data_declared = false;
+ if (meta_data_declared)
+ return;
+
+ Result += "\nstruct _prop_t {\n";
+ Result += "\tconst char *name;\n";
+ Result += "\tconst char *attributes;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _protocol_t;\n";
+
+ Result += "\nstruct _objc_method {\n";
+ Result += "\tstruct objc_selector * _cmd;\n";
+ Result += "\tconst char *method_type;\n";
+ Result += "\tvoid *_imp;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _protocol_t {\n";
+ Result += "\tvoid * isa; // NULL\n";
+ Result += "\tconst char *protocol_name;\n";
+ Result += "\tconst struct _protocol_list_t * protocol_list; // super protocols\n";
+ Result += "\tconst struct method_list_t *instance_methods;\n";
+ Result += "\tconst struct method_list_t *class_methods;\n";
+ Result += "\tconst struct method_list_t *optionalInstanceMethods;\n";
+ Result += "\tconst struct method_list_t *optionalClassMethods;\n";
+ Result += "\tconst struct _prop_list_t * properties;\n";
+ Result += "\tconst unsigned int size; // sizeof(struct _protocol_t)\n";
+ Result += "\tconst unsigned int flags; // = 0\n";
+ Result += "\tconst char ** extendedMethodTypes;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _ivar_t {\n";
+ Result += "\tunsigned long int *offset; // pointer to ivar offset location\n";
+ Result += "\tconst char *name;\n";
+ Result += "\tconst char *type;\n";
+ Result += "\tunsigned int alignment;\n";
+ Result += "\tunsigned int size;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _class_ro_t {\n";
+ Result += "\tunsigned int flags;\n";
+ Result += "\tunsigned int instanceStart;\n";
+ Result += "\tunsigned int instanceSize;\n";
+ const llvm::Triple &Triple(Context->getTargetInfo().getTriple());
+ if (Triple.getArch() == llvm::Triple::x86_64)
+ Result += "\tunsigned int reserved;\n";
+ Result += "\tconst unsigned char *ivarLayout;\n";
+ Result += "\tconst char *name;\n";
+ Result += "\tconst struct _method_list_t *baseMethods;\n";
+ Result += "\tconst struct _objc_protocol_list *baseProtocols;\n";
+ Result += "\tconst struct _ivar_list_t *ivars;\n";
+ Result += "\tconst unsigned char *weakIvarLayout;\n";
+ Result += "\tconst struct _prop_list_t *properties;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _class_t {\n";
+ Result += "\tstruct _class_t *isa;\n";
+ Result += "\tstruct _class_t *superclass;\n";
+ Result += "\tvoid *cache;\n";
+ Result += "\tvoid *vtable;\n";
+ Result += "\tstruct _class_ro_t *ro;\n";
+ Result += "};\n";
+
+ Result += "\nstruct _category_t {\n";
+ Result += "\tconst char *name;\n";
+ Result += "\tstruct _class_t *cls;\n";
+ Result += "\tconst struct _method_list_t *instance_methods;\n";
+ Result += "\tconst struct _method_list_t *class_methods;\n";
+ Result += "\tconst struct _protocol_list_t *protocols;\n";
+ Result += "\tconst struct _prop_list_t *properties;\n";
+ Result += "};\n";
+
+ Result += "extern \"C\" __declspec(dllimport) struct objc_cache _objc_empty_cache;\n";
+ Result += "#pragma warning(disable:4273)\n";
+ meta_data_declared = true;
+}
+
+static void Write_protocol_list_t_TypeDecl(std::string &Result,
+ long super_protocol_count) {
+ Result += "struct /*_protocol_list_t*/"; Result += " {\n";
+ Result += "\tlong protocol_count; // Note, this is 32/64 bit\n";
+ Result += "\tstruct _protocol_t *super_protocols[";
+ Result += utostr(super_protocol_count); Result += "];\n";
+ Result += "}";
+}
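+
+// For illustration, with super_protocol_count == 2 the routine above writes:
+//   struct /*_protocol_list_t*/ {
+//     long protocol_count; // Note, this is 32/64 bit
+//     struct _protocol_t *super_protocols[2];
+//   }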
+
+static void Write_method_list_t_TypeDecl(std::string &Result,
+ unsigned int method_count) {
+ Result += "struct /*_method_list_t*/"; Result += " {\n";
+ Result += "\tunsigned int entsize; // sizeof(struct _objc_method)\n";
+ Result += "\tunsigned int method_count;\n";
+ Result += "\tstruct _objc_method method_list[";
+ Result += utostr(method_count); Result += "];\n";
+ Result += "}";
+}
+
+static void Write__prop_list_t_TypeDecl(std::string &Result,
+ unsigned int property_count) {
+ Result += "struct /*_prop_list_t*/"; Result += " {\n";
+ Result += "\tunsigned int entsize; // sizeof(struct _prop_t)\n";
+ Result += "\tunsigned int count_of_properties;\n";
+ Result += "\tstruct _prop_t prop_list[";
+ Result += utostr(property_count); Result += "];\n";
+ Result += "}";
+}
+
+static void Write__ivar_list_t_TypeDecl(std::string &Result,
+ unsigned int ivar_count) {
+ Result += "struct /*_ivar_list_t*/"; Result += " {\n";
+ Result += "\tunsigned int entsize; // sizeof(struct _prop_t)\n";
+ Result += "\tunsigned int count;\n";
+ Result += "\tstruct _ivar_t ivar_list[";
+ Result += utostr(ivar_count); Result += "];\n";
+ Result += "}";
+}
+
+static void Write_protocol_list_initializer(ASTContext *Context, std::string &Result,
+ ArrayRef<ObjCProtocolDecl *> SuperProtocols,
+ StringRef VarName,
+ StringRef ProtocolName) {
+ if (SuperProtocols.size() > 0) {
+ Result += "\nstatic ";
+ Write_protocol_list_t_TypeDecl(Result, SuperProtocols.size());
+ Result += " "; Result += VarName;
+ Result += ProtocolName;
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
+ Result += "\t"; Result += utostr(SuperProtocols.size()); Result += ",\n";
+ for (unsigned i = 0, e = SuperProtocols.size(); i < e; i++) {
+ ObjCProtocolDecl *SuperPD = SuperProtocols[i];
+ Result += "\t&"; Result += "_OBJC_PROTOCOL_";
+ Result += SuperPD->getNameAsString();
+ if (i == e-1)
+ Result += "\n};\n";
+ else
+ Result += ",\n";
+ }
+ }
+}
+
+static void Write_method_list_t_initializer(RewriteModernObjC &RewriteObj,
+ ASTContext *Context, std::string &Result,
+ ArrayRef<ObjCMethodDecl *> Methods,
+ StringRef VarName,
+ StringRef TopLevelDeclName,
+ bool MethodImpl) {
+ if (Methods.size() > 0) {
+ Result += "\nstatic ";
+ Write_method_list_t_TypeDecl(Result, Methods.size());
+ Result += " "; Result += VarName;
+ Result += TopLevelDeclName;
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
+ Result += "\t"; Result += "sizeof(_objc_method)"; Result += ",\n";
+ Result += "\t"; Result += utostr(Methods.size()); Result += ",\n";
+ for (unsigned i = 0, e = Methods.size(); i < e; i++) {
+ ObjCMethodDecl *MD = Methods[i];
+ if (i == 0)
+ Result += "\t{{(struct objc_selector *)\"";
+ else
+ Result += "\t{(struct objc_selector *)\"";
+ Result += (MD)->getSelector().getAsString(); Result += "\"";
+ Result += ", ";
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl(MD, MethodTypeString);
+ Result += "\""; Result += MethodTypeString; Result += "\"";
+ Result += ", ";
+ if (!MethodImpl)
+ Result += "0";
+ else {
+ Result += "(void *)";
+ Result += RewriteObj.MethodInternalNames[MD];
+ }
+ if (i == e-1)
+ Result += "}}\n";
+ else
+ Result += "},\n";
+ }
+ Result += "};\n";
+ }
+}
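+
+// Sketch of what the routine above emits for a hypothetical class 'Foo' with a
+// single implemented instance method '-doSomething' (names and the "v16@0:8"
+// type encoding are examples only):
+//   static struct /*_method_list_t*/ {
+//     unsigned int entsize; unsigned int method_count;
+//     struct _objc_method method_list[1];
+//   } _OBJC_$_INSTANCE_METHODS_Foo
+//       __attribute__ ((used, section ("__DATA,__objc_const"))) = {
+//     sizeof(_objc_method), 1,
+//     {{(struct objc_selector *)"doSomething", "v16@0:8", (void *)_I_Foo_doSomething}}
+//   };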
+
+static void Write_prop_list_t_initializer(RewriteModernObjC &RewriteObj,
+ ASTContext *Context, std::string &Result,
+ ArrayRef<ObjCPropertyDecl *> Properties,
+ const Decl *Container,
+ StringRef VarName,
+ StringRef ProtocolName) {
+ if (Properties.size() > 0) {
+ Result += "\nstatic ";
+ Write__prop_list_t_TypeDecl(Result, Properties.size());
+ Result += " "; Result += VarName;
+ Result += ProtocolName;
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
+ Result += "\t"; Result += "sizeof(_prop_t)"; Result += ",\n";
+ Result += "\t"; Result += utostr(Properties.size()); Result += ",\n";
+ for (unsigned i = 0, e = Properties.size(); i < e; i++) {
+ ObjCPropertyDecl *PropDecl = Properties[i];
+ if (i == 0)
+ Result += "\t{{\"";
+ else
+ Result += "\t{\"";
+ Result += PropDecl->getName(); Result += "\",";
+ std::string PropertyTypeString, QuotePropertyTypeString;
+ Context->getObjCEncodingForPropertyDecl(PropDecl, Container, PropertyTypeString);
+ RewriteObj.QuoteDoublequotes(PropertyTypeString, QuotePropertyTypeString);
+ Result += "\""; Result += QuotePropertyTypeString; Result += "\"";
+ if (i == e-1)
+ Result += "}}\n";
+ else
+ Result += "},\n";
+ }
+ Result += "};\n";
+ }
+}
+
+// Metadata flags
+enum MetaDataFlags {
+ CLS = 0x0,
+ CLS_META = 0x1,
+ CLS_ROOT = 0x2,
+ OBJC2_CLS_HIDDEN = 0x10,
+ CLS_EXCEPTION = 0x20,
+
+ /// (Obsolete) ARC-specific: this class has a .release_ivars method
+ CLS_HAS_IVAR_RELEASER = 0x40,
+ /// class was compiled with -fobjc-arr
+ CLS_COMPILED_BY_ARC = 0x80 // (1<<7)
+};
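+
+// Example flag combinations (illustrative): a root class compiled with hidden
+// visibility gets CLS | OBJC2_CLS_HIDDEN | CLS_ROOT == 0x12, and its metaclass
+// gets CLS_META | OBJC2_CLS_HIDDEN | CLS_ROOT == 0x13.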
+
+static void Write__class_ro_t_initializer(ASTContext *Context, std::string &Result,
+ unsigned int flags,
+ const std::string &InstanceStart,
+ const std::string &InstanceSize,
+ ArrayRef<ObjCMethodDecl *>baseMethods,
+ ArrayRef<ObjCProtocolDecl *>baseProtocols,
+ ArrayRef<ObjCIvarDecl *>ivars,
+ ArrayRef<ObjCPropertyDecl *>Properties,
+ StringRef VarName,
+ StringRef ClassName) {
+ Result += "\nstatic struct _class_ro_t ";
+ Result += VarName; Result += ClassName;
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
+ Result += "\t";
+ Result += llvm::utostr(flags); Result += ", ";
+ Result += InstanceStart; Result += ", ";
+ Result += InstanceSize; Result += ", \n";
+ Result += "\t";
+ const llvm::Triple &Triple(Context->getTargetInfo().getTriple());
+ if (Triple.getArch() == llvm::Triple::x86_64)
+ // uint32_t const reserved; // only when building for 64bit targets
+ Result += "(unsigned int)0, \n\t";
+ // const uint8_t * const ivarLayout;
+ Result += "0, \n\t";
+ Result += "\""; Result += ClassName; Result += "\",\n\t";
+ bool metaclass = ((flags & CLS_META) != 0);
+ if (baseMethods.size() > 0) {
+ Result += "(const struct _method_list_t *)&";
+ if (metaclass)
+ Result += "_OBJC_$_CLASS_METHODS_";
+ else
+ Result += "_OBJC_$_INSTANCE_METHODS_";
+ Result += ClassName;
+ Result += ",\n\t";
+ }
+ else
+ Result += "0, \n\t";
+
+ if (!metaclass && baseProtocols.size() > 0) {
+ Result += "(const struct _objc_protocol_list *)&";
+ Result += "_OBJC_CLASS_PROTOCOLS_$_"; Result += ClassName;
+ Result += ",\n\t";
+ }
+ else
+ Result += "0, \n\t";
+
+ if (!metaclass && ivars.size() > 0) {
+ Result += "(const struct _ivar_list_t *)&";
+ Result += "_OBJC_$_INSTANCE_VARIABLES_"; Result += ClassName;
+ Result += ",\n\t";
+ }
+ else
+ Result += "0, \n\t";
+
+ // weakIvarLayout
+ Result += "0, \n\t";
+ if (!metaclass && Properties.size() > 0) {
+ Result += "(const struct _prop_list_t *)&";
+ Result += "_OBJC_$_PROP_LIST_"; Result += ClassName;
+ Result += ",\n";
+ }
+ else
+ Result += "0, \n";
+
+ Result += "};\n";
+}
+
+static void Write_class_t(ASTContext *Context, std::string &Result,
+ StringRef VarName,
+ const ObjCInterfaceDecl *CDecl, bool metaclass) {
+ bool rootClass = (!CDecl->getSuperClass());
+ const ObjCInterfaceDecl *RootClass = CDecl;
+
+ if (!rootClass) {
+ // Find the Root class
+ RootClass = CDecl->getSuperClass();
+ while (RootClass->getSuperClass()) {
+ RootClass = RootClass->getSuperClass();
+ }
+ }
+
+ if (metaclass && rootClass) {
+    // Emit a forward declaration in case the class meta-data has not been defined yet.
+ Result += "\n";
+ Result += "extern \"C\" ";
+ if (CDecl->getImplementation())
+ Result += "__declspec(dllexport) ";
+ else
+ Result += "__declspec(dllimport) ";
+
+ Result += "struct _class_t OBJC_CLASS_$_";
+ Result += CDecl->getNameAsString();
+ Result += ";\n";
+ }
+  // Also emit one for the possibility that the 'super' metadata class has not been defined yet.
+ if (!rootClass) {
+ ObjCInterfaceDecl *SuperClass = CDecl->getSuperClass();
+ Result += "\n";
+ Result += "extern \"C\" ";
+ if (SuperClass->getImplementation())
+ Result += "__declspec(dllexport) ";
+ else
+ Result += "__declspec(dllimport) ";
+
+ Result += "struct _class_t ";
+ Result += VarName;
+ Result += SuperClass->getNameAsString();
+ Result += ";\n";
+
+ if (metaclass && RootClass != SuperClass) {
+ Result += "extern \"C\" ";
+ if (RootClass->getImplementation())
+ Result += "__declspec(dllexport) ";
+ else
+ Result += "__declspec(dllimport) ";
+
+ Result += "struct _class_t ";
+ Result += VarName;
+ Result += RootClass->getNameAsString();
+ Result += ";\n";
+ }
+ }
+
+ Result += "\nextern \"C\" __declspec(dllexport) struct _class_t ";
+ Result += VarName; Result += CDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_data\"))) = {\n";
+ Result += "\t";
+ if (metaclass) {
+ if (!rootClass) {
+ Result += "0, // &"; Result += VarName;
+ Result += RootClass->getNameAsString();
+ Result += ",\n\t";
+ Result += "0, // &"; Result += VarName;
+ Result += CDecl->getSuperClass()->getNameAsString();
+ Result += ",\n\t";
+ }
+ else {
+ Result += "0, // &"; Result += VarName;
+ Result += CDecl->getNameAsString();
+ Result += ",\n\t";
+ Result += "0, // &OBJC_CLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ",\n\t";
+ }
+ }
+ else {
+ Result += "0, // &OBJC_METACLASS_$_";
+ Result += CDecl->getNameAsString();
+ Result += ",\n\t";
+ if (!rootClass) {
+ Result += "0, // &"; Result += VarName;
+ Result += CDecl->getSuperClass()->getNameAsString();
+ Result += ",\n\t";
+ }
+ else
+ Result += "0,\n\t";
+ }
+ Result += "0, // (void *)&_objc_empty_cache,\n\t";
+ Result += "0, // unused, was (void *)&_objc_empty_vtable,\n\t";
+ if (metaclass)
+ Result += "&_OBJC_METACLASS_RO_$_";
+ else
+ Result += "&_OBJC_CLASS_RO_$_";
+ Result += CDecl->getNameAsString();
+ Result += ",\n};\n";
+
+  // Add a static function to initialize some of the meta-data fields.
+  // Avoid doing it twice.
+ if (metaclass)
+ return;
+
+ const ObjCInterfaceDecl *SuperClass =
+ rootClass ? CDecl : CDecl->getSuperClass();
+
+ Result += "static void OBJC_CLASS_SETUP_$_";
+ Result += CDecl->getNameAsString();
+ Result += "(void ) {\n";
+ Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".isa = "; Result += "&OBJC_METACLASS_$_";
+ Result += RootClass->getNameAsString(); Result += ";\n";
+
+ Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".superclass = ";
+ if (rootClass)
+ Result += "&OBJC_CLASS_$_";
+ else
+ Result += "&OBJC_METACLASS_$_";
+
+ Result += SuperClass->getNameAsString(); Result += ";\n";
+
+ Result += "\tOBJC_METACLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".cache = "; Result += "&_objc_empty_cache"; Result += ";\n";
+
+ Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".isa = "; Result += "&OBJC_METACLASS_$_";
+ Result += CDecl->getNameAsString(); Result += ";\n";
+
+ if (!rootClass) {
+ Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".superclass = "; Result += "&OBJC_CLASS_$_";
+ Result += SuperClass->getNameAsString(); Result += ";\n";
+ }
+
+ Result += "\tOBJC_CLASS_$_"; Result += CDecl->getNameAsString();
+ Result += ".cache = "; Result += "&_objc_empty_cache"; Result += ";\n";
+ Result += "}\n";
+}
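+
+// Sketch of the setup function written above for a hypothetical class 'Foo'
+// whose superclass (and root class) is 'NSObject' (illustrative only):
+//   static void OBJC_CLASS_SETUP_$_Foo(void ) {
+//     OBJC_METACLASS_$_Foo.isa = &OBJC_METACLASS_$_NSObject;
+//     OBJC_METACLASS_$_Foo.superclass = &OBJC_METACLASS_$_NSObject;
+//     OBJC_METACLASS_$_Foo.cache = &_objc_empty_cache;
+//     OBJC_CLASS_$_Foo.isa = &OBJC_METACLASS_$_Foo;
+//     OBJC_CLASS_$_Foo.superclass = &OBJC_CLASS_$_NSObject;
+//     OBJC_CLASS_$_Foo.cache = &_objc_empty_cache;
+//   }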
+
+static void Write_category_t(RewriteModernObjC &RewriteObj, ASTContext *Context,
+ std::string &Result,
+ ObjCCategoryDecl *CatDecl,
+ ObjCInterfaceDecl *ClassDecl,
+ ArrayRef<ObjCMethodDecl *> InstanceMethods,
+ ArrayRef<ObjCMethodDecl *> ClassMethods,
+ ArrayRef<ObjCProtocolDecl *> RefedProtocols,
+ ArrayRef<ObjCPropertyDecl *> ClassProperties) {
+ StringRef CatName = CatDecl->getName();
+ StringRef ClassName = ClassDecl->getName();
+  // Must declare an extern class object in case this class is not implemented
+ // in this TU.
+ Result += "\n";
+ Result += "extern \"C\" ";
+ if (ClassDecl->getImplementation())
+ Result += "__declspec(dllexport) ";
+ else
+ Result += "__declspec(dllimport) ";
+
+ Result += "struct _class_t ";
+ Result += "OBJC_CLASS_$_"; Result += ClassName;
+ Result += ";\n";
+
+ Result += "\nstatic struct _category_t ";
+ Result += "_OBJC_$_CATEGORY_";
+ Result += ClassName; Result += "_$_"; Result += CatName;
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = \n";
+ Result += "{\n";
+ Result += "\t\""; Result += ClassName; Result += "\",\n";
+ Result += "\t0, // &"; Result += "OBJC_CLASS_$_"; Result += ClassName;
+ Result += ",\n";
+ if (InstanceMethods.size() > 0) {
+ Result += "\t(const struct _method_list_t *)&";
+ Result += "_OBJC_$_CATEGORY_INSTANCE_METHODS_";
+ Result += ClassName; Result += "_$_"; Result += CatName;
+ Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (ClassMethods.size() > 0) {
+ Result += "\t(const struct _method_list_t *)&";
+ Result += "_OBJC_$_CATEGORY_CLASS_METHODS_";
+ Result += ClassName; Result += "_$_"; Result += CatName;
+ Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (RefedProtocols.size() > 0) {
+ Result += "\t(const struct _protocol_list_t *)&";
+ Result += "_OBJC_CATEGORY_PROTOCOLS_$_";
+ Result += ClassName; Result += "_$_"; Result += CatName;
+ Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (ClassProperties.size() > 0) {
+ Result += "\t(const struct _prop_list_t *)&"; Result += "_OBJC_$_PROP_LIST_";
+ Result += ClassName; Result += "_$_"; Result += CatName;
+ Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ Result += "};\n";
+
+ // Add static function to initialize the class pointer in the category structure.
+ Result += "static void OBJC_CATEGORY_SETUP_$_";
+ Result += ClassDecl->getNameAsString();
+ Result += "_$_";
+ Result += CatName;
+ Result += "(void ) {\n";
+ Result += "\t_OBJC_$_CATEGORY_";
+ Result += ClassDecl->getNameAsString();
+ Result += "_$_";
+ Result += CatName;
+ Result += ".cls = "; Result += "&OBJC_CLASS_$_"; Result += ClassName;
+ Result += ";\n}\n";
+}
+
+static void Write__extendedMethodTypes_initializer(RewriteModernObjC &RewriteObj,
+ ASTContext *Context, std::string &Result,
+ ArrayRef<ObjCMethodDecl *> Methods,
+ StringRef VarName,
+ StringRef ProtocolName) {
+ if (Methods.size() == 0)
+ return;
+
+ Result += "\nstatic const char *";
+ Result += VarName; Result += ProtocolName;
+ Result += " [] __attribute__ ((used, section (\"__DATA,__objc_const\"))) = \n";
+ Result += "{\n";
+ for (unsigned i = 0, e = Methods.size(); i < e; i++) {
+ ObjCMethodDecl *MD = Methods[i];
+ std::string MethodTypeString, QuoteMethodTypeString;
+ Context->getObjCEncodingForMethodDecl(MD, MethodTypeString, true);
+ RewriteObj.QuoteDoublequotes(MethodTypeString, QuoteMethodTypeString);
+ Result += "\t\""; Result += QuoteMethodTypeString; Result += "\"";
+ if (i == e-1)
+ Result += "\n};\n";
+ else {
+ Result += ",\n";
+ }
+ }
+}
+
+static void Write_IvarOffsetVar(RewriteModernObjC &RewriteObj,
+ ASTContext *Context,
+ std::string &Result,
+ ArrayRef<ObjCIvarDecl *> Ivars,
+ ObjCInterfaceDecl *CDecl) {
+  // FIXME. Visibility of offset symbols may have to be set; for Darwin
+  // this is what happens:
+  /**
+   if (Ivar->getAccessControl() == ObjCIvarDecl::Private ||
+       Ivar->getAccessControl() == ObjCIvarDecl::Package ||
+       Class->getVisibility() == HiddenVisibility)
+       Visibility should be: HiddenVisibility;
+   else
+       Visibility should be: DefaultVisibility;
+ */
+
+ Result += "\n";
+ for (unsigned i =0, e = Ivars.size(); i < e; i++) {
+ ObjCIvarDecl *IvarDecl = Ivars[i];
+ if (Context->getLangOpts().MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_ivar$B\")) ";
+
+ if (!Context->getLangOpts().MicrosoftExt ||
+ IvarDecl->getAccessControl() == ObjCIvarDecl::Private ||
+ IvarDecl->getAccessControl() == ObjCIvarDecl::Package)
+ Result += "extern \"C\" unsigned long int ";
+ else
+ Result += "extern \"C\" __declspec(dllexport) unsigned long int ";
+ WriteInternalIvarName(CDecl, IvarDecl, Result);
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_ivar\")))";
+ Result += " = ";
+ RewriteObj.RewriteIvarOffsetComputation(IvarDecl, Result);
+ Result += ";\n";
+ }
+}
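+
+// For illustration, for a hypothetical ivar 'bar' of class 'Foo' this writes
+// (symbol name produced by WriteInternalIvarName, approximated here):
+//   extern "C" unsigned long int OBJC_IVAR_$_Foo$bar
+//       __attribute__ ((used, section ("__DATA,__objc_ivar")))
+//       = __OFFSETOFIVAR__(struct Foo, bar);
+// Under -fms-extensions the definition is also placed in the .objc_ivar$B
+// section and the computed offset uses the Foo_IMPL struct instead.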
+
+static void Write__ivar_list_t_initializer(RewriteModernObjC &RewriteObj,
+ ASTContext *Context, std::string &Result,
+ ArrayRef<ObjCIvarDecl *> Ivars,
+ StringRef VarName,
+ ObjCInterfaceDecl *CDecl) {
+ if (Ivars.size() > 0) {
+ Write_IvarOffsetVar(RewriteObj, Context, Result, Ivars, CDecl);
+
+ Result += "\nstatic ";
+ Write__ivar_list_t_TypeDecl(Result, Ivars.size());
+ Result += " "; Result += VarName;
+ Result += CDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__DATA,__objc_const\"))) = {\n";
+ Result += "\t"; Result += "sizeof(_ivar_t)"; Result += ",\n";
+ Result += "\t"; Result += utostr(Ivars.size()); Result += ",\n";
+ for (unsigned i =0, e = Ivars.size(); i < e; i++) {
+ ObjCIvarDecl *IvarDecl = Ivars[i];
+ if (i == 0)
+ Result += "\t{{";
+ else
+ Result += "\t {";
+ Result += "(unsigned long int *)&";
+ WriteInternalIvarName(CDecl, IvarDecl, Result);
+ Result += ", ";
+
+ Result += "\""; Result += IvarDecl->getName(); Result += "\", ";
+ std::string IvarTypeString, QuoteIvarTypeString;
+ Context->getObjCEncodingForType(IvarDecl->getType(), IvarTypeString,
+ IvarDecl);
+ RewriteObj.QuoteDoublequotes(IvarTypeString, QuoteIvarTypeString);
+ Result += "\""; Result += QuoteIvarTypeString; Result += "\", ";
+
+      // FIXME. This alignment represents the host alignment and needs to be
+      // changed to represent the target alignment.
+ unsigned Align = Context->getTypeAlign(IvarDecl->getType())/8;
+ Align = llvm::Log2_32(Align);
+ Result += llvm::utostr(Align); Result += ", ";
+ CharUnits Size = Context->getTypeSizeInChars(IvarDecl->getType());
+ Result += llvm::utostr(Size.getQuantity());
+ if (i == e-1)
+ Result += "}}\n";
+ else
+ Result += "},\n";
+ }
+ Result += "};\n";
+ }
+}
+
+/// RewriteObjCProtocolMetaData - Rewrite protocols meta-data.
+void RewriteModernObjC::RewriteObjCProtocolMetaData(ObjCProtocolDecl *PDecl,
+ std::string &Result) {
+
+ // Do not synthesize the protocol more than once.
+ if (ObjCSynthesizedProtocols.count(PDecl->getCanonicalDecl()))
+ return;
+ WriteModernMetadataDeclarations(Context, Result);
+
+ if (ObjCProtocolDecl *Def = PDecl->getDefinition())
+ PDecl = Def;
+ // Must write out all protocol definitions in current qualifier list,
+ // and in their nested qualifiers before writing out current definition.
+ for (ObjCProtocolDecl::protocol_iterator I = PDecl->protocol_begin(),
+ E = PDecl->protocol_end(); I != E; ++I)
+ RewriteObjCProtocolMetaData(*I, Result);
+
+ // Construct method lists.
+ std::vector<ObjCMethodDecl *> InstanceMethods, ClassMethods;
+ std::vector<ObjCMethodDecl *> OptInstanceMethods, OptClassMethods;
+ for (ObjCProtocolDecl::instmeth_iterator
+ I = PDecl->instmeth_begin(), E = PDecl->instmeth_end();
+ I != E; ++I) {
+ ObjCMethodDecl *MD = *I;
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptInstanceMethods.push_back(MD);
+ } else {
+ InstanceMethods.push_back(MD);
+ }
+ }
+
+ for (ObjCProtocolDecl::classmeth_iterator
+ I = PDecl->classmeth_begin(), E = PDecl->classmeth_end();
+ I != E; ++I) {
+ ObjCMethodDecl *MD = *I;
+ if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
+ OptClassMethods.push_back(MD);
+ } else {
+ ClassMethods.push_back(MD);
+ }
+ }
+ std::vector<ObjCMethodDecl *> AllMethods;
+ for (unsigned i = 0, e = InstanceMethods.size(); i < e; i++)
+ AllMethods.push_back(InstanceMethods[i]);
+ for (unsigned i = 0, e = ClassMethods.size(); i < e; i++)
+ AllMethods.push_back(ClassMethods[i]);
+ for (unsigned i = 0, e = OptInstanceMethods.size(); i < e; i++)
+ AllMethods.push_back(OptInstanceMethods[i]);
+ for (unsigned i = 0, e = OptClassMethods.size(); i < e; i++)
+ AllMethods.push_back(OptClassMethods[i]);
+
+ Write__extendedMethodTypes_initializer(*this, Context, Result,
+ AllMethods,
+ "_OBJC_PROTOCOL_METHOD_TYPES_",
+ PDecl->getNameAsString());
+ // Protocol's super protocol list
+ std::vector<ObjCProtocolDecl *> SuperProtocols;
+ for (ObjCProtocolDecl::protocol_iterator I = PDecl->protocol_begin(),
+ E = PDecl->protocol_end(); I != E; ++I)
+ SuperProtocols.push_back(*I);
+
+ Write_protocol_list_initializer(Context, Result, SuperProtocols,
+ "_OBJC_PROTOCOL_REFS_",
+ PDecl->getNameAsString());
+
+ Write_method_list_t_initializer(*this, Context, Result, InstanceMethods,
+ "_OBJC_PROTOCOL_INSTANCE_METHODS_",
+ PDecl->getNameAsString(), false);
+
+ Write_method_list_t_initializer(*this, Context, Result, ClassMethods,
+ "_OBJC_PROTOCOL_CLASS_METHODS_",
+ PDecl->getNameAsString(), false);
+
+ Write_method_list_t_initializer(*this, Context, Result, OptInstanceMethods,
+ "_OBJC_PROTOCOL_OPT_INSTANCE_METHODS_",
+ PDecl->getNameAsString(), false);
+
+ Write_method_list_t_initializer(*this, Context, Result, OptClassMethods,
+ "_OBJC_PROTOCOL_OPT_CLASS_METHODS_",
+ PDecl->getNameAsString(), false);
+
+ // Protocol's property metadata.
+ std::vector<ObjCPropertyDecl *> ProtocolProperties;
+ for (ObjCContainerDecl::prop_iterator I = PDecl->prop_begin(),
+ E = PDecl->prop_end(); I != E; ++I)
+ ProtocolProperties.push_back(*I);
+
+ Write_prop_list_t_initializer(*this, Context, Result, ProtocolProperties,
+ /* Container */0,
+ "_OBJC_PROTOCOL_PROPERTIES_",
+ PDecl->getNameAsString());
+
+  // Write out root metadata for the current protocol: struct _protocol_t
+ Result += "\n";
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".datacoal_nt$B\")) ";
+ Result += "struct _protocol_t _OBJC_PROTOCOL_";
+ Result += PDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__DATA,__datacoal_nt,coalesced\"))) = {\n";
+  Result += "\t0,\n"; // id isa; is null
+ Result += "\t\""; Result += PDecl->getNameAsString(); Result += "\",\n";
+ if (SuperProtocols.size() > 0) {
+ Result += "\t(const struct _protocol_list_t *)&"; Result += "_OBJC_PROTOCOL_REFS_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+ if (InstanceMethods.size() > 0) {
+ Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_INSTANCE_METHODS_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (ClassMethods.size() > 0) {
+ Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_CLASS_METHODS_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (OptInstanceMethods.size() > 0) {
+ Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_OPT_INSTANCE_METHODS_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (OptClassMethods.size() > 0) {
+ Result += "\t(const struct method_list_t *)&_OBJC_PROTOCOL_OPT_CLASS_METHODS_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ if (ProtocolProperties.size() > 0) {
+ Result += "\t(const struct _prop_list_t *)&_OBJC_PROTOCOL_PROPERTIES_";
+ Result += PDecl->getNameAsString(); Result += ",\n";
+ }
+ else
+ Result += "\t0,\n";
+
+ Result += "\t"; Result += "sizeof(_protocol_t)"; Result += ",\n";
+ Result += "\t0,\n";
+
+ if (AllMethods.size() > 0) {
+ Result += "\t(const char **)&"; Result += "_OBJC_PROTOCOL_METHOD_TYPES_";
+ Result += PDecl->getNameAsString();
+ Result += "\n};\n";
+ }
+ else
+ Result += "\t0\n};\n";
+
+ // Use this protocol meta-data to build protocol list table in section
+ // .objc_protolist$B
+ // Unspecified visibility means 'private extern'.
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_protolist$B\")) ";
+ Result += "struct _protocol_t *";
+ Result += "_OBJC_LABEL_PROTOCOL_$_"; Result += PDecl->getNameAsString();
+ Result += " = &_OBJC_PROTOCOL_"; Result += PDecl->getNameAsString();
+ Result += ";\n";
+
+ // Mark this protocol as having been generated.
+ if (!ObjCSynthesizedProtocols.insert(PDecl->getCanonicalDecl()))
+ llvm_unreachable("protocol already synthesized");
+
+}
+
+void RewriteModernObjC::RewriteObjCProtocolListMetaData(
+ const ObjCList<ObjCProtocolDecl> &Protocols,
+ StringRef prefix, StringRef ClassName,
+ std::string &Result) {
+ if (Protocols.empty()) return;
+
+ for (unsigned i = 0; i != Protocols.size(); i++)
+ RewriteObjCProtocolMetaData(Protocols[i], Result);
+
+  // Output the top level protocol meta-data for the class.
+ /* struct _objc_protocol_list {
+ struct _objc_protocol_list *next;
+ int protocol_count;
+ struct _objc_protocol *class_protocols[];
+ }
+ */
+ Result += "\n";
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".cat_cls_meth$B\")) ";
+ Result += "static struct {\n";
+ Result += "\tstruct _objc_protocol_list *next;\n";
+ Result += "\tint protocol_count;\n";
+ Result += "\tstruct _objc_protocol *class_protocols[";
+ Result += utostr(Protocols.size());
+ Result += "];\n} _OBJC_";
+ Result += prefix;
+ Result += "_PROTOCOLS_";
+ Result += ClassName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= "
+ "{\n\t0, ";
+ Result += utostr(Protocols.size());
+ Result += "\n";
+
+ Result += "\t,{&_OBJC_PROTOCOL_";
+ Result += Protocols[0]->getNameAsString();
+ Result += " \n";
+
+ for (unsigned i = 1; i != Protocols.size(); i++) {
+ Result += "\t ,&_OBJC_PROTOCOL_";
+ Result += Protocols[i]->getNameAsString();
+ Result += "\n";
+ }
+ Result += "\t }\n};\n";
+}
+
+/// hasObjCExceptionAttribute - Return true if this class or any super
+/// class has the __objc_exception__ attribute.
+/// FIXME. Move this to ASTContext.cpp as it is also used for IRGen.
+static bool hasObjCExceptionAttribute(ASTContext &Context,
+ const ObjCInterfaceDecl *OID) {
+ if (OID->hasAttr<ObjCExceptionAttr>())
+ return true;
+ if (const ObjCInterfaceDecl *Super = OID->getSuperClass())
+ return hasObjCExceptionAttribute(Context, Super);
+ return false;
+}
+
+void RewriteModernObjC::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ std::string &Result) {
+ ObjCInterfaceDecl *CDecl = IDecl->getClassInterface();
+
+ // Explicitly declared @interface's are already synthesized.
+ if (CDecl->isImplicitInterfaceDecl())
+    assert(false &&
+           "Legacy implicit interface rewriting not supported in modern abi");
+
+ WriteModernMetadataDeclarations(Context, Result);
+ SmallVector<ObjCIvarDecl *, 8> IVars;
+
+ for (ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin();
+ IVD; IVD = IVD->getNextIvar()) {
+ // Ignore unnamed bit-fields.
+ if (!IVD->getDeclName())
+ continue;
+ IVars.push_back(IVD);
+ }
+
+ Write__ivar_list_t_initializer(*this, Context, Result, IVars,
+ "_OBJC_$_INSTANCE_VARIABLES_",
+ CDecl);
+
+ // Build _objc_method_list for class's instance methods if needed
+ SmallVector<ObjCMethodDecl *, 32>
+ InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end());
+
+ // If any of our property implementations have associated getters or
+ // setters, produce metadata for them as well.
+ for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
+ PropEnd = IDecl->propimpl_end();
+ Prop != PropEnd; ++Prop) {
+ if ((*Prop)->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ continue;
+ if (!(*Prop)->getPropertyIvarDecl())
+ continue;
+ ObjCPropertyDecl *PD = (*Prop)->getPropertyDecl();
+ if (!PD)
+ continue;
+ if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
+ if (!Getter->isDefined())
+ InstanceMethods.push_back(Getter);
+ if (PD->isReadOnly())
+ continue;
+ if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
+ if (!Setter->isDefined())
+ InstanceMethods.push_back(Setter);
+ }
+
+ Write_method_list_t_initializer(*this, Context, Result, InstanceMethods,
+ "_OBJC_$_INSTANCE_METHODS_",
+ IDecl->getNameAsString(), true);
+
+ SmallVector<ObjCMethodDecl *, 32>
+ ClassMethods(IDecl->classmeth_begin(), IDecl->classmeth_end());
+
+ Write_method_list_t_initializer(*this, Context, Result, ClassMethods,
+ "_OBJC_$_CLASS_METHODS_",
+ IDecl->getNameAsString(), true);
+
+  // Emit meta-data for each protocol referenced in the class declaration
+  // before emitting the class's own protocol list.
+ std::vector<ObjCProtocolDecl *> RefedProtocols;
+ const ObjCList<ObjCProtocolDecl> &Protocols = CDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end();
+ I != E; ++I) {
+ RefedProtocols.push_back(*I);
+ // Must write out all protocol definitions in current qualifier list,
+ // and in their nested qualifiers before writing out current definition.
+ RewriteObjCProtocolMetaData(*I, Result);
+ }
+
+ Write_protocol_list_initializer(Context, Result,
+ RefedProtocols,
+ "_OBJC_CLASS_PROTOCOLS_$_",
+ IDecl->getNameAsString());
+
+ // Protocol's property metadata.
+ std::vector<ObjCPropertyDecl *> ClassProperties;
+ for (ObjCContainerDecl::prop_iterator I = CDecl->prop_begin(),
+ E = CDecl->prop_end(); I != E; ++I)
+ ClassProperties.push_back(*I);
+
+ Write_prop_list_t_initializer(*this, Context, Result, ClassProperties,
+ /* Container */IDecl,
+ "_OBJC_$_PROP_LIST_",
+ CDecl->getNameAsString());
+
+
+ // Data for initializing _class_ro_t metaclass meta-data
+ uint32_t flags = CLS_META;
+ std::string InstanceSize;
+ std::string InstanceStart;
+
+
+ bool classIsHidden = CDecl->getVisibility() == HiddenVisibility;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+
+ if (!CDecl->getSuperClass())
+ // class is root
+ flags |= CLS_ROOT;
+ InstanceSize = "sizeof(struct _class_t)";
+ InstanceStart = InstanceSize;
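+  // Emit the metaclass's _class_ro_t: the class methods act as its method
+  // list, and the protocol, ivar, and property lists are left null here.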
+ Write__class_ro_t_initializer(Context, Result, flags,
+ InstanceStart, InstanceSize,
+ ClassMethods,
+ 0,
+ 0,
+ 0,
+ "_OBJC_METACLASS_RO_$_",
+ CDecl->getNameAsString());
+
+
+ // Data for initializing _class_ro_t meta-data
+ flags = CLS;
+ if (classIsHidden)
+ flags |= OBJC2_CLS_HIDDEN;
+
+ if (hasObjCExceptionAttribute(*Context, CDecl))
+ flags |= CLS_EXCEPTION;
+
+ if (!CDecl->getSuperClass())
+ // class is root
+ flags |= CLS_ROOT;
+
+ InstanceSize.clear();
+ InstanceStart.clear();
+ if (!ObjCSynthesizedStructs.count(CDecl)) {
+ InstanceSize = "0";
+ InstanceStart = "0";
+ }
+ else {
+ InstanceSize = "sizeof(struct ";
+ InstanceSize += CDecl->getNameAsString();
+ InstanceSize += "_IMPL)";
+
+ ObjCIvarDecl *IVD = CDecl->all_declared_ivar_begin();
+ if (IVD) {
+ RewriteIvarOffsetComputation(IVD, InstanceStart);
+ }
+ else
+ InstanceStart = InstanceSize;
+ }
+ Write__class_ro_t_initializer(Context, Result, flags,
+ InstanceStart, InstanceSize,
+ InstanceMethods,
+ RefedProtocols,
+ IVars,
+ ClassProperties,
+ "_OBJC_CLASS_RO_$_",
+ CDecl->getNameAsString());
+
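+  // Emit the _class_t definitions for the metaclass and the class itself.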
+ Write_class_t(Context, Result,
+ "OBJC_METACLASS_$_",
+ CDecl, /*metaclass*/true);
+
+ Write_class_t(Context, Result,
+ "OBJC_CLASS_$_",
+ CDecl, /*metaclass*/false);
+
+ if (ImplementationIsNonLazy(IDecl))
+ DefinedNonLazyClasses.push_back(CDecl);
+
+}
+
+void RewriteModernObjC::RewriteClassSetupInitHook(std::string &Result) {
+ int ClsDefCount = ClassImplementation.size();
+ if (!ClsDefCount)
+ return;
+ Result += "#pragma section(\".objc_inithooks$B\", long, read, write)\n";
+ Result += "__declspec(allocate(\".objc_inithooks$B\")) ";
+ Result += "static void *OBJC_CLASS_SETUP[] = {\n";
+ for (int i = 0; i < ClsDefCount; i++) {
+ ObjCImplementationDecl *IDecl = ClassImplementation[i];
+ ObjCInterfaceDecl *CDecl = IDecl->getClassInterface();
+ Result += "\t(void *)&OBJC_CLASS_SETUP_$_";
+ Result += CDecl->getName(); Result += ",\n";
+ }
+ Result += "};\n";
+}
+
+void RewriteModernObjC::RewriteMetaDataIntoBuffer(std::string &Result) {
+ int ClsDefCount = ClassImplementation.size();
+ int CatDefCount = CategoryImplementation.size();
+
+ // For each implemented class, write out all its meta data.
+ for (int i = 0; i < ClsDefCount; i++)
+ RewriteObjCClassMetaData(ClassImplementation[i], Result);
+
+ RewriteClassSetupInitHook(Result);
+
+ // For each implemented category, write out all its meta data.
+ for (int i = 0; i < CatDefCount; i++)
+ RewriteObjCCategoryImplDecl(CategoryImplementation[i], Result);
+
+ RewriteCategorySetupInitHook(Result);
+
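+  // Emit the static array of implemented class pointers, placed in the
+  // __objc_classlist section (.objc_classlist$B under Microsoft extensions).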
+ if (ClsDefCount > 0) {
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_classlist$B\")) ";
+ Result += "static struct _class_t *L_OBJC_LABEL_CLASS_$ [";
+ Result += llvm::utostr(ClsDefCount); Result += "]";
+ Result +=
+ " __attribute__((used, section (\"__DATA, __objc_classlist,"
+ "regular,no_dead_strip\")))= {\n";
+ for (int i = 0; i < ClsDefCount; i++) {
+ Result += "\t&OBJC_CLASS_$_";
+ Result += ClassImplementation[i]->getNameAsString();
+ Result += ",\n";
+ }
+ Result += "};\n";
+
+ if (!DefinedNonLazyClasses.empty()) {
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_nlclslist$B\")) \n";
+ Result += "static struct _class_t *_OBJC_LABEL_NONLAZY_CLASS_$[] = {\n\t";
+ for (unsigned i = 0, e = DefinedNonLazyClasses.size(); i < e; i++) {
+ Result += "\t&OBJC_CLASS_$_"; Result += DefinedNonLazyClasses[i]->getNameAsString();
+ Result += ",\n";
+ }
+ Result += "};\n";
+ }
+ }
+
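+  // Likewise, emit the array of implemented category pointers into the
+  // __objc_catlist section (.objc_catlist$B under Microsoft extensions).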
+ if (CatDefCount > 0) {
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_catlist$B\")) ";
+ Result += "static struct _category_t *L_OBJC_LABEL_CATEGORY_$ [";
+ Result += llvm::utostr(CatDefCount); Result += "]";
+ Result +=
+ " __attribute__((used, section (\"__DATA, __objc_catlist,"
+ "regular,no_dead_strip\")))= {\n";
+ for (int i = 0; i < CatDefCount; i++) {
+ Result += "\t&_OBJC_$_CATEGORY_";
+ Result +=
+ CategoryImplementation[i]->getClassInterface()->getNameAsString();
+ Result += "_$_";
+ Result += CategoryImplementation[i]->getNameAsString();
+ Result += ",\n";
+ }
+ Result += "};\n";
+ }
+
+ if (!DefinedNonLazyCategories.empty()) {
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_nlcatlist$B\")) \n";
+ Result += "static struct _category_t *_OBJC_LABEL_NONLAZY_CATEGORY_$[] = {\n\t";
+ for (unsigned i = 0, e = DefinedNonLazyCategories.size(); i < e; i++) {
+ Result += "\t&_OBJC_$_CATEGORY_";
+ Result +=
+ DefinedNonLazyCategories[i]->getClassInterface()->getNameAsString();
+ Result += "_$_";
+ Result += DefinedNonLazyCategories[i]->getNameAsString();
+ Result += ",\n";
+ }
+ Result += "};\n";
+ }
+}
+
+void RewriteModernObjC::WriteImageInfo(std::string &Result) {
+ if (LangOpts.MicrosoftExt)
+ Result += "__declspec(allocate(\".objc_imageinfo$B\")) \n";
+
+ Result += "static struct IMAGE_INFO { unsigned version; unsigned flag; } ";
+ // version 0, ObjCABI is 2
+ Result += "_OBJC_IMAGE_INFO = { 0, 2 };\n";
+}
+
+/// RewriteObjCCategoryImplDecl - Rewrite metadata for each category
+/// implementation.
+void RewriteModernObjC::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl,
+ std::string &Result) {
+ WriteModernMetadataDeclarations(Context, Result);
+ ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface();
+ // Find category declaration for this implementation.
+ ObjCCategoryDecl *CDecl=0;
+ for (CDecl = ClassDecl->getCategoryList(); CDecl;
+ CDecl = CDecl->getNextClassCategory())
+ if (CDecl->getIdentifier() == IDecl->getIdentifier())
+ break;
+
+ std::string FullCategoryName = ClassDecl->getNameAsString();
+ FullCategoryName += "_$_";
+ FullCategoryName += CDecl->getNameAsString();
+
+ // Build _objc_method_list for class's instance methods if needed
+ SmallVector<ObjCMethodDecl *, 32>
+ InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end());
+
+ // If any of our property implementations have associated getters or
+ // setters, produce metadata for them as well.
+ for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
+ PropEnd = IDecl->propimpl_end();
+ Prop != PropEnd; ++Prop) {
+ if ((*Prop)->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ continue;
+ if (!(*Prop)->getPropertyIvarDecl())
+ continue;
+ ObjCPropertyDecl *PD = (*Prop)->getPropertyDecl();
+ if (!PD)
+ continue;
+ if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
+ InstanceMethods.push_back(Getter);
+ if (PD->isReadOnly())
+ continue;
+ if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
+ InstanceMethods.push_back(Setter);
+ }
+
+ Write_method_list_t_initializer(*this, Context, Result, InstanceMethods,
+ "_OBJC_$_CATEGORY_INSTANCE_METHODS_",
+ FullCategoryName, true);
+
+ SmallVector<ObjCMethodDecl *, 32>
+ ClassMethods(IDecl->classmeth_begin(), IDecl->classmeth_end());
+
+ Write_method_list_t_initializer(*this, Context, Result, ClassMethods,
+ "_OBJC_$_CATEGORY_CLASS_METHODS_",
+ FullCategoryName, true);
+
+  // Emit meta-data for each protocol referenced in the category declaration
+  // before emitting the category's own protocol list.
+ std::vector<ObjCProtocolDecl *> RefedProtocols;
+  for (ObjCInterfaceDecl::protocol_iterator I = CDecl->protocol_begin(),
+       E = CDecl->protocol_end();
+       I != E; ++I) {
+ RefedProtocols.push_back(*I);
+ // Must write out all protocol definitions in current qualifier list,
+ // and in their nested qualifiers before writing out current definition.
+ RewriteObjCProtocolMetaData(*I, Result);
+ }
+
+ Write_protocol_list_initializer(Context, Result,
+ RefedProtocols,
+ "_OBJC_CATEGORY_PROTOCOLS_$_",
+ FullCategoryName);
+
+ // Protocol's property metadata.
+ std::vector<ObjCPropertyDecl *> ClassProperties;
+ for (ObjCContainerDecl::prop_iterator I = CDecl->prop_begin(),
+ E = CDecl->prop_end(); I != E; ++I)
+ ClassProperties.push_back(*I);
+
+ Write_prop_list_t_initializer(*this, Context, Result, ClassProperties,
+ /* Container */0,
+ "_OBJC_$_PROP_LIST_",
+ FullCategoryName);
+
+ Write_category_t(*this, Context, Result,
+ CDecl,
+ ClassDecl,
+ InstanceMethods,
+ ClassMethods,
+ RefedProtocols,
+ ClassProperties);
+
+ // Determine if this category is also "non-lazy".
+ if (ImplementationIsNonLazy(IDecl))
+ DefinedNonLazyCategories.push_back(CDecl);
+
+}
+
+void RewriteModernObjC::RewriteCategorySetupInitHook(std::string &Result) {
+ int CatDefCount = CategoryImplementation.size();
+ if (!CatDefCount)
+ return;
+ Result += "#pragma section(\".objc_inithooks$B\", long, read, write)\n";
+ Result += "__declspec(allocate(\".objc_inithooks$B\")) ";
+ Result += "static void *OBJC_CATEGORY_SETUP[] = {\n";
+ for (int i = 0; i < CatDefCount; i++) {
+ ObjCCategoryImplDecl *IDecl = CategoryImplementation[i];
+ ObjCCategoryDecl *CatDecl= IDecl->getCategoryDecl();
+ ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface();
+ Result += "\t(void *)&OBJC_CATEGORY_SETUP_$_";
+ Result += ClassDecl->getName();
+ Result += "_$_";
+ Result += CatDecl->getName();
+ Result += ",\n";
+ }
+ Result += "};\n";
+}
+
+/// RewriteObjCMethodsMetaData - Rewrite methods metadata for instance or
+/// class methods.
+template<typename MethodIterator>
+void RewriteModernObjC::RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
+ MethodIterator MethodEnd,
+ bool IsInstanceMethod,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result) {
+ if (MethodBegin == MethodEnd) return;
+
+ if (!objc_impl_method) {
+ /* struct _objc_method {
+ SEL _cmd;
+ char *method_types;
+ void *_imp;
+ }
+ */
+ Result += "\nstruct _objc_method {\n";
+ Result += "\tSEL _cmd;\n";
+ Result += "\tchar *method_types;\n";
+ Result += "\tvoid *_imp;\n";
+ Result += "};\n";
+
+ objc_impl_method = true;
+ }
+
+ // Build _objc_method_list for class's methods if needed
+
+ /* struct {
+ struct _objc_method_list *next_method;
+ int method_count;
+ struct _objc_method method_list[];
+ }
+ */
+ unsigned NumMethods = std::distance(MethodBegin, MethodEnd);
+ Result += "\n";
+ if (LangOpts.MicrosoftExt) {
+ if (IsInstanceMethod)
+ Result += "__declspec(allocate(\".inst_meth$B\")) ";
+ else
+ Result += "__declspec(allocate(\".cls_meth$B\")) ";
+ }
+ Result += "static struct {\n";
+ Result += "\tstruct _objc_method_list *next_method;\n";
+ Result += "\tint method_count;\n";
+ Result += "\tstruct _objc_method method_list[";
+ Result += utostr(NumMethods);
+ Result += "];\n} _OBJC_";
+ Result += prefix;
+ Result += IsInstanceMethod ? "INSTANCE" : "CLASS";
+ Result += "_METHODS_";
+ Result += ClassName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __";
+ Result += IsInstanceMethod ? "inst" : "cls";
+ Result += "_meth\")))= ";
+ Result += "{\n\t0, " + utostr(NumMethods) + "\n";
+
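+  // Emit the first entry, which opens the method_list array; the remaining
+  // methods are appended by the loop below.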
+ Result += "\t,{{(SEL)\"";
+ Result += (*MethodBegin)->getSelector().getAsString().c_str();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\", (void *)";
+ Result += MethodInternalNames[*MethodBegin];
+ Result += "}\n";
+ for (++MethodBegin; MethodBegin != MethodEnd; ++MethodBegin) {
+ Result += "\t ,{(SEL)\"";
+ Result += (*MethodBegin)->getSelector().getAsString().c_str();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\", (void *)";
+ Result += MethodInternalNames[*MethodBegin];
+ Result += "}\n";
+ }
+ Result += "\t }\n};\n";
+}
+
+Stmt *RewriteModernObjC::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
+ SourceRange OldRange = IV->getSourceRange();
+ Expr *BaseExpr = IV->getBase();
+
+ // Rewrite the base, but without actually doing replaces.
+ {
+ DisableReplaceStmtScope S(*this);
+ BaseExpr = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(BaseExpr));
+ IV->setBase(BaseExpr);
+ }
+
+ ObjCIvarDecl *D = IV->getDecl();
+
+ Expr *Replacement = IV;
+
+ if (BaseExpr->getType()->isObjCObjectPointerType()) {
+ const ObjCInterfaceType *iFaceDecl =
+ dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
+ assert(iFaceDecl && "RewriteObjCIvarRefExpr - iFaceDecl is null");
+ // lookup which class implements the instance variable.
+ ObjCInterfaceDecl *clsDeclared = 0;
+ iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(),
+ clsDeclared);
+ assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
+
+ // Build name of symbol holding ivar offset.
+ std::string IvarOffsetName;
+ WriteInternalIvarName(clsDeclared, D, IvarOffsetName);
+
+ ReferencedIvars[clsDeclared].insert(D);
+
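+    // The ivar access is rewritten as (*(IvarT *)((char *)base + IvarOffsetSymbol)),
+    // where IvarOffsetSymbol is an extern unsigned long holding the ivar's offset.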
+    // Cast the base object pointer to "char *" so the offset can be added to it.
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->CharTy),
+ CK_BitCast,
+ BaseExpr);
+ VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), &Context->Idents.get(IvarOffsetName),
+ Context->UnsignedLongTy, 0, SC_Extern, SC_None);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false,
+ Context->UnsignedLongTy, VK_LValue,
+ SourceLocation());
+ BinaryOperator *addExpr =
+ new (Context) BinaryOperator(castExpr, DRE, BO_Add,
+ Context->getPointerType(Context->CharTy),
+ VK_RValue, OK_Ordinary, SourceLocation());
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(),
+ SourceLocation(),
+ addExpr);
+ QualType IvarT = D->getType();
+ convertObjCTypeToCStyleType(IvarT);
+ QualType castT = Context->getPointerType(IvarT);
+
+ castExpr = NoTypeInfoCStyleCastExpr(Context,
+ castT,
+ CK_BitCast,
+ PE);
+ Expr *Exp = new (Context) UnaryOperator(castExpr, UO_Deref, IvarT,
+ VK_LValue, OK_Ordinary,
+ SourceLocation());
+ PE = new (Context) ParenExpr(OldRange.getBegin(),
+ OldRange.getEnd(),
+ Exp);
+
+ Replacement = PE;
+ }
+
+ ReplaceStmtWithRange(IV, Replacement, OldRange);
+ return Replacement;
+}
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp
new file mode 100644
index 0000000..9c0737f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteObjC.cpp
@@ -0,0 +1,6018 @@
+//===--- RewriteObjC.cpp - Playground for the code rewriter ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Hacks and fun related to the code rewriter.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/ASTConsumers.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/AST/AST.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/DenseSet.h"
+
+using namespace clang;
+using llvm::utostr;
+
+namespace {
+ class RewriteObjC : public ASTConsumer {
+ protected:
+
+ enum {
+ BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)),
+ block, ... */
+ BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */
+ BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the
+ __block variable */
+ BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy
+ helpers */
+ BLOCK_BYREF_CALLER = 128, /* called from __block (byref) copy/dispose
+ support routines */
+ BLOCK_BYREF_CURRENT_MAX = 256
+ };
+
+ enum {
+ BLOCK_NEEDS_FREE = (1 << 24),
+ BLOCK_HAS_COPY_DISPOSE = (1 << 25),
+ BLOCK_HAS_CXX_OBJ = (1 << 26),
+ BLOCK_IS_GC = (1 << 27),
+ BLOCK_IS_GLOBAL = (1 << 28),
+ BLOCK_HAS_DESCRIPTOR = (1 << 29)
+ };
+ static const int OBJC_ABI_VERSION = 7;
+
+ Rewriter Rewrite;
+ DiagnosticsEngine &Diags;
+ const LangOptions &LangOpts;
+ ASTContext *Context;
+ SourceManager *SM;
+ TranslationUnitDecl *TUDecl;
+ FileID MainFileID;
+ const char *MainFileStart, *MainFileEnd;
+ Stmt *CurrentBody;
+ ParentMap *PropParentMap; // created lazily.
+ std::string InFileName;
+ raw_ostream* OutFile;
+ std::string Preamble;
+
+ TypeDecl *ProtocolTypeDecl;
+ VarDecl *GlobalVarDecl;
+ unsigned RewriteFailedDiag;
+ // ObjC string constant support.
+ unsigned NumObjCStringLiterals;
+ VarDecl *ConstantStringClassReference;
+ RecordDecl *NSStringRecord;
+
+ // ObjC foreach break/continue generation support.
+ int BcLabelCount;
+
+ unsigned TryFinallyContainsReturnDiag;
+ // Needed for super.
+ ObjCMethodDecl *CurMethodDef;
+ RecordDecl *SuperStructDecl;
+ RecordDecl *ConstantStringDecl;
+
+ FunctionDecl *MsgSendFunctionDecl;
+ FunctionDecl *MsgSendSuperFunctionDecl;
+ FunctionDecl *MsgSendStretFunctionDecl;
+ FunctionDecl *MsgSendSuperStretFunctionDecl;
+ FunctionDecl *MsgSendFpretFunctionDecl;
+ FunctionDecl *GetClassFunctionDecl;
+ FunctionDecl *GetMetaClassFunctionDecl;
+ FunctionDecl *GetSuperClassFunctionDecl;
+ FunctionDecl *SelGetUidFunctionDecl;
+ FunctionDecl *CFStringFunctionDecl;
+ FunctionDecl *SuperContructorFunctionDecl;
+ FunctionDecl *CurFunctionDef;
+ FunctionDecl *CurFunctionDeclToDeclareForBlock;
+
+ /* Misc. containers needed for meta-data rewrite. */
+ SmallVector<ObjCImplementationDecl *, 8> ClassImplementation;
+ SmallVector<ObjCCategoryImplDecl *, 8> CategoryImplementation;
+ llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCSynthesizedStructs;
+ llvm::SmallPtrSet<ObjCProtocolDecl*, 8> ObjCSynthesizedProtocols;
+ llvm::SmallPtrSet<ObjCInterfaceDecl*, 8> ObjCForwardDecls;
+ llvm::DenseMap<ObjCMethodDecl*, std::string> MethodInternalNames;
+ SmallVector<Stmt *, 32> Stmts;
+ SmallVector<int, 8> ObjCBcLabelNo;
+ // Remember all the @protocol(<expr>) expressions.
+ llvm::SmallPtrSet<ObjCProtocolDecl *, 32> ProtocolExprDecls;
+
+ llvm::DenseSet<uint64_t> CopyDestroyCache;
+
+ // Block expressions.
+ SmallVector<BlockExpr *, 32> Blocks;
+ SmallVector<int, 32> InnerDeclRefsCount;
+ SmallVector<DeclRefExpr *, 32> InnerDeclRefs;
+
+ SmallVector<DeclRefExpr *, 32> BlockDeclRefs;
+
+ // Block related declarations.
+ SmallVector<ValueDecl *, 8> BlockByCopyDecls;
+ llvm::SmallPtrSet<ValueDecl *, 8> BlockByCopyDeclsPtrSet;
+ SmallVector<ValueDecl *, 8> BlockByRefDecls;
+ llvm::SmallPtrSet<ValueDecl *, 8> BlockByRefDeclsPtrSet;
+ llvm::DenseMap<ValueDecl *, unsigned> BlockByRefDeclNo;
+ llvm::SmallPtrSet<ValueDecl *, 8> ImportedBlockDecls;
+ llvm::SmallPtrSet<VarDecl *, 8> ImportedLocalExternalDecls;
+
+ llvm::DenseMap<BlockExpr *, std::string> RewrittenBlockExprs;
+
+    // This maps an original source AST to its rewritten form. This allows
+ // us to avoid rewriting the same node twice (which is very uncommon).
+ // This is needed to support some of the exotic property rewriting.
+ llvm::DenseMap<Stmt *, Stmt *> ReplacedNodes;
+
+ // Needed for header files being rewritten
+ bool IsHeader;
+ bool SilenceRewriteMacroWarning;
+ bool objc_impl_method;
+
+ bool DisableReplaceStmt;
+ class DisableReplaceStmtScope {
+ RewriteObjC &R;
+ bool SavedValue;
+
+ public:
+ DisableReplaceStmtScope(RewriteObjC &R)
+ : R(R), SavedValue(R.DisableReplaceStmt) {
+ R.DisableReplaceStmt = true;
+ }
+ ~DisableReplaceStmtScope() {
+ R.DisableReplaceStmt = SavedValue;
+ }
+ };
+ void InitializeCommon(ASTContext &context);
+
+ public:
+
+ // Top Level Driver code.
+ virtual bool HandleTopLevelDecl(DeclGroupRef D) {
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
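+        // If the group contains a class or protocol declaration that is not a
+        // definition, rewrite the whole group as a forward declaration and stop.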
+ if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(*I)) {
+ if (!Class->isThisDeclarationADefinition()) {
+ RewriteForwardClassDecl(D);
+ break;
+ }
+ }
+
+ if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(*I)) {
+ if (!Proto->isThisDeclarationADefinition()) {
+ RewriteForwardProtocolDecl(D);
+ break;
+ }
+ }
+
+ HandleTopLevelSingleDecl(*I);
+ }
+ return true;
+ }
+ void HandleTopLevelSingleDecl(Decl *D);
+ void HandleDeclInMainFile(Decl *D);
+ RewriteObjC(std::string inFile, raw_ostream *OS,
+ DiagnosticsEngine &D, const LangOptions &LOpts,
+ bool silenceMacroWarn);
+
+ ~RewriteObjC() {}
+
+ virtual void HandleTranslationUnit(ASTContext &C);
+
+ void ReplaceStmt(Stmt *Old, Stmt *New) {
+ Stmt *ReplacingStmt = ReplacedNodes[Old];
+
+ if (ReplacingStmt)
+ return; // We can't rewrite the same node twice.
+
+ if (DisableReplaceStmt)
+ return;
+
+ // If replacement succeeded or warning disabled return with no warning.
+ if (!Rewrite.ReplaceStmt(Old, New)) {
+ ReplacedNodes[Old] = New;
+ return;
+ }
+ if (SilenceRewriteMacroWarning)
+ return;
+ Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
+ << Old->getSourceRange();
+ }
+
+ void ReplaceStmtWithRange(Stmt *Old, Stmt *New, SourceRange SrcRange) {
+ if (DisableReplaceStmt)
+ return;
+
+ // Measure the old text.
+ int Size = Rewrite.getRangeSize(SrcRange);
+ if (Size == -1) {
+ Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
+ << Old->getSourceRange();
+ return;
+ }
+ // Get the new text.
+ std::string SStr;
+ llvm::raw_string_ostream S(SStr);
+ New->printPretty(S, *Context, 0, PrintingPolicy(LangOpts));
+ const std::string &Str = S.str();
+
+ // If replacement succeeded or warning disabled return with no warning.
+ if (!Rewrite.ReplaceText(SrcRange.getBegin(), Size, Str)) {
+ ReplacedNodes[Old] = New;
+ return;
+ }
+ if (SilenceRewriteMacroWarning)
+ return;
+ Diags.Report(Context->getFullLoc(Old->getLocStart()), RewriteFailedDiag)
+ << Old->getSourceRange();
+ }
+
+ void InsertText(SourceLocation Loc, StringRef Str,
+ bool InsertAfter = true) {
+ // If insertion succeeded or warning disabled return with no warning.
+ if (!Rewrite.InsertText(Loc, Str, InsertAfter) ||
+ SilenceRewriteMacroWarning)
+ return;
+
+ Diags.Report(Context->getFullLoc(Loc), RewriteFailedDiag);
+ }
+
+ void ReplaceText(SourceLocation Start, unsigned OrigLength,
+ StringRef Str) {
+      // If replacement succeeded or warning disabled return with no warning.
+ if (!Rewrite.ReplaceText(Start, OrigLength, Str) ||
+ SilenceRewriteMacroWarning)
+ return;
+
+ Diags.Report(Context->getFullLoc(Start), RewriteFailedDiag);
+ }
+
+ // Syntactic Rewriting.
+ void RewriteRecordBody(RecordDecl *RD);
+ void RewriteInclude();
+ void RewriteForwardClassDecl(DeclGroupRef D);
+ void RewriteForwardClassDecl(const llvm::SmallVector<Decl*, 8> &DG);
+ void RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
+ const std::string &typedefString);
+ void RewriteImplementations();
+ void RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
+ ObjCImplementationDecl *IMD,
+ ObjCCategoryImplDecl *CID);
+ void RewriteInterfaceDecl(ObjCInterfaceDecl *Dcl);
+ void RewriteImplementationDecl(Decl *Dcl);
+ void RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl,
+ ObjCMethodDecl *MDecl, std::string &ResultStr);
+ void RewriteTypeIntoString(QualType T, std::string &ResultStr,
+ const FunctionType *&FPRetType);
+ void RewriteByRefString(std::string &ResultStr, const std::string &Name,
+ ValueDecl *VD, bool def=false);
+ void RewriteCategoryDecl(ObjCCategoryDecl *Dcl);
+ void RewriteProtocolDecl(ObjCProtocolDecl *Dcl);
+ void RewriteForwardProtocolDecl(DeclGroupRef D);
+ void RewriteForwardProtocolDecl(const llvm::SmallVector<Decl*, 8> &DG);
+ void RewriteMethodDeclaration(ObjCMethodDecl *Method);
+ void RewriteProperty(ObjCPropertyDecl *prop);
+ void RewriteFunctionDecl(FunctionDecl *FD);
+ void RewriteBlockPointerType(std::string& Str, QualType Type);
+ void RewriteBlockPointerTypeVariable(std::string& Str, ValueDecl *VD);
+ void RewriteBlockLiteralFunctionDecl(FunctionDecl *FD);
+ void RewriteObjCQualifiedInterfaceTypes(Decl *Dcl);
+ void RewriteTypeOfDecl(VarDecl *VD);
+ void RewriteObjCQualifiedInterfaceTypes(Expr *E);
+
+ // Expression Rewriting.
+ Stmt *RewriteFunctionBodyOrGlobalInitializer(Stmt *S);
+ Stmt *RewriteAtEncode(ObjCEncodeExpr *Exp);
+ Stmt *RewritePropertyOrImplicitGetter(PseudoObjectExpr *Pseudo);
+ Stmt *RewritePropertyOrImplicitSetter(PseudoObjectExpr *Pseudo);
+ Stmt *RewriteAtSelector(ObjCSelectorExpr *Exp);
+ Stmt *RewriteMessageExpr(ObjCMessageExpr *Exp);
+ Stmt *RewriteObjCStringLiteral(ObjCStringLiteral *Exp);
+ Stmt *RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp);
+ void RewriteTryReturnStmts(Stmt *S);
+ void RewriteSyncReturnStmts(Stmt *S, std::string buf);
+ Stmt *RewriteObjCTryStmt(ObjCAtTryStmt *S);
+ Stmt *RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S);
+ Stmt *RewriteObjCThrowStmt(ObjCAtThrowStmt *S);
+ Stmt *RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
+ SourceLocation OrigEnd);
+ Stmt *RewriteBreakStmt(BreakStmt *S);
+ Stmt *RewriteContinueStmt(ContinueStmt *S);
+ void RewriteCastExpr(CStyleCastExpr *CE);
+
+ // Block rewriting.
+ void RewriteBlocksInFunctionProtoType(QualType funcType, NamedDecl *D);
+
+ // Block specific rewrite rules.
+ void RewriteBlockPointerDecl(NamedDecl *VD);
+ void RewriteByRefVar(VarDecl *VD);
+ Stmt *RewriteBlockDeclRefExpr(DeclRefExpr *VD);
+ Stmt *RewriteLocalVariableExternalStorage(DeclRefExpr *DRE);
+ void RewriteBlockPointerFunctionArgs(FunctionDecl *FD);
+
+ void RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
+ std::string &Result);
+
+ virtual void Initialize(ASTContext &context) = 0;
+
+ // Metadata Rewriting.
+ virtual void RewriteMetaDataIntoBuffer(std::string &Result) = 0;
+ virtual void RewriteObjCProtocolListMetaData(const ObjCList<ObjCProtocolDecl> &Prots,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result) = 0;
+ virtual void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
+ std::string &Result) = 0;
+ virtual void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result) = 0;
+ virtual void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ std::string &Result) = 0;
+
+ // Rewriting ivar access
+ virtual Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) = 0;
+ virtual void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
+ std::string &Result) = 0;
+
+    // Misc. AST transformation routines. Sometimes they end up calling
+ // rewriting routines on the new ASTs.
+ CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD,
+ Expr **args, unsigned nargs,
+ SourceLocation StartLoc=SourceLocation(),
+ SourceLocation EndLoc=SourceLocation());
+
+ Stmt *SynthMessageExpr(ObjCMessageExpr *Exp,
+ SourceLocation StartLoc=SourceLocation(),
+ SourceLocation EndLoc=SourceLocation());
+
+ void SynthCountByEnumWithState(std::string &buf);
+ void SynthMsgSendFunctionDecl();
+ void SynthMsgSendSuperFunctionDecl();
+ void SynthMsgSendStretFunctionDecl();
+ void SynthMsgSendFpretFunctionDecl();
+ void SynthMsgSendSuperStretFunctionDecl();
+ void SynthGetClassFunctionDecl();
+ void SynthGetMetaClassFunctionDecl();
+ void SynthGetSuperClassFunctionDecl();
+ void SynthSelGetUidFunctionDecl();
+ void SynthSuperContructorFunctionDecl();
+
+ std::string SynthesizeByrefCopyDestroyHelper(VarDecl *VD, int flag);
+ std::string SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
+ StringRef funcName, std::string Tag);
+ std::string SynthesizeBlockFunc(BlockExpr *CE, int i,
+ StringRef funcName, std::string Tag);
+ std::string SynthesizeBlockImpl(BlockExpr *CE,
+ std::string Tag, std::string Desc);
+ std::string SynthesizeBlockDescriptor(std::string DescTag,
+ std::string ImplTag,
+ int i, StringRef funcName,
+ unsigned hasCopy);
+ Stmt *SynthesizeBlockCall(CallExpr *Exp, const Expr* BlockExp);
+ void SynthesizeBlockLiterals(SourceLocation FunLocStart,
+ StringRef FunName);
+ FunctionDecl *SynthBlockInitFunctionDecl(StringRef name);
+ Stmt *SynthBlockInitExpr(BlockExpr *Exp,
+ const SmallVector<DeclRefExpr *, 8> &InnerBlockDeclRefs);
+
+ // Misc. helper routines.
+ QualType getProtocolType();
+ void WarnAboutReturnGotoStmts(Stmt *S);
+ void HasReturnStmts(Stmt *S, bool &hasReturns);
+ void CheckFunctionPointerDecl(QualType dType, NamedDecl *ND);
+ void InsertBlockLiteralsWithinFunction(FunctionDecl *FD);
+ void InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD);
+
+ bool IsDeclStmtInForeachHeader(DeclStmt *DS);
+ void CollectBlockDeclRefInfo(BlockExpr *Exp);
+ void GetBlockDeclRefExprs(Stmt *S);
+ void GetInnerBlockDeclRefExprs(Stmt *S,
+ SmallVector<DeclRefExpr *, 8> &InnerBlockDeclRefs,
+ llvm::SmallPtrSet<const DeclContext *, 8> &InnerContexts);
+
+ // We avoid calling Type::isBlockPointerType(), since it operates on the
+ // canonical type. We only care if the top-level type is a closure pointer.
+ bool isTopLevelBlockPointerType(QualType T) {
+ return isa<BlockPointerType>(T);
+ }
+
+ /// convertBlockPointerToFunctionPointer - Converts a block-pointer type
+ /// to a function pointer type and upon success, returns true; false
+ /// otherwise.
+ bool convertBlockPointerToFunctionPointer(QualType &T) {
+ if (isTopLevelBlockPointerType(T)) {
+ const BlockPointerType *BPT = T->getAs<BlockPointerType>();
+ T = Context->getPointerType(BPT->getPointeeType());
+ return true;
+ }
+ return false;
+ }
+
+ bool needToScanForQualifiers(QualType T);
+ QualType getSuperStructType();
+ QualType getConstantStringStructType();
+ QualType convertFunctionTypeOfBlocks(const FunctionType *FT);
+ bool BufferContainsPPDirectives(const char *startBuf, const char *endBuf);
+
+ void convertToUnqualifiedObjCType(QualType &T) {
+ if (T->isObjCQualifiedIdType())
+ T = Context->getObjCIdType();
+ else if (T->isObjCQualifiedClassType())
+ T = Context->getObjCClassType();
+ else if (T->isObjCObjectPointerType() &&
+ T->getPointeeType()->isObjCQualifiedInterfaceType()) {
+ if (const ObjCObjectPointerType * OBJPT =
+ T->getAsObjCInterfacePointerType()) {
+ const ObjCInterfaceType *IFaceT = OBJPT->getInterfaceType();
+ T = QualType(IFaceT, 0);
+ T = Context->getPointerType(T);
+ }
+ }
+ }
+
+ // FIXME: This predicate seems like it would be useful to add to ASTContext.
+ bool isObjCType(QualType T) {
+ if (!LangOpts.ObjC1 && !LangOpts.ObjC2)
+ return false;
+
+ QualType OCT = Context->getCanonicalType(T).getUnqualifiedType();
+
+ if (OCT == Context->getCanonicalType(Context->getObjCIdType()) ||
+ OCT == Context->getCanonicalType(Context->getObjCClassType()))
+ return true;
+
+ if (const PointerType *PT = OCT->getAs<PointerType>()) {
+ if (isa<ObjCInterfaceType>(PT->getPointeeType()) ||
+ PT->getPointeeType()->isObjCQualifiedIdType())
+ return true;
+ }
+ return false;
+ }
+ bool PointerTypeTakesAnyBlockArguments(QualType QT);
+ bool PointerTypeTakesAnyObjCQualifiedType(QualType QT);
+ void GetExtentOfArgList(const char *Name, const char *&LParen,
+ const char *&RParen);
+
+ void QuoteDoublequotes(std::string &From, std::string &To) {
+ for (unsigned i = 0; i < From.length(); i++) {
+ if (From[i] == '"')
+ To += "\\\"";
+ else
+ To += From[i];
+ }
+ }
+
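+    // Build a plain C function type for synthesized declarations; a result of
+    // 'instancetype' (the ObjC instance type) is lowered to 'id'.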
+ QualType getSimpleFunctionType(QualType result,
+ const QualType *args,
+ unsigned numArgs,
+ bool variadic = false) {
+ if (result == Context->getObjCInstanceType())
+ result = Context->getObjCIdType();
+ FunctionProtoType::ExtProtoInfo fpi;
+ fpi.Variadic = variadic;
+ return Context->getFunctionType(result, args, numArgs, fpi);
+ }
+
+ // Helper function: create a CStyleCastExpr with trivial type source info.
+ CStyleCastExpr* NoTypeInfoCStyleCastExpr(ASTContext *Ctx, QualType Ty,
+ CastKind Kind, Expr *E) {
+ TypeSourceInfo *TInfo = Ctx->getTrivialTypeSourceInfo(Ty, SourceLocation());
+ return CStyleCastExpr::Create(*Ctx, Ty, VK_RValue, Kind, E, 0, TInfo,
+ SourceLocation(), SourceLocation());
+ }
+ };
+
+ class RewriteObjCFragileABI : public RewriteObjC {
+ public:
+
+ RewriteObjCFragileABI(std::string inFile, raw_ostream *OS,
+ DiagnosticsEngine &D, const LangOptions &LOpts,
+ bool silenceMacroWarn) : RewriteObjC(inFile, OS,
+ D, LOpts,
+ silenceMacroWarn) {}
+
+ ~RewriteObjCFragileABI() {}
+ virtual void Initialize(ASTContext &context);
+
+ // Rewriting metadata
+ template<typename MethodIterator>
+ void RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
+ MethodIterator MethodEnd,
+ bool IsInstanceMethod,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result);
+ virtual void RewriteObjCProtocolMetaData(ObjCProtocolDecl *Protocol,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result);
+ virtual void RewriteObjCProtocolListMetaData(
+ const ObjCList<ObjCProtocolDecl> &Prots,
+ StringRef prefix, StringRef ClassName, std::string &Result);
+ virtual void RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ std::string &Result);
+ virtual void RewriteMetaDataIntoBuffer(std::string &Result);
+ virtual void RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *CDecl,
+ std::string &Result);
+
+ // Rewriting ivar
+ virtual void RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
+ std::string &Result);
+ virtual Stmt *RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV);
+ };
+}
+
+void RewriteObjC::RewriteBlocksInFunctionProtoType(QualType funcType,
+ NamedDecl *D) {
+ if (const FunctionProtoType *fproto
+ = dyn_cast<FunctionProtoType>(funcType.IgnoreParens())) {
+ for (FunctionProtoType::arg_type_iterator I = fproto->arg_type_begin(),
+ E = fproto->arg_type_end(); I && (I != E); ++I)
+ if (isTopLevelBlockPointerType(*I)) {
+ // All the args are checked/rewritten. Don't call twice!
+ RewriteBlockPointerDecl(D);
+ break;
+ }
+ }
+}
+
+void RewriteObjC::CheckFunctionPointerDecl(QualType funcType, NamedDecl *ND) {
+ const PointerType *PT = funcType->getAs<PointerType>();
+ if (PT && PointerTypeTakesAnyBlockArguments(funcType))
+ RewriteBlocksInFunctionProtoType(PT->getPointeeType(), ND);
+}
+
+static bool IsHeaderFile(const std::string &Filename) {
+ std::string::size_type DotPos = Filename.rfind('.');
+
+ if (DotPos == std::string::npos) {
+ // no file extension
+ return false;
+ }
+
+ std::string Ext = std::string(Filename.begin()+DotPos+1, Filename.end());
+ // C header: .h
+ // C++ header: .hh or .H;
+ return Ext == "h" || Ext == "hh" || Ext == "H";
+}
+
+RewriteObjC::RewriteObjC(std::string inFile, raw_ostream* OS,
+ DiagnosticsEngine &D, const LangOptions &LOpts,
+ bool silenceMacroWarn)
+ : Diags(D), LangOpts(LOpts), InFileName(inFile), OutFile(OS),
+ SilenceRewriteMacroWarning(silenceMacroWarn) {
+ IsHeader = IsHeaderFile(inFile);
+ RewriteFailedDiag = Diags.getCustomDiagID(DiagnosticsEngine::Warning,
+ "rewriting sub-expression within a macro (may not be correct)");
+ TryFinallyContainsReturnDiag = Diags.getCustomDiagID(
+ DiagnosticsEngine::Warning,
+ "rewriter doesn't support user-specified control flow semantics "
+ "for @try/@finally (code may not execute properly)");
+}
+
+ASTConsumer *clang::CreateObjCRewriter(const std::string& InFile,
+ raw_ostream* OS,
+ DiagnosticsEngine &Diags,
+ const LangOptions &LOpts,
+ bool SilenceRewriteMacroWarning) {
+ return new RewriteObjCFragileABI(InFile, OS, Diags, LOpts, SilenceRewriteMacroWarning);
+}
+
+void RewriteObjC::InitializeCommon(ASTContext &context) {
+ Context = &context;
+ SM = &Context->getSourceManager();
+ TUDecl = Context->getTranslationUnitDecl();
+ MsgSendFunctionDecl = 0;
+ MsgSendSuperFunctionDecl = 0;
+ MsgSendStretFunctionDecl = 0;
+ MsgSendSuperStretFunctionDecl = 0;
+ MsgSendFpretFunctionDecl = 0;
+ GetClassFunctionDecl = 0;
+ GetMetaClassFunctionDecl = 0;
+ GetSuperClassFunctionDecl = 0;
+ SelGetUidFunctionDecl = 0;
+ CFStringFunctionDecl = 0;
+ ConstantStringClassReference = 0;
+ NSStringRecord = 0;
+ CurMethodDef = 0;
+ CurFunctionDef = 0;
+ CurFunctionDeclToDeclareForBlock = 0;
+ GlobalVarDecl = 0;
+ SuperStructDecl = 0;
+ ProtocolTypeDecl = 0;
+ ConstantStringDecl = 0;
+ BcLabelCount = 0;
+ SuperContructorFunctionDecl = 0;
+ NumObjCStringLiterals = 0;
+ PropParentMap = 0;
+ CurrentBody = 0;
+ DisableReplaceStmt = false;
+ objc_impl_method = false;
+
+ // Get the ID and start/end of the main file.
+ MainFileID = SM->getMainFileID();
+ const llvm::MemoryBuffer *MainBuf = SM->getBuffer(MainFileID);
+ MainFileStart = MainBuf->getBufferStart();
+ MainFileEnd = MainBuf->getBufferEnd();
+
+ Rewrite.setSourceMgr(Context->getSourceManager(), Context->getLangOpts());
+}
+
+//===----------------------------------------------------------------------===//
+// Top Level Driver Code
+//===----------------------------------------------------------------------===//
+
+void RewriteObjC::HandleTopLevelSingleDecl(Decl *D) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ // Two cases: either the decl could be in the main file, or it could be in a
+  // #included file. If the former, rewrite it now. If the latter, check to see
+ // if we rewrote the #include/#import.
+ SourceLocation Loc = D->getLocation();
+ Loc = SM->getExpansionLoc(Loc);
+
+ // If this is for a builtin, ignore it.
+ if (Loc.isInvalid()) return;
+
+  // Look for built-in declarations that we need to refer to during the rewrite.
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ RewriteFunctionDecl(FD);
+ } else if (VarDecl *FVD = dyn_cast<VarDecl>(D)) {
+ // declared in <Foundation/NSString.h>
+ if (FVD->getName() == "_NSConstantStringClassReference") {
+ ConstantStringClassReference = FVD;
+ return;
+ }
+ } else if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+ if (ID->isThisDeclarationADefinition())
+ RewriteInterfaceDecl(ID);
+ } else if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(D)) {
+ RewriteCategoryDecl(CD);
+ } else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
+ if (PD->isThisDeclarationADefinition())
+ RewriteProtocolDecl(PD);
+ } else if (LinkageSpecDecl *LSD = dyn_cast<LinkageSpecDecl>(D)) {
+ // Recurse into linkage specifications
+ for (DeclContext::decl_iterator DI = LSD->decls_begin(),
+ DIEnd = LSD->decls_end();
+ DI != DIEnd; ) {
+ if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>((*DI))) {
+ if (!IFace->isThisDeclarationADefinition()) {
+ SmallVector<Decl *, 8> DG;
+ SourceLocation StartLoc = IFace->getLocStart();
+ do {
+ if (isa<ObjCInterfaceDecl>(*DI) &&
+ !cast<ObjCInterfaceDecl>(*DI)->isThisDeclarationADefinition() &&
+ StartLoc == (*DI)->getLocStart())
+ DG.push_back(*DI);
+ else
+ break;
+
+ ++DI;
+ } while (DI != DIEnd);
+ RewriteForwardClassDecl(DG);
+ continue;
+ }
+ }
+
+ if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>((*DI))) {
+ if (!Proto->isThisDeclarationADefinition()) {
+ SmallVector<Decl *, 8> DG;
+ SourceLocation StartLoc = Proto->getLocStart();
+ do {
+ if (isa<ObjCProtocolDecl>(*DI) &&
+ !cast<ObjCProtocolDecl>(*DI)->isThisDeclarationADefinition() &&
+ StartLoc == (*DI)->getLocStart())
+ DG.push_back(*DI);
+ else
+ break;
+
+ ++DI;
+ } while (DI != DIEnd);
+ RewriteForwardProtocolDecl(DG);
+ continue;
+ }
+ }
+
+ HandleTopLevelSingleDecl(*DI);
+ ++DI;
+ }
+ }
+ // If we have a decl in the main file, see if we should rewrite it.
+ if (SM->isFromMainFile(Loc))
+ return HandleDeclInMainFile(D);
+}
+
+//===----------------------------------------------------------------------===//
+// Syntactic (non-AST) Rewriting Code
+//===----------------------------------------------------------------------===//
+
+void RewriteObjC::RewriteInclude() {
+ SourceLocation LocStart = SM->getLocForStartOfFile(MainFileID);
+ StringRef MainBuf = SM->getBufferData(MainFileID);
+ const char *MainBufStart = MainBuf.begin();
+ const char *MainBufEnd = MainBuf.end();
+ size_t ImportLen = strlen("import");
+
+ // Loop over the whole file, looking for includes.
+ for (const char *BufPtr = MainBufStart; BufPtr < MainBufEnd; ++BufPtr) {
+ if (*BufPtr == '#') {
+ if (++BufPtr == MainBufEnd)
+ return;
+ while (*BufPtr == ' ' || *BufPtr == '\t')
+ if (++BufPtr == MainBufEnd)
+ return;
+ if (!strncmp(BufPtr, "import", ImportLen)) {
+ // replace import with include
+ SourceLocation ImportLoc =
+ LocStart.getLocWithOffset(BufPtr-MainBufStart);
+ ReplaceText(ImportLoc, ImportLen, "include");
+ BufPtr += ImportLen;
+ }
+ }
+ }
+}
+
+static std::string getIvarAccessString(ObjCIvarDecl *OID) {
+ const ObjCInterfaceDecl *ClassDecl = OID->getContainingInterface();
+ std::string S;
+ S = "((struct ";
+ S += ClassDecl->getIdentifier()->getName();
+ S += "_IMPL *)self)->";
+ S += OID->getName();
+ return S;
+}
+
+void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
+ ObjCImplementationDecl *IMD,
+ ObjCCategoryImplDecl *CID) {
+ static bool objcGetPropertyDefined = false;
+ static bool objcSetPropertyDefined = false;
+ SourceLocation startLoc = PID->getLocStart();
+ InsertText(startLoc, "// ");
+ const char *startBuf = SM->getCharacterData(startLoc);
+ assert((*startBuf == '@') && "bogus @synthesize location");
+ const char *semiBuf = strchr(startBuf, ';');
+ assert((*semiBuf == ';') && "@synthesize: can't find ';'");
+ SourceLocation onePastSemiLoc =
+ startLoc.getLocWithOffset(semiBuf-startBuf+1);
+
+ if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ return; // FIXME: is this correct?
+
+ // Generate the 'getter' function.
+ ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ ObjCIvarDecl *OID = PID->getPropertyIvarDecl();
+
+ if (!OID)
+ return;
+ unsigned Attributes = PD->getPropertyAttributes();
+ if (!PD->getGetterMethodDecl()->isDefined()) {
+ bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
+ (Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy));
+ std::string Getr;
+ if (GenGetProperty && !objcGetPropertyDefined) {
+ objcGetPropertyDefined = true;
+ // FIXME. Is this attribute correct in all cases?
+ Getr = "\nextern \"C\" __declspec(dllimport) "
+ "id objc_getProperty(id, SEL, long, bool);\n";
+ }
+ RewriteObjCMethodDecl(OID->getContainingInterface(),
+ PD->getGetterMethodDecl(), Getr);
+ Getr += "{ ";
+ // Synthesize an explicit cast to gain access to the ivar.
+ // See objc-act.c:objc_synthesize_new_getter() for details.
+ if (GenGetProperty) {
+ // return objc_getProperty(self, _cmd, offsetof(ClassDecl, OID), 1)
+ Getr += "typedef ";
+ const FunctionType *FPRetType = 0;
+ RewriteTypeIntoString(PD->getGetterMethodDecl()->getResultType(), Getr,
+ FPRetType);
+ Getr += " _TYPE";
+ if (FPRetType) {
+ Getr += ")"; // close the precedence "scope" for "*".
+
+ // Now, emit the argument types (if any).
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)){
+ Getr += "(";
+ for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
+ if (i) Getr += ", ";
+ std::string ParamStr = FT->getArgType(i).getAsString(
+ Context->getPrintingPolicy());
+ Getr += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (FT->getNumArgs()) Getr += ", ";
+ Getr += "...";
+ }
+ Getr += ")";
+ } else
+ Getr += "()";
+ }
+ Getr += ";\n";
+ Getr += "return (_TYPE)";
+ Getr += "objc_getProperty(self, _cmd, ";
+ RewriteIvarOffsetComputation(OID, Getr);
+ Getr += ", 1)";
+ }
+ else
+ Getr += "return " + getIvarAccessString(OID);
+ Getr += "; }";
+ InsertText(onePastSemiLoc, Getr);
+ }
+
+ if (PD->isReadOnly() || PD->getSetterMethodDecl()->isDefined())
+ return;
+
+ // Generate the 'setter' function.
+ std::string Setr;
+ bool GenSetProperty = Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy);
+ if (GenSetProperty && !objcSetPropertyDefined) {
+ objcSetPropertyDefined = true;
+ // FIXME. Is this attribute correct in all cases?
+ Setr = "\nextern \"C\" __declspec(dllimport) "
+ "void objc_setProperty (id, SEL, long, id, bool, bool);\n";
+ }
+
+ RewriteObjCMethodDecl(OID->getContainingInterface(),
+ PD->getSetterMethodDecl(), Setr);
+ Setr += "{ ";
+ // Synthesize an explicit cast to initialize the ivar.
+ // See objc-act.c:objc_synthesize_new_setter() for details.
+ if (GenSetProperty) {
+ Setr += "objc_setProperty (self, _cmd, ";
+ RewriteIvarOffsetComputation(OID, Setr);
+ Setr += ", (id)";
+ Setr += PD->getName();
+ Setr += ", ";
+ if (Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ Setr += "0, ";
+ else
+ Setr += "1, ";
+ if (Attributes & ObjCPropertyDecl::OBJC_PR_copy)
+ Setr += "1)";
+ else
+ Setr += "0)";
+ }
+ else {
+ Setr += getIvarAccessString(OID) + " = ";
+ Setr += PD->getName();
+ }
+ Setr += "; }";
+ InsertText(onePastSemiLoc, Setr);
+}
+
+static void RewriteOneForwardClassDecl(ObjCInterfaceDecl *ForwardDecl,
+ std::string &typedefString) {
+ typedefString += "#ifndef _REWRITER_typedef_";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += "\n";
+ typedefString += "#define _REWRITER_typedef_";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += "\n";
+ typedefString += "typedef struct objc_object ";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += ";\n#endif\n";
+}
+
+void RewriteObjC::RewriteForwardClassEpilogue(ObjCInterfaceDecl *ClassDecl,
+ const std::string &typedefString) {
+ SourceLocation startLoc = ClassDecl->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+ const char *semiPtr = strchr(startBuf, ';');
+ // Replace the @class with typedefs corresponding to the classes.
+ ReplaceText(startLoc, semiPtr-startBuf+1, typedefString);
+}
+
+void RewriteObjC::RewriteForwardClassDecl(DeclGroupRef D) {
+ std::string typedefString;
+ for (DeclGroupRef::iterator I = D.begin(), E = D.end(); I != E; ++I) {
+ ObjCInterfaceDecl *ForwardDecl = cast<ObjCInterfaceDecl>(*I);
+ if (I == D.begin()) {
+      // Translate to typedefs that forward-reference structs with the same name
+ // as the class. As a convenience, we include the original declaration
+ // as a comment.
+ typedefString += "// @class ";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += ";\n";
+ }
+ RewriteOneForwardClassDecl(ForwardDecl, typedefString);
+ }
+ DeclGroupRef::iterator I = D.begin();
+ RewriteForwardClassEpilogue(cast<ObjCInterfaceDecl>(*I), typedefString);
+}
+
+void RewriteObjC::RewriteForwardClassDecl(
+ const llvm::SmallVector<Decl*, 8> &D) {
+ std::string typedefString;
+ for (unsigned i = 0; i < D.size(); i++) {
+ ObjCInterfaceDecl *ForwardDecl = cast<ObjCInterfaceDecl>(D[i]);
+ if (i == 0) {
+ typedefString += "// @class ";
+ typedefString += ForwardDecl->getNameAsString();
+ typedefString += ";\n";
+ }
+ RewriteOneForwardClassDecl(ForwardDecl, typedefString);
+ }
+ RewriteForwardClassEpilogue(cast<ObjCInterfaceDecl>(D[0]), typedefString);
+}
+
+void RewriteObjC::RewriteMethodDeclaration(ObjCMethodDecl *Method) {
+  // When the method is a synthesized one, such as a getter/setter, there is
+ // nothing to rewrite.
+ if (Method->isImplicit())
+ return;
+ SourceLocation LocStart = Method->getLocStart();
+ SourceLocation LocEnd = Method->getLocEnd();
+
+ if (SM->getExpansionLineNumber(LocEnd) >
+ SM->getExpansionLineNumber(LocStart)) {
+ InsertText(LocStart, "#if 0\n");
+ ReplaceText(LocEnd, 1, ";\n#endif\n");
+ } else {
+ InsertText(LocStart, "// ");
+ }
+}
+
+void RewriteObjC::RewriteProperty(ObjCPropertyDecl *prop) {
+ SourceLocation Loc = prop->getAtLoc();
+
+ ReplaceText(Loc, 0, "// ");
+ // FIXME: handle properties that are declared across multiple lines.
+}
+
+void RewriteObjC::RewriteCategoryDecl(ObjCCategoryDecl *CatDecl) {
+ SourceLocation LocStart = CatDecl->getLocStart();
+
+ // FIXME: handle category headers that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+
+ for (ObjCCategoryDecl::prop_iterator I = CatDecl->prop_begin(),
+ E = CatDecl->prop_end(); I != E; ++I)
+ RewriteProperty(*I);
+
+ for (ObjCCategoryDecl::instmeth_iterator
+ I = CatDecl->instmeth_begin(), E = CatDecl->instmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+ for (ObjCCategoryDecl::classmeth_iterator
+ I = CatDecl->classmeth_begin(), E = CatDecl->classmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+
+ // Lastly, comment out the @end.
+ ReplaceText(CatDecl->getAtEndRange().getBegin(),
+ strlen("@end"), "/* @end */");
+}
+
+void RewriteObjC::RewriteProtocolDecl(ObjCProtocolDecl *PDecl) {
+ SourceLocation LocStart = PDecl->getLocStart();
+ assert(PDecl->isThisDeclarationADefinition());
+
+ // FIXME: handle protocol headers that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+
+ for (ObjCProtocolDecl::instmeth_iterator
+ I = PDecl->instmeth_begin(), E = PDecl->instmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+ for (ObjCProtocolDecl::classmeth_iterator
+ I = PDecl->classmeth_begin(), E = PDecl->classmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+
+ for (ObjCInterfaceDecl::prop_iterator I = PDecl->prop_begin(),
+ E = PDecl->prop_end(); I != E; ++I)
+ RewriteProperty(*I);
+
+ // Lastly, comment out the @end.
+ SourceLocation LocEnd = PDecl->getAtEndRange().getBegin();
+ ReplaceText(LocEnd, strlen("@end"), "/* @end */");
+
+ // Must comment out @optional/@required
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ for (const char *p = startBuf; p < endBuf; p++) {
+ if (*p == '@' && !strncmp(p+1, "optional", strlen("optional"))) {
+ SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf);
+ ReplaceText(OptionalLoc, strlen("@optional"), "/* @optional */");
+
+ }
+ else if (*p == '@' && !strncmp(p+1, "required", strlen("required"))) {
+ SourceLocation OptionalLoc = LocStart.getLocWithOffset(p-startBuf);
+ ReplaceText(OptionalLoc, strlen("@required"), "/* @required */");
+
+ }
+ }
+}
+
+void RewriteObjC::RewriteForwardProtocolDecl(DeclGroupRef D) {
+ SourceLocation LocStart = (*D.begin())->getLocStart();
+ if (LocStart.isInvalid())
+ llvm_unreachable("Invalid SourceLocation");
+ // FIXME: handle forward protocol that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+}
+
+void
+RewriteObjC::RewriteForwardProtocolDecl(const llvm::SmallVector<Decl*, 8> &DG) {
+ SourceLocation LocStart = DG[0]->getLocStart();
+ if (LocStart.isInvalid())
+ llvm_unreachable("Invalid SourceLocation");
+ // FIXME: handle forward protocol that are declared across multiple lines.
+ ReplaceText(LocStart, 0, "// ");
+}
+
+void RewriteObjC::RewriteTypeIntoString(QualType T, std::string &ResultStr,
+ const FunctionType *&FPRetType) {
+ if (T->isObjCQualifiedIdType())
+ ResultStr += "id";
+ else if (T->isFunctionPointerType() ||
+ T->isBlockPointerType()) {
+ // needs special handling, since pointer-to-functions have special
+    // syntax (where a declaration models use).
+ QualType retType = T;
+ QualType PointeeTy;
+ if (const PointerType* PT = retType->getAs<PointerType>())
+ PointeeTy = PT->getPointeeType();
+ else if (const BlockPointerType *BPT = retType->getAs<BlockPointerType>())
+ PointeeTy = BPT->getPointeeType();
+ if ((FPRetType = PointeeTy->getAs<FunctionType>())) {
+ ResultStr += FPRetType->getResultType().getAsString(
+ Context->getPrintingPolicy());
+ ResultStr += "(*";
+ }
+ } else
+ ResultStr += T.getAsString(Context->getPrintingPolicy());
+}
+
+void RewriteObjC::RewriteObjCMethodDecl(const ObjCInterfaceDecl *IDecl,
+ ObjCMethodDecl *OMD,
+ std::string &ResultStr) {
+ //fprintf(stderr,"In RewriteObjCMethodDecl\n");
+ const FunctionType *FPRetType = 0;
+ ResultStr += "\nstatic ";
+ RewriteTypeIntoString(OMD->getResultType(), ResultStr, FPRetType);
+ ResultStr += " ";
+
+ // Unique method name
+ std::string NameStr;
+
+ if (OMD->isInstanceMethod())
+ NameStr += "_I_";
+ else
+ NameStr += "_C_";
+
+ NameStr += IDecl->getNameAsString();
+ NameStr += "_";
+
+ if (ObjCCategoryImplDecl *CID =
+ dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext())) {
+ NameStr += CID->getNameAsString();
+ NameStr += "_";
+ }
+ // Append selector names, replacing ':' with '_'
+ {
+ std::string selString = OMD->getSelector().getAsString();
+ int len = selString.size();
+ for (int i = 0; i < len; i++)
+ if (selString[i] == ':')
+ selString[i] = '_';
+ NameStr += selString;
+ }
+ // Remember this name for metadata emission
+ MethodInternalNames[OMD] = NameStr;
+ ResultStr += NameStr;
+
+ // Rewrite arguments
+ ResultStr += "(";
+
+ // invisible arguments
+ if (OMD->isInstanceMethod()) {
+ QualType selfTy = Context->getObjCInterfaceType(IDecl);
+ selfTy = Context->getPointerType(selfTy);
+ if (!LangOpts.MicrosoftExt) {
+ if (ObjCSynthesizedStructs.count(const_cast<ObjCInterfaceDecl*>(IDecl)))
+ ResultStr += "struct ";
+ }
+ // When rewriting for Microsoft, explicitly omit the structure name.
+ ResultStr += IDecl->getNameAsString();
+ ResultStr += " *";
+ }
+ else
+ ResultStr += Context->getObjCClassType().getAsString(
+ Context->getPrintingPolicy());
+
+ ResultStr += " self, ";
+ ResultStr += Context->getObjCSelType().getAsString(Context->getPrintingPolicy());
+ ResultStr += " _cmd";
+
+ // Method arguments.
+ for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
+ E = OMD->param_end(); PI != E; ++PI) {
+ ParmVarDecl *PDecl = *PI;
+ ResultStr += ", ";
+ if (PDecl->getType()->isObjCQualifiedIdType()) {
+ ResultStr += "id ";
+ ResultStr += PDecl->getNameAsString();
+ } else {
+ std::string Name = PDecl->getNameAsString();
+ QualType QT = PDecl->getType();
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ (void)convertBlockPointerToFunctionPointer(QT);
+ QT.getAsStringInternal(Name, Context->getPrintingPolicy());
+ ResultStr += Name;
+ }
+ }
+ if (OMD->isVariadic())
+ ResultStr += ", ...";
+ ResultStr += ") ";
+
+ if (FPRetType) {
+ ResultStr += ")"; // close the precedence "scope" for "*".
+
+ // Now, emit the argument types (if any).
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)) {
+ ResultStr += "(";
+ for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
+ if (i) ResultStr += ", ";
+ std::string ParamStr = FT->getArgType(i).getAsString(
+ Context->getPrintingPolicy());
+ ResultStr += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (FT->getNumArgs()) ResultStr += ", ";
+ ResultStr += "...";
+ }
+ ResultStr += ")";
+ } else {
+ ResultStr += "()";
+ }
+ }
+}
+void RewriteObjC::RewriteImplementationDecl(Decl *OID) {
+ ObjCImplementationDecl *IMD = dyn_cast<ObjCImplementationDecl>(OID);
+ ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(OID);
+
+ InsertText(IMD ? IMD->getLocStart() : CID->getLocStart(), "// ");
+
+ for (ObjCCategoryImplDecl::instmeth_iterator
+ I = IMD ? IMD->instmeth_begin() : CID->instmeth_begin(),
+ E = IMD ? IMD->instmeth_end() : CID->instmeth_end();
+ I != E; ++I) {
+ std::string ResultStr;
+ ObjCMethodDecl *OMD = *I;
+ RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
+ SourceLocation LocStart = OMD->getLocStart();
+ SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ ReplaceText(LocStart, endBuf-startBuf, ResultStr);
+ }
+
+ for (ObjCCategoryImplDecl::classmeth_iterator
+ I = IMD ? IMD->classmeth_begin() : CID->classmeth_begin(),
+ E = IMD ? IMD->classmeth_end() : CID->classmeth_end();
+ I != E; ++I) {
+ std::string ResultStr;
+ ObjCMethodDecl *OMD = *I;
+ RewriteObjCMethodDecl(OMD->getClassInterface(), OMD, ResultStr);
+ SourceLocation LocStart = OMD->getLocStart();
+ SourceLocation LocEnd = OMD->getCompoundBody()->getLocStart();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ ReplaceText(LocStart, endBuf-startBuf, ResultStr);
+ }
+ for (ObjCCategoryImplDecl::propimpl_iterator
+ I = IMD ? IMD->propimpl_begin() : CID->propimpl_begin(),
+ E = IMD ? IMD->propimpl_end() : CID->propimpl_end();
+ I != E; ++I) {
+ RewritePropertyImplDecl(*I, IMD, CID);
+ }
+
+ InsertText(IMD ? IMD->getLocEnd() : CID->getLocEnd(), "// ");
+}
+
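+// RewriteInterfaceDecl - Rewrite an @interface: emit a guarded
+// 'typedef struct objc_object <ClassName>;' if no forward declaration has
+// been seen, rewrite the internal ivar struct, the properties and the
+// method declarations, and comment out the trailing @end.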
+void RewriteObjC::RewriteInterfaceDecl(ObjCInterfaceDecl *ClassDecl) {
+ std::string ResultStr;
+ if (!ObjCForwardDecls.count(ClassDecl->getCanonicalDecl())) {
+ // we haven't seen a forward decl - generate a typedef.
+ ResultStr = "#ifndef _REWRITER_typedef_";
+ ResultStr += ClassDecl->getNameAsString();
+ ResultStr += "\n";
+ ResultStr += "#define _REWRITER_typedef_";
+ ResultStr += ClassDecl->getNameAsString();
+ ResultStr += "\n";
+ ResultStr += "typedef struct objc_object ";
+ ResultStr += ClassDecl->getNameAsString();
+ ResultStr += ";\n#endif\n";
+ // Mark this typedef as having been generated.
+ ObjCForwardDecls.insert(ClassDecl->getCanonicalDecl());
+ }
+ RewriteObjCInternalStruct(ClassDecl, ResultStr);
+
+ for (ObjCInterfaceDecl::prop_iterator I = ClassDecl->prop_begin(),
+ E = ClassDecl->prop_end(); I != E; ++I)
+ RewriteProperty(*I);
+ for (ObjCInterfaceDecl::instmeth_iterator
+ I = ClassDecl->instmeth_begin(), E = ClassDecl->instmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+ for (ObjCInterfaceDecl::classmeth_iterator
+ I = ClassDecl->classmeth_begin(), E = ClassDecl->classmeth_end();
+ I != E; ++I)
+ RewriteMethodDeclaration(*I);
+
+ // Lastly, comment out the @end.
+ ReplaceText(ClassDecl->getAtEndRange().getBegin(), strlen("@end"),
+ "/* @end */");
+}
+
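+// RewritePropertyOrImplicitSetter - Rewrite a property/implicit setter
+// pseudo-object expression (e.g. 'obj.prop = rhs') by rebuilding the
+// underlying setter message send with the rewritten base and RHS, then
+// lowering it through SynthMessageExpr().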
+Stmt *RewriteObjC::RewritePropertyOrImplicitSetter(PseudoObjectExpr *PseudoOp) {
+ SourceRange OldRange = PseudoOp->getSourceRange();
+
+ // We just magically know some things about the structure of this
+ // expression.
+ ObjCMessageExpr *OldMsg =
+ cast<ObjCMessageExpr>(PseudoOp->getSemanticExpr(
+ PseudoOp->getNumSemanticExprs() - 1));
+
+ // Because the rewriter doesn't allow us to rewrite rewritten code,
+ // we need to suppress rewriting the sub-statements.
+ Expr *Base, *RHS;
+ {
+ DisableReplaceStmtScope S(*this);
+
+ // Rebuild the base expression if we have one.
+ Base = 0;
+ if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) {
+ Base = OldMsg->getInstanceReceiver();
+ Base = cast<OpaqueValueExpr>(Base)->getSourceExpr();
+ Base = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Base));
+ }
+
+ // Rebuild the RHS.
+ RHS = cast<BinaryOperator>(PseudoOp->getSyntacticForm())->getRHS();
+ RHS = cast<OpaqueValueExpr>(RHS)->getSourceExpr();
+ RHS = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(RHS));
+ }
+
+ // TODO: avoid this copy.
+ SmallVector<SourceLocation, 1> SelLocs;
+ OldMsg->getSelectorLocs(SelLocs);
+
+ ObjCMessageExpr *NewMsg = 0;
+ switch (OldMsg->getReceiverKind()) {
+ case ObjCMessageExpr::Class:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getClassReceiverTypeInfo(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ RHS,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::Instance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ Base,
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ RHS,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ case ObjCMessageExpr::SuperInstance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getSuperLoc(),
+ OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance,
+ OldMsg->getSuperType(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ RHS,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+ }
+
+ Stmt *Replacement = SynthMessageExpr(NewMsg);
+ ReplaceStmtWithRange(PseudoOp, Replacement, OldRange);
+ return Replacement;
+}
+
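+// RewritePropertyOrImplicitGetter - Rewrite a property/implicit getter
+// pseudo-object expression (e.g. 'obj.prop') by rebuilding the underlying
+// zero-argument getter message send and lowering it through
+// SynthMessageExpr().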
+Stmt *RewriteObjC::RewritePropertyOrImplicitGetter(PseudoObjectExpr *PseudoOp) {
+ SourceRange OldRange = PseudoOp->getSourceRange();
+
+ // We just magically know some things about the structure of this
+ // expression.
+ ObjCMessageExpr *OldMsg =
+ cast<ObjCMessageExpr>(PseudoOp->getResultExpr()->IgnoreImplicit());
+
+ // Because the rewriter doesn't allow us to rewrite rewritten code,
+ // we need to suppress rewriting the sub-statements.
+ Expr *Base = 0;
+ {
+ DisableReplaceStmtScope S(*this);
+
+ // Rebuild the base expression if we have one.
+ if (OldMsg->getReceiverKind() == ObjCMessageExpr::Instance) {
+ Base = OldMsg->getInstanceReceiver();
+ Base = cast<OpaqueValueExpr>(Base)->getSourceExpr();
+ Base = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(Base));
+ }
+ }
+
+ // Intentionally empty.
+ SmallVector<SourceLocation, 1> SelLocs;
+ SmallVector<Expr*, 1> Args;
+
+ ObjCMessageExpr *NewMsg = 0;
+ switch (OldMsg->getReceiverKind()) {
+ case ObjCMessageExpr::Class:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getClassReceiverTypeInfo(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::Instance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ Base,
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ case ObjCMessageExpr::SuperInstance:
+ NewMsg = ObjCMessageExpr::Create(*Context, OldMsg->getType(),
+ OldMsg->getValueKind(),
+ OldMsg->getLeftLoc(),
+ OldMsg->getSuperLoc(),
+ OldMsg->getReceiverKind() == ObjCMessageExpr::SuperInstance,
+ OldMsg->getSuperType(),
+ OldMsg->getSelector(),
+ SelLocs,
+ OldMsg->getMethodDecl(),
+ Args,
+ OldMsg->getRightLoc(),
+ OldMsg->isImplicit());
+ break;
+ }
+
+ Stmt *Replacement = SynthMessageExpr(NewMsg);
+ ReplaceStmtWithRange(PseudoOp, Replacement, OldRange);
+ return Replacement;
+}
+
+/// SynthCountByEnumWithState - To print:
+/// ((unsigned int (*)
+/// (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int))
+/// (void *)objc_msgSend)((id)l_collection,
+/// sel_registerName(
+/// "countByEnumeratingWithState:objects:count:"),
+/// &enumState,
+/// (id *)__rw_items, (unsigned int)16)
+///
+void RewriteObjC::SynthCountByEnumWithState(std::string &buf) {
+ buf += "((unsigned int (*) (id, SEL, struct __objcFastEnumerationState *, "
+ "id *, unsigned int))(void *)objc_msgSend)";
+ buf += "\n\t\t";
+ buf += "((id)l_collection,\n\t\t";
+ buf += "sel_registerName(\"countByEnumeratingWithState:objects:count:\"),";
+ buf += "\n\t\t";
+ buf += "&enumState, "
+ "(id *)__rw_items, (unsigned int)16)";
+}
+
+/// RewriteBreakStmt - Rewrite for a break-stmt inside an ObjC2's foreach
+/// statement to exit to its outer synthesized loop.
+///
+Stmt *RewriteObjC::RewriteBreakStmt(BreakStmt *S) {
+ if (Stmts.empty() || !isa<ObjCForCollectionStmt>(Stmts.back()))
+ return S;
+ // replace break with goto __break_label
+ std::string buf;
+
+ SourceLocation startLoc = S->getLocStart();
+ buf = "goto __break_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ ReplaceText(startLoc, strlen("break"), buf);
+
+ return 0;
+}
+
+/// RewriteContinueStmt - Rewrite for a continue-stmt inside an ObjC2's foreach
+/// statement to continue with its inner synthesized loop.
+///
+Stmt *RewriteObjC::RewriteContinueStmt(ContinueStmt *S) {
+ if (Stmts.empty() || !isa<ObjCForCollectionStmt>(Stmts.back()))
+ return S;
+ // replace continue with goto __continue_label
+ std::string buf;
+
+ SourceLocation startLoc = S->getLocStart();
+ buf = "goto __continue_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ ReplaceText(startLoc, strlen("continue"), buf);
+
+ return 0;
+}
+
+/// RewriteObjCForCollectionStmt - Rewriter for ObjC2's foreach statement.
+/// It rewrites:
+/// for ( type elem in collection) { stmts; }
+
+/// Into:
+/// {
+/// type elem;
+/// struct __objcFastEnumerationState enumState = { 0 };
+/// id __rw_items[16];
+/// id l_collection = (id)collection;
+/// unsigned long limit = [l_collection countByEnumeratingWithState:&enumState
+/// objects:__rw_items count:16];
+/// if (limit) {
+/// unsigned long startMutations = *enumState.mutationsPtr;
+/// do {
+/// unsigned long counter = 0;
+/// do {
+/// if (startMutations != *enumState.mutationsPtr)
+/// objc_enumerationMutation(l_collection);
+/// elem = (type)enumState.itemsPtr[counter++];
+/// stmts;
+/// __continue_label: ;
+/// } while (counter < limit);
+/// } while (limit = [l_collection countByEnumeratingWithState:&enumState
+/// objects:__rw_items count:16]);
+/// elem = nil;
+/// __break_label: ;
+/// }
+/// else
+/// elem = nil;
+/// }
+///
+Stmt *RewriteObjC::RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
+ SourceLocation OrigEnd) {
+ assert(!Stmts.empty() && "ObjCForCollectionStmt - Statement stack empty");
+ assert(isa<ObjCForCollectionStmt>(Stmts.back()) &&
+ "ObjCForCollectionStmt Statement stack mismatch");
+ assert(!ObjCBcLabelNo.empty() &&
+ "ObjCForCollectionStmt - Label No stack empty");
+
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+ StringRef elementName;
+ std::string elementTypeAsString;
+ std::string buf;
+ buf = "\n{\n\t";
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(S->getElement())) {
+ // type elem;
+ NamedDecl* D = cast<NamedDecl>(DS->getSingleDecl());
+ QualType ElementType = cast<ValueDecl>(D)->getType();
+ if (ElementType->isObjCQualifiedIdType() ||
+ ElementType->isObjCQualifiedInterfaceType())
+ // Simply use 'id' for all qualified types.
+ elementTypeAsString = "id";
+ else
+ elementTypeAsString = ElementType.getAsString(Context->getPrintingPolicy());
+ buf += elementTypeAsString;
+ buf += " ";
+ elementName = D->getName();
+ buf += elementName;
+ buf += ";\n\t";
+ }
+ else {
+ DeclRefExpr *DR = cast<DeclRefExpr>(S->getElement());
+ elementName = DR->getDecl()->getName();
+ ValueDecl *VD = cast<ValueDecl>(DR->getDecl());
+ if (VD->getType()->isObjCQualifiedIdType() ||
+ VD->getType()->isObjCQualifiedInterfaceType())
+ // Simply use 'id' for all qualified types.
+ elementTypeAsString = "id";
+ else
+ elementTypeAsString = VD->getType().getAsString(Context->getPrintingPolicy());
+ }
+
+ // struct __objcFastEnumerationState enumState = { 0 };
+ buf += "struct __objcFastEnumerationState enumState = { 0 };\n\t";
+ // id __rw_items[16];
+ buf += "id __rw_items[16];\n\t";
+ // id l_collection = (id)
+ buf += "id l_collection = (id)";
+ // Find start location of 'collection' the hard way!
+ const char *startCollectionBuf = startBuf;
+ startCollectionBuf += 3; // skip 'for'
+ startCollectionBuf = strchr(startCollectionBuf, '(');
+ startCollectionBuf++; // skip '('
+ // find 'in' and skip it.
+ while (*startCollectionBuf != ' ' ||
+ *(startCollectionBuf+1) != 'i' || *(startCollectionBuf+2) != 'n' ||
+ (*(startCollectionBuf+3) != ' ' &&
+ *(startCollectionBuf+3) != '[' && *(startCollectionBuf+3) != '('))
+ startCollectionBuf++;
+ startCollectionBuf += 3;
+
+ // Replace: "for (type element in" with string constructed thus far.
+ ReplaceText(startLoc, startCollectionBuf - startBuf, buf);
+ // Replace ')' in for '(' type elem in collection ')' with ';'
+ SourceLocation rightParenLoc = S->getRParenLoc();
+ const char *rparenBuf = SM->getCharacterData(rightParenLoc);
+ SourceLocation lparenLoc = startLoc.getLocWithOffset(rparenBuf-startBuf);
+ buf = ";\n\t";
+
+ // unsigned long limit = [l_collection countByEnumeratingWithState:&enumState
+ // objects:__rw_items count:16];
+ // which is synthesized into:
+ // unsigned long limit =
+ // ((unsigned int (*)
+ // (id, SEL, struct __objcFastEnumerationState *, id *, unsigned int))
+ // (void *)objc_msgSend)((id)l_collection,
+ // sel_registerName(
+ // "countByEnumeratingWithState:objects:count:"),
+ // &enumState,
+ // (id *)__rw_items, (unsigned int)16);
+ buf += "unsigned long limit =\n\t\t";
+ SynthCountByEnumWithState(buf);
+ buf += ";\n\t";
+ /// if (limit) {
+ /// unsigned long startMutations = *enumState.mutationsPtr;
+ /// do {
+ /// unsigned long counter = 0;
+ /// do {
+ /// if (startMutations != *enumState.mutationsPtr)
+ /// objc_enumerationMutation(l_collection);
+ /// elem = (type)enumState.itemsPtr[counter++];
+ buf += "if (limit) {\n\t";
+ buf += "unsigned long startMutations = *enumState.mutationsPtr;\n\t";
+ buf += "do {\n\t\t";
+ buf += "unsigned long counter = 0;\n\t\t";
+ buf += "do {\n\t\t\t";
+ buf += "if (startMutations != *enumState.mutationsPtr)\n\t\t\t\t";
+ buf += "objc_enumerationMutation(l_collection);\n\t\t\t";
+ buf += elementName;
+ buf += " = (";
+ buf += elementTypeAsString;
+ buf += ")enumState.itemsPtr[counter++];";
+ // Replace ')' in for '(' type elem in collection ')' with all of these.
+ ReplaceText(lparenLoc, 1, buf);
+
+ /// __continue_label: ;
+ /// } while (counter < limit);
+ /// } while (limit = [l_collection countByEnumeratingWithState:&enumState
+ /// objects:__rw_items count:16]);
+ /// elem = nil;
+ /// __break_label: ;
+ /// }
+ /// else
+ /// elem = nil;
+ /// }
+ ///
+ buf = ";\n\t";
+ buf += "__continue_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ buf += ": ;";
+ buf += "\n\t\t";
+ buf += "} while (counter < limit);\n\t";
+ buf += "} while (limit = ";
+ SynthCountByEnumWithState(buf);
+ buf += ");\n\t";
+ buf += elementName;
+ buf += " = ((";
+ buf += elementTypeAsString;
+ buf += ")0);\n\t";
+ buf += "__break_label_";
+ buf += utostr(ObjCBcLabelNo.back());
+ buf += ": ;\n\t";
+ buf += "}\n\t";
+ buf += "else\n\t\t";
+ buf += elementName;
+ buf += " = ((";
+ buf += elementTypeAsString;
+ buf += ")0);\n\t";
+ buf += "}\n";
+
+ // Insert all these *after* the statement body.
+ // FIXME: If this should support Obj-C++, support CXXTryStmt
+ if (isa<CompoundStmt>(S->getBody())) {
+ SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(1);
+ InsertText(endBodyLoc, buf);
+ } else {
+ /* Need to treat single statements specially. For example:
+ *
+ * for (A *a in b) if (stuff()) break;
+ * for (A *a in b) xxxyy;
+ *
+ * The following code simply scans ahead to the semi to find the actual end.
+ */
+ const char *stmtBuf = SM->getCharacterData(OrigEnd);
+ const char *semiBuf = strchr(stmtBuf, ';');
+ assert(semiBuf && "Can't find ';'");
+ SourceLocation endBodyLoc = OrigEnd.getLocWithOffset(semiBuf-stmtBuf+1);
+ InsertText(endBodyLoc, buf);
+ }
+ Stmts.pop_back();
+ ObjCBcLabelNo.pop_back();
+ return 0;
+}
+
+/// RewriteObjCSynchronizedStmt -
+/// This routine rewrites @synchronized(expr) stmt;
+/// into:
+/// objc_sync_enter(expr);
+/// @try stmt @finally { objc_sync_exit(expr); }
+///
+Stmt *RewriteObjC::RewriteObjCSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @synchronized location");
+
+ std::string buf;
+ buf = "objc_sync_enter((id)";
+ const char *lparenBuf = startBuf;
+ while (*lparenBuf != '(') lparenBuf++;
+ ReplaceText(startLoc, lparenBuf-startBuf+1, buf);
+ // We can't use S->getSynchExpr()->getLocEnd() to find the end location, since
+ // the sync expression is typically a message expression that's already
+ // been rewritten! (which implies the SourceLocations are invalid).
+ SourceLocation endLoc = S->getSynchBody()->getLocStart();
+ const char *endBuf = SM->getCharacterData(endLoc);
+ while (*endBuf != ')') endBuf--;
+ SourceLocation rparenLoc = startLoc.getLocWithOffset(endBuf-startBuf);
+ buf = ");\n";
+ // declare a new scope with two variables, _stack and _rethrow.
+ buf += "/* @try scope begin */ \n{ struct _objc_exception_data {\n";
+ buf += "int buf[18/*32-bit i386*/];\n";
+ buf += "char *pointers[4];} _stack;\n";
+ buf += "id volatile _rethrow = 0;\n";
+ buf += "objc_exception_try_enter(&_stack);\n";
+ buf += "if (!_setjmp(_stack.buf)) /* @try block continue */\n";
+ ReplaceText(rparenLoc, 1, buf);
+ startLoc = S->getSynchBody()->getLocEnd();
+ startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '}') && "bogus @synchronized block");
+ SourceLocation lastCurlyLoc = startLoc;
+ buf = "}\nelse {\n";
+ buf += " _rethrow = objc_exception_extract(&_stack);\n";
+ buf += "}\n";
+ buf += "{ /* implicit finally clause */\n";
+ buf += " if (!_rethrow) objc_exception_try_exit(&_stack);\n";
+
+ std::string syncBuf;
+ syncBuf += " objc_sync_exit(";
+
+ Expr *syncExpr = S->getSynchExpr();
+ CastKind CK = syncExpr->getType()->isObjCObjectPointerType()
+ ? CK_BitCast :
+ syncExpr->getType()->isBlockPointerType()
+ ? CK_BlockPointerToObjCPointerCast
+ : CK_CPointerToObjCPointerCast;
+ syncExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK, syncExpr);
+ std::string syncExprBufS;
+ llvm::raw_string_ostream syncExprBuf(syncExprBufS);
+ syncExpr->printPretty(syncExprBuf, *Context, 0,
+ PrintingPolicy(LangOpts));
+ syncBuf += syncExprBuf.str();
+ syncBuf += ");";
+
+ buf += syncBuf;
+ buf += "\n if (_rethrow) objc_exception_throw(_rethrow);\n";
+ buf += "}\n";
+ buf += "}";
+
+ ReplaceText(lastCurlyLoc, 1, buf);
+
+ bool hasReturns = false;
+ HasReturnStmts(S->getSynchBody(), hasReturns);
+ if (hasReturns)
+ RewriteSyncReturnStmts(S->getSynchBody(), syncBuf);
+
+ return 0;
+}
+
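+// WarnAboutReturnGotoStmts - Walk S and report TryFinallyContainsReturnDiag
+// for every return or goto statement found; used to flag jumps out of a
+// @try that has a @finally clause.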
+void RewriteObjC::WarnAboutReturnGotoStmts(Stmt *S)
+{
+ // Perform a bottom up traversal of all children.
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
+ if (*CI)
+ WarnAboutReturnGotoStmts(*CI);
+
+ if (isa<ReturnStmt>(S) || isa<GotoStmt>(S)) {
+ Diags.Report(Context->getFullLoc(S->getLocStart()),
+ TryFinallyContainsReturnDiag);
+ }
+ return;
+}
+
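+// HasReturnStmts - Set hasReturns to true if S or any statement nested
+// inside it is a ReturnStmt.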
+void RewriteObjC::HasReturnStmts(Stmt *S, bool &hasReturns)
+{
+ // Perform a bottom up traversal of all children.
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
+ if (*CI)
+ HasReturnStmts(*CI, hasReturns);
+
+ if (isa<ReturnStmt>(S))
+ hasReturns = true;
+ return;
+}
+
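+// RewriteTryReturnStmts - Rewrite every 'return ...;' nested in a rewritten
+// @try body into '{ objc_exception_try_exit(&_stack); return ...; }' so the
+// exception stack entry is popped before control leaves the @try.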
+void RewriteObjC::RewriteTryReturnStmts(Stmt *S) {
+ // Perform a bottom up traversal of all children.
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
+ if (*CI) {
+ RewriteTryReturnStmts(*CI);
+ }
+ if (isa<ReturnStmt>(S)) {
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ const char *semiBuf = strchr(startBuf, ';');
+ assert((*semiBuf == ';') && "RewriteTryReturnStmts: can't find ';'");
+ SourceLocation onePastSemiLoc = startLoc.getLocWithOffset(semiBuf-startBuf+1);
+
+ std::string buf;
+ buf = "{ objc_exception_try_exit(&_stack); return";
+
+ ReplaceText(startLoc, 6, buf);
+ InsertText(onePastSemiLoc, "}");
+ }
+ return;
+}
+
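+// RewriteSyncReturnStmts - Like RewriteTryReturnStmts, but additionally
+// emits the objc_sync_exit(...) call (passed in via syncExitBuf) before the
+// rewritten return.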
+void RewriteObjC::RewriteSyncReturnStmts(Stmt *S, std::string syncExitBuf) {
+ // Perform a bottom up traversal of all children.
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
+ if (*CI) {
+ RewriteSyncReturnStmts(*CI, syncExitBuf);
+ }
+ if (isa<ReturnStmt>(S)) {
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ const char *semiBuf = strchr(startBuf, ';');
+ assert((*semiBuf == ';') && "RewriteSyncReturnStmts: can't find ';'");
+ SourceLocation onePastSemiLoc = startLoc.getLocWithOffset(semiBuf-startBuf+1);
+
+ std::string buf;
+ buf = "{ objc_exception_try_exit(&_stack);";
+ buf += syncExitBuf;
+ buf += " return";
+
+ ReplaceText(startLoc, 6, buf);
+ InsertText(onePastSemiLoc, "}");
+ }
+ return;
+}
+
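+/// RewriteObjCTryStmt - Rewrite @try/@catch/@finally into its setjmp-based
+/// C form. A rough sketch of the generated shape (the exact text is built
+/// up in the buffers below):
+///   { struct _objc_exception_data _stack; id volatile _rethrow = 0;
+///     objc_exception_try_enter(&_stack);
+///     if (!_setjmp(_stack.buf)) { /* @try body */ }
+///     else { id _caught = objc_exception_extract(&_stack);
+///            /* if/else chain of objc_exception_match() tests per @catch */ }
+///     { /* @finally (explicit or implicit): try_exit / rethrow */ } }
+///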
+Stmt *RewriteObjC::RewriteObjCTryStmt(ObjCAtTryStmt *S) {
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @try location");
+
+ std::string buf;
+ // declare a new scope with two variables, _stack and _rethrow.
+ buf = "/* @try scope begin */ { struct _objc_exception_data {\n";
+ buf += "int buf[18/*32-bit i386*/];\n";
+ buf += "char *pointers[4];} _stack;\n";
+ buf += "id volatile _rethrow = 0;\n";
+ buf += "objc_exception_try_enter(&_stack);\n";
+ buf += "if (!_setjmp(_stack.buf)) /* @try block continue */\n";
+
+ ReplaceText(startLoc, 4, buf);
+
+ startLoc = S->getTryBody()->getLocEnd();
+ startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '}') && "bogus @try block");
+
+ SourceLocation lastCurlyLoc = startLoc;
+ if (S->getNumCatchStmts()) {
+ startLoc = startLoc.getLocWithOffset(1);
+ buf = " /* @catch begin */ else {\n";
+ buf += " id _caught = objc_exception_extract(&_stack);\n";
+ buf += " objc_exception_try_enter (&_stack);\n";
+ buf += " if (_setjmp(_stack.buf))\n";
+ buf += " _rethrow = objc_exception_extract(&_stack);\n";
+ buf += " else { /* @catch continue */";
+
+ InsertText(startLoc, buf);
+ } else { /* no catch list */
+ buf = "}\nelse {\n";
+ buf += " _rethrow = objc_exception_extract(&_stack);\n";
+ buf += "}";
+ ReplaceText(lastCurlyLoc, 1, buf);
+ }
+ Stmt *lastCatchBody = 0;
+ for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) {
+ ObjCAtCatchStmt *Catch = S->getCatchStmt(I);
+ VarDecl *catchDecl = Catch->getCatchParamDecl();
+
+ if (I == 0)
+ buf = "if ("; // we are generating code for the first catch clause
+ else
+ buf = "else if (";
+ startLoc = Catch->getLocStart();
+ startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @catch location");
+
+ const char *lParenLoc = strchr(startBuf, '(');
+
+ if (Catch->hasEllipsis()) {
+ // Now rewrite the body...
+ lastCatchBody = Catch->getCatchBody();
+ SourceLocation bodyLoc = lastCatchBody->getLocStart();
+ const char *bodyBuf = SM->getCharacterData(bodyLoc);
+ assert(*SM->getCharacterData(Catch->getRParenLoc()) == ')' &&
+ "bogus @catch paren location");
+ assert((*bodyBuf == '{') && "bogus @catch body location");
+
+ buf += "1) { id _tmp = _caught;";
+ Rewrite.ReplaceText(startLoc, bodyBuf-startBuf+1, buf);
+ } else if (catchDecl) {
+ QualType t = catchDecl->getType();
+ if (t == Context->getObjCIdType()) {
+ buf += "1) { ";
+ ReplaceText(startLoc, lParenLoc-startBuf+1, buf);
+ } else if (const ObjCObjectPointerType *Ptr =
+ t->getAs<ObjCObjectPointerType>()) {
+ // Should be a pointer to a class.
+ ObjCInterfaceDecl *IDecl = Ptr->getObjectType()->getInterface();
+ if (IDecl) {
+ buf += "objc_exception_match((struct objc_class *)objc_getClass(\"";
+ buf += IDecl->getNameAsString();
+ buf += "\"), (struct objc_object *)_caught)) { ";
+ ReplaceText(startLoc, lParenLoc-startBuf+1, buf);
+ }
+ }
+ // Now rewrite the body...
+ lastCatchBody = Catch->getCatchBody();
+ SourceLocation rParenLoc = Catch->getRParenLoc();
+ SourceLocation bodyLoc = lastCatchBody->getLocStart();
+ const char *bodyBuf = SM->getCharacterData(bodyLoc);
+ const char *rParenBuf = SM->getCharacterData(rParenLoc);
+ assert((*rParenBuf == ')') && "bogus @catch paren location");
+ assert((*bodyBuf == '{') && "bogus @catch body location");
+
+ // Here we replace ") {" with "= _caught;" (which initializes and
+ // declares the @catch parameter).
+ ReplaceText(rParenLoc, bodyBuf-rParenBuf+1, " = _caught;");
+ } else {
+ llvm_unreachable("@catch rewrite bug");
+ }
+ }
+ // Complete the catch list...
+ if (lastCatchBody) {
+ SourceLocation bodyLoc = lastCatchBody->getLocEnd();
+ assert(*SM->getCharacterData(bodyLoc) == '}' &&
+ "bogus @catch body location");
+
+ // Insert the last (implicit) else clause *before* the right curly brace.
+ bodyLoc = bodyLoc.getLocWithOffset(-1);
+ buf = "} /* last catch end */\n";
+ buf += "else {\n";
+ buf += " _rethrow = _caught;\n";
+ buf += " objc_exception_try_exit(&_stack);\n";
+ buf += "} } /* @catch end */\n";
+ if (!S->getFinallyStmt())
+ buf += "}\n";
+ InsertText(bodyLoc, buf);
+
+ // Set lastCurlyLoc
+ lastCurlyLoc = lastCatchBody->getLocEnd();
+ }
+ if (ObjCAtFinallyStmt *finalStmt = S->getFinallyStmt()) {
+ startLoc = finalStmt->getLocStart();
+ startBuf = SM->getCharacterData(startLoc);
+ assert((*startBuf == '@') && "bogus @finally start");
+
+ ReplaceText(startLoc, 8, "/* @finally */");
+
+ Stmt *body = finalStmt->getFinallyBody();
+ SourceLocation startLoc = body->getLocStart();
+ SourceLocation endLoc = body->getLocEnd();
+ assert(*SM->getCharacterData(startLoc) == '{' &&
+ "bogus @finally body location");
+ assert(*SM->getCharacterData(endLoc) == '}' &&
+ "bogus @finally body location");
+
+ startLoc = startLoc.getLocWithOffset(1);
+ InsertText(startLoc, " if (!_rethrow) objc_exception_try_exit(&_stack);\n");
+ endLoc = endLoc.getLocWithOffset(-1);
+ InsertText(endLoc, " if (_rethrow) objc_exception_throw(_rethrow);\n");
+
+ // Set lastCurlyLoc
+ lastCurlyLoc = body->getLocEnd();
+
+ // Now check for any return/goto statements within the @try.
+ WarnAboutReturnGotoStmts(S->getTryBody());
+ } else { /* no finally clause - make sure we synthesize an implicit one */
+ buf = "{ /* implicit finally clause */\n";
+ buf += " if (!_rethrow) objc_exception_try_exit(&_stack);\n";
+ buf += " if (_rethrow) objc_exception_throw(_rethrow);\n";
+ buf += "}";
+ ReplaceText(lastCurlyLoc, 1, buf);
+
+ // Now check for any return statements within the @try.
+ // The implicit finally clause won't be called if the @try contains any
+ // jump statements.
+ bool hasReturns = false;
+ HasReturnStmts(S->getTryBody(), hasReturns);
+ if (hasReturns)
+ RewriteTryReturnStmts(S->getTryBody());
+ }
+ // Now emit the final closing curly brace...
+ lastCurlyLoc = lastCurlyLoc.getLocWithOffset(1);
+ InsertText(lastCurlyLoc, " } /* @try scope end */\n");
+ return 0;
+}
+
+// This can't be done with ReplaceStmt(S, ThrowExpr), since
+// the throw expression is typically a message expression that's already
+// been rewritten! (which implies the SourceLocations are invalid).
+Stmt *RewriteObjC::RewriteObjCThrowStmt(ObjCAtThrowStmt *S) {
+ // Get the start location and compute the semi location.
+ SourceLocation startLoc = S->getLocStart();
+ const char *startBuf = SM->getCharacterData(startLoc);
+
+ assert((*startBuf == '@') && "bogus @throw location");
+
+ std::string buf;
+ /* void objc_exception_throw(id) __attribute__((noreturn)); */
+ if (S->getThrowExpr())
+ buf = "objc_exception_throw(";
+ else // add an implicit argument
+ buf = "objc_exception_throw(_caught";
+
+ // handle "@ throw" correctly.
+ const char *wBuf = strchr(startBuf, 'w');
+ assert((*wBuf == 'w') && "@throw: can't find 'w'");
+ ReplaceText(startLoc, wBuf-startBuf+1, buf);
+
+ const char *semiBuf = strchr(startBuf, ';');
+ assert((*semiBuf == ';') && "@throw: can't find ';'");
+ SourceLocation semiLoc = startLoc.getLocWithOffset(semiBuf-startBuf);
+ ReplaceText(semiLoc, 1, ");");
+ return 0;
+}
+
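+// RewriteAtEncode - Replace an @encode(type) expression with a C string
+// literal containing the Objective-C type encoding of that type.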
+Stmt *RewriteObjC::RewriteAtEncode(ObjCEncodeExpr *Exp) {
+ // Create a new string expression.
+ QualType StrType = Context->getPointerType(Context->CharTy);
+ std::string StrEncoding;
+ Context->getObjCEncodingForType(Exp->getEncodedType(), StrEncoding);
+ Expr *Replacement = StringLiteral::Create(*Context, StrEncoding,
+ StringLiteral::Ascii, false,
+ StrType, SourceLocation());
+ ReplaceStmt(Exp, Replacement);
+
+ // Replace this subexpr in the parent.
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return Replacement;
+}
+
+Stmt *RewriteObjC::RewriteAtSelector(ObjCSelectorExpr *Exp) {
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ assert(SelGetUidFunctionDecl && "Can't find sel_registerName() decl");
+ // Create a call to sel_registerName("selName").
+ SmallVector<Expr*, 8> SelExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ SelExprs.push_back(StringLiteral::Create(*Context,
+ Exp->getSelector().getAsString(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ &SelExprs[0], SelExprs.size());
+ ReplaceStmt(Exp, SelExp);
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return SelExp;
+}
+
+CallExpr *RewriteObjC::SynthesizeCallToFunctionDecl(
+ FunctionDecl *FD, Expr **args, unsigned nargs, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ // Get the type, we will need to reference it in a couple spots.
+ QualType msgSendType = FD->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, msgSendType,
+ VK_LValue, SourceLocation());
+
+ // Now, we cast the reference to a pointer to the objc_msgSend type.
+ QualType pToFunc = Context->getPointerType(msgSendType);
+ ImplicitCastExpr *ICE =
+ ImplicitCastExpr::Create(*Context, pToFunc, CK_FunctionToPointerDecay,
+ DRE, 0, VK_RValue);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+
+ CallExpr *Exp =
+ new (Context) CallExpr(*Context, ICE, args, nargs,
+ FT->getCallResultType(*Context),
+ VK_RValue, EndLoc);
+ return Exp;
+}
+
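+// scanForProtocolRefs - Scan [startBuf, endBuf) for a '<...>' protocol
+// qualifier list. On success, point startRef/endRef at the '<' and '>'
+// and return true.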
+static bool scanForProtocolRefs(const char *startBuf, const char *endBuf,
+ const char *&startRef, const char *&endRef) {
+ while (startBuf < endBuf) {
+ if (*startBuf == '<')
+ startRef = startBuf; // mark the start.
+ if (*startBuf == '>') {
+ if (startRef && *startRef == '<') {
+ endRef = startBuf; // mark the end.
+ return true;
+ }
+ return false;
+ }
+ startBuf++;
+ }
+ return false;
+}
+
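+// scanToNextArgument - Advance argRef to the ',' or ')' that terminates the
+// current argument, skipping over any nested '<...>' protocol lists.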
+static void scanToNextArgument(const char *&argRef) {
+ int angle = 0;
+ while (*argRef != ')' && (*argRef != ',' || angle > 0)) {
+ if (*argRef == '<')
+ angle++;
+ else if (*argRef == '>')
+ angle--;
+ argRef++;
+ }
+ assert(angle == 0 && "scanToNextArgument - bad protocol type syntax");
+}
+
+bool RewriteObjC::needToScanForQualifiers(QualType T) {
+ if (T->isObjCQualifiedIdType())
+ return true;
+ if (const PointerType *PT = T->getAs<PointerType>()) {
+ if (PT->getPointeeType()->isObjCQualifiedIdType())
+ return true;
+ }
+ if (T->isObjCObjectPointerType()) {
+ T = T->getPointeeType();
+ return T->isObjCQualifiedInterfaceType();
+ }
+ if (T->isArrayType()) {
+ QualType ElemTy = Context->getBaseElementType(T);
+ return needToScanForQualifiers(ElemTy);
+ }
+ return false;
+}
+
+void RewriteObjC::RewriteObjCQualifiedInterfaceTypes(Expr *E) {
+ QualType Type = E->getType();
+ if (needToScanForQualifiers(Type)) {
+ SourceLocation Loc, EndLoc;
+
+ if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E)) {
+ Loc = ECE->getLParenLoc();
+ EndLoc = ECE->getRParenLoc();
+ } else {
+ Loc = E->getLocStart();
+ EndLoc = E->getLocEnd();
+ }
+ // This will defend against trying to rewrite synthesized expressions.
+ if (Loc.isInvalid() || EndLoc.isInvalid())
+ return;
+
+ const char *startBuf = SM->getCharacterData(Loc);
+ const char *endBuf = SM->getCharacterData(EndLoc);
+ const char *startRef = 0, *endRef = 0;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc = Loc.getLocWithOffset(startRef-startBuf);
+ SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-startBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*");
+ InsertText(GreaterLoc, "*/");
+ }
+ }
+}
+
+void RewriteObjC::RewriteObjCQualifiedInterfaceTypes(Decl *Dcl) {
+ SourceLocation Loc;
+ QualType Type;
+ const FunctionProtoType *proto = 0;
+ if (VarDecl *VD = dyn_cast<VarDecl>(Dcl)) {
+ Loc = VD->getLocation();
+ Type = VD->getType();
+ }
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Dcl)) {
+ Loc = FD->getLocation();
+ // Check for ObjC 'id' and class types that have been adorned with protocol
+ // information (id<p>, C<p>*). The protocol references need to be rewritten!
+ const FunctionType *funcType = FD->getType()->getAs<FunctionType>();
+ assert(funcType && "missing function type");
+ proto = dyn_cast<FunctionProtoType>(funcType);
+ if (!proto)
+ return;
+ Type = proto->getResultType();
+ }
+ else if (FieldDecl *FD = dyn_cast<FieldDecl>(Dcl)) {
+ Loc = FD->getLocation();
+ Type = FD->getType();
+ }
+ else
+ return;
+
+ if (needToScanForQualifiers(Type)) {
+ // Since types are unique, we need to scan the buffer.
+
+ const char *endBuf = SM->getCharacterData(Loc);
+ const char *startBuf = endBuf;
+ while (*startBuf != ';' && *startBuf != '<' && startBuf != MainFileStart)
+ startBuf--; // scan backward (from the decl location) for return type.
+ const char *startRef = 0, *endRef = 0;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc = Loc.getLocWithOffset(startRef-endBuf);
+ SourceLocation GreaterLoc = Loc.getLocWithOffset(endRef-endBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*");
+ InsertText(GreaterLoc, "*/");
+ }
+ }
+ if (!proto)
+ return; // most likely, was a variable
+ // Now check arguments.
+ const char *startBuf = SM->getCharacterData(Loc);
+ const char *startFuncBuf = startBuf;
+ for (unsigned i = 0; i < proto->getNumArgs(); i++) {
+ if (needToScanForQualifiers(proto->getArgType(i))) {
+ // Since types are unique, we need to scan the buffer.
+
+ const char *endBuf = startBuf;
+ // scan forward (from the decl location) for argument types.
+ scanToNextArgument(endBuf);
+ const char *startRef = 0, *endRef = 0;
+ if (scanForProtocolRefs(startBuf, endBuf, startRef, endRef)) {
+ // Get the locations of the startRef, endRef.
+ SourceLocation LessLoc =
+ Loc.getLocWithOffset(startRef-startFuncBuf);
+ SourceLocation GreaterLoc =
+ Loc.getLocWithOffset(endRef-startFuncBuf+1);
+ // Comment out the protocol references.
+ InsertText(LessLoc, "/*");
+ InsertText(GreaterLoc, "*/");
+ }
+ startBuf = ++endBuf;
+ }
+ else {
+ // If the function name is derived from a macro expansion, then the
+ // argument buffer will not follow the name. Need to speak with Chris.
+ while (*startBuf && *startBuf != ')' && *startBuf != ',')
+ startBuf++; // scan forward (from the decl location) for argument types.
+ startBuf++;
+ }
+ }
+}
+
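+// RewriteTypeOfDecl - Replace a __typeof__(expr) specifier in a variable
+// declaration with the concrete type of the underlying expression, e.g.
+// '__typeof__(a) b = init;' becomes 'T b = init;' where T is a's type.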
+void RewriteObjC::RewriteTypeOfDecl(VarDecl *ND) {
+ QualType QT = ND->getType();
+ const Type* TypePtr = QT->getAs<Type>();
+ if (!isa<TypeOfExprType>(TypePtr))
+ return;
+ while (isa<TypeOfExprType>(TypePtr)) {
+ const TypeOfExprType *TypeOfExprTypePtr = cast<TypeOfExprType>(TypePtr);
+ QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType();
+ TypePtr = QT->getAs<Type>();
+ }
+ // FIXME. This will not work for multiple declarators; as in:
+ // __typeof__(a) b,c,d;
+ std::string TypeAsString(QT.getAsString(Context->getPrintingPolicy()));
+ SourceLocation DeclLoc = ND->getTypeSpecStartLoc();
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ if (ND->getInit()) {
+ std::string Name(ND->getNameAsString());
+ TypeAsString += " " + Name + " = ";
+ Expr *E = ND->getInit();
+ SourceLocation startLoc;
+ if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
+ startLoc = ECE->getLParenLoc();
+ else
+ startLoc = E->getLocStart();
+ startLoc = SM->getExpansionLoc(startLoc);
+ const char *endBuf = SM->getCharacterData(startLoc);
+ ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
+ }
+ else {
+ SourceLocation X = ND->getLocEnd();
+ X = SM->getExpansionLoc(X);
+ const char *endBuf = SM->getCharacterData(X);
+ ReplaceText(DeclLoc, endBuf-startBuf-1, TypeAsString);
+ }
+}
+
+// SynthSelGetUidFunctionDecl - SEL sel_registerName(const char *str);
+void RewriteObjC::SynthSelGetUidFunctionDecl() {
+ IdentifierInfo *SelGetUidIdent = &Context->Idents.get("sel_registerName");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
+ QualType getFuncType =
+ getSimpleFunctionType(Context->getObjCSelType(), &ArgTys[0], ArgTys.size());
+ SelGetUidFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ SelGetUidIdent, getFuncType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+void RewriteObjC::RewriteFunctionDecl(FunctionDecl *FD) {
+ // declared in <objc/objc.h>
+ if (FD->getIdentifier() &&
+ FD->getName() == "sel_registerName") {
+ SelGetUidFunctionDecl = FD;
+ return;
+ }
+ RewriteObjCQualifiedInterfaceTypes(FD);
+}
+
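+// RewriteBlockPointerType - Append Type's string form to Str, rewriting any
+// block pointer '^' into a plain function pointer '*'.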
+void RewriteObjC::RewriteBlockPointerType(std::string& Str, QualType Type) {
+ std::string TypeString(Type.getAsString(Context->getPrintingPolicy()));
+ const char *argPtr = TypeString.c_str();
+ if (!strchr(argPtr, '^')) {
+ Str += TypeString;
+ return;
+ }
+ while (*argPtr) {
+ Str += (*argPtr == '^' ? '*' : *argPtr);
+ argPtr++;
+ }
+}
+
+// FIXME. Consolidate this routine with RewriteBlockPointerType.
+void RewriteObjC::RewriteBlockPointerTypeVariable(std::string& Str,
+ ValueDecl *VD) {
+ QualType Type = VD->getType();
+ std::string TypeString(Type.getAsString(Context->getPrintingPolicy()));
+ const char *argPtr = TypeString.c_str();
+ int paren = 0;
+ while (*argPtr) {
+ switch (*argPtr) {
+ case '(':
+ Str += *argPtr;
+ paren++;
+ break;
+ case ')':
+ Str += *argPtr;
+ paren--;
+ break;
+ case '^':
+ Str += '*';
+ if (paren == 1)
+ Str += VD->getNameAsString();
+ break;
+ default:
+ Str += *argPtr;
+ break;
+ }
+ argPtr++;
+ }
+}
+
+
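+// RewriteBlockLiteralFunctionDecl - Insert a plain C prototype for FD, with
+// any block-pointer parameters rewritten as function pointers, at FD's
+// type-spec start location.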
+void RewriteObjC::RewriteBlockLiteralFunctionDecl(FunctionDecl *FD) {
+ SourceLocation FunLocStart = FD->getTypeSpecStartLoc();
+ const FunctionType *funcType = FD->getType()->getAs<FunctionType>();
+ const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(funcType);
+ if (!proto)
+ return;
+ QualType Type = proto->getResultType();
+ std::string FdStr = Type.getAsString(Context->getPrintingPolicy());
+ FdStr += " ";
+ FdStr += FD->getName();
+ FdStr += "(";
+ unsigned numArgs = proto->getNumArgs();
+ for (unsigned i = 0; i < numArgs; i++) {
+ QualType ArgType = proto->getArgType(i);
+ RewriteBlockPointerType(FdStr, ArgType);
+ if (i+1 < numArgs)
+ FdStr += ", ";
+ }
+ FdStr += ");\n";
+ InsertText(FunLocStart, FdStr);
+ CurFunctionDeclToDeclareForBlock = 0;
+}
+
+// SynthSuperContructorFunctionDecl - id __rw_objc_super(id obj, id super);
+void RewriteObjC::SynthSuperContructorFunctionDecl() {
+ if (SuperContructorFunctionDecl)
+ return;
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("__rw_objc_super");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size());
+ SuperContructorFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthMsgSendFunctionDecl - id objc_msgSend(id self, SEL op, ...);
+void RewriteObjC::SynthMsgSendFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
+ MsgSendFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthMsgSendSuperFunctionDecl - id objc_msgSendSuper(struct objc_super *, SEL op, ...);
+void RewriteObjC::SynthMsgSendSuperFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSendSuper");
+ SmallVector<QualType, 16> ArgTys;
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("objc_super"));
+ QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
+ assert(!argT.isNull() && "Can't build 'struct objc_super *' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
+ MsgSendSuperFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthMsgSendStretFunctionDecl - id objc_msgSend_stret(id self, SEL op, ...);
+void RewriteObjC::SynthMsgSendStretFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_stret");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
+ MsgSendStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthMsgSendSuperStretFunctionDecl -
+// id objc_msgSendSuper_stret(struct objc_super *, SEL op, ...);
+void RewriteObjC::SynthMsgSendSuperStretFunctionDecl() {
+ IdentifierInfo *msgSendIdent =
+ &Context->Idents.get("objc_msgSendSuper_stret");
+ SmallVector<QualType, 16> ArgTys;
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("objc_super"));
+ QualType argT = Context->getPointerType(Context->getTagDeclType(RD));
+ assert(!argT.isNull() && "Can't build 'struct objc_super *' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
+ MsgSendSuperStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthMsgSendFpretFunctionDecl - double objc_msgSend_fpret(id self, SEL op, ...);
+void RewriteObjC::SynthMsgSendFpretFunctionDecl() {
+ IdentifierInfo *msgSendIdent = &Context->Idents.get("objc_msgSend_fpret");
+ SmallVector<QualType, 16> ArgTys;
+ QualType argT = Context->getObjCIdType();
+ assert(!argT.isNull() && "Can't find 'id' type");
+ ArgTys.push_back(argT);
+ argT = Context->getObjCSelType();
+ assert(!argT.isNull() && "Can't find 'SEL' type");
+ ArgTys.push_back(argT);
+ QualType msgSendType = getSimpleFunctionType(Context->DoubleTy,
+ &ArgTys[0], ArgTys.size(),
+ true /*isVariadic*/);
+ MsgSendFpretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ msgSendIdent, msgSendType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthGetClassFunctionDecl - id objc_getClass(const char *name);
+void RewriteObjC::SynthGetClassFunctionDecl() {
+ IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getClass");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
+ QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size());
+ GetClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ getClassIdent, getClassType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+// SynthGetSuperClassFunctionDecl - Class class_getSuperclass(Class cls);
+void RewriteObjC::SynthGetSuperClassFunctionDecl() {
+ IdentifierInfo *getSuperClassIdent =
+ &Context->Idents.get("class_getSuperclass");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getObjCClassType());
+ QualType getClassType = getSimpleFunctionType(Context->getObjCClassType(),
+ &ArgTys[0], ArgTys.size());
+ GetSuperClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ getSuperClassIdent,
+ getClassType, 0,
+ SC_Extern,
+ SC_None,
+ false);
+}
+
+// SynthGetMetaClassFunctionDecl - id objc_getMetaClass(const char *name);
+void RewriteObjC::SynthGetMetaClassFunctionDecl() {
+ IdentifierInfo *getClassIdent = &Context->Idents.get("objc_getMetaClass");
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
+ QualType getClassType = getSimpleFunctionType(Context->getObjCIdType(),
+ &ArgTys[0], ArgTys.size());
+ GetMetaClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
+ SourceLocation(),
+ SourceLocation(),
+ getClassIdent, getClassType, 0,
+ SC_Extern,
+ SC_None, false);
+}
+
+Stmt *RewriteObjC::RewriteObjCStringLiteral(ObjCStringLiteral *Exp) {
+ QualType strType = getConstantStringStructType();
+
+ std::string S = "__NSConstantStringImpl_";
+
+ std::string tmpName = InFileName;
+ unsigned i;
+ for (i=0; i < tmpName.length(); i++) {
+ char c = tmpName.at(i);
+ // replace any non alphanumeric characters with '_'.
+ if (!isalpha(c) && (c < '0' || c > '9'))
+ tmpName[i] = '_';
+ }
+ S += tmpName;
+ S += "_";
+ S += utostr(NumObjCStringLiterals++);
+
+ Preamble += "static __NSConstantStringImpl " + S;
+ Preamble += " __attribute__ ((section (\"__DATA, __cfstring\"))) = {__CFConstantStringClassReference,";
+ Preamble += "0x000007c8,"; // utf8_str
+ // The pretty printer for StringLiteral handles escape characters properly.
+ std::string prettyBufS;
+ llvm::raw_string_ostream prettyBuf(prettyBufS);
+ Exp->getString()->printPretty(prettyBuf, *Context, 0,
+ PrintingPolicy(LangOpts));
+ Preamble += prettyBuf.str();
+ Preamble += ",";
+ Preamble += utostr(Exp->getString()->getByteLength()) + "};\n";
+
+ VarDecl *NewVD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), &Context->Idents.get(S),
+ strType, 0, SC_Static, SC_None);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(NewVD, false, strType, VK_LValue,
+ SourceLocation());
+ Expr *Unop = new (Context) UnaryOperator(DRE, UO_AddrOf,
+ Context->getPointerType(DRE->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ // cast to NSConstantString *
+ CastExpr *cast = NoTypeInfoCStyleCastExpr(Context, Exp->getType(),
+ CK_CPointerToObjCPointerCast, Unop);
+ ReplaceStmt(Exp, cast);
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return cast;
+}
+
+// struct objc_super { struct objc_object *receiver; struct objc_class *super; };
+QualType RewriteObjC::getSuperStructType() {
+ if (!SuperStructDecl) {
+ SuperStructDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("objc_super"));
+ QualType FieldTypes[2];
+
+ // struct objc_object *receiver;
+ FieldTypes[0] = Context->getObjCIdType();
+ // struct objc_class *super;
+ FieldTypes[1] = Context->getObjCClassType();
+
+ // Create fields
+ for (unsigned i = 0; i < 2; ++i) {
+ SuperStructDecl->addDecl(FieldDecl::Create(*Context, SuperStructDecl,
+ SourceLocation(),
+ SourceLocation(), 0,
+ FieldTypes[i], 0,
+ /*BitWidth=*/0,
+ /*Mutable=*/false,
+ /*HasInit=*/false));
+ }
+
+ SuperStructDecl->completeDefinition();
+ }
+ return Context->getTagDeclType(SuperStructDecl);
+}
+
+QualType RewriteObjC::getConstantStringStructType() {
+ if (!ConstantStringDecl) {
+ ConstantStringDecl = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__NSConstantStringImpl"));
+ QualType FieldTypes[4];
+
+ // struct objc_object *receiver;
+ FieldTypes[0] = Context->getObjCIdType();
+ // int flags;
+ FieldTypes[1] = Context->IntTy;
+ // char *str;
+ FieldTypes[2] = Context->getPointerType(Context->CharTy);
+ // long length;
+ FieldTypes[3] = Context->LongTy;
+
+ // Create fields
+ for (unsigned i = 0; i < 4; ++i) {
+ ConstantStringDecl->addDecl(FieldDecl::Create(*Context,
+ ConstantStringDecl,
+ SourceLocation(),
+ SourceLocation(), 0,
+ FieldTypes[i], 0,
+ /*BitWidth=*/0,
+ /*Mutable=*/true,
+ /*HasInit=*/false));
+ }
+
+ ConstantStringDecl->completeDefinition();
+ }
+ return Context->getTagDeclType(ConstantStringDecl);
+}
+
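+// SynthMessageExpr - Lower an ObjCMessageExpr into a C call through a cast
+// of the appropriate objc_msgSend variant: objc_msgSend_stret for struct
+// returns, objc_msgSend_fpret for floating-point returns, and the *Super
+// flavors when messaging super.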
+Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!SelGetUidFunctionDecl)
+ SynthSelGetUidFunctionDecl();
+ if (!MsgSendFunctionDecl)
+ SynthMsgSendFunctionDecl();
+ if (!MsgSendSuperFunctionDecl)
+ SynthMsgSendSuperFunctionDecl();
+ if (!MsgSendStretFunctionDecl)
+ SynthMsgSendStretFunctionDecl();
+ if (!MsgSendSuperStretFunctionDecl)
+ SynthMsgSendSuperStretFunctionDecl();
+ if (!MsgSendFpretFunctionDecl)
+ SynthMsgSendFpretFunctionDecl();
+ if (!GetClassFunctionDecl)
+ SynthGetClassFunctionDecl();
+ if (!GetSuperClassFunctionDecl)
+ SynthGetSuperClassFunctionDecl();
+ if (!GetMetaClassFunctionDecl)
+ SynthGetMetaClassFunctionDecl();
+
+ // default to objc_msgSend().
+ FunctionDecl *MsgSendFlavor = MsgSendFunctionDecl;
+ // May need to use objc_msgSend_stret() as well.
+ FunctionDecl *MsgSendStretFlavor = 0;
+ if (ObjCMethodDecl *mDecl = Exp->getMethodDecl()) {
+ QualType resultType = mDecl->getResultType();
+ if (resultType->isRecordType())
+ MsgSendStretFlavor = MsgSendStretFunctionDecl;
+ else if (resultType->isRealFloatingType())
+ MsgSendFlavor = MsgSendFpretFunctionDecl;
+ }
+
+ // Synthesize a call to objc_msgSend().
+ SmallVector<Expr*, 8> MsgExprs;
+ switch (Exp->getReceiverKind()) {
+ case ObjCMessageExpr::SuperClass: {
+ MsgSendFlavor = MsgSendSuperFunctionDecl;
+ if (MsgSendStretFlavor)
+ MsgSendStretFlavor = MsgSendSuperStretFunctionDecl;
+ assert(MsgSendFlavor && "MsgSendFlavor is NULL!");
+
+ ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface();
+
+ SmallVector<Expr*, 4> InitExprs;
+
+ // set the receiver to self, the first argument to all methods.
+ InitExprs.push_back(
+ NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK_BitCast,
+ new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ false,
+ Context->getObjCIdType(),
+ VK_RValue,
+ SourceLocation()))
+ ); // set the 'receiver'.
+
+ // (id)class_getSuperclass((Class)objc_getMetaClass("CurrentClass"))
+ SmallVector<Expr*, 8> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ ClassDecl->getIdentifier()->getName(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size(),
+ StartLoc,
+ EndLoc);
+ // (Class)objc_getMetaClass("CurrentClass")
+ CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
+ Context->getObjCClassType(),
+ CK_BitCast, Cls);
+ ClsExprs.clear();
+ ClsExprs.push_back(ArgExpr);
+ Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
+ &ClsExprs[0], ClsExprs.size(),
+ StartLoc, EndLoc);
+
+ // (id)class_getSuperclass((Class)objc_getMetaClass("CurrentClass"))
+ // To turn off a warning, type-cast to 'id'
+ InitExprs.push_back( // set 'super class', using class_getSuperclass().
+ NoTypeInfoCStyleCastExpr(Context,
+ Context->getObjCIdType(),
+ CK_BitCast, Cls));
+ // struct objc_super
+ QualType superType = getSuperStructType();
+ Expr *SuperRep;
+
+ if (LangOpts.MicrosoftExt) {
+ SynthSuperContructorFunctionDecl();
+ // Simulate a constructor call...
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl,
+ false, superType, VK_LValue,
+ SourceLocation());
+ SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0],
+ InitExprs.size(),
+ superType, VK_LValue,
+ SourceLocation());
+ // The code for super is a little tricky to prevent collision with
+ // the structure definition in the header. The rewriter has its own
+ // internal definition (__rw_objc_super) that it uses. This is why
+ // we need the cast below. For example:
+ // (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
+ //
+ SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ SuperRep = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(superType),
+ CK_BitCast, SuperRep);
+ } else {
+ // (struct objc_super) { <exprs from above> }
+ InitListExpr *ILE =
+ new (Context) InitListExpr(*Context, SourceLocation(),
+ &InitExprs[0], InitExprs.size(),
+ SourceLocation());
+ TypeSourceInfo *superTInfo
+ = Context->getTrivialTypeSourceInfo(superType);
+ SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
+ superType, VK_LValue,
+ ILE, false);
+ // struct objc_super *
+ SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ }
+ MsgExprs.push_back(SuperRep);
+ break;
+ }
+
+ case ObjCMessageExpr::Class: {
+ SmallVector<Expr*, 8> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ ObjCInterfaceDecl *Class
+ = Exp->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
+ IdentifierInfo *clsName = Class->getIdentifier();
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ clsName->getName(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size(),
+ StartLoc, EndLoc);
+ MsgExprs.push_back(Cls);
+ break;
+ }
+
+ case ObjCMessageExpr::SuperInstance:{
+ MsgSendFlavor = MsgSendSuperFunctionDecl;
+ if (MsgSendStretFlavor)
+ MsgSendStretFlavor = MsgSendSuperStretFunctionDecl;
+ assert(MsgSendFlavor && "MsgSendFlavor is NULL!");
+ ObjCInterfaceDecl *ClassDecl = CurMethodDef->getClassInterface();
+ SmallVector<Expr*, 4> InitExprs;
+
+ InitExprs.push_back(
+ NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK_BitCast,
+ new (Context) DeclRefExpr(CurMethodDef->getSelfDecl(),
+ false,
+ Context->getObjCIdType(),
+ VK_RValue, SourceLocation()))
+ ); // set the 'receiver'.
+
+ // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
+ SmallVector<Expr*, 8> ClsExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ ClsExprs.push_back(StringLiteral::Create(*Context,
+ ClassDecl->getIdentifier()->getName(),
+ StringLiteral::Ascii, false, argType,
+ SourceLocation()));
+ CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
+ &ClsExprs[0],
+ ClsExprs.size(),
+ StartLoc, EndLoc);
+ // (Class)objc_getClass("CurrentClass")
+ CastExpr *ArgExpr = NoTypeInfoCStyleCastExpr(Context,
+ Context->getObjCClassType(),
+ CK_BitCast, Cls);
+ ClsExprs.clear();
+ ClsExprs.push_back(ArgExpr);
+ Cls = SynthesizeCallToFunctionDecl(GetSuperClassFunctionDecl,
+ &ClsExprs[0], ClsExprs.size(),
+ StartLoc, EndLoc);
+
+ // (id)class_getSuperclass((Class)objc_getClass("CurrentClass"))
+ // To turn off a warning, type-cast to 'id'
+ InitExprs.push_back(
+ // set 'super class', using class_getSuperclass().
+ NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK_BitCast, Cls));
+ // struct objc_super
+ QualType superType = getSuperStructType();
+ Expr *SuperRep;
+
+ if (LangOpts.MicrosoftExt) {
+ SynthSuperContructorFunctionDecl();
+ // Simulate a constructor call...
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(SuperContructorFunctionDecl,
+ false, superType, VK_LValue,
+ SourceLocation());
+ SuperRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0],
+ InitExprs.size(),
+ superType, VK_LValue, SourceLocation());
+ // The code for super is a little tricky to prevent collision with
+ // the structure definition in the header. The rewriter has its own
+ // internal definition (__rw_objc_super) that it uses. This is why
+ // we need the cast below. For example:
+ // (struct objc_super *)&__rw_objc_super((id)self, (id)objc_getClass("SUPER"))
+ //
+ SuperRep = new (Context) UnaryOperator(SuperRep, UO_AddrOf,
+ Context->getPointerType(SuperRep->getType()),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ SuperRep = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(superType),
+ CK_BitCast, SuperRep);
+ } else {
+ // (struct objc_super) { <exprs from above> }
+ InitListExpr *ILE =
+ new (Context) InitListExpr(*Context, SourceLocation(),
+ &InitExprs[0], InitExprs.size(),
+ SourceLocation());
+ TypeSourceInfo *superTInfo
+ = Context->getTrivialTypeSourceInfo(superType);
+ SuperRep = new (Context) CompoundLiteralExpr(SourceLocation(), superTInfo,
+ superType, VK_RValue, ILE,
+ false);
+ }
+ MsgExprs.push_back(SuperRep);
+ break;
+ }
+
+ case ObjCMessageExpr::Instance: {
+ // Remove all type-casts because they may contain objc-style types; e.g.
+ // Foo<Proto> *.
+ Expr *recExpr = Exp->getInstanceReceiver();
+ while (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(recExpr))
+ recExpr = CE->getSubExpr();
+ CastKind CK = recExpr->getType()->isObjCObjectPointerType()
+ ? CK_BitCast : recExpr->getType()->isBlockPointerType()
+ ? CK_BlockPointerToObjCPointerCast
+ : CK_CPointerToObjCPointerCast;
+
+ recExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK, recExpr);
+ MsgExprs.push_back(recExpr);
+ break;
+ }
+ }
+
+ // Create a call to sel_registerName("selName"); it will be the 2nd argument.
+ SmallVector<Expr*, 8> SelExprs;
+ QualType argType = Context->getPointerType(Context->CharTy);
+ SelExprs.push_back(StringLiteral::Create(*Context,
+ Exp->getSelector().getAsString(),
+ StringLiteral::Ascii, false,
+ argType, SourceLocation()));
+ CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
+ &SelExprs[0], SelExprs.size(),
+ StartLoc,
+ EndLoc);
+ MsgExprs.push_back(SelExp);
+
+ // Now push any user supplied arguments.
+ for (unsigned i = 0; i < Exp->getNumArgs(); i++) {
+ Expr *userExpr = Exp->getArg(i);
+ // Make all implicit casts explicit...ICE comes in handy:-)
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(userExpr)) {
+ // Reuse the ICE type, it is exactly what the doctor ordered.
+ QualType type = ICE->getType();
+ if (needToScanForQualifiers(type))
+ type = Context->getObjCIdType();
+ // Make sure we convert "type (^)(...)" to "type (*)(...)".
+ (void)convertBlockPointerToFunctionPointer(type);
+ const Expr *SubExpr = ICE->IgnoreParenImpCasts();
+ CastKind CK;
+ if (SubExpr->getType()->isIntegralType(*Context) &&
+ type->isBooleanType()) {
+ CK = CK_IntegralToBoolean;
+ } else if (type->isObjCObjectPointerType()) {
+ if (SubExpr->getType()->isBlockPointerType()) {
+ CK = CK_BlockPointerToObjCPointerCast;
+ } else if (SubExpr->getType()->isPointerType()) {
+ CK = CK_CPointerToObjCPointerCast;
+ } else {
+ CK = CK_BitCast;
+ }
+ } else {
+ CK = CK_BitCast;
+ }
+
+ userExpr = NoTypeInfoCStyleCastExpr(Context, type, CK, userExpr);
+ }
+ // Make id<P...> cast into an 'id' cast.
+ else if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(userExpr)) {
+ if (CE->getType()->isObjCQualifiedIdType()) {
+ while ((CE = dyn_cast<CStyleCastExpr>(userExpr)))
+ userExpr = CE->getSubExpr();
+ CastKind CK;
+ if (userExpr->getType()->isIntegralType(*Context)) {
+ CK = CK_IntegralToPointer;
+ } else if (userExpr->getType()->isBlockPointerType()) {
+ CK = CK_BlockPointerToObjCPointerCast;
+ } else if (userExpr->getType()->isPointerType()) {
+ CK = CK_CPointerToObjCPointerCast;
+ } else {
+ CK = CK_BitCast;
+ }
+ userExpr = NoTypeInfoCStyleCastExpr(Context, Context->getObjCIdType(),
+ CK, userExpr);
+ }
+ }
+ MsgExprs.push_back(userExpr);
+ // We've transferred the ownership to MsgExprs. For now, we *don't* null
+ // out the argument in the original expression (since we aren't deleting
+ // the ObjCMessageExpr). See RewritePropertyOrImplicitSetter() usage for more info.
+ //Exp->setArg(i, 0);
+ }
+ // Generate the funky cast.
+ CastExpr *cast;
+ SmallVector<QualType, 8> ArgTypes;
+ QualType returnType;
+
+ // Push 'id' and 'SEL', the 2 implicit arguments.
+ if (MsgSendFlavor == MsgSendSuperFunctionDecl)
+ ArgTypes.push_back(Context->getPointerType(getSuperStructType()));
+ else
+ ArgTypes.push_back(Context->getObjCIdType());
+ ArgTypes.push_back(Context->getObjCSelType());
+ if (ObjCMethodDecl *OMD = Exp->getMethodDecl()) {
+ // Push any user argument types.
+ for (ObjCMethodDecl::param_iterator PI = OMD->param_begin(),
+ E = OMD->param_end(); PI != E; ++PI) {
+ QualType t = (*PI)->getType()->isObjCQualifiedIdType()
+ ? Context->getObjCIdType()
+ : (*PI)->getType();
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ (void)convertBlockPointerToFunctionPointer(t);
+ ArgTypes.push_back(t);
+ }
+ returnType = Exp->getType();
+ convertToUnqualifiedObjCType(returnType);
+ (void)convertBlockPointerToFunctionPointer(returnType);
+ } else {
+ returnType = Context->getObjCIdType();
+ }
+ // Get the type; we will need to reference it in a couple of spots.
+ QualType msgSendType = MsgSendFlavor->getType();
+
+ // Create a reference to the objc_msgSend() declaration.
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(MsgSendFlavor, false, msgSendType,
+ VK_LValue, SourceLocation());
+
+ // Need to cast objc_msgSend to "void *" (to work around a GCC band-aid).
+ // If we don't do this cast, we get the following bizarre warning/note:
+ // xx.m:13: warning: function called through a non-compatible type
+ // xx.m:13: note: if this code is reached, the program will abort
+ cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, DRE);
+
+ // Now do the "normal" pointer to function cast.
+ QualType castType =
+ getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
+ // If we don't have a method decl, force a variadic cast.
+ Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : true);
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
+
+ const FunctionType *FT = msgSendType->getAs<FunctionType>();
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
+ MsgExprs.size(),
+ FT->getResultType(), VK_RValue,
+ EndLoc);
+ Stmt *ReplacingStmt = CE;
+ if (MsgSendStretFlavor) {
+ // The method returns a struct/union. We must also generate a call to
+ // objc_msgSend_stret and hang both varieties on a conditional expression
+ // that dictates which one to invoke depending on the size of the method's
+ // return type.
+
+ // Create a reference to the objc_msgSend_stret() declaration.
+ DeclRefExpr *STDRE = new (Context) DeclRefExpr(MsgSendStretFlavor,
+ false, msgSendType,
+ VK_LValue, SourceLocation());
+ // Need to cast objc_msgSend_stret to "void *" (see above comment).
+ cast = NoTypeInfoCStyleCastExpr(Context,
+ Context->getPointerType(Context->VoidTy),
+ CK_BitCast, STDRE);
+ // Now do the "normal" pointer to function cast.
+ castType = getSimpleFunctionType(returnType, &ArgTypes[0], ArgTypes.size(),
+ Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : false);
+ castType = Context->getPointerType(castType);
+ cast = NoTypeInfoCStyleCastExpr(Context, castType, CK_BitCast,
+ cast);
+
+ // Don't forget the parens to enforce the proper binding.
+ PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), cast);
+
+ FT = msgSendType->getAs<FunctionType>();
+ CallExpr *STCE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
+ MsgExprs.size(),
+ FT->getResultType(), VK_RValue,
+ SourceLocation());
+
+ // Build sizeof(returnType)
+ UnaryExprOrTypeTraitExpr *sizeofExpr =
+ new (Context) UnaryExprOrTypeTraitExpr(UETT_SizeOf,
+ Context->getTrivialTypeSourceInfo(returnType),
+ Context->getSizeType(), SourceLocation(),
+ SourceLocation());
+ // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
+ // FIXME: The value of 8 is based on the ppc32/x86 ABI for the most common
+ // cases. For x86 it is more complicated, and some kind of target-specific
+ // routine is needed to decide what to do.
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+ IntegerLiteral *limit = IntegerLiteral::Create(*Context,
+ llvm::APInt(IntSize, 8),
+ Context->IntTy,
+ SourceLocation());
+ BinaryOperator *lessThanExpr =
+ new (Context) BinaryOperator(sizeofExpr, limit, BO_LE, Context->IntTy,
+ VK_RValue, OK_Ordinary, SourceLocation());
+ // (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
+ ConditionalOperator *CondExpr =
+ new (Context) ConditionalOperator(lessThanExpr,
+ SourceLocation(), CE,
+ SourceLocation(), STCE,
+ returnType, VK_RValue, OK_Ordinary);
+ ReplacingStmt = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ CondExpr);
+ }
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return ReplacingStmt;
+}
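+
+// As a rough sketch (hypothetical receiver 'obj', selector 'doSomething:' and
+// a single 'id' argument 'arg'), the replacement synthesized above prints as
+// something like:
+// ((id (*)(id, SEL, id))(void *)objc_msgSend)((id)obj,
+// sel_registerName("doSomething:"), arg);
+// and, for methods returning a struct/union, the whole thing is wrapped in the
+// (sizeof(returnType) <= 8 ? objc_msgSend(...) : objc_msgSend_stret(...))
+// conditional built above.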
+
+Stmt *RewriteObjC::RewriteMessageExpr(ObjCMessageExpr *Exp) {
+ Stmt *ReplacingStmt = SynthMessageExpr(Exp, Exp->getLocStart(),
+ Exp->getLocEnd());
+
+ // Now do the actual rewrite.
+ ReplaceStmt(Exp, ReplacingStmt);
+
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return ReplacingStmt;
+}
+
+// typedef struct objc_object Protocol;
+QualType RewriteObjC::getProtocolType() {
+ if (!ProtocolTypeDecl) {
+ TypeSourceInfo *TInfo
+ = Context->getTrivialTypeSourceInfo(Context->getObjCIdType());
+ ProtocolTypeDecl = TypedefDecl::Create(*Context, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("Protocol"),
+ TInfo);
+ }
+ return Context->getTypeDeclType(ProtocolTypeDecl);
+}
+
+/// RewriteObjCProtocolExpr - Rewrite a protocol expression into
+/// a synthesized/forward data reference (to the protocol's metadata).
+/// The forward references (and metadata) are generated in
+/// RewriteObjC::HandleTranslationUnit().
+Stmt *RewriteObjC::RewriteObjCProtocolExpr(ObjCProtocolExpr *Exp) {
+ std::string Name = "_OBJC_PROTOCOL_" + Exp->getProtocol()->getNameAsString();
+ IdentifierInfo *ID = &Context->Idents.get(Name);
+ VarDecl *VD = VarDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), ID, getProtocolType(), 0,
+ SC_Extern, SC_None);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(VD, false, getProtocolType(),
+ VK_LValue, SourceLocation());
+ Expr *DerefExpr = new (Context) UnaryOperator(DRE, UO_AddrOf,
+ Context->getPointerType(DRE->getType()),
+ VK_RValue, OK_Ordinary, SourceLocation());
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, DerefExpr->getType(),
+ CK_BitCast,
+ DerefExpr);
+ ReplaceStmt(Exp, castExpr);
+ ProtocolExprDecls.insert(Exp->getProtocol()->getCanonicalDecl());
+ // delete Exp; leak for now, see RewritePropertyOrImplicitSetter() usage for more info.
+ return castExpr;
+
+}
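+
+// For illustration (assuming a protocol named 'Foo'), @protocol(Foo) is
+// replaced by something like:
+// (Protocol *)&_OBJC_PROTOCOL_Foo
+// where the extern variable and its metadata are emitted later in
+// HandleTranslationUnit().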
+
+bool RewriteObjC::BufferContainsPPDirectives(const char *startBuf,
+ const char *endBuf) {
+ while (startBuf < endBuf) {
+ if (*startBuf == '#') {
+ // Skip whitespace.
+ for (++startBuf; startBuf[0] == ' ' || startBuf[0] == '\t'; ++startBuf)
+ ;
+ if (!strncmp(startBuf, "if", strlen("if")) ||
+ !strncmp(startBuf, "ifdef", strlen("ifdef")) ||
+ !strncmp(startBuf, "ifndef", strlen("ifndef")) ||
+ !strncmp(startBuf, "define", strlen("define")) ||
+ !strncmp(startBuf, "undef", strlen("undef")) ||
+ !strncmp(startBuf, "else", strlen("else")) ||
+ !strncmp(startBuf, "elif", strlen("elif")) ||
+ !strncmp(startBuf, "endif", strlen("endif")) ||
+ !strncmp(startBuf, "pragma", strlen("pragma")) ||
+ !strncmp(startBuf, "include", strlen("include")) ||
+ !strncmp(startBuf, "import", strlen("import")) ||
+ !strncmp(startBuf, "include_next", strlen("include_next")))
+ return true;
+ }
+ startBuf++;
+ }
+ return false;
+}
+
+/// RewriteObjCInternalStruct - Rewrite one internal struct corresponding to
+/// an objective-c class with ivars.
+void RewriteObjC::RewriteObjCInternalStruct(ObjCInterfaceDecl *CDecl,
+ std::string &Result) {
+ assert(CDecl && "Class missing in SynthesizeObjCInternalStruct");
+ assert(CDecl->getName() != "" &&
+ "Name missing in SynthesizeObjCInternalStruct");
+ // Do not synthesize more than once.
+ if (ObjCSynthesizedStructs.count(CDecl))
+ return;
+ ObjCInterfaceDecl *RCDecl = CDecl->getSuperClass();
+ int NumIvars = CDecl->ivar_size();
+ SourceLocation LocStart = CDecl->getLocStart();
+ SourceLocation LocEnd = CDecl->getEndOfDefinitionLoc();
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+
+ // If this class has no ivars (or is not a definition) and either has no
+ // superclass or its superclass, directly or indirectly, has no ivars (and
+ // thus was not synthesized), then there is no need to synthesize this class.
+ if ((!CDecl->isThisDeclarationADefinition() || NumIvars == 0) &&
+ (!RCDecl || !ObjCSynthesizedStructs.count(RCDecl))) {
+ endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
+ ReplaceText(LocStart, endBuf-startBuf, Result);
+ return;
+ }
+
+ // FIXME: This has the potential of causing a problem if
+ // SynthesizeObjCInternalStruct is ever called recursively.
+ Result += "\nstruct ";
+ Result += CDecl->getNameAsString();
+ if (LangOpts.MicrosoftExt)
+ Result += "_IMPL";
+
+ if (NumIvars > 0) {
+ const char *cursor = strchr(startBuf, '{');
+ assert((cursor && endBuf)
+ && "SynthesizeObjCInternalStruct - malformed @interface");
+ // If the buffer contains preprocessor directives, we do more fine-grained
+ // rewrites. This is intended to fix code that looks like (which occurs in
+ // NSURL.h, for example):
+ //
+ // #ifdef XYZ
+ // @interface Foo : NSObject
+ // #else
+ // @interface FooBar : NSObject
+ // #endif
+ // {
+ // int i;
+ // }
+ // @end
+ //
+ // This clause is segregated to avoid breaking the common case.
+ if (BufferContainsPPDirectives(startBuf, cursor)) {
+ SourceLocation L = RCDecl ? CDecl->getSuperClassLoc() :
+ CDecl->getAtStartLoc();
+ const char *endHeader = SM->getCharacterData(L);
+ endHeader += Lexer::MeasureTokenLength(L, *SM, LangOpts);
+
+ if (CDecl->protocol_begin() != CDecl->protocol_end()) {
+ // advance to the end of the referenced protocols.
+ while (endHeader < cursor && *endHeader != '>') endHeader++;
+ endHeader++;
+ }
+ // rewrite the original header
+ ReplaceText(LocStart, endHeader-startBuf, Result);
+ } else {
+ // rewrite the original header *without* disturbing the '{'
+ ReplaceText(LocStart, cursor-startBuf, Result);
+ }
+ if (RCDecl && ObjCSynthesizedStructs.count(RCDecl)) {
+ Result = "\n struct ";
+ Result += RCDecl->getNameAsString();
+ Result += "_IMPL ";
+ Result += RCDecl->getNameAsString();
+ Result += "_IVARS;\n";
+
+ // insert the super class structure definition.
+ SourceLocation OnePastCurly =
+ LocStart.getLocWithOffset(cursor-startBuf+1);
+ InsertText(OnePastCurly, Result);
+ }
+ cursor++; // past '{'
+
+ // Now comment out any visibility specifiers.
+ while (cursor < endBuf) {
+ if (*cursor == '@') {
+ SourceLocation atLoc = LocStart.getLocWithOffset(cursor-startBuf);
+ // Skip whitespace.
+ for (++cursor; cursor[0] == ' ' || cursor[0] == '\t'; ++cursor)
+ /*scan*/;
+
+ // FIXME: The presence of @public, etc. inside a comment results in
+ // this transformation as well, which still produces correct C code.
+ if (!strncmp(cursor, "public", strlen("public")) ||
+ !strncmp(cursor, "private", strlen("private")) ||
+ !strncmp(cursor, "package", strlen("package")) ||
+ !strncmp(cursor, "protected", strlen("protected")))
+ InsertText(atLoc, "// ");
+ }
+ // FIXME: If there are cases where '<' is used in ivar declaration part
+ // of user code, then scan the ivar list and use needToScanForQualifiers
+ // for type checking.
+ else if (*cursor == '<') {
+ SourceLocation atLoc = LocStart.getLocWithOffset(cursor-startBuf);
+ InsertText(atLoc, "/* ");
+ cursor = strchr(cursor, '>');
+ cursor++;
+ atLoc = LocStart.getLocWithOffset(cursor-startBuf);
+ InsertText(atLoc, " */");
+ } else if (*cursor == '^') { // rewrite block specifier.
+ SourceLocation caretLoc = LocStart.getLocWithOffset(cursor-startBuf);
+ ReplaceText(caretLoc, 1, "*");
+ }
+ cursor++;
+ }
+ // Don't forget to add a ';'!!
+ InsertText(LocEnd.getLocWithOffset(1), ";");
+ } else { // we don't have any instance variables - insert super struct.
+ endBuf += Lexer::MeasureTokenLength(LocEnd, *SM, LangOpts);
+ Result += " {\n struct ";
+ Result += RCDecl->getNameAsString();
+ Result += "_IMPL ";
+ Result += RCDecl->getNameAsString();
+ Result += "_IVARS;\n};\n";
+ ReplaceText(LocStart, endBuf-startBuf, Result);
+ }
+ // Mark this struct as having been generated.
+ if (!ObjCSynthesizedStructs.insert(CDecl))
+ llvm_unreachable("struct already synthesize- SynthesizeObjCInternalStruct");
+}
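+
+// Rough sketch of the rewrite above (hypothetical class 'Foo' whose superclass
+// 'Super' was itself synthesized):
+// @interface Foo : Super { int ivar; }
+// becomes
+// struct Foo {
+// struct Super_IMPL Super_IVARS;
+// int ivar;
+// };
+// with "_IMPL" appended to the struct name when Microsoft extensions are
+// enabled, visibility specifiers commented out, and any '^' in ivar types
+// rewritten to '*'.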
+
+//===----------------------------------------------------------------------===//
+// Meta Data Emission
+//===----------------------------------------------------------------------===//
+
+
+/// RewriteImplementations - This routine rewrites all method implementations
+/// and emits meta-data.
+
+void RewriteObjC::RewriteImplementations() {
+ int ClsDefCount = ClassImplementation.size();
+ int CatDefCount = CategoryImplementation.size();
+
+ // Rewrite implemented methods
+ for (int i = 0; i < ClsDefCount; i++)
+ RewriteImplementationDecl(ClassImplementation[i]);
+
+ for (int i = 0; i < CatDefCount; i++)
+ RewriteImplementationDecl(CategoryImplementation[i]);
+}
+
+void RewriteObjC::RewriteByRefString(std::string &ResultStr,
+ const std::string &Name,
+ ValueDecl *VD, bool def) {
+ assert(BlockByRefDeclNo.count(VD) &&
+ "RewriteByRefString: ByRef decl missing");
+ if (def)
+ ResultStr += "struct ";
+ ResultStr += "__Block_byref_" + Name +
+ "_" + utostr(BlockByRefDeclNo[VD]) ;
+}
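+
+// For example, a __block variable 'x' with decl number 0 yields
+// "__Block_byref_x_0" (prefixed with "struct " when 'def' is true).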
+
+static bool HasLocalVariableExternalStorage(ValueDecl *VD) {
+ if (VarDecl *Var = dyn_cast<VarDecl>(VD))
+ return (Var->isFunctionOrMethodVarDecl() && !Var->hasLocalStorage());
+ return false;
+}
+
+std::string RewriteObjC::SynthesizeBlockFunc(BlockExpr *CE, int i,
+ StringRef funcName,
+ std::string Tag) {
+ const FunctionType *AFT = CE->getFunctionType();
+ QualType RT = AFT->getResultType();
+ std::string StructRef = "struct " + Tag;
+ std::string S = "static " + RT.getAsString(Context->getPrintingPolicy()) + " __" +
+ funcName.str() + "_" + "block_func_" + utostr(i);
+
+ BlockDecl *BD = CE->getBlockDecl();
+
+ if (isa<FunctionNoProtoType>(AFT)) {
+ // No user-supplied arguments. Still need to pass in a pointer to the
+ // block (to reference imported block decl refs).
+ S += "(" + StructRef + " *__cself)";
+ } else if (BD->param_empty()) {
+ S += "(" + StructRef + " *__cself)";
+ } else {
+ const FunctionProtoType *FT = cast<FunctionProtoType>(AFT);
+ assert(FT && "SynthesizeBlockFunc: No function proto");
+ S += '(';
+ // first add the implicit argument.
+ S += StructRef + " *__cself, ";
+ std::string ParamStr;
+ for (BlockDecl::param_iterator AI = BD->param_begin(),
+ E = BD->param_end(); AI != E; ++AI) {
+ if (AI != BD->param_begin()) S += ", ";
+ ParamStr = (*AI)->getNameAsString();
+ QualType QT = (*AI)->getType();
+ (void)convertBlockPointerToFunctionPointer(QT);
+ QT.getAsStringInternal(ParamStr, Context->getPrintingPolicy());
+ S += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (!BD->param_empty()) S += ", ";
+ S += "...";
+ }
+ S += ')';
+ }
+ S += " {\n";
+
+ // Create local declarations to avoid rewriting all closure decl ref exprs.
+ // First, emit a declaration for all "by ref" decls.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string Name = (*I)->getNameAsString();
+ std::string TypeString;
+ RewriteByRefString(TypeString, Name, (*I));
+ TypeString += " *";
+ Name = TypeString + Name;
+ S += Name + " = __cself->" + (*I)->getNameAsString() + "; // bound by ref\n";
+ }
+ // Next, emit a declaration for all "by copy" declarations.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ S += " ";
+ // Handle nested closure invocation. For example:
+ //
+ // void (^myImportedClosure)(void);
+ // myImportedClosure = ^(void) { setGlobalInt(x + y); };
+ //
+ // void (^anotherClosure)(void);
+ // anotherClosure = ^(void) {
+ // myImportedClosure(); // import and invoke the closure
+ // };
+ //
+ if (isTopLevelBlockPointerType((*I)->getType())) {
+ RewriteBlockPointerTypeVariable(S, (*I));
+ S += " = (";
+ RewriteBlockPointerType(S, (*I)->getType());
+ S += ")";
+ S += "__cself->" + (*I)->getNameAsString() + "; // bound by copy\n";
+ }
+ else {
+ std::string Name = (*I)->getNameAsString();
+ QualType QT = (*I)->getType();
+ if (HasLocalVariableExternalStorage(*I))
+ QT = Context->getPointerType(QT);
+ QT.getAsStringInternal(Name, Context->getPrintingPolicy());
+ S += Name + " = __cself->" +
+ (*I)->getNameAsString() + "; // bound by copy\n";
+ }
+ }
+ std::string RewrittenStr = RewrittenBlockExprs[CE];
+ const char *cstr = RewrittenStr.c_str();
+ while (*cstr++ != '{') ;
+ S += cstr;
+ S += "\n";
+ return S;
+}
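+
+// Sketch of the function synthesized above (hypothetical enclosing function
+// 'foo', block index 0, one by-copy variable 'y' and one __block variable 'x'):
+// static void __foo_block_func_0(struct __foo_block_impl_0 *__cself) {
+// __Block_byref_x_0 *x = __cself->x; // bound by ref
+// int y = __cself->y; // bound by copy
+// ... // body taken from the rewritten block literal
+// }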
+
+std::string RewriteObjC::SynthesizeBlockHelperFuncs(BlockExpr *CE, int i,
+ StringRef funcName,
+ std::string Tag) {
+ std::string StructRef = "struct " + Tag;
+ std::string S = "static void __";
+
+ S += funcName;
+ S += "_block_copy_" + utostr(i);
+ S += "(" + StructRef;
+ S += "*dst, " + StructRef;
+ S += "*src) {";
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = ImportedBlockDecls.begin(),
+ E = ImportedBlockDecls.end(); I != E; ++I) {
+ ValueDecl *VD = (*I);
+ S += "_Block_object_assign((void*)&dst->";
+ S += (*I)->getNameAsString();
+ S += ", (void*)src->";
+ S += (*I)->getNameAsString();
+ if (BlockByRefDeclsPtrSet.count((*I)))
+ S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
+ else if (VD->getType()->isBlockPointerType())
+ S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
+ else
+ S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);";
+ }
+ S += "}\n";
+
+ S += "\nstatic void __";
+ S += funcName;
+ S += "_block_dispose_" + utostr(i);
+ S += "(" + StructRef;
+ S += "*src) {";
+ for (llvm::SmallPtrSet<ValueDecl*,8>::iterator I = ImportedBlockDecls.begin(),
+ E = ImportedBlockDecls.end(); I != E; ++I) {
+ ValueDecl *VD = (*I);
+ S += "_Block_object_dispose((void*)src->";
+ S += (*I)->getNameAsString();
+ if (BlockByRefDeclsPtrSet.count((*I)))
+ S += ", " + utostr(BLOCK_FIELD_IS_BYREF) + "/*BLOCK_FIELD_IS_BYREF*/);";
+ else if (VD->getType()->isBlockPointerType())
+ S += ", " + utostr(BLOCK_FIELD_IS_BLOCK) + "/*BLOCK_FIELD_IS_BLOCK*/);";
+ else
+ S += ", " + utostr(BLOCK_FIELD_IS_OBJECT) + "/*BLOCK_FIELD_IS_OBJECT*/);";
+ }
+ S += "}\n";
+ return S;
+}
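+
+// Sketch of the helpers synthesized above (same hypothetical names, one
+// imported __block variable 'x'):
+// static void __foo_block_copy_0(struct __foo_block_impl_0*dst,
+// struct __foo_block_impl_0*src) {
+// _Block_object_assign((void*)&dst->x, (void*)src->x, 8/*BLOCK_FIELD_IS_BYREF*/);}
+// static void __foo_block_dispose_0(struct __foo_block_impl_0*src) {
+// _Block_object_dispose((void*)src->x, 8/*BLOCK_FIELD_IS_BYREF*/);}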
+
+std::string RewriteObjC::SynthesizeBlockImpl(BlockExpr *CE, std::string Tag,
+ std::string Desc) {
+ std::string S = "\nstruct " + Tag;
+ std::string Constructor = " " + Tag;
+
+ S += " {\n struct __block_impl impl;\n";
+ S += " struct " + Desc;
+ S += "* Desc;\n";
+
+ Constructor += "(void *fp, "; // Invoke function pointer.
+ Constructor += "struct " + Desc; // Descriptor pointer.
+ Constructor += " *desc";
+
+ if (BlockDeclRefs.size()) {
+ // Output all "by copy" declarations.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string FieldName = (*I)->getNameAsString();
+ std::string ArgName = "_" + FieldName;
+ // Handle nested closure invocation. For example:
+ //
+ // void (^myImportedBlock)(void);
+ // myImportedBlock = ^(void) { setGlobalInt(x + y); };
+ //
+ // void (^anotherBlock)(void);
+ // anotherBlock = ^(void) {
+ // myImportedBlock(); // import and invoke the closure
+ // };
+ //
+ if (isTopLevelBlockPointerType((*I)->getType())) {
+ S += "struct __block_impl *";
+ Constructor += ", void *" + ArgName;
+ } else {
+ QualType QT = (*I)->getType();
+ if (HasLocalVariableExternalStorage(*I))
+ QT = Context->getPointerType(QT);
+ QT.getAsStringInternal(FieldName, Context->getPrintingPolicy());
+ QT.getAsStringInternal(ArgName, Context->getPrintingPolicy());
+ Constructor += ", " + ArgName;
+ }
+ S += FieldName + ";\n";
+ }
+ // Output all "by ref" declarations.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ S += " ";
+ std::string FieldName = (*I)->getNameAsString();
+ std::string ArgName = "_" + FieldName;
+ {
+ std::string TypeString;
+ RewriteByRefString(TypeString, FieldName, (*I));
+ TypeString += " *";
+ FieldName = TypeString + FieldName;
+ ArgName = TypeString + ArgName;
+ Constructor += ", " + ArgName;
+ }
+ S += FieldName + "; // by ref\n";
+ }
+ // Finish writing the constructor.
+ Constructor += ", int flags=0)";
+ // Initialize all "by copy" arguments.
+ bool firstTime = true;
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ std::string Name = (*I)->getNameAsString();
+ if (firstTime) {
+ Constructor += " : ";
+ firstTime = false;
+ }
+ else
+ Constructor += ", ";
+ if (isTopLevelBlockPointerType((*I)->getType()))
+ Constructor += Name + "((struct __block_impl *)_" + Name + ")";
+ else
+ Constructor += Name + "(_" + Name + ")";
+ }
+ // Initialize all "by ref" arguments.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ std::string Name = (*I)->getNameAsString();
+ if (firstTime) {
+ Constructor += " : ";
+ firstTime = false;
+ }
+ else
+ Constructor += ", ";
+ Constructor += Name + "(_" + Name + "->__forwarding)";
+ }
+
+ Constructor += " {\n";
+ if (GlobalVarDecl)
+ Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n";
+ else
+ Constructor += " impl.isa = &_NSConcreteStackBlock;\n";
+ Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n";
+
+ Constructor += " Desc = desc;\n";
+ } else {
+ // Finish writing the constructor.
+ Constructor += ", int flags=0) {\n";
+ if (GlobalVarDecl)
+ Constructor += " impl.isa = &_NSConcreteGlobalBlock;\n";
+ else
+ Constructor += " impl.isa = &_NSConcreteStackBlock;\n";
+ Constructor += " impl.Flags = flags;\n impl.FuncPtr = fp;\n";
+ Constructor += " Desc = desc;\n";
+ }
+ Constructor += " ";
+ Constructor += "}\n";
+ S += Constructor;
+ S += "};\n";
+ return S;
+}
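+
+// Sketch of the block implementation struct synthesized above (same
+// hypothetical names as before):
+// struct __foo_block_impl_0 {
+// struct __block_impl impl;
+// struct __foo_block_desc_0* Desc;
+// int y;
+// __Block_byref_x_0 *x; // by ref
+// __foo_block_impl_0(void *fp, struct __foo_block_desc_0 *desc, int _y,
+// __Block_byref_x_0 *_x, int flags=0) : y(_y), x(_x->__forwarding) {
+// impl.isa = &_NSConcreteStackBlock;
+// impl.Flags = flags;
+// impl.FuncPtr = fp;
+// Desc = desc;
+// }
+// };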
+
+std::string RewriteObjC::SynthesizeBlockDescriptor(std::string DescTag,
+ std::string ImplTag, int i,
+ StringRef FunName,
+ unsigned hasCopy) {
+ std::string S = "\nstatic struct " + DescTag;
+
+ S += " {\n unsigned long reserved;\n";
+ S += " unsigned long Block_size;\n";
+ if (hasCopy) {
+ S += " void (*copy)(struct ";
+ S += ImplTag; S += "*, struct ";
+ S += ImplTag; S += "*);\n";
+
+ S += " void (*dispose)(struct ";
+ S += ImplTag; S += "*);\n";
+ }
+ S += "} ";
+
+ S += DescTag + "_DATA = { 0, sizeof(struct ";
+ S += ImplTag + ")";
+ if (hasCopy) {
+ S += ", __" + FunName.str() + "_block_copy_" + utostr(i);
+ S += ", __" + FunName.str() + "_block_dispose_" + utostr(i);
+ }
+ S += "};\n";
+ return S;
+}
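+
+// Sketch of the descriptor synthesized above (no copy/dispose helpers):
+// static struct __foo_block_desc_0 {
+// unsigned long reserved;
+// unsigned long Block_size;
+// } __foo_block_desc_0_DATA = { 0, sizeof(struct __foo_block_impl_0)};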
+
+void RewriteObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
+ StringRef FunName) {
+ // Insert declaration for the function in which block literal is used.
+ if (CurFunctionDeclToDeclareForBlock && !Blocks.empty())
+ RewriteBlockLiteralFunctionDecl(CurFunctionDeclToDeclareForBlock);
+ bool RewriteSC = (GlobalVarDecl &&
+ !Blocks.empty() &&
+ GlobalVarDecl->getStorageClass() == SC_Static &&
+ GlobalVarDecl->getType().getCVRQualifiers());
+ if (RewriteSC) {
+ std::string SC(" void __");
+ SC += GlobalVarDecl->getNameAsString();
+ SC += "() {}";
+ InsertText(FunLocStart, SC);
+ }
+
+ // Insert closures that were part of the function.
+ for (unsigned i = 0, count=0; i < Blocks.size(); i++) {
+ CollectBlockDeclRefInfo(Blocks[i]);
+ // Need to copy in the variables copied into inner blocks that are not
+ // actually used in this block.
+ for (int j = 0; j < InnerDeclRefsCount[i]; j++) {
+ DeclRefExpr *Exp = InnerDeclRefs[count++];
+ ValueDecl *VD = Exp->getDecl();
+ BlockDeclRefs.push_back(Exp);
+ if (!VD->hasAttr<BlocksAttr>() && !BlockByCopyDeclsPtrSet.count(VD)) {
+ BlockByCopyDeclsPtrSet.insert(VD);
+ BlockByCopyDecls.push_back(VD);
+ }
+ if (VD->hasAttr<BlocksAttr>() && !BlockByRefDeclsPtrSet.count(VD)) {
+ BlockByRefDeclsPtrSet.insert(VD);
+ BlockByRefDecls.push_back(VD);
+ }
+ // Imported objects in the inner blocks that are not used in the outer
+ // block must be copied/disposed of in the outer block as well.
+ if (VD->hasAttr<BlocksAttr>() ||
+ VD->getType()->isObjCObjectPointerType() ||
+ VD->getType()->isBlockPointerType())
+ ImportedBlockDecls.insert(VD);
+ }
+
+ std::string ImplTag = "__" + FunName.str() + "_block_impl_" + utostr(i);
+ std::string DescTag = "__" + FunName.str() + "_block_desc_" + utostr(i);
+
+ std::string CI = SynthesizeBlockImpl(Blocks[i], ImplTag, DescTag);
+
+ InsertText(FunLocStart, CI);
+
+ std::string CF = SynthesizeBlockFunc(Blocks[i], i, FunName, ImplTag);
+
+ InsertText(FunLocStart, CF);
+
+ if (ImportedBlockDecls.size()) {
+ std::string HF = SynthesizeBlockHelperFuncs(Blocks[i], i, FunName, ImplTag);
+ InsertText(FunLocStart, HF);
+ }
+ std::string BD = SynthesizeBlockDescriptor(DescTag, ImplTag, i, FunName,
+ ImportedBlockDecls.size() > 0);
+ InsertText(FunLocStart, BD);
+
+ BlockDeclRefs.clear();
+ BlockByRefDecls.clear();
+ BlockByRefDeclsPtrSet.clear();
+ BlockByCopyDecls.clear();
+ BlockByCopyDeclsPtrSet.clear();
+ ImportedBlockDecls.clear();
+ }
+ if (RewriteSC) {
+ // Must insert any 'const'/'volatile'/'static' here, since it has been
+ // removed as a result of rewriting the block literals.
+ std::string SC;
+ if (GlobalVarDecl->getStorageClass() == SC_Static)
+ SC = "static ";
+ if (GlobalVarDecl->getType().isConstQualified())
+ SC += "const ";
+ if (GlobalVarDecl->getType().isVolatileQualified())
+ SC += "volatile ";
+ if (GlobalVarDecl->getType().isRestrictQualified())
+ SC += "restrict ";
+ InsertText(FunLocStart, SC);
+ }
+
+ Blocks.clear();
+ InnerDeclRefsCount.clear();
+ InnerDeclRefs.clear();
+ RewrittenBlockExprs.clear();
+}
+
+void RewriteObjC::InsertBlockLiteralsWithinFunction(FunctionDecl *FD) {
+ SourceLocation FunLocStart = FD->getTypeSpecStartLoc();
+ StringRef FuncName = FD->getName();
+
+ SynthesizeBlockLiterals(FunLocStart, FuncName);
+}
+
+static void BuildUniqueMethodName(std::string &Name,
+ ObjCMethodDecl *MD) {
+ ObjCInterfaceDecl *IFace = MD->getClassInterface();
+ Name = IFace->getName();
+ Name += "__" + MD->getSelector().getAsString();
+ // Convert colons to underscores.
+ std::string::size_type loc = 0;
+ while ((loc = Name.find(":", loc)) != std::string::npos)
+ Name.replace(loc, 1, "_");
+}
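+
+// For example, a method -[Foo doSomething:with:] yields the unique name
+// "Foo__doSomething_with_".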
+
+void RewriteObjC::InsertBlockLiteralsWithinMethod(ObjCMethodDecl *MD) {
+ //fprintf(stderr,"In InsertBlockLiteralsWitinMethod\n");
+ //SourceLocation FunLocStart = MD->getLocStart();
+ SourceLocation FunLocStart = MD->getLocStart();
+ std::string FuncName;
+ BuildUniqueMethodName(FuncName, MD);
+ SynthesizeBlockLiterals(FunLocStart, FuncName);
+}
+
+void RewriteObjC::GetBlockDeclRefExprs(Stmt *S) {
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
+ if (*CI) {
+ if (BlockExpr *CBE = dyn_cast<BlockExpr>(*CI))
+ GetBlockDeclRefExprs(CBE->getBody());
+ else
+ GetBlockDeclRefExprs(*CI);
+ }
+ // Handle specific things.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
+ if (DRE->refersToEnclosingLocal()) {
+ // FIXME: Handle enums.
+ if (!isa<FunctionDecl>(DRE->getDecl()))
+ BlockDeclRefs.push_back(DRE);
+ if (HasLocalVariableExternalStorage(DRE->getDecl()))
+ BlockDeclRefs.push_back(DRE);
+ }
+ }
+
+ return;
+}
+
+void RewriteObjC::GetInnerBlockDeclRefExprs(Stmt *S,
+ SmallVector<DeclRefExpr *, 8> &InnerBlockDeclRefs,
+ llvm::SmallPtrSet<const DeclContext *, 8> &InnerContexts) {
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
+ if (*CI) {
+ if (BlockExpr *CBE = dyn_cast<BlockExpr>(*CI)) {
+ InnerContexts.insert(cast<DeclContext>(CBE->getBlockDecl()));
+ GetInnerBlockDeclRefExprs(CBE->getBody(),
+ InnerBlockDeclRefs,
+ InnerContexts);
+ }
+ else
+ GetInnerBlockDeclRefExprs(*CI,
+ InnerBlockDeclRefs,
+ InnerContexts);
+
+ }
+ // Handle specific things.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
+ if (DRE->refersToEnclosingLocal()) {
+ if (!isa<FunctionDecl>(DRE->getDecl()) &&
+ !InnerContexts.count(DRE->getDecl()->getDeclContext()))
+ InnerBlockDeclRefs.push_back(DRE);
+ if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (Var->isFunctionOrMethodVarDecl())
+ ImportedLocalExternalDecls.insert(Var);
+ }
+ }
+
+ return;
+}
+
+/// convertFunctionTypeOfBlocks - This routine converts a function type
+/// whose result type may be a block pointer or whose argument type(s)
+/// might be block pointers to an equivalent function type replacing
+/// all block pointers to function pointers.
+QualType RewriteObjC::convertFunctionTypeOfBlocks(const FunctionType *FT) {
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
+ // FTP will be null for closures that don't take arguments.
+ // Generate a funky cast.
+ SmallVector<QualType, 8> ArgTypes;
+ QualType Res = FT->getResultType();
+ bool HasBlockType = convertBlockPointerToFunctionPointer(Res);
+
+ if (FTP) {
+ for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
+ E = FTP->arg_type_end(); I && (I != E); ++I) {
+ QualType t = *I;
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ if (convertBlockPointerToFunctionPointer(t))
+ HasBlockType = true;
+ ArgTypes.push_back(t);
+ }
+ }
+ QualType FuncType;
+ // FIXME: Does this work if the block takes no arguments but has a return
+ // type which is of block type?
+ if (HasBlockType)
+ FuncType = getSimpleFunctionType(Res, &ArgTypes[0], ArgTypes.size());
+ else FuncType = QualType(FT, 0);
+ return FuncType;
+}
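+
+// For example, a block of type "int (^)(void (^)(void))" is given the
+// equivalent function type "int (void (*)(void))".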
+
+Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
+ // Navigate to relevant type information.
+ const BlockPointerType *CPT = 0;
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BlockExp)) {
+ CPT = DRE->getType()->getAs<BlockPointerType>();
+ } else if (const MemberExpr *MExpr = dyn_cast<MemberExpr>(BlockExp)) {
+ CPT = MExpr->getType()->getAs<BlockPointerType>();
+ }
+ else if (const ParenExpr *PRE = dyn_cast<ParenExpr>(BlockExp)) {
+ return SynthesizeBlockCall(Exp, PRE->getSubExpr());
+ }
+ else if (const ImplicitCastExpr *IEXPR = dyn_cast<ImplicitCastExpr>(BlockExp))
+ CPT = IEXPR->getType()->getAs<BlockPointerType>();
+ else if (const ConditionalOperator *CEXPR =
+ dyn_cast<ConditionalOperator>(BlockExp)) {
+ Expr *LHSExp = CEXPR->getLHS();
+ Stmt *LHSStmt = SynthesizeBlockCall(Exp, LHSExp);
+ Expr *RHSExp = CEXPR->getRHS();
+ Stmt *RHSStmt = SynthesizeBlockCall(Exp, RHSExp);
+ Expr *CONDExp = CEXPR->getCond();
+ ConditionalOperator *CondExpr =
+ new (Context) ConditionalOperator(CONDExp,
+ SourceLocation(), cast<Expr>(LHSStmt),
+ SourceLocation(), cast<Expr>(RHSStmt),
+ Exp->getType(), VK_RValue, OK_Ordinary);
+ return CondExpr;
+ } else if (const ObjCIvarRefExpr *IRE = dyn_cast<ObjCIvarRefExpr>(BlockExp)) {
+ CPT = IRE->getType()->getAs<BlockPointerType>();
+ } else if (const PseudoObjectExpr *POE
+ = dyn_cast<PseudoObjectExpr>(BlockExp)) {
+ CPT = POE->getType()->castAs<BlockPointerType>();
+ } else {
+ assert(1 && "RewriteBlockClass: Bad type");
+ }
+ assert(CPT && "RewriteBlockClass: Bad type");
+ const FunctionType *FT = CPT->getPointeeType()->getAs<FunctionType>();
+ assert(FT && "RewriteBlockClass: Bad type");
+ const FunctionProtoType *FTP = dyn_cast<FunctionProtoType>(FT);
+ // FTP will be null for closures that don't take arguments.
+
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get("__block_impl"));
+ QualType PtrBlock = Context->getPointerType(Context->getTagDeclType(RD));
+
+ // Generate a funky cast.
+ SmallVector<QualType, 8> ArgTypes;
+
+ // Push the block argument type.
+ ArgTypes.push_back(PtrBlock);
+ if (FTP) {
+ for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
+ E = FTP->arg_type_end(); I && (I != E); ++I) {
+ QualType t = *I;
+ // Make sure we convert "t (^)(...)" to "t (*)(...)".
+ if (!convertBlockPointerToFunctionPointer(t))
+ convertToUnqualifiedObjCType(t);
+ ArgTypes.push_back(t);
+ }
+ }
+ // Now do the pointer to function cast.
+ QualType PtrToFuncCastType
+ = getSimpleFunctionType(Exp->getType(), &ArgTypes[0], ArgTypes.size());
+
+ PtrToFuncCastType = Context->getPointerType(PtrToFuncCastType);
+
+ CastExpr *BlkCast = NoTypeInfoCStyleCastExpr(Context, PtrBlock,
+ CK_BitCast,
+ const_cast<Expr*>(BlockExp));
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ BlkCast);
+ //PE->dump();
+
+ FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("FuncPtr"),
+ Context->VoidPtrTy, 0,
+ /*BitWidth=*/0, /*Mutable=*/true,
+ /*HasInit=*/false);
+ MemberExpr *ME = new (Context) MemberExpr(PE, true, FD, SourceLocation(),
+ FD->getType(), VK_LValue,
+ OK_Ordinary);
+
+
+ CastExpr *FunkCast = NoTypeInfoCStyleCastExpr(Context, PtrToFuncCastType,
+ CK_BitCast, ME);
+ PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), FunkCast);
+
+ SmallVector<Expr*, 8> BlkExprs;
+ // Add the implicit argument.
+ BlkExprs.push_back(BlkCast);
+ // Add the user arguments.
+ for (CallExpr::arg_iterator I = Exp->arg_begin(),
+ E = Exp->arg_end(); I != E; ++I) {
+ BlkExprs.push_back(*I);
+ }
+ CallExpr *CE = new (Context) CallExpr(*Context, PE, &BlkExprs[0],
+ BlkExprs.size(),
+ Exp->getType(), VK_RValue,
+ SourceLocation());
+ return CE;
+}
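+
+// Sketch of the call synthesized above for a hypothetical block 'blk' of type
+// int (^)(int), invoked as blk(5):
+// ((int (*)(struct __block_impl *, int))((struct __block_impl *)blk)->FuncPtr)
+// ((struct __block_impl *)blk, 5);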
+
+// We need to return the rewritten expression to handle cases where the
+// BlockDeclRefExpr is embedded in another expression being rewritten.
+// For example:
+//
+// int main() {
+// __block Foo *f;
+// __block int i;
+//
+// void (^myblock)() = ^() {
+// [f test]; // f is a BlockDeclRefExpr embedded in a message (which is being rewritten).
+// i = 77;
+// };
+//}
+Stmt *RewriteObjC::RewriteBlockDeclRefExpr(DeclRefExpr *DeclRefExp) {
+ // Rewrite the byref variable into BYREFVAR->__forwarding->BYREFVAR
+ // for each DeclRefExp, where BYREFVAR is the name of the variable.
+ ValueDecl *VD = DeclRefExp->getDecl();
+ bool isArrow = DeclRefExp->refersToEnclosingLocal();
+
+ FieldDecl *FD = FieldDecl::Create(*Context, 0, SourceLocation(),
+ SourceLocation(),
+ &Context->Idents.get("__forwarding"),
+ Context->VoidPtrTy, 0,
+ /*BitWidth=*/0, /*Mutable=*/true,
+ /*HasInit=*/false);
+ MemberExpr *ME = new (Context) MemberExpr(DeclRefExp, isArrow,
+ FD, SourceLocation(),
+ FD->getType(), VK_LValue,
+ OK_Ordinary);
+
+ StringRef Name = VD->getName();
+ FD = FieldDecl::Create(*Context, 0, SourceLocation(), SourceLocation(),
+ &Context->Idents.get(Name),
+ Context->VoidPtrTy, 0,
+ /*BitWidth=*/0, /*Mutable=*/true,
+ /*HasInit=*/false);
+ ME = new (Context) MemberExpr(ME, true, FD, SourceLocation(),
+ DeclRefExp->getType(), VK_LValue, OK_Ordinary);
+
+
+
+ // Need parens to enforce precedence.
+ ParenExpr *PE = new (Context) ParenExpr(DeclRefExp->getExprLoc(),
+ DeclRefExp->getExprLoc(),
+ ME);
+ ReplaceStmt(DeclRefExp, PE);
+ return PE;
+}
+
+// Rewrites the imported local variable V with external storage
+// (static, extern, etc.) as *V
+//
+Stmt *RewriteObjC::RewriteLocalVariableExternalStorage(DeclRefExpr *DRE) {
+ ValueDecl *VD = DRE->getDecl();
+ if (VarDecl *Var = dyn_cast<VarDecl>(VD))
+ if (!ImportedLocalExternalDecls.count(Var))
+ return DRE;
+ Expr *Exp = new (Context) UnaryOperator(DRE, UO_Deref, DRE->getType(),
+ VK_LValue, OK_Ordinary,
+ DRE->getLocation());
+ // Need parens to enforce precedence.
+ ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(),
+ Exp);
+ ReplaceStmt(DRE, PE);
+ return PE;
+}
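+
+// For example, a reference to such an imported variable 'V' is replaced by
+// "(*V)".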
+
+void RewriteObjC::RewriteCastExpr(CStyleCastExpr *CE) {
+ SourceLocation LocStart = CE->getLParenLoc();
+ SourceLocation LocEnd = CE->getRParenLoc();
+
+ // Need to avoid trying to rewrite synthesized casts.
+ if (LocStart.isInvalid())
+ return;
+ // Need to avoid trying to rewrite casts contained in macros.
+ if (!Rewriter::isRewritable(LocStart) || !Rewriter::isRewritable(LocEnd))
+ return;
+
+ const char *startBuf = SM->getCharacterData(LocStart);
+ const char *endBuf = SM->getCharacterData(LocEnd);
+ QualType QT = CE->getType();
+ const Type* TypePtr = QT->getAs<Type>();
+ if (isa<TypeOfExprType>(TypePtr)) {
+ const TypeOfExprType *TypeOfExprTypePtr = cast<TypeOfExprType>(TypePtr);
+ QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType();
+ std::string TypeAsString = "(";
+ RewriteBlockPointerType(TypeAsString, QT);
+ TypeAsString += ")";
+ ReplaceText(LocStart, endBuf-startBuf+1, TypeAsString);
+ return;
+ }
+ // Scan the text of the cast, rewriting any block '^' to '*'.
+ const char *argPtr = startBuf;
+
+ while (*argPtr++ && (argPtr < endBuf)) {
+ switch (*argPtr) {
+ case '^':
+ // Replace the '^' with '*'.
+ LocStart = LocStart.getLocWithOffset(argPtr-startBuf);
+ ReplaceText(LocStart, 1, "*");
+ break;
+ }
+ }
+ return;
+}
+
+void RewriteObjC::RewriteBlockPointerFunctionArgs(FunctionDecl *FD) {
+ SourceLocation DeclLoc = FD->getLocation();
+ unsigned parenCount = 0;
+
+ // We have 1 or more arguments that have closure pointers.
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ const char *startArgList = strchr(startBuf, '(');
+
+ assert((*startArgList == '(') && "Rewriter fuzzy parser confused");
+
+ parenCount++;
+ // advance the location to startArgList.
+ DeclLoc = DeclLoc.getLocWithOffset(startArgList-startBuf);
+ assert((DeclLoc.isValid()) && "Invalid DeclLoc");
+
+ const char *argPtr = startArgList;
+
+ while (*argPtr++ && parenCount) {
+ switch (*argPtr) {
+ case '^':
+ // Replace the '^' with '*'.
+ DeclLoc = DeclLoc.getLocWithOffset(argPtr-startArgList);
+ ReplaceText(DeclLoc, 1, "*");
+ break;
+ case '(':
+ parenCount++;
+ break;
+ case ')':
+ parenCount--;
+ break;
+ }
+ }
+ return;
+}
+
+bool RewriteObjC::PointerTypeTakesAnyBlockArguments(QualType QT) {
+ const FunctionProtoType *FTP;
+ const PointerType *PT = QT->getAs<PointerType>();
+ if (PT) {
+ FTP = PT->getPointeeType()->getAs<FunctionProtoType>();
+ } else {
+ const BlockPointerType *BPT = QT->getAs<BlockPointerType>();
+ assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type");
+ FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
+ }
+ if (FTP) {
+ for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
+ E = FTP->arg_type_end(); I != E; ++I)
+ if (isTopLevelBlockPointerType(*I))
+ return true;
+ }
+ return false;
+}
+
+bool RewriteObjC::PointerTypeTakesAnyObjCQualifiedType(QualType QT) {
+ const FunctionProtoType *FTP;
+ const PointerType *PT = QT->getAs<PointerType>();
+ if (PT) {
+ FTP = PT->getPointeeType()->getAs<FunctionProtoType>();
+ } else {
+ const BlockPointerType *BPT = QT->getAs<BlockPointerType>();
+ assert(BPT && "BlockPointerTypeTakeAnyBlockArguments(): not a block pointer type");
+ FTP = BPT->getPointeeType()->getAs<FunctionProtoType>();
+ }
+ if (FTP) {
+ for (FunctionProtoType::arg_type_iterator I = FTP->arg_type_begin(),
+ E = FTP->arg_type_end(); I != E; ++I) {
+ if ((*I)->isObjCQualifiedIdType())
+ return true;
+ if ((*I)->isObjCObjectPointerType() &&
+ (*I)->getPointeeType()->isObjCQualifiedInterfaceType())
+ return true;
+ }
+
+ }
+ return false;
+}
+
+void RewriteObjC::GetExtentOfArgList(const char *Name, const char *&LParen,
+ const char *&RParen) {
+ const char *argPtr = strchr(Name, '(');
+ assert((*argPtr == '(') && "Rewriter fuzzy parser confused");
+
+ LParen = argPtr; // output the start.
+ argPtr++; // skip past the left paren.
+ unsigned parenCount = 1;
+
+ while (*argPtr && parenCount) {
+ switch (*argPtr) {
+ case '(': parenCount++; break;
+ case ')': parenCount--; break;
+ default: break;
+ }
+ if (parenCount) argPtr++;
+ }
+ assert((*argPtr == ')') && "Rewriter fuzzy parser confused");
+ RParen = argPtr; // output the end
+}
+
+void RewriteObjC::RewriteBlockPointerDecl(NamedDecl *ND) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ RewriteBlockPointerFunctionArgs(FD);
+ return;
+ }
+ // Handle Variables and Typedefs.
+ SourceLocation DeclLoc = ND->getLocation();
+ QualType DeclT;
+ if (VarDecl *VD = dyn_cast<VarDecl>(ND))
+ DeclT = VD->getType();
+ else if (TypedefNameDecl *TDD = dyn_cast<TypedefNameDecl>(ND))
+ DeclT = TDD->getUnderlyingType();
+ else if (FieldDecl *FD = dyn_cast<FieldDecl>(ND))
+ DeclT = FD->getType();
+ else
+ llvm_unreachable("RewriteBlockPointerDecl(): Decl type not yet handled");
+
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ const char *endBuf = startBuf;
+ // scan backward (from the decl location) for the end of the previous decl.
+ while (*startBuf != '^' && *startBuf != ';' && startBuf != MainFileStart)
+ startBuf--;
+ SourceLocation Start = DeclLoc.getLocWithOffset(startBuf-endBuf);
+ std::string buf;
+ unsigned OrigLength=0;
+ // *startBuf != '^' if we are dealing with a pointer to function that
+ // may take block argument types (which will be handled below).
+ if (*startBuf == '^') {
+ // Replace the '^' with '*', computing a negative offset.
+ buf = '*';
+ startBuf++;
+ OrigLength++;
+ }
+ while (*startBuf != ')') {
+ buf += *startBuf;
+ startBuf++;
+ OrigLength++;
+ }
+ buf += ')';
+ OrigLength++;
+
+ if (PointerTypeTakesAnyBlockArguments(DeclT) ||
+ PointerTypeTakesAnyObjCQualifiedType(DeclT)) {
+ // Replace the '^' with '*' for arguments.
+ // Replace id<P> with id/*<>*/
+ DeclLoc = ND->getLocation();
+ startBuf = SM->getCharacterData(DeclLoc);
+ const char *argListBegin, *argListEnd;
+ GetExtentOfArgList(startBuf, argListBegin, argListEnd);
+ while (argListBegin < argListEnd) {
+ if (*argListBegin == '^')
+ buf += '*';
+ else if (*argListBegin == '<') {
+ buf += "/*";
+ buf += *argListBegin++;
+ OrigLength++;
+ while (*argListBegin != '>') {
+ buf += *argListBegin++;
+ OrigLength++;
+ }
+ buf += *argListBegin;
+ buf += "*/";
+ }
+ else
+ buf += *argListBegin;
+ argListBegin++;
+ OrigLength++;
+ }
+ buf += ')';
+ OrigLength++;
+ }
+ ReplaceText(Start, OrigLength, buf);
+
+ return;
+}
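+
+// For example (hypothetical declaration), this turns
+// void (^blk)(int (^)(void), id<Proto>);
+// into
+// void (*blk)(int (*)(void), id/*<Proto>*/);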
+
+
+/// SynthesizeByrefCopyDestroyHelper - This routine synthesizes:
+/// void __Block_byref_id_object_copy(struct Block_byref_id_object *dst,
+/// struct Block_byref_id_object *src) {
+/// _Block_object_assign (&_dest->object, _src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT
+/// [|BLOCK_FIELD_IS_WEAK]) // object
+/// _Block_object_assign(&_dest->object, _src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK
+/// [|BLOCK_FIELD_IS_WEAK]) // block
+/// }
+/// And:
+/// void __Block_byref_id_object_dispose(struct Block_byref_id_object *_src) {
+/// _Block_object_dispose(_src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT
+/// [|BLOCK_FIELD_IS_WEAK]) // object
+/// _Block_object_dispose(_src->object,
+/// BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_BLOCK
+/// [|BLOCK_FIELD_IS_WEAK]) // block
+/// }
+
+std::string RewriteObjC::SynthesizeByrefCopyDestroyHelper(VarDecl *VD,
+ int flag) {
+ std::string S;
+ if (CopyDestroyCache.count(flag))
+ return S;
+ CopyDestroyCache.insert(flag);
+ S = "static void __Block_byref_id_object_copy_";
+ S += utostr(flag);
+ S += "(void *dst, void *src) {\n";
+
+ // offset into the object pointer is computed as:
+ // void * + void* + int + int + void* + void *
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+ unsigned VoidPtrSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->VoidPtrTy));
+
+ unsigned offset = (VoidPtrSize*4 + IntSize + IntSize)/Context->getCharWidth();
+ S += " _Block_object_assign((char*)dst + ";
+ S += utostr(offset);
+ S += ", *(void * *) ((char*)src + ";
+ S += utostr(offset);
+ S += "), ";
+ S += utostr(flag);
+ S += ");\n}\n";
+
+ S += "static void __Block_byref_id_object_dispose_";
+ S += utostr(flag);
+ S += "(void *src) {\n";
+ S += " _Block_object_dispose(*(void * *) ((char*)src + ";
+ S += utostr(offset);
+ S += "), ";
+ S += utostr(flag);
+ S += ");\n}\n";
+ return S;
+}
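+
+// Sketch of the output, assuming a 64-bit target (computed offset 40) and
+// flag == BLOCK_BYREF_CALLER | BLOCK_FIELD_IS_OBJECT (131):
+// static void __Block_byref_id_object_copy_131(void *dst, void *src) {
+// _Block_object_assign((char*)dst + 40, *(void * *) ((char*)src + 40), 131);}
+// static void __Block_byref_id_object_dispose_131(void *src) {
+// _Block_object_dispose(*(void * *) ((char*)src + 40), 131);}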
+
+/// RewriteByRefVar - For each variable declared as '__block typex ND', this
+/// routine transforms the declaration into:
+/// struct __Block_byref_ND {
+/// void *__isa; // NULL for everything except __weak pointers
+/// struct __Block_byref_ND *__forwarding;
+/// int32_t __flags;
+/// int32_t __size;
+/// void *__Block_byref_id_object_copy; // If variable is __block ObjC object
+/// void *__Block_byref_id_object_dispose; // If variable is __block ObjC object
+/// typex ND;
+/// };
+///
+/// It then replaces declaration of ND variable with:
+/// struct __Block_byref_ND ND = {__isa=0B, __forwarding=&ND, __flags=some_flag,
+/// __size=sizeof(struct __Block_byref_ND),
+/// ND=initializer-if-any};
+///
+///
+void RewriteObjC::RewriteByRefVar(VarDecl *ND) {
+ // Insert declaration for the function in which block literal is
+ // used.
+ if (CurFunctionDeclToDeclareForBlock)
+ RewriteBlockLiteralFunctionDecl(CurFunctionDeclToDeclareForBlock);
+ int flag = 0;
+ int isa = 0;
+ SourceLocation DeclLoc = ND->getTypeSpecStartLoc();
+ if (DeclLoc.isInvalid())
+ // If the type location is missing, it is because the type itself is
+ // missing (a warning). Use the variable's location, which is good enough
+ // for this case.
+ DeclLoc = ND->getLocation();
+ const char *startBuf = SM->getCharacterData(DeclLoc);
+ SourceLocation X = ND->getLocEnd();
+ X = SM->getExpansionLoc(X);
+ const char *endBuf = SM->getCharacterData(X);
+ std::string Name(ND->getNameAsString());
+ std::string ByrefType;
+ RewriteByRefString(ByrefType, Name, ND, true);
+ ByrefType += " {\n";
+ ByrefType += " void *__isa;\n";
+ RewriteByRefString(ByrefType, Name, ND);
+ ByrefType += " *__forwarding;\n";
+ ByrefType += " int __flags;\n";
+ ByrefType += " int __size;\n";
+ // Add void *__Block_byref_id_object_copy;
+ // void *__Block_byref_id_object_dispose; if needed.
+ QualType Ty = ND->getType();
+ bool HasCopyAndDispose = Context->BlockRequiresCopying(Ty);
+ if (HasCopyAndDispose) {
+ ByrefType += " void (*__Block_byref_id_object_copy)(void*, void*);\n";
+ ByrefType += " void (*__Block_byref_id_object_dispose)(void*);\n";
+ }
+
+ QualType T = Ty;
+ (void)convertBlockPointerToFunctionPointer(T);
+ T.getAsStringInternal(Name, Context->getPrintingPolicy());
+
+ ByrefType += " " + Name + ";\n";
+ ByrefType += "};\n";
+ // Insert this type in global scope. It is needed by the helper functions.
+ SourceLocation FunLocStart;
+ if (CurFunctionDef)
+ FunLocStart = CurFunctionDef->getTypeSpecStartLoc();
+ else {
+ assert(CurMethodDef && "RewriteByRefVar - CurMethodDef is null");
+ FunLocStart = CurMethodDef->getLocStart();
+ }
+ InsertText(FunLocStart, ByrefType);
+ if (Ty.isObjCGCWeak()) {
+ flag |= BLOCK_FIELD_IS_WEAK;
+ isa = 1;
+ }
+
+ if (HasCopyAndDispose) {
+ flag = BLOCK_BYREF_CALLER;
+ QualType Ty = ND->getType();
+ // FIXME. Handle __weak variable (BLOCK_FIELD_IS_WEAK) as well.
+ if (Ty->isBlockPointerType())
+ flag |= BLOCK_FIELD_IS_BLOCK;
+ else
+ flag |= BLOCK_FIELD_IS_OBJECT;
+ std::string HF = SynthesizeByrefCopyDestroyHelper(ND, flag);
+ if (!HF.empty())
+ InsertText(FunLocStart, HF);
+ }
+
+ // struct __Block_byref_ND ND =
+ // {0, &ND, some_flag, __size=sizeof(struct __Block_byref_ND),
+ // initializer-if-any};
+ bool hasInit = (ND->getInit() != 0);
+ unsigned flags = 0;
+ if (HasCopyAndDispose)
+ flags |= BLOCK_HAS_COPY_DISPOSE;
+ Name = ND->getNameAsString();
+ ByrefType.clear();
+ RewriteByRefString(ByrefType, Name, ND);
+ std::string ForwardingCastType("(");
+ ForwardingCastType += ByrefType + " *)";
+ if (!hasInit) {
+ ByrefType += " " + Name + " = {(void*)";
+ ByrefType += utostr(isa);
+ ByrefType += "," + ForwardingCastType + "&" + Name + ", ";
+ ByrefType += utostr(flags);
+ ByrefType += ", ";
+ ByrefType += "sizeof(";
+ RewriteByRefString(ByrefType, Name, ND);
+ ByrefType += ")";
+ if (HasCopyAndDispose) {
+ ByrefType += ", __Block_byref_id_object_copy_";
+ ByrefType += utostr(flag);
+ ByrefType += ", __Block_byref_id_object_dispose_";
+ ByrefType += utostr(flag);
+ }
+ ByrefType += "};\n";
+ unsigned nameSize = Name.size();
+ // For a block or function pointer declaration, the name is already
+ // part of the declaration.
+ if (Ty->isBlockPointerType() || Ty->isFunctionPointerType())
+ nameSize = 1;
+ ReplaceText(DeclLoc, endBuf-startBuf+nameSize, ByrefType);
+ }
+ else {
+ SourceLocation startLoc;
+ Expr *E = ND->getInit();
+ if (const CStyleCastExpr *ECE = dyn_cast<CStyleCastExpr>(E))
+ startLoc = ECE->getLParenLoc();
+ else
+ startLoc = E->getLocStart();
+ startLoc = SM->getExpansionLoc(startLoc);
+ endBuf = SM->getCharacterData(startLoc);
+ ByrefType += " " + Name;
+ ByrefType += " = {(void*)";
+ ByrefType += utostr(isa);
+ ByrefType += "," + ForwardingCastType + "&" + Name + ", ";
+ ByrefType += utostr(flags);
+ ByrefType += ", ";
+ ByrefType += "sizeof(";
+ RewriteByRefString(ByrefType, Name, ND);
+ ByrefType += "), ";
+ if (HasCopyAndDispose) {
+ ByrefType += "__Block_byref_id_object_copy_";
+ ByrefType += utostr(flag);
+ ByrefType += ", __Block_byref_id_object_dispose_";
+ ByrefType += utostr(flag);
+ ByrefType += ", ";
+ }
+ ReplaceText(DeclLoc, endBuf-startBuf, ByrefType);
+
+ // Complete the newly synthesized compound expression by inserting a right
+ // curly brace before the end of the declaration.
+ // FIXME: This approach avoids rewriting the initializer expression. It
+ // also assumes there is only one declarator. For example, the following
+ // isn't currently supported by this routine (in general):
+ //
+ // double __block BYREFVAR = 1.34, BYREFVAR2 = 1.37;
+ //
+ const char *startInitializerBuf = SM->getCharacterData(startLoc);
+ const char *semiBuf = strchr(startInitializerBuf, ';');
+ assert((*semiBuf == ';') && "RewriteByRefVar: can't find ';'");
+ SourceLocation semiLoc =
+ startLoc.getLocWithOffset(semiBuf-startInitializerBuf);
+
+ InsertText(semiLoc, "}");
+ }
+ return;
+}
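+
+// For example (hypothetical variable), "__block int x = 10;" inside a function
+// 'foo' is rewritten to roughly:
+// __Block_byref_x_0 x = {(void*)0, (__Block_byref_x_0 *)&x, 0,
+// sizeof(__Block_byref_x_0), 10};
+// with the struct __Block_byref_x_0 definition itself inserted at the start
+// of 'foo'.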
+
+void RewriteObjC::CollectBlockDeclRefInfo(BlockExpr *Exp) {
+ // Add initializers for any closure decl refs.
+ GetBlockDeclRefExprs(Exp->getBody());
+ if (BlockDeclRefs.size()) {
+ // Unique all "by copy" declarations.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (!BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
+ if (!BlockByCopyDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
+ BlockByCopyDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
+ BlockByCopyDecls.push_back(BlockDeclRefs[i]->getDecl());
+ }
+ }
+ // Unique all "by ref" declarations.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>()) {
+ if (!BlockByRefDeclsPtrSet.count(BlockDeclRefs[i]->getDecl())) {
+ BlockByRefDeclsPtrSet.insert(BlockDeclRefs[i]->getDecl());
+ BlockByRefDecls.push_back(BlockDeclRefs[i]->getDecl());
+ }
+ }
+ // Find any imported blocks...they will need special attention.
+ for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
+ if (BlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>() ||
+ BlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
+ BlockDeclRefs[i]->getType()->isBlockPointerType())
+ ImportedBlockDecls.insert(BlockDeclRefs[i]->getDecl());
+ }
+}
+
+FunctionDecl *RewriteObjC::SynthBlockInitFunctionDecl(StringRef name) {
+ IdentifierInfo *ID = &Context->Idents.get(name);
+ QualType FType = Context->getFunctionNoProtoType(Context->VoidPtrTy);
+ return FunctionDecl::Create(*Context, TUDecl, SourceLocation(),
+ SourceLocation(), ID, FType, 0, SC_Extern,
+ SC_None, false, false);
+}
+
+Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
+ const SmallVector<DeclRefExpr *, 8> &InnerBlockDeclRefs) {
+ const BlockDecl *block = Exp->getBlockDecl();
+ Blocks.push_back(Exp);
+
+ CollectBlockDeclRefInfo(Exp);
+
+ // Add inner imported variables now used in current block.
+ int countOfInnerDecls = 0;
+ if (!InnerBlockDeclRefs.empty()) {
+ for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) {
+ DeclRefExpr *Exp = InnerBlockDeclRefs[i];
+ ValueDecl *VD = Exp->getDecl();
+ if (!VD->hasAttr<BlocksAttr>() && !BlockByCopyDeclsPtrSet.count(VD)) {
+ // We need to save the variables copied into nested blocks because
+ // they are needed later when some of the APIs are generated.
+ // See the SynthesizeBlockLiterals routine.
+ InnerDeclRefs.push_back(Exp); countOfInnerDecls++;
+ BlockDeclRefs.push_back(Exp);
+ BlockByCopyDeclsPtrSet.insert(VD);
+ BlockByCopyDecls.push_back(VD);
+ }
+ if (VD->hasAttr<BlocksAttr>() && !BlockByRefDeclsPtrSet.count(VD)) {
+ InnerDeclRefs.push_back(Exp); countOfInnerDecls++;
+ BlockDeclRefs.push_back(Exp);
+ BlockByRefDeclsPtrSet.insert(VD);
+ BlockByRefDecls.push_back(VD);
+ }
+ }
+ // Find any imported blocks...they will need special attention.
+ for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++)
+ if (InnerBlockDeclRefs[i]->getDecl()->hasAttr<BlocksAttr>() ||
+ InnerBlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
+ InnerBlockDeclRefs[i]->getType()->isBlockPointerType())
+ ImportedBlockDecls.insert(InnerBlockDeclRefs[i]->getDecl());
+ }
+ InnerDeclRefsCount.push_back(countOfInnerDecls);
+
+ std::string FuncName;
+
+ if (CurFunctionDef)
+ FuncName = CurFunctionDef->getNameAsString();
+ else if (CurMethodDef)
+ BuildUniqueMethodName(FuncName, CurMethodDef);
+ else if (GlobalVarDecl)
+ FuncName = std::string(GlobalVarDecl->getNameAsString());
+
+ std::string BlockNumber = utostr(Blocks.size()-1);
+
+ std::string Tag = "__" + FuncName + "_block_impl_" + BlockNumber;
+ std::string Func = "__" + FuncName + "_block_func_" + BlockNumber;
+
+ // Get a pointer to the function type so we can cast appropriately.
+ QualType BFT = convertFunctionTypeOfBlocks(Exp->getFunctionType());
+ QualType FType = Context->getPointerType(BFT);
+
+ FunctionDecl *FD;
+ Expr *NewRep;
+
+ // Simulate a contructor call...
+ FD = SynthBlockInitFunctionDecl(Tag);
+ DeclRefExpr *DRE = new (Context) DeclRefExpr(FD, false, FType, VK_RValue,
+ SourceLocation());
+
+ SmallVector<Expr*, 4> InitExprs;
+
+ // Initialize the block function.
+ FD = SynthBlockInitFunctionDecl(Func);
+ DeclRefExpr *Arg = new (Context) DeclRefExpr(FD, false, FD->getType(),
+ VK_LValue, SourceLocation());
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
+ CK_BitCast, Arg);
+ InitExprs.push_back(castExpr);
+
+ // Initialize the block descriptor.
+ std::string DescData = "__" + FuncName + "_block_desc_" + BlockNumber + "_DATA";
+
+ VarDecl *NewVD = VarDecl::Create(*Context, TUDecl,
+ SourceLocation(), SourceLocation(),
+ &Context->Idents.get(DescData.c_str()),
+ Context->VoidPtrTy, 0,
+ SC_Static, SC_None);
+ UnaryOperator *DescRefExpr =
+ new (Context) UnaryOperator(new (Context) DeclRefExpr(NewVD, false,
+ Context->VoidPtrTy,
+ VK_LValue,
+ SourceLocation()),
+ UO_AddrOf,
+ Context->getPointerType(Context->VoidPtrTy),
+ VK_RValue, OK_Ordinary,
+ SourceLocation());
+ InitExprs.push_back(DescRefExpr);
+
+ // Add initializers for any closure decl refs.
+ if (BlockDeclRefs.size()) {
+ Expr *Exp;
+ // Output all "by copy" declarations.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
+ E = BlockByCopyDecls.end(); I != E; ++I) {
+ if (isObjCType((*I)->getType())) {
+ // FIXME: Conform to ABI ([[obj retain] autorelease]).
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
+ SourceLocation());
+ if (HasLocalVariableExternalStorage(*I)) {
+ QualType QT = (*I)->getType();
+ QT = Context->getPointerType(QT);
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation());
+ }
+ } else if (isTopLevelBlockPointerType((*I)->getType())) {
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Arg = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
+ SourceLocation());
+ Exp = NoTypeInfoCStyleCastExpr(Context, Context->VoidPtrTy,
+ CK_BitCast, Arg);
+ } else {
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
+ SourceLocation());
+ if (HasLocalVariableExternalStorage(*I)) {
+ QualType QT = (*I)->getType();
+ QT = Context->getPointerType(QT);
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf, QT, VK_RValue,
+ OK_Ordinary, SourceLocation());
+ }
+
+ }
+ InitExprs.push_back(Exp);
+ }
+ // Output all "by ref" declarations.
+ for (SmallVector<ValueDecl*,8>::iterator I = BlockByRefDecls.begin(),
+ E = BlockByRefDecls.end(); I != E; ++I) {
+ ValueDecl *ND = (*I);
+ std::string Name(ND->getNameAsString());
+ std::string RecName;
+ RewriteByRefString(RecName, Name, ND, true);
+ IdentifierInfo *II = &Context->Idents.get(RecName.c_str()
+ + sizeof("struct"));
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ II);
+ assert(RD && "SynthBlockInitExpr(): Can't find RecordDecl");
+ QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+
+ FD = SynthBlockInitFunctionDecl((*I)->getName());
+ Exp = new (Context) DeclRefExpr(FD, false, FD->getType(), VK_LValue,
+ SourceLocation());
+ bool isNestedCapturedVar = false;
+ if (block)
+ for (BlockDecl::capture_const_iterator ci = block->capture_begin(),
+ ce = block->capture_end(); ci != ce; ++ci) {
+ const VarDecl *variable = ci->getVariable();
+ if (variable == ND && ci->isNested()) {
+ assert (ci->isByRef() &&
+ "SynthBlockInitExpr - captured block variable is not byref");
+ isNestedCapturedVar = true;
+ break;
+ }
+ }
+      // A captured nested byref variable already has its address passed. Do
+      // not take its address again.
+ if (!isNestedCapturedVar)
+ Exp = new (Context) UnaryOperator(Exp, UO_AddrOf,
+ Context->getPointerType(Exp->getType()),
+ VK_RValue, OK_Ordinary, SourceLocation());
+ Exp = NoTypeInfoCStyleCastExpr(Context, castT, CK_BitCast, Exp);
+ InitExprs.push_back(Exp);
+ }
+ }
+ if (ImportedBlockDecls.size()) {
+ // generate BLOCK_HAS_COPY_DISPOSE(have helper funcs) | BLOCK_HAS_DESCRIPTOR
+ int flag = (BLOCK_HAS_COPY_DISPOSE | BLOCK_HAS_DESCRIPTOR);
+ unsigned IntSize =
+ static_cast<unsigned>(Context->getTypeSize(Context->IntTy));
+ Expr *FlagExp = IntegerLiteral::Create(*Context, llvm::APInt(IntSize, flag),
+ Context->IntTy, SourceLocation());
+ InitExprs.push_back(FlagExp);
+ }
+ NewRep = new (Context) CallExpr(*Context, DRE, &InitExprs[0], InitExprs.size(),
+ FType, VK_LValue, SourceLocation());
+ NewRep = new (Context) UnaryOperator(NewRep, UO_AddrOf,
+ Context->getPointerType(NewRep->getType()),
+ VK_RValue, OK_Ordinary, SourceLocation());
+ NewRep = NoTypeInfoCStyleCastExpr(Context, FType, CK_BitCast,
+ NewRep);
+ BlockDeclRefs.clear();
+ BlockByRefDecls.clear();
+ BlockByRefDeclsPtrSet.clear();
+ BlockByCopyDecls.clear();
+ BlockByCopyDeclsPtrSet.clear();
+ ImportedBlockDecls.clear();
+ return NewRep;
+}
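+// Illustrative note (hypothetical enclosing function "foo", first block literal
+// in it): the expression synthesized above corresponds textually to roughly
+//   ((T (*)(...))&__foo_block_impl_0((void *)__foo_block_func_0,
+//                                    &__foo_block_desc_0_DATA,
+//                                    <captured decls...>, <flags if any>))
+// i.e. a simulated constructor call whose address is cast to the block's
+// function pointer type.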
+
+bool RewriteObjC::IsDeclStmtInForeachHeader(DeclStmt *DS) {
+ if (const ObjCForCollectionStmt * CS =
+ dyn_cast<ObjCForCollectionStmt>(Stmts.back()))
+ return CS->getElement() == DS;
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Function Body / Expression rewriting
+//===----------------------------------------------------------------------===//
+
+Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
+ if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
+ isa<DoStmt>(S) || isa<ForStmt>(S))
+ Stmts.push_back(S);
+ else if (isa<ObjCForCollectionStmt>(S)) {
+ Stmts.push_back(S);
+ ObjCBcLabelNo.push_back(++BcLabelCount);
+ }
+
+ // Pseudo-object operations and ivar references need special
+ // treatment because we're going to recursively rewrite them.
+ if (PseudoObjectExpr *PseudoOp = dyn_cast<PseudoObjectExpr>(S)) {
+ if (isa<BinaryOperator>(PseudoOp->getSyntacticForm())) {
+ return RewritePropertyOrImplicitSetter(PseudoOp);
+ } else {
+ return RewritePropertyOrImplicitGetter(PseudoOp);
+ }
+ } else if (ObjCIvarRefExpr *IvarRefExpr = dyn_cast<ObjCIvarRefExpr>(S)) {
+ return RewriteObjCIvarRefExpr(IvarRefExpr);
+ }
+
+ SourceRange OrigStmtRange = S->getSourceRange();
+
+ // Perform a bottom up rewrite of all children.
+ for (Stmt::child_range CI = S->children(); CI; ++CI)
+ if (*CI) {
+ Stmt *childStmt = (*CI);
+ Stmt *newStmt = RewriteFunctionBodyOrGlobalInitializer(childStmt);
+ if (newStmt) {
+ *CI = newStmt;
+ }
+ }
+
+ if (BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
+ SmallVector<DeclRefExpr *, 8> InnerBlockDeclRefs;
+ llvm::SmallPtrSet<const DeclContext *, 8> InnerContexts;
+ InnerContexts.insert(BE->getBlockDecl());
+ ImportedLocalExternalDecls.clear();
+ GetInnerBlockDeclRefExprs(BE->getBody(),
+ InnerBlockDeclRefs, InnerContexts);
+ // Rewrite the block body in place.
+ Stmt *SaveCurrentBody = CurrentBody;
+ CurrentBody = BE->getBody();
+ PropParentMap = 0;
+    // A block literal on the RHS of a property-dot-syntax assignment
+    // must be replaced by its synthesized AST so that getRewrittenText
+    // works as expected. In this case, what actually ends up on the RHS
+    // is blockTranscribed, the helper expression synthesized for the
+    // block literal; as in: self.c = ^() {[ace ARR];};
+ bool saveDisableReplaceStmt = DisableReplaceStmt;
+ DisableReplaceStmt = false;
+ RewriteFunctionBodyOrGlobalInitializer(BE->getBody());
+ DisableReplaceStmt = saveDisableReplaceStmt;
+ CurrentBody = SaveCurrentBody;
+ PropParentMap = 0;
+ ImportedLocalExternalDecls.clear();
+ // Now we snarf the rewritten text and stash it away for later use.
+ std::string Str = Rewrite.getRewrittenText(BE->getSourceRange());
+ RewrittenBlockExprs[BE] = Str;
+
+ Stmt *blockTranscribed = SynthBlockInitExpr(BE, InnerBlockDeclRefs);
+
+ //blockTranscribed->dump();
+ ReplaceStmt(S, blockTranscribed);
+ return blockTranscribed;
+ }
+ // Handle specific things.
+ if (ObjCEncodeExpr *AtEncode = dyn_cast<ObjCEncodeExpr>(S))
+ return RewriteAtEncode(AtEncode);
+
+ if (ObjCSelectorExpr *AtSelector = dyn_cast<ObjCSelectorExpr>(S))
+ return RewriteAtSelector(AtSelector);
+
+ if (ObjCStringLiteral *AtString = dyn_cast<ObjCStringLiteral>(S))
+ return RewriteObjCStringLiteral(AtString);
+
+ if (ObjCMessageExpr *MessExpr = dyn_cast<ObjCMessageExpr>(S)) {
+#if 0
+ // Before we rewrite it, put the original message expression in a comment.
+ SourceLocation startLoc = MessExpr->getLocStart();
+ SourceLocation endLoc = MessExpr->getLocEnd();
+
+ const char *startBuf = SM->getCharacterData(startLoc);
+ const char *endBuf = SM->getCharacterData(endLoc);
+
+ std::string messString;
+ messString += "// ";
+ messString.append(startBuf, endBuf-startBuf+1);
+ messString += "\n";
+
+ // FIXME: Missing definition of
+ // InsertText(clang::SourceLocation, char const*, unsigned int).
+ // InsertText(startLoc, messString.c_str(), messString.size());
+ // Tried this, but it didn't work either...
+ // ReplaceText(startLoc, 0, messString.c_str(), messString.size());
+#endif
+ return RewriteMessageExpr(MessExpr);
+ }
+
+ if (ObjCAtTryStmt *StmtTry = dyn_cast<ObjCAtTryStmt>(S))
+ return RewriteObjCTryStmt(StmtTry);
+
+ if (ObjCAtSynchronizedStmt *StmtTry = dyn_cast<ObjCAtSynchronizedStmt>(S))
+ return RewriteObjCSynchronizedStmt(StmtTry);
+
+ if (ObjCAtThrowStmt *StmtThrow = dyn_cast<ObjCAtThrowStmt>(S))
+ return RewriteObjCThrowStmt(StmtThrow);
+
+ if (ObjCProtocolExpr *ProtocolExp = dyn_cast<ObjCProtocolExpr>(S))
+ return RewriteObjCProtocolExpr(ProtocolExp);
+
+ if (ObjCForCollectionStmt *StmtForCollection =
+ dyn_cast<ObjCForCollectionStmt>(S))
+ return RewriteObjCForCollectionStmt(StmtForCollection,
+ OrigStmtRange.getEnd());
+ if (BreakStmt *StmtBreakStmt =
+ dyn_cast<BreakStmt>(S))
+ return RewriteBreakStmt(StmtBreakStmt);
+ if (ContinueStmt *StmtContinueStmt =
+ dyn_cast<ContinueStmt>(S))
+ return RewriteContinueStmt(StmtContinueStmt);
+
+ // Need to check for protocol refs (id <P>, Foo <P> *) in variable decls
+ // and cast exprs.
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(S)) {
+ // FIXME: What we're doing here is modifying the type-specifier that
+ // precedes the first Decl. In the future the DeclGroup should have
+ // a separate type-specifier that we can rewrite.
+ // NOTE: We need to avoid rewriting the DeclStmt if it is within
+ // the context of an ObjCForCollectionStmt. For example:
+ // NSArray *someArray;
+ // for (id <FooProtocol> index in someArray) ;
+ // This is because RewriteObjCForCollectionStmt() does textual rewriting
+ // and it depends on the original text locations/positions.
+ if (Stmts.empty() || !IsDeclStmtInForeachHeader(DS))
+ RewriteObjCQualifiedInterfaceTypes(*DS->decl_begin());
+
+ // Blocks rewrite rules.
+ for (DeclStmt::decl_iterator DI = DS->decl_begin(), DE = DS->decl_end();
+ DI != DE; ++DI) {
+ Decl *SD = *DI;
+ if (ValueDecl *ND = dyn_cast<ValueDecl>(SD)) {
+ if (isTopLevelBlockPointerType(ND->getType()))
+ RewriteBlockPointerDecl(ND);
+ else if (ND->getType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(ND->getType(), ND);
+ if (VarDecl *VD = dyn_cast<VarDecl>(SD)) {
+ if (VD->hasAttr<BlocksAttr>()) {
+ static unsigned uniqueByrefDeclCount = 0;
+ assert(!BlockByRefDeclNo.count(ND) &&
+ "RewriteFunctionBodyOrGlobalInitializer: Duplicate byref decl");
+ BlockByRefDeclNo[ND] = uniqueByrefDeclCount++;
+ RewriteByRefVar(VD);
+ }
+ else
+ RewriteTypeOfDecl(VD);
+ }
+ }
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(SD)) {
+ if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
+ RewriteBlockPointerDecl(TD);
+ else if (TD->getUnderlyingType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
+ }
+ }
+ }
+
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(S))
+ RewriteObjCQualifiedInterfaceTypes(CE);
+
+ if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
+ isa<DoStmt>(S) || isa<ForStmt>(S)) {
+ assert(!Stmts.empty() && "Statement stack is empty");
+ assert ((isa<SwitchStmt>(Stmts.back()) || isa<WhileStmt>(Stmts.back()) ||
+ isa<DoStmt>(Stmts.back()) || isa<ForStmt>(Stmts.back()))
+ && "Statement stack mismatch");
+ Stmts.pop_back();
+ }
+ // Handle blocks rewriting.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(S)) {
+ ValueDecl *VD = DRE->getDecl();
+ if (VD->hasAttr<BlocksAttr>())
+ return RewriteBlockDeclRefExpr(DRE);
+ if (HasLocalVariableExternalStorage(VD))
+ return RewriteLocalVariableExternalStorage(DRE);
+ }
+
+ if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ if (CE->getCallee()->getType()->isBlockPointerType()) {
+ Stmt *BlockCall = SynthesizeBlockCall(CE, CE->getCallee());
+ ReplaceStmt(S, BlockCall);
+ return BlockCall;
+ }
+ }
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(S)) {
+ RewriteCastExpr(CE);
+ }
+#if 0
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(S)) {
+ CastExpr *Replacement = new (Context) CastExpr(ICE->getType(),
+ ICE->getSubExpr(),
+ SourceLocation());
+ // Get the new text.
+ std::string SStr;
+ llvm::raw_string_ostream Buf(SStr);
+ Replacement->printPretty(Buf, *Context);
+ const std::string &Str = Buf.str();
+
+ printf("CAST = %s\n", &Str[0]);
+ InsertText(ICE->getSubExpr()->getLocStart(), &Str[0], Str.size());
+ delete S;
+ return Replacement;
+ }
+#endif
+ // Return this stmt unmodified.
+ return S;
+}
+
+void RewriteObjC::RewriteRecordBody(RecordDecl *RD) {
+ for (RecordDecl::field_iterator i = RD->field_begin(),
+ e = RD->field_end(); i != e; ++i) {
+ FieldDecl *FD = *i;
+ if (isTopLevelBlockPointerType(FD->getType()))
+ RewriteBlockPointerDecl(FD);
+ if (FD->getType()->isObjCQualifiedIdType() ||
+ FD->getType()->isObjCQualifiedInterfaceType())
+ RewriteObjCQualifiedInterfaceTypes(FD);
+ }
+}
+
+/// HandleDeclInMainFile - This is called for each top-level decl defined in the
+/// main file of the input.
+void RewriteObjC::HandleDeclInMainFile(Decl *D) {
+ switch (D->getKind()) {
+ case Decl::Function: {
+ FunctionDecl *FD = cast<FunctionDecl>(D);
+ if (FD->isOverloadedOperator())
+ return;
+
+ // Since function prototypes don't have ParmDecl's, we check the function
+ // prototype. This enables us to rewrite function declarations and
+ // definitions using the same code.
+ RewriteBlocksInFunctionProtoType(FD->getType(), FD);
+
+ if (!FD->isThisDeclarationADefinition())
+ break;
+
+ // FIXME: If this should support Obj-C++, support CXXTryStmt
+ if (CompoundStmt *Body = dyn_cast_or_null<CompoundStmt>(FD->getBody())) {
+ CurFunctionDef = FD;
+ CurFunctionDeclToDeclareForBlock = FD;
+ CurrentBody = Body;
+ Body =
+ cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
+ FD->setBody(Body);
+ CurrentBody = 0;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = 0;
+ }
+ // This synthesizes and inserts the block "impl" struct, invoke function,
+ // and any copy/dispose helper functions.
+ InsertBlockLiteralsWithinFunction(FD);
+ CurFunctionDef = 0;
+ CurFunctionDeclToDeclareForBlock = 0;
+ }
+ break;
+ }
+ case Decl::ObjCMethod: {
+ ObjCMethodDecl *MD = cast<ObjCMethodDecl>(D);
+ if (CompoundStmt *Body = MD->getCompoundBody()) {
+ CurMethodDef = MD;
+ CurrentBody = Body;
+ Body =
+ cast_or_null<CompoundStmt>(RewriteFunctionBodyOrGlobalInitializer(Body));
+ MD->setBody(Body);
+ CurrentBody = 0;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = 0;
+ }
+ InsertBlockLiteralsWithinMethod(MD);
+ CurMethodDef = 0;
+ }
+ break;
+ }
+ case Decl::ObjCImplementation: {
+ ObjCImplementationDecl *CI = cast<ObjCImplementationDecl>(D);
+ ClassImplementation.push_back(CI);
+ break;
+ }
+ case Decl::ObjCCategoryImpl: {
+ ObjCCategoryImplDecl *CI = cast<ObjCCategoryImplDecl>(D);
+ CategoryImplementation.push_back(CI);
+ break;
+ }
+ case Decl::Var: {
+ VarDecl *VD = cast<VarDecl>(D);
+ RewriteObjCQualifiedInterfaceTypes(VD);
+ if (isTopLevelBlockPointerType(VD->getType()))
+ RewriteBlockPointerDecl(VD);
+ else if (VD->getType()->isFunctionPointerType()) {
+ CheckFunctionPointerDecl(VD->getType(), VD);
+ if (VD->getInit()) {
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
+ RewriteCastExpr(CE);
+ }
+ }
+ } else if (VD->getType()->isRecordType()) {
+ RecordDecl *RD = VD->getType()->getAs<RecordType>()->getDecl();
+ if (RD->isCompleteDefinition())
+ RewriteRecordBody(RD);
+ }
+ if (VD->getInit()) {
+ GlobalVarDecl = VD;
+ CurrentBody = VD->getInit();
+ RewriteFunctionBodyOrGlobalInitializer(VD->getInit());
+ CurrentBody = 0;
+ if (PropParentMap) {
+ delete PropParentMap;
+ PropParentMap = 0;
+ }
+ SynthesizeBlockLiterals(VD->getTypeSpecStartLoc(), VD->getName());
+ GlobalVarDecl = 0;
+
+ // This is needed for blocks.
+ if (CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(VD->getInit())) {
+ RewriteCastExpr(CE);
+ }
+ }
+ break;
+ }
+ case Decl::TypeAlias:
+ case Decl::Typedef: {
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ if (isTopLevelBlockPointerType(TD->getUnderlyingType()))
+ RewriteBlockPointerDecl(TD);
+ else if (TD->getUnderlyingType()->isFunctionPointerType())
+ CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
+ }
+ break;
+ }
+ case Decl::CXXRecord:
+ case Decl::Record: {
+ RecordDecl *RD = cast<RecordDecl>(D);
+ if (RD->isCompleteDefinition())
+ RewriteRecordBody(RD);
+ break;
+ }
+ default:
+ break;
+ }
+ // Nothing yet.
+}
+
+void RewriteObjC::HandleTranslationUnit(ASTContext &C) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ RewriteInclude();
+
+ // Here's a great place to add any extra declarations that may be needed.
+ // Write out meta data for each @protocol(<expr>).
+ for (llvm::SmallPtrSet<ObjCProtocolDecl *,8>::iterator I = ProtocolExprDecls.begin(),
+ E = ProtocolExprDecls.end(); I != E; ++I)
+ RewriteObjCProtocolMetaData(*I, "", "", Preamble);
+
+ InsertText(SM->getLocForStartOfFile(MainFileID), Preamble, false);
+ if (ClassImplementation.size() || CategoryImplementation.size())
+ RewriteImplementations();
+
+ // Get the buffer corresponding to MainFileID. If we haven't changed it, then
+ // we are done.
+ if (const RewriteBuffer *RewriteBuf =
+ Rewrite.getRewriteBufferFor(MainFileID)) {
+ //printf("Changed:\n");
+ *OutFile << std::string(RewriteBuf->begin(), RewriteBuf->end());
+ } else {
+ llvm::errs() << "No changes\n";
+ }
+
+ if (ClassImplementation.size() || CategoryImplementation.size() ||
+ ProtocolExprDecls.size()) {
+    // Rewrite Objective-C metadata.
+ std::string ResultStr;
+ RewriteMetaDataIntoBuffer(ResultStr);
+ // Emit metadata.
+ *OutFile << ResultStr;
+ }
+ OutFile->flush();
+}
+
+void RewriteObjCFragileABI::Initialize(ASTContext &context) {
+ InitializeCommon(context);
+
+  // Declaring objc_selector outside the parameter list removes a silly
+  // scope-related warning.
+ if (IsHeader)
+ Preamble = "#pragma once\n";
+ Preamble += "struct objc_selector; struct objc_class;\n";
+ Preamble += "struct __rw_objc_super { struct objc_object *object; ";
+ Preamble += "struct objc_object *superClass; ";
+ if (LangOpts.MicrosoftExt) {
+ // Add a constructor for creating temporary objects.
+ Preamble += "__rw_objc_super(struct objc_object *o, struct objc_object *s) "
+ ": ";
+ Preamble += "object(o), superClass(s) {} ";
+ }
+ Preamble += "};\n";
+ Preamble += "#ifndef _REWRITER_typedef_Protocol\n";
+ Preamble += "typedef struct objc_object Protocol;\n";
+ Preamble += "#define _REWRITER_typedef_Protocol\n";
+ Preamble += "#endif\n";
+ if (LangOpts.MicrosoftExt) {
+ Preamble += "#define __OBJC_RW_DLLIMPORT extern \"C\" __declspec(dllimport)\n";
+ Preamble += "#define __OBJC_RW_STATICIMPORT extern \"C\"\n";
+ } else
+ Preamble += "#define __OBJC_RW_DLLIMPORT extern\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSend";
+ Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSendSuper";
+ Preamble += "(struct objc_super *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object* objc_msgSend_stret";
+ Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object* objc_msgSendSuper_stret";
+ Preamble += "(struct objc_super *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT double objc_msgSend_fpret";
+ Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getClass";
+ Preamble += "(const char *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_class *class_getSuperclass";
+ Preamble += "(struct objc_class *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_getMetaClass";
+ Preamble += "(const char *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_throw(struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_try_enter(void *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_exception_try_exit(void *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_exception_extract(void *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT int objc_exception_match";
+ Preamble += "(struct objc_class *, struct objc_object *);\n";
+ // @synchronized hooks.
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_enter(struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_sync_exit(struct objc_object *);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT Protocol *objc_getProtocol(const char *);\n";
+ Preamble += "#ifndef __FASTENUMERATIONSTATE\n";
+ Preamble += "struct __objcFastEnumerationState {\n\t";
+ Preamble += "unsigned long state;\n\t";
+ Preamble += "void **itemsPtr;\n\t";
+ Preamble += "unsigned long *mutationsPtr;\n\t";
+ Preamble += "unsigned long extra[5];\n};\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void objc_enumerationMutation(struct objc_object *);\n";
+ Preamble += "#define __FASTENUMERATIONSTATE\n";
+ Preamble += "#endif\n";
+ Preamble += "#ifndef __NSCONSTANTSTRINGIMPL\n";
+ Preamble += "struct __NSConstantStringImpl {\n";
+ Preamble += " int *isa;\n";
+ Preamble += " int flags;\n";
+ Preamble += " char *str;\n";
+ Preamble += " long length;\n";
+ Preamble += "};\n";
+ Preamble += "#ifdef CF_EXPORT_CONSTANT_STRING\n";
+ Preamble += "extern \"C\" __declspec(dllexport) int __CFConstantStringClassReference[];\n";
+ Preamble += "#else\n";
+ Preamble += "__OBJC_RW_DLLIMPORT int __CFConstantStringClassReference[];\n";
+ Preamble += "#endif\n";
+ Preamble += "#define __NSCONSTANTSTRINGIMPL\n";
+ Preamble += "#endif\n";
+ // Blocks preamble.
+ Preamble += "#ifndef BLOCK_IMPL\n";
+ Preamble += "#define BLOCK_IMPL\n";
+ Preamble += "struct __block_impl {\n";
+ Preamble += " void *isa;\n";
+ Preamble += " int Flags;\n";
+ Preamble += " int Reserved;\n";
+ Preamble += " void *FuncPtr;\n";
+ Preamble += "};\n";
+ Preamble += "// Runtime copy/destroy helper functions (from Block_private.h)\n";
+ Preamble += "#ifdef __OBJC_EXPORT_BLOCKS\n";
+ Preamble += "extern \"C\" __declspec(dllexport) "
+ "void _Block_object_assign(void *, const void *, const int);\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void _Block_object_dispose(const void *, const int);\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteGlobalBlock[32];\n";
+ Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteStackBlock[32];\n";
+ Preamble += "#else\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_assign(void *, const void *, const int);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void _Block_object_dispose(const void *, const int);\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteGlobalBlock[32];\n";
+ Preamble += "__OBJC_RW_DLLIMPORT void *_NSConcreteStackBlock[32];\n";
+ Preamble += "#endif\n";
+ Preamble += "#endif\n";
+ if (LangOpts.MicrosoftExt) {
+ Preamble += "#undef __OBJC_RW_DLLIMPORT\n";
+ Preamble += "#undef __OBJC_RW_STATICIMPORT\n";
+ Preamble += "#ifndef KEEP_ATTRIBUTES\n"; // We use this for clang tests.
+ Preamble += "#define __attribute__(X)\n";
+ Preamble += "#endif\n";
+ Preamble += "#define __weak\n";
+ }
+ else {
+ Preamble += "#define __block\n";
+ Preamble += "#define __weak\n";
+ }
+  // NOTE: Windows uses LLP64 in 64-bit mode, so cast the pointer to long long;
+  // this avoids warnings in both the 64-bit and 32-bit compilation models.
+ Preamble += "\n#define __OFFSETOFIVAR__(TYPE, MEMBER) ((long long) &((TYPE *)0)->MEMBER)\n";
+}
+
+/// RewriteIvarOffsetComputation - This routine synthesizes the computation of
+/// an ivar's offset.
+void RewriteObjCFragileABI::RewriteIvarOffsetComputation(ObjCIvarDecl *ivar,
+ std::string &Result) {
+ if (ivar->isBitField()) {
+ // FIXME: The hack below doesn't work for bitfields. For now, we simply
+ // place all bitfields at offset 0.
+ Result += "0";
+ } else {
+ Result += "__OFFSETOFIVAR__(struct ";
+ Result += ivar->getContainingInterface()->getNameAsString();
+ if (LangOpts.MicrosoftExt)
+ Result += "_IMPL";
+ Result += ", ";
+ Result += ivar->getNameAsString();
+ Result += ")";
+ }
+}
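+// For example, for a hypothetical ivar "bar" of interface "Foo", the
+// computation above expands to "__OFFSETOFIVAR__(struct Foo, bar)" (or
+// "__OFFSETOFIVAR__(struct Foo_IMPL, bar)" with Microsoft extensions), and to
+// the constant "0" for bitfield ivars.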
+
+/// RewriteObjCProtocolMetaData - Rewrite protocols meta-data.
+void RewriteObjCFragileABI::RewriteObjCProtocolMetaData(
+ ObjCProtocolDecl *PDecl, StringRef prefix,
+ StringRef ClassName, std::string &Result) {
+ static bool objc_protocol_methods = false;
+
+ // Output struct protocol_methods holder of method selector and type.
+ if (!objc_protocol_methods && PDecl->hasDefinition()) {
+ /* struct protocol_methods {
+ SEL _cmd;
+ char *method_types;
+ }
+ */
+ Result += "\nstruct _protocol_methods {\n";
+ Result += "\tstruct objc_selector *_cmd;\n";
+ Result += "\tchar *method_types;\n";
+ Result += "};\n";
+
+ objc_protocol_methods = true;
+ }
+ // Do not synthesize the protocol more than once.
+ if (ObjCSynthesizedProtocols.count(PDecl->getCanonicalDecl()))
+ return;
+
+ if (ObjCProtocolDecl *Def = PDecl->getDefinition())
+ PDecl = Def;
+
+ if (PDecl->instmeth_begin() != PDecl->instmeth_end()) {
+ unsigned NumMethods = std::distance(PDecl->instmeth_begin(),
+ PDecl->instmeth_end());
+ /* struct _objc_protocol_method_list {
+ int protocol_method_count;
+ struct protocol_methods protocols[];
+ }
+ */
+ Result += "\nstatic struct {\n";
+ Result += "\tint protocol_method_count;\n";
+ Result += "\tstruct _protocol_methods protocol_methods[";
+ Result += utostr(NumMethods);
+ Result += "];\n} _OBJC_PROTOCOL_INSTANCE_METHODS_";
+ Result += PDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __cat_inst_meth\")))= "
+ "{\n\t" + utostr(NumMethods) + "\n";
+
+ // Output instance methods declared in this protocol.
+ for (ObjCProtocolDecl::instmeth_iterator
+ I = PDecl->instmeth_begin(), E = PDecl->instmeth_end();
+ I != E; ++I) {
+ if (I == PDecl->instmeth_begin())
+ Result += "\t ,{{(struct objc_selector *)\"";
+ else
+ Result += "\t ,{(struct objc_selector *)\"";
+ Result += (*I)->getSelector().getAsString();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl((*I), MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\"}\n";
+ }
+ Result += "\t }\n};\n";
+ }
+
+ // Output class methods declared in this protocol.
+ unsigned NumMethods = std::distance(PDecl->classmeth_begin(),
+ PDecl->classmeth_end());
+ if (NumMethods > 0) {
+ /* struct _objc_protocol_method_list {
+ int protocol_method_count;
+ struct protocol_methods protocols[];
+ }
+ */
+ Result += "\nstatic struct {\n";
+ Result += "\tint protocol_method_count;\n";
+ Result += "\tstruct _protocol_methods protocol_methods[";
+ Result += utostr(NumMethods);
+ Result += "];\n} _OBJC_PROTOCOL_CLASS_METHODS_";
+ Result += PDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= "
+ "{\n\t";
+ Result += utostr(NumMethods);
+ Result += "\n";
+
+    // Output class methods declared in this protocol.
+ for (ObjCProtocolDecl::classmeth_iterator
+ I = PDecl->classmeth_begin(), E = PDecl->classmeth_end();
+ I != E; ++I) {
+ if (I == PDecl->classmeth_begin())
+ Result += "\t ,{{(struct objc_selector *)\"";
+ else
+ Result += "\t ,{(struct objc_selector *)\"";
+ Result += (*I)->getSelector().getAsString();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl((*I), MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\"}\n";
+ }
+ Result += "\t }\n};\n";
+ }
+
+ // Output:
+ /* struct _objc_protocol {
+ // Objective-C 1.0 extensions
+ struct _objc_protocol_extension *isa;
+ char *protocol_name;
+ struct _objc_protocol **protocol_list;
+ struct _objc_protocol_method_list *instance_methods;
+ struct _objc_protocol_method_list *class_methods;
+ };
+ */
+ static bool objc_protocol = false;
+ if (!objc_protocol) {
+ Result += "\nstruct _objc_protocol {\n";
+ Result += "\tstruct _objc_protocol_extension *isa;\n";
+ Result += "\tchar *protocol_name;\n";
+ Result += "\tstruct _objc_protocol **protocol_list;\n";
+ Result += "\tstruct _objc_protocol_method_list *instance_methods;\n";
+ Result += "\tstruct _objc_protocol_method_list *class_methods;\n";
+ Result += "};\n";
+
+ objc_protocol = true;
+ }
+
+ Result += "\nstatic struct _objc_protocol _OBJC_PROTOCOL_";
+ Result += PDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __protocol\")))= "
+ "{\n\t0, \"";
+ Result += PDecl->getNameAsString();
+ Result += "\", 0, ";
+ if (PDecl->instmeth_begin() != PDecl->instmeth_end()) {
+ Result += "(struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_INSTANCE_METHODS_";
+ Result += PDecl->getNameAsString();
+ Result += ", ";
+ }
+ else
+ Result += "0, ";
+ if (PDecl->classmeth_begin() != PDecl->classmeth_end()) {
+ Result += "(struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_CLASS_METHODS_";
+ Result += PDecl->getNameAsString();
+ Result += "\n";
+ }
+ else
+ Result += "0\n";
+ Result += "};\n";
+
+ // Mark this protocol as having been generated.
+ if (!ObjCSynthesizedProtocols.insert(PDecl->getCanonicalDecl()))
+ llvm_unreachable("protocol already synthesized");
+
+}
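+// Illustrative output for a hypothetical protocol "P" declaring one instance
+// method -(void)m; the code above emits, roughly:
+//   static struct { int protocol_method_count;
+//                   struct _protocol_methods protocol_methods[1]; }
+//     _OBJC_PROTOCOL_INSTANCE_METHODS_P
+//     __attribute__ ((used, section ("__OBJC, __cat_inst_meth"))) =
+//       { 1, {{(struct objc_selector *)"m", "<type encoding>"}} };
+//   static struct _objc_protocol _OBJC_PROTOCOL_P
+//     __attribute__ ((used, section ("__OBJC, __protocol"))) =
+//       { 0, "P", 0,
+//         (struct _objc_protocol_method_list *)&_OBJC_PROTOCOL_INSTANCE_METHODS_P,
+//         0 };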
+
+void RewriteObjCFragileABI::RewriteObjCProtocolListMetaData(
+ const ObjCList<ObjCProtocolDecl> &Protocols,
+ StringRef prefix, StringRef ClassName,
+ std::string &Result) {
+ if (Protocols.empty()) return;
+
+ for (unsigned i = 0; i != Protocols.size(); i++)
+ RewriteObjCProtocolMetaData(Protocols[i], prefix, ClassName, Result);
+
+  // Output the top level protocol meta-data for the class.
+ /* struct _objc_protocol_list {
+ struct _objc_protocol_list *next;
+ int protocol_count;
+ struct _objc_protocol *class_protocols[];
+ }
+ */
+ Result += "\nstatic struct {\n";
+ Result += "\tstruct _objc_protocol_list *next;\n";
+ Result += "\tint protocol_count;\n";
+ Result += "\tstruct _objc_protocol *class_protocols[";
+ Result += utostr(Protocols.size());
+ Result += "];\n} _OBJC_";
+ Result += prefix;
+ Result += "_PROTOCOLS_";
+ Result += ClassName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __cat_cls_meth\")))= "
+ "{\n\t0, ";
+ Result += utostr(Protocols.size());
+ Result += "\n";
+
+ Result += "\t,{&_OBJC_PROTOCOL_";
+ Result += Protocols[0]->getNameAsString();
+ Result += " \n";
+
+ for (unsigned i = 1; i != Protocols.size(); i++) {
+ Result += "\t ,&_OBJC_PROTOCOL_";
+ Result += Protocols[i]->getNameAsString();
+ Result += "\n";
+ }
+ Result += "\t }\n};\n";
+}
+
+void RewriteObjCFragileABI::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
+ std::string &Result) {
+ ObjCInterfaceDecl *CDecl = IDecl->getClassInterface();
+
+ // Explicitly declared @interface's are already synthesized.
+ if (CDecl->isImplicitInterfaceDecl()) {
+ // FIXME: Implementation of a class with no @interface (legacy) does not
+ // produce correct synthesis as yet.
+ RewriteObjCInternalStruct(CDecl, Result);
+ }
+
+  // Build _objc_ivar_list metadata for the class's ivars if needed.
+ unsigned NumIvars = !IDecl->ivar_empty()
+ ? IDecl->ivar_size()
+ : (CDecl ? CDecl->ivar_size() : 0);
+ if (NumIvars > 0) {
+ static bool objc_ivar = false;
+ if (!objc_ivar) {
+ /* struct _objc_ivar {
+ char *ivar_name;
+ char *ivar_type;
+ int ivar_offset;
+ };
+ */
+ Result += "\nstruct _objc_ivar {\n";
+ Result += "\tchar *ivar_name;\n";
+ Result += "\tchar *ivar_type;\n";
+ Result += "\tint ivar_offset;\n";
+ Result += "};\n";
+
+ objc_ivar = true;
+ }
+
+ /* struct {
+ int ivar_count;
+ struct _objc_ivar ivar_list[nIvars];
+ };
+ */
+ Result += "\nstatic struct {\n";
+ Result += "\tint ivar_count;\n";
+ Result += "\tstruct _objc_ivar ivar_list[";
+ Result += utostr(NumIvars);
+ Result += "];\n} _OBJC_INSTANCE_VARIABLES_";
+ Result += IDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __instance_vars\")))= "
+ "{\n\t";
+ Result += utostr(NumIvars);
+ Result += "\n";
+
+ ObjCInterfaceDecl::ivar_iterator IVI, IVE;
+ SmallVector<ObjCIvarDecl *, 8> IVars;
+ if (!IDecl->ivar_empty()) {
+ for (ObjCInterfaceDecl::ivar_iterator
+ IV = IDecl->ivar_begin(), IVEnd = IDecl->ivar_end();
+ IV != IVEnd; ++IV)
+ IVars.push_back(*IV);
+ IVI = IDecl->ivar_begin();
+ IVE = IDecl->ivar_end();
+ } else {
+ IVI = CDecl->ivar_begin();
+ IVE = CDecl->ivar_end();
+ }
+ Result += "\t,{{\"";
+ Result += (*IVI)->getNameAsString();
+ Result += "\", \"";
+ std::string TmpString, StrEncoding;
+ Context->getObjCEncodingForType((*IVI)->getType(), TmpString, *IVI);
+ QuoteDoublequotes(TmpString, StrEncoding);
+ Result += StrEncoding;
+ Result += "\", ";
+ RewriteIvarOffsetComputation(*IVI, Result);
+ Result += "}\n";
+ for (++IVI; IVI != IVE; ++IVI) {
+ Result += "\t ,{\"";
+ Result += (*IVI)->getNameAsString();
+ Result += "\", \"";
+ std::string TmpString, StrEncoding;
+ Context->getObjCEncodingForType((*IVI)->getType(), TmpString, *IVI);
+ QuoteDoublequotes(TmpString, StrEncoding);
+ Result += StrEncoding;
+ Result += "\", ";
+ RewriteIvarOffsetComputation((*IVI), Result);
+ Result += "}\n";
+ }
+
+ Result += "\t }\n};\n";
+ }
+
+ // Build _objc_method_list for class's instance methods if needed
+ SmallVector<ObjCMethodDecl *, 32>
+ InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end());
+
+ // If any of our property implementations have associated getters or
+ // setters, produce metadata for them as well.
+ for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
+ PropEnd = IDecl->propimpl_end();
+ Prop != PropEnd; ++Prop) {
+ if ((*Prop)->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ continue;
+ if (!(*Prop)->getPropertyIvarDecl())
+ continue;
+ ObjCPropertyDecl *PD = (*Prop)->getPropertyDecl();
+ if (!PD)
+ continue;
+ if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
+ if (!Getter->isDefined())
+ InstanceMethods.push_back(Getter);
+ if (PD->isReadOnly())
+ continue;
+ if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
+ if (!Setter->isDefined())
+ InstanceMethods.push_back(Setter);
+ }
+ RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(),
+ true, "", IDecl->getName(), Result);
+
+ // Build _objc_method_list for class's class methods if needed
+ RewriteObjCMethodsMetaData(IDecl->classmeth_begin(), IDecl->classmeth_end(),
+ false, "", IDecl->getName(), Result);
+
+ // Protocols referenced in class declaration?
+ RewriteObjCProtocolListMetaData(CDecl->getReferencedProtocols(),
+ "CLASS", CDecl->getName(), Result);
+
+ // Declaration of class/meta-class metadata
+ /* struct _objc_class {
+ struct _objc_class *isa; // or const char *root_class_name when metadata
+ const char *super_class_name;
+ char *name;
+ long version;
+ long info;
+ long instance_size;
+ struct _objc_ivar_list *ivars;
+ struct _objc_method_list *methods;
+ struct objc_cache *cache;
+ struct objc_protocol_list *protocols;
+ const char *ivar_layout;
+ struct _objc_class_ext *ext;
+ };
+ */
+ static bool objc_class = false;
+ if (!objc_class) {
+ Result += "\nstruct _objc_class {\n";
+ Result += "\tstruct _objc_class *isa;\n";
+ Result += "\tconst char *super_class_name;\n";
+ Result += "\tchar *name;\n";
+ Result += "\tlong version;\n";
+ Result += "\tlong info;\n";
+ Result += "\tlong instance_size;\n";
+ Result += "\tstruct _objc_ivar_list *ivars;\n";
+ Result += "\tstruct _objc_method_list *methods;\n";
+ Result += "\tstruct objc_cache *cache;\n";
+ Result += "\tstruct _objc_protocol_list *protocols;\n";
+ Result += "\tconst char *ivar_layout;\n";
+ Result += "\tstruct _objc_class_ext *ext;\n";
+ Result += "};\n";
+ objc_class = true;
+ }
+
+ // Meta-class metadata generation.
+ ObjCInterfaceDecl *RootClass = 0;
+ ObjCInterfaceDecl *SuperClass = CDecl->getSuperClass();
+ while (SuperClass) {
+ RootClass = SuperClass;
+ SuperClass = SuperClass->getSuperClass();
+ }
+ SuperClass = CDecl->getSuperClass();
+
+ Result += "\nstatic struct _objc_class _OBJC_METACLASS_";
+ Result += CDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __meta_class\")))= "
+ "{\n\t(struct _objc_class *)\"";
+ Result += (RootClass ? RootClass->getNameAsString() : CDecl->getNameAsString());
+ Result += "\"";
+
+ if (SuperClass) {
+ Result += ", \"";
+ Result += SuperClass->getNameAsString();
+ Result += "\", \"";
+ Result += CDecl->getNameAsString();
+ Result += "\"";
+ }
+ else {
+ Result += ", 0, \"";
+ Result += CDecl->getNameAsString();
+ Result += "\"";
+ }
+ // Set 'ivars' field for root class to 0. ObjC1 runtime does not use it.
+ // 'info' field is initialized to CLS_META(2) for metaclass
+ Result += ", 0,2, sizeof(struct _objc_class), 0";
+ if (IDecl->classmeth_begin() != IDecl->classmeth_end()) {
+ Result += "\n\t, (struct _objc_method_list *)&_OBJC_CLASS_METHODS_";
+ Result += IDecl->getNameAsString();
+ Result += "\n";
+ }
+ else
+ Result += ", 0\n";
+ if (CDecl->protocol_begin() != CDecl->protocol_end()) {
+ Result += "\t,0, (struct _objc_protocol_list *)&_OBJC_CLASS_PROTOCOLS_";
+ Result += CDecl->getNameAsString();
+ Result += ",0,0\n";
+ }
+ else
+ Result += "\t,0,0,0,0\n";
+ Result += "};\n";
+
+ // class metadata generation.
+ Result += "\nstatic struct _objc_class _OBJC_CLASS_";
+ Result += CDecl->getNameAsString();
+ Result += " __attribute__ ((used, section (\"__OBJC, __class\")))= "
+ "{\n\t&_OBJC_METACLASS_";
+ Result += CDecl->getNameAsString();
+ if (SuperClass) {
+ Result += ", \"";
+ Result += SuperClass->getNameAsString();
+ Result += "\", \"";
+ Result += CDecl->getNameAsString();
+ Result += "\"";
+ }
+ else {
+ Result += ", 0, \"";
+ Result += CDecl->getNameAsString();
+ Result += "\"";
+ }
+ // 'info' field is initialized to CLS_CLASS(1) for class
+ Result += ", 0,1";
+ if (!ObjCSynthesizedStructs.count(CDecl))
+ Result += ",0";
+ else {
+    // The class has an instance size; synthesize it.
+ Result += ",sizeof(struct ";
+ Result += CDecl->getNameAsString();
+ if (LangOpts.MicrosoftExt)
+ Result += "_IMPL";
+ Result += ")";
+ }
+ if (NumIvars > 0) {
+ Result += ", (struct _objc_ivar_list *)&_OBJC_INSTANCE_VARIABLES_";
+ Result += CDecl->getNameAsString();
+ Result += "\n\t";
+ }
+ else
+ Result += ",0";
+ if (IDecl->instmeth_begin() != IDecl->instmeth_end()) {
+ Result += ", (struct _objc_method_list *)&_OBJC_INSTANCE_METHODS_";
+ Result += CDecl->getNameAsString();
+ Result += ", 0\n\t";
+ }
+ else
+ Result += ",0,0";
+ if (CDecl->protocol_begin() != CDecl->protocol_end()) {
+ Result += ", (struct _objc_protocol_list*)&_OBJC_CLASS_PROTOCOLS_";
+ Result += CDecl->getNameAsString();
+ Result += ", 0,0\n";
+ }
+ else
+ Result += ",0,0,0\n";
+ Result += "};\n";
+}
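+// Illustrative output for a hypothetical class "Foo : NSObject" with no ivars,
+// methods, or protocols; the code above emits, roughly:
+//   static struct _objc_class _OBJC_METACLASS_Foo
+//     __attribute__ ((used, section ("__OBJC, __meta_class"))) =
+//       { (struct _objc_class *)"NSObject", "NSObject", "Foo",
+//         0, 2, sizeof(struct _objc_class), 0, 0, 0, 0, 0, 0 };
+//   static struct _objc_class _OBJC_CLASS_Foo
+//     __attribute__ ((used, section ("__OBJC, __class"))) =
+//       { &_OBJC_METACLASS_Foo, "NSObject", "Foo", 0, 1, 0, 0, 0, 0, 0, 0, 0 };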
+
+void RewriteObjCFragileABI::RewriteMetaDataIntoBuffer(std::string &Result) {
+ int ClsDefCount = ClassImplementation.size();
+ int CatDefCount = CategoryImplementation.size();
+
+ // For each implemented class, write out all its meta data.
+ for (int i = 0; i < ClsDefCount; i++)
+ RewriteObjCClassMetaData(ClassImplementation[i], Result);
+
+ // For each implemented category, write out all its meta data.
+ for (int i = 0; i < CatDefCount; i++)
+ RewriteObjCCategoryImplDecl(CategoryImplementation[i], Result);
+
+ // Write objc_symtab metadata
+ /*
+ struct _objc_symtab
+ {
+ long sel_ref_cnt;
+ SEL *refs;
+ short cls_def_cnt;
+ short cat_def_cnt;
+ void *defs[cls_def_cnt + cat_def_cnt];
+ };
+ */
+
+ Result += "\nstruct _objc_symtab {\n";
+ Result += "\tlong sel_ref_cnt;\n";
+ Result += "\tSEL *refs;\n";
+ Result += "\tshort cls_def_cnt;\n";
+ Result += "\tshort cat_def_cnt;\n";
+ Result += "\tvoid *defs[" + utostr(ClsDefCount + CatDefCount)+ "];\n";
+ Result += "};\n\n";
+
+ Result += "static struct _objc_symtab "
+ "_OBJC_SYMBOLS __attribute__((used, section (\"__OBJC, __symbols\")))= {\n";
+ Result += "\t0, 0, " + utostr(ClsDefCount)
+ + ", " + utostr(CatDefCount) + "\n";
+ for (int i = 0; i < ClsDefCount; i++) {
+ Result += "\t,&_OBJC_CLASS_";
+ Result += ClassImplementation[i]->getNameAsString();
+ Result += "\n";
+ }
+
+ for (int i = 0; i < CatDefCount; i++) {
+ Result += "\t,&_OBJC_CATEGORY_";
+ Result += CategoryImplementation[i]->getClassInterface()->getNameAsString();
+ Result += "_";
+ Result += CategoryImplementation[i]->getNameAsString();
+ Result += "\n";
+ }
+
+ Result += "};\n\n";
+
+ // Write objc_module metadata
+
+ /*
+ struct _objc_module {
+ long version;
+ long size;
+ const char *name;
+ struct _objc_symtab *symtab;
+ }
+ */
+
+ Result += "\nstruct _objc_module {\n";
+ Result += "\tlong version;\n";
+ Result += "\tlong size;\n";
+ Result += "\tconst char *name;\n";
+ Result += "\tstruct _objc_symtab *symtab;\n";
+ Result += "};\n\n";
+ Result += "static struct _objc_module "
+ "_OBJC_MODULES __attribute__ ((used, section (\"__OBJC, __module_info\")))= {\n";
+ Result += "\t" + utostr(OBJC_ABI_VERSION) +
+ ", sizeof(struct _objc_module), \"\", &_OBJC_SYMBOLS\n";
+ Result += "};\n\n";
+
+ if (LangOpts.MicrosoftExt) {
+ if (ProtocolExprDecls.size()) {
+ Result += "#pragma section(\".objc_protocol$B\",long,read,write)\n";
+ Result += "#pragma data_seg(push, \".objc_protocol$B\")\n";
+ for (llvm::SmallPtrSet<ObjCProtocolDecl *,8>::iterator I = ProtocolExprDecls.begin(),
+ E = ProtocolExprDecls.end(); I != E; ++I) {
+ Result += "static struct _objc_protocol *_POINTER_OBJC_PROTOCOL_";
+ Result += (*I)->getNameAsString();
+ Result += " = &_OBJC_PROTOCOL_";
+ Result += (*I)->getNameAsString();
+ Result += ";\n";
+ }
+ Result += "#pragma data_seg(pop)\n\n";
+ }
+ Result += "#pragma section(\".objc_module_info$B\",long,read,write)\n";
+ Result += "#pragma data_seg(push, \".objc_module_info$B\")\n";
+ Result += "static struct _objc_module *_POINTER_OBJC_MODULES = ";
+ Result += "&_OBJC_MODULES;\n";
+ Result += "#pragma data_seg(pop)\n\n";
+ }
+}
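+// Illustrative output for a single implemented class "Foo" (hypothetical) and
+// no categories; the code above emits, roughly:
+//   static struct _objc_symtab _OBJC_SYMBOLS
+//     __attribute__((used, section ("__OBJC, __symbols"))) = {
+//       0, 0, 1, 0
+//       ,&_OBJC_CLASS_Foo
+//   };
+//   static struct _objc_module _OBJC_MODULES
+//     __attribute__ ((used, section ("__OBJC, __module_info"))) = {
+//       OBJC_ABI_VERSION, sizeof(struct _objc_module), "", &_OBJC_SYMBOLS
+//   };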
+
+/// RewriteObjCCategoryImplDecl - Rewrite metadata for each category
+/// implementation.
+void RewriteObjCFragileABI::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl,
+ std::string &Result) {
+ ObjCInterfaceDecl *ClassDecl = IDecl->getClassInterface();
+ // Find category declaration for this implementation.
+ ObjCCategoryDecl *CDecl;
+ for (CDecl = ClassDecl->getCategoryList(); CDecl;
+ CDecl = CDecl->getNextClassCategory())
+ if (CDecl->getIdentifier() == IDecl->getIdentifier())
+ break;
+
+ std::string FullCategoryName = ClassDecl->getNameAsString();
+ FullCategoryName += '_';
+ FullCategoryName += IDecl->getNameAsString();
+
+ // Build _objc_method_list for class's instance methods if needed
+ SmallVector<ObjCMethodDecl *, 32>
+ InstanceMethods(IDecl->instmeth_begin(), IDecl->instmeth_end());
+
+ // If any of our property implementations have associated getters or
+ // setters, produce metadata for them as well.
+ for (ObjCImplDecl::propimpl_iterator Prop = IDecl->propimpl_begin(),
+ PropEnd = IDecl->propimpl_end();
+ Prop != PropEnd; ++Prop) {
+ if ((*Prop)->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ continue;
+ if (!(*Prop)->getPropertyIvarDecl())
+ continue;
+ ObjCPropertyDecl *PD = (*Prop)->getPropertyDecl();
+ if (!PD)
+ continue;
+ if (ObjCMethodDecl *Getter = PD->getGetterMethodDecl())
+ InstanceMethods.push_back(Getter);
+ if (PD->isReadOnly())
+ continue;
+ if (ObjCMethodDecl *Setter = PD->getSetterMethodDecl())
+ InstanceMethods.push_back(Setter);
+ }
+ RewriteObjCMethodsMetaData(InstanceMethods.begin(), InstanceMethods.end(),
+ true, "CATEGORY_", FullCategoryName.c_str(),
+ Result);
+
+ // Build _objc_method_list for class's class methods if needed
+ RewriteObjCMethodsMetaData(IDecl->classmeth_begin(), IDecl->classmeth_end(),
+ false, "CATEGORY_", FullCategoryName.c_str(),
+ Result);
+
+ // Protocols referenced in class declaration?
+  // A null CDecl is the case of a category implementation with no category interface.
+ if (CDecl)
+ RewriteObjCProtocolListMetaData(CDecl->getReferencedProtocols(), "CATEGORY",
+ FullCategoryName, Result);
+ /* struct _objc_category {
+ char *category_name;
+ char *class_name;
+ struct _objc_method_list *instance_methods;
+ struct _objc_method_list *class_methods;
+ struct _objc_protocol_list *protocols;
+ // Objective-C 1.0 extensions
+ uint32_t size; // sizeof (struct _objc_category)
+ struct _objc_property_list *instance_properties; // category's own
+ // @property decl.
+ };
+ */
+
+ static bool objc_category = false;
+ if (!objc_category) {
+ Result += "\nstruct _objc_category {\n";
+ Result += "\tchar *category_name;\n";
+ Result += "\tchar *class_name;\n";
+ Result += "\tstruct _objc_method_list *instance_methods;\n";
+ Result += "\tstruct _objc_method_list *class_methods;\n";
+ Result += "\tstruct _objc_protocol_list *protocols;\n";
+ Result += "\tunsigned int size;\n";
+ Result += "\tstruct _objc_property_list *instance_properties;\n";
+ Result += "};\n";
+ objc_category = true;
+ }
+ Result += "\nstatic struct _objc_category _OBJC_CATEGORY_";
+ Result += FullCategoryName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __category\")))= {\n\t\"";
+ Result += IDecl->getNameAsString();
+ Result += "\"\n\t, \"";
+ Result += ClassDecl->getNameAsString();
+ Result += "\"\n";
+
+ if (IDecl->instmeth_begin() != IDecl->instmeth_end()) {
+ Result += "\t, (struct _objc_method_list *)"
+ "&_OBJC_CATEGORY_INSTANCE_METHODS_";
+ Result += FullCategoryName;
+ Result += "\n";
+ }
+ else
+ Result += "\t, 0\n";
+ if (IDecl->classmeth_begin() != IDecl->classmeth_end()) {
+ Result += "\t, (struct _objc_method_list *)"
+ "&_OBJC_CATEGORY_CLASS_METHODS_";
+ Result += FullCategoryName;
+ Result += "\n";
+ }
+ else
+ Result += "\t, 0\n";
+
+ if (CDecl && CDecl->protocol_begin() != CDecl->protocol_end()) {
+ Result += "\t, (struct _objc_protocol_list *)&_OBJC_CATEGORY_PROTOCOLS_";
+ Result += FullCategoryName;
+ Result += "\n";
+ }
+ else
+ Result += "\t, 0\n";
+ Result += "\t, sizeof(struct _objc_category), 0\n};\n";
+}
+
+/// RewriteObjCMethodsMetaData - Rewrite methods metadata for instance or
+/// class methods.
+template<typename MethodIterator>
+void RewriteObjCFragileABI::RewriteObjCMethodsMetaData(MethodIterator MethodBegin,
+ MethodIterator MethodEnd,
+ bool IsInstanceMethod,
+ StringRef prefix,
+ StringRef ClassName,
+ std::string &Result) {
+ if (MethodBegin == MethodEnd) return;
+
+ if (!objc_impl_method) {
+ /* struct _objc_method {
+ SEL _cmd;
+ char *method_types;
+ void *_imp;
+ }
+ */
+ Result += "\nstruct _objc_method {\n";
+ Result += "\tSEL _cmd;\n";
+ Result += "\tchar *method_types;\n";
+ Result += "\tvoid *_imp;\n";
+ Result += "};\n";
+
+ objc_impl_method = true;
+ }
+
+ // Build _objc_method_list for class's methods if needed
+
+ /* struct {
+ struct _objc_method_list *next_method;
+ int method_count;
+ struct _objc_method method_list[];
+ }
+ */
+ unsigned NumMethods = std::distance(MethodBegin, MethodEnd);
+ Result += "\nstatic struct {\n";
+ Result += "\tstruct _objc_method_list *next_method;\n";
+ Result += "\tint method_count;\n";
+ Result += "\tstruct _objc_method method_list[";
+ Result += utostr(NumMethods);
+ Result += "];\n} _OBJC_";
+ Result += prefix;
+ Result += IsInstanceMethod ? "INSTANCE" : "CLASS";
+ Result += "_METHODS_";
+ Result += ClassName;
+ Result += " __attribute__ ((used, section (\"__OBJC, __";
+ Result += IsInstanceMethod ? "inst" : "cls";
+ Result += "_meth\")))= ";
+ Result += "{\n\t0, " + utostr(NumMethods) + "\n";
+
+ Result += "\t,{{(SEL)\"";
+ Result += (*MethodBegin)->getSelector().getAsString().c_str();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\", (void *)";
+ Result += MethodInternalNames[*MethodBegin];
+ Result += "}\n";
+ for (++MethodBegin; MethodBegin != MethodEnd; ++MethodBegin) {
+ Result += "\t ,{(SEL)\"";
+ Result += (*MethodBegin)->getSelector().getAsString().c_str();
+ std::string MethodTypeString;
+ Context->getObjCEncodingForMethodDecl(*MethodBegin, MethodTypeString);
+ Result += "\", \"";
+ Result += MethodTypeString;
+ Result += "\", (void *)";
+ Result += MethodInternalNames[*MethodBegin];
+ Result += "}\n";
+ }
+ Result += "\t }\n};\n";
+}
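+// Illustrative output for an implementation of a hypothetical class "Foo"
+// defining one instance method -(void)m; the template above emits, roughly:
+//   static struct { struct _objc_method_list *next_method; int method_count;
+//                   struct _objc_method method_list[1]; }
+//     _OBJC_INSTANCE_METHODS_Foo
+//     __attribute__ ((used, section ("__OBJC, __inst_meth"))) =
+//       { 0, 1, {{(SEL)"m", "<type encoding>",
+//                 (void *)_I_Foo_m /* synthesized implementation name */}} };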
+
+Stmt *RewriteObjCFragileABI::RewriteObjCIvarRefExpr(ObjCIvarRefExpr *IV) {
+ SourceRange OldRange = IV->getSourceRange();
+ Expr *BaseExpr = IV->getBase();
+
+ // Rewrite the base, but without actually doing replaces.
+ {
+ DisableReplaceStmtScope S(*this);
+ BaseExpr = cast<Expr>(RewriteFunctionBodyOrGlobalInitializer(BaseExpr));
+ IV->setBase(BaseExpr);
+ }
+
+ ObjCIvarDecl *D = IV->getDecl();
+
+ Expr *Replacement = IV;
+ if (CurMethodDef) {
+ if (BaseExpr->getType()->isObjCObjectPointerType()) {
+ const ObjCInterfaceType *iFaceDecl =
+ dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
+ assert(iFaceDecl && "RewriteObjCIvarRefExpr - iFaceDecl is null");
+ // lookup which class implements the instance variable.
+ ObjCInterfaceDecl *clsDeclared = 0;
+ iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(),
+ clsDeclared);
+ assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
+
+ // Synthesize an explicit cast to gain access to the ivar.
+ std::string RecName = clsDeclared->getIdentifier()->getName();
+ RecName += "_IMPL";
+ IdentifierInfo *II = &Context->Idents.get(RecName);
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ II);
+ assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
+ QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
+ CK_BitCast,
+ IV->getBase());
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(OldRange.getBegin(),
+ OldRange.getEnd(),
+ castExpr);
+ if (IV->isFreeIvar() &&
+ declaresSameEntity(CurMethodDef->getClassInterface(), iFaceDecl->getDecl())) {
+ MemberExpr *ME = new (Context) MemberExpr(PE, true, D,
+ IV->getLocation(),
+ D->getType(),
+ VK_LValue, OK_Ordinary);
+ Replacement = ME;
+ } else {
+ IV->setBase(PE);
+ }
+ }
+ } else { // we are outside a method.
+ assert(!IV->isFreeIvar() && "Cannot have a free standing ivar outside a method");
+
+ // Explicit ivar refs need to have a cast inserted.
+ // FIXME: consider sharing some of this code with the code above.
+ if (BaseExpr->getType()->isObjCObjectPointerType()) {
+ const ObjCInterfaceType *iFaceDecl =
+ dyn_cast<ObjCInterfaceType>(BaseExpr->getType()->getPointeeType());
+ // lookup which class implements the instance variable.
+ ObjCInterfaceDecl *clsDeclared = 0;
+ iFaceDecl->getDecl()->lookupInstanceVariable(D->getIdentifier(),
+ clsDeclared);
+ assert(clsDeclared && "RewriteObjCIvarRefExpr(): Can't find class");
+
+ // Synthesize an explicit cast to gain access to the ivar.
+ std::string RecName = clsDeclared->getIdentifier()->getName();
+ RecName += "_IMPL";
+ IdentifierInfo *II = &Context->Idents.get(RecName);
+ RecordDecl *RD = RecordDecl::Create(*Context, TTK_Struct, TUDecl,
+ SourceLocation(), SourceLocation(),
+ II);
+ assert(RD && "RewriteObjCIvarRefExpr(): Can't find RecordDecl");
+ QualType castT = Context->getPointerType(Context->getTagDeclType(RD));
+ CastExpr *castExpr = NoTypeInfoCStyleCastExpr(Context, castT,
+ CK_BitCast,
+ IV->getBase());
+ // Don't forget the parens to enforce the proper binding.
+ ParenExpr *PE = new (Context) ParenExpr(IV->getBase()->getLocStart(),
+ IV->getBase()->getLocEnd(), castExpr);
+ // Cannot delete IV->getBase(), since PE points to it.
+ // Replace the old base with the cast. This is important when doing
+ // embedded rewrites. For example, [newInv->_container addObject:0].
+ IV->setBase(PE);
+ }
+ }
+
+ ReplaceStmtWithRange(IV, Replacement, OldRange);
+ return Replacement;
+}
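+// Illustrative result (hypothetical class "Foo" with ivar "bar"): inside one of
+// Foo's own methods the free ivar reference "bar" becomes, roughly,
+//   ((struct Foo_IMPL *)self)->bar
+// while an explicit reference such as "p->bar" only has its base rewritten,
+// yielding "((struct Foo_IMPL *)p)->bar".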
+
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp
new file mode 100644
index 0000000..6c211b2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteRope.cpp
@@ -0,0 +1,806 @@
+//===--- RewriteRope.cpp - Rope specialized for rewriter --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the RewriteRope class, which is a powerful string class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/RewriteRope.h"
+#include "clang/Basic/LLVM.h"
+#include <algorithm>
+using namespace clang;
+
+/// RewriteRope is a "strong" string class, designed to make insertions and
+/// deletions in the middle of the string nearly constant time (really, they are
+/// O(log N), but with a very low constant factor).
+///
+/// The implementation of this data structure is conceptually a linear sequence
+/// of RopePiece elements. Each RopePiece represents a view on a separately
+/// allocated and reference counted string. This means that splitting a very
+/// long string can be done in constant time by splitting a RopePiece that
+/// references the whole string into two rope pieces that reference each half.
+/// Once split, another string can be inserted in between the two halves by
+/// inserting a RopePiece in between the two others. All of this is very
+/// inexpensive: it takes time proportional to the number of RopePieces, not the
+/// length of the strings they represent.
+///
+/// While a linear sequence of RopePieces is the conceptual model, the actual
+/// implementation captures them in an adapted B+ Tree. Using a B+ tree (a
+/// tree that keeps its values in the leaves and in which each node contains
+/// a reasonable number of pointers to children/values) allows us to
+/// maintain efficient operation when the RewriteRope contains a *huge* number
+/// of RopePieces. The basic idea of the B+ Tree is that it allows us to find
+/// the RopePiece corresponding to some offset very efficiently, and it
+/// automatically balances itself on insertions of RopePieces (which can happen
+/// for both insertions and erases of string ranges).
+///
+/// The one wrinkle on the theory is that we don't attempt to keep the tree
+/// properly balanced when erases happen. Erasing string data can either insert
+/// new RopePieces (e.g. when the middle of some other rope piece is deleted,
+/// which results in two rope pieces, just like an insert) or
+/// reduce the number of RopePieces maintained by the B+Tree. In the case when
+/// the number of RopePieces is reduced, we don't attempt to maintain the
+/// standard 'invariant' that each node in the tree contains at least
+/// 'WidthFactor' children/values. For our use cases, this doesn't seem to
+/// matter.
+///
+/// The implementation below is expressed primarily in terms of three classes:
+/// RopePieceBTreeNode - Common base class for:
+///
+/// RopePieceBTreeLeaf - Directly manages up to '2*WidthFactor' RopePiece
+/// nodes. This directly represents a chunk of the string with those
+///      RopePieces concatenated.
+/// RopePieceBTreeInterior - An interior node in the B+ Tree, which manages
+/// up to '2*WidthFactor' other nodes in the tree.
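+///
+/// A small worked example of the piece model: if the rope holds the single
+/// piece ["hello world"], inserting "brave " at offset 6 first splits that
+/// piece into ["hello "]["world"] and then inserts a new piece in between,
+/// giving ["hello "]["brave "]["world"]. No character data is copied or moved;
+/// only the (offset, length) views and the B+ tree bookkeeping change.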
+
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTreeNode Class
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// RopePieceBTreeNode - Common base class of RopePieceBTreeLeaf and
+ /// RopePieceBTreeInterior. This provides some 'virtual' dispatching methods
+ /// and a flag that determines which subclass the instance is. Also
+  /// important, this node knows the full extent of the node, including any
+ /// children that it has. This allows efficient skipping over entire subtrees
+ /// when looking for an offset in the BTree.
+ class RopePieceBTreeNode {
+ protected:
+ /// WidthFactor - This controls the number of K/V slots held in the BTree:
+ /// how wide it is. Each level of the BTree is guaranteed to have at least
+    /// 'WidthFactor' elements in it (either RopePieces or children), except
+    /// the root, which may have fewer, and may have at most 2*WidthFactor
+ /// elements.
+ enum { WidthFactor = 8 };
+
+    /// Size - This is the number of bytes of the file that this node
+    /// (including any children) covers.
+ unsigned Size;
+
+ /// IsLeaf - True if this is an instance of RopePieceBTreeLeaf, false if it
+ /// is an instance of RopePieceBTreeInterior.
+ bool IsLeaf;
+
+ RopePieceBTreeNode(bool isLeaf) : Size(0), IsLeaf(isLeaf) {}
+ ~RopePieceBTreeNode() {}
+ public:
+
+ bool isLeaf() const { return IsLeaf; }
+ unsigned size() const { return Size; }
+
+ void Destroy();
+
+ /// split - Split the range containing the specified offset so that we are
+ /// guaranteed that there is a place to do an insertion at the specified
+ /// offset. The offset is relative, so "0" is the start of the node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *split(unsigned Offset);
+
+ /// insert - Insert the specified ropepiece into this tree node at the
+ /// specified offset. The offset is relative, so "0" is the start of the
+ /// node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R);
+
+ /// erase - Remove NumBytes from this node at the specified offset. We are
+ /// guaranteed that there is a split at Offset.
+ void erase(unsigned Offset, unsigned NumBytes);
+
+ //static inline bool classof(const RopePieceBTreeNode *) { return true; }
+
+ };
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTreeLeaf Class
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// RopePieceBTreeLeaf - Directly manages up to '2*WidthFactor' RopePiece
+ /// nodes. This directly represents a chunk of the string with those
+  /// RopePieces concatenated. Since this is a B+Tree, all values (in this case
+ /// instances of RopePiece) are stored in leaves like this. To make iteration
+ /// over the leaves efficient, they maintain a singly linked list through the
+ /// NextLeaf field. This allows the B+Tree forward iterator to be constant
+ /// time for all increments.
+ class RopePieceBTreeLeaf : public RopePieceBTreeNode {
+ /// NumPieces - This holds the number of rope pieces currently active in the
+ /// Pieces array.
+ unsigned char NumPieces;
+
+ /// Pieces - This tracks the file chunks currently in this leaf.
+ ///
+ RopePiece Pieces[2*WidthFactor];
+
+    /// NextLeaf - This is a pointer to the next leaf in the tree, allowing
+    /// efficient in-order forward iteration of the tree without traversal.
+    /// PrevLeaf points at the 'NextLeaf' link that refers to this leaf, so the
+    /// leaf can unlink itself from the list in constant time.
+ RopePieceBTreeLeaf **PrevLeaf, *NextLeaf;
+ public:
+ RopePieceBTreeLeaf() : RopePieceBTreeNode(true), NumPieces(0),
+ PrevLeaf(0), NextLeaf(0) {}
+ ~RopePieceBTreeLeaf() {
+ if (PrevLeaf || NextLeaf)
+ removeFromLeafInOrder();
+ clear();
+ }
+
+ bool isFull() const { return NumPieces == 2*WidthFactor; }
+
+ /// clear - Remove all rope pieces from this leaf.
+ void clear() {
+ while (NumPieces)
+ Pieces[--NumPieces] = RopePiece();
+ Size = 0;
+ }
+
+ unsigned getNumPieces() const { return NumPieces; }
+
+ const RopePiece &getPiece(unsigned i) const {
+ assert(i < getNumPieces() && "Invalid piece ID");
+ return Pieces[i];
+ }
+
+ const RopePieceBTreeLeaf *getNextLeafInOrder() const { return NextLeaf; }
+ void insertAfterLeafInOrder(RopePieceBTreeLeaf *Node) {
+ assert(PrevLeaf == 0 && NextLeaf == 0 && "Already in ordering");
+
+ NextLeaf = Node->NextLeaf;
+ if (NextLeaf)
+ NextLeaf->PrevLeaf = &NextLeaf;
+ PrevLeaf = &Node->NextLeaf;
+ Node->NextLeaf = this;
+ }
+
+ void removeFromLeafInOrder() {
+ if (PrevLeaf) {
+ *PrevLeaf = NextLeaf;
+ if (NextLeaf)
+ NextLeaf->PrevLeaf = PrevLeaf;
+ } else if (NextLeaf) {
+ NextLeaf->PrevLeaf = 0;
+ }
+ }
+
+ /// FullRecomputeSizeLocally - This method recomputes the 'Size' field by
+ /// summing the size of all RopePieces.
+ void FullRecomputeSizeLocally() {
+ Size = 0;
+ for (unsigned i = 0, e = getNumPieces(); i != e; ++i)
+ Size += getPiece(i).size();
+ }
+
+ /// split - Split the range containing the specified offset so that we are
+ /// guaranteed that there is a place to do an insertion at the specified
+ /// offset. The offset is relative, so "0" is the start of the node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *split(unsigned Offset);
+
+ /// insert - Insert the specified ropepiece into this tree node at the
+ /// specified offset. The offset is relative, so "0" is the start of the
+ /// node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R);
+
+
+ /// erase - Remove NumBytes from this node at the specified offset. We are
+ /// guaranteed that there is a split at Offset.
+ void erase(unsigned Offset, unsigned NumBytes);
+
+ //static inline bool classof(const RopePieceBTreeLeaf *) { return true; }
+ static inline bool classof(const RopePieceBTreeNode *N) {
+ return N->isLeaf();
+ }
+ };
+} // end anonymous namespace
+
+/// split - Split the range containing the specified offset so that we are
+/// guaranteed that there is a place to do an insertion at the specified
+/// offset. The offset is relative, so "0" is the start of the node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeLeaf::split(unsigned Offset) {
+  // Look for an existing split point at the specified offset; if one already
+  // exists, there is nothing to do.
+ if (Offset == 0 || Offset == size()) {
+ // Fastpath for a common case. There is already a splitpoint at the end.
+ return 0;
+ }
+
+ // Find the piece that this offset lands in.
+ unsigned PieceOffs = 0;
+ unsigned i = 0;
+ while (Offset >= PieceOffs+Pieces[i].size()) {
+ PieceOffs += Pieces[i].size();
+ ++i;
+ }
+
+ // If there is already a split point at the specified offset, just return
+ // success.
+ if (PieceOffs == Offset)
+ return 0;
+
+ // Otherwise, we need to split piece 'i' at Offset-PieceOffs. Convert Offset
+ // to being Piece relative.
+ unsigned IntraPieceOffset = Offset-PieceOffs;
+
+ // We do this by shrinking the RopePiece and then doing an insert of the tail.
+ RopePiece Tail(Pieces[i].StrData, Pieces[i].StartOffs+IntraPieceOffset,
+ Pieces[i].EndOffs);
+ Size -= Pieces[i].size();
+ Pieces[i].EndOffs = Pieces[i].StartOffs+IntraPieceOffset;
+ Size += Pieces[i].size();
+
+ return insert(Offset, Tail);
+}
+
+
+/// insert - Insert the specified RopePiece into this tree node at the
+/// specified offset. The offset is relative, so "0" is the start of the node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeLeaf::insert(unsigned Offset,
+ const RopePiece &R) {
+ // If this node is not full, insert the piece.
+ if (!isFull()) {
+ // Find the insertion point. We are guaranteed that there is a split at the
+ // specified offset so find it.
+ unsigned i = 0, e = getNumPieces();
+ if (Offset == size()) {
+ // Fastpath for a common case.
+ i = e;
+ } else {
+ unsigned SlotOffs = 0;
+ for (; Offset > SlotOffs; ++i)
+ SlotOffs += getPiece(i).size();
+ assert(SlotOffs == Offset && "Split didn't occur before insertion!");
+ }
+
+ // For an insertion into a non-full leaf node, just insert the value in
+ // its sorted position. This requires moving later values over.
+ for (; i != e; --e)
+ Pieces[e] = Pieces[e-1];
+ Pieces[i] = R;
+ ++NumPieces;
+ Size += R.size();
+ return 0;
+ }
+
+  // Otherwise, this leaf is full: split it into two halves. Since this
+ // node is full, it contains 2*WidthFactor values. We move the first
+ // 'WidthFactor' values to the LHS child (which we leave in this node) and
+ // move the last 'WidthFactor' values into the RHS child.
+
+ // Create the new node.
+ RopePieceBTreeLeaf *NewNode = new RopePieceBTreeLeaf();
+
+ // Move over the last 'WidthFactor' values from here to NewNode.
+ std::copy(&Pieces[WidthFactor], &Pieces[2*WidthFactor],
+ &NewNode->Pieces[0]);
+ // Replace old pieces with null RopePieces to drop refcounts.
+ std::fill(&Pieces[WidthFactor], &Pieces[2*WidthFactor], RopePiece());
+
+ // Decrease the number of values in the two nodes.
+ NewNode->NumPieces = NumPieces = WidthFactor;
+
+ // Recompute the two nodes' size.
+ NewNode->FullRecomputeSizeLocally();
+ FullRecomputeSizeLocally();
+
+ // Update the list of leaves.
+ NewNode->insertAfterLeafInOrder(this);
+
+ // These insertions can't fail.
+ if (this->size() >= Offset)
+ this->insert(Offset, R);
+ else
+ NewNode->insert(Offset - this->size(), R);
+ return NewNode;
+}
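+// A worked sketch of the full-leaf path above, using the WidthFactor of 8
+// defined in RopePieceBTreeNode (so a full leaf holds 16 pieces):
+//
+//   full leaf L --insert--> L keeps pieces [0,8), NewNode receives pieces
+//   [8,16), the new piece is inserted into whichever half covers Offset, and
+//   NewNode is returned so the caller (an interior node's HandleChildPiece, or
+//   RopePieceBTree::insert at the root) can link it into the tree.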
+
+/// erase - Remove NumBytes from this node at the specified offset. We are
+/// guaranteed that there is a split at Offset.
+void RopePieceBTreeLeaf::erase(unsigned Offset, unsigned NumBytes) {
+ // Since we are guaranteed that there is a split at Offset, we start by
+ // finding the Piece that starts there.
+ unsigned PieceOffs = 0;
+ unsigned i = 0;
+ for (; Offset > PieceOffs; ++i)
+ PieceOffs += getPiece(i).size();
+ assert(PieceOffs == Offset && "Split didn't occur before erase!");
+
+ unsigned StartPiece = i;
+
+ // Figure out how many pieces completely cover 'NumBytes'. We want to remove
+ // all of them.
+ for (; Offset+NumBytes > PieceOffs+getPiece(i).size(); ++i)
+ PieceOffs += getPiece(i).size();
+
+ // If we exactly include the last one, include it in the region to delete.
+ if (Offset+NumBytes == PieceOffs+getPiece(i).size())
+ PieceOffs += getPiece(i).size(), ++i;
+
+ // If we completely cover some RopePieces, erase them now.
+ if (i != StartPiece) {
+ unsigned NumDeleted = i-StartPiece;
+ for (; i != getNumPieces(); ++i)
+ Pieces[i-NumDeleted] = Pieces[i];
+
+ // Drop references to dead rope pieces.
+ std::fill(&Pieces[getNumPieces()-NumDeleted], &Pieces[getNumPieces()],
+ RopePiece());
+ NumPieces -= NumDeleted;
+
+ unsigned CoverBytes = PieceOffs-Offset;
+ NumBytes -= CoverBytes;
+ Size -= CoverBytes;
+ }
+
+  // If erasing whole pieces covered the entire range, we are done.
+ if (NumBytes == 0) return;
+
+  // Okay, we might now be erasing part of some Piece. If this is the case,
+  // then move the start point of the piece.
+ assert(getPiece(StartPiece).size() > NumBytes);
+ Pieces[StartPiece].StartOffs += NumBytes;
+
+  // The size of this node just shrank by NumBytes.
+ Size -= NumBytes;
+}
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTreeInterior Class
+//===----------------------------------------------------------------------===//
+
+namespace {
+ /// RopePieceBTreeInterior - This represents an interior node in the B+Tree,
+ /// which holds up to 2*WidthFactor pointers to child nodes.
+ class RopePieceBTreeInterior : public RopePieceBTreeNode {
+ /// NumChildren - This holds the number of children currently active in the
+ /// Children array.
+ unsigned char NumChildren;
+ RopePieceBTreeNode *Children[2*WidthFactor];
+ public:
+ RopePieceBTreeInterior() : RopePieceBTreeNode(false), NumChildren(0) {}
+
+ RopePieceBTreeInterior(RopePieceBTreeNode *LHS, RopePieceBTreeNode *RHS)
+ : RopePieceBTreeNode(false) {
+ Children[0] = LHS;
+ Children[1] = RHS;
+ NumChildren = 2;
+ Size = LHS->size() + RHS->size();
+ }
+
+ bool isFull() const { return NumChildren == 2*WidthFactor; }
+
+ unsigned getNumChildren() const { return NumChildren; }
+ const RopePieceBTreeNode *getChild(unsigned i) const {
+ assert(i < NumChildren && "invalid child #");
+ return Children[i];
+ }
+ RopePieceBTreeNode *getChild(unsigned i) {
+ assert(i < NumChildren && "invalid child #");
+ return Children[i];
+ }
+
+ /// FullRecomputeSizeLocally - Recompute the Size field of this node by
+ /// summing up the sizes of the child nodes.
+ void FullRecomputeSizeLocally() {
+ Size = 0;
+ for (unsigned i = 0, e = getNumChildren(); i != e; ++i)
+ Size += getChild(i)->size();
+ }
+
+
+ /// split - Split the range containing the specified offset so that we are
+ /// guaranteed that there is a place to do an insertion at the specified
+ /// offset. The offset is relative, so "0" is the start of the node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *split(unsigned Offset);
+
+
+ /// insert - Insert the specified ropepiece into this tree node at the
+ /// specified offset. The offset is relative, so "0" is the start of the
+ /// node.
+ ///
+ /// If there is no space in this subtree for the extra piece, the extra tree
+ /// node is returned and must be inserted into a parent.
+ RopePieceBTreeNode *insert(unsigned Offset, const RopePiece &R);
+
+ /// HandleChildPiece - A child propagated an insertion result up to us.
+ /// Insert the new child, and/or propagate the result further up the tree.
+ RopePieceBTreeNode *HandleChildPiece(unsigned i, RopePieceBTreeNode *RHS);
+
+ /// erase - Remove NumBytes from this node at the specified offset. We are
+ /// guaranteed that there is a split at Offset.
+ void erase(unsigned Offset, unsigned NumBytes);
+
+ //static inline bool classof(const RopePieceBTreeInterior *) { return true; }
+ static inline bool classof(const RopePieceBTreeNode *N) {
+ return !N->isLeaf();
+ }
+ };
+} // end anonymous namespace
+
+/// split - Split the range containing the specified offset so that we are
+/// guaranteed that there is a place to do an insertion at the specified
+/// offset. The offset is relative, so "0" is the start of the node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeInterior::split(unsigned Offset) {
+ // Figure out which child to split.
+ if (Offset == 0 || Offset == size())
+ return 0; // If we have an exact offset, we're already split.
+
+ unsigned ChildOffset = 0;
+ unsigned i = 0;
+ for (; Offset >= ChildOffset+getChild(i)->size(); ++i)
+ ChildOffset += getChild(i)->size();
+
+ // If already split there, we're done.
+ if (ChildOffset == Offset)
+ return 0;
+
+ // Otherwise, recursively split the child.
+ if (RopePieceBTreeNode *RHS = getChild(i)->split(Offset-ChildOffset))
+ return HandleChildPiece(i, RHS);
+ return 0; // Done!
+}
+
+/// insert - Insert the specified ropepiece into this tree node at the
+/// specified offset. The offset is relative, so "0" is the start of the
+/// node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeInterior::insert(unsigned Offset,
+ const RopePiece &R) {
+ // Find the insertion point. We are guaranteed that there is a split at the
+ // specified offset so find it.
+ unsigned i = 0, e = getNumChildren();
+
+ unsigned ChildOffs = 0;
+ if (Offset == size()) {
+ // Fastpath for a common case. Insert at end of last child.
+ i = e-1;
+ ChildOffs = size()-getChild(i)->size();
+ } else {
+ for (; Offset > ChildOffs+getChild(i)->size(); ++i)
+ ChildOffs += getChild(i)->size();
+ }
+
+ Size += R.size();
+
+ // Insert at the end of this child.
+ if (RopePieceBTreeNode *RHS = getChild(i)->insert(Offset-ChildOffs, R))
+ return HandleChildPiece(i, RHS);
+
+ return 0;
+}
+
+/// HandleChildPiece - A child propagated an insertion result up to us.
+/// Insert the new child, and/or propagate the result further up the tree.
+RopePieceBTreeNode *
+RopePieceBTreeInterior::HandleChildPiece(unsigned i, RopePieceBTreeNode *RHS) {
+  // The child propagated a subtree up to us as a new child. See if we have
+  // space for it here.
+ if (!isFull()) {
+ // Insert RHS after child 'i'.
+ if (i + 1 != getNumChildren())
+ memmove(&Children[i+2], &Children[i+1],
+ (getNumChildren()-i-1)*sizeof(Children[0]));
+ Children[i+1] = RHS;
+ ++NumChildren;
+ return 0;
+ }
+
+ // Okay, this node is full. Split it in half, moving WidthFactor children to
+ // a newly allocated interior node.
+
+ // Create the new node.
+ RopePieceBTreeInterior *NewNode = new RopePieceBTreeInterior();
+
+ // Move over the last 'WidthFactor' values from here to NewNode.
+ memcpy(&NewNode->Children[0], &Children[WidthFactor],
+ WidthFactor*sizeof(Children[0]));
+
+ // Decrease the number of values in the two nodes.
+ NewNode->NumChildren = NumChildren = WidthFactor;
+
+  // Finally, insert the new child into the side that can (now) hold it.
+ // These insertions can't fail.
+ if (i < WidthFactor)
+ this->HandleChildPiece(i, RHS);
+ else
+ NewNode->HandleChildPiece(i-WidthFactor, RHS);
+
+ // Recompute the two nodes' size.
+ NewNode->FullRecomputeSizeLocally();
+ FullRecomputeSizeLocally();
+ return NewNode;
+}
+
+/// erase - Remove NumBytes from this node at the specified offset. We are
+/// guaranteed that there is a split at Offset.
+void RopePieceBTreeInterior::erase(unsigned Offset, unsigned NumBytes) {
+ // This will shrink this node by NumBytes.
+ Size -= NumBytes;
+
+ // Find the first child that overlaps with Offset.
+ unsigned i = 0;
+ for (; Offset >= getChild(i)->size(); ++i)
+ Offset -= getChild(i)->size();
+
+ // Propagate the delete request into overlapping children, or completely
+ // delete the children as appropriate.
+ while (NumBytes) {
+ RopePieceBTreeNode *CurChild = getChild(i);
+
+ // If we are deleting something contained entirely in the child, pass on the
+ // request.
+ if (Offset+NumBytes < CurChild->size()) {
+ CurChild->erase(Offset, NumBytes);
+ return;
+ }
+
+ // If this deletion request starts somewhere in the middle of the child, it
+ // must be deleting to the end of the child.
+ if (Offset) {
+ unsigned BytesFromChild = CurChild->size()-Offset;
+ CurChild->erase(Offset, BytesFromChild);
+ NumBytes -= BytesFromChild;
+ // Start at the beginning of the next child.
+ Offset = 0;
+ ++i;
+ continue;
+ }
+
+ // If the deletion request completely covers the child, delete it and move
+ // the rest down.
+ NumBytes -= CurChild->size();
+ CurChild->Destroy();
+ --NumChildren;
+ if (i != getNumChildren())
+ memmove(&Children[i], &Children[i+1],
+ (getNumChildren()-i)*sizeof(Children[0]));
+ }
+}
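+// Note that, as described in the file header, this is where the usual B+Tree
+// invariant is relaxed: deleting whole children above can leave this interior
+// node with fewer than 'WidthFactor' children, and no rebalancing is attempted.
+// For example, erasing a range that completely covers 10 of 16 children leaves
+// 6 children here, below WidthFactor (8), which is tolerated for this use case.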
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTreeNode Implementation
+//===----------------------------------------------------------------------===//
+
+void RopePieceBTreeNode::Destroy() {
+ if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
+ delete Leaf;
+ else
+ delete cast<RopePieceBTreeInterior>(this);
+}
+
+/// split - Split the range containing the specified offset so that we are
+/// guaranteed that there is a place to do an insertion at the specified
+/// offset. The offset is relative, so "0" is the start of the node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeNode::split(unsigned Offset) {
+ assert(Offset <= size() && "Invalid offset to split!");
+ if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
+ return Leaf->split(Offset);
+ return cast<RopePieceBTreeInterior>(this)->split(Offset);
+}
+
+/// insert - Insert the specified ropepiece into this tree node at the
+/// specified offset. The offset is relative, so "0" is the start of the
+/// node.
+///
+/// If there is no space in this subtree for the extra piece, the extra tree
+/// node is returned and must be inserted into a parent.
+RopePieceBTreeNode *RopePieceBTreeNode::insert(unsigned Offset,
+ const RopePiece &R) {
+ assert(Offset <= size() && "Invalid offset to insert!");
+ if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
+ return Leaf->insert(Offset, R);
+ return cast<RopePieceBTreeInterior>(this)->insert(Offset, R);
+}
+
+/// erase - Remove NumBytes from this node at the specified offset. We are
+/// guaranteed that there is a split at Offset.
+void RopePieceBTreeNode::erase(unsigned Offset, unsigned NumBytes) {
+ assert(Offset+NumBytes <= size() && "Invalid offset to erase!");
+ if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(this))
+ return Leaf->erase(Offset, NumBytes);
+ return cast<RopePieceBTreeInterior>(this)->erase(Offset, NumBytes);
+}
+
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTreeIterator Implementation
+//===----------------------------------------------------------------------===//
+
+static const RopePieceBTreeLeaf *getCN(const void *P) {
+ return static_cast<const RopePieceBTreeLeaf*>(P);
+}
+
+// begin iterator.
+RopePieceBTreeIterator::RopePieceBTreeIterator(const void *n) {
+ const RopePieceBTreeNode *N = static_cast<const RopePieceBTreeNode*>(n);
+
+ // Walk down the left side of the tree until we get to a leaf.
+ while (const RopePieceBTreeInterior *IN = dyn_cast<RopePieceBTreeInterior>(N))
+ N = IN->getChild(0);
+
+ // We must have at least one leaf.
+ CurNode = cast<RopePieceBTreeLeaf>(N);
+
+ // If we found a leaf that happens to be empty, skip over it until we get
+ // to something full.
+ while (CurNode && getCN(CurNode)->getNumPieces() == 0)
+ CurNode = getCN(CurNode)->getNextLeafInOrder();
+
+ if (CurNode != 0)
+ CurPiece = &getCN(CurNode)->getPiece(0);
+ else // Empty tree, this is an end() iterator.
+ CurPiece = 0;
+ CurChar = 0;
+}
+
+void RopePieceBTreeIterator::MoveToNextPiece() {
+ if (CurPiece != &getCN(CurNode)->getPiece(getCN(CurNode)->getNumPieces()-1)) {
+ CurChar = 0;
+ ++CurPiece;
+ return;
+ }
+
+ // Find the next non-empty leaf node.
+ do
+ CurNode = getCN(CurNode)->getNextLeafInOrder();
+ while (CurNode && getCN(CurNode)->getNumPieces() == 0);
+
+ if (CurNode != 0)
+ CurPiece = &getCN(CurNode)->getPiece(0);
+ else // Hit end().
+ CurPiece = 0;
+ CurChar = 0;
+}
+
+//===----------------------------------------------------------------------===//
+// RopePieceBTree Implementation
+//===----------------------------------------------------------------------===//
+
+static RopePieceBTreeNode *getRoot(void *P) {
+ return static_cast<RopePieceBTreeNode*>(P);
+}
+
+RopePieceBTree::RopePieceBTree() {
+ Root = new RopePieceBTreeLeaf();
+}
+RopePieceBTree::RopePieceBTree(const RopePieceBTree &RHS) {
+ assert(RHS.empty() && "Can't copy non-empty tree yet");
+ Root = new RopePieceBTreeLeaf();
+}
+RopePieceBTree::~RopePieceBTree() {
+ getRoot(Root)->Destroy();
+}
+
+unsigned RopePieceBTree::size() const {
+ return getRoot(Root)->size();
+}
+
+void RopePieceBTree::clear() {
+ if (RopePieceBTreeLeaf *Leaf = dyn_cast<RopePieceBTreeLeaf>(getRoot(Root)))
+ Leaf->clear();
+ else {
+ getRoot(Root)->Destroy();
+ Root = new RopePieceBTreeLeaf();
+ }
+}
+
+void RopePieceBTree::insert(unsigned Offset, const RopePiece &R) {
+ // #1. Split at Offset.
+ if (RopePieceBTreeNode *RHS = getRoot(Root)->split(Offset))
+ Root = new RopePieceBTreeInterior(getRoot(Root), RHS);
+
+ // #2. Do the insertion.
+ if (RopePieceBTreeNode *RHS = getRoot(Root)->insert(Offset, R))
+ Root = new RopePieceBTreeInterior(getRoot(Root), RHS);
+}
+
+void RopePieceBTree::erase(unsigned Offset, unsigned NumBytes) {
+ // #1. Split at Offset.
+ if (RopePieceBTreeNode *RHS = getRoot(Root)->split(Offset))
+ Root = new RopePieceBTreeInterior(getRoot(Root), RHS);
+
+ // #2. Do the erasing.
+ getRoot(Root)->erase(Offset, NumBytes);
+}
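+// Usage sketch for the split-then-operate protocol above (illustrative only;
+// 'Piece' is a hypothetical RopePiece, built the way RewriteRope::MakeRopeString
+// below builds them):
+//
+//   RopePieceBTree Tree;
+//   Tree.insert(0, Piece);   // split(0) is a no-op; the piece goes in a leaf
+//   Tree.erase(2, 3);        // split(2) runs first, so the erase begins on a
+//                            // piece boundary, as the erase() methods require
+//
+// In both operations, if the root overflows during the split or insert, the
+// returned node becomes the second child of a new RopePieceBTreeInterior root.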
+
+//===----------------------------------------------------------------------===//
+// RewriteRope Implementation
+//===----------------------------------------------------------------------===//
+
+/// MakeRopeString - This copies the specified byte range into some instance of
+/// RopeRefCountString, and returns a RopePiece that represents it. This uses
+/// the AllocBuffer object to aggregate requests for small strings into one
+/// allocation instead of doing tons of tiny allocations.
+RopePiece RewriteRope::MakeRopeString(const char *Start, const char *End) {
+ unsigned Len = End-Start;
+ assert(Len && "Zero length RopePiece is invalid!");
+
+ // If we have space for this string in the current alloc buffer, use it.
+ if (AllocOffs+Len <= AllocChunkSize) {
+ memcpy(AllocBuffer->Data+AllocOffs, Start, Len);
+ AllocOffs += Len;
+ return RopePiece(AllocBuffer, AllocOffs-Len, AllocOffs);
+ }
+
+ // If we don't have enough room because this specific allocation is huge,
+ // just allocate a new rope piece for it alone.
+ if (Len > AllocChunkSize) {
+ unsigned Size = End-Start+sizeof(RopeRefCountString)-1;
+ RopeRefCountString *Res =
+ reinterpret_cast<RopeRefCountString *>(new char[Size]);
+ Res->RefCount = 0;
+ memcpy(Res->Data, Start, End-Start);
+ return RopePiece(Res, 0, End-Start);
+ }
+
+  // Otherwise, this was a small request but we just don't have space for it.
+ // Make a new chunk and share it with later allocations.
+
+ // If we had an old allocation, drop our reference to it.
+ if (AllocBuffer && --AllocBuffer->RefCount == 0)
+ delete [] (char*)AllocBuffer;
+
+ unsigned AllocSize = offsetof(RopeRefCountString, Data) + AllocChunkSize;
+ AllocBuffer = reinterpret_cast<RopeRefCountString *>(new char[AllocSize]);
+ AllocBuffer->RefCount = 0;
+ memcpy(AllocBuffer->Data, Start, Len);
+ AllocOffs = Len;
+
+ // Start out the new allocation with a refcount of 1, since we have an
+ // internal reference to it.
+ AllocBuffer->addRef();
+ return RopePiece(AllocBuffer, 0, Len);
+}
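+// The three allocation paths above, summarized (sizes are relative to
+// AllocChunkSize, which is defined in the RewriteRope interface):
+//
+//   1. The request fits in the current chunk: append to AllocBuffer->Data and
+//      return a RopePiece viewing [AllocOffs-Len, AllocOffs).
+//   2. The request is larger than a whole chunk: give it its own dedicated
+//      RopeRefCountString allocation.
+//   3. The request is small but the chunk is full: drop our reference to the
+//      old chunk, start a fresh one, and carve the piece from its beginning.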
+
+
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/RewriteTest.cpp b/contrib/llvm/tools/clang/lib/Rewrite/RewriteTest.cpp
new file mode 100644
index 0000000..019e5e7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/RewriteTest.cpp
@@ -0,0 +1,39 @@
+//===--- RewriteTest.cpp - Rewriter playground ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a testbed.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Rewriters.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Rewrite/TokenRewriter.h"
+#include "llvm/Support/raw_ostream.h"
+
+void clang::DoRewriteTest(Preprocessor &PP, raw_ostream* OS) {
+ SourceManager &SM = PP.getSourceManager();
+ const LangOptions &LangOpts = PP.getLangOpts();
+
+ TokenRewriter Rewriter(SM.getMainFileID(), SM, LangOpts);
+
+ // Throw <i> </i> tags around comments.
+ for (TokenRewriter::token_iterator I = Rewriter.token_begin(),
+ E = Rewriter.token_end(); I != E; ++I) {
+ if (I->isNot(tok::comment)) continue;
+
+ Rewriter.AddTokenBefore(I, "<i>");
+ Rewriter.AddTokenAfter(I, "</i>");
+ }
+
+
+ // Print out the output.
+ for (TokenRewriter::token_iterator I = Rewriter.token_begin(),
+ E = Rewriter.token_end(); I != E; ++I)
+ *OS << PP.getSpelling(*I);
+}
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp
new file mode 100644
index 0000000..43fb01b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/Rewriter.cpp
@@ -0,0 +1,414 @@
+//===--- Rewriter.cpp - Code rewriting interface --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Rewriter class, which is used for code
+// transformations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/Decl.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+
+raw_ostream &RewriteBuffer::write(raw_ostream &os) const {
+ // FIXME: eliminate the copy by writing out each chunk at a time
+ os << std::string(begin(), end());
+ return os;
+}
+
+/// \brief Return true if this character is non-new-line whitespace:
+/// ' ', '\t', '\f', '\v', '\r'.
+static inline bool isWhitespace(unsigned char c) {
+ switch (c) {
+ case ' ':
+ case '\t':
+ case '\f':
+ case '\v':
+ case '\r':
+ return true;
+ default:
+ return false;
+ }
+}
+
+void RewriteBuffer::RemoveText(unsigned OrigOffset, unsigned Size,
+ bool removeLineIfEmpty) {
+ // Nothing to remove, exit early.
+ if (Size == 0) return;
+
+ unsigned RealOffset = getMappedOffset(OrigOffset, true);
+ assert(RealOffset+Size < Buffer.size() && "Invalid location");
+
+ // Remove the dead characters.
+ Buffer.erase(RealOffset, Size);
+
+ // Add a delta so that future changes are offset correctly.
+ AddReplaceDelta(OrigOffset, -Size);
+
+ if (removeLineIfEmpty) {
+    // Find the line on which the removal occurred and, if it is completely
+    // empty, remove the line as well.
+
+ iterator curLineStart = begin();
+ unsigned curLineStartOffs = 0;
+ iterator posI = begin();
+ for (unsigned i = 0; i != RealOffset; ++i) {
+ if (*posI == '\n') {
+ curLineStart = posI;
+ ++curLineStart;
+ curLineStartOffs = i + 1;
+ }
+ ++posI;
+ }
+
+ unsigned lineSize = 0;
+ posI = curLineStart;
+ while (posI != end() && isWhitespace(*posI)) {
+ ++posI;
+ ++lineSize;
+ }
+ if (posI != end() && *posI == '\n') {
+ Buffer.erase(curLineStartOffs, lineSize + 1/* + '\n'*/);
+ AddReplaceDelta(curLineStartOffs, -(lineSize + 1/* + '\n'*/));
+ }
+ }
+}
+
+void RewriteBuffer::InsertText(unsigned OrigOffset, StringRef Str,
+ bool InsertAfter) {
+
+ // Nothing to insert, exit early.
+ if (Str.empty()) return;
+
+ unsigned RealOffset = getMappedOffset(OrigOffset, InsertAfter);
+ Buffer.insert(RealOffset, Str.begin(), Str.end());
+
+ // Add a delta so that future changes are offset correctly.
+ AddInsertDelta(OrigOffset, Str.size());
+}
+
+/// ReplaceText - This method replaces a range of characters in the input
+/// buffer with a new string. This is effectively a combined "remove+insert"
+/// operation.
+void RewriteBuffer::ReplaceText(unsigned OrigOffset, unsigned OrigLength,
+ StringRef NewStr) {
+ unsigned RealOffset = getMappedOffset(OrigOffset, true);
+ Buffer.erase(RealOffset, OrigLength);
+ Buffer.insert(RealOffset, NewStr.begin(), NewStr.end());
+ if (OrigLength != NewStr.size())
+ AddReplaceDelta(OrigOffset, NewStr.size() - OrigLength);
+}
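+// A small sketch of how the delta machinery used above behaves (offsets are
+// illustrative; AddInsertDelta/AddReplaceDelta and getMappedOffset are part of
+// the RewriteBuffer interface declared in the header):
+//
+//   original buffer: "int x;"                        // 'x' at original offset 4
+//   InsertText(0, "static ", /*InsertAfter=*/true);  // records a +7 delta at 0
+//   getMappedOffset(4) == 11                         // edits aimed at the
+//                                                    // original 'x' still land
+//                                                    // on 'x'
+//
+// This is what lets clients keep expressing edits in terms of offsets into the
+// *original* text while the underlying buffer is repeatedly modified.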
+
+
+//===----------------------------------------------------------------------===//
+// Rewriter class
+//===----------------------------------------------------------------------===//
+
+/// getRangeSize - Return the size in bytes of the specified range if its
+/// endpoints are in the same file. If not, this returns -1.
+int Rewriter::getRangeSize(const CharSourceRange &Range,
+ RewriteOptions opts) const {
+ if (!isRewritable(Range.getBegin()) ||
+ !isRewritable(Range.getEnd())) return -1;
+
+ FileID StartFileID, EndFileID;
+ unsigned StartOff, EndOff;
+
+ StartOff = getLocationOffsetAndFileID(Range.getBegin(), StartFileID);
+ EndOff = getLocationOffsetAndFileID(Range.getEnd(), EndFileID);
+
+ if (StartFileID != EndFileID)
+ return -1;
+
+  // If edits have been made to this buffer, the offsets of the range endpoints
+  // may have changed.
+ std::map<FileID, RewriteBuffer>::const_iterator I =
+ RewriteBuffers.find(StartFileID);
+ if (I != RewriteBuffers.end()) {
+ const RewriteBuffer &RB = I->second;
+ EndOff = RB.getMappedOffset(EndOff, opts.IncludeInsertsAtEndOfRange);
+ StartOff = RB.getMappedOffset(StartOff, !opts.IncludeInsertsAtBeginOfRange);
+ }
+
+
+ // Adjust the end offset to the end of the last token, instead of being the
+ // start of the last token if this is a token range.
+ if (Range.isTokenRange())
+ EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
+
+ return EndOff-StartOff;
+}
+
+int Rewriter::getRangeSize(SourceRange Range, RewriteOptions opts) const {
+ return getRangeSize(CharSourceRange::getTokenRange(Range), opts);
+}
+
+
+/// getRewrittenText - Return the rewritten form of the text in the specified
+/// range. If the start or end of the range was unrewritable or if they are
+/// in different buffers, this returns an empty string.
+///
+/// Note that this method is not particularly efficient.
+///
+std::string Rewriter::getRewrittenText(SourceRange Range) const {
+ if (!isRewritable(Range.getBegin()) ||
+ !isRewritable(Range.getEnd()))
+ return "";
+
+ FileID StartFileID, EndFileID;
+ unsigned StartOff, EndOff;
+ StartOff = getLocationOffsetAndFileID(Range.getBegin(), StartFileID);
+ EndOff = getLocationOffsetAndFileID(Range.getEnd(), EndFileID);
+
+ if (StartFileID != EndFileID)
+ return ""; // Start and end in different buffers.
+
+  // If edits have been made to this buffer, the offsets of the range endpoints
+  // may have changed.
+ std::map<FileID, RewriteBuffer>::const_iterator I =
+ RewriteBuffers.find(StartFileID);
+ if (I == RewriteBuffers.end()) {
+ // If the buffer hasn't been rewritten, just return the text from the input.
+ const char *Ptr = SourceMgr->getCharacterData(Range.getBegin());
+
+ // Adjust the end offset to the end of the last token, instead of being the
+ // start of the last token.
+ EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
+ return std::string(Ptr, Ptr+EndOff-StartOff);
+ }
+
+ const RewriteBuffer &RB = I->second;
+ EndOff = RB.getMappedOffset(EndOff, true);
+ StartOff = RB.getMappedOffset(StartOff);
+
+ // Adjust the end offset to the end of the last token, instead of being the
+ // start of the last token.
+ EndOff += Lexer::MeasureTokenLength(Range.getEnd(), *SourceMgr, *LangOpts);
+
+ // Advance the iterators to the right spot, yay for linear time algorithms.
+ RewriteBuffer::iterator Start = RB.begin();
+ std::advance(Start, StartOff);
+ RewriteBuffer::iterator End = Start;
+ std::advance(End, EndOff-StartOff);
+
+ return std::string(Start, End);
+}
+
+unsigned Rewriter::getLocationOffsetAndFileID(SourceLocation Loc,
+ FileID &FID) const {
+ assert(Loc.isValid() && "Invalid location");
+ std::pair<FileID,unsigned> V = SourceMgr->getDecomposedLoc(Loc);
+ FID = V.first;
+ return V.second;
+}
+
+
+/// getEditBuffer - Get or create a RewriteBuffer for the specified FileID.
+///
+RewriteBuffer &Rewriter::getEditBuffer(FileID FID) {
+ std::map<FileID, RewriteBuffer>::iterator I =
+ RewriteBuffers.lower_bound(FID);
+ if (I != RewriteBuffers.end() && I->first == FID)
+ return I->second;
+ I = RewriteBuffers.insert(I, std::make_pair(FID, RewriteBuffer()));
+
+ StringRef MB = SourceMgr->getBufferData(FID);
+ I->second.Initialize(MB.begin(), MB.end());
+
+ return I->second;
+}
+
+/// InsertText - Insert the specified string at the specified location in the
+/// original buffer.
+bool Rewriter::InsertText(SourceLocation Loc, StringRef Str,
+ bool InsertAfter, bool indentNewLines) {
+ if (!isRewritable(Loc)) return true;
+ FileID FID;
+ unsigned StartOffs = getLocationOffsetAndFileID(Loc, FID);
+
+ SmallString<128> indentedStr;
+ if (indentNewLines && Str.find('\n') != StringRef::npos) {
+ StringRef MB = SourceMgr->getBufferData(FID);
+
+ unsigned lineNo = SourceMgr->getLineNumber(FID, StartOffs) - 1;
+ const SrcMgr::ContentCache *
+ Content = SourceMgr->getSLocEntry(FID).getFile().getContentCache();
+ unsigned lineOffs = Content->SourceLineCache[lineNo];
+
+ // Find the whitespace at the start of the line.
+ StringRef indentSpace;
+ {
+ unsigned i = lineOffs;
+ while (isWhitespace(MB[i]))
+ ++i;
+ indentSpace = MB.substr(lineOffs, i-lineOffs);
+ }
+
+ SmallVector<StringRef, 4> lines;
+ Str.split(lines, "\n");
+
+ for (unsigned i = 0, e = lines.size(); i != e; ++i) {
+ indentedStr += lines[i];
+ if (i < e-1) {
+ indentedStr += '\n';
+ indentedStr += indentSpace;
+ }
+ }
+ Str = indentedStr.str();
+ }
+
+ getEditBuffer(FID).InsertText(StartOffs, Str, InsertAfter);
+ return false;
+}
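+// For example (illustrative), inserting "foo();\nbar();" with indentNewLines
+// set, at a location whose line begins with four spaces of indentation,
+// effectively inserts "foo();\n    bar();": the first line is left alone and
+// every subsequent line is prefixed with the whitespace found at the start of
+// the line containing the insertion point.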
+
+bool Rewriter::InsertTextAfterToken(SourceLocation Loc, StringRef Str) {
+ if (!isRewritable(Loc)) return true;
+ FileID FID;
+ unsigned StartOffs = getLocationOffsetAndFileID(Loc, FID);
+ RewriteOptions rangeOpts;
+ rangeOpts.IncludeInsertsAtBeginOfRange = false;
+ StartOffs += getRangeSize(SourceRange(Loc, Loc), rangeOpts);
+ getEditBuffer(FID).InsertText(StartOffs, Str, /*InsertAfter*/true);
+ return false;
+}
+
+/// RemoveText - Remove the specified text region.
+bool Rewriter::RemoveText(SourceLocation Start, unsigned Length,
+ RewriteOptions opts) {
+ if (!isRewritable(Start)) return true;
+ FileID FID;
+ unsigned StartOffs = getLocationOffsetAndFileID(Start, FID);
+ getEditBuffer(FID).RemoveText(StartOffs, Length, opts.RemoveLineIfEmpty);
+ return false;
+}
+
+/// ReplaceText - This method replaces a range of characters in the input
+/// buffer with a new string. This is effectively a combined "remove/insert"
+/// operation.
+bool Rewriter::ReplaceText(SourceLocation Start, unsigned OrigLength,
+ StringRef NewStr) {
+ if (!isRewritable(Start)) return true;
+ FileID StartFileID;
+ unsigned StartOffs = getLocationOffsetAndFileID(Start, StartFileID);
+
+ getEditBuffer(StartFileID).ReplaceText(StartOffs, OrigLength, NewStr);
+ return false;
+}
+
+bool Rewriter::ReplaceText(SourceRange range, SourceRange replacementRange) {
+ if (!isRewritable(range.getBegin())) return true;
+ if (!isRewritable(range.getEnd())) return true;
+ if (replacementRange.isInvalid()) return true;
+ SourceLocation start = range.getBegin();
+ unsigned origLength = getRangeSize(range);
+ unsigned newLength = getRangeSize(replacementRange);
+ FileID FID;
+ unsigned newOffs = getLocationOffsetAndFileID(replacementRange.getBegin(),
+ FID);
+ StringRef MB = SourceMgr->getBufferData(FID);
+ return ReplaceText(start, origLength, MB.substr(newOffs, newLength));
+}
+
+/// ReplaceStmt - This replaces a Stmt/Expr with another, using the pretty
+/// printer to generate the replacement code. This returns true if the input
+/// could not be rewritten, or false if successful.
+bool Rewriter::ReplaceStmt(Stmt *From, Stmt *To) {
+  // Measure the old text.
+ int Size = getRangeSize(From->getSourceRange());
+ if (Size == -1)
+ return true;
+
+ // Get the new text.
+ std::string SStr;
+ llvm::raw_string_ostream S(SStr);
+ To->printPretty(S, 0, PrintingPolicy(*LangOpts));
+ const std::string &Str = S.str();
+
+ ReplaceText(From->getLocStart(), Size, Str);
+ return false;
+}
+
+std::string Rewriter::ConvertToString(Stmt *From) {
+ std::string SStr;
+ llvm::raw_string_ostream S(SStr);
+ From->printPretty(S, 0, PrintingPolicy(*LangOpts));
+ return S.str();
+}
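+// A minimal end-to-end sketch of the Rewriter interface implemented in this
+// file (illustrative; 'Rewrite' is assumed to be a Rewriter already set up
+// with a SourceManager and LangOptions, and 'S', 'NewS', 'Loc' and 'FID' come
+// from a parsed translation unit):
+//
+//   Rewrite.InsertText(Loc, "/* patched */ ", /*InsertAfter=*/true);
+//   Rewrite.ReplaceStmt(S, NewS);                    // pretty-prints NewS
+//   Rewrite.getEditBuffer(FID).write(llvm::outs());  // emit the rewritten file
+//
+// All of these route through getEditBuffer(), which lazily copies the original
+// file contents into a RewriteBuffer the first time a FileID is touched.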
+
+bool Rewriter::IncreaseIndentation(CharSourceRange range,
+ SourceLocation parentIndent) {
+ if (range.isInvalid()) return true;
+ if (!isRewritable(range.getBegin())) return true;
+ if (!isRewritable(range.getEnd())) return true;
+ if (!isRewritable(parentIndent)) return true;
+
+ FileID StartFileID, EndFileID, parentFileID;
+ unsigned StartOff, EndOff, parentOff;
+
+ StartOff = getLocationOffsetAndFileID(range.getBegin(), StartFileID);
+ EndOff = getLocationOffsetAndFileID(range.getEnd(), EndFileID);
+ parentOff = getLocationOffsetAndFileID(parentIndent, parentFileID);
+
+ if (StartFileID != EndFileID || StartFileID != parentFileID)
+ return true;
+ if (StartOff > EndOff)
+ return true;
+
+ FileID FID = StartFileID;
+ StringRef MB = SourceMgr->getBufferData(FID);
+
+ unsigned parentLineNo = SourceMgr->getLineNumber(FID, parentOff) - 1;
+ unsigned startLineNo = SourceMgr->getLineNumber(FID, StartOff) - 1;
+ unsigned endLineNo = SourceMgr->getLineNumber(FID, EndOff) - 1;
+
+ const SrcMgr::ContentCache *
+ Content = SourceMgr->getSLocEntry(FID).getFile().getContentCache();
+
+ // Find where the lines start.
+ unsigned parentLineOffs = Content->SourceLineCache[parentLineNo];
+ unsigned startLineOffs = Content->SourceLineCache[startLineNo];
+
+ // Find the whitespace at the start of each line.
+ StringRef parentSpace, startSpace;
+ {
+ unsigned i = parentLineOffs;
+ while (isWhitespace(MB[i]))
+ ++i;
+ parentSpace = MB.substr(parentLineOffs, i-parentLineOffs);
+
+ i = startLineOffs;
+ while (isWhitespace(MB[i]))
+ ++i;
+ startSpace = MB.substr(startLineOffs, i-startLineOffs);
+ }
+ if (parentSpace.size() >= startSpace.size())
+ return true;
+ if (!startSpace.startswith(parentSpace))
+ return true;
+
+ StringRef indent = startSpace.substr(parentSpace.size());
+
+ // Indent the lines between start/end offsets.
+ RewriteBuffer &RB = getEditBuffer(FID);
+ for (unsigned lineNo = startLineNo; lineNo <= endLineNo; ++lineNo) {
+ unsigned offs = Content->SourceLineCache[lineNo];
+ unsigned i = offs;
+ while (isWhitespace(MB[i]))
+ ++i;
+ StringRef origIndent = MB.substr(offs, i-offs);
+ if (origIndent.startswith(startSpace))
+ RB.InsertText(offs, indent, /*InsertAfter=*/false);
+ }
+
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/Rewrite/TokenRewriter.cpp b/contrib/llvm/tools/clang/lib/Rewrite/TokenRewriter.cpp
new file mode 100644
index 0000000..03ce63e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Rewrite/TokenRewriter.cpp
@@ -0,0 +1,99 @@
+//===--- TokenRewriter.cpp - Token-based code rewriting interface ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the TokenRewriter class, which is used for code
+// transformations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Rewrite/TokenRewriter.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/ScratchBuffer.h"
+#include "clang/Basic/SourceManager.h"
+using namespace clang;
+
+TokenRewriter::TokenRewriter(FileID FID, SourceManager &SM,
+ const LangOptions &LangOpts) {
+ ScratchBuf.reset(new ScratchBuffer(SM));
+
+ // Create a lexer to lex all the tokens of the main file in raw mode.
+ const llvm::MemoryBuffer *FromFile = SM.getBuffer(FID);
+ Lexer RawLex(FID, FromFile, SM, LangOpts);
+
+ // Return all comments and whitespace as tokens.
+ RawLex.SetKeepWhitespaceMode(true);
+
+ // Lex the file, populating our datastructures.
+ Token RawTok;
+ RawLex.LexFromRawLexer(RawTok);
+ while (RawTok.isNot(tok::eof)) {
+#if 0
+ if (Tok.is(tok::raw_identifier)) {
+ // Look up the identifier info for the token. This should use
+ // IdentifierTable directly instead of PP.
+ PP.LookUpIdentifierInfo(Tok);
+ }
+#endif
+
+ AddToken(RawTok, TokenList.end());
+ RawLex.LexFromRawLexer(RawTok);
+ }
+}
+
+TokenRewriter::~TokenRewriter() {
+}
+
+
+/// RemapIterator - Convert from token_iterator (a const iterator) to
+/// TokenRefTy (a non-const iterator).
+TokenRewriter::TokenRefTy TokenRewriter::RemapIterator(token_iterator I) {
+ if (I == token_end()) return TokenList.end();
+
+ // FIXME: This is horrible, we should use our own list or something to avoid
+ // this.
+ std::map<SourceLocation, TokenRefTy>::iterator MapIt =
+ TokenAtLoc.find(I->getLocation());
+ assert(MapIt != TokenAtLoc.end() && "iterator not in rewriter?");
+ return MapIt->second;
+}
+
+
+/// AddToken - Add the specified token into the Rewriter before the other
+/// position.
+TokenRewriter::TokenRefTy
+TokenRewriter::AddToken(const Token &T, TokenRefTy Where) {
+ Where = TokenList.insert(Where, T);
+
+ bool InsertSuccess = TokenAtLoc.insert(std::make_pair(T.getLocation(),
+ Where)).second;
+ assert(InsertSuccess && "Token location already in rewriter!");
+ (void)InsertSuccess;
+ return Where;
+}
+
+
+TokenRewriter::token_iterator
+TokenRewriter::AddTokenBefore(token_iterator I, const char *Val) {
+ unsigned Len = strlen(Val);
+
+ // Plop the string into the scratch buffer, then create a token for this
+ // string.
+ Token Tok;
+ Tok.startToken();
+ const char *Spelling;
+ Tok.setLocation(ScratchBuf->getToken(Val, Len, Spelling));
+ Tok.setLength(Len);
+
+ // TODO: Form a whole lexer around this and relex the token! For now, just
+ // set kind to tok::unknown.
+ Tok.setKind(tok::unknown);
+
+ return AddToken(Tok, RemapIterator(I));
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp b/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp
new file mode 100644
index 0000000..a8e6791
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -0,0 +1,1016 @@
+//=- AnalysisBasedWarnings.cpp - Sema warnings based on libAnalysis -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines analysis_warnings::[Policy,Executor].
+// Together they are used by Sema to issue warnings based on inexpensive
+// static analysis algorithms in libAnalysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/AnalysisBasedWarnings.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/Analyses/ReachableCode.h"
+#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
+#include "clang/Analysis/Analyses/ThreadSafety.h"
+#include "clang/Analysis/CFGStmtMap.h"
+#include "clang/Analysis/Analyses/UninitializedValues.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <vector>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Unreachable code analysis.
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class UnreachableCodeHandler : public reachable_code::Callback {
+ Sema &S;
+ public:
+ UnreachableCodeHandler(Sema &s) : S(s) {}
+
+ void HandleUnreachable(SourceLocation L, SourceRange R1, SourceRange R2) {
+ S.Diag(L, diag::warn_unreachable) << R1 << R2;
+ }
+ };
+}
+
+/// CheckUnreachable - Check for unreachable code.
+static void CheckUnreachable(Sema &S, AnalysisDeclContext &AC) {
+ UnreachableCodeHandler UC(S);
+ reachable_code::FindUnreachableCode(AC, UC);
+}
+
+//===----------------------------------------------------------------------===//
+// Check for missing return value.
+//===----------------------------------------------------------------------===//
+
+enum ControlFlowKind {
+ UnknownFallThrough,
+ NeverFallThrough,
+ MaybeFallThrough,
+ AlwaysFallThrough,
+ NeverFallThroughOrReturn
+};
+
+/// CheckFallThrough - Check that we don't fall off the end of a
+/// Statement that should return a value.
+///
+/// \returns AlwaysFallThrough iff we always fall off the end of the statement,
+/// MaybeFallThrough iff we might or might not fall off the end,
+/// NeverFallThroughOrReturn iff we never fall off the end of the statement or
+/// return. We assume NeverFallThrough iff we never fall off the end of the
+/// statement but we may return. We assume that functions not marked noreturn
+/// will return.
+static ControlFlowKind CheckFallThrough(AnalysisDeclContext &AC) {
+ CFG *cfg = AC.getCFG();
+ if (cfg == 0) return UnknownFallThrough;
+
+ // The CFG leaves in dead things, and we don't want the dead code paths to
+ // confuse us, so we mark all live things first.
+ llvm::BitVector live(cfg->getNumBlockIDs());
+ unsigned count = reachable_code::ScanReachableFromBlock(&cfg->getEntry(),
+ live);
+
+ bool AddEHEdges = AC.getAddEHEdges();
+ if (!AddEHEdges && count != cfg->getNumBlockIDs())
+ // When there are things remaining dead, and we didn't add EH edges
+ // from CallExprs to the catch clauses, we have to go back and
+ // mark them as live.
+ for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
+ CFGBlock &b = **I;
+ if (!live[b.getBlockID()]) {
+ if (b.pred_begin() == b.pred_end()) {
+ if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
+ // When not adding EH edges from calls, catch clauses
+ // can otherwise seem dead. Avoid noting them as dead.
+ count += reachable_code::ScanReachableFromBlock(&b, live);
+ continue;
+ }
+ }
+ }
+
+  // Now that we know what is live, we check the live predecessors of the exit
+  // block and look for fall-through paths, being careful to ignore normal
+  // returns and exceptional paths.
+ bool HasLiveReturn = false;
+ bool HasFakeEdge = false;
+ bool HasPlainEdge = false;
+ bool HasAbnormalEdge = false;
+
+ // Ignore default cases that aren't likely to be reachable because all
+ // enums in a switch(X) have explicit case statements.
+ CFGBlock::FilterOptions FO;
+ FO.IgnoreDefaultsWithCoveredEnums = 1;
+
+ for (CFGBlock::filtered_pred_iterator
+ I = cfg->getExit().filtered_pred_start_end(FO); I.hasMore(); ++I) {
+ const CFGBlock& B = **I;
+ if (!live[B.getBlockID()])
+ continue;
+
+ // Skip blocks which contain an element marked as no-return. They don't
+ // represent actually viable edges into the exit block, so mark them as
+ // abnormal.
+ if (B.hasNoReturnElement()) {
+ HasAbnormalEdge = true;
+ continue;
+ }
+
+ // Destructors can appear after the 'return' in the CFG. This is
+    // normal. We need to look past the destructors for the return
+ // statement (if it exists).
+ CFGBlock::const_reverse_iterator ri = B.rbegin(), re = B.rend();
+
+ for ( ; ri != re ; ++ri)
+ if (isa<CFGStmt>(*ri))
+ break;
+
+ // No more CFGElements in the block?
+ if (ri == re) {
+ if (B.getTerminator() && isa<CXXTryStmt>(B.getTerminator())) {
+ HasAbnormalEdge = true;
+ continue;
+ }
+ // A labeled empty statement, or the entry block...
+ HasPlainEdge = true;
+ continue;
+ }
+
+ CFGStmt CS = cast<CFGStmt>(*ri);
+ const Stmt *S = CS.getStmt();
+ if (isa<ReturnStmt>(S)) {
+ HasLiveReturn = true;
+ continue;
+ }
+ if (isa<ObjCAtThrowStmt>(S)) {
+ HasFakeEdge = true;
+ continue;
+ }
+ if (isa<CXXThrowExpr>(S)) {
+ HasFakeEdge = true;
+ continue;
+ }
+ if (const AsmStmt *AS = dyn_cast<AsmStmt>(S)) {
+ if (AS->isMSAsm()) {
+ HasFakeEdge = true;
+ HasLiveReturn = true;
+ continue;
+ }
+ }
+ if (isa<CXXTryStmt>(S)) {
+ HasAbnormalEdge = true;
+ continue;
+ }
+ if (std::find(B.succ_begin(), B.succ_end(), &cfg->getExit())
+ == B.succ_end()) {
+ HasAbnormalEdge = true;
+ continue;
+ }
+
+ HasPlainEdge = true;
+ }
+ if (!HasPlainEdge) {
+ if (HasLiveReturn)
+ return NeverFallThrough;
+ return NeverFallThroughOrReturn;
+ }
+ if (HasAbnormalEdge || HasFakeEdge || HasLiveReturn)
+ return MaybeFallThrough;
+  // This says AlwaysFallThrough for calls to functions that are not marked
+  // noreturn but that never actually return. If people would like this warning
+  // to be more accurate, such functions should be marked as noreturn.
+ return AlwaysFallThrough;
+}
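+// Illustrative examples of the classification above (assuming no callee is
+// marked noreturn):
+//
+//   int f(bool b) { if (b) return 1; }   // MaybeFallThrough
+//   int g() { }                          // AlwaysFallThrough
+//   int h() { for (;;) {} }              // NeverFallThroughOrReturn
+//   int k() { return 0; }                // NeverFallThrough (always returns)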
+
+namespace {
+
+struct CheckFallThroughDiagnostics {
+ unsigned diag_MaybeFallThrough_HasNoReturn;
+ unsigned diag_MaybeFallThrough_ReturnsNonVoid;
+ unsigned diag_AlwaysFallThrough_HasNoReturn;
+ unsigned diag_AlwaysFallThrough_ReturnsNonVoid;
+ unsigned diag_NeverFallThroughOrReturn;
+ enum { Function, Block, Lambda } funMode;
+ SourceLocation FuncLoc;
+
+ static CheckFallThroughDiagnostics MakeForFunction(const Decl *Func) {
+ CheckFallThroughDiagnostics D;
+ D.FuncLoc = Func->getLocation();
+ D.diag_MaybeFallThrough_HasNoReturn =
+ diag::warn_falloff_noreturn_function;
+ D.diag_MaybeFallThrough_ReturnsNonVoid =
+ diag::warn_maybe_falloff_nonvoid_function;
+ D.diag_AlwaysFallThrough_HasNoReturn =
+ diag::warn_falloff_noreturn_function;
+ D.diag_AlwaysFallThrough_ReturnsNonVoid =
+ diag::warn_falloff_nonvoid_function;
+
+ // Don't suggest that virtual functions be marked "noreturn", since they
+ // might be overridden by non-noreturn functions.
+ bool isVirtualMethod = false;
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Func))
+ isVirtualMethod = Method->isVirtual();
+
+ // Don't suggest that template instantiations be marked "noreturn"
+ bool isTemplateInstantiation = false;
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(Func))
+ isTemplateInstantiation = Function->isTemplateInstantiation();
+
+ if (!isVirtualMethod && !isTemplateInstantiation)
+ D.diag_NeverFallThroughOrReturn =
+ diag::warn_suggest_noreturn_function;
+ else
+ D.diag_NeverFallThroughOrReturn = 0;
+
+ D.funMode = Function;
+ return D;
+ }
+
+ static CheckFallThroughDiagnostics MakeForBlock() {
+ CheckFallThroughDiagnostics D;
+ D.diag_MaybeFallThrough_HasNoReturn =
+ diag::err_noreturn_block_has_return_expr;
+ D.diag_MaybeFallThrough_ReturnsNonVoid =
+ diag::err_maybe_falloff_nonvoid_block;
+ D.diag_AlwaysFallThrough_HasNoReturn =
+ diag::err_noreturn_block_has_return_expr;
+ D.diag_AlwaysFallThrough_ReturnsNonVoid =
+ diag::err_falloff_nonvoid_block;
+ D.diag_NeverFallThroughOrReturn =
+ diag::warn_suggest_noreturn_block;
+ D.funMode = Block;
+ return D;
+ }
+
+ static CheckFallThroughDiagnostics MakeForLambda() {
+ CheckFallThroughDiagnostics D;
+ D.diag_MaybeFallThrough_HasNoReturn =
+ diag::err_noreturn_lambda_has_return_expr;
+ D.diag_MaybeFallThrough_ReturnsNonVoid =
+ diag::warn_maybe_falloff_nonvoid_lambda;
+ D.diag_AlwaysFallThrough_HasNoReturn =
+ diag::err_noreturn_lambda_has_return_expr;
+ D.diag_AlwaysFallThrough_ReturnsNonVoid =
+ diag::warn_falloff_nonvoid_lambda;
+ D.diag_NeverFallThroughOrReturn = 0;
+ D.funMode = Lambda;
+ return D;
+ }
+
+ bool checkDiagnostics(DiagnosticsEngine &D, bool ReturnsVoid,
+ bool HasNoReturn) const {
+ if (funMode == Function) {
+ return (ReturnsVoid ||
+ D.getDiagnosticLevel(diag::warn_maybe_falloff_nonvoid_function,
+ FuncLoc) == DiagnosticsEngine::Ignored)
+ && (!HasNoReturn ||
+ D.getDiagnosticLevel(diag::warn_noreturn_function_has_return_expr,
+ FuncLoc) == DiagnosticsEngine::Ignored)
+ && (!ReturnsVoid ||
+ D.getDiagnosticLevel(diag::warn_suggest_noreturn_block, FuncLoc)
+ == DiagnosticsEngine::Ignored);
+ }
+
+ // For blocks / lambdas.
+ return ReturnsVoid && !HasNoReturn
+ && ((funMode == Lambda) ||
+ D.getDiagnosticLevel(diag::warn_suggest_noreturn_block, FuncLoc)
+ == DiagnosticsEngine::Ignored);
+ }
+};
+
+}
+
+/// CheckFallThroughForBody - Check that we don't fall off the end of a
+/// function that should return a value. Check that we don't fall off the end
+/// of a noreturn function. We assume that functions and blocks not marked
+/// noreturn will return.
+static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
+ const BlockExpr *blkExpr,
+ const CheckFallThroughDiagnostics& CD,
+ AnalysisDeclContext &AC) {
+
+ bool ReturnsVoid = false;
+ bool HasNoReturn = false;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ ReturnsVoid = FD->getResultType()->isVoidType();
+ HasNoReturn = FD->hasAttr<NoReturnAttr>() ||
+ FD->getType()->getAs<FunctionType>()->getNoReturnAttr();
+ }
+ else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ ReturnsVoid = MD->getResultType()->isVoidType();
+ HasNoReturn = MD->hasAttr<NoReturnAttr>();
+ }
+ else if (isa<BlockDecl>(D)) {
+ QualType BlockTy = blkExpr->getType();
+ if (const FunctionType *FT =
+ BlockTy->getPointeeType()->getAs<FunctionType>()) {
+ if (FT->getResultType()->isVoidType())
+ ReturnsVoid = true;
+ if (FT->getNoReturnAttr())
+ HasNoReturn = true;
+ }
+ }
+
+ DiagnosticsEngine &Diags = S.getDiagnostics();
+
+ // Short circuit for compilation speed.
+ if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn))
+ return;
+
+ // FIXME: Function try block
+ if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body)) {
+ switch (CheckFallThrough(AC)) {
+ case UnknownFallThrough:
+ break;
+
+ case MaybeFallThrough:
+ if (HasNoReturn)
+ S.Diag(Compound->getRBracLoc(),
+ CD.diag_MaybeFallThrough_HasNoReturn);
+ else if (!ReturnsVoid)
+ S.Diag(Compound->getRBracLoc(),
+ CD.diag_MaybeFallThrough_ReturnsNonVoid);
+ break;
+ case AlwaysFallThrough:
+ if (HasNoReturn)
+ S.Diag(Compound->getRBracLoc(),
+ CD.diag_AlwaysFallThrough_HasNoReturn);
+ else if (!ReturnsVoid)
+ S.Diag(Compound->getRBracLoc(),
+ CD.diag_AlwaysFallThrough_ReturnsNonVoid);
+ break;
+ case NeverFallThroughOrReturn:
+ if (ReturnsVoid && !HasNoReturn && CD.diag_NeverFallThroughOrReturn) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn)
+ << 0 << FD;
+ } else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn)
+ << 1 << MD;
+ } else {
+ S.Diag(Compound->getLBracLoc(), CD.diag_NeverFallThroughOrReturn);
+ }
+ }
+ break;
+ case NeverFallThrough:
+ break;
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// -Wuninitialized
+//===----------------------------------------------------------------------===//
+
+namespace {
+/// ContainsReference - A visitor class to search for references to
+/// a particular declaration (the needle) within any evaluated component of an
+/// expression (recursively).
+class ContainsReference : public EvaluatedExprVisitor<ContainsReference> {
+ bool FoundReference;
+ const DeclRefExpr *Needle;
+
+public:
+ ContainsReference(ASTContext &Context, const DeclRefExpr *Needle)
+ : EvaluatedExprVisitor<ContainsReference>(Context),
+ FoundReference(false), Needle(Needle) {}
+
+ void VisitExpr(Expr *E) {
+ // Stop evaluating if we already have a reference.
+ if (FoundReference)
+ return;
+
+ EvaluatedExprVisitor<ContainsReference>::VisitExpr(E);
+ }
+
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ if (E == Needle)
+ FoundReference = true;
+ else
+ EvaluatedExprVisitor<ContainsReference>::VisitDeclRefExpr(E);
+ }
+
+ bool doesContainReference() const { return FoundReference; }
+};
+}
+
+static bool SuggestInitializationFixit(Sema &S, const VarDecl *VD) {
+ QualType VariableTy = VD->getType().getCanonicalType();
+ if (VariableTy->isBlockPointerType() &&
+ !VD->hasAttr<BlocksAttr>()) {
+ S.Diag(VD->getLocation(), diag::note_block_var_fixit_add_initialization) << VD->getDeclName()
+ << FixItHint::CreateInsertion(VD->getLocation(), "__block ");
+ return true;
+ }
+
+ // Don't issue a fixit if there is already an initializer.
+ if (VD->getInit())
+ return false;
+
+ // Suggest possible initialization (if any).
+ const char *Init = S.getFixItZeroInitializerForType(VariableTy);
+ if (!Init)
+ return false;
+ SourceLocation Loc = S.PP.getLocForEndOfToken(VD->getLocEnd());
+
+ S.Diag(Loc, diag::note_var_fixit_add_initialization) << VD->getDeclName()
+ << FixItHint::CreateInsertion(Loc, Init);
+ return true;
+}
+
+/// DiagnoseUninitializedUse -- Helper function for diagnosing uses of an
+/// uninitialized variable. This manages the different forms of diagnostic
+/// emitted for particular types of uses. Returns true if the use was diagnosed
+/// as a warning. If a particular use is one we omit warnings for, returns
+/// false.
+static bool DiagnoseUninitializedUse(Sema &S, const VarDecl *VD,
+ const Expr *E, bool isAlwaysUninit,
+ bool alwaysReportSelfInit = false) {
+ bool isSelfInit = false;
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (isAlwaysUninit) {
+ // Inspect the initializer of the variable declaration which is
+ // being referenced prior to its initialization. We emit
+ // specialized diagnostics for self-initialization, and we
+ // specifically avoid warning about self references which take the
+ // form of:
+ //
+ // int x = x;
+ //
+ // This is used to indicate to GCC that 'x' is intentionally left
+ // uninitialized. Proven code paths which access 'x' in
+ // an uninitialized state after this will still warn.
+ //
+ // TODO: Should we suppress maybe-uninitialized warnings for
+ // variables initialized in this way?
+ if (const Expr *Initializer = VD->getInit()) {
+ if (!alwaysReportSelfInit && DRE == Initializer->IgnoreParenImpCasts())
+ return false;
+
+ ContainsReference CR(S.Context, DRE);
+ CR.Visit(const_cast<Expr*>(Initializer));
+ isSelfInit = CR.doesContainReference();
+ }
+ if (isSelfInit) {
+ S.Diag(DRE->getLocStart(),
+ diag::warn_uninit_self_reference_in_init)
+ << VD->getDeclName() << VD->getLocation() << DRE->getSourceRange();
+ } else {
+ S.Diag(DRE->getLocStart(), diag::warn_uninit_var)
+ << VD->getDeclName() << DRE->getSourceRange();
+ }
+ } else {
+ S.Diag(DRE->getLocStart(), diag::warn_maybe_uninit_var)
+ << VD->getDeclName() << DRE->getSourceRange();
+ }
+ } else {
+ const BlockExpr *BE = cast<BlockExpr>(E);
+ if (VD->getType()->isBlockPointerType() &&
+ !VD->hasAttr<BlocksAttr>())
+ S.Diag(BE->getLocStart(), diag::warn_uninit_byref_blockvar_captured_by_block)
+ << VD->getDeclName();
+ else
+ S.Diag(BE->getLocStart(),
+ isAlwaysUninit ? diag::warn_uninit_var_captured_by_block
+ : diag::warn_maybe_uninit_var_captured_by_block)
+ << VD->getDeclName();
+ }
+
+ // Report where the variable was declared when the use wasn't within
+ // the initializer of that declaration and we didn't already suggest
+ // an initialization fixit.
+ if (!isSelfInit && !SuggestInitializationFixit(S, VD))
+ S.Diag(VD->getLocStart(), diag::note_uninit_var_def)
+ << VD->getDeclName();
+
+ return true;
+}
+
+typedef std::pair<const Expr*, bool> UninitUse;
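+// (The bool records whether the analysis considers the use always
+// uninitialized, i.e. the isAlwaysUninit flag later passed to
+// DiagnoseUninitializedUse.)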
+
+namespace {
+struct SLocSort {
+ bool operator()(const UninitUse &a, const UninitUse &b) {
+ SourceLocation aLoc = a.first->getLocStart();
+ SourceLocation bLoc = b.first->getLocStart();
+ return aLoc.getRawEncoding() < bLoc.getRawEncoding();
+ }
+};
+
+class UninitValsDiagReporter : public UninitVariablesHandler {
+ Sema &S;
+ typedef SmallVector<UninitUse, 2> UsesVec;
+ typedef llvm::DenseMap<const VarDecl *, std::pair<UsesVec*, bool> > UsesMap;
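+ // Maps each variable to the list of its uninitialized uses, plus a flag
+ // that is set when an idiomatic self-init ("int x = x;") was seen for it.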
+ UsesMap *uses;
+
+public:
+ UninitValsDiagReporter(Sema &S) : S(S), uses(0) {}
+ ~UninitValsDiagReporter() {
+ flushDiagnostics();
+ }
+
+ std::pair<UsesVec*, bool> &getUses(const VarDecl *vd) {
+ if (!uses)
+ uses = new UsesMap();
+
+ UsesMap::mapped_type &V = (*uses)[vd];
+ UsesVec *&vec = V.first;
+ if (!vec)
+ vec = new UsesVec();
+
+ return V;
+ }
+
+ void handleUseOfUninitVariable(const Expr *ex, const VarDecl *vd,
+ bool isAlwaysUninit) {
+ getUses(vd).first->push_back(std::make_pair(ex, isAlwaysUninit));
+ }
+
+ void handleSelfInit(const VarDecl *vd) {
+ getUses(vd).second = true;
+ }
+
+ void flushDiagnostics() {
+ if (!uses)
+ return;
+
+ for (UsesMap::iterator i = uses->begin(), e = uses->end(); i != e; ++i) {
+ const VarDecl *vd = i->first;
+ const UsesMap::mapped_type &V = i->second;
+
+ UsesVec *vec = V.first;
+ bool hasSelfInit = V.second;
+
+ // Specially handle the case where we have uses of an uninitialized
+ // variable, but the root cause is an idiomatic self-init. We want
+ // to report the diagnostic at the self-init since that is the root cause.
+ if (!vec->empty() && hasSelfInit && hasAlwaysUninitializedUse(vec))
+ DiagnoseUninitializedUse(S, vd, vd->getInit()->IgnoreParenCasts(),
+ /* isAlwaysUninit */ true,
+ /* alwaysReportSelfInit */ true);
+ else {
+ // Sort the uses by their SourceLocations. While not strictly
+ // guaranteed to produce them in line/column order, this will provide
+ // a stable ordering.
+ std::sort(vec->begin(), vec->end(), SLocSort());
+
+ for (UsesVec::iterator vi = vec->begin(), ve = vec->end(); vi != ve;
+ ++vi) {
+ if (DiagnoseUninitializedUse(S, vd, vi->first,
+ /*isAlwaysUninit=*/vi->second))
+ // Skip further diagnostics for this variable. We try to warn only
+ // on the first point at which a variable is used uninitialized.
+ break;
+ }
+ }
+
+ // Release the uses vector.
+ delete vec;
+ }
+ delete uses;
+ }
+
+private:
+ static bool hasAlwaysUninitializedUse(const UsesVec* vec) {
+ for (UsesVec::const_iterator i = vec->begin(), e = vec->end(); i != e; ++i) {
+ if (i->second) {
+ return true;
+ }
+ }
+ return false;
+ }
+};
+}
+
+
+//===----------------------------------------------------------------------===//
+// -Wthread-safety
+//===----------------------------------------------------------------------===//
+namespace clang {
+namespace thread_safety {
+typedef llvm::SmallVector<PartialDiagnosticAt, 1> OptionalNotes;
+typedef std::pair<PartialDiagnosticAt, OptionalNotes> DelayedDiag;
+typedef std::list<DelayedDiag> DiagList;
+
+struct SortDiagBySourceLocation {
+ SourceManager &SM;
+ SortDiagBySourceLocation(SourceManager &SM) : SM(SM) {}
+
+ bool operator()(const DelayedDiag &left, const DelayedDiag &right) {
+ // Although this call will be slow, this is only called when outputting
+ // multiple warnings.
+ return SM.isBeforeInTranslationUnit(left.first.first, right.first.first);
+ }
+};
+
+namespace {
+class ThreadSafetyReporter : public clang::thread_safety::ThreadSafetyHandler {
+ Sema &S;
+ DiagList Warnings;
+ SourceLocation FunLocation, FunEndLocation;
+
+ // Helper functions
+ void warnLockMismatch(unsigned DiagID, Name LockName, SourceLocation Loc) {
+ // Gracefully handle rare cases when the analysis can't get a more
+ // precise source location.
+ if (!Loc.isValid())
+ Loc = FunLocation;
+ PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID) << LockName);
+ Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
+ }
+
+ public:
+ ThreadSafetyReporter(Sema &S, SourceLocation FL, SourceLocation FEL)
+ : S(S), FunLocation(FL), FunEndLocation(FEL) {}
+
+ /// \brief Emit all buffered diagnostics in order of source location.
+ /// We need to output diagnostics produced while iterating through
+ /// the lockset in deterministic order, so this function orders diagnostics
+ /// and outputs them.
+ void emitDiagnostics() {
+ Warnings.sort(SortDiagBySourceLocation(S.getSourceManager()));
+ for (DiagList::iterator I = Warnings.begin(), E = Warnings.end();
+ I != E; ++I) {
+ S.Diag(I->first.first, I->first.second);
+ const OptionalNotes &Notes = I->second;
+ for (unsigned NoteI = 0, NoteN = Notes.size(); NoteI != NoteN; ++NoteI)
+ S.Diag(Notes[NoteI].first, Notes[NoteI].second);
+ }
+ }
+
+ void handleInvalidLockExp(SourceLocation Loc) {
+ PartialDiagnosticAt Warning(Loc,
+ S.PDiag(diag::warn_cannot_resolve_lock) << Loc);
+ Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
+ }
+ void handleUnmatchedUnlock(Name LockName, SourceLocation Loc) {
+ warnLockMismatch(diag::warn_unlock_but_no_lock, LockName, Loc);
+ }
+
+ void handleDoubleLock(Name LockName, SourceLocation Loc) {
+ warnLockMismatch(diag::warn_double_lock, LockName, Loc);
+ }
+
+ void handleMutexHeldEndOfScope(Name LockName, SourceLocation LocLocked,
+ SourceLocation LocEndOfScope,
+ LockErrorKind LEK) {
+ unsigned DiagID = 0;
+ switch (LEK) {
+ case LEK_LockedSomePredecessors:
+ DiagID = diag::warn_lock_some_predecessors;
+ break;
+ case LEK_LockedSomeLoopIterations:
+ DiagID = diag::warn_expecting_lock_held_on_loop;
+ break;
+ case LEK_LockedAtEndOfFunction:
+ DiagID = diag::warn_no_unlock;
+ break;
+ }
+ if (LocEndOfScope.isInvalid())
+ LocEndOfScope = FunEndLocation;
+
+ PartialDiagnosticAt Warning(LocEndOfScope, S.PDiag(DiagID) << LockName);
+ PartialDiagnosticAt Note(LocLocked, S.PDiag(diag::note_locked_here));
+ Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
+ }
+
+
+ void handleExclusiveAndShared(Name LockName, SourceLocation Loc1,
+ SourceLocation Loc2) {
+ PartialDiagnosticAt Warning(
+ Loc1, S.PDiag(diag::warn_lock_exclusive_and_shared) << LockName);
+ PartialDiagnosticAt Note(
+ Loc2, S.PDiag(diag::note_lock_exclusive_and_shared) << LockName);
+ Warnings.push_back(DelayedDiag(Warning, OptionalNotes(1, Note)));
+ }
+
+ void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK,
+ AccessKind AK, SourceLocation Loc) {
+ assert((POK == POK_VarAccess || POK == POK_VarDereference)
+ && "Only works for variables");
+ unsigned DiagID = POK == POK_VarAccess ?
+ diag::warn_variable_requires_any_lock :
+ diag::warn_var_deref_requires_any_lock;
+ PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
+ << D->getName() << getLockKindFromAccessKind(AK));
+ Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
+ }
+
+ void handleMutexNotHeld(const NamedDecl *D, ProtectedOperationKind POK,
+ Name LockName, LockKind LK, SourceLocation Loc) {
+ unsigned DiagID = 0;
+ switch (POK) {
+ case POK_VarAccess:
+ DiagID = diag::warn_variable_requires_lock;
+ break;
+ case POK_VarDereference:
+ DiagID = diag::warn_var_deref_requires_lock;
+ break;
+ case POK_FunctionCall:
+ DiagID = diag::warn_fun_requires_lock;
+ break;
+ }
+ PartialDiagnosticAt Warning(Loc, S.PDiag(DiagID)
+ << D->getName() << LockName << LK);
+ Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
+ }
+
+ void handleFunExcludesLock(Name FunName, Name LockName, SourceLocation Loc) {
+ PartialDiagnosticAt Warning(Loc,
+ S.PDiag(diag::warn_fun_excludes_mutex) << FunName << LockName);
+ Warnings.push_back(DelayedDiag(Warning, OptionalNotes()));
+ }
+};
+}
+}
+}
+
+//===----------------------------------------------------------------------===//
+// AnalysisBasedWarnings - Worker object used by Sema to execute analysis-based
+// warnings on a function, method, or block.
+//===----------------------------------------------------------------------===//
+
+clang::sema::AnalysisBasedWarnings::Policy::Policy() {
+ enableCheckFallThrough = 1;
+ enableCheckUnreachable = 0;
+ enableThreadSafetyAnalysis = 0;
+}
+
+clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s)
+ : S(s),
+ NumFunctionsAnalyzed(0),
+ NumFunctionsWithBadCFGs(0),
+ NumCFGBlocks(0),
+ MaxCFGBlocksPerFunction(0),
+ NumUninitAnalysisFunctions(0),
+ NumUninitAnalysisVariables(0),
+ MaxUninitAnalysisVariablesPerFunction(0),
+ NumUninitAnalysisBlockVisits(0),
+ MaxUninitAnalysisBlockVisitsPerFunction(0) {
+ DiagnosticsEngine &D = S.getDiagnostics();
+ DefaultPolicy.enableCheckUnreachable = (unsigned)
+ (D.getDiagnosticLevel(diag::warn_unreachable, SourceLocation()) !=
+ DiagnosticsEngine::Ignored);
+ DefaultPolicy.enableThreadSafetyAnalysis = (unsigned)
+ (D.getDiagnosticLevel(diag::warn_double_lock, SourceLocation()) !=
+ DiagnosticsEngine::Ignored);
+
+}
+
+static void flushDiagnostics(Sema &S, sema::FunctionScopeInfo *fscope) {
+ for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
+ i = fscope->PossiblyUnreachableDiags.begin(),
+ e = fscope->PossiblyUnreachableDiags.end();
+ i != e; ++i) {
+ const sema::PossiblyUnreachableDiag &D = *i;
+ S.Diag(D.Loc, D.PD);
+ }
+}
+
+void clang::sema::
+AnalysisBasedWarnings::IssueWarnings(sema::AnalysisBasedWarnings::Policy P,
+ sema::FunctionScopeInfo *fscope,
+ const Decl *D, const BlockExpr *blkExpr) {
+
+ // We avoid doing analysis-based warnings when there are errors for
+ // two reasons:
+ // (1) The CFGs often can't be constructed (if the body is invalid), so
+ // don't bother trying.
+ // (2) The code already has problems; running the analysis just takes more
+ // time.
+ DiagnosticsEngine &Diags = S.getDiagnostics();
+
+ // Do not do any analysis for declarations in system headers if we are
+ // going to just ignore them.
+ if (Diags.getSuppressSystemWarnings() &&
+ S.SourceMgr.isInSystemHeader(D->getLocation()))
+ return;
+
+ // For code in dependent contexts, we'll do this at instantiation time.
+ if (cast<DeclContext>(D)->isDependentContext())
+ return;
+
+ if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred()) {
+ // Flush out any possibly unreachable diagnostics.
+ flushDiagnostics(S, fscope);
+ return;
+ }
+
+ const Stmt *Body = D->getBody();
+ assert(Body);
+
+ AnalysisDeclContext AC(/* AnalysisDeclContextManager */ 0, D, 0);
+
+ // Don't generate EH edges for CallExprs, as we'd like to avoid the n^2
+ // explosion of edges for destructors and the resulting compile-time hit.
+ AC.getCFGBuildOptions().PruneTriviallyFalseEdges = true;
+ AC.getCFGBuildOptions().AddEHEdges = false;
+ AC.getCFGBuildOptions().AddInitializers = true;
+ AC.getCFGBuildOptions().AddImplicitDtors = true;
+
+ // Force that certain expressions appear as CFGElements in the CFG. This
+ // is used to speed up various analyses.
+ // FIXME: This isn't the right factoring. This is here for initial
+ // prototyping, but we need a way for analyses to say what expressions they
+ // expect to always be CFGElements and then fill in the BuildOptions
+ // appropriately. This is essentially a layering violation.
+ if (P.enableCheckUnreachable || P.enableThreadSafetyAnalysis) {
+ // Unreachable code analysis and thread safety require a linearized CFG.
+ AC.getCFGBuildOptions().setAllAlwaysAdd();
+ }
+ else {
+ AC.getCFGBuildOptions()
+ .setAlwaysAdd(Stmt::BinaryOperatorClass)
+ .setAlwaysAdd(Stmt::BlockExprClass)
+ .setAlwaysAdd(Stmt::CStyleCastExprClass)
+ .setAlwaysAdd(Stmt::DeclRefExprClass)
+ .setAlwaysAdd(Stmt::ImplicitCastExprClass)
+ .setAlwaysAdd(Stmt::UnaryOperatorClass);
+ }
+
+ // The CFG itself will be constructed lazily below, using the build options
+ // configured above.
+
+ // Emit delayed diagnostics.
+ if (!fscope->PossiblyUnreachableDiags.empty()) {
+ bool analyzed = false;
+
+ // Register the expressions with the CFGBuilder.
+ for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
+ i = fscope->PossiblyUnreachableDiags.begin(),
+ e = fscope->PossiblyUnreachableDiags.end();
+ i != e; ++i) {
+ if (const Stmt *stmt = i->stmt)
+ AC.registerForcedBlockExpression(stmt);
+ }
+
+ if (AC.getCFG()) {
+ analyzed = true;
+ for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
+ i = fscope->PossiblyUnreachableDiags.begin(),
+ e = fscope->PossiblyUnreachableDiags.end();
+ i != e; ++i)
+ {
+ const sema::PossiblyUnreachableDiag &D = *i;
+ bool processed = false;
+ if (const Stmt *stmt = i->stmt) {
+ const CFGBlock *block = AC.getBlockForRegisteredExpression(stmt);
+ CFGReverseBlockReachabilityAnalysis *cra =
+ AC.getCFGReachablityAnalysis();
+ // FIXME: We should be able to assert that block is non-null, but
+ // the CFG analysis can skip potentially-evaluated expressions in
+ // edge cases; see test/Sema/vla-2.c.
+ if (block && cra) {
+ // Can this block be reached from the entrance?
+ if (cra->isReachable(&AC.getCFG()->getEntry(), block))
+ S.Diag(D.Loc, D.PD);
+ processed = true;
+ }
+ }
+ if (!processed) {
+ // Emit the warning anyway if we cannot map to a basic block.
+ S.Diag(D.Loc, D.PD);
+ }
+ }
+ }
+
+ if (!analyzed)
+ flushDiagnostics(S, fscope);
+ }
+
+
+ // Warning: check missing 'return'
+ if (P.enableCheckFallThrough) {
+ const CheckFallThroughDiagnostics &CD =
+ (isa<BlockDecl>(D) ? CheckFallThroughDiagnostics::MakeForBlock()
+ : (isa<CXXMethodDecl>(D) &&
+ cast<CXXMethodDecl>(D)->getOverloadedOperator() == OO_Call &&
+ cast<CXXMethodDecl>(D)->getParent()->isLambda())
+ ? CheckFallThroughDiagnostics::MakeForLambda()
+ : CheckFallThroughDiagnostics::MakeForFunction(D));
+ CheckFallThroughForBody(S, D, Body, blkExpr, CD, AC);
+ }
+
+ // Warning: check for unreachable code
+ if (P.enableCheckUnreachable) {
+ // Only check for unreachable code on non-template instantiations.
+ // Different template instantiations can effectively change the control-flow
+ // and it is very difficult to prove that a snippet of code in a template
+ // is unreachable for all instantiations.
+ bool isTemplateInstantiation = false;
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(D))
+ isTemplateInstantiation = Function->isTemplateInstantiation();
+ if (!isTemplateInstantiation)
+ CheckUnreachable(S, AC);
+ }
+
+ // Check for thread safety violations
+ if (P.enableThreadSafetyAnalysis) {
+ SourceLocation FL = AC.getDecl()->getLocation();
+ SourceLocation FEL = AC.getDecl()->getLocEnd();
+ thread_safety::ThreadSafetyReporter Reporter(S, FL, FEL);
+ thread_safety::runThreadSafetyAnalysis(AC, Reporter);
+ Reporter.emitDiagnostics();
+ }
+
+ if (Diags.getDiagnosticLevel(diag::warn_uninit_var, D->getLocStart())
+ != DiagnosticsEngine::Ignored ||
+ Diags.getDiagnosticLevel(diag::warn_maybe_uninit_var, D->getLocStart())
+ != DiagnosticsEngine::Ignored) {
+ if (CFG *cfg = AC.getCFG()) {
+ UninitValsDiagReporter reporter(S);
+ UninitVariablesAnalysisStats stats;
+ std::memset(&stats, 0, sizeof(UninitVariablesAnalysisStats));
+ runUninitializedVariablesAnalysis(*cast<DeclContext>(D), *cfg, AC,
+ reporter, stats);
+
+ if (S.CollectStats && stats.NumVariablesAnalyzed > 0) {
+ ++NumUninitAnalysisFunctions;
+ NumUninitAnalysisVariables += stats.NumVariablesAnalyzed;
+ NumUninitAnalysisBlockVisits += stats.NumBlockVisits;
+ MaxUninitAnalysisVariablesPerFunction =
+ std::max(MaxUninitAnalysisVariablesPerFunction,
+ stats.NumVariablesAnalyzed);
+ MaxUninitAnalysisBlockVisitsPerFunction =
+ std::max(MaxUninitAnalysisBlockVisitsPerFunction,
+ stats.NumBlockVisits);
+ }
+ }
+ }
+
+ // Collect statistics about the CFG if it was built.
+ if (S.CollectStats && AC.isCFGBuilt()) {
+ ++NumFunctionsAnalyzed;
+ if (CFG *cfg = AC.getCFG()) {
+ // If we successfully built a CFG for this context, record some more
+ // detail information about it.
+ NumCFGBlocks += cfg->getNumBlockIDs();
+ MaxCFGBlocksPerFunction = std::max(MaxCFGBlocksPerFunction,
+ cfg->getNumBlockIDs());
+ } else {
+ ++NumFunctionsWithBadCFGs;
+ }
+ }
+}
+
+void clang::sema::AnalysisBasedWarnings::PrintStats() const {
+ llvm::errs() << "\n*** Analysis Based Warnings Stats:\n";
+
+ unsigned NumCFGsBuilt = NumFunctionsAnalyzed - NumFunctionsWithBadCFGs;
+ unsigned AvgCFGBlocksPerFunction =
+ !NumCFGsBuilt ? 0 : NumCFGBlocks/NumCFGsBuilt;
+ llvm::errs() << NumFunctionsAnalyzed << " functions analyzed ("
+ << NumFunctionsWithBadCFGs << " w/o CFGs).\n"
+ << " " << NumCFGBlocks << " CFG blocks built.\n"
+ << " " << AvgCFGBlocksPerFunction
+ << " average CFG blocks per function.\n"
+ << " " << MaxCFGBlocksPerFunction
+ << " max CFG blocks per function.\n";
+
+ unsigned AvgUninitVariablesPerFunction = !NumUninitAnalysisFunctions ? 0
+ : NumUninitAnalysisVariables/NumUninitAnalysisFunctions;
+ unsigned AvgUninitBlockVisitsPerFunction = !NumUninitAnalysisFunctions ? 0
+ : NumUninitAnalysisBlockVisits/NumUninitAnalysisFunctions;
+ llvm::errs() << NumUninitAnalysisFunctions
+ << " functions analyzed for uninitialized variables\n"
+ << " " << NumUninitAnalysisVariables << " variables analyzed.\n"
+ << " " << AvgUninitVariablesPerFunction
+ << " average variables per function.\n"
+ << " " << MaxUninitAnalysisVariablesPerFunction
+ << " max variables per function.\n"
+ << " " << NumUninitAnalysisBlockVisits << " block visits.\n"
+ << " " << AvgUninitBlockVisitsPerFunction
+ << " average block visits per function.\n"
+ << " " << MaxUninitAnalysisBlockVisitsPerFunction
+ << " max block visits per function.\n";
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp b/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp
new file mode 100644
index 0000000..f142ab4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/AttributeList.cpp
@@ -0,0 +1,126 @@
+//===--- AttributeList.cpp --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the AttributeList class.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/AttributeList.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/StringSwitch.h"
+using namespace clang;
+
+size_t AttributeList::allocated_size() const {
+ if (IsAvailability) return AttributeFactory::AvailabilityAllocSize;
+ return (sizeof(AttributeList) + NumArgs * sizeof(Expr*));
+}
+
+AttributeFactory::AttributeFactory() {
+ // Go ahead and configure all the inline capacity. This is just a memset.
+ FreeLists.resize(InlineFreeListsCapacity);
+}
+AttributeFactory::~AttributeFactory() {}
+
+static size_t getFreeListIndexForSize(size_t size) {
+ assert(size >= sizeof(AttributeList));
+ assert((size % sizeof(void*)) == 0);
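+ // Attributes differing only in their number of trailing Expr* arguments end
+ // up on different free lists (one list per pointer-sized size increment).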
+ return ((size - sizeof(AttributeList)) / sizeof(void*));
+}
+
+void *AttributeFactory::allocate(size_t size) {
+ // Check for a previously reclaimed attribute.
+ size_t index = getFreeListIndexForSize(size);
+ if (index < FreeLists.size()) {
+ if (AttributeList *attr = FreeLists[index]) {
+ FreeLists[index] = attr->NextInPool;
+ return attr;
+ }
+ }
+
+ // Otherwise, allocate something new.
+ return Alloc.Allocate(size, llvm::AlignOf<AttributeFactory>::Alignment);
+}
+
+void AttributeFactory::reclaimPool(AttributeList *cur) {
+ assert(cur && "reclaiming empty pool!");
+ do {
+ // Read this here, because we're going to overwrite NextInPool
+ // when we toss 'cur' into the appropriate queue.
+ AttributeList *next = cur->NextInPool;
+
+ size_t size = cur->allocated_size();
+ size_t freeListIndex = getFreeListIndexForSize(size);
+
+ // Expand FreeLists to the appropriate size, if required.
+ if (freeListIndex >= FreeLists.size())
+ FreeLists.resize(freeListIndex+1);
+
+ // Add 'cur' to the appropriate free-list.
+ cur->NextInPool = FreeLists[freeListIndex];
+ FreeLists[freeListIndex] = cur;
+
+ cur = next;
+ } while (cur);
+}
+
+void AttributePool::takePool(AttributeList *pool) {
+ assert(pool);
+
+ // Fast path: this pool is empty.
+ if (!Head) {
+ Head = pool;
+ return;
+ }
+
+ // Reverse the pool onto the current head. This optimizes for the
+ // pattern of pulling a lot of pools into a single pool.
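+ // (Each node is popped from 'pool' and pushed onto Head, so the incoming
+ // list ends up reversed onto the current head.)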
+ do {
+ AttributeList *next = pool->NextInPool;
+ pool->NextInPool = Head;
+ Head = pool;
+ pool = next;
+ } while (pool);
+}
+
+AttributeList *
+AttributePool::createIntegerAttribute(ASTContext &C, IdentifierInfo *Name,
+ SourceLocation TokLoc, int Arg) {
+ Expr *IArg = IntegerLiteral::Create(C, llvm::APInt(32, (uint64_t) Arg),
+ C.IntTy, TokLoc);
+ return create(Name, TokLoc, 0, TokLoc, 0, TokLoc, &IArg, 1, 0);
+}
+
+AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) {
+ StringRef AttrName = Name->getName();
+
+ // Normalize the attribute name, __foo__ becomes foo.
+ if (AttrName.startswith("__") && AttrName.endswith("__") &&
+ AttrName.size() >= 4)
+ AttrName = AttrName.substr(2, AttrName.size() - 4);
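+ // For example, "__aligned__" is looked up as "aligned"; a spelling such as
+ // "__const" has no trailing "__" and is left alone, hence the explicit
+ // .Case("__const", ...) below.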
+
+ return llvm::StringSwitch<AttributeList::Kind>(AttrName)
+ #include "clang/Sema/AttrParsedAttrKinds.inc"
+ .Case("address_space", AT_address_space)
+ .Case("align", AT_aligned) // FIXME - should it be "aligned"?
+ .Case("base_check", AT_base_check)
+ .Case("bounded", IgnoredAttribute) // OpenBSD
+ .Case("__const", AT_const) // some GCC headers do contain this spelling
+ .Case("cf_returns_autoreleased", AT_cf_returns_autoreleased)
+ .Case("mode", AT_mode)
+ .Case("vec_type_hint", IgnoredAttribute)
+ .Case("ext_vector_type", AT_ext_vector_type)
+ .Case("neon_vector_type", AT_neon_vector_type)
+ .Case("neon_polyvector_type", AT_neon_polyvector_type)
+ .Case("opencl_image_access", AT_opencl_image_access)
+ .Case("objc_gc", AT_objc_gc)
+ .Case("objc_ownership", AT_objc_ownership)
+ .Case("vector_size", AT_vector_size)
+ .Default(UnknownAttribute);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp b/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp
new file mode 100644
index 0000000..ce9bbb9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/CodeCompleteConsumer.cpp
@@ -0,0 +1,641 @@
+//===--- CodeCompleteConsumer.cpp - Code Completion Interface ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the CodeCompleteConsumer class.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/CodeCompleteConsumer.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/Sema.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang-c/Index.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/raw_ostream.h"
+#include <algorithm>
+#include <cstring>
+#include <functional>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Code completion context implementation
+//===----------------------------------------------------------------------===//
+
+bool CodeCompletionContext::wantConstructorResults() const {
+ switch (Kind) {
+ case CCC_Recovery:
+ case CCC_Statement:
+ case CCC_Expression:
+ case CCC_ObjCMessageReceiver:
+ case CCC_ParenthesizedExpression:
+ return true;
+
+ case CCC_TopLevel:
+ case CCC_ObjCInterface:
+ case CCC_ObjCImplementation:
+ case CCC_ObjCIvarList:
+ case CCC_ClassStructUnion:
+ case CCC_DotMemberAccess:
+ case CCC_ArrowMemberAccess:
+ case CCC_ObjCPropertyAccess:
+ case CCC_EnumTag:
+ case CCC_UnionTag:
+ case CCC_ClassOrStructTag:
+ case CCC_ObjCProtocolName:
+ case CCC_Namespace:
+ case CCC_Type:
+ case CCC_Name:
+ case CCC_PotentiallyQualifiedName:
+ case CCC_MacroName:
+ case CCC_MacroNameUse:
+ case CCC_PreprocessorExpression:
+ case CCC_PreprocessorDirective:
+ case CCC_NaturalLanguage:
+ case CCC_SelectorName:
+ case CCC_TypeQualifiers:
+ case CCC_Other:
+ case CCC_OtherWithMacros:
+ case CCC_ObjCInstanceMessage:
+ case CCC_ObjCClassMessage:
+ case CCC_ObjCInterfaceName:
+ case CCC_ObjCCategoryName:
+ return false;
+ }
+
+ llvm_unreachable("Invalid CodeCompletionContext::Kind!");
+}
+
+//===----------------------------------------------------------------------===//
+// Code completion string implementation
+//===----------------------------------------------------------------------===//
+CodeCompletionString::Chunk::Chunk(ChunkKind Kind, const char *Text)
+ : Kind(Kind), Text("")
+{
+ switch (Kind) {
+ case CK_TypedText:
+ case CK_Text:
+ case CK_Placeholder:
+ case CK_Informative:
+ case CK_ResultType:
+ case CK_CurrentParameter:
+ this->Text = Text;
+ break;
+
+ case CK_Optional:
+ llvm_unreachable("Optional strings cannot be created from text");
+
+ case CK_LeftParen:
+ this->Text = "(";
+ break;
+
+ case CK_RightParen:
+ this->Text = ")";
+ break;
+
+ case CK_LeftBracket:
+ this->Text = "[";
+ break;
+
+ case CK_RightBracket:
+ this->Text = "]";
+ break;
+
+ case CK_LeftBrace:
+ this->Text = "{";
+ break;
+
+ case CK_RightBrace:
+ this->Text = "}";
+ break;
+
+ case CK_LeftAngle:
+ this->Text = "<";
+ break;
+
+ case CK_RightAngle:
+ this->Text = ">";
+ break;
+
+ case CK_Comma:
+ this->Text = ", ";
+ break;
+
+ case CK_Colon:
+ this->Text = ":";
+ break;
+
+ case CK_SemiColon:
+ this->Text = ";";
+ break;
+
+ case CK_Equal:
+ this->Text = " = ";
+ break;
+
+ case CK_HorizontalSpace:
+ this->Text = " ";
+ break;
+
+ case CK_VerticalSpace:
+ this->Text = "\n";
+ break;
+ }
+}
+
+CodeCompletionString::Chunk
+CodeCompletionString::Chunk::CreateText(const char *Text) {
+ return Chunk(CK_Text, Text);
+}
+
+CodeCompletionString::Chunk
+CodeCompletionString::Chunk::CreateOptional(CodeCompletionString *Optional) {
+ Chunk Result;
+ Result.Kind = CK_Optional;
+ Result.Optional = Optional;
+ return Result;
+}
+
+CodeCompletionString::Chunk
+CodeCompletionString::Chunk::CreatePlaceholder(const char *Placeholder) {
+ return Chunk(CK_Placeholder, Placeholder);
+}
+
+CodeCompletionString::Chunk
+CodeCompletionString::Chunk::CreateInformative(const char *Informative) {
+ return Chunk(CK_Informative, Informative);
+}
+
+CodeCompletionString::Chunk
+CodeCompletionString::Chunk::CreateResultType(const char *ResultType) {
+ return Chunk(CK_ResultType, ResultType);
+}
+
+CodeCompletionString::Chunk
+CodeCompletionString::Chunk::CreateCurrentParameter(
+ const char *CurrentParameter) {
+ return Chunk(CK_CurrentParameter, CurrentParameter);
+}
+
+CodeCompletionString::CodeCompletionString(const Chunk *Chunks,
+ unsigned NumChunks,
+ unsigned Priority,
+ CXAvailabilityKind Availability,
+ const char **Annotations,
+ unsigned NumAnnotations,
+ CXCursorKind ParentKind,
+ StringRef ParentName)
+ : NumChunks(NumChunks), NumAnnotations(NumAnnotations),
+ Priority(Priority), Availability(Availability), ParentKind(ParentKind),
+ ParentName(ParentName)
+{
+ assert(NumChunks <= 0xffff);
+ assert(NumAnnotations <= 0xffff);
+
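+ // The chunks and annotation string pointers live in trailing storage that
+ // was allocated immediately after this object (see
+ // CodeCompletionBuilder::TakeString).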
+ Chunk *StoredChunks = reinterpret_cast<Chunk *>(this + 1);
+ for (unsigned I = 0; I != NumChunks; ++I)
+ StoredChunks[I] = Chunks[I];
+
+ const char **StoredAnnotations = reinterpret_cast<const char **>(StoredChunks + NumChunks);
+ for (unsigned I = 0; I != NumAnnotations; ++I)
+ StoredAnnotations[I] = Annotations[I];
+}
+
+unsigned CodeCompletionString::getAnnotationCount() const {
+ return NumAnnotations;
+}
+
+const char *CodeCompletionString::getAnnotation(unsigned AnnotationNr) const {
+ if (AnnotationNr < NumAnnotations)
+ return reinterpret_cast<const char * const*>(end())[AnnotationNr];
+ else
+ return 0;
+}
+
+
+std::string CodeCompletionString::getAsString() const {
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+
+ for (iterator C = begin(), CEnd = end(); C != CEnd; ++C) {
+ switch (C->Kind) {
+ case CK_Optional: OS << "{#" << C->Optional->getAsString() << "#}"; break;
+ case CK_Placeholder: OS << "<#" << C->Text << "#>"; break;
+
+ case CK_Informative:
+ case CK_ResultType:
+ OS << "[#" << C->Text << "#]";
+ break;
+
+ case CK_CurrentParameter: OS << "<#" << C->Text << "#>"; break;
+ default: OS << C->Text; break;
+ }
+ }
+ return OS.str();
+}
+
+const char *CodeCompletionString::getTypedText() const {
+ for (iterator C = begin(), CEnd = end(); C != CEnd; ++C)
+ if (C->Kind == CK_TypedText)
+ return C->Text;
+
+ return 0;
+}
+
+const char *CodeCompletionAllocator::CopyString(StringRef String) {
+ char *Mem = (char *)Allocate(String.size() + 1, 1);
+ std::copy(String.begin(), String.end(), Mem);
+ Mem[String.size()] = 0;
+ return Mem;
+}
+
+const char *CodeCompletionAllocator::CopyString(Twine String) {
+ // FIXME: It would be more efficient to teach Twine to tell us its size and
+ // then add a routine there to fill in an allocated char* with the contents
+ // of the string.
+ SmallString<128> Data;
+ return CopyString(String.toStringRef(Data));
+}
+
+StringRef CodeCompletionTUInfo::getParentName(DeclContext *DC) {
+ NamedDecl *ND = dyn_cast<NamedDecl>(DC);
+ if (!ND)
+ return StringRef();
+
+ // Check whether we've already cached the parent name.
+ StringRef &CachedParentName = ParentNames[DC];
+ if (!CachedParentName.empty())
+ return CachedParentName;
+
+ // If we already processed this DeclContext and assigned empty to it, the
+ // data pointer will be non-null.
+ if (CachedParentName.data() != 0)
+ return StringRef();
+
+ // Find the interesting names.
+ llvm::SmallVector<DeclContext *, 2> Contexts;
+ while (DC && !DC->isFunctionOrMethod()) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(DC)) {
+ if (ND->getIdentifier())
+ Contexts.push_back(DC);
+ }
+
+ DC = DC->getParent();
+ }
+
+ {
+ llvm::SmallString<128> S;
+ llvm::raw_svector_ostream OS(S);
+ bool First = true;
+ for (unsigned I = Contexts.size(); I != 0; --I) {
+ if (First)
+ First = false;
+ else {
+ OS << "::";
+ }
+
+ DeclContext *CurDC = Contexts[I-1];
+ if (ObjCCategoryImplDecl *CatImpl = dyn_cast<ObjCCategoryImplDecl>(CurDC))
+ CurDC = CatImpl->getCategoryDecl();
+
+ if (ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(CurDC)) {
+ ObjCInterfaceDecl *Interface = Cat->getClassInterface();
+ if (!Interface) {
+ // Assign an empty StringRef with non-null data to distinguish this case
+ // from 'empty because we have not processed the DeclContext yet'.
+ CachedParentName = StringRef((const char *)~0U, 0);
+ return StringRef();
+ }
+
+ OS << Interface->getName() << '(' << Cat->getName() << ')';
+ } else {
+ OS << cast<NamedDecl>(CurDC)->getName();
+ }
+ }
+
+ CachedParentName = AllocatorRef->CopyString(OS.str());
+ }
+
+ return CachedParentName;
+}
+
+CodeCompletionString *CodeCompletionBuilder::TakeString() {
+ void *Mem = getAllocator().Allocate(
+ sizeof(CodeCompletionString) + sizeof(Chunk) * Chunks.size()
+ + sizeof(const char *) * Annotations.size(),
+ llvm::alignOf<CodeCompletionString>());
+ CodeCompletionString *Result
+ = new (Mem) CodeCompletionString(Chunks.data(), Chunks.size(),
+ Priority, Availability,
+ Annotations.data(), Annotations.size(),
+ ParentKind, ParentName);
+ Chunks.clear();
+ return Result;
+}
+
+void CodeCompletionBuilder::AddTypedTextChunk(const char *Text) {
+ Chunks.push_back(Chunk(CodeCompletionString::CK_TypedText, Text));
+}
+
+void CodeCompletionBuilder::AddTextChunk(const char *Text) {
+ Chunks.push_back(Chunk::CreateText(Text));
+}
+
+void CodeCompletionBuilder::AddOptionalChunk(CodeCompletionString *Optional) {
+ Chunks.push_back(Chunk::CreateOptional(Optional));
+}
+
+void CodeCompletionBuilder::AddPlaceholderChunk(const char *Placeholder) {
+ Chunks.push_back(Chunk::CreatePlaceholder(Placeholder));
+}
+
+void CodeCompletionBuilder::AddInformativeChunk(const char *Text) {
+ Chunks.push_back(Chunk::CreateInformative(Text));
+}
+
+void CodeCompletionBuilder::AddResultTypeChunk(const char *ResultType) {
+ Chunks.push_back(Chunk::CreateResultType(ResultType));
+}
+
+void
+CodeCompletionBuilder::AddCurrentParameterChunk(const char *CurrentParameter) {
+ Chunks.push_back(Chunk::CreateCurrentParameter(CurrentParameter));
+}
+
+void CodeCompletionBuilder::AddChunk(CodeCompletionString::ChunkKind CK,
+ const char *Text) {
+ Chunks.push_back(Chunk(CK, Text));
+}
+
+void CodeCompletionBuilder::addParentContext(DeclContext *DC) {
+ if (DC->isTranslationUnit()) {
+ ParentKind = CXCursor_TranslationUnit;
+ return;
+ }
+
+ if (DC->isFunctionOrMethod())
+ return;
+
+ NamedDecl *ND = dyn_cast<NamedDecl>(DC);
+ if (!ND)
+ return;
+
+ ParentKind = getCursorKindForDecl(ND);
+ ParentName = getCodeCompletionTUInfo().getParentName(DC);
+}
+
+unsigned CodeCompletionResult::getPriorityFromDecl(NamedDecl *ND) {
+ if (!ND)
+ return CCP_Unlikely;
+
+ // Context-based decisions.
+ DeclContext *DC = ND->getDeclContext()->getRedeclContext();
+ if (DC->isFunctionOrMethod() || isa<BlockDecl>(DC)) {
+ // _cmd is relatively rare
+ if (ImplicitParamDecl *ImplicitParam = dyn_cast<ImplicitParamDecl>(ND))
+ if (ImplicitParam->getIdentifier() &&
+ ImplicitParam->getIdentifier()->isStr("_cmd"))
+ return CCP_ObjC_cmd;
+
+ return CCP_LocalDeclaration;
+ }
+ if (DC->isRecord() || isa<ObjCContainerDecl>(DC))
+ return CCP_MemberDeclaration;
+
+ // Content-based decisions.
+ if (isa<EnumConstantDecl>(ND))
+ return CCP_Constant;
+ if (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND))
+ return CCP_Type;
+
+ return CCP_Declaration;
+}
+
+//===----------------------------------------------------------------------===//
+// Code completion overload candidate implementation
+//===----------------------------------------------------------------------===//
+FunctionDecl *
+CodeCompleteConsumer::OverloadCandidate::getFunction() const {
+ if (getKind() == CK_Function)
+ return Function;
+ else if (getKind() == CK_FunctionTemplate)
+ return FunctionTemplate->getTemplatedDecl();
+ else
+ return 0;
+}
+
+const FunctionType *
+CodeCompleteConsumer::OverloadCandidate::getFunctionType() const {
+ switch (Kind) {
+ case CK_Function:
+ return Function->getType()->getAs<FunctionType>();
+
+ case CK_FunctionTemplate:
+ return FunctionTemplate->getTemplatedDecl()->getType()
+ ->getAs<FunctionType>();
+
+ case CK_FunctionType:
+ return Type;
+ }
+
+ llvm_unreachable("Invalid CandidateKind!");
+}
+
+//===----------------------------------------------------------------------===//
+// Code completion consumer implementation
+//===----------------------------------------------------------------------===//
+
+CodeCompleteConsumer::~CodeCompleteConsumer() { }
+
+void
+PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(Sema &SemaRef,
+ CodeCompletionContext Context,
+ CodeCompletionResult *Results,
+ unsigned NumResults) {
+ std::stable_sort(Results, Results + NumResults);
+
+ // Print the results.
+ for (unsigned I = 0; I != NumResults; ++I) {
+ OS << "COMPLETION: ";
+ switch (Results[I].Kind) {
+ case CodeCompletionResult::RK_Declaration:
+ OS << *Results[I].Declaration;
+ if (Results[I].Hidden)
+ OS << " (Hidden)";
+ if (CodeCompletionString *CCS
+ = Results[I].CreateCodeCompletionString(SemaRef, getAllocator(),
+ CCTUInfo)) {
+ OS << " : " << CCS->getAsString();
+ }
+
+ OS << '\n';
+ break;
+
+ case CodeCompletionResult::RK_Keyword:
+ OS << Results[I].Keyword << '\n';
+ break;
+
+ case CodeCompletionResult::RK_Macro: {
+ OS << Results[I].Macro->getName();
+ if (CodeCompletionString *CCS
+ = Results[I].CreateCodeCompletionString(SemaRef, getAllocator(),
+ CCTUInfo)) {
+ OS << " : " << CCS->getAsString();
+ }
+ OS << '\n';
+ break;
+ }
+
+ case CodeCompletionResult::RK_Pattern: {
+ OS << "Pattern : "
+ << Results[I].Pattern->getAsString() << '\n';
+ break;
+ }
+ }
+ }
+}
+
+void
+PrintingCodeCompleteConsumer::ProcessOverloadCandidates(Sema &SemaRef,
+ unsigned CurrentArg,
+ OverloadCandidate *Candidates,
+ unsigned NumCandidates) {
+ for (unsigned I = 0; I != NumCandidates; ++I) {
+ if (CodeCompletionString *CCS
+ = Candidates[I].CreateSignatureString(CurrentArg, SemaRef,
+ getAllocator(), CCTUInfo)) {
+ OS << "OVERLOAD: " << CCS->getAsString() << "\n";
+ }
+ }
+}
+
+/// \brief Retrieve the effective availability of the given declaration.
+static AvailabilityResult getDeclAvailability(Decl *D) {
+ AvailabilityResult AR = D->getAvailability();
+ if (isa<EnumConstantDecl>(D))
+ AR = std::max(AR, cast<Decl>(D->getDeclContext())->getAvailability());
+ return AR;
+}
+
+void CodeCompletionResult::computeCursorKindAndAvailability(bool Accessible) {
+ switch (Kind) {
+ case RK_Pattern:
+ if (!Declaration) {
+ // Do nothing: Patterns can come with cursor kinds!
+ break;
+ }
+ // Fall through
+
+ case RK_Declaration: {
+ // Set the availability based on attributes.
+ switch (getDeclAvailability(Declaration)) {
+ case AR_Available:
+ case AR_NotYetIntroduced:
+ Availability = CXAvailability_Available;
+ break;
+
+ case AR_Deprecated:
+ Availability = CXAvailability_Deprecated;
+ break;
+
+ case AR_Unavailable:
+ Availability = CXAvailability_NotAvailable;
+ break;
+ }
+
+ if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Declaration))
+ if (Function->isDeleted())
+ Availability = CXAvailability_NotAvailable;
+
+ CursorKind = getCursorKindForDecl(Declaration);
+ if (CursorKind == CXCursor_UnexposedDecl) {
+ // FIXME: Forward declarations of Objective-C classes and protocols
+ // are not directly exposed, but we want code completion to treat them
+ // like a definition.
+ if (isa<ObjCInterfaceDecl>(Declaration))
+ CursorKind = CXCursor_ObjCInterfaceDecl;
+ else if (isa<ObjCProtocolDecl>(Declaration))
+ CursorKind = CXCursor_ObjCProtocolDecl;
+ else
+ CursorKind = CXCursor_NotImplemented;
+ }
+ break;
+ }
+
+ case RK_Macro:
+ Availability = CXAvailability_Available;
+ CursorKind = CXCursor_MacroDefinition;
+ break;
+
+ case RK_Keyword:
+ Availability = CXAvailability_Available;
+ CursorKind = CXCursor_NotImplemented;
+ break;
+ }
+
+ if (!Accessible)
+ Availability = CXAvailability_NotAccessible;
+}
+
+/// \brief Retrieve the name that should be used to order a result.
+///
+/// If the name needs to be constructed as a string, that string will be
+/// saved into Saved and the returned StringRef will refer to it.
+static StringRef getOrderedName(const CodeCompletionResult &R,
+ std::string &Saved) {
+ switch (R.Kind) {
+ case CodeCompletionResult::RK_Keyword:
+ return R.Keyword;
+
+ case CodeCompletionResult::RK_Pattern:
+ return R.Pattern->getTypedText();
+
+ case CodeCompletionResult::RK_Macro:
+ return R.Macro->getName();
+
+ case CodeCompletionResult::RK_Declaration:
+ // Handle declarations below.
+ break;
+ }
+
+ DeclarationName Name = R.Declaration->getDeclName();
+
+ // If the name is a simple identifier (by far the common case), or a
+ // zero-argument selector, just return a reference to that identifier.
+ if (IdentifierInfo *Id = Name.getAsIdentifierInfo())
+ return Id->getName();
+ if (Name.isObjCZeroArgSelector())
+ if (IdentifierInfo *Id
+ = Name.getObjCSelector().getIdentifierInfoForSlot(0))
+ return Id->getName();
+
+ Saved = Name.getAsString();
+ return Saved;
+}
+
+bool clang::operator<(const CodeCompletionResult &X,
+ const CodeCompletionResult &Y) {
+ std::string XSaved, YSaved;
+ StringRef XStr = getOrderedName(X, XSaved);
+ StringRef YStr = getOrderedName(Y, YSaved);
+ int cmp = XStr.compare_lower(YStr);
+ if (cmp)
+ return cmp < 0;
+
+ // The case-insensitive comparison was a tie; break the tie case-sensitively.
+ cmp = XStr.compare(YStr);
+ if (cmp)
+ return cmp < 0;
+
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp
new file mode 100644
index 0000000..b531acc
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/DeclSpec.cpp
@@ -0,0 +1,986 @@
+//===--- DeclSpec.cpp - Declaration Specifier Semantic Analysis -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for declaration specifiers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Parse/ParseDiagnostic.h" // FIXME: remove this back-dependency!
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/LocInfoType.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Sema/Sema.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cstring>
+using namespace clang;
+
+
+static DiagnosticBuilder Diag(DiagnosticsEngine &D, SourceLocation Loc,
+ unsigned DiagID) {
+ return D.Report(Loc, DiagID);
+}
+
+
+void UnqualifiedId::setTemplateId(TemplateIdAnnotation *TemplateId) {
+ assert(TemplateId && "NULL template-id annotation?");
+ Kind = IK_TemplateId;
+ this->TemplateId = TemplateId;
+ StartLocation = TemplateId->TemplateNameLoc;
+ EndLocation = TemplateId->RAngleLoc;
+}
+
+void UnqualifiedId::setConstructorTemplateId(TemplateIdAnnotation *TemplateId) {
+ assert(TemplateId && "NULL template-id annotation?");
+ Kind = IK_ConstructorTemplateId;
+ this->TemplateId = TemplateId;
+ StartLocation = TemplateId->TemplateNameLoc;
+ EndLocation = TemplateId->RAngleLoc;
+}
+
+void CXXScopeSpec::Extend(ASTContext &Context, SourceLocation TemplateKWLoc,
+ TypeLoc TL, SourceLocation ColonColonLoc) {
+ Builder.Extend(Context, TemplateKWLoc, TL, ColonColonLoc);
+ if (Range.getBegin().isInvalid())
+ Range.setBegin(TL.getBeginLoc());
+ Range.setEnd(ColonColonLoc);
+
+ assert(Range == Builder.getSourceRange() &&
+ "NestedNameSpecifierLoc range computation incorrect");
+}
+
+void CXXScopeSpec::Extend(ASTContext &Context, IdentifierInfo *Identifier,
+ SourceLocation IdentifierLoc,
+ SourceLocation ColonColonLoc) {
+ Builder.Extend(Context, Identifier, IdentifierLoc, ColonColonLoc);
+
+ if (Range.getBegin().isInvalid())
+ Range.setBegin(IdentifierLoc);
+ Range.setEnd(ColonColonLoc);
+
+ assert(Range == Builder.getSourceRange() &&
+ "NestedNameSpecifierLoc range computation incorrect");
+}
+
+void CXXScopeSpec::Extend(ASTContext &Context, NamespaceDecl *Namespace,
+ SourceLocation NamespaceLoc,
+ SourceLocation ColonColonLoc) {
+ Builder.Extend(Context, Namespace, NamespaceLoc, ColonColonLoc);
+
+ if (Range.getBegin().isInvalid())
+ Range.setBegin(NamespaceLoc);
+ Range.setEnd(ColonColonLoc);
+
+ assert(Range == Builder.getSourceRange() &&
+ "NestedNameSpecifierLoc range computation incorrect");
+}
+
+void CXXScopeSpec::Extend(ASTContext &Context, NamespaceAliasDecl *Alias,
+ SourceLocation AliasLoc,
+ SourceLocation ColonColonLoc) {
+ Builder.Extend(Context, Alias, AliasLoc, ColonColonLoc);
+
+ if (Range.getBegin().isInvalid())
+ Range.setBegin(AliasLoc);
+ Range.setEnd(ColonColonLoc);
+
+ assert(Range == Builder.getSourceRange() &&
+ "NestedNameSpecifierLoc range computation incorrect");
+}
+
+void CXXScopeSpec::MakeGlobal(ASTContext &Context,
+ SourceLocation ColonColonLoc) {
+ Builder.MakeGlobal(Context, ColonColonLoc);
+
+ Range = SourceRange(ColonColonLoc);
+
+ assert(Range == Builder.getSourceRange() &&
+ "NestedNameSpecifierLoc range computation incorrect");
+}
+
+void CXXScopeSpec::MakeTrivial(ASTContext &Context,
+ NestedNameSpecifier *Qualifier, SourceRange R) {
+ Builder.MakeTrivial(Context, Qualifier, R);
+ Range = R;
+}
+
+void CXXScopeSpec::Adopt(NestedNameSpecifierLoc Other) {
+ if (!Other) {
+ Range = SourceRange();
+ Builder.Clear();
+ return;
+ }
+
+ Range = Other.getSourceRange();
+ Builder.Adopt(Other);
+}
+
+SourceLocation CXXScopeSpec::getLastQualifierNameLoc() const {
+ if (!Builder.getRepresentation())
+ return SourceLocation();
+ return Builder.getTemporary().getLocalBeginLoc();
+}
+
+NestedNameSpecifierLoc
+CXXScopeSpec::getWithLocInContext(ASTContext &Context) const {
+ if (!Builder.getRepresentation())
+ return NestedNameSpecifierLoc();
+
+ return Builder.getWithLocInContext(Context);
+}
+
+/// DeclaratorChunk::getFunction - Return a DeclaratorChunk for a function.
+/// "TheDeclarator" is the declarator that this will be added to.
+DeclaratorChunk DeclaratorChunk::getFunction(bool hasProto, bool isVariadic,
+ SourceLocation EllipsisLoc,
+ ParamInfo *ArgInfo,
+ unsigned NumArgs,
+ unsigned TypeQuals,
+ bool RefQualifierIsLvalueRef,
+ SourceLocation RefQualifierLoc,
+ SourceLocation ConstQualifierLoc,
+ SourceLocation
+ VolatileQualifierLoc,
+ SourceLocation MutableLoc,
+ ExceptionSpecificationType
+ ESpecType,
+ SourceLocation ESpecLoc,
+ ParsedType *Exceptions,
+ SourceRange *ExceptionRanges,
+ unsigned NumExceptions,
+ Expr *NoexceptExpr,
+ SourceLocation LocalRangeBegin,
+ SourceLocation LocalRangeEnd,
+ Declarator &TheDeclarator,
+ ParsedType TrailingReturnType) {
+ DeclaratorChunk I;
+ I.Kind = Function;
+ I.Loc = LocalRangeBegin;
+ I.EndLoc = LocalRangeEnd;
+ I.Fun.AttrList = 0;
+ I.Fun.hasPrototype = hasProto;
+ I.Fun.isVariadic = isVariadic;
+ I.Fun.EllipsisLoc = EllipsisLoc.getRawEncoding();
+ I.Fun.DeleteArgInfo = false;
+ I.Fun.TypeQuals = TypeQuals;
+ I.Fun.NumArgs = NumArgs;
+ I.Fun.ArgInfo = 0;
+ I.Fun.RefQualifierIsLValueRef = RefQualifierIsLvalueRef;
+ I.Fun.RefQualifierLoc = RefQualifierLoc.getRawEncoding();
+ I.Fun.ConstQualifierLoc = ConstQualifierLoc.getRawEncoding();
+ I.Fun.VolatileQualifierLoc = VolatileQualifierLoc.getRawEncoding();
+ I.Fun.MutableLoc = MutableLoc.getRawEncoding();
+ I.Fun.ExceptionSpecType = ESpecType;
+ I.Fun.ExceptionSpecLoc = ESpecLoc.getRawEncoding();
+ I.Fun.NumExceptions = 0;
+ I.Fun.Exceptions = 0;
+ I.Fun.NoexceptExpr = 0;
+ I.Fun.TrailingReturnType = TrailingReturnType.getAsOpaquePtr();
+
+ // new[] an argument array if needed.
+ if (NumArgs) {
+ // If the 'InlineParams' in Declarator is unused and big enough, put our
+ // parameter list there (in an effort to avoid new/delete traffic). If it
+ // is already used (consider a function returning a function pointer) or too
+ // small (function taking too many arguments), go to the heap.
+ if (!TheDeclarator.InlineParamsUsed &&
+ NumArgs <= llvm::array_lengthof(TheDeclarator.InlineParams)) {
+ I.Fun.ArgInfo = TheDeclarator.InlineParams;
+ I.Fun.DeleteArgInfo = false;
+ TheDeclarator.InlineParamsUsed = true;
+ } else {
+ I.Fun.ArgInfo = new DeclaratorChunk::ParamInfo[NumArgs];
+ I.Fun.DeleteArgInfo = true;
+ }
+ memcpy(I.Fun.ArgInfo, ArgInfo, sizeof(ArgInfo[0])*NumArgs);
+ }
+
+ // Check what exception specification information we should actually store.
+ switch (ESpecType) {
+ default: break; // By default, save nothing.
+ case EST_Dynamic:
+ // new[] an exception array if needed
+ if (NumExceptions) {
+ I.Fun.NumExceptions = NumExceptions;
+ I.Fun.Exceptions = new DeclaratorChunk::TypeAndRange[NumExceptions];
+ for (unsigned i = 0; i != NumExceptions; ++i) {
+ I.Fun.Exceptions[i].Ty = Exceptions[i];
+ I.Fun.Exceptions[i].Range = ExceptionRanges[i];
+ }
+ }
+ break;
+
+ case EST_ComputedNoexcept:
+ I.Fun.NoexceptExpr = NoexceptExpr;
+ break;
+ }
+ return I;
+}
+
+bool Declarator::isDeclarationOfFunction() const {
+ for (unsigned i = 0, i_end = DeclTypeInfo.size(); i < i_end; ++i) {
+ switch (DeclTypeInfo[i].Kind) {
+ case DeclaratorChunk::Function:
+ return true;
+ case DeclaratorChunk::Paren:
+ continue;
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::MemberPointer:
+ return false;
+ }
+ llvm_unreachable("Invalid type chunk");
+ }
+
+ switch (DS.getTypeSpecType()) {
+ case TST_atomic:
+ case TST_auto:
+ case TST_bool:
+ case TST_char:
+ case TST_char16:
+ case TST_char32:
+ case TST_class:
+ case TST_decimal128:
+ case TST_decimal32:
+ case TST_decimal64:
+ case TST_double:
+ case TST_enum:
+ case TST_error:
+ case TST_float:
+ case TST_half:
+ case TST_int:
+ case TST_int128:
+ case TST_struct:
+ case TST_union:
+ case TST_unknown_anytype:
+ case TST_unspecified:
+ case TST_void:
+ case TST_wchar:
+ return false;
+
+ case TST_decltype:
+ case TST_typeofExpr:
+ if (Expr *E = DS.getRepAsExpr())
+ return E->getType()->isFunctionType();
+ return false;
+
+ case TST_underlyingType:
+ case TST_typename:
+ case TST_typeofType: {
+ QualType QT = DS.getRepAsType().get();
+ if (QT.isNull())
+ return false;
+
+ if (const LocInfoType *LIT = dyn_cast<LocInfoType>(QT))
+ QT = LIT->getType();
+
+ if (QT.isNull())
+ return false;
+
+ return QT->isFunctionType();
+ }
+ }
+
+ llvm_unreachable("Invalid TypeSpecType!");
+}
+
+/// getParsedSpecifiers - Return a bitmask of which flavors of specifiers this
+/// declaration specifier includes.
+///
+unsigned DeclSpec::getParsedSpecifiers() const {
+ unsigned Res = 0;
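+ // Note that __thread (SCS_thread_specified) also counts as a storage-class
+ // specifier for this purpose.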
+ if (StorageClassSpec != SCS_unspecified ||
+ SCS_thread_specified)
+ Res |= PQ_StorageClassSpecifier;
+
+ if (TypeQualifiers != TQ_unspecified)
+ Res |= PQ_TypeQualifier;
+
+ if (hasTypeSpecifier())
+ Res |= PQ_TypeSpecifier;
+
+ if (FS_inline_specified || FS_virtual_specified || FS_explicit_specified)
+ Res |= PQ_FunctionSpecifier;
+ return Res;
+}
+
+template <class T> static bool BadSpecifier(T TNew, T TPrev,
+ const char *&PrevSpec,
+ unsigned &DiagID) {
+ PrevSpec = DeclSpec::getSpecifierName(TPrev);
+ DiagID = (TNew == TPrev ? diag::ext_duplicate_declspec
+ : diag::err_invalid_decl_spec_combination);
+ return true;
+}
+
+const char *DeclSpec::getSpecifierName(DeclSpec::SCS S) {
+ switch (S) {
+ case DeclSpec::SCS_unspecified: return "unspecified";
+ case DeclSpec::SCS_typedef: return "typedef";
+ case DeclSpec::SCS_extern: return "extern";
+ case DeclSpec::SCS_static: return "static";
+ case DeclSpec::SCS_auto: return "auto";
+ case DeclSpec::SCS_register: return "register";
+ case DeclSpec::SCS_private_extern: return "__private_extern__";
+ case DeclSpec::SCS_mutable: return "mutable";
+ }
+ llvm_unreachable("Unknown typespec!");
+}
+
+const char *DeclSpec::getSpecifierName(TSW W) {
+ switch (W) {
+ case TSW_unspecified: return "unspecified";
+ case TSW_short: return "short";
+ case TSW_long: return "long";
+ case TSW_longlong: return "long long";
+ }
+ llvm_unreachable("Unknown typespec!");
+}
+
+const char *DeclSpec::getSpecifierName(TSC C) {
+ switch (C) {
+ case TSC_unspecified: return "unspecified";
+ case TSC_imaginary: return "imaginary";
+ case TSC_complex: return "complex";
+ }
+ llvm_unreachable("Unknown typespec!");
+}
+
+
+const char *DeclSpec::getSpecifierName(TSS S) {
+ switch (S) {
+ case TSS_unspecified: return "unspecified";
+ case TSS_signed: return "signed";
+ case TSS_unsigned: return "unsigned";
+ }
+ llvm_unreachable("Unknown typespec!");
+}
+
+const char *DeclSpec::getSpecifierName(DeclSpec::TST T) {
+ switch (T) {
+ case DeclSpec::TST_unspecified: return "unspecified";
+ case DeclSpec::TST_void: return "void";
+ case DeclSpec::TST_char: return "char";
+ case DeclSpec::TST_wchar: return "wchar_t";
+ case DeclSpec::TST_char16: return "char16_t";
+ case DeclSpec::TST_char32: return "char32_t";
+ case DeclSpec::TST_int: return "int";
+ case DeclSpec::TST_int128: return "__int128";
+ case DeclSpec::TST_half: return "half";
+ case DeclSpec::TST_float: return "float";
+ case DeclSpec::TST_double: return "double";
+ case DeclSpec::TST_bool: return "_Bool";
+ case DeclSpec::TST_decimal32: return "_Decimal32";
+ case DeclSpec::TST_decimal64: return "_Decimal64";
+ case DeclSpec::TST_decimal128: return "_Decimal128";
+ case DeclSpec::TST_enum: return "enum";
+ case DeclSpec::TST_class: return "class";
+ case DeclSpec::TST_union: return "union";
+ case DeclSpec::TST_struct: return "struct";
+ case DeclSpec::TST_typename: return "type-name";
+ case DeclSpec::TST_typeofType:
+ case DeclSpec::TST_typeofExpr: return "typeof";
+ case DeclSpec::TST_auto: return "auto";
+ case DeclSpec::TST_decltype: return "(decltype)";
+ case DeclSpec::TST_underlyingType: return "__underlying_type";
+ case DeclSpec::TST_unknown_anytype: return "__unknown_anytype";
+ case DeclSpec::TST_atomic: return "_Atomic";
+ case DeclSpec::TST_error: return "(error)";
+ }
+ llvm_unreachable("Unknown typespec!");
+}
+
+const char *DeclSpec::getSpecifierName(TQ T) {
+ switch (T) {
+ case DeclSpec::TQ_unspecified: return "unspecified";
+ case DeclSpec::TQ_const: return "const";
+ case DeclSpec::TQ_restrict: return "restrict";
+ case DeclSpec::TQ_volatile: return "volatile";
+ }
+ llvm_unreachable("Unknown typespec!");
+}
+
+bool DeclSpec::SetStorageClassSpec(Sema &S, SCS SC, SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID) {
+ // OpenCL 1.1 6.8g: "The extern, static, auto and register storage-class
+ // specifiers are not supported."
+ // It seems sensible to prohibit private_extern too.
+ // The cl_clang_storage_class_specifiers extension enables support for
+ // these storage-class specifiers.
+ if (S.getLangOpts().OpenCL &&
+ !S.getOpenCLOptions().cl_clang_storage_class_specifiers) {
+ switch (SC) {
+ case SCS_extern:
+ case SCS_private_extern:
+ case SCS_auto:
+ case SCS_register:
+ case SCS_static:
+ DiagID = diag::err_not_opencl_storage_class_specifier;
+ PrevSpec = getSpecifierName(SC);
+ return true;
+ default:
+ break;
+ }
+ }
+
+ if (StorageClassSpec != SCS_unspecified) {
+ // Maybe this is an attempt to use C++0x 'auto' outside of C++0x mode.
+ bool isInvalid = true;
+ if (TypeSpecType == TST_unspecified && S.getLangOpts().CPlusPlus) {
+ if (SC == SCS_auto)
+ return SetTypeSpecType(TST_auto, Loc, PrevSpec, DiagID);
+ if (StorageClassSpec == SCS_auto) {
+ isInvalid = SetTypeSpecType(TST_auto, StorageClassSpecLoc,
+ PrevSpec, DiagID);
+ assert(!isInvalid && "auto SCS -> TST recovery failed");
+ }
+ }
+
+ // Changing storage class is allowed only if the previous one
+ // was the 'extern' that is part of a linkage specification and
+ // the new storage class is 'typedef'.
+ if (isInvalid &&
+ !(SCS_extern_in_linkage_spec &&
+ StorageClassSpec == SCS_extern &&
+ SC == SCS_typedef))
+ return BadSpecifier(SC, (SCS)StorageClassSpec, PrevSpec, DiagID);
+ }
+ StorageClassSpec = SC;
+ StorageClassSpecLoc = Loc;
+ assert((unsigned)SC == StorageClassSpec && "SCS constants overflow bitfield");
+ return false;
+}
+
+bool DeclSpec::SetStorageClassSpecThread(SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID) {
+ if (SCS_thread_specified) {
+ PrevSpec = "__thread";
+ DiagID = diag::ext_duplicate_declspec;
+ return true;
+ }
+ SCS_thread_specified = true;
+ SCS_threadLoc = Loc;
+ return false;
+}
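+// For example, the second '__thread' in '__thread __thread int x;' lands
+// here with SCS_thread_specified already set, so it is reported through
+// ext_duplicate_declspec with PrevSpec pointing at "__thread".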
+
+/// These methods set the specified attribute of the DeclSpec, but return true
+/// and ignore the request if invalid (e.g. "extern" then "auto" is
+/// specified).
+bool DeclSpec::SetTypeSpecWidth(TSW W, SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID) {
+ // Overwrite TSWLoc only if TypeSpecWidth was unspecified, so that
+ // for 'long long' we will keep the source location of the first 'long'.
+ if (TypeSpecWidth == TSW_unspecified)
+ TSWLoc = Loc;
+ // Allow turning long -> long long.
+ else if (W != TSW_longlong || TypeSpecWidth != TSW_long)
+ return BadSpecifier(W, (TSW)TypeSpecWidth, PrevSpec, DiagID);
+ TypeSpecWidth = W;
+ if (TypeAltiVecVector && !TypeAltiVecBool &&
+ ((TypeSpecWidth == TSW_long) || (TypeSpecWidth == TSW_longlong))) {
+ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
+ DiagID = diag::warn_vector_long_decl_spec_combination;
+ return true;
+ }
+ return false;
+}
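+// For illustration: for 'long long x;' the caller is expected to pass
+// TSW_long for the first 'long' and TSW_longlong for the second, which the
+// check above accepts while keeping TSWLoc on the first token. A clash such
+// as 'long short x;' instead reaches BadSpecifier() and is diagnosed with
+// err_invalid_decl_spec_combination.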
+
+bool DeclSpec::SetTypeSpecComplex(TSC C, SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID) {
+ if (TypeSpecComplex != TSC_unspecified)
+ return BadSpecifier(C, (TSC)TypeSpecComplex, PrevSpec, DiagID);
+ TypeSpecComplex = C;
+ TSCLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetTypeSpecSign(TSS S, SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID) {
+ if (TypeSpecSign != TSS_unspecified)
+ return BadSpecifier(S, (TSS)TypeSpecSign, PrevSpec, DiagID);
+ TypeSpecSign = S;
+ TSSLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID,
+ ParsedType Rep) {
+ return SetTypeSpecType(T, Loc, Loc, PrevSpec, DiagID, Rep);
+}
+
+bool DeclSpec::SetTypeSpecType(TST T, SourceLocation TagKwLoc,
+ SourceLocation TagNameLoc,
+ const char *&PrevSpec,
+ unsigned &DiagID,
+ ParsedType Rep) {
+ assert(isTypeRep(T) && "T does not store a type");
+ assert(Rep && "no type provided!");
+ if (TypeSpecType != TST_unspecified) {
+ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
+ DiagID = diag::err_invalid_decl_spec_combination;
+ return true;
+ }
+ TypeSpecType = T;
+ TypeRep = Rep;
+ TSTLoc = TagKwLoc;
+ TSTNameLoc = TagNameLoc;
+ TypeSpecOwned = false;
+ return false;
+}
+
+bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID,
+ Expr *Rep) {
+ assert(isExprRep(T) && "T does not store an expr");
+ assert(Rep && "no expression provided!");
+ if (TypeSpecType != TST_unspecified) {
+ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
+ DiagID = diag::err_invalid_decl_spec_combination;
+ return true;
+ }
+ TypeSpecType = T;
+ ExprRep = Rep;
+ TSTLoc = Loc;
+ TSTNameLoc = Loc;
+ TypeSpecOwned = false;
+ return false;
+}
+
+bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID,
+ Decl *Rep, bool Owned) {
+ return SetTypeSpecType(T, Loc, Loc, PrevSpec, DiagID, Rep, Owned);
+}
+
+bool DeclSpec::SetTypeSpecType(TST T, SourceLocation TagKwLoc,
+ SourceLocation TagNameLoc,
+ const char *&PrevSpec,
+ unsigned &DiagID,
+ Decl *Rep, bool Owned) {
+ assert(isDeclRep(T) && "T does not store a decl");
+ // Unlike the other cases, we don't assert that we actually get a decl.
+
+ if (TypeSpecType != TST_unspecified) {
+ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
+ DiagID = diag::err_invalid_decl_spec_combination;
+ return true;
+ }
+ TypeSpecType = T;
+ DeclRep = Rep;
+ TSTLoc = TagKwLoc;
+ TSTNameLoc = TagNameLoc;
+ TypeSpecOwned = Owned;
+ return false;
+}
+
+bool DeclSpec::SetTypeSpecType(TST T, SourceLocation Loc,
+ const char *&PrevSpec,
+ unsigned &DiagID) {
+ assert(!isDeclRep(T) && !isTypeRep(T) && !isExprRep(T) &&
+ "rep required for these type-spec kinds!");
+ if (TypeSpecType != TST_unspecified) {
+ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
+ DiagID = diag::err_invalid_decl_spec_combination;
+ return true;
+ }
+ TSTLoc = Loc;
+ TSTNameLoc = Loc;
+ if (TypeAltiVecVector && (T == TST_bool) && !TypeAltiVecBool) {
+ TypeAltiVecBool = true;
+ return false;
+ }
+ TypeSpecType = T;
+ TypeSpecOwned = false;
+ if (TypeAltiVecVector && !TypeAltiVecBool && (TypeSpecType == TST_double)) {
+ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
+ DiagID = diag::err_invalid_vector_decl_spec;
+ return true;
+ }
+ return false;
+}
+
+bool DeclSpec::SetTypeAltiVecVector(bool isAltiVecVector, SourceLocation Loc,
+ const char *&PrevSpec, unsigned &DiagID) {
+ if (TypeSpecType != TST_unspecified) {
+ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
+ DiagID = diag::err_invalid_vector_decl_spec_combination;
+ return true;
+ }
+ TypeAltiVecVector = isAltiVecVector;
+ AltiVecLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetTypeAltiVecPixel(bool isAltiVecPixel, SourceLocation Loc,
+ const char *&PrevSpec, unsigned &DiagID) {
+ if (!TypeAltiVecVector || TypeAltiVecPixel ||
+ (TypeSpecType != TST_unspecified)) {
+ PrevSpec = DeclSpec::getSpecifierName((TST) TypeSpecType);
+ DiagID = diag::err_invalid_pixel_decl_spec_combination;
+ return true;
+ }
+ TypeAltiVecPixel = isAltiVecPixel;
+ TSTLoc = Loc;
+ TSTNameLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetTypeSpecError() {
+ TypeSpecType = TST_error;
+ TypeSpecOwned = false;
+ TSTLoc = SourceLocation();
+ TSTNameLoc = SourceLocation();
+ return false;
+}
+
+bool DeclSpec::SetTypeQual(TQ T, SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID, const LangOptions &Lang) {
+ // Duplicates turn into warnings pre-C99.
+ if ((TypeQualifiers & T) && !Lang.C99)
+ return BadSpecifier(T, T, PrevSpec, DiagID);
+ TypeQualifiers |= T;
+
+ switch (T) {
+ default: llvm_unreachable("Unknown type qualifier!");
+ case TQ_const: TQ_constLoc = Loc; break;
+ case TQ_restrict: TQ_restrictLoc = Loc; break;
+ case TQ_volatile: TQ_volatileLoc = Loc; break;
+ }
+ return false;
+}
+
+bool DeclSpec::SetFunctionSpecInline(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID) {
+ // 'inline inline' is ok.
+ FS_inline_specified = true;
+ FS_inlineLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetFunctionSpecVirtual(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID) {
+ // 'virtual virtual' is ok.
+ FS_virtual_specified = true;
+ FS_virtualLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetFunctionSpecExplicit(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID) {
+ // 'explicit explicit' is ok.
+ FS_explicit_specified = true;
+ FS_explicitLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetFriendSpec(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID) {
+ if (Friend_specified) {
+ PrevSpec = "friend";
+ DiagID = diag::ext_duplicate_declspec;
+ return true;
+ }
+
+ Friend_specified = true;
+ FriendLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::setModulePrivateSpec(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID) {
+ if (isModulePrivateSpecified()) {
+ PrevSpec = "__module_private__";
+ DiagID = diag::ext_duplicate_declspec;
+ return true;
+ }
+
+ ModulePrivateLoc = Loc;
+ return false;
+}
+
+bool DeclSpec::SetConstexprSpec(SourceLocation Loc, const char *&PrevSpec,
+ unsigned &DiagID) {
+ // 'constexpr constexpr' is ok.
+ Constexpr_specified = true;
+ ConstexprLoc = Loc;
+ return false;
+}
+
+void DeclSpec::setProtocolQualifiers(Decl * const *Protos,
+ unsigned NP,
+ SourceLocation *ProtoLocs,
+ SourceLocation LAngleLoc) {
+ if (NP == 0) return;
+ ProtocolQualifiers = new Decl*[NP];
+ ProtocolLocs = new SourceLocation[NP];
+ memcpy((void*)ProtocolQualifiers, Protos, sizeof(Decl*)*NP);
+ memcpy(ProtocolLocs, ProtoLocs, sizeof(SourceLocation)*NP);
+ NumProtocolQualifiers = NP;
+ ProtocolLAngleLoc = LAngleLoc;
+}
+
+void DeclSpec::SaveWrittenBuiltinSpecs() {
+ writtenBS.Sign = getTypeSpecSign();
+ writtenBS.Width = getTypeSpecWidth();
+ writtenBS.Type = getTypeSpecType();
+ // Search the list of attributes for the presence of a mode attribute.
+ writtenBS.ModeAttr = false;
+ AttributeList* attrs = getAttributes().getList();
+ while (attrs) {
+ if (attrs->getKind() == AttributeList::AT_mode) {
+ writtenBS.ModeAttr = true;
+ break;
+ }
+ attrs = attrs->getNext();
+ }
+}
+
+void DeclSpec::SaveStorageSpecifierAsWritten() {
+ if (SCS_extern_in_linkage_spec && StorageClassSpec == SCS_extern)
+ // If 'extern' is part of a linkage specification,
+ // then it is not a storage class "as written".
+ StorageClassSpecAsWritten = SCS_unspecified;
+ else
+ StorageClassSpecAsWritten = StorageClassSpec;
+}
+
+/// Finish - This does final analysis of the declspec, rejecting things like
+/// "_Imaginary" (lacking an FP type) and emitting any needed diagnostics
+/// directly. After calling this method, DeclSpec is guaranteed self-consistent,
+/// even if an error occurred.
+void DeclSpec::Finish(DiagnosticsEngine &D, Preprocessor &PP) {
+ // Before possibly changing their values, save specs as written.
+ SaveWrittenBuiltinSpecs();
+ SaveStorageSpecifierAsWritten();
+
+ // Check the type specifier components first.
+
+ // Validate and finalize AltiVec vector declspec.
+ if (TypeAltiVecVector) {
+ if (TypeAltiVecBool) {
+ // Sign specifiers are not allowed with vector bool. (PIM 2.1)
+ if (TypeSpecSign != TSS_unspecified) {
+ Diag(D, TSSLoc, diag::err_invalid_vector_bool_decl_spec)
+ << getSpecifierName((TSS)TypeSpecSign);
+ }
+
+ // Only char/int are valid with vector bool. (PIM 2.1)
+ if (((TypeSpecType != TST_unspecified) && (TypeSpecType != TST_char) &&
+ (TypeSpecType != TST_int)) || TypeAltiVecPixel) {
+ Diag(D, TSTLoc, diag::err_invalid_vector_bool_decl_spec)
+ << (TypeAltiVecPixel ? "__pixel" :
+ getSpecifierName((TST)TypeSpecType));
+ }
+
+ // Only 'short' is valid with vector bool. (PIM 2.1)
+ if ((TypeSpecWidth != TSW_unspecified) && (TypeSpecWidth != TSW_short))
+ Diag(D, TSWLoc, diag::err_invalid_vector_bool_decl_spec)
+ << getSpecifierName((TSW)TypeSpecWidth);
+
+ // Elements of vector bool are interpreted as unsigned. (PIM 2.1)
+ if ((TypeSpecType == TST_char) || (TypeSpecType == TST_int) ||
+ (TypeSpecWidth != TSW_unspecified))
+ TypeSpecSign = TSS_unsigned;
+ }
+
+ if (TypeAltiVecPixel) {
+ // TODO: perform validation
+ TypeSpecType = TST_int;
+ TypeSpecSign = TSS_unsigned;
+ TypeSpecWidth = TSW_short;
+ TypeSpecOwned = false;
+ }
+ }
+
+ // signed/unsigned are only valid with int/char/wchar_t.
+ if (TypeSpecSign != TSS_unspecified) {
+ if (TypeSpecType == TST_unspecified)
+ TypeSpecType = TST_int; // unsigned -> unsigned int, signed -> signed int.
+ else if (TypeSpecType != TST_int && TypeSpecType != TST_int128 &&
+ TypeSpecType != TST_char && TypeSpecType != TST_wchar) {
+ Diag(D, TSSLoc, diag::err_invalid_sign_spec)
+ << getSpecifierName((TST)TypeSpecType);
+ // signed double -> double.
+ TypeSpecSign = TSS_unspecified;
+ }
+ }
+
+ // Validate the width of the type.
+ switch (TypeSpecWidth) {
+ case TSW_unspecified: break;
+ case TSW_short: // short int
+ case TSW_longlong: // long long int
+ if (TypeSpecType == TST_unspecified)
+ TypeSpecType = TST_int; // short -> short int, long long -> long long int.
+ else if (TypeSpecType != TST_int) {
+ Diag(D, TSWLoc,
+ TypeSpecWidth == TSW_short ? diag::err_invalid_short_spec
+ : diag::err_invalid_longlong_spec)
+ << getSpecifierName((TST)TypeSpecType);
+ TypeSpecType = TST_int;
+ TypeSpecOwned = false;
+ }
+ break;
+ case TSW_long: // long double, long int
+ if (TypeSpecType == TST_unspecified)
+ TypeSpecType = TST_int; // long -> long int.
+ else if (TypeSpecType != TST_int && TypeSpecType != TST_double) {
+ Diag(D, TSWLoc, diag::err_invalid_long_spec)
+ << getSpecifierName((TST)TypeSpecType);
+ TypeSpecType = TST_int;
+ TypeSpecOwned = false;
+ }
+ break;
+ }
+
+ // TODO: if the implementation does not implement _Complex or _Imaginary,
+ // disallow their use. Need information about the backend.
+ if (TypeSpecComplex != TSC_unspecified) {
+ if (TypeSpecType == TST_unspecified) {
+ Diag(D, TSCLoc, diag::ext_plain_complex)
+ << FixItHint::CreateInsertion(
+ PP.getLocForEndOfToken(getTypeSpecComplexLoc()),
+ " double");
+ TypeSpecType = TST_double; // _Complex -> _Complex double.
+ } else if (TypeSpecType == TST_int || TypeSpecType == TST_char) {
+ // Note that this intentionally doesn't include _Complex _Bool.
+ if (!PP.getLangOpts().CPlusPlus)
+ Diag(D, TSTLoc, diag::ext_integer_complex);
+ } else if (TypeSpecType != TST_float && TypeSpecType != TST_double) {
+ Diag(D, TSCLoc, diag::err_invalid_complex_spec)
+ << getSpecifierName((TST)TypeSpecType);
+ TypeSpecComplex = TSC_unspecified;
+ }
+ }
+
+ // If no type specifier was provided and we're parsing a language where
+ // the type specifier is not optional, but we got 'auto' as a storage
+ // class specifier, then assume this is an attempt to use C++0x's 'auto'
+ // type specifier.
+ // FIXME: Does Microsoft really support implicit int in C++?
+ if (PP.getLangOpts().CPlusPlus && !PP.getLangOpts().MicrosoftExt &&
+ TypeSpecType == TST_unspecified && StorageClassSpec == SCS_auto) {
+ TypeSpecType = TST_auto;
+ StorageClassSpec = StorageClassSpecAsWritten = SCS_unspecified;
+ TSTLoc = TSTNameLoc = StorageClassSpecLoc;
+ StorageClassSpecLoc = SourceLocation();
+ }
+ // Diagnose if we've recovered from an ill-formed 'auto' storage class
+ // specifier in a pre-C++0x dialect of C++.
+ if (!PP.getLangOpts().CPlusPlus0x && TypeSpecType == TST_auto)
+ Diag(D, TSTLoc, diag::ext_auto_type_specifier);
+ if (PP.getLangOpts().CPlusPlus && !PP.getLangOpts().CPlusPlus0x &&
+ StorageClassSpec == SCS_auto)
+ Diag(D, StorageClassSpecLoc, diag::warn_auto_storage_class)
+ << FixItHint::CreateRemoval(StorageClassSpecLoc);
+ if (TypeSpecType == TST_char16 || TypeSpecType == TST_char32)
+ Diag(D, TSTLoc, diag::warn_cxx98_compat_unicode_type)
+ << (TypeSpecType == TST_char16 ? "char16_t" : "char32_t");
+ if (Constexpr_specified)
+ Diag(D, ConstexprLoc, diag::warn_cxx98_compat_constexpr);
+
+ // C++ [class.friend]p6:
+ // No storage-class-specifier shall appear in the decl-specifier-seq
+ // of a friend declaration.
+ if (isFriendSpecified() && getStorageClassSpec()) {
+ DeclSpec::SCS SC = getStorageClassSpec();
+ const char *SpecName = getSpecifierName(SC);
+
+ SourceLocation SCLoc = getStorageClassSpecLoc();
+ SourceLocation SCEndLoc = SCLoc.getLocWithOffset(strlen(SpecName));
+
+ Diag(D, SCLoc, diag::err_friend_storage_spec)
+ << SpecName
+ << FixItHint::CreateRemoval(SourceRange(SCLoc, SCEndLoc));
+
+ ClearStorageClassSpecs();
+ }
+
+ assert(!TypeSpecOwned || isDeclRep((TST) TypeSpecType));
+
+ // Okay, now we can infer the real type.
+
+ // TODO: return "auto function" and other bad things based on the real type.
+
+ // 'data definition has no type or storage class'?
+}
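+// A rough summary of the normalizations performed above:
+//   unsigned x;   ->  unsigned int x;      (TST_int inferred)
+//   short y;      ->  short int y;         (TST_int inferred)
+//   _Complex z;   ->  _Complex double z;   (ext_plain_complex plus fix-it)
+//   friend static void f();  ->  storage class dropped after
+//                                err_friend_storage_spec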
+
+bool DeclSpec::isMissingDeclaratorOk() {
+ TST tst = getTypeSpecType();
+ return isDeclRep(tst) && getRepAsDecl() != 0 &&
+ StorageClassSpec != DeclSpec::SCS_typedef;
+}
+
+void UnqualifiedId::clear() {
+ Kind = IK_Identifier;
+ Identifier = 0;
+ StartLocation = SourceLocation();
+ EndLocation = SourceLocation();
+}
+
+void UnqualifiedId::setOperatorFunctionId(SourceLocation OperatorLoc,
+ OverloadedOperatorKind Op,
+ SourceLocation SymbolLocations[3]) {
+ Kind = IK_OperatorFunctionId;
+ StartLocation = OperatorLoc;
+ EndLocation = OperatorLoc;
+ OperatorFunctionId.Operator = Op;
+ for (unsigned I = 0; I != 3; ++I) {
+ OperatorFunctionId.SymbolLocations[I] = SymbolLocations[I].getRawEncoding();
+
+ if (SymbolLocations[I].isValid())
+ EndLocation = SymbolLocations[I];
+ }
+}
+
+bool VirtSpecifiers::SetSpecifier(Specifier VS, SourceLocation Loc,
+ const char *&PrevSpec) {
+ LastLocation = Loc;
+
+ if (Specifiers & VS) {
+ PrevSpec = getSpecifierName(VS);
+ return true;
+ }
+
+ Specifiers |= VS;
+
+ switch (VS) {
+ default: llvm_unreachable("Unknown specifier!");
+ case VS_Override: VS_overrideLoc = Loc; break;
+ case VS_Final: VS_finalLoc = Loc; break;
+ }
+
+ return false;
+}
+
+const char *VirtSpecifiers::getSpecifierName(Specifier VS) {
+ switch (VS) {
+ default: llvm_unreachable("Unknown specifier");
+ case VS_Override: return "override";
+ case VS_Final: return "final";
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp b/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp
new file mode 100644
index 0000000..876f9d7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/DelayedDiagnostic.cpp
@@ -0,0 +1,56 @@
+//===--- DelayedDiagnostic.cpp - Delayed declarator diagnostics -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the DelayedDiagnostic class implementation, which
+// is used to record diagnostics that are being conditionally produced
+// during declarator parsing.
+//
+// This file also defines AccessedEntity.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/DelayedDiagnostic.h"
+#include <string.h>
+using namespace clang;
+using namespace sema;
+
+DelayedDiagnostic DelayedDiagnostic::makeDeprecation(SourceLocation Loc,
+ const NamedDecl *D,
+ const ObjCInterfaceDecl *UnknownObjCClass,
+ StringRef Msg) {
+ DelayedDiagnostic DD;
+ DD.Kind = Deprecation;
+ DD.Triggered = false;
+ DD.Loc = Loc;
+ DD.DeprecationData.Decl = D;
+ DD.DeprecationData.UnknownObjCClass = UnknownObjCClass;
+ char *MessageData = 0;
+ if (Msg.size()) {
+ MessageData = new char [Msg.size()];
+ memcpy(MessageData, Msg.data(), Msg.size());
+ }
+
+ DD.DeprecationData.Message = MessageData;
+ DD.DeprecationData.MessageLen = Msg.size();
+ return DD;
+}
+
+void DelayedDiagnostic::Destroy() {
+ switch (Kind) {
+ case Access:
+ getAccessData().~AccessedEntity();
+ break;
+
+ case Deprecation:
+ delete [] DeprecationData.Message;
+ break;
+
+ case ForbiddenType:
+ break;
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp b/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp
new file mode 100644
index 0000000..4d62cab
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/IdentifierResolver.cpp
@@ -0,0 +1,444 @@
+//===- IdentifierResolver.cpp - Lexical Scope Name lookup -------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the IdentifierResolver class, which is used for lexical
+// scoped lookup, based on declaration names.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/IdentifierResolver.h"
+#include "clang/Sema/Scope.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Lex/ExternalPreprocessorSource.h"
+#include "clang/Lex/Preprocessor.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// IdDeclInfoMap class
+//===----------------------------------------------------------------------===//
+
+/// IdDeclInfoMap - Associates IdDeclInfos with declaration names.
+/// Allocates 'pools' (fixed-size arrays of IdDeclInfos) to avoid allocating
+/// each individual IdDeclInfo on the heap.
+class IdentifierResolver::IdDeclInfoMap {
+ static const unsigned int POOL_SIZE = 512;
+
+ /// We use our own linked-list implementation because it is sadly
+ /// impossible to add something to a pre-C++0x STL container without
+ /// a completely unnecessary copy.
+ struct IdDeclInfoPool {
+ IdDeclInfoPool(IdDeclInfoPool *Next) : Next(Next) {}
+
+ IdDeclInfoPool *Next;
+ IdDeclInfo Pool[POOL_SIZE];
+ };
+
+ IdDeclInfoPool *CurPool;
+ unsigned int CurIndex;
+
+public:
+ IdDeclInfoMap() : CurPool(0), CurIndex(POOL_SIZE) {}
+
+ ~IdDeclInfoMap() {
+ IdDeclInfoPool *Cur = CurPool;
+ while (IdDeclInfoPool *P = Cur) {
+ Cur = Cur->Next;
+ delete P;
+ }
+ }
+
+ /// Returns the IdDeclInfo associated with the given DeclarationName,
+ /// creating a new one if none exists yet for this name.
+ IdDeclInfo &operator[](DeclarationName Name);
+};
+
+
+//===----------------------------------------------------------------------===//
+// IdDeclInfo Implementation
+//===----------------------------------------------------------------------===//
+
+/// RemoveDecl - Remove the decl from the scope chain.
+/// The decl must already be part of the decl chain.
+void IdentifierResolver::IdDeclInfo::RemoveDecl(NamedDecl *D) {
+ for (DeclsTy::iterator I = Decls.end(); I != Decls.begin(); --I) {
+ if (D == *(I-1)) {
+ Decls.erase(I-1);
+ return;
+ }
+ }
+
+ llvm_unreachable("Didn't find this decl on its identifier's chain!");
+}
+
+bool
+IdentifierResolver::IdDeclInfo::ReplaceDecl(NamedDecl *Old, NamedDecl *New) {
+ for (DeclsTy::iterator I = Decls.end(); I != Decls.begin(); --I) {
+ if (Old == *(I-1)) {
+ *(I - 1) = New;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+//===----------------------------------------------------------------------===//
+// IdentifierResolver Implementation
+//===----------------------------------------------------------------------===//
+
+IdentifierResolver::IdentifierResolver(Preprocessor &PP)
+ : LangOpt(PP.getLangOpts()), PP(PP),
+ IdDeclInfos(new IdDeclInfoMap) {
+}
+
+IdentifierResolver::~IdentifierResolver() {
+ delete IdDeclInfos;
+}
+
+/// isDeclInScope - If 'Ctx' is a function or method, return true if 'D' is in
+/// Scope 'S'; otherwise 'S' is ignored and this returns true if 'D' belongs to
+/// the given declaration context.
+bool IdentifierResolver::isDeclInScope(Decl *D, DeclContext *Ctx,
+ ASTContext &Context, Scope *S,
+ bool ExplicitInstantiationOrSpecialization) const {
+ Ctx = Ctx->getRedeclContext();
+
+ if (Ctx->isFunctionOrMethod() || S->isFunctionPrototypeScope()) {
+ // Ignore the scopes associated with transparent declaration contexts.
+ while (S->getEntity() &&
+ ((DeclContext *)S->getEntity())->isTransparentContext())
+ S = S->getParent();
+
+ if (S->isDeclScope(D))
+ return true;
+ if (LangOpt.CPlusPlus) {
+ // C++ 3.3.2p3:
+ // The name declared in a catch exception-declaration is local to the
+ // handler and shall not be redeclared in the outermost block of the
+ // handler.
+ // C++ 3.3.2p4:
+ // Names declared in the for-init-statement, and in the condition of if,
+ // while, for, and switch statements are local to the if, while, for, or
+ // switch statement (including the controlled statement), and shall not be
+ // redeclared in a subsequent condition of that statement nor in the
+ // outermost block (or, for the if statement, any of the outermost blocks)
+ // of the controlled statement.
+ //
+ assert(S->getParent() && "No TUScope?");
+ if (S->getParent()->getFlags() & Scope::ControlScope)
+ return S->getParent()->isDeclScope(D);
+ }
+ return false;
+ }
+
+ DeclContext *DCtx = D->getDeclContext()->getRedeclContext();
+ return ExplicitInstantiationOrSpecialization
+ ? Ctx->InEnclosingNamespaceSetOf(DCtx)
+ : Ctx->Equals(DCtx);
+}
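+// For example, given 'if (int x = f()) { int x; }' in C++, the inner block's
+// parent is the if's control scope, so the branch above reports the
+// condition's 'x' as being in scope, letting Sema reject the redeclaration
+// per the C++ 3.3.2p4 wording quoted in the comment.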
+
+/// AddDecl - Link the decl to its shadowed decl chain.
+void IdentifierResolver::AddDecl(NamedDecl *D) {
+ DeclarationName Name = D->getDeclName();
+ if (IdentifierInfo *II = Name.getAsIdentifierInfo())
+ updatingIdentifier(*II);
+
+ void *Ptr = Name.getFETokenInfo<void>();
+
+ if (!Ptr) {
+ Name.setFETokenInfo(D);
+ return;
+ }
+
+ IdDeclInfo *IDI;
+
+ if (isDeclPtr(Ptr)) {
+ Name.setFETokenInfo(NULL);
+ IDI = &(*IdDeclInfos)[Name];
+ NamedDecl *PrevD = static_cast<NamedDecl*>(Ptr);
+ IDI->AddDecl(PrevD);
+ } else
+ IDI = toIdDeclInfo(Ptr);
+
+ IDI->AddDecl(D);
+}
+
+void IdentifierResolver::InsertDeclAfter(iterator Pos, NamedDecl *D) {
+ DeclarationName Name = D->getDeclName();
+ if (IdentifierInfo *II = Name.getAsIdentifierInfo())
+ updatingIdentifier(*II);
+
+ void *Ptr = Name.getFETokenInfo<void>();
+
+ if (!Ptr) {
+ AddDecl(D);
+ return;
+ }
+
+ if (isDeclPtr(Ptr)) {
+ // We only have a single declaration: insert before or after it,
+ // as appropriate.
+ if (Pos == iterator()) {
+ // Add the new declaration before the existing declaration.
+ NamedDecl *PrevD = static_cast<NamedDecl*>(Ptr);
+ RemoveDecl(PrevD);
+ AddDecl(D);
+ AddDecl(PrevD);
+ } else {
+ // Add new declaration after the existing declaration.
+ AddDecl(D);
+ }
+
+ return;
+ }
+
+ // General case: insert the declaration at the appropriate point in the
+ // list, which already has at least two elements.
+ IdDeclInfo *IDI = toIdDeclInfo(Ptr);
+ if (Pos.isIterator()) {
+ IDI->InsertDecl(Pos.getIterator() + 1, D);
+ } else
+ IDI->InsertDecl(IDI->decls_begin(), D);
+}
+
+/// RemoveDecl - Unlink the decl from its shadowed decl chain.
+/// The decl must already be part of the decl chain.
+void IdentifierResolver::RemoveDecl(NamedDecl *D) {
+ assert(D && "null param passed");
+ DeclarationName Name = D->getDeclName();
+ if (IdentifierInfo *II = Name.getAsIdentifierInfo())
+ updatingIdentifier(*II);
+
+ void *Ptr = Name.getFETokenInfo<void>();
+
+ assert(Ptr && "Didn't find this decl on its identifier's chain!");
+
+ if (isDeclPtr(Ptr)) {
+ assert(D == Ptr && "Didn't find this decl on its identifier's chain!");
+ Name.setFETokenInfo(NULL);
+ return;
+ }
+
+ return toIdDeclInfo(Ptr)->RemoveDecl(D);
+}
+
+bool IdentifierResolver::ReplaceDecl(NamedDecl *Old, NamedDecl *New) {
+ assert(Old->getDeclName() == New->getDeclName() &&
+ "Cannot replace a decl with another decl of a different name");
+
+ DeclarationName Name = Old->getDeclName();
+ if (IdentifierInfo *II = Name.getAsIdentifierInfo())
+ updatingIdentifier(*II);
+
+ void *Ptr = Name.getFETokenInfo<void>();
+
+ if (!Ptr)
+ return false;
+
+ if (isDeclPtr(Ptr)) {
+ if (Ptr == Old) {
+ Name.setFETokenInfo(New);
+ return true;
+ }
+ return false;
+ }
+
+ return toIdDeclInfo(Ptr)->ReplaceDecl(Old, New);
+}
+
+/// begin - Returns an iterator for decls with name 'Name'.
+IdentifierResolver::iterator
+IdentifierResolver::begin(DeclarationName Name) {
+ if (IdentifierInfo *II = Name.getAsIdentifierInfo())
+ readingIdentifier(*II);
+
+ void *Ptr = Name.getFETokenInfo<void>();
+ if (!Ptr) return end();
+
+ if (isDeclPtr(Ptr))
+ return iterator(static_cast<NamedDecl*>(Ptr));
+
+ IdDeclInfo *IDI = toIdDeclInfo(Ptr);
+
+ IdDeclInfo::DeclsTy::iterator I = IDI->decls_end();
+ if (I != IDI->decls_begin())
+ return iterator(I-1);
+ // No decls found.
+ return end();
+}
+
+namespace {
+ enum DeclMatchKind {
+ DMK_Different,
+ DMK_Replace,
+ DMK_Ignore
+ };
+}
+
+/// \brief Compare two declarations to see whether they are different or,
+/// if they are the same, whether the new declaration should replace the
+/// existing declaration.
+static DeclMatchKind compareDeclarations(NamedDecl *Existing, NamedDecl *New) {
+ // If the declarations are identical, ignore the new one.
+ if (Existing == New)
+ return DMK_Ignore;
+
+ // If the declarations have different kinds, they're obviously different.
+ if (Existing->getKind() != New->getKind())
+ return DMK_Different;
+
+ // If the declarations are redeclarations of each other, keep the newest one.
+ if (Existing->getCanonicalDecl() == New->getCanonicalDecl()) {
+ // If the existing declaration is somewhere in the previous declaration
+ // chain of the new declaration, then prefer the new declaration.
+ for (Decl::redecl_iterator RD = New->redecls_begin(),
+ RDEnd = New->redecls_end();
+ RD != RDEnd; ++RD) {
+ if (*RD == Existing)
+ return DMK_Replace;
+
+ if (RD->isCanonicalDecl())
+ break;
+ }
+
+ return DMK_Ignore;
+ }
+
+ return DMK_Different;
+}
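+// Roughly: for two redeclarations of the same entity, the walk above prefers
+// the newer declaration (DMK_Replace) when the existing one shows up in the
+// new declaration's previous-declaration chain, and ignores the new one
+// otherwise; declarations with different kinds or distinct canonical decls
+// compare as DMK_Different.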
+
+bool IdentifierResolver::tryAddTopLevelDecl(NamedDecl *D, DeclarationName Name){
+ if (IdentifierInfo *II = Name.getAsIdentifierInfo())
+ readingIdentifier(*II);
+
+ void *Ptr = Name.getFETokenInfo<void>();
+
+ if (!Ptr) {
+ Name.setFETokenInfo(D);
+ return true;
+ }
+
+ IdDeclInfo *IDI;
+
+ if (isDeclPtr(Ptr)) {
+ NamedDecl *PrevD = static_cast<NamedDecl*>(Ptr);
+
+ switch (compareDeclarations(PrevD, D)) {
+ case DMK_Different:
+ break;
+
+ case DMK_Ignore:
+ return false;
+
+ case DMK_Replace:
+ Name.setFETokenInfo(D);
+ return true;
+ }
+
+ Name.setFETokenInfo(NULL);
+ IDI = &(*IdDeclInfos)[Name];
+
+ // If the existing declaration is not visible in translation unit scope,
+ // then add the new top-level declaration first.
+ if (!PrevD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
+ IDI->AddDecl(D);
+ IDI->AddDecl(PrevD);
+ } else {
+ IDI->AddDecl(PrevD);
+ IDI->AddDecl(D);
+ }
+ return true;
+ }
+
+ IDI = toIdDeclInfo(Ptr);
+
+ // See whether this declaration is identical to any existing declarations.
+ // If not, find the right place to insert it.
+ for (IdDeclInfo::DeclsTy::iterator I = IDI->decls_begin(),
+ IEnd = IDI->decls_end();
+ I != IEnd; ++I) {
+
+ switch (compareDeclarations(*I, D)) {
+ case DMK_Different:
+ break;
+
+ case DMK_Ignore:
+ return false;
+
+ case DMK_Replace:
+ *I = D;
+ return true;
+ }
+
+ if (!(*I)->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
+ // We've found a declaration that is not visible from the translation
+ // unit (it's in an inner scope). Insert our declaration here.
+ IDI->InsertDecl(I, D);
+ return true;
+ }
+ }
+
+ // Add the declaration to the end.
+ IDI->AddDecl(D);
+ return true;
+}
+
+void IdentifierResolver::readingIdentifier(IdentifierInfo &II) {
+ if (II.isOutOfDate())
+ PP.getExternalSource()->updateOutOfDateIdentifier(II);
+}
+
+void IdentifierResolver::updatingIdentifier(IdentifierInfo &II) {
+ if (II.isOutOfDate())
+ PP.getExternalSource()->updateOutOfDateIdentifier(II);
+
+ if (II.isFromAST())
+ II.setChangedSinceDeserialization();
+}
+
+//===----------------------------------------------------------------------===//
+// IdDeclInfoMap Implementation
+//===----------------------------------------------------------------------===//
+
+/// Returns the IdDeclInfo associated with the given DeclarationName,
+/// creating a new one if none exists yet for this name.
+IdentifierResolver::IdDeclInfo &
+IdentifierResolver::IdDeclInfoMap::operator[](DeclarationName Name) {
+ void *Ptr = Name.getFETokenInfo<void>();
+
+ if (Ptr) return *toIdDeclInfo(Ptr);
+
+ if (CurIndex == POOL_SIZE) {
+ CurPool = new IdDeclInfoPool(CurPool);
+ CurIndex = 0;
+ }
+ IdDeclInfo *IDI = &CurPool->Pool[CurIndex];
+ Name.setFETokenInfo(reinterpret_cast<void*>(
+ reinterpret_cast<uintptr_t>(IDI) | 0x1)
+ );
+ ++CurIndex;
+ return *IDI;
+}
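+// The 0x1 tag set above is what isDeclPtr()/toIdDeclInfo() key off of: a
+// DeclarationName's FETokenInfo is either a bare NamedDecl* (low bit clear)
+// or a pointer into one of these pools with the low bit set, which
+// toIdDeclInfo() presumably masks off again before use.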
+
+void IdentifierResolver::iterator::incrementSlowCase() {
+ NamedDecl *D = **this;
+ void *InfoPtr = D->getDeclName().getFETokenInfo<void>();
+ assert(!isDeclPtr(InfoPtr) && "Decl with wrong id ?");
+ IdDeclInfo *Info = toIdDeclInfo(InfoPtr);
+
+ BaseIter I = getIterator();
+ if (I != Info->decls_begin())
+ *this = iterator(I-1);
+ else // No more decls.
+ *this = iterator();
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
new file mode 100644
index 0000000..ab786c6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/JumpDiagnostics.cpp
@@ -0,0 +1,770 @@
+//===--- JumpDiagnostics.cpp - Protected scope jump analysis ------*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the JumpScopeChecker class, which is used to diagnose
+// jumps that enter a protected scope in an invalid way.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtCXX.h"
+#include "llvm/ADT/BitVector.h"
+using namespace clang;
+
+namespace {
+
+/// JumpScopeChecker - This object is used by Sema to diagnose invalid jumps
+/// into VLA and other protected scopes. For example, this rejects:
+/// goto L;
+/// int a[n];
+/// L:
+///
+class JumpScopeChecker {
+ Sema &S;
+
+ /// GotoScope - This is a record that we use to keep track of all of the
+ /// scopes that are introduced by VLAs and other constructs that restrict
+ /// jumps such as gotos. This scope tree has nothing to do with the source
+ /// scope tree, because you can have multiple VLA scopes per compound
+ /// statement, and most compound statements don't introduce any scopes.
+ struct GotoScope {
+ /// ParentScope - The index in Scopes of the parent scope. This is 0 if
+ /// the parent scope is the function body.
+ unsigned ParentScope;
+
+ /// InDiag - The note to emit if there is a jump into this scope.
+ unsigned InDiag;
+
+ /// OutDiag - The note to emit if there is an indirect jump out
+ /// of this scope. Direct jumps always clean up their current scope
+ /// in an orderly way.
+ unsigned OutDiag;
+
+ /// Loc - Location to emit the diagnostic.
+ SourceLocation Loc;
+
+ GotoScope(unsigned parentScope, unsigned InDiag, unsigned OutDiag,
+ SourceLocation L)
+ : ParentScope(parentScope), InDiag(InDiag), OutDiag(OutDiag), Loc(L) {}
+ };
+
+ SmallVector<GotoScope, 48> Scopes;
+ llvm::DenseMap<Stmt*, unsigned> LabelAndGotoScopes;
+ SmallVector<Stmt*, 16> Jumps;
+
+ SmallVector<IndirectGotoStmt*, 4> IndirectJumps;
+ SmallVector<LabelDecl*, 4> IndirectJumpTargets;
+public:
+ JumpScopeChecker(Stmt *Body, Sema &S);
+private:
+ void BuildScopeInformation(Decl *D, unsigned &ParentScope);
+ void BuildScopeInformation(VarDecl *D, const BlockDecl *BDecl,
+ unsigned &ParentScope);
+ void BuildScopeInformation(Stmt *S, unsigned &origParentScope);
+
+ void VerifyJumps();
+ void VerifyIndirectJumps();
+ void NoteJumpIntoScopes(ArrayRef<unsigned> ToScopes);
+ void DiagnoseIndirectJump(IndirectGotoStmt *IG, unsigned IGScope,
+ LabelDecl *Target, unsigned TargetScope);
+ void CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
+ unsigned JumpDiag, unsigned JumpDiagWarning,
+ unsigned JumpDiagCXX98Compat);
+
+ unsigned GetDeepestCommonScope(unsigned A, unsigned B);
+};
+} // end anonymous namespace
+
+
+JumpScopeChecker::JumpScopeChecker(Stmt *Body, Sema &s) : S(s) {
+ // Add a scope entry for function scope.
+ Scopes.push_back(GotoScope(~0U, ~0U, ~0U, SourceLocation()));
+
+ // Build information for the top level compound statement, so that we have a
+ // defined scope record for every "goto" and label.
+ unsigned BodyParentScope = 0;
+ BuildScopeInformation(Body, BodyParentScope);
+
+ // Check that all jumps we saw are kosher.
+ VerifyJumps();
+ VerifyIndirectJumps();
+}
+
+/// GetDeepestCommonScope - Finds the innermost scope enclosing the
+/// two scopes.
+unsigned JumpScopeChecker::GetDeepestCommonScope(unsigned A, unsigned B) {
+ while (A != B) {
+ // Inner scopes are created after outer scopes and therefore have
+ // higher indices.
+ if (A < B) {
+ assert(Scopes[B].ParentScope < B);
+ B = Scopes[B].ParentScope;
+ } else {
+ assert(Scopes[A].ParentScope < A);
+ A = Scopes[A].ParentScope;
+ }
+ }
+ return A;
+}
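+// Worked example: with Scopes[1].ParentScope == 0 and Scopes[2].ParentScope
+// == 1, GetDeepestCommonScope(2, 1) walks 2 -> 1 and returns 1, while
+// GetDeepestCommonScope(2, 0) walks 2 -> 1 -> 0 and returns the function
+// scope 0.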
+
+typedef std::pair<unsigned,unsigned> ScopePair;
+
+/// GetDiagForGotoScopeDecl - If this decl induces a new goto scope, return the
+/// pair of notes to emit for jumps into and (indirectly) out of that scope.
+/// If not, return a pair of zeros.
+static ScopePair GetDiagForGotoScopeDecl(ASTContext &Context, const Decl *D) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ unsigned InDiag = 0, OutDiag = 0;
+ if (VD->getType()->isVariablyModifiedType())
+ InDiag = diag::note_protected_by_vla;
+
+ if (VD->hasAttr<BlocksAttr>())
+ return ScopePair(diag::note_protected_by___block,
+ diag::note_exits___block);
+
+ if (VD->hasAttr<CleanupAttr>())
+ return ScopePair(diag::note_protected_by_cleanup,
+ diag::note_exits_cleanup);
+
+ if (Context.getLangOpts().ObjCAutoRefCount && VD->hasLocalStorage()) {
+ switch (VD->getType().getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ return ScopePair(diag::note_protected_by_objc_ownership,
+ diag::note_exits_objc_ownership);
+ }
+ }
+
+ if (Context.getLangOpts().CPlusPlus && VD->hasLocalStorage()) {
+ // C++11 [stmt.dcl]p3:
+ // A program that jumps from a point where a variable with automatic
+ // storage duration is not in scope to a point where it is in scope
+ // is ill-formed unless the variable has scalar type, class type with
+ // a trivial default constructor and a trivial destructor, a
+ // cv-qualified version of one of these types, or an array of one of
+ // the preceding types and is declared without an initializer.
+
+ // C++03 [stmt.dcl]p3:
+ // A program that jumps from a point where a local variable
+ // with automatic storage duration is not in scope to a point
+ // where it is in scope is ill-formed unless the variable has
+ // POD type and is declared without an initializer.
+
+ if (const Expr *init = VD->getInit()) {
+ // We actually give variables of record type (or array thereof)
+ // an initializer even if that initializer only calls a trivial
+ // ctor. Detect that case.
+ // FIXME: With generalized initializer lists, this may
+ // classify "X x{};" as having no initializer.
+ unsigned inDiagToUse = diag::note_protected_by_variable_init;
+
+ const CXXRecordDecl *record = 0;
+
+ if (const CXXConstructExpr *cce = dyn_cast<CXXConstructExpr>(init)) {
+ const CXXConstructorDecl *ctor = cce->getConstructor();
+ record = ctor->getParent();
+
+ if (ctor->isTrivial() && ctor->isDefaultConstructor()) {
+ if (!record->hasTrivialDestructor())
+ inDiagToUse = diag::note_protected_by_variable_nontriv_destructor;
+ else if (!record->isPOD())
+ inDiagToUse = diag::note_protected_by_variable_non_pod;
+ else
+ inDiagToUse = 0;
+ }
+ } else if (VD->getType()->isArrayType()) {
+ record = VD->getType()->getBaseElementTypeUnsafe()
+ ->getAsCXXRecordDecl();
+ }
+
+ if (inDiagToUse)
+ InDiag = inDiagToUse;
+
+ // Also object to indirect jumps which leave scopes with dtors.
+ if (record && !record->hasTrivialDestructor())
+ OutDiag = diag::note_exits_dtor;
+ }
+ }
+
+ return ScopePair(InDiag, OutDiag);
+ }
+
+ if (const TypedefDecl *TD = dyn_cast<TypedefDecl>(D)) {
+ if (TD->getUnderlyingType()->isVariablyModifiedType())
+ return ScopePair(diag::note_protected_by_vla_typedef, 0);
+ }
+
+ if (const TypeAliasDecl *TD = dyn_cast<TypeAliasDecl>(D)) {
+ if (TD->getUnderlyingType()->isVariablyModifiedType())
+ return ScopePair(diag::note_protected_by_vla_type_alias, 0);
+ }
+
+ return ScopePair(0U, 0U);
+}
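+// For example, jumping over 'int a[n];' yields a scope whose InDiag is
+// note_protected_by_vla (and typically no OutDiag), while jumping over a
+// __block variable yields both note_protected_by___block and
+// note_exits___block, so indirect jumps out of that scope are flagged too.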
+
+/// \brief Build scope information for a declaration that is part of a DeclStmt.
+void JumpScopeChecker::BuildScopeInformation(Decl *D, unsigned &ParentScope) {
+ // If this decl causes a new scope, push and switch to it.
+ std::pair<unsigned,unsigned> Diags = GetDiagForGotoScopeDecl(S.Context, D);
+ if (Diags.first || Diags.second) {
+ Scopes.push_back(GotoScope(ParentScope, Diags.first, Diags.second,
+ D->getLocation()));
+ ParentScope = Scopes.size()-1;
+ }
+
+ // If the decl has an initializer, walk it with the potentially new
+ // scope we just installed.
+ if (VarDecl *VD = dyn_cast<VarDecl>(D))
+ if (Expr *Init = VD->getInit())
+ BuildScopeInformation(Init, ParentScope);
+}
+
+/// \brief Build scope information for a variable captured by a block literal.
+void JumpScopeChecker::BuildScopeInformation(VarDecl *D,
+ const BlockDecl *BDecl,
+ unsigned &ParentScope) {
+ // exclude captured __block variables; there's no destructor
+ // associated with the block literal for them.
+ if (D->hasAttr<BlocksAttr>())
+ return;
+ QualType T = D->getType();
+ QualType::DestructionKind destructKind = T.isDestructedType();
+ if (destructKind != QualType::DK_none) {
+ std::pair<unsigned,unsigned> Diags;
+ switch (destructKind) {
+ case QualType::DK_cxx_destructor:
+ Diags = ScopePair(diag::note_enters_block_captures_cxx_obj,
+ diag::note_exits_block_captures_cxx_obj);
+ break;
+ case QualType::DK_objc_strong_lifetime:
+ Diags = ScopePair(diag::note_enters_block_captures_strong,
+ diag::note_exits_block_captures_strong);
+ break;
+ case QualType::DK_objc_weak_lifetime:
+ Diags = ScopePair(diag::note_enters_block_captures_weak,
+ diag::note_exits_block_captures_weak);
+ break;
+ case QualType::DK_none:
+ llvm_unreachable("non-lifetime captured variable");
+ }
+ SourceLocation Loc = D->getLocation();
+ if (Loc.isInvalid())
+ Loc = BDecl->getLocation();
+ Scopes.push_back(GotoScope(ParentScope,
+ Diags.first, Diags.second, Loc));
+ ParentScope = Scopes.size()-1;
+ }
+}
+
+/// BuildScopeInformation - Walk the given statement, which is known to form a
+/// coherent VLA scope with the specified parent scope, adding any labels or
+/// gotos to LabelAndGotoScopes and recursively walking the AST as needed.
+void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned &origParentScope) {
+ // If this is a statement, rather than an expression, scopes within it don't
+ // propagate out into the enclosing scope. Otherwise we have to worry
+ // about block literals, which have the lifetime of their enclosing statement.
+ unsigned independentParentScope = origParentScope;
+ unsigned &ParentScope = ((isa<Expr>(S) && !isa<StmtExpr>(S))
+ ? origParentScope : independentParentScope);
+
+ bool SkipFirstSubStmt = false;
+
+ // If we found a label, remember that it is in ParentScope scope.
+ switch (S->getStmtClass()) {
+ case Stmt::AddrLabelExprClass:
+ IndirectJumpTargets.push_back(cast<AddrLabelExpr>(S)->getLabel());
+ break;
+
+ case Stmt::IndirectGotoStmtClass:
+ // "goto *&&lbl;" is a special case which we treat as equivalent
+ // to a normal goto. In addition, we don't calculate scope in the
+ // operand (to avoid recording the address-of-label use), which
+ // works only because of the restricted set of expressions which
+ // we detect as constant targets.
+ if (cast<IndirectGotoStmt>(S)->getConstantTarget()) {
+ LabelAndGotoScopes[S] = ParentScope;
+ Jumps.push_back(S);
+ return;
+ }
+
+ LabelAndGotoScopes[S] = ParentScope;
+ IndirectJumps.push_back(cast<IndirectGotoStmt>(S));
+ break;
+
+ case Stmt::SwitchStmtClass:
+ // Evaluate the condition variable before entering the scope of the switch
+ // statement.
+ if (VarDecl *Var = cast<SwitchStmt>(S)->getConditionVariable()) {
+ BuildScopeInformation(Var, ParentScope);
+ SkipFirstSubStmt = true;
+ }
+ // Fall through
+
+ case Stmt::GotoStmtClass:
+ // Remember both what scope a goto is in as well as the fact that we have
+ // it. This makes the second scan not have to walk the AST again.
+ LabelAndGotoScopes[S] = ParentScope;
+ Jumps.push_back(S);
+ break;
+
+ default:
+ break;
+ }
+
+ for (Stmt::child_range CI = S->children(); CI; ++CI) {
+ if (SkipFirstSubStmt) {
+ SkipFirstSubStmt = false;
+ continue;
+ }
+
+ Stmt *SubStmt = *CI;
+ if (SubStmt == 0) continue;
+
+ // Cases, labels, and defaults aren't "scope parents". It's also
+ // important to handle these iteratively instead of recursively in
+ // order to avoid blowing out the stack.
+ while (true) {
+ Stmt *Next;
+ if (CaseStmt *CS = dyn_cast<CaseStmt>(SubStmt))
+ Next = CS->getSubStmt();
+ else if (DefaultStmt *DS = dyn_cast<DefaultStmt>(SubStmt))
+ Next = DS->getSubStmt();
+ else if (LabelStmt *LS = dyn_cast<LabelStmt>(SubStmt))
+ Next = LS->getSubStmt();
+ else
+ break;
+
+ LabelAndGotoScopes[SubStmt] = ParentScope;
+ SubStmt = Next;
+ }
+
+ // If this is a declstmt with a VLA definition, it defines a scope from here
+ // to the end of the containing context.
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(SubStmt)) {
+ // The decl statement creates a scope if any of the decls in it are VLAs
+ // or have the cleanup attribute.
+ for (DeclStmt::decl_iterator I = DS->decl_begin(), E = DS->decl_end();
+ I != E; ++I)
+ BuildScopeInformation(*I, ParentScope);
+ continue;
+ }
+ // Disallow jumps into any part of an @try statement by pushing a scope and
+ // walking all sub-stmts in that scope.
+ if (ObjCAtTryStmt *AT = dyn_cast<ObjCAtTryStmt>(SubStmt)) {
+ unsigned newParentScope;
+ // Recursively walk the AST for the @try part.
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_objc_try,
+ diag::note_exits_objc_try,
+ AT->getAtTryLoc()));
+ if (Stmt *TryPart = AT->getTryBody())
+ BuildScopeInformation(TryPart, (newParentScope = Scopes.size()-1));
+
+ // Jump from the catch to the finally or try is not valid.
+ for (unsigned I = 0, N = AT->getNumCatchStmts(); I != N; ++I) {
+ ObjCAtCatchStmt *AC = AT->getCatchStmt(I);
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_objc_catch,
+ diag::note_exits_objc_catch,
+ AC->getAtCatchLoc()));
+ // @catches are nested and it isn't
+ BuildScopeInformation(AC->getCatchBody(),
+ (newParentScope = Scopes.size()-1));
+ }
+
+ // Jump from the finally to the try or catch is not valid.
+ if (ObjCAtFinallyStmt *AF = AT->getFinallyStmt()) {
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_objc_finally,
+ diag::note_exits_objc_finally,
+ AF->getAtFinallyLoc()));
+ BuildScopeInformation(AF, (newParentScope = Scopes.size()-1));
+ }
+
+ continue;
+ }
+
+ unsigned newParentScope;
+ // Disallow jumps into the protected statement of an @synchronized, but
+ // allow jumps into the object expression it protects.
+ if (ObjCAtSynchronizedStmt *AS = dyn_cast<ObjCAtSynchronizedStmt>(SubStmt)){
+ // Recursively walk the AST for the @synchronized object expr, it is
+ // evaluated in the normal scope.
+ BuildScopeInformation(AS->getSynchExpr(), ParentScope);
+
+ // Recursively walk the AST for the @synchronized part, protected by a new
+ // scope.
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_objc_synchronized,
+ diag::note_exits_objc_synchronized,
+ AS->getAtSynchronizedLoc()));
+ BuildScopeInformation(AS->getSynchBody(),
+ (newParentScope = Scopes.size()-1));
+ continue;
+ }
+
+ // Disallow jumps into any part of a C++ try statement. This is pretty
+ // much the same as for Obj-C.
+ if (CXXTryStmt *TS = dyn_cast<CXXTryStmt>(SubStmt)) {
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_cxx_try,
+ diag::note_exits_cxx_try,
+ TS->getSourceRange().getBegin()));
+ if (Stmt *TryBlock = TS->getTryBlock())
+ BuildScopeInformation(TryBlock, (newParentScope = Scopes.size()-1));
+
+ // Jump from the catch into the try is not allowed either.
+ for (unsigned I = 0, E = TS->getNumHandlers(); I != E; ++I) {
+ CXXCatchStmt *CS = TS->getHandler(I);
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_cxx_catch,
+ diag::note_exits_cxx_catch,
+ CS->getSourceRange().getBegin()));
+ BuildScopeInformation(CS->getHandlerBlock(),
+ (newParentScope = Scopes.size()-1));
+ }
+
+ continue;
+ }
+
+ // Disallow jumps into the protected statement of an @autoreleasepool.
+ if (ObjCAutoreleasePoolStmt *AS = dyn_cast<ObjCAutoreleasePoolStmt>(SubStmt)){
+ // Recursively walk the AST for the @autoreleasepool part, protected by a new
+ // scope.
+ Scopes.push_back(GotoScope(ParentScope,
+ diag::note_protected_by_objc_autoreleasepool,
+ diag::note_exits_objc_autoreleasepool,
+ AS->getAtLoc()));
+ BuildScopeInformation(AS->getSubStmt(), (newParentScope = Scopes.size()-1));
+ continue;
+ }
+
+ if (const BlockExpr *BE = dyn_cast<BlockExpr>(SubStmt)) {
+ const BlockDecl *BDecl = BE->getBlockDecl();
+ for (BlockDecl::capture_const_iterator ci = BDecl->capture_begin(),
+ ce = BDecl->capture_end(); ci != ce; ++ci) {
+ VarDecl *variable = ci->getVariable();
+ BuildScopeInformation(variable, BDecl, ParentScope);
+ }
+ }
+
+ // Recursively walk the AST.
+ BuildScopeInformation(SubStmt, ParentScope);
+ }
+}
+
+/// VerifyJumps - Verify each element of the Jumps array to see if they are
+/// valid, emitting diagnostics if not.
+void JumpScopeChecker::VerifyJumps() {
+ while (!Jumps.empty()) {
+ Stmt *Jump = Jumps.pop_back_val();
+
+ // With a direct goto, check the jump against the scope of its label.
+ if (GotoStmt *GS = dyn_cast<GotoStmt>(Jump)) {
+ CheckJump(GS, GS->getLabel()->getStmt(), GS->getGotoLoc(),
+ diag::err_goto_into_protected_scope,
+ diag::warn_goto_into_protected_scope,
+ diag::warn_cxx98_compat_goto_into_protected_scope);
+ continue;
+ }
+
+ // We only get indirect gotos here when they have a constant target.
+ if (IndirectGotoStmt *IGS = dyn_cast<IndirectGotoStmt>(Jump)) {
+ LabelDecl *Target = IGS->getConstantTarget();
+ CheckJump(IGS, Target->getStmt(), IGS->getGotoLoc(),
+ diag::err_goto_into_protected_scope,
+ diag::warn_goto_into_protected_scope,
+ diag::warn_cxx98_compat_goto_into_protected_scope);
+ continue;
+ }
+
+ SwitchStmt *SS = cast<SwitchStmt>(Jump);
+ for (SwitchCase *SC = SS->getSwitchCaseList(); SC;
+ SC = SC->getNextSwitchCase()) {
+ assert(LabelAndGotoScopes.count(SC) && "Case not visited?");
+ CheckJump(SS, SC, SC->getLocStart(),
+ diag::err_switch_into_protected_scope, 0,
+ diag::warn_cxx98_compat_switch_into_protected_scope);
+ }
+ }
+}
+
+/// VerifyIndirectJumps - Verify whether any possible indirect jump
+/// might cross a protection boundary. Unlike direct jumps, indirect
+/// jumps count cleanups as protection boundaries: since there's no
+/// way to know where the jump is going, we can't implicitly run the
+/// right cleanups the way we can with direct jumps.
+///
+/// Thus, an indirect jump is "trivial" if it bypasses no
+/// initializations and no teardowns. More formally, an indirect jump
+/// from A to B is trivial if the path out from A to DCA(A,B) is
+/// trivial and the path in from DCA(A,B) to B is trivial, where
+/// DCA(A,B) is the deepest common ancestor of A and B.
+/// Jump-triviality is transitive but asymmetric.
+///
+/// A path in is trivial if none of the entered scopes have an InDiag.
+/// A path out is trivial if none of the exited scopes have an OutDiag.
+///
+/// Under these definitions, this function checks that the indirect
+/// jump between A and B is trivial for every indirect goto statement A
+/// and every label B whose address was taken in the function.
+void JumpScopeChecker::VerifyIndirectJumps() {
+ if (IndirectJumps.empty()) return;
+
+ // If there aren't any address-of-label expressions in this function,
+ // complain about the first indirect goto.
+ if (IndirectJumpTargets.empty()) {
+ S.Diag(IndirectJumps[0]->getGotoLoc(),
+ diag::err_indirect_goto_without_addrlabel);
+ return;
+ }
+
+ // Collect a single representative of every scope containing an
+ // indirect goto. For most code bases, this substantially cuts
+ // down on the number of jump sites we'll have to consider later.
+ typedef std::pair<unsigned, IndirectGotoStmt*> JumpScope;
+ SmallVector<JumpScope, 32> JumpScopes;
+ {
+ llvm::DenseMap<unsigned, IndirectGotoStmt*> JumpScopesMap;
+ for (SmallVectorImpl<IndirectGotoStmt*>::iterator
+ I = IndirectJumps.begin(), E = IndirectJumps.end(); I != E; ++I) {
+ IndirectGotoStmt *IG = *I;
+ assert(LabelAndGotoScopes.count(IG) &&
+ "indirect jump didn't get added to scopes?");
+ unsigned IGScope = LabelAndGotoScopes[IG];
+ IndirectGotoStmt *&Entry = JumpScopesMap[IGScope];
+ if (!Entry) Entry = IG;
+ }
+ JumpScopes.reserve(JumpScopesMap.size());
+ for (llvm::DenseMap<unsigned, IndirectGotoStmt*>::iterator
+ I = JumpScopesMap.begin(), E = JumpScopesMap.end(); I != E; ++I)
+ JumpScopes.push_back(*I);
+ }
+
+ // Collect a single representative of every scope containing a
+ // label whose address was taken somewhere in the function.
+ // For most code bases, there will be only one such scope.
+ llvm::DenseMap<unsigned, LabelDecl*> TargetScopes;
+ for (SmallVectorImpl<LabelDecl*>::iterator
+ I = IndirectJumpTargets.begin(), E = IndirectJumpTargets.end();
+ I != E; ++I) {
+ LabelDecl *TheLabel = *I;
+ assert(LabelAndGotoScopes.count(TheLabel->getStmt()) &&
+ "Referenced label didn't get added to scopes?");
+ unsigned LabelScope = LabelAndGotoScopes[TheLabel->getStmt()];
+ LabelDecl *&Target = TargetScopes[LabelScope];
+ if (!Target) Target = TheLabel;
+ }
+
+ // For each target scope, make sure it's trivially reachable from
+ // every scope containing a jump site.
+ //
+ // A path between scopes always consists of exiting zero or more
+ // scopes, then entering zero or more scopes. We build a set of
+ // scopes S from which the target scope can be trivially
+ // entered, then verify that every jump scope can be trivially
+ // exited to reach a scope in S.
+ llvm::BitVector Reachable(Scopes.size(), false);
+ for (llvm::DenseMap<unsigned,LabelDecl*>::iterator
+ TI = TargetScopes.begin(), TE = TargetScopes.end(); TI != TE; ++TI) {
+ unsigned TargetScope = TI->first;
+ LabelDecl *TargetLabel = TI->second;
+
+ Reachable.reset();
+
+ // Mark all the enclosing scopes from which you can safely jump
+ // into the target scope. 'Min' will end up being the index of
+ // the shallowest such scope.
+ unsigned Min = TargetScope;
+ while (true) {
+ Reachable.set(Min);
+
+ // Don't go beyond the outermost scope.
+ if (Min == 0) break;
+
+ // Stop if we can't trivially enter the current scope.
+ if (Scopes[Min].InDiag) break;
+
+ Min = Scopes[Min].ParentScope;
+ }
+
+ // Walk through all the jump sites, checking that they can trivially
+ // reach this label scope.
+ for (SmallVectorImpl<JumpScope>::iterator
+ I = JumpScopes.begin(), E = JumpScopes.end(); I != E; ++I) {
+ unsigned Scope = I->first;
+
+ // Walk out the "scope chain" for this scope, looking for a scope
+ // we've marked reachable. For well-formed code this amortizes
+ // to O(JumpScopes.size() / Scopes.size()): we only iterate
+ // when we see something unmarked, and in well-formed code we
+ // mark everything we iterate past.
+ bool IsReachable = false;
+ while (true) {
+ if (Reachable.test(Scope)) {
+ // If we find something reachable, mark all the scopes we just
+ // walked through as reachable.
+ for (unsigned S = I->first; S != Scope; S = Scopes[S].ParentScope)
+ Reachable.set(S);
+ IsReachable = true;
+ break;
+ }
+
+ // Don't walk out if we've reached the top-level scope or we've
+ // gotten shallower than the shallowest reachable scope.
+ if (Scope == 0 || Scope < Min) break;
+
+ // Don't walk out through an out-diagnostic.
+ if (Scopes[Scope].OutDiag) break;
+
+ Scope = Scopes[Scope].ParentScope;
+ }
+
+ // Only diagnose if we didn't find something.
+ if (IsReachable) continue;
+
+ DiagnoseIndirectJump(I->second, I->first, TargetLabel, TargetScope);
+ }
+ }
+}
+
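The reachability walk above only ever follows parent links, so the core test can be modeled in a few dozen standalone lines. The sketch below is a deliberately simplified version (a plain std::vector<bool> in place of llvm::BitVector, and without the Min short-circuit or the path-marking memoization); MiniScope and triviallyReachable are invented names for illustration, not Clang's.

#include <cstdio>
#include <vector>

struct MiniScope {
  unsigned Parent; // index of the enclosing scope (scope 0 is outermost)
  bool InDiag;     // jumping *into* this scope needs a diagnostic
  bool OutDiag;    // jumping *out of* this scope needs a diagnostic
};

// Can an indirect goto sitting in JumpScope reach TargetScope by only
// exiting scopes it may exit and entering scopes it may enter?
static bool triviallyReachable(const std::vector<MiniScope> &Scopes,
                               unsigned JumpScope, unsigned TargetScope) {
  // Mark TargetScope and every enclosing scope from which it can be entered.
  std::vector<bool> Reachable(Scopes.size(), false);
  unsigned S = TargetScope;
  while (true) {
    Reachable[S] = true;
    if (S == 0 || Scopes[S].InDiag) break; // can't enter across an InDiag
    S = Scopes[S].Parent;
  }
  // Walk out from the jump site until we hit a marked scope or get stuck.
  S = JumpScope;
  while (true) {
    if (Reachable[S]) return true;
    if (S == 0 || Scopes[S].OutDiag) return false; // can't exit any further
    S = Scopes[S].Parent;
  }
}

int main() {
  // 0 = function body, 1 = scope with a cleanup (InDiag), 2 = plain block.
  std::vector<MiniScope> Scopes = {
      {0, false, false}, {0, true, false}, {0, false, false}};
  std::printf("%d\n", triviallyReachable(Scopes, 2, 0)); // 1: jumping out is fine
  std::printf("%d\n", triviallyReachable(Scopes, 2, 1)); // 0: would enter scope 1
  return 0;
}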
+/// Return true if a particular error+note combination must be downgraded to a
+/// warning in Microsoft mode.
+static bool IsMicrosoftJumpWarning(unsigned JumpDiag, unsigned InDiagNote) {
+ return (JumpDiag == diag::err_goto_into_protected_scope &&
+ (InDiagNote == diag::note_protected_by_variable_init ||
+ InDiagNote == diag::note_protected_by_variable_nontriv_destructor));
+}
+
+/// Return true if a particular note should be downgraded to a compatibility
+/// warning in C++11 mode.
+static bool IsCXX98CompatWarning(Sema &S, unsigned InDiagNote) {
+ return S.getLangOpts().CPlusPlus0x &&
+ InDiagNote == diag::note_protected_by_variable_non_pod;
+}
+
+/// Produce primary diagnostic for an indirect jump statement.
+static void DiagnoseIndirectJumpStmt(Sema &S, IndirectGotoStmt *Jump,
+ LabelDecl *Target, bool &Diagnosed) {
+ if (Diagnosed)
+ return;
+ S.Diag(Jump->getGotoLoc(), diag::err_indirect_goto_in_protected_scope);
+ S.Diag(Target->getStmt()->getIdentLoc(), diag::note_indirect_goto_target);
+ Diagnosed = true;
+}
+
+/// Produce note diagnostics for a jump into a protected scope.
+void JumpScopeChecker::NoteJumpIntoScopes(ArrayRef<unsigned> ToScopes) {
+ assert(!ToScopes.empty());
+ for (unsigned I = 0, E = ToScopes.size(); I != E; ++I)
+ if (Scopes[ToScopes[I]].InDiag)
+ S.Diag(Scopes[ToScopes[I]].Loc, Scopes[ToScopes[I]].InDiag);
+}
+
+/// Diagnose an indirect jump which is known to cross scopes.
+void JumpScopeChecker::DiagnoseIndirectJump(IndirectGotoStmt *Jump,
+ unsigned JumpScope,
+ LabelDecl *Target,
+ unsigned TargetScope) {
+ assert(JumpScope != TargetScope);
+
+ unsigned Common = GetDeepestCommonScope(JumpScope, TargetScope);
+ bool Diagnosed = false;
+
+ // Walk out the scope chain until we reach the common ancestor.
+ for (unsigned I = JumpScope; I != Common; I = Scopes[I].ParentScope)
+ if (Scopes[I].OutDiag) {
+ DiagnoseIndirectJumpStmt(S, Jump, Target, Diagnosed);
+ S.Diag(Scopes[I].Loc, Scopes[I].OutDiag);
+ }
+
+ SmallVector<unsigned, 10> ToScopesCXX98Compat;
+
+ // Now walk into the scopes containing the label whose address was taken.
+ for (unsigned I = TargetScope; I != Common; I = Scopes[I].ParentScope)
+ if (IsCXX98CompatWarning(S, Scopes[I].InDiag))
+ ToScopesCXX98Compat.push_back(I);
+ else if (Scopes[I].InDiag) {
+ DiagnoseIndirectJumpStmt(S, Jump, Target, Diagnosed);
+ S.Diag(Scopes[I].Loc, Scopes[I].InDiag);
+ }
+
+ // Diagnose this jump if it would be ill-formed in C++98.
+ if (!Diagnosed && !ToScopesCXX98Compat.empty()) {
+ S.Diag(Jump->getGotoLoc(),
+ diag::warn_cxx98_compat_indirect_goto_in_protected_scope);
+ S.Diag(Target->getStmt()->getIdentLoc(), diag::note_indirect_goto_target);
+ NoteJumpIntoScopes(ToScopesCXX98Compat);
+ }
+}
+
+/// CheckJump - Validate that the specified jump statement is valid: that it is
+/// jumping within or out of its current scope, not into a deeper one.
+void JumpScopeChecker::CheckJump(Stmt *From, Stmt *To, SourceLocation DiagLoc,
+ unsigned JumpDiagError, unsigned JumpDiagWarning,
+ unsigned JumpDiagCXX98Compat) {
+ assert(LabelAndGotoScopes.count(From) && "Jump didn't get added to scopes?");
+ unsigned FromScope = LabelAndGotoScopes[From];
+
+ assert(LabelAndGotoScopes.count(To) && "Jump didn't get added to scopes?");
+ unsigned ToScope = LabelAndGotoScopes[To];
+
+ // Common case: exactly the same scope, which is fine.
+ if (FromScope == ToScope) return;
+
+ unsigned CommonScope = GetDeepestCommonScope(FromScope, ToScope);
+
+ // It's okay to jump out from a nested scope.
+ if (CommonScope == ToScope) return;
+
+ // Pull out (and reverse) any scopes we might need to diagnose skipping.
+ SmallVector<unsigned, 10> ToScopesCXX98Compat;
+ SmallVector<unsigned, 10> ToScopesError;
+ SmallVector<unsigned, 10> ToScopesWarning;
+ for (unsigned I = ToScope; I != CommonScope; I = Scopes[I].ParentScope) {
+ if (S.getLangOpts().MicrosoftMode && JumpDiagWarning != 0 &&
+ IsMicrosoftJumpWarning(JumpDiagError, Scopes[I].InDiag))
+ ToScopesWarning.push_back(I);
+ else if (IsCXX98CompatWarning(S, Scopes[I].InDiag))
+ ToScopesCXX98Compat.push_back(I);
+ else if (Scopes[I].InDiag)
+ ToScopesError.push_back(I);
+ }
+
+ // Handle warnings.
+ if (!ToScopesWarning.empty()) {
+ S.Diag(DiagLoc, JumpDiagWarning);
+ NoteJumpIntoScopes(ToScopesWarning);
+ }
+
+ // Handle errors.
+ if (!ToScopesError.empty()) {
+ S.Diag(DiagLoc, JumpDiagError);
+ NoteJumpIntoScopes(ToScopesError);
+ }
+
+ // Handle -Wc++98-compat warnings if the jump is well-formed.
+ if (ToScopesError.empty() && !ToScopesCXX98Compat.empty()) {
+ S.Diag(DiagLoc, JumpDiagCXX98Compat);
+ NoteJumpIntoScopes(ToScopesCXX98Compat);
+ }
+}
+
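CheckJump's shape can be reduced the same way: find the deepest scope enclosing both the goto and the label, accept the jump if that ancestor is the label's own scope (a jump outward), and otherwise report every protected scope the jump would enter. The sketch below assumes parent-indexed scopes and prints notes rather than emitting diagnostics; commonScope stands in for GetDeepestCommonScope, and none of the names are Clang's.

#include <cstdio>
#include <vector>

struct MiniScope {
  unsigned Parent;    // enclosing scope (scope 0 is the function body)
  const char *InDiag; // why the scope can't be jumped into, or null
};

static unsigned commonScope(const std::vector<MiniScope> &Scopes,
                            unsigned A, unsigned B) {
  // Mark the chain of scopes enclosing A, then walk up from B until we hit
  // a marked scope. Scope 0 encloses everything, so this always terminates.
  std::vector<bool> OnPathFromA(Scopes.size(), false);
  for (unsigned S = A;; S = Scopes[S].Parent) {
    OnPathFromA[S] = true;
    if (S == 0) break;
  }
  unsigned S = B;
  while (!OnPathFromA[S]) S = Scopes[S].Parent;
  return S;
}

static void checkJump(const std::vector<MiniScope> &Scopes,
                      unsigned FromScope, unsigned ToScope) {
  if (FromScope == ToScope) return; // same scope: fine
  unsigned Common = commonScope(Scopes, FromScope, ToScope);
  if (Common == ToScope) return;    // jumping out of nested scopes: fine
  // Walk from the label out to the common ancestor, noting protected scopes.
  for (unsigned S = ToScope; S != Common; S = Scopes[S].Parent)
    if (Scopes[S].InDiag)
      std::printf("note: jump bypasses %s\n", Scopes[S].InDiag);
}

int main() {
  // 0 = function, 1 = block with a variable initialization, 2 = sibling block.
  std::vector<MiniScope> Scopes = {
      {0, nullptr}, {0, "variable initialization"}, {0, nullptr}};
  checkJump(Scopes, 2, 1); // prints the note: enters scope 1 from outside it
  checkJump(Scopes, 1, 0); // prints nothing: jumping outward is allowed
  return 0;
}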
+void Sema::DiagnoseInvalidJumps(Stmt *Body) {
+ (void)JumpScopeChecker(Body, *this);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/Scope.cpp b/contrib/llvm/tools/clang/lib/Sema/Scope.cpp
new file mode 100644
index 0000000..10f12ce
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/Scope.cpp
@@ -0,0 +1,71 @@
+//===- Scope.cpp - Lexical scope information --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Scope class, which is used for recording
+// information about a lexical scope.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/Scope.h"
+
+using namespace clang;
+
+void Scope::Init(Scope *parent, unsigned flags) {
+ AnyParent = parent;
+ Flags = flags;
+
+ if (parent && !(flags & FnScope)) {
+ BreakParent = parent->BreakParent;
+ ContinueParent = parent->ContinueParent;
+ } else {
+ // Control scopes do not contain the contents of nested function scopes for
+ // control flow purposes.
+ BreakParent = ContinueParent = 0;
+ }
+
+ if (parent) {
+ Depth = parent->Depth + 1;
+ PrototypeDepth = parent->PrototypeDepth;
+ PrototypeIndex = 0;
+ FnParent = parent->FnParent;
+ BlockParent = parent->BlockParent;
+ TemplateParamParent = parent->TemplateParamParent;
+ } else {
+ Depth = 0;
+ PrototypeDepth = 0;
+ PrototypeIndex = 0;
+ FnParent = BlockParent = 0;
+ TemplateParamParent = 0;
+ }
+
+ // If this scope is a function or contains breaks/continues, remember it.
+ if (flags & FnScope) FnParent = this;
+ if (flags & BreakScope) BreakParent = this;
+ if (flags & ContinueScope) ContinueParent = this;
+ if (flags & BlockScope) BlockParent = this;
+ if (flags & TemplateParamScope) TemplateParamParent = this;
+
+ // If this is a prototype scope, record that.
+ if (flags & FunctionPrototypeScope) PrototypeDepth++;
+
+ DeclsInScope.clear();
+ UsingDirectives.clear();
+ Entity = 0;
+ ErrorTrap.reset();
+}
+
+bool Scope::containedInPrototypeScope() const {
+ const Scope *S = this;
+ while (S) {
+ if (S->isFunctionPrototypeScope())
+ return true;
+ S = S->getParent();
+ }
+ return false;
+}
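The net effect of Scope::Init above is that each new scope inherits its parent's nearest break/continue/function scopes and then overrides whichever of those roles it plays itself, so a question like "where does 'break' go?" is answered by a single pointer load. A minimal standalone sketch of that propagation pattern (MiniScope here is invented; it is not clang::Scope):

#include <cassert>

struct MiniScope {
  enum Flags { Fn = 1, Break = 2, Continue = 4 };

  MiniScope *Parent = nullptr;
  MiniScope *FnParent = nullptr;
  MiniScope *BreakParent = nullptr;
  MiniScope *ContinueParent = nullptr;
  unsigned Depth = 0;

  MiniScope(MiniScope *P, unsigned F) : Parent(P) {
    if (P && !(F & Fn)) {
      // Nested function scopes don't inherit break/continue targets.
      BreakParent = P->BreakParent;
      ContinueParent = P->ContinueParent;
    }
    if (P) {
      Depth = P->Depth + 1;
      FnParent = P->FnParent;
    }
    // Claim whichever roles this scope introduces itself.
    if (F & Fn) FnParent = this;
    if (F & Break) BreakParent = this;
    if (F & Continue) ContinueParent = this;
  }
};

int main() {
  MiniScope Body(nullptr, MiniScope::Fn);                        // function body
  MiniScope Loop(&Body, MiniScope::Break | MiniScope::Continue); // for/while
  MiniScope Inner(&Loop, 0);                                     // plain block
  // 'break'/'continue' written inside the inner block still target the loop.
  assert(Inner.BreakParent == &Loop && Inner.ContinueParent == &Loop);
  assert(Inner.FnParent == &Body && Inner.Depth == 2);
  return 0;
}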
diff --git a/contrib/llvm/tools/clang/lib/Sema/Sema.cpp b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
new file mode 100644
index 0000000..fcdfcac
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/Sema.cpp
@@ -0,0 +1,1101 @@
+//===--- Sema.cpp - AST Builder and Semantic Analysis Implementation ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the actions class which performs semantic analysis and
+// builds an AST out of a parse stream.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "TargetAttributesSema.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/APFloat.h"
+#include "clang/Sema/CXXFieldCollector.h"
+#include "clang/Sema/TemplateDeduction.h"
+#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/Sema/ObjCMethodList.h"
+#include "clang/Sema/PrettyDeclStackTrace.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+using namespace clang;
+using namespace sema;
+
+FunctionScopeInfo::~FunctionScopeInfo() { }
+
+void FunctionScopeInfo::Clear() {
+ HasBranchProtectedScope = false;
+ HasBranchIntoScope = false;
+ HasIndirectGoto = false;
+
+ SwitchStack.clear();
+ Returns.clear();
+ ErrorTrap.reset();
+ PossiblyUnreachableDiags.clear();
+}
+
+BlockScopeInfo::~BlockScopeInfo() { }
+LambdaScopeInfo::~LambdaScopeInfo() { }
+
+PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
+ const Preprocessor &PP) {
+ PrintingPolicy Policy = Context.getPrintingPolicy();
+ Policy.Bool = Context.getLangOpts().Bool;
+ if (!Policy.Bool) {
+ if (MacroInfo *BoolMacro = PP.getMacroInfo(&Context.Idents.get("bool"))) {
+ Policy.Bool = BoolMacro->isObjectLike() &&
+ BoolMacro->getNumTokens() == 1 &&
+ BoolMacro->getReplacementToken(0).is(tok::kw__Bool);
+ }
+ }
+
+ return Policy;
+}
+
+void Sema::ActOnTranslationUnitScope(Scope *S) {
+ TUScope = S;
+ PushDeclContext(S, Context.getTranslationUnitDecl());
+
+ VAListTagName = PP.getIdentifierInfo("__va_list_tag");
+}
+
+Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
+ TranslationUnitKind TUKind,
+ CodeCompleteConsumer *CodeCompleter)
+ : TheTargetAttributesSema(0), FPFeatures(pp.getLangOpts()),
+ LangOpts(pp.getLangOpts()), PP(pp), Context(ctxt), Consumer(consumer),
+ Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()),
+ CollectStats(false), ExternalSource(0), CodeCompleter(CodeCompleter),
+ CurContext(0), OriginalLexicalContext(0),
+ PackContext(0), MSStructPragmaOn(false), VisContext(0),
+ ExprNeedsCleanups(false), LateTemplateParser(0), OpaqueParser(0),
+ IdResolver(pp), StdInitializerList(0), CXXTypeInfoDecl(0), MSVCGuidDecl(0),
+ NSNumberDecl(0), NSArrayDecl(0), ArrayWithObjectsMethod(0),
+ NSDictionaryDecl(0), DictionaryWithObjectsMethod(0),
+ GlobalNewDeleteDeclared(false),
+ ObjCShouldCallSuperDealloc(false),
+ ObjCShouldCallSuperFinalize(false),
+ TUKind(TUKind),
+ NumSFINAEErrors(0), InFunctionDeclarator(0), SuppressAccessChecking(false),
+ AccessCheckingSFINAE(false), InNonInstantiationSFINAEContext(false),
+ NonInstantiationEntries(0), ArgumentPackSubstitutionIndex(-1),
+ CurrentInstantiationScope(0), TyposCorrected(0),
+ AnalysisWarnings(*this)
+{
+ TUScope = 0;
+ LoadedExternalKnownNamespaces = false;
+ for (unsigned I = 0; I != NSAPI::NumNSNumberLiteralMethods; ++I)
+ NSNumberLiteralMethods[I] = 0;
+
+ if (getLangOpts().ObjC1)
+ NSAPIObj.reset(new NSAPI(Context));
+
+ if (getLangOpts().CPlusPlus)
+ FieldCollector.reset(new CXXFieldCollector());
+
+ // Tell diagnostics how to render things from the AST library.
+ PP.getDiagnostics().SetArgToStringFn(&FormatASTNodeDiagnosticArgument,
+ &Context);
+
+ ExprEvalContexts.push_back(
+ ExpressionEvaluationContextRecord(PotentiallyEvaluated, 0,
+ false, 0, false));
+
+ FunctionScopes.push_back(new FunctionScopeInfo(Diags));
+}
+
+void Sema::Initialize() {
+ // Tell the AST consumer about this Sema object.
+ Consumer.Initialize(Context);
+
+ // FIXME: Isn't this redundant with the initialization above?
+ if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
+ SC->InitializeSema(*this);
+
+ // Tell the external Sema source about this Sema object.
+ if (ExternalSemaSource *ExternalSema
+ = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
+ ExternalSema->InitializeSema(*this);
+
+ // Initialize predefined 128-bit integer types, if needed.
+ if (PP.getTargetInfo().getPointerWidth(0) >= 64) {
+ // If either of the 128-bit integer types is unavailable to name lookup,
+ // define it now.
+ DeclarationName Int128 = &Context.Idents.get("__int128_t");
+ if (IdResolver.begin(Int128) == IdResolver.end())
+ PushOnScopeChains(Context.getInt128Decl(), TUScope);
+
+ DeclarationName UInt128 = &Context.Idents.get("__uint128_t");
+ if (IdResolver.begin(UInt128) == IdResolver.end())
+ PushOnScopeChains(Context.getUInt128Decl(), TUScope);
+ }
+
+
+ // Initialize predefined Objective-C types:
+ if (PP.getLangOpts().ObjC1) {
+ // If 'SEL' does not yet refer to any declarations, make it refer to the
+ // predefined 'SEL'.
+ DeclarationName SEL = &Context.Idents.get("SEL");
+ if (IdResolver.begin(SEL) == IdResolver.end())
+ PushOnScopeChains(Context.getObjCSelDecl(), TUScope);
+
+ // If 'id' does not yet refer to any declarations, make it refer to the
+ // predefined 'id'.
+ DeclarationName Id = &Context.Idents.get("id");
+ if (IdResolver.begin(Id) == IdResolver.end())
+ PushOnScopeChains(Context.getObjCIdDecl(), TUScope);
+
+ // Create the built-in typedef for 'Class'.
+ DeclarationName Class = &Context.Idents.get("Class");
+ if (IdResolver.begin(Class) == IdResolver.end())
+ PushOnScopeChains(Context.getObjCClassDecl(), TUScope);
+
+ // Create the built-in forward declaration for 'Protocol'.
+ DeclarationName Protocol = &Context.Idents.get("Protocol");
+ if (IdResolver.begin(Protocol) == IdResolver.end())
+ PushOnScopeChains(Context.getObjCProtocolDecl(), TUScope);
+ }
+}
+
+Sema::~Sema() {
+ if (PackContext) FreePackedContext();
+ if (VisContext) FreeVisContext();
+ delete TheTargetAttributesSema;
+ MSStructPragmaOn = false;
+ // Kill all the active scopes.
+ for (unsigned I = 1, E = FunctionScopes.size(); I != E; ++I)
+ delete FunctionScopes[I];
+ if (FunctionScopes.size() == 1)
+ delete FunctionScopes[0];
+
+ // Tell the SemaConsumer to forget about us; we're going out of scope.
+ if (SemaConsumer *SC = dyn_cast<SemaConsumer>(&Consumer))
+ SC->ForgetSema();
+
+ // Detach from the external Sema source.
+ if (ExternalSemaSource *ExternalSema
+ = dyn_cast_or_null<ExternalSemaSource>(Context.getExternalSource()))
+ ExternalSema->ForgetSema();
+}
+
+
+/// makeUnavailableInSystemHeader - There is an error in the current
+/// context. If we're still in a system header, and we can plausibly
+/// make the relevant declaration unavailable instead of erroring, do
+/// so and return true.
+bool Sema::makeUnavailableInSystemHeader(SourceLocation loc,
+ StringRef msg) {
+ // If we're not in a function, it's an error.
+ FunctionDecl *fn = dyn_cast<FunctionDecl>(CurContext);
+ if (!fn) return false;
+
+ // If we're in template instantiation, it's an error.
+ if (!ActiveTemplateInstantiations.empty())
+ return false;
+
+ // If that function's not in a system header, it's an error.
+ if (!Context.getSourceManager().isInSystemHeader(loc))
+ return false;
+
+ // If the function is already unavailable, it's not an error.
+ if (fn->hasAttr<UnavailableAttr>()) return true;
+
+ fn->addAttr(new (Context) UnavailableAttr(loc, Context, msg));
+ return true;
+}
+
+ASTMutationListener *Sema::getASTMutationListener() const {
+ return getASTConsumer().GetASTMutationListener();
+}
+
+/// \brief Print out statistics about the semantic analysis.
+void Sema::PrintStats() const {
+ llvm::errs() << "\n*** Semantic Analysis Stats:\n";
+ llvm::errs() << NumSFINAEErrors << " SFINAE diagnostics trapped.\n";
+
+ BumpAlloc.PrintStats();
+ AnalysisWarnings.PrintStats();
+}
+
+/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
+/// If there is already an implicit cast, merge into the existing one.
+/// The result is of the given category.
+ExprResult Sema::ImpCastExprToType(Expr *E, QualType Ty,
+ CastKind Kind, ExprValueKind VK,
+ const CXXCastPath *BasePath,
+ CheckedConversionKind CCK) {
+#ifndef NDEBUG
+ if (VK == VK_RValue && !E->isRValue()) {
+ switch (Kind) {
+ default:
+ assert(0 && "can't implicitly cast lvalue to rvalue with this cast kind");
+ case CK_LValueToRValue:
+ case CK_ArrayToPointerDecay:
+ case CK_FunctionToPointerDecay:
+ case CK_ToVoid:
+ break;
+ }
+ }
+ assert((VK == VK_RValue || !E->isRValue()) && "can't cast rvalue to lvalue");
+#endif
+
+ QualType ExprTy = Context.getCanonicalType(E->getType());
+ QualType TypeTy = Context.getCanonicalType(Ty);
+
+ if (ExprTy == TypeTy)
+ return Owned(E);
+
+ if (getLangOpts().ObjCAutoRefCount)
+ CheckObjCARCConversion(SourceRange(), Ty, E, CCK);
+
+ // If this is a derived-to-base cast through a virtual base, we
+ // need a vtable.
+ if (Kind == CK_DerivedToBase &&
+ BasePathInvolvesVirtualBase(*BasePath)) {
+ QualType T = E->getType();
+ if (const PointerType *Pointer = T->getAs<PointerType>())
+ T = Pointer->getPointeeType();
+ if (const RecordType *RecordTy = T->getAs<RecordType>())
+ MarkVTableUsed(E->getLocStart(),
+ cast<CXXRecordDecl>(RecordTy->getDecl()));
+ }
+
+ if (ImplicitCastExpr *ImpCast = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ImpCast->getCastKind() == Kind && (!BasePath || BasePath->empty())) {
+ ImpCast->setType(Ty);
+ ImpCast->setValueKind(VK);
+ return Owned(E);
+ }
+ }
+
+ return Owned(ImplicitCastExpr::Create(Context, Ty, Kind, E, BasePath, VK));
+}
+
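The interesting step in ImpCastExprToType is the last one: rather than stacking implicit casts, an existing outermost implicit cast of the same kind is simply retyped. The sketch below models only that folding step with a toy expression node and string "types"; it ignores value kinds, base paths and the ARC check, and none of the classes are Clang's AST.

#include <cassert>
#include <memory>
#include <string>

struct Node {
  std::string Type;
  explicit Node(std::string T) : Type(std::move(T)) {}
  virtual ~Node() = default;
};

struct ImplicitCast : Node {
  int Kind;
  std::unique_ptr<Node> Operand;
  ImplicitCast(std::string T, int K, std::unique_ptr<Node> Op)
      : Node(std::move(T)), Kind(K), Operand(std::move(Op)) {}
};

static std::unique_ptr<Node> impCastToType(std::unique_ptr<Node> E,
                                           const std::string &Ty, int Kind) {
  if (E->Type == Ty)
    return E; // already has the right type
  if (auto *IC = dynamic_cast<ImplicitCast *>(E.get()))
    if (IC->Kind == Kind) {
      IC->Type = Ty; // merge into the existing cast instead of nesting
      return E;
    }
  return std::unique_ptr<Node>(new ImplicitCast(Ty, Kind, std::move(E)));
}

int main() {
  const int IntegralCast = 1;
  std::unique_ptr<Node> E(new Node("short"));
  E = impCastToType(std::move(E), "int", IntegralCast);  // wraps once
  E = impCastToType(std::move(E), "long", IntegralCast); // retypes, no nesting
  auto *IC = dynamic_cast<ImplicitCast *>(E.get());
  assert(IC && IC->Type == "long");
  assert(!dynamic_cast<ImplicitCast *>(IC->Operand.get())); // single cast node
  return 0;
}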
+/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
+/// to the conversion from scalar type ScalarTy to the Boolean type.
+CastKind Sema::ScalarTypeToBooleanCastKind(QualType ScalarTy) {
+ switch (ScalarTy->getScalarTypeKind()) {
+ case Type::STK_Bool: return CK_NoOp;
+ case Type::STK_CPointer: return CK_PointerToBoolean;
+ case Type::STK_BlockPointer: return CK_PointerToBoolean;
+ case Type::STK_ObjCObjectPointer: return CK_PointerToBoolean;
+ case Type::STK_MemberPointer: return CK_MemberPointerToBoolean;
+ case Type::STK_Integral: return CK_IntegralToBoolean;
+ case Type::STK_Floating: return CK_FloatingToBoolean;
+ case Type::STK_IntegralComplex: return CK_IntegralComplexToBoolean;
+ case Type::STK_FloatingComplex: return CK_FloatingComplexToBoolean;
+ }
+ return CK_Invalid;
+}
+
+/// \brief Used to prune the decls of Sema's UnusedFileScopedDecls vector.
+static bool ShouldRemoveFromUnused(Sema *SemaRef, const DeclaratorDecl *D) {
+ if (D->isUsed())
+ return true;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // UnusedFileScopedDecls stores the first declaration.
+ // The declaration may have become a definition, so check again.
+ const FunctionDecl *DeclToCheck;
+ if (FD->hasBody(DeclToCheck))
+ return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
+
+ // Later redecls may add new information resulting in not having to warn,
+ // so check again.
+ DeclToCheck = FD->getMostRecentDecl();
+ if (DeclToCheck != FD)
+ return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ // UnusedFileScopedDecls stores the first declaration.
+ // The declaration may have become a definition, so check again.
+ const VarDecl *DeclToCheck = VD->getDefinition();
+ if (DeclToCheck)
+ return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
+
+ // Later redecls may add new information resulting in not having to warn,
+ // so check again.
+ DeclToCheck = VD->getMostRecentDecl();
+ if (DeclToCheck != VD)
+ return !SemaRef->ShouldWarnIfUnusedFileScopedDecl(DeclToCheck);
+ }
+
+ return false;
+}
+
+namespace {
+ struct UndefinedInternal {
+ NamedDecl *decl;
+ FullSourceLoc useLoc;
+
+ UndefinedInternal(NamedDecl *decl, FullSourceLoc useLoc)
+ : decl(decl), useLoc(useLoc) {}
+ };
+
+ bool operator<(const UndefinedInternal &l, const UndefinedInternal &r) {
+ return l.useLoc.isBeforeInTranslationUnitThan(r.useLoc);
+ }
+}
+
+/// checkUndefinedInternals - Check for undefined objects with internal linkage.
+static void checkUndefinedInternals(Sema &S) {
+ if (S.UndefinedInternals.empty()) return;
+
+ // Collect all the still-undefined entities with internal linkage.
+ SmallVector<UndefinedInternal, 16> undefined;
+ for (llvm::DenseMap<NamedDecl*,SourceLocation>::iterator
+ i = S.UndefinedInternals.begin(), e = S.UndefinedInternals.end();
+ i != e; ++i) {
+ NamedDecl *decl = i->first;
+
+ // Ignore declarations that have become invalid.
+ if (decl->isInvalidDecl()) continue;
+
+ // __attribute__((weakref)) is basically a definition.
+ if (decl->hasAttr<WeakRefAttr>()) continue;
+
+ if (FunctionDecl *fn = dyn_cast<FunctionDecl>(decl)) {
+ if (fn->isPure() || fn->hasBody())
+ continue;
+ } else {
+ if (cast<VarDecl>(decl)->hasDefinition() != VarDecl::DeclarationOnly)
+ continue;
+ }
+
+ // We build a FullSourceLoc so that we can sort with array_pod_sort.
+ FullSourceLoc loc(i->second, S.Context.getSourceManager());
+ undefined.push_back(UndefinedInternal(decl, loc));
+ }
+
+ if (undefined.empty()) return;
+
+ // Sort (in order of use site) so that we're not (as) dependent on
+ // the iteration order through an llvm::DenseMap.
+ llvm::array_pod_sort(undefined.begin(), undefined.end());
+
+ for (SmallVectorImpl<UndefinedInternal>::iterator
+ i = undefined.begin(), e = undefined.end(); i != e; ++i) {
+ NamedDecl *decl = i->decl;
+ S.Diag(decl->getLocation(), diag::warn_undefined_internal)
+ << isa<VarDecl>(decl) << decl;
+ S.Diag(i->useLoc, diag::note_used_here);
+ }
+}
+
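checkUndefinedInternals deliberately copies the map entries into a vector and sorts them by use location, so the order of the emitted warnings does not depend on hash-map iteration order. The same pattern with plain standard containers (the real code uses llvm::DenseMap and llvm::array_pod_sort; the names and data below are made up):

#include <algorithm>
#include <cstdio>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

int main() {
  // Decl name -> source offset of the first use (a stand-in for SourceLocation).
  std::unordered_map<std::string, unsigned> UndefinedUses = {
      {"helper", 120}, {"table", 40}, {"callback", 300}};

  // Copy into a vector so a deterministic order can be imposed.
  std::vector<std::pair<std::string, unsigned>> Sorted(UndefinedUses.begin(),
                                                       UndefinedUses.end());
  std::sort(Sorted.begin(), Sorted.end(),
            [](const std::pair<std::string, unsigned> &L,
               const std::pair<std::string, unsigned> &R) {
              return L.second < R.second; // order by point of use
            });

  for (const auto &Entry : Sorted)
    std::printf("warning: '%s' has internal linkage but is never defined "
                "(first used at offset %u)\n",
                Entry.first.c_str(), Entry.second);
  return 0;
}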
+void Sema::LoadExternalWeakUndeclaredIdentifiers() {
+ if (!ExternalSource)
+ return;
+
+ SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
+ ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs);
+ for (unsigned I = 0, N = WeakIDs.size(); I != N; ++I) {
+ llvm::DenseMap<IdentifierInfo*,WeakInfo>::iterator Pos
+ = WeakUndeclaredIdentifiers.find(WeakIDs[I].first);
+ if (Pos != WeakUndeclaredIdentifiers.end())
+ continue;
+
+ WeakUndeclaredIdentifiers.insert(WeakIDs[I]);
+ }
+}
+
+/// ActOnEndOfTranslationUnit - This is called at the very end of the
+/// translation unit when EOF is reached and all but the top-level scope is
+/// popped.
+void Sema::ActOnEndOfTranslationUnit() {
+ // Only complete translation units define vtables and perform implicit
+ // instantiations.
+ if (TUKind == TU_Complete) {
+ DiagnoseUseOfUnimplementedSelectors();
+
+ // If any dynamic classes have their key function defined within
+ // this translation unit, then those vtables are considered "used" and must
+ // be emitted.
+ for (DynamicClassesType::iterator I = DynamicClasses.begin(ExternalSource),
+ E = DynamicClasses.end();
+ I != E; ++I) {
+ assert(!(*I)->isDependentType() &&
+ "Should not see dependent types here!");
+ if (const CXXMethodDecl *KeyFunction = Context.getKeyFunction(*I)) {
+ const FunctionDecl *Definition = 0;
+ if (KeyFunction->hasBody(Definition))
+ MarkVTableUsed(Definition->getLocation(), *I, true);
+ }
+ }
+
+ // If DefineUsedVTables ends up marking any virtual member functions, it
+ // might lead to more pending template instantiations, which we then need
+ // to instantiate.
+ DefineUsedVTables();
+
+ // C++: Perform implicit template instantiations.
+ //
+ // FIXME: When we perform these implicit instantiations, we do not
+ // carefully keep track of the point of instantiation (C++ [temp.point]).
+ // This means that name lookup that occurs within the template
+ // instantiation will always happen at the end of the translation unit,
+ // so it will find some names that should not be found. Although this is
+ // common behavior for C++ compilers, it is technically wrong. In the
+ // future, we either need to be able to filter the results of name lookup
+ // or we need to perform template instantiations earlier.
+ PerformPendingInstantiations();
+ }
+
+ // Remove file scoped decls that turned out to be used.
+ UnusedFileScopedDecls.erase(std::remove_if(UnusedFileScopedDecls.begin(0,
+ true),
+ UnusedFileScopedDecls.end(),
+ std::bind1st(std::ptr_fun(ShouldRemoveFromUnused),
+ this)),
+ UnusedFileScopedDecls.end());
+
+ if (TUKind == TU_Prefix) {
+ // Translation unit prefixes don't need any of the checking below.
+ TUScope = 0;
+ return;
+ }
+
+ // Check for #pragma weak identifiers that were never declared.
+ // FIXME: This will cause diagnostics to be emitted in a non-deterministic
+ // order! Iterating over a DenseMap like this is bad.
+ LoadExternalWeakUndeclaredIdentifiers();
+ for (llvm::DenseMap<IdentifierInfo*,WeakInfo>::iterator
+ I = WeakUndeclaredIdentifiers.begin(),
+ E = WeakUndeclaredIdentifiers.end(); I != E; ++I) {
+ if (I->second.getUsed()) continue;
+
+ Diag(I->second.getLocation(), diag::warn_weak_identifier_undeclared)
+ << I->first;
+ }
+
+ if (TUKind == TU_Module) {
+ // If we are building a module, resolve all of the exported declarations
+ // now.
+ if (Module *CurrentModule = PP.getCurrentModule()) {
+ ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
+
+ llvm::SmallVector<Module *, 2> Stack;
+ Stack.push_back(CurrentModule);
+ while (!Stack.empty()) {
+ Module *Mod = Stack.back();
+ Stack.pop_back();
+
+ // Resolve the exported declarations.
+ // FIXME: Actually complain, once we figure out how to teach the
+ // diagnostic client to deal with complaints in the module map at this
+ // point.
+ ModMap.resolveExports(Mod, /*Complain=*/false);
+
+ // Queue the submodules, so their exports will also be resolved.
+ for (Module::submodule_iterator Sub = Mod->submodule_begin(),
+ SubEnd = Mod->submodule_end();
+ Sub != SubEnd; ++Sub) {
+ Stack.push_back(*Sub);
+ }
+ }
+ }
+
+ // Modules don't need any of the checking below.
+ TUScope = 0;
+ return;
+ }
+
+ // C99 6.9.2p2:
+ // A declaration of an identifier for an object that has file
+ // scope without an initializer, and without a storage-class
+ // specifier or with the storage-class specifier static,
+ // constitutes a tentative definition. If a translation unit
+ // contains one or more tentative definitions for an identifier,
+ // and the translation unit contains no external definition for
+ // that identifier, then the behavior is exactly as if the
+ // translation unit contains a file scope declaration of that
+ // identifier, with the composite type as of the end of the
+ // translation unit, with an initializer equal to 0.
+ llvm::SmallSet<VarDecl *, 32> Seen;
+ for (TentativeDefinitionsType::iterator
+ T = TentativeDefinitions.begin(ExternalSource),
+ TEnd = TentativeDefinitions.end();
+ T != TEnd; ++T)
+ {
+ VarDecl *VD = (*T)->getActingDefinition();
+
+ // If the tentative definition was completed, getActingDefinition() returns
+ // null. If we've already seen this variable before, insert()'s second
+ // return value is false.
+ if (VD == 0 || VD->isInvalidDecl() || !Seen.insert(VD))
+ continue;
+
+ if (const IncompleteArrayType *ArrayT
+ = Context.getAsIncompleteArrayType(VD->getType())) {
+ if (RequireCompleteType(VD->getLocation(),
+ ArrayT->getElementType(),
+ diag::err_tentative_def_incomplete_type_arr)) {
+ VD->setInvalidDecl();
+ continue;
+ }
+
+ // Set the length of the array to 1 (C99 6.9.2p5).
+ Diag(VD->getLocation(), diag::warn_tentative_incomplete_array);
+ llvm::APInt One(Context.getTypeSize(Context.getSizeType()), true);
+ QualType T = Context.getConstantArrayType(ArrayT->getElementType(),
+ One, ArrayType::Normal, 0);
+ VD->setType(T);
+ } else if (RequireCompleteType(VD->getLocation(), VD->getType(),
+ diag::err_tentative_def_incomplete_type))
+ VD->setInvalidDecl();
+
+ // Notify the consumer that we've completed a tentative definition.
+ if (!VD->isInvalidDecl())
+ Consumer.CompleteTentativeDefinition(VD);
+
+ }
+
+ if (LangOpts.CPlusPlus0x &&
+ Diags.getDiagnosticLevel(diag::warn_delegating_ctor_cycle,
+ SourceLocation())
+ != DiagnosticsEngine::Ignored)
+ CheckDelegatingCtorCycles();
+
+ // If there were errors, disable 'unused' warnings since they will mostly be
+ // noise.
+ if (!Diags.hasErrorOccurred()) {
+ // Output warning for unused file scoped decls.
+ for (UnusedFileScopedDeclsType::iterator
+ I = UnusedFileScopedDecls.begin(ExternalSource),
+ E = UnusedFileScopedDecls.end(); I != E; ++I) {
+ if (ShouldRemoveFromUnused(this, *I))
+ continue;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
+ const FunctionDecl *DiagD;
+ if (!FD->hasBody(DiagD))
+ DiagD = FD;
+ if (DiagD->isDeleted())
+ continue; // Deleted functions are supposed to be unused.
+ if (DiagD->isReferenced()) {
+ if (isa<CXXMethodDecl>(DiagD))
+ Diag(DiagD->getLocation(), diag::warn_unneeded_member_function)
+ << DiagD->getDeclName();
+ else
+ Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
+ << /*function*/0 << DiagD->getDeclName();
+ } else {
+ Diag(DiagD->getLocation(),
+ isa<CXXMethodDecl>(DiagD) ? diag::warn_unused_member_function
+ : diag::warn_unused_function)
+ << DiagD->getDeclName();
+ }
+ } else {
+ const VarDecl *DiagD = cast<VarDecl>(*I)->getDefinition();
+ if (!DiagD)
+ DiagD = cast<VarDecl>(*I);
+ if (DiagD->isReferenced()) {
+ Diag(DiagD->getLocation(), diag::warn_unneeded_internal_decl)
+ << /*variable*/1 << DiagD->getDeclName();
+ } else {
+ Diag(DiagD->getLocation(), diag::warn_unused_variable)
+ << DiagD->getDeclName();
+ }
+ }
+ }
+
+ checkUndefinedInternals(*this);
+ }
+
+ // Check we've noticed that we're no longer parsing the initializer for every
+ // variable. If we miss cases, then at best we have a performance issue and
+ // at worst a rejects-valid bug.
+ assert(ParsingInitForAutoVars.empty() &&
+ "Didn't unmark var as having its initializer parsed");
+
+ TUScope = 0;
+}
+
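One concrete piece of the end-of-translation-unit work above is the C99 tentative-definition fixup: a file-scope array declared without a bound and never completed elsewhere is given length 1 (C99 6.9.2p5). The sketch below models only that rule with an invented TentativeVar record; it is not the Sema logic itself, which operates on real types and also diagnoses incomplete element types.

#include <cstdio>
#include <vector>

struct TentativeVar {
  const char *Name;
  bool CompletedElsewhere; // a real definition appeared later in the TU
  int ArrayLength;         // -1 means "array of unknown bound", e.g. int x[];
};

int main() {
  std::vector<TentativeVar> TentativeDefs = {
      {"buffer", false, -1}, // int buffer[];  -> behaves as int buffer[1];
      {"counts", false, 8},  // int counts[8]; -> already complete
      {"table", true, -1}};  // defined later, nothing to do here

  for (TentativeVar &V : TentativeDefs) {
    if (V.CompletedElsewhere)
      continue; // the later definition wins
    if (V.ArrayLength < 0) {
      std::printf("note: tentative array '%s' completed with length 1\n",
                  V.Name);
      V.ArrayLength = 1; // C99 6.9.2p5
    }
  }
  return 0;
}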
+
+//===----------------------------------------------------------------------===//
+// Helper functions.
+//===----------------------------------------------------------------------===//
+
+DeclContext *Sema::getFunctionLevelDeclContext() {
+ DeclContext *DC = CurContext;
+
+ while (true) {
+ if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC)) {
+ DC = DC->getParent();
+ } else if (isa<CXXMethodDecl>(DC) &&
+ cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call &&
+ cast<CXXRecordDecl>(DC->getParent())->isLambda()) {
+ DC = DC->getParent()->getParent();
+ }
+ else break;
+ }
+
+ return DC;
+}
+
+/// getCurFunctionDecl - If inside of a function body, this returns a pointer
+/// to the function decl for the function being parsed. If we're currently
+/// in a 'block', this returns the containing context.
+FunctionDecl *Sema::getCurFunctionDecl() {
+ DeclContext *DC = getFunctionLevelDeclContext();
+ return dyn_cast<FunctionDecl>(DC);
+}
+
+ObjCMethodDecl *Sema::getCurMethodDecl() {
+ DeclContext *DC = getFunctionLevelDeclContext();
+ return dyn_cast<ObjCMethodDecl>(DC);
+}
+
+NamedDecl *Sema::getCurFunctionOrMethodDecl() {
+ DeclContext *DC = getFunctionLevelDeclContext();
+ if (isa<ObjCMethodDecl>(DC) || isa<FunctionDecl>(DC))
+ return cast<NamedDecl>(DC);
+ return 0;
+}
+
+void Sema::EmitCurrentDiagnostic(unsigned DiagID) {
+ // FIXME: It doesn't make sense to me that DiagID is an incoming argument here
+ // and yet we also use the current diag ID on the DiagnosticsEngine. This has
+ // been made more painfully obvious by the refactor that introduced this
+ // function, but it is possible that the incoming argument can be
+ // eliminated. If it truly cannot be (for example, there is some reentrancy
+ // issue I am not seeing yet), then there should at least be a clarifying
+ // comment somewhere.
+ if (llvm::Optional<TemplateDeductionInfo*> Info = isSFINAEContext()) {
+ switch (DiagnosticIDs::getDiagnosticSFINAEResponse(
+ Diags.getCurrentDiagID())) {
+ case DiagnosticIDs::SFINAE_Report:
+ // We'll report the diagnostic below.
+ break;
+
+ case DiagnosticIDs::SFINAE_SubstitutionFailure:
+ // Count this failure so that we know that template argument deduction
+ // has failed.
+ ++NumSFINAEErrors;
+ Diags.setLastDiagnosticIgnored();
+ Diags.Clear();
+ return;
+
+ case DiagnosticIDs::SFINAE_AccessControl: {
+ // Per C++ Core Issue 1170, access control is part of SFINAE.
+ // Additionally, the AccessCheckingSFINAE flag can be used to temporarily
+ // make access control a part of SFINAE for the purposes of checking
+ // type traits.
+ if (!AccessCheckingSFINAE && !getLangOpts().CPlusPlus0x)
+ break;
+
+ SourceLocation Loc = Diags.getCurrentDiagLoc();
+
+ // Suppress this diagnostic.
+ ++NumSFINAEErrors;
+ Diags.setLastDiagnosticIgnored();
+ Diags.Clear();
+
+ // Now the diagnostic state is clear, produce a C++98 compatibility
+ // warning.
+ Diag(Loc, diag::warn_cxx98_compat_sfinae_access_control);
+
+ // The last diagnostic which Sema produced was ignored. Suppress any
+ // notes attached to it.
+ Diags.setLastDiagnosticIgnored();
+ return;
+ }
+
+ case DiagnosticIDs::SFINAE_Suppress:
+ // Make a copy of this suppressed diagnostic and store it with the
+ // template-deduction information.
+ Diagnostic DiagInfo(&Diags);
+
+ if (*Info)
+ (*Info)->addSuppressedDiagnostic(DiagInfo.getLocation(),
+ PartialDiagnostic(DiagInfo,Context.getDiagAllocator()));
+
+ // Suppress this diagnostic.
+ Diags.setLastDiagnosticIgnored();
+ Diags.Clear();
+ return;
+ }
+ }
+
+ // Set up the context's printing policy based on our current state.
+ Context.setPrintingPolicy(getPrintingPolicy());
+
+ // Emit the diagnostic.
+ if (!Diags.EmitCurrentDiagnostic())
+ return;
+
+ // If this is not a note, and we're in a template instantiation
+ // that is different from the last template instantiation where
+ // we emitted an error, print a template instantiation
+ // backtrace.
+ if (!DiagnosticIDs::isBuiltinNote(DiagID) &&
+ !ActiveTemplateInstantiations.empty() &&
+ ActiveTemplateInstantiations.back()
+ != LastTemplateInstantiationErrorContext) {
+ PrintInstantiationStack();
+ LastTemplateInstantiationErrorContext = ActiveTemplateInstantiations.back();
+ }
+}
+
+Sema::SemaDiagnosticBuilder
+Sema::Diag(SourceLocation Loc, const PartialDiagnostic& PD) {
+ SemaDiagnosticBuilder Builder(Diag(Loc, PD.getDiagID()));
+ PD.Emit(Builder);
+
+ return Builder;
+}
+
+/// \brief Looks through the macro-expansion chain for the given
+/// location, looking for a macro expansion with the given name.
+/// If one is found, returns true and sets the location to that
+/// expansion loc.
+bool Sema::findMacroSpelling(SourceLocation &locref, StringRef name) {
+ SourceLocation loc = locref;
+ if (!loc.isMacroID()) return false;
+
+ // There's no good way right now to look at the intermediate
+ // expansions, so just jump to the expansion location.
+ loc = getSourceManager().getExpansionLoc(loc);
+
+ // If that's written with the name, stop here.
+ SmallVector<char, 16> buffer;
+ if (getPreprocessor().getSpelling(loc, buffer) == name) {
+ locref = loc;
+ return true;
+ }
+ return false;
+}
+
+/// \brief Determines the active Scope associated with the given declaration
+/// context.
+///
+/// This routine maps a declaration context to the active Scope object that
+/// represents that declaration context in the parser. It is typically used
+/// from "scope-less" code (e.g., template instantiation, lazy creation of
+/// declarations) that injects a name for name-lookup purposes and, therefore,
+/// must update the Scope.
+///
+/// \returns The scope corresponding to the given declaration context, or NULL
+/// if no such scope is open.
+Scope *Sema::getScopeForContext(DeclContext *Ctx) {
+
+ if (!Ctx)
+ return 0;
+
+ Ctx = Ctx->getPrimaryContext();
+ for (Scope *S = getCurScope(); S; S = S->getParent()) {
+ // Ignore scopes that cannot have declarations. This is important for
+ // out-of-line definitions of static class members.
+ if (S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope))
+ if (DeclContext *Entity = static_cast<DeclContext *> (S->getEntity()))
+ if (Ctx == Entity->getPrimaryContext())
+ return S;
+ }
+
+ return 0;
+}
+
+/// \brief Enter a new function scope
+void Sema::PushFunctionScope() {
+ if (FunctionScopes.size() == 1) {
+ // Use the "top" function scope rather than having to allocate
+ // memory for a new scope.
+ FunctionScopes.back()->Clear();
+ FunctionScopes.push_back(FunctionScopes.back());
+ return;
+ }
+
+ FunctionScopes.push_back(new FunctionScopeInfo(getDiagnostics()));
+}
+
+void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
+ FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics(),
+ BlockScope, Block));
+}
+
+void Sema::PushLambdaScope(CXXRecordDecl *Lambda,
+ CXXMethodDecl *CallOperator) {
+ FunctionScopes.push_back(new LambdaScopeInfo(getDiagnostics(), Lambda,
+ CallOperator));
+}
+
+void Sema::PopFunctionScopeInfo(const AnalysisBasedWarnings::Policy *WP,
+ const Decl *D, const BlockExpr *blkExpr) {
+ FunctionScopeInfo *Scope = FunctionScopes.pop_back_val();
+ assert(!FunctionScopes.empty() && "mismatched push/pop!");
+
+ // Issue any analysis-based warnings.
+ if (WP && D)
+ AnalysisWarnings.IssueWarnings(*WP, Scope, D, blkExpr);
+ else {
+ for (SmallVectorImpl<sema::PossiblyUnreachableDiag>::iterator
+ i = Scope->PossiblyUnreachableDiags.begin(),
+ e = Scope->PossiblyUnreachableDiags.end();
+ i != e; ++i) {
+ const sema::PossiblyUnreachableDiag &D = *i;
+ Diag(D.Loc, D.PD);
+ }
+ }
+
+ if (FunctionScopes.back() != Scope) {
+ delete Scope;
+ }
+}
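PushFunctionScope and PopFunctionScopeInfo cooperate on a small allocation trick: the first FunctionScopeInfo is preallocated, and for the common one-function-at-a-time case its pointer is pushed again instead of allocating, so popping must not delete a scope that is still referenced lower in the stack. A standalone sketch of that ownership pattern (ScopeStack and ScopeInfo are invented names):

#include <cassert>
#include <vector>

struct ScopeInfo {
  int Data = 0;
  void clear() { Data = 0; }
};

struct ScopeStack {
  std::vector<ScopeInfo *> Scopes{new ScopeInfo}; // slot 0 is preallocated

  void push() {
    if (Scopes.size() == 1) {
      // Reuse the preallocated scope instead of allocating a new one.
      Scopes.back()->clear();
      Scopes.push_back(Scopes.back());
      return;
    }
    Scopes.push_back(new ScopeInfo);
  }

  void pop() {
    ScopeInfo *S = Scopes.back();
    Scopes.pop_back();
    // Only free scopes that were actually allocated for this push; the
    // reused one is still referenced by slot 0.
    if (Scopes.back() != S)
      delete S;
  }

  ~ScopeStack() {
    // Mirrors the cleanup in Sema::~Sema: everything past slot 0 is freed,
    // and slot 0 only if nothing is stacked on top of it.
    for (size_t I = 1, E = Scopes.size(); I != E; ++I) delete Scopes[I];
    if (Scopes.size() == 1) delete Scopes[0];
  }
};

int main() {
  ScopeStack Stack;
  Stack.push(); // reuses slot 0, no allocation
  assert(Stack.Scopes[1] == Stack.Scopes[0]);
  Stack.push(); // nested case: a real allocation
  assert(Stack.Scopes[2] != Stack.Scopes[0]);
  Stack.pop();  // frees the nested scope
  Stack.pop();  // pops the reused pointer without freeing it
  assert(Stack.Scopes.size() == 1);
  return 0;
}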
+
+void Sema::PushCompoundScope() {
+ getCurFunction()->CompoundScopes.push_back(CompoundScopeInfo());
+}
+
+void Sema::PopCompoundScope() {
+ FunctionScopeInfo *CurFunction = getCurFunction();
+ assert(!CurFunction->CompoundScopes.empty() && "mismatched push/pop");
+
+ CurFunction->CompoundScopes.pop_back();
+}
+
+/// \brief Determine whether any errors occurred within this function/method/
+/// block.
+bool Sema::hasAnyUnrecoverableErrorsInThisFunction() const {
+ return getCurFunction()->ErrorTrap.hasUnrecoverableErrorOccurred();
+}
+
+BlockScopeInfo *Sema::getCurBlock() {
+ if (FunctionScopes.empty())
+ return 0;
+
+ return dyn_cast<BlockScopeInfo>(FunctionScopes.back());
+}
+
+LambdaScopeInfo *Sema::getCurLambda() {
+ if (FunctionScopes.empty())
+ return 0;
+
+ return dyn_cast<LambdaScopeInfo>(FunctionScopes.back());
+}
+
+// Pin this vtable to this file.
+ExternalSemaSource::~ExternalSemaSource() {}
+
+void ExternalSemaSource::ReadMethodPool(Selector Sel) { }
+
+void ExternalSemaSource::ReadKnownNamespaces(
+ SmallVectorImpl<NamespaceDecl *> &Namespaces) {
+}
+
+void PrettyDeclStackTraceEntry::print(raw_ostream &OS) const {
+ SourceLocation Loc = this->Loc;
+ if (!Loc.isValid() && TheDecl) Loc = TheDecl->getLocation();
+ if (Loc.isValid()) {
+ Loc.print(OS, S.getSourceManager());
+ OS << ": ";
+ }
+ OS << Message;
+
+ if (TheDecl && isa<NamedDecl>(TheDecl)) {
+ std::string Name = cast<NamedDecl>(TheDecl)->getNameAsString();
+ if (!Name.empty())
+ OS << " '" << Name << '\'';
+ }
+
+ OS << '\n';
+}
+
+/// \brief Figure out if an expression could be turned into a call.
+///
+/// Use this when trying to recover from an error where the programmer may have
+/// written just the name of a function instead of actually calling it.
+///
+/// \param E - The expression to examine.
+/// \param ZeroArgCallReturnTy - If the expression can be turned into a call
+/// with no arguments, this parameter is set to the type returned by such a
+/// call; otherwise, it is set to an empty QualType.
+/// \param OverloadSet - If the expression is an overloaded function
+/// name, this parameter is populated with the decls of the various overloads.
+bool Sema::isExprCallable(const Expr &E, QualType &ZeroArgCallReturnTy,
+ UnresolvedSetImpl &OverloadSet) {
+ ZeroArgCallReturnTy = QualType();
+ OverloadSet.clear();
+
+ if (E.getType() == Context.OverloadTy) {
+ OverloadExpr::FindResult FR = OverloadExpr::find(const_cast<Expr*>(&E));
+ const OverloadExpr *Overloads = FR.Expression;
+
+ for (OverloadExpr::decls_iterator it = Overloads->decls_begin(),
+ DeclsEnd = Overloads->decls_end(); it != DeclsEnd; ++it) {
+ OverloadSet.addDecl(*it);
+
+ // Check whether the function is a non-template which takes no
+ // arguments.
+ if (const FunctionDecl *OverloadDecl
+ = dyn_cast<FunctionDecl>((*it)->getUnderlyingDecl())) {
+ if (OverloadDecl->getMinRequiredArguments() == 0)
+ ZeroArgCallReturnTy = OverloadDecl->getResultType();
+ }
+ }
+
+ // Ignore overloads that are pointer-to-member constants.
+ if (FR.HasFormOfMemberPointer)
+ return false;
+
+ return true;
+ }
+
+ if (const DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E.IgnoreParens())) {
+ if (const FunctionDecl *Fun = dyn_cast<FunctionDecl>(DeclRef->getDecl())) {
+ if (Fun->getMinRequiredArguments() == 0)
+ ZeroArgCallReturnTy = Fun->getResultType();
+ return true;
+ }
+ }
+
+ // We don't have an expression that's convenient to get a FunctionDecl from,
+ // but we can at least check if the type is "function of 0 arguments".
+ QualType ExprTy = E.getType();
+ const FunctionType *FunTy = NULL;
+ QualType PointeeTy = ExprTy->getPointeeType();
+ if (!PointeeTy.isNull())
+ FunTy = PointeeTy->getAs<FunctionType>();
+ if (!FunTy)
+ FunTy = ExprTy->getAs<FunctionType>();
+ if (!FunTy && ExprTy == Context.BoundMemberTy) {
+ // Look for the bound-member type. If it's still overloaded, give up,
+ // although we probably should have fallen into the OverloadExpr case above
+ // if we actually have an overloaded bound member.
+ QualType BoundMemberTy = Expr::findBoundMemberType(&E);
+ if (!BoundMemberTy.isNull())
+ FunTy = BoundMemberTy->castAs<FunctionType>();
+ }
+
+ if (const FunctionProtoType *FPT =
+ dyn_cast_or_null<FunctionProtoType>(FunTy)) {
+ if (FPT->getNumArgs() == 0)
+ ZeroArgCallReturnTy = FunTy->getResultType();
+ return true;
+ }
+ return false;
+}
+
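isExprCallable boils down to one question: is there a candidate the programmer could have called with no arguments, and if so what would the call return? The sketch below keeps only that question, representing candidates with an invented Signature record instead of Clang's overload sets and function types (like the code above, it keeps the return type of the last zero-argument candidate it sees).

#include <cstdio>
#include <string>
#include <vector>

struct Signature {
  std::string ReturnType;
  unsigned MinRequiredArgs; // parameters without default arguments
};

// True if at least one candidate is callable with no arguments; on success
// ZeroArgReturnTy is set to that candidate's return type.
static bool callableWithNoArgs(const std::vector<Signature> &Candidates,
                               std::string &ZeroArgReturnTy) {
  ZeroArgReturnTy.clear();
  for (const Signature &Sig : Candidates)
    if (Sig.MinRequiredArgs == 0)
      ZeroArgReturnTy = Sig.ReturnType;
  return !ZeroArgReturnTy.empty();
}

int main() {
  std::vector<Signature> Overloads = {{"int", 2}, {"bool", 0}};
  std::string Ty;
  if (callableWithNoArgs(Overloads, Ty))
    std::printf("could append '()' here; the call would yield %s\n",
                Ty.c_str());
  return 0;
}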
+/// \brief Give notes for a set of overloads.
+///
+/// A companion to isExprCallable. In cases when the name that the programmer
+/// wrote was an overloaded function, we may be able to make some guesses about
+/// plausible overloads based on their return types; such guesses can be handed
+/// off to this method to be emitted as notes.
+///
+/// \param Overloads - The overloads to note.
+/// \param FinalNoteLoc - If we've suppressed printing some overloads due to
+/// -fshow-overloads=best, this is the location to attach to the note about too
+/// many candidates. Typically this will be the location of the original
+/// ill-formed expression.
+static void noteOverloads(Sema &S, const UnresolvedSetImpl &Overloads,
+ const SourceLocation FinalNoteLoc) {
+ int ShownOverloads = 0;
+ int SuppressedOverloads = 0;
+ for (UnresolvedSetImpl::iterator It = Overloads.begin(),
+ DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
+ // FIXME: Magic number for max shown overloads stolen from
+ // OverloadCandidateSet::NoteCandidates.
+ if (ShownOverloads >= 4 &&
+ S.Diags.getShowOverloads() == DiagnosticsEngine::Ovl_Best) {
+ ++SuppressedOverloads;
+ continue;
+ }
+
+ NamedDecl *Fn = (*It)->getUnderlyingDecl();
+ S.Diag(Fn->getLocation(), diag::note_possible_target_of_call);
+ ++ShownOverloads;
+ }
+
+ if (SuppressedOverloads)
+ S.Diag(FinalNoteLoc, diag::note_ovl_too_many_candidates)
+ << SuppressedOverloads;
+}
+
+static void notePlausibleOverloads(Sema &S, SourceLocation Loc,
+ const UnresolvedSetImpl &Overloads,
+ bool (*IsPlausibleResult)(QualType)) {
+ if (!IsPlausibleResult)
+ return noteOverloads(S, Overloads, Loc);
+
+ UnresolvedSet<2> PlausibleOverloads;
+ for (OverloadExpr::decls_iterator It = Overloads.begin(),
+ DeclsEnd = Overloads.end(); It != DeclsEnd; ++It) {
+ const FunctionDecl *OverloadDecl = cast<FunctionDecl>(*It);
+ QualType OverloadResultTy = OverloadDecl->getResultType();
+ if (IsPlausibleResult(OverloadResultTy))
+ PlausibleOverloads.addDecl(It.getDecl());
+ }
+ noteOverloads(S, PlausibleOverloads, Loc);
+}
+
+/// Determine whether the given expression can be called by just
+/// putting parentheses after it. Notably, expressions with unary
+/// operators can't be because the unary operator will start parsing
+/// outside the call.
+static bool IsCallableWithAppend(Expr *E) {
+ E = E->IgnoreImplicit();
+ return (!isa<CStyleCastExpr>(E) &&
+ !isa<UnaryOperator>(E) &&
+ !isa<BinaryOperator>(E) &&
+ !isa<CXXOperatorCallExpr>(E));
+}
+
+bool Sema::tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
+ bool ForceComplain,
+ bool (*IsPlausibleResult)(QualType)) {
+ SourceLocation Loc = E.get()->getExprLoc();
+ SourceRange Range = E.get()->getSourceRange();
+
+ QualType ZeroArgCallTy;
+ UnresolvedSet<4> Overloads;
+ if (isExprCallable(*E.get(), ZeroArgCallTy, Overloads) &&
+ !ZeroArgCallTy.isNull() &&
+ (!IsPlausibleResult || IsPlausibleResult(ZeroArgCallTy))) {
+ // At this point, we know E is potentially callable with 0
+ // arguments and that it returns something of a reasonable type,
+ // so we can emit a fixit and carry on pretending that E was
+ // actually a CallExpr.
+ SourceLocation ParenInsertionLoc =
+ PP.getLocForEndOfToken(Range.getEnd());
+ Diag(Loc, PD)
+ << /*zero-arg*/ 1 << Range
+ << (IsCallableWithAppend(E.get())
+ ? FixItHint::CreateInsertion(ParenInsertionLoc, "()")
+ : FixItHint());
+ notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
+
+ // FIXME: Try this before emitting the fixit, and suppress diagnostics
+ // while doing so.
+ E = ActOnCallExpr(0, E.take(), ParenInsertionLoc,
+ MultiExprArg(*this, 0, 0),
+ ParenInsertionLoc.getLocWithOffset(1));
+ return true;
+ }
+
+ if (!ForceComplain) return false;
+
+ Diag(Loc, PD) << /*not zero-arg*/ 0 << Range;
+ notePlausibleOverloads(*this, Loc, Overloads, IsPlausibleResult);
+ E = ExprError();
+ return true;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
new file mode 100644
index 0000000..dea5e76
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaAccess.cpp
@@ -0,0 +1,1843 @@
+//===---- SemaAccess.cpp - C++ Access Control -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Sema routines for C++ access control semantics.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DependentDiagnostic.h"
+#include "clang/AST/ExprCXX.h"
+
+using namespace clang;
+using namespace sema;
+
+/// A copy of Sema's enum without AR_delayed.
+enum AccessResult {
+ AR_accessible,
+ AR_inaccessible,
+ AR_dependent
+};
+
+/// SetMemberAccessSpecifier - Set the access specifier of a member.
+/// Returns true on error (when the previous member decl access specifier
+/// is different from the new member decl access specifier).
+bool Sema::SetMemberAccessSpecifier(NamedDecl *MemberDecl,
+ NamedDecl *PrevMemberDecl,
+ AccessSpecifier LexicalAS) {
+ if (!PrevMemberDecl) {
+ // Use the lexical access specifier.
+ MemberDecl->setAccess(LexicalAS);
+ return false;
+ }
+
+ // C++ [class.access.spec]p3: When a member is redeclared its access
+ // specifier must be the same as its initial declaration.
+ if (LexicalAS != AS_none && LexicalAS != PrevMemberDecl->getAccess()) {
+ Diag(MemberDecl->getLocation(),
+ diag::err_class_redeclared_with_different_access)
+ << MemberDecl << LexicalAS;
+ Diag(PrevMemberDecl->getLocation(), diag::note_previous_access_declaration)
+ << PrevMemberDecl << PrevMemberDecl->getAccess();
+
+ MemberDecl->setAccess(LexicalAS);
+ return true;
+ }
+
+ MemberDecl->setAccess(PrevMemberDecl->getAccess());
+ return false;
+}
+
+static CXXRecordDecl *FindDeclaringClass(NamedDecl *D) {
+ DeclContext *DC = D->getDeclContext();
+
+ // This can only happen at top: enum decls only "publish" their
+ // immediate members.
+ if (isa<EnumDecl>(DC))
+ DC = cast<EnumDecl>(DC)->getDeclContext();
+
+ CXXRecordDecl *DeclaringClass = cast<CXXRecordDecl>(DC);
+ while (DeclaringClass->isAnonymousStructOrUnion())
+ DeclaringClass = cast<CXXRecordDecl>(DeclaringClass->getDeclContext());
+ return DeclaringClass;
+}
+
+namespace {
+struct EffectiveContext {
+ EffectiveContext() : Inner(0), Dependent(false) {}
+
+ explicit EffectiveContext(DeclContext *DC)
+ : Inner(DC),
+ Dependent(DC->isDependentContext()) {
+
+ // C++ [class.access.nest]p1:
+ // A nested class is a member and as such has the same access
+ // rights as any other member.
+ // C++ [class.access]p2:
+ // A member of a class can also access all the names to which
+ // the class has access. A local class of a member function
+ // may access the same names that the member function itself
+ // may access.
+ // This almost implies that the privileges of nesting are transitive.
+ // Technically it says nothing about the local classes of non-member
+ // functions (which can gain privileges through friendship), but we
+ // take that as an oversight.
+ while (true) {
+ if (isa<CXXRecordDecl>(DC)) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DC)->getCanonicalDecl();
+ Records.push_back(Record);
+ DC = Record->getDeclContext();
+ } else if (isa<FunctionDecl>(DC)) {
+ FunctionDecl *Function = cast<FunctionDecl>(DC)->getCanonicalDecl();
+ Functions.push_back(Function);
+
+ if (Function->getFriendObjectKind())
+ DC = Function->getLexicalDeclContext();
+ else
+ DC = Function->getDeclContext();
+ } else if (DC->isFileContext()) {
+ break;
+ } else {
+ DC = DC->getParent();
+ }
+ }
+ }
+
+ bool isDependent() const { return Dependent; }
+
+ bool includesClass(const CXXRecordDecl *R) const {
+ R = R->getCanonicalDecl();
+ return std::find(Records.begin(), Records.end(), R)
+ != Records.end();
+ }
+
+ /// Retrieves the innermost "useful" context. Can be null if we're
+ /// doing access-control without privileges.
+ DeclContext *getInnerContext() const {
+ return Inner;
+ }
+
+ typedef SmallVectorImpl<CXXRecordDecl*>::const_iterator record_iterator;
+
+ DeclContext *Inner;
+ SmallVector<FunctionDecl*, 4> Functions;
+ SmallVector<CXXRecordDecl*, 4> Records;
+ bool Dependent;
+};
+
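The EffectiveContext constructor above encodes the rule quoted in its comments: access is judged from every class and function the use site is lexically nested in, because each of those can confer access as a member or as a friend. A minimal standalone model of that outward walk (MiniContext and MiniEffectiveContext are invented; this is not Clang's DeclContext machinery):

#include <cstdio>
#include <vector>

struct MiniContext {
  enum Kind { File, Record, Function, Other } K;
  const char *Name;
  const MiniContext *Parent; // null only above the file context
};

struct MiniEffectiveContext {
  std::vector<const MiniContext *> Records;
  std::vector<const MiniContext *> Functions;

  explicit MiniEffectiveContext(const MiniContext *DC) {
    // Nesting is transitive for access purposes: keep walking outward,
    // remembering every class and function passed on the way.
    while (DC) {
      if (DC->K == MiniContext::Record)
        Records.push_back(DC);
      else if (DC->K == MiniContext::Function)
        Functions.push_back(DC);
      else if (DC->K == MiniContext::File)
        break;
      DC = DC->Parent;
    }
  }

  bool includesClass(const MiniContext *R) const {
    for (const MiniContext *C : Records)
      if (C == R)
        return true;
    return false;
  }
};

int main() {
  MiniContext TU{MiniContext::File, "<TU>", nullptr};
  MiniContext Outer{MiniContext::Record, "Outer", &TU};
  MiniContext Method{MiniContext::Function, "Outer::f", &Outer};
  MiniContext Local{MiniContext::Record, "LocalInF", &Method};

  MiniEffectiveContext EC(&Local); // an access written inside LocalInF
  std::printf("includes Outer: %d, enclosing functions: %zu\n",
              EC.includesClass(&Outer), EC.Functions.size());
  return 0;
}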
+/// Like sema::AccessedEntity, but kindly lets us scribble all over
+/// it.
+struct AccessTarget : public AccessedEntity {
+ AccessTarget(const AccessedEntity &Entity)
+ : AccessedEntity(Entity) {
+ initialize();
+ }
+
+ AccessTarget(ASTContext &Context,
+ MemberNonce _,
+ CXXRecordDecl *NamingClass,
+ DeclAccessPair FoundDecl,
+ QualType BaseObjectType)
+ : AccessedEntity(Context, Member, NamingClass, FoundDecl, BaseObjectType) {
+ initialize();
+ }
+
+ AccessTarget(ASTContext &Context,
+ BaseNonce _,
+ CXXRecordDecl *BaseClass,
+ CXXRecordDecl *DerivedClass,
+ AccessSpecifier Access)
+ : AccessedEntity(Context, Base, BaseClass, DerivedClass, Access) {
+ initialize();
+ }
+
+ bool isInstanceMember() const {
+ return (isMemberAccess() && getTargetDecl()->isCXXInstanceMember());
+ }
+
+ bool hasInstanceContext() const {
+ return HasInstanceContext;
+ }
+
+ class SavedInstanceContext {
+ public:
+ ~SavedInstanceContext() {
+ Target.HasInstanceContext = Has;
+ }
+
+ private:
+ friend struct AccessTarget;
+ explicit SavedInstanceContext(AccessTarget &Target)
+ : Target(Target), Has(Target.HasInstanceContext) {}
+ AccessTarget &Target;
+ bool Has;
+ };
+
+ SavedInstanceContext saveInstanceContext() {
+ return SavedInstanceContext(*this);
+ }
+
+ void suppressInstanceContext() {
+ HasInstanceContext = false;
+ }
+
+ const CXXRecordDecl *resolveInstanceContext(Sema &S) const {
+ assert(HasInstanceContext);
+ if (CalculatedInstanceContext)
+ return InstanceContext;
+
+ CalculatedInstanceContext = true;
+ DeclContext *IC = S.computeDeclContext(getBaseObjectType());
+ InstanceContext = (IC ? cast<CXXRecordDecl>(IC)->getCanonicalDecl() : 0);
+ return InstanceContext;
+ }
+
+ const CXXRecordDecl *getDeclaringClass() const {
+ return DeclaringClass;
+ }
+
+private:
+ void initialize() {
+ HasInstanceContext = (isMemberAccess() &&
+ !getBaseObjectType().isNull() &&
+ getTargetDecl()->isCXXInstanceMember());
+ CalculatedInstanceContext = false;
+ InstanceContext = 0;
+
+ if (isMemberAccess())
+ DeclaringClass = FindDeclaringClass(getTargetDecl());
+ else
+ DeclaringClass = getBaseClass();
+ DeclaringClass = DeclaringClass->getCanonicalDecl();
+ }
+
+ bool HasInstanceContext : 1;
+ mutable bool CalculatedInstanceContext : 1;
+ mutable const CXXRecordDecl *InstanceContext;
+ const CXXRecordDecl *DeclaringClass;
+};
+
+}
+
+/// Checks whether one class might instantiate to the other.
+static bool MightInstantiateTo(const CXXRecordDecl *From,
+ const CXXRecordDecl *To) {
+ // Declaration names are always preserved by instantiation.
+ if (From->getDeclName() != To->getDeclName())
+ return false;
+
+ const DeclContext *FromDC = From->getDeclContext()->getPrimaryContext();
+ const DeclContext *ToDC = To->getDeclContext()->getPrimaryContext();
+ if (FromDC == ToDC) return true;
+ if (FromDC->isFileContext() || ToDC->isFileContext()) return false;
+
+ // Be conservative.
+ return true;
+}
+
+/// Checks whether one class is derived from another, inclusively.
+/// Properly indicates when it couldn't be determined due to
+/// dependence.
+///
+/// This should probably be donated to AST or at least Sema.
+static AccessResult IsDerivedFromInclusive(const CXXRecordDecl *Derived,
+ const CXXRecordDecl *Target) {
+ assert(Derived->getCanonicalDecl() == Derived);
+ assert(Target->getCanonicalDecl() == Target);
+
+ if (Derived == Target) return AR_accessible;
+
+ bool CheckDependent = Derived->isDependentContext();
+ if (CheckDependent && MightInstantiateTo(Derived, Target))
+ return AR_dependent;
+
+ AccessResult OnFailure = AR_inaccessible;
+ SmallVector<const CXXRecordDecl*, 8> Queue; // actually a stack
+
+ while (true) {
+ if (Derived->isDependentContext() && !Derived->hasDefinition())
+ return AR_dependent;
+
+ for (CXXRecordDecl::base_class_const_iterator
+ I = Derived->bases_begin(), E = Derived->bases_end(); I != E; ++I) {
+
+ const CXXRecordDecl *RD;
+
+ QualType T = I->getType();
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ RD = cast<CXXRecordDecl>(RT->getDecl());
+ } else if (const InjectedClassNameType *IT
+ = T->getAs<InjectedClassNameType>()) {
+ RD = IT->getDecl();
+ } else {
+ assert(T->isDependentType() && "non-dependent base wasn't a record?");
+ OnFailure = AR_dependent;
+ continue;
+ }
+
+ RD = RD->getCanonicalDecl();
+ if (RD == Target) return AR_accessible;
+ if (CheckDependent && MightInstantiateTo(RD, Target))
+ OnFailure = AR_dependent;
+
+ Queue.push_back(RD);
+ }
+
+ if (Queue.empty()) break;
+
+ Derived = Queue.back();
+ Queue.pop_back();
+ }
+
+ return OnFailure;
+}
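Stripped of the dependent-type bookkeeping, IsDerivedFromInclusive is a plain worklist search over the base-class graph. A standalone sketch under that simplification (MiniClass is invented; the real code additionally reports AR_dependent when a base might only instantiate to the target):

#include <cstdio>
#include <vector>

struct MiniClass {
  const char *Name;
  std::vector<const MiniClass *> Bases;
};

static bool isDerivedFromInclusive(const MiniClass *Derived,
                                   const MiniClass *Target) {
  if (Derived == Target)
    return true;
  std::vector<const MiniClass *> Queue; // used as a stack, like the original
  const MiniClass *Current = Derived;
  while (true) {
    for (const MiniClass *Base : Current->Bases) {
      if (Base == Target)
        return true;
      Queue.push_back(Base);
    }
    if (Queue.empty())
      return false;
    Current = Queue.back();
    Queue.pop_back();
  }
}

int main() {
  MiniClass A{"A", {}};
  MiniClass B{"B", {&A}};
  MiniClass C{"C", {&B}};
  std::printf("%d %d\n", isDerivedFromInclusive(&C, &A), // 1
              isDerivedFromInclusive(&A, &C));           // 0
  return 0;
}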
+
+
+static bool MightInstantiateTo(Sema &S, DeclContext *Context,
+ DeclContext *Friend) {
+ if (Friend == Context)
+ return true;
+
+ assert(!Friend->isDependentContext() &&
+ "can't handle friends with dependent contexts here");
+
+ if (!Context->isDependentContext())
+ return false;
+
+ if (Friend->isFileContext())
+ return false;
+
+ // TODO: this is very conservative
+ return true;
+}
+
+// Asks whether the type in 'context' can ever instantiate to the type
+// in 'friend'.
+static bool MightInstantiateTo(Sema &S, CanQualType Context, CanQualType Friend) {
+ if (Friend == Context)
+ return true;
+
+ if (!Friend->isDependentType() && !Context->isDependentType())
+ return false;
+
+ // TODO: this is very conservative.
+ return true;
+}
+
+static bool MightInstantiateTo(Sema &S,
+ FunctionDecl *Context,
+ FunctionDecl *Friend) {
+ if (Context->getDeclName() != Friend->getDeclName())
+ return false;
+
+ if (!MightInstantiateTo(S,
+ Context->getDeclContext(),
+ Friend->getDeclContext()))
+ return false;
+
+ CanQual<FunctionProtoType> FriendTy
+ = S.Context.getCanonicalType(Friend->getType())
+ ->getAs<FunctionProtoType>();
+ CanQual<FunctionProtoType> ContextTy
+ = S.Context.getCanonicalType(Context->getType())
+ ->getAs<FunctionProtoType>();
+
+ // There isn't any way that I know of to add qualifiers
+ // during instantiation.
+ if (FriendTy.getQualifiers() != ContextTy.getQualifiers())
+ return false;
+
+ if (FriendTy->getNumArgs() != ContextTy->getNumArgs())
+ return false;
+
+ if (!MightInstantiateTo(S,
+ ContextTy->getResultType(),
+ FriendTy->getResultType()))
+ return false;
+
+ for (unsigned I = 0, E = FriendTy->getNumArgs(); I != E; ++I)
+ if (!MightInstantiateTo(S,
+ ContextTy->getArgType(I),
+ FriendTy->getArgType(I)))
+ return false;
+
+ return true;
+}
+
+static bool MightInstantiateTo(Sema &S,
+ FunctionTemplateDecl *Context,
+ FunctionTemplateDecl *Friend) {
+ return MightInstantiateTo(S,
+ Context->getTemplatedDecl(),
+ Friend->getTemplatedDecl());
+}
+
+static AccessResult MatchesFriend(Sema &S,
+ const EffectiveContext &EC,
+ const CXXRecordDecl *Friend) {
+ if (EC.includesClass(Friend))
+ return AR_accessible;
+
+ if (EC.isDependent()) {
+ CanQualType FriendTy
+ = S.Context.getCanonicalType(S.Context.getTypeDeclType(Friend));
+
+ for (EffectiveContext::record_iterator
+ I = EC.Records.begin(), E = EC.Records.end(); I != E; ++I) {
+ CanQualType ContextTy
+ = S.Context.getCanonicalType(S.Context.getTypeDeclType(*I));
+ if (MightInstantiateTo(S, ContextTy, FriendTy))
+ return AR_dependent;
+ }
+ }
+
+ return AR_inaccessible;
+}
+
+static AccessResult MatchesFriend(Sema &S,
+ const EffectiveContext &EC,
+ CanQualType Friend) {
+ if (const RecordType *RT = Friend->getAs<RecordType>())
+ return MatchesFriend(S, EC, cast<CXXRecordDecl>(RT->getDecl()));
+
+ // TODO: we can do better than this
+ if (Friend->isDependentType())
+ return AR_dependent;
+
+ return AR_inaccessible;
+}
+
+/// Determines whether the given friend class template matches
+/// anything in the effective context.
+static AccessResult MatchesFriend(Sema &S,
+ const EffectiveContext &EC,
+ ClassTemplateDecl *Friend) {
+ AccessResult OnFailure = AR_inaccessible;
+
+ // Check whether the friend is the template of a class in the
+ // context chain.
+ for (SmallVectorImpl<CXXRecordDecl*>::const_iterator
+ I = EC.Records.begin(), E = EC.Records.end(); I != E; ++I) {
+ CXXRecordDecl *Record = *I;
+
+ // Figure out whether the current class has a template:
+ ClassTemplateDecl *CTD;
+
+ // A specialization of the template...
+ if (isa<ClassTemplateSpecializationDecl>(Record)) {
+ CTD = cast<ClassTemplateSpecializationDecl>(Record)
+ ->getSpecializedTemplate();
+
+ // ... or the template pattern itself.
+ } else {
+ CTD = Record->getDescribedClassTemplate();
+ if (!CTD) continue;
+ }
+
+ // It's a match.
+ if (Friend == CTD->getCanonicalDecl())
+ return AR_accessible;
+
+ // If the context isn't dependent, it can't be a dependent match.
+ if (!EC.isDependent())
+ continue;
+
+ // If the template names don't match, it can't be a dependent
+ // match.
+ if (CTD->getDeclName() != Friend->getDeclName())
+ continue;
+
+ // If the class's context can't instantiate to the friend's
+ // context, it can't be a dependent match.
+ if (!MightInstantiateTo(S, CTD->getDeclContext(),
+ Friend->getDeclContext()))
+ continue;
+
+ // Otherwise, it's a dependent match.
+ OnFailure = AR_dependent;
+ }
+
+ return OnFailure;
+}
+
+/// Determines whether the given friend function matches anything in
+/// the effective context.
+static AccessResult MatchesFriend(Sema &S,
+ const EffectiveContext &EC,
+ FunctionDecl *Friend) {
+ AccessResult OnFailure = AR_inaccessible;
+
+ for (SmallVectorImpl<FunctionDecl*>::const_iterator
+ I = EC.Functions.begin(), E = EC.Functions.end(); I != E; ++I) {
+ if (Friend == *I)
+ return AR_accessible;
+
+ if (EC.isDependent() && MightInstantiateTo(S, *I, Friend))
+ OnFailure = AR_dependent;
+ }
+
+ return OnFailure;
+}
+
+/// Determines whether the given friend function template matches
+/// anything in the effective context.
+static AccessResult MatchesFriend(Sema &S,
+ const EffectiveContext &EC,
+ FunctionTemplateDecl *Friend) {
+ if (EC.Functions.empty()) return AR_inaccessible;
+
+ AccessResult OnFailure = AR_inaccessible;
+
+ for (SmallVectorImpl<FunctionDecl*>::const_iterator
+ I = EC.Functions.begin(), E = EC.Functions.end(); I != E; ++I) {
+
+ FunctionTemplateDecl *FTD = (*I)->getPrimaryTemplate();
+ if (!FTD)
+ FTD = (*I)->getDescribedFunctionTemplate();
+ if (!FTD)
+ continue;
+
+ FTD = FTD->getCanonicalDecl();
+
+ if (Friend == FTD)
+ return AR_accessible;
+
+ if (EC.isDependent() && MightInstantiateTo(S, FTD, Friend))
+ OnFailure = AR_dependent;
+ }
+
+ return OnFailure;
+}
+
+/// Determines whether the given friend declaration matches anything
+/// in the effective context.
+static AccessResult MatchesFriend(Sema &S,
+ const EffectiveContext &EC,
+ FriendDecl *FriendD) {
+ // Whitelist accesses if there's an invalid or unsupported friend
+ // declaration.
+ if (FriendD->isInvalidDecl() || FriendD->isUnsupportedFriend())
+ return AR_accessible;
+
+ if (TypeSourceInfo *T = FriendD->getFriendType())
+ return MatchesFriend(S, EC, T->getType()->getCanonicalTypeUnqualified());
+
+ NamedDecl *Friend
+ = cast<NamedDecl>(FriendD->getFriendDecl()->getCanonicalDecl());
+
+ // FIXME: declarations with dependent or templated scope.
+
+ if (isa<ClassTemplateDecl>(Friend))
+ return MatchesFriend(S, EC, cast<ClassTemplateDecl>(Friend));
+
+ if (isa<FunctionTemplateDecl>(Friend))
+ return MatchesFriend(S, EC, cast<FunctionTemplateDecl>(Friend));
+
+ if (isa<CXXRecordDecl>(Friend))
+ return MatchesFriend(S, EC, cast<CXXRecordDecl>(Friend));
+
+ assert(isa<FunctionDecl>(Friend) && "unknown friend decl kind");
+ return MatchesFriend(S, EC, cast<FunctionDecl>(Friend));
+}
+
+static AccessResult GetFriendKind(Sema &S,
+ const EffectiveContext &EC,
+ const CXXRecordDecl *Class) {
+ AccessResult OnFailure = AR_inaccessible;
+
+ // Okay, check friends.
+ for (CXXRecordDecl::friend_iterator I = Class->friend_begin(),
+ E = Class->friend_end(); I != E; ++I) {
+ FriendDecl *Friend = *I;
+
+ switch (MatchesFriend(S, EC, Friend)) {
+ case AR_accessible:
+ return AR_accessible;
+
+ case AR_inaccessible:
+ continue;
+
+ case AR_dependent:
+ OnFailure = AR_dependent;
+ break;
+ }
+ }
+
+ // That's it, give up.
+ return OnFailure;
+}
+
+namespace {
+
+/// A helper class for checking for a friend which will grant access
+/// to a protected instance member.
+struct ProtectedFriendContext {
+ Sema &S;
+ const EffectiveContext &EC;
+ const CXXRecordDecl *NamingClass;
+ bool CheckDependent;
+ bool EverDependent;
+
+ /// The path down to the current base class.
+ SmallVector<const CXXRecordDecl*, 20> CurPath;
+
+ ProtectedFriendContext(Sema &S, const EffectiveContext &EC,
+ const CXXRecordDecl *InstanceContext,
+ const CXXRecordDecl *NamingClass)
+ : S(S), EC(EC), NamingClass(NamingClass),
+ CheckDependent(InstanceContext->isDependentContext() ||
+ NamingClass->isDependentContext()),
+ EverDependent(false) {}
+
+ /// Check classes in the current path for friendship, starting at
+ /// the given index.
+ bool checkFriendshipAlongPath(unsigned I) {
+ assert(I < CurPath.size());
+ for (unsigned E = CurPath.size(); I != E; ++I) {
+ switch (GetFriendKind(S, EC, CurPath[I])) {
+ case AR_accessible: return true;
+ case AR_inaccessible: continue;
+ case AR_dependent: EverDependent = true; continue;
+ }
+ }
+ return false;
+ }
+
+ /// Perform a search starting at the given class.
+ ///
+ /// PrivateDepth is the index of the last (least derived) class
+ /// along the current path such that a notional public member of
+ /// the final class in the path would have access in that class.
+ bool findFriendship(const CXXRecordDecl *Cur, unsigned PrivateDepth) {
+ // If we ever reach the naming class, check the current path for
+ // friendship. We can also stop recursing because we obviously
+ // won't find the naming class there again.
+ if (Cur == NamingClass)
+ return checkFriendshipAlongPath(PrivateDepth);
+
+ if (CheckDependent && MightInstantiateTo(Cur, NamingClass))
+ EverDependent = true;
+
+ // Recurse into the base classes.
+ for (CXXRecordDecl::base_class_const_iterator
+ I = Cur->bases_begin(), E = Cur->bases_end(); I != E; ++I) {
+
+ // If this is private inheritance, then a public member of the
+ // base will not have any access in classes derived from Cur.
+ unsigned BasePrivateDepth = PrivateDepth;
+ if (I->getAccessSpecifier() == AS_private)
+ BasePrivateDepth = CurPath.size() - 1;
+
+ const CXXRecordDecl *RD;
+
+ QualType T = I->getType();
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ RD = cast<CXXRecordDecl>(RT->getDecl());
+ } else if (const InjectedClassNameType *IT
+ = T->getAs<InjectedClassNameType>()) {
+ RD = IT->getDecl();
+ } else {
+ assert(T->isDependentType() && "non-dependent base wasn't a record?");
+ EverDependent = true;
+ continue;
+ }
+
+ // Recurse. We don't need to clean up if this returns true.
+ CurPath.push_back(RD);
+ if (findFriendship(RD->getCanonicalDecl(), BasePrivateDepth))
+ return true;
+ CurPath.pop_back();
+ }
+
+ return false;
+ }
+
+ bool findFriendship(const CXXRecordDecl *Cur) {
+ assert(CurPath.empty());
+ CurPath.push_back(Cur);
+ return findFriendship(Cur, 0);
+ }
+};
+}
+
+/// Search for a class P that EC is a friend of, under the constraint
+/// InstanceContext <= P
+/// if InstanceContext exists, or else
+/// NamingClass <= P
+/// and with the additional restriction that a protected member of
+/// NamingClass would have some natural access in P, which implicitly
+/// imposes the constraint that P <= NamingClass.
+///
+/// This isn't quite the condition laid out in the standard.
+/// Instead of saying that a notional protected member of NamingClass
+/// would have to have some natural access in P, it says the actual
+/// target has to have some natural access in P, which opens up the
+/// possibility that the target (which is not necessarily a member
+/// of NamingClass) might be more accessible along some path not
+/// passing through it. That's really a bad idea, though, because it
+/// introduces two problems:
+/// - Most importantly, it breaks encapsulation because you can
+/// access a forbidden base class's members by directly subclassing
+/// it elsewhere.
+/// - It also makes access substantially harder to compute because it
+/// breaks the hill-climbing algorithm: knowing that the target is
+/// accessible in some base class would no longer let you change
+/// the question solely to whether the base class is accessible,
+/// because the original target might have been more accessible
+/// because of crazy subclassing.
+/// So we don't implement that.
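+///
+/// A minimal illustrative sketch of the situation this search looks for
+/// (the class names below are invented for exposition only):
+///   class N { protected: int m; };
+///   class P : public N { friend void f(); };
+///   class D : public P {};
+///   void f() { D d; (void) d.m; }  // OK: f is a friend of P,
+///                                  // with D <= P and P <= N.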
+static AccessResult GetProtectedFriendKind(Sema &S, const EffectiveContext &EC,
+ const CXXRecordDecl *InstanceContext,
+ const CXXRecordDecl *NamingClass) {
+ assert(InstanceContext == 0 ||
+ InstanceContext->getCanonicalDecl() == InstanceContext);
+ assert(NamingClass->getCanonicalDecl() == NamingClass);
+
+ // If we don't have an instance context, our constraints give us
+ // that NamingClass <= P <= NamingClass, i.e. P == NamingClass.
+ // This is just the usual friendship check.
+ if (!InstanceContext) return GetFriendKind(S, EC, NamingClass);
+
+ ProtectedFriendContext PRC(S, EC, InstanceContext, NamingClass);
+ if (PRC.findFriendship(InstanceContext)) return AR_accessible;
+ if (PRC.EverDependent) return AR_dependent;
+ return AR_inaccessible;
+}
+
+static AccessResult HasAccess(Sema &S,
+ const EffectiveContext &EC,
+ const CXXRecordDecl *NamingClass,
+ AccessSpecifier Access,
+ const AccessTarget &Target) {
+ assert(NamingClass->getCanonicalDecl() == NamingClass &&
+ "declaration should be canonicalized before being passed here");
+
+ if (Access == AS_public) return AR_accessible;
+ assert(Access == AS_private || Access == AS_protected);
+
+ AccessResult OnFailure = AR_inaccessible;
+
+ for (EffectiveContext::record_iterator
+ I = EC.Records.begin(), E = EC.Records.end(); I != E; ++I) {
+ // All the declarations in EC have been canonicalized, so pointer
+ // equality from this point on will work fine.
+ const CXXRecordDecl *ECRecord = *I;
+
+ // [B2] and [M2]
+ if (Access == AS_private) {
+ if (ECRecord == NamingClass)
+ return AR_accessible;
+
+ if (EC.isDependent() && MightInstantiateTo(ECRecord, NamingClass))
+ OnFailure = AR_dependent;
+
+ // [B3] and [M3]
+ } else {
+ assert(Access == AS_protected);
+ switch (IsDerivedFromInclusive(ECRecord, NamingClass)) {
+ case AR_accessible: break;
+ case AR_inaccessible: continue;
+ case AR_dependent: OnFailure = AR_dependent; continue;
+ }
+
+ // C++ [class.protected]p1:
+ // An additional access check beyond those described earlier in
+ // [class.access] is applied when a non-static data member or
+ // non-static member function is a protected member of its naming
+ // class. As described earlier, access to a protected member is
+ // granted because the reference occurs in a friend or member of
+ // some class C. If the access is to form a pointer to member,
+ // the nested-name-specifier shall name C or a class derived from
+ // C. All other accesses involve a (possibly implicit) object
+ // expression. In this case, the class of the object expression
+ // shall be C or a class derived from C.
+ //
+ // We interpret this as a restriction on [M3].
+
+ // In this part of the code, 'C' is just our context class ECRecord.
+
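+      // A small illustrative sketch of that restriction, with invented
+      // class names:
+      //   class A { protected: int x; };
+      //   class B : public A {
+      //     void f(A *a, B *b) { b->x; /* OK */ a->x; /* ill-formed */ }
+      //   };
+      // Here C is B: b->x is fine because the object expression's class
+      // is (derived from) B, while a->x is rejected because A is not.
+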
+ // These rules are different if we don't have an instance context.
+ if (!Target.hasInstanceContext()) {
+ // If it's not an instance member, these restrictions don't apply.
+ if (!Target.isInstanceMember()) return AR_accessible;
+
+ // If it's an instance member, use the pointer-to-member rule
+ // that the naming class has to be derived from the effective
+ // context.
+
+ // Despite the standard's confident wording, there is a case
+ // where you can have an instance member that's neither in a
+ // pointer-to-member expression nor in a member access: when
+ // it names a field in an unevaluated context that can't be an
+ // implicit member. Pending clarification, we just apply the
+ // same naming-class restriction here.
+ // FIXME: we're probably not correctly adding the
+ // protected-member restriction when we retroactively convert
+ // an expression to being evaluated.
+
+ // We know that ECRecord derives from NamingClass. The
+ // restriction says to check whether NamingClass derives from
+ // ECRecord, but that's not really necessary: two distinct
+ // classes can't be recursively derived from each other. So
+ // along this path, we just need to check whether the classes
+ // are equal.
+ if (NamingClass == ECRecord) return AR_accessible;
+
+ // Otherwise, this context class tells us nothing; on to the next.
+ continue;
+ }
+
+ assert(Target.isInstanceMember());
+
+ const CXXRecordDecl *InstanceContext = Target.resolveInstanceContext(S);
+ if (!InstanceContext) {
+ OnFailure = AR_dependent;
+ continue;
+ }
+
+ switch (IsDerivedFromInclusive(InstanceContext, ECRecord)) {
+ case AR_accessible: return AR_accessible;
+ case AR_inaccessible: continue;
+ case AR_dependent: OnFailure = AR_dependent; continue;
+ }
+ }
+ }
+
+ // [M3] and [B3] say that, if the target is protected in N, we grant
+ // access if the access occurs in a friend or member of some class P
+ // that's a subclass of N and where the target has some natural
+ // access in P. The 'member' aspect is easy to handle because P
+ // would necessarily be one of the effective-context records, and we
+ // address that above. The 'friend' aspect is completely ridiculous
+ // to implement because there are no restrictions at all on P
+ // *unless* the [class.protected] restriction applies. If it does,
+ // however, we should ignore whether the naming class is a friend,
+ // and instead rely on whether any potential P is a friend.
+ if (Access == AS_protected && Target.isInstanceMember()) {
+ // Compute the instance context if possible.
+ const CXXRecordDecl *InstanceContext = 0;
+ if (Target.hasInstanceContext()) {
+ InstanceContext = Target.resolveInstanceContext(S);
+ if (!InstanceContext) return AR_dependent;
+ }
+
+ switch (GetProtectedFriendKind(S, EC, InstanceContext, NamingClass)) {
+ case AR_accessible: return AR_accessible;
+ case AR_inaccessible: return OnFailure;
+ case AR_dependent: return AR_dependent;
+ }
+ llvm_unreachable("impossible friendship kind");
+ }
+
+ switch (GetFriendKind(S, EC, NamingClass)) {
+ case AR_accessible: return AR_accessible;
+ case AR_inaccessible: return OnFailure;
+ case AR_dependent: return AR_dependent;
+ }
+
+ // Silence bogus warnings
+ llvm_unreachable("impossible friendship kind");
+}
+
+/// Finds the best path from the naming class to the declaring class,
+/// taking friend declarations into account.
+///
+/// C++0x [class.access.base]p5:
+/// A member m is accessible at the point R when named in class N if
+/// [M1] m as a member of N is public, or
+/// [M2] m as a member of N is private, and R occurs in a member or
+/// friend of class N, or
+/// [M3] m as a member of N is protected, and R occurs in a member or
+/// friend of class N, or in a member or friend of a class P
+/// derived from N, where m as a member of P is public, private,
+/// or protected, or
+/// [M4] there exists a base class B of N that is accessible at R, and
+/// m is accessible at R when named in class B.
+///
+/// C++0x [class.access.base]p4:
+/// A base class B of N is accessible at R, if
+/// [B1] an invented public member of B would be a public member of N, or
+/// [B2] R occurs in a member or friend of class N, and an invented public
+/// member of B would be a private or protected member of N, or
+/// [B3] R occurs in a member or friend of a class P derived from N, and an
+/// invented public member of B would be a private or protected member
+/// of P, or
+/// [B4] there exists a class S such that B is a base class of S accessible
+/// at R and S is a base class of N accessible at R.
+///
+/// Along a single inheritance path we can restate both of these
+/// iteratively:
+///
+/// First, we note that M1-4 are equivalent to B1-4 if the member is
+/// treated as a notional base of its declaring class with inheritance
+/// access equivalent to the member's access. Therefore we need only
+/// ask whether a class B is accessible from a class N in context R.
+///
+/// Let B_1 .. B_n be the inheritance path in question (i.e. where
+/// B_1 = N, B_n = B, and for all i, B_{i+1} is a direct base class of
+/// B_i). For i in 1..n, we will calculate ACAB(i), the access to the
+/// closest accessible base in the path:
+/// Access(a, b) = (* access on the base specifier from a to b *)
+/// Merge(a, forbidden) = forbidden
+/// Merge(a, private) = forbidden
+/// Merge(a, b) = min(a,b)
+/// Accessible(c, forbidden) = false
+/// Accessible(c, private) = (R is c) || IsFriend(c, R)
+/// Accessible(c, protected) = (R derived from c) || IsFriend(c, R)
+/// Accessible(c, public) = true
+/// ACAB(n) = public
+/// ACAB(i) =
+/// let AccessToBase = Merge(Access(B_i, B_{i+1}), ACAB(i+1)) in
+/// if Accessible(B_i, AccessToBase) then public else AccessToBase
+///
+/// B is an accessible base of N at R iff ACAB(1) = public.
+///
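+/// As a worked illustration with invented classes (B, M, N are hypothetical
+/// names, R occurs in a member of N, and there are no friendships anywhere):
+///   class B {};
+///   class M : public B {};   // B_2
+///   class N : private M {};  // B_1
+/// For the path N -> M -> B (n = 3):
+///   ACAB(3) = public
+///   ACAB(2) = public, since Merge(public, public) = public and
+///             Accessible(M, public) holds
+///   ACAB(1) = public, since Merge(private, public) = private but
+///             Accessible(N, private) holds (R occurs in a member of N)
+/// so B is an accessible base of N at R.
+///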
+/// \param FinalAccess the access of the "final step", or AS_public if
+/// there is no final step.
+/// \return null if friendship is dependent
+static CXXBasePath *FindBestPath(Sema &S,
+ const EffectiveContext &EC,
+ AccessTarget &Target,
+ AccessSpecifier FinalAccess,
+ CXXBasePaths &Paths) {
+ // Derive the paths to the desired base.
+ const CXXRecordDecl *Derived = Target.getNamingClass();
+ const CXXRecordDecl *Base = Target.getDeclaringClass();
+
+ // FIXME: fail correctly when there are dependent paths.
+ bool isDerived = Derived->isDerivedFrom(const_cast<CXXRecordDecl*>(Base),
+ Paths);
+ assert(isDerived && "derived class not actually derived from base");
+ (void) isDerived;
+
+ CXXBasePath *BestPath = 0;
+
+ assert(FinalAccess != AS_none && "forbidden access after declaring class");
+
+ bool AnyDependent = false;
+
+ // Derive the friend-modified access along each path.
+ for (CXXBasePaths::paths_iterator PI = Paths.begin(), PE = Paths.end();
+ PI != PE; ++PI) {
+ AccessTarget::SavedInstanceContext _ = Target.saveInstanceContext();
+
+ // Walk through the path backwards.
+ AccessSpecifier PathAccess = FinalAccess;
+ CXXBasePath::iterator I = PI->end(), E = PI->begin();
+ while (I != E) {
+ --I;
+
+ assert(PathAccess != AS_none);
+
+ // If the declaration is a private member of a base class, there
+ // is no level of friendship in derived classes that can make it
+ // accessible.
+ if (PathAccess == AS_private) {
+ PathAccess = AS_none;
+ break;
+ }
+
+ const CXXRecordDecl *NC = I->Class->getCanonicalDecl();
+
+ AccessSpecifier BaseAccess = I->Base->getAccessSpecifier();
+ PathAccess = std::max(PathAccess, BaseAccess);
+
+ switch (HasAccess(S, EC, NC, PathAccess, Target)) {
+ case AR_inaccessible: break;
+ case AR_accessible:
+ PathAccess = AS_public;
+
+ // Future tests are not against members and so do not have
+ // instance context.
+ Target.suppressInstanceContext();
+ break;
+ case AR_dependent:
+ AnyDependent = true;
+ goto Next;
+ }
+ }
+
+ // Note that we modify the path's Access field to the
+ // friend-modified access.
+ if (BestPath == 0 || PathAccess < BestPath->Access) {
+ BestPath = &*PI;
+ BestPath->Access = PathAccess;
+
+ // Short-circuit if we found a public path.
+ if (BestPath->Access == AS_public)
+ return BestPath;
+ }
+
+ Next: ;
+ }
+
+ assert((!BestPath || BestPath->Access != AS_public) &&
+ "fell out of loop with public path");
+
+ // We didn't find a public path, but at least one path was subject
+ // to dependent friendship, so delay the check.
+ if (AnyDependent)
+ return 0;
+
+ return BestPath;
+}
+
+/// Given that an entity has protected natural access, check whether
+/// access might be denied because of the protected member access
+/// restriction.
+///
+/// \return true if a note was emitted
+static bool TryDiagnoseProtectedAccess(Sema &S, const EffectiveContext &EC,
+ AccessTarget &Target) {
+ // Only applies to instance accesses.
+ if (!Target.isInstanceMember())
+ return false;
+
+ assert(Target.isMemberAccess());
+
+ const CXXRecordDecl *NamingClass = Target.getNamingClass();
+ NamingClass = NamingClass->getCanonicalDecl();
+
+ for (EffectiveContext::record_iterator
+ I = EC.Records.begin(), E = EC.Records.end(); I != E; ++I) {
+ const CXXRecordDecl *ECRecord = *I;
+ switch (IsDerivedFromInclusive(ECRecord, NamingClass)) {
+ case AR_accessible: break;
+ case AR_inaccessible: continue;
+ case AR_dependent: continue;
+ }
+
+ // The effective context is a subclass of the declaring class.
+ // Check whether the [class.protected] restriction is limiting
+ // access.
+
+ // To get this exactly right, this might need to be checked more
+ // holistically; it's not necessarily the case that gaining
+ // access here would grant us access overall.
+
+ NamedDecl *D = Target.getTargetDecl();
+
+ // If we don't have an instance context, [class.protected] says the
+ // naming class has to equal the context class.
+ if (!Target.hasInstanceContext()) {
+ // If it does, the restriction doesn't apply.
+ if (NamingClass == ECRecord) continue;
+
+ // TODO: it would be great to have a fixit here, since this is
+ // such an obvious error.
+ S.Diag(D->getLocation(), diag::note_access_protected_restricted_noobject)
+ << S.Context.getTypeDeclType(ECRecord);
+ return true;
+ }
+
+ const CXXRecordDecl *InstanceContext = Target.resolveInstanceContext(S);
+ assert(InstanceContext && "diagnosing dependent access");
+
+ switch (IsDerivedFromInclusive(InstanceContext, ECRecord)) {
+ case AR_accessible: continue;
+ case AR_dependent: continue;
+ case AR_inaccessible:
+ break;
+ }
+
+ // Okay, the restriction seems to be what's limiting us.
+
+ // Use a special diagnostic for constructors and destructors.
+ if (isa<CXXConstructorDecl>(D) || isa<CXXDestructorDecl>(D) ||
+ (isa<FunctionTemplateDecl>(D) &&
+ isa<CXXConstructorDecl>(
+ cast<FunctionTemplateDecl>(D)->getTemplatedDecl()))) {
+ S.Diag(D->getLocation(), diag::note_access_protected_restricted_ctordtor)
+ << isa<CXXDestructorDecl>(D);
+ return true;
+ }
+
+ // Otherwise, use the generic diagnostic.
+ S.Diag(D->getLocation(), diag::note_access_protected_restricted_object)
+ << S.Context.getTypeDeclType(ECRecord);
+ return true;
+ }
+
+ return false;
+}
+
+/// Diagnose the path which caused the given declaration or base class
+/// to become inaccessible.
+static void DiagnoseAccessPath(Sema &S,
+ const EffectiveContext &EC,
+ AccessTarget &Entity) {
+ AccessSpecifier Access = Entity.getAccess();
+
+ NamedDecl *D = (Entity.isMemberAccess() ? Entity.getTargetDecl() : 0);
+ const CXXRecordDecl *DeclaringClass = Entity.getDeclaringClass();
+
+ // Easy case: the decl's natural access determined its path access.
+ // We have to check against AS_private here in case Access is AS_none,
+ // indicating a non-public member of a private base class.
+ if (D && (Access == D->getAccess() || D->getAccess() == AS_private)) {
+ switch (HasAccess(S, EC, DeclaringClass, D->getAccess(), Entity)) {
+ case AR_inaccessible: {
+ if (Access == AS_protected &&
+ TryDiagnoseProtectedAccess(S, EC, Entity))
+ return;
+
+ // Find an original declaration.
+ while (D->isOutOfLine()) {
+ NamedDecl *PrevDecl = 0;
+ if (VarDecl *VD = dyn_cast<VarDecl>(D))
+ PrevDecl = VD->getPreviousDecl();
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ PrevDecl = FD->getPreviousDecl();
+ else if (TypedefNameDecl *TND = dyn_cast<TypedefNameDecl>(D))
+ PrevDecl = TND->getPreviousDecl();
+ else if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ if (isa<RecordDecl>(D) && cast<RecordDecl>(D)->isInjectedClassName())
+ break;
+ PrevDecl = TD->getPreviousDecl();
+ }
+ if (!PrevDecl) break;
+ D = PrevDecl;
+ }
+
+ CXXRecordDecl *DeclaringClass = FindDeclaringClass(D);
+ Decl *ImmediateChild;
+ if (D->getDeclContext() == DeclaringClass)
+ ImmediateChild = D;
+ else {
+ DeclContext *DC = D->getDeclContext();
+ while (DC->getParent() != DeclaringClass)
+ DC = DC->getParent();
+ ImmediateChild = cast<Decl>(DC);
+ }
+
+ // Check whether there's an AccessSpecDecl preceding this in the
+ // chain of the DeclContext.
+ bool Implicit = true;
+ for (CXXRecordDecl::decl_iterator
+ I = DeclaringClass->decls_begin(), E = DeclaringClass->decls_end();
+ I != E; ++I) {
+ if (*I == ImmediateChild) break;
+ if (isa<AccessSpecDecl>(*I)) {
+ Implicit = false;
+ break;
+ }
+ }
+
+ S.Diag(D->getLocation(), diag::note_access_natural)
+ << (unsigned) (Access == AS_protected)
+ << Implicit;
+ return;
+ }
+
+ case AR_accessible: break;
+
+ case AR_dependent:
+ llvm_unreachable("can't diagnose dependent access failures");
+ }
+ }
+
+ CXXBasePaths Paths;
+ CXXBasePath &Path = *FindBestPath(S, EC, Entity, AS_public, Paths);
+
+ CXXBasePath::iterator I = Path.end(), E = Path.begin();
+ while (I != E) {
+ --I;
+
+ const CXXBaseSpecifier *BS = I->Base;
+ AccessSpecifier BaseAccess = BS->getAccessSpecifier();
+
+ // If this is public inheritance, or the derived class is a friend,
+ // skip this step.
+ if (BaseAccess == AS_public)
+ continue;
+
+ switch (GetFriendKind(S, EC, I->Class)) {
+ case AR_accessible: continue;
+ case AR_inaccessible: break;
+ case AR_dependent:
+ llvm_unreachable("can't diagnose dependent access failures");
+ }
+
+    // Check whether this base specifier is the tightest point
+ // constraining access. We have to check against AS_private for
+ // the same reasons as above.
+ if (BaseAccess == AS_private || BaseAccess >= Access) {
+
+ // We're constrained by inheritance, but we want to say
+ // "declared private here" if we're diagnosing a hierarchy
+ // conversion and this is the final step.
+ unsigned diagnostic;
+ if (D) diagnostic = diag::note_access_constrained_by_path;
+ else if (I + 1 == Path.end()) diagnostic = diag::note_access_natural;
+ else diagnostic = diag::note_access_constrained_by_path;
+
+ S.Diag(BS->getSourceRange().getBegin(), diagnostic)
+ << BS->getSourceRange()
+ << (BaseAccess == AS_protected)
+ << (BS->getAccessSpecifierAsWritten() == AS_none);
+
+ if (D)
+ S.Diag(D->getLocation(), diag::note_field_decl);
+
+ return;
+ }
+ }
+
+ llvm_unreachable("access not apparently constrained by path");
+}
+
+static void DiagnoseBadAccess(Sema &S, SourceLocation Loc,
+ const EffectiveContext &EC,
+ AccessTarget &Entity) {
+ const CXXRecordDecl *NamingClass = Entity.getNamingClass();
+ const CXXRecordDecl *DeclaringClass = Entity.getDeclaringClass();
+ NamedDecl *D = (Entity.isMemberAccess() ? Entity.getTargetDecl() : 0);
+
+ S.Diag(Loc, Entity.getDiag())
+ << (Entity.getAccess() == AS_protected)
+ << (D ? D->getDeclName() : DeclarationName())
+ << S.Context.getTypeDeclType(NamingClass)
+ << S.Context.getTypeDeclType(DeclaringClass);
+ DiagnoseAccessPath(S, EC, Entity);
+}
+
+/// MSVC has a bug where, if during a using declaration name lookup the
+/// declaration found is inaccessible (private) and that declaration was
+/// brought into scope via another using declaration whose target declaration
+/// is accessible (public), then no error is generated.
+/// Example:
+/// class A {
+/// public:
+/// int f();
+/// };
+/// class B : public A {
+/// private:
+/// using A::f;
+/// };
+/// class C : public B {
+/// private:
+/// using B::f;
+/// };
+///
+/// Here, B::f is private, so this should fail in Standard C++; but because
+/// B::f refers to A::f, which is public, MSVC accepts it.
+static bool IsMicrosoftUsingDeclarationAccessBug(Sema& S,
+ SourceLocation AccessLoc,
+ AccessTarget &Entity) {
+ if (UsingShadowDecl *Shadow =
+ dyn_cast<UsingShadowDecl>(Entity.getTargetDecl())) {
+ const NamedDecl *OrigDecl = Entity.getTargetDecl()->getUnderlyingDecl();
+ if (Entity.getTargetDecl()->getAccess() == AS_private &&
+ (OrigDecl->getAccess() == AS_public ||
+ OrigDecl->getAccess() == AS_protected)) {
+ S.Diag(AccessLoc, diag::ext_ms_using_declaration_inaccessible)
+ << Shadow->getUsingDecl()->getQualifiedNameAsString()
+ << OrigDecl->getQualifiedNameAsString();
+ return true;
+ }
+ }
+ return false;
+}
+
+/// Determines whether the accessed entity is accessible. Public members
+/// have been weeded out by this point.
+static AccessResult IsAccessible(Sema &S,
+ const EffectiveContext &EC,
+ AccessTarget &Entity) {
+ // Determine the actual naming class.
+ CXXRecordDecl *NamingClass = Entity.getNamingClass();
+ while (NamingClass->isAnonymousStructOrUnion())
+ NamingClass = cast<CXXRecordDecl>(NamingClass->getParent());
+ NamingClass = NamingClass->getCanonicalDecl();
+
+ AccessSpecifier UnprivilegedAccess = Entity.getAccess();
+ assert(UnprivilegedAccess != AS_public && "public access not weeded out");
+
+ // Before we try to recalculate access paths, try to white-list
+ // accesses which just trade in on the final step, i.e. accesses
+ // which don't require [M4] or [B4]. These are by far the most
+ // common forms of privileged access.
+ if (UnprivilegedAccess != AS_none) {
+ switch (HasAccess(S, EC, NamingClass, UnprivilegedAccess, Entity)) {
+ case AR_dependent:
+ // This is actually an interesting policy decision. We don't
+ // *have* to delay immediately here: we can do the full access
+ // calculation in the hope that friendship on some intermediate
+ // class will make the declaration accessible non-dependently.
+ // But that's not cheap, and odds are very good (note: assertion
+ // made without data) that the friend declaration will determine
+ // access.
+ return AR_dependent;
+
+ case AR_accessible: return AR_accessible;
+ case AR_inaccessible: break;
+ }
+ }
+
+ AccessTarget::SavedInstanceContext _ = Entity.saveInstanceContext();
+
+ // We lower member accesses to base accesses by pretending that the
+ // member is a base class of its declaring class.
+ AccessSpecifier FinalAccess;
+
+ if (Entity.isMemberAccess()) {
+ // Determine if the declaration is accessible from EC when named
+ // in its declaring class.
+ NamedDecl *Target = Entity.getTargetDecl();
+ const CXXRecordDecl *DeclaringClass = Entity.getDeclaringClass();
+
+ FinalAccess = Target->getAccess();
+ switch (HasAccess(S, EC, DeclaringClass, FinalAccess, Entity)) {
+ case AR_accessible:
+ FinalAccess = AS_public;
+ break;
+ case AR_inaccessible: break;
+ case AR_dependent: return AR_dependent; // see above
+ }
+
+ if (DeclaringClass == NamingClass)
+ return (FinalAccess == AS_public ? AR_accessible : AR_inaccessible);
+
+ Entity.suppressInstanceContext();
+ } else {
+ FinalAccess = AS_public;
+ }
+
+ assert(Entity.getDeclaringClass() != NamingClass);
+
+ // Append the declaration's access if applicable.
+ CXXBasePaths Paths;
+ CXXBasePath *Path = FindBestPath(S, EC, Entity, FinalAccess, Paths);
+ if (!Path)
+ return AR_dependent;
+
+ assert(Path->Access <= UnprivilegedAccess &&
+ "access along best path worse than direct?");
+ if (Path->Access == AS_public)
+ return AR_accessible;
+ return AR_inaccessible;
+}
+
+static void DelayDependentAccess(Sema &S,
+ const EffectiveContext &EC,
+ SourceLocation Loc,
+ const AccessTarget &Entity) {
+ assert(EC.isDependent() && "delaying non-dependent access");
+ DeclContext *DC = EC.getInnerContext();
+ assert(DC->isDependentContext() && "delaying non-dependent access");
+ DependentDiagnostic::Create(S.Context, DC, DependentDiagnostic::Access,
+ Loc,
+ Entity.isMemberAccess(),
+ Entity.getAccess(),
+ Entity.getTargetDecl(),
+ Entity.getNamingClass(),
+ Entity.getBaseObjectType(),
+ Entity.getDiag());
+}
+
+/// Checks access to an entity from the given effective context.
+static AccessResult CheckEffectiveAccess(Sema &S,
+ const EffectiveContext &EC,
+ SourceLocation Loc,
+ AccessTarget &Entity) {
+ assert(Entity.getAccess() != AS_public && "called for public access!");
+
+ if (S.getLangOpts().MicrosoftMode &&
+ IsMicrosoftUsingDeclarationAccessBug(S, Loc, Entity))
+ return AR_accessible;
+
+ switch (IsAccessible(S, EC, Entity)) {
+ case AR_dependent:
+ DelayDependentAccess(S, EC, Loc, Entity);
+ return AR_dependent;
+
+ case AR_inaccessible:
+ if (!Entity.isQuiet())
+ DiagnoseBadAccess(S, Loc, EC, Entity);
+ return AR_inaccessible;
+
+ case AR_accessible:
+ return AR_accessible;
+ }
+
+ // silence unnecessary warning
+ llvm_unreachable("invalid access result");
+}
+
+static Sema::AccessResult CheckAccess(Sema &S, SourceLocation Loc,
+ AccessTarget &Entity) {
+ // If the access path is public, it's accessible everywhere.
+ if (Entity.getAccess() == AS_public)
+ return Sema::AR_accessible;
+
+ if (S.SuppressAccessChecking)
+ return Sema::AR_accessible;
+
+ // If we're currently parsing a declaration, we may need to delay
+ // access control checking, because our effective context might be
+ // different based on what the declaration comes out as.
+ //
+ // For example, we might be parsing a declaration with a scope
+ // specifier, like this:
+ // A::private_type A::foo() { ... }
+ //
+ // Or we might be parsing something that will turn out to be a friend:
+ // void foo(A::private_type);
+ // void B::foo(A::private_type);
+ if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
+ S.DelayedDiagnostics.add(DelayedDiagnostic::makeAccess(Loc, Entity));
+ return Sema::AR_delayed;
+ }
+
+ EffectiveContext EC(S.CurContext);
+ switch (CheckEffectiveAccess(S, EC, Loc, Entity)) {
+ case AR_accessible: return Sema::AR_accessible;
+ case AR_inaccessible: return Sema::AR_inaccessible;
+ case AR_dependent: return Sema::AR_dependent;
+ }
+ llvm_unreachable("falling off end");
+}
+
+void Sema::HandleDelayedAccessCheck(DelayedDiagnostic &DD, Decl *decl) {
+ // Access control for names used in the declarations of functions
+ // and function templates should normally be evaluated in the context
+ // of the declaration, just in case it's a friend of something.
+ // However, this does not apply to local extern declarations.
+
+ DeclContext *DC = decl->getDeclContext();
+ if (FunctionDecl *fn = dyn_cast<FunctionDecl>(decl)) {
+ if (!DC->isFunctionOrMethod()) DC = fn;
+ } else if (FunctionTemplateDecl *fnt = dyn_cast<FunctionTemplateDecl>(decl)) {
+ // Never a local declaration.
+ DC = fnt->getTemplatedDecl();
+ }
+
+ EffectiveContext EC(DC);
+
+ AccessTarget Target(DD.getAccessData());
+
+ if (CheckEffectiveAccess(*this, EC, DD.Loc, Target) == ::AR_inaccessible)
+ DD.Triggered = true;
+}
+
+void Sema::HandleDependentAccessCheck(const DependentDiagnostic &DD,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ SourceLocation Loc = DD.getAccessLoc();
+ AccessSpecifier Access = DD.getAccess();
+
+ Decl *NamingD = FindInstantiatedDecl(Loc, DD.getAccessNamingClass(),
+ TemplateArgs);
+ if (!NamingD) return;
+ Decl *TargetD = FindInstantiatedDecl(Loc, DD.getAccessTarget(),
+ TemplateArgs);
+ if (!TargetD) return;
+
+ if (DD.isAccessToMember()) {
+ CXXRecordDecl *NamingClass = cast<CXXRecordDecl>(NamingD);
+ NamedDecl *TargetDecl = cast<NamedDecl>(TargetD);
+ QualType BaseObjectType = DD.getAccessBaseObjectType();
+ if (!BaseObjectType.isNull()) {
+ BaseObjectType = SubstType(BaseObjectType, TemplateArgs, Loc,
+ DeclarationName());
+ if (BaseObjectType.isNull()) return;
+ }
+
+ AccessTarget Entity(Context,
+ AccessTarget::Member,
+ NamingClass,
+ DeclAccessPair::make(TargetDecl, Access),
+ BaseObjectType);
+ Entity.setDiag(DD.getDiagnostic());
+ CheckAccess(*this, Loc, Entity);
+ } else {
+ AccessTarget Entity(Context,
+ AccessTarget::Base,
+ cast<CXXRecordDecl>(TargetD),
+ cast<CXXRecordDecl>(NamingD),
+ Access);
+ Entity.setDiag(DD.getDiagnostic());
+ CheckAccess(*this, Loc, Entity);
+ }
+}
+
+Sema::AccessResult Sema::CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
+ DeclAccessPair Found) {
+ if (!getLangOpts().AccessControl ||
+ !E->getNamingClass() ||
+ Found.getAccess() == AS_public)
+ return AR_accessible;
+
+ AccessTarget Entity(Context, AccessTarget::Member, E->getNamingClass(),
+ Found, QualType());
+ Entity.setDiag(diag::err_access) << E->getSourceRange();
+
+ return CheckAccess(*this, E->getNameLoc(), Entity);
+}
+
+/// Perform access-control checking on a previously-unresolved member
+/// access which has now been resolved to a member.
+Sema::AccessResult Sema::CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
+ DeclAccessPair Found) {
+ if (!getLangOpts().AccessControl ||
+ Found.getAccess() == AS_public)
+ return AR_accessible;
+
+ QualType BaseType = E->getBaseType();
+ if (E->isArrow())
+ BaseType = BaseType->getAs<PointerType>()->getPointeeType();
+
+ AccessTarget Entity(Context, AccessTarget::Member, E->getNamingClass(),
+ Found, BaseType);
+ Entity.setDiag(diag::err_access) << E->getSourceRange();
+
+ return CheckAccess(*this, E->getMemberLoc(), Entity);
+}
+
+/// Is the given special member function accessible for the purposes of
+/// deciding whether to define a special member function as deleted?
+bool Sema::isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
+ AccessSpecifier access,
+ QualType objectType) {
+ // Fast path.
+ if (access == AS_public || !getLangOpts().AccessControl) return true;
+
+ AccessTarget entity(Context, AccessTarget::Member, decl->getParent(),
+ DeclAccessPair::make(decl, access), objectType);
+
+ // Suppress diagnostics.
+ entity.setDiag(PDiag());
+
+ switch (CheckAccess(*this, SourceLocation(), entity)) {
+ case AR_accessible: return true;
+ case AR_inaccessible: return false;
+ case AR_dependent: llvm_unreachable("dependent for =delete computation");
+ case AR_delayed: llvm_unreachable("cannot delay =delete computation");
+ }
+ llvm_unreachable("bad access result");
+}
+
+Sema::AccessResult Sema::CheckDestructorAccess(SourceLocation Loc,
+ CXXDestructorDecl *Dtor,
+ const PartialDiagnostic &PDiag,
+ QualType ObjectTy) {
+ if (!getLangOpts().AccessControl)
+ return AR_accessible;
+
+ // There's never a path involved when checking implicit destructor access.
+ AccessSpecifier Access = Dtor->getAccess();
+ if (Access == AS_public)
+ return AR_accessible;
+
+ CXXRecordDecl *NamingClass = Dtor->getParent();
+ if (ObjectTy.isNull()) ObjectTy = Context.getTypeDeclType(NamingClass);
+
+ AccessTarget Entity(Context, AccessTarget::Member, NamingClass,
+ DeclAccessPair::make(Dtor, Access),
+ ObjectTy);
+ Entity.setDiag(PDiag); // TODO: avoid copy
+
+ return CheckAccess(*this, Loc, Entity);
+}
+
+/// Checks access to a constructor.
+Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
+ CXXConstructorDecl *Constructor,
+ const InitializedEntity &Entity,
+ AccessSpecifier Access,
+ bool IsCopyBindingRefToTemp) {
+ if (!getLangOpts().AccessControl || Access == AS_public)
+ return AR_accessible;
+
+ PartialDiagnostic PD(PDiag());
+ switch (Entity.getKind()) {
+ default:
+ PD = PDiag(IsCopyBindingRefToTemp
+ ? diag::ext_rvalue_to_reference_access_ctor
+ : diag::err_access_ctor);
+
+ break;
+
+ case InitializedEntity::EK_Base:
+ PD = PDiag(diag::err_access_base_ctor);
+ PD << Entity.isInheritedVirtualBase()
+ << Entity.getBaseSpecifier()->getType() << getSpecialMember(Constructor);
+ break;
+
+ case InitializedEntity::EK_Member: {
+ const FieldDecl *Field = cast<FieldDecl>(Entity.getDecl());
+ PD = PDiag(diag::err_access_field_ctor);
+ PD << Field->getType() << getSpecialMember(Constructor);
+ break;
+ }
+
+ case InitializedEntity::EK_LambdaCapture: {
+ const VarDecl *Var = Entity.getCapturedVar();
+ PD = PDiag(diag::err_access_lambda_capture);
+ PD << Var->getName() << Entity.getType() << getSpecialMember(Constructor);
+ break;
+ }
+
+ }
+
+ return CheckConstructorAccess(UseLoc, Constructor, Entity, Access, PD);
+}
+
+/// Checks access to a constructor.
+Sema::AccessResult Sema::CheckConstructorAccess(SourceLocation UseLoc,
+ CXXConstructorDecl *Constructor,
+ const InitializedEntity &Entity,
+ AccessSpecifier Access,
+ const PartialDiagnostic &PD) {
+ if (!getLangOpts().AccessControl ||
+ Access == AS_public)
+ return AR_accessible;
+
+ CXXRecordDecl *NamingClass = Constructor->getParent();
+
+ // Initializing a base sub-object is an instance method call on an
+ // object of the derived class. Otherwise, we have an instance method
+ // call on an object of the constructed type.
+ CXXRecordDecl *ObjectClass;
+ if (Entity.getKind() == InitializedEntity::EK_Base) {
+ ObjectClass = cast<CXXConstructorDecl>(CurContext)->getParent();
+ } else {
+ ObjectClass = NamingClass;
+ }
+
+ AccessTarget AccessEntity(Context, AccessTarget::Member, NamingClass,
+ DeclAccessPair::make(Constructor, Access),
+ Context.getTypeDeclType(ObjectClass));
+ AccessEntity.setDiag(PD);
+
+ return CheckAccess(*this, UseLoc, AccessEntity);
+}
+
+/// Checks direct (i.e. non-inherited) access to an arbitrary class
+/// member.
+Sema::AccessResult Sema::CheckDirectMemberAccess(SourceLocation UseLoc,
+ NamedDecl *Target,
+ const PartialDiagnostic &Diag) {
+ AccessSpecifier Access = Target->getAccess();
+ if (!getLangOpts().AccessControl ||
+ Access == AS_public)
+ return AR_accessible;
+
+ CXXRecordDecl *NamingClass = cast<CXXRecordDecl>(Target->getDeclContext());
+ AccessTarget Entity(Context, AccessTarget::Member, NamingClass,
+ DeclAccessPair::make(Target, Access),
+ QualType());
+ Entity.setDiag(Diag);
+ return CheckAccess(*this, UseLoc, Entity);
+}
+
+
+/// Checks access to an overloaded operator new or delete.
+Sema::AccessResult Sema::CheckAllocationAccess(SourceLocation OpLoc,
+ SourceRange PlacementRange,
+ CXXRecordDecl *NamingClass,
+ DeclAccessPair Found,
+ bool Diagnose) {
+ if (!getLangOpts().AccessControl ||
+ !NamingClass ||
+ Found.getAccess() == AS_public)
+ return AR_accessible;
+
+ AccessTarget Entity(Context, AccessTarget::Member, NamingClass, Found,
+ QualType());
+ if (Diagnose)
+ Entity.setDiag(diag::err_access)
+ << PlacementRange;
+
+ return CheckAccess(*this, OpLoc, Entity);
+}
+
+/// Checks access to an overloaded member operator, including
+/// conversion operators.
+Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
+ Expr *ObjectExpr,
+ Expr *ArgExpr,
+ DeclAccessPair Found) {
+ if (!getLangOpts().AccessControl ||
+ Found.getAccess() == AS_public)
+ return AR_accessible;
+
+ const RecordType *RT = ObjectExpr->getType()->castAs<RecordType>();
+ CXXRecordDecl *NamingClass = cast<CXXRecordDecl>(RT->getDecl());
+
+ AccessTarget Entity(Context, AccessTarget::Member, NamingClass, Found,
+ ObjectExpr->getType());
+ Entity.setDiag(diag::err_access)
+ << ObjectExpr->getSourceRange()
+ << (ArgExpr ? ArgExpr->getSourceRange() : SourceRange());
+
+ return CheckAccess(*this, OpLoc, Entity);
+}
+
+Sema::AccessResult Sema::CheckAddressOfMemberAccess(Expr *OvlExpr,
+ DeclAccessPair Found) {
+ if (!getLangOpts().AccessControl ||
+ Found.getAccess() == AS_none ||
+ Found.getAccess() == AS_public)
+ return AR_accessible;
+
+ OverloadExpr *Ovl = OverloadExpr::find(OvlExpr).Expression;
+ CXXRecordDecl *NamingClass = Ovl->getNamingClass();
+
+ AccessTarget Entity(Context, AccessTarget::Member, NamingClass, Found,
+ /*no instance context*/ QualType());
+ Entity.setDiag(diag::err_access)
+ << Ovl->getSourceRange();
+
+ return CheckAccess(*this, Ovl->getNameLoc(), Entity);
+}
+
+/// Checks access for a hierarchy conversion.
+///
+/// \param Base the type of the base class in the conversion
+/// \param Derived the type of the derived class in the conversion
+/// \param ForceCheck true if this check should be performed even if access
+///     control is disabled; some things rely on this for semantics
+/// \param ForceUnprivileged true if this check should proceed as if the
+///     context had no special privileges
+/// \param DiagID the diagnostic to emit if the access is denied, or zero to
+///     suppress diagnostics
+Sema::AccessResult Sema::CheckBaseClassAccess(SourceLocation AccessLoc,
+ QualType Base,
+ QualType Derived,
+ const CXXBasePath &Path,
+ unsigned DiagID,
+ bool ForceCheck,
+ bool ForceUnprivileged) {
+ if (!ForceCheck && !getLangOpts().AccessControl)
+ return AR_accessible;
+
+ if (Path.Access == AS_public)
+ return AR_accessible;
+
+ CXXRecordDecl *BaseD, *DerivedD;
+ BaseD = cast<CXXRecordDecl>(Base->getAs<RecordType>()->getDecl());
+ DerivedD = cast<CXXRecordDecl>(Derived->getAs<RecordType>()->getDecl());
+
+ AccessTarget Entity(Context, AccessTarget::Base, BaseD, DerivedD,
+ Path.Access);
+ if (DiagID)
+ Entity.setDiag(DiagID) << Derived << Base;
+
+ if (ForceUnprivileged) {
+ switch (CheckEffectiveAccess(*this, EffectiveContext(),
+ AccessLoc, Entity)) {
+ case ::AR_accessible: return Sema::AR_accessible;
+ case ::AR_inaccessible: return Sema::AR_inaccessible;
+ case ::AR_dependent: return Sema::AR_dependent;
+ }
+ llvm_unreachable("unexpected result from CheckEffectiveAccess");
+ }
+ return CheckAccess(*this, AccessLoc, Entity);
+}
+
+/// Checks access to all the declarations in the given result set.
+void Sema::CheckLookupAccess(const LookupResult &R) {
+ assert(getLangOpts().AccessControl
+ && "performing access check without access control");
+ assert(R.getNamingClass() && "performing access check without naming class");
+
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
+ if (I.getAccess() != AS_public) {
+ AccessTarget Entity(Context, AccessedEntity::Member,
+ R.getNamingClass(), I.getPair(),
+ R.getBaseObjectType());
+ Entity.setDiag(diag::err_access);
+ CheckAccess(*this, R.getNameLoc(), Entity);
+ }
+ }
+}
+
+/// Checks access to Decl from the given context. The check takes access
+/// specifiers into account, but not member access expressions and the like.
+///
+/// \param Decl the declaration whose accessibility is being checked
+/// \param Ctx the class or context from which to start the search
+/// \return true if Decl is accessible from Ctx, false otherwise.
+bool Sema::IsSimplyAccessible(NamedDecl *Decl, DeclContext *Ctx) {
+ if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx)) {
+ if (!Decl->isCXXClassMember())
+ return true;
+
+ QualType qType = Class->getTypeForDecl()->getCanonicalTypeInternal();
+ AccessTarget Entity(Context, AccessedEntity::Member, Class,
+ DeclAccessPair::make(Decl, Decl->getAccess()),
+ qType);
+ if (Entity.getAccess() == AS_public)
+ return true;
+
+ EffectiveContext EC(CurContext);
+ return ::IsAccessible(*this, EC, Entity) != ::AR_inaccessible;
+ }
+
+ if (ObjCIvarDecl *Ivar = dyn_cast<ObjCIvarDecl>(Decl)) {
+ // @public and @package ivars are always accessible.
+ if (Ivar->getCanonicalAccessControl() == ObjCIvarDecl::Public ||
+ Ivar->getCanonicalAccessControl() == ObjCIvarDecl::Package)
+ return true;
+
+ // If we are inside a class or category implementation, determine the
+ // interface we're in.
+ ObjCInterfaceDecl *ClassOfMethodDecl = 0;
+ if (ObjCMethodDecl *MD = getCurMethodDecl())
+ ClassOfMethodDecl = MD->getClassInterface();
+ else if (FunctionDecl *FD = getCurFunctionDecl()) {
+ if (ObjCImplDecl *Impl
+ = dyn_cast<ObjCImplDecl>(FD->getLexicalDeclContext())) {
+ if (ObjCImplementationDecl *IMPD
+ = dyn_cast<ObjCImplementationDecl>(Impl))
+ ClassOfMethodDecl = IMPD->getClassInterface();
+ else if (ObjCCategoryImplDecl* CatImplClass
+ = dyn_cast<ObjCCategoryImplDecl>(Impl))
+ ClassOfMethodDecl = CatImplClass->getClassInterface();
+ }
+ }
+
+ // If we're not in an interface, this ivar is inaccessible.
+ if (!ClassOfMethodDecl)
+ return false;
+
+ // If we're inside the same interface that owns the ivar, we're fine.
+ if (declaresSameEntity(ClassOfMethodDecl, Ivar->getContainingInterface()))
+ return true;
+
+ // If the ivar is private, it's inaccessible.
+ if (Ivar->getCanonicalAccessControl() == ObjCIvarDecl::Private)
+ return false;
+
+ return Ivar->getContainingInterface()->isSuperClassOf(ClassOfMethodDecl);
+ }
+
+ return true;
+}
+
+void Sema::ActOnStartSuppressingAccessChecks() {
+ assert(!SuppressAccessChecking &&
+ "Tried to start access check suppression when already started.");
+ SuppressAccessChecking = true;
+}
+
+void Sema::ActOnStopSuppressingAccessChecks() {
+  assert(SuppressAccessChecking &&
+         "Tried to stop access check suppression when already stopped.");
+ SuppressAccessChecking = false;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
new file mode 100644
index 0000000..e935fc7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaAttr.cpp
@@ -0,0 +1,426 @@
+//===--- SemaAttr.cpp - Semantic Analysis for Attributes ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for non-trivial attributes and
+// pragmas.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Pragma 'pack' and 'options align'
+//===----------------------------------------------------------------------===//
+
+namespace {
+ struct PackStackEntry {
+ // We just use a sentinel to represent when the stack is set to mac68k
+ // alignment.
+ static const unsigned kMac68kAlignmentSentinel = ~0U;
+
+ unsigned Alignment;
+ IdentifierInfo *Name;
+ };
+
+ /// PragmaPackStack - Simple class to wrap the stack used by #pragma
+ /// pack.
+ class PragmaPackStack {
+ typedef std::vector<PackStackEntry> stack_ty;
+
+ /// Alignment - The current user specified alignment.
+ unsigned Alignment;
+
+ /// Stack - Entries in the #pragma pack stack, consisting of saved
+ /// alignments and optional names.
+ stack_ty Stack;
+
+ public:
+ PragmaPackStack() : Alignment(0) {}
+
+ void setAlignment(unsigned A) { Alignment = A; }
+ unsigned getAlignment() { return Alignment; }
+
+ /// push - Push the current alignment onto the stack, optionally
+ /// using the given \arg Name for the record, if non-zero.
+ void push(IdentifierInfo *Name) {
+ PackStackEntry PSE = { Alignment, Name };
+ Stack.push_back(PSE);
+ }
+
+ /// pop - Pop a record from the stack and restore the current
+ /// alignment to the previous value. If \arg Name is non-zero then
+ /// the first such named record is popped, otherwise the top record
+ /// is popped. Returns true if the pop succeeded.
+ bool pop(IdentifierInfo *Name, bool IsReset);
+ };
+} // end anonymous namespace.
+
+bool PragmaPackStack::pop(IdentifierInfo *Name, bool IsReset) {
+ // If name is empty just pop top.
+ if (!Name) {
+ // An empty stack is a special case...
+ if (Stack.empty()) {
+ // If this isn't a reset, it is always an error.
+ if (!IsReset)
+ return false;
+
+      // Otherwise, it is an error only if no alignment has been set,
+      // since in that case there is nothing to reset.
+ if (!Alignment)
+ return false;
+
+ // Otherwise, reset to the default alignment.
+ Alignment = 0;
+ } else {
+ Alignment = Stack.back().Alignment;
+ Stack.pop_back();
+ }
+
+ return true;
+ }
+
+ // Otherwise, find the named record.
+ for (unsigned i = Stack.size(); i != 0; ) {
+ --i;
+ if (Stack[i].Name == Name) {
+ // Found it, pop up to and including this record.
+ Alignment = Stack[i].Alignment;
+ Stack.erase(Stack.begin() + i, Stack.end());
+ return true;
+ }
+ }
+
+ return false;
+}
+
+
+/// FreePackedContext - Deallocate and null out PackContext.
+void Sema::FreePackedContext() {
+ delete static_cast<PragmaPackStack*>(PackContext);
+ PackContext = 0;
+}
+
+void Sema::AddAlignmentAttributesForRecord(RecordDecl *RD) {
+ // If there is no pack context, we don't need any attributes.
+ if (!PackContext)
+ return;
+
+ PragmaPackStack *Stack = static_cast<PragmaPackStack*>(PackContext);
+
+ // Otherwise, check to see if we need a max field alignment attribute.
+ if (unsigned Alignment = Stack->getAlignment()) {
+ if (Alignment == PackStackEntry::kMac68kAlignmentSentinel)
+ RD->addAttr(::new (Context) AlignMac68kAttr(SourceLocation(), Context));
+ else
+ RD->addAttr(::new (Context) MaxFieldAlignmentAttr(SourceLocation(),
+ Context,
+ Alignment * 8));
+ }
+}
+
+void Sema::AddMsStructLayoutForRecord(RecordDecl *RD) {
+ if (!MSStructPragmaOn)
+ return;
+ RD->addAttr(::new (Context) MsStructAttr(SourceLocation(), Context));
+}
+
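+// For orientation, the directives handled here take forms like the
+// following (examples only, not an exhaustive list):
+//   #pragma options align=mac68k
+//   #pragma options align=natural
+//   #pragma options align=reset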
+void Sema::ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
+ SourceLocation PragmaLoc,
+ SourceLocation KindLoc) {
+ if (PackContext == 0)
+ PackContext = new PragmaPackStack();
+
+ PragmaPackStack *Context = static_cast<PragmaPackStack*>(PackContext);
+
+ // Reset just pops the top of the stack, or resets the current alignment to
+ // default.
+ if (Kind == Sema::POAK_Reset) {
+ if (!Context->pop(0, /*IsReset=*/true)) {
+ Diag(PragmaLoc, diag::warn_pragma_options_align_reset_failed)
+ << "stack empty";
+ }
+ return;
+ }
+
+ switch (Kind) {
+ // For all targets we support native and natural are the same.
+ //
+ // FIXME: This is not true on Darwin/PPC.
+ case POAK_Native:
+ case POAK_Power:
+ case POAK_Natural:
+ Context->push(0);
+ Context->setAlignment(0);
+ break;
+
+  // Note that '#pragma options align=packed' is not equivalent to attribute
+  // packed; it has a different precedence relative to attribute aligned.
+ case POAK_Packed:
+ Context->push(0);
+ Context->setAlignment(1);
+ break;
+
+ case POAK_Mac68k:
+ // Check if the target supports this.
+ if (!PP.getTargetInfo().hasAlignMac68kSupport()) {
+ Diag(PragmaLoc, diag::err_pragma_options_align_mac68k_target_unsupported);
+ return;
+ }
+ Context->push(0);
+ Context->setAlignment(PackStackEntry::kMac68kAlignmentSentinel);
+ break;
+
+ default:
+ Diag(PragmaLoc, diag::warn_pragma_options_align_unsupported_option)
+ << KindLoc;
+ break;
+ }
+}
+
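+// For orientation, typical forms of '#pragma pack' handled here include
+// (examples only):
+//   #pragma pack(4)            // PPK_Default
+//   #pragma pack(show)         // PPK_Show
+//   #pragma pack(push, r1, 8)  // PPK_Push
+//   #pragma pack(pop, r1)      // PPK_Pop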
+void Sema::ActOnPragmaPack(PragmaPackKind Kind, IdentifierInfo *Name,
+ Expr *alignment, SourceLocation PragmaLoc,
+ SourceLocation LParenLoc, SourceLocation RParenLoc) {
+ Expr *Alignment = static_cast<Expr *>(alignment);
+
+ // If specified then alignment must be a "small" power of two.
+ unsigned AlignmentVal = 0;
+ if (Alignment) {
+ llvm::APSInt Val;
+
+ // pack(0) is like pack(), which just works out since that is what
+ // we use 0 for in PackAttr.
+ if (Alignment->isTypeDependent() ||
+ Alignment->isValueDependent() ||
+ !Alignment->isIntegerConstantExpr(Val, Context) ||
+ !(Val == 0 || Val.isPowerOf2()) ||
+ Val.getZExtValue() > 16) {
+ Diag(PragmaLoc, diag::warn_pragma_pack_invalid_alignment);
+ return; // Ignore
+ }
+
+ AlignmentVal = (unsigned) Val.getZExtValue();
+ }
+
+ if (PackContext == 0)
+ PackContext = new PragmaPackStack();
+
+ PragmaPackStack *Context = static_cast<PragmaPackStack*>(PackContext);
+
+ switch (Kind) {
+ case Sema::PPK_Default: // pack([n])
+ Context->setAlignment(AlignmentVal);
+ break;
+
+ case Sema::PPK_Show: // pack(show)
+ // Show the current alignment, making sure to show the right value
+ // for the default.
+ AlignmentVal = Context->getAlignment();
+ // FIXME: This should come from the target.
+ if (AlignmentVal == 0)
+ AlignmentVal = 8;
+ if (AlignmentVal == PackStackEntry::kMac68kAlignmentSentinel)
+ Diag(PragmaLoc, diag::warn_pragma_pack_show) << "mac68k";
+ else
+ Diag(PragmaLoc, diag::warn_pragma_pack_show) << AlignmentVal;
+ break;
+
+  case Sema::PPK_Push: // pack(push [, id] [, n])
+ Context->push(Name);
+ // Set the new alignment if specified.
+ if (Alignment)
+ Context->setAlignment(AlignmentVal);
+ break;
+
+ case Sema::PPK_Pop: // pack(pop [, id] [, n])
+ // MSDN, C/C++ Preprocessor Reference > Pragma Directives > pack:
+ // "#pragma pack(pop, identifier, n) is undefined"
+ if (Alignment && Name)
+ Diag(PragmaLoc, diag::warn_pragma_pack_pop_identifer_and_alignment);
+
+ // Do the pop.
+ if (!Context->pop(Name, /*IsReset=*/false)) {
+ // If a name was specified then failure indicates the name
+ // wasn't found. Otherwise failure indicates the stack was
+ // empty.
+ Diag(PragmaLoc, diag::warn_pragma_pack_pop_failed)
+ << (Name ? "no record matching name" : "stack empty");
+
+ // FIXME: Warn about popping named records as MSVC does.
+ } else {
+ // Pop succeeded, set the new alignment if specified.
+ if (Alignment)
+ Context->setAlignment(AlignmentVal);
+ }
+ break;
+ }
+}
+
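+/// Handle '#pragma ms_struct on|off' by recording whether MS-compatible
+/// struct layout is currently requested.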
+void Sema::ActOnPragmaMSStruct(PragmaMSStructKind Kind) {
+ MSStructPragmaOn = (Kind == PMSST_ON);
+}
+
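+/// Handle '#pragma unused(identifier)': look up the named variable in the
+/// current scope and mark it with the 'unused' attribute.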
+void Sema::ActOnPragmaUnused(const Token &IdTok, Scope *curScope,
+ SourceLocation PragmaLoc) {
+
+ IdentifierInfo *Name = IdTok.getIdentifierInfo();
+ LookupResult Lookup(*this, Name, IdTok.getLocation(), LookupOrdinaryName);
+ LookupParsedName(Lookup, curScope, NULL, true);
+
+ if (Lookup.empty()) {
+ Diag(PragmaLoc, diag::warn_pragma_unused_undeclared_var)
+ << Name << SourceRange(IdTok.getLocation());
+ return;
+ }
+
+ VarDecl *VD = Lookup.getAsSingle<VarDecl>();
+ if (!VD) {
+ Diag(PragmaLoc, diag::warn_pragma_unused_expected_var_arg)
+ << Name << SourceRange(IdTok.getLocation());
+ return;
+ }
+
+ // Warn if this was used before being marked unused.
+ if (VD->isUsed())
+ Diag(PragmaLoc, diag::warn_used_but_marked_unused) << Name;
+
+ VD->addAttr(::new (Context) UnusedAttr(IdTok.getLocation(), Context));
+}
+
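+/// If the declaration appears inside a '#pragma clang arc_cf_code_audited'
+/// region, add the corresponding CF-audited-transfer attribute to it (unless
+/// it already carries a CF transfer attribute).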
+void Sema::AddCFAuditedAttribute(Decl *D) {
+ SourceLocation Loc = PP.getPragmaARCCFCodeAuditedLoc();
+ if (!Loc.isValid()) return;
+
+ // Don't add a redundant or conflicting attribute.
+ if (D->hasAttr<CFAuditedTransferAttr>() ||
+ D->hasAttr<CFUnknownTransferAttr>())
+ return;
+
+ D->addAttr(::new (Context) CFAuditedTransferAttr(Loc, Context));
+}
+
+typedef std::vector<std::pair<unsigned, SourceLocation> > VisStack;
+enum { NoVisibility = (unsigned) -1 };
+
+void Sema::AddPushedVisibilityAttribute(Decl *D) {
+ if (!VisContext)
+ return;
+
+ if (isa<NamedDecl>(D) && cast<NamedDecl>(D)->getExplicitVisibility())
+ return;
+
+ VisStack *Stack = static_cast<VisStack*>(VisContext);
+ unsigned rawType = Stack->back().first;
+ if (rawType == NoVisibility) return;
+
+ VisibilityAttr::VisibilityType type
+ = (VisibilityAttr::VisibilityType) rawType;
+ SourceLocation loc = Stack->back().second;
+
+ D->addAttr(::new (Context) VisibilityAttr(loc, Context, type));
+}
+
+/// FreeVisContext - Deallocate and null out VisContext.
+void Sema::FreeVisContext() {
+ delete static_cast<VisStack*>(VisContext);
+ VisContext = 0;
+}
+
+static void PushPragmaVisibility(Sema &S, unsigned type, SourceLocation loc) {
+ // Put visibility on stack.
+ if (!S.VisContext)
+ S.VisContext = new VisStack;
+
+ VisStack *Stack = static_cast<VisStack*>(S.VisContext);
+ Stack->push_back(std::make_pair(type, loc));
+}
+
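+/// Handle '#pragma GCC visibility push(<visibility>)' and
+/// '#pragma GCC visibility pop'; a null \p VisType indicates a pop.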
+void Sema::ActOnPragmaVisibility(const IdentifierInfo* VisType,
+ SourceLocation PragmaLoc) {
+ if (VisType) {
+ // Compute visibility to use.
+ VisibilityAttr::VisibilityType type;
+ if (VisType->isStr("default"))
+ type = VisibilityAttr::Default;
+ else if (VisType->isStr("hidden"))
+ type = VisibilityAttr::Hidden;
+ else if (VisType->isStr("internal"))
+ type = VisibilityAttr::Hidden; // FIXME
+ else if (VisType->isStr("protected"))
+ type = VisibilityAttr::Protected;
+ else {
+ Diag(PragmaLoc, diag::warn_attribute_unknown_visibility) <<
+ VisType->getName();
+ return;
+ }
+ PushPragmaVisibility(*this, type, PragmaLoc);
+ } else {
+ PopPragmaVisibility(false, PragmaLoc);
+ }
+}
+
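+/// Handle '#pragma STDC FP_CONTRACT ON|OFF|DEFAULT'.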
+void Sema::ActOnPragmaFPContract(tok::OnOffSwitch OOS) {
+ switch (OOS) {
+ case tok::OOS_ON:
+ FPFeatures.fp_contract = 1;
+ break;
+ case tok::OOS_OFF:
+ FPFeatures.fp_contract = 0;
+ break;
+ case tok::OOS_DEFAULT:
+ FPFeatures.fp_contract = getLangOpts().DefaultFPContract;
+ break;
+ }
+}
+
+void Sema::PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
+ SourceLocation Loc) {
+ // Visibility calculations will consider the namespace's visibility.
+ // Here we just want to note that we're in a visibility context
+ // which overrides any enclosing #pragma context, but doesn't itself
+ // contribute visibility.
+ PushPragmaVisibility(*this, NoVisibility, Loc);
+}
+
+void Sema::PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc) {
+ if (!VisContext) {
+ Diag(EndLoc, diag::err_pragma_pop_visibility_mismatch);
+ return;
+ }
+
+ // Pop visibility from stack
+ VisStack *Stack = static_cast<VisStack*>(VisContext);
+
+ const std::pair<unsigned, SourceLocation> *Back = &Stack->back();
+ bool StartsWithPragma = Back->first != NoVisibility;
+ if (StartsWithPragma && IsNamespaceEnd) {
+ Diag(Back->second, diag::err_pragma_push_visibility_mismatch);
+ Diag(EndLoc, diag::note_surrounding_namespace_ends_here);
+
+ // For better error recovery, eat all pushes inside the namespace.
+ do {
+ Stack->pop_back();
+ Back = &Stack->back();
+ StartsWithPragma = Back->first != NoVisibility;
+ } while (StartsWithPragma);
+ } else if (!StartsWithPragma && !IsNamespaceEnd) {
+ Diag(EndLoc, diag::err_pragma_pop_visibility_mismatch);
+ Diag(Back->second, diag::note_surrounding_namespace_starts_here);
+ return;
+ }
+
+ Stack->pop_back();
+ // To simplify the implementation, never keep around an empty stack.
+ if (Stack->empty())
+ FreeVisContext();
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
new file mode 100644
index 0000000..5a0fcec
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -0,0 +1,958 @@
+//===--- SemaCXXScopeSpec.cpp - Semantic Analysis for C++ scope specifiers-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements C++ semantic analysis for scope specifiers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Template.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Sema/DeclSpec.h"
+#include "TypeLocBuilder.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+
+/// \brief Find the current instantiation associated with the given type.
+static CXXRecordDecl *getCurrentInstantiationOf(QualType T,
+ DeclContext *CurContext) {
+ if (T.isNull())
+ return 0;
+
+ const Type *Ty = T->getCanonicalTypeInternal().getTypePtr();
+ if (const RecordType *RecordTy = dyn_cast<RecordType>(Ty)) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (!T->isDependentType())
+ return Record;
+
+ // This may be a member of a class template or class template partial
+ // specialization. If it's part of the current semantic context, then it's
+    // an injected-class-name.
+ for (; !CurContext->isFileContext(); CurContext = CurContext->getParent())
+ if (CurContext->Equals(Record))
+ return Record;
+
+ return 0;
+ } else if (isa<InjectedClassNameType>(Ty))
+ return cast<InjectedClassNameType>(Ty)->getDecl();
+ else
+ return 0;
+}
+
+/// \brief Compute the DeclContext that is associated with the given type.
+///
+/// \param T the type for which we are attempting to find a DeclContext.
+///
+/// \returns the declaration context represented by the type T,
+/// or NULL if the declaration context cannot be computed (e.g., because it is
+/// dependent and not the current instantiation).
+DeclContext *Sema::computeDeclContext(QualType T) {
+ if (!T->isDependentType())
+ if (const TagType *Tag = T->getAs<TagType>())
+ return Tag->getDecl();
+
+ return ::getCurrentInstantiationOf(T, CurContext);
+}
+
+/// \brief Compute the DeclContext that is associated with the given
+/// scope specifier.
+///
+/// \param SS the C++ scope specifier as it appears in the source
+///
+/// \param EnteringContext when true, we will be entering the context of
+/// this scope specifier, so we can retrieve the declaration context of a
+/// class template or class template partial specialization even if it is
+/// not the current instantiation.
+///
+/// \returns the declaration context represented by the scope specifier @p SS,
+/// or NULL if the declaration context cannot be computed (e.g., because it is
+/// dependent and not the current instantiation).
+DeclContext *Sema::computeDeclContext(const CXXScopeSpec &SS,
+ bool EnteringContext) {
+ if (!SS.isSet() || SS.isInvalid())
+ return 0;
+
+ NestedNameSpecifier *NNS
+ = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+ if (NNS->isDependent()) {
+ // If this nested-name-specifier refers to the current
+ // instantiation, return its DeclContext.
+ if (CXXRecordDecl *Record = getCurrentInstantiationOf(NNS))
+ return Record;
+
+ if (EnteringContext) {
+ const Type *NNSType = NNS->getAsType();
+ if (!NNSType) {
+ return 0;
+ }
+
+ // Look through type alias templates, per C++0x [temp.dep.type]p1.
+ NNSType = Context.getCanonicalType(NNSType);
+ if (const TemplateSpecializationType *SpecType
+ = NNSType->getAs<TemplateSpecializationType>()) {
+ // We are entering the context of the nested name specifier, so try to
+ // match the nested name specifier to either a primary class template
+ // or a class template partial specialization.
+ if (ClassTemplateDecl *ClassTemplate
+ = dyn_cast_or_null<ClassTemplateDecl>(
+ SpecType->getTemplateName().getAsTemplateDecl())) {
+ QualType ContextType
+ = Context.getCanonicalType(QualType(SpecType, 0));
+
+ // If the type of the nested name specifier is the same as the
+ // injected class name of the named class template, we're entering
+ // into that class template definition.
+ QualType Injected
+ = ClassTemplate->getInjectedClassNameSpecialization();
+ if (Context.hasSameType(Injected, ContextType))
+ return ClassTemplate->getTemplatedDecl();
+
+ // If the type of the nested name specifier is the same as the
+ // type of one of the class template's class template partial
+ // specializations, we're entering into the definition of that
+ // class template partial specialization.
+ if (ClassTemplatePartialSpecializationDecl *PartialSpec
+ = ClassTemplate->findPartialSpecialization(ContextType))
+ return PartialSpec;
+ }
+ } else if (const RecordType *RecordT = NNSType->getAs<RecordType>()) {
+ // The nested name specifier refers to a member of a class template.
+ return RecordT->getDecl();
+ }
+ }
+
+ return 0;
+ }
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ llvm_unreachable("Dependent nested-name-specifier has no DeclContext");
+
+ case NestedNameSpecifier::Namespace:
+ return NNS->getAsNamespace();
+
+ case NestedNameSpecifier::NamespaceAlias:
+ return NNS->getAsNamespaceAlias()->getNamespace();
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ const TagType *Tag = NNS->getAsType()->getAs<TagType>();
+ assert(Tag && "Non-tag type in nested-name-specifier");
+ return Tag->getDecl();
+ }
+
+ case NestedNameSpecifier::Global:
+ return Context.getTranslationUnitDecl();
+ }
+
+ llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
+}
+
+bool Sema::isDependentScopeSpecifier(const CXXScopeSpec &SS) {
+ if (!SS.isSet() || SS.isInvalid())
+ return false;
+
+ NestedNameSpecifier *NNS
+ = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+ return NNS->isDependent();
+}
+
+/// \brief Determine whether this C++ scope specifier refers to an
+/// unknown specialization, i.e., a dependent type that is not the
+/// current instantiation.
+bool Sema::isUnknownSpecialization(const CXXScopeSpec &SS) {
+ if (!isDependentScopeSpecifier(SS))
+ return false;
+
+ NestedNameSpecifier *NNS
+ = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+ return getCurrentInstantiationOf(NNS) == 0;
+}
+
+/// \brief If the given nested name specifier refers to the current
+/// instantiation, return the declaration that corresponds to that
+/// current instantiation (C++0x [temp.dep.type]p1).
+///
+/// \param NNS a dependent nested name specifier.
+CXXRecordDecl *Sema::getCurrentInstantiationOf(NestedNameSpecifier *NNS) {
+ assert(getLangOpts().CPlusPlus && "Only callable in C++");
+ assert(NNS->isDependent() && "Only dependent nested-name-specifier allowed");
+
+ if (!NNS->getAsType())
+ return 0;
+
+ QualType T = QualType(NNS->getAsType(), 0);
+ return ::getCurrentInstantiationOf(T, CurContext);
+}
+
+/// \brief Require that the context specified by SS be complete.
+///
+/// If SS refers to a type, this routine checks whether the type is
+/// complete enough (or can be made complete enough) for name lookup
+/// into the DeclContext. A type that is not yet completed can be
+/// considered "complete enough" if it is a class/struct/union/enum
+/// that is currently being defined. Or, if we have a type that names
+/// a class template specialization that is not a complete type, we
+/// will attempt to instantiate that class template.
+bool Sema::RequireCompleteDeclContext(CXXScopeSpec &SS,
+ DeclContext *DC) {
+ assert(DC != 0 && "given null context");
+
+ TagDecl *tag = dyn_cast<TagDecl>(DC);
+
+ // If this is a dependent type, then we consider it complete.
+ if (!tag || tag->isDependentContext())
+ return false;
+
+ // If we're currently defining this type, then lookup into the
+ // type is okay: don't complain that it isn't complete yet.
+ QualType type = Context.getTypeDeclType(tag);
+ const TagType *tagType = type->getAs<TagType>();
+ if (tagType && tagType->isBeingDefined())
+ return false;
+
+ SourceLocation loc = SS.getLastQualifierNameLoc();
+ if (loc.isInvalid()) loc = SS.getRange().getBegin();
+
+ // The type must be complete.
+ if (RequireCompleteType(loc, type,
+ PDiag(diag::err_incomplete_nested_name_spec)
+ << SS.getRange())) {
+ SS.SetInvalid(SS.getRange());
+ return true;
+ }
+
+ // Fixed enum types are complete, but they aren't valid as scopes
+ // until we see a definition, so awkwardly pull out this special
+ // case.
+ const EnumType *enumType = dyn_cast_or_null<EnumType>(tagType);
+ if (!enumType || enumType->getDecl()->isCompleteDefinition())
+ return false;
+
+ // Try to instantiate the definition, if this is a specialization of an
+ // enumeration temploid.
+ EnumDecl *ED = enumType->getDecl();
+ if (EnumDecl *Pattern = ED->getInstantiatedFromMemberEnum()) {
+ MemberSpecializationInfo *MSI = ED->getMemberSpecializationInfo();
+ if (MSI->getTemplateSpecializationKind() != TSK_ExplicitSpecialization) {
+ if (InstantiateEnum(loc, ED, Pattern, getTemplateInstantiationArgs(ED),
+ TSK_ImplicitInstantiation)) {
+ SS.SetInvalid(SS.getRange());
+ return true;
+ }
+ return false;
+ }
+ }
+
+ Diag(loc, diag::err_incomplete_nested_name_spec)
+ << type << SS.getRange();
+ SS.SetInvalid(SS.getRange());
+ return true;
+}
+
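+/// Handle the leading '::' of a fully-qualified name (e.g. '::std::size_t')
+/// by recording the global scope in \p SS.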
+bool Sema::ActOnCXXGlobalScopeSpecifier(Scope *S, SourceLocation CCLoc,
+ CXXScopeSpec &SS) {
+ SS.MakeGlobal(Context, CCLoc);
+ return false;
+}
+
+/// \brief Determines whether the given declaration is an acceptable
+/// result for name lookup of a nested-name-specifier.
+bool Sema::isAcceptableNestedNameSpecifier(NamedDecl *SD) {
+ if (!SD)
+ return false;
+
+ // Namespace and namespace aliases are fine.
+ if (isa<NamespaceDecl>(SD) || isa<NamespaceAliasDecl>(SD))
+ return true;
+
+ if (!isa<TypeDecl>(SD))
+ return false;
+
+ // Determine whether we have a class (or, in C++11, an enum) or
+ // a typedef thereof. If so, build the nested-name-specifier.
+ QualType T = Context.getTypeDeclType(cast<TypeDecl>(SD));
+ if (T->isDependentType())
+ return true;
+ else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(SD)) {
+ if (TD->getUnderlyingType()->isRecordType() ||
+ (Context.getLangOpts().CPlusPlus0x &&
+ TD->getUnderlyingType()->isEnumeralType()))
+ return true;
+ } else if (isa<RecordDecl>(SD) ||
+ (Context.getLangOpts().CPlusPlus0x && isa<EnumDecl>(SD)))
+ return true;
+
+ return false;
+}
+
+/// \brief If the given nested-name-specifier begins with a bare identifier
+/// (e.g., Base::), perform name lookup for that identifier as a
+/// nested-name-specifier within the given scope, and return the result of that
+/// name lookup.
+NamedDecl *Sema::FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS) {
+ if (!S || !NNS)
+ return 0;
+
+ while (NNS->getPrefix())
+ NNS = NNS->getPrefix();
+
+ if (NNS->getKind() != NestedNameSpecifier::Identifier)
+ return 0;
+
+ LookupResult Found(*this, NNS->getAsIdentifier(), SourceLocation(),
+ LookupNestedNameSpecifierName);
+ LookupName(Found, S);
+ assert(!Found.isAmbiguous() && "Cannot handle ambiguities here yet");
+
+ if (!Found.isSingleResult())
+ return 0;
+
+ NamedDecl *Result = Found.getFoundDecl();
+ if (isAcceptableNestedNameSpecifier(Result))
+ return Result;
+
+ return 0;
+}
+
+bool Sema::isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
+ SourceLocation IdLoc,
+ IdentifierInfo &II,
+ ParsedType ObjectTypePtr) {
+ QualType ObjectType = GetTypeFromParser(ObjectTypePtr);
+ LookupResult Found(*this, &II, IdLoc, LookupNestedNameSpecifierName);
+
+ // Determine where to perform name lookup
+ DeclContext *LookupCtx = 0;
+ bool isDependent = false;
+ if (!ObjectType.isNull()) {
+ // This nested-name-specifier occurs in a member access expression, e.g.,
+ // x->B::f, and we are looking into the type of the object.
+ assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist");
+ LookupCtx = computeDeclContext(ObjectType);
+ isDependent = ObjectType->isDependentType();
+ } else if (SS.isSet()) {
+ // This nested-name-specifier occurs after another nested-name-specifier,
+    // so look into the context associated with the prior nested-name-specifier.
+ LookupCtx = computeDeclContext(SS, false);
+ isDependent = isDependentScopeSpecifier(SS);
+ Found.setContextRange(SS.getRange());
+ }
+
+ if (LookupCtx) {
+ // Perform "qualified" name lookup into the declaration context we
+ // computed, which is either the type of the base of a member access
+ // expression or the declaration context associated with a prior
+ // nested-name-specifier.
+
+ // The declaration context must be complete.
+ if (!LookupCtx->isDependentContext() &&
+ RequireCompleteDeclContext(SS, LookupCtx))
+ return false;
+
+ LookupQualifiedName(Found, LookupCtx);
+ } else if (isDependent) {
+ return false;
+ } else {
+ LookupName(Found, S);
+ }
+ Found.suppressDiagnostics();
+
+ if (NamedDecl *ND = Found.getAsSingle<NamedDecl>())
+ return isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND);
+
+ return false;
+}
+
+namespace {
+
+// Callback to only accept typo corrections that can form a valid C++
+// nested-name-specifier, such as a namespace, class, or (in C++11) enumeration.
+class NestedNameSpecifierValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ explicit NestedNameSpecifierValidatorCCC(Sema &SRef)
+ : SRef(SRef) {}
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ return SRef.isAcceptableNestedNameSpecifier(candidate.getCorrectionDecl());
+ }
+
+ private:
+ Sema &SRef;
+};
+
+}
+
+/// \brief Build a new nested-name-specifier for "identifier::", as described
+/// by ActOnCXXNestedNameSpecifier.
+///
+/// This routine differs only slightly from ActOnCXXNestedNameSpecifier, in
+/// that it contains an extra parameter \p ScopeLookupResult, which provides
+/// the result of name lookup within the scope of the nested-name-specifier
+/// that was computed at template definition time.
+///
+/// If ErrorRecoveryLookup is true, then this call is used to improve error
+/// recovery. This means that it should not emit diagnostics, it should
+/// just return true on failure. It also means it should only return a valid
+/// scope if it *knows* that the result is correct. It should not return in a
+/// dependent context, for example. Nor will it extend \p SS with the scope
+/// specifier.
+bool Sema::BuildCXXNestedNameSpecifier(Scope *S,
+ IdentifierInfo &Identifier,
+ SourceLocation IdentifierLoc,
+ SourceLocation CCLoc,
+ QualType ObjectType,
+ bool EnteringContext,
+ CXXScopeSpec &SS,
+ NamedDecl *ScopeLookupResult,
+ bool ErrorRecoveryLookup) {
+ LookupResult Found(*this, &Identifier, IdentifierLoc,
+ LookupNestedNameSpecifierName);
+
+ // Determine where to perform name lookup
+ DeclContext *LookupCtx = 0;
+ bool isDependent = false;
+ if (!ObjectType.isNull()) {
+ // This nested-name-specifier occurs in a member access expression, e.g.,
+ // x->B::f, and we are looking into the type of the object.
+ assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist");
+ LookupCtx = computeDeclContext(ObjectType);
+ isDependent = ObjectType->isDependentType();
+ } else if (SS.isSet()) {
+ // This nested-name-specifier occurs after another nested-name-specifier,
+ // so look into the context associated with the prior nested-name-specifier.
+ LookupCtx = computeDeclContext(SS, EnteringContext);
+ isDependent = isDependentScopeSpecifier(SS);
+ Found.setContextRange(SS.getRange());
+ }
+
+
+ bool ObjectTypeSearchedInScope = false;
+ if (LookupCtx) {
+ // Perform "qualified" name lookup into the declaration context we
+ // computed, which is either the type of the base of a member access
+ // expression or the declaration context associated with a prior
+ // nested-name-specifier.
+
+ // The declaration context must be complete.
+ if (!LookupCtx->isDependentContext() &&
+ RequireCompleteDeclContext(SS, LookupCtx))
+ return true;
+
+ LookupQualifiedName(Found, LookupCtx);
+
+ if (!ObjectType.isNull() && Found.empty()) {
+ // C++ [basic.lookup.classref]p4:
+ // If the id-expression in a class member access is a qualified-id of
+ // the form
+ //
+ // class-name-or-namespace-name::...
+ //
+ // the class-name-or-namespace-name following the . or -> operator is
+ // looked up both in the context of the entire postfix-expression and in
+ // the scope of the class of the object expression. If the name is found
+ // only in the scope of the class of the object expression, the name
+ // shall refer to a class-name. If the name is found only in the
+ // context of the entire postfix-expression, the name shall refer to a
+ // class-name or namespace-name. [...]
+ //
+ // Qualified name lookup into a class will not find a namespace-name,
+ // so we do not need to diagnose that case specifically. However,
+ // this qualified name lookup may find nothing. In that case, perform
+ // unqualified name lookup in the given scope (if available) or
+ // reconstruct the result from when name lookup was performed at template
+ // definition time.
+ if (S)
+ LookupName(Found, S);
+ else if (ScopeLookupResult)
+ Found.addDecl(ScopeLookupResult);
+
+ ObjectTypeSearchedInScope = true;
+ }
+ } else if (!isDependent) {
+ // Perform unqualified name lookup in the current scope.
+ LookupName(Found, S);
+ }
+
+ // If we performed lookup into a dependent context and did not find anything,
+ // that's fine: just build a dependent nested-name-specifier.
+ if (Found.empty() && isDependent &&
+ !(LookupCtx && LookupCtx->isRecord() &&
+ (!cast<CXXRecordDecl>(LookupCtx)->hasDefinition() ||
+ !cast<CXXRecordDecl>(LookupCtx)->hasAnyDependentBases()))) {
+ // Don't speculate if we're just trying to improve error recovery.
+ if (ErrorRecoveryLookup)
+ return true;
+
+ // We were not able to compute the declaration context for a dependent
+ // base object type or prior nested-name-specifier, so this
+ // nested-name-specifier refers to an unknown specialization. Just build
+ // a dependent nested-name-specifier.
+ SS.Extend(Context, &Identifier, IdentifierLoc, CCLoc);
+ return false;
+ }
+
+ // FIXME: Deal with ambiguities cleanly.
+
+ if (Found.empty() && !ErrorRecoveryLookup) {
+ // We haven't found anything, and we're not recovering from a
+ // different kind of error, so look for typos.
+ DeclarationName Name = Found.getLookupName();
+ NestedNameSpecifierValidatorCCC Validator(*this);
+ TypoCorrection Corrected;
+ Found.clear();
+ if ((Corrected = CorrectTypo(Found.getLookupNameInfo(),
+ Found.getLookupKind(), S, &SS, Validator,
+ LookupCtx, EnteringContext))) {
+ std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOpts()));
+ if (LookupCtx)
+ Diag(Found.getNameLoc(), diag::err_no_member_suggest)
+ << Name << LookupCtx << CorrectedQuotedStr << SS.getRange()
+ << FixItHint::CreateReplacement(Found.getNameLoc(), CorrectedStr);
+ else
+ Diag(Found.getNameLoc(), diag::err_undeclared_var_use_suggest)
+ << Name << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(Found.getNameLoc(), CorrectedStr);
+
+ if (NamedDecl *ND = Corrected.getCorrectionDecl()) {
+ Diag(ND->getLocation(), diag::note_previous_decl) << CorrectedQuotedStr;
+ Found.addDecl(ND);
+ }
+ Found.setLookupName(Corrected.getCorrection());
+ } else {
+ Found.setLookupName(&Identifier);
+ }
+ }
+
+ NamedDecl *SD = Found.getAsSingle<NamedDecl>();
+ if (isAcceptableNestedNameSpecifier(SD)) {
+ if (!ObjectType.isNull() && !ObjectTypeSearchedInScope) {
+ // C++ [basic.lookup.classref]p4:
+ // [...] If the name is found in both contexts, the
+ // class-name-or-namespace-name shall refer to the same entity.
+ //
+ // We already found the name in the scope of the object. Now, look
+ // into the current scope (the scope of the postfix-expression) to
+ // see if we can find the same name there. As above, if there is no
+ // scope, reconstruct the result from the template instantiation itself.
+ NamedDecl *OuterDecl;
+ if (S) {
+ LookupResult FoundOuter(*this, &Identifier, IdentifierLoc,
+ LookupNestedNameSpecifierName);
+ LookupName(FoundOuter, S);
+ OuterDecl = FoundOuter.getAsSingle<NamedDecl>();
+ } else
+ OuterDecl = ScopeLookupResult;
+
+ if (isAcceptableNestedNameSpecifier(OuterDecl) &&
+ OuterDecl->getCanonicalDecl() != SD->getCanonicalDecl() &&
+ (!isa<TypeDecl>(OuterDecl) || !isa<TypeDecl>(SD) ||
+ !Context.hasSameType(
+ Context.getTypeDeclType(cast<TypeDecl>(OuterDecl)),
+ Context.getTypeDeclType(cast<TypeDecl>(SD))))) {
+ if (ErrorRecoveryLookup)
+ return true;
+
+ Diag(IdentifierLoc,
+ diag::err_nested_name_member_ref_lookup_ambiguous)
+ << &Identifier;
+ Diag(SD->getLocation(), diag::note_ambig_member_ref_object_type)
+ << ObjectType;
+ Diag(OuterDecl->getLocation(), diag::note_ambig_member_ref_scope);
+
+ // Fall through so that we'll pick the name we found in the object
+ // type, since that's probably what the user wanted anyway.
+ }
+ }
+
+ // If we're just performing this lookup for error-recovery purposes,
+ // don't extend the nested-name-specifier. Just return now.
+ if (ErrorRecoveryLookup)
+ return false;
+
+ if (NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(SD)) {
+ SS.Extend(Context, Namespace, IdentifierLoc, CCLoc);
+ return false;
+ }
+
+ if (NamespaceAliasDecl *Alias = dyn_cast<NamespaceAliasDecl>(SD)) {
+ SS.Extend(Context, Alias, IdentifierLoc, CCLoc);
+ return false;
+ }
+
+ QualType T = Context.getTypeDeclType(cast<TypeDecl>(SD));
+ TypeLocBuilder TLB;
+ if (isa<InjectedClassNameType>(T)) {
+ InjectedClassNameTypeLoc InjectedTL
+ = TLB.push<InjectedClassNameTypeLoc>(T);
+ InjectedTL.setNameLoc(IdentifierLoc);
+ } else if (isa<RecordType>(T)) {
+ RecordTypeLoc RecordTL = TLB.push<RecordTypeLoc>(T);
+ RecordTL.setNameLoc(IdentifierLoc);
+ } else if (isa<TypedefType>(T)) {
+ TypedefTypeLoc TypedefTL = TLB.push<TypedefTypeLoc>(T);
+ TypedefTL.setNameLoc(IdentifierLoc);
+ } else if (isa<EnumType>(T)) {
+ EnumTypeLoc EnumTL = TLB.push<EnumTypeLoc>(T);
+ EnumTL.setNameLoc(IdentifierLoc);
+ } else if (isa<TemplateTypeParmType>(T)) {
+ TemplateTypeParmTypeLoc TemplateTypeTL
+ = TLB.push<TemplateTypeParmTypeLoc>(T);
+ TemplateTypeTL.setNameLoc(IdentifierLoc);
+ } else if (isa<UnresolvedUsingType>(T)) {
+ UnresolvedUsingTypeLoc UnresolvedTL
+ = TLB.push<UnresolvedUsingTypeLoc>(T);
+ UnresolvedTL.setNameLoc(IdentifierLoc);
+ } else if (isa<SubstTemplateTypeParmType>(T)) {
+ SubstTemplateTypeParmTypeLoc TL
+ = TLB.push<SubstTemplateTypeParmTypeLoc>(T);
+ TL.setNameLoc(IdentifierLoc);
+ } else if (isa<SubstTemplateTypeParmPackType>(T)) {
+ SubstTemplateTypeParmPackTypeLoc TL
+ = TLB.push<SubstTemplateTypeParmPackTypeLoc>(T);
+ TL.setNameLoc(IdentifierLoc);
+ } else {
+ llvm_unreachable("Unhandled TypeDecl node in nested-name-specifier");
+ }
+
+ if (T->isEnumeralType())
+ Diag(IdentifierLoc, diag::warn_cxx98_compat_enum_nested_name_spec);
+
+ SS.Extend(Context, SourceLocation(), TLB.getTypeLocInContext(Context, T),
+ CCLoc);
+ return false;
+ }
+
+ // Otherwise, we have an error case. If we don't want diagnostics, just
+ // return an error now.
+ if (ErrorRecoveryLookup)
+ return true;
+
+ // If we didn't find anything during our lookup, try again with
+ // ordinary name lookup, which can help us produce better error
+ // messages.
+ if (Found.empty()) {
+ Found.clear(LookupOrdinaryName);
+ LookupName(Found, S);
+ }
+
+ // In Microsoft mode, if we are within a templated function and we can't
+ // resolve Identifier, then extend the SS with Identifier. This will have
+ // the effect of resolving Identifier during template instantiation.
+ // The goal is to be able to resolve a function call whose
+ // nested-name-specifier is located inside a dependent base class.
+ // Example:
+ //
+ // class C {
+ // public:
+ // static void foo2() { }
+ // };
+ // template <class T> class A { public: typedef C D; };
+ //
+ // template <class T> class B : public A<T> {
+ // public:
+ // void foo() { D::foo2(); }
+ // };
+ if (getLangOpts().MicrosoftExt) {
+ DeclContext *DC = LookupCtx ? LookupCtx : CurContext;
+ if (DC->isDependentContext() && DC->isFunctionOrMethod()) {
+ SS.Extend(Context, &Identifier, IdentifierLoc, CCLoc);
+ return false;
+ }
+ }
+
+ unsigned DiagID;
+ if (!Found.empty())
+ DiagID = diag::err_expected_class_or_namespace;
+ else if (SS.isSet()) {
+ Diag(IdentifierLoc, diag::err_no_member)
+ << &Identifier << LookupCtx << SS.getRange();
+ return true;
+ } else
+ DiagID = diag::err_undeclared_var_use;
+
+ if (SS.isSet())
+ Diag(IdentifierLoc, DiagID) << &Identifier << SS.getRange();
+ else
+ Diag(IdentifierLoc, DiagID) << &Identifier;
+
+ return true;
+}
+
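+/// Handle a nested-name-specifier component of the form 'identifier::' (for
+/// example, the 'std::' in 'std::string') by delegating to
+/// BuildCXXNestedNameSpecifier with ErrorRecoveryLookup disabled.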
+bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
+ IdentifierInfo &Identifier,
+ SourceLocation IdentifierLoc,
+ SourceLocation CCLoc,
+ ParsedType ObjectType,
+ bool EnteringContext,
+ CXXScopeSpec &SS) {
+ if (SS.isInvalid())
+ return true;
+
+ return BuildCXXNestedNameSpecifier(S, Identifier, IdentifierLoc, CCLoc,
+ GetTypeFromParser(ObjectType),
+ EnteringContext, SS,
+ /*ScopeLookupResult=*/0, false);
+}
+
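+/// Handle a nested-name-specifier component introduced by
+/// 'decltype(expr)::', extending \p SS with the resulting type.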
+bool Sema::ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
+ const DeclSpec &DS,
+ SourceLocation ColonColonLoc) {
+ if (SS.isInvalid() || DS.getTypeSpecType() == DeclSpec::TST_error)
+ return true;
+
+ assert(DS.getTypeSpecType() == DeclSpec::TST_decltype);
+
+ QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
+ if (!T->isDependentType() && !T->getAs<TagType>()) {
+ Diag(DS.getTypeSpecTypeLoc(), diag::err_expected_class)
+ << T << getLangOpts().CPlusPlus;
+ return true;
+ }
+
+ TypeLocBuilder TLB;
+ DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
+ DecltypeTL.setNameLoc(DS.getTypeSpecTypeLoc());
+ SS.Extend(Context, SourceLocation(), TLB.getTypeLocInContext(Context, T),
+ ColonColonLoc);
+ return false;
+}
+
+/// IsInvalidUnlessNestedName - This method is used for error recovery
+/// purposes to determine whether the specified identifier is only valid as
+/// a nested name specifier, for example a namespace name. It is
+/// conservatively correct to always return false from this method.
+///
+/// The arguments are the same as those passed to ActOnCXXNestedNameSpecifier.
+bool Sema::IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
+ IdentifierInfo &Identifier,
+ SourceLocation IdentifierLoc,
+ SourceLocation ColonLoc,
+ ParsedType ObjectType,
+ bool EnteringContext) {
+ if (SS.isInvalid())
+ return false;
+
+ return !BuildCXXNestedNameSpecifier(S, Identifier, IdentifierLoc, ColonLoc,
+ GetTypeFromParser(ObjectType),
+ EnteringContext, SS,
+ /*ScopeLookupResult=*/0, true);
+}
+
+bool Sema::ActOnCXXNestedNameSpecifier(Scope *S,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ TemplateTy Template,
+ SourceLocation TemplateNameLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation RAngleLoc,
+ SourceLocation CCLoc,
+ bool EnteringContext) {
+ if (SS.isInvalid())
+ return true;
+
+ // Translate the parser's template argument list in our AST format.
+ TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
+ translateTemplateArguments(TemplateArgsIn, TemplateArgs);
+
+ if (DependentTemplateName *DTN = Template.get().getAsDependentTemplateName()){
+ // Handle a dependent template specialization for which we cannot resolve
+ // the template name.
+ assert(DTN->getQualifier()
+ == static_cast<NestedNameSpecifier*>(SS.getScopeRep()));
+ QualType T = Context.getDependentTemplateSpecializationType(ETK_None,
+ DTN->getQualifier(),
+ DTN->getIdentifier(),
+ TemplateArgs);
+
+ // Create source-location information for this type.
+ TypeLocBuilder Builder;
+ DependentTemplateSpecializationTypeLoc SpecTL
+ = Builder.push<DependentTemplateSpecializationTypeLoc>(T);
+ SpecTL.setElaboratedKeywordLoc(SourceLocation());
+ SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateNameLoc);
+ SpecTL.setLAngleLoc(LAngleLoc);
+ SpecTL.setRAngleLoc(RAngleLoc);
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
+
+ SS.Extend(Context, TemplateKWLoc, Builder.getTypeLocInContext(Context, T),
+ CCLoc);
+ return false;
+ }
+
+
+ if (Template.get().getAsOverloadedTemplate() ||
+ isa<FunctionTemplateDecl>(Template.get().getAsTemplateDecl())) {
+ SourceRange R(TemplateNameLoc, RAngleLoc);
+ if (SS.getRange().isValid())
+ R.setBegin(SS.getRange().getBegin());
+
+ Diag(CCLoc, diag::err_non_type_template_in_nested_name_specifier)
+ << Template.get() << R;
+ NoteAllFoundTemplates(Template.get());
+ return true;
+ }
+
+ // We were able to resolve the template name to an actual template.
+ // Build an appropriate nested-name-specifier.
+ QualType T = CheckTemplateIdType(Template.get(), TemplateNameLoc,
+ TemplateArgs);
+ if (T.isNull())
+ return true;
+
+ // Alias template specializations can produce types which are not valid
+ // nested name specifiers.
+ if (!T->isDependentType() && !T->getAs<TagType>()) {
+ Diag(TemplateNameLoc, diag::err_nested_name_spec_non_tag) << T;
+ NoteAllFoundTemplates(Template.get());
+ return true;
+ }
+
+ // Provide source-location information for the template specialization type.
+ TypeLocBuilder Builder;
+ TemplateSpecializationTypeLoc SpecTL
+ = Builder.push<TemplateSpecializationTypeLoc>(T);
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateNameLoc);
+ SpecTL.setLAngleLoc(LAngleLoc);
+ SpecTL.setRAngleLoc(RAngleLoc);
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
+
+
+ SS.Extend(Context, TemplateKWLoc, Builder.getTypeLocInContext(Context, T),
+ CCLoc);
+ return false;
+}
+
+namespace {
+  /// \brief A structure that stores a nested-name-specifier annotation,
+  /// including both the nested-name-specifier and the source-location
+  /// information that is stored immediately after it.
+ struct NestedNameSpecifierAnnotation {
+ NestedNameSpecifier *NNS;
+ };
+}
+
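+/// Package the given scope specifier into a single allocation (the
+/// nested-name-specifier followed by its source-location data) so the parser
+/// can attach it to an annotation token and restore it later.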
+void *Sema::SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS) {
+ if (SS.isEmpty() || SS.isInvalid())
+ return 0;
+
+ void *Mem = Context.Allocate((sizeof(NestedNameSpecifierAnnotation) +
+ SS.location_size()),
+ llvm::alignOf<NestedNameSpecifierAnnotation>());
+ NestedNameSpecifierAnnotation *Annotation
+ = new (Mem) NestedNameSpecifierAnnotation;
+ Annotation->NNS = SS.getScopeRep();
+ memcpy(Annotation + 1, SS.location_data(), SS.location_size());
+ return Annotation;
+}
+
+void Sema::RestoreNestedNameSpecifierAnnotation(void *AnnotationPtr,
+ SourceRange AnnotationRange,
+ CXXScopeSpec &SS) {
+ if (!AnnotationPtr) {
+ SS.SetInvalid(AnnotationRange);
+ return;
+ }
+
+ NestedNameSpecifierAnnotation *Annotation
+ = static_cast<NestedNameSpecifierAnnotation *>(AnnotationPtr);
+ SS.Adopt(NestedNameSpecifierLoc(Annotation->NNS, Annotation + 1));
+}
+
+bool Sema::ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS) {
+ assert(SS.isSet() && "Parser passed invalid CXXScopeSpec.");
+
+ NestedNameSpecifier *Qualifier =
+ static_cast<NestedNameSpecifier*>(SS.getScopeRep());
+
+ // There are only two places a well-formed program may qualify a
+ // declarator: first, when defining a namespace or class member
+ // out-of-line, and second, when naming an explicitly-qualified
+ // friend function. The latter case is governed by
+ // C++03 [basic.lookup.unqual]p10:
+ // In a friend declaration naming a member function, a name used
+ // in the function declarator and not part of a template-argument
+ // in a template-id is first looked up in the scope of the member
+ // function's class. If it is not found, or if the name is part of
+ // a template-argument in a template-id, the look up is as
+ // described for unqualified names in the definition of the class
+ // granting friendship.
+ // i.e. we don't push a scope unless it's a class member.
+
+ switch (Qualifier->getKind()) {
+ case NestedNameSpecifier::Global:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ // These are always namespace scopes. We never want to enter a
+ // namespace scope from anything but a file context.
+ return CurContext->getRedeclContext()->isFileContext();
+
+ case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ // These are never namespace scopes.
+ return true;
+ }
+
+ llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
+}
+
+/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
+/// scope or nested-name-specifier) is parsed, part of a declarator-id.
+/// After this method is called, according to [C++ 3.4.3p3], names should be
+/// looked up in the declarator-id's scope, until the declarator is parsed and
+/// ActOnCXXExitDeclaratorScope is called.
+/// The 'SS' should be a non-empty valid CXXScopeSpec.
+bool Sema::ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS) {
+ assert(SS.isSet() && "Parser passed invalid CXXScopeSpec.");
+
+ if (SS.isInvalid()) return true;
+
+ DeclContext *DC = computeDeclContext(SS, true);
+ if (!DC) return true;
+
+ // Before we enter a declarator's context, we need to make sure that
+ // it is a complete declaration context.
+ if (!DC->isDependentContext() && RequireCompleteDeclContext(SS, DC))
+ return true;
+
+ EnterDeclaratorContext(S, DC);
+
+ // Rebuild the nested name specifier for the new scope.
+ if (DC->isDependentContext())
+ RebuildNestedNameSpecifierInCurrentInstantiation(SS);
+
+ return false;
+}
+
+/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
+/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
+/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
+/// Used to indicate that names should revert to being looked up in the
+/// defining scope.
+void Sema::ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS) {
+ assert(SS.isSet() && "Parser passed invalid CXXScopeSpec.");
+ if (SS.isInvalid())
+ return;
+ assert(!SS.isInvalid() && computeDeclContext(SS, true) &&
+ "exiting declarator scope we never really entered");
+ ExitDeclaratorContext(S);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp
new file mode 100644
index 0000000..54683e1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCast.cpp
@@ -0,0 +1,2112 @@
+//===--- SemaCast.cpp - Semantic Analysis for Casts -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for cast expressions, including
+// 1) C-style casts like '(int) x'
+// 2) C++ functional casts like 'int(x)'
+// 3) C++ named casts like 'static_cast<int>(x)'
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "llvm/ADT/SmallVector.h"
+#include <set>
+using namespace clang;
+
+
+
+enum TryCastResult {
+ TC_NotApplicable, ///< The cast method is not applicable.
+ TC_Success, ///< The cast method is appropriate and successful.
+ TC_Failed ///< The cast method is appropriate, but failed. A
+ ///< diagnostic has been emitted.
+};
+
+enum CastType {
+ CT_Const, ///< const_cast
+ CT_Static, ///< static_cast
+ CT_Reinterpret, ///< reinterpret_cast
+ CT_Dynamic, ///< dynamic_cast
+ CT_CStyle, ///< (Type)expr
+ CT_Functional ///< Type(expr)
+};
+
+namespace {
+ struct CastOperation {
+ CastOperation(Sema &S, QualType destType, ExprResult src)
+ : Self(S), SrcExpr(src), DestType(destType),
+ ResultType(destType.getNonLValueExprType(S.Context)),
+ ValueKind(Expr::getValueKindForType(destType)),
+ Kind(CK_Dependent), IsARCUnbridgedCast(false) {
+
+ if (const BuiltinType *placeholder =
+ src.get()->getType()->getAsPlaceholderType()) {
+ PlaceholderKind = placeholder->getKind();
+ } else {
+ PlaceholderKind = (BuiltinType::Kind) 0;
+ }
+ }
+
+ Sema &Self;
+ ExprResult SrcExpr;
+ QualType DestType;
+ QualType ResultType;
+ ExprValueKind ValueKind;
+ CastKind Kind;
+ BuiltinType::Kind PlaceholderKind;
+ CXXCastPath BasePath;
+ bool IsARCUnbridgedCast;
+
+ SourceRange OpRange;
+ SourceRange DestRange;
+
+ // Top-level semantics-checking routines.
+ void CheckConstCast();
+ void CheckReinterpretCast();
+ void CheckStaticCast();
+ void CheckDynamicCast();
+ void CheckCXXCStyleCast(bool FunctionalCast, bool ListInitialization);
+ void CheckCStyleCast();
+
+ /// Complete an apparently-successful cast operation that yields
+ /// the given expression.
+ ExprResult complete(CastExpr *castExpr) {
+ // If this is an unbridged cast, wrap the result in an implicit
+ // cast that yields the unbridged-cast placeholder type.
+ if (IsARCUnbridgedCast) {
+ castExpr = ImplicitCastExpr::Create(Self.Context,
+ Self.Context.ARCUnbridgedCastTy,
+ CK_Dependent, castExpr, 0,
+ castExpr->getValueKind());
+ }
+ return Self.Owned(castExpr);
+ }
+
+ // Internal convenience methods.
+
+ /// Try to handle the given placeholder expression kind. Return
+ /// true if the source expression has the appropriate placeholder
+ /// kind. A placeholder can only be claimed once.
+ bool claimPlaceholder(BuiltinType::Kind K) {
+ if (PlaceholderKind != K) return false;
+
+ PlaceholderKind = (BuiltinType::Kind) 0;
+ return true;
+ }
+
+ bool isPlaceholder() const {
+ return PlaceholderKind != 0;
+ }
+ bool isPlaceholder(BuiltinType::Kind K) const {
+ return PlaceholderKind == K;
+ }
+
+ void checkCastAlign() {
+ Self.CheckCastAlign(SrcExpr.get(), DestType, OpRange);
+ }
+
+ void checkObjCARCConversion(Sema::CheckedConversionKind CCK) {
+ assert(Self.getLangOpts().ObjCAutoRefCount);
+
+ Expr *src = SrcExpr.get();
+ if (Self.CheckObjCARCConversion(OpRange, DestType, src, CCK) ==
+ Sema::ACR_unbridged)
+ IsARCUnbridgedCast = true;
+ SrcExpr = src;
+ }
+
+ /// Check for and handle non-overload placeholder expressions.
+ void checkNonOverloadPlaceholders() {
+ if (!isPlaceholder() || isPlaceholder(BuiltinType::Overload))
+ return;
+
+ SrcExpr = Self.CheckPlaceholderExpr(SrcExpr.take());
+ if (SrcExpr.isInvalid())
+ return;
+ PlaceholderKind = (BuiltinType::Kind) 0;
+ }
+ };
+}
+
+static bool CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
+ bool CheckCVR, bool CheckObjCLifetime);
+
+// The Try functions attempt a specific way of casting. If they succeed, they
+// return TC_Success. If their way of casting is not appropriate for the given
+// arguments, they return TC_NotApplicable and *may* set diag to a diagnostic
+// to emit if no other way succeeds. If their way of casting is appropriate but
+// fails, they return TC_Failed and *must* set diag; they can set it to 0 if
+// they emit a specialized diagnostic.
+// All diagnostics returned by these functions must expect the same three
+// arguments:
+// %0: Cast Type (a value from the CastType enumeration)
+// %1: Source Type
+// %2: Destination Type
+static TryCastResult TryLValueToRValueCast(Sema &Self, Expr *SrcExpr,
+ QualType DestType, bool CStyle,
+ CastKind &Kind,
+ CXXCastPath &BasePath,
+ unsigned &msg);
+static TryCastResult TryStaticReferenceDowncast(Sema &Self, Expr *SrcExpr,
+ QualType DestType, bool CStyle,
+ const SourceRange &OpRange,
+ unsigned &msg,
+ CastKind &Kind,
+ CXXCastPath &BasePath);
+static TryCastResult TryStaticPointerDowncast(Sema &Self, QualType SrcType,
+ QualType DestType, bool CStyle,
+ const SourceRange &OpRange,
+ unsigned &msg,
+ CastKind &Kind,
+ CXXCastPath &BasePath);
+static TryCastResult TryStaticDowncast(Sema &Self, CanQualType SrcType,
+ CanQualType DestType, bool CStyle,
+ const SourceRange &OpRange,
+ QualType OrigSrcType,
+ QualType OrigDestType, unsigned &msg,
+ CastKind &Kind,
+ CXXCastPath &BasePath);
+static TryCastResult TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExpr,
+ QualType SrcType,
+ QualType DestType,bool CStyle,
+ const SourceRange &OpRange,
+ unsigned &msg,
+ CastKind &Kind,
+ CXXCastPath &BasePath);
+
+static TryCastResult TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr,
+ QualType DestType,
+ Sema::CheckedConversionKind CCK,
+ const SourceRange &OpRange,
+ unsigned &msg, CastKind &Kind,
+ bool ListInitialization);
+static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
+ QualType DestType,
+ Sema::CheckedConversionKind CCK,
+ const SourceRange &OpRange,
+ unsigned &msg, CastKind &Kind,
+ CXXCastPath &BasePath,
+ bool ListInitialization);
+static TryCastResult TryConstCast(Sema &Self, Expr *SrcExpr, QualType DestType,
+ bool CStyle, unsigned &msg);
+static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
+ QualType DestType, bool CStyle,
+ const SourceRange &OpRange,
+ unsigned &msg,
+ CastKind &Kind);
+
+
+/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast expressions.
+ExprResult
+Sema::ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
+ SourceLocation LAngleBracketLoc, Declarator &D,
+ SourceLocation RAngleBracketLoc,
+ SourceLocation LParenLoc, Expr *E,
+ SourceLocation RParenLoc) {
+
+ assert(!D.isInvalidType());
+
+ TypeSourceInfo *TInfo = GetTypeForDeclaratorCast(D, E->getType());
+ if (D.isInvalidType())
+ return ExprError();
+
+ if (getLangOpts().CPlusPlus) {
+ // Check that there are no default arguments (C++ only).
+ CheckExtraCXXDefaultArguments(D);
+ }
+
+ return BuildCXXNamedCast(OpLoc, Kind, TInfo, move(E),
+ SourceRange(LAngleBracketLoc, RAngleBracketLoc),
+ SourceRange(LParenLoc, RParenLoc));
+}
+
+ExprResult
+Sema::BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind,
+ TypeSourceInfo *DestTInfo, Expr *E,
+ SourceRange AngleBrackets, SourceRange Parens) {
+ ExprResult Ex = Owned(E);
+ QualType DestType = DestTInfo->getType();
+
+ // If the type is dependent, we won't do the semantic analysis now.
+ // FIXME: should we check this in a more fine-grained manner?
+ bool TypeDependent = DestType->isDependentType() || Ex.get()->isTypeDependent();
+
+ CastOperation Op(*this, DestType, E);
+ Op.OpRange = SourceRange(OpLoc, Parens.getEnd());
+ Op.DestRange = AngleBrackets;
+
+ switch (Kind) {
+ default: llvm_unreachable("Unknown C++ cast!");
+
+ case tok::kw_const_cast:
+ if (!TypeDependent) {
+ Op.CheckConstCast();
+ if (Op.SrcExpr.isInvalid())
+ return ExprError();
+ }
+ return Op.complete(CXXConstCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, Op.SrcExpr.take(), DestTInfo,
+ OpLoc, Parens.getEnd()));
+
+ case tok::kw_dynamic_cast: {
+ if (!TypeDependent) {
+ Op.CheckDynamicCast();
+ if (Op.SrcExpr.isInvalid())
+ return ExprError();
+ }
+ return Op.complete(CXXDynamicCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, Op.Kind, Op.SrcExpr.take(),
+ &Op.BasePath, DestTInfo,
+ OpLoc, Parens.getEnd()));
+ }
+ case tok::kw_reinterpret_cast: {
+ if (!TypeDependent) {
+ Op.CheckReinterpretCast();
+ if (Op.SrcExpr.isInvalid())
+ return ExprError();
+ }
+ return Op.complete(CXXReinterpretCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, Op.Kind, Op.SrcExpr.take(),
+ 0, DestTInfo, OpLoc,
+ Parens.getEnd()));
+ }
+ case tok::kw_static_cast: {
+ if (!TypeDependent) {
+ Op.CheckStaticCast();
+ if (Op.SrcExpr.isInvalid())
+ return ExprError();
+ }
+
+ return Op.complete(CXXStaticCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, Op.Kind, Op.SrcExpr.take(),
+ &Op.BasePath, DestTInfo,
+ OpLoc, Parens.getEnd()));
+ }
+ }
+}
+
+/// Try to diagnose a failed overloaded cast. Returns true if
+/// diagnostics were emitted.
+static bool tryDiagnoseOverloadedCast(Sema &S, CastType CT,
+ SourceRange range, Expr *src,
+ QualType destType,
+ bool listInitialization) {
+ switch (CT) {
+ // These cast kinds don't consider user-defined conversions.
+ case CT_Const:
+ case CT_Reinterpret:
+ case CT_Dynamic:
+ return false;
+
+ // These do.
+ case CT_Static:
+ case CT_CStyle:
+ case CT_Functional:
+ break;
+ }
+
+ QualType srcType = src->getType();
+ if (!destType->isRecordType() && !srcType->isRecordType())
+ return false;
+
+ InitializedEntity entity = InitializedEntity::InitializeTemporary(destType);
+ InitializationKind initKind
+ = (CT == CT_CStyle)? InitializationKind::CreateCStyleCast(range.getBegin(),
+ range, listInitialization)
+ : (CT == CT_Functional)? InitializationKind::CreateFunctionalCast(range,
+ listInitialization)
+ : InitializationKind::CreateCast(/*type range?*/ range);
+ InitializationSequence sequence(S, entity, initKind, &src, 1);
+
+ assert(sequence.Failed() && "initialization succeeded on second try?");
+ switch (sequence.getFailureKind()) {
+ default: return false;
+
+ case InitializationSequence::FK_ConstructorOverloadFailed:
+ case InitializationSequence::FK_UserConversionOverloadFailed:
+ break;
+ }
+
+ OverloadCandidateSet &candidates = sequence.getFailedCandidateSet();
+
+ unsigned msg = 0;
+ OverloadCandidateDisplayKind howManyCandidates = OCD_AllCandidates;
+
+ switch (sequence.getFailedOverloadResult()) {
+ case OR_Success: llvm_unreachable("successful failed overload");
+ case OR_No_Viable_Function:
+ if (candidates.empty())
+ msg = diag::err_ovl_no_conversion_in_cast;
+ else
+ msg = diag::err_ovl_no_viable_conversion_in_cast;
+ howManyCandidates = OCD_AllCandidates;
+ break;
+
+ case OR_Ambiguous:
+ msg = diag::err_ovl_ambiguous_conversion_in_cast;
+ howManyCandidates = OCD_ViableCandidates;
+ break;
+
+ case OR_Deleted:
+ msg = diag::err_ovl_deleted_conversion_in_cast;
+ howManyCandidates = OCD_ViableCandidates;
+ break;
+ }
+
+ S.Diag(range.getBegin(), msg)
+ << CT << srcType << destType
+ << range << src->getSourceRange();
+
+ candidates.NoteCandidates(S, howManyCandidates, src);
+
+ return true;
+}
+
+/// Diagnose a failed cast.
+static void diagnoseBadCast(Sema &S, unsigned msg, CastType castType,
+ SourceRange opRange, Expr *src, QualType destType,
+ bool listInitialization) {
+ if (src->getType() == S.Context.BoundMemberTy) {
+ (void) S.CheckPlaceholderExpr(src); // will always fail
+ return;
+ }
+
+ if (msg == diag::err_bad_cxx_cast_generic &&
+ tryDiagnoseOverloadedCast(S, castType, opRange, src, destType,
+ listInitialization))
+ return;
+
+ S.Diag(opRange.getBegin(), msg) << castType
+ << src->getType() << destType << opRange << src->getSourceRange();
+}
+
+/// UnwrapDissimilarPointerTypes - Like Sema::UnwrapSimilarPointerTypes,
+/// this removes one level of indirection from both types, provided that they're
+/// the same kind of pointer (plain or to-member). Unlike the Sema function,
+/// this one doesn't care if the two pointers-to-member don't point into the
+/// same class. This is because CastsAwayConstness doesn't care.
+static bool UnwrapDissimilarPointerTypes(QualType& T1, QualType& T2) {
+ const PointerType *T1PtrType = T1->getAs<PointerType>(),
+ *T2PtrType = T2->getAs<PointerType>();
+ if (T1PtrType && T2PtrType) {
+ T1 = T1PtrType->getPointeeType();
+ T2 = T2PtrType->getPointeeType();
+ return true;
+ }
+ const ObjCObjectPointerType *T1ObjCPtrType =
+ T1->getAs<ObjCObjectPointerType>(),
+ *T2ObjCPtrType =
+ T2->getAs<ObjCObjectPointerType>();
+ if (T1ObjCPtrType) {
+ if (T2ObjCPtrType) {
+ T1 = T1ObjCPtrType->getPointeeType();
+ T2 = T2ObjCPtrType->getPointeeType();
+ return true;
+ }
+ else if (T2PtrType) {
+ T1 = T1ObjCPtrType->getPointeeType();
+ T2 = T2PtrType->getPointeeType();
+ return true;
+ }
+ }
+ else if (T2ObjCPtrType) {
+ if (T1PtrType) {
+ T2 = T2ObjCPtrType->getPointeeType();
+ T1 = T1PtrType->getPointeeType();
+ return true;
+ }
+ }
+
+ const MemberPointerType *T1MPType = T1->getAs<MemberPointerType>(),
+ *T2MPType = T2->getAs<MemberPointerType>();
+ if (T1MPType && T2MPType) {
+ T1 = T1MPType->getPointeeType();
+ T2 = T2MPType->getPointeeType();
+ return true;
+ }
+
+ const BlockPointerType *T1BPType = T1->getAs<BlockPointerType>(),
+ *T2BPType = T2->getAs<BlockPointerType>();
+ if (T1BPType && T2BPType) {
+ T1 = T1BPType->getPointeeType();
+ T2 = T2BPType->getPointeeType();
+ return true;
+ }
+
+ return false;
+}
+
+/// CastsAwayConstness - Check if the pointer conversion from SrcType to
+/// DestType casts away constness as defined in C++ 5.2.11p8ff. This is used by
+/// the cast checkers. Both arguments must denote pointer (possibly to member)
+/// types.
+///
+/// \param CheckCVR Whether to check for const/volatile/restrict qualifiers.
+///
+/// \param CheckObjCLifetime Whether to check Objective-C lifetime qualifiers.
+static bool
+CastsAwayConstness(Sema &Self, QualType SrcType, QualType DestType,
+ bool CheckCVR, bool CheckObjCLifetime) {
+ // If the only checking we care about is for Objective-C lifetime qualifiers,
+ // and we're not in ARC mode, there's nothing to check.
+ if (!CheckCVR && CheckObjCLifetime &&
+ !Self.Context.getLangOpts().ObjCAutoRefCount)
+ return false;
+
+ // Casting away constness is defined in C++ 5.2.11p8 with reference to
+ // C++ 4.4. We piggyback on Sema::IsQualificationConversion for this, since
+ // the rules are non-trivial. So first we construct Tcv *...cv* as described
+ // in C++ 5.2.11p8.
+ assert((SrcType->isAnyPointerType() || SrcType->isMemberPointerType() ||
+ SrcType->isBlockPointerType()) &&
+ "Source type is not pointer or pointer to member.");
+ assert((DestType->isAnyPointerType() || DestType->isMemberPointerType() ||
+ DestType->isBlockPointerType()) &&
+ "Destination type is not pointer or pointer to member.");
+
+ QualType UnwrappedSrcType = Self.Context.getCanonicalType(SrcType),
+ UnwrappedDestType = Self.Context.getCanonicalType(DestType);
+ SmallVector<Qualifiers, 8> cv1, cv2;
+
+ // Find the qualifiers. We only care about cvr-qualifiers for the
+ // purpose of this check, because other qualifiers (address spaces,
+ // Objective-C GC, etc.) are part of the type's identity.
+ while (UnwrapDissimilarPointerTypes(UnwrappedSrcType, UnwrappedDestType)) {
+ // Determine the relevant qualifiers at this level.
+ Qualifiers SrcQuals, DestQuals;
+ Self.Context.getUnqualifiedArrayType(UnwrappedSrcType, SrcQuals);
+ Self.Context.getUnqualifiedArrayType(UnwrappedDestType, DestQuals);
+
+ Qualifiers RetainedSrcQuals, RetainedDestQuals;
+ if (CheckCVR) {
+ RetainedSrcQuals.setCVRQualifiers(SrcQuals.getCVRQualifiers());
+ RetainedDestQuals.setCVRQualifiers(DestQuals.getCVRQualifiers());
+ }
+
+ if (CheckObjCLifetime &&
+ !DestQuals.compatiblyIncludesObjCLifetime(SrcQuals))
+ return true;
+
+ cv1.push_back(RetainedSrcQuals);
+ cv2.push_back(RetainedDestQuals);
+ }
+ if (cv1.empty())
+ return false;
+
+ // Construct void pointers with those qualifiers (in reverse order of
+ // unwrapping, of course).
+ QualType SrcConstruct = Self.Context.VoidTy;
+ QualType DestConstruct = Self.Context.VoidTy;
+ ASTContext &Context = Self.Context;
+ for (SmallVector<Qualifiers, 8>::reverse_iterator i1 = cv1.rbegin(),
+ i2 = cv2.rbegin();
+ i1 != cv1.rend(); ++i1, ++i2) {
+ SrcConstruct
+ = Context.getPointerType(Context.getQualifiedType(SrcConstruct, *i1));
+ DestConstruct
+ = Context.getPointerType(Context.getQualifiedType(DestConstruct, *i2));
+ }
+
+ // Test if they're compatible.
+ bool ObjCLifetimeConversion;
+ return SrcConstruct != DestConstruct &&
+ !Self.IsQualificationConversion(SrcConstruct, DestConstruct, false,
+ ObjCLifetimeConversion);
+}
+
+/// CheckDynamicCast - Check that a dynamic_cast\<DestType\>(SrcExpr) is valid.
+/// Refer to C++ 5.2.7 for details. Dynamic casts are used mostly for runtime-
+/// checked downcasts in class hierarchies.
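+///
+/// For illustration only (not part of the original comment), this is the
+/// check behind code such as
+///   Base *B = ...;
+///   if (Derived *D = dynamic_cast\<Derived*\>(B)) { /*...*/ }
+/// where Base must be a complete, polymorphic class.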
+void CastOperation::CheckDynamicCast() {
+ if (ValueKind == VK_RValue)
+ SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.take());
+ else if (isPlaceholder())
+ SrcExpr = Self.CheckPlaceholderExpr(SrcExpr.take());
+ if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
+ return;
+
+ QualType OrigSrcType = SrcExpr.get()->getType();
+ QualType DestType = Self.Context.getCanonicalType(this->DestType);
+
+ // C++ 5.2.7p1: T shall be a pointer or reference to a complete class type,
+ // or "pointer to cv void".
+
+ QualType DestPointee;
+ const PointerType *DestPointer = DestType->getAs<PointerType>();
+ const ReferenceType *DestReference = 0;
+ if (DestPointer) {
+ DestPointee = DestPointer->getPointeeType();
+ } else if ((DestReference = DestType->getAs<ReferenceType>())) {
+ DestPointee = DestReference->getPointeeType();
+ } else {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_ref_or_ptr)
+ << this->DestType << DestRange;
+ return;
+ }
+
+ const RecordType *DestRecord = DestPointee->getAs<RecordType>();
+ if (DestPointee->isVoidType()) {
+ assert(DestPointer && "Reference to void is not possible");
+ } else if (DestRecord) {
+ if (Self.RequireCompleteType(OpRange.getBegin(), DestPointee,
+ Self.PDiag(diag::err_bad_dynamic_cast_incomplete)
+ << DestRange))
+ return;
+ } else {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_class)
+ << DestPointee.getUnqualifiedType() << DestRange;
+ return;
+ }
+
+ // C++0x 5.2.7p2: If T is a pointer type, v shall be an rvalue of a pointer to
+ // complete class type, [...]. If T is an lvalue reference type, v shall be
+ // an lvalue of a complete class type, [...]. If T is an rvalue reference
+ // type, v shall be an expression having a complete class type, [...]
+ QualType SrcType = Self.Context.getCanonicalType(OrigSrcType);
+ QualType SrcPointee;
+ if (DestPointer) {
+ if (const PointerType *SrcPointer = SrcType->getAs<PointerType>()) {
+ SrcPointee = SrcPointer->getPointeeType();
+ } else {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_ptr)
+ << OrigSrcType << SrcExpr.get()->getSourceRange();
+ return;
+ }
+ } else if (DestReference->isLValueReferenceType()) {
+ if (!SrcExpr.get()->isLValue()) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_rvalue)
+ << CT_Dynamic << OrigSrcType << this->DestType << OpRange;
+ }
+ SrcPointee = SrcType;
+ } else {
+ SrcPointee = SrcType;
+ }
+
+ const RecordType *SrcRecord = SrcPointee->getAs<RecordType>();
+ if (SrcRecord) {
+ if (Self.RequireCompleteType(OpRange.getBegin(), SrcPointee,
+ Self.PDiag(diag::err_bad_dynamic_cast_incomplete)
+ << SrcExpr.get()->getSourceRange()))
+ return;
+ } else {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_class)
+ << SrcPointee.getUnqualifiedType() << SrcExpr.get()->getSourceRange();
+ return;
+ }
+
+ assert((DestPointer || DestReference) &&
+ "Bad destination non-ptr/ref slipped through.");
+ assert((DestRecord || DestPointee->isVoidType()) &&
+ "Bad destination pointee slipped through.");
+ assert(SrcRecord && "Bad source pointee slipped through.");
+
+ // C++ 5.2.7p1: The dynamic_cast operator shall not cast away constness.
+ if (!DestPointee.isAtLeastAsQualifiedAs(SrcPointee)) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_cxx_cast_qualifiers_away)
+ << CT_Dynamic << OrigSrcType << this->DestType << OpRange;
+ return;
+ }
+
+ // C++ 5.2.7p3: If the type of v is the same as the required result type,
+ // [except for cv].
+ if (DestRecord == SrcRecord) {
+ Kind = CK_NoOp;
+ return;
+ }
+
+ // C++ 5.2.7p5
+ // Upcasts are resolved statically.
+ if (DestRecord && Self.IsDerivedFrom(SrcPointee, DestPointee)) {
+ if (Self.CheckDerivedToBaseConversion(SrcPointee, DestPointee,
+ OpRange.getBegin(), OpRange,
+ &BasePath))
+ return;
+
+ Kind = CK_DerivedToBase;
+
+ // If we are casting to or through a virtual base class, we need a
+ // vtable.
+ if (Self.BasePathInvolvesVirtualBase(BasePath))
+ Self.MarkVTableUsed(OpRange.getBegin(),
+ cast<CXXRecordDecl>(SrcRecord->getDecl()));
+ return;
+ }
+
+ // C++ 5.2.7p6: Otherwise, v shall be [polymorphic].
+ const RecordDecl *SrcDecl = SrcRecord->getDecl()->getDefinition();
+ assert(SrcDecl && "Definition missing");
+ if (!cast<CXXRecordDecl>(SrcDecl)->isPolymorphic()) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_dynamic_cast_not_polymorphic)
+ << SrcPointee.getUnqualifiedType() << SrcExpr.get()->getSourceRange();
+ }
+ Self.MarkVTableUsed(OpRange.getBegin(),
+ cast<CXXRecordDecl>(SrcRecord->getDecl()));
+
+ // Done. Everything else is run-time checks.
+ Kind = CK_Dynamic;
+}
+
+/// CheckConstCast - Check that a const_cast\<DestType\>(SrcExpr) is valid.
+/// Refer to C++ 5.2.11 for details. const_cast is typically used in code
+/// like this:
+/// const char *str = "literal";
+/// legacy_function(const_cast\<char*\>(str));
+void CastOperation::CheckConstCast() {
+ if (ValueKind == VK_RValue)
+ SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.take());
+ else if (isPlaceholder())
+ SrcExpr = Self.CheckPlaceholderExpr(SrcExpr.take());
+ if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
+ return;
+
+ unsigned msg = diag::err_bad_cxx_cast_generic;
+ if (TryConstCast(Self, SrcExpr.get(), DestType, /*CStyle*/false, msg) != TC_Success
+ && msg != 0)
+ Self.Diag(OpRange.getBegin(), msg) << CT_Const
+ << SrcExpr.get()->getType() << DestType << OpRange;
+}
+
+/// CheckReinterpretCast - Check that a reinterpret_cast\<DestType\>(SrcExpr) is
+/// valid.
+/// Refer to C++ 5.2.10 for details. reinterpret_cast is typically used in code
+/// like this:
+/// char *bytes = reinterpret_cast\<char*\>(int_ptr);
+void CastOperation::CheckReinterpretCast() {
+ if (ValueKind == VK_RValue && !isPlaceholder(BuiltinType::Overload))
+ SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.take());
+ else
+ checkNonOverloadPlaceholders();
+ if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
+ return;
+
+ unsigned msg = diag::err_bad_cxx_cast_generic;
+ TryCastResult tcr =
+ TryReinterpretCast(Self, SrcExpr, DestType,
+ /*CStyle*/false, OpRange, msg, Kind);
+ if (tcr != TC_Success && msg != 0)
+ {
+ if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
+ return;
+ if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
+ //FIXME: &f<int>; is overloaded and resolvable
+ Self.Diag(OpRange.getBegin(), diag::err_bad_reinterpret_cast_overload)
+ << OverloadExpr::find(SrcExpr.get()).Expression->getName()
+ << DestType << OpRange;
+ Self.NoteAllOverloadCandidates(SrcExpr.get());
+
+ } else {
+ diagnoseBadCast(Self, msg, CT_Reinterpret, OpRange, SrcExpr.get(),
+ DestType, /*listInitialization=*/false);
+ }
+ } else if (tcr == TC_Success && Self.getLangOpts().ObjCAutoRefCount) {
+ checkObjCARCConversion(Sema::CCK_OtherCast);
+ }
+}
+
+
+/// CheckStaticCast - Check that a static_cast\<DestType\>(SrcExpr) is valid.
+/// Refer to C++ 5.2.9 for details. Static casts are mostly used for making
+/// implicit conversions explicit and getting rid of data loss warnings.
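+///
+/// For illustration only (not part of the original comment):
+///   double D = 3.14;
+///   int I = static_cast\<int\>(D); // make the narrowing conversion explicit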
+void CastOperation::CheckStaticCast() {
+ if (isPlaceholder()) {
+ checkNonOverloadPlaceholders();
+ if (SrcExpr.isInvalid())
+ return;
+ }
+
+ // This test is outside everything else because it's the only case where
+ // a non-lvalue-reference target type does not lead to decay.
+ // C++ 5.2.9p4: Any expression can be explicitly converted to type "cv void".
+ if (DestType->isVoidType()) {
+ Kind = CK_ToVoid;
+
+ if (claimPlaceholder(BuiltinType::Overload)) {
+ Self.ResolveAndFixSingleFunctionTemplateSpecialization(SrcExpr,
+ false, // Decay Function to ptr
+ true, // Complain
+ OpRange, DestType, diag::err_bad_static_cast_overload);
+ if (SrcExpr.isInvalid())
+ return;
+ }
+
+ SrcExpr = Self.IgnoredValueConversions(SrcExpr.take());
+ return;
+ }
+
+ if (ValueKind == VK_RValue && !DestType->isRecordType() &&
+ !isPlaceholder(BuiltinType::Overload)) {
+ SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.take());
+ if (SrcExpr.isInvalid()) // if conversion failed, don't report another error
+ return;
+ }
+
+ unsigned msg = diag::err_bad_cxx_cast_generic;
+ TryCastResult tcr
+ = TryStaticCast(Self, SrcExpr, DestType, Sema::CCK_OtherCast, OpRange, msg,
+ Kind, BasePath, /*ListInitialization=*/false);
+ if (tcr != TC_Success && msg != 0) {
+ if (SrcExpr.isInvalid())
+ return;
+ if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
+ OverloadExpr* oe = OverloadExpr::find(SrcExpr.get()).Expression;
+ Self.Diag(OpRange.getBegin(), diag::err_bad_static_cast_overload)
+ << oe->getName() << DestType << OpRange
+ << oe->getQualifierLoc().getSourceRange();
+ Self.NoteAllOverloadCandidates(SrcExpr.get());
+ } else {
+ diagnoseBadCast(Self, msg, CT_Static, OpRange, SrcExpr.get(), DestType,
+ /*listInitialization=*/false);
+ }
+ } else if (tcr == TC_Success) {
+ if (Kind == CK_BitCast)
+ checkCastAlign();
+ if (Self.getLangOpts().ObjCAutoRefCount)
+ checkObjCARCConversion(Sema::CCK_OtherCast);
+ } else if (Kind == CK_BitCast) {
+ checkCastAlign();
+ }
+}
+
+/// TryStaticCast - Check if a static cast can be performed, and do so if
+/// possible. If @p CStyle, ignore access restrictions on hierarchy casting
+/// and casting away constness.
+static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
+ QualType DestType,
+ Sema::CheckedConversionKind CCK,
+ const SourceRange &OpRange, unsigned &msg,
+ CastKind &Kind, CXXCastPath &BasePath,
+ bool ListInitialization) {
+ // Determine whether we have the semantics of a C-style cast.
+ bool CStyle
+ = (CCK == Sema::CCK_CStyleCast || CCK == Sema::CCK_FunctionalCast);
+
+  // The order of the tests is not entirely arbitrary. There is one conversion
+ // that can be handled in two different ways. Given:
+ // struct A {};
+ // struct B : public A {
+ // B(); B(const A&);
+ // };
+ // const A &a = B();
+ // the cast static_cast<const B&>(a) could be seen as either a static
+ // reference downcast, or an explicit invocation of the user-defined
+ // conversion using B's conversion constructor.
+ // DR 427 specifies that the downcast is to be applied here.
+
+ // C++ 5.2.9p4: Any expression can be explicitly converted to type "cv void".
+ // Done outside this function.
+
+ TryCastResult tcr;
+
+ // C++ 5.2.9p5, reference downcast.
+ // See the function for details.
+ // DR 427 specifies that this is to be applied before paragraph 2.
+ tcr = TryStaticReferenceDowncast(Self, SrcExpr.get(), DestType, CStyle,
+ OpRange, msg, Kind, BasePath);
+ if (tcr != TC_NotApplicable)
+ return tcr;
+
+ // C++0x [expr.static.cast]p3:
+ // A glvalue of type "cv1 T1" can be cast to type "rvalue reference to cv2
+ // T2" if "cv2 T2" is reference-compatible with "cv1 T1".
+ tcr = TryLValueToRValueCast(Self, SrcExpr.get(), DestType, CStyle, Kind,
+ BasePath, msg);
+ if (tcr != TC_NotApplicable)
+ return tcr;
+
+ // C++ 5.2.9p2: An expression e can be explicitly converted to a type T
+ // [...] if the declaration "T t(e);" is well-formed, [...].
+ tcr = TryStaticImplicitCast(Self, SrcExpr, DestType, CCK, OpRange, msg,
+ Kind, ListInitialization);
+ if (SrcExpr.isInvalid())
+ return TC_Failed;
+ if (tcr != TC_NotApplicable)
+ return tcr;
+
+ // C++ 5.2.9p6: May apply the reverse of any standard conversion, except
+ // lvalue-to-rvalue, array-to-pointer, function-to-pointer, and boolean
+ // conversions, subject to further restrictions.
+ // Also, C++ 5.2.9p1 forbids casting away constness, which makes reversal
+ // of qualification conversions impossible.
+ // In the CStyle case, the earlier attempt to const_cast should have taken
+ // care of reverse qualification conversions.
+
+ QualType SrcType = Self.Context.getCanonicalType(SrcExpr.get()->getType());
+
+ // C++0x 5.2.9p9: A value of a scoped enumeration type can be explicitly
+ // converted to an integral type. [...] A value of a scoped enumeration type
+ // can also be explicitly converted to a floating-point type [...].
+ if (const EnumType *Enum = SrcType->getAs<EnumType>()) {
+ if (Enum->getDecl()->isScoped()) {
+ if (DestType->isBooleanType()) {
+ Kind = CK_IntegralToBoolean;
+ return TC_Success;
+ } else if (DestType->isIntegralType(Self.Context)) {
+ Kind = CK_IntegralCast;
+ return TC_Success;
+ } else if (DestType->isRealFloatingType()) {
+ Kind = CK_IntegralToFloating;
+ return TC_Success;
+ }
+ }
+ }
+
+ // Reverse integral promotion/conversion. All such conversions are themselves
+ // again integral promotions or conversions and are thus already handled by
+  // p2 (TryStaticImplicitCast above).
+ // (Note: any data loss warnings should be suppressed.)
+ // The exception is the reverse of enum->integer, i.e. integer->enum (and
+ // enum->enum). See also C++ 5.2.9p7.
+ // The same goes for reverse floating point promotion/conversion and
+ // floating-integral conversions. Again, only floating->enum is relevant.
+ if (DestType->isEnumeralType()) {
+ if (SrcType->isIntegralOrEnumerationType()) {
+ Kind = CK_IntegralCast;
+ return TC_Success;
+ } else if (SrcType->isRealFloatingType()) {
+ Kind = CK_FloatingToIntegral;
+ return TC_Success;
+ }
+ }
+
+ // Reverse pointer upcast. C++ 4.10p3 specifies pointer upcast.
+ // C++ 5.2.9p8 additionally disallows a cast path through virtual inheritance.
+ tcr = TryStaticPointerDowncast(Self, SrcType, DestType, CStyle, OpRange, msg,
+ Kind, BasePath);
+ if (tcr != TC_NotApplicable)
+ return tcr;
+
+ // Reverse member pointer conversion. C++ 4.11 specifies member pointer
+ // conversion. C++ 5.2.9p9 has additional information.
+ // DR54's access restrictions apply here also.
+ tcr = TryStaticMemberPointerUpcast(Self, SrcExpr, SrcType, DestType, CStyle,
+ OpRange, msg, Kind, BasePath);
+ if (tcr != TC_NotApplicable)
+ return tcr;
+
+  // Reverse pointer conversion to void*. C++ 4.10p2 specifies conversion to
+ // void*. C++ 5.2.9p10 specifies additional restrictions, which really is
+ // just the usual constness stuff.
+ if (const PointerType *SrcPointer = SrcType->getAs<PointerType>()) {
+ QualType SrcPointee = SrcPointer->getPointeeType();
+ if (SrcPointee->isVoidType()) {
+ if (const PointerType *DestPointer = DestType->getAs<PointerType>()) {
+ QualType DestPointee = DestPointer->getPointeeType();
+ if (DestPointee->isIncompleteOrObjectType()) {
+ // This is definitely the intended conversion, but it might fail due
+ // to a qualifier violation. Note that we permit Objective-C lifetime
+ // and GC qualifier mismatches here.
+ if (!CStyle) {
+ Qualifiers DestPointeeQuals = DestPointee.getQualifiers();
+ Qualifiers SrcPointeeQuals = SrcPointee.getQualifiers();
+ DestPointeeQuals.removeObjCGCAttr();
+ DestPointeeQuals.removeObjCLifetime();
+ SrcPointeeQuals.removeObjCGCAttr();
+ SrcPointeeQuals.removeObjCLifetime();
+ if (DestPointeeQuals != SrcPointeeQuals &&
+ !DestPointeeQuals.compatiblyIncludes(SrcPointeeQuals)) {
+ msg = diag::err_bad_cxx_cast_qualifiers_away;
+ return TC_Failed;
+ }
+ }
+ Kind = CK_BitCast;
+ return TC_Success;
+ }
+ }
+ else if (DestType->isObjCObjectPointerType()) {
+ // allow both c-style cast and static_cast of objective-c pointers as
+ // they are pervasive.
+ Kind = CK_CPointerToObjCPointerCast;
+ return TC_Success;
+ }
+ else if (CStyle && DestType->isBlockPointerType()) {
+ // allow c-style cast of void * to block pointers.
+ Kind = CK_AnyPointerToBlockPointerCast;
+ return TC_Success;
+ }
+ }
+ }
+  // Allow arbitrary Objective-C pointer conversion with static casts.
+ if (SrcType->isObjCObjectPointerType() &&
+ DestType->isObjCObjectPointerType()) {
+ Kind = CK_BitCast;
+ return TC_Success;
+ }
+
+ // We tried everything. Everything! Nothing works! :-(
+ return TC_NotApplicable;
+}
+
+/// Tests whether a conversion according to N2844 is valid.
+TryCastResult
+TryLValueToRValueCast(Sema &Self, Expr *SrcExpr, QualType DestType,
+ bool CStyle, CastKind &Kind, CXXCastPath &BasePath,
+ unsigned &msg) {
+ // C++0x [expr.static.cast]p3:
+ // A glvalue of type "cv1 T1" can be cast to type "rvalue reference to
+ // cv2 T2" if "cv2 T2" is reference-compatible with "cv1 T1".
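+  //
+  // For illustration (this note is not in the original source): this is the
+  // rule that makes "static_cast<T&&>(x)" -- the core of std::move -- valid
+  // when x is an lvalue of type T.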
+ const RValueReferenceType *R = DestType->getAs<RValueReferenceType>();
+ if (!R)
+ return TC_NotApplicable;
+
+ if (!SrcExpr->isGLValue())
+ return TC_NotApplicable;
+
+ // Because we try the reference downcast before this function, from now on
+ // this is the only cast possibility, so we issue an error if we fail now.
+ // FIXME: Should allow casting away constness if CStyle.
+ bool DerivedToBase;
+ bool ObjCConversion;
+ bool ObjCLifetimeConversion;
+ QualType FromType = SrcExpr->getType();
+ QualType ToType = R->getPointeeType();
+ if (CStyle) {
+ FromType = FromType.getUnqualifiedType();
+ ToType = ToType.getUnqualifiedType();
+ }
+
+ if (Self.CompareReferenceRelationship(SrcExpr->getLocStart(),
+ ToType, FromType,
+ DerivedToBase, ObjCConversion,
+ ObjCLifetimeConversion)
+ < Sema::Ref_Compatible_With_Added_Qualification) {
+ msg = diag::err_bad_lvalue_to_rvalue_cast;
+ return TC_Failed;
+ }
+
+ if (DerivedToBase) {
+ Kind = CK_DerivedToBase;
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/true);
+ if (!Self.IsDerivedFrom(SrcExpr->getType(), R->getPointeeType(), Paths))
+ return TC_NotApplicable;
+
+ Self.BuildBasePathArray(Paths, BasePath);
+ } else
+ Kind = CK_NoOp;
+
+ return TC_Success;
+}
+
+/// Tests whether a conversion according to C++ 5.2.9p5 is valid.
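+/// For illustration only (not part of the original comment), this covers
+/// casts such as
+///   Derived &D = static_cast\<Derived&\>(BaseLValue);
+/// where BaseLValue is an lvalue of a base class of Derived.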
+TryCastResult
+TryStaticReferenceDowncast(Sema &Self, Expr *SrcExpr, QualType DestType,
+ bool CStyle, const SourceRange &OpRange,
+ unsigned &msg, CastKind &Kind,
+ CXXCastPath &BasePath) {
+ // C++ 5.2.9p5: An lvalue of type "cv1 B", where B is a class type, can be
+ // cast to type "reference to cv2 D", where D is a class derived from B,
+ // if a valid standard conversion from "pointer to D" to "pointer to B"
+ // exists, cv2 >= cv1, and B is not a virtual base class of D.
+ // In addition, DR54 clarifies that the base must be accessible in the
+ // current context. Although the wording of DR54 only applies to the pointer
+  // variant of this rule, the intent is clearly for it to apply to this
+ // conversion as well.
+
+ const ReferenceType *DestReference = DestType->getAs<ReferenceType>();
+ if (!DestReference) {
+ return TC_NotApplicable;
+ }
+ bool RValueRef = DestReference->isRValueReferenceType();
+ if (!RValueRef && !SrcExpr->isLValue()) {
+ // We know the left side is an lvalue reference, so we can suggest a reason.
+ msg = diag::err_bad_cxx_cast_rvalue;
+ return TC_NotApplicable;
+ }
+
+ QualType DestPointee = DestReference->getPointeeType();
+
+ return TryStaticDowncast(Self,
+ Self.Context.getCanonicalType(SrcExpr->getType()),
+ Self.Context.getCanonicalType(DestPointee), CStyle,
+ OpRange, SrcExpr->getType(), DestType, msg, Kind,
+ BasePath);
+}
+
+/// Tests whether a conversion according to C++ 5.2.9p8 is valid.
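+/// For illustration only (not part of the original comment), this covers
+/// casts such as
+///   Derived *D = static_cast\<Derived*\>(BasePtr);
+/// which compile to an unchecked base-to-derived pointer adjustment.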
+TryCastResult
+TryStaticPointerDowncast(Sema &Self, QualType SrcType, QualType DestType,
+ bool CStyle, const SourceRange &OpRange,
+ unsigned &msg, CastKind &Kind,
+ CXXCastPath &BasePath) {
+ // C++ 5.2.9p8: An rvalue of type "pointer to cv1 B", where B is a class
+ // type, can be converted to an rvalue of type "pointer to cv2 D", where D
+ // is a class derived from B, if a valid standard conversion from "pointer
+ // to D" to "pointer to B" exists, cv2 >= cv1, and B is not a virtual base
+ // class of D.
+ // In addition, DR54 clarifies that the base must be accessible in the
+ // current context.
+
+ const PointerType *DestPointer = DestType->getAs<PointerType>();
+ if (!DestPointer) {
+ return TC_NotApplicable;
+ }
+
+ const PointerType *SrcPointer = SrcType->getAs<PointerType>();
+ if (!SrcPointer) {
+ msg = diag::err_bad_static_cast_pointer_nonpointer;
+ return TC_NotApplicable;
+ }
+
+ return TryStaticDowncast(Self,
+ Self.Context.getCanonicalType(SrcPointer->getPointeeType()),
+ Self.Context.getCanonicalType(DestPointer->getPointeeType()),
+ CStyle, OpRange, SrcType, DestType, msg, Kind,
+ BasePath);
+}
+
+/// TryStaticDowncast - Common functionality of TryStaticReferenceDowncast and
+/// TryStaticPointerDowncast. Tests whether a static downcast from SrcType to
+/// DestType is possible and allowed.
+TryCastResult
+TryStaticDowncast(Sema &Self, CanQualType SrcType, CanQualType DestType,
+ bool CStyle, const SourceRange &OpRange, QualType OrigSrcType,
+ QualType OrigDestType, unsigned &msg,
+ CastKind &Kind, CXXCastPath &BasePath) {
+ // We can only work with complete types. But don't complain if it doesn't work
+ if (Self.RequireCompleteType(OpRange.getBegin(), SrcType, Self.PDiag(0)) ||
+ Self.RequireCompleteType(OpRange.getBegin(), DestType, Self.PDiag(0)))
+ return TC_NotApplicable;
+
+ // Downcast can only happen in class hierarchies, so we need classes.
+ if (!DestType->getAs<RecordType>() || !SrcType->getAs<RecordType>()) {
+ return TC_NotApplicable;
+ }
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/true);
+ if (!Self.IsDerivedFrom(DestType, SrcType, Paths)) {
+ return TC_NotApplicable;
+ }
+
+ // Target type does derive from source type. Now we're serious. If an error
+ // appears now, it's not ignored.
+ // This may not be entirely in line with the standard. Take for example:
+ // struct A {};
+ // struct B : virtual A {
+ // B(A&);
+ // };
+ //
+ // void f()
+ // {
+ // (void)static_cast<const B&>(*((A*)0));
+ // }
+ // As far as the standard is concerned, p5 does not apply (A is virtual), so
+ // p2 should be used instead - "const B& t(*((A*)0));" is perfectly valid.
+ // However, both GCC and Comeau reject this example, and accepting it would
+ // mean more complex code if we're to preserve the nice error message.
+ // FIXME: Being 100% compliant here would be nice to have.
+
+ // Must preserve cv, as always, unless we're in C-style mode.
+ if (!CStyle && !DestType.isAtLeastAsQualifiedAs(SrcType)) {
+ msg = diag::err_bad_cxx_cast_qualifiers_away;
+ return TC_Failed;
+ }
+
+ if (Paths.isAmbiguous(SrcType.getUnqualifiedType())) {
+    // This code is analogous to that in CheckDerivedToBaseConversion, except
+ // that it builds the paths in reverse order.
+ // To sum up: record all paths to the base and build a nice string from
+ // them. Use it to spice up the error message.
+ if (!Paths.isRecordingPaths()) {
+ Paths.clear();
+ Paths.setRecordingPaths(true);
+ Self.IsDerivedFrom(DestType, SrcType, Paths);
+ }
+ std::string PathDisplayStr;
+ std::set<unsigned> DisplayedPaths;
+ for (CXXBasePaths::paths_iterator PI = Paths.begin(), PE = Paths.end();
+ PI != PE; ++PI) {
+ if (DisplayedPaths.insert(PI->back().SubobjectNumber).second) {
+ // We haven't displayed a path to this particular base
+ // class subobject yet.
+ PathDisplayStr += "\n ";
+ for (CXXBasePath::const_reverse_iterator EI = PI->rbegin(),
+ EE = PI->rend();
+ EI != EE; ++EI)
+ PathDisplayStr += EI->Base->getType().getAsString() + " -> ";
+ PathDisplayStr += QualType(DestType).getAsString();
+ }
+ }
+
+ Self.Diag(OpRange.getBegin(), diag::err_ambiguous_base_to_derived_cast)
+ << QualType(SrcType).getUnqualifiedType()
+ << QualType(DestType).getUnqualifiedType()
+ << PathDisplayStr << OpRange;
+ msg = 0;
+ return TC_Failed;
+ }
+
+ if (Paths.getDetectedVirtual() != 0) {
+ QualType VirtualBase(Paths.getDetectedVirtual(), 0);
+ Self.Diag(OpRange.getBegin(), diag::err_static_downcast_via_virtual)
+ << OrigSrcType << OrigDestType << VirtualBase << OpRange;
+ msg = 0;
+ return TC_Failed;
+ }
+
+ if (!CStyle) {
+ switch (Self.CheckBaseClassAccess(OpRange.getBegin(),
+ SrcType, DestType,
+ Paths.front(),
+ diag::err_downcast_from_inaccessible_base)) {
+ case Sema::AR_accessible:
+ case Sema::AR_delayed: // be optimistic
+ case Sema::AR_dependent: // be optimistic
+ break;
+
+ case Sema::AR_inaccessible:
+ msg = 0;
+ return TC_Failed;
+ }
+ }
+
+ Self.BuildBasePathArray(Paths, BasePath);
+ Kind = CK_BaseToDerived;
+ return TC_Success;
+}
+
+/// TryStaticMemberPointerUpcast - Tests whether a conversion according to
+/// C++ 5.2.9p9 is valid:
+///
+/// An rvalue of type "pointer to member of D of type cv1 T" can be
+/// converted to an rvalue of type "pointer to member of B of type cv2 T",
+/// where B is a base class of D [...].
+///
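+/// For illustration only (not part of the original comment):
+///   struct B {}; struct D : B { int M; };
+///   int D::*PD = &D::M;
+///   int B::*PB = static_cast\<int B::*\>(PD);  // D::* converted to B::*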
+TryCastResult
+TryStaticMemberPointerUpcast(Sema &Self, ExprResult &SrcExpr, QualType SrcType,
+ QualType DestType, bool CStyle,
+ const SourceRange &OpRange,
+ unsigned &msg, CastKind &Kind,
+ CXXCastPath &BasePath) {
+ const MemberPointerType *DestMemPtr = DestType->getAs<MemberPointerType>();
+ if (!DestMemPtr)
+ return TC_NotApplicable;
+
+ bool WasOverloadedFunction = false;
+ DeclAccessPair FoundOverload;
+ if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
+ if (FunctionDecl *Fn
+ = Self.ResolveAddressOfOverloadedFunction(SrcExpr.get(), DestType, false,
+ FoundOverload)) {
+ CXXMethodDecl *M = cast<CXXMethodDecl>(Fn);
+ SrcType = Self.Context.getMemberPointerType(Fn->getType(),
+ Self.Context.getTypeDeclType(M->getParent()).getTypePtr());
+ WasOverloadedFunction = true;
+ }
+ }
+
+ const MemberPointerType *SrcMemPtr = SrcType->getAs<MemberPointerType>();
+ if (!SrcMemPtr) {
+ msg = diag::err_bad_static_cast_member_pointer_nonmp;
+ return TC_NotApplicable;
+ }
+
+ // T == T, modulo cv
+ if (!Self.Context.hasSameUnqualifiedType(SrcMemPtr->getPointeeType(),
+ DestMemPtr->getPointeeType()))
+ return TC_NotApplicable;
+
+ // B base of D
+ QualType SrcClass(SrcMemPtr->getClass(), 0);
+ QualType DestClass(DestMemPtr->getClass(), 0);
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/true);
+ if (!Self.IsDerivedFrom(SrcClass, DestClass, Paths)) {
+ return TC_NotApplicable;
+ }
+
+ // B is a base of D. But is it an allowed base? If not, it's a hard error.
+ if (Paths.isAmbiguous(Self.Context.getCanonicalType(DestClass))) {
+ Paths.clear();
+ Paths.setRecordingPaths(true);
+ bool StillOkay = Self.IsDerivedFrom(SrcClass, DestClass, Paths);
+ assert(StillOkay);
+ (void)StillOkay;
+ std::string PathDisplayStr = Self.getAmbiguousPathsDisplayString(Paths);
+ Self.Diag(OpRange.getBegin(), diag::err_ambiguous_memptr_conv)
+ << 1 << SrcClass << DestClass << PathDisplayStr << OpRange;
+ msg = 0;
+ return TC_Failed;
+ }
+
+ if (const RecordType *VBase = Paths.getDetectedVirtual()) {
+ Self.Diag(OpRange.getBegin(), diag::err_memptr_conv_via_virtual)
+ << SrcClass << DestClass << QualType(VBase, 0) << OpRange;
+ msg = 0;
+ return TC_Failed;
+ }
+
+ if (!CStyle) {
+ switch (Self.CheckBaseClassAccess(OpRange.getBegin(),
+ DestClass, SrcClass,
+ Paths.front(),
+ diag::err_upcast_to_inaccessible_base)) {
+ case Sema::AR_accessible:
+ case Sema::AR_delayed:
+ case Sema::AR_dependent:
+ // Optimistically assume that the delayed and dependent cases
+ // will work out.
+ break;
+
+ case Sema::AR_inaccessible:
+ msg = 0;
+ return TC_Failed;
+ }
+ }
+
+ if (WasOverloadedFunction) {
+ // Resolve the address of the overloaded function again, this time
+ // allowing complaints if something goes wrong.
+ FunctionDecl *Fn = Self.ResolveAddressOfOverloadedFunction(SrcExpr.get(),
+ DestType,
+ true,
+ FoundOverload);
+ if (!Fn) {
+ msg = 0;
+ return TC_Failed;
+ }
+
+ SrcExpr = Self.FixOverloadedFunctionReference(SrcExpr, FoundOverload, Fn);
+ if (!SrcExpr.isUsable()) {
+ msg = 0;
+ return TC_Failed;
+ }
+ }
+
+ Self.BuildBasePathArray(Paths, BasePath);
+ Kind = CK_DerivedToBaseMemberPointer;
+ return TC_Success;
+}
+
+/// TryStaticImplicitCast - Tests whether a conversion according to C++ 5.2.9p2
+/// is valid:
+///
+/// An expression e can be explicitly converted to a type T using a
+/// @c static_cast if the declaration "T t(e);" is well-formed [...].
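+///
+/// For illustration only (not part of the original comment): this is the
+/// clause that lets static_cast\<std::string\>("hi") succeed, because the
+/// declaration "std::string t("hi");" is well-formed.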
+TryCastResult
+TryStaticImplicitCast(Sema &Self, ExprResult &SrcExpr, QualType DestType,
+ Sema::CheckedConversionKind CCK,
+ const SourceRange &OpRange, unsigned &msg,
+ CastKind &Kind, bool ListInitialization) {
+ if (DestType->isRecordType()) {
+ if (Self.RequireCompleteType(OpRange.getBegin(), DestType,
+ diag::err_bad_dynamic_cast_incomplete)) {
+ msg = 0;
+ return TC_Failed;
+ }
+ }
+
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(DestType);
+ InitializationKind InitKind
+ = (CCK == Sema::CCK_CStyleCast)
+ ? InitializationKind::CreateCStyleCast(OpRange.getBegin(), OpRange,
+ ListInitialization)
+ : (CCK == Sema::CCK_FunctionalCast)
+ ? InitializationKind::CreateFunctionalCast(OpRange, ListInitialization)
+ : InitializationKind::CreateCast(OpRange);
+ Expr *SrcExprRaw = SrcExpr.get();
+ InitializationSequence InitSeq(Self, Entity, InitKind, &SrcExprRaw, 1);
+
+ // At this point of CheckStaticCast, if the destination is a reference,
+ // or the expression is an overload expression this has to work.
+ // There is no other way that works.
+ // On the other hand, if we're checking a C-style cast, we've still got
+ // the reinterpret_cast way.
+ bool CStyle
+ = (CCK == Sema::CCK_CStyleCast || CCK == Sema::CCK_FunctionalCast);
+ if (InitSeq.Failed() && (CStyle || !DestType->isReferenceType()))
+ return TC_NotApplicable;
+
+ ExprResult Result
+ = InitSeq.Perform(Self, Entity, InitKind, MultiExprArg(Self, &SrcExprRaw, 1));
+ if (Result.isInvalid()) {
+ msg = 0;
+ return TC_Failed;
+ }
+
+ if (InitSeq.isConstructorInitialization())
+ Kind = CK_ConstructorConversion;
+ else
+ Kind = CK_NoOp;
+
+ SrcExpr = move(Result);
+ return TC_Success;
+}
+
+/// TryConstCast - See if a const_cast from source to destination is allowed,
+/// and perform it if it is.
+static TryCastResult TryConstCast(Sema &Self, Expr *SrcExpr, QualType DestType,
+ bool CStyle, unsigned &msg) {
+ DestType = Self.Context.getCanonicalType(DestType);
+ QualType SrcType = SrcExpr->getType();
+ if (const ReferenceType *DestTypeTmp =DestType->getAs<ReferenceType>()) {
+ if (DestTypeTmp->isLValueReferenceType() && !SrcExpr->isLValue()) {
+ // Cannot const_cast non-lvalue to lvalue reference type. But if this
+ // is C-style, static_cast might find a way, so we simply suggest a
+ // message and tell the parent to keep searching.
+ msg = diag::err_bad_cxx_cast_rvalue;
+ return TC_NotApplicable;
+ }
+
+ // C++ 5.2.11p4: An lvalue of type T1 can be [cast] to an lvalue of type T2
+ // [...] if a pointer to T1 can be [cast] to the type pointer to T2.
+ DestType = Self.Context.getPointerType(DestTypeTmp->getPointeeType());
+ SrcType = Self.Context.getPointerType(SrcType);
+ }
+
+ // C++ 5.2.11p5: For a const_cast involving pointers to data members [...]
+ // the rules for const_cast are the same as those used for pointers.
+
+ if (!DestType->isPointerType() &&
+ !DestType->isMemberPointerType() &&
+ !DestType->isObjCObjectPointerType()) {
+ // Cannot cast to non-pointer, non-reference type. Note that, if DestType
+ // was a reference type, we converted it to a pointer above.
+ // The status of rvalue references isn't entirely clear, but it looks like
+ // conversion to them is simply invalid.
+ // C++ 5.2.11p3: For two pointer types [...]
+ if (!CStyle)
+ msg = diag::err_bad_const_cast_dest;
+ return TC_NotApplicable;
+ }
+ if (DestType->isFunctionPointerType() ||
+ DestType->isMemberFunctionPointerType()) {
+ // Cannot cast direct function pointers.
+ // C++ 5.2.11p2: [...] where T is any object type or the void type [...]
+ // T is the ultimate pointee of source and target type.
+ if (!CStyle)
+ msg = diag::err_bad_const_cast_dest;
+ return TC_NotApplicable;
+ }
+ SrcType = Self.Context.getCanonicalType(SrcType);
+
+ // Unwrap the pointers. Ignore qualifiers. Terminate early if the types are
+ // completely equal.
+ // C++ 5.2.11p3 describes the core semantics of const_cast. All cv specifiers
+ // in multi-level pointers may change, but the level count must be the same,
+ // as must be the final pointee type.
+ while (SrcType != DestType &&
+ Self.Context.UnwrapSimilarPointerTypes(SrcType, DestType)) {
+ Qualifiers SrcQuals, DestQuals;
+ SrcType = Self.Context.getUnqualifiedArrayType(SrcType, SrcQuals);
+ DestType = Self.Context.getUnqualifiedArrayType(DestType, DestQuals);
+
+ // const_cast is permitted to strip cvr-qualifiers, only. Make sure that
+ // the other qualifiers (e.g., address spaces) are identical.
+ SrcQuals.removeCVRQualifiers();
+ DestQuals.removeCVRQualifiers();
+ if (SrcQuals != DestQuals)
+ return TC_NotApplicable;
+ }
+
+ // Since we're dealing in canonical types, the remainder must be the same.
+ if (SrcType != DestType)
+ return TC_NotApplicable;
+
+ return TC_Success;
+}
+
+// Checks for undefined behavior in reinterpret_cast.
+// The cases checked for are:
+// *reinterpret_cast<T*>(&a)
+// reinterpret_cast<T&>(a)
+// where accessing 'a' as type 'T' will result in undefined behavior.
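+//
+// For illustration (this example is not in the original comment):
+//   float F = 1.0f;
+//   int I = *reinterpret_cast<int*>(&F); // type punning; warned about here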
+void Sema::CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
+ bool IsDereference,
+ SourceRange Range) {
+ unsigned DiagID = IsDereference ?
+ diag::warn_pointer_indirection_from_incompatible_type :
+ diag::warn_undefined_reinterpret_cast;
+
+ if (Diags.getDiagnosticLevel(DiagID, Range.getBegin()) ==
+ DiagnosticsEngine::Ignored) {
+ return;
+ }
+
+ QualType SrcTy, DestTy;
+ if (IsDereference) {
+ if (!SrcType->getAs<PointerType>() || !DestType->getAs<PointerType>()) {
+ return;
+ }
+ SrcTy = SrcType->getPointeeType();
+ DestTy = DestType->getPointeeType();
+ } else {
+ if (!DestType->getAs<ReferenceType>()) {
+ return;
+ }
+ SrcTy = SrcType;
+ DestTy = DestType->getPointeeType();
+ }
+
+ // Cast is compatible if the types are the same.
+ if (Context.hasSameUnqualifiedType(DestTy, SrcTy)) {
+ return;
+ }
+ // or one of the types is a char or void type
+ if (DestTy->isAnyCharacterType() || DestTy->isVoidType() ||
+ SrcTy->isAnyCharacterType() || SrcTy->isVoidType()) {
+ return;
+ }
+ // or one of the types is a tag type.
+ if (SrcTy->getAs<TagType>() || DestTy->getAs<TagType>()) {
+ return;
+ }
+
+ // FIXME: Scoped enums?
+ if ((SrcTy->isUnsignedIntegerType() && DestTy->isSignedIntegerType()) ||
+ (SrcTy->isSignedIntegerType() && DestTy->isUnsignedIntegerType())) {
+ if (Context.getTypeSize(DestTy) == Context.getTypeSize(SrcTy)) {
+ return;
+ }
+ }
+
+ Diag(Range.getBegin(), DiagID) << SrcType << DestType << Range;
+}
+
+static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
+ QualType DestType, bool CStyle,
+ const SourceRange &OpRange,
+ unsigned &msg,
+ CastKind &Kind) {
+ bool IsLValueCast = false;
+
+ DestType = Self.Context.getCanonicalType(DestType);
+ QualType SrcType = SrcExpr.get()->getType();
+
+ // Is the source an overloaded name? (i.e. &foo)
+  // If so, reinterpret_cast cannot help us here (13.4, p1, bullet 5) ...
+ if (SrcType == Self.Context.OverloadTy) {
+ // ... unless foo<int> resolves to an lvalue unambiguously.
+ // TODO: what if this fails because of DiagnoseUseOfDecl or something
+ // like it?
+ ExprResult SingleFunctionExpr = SrcExpr;
+ if (Self.ResolveAndFixSingleFunctionTemplateSpecialization(
+ SingleFunctionExpr,
+ Expr::getValueKindForType(DestType) == VK_RValue // Convert Fun to Ptr
+ ) && SingleFunctionExpr.isUsable()) {
+ SrcExpr = move(SingleFunctionExpr);
+ SrcType = SrcExpr.get()->getType();
+ } else {
+ return TC_NotApplicable;
+ }
+ }
+
+ if (const ReferenceType *DestTypeTmp = DestType->getAs<ReferenceType>()) {
+ bool LValue = DestTypeTmp->isLValueReferenceType();
+ if (LValue && !SrcExpr.get()->isLValue()) {
+ // Cannot cast non-lvalue to lvalue reference type. See the similar
+ // comment in const_cast.
+ msg = diag::err_bad_cxx_cast_rvalue;
+ return TC_NotApplicable;
+ }
+
+ if (!CStyle) {
+ Self.CheckCompatibleReinterpretCast(SrcType, DestType,
+ /*isDereference=*/false, OpRange);
+ }
+
+ // C++ 5.2.10p10: [...] a reference cast reinterpret_cast<T&>(x) has the
+ // same effect as the conversion *reinterpret_cast<T*>(&x) with the
+ // built-in & and * operators.
+
+ const char *inappropriate = 0;
+ switch (SrcExpr.get()->getObjectKind()) {
+ case OK_Ordinary:
+ break;
+ case OK_BitField: inappropriate = "bit-field"; break;
+ case OK_VectorComponent: inappropriate = "vector element"; break;
+ case OK_ObjCProperty: inappropriate = "property expression"; break;
+ case OK_ObjCSubscript: inappropriate = "container subscripting expression";
+ break;
+ }
+ if (inappropriate) {
+ Self.Diag(OpRange.getBegin(), diag::err_bad_reinterpret_cast_reference)
+ << inappropriate << DestType
+ << OpRange << SrcExpr.get()->getSourceRange();
+ msg = 0; SrcExpr = ExprError();
+ return TC_NotApplicable;
+ }
+
+ // This code does this transformation for the checked types.
+ DestType = Self.Context.getPointerType(DestTypeTmp->getPointeeType());
+ SrcType = Self.Context.getPointerType(SrcType);
+
+ IsLValueCast = true;
+ }
+
+ // Canonicalize source for comparison.
+ SrcType = Self.Context.getCanonicalType(SrcType);
+
+ const MemberPointerType *DestMemPtr = DestType->getAs<MemberPointerType>(),
+ *SrcMemPtr = SrcType->getAs<MemberPointerType>();
+ if (DestMemPtr && SrcMemPtr) {
+ // C++ 5.2.10p9: An rvalue of type "pointer to member of X of type T1"
+ // can be explicitly converted to an rvalue of type "pointer to member
+ // of Y of type T2" if T1 and T2 are both function types or both object
+ // types.
+ if (DestMemPtr->getPointeeType()->isFunctionType() !=
+ SrcMemPtr->getPointeeType()->isFunctionType())
+ return TC_NotApplicable;
+
+ // C++ 5.2.10p2: The reinterpret_cast operator shall not cast away
+ // constness.
+ // A reinterpret_cast followed by a const_cast can, though, so in C-style,
+ // we accept it.
+ if (CastsAwayConstness(Self, SrcType, DestType, /*CheckCVR=*/!CStyle,
+ /*CheckObjCLifetime=*/CStyle)) {
+ msg = diag::err_bad_cxx_cast_qualifiers_away;
+ return TC_Failed;
+ }
+
+ // Don't allow casting between member pointers of different sizes.
+ if (Self.Context.getTypeSize(DestMemPtr) !=
+ Self.Context.getTypeSize(SrcMemPtr)) {
+ msg = diag::err_bad_cxx_cast_member_pointer_size;
+ return TC_Failed;
+ }
+
+ // A valid member pointer cast.
+ assert(!IsLValueCast);
+ Kind = CK_ReinterpretMemberPointer;
+ return TC_Success;
+ }
+
+ // See below for the enumeral issue.
+ if (SrcType->isNullPtrType() && DestType->isIntegralType(Self.Context)) {
+ // C++0x 5.2.10p4: A pointer can be explicitly converted to any integral
+ // type large enough to hold it. A value of std::nullptr_t can be
+ // converted to an integral type; the conversion has the same meaning
+ // and validity as a conversion of (void*)0 to the integral type.
+ if (Self.Context.getTypeSize(SrcType) >
+ Self.Context.getTypeSize(DestType)) {
+ msg = diag::err_bad_reinterpret_cast_small_int;
+ return TC_Failed;
+ }
+ Kind = CK_PointerToIntegral;
+ return TC_Success;
+ }
+
+ bool destIsVector = DestType->isVectorType();
+ bool srcIsVector = SrcType->isVectorType();
+ if (srcIsVector || destIsVector) {
+ // FIXME: Should this also apply to floating point types?
+ bool srcIsScalar = SrcType->isIntegralType(Self.Context);
+ bool destIsScalar = DestType->isIntegralType(Self.Context);
+
+ // Check if this is a cast between a vector and something else.
+ if (!(srcIsScalar && destIsVector) && !(srcIsVector && destIsScalar) &&
+ !(srcIsVector && destIsVector))
+ return TC_NotApplicable;
+
+ // If both types have the same size, we can successfully cast.
+ if (Self.Context.getTypeSize(SrcType)
+ == Self.Context.getTypeSize(DestType)) {
+ Kind = CK_BitCast;
+ return TC_Success;
+ }
+
+ if (destIsScalar)
+ msg = diag::err_bad_cxx_cast_vector_to_scalar_different_size;
+ else if (srcIsScalar)
+ msg = diag::err_bad_cxx_cast_scalar_to_vector_different_size;
+ else
+ msg = diag::err_bad_cxx_cast_vector_to_vector_different_size;
+
+ return TC_Failed;
+ }
+
+ if (SrcType == DestType) {
+ // C++ 5.2.10p2 has a note that mentions that, subject to all other
+ // restrictions, a cast to the same type is allowed so long as it does not
+ // cast away constness. In C++98, the intent was not entirely clear here,
+ // since all other paragraphs explicitly forbid casts to the same type.
+ // C++11 clarifies this case with p2.
+ //
+ // The only allowed types are: integral, enumeration, pointer, or
+    // pointer-to-member types. We won't restrict Obj-C pointers either.
+ Kind = CK_NoOp;
+ TryCastResult Result = TC_NotApplicable;
+ if (SrcType->isIntegralOrEnumerationType() ||
+ SrcType->isAnyPointerType() ||
+ SrcType->isMemberPointerType() ||
+ SrcType->isBlockPointerType()) {
+ Result = TC_Success;
+ }
+ return Result;
+ }
+
+ bool destIsPtr = DestType->isAnyPointerType() ||
+ DestType->isBlockPointerType();
+ bool srcIsPtr = SrcType->isAnyPointerType() ||
+ SrcType->isBlockPointerType();
+ if (!destIsPtr && !srcIsPtr) {
+ // Except for std::nullptr_t->integer and lvalue->reference, which are
+ // handled above, at least one of the two arguments must be a pointer.
+ return TC_NotApplicable;
+ }
+
+ if (DestType->isIntegralType(Self.Context)) {
+ assert(srcIsPtr && "One type must be a pointer");
+ // C++ 5.2.10p4: A pointer can be explicitly converted to any integral
+ // type large enough to hold it; except in Microsoft mode, where the
+ // integral type size doesn't matter.
+ if ((Self.Context.getTypeSize(SrcType) >
+ Self.Context.getTypeSize(DestType)) &&
+ !Self.getLangOpts().MicrosoftExt) {
+ msg = diag::err_bad_reinterpret_cast_small_int;
+ return TC_Failed;
+ }
+ Kind = CK_PointerToIntegral;
+ return TC_Success;
+ }
+
+ if (SrcType->isIntegralOrEnumerationType()) {
+ assert(destIsPtr && "One type must be a pointer");
+ // C++ 5.2.10p5: A value of integral or enumeration type can be explicitly
+ // converted to a pointer.
+ // C++ 5.2.10p9: [Note: ...a null pointer constant of integral type is not
+ // necessarily converted to a null pointer value.]
+ Kind = CK_IntegralToPointer;
+ return TC_Success;
+ }
+
+ if (!destIsPtr || !srcIsPtr) {
+ // With the valid non-pointer conversions out of the way, we can be even
+ // more stringent.
+ return TC_NotApplicable;
+ }
+
+ // C++ 5.2.10p2: The reinterpret_cast operator shall not cast away constness.
+ // The C-style cast operator can.
+ if (CastsAwayConstness(Self, SrcType, DestType, /*CheckCVR=*/!CStyle,
+ /*CheckObjCLifetime=*/CStyle)) {
+ msg = diag::err_bad_cxx_cast_qualifiers_away;
+ return TC_Failed;
+ }
+
+ // Cannot convert between block pointers and Objective-C object pointers.
+ if ((SrcType->isBlockPointerType() && DestType->isObjCObjectPointerType()) ||
+ (DestType->isBlockPointerType() && SrcType->isObjCObjectPointerType()))
+ return TC_NotApplicable;
+
+ if (IsLValueCast) {
+ Kind = CK_LValueBitCast;
+ } else if (DestType->isObjCObjectPointerType()) {
+ Kind = Self.PrepareCastToObjCObjectPointer(SrcExpr);
+ } else if (DestType->isBlockPointerType()) {
+ if (!SrcType->isBlockPointerType()) {
+ Kind = CK_AnyPointerToBlockPointerCast;
+ } else {
+ Kind = CK_BitCast;
+ }
+ } else {
+ Kind = CK_BitCast;
+ }
+
+ // Any pointer can be cast to an Objective-C pointer type with a C-style
+ // cast.
+ if (CStyle && DestType->isObjCObjectPointerType()) {
+ return TC_Success;
+ }
+
+ // Not casting away constness, so the only remaining check is for compatible
+ // pointer categories.
+
+ if (SrcType->isFunctionPointerType()) {
+ if (DestType->isFunctionPointerType()) {
+ // C++ 5.2.10p6: A pointer to a function can be explicitly converted to
+ // a pointer to a function of a different type.
+ return TC_Success;
+ }
+
+ // C++0x 5.2.10p8: Converting a pointer to a function into a pointer to
+ // an object type or vice versa is conditionally-supported.
+ // Compilers support it in C++03 too, though, because it's necessary for
+ // casting the return value of dlsym() and GetProcAddress().
+ // FIXME: Conditionally-supported behavior should be configurable in the
+ // TargetInfo or similar.
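+    // For illustration (not in the original source), this is the pattern the
+    // extension exists for:
+    //   typedef void (*FnPtr)();
+    //   FnPtr F = reinterpret_cast<FnPtr>(dlsym(Handle, "f"));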
+ Self.Diag(OpRange.getBegin(),
+ Self.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_cast_fn_obj : diag::ext_cast_fn_obj)
+ << OpRange;
+ return TC_Success;
+ }
+
+ if (DestType->isFunctionPointerType()) {
+ // See above.
+ Self.Diag(OpRange.getBegin(),
+ Self.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_cast_fn_obj : diag::ext_cast_fn_obj)
+ << OpRange;
+ return TC_Success;
+ }
+
+ // C++ 5.2.10p7: A pointer to an object can be explicitly converted to
+ // a pointer to an object of different type.
+ // Void pointers are not specified, but supported by every compiler out there.
+ // So we finish by allowing everything that remains - it's got to be two
+ // object pointers.
+ return TC_Success;
+}
+
+void CastOperation::CheckCXXCStyleCast(bool FunctionalStyle,
+ bool ListInitialization) {
+ // Handle placeholders.
+ if (isPlaceholder()) {
+ // C-style casts can resolve __unknown_any types.
+ if (claimPlaceholder(BuiltinType::UnknownAny)) {
+ SrcExpr = Self.checkUnknownAnyCast(DestRange, DestType,
+ SrcExpr.get(), Kind,
+ ValueKind, BasePath);
+ return;
+ }
+
+ checkNonOverloadPlaceholders();
+ if (SrcExpr.isInvalid())
+ return;
+ }
+
+ // C++ 5.2.9p4: Any expression can be explicitly converted to type "cv void".
+ // This test is outside everything else because it's the only case where
+ // a non-lvalue-reference target type does not lead to decay.
+ if (DestType->isVoidType()) {
+ Kind = CK_ToVoid;
+
+ if (claimPlaceholder(BuiltinType::Overload)) {
+ Self.ResolveAndFixSingleFunctionTemplateSpecialization(
+ SrcExpr, /* Decay Function to ptr */ false,
+ /* Complain */ true, DestRange, DestType,
+ diag::err_bad_cstyle_cast_overload);
+ if (SrcExpr.isInvalid())
+ return;
+ }
+
+ SrcExpr = Self.IgnoredValueConversions(SrcExpr.take());
+ if (SrcExpr.isInvalid())
+ return;
+
+ return;
+ }
+
+ // If the type is dependent, we won't do any other semantic analysis now.
+ if (DestType->isDependentType() || SrcExpr.get()->isTypeDependent()) {
+ assert(Kind == CK_Dependent);
+ return;
+ }
+
+ if (ValueKind == VK_RValue && !DestType->isRecordType() &&
+ !isPlaceholder(BuiltinType::Overload)) {
+ SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.take());
+ if (SrcExpr.isInvalid())
+ return;
+ }
+
+ // AltiVec vector initialization with a single literal.
+ if (const VectorType *vecTy = DestType->getAs<VectorType>())
+ if (vecTy->getVectorKind() == VectorType::AltiVecVector
+ && (SrcExpr.get()->getType()->isIntegerType()
+ || SrcExpr.get()->getType()->isFloatingType())) {
+ Kind = CK_VectorSplat;
+ return;
+ }
+
+ // C++ [expr.cast]p5: The conversions performed by
+ // - a const_cast,
+ // - a static_cast,
+ // - a static_cast followed by a const_cast,
+ // - a reinterpret_cast, or
+ // - a reinterpret_cast followed by a const_cast,
+ // can be performed using the cast notation of explicit type conversion.
+ // [...] If a conversion can be interpreted in more than one of the ways
+ // listed above, the interpretation that appears first in the list is used,
+ // even if a cast resulting from that interpretation is ill-formed.
+ // In plain language, this means trying a const_cast ...
+ unsigned msg = diag::err_bad_cxx_cast_generic;
+ TryCastResult tcr = TryConstCast(Self, SrcExpr.get(), DestType,
+ /*CStyle*/true, msg);
+ if (tcr == TC_Success)
+ Kind = CK_NoOp;
+
+ Sema::CheckedConversionKind CCK
+ = FunctionalStyle? Sema::CCK_FunctionalCast
+ : Sema::CCK_CStyleCast;
+ if (tcr == TC_NotApplicable) {
+ // ... or if that is not possible, a static_cast, ignoring const, ...
+ tcr = TryStaticCast(Self, SrcExpr, DestType, CCK, OpRange,
+ msg, Kind, BasePath, ListInitialization);
+ if (SrcExpr.isInvalid())
+ return;
+
+ if (tcr == TC_NotApplicable) {
+ // ... and finally a reinterpret_cast, ignoring const.
+ tcr = TryReinterpretCast(Self, SrcExpr, DestType, /*CStyle*/true,
+ OpRange, msg, Kind);
+ if (SrcExpr.isInvalid())
+ return;
+ }
+ }
+
+ if (Self.getLangOpts().ObjCAutoRefCount && tcr == TC_Success)
+ checkObjCARCConversion(CCK);
+
+ if (tcr != TC_Success && msg != 0) {
+ if (SrcExpr.get()->getType() == Self.Context.OverloadTy) {
+ DeclAccessPair Found;
+ FunctionDecl *Fn = Self.ResolveAddressOfOverloadedFunction(SrcExpr.get(),
+ DestType,
+ /*Complain*/ true,
+ Found);
+
+ assert(!Fn && "cast failed but able to resolve overload expression!!");
+ (void)Fn;
+
+ } else {
+ diagnoseBadCast(Self, msg, (FunctionalStyle ? CT_Functional : CT_CStyle),
+ OpRange, SrcExpr.get(), DestType, ListInitialization);
+ }
+ } else if (Kind == CK_BitCast) {
+ checkCastAlign();
+ }
+
+ // Clear out SrcExpr if there was a fatal error.
+ if (tcr != TC_Success)
+ SrcExpr = ExprError();
+}
+
+/// Check the semantics of a C-style cast operation, in C.
+void CastOperation::CheckCStyleCast() {
+ assert(!Self.getLangOpts().CPlusPlus);
+
+ // C-style casts can resolve __unknown_any types.
+ if (claimPlaceholder(BuiltinType::UnknownAny)) {
+ SrcExpr = Self.checkUnknownAnyCast(DestRange, DestType,
+ SrcExpr.get(), Kind,
+ ValueKind, BasePath);
+ return;
+ }
+
+ // C99 6.5.4p2: the cast type needs to be void or scalar and the expression
+ // type needs to be scalar.
+ if (DestType->isVoidType()) {
+ // We don't necessarily do lvalue-to-rvalue conversions on this.
+ SrcExpr = Self.IgnoredValueConversions(SrcExpr.take());
+ if (SrcExpr.isInvalid())
+ return;
+
+ // Cast to void allows any expr type.
+ Kind = CK_ToVoid;
+ return;
+ }
+
+ SrcExpr = Self.DefaultFunctionArrayLvalueConversion(SrcExpr.take());
+ if (SrcExpr.isInvalid())
+ return;
+ QualType SrcType = SrcExpr.get()->getType();
+
+ // You can cast an _Atomic(T) to anything you can cast a T to.
+ if (const AtomicType *AtomicSrcType = SrcType->getAs<AtomicType>())
+ SrcType = AtomicSrcType->getValueType();
+
+ assert(!SrcType->isPlaceholderType());
+
+ if (Self.RequireCompleteType(OpRange.getBegin(), DestType,
+ diag::err_typecheck_cast_to_incomplete)) {
+ SrcExpr = ExprError();
+ return;
+ }
+
+ if (!DestType->isScalarType() && !DestType->isVectorType()) {
+ const RecordType *DestRecordTy = DestType->getAs<RecordType>();
+
+ if (DestRecordTy && Self.Context.hasSameUnqualifiedType(DestType, SrcType)){
+ // GCC struct/union extension: allow cast to self.
+ Self.Diag(OpRange.getBegin(), diag::ext_typecheck_cast_nonscalar)
+ << DestType << SrcExpr.get()->getSourceRange();
+ Kind = CK_NoOp;
+ return;
+ }
+
+ // GCC's cast to union extension.
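+    // For illustration (not in the original source), this accepts code such
+    // as:
+    //   union U { int I; float F; };
+    //   union U V = (union U)42;   // initializes the 'int' member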
+ if (DestRecordTy && DestRecordTy->getDecl()->isUnion()) {
+ RecordDecl *RD = DestRecordTy->getDecl();
+ RecordDecl::field_iterator Field, FieldEnd;
+ for (Field = RD->field_begin(), FieldEnd = RD->field_end();
+ Field != FieldEnd; ++Field) {
+ if (Self.Context.hasSameUnqualifiedType(Field->getType(), SrcType) &&
+ !Field->isUnnamedBitfield()) {
+ Self.Diag(OpRange.getBegin(), diag::ext_typecheck_cast_to_union)
+ << SrcExpr.get()->getSourceRange();
+ break;
+ }
+ }
+ if (Field == FieldEnd) {
+ Self.Diag(OpRange.getBegin(), diag::err_typecheck_cast_to_union_no_type)
+ << SrcType << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+ Kind = CK_ToUnion;
+ return;
+ }
+
+ // Reject any other conversions to non-scalar types.
+ Self.Diag(OpRange.getBegin(), diag::err_typecheck_cond_expect_scalar)
+ << DestType << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+
+ // The type we're casting to is known to be a scalar or vector.
+
+ // Require the operand to be a scalar or vector.
+ if (!SrcType->isScalarType() && !SrcType->isVectorType()) {
+ Self.Diag(SrcExpr.get()->getExprLoc(),
+ diag::err_typecheck_expect_scalar_operand)
+ << SrcType << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+
+ if (DestType->isExtVectorType()) {
+ SrcExpr = Self.CheckExtVectorCast(OpRange, DestType, SrcExpr.take(), Kind);
+ return;
+ }
+
+ if (const VectorType *DestVecTy = DestType->getAs<VectorType>()) {
+ if (DestVecTy->getVectorKind() == VectorType::AltiVecVector &&
+ (SrcType->isIntegerType() || SrcType->isFloatingType())) {
+ Kind = CK_VectorSplat;
+ } else if (Self.CheckVectorCast(OpRange, DestType, SrcType, Kind)) {
+ SrcExpr = ExprError();
+ }
+ return;
+ }
+
+ if (SrcType->isVectorType()) {
+ if (Self.CheckVectorCast(OpRange, SrcType, DestType, Kind))
+ SrcExpr = ExprError();
+ return;
+ }
+
+ // The source and target types are both scalars, i.e.
+ // - arithmetic types (fundamental, enum, and complex)
+ // - all kinds of pointers
+ // Note that member pointers were filtered out with C++, above.
+
+ if (isa<ObjCSelectorExpr>(SrcExpr.get())) {
+ Self.Diag(SrcExpr.get()->getExprLoc(), diag::err_cast_selector_expr);
+ SrcExpr = ExprError();
+ return;
+ }
+
+ // If either type is a pointer, the other type has to be either an
+ // integer or a pointer.
+ if (!DestType->isArithmeticType()) {
+ if (!SrcType->isIntegralType(Self.Context) && SrcType->isArithmeticType()) {
+ Self.Diag(SrcExpr.get()->getExprLoc(),
+ diag::err_cast_pointer_from_non_pointer_int)
+ << SrcType << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+ } else if (!SrcType->isArithmeticType()) {
+ if (!DestType->isIntegralType(Self.Context) &&
+ DestType->isArithmeticType()) {
+ Self.Diag(SrcExpr.get()->getLocStart(),
+ diag::err_cast_pointer_to_non_pointer_int)
+ << DestType << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+ }
+
+ // ARC imposes extra restrictions on casts.
+ if (Self.getLangOpts().ObjCAutoRefCount) {
+ checkObjCARCConversion(Sema::CCK_CStyleCast);
+ if (SrcExpr.isInvalid())
+ return;
+
+ if (const PointerType *CastPtr = DestType->getAs<PointerType>()) {
+ if (const PointerType *ExprPtr = SrcType->getAs<PointerType>()) {
+ Qualifiers CastQuals = CastPtr->getPointeeType().getQualifiers();
+ Qualifiers ExprQuals = ExprPtr->getPointeeType().getQualifiers();
+ if (CastPtr->getPointeeType()->isObjCLifetimeType() &&
+ ExprPtr->getPointeeType()->isObjCLifetimeType() &&
+ !CastQuals.compatiblyIncludesObjCLifetime(ExprQuals)) {
+ Self.Diag(SrcExpr.get()->getLocStart(),
+ diag::err_typecheck_incompatible_ownership)
+ << SrcType << DestType << Sema::AA_Casting
+ << SrcExpr.get()->getSourceRange();
+ return;
+ }
+ }
+ }
+ else if (!Self.CheckObjCARCUnavailableWeakConversion(DestType, SrcType)) {
+ Self.Diag(SrcExpr.get()->getLocStart(),
+ diag::err_arc_convesion_of_weak_unavailable)
+ << 1 << SrcType << DestType << SrcExpr.get()->getSourceRange();
+ SrcExpr = ExprError();
+ return;
+ }
+ }
+
+ Kind = Self.PrepareScalarCast(SrcExpr, DestType);
+ if (SrcExpr.isInvalid())
+ return;
+
+ if (Kind == CK_BitCast)
+ checkCastAlign();
+}
+
+ExprResult Sema::BuildCStyleCastExpr(SourceLocation LPLoc,
+ TypeSourceInfo *CastTypeInfo,
+ SourceLocation RPLoc,
+ Expr *CastExpr) {
+ CastOperation Op(*this, CastTypeInfo->getType(), CastExpr);
+ Op.DestRange = CastTypeInfo->getTypeLoc().getSourceRange();
+ Op.OpRange = SourceRange(LPLoc, CastExpr->getLocEnd());
+
+ if (getLangOpts().CPlusPlus) {
+ Op.CheckCXXCStyleCast(/*FunctionalStyle=*/ false,
+ isa<InitListExpr>(CastExpr));
+ } else {
+ Op.CheckCStyleCast();
+ }
+
+ if (Op.SrcExpr.isInvalid())
+ return ExprError();
+
+ return Op.complete(CStyleCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, Op.Kind, Op.SrcExpr.take(),
+ &Op.BasePath, CastTypeInfo, LPLoc, RPLoc));
+}
+
+ExprResult Sema::BuildCXXFunctionalCastExpr(TypeSourceInfo *CastTypeInfo,
+ SourceLocation LPLoc,
+ Expr *CastExpr,
+ SourceLocation RPLoc) {
+ assert(LPLoc.isValid() && "List-initialization shouldn't get here.");
+ CastOperation Op(*this, CastTypeInfo->getType(), CastExpr);
+ Op.DestRange = CastTypeInfo->getTypeLoc().getSourceRange();
+ Op.OpRange = SourceRange(Op.DestRange.getBegin(), CastExpr->getLocEnd());
+
+ Op.CheckCXXCStyleCast(/*FunctionalStyle=*/true, /*ListInit=*/false);
+ if (Op.SrcExpr.isInvalid())
+ return ExprError();
+
+ return Op.complete(CXXFunctionalCastExpr::Create(Context, Op.ResultType,
+ Op.ValueKind, CastTypeInfo, Op.DestRange.getBegin(),
+ Op.Kind, Op.SrcExpr.take(), &Op.BasePath, RPLoc));
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
new file mode 100644
index 0000000..fdc2349
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaChecking.cpp
@@ -0,0 +1,5186 @@
+//===--- SemaChecking.cpp - Extra Semantic Checking -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements extra semantic analysis beyond what is enforced
+// by the C type system.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Analysis/Analyses/FormatString.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/raw_ostream.h"
+#include "clang/Basic/TargetBuiltins.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/ConvertUTF.h"
+#include <limits>
+using namespace clang;
+using namespace sema;
+
+SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
+ unsigned ByteNo) const {
+ return SL->getLocationOfByte(ByteNo, PP.getSourceManager(),
+ PP.getLangOpts(), PP.getTargetInfo());
+}
+
+/// Checks that a call expression's argument count is the desired number.
+/// This is useful when doing custom type-checking. Returns true on error.
+static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
+ unsigned argCount = call->getNumArgs();
+ if (argCount == desiredArgCount) return false;
+
+ if (argCount < desiredArgCount)
+ return S.Diag(call->getLocEnd(), diag::err_typecheck_call_too_few_args)
+ << 0 /*function call*/ << desiredArgCount << argCount
+ << call->getSourceRange();
+
+ // Highlight all the excess arguments.
+ SourceRange range(call->getArg(desiredArgCount)->getLocStart(),
+ call->getArg(argCount - 1)->getLocEnd());
+
+  return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
+    << 0 /*function call*/ << desiredArgCount << argCount
+    << range;
+}
+
+/// CheckBuiltinAnnotationString - Checks that string argument to the builtin
+/// annotation is a non-wide string literal.
+static bool CheckBuiltinAnnotationString(Sema &S, Expr *Arg) {
+ Arg = Arg->IgnoreParenCasts();
+ StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
+ if (!Literal || !Literal->isAscii()) {
+ S.Diag(Arg->getLocStart(), diag::err_builtin_annotation_not_string_constant)
+ << Arg->getSourceRange();
+ return true;
+ }
+ return false;
+}
+
+ExprResult
+Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ ExprResult TheCallResult(Owned(TheCall));
+
+ // Find out if any arguments are required to be integer constant expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ Context.GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ if (Error != ASTContext::GE_None)
+ ICEArguments = 0; // Don't diagnose previously diagnosed errors.
+
+ // If any arguments are required to be ICE's, check and diagnose.
+ for (unsigned ArgNo = 0; ICEArguments != 0; ++ArgNo) {
+ // Skip arguments not required to be ICE's.
+ if ((ICEArguments & (1 << ArgNo)) == 0) continue;
+
+ llvm::APSInt Result;
+ if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
+ return true;
+ ICEArguments &= ~(1 << ArgNo);
+ }
+
+ switch (BuiltinID) {
+ case Builtin::BI__builtin___CFStringMakeConstantString:
+ assert(TheCall->getNumArgs() == 1 &&
+ "Wrong # arguments to builtin CFStringMakeConstantString");
+ if (CheckObjCString(TheCall->getArg(0)))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_stdarg_start:
+ case Builtin::BI__builtin_va_start:
+ if (SemaBuiltinVAStart(TheCall))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_isgreater:
+ case Builtin::BI__builtin_isgreaterequal:
+ case Builtin::BI__builtin_isless:
+ case Builtin::BI__builtin_islessequal:
+ case Builtin::BI__builtin_islessgreater:
+ case Builtin::BI__builtin_isunordered:
+ if (SemaBuiltinUnorderedCompare(TheCall))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_fpclassify:
+ if (SemaBuiltinFPClassification(TheCall, 6))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_isfinite:
+ case Builtin::BI__builtin_isinf:
+ case Builtin::BI__builtin_isinf_sign:
+ case Builtin::BI__builtin_isnan:
+ case Builtin::BI__builtin_isnormal:
+ if (SemaBuiltinFPClassification(TheCall, 1))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_shufflevector:
+ return SemaBuiltinShuffleVector(TheCall);
+ // TheCall will be freed by the smart pointer here, but that's fine, since
+ // SemaBuiltinShuffleVector guts it, but then doesn't release it.
+ case Builtin::BI__builtin_prefetch:
+ if (SemaBuiltinPrefetch(TheCall))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_object_size:
+ if (SemaBuiltinObjectSize(TheCall))
+ return ExprError();
+ break;
+ case Builtin::BI__builtin_longjmp:
+ if (SemaBuiltinLongjmp(TheCall))
+ return ExprError();
+ break;
+
+ case Builtin::BI__builtin_classify_type:
+ if (checkArgCount(*this, TheCall, 1)) return true;
+ TheCall->setType(Context.IntTy);
+ break;
+ case Builtin::BI__builtin_constant_p:
+ if (checkArgCount(*this, TheCall, 1)) return true;
+ TheCall->setType(Context.IntTy);
+ break;
+ case Builtin::BI__sync_fetch_and_add:
+ case Builtin::BI__sync_fetch_and_add_1:
+ case Builtin::BI__sync_fetch_and_add_2:
+ case Builtin::BI__sync_fetch_and_add_4:
+ case Builtin::BI__sync_fetch_and_add_8:
+ case Builtin::BI__sync_fetch_and_add_16:
+ case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_sub_1:
+ case Builtin::BI__sync_fetch_and_sub_2:
+ case Builtin::BI__sync_fetch_and_sub_4:
+ case Builtin::BI__sync_fetch_and_sub_8:
+ case Builtin::BI__sync_fetch_and_sub_16:
+ case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_or_1:
+ case Builtin::BI__sync_fetch_and_or_2:
+ case Builtin::BI__sync_fetch_and_or_4:
+ case Builtin::BI__sync_fetch_and_or_8:
+ case Builtin::BI__sync_fetch_and_or_16:
+ case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_and_1:
+ case Builtin::BI__sync_fetch_and_and_2:
+ case Builtin::BI__sync_fetch_and_and_4:
+ case Builtin::BI__sync_fetch_and_and_8:
+ case Builtin::BI__sync_fetch_and_and_16:
+ case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_fetch_and_xor_1:
+ case Builtin::BI__sync_fetch_and_xor_2:
+ case Builtin::BI__sync_fetch_and_xor_4:
+ case Builtin::BI__sync_fetch_and_xor_8:
+ case Builtin::BI__sync_fetch_and_xor_16:
+ case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_add_and_fetch_1:
+ case Builtin::BI__sync_add_and_fetch_2:
+ case Builtin::BI__sync_add_and_fetch_4:
+ case Builtin::BI__sync_add_and_fetch_8:
+ case Builtin::BI__sync_add_and_fetch_16:
+ case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch_1:
+ case Builtin::BI__sync_sub_and_fetch_2:
+ case Builtin::BI__sync_sub_and_fetch_4:
+ case Builtin::BI__sync_sub_and_fetch_8:
+ case Builtin::BI__sync_sub_and_fetch_16:
+ case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_and_and_fetch_1:
+ case Builtin::BI__sync_and_and_fetch_2:
+ case Builtin::BI__sync_and_and_fetch_4:
+ case Builtin::BI__sync_and_and_fetch_8:
+ case Builtin::BI__sync_and_and_fetch_16:
+ case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_or_and_fetch_1:
+ case Builtin::BI__sync_or_and_fetch_2:
+ case Builtin::BI__sync_or_and_fetch_4:
+ case Builtin::BI__sync_or_and_fetch_8:
+ case Builtin::BI__sync_or_and_fetch_16:
+ case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch_1:
+ case Builtin::BI__sync_xor_and_fetch_2:
+ case Builtin::BI__sync_xor_and_fetch_4:
+ case Builtin::BI__sync_xor_and_fetch_8:
+ case Builtin::BI__sync_xor_and_fetch_16:
+ case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_val_compare_and_swap_1:
+ case Builtin::BI__sync_val_compare_and_swap_2:
+ case Builtin::BI__sync_val_compare_and_swap_4:
+ case Builtin::BI__sync_val_compare_and_swap_8:
+ case Builtin::BI__sync_val_compare_and_swap_16:
+ case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap_1:
+ case Builtin::BI__sync_bool_compare_and_swap_2:
+ case Builtin::BI__sync_bool_compare_and_swap_4:
+ case Builtin::BI__sync_bool_compare_and_swap_8:
+ case Builtin::BI__sync_bool_compare_and_swap_16:
+ case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_test_and_set_1:
+ case Builtin::BI__sync_lock_test_and_set_2:
+ case Builtin::BI__sync_lock_test_and_set_4:
+ case Builtin::BI__sync_lock_test_and_set_8:
+ case Builtin::BI__sync_lock_test_and_set_16:
+ case Builtin::BI__sync_lock_release:
+ case Builtin::BI__sync_lock_release_1:
+ case Builtin::BI__sync_lock_release_2:
+ case Builtin::BI__sync_lock_release_4:
+ case Builtin::BI__sync_lock_release_8:
+ case Builtin::BI__sync_lock_release_16:
+ case Builtin::BI__sync_swap:
+ case Builtin::BI__sync_swap_1:
+ case Builtin::BI__sync_swap_2:
+ case Builtin::BI__sync_swap_4:
+ case Builtin::BI__sync_swap_8:
+ case Builtin::BI__sync_swap_16:
+ return SemaBuiltinAtomicOverloaded(move(TheCallResult));
+#define BUILTIN(ID, TYPE, ATTRS)
+#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
+ case Builtin::BI##ID: \
+ return SemaAtomicOpsOverloaded(move(TheCallResult), AtomicExpr::AO##ID);
+#include "clang/Basic/Builtins.def"
+ case Builtin::BI__builtin_annotation:
+ if (CheckBuiltinAnnotationString(*this, TheCall->getArg(1)))
+ return ExprError();
+ break;
+ }
+
+ // Since the target specific builtins for each arch overlap, only check those
+ // of the arch we are compiling for.
+ if (BuiltinID >= Builtin::FirstTSBuiltin) {
+ switch (Context.getTargetInfo().getTriple().getArch()) {
+ case llvm::Triple::arm:
+ case llvm::Triple::thumb:
+ if (CheckARMBuiltinFunctionCall(BuiltinID, TheCall))
+ return ExprError();
+ break;
+ default:
+ break;
+ }
+ }
+
+ return move(TheCallResult);
+}
+
+// Get the valid immediate range for the specified NEON type code.
+static unsigned RFT(unsigned t, bool shift = false) {
+ NeonTypeFlags Type(t);
+ int IsQuad = Type.isQuad();
+ switch (Type.getEltType()) {
+ case NeonTypeFlags::Int8:
+ case NeonTypeFlags::Poly8:
+ return shift ? 7 : (8 << IsQuad) - 1;
+ case NeonTypeFlags::Int16:
+ case NeonTypeFlags::Poly16:
+ return shift ? 15 : (4 << IsQuad) - 1;
+ case NeonTypeFlags::Int32:
+ return shift ? 31 : (2 << IsQuad) - 1;
+ case NeonTypeFlags::Int64:
+ return shift ? 63 : (1 << IsQuad) - 1;
+ case NeonTypeFlags::Float16:
+ assert(!shift && "cannot shift float types!");
+ return (4 << IsQuad) - 1;
+ case NeonTypeFlags::Float32:
+ assert(!shift && "cannot shift float types!");
+ return (2 << IsQuad) - 1;
+ }
+ llvm_unreachable("Invalid NeonTypeFlag!");
+}
+
+/// getNeonEltType - Return the QualType corresponding to the elements of
+/// the vector type specified by the NeonTypeFlags. This is used to check
+/// the pointer arguments for Neon load/store intrinsics.
+static QualType getNeonEltType(NeonTypeFlags Flags, ASTContext &Context) {
+ switch (Flags.getEltType()) {
+ case NeonTypeFlags::Int8:
+ return Flags.isUnsigned() ? Context.UnsignedCharTy : Context.SignedCharTy;
+ case NeonTypeFlags::Int16:
+ return Flags.isUnsigned() ? Context.UnsignedShortTy : Context.ShortTy;
+ case NeonTypeFlags::Int32:
+ return Flags.isUnsigned() ? Context.UnsignedIntTy : Context.IntTy;
+ case NeonTypeFlags::Int64:
+ return Flags.isUnsigned() ? Context.UnsignedLongLongTy : Context.LongLongTy;
+ case NeonTypeFlags::Poly8:
+ return Context.SignedCharTy;
+ case NeonTypeFlags::Poly16:
+ return Context.ShortTy;
+ case NeonTypeFlags::Float16:
+ return Context.UnsignedShortTy;
+ case NeonTypeFlags::Float32:
+ return Context.FloatTy;
+ }
+ llvm_unreachable("Invalid NeonTypeFlag!");
+}
+
+bool Sema::CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
+ llvm::APSInt Result;
+
+ unsigned mask = 0;
+ unsigned TV = 0;
+ int PtrArgNum = -1;
+ bool HasConstPtr = false;
+ switch (BuiltinID) {
+#define GET_NEON_OVERLOAD_CHECK
+#include "clang/Basic/arm_neon.inc"
+#undef GET_NEON_OVERLOAD_CHECK
+ }
+
+ // For NEON intrinsics which are overloaded on vector element type, validate
+ // the immediate which specifies which variant to emit.
+ unsigned ImmArg = TheCall->getNumArgs()-1;
+ if (mask) {
+ if (SemaBuiltinConstantArg(TheCall, ImmArg, Result))
+ return true;
+
+ TV = Result.getLimitedValue(64);
+ if ((TV > 63) || (mask & (1 << TV)) == 0)
+ return Diag(TheCall->getLocStart(), diag::err_invalid_neon_type_code)
+ << TheCall->getArg(ImmArg)->getSourceRange();
+ }
+
+ if (PtrArgNum >= 0) {
+ // Check that pointer arguments have the specified type.
+ Expr *Arg = TheCall->getArg(PtrArgNum);
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Arg))
+ Arg = ICE->getSubExpr();
+ ExprResult RHS = DefaultFunctionArrayLvalueConversion(Arg);
+ QualType RHSTy = RHS.get()->getType();
+ QualType EltTy = getNeonEltType(NeonTypeFlags(TV), Context);
+ if (HasConstPtr)
+ EltTy = EltTy.withConst();
+ QualType LHSTy = Context.getPointerType(EltTy);
+ AssignConvertType ConvTy;
+ ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
+ if (RHS.isInvalid())
+ return true;
+ if (DiagnoseAssignmentResult(ConvTy, Arg->getLocStart(), LHSTy, RHSTy,
+ RHS.get(), AA_Assigning))
+ return true;
+ }
+
+ // For NEON intrinsics which take an immediate value as part of the
+ // instruction, range check them here.
+ unsigned i = 0, l = 0, u = 0;
+ switch (BuiltinID) {
+ default: return false;
+ case ARM::BI__builtin_arm_ssat: i = 1; l = 1; u = 31; break;
+ case ARM::BI__builtin_arm_usat: i = 1; u = 31; break;
+ case ARM::BI__builtin_arm_vcvtr_f:
+ case ARM::BI__builtin_arm_vcvtr_d: i = 1; u = 1; break;
+#define GET_NEON_IMMEDIATE_CHECK
+#include "clang/Basic/arm_neon.inc"
+#undef GET_NEON_IMMEDIATE_CHECK
+  }
+
+ // Check that the immediate argument is actually a constant.
+ if (SemaBuiltinConstantArg(TheCall, i, Result))
+ return true;
+
+  // Range check against the upper/lower values for this instruction.
+ unsigned Val = Result.getZExtValue();
+ if (Val < l || Val > (u + l))
+ return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
+ << l << u+l << TheCall->getArg(i)->getSourceRange();
+
+ // FIXME: VFP Intrinsics should error if VFP not present.
+ return false;
+}
+
+/// CheckFunctionCall - Check a direct function call for various correctness
+/// and safety properties not strictly enforced by the C type system.
+bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall) {
+ // Get the IdentifierInfo* for the called function.
+ IdentifierInfo *FnInfo = FDecl->getIdentifier();
+
+ // None of the checks below are needed for functions that don't have
+ // simple names (e.g., C++ conversion functions).
+ if (!FnInfo)
+ return false;
+
+ // FIXME: This mechanism should be abstracted to be less fragile and
+ // more efficient. For example, just map function ids to custom
+ // handlers.
+
+ // Printf and scanf checking.
+ for (specific_attr_iterator<FormatAttr>
+ i = FDecl->specific_attr_begin<FormatAttr>(),
+ e = FDecl->specific_attr_end<FormatAttr>(); i != e ; ++i) {
+ CheckFormatArguments(*i, TheCall);
+ }
+
+ for (specific_attr_iterator<NonNullAttr>
+ i = FDecl->specific_attr_begin<NonNullAttr>(),
+ e = FDecl->specific_attr_end<NonNullAttr>(); i != e; ++i) {
+ CheckNonNullArguments(*i, TheCall->getArgs(),
+ TheCall->getCallee()->getLocStart());
+ }
+
+ unsigned CMId = FDecl->getMemoryFunctionKind();
+ if (CMId == 0)
+ return false;
+
+ // Handle memory setting and copying functions.
+ if (CMId == Builtin::BIstrlcpy || CMId == Builtin::BIstrlcat)
+ CheckStrlcpycatArguments(TheCall, FnInfo);
+ else if (CMId == Builtin::BIstrncat)
+ CheckStrncatArguments(TheCall, FnInfo);
+ else
+ CheckMemaccessArguments(TheCall, CMId, FnInfo);
+
+ return false;
+}
+
+bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
+ Expr **Args, unsigned NumArgs) {
+ for (specific_attr_iterator<FormatAttr>
+ i = Method->specific_attr_begin<FormatAttr>(),
+ e = Method->specific_attr_end<FormatAttr>(); i != e ; ++i) {
+
+ CheckFormatArguments(*i, Args, NumArgs, false, lbrac,
+ Method->getSourceRange());
+ }
+
+ // diagnose nonnull arguments.
+ for (specific_attr_iterator<NonNullAttr>
+ i = Method->specific_attr_begin<NonNullAttr>(),
+ e = Method->specific_attr_end<NonNullAttr>(); i != e; ++i) {
+ CheckNonNullArguments(*i, Args, lbrac);
+ }
+
+ return false;
+}
+
+bool Sema::CheckBlockCall(NamedDecl *NDecl, CallExpr *TheCall) {
+ const VarDecl *V = dyn_cast<VarDecl>(NDecl);
+ if (!V)
+ return false;
+
+ QualType Ty = V->getType();
+ if (!Ty->isBlockPointerType())
+ return false;
+
+ // format string checking.
+ for (specific_attr_iterator<FormatAttr>
+ i = NDecl->specific_attr_begin<FormatAttr>(),
+ e = NDecl->specific_attr_end<FormatAttr>(); i != e ; ++i) {
+ CheckFormatArguments(*i, TheCall);
+ }
+
+ return false;
+}
+
+ExprResult Sema::SemaAtomicOpsOverloaded(ExprResult TheCallResult,
+ AtomicExpr::AtomicOp Op) {
+ CallExpr *TheCall = cast<CallExpr>(TheCallResult.get());
+ DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+
+ // All these operations take one of the following forms:
+ enum {
+ // C __c11_atomic_init(A *, C)
+ Init,
+ // C __c11_atomic_load(A *, int)
+ Load,
+ // void __atomic_load(A *, CP, int)
+ Copy,
+ // C __c11_atomic_add(A *, M, int)
+ Arithmetic,
+ // C __atomic_exchange_n(A *, CP, int)
+ Xchg,
+ // void __atomic_exchange(A *, C *, CP, int)
+ GNUXchg,
+ // bool __c11_atomic_compare_exchange_strong(A *, C *, CP, int, int)
+ C11CmpXchg,
+ // bool __atomic_compare_exchange(A *, C *, CP, bool, int, int)
+ GNUCmpXchg
+ } Form = Init;
+ const unsigned NumArgs[] = { 2, 2, 3, 3, 3, 4, 5, 6 };
+ const unsigned NumVals[] = { 1, 0, 1, 1, 1, 2, 2, 3 };
+ // where:
+ // C is an appropriate type,
+ // A is volatile _Atomic(C) for __c11 builtins and is C for GNU builtins,
+ // CP is C for __c11 builtins and GNU _n builtins and is C * otherwise,
+ // M is C if C is an integer, and ptrdiff_t if C is a pointer, and
+ // the int parameters are for orderings.
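+  //
+  // As an illustrative sketch (not the only accepted spellings), the C11
+  // 'Arithmetic' and GNU 'GNUCmpXchg' forms correspond to calls such as:
+  //   _Atomic(int) a;  int expected = 0, desired = 1, g = 0;
+  //   __c11_atomic_fetch_add(&a, 1, __ATOMIC_SEQ_CST);
+  //   __atomic_compare_exchange(&g, &expected, &desired, /*weak=*/false,
+  //                             __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);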
+
+ assert(AtomicExpr::AO__c11_atomic_init == 0 &&
+ AtomicExpr::AO__c11_atomic_fetch_xor + 1 == AtomicExpr::AO__atomic_load
+ && "need to update code for modified C11 atomics");
+ bool IsC11 = Op >= AtomicExpr::AO__c11_atomic_init &&
+ Op <= AtomicExpr::AO__c11_atomic_fetch_xor;
+ bool IsN = Op == AtomicExpr::AO__atomic_load_n ||
+ Op == AtomicExpr::AO__atomic_store_n ||
+ Op == AtomicExpr::AO__atomic_exchange_n ||
+ Op == AtomicExpr::AO__atomic_compare_exchange_n;
+ bool IsAddSub = false;
+
+ switch (Op) {
+ case AtomicExpr::AO__c11_atomic_init:
+ Form = Init;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_load:
+ case AtomicExpr::AO__atomic_load_n:
+ Form = Load;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_store:
+ case AtomicExpr::AO__atomic_load:
+ case AtomicExpr::AO__atomic_store:
+ case AtomicExpr::AO__atomic_store_n:
+ Form = Copy;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_add_fetch:
+ case AtomicExpr::AO__atomic_sub_fetch:
+ IsAddSub = true;
+ // Fall through.
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_nand:
+ case AtomicExpr::AO__atomic_and_fetch:
+ case AtomicExpr::AO__atomic_or_fetch:
+ case AtomicExpr::AO__atomic_xor_fetch:
+ case AtomicExpr::AO__atomic_nand_fetch:
+ Form = Arithmetic;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_exchange:
+ case AtomicExpr::AO__atomic_exchange_n:
+ Form = Xchg;
+ break;
+
+ case AtomicExpr::AO__atomic_exchange:
+ Form = GNUXchg;
+ break;
+
+ case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
+ case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
+ Form = C11CmpXchg;
+ break;
+
+ case AtomicExpr::AO__atomic_compare_exchange:
+ case AtomicExpr::AO__atomic_compare_exchange_n:
+ Form = GNUCmpXchg;
+ break;
+ }
+
+ // Check we have the right number of arguments.
+ if (TheCall->getNumArgs() < NumArgs[Form]) {
+ Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
+ << 0 << NumArgs[Form] << TheCall->getNumArgs()
+ << TheCall->getCallee()->getSourceRange();
+ return ExprError();
+ } else if (TheCall->getNumArgs() > NumArgs[Form]) {
+ Diag(TheCall->getArg(NumArgs[Form])->getLocStart(),
+ diag::err_typecheck_call_too_many_args)
+ << 0 << NumArgs[Form] << TheCall->getNumArgs()
+ << TheCall->getCallee()->getSourceRange();
+ return ExprError();
+ }
+
+ // Inspect the first argument of the atomic operation.
+ Expr *Ptr = TheCall->getArg(0);
+ Ptr = DefaultFunctionArrayLvalueConversion(Ptr).get();
+ const PointerType *pointerType = Ptr->getType()->getAs<PointerType>();
+ if (!pointerType) {
+ Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
+ << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+
+ // For a __c11 builtin, this should be a pointer to an _Atomic type.
+ QualType AtomTy = pointerType->getPointeeType(); // 'A'
+ QualType ValType = AtomTy; // 'C'
+ if (IsC11) {
+ if (!AtomTy->isAtomicType()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic)
+ << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+ ValType = AtomTy->getAs<AtomicType>()->getValueType();
+ }
+
+ // For an arithmetic operation, the implied arithmetic must be well-formed.
+ if (Form == Arithmetic) {
+ // gcc does not enforce these rules for GNU atomics, but we do so for sanity.
+ if (IsAddSub && !ValType->isIntegerType() && !ValType->isPointerType()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr)
+ << IsC11 << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+ if (!IsAddSub && !ValType->isIntegerType()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_op_bitwise_needs_atomic_int)
+ << IsC11 << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+ } else if (IsN && !ValType->isIntegerType() && !ValType->isPointerType()) {
+ // For __atomic_*_n operations, the value type must be a scalar integral or
+ // pointer type which is 1, 2, 4, 8 or 16 bytes in length.
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_atomic_int_or_ptr)
+ << IsC11 << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+
+ if (!IsC11 && !AtomTy.isTriviallyCopyableType(Context)) {
+ // For GNU atomics, require a trivially-copyable type. This is not part of
+ // the GNU atomics specification, but we enforce it for sanity.
+ Diag(DRE->getLocStart(), diag::err_atomic_op_needs_trivial_copy)
+ << Ptr->getType() << Ptr->getSourceRange();
+ return ExprError();
+ }
+
+ // FIXME: For any builtin other than a load, the ValType must not be
+ // const-qualified.
+
+ switch (ValType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ // okay
+ break;
+
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Autoreleasing:
+ // FIXME: Can this happen? By this point, ValType should be known
+ // to be trivially copyable.
+ Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
+ << ValType << Ptr->getSourceRange();
+ return ExprError();
+ }
+
+ QualType ResultType = ValType;
+ if (Form == Copy || Form == GNUXchg || Form == Init)
+ ResultType = Context.VoidTy;
+ else if (Form == C11CmpXchg || Form == GNUCmpXchg)
+ ResultType = Context.BoolTy;
+
+ // The type of a parameter passed 'by value'. In the GNU atomics, such
+ // arguments are actually passed as pointers.
+ QualType ByValType = ValType; // 'CP'
+ if (!IsC11 && !IsN)
+ ByValType = Ptr->getType();
+
+ // The first argument --- the pointer --- has a fixed type; we
+ // deduce the types of the rest of the arguments accordingly. Walk
+ // the remaining arguments, converting them to the deduced value type.
+ for (unsigned i = 1; i != NumArgs[Form]; ++i) {
+ QualType Ty;
+ if (i < NumVals[Form] + 1) {
+ switch (i) {
+ case 1:
+ // The second argument is the non-atomic operand. For arithmetic, this
+ // is always passed by value, and for a compare_exchange it is always
+ // passed by address. For the rest, GNU uses by-address and C11 uses
+ // by-value.
+ assert(Form != Load);
+ if (Form == Init || (Form == Arithmetic && ValType->isIntegerType()))
+ Ty = ValType;
+ else if (Form == Copy || Form == Xchg)
+ Ty = ByValType;
+ else if (Form == Arithmetic)
+ Ty = Context.getPointerDiffType();
+ else
+ Ty = Context.getPointerType(ValType.getUnqualifiedType());
+ break;
+ case 2:
+ // The third argument to compare_exchange / GNU exchange is a
+ // (pointer to a) desired value.
+ Ty = ByValType;
+ break;
+ case 3:
+ // The fourth argument to GNU compare_exchange is a 'weak' flag.
+ Ty = Context.BoolTy;
+ break;
+ }
+ } else {
+ // The order(s) are always converted to int.
+ Ty = Context.IntTy;
+ }
+
+ InitializedEntity Entity =
+ InitializedEntity::InitializeParameter(Context, Ty, false);
+ ExprResult Arg = TheCall->getArg(i);
+ Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
+ if (Arg.isInvalid())
+ return true;
+ TheCall->setArg(i, Arg.get());
+ }
+
+ // Permute the arguments into a 'consistent' order.
+ SmallVector<Expr*, 5> SubExprs;
+ SubExprs.push_back(Ptr);
+ switch (Form) {
+ case Init:
+ // Note, AtomicExpr::getVal1() has a special case for this atomic.
+ SubExprs.push_back(TheCall->getArg(1)); // Val1
+ break;
+ case Load:
+ SubExprs.push_back(TheCall->getArg(1)); // Order
+ break;
+ case Copy:
+ case Arithmetic:
+ case Xchg:
+ SubExprs.push_back(TheCall->getArg(2)); // Order
+ SubExprs.push_back(TheCall->getArg(1)); // Val1
+ break;
+ case GNUXchg:
+ // Note, AtomicExpr::getVal2() has a special case for this atomic.
+ SubExprs.push_back(TheCall->getArg(3)); // Order
+ SubExprs.push_back(TheCall->getArg(1)); // Val1
+ SubExprs.push_back(TheCall->getArg(2)); // Val2
+ break;
+ case C11CmpXchg:
+ SubExprs.push_back(TheCall->getArg(3)); // Order
+ SubExprs.push_back(TheCall->getArg(1)); // Val1
+ SubExprs.push_back(TheCall->getArg(4)); // OrderFail
+ SubExprs.push_back(TheCall->getArg(2)); // Val2
+ break;
+ case GNUCmpXchg:
+ SubExprs.push_back(TheCall->getArg(4)); // Order
+ SubExprs.push_back(TheCall->getArg(1)); // Val1
+ SubExprs.push_back(TheCall->getArg(5)); // OrderFail
+ SubExprs.push_back(TheCall->getArg(2)); // Val2
+ SubExprs.push_back(TheCall->getArg(3)); // Weak
+ break;
+ }
+
+ return Owned(new (Context) AtomicExpr(TheCall->getCallee()->getLocStart(),
+ SubExprs.data(), SubExprs.size(),
+ ResultType, Op,
+ TheCall->getRParenLoc()));
+}
+
+
+/// checkBuiltinArgument - Given a call to a builtin function, perform
+/// normal type-checking on the given argument, updating the call in
+/// place. This is useful when a builtin function requires custom
+/// type-checking for some of its arguments but not necessarily all of
+/// them.
+///
+/// Returns true on error.
+static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
+ FunctionDecl *Fn = E->getDirectCallee();
+ assert(Fn && "builtin call without direct callee!");
+
+ ParmVarDecl *Param = Fn->getParamDecl(ArgIndex);
+ InitializedEntity Entity =
+ InitializedEntity::InitializeParameter(S.Context, Param);
+
+  ExprResult Arg = E->getArg(ArgIndex);
+ Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
+ if (Arg.isInvalid())
+ return true;
+
+ E->setArg(ArgIndex, Arg.take());
+ return false;
+}
+
+/// SemaBuiltinAtomicOverloaded - We have a call to a function like
+/// __sync_fetch_and_add, which is an overloaded function based on the pointer
+/// type of its first argument. The main ActOnCallExpr routines have already
+/// promoted the types of arguments because all of these calls are prototyped as
+/// void(...).
+///
+/// This function goes through and does final semantic checking for these
+/// builtins.
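+///
+/// As a rough sketch of what reaches this point, a call such as
+///   short s = 0;
+///   short old = __sync_fetch_and_add(&s, 1);
+/// is mapped below onto the size-specific builtin __sync_fetch_and_add_2.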
+ExprResult
+Sema::SemaBuiltinAtomicOverloaded(ExprResult TheCallResult) {
+ CallExpr *TheCall = (CallExpr *)TheCallResult.get();
+ DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+ FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
+
+ // Ensure that we have at least one argument to do type inference from.
+ if (TheCall->getNumArgs() < 1) {
+ Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least)
+ << 0 << 1 << TheCall->getNumArgs()
+ << TheCall->getCallee()->getSourceRange();
+ return ExprError();
+ }
+
+ // Inspect the first argument of the atomic builtin. This should always be
+ // a pointer type, whose element is an integral scalar or pointer type.
+ // Because it is a pointer type, we don't have to worry about any implicit
+ // casts here.
+ // FIXME: We don't allow floating point scalars as input.
+ Expr *FirstArg = TheCall->getArg(0);
+ ExprResult FirstArgResult = DefaultFunctionArrayLvalueConversion(FirstArg);
+ if (FirstArgResult.isInvalid())
+ return ExprError();
+ FirstArg = FirstArgResult.take();
+ TheCall->setArg(0, FirstArg);
+
+ const PointerType *pointerType = FirstArg->getType()->getAs<PointerType>();
+ if (!pointerType) {
+ Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer)
+ << FirstArg->getType() << FirstArg->getSourceRange();
+ return ExprError();
+ }
+
+ QualType ValType = pointerType->getPointeeType();
+ if (!ValType->isIntegerType() && !ValType->isAnyPointerType() &&
+ !ValType->isBlockPointerType()) {
+ Diag(DRE->getLocStart(), diag::err_atomic_builtin_must_be_pointer_intptr)
+ << FirstArg->getType() << FirstArg->getSourceRange();
+ return ExprError();
+ }
+
+ switch (ValType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ // okay
+ break;
+
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Autoreleasing:
+ Diag(DRE->getLocStart(), diag::err_arc_atomic_ownership)
+ << ValType << FirstArg->getSourceRange();
+ return ExprError();
+ }
+
+ // Strip any qualifiers off ValType.
+ ValType = ValType.getUnqualifiedType();
+
+ // The majority of builtins return a value, but a few have special return
+ // types, so allow them to override appropriately below.
+ QualType ResultType = ValType;
+
+ // We need to figure out which concrete builtin this maps onto. For example,
+ // __sync_fetch_and_add with a 2 byte object turns into
+ // __sync_fetch_and_add_2.
+#define BUILTIN_ROW(x) \
+ { Builtin::BI##x##_1, Builtin::BI##x##_2, Builtin::BI##x##_4, \
+ Builtin::BI##x##_8, Builtin::BI##x##_16 }
+
+ static const unsigned BuiltinIndices[][5] = {
+ BUILTIN_ROW(__sync_fetch_and_add),
+ BUILTIN_ROW(__sync_fetch_and_sub),
+ BUILTIN_ROW(__sync_fetch_and_or),
+ BUILTIN_ROW(__sync_fetch_and_and),
+ BUILTIN_ROW(__sync_fetch_and_xor),
+
+ BUILTIN_ROW(__sync_add_and_fetch),
+ BUILTIN_ROW(__sync_sub_and_fetch),
+ BUILTIN_ROW(__sync_and_and_fetch),
+ BUILTIN_ROW(__sync_or_and_fetch),
+ BUILTIN_ROW(__sync_xor_and_fetch),
+
+ BUILTIN_ROW(__sync_val_compare_and_swap),
+ BUILTIN_ROW(__sync_bool_compare_and_swap),
+ BUILTIN_ROW(__sync_lock_test_and_set),
+ BUILTIN_ROW(__sync_lock_release),
+ BUILTIN_ROW(__sync_swap)
+ };
+#undef BUILTIN_ROW
+
+ // Determine the index of the size.
+ unsigned SizeIndex;
+ switch (Context.getTypeSizeInChars(ValType).getQuantity()) {
+ case 1: SizeIndex = 0; break;
+ case 2: SizeIndex = 1; break;
+ case 4: SizeIndex = 2; break;
+ case 8: SizeIndex = 3; break;
+ case 16: SizeIndex = 4; break;
+ default:
+ Diag(DRE->getLocStart(), diag::err_atomic_builtin_pointer_size)
+ << FirstArg->getType() << FirstArg->getSourceRange();
+ return ExprError();
+ }
+
+ // Each of these builtins has one pointer argument, followed by some number of
+  // values (0, 1 or 2) followed by a potentially empty varargs list of stuff
+ // that we ignore. Find out which row of BuiltinIndices to read from as well
+ // as the number of fixed args.
+ unsigned BuiltinID = FDecl->getBuiltinID();
+ unsigned BuiltinIndex, NumFixed = 1;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unknown overloaded atomic builtin!");
+ case Builtin::BI__sync_fetch_and_add:
+ case Builtin::BI__sync_fetch_and_add_1:
+ case Builtin::BI__sync_fetch_and_add_2:
+ case Builtin::BI__sync_fetch_and_add_4:
+ case Builtin::BI__sync_fetch_and_add_8:
+ case Builtin::BI__sync_fetch_and_add_16:
+ BuiltinIndex = 0;
+ break;
+
+ case Builtin::BI__sync_fetch_and_sub:
+ case Builtin::BI__sync_fetch_and_sub_1:
+ case Builtin::BI__sync_fetch_and_sub_2:
+ case Builtin::BI__sync_fetch_and_sub_4:
+ case Builtin::BI__sync_fetch_and_sub_8:
+ case Builtin::BI__sync_fetch_and_sub_16:
+ BuiltinIndex = 1;
+ break;
+
+ case Builtin::BI__sync_fetch_and_or:
+ case Builtin::BI__sync_fetch_and_or_1:
+ case Builtin::BI__sync_fetch_and_or_2:
+ case Builtin::BI__sync_fetch_and_or_4:
+ case Builtin::BI__sync_fetch_and_or_8:
+ case Builtin::BI__sync_fetch_and_or_16:
+ BuiltinIndex = 2;
+ break;
+
+ case Builtin::BI__sync_fetch_and_and:
+ case Builtin::BI__sync_fetch_and_and_1:
+ case Builtin::BI__sync_fetch_and_and_2:
+ case Builtin::BI__sync_fetch_and_and_4:
+ case Builtin::BI__sync_fetch_and_and_8:
+ case Builtin::BI__sync_fetch_and_and_16:
+ BuiltinIndex = 3;
+ break;
+
+ case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_fetch_and_xor_1:
+ case Builtin::BI__sync_fetch_and_xor_2:
+ case Builtin::BI__sync_fetch_and_xor_4:
+ case Builtin::BI__sync_fetch_and_xor_8:
+ case Builtin::BI__sync_fetch_and_xor_16:
+ BuiltinIndex = 4;
+ break;
+
+ case Builtin::BI__sync_add_and_fetch:
+ case Builtin::BI__sync_add_and_fetch_1:
+ case Builtin::BI__sync_add_and_fetch_2:
+ case Builtin::BI__sync_add_and_fetch_4:
+ case Builtin::BI__sync_add_and_fetch_8:
+ case Builtin::BI__sync_add_and_fetch_16:
+ BuiltinIndex = 5;
+ break;
+
+ case Builtin::BI__sync_sub_and_fetch:
+ case Builtin::BI__sync_sub_and_fetch_1:
+ case Builtin::BI__sync_sub_and_fetch_2:
+ case Builtin::BI__sync_sub_and_fetch_4:
+ case Builtin::BI__sync_sub_and_fetch_8:
+ case Builtin::BI__sync_sub_and_fetch_16:
+ BuiltinIndex = 6;
+ break;
+
+ case Builtin::BI__sync_and_and_fetch:
+ case Builtin::BI__sync_and_and_fetch_1:
+ case Builtin::BI__sync_and_and_fetch_2:
+ case Builtin::BI__sync_and_and_fetch_4:
+ case Builtin::BI__sync_and_and_fetch_8:
+ case Builtin::BI__sync_and_and_fetch_16:
+ BuiltinIndex = 7;
+ break;
+
+ case Builtin::BI__sync_or_and_fetch:
+ case Builtin::BI__sync_or_and_fetch_1:
+ case Builtin::BI__sync_or_and_fetch_2:
+ case Builtin::BI__sync_or_and_fetch_4:
+ case Builtin::BI__sync_or_and_fetch_8:
+ case Builtin::BI__sync_or_and_fetch_16:
+ BuiltinIndex = 8;
+ break;
+
+ case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_xor_and_fetch_1:
+ case Builtin::BI__sync_xor_and_fetch_2:
+ case Builtin::BI__sync_xor_and_fetch_4:
+ case Builtin::BI__sync_xor_and_fetch_8:
+ case Builtin::BI__sync_xor_and_fetch_16:
+ BuiltinIndex = 9;
+ break;
+
+ case Builtin::BI__sync_val_compare_and_swap:
+ case Builtin::BI__sync_val_compare_and_swap_1:
+ case Builtin::BI__sync_val_compare_and_swap_2:
+ case Builtin::BI__sync_val_compare_and_swap_4:
+ case Builtin::BI__sync_val_compare_and_swap_8:
+ case Builtin::BI__sync_val_compare_and_swap_16:
+ BuiltinIndex = 10;
+ NumFixed = 2;
+ break;
+
+ case Builtin::BI__sync_bool_compare_and_swap:
+ case Builtin::BI__sync_bool_compare_and_swap_1:
+ case Builtin::BI__sync_bool_compare_and_swap_2:
+ case Builtin::BI__sync_bool_compare_and_swap_4:
+ case Builtin::BI__sync_bool_compare_and_swap_8:
+ case Builtin::BI__sync_bool_compare_and_swap_16:
+ BuiltinIndex = 11;
+ NumFixed = 2;
+ ResultType = Context.BoolTy;
+ break;
+
+ case Builtin::BI__sync_lock_test_and_set:
+ case Builtin::BI__sync_lock_test_and_set_1:
+ case Builtin::BI__sync_lock_test_and_set_2:
+ case Builtin::BI__sync_lock_test_and_set_4:
+ case Builtin::BI__sync_lock_test_and_set_8:
+ case Builtin::BI__sync_lock_test_and_set_16:
+ BuiltinIndex = 12;
+ break;
+
+ case Builtin::BI__sync_lock_release:
+ case Builtin::BI__sync_lock_release_1:
+ case Builtin::BI__sync_lock_release_2:
+ case Builtin::BI__sync_lock_release_4:
+ case Builtin::BI__sync_lock_release_8:
+ case Builtin::BI__sync_lock_release_16:
+ BuiltinIndex = 13;
+ NumFixed = 0;
+ ResultType = Context.VoidTy;
+ break;
+
+ case Builtin::BI__sync_swap:
+ case Builtin::BI__sync_swap_1:
+ case Builtin::BI__sync_swap_2:
+ case Builtin::BI__sync_swap_4:
+ case Builtin::BI__sync_swap_8:
+ case Builtin::BI__sync_swap_16:
+ BuiltinIndex = 14;
+ break;
+ }
+
+ // Now that we know how many fixed arguments we expect, first check that we
+ // have at least that many.
+ if (TheCall->getNumArgs() < 1+NumFixed) {
+ Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args_at_least)
+ << 0 << 1+NumFixed << TheCall->getNumArgs()
+ << TheCall->getCallee()->getSourceRange();
+ return ExprError();
+ }
+
+ // Get the decl for the concrete builtin from this, we can tell what the
+ // concrete integer type we should convert to is.
+ unsigned NewBuiltinID = BuiltinIndices[BuiltinIndex][SizeIndex];
+ const char *NewBuiltinName = Context.BuiltinInfo.GetName(NewBuiltinID);
+ IdentifierInfo *NewBuiltinII = PP.getIdentifierInfo(NewBuiltinName);
+ FunctionDecl *NewBuiltinDecl =
+ cast<FunctionDecl>(LazilyCreateBuiltin(NewBuiltinII, NewBuiltinID,
+ TUScope, false, DRE->getLocStart()));
+
+ // The first argument --- the pointer --- has a fixed type; we
+ // deduce the types of the rest of the arguments accordingly. Walk
+ // the remaining arguments, converting them to the deduced value type.
+ for (unsigned i = 0; i != NumFixed; ++i) {
+ ExprResult Arg = TheCall->getArg(i+1);
+
+ // GCC does an implicit conversion to the pointer or integer ValType. This
+ // can fail in some cases (1i -> int**), check for this error case now.
+ // Initialize the argument.
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
+ ValType, /*consume*/ false);
+ Arg = PerformCopyInitialization(Entity, SourceLocation(), Arg);
+ if (Arg.isInvalid())
+ return ExprError();
+
+ // Okay, we have something that *can* be converted to the right type. Check
+ // to see if there is a potentially weird extension going on here. This can
+  // happen when you do an atomic operation on something like a char* and
+ // pass in 42. The 42 gets converted to char. This is even more strange
+ // for things like 45.123 -> char, etc.
+ // FIXME: Do this check.
+ TheCall->setArg(i+1, Arg.take());
+ }
+
+ ASTContext& Context = this->getASTContext();
+
+ // Create a new DeclRefExpr to refer to the new decl.
+ DeclRefExpr* NewDRE = DeclRefExpr::Create(
+ Context,
+ DRE->getQualifierLoc(),
+ SourceLocation(),
+ NewBuiltinDecl,
+ /*enclosing*/ false,
+ DRE->getLocation(),
+ NewBuiltinDecl->getType(),
+ DRE->getValueKind());
+
+ // Set the callee in the CallExpr.
+ // FIXME: This leaks the original parens and implicit casts.
+ ExprResult PromotedCall = UsualUnaryConversions(NewDRE);
+ if (PromotedCall.isInvalid())
+ return ExprError();
+ TheCall->setCallee(PromotedCall.take());
+
+ // Change the result type of the call to match the original value type. This
+  // is arbitrary, but the codegen for these builtins is designed to handle it
+ // gracefully.
+ TheCall->setType(ResultType);
+
+ return move(TheCallResult);
+}
+
+/// CheckObjCString - Checks that the argument to the builtin
+/// CFString constructor is correct.
+/// Note: It might also make sense to do the UTF-16 conversion here (would
+/// simplify the backend).
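+///
+/// Illustrative sketch of the call being validated (as emitted by CFSTR-style
+/// macros):
+///   __builtin___CFStringMakeConstantString("hello");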
+bool Sema::CheckObjCString(Expr *Arg) {
+ Arg = Arg->IgnoreParenCasts();
+ StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
+
+ if (!Literal || !Literal->isAscii()) {
+ Diag(Arg->getLocStart(), diag::err_cfstring_literal_not_string_constant)
+ << Arg->getSourceRange();
+ return true;
+ }
+
+ if (Literal->containsNonAsciiOrNull()) {
+ StringRef String = Literal->getString();
+ unsigned NumBytes = String.size();
+ SmallVector<UTF16, 128> ToBuf(NumBytes);
+ const UTF8 *FromPtr = (UTF8 *)String.data();
+ UTF16 *ToPtr = &ToBuf[0];
+
+ ConversionResult Result = ConvertUTF8toUTF16(&FromPtr, FromPtr + NumBytes,
+ &ToPtr, ToPtr + NumBytes,
+ strictConversion);
+ // Check for conversion failure.
+ if (Result != conversionOK)
+ Diag(Arg->getLocStart(),
+ diag::warn_cfstring_truncated) << Arg->getSourceRange();
+ }
+ return false;
+}
+
+/// SemaBuiltinVAStart - Check the arguments to __builtin_va_start for validity.
+/// Emit an error and return true on failure, return false on success.
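+///
+/// A minimal sketch of the pattern being validated (assuming the usual
+/// <stdarg.h>-style usage via the builtins):
+///   void f(int last, ...) {
+///     __builtin_va_list ap;
+///     __builtin_va_start(ap, last);  // 'last' must be the last named param
+///     __builtin_va_end(ap);
+///   }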
+bool Sema::SemaBuiltinVAStart(CallExpr *TheCall) {
+ Expr *Fn = TheCall->getCallee();
+ if (TheCall->getNumArgs() > 2) {
+ Diag(TheCall->getArg(2)->getLocStart(),
+ diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/ << 2 << TheCall->getNumArgs()
+ << Fn->getSourceRange()
+ << SourceRange(TheCall->getArg(2)->getLocStart(),
+ (*(TheCall->arg_end()-1))->getLocEnd());
+ return true;
+ }
+
+ if (TheCall->getNumArgs() < 2) {
+ return Diag(TheCall->getLocEnd(),
+ diag::err_typecheck_call_too_few_args_at_least)
+ << 0 /*function call*/ << 2 << TheCall->getNumArgs();
+ }
+
+ // Type-check the first argument normally.
+ if (checkBuiltinArgument(*this, TheCall, 0))
+ return true;
+
+ // Determine whether the current function is variadic or not.
+ BlockScopeInfo *CurBlock = getCurBlock();
+ bool isVariadic;
+ if (CurBlock)
+ isVariadic = CurBlock->TheDecl->isVariadic();
+ else if (FunctionDecl *FD = getCurFunctionDecl())
+ isVariadic = FD->isVariadic();
+ else
+ isVariadic = getCurMethodDecl()->isVariadic();
+
+ if (!isVariadic) {
+ Diag(Fn->getLocStart(), diag::err_va_start_used_in_non_variadic_function);
+ return true;
+ }
+
+ // Verify that the second argument to the builtin is the last argument of the
+ // current function or method.
+ bool SecondArgIsLastNamedArgument = false;
+ const Expr *Arg = TheCall->getArg(1)->IgnoreParenCasts();
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Arg)) {
+ if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(DR->getDecl())) {
+ // FIXME: This isn't correct for methods (results in bogus warning).
+ // Get the last formal in the current function.
+ const ParmVarDecl *LastArg;
+ if (CurBlock)
+ LastArg = *(CurBlock->TheDecl->param_end()-1);
+ else if (FunctionDecl *FD = getCurFunctionDecl())
+ LastArg = *(FD->param_end()-1);
+ else
+ LastArg = *(getCurMethodDecl()->param_end()-1);
+ SecondArgIsLastNamedArgument = PV == LastArg;
+ }
+ }
+
+ if (!SecondArgIsLastNamedArgument)
+ Diag(TheCall->getArg(1)->getLocStart(),
+ diag::warn_second_parameter_of_va_start_not_last_named_argument);
+ return false;
+}
+
+/// SemaBuiltinUnorderedCompare - Handle functions like __builtin_isgreater and
+/// friends. This is declared to take (...), so we have to check everything.
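+///
+/// Illustrative sketch of an accepted call: both operands must promote to a
+/// real floating type, e.g.
+///   double x = 1.0, y = 2.0;
+///   int r = __builtin_isgreater(x, y);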
+bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
+ if (TheCall->getNumArgs() < 2)
+ return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
+ << 0 << 2 << TheCall->getNumArgs()/*function call*/;
+ if (TheCall->getNumArgs() > 2)
+ return Diag(TheCall->getArg(2)->getLocStart(),
+ diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/ << 2 << TheCall->getNumArgs()
+ << SourceRange(TheCall->getArg(2)->getLocStart(),
+ (*(TheCall->arg_end()-1))->getLocEnd());
+
+ ExprResult OrigArg0 = TheCall->getArg(0);
+ ExprResult OrigArg1 = TheCall->getArg(1);
+
+ // Do standard promotions between the two arguments, returning their common
+ // type.
+ QualType Res = UsualArithmeticConversions(OrigArg0, OrigArg1, false);
+ if (OrigArg0.isInvalid() || OrigArg1.isInvalid())
+ return true;
+
+ // Make sure any conversions are pushed back into the call; this is
+ // type safe since unordered compare builtins are declared as "_Bool
+ // foo(...)".
+ TheCall->setArg(0, OrigArg0.get());
+ TheCall->setArg(1, OrigArg1.get());
+
+ if (OrigArg0.get()->isTypeDependent() || OrigArg1.get()->isTypeDependent())
+ return false;
+
+ // If the common type isn't a real floating type, then the arguments were
+ // invalid for this operation.
+ if (!Res->isRealFloatingType())
+ return Diag(OrigArg0.get()->getLocStart(),
+ diag::err_typecheck_call_invalid_ordered_compare)
+ << OrigArg0.get()->getType() << OrigArg1.get()->getType()
+ << SourceRange(OrigArg0.get()->getLocStart(), OrigArg1.get()->getLocEnd());
+
+ return false;
+}
+
+/// SemaBuiltinFPClassification - Handle functions like
+/// __builtin_isnan and friends. This is declared to take (...), so we have
+/// to check everything. We expect the last argument to be a floating point
+/// value.
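+///
+/// As a sketch (assuming the <math.h> FP_* classification macros), this
+/// handles calls such as
+///   int a = __builtin_isnan(x);                               // NumArgs == 1
+///   int b = __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
+///                                FP_SUBNORMAL, FP_ZERO, x);   // NumArgs == 6
+/// where x is a non-_Complex floating-point value.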
+bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
+ if (TheCall->getNumArgs() < NumArgs)
+ return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
+ << 0 << NumArgs << TheCall->getNumArgs()/*function call*/;
+ if (TheCall->getNumArgs() > NumArgs)
+ return Diag(TheCall->getArg(NumArgs)->getLocStart(),
+ diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/ << NumArgs << TheCall->getNumArgs()
+ << SourceRange(TheCall->getArg(NumArgs)->getLocStart(),
+ (*(TheCall->arg_end()-1))->getLocEnd());
+
+ Expr *OrigArg = TheCall->getArg(NumArgs-1);
+
+ if (OrigArg->isTypeDependent())
+ return false;
+
+ // This operation requires a non-_Complex floating-point number.
+ if (!OrigArg->getType()->isRealFloatingType())
+ return Diag(OrigArg->getLocStart(),
+ diag::err_typecheck_call_invalid_unary_fp)
+ << OrigArg->getType() << OrigArg->getSourceRange();
+
+ // If this is an implicit conversion from float -> double, remove it.
+ if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(OrigArg)) {
+ Expr *CastArg = Cast->getSubExpr();
+ if (CastArg->getType()->isSpecificBuiltinType(BuiltinType::Float)) {
+ assert(Cast->getType()->isSpecificBuiltinType(BuiltinType::Double) &&
+ "promotion from float to double is the only expected cast here");
+ Cast->setSubExpr(0);
+ TheCall->setArg(NumArgs-1, CastArg);
+ }
+ }
+
+ return false;
+}
+
+/// SemaBuiltinShuffleVector - Handle __builtin_shufflevector.
+// This is declared to take (...), so we have to check everything.
+ExprResult Sema::SemaBuiltinShuffleVector(CallExpr *TheCall) {
+ if (TheCall->getNumArgs() < 2)
+ return ExprError(Diag(TheCall->getLocEnd(),
+ diag::err_typecheck_call_too_few_args_at_least)
+ << 0 /*function call*/ << 2 << TheCall->getNumArgs()
+ << TheCall->getSourceRange());
+
+ // Determine which of the following types of shufflevector we're checking:
+ // 1) unary, vector mask: (lhs, mask)
+ // 2) binary, vector mask: (lhs, rhs, mask)
+ // 3) binary, scalar mask: (lhs, rhs, index, ..., index)
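+  //
+  // Illustrative sketch of form (3), using an ext_vector_type typedef that is
+  // assumed here purely for the example:
+  //   typedef float float4 __attribute__((ext_vector_type(4)));
+  //   float4 a, b;
+  //   float4 r = __builtin_shufflevector(a, b, 0, 4, 1, 5);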
+ QualType resType = TheCall->getArg(0)->getType();
+ unsigned numElements = 0;
+
+ if (!TheCall->getArg(0)->isTypeDependent() &&
+ !TheCall->getArg(1)->isTypeDependent()) {
+ QualType LHSType = TheCall->getArg(0)->getType();
+ QualType RHSType = TheCall->getArg(1)->getType();
+
+ if (!LHSType->isVectorType() || !RHSType->isVectorType()) {
+ Diag(TheCall->getLocStart(), diag::err_shufflevector_non_vector)
+ << SourceRange(TheCall->getArg(0)->getLocStart(),
+ TheCall->getArg(1)->getLocEnd());
+ return ExprError();
+ }
+
+ numElements = LHSType->getAs<VectorType>()->getNumElements();
+ unsigned numResElements = TheCall->getNumArgs() - 2;
+
+ // Check to see if we have a call with 2 vector arguments, the unary shuffle
+ // with mask. If so, verify that RHS is an integer vector type with the
+ // same number of elts as lhs.
+ if (TheCall->getNumArgs() == 2) {
+ if (!RHSType->hasIntegerRepresentation() ||
+ RHSType->getAs<VectorType>()->getNumElements() != numElements)
+ Diag(TheCall->getLocStart(), diag::err_shufflevector_incompatible_vector)
+ << SourceRange(TheCall->getArg(1)->getLocStart(),
+ TheCall->getArg(1)->getLocEnd());
+ numResElements = numElements;
+ }
+ else if (!Context.hasSameUnqualifiedType(LHSType, RHSType)) {
+ Diag(TheCall->getLocStart(), diag::err_shufflevector_incompatible_vector)
+ << SourceRange(TheCall->getArg(0)->getLocStart(),
+ TheCall->getArg(1)->getLocEnd());
+ return ExprError();
+ } else if (numElements != numResElements) {
+ QualType eltType = LHSType->getAs<VectorType>()->getElementType();
+ resType = Context.getVectorType(eltType, numResElements,
+ VectorType::GenericVector);
+ }
+ }
+
+ for (unsigned i = 2; i < TheCall->getNumArgs(); i++) {
+ if (TheCall->getArg(i)->isTypeDependent() ||
+ TheCall->getArg(i)->isValueDependent())
+ continue;
+
+ llvm::APSInt Result(32);
+ if (!TheCall->getArg(i)->isIntegerConstantExpr(Result, Context))
+ return ExprError(Diag(TheCall->getLocStart(),
+ diag::err_shufflevector_nonconstant_argument)
+ << TheCall->getArg(i)->getSourceRange());
+
+ if (Result.getActiveBits() > 64 || Result.getZExtValue() >= numElements*2)
+ return ExprError(Diag(TheCall->getLocStart(),
+ diag::err_shufflevector_argument_too_large)
+ << TheCall->getArg(i)->getSourceRange());
+ }
+
+ SmallVector<Expr*, 32> exprs;
+
+ for (unsigned i = 0, e = TheCall->getNumArgs(); i != e; i++) {
+ exprs.push_back(TheCall->getArg(i));
+ TheCall->setArg(i, 0);
+ }
+
+ return Owned(new (Context) ShuffleVectorExpr(Context, exprs.begin(),
+ exprs.size(), resType,
+ TheCall->getCallee()->getLocStart(),
+ TheCall->getRParenLoc()));
+}
+
+/// SemaBuiltinPrefetch - Handle __builtin_prefetch.
+// This is declared to take (const void*, ...) and can take two
+// optional constant int args.
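+//
+// Sketched uses that pass these checks (assuming p is some pointer expression):
+//   __builtin_prefetch(p);         // rw and locality default to 0 and 3
+//   __builtin_prefetch(p, 1, 3);   // rw must be in [0,1], locality in [0,3]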
+bool Sema::SemaBuiltinPrefetch(CallExpr *TheCall) {
+ unsigned NumArgs = TheCall->getNumArgs();
+
+ if (NumArgs > 3)
+ return Diag(TheCall->getLocEnd(),
+ diag::err_typecheck_call_too_many_args_at_most)
+ << 0 /*function call*/ << 3 << NumArgs
+ << TheCall->getSourceRange();
+
+ // Argument 0 is checked for us and the remaining arguments must be
+ // constant integers.
+ for (unsigned i = 1; i != NumArgs; ++i) {
+ Expr *Arg = TheCall->getArg(i);
+
+ llvm::APSInt Result;
+ if (SemaBuiltinConstantArg(TheCall, i, Result))
+ return true;
+
+    // FIXME: gcc issues a warning and rewrites these to 0. This
+ // seems especially odd for the third argument since the default
+ // is 3.
+ if (i == 1) {
+ if (Result.getLimitedValue() > 1)
+ return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
+ << "0" << "1" << Arg->getSourceRange();
+ } else {
+ if (Result.getLimitedValue() > 3)
+ return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
+ << "0" << "3" << Arg->getSourceRange();
+ }
+ }
+
+ return false;
+}
+
+/// SemaBuiltinConstantArg - Handle a check if argument ArgNum of CallExpr
+/// TheCall is a constant expression.
+bool Sema::SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
+ llvm::APSInt &Result) {
+ Expr *Arg = TheCall->getArg(ArgNum);
+ DeclRefExpr *DRE =cast<DeclRefExpr>(TheCall->getCallee()->IgnoreParenCasts());
+ FunctionDecl *FDecl = cast<FunctionDecl>(DRE->getDecl());
+
+ if (Arg->isTypeDependent() || Arg->isValueDependent()) return false;
+
+ if (!Arg->isIntegerConstantExpr(Result, Context))
+ return Diag(TheCall->getLocStart(), diag::err_constant_integer_arg_type)
+ << FDecl->getDeclName() << Arg->getSourceRange();
+
+ return false;
+}
+
+/// SemaBuiltinObjectSize - Handle __builtin_object_size(void *ptr,
+/// int type). This simply type checks that type is one of the defined
+/// constants (0-3).
+// For compatibility, check 0-3; LLVM itself only handles 0 and 2.
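+//
+// Sketched usage of the accepted range:
+//   char buf[16];
+//   unsigned long n = __builtin_object_size(buf, 0);  // 'type' must be 0-3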
+bool Sema::SemaBuiltinObjectSize(CallExpr *TheCall) {
+ llvm::APSInt Result;
+
+ // Check constant-ness first.
+ if (SemaBuiltinConstantArg(TheCall, 1, Result))
+ return true;
+
+ Expr *Arg = TheCall->getArg(1);
+ if (Result.getSExtValue() < 0 || Result.getSExtValue() > 3) {
+ return Diag(TheCall->getLocStart(), diag::err_argument_invalid_range)
+ << "0" << "3" << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
+ }
+
+ return false;
+}
+
+/// SemaBuiltinLongjmp - Handle __builtin_longjmp(void *env[5], int val).
+/// This checks that val is a constant 1.
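+///
+/// A minimal sketch of the only accepted form:
+///   void *env[5];
+///   __builtin_longjmp(env, 1);   // the second argument must be the constant 1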
+bool Sema::SemaBuiltinLongjmp(CallExpr *TheCall) {
+ Expr *Arg = TheCall->getArg(1);
+ llvm::APSInt Result;
+
+ // TODO: This is less than ideal. Overload this to take a value.
+ if (SemaBuiltinConstantArg(TheCall, 1, Result))
+ return true;
+
+ if (Result != 1)
+ return Diag(TheCall->getLocStart(), diag::err_builtin_longjmp_invalid_val)
+ << SourceRange(Arg->getLocStart(), Arg->getLocEnd());
+
+ return false;
+}
+
+// Handle i > 1 ? "x" : "y", recursively.
+bool Sema::SemaCheckStringLiteral(const Expr *E, Expr **Args,
+ unsigned NumArgs, bool HasVAListArg,
+ unsigned format_idx, unsigned firstDataArg,
+ FormatStringType Type, bool inFunctionCall) {
+ tryAgain:
+ if (E->isTypeDependent() || E->isValueDependent())
+ return false;
+
+ E = E->IgnoreParenCasts();
+
+ if (E->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull))
+ // Technically -Wformat-nonliteral does not warn about this case.
+ // The behavior of printf and friends in this case is implementation
+ // dependent. Ideally if the format string cannot be null then
+ // it should have a 'nonnull' attribute in the function prototype.
+ return true;
+
+ switch (E->getStmtClass()) {
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass: {
+ const AbstractConditionalOperator *C = cast<AbstractConditionalOperator>(E);
+ return SemaCheckStringLiteral(C->getTrueExpr(), Args, NumArgs, HasVAListArg,
+ format_idx, firstDataArg, Type,
+ inFunctionCall)
+ && SemaCheckStringLiteral(C->getFalseExpr(), Args, NumArgs, HasVAListArg,
+ format_idx, firstDataArg, Type,
+ inFunctionCall);
+ }
+
+ case Stmt::ImplicitCastExprClass: {
+ E = cast<ImplicitCastExpr>(E)->getSubExpr();
+ goto tryAgain;
+ }
+
+ case Stmt::OpaqueValueExprClass:
+ if (const Expr *src = cast<OpaqueValueExpr>(E)->getSourceExpr()) {
+ E = src;
+ goto tryAgain;
+ }
+ return false;
+
+ case Stmt::PredefinedExprClass:
+ // While __func__, etc., are technically not string literals, they
+ // cannot contain format specifiers and thus are not a security
+ // liability.
+ return true;
+
+ case Stmt::DeclRefExprClass: {
+ const DeclRefExpr *DR = cast<DeclRefExpr>(E);
+
+ // As an exception, do not flag errors for variables binding to
+ // const string literals.
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ bool isConstant = false;
+ QualType T = DR->getType();
+
+ if (const ArrayType *AT = Context.getAsArrayType(T)) {
+ isConstant = AT->getElementType().isConstant(Context);
+ } else if (const PointerType *PT = T->getAs<PointerType>()) {
+ isConstant = T.isConstant(Context) &&
+ PT->getPointeeType().isConstant(Context);
+ } else if (T->isObjCObjectPointerType()) {
+ // In ObjC, there is usually no "const ObjectPointer" type,
+ // so don't check if the pointee type is constant.
+ isConstant = T.isConstant(Context);
+ }
+
+ if (isConstant) {
+ if (const Expr *Init = VD->getAnyInitializer())
+ return SemaCheckStringLiteral(Init, Args, NumArgs,
+ HasVAListArg, format_idx, firstDataArg,
+ Type, /*inFunctionCall*/false);
+ }
+
+ // For vprintf* functions (i.e., HasVAListArg==true), we add a
+ // special check to see if the format string is a function parameter
+ // of the function calling the printf function. If the function
+ // has an attribute indicating it is a printf-like function, then we
+ // should suppress warnings concerning non-literals being used in a call
+ // to a vprintf function. For example:
+ //
+ // void
+  //   logmessage(char const *fmt __attribute__((format(printf, 1, 2))), ...){
+ // va_list ap;
+ // va_start(ap, fmt);
+ // vprintf(fmt, ap); // Do NOT emit a warning about "fmt".
+ // ...
+ //
+ if (HasVAListArg) {
+ if (const ParmVarDecl *PV = dyn_cast<ParmVarDecl>(VD)) {
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(PV->getDeclContext())) {
+ int PVIndex = PV->getFunctionScopeIndex() + 1;
+ for (specific_attr_iterator<FormatAttr>
+ i = ND->specific_attr_begin<FormatAttr>(),
+ e = ND->specific_attr_end<FormatAttr>(); i != e ; ++i) {
+ FormatAttr *PVFormat = *i;
+ // adjust for implicit parameter
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
+ if (MD->isInstance())
+ ++PVIndex;
+ // We also check if the formats are compatible.
+ // We can't pass a 'scanf' string to a 'printf' function.
+ if (PVIndex == PVFormat->getFormatIdx() &&
+ Type == GetFormatStringType(PVFormat))
+ return true;
+ }
+ }
+ }
+ }
+ }
+
+ return false;
+ }
+
+ case Stmt::CallExprClass:
+ case Stmt::CXXMemberCallExprClass: {
+ const CallExpr *CE = cast<CallExpr>(E);
+ if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(CE->getCalleeDecl())) {
+ if (const FormatArgAttr *FA = ND->getAttr<FormatArgAttr>()) {
+ unsigned ArgIndex = FA->getFormatIdx();
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(ND))
+ if (MD->isInstance())
+ --ArgIndex;
+ const Expr *Arg = CE->getArg(ArgIndex - 1);
+
+ return SemaCheckStringLiteral(Arg, Args, NumArgs, HasVAListArg,
+ format_idx, firstDataArg, Type,
+ inFunctionCall);
+ }
+ }
+
+ return false;
+ }
+ case Stmt::ObjCStringLiteralClass:
+ case Stmt::StringLiteralClass: {
+ const StringLiteral *StrE = NULL;
+
+ if (const ObjCStringLiteral *ObjCFExpr = dyn_cast<ObjCStringLiteral>(E))
+ StrE = ObjCFExpr->getString();
+ else
+ StrE = cast<StringLiteral>(E);
+
+ if (StrE) {
+ CheckFormatString(StrE, E, Args, NumArgs, HasVAListArg, format_idx,
+ firstDataArg, Type, inFunctionCall);
+ return true;
+ }
+
+ return false;
+ }
+
+ default:
+ return false;
+ }
+}
+
+void
+Sema::CheckNonNullArguments(const NonNullAttr *NonNull,
+ const Expr * const *ExprArgs,
+ SourceLocation CallSiteLoc) {
+ for (NonNullAttr::args_iterator i = NonNull->args_begin(),
+ e = NonNull->args_end();
+ i != e; ++i) {
+ const Expr *ArgExpr = ExprArgs[*i];
+ if (ArgExpr->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNotNull))
+ Diag(CallSiteLoc, diag::warn_null_arg) << ArgExpr->getSourceRange();
+ }
+}
+
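+// Map the archetype named in a 'format' attribute onto our FormatStringType.
+// Illustrative example (hypothetical declaration):
+//   void mylog(const char *fmt, ...) __attribute__((format(printf, 1, 2)));
+// Here Format->getType() is "printf", which maps to FST_Printf below.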
+Sema::FormatStringType Sema::GetFormatStringType(const FormatAttr *Format) {
+ return llvm::StringSwitch<FormatStringType>(Format->getType())
+ .Case("scanf", FST_Scanf)
+ .Cases("printf", "printf0", FST_Printf)
+ .Cases("NSString", "CFString", FST_NSString)
+ .Case("strftime", FST_Strftime)
+ .Case("strfmon", FST_Strfmon)
+ .Cases("kprintf", "cmn_err", "vcmn_err", "zcmn_err", FST_Kprintf)
+ .Default(FST_Unknown);
+}
+
+/// CheckFormatArguments - Check calls to printf, scanf, and similar functions
+/// for correct use of format strings.
+void Sema::CheckFormatArguments(const FormatAttr *Format, CallExpr *TheCall) {
+ bool IsCXXMember = false;
+ // The way the format attribute works in GCC, the implicit this argument
+ // of member functions is counted. However, it doesn't appear in our own
+ // lists, so decrement format_idx in that case.
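+  // Illustrative example (hypothetical):
+  //   struct S { void log(const char *fmt, ...)
+  //                __attribute__((format(printf, 2, 3))); };
+  // The attribute indices count 'this' as argument 1, so they are off by
+  // one relative to the explicit argument list we check.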
+ IsCXXMember = isa<CXXMemberCallExpr>(TheCall);
+ CheckFormatArguments(Format, TheCall->getArgs(), TheCall->getNumArgs(),
+ IsCXXMember, TheCall->getRParenLoc(),
+ TheCall->getCallee()->getSourceRange());
+}
+
+void Sema::CheckFormatArguments(const FormatAttr *Format, Expr **Args,
+ unsigned NumArgs, bool IsCXXMember,
+ SourceLocation Loc, SourceRange Range) {
+ bool HasVAListArg = Format->getFirstArg() == 0;
+ unsigned format_idx = Format->getFormatIdx() - 1;
+ unsigned firstDataArg = HasVAListArg ? 0 : Format->getFirstArg() - 1;
+ if (IsCXXMember) {
+ if (format_idx == 0)
+ return;
+ --format_idx;
+    if (firstDataArg != 0)
+ --firstDataArg;
+ }
+ CheckFormatArguments(Args, NumArgs, HasVAListArg, format_idx,
+ firstDataArg, GetFormatStringType(Format), Loc, Range);
+}
+
+void Sema::CheckFormatArguments(Expr **Args, unsigned NumArgs,
+ bool HasVAListArg, unsigned format_idx,
+ unsigned firstDataArg, FormatStringType Type,
+ SourceLocation Loc, SourceRange Range) {
+ // CHECK: printf/scanf-like function is called with no format string.
+ if (format_idx >= NumArgs) {
+ Diag(Loc, diag::warn_missing_format_string) << Range;
+ return;
+ }
+
+ const Expr *OrigFormatExpr = Args[format_idx]->IgnoreParenCasts();
+
+ // CHECK: format string is not a string literal.
+ //
+ // Dynamically generated format strings are difficult to
+ // automatically vet at compile time. Requiring that format strings
+ // are string literals: (1) permits the checking of format strings by
+ // the compiler and thereby (2) can practically remove the source of
+ // many format string exploits.
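+  // Illustrative example (hypothetical user code): 'printf(fmt);' with a
+  // plain variable 'fmt' cannot be vetted here and is the classic format
+  // string vulnerability; 'printf("%s", fmt);' is the checkable spelling.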
+
+  // The format string can be either an ObjC string (e.g. @"%d") or a
+  // C string (e.g. "%d"). ObjC strings use the same format specifiers as
+  // C strings, so we can use the same format string checking logic for both.
+ if (SemaCheckStringLiteral(OrigFormatExpr, Args, NumArgs, HasVAListArg,
+ format_idx, firstDataArg, Type))
+ return; // Literal format string found, check done!
+
+  // Strftime is a special case, as it always uses a single 'time' argument,
+  // so it is safe to pass a non-literal string.
+ if (Type == FST_Strftime)
+ return;
+
+ // Do not emit diag when the string param is a macro expansion and the
+ // format is either NSString or CFString. This is a hack to prevent
+ // diag when using the NSLocalizedString and CFCopyLocalizedString macros
+ // which are usually used in place of NS and CF string literals.
+ if (Type == FST_NSString && Args[format_idx]->getLocStart().isMacroID())
+ return;
+
+ // If there are no arguments specified, warn with -Wformat-security, otherwise
+ // warn only with -Wformat-nonliteral.
+ if (NumArgs == format_idx+1)
+ Diag(Args[format_idx]->getLocStart(),
+ diag::warn_format_nonliteral_noargs)
+ << OrigFormatExpr->getSourceRange();
+ else
+ Diag(Args[format_idx]->getLocStart(),
+ diag::warn_format_nonliteral)
+ << OrigFormatExpr->getSourceRange();
+}
+
+namespace {
+class CheckFormatHandler : public analyze_format_string::FormatStringHandler {
+protected:
+ Sema &S;
+ const StringLiteral *FExpr;
+ const Expr *OrigFormatExpr;
+ const unsigned FirstDataArg;
+ const unsigned NumDataArgs;
+ const bool IsObjCLiteral;
+ const char *Beg; // Start of format string.
+ const bool HasVAListArg;
+ const Expr * const *Args;
+ const unsigned NumArgs;
+ unsigned FormatIdx;
+ llvm::BitVector CoveredArgs;
+ bool usesPositionalArgs;
+ bool atFirstArg;
+ bool inFunctionCall;
+public:
+ CheckFormatHandler(Sema &s, const StringLiteral *fexpr,
+ const Expr *origFormatExpr, unsigned firstDataArg,
+ unsigned numDataArgs, bool isObjCLiteral,
+ const char *beg, bool hasVAListArg,
+ Expr **args, unsigned numArgs,
+ unsigned formatIdx, bool inFunctionCall)
+ : S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr),
+ FirstDataArg(firstDataArg),
+ NumDataArgs(numDataArgs),
+ IsObjCLiteral(isObjCLiteral), Beg(beg),
+ HasVAListArg(hasVAListArg),
+ Args(args), NumArgs(numArgs), FormatIdx(formatIdx),
+ usesPositionalArgs(false), atFirstArg(true),
+ inFunctionCall(inFunctionCall) {
+ CoveredArgs.resize(numDataArgs);
+ CoveredArgs.reset();
+ }
+
+ void DoneProcessing();
+
+ void HandleIncompleteSpecifier(const char *startSpecifier,
+ unsigned specifierLen);
+
+ void HandleNonStandardLengthModifier(
+ const analyze_format_string::LengthModifier &LM,
+ const char *startSpecifier, unsigned specifierLen);
+
+ void HandleNonStandardConversionSpecifier(
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen);
+
+ void HandleNonStandardConversionSpecification(
+ const analyze_format_string::LengthModifier &LM,
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen);
+
+ virtual void HandlePosition(const char *startPos, unsigned posLen);
+
+ virtual void HandleInvalidPosition(const char *startSpecifier,
+ unsigned specifierLen,
+ analyze_format_string::PositionContext p);
+
+ virtual void HandleZeroPosition(const char *startPos, unsigned posLen);
+
+ void HandleNullChar(const char *nullCharacter);
+
+ template <typename Range>
+ static void EmitFormatDiagnostic(Sema &S, bool inFunctionCall,
+ const Expr *ArgumentExpr,
+ PartialDiagnostic PDiag,
+ SourceLocation StringLoc,
+ bool IsStringLocation, Range StringRange,
+ FixItHint Fixit = FixItHint());
+
+protected:
+ bool HandleInvalidConversionSpecifier(unsigned argIndex, SourceLocation Loc,
+ const char *startSpec,
+ unsigned specifierLen,
+ const char *csStart, unsigned csLen);
+
+ void HandlePositionalNonpositionalArgs(SourceLocation Loc,
+ const char *startSpec,
+ unsigned specifierLen);
+
+ SourceRange getFormatStringRange();
+ CharSourceRange getSpecifierRange(const char *startSpecifier,
+ unsigned specifierLen);
+ SourceLocation getLocationOfByte(const char *x);
+
+ const Expr *getDataArg(unsigned i) const;
+
+ bool CheckNumArgs(const analyze_format_string::FormatSpecifier &FS,
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen,
+ unsigned argIndex);
+
+ template <typename Range>
+ void EmitFormatDiagnostic(PartialDiagnostic PDiag, SourceLocation StringLoc,
+ bool IsStringLocation, Range StringRange,
+ FixItHint Fixit = FixItHint());
+
+ void CheckPositionalAndNonpositionalArgs(
+ const analyze_format_string::FormatSpecifier *FS);
+};
+}
+
+SourceRange CheckFormatHandler::getFormatStringRange() {
+ return OrigFormatExpr->getSourceRange();
+}
+
+CharSourceRange CheckFormatHandler::
+getSpecifierRange(const char *startSpecifier, unsigned specifierLen) {
+ SourceLocation Start = getLocationOfByte(startSpecifier);
+ SourceLocation End = getLocationOfByte(startSpecifier + specifierLen - 1);
+
+ // Advance the end SourceLocation by one due to half-open ranges.
+ End = End.getLocWithOffset(1);
+
+ return CharSourceRange::getCharRange(Start, End);
+}
+
+SourceLocation CheckFormatHandler::getLocationOfByte(const char *x) {
+ return S.getLocationOfStringLiteralByte(FExpr, x - Beg);
+}
+
+void CheckFormatHandler::HandleIncompleteSpecifier(const char *startSpecifier,
+ unsigned specifierLen){
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_incomplete_specifier),
+ getLocationOfByte(startSpecifier),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+}
+
+void CheckFormatHandler::HandleNonStandardLengthModifier(
+ const analyze_format_string::LengthModifier &LM,
+ const char *startSpecifier, unsigned specifierLen) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) << LM.toString()
+ << 0,
+ getLocationOfByte(LM.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+}
+
+void CheckFormatHandler::HandleNonStandardConversionSpecifier(
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard) << CS.toString()
+ << 1,
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+}
+
+void CheckFormatHandler::HandleNonStandardConversionSpecification(
+ const analyze_format_string::LengthModifier &LM,
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_conversion_spec)
+ << LM.toString() << CS.toString(),
+ getLocationOfByte(LM.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+}
+
+void CheckFormatHandler::HandlePosition(const char *startPos,
+ unsigned posLen) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_non_standard_positional_arg),
+ getLocationOfByte(startPos),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startPos, posLen));
+}
+
+void
+CheckFormatHandler::HandleInvalidPosition(const char *startPos, unsigned posLen,
+ analyze_format_string::PositionContext p) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_positional_specifier)
+ << (unsigned) p,
+ getLocationOfByte(startPos), /*IsStringLocation*/true,
+ getSpecifierRange(startPos, posLen));
+}
+
+void CheckFormatHandler::HandleZeroPosition(const char *startPos,
+ unsigned posLen) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_zero_positional_specifier),
+ getLocationOfByte(startPos),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startPos, posLen));
+}
+
+void CheckFormatHandler::HandleNullChar(const char *nullCharacter) {
+ if (!IsObjCLiteral) {
+ // The presence of a null character is likely an error.
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_printf_format_string_contains_null_char),
+ getLocationOfByte(nullCharacter), /*IsStringLocation*/true,
+ getFormatStringRange());
+ }
+}
+
+const Expr *CheckFormatHandler::getDataArg(unsigned i) const {
+ return Args[FirstDataArg + i];
+}
+
+void CheckFormatHandler::DoneProcessing() {
+ // Does the number of data arguments exceed the number of
+ // format conversions in the format string?
+ if (!HasVAListArg) {
+ // Find any arguments that weren't covered.
+ CoveredArgs.flip();
+ signed notCoveredArg = CoveredArgs.find_first();
+ if (notCoveredArg >= 0) {
+ assert((unsigned)notCoveredArg < NumDataArgs);
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_data_arg_not_used),
+ getDataArg((unsigned) notCoveredArg)->getLocStart(),
+ /*IsStringLocation*/false, getFormatStringRange());
+ }
+ }
+}
+
+bool
+CheckFormatHandler::HandleInvalidConversionSpecifier(unsigned argIndex,
+ SourceLocation Loc,
+ const char *startSpec,
+ unsigned specifierLen,
+ const char *csStart,
+ unsigned csLen) {
+
+ bool keepGoing = true;
+ if (argIndex < NumDataArgs) {
+    // Consider the argument covered, even though the specifier doesn't
+    // make sense.
+ CoveredArgs.set(argIndex);
+ }
+ else {
+    // If argIndex exceeds the number of data arguments we
+    // don't issue a warning because that would just produce a cascade of
+    // warnings (and they may have intended '%%' anyway). We don't want to
+    // continue processing the format string after this point, however, as we
+    // will likely just get gibberish when trying to match arguments.
+ keepGoing = false;
+ }
+
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_invalid_conversion)
+ << StringRef(csStart, csLen),
+ Loc, /*IsStringLocation*/true,
+ getSpecifierRange(startSpec, specifierLen));
+
+ return keepGoing;
+}
+
+void
+CheckFormatHandler::HandlePositionalNonpositionalArgs(SourceLocation Loc,
+ const char *startSpec,
+ unsigned specifierLen) {
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_format_mix_positional_nonpositional_args),
+ Loc, /*isStringLoc*/true, getSpecifierRange(startSpec, specifierLen));
+}
+
+bool
+CheckFormatHandler::CheckNumArgs(
+ const analyze_format_string::FormatSpecifier &FS,
+ const analyze_format_string::ConversionSpecifier &CS,
+ const char *startSpecifier, unsigned specifierLen, unsigned argIndex) {
+
+ if (argIndex >= NumDataArgs) {
+ PartialDiagnostic PDiag = FS.usesPositionalArg()
+ ? (S.PDiag(diag::warn_printf_positional_arg_exceeds_data_args)
+ << (argIndex+1) << NumDataArgs)
+ : S.PDiag(diag::warn_printf_insufficient_data_args);
+ EmitFormatDiagnostic(
+ PDiag, getLocationOfByte(CS.getStart()), /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+ return false;
+ }
+ return true;
+}
+
+template<typename Range>
+void CheckFormatHandler::EmitFormatDiagnostic(PartialDiagnostic PDiag,
+ SourceLocation Loc,
+ bool IsStringLocation,
+ Range StringRange,
+ FixItHint FixIt) {
+ EmitFormatDiagnostic(S, inFunctionCall, Args[FormatIdx], PDiag,
+ Loc, IsStringLocation, StringRange, FixIt);
+}
+
+/// \brief If the format string is not within the function call, emit a note
+/// so that both the function call and the string appear in the diagnostics.
+///
+/// \param inFunctionCall if true, the format string is within the function
+/// call and only one diagnostic message will be produced. Otherwise, an
+/// extra note will be emitted pointing to the location of the format string.
+///
+/// \param ArgumentExpr the expression that is passed as the format string
+/// argument in the function call. Used for getting locations when two
+/// diagnostics are emitted.
+///
+/// \param PDiag the callee should already have provided any strings for the
+/// diagnostic message. This function only adds locations and fixits
+/// to diagnostics.
+///
+/// \param Loc primary location for diagnostic. If two diagnostics are
+/// required, one will be at Loc and a new SourceLocation will be created for
+/// the other one.
+///
+/// \param IsStringLocation if true, Loc points to the format string and
+/// should be used for the note. Otherwise, Loc points to the argument list
+/// and will be used with PDiag.
+///
+/// \param StringRange some or all of the string to highlight. This is
+/// templated so it can accept either a CharSourceRange or a SourceRange.
+///
+/// \param Fixit optional fix it hint for the format string.
+template<typename Range>
+void CheckFormatHandler::EmitFormatDiagnostic(Sema &S, bool InFunctionCall,
+ const Expr *ArgumentExpr,
+ PartialDiagnostic PDiag,
+ SourceLocation Loc,
+ bool IsStringLocation,
+ Range StringRange,
+ FixItHint FixIt) {
+ if (InFunctionCall)
+ S.Diag(Loc, PDiag) << StringRange << FixIt;
+ else {
+ S.Diag(IsStringLocation ? ArgumentExpr->getExprLoc() : Loc, PDiag)
+ << ArgumentExpr->getSourceRange();
+ S.Diag(IsStringLocation ? Loc : StringRange.getBegin(),
+ diag::note_format_string_defined)
+ << StringRange << FixIt;
+ }
+}
+
+//===--- CHECK: Printf format string checking ------------------------------===//
+
+namespace {
+class CheckPrintfHandler : public CheckFormatHandler {
+public:
+ CheckPrintfHandler(Sema &s, const StringLiteral *fexpr,
+ const Expr *origFormatExpr, unsigned firstDataArg,
+ unsigned numDataArgs, bool isObjCLiteral,
+ const char *beg, bool hasVAListArg,
+ Expr **Args, unsigned NumArgs,
+ unsigned formatIdx, bool inFunctionCall)
+ : CheckFormatHandler(s, fexpr, origFormatExpr, firstDataArg,
+ numDataArgs, isObjCLiteral, beg, hasVAListArg,
+ Args, NumArgs, formatIdx, inFunctionCall) {}
+
+
+ bool HandleInvalidPrintfConversionSpecifier(
+ const analyze_printf::PrintfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen);
+
+ bool HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen);
+
+ bool HandleAmount(const analyze_format_string::OptionalAmount &Amt, unsigned k,
+ const char *startSpecifier, unsigned specifierLen);
+ void HandleInvalidAmount(const analyze_printf::PrintfSpecifier &FS,
+ const analyze_printf::OptionalAmount &Amt,
+ unsigned type,
+ const char *startSpecifier, unsigned specifierLen);
+ void HandleFlag(const analyze_printf::PrintfSpecifier &FS,
+ const analyze_printf::OptionalFlag &flag,
+ const char *startSpecifier, unsigned specifierLen);
+ void HandleIgnoredFlag(const analyze_printf::PrintfSpecifier &FS,
+ const analyze_printf::OptionalFlag &ignoredFlag,
+ const analyze_printf::OptionalFlag &flag,
+ const char *startSpecifier, unsigned specifierLen);
+};
+}
+
+bool CheckPrintfHandler::HandleInvalidPrintfConversionSpecifier(
+ const analyze_printf::PrintfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+ const analyze_printf::PrintfConversionSpecifier &CS =
+ FS.getConversionSpecifier();
+
+ return HandleInvalidConversionSpecifier(FS.getArgIndex(),
+ getLocationOfByte(CS.getStart()),
+ startSpecifier, specifierLen,
+ CS.getStart(), CS.getLength());
+}
+
+bool CheckPrintfHandler::HandleAmount(
+ const analyze_format_string::OptionalAmount &Amt,
+ unsigned k, const char *startSpecifier,
+ unsigned specifierLen) {
+
+ if (Amt.hasDataArgument()) {
+ if (!HasVAListArg) {
+ unsigned argIndex = Amt.getArgIndex();
+ if (argIndex >= NumDataArgs) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_missing_arg)
+ << k,
+ getLocationOfByte(Amt.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+ // Don't do any more checking. We will just emit
+ // spurious errors.
+ return false;
+ }
+
+ // Type check the data argument. It should be an 'int'.
+ // Although not in conformance with C99, we also allow the argument to be
+ // an 'unsigned int' as that is a reasonably safe case. GCC also
+ // doesn't emit a warning for that case.
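+      // Illustrative example (hypothetical): printf("%*d", width, n) -- the
+      // '*' consumes 'width', which must be an int (or unsigned int).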
+ CoveredArgs.set(argIndex);
+ const Expr *Arg = getDataArg(argIndex);
+ QualType T = Arg->getType();
+
+ const analyze_printf::ArgTypeResult &ATR = Amt.getArgType(S.Context);
+ assert(ATR.isValid());
+
+ if (!ATR.matchesType(S.Context, T)) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_asterisk_wrong_type)
+ << k << ATR.getRepresentativeTypeName(S.Context)
+ << T << Arg->getSourceRange(),
+ getLocationOfByte(Amt.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+ // Don't do any more checking. We will just emit
+ // spurious errors.
+ return false;
+ }
+ }
+ }
+ return true;
+}
+
+void CheckPrintfHandler::HandleInvalidAmount(
+ const analyze_printf::PrintfSpecifier &FS,
+ const analyze_printf::OptionalAmount &Amt,
+ unsigned type,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+ const analyze_printf::PrintfConversionSpecifier &CS =
+ FS.getConversionSpecifier();
+
+ FixItHint fixit =
+ Amt.getHowSpecified() == analyze_printf::OptionalAmount::Constant
+ ? FixItHint::CreateRemoval(getSpecifierRange(Amt.getStart(),
+ Amt.getConstantLength()))
+ : FixItHint();
+
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_optional_amount)
+ << type << CS.toString(),
+ getLocationOfByte(Amt.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen),
+ fixit);
+}
+
+void CheckPrintfHandler::HandleFlag(const analyze_printf::PrintfSpecifier &FS,
+ const analyze_printf::OptionalFlag &flag,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+ // Warn about pointless flag with a fixit removal.
+ const analyze_printf::PrintfConversionSpecifier &CS =
+ FS.getConversionSpecifier();
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_nonsensical_flag)
+ << flag.toString() << CS.toString(),
+ getLocationOfByte(flag.getPosition()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen),
+ FixItHint::CreateRemoval(
+ getSpecifierRange(flag.getPosition(), 1)));
+}
+
+void CheckPrintfHandler::HandleIgnoredFlag(
+ const analyze_printf::PrintfSpecifier &FS,
+ const analyze_printf::OptionalFlag &ignoredFlag,
+ const analyze_printf::OptionalFlag &flag,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+ // Warn about ignored flag with a fixit removal.
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_ignored_flag)
+ << ignoredFlag.toString() << flag.toString(),
+ getLocationOfByte(ignoredFlag.getPosition()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen),
+ FixItHint::CreateRemoval(
+ getSpecifierRange(ignoredFlag.getPosition(), 1)));
+}
+
+bool
+CheckPrintfHandler::HandlePrintfSpecifier(const analyze_printf::PrintfSpecifier
+ &FS,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+
+ using namespace analyze_format_string;
+ using namespace analyze_printf;
+ const PrintfConversionSpecifier &CS = FS.getConversionSpecifier();
+
+ if (FS.consumesDataArgument()) {
+ if (atFirstArg) {
+ atFirstArg = false;
+ usesPositionalArgs = FS.usesPositionalArg();
+ }
+ else if (usesPositionalArgs != FS.usesPositionalArg()) {
+ HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
+ startSpecifier, specifierLen);
+ return false;
+ }
+ }
+
+ // First check if the field width, precision, and conversion specifier
+ // have matching data arguments.
+ if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0,
+ startSpecifier, specifierLen)) {
+ return false;
+ }
+
+ if (!HandleAmount(FS.getPrecision(), /* precision */ 1,
+ startSpecifier, specifierLen)) {
+ return false;
+ }
+
+ if (!CS.consumesDataArgument()) {
+ // FIXME: Technically specifying a precision or field width here
+ // makes no sense. Worth issuing a warning at some point.
+ return true;
+ }
+
+ // Consume the argument.
+ unsigned argIndex = FS.getArgIndex();
+ if (argIndex < NumDataArgs) {
+ // The check to see if the argIndex is valid will come later.
+ // We set the bit here because we may exit early from this
+ // function if we encounter some other error.
+ CoveredArgs.set(argIndex);
+ }
+
+ // FreeBSD extensions
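+  // Descriptive note: the '%b' and '%D' conversions each consume two data
+  // arguments, so claim the extra argument and type check both of them below.
+  // (These conversions come from the FreeBSD kernel printf.)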
+ if (CS.getKind() == ConversionSpecifier::bArg || CS.getKind() == ConversionSpecifier::DArg) {
+ // claim the second argument
+ CoveredArgs.set(argIndex + 1);
+
+ // Now type check the data expression that matches the
+ // format specifier.
+ const Expr *Ex = getDataArg(argIndex);
+ const analyze_printf::ArgTypeResult &ATR =
+ (CS.getKind() == ConversionSpecifier::bArg) ?
+ ArgTypeResult(S.Context.IntTy) : ArgTypeResult::CStrTy;
+ if (ATR.isValid() && !ATR.matchesType(S.Context, Ex->getType()))
+ S.Diag(getLocationOfByte(CS.getStart()),
+ diag::warn_printf_conversion_argument_type_mismatch)
+ << ATR.getRepresentativeType(S.Context) << Ex->getType()
+ << getSpecifierRange(startSpecifier, specifierLen)
+ << Ex->getSourceRange();
+
+ // Now type check the data expression that matches the
+ // format specifier.
+ Ex = getDataArg(argIndex + 1);
+ const analyze_printf::ArgTypeResult &ATR2 = ArgTypeResult::CStrTy;
+ if (ATR2.isValid() && !ATR2.matchesType(S.Context, Ex->getType()))
+ S.Diag(getLocationOfByte(CS.getStart()),
+ diag::warn_printf_conversion_argument_type_mismatch)
+ << ATR2.getRepresentativeType(S.Context) << Ex->getType()
+ << getSpecifierRange(startSpecifier, specifierLen)
+ << Ex->getSourceRange();
+
+ return true;
+ }
+ // END OF FREEBSD EXTENSIONS
+
+ // Check for using an Objective-C specific conversion specifier
+ // in a non-ObjC literal.
+ if (!IsObjCLiteral && CS.isObjCArg()) {
+ return HandleInvalidPrintfConversionSpecifier(FS, startSpecifier,
+ specifierLen);
+ }
+
+ // Check for invalid use of field width
+ if (!FS.hasValidFieldWidth()) {
+ HandleInvalidAmount(FS, FS.getFieldWidth(), /* field width */ 0,
+ startSpecifier, specifierLen);
+ }
+
+ // Check for invalid use of precision
+ if (!FS.hasValidPrecision()) {
+ HandleInvalidAmount(FS, FS.getPrecision(), /* precision */ 1,
+ startSpecifier, specifierLen);
+ }
+
+ // Check each flag does not conflict with any other component.
+ if (!FS.hasValidThousandsGroupingPrefix())
+ HandleFlag(FS, FS.hasThousandsGrouping(), startSpecifier, specifierLen);
+ if (!FS.hasValidLeadingZeros())
+ HandleFlag(FS, FS.hasLeadingZeros(), startSpecifier, specifierLen);
+ if (!FS.hasValidPlusPrefix())
+ HandleFlag(FS, FS.hasPlusPrefix(), startSpecifier, specifierLen);
+ if (!FS.hasValidSpacePrefix())
+ HandleFlag(FS, FS.hasSpacePrefix(), startSpecifier, specifierLen);
+ if (!FS.hasValidAlternativeForm())
+ HandleFlag(FS, FS.hasAlternativeForm(), startSpecifier, specifierLen);
+ if (!FS.hasValidLeftJustified())
+ HandleFlag(FS, FS.isLeftJustified(), startSpecifier, specifierLen);
+
+ // Check that flags are not ignored by another flag
+ if (FS.hasSpacePrefix() && FS.hasPlusPrefix()) // ' ' ignored by '+'
+ HandleIgnoredFlag(FS, FS.hasSpacePrefix(), FS.hasPlusPrefix(),
+ startSpecifier, specifierLen);
+ if (FS.hasLeadingZeros() && FS.isLeftJustified()) // '0' ignored by '-'
+ HandleIgnoredFlag(FS, FS.hasLeadingZeros(), FS.isLeftJustified(),
+ startSpecifier, specifierLen);
+
+ // Check the length modifier is valid with the given conversion specifier.
+ const LengthModifier &LM = FS.getLengthModifier();
+ if (!FS.hasValidLengthModifier())
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_nonsensical_length)
+ << LM.toString() << CS.toString(),
+ getLocationOfByte(LM.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen),
+ FixItHint::CreateRemoval(
+ getSpecifierRange(LM.getStart(),
+ LM.getLength())));
+ if (!FS.hasStandardLengthModifier())
+ HandleNonStandardLengthModifier(LM, startSpecifier, specifierLen);
+ if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
+ HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
+ if (!FS.hasStandardLengthConversionCombination())
+ HandleNonStandardConversionSpecification(LM, CS, startSpecifier,
+ specifierLen);
+
+ // Are we using '%n'?
+ if (CS.getKind() == ConversionSpecifier::nArg) {
+ // Issue a warning about this being a possible security issue.
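+    // Illustrative example (hypothetical): printf("abc%n", &n) stores 3 in
+    // 'n'; because '%n' writes through its pointer argument, it is a
+    // well-known vector for format-string attacks.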
+ EmitFormatDiagnostic(S.PDiag(diag::warn_printf_write_back),
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+ // Continue checking the other format specifiers.
+ return true;
+ }
+
+ // The remaining checks depend on the data arguments.
+ if (HasVAListArg)
+ return true;
+
+ if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
+ return false;
+
+ // Now type check the data expression that matches the
+ // format specifier.
+ const Expr *Ex = getDataArg(argIndex);
+ const analyze_printf::ArgTypeResult &ATR = FS.getArgType(S.Context,
+ IsObjCLiteral);
+ if (ATR.isValid() && !ATR.matchesType(S.Context, Ex->getType())) {
+ // Check if we didn't match because of an implicit cast from a 'char'
+ // or 'short' to an 'int'. This is done because printf is a varargs
+ // function.
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Ex))
+ if (ICE->getType() == S.Context.IntTy) {
+ // All further checking is done on the subexpression.
+ Ex = ICE->getSubExpr();
+ if (ATR.matchesType(S.Context, Ex->getType()))
+ return true;
+ }
+
+ // We may be able to offer a FixItHint if it is a supported type.
+ PrintfSpecifier fixedFS = FS;
+ bool success = fixedFS.fixType(Ex->getType(), S.getLangOpts(),
+ S.Context, IsObjCLiteral);
+
+ if (success) {
+ // Get the fix string from the fixed format specifier
+ SmallString<128> buf;
+ llvm::raw_svector_ostream os(buf);
+ fixedFS.toString(os);
+
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_printf_conversion_argument_type_mismatch)
+ << ATR.getRepresentativeTypeName(S.Context) << Ex->getType()
+ << Ex->getSourceRange(),
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen),
+ FixItHint::CreateReplacement(
+ getSpecifierRange(startSpecifier, specifierLen),
+ os.str()));
+ }
+ else {
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_printf_conversion_argument_type_mismatch)
+ << ATR.getRepresentativeTypeName(S.Context) << Ex->getType()
+ << getSpecifierRange(startSpecifier, specifierLen)
+ << Ex->getSourceRange(),
+ getLocationOfByte(CS.getStart()),
+ true,
+ getSpecifierRange(startSpecifier, specifierLen));
+ }
+ }
+
+ return true;
+}
+
+//===--- CHECK: Scanf format string checking ------------------------------===//
+
+namespace {
+class CheckScanfHandler : public CheckFormatHandler {
+public:
+ CheckScanfHandler(Sema &s, const StringLiteral *fexpr,
+ const Expr *origFormatExpr, unsigned firstDataArg,
+ unsigned numDataArgs, bool isObjCLiteral,
+ const char *beg, bool hasVAListArg,
+ Expr **Args, unsigned NumArgs,
+ unsigned formatIdx, bool inFunctionCall)
+ : CheckFormatHandler(s, fexpr, origFormatExpr, firstDataArg,
+ numDataArgs, isObjCLiteral, beg, hasVAListArg,
+ Args, NumArgs, formatIdx, inFunctionCall) {}
+
+ bool HandleScanfSpecifier(const analyze_scanf::ScanfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen);
+
+ bool HandleInvalidScanfConversionSpecifier(
+ const analyze_scanf::ScanfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen);
+
+ void HandleIncompleteScanList(const char *start, const char *end);
+};
+}
+
+void CheckScanfHandler::HandleIncompleteScanList(const char *start,
+ const char *end) {
+ EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_scanlist_incomplete),
+ getLocationOfByte(end), /*IsStringLocation*/true,
+ getSpecifierRange(start, end - start));
+}
+
+bool CheckScanfHandler::HandleInvalidScanfConversionSpecifier(
+ const analyze_scanf::ScanfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+
+ const analyze_scanf::ScanfConversionSpecifier &CS =
+ FS.getConversionSpecifier();
+
+ return HandleInvalidConversionSpecifier(FS.getArgIndex(),
+ getLocationOfByte(CS.getStart()),
+ startSpecifier, specifierLen,
+ CS.getStart(), CS.getLength());
+}
+
+bool CheckScanfHandler::HandleScanfSpecifier(
+ const analyze_scanf::ScanfSpecifier &FS,
+ const char *startSpecifier,
+ unsigned specifierLen) {
+
+ using namespace analyze_scanf;
+ using namespace analyze_format_string;
+
+ const ScanfConversionSpecifier &CS = FS.getConversionSpecifier();
+
+ // Handle case where '%' and '*' don't consume an argument. These shouldn't
+ // be used to decide if we are using positional arguments consistently.
+ if (FS.consumesDataArgument()) {
+ if (atFirstArg) {
+ atFirstArg = false;
+ usesPositionalArgs = FS.usesPositionalArg();
+ }
+ else if (usesPositionalArgs != FS.usesPositionalArg()) {
+ HandlePositionalNonpositionalArgs(getLocationOfByte(CS.getStart()),
+ startSpecifier, specifierLen);
+ return false;
+ }
+ }
+
+  // Check that the field width is non-zero.
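+  // Illustrative example (hypothetical): scanf("%0d", &x) -- C99 requires a
+  // nonzero field width, so a constant width of zero is flagged below.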
+ const OptionalAmount &Amt = FS.getFieldWidth();
+ if (Amt.getHowSpecified() == OptionalAmount::Constant) {
+ if (Amt.getConstantAmount() == 0) {
+ const CharSourceRange &R = getSpecifierRange(Amt.getStart(),
+ Amt.getConstantLength());
+ EmitFormatDiagnostic(S.PDiag(diag::warn_scanf_nonzero_width),
+ getLocationOfByte(Amt.getStart()),
+ /*IsStringLocation*/true, R,
+ FixItHint::CreateRemoval(R));
+ }
+ }
+
+ if (!FS.consumesDataArgument()) {
+ // FIXME: Technically specifying a precision or field width here
+ // makes no sense. Worth issuing a warning at some point.
+ return true;
+ }
+
+ // Consume the argument.
+ unsigned argIndex = FS.getArgIndex();
+ if (argIndex < NumDataArgs) {
+ // The check to see if the argIndex is valid will come later.
+ // We set the bit here because we may exit early from this
+ // function if we encounter some other error.
+ CoveredArgs.set(argIndex);
+ }
+
+ // Check the length modifier is valid with the given conversion specifier.
+ const LengthModifier &LM = FS.getLengthModifier();
+ if (!FS.hasValidLengthModifier()) {
+ const CharSourceRange &R = getSpecifierRange(LM.getStart(), LM.getLength());
+ EmitFormatDiagnostic(S.PDiag(diag::warn_format_nonsensical_length)
+ << LM.toString() << CS.toString()
+ << getSpecifierRange(startSpecifier, specifierLen),
+ getLocationOfByte(LM.getStart()),
+ /*IsStringLocation*/true, R,
+ FixItHint::CreateRemoval(R));
+ }
+
+ if (!FS.hasStandardLengthModifier())
+ HandleNonStandardLengthModifier(LM, startSpecifier, specifierLen);
+ if (!FS.hasStandardConversionSpecifier(S.getLangOpts()))
+ HandleNonStandardConversionSpecifier(CS, startSpecifier, specifierLen);
+ if (!FS.hasStandardLengthConversionCombination())
+ HandleNonStandardConversionSpecification(LM, CS, startSpecifier,
+ specifierLen);
+
+ // The remaining checks depend on the data arguments.
+ if (HasVAListArg)
+ return true;
+
+ if (!CheckNumArgs(FS, CS, startSpecifier, specifierLen, argIndex))
+ return false;
+
+ // Check that the argument type matches the format specifier.
+ const Expr *Ex = getDataArg(argIndex);
+ const analyze_scanf::ScanfArgTypeResult &ATR = FS.getArgType(S.Context);
+ if (ATR.isValid() && !ATR.matchesType(S.Context, Ex->getType())) {
+ ScanfSpecifier fixedFS = FS;
+ bool success = fixedFS.fixType(Ex->getType(), S.getLangOpts(),
+ S.Context);
+
+ if (success) {
+ // Get the fix string from the fixed format specifier.
+ SmallString<128> buf;
+ llvm::raw_svector_ostream os(buf);
+ fixedFS.toString(os);
+
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_printf_conversion_argument_type_mismatch)
+ << ATR.getRepresentativeTypeName(S.Context) << Ex->getType()
+ << Ex->getSourceRange(),
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen),
+ FixItHint::CreateReplacement(
+ getSpecifierRange(startSpecifier, specifierLen),
+ os.str()));
+ } else {
+ EmitFormatDiagnostic(
+ S.PDiag(diag::warn_printf_conversion_argument_type_mismatch)
+ << ATR.getRepresentativeTypeName(S.Context) << Ex->getType()
+ << Ex->getSourceRange(),
+ getLocationOfByte(CS.getStart()),
+ /*IsStringLocation*/true,
+ getSpecifierRange(startSpecifier, specifierLen));
+ }
+ }
+
+ return true;
+}
+
+void Sema::CheckFormatString(const StringLiteral *FExpr,
+ const Expr *OrigFormatExpr,
+ Expr **Args, unsigned NumArgs,
+ bool HasVAListArg, unsigned format_idx,
+ unsigned firstDataArg, FormatStringType Type,
+ bool inFunctionCall) {
+
+ // CHECK: is the format string a wide literal?
+ if (!FExpr->isAscii()) {
+ CheckFormatHandler::EmitFormatDiagnostic(
+ *this, inFunctionCall, Args[format_idx],
+ PDiag(diag::warn_format_string_is_wide_literal), FExpr->getLocStart(),
+ /*IsStringLocation*/true, OrigFormatExpr->getSourceRange());
+ return;
+ }
+
+ // Str - The format string. NOTE: this is NOT null-terminated!
+ StringRef StrRef = FExpr->getString();
+ const char *Str = StrRef.data();
+ unsigned StrLen = StrRef.size();
+ const unsigned numDataArgs = NumArgs - firstDataArg;
+
+ // CHECK: empty format string?
+ if (StrLen == 0 && numDataArgs > 0) {
+ CheckFormatHandler::EmitFormatDiagnostic(
+ *this, inFunctionCall, Args[format_idx],
+ PDiag(diag::warn_empty_format_string), FExpr->getLocStart(),
+ /*IsStringLocation*/true, OrigFormatExpr->getSourceRange());
+ return;
+ }
+
+ if (Type == FST_Printf || Type == FST_NSString) {
+ CheckPrintfHandler H(*this, FExpr, OrigFormatExpr, firstDataArg,
+ numDataArgs, isa<ObjCStringLiteral>(OrigFormatExpr),
+ Str, HasVAListArg, Args, NumArgs, format_idx,
+ inFunctionCall);
+
+ if (!analyze_format_string::ParsePrintfString(H, Str, Str + StrLen,
+ getLangOpts()))
+ H.DoneProcessing();
+ } else if (Type == FST_Scanf) {
+ CheckScanfHandler H(*this, FExpr, OrigFormatExpr, firstDataArg,
+ numDataArgs, isa<ObjCStringLiteral>(OrigFormatExpr),
+ Str, HasVAListArg, Args, NumArgs, format_idx,
+ inFunctionCall);
+
+ if (!analyze_format_string::ParseScanfString(H, Str, Str + StrLen,
+ getLangOpts()))
+ H.DoneProcessing();
+ } // TODO: handle other formats
+}
+
+//===--- CHECK: Standard memory functions ---------------------------------===//
+
+/// \brief Determine whether the given type is a dynamic class type (e.g.,
+/// whether it has a vtable).
+static bool isDynamicClassType(QualType T) {
+ if (CXXRecordDecl *Record = T->getAsCXXRecordDecl())
+ if (CXXRecordDecl *Definition = Record->getDefinition())
+ if (Definition->isDynamicClass())
+ return true;
+
+ return false;
+}
+
+/// \brief If E is a sizeof expression, returns its argument expression,
+/// otherwise returns NULL.
+static const Expr *getSizeOfExprArg(const Expr* E) {
+ if (const UnaryExprOrTypeTraitExpr *SizeOf =
+ dyn_cast<UnaryExprOrTypeTraitExpr>(E))
+ if (SizeOf->getKind() == clang::UETT_SizeOf && !SizeOf->isArgumentType())
+ return SizeOf->getArgumentExpr()->IgnoreParenImpCasts();
+
+ return 0;
+}
+
+/// \brief If E is a sizeof expression, returns its argument type.
+static QualType getSizeOfArgType(const Expr* E) {
+ if (const UnaryExprOrTypeTraitExpr *SizeOf =
+ dyn_cast<UnaryExprOrTypeTraitExpr>(E))
+ if (SizeOf->getKind() == clang::UETT_SizeOf)
+ return SizeOf->getTypeOfArgument();
+
+ return QualType();
+}
+
+/// \brief Check for dangerous or invalid arguments to memset().
+///
+/// This issues warnings on known problematic, dangerous or unspecified
+/// arguments to the standard 'memset', 'memcpy', 'memmove', and 'memcmp'
+/// function calls.
+///
+/// \param Call The call expression to diagnose.
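+///
+/// Illustrative example (hypothetical): 'memset(p, 0, sizeof(p))' with a
+/// pointer 'p' almost always means 'sizeof(*p)'; the checks below diagnose
+/// this and similar sizeof misuses.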
+void Sema::CheckMemaccessArguments(const CallExpr *Call,
+ unsigned BId,
+ IdentifierInfo *FnName) {
+ assert(BId != 0);
+
+  // It is possible to have a non-standard definition of memset. Validate
+  // that we have enough arguments, and if not, abort further checking.
+ unsigned ExpectedNumArgs = (BId == Builtin::BIstrndup ? 2 : 3);
+ if (Call->getNumArgs() < ExpectedNumArgs)
+ return;
+
+ unsigned LastArg = (BId == Builtin::BImemset ||
+ BId == Builtin::BIstrndup ? 1 : 2);
+ unsigned LenArg = (BId == Builtin::BIstrndup ? 1 : 2);
+ const Expr *LenExpr = Call->getArg(LenArg)->IgnoreParenImpCasts();
+
+ // We have special checking when the length is a sizeof expression.
+ QualType SizeOfArgTy = getSizeOfArgType(LenExpr);
+ const Expr *SizeOfArg = getSizeOfExprArg(LenExpr);
+ llvm::FoldingSetNodeID SizeOfArgID;
+
+ for (unsigned ArgIdx = 0; ArgIdx != LastArg; ++ArgIdx) {
+ const Expr *Dest = Call->getArg(ArgIdx)->IgnoreParenImpCasts();
+ SourceRange ArgRange = Call->getArg(ArgIdx)->getSourceRange();
+
+ QualType DestTy = Dest->getType();
+ if (const PointerType *DestPtrTy = DestTy->getAs<PointerType>()) {
+ QualType PointeeTy = DestPtrTy->getPointeeType();
+
+ // Never warn about void type pointers. This can be used to suppress
+ // false positives.
+ if (PointeeTy->isVoidType())
+ continue;
+
+ // Catch "memset(p, 0, sizeof(p))" -- needs to be sizeof(*p). Do this by
+ // actually comparing the expressions for equality. Because computing the
+ // expression IDs can be expensive, we only do this if the diagnostic is
+ // enabled.
+ if (SizeOfArg &&
+ Diags.getDiagnosticLevel(diag::warn_sizeof_pointer_expr_memaccess,
+ SizeOfArg->getExprLoc())) {
+ // We only compute IDs for expressions if the warning is enabled, and
+ // cache the sizeof arg's ID.
+ if (SizeOfArgID == llvm::FoldingSetNodeID())
+ SizeOfArg->Profile(SizeOfArgID, Context, true);
+ llvm::FoldingSetNodeID DestID;
+ Dest->Profile(DestID, Context, true);
+ if (DestID == SizeOfArgID) {
+ // TODO: For strncpy() and friends, this could suggest sizeof(dst)
+ // over sizeof(src) as well.
+ unsigned ActionIdx = 0; // Default is to suggest dereferencing.
+ if (const UnaryOperator *UnaryOp = dyn_cast<UnaryOperator>(Dest))
+ if (UnaryOp->getOpcode() == UO_AddrOf)
+              ActionIdx = 1; // If it's an address-of operator, just remove it.
+ if (Context.getTypeSize(PointeeTy) == Context.getCharWidth())
+ ActionIdx = 2; // If the pointee's size is sizeof(char),
+ // suggest an explicit length.
+ unsigned DestSrcSelect =
+ (BId == Builtin::BIstrndup ? 1 : ArgIdx);
+ DiagRuntimeBehavior(SizeOfArg->getExprLoc(), Dest,
+ PDiag(diag::warn_sizeof_pointer_expr_memaccess)
+ << FnName << DestSrcSelect << ActionIdx
+ << Dest->getSourceRange()
+ << SizeOfArg->getSourceRange());
+ break;
+ }
+ }
+
+ // Also check for cases where the sizeof argument is the exact same
+ // type as the memory argument, and where it points to a user-defined
+ // record type.
+ if (SizeOfArgTy != QualType()) {
+ if (PointeeTy->isRecordType() &&
+ Context.typesAreCompatible(SizeOfArgTy, DestTy)) {
+ DiagRuntimeBehavior(LenExpr->getExprLoc(), Dest,
+ PDiag(diag::warn_sizeof_pointer_type_memaccess)
+ << FnName << SizeOfArgTy << ArgIdx
+ << PointeeTy << Dest->getSourceRange()
+ << LenExpr->getSourceRange());
+ break;
+ }
+ }
+
+ // Always complain about dynamic classes.
+ if (isDynamicClassType(PointeeTy)) {
+
+ unsigned OperationType = 0;
+ // "overwritten" if we're warning about the destination for any call
+ // but memcmp; otherwise a verb appropriate to the call.
+ if (ArgIdx != 0 || BId == Builtin::BImemcmp) {
+ if (BId == Builtin::BImemcpy)
+ OperationType = 1;
+          else if (BId == Builtin::BImemmove)
+ OperationType = 2;
+ else if (BId == Builtin::BImemcmp)
+ OperationType = 3;
+ }
+
+ DiagRuntimeBehavior(
+ Dest->getExprLoc(), Dest,
+ PDiag(diag::warn_dyn_class_memaccess)
+ << (BId == Builtin::BImemcmp ? ArgIdx + 2 : ArgIdx)
+ << FnName << PointeeTy
+ << OperationType
+ << Call->getCallee()->getSourceRange());
+ } else if (PointeeTy.hasNonTrivialObjCLifetime() &&
+ BId != Builtin::BImemset)
+ DiagRuntimeBehavior(
+ Dest->getExprLoc(), Dest,
+ PDiag(diag::warn_arc_object_memaccess)
+ << ArgIdx << FnName << PointeeTy
+ << Call->getCallee()->getSourceRange());
+ else
+ continue;
+
+ DiagRuntimeBehavior(
+ Dest->getExprLoc(), Dest,
+ PDiag(diag::note_bad_memaccess_silence)
+ << FixItHint::CreateInsertion(ArgRange.getBegin(), "(void*)"));
+ break;
+ }
+ }
+}
+
+// A little helper routine: ignore addition and subtraction of integer literals.
+// This intentionally does not ignore all integer constant expressions because
+// we don't want to remove sizeof().
+static const Expr *ignoreLiteralAdditions(const Expr *Ex, ASTContext &Ctx) {
+ Ex = Ex->IgnoreParenCasts();
+
+ for (;;) {
+ const BinaryOperator * BO = dyn_cast<BinaryOperator>(Ex);
+ if (!BO || !BO->isAdditiveOp())
+ break;
+
+ const Expr *RHS = BO->getRHS()->IgnoreParenCasts();
+ const Expr *LHS = BO->getLHS()->IgnoreParenCasts();
+
+ if (isa<IntegerLiteral>(RHS))
+ Ex = LHS;
+ else if (isa<IntegerLiteral>(LHS))
+ Ex = RHS;
+ else
+ break;
+ }
+
+ return Ex;
+}
+
+// Warn if the user has made the 'size' argument to strlcpy or strlcat
+// be the size of the source, instead of the destination.
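+// Illustrative example (hypothetical): 'strlcpy(dst, src, sizeof(src))' --
+// the bound should normally be the destination size, i.e. 'sizeof(dst)'.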
+void Sema::CheckStrlcpycatArguments(const CallExpr *Call,
+ IdentifierInfo *FnName) {
+
+ // Don't crash if the user has the wrong number of arguments
+ if (Call->getNumArgs() != 3)
+ return;
+
+ const Expr *SrcArg = ignoreLiteralAdditions(Call->getArg(1), Context);
+ const Expr *SizeArg = ignoreLiteralAdditions(Call->getArg(2), Context);
+ const Expr *CompareWithSrc = NULL;
+
+ // Look for 'strlcpy(dst, x, sizeof(x))'
+ if (const Expr *Ex = getSizeOfExprArg(SizeArg))
+ CompareWithSrc = Ex;
+ else {
+ // Look for 'strlcpy(dst, x, strlen(x))'
+ if (const CallExpr *SizeCall = dyn_cast<CallExpr>(SizeArg)) {
+ if (SizeCall->isBuiltinCall() == Builtin::BIstrlen
+ && SizeCall->getNumArgs() == 1)
+ CompareWithSrc = ignoreLiteralAdditions(SizeCall->getArg(0), Context);
+ }
+ }
+
+ if (!CompareWithSrc)
+ return;
+
+ // Determine if the argument to sizeof/strlen is equal to the source
+  // argument. In principle there are all kinds of things you could do
+ // here, for instance creating an == expression and evaluating it with
+ // EvaluateAsBooleanCondition, but this uses a more direct technique:
+ const DeclRefExpr *SrcArgDRE = dyn_cast<DeclRefExpr>(SrcArg);
+ if (!SrcArgDRE)
+ return;
+
+ const DeclRefExpr *CompareWithSrcDRE = dyn_cast<DeclRefExpr>(CompareWithSrc);
+ if (!CompareWithSrcDRE ||
+ SrcArgDRE->getDecl() != CompareWithSrcDRE->getDecl())
+ return;
+
+ const Expr *OriginalSizeArg = Call->getArg(2);
+ Diag(CompareWithSrcDRE->getLocStart(), diag::warn_strlcpycat_wrong_size)
+ << OriginalSizeArg->getSourceRange() << FnName;
+
+ // Output a FIXIT hint if the destination is an array (rather than a
+ // pointer to an array). This could be enhanced to handle some
+ // pointers if we know the actual size, like if DstArg is 'array+2'
+ // we could say 'sizeof(array)-2'.
+ const Expr *DstArg = Call->getArg(0)->IgnoreParenImpCasts();
+ QualType DstArgTy = DstArg->getType();
+
+ // Only handle constant-sized or VLAs, but not flexible members.
+ if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(DstArgTy)) {
+ // Only issue the FIXIT for arrays of size > 1.
+ if (CAT->getSize().getSExtValue() <= 1)
+ return;
+ } else if (!DstArgTy->isVariableArrayType()) {
+ return;
+ }
+
+ SmallString<128> sizeString;
+ llvm::raw_svector_ostream OS(sizeString);
+ OS << "sizeof(";
+ DstArg->printPretty(OS, Context, 0, getPrintingPolicy());
+ OS << ")";
+
+ Diag(OriginalSizeArg->getLocStart(), diag::note_strlcpycat_wrong_size)
+ << FixItHint::CreateReplacement(OriginalSizeArg->getSourceRange(),
+ OS.str());
+}
+
+/// Check if two expressions refer to the same declaration.
+static bool referToTheSameDecl(const Expr *E1, const Expr *E2) {
+ if (const DeclRefExpr *D1 = dyn_cast_or_null<DeclRefExpr>(E1))
+ if (const DeclRefExpr *D2 = dyn_cast_or_null<DeclRefExpr>(E2))
+ return D1->getDecl() == D2->getDecl();
+ return false;
+}
+
+static const Expr *getStrlenExprArg(const Expr *E) {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD || FD->getMemoryFunctionKind() != Builtin::BIstrlen)
+ return 0;
+ return CE->getArg(0)->IgnoreParenCasts();
+ }
+ return 0;
+}
+
+// Warn about anti-patterns used as the 'size' argument to strncat.
+// The correct size argument should look like the following:
+//   strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
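+// Anti-patterns flagged below (illustrative): 'strncat(dst, src, sizeof(dst))'
+// and 'strncat(dst, src, sizeof(src))', both of which can overflow 'dst'.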
+void Sema::CheckStrncatArguments(const CallExpr *CE,
+ IdentifierInfo *FnName) {
+ // Don't crash if the user has the wrong number of arguments.
+ if (CE->getNumArgs() < 3)
+ return;
+ const Expr *DstArg = CE->getArg(0)->IgnoreParenCasts();
+ const Expr *SrcArg = CE->getArg(1)->IgnoreParenCasts();
+ const Expr *LenArg = CE->getArg(2)->IgnoreParenCasts();
+
+ // Identify common expressions, which are wrongly used as the size argument
+ // to strncat and may lead to buffer overflows.
+ unsigned PatternType = 0;
+ if (const Expr *SizeOfArg = getSizeOfExprArg(LenArg)) {
+ // - sizeof(dst)
+ if (referToTheSameDecl(SizeOfArg, DstArg))
+ PatternType = 1;
+ // - sizeof(src)
+ else if (referToTheSameDecl(SizeOfArg, SrcArg))
+ PatternType = 2;
+ } else if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(LenArg)) {
+ if (BE->getOpcode() == BO_Sub) {
+ const Expr *L = BE->getLHS()->IgnoreParenCasts();
+ const Expr *R = BE->getRHS()->IgnoreParenCasts();
+ // - sizeof(dst) - strlen(dst)
+ if (referToTheSameDecl(DstArg, getSizeOfExprArg(L)) &&
+ referToTheSameDecl(DstArg, getStrlenExprArg(R)))
+ PatternType = 1;
+ // - sizeof(src) - (anything)
+ else if (referToTheSameDecl(SrcArg, getSizeOfExprArg(L)))
+ PatternType = 2;
+ }
+ }
+
+ if (PatternType == 0)
+ return;
+
+ // Generate the diagnostic.
+ SourceLocation SL = LenArg->getLocStart();
+ SourceRange SR = LenArg->getSourceRange();
+ SourceManager &SM = PP.getSourceManager();
+
+ // If the function is defined as a builtin macro, do not show macro expansion.
+ if (SM.isMacroArgExpansion(SL)) {
+ SL = SM.getSpellingLoc(SL);
+ SR = SourceRange(SM.getSpellingLoc(SR.getBegin()),
+ SM.getSpellingLoc(SR.getEnd()));
+ }
+
+ if (PatternType == 1)
+ Diag(SL, diag::warn_strncat_large_size) << SR;
+ else
+ Diag(SL, diag::warn_strncat_src_size) << SR;
+
+ // Output a FIXIT hint if the destination is an array (rather than a
+ // pointer to an array). This could be enhanced to handle some
+ // pointers if we know the actual size, like if DstArg is 'array+2'
+ // we could say 'sizeof(array)-2'.
+ QualType DstArgTy = DstArg->getType();
+
+ // Only handle constant-sized or VLAs, but not flexible members.
+ if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(DstArgTy)) {
+ // Only issue the FIXIT for arrays of size > 1.
+ if (CAT->getSize().getSExtValue() <= 1)
+ return;
+ } else if (!DstArgTy->isVariableArrayType()) {
+ return;
+ }
+
+ SmallString<128> sizeString;
+ llvm::raw_svector_ostream OS(sizeString);
+ OS << "sizeof(";
+ DstArg->printPretty(OS, Context, 0, getPrintingPolicy());
+ OS << ") - ";
+ OS << "strlen(";
+ DstArg->printPretty(OS, Context, 0, getPrintingPolicy());
+ OS << ") - 1";
+
+ Diag(SL, diag::note_strncat_wrong_size)
+ << FixItHint::CreateReplacement(SR, OS.str());
+}
+
+//===--- CHECK: Return Address of Stack Variable --------------------------===//
+
+static Expr *EvalVal(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars);
+static Expr *EvalAddr(Expr* E, SmallVectorImpl<DeclRefExpr *> &refVars);
+
+/// CheckReturnStackAddr - Check if a return statement returns the address
+/// of a stack variable.
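+///
+/// Illustrative example (hypothetical):
+///   int *f() { int x; return &x; }  // returns the address of a stack variable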
+void
+Sema::CheckReturnStackAddr(Expr *RetValExp, QualType lhsType,
+ SourceLocation ReturnLoc) {
+
+ Expr *stackE = 0;
+ SmallVector<DeclRefExpr *, 8> refVars;
+
+ // Perform checking for returned stack addresses, local blocks,
+ // label addresses or references to temporaries.
+ if (lhsType->isPointerType() ||
+ (!getLangOpts().ObjCAutoRefCount && lhsType->isBlockPointerType())) {
+ stackE = EvalAddr(RetValExp, refVars);
+ } else if (lhsType->isReferenceType()) {
+ stackE = EvalVal(RetValExp, refVars);
+ }
+
+ if (stackE == 0)
+ return; // Nothing suspicious was found.
+
+ SourceLocation diagLoc;
+ SourceRange diagRange;
+ if (refVars.empty()) {
+ diagLoc = stackE->getLocStart();
+ diagRange = stackE->getSourceRange();
+ } else {
+ // We followed through a reference variable. 'stackE' contains the
+ // problematic expression but we will warn at the return statement pointing
+ // at the reference variable. We will later display the "trail" of
+ // reference variables using notes.
+ diagLoc = refVars[0]->getLocStart();
+ diagRange = refVars[0]->getSourceRange();
+ }
+
+ if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(stackE)) { //address of local var.
+ Diag(diagLoc, lhsType->isReferenceType() ? diag::warn_ret_stack_ref
+ : diag::warn_ret_stack_addr)
+ << DR->getDecl()->getDeclName() << diagRange;
+ } else if (isa<BlockExpr>(stackE)) { // local block.
+ Diag(diagLoc, diag::err_ret_local_block) << diagRange;
+ } else if (isa<AddrLabelExpr>(stackE)) { // address of label.
+ Diag(diagLoc, diag::warn_ret_addr_label) << diagRange;
+ } else { // local temporary.
+ Diag(diagLoc, lhsType->isReferenceType() ? diag::warn_ret_local_temp_ref
+ : diag::warn_ret_local_temp_addr)
+ << diagRange;
+ }
+
+ // Display the "trail" of reference variables that we followed until we
+ // found the problematic expression using notes.
+ for (unsigned i = 0, e = refVars.size(); i != e; ++i) {
+ VarDecl *VD = cast<VarDecl>(refVars[i]->getDecl());
+ // If this var binds to another reference var, show the range of the next
+ // var, otherwise the var binds to the problematic expression, in which case
+ // show the range of the expression.
+ SourceRange range = (i < e-1) ? refVars[i+1]->getSourceRange()
+ : stackE->getSourceRange();
+ Diag(VD->getLocation(), diag::note_ref_var_local_bind)
+ << VD->getDeclName() << range;
+ }
+}
+
+/// EvalAddr - EvalAddr and EvalVal are mutually recursive functions that
+/// check if the expression in a return statement evaluates to the address
+/// of a location on the stack, a local block, the address of a label, or a
+/// reference to a local temporary. The recursion is used to traverse the
+/// AST of the return expression, with recursion backtracking when we
+/// encounter a subexpression that (1) clearly does not lead to one of the
+/// above problematic expressions, or (2) is something we cannot determine
+/// leads to a problematic expression based on such local checking.
+///
+/// Both EvalAddr and EvalVal follow through reference variables to evaluate
+/// the expression that they point to. Such variables are added to the
+/// 'refVars' vector so that we know what the reference variable "trail" was.
+///
+/// EvalAddr processes expressions that are pointers that are used as
+/// references (and not L-values). EvalVal handles all other values.
+/// The base case of the recursion is a check for the above problematic
+/// expressions.
+///
+/// This implementation handles:
+///
+/// * pointer-to-pointer casts
+/// * implicit conversions from array references to pointers
+/// * taking the address of fields
+/// * arbitrary interplay between "&" and "*" operators
+/// * pointer arithmetic from an address of a stack variable
+/// * taking the address of an array element where the array is on the stack
+static Expr *EvalAddr(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars) {
+ if (E->isTypeDependent())
+ return NULL;
+
+ // We should only be called for evaluating pointer expressions.
+ assert((E->getType()->isAnyPointerType() ||
+ E->getType()->isBlockPointerType() ||
+ E->getType()->isObjCQualifiedIdType()) &&
+ "EvalAddr only works on pointers");
+
+ E = E->IgnoreParens();
+
+ // Our "symbolic interpreter" is just a dispatch off the currently
+ // viewed AST node. We then recursively traverse the AST by calling
+ // EvalAddr and EvalVal appropriately.
+ switch (E->getStmtClass()) {
+ case Stmt::DeclRefExprClass: {
+ DeclRefExpr *DR = cast<DeclRefExpr>(E);
+
+ if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl()))
+ // If this is a reference variable, follow through to the expression that
+ // it points to.
+ if (V->hasLocalStorage() &&
+ V->getType()->isReferenceType() && V->hasInit()) {
+ // Add the reference variable to the "trail".
+ refVars.push_back(DR);
+ return EvalAddr(V->getInit(), refVars);
+ }
+
+ return NULL;
+ }
+
+ case Stmt::UnaryOperatorClass: {
+    // The only unary operator that makes sense to handle here
+    // is AddrOf. All others don't make sense as pointers.
+ UnaryOperator *U = cast<UnaryOperator>(E);
+
+ if (U->getOpcode() == UO_AddrOf)
+ return EvalVal(U->getSubExpr(), refVars);
+ else
+ return NULL;
+ }
+
+ case Stmt::BinaryOperatorClass: {
+ // Handle pointer arithmetic. All other binary operators are not valid
+ // in this context.
+ BinaryOperator *B = cast<BinaryOperator>(E);
+ BinaryOperatorKind op = B->getOpcode();
+
+ if (op != BO_Add && op != BO_Sub)
+ return NULL;
+
+ Expr *Base = B->getLHS();
+
+ // Determine which argument is the real pointer base. It could be
+ // the RHS argument instead of the LHS.
+ if (!Base->getType()->isPointerType()) Base = B->getRHS();
+
+ assert (Base->getType()->isPointerType());
+ return EvalAddr(Base, refVars);
+ }
+
+  // For conditional operators we need to see if either the LHS or RHS is
+  // a valid DeclRefExpr*. If one of them is valid, we return it.
+ case Stmt::ConditionalOperatorClass: {
+ ConditionalOperator *C = cast<ConditionalOperator>(E);
+
+ // Handle the GNU extension for missing LHS.
+ if (Expr *lhsExpr = C->getLHS()) {
+ // In C++, we can have a throw-expression, which has 'void' type.
+ if (!lhsExpr->getType()->isVoidType())
+ if (Expr* LHS = EvalAddr(lhsExpr, refVars))
+ return LHS;
+ }
+
+ // In C++, we can have a throw-expression, which has 'void' type.
+ if (C->getRHS()->getType()->isVoidType())
+ return NULL;
+
+ return EvalAddr(C->getRHS(), refVars);
+ }
+
+ case Stmt::BlockExprClass:
+ if (cast<BlockExpr>(E)->getBlockDecl()->hasCaptures())
+ return E; // local block.
+ return NULL;
+
+ case Stmt::AddrLabelExprClass:
+ return E; // address of label.
+
+ case Stmt::ExprWithCleanupsClass:
+ return EvalAddr(cast<ExprWithCleanups>(E)->getSubExpr(), refVars);
+
+ // For casts, we need to handle conversions from arrays to
+ // pointer values, and pointer-to-pointer conversions.
+ case Stmt::ImplicitCastExprClass:
+ case Stmt::CStyleCastExprClass:
+ case Stmt::CXXFunctionalCastExprClass:
+ case Stmt::ObjCBridgedCastExprClass:
+ case Stmt::CXXStaticCastExprClass:
+ case Stmt::CXXDynamicCastExprClass:
+ case Stmt::CXXConstCastExprClass:
+ case Stmt::CXXReinterpretCastExprClass: {
+ Expr* SubExpr = cast<CastExpr>(E)->getSubExpr();
+ switch (cast<CastExpr>(E)->getCastKind()) {
+ case CK_BitCast:
+ case CK_LValueToRValue:
+ case CK_NoOp:
+ case CK_BaseToDerived:
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase:
+ case CK_Dynamic:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ return EvalAddr(SubExpr, refVars);
+
+ case CK_ArrayToPointerDecay:
+ return EvalVal(SubExpr, refVars);
+
+ default:
+ return 0;
+ }
+ }
+
+ case Stmt::MaterializeTemporaryExprClass:
+ if (Expr *Result = EvalAddr(
+ cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr(),
+ refVars))
+ return Result;
+
+ return E;
+
+ // Everything else: we simply don't reason about them.
+ default:
+ return NULL;
+ }
+}
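+
+// For example, given "int *f() { int x; return &x; }", EvalAddr on "&x"
+// dispatches to EvalVal on "x", which returns the DeclRefExpr for the local
+// variable; callers such as the return-of-stack-address check then use that
+// result (and the reference-variable "trail") to point their diagnostic at
+// the offending variable.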
+
+
+/// EvalVal - This function complements EvalAddr in the mutual recursion.
+/// See the comments for EvalAddr for more details.
+static Expr *EvalVal(Expr *E, SmallVectorImpl<DeclRefExpr *> &refVars) {
+do {
+ // We should only be called for evaluating non-pointer expressions, or
+ // expressions with a pointer type that are not used as references but instead
+ // are l-values (e.g., DeclRefExpr with a pointer type).
+
+ // Our "symbolic interpreter" is just a dispatch off the currently
+ // viewed AST node. We then recursively traverse the AST by calling
+ // EvalAddr and EvalVal appropriately.
+
+ E = E->IgnoreParens();
+ switch (E->getStmtClass()) {
+ case Stmt::ImplicitCastExprClass: {
+ ImplicitCastExpr *IE = cast<ImplicitCastExpr>(E);
+ if (IE->getValueKind() == VK_LValue) {
+ E = IE->getSubExpr();
+ continue;
+ }
+ return NULL;
+ }
+
+ case Stmt::ExprWithCleanupsClass:
+ return EvalVal(cast<ExprWithCleanups>(E)->getSubExpr(), refVars);
+
+ case Stmt::DeclRefExprClass: {
+ // When we hit a DeclRefExpr we are looking at code that refers to a
+ // variable's name. If it's not a reference variable we check if it has
+ // local storage within the function, and if so, return the expression.
+ DeclRefExpr *DR = cast<DeclRefExpr>(E);
+
+ if (VarDecl *V = dyn_cast<VarDecl>(DR->getDecl()))
+ if (V->hasLocalStorage()) {
+ if (!V->getType()->isReferenceType())
+ return DR;
+
+ // Reference variable, follow through to the expression that
+ // it points to.
+ if (V->hasInit()) {
+ // Add the reference variable to the "trail".
+ refVars.push_back(DR);
+ return EvalVal(V->getInit(), refVars);
+ }
+ }
+
+ return NULL;
+ }
+
+ case Stmt::UnaryOperatorClass: {
+ // The only unary operator that makes sense to handle here
+ // is Deref. All others don't resolve to a "name." This includes
+ // handling all sorts of rvalues passed to a unary operator.
+ UnaryOperator *U = cast<UnaryOperator>(E);
+
+ if (U->getOpcode() == UO_Deref)
+ return EvalAddr(U->getSubExpr(), refVars);
+
+ return NULL;
+ }
+
+ case Stmt::ArraySubscriptExprClass: {
+ // Array subscripts are potential references to data on the stack. We
+ // retrieve the DeclRefExpr* for the array variable if it indeed
+ // has local storage.
+ return EvalAddr(cast<ArraySubscriptExpr>(E)->getBase(), refVars);
+ }
+
+ case Stmt::ConditionalOperatorClass: {
+ // For conditional operators we need to see whether evaluating either the LHS
+ // or the RHS yields a non-null Expr. If one does, we return it.
+ ConditionalOperator *C = cast<ConditionalOperator>(E);
+
+ // Handle the GNU extension for missing LHS.
+ if (Expr *lhsExpr = C->getLHS())
+ if (Expr *LHS = EvalVal(lhsExpr, refVars))
+ return LHS;
+
+ return EvalVal(C->getRHS(), refVars);
+ }
+
+ // Accesses to members are potential references to data on the stack.
+ case Stmt::MemberExprClass: {
+ MemberExpr *M = cast<MemberExpr>(E);
+
+ // Check for indirect access. We only want direct field accesses.
+ if (M->isArrow())
+ return NULL;
+
+ // Check whether the member type is itself a reference, in which case
+ // we're not going to refer to the member, but to what the member refers to.
+ if (M->getMemberDecl()->getType()->isReferenceType())
+ return NULL;
+
+ return EvalVal(M->getBase(), refVars);
+ }
+
+ case Stmt::MaterializeTemporaryExprClass:
+ if (Expr *Result = EvalVal(
+ cast<MaterializeTemporaryExpr>(E)->GetTemporaryExpr(),
+ refVars))
+ return Result;
+
+ return E;
+
+ default:
+ // Check that we don't return or take the address of a reference to a
+ // temporary. This is only useful in C++.
+ if (!E->isTypeDependent() && E->isRValue())
+ return E;
+
+ // Everything else: we simply don't reason about them.
+ return NULL;
+ }
+} while (true);
+}
+
+//===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//
+
+/// Check for comparisons of floating point operands using != and ==.
+/// Issue a warning if these are not self-comparisons, as they are not likely
+/// to do what the programmer intended.
+void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
+ bool EmitWarning = true;
+
+ Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
+ Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
+
+ // Special case: check for x == x (which is OK).
+ // Do not emit warnings for such cases.
+ if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LeftExprSansParen))
+ if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RightExprSansParen))
+ if (DRL->getDecl() == DRR->getDecl())
+ EmitWarning = false;
+
+
+ // Special case: check for comparisons against literals that can be exactly
+ // represented by APFloat. In such cases, do not emit a warning. This
+ // is a heuristic: often comparison against such literals are used to
+ // detect if a value in a variable has not changed. This clearly can
+ // lead to false negatives.
+ if (EmitWarning) {
+ if (FloatingLiteral* FLL = dyn_cast<FloatingLiteral>(LeftExprSansParen)) {
+ if (FLL->isExact())
+ EmitWarning = false;
+ } else if (FloatingLiteral* FLR =
+ dyn_cast<FloatingLiteral>(RightExprSansParen)) {
+ if (FLR->isExact())
+ EmitWarning = false;
+ }
+ }
+
+ // Check for comparisons against calls to builtin functions.
+ if (EmitWarning)
+ if (CallExpr* CL = dyn_cast<CallExpr>(LeftExprSansParen))
+ if (CL->isBuiltinCall())
+ EmitWarning = false;
+
+ if (EmitWarning)
+ if (CallExpr* CR = dyn_cast<CallExpr>(RightExprSansParen))
+ if (CR->isBuiltinCall())
+ EmitWarning = false;
+
+ // Emit the diagnostic.
+ if (EmitWarning)
+ Diag(Loc, diag::warn_floatingpoint_eq)
+ << LHS->getSourceRange() << RHS->getSourceRange();
+}
+
+//===--- CHECK: Integer mixed-sign comparisons (-Wsign-compare) --------===//
+//===--- CHECK: Lossy implicit conversions (-Wconversion) --------------===//
+
+namespace {
+
+/// Structure recording the 'active' range of an integer-valued
+/// expression.
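+///
+/// For example, an 'unsigned char' value has (Width, NonNegative) == (8, true),
+/// while a 32-bit 'int' has (32, false).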
+struct IntRange {
+ /// The number of bits active in the int.
+ unsigned Width;
+
+ /// True if the int is known not to have negative values.
+ bool NonNegative;
+
+ IntRange(unsigned Width, bool NonNegative)
+ : Width(Width), NonNegative(NonNegative)
+ {}
+
+ /// Returns the range of the bool type.
+ static IntRange forBoolType() {
+ return IntRange(1, true);
+ }
+
+ /// Returns the range of an opaque value of the given integral type.
+ static IntRange forValueOfType(ASTContext &C, QualType T) {
+ return forValueOfCanonicalType(C,
+ T->getCanonicalTypeInternal().getTypePtr());
+ }
+
+ /// Returns the range of an opaque value of a canonical integral type.
+ static IntRange forValueOfCanonicalType(ASTContext &C, const Type *T) {
+ assert(T->isCanonicalUnqualified());
+
+ if (const VectorType *VT = dyn_cast<VectorType>(T))
+ T = VT->getElementType().getTypePtr();
+ if (const ComplexType *CT = dyn_cast<ComplexType>(T))
+ T = CT->getElementType().getTypePtr();
+
+ // For enum types, use the known bit width of the enumerators.
+ if (const EnumType *ET = dyn_cast<EnumType>(T)) {
+ EnumDecl *Enum = ET->getDecl();
+ if (!Enum->isCompleteDefinition())
+ return IntRange(C.getIntWidth(QualType(T, 0)), false);
+
+ unsigned NumPositive = Enum->getNumPositiveBits();
+ unsigned NumNegative = Enum->getNumNegativeBits();
+
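+ // For example, 'enum E { A = 0, B = 3 }' yields (2, true): two bits cover
+ // all enumerators and none of them are negative.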
+ return IntRange(std::max(NumPositive, NumNegative), NumNegative == 0);
+ }
+
+ const BuiltinType *BT = cast<BuiltinType>(T);
+ assert(BT->isInteger());
+
+ return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
+ }
+
+ /// Returns the "target" range of a canonical integral type, i.e.
+ /// the range of values expressible in the type.
+ ///
+ /// This matches forValueOfCanonicalType except that enums have the
+ /// full range of their type, not the range of their enumerators.
+ static IntRange forTargetOfCanonicalType(ASTContext &C, const Type *T) {
+ assert(T->isCanonicalUnqualified());
+
+ if (const VectorType *VT = dyn_cast<VectorType>(T))
+ T = VT->getElementType().getTypePtr();
+ if (const ComplexType *CT = dyn_cast<ComplexType>(T))
+ T = CT->getElementType().getTypePtr();
+ if (const EnumType *ET = dyn_cast<EnumType>(T))
+ T = C.getCanonicalType(ET->getDecl()->getIntegerType()).getTypePtr();
+
+ const BuiltinType *BT = cast<BuiltinType>(T);
+ assert(BT->isInteger());
+
+ return IntRange(C.getIntWidth(QualType(T, 0)), BT->isUnsignedInteger());
+ }
+
+ /// Returns the supremum of two ranges: i.e. their conservative merge.
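+ /// For example, join((8, true), (16, false)) is (16, false).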
+ static IntRange join(IntRange L, IntRange R) {
+ return IntRange(std::max(L.Width, R.Width),
+ L.NonNegative && R.NonNegative);
+ }
+
+ /// Returns the infimum of two ranges: i.e. their aggressive merge.
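+ /// For example, meet((8, true), (16, false)) is (8, true).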
+ static IntRange meet(IntRange L, IntRange R) {
+ return IntRange(std::min(L.Width, R.Width),
+ L.NonNegative || R.NonNegative);
+ }
+};
+
+static IntRange GetValueRange(ASTContext &C, llvm::APSInt &value,
+ unsigned MaxWidth) {
+ if (value.isSigned() && value.isNegative())
+ return IntRange(value.getMinSignedBits(), false);
+
+ if (value.getBitWidth() > MaxWidth)
+ value = value.trunc(MaxWidth);
+
+ // isNonNegative() just checks the sign bit without considering
+ // signedness.
+ return IntRange(value.getActiveBits(), true);
+}
+
+static IntRange GetValueRange(ASTContext &C, APValue &result, QualType Ty,
+ unsigned MaxWidth) {
+ if (result.isInt())
+ return GetValueRange(C, result.getInt(), MaxWidth);
+
+ if (result.isVector()) {
+ IntRange R = GetValueRange(C, result.getVectorElt(0), Ty, MaxWidth);
+ for (unsigned i = 1, e = result.getVectorLength(); i != e; ++i) {
+ IntRange El = GetValueRange(C, result.getVectorElt(i), Ty, MaxWidth);
+ R = IntRange::join(R, El);
+ }
+ return R;
+ }
+
+ if (result.isComplexInt()) {
+ IntRange R = GetValueRange(C, result.getComplexIntReal(), MaxWidth);
+ IntRange I = GetValueRange(C, result.getComplexIntImag(), MaxWidth);
+ return IntRange::join(R, I);
+ }
+
+ // This can happen with lossless casts to intptr_t of "based" lvalues.
+ // Assume it might use arbitrary bits.
+ // FIXME: The only reason we need to pass the type in here is to get
+ // the sign right on this one case. It would be nice if APValue
+ // preserved this.
+ assert(result.isLValue() || result.isAddrLabelDiff());
+ return IntRange(MaxWidth, Ty->isUnsignedIntegerOrEnumerationType());
+}
+
+/// Pseudo-evaluate the given integer expression, estimating the
+/// range of values it might take.
+///
+/// \param MaxWidth - the width to which the value will be truncated
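+///
+/// For example, 'x & 0xFF' is known to need at most 8 bits and to be
+/// non-negative, regardless of the range of 'x'.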
+static IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
+ E = E->IgnoreParens();
+
+ // Try a full evaluation first.
+ Expr::EvalResult result;
+ if (E->EvaluateAsRValue(result, C))
+ return GetValueRange(C, result.Val, E->getType(), MaxWidth);
+
+ // I think we only want to look through implicit casts here; if the
+ // user has an explicit widening cast, we should treat the value as
+ // being of the new, wider type.
+ if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (CE->getCastKind() == CK_NoOp || CE->getCastKind() == CK_LValueToRValue)
+ return GetExprRange(C, CE->getSubExpr(), MaxWidth);
+
+ IntRange OutputTypeRange = IntRange::forValueOfType(C, CE->getType());
+
+ bool isIntegerCast = (CE->getCastKind() == CK_IntegralCast);
+
+ // Assume that non-integer casts can span the full range of the type.
+ if (!isIntegerCast)
+ return OutputTypeRange;
+
+ IntRange SubRange
+ = GetExprRange(C, CE->getSubExpr(),
+ std::min(MaxWidth, OutputTypeRange.Width));
+
+ // Bail out if the subexpr's range is as wide as the cast type.
+ if (SubRange.Width >= OutputTypeRange.Width)
+ return OutputTypeRange;
+
+ // Otherwise, we take the smaller width, and we're non-negative if
+ // either the output type or the subexpr is.
+ return IntRange(SubRange.Width,
+ SubRange.NonNegative || OutputTypeRange.NonNegative);
+ }
+
+ if (ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
+ // If we can fold the condition, just take that operand.
+ bool CondResult;
+ if (CO->getCond()->EvaluateAsBooleanCondition(CondResult, C))
+ return GetExprRange(C, CondResult ? CO->getTrueExpr()
+ : CO->getFalseExpr(),
+ MaxWidth);
+
+ // Otherwise, conservatively merge.
+ IntRange L = GetExprRange(C, CO->getTrueExpr(), MaxWidth);
+ IntRange R = GetExprRange(C, CO->getFalseExpr(), MaxWidth);
+ return IntRange::join(L, R);
+ }
+
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ switch (BO->getOpcode()) {
+
+ // Boolean-valued operations are single-bit and positive.
+ case BO_LAnd:
+ case BO_LOr:
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ:
+ case BO_NE:
+ return IntRange::forBoolType();
+
+ // The type of the assignments is the type of the LHS, so the RHS
+ // is not necessarily the same type.
+ case BO_MulAssign:
+ case BO_DivAssign:
+ case BO_RemAssign:
+ case BO_AddAssign:
+ case BO_SubAssign:
+ case BO_XorAssign:
+ case BO_OrAssign:
+ // TODO: bitfields?
+ return IntRange::forValueOfType(C, E->getType());
+
+ // Simple assignments just pass through the RHS, which will have
+ // been coerced to the LHS type.
+ case BO_Assign:
+ // TODO: bitfields?
+ return GetExprRange(C, BO->getRHS(), MaxWidth);
+
+ // Operations with opaque sources are black-listed.
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ return IntRange::forValueOfType(C, E->getType());
+
+ // Bitwise-and uses the *infimum* of the two source ranges.
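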
+ case BO_And:
+ case BO_AndAssign:
+ return IntRange::meet(GetExprRange(C, BO->getLHS(), MaxWidth),
+ GetExprRange(C, BO->getRHS(), MaxWidth));
+
+ // Left shift gets black-listed based on a judgement call.
+ case BO_Shl:
+ // ...except that we want to treat '1 << (blah)' as logically
+ // positive. It's an important idiom.
+ if (IntegerLiteral *I
+ = dyn_cast<IntegerLiteral>(BO->getLHS()->IgnoreParenCasts())) {
+ if (I->getValue() == 1) {
+ IntRange R = IntRange::forValueOfType(C, E->getType());
+ return IntRange(R.Width, /*NonNegative*/ true);
+ }
+ }
+ // fallthrough
+
+ case BO_ShlAssign:
+ return IntRange::forValueOfType(C, E->getType());
+
+ // Right shift by a constant can narrow its left argument.
+ case BO_Shr:
+ case BO_ShrAssign: {
+ IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth);
+
+ // If the shift amount is a positive constant, drop the width by
+ // that much.
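+ // For example, a left operand with range (8, true) shifted right by a
+ // constant 4 narrows to (4, true).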
+ llvm::APSInt shift;
+ if (BO->getRHS()->isIntegerConstantExpr(shift, C) &&
+ shift.isNonNegative()) {
+ unsigned zext = shift.getZExtValue();
+ if (zext >= L.Width)
+ L.Width = (L.NonNegative ? 0 : 1);
+ else
+ L.Width -= zext;
+ }
+
+ return L;
+ }
+
+ // Comma acts as its right operand.
+ case BO_Comma:
+ return GetExprRange(C, BO->getRHS(), MaxWidth);
+
+ // Black-list pointer subtractions.
+ case BO_Sub:
+ if (BO->getLHS()->getType()->isPointerType())
+ return IntRange::forValueOfType(C, E->getType());
+ break;
+
+ // The width of a division result is mostly determined by the size
+ // of the LHS.
+ case BO_Div: {
+ // Don't 'pre-truncate' the operands.
+ unsigned opWidth = C.getIntWidth(E->getType());
+ IntRange L = GetExprRange(C, BO->getLHS(), opWidth);
+
+ // If the divisor is constant, use that.
+ llvm::APSInt divisor;
+ if (BO->getRHS()->isIntegerConstantExpr(divisor, C)) {
+ unsigned log2 = divisor.logBase2(); // floor(log_2(divisor))
+ if (log2 >= L.Width)
+ L.Width = (L.NonNegative ? 0 : 1);
+ else
+ L.Width = std::min(L.Width - log2, MaxWidth);
+ return L;
+ }
+
+ // Otherwise, just use the LHS's width.
+ IntRange R = GetExprRange(C, BO->getRHS(), opWidth);
+ return IntRange(L.Width, L.NonNegative && R.NonNegative);
+ }
+
+ // The result of a remainder can't be larger than the result of
+ // either side.
+ case BO_Rem: {
+ // Don't 'pre-truncate' the operands.
+ unsigned opWidth = C.getIntWidth(E->getType());
+ IntRange L = GetExprRange(C, BO->getLHS(), opWidth);
+ IntRange R = GetExprRange(C, BO->getRHS(), opWidth);
+
+ IntRange meet = IntRange::meet(L, R);
+ meet.Width = std::min(meet.Width, MaxWidth);
+ return meet;
+ }
+
+ // The default behavior is okay for these.
+ case BO_Mul:
+ case BO_Add:
+ case BO_Xor:
+ case BO_Or:
+ break;
+ }
+
+ // The default case is to treat the operation as if it were closed
+ // on the narrowest type that encompasses both operands.
+ IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth);
+ IntRange R = GetExprRange(C, BO->getRHS(), MaxWidth);
+ return IntRange::join(L, R);
+ }
+
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ switch (UO->getOpcode()) {
+ // Boolean-valued operations are white-listed.
+ case UO_LNot:
+ return IntRange::forBoolType();
+
+ // Operations with opaque sources are black-listed.
+ case UO_Deref:
+ case UO_AddrOf: // should be impossible
+ return IntRange::forValueOfType(C, E->getType());
+
+ default:
+ return GetExprRange(C, UO->getSubExpr(), MaxWidth);
+ }
+ }
+
+ // An offsetof expression's range is simply that of its (integer) result type.
+ if (isa<OffsetOfExpr>(E))
+ return IntRange::forValueOfType(C, E->getType());
+
+ if (FieldDecl *BitField = E->getBitField())
+ return IntRange(BitField->getBitWidthValue(C),
+ BitField->getType()->isUnsignedIntegerOrEnumerationType());
+
+ return IntRange::forValueOfType(C, E->getType());
+}
+
+static IntRange GetExprRange(ASTContext &C, Expr *E) {
+ return GetExprRange(C, E, C.getIntWidth(E->getType()));
+}
+
+/// Checks whether the given value, which currently has the given
+/// source semantics, has the same value when coerced through the
+/// target semantics.
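+///
+/// For example, the value 0.5 survives a double -> float -> double round trip
+/// bit-for-bit, while 0.1 does not.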
+static bool IsSameFloatAfterCast(const llvm::APFloat &value,
+ const llvm::fltSemantics &Src,
+ const llvm::fltSemantics &Tgt) {
+ llvm::APFloat truncated = value;
+
+ bool ignored;
+ truncated.convert(Src, llvm::APFloat::rmNearestTiesToEven, &ignored);
+ truncated.convert(Tgt, llvm::APFloat::rmNearestTiesToEven, &ignored);
+
+ return truncated.bitwiseIsEqual(value);
+}
+
+/// Checks whether the given value, which currently has the given
+/// source semantics, has the same value when coerced through the
+/// target semantics.
+///
+/// The value might be a vector of floats (or a complex number).
+static bool IsSameFloatAfterCast(const APValue &value,
+ const llvm::fltSemantics &Src,
+ const llvm::fltSemantics &Tgt) {
+ if (value.isFloat())
+ return IsSameFloatAfterCast(value.getFloat(), Src, Tgt);
+
+ if (value.isVector()) {
+ for (unsigned i = 0, e = value.getVectorLength(); i != e; ++i)
+ if (!IsSameFloatAfterCast(value.getVectorElt(i), Src, Tgt))
+ return false;
+ return true;
+ }
+
+ assert(value.isComplexFloat());
+ return (IsSameFloatAfterCast(value.getComplexFloatReal(), Src, Tgt) &&
+ IsSameFloatAfterCast(value.getComplexFloatImag(), Src, Tgt));
+}
+
+static void AnalyzeImplicitConversions(Sema &S, Expr *E, SourceLocation CC);
+
+static bool IsZero(Sema &S, Expr *E) {
+ // Suppress cases where we are comparing against an enum constant.
+ if (const DeclRefExpr *DR =
+ dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts()))
+ if (isa<EnumConstantDecl>(DR->getDecl()))
+ return false;
+
+ // Suppress cases where the '0' value is expanded from a macro.
+ if (E->getLocStart().isMacroID())
+ return false;
+
+ llvm::APSInt Value;
+ return E->isIntegerConstantExpr(Value, S.Context) && Value == 0;
+}
+
+static bool HasEnumType(Expr *E) {
+ // Strip off implicit integral promotions.
+ while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() != CK_IntegralCast &&
+ ICE->getCastKind() != CK_NoOp)
+ break;
+ E = ICE->getSubExpr();
+ }
+
+ return E->getType()->isEnumeralType();
+}
+
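+/// Warn about comparisons of an unsigned expression against zero that are
+/// always true or always false, e.g. 'u < 0' is always false and 'u >= 0' is
+/// always true when 'u' is unsigned.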
+static void CheckTrivialUnsignedComparison(Sema &S, BinaryOperator *E) {
+ BinaryOperatorKind op = E->getOpcode();
+ if (E->isValueDependent())
+ return;
+
+ if (op == BO_LT && IsZero(S, E->getRHS())) {
+ S.Diag(E->getOperatorLoc(), diag::warn_lunsigned_always_true_comparison)
+ << "< 0" << "false" << HasEnumType(E->getLHS())
+ << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
+ } else if (op == BO_GE && IsZero(S, E->getRHS())) {
+ S.Diag(E->getOperatorLoc(), diag::warn_lunsigned_always_true_comparison)
+ << ">= 0" << "true" << HasEnumType(E->getLHS())
+ << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
+ } else if (op == BO_GT && IsZero(S, E->getLHS())) {
+ S.Diag(E->getOperatorLoc(), diag::warn_runsigned_always_true_comparison)
+ << "0 >" << "false" << HasEnumType(E->getRHS())
+ << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
+ } else if (op == BO_LE && IsZero(S, E->getLHS())) {
+ S.Diag(E->getOperatorLoc(), diag::warn_runsigned_always_true_comparison)
+ << "0 <=" << "true" << HasEnumType(E->getRHS())
+ << E->getLHS()->getSourceRange() << E->getRHS()->getSourceRange();
+ }
+}
+
+/// Analyze the operands of the given comparison. Implements the
+/// fallback case from AnalyzeComparison.
+static void AnalyzeImpConvsInComparison(Sema &S, BinaryOperator *E) {
+ AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
+ AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
+}
+
+/// \brief Implements -Wsign-compare.
+///
+/// \param E the binary operator to check for warnings
+static void AnalyzeComparison(Sema &S, BinaryOperator *E) {
+ // The type the comparison is being performed in.
+ QualType T = E->getLHS()->getType();
+ assert(S.Context.hasSameUnqualifiedType(T, E->getRHS()->getType())
+ && "comparison with mismatched types");
+
+ // We don't do anything special if this isn't an unsigned integral
+ // comparison: we're only interested in integral comparisons, and
+ // signed comparisons only happen in cases we don't care to warn about.
+ //
+ // We also don't care about value-dependent expressions or expressions
+ // whose result is a constant.
+ if (!T->hasUnsignedIntegerRepresentation()
+ || E->isValueDependent() || E->isIntegerConstantExpr(S.Context))
+ return AnalyzeImpConvsInComparison(S, E);
+
+ Expr *LHS = E->getLHS()->IgnoreParenImpCasts();
+ Expr *RHS = E->getRHS()->IgnoreParenImpCasts();
+
+ // Check to see if one of the (unmodified) operands is of different
+ // signedness.
+ Expr *signedOperand, *unsignedOperand;
+ if (LHS->getType()->hasSignedIntegerRepresentation()) {
+ assert(!RHS->getType()->hasSignedIntegerRepresentation() &&
+ "unsigned comparison between two signed integer expressions?");
+ signedOperand = LHS;
+ unsignedOperand = RHS;
+ } else if (RHS->getType()->hasSignedIntegerRepresentation()) {
+ signedOperand = RHS;
+ unsignedOperand = LHS;
+ } else {
+ CheckTrivialUnsignedComparison(S, E);
+ return AnalyzeImpConvsInComparison(S, E);
+ }
+
+ // Otherwise, calculate the effective range of the signed operand.
+ IntRange signedRange = GetExprRange(S.Context, signedOperand);
+
+ // Go ahead and analyze implicit conversions in the operands. Note
+ // that we skip the implicit conversions on both sides.
+ AnalyzeImplicitConversions(S, LHS, E->getOperatorLoc());
+ AnalyzeImplicitConversions(S, RHS, E->getOperatorLoc());
+
+ // If the signed range is non-negative, -Wsign-compare won't fire,
+ // but we should still check for comparisons which are always true
+ // or false.
+ if (signedRange.NonNegative)
+ return CheckTrivialUnsignedComparison(S, E);
+
+ // For (in)equality comparisons, if the unsigned operand is a
+ // constant which cannot collide with a overflowed signed operand,
+ // then reinterpreting the signed operand as unsigned will not
+ // change the result of the comparison.
+ if (E->isEqualityOp()) {
+ unsigned comparisonWidth = S.Context.getIntWidth(T);
+ IntRange unsignedRange = GetExprRange(S.Context, unsignedOperand);
+
+ // We should never be unable to prove that the unsigned operand is
+ // non-negative.
+ assert(unsignedRange.NonNegative && "unsigned range includes negative?");
+
+ if (unsignedRange.Width < comparisonWidth)
+ return;
+ }
+
+ S.Diag(E->getOperatorLoc(), diag::warn_mixed_sign_comparison)
+ << LHS->getType() << RHS->getType()
+ << LHS->getSourceRange() << RHS->getSourceRange();
+}
+
+/// Analyzes an attempt to assign the given value to a bitfield.
+///
+/// Returns true if there was something fishy about the attempt.
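+///
+/// For example, assigning the constant 8 to a 3-bit unsigned bitfield stores 0,
+/// which is diagnosed as a loss of precision.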
+static bool AnalyzeBitFieldAssignment(Sema &S, FieldDecl *Bitfield, Expr *Init,
+ SourceLocation InitLoc) {
+ assert(Bitfield->isBitField());
+ if (Bitfield->isInvalidDecl())
+ return false;
+
+ // White-list bool bitfields.
+ if (Bitfield->getType()->isBooleanType())
+ return false;
+
+ // Ignore value- or type-dependent expressions.
+ if (Bitfield->getBitWidth()->isValueDependent() ||
+ Bitfield->getBitWidth()->isTypeDependent() ||
+ Init->isValueDependent() ||
+ Init->isTypeDependent())
+ return false;
+
+ Expr *OriginalInit = Init->IgnoreParenImpCasts();
+
+ llvm::APSInt Value;
+ if (!OriginalInit->EvaluateAsInt(Value, S.Context, Expr::SE_AllowSideEffects))
+ return false;
+
+ unsigned OriginalWidth = Value.getBitWidth();
+ unsigned FieldWidth = Bitfield->getBitWidthValue(S.Context);
+
+ if (OriginalWidth <= FieldWidth)
+ return false;
+
+ // Compute the value which the bitfield will contain.
+ llvm::APSInt TruncatedValue = Value.trunc(FieldWidth);
+ TruncatedValue.setIsSigned(Bitfield->getType()->isSignedIntegerType());
+
+ // Check whether the stored value is equal to the original value.
+ TruncatedValue = TruncatedValue.extend(OriginalWidth);
+ if (Value == TruncatedValue)
+ return false;
+
+ // Special-case bitfields of width 1: booleans are naturally 0/1, and
+ // therefore don't strictly fit into a signed bitfield of width 1.
+ if (FieldWidth == 1 && Value == 1)
+ return false;
+
+ std::string PrettyValue = Value.toString(10);
+ std::string PrettyTrunc = TruncatedValue.toString(10);
+
+ S.Diag(InitLoc, diag::warn_impcast_bitfield_precision_constant)
+ << PrettyValue << PrettyTrunc << OriginalInit->getType()
+ << Init->getSourceRange();
+
+ return true;
+}
+
+/// Analyze the given simple or compound assignment for warning-worthy
+/// operations.
+static void AnalyzeAssignment(Sema &S, BinaryOperator *E) {
+ // Just recurse on the LHS.
+ AnalyzeImplicitConversions(S, E->getLHS(), E->getOperatorLoc());
+
+ // We want to recurse on the RHS as normal unless we're assigning to
+ // a bitfield.
+ if (FieldDecl *Bitfield = E->getLHS()->getBitField()) {
+ if (AnalyzeBitFieldAssignment(S, Bitfield, E->getRHS(),
+ E->getOperatorLoc())) {
+ // Recurse, ignoring any implicit conversions on the RHS.
+ return AnalyzeImplicitConversions(S, E->getRHS()->IgnoreParenImpCasts(),
+ E->getOperatorLoc());
+ }
+ }
+
+ AnalyzeImplicitConversions(S, E->getRHS(), E->getOperatorLoc());
+}
+
+/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
+static void DiagnoseImpCast(Sema &S, Expr *E, QualType SourceType, QualType T,
+ SourceLocation CContext, unsigned diag,
+ bool pruneControlFlow = false) {
+ if (pruneControlFlow) {
+ S.DiagRuntimeBehavior(E->getExprLoc(), E,
+ S.PDiag(diag)
+ << SourceType << T << E->getSourceRange()
+ << SourceRange(CContext));
+ return;
+ }
+ S.Diag(E->getExprLoc(), diag)
+ << SourceType << T << E->getSourceRange() << SourceRange(CContext);
+}
+
+/// Diagnose an implicit cast; purely a helper for CheckImplicitConversion.
+static void DiagnoseImpCast(Sema &S, Expr *E, QualType T,
+ SourceLocation CContext, unsigned diag,
+ bool pruneControlFlow = false) {
+ DiagnoseImpCast(S, E, E->getType(), T, CContext, diag, pruneControlFlow);
+}
+
+/// Diagnose an implicit cast from a literal expression. Does not warn when the
+/// cast wouldn't lose information.
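+/// For example, 'int x = 1e4;' converts exactly and is not diagnosed, while
+/// 'int x = 1.5;' is.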
+void DiagnoseFloatingLiteralImpCast(Sema &S, FloatingLiteral *FL, QualType T,
+ SourceLocation CContext) {
+ // Try to convert the literal exactly to an integer. If we can, don't warn.
+ bool isExact = false;
+ const llvm::APFloat &Value = FL->getValue();
+ llvm::APSInt IntegerValue(S.Context.getIntWidth(T),
+ T->hasUnsignedIntegerRepresentation());
+ if (Value.convertToInteger(IntegerValue,
+ llvm::APFloat::rmTowardZero, &isExact)
+ == llvm::APFloat::opOK && isExact)
+ return;
+
+ S.Diag(FL->getExprLoc(), diag::warn_impcast_literal_float_to_integer)
+ << FL->getType() << T << FL->getSourceRange() << SourceRange(CContext);
+}
+
+std::string PrettyPrintInRange(const llvm::APSInt &Value, IntRange Range) {
+ if (!Range.Width) return "0";
+
+ llvm::APSInt ValueInRange = Value;
+ ValueInRange.setIsSigned(!Range.NonNegative);
+ ValueInRange = ValueInRange.trunc(Range.Width);
+ return ValueInRange.toString(10);
+}
+
+void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
+ SourceLocation CC, bool *ICContext = 0) {
+ if (E->isTypeDependent() || E->isValueDependent()) return;
+
+ const Type *Source = S.Context.getCanonicalType(E->getType()).getTypePtr();
+ const Type *Target = S.Context.getCanonicalType(T).getTypePtr();
+ if (Source == Target) return;
+ if (Target->isDependentType()) return;
+
+ // If the conversion context location is invalid don't complain. We also
+ // don't want to emit a warning if the issue occurs from the expansion of
+ // a system macro. The problem is that 'getSpellingLoc()' is slow, so we
+ // delay this check as long as possible. Once we detect we are in that
+ // scenario, we just return.
+ if (CC.isInvalid())
+ return;
+
+ // Diagnose implicit casts to bool.
+ if (Target->isSpecificBuiltinType(BuiltinType::Bool)) {
+ if (isa<StringLiteral>(E))
+ // Warn on string literal to bool. String literals in logical expressions,
+ // for instance assert(0 && "error here"), are excluded by a check in
+ // AnalyzeImplicitConversions().
+ return DiagnoseImpCast(S, E, T, CC,
+ diag::warn_impcast_string_literal_to_bool);
+ if (Source->isFunctionType()) {
+ // Warn on function to bool. Checks free functions and static member
+ // functions. Weakly imported functions are excluded from the check,
+ // since it's common to test their value to check whether the linker
+ // found a definition for them.
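+ // A typical instance is writing 'if (foo)' where 'foo' names a function and
+ // 'if (foo())' was intended.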
+ ValueDecl *D = 0;
+ if (DeclRefExpr* R = dyn_cast<DeclRefExpr>(E)) {
+ D = R->getDecl();
+ } else if (MemberExpr *M = dyn_cast<MemberExpr>(E)) {
+ D = M->getMemberDecl();
+ }
+
+ if (D && !D->isWeak()) {
+ if (FunctionDecl* F = dyn_cast<FunctionDecl>(D)) {
+ S.Diag(E->getExprLoc(), diag::warn_impcast_function_to_bool)
+ << F << E->getSourceRange() << SourceRange(CC);
+ S.Diag(E->getExprLoc(), diag::note_function_to_bool_silence)
+ << FixItHint::CreateInsertion(E->getExprLoc(), "&");
+ QualType ReturnType;
+ UnresolvedSet<4> NonTemplateOverloads;
+ S.isExprCallable(*E, ReturnType, NonTemplateOverloads);
+ if (!ReturnType.isNull()
+ && ReturnType->isSpecificBuiltinType(BuiltinType::Bool))
+ S.Diag(E->getExprLoc(), diag::note_function_to_bool_call)
+ << FixItHint::CreateInsertion(
+ S.getPreprocessor().getLocForEndOfToken(E->getLocEnd()), "()");
+ return;
+ }
+ }
+ }
+ return; // Other casts to bool are not checked.
+ }
+
+ // Strip vector types.
+ if (isa<VectorType>(Source)) {
+ if (!isa<VectorType>(Target)) {
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+ return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_vector_scalar);
+ }
+
+ // If the vector cast is cast between two vectors of the same size, it is
+ // a bitcast, not a conversion.
+ if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
+ return;
+
+ Source = cast<VectorType>(Source)->getElementType().getTypePtr();
+ Target = cast<VectorType>(Target)->getElementType().getTypePtr();
+ }
+
+ // Strip complex types.
+ if (isa<ComplexType>(Source)) {
+ if (!isa<ComplexType>(Target)) {
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+
+ return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_complex_scalar);
+ }
+
+ Source = cast<ComplexType>(Source)->getElementType().getTypePtr();
+ Target = cast<ComplexType>(Target)->getElementType().getTypePtr();
+ }
+
+ const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source);
+ const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target);
+
+ // If the source is floating point...
+ if (SourceBT && SourceBT->isFloatingPoint()) {
+ // ...and the target is floating point...
+ if (TargetBT && TargetBT->isFloatingPoint()) {
+ // ...then warn if we're dropping FP rank.
+
+ // Builtin FP kinds are ordered by increasing FP rank.
+ if (SourceBT->getKind() > TargetBT->getKind()) {
+ // Don't warn about float constants that are precisely
+ // representable in the target type.
+ Expr::EvalResult result;
+ if (E->EvaluateAsRValue(result, S.Context)) {
+ // Value might be a float, a float vector, or a float complex.
+ if (IsSameFloatAfterCast(result.Val,
+ S.Context.getFloatTypeSemantics(QualType(TargetBT, 0)),
+ S.Context.getFloatTypeSemantics(QualType(SourceBT, 0))))
+ return;
+ }
+
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+
+ DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_precision);
+ }
+ return;
+ }
+
+ // If the target is integral, always warn.
+ if ((TargetBT && TargetBT->isInteger())) {
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+
+ Expr *InnerE = E->IgnoreParenImpCasts();
+ // We also want to warn on, e.g., "int i = -1.234"
+ if (UnaryOperator *UOp = dyn_cast<UnaryOperator>(InnerE))
+ if (UOp->getOpcode() == UO_Minus || UOp->getOpcode() == UO_Plus)
+ InnerE = UOp->getSubExpr()->IgnoreParenImpCasts();
+
+ if (FloatingLiteral *FL = dyn_cast<FloatingLiteral>(InnerE)) {
+ DiagnoseFloatingLiteralImpCast(S, FL, T, CC);
+ } else {
+ DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_float_integer);
+ }
+ }
+
+ return;
+ }
+
+ if (!Source->isIntegerType() || !Target->isIntegerType())
+ return;
+
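+ // Diagnose implicit conversion of the GNU '__null' constant to an integer
+ // type, e.g. 'int i = NULL;' in C++ where NULL expands to __null.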
+ if ((E->isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull)
+ == Expr::NPCK_GNUNull) && Target->isIntegerType()) {
+ SourceLocation Loc = E->getSourceRange().getBegin();
+ if (Loc.isMacroID())
+ Loc = S.SourceMgr.getImmediateExpansionRange(Loc).first;
+ S.Diag(Loc, diag::warn_impcast_null_pointer_to_integer)
+ << T << Loc << clang::SourceRange(CC);
+ return;
+ }
+
+ IntRange SourceRange = GetExprRange(S.Context, E);
+ IntRange TargetRange = IntRange::forTargetOfCanonicalType(S.Context, Target);
+
+ if (SourceRange.Width > TargetRange.Width) {
+ // If the source is a constant, use a default-on diagnostic.
+ // TODO: this should happen for bitfield stores, too.
+ llvm::APSInt Value(32);
+ if (E->isIntegerConstantExpr(Value, S.Context)) {
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+
+ std::string PrettySourceValue = Value.toString(10);
+ std::string PrettyTargetValue = PrettyPrintInRange(Value, TargetRange);
+
+ S.DiagRuntimeBehavior(E->getExprLoc(), E,
+ S.PDiag(diag::warn_impcast_integer_precision_constant)
+ << PrettySourceValue << PrettyTargetValue
+ << E->getType() << T << E->getSourceRange()
+ << clang::SourceRange(CC));
+ return;
+ }
+
+ // People want to build with -Wshorten-64-to-32 and not -Wconversion.
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+
+ if (TargetRange.Width == 32 && S.Context.getIntWidth(E->getType()) == 64)
+ return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_64_32,
+ /* pruneControlFlow */ true);
+ return DiagnoseImpCast(S, E, T, CC, diag::warn_impcast_integer_precision);
+ }
+
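+ // Warn when the sign can change: either a possibly-negative value is
+ // converted to an unsigned type (e.g. 'unsigned u = -1;'), or a same-width
+ // non-negative value is converted to a signed type where it may no longer be
+ // representable (e.g. 'int i = 4000000000u;').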
+ if ((TargetRange.NonNegative && !SourceRange.NonNegative) ||
+ (!TargetRange.NonNegative && SourceRange.NonNegative &&
+ SourceRange.Width == TargetRange.Width)) {
+
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+
+ unsigned DiagID = diag::warn_impcast_integer_sign;
+
+ // Traditionally, gcc has warned about this under -Wsign-compare.
+ // We also want to warn about it in -Wconversion.
+ // So if -Wconversion is off, use a completely identical diagnostic
+ // in the sign-compare group.
+ // The conditional-checking code uses the ICContext flag to decide whether
+ // to run its follow-up checks on the condition type.
+ if (ICContext) {
+ DiagID = diag::warn_impcast_integer_sign_conditional;
+ *ICContext = true;
+ }
+
+ return DiagnoseImpCast(S, E, T, CC, DiagID);
+ }
+
+ // Diagnose conversions between different enumeration types.
+ // In C, we pretend that the type of an EnumConstantDecl is its enumeration
+ // type, to give us better diagnostics.
+ QualType SourceType = E->getType();
+ if (!S.getLangOpts().CPlusPlus) {
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ if (EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
+ EnumDecl *Enum = cast<EnumDecl>(ECD->getDeclContext());
+ SourceType = S.Context.getTypeDeclType(Enum);
+ Source = S.Context.getCanonicalType(SourceType).getTypePtr();
+ }
+ }
+
+ if (const EnumType *SourceEnum = Source->getAs<EnumType>())
+ if (const EnumType *TargetEnum = Target->getAs<EnumType>())
+ if ((SourceEnum->getDecl()->getIdentifier() ||
+ SourceEnum->getDecl()->getTypedefNameForAnonDecl()) &&
+ (TargetEnum->getDecl()->getIdentifier() ||
+ TargetEnum->getDecl()->getTypedefNameForAnonDecl()) &&
+ SourceEnum != TargetEnum) {
+ if (S.SourceMgr.isInSystemMacro(CC))
+ return;
+
+ return DiagnoseImpCast(S, E, SourceType, T, CC,
+ diag::warn_impcast_different_enum_types);
+ }
+
+ return;
+}
+
+void CheckConditionalOperator(Sema &S, ConditionalOperator *E, QualType T);
+
+void CheckConditionalOperand(Sema &S, Expr *E, QualType T,
+ SourceLocation CC, bool &ICContext) {
+ E = E->IgnoreParenImpCasts();
+
+ if (isa<ConditionalOperator>(E))
+ return CheckConditionalOperator(S, cast<ConditionalOperator>(E), T);
+
+ AnalyzeImplicitConversions(S, E, CC);
+ if (E->getType() != T)
+ return CheckImplicitConversion(S, E, T, CC, &ICContext);
+ return;
+}
+
+void CheckConditionalOperator(Sema &S, ConditionalOperator *E, QualType T) {
+ SourceLocation CC = E->getQuestionLoc();
+
+ AnalyzeImplicitConversions(S, E->getCond(), CC);
+
+ bool Suspicious = false;
+ CheckConditionalOperand(S, E->getTrueExpr(), T, CC, Suspicious);
+ CheckConditionalOperand(S, E->getFalseExpr(), T, CC, Suspicious);
+
+ // If -Wconversion would have warned about either of the candidates
+ // for a signedness conversion to the context type...
+ if (!Suspicious) return;
+
+ // ...but it's currently ignored...
+ if (S.Diags.getDiagnosticLevel(diag::warn_impcast_integer_sign_conditional,
+ CC))
+ return;
+
+ // ...then check whether it would have warned about either of the
+ // candidates for a signedness conversion to the condition type.
+ if (E->getType() == T) return;
+
+ Suspicious = false;
+ CheckImplicitConversion(S, E->getTrueExpr()->IgnoreParenImpCasts(),
+ E->getType(), CC, &Suspicious);
+ if (!Suspicious)
+ CheckImplicitConversion(S, E->getFalseExpr()->IgnoreParenImpCasts(),
+ E->getType(), CC, &Suspicious);
+}
+
+/// AnalyzeImplicitConversions - Find and report any interesting
+/// implicit conversions in the given expression. There are a couple
+/// of competing diagnostics here, -Wconversion and -Wsign-compare.
+void AnalyzeImplicitConversions(Sema &S, Expr *OrigE, SourceLocation CC) {
+ QualType T = OrigE->getType();
+ Expr *E = OrigE->IgnoreParenImpCasts();
+
+ if (E->isTypeDependent() || E->isValueDependent())
+ return;
+
+ // For conditional operators, we analyze the arguments as if they
+ // were being fed directly into the output.
+ if (isa<ConditionalOperator>(E)) {
+ ConditionalOperator *CO = cast<ConditionalOperator>(E);
+ CheckConditionalOperator(S, CO, T);
+ return;
+ }
+
+ // Go ahead and check any implicit conversions we might have skipped.
+ // The non-canonical typecheck is just an optimization;
+ // CheckImplicitConversion will filter out dead implicit conversions.
+ if (E->getType() != T)
+ CheckImplicitConversion(S, E, T, CC);
+
+ // Now continue drilling into this expression.
+
+ // Skip past explicit casts.
+ if (isa<ExplicitCastExpr>(E)) {
+ E = cast<ExplicitCastExpr>(E)->getSubExpr()->IgnoreParenImpCasts();
+ return AnalyzeImplicitConversions(S, E, CC);
+ }
+
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ // Do a somewhat different check with comparison operators.
+ if (BO->isComparisonOp())
+ return AnalyzeComparison(S, BO);
+
+ // And with simple assignments.
+ if (BO->getOpcode() == BO_Assign)
+ return AnalyzeAssignment(S, BO);
+ }
+
+ // These break the otherwise-useful invariant below. Fortunately,
+ // we don't really need to recurse into them, because any internal
+ // expressions should have been analyzed already when they were
+ // built into statements.
+ if (isa<StmtExpr>(E)) return;
+
+ // Don't descend into unevaluated contexts.
+ if (isa<UnaryExprOrTypeTraitExpr>(E)) return;
+
+ // Now just recurse over the expression's children.
+ CC = E->getExprLoc();
+ BinaryOperator *BO = dyn_cast<BinaryOperator>(E);
+ bool IsLogicalOperator = BO && BO->isLogicalOp();
+ for (Stmt::child_range I = E->children(); I; ++I) {
+ Expr *ChildExpr = dyn_cast_or_null<Expr>(*I);
+ if (!ChildExpr)
+ continue;
+
+ if (IsLogicalOperator &&
+ isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
+ // Ignore checking string literals that are in logical operators.
+ continue;
+ AnalyzeImplicitConversions(S, ChildExpr, CC);
+ }
+}
+
+} // end anonymous namespace
+
+/// Diagnoses "dangerous" implicit conversions within the given
+/// expression (which is a full expression). Implements -Wconversion
+/// and -Wsign-compare.
+///
+/// \param CC the "context" location of the implicit conversion, i.e.
+/// the location of the syntactic entity requiring the implicit
+/// conversion
+void Sema::CheckImplicitConversions(Expr *E, SourceLocation CC) {
+ // Don't diagnose in unevaluated contexts.
+ if (ExprEvalContexts.back().Context == Sema::Unevaluated)
+ return;
+
+ // Don't diagnose for value- or type-dependent expressions.
+ if (E->isTypeDependent() || E->isValueDependent())
+ return;
+
+ // Check for array bounds violations in cases where the check isn't triggered
+ // elsewhere for other Expr types (like BinaryOperators), e.g. when an
+ // ArraySubscriptExpr is on the RHS of a variable initialization.
+ CheckArrayAccess(E);
+
+ // This is not the right CC for (e.g.) a variable initialization.
+ AnalyzeImplicitConversions(*this, E, CC);
+}
+
+void Sema::CheckBitFieldInitialization(SourceLocation InitLoc,
+ FieldDecl *BitField,
+ Expr *Init) {
+ (void) AnalyzeBitFieldAssignment(*this, BitField, Init, InitLoc);
+}
+
+/// CheckParmsForFunctionDef - Check that the parameters of the given
+/// function are appropriate for the definition of a function. This
+/// takes care of any checks that cannot be performed on the
+/// declaration itself, e.g., that the types of each of the function
+/// parameters are complete.
+bool Sema::CheckParmsForFunctionDef(ParmVarDecl **P, ParmVarDecl **PEnd,
+ bool CheckParameterNames) {
+ bool HasInvalidParm = false;
+ for (; P != PEnd; ++P) {
+ ParmVarDecl *Param = *P;
+
+ // C99 6.7.5.3p4: the parameters in a parameter type list in a
+ // function declarator that is part of a function definition of
+ // that function shall not have incomplete type.
+ //
+ // This is also C++ [dcl.fct]p6.
+ if (!Param->isInvalidDecl() &&
+ RequireCompleteType(Param->getLocation(), Param->getType(),
+ diag::err_typecheck_decl_incomplete_type)) {
+ Param->setInvalidDecl();
+ HasInvalidParm = true;
+ }
+
+ // C99 6.9.1p5: If the declarator includes a parameter type list, the
+ // declaration of each parameter shall include an identifier.
+ if (CheckParameterNames &&
+ Param->getIdentifier() == 0 &&
+ !Param->isImplicit() &&
+ !getLangOpts().CPlusPlus)
+ Diag(Param->getLocation(), diag::err_parameter_name_omitted);
+
+ // C99 6.7.5.3p12:
+ // If the function declarator is not part of a definition of that
+ // function, parameters may have incomplete type and may use the [*]
+ // notation in their sequences of declarator specifiers to specify
+ // variable length array types.
+ QualType PType = Param->getOriginalType();
+ if (const ArrayType *AT = Context.getAsArrayType(PType)) {
+ if (AT->getSizeModifier() == ArrayType::Star) {
+ // FIXME: This diagnostic should point to the '[*]' if source-location
+ // information is added for it.
+ Diag(Param->getLocation(), diag::err_array_star_in_function_definition);
+ }
+ }
+ }
+
+ return HasInvalidParm;
+}
+
+/// CheckCastAlign - Implements -Wcast-align, which warns when a
+/// pointer cast increases the alignment requirements.
+void Sema::CheckCastAlign(Expr *Op, QualType T, SourceRange TRange) {
+ // This is actually a lot of work to potentially be doing on every
+ // cast; don't do it if we're ignoring -Wcast-align (as is the default).
+ if (getDiagnostics().getDiagnosticLevel(diag::warn_cast_align,
+ TRange.getBegin())
+ == DiagnosticsEngine::Ignored)
+ return;
+
+ // Ignore dependent types.
+ if (T->isDependentType() || Op->getType()->isDependentType())
+ return;
+
+ // Require that the destination be a pointer type.
+ const PointerType *DestPtr = T->getAs<PointerType>();
+ if (!DestPtr) return;
+
+ // If the destination has alignment 1, we're done.
+ QualType DestPointee = DestPtr->getPointeeType();
+ if (DestPointee->isIncompleteType()) return;
+ CharUnits DestAlign = Context.getTypeAlignInChars(DestPointee);
+ if (DestAlign.isOne()) return;
+
+ // Require that the source be a pointer type.
+ const PointerType *SrcPtr = Op->getType()->getAs<PointerType>();
+ if (!SrcPtr) return;
+ QualType SrcPointee = SrcPtr->getPointeeType();
+
+ // Whitelist casts from cv void*. We already implicitly
+ // whitelisted casts to cv void*, since they have alignment 1.
+ // Also whitelist casts involving incomplete types, which implicitly
+ // includes 'void'.
+ if (SrcPointee->isIncompleteType()) return;
+
+ CharUnits SrcAlign = Context.getTypeAlignInChars(SrcPointee);
+ if (SrcAlign >= DestAlign) return;
+
+ Diag(TRange.getBegin(), diag::warn_cast_align)
+ << Op->getType() << T
+ << static_cast<unsigned>(SrcAlign.getQuantity())
+ << static_cast<unsigned>(DestAlign.getQuantity())
+ << TRange << Op->getSourceRange();
+}
+
+static const Type* getElementType(const Expr *BaseExpr) {
+ const Type* EltType = BaseExpr->getType().getTypePtr();
+ if (EltType->isAnyPointerType())
+ return EltType->getPointeeType().getTypePtr();
+ else if (EltType->isArrayType())
+ return EltType->getBaseElementTypeUnsafe();
+ return EltType;
+}
+
+/// \brief Check whether this array fits the idiom of a size-one tail padded
+/// array member of a struct.
+///
+/// We avoid emitting out-of-bounds access warnings for such arrays as they are
+/// commonly used to emulate flexible arrays in C89 code.
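+///
+/// For example, with 'struct S { int count; char data[1]; };', indexing past
+/// 'data[0]' is not warned about.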
+static bool IsTailPaddedMemberArray(Sema &S, llvm::APInt Size,
+ const NamedDecl *ND) {
+ if (Size != 1 || !ND) return false;
+
+ const FieldDecl *FD = dyn_cast<FieldDecl>(ND);
+ if (!FD) return false;
+
+ // Don't consider sizes resulting from macro expansions or template argument
+ // substitution to form C89 tail-padded arrays.
+ ConstantArrayTypeLoc TL =
+ cast<ConstantArrayTypeLoc>(FD->getTypeSourceInfo()->getTypeLoc());
+ const Expr *SizeExpr = dyn_cast<IntegerLiteral>(TL.getSizeExpr());
+ if (!SizeExpr || SizeExpr->getExprLoc().isMacroID())
+ return false;
+
+ const RecordDecl *RD = dyn_cast<RecordDecl>(FD->getDeclContext());
+ if (!RD) return false;
+ if (RD->isUnion()) return false;
+ if (const CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (!CRD->isStandardLayout()) return false;
+ }
+
+ // See if this is the last field decl in the record.
+ const Decl *D = FD;
+ while ((D = D->getNextDeclInContext()))
+ if (isa<FieldDecl>(D))
+ return false;
+ return true;
+}
+
+void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
+ const ArraySubscriptExpr *ASE,
+ bool AllowOnePastEnd, bool IndexNegated) {
+ IndexExpr = IndexExpr->IgnoreParenImpCasts();
+ if (IndexExpr->isValueDependent())
+ return;
+
+ const Type *EffectiveType = getElementType(BaseExpr);
+ BaseExpr = BaseExpr->IgnoreParenCasts();
+ const ConstantArrayType *ArrayTy =
+ Context.getAsConstantArrayType(BaseExpr->getType());
+ if (!ArrayTy)
+ return;
+
+ llvm::APSInt index;
+ if (!IndexExpr->EvaluateAsInt(index, Context))
+ return;
+ if (IndexNegated)
+ index = -index;
+
+ const NamedDecl *ND = NULL;
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
+ ND = dyn_cast<NamedDecl>(DRE->getDecl());
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
+ ND = dyn_cast<NamedDecl>(ME->getMemberDecl());
+
+ if (index.isUnsigned() || !index.isNegative()) {
+ llvm::APInt size = ArrayTy->getSize();
+ if (!size.isStrictlyPositive())
+ return;
+
+ const Type* BaseType = getElementType(BaseExpr);
+ if (BaseType != EffectiveType) {
+ // Make sure we're comparing apples to apples when comparing index to size
+ uint64_t ptrarith_typesize = Context.getTypeSize(EffectiveType);
+ uint64_t array_typesize = Context.getTypeSize(BaseType);
+ // Handle ptrarith_typesize being zero, such as when casting to void*
+ if (!ptrarith_typesize) ptrarith_typesize = 1;
+ if (ptrarith_typesize != array_typesize) {
+ // There's a cast to a different size type involved
+ uint64_t ratio = array_typesize / ptrarith_typesize;
+ // TODO: Be smarter about handling cases where array_typesize is not a
+ // multiple of ptrarith_typesize
+ if (ptrarith_typesize * ratio == array_typesize)
+ size *= llvm::APInt(size.getBitWidth(), ratio);
+ }
+ }
+
+ if (size.getBitWidth() > index.getBitWidth())
+ index = index.zext(size.getBitWidth());
+ else if (size.getBitWidth() < index.getBitWidth())
+ size = size.zext(index.getBitWidth());
+
+ // For array subscripting the index must be less than size, but for pointer
+ // arithmetic also allow the index (offset) to be equal to size since
+ // computing the next address after the end of the array is legal and
+ // commonly done e.g. in C++ iterators and range-based for loops.
+ if (AllowOnePastEnd ? index.ule(size) : index.ult(size))
+ return;
+
+ // Also don't warn for arrays of size 1 which are members of some
+ // structure. These are often used to approximate flexible arrays in C89
+ // code.
+ if (IsTailPaddedMemberArray(*this, size, ND))
+ return;
+
+ // Suppress the warning if the subscript expression (as identified by the
+ // ']' location) and the index expression are both from macro expansions
+ // within a system header.
+ if (ASE) {
+ SourceLocation RBracketLoc = SourceMgr.getSpellingLoc(
+ ASE->getRBracketLoc());
+ if (SourceMgr.isInSystemHeader(RBracketLoc)) {
+ SourceLocation IndexLoc = SourceMgr.getSpellingLoc(
+ IndexExpr->getLocStart());
+ if (SourceMgr.isFromSameFile(RBracketLoc, IndexLoc))
+ return;
+ }
+ }
+
+ unsigned DiagID = diag::warn_ptr_arith_exceeds_bounds;
+ if (ASE)
+ DiagID = diag::warn_array_index_exceeds_bounds;
+
+ DiagRuntimeBehavior(BaseExpr->getLocStart(), BaseExpr,
+ PDiag(DiagID) << index.toString(10, true)
+ << size.toString(10, true)
+ << (unsigned)size.getLimitedValue(~0U)
+ << IndexExpr->getSourceRange());
+ } else {
+ unsigned DiagID = diag::warn_array_index_precedes_bounds;
+ if (!ASE) {
+ DiagID = diag::warn_ptr_arith_precedes_bounds;
+ if (index.isNegative()) index = -index;
+ }
+
+ DiagRuntimeBehavior(BaseExpr->getLocStart(), BaseExpr,
+ PDiag(DiagID) << index.toString(10, true)
+ << IndexExpr->getSourceRange());
+ }
+
+ if (!ND) {
+ // Try harder to find a NamedDecl to point at in the note.
+ while (const ArraySubscriptExpr *ASE =
+ dyn_cast<ArraySubscriptExpr>(BaseExpr))
+ BaseExpr = ASE->getBase()->IgnoreParenCasts();
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(BaseExpr))
+ ND = dyn_cast<NamedDecl>(DRE->getDecl());
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(BaseExpr))
+ ND = dyn_cast<NamedDecl>(ME->getMemberDecl());
+ }
+
+ if (ND)
+ DiagRuntimeBehavior(ND->getLocStart(), BaseExpr,
+ PDiag(diag::note_array_index_out_of_bounds)
+ << ND->getDeclName());
+}
+
+void Sema::CheckArrayAccess(const Expr *expr) {
+ int AllowOnePastEnd = 0;
+ while (expr) {
+ expr = expr->IgnoreParenImpCasts();
+ switch (expr->getStmtClass()) {
+ case Stmt::ArraySubscriptExprClass: {
+ const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(expr);
+ CheckArrayAccess(ASE->getBase(), ASE->getIdx(), ASE,
+ AllowOnePastEnd > 0);
+ return;
+ }
+ case Stmt::UnaryOperatorClass: {
+ // Only unwrap the * and & unary operators
+ const UnaryOperator *UO = cast<UnaryOperator>(expr);
+ expr = UO->getSubExpr();
+ switch (UO->getOpcode()) {
+ case UO_AddrOf:
+ AllowOnePastEnd++;
+ break;
+ case UO_Deref:
+ AllowOnePastEnd--;
+ break;
+ default:
+ return;
+ }
+ break;
+ }
+ case Stmt::ConditionalOperatorClass: {
+ const ConditionalOperator *cond = cast<ConditionalOperator>(expr);
+ if (const Expr *lhs = cond->getLHS())
+ CheckArrayAccess(lhs);
+ if (const Expr *rhs = cond->getRHS())
+ CheckArrayAccess(rhs);
+ return;
+ }
+ default:
+ return;
+ }
+ }
+}
+
+//===--- CHECK: Objective-C retain cycles ----------------------------------//
+
+namespace {
+ struct RetainCycleOwner {
+ RetainCycleOwner() : Variable(0), Indirect(false) {}
+ VarDecl *Variable;
+ SourceRange Range;
+ SourceLocation Loc;
+ bool Indirect;
+
+ void setLocsFrom(Expr *e) {
+ Loc = e->getExprLoc();
+ Range = e->getSourceRange();
+ }
+ };
+}
+
+/// Consider whether capturing the given variable can possibly lead to
+/// a retain cycle.
+static bool considerVariable(VarDecl *var, Expr *ref, RetainCycleOwner &owner) {
+ // In ARC, it's captured strongly iff the variable has __strong
+ // lifetime. In MRR, it's captured strongly if the variable is
+ // __block and has an appropriate type.
+ if (var->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
+ return false;
+
+ owner.Variable = var;
+ owner.setLocsFrom(ref);
+ return true;
+}
+
+static bool findRetainCycleOwner(Sema &S, Expr *e, RetainCycleOwner &owner) {
+ while (true) {
+ e = e->IgnoreParens();
+ if (CastExpr *cast = dyn_cast<CastExpr>(e)) {
+ switch (cast->getCastKind()) {
+ case CK_BitCast:
+ case CK_LValueBitCast:
+ case CK_LValueToRValue:
+ case CK_ARCReclaimReturnedObject:
+ e = cast->getSubExpr();
+ continue;
+
+ default:
+ return false;
+ }
+ }
+
+ if (ObjCIvarRefExpr *ref = dyn_cast<ObjCIvarRefExpr>(e)) {
+ ObjCIvarDecl *ivar = ref->getDecl();
+ if (ivar->getType().getObjCLifetime() != Qualifiers::OCL_Strong)
+ return false;
+
+ // Try to find a retain cycle in the base.
+ if (!findRetainCycleOwner(S, ref->getBase(), owner))
+ return false;
+
+ if (ref->isFreeIvar()) owner.setLocsFrom(ref);
+ owner.Indirect = true;
+ return true;
+ }
+
+ if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(e)) {
+ VarDecl *var = dyn_cast<VarDecl>(ref->getDecl());
+ if (!var) return false;
+ return considerVariable(var, ref, owner);
+ }
+
+ if (MemberExpr *member = dyn_cast<MemberExpr>(e)) {
+ if (member->isArrow()) return false;
+
+ // Don't count this as an indirect ownership.
+ e = member->getBase();
+ continue;
+ }
+
+ if (PseudoObjectExpr *pseudo = dyn_cast<PseudoObjectExpr>(e)) {
+ // Only pay attention to pseudo-objects on property references.
+ ObjCPropertyRefExpr *pre
+ = dyn_cast<ObjCPropertyRefExpr>(pseudo->getSyntacticForm()
+ ->IgnoreParens());
+ if (!pre) return false;
+ if (pre->isImplicitProperty()) return false;
+ ObjCPropertyDecl *property = pre->getExplicitProperty();
+ if (!property->isRetaining() &&
+ !(property->getPropertyIvarDecl() &&
+ property->getPropertyIvarDecl()->getType()
+ .getObjCLifetime() == Qualifiers::OCL_Strong))
+ return false;
+
+ owner.Indirect = true;
+ if (pre->isSuperReceiver()) {
+ owner.Variable = S.getCurMethodDecl()->getSelfDecl();
+ if (!owner.Variable)
+ return false;
+ owner.Loc = pre->getLocation();
+ owner.Range = pre->getSourceRange();
+ return true;
+ }
+ e = const_cast<Expr*>(cast<OpaqueValueExpr>(pre->getBase())
+ ->getSourceExpr());
+ continue;
+ }
+
+ // Array ivars?
+
+ return false;
+ }
+}
+
+namespace {
+ struct FindCaptureVisitor : EvaluatedExprVisitor<FindCaptureVisitor> {
+ FindCaptureVisitor(ASTContext &Context, VarDecl *variable)
+ : EvaluatedExprVisitor<FindCaptureVisitor>(Context),
+ Variable(variable), Capturer(0) {}
+
+ VarDecl *Variable;
+ Expr *Capturer;
+
+ void VisitDeclRefExpr(DeclRefExpr *ref) {
+ if (ref->getDecl() == Variable && !Capturer)
+ Capturer = ref;
+ }
+
+ void VisitObjCIvarRefExpr(ObjCIvarRefExpr *ref) {
+ if (Capturer) return;
+ Visit(ref->getBase());
+ if (Capturer && ref->isFreeIvar())
+ Capturer = ref;
+ }
+
+ void VisitBlockExpr(BlockExpr *block) {
+ // Look inside nested blocks
+ if (block->getBlockDecl()->capturesVariable(Variable))
+ Visit(block->getBlockDecl()->getBody());
+ }
+ };
+}
+
+/// Check whether the given argument is a block which captures a
+/// variable.
+static Expr *findCapturingExpr(Sema &S, Expr *e, RetainCycleOwner &owner) {
+ assert(owner.Variable && owner.Loc.isValid());
+
+ e = e->IgnoreParenCasts();
+ BlockExpr *block = dyn_cast<BlockExpr>(e);
+ if (!block || !block->getBlockDecl()->capturesVariable(owner.Variable))
+ return 0;
+
+ FindCaptureVisitor visitor(S.Context, owner.Variable);
+ visitor.Visit(block->getBlockDecl()->getBody());
+ return visitor.Capturer;
+}
+
+static void diagnoseRetainCycle(Sema &S, Expr *capturer,
+ RetainCycleOwner &owner) {
+ assert(capturer);
+ assert(owner.Variable && owner.Loc.isValid());
+
+ S.Diag(capturer->getExprLoc(), diag::warn_arc_retain_cycle)
+ << owner.Variable << capturer->getSourceRange();
+ S.Diag(owner.Loc, diag::note_arc_retain_cycle_owner)
+ << owner.Indirect << owner.Range;
+}
+
+/// Check for a keyword selector that starts with the word 'add' or
+/// 'set'.
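+///
+/// For example, 'setDelegate:' and 'addObserver:' qualify, while 'settings:'
+/// does not.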
+static bool isSetterLikeSelector(Selector sel) {
+ if (sel.isUnarySelector()) return false;
+
+ StringRef str = sel.getNameForSlot(0);
+ while (!str.empty() && str.front() == '_') str = str.substr(1);
+ if (str.startswith("set"))
+ str = str.substr(3);
+ else if (str.startswith("add")) {
+ // Specially whitelist 'addOperationWithBlock:'.
+ if (sel.getNumArgs() == 1 && str.startswith("addOperationWithBlock"))
+ return false;
+ str = str.substr(3);
+ }
+ else
+ return false;
+
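+  // After the prefix, a setter-like selector is either exactly 'set'/'add' or
+  // continues with an uppercase letter (so 'setX:' matches, 'settings:' does not).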
+ if (str.empty()) return true;
+ return !islower(str.front());
+}
+
+/// Check a message send to see if it's likely to cause a retain cycle.
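+/// For example, passing a block that captures the receiver to a setter-like
+/// method: [self setHandler:^{ [self doSomething]; }].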
+void Sema::checkRetainCycles(ObjCMessageExpr *msg) {
+ // Only check instance methods whose selector looks like a setter.
+ if (!msg->isInstanceMessage() || !isSetterLikeSelector(msg->getSelector()))
+ return;
+
+ // Try to find a variable that the receiver is strongly owned by.
+ RetainCycleOwner owner;
+ if (msg->getReceiverKind() == ObjCMessageExpr::Instance) {
+ if (!findRetainCycleOwner(*this, msg->getInstanceReceiver(), owner))
+ return;
+ } else {
+ assert(msg->getReceiverKind() == ObjCMessageExpr::SuperInstance);
+ owner.Variable = getCurMethodDecl()->getSelfDecl();
+ owner.Loc = msg->getSuperLoc();
+ owner.Range = msg->getSuperLoc();
+ }
+
+ // Check whether the receiver is captured by any of the arguments.
+ for (unsigned i = 0, e = msg->getNumArgs(); i != e; ++i)
+ if (Expr *capturer = findCapturingExpr(*this, msg->getArg(i), owner))
+ return diagnoseRetainCycle(*this, capturer, owner);
+}
+
+/// Check a property assign to see if it's likely to cause a retain cycle.
+void Sema::checkRetainCycles(Expr *receiver, Expr *argument) {
+ RetainCycleOwner owner;
+ if (!findRetainCycleOwner(*this, receiver, owner))
+ return;
+
+ if (Expr *capturer = findCapturingExpr(*this, argument, owner))
+ diagnoseRetainCycle(*this, capturer, owner);
+}
+
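+/// Diagnose assigning a retained (+1) object into a __weak or
+/// __unsafe_unretained lvalue, where the object will be released after the
+/// assignment.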
+bool Sema::checkUnsafeAssigns(SourceLocation Loc,
+ QualType LHS, Expr *RHS) {
+ Qualifiers::ObjCLifetime LT = LHS.getObjCLifetime();
+ if (LT != Qualifiers::OCL_Weak && LT != Qualifiers::OCL_ExplicitNone)
+ return false;
+  // Strip off any implicit casts on the RHS; the presence of the ARC-specific
+  // 'consume' cast means a retained (+1) object is being assigned.
+ while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
+ if (cast->getCastKind() == CK_ARCConsumeObject) {
+ Diag(Loc, diag::warn_arc_retained_assign)
+ << (LT == Qualifiers::OCL_ExplicitNone)
+ << RHS->getSourceRange();
+ return true;
+ }
+ RHS = cast->getSubExpr();
+ }
+ return false;
+}
+
+void Sema::checkUnsafeExprAssigns(SourceLocation Loc,
+ Expr *LHS, Expr *RHS) {
+ QualType LHSType;
+  // The type of a PropertyRef on the LHS needs to be obtained directly from
+  // its declaration, since the expression itself has a pseudo-object type.
+ ObjCPropertyRefExpr *PRE
+ = dyn_cast<ObjCPropertyRefExpr>(LHS->IgnoreParens());
+ if (PRE && !PRE->isImplicitProperty()) {
+ const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
+ if (PD)
+ LHSType = PD->getType();
+ }
+
+ if (LHSType.isNull())
+ LHSType = LHS->getType();
+ if (checkUnsafeAssigns(Loc, LHSType, RHS))
+ return;
+ Qualifiers::ObjCLifetime LT = LHSType.getObjCLifetime();
+  // FIXME: Check for other lifetimes.
+ if (LT != Qualifiers::OCL_None)
+ return;
+
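+  // For a property declared with the 'assign' attribute, warn if a retained
+  // (+1) object is assigned, unless 'assign' was merely implied and the
+  // property type itself carries the lifetime information.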
+ if (PRE) {
+ if (PRE->isImplicitProperty())
+ return;
+ const ObjCPropertyDecl *PD = PRE->getExplicitProperty();
+ if (!PD)
+ return;
+
+ unsigned Attributes = PD->getPropertyAttributes();
+ if (Attributes & ObjCPropertyDecl::OBJC_PR_assign) {
+      // When the 'assign' attribute was not explicitly written by the user,
+      // ignore it and rely on the property type itself for lifetime info.
+ unsigned AsWrittenAttr = PD->getPropertyAttributesAsWritten();
+ if (!(AsWrittenAttr & ObjCPropertyDecl::OBJC_PR_assign) &&
+ LHSType->isObjCRetainableType())
+ return;
+
+ while (ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(RHS)) {
+ if (cast->getCastKind() == CK_ARCConsumeObject) {
+ Diag(Loc, diag::warn_arc_retained_property_assign)
+ << RHS->getSourceRange();
+ return;
+ }
+ RHS = cast->getSubExpr();
+ }
+ }
+ }
+}
+
+//===--- CHECK: Empty statement body (-Wempty-body) ---------------------===//
+
+namespace {
+bool ShouldDiagnoseEmptyStmtBody(const SourceManager &SourceMgr,
+ SourceLocation StmtLoc,
+ const NullStmt *Body) {
+ // Do not warn if the body is a macro that expands to nothing, e.g:
+ //
+ // #define CALL(x)
+ // if (condition)
+ // CALL(0);
+ //
+ if (Body->hasLeadingEmptyMacro())
+ return false;
+
+ // Get line numbers of statement and body.
+ bool StmtLineInvalid;
+ unsigned StmtLine = SourceMgr.getSpellingLineNumber(StmtLoc,
+ &StmtLineInvalid);
+ if (StmtLineInvalid)
+ return false;
+
+ bool BodyLineInvalid;
+ unsigned BodyLine = SourceMgr.getSpellingLineNumber(Body->getSemiLoc(),
+ &BodyLineInvalid);
+ if (BodyLineInvalid)
+ return false;
+
+ // Warn if null statement and body are on the same line.
+ if (StmtLine != BodyLine)
+ return false;
+
+ return true;
+}
+} // Unnamed namespace
+
+void Sema::DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
+ const Stmt *Body,
+ unsigned DiagID) {
+ // Since this is a syntactic check, don't emit diagnostic for template
+ // instantiations, this just adds noise.
+ if (CurrentInstantiationScope)
+ return;
+
+ // The body should be a null statement.
+ const NullStmt *NBody = dyn_cast<NullStmt>(Body);
+ if (!NBody)
+ return;
+
+ // Do the usual checks.
+ if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
+ return;
+
+ Diag(NBody->getSemiLoc(), DiagID);
+ Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
+}
+
+void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
+ const Stmt *PossibleBody) {
+ assert(!CurrentInstantiationScope); // Ensured by caller
+
+ SourceLocation StmtLoc;
+ const Stmt *Body;
+ unsigned DiagID;
+ if (const ForStmt *FS = dyn_cast<ForStmt>(S)) {
+ StmtLoc = FS->getRParenLoc();
+ Body = FS->getBody();
+ DiagID = diag::warn_empty_for_body;
+ } else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) {
+ StmtLoc = WS->getCond()->getSourceRange().getEnd();
+ Body = WS->getBody();
+ DiagID = diag::warn_empty_while_body;
+ } else
+ return; // Neither `for' nor `while'.
+
+ // The body should be a null statement.
+ const NullStmt *NBody = dyn_cast<NullStmt>(Body);
+ if (!NBody)
+ return;
+
+ // Skip expensive checks if diagnostic is disabled.
+ if (Diags.getDiagnosticLevel(DiagID, NBody->getSemiLoc()) ==
+ DiagnosticsEngine::Ignored)
+ return;
+
+ // Do the usual checks.
+ if (!ShouldDiagnoseEmptyStmtBody(SourceMgr, StmtLoc, NBody))
+ return;
+
+ // `for(...);' and `while(...);' are popular idioms, so in order to keep
+ // noise level low, emit diagnostics only if for/while is followed by a
+ // CompoundStmt, e.g.:
+ // for (int i = 0; i < n; i++);
+ // {
+ // a(i);
+ // }
+ // or if for/while is followed by a statement with more indentation
+ // than for/while itself:
+ // for (int i = 0; i < n; i++);
+ // a(i);
+ bool ProbableTypo = isa<CompoundStmt>(PossibleBody);
+ if (!ProbableTypo) {
+ bool BodyColInvalid;
+ unsigned BodyCol = SourceMgr.getPresumedColumnNumber(
+ PossibleBody->getLocStart(),
+ &BodyColInvalid);
+ if (BodyColInvalid)
+ return;
+
+ bool StmtColInvalid;
+ unsigned StmtCol = SourceMgr.getPresumedColumnNumber(
+ S->getLocStart(),
+ &StmtColInvalid);
+ if (StmtColInvalid)
+ return;
+
+ if (BodyCol > StmtCol)
+ ProbableTypo = true;
+ }
+
+ if (ProbableTypo) {
+ Diag(NBody->getSemiLoc(), DiagID);
+ Diag(NBody->getSemiLoc(), diag::note_empty_body_on_separate_line);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
new file mode 100644
index 0000000..1ee7532
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaCodeComplete.cpp
@@ -0,0 +1,7178 @@
+//===---------------- SemaCodeComplete.cpp - Code Completion ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the code-completion semantic actions.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Overload.h"
+#include "clang/Sema/CodeCompleteConsumer.h"
+#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Twine.h"
+#include <list>
+#include <map>
+#include <vector>
+
+using namespace clang;
+using namespace sema;
+
+namespace {
+ /// \brief A container of code-completion results.
+ class ResultBuilder {
+ public:
+ /// \brief The type of a name-lookup filter, which can be provided to the
+ /// name-lookup routines to specify which declarations should be included in
+ /// the result set (when it returns true) and which declarations should be
+ /// filtered out (returns false).
+ typedef bool (ResultBuilder::*LookupFilter)(NamedDecl *) const;
+
+ typedef CodeCompletionResult Result;
+
+ private:
+ /// \brief The actual results we have found.
+ std::vector<Result> Results;
+
+ /// \brief A record of all of the declarations we have found and placed
+ /// into the result set, used to ensure that no declaration ever gets into
+ /// the result set twice.
+ llvm::SmallPtrSet<Decl*, 16> AllDeclsFound;
+
+ typedef std::pair<NamedDecl *, unsigned> DeclIndexPair;
+
+ /// \brief An entry in the shadow map, which is optimized to store
+ /// a single (declaration, index) mapping (the common case) but
+ /// can also store a list of (declaration, index) mappings.
+ class ShadowMapEntry {
+ typedef SmallVector<DeclIndexPair, 4> DeclIndexPairVector;
+
+ /// \brief Contains either the solitary NamedDecl * or a vector
+ /// of (declaration, index) pairs.
+ llvm::PointerUnion<NamedDecl *, DeclIndexPairVector*> DeclOrVector;
+
+ /// \brief When the entry contains a single declaration, this is
+ /// the index associated with that entry.
+ unsigned SingleDeclIndex;
+
+ public:
+ ShadowMapEntry() : DeclOrVector(), SingleDeclIndex(0) { }
+
+ void Add(NamedDecl *ND, unsigned Index) {
+ if (DeclOrVector.isNull()) {
+        // 0 -> 1 elements: just set the single element information.
+ DeclOrVector = ND;
+ SingleDeclIndex = Index;
+ return;
+ }
+
+ if (NamedDecl *PrevND = DeclOrVector.dyn_cast<NamedDecl *>()) {
+ // 1 -> 2 elements: create the vector of results and push in the
+ // existing declaration.
+ DeclIndexPairVector *Vec = new DeclIndexPairVector;
+ Vec->push_back(DeclIndexPair(PrevND, SingleDeclIndex));
+ DeclOrVector = Vec;
+ }
+
+ // Add the new element to the end of the vector.
+ DeclOrVector.get<DeclIndexPairVector*>()->push_back(
+ DeclIndexPair(ND, Index));
+ }
+
+ void Destroy() {
+ if (DeclIndexPairVector *Vec
+ = DeclOrVector.dyn_cast<DeclIndexPairVector *>()) {
+ delete Vec;
+ DeclOrVector = ((NamedDecl *)0);
+ }
+ }
+
+ // Iteration.
+ class iterator;
+ iterator begin() const;
+ iterator end() const;
+ };
+
+ /// \brief A mapping from declaration names to the declarations that have
+ /// this name within a particular scope and their index within the list of
+ /// results.
+ typedef llvm::DenseMap<DeclarationName, ShadowMapEntry> ShadowMap;
+
+ /// \brief The semantic analysis object for which results are being
+ /// produced.
+ Sema &SemaRef;
+
+ /// \brief The allocator used to allocate new code-completion strings.
+ CodeCompletionAllocator &Allocator;
+
+ CodeCompletionTUInfo &CCTUInfo;
+
+ /// \brief If non-NULL, a filter function used to remove any code-completion
+ /// results that are not desirable.
+ LookupFilter Filter;
+
+ /// \brief Whether we should allow declarations as
+ /// nested-name-specifiers that would otherwise be filtered out.
+ bool AllowNestedNameSpecifiers;
+
+ /// \brief If set, the type that we would prefer our resulting value
+ /// declarations to have.
+ ///
+ /// Closely matching the preferred type gives a boost to a result's
+ /// priority.
+ CanQualType PreferredType;
+
+ /// \brief A list of shadow maps, which is used to model name hiding at
+ /// different levels of, e.g., the inheritance hierarchy.
+ std::list<ShadowMap> ShadowMaps;
+
+ /// \brief If we're potentially referring to a C++ member function, the set
+ /// of qualifiers applied to the object type.
+ Qualifiers ObjectTypeQualifiers;
+
+ /// \brief Whether the \p ObjectTypeQualifiers field is active.
+ bool HasObjectTypeQualifiers;
+
+ /// \brief The selector that we prefer.
+ Selector PreferredSelector;
+
+ /// \brief The completion context in which we are gathering results.
+ CodeCompletionContext CompletionContext;
+
+ /// \brief If we are in an instance method definition, the @implementation
+ /// object.
+ ObjCImplementationDecl *ObjCImplementation;
+
+ void AdjustResultPriorityForDecl(Result &R);
+
+ void MaybeAddConstructorResults(Result R);
+
+ public:
+ explicit ResultBuilder(Sema &SemaRef, CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo,
+ const CodeCompletionContext &CompletionContext,
+ LookupFilter Filter = 0)
+ : SemaRef(SemaRef), Allocator(Allocator), CCTUInfo(CCTUInfo),
+ Filter(Filter),
+ AllowNestedNameSpecifiers(false), HasObjectTypeQualifiers(false),
+ CompletionContext(CompletionContext),
+ ObjCImplementation(0)
+ {
+ // If this is an Objective-C instance method definition, dig out the
+ // corresponding implementation.
+ switch (CompletionContext.getKind()) {
+ case CodeCompletionContext::CCC_Expression:
+ case CodeCompletionContext::CCC_ObjCMessageReceiver:
+ case CodeCompletionContext::CCC_ParenthesizedExpression:
+ case CodeCompletionContext::CCC_Statement:
+ case CodeCompletionContext::CCC_Recovery:
+ if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl())
+ if (Method->isInstanceMethod())
+ if (ObjCInterfaceDecl *Interface = Method->getClassInterface())
+ ObjCImplementation = Interface->getImplementation();
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ /// \brief Whether we should include code patterns in the completion
+ /// results.
+ bool includeCodePatterns() const {
+ return SemaRef.CodeCompleter &&
+ SemaRef.CodeCompleter->includeCodePatterns();
+ }
+
+ /// \brief Set the filter used for code-completion results.
+ void setFilter(LookupFilter Filter) {
+ this->Filter = Filter;
+ }
+
+ Result *data() { return Results.empty()? 0 : &Results.front(); }
+ unsigned size() const { return Results.size(); }
+ bool empty() const { return Results.empty(); }
+
+ /// \brief Specify the preferred type.
+ void setPreferredType(QualType T) {
+ PreferredType = SemaRef.Context.getCanonicalType(T);
+ }
+
+    /// \brief Set the cv-qualifiers on the object type, for use in filtering
+ /// calls to member functions.
+ ///
+ /// When there are qualifiers in this set, they will be used to filter
+ /// out member functions that aren't available (because there will be a
+ /// cv-qualifier mismatch) or prefer functions with an exact qualifier
+ /// match.
+ void setObjectTypeQualifiers(Qualifiers Quals) {
+ ObjectTypeQualifiers = Quals;
+ HasObjectTypeQualifiers = true;
+ }
+
+ /// \brief Set the preferred selector.
+ ///
+ /// When an Objective-C method declaration result is added, and that
+ /// method's selector matches this preferred selector, we give that method
+ /// a slight priority boost.
+ void setPreferredSelector(Selector Sel) {
+ PreferredSelector = Sel;
+ }
+
+ /// \brief Retrieve the code-completion context for which results are
+ /// being collected.
+ const CodeCompletionContext &getCompletionContext() const {
+ return CompletionContext;
+ }
+
+ /// \brief Specify whether nested-name-specifiers are allowed.
+ void allowNestedNameSpecifiers(bool Allow = true) {
+ AllowNestedNameSpecifiers = Allow;
+ }
+
+ /// \brief Return the semantic analysis object for which we are collecting
+ /// code completion results.
+ Sema &getSema() const { return SemaRef; }
+
+ /// \brief Retrieve the allocator used to allocate code completion strings.
+ CodeCompletionAllocator &getAllocator() const { return Allocator; }
+
+ CodeCompletionTUInfo &getCodeCompletionTUInfo() const { return CCTUInfo; }
+
+ /// \brief Determine whether the given declaration is at all interesting
+ /// as a code-completion result.
+ ///
+ /// \param ND the declaration that we are inspecting.
+ ///
+ /// \param AsNestedNameSpecifier will be set true if this declaration is
+ /// only interesting when it is a nested-name-specifier.
+ bool isInterestingDecl(NamedDecl *ND, bool &AsNestedNameSpecifier) const;
+
+ /// \brief Check whether the result is hidden by the Hiding declaration.
+ ///
+ /// \returns true if the result is hidden and cannot be found, false if
+ /// the hidden result could still be found. When false, \p R may be
+ /// modified to describe how the result can be found (e.g., via extra
+ /// qualification).
+ bool CheckHiddenResult(Result &R, DeclContext *CurContext,
+ NamedDecl *Hiding);
+
+ /// \brief Add a new result to this result set (if it isn't already in one
+ /// of the shadow maps), or replace an existing result (for, e.g., a
+ /// redeclaration).
+ ///
+ /// \param R the result to add (if it is unique).
+ ///
+ /// \param CurContext the context in which this result will be named.
+ void MaybeAddResult(Result R, DeclContext *CurContext = 0);
+
+ /// \brief Add a new result to this result set, where we already know
+    /// the hiding declaration (if any).
+ ///
+ /// \param R the result to add (if it is unique).
+ ///
+ /// \param CurContext the context in which this result will be named.
+ ///
+ /// \param Hiding the declaration that hides the result.
+ ///
+ /// \param InBaseClass whether the result was found in a base
+ /// class of the searched context.
+ void AddResult(Result R, DeclContext *CurContext, NamedDecl *Hiding,
+ bool InBaseClass);
+
+ /// \brief Add a new non-declaration result to this result set.
+ void AddResult(Result R);
+
+ /// \brief Enter into a new scope.
+ void EnterNewScope();
+
+ /// \brief Exit from the current scope.
+ void ExitScope();
+
+ /// \brief Ignore this declaration, if it is seen again.
+ void Ignore(Decl *D) { AllDeclsFound.insert(D->getCanonicalDecl()); }
+
+ /// \name Name lookup predicates
+ ///
+ /// These predicates can be passed to the name lookup functions to filter the
+ /// results of name lookup. All of the predicates have the same type, so that
+ ///
+ //@{
+ bool IsOrdinaryName(NamedDecl *ND) const;
+ bool IsOrdinaryNonTypeName(NamedDecl *ND) const;
+ bool IsIntegralConstantValue(NamedDecl *ND) const;
+ bool IsOrdinaryNonValueName(NamedDecl *ND) const;
+ bool IsNestedNameSpecifier(NamedDecl *ND) const;
+ bool IsEnum(NamedDecl *ND) const;
+ bool IsClassOrStruct(NamedDecl *ND) const;
+ bool IsUnion(NamedDecl *ND) const;
+ bool IsNamespace(NamedDecl *ND) const;
+ bool IsNamespaceOrAlias(NamedDecl *ND) const;
+ bool IsType(NamedDecl *ND) const;
+ bool IsMember(NamedDecl *ND) const;
+ bool IsObjCIvar(NamedDecl *ND) const;
+ bool IsObjCMessageReceiver(NamedDecl *ND) const;
+ bool IsObjCMessageReceiverOrLambdaCapture(NamedDecl *ND) const;
+ bool IsObjCCollection(NamedDecl *ND) const;
+ bool IsImpossibleToSatisfy(NamedDecl *ND) const;
+ //@}
+ };
+}
+
+class ResultBuilder::ShadowMapEntry::iterator {
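+  // Either holds the single (declaration, index) pair stored directly in the
+  // entry, or iterates over the entry's vector of pairs.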
+ llvm::PointerUnion<NamedDecl*, const DeclIndexPair*> DeclOrIterator;
+ unsigned SingleDeclIndex;
+
+public:
+ typedef DeclIndexPair value_type;
+ typedef value_type reference;
+ typedef std::ptrdiff_t difference_type;
+ typedef std::input_iterator_tag iterator_category;
+
+ class pointer {
+ DeclIndexPair Value;
+
+ public:
+ pointer(const DeclIndexPair &Value) : Value(Value) { }
+
+ const DeclIndexPair *operator->() const {
+ return &Value;
+ }
+ };
+
+ iterator() : DeclOrIterator((NamedDecl *)0), SingleDeclIndex(0) { }
+
+ iterator(NamedDecl *SingleDecl, unsigned Index)
+ : DeclOrIterator(SingleDecl), SingleDeclIndex(Index) { }
+
+ iterator(const DeclIndexPair *Iterator)
+ : DeclOrIterator(Iterator), SingleDeclIndex(0) { }
+
+ iterator &operator++() {
+ if (DeclOrIterator.is<NamedDecl *>()) {
+ DeclOrIterator = (NamedDecl *)0;
+ SingleDeclIndex = 0;
+ return *this;
+ }
+
+ const DeclIndexPair *I = DeclOrIterator.get<const DeclIndexPair*>();
+ ++I;
+ DeclOrIterator = I;
+ return *this;
+ }
+
+ /*iterator operator++(int) {
+ iterator tmp(*this);
+ ++(*this);
+ return tmp;
+ }*/
+
+ reference operator*() const {
+ if (NamedDecl *ND = DeclOrIterator.dyn_cast<NamedDecl *>())
+ return reference(ND, SingleDeclIndex);
+
+ return *DeclOrIterator.get<const DeclIndexPair*>();
+ }
+
+ pointer operator->() const {
+ return pointer(**this);
+ }
+
+ friend bool operator==(const iterator &X, const iterator &Y) {
+ return X.DeclOrIterator.getOpaqueValue()
+ == Y.DeclOrIterator.getOpaqueValue() &&
+ X.SingleDeclIndex == Y.SingleDeclIndex;
+ }
+
+ friend bool operator!=(const iterator &X, const iterator &Y) {
+ return !(X == Y);
+ }
+};
+
+ResultBuilder::ShadowMapEntry::iterator
+ResultBuilder::ShadowMapEntry::begin() const {
+ if (DeclOrVector.isNull())
+ return iterator();
+
+ if (NamedDecl *ND = DeclOrVector.dyn_cast<NamedDecl *>())
+ return iterator(ND, SingleDeclIndex);
+
+ return iterator(DeclOrVector.get<DeclIndexPairVector *>()->begin());
+}
+
+ResultBuilder::ShadowMapEntry::iterator
+ResultBuilder::ShadowMapEntry::end() const {
+ if (DeclOrVector.is<NamedDecl *>() || DeclOrVector.isNull())
+ return iterator();
+
+ return iterator(DeclOrVector.get<DeclIndexPairVector *>()->end());
+}
+
+/// \brief Compute the qualification required to get from the current context
+/// (\p CurContext) to the target context (\p TargetContext).
+///
+/// \param Context the AST context in which the qualification will be used.
+///
+/// \param CurContext the context where an entity is being named, which is
+/// typically based on the current scope.
+///
+/// \param TargetContext the context in which the named entity actually
+/// resides.
+///
+/// \returns a nested name specifier that refers into the target context, or
+/// NULL if no qualification is needed.
+static NestedNameSpecifier *
+getRequiredQualification(ASTContext &Context,
+ DeclContext *CurContext,
+ DeclContext *TargetContext) {
+ SmallVector<DeclContext *, 4> TargetParents;
+
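+  // Walk up from the target context, collecting the contexts that will need
+  // to be named, until we reach a context that encloses the current context.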
+ for (DeclContext *CommonAncestor = TargetContext;
+ CommonAncestor && !CommonAncestor->Encloses(CurContext);
+ CommonAncestor = CommonAncestor->getLookupParent()) {
+ if (CommonAncestor->isTransparentContext() ||
+ CommonAncestor->isFunctionOrMethod())
+ continue;
+
+ TargetParents.push_back(CommonAncestor);
+ }
+
+ NestedNameSpecifier *Result = 0;
+ while (!TargetParents.empty()) {
+ DeclContext *Parent = TargetParents.back();
+ TargetParents.pop_back();
+
+ if (NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(Parent)) {
+ if (!Namespace->getIdentifier())
+ continue;
+
+ Result = NestedNameSpecifier::Create(Context, Result, Namespace);
+ }
+ else if (TagDecl *TD = dyn_cast<TagDecl>(Parent))
+ Result = NestedNameSpecifier::Create(Context, Result,
+ false,
+ Context.getTypeDeclType(TD).getTypePtr());
+ }
+ return Result;
+}
+
+bool ResultBuilder::isInterestingDecl(NamedDecl *ND,
+ bool &AsNestedNameSpecifier) const {
+ AsNestedNameSpecifier = false;
+
+ ND = ND->getUnderlyingDecl();
+ unsigned IDNS = ND->getIdentifierNamespace();
+
+ // Skip unnamed entities.
+ if (!ND->getDeclName())
+ return false;
+
+ // Friend declarations and declarations introduced due to friends are never
+ // added as results.
+ if (IDNS & (Decl::IDNS_OrdinaryFriend | Decl::IDNS_TagFriend))
+ return false;
+
+ // Class template (partial) specializations are never added as results.
+ if (isa<ClassTemplateSpecializationDecl>(ND) ||
+ isa<ClassTemplatePartialSpecializationDecl>(ND))
+ return false;
+
+ // Using declarations themselves are never added as results.
+ if (isa<UsingDecl>(ND))
+ return false;
+
+ // Some declarations have reserved names that we don't want to ever show.
+ if (const IdentifierInfo *Id = ND->getIdentifier()) {
+ // __va_list_tag is a freak of nature. Find it and skip it.
+ if (Id->isStr("__va_list_tag") || Id->isStr("__builtin_va_list"))
+ return false;
+
+ // Filter out names reserved for the implementation (C99 7.1.3,
+ // C++ [lib.global.names]) if they come from a system header.
+ //
+ // FIXME: Add predicate for this.
+ if (Id->getLength() >= 2) {
+ const char *Name = Id->getNameStart();
+ if (Name[0] == '_' &&
+ (Name[1] == '_' || (Name[1] >= 'A' && Name[1] <= 'Z')) &&
+ (ND->getLocation().isInvalid() ||
+ SemaRef.SourceMgr.isInSystemHeader(
+ SemaRef.SourceMgr.getSpellingLoc(ND->getLocation()))))
+ return false;
+ }
+ }
+
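+  // When looking for nested-name-specifiers, or when the declaration is a
+  // namespace (or namespace alias) and we are not specifically looking for
+  // namespaces, the result is only interesting as a nested-name-specifier.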
+ if (Filter == &ResultBuilder::IsNestedNameSpecifier ||
+ ((isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND)) &&
+ Filter != &ResultBuilder::IsNamespace &&
+ Filter != &ResultBuilder::IsNamespaceOrAlias &&
+ Filter != 0))
+ AsNestedNameSpecifier = true;
+
+ // Filter out any unwanted results.
+ if (Filter && !(this->*Filter)(ND)) {
+ // Check whether it is interesting as a nested-name-specifier.
+ if (AllowNestedNameSpecifiers && SemaRef.getLangOpts().CPlusPlus &&
+ IsNestedNameSpecifier(ND) &&
+ (Filter != &ResultBuilder::IsMember ||
+ (isa<CXXRecordDecl>(ND) &&
+ cast<CXXRecordDecl>(ND)->isInjectedClassName()))) {
+ AsNestedNameSpecifier = true;
+ return true;
+ }
+
+ return false;
+ }
+ // ... then it must be interesting!
+ return true;
+}
+
+bool ResultBuilder::CheckHiddenResult(Result &R, DeclContext *CurContext,
+ NamedDecl *Hiding) {
+ // In C, there is no way to refer to a hidden name.
+ // FIXME: This isn't true; we can find a tag name hidden by an ordinary
+ // name if we introduce the tag type.
+ if (!SemaRef.getLangOpts().CPlusPlus)
+ return true;
+
+ DeclContext *HiddenCtx = R.Declaration->getDeclContext()->getRedeclContext();
+
+ // There is no way to qualify a name declared in a function or method.
+ if (HiddenCtx->isFunctionOrMethod())
+ return true;
+
+ if (HiddenCtx == Hiding->getDeclContext()->getRedeclContext())
+ return true;
+
+ // We can refer to the result with the appropriate qualification. Do it.
+ R.Hidden = true;
+ R.QualifierIsInformative = false;
+
+ if (!R.Qualifier)
+ R.Qualifier = getRequiredQualification(SemaRef.Context,
+ CurContext,
+ R.Declaration->getDeclContext());
+ return false;
+}
+
+/// \brief A simplified classification of types used to determine whether two
+/// types are "similar enough" when adjusting priorities.
+SimplifiedTypeClass clang::getSimplifiedTypeClass(CanQualType T) {
+ switch (T->getTypeClass()) {
+ case Type::Builtin:
+ switch (cast<BuiltinType>(T)->getKind()) {
+ case BuiltinType::Void:
+ return STC_Void;
+
+ case BuiltinType::NullPtr:
+ return STC_Pointer;
+
+ case BuiltinType::Overload:
+ case BuiltinType::Dependent:
+ return STC_Other;
+
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ return STC_ObjectiveC;
+
+ default:
+ return STC_Arithmetic;
+ }
+
+ case Type::Complex:
+ return STC_Arithmetic;
+
+ case Type::Pointer:
+ return STC_Pointer;
+
+ case Type::BlockPointer:
+ return STC_Block;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ return getSimplifiedTypeClass(T->getAs<ReferenceType>()->getPointeeType());
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ return STC_Array;
+
+ case Type::DependentSizedExtVector:
+ case Type::Vector:
+ case Type::ExtVector:
+ return STC_Arithmetic;
+
+ case Type::FunctionProto:
+ case Type::FunctionNoProto:
+ return STC_Function;
+
+ case Type::Record:
+ return STC_Record;
+
+ case Type::Enum:
+ return STC_Arithmetic;
+
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ return STC_ObjectiveC;
+
+ default:
+ return STC_Other;
+ }
+}
+
+/// \brief Get the type that a given expression will have if this declaration
+/// is used as an expression in its "typical" code-completion form.
+QualType clang::getDeclUsageType(ASTContext &C, NamedDecl *ND) {
+ ND = cast<NamedDecl>(ND->getUnderlyingDecl());
+
+ if (TypeDecl *Type = dyn_cast<TypeDecl>(ND))
+ return C.getTypeDeclType(Type);
+ if (ObjCInterfaceDecl *Iface = dyn_cast<ObjCInterfaceDecl>(ND))
+ return C.getObjCInterfaceType(Iface);
+
+ QualType T;
+ if (FunctionDecl *Function = dyn_cast<FunctionDecl>(ND))
+ T = Function->getCallResultType();
+ else if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND))
+ T = Method->getSendResultType();
+ else if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(ND))
+ T = FunTmpl->getTemplatedDecl()->getCallResultType();
+ else if (EnumConstantDecl *Enumerator = dyn_cast<EnumConstantDecl>(ND))
+ T = C.getTypeDeclType(cast<EnumDecl>(Enumerator->getDeclContext()));
+ else if (ObjCPropertyDecl *Property = dyn_cast<ObjCPropertyDecl>(ND))
+ T = Property->getType();
+ else if (ValueDecl *Value = dyn_cast<ValueDecl>(ND))
+ T = Value->getType();
+ else
+ return QualType();
+
+ // Dig through references, function pointers, and block pointers to
+ // get down to the likely type of an expression when the entity is
+ // used.
+ do {
+ if (const ReferenceType *Ref = T->getAs<ReferenceType>()) {
+ T = Ref->getPointeeType();
+ continue;
+ }
+
+ if (const PointerType *Pointer = T->getAs<PointerType>()) {
+ if (Pointer->getPointeeType()->isFunctionType()) {
+ T = Pointer->getPointeeType();
+ continue;
+ }
+
+ break;
+ }
+
+ if (const BlockPointerType *Block = T->getAs<BlockPointerType>()) {
+ T = Block->getPointeeType();
+ continue;
+ }
+
+ if (const FunctionType *Function = T->getAs<FunctionType>()) {
+ T = Function->getResultType();
+ continue;
+ }
+
+ break;
+ } while (true);
+
+ return T;
+}
+
+void ResultBuilder::AdjustResultPriorityForDecl(Result &R) {
+ // If this is an Objective-C method declaration whose selector matches our
+ // preferred selector, give it a priority boost.
+ if (!PreferredSelector.isNull())
+ if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(R.Declaration))
+ if (PreferredSelector == Method->getSelector())
+ R.Priority += CCD_SelectorMatch;
+
+ // If we have a preferred type, adjust the priority for results with exactly-
+ // matching or nearly-matching types.
+ if (!PreferredType.isNull()) {
+ QualType T = getDeclUsageType(SemaRef.Context, R.Declaration);
+ if (!T.isNull()) {
+ CanQualType TC = SemaRef.Context.getCanonicalType(T);
+ // Check for exactly-matching types (modulo qualifiers).
+ if (SemaRef.Context.hasSameUnqualifiedType(PreferredType, TC))
+ R.Priority /= CCF_ExactTypeMatch;
+ // Check for nearly-matching types, based on classification of each.
+ else if ((getSimplifiedTypeClass(PreferredType)
+ == getSimplifiedTypeClass(TC)) &&
+ !(PreferredType->isEnumeralType() && TC->isEnumeralType()))
+ R.Priority /= CCF_SimilarTypeMatch;
+ }
+ }
+}
+
+void ResultBuilder::MaybeAddConstructorResults(Result R) {
+ if (!SemaRef.getLangOpts().CPlusPlus || !R.Declaration ||
+ !CompletionContext.wantConstructorResults())
+ return;
+
+ ASTContext &Context = SemaRef.Context;
+ NamedDecl *D = R.Declaration;
+ CXXRecordDecl *Record = 0;
+ if (ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(D))
+ Record = ClassTemplate->getTemplatedDecl();
+ else if ((Record = dyn_cast<CXXRecordDecl>(D))) {
+ // Skip specializations and partial specializations.
+ if (isa<ClassTemplateSpecializationDecl>(Record))
+ return;
+ } else {
+ // There are no constructors here.
+ return;
+ }
+
+ Record = Record->getDefinition();
+ if (!Record)
+ return;
+
+ QualType RecordTy = Context.getTypeDeclType(Record);
+ DeclarationName ConstructorName
+ = Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(RecordTy));
+ for (DeclContext::lookup_result Ctors = Record->lookup(ConstructorName);
+ Ctors.first != Ctors.second; ++Ctors.first) {
+ R.Declaration = *Ctors.first;
+ R.CursorKind = getCursorKindForDecl(R.Declaration);
+ Results.push_back(R);
+ }
+}
+
+void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
+ assert(!ShadowMaps.empty() && "Must enter into a results scope");
+
+ if (R.Kind != Result::RK_Declaration) {
+ // For non-declaration results, just add the result.
+ Results.push_back(R);
+ return;
+ }
+
+ // Look through using declarations.
+ if (UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(R.Declaration)) {
+ MaybeAddResult(Result(Using->getTargetDecl(), R.Qualifier), CurContext);
+ return;
+ }
+
+ Decl *CanonDecl = R.Declaration->getCanonicalDecl();
+ unsigned IDNS = CanonDecl->getIdentifierNamespace();
+
+ bool AsNestedNameSpecifier = false;
+ if (!isInterestingDecl(R.Declaration, AsNestedNameSpecifier))
+ return;
+
+ // C++ constructors are never found by name lookup.
+ if (isa<CXXConstructorDecl>(R.Declaration))
+ return;
+
+ ShadowMap &SMap = ShadowMaps.back();
+ ShadowMapEntry::iterator I, IEnd;
+ ShadowMap::iterator NamePos = SMap.find(R.Declaration->getDeclName());
+ if (NamePos != SMap.end()) {
+ I = NamePos->second.begin();
+ IEnd = NamePos->second.end();
+ }
+
+ for (; I != IEnd; ++I) {
+ NamedDecl *ND = I->first;
+ unsigned Index = I->second;
+ if (ND->getCanonicalDecl() == CanonDecl) {
+ // This is a redeclaration. Always pick the newer declaration.
+ Results[Index].Declaration = R.Declaration;
+
+ // We're done.
+ return;
+ }
+ }
+
+ // This is a new declaration in this scope. However, check whether this
+ // declaration name is hidden by a similarly-named declaration in an outer
+ // scope.
+ std::list<ShadowMap>::iterator SM, SMEnd = ShadowMaps.end();
+ --SMEnd;
+ for (SM = ShadowMaps.begin(); SM != SMEnd; ++SM) {
+ ShadowMapEntry::iterator I, IEnd;
+ ShadowMap::iterator NamePos = SM->find(R.Declaration->getDeclName());
+ if (NamePos != SM->end()) {
+ I = NamePos->second.begin();
+ IEnd = NamePos->second.end();
+ }
+ for (; I != IEnd; ++I) {
+ // A tag declaration does not hide a non-tag declaration.
+ if (I->first->hasTagIdentifierNamespace() &&
+ (IDNS & (Decl::IDNS_Member | Decl::IDNS_Ordinary |
+ Decl::IDNS_ObjCProtocol)))
+ continue;
+
+ // Protocols are in distinct namespaces from everything else.
+ if (((I->first->getIdentifierNamespace() & Decl::IDNS_ObjCProtocol)
+ || (IDNS & Decl::IDNS_ObjCProtocol)) &&
+ I->first->getIdentifierNamespace() != IDNS)
+ continue;
+
+ // The newly-added result is hidden by an entry in the shadow map.
+ if (CheckHiddenResult(R, CurContext, I->first))
+ return;
+
+ break;
+ }
+ }
+
+ // Make sure that any given declaration only shows up in the result set once.
+ if (!AllDeclsFound.insert(CanonDecl))
+ return;
+
+ // If the filter is for nested-name-specifiers, then this result starts a
+ // nested-name-specifier.
+ if (AsNestedNameSpecifier) {
+ R.StartsNestedNameSpecifier = true;
+ R.Priority = CCP_NestedNameSpecifier;
+ } else
+ AdjustResultPriorityForDecl(R);
+
+ // If this result is supposed to have an informative qualifier, add one.
+ if (R.QualifierIsInformative && !R.Qualifier &&
+ !R.StartsNestedNameSpecifier) {
+ DeclContext *Ctx = R.Declaration->getDeclContext();
+ if (NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(Ctx))
+ R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, 0, Namespace);
+ else if (TagDecl *Tag = dyn_cast<TagDecl>(Ctx))
+ R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, 0, false,
+ SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
+ else
+ R.QualifierIsInformative = false;
+ }
+
+ // Insert this result into the set of results and into the current shadow
+ // map.
+ SMap[R.Declaration->getDeclName()].Add(R.Declaration, Results.size());
+ Results.push_back(R);
+
+ if (!AsNestedNameSpecifier)
+ MaybeAddConstructorResults(R);
+}
+
+void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
+ NamedDecl *Hiding, bool InBaseClass = false) {
+ if (R.Kind != Result::RK_Declaration) {
+ // For non-declaration results, just add the result.
+ Results.push_back(R);
+ return;
+ }
+
+ // Look through using declarations.
+ if (UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(R.Declaration)) {
+ AddResult(Result(Using->getTargetDecl(), R.Qualifier), CurContext, Hiding);
+ return;
+ }
+
+ bool AsNestedNameSpecifier = false;
+ if (!isInterestingDecl(R.Declaration, AsNestedNameSpecifier))
+ return;
+
+ // C++ constructors are never found by name lookup.
+ if (isa<CXXConstructorDecl>(R.Declaration))
+ return;
+
+ if (Hiding && CheckHiddenResult(R, CurContext, Hiding))
+ return;
+
+ // Make sure that any given declaration only shows up in the result set once.
+ if (!AllDeclsFound.insert(R.Declaration->getCanonicalDecl()))
+ return;
+
+ // If the filter is for nested-name-specifiers, then this result starts a
+ // nested-name-specifier.
+ if (AsNestedNameSpecifier) {
+ R.StartsNestedNameSpecifier = true;
+ R.Priority = CCP_NestedNameSpecifier;
+ }
+ else if (Filter == &ResultBuilder::IsMember && !R.Qualifier && InBaseClass &&
+ isa<CXXRecordDecl>(R.Declaration->getDeclContext()
+ ->getRedeclContext()))
+ R.QualifierIsInformative = true;
+
+ // If this result is supposed to have an informative qualifier, add one.
+ if (R.QualifierIsInformative && !R.Qualifier &&
+ !R.StartsNestedNameSpecifier) {
+ DeclContext *Ctx = R.Declaration->getDeclContext();
+ if (NamespaceDecl *Namespace = dyn_cast<NamespaceDecl>(Ctx))
+ R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, 0, Namespace);
+ else if (TagDecl *Tag = dyn_cast<TagDecl>(Ctx))
+ R.Qualifier = NestedNameSpecifier::Create(SemaRef.Context, 0, false,
+ SemaRef.Context.getTypeDeclType(Tag).getTypePtr());
+ else
+ R.QualifierIsInformative = false;
+ }
+
+ // Adjust the priority if this result comes from a base class.
+ if (InBaseClass)
+ R.Priority += CCD_InBaseClass;
+
+ AdjustResultPriorityForDecl(R);
+
+ if (HasObjectTypeQualifiers)
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(R.Declaration))
+ if (Method->isInstance()) {
+ Qualifiers MethodQuals
+ = Qualifiers::fromCVRMask(Method->getTypeQualifiers());
+ if (ObjectTypeQualifiers == MethodQuals)
+ R.Priority += CCD_ObjectQualifierMatch;
+ else if (ObjectTypeQualifiers - MethodQuals) {
+ // The method cannot be invoked, because doing so would drop
+ // qualifiers.
+ return;
+ }
+ }
+
+ // Insert this result into the set of results.
+ Results.push_back(R);
+
+ if (!AsNestedNameSpecifier)
+ MaybeAddConstructorResults(R);
+}
+
+void ResultBuilder::AddResult(Result R) {
+ assert(R.Kind != Result::RK_Declaration &&
+ "Declaration results need more context");
+ Results.push_back(R);
+}
+
+/// \brief Enter into a new scope.
+void ResultBuilder::EnterNewScope() {
+ ShadowMaps.push_back(ShadowMap());
+}
+
+/// \brief Exit from the current scope.
+void ResultBuilder::ExitScope() {
+ for (ShadowMap::iterator E = ShadowMaps.back().begin(),
+ EEnd = ShadowMaps.back().end();
+ E != EEnd;
+ ++E)
+ E->second.Destroy();
+
+ ShadowMaps.pop_back();
+}
+
+/// \brief Determines whether this given declaration will be found by
+/// ordinary name lookup.
+bool ResultBuilder::IsOrdinaryName(NamedDecl *ND) const {
+ ND = cast<NamedDecl>(ND->getUnderlyingDecl());
+
+ unsigned IDNS = Decl::IDNS_Ordinary;
+ if (SemaRef.getLangOpts().CPlusPlus)
+ IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member;
+ else if (SemaRef.getLangOpts().ObjC1) {
+ if (isa<ObjCIvarDecl>(ND))
+ return true;
+ }
+
+ return ND->getIdentifierNamespace() & IDNS;
+}
+
+/// \brief Determines whether this given declaration will be found by
+/// ordinary name lookup but is not a type name.
+bool ResultBuilder::IsOrdinaryNonTypeName(NamedDecl *ND) const {
+ ND = cast<NamedDecl>(ND->getUnderlyingDecl());
+ if (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND))
+ return false;
+
+ unsigned IDNS = Decl::IDNS_Ordinary;
+ if (SemaRef.getLangOpts().CPlusPlus)
+ IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace | Decl::IDNS_Member;
+ else if (SemaRef.getLangOpts().ObjC1) {
+ if (isa<ObjCIvarDecl>(ND))
+ return true;
+ }
+
+ return ND->getIdentifierNamespace() & IDNS;
+}
+
+bool ResultBuilder::IsIntegralConstantValue(NamedDecl *ND) const {
+ if (!IsOrdinaryNonTypeName(ND))
+    return false;
+
+ if (ValueDecl *VD = dyn_cast<ValueDecl>(ND->getUnderlyingDecl()))
+ if (VD->getType()->isIntegralOrEnumerationType())
+ return true;
+
+ return false;
+}
+
+/// \brief Determines whether this given declaration will be found by
+/// ordinary name lookup.
+bool ResultBuilder::IsOrdinaryNonValueName(NamedDecl *ND) const {
+ ND = cast<NamedDecl>(ND->getUnderlyingDecl());
+
+ unsigned IDNS = Decl::IDNS_Ordinary;
+ if (SemaRef.getLangOpts().CPlusPlus)
+ IDNS |= Decl::IDNS_Tag | Decl::IDNS_Namespace;
+
+ return (ND->getIdentifierNamespace() & IDNS) &&
+ !isa<ValueDecl>(ND) && !isa<FunctionTemplateDecl>(ND) &&
+ !isa<ObjCPropertyDecl>(ND);
+}
+
+/// \brief Determines whether the given declaration is suitable as the
+/// start of a C++ nested-name-specifier, e.g., a class or namespace.
+bool ResultBuilder::IsNestedNameSpecifier(NamedDecl *ND) const {
+ // Allow us to find class templates, too.
+ if (ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
+ ND = ClassTemplate->getTemplatedDecl();
+
+ return SemaRef.isAcceptableNestedNameSpecifier(ND);
+}
+
+/// \brief Determines whether the given declaration is an enumeration.
+bool ResultBuilder::IsEnum(NamedDecl *ND) const {
+ return isa<EnumDecl>(ND);
+}
+
+/// \brief Determines whether the given declaration is a class or struct.
+bool ResultBuilder::IsClassOrStruct(NamedDecl *ND) const {
+ // Allow us to find class templates, too.
+ if (ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
+ ND = ClassTemplate->getTemplatedDecl();
+
+ if (RecordDecl *RD = dyn_cast<RecordDecl>(ND))
+ return RD->getTagKind() == TTK_Class ||
+ RD->getTagKind() == TTK_Struct;
+
+ return false;
+}
+
+/// \brief Determines whether the given declaration is a union.
+bool ResultBuilder::IsUnion(NamedDecl *ND) const {
+ // Allow us to find class templates, too.
+ if (ClassTemplateDecl *ClassTemplate = dyn_cast<ClassTemplateDecl>(ND))
+ ND = ClassTemplate->getTemplatedDecl();
+
+ if (RecordDecl *RD = dyn_cast<RecordDecl>(ND))
+ return RD->getTagKind() == TTK_Union;
+
+ return false;
+}
+
+/// \brief Determines whether the given declaration is a namespace.
+bool ResultBuilder::IsNamespace(NamedDecl *ND) const {
+ return isa<NamespaceDecl>(ND);
+}
+
+/// \brief Determines whether the given declaration is a namespace or
+/// namespace alias.
+bool ResultBuilder::IsNamespaceOrAlias(NamedDecl *ND) const {
+ return isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND);
+}
+
+/// \brief Determines whether the given declaration is a type.
+bool ResultBuilder::IsType(NamedDecl *ND) const {
+ if (UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(ND))
+ ND = Using->getTargetDecl();
+
+ return isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND);
+}
+
+/// \brief Determines which members of a class should be visible via
+/// "." or "->". Only value declarations, nested name specifiers, and
+/// using declarations thereof should show up.
+bool ResultBuilder::IsMember(NamedDecl *ND) const {
+ if (UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(ND))
+ ND = Using->getTargetDecl();
+
+ return isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND) ||
+ isa<ObjCPropertyDecl>(ND);
+}
+
+static bool isObjCReceiverType(ASTContext &C, QualType T) {
+ T = C.getCanonicalType(T);
+ switch (T->getTypeClass()) {
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ return true;
+
+ case Type::Builtin:
+ switch (cast<BuiltinType>(T)->getKind()) {
+ case BuiltinType::ObjCId:
+ case BuiltinType::ObjCClass:
+ case BuiltinType::ObjCSel:
+ return true;
+
+ default:
+ break;
+ }
+ return false;
+
+ default:
+ break;
+ }
+
+ if (!C.getLangOpts().CPlusPlus)
+ return false;
+
+ // FIXME: We could perform more analysis here to determine whether a
+ // particular class type has any conversions to Objective-C types. For now,
+ // just accept all class types.
+ return T->isDependentType() || T->isRecordType();
+}
+
+bool ResultBuilder::IsObjCMessageReceiver(NamedDecl *ND) const {
+ QualType T = getDeclUsageType(SemaRef.Context, ND);
+ if (T.isNull())
+ return false;
+
+ T = SemaRef.Context.getBaseElementType(T);
+ return isObjCReceiverType(SemaRef.Context, T);
+}
+
+bool ResultBuilder::IsObjCMessageReceiverOrLambdaCapture(NamedDecl *ND) const {
+ if (IsObjCMessageReceiver(ND))
+ return true;
+
+ VarDecl *Var = dyn_cast<VarDecl>(ND);
+ if (!Var)
+ return false;
+
+ return Var->hasLocalStorage() && !Var->hasAttr<BlocksAttr>();
+}
+
+bool ResultBuilder::IsObjCCollection(NamedDecl *ND) const {
+ if ((SemaRef.getLangOpts().CPlusPlus && !IsOrdinaryName(ND)) ||
+ (!SemaRef.getLangOpts().CPlusPlus && !IsOrdinaryNonTypeName(ND)))
+ return false;
+
+ QualType T = getDeclUsageType(SemaRef.Context, ND);
+ if (T.isNull())
+ return false;
+
+ T = SemaRef.Context.getBaseElementType(T);
+ return T->isObjCObjectType() || T->isObjCObjectPointerType() ||
+ T->isObjCIdType() ||
+ (SemaRef.getLangOpts().CPlusPlus && T->isRecordType());
+}
+
+bool ResultBuilder::IsImpossibleToSatisfy(NamedDecl *ND) const {
+ return false;
+}
+
+/// \brief Determines whether the given declaration is an Objective-C
+/// instance variable.
+bool ResultBuilder::IsObjCIvar(NamedDecl *ND) const {
+ return isa<ObjCIvarDecl>(ND);
+}
+
+namespace {
+ /// \brief Visible declaration consumer that adds a code-completion result
+ /// for each visible declaration.
+ class CodeCompletionDeclConsumer : public VisibleDeclConsumer {
+ ResultBuilder &Results;
+ DeclContext *CurContext;
+
+ public:
+ CodeCompletionDeclConsumer(ResultBuilder &Results, DeclContext *CurContext)
+ : Results(Results), CurContext(CurContext) { }
+
+ virtual void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
+ bool InBaseClass) {
+ bool Accessible = true;
+ if (Ctx)
+ Accessible = Results.getSema().IsSimplyAccessible(ND, Ctx);
+
+ ResultBuilder::Result Result(ND, 0, false, Accessible);
+ Results.AddResult(Result, CurContext, Hiding, InBaseClass);
+ }
+ };
+}
+
+/// \brief Add type specifiers for the current language as keyword results.
+static void AddTypeSpecifierResults(const LangOptions &LangOpts,
+ ResultBuilder &Results) {
+ typedef CodeCompletionResult Result;
+ Results.AddResult(Result("short", CCP_Type));
+ Results.AddResult(Result("long", CCP_Type));
+ Results.AddResult(Result("signed", CCP_Type));
+ Results.AddResult(Result("unsigned", CCP_Type));
+ Results.AddResult(Result("void", CCP_Type));
+ Results.AddResult(Result("char", CCP_Type));
+ Results.AddResult(Result("int", CCP_Type));
+ Results.AddResult(Result("float", CCP_Type));
+ Results.AddResult(Result("double", CCP_Type));
+ Results.AddResult(Result("enum", CCP_Type));
+ Results.AddResult(Result("struct", CCP_Type));
+ Results.AddResult(Result("union", CCP_Type));
+ Results.AddResult(Result("const", CCP_Type));
+ Results.AddResult(Result("volatile", CCP_Type));
+
+ if (LangOpts.C99) {
+ // C99-specific
+ Results.AddResult(Result("_Complex", CCP_Type));
+ Results.AddResult(Result("_Imaginary", CCP_Type));
+ Results.AddResult(Result("_Bool", CCP_Type));
+ Results.AddResult(Result("restrict", CCP_Type));
+ }
+
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ if (LangOpts.CPlusPlus) {
+ // C++-specific
+ Results.AddResult(Result("bool", CCP_Type +
+ (LangOpts.ObjC1? CCD_bool_in_ObjC : 0)));
+ Results.AddResult(Result("class", CCP_Type));
+ Results.AddResult(Result("wchar_t", CCP_Type));
+
+ // typename qualified-id
+ Builder.AddTypedTextChunk("typename");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("qualifier");
+ Builder.AddTextChunk("::");
+ Builder.AddPlaceholderChunk("name");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ if (LangOpts.CPlusPlus0x) {
+ Results.AddResult(Result("auto", CCP_Type));
+ Results.AddResult(Result("char16_t", CCP_Type));
+ Results.AddResult(Result("char32_t", CCP_Type));
+
+ Builder.AddTypedTextChunk("decltype");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+ }
+
+ // GNU extensions
+ if (LangOpts.GNUMode) {
+ // FIXME: Enable when we actually support decimal floating point.
+ // Results.AddResult(Result("_Decimal32"));
+ // Results.AddResult(Result("_Decimal64"));
+ // Results.AddResult(Result("_Decimal128"));
+
+ Builder.AddTypedTextChunk("typeof");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ Builder.AddTypedTextChunk("typeof");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+}
+
+static void AddStorageSpecifiers(Sema::ParserCompletionContext CCC,
+ const LangOptions &LangOpts,
+ ResultBuilder &Results) {
+ typedef CodeCompletionResult Result;
+ // Note: we don't suggest either "auto" or "register", because both
+ // are pointless as storage specifiers. Elsewhere, we suggest "auto"
+ // in C++0x as a type specifier.
+ Results.AddResult(Result("extern"));
+ Results.AddResult(Result("static"));
+}
+
+static void AddFunctionSpecifiers(Sema::ParserCompletionContext CCC,
+ const LangOptions &LangOpts,
+ ResultBuilder &Results) {
+ typedef CodeCompletionResult Result;
+ switch (CCC) {
+ case Sema::PCC_Class:
+ case Sema::PCC_MemberTemplate:
+ if (LangOpts.CPlusPlus) {
+ Results.AddResult(Result("explicit"));
+ Results.AddResult(Result("friend"));
+ Results.AddResult(Result("mutable"));
+ Results.AddResult(Result("virtual"));
+ }
+ // Fall through
+
+ case Sema::PCC_ObjCInterface:
+ case Sema::PCC_ObjCImplementation:
+ case Sema::PCC_Namespace:
+ case Sema::PCC_Template:
+ if (LangOpts.CPlusPlus || LangOpts.C99)
+ Results.AddResult(Result("inline"));
+ break;
+
+ case Sema::PCC_ObjCInstanceVariableList:
+ case Sema::PCC_Expression:
+ case Sema::PCC_Statement:
+ case Sema::PCC_ForInit:
+ case Sema::PCC_Condition:
+ case Sema::PCC_RecoveryInFunction:
+ case Sema::PCC_Type:
+ case Sema::PCC_ParenthesizedExpression:
+ case Sema::PCC_LocalDeclarationSpecifiers:
+ break;
+ }
+}
+
+static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt);
+static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt);
+static void AddObjCVisibilityResults(const LangOptions &LangOpts,
+ ResultBuilder &Results,
+ bool NeedAt);
+static void AddObjCImplementationResults(const LangOptions &LangOpts,
+ ResultBuilder &Results,
+ bool NeedAt);
+static void AddObjCInterfaceResults(const LangOptions &LangOpts,
+ ResultBuilder &Results,
+ bool NeedAt);
+static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt);
+
+static void AddTypedefResult(ResultBuilder &Results) {
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Builder.AddTypedTextChunk("typedef");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("name");
+ Results.AddResult(CodeCompletionResult(Builder.TakeString()));
+}
+
+static bool WantTypesInContext(Sema::ParserCompletionContext CCC,
+ const LangOptions &LangOpts) {
+ switch (CCC) {
+ case Sema::PCC_Namespace:
+ case Sema::PCC_Class:
+ case Sema::PCC_ObjCInstanceVariableList:
+ case Sema::PCC_Template:
+ case Sema::PCC_MemberTemplate:
+ case Sema::PCC_Statement:
+ case Sema::PCC_RecoveryInFunction:
+ case Sema::PCC_Type:
+ case Sema::PCC_ParenthesizedExpression:
+ case Sema::PCC_LocalDeclarationSpecifiers:
+ return true;
+
+ case Sema::PCC_Expression:
+ case Sema::PCC_Condition:
+ return LangOpts.CPlusPlus;
+
+ case Sema::PCC_ObjCInterface:
+ case Sema::PCC_ObjCImplementation:
+ return false;
+
+ case Sema::PCC_ForInit:
+ return LangOpts.CPlusPlus || LangOpts.ObjC1 || LangOpts.C99;
+ }
+
+ llvm_unreachable("Invalid ParserCompletionContext!");
+}
+
+static PrintingPolicy getCompletionPrintingPolicy(const ASTContext &Context,
+ const Preprocessor &PP) {
+ PrintingPolicy Policy = Sema::getPrintingPolicy(Context, PP);
+ Policy.AnonymousTagLocations = false;
+ Policy.SuppressStrongLifetime = true;
+ Policy.SuppressUnwrittenScope = true;
+ return Policy;
+}
+
+/// \brief Retrieve a printing policy suitable for code completion.
+static PrintingPolicy getCompletionPrintingPolicy(Sema &S) {
+ return getCompletionPrintingPolicy(S.Context, S.PP);
+}
+
+/// \brief Retrieve the string representation of the given type as a string
+/// that has the appropriate lifetime for code completion.
+///
+/// This routine provides a fast path where we provide constant strings for
+/// common type names.
+static const char *GetCompletionTypeString(QualType T,
+ ASTContext &Context,
+ const PrintingPolicy &Policy,
+ CodeCompletionAllocator &Allocator) {
+ if (!T.getLocalQualifiers()) {
+ // Built-in type names are constant strings.
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(T))
+ return BT->getName(Policy);
+
+ // Anonymous tag types are constant strings.
+ if (const TagType *TagT = dyn_cast<TagType>(T))
+ if (TagDecl *Tag = TagT->getDecl())
+ if (!Tag->getIdentifier() && !Tag->getTypedefNameForAnonDecl()) {
+ switch (Tag->getTagKind()) {
+ case TTK_Struct: return "struct <anonymous>";
+ case TTK_Class: return "class <anonymous>";
+ case TTK_Union: return "union <anonymous>";
+ case TTK_Enum: return "enum <anonymous>";
+ }
+ }
+ }
+
+ // Slow path: format the type as a string.
+ std::string Result;
+ T.getAsStringInternal(Result, Policy);
+ return Allocator.CopyString(Result);
+}
+
+/// \brief Add a completion for "this", if we're in a member function.
+static void addThisCompletion(Sema &S, ResultBuilder &Results) {
+ QualType ThisTy = S.getCurrentThisType();
+ if (ThisTy.isNull())
+ return;
+
+ CodeCompletionAllocator &Allocator = Results.getAllocator();
+ CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
+ PrintingPolicy Policy = getCompletionPrintingPolicy(S);
+ Builder.AddResultTypeChunk(GetCompletionTypeString(ThisTy,
+ S.Context,
+ Policy,
+ Allocator));
+ Builder.AddTypedTextChunk("this");
+ Results.AddResult(CodeCompletionResult(Builder.TakeString()));
+}
+
+/// \brief Add language constructs that show up for "ordinary" names.
+static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC,
+ Scope *S,
+ Sema &SemaRef,
+ ResultBuilder &Results) {
+ CodeCompletionAllocator &Allocator = Results.getAllocator();
+ CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
+ PrintingPolicy Policy = getCompletionPrintingPolicy(SemaRef);
+
+ typedef CodeCompletionResult Result;
+ switch (CCC) {
+ case Sema::PCC_Namespace:
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ if (Results.includeCodePatterns()) {
+ // namespace <identifier> { declarations }
+ Builder.AddTypedTextChunk("namespace");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("identifier");
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("declarations");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // namespace identifier = identifier ;
+ Builder.AddTypedTextChunk("namespace");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("name");
+ Builder.AddChunk(CodeCompletionString::CK_Equal);
+ Builder.AddPlaceholderChunk("namespace");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // Using directives
+ Builder.AddTypedTextChunk("using");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("namespace");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("identifier");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // asm(string-literal)
+ Builder.AddTypedTextChunk("asm");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("string-literal");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ if (Results.includeCodePatterns()) {
+ // Explicit template instantiation
+ Builder.AddTypedTextChunk("template");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("declaration");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+ }
+
+ if (SemaRef.getLangOpts().ObjC1)
+ AddObjCTopLevelResults(Results, true);
+
+ AddTypedefResult(Results);
+ // Fall through
+
+ case Sema::PCC_Class:
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ // Using declaration
+ Builder.AddTypedTextChunk("using");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("qualifier");
+ Builder.AddTextChunk("::");
+ Builder.AddPlaceholderChunk("name");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // using typename qualifier::name (only in a dependent context)
+ if (SemaRef.CurContext->isDependentContext()) {
+ Builder.AddTypedTextChunk("using");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("typename");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("qualifier");
+ Builder.AddTextChunk("::");
+ Builder.AddPlaceholderChunk("name");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ if (CCC == Sema::PCC_Class) {
+ AddTypedefResult(Results);
+
+ // public:
+ Builder.AddTypedTextChunk("public");
+ if (Results.includeCodePatterns())
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // protected:
+ Builder.AddTypedTextChunk("protected");
+ if (Results.includeCodePatterns())
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // private:
+ Builder.AddTypedTextChunk("private");
+ if (Results.includeCodePatterns())
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+ }
+ // Fall through
+
+ case Sema::PCC_Template:
+ case Sema::PCC_MemberTemplate:
+ if (SemaRef.getLangOpts().CPlusPlus && Results.includeCodePatterns()) {
+ // template < parameters >
+ Builder.AddTypedTextChunk("template");
+ Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
+ Builder.AddPlaceholderChunk("parameters");
+ Builder.AddChunk(CodeCompletionString::CK_RightAngle);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
+ AddFunctionSpecifiers(CCC, SemaRef.getLangOpts(), Results);
+ break;
+
+ case Sema::PCC_ObjCInterface:
+ AddObjCInterfaceResults(SemaRef.getLangOpts(), Results, true);
+ AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
+ AddFunctionSpecifiers(CCC, SemaRef.getLangOpts(), Results);
+ break;
+
+ case Sema::PCC_ObjCImplementation:
+ AddObjCImplementationResults(SemaRef.getLangOpts(), Results, true);
+ AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
+ AddFunctionSpecifiers(CCC, SemaRef.getLangOpts(), Results);
+ break;
+
+ case Sema::PCC_ObjCInstanceVariableList:
+ AddObjCVisibilityResults(SemaRef.getLangOpts(), Results, true);
+ break;
+
+ case Sema::PCC_RecoveryInFunction:
+ case Sema::PCC_Statement: {
+ AddTypedefResult(Results);
+
+ if (SemaRef.getLangOpts().CPlusPlus && Results.includeCodePatterns() &&
+ SemaRef.getLangOpts().CXXExceptions) {
+ Builder.AddTypedTextChunk("try");
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Builder.AddTextChunk("catch");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("declaration");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+ if (SemaRef.getLangOpts().ObjC1)
+ AddObjCStatementResults(Results, true);
+
+ if (Results.includeCodePatterns()) {
+ // if (condition) { statements }
+ Builder.AddTypedTextChunk("if");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ if (SemaRef.getLangOpts().CPlusPlus)
+ Builder.AddPlaceholderChunk("condition");
+ else
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // switch (condition) { }
+ Builder.AddTypedTextChunk("switch");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ if (SemaRef.getLangOpts().CPlusPlus)
+ Builder.AddPlaceholderChunk("condition");
+ else
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // Switch-specific statements.
+ if (!SemaRef.getCurFunction()->SwitchStack.empty()) {
+ // case expression:
+ Builder.AddTypedTextChunk("case");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // default:
+ Builder.AddTypedTextChunk("default");
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ if (Results.includeCodePatterns()) {
+      // while (condition) { statements }
+ Builder.AddTypedTextChunk("while");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ if (SemaRef.getLangOpts().CPlusPlus)
+ Builder.AddPlaceholderChunk("condition");
+ else
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // do { statements } while ( expression );
+ Builder.AddTypedTextChunk("do");
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Builder.AddTextChunk("while");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // for ( for-init-statement ; condition ; expression ) { statements }
+ Builder.AddTypedTextChunk("for");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ if (SemaRef.getLangOpts().CPlusPlus || SemaRef.getLangOpts().C99)
+ Builder.AddPlaceholderChunk("init-statement");
+ else
+ Builder.AddPlaceholderChunk("init-expression");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ Builder.AddPlaceholderChunk("condition");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ Builder.AddPlaceholderChunk("inc-expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ if (S->getContinueParent()) {
+ // continue ;
+ Builder.AddTypedTextChunk("continue");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ if (S->getBreakParent()) {
+ // break ;
+ Builder.AddTypedTextChunk("break");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // "return expression ;" or "return ;", depending on whether we
+ // know the function is void or not.
+ bool isVoid = false;
+ if (FunctionDecl *Function = dyn_cast<FunctionDecl>(SemaRef.CurContext))
+ isVoid = Function->getResultType()->isVoidType();
+ else if (ObjCMethodDecl *Method
+ = dyn_cast<ObjCMethodDecl>(SemaRef.CurContext))
+ isVoid = Method->getResultType()->isVoidType();
+ else if (SemaRef.getCurBlock() &&
+ !SemaRef.getCurBlock()->ReturnType.isNull())
+ isVoid = SemaRef.getCurBlock()->ReturnType->isVoidType();
+ Builder.AddTypedTextChunk("return");
+ if (!isVoid) {
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ }
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // goto identifier ;
+ Builder.AddTypedTextChunk("goto");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("label");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // Using directives
+ Builder.AddTypedTextChunk("using");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("namespace");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("identifier");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // Fall through (for statement expressions).
+ case Sema::PCC_ForInit:
+ case Sema::PCC_Condition:
+ AddStorageSpecifiers(CCC, SemaRef.getLangOpts(), Results);
+ // Fall through: conditions and statements can have expressions.
+
+ case Sema::PCC_ParenthesizedExpression:
+ if (SemaRef.getLangOpts().ObjCAutoRefCount &&
+ CCC == Sema::PCC_ParenthesizedExpression) {
+ // (__bridge <type>)<expression>
+ Builder.AddTypedTextChunk("__bridge");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // (__bridge_transfer <Objective-C type>)<expression>
+ Builder.AddTypedTextChunk("__bridge_transfer");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("Objective-C type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // (__bridge_retained <CF type>)<expression>
+ Builder.AddTypedTextChunk("__bridge_retained");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("CF type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+ // Fall through
+
+ case Sema::PCC_Expression: {
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ // 'this', if we're in a non-static member function.
+ addThisCompletion(SemaRef, Results);
+
+ // true
+ Builder.AddResultTypeChunk("bool");
+ Builder.AddTypedTextChunk("true");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // false
+ Builder.AddResultTypeChunk("bool");
+ Builder.AddTypedTextChunk("false");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ if (SemaRef.getLangOpts().RTTI) {
+ // dynamic_cast < type-id > ( expression )
+ Builder.AddTypedTextChunk("dynamic_cast");
+ Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightAngle);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // static_cast < type-id > ( expression )
+ Builder.AddTypedTextChunk("static_cast");
+ Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightAngle);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // reinterpret_cast < type-id > ( expression )
+ Builder.AddTypedTextChunk("reinterpret_cast");
+ Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightAngle);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // const_cast < type-id > ( expression )
+ Builder.AddTypedTextChunk("const_cast");
+ Builder.AddChunk(CodeCompletionString::CK_LeftAngle);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightAngle);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ if (SemaRef.getLangOpts().RTTI) {
+ // typeid ( expression-or-type )
+ Builder.AddResultTypeChunk("std::type_info");
+ Builder.AddTypedTextChunk("typeid");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression-or-type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // new T ( ... )
+ Builder.AddTypedTextChunk("new");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expressions");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // new T [ ] ( ... )
+ Builder.AddTypedTextChunk("new");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_LeftBracket);
+ Builder.AddPlaceholderChunk("size");
+ Builder.AddChunk(CodeCompletionString::CK_RightBracket);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expressions");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // delete expression
+ Builder.AddResultTypeChunk("void");
+ Builder.AddTypedTextChunk("delete");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // delete [] expression
+ Builder.AddResultTypeChunk("void");
+ Builder.AddTypedTextChunk("delete");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBracket);
+ Builder.AddChunk(CodeCompletionString::CK_RightBracket);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ if (SemaRef.getLangOpts().CXXExceptions) {
+ // throw expression
+ Builder.AddResultTypeChunk("void");
+ Builder.AddTypedTextChunk("throw");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // FIXME: Rethrow?
+
+ if (SemaRef.getLangOpts().CPlusPlus0x) {
+ // nullptr
+ Builder.AddResultTypeChunk("std::nullptr_t");
+ Builder.AddTypedTextChunk("nullptr");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // alignof
+ Builder.AddResultTypeChunk("size_t");
+ Builder.AddTypedTextChunk("alignof");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // noexcept
+ Builder.AddResultTypeChunk("bool");
+ Builder.AddTypedTextChunk("noexcept");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // sizeof... expression
+ Builder.AddResultTypeChunk("size_t");
+ Builder.AddTypedTextChunk("sizeof...");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("parameter-pack");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+ }
+
+ if (SemaRef.getLangOpts().ObjC1) {
+ // Add "super", if we're in an Objective-C class with a superclass.
+ if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl()) {
+ // The interface can be NULL.
+ if (ObjCInterfaceDecl *ID = Method->getClassInterface())
+ if (ID->getSuperClass()) {
+          std::string SuperType = ID->getSuperClass()->getNameAsString();
+ if (Method->isInstanceMethod())
+ SuperType += " *";
+
+ Builder.AddResultTypeChunk(Allocator.CopyString(SuperType));
+ Builder.AddTypedTextChunk("super");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+ }
+
+ AddObjCExpressionResults(Results, true);
+ }
+
+ // sizeof expression
+ Builder.AddResultTypeChunk("size_t");
+ Builder.AddTypedTextChunk("sizeof");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression-or-type");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+ break;
+ }
+
+ case Sema::PCC_Type:
+ case Sema::PCC_LocalDeclarationSpecifiers:
+ break;
+ }
+
+ if (WantTypesInContext(CCC, SemaRef.getLangOpts()))
+ AddTypeSpecifierResults(SemaRef.getLangOpts(), Results);
+
+ if (SemaRef.getLangOpts().CPlusPlus && CCC != Sema::PCC_Type)
+ Results.AddResult(Result("operator"));
+}
+
+/// \brief If the given declaration has an associated type, add it as a result
+/// type chunk.
+static void AddResultTypeChunk(ASTContext &Context,
+ const PrintingPolicy &Policy,
+ NamedDecl *ND,
+ CodeCompletionBuilder &Result) {
+ if (!ND)
+ return;
+
+ // Skip constructors and conversion functions, which have their return types
+ // built into their names.
+ if (isa<CXXConstructorDecl>(ND) || isa<CXXConversionDecl>(ND))
+ return;
+
+ // Determine the type of the declaration (if it has a type).
+ QualType T;
+ if (FunctionDecl *Function = dyn_cast<FunctionDecl>(ND))
+ T = Function->getResultType();
+ else if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND))
+ T = Method->getResultType();
+ else if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(ND))
+ T = FunTmpl->getTemplatedDecl()->getResultType();
+ else if (EnumConstantDecl *Enumerator = dyn_cast<EnumConstantDecl>(ND))
+ T = Context.getTypeDeclType(cast<TypeDecl>(Enumerator->getDeclContext()));
+ else if (isa<UnresolvedUsingValueDecl>(ND)) {
+    /* Do nothing: ignore unresolved using declarations */
+ } else if (ValueDecl *Value = dyn_cast<ValueDecl>(ND)) {
+ T = Value->getType();
+ } else if (ObjCPropertyDecl *Property = dyn_cast<ObjCPropertyDecl>(ND))
+ T = Property->getType();
+
+ if (T.isNull() || Context.hasSameType(T, Context.DependentTy))
+ return;
+
+ Result.AddResultTypeChunk(GetCompletionTypeString(T, Context, Policy,
+ Result.getAllocator()));
+}
+
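+/// \brief If the given function or method has a sentinel attribute whose
+/// sentinel position is zero, append a text chunk for the expected
+/// terminator: ", nil" or ", NULL" when those macros are defined, or
+/// ", (void*)0" otherwise.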
+static void MaybeAddSentinel(ASTContext &Context, NamedDecl *FunctionOrMethod,
+ CodeCompletionBuilder &Result) {
+ if (SentinelAttr *Sentinel = FunctionOrMethod->getAttr<SentinelAttr>())
+ if (Sentinel->getSentinel() == 0) {
+ if (Context.getLangOpts().ObjC1 &&
+ Context.Idents.get("nil").hasMacroDefinition())
+ Result.AddTextChunk(", nil");
+ else if (Context.Idents.get("NULL").hasMacroDefinition())
+ Result.AddTextChunk(", NULL");
+ else
+ Result.AddTextChunk(", (void*)0");
+ }
+}
+
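+/// \brief Render the Objective-C parameter-passing qualifiers (in, inout,
+/// out, bycopy, byref, oneway) as a space-separated prefix string.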
+static std::string formatObjCParamQualifiers(unsigned ObjCQuals) {
+ std::string Result;
+ if (ObjCQuals & Decl::OBJC_TQ_In)
+ Result += "in ";
+ else if (ObjCQuals & Decl::OBJC_TQ_Inout)
+ Result += "inout ";
+ else if (ObjCQuals & Decl::OBJC_TQ_Out)
+ Result += "out ";
+ if (ObjCQuals & Decl::OBJC_TQ_Bycopy)
+ Result += "bycopy ";
+ else if (ObjCQuals & Decl::OBJC_TQ_Byref)
+ Result += "byref ";
+ if (ObjCQuals & Decl::OBJC_TQ_Oneway)
+ Result += "oneway ";
+ return Result;
+}
+
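+/// \brief Format a function parameter as a placeholder string, printing
+/// block-pointer parameters as block literals (with their own parameter
+/// placeholders) when type-source information is available.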
+static std::string FormatFunctionParameter(ASTContext &Context,
+ const PrintingPolicy &Policy,
+ ParmVarDecl *Param,
+ bool SuppressName = false,
+ bool SuppressBlock = false) {
+ bool ObjCMethodParam = isa<ObjCMethodDecl>(Param->getDeclContext());
+ if (Param->getType()->isDependentType() ||
+ !Param->getType()->isBlockPointerType()) {
+ // The argument for a dependent or non-block parameter is a placeholder
+ // containing that parameter's type.
+ std::string Result;
+
+ if (Param->getIdentifier() && !ObjCMethodParam && !SuppressName)
+ Result = Param->getIdentifier()->getName();
+
+ Param->getType().getAsStringInternal(Result, Policy);
+
+ if (ObjCMethodParam) {
+ Result = "(" + formatObjCParamQualifiers(Param->getObjCDeclQualifier())
+ + Result + ")";
+ if (Param->getIdentifier() && !SuppressName)
+ Result += Param->getIdentifier()->getName();
+ }
+ return Result;
+ }
+
+ // The argument for a block pointer parameter is a block literal with
+ // the appropriate type.
+ FunctionTypeLoc *Block = 0;
+ FunctionProtoTypeLoc *BlockProto = 0;
+ TypeLoc TL;
+ if (TypeSourceInfo *TSInfo = Param->getTypeSourceInfo()) {
+ TL = TSInfo->getTypeLoc().getUnqualifiedLoc();
+ while (true) {
+ // Look through typedefs.
+ if (!SuppressBlock) {
+ if (TypedefTypeLoc *TypedefTL = dyn_cast<TypedefTypeLoc>(&TL)) {
+ if (TypeSourceInfo *InnerTSInfo
+ = TypedefTL->getTypedefNameDecl()->getTypeSourceInfo()) {
+ TL = InnerTSInfo->getTypeLoc().getUnqualifiedLoc();
+ continue;
+ }
+ }
+
+ // Look through qualified types
+ if (QualifiedTypeLoc *QualifiedTL = dyn_cast<QualifiedTypeLoc>(&TL)) {
+ TL = QualifiedTL->getUnqualifiedLoc();
+ continue;
+ }
+ }
+
+ // Try to get the function prototype behind the block pointer type,
+ // then we're done.
+ if (BlockPointerTypeLoc *BlockPtr
+ = dyn_cast<BlockPointerTypeLoc>(&TL)) {
+ TL = BlockPtr->getPointeeLoc().IgnoreParens();
+ Block = dyn_cast<FunctionTypeLoc>(&TL);
+ BlockProto = dyn_cast<FunctionProtoTypeLoc>(&TL);
+ }
+ break;
+ }
+ }
+
+ if (!Block) {
+ // We were unable to find a FunctionProtoTypeLoc with parameter names
+ // for the block; just use the parameter type as a placeholder.
+ std::string Result;
+ if (!ObjCMethodParam && Param->getIdentifier())
+ Result = Param->getIdentifier()->getName();
+
+ Param->getType().getUnqualifiedType().getAsStringInternal(Result, Policy);
+
+ if (ObjCMethodParam) {
+ Result = "(" + formatObjCParamQualifiers(Param->getObjCDeclQualifier())
+ + Result + ")";
+ if (Param->getIdentifier())
+ Result += Param->getIdentifier()->getName();
+ }
+
+ return Result;
+ }
+
+ // We have the function prototype behind the block pointer type, as it was
+ // written in the source.
+ std::string Result;
+ QualType ResultType = Block->getTypePtr()->getResultType();
+ if (!ResultType->isVoidType() || SuppressBlock)
+ ResultType.getAsStringInternal(Result, Policy);
+
+ // Format the parameter list.
+ std::string Params;
+ if (!BlockProto || Block->getNumArgs() == 0) {
+ if (BlockProto && BlockProto->getTypePtr()->isVariadic())
+ Params = "(...)";
+ else
+ Params = "(void)";
+ } else {
+ Params += "(";
+ for (unsigned I = 0, N = Block->getNumArgs(); I != N; ++I) {
+ if (I)
+ Params += ", ";
+ Params += FormatFunctionParameter(Context, Policy, Block->getArg(I),
+ /*SuppressName=*/false,
+ /*SuppressBlock=*/true);
+
+ if (I == N - 1 && BlockProto->getTypePtr()->isVariadic())
+ Params += ", ...";
+ }
+ Params += ")";
+ }
+
+ if (SuppressBlock) {
+ // Format as a parameter.
+ Result = Result + " (^";
+ if (Param->getIdentifier())
+ Result += Param->getIdentifier()->getName();
+ Result += ")";
+ Result += Params;
+ } else {
+ // Format as a block literal argument.
+ Result = '^' + Result;
+ Result += Params;
+
+ if (Param->getIdentifier())
+ Result += Param->getIdentifier()->getName();
+ }
+
+ return Result;
+}
+
+/// \brief Add function parameter chunks to the given code completion string.
+static void AddFunctionParameterChunks(ASTContext &Context,
+ const PrintingPolicy &Policy,
+ FunctionDecl *Function,
+ CodeCompletionBuilder &Result,
+ unsigned Start = 0,
+ bool InOptional = false) {
+ bool FirstParameter = true;
+
+ for (unsigned P = Start, N = Function->getNumParams(); P != N; ++P) {
+ ParmVarDecl *Param = Function->getParamDecl(P);
+
+ if (Param->hasDefaultArg() && !InOptional) {
+ // When we see an optional default argument, put that argument and
+ // the remaining default arguments into a new, optional string.
+ CodeCompletionBuilder Opt(Result.getAllocator(),
+ Result.getCodeCompletionTUInfo());
+ if (!FirstParameter)
+ Opt.AddChunk(CodeCompletionString::CK_Comma);
+ AddFunctionParameterChunks(Context, Policy, Function, Opt, P, true);
+ Result.AddOptionalChunk(Opt.TakeString());
+ break;
+ }
+
+ if (FirstParameter)
+ FirstParameter = false;
+ else
+ Result.AddChunk(CodeCompletionString::CK_Comma);
+
+ InOptional = false;
+
+ // Format the placeholder string.
+ std::string PlaceholderStr = FormatFunctionParameter(Context, Policy,
+ Param);
+
+ if (Function->isVariadic() && P == N - 1)
+ PlaceholderStr += ", ...";
+
+ // Add the placeholder string.
+ Result.AddPlaceholderChunk(
+ Result.getAllocator().CopyString(PlaceholderStr));
+ }
+
+ if (const FunctionProtoType *Proto
+ = Function->getType()->getAs<FunctionProtoType>())
+ if (Proto->isVariadic()) {
+ if (Proto->getNumArgs() == 0)
+ Result.AddPlaceholderChunk("...");
+
+ MaybeAddSentinel(Context, Function, Result);
+ }
+}
+
+/// \brief Add template parameter chunks to the given code completion string.
+static void AddTemplateParameterChunks(ASTContext &Context,
+ const PrintingPolicy &Policy,
+ TemplateDecl *Template,
+ CodeCompletionBuilder &Result,
+ unsigned MaxParameters = 0,
+ unsigned Start = 0,
+ bool InDefaultArg = false) {
+ bool FirstParameter = true;
+
+ TemplateParameterList *Params = Template->getTemplateParameters();
+ TemplateParameterList::iterator PEnd = Params->end();
+ if (MaxParameters)
+ PEnd = Params->begin() + MaxParameters;
+ for (TemplateParameterList::iterator P = Params->begin() + Start;
+ P != PEnd; ++P) {
+ bool HasDefaultArg = false;
+ std::string PlaceholderStr;
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
+ if (TTP->wasDeclaredWithTypename())
+ PlaceholderStr = "typename";
+ else
+ PlaceholderStr = "class";
+
+ if (TTP->getIdentifier()) {
+ PlaceholderStr += ' ';
+ PlaceholderStr += TTP->getIdentifier()->getName();
+ }
+
+ HasDefaultArg = TTP->hasDefaultArgument();
+ } else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ if (NTTP->getIdentifier())
+ PlaceholderStr = NTTP->getIdentifier()->getName();
+ NTTP->getType().getAsStringInternal(PlaceholderStr, Policy);
+ HasDefaultArg = NTTP->hasDefaultArgument();
+ } else {
+ assert(isa<TemplateTemplateParmDecl>(*P));
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
+
+ // Since putting the template argument list into the placeholder would
+ // be very, very long, we just use an abbreviation.
+ PlaceholderStr = "template<...> class";
+ if (TTP->getIdentifier()) {
+ PlaceholderStr += ' ';
+ PlaceholderStr += TTP->getIdentifier()->getName();
+ }
+
+ HasDefaultArg = TTP->hasDefaultArgument();
+ }
+
+ if (HasDefaultArg && !InDefaultArg) {
+ // When we see an optional default argument, put that argument and
+ // the remaining default arguments into a new, optional string.
+ CodeCompletionBuilder Opt(Result.getAllocator(),
+ Result.getCodeCompletionTUInfo());
+ if (!FirstParameter)
+ Opt.AddChunk(CodeCompletionString::CK_Comma);
+ AddTemplateParameterChunks(Context, Policy, Template, Opt, MaxParameters,
+ P - Params->begin(), true);
+ Result.AddOptionalChunk(Opt.TakeString());
+ break;
+ }
+
+ InDefaultArg = false;
+
+ if (FirstParameter)
+ FirstParameter = false;
+ else
+ Result.AddChunk(CodeCompletionString::CK_Comma);
+
+ // Add the placeholder string.
+ Result.AddPlaceholderChunk(
+ Result.getAllocator().CopyString(PlaceholderStr));
+ }
+}
+
+/// \brief Add a qualifier to the given code-completion string, if the
+/// provided nested-name-specifier is non-NULL.
+static void
+AddQualifierToCompletionString(CodeCompletionBuilder &Result,
+ NestedNameSpecifier *Qualifier,
+ bool QualifierIsInformative,
+ ASTContext &Context,
+ const PrintingPolicy &Policy) {
+ if (!Qualifier)
+ return;
+
+ std::string PrintedNNS;
+ {
+ llvm::raw_string_ostream OS(PrintedNNS);
+ Qualifier->print(OS, Policy);
+ }
+ if (QualifierIsInformative)
+ Result.AddInformativeChunk(Result.getAllocator().CopyString(PrintedNNS));
+ else
+ Result.AddTextChunk(Result.getAllocator().CopyString(PrintedNNS));
+}
+
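+/// \brief Add any cv-qualifiers (const, volatile, restrict) from the given
+/// function's prototype to the completion string as informative chunks.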
+static void
+AddFunctionTypeQualsToCompletionString(CodeCompletionBuilder &Result,
+ FunctionDecl *Function) {
+ const FunctionProtoType *Proto
+ = Function->getType()->getAs<FunctionProtoType>();
+ if (!Proto || !Proto->getTypeQuals())
+ return;
+
+ // FIXME: Add ref-qualifier!
+
+ // Handle single qualifiers without copying
+ if (Proto->getTypeQuals() == Qualifiers::Const) {
+ Result.AddInformativeChunk(" const");
+ return;
+ }
+
+ if (Proto->getTypeQuals() == Qualifiers::Volatile) {
+ Result.AddInformativeChunk(" volatile");
+ return;
+ }
+
+ if (Proto->getTypeQuals() == Qualifiers::Restrict) {
+ Result.AddInformativeChunk(" restrict");
+ return;
+ }
+
+ // Handle multiple qualifiers.
+ std::string QualsStr;
+ if (Proto->getTypeQuals() & Qualifiers::Const)
+ QualsStr += " const";
+ if (Proto->getTypeQuals() & Qualifiers::Volatile)
+ QualsStr += " volatile";
+ if (Proto->getTypeQuals() & Qualifiers::Restrict)
+ QualsStr += " restrict";
+ Result.AddInformativeChunk(Result.getAllocator().CopyString(QualsStr));
+}
+
+/// \brief Add the name of the given declaration as the typed-text chunk of
+/// the code-completion string.
+static void AddTypedNameChunk(ASTContext &Context, const PrintingPolicy &Policy,
+ NamedDecl *ND, CodeCompletionBuilder &Result) {
+ DeclarationName Name = ND->getDeclName();
+ if (!Name)
+ return;
+
+ switch (Name.getNameKind()) {
+ case DeclarationName::CXXOperatorName: {
+ const char *OperatorName = 0;
+ switch (Name.getCXXOverloadedOperator()) {
+ case OO_None:
+ case OO_Conditional:
+ case NUM_OVERLOADED_OPERATORS:
+ OperatorName = "operator";
+ break;
+
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ case OO_##Name: OperatorName = "operator" Spelling; break;
+#define OVERLOADED_OPERATOR_MULTI(Name,Spelling,Unary,Binary,MemberOnly)
+#include "clang/Basic/OperatorKinds.def"
+
+ case OO_New: OperatorName = "operator new"; break;
+ case OO_Delete: OperatorName = "operator delete"; break;
+ case OO_Array_New: OperatorName = "operator new[]"; break;
+ case OO_Array_Delete: OperatorName = "operator delete[]"; break;
+ case OO_Call: OperatorName = "operator()"; break;
+ case OO_Subscript: OperatorName = "operator[]"; break;
+ }
+ Result.AddTypedTextChunk(OperatorName);
+ break;
+ }
+
+ case DeclarationName::Identifier:
+ case DeclarationName::CXXConversionFunctionName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(ND->getNameAsString()));
+ break;
+
+ case DeclarationName::CXXUsingDirective:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ break;
+
+ case DeclarationName::CXXConstructorName: {
+ CXXRecordDecl *Record = 0;
+ QualType Ty = Name.getCXXNameType();
+ if (const RecordType *RecordTy = Ty->getAs<RecordType>())
+ Record = cast<CXXRecordDecl>(RecordTy->getDecl());
+ else if (const InjectedClassNameType *InjectedTy
+ = Ty->getAs<InjectedClassNameType>())
+ Record = InjectedTy->getDecl();
+ else {
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(ND->getNameAsString()));
+ break;
+ }
+
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(Record->getNameAsString()));
+ if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate()) {
+ Result.AddChunk(CodeCompletionString::CK_LeftAngle);
+ AddTemplateParameterChunks(Context, Policy, Template, Result);
+ Result.AddChunk(CodeCompletionString::CK_RightAngle);
+ }
+ break;
+ }
+ }
+}
+
+CodeCompletionString *CodeCompletionResult::CreateCodeCompletionString(Sema &S,
+ CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo) {
+ return CreateCodeCompletionString(S.Context, S.PP, Allocator, CCTUInfo);
+}
+
+/// \brief If possible, create a new code completion string for the given
+/// result.
+///
+/// \returns Either a new, heap-allocated code completion string describing
+/// how to use this result, or NULL to indicate that the string or name of the
+/// result is all that is needed.
+CodeCompletionString *
+CodeCompletionResult::CreateCodeCompletionString(ASTContext &Ctx,
+ Preprocessor &PP,
+ CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo) {
+ CodeCompletionBuilder Result(Allocator, CCTUInfo, Priority, Availability);
+
+ PrintingPolicy Policy = getCompletionPrintingPolicy(Ctx, PP);
+ if (Kind == RK_Pattern) {
+ Pattern->Priority = Priority;
+ Pattern->Availability = Availability;
+
+ if (Declaration) {
+ Result.addParentContext(Declaration->getDeclContext());
+ Pattern->ParentKind = Result.getParentKind();
+ Pattern->ParentName = Result.getParentName();
+ }
+
+ return Pattern;
+ }
+
+ if (Kind == RK_Keyword) {
+ Result.AddTypedTextChunk(Keyword);
+ return Result.TakeString();
+ }
+
+ if (Kind == RK_Macro) {
+ MacroInfo *MI = PP.getMacroInfo(Macro);
+ assert(MI && "Not a macro?");
+
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(Macro->getName()));
+
+ if (!MI->isFunctionLike())
+ return Result.TakeString();
+
+ // Format a function-like macro with placeholders for the arguments.
+ Result.AddChunk(CodeCompletionString::CK_LeftParen);
+ MacroInfo::arg_iterator A = MI->arg_begin(), AEnd = MI->arg_end();
+
+ // C99 variadic macros add __VA_ARGS__ at the end. Skip it.
+ if (MI->isC99Varargs()) {
+ --AEnd;
+
+ if (A == AEnd) {
+ Result.AddPlaceholderChunk("...");
+ }
+ }
+
+ for (MacroInfo::arg_iterator A = MI->arg_begin(); A != AEnd; ++A) {
+ if (A != MI->arg_begin())
+ Result.AddChunk(CodeCompletionString::CK_Comma);
+
+ if (MI->isVariadic() && (A+1) == AEnd) {
+ SmallString<32> Arg = (*A)->getName();
+ if (MI->isC99Varargs())
+ Arg += ", ...";
+ else
+ Arg += "...";
+ Result.AddPlaceholderChunk(Result.getAllocator().CopyString(Arg));
+ break;
+ }
+
+ // Non-variadic macros are simple.
+ Result.AddPlaceholderChunk(
+ Result.getAllocator().CopyString((*A)->getName()));
+ }
+ Result.AddChunk(CodeCompletionString::CK_RightParen);
+ return Result.TakeString();
+ }
+
+ assert(Kind == RK_Declaration && "Missed a result kind?");
+ NamedDecl *ND = Declaration;
+ Result.addParentContext(ND->getDeclContext());
+
+ if (StartsNestedNameSpecifier) {
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(ND->getNameAsString()));
+ Result.AddTextChunk("::");
+ return Result.TakeString();
+ }
+
+ for (Decl::attr_iterator i = ND->attr_begin(); i != ND->attr_end(); ++i) {
+ if (AnnotateAttr *Attr = dyn_cast_or_null<AnnotateAttr>(*i)) {
+      Result.AddAnnotation(
+          Result.getAllocator().CopyString(Attr->getAnnotation()));
+ }
+ }
+
+ AddResultTypeChunk(Ctx, Policy, ND, Result);
+
+ if (FunctionDecl *Function = dyn_cast<FunctionDecl>(ND)) {
+ AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
+ Ctx, Policy);
+ AddTypedNameChunk(Ctx, Policy, ND, Result);
+ Result.AddChunk(CodeCompletionString::CK_LeftParen);
+ AddFunctionParameterChunks(Ctx, Policy, Function, Result);
+ Result.AddChunk(CodeCompletionString::CK_RightParen);
+ AddFunctionTypeQualsToCompletionString(Result, Function);
+ return Result.TakeString();
+ }
+
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(ND)) {
+ AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
+ Ctx, Policy);
+ FunctionDecl *Function = FunTmpl->getTemplatedDecl();
+ AddTypedNameChunk(Ctx, Policy, Function, Result);
+
+ // Figure out which template parameters are deduced (or have default
+ // arguments).
+ llvm::SmallBitVector Deduced;
+ Sema::MarkDeducedTemplateParameters(Ctx, FunTmpl, Deduced);
+ unsigned LastDeducibleArgument;
+ for (LastDeducibleArgument = Deduced.size(); LastDeducibleArgument > 0;
+ --LastDeducibleArgument) {
+ if (!Deduced[LastDeducibleArgument - 1]) {
+ // C++0x: Figure out if the template argument has a default. If so,
+ // the user doesn't need to type this argument.
+ // FIXME: We need to abstract template parameters better!
+ bool HasDefaultArg = false;
+ NamedDecl *Param = FunTmpl->getTemplateParameters()->getParam(
+ LastDeducibleArgument - 1);
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Param))
+ HasDefaultArg = TTP->hasDefaultArgument();
+ else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Param))
+ HasDefaultArg = NTTP->hasDefaultArgument();
+ else {
+ assert(isa<TemplateTemplateParmDecl>(Param));
+ HasDefaultArg
+ = cast<TemplateTemplateParmDecl>(Param)->hasDefaultArgument();
+ }
+
+ if (!HasDefaultArg)
+ break;
+ }
+ }
+
+ if (LastDeducibleArgument) {
+ // Some of the function template arguments cannot be deduced from a
+ // function call, so we introduce an explicit template argument list
+ // containing all of the arguments up to the first deducible argument.
+ Result.AddChunk(CodeCompletionString::CK_LeftAngle);
+ AddTemplateParameterChunks(Ctx, Policy, FunTmpl, Result,
+ LastDeducibleArgument);
+ Result.AddChunk(CodeCompletionString::CK_RightAngle);
+ }
+
+ // Add the function parameters
+ Result.AddChunk(CodeCompletionString::CK_LeftParen);
+ AddFunctionParameterChunks(Ctx, Policy, Function, Result);
+ Result.AddChunk(CodeCompletionString::CK_RightParen);
+ AddFunctionTypeQualsToCompletionString(Result, Function);
+ return Result.TakeString();
+ }
+
+ if (TemplateDecl *Template = dyn_cast<TemplateDecl>(ND)) {
+ AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
+ Ctx, Policy);
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(Template->getNameAsString()));
+ Result.AddChunk(CodeCompletionString::CK_LeftAngle);
+ AddTemplateParameterChunks(Ctx, Policy, Template, Result);
+ Result.AddChunk(CodeCompletionString::CK_RightAngle);
+ return Result.TakeString();
+ }
+
+ if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(ND)) {
+ Selector Sel = Method->getSelector();
+ if (Sel.isUnarySelector()) {
+ Result.AddTypedTextChunk(Result.getAllocator().CopyString(
+ Sel.getNameForSlot(0)));
+ return Result.TakeString();
+ }
+
+ std::string SelName = Sel.getNameForSlot(0).str();
+ SelName += ':';
+ if (StartParameter == 0)
+ Result.AddTypedTextChunk(Result.getAllocator().CopyString(SelName));
+ else {
+ Result.AddInformativeChunk(Result.getAllocator().CopyString(SelName));
+
+ // If there is only one parameter, and we're past it, add an empty
+ // typed-text chunk since there is nothing to type.
+ if (Method->param_size() == 1)
+ Result.AddTypedTextChunk("");
+ }
+ unsigned Idx = 0;
+ for (ObjCMethodDecl::param_iterator P = Method->param_begin(),
+ PEnd = Method->param_end();
+ P != PEnd; (void)++P, ++Idx) {
+ if (Idx > 0) {
+ std::string Keyword;
+ if (Idx > StartParameter)
+ Result.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ if (IdentifierInfo *II = Sel.getIdentifierInfoForSlot(Idx))
+ Keyword += II->getName();
+ Keyword += ":";
+ if (Idx < StartParameter || AllParametersAreInformative)
+ Result.AddInformativeChunk(Result.getAllocator().CopyString(Keyword));
+ else
+ Result.AddTypedTextChunk(Result.getAllocator().CopyString(Keyword));
+ }
+
+ // If we're before the starting parameter, skip the placeholder.
+ if (Idx < StartParameter)
+ continue;
+
+ std::string Arg;
+
+ if ((*P)->getType()->isBlockPointerType() && !DeclaringEntity)
+ Arg = FormatFunctionParameter(Ctx, Policy, *P, true);
+ else {
+ (*P)->getType().getAsStringInternal(Arg, Policy);
+ Arg = "(" + formatObjCParamQualifiers((*P)->getObjCDeclQualifier())
+ + Arg + ")";
+ if (IdentifierInfo *II = (*P)->getIdentifier())
+ if (DeclaringEntity || AllParametersAreInformative)
+ Arg += II->getName();
+ }
+
+ if (Method->isVariadic() && (P + 1) == PEnd)
+ Arg += ", ...";
+
+ if (DeclaringEntity)
+ Result.AddTextChunk(Result.getAllocator().CopyString(Arg));
+ else if (AllParametersAreInformative)
+ Result.AddInformativeChunk(Result.getAllocator().CopyString(Arg));
+ else
+ Result.AddPlaceholderChunk(Result.getAllocator().CopyString(Arg));
+ }
+
+ if (Method->isVariadic()) {
+ if (Method->param_size() == 0) {
+ if (DeclaringEntity)
+ Result.AddTextChunk(", ...");
+ else if (AllParametersAreInformative)
+ Result.AddInformativeChunk(", ...");
+ else
+ Result.AddPlaceholderChunk(", ...");
+ }
+
+ MaybeAddSentinel(Ctx, Method, Result);
+ }
+
+ return Result.TakeString();
+ }
+
+ if (Qualifier)
+ AddQualifierToCompletionString(Result, Qualifier, QualifierIsInformative,
+ Ctx, Policy);
+
+ Result.AddTypedTextChunk(
+ Result.getAllocator().CopyString(ND->getNameAsString()));
+ return Result.TakeString();
+}
+
+CodeCompletionString *
+CodeCompleteConsumer::OverloadCandidate::CreateSignatureString(
+ unsigned CurrentArg,
+ Sema &S,
+ CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo) const {
+ PrintingPolicy Policy = getCompletionPrintingPolicy(S);
+
+ // FIXME: Set priority, availability appropriately.
+  CodeCompletionBuilder Result(Allocator, CCTUInfo, 1,
+                               CXAvailability_Available);
+ FunctionDecl *FDecl = getFunction();
+ AddResultTypeChunk(S.Context, Policy, FDecl, Result);
+ const FunctionProtoType *Proto
+ = dyn_cast<FunctionProtoType>(getFunctionType());
+ if (!FDecl && !Proto) {
+ // Function without a prototype. Just give the return type and a
+ // highlighted ellipsis.
+ const FunctionType *FT = getFunctionType();
+ Result.AddTextChunk(GetCompletionTypeString(FT->getResultType(),
+ S.Context, Policy,
+ Result.getAllocator()));
+ Result.AddChunk(CodeCompletionString::CK_LeftParen);
+ Result.AddChunk(CodeCompletionString::CK_CurrentParameter, "...");
+ Result.AddChunk(CodeCompletionString::CK_RightParen);
+ return Result.TakeString();
+ }
+
+ if (FDecl)
+ Result.AddTextChunk(
+ Result.getAllocator().CopyString(FDecl->getNameAsString()));
+ else
+ Result.AddTextChunk(
+ Result.getAllocator().CopyString(
+ Proto->getResultType().getAsString(Policy)));
+
+ Result.AddChunk(CodeCompletionString::CK_LeftParen);
+ unsigned NumParams = FDecl? FDecl->getNumParams() : Proto->getNumArgs();
+ for (unsigned I = 0; I != NumParams; ++I) {
+ if (I)
+ Result.AddChunk(CodeCompletionString::CK_Comma);
+
+ std::string ArgString;
+ QualType ArgType;
+
+ if (FDecl) {
+ ArgString = FDecl->getParamDecl(I)->getNameAsString();
+ ArgType = FDecl->getParamDecl(I)->getOriginalType();
+ } else {
+ ArgType = Proto->getArgType(I);
+ }
+
+ ArgType.getAsStringInternal(ArgString, Policy);
+
+ if (I == CurrentArg)
+ Result.AddChunk(CodeCompletionString::CK_CurrentParameter,
+ Result.getAllocator().CopyString(ArgString));
+ else
+ Result.AddTextChunk(Result.getAllocator().CopyString(ArgString));
+ }
+
+ if (Proto && Proto->isVariadic()) {
+ Result.AddChunk(CodeCompletionString::CK_Comma);
+ if (CurrentArg < NumParams)
+ Result.AddTextChunk("...");
+ else
+ Result.AddChunk(CodeCompletionString::CK_CurrentParameter, "...");
+ }
+ Result.AddChunk(CodeCompletionString::CK_RightParen);
+
+ return Result.TakeString();
+}
+
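+/// \brief Determine the priority of a macro completion, treating nil, Nil,
+/// NULL, YES, NO, true, and false as constants and bool as a type.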
+unsigned clang::getMacroUsagePriority(StringRef MacroName,
+ const LangOptions &LangOpts,
+ bool PreferredTypeIsPointer) {
+ unsigned Priority = CCP_Macro;
+
+ // Treat the "nil", "Nil" and "NULL" macros as null pointer constants.
+ if (MacroName.equals("nil") || MacroName.equals("NULL") ||
+ MacroName.equals("Nil")) {
+ Priority = CCP_Constant;
+ if (PreferredTypeIsPointer)
+ Priority = Priority / CCF_SimilarTypeMatch;
+ }
+ // Treat "YES", "NO", "true", and "false" as constants.
+ else if (MacroName.equals("YES") || MacroName.equals("NO") ||
+ MacroName.equals("true") || MacroName.equals("false"))
+ Priority = CCP_Constant;
+ // Treat "bool" as a type.
+ else if (MacroName.equals("bool"))
+ Priority = CCP_Type + (LangOpts.ObjC1? CCD_bool_in_ObjC : 0);
+
+ return Priority;
+}
+
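+/// \brief Map a declaration's kind onto the cursor kind used to expose it
+/// through the libclang API, defaulting to CXCursor_UnexposedDecl.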
+CXCursorKind clang::getCursorKindForDecl(Decl *D) {
+ if (!D)
+ return CXCursor_UnexposedDecl;
+
+ switch (D->getKind()) {
+ case Decl::Enum: return CXCursor_EnumDecl;
+ case Decl::EnumConstant: return CXCursor_EnumConstantDecl;
+ case Decl::Field: return CXCursor_FieldDecl;
+ case Decl::Function:
+ return CXCursor_FunctionDecl;
+ case Decl::ObjCCategory: return CXCursor_ObjCCategoryDecl;
+ case Decl::ObjCCategoryImpl: return CXCursor_ObjCCategoryImplDecl;
+ case Decl::ObjCImplementation: return CXCursor_ObjCImplementationDecl;
+
+ case Decl::ObjCInterface: return CXCursor_ObjCInterfaceDecl;
+ case Decl::ObjCIvar: return CXCursor_ObjCIvarDecl;
+ case Decl::ObjCMethod:
+ return cast<ObjCMethodDecl>(D)->isInstanceMethod()
+ ? CXCursor_ObjCInstanceMethodDecl : CXCursor_ObjCClassMethodDecl;
+ case Decl::CXXMethod: return CXCursor_CXXMethod;
+ case Decl::CXXConstructor: return CXCursor_Constructor;
+ case Decl::CXXDestructor: return CXCursor_Destructor;
+ case Decl::CXXConversion: return CXCursor_ConversionFunction;
+ case Decl::ObjCProperty: return CXCursor_ObjCPropertyDecl;
+ case Decl::ObjCProtocol: return CXCursor_ObjCProtocolDecl;
+ case Decl::ParmVar: return CXCursor_ParmDecl;
+ case Decl::Typedef: return CXCursor_TypedefDecl;
+ case Decl::TypeAlias: return CXCursor_TypeAliasDecl;
+ case Decl::Var: return CXCursor_VarDecl;
+ case Decl::Namespace: return CXCursor_Namespace;
+ case Decl::NamespaceAlias: return CXCursor_NamespaceAlias;
+ case Decl::TemplateTypeParm: return CXCursor_TemplateTypeParameter;
+ case Decl::NonTypeTemplateParm:return CXCursor_NonTypeTemplateParameter;
+ case Decl::TemplateTemplateParm:return CXCursor_TemplateTemplateParameter;
+ case Decl::FunctionTemplate: return CXCursor_FunctionTemplate;
+ case Decl::ClassTemplate: return CXCursor_ClassTemplate;
+ case Decl::AccessSpec: return CXCursor_CXXAccessSpecifier;
+ case Decl::ClassTemplatePartialSpecialization:
+ return CXCursor_ClassTemplatePartialSpecialization;
+ case Decl::UsingDirective: return CXCursor_UsingDirective;
+
+ case Decl::Using:
+ case Decl::UnresolvedUsingValue:
+ case Decl::UnresolvedUsingTypename:
+ return CXCursor_UsingDeclaration;
+
+ case Decl::ObjCPropertyImpl:
+ switch (cast<ObjCPropertyImplDecl>(D)->getPropertyImplementation()) {
+ case ObjCPropertyImplDecl::Dynamic:
+ return CXCursor_ObjCDynamicDecl;
+
+ case ObjCPropertyImplDecl::Synthesize:
+ return CXCursor_ObjCSynthesizeDecl;
+ }
+
+ default:
+ if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ switch (TD->getTagKind()) {
+ case TTK_Struct: return CXCursor_StructDecl;
+ case TTK_Class: return CXCursor_ClassDecl;
+ case TTK_Union: return CXCursor_UnionDecl;
+ case TTK_Enum: return CXCursor_EnumDecl;
+ }
+ }
+ }
+
+ return CXCursor_UnexposedDecl;
+}
+
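+/// \brief Add a code-completion result for each macro known to the
+/// preprocessor, prioritized via getMacroUsagePriority.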
+static void AddMacroResults(Preprocessor &PP, ResultBuilder &Results,
+ bool TargetTypeIsPointer = false) {
+ typedef CodeCompletionResult Result;
+
+ Results.EnterNewScope();
+
+ for (Preprocessor::macro_iterator M = PP.macro_begin(),
+ MEnd = PP.macro_end();
+ M != MEnd; ++M) {
+ Results.AddResult(Result(M->first,
+ getMacroUsagePriority(M->first->getName(),
+ PP.getLangOpts(),
+ TargetTypeIsPointer)));
+ }
+
+ Results.ExitScope();
+}
+
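+/// \brief Add completions for the predefined function-name identifiers:
+/// __PRETTY_FUNCTION__, __FUNCTION__, and (in C99 or C++0x) __func__.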
+static void AddPrettyFunctionResults(const LangOptions &LangOpts,
+ ResultBuilder &Results) {
+ typedef CodeCompletionResult Result;
+
+ Results.EnterNewScope();
+
+ Results.AddResult(Result("__PRETTY_FUNCTION__", CCP_Constant));
+ Results.AddResult(Result("__FUNCTION__", CCP_Constant));
+ if (LangOpts.C99 || LangOpts.CPlusPlus0x)
+ Results.AddResult(Result("__func__", CCP_Constant));
+ Results.ExitScope();
+}
+
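+/// \brief Hand the accumulated code-completion results to the attached
+/// code-completion consumer, if any.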
+static void HandleCodeCompleteResults(Sema *S,
+ CodeCompleteConsumer *CodeCompleter,
+ CodeCompletionContext Context,
+ CodeCompletionResult *Results,
+ unsigned NumResults) {
+ if (CodeCompleter)
+ CodeCompleter->ProcessCodeCompleteResults(*S, Context, Results, NumResults);
+}
+
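+/// \brief Map a parser completion context onto the code-completion context
+/// kind that is reported to the code-completion consumer.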
+static enum CodeCompletionContext::Kind mapCodeCompletionContext(Sema &S,
+ Sema::ParserCompletionContext PCC) {
+ switch (PCC) {
+ case Sema::PCC_Namespace:
+ return CodeCompletionContext::CCC_TopLevel;
+
+ case Sema::PCC_Class:
+ return CodeCompletionContext::CCC_ClassStructUnion;
+
+ case Sema::PCC_ObjCInterface:
+ return CodeCompletionContext::CCC_ObjCInterface;
+
+ case Sema::PCC_ObjCImplementation:
+ return CodeCompletionContext::CCC_ObjCImplementation;
+
+ case Sema::PCC_ObjCInstanceVariableList:
+ return CodeCompletionContext::CCC_ObjCIvarList;
+
+ case Sema::PCC_Template:
+ case Sema::PCC_MemberTemplate:
+ if (S.CurContext->isFileContext())
+ return CodeCompletionContext::CCC_TopLevel;
+ if (S.CurContext->isRecord())
+ return CodeCompletionContext::CCC_ClassStructUnion;
+ return CodeCompletionContext::CCC_Other;
+
+ case Sema::PCC_RecoveryInFunction:
+ return CodeCompletionContext::CCC_Recovery;
+
+ case Sema::PCC_ForInit:
+ if (S.getLangOpts().CPlusPlus || S.getLangOpts().C99 ||
+ S.getLangOpts().ObjC1)
+ return CodeCompletionContext::CCC_ParenthesizedExpression;
+ else
+ return CodeCompletionContext::CCC_Expression;
+
+ case Sema::PCC_Expression:
+ case Sema::PCC_Condition:
+ return CodeCompletionContext::CCC_Expression;
+
+ case Sema::PCC_Statement:
+ return CodeCompletionContext::CCC_Statement;
+
+ case Sema::PCC_Type:
+ return CodeCompletionContext::CCC_Type;
+
+ case Sema::PCC_ParenthesizedExpression:
+ return CodeCompletionContext::CCC_ParenthesizedExpression;
+
+ case Sema::PCC_LocalDeclarationSpecifiers:
+ return CodeCompletionContext::CCC_Type;
+ }
+
+ llvm_unreachable("Invalid ParserCompletionContext!");
+}
+
+/// \brief If we're in a C++ virtual member function, add completion results
+/// that invoke the functions we override, since it's common to invoke the
+/// overridden function as well as add new functionality.
+///
+/// \param S The semantic analysis object for which we are generating results.
+///
+/// \param InContext The context named by the nested-name-specifier preceding
+/// the code-completion point, if any; when non-NULL, only overrides declared
+/// in that context are suggested.
+static void MaybeAddOverrideCalls(Sema &S, DeclContext *InContext,
+ ResultBuilder &Results) {
+ // Look through blocks.
+ DeclContext *CurContext = S.CurContext;
+ while (isa<BlockDecl>(CurContext))
+ CurContext = CurContext->getParent();
+
+ CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(CurContext);
+ if (!Method || !Method->isVirtual())
+ return;
+
+ // We need to have names for all of the parameters, if we're going to
+ // generate a forwarding call.
+ for (CXXMethodDecl::param_iterator P = Method->param_begin(),
+ PEnd = Method->param_end();
+ P != PEnd;
+ ++P) {
+ if (!(*P)->getDeclName())
+ return;
+ }
+
+ PrintingPolicy Policy = getCompletionPrintingPolicy(S);
+ for (CXXMethodDecl::method_iterator M = Method->begin_overridden_methods(),
+ MEnd = Method->end_overridden_methods();
+ M != MEnd; ++M) {
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ CXXMethodDecl *Overridden = const_cast<CXXMethodDecl *>(*M);
+ if (Overridden->getCanonicalDecl() == Method->getCanonicalDecl())
+ continue;
+
+ // If we need a nested-name-specifier, add one now.
+ if (!InContext) {
+ NestedNameSpecifier *NNS
+ = getRequiredQualification(S.Context, CurContext,
+ Overridden->getDeclContext());
+ if (NNS) {
+ std::string Str;
+ llvm::raw_string_ostream OS(Str);
+ NNS->print(OS, Policy);
+ Builder.AddTextChunk(Results.getAllocator().CopyString(OS.str()));
+ }
+ } else if (!InContext->Equals(Overridden->getDeclContext()))
+ continue;
+
+ Builder.AddTypedTextChunk(Results.getAllocator().CopyString(
+ Overridden->getNameAsString()));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ bool FirstParam = true;
+ for (CXXMethodDecl::param_iterator P = Method->param_begin(),
+ PEnd = Method->param_end();
+ P != PEnd; ++P) {
+ if (FirstParam)
+ FirstParam = false;
+ else
+ Builder.AddChunk(CodeCompletionString::CK_Comma);
+
+ Builder.AddPlaceholderChunk(Results.getAllocator().CopyString(
+ (*P)->getIdentifier()->getName()));
+ }
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(),
+ CCP_SuperCompletion,
+ CXCursor_CXXMethod,
+ CXAvailability_Available,
+ Overridden));
+ Results.Ignore(Overridden);
+ }
+}
+
+void Sema::CodeCompleteModuleImport(SourceLocation ImportLoc,
+ ModuleIdPath Path) {
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+
+ CodeCompletionAllocator &Allocator = Results.getAllocator();
+ CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
+ if (Path.empty()) {
+ // Enumerate all top-level modules.
+ llvm::SmallVector<Module *, 8> Modules;
+ PP.getHeaderSearchInfo().collectAllModules(Modules);
+ for (unsigned I = 0, N = Modules.size(); I != N; ++I) {
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Modules[I]->Name));
+ Results.AddResult(Result(Builder.TakeString(),
+ CCP_Declaration,
+ CXCursor_NotImplemented,
+ Modules[I]->isAvailable()
+ ? CXAvailability_Available
+ : CXAvailability_NotAvailable));
+ }
+ } else {
+ // Load the named module.
+ Module *Mod = PP.getModuleLoader().loadModule(ImportLoc, Path,
+ Module::AllVisible,
+ /*IsInclusionDirective=*/false);
+ // Enumerate submodules.
+ if (Mod) {
+ for (Module::submodule_iterator Sub = Mod->submodule_begin(),
+ SubEnd = Mod->submodule_end();
+ Sub != SubEnd; ++Sub) {
+
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString((*Sub)->Name));
+ Results.AddResult(Result(Builder.TakeString(),
+ CCP_Declaration,
+ CXCursor_NotImplemented,
+ (*Sub)->isAvailable()
+ ? CXAvailability_Available
+ : CXAvailability_NotAvailable));
+ }
+ }
+ }
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+                            Results.data(), Results.size());
+}
+
+void Sema::CodeCompleteOrdinaryName(Scope *S,
+ ParserCompletionContext CompletionContext) {
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ mapCodeCompletionContext(*this, CompletionContext));
+ Results.EnterNewScope();
+
+ // Determine how to filter results, e.g., so that the names of
+ // values (functions, enumerators, function templates, etc.) are
+ // only allowed where we can have an expression.
+ switch (CompletionContext) {
+ case PCC_Namespace:
+ case PCC_Class:
+ case PCC_ObjCInterface:
+ case PCC_ObjCImplementation:
+ case PCC_ObjCInstanceVariableList:
+ case PCC_Template:
+ case PCC_MemberTemplate:
+ case PCC_Type:
+ case PCC_LocalDeclarationSpecifiers:
+ Results.setFilter(&ResultBuilder::IsOrdinaryNonValueName);
+ break;
+
+ case PCC_Statement:
+ case PCC_ParenthesizedExpression:
+ case PCC_Expression:
+ case PCC_ForInit:
+ case PCC_Condition:
+ if (WantTypesInContext(CompletionContext, getLangOpts()))
+ Results.setFilter(&ResultBuilder::IsOrdinaryName);
+ else
+ Results.setFilter(&ResultBuilder::IsOrdinaryNonTypeName);
+
+ if (getLangOpts().CPlusPlus)
+ MaybeAddOverrideCalls(*this, /*InContext=*/0, Results);
+ break;
+
+ case PCC_RecoveryInFunction:
+ // Unfiltered
+ break;
+ }
+
+ // If we are in a C++ non-static member function, check the qualifiers on
+ // the member function to filter/prioritize the results list.
+ if (CXXMethodDecl *CurMethod = dyn_cast<CXXMethodDecl>(CurContext))
+ if (CurMethod->isInstance())
+ Results.setObjectTypeQualifiers(
+ Qualifiers::fromCVRMask(CurMethod->getTypeQualifiers()));
+
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+
+ AddOrdinaryNameResults(CompletionContext, S, *this, Results);
+ Results.ExitScope();
+
+ switch (CompletionContext) {
+ case PCC_ParenthesizedExpression:
+ case PCC_Expression:
+ case PCC_Statement:
+ case PCC_RecoveryInFunction:
+ if (S->getFnParent())
+ AddPrettyFunctionResults(PP.getLangOpts(), Results);
+ break;
+
+ case PCC_Namespace:
+ case PCC_Class:
+ case PCC_ObjCInterface:
+ case PCC_ObjCImplementation:
+ case PCC_ObjCInstanceVariableList:
+ case PCC_Template:
+ case PCC_MemberTemplate:
+ case PCC_ForInit:
+ case PCC_Condition:
+ case PCC_Type:
+ case PCC_LocalDeclarationSpecifiers:
+ break;
+ }
+
+ if (CodeCompleter->includeMacros())
+ AddMacroResults(PP, Results);
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+                            Results.data(), Results.size());
+}
+
+static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
+ ParsedType Receiver,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents,
+ bool AtArgumentExpression,
+ bool IsSuper,
+ ResultBuilder &Results);
+
+void Sema::CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
+ bool AllowNonIdentifiers,
+ bool AllowNestedNameSpecifiers) {
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ AllowNestedNameSpecifiers
+ ? CodeCompletionContext::CCC_PotentiallyQualifiedName
+ : CodeCompletionContext::CCC_Name);
+ Results.EnterNewScope();
+
+ // Type qualifiers can come after names.
+ Results.AddResult(Result("const"));
+ Results.AddResult(Result("volatile"));
+ if (getLangOpts().C99)
+ Results.AddResult(Result("restrict"));
+
+ if (getLangOpts().CPlusPlus) {
+ if (AllowNonIdentifiers) {
+ Results.AddResult(Result("operator"));
+ }
+
+ // Add nested-name-specifiers.
+ if (AllowNestedNameSpecifiers) {
+ Results.allowNestedNameSpecifiers();
+ Results.setFilter(&ResultBuilder::IsImpossibleToSatisfy);
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupNestedNameSpecifierName, Consumer,
+ CodeCompleter->includeGlobals());
+ Results.setFilter(0);
+ }
+ }
+ Results.ExitScope();
+
+ // If we're in a context where we might have an expression (rather than a
+ // declaration), and what we've seen so far is an Objective-C type that could
+ // be a receiver of a class message, this may be a class message send with
+ // the initial opening bracket '[' missing. Add appropriate completions.
+ if (AllowNonIdentifiers && !AllowNestedNameSpecifiers &&
+ DS.getTypeSpecType() == DeclSpec::TST_typename &&
+ DS.getStorageClassSpecAsWritten() == DeclSpec::SCS_unspecified &&
+ !DS.isThreadSpecified() && !DS.isExternInLinkageSpec() &&
+ DS.getTypeSpecComplex() == DeclSpec::TSC_unspecified &&
+ DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
+ DS.getTypeQualifiers() == 0 &&
+ S &&
+ (S->getFlags() & Scope::DeclScope) != 0 &&
+ (S->getFlags() & (Scope::ClassScope | Scope::TemplateParamScope |
+ Scope::FunctionPrototypeScope |
+ Scope::AtCatchScope)) == 0) {
+ ParsedType T = DS.getRepAsType();
+ if (!T.get().isNull() && T.get()->isObjCObjectOrInterfaceType())
+ AddClassMessageCompletions(*this, S, T, 0, 0, false, false, Results);
+ }
+
+ // Note that we intentionally suppress macro results here, since we do not
+ // encourage using macros to produce the names of entities.
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
+struct Sema::CodeCompleteExpressionData {
+ CodeCompleteExpressionData(QualType PreferredType = QualType())
+ : PreferredType(PreferredType), IntegralConstantExpression(false),
+ ObjCCollection(false) { }
+
+ QualType PreferredType;
+ bool IntegralConstantExpression;
+ bool ObjCCollection;
+ SmallVector<Decl *, 4> IgnoreDecls;
+};
+
+/// \brief Perform code-completion in an expression context when we know what
+/// type we're looking for.
+///
+/// \param Data Describes what is being completed; in particular, setting
+/// Data.IntegralConstantExpression restricts the results to integral constant
+/// expressions.
+void Sema::CodeCompleteExpression(Scope *S,
+ const CodeCompleteExpressionData &Data) {
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Expression);
+ if (Data.ObjCCollection)
+ Results.setFilter(&ResultBuilder::IsObjCCollection);
+ else if (Data.IntegralConstantExpression)
+ Results.setFilter(&ResultBuilder::IsIntegralConstantValue);
+ else if (WantTypesInContext(PCC_Expression, getLangOpts()))
+ Results.setFilter(&ResultBuilder::IsOrdinaryName);
+ else
+ Results.setFilter(&ResultBuilder::IsOrdinaryNonTypeName);
+
+ if (!Data.PreferredType.isNull())
+ Results.setPreferredType(Data.PreferredType.getNonReferenceType());
+
+ // Ignore any declarations that we were told that we don't care about.
+ for (unsigned I = 0, N = Data.IgnoreDecls.size(); I != N; ++I)
+ Results.Ignore(Data.IgnoreDecls[I]);
+
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+
+ Results.EnterNewScope();
+ AddOrdinaryNameResults(PCC_Expression, S, *this, Results);
+ Results.ExitScope();
+
+ bool PreferredTypeIsPointer = false;
+ if (!Data.PreferredType.isNull())
+ PreferredTypeIsPointer = Data.PreferredType->isAnyPointerType()
+ || Data.PreferredType->isMemberPointerType()
+ || Data.PreferredType->isBlockPointerType();
+
+ if (S->getFnParent() &&
+ !Data.ObjCCollection &&
+ !Data.IntegralConstantExpression)
+ AddPrettyFunctionResults(PP.getLangOpts(), Results);
+
+ if (CodeCompleter->includeMacros())
+ AddMacroResults(PP, Results, PreferredTypeIsPointer);
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext(CodeCompletionContext::CCC_Expression,
+ Data.PreferredType),
+ Results.data(),Results.size());
+}
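+// Illustrative sketch (editorial, not part of the original patch): other
+// completion entry points in this file drive this routine by filling in a
+// CodeCompleteExpressionData first, exactly as CodeCompleteCase and
+// CodeCompleteInitializer do further down. From inside another Sema member,
+// with a Scope *Sc and a QualType Ty already in hand (placeholder names):
+//
+//   CodeCompleteExpressionData Data(Ty);     // prefer results of type Ty
+//   Data.IntegralConstantExpression = true;  // e.g., for a case label
+//   CodeCompleteExpression(Sc, Data);        // filter and rank accordingly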
+
+void Sema::CodeCompletePostfixExpression(Scope *S, ExprResult E) {
+ if (E.isInvalid())
+ CodeCompleteOrdinaryName(S, PCC_RecoveryInFunction);
+ else if (getLangOpts().ObjC1)
+ CodeCompleteObjCInstanceMessage(S, E.take(), 0, 0, false);
+}
+
+/// \brief The set of properties that have already been added, referenced by
+/// property name.
+typedef llvm::SmallPtrSet<IdentifierInfo*, 16> AddedPropertiesSet;
+
+static void AddObjCProperties(ObjCContainerDecl *Container,
+ bool AllowCategories,
+ bool AllowNullaryMethods,
+ DeclContext *CurContext,
+ AddedPropertiesSet &AddedProperties,
+ ResultBuilder &Results) {
+ typedef CodeCompletionResult Result;
+
+ // Add properties in this container.
+ for (ObjCContainerDecl::prop_iterator P = Container->prop_begin(),
+ PEnd = Container->prop_end();
+ P != PEnd;
+ ++P) {
+ if (AddedProperties.insert(P->getIdentifier()))
+ Results.MaybeAddResult(Result(*P, 0), CurContext);
+ }
+
+ // Add nullary methods
+ if (AllowNullaryMethods) {
+ ASTContext &Context = Container->getASTContext();
+ PrintingPolicy Policy = getCompletionPrintingPolicy(Results.getSema());
+ for (ObjCContainerDecl::method_iterator M = Container->meth_begin(),
+ MEnd = Container->meth_end();
+ M != MEnd; ++M) {
+ if (M->getSelector().isUnarySelector())
+ if (IdentifierInfo *Name = M->getSelector().getIdentifierInfoForSlot(0))
+ if (AddedProperties.insert(Name)) {
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ AddResultTypeChunk(Context, Policy, *M, Builder);
+ Builder.AddTypedTextChunk(
+ Results.getAllocator().CopyString(Name->getName()));
+
+ Results.MaybeAddResult(Result(Builder.TakeString(), *M,
+ CCP_MemberDeclaration + CCD_MethodAsProperty),
+ CurContext);
+ }
+ }
+ }
+
+ // Add properties in referenced protocols.
+ if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
+ for (ObjCProtocolDecl::protocol_iterator P = Protocol->protocol_begin(),
+ PEnd = Protocol->protocol_end();
+ P != PEnd; ++P)
+ AddObjCProperties(*P, AllowCategories, AllowNullaryMethods, CurContext,
+ AddedProperties, Results);
+ } else if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container)){
+ if (AllowCategories) {
+ // Look through categories.
+ for (ObjCCategoryDecl *Category = IFace->getCategoryList();
+ Category; Category = Category->getNextClassCategory())
+ AddObjCProperties(Category, AllowCategories, AllowNullaryMethods,
+ CurContext, AddedProperties, Results);
+ }
+
+ // Look through protocols.
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ I = IFace->all_referenced_protocol_begin(),
+ E = IFace->all_referenced_protocol_end(); I != E; ++I)
+ AddObjCProperties(*I, AllowCategories, AllowNullaryMethods, CurContext,
+ AddedProperties, Results);
+
+ // Look in the superclass.
+ if (IFace->getSuperClass())
+ AddObjCProperties(IFace->getSuperClass(), AllowCategories,
+ AllowNullaryMethods, CurContext,
+ AddedProperties, Results);
+ } else if (const ObjCCategoryDecl *Category
+ = dyn_cast<ObjCCategoryDecl>(Container)) {
+ // Look through protocols.
+ for (ObjCCategoryDecl::protocol_iterator P = Category->protocol_begin(),
+ PEnd = Category->protocol_end();
+ P != PEnd; ++P)
+ AddObjCProperties(*P, AllowCategories, AllowNullaryMethods, CurContext,
+ AddedProperties, Results);
+ }
+}
+
+void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
+ SourceLocation OpLoc,
+ bool IsArrow) {
+ if (!Base || !CodeCompleter)
+ return;
+
+ ExprResult ConvertedBase = PerformMemberExprBaseConversion(Base, IsArrow);
+ if (ConvertedBase.isInvalid())
+ return;
+ Base = ConvertedBase.get();
+
+ typedef CodeCompletionResult Result;
+
+ QualType BaseType = Base->getType();
+
+ if (IsArrow) {
+ if (const PointerType *Ptr = BaseType->getAs<PointerType>())
+ BaseType = Ptr->getPointeeType();
+ else if (BaseType->isObjCObjectPointerType())
+ /*Do nothing*/ ;
+ else
+ return;
+ }
+
+ enum CodeCompletionContext::Kind contextKind;
+
+ if (IsArrow) {
+ contextKind = CodeCompletionContext::CCC_ArrowMemberAccess;
+ }
+ else {
+ if (BaseType->isObjCObjectPointerType() ||
+ BaseType->isObjCObjectOrInterfaceType()) {
+ contextKind = CodeCompletionContext::CCC_ObjCPropertyAccess;
+ }
+ else {
+ contextKind = CodeCompletionContext::CCC_DotMemberAccess;
+ }
+ }
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext(contextKind,
+ BaseType),
+ &ResultBuilder::IsMember);
+ Results.EnterNewScope();
+ if (const RecordType *Record = BaseType->getAs<RecordType>()) {
+ // Indicate that we are performing a member access, and the cv-qualifiers
+ // for the base object type.
+ Results.setObjectTypeQualifiers(BaseType.getQualifiers());
+
+ // Access to a C/C++ class, struct, or union.
+ Results.allowNestedNameSpecifiers();
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(Record->getDecl(), LookupMemberName, Consumer,
+ CodeCompleter->includeGlobals());
+
+ if (getLangOpts().CPlusPlus) {
+ if (!Results.empty()) {
+ // The "template" keyword can follow "->" or "." in the grammar.
+ // However, we only want to suggest the template keyword if something
+ // is dependent.
+ bool IsDependent = BaseType->isDependentType();
+ if (!IsDependent) {
+ for (Scope *DepScope = S; DepScope; DepScope = DepScope->getParent())
+ if (DeclContext *Ctx = (DeclContext *)DepScope->getEntity()) {
+ IsDependent = Ctx->isDependentContext();
+ break;
+ }
+ }
+
+ if (IsDependent)
+ Results.AddResult(Result("template"));
+ }
+ }
+ } else if (!IsArrow && BaseType->getAsObjCInterfacePointerType()) {
+ // Objective-C property reference.
+ AddedPropertiesSet AddedProperties;
+
+ // Add property results based on our interface.
+ const ObjCObjectPointerType *ObjCPtr
+ = BaseType->getAsObjCInterfacePointerType();
+ assert(ObjCPtr && "Non-NULL pointer guaranteed above!");
+ AddObjCProperties(ObjCPtr->getInterfaceDecl(), true,
+ /*AllowNullaryMethods=*/true, CurContext,
+ AddedProperties, Results);
+
+ // Add properties from the protocols in a qualified interface.
+ for (ObjCObjectPointerType::qual_iterator I = ObjCPtr->qual_begin(),
+ E = ObjCPtr->qual_end();
+ I != E; ++I)
+ AddObjCProperties(*I, true, /*AllowNullaryMethods=*/true, CurContext,
+ AddedProperties, Results);
+ } else if ((IsArrow && BaseType->isObjCObjectPointerType()) ||
+ (!IsArrow && BaseType->isObjCObjectType())) {
+ // Objective-C instance variable access.
+ ObjCInterfaceDecl *Class = 0;
+ if (const ObjCObjectPointerType *ObjCPtr
+ = BaseType->getAs<ObjCObjectPointerType>())
+ Class = ObjCPtr->getInterfaceDecl();
+ else
+ Class = BaseType->getAs<ObjCObjectType>()->getInterface();
+
+ // Add all ivars from this class and its superclasses.
+ if (Class) {
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ Results.setFilter(&ResultBuilder::IsObjCIvar);
+ LookupVisibleDecls(Class, LookupMemberName, Consumer,
+ CodeCompleter->includeGlobals());
+ }
+ }
+
+ // FIXME: How do we cope with isa?
+
+ Results.ExitScope();
+
+ // Hand off the results found for code completion.
+ HandleCodeCompleteResults(this, CodeCompleter,
+ Results.getCompletionContext(),
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteTag(Scope *S, unsigned TagSpec) {
+ if (!CodeCompleter)
+ return;
+
+ typedef CodeCompletionResult Result;
+ ResultBuilder::LookupFilter Filter = 0;
+ enum CodeCompletionContext::Kind ContextKind
+ = CodeCompletionContext::CCC_Other;
+ switch ((DeclSpec::TST)TagSpec) {
+ case DeclSpec::TST_enum:
+ Filter = &ResultBuilder::IsEnum;
+ ContextKind = CodeCompletionContext::CCC_EnumTag;
+ break;
+
+ case DeclSpec::TST_union:
+ Filter = &ResultBuilder::IsUnion;
+ ContextKind = CodeCompletionContext::CCC_UnionTag;
+ break;
+
+ case DeclSpec::TST_struct:
+ case DeclSpec::TST_class:
+ Filter = &ResultBuilder::IsClassOrStruct;
+ ContextKind = CodeCompletionContext::CCC_ClassOrStructTag;
+ break;
+
+ default:
+ llvm_unreachable("Unknown type specifier kind in CodeCompleteTag");
+ }
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(), ContextKind);
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+
+ // First pass: look for tags.
+ Results.setFilter(Filter);
+ LookupVisibleDecls(S, LookupTagName, Consumer,
+ CodeCompleter->includeGlobals());
+
+ if (CodeCompleter->includeGlobals()) {
+ // Second pass: look for nested name specifiers.
+ Results.setFilter(&ResultBuilder::IsNestedNameSpecifier);
+ LookupVisibleDecls(S, LookupNestedNameSpecifierName, Consumer);
+ }
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteTypeQualifiers(DeclSpec &DS) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_TypeQualifiers);
+ Results.EnterNewScope();
+ if (!(DS.getTypeQualifiers() & DeclSpec::TQ_const))
+ Results.AddResult("const");
+ if (!(DS.getTypeQualifiers() & DeclSpec::TQ_volatile))
+ Results.AddResult("volatile");
+ if (getLangOpts().C99 &&
+ !(DS.getTypeQualifiers() & DeclSpec::TQ_restrict))
+ Results.AddResult("restrict");
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
+void Sema::CodeCompleteCase(Scope *S) {
+ if (getCurFunction()->SwitchStack.empty() || !CodeCompleter)
+ return;
+
+ SwitchStmt *Switch = getCurFunction()->SwitchStack.back();
+ QualType type = Switch->getCond()->IgnoreImplicit()->getType();
+ if (!type->isEnumeralType()) {
+ CodeCompleteExpressionData Data(type);
+ Data.IntegralConstantExpression = true;
+ CodeCompleteExpression(S, Data);
+ return;
+ }
+
+  // Code-complete the cases of a switch statement over an enumeration type
+  // by providing the list of enumerators that have not yet been mentioned.
+ EnumDecl *Enum = type->castAs<EnumType>()->getDecl();
+
+ // Determine which enumerators we have already seen in the switch statement.
+ // FIXME: Ideally, we would also be able to look *past* the code-completion
+ // token, in case we are code-completing in the middle of the switch and not
+ // at the end. However, we aren't able to do so at the moment.
+ llvm::SmallPtrSet<EnumConstantDecl *, 8> EnumeratorsSeen;
+ NestedNameSpecifier *Qualifier = 0;
+ for (SwitchCase *SC = Switch->getSwitchCaseList(); SC;
+ SC = SC->getNextSwitchCase()) {
+ CaseStmt *Case = dyn_cast<CaseStmt>(SC);
+ if (!Case)
+ continue;
+
+ Expr *CaseVal = Case->getLHS()->IgnoreParenCasts();
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CaseVal))
+ if (EnumConstantDecl *Enumerator
+ = dyn_cast<EnumConstantDecl>(DRE->getDecl())) {
+        // We look into the AST of the case statement to determine which
+        // enumerator was named. Alternatively, we could compute the value of
+        // the integral constant expression, then compare it against the
+        // values of each enumerator. However, a value-based approach would
+        // not work as well with C++ templates, where enumerators declared
+        // within a template are type- and value-dependent.
+ EnumeratorsSeen.insert(Enumerator);
+
+ // If this is a qualified-id, keep track of the nested-name-specifier
+ // so that we can reproduce it as part of code completion, e.g.,
+ //
+ // switch (TagD.getKind()) {
+ // case TagDecl::TK_enum:
+ // break;
+ // case XXX
+ //
+ // At the XXX, our completions are TagDecl::TK_union,
+ // TagDecl::TK_struct, and TagDecl::TK_class, rather than TK_union,
+ // TK_struct, and TK_class.
+ Qualifier = DRE->getQualifier();
+ }
+ }
+
+ if (getLangOpts().CPlusPlus && !Qualifier && EnumeratorsSeen.empty()) {
+ // If there are no prior enumerators in C++, check whether we have to
+ // qualify the names of the enumerators that we suggest, because they
+ // may not be visible in this scope.
+ Qualifier = getRequiredQualification(Context, CurContext, Enum);
+ }
+
+ // Add any enumerators that have not yet been mentioned.
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Expression);
+ Results.EnterNewScope();
+ for (EnumDecl::enumerator_iterator E = Enum->enumerator_begin(),
+ EEnd = Enum->enumerator_end();
+ E != EEnd; ++E) {
+ if (EnumeratorsSeen.count(*E))
+ continue;
+
+ CodeCompletionResult R(*E, Qualifier);
+ R.Priority = CCP_EnumInCase;
+ Results.AddResult(R, CurContext, 0, false);
+ }
+ Results.ExitScope();
+
+  // We need to make sure we're setting the right context, so we only say that
+  // we include macros if the code completer says we do.
+ enum CodeCompletionContext::Kind kind = CodeCompletionContext::CCC_Other;
+ if (CodeCompleter->includeMacros()) {
+ AddMacroResults(PP, Results);
+ kind = CodeCompletionContext::CCC_OtherWithMacros;
+ }
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ kind,
+ Results.data(),Results.size());
+}
+
+namespace {
+ struct IsBetterOverloadCandidate {
+ Sema &S;
+ SourceLocation Loc;
+
+ public:
+ explicit IsBetterOverloadCandidate(Sema &S, SourceLocation Loc)
+ : S(S), Loc(Loc) { }
+
+ bool
+ operator()(const OverloadCandidate &X, const OverloadCandidate &Y) const {
+ return isBetterOverloadCandidate(S, X, Y, Loc);
+ }
+ };
+}
+
+static bool anyNullArguments(llvm::ArrayRef<Expr*> Args) {
+ if (Args.size() && !Args.data())
+ return true;
+
+ for (unsigned I = 0; I != Args.size(); ++I)
+ if (!Args[I])
+ return true;
+
+ return false;
+}
+
+void Sema::CodeCompleteCall(Scope *S, Expr *FnIn,
+ llvm::ArrayRef<Expr *> Args) {
+ if (!CodeCompleter)
+ return;
+
+ // When we're code-completing for a call, we fall back to ordinary
+ // name code-completion whenever we can't produce specific
+ // results. We may want to revisit this strategy in the future,
+ // e.g., by merging the two kinds of results.
+
+ Expr *Fn = (Expr *)FnIn;
+
+ // Ignore type-dependent call expressions entirely.
+ if (!Fn || Fn->isTypeDependent() || anyNullArguments(Args) ||
+ Expr::hasAnyTypeDependentArguments(Args)) {
+ CodeCompleteOrdinaryName(S, PCC_Expression);
+ return;
+ }
+
+ // Build an overload candidate set based on the functions we find.
+ SourceLocation Loc = Fn->getExprLoc();
+ OverloadCandidateSet CandidateSet(Loc);
+
+ // FIXME: What if we're calling something that isn't a function declaration?
+ // FIXME: What if we're calling a pseudo-destructor?
+ // FIXME: What if we're calling a member function?
+
+ typedef CodeCompleteConsumer::OverloadCandidate ResultCandidate;
+ SmallVector<ResultCandidate, 8> Results;
+
+ Expr *NakedFn = Fn->IgnoreParenCasts();
+ if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(NakedFn))
+ AddOverloadedCallCandidates(ULE, Args, CandidateSet,
+ /*PartialOverloading=*/ true);
+ else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(NakedFn)) {
+ FunctionDecl *FDecl = dyn_cast<FunctionDecl>(DRE->getDecl());
+ if (FDecl) {
+ if (!getLangOpts().CPlusPlus ||
+ !FDecl->getType()->getAs<FunctionProtoType>())
+ Results.push_back(ResultCandidate(FDecl));
+ else
+ // FIXME: access?
+ AddOverloadCandidate(FDecl, DeclAccessPair::make(FDecl, AS_none), Args,
+ CandidateSet, false, /*PartialOverloading*/true);
+ }
+ }
+
+ QualType ParamType;
+
+ if (!CandidateSet.empty()) {
+ // Sort the overload candidate set by placing the best overloads first.
+ std::stable_sort(CandidateSet.begin(), CandidateSet.end(),
+ IsBetterOverloadCandidate(*this, Loc));
+
+    // Add the remaining viable overload candidates as code-completion results.
+ for (OverloadCandidateSet::iterator Cand = CandidateSet.begin(),
+ CandEnd = CandidateSet.end();
+ Cand != CandEnd; ++Cand) {
+ if (Cand->Viable)
+ Results.push_back(ResultCandidate(Cand->Function));
+ }
+
+ // From the viable candidates, try to determine the type of this parameter.
+ for (unsigned I = 0, N = Results.size(); I != N; ++I) {
+ if (const FunctionType *FType = Results[I].getFunctionType())
+ if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FType))
+ if (Args.size() < Proto->getNumArgs()) {
+ if (ParamType.isNull())
+ ParamType = Proto->getArgType(Args.size());
+ else if (!Context.hasSameUnqualifiedType(
+ ParamType.getNonReferenceType(),
+ Proto->getArgType(Args.size()).getNonReferenceType())) {
+ ParamType = QualType();
+ break;
+ }
+ }
+ }
+ } else {
+ // Try to determine the parameter type from the type of the expression
+ // being called.
+ QualType FunctionType = Fn->getType();
+ if (const PointerType *Ptr = FunctionType->getAs<PointerType>())
+ FunctionType = Ptr->getPointeeType();
+ else if (const BlockPointerType *BlockPtr
+ = FunctionType->getAs<BlockPointerType>())
+ FunctionType = BlockPtr->getPointeeType();
+ else if (const MemberPointerType *MemPtr
+ = FunctionType->getAs<MemberPointerType>())
+ FunctionType = MemPtr->getPointeeType();
+
+ if (const FunctionProtoType *Proto
+ = FunctionType->getAs<FunctionProtoType>()) {
+ if (Args.size() < Proto->getNumArgs())
+ ParamType = Proto->getArgType(Args.size());
+ }
+ }
+
+ if (ParamType.isNull())
+ CodeCompleteOrdinaryName(S, PCC_Expression);
+ else
+ CodeCompleteExpression(S, ParamType);
+
+ if (!Results.empty())
+ CodeCompleter->ProcessOverloadCandidates(*this, Args.size(), Results.data(),
+ Results.size());
+}
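+// Editorial note (illustrative, not part of the original patch): the
+// ParamType computation above only survives when every viable candidate
+// agrees on the type of the argument being completed. For hypothetical
+// overloads f(int) and f(const char *), completing the first argument finds
+// two viable candidates whose first parameter types differ, so ParamType is
+// reset to a null QualType and we fall back to CodeCompleteOrdinaryName. For
+// a single prototype void g(int, double), completing the second argument
+// yields ParamType == double, and CodeCompleteExpression then prefers
+// double-typed results.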
+
+void Sema::CodeCompleteInitializer(Scope *S, Decl *D) {
+ ValueDecl *VD = dyn_cast_or_null<ValueDecl>(D);
+ if (!VD) {
+ CodeCompleteOrdinaryName(S, PCC_Expression);
+ return;
+ }
+
+ CodeCompleteExpression(S, VD->getType());
+}
+
+void Sema::CodeCompleteReturn(Scope *S) {
+ QualType ResultType;
+ if (isa<BlockDecl>(CurContext)) {
+ if (BlockScopeInfo *BSI = getCurBlock())
+ ResultType = BSI->ReturnType;
+ } else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(CurContext))
+ ResultType = Function->getResultType();
+ else if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(CurContext))
+ ResultType = Method->getResultType();
+
+ if (ResultType.isNull())
+ CodeCompleteOrdinaryName(S, PCC_Expression);
+ else
+ CodeCompleteExpression(S, ResultType);
+}
+
+void Sema::CodeCompleteAfterIf(Scope *S) {
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ mapCodeCompletionContext(*this, PCC_Statement));
+ Results.setFilter(&ResultBuilder::IsOrdinaryName);
+ Results.EnterNewScope();
+
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+
+ AddOrdinaryNameResults(PCC_Statement, S, *this, Results);
+
+ // "else" block
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Builder.AddTypedTextChunk("else");
+ if (Results.includeCodePatterns()) {
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ }
+ Results.AddResult(Builder.TakeString());
+
+ // "else if" block
+ Builder.AddTypedTextChunk("else");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("if");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ if (getLangOpts().CPlusPlus)
+ Builder.AddPlaceholderChunk("condition");
+ else
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ if (Results.includeCodePatterns()) {
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ }
+ Results.AddResult(Builder.TakeString());
+
+ Results.ExitScope();
+
+ if (S->getFnParent())
+ AddPrettyFunctionResults(PP.getLangOpts(), Results);
+
+ if (CodeCompleter->includeMacros())
+ AddMacroResults(PP, Results);
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(),Results.size());
+}
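+// Editorial sketch (an assumption about how clients render the chunks, not
+// part of the original patch): with code patterns enabled, the two results
+// assembled above are intended to read roughly as
+//
+//   else { <statements> }
+//   else if ( <condition-or-expression> ) { <statements> }
+//
+// where the angle-bracketed pieces are placeholder chunks the client
+// substitutes; with code patterns disabled, only the bare "else" and
+// "else if ( ... )" forms are offered.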
+
+void Sema::CodeCompleteAssignmentRHS(Scope *S, Expr *LHS) {
+ if (LHS)
+ CodeCompleteExpression(S, static_cast<Expr *>(LHS)->getType());
+ else
+ CodeCompleteOrdinaryName(S, PCC_Expression);
+}
+
+void Sema::CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS,
+ bool EnteringContext) {
+ if (!SS.getScopeRep() || !CodeCompleter)
+ return;
+
+ DeclContext *Ctx = computeDeclContext(SS, EnteringContext);
+ if (!Ctx)
+ return;
+
+ // Try to instantiate any non-dependent declaration contexts before
+ // we look in them.
+ if (!isDependentScopeSpecifier(SS) && RequireCompleteDeclContext(SS, Ctx))
+ return;
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Name);
+ Results.EnterNewScope();
+
+ // The "template" keyword can follow "::" in the grammar, but only
+ // put it into the grammar if the nested-name-specifier is dependent.
+ NestedNameSpecifier *NNS = (NestedNameSpecifier *)SS.getScopeRep();
+ if (!Results.empty() && NNS->isDependent())
+ Results.AddResult("template");
+
+ // Add calls to overridden virtual functions, if there are any.
+ //
+ // FIXME: This isn't wonderful, because we don't know whether we're actually
+ // in a context that permits expressions. This is a general issue with
+ // qualified-id completions.
+ if (!EnteringContext)
+ MaybeAddOverrideCalls(*this, Ctx, Results);
+ Results.ExitScope();
+
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(Ctx, LookupOrdinaryName, Consumer);
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ Results.getCompletionContext(),
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteUsing(Scope *S) {
+ if (!CodeCompleter)
+ return;
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_PotentiallyQualifiedName,
+ &ResultBuilder::IsNestedNameSpecifier);
+ Results.EnterNewScope();
+
+ // If we aren't in class scope, we could see the "namespace" keyword.
+ if (!S->isClassScope())
+ Results.AddResult(CodeCompletionResult("namespace"));
+
+ // After "using", we can see anything that would start a
+ // nested-name-specifier.
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_PotentiallyQualifiedName,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteUsingDirective(Scope *S) {
+ if (!CodeCompleter)
+ return;
+
+ // After "using namespace", we expect to see a namespace name or namespace
+ // alias.
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Namespace,
+ &ResultBuilder::IsNamespaceOrAlias);
+ Results.EnterNewScope();
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Namespace,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteNamespaceDecl(Scope *S) {
+ if (!CodeCompleter)
+ return;
+
+ DeclContext *Ctx = (DeclContext *)S->getEntity();
+ if (!S->getParent())
+ Ctx = Context.getTranslationUnitDecl();
+
+ bool SuppressedGlobalResults
+ = Ctx && !CodeCompleter->includeGlobals() && isa<TranslationUnitDecl>(Ctx);
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ SuppressedGlobalResults
+ ? CodeCompletionContext::CCC_Namespace
+ : CodeCompletionContext::CCC_Other,
+ &ResultBuilder::IsNamespace);
+
+ if (Ctx && Ctx->isFileContext() && !SuppressedGlobalResults) {
+ // We only want to see those namespaces that have already been defined
+    // within this scope, because it's likely that the user is creating an
+ // extended namespace declaration. Keep track of the most recent
+ // definition of each namespace.
+ std::map<NamespaceDecl *, NamespaceDecl *> OrigToLatest;
+ for (DeclContext::specific_decl_iterator<NamespaceDecl>
+ NS(Ctx->decls_begin()), NSEnd(Ctx->decls_end());
+ NS != NSEnd; ++NS)
+ OrigToLatest[NS->getOriginalNamespace()] = *NS;
+
+ // Add the most recent definition (or extended definition) of each
+ // namespace to the list of results.
+ Results.EnterNewScope();
+ for (std::map<NamespaceDecl *, NamespaceDecl *>::iterator
+ NS = OrigToLatest.begin(),
+ NSEnd = OrigToLatest.end();
+ NS != NSEnd; ++NS)
+ Results.AddResult(CodeCompletionResult(NS->second, 0),
+ CurContext, 0, false);
+ Results.ExitScope();
+ }
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ Results.getCompletionContext(),
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteNamespaceAliasDecl(Scope *S) {
+ if (!CodeCompleter)
+ return;
+
+ // After "namespace", we expect to see a namespace or alias.
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Namespace,
+ &ResultBuilder::IsNamespaceOrAlias);
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+ HandleCodeCompleteResults(this, CodeCompleter,
+ Results.getCompletionContext(),
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteOperatorName(Scope *S) {
+ if (!CodeCompleter)
+ return;
+
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Type,
+ &ResultBuilder::IsType);
+ Results.EnterNewScope();
+
+ // Add the names of overloadable operators.
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ if (std::strcmp(Spelling, "?")) \
+ Results.AddResult(Result(Spelling));
+#include "clang/Basic/OperatorKinds.def"
+
+ // Add any type names visible from the current scope
+ Results.allowNestedNameSpecifiers();
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+
+ // Add any type specifiers
+ AddTypeSpecifierResults(getLangOpts(), Results);
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Type,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteConstructorInitializer(Decl *ConstructorD,
+ CXXCtorInitializer** Initializers,
+ unsigned NumInitializers) {
+ PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
+ CXXConstructorDecl *Constructor
+ = static_cast<CXXConstructorDecl *>(ConstructorD);
+ if (!Constructor)
+ return;
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_PotentiallyQualifiedName);
+ Results.EnterNewScope();
+
+ // Fill in any already-initialized fields or base classes.
+ llvm::SmallPtrSet<FieldDecl *, 4> InitializedFields;
+ llvm::SmallPtrSet<CanQualType, 4> InitializedBases;
+ for (unsigned I = 0; I != NumInitializers; ++I) {
+ if (Initializers[I]->isBaseInitializer())
+ InitializedBases.insert(
+ Context.getCanonicalType(QualType(Initializers[I]->getBaseClass(), 0)));
+ else
+ InitializedFields.insert(cast<FieldDecl>(
+ Initializers[I]->getAnyMember()));
+ }
+
+ // Add completions for base classes.
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ bool SawLastInitializer = (NumInitializers == 0);
+ CXXRecordDecl *ClassDecl = Constructor->getParent();
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
+ BaseEnd = ClassDecl->bases_end();
+ Base != BaseEnd; ++Base) {
+ if (!InitializedBases.insert(Context.getCanonicalType(Base->getType()))) {
+ SawLastInitializer
+ = NumInitializers > 0 &&
+ Initializers[NumInitializers - 1]->isBaseInitializer() &&
+ Context.hasSameUnqualifiedType(Base->getType(),
+ QualType(Initializers[NumInitializers - 1]->getBaseClass(), 0));
+ continue;
+ }
+
+ Builder.AddTypedTextChunk(
+ Results.getAllocator().CopyString(
+ Base->getType().getAsString(Policy)));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("args");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(),
+ SawLastInitializer? CCP_NextInitializer
+ : CCP_MemberDeclaration));
+ SawLastInitializer = false;
+ }
+
+ // Add completions for virtual base classes.
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
+ BaseEnd = ClassDecl->vbases_end();
+ Base != BaseEnd; ++Base) {
+ if (!InitializedBases.insert(Context.getCanonicalType(Base->getType()))) {
+ SawLastInitializer
+ = NumInitializers > 0 &&
+ Initializers[NumInitializers - 1]->isBaseInitializer() &&
+ Context.hasSameUnqualifiedType(Base->getType(),
+ QualType(Initializers[NumInitializers - 1]->getBaseClass(), 0));
+ continue;
+ }
+
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(
+ Base->getType().getAsString(Policy)));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("args");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(),
+ SawLastInitializer? CCP_NextInitializer
+ : CCP_MemberDeclaration));
+ SawLastInitializer = false;
+ }
+
+ // Add completions for members.
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ Field != FieldEnd; ++Field) {
+ if (!InitializedFields.insert(cast<FieldDecl>(Field->getCanonicalDecl()))) {
+ SawLastInitializer
+ = NumInitializers > 0 &&
+ Initializers[NumInitializers - 1]->isAnyMemberInitializer() &&
+ Initializers[NumInitializers - 1]->getAnyMember() == *Field;
+ continue;
+ }
+
+ if (!Field->getDeclName())
+ continue;
+
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ Field->getIdentifier()->getName()));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("args");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(),
+ SawLastInitializer? CCP_NextInitializer
+ : CCP_MemberDeclaration,
+ CXCursor_MemberRef,
+ CXAvailability_Available,
+ *Field));
+ SawLastInitializer = false;
+ }
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
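+// Editorial note (illustrative, not part of the original patch): the
+// SawLastInitializer flag above is what gives "the next thing you probably
+// want to initialize" its boosted priority. It starts out set when nothing
+// has been written yet, and is re-set whenever the skipped, already-written
+// candidate happens to be the last initializer in the written list; the first
+// not-yet-initialized base or member that follows in declaration order is
+// then emitted with CCP_NextInitializer instead of CCP_MemberDeclaration.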
+
+/// \brief Determine whether this scope denotes a namespace.
+static bool isNamespaceScope(Scope *S) {
+ DeclContext *DC = static_cast<DeclContext *>(S->getEntity());
+ if (!DC)
+ return false;
+
+ return DC->isFileContext();
+}
+
+void Sema::CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
+ bool AfterAmpersand) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+
+ // Note what has already been captured.
+ llvm::SmallPtrSet<IdentifierInfo *, 4> Known;
+ bool IncludedThis = false;
+ for (SmallVectorImpl<LambdaCapture>::iterator C = Intro.Captures.begin(),
+ CEnd = Intro.Captures.end();
+ C != CEnd; ++C) {
+ if (C->Kind == LCK_This) {
+ IncludedThis = true;
+ continue;
+ }
+
+ Known.insert(C->Id);
+ }
+
+ // Look for other capturable variables.
+ for (; S && !isNamespaceScope(S); S = S->getParent()) {
+ for (Scope::decl_iterator D = S->decl_begin(), DEnd = S->decl_end();
+ D != DEnd; ++D) {
+ VarDecl *Var = dyn_cast<VarDecl>(*D);
+ if (!Var ||
+ !Var->hasLocalStorage() ||
+ Var->hasAttr<BlocksAttr>())
+ continue;
+
+ if (Known.insert(Var->getIdentifier()))
+ Results.AddResult(CodeCompletionResult(Var), CurContext, 0, false);
+ }
+ }
+
+ // Add 'this', if it would be valid.
+ if (!IncludedThis && !AfterAmpersand && Intro.Default != LCD_ByCopy)
+ addThisCompletion(*this, Results);
+
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
+// Macro that expands to @Keyword or Keyword, depending on whether NeedAt is
+// true or false.
+#define OBJC_AT_KEYWORD_NAME(NeedAt,Keyword) NeedAt? "@" #Keyword : #Keyword
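+// Editorial worked example (not in the original patch): the macro relies on
+// preprocessor stringizing plus adjacent string-literal concatenation, so
+//
+//   OBJC_AT_KEYWORD_NAME(true, end)   expands to   true? "@" "end" : "end"
+//
+// which evaluates to "@end"; OBJC_AT_KEYWORD_NAME(false, end) yields "end".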
+static void AddObjCImplementationResults(const LangOptions &LangOpts,
+ ResultBuilder &Results,
+ bool NeedAt) {
+ typedef CodeCompletionResult Result;
+ // Since we have an implementation, we can end it.
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,end)));
+
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ if (LangOpts.ObjC2) {
+ // @dynamic
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,dynamic));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("property");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @synthesize
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,synthesize));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("property");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+}
+
+static void AddObjCInterfaceResults(const LangOptions &LangOpts,
+ ResultBuilder &Results,
+ bool NeedAt) {
+ typedef CodeCompletionResult Result;
+
+ // Since we have an interface or protocol, we can end it.
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,end)));
+
+ if (LangOpts.ObjC2) {
+ // @property
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,property)));
+
+ // @required
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,required)));
+
+ // @optional
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,optional)));
+ }
+}
+
+static void AddObjCTopLevelResults(ResultBuilder &Results, bool NeedAt) {
+ typedef CodeCompletionResult Result;
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+
+ // @class name ;
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,class));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("name");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ if (Results.includeCodePatterns()) {
+ // @interface name
+ // FIXME: Could introduce the whole pattern, including superclasses and
+ // such.
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,interface));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("class");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @protocol name
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,protocol));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("protocol");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @implementation name
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,implementation));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("class");
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // @compatibility_alias name
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,compatibility_alias));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("alias");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("class");
+ Results.AddResult(Result(Builder.TakeString()));
+}
+
+void Sema::CodeCompleteObjCAtDirective(Scope *S) {
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+ if (isa<ObjCImplDecl>(CurContext))
+ AddObjCImplementationResults(getLangOpts(), Results, false);
+ else if (CurContext->isObjCContainer())
+ AddObjCInterfaceResults(getLangOpts(), Results, false);
+ else
+ AddObjCTopLevelResults(Results, false);
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+static void AddObjCExpressionResults(ResultBuilder &Results, bool NeedAt) {
+ typedef CodeCompletionResult Result;
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+
+ // @encode ( type-name )
+ const char *EncodeType = "char[]";
+ if (Results.getSema().getLangOpts().CPlusPlus ||
+ Results.getSema().getLangOpts().ConstStrings)
+ EncodeType = " const char[]";
+ Builder.AddResultTypeChunk(EncodeType);
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,encode));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("type-name");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @protocol ( protocol-name )
+ Builder.AddResultTypeChunk("Protocol *");
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,protocol));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("protocol-name");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @selector ( selector )
+ Builder.AddResultTypeChunk("SEL");
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,selector));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("selector");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @[ objects, ... ]
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,[));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("objects, ...");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBracket);
+ Results.AddResult(Result(Builder.TakeString()));
+
+ // @{ key : object, ... }
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,{));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("key");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("object, ...");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+}
+
+static void AddObjCStatementResults(ResultBuilder &Results, bool NeedAt) {
+ typedef CodeCompletionResult Result;
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+
+ if (Results.includeCodePatterns()) {
+ // @try { statements } @catch ( declaration ) { statements } @finally
+ // { statements }
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,try));
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Builder.AddTextChunk("@catch");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("parameter");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Builder.AddTextChunk("@finally");
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+
+ // @throw
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,throw));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Results.AddResult(Result(Builder.TakeString()));
+
+ if (Results.includeCodePatterns()) {
+ // @synchronized ( expression ) { statements }
+ Builder.AddTypedTextChunk(OBJC_AT_KEYWORD_NAME(NeedAt,synchronized));
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddPlaceholderChunk("statements");
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ Results.AddResult(Result(Builder.TakeString()));
+ }
+}
+
+static void AddObjCVisibilityResults(const LangOptions &LangOpts,
+ ResultBuilder &Results,
+ bool NeedAt) {
+ typedef CodeCompletionResult Result;
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,private)));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,protected)));
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,public)));
+ if (LangOpts.ObjC2)
+ Results.AddResult(Result(OBJC_AT_KEYWORD_NAME(NeedAt,package)));
+}
+
+void Sema::CodeCompleteObjCAtVisibility(Scope *S) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+ AddObjCVisibilityResults(getLangOpts(), Results, false);
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCAtStatement(Scope *S) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+ AddObjCStatementResults(Results, false);
+ AddObjCExpressionResults(Results, false);
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCAtExpression(Scope *S) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+ AddObjCExpressionResults(Results, false);
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+/// \brief Determine whether the addition of the given flag to an Objective-C
+/// property's attributes will cause a conflict.
+static bool ObjCPropertyFlagConflicts(unsigned Attributes, unsigned NewFlag) {
+ // Check if we've already added this flag.
+ if (Attributes & NewFlag)
+ return true;
+
+ Attributes |= NewFlag;
+
+ // Check for collisions with "readonly".
+ if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
+ (Attributes & (ObjCDeclSpec::DQ_PR_readwrite |
+ ObjCDeclSpec::DQ_PR_assign |
+ ObjCDeclSpec::DQ_PR_unsafe_unretained |
+ ObjCDeclSpec::DQ_PR_copy |
+ ObjCDeclSpec::DQ_PR_retain |
+ ObjCDeclSpec::DQ_PR_strong)))
+ return true;
+
+ // Check for more than one of { assign, copy, retain, strong }.
+ unsigned AssignCopyRetMask = Attributes & (ObjCDeclSpec::DQ_PR_assign |
+ ObjCDeclSpec::DQ_PR_unsafe_unretained |
+ ObjCDeclSpec::DQ_PR_copy |
+ ObjCDeclSpec::DQ_PR_retain|
+ ObjCDeclSpec::DQ_PR_strong);
+ if (AssignCopyRetMask &&
+ AssignCopyRetMask != ObjCDeclSpec::DQ_PR_assign &&
+ AssignCopyRetMask != ObjCDeclSpec::DQ_PR_unsafe_unretained &&
+ AssignCopyRetMask != ObjCDeclSpec::DQ_PR_copy &&
+ AssignCopyRetMask != ObjCDeclSpec::DQ_PR_retain &&
+ AssignCopyRetMask != ObjCDeclSpec::DQ_PR_strong)
+ return true;
+
+ return false;
+}
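+// Illustrative sketch (editorial, not part of the original patch), using the
+// flags defined in ObjCDeclSpec:
+//
+//   unsigned Attrs = ObjCDeclSpec::DQ_PR_readonly;
+//   ObjCPropertyFlagConflicts(Attrs, ObjCDeclSpec::DQ_PR_readwrite); // true
+//   ObjCPropertyFlagConflicts(Attrs, ObjCDeclSpec::DQ_PR_nonatomic); // false
+//
+// This is how CodeCompleteObjCPropertyFlags below decides which attribute
+// keywords are still worth suggesting.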
+
+void Sema::CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS) {
+ if (!CodeCompleter)
+ return;
+
+ unsigned Attributes = ODS.getPropertyAttributes();
+
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_readonly))
+ Results.AddResult(CodeCompletionResult("readonly"));
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_assign))
+ Results.AddResult(CodeCompletionResult("assign"));
+ if (!ObjCPropertyFlagConflicts(Attributes,
+ ObjCDeclSpec::DQ_PR_unsafe_unretained))
+ Results.AddResult(CodeCompletionResult("unsafe_unretained"));
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_readwrite))
+ Results.AddResult(CodeCompletionResult("readwrite"));
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_retain))
+ Results.AddResult(CodeCompletionResult("retain"));
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_strong))
+ Results.AddResult(CodeCompletionResult("strong"));
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_copy))
+ Results.AddResult(CodeCompletionResult("copy"));
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_nonatomic))
+ Results.AddResult(CodeCompletionResult("nonatomic"));
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_atomic))
+ Results.AddResult(CodeCompletionResult("atomic"));
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_setter)) {
+ CodeCompletionBuilder Setter(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Setter.AddTypedTextChunk("setter");
+ Setter.AddTextChunk(" = ");
+ Setter.AddPlaceholderChunk("method");
+ Results.AddResult(CodeCompletionResult(Setter.TakeString()));
+ }
+ if (!ObjCPropertyFlagConflicts(Attributes, ObjCDeclSpec::DQ_PR_getter)) {
+ CodeCompletionBuilder Getter(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Getter.AddTypedTextChunk("getter");
+ Getter.AddTextChunk(" = ");
+ Getter.AddPlaceholderChunk("method");
+ Results.AddResult(CodeCompletionResult(Getter.TakeString()));
+ }
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+/// \brief Describes the kind of Objective-C method that we want to find
+/// via code completion.
+enum ObjCMethodKind {
+  MK_Any, //< Any kind of method, provided it meets the other specified criteria.
+ MK_ZeroArgSelector, //< Zero-argument (unary) selector.
+ MK_OneArgSelector //< One-argument selector.
+};
+
+static bool isAcceptableObjCSelector(Selector Sel,
+ ObjCMethodKind WantKind,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents,
+ bool AllowSameLength = true) {
+ if (NumSelIdents > Sel.getNumArgs())
+ return false;
+
+ switch (WantKind) {
+ case MK_Any: break;
+ case MK_ZeroArgSelector: return Sel.isUnarySelector();
+ case MK_OneArgSelector: return Sel.getNumArgs() == 1;
+ }
+
+ if (!AllowSameLength && NumSelIdents && NumSelIdents == Sel.getNumArgs())
+ return false;
+
+ for (unsigned I = 0; I != NumSelIdents; ++I)
+ if (SelIdents[I] != Sel.getIdentifierInfoForSlot(I))
+ return false;
+
+ return true;
+}
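+// Editorial example (hypothetical selector, not in the original patch): for
+// the selector insertObject:atIndex: with SelIdents = { "insertObject" },
+// MK_Any accepts it, because the one identifier typed so far matches slot 0
+// and NumSelIdents (1) does not exceed the selector's argument count (2);
+// MK_ZeroArgSelector would reject it, since the selector is not unary.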
+
+static bool isAcceptableObjCMethod(ObjCMethodDecl *Method,
+ ObjCMethodKind WantKind,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents,
+ bool AllowSameLength = true) {
+ return isAcceptableObjCSelector(Method->getSelector(), WantKind, SelIdents,
+ NumSelIdents, AllowSameLength);
+}
+
+namespace {
+ /// \brief A set of selectors, which is used to avoid introducing multiple
+ /// completions with the same selector into the result set.
+ typedef llvm::SmallPtrSet<Selector, 16> VisitedSelectorSet;
+}
+
+/// \brief Add all of the Objective-C methods in the given Objective-C
+/// container to the set of results.
+///
+/// The container will be a class, protocol, category, or implementation of
+/// any of the above. This method will recurse to include methods from
+/// the superclasses of classes along with their categories, protocols, and
+/// implementations.
+///
+/// \param Container the container in which we'll look to find methods.
+///
+/// \param WantInstanceMethods Whether to add instance methods (only); if
+/// false, this routine will add factory methods (only).
+///
+/// \param CurContext the context in which we're performing the lookup that
+/// finds methods.
+///
+/// \param AllowSameLength Whether we allow a method to be added to the list
+/// when it has the same number of parameters as we have selector identifiers.
+///
+/// \param Results the structure into which we'll add results.
+static void AddObjCMethods(ObjCContainerDecl *Container,
+ bool WantInstanceMethods,
+ ObjCMethodKind WantKind,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents,
+ DeclContext *CurContext,
+ VisitedSelectorSet &Selectors,
+ bool AllowSameLength,
+ ResultBuilder &Results,
+ bool InOriginalClass = true) {
+ typedef CodeCompletionResult Result;
+ for (ObjCContainerDecl::method_iterator M = Container->meth_begin(),
+ MEnd = Container->meth_end();
+ M != MEnd; ++M) {
+ if ((*M)->isInstanceMethod() == WantInstanceMethods) {
+ // Check whether the selector identifiers we've been given are a
+ // subset of the identifiers for this particular method.
+ if (!isAcceptableObjCMethod(*M, WantKind, SelIdents, NumSelIdents,
+ AllowSameLength))
+ continue;
+
+ if (!Selectors.insert((*M)->getSelector()))
+ continue;
+
+ Result R = Result(*M, 0);
+ R.StartParameter = NumSelIdents;
+ R.AllParametersAreInformative = (WantKind != MK_Any);
+ if (!InOriginalClass)
+ R.Priority += CCD_InBaseClass;
+ Results.MaybeAddResult(R, CurContext);
+ }
+ }
+
+ // Visit the protocols of protocols.
+ if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
+ if (Protocol->hasDefinition()) {
+ const ObjCList<ObjCProtocolDecl> &Protocols
+ = Protocol->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end();
+ I != E; ++I)
+ AddObjCMethods(*I, WantInstanceMethods, WantKind, SelIdents,
+ NumSelIdents, CurContext, Selectors, AllowSameLength,
+ Results, false);
+ }
+ }
+
+ ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container);
+ if (!IFace || !IFace->hasDefinition())
+ return;
+
+ // Add methods in protocols.
+ for (ObjCInterfaceDecl::protocol_iterator I = IFace->protocol_begin(),
+ E = IFace->protocol_end();
+ I != E; ++I)
+ AddObjCMethods(*I, WantInstanceMethods, WantKind, SelIdents, NumSelIdents,
+ CurContext, Selectors, AllowSameLength, Results, false);
+
+ // Add methods in categories.
+ for (ObjCCategoryDecl *CatDecl = IFace->getCategoryList(); CatDecl;
+ CatDecl = CatDecl->getNextClassCategory()) {
+ AddObjCMethods(CatDecl, WantInstanceMethods, WantKind, SelIdents,
+ NumSelIdents, CurContext, Selectors, AllowSameLength,
+ Results, InOriginalClass);
+
+    // Add the category's protocol methods.
+ const ObjCList<ObjCProtocolDecl> &Protocols
+ = CatDecl->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end();
+ I != E; ++I)
+ AddObjCMethods(*I, WantInstanceMethods, WantKind, SelIdents,
+ NumSelIdents, CurContext, Selectors, AllowSameLength,
+ Results, false);
+
+ // Add methods in category implementations.
+ if (ObjCCategoryImplDecl *Impl = CatDecl->getImplementation())
+ AddObjCMethods(Impl, WantInstanceMethods, WantKind, SelIdents,
+ NumSelIdents, CurContext, Selectors, AllowSameLength,
+ Results, InOriginalClass);
+ }
+
+ // Add methods in superclass.
+ if (IFace->getSuperClass())
+ AddObjCMethods(IFace->getSuperClass(), WantInstanceMethods, WantKind,
+ SelIdents, NumSelIdents, CurContext, Selectors,
+ AllowSameLength, Results, false);
+
+ // Add methods in our implementation, if any.
+ if (ObjCImplementationDecl *Impl = IFace->getImplementation())
+ AddObjCMethods(Impl, WantInstanceMethods, WantKind, SelIdents,
+ NumSelIdents, CurContext, Selectors, AllowSameLength,
+ Results, InOriginalClass);
+}
+
+
+void Sema::CodeCompleteObjCPropertyGetter(Scope *S) {
+ typedef CodeCompletionResult Result;
+
+ // Try to find the interface where getters might live.
+ ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurContext);
+ if (!Class) {
+ if (ObjCCategoryDecl *Category
+ = dyn_cast_or_null<ObjCCategoryDecl>(CurContext))
+ Class = Category->getClassInterface();
+
+ if (!Class)
+ return;
+ }
+
+ // Find all of the potential getters.
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+
+ VisitedSelectorSet Selectors;
+ AddObjCMethods(Class, true, MK_ZeroArgSelector, 0, 0, CurContext, Selectors,
+ /*AllowSameLength=*/true, Results);
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCPropertySetter(Scope *S) {
+ typedef CodeCompletionResult Result;
+
+ // Try to find the interface where setters might live.
+ ObjCInterfaceDecl *Class
+ = dyn_cast_or_null<ObjCInterfaceDecl>(CurContext);
+ if (!Class) {
+ if (ObjCCategoryDecl *Category
+ = dyn_cast_or_null<ObjCCategoryDecl>(CurContext))
+ Class = Category->getClassInterface();
+
+ if (!Class)
+ return;
+ }
+
+  // Find all of the potential setters.
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+
+ VisitedSelectorSet Selectors;
+ AddObjCMethods(Class, true, MK_OneArgSelector, 0, 0, CurContext,
+ Selectors, /*AllowSameLength=*/true, Results);
+
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
+ bool IsParameter) {
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Type);
+ Results.EnterNewScope();
+
+ // Add context-sensitive, Objective-C parameter-passing keywords.
+ bool AddedInOut = false;
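+  // "inout" is valid alongside either group of qualifiers below, so remember
+  // whether the first block already offered it to avoid a duplicate result.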
+ if ((DS.getObjCDeclQualifier() &
+ (ObjCDeclSpec::DQ_In | ObjCDeclSpec::DQ_Inout)) == 0) {
+ Results.AddResult("in");
+ Results.AddResult("inout");
+ AddedInOut = true;
+ }
+ if ((DS.getObjCDeclQualifier() &
+ (ObjCDeclSpec::DQ_Out | ObjCDeclSpec::DQ_Inout)) == 0) {
+ Results.AddResult("out");
+ if (!AddedInOut)
+ Results.AddResult("inout");
+ }
+ if ((DS.getObjCDeclQualifier() &
+ (ObjCDeclSpec::DQ_Bycopy | ObjCDeclSpec::DQ_Byref |
+ ObjCDeclSpec::DQ_Oneway)) == 0) {
+ Results.AddResult("bycopy");
+ Results.AddResult("byref");
+ Results.AddResult("oneway");
+ }
+
+ // If we're completing the return type of an Objective-C method and the
+ // identifier IBAction refers to a macro, provide a completion item for
+ // an action, e.g.,
+ // IBAction)<#selector#>:(id)sender
+ if (DS.getObjCDeclQualifier() == 0 && !IsParameter &&
+ Context.Idents.get("IBAction").hasMacroDefinition()) {
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo(),
+ CCP_CodePattern, CXAvailability_Available);
+ Builder.AddTypedTextChunk("IBAction");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddPlaceholderChunk("selector");
+ Builder.AddChunk(CodeCompletionString::CK_Colon);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("id");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("sender");
+ Results.AddResult(CodeCompletionResult(Builder.TakeString()));
+ }
+
+ // Add various builtin type names and specifiers.
+ AddOrdinaryNameResults(PCC_Type, S, *this, Results);
+ Results.ExitScope();
+
+  // Add the various type names.
+ Results.setFilter(&ResultBuilder::IsOrdinaryNonValueName);
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+
+ if (CodeCompleter->includeMacros())
+ AddMacroResults(PP, Results);
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Type,
+ Results.data(), Results.size());
+}
+
+/// \brief When we have an expression with type "id", we may assume
+/// that it has some more-specific class type based on knowledge of
+/// common uses of Objective-C. This routine returns that class type,
+/// or NULL if no better result could be determined.
+static ObjCInterfaceDecl *GetAssumedMessageSendExprType(Expr *E) {
+ ObjCMessageExpr *Msg = dyn_cast_or_null<ObjCMessageExpr>(E);
+ if (!Msg)
+ return 0;
+
+ Selector Sel = Msg->getSelector();
+ if (Sel.isNull())
+ return 0;
+
+ IdentifierInfo *Id = Sel.getIdentifierInfoForSlot(0);
+ if (!Id)
+ return 0;
+
+ ObjCMethodDecl *Method = Msg->getMethodDecl();
+ if (!Method)
+ return 0;
+
+ // Determine the class that we're sending the message to.
+ ObjCInterfaceDecl *IFace = 0;
+ switch (Msg->getReceiverKind()) {
+ case ObjCMessageExpr::Class:
+ if (const ObjCObjectType *ObjType
+ = Msg->getClassReceiver()->getAs<ObjCObjectType>())
+ IFace = ObjType->getInterface();
+ break;
+
+ case ObjCMessageExpr::Instance: {
+ QualType T = Msg->getInstanceReceiver()->getType();
+ if (const ObjCObjectPointerType *Ptr = T->getAs<ObjCObjectPointerType>())
+ IFace = Ptr->getInterfaceDecl();
+ break;
+ }
+
+ case ObjCMessageExpr::SuperInstance:
+ case ObjCMessageExpr::SuperClass:
+ break;
+ }
+
+ if (!IFace)
+ return 0;
+
+ ObjCInterfaceDecl *Super = IFace->getSuperClass();
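+  // Heuristic: these well-known selectors conventionally return an object of
+  // the receiver's own class (or, for -superclass/+superclass, its parent).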
+ if (Method->isInstanceMethod())
+ return llvm::StringSwitch<ObjCInterfaceDecl *>(Id->getName())
+ .Case("retain", IFace)
+ .Case("strong", IFace)
+ .Case("autorelease", IFace)
+ .Case("copy", IFace)
+ .Case("copyWithZone", IFace)
+ .Case("mutableCopy", IFace)
+ .Case("mutableCopyWithZone", IFace)
+ .Case("awakeFromCoder", IFace)
+ .Case("replacementObjectFromCoder", IFace)
+ .Case("class", IFace)
+ .Case("classForCoder", IFace)
+ .Case("superclass", Super)
+ .Default(0);
+
+ return llvm::StringSwitch<ObjCInterfaceDecl *>(Id->getName())
+ .Case("new", IFace)
+ .Case("alloc", IFace)
+ .Case("allocWithZone", IFace)
+ .Case("class", IFace)
+ .Case("superclass", Super)
+ .Default(0);
+}
+
+/// \brief Add a special completion for a message send to "super", which fills
+/// in the most likely case of forwarding all of our arguments to the
+/// superclass method.
+///
+/// \param S The semantic analysis object.
+///
+/// \param NeedSuperKeyword Whether we need to prefix this completion with
+/// the "super" keyword. Otherwise, we just need to provide the arguments.
+///
+/// \param SelIdents The identifiers in the selector that have already been
+/// provided as arguments for a send to "super".
+///
+/// \param NumSelIdents The number of identifiers in \p SelIdents.
+///
+/// \param Results The set of results to augment.
+///
+/// \returns the Objective-C method declaration that would be invoked by
+/// this "super" completion. If NULL, no completion was added.
+static ObjCMethodDecl *AddSuperSendCompletion(Sema &S, bool NeedSuperKeyword,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents,
+ ResultBuilder &Results) {
+ ObjCMethodDecl *CurMethod = S.getCurMethodDecl();
+ if (!CurMethod)
+ return 0;
+
+ ObjCInterfaceDecl *Class = CurMethod->getClassInterface();
+ if (!Class)
+ return 0;
+
+ // Try to find a superclass method with the same selector.
+ ObjCMethodDecl *SuperMethod = 0;
+ while ((Class = Class->getSuperClass()) && !SuperMethod) {
+ // Check in the class
+ SuperMethod = Class->getMethod(CurMethod->getSelector(),
+ CurMethod->isInstanceMethod());
+
+ // Check in categories or class extensions.
+ if (!SuperMethod) {
+ for (ObjCCategoryDecl *Category = Class->getCategoryList(); Category;
+ Category = Category->getNextClassCategory())
+ if ((SuperMethod = Category->getMethod(CurMethod->getSelector(),
+ CurMethod->isInstanceMethod())))
+ break;
+ }
+ }
+
+ if (!SuperMethod)
+ return 0;
+
+ // Check whether the superclass method has the same signature.
+ if (CurMethod->param_size() != SuperMethod->param_size() ||
+ CurMethod->isVariadic() != SuperMethod->isVariadic())
+ return 0;
+
+ for (ObjCMethodDecl::param_iterator CurP = CurMethod->param_begin(),
+ CurPEnd = CurMethod->param_end(),
+ SuperP = SuperMethod->param_begin();
+ CurP != CurPEnd; ++CurP, ++SuperP) {
+ // Make sure the parameter types are compatible.
+ if (!S.Context.hasSameUnqualifiedType((*CurP)->getType(),
+ (*SuperP)->getType()))
+ return 0;
+
+ // Make sure we have a parameter name to forward!
+ if (!(*CurP)->getIdentifier())
+ return 0;
+ }
+
+ // We have a superclass method. Now, form the send-to-super completion.
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+
+ // Give this completion a return type.
+ AddResultTypeChunk(S.Context, getCompletionPrintingPolicy(S), SuperMethod,
+ Builder);
+
+ // If we need the "super" keyword, add it (plus some spacing).
+ if (NeedSuperKeyword) {
+ Builder.AddTypedTextChunk("super");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ }
+
+ Selector Sel = CurMethod->getSelector();
+ if (Sel.isUnarySelector()) {
+ if (NeedSuperKeyword)
+ Builder.AddTextChunk(Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(0)));
+ else
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(0)));
+ } else {
+ ObjCMethodDecl::param_iterator CurP = CurMethod->param_begin();
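+    // Selector pieces the user already typed become informative chunks; the
+    // remaining pieces are emitted as (typed) text followed by a placeholder
+    // that forwards the current method's corresponding parameter.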
+ for (unsigned I = 0, N = Sel.getNumArgs(); I != N; ++I, ++CurP) {
+ if (I > NumSelIdents)
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+
+ if (I < NumSelIdents)
+ Builder.AddInformativeChunk(
+ Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(I) + ":"));
+ else if (NeedSuperKeyword || I > NumSelIdents) {
+ Builder.AddTextChunk(
+ Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(I) + ":"));
+ Builder.AddPlaceholderChunk(Builder.getAllocator().CopyString(
+ (*CurP)->getIdentifier()->getName()));
+ } else {
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(I) + ":"));
+ Builder.AddPlaceholderChunk(Builder.getAllocator().CopyString(
+ (*CurP)->getIdentifier()->getName()));
+ }
+ }
+ }
+
+ Results.AddResult(CodeCompletionResult(Builder.TakeString(), SuperMethod,
+ CCP_SuperCompletion));
+ return SuperMethod;
+}
+
+void Sema::CodeCompleteObjCMessageReceiver(Scope *S) {
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_ObjCMessageReceiver,
+ getLangOpts().CPlusPlus0x
+ ? &ResultBuilder::IsObjCMessageReceiverOrLambdaCapture
+ : &ResultBuilder::IsObjCMessageReceiver);
+
+ CodeCompletionDeclConsumer Consumer(Results, CurContext);
+ Results.EnterNewScope();
+ LookupVisibleDecls(S, LookupOrdinaryName, Consumer,
+ CodeCompleter->includeGlobals());
+
+ // If we are in an Objective-C method inside a class that has a superclass,
+ // add "super" as an option.
+ if (ObjCMethodDecl *Method = getCurMethodDecl())
+ if (ObjCInterfaceDecl *Iface = Method->getClassInterface())
+ if (Iface->getSuperClass()) {
+ Results.AddResult(Result("super"));
+
+ AddSuperSendCompletion(*this, /*NeedSuperKeyword=*/true, 0, 0, Results);
+ }
+
+ if (getLangOpts().CPlusPlus0x)
+ addThisCompletion(*this, Results);
+
+ Results.ExitScope();
+
+ if (CodeCompleter->includeMacros())
+ AddMacroResults(PP, Results);
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+
+}
+
+void Sema::CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents,
+ bool AtArgumentExpression) {
+ ObjCInterfaceDecl *CDecl = 0;
+ if (ObjCMethodDecl *CurMethod = getCurMethodDecl()) {
+ // Figure out which interface we're in.
+ CDecl = CurMethod->getClassInterface();
+ if (!CDecl)
+ return;
+
+ // Find the superclass of this class.
+ CDecl = CDecl->getSuperClass();
+ if (!CDecl)
+ return;
+
+ if (CurMethod->isInstanceMethod()) {
+ // We are inside an instance method, which means that the message
+ // send [super ...] is actually calling an instance method on the
+ // current object.
+ return CodeCompleteObjCInstanceMessage(S, 0,
+ SelIdents, NumSelIdents,
+ AtArgumentExpression,
+ CDecl);
+ }
+
+ // Fall through to send to the superclass in CDecl.
+ } else {
+ // "super" may be the name of a type or variable. Figure out which
+ // it is.
+ IdentifierInfo *Super = &Context.Idents.get("super");
+ NamedDecl *ND = LookupSingleName(S, Super, SuperLoc,
+ LookupOrdinaryName);
+ if ((CDecl = dyn_cast_or_null<ObjCInterfaceDecl>(ND))) {
+ // "super" names an interface. Use it.
+ } else if (TypeDecl *TD = dyn_cast_or_null<TypeDecl>(ND)) {
+ if (const ObjCObjectType *Iface
+ = Context.getTypeDeclType(TD)->getAs<ObjCObjectType>())
+ CDecl = Iface->getInterface();
+ } else if (ND && isa<UnresolvedUsingTypenameDecl>(ND)) {
+ // "super" names an unresolved type; we can't be more specific.
+ } else {
+ // Assume that "super" names some kind of value and parse that way.
+ CXXScopeSpec SS;
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId id;
+ id.setIdentifier(Super, SuperLoc);
+ ExprResult SuperExpr = ActOnIdExpression(S, SS, TemplateKWLoc, id,
+ false, false);
+ return CodeCompleteObjCInstanceMessage(S, (Expr *)SuperExpr.get(),
+ SelIdents, NumSelIdents,
+ AtArgumentExpression);
+ }
+
+ // Fall through
+ }
+
+ ParsedType Receiver;
+ if (CDecl)
+ Receiver = ParsedType::make(Context.getObjCInterfaceType(CDecl));
+ return CodeCompleteObjCClassMessage(S, Receiver, SelIdents,
+ NumSelIdents, AtArgumentExpression,
+ /*IsSuper=*/true);
+}
+
+/// \brief Given a set of code-completion results for the argument of a message
+/// send, determine the preferred type (if any) for that argument expression.
+static QualType getPreferredArgumentTypeForMessageSend(ResultBuilder &Results,
+ unsigned NumSelIdents) {
+ typedef CodeCompletionResult Result;
+ ASTContext &Context = Results.getSema().Context;
+
+ QualType PreferredType;
+ unsigned BestPriority = CCP_Unlikely * 2;
+ Result *ResultsData = Results.data();
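+  // Track the parameter type of the best-priority Objective-C method seen so
+  // far; if equally good candidates disagree on that type, give up and return
+  // a null type.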
+ for (unsigned I = 0, N = Results.size(); I != N; ++I) {
+ Result &R = ResultsData[I];
+ if (R.Kind == Result::RK_Declaration &&
+ isa<ObjCMethodDecl>(R.Declaration)) {
+ if (R.Priority <= BestPriority) {
+ ObjCMethodDecl *Method = cast<ObjCMethodDecl>(R.Declaration);
+ if (NumSelIdents <= Method->param_size()) {
+ QualType MyPreferredType = Method->param_begin()[NumSelIdents - 1]
+ ->getType();
+ if (R.Priority < BestPriority || PreferredType.isNull()) {
+ BestPriority = R.Priority;
+ PreferredType = MyPreferredType;
+ } else if (!Context.hasSameUnqualifiedType(PreferredType,
+ MyPreferredType)) {
+ PreferredType = QualType();
+ }
+ }
+ }
+ }
+ }
+
+ return PreferredType;
+}
+
+static void AddClassMessageCompletions(Sema &SemaRef, Scope *S,
+ ParsedType Receiver,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents,
+ bool AtArgumentExpression,
+ bool IsSuper,
+ ResultBuilder &Results) {
+ typedef CodeCompletionResult Result;
+ ObjCInterfaceDecl *CDecl = 0;
+
+ // If the given name refers to an interface type, retrieve the
+ // corresponding declaration.
+ if (Receiver) {
+ QualType T = SemaRef.GetTypeFromParser(Receiver, 0);
+ if (!T.isNull())
+ if (const ObjCObjectType *Interface = T->getAs<ObjCObjectType>())
+ CDecl = Interface->getInterface();
+ }
+
+ // Add all of the factory methods in this Objective-C class, its protocols,
+ // superclasses, categories, implementation, etc.
+ Results.EnterNewScope();
+
+ // If this is a send-to-super, try to add the special "super" send
+ // completion.
+ if (IsSuper) {
+ if (ObjCMethodDecl *SuperMethod
+ = AddSuperSendCompletion(SemaRef, false, SelIdents, NumSelIdents,
+ Results))
+ Results.Ignore(SuperMethod);
+ }
+
+ // If we're inside an Objective-C method definition, prefer its selector to
+ // others.
+ if (ObjCMethodDecl *CurMethod = SemaRef.getCurMethodDecl())
+ Results.setPreferredSelector(CurMethod->getSelector());
+
+ VisitedSelectorSet Selectors;
+ if (CDecl)
+ AddObjCMethods(CDecl, false, MK_Any, SelIdents, NumSelIdents,
+ SemaRef.CurContext, Selectors, AtArgumentExpression,
+ Results);
+ else {
+ // We're messaging "id" as a type; provide all class/factory methods.
+
+ // If we have an external source, load the entire class method
+ // pool from the AST file.
+ if (SemaRef.ExternalSource) {
+ for (uint32_t I = 0,
+ N = SemaRef.ExternalSource->GetNumExternalSelectors();
+ I != N; ++I) {
+ Selector Sel = SemaRef.ExternalSource->GetExternalSelector(I);
+ if (Sel.isNull() || SemaRef.MethodPool.count(Sel))
+ continue;
+
+ SemaRef.ReadMethodPool(Sel);
+ }
+ }
+
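+    // Each method-pool entry pairs the instance-method list with the
+    // factory-method list; class messages to "id" draw from the factory half.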
+ for (Sema::GlobalMethodPool::iterator M = SemaRef.MethodPool.begin(),
+ MEnd = SemaRef.MethodPool.end();
+ M != MEnd; ++M) {
+ for (ObjCMethodList *MethList = &M->second.second;
+ MethList && MethList->Method;
+ MethList = MethList->Next) {
+ if (!isAcceptableObjCMethod(MethList->Method, MK_Any, SelIdents,
+ NumSelIdents))
+ continue;
+
+ Result R(MethList->Method, 0);
+ R.StartParameter = NumSelIdents;
+ R.AllParametersAreInformative = false;
+ Results.MaybeAddResult(R, SemaRef.CurContext);
+ }
+ }
+ }
+
+ Results.ExitScope();
+}
+
+void Sema::CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents,
+ bool AtArgumentExpression,
+ bool IsSuper) {
+
+ QualType T = this->GetTypeFromParser(Receiver);
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext(CodeCompletionContext::CCC_ObjCClassMessage,
+ T, SelIdents, NumSelIdents));
+
+ AddClassMessageCompletions(*this, S, Receiver, SelIdents, NumSelIdents,
+ AtArgumentExpression, IsSuper, Results);
+
+  // If we're at the argument expression (rather than prior to the selector),
+  // we're really performing code completion for an expression. Determine
+  // whether we have a single best method; if so, code-complete the expression
+  // using the corresponding parameter type as our preferred type, which
+  // improves the completion results.
+ if (AtArgumentExpression) {
+ QualType PreferredType = getPreferredArgumentTypeForMessageSend(Results,
+ NumSelIdents);
+ if (PreferredType.isNull())
+ CodeCompleteOrdinaryName(S, PCC_Expression);
+ else
+ CodeCompleteExpression(S, PreferredType);
+ return;
+ }
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
+void Sema::CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents,
+ bool AtArgumentExpression,
+ ObjCInterfaceDecl *Super) {
+ typedef CodeCompletionResult Result;
+
+ Expr *RecExpr = static_cast<Expr *>(Receiver);
+
+ // If necessary, apply function/array conversion to the receiver.
+ // C99 6.7.5.3p[7,8].
+ if (RecExpr) {
+ ExprResult Conv = DefaultFunctionArrayLvalueConversion(RecExpr);
+    if (Conv.isInvalid()) // Conversion failed; bail out.
+ return;
+ RecExpr = Conv.take();
+ }
+ QualType ReceiverType = RecExpr? RecExpr->getType()
+ : Super? Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(Super))
+ : Context.getObjCIdType();
+
+ // If we're messaging an expression with type "id" or "Class", check
+ // whether we know something special about the receiver that allows
+ // us to assume a more-specific receiver type.
+ if (ReceiverType->isObjCIdType() || ReceiverType->isObjCClassType())
+ if (ObjCInterfaceDecl *IFace = GetAssumedMessageSendExprType(RecExpr)) {
+ if (ReceiverType->isObjCClassType())
+ return CodeCompleteObjCClassMessage(S,
+ ParsedType::make(Context.getObjCInterfaceType(IFace)),
+ SelIdents, NumSelIdents,
+ AtArgumentExpression, Super);
+
+ ReceiverType = Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(IFace));
+ }
+
+ // Build the set of methods we can see.
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext(CodeCompletionContext::CCC_ObjCInstanceMessage,
+ ReceiverType, SelIdents, NumSelIdents));
+
+ Results.EnterNewScope();
+
+ // If this is a send-to-super, try to add the special "super" send
+ // completion.
+ if (Super) {
+ if (ObjCMethodDecl *SuperMethod
+ = AddSuperSendCompletion(*this, false, SelIdents, NumSelIdents,
+ Results))
+ Results.Ignore(SuperMethod);
+ }
+
+ // If we're inside an Objective-C method definition, prefer its selector to
+ // others.
+ if (ObjCMethodDecl *CurMethod = getCurMethodDecl())
+ Results.setPreferredSelector(CurMethod->getSelector());
+
+ // Keep track of the selectors we've already added.
+ VisitedSelectorSet Selectors;
+
+ // Handle messages to Class. This really isn't a message to an instance
+ // method, so we treat it the same way we would treat a message send to a
+ // class method.
+ if (ReceiverType->isObjCClassType() ||
+ ReceiverType->isObjCQualifiedClassType()) {
+ if (ObjCMethodDecl *CurMethod = getCurMethodDecl()) {
+ if (ObjCInterfaceDecl *ClassDecl = CurMethod->getClassInterface())
+ AddObjCMethods(ClassDecl, false, MK_Any, SelIdents, NumSelIdents,
+ CurContext, Selectors, AtArgumentExpression, Results);
+ }
+ }
+ // Handle messages to a qualified ID ("id<foo>").
+ else if (const ObjCObjectPointerType *QualID
+ = ReceiverType->getAsObjCQualifiedIdType()) {
+ // Search protocols for instance methods.
+ for (ObjCObjectPointerType::qual_iterator I = QualID->qual_begin(),
+ E = QualID->qual_end();
+ I != E; ++I)
+ AddObjCMethods(*I, true, MK_Any, SelIdents, NumSelIdents, CurContext,
+ Selectors, AtArgumentExpression, Results);
+ }
+ // Handle messages to a pointer to interface type.
+ else if (const ObjCObjectPointerType *IFacePtr
+ = ReceiverType->getAsObjCInterfacePointerType()) {
+ // Search the class, its superclasses, etc., for instance methods.
+ AddObjCMethods(IFacePtr->getInterfaceDecl(), true, MK_Any, SelIdents,
+ NumSelIdents, CurContext, Selectors, AtArgumentExpression,
+ Results);
+
+ // Search protocols for instance methods.
+ for (ObjCObjectPointerType::qual_iterator I = IFacePtr->qual_begin(),
+ E = IFacePtr->qual_end();
+ I != E; ++I)
+ AddObjCMethods(*I, true, MK_Any, SelIdents, NumSelIdents, CurContext,
+ Selectors, AtArgumentExpression, Results);
+ }
+ // Handle messages to "id".
+ else if (ReceiverType->isObjCIdType()) {
+ // We're messaging "id", so provide all instance methods we know
+ // about as code-completion results.
+
+ // If we have an external source, load the entire class method
+ // pool from the AST file.
+ if (ExternalSource) {
+ for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
+ I != N; ++I) {
+ Selector Sel = ExternalSource->GetExternalSelector(I);
+ if (Sel.isNull() || MethodPool.count(Sel))
+ continue;
+
+ ReadMethodPool(Sel);
+ }
+ }
+
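+    // Walk the instance-method half of each method-pool entry and offer every
+    // method whose selector matches what has been typed so far.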
+ for (GlobalMethodPool::iterator M = MethodPool.begin(),
+ MEnd = MethodPool.end();
+ M != MEnd; ++M) {
+ for (ObjCMethodList *MethList = &M->second.first;
+ MethList && MethList->Method;
+ MethList = MethList->Next) {
+ if (!isAcceptableObjCMethod(MethList->Method, MK_Any, SelIdents,
+ NumSelIdents))
+ continue;
+
+ if (!Selectors.insert(MethList->Method->getSelector()))
+ continue;
+
+ Result R(MethList->Method, 0);
+ R.StartParameter = NumSelIdents;
+ R.AllParametersAreInformative = false;
+ Results.MaybeAddResult(R, CurContext);
+ }
+ }
+ }
+ Results.ExitScope();
+
+
+  // If we're at the argument expression (rather than prior to the selector),
+  // we're really performing code completion for an expression. Determine
+  // whether we have a single best method; if so, code-complete the expression
+  // using the corresponding parameter type as our preferred type, which
+  // improves the completion results.
+ if (AtArgumentExpression) {
+ QualType PreferredType = getPreferredArgumentTypeForMessageSend(Results,
+ NumSelIdents);
+ if (PreferredType.isNull())
+ CodeCompleteOrdinaryName(S, PCC_Expression);
+ else
+ CodeCompleteExpression(S, PreferredType);
+ return;
+ }
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ Results.getCompletionContext(),
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCForCollection(Scope *S,
+ DeclGroupPtrTy IterationVar) {
+ CodeCompleteExpressionData Data;
+ Data.ObjCCollection = true;
+
+ if (IterationVar.getAsOpaquePtr()) {
+ DeclGroupRef DG = IterationVar.getAsVal<DeclGroupRef>();
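+    // Don't suggest the just-declared iteration variable(s) as the collection
+    // being enumerated.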
+ for (DeclGroupRef::iterator I = DG.begin(), End = DG.end(); I != End; ++I) {
+ if (*I)
+ Data.IgnoreDecls.push_back(*I);
+ }
+ }
+
+ CodeCompleteExpression(S, Data);
+}
+
+void Sema::CodeCompleteObjCSelector(Scope *S, IdentifierInfo **SelIdents,
+ unsigned NumSelIdents) {
+ // If we have an external source, load the entire class method
+ // pool from the AST file.
+ if (ExternalSource) {
+ for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
+ I != N; ++I) {
+ Selector Sel = ExternalSource->GetExternalSelector(I);
+ if (Sel.isNull() || MethodPool.count(Sel))
+ continue;
+
+ ReadMethodPool(Sel);
+ }
+ }
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_SelectorName);
+ Results.EnterNewScope();
+ for (GlobalMethodPool::iterator M = MethodPool.begin(),
+ MEnd = MethodPool.end();
+ M != MEnd; ++M) {
+
+ Selector Sel = M->first;
+ if (!isAcceptableObjCSelector(Sel, MK_Any, SelIdents, NumSelIdents))
+ continue;
+
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ if (Sel.isUnarySelector()) {
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(0)));
+ Results.AddResult(Builder.TakeString());
+ continue;
+ }
+
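+    // Accumulate the selector pieces: everything already typed is flushed as
+    // a single informative chunk, and the remaining pieces become the typed
+    // text.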
+ std::string Accumulator;
+ for (unsigned I = 0, N = Sel.getNumArgs(); I != N; ++I) {
+ if (I == NumSelIdents) {
+ if (!Accumulator.empty()) {
+ Builder.AddInformativeChunk(Builder.getAllocator().CopyString(
+ Accumulator));
+ Accumulator.clear();
+ }
+ }
+
+ Accumulator += Sel.getNameForSlot(I);
+ Accumulator += ':';
+ }
+    Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(Accumulator));
+ Results.AddResult(Builder.TakeString());
+ }
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_SelectorName,
+ Results.data(), Results.size());
+}
+
+/// \brief Add all of the protocol declarations that we find in the given
+/// (translation unit) context.
+static void AddProtocolResults(DeclContext *Ctx, DeclContext *CurContext,
+ bool OnlyForwardDeclarations,
+ ResultBuilder &Results) {
+ typedef CodeCompletionResult Result;
+
+ for (DeclContext::decl_iterator D = Ctx->decls_begin(),
+ DEnd = Ctx->decls_end();
+ D != DEnd; ++D) {
+ // Record any protocols we find.
+ if (ObjCProtocolDecl *Proto = dyn_cast<ObjCProtocolDecl>(*D))
+ if (!OnlyForwardDeclarations || !Proto->hasDefinition())
+ Results.AddResult(Result(Proto, 0), CurContext, 0, false);
+ }
+}
+
+void Sema::CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols,
+ unsigned NumProtocols) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_ObjCProtocolName);
+
+ if (CodeCompleter && CodeCompleter->includeGlobals()) {
+ Results.EnterNewScope();
+
+ // Tell the result set to ignore all of the protocols we have
+ // already seen.
+ // FIXME: This doesn't work when caching code-completion results.
+ for (unsigned I = 0; I != NumProtocols; ++I)
+ if (ObjCProtocolDecl *Protocol = LookupProtocol(Protocols[I].first,
+ Protocols[I].second))
+ Results.Ignore(Protocol);
+
+ // Add all protocols.
+ AddProtocolResults(Context.getTranslationUnitDecl(), CurContext, false,
+ Results);
+
+ Results.ExitScope();
+ }
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_ObjCProtocolName,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCProtocolDecl(Scope *) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_ObjCProtocolName);
+
+ if (CodeCompleter && CodeCompleter->includeGlobals()) {
+ Results.EnterNewScope();
+
+ // Add all protocols.
+ AddProtocolResults(Context.getTranslationUnitDecl(), CurContext, true,
+ Results);
+
+ Results.ExitScope();
+ }
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_ObjCProtocolName,
+ Results.data(),Results.size());
+}
+
+/// \brief Add all of the Objective-C interface declarations that we find in
+/// the given (translation unit) context.
+static void AddInterfaceResults(DeclContext *Ctx, DeclContext *CurContext,
+ bool OnlyForwardDeclarations,
+ bool OnlyUnimplemented,
+ ResultBuilder &Results) {
+ typedef CodeCompletionResult Result;
+
+ for (DeclContext::decl_iterator D = Ctx->decls_begin(),
+ DEnd = Ctx->decls_end();
+ D != DEnd; ++D) {
+ // Record any interfaces we find.
+ if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(*D))
+ if ((!OnlyForwardDeclarations || !Class->hasDefinition()) &&
+ (!OnlyUnimplemented || !Class->getImplementation()))
+ Results.AddResult(Result(Class, 0), CurContext, 0, false);
+ }
+}
+
+void Sema::CodeCompleteObjCInterfaceDecl(Scope *S) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+
+ if (CodeCompleter->includeGlobals()) {
+ // Add all classes.
+ AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext, false,
+ false, Results);
+ }
+
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_ObjCInterfaceName,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName,
+ SourceLocation ClassNameLoc) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_ObjCInterfaceName);
+ Results.EnterNewScope();
+
+ // Make sure that we ignore the class we're currently defining.
+ NamedDecl *CurClass
+ = LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
+ if (CurClass && isa<ObjCInterfaceDecl>(CurClass))
+ Results.Ignore(CurClass);
+
+ if (CodeCompleter->includeGlobals()) {
+ // Add all classes.
+ AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext, false,
+ false, Results);
+ }
+
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_ObjCInterfaceName,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCImplementationDecl(Scope *S) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+
+ if (CodeCompleter->includeGlobals()) {
+ // Add all unimplemented classes.
+ AddInterfaceResults(Context.getTranslationUnitDecl(), CurContext, false,
+ true, Results);
+ }
+
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_ObjCInterfaceName,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCInterfaceCategory(Scope *S,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassNameLoc) {
+ typedef CodeCompletionResult Result;
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_ObjCCategoryName);
+
+  // Ignore any categories that have already been declared for this
+  // interface.
+ llvm::SmallPtrSet<IdentifierInfo *, 16> CategoryNames;
+ NamedDecl *CurClass
+ = LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
+ if (ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurClass))
+ for (ObjCCategoryDecl *Category = Class->getCategoryList(); Category;
+ Category = Category->getNextClassCategory())
+ CategoryNames.insert(Category->getIdentifier());
+
+ // Add all of the categories we know about.
+ Results.EnterNewScope();
+ TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
+ for (DeclContext::decl_iterator D = TU->decls_begin(),
+ DEnd = TU->decls_end();
+ D != DEnd; ++D)
+ if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(*D))
+ if (CategoryNames.insert(Category->getIdentifier()))
+ Results.AddResult(Result(Category, 0), CurContext, 0, false);
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_ObjCCategoryName,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCImplementationCategory(Scope *S,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassNameLoc) {
+ typedef CodeCompletionResult Result;
+
+ // Find the corresponding interface. If we couldn't find the interface, the
+  // program itself is ill-formed. However, we'll still try to be helpful by
+ // providing the list of all of the categories we know about.
+ NamedDecl *CurClass
+ = LookupSingleName(TUScope, ClassName, ClassNameLoc, LookupOrdinaryName);
+ ObjCInterfaceDecl *Class = dyn_cast_or_null<ObjCInterfaceDecl>(CurClass);
+ if (!Class)
+ return CodeCompleteObjCInterfaceCategory(S, ClassName, ClassNameLoc);
+
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_ObjCCategoryName);
+
+  // Add all of the categories that have corresponding interface
+ // declarations in this class and any of its superclasses, except for
+ // already-implemented categories in the class itself.
+ llvm::SmallPtrSet<IdentifierInfo *, 16> CategoryNames;
+ Results.EnterNewScope();
+ bool IgnoreImplemented = true;
+ while (Class) {
+ for (ObjCCategoryDecl *Category = Class->getCategoryList(); Category;
+ Category = Category->getNextClassCategory())
+ if ((!IgnoreImplemented || !Category->getImplementation()) &&
+ CategoryNames.insert(Category->getIdentifier()))
+ Results.AddResult(Result(Category, 0), CurContext, 0, false);
+
+ Class = Class->getSuperClass();
+ IgnoreImplemented = false;
+ }
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_ObjCCategoryName,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCPropertyDefinition(Scope *S) {
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+
+ // Figure out where this @synthesize lives.
+ ObjCContainerDecl *Container
+ = dyn_cast_or_null<ObjCContainerDecl>(CurContext);
+ if (!Container ||
+ (!isa<ObjCImplementationDecl>(Container) &&
+ !isa<ObjCCategoryImplDecl>(Container)))
+ return;
+
+ // Ignore any properties that have already been implemented.
+ for (DeclContext::decl_iterator D = Container->decls_begin(),
+ DEnd = Container->decls_end();
+ D != DEnd; ++D)
+ if (ObjCPropertyImplDecl *PropertyImpl = dyn_cast<ObjCPropertyImplDecl>(*D))
+ Results.Ignore(PropertyImpl->getPropertyDecl());
+
+ // Add any properties that we find.
+ AddedPropertiesSet AddedProperties;
+ Results.EnterNewScope();
+ if (ObjCImplementationDecl *ClassImpl
+ = dyn_cast<ObjCImplementationDecl>(Container))
+ AddObjCProperties(ClassImpl->getClassInterface(), false,
+ /*AllowNullaryMethods=*/false, CurContext,
+ AddedProperties, Results);
+ else
+ AddObjCProperties(cast<ObjCCategoryImplDecl>(Container)->getCategoryDecl(),
+ false, /*AllowNullaryMethods=*/false, CurContext,
+ AddedProperties, Results);
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
+ IdentifierInfo *PropertyName) {
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+
+ // Figure out where this @synthesize lives.
+ ObjCContainerDecl *Container
+ = dyn_cast_or_null<ObjCContainerDecl>(CurContext);
+ if (!Container ||
+ (!isa<ObjCImplementationDecl>(Container) &&
+ !isa<ObjCCategoryImplDecl>(Container)))
+ return;
+
+ // Figure out which interface we're looking into.
+ ObjCInterfaceDecl *Class = 0;
+ if (ObjCImplementationDecl *ClassImpl
+ = dyn_cast<ObjCImplementationDecl>(Container))
+ Class = ClassImpl->getClassInterface();
+ else
+ Class = cast<ObjCCategoryImplDecl>(Container)->getCategoryDecl()
+ ->getClassInterface();
+
+ // Determine the type of the property we're synthesizing.
+ QualType PropertyType = Context.getObjCIdType();
+ if (Class) {
+ if (ObjCPropertyDecl *Property
+ = Class->FindPropertyDeclaration(PropertyName)) {
+ PropertyType
+ = Property->getType().getNonReferenceType().getUnqualifiedType();
+
+      // Give preference to ivars whose type matches the property's type.
+ Results.setPreferredType(PropertyType);
+ }
+ }
+
+ // Add all of the instance variables in this class and its superclasses.
+ Results.EnterNewScope();
+ bool SawSimilarlyNamedIvar = false;
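+  // Recognize the common backing-ivar naming conventions: an underscore
+  // prefix (_name) or suffix (name_), in addition to an exact name match.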
+ std::string NameWithPrefix;
+ NameWithPrefix += '_';
+ NameWithPrefix += PropertyName->getName();
+ std::string NameWithSuffix = PropertyName->getName().str();
+ NameWithSuffix += '_';
+ for(; Class; Class = Class->getSuperClass()) {
+ for (ObjCIvarDecl *Ivar = Class->all_declared_ivar_begin(); Ivar;
+ Ivar = Ivar->getNextIvar()) {
+ Results.AddResult(Result(Ivar, 0), CurContext, 0, false);
+
+ // Determine whether we've seen an ivar with a name similar to the
+ // property.
+ if ((PropertyName == Ivar->getIdentifier() ||
+ NameWithPrefix == Ivar->getName() ||
+ NameWithSuffix == Ivar->getName())) {
+ SawSimilarlyNamedIvar = true;
+
+ // Reduce the priority of this result by one, to give it a slight
+ // advantage over other results whose names don't match so closely.
+ if (Results.size() &&
+ Results.data()[Results.size() - 1].Kind
+ == CodeCompletionResult::RK_Declaration &&
+ Results.data()[Results.size() - 1].Declaration == Ivar)
+ Results.data()[Results.size() - 1].Priority--;
+ }
+ }
+ }
+
+ if (!SawSimilarlyNamedIvar) {
+    // Create the ivar result _propName, which the user can use to synthesize
+    // an ivar of the appropriate type.
+ unsigned Priority = CCP_MemberDeclaration + 1;
+ typedef CodeCompletionResult Result;
+ CodeCompletionAllocator &Allocator = Results.getAllocator();
+ CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo(),
+ Priority,CXAvailability_Available);
+
+ PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
+ Builder.AddResultTypeChunk(GetCompletionTypeString(PropertyType, Context,
+ Policy, Allocator));
+ Builder.AddTypedTextChunk(Allocator.CopyString(NameWithPrefix));
+ Results.AddResult(Result(Builder.TakeString(), Priority,
+ CXCursor_ObjCIvarDecl));
+ }
+
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+// Mapping from selectors to the methods that implement that selector, along
+// with the "in original class" flag.
+typedef llvm::DenseMap<Selector, std::pair<ObjCMethodDecl *, bool> >
+ KnownMethodsMap;
+
+/// \brief Find all of the methods that reside in the given container
+/// (and its superclasses, protocols, etc.) that meet the given
+/// criteria. Insert those methods into the map of known methods,
+/// indexed by selector so they can be easily found.
+static void FindImplementableMethods(ASTContext &Context,
+ ObjCContainerDecl *Container,
+ bool WantInstanceMethods,
+ QualType ReturnType,
+ KnownMethodsMap &KnownMethods,
+ bool InOriginalClass = true) {
+ if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Container)) {
+ // Recurse into protocols.
+ if (!IFace->hasDefinition())
+ return;
+
+ const ObjCList<ObjCProtocolDecl> &Protocols
+ = IFace->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end();
+ I != E; ++I)
+ FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
+ KnownMethods, InOriginalClass);
+
+ // Add methods from any class extensions and categories.
+ for (const ObjCCategoryDecl *Cat = IFace->getCategoryList(); Cat;
+ Cat = Cat->getNextClassCategory())
+ FindImplementableMethods(Context, const_cast<ObjCCategoryDecl*>(Cat),
+ WantInstanceMethods, ReturnType,
+ KnownMethods, false);
+
+ // Visit the superclass.
+ if (IFace->getSuperClass())
+ FindImplementableMethods(Context, IFace->getSuperClass(),
+ WantInstanceMethods, ReturnType,
+ KnownMethods, false);
+ }
+
+ if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Container)) {
+ // Recurse into protocols.
+ const ObjCList<ObjCProtocolDecl> &Protocols
+ = Category->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end();
+ I != E; ++I)
+ FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
+ KnownMethods, InOriginalClass);
+
+ // If this category is the original class, jump to the interface.
+ if (InOriginalClass && Category->getClassInterface())
+ FindImplementableMethods(Context, Category->getClassInterface(),
+ WantInstanceMethods, ReturnType, KnownMethods,
+ false);
+ }
+
+ if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Container)) {
+ if (Protocol->hasDefinition()) {
+ // Recurse into protocols.
+ const ObjCList<ObjCProtocolDecl> &Protocols
+ = Protocol->getReferencedProtocols();
+ for (ObjCList<ObjCProtocolDecl>::iterator I = Protocols.begin(),
+ E = Protocols.end();
+ I != E; ++I)
+ FindImplementableMethods(Context, *I, WantInstanceMethods, ReturnType,
+ KnownMethods, false);
+ }
+ }
+
+ // Add methods in this container. This operation occurs last because
+ // we want the methods from this container to override any methods
+ // we've previously seen with the same selector.
+ for (ObjCContainerDecl::method_iterator M = Container->meth_begin(),
+ MEnd = Container->meth_end();
+ M != MEnd; ++M) {
+ if ((*M)->isInstanceMethod() == WantInstanceMethods) {
+ if (!ReturnType.isNull() &&
+ !Context.hasSameUnqualifiedType(ReturnType, (*M)->getResultType()))
+ continue;
+
+ KnownMethods[(*M)->getSelector()] = std::make_pair(*M, InOriginalClass);
+ }
+ }
+}
+
+/// \brief Add the parenthesized return or parameter type chunk to a code
+/// completion string.
+static void AddObjCPassingTypeChunk(QualType Type,
+ unsigned ObjCDeclQuals,
+ ASTContext &Context,
+ const PrintingPolicy &Policy,
+ CodeCompletionBuilder &Builder) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ std::string Quals = formatObjCParamQualifiers(ObjCDeclQuals);
+ if (!Quals.empty())
+ Builder.AddTextChunk(Builder.getAllocator().CopyString(Quals));
+ Builder.AddTextChunk(GetCompletionTypeString(Type, Context, Policy,
+ Builder.getAllocator()));
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+}
+
+/// \brief Determine whether the given class is or inherits from a class by
+/// the given name.
+static bool InheritsFromClassNamed(ObjCInterfaceDecl *Class,
+ StringRef Name) {
+ if (!Class)
+ return false;
+
+ if (Class->getIdentifier() && Class->getIdentifier()->getName() == Name)
+ return true;
+
+ return InheritsFromClassNamed(Class->getSuperClass(), Name);
+}
+
+/// \brief Add code completions for Objective-C Key-Value Coding (KVC) and
+/// Key-Value Observing (KVO).
+static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
+ bool IsInstanceMethod,
+ QualType ReturnType,
+ ASTContext &Context,
+ VisitedSelectorSet &KnownSelectors,
+ ResultBuilder &Results) {
+ IdentifierInfo *PropName = Property->getIdentifier();
+ if (!PropName || PropName->getLength() == 0)
+ return;
+
+ PrintingPolicy Policy = getCompletionPrintingPolicy(Results.getSema());
+
+ // Builder that will create each code completion.
+ typedef CodeCompletionResult Result;
+ CodeCompletionAllocator &Allocator = Results.getAllocator();
+ CodeCompletionBuilder Builder(Allocator, Results.getCodeCompletionTUInfo());
+
+ // The selector table.
+ SelectorTable &Selectors = Context.Selectors;
+
+ // The property name, copied into the code completion allocation region
+ // on demand.
+ struct KeyHolder {
+ CodeCompletionAllocator &Allocator;
+ StringRef Key;
+ const char *CopiedKey;
+
+ KeyHolder(CodeCompletionAllocator &Allocator, StringRef Key)
+ : Allocator(Allocator), Key(Key), CopiedKey(0) { }
+
+ operator const char *() {
+ if (CopiedKey)
+ return CopiedKey;
+
+ return CopiedKey = Allocator.CopyString(Key);
+ }
+ } Key(Allocator, PropName->getName());
+
+  // The property name with its first character uppercased.
+ std::string UpperKey = PropName->getName();
+ if (!UpperKey.empty())
+ UpperKey[0] = toupper(UpperKey[0]);
+
+ bool ReturnTypeMatchesProperty = ReturnType.isNull() ||
+ Context.hasSameUnqualifiedType(ReturnType.getNonReferenceType(),
+ Property->getType());
+ bool ReturnTypeMatchesVoid
+ = ReturnType.isNull() || ReturnType->isVoidType();
+
+ // Add the normal accessor -(type)key.
+ if (IsInstanceMethod &&
+ KnownSelectors.insert(Selectors.getNullarySelector(PropName)) &&
+ ReturnTypeMatchesProperty && !Property->getGetterMethodDecl()) {
+ if (ReturnType.isNull())
+ AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0,
+ Context, Policy, Builder);
+
+ Builder.AddTypedTextChunk(Key);
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+
+ // If we have an integral or boolean property (or the user has provided
+ // an integral or boolean return type), add the accessor -(type)isKey.
+ if (IsInstanceMethod &&
+ ((!ReturnType.isNull() &&
+ (ReturnType->isIntegerType() || ReturnType->isBooleanType())) ||
+ (ReturnType.isNull() &&
+ (Property->getType()->isIntegerType() ||
+ Property->getType()->isBooleanType())))) {
+ std::string SelectorName = (Twine("is") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("BOOL");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(
+ Allocator.CopyString(SelectorId->getName()));
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Add the normal mutator.
+ if (IsInstanceMethod && ReturnTypeMatchesVoid &&
+ !Property->getSetterMethodDecl()) {
+ std::string SelectorName = (Twine("set") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(
+ Allocator.CopyString(SelectorId->getName()));
+ Builder.AddTypedTextChunk(":");
+ AddObjCPassingTypeChunk(Property->getType(), /*Quals=*/0,
+ Context, Policy, Builder);
+ Builder.AddTextChunk(Key);
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Indexed and unordered accessors
+ unsigned IndexedGetterPriority = CCP_CodePattern;
+ unsigned IndexedSetterPriority = CCP_CodePattern;
+ unsigned UnorderedGetterPriority = CCP_CodePattern;
+ unsigned UnorderedSetterPriority = CCP_CodePattern;
+ if (const ObjCObjectPointerType *ObjCPointer
+ = Property->getType()->getAs<ObjCObjectPointerType>()) {
+ if (ObjCInterfaceDecl *IFace = ObjCPointer->getInterfaceDecl()) {
+ // If this interface type is not provably derived from a known
+ // collection, penalize the corresponding completions.
+ if (!InheritsFromClassNamed(IFace, "NSMutableArray")) {
+ IndexedSetterPriority += CCD_ProbablyNotObjCCollection;
+ if (!InheritsFromClassNamed(IFace, "NSArray"))
+ IndexedGetterPriority += CCD_ProbablyNotObjCCollection;
+ }
+
+ if (!InheritsFromClassNamed(IFace, "NSMutableSet")) {
+ UnorderedSetterPriority += CCD_ProbablyNotObjCCollection;
+ if (!InheritsFromClassNamed(IFace, "NSSet"))
+ UnorderedGetterPriority += CCD_ProbablyNotObjCCollection;
+ }
+ }
+ } else {
+ IndexedGetterPriority += CCD_ProbablyNotObjCCollection;
+ IndexedSetterPriority += CCD_ProbablyNotObjCCollection;
+ UnorderedGetterPriority += CCD_ProbablyNotObjCCollection;
+ UnorderedSetterPriority += CCD_ProbablyNotObjCCollection;
+ }
+
+ // Add -(NSUInteger)countOf<key>
+ if (IsInstanceMethod &&
+ (ReturnType.isNull() || ReturnType->isIntegerType())) {
+ std::string SelectorName = (Twine("countOf") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSUInteger");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(
+ Allocator.CopyString(SelectorId->getName()));
+ Results.AddResult(Result(Builder.TakeString(),
+ std::min(IndexedGetterPriority,
+ UnorderedGetterPriority),
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Indexed getters
+ // Add -(id)objectInKeyAtIndex:(NSUInteger)index
+ if (IsInstanceMethod &&
+ (ReturnType.isNull() || ReturnType->isObjCObjectPointerType())) {
+ std::string SelectorName
+ = (Twine("objectIn") + UpperKey + "AtIndex").str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("id");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSUInteger");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("index");
+ Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Add -(NSArray *)keyAtIndexes:(NSIndexSet *)indexes
+ if (IsInstanceMethod &&
+ (ReturnType.isNull() ||
+ (ReturnType->isObjCObjectPointerType() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
+ ->getName() == "NSArray"))) {
+ std::string SelectorName
+ = (Twine(Property->getName()) + "AtIndexes").str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSArray *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSIndexSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("indexes");
+ Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Add -(void)getKey:(type **)buffer range:(NSRange)inRange
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (Twine("get") + UpperKey).str();
+ IdentifierInfo *SelectorIds[2] = {
+ &Context.Idents.get(SelectorName),
+ &Context.Idents.get("range")
+ };
+
+ if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" **");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("buffer");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk("range:");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSRange");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("inRange");
+ Results.AddResult(Result(Builder.TakeString(), IndexedGetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Mutable indexed accessors
+
+ // - (void)insertObject:(type *)object inKeyAtIndex:(NSUInteger)index
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (Twine("in") + UpperKey + "AtIndex").str();
+ IdentifierInfo *SelectorIds[2] = {
+ &Context.Idents.get("insertObject"),
+ &Context.Idents.get(SelectorName)
+ };
+
+ if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk("insertObject:");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("object");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("NSUInteger");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("index");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)insertKey:(NSArray *)array atIndexes:(NSIndexSet *)indexes
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (Twine("insert") + UpperKey).str();
+ IdentifierInfo *SelectorIds[2] = {
+ &Context.Idents.get(SelectorName),
+ &Context.Idents.get("atIndexes")
+ };
+
+ if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSArray *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("array");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk("atIndexes:");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("NSIndexSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("indexes");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // -(void)removeObjectFromKeyAtIndex:(NSUInteger)index
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName
+ = (Twine("removeObjectFrom") + UpperKey + "AtIndex").str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSUInteger");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("index");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // -(void)removeKeyAtIndexes:(NSIndexSet *)indexes
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName
+ = (Twine("remove") + UpperKey + "AtIndexes").str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSIndexSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("indexes");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)replaceObjectInKeyAtIndex:(NSUInteger)index withObject:(id)object
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName
+ = (Twine("replaceObjectIn") + UpperKey + "AtIndex").str();
+ IdentifierInfo *SelectorIds[2] = {
+ &Context.Idents.get(SelectorName),
+ &Context.Idents.get("withObject")
+ };
+
+ if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("NSUInteger");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("index");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk("withObject:");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("id");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("object");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)replaceKeyAtIndexes:(NSIndexSet *)indexes withKey:(NSArray *)array
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName1
+ = (Twine("replace") + UpperKey + "AtIndexes").str();
+ std::string SelectorName2 = (Twine("with") + UpperKey).str();
+ IdentifierInfo *SelectorIds[2] = {
+ &Context.Idents.get(SelectorName1),
+ &Context.Idents.get(SelectorName2)
+ };
+
+ if (KnownSelectors.insert(Selectors.getSelector(2, SelectorIds))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName1 + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("NSIndexSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("indexes");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName2 + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSArray *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("array");
+ Results.AddResult(Result(Builder.TakeString(), IndexedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Unordered getters
+ // - (NSEnumerator *)enumeratorOfKey
+ if (IsInstanceMethod &&
+ (ReturnType.isNull() ||
+ (ReturnType->isObjCObjectPointerType() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
+ ->getName() == "NSEnumerator"))) {
+ std::string SelectorName = (Twine("enumeratorOf") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSEnumerator *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
+ Results.AddResult(Result(Builder.TakeString(), UnorderedGetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (type *)memberOfKey:(type *)object
+ if (IsInstanceMethod &&
+ (ReturnType.isNull() || ReturnType->isObjCObjectPointerType())) {
+ std::string SelectorName = (Twine("memberOf") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ if (ReturnType.isNull()) {
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" *");
+ } else {
+ Builder.AddTextChunk(GetCompletionTypeString(ReturnType, Context,
+ Policy,
+ Builder.getAllocator()));
+ }
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("object");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedGetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Mutable unordered accessors
+ // - (void)addKeyObject:(type *)object
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName
+ = (Twine("add") + UpperKey + Twine("Object")).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("object");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)addKey:(NSSet *)objects
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (Twine("add") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("objects");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)removeKeyObject:(type *)object
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName
+ = (Twine("remove") + UpperKey + Twine("Object")).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("object-type");
+ Builder.AddTextChunk(" *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("object");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)removeKey:(NSSet *)objects
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (Twine("remove") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("objects");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // - (void)intersectKey:(NSSet *)objects
+ if (IsInstanceMethod && ReturnTypeMatchesVoid) {
+ std::string SelectorName = (Twine("intersect") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getUnarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("void");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName + ":"));
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Builder.AddTextChunk("objects");
+ Results.AddResult(Result(Builder.TakeString(), UnorderedSetterPriority,
+ CXCursor_ObjCInstanceMethodDecl));
+ }
+ }
+
+ // Key-Value Observing
+ // + (NSSet *)keyPathsForValuesAffectingKey
+ if (!IsInstanceMethod &&
+ (ReturnType.isNull() ||
+ (ReturnType->isObjCObjectPointerType() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
+ ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl()
+ ->getName() == "NSSet"))) {
+ std::string SelectorName
+ = (Twine("keyPathsForValuesAffecting") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("NSSet *");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ CXCursor_ObjCClassMethodDecl));
+ }
+ }
+
+ // + (BOOL)automaticallyNotifiesObserversForKey
+ if (!IsInstanceMethod &&
+ (ReturnType.isNull() ||
+ ReturnType->isIntegerType() ||
+ ReturnType->isBooleanType())) {
+ std::string SelectorName
+ = (Twine("automaticallyNotifiesObserversOf") + UpperKey).str();
+ IdentifierInfo *SelectorId = &Context.Idents.get(SelectorName);
+ if (KnownSelectors.insert(Selectors.getNullarySelector(SelectorId))) {
+ if (ReturnType.isNull()) {
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddTextChunk("BOOL");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ }
+
+ Builder.AddTypedTextChunk(Allocator.CopyString(SelectorName));
+ Results.AddResult(Result(Builder.TakeString(), CCP_CodePattern,
+ CXCursor_ObjCClassMethodDecl));
+ }
+ }
+}
+
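+/// Provide code completions for an Objective-C method declaration or
+/// definition, suggesting the methods that can be declared or implemented in
+/// the current container as well as key-value coding/observing accessor
+/// patterns for its properties.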
+void Sema::CodeCompleteObjCMethodDecl(Scope *S,
+ bool IsInstanceMethod,
+ ParsedType ReturnTy) {
+ // Determine the return type of the method we're declaring, if
+ // provided.
+ QualType ReturnType = GetTypeFromParser(ReturnTy);
+ Decl *IDecl = 0;
+ if (CurContext->isObjCContainer()) {
+ ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext);
+ IDecl = cast<Decl>(OCD);
+ }
+ // Determine where we should start searching for methods.
+ ObjCContainerDecl *SearchDecl = 0;
+ bool IsInImplementation = false;
+ if (Decl *D = IDecl) {
+ if (ObjCImplementationDecl *Impl = dyn_cast<ObjCImplementationDecl>(D)) {
+ SearchDecl = Impl->getClassInterface();
+ IsInImplementation = true;
+ } else if (ObjCCategoryImplDecl *CatImpl
+ = dyn_cast<ObjCCategoryImplDecl>(D)) {
+ SearchDecl = CatImpl->getCategoryDecl();
+ IsInImplementation = true;
+ } else
+ SearchDecl = dyn_cast<ObjCContainerDecl>(D);
+ }
+
+ if (!SearchDecl && S) {
+ if (DeclContext *DC = static_cast<DeclContext *>(S->getEntity()))
+ SearchDecl = dyn_cast<ObjCContainerDecl>(DC);
+ }
+
+ if (!SearchDecl) {
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ 0, 0);
+ return;
+ }
+
+ // Find all of the methods that we could declare/implement here.
+ KnownMethodsMap KnownMethods;
+ FindImplementableMethods(Context, SearchDecl, IsInstanceMethod,
+ ReturnType, KnownMethods);
+
+ // Add declarations or definitions for each of the known methods.
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+ Results.EnterNewScope();
+ PrintingPolicy Policy = getCompletionPrintingPolicy(*this);
+ for (KnownMethodsMap::iterator M = KnownMethods.begin(),
+ MEnd = KnownMethods.end();
+ M != MEnd; ++M) {
+ ObjCMethodDecl *Method = M->second.first;
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
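+ // Build the declaration pattern for this method: e.g., for a hypothetical
+ // -(void)setFoo:(int)x the pattern is roughly "(void)setFoo:(int)x", with a
+ // braced body added when completing inside an @implementation.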
+
+ // If the result type was not already provided, add it to the
+ // pattern as (type).
+ if (ReturnType.isNull())
+ AddObjCPassingTypeChunk(Method->getResultType(),
+ Method->getObjCDeclQualifier(),
+ Context, Policy,
+ Builder);
+
+ Selector Sel = Method->getSelector();
+
+ // Add the first part of the selector to the pattern.
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ Sel.getNameForSlot(0)));
+
+ // Add parameters to the pattern.
+ unsigned I = 0;
+ for (ObjCMethodDecl::param_iterator P = Method->param_begin(),
+ PEnd = Method->param_end();
+ P != PEnd; (void)++P, ++I) {
+ // Add the part of the selector name.
+ if (I == 0)
+ Builder.AddTypedTextChunk(":");
+ else if (I < Sel.getNumArgs()) {
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTypedTextChunk(
+ Builder.getAllocator().CopyString(Sel.getNameForSlot(I) + ":"));
+ } else
+ break;
+
+ // Add the parameter type.
+ AddObjCPassingTypeChunk((*P)->getOriginalType(),
+ (*P)->getObjCDeclQualifier(),
+ Context, Policy,
+ Builder);
+
+ if (IdentifierInfo *Id = (*P)->getIdentifier())
+ Builder.AddTextChunk(Builder.getAllocator().CopyString(Id->getName()));
+ }
+
+ if (Method->isVariadic()) {
+ if (Method->param_size() > 0)
+ Builder.AddChunk(CodeCompletionString::CK_Comma);
+ Builder.AddTextChunk("...");
+ }
+
+ if (IsInImplementation && Results.includeCodePatterns()) {
+ // We will be defining the method here, so add a compound statement.
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftBrace);
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ if (!Method->getResultType()->isVoidType()) {
+ // If the result type is not void, add a return clause.
+ Builder.AddTextChunk("return");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("expression");
+ Builder.AddChunk(CodeCompletionString::CK_SemiColon);
+ } else
+ Builder.AddPlaceholderChunk("statements");
+
+ Builder.AddChunk(CodeCompletionString::CK_VerticalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_RightBrace);
+ }
+
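+ // Methods that were only inherited from a base class or protocol, rather
+ // than declared in the container being completed, get a small ranking
+ // penalty.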
+ unsigned Priority = CCP_CodePattern;
+ if (!M->second.second)
+ Priority += CCD_InBaseClass;
+
+ Results.AddResult(Result(Builder.TakeString(), Method, Priority));
+ }
+
+ // Add Key-Value-Coding and Key-Value-Observing accessor methods for all of
+ // the properties in this class and its categories.
+ if (Context.getLangOpts().ObjC2) {
+ SmallVector<ObjCContainerDecl *, 4> Containers;
+ Containers.push_back(SearchDecl);
+
+ VisitedSelectorSet KnownSelectors;
+ for (KnownMethodsMap::iterator M = KnownMethods.begin(),
+ MEnd = KnownMethods.end();
+ M != MEnd; ++M)
+ KnownSelectors.insert(M->first);
+
+ ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(SearchDecl);
+ if (!IFace)
+ if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(SearchDecl))
+ IFace = Category->getClassInterface();
+
+ if (IFace) {
+ for (ObjCCategoryDecl *Category = IFace->getCategoryList(); Category;
+ Category = Category->getNextClassCategory())
+ Containers.push_back(Category);
+ }
+
+ for (unsigned I = 0, N = Containers.size(); I != N; ++I) {
+ for (ObjCContainerDecl::prop_iterator P = Containers[I]->prop_begin(),
+ PEnd = Containers[I]->prop_end();
+ P != PEnd; ++P) {
+ AddObjCKeyValueCompletions(*P, IsInstanceMethod, ReturnType, Context,
+ KnownSelectors, Results);
+ }
+ }
+ }
+
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
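+/// Provide code completions for the selector of an Objective-C method
+/// declaration, drawing candidate selectors (and, at a parameter position,
+/// previously seen parameter names) from the global method pool.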
+void Sema::CodeCompleteObjCMethodDeclSelector(Scope *S,
+ bool IsInstanceMethod,
+ bool AtParameterName,
+ ParsedType ReturnTy,
+ IdentifierInfo **SelIdents,
+ unsigned NumSelIdents) {
+ // If we have an external source, load the entire class method
+ // pool from the AST file.
+ if (ExternalSource) {
+ for (uint32_t I = 0, N = ExternalSource->GetNumExternalSelectors();
+ I != N; ++I) {
+ Selector Sel = ExternalSource->GetExternalSelector(I);
+ if (Sel.isNull() || MethodPool.count(Sel))
+ continue;
+
+ ReadMethodPool(Sel);
+ }
+ }
+
+ // Build the set of methods we can see.
+ typedef CodeCompletionResult Result;
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_Other);
+
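+ // If the user already wrote a return type, prefer selectors whose methods
+ // have a matching return type.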
+ if (ReturnTy)
+ Results.setPreferredType(GetTypeFromParser(ReturnTy).getNonReferenceType());
+
+ Results.EnterNewScope();
+ for (GlobalMethodPool::iterator M = MethodPool.begin(),
+ MEnd = MethodPool.end();
+ M != MEnd; ++M) {
+ for (ObjCMethodList *MethList = IsInstanceMethod ? &M->second.first :
+ &M->second.second;
+ MethList && MethList->Method;
+ MethList = MethList->Next) {
+ if (!isAcceptableObjCMethod(MethList->Method, MK_Any, SelIdents,
+ NumSelIdents))
+ continue;
+
+ if (AtParameterName) {
+ // Suggest parameter names we've seen before.
+ if (NumSelIdents && NumSelIdents <= MethList->Method->param_size()) {
+ ParmVarDecl *Param = MethList->Method->param_begin()[NumSelIdents-1];
+ if (Param->getIdentifier()) {
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ Param->getIdentifier()->getName()));
+ Results.AddResult(Builder.TakeString());
+ }
+ }
+
+ continue;
+ }
+
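+ // Otherwise, suggest the method itself, starting the completion at the
+ // selector piece currently being typed.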
+ Result R(MethList->Method, 0);
+ R.StartParameter = NumSelIdents;
+ R.AllParametersAreInformative = false;
+ R.DeclaringEntity = true;
+ Results.MaybeAddResult(R, CurContext);
+ }
+ }
+
+ Results.ExitScope();
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_Other,
+ Results.data(),Results.size());
+}
+
+void Sema::CodeCompletePreprocessorDirective(bool InConditional) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_PreprocessorDirective);
+ Results.EnterNewScope();
+
+ // #if <condition>
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
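+ // The same builder is reused for every directive pattern below; each call to
+ // TakeString() hands off the accumulated chunks so the builder starts empty
+ // for the next pattern.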
+ Builder.AddTypedTextChunk("if");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("condition");
+ Results.AddResult(Builder.TakeString());
+
+ // #ifdef <macro>
+ Builder.AddTypedTextChunk("ifdef");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("macro");
+ Results.AddResult(Builder.TakeString());
+
+ // #ifndef <macro>
+ Builder.AddTypedTextChunk("ifndef");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("macro");
+ Results.AddResult(Builder.TakeString());
+
+ if (InConditional) {
+ // #elif <condition>
+ Builder.AddTypedTextChunk("elif");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("condition");
+ Results.AddResult(Builder.TakeString());
+
+ // #else
+ Builder.AddTypedTextChunk("else");
+ Results.AddResult(Builder.TakeString());
+
+ // #endif
+ Builder.AddTypedTextChunk("endif");
+ Results.AddResult(Builder.TakeString());
+ }
+
+ // #include "header"
+ Builder.AddTypedTextChunk("include");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("\"");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk("\"");
+ Results.AddResult(Builder.TakeString());
+
+ // #include <header>
+ Builder.AddTypedTextChunk("include");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("<");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk(">");
+ Results.AddResult(Builder.TakeString());
+
+ // #define <macro>
+ Builder.AddTypedTextChunk("define");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("macro");
+ Results.AddResult(Builder.TakeString());
+
+ // #define <macro>(<args>)
+ Builder.AddTypedTextChunk("define");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("macro");
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("args");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Builder.TakeString());
+
+ // #undef <macro>
+ Builder.AddTypedTextChunk("undef");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("macro");
+ Results.AddResult(Builder.TakeString());
+
+ // #line <number>
+ Builder.AddTypedTextChunk("line");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("number");
+ Results.AddResult(Builder.TakeString());
+
+ // #line <number> "filename"
+ Builder.AddTypedTextChunk("line");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("number");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("\"");
+ Builder.AddPlaceholderChunk("filename");
+ Builder.AddTextChunk("\"");
+ Results.AddResult(Builder.TakeString());
+
+ // #error <message>
+ Builder.AddTypedTextChunk("error");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("message");
+ Results.AddResult(Builder.TakeString());
+
+ // #pragma <arguments>
+ Builder.AddTypedTextChunk("pragma");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("arguments");
+ Results.AddResult(Builder.TakeString());
+
+ if (getLangOpts().ObjC1) {
+ // #import "header"
+ Builder.AddTypedTextChunk("import");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("\"");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk("\"");
+ Results.AddResult(Builder.TakeString());
+
+ // #import <header>
+ Builder.AddTypedTextChunk("import");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("<");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk(">");
+ Results.AddResult(Builder.TakeString());
+ }
+
+ // #include_next "header"
+ Builder.AddTypedTextChunk("include_next");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("\"");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk("\"");
+ Results.AddResult(Builder.TakeString());
+
+ // #include_next <header>
+ Builder.AddTypedTextChunk("include_next");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddTextChunk("<");
+ Builder.AddPlaceholderChunk("header");
+ Builder.AddTextChunk(">");
+ Results.AddResult(Builder.TakeString());
+
+ // #warning <message>
+ Builder.AddTypedTextChunk("warning");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddPlaceholderChunk("message");
+ Results.AddResult(Builder.TakeString());
+
+ // Note: #ident and #sccs are such crazy anachronisms that we don't provide
+ // completions for them. And __include_macros is a Clang-internal extension
+ // that we don't want to encourage anyone to use.
+
+ // FIXME: we don't support #assert or #unassert, so don't suggest them.
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_PreprocessorDirective,
+ Results.data(), Results.size());
+}
+
+void Sema::CodeCompleteInPreprocessorConditionalExclusion(Scope *S) {
+ CodeCompleteOrdinaryName(S,
+ S->getFnParent()? Sema::PCC_RecoveryInFunction
+ : Sema::PCC_Namespace);
+}
+
+void Sema::CodeCompletePreprocessorMacroName(bool IsDefinition) {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ IsDefinition? CodeCompletionContext::CCC_MacroName
+ : CodeCompletionContext::CCC_MacroNameUse);
+ if (!IsDefinition && (!CodeCompleter || CodeCompleter->includeMacros())) {
+ // Add just the names of macros, not their arguments.
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Results.EnterNewScope();
+ for (Preprocessor::macro_iterator M = PP.macro_begin(),
+ MEnd = PP.macro_end();
+ M != MEnd; ++M) {
+ Builder.AddTypedTextChunk(Builder.getAllocator().CopyString(
+ M->first->getName()));
+ Results.AddResult(Builder.TakeString());
+ }
+ Results.ExitScope();
+ } else if (IsDefinition) {
+ // FIXME: Can we detect when the user just wrote an include guard above?
+ }
+
+ HandleCodeCompleteResults(this, CodeCompleter, Results.getCompletionContext(),
+ Results.data(), Results.size());
+}
+
+void Sema::CodeCompletePreprocessorExpression() {
+ ResultBuilder Results(*this, CodeCompleter->getAllocator(),
+ CodeCompleter->getCodeCompletionTUInfo(),
+ CodeCompletionContext::CCC_PreprocessorExpression);
+
+ if (!CodeCompleter || CodeCompleter->includeMacros())
+ AddMacroResults(PP, Results);
+
+ // defined (<macro>)
+ Results.EnterNewScope();
+ CodeCompletionBuilder Builder(Results.getAllocator(),
+ Results.getCodeCompletionTUInfo());
+ Builder.AddTypedTextChunk("defined");
+ Builder.AddChunk(CodeCompletionString::CK_HorizontalSpace);
+ Builder.AddChunk(CodeCompletionString::CK_LeftParen);
+ Builder.AddPlaceholderChunk("macro");
+ Builder.AddChunk(CodeCompletionString::CK_RightParen);
+ Results.AddResult(Builder.TakeString());
+ Results.ExitScope();
+
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_PreprocessorExpression,
+ Results.data(), Results.size());
+}
+
+void Sema::CodeCompletePreprocessorMacroArgument(Scope *S,
+ IdentifierInfo *Macro,
+ MacroInfo *MacroInfo,
+ unsigned Argument) {
+ // FIXME: In the future, we could provide "overload" results, much like we
+ // do for function calls.
+
+ // For now, just ignore this. There will be another code-completion callback
+ // for the expanded tokens.
+}
+
+void Sema::CodeCompleteNaturalLanguage() {
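+ // Natural-language regions (such as comments) have nothing useful to
+ // complete, so report an empty result set.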
+ HandleCodeCompleteResults(this, CodeCompleter,
+ CodeCompletionContext::CCC_NaturalLanguage,
+ 0, 0);
+}
+
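+/// Gather code-completion results for all visible top-level declarations and
+/// (optionally) macro names into \p Results.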
+void Sema::GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
+ CodeCompletionTUInfo &CCTUInfo,
+ SmallVectorImpl<CodeCompletionResult> &Results) {
+ ResultBuilder Builder(*this, Allocator, CCTUInfo,
+ CodeCompletionContext::CCC_Recovery);
+ if (!CodeCompleter || CodeCompleter->includeGlobals()) {
+ CodeCompletionDeclConsumer Consumer(Builder,
+ Context.getTranslationUnitDecl());
+ LookupVisibleDecls(Context.getTranslationUnitDecl(), LookupAnyName,
+ Consumer);
+ }
+
+ if (!CodeCompleter || CodeCompleter->includeMacros())
+ AddMacroResults(PP, Builder);
+
+ Results.clear();
+ Results.insert(Results.end(),
+ Builder.data(), Builder.data() + Builder.size());
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaConsumer.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaConsumer.cpp
new file mode 100644
index 0000000..d83a13e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaConsumer.cpp
@@ -0,0 +1,14 @@
+//===-- SemaConsumer.cpp - Abstract interface for AST semantics -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaConsumer.h"
+
+using namespace clang;
+
+void SemaConsumer::anchor() { }
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
new file mode 100644
index 0000000..8b314b5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDecl.cpp
@@ -0,0 +1,10461 @@
+//===--- SemaDecl.cpp - Semantic Analysis for Declarations ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/CXXFieldCollector.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "TypeLocBuilder.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+// FIXME: layering (ideally, Sema shouldn't be dependent on Lex APIs)
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Lex/ModuleLoader.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Triple.h"
+#include <algorithm>
+#include <cstring>
+#include <functional>
+using namespace clang;
+using namespace sema;
+
+Sema::DeclGroupPtrTy Sema::ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType) {
+ if (OwnedType) {
+ Decl *Group[2] = { OwnedType, Ptr };
+ return DeclGroupPtrTy::make(DeclGroupRef::Create(Context, Group, 2));
+ }
+
+ return DeclGroupPtrTy::make(DeclGroupRef(Ptr));
+}
+
+namespace {
+
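+// Typo-correction callback that accepts only candidates which name a type
+// (or an Objective-C interface), or which are keywords.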
+class TypeNameValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ TypeNameValidatorCCC(bool AllowInvalid) : AllowInvalidDecl(AllowInvalid) {
+ WantExpressionKeywords = false;
+ WantCXXNamedCasts = false;
+ WantRemainingKeywords = false;
+ }
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ if (NamedDecl *ND = candidate.getCorrectionDecl())
+ return (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND)) &&
+ (AllowInvalidDecl || !ND->isInvalidDecl());
+ else
+ return candidate.isKeyword();
+ }
+
+ private:
+ bool AllowInvalidDecl;
+};
+
+}
+
+/// \brief If the identifier refers to a type name within this scope,
+/// return the declaration of that type.
+///
+/// This routine performs ordinary name lookup of the identifier II
+/// within the given scope, with optional C++ scope specifier SS, to
+/// determine whether the name refers to a type. If so, returns an
+/// opaque pointer (actually a QualType) corresponding to that
+/// type. Otherwise, returns NULL.
+///
+/// If name lookup results in an ambiguity, this routine will complain
+/// and then return NULL.
+ParsedType Sema::getTypeName(IdentifierInfo &II, SourceLocation NameLoc,
+ Scope *S, CXXScopeSpec *SS,
+ bool isClassName, bool HasTrailingDot,
+ ParsedType ObjectTypePtr,
+ bool IsCtorOrDtorName,
+ bool WantNontrivialTypeSourceInfo,
+ IdentifierInfo **CorrectedII) {
+ // Determine where we will perform name lookup.
+ DeclContext *LookupCtx = 0;
+ if (ObjectTypePtr) {
+ QualType ObjectType = ObjectTypePtr.get();
+ if (ObjectType->isRecordType())
+ LookupCtx = computeDeclContext(ObjectType);
+ } else if (SS && SS->isNotEmpty()) {
+ LookupCtx = computeDeclContext(*SS, false);
+
+ if (!LookupCtx) {
+ if (isDependentScopeSpecifier(*SS)) {
+ // C++ [temp.res]p3:
+ // A qualified-id that refers to a type and in which the
+ // nested-name-specifier depends on a template-parameter (14.6.2)
+ // shall be prefixed by the keyword typename to indicate that the
+ // qualified-id denotes a type, forming an
+ // elaborated-type-specifier (7.1.5.3).
+ //
+ // We therefore do not perform any name lookup if the result would
+ // refer to a member of an unknown specialization.
+ if (!isClassName && !IsCtorOrDtorName)
+ return ParsedType();
+
+ // We know from the grammar that this name refers to a type,
+ // so build a dependent node to describe the type.
+ if (WantNontrivialTypeSourceInfo)
+ return ActOnTypenameType(S, SourceLocation(), *SS, II, NameLoc).get();
+
+ NestedNameSpecifierLoc QualifierLoc = SS->getWithLocInContext(Context);
+ QualType T =
+ CheckTypenameType(ETK_None, SourceLocation(), QualifierLoc,
+ II, NameLoc);
+
+ return ParsedType::make(T);
+ }
+
+ return ParsedType();
+ }
+
+ if (!LookupCtx->isDependentContext() &&
+ RequireCompleteDeclContext(*SS, LookupCtx))
+ return ParsedType();
+ }
+
+ // FIXME: LookupNestedNameSpecifierName isn't the right kind of
+ // lookup for class-names.
+ LookupNameKind Kind = isClassName ? LookupNestedNameSpecifierName :
+ LookupOrdinaryName;
+ LookupResult Result(*this, &II, NameLoc, Kind);
+ if (LookupCtx) {
+ // Perform "qualified" name lookup into the declaration context we
+ // computed, which is either the type of the base of a member access
+ // expression or the declaration context associated with a prior
+ // nested-name-specifier.
+ LookupQualifiedName(Result, LookupCtx);
+
+ if (ObjectTypePtr && Result.empty()) {
+ // C++ [basic.lookup.classref]p3:
+ // If the unqualified-id is ~type-name, the type-name is looked up
+ // in the context of the entire postfix-expression. If the type T of
+ // the object expression is of a class type C, the type-name is also
+ // looked up in the scope of class C. At least one of the lookups shall
+ // find a name that refers to (possibly cv-qualified) T.
+ LookupName(Result, S);
+ }
+ } else {
+ // Perform unqualified name lookup.
+ LookupName(Result, S);
+ }
+
+ NamedDecl *IIDecl = 0;
+ switch (Result.getResultKind()) {
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ if (CorrectedII) {
+ TypeNameValidatorCCC Validator(true);
+ TypoCorrection Correction = CorrectTypo(Result.getLookupNameInfo(),
+ Kind, S, SS, Validator);
+ IdentifierInfo *NewII = Correction.getCorrectionAsIdentifierInfo();
+ TemplateTy Template;
+ bool MemberOfUnknownSpecialization;
+ UnqualifiedId TemplateName;
+ TemplateName.setIdentifier(NewII, NameLoc);
+ NestedNameSpecifier *NNS = Correction.getCorrectionSpecifier();
+ CXXScopeSpec NewSS, *NewSSPtr = SS;
+ if (SS && NNS) {
+ NewSS.MakeTrivial(Context, NNS, SourceRange(NameLoc));
+ NewSSPtr = &NewSS;
+ }
+ if (Correction && (NNS || NewII != &II) &&
+ // Ignore a correction to a template type as the to-be-corrected
+ // identifier is not a template (typo correction for template names
+ // is handled elsewhere).
+ !(getLangOpts().CPlusPlus && NewSSPtr &&
+ isTemplateName(S, *NewSSPtr, false, TemplateName, ParsedType(),
+ false, Template, MemberOfUnknownSpecialization))) {
+ ParsedType Ty = getTypeName(*NewII, NameLoc, S, NewSSPtr,
+ isClassName, HasTrailingDot, ObjectTypePtr,
+ IsCtorOrDtorName,
+ WantNontrivialTypeSourceInfo);
+ if (Ty) {
+ std::string CorrectedStr(Correction.getAsString(getLangOpts()));
+ std::string CorrectedQuotedStr(
+ Correction.getQuoted(getLangOpts()));
+ Diag(NameLoc, diag::err_unknown_typename_suggest)
+ << Result.getLookupName() << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(SourceRange(NameLoc),
+ CorrectedStr);
+ if (NamedDecl *FirstDecl = Correction.getCorrectionDecl())
+ Diag(FirstDecl->getLocation(), diag::note_previous_decl)
+ << CorrectedQuotedStr;
+
+ if (SS && NNS)
+ SS->MakeTrivial(Context, NNS, SourceRange(NameLoc));
+ *CorrectedII = NewII;
+ return Ty;
+ }
+ }
+ }
+ // If typo correction failed or was not performed, fall through
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue:
+ Result.suppressDiagnostics();
+ return ParsedType();
+
+ case LookupResult::Ambiguous:
+ // Recover from type-hiding ambiguities by hiding the type. We'll
+ // do the lookup again when looking for an object, and we can
+ // diagnose the error then. If we don't do this, then the error
+ // about hiding the type will be immediately followed by an error
+ // that only makes sense if the identifier was treated like a type.
+ if (Result.getAmbiguityKind() == LookupResult::AmbiguousTagHiding) {
+ Result.suppressDiagnostics();
+ return ParsedType();
+ }
+
+ // Look to see if we have a type anywhere in the list of results.
+ for (LookupResult::iterator Res = Result.begin(), ResEnd = Result.end();
+ Res != ResEnd; ++Res) {
+ if (isa<TypeDecl>(*Res) || isa<ObjCInterfaceDecl>(*Res)) {
+ if (!IIDecl ||
+ (*Res)->getLocation().getRawEncoding() <
+ IIDecl->getLocation().getRawEncoding())
+ IIDecl = *Res;
+ }
+ }
+
+ if (!IIDecl) {
+ // None of the entities we found is a type, so there is no way
+ // to even assume that the result is a type. In this case, don't
+ // complain about the ambiguity. The parser will either try to
+ // perform this lookup again (e.g., as an object name), which
+ // will produce the ambiguity, or will complain that it expected
+ // a type name.
+ Result.suppressDiagnostics();
+ return ParsedType();
+ }
+
+ // We found a type within the ambiguous lookup; diagnose the
+ // ambiguity and then return that type. This might be the right
+ // answer, or it might not be, but it suppresses any attempt to
+ // perform the name lookup again.
+ break;
+
+ case LookupResult::Found:
+ IIDecl = Result.getFoundDecl();
+ break;
+ }
+
+ assert(IIDecl && "Didn't find decl");
+
+ QualType T;
+ if (TypeDecl *TD = dyn_cast<TypeDecl>(IIDecl)) {
+ DiagnoseUseOfDecl(IIDecl, NameLoc);
+
+ if (T.isNull())
+ T = Context.getTypeDeclType(TD);
+
+ // NOTE: avoid constructing an ElaboratedType(Loc) if this is a
+ // constructor or destructor name (in such a case, the scope specifier
+ // will be attached to the enclosing Expr or Decl node).
+ if (SS && SS->isNotEmpty() && !IsCtorOrDtorName) {
+ if (WantNontrivialTypeSourceInfo) {
+ // Construct a type with type-source information.
+ TypeLocBuilder Builder;
+ Builder.pushTypeSpec(T).setNameLoc(NameLoc);
+
+ T = getElaboratedType(ETK_None, *SS, T);
+ ElaboratedTypeLoc ElabTL = Builder.push<ElaboratedTypeLoc>(T);
+ ElabTL.setElaboratedKeywordLoc(SourceLocation());
+ ElabTL.setQualifierLoc(SS->getWithLocInContext(Context));
+ return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
+ } else {
+ T = getElaboratedType(ETK_None, *SS, T);
+ }
+ }
+ } else if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(IIDecl)) {
+ (void)DiagnoseUseOfDecl(IDecl, NameLoc);
+ if (!HasTrailingDot)
+ T = Context.getObjCInterfaceType(IDecl);
+ }
+
+ if (T.isNull()) {
+ // If it's not plausibly a type, suppress diagnostics.
+ Result.suppressDiagnostics();
+ return ParsedType();
+ }
+ return ParsedType::make(T);
+}
+
+/// isTagName() - This method is called *for error recovery purposes only*
+/// to determine if the specified name is a valid tag name ("struct foo"). If
+/// so, this returns the TST for the tag corresponding to it (TST_enum,
+/// TST_union, TST_struct, TST_class). This is used to diagnose cases in C
+/// where the user forgot to specify the tag.
+DeclSpec::TST Sema::isTagName(IdentifierInfo &II, Scope *S) {
+ // Do a tag name lookup in this scope.
+ LookupResult R(*this, &II, SourceLocation(), LookupTagName);
+ LookupName(R, S, false);
+ R.suppressDiagnostics();
+ if (R.getResultKind() == LookupResult::Found)
+ if (const TagDecl *TD = R.getAsSingle<TagDecl>()) {
+ switch (TD->getTagKind()) {
+ case TTK_Struct: return DeclSpec::TST_struct;
+ case TTK_Union: return DeclSpec::TST_union;
+ case TTK_Class: return DeclSpec::TST_class;
+ case TTK_Enum: return DeclSpec::TST_enum;
+ }
+ }
+
+ return DeclSpec::TST_unspecified;
+}
+
+/// isMicrosoftMissingTypename - In Microsoft mode, within class scope,
+/// if a CXXScopeSpec's type is equal to the type of one of the base classes
+/// then downgrade the missing typename error to a warning.
+/// This is needed for MSVC compatibility; Example:
+/// @code
+/// template<class T> class A {
+/// public:
+/// typedef int TYPE;
+/// };
+/// template<class T> class B : public A<T> {
+/// public:
+/// A<T>::TYPE a; // no typename required because A<T> is a base class.
+/// };
+/// @endcode
+bool Sema::isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S) {
+ if (CurContext->isRecord()) {
+ const Type *Ty = SS->getScopeRep()->getAsType();
+
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(CurContext);
+ for (CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin(),
+ BaseEnd = RD->bases_end(); Base != BaseEnd; ++Base)
+ if (Context.hasSameUnqualifiedType(QualType(Ty, 1), Base->getType()))
+ return true;
+ return S->isFunctionPrototypeScope();
+ }
+ return CurContext->isFunctionOrMethod() || S->isFunctionPrototypeScope();
+}
+
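+/// Diagnose an identifier that was expected to name a type but does not,
+/// suggesting a likely fix (a typo correction, a missing template-argument
+/// list, or a missing 'typename') when one can be determined.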
+bool Sema::DiagnoseUnknownTypeName(const IdentifierInfo &II,
+ SourceLocation IILoc,
+ Scope *S,
+ CXXScopeSpec *SS,
+ ParsedType &SuggestedType) {
+ // We don't have anything to suggest (yet).
+ SuggestedType = ParsedType();
+
+ // There may have been a typo in the name of the type. Look up typo
+ // results, in case we have something that we can suggest.
+ TypeNameValidatorCCC Validator(false);
+ if (TypoCorrection Corrected = CorrectTypo(DeclarationNameInfo(&II, IILoc),
+ LookupOrdinaryName, S, SS,
+ Validator)) {
+ std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOpts()));
+
+ if (Corrected.isKeyword()) {
+ // We corrected to a keyword.
+ // FIXME: Actually recover with the keyword we suggest, and emit a fix-it.
+ Diag(IILoc, diag::err_unknown_typename_suggest)
+ << &II << CorrectedQuotedStr;
+ } else {
+ NamedDecl *Result = Corrected.getCorrectionDecl();
+ // We found a similarly-named type or interface; suggest that.
+ if (!SS || !SS->isSet())
+ Diag(IILoc, diag::err_unknown_typename_suggest)
+ << &II << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(SourceRange(IILoc), CorrectedStr);
+ else if (DeclContext *DC = computeDeclContext(*SS, false))
+ Diag(IILoc, diag::err_unknown_nested_typename_suggest)
+ << &II << DC << CorrectedQuotedStr << SS->getRange()
+ << FixItHint::CreateReplacement(SourceRange(IILoc), CorrectedStr);
+ else
+ llvm_unreachable("could not have corrected a typo here");
+
+ Diag(Result->getLocation(), diag::note_previous_decl)
+ << CorrectedQuotedStr;
+
+ SuggestedType = getTypeName(*Result->getIdentifier(), IILoc, S, SS,
+ false, false, ParsedType(),
+ /*IsCtorOrDtorName=*/false,
+ /*WantNontrivialTypeSourceInfo=*/true);
+ }
+ return true;
+ }
+
+ if (getLangOpts().CPlusPlus) {
+ // See if II is a class template that the user forgot to pass arguments to.
+ UnqualifiedId Name;
+ Name.setIdentifier(&II, IILoc);
+ CXXScopeSpec EmptySS;
+ TemplateTy TemplateResult;
+ bool MemberOfUnknownSpecialization;
+ if (isTemplateName(S, SS ? *SS : EmptySS, /*hasTemplateKeyword=*/false,
+ Name, ParsedType(), true, TemplateResult,
+ MemberOfUnknownSpecialization) == TNK_Type_template) {
+ TemplateName TplName = TemplateResult.getAsVal<TemplateName>();
+ Diag(IILoc, diag::err_template_missing_args) << TplName;
+ if (TemplateDecl *TplDecl = TplName.getAsTemplateDecl()) {
+ Diag(TplDecl->getLocation(), diag::note_template_decl_here)
+ << TplDecl->getTemplateParameters()->getSourceRange();
+ }
+ return true;
+ }
+ }
+
+ // FIXME: Should we move the logic that tries to recover from a missing tag
+ // (struct, union, enum) from Parser::ParseImplicitInt here, instead?
+
+ if (!SS || (!SS->isSet() && !SS->isInvalid()))
+ Diag(IILoc, diag::err_unknown_typename) << &II;
+ else if (DeclContext *DC = computeDeclContext(*SS, false))
+ Diag(IILoc, diag::err_typename_nested_not_found)
+ << &II << DC << SS->getRange();
+ else if (isDependentScopeSpecifier(*SS)) {
+ unsigned DiagID = diag::err_typename_missing;
+ if (getLangOpts().MicrosoftMode && isMicrosoftMissingTypename(SS, S))
+ DiagID = diag::warn_typename_missing;
+
+ Diag(SS->getRange().getBegin(), DiagID)
+ << (NestedNameSpecifier *)SS->getScopeRep() << II.getName()
+ << SourceRange(SS->getRange().getBegin(), IILoc)
+ << FixItHint::CreateInsertion(SS->getRange().getBegin(), "typename ");
+ SuggestedType = ActOnTypenameType(S, SourceLocation(), *SS, II, IILoc)
+ .get();
+ } else {
+ assert(SS && SS->isInvalid() &&
+ "Invalid scope specifier has already been diagnosed");
+ }
+
+ return true;
+}
+
+/// \brief Determine whether the given result set contains either a type name
+/// (including an Objective-C class name) or, in C++ when the next token is a
+/// '<', a template name.
+static bool isResultTypeOrTemplate(LookupResult &R, const Token &NextToken) {
+ bool CheckTemplate = R.getSema().getLangOpts().CPlusPlus &&
+ NextToken.is(tok::less);
+
+ for (LookupResult::iterator I = R.begin(), IEnd = R.end(); I != IEnd; ++I) {
+ if (isa<TypeDecl>(*I) || isa<ObjCInterfaceDecl>(*I))
+ return true;
+
+ if (CheckTemplate && isa<TemplateDecl>(*I))
+ return true;
+ }
+
+ return false;
+}
+
+Sema::NameClassification Sema::ClassifyName(Scope *S,
+ CXXScopeSpec &SS,
+ IdentifierInfo *&Name,
+ SourceLocation NameLoc,
+ const Token &NextToken) {
+ DeclarationNameInfo NameInfo(Name, NameLoc);
+ ObjCMethodDecl *CurMethod = getCurMethodDecl();
+
+ if (NextToken.is(tok::coloncolon)) {
+ BuildCXXNestedNameSpecifier(S, *Name, NameLoc, NextToken.getLocation(),
+ QualType(), false, SS, 0, false);
+ }
+
+ LookupResult Result(*this, Name, NameLoc, LookupOrdinaryName);
+ LookupParsedName(Result, S, &SS, !CurMethod);
+
+ // Perform lookup for Objective-C instance variables (including automatically
+ // synthesized instance variables), if we're in an Objective-C method.
+ // FIXME: This lookup really, really needs to be folded in to the normal
+ // unqualified lookup mechanism.
+ if (!SS.isSet() && CurMethod && !isResultTypeOrTemplate(Result, NextToken)) {
+ ExprResult E = LookupInObjCMethod(Result, S, Name, true);
+ if (E.get() || E.isInvalid())
+ return E;
+ }
+
+ bool SecondTry = false;
+ bool IsFilteredTemplateName = false;
+
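+ // After a successful typo correction we jump back here to reclassify the
+ // corrected name; SecondTry ensures correction is attempted at most once.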
+Corrected:
+ switch (Result.getResultKind()) {
+ case LookupResult::NotFound:
+ // If an unqualified-id is followed by a '(', then we have a function
+ // call.
+ if (!SS.isSet() && NextToken.is(tok::l_paren)) {
+ // In C++, this is an ADL-only call.
+ // FIXME: Reference?
+ if (getLangOpts().CPlusPlus)
+ return BuildDeclarationNameExpr(SS, Result, /*ADL=*/true);
+
+ // C90 6.3.2.2:
+ // If the expression that precedes the parenthesized argument list in a
+ // function call consists solely of an identifier, and if no
+ // declaration is visible for this identifier, the identifier is
+ // implicitly declared exactly as if, in the innermost block containing
+ // the function call, the declaration
+ //
+ // extern int identifier ();
+ //
+ // appeared.
+ //
+ // We also allow this in C99 as an extension.
+ if (NamedDecl *D = ImplicitlyDefineFunction(NameLoc, *Name, S)) {
+ Result.addDecl(D);
+ Result.resolveKind();
+ return BuildDeclarationNameExpr(SS, Result, /*ADL=*/false);
+ }
+ }
+
+ // In C, we first see whether there is a tag type by the same name, in
+ // which case it's likely that the user just forgot to write "enum",
+ // "struct", or "union".
+ if (!getLangOpts().CPlusPlus && !SecondTry) {
+ Result.clear(LookupTagName);
+ LookupParsedName(Result, S, &SS);
+ if (TagDecl *Tag = Result.getAsSingle<TagDecl>()) {
+ const char *TagName = 0;
+ const char *FixItTagName = 0;
+ switch (Tag->getTagKind()) {
+ case TTK_Class:
+ TagName = "class";
+ FixItTagName = "class ";
+ break;
+
+ case TTK_Enum:
+ TagName = "enum";
+ FixItTagName = "enum ";
+ break;
+
+ case TTK_Struct:
+ TagName = "struct";
+ FixItTagName = "struct ";
+ break;
+
+ case TTK_Union:
+ TagName = "union";
+ FixItTagName = "union ";
+ break;
+ }
+
+ Diag(NameLoc, diag::err_use_of_tag_name_without_tag)
+ << Name << TagName << getLangOpts().CPlusPlus
+ << FixItHint::CreateInsertion(NameLoc, FixItTagName);
+ break;
+ }
+
+ Result.clear(LookupOrdinaryName);
+ }
+
+ // Perform typo correction to determine if there is another name that is
+ // close to this name.
+ if (!SecondTry) {
+ SecondTry = true;
+ CorrectionCandidateCallback DefaultValidator;
+ if (TypoCorrection Corrected = CorrectTypo(Result.getLookupNameInfo(),
+ Result.getLookupKind(), S,
+ &SS, DefaultValidator)) {
+ unsigned UnqualifiedDiag = diag::err_undeclared_var_use_suggest;
+ unsigned QualifiedDiag = diag::err_no_member_suggest;
+ std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOpts()));
+
+ NamedDecl *FirstDecl = Corrected.getCorrectionDecl();
+ NamedDecl *UnderlyingFirstDecl
+ = FirstDecl? FirstDecl->getUnderlyingDecl() : 0;
+ if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
+ UnderlyingFirstDecl && isa<TemplateDecl>(UnderlyingFirstDecl)) {
+ UnqualifiedDiag = diag::err_no_template_suggest;
+ QualifiedDiag = diag::err_no_member_template_suggest;
+ } else if (UnderlyingFirstDecl &&
+ (isa<TypeDecl>(UnderlyingFirstDecl) ||
+ isa<ObjCInterfaceDecl>(UnderlyingFirstDecl) ||
+ isa<ObjCCompatibleAliasDecl>(UnderlyingFirstDecl))) {
+ UnqualifiedDiag = diag::err_unknown_typename_suggest;
+ QualifiedDiag = diag::err_unknown_nested_typename_suggest;
+ }
+
+ if (SS.isEmpty())
+ Diag(NameLoc, UnqualifiedDiag)
+ << Name << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(NameLoc, CorrectedStr);
+ else
+ Diag(NameLoc, QualifiedDiag)
+ << Name << computeDeclContext(SS, false) << CorrectedQuotedStr
+ << SS.getRange()
+ << FixItHint::CreateReplacement(NameLoc, CorrectedStr);
+
+ // Update the name, so that the caller has the new name.
+ Name = Corrected.getCorrectionAsIdentifierInfo();
+
+ // Typo correction corrected to a keyword.
+ if (Corrected.isKeyword())
+ return Corrected.getCorrectionAsIdentifierInfo();
+
+ // Also update the LookupResult...
+ // FIXME: This should probably go away at some point
+ Result.clear();
+ Result.setLookupName(Corrected.getCorrection());
+ if (FirstDecl) {
+ Result.addDecl(FirstDecl);
+ Diag(FirstDecl->getLocation(), diag::note_previous_decl)
+ << CorrectedQuotedStr;
+ }
+
+ // If we found an Objective-C instance variable, let
+ // LookupInObjCMethod build the appropriate expression to
+ // reference the ivar.
+ // FIXME: This is a gross hack.
+ if (ObjCIvarDecl *Ivar = Result.getAsSingle<ObjCIvarDecl>()) {
+ Result.clear();
+ ExprResult E(LookupInObjCMethod(Result, S, Ivar->getIdentifier()));
+ return move(E);
+ }
+
+ goto Corrected;
+ }
+ }
+
+ // We failed to correct; just fall through and let the parser deal with it.
+ Result.suppressDiagnostics();
+ return NameClassification::Unknown();
+
+ case LookupResult::NotFoundInCurrentInstantiation: {
+ // We performed name lookup into the current instantiation, and there were
+ // dependent bases, so we treat this result the same way as any other
+ // dependent nested-name-specifier.
+
+ // C++ [temp.res]p2:
+ // A name used in a template declaration or definition and that is
+ // dependent on a template-parameter is assumed not to name a type
+ // unless the applicable name lookup finds a type name or the name is
+ // qualified by the keyword typename.
+ //
+ // FIXME: If the next token is '<', we might want to ask the parser to
+ // perform some heroics to see if we actually have a
+ // template-argument-list, which would indicate a missing 'template'
+ // keyword here.
+ return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
+ NameInfo, /*TemplateArgs=*/0);
+ }
+
+ case LookupResult::Found:
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue:
+ break;
+
+ case LookupResult::Ambiguous:
+ if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
+ hasAnyAcceptableTemplateNames(Result)) {
+ // C++ [temp.local]p3:
+ // A lookup that finds an injected-class-name (10.2) can result in an
+ // ambiguity in certain cases (for example, if it is found in more than
+ // one base class). If all of the injected-class-names that are found
+ // refer to specializations of the same class template, and if the name
+ // is followed by a template-argument-list, the reference refers to the
+ // class template itself and not a specialization thereof, and is not
+ // ambiguous.
+ //
+ // This filtering can make an ambiguous result into an unambiguous one,
+ // so try again after filtering out template names.
+ FilterAcceptableTemplateNames(Result);
+ if (!Result.isAmbiguous()) {
+ IsFilteredTemplateName = true;
+ break;
+ }
+ }
+
+ // Diagnose the ambiguity and return an error.
+ return NameClassification::Error();
+ }
+
+ if (getLangOpts().CPlusPlus && NextToken.is(tok::less) &&
+ (IsFilteredTemplateName || hasAnyAcceptableTemplateNames(Result))) {
+ // C++ [temp.names]p3:
+ // After name lookup (3.4) finds that a name is a template-name or that
+ // an operator-function-id or a literal-operator-id refers to a set of
+ // overloaded functions any member of which is a function template if
+ // this is followed by a <, the < is always taken as the delimiter of a
+ // template-argument-list and never as the less-than operator.
+ if (!IsFilteredTemplateName)
+ FilterAcceptableTemplateNames(Result);
+
+ if (!Result.empty()) {
+ bool IsFunctionTemplate;
+ TemplateName Template;
+ if (Result.end() - Result.begin() > 1) {
+ IsFunctionTemplate = true;
+ Template = Context.getOverloadedTemplateName(Result.begin(),
+ Result.end());
+ } else {
+ TemplateDecl *TD
+ = cast<TemplateDecl>((*Result.begin())->getUnderlyingDecl());
+ IsFunctionTemplate = isa<FunctionTemplateDecl>(TD);
+
+ if (SS.isSet() && !SS.isInvalid())
+ Template = Context.getQualifiedTemplateName(SS.getScopeRep(),
+ /*TemplateKeyword=*/false,
+ TD);
+ else
+ Template = TemplateName(TD);
+ }
+
+ if (IsFunctionTemplate) {
+ // Function templates always go through overload resolution, at which
+ // point we'll perform the various checks (e.g., accessibility) we need
+ // to based on which function we selected.
+ Result.suppressDiagnostics();
+
+ return NameClassification::FunctionTemplate(Template);
+ }
+
+ return NameClassification::TypeTemplate(Template);
+ }
+ }
+
+ NamedDecl *FirstDecl = (*Result.begin())->getUnderlyingDecl();
+ if (TypeDecl *Type = dyn_cast<TypeDecl>(FirstDecl)) {
+ DiagnoseUseOfDecl(Type, NameLoc);
+ QualType T = Context.getTypeDeclType(Type);
+ return ParsedType::make(T);
+ }
+
+ ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(FirstDecl);
+ if (!Class) {
+ // FIXME: It's unfortunate that we don't have a Type node for handling this.
+ if (ObjCCompatibleAliasDecl *Alias
+ = dyn_cast<ObjCCompatibleAliasDecl>(FirstDecl))
+ Class = Alias->getClassInterface();
+ }
+
+ if (Class) {
+ DiagnoseUseOfDecl(Class, NameLoc);
+
+ if (NextToken.is(tok::period)) {
+ // Interface. <something> is parsed as a property reference expression.
+ // Just return "unknown" as a fall-through for now.
+ Result.suppressDiagnostics();
+ return NameClassification::Unknown();
+ }
+
+ QualType T = Context.getObjCInterfaceType(Class);
+ return ParsedType::make(T);
+ }
+
+ if (!Result.empty() && (*Result.begin())->isCXXClassMember())
+ return BuildPossibleImplicitMemberExpr(SS, SourceLocation(), Result, 0);
+
+ bool ADL = UseArgumentDependentLookup(SS, Result, NextToken.is(tok::l_paren));
+ return BuildDeclarationNameExpr(SS, Result, ADL);
+}
+
+// Determines the context to return to after temporarily entering a
+// context. This depends in an unnecessarily complicated way on the
+// exact ordering of callbacks from the parser.
+DeclContext *Sema::getContainingDC(DeclContext *DC) {
+
+ // Functions defined inline within classes aren't parsed until we've
+ // finished parsing the top-level class, so the top-level class is
+ // the context we'll need to return to.
+ if (isa<FunctionDecl>(DC)) {
+ DC = DC->getLexicalParent();
+
+ // A function not defined within a class will always return to its
+ // lexical context.
+ if (!isa<CXXRecordDecl>(DC))
+ return DC;
+
+ // A C++ inline method/friend is parsed *after* the topmost class
+ // it was declared in is fully parsed ("complete"); the topmost
+ // class is the context we need to return to.
+ while (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC->getLexicalParent()))
+ DC = RD;
+
+ // Return the declaration context of the topmost class the inline method is
+ // declared in.
+ return DC;
+ }
+
+ return DC->getLexicalParent();
+}
+
+void Sema::PushDeclContext(Scope *S, DeclContext *DC) {
+ assert(getContainingDC(DC) == CurContext &&
+ "The next DeclContext should be lexically contained in the current one.");
+ CurContext = DC;
+ S->setEntity(DC);
+}
+
+void Sema::PopDeclContext() {
+ assert(CurContext && "DeclContext imbalance!");
+
+ CurContext = getContainingDC(CurContext);
+ assert(CurContext && "Popped translation unit!");
+}
+
+/// EnterDeclaratorContext - Used when we must lookup names in the context
+/// of a declarator's nested name specifier.
+///
+void Sema::EnterDeclaratorContext(Scope *S, DeclContext *DC) {
+ // C++0x [basic.lookup.unqual]p13:
+ // A name used in the definition of a static data member of class
+ // X (after the qualified-id of the static member) is looked up as
+ // if the name was used in a member function of X.
+ // C++0x [basic.lookup.unqual]p14:
+ // If a variable member of a namespace is defined outside of the
+ // scope of its namespace then any name used in the definition of
+ // the variable member (after the declarator-id) is looked up as
+ // if the definition of the variable member occurred in its
+ // namespace.
+ // Both of these imply that we should push a scope whose context
+ // is the semantic context of the declaration. We can't use
+ // PushDeclContext here because that context is not necessarily
+ // lexically contained in the current context. Fortunately,
+ // the containing scope should have the appropriate information.
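+ //
+ // For example (an illustrative sketch with hypothetical names):
+ //   struct X { static const int N = 3; static int Arr[N]; };
+ //   int X::Arr[N];   // 'N' after the declarator-id is looked up in X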
+
+ assert(!S->getEntity() && "scope already has entity");
+
+#ifndef NDEBUG
+ Scope *Ancestor = S->getParent();
+ while (!Ancestor->getEntity()) Ancestor = Ancestor->getParent();
+ assert(Ancestor->getEntity() == CurContext && "ancestor context mismatch");
+#endif
+
+ CurContext = DC;
+ S->setEntity(DC);
+}
+
+void Sema::ExitDeclaratorContext(Scope *S) {
+ assert(S->getEntity() == CurContext && "Context imbalance!");
+
+ // Switch back to the lexical context. The safety of this is
+ // enforced by an assert in EnterDeclaratorContext.
+ Scope *Ancestor = S->getParent();
+ while (!Ancestor->getEntity()) Ancestor = Ancestor->getParent();
+ CurContext = (DeclContext*) Ancestor->getEntity();
+
+ // We don't need to do anything with the scope, which is going to
+ // disappear.
+}
+
+
+void Sema::ActOnReenterFunctionContext(Scope* S, Decl *D) {
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (FunctionTemplateDecl *TFD = dyn_cast_or_null<FunctionTemplateDecl>(D)) {
+ // We assume that the caller has already called
+ // ActOnReenterTemplateScope
+ FD = TFD->getTemplatedDecl();
+ }
+ if (!FD)
+ return;
+
+ // Same implementation as PushDeclContext, but enters the context
+ // from the lexical parent, rather than the top-level class.
+ assert(CurContext == FD->getLexicalParent() &&
+ "The next DeclContext should be lexically contained in the current one.");
+ CurContext = FD;
+ S->setEntity(CurContext);
+
+ for (unsigned P = 0, NumParams = FD->getNumParams(); P < NumParams; ++P) {
+ ParmVarDecl *Param = FD->getParamDecl(P);
+ // If the parameter has an identifier, then add it to the scope
+ if (Param->getIdentifier()) {
+ S->AddDecl(Param);
+ IdResolver.AddDecl(Param);
+ }
+ }
+}
+
+
+void Sema::ActOnExitFunctionContext() {
+ // Same implementation as PopDeclContext, but returns to the lexical parent,
+ // rather than the top-level class.
+ assert(CurContext && "DeclContext imbalance!");
+ CurContext = CurContext->getLexicalParent();
+ assert(CurContext && "Popped translation unit!");
+}
+
+
+/// \brief Determine whether we allow overloading of the function
+/// PrevDecl with another declaration.
+///
+/// This routine determines whether overloading is possible, not
+/// whether some new function is actually an overload. It will return
+/// true in C++ (where we can always provide overloads) or, as an
+/// extension, in C when the previous function is already an
+/// overloaded function declaration or has the "overloadable"
+/// attribute.
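+///
+/// For example, in C (an illustrative sketch; the declarations are
+/// hypothetical, not taken from this file):
+/// @code
+///   void f(int)   __attribute__((overloadable));
+///   void f(float) __attribute__((overloadable)); // OK: previous f allows it
+/// @endcode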
+static bool AllowOverloadingOfFunction(LookupResult &Previous,
+ ASTContext &Context) {
+ if (Context.getLangOpts().CPlusPlus)
+ return true;
+
+ if (Previous.getResultKind() == LookupResult::FoundOverloaded)
+ return true;
+
+ return (Previous.getResultKind() == LookupResult::Found
+ && Previous.getFoundDecl()->hasAttr<OverloadableAttr>());
+}
+
+/// Add this decl to the scope shadowed decl chains.
+void Sema::PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext) {
+ // Move up the scope chain until we find the nearest enclosing
+ // non-transparent context. The declaration will be introduced into this
+ // scope.
+ while (S->getEntity() &&
+ ((DeclContext *)S->getEntity())->isTransparentContext())
+ S = S->getParent();
+
+ // Add scoped declarations into their context, so that they can be
+ // found later. Declarations without a context won't be inserted
+ // into any context.
+ if (AddToContext)
+ CurContext->addDecl(D);
+
+ // Out-of-line definitions shouldn't be pushed into scope in C++.
+ // Out-of-line variable and function definitions shouldn't even be pushed in C.
+ if ((getLangOpts().CPlusPlus || isa<VarDecl>(D) || isa<FunctionDecl>(D)) &&
+ D->isOutOfLine() &&
+ !D->getDeclContext()->getRedeclContext()->Equals(
+ D->getLexicalDeclContext()->getRedeclContext()))
+ return;
+
+ // Template instantiations should also not be pushed into scope.
+ if (isa<FunctionDecl>(D) &&
+ cast<FunctionDecl>(D)->isFunctionTemplateSpecialization())
+ return;
+
+ // If this declaration replaces anything in the current scope, remove the
+ // replaced declaration from the scope and the identifier resolver.
+ IdentifierResolver::iterator I = IdResolver.begin(D->getDeclName()),
+ IEnd = IdResolver.end();
+ for (; I != IEnd; ++I) {
+ if (S->isDeclScope(*I) && D->declarationReplaces(*I)) {
+ S->RemoveDecl(*I);
+ IdResolver.RemoveDecl(*I);
+
+ // Should only need to replace one decl.
+ break;
+ }
+ }
+
+ S->AddDecl(D);
+
+ if (isa<LabelDecl>(D) && !cast<LabelDecl>(D)->isGnuLocal()) {
+ // Implicitly-generated labels may end up getting generated in an order that
+ // isn't strictly lexical, which breaks name lookup. Be careful to insert
+ // the label at the appropriate place in the identifier chain.
+ for (I = IdResolver.begin(D->getDeclName()); I != IEnd; ++I) {
+ DeclContext *IDC = (*I)->getLexicalDeclContext()->getRedeclContext();
+ if (IDC == CurContext) {
+ if (!S->isDeclScope(*I))
+ continue;
+ } else if (IDC->Encloses(CurContext))
+ break;
+ }
+
+ IdResolver.InsertDeclAfter(I, D);
+ } else {
+ IdResolver.AddDecl(D);
+ }
+}
+
+void Sema::pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name) {
+ if (IdResolver.tryAddTopLevelDecl(D, Name) && TUScope)
+ TUScope->AddDecl(D);
+}
+
+bool Sema::isDeclInScope(NamedDecl *&D, DeclContext *Ctx, Scope *S,
+ bool ExplicitInstantiationOrSpecialization) {
+ return IdResolver.isDeclInScope(D, Ctx, Context, S,
+ ExplicitInstantiationOrSpecialization);
+}
+
+Scope *Sema::getScopeForDeclContext(Scope *S, DeclContext *DC) {
+ DeclContext *TargetDC = DC->getPrimaryContext();
+ do {
+ if (DeclContext *ScopeDC = (DeclContext*) S->getEntity())
+ if (ScopeDC->getPrimaryContext() == TargetDC)
+ return S;
+ } while ((S = S->getParent()));
+
+ return 0;
+}
+
+static bool isOutOfScopePreviousDeclaration(NamedDecl *,
+ DeclContext*,
+ ASTContext&);
+
+/// Filters out lookup results that don't fall within the given scope
+/// as determined by isDeclInScope.
+void Sema::FilterLookupForScope(LookupResult &R,
+ DeclContext *Ctx, Scope *S,
+ bool ConsiderLinkage,
+ bool ExplicitInstantiationOrSpecialization) {
+ LookupResult::Filter F = R.makeFilter();
+ while (F.hasNext()) {
+ NamedDecl *D = F.next();
+
+ if (isDeclInScope(D, Ctx, S, ExplicitInstantiationOrSpecialization))
+ continue;
+
+ if (ConsiderLinkage &&
+ isOutOfScopePreviousDeclaration(D, Ctx, Context))
+ continue;
+
+ F.erase();
+ }
+
+ F.done();
+}
+
+static bool isUsingDecl(NamedDecl *D) {
+ return isa<UsingShadowDecl>(D) ||
+ isa<UnresolvedUsingTypenameDecl>(D) ||
+ isa<UnresolvedUsingValueDecl>(D);
+}
+
+/// Removes using shadow declarations from the lookup results.
+static void RemoveUsingDecls(LookupResult &R) {
+ LookupResult::Filter F = R.makeFilter();
+ while (F.hasNext())
+ if (isUsingDecl(F.next()))
+ F.erase();
+
+ F.done();
+}
+
+/// \brief Check for this common pattern:
+/// @code
+/// class S {
+/// S(const S&); // DO NOT IMPLEMENT
+/// void operator=(const S&); // DO NOT IMPLEMENT
+/// };
+/// @endcode
+static bool IsDisallowedCopyOrAssign(const CXXMethodDecl *D) {
+ // FIXME: Should check for private access too but access is set after we get
+ // the decl here.
+ if (D->doesThisDeclarationHaveABody())
+ return false;
+
+ if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
+ return CD->isCopyConstructor();
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ return Method->isCopyAssignmentOperator();
+ return false;
+}
+
+bool Sema::ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const {
+ assert(D);
+
+ if (D->isInvalidDecl() || D->isUsed() || D->hasAttr<UnusedAttr>())
+ return false;
+
+ // Ignore class templates.
+ if (D->getDeclContext()->isDependentContext() ||
+ D->getLexicalDeclContext()->isDependentContext())
+ return false;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
+ return false;
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ if (MD->isVirtual() || IsDisallowedCopyOrAssign(MD))
+ return false;
+ } else {
+ // 'static inline' functions are used in headers; don't warn.
+ if (FD->getStorageClass() == SC_Static &&
+ FD->isInlineSpecified())
+ return false;
+ }
+
+ if (FD->doesThisDeclarationHaveABody() &&
+ Context.DeclMustBeEmitted(FD))
+ return false;
+ } else if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (!VD->isFileVarDecl() ||
+ VD->getType().isConstant(Context) ||
+ Context.DeclMustBeEmitted(VD))
+ return false;
+
+ if (VD->isStaticDataMember() &&
+ VD->getTemplateSpecializationKind() == TSK_ImplicitInstantiation)
+ return false;
+
+ } else {
+ return false;
+ }
+
+ // Only warn for unused decls internal to the translation unit.
+ if (D->getLinkage() == ExternalLinkage)
+ return false;
+
+ return true;
+}
+
+void Sema::MarkUnusedFileScopedDecl(const DeclaratorDecl *D) {
+ if (!D)
+ return;
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ const FunctionDecl *First = FD->getFirstDeclaration();
+ if (FD != First && ShouldWarnIfUnusedFileScopedDecl(First))
+ return; // First should already be in the vector.
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ const VarDecl *First = VD->getFirstDeclaration();
+ if (VD != First && ShouldWarnIfUnusedFileScopedDecl(First))
+ return; // First should already be in the vector.
+ }
+
+ if (ShouldWarnIfUnusedFileScopedDecl(D))
+ UnusedFileScopedDecls.push_back(D);
+}
+
+static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
+ if (D->isInvalidDecl())
+ return false;
+
+ if (D->isReferenced() || D->isUsed() || D->hasAttr<UnusedAttr>())
+ return false;
+
+ if (isa<LabelDecl>(D))
+ return true;
+
+ // White-list anything that isn't a local variable.
+ if (!isa<VarDecl>(D) || isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D) ||
+ !D->getDeclContext()->isFunctionOrMethod())
+ return false;
+
+ // Types of valid local variables should be complete, so this should succeed.
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+
+ // White-list anything with an __attribute__((unused)) type.
+ QualType Ty = VD->getType();
+
+ // Only look at the outermost level of typedef.
+ if (const TypedefType *TT = dyn_cast<TypedefType>(Ty)) {
+ if (TT->getDecl()->hasAttr<UnusedAttr>())
+ return false;
+ }
+
+ // If we failed to complete the type for some reason, or if the type is
+ // dependent, don't diagnose the variable.
+ if (Ty->isIncompleteType() || Ty->isDependentType())
+ return false;
+
+ if (const TagType *TT = Ty->getAs<TagType>()) {
+ const TagDecl *Tag = TT->getDecl();
+ if (Tag->hasAttr<UnusedAttr>())
+ return false;
+
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Tag)) {
+ if (!RD->hasTrivialDestructor())
+ return false;
+
+ if (const Expr *Init = VD->getInit()) {
+ const CXXConstructExpr *Construct =
+ dyn_cast<CXXConstructExpr>(Init);
+ if (Construct && !Construct->isElidable()) {
+ CXXConstructorDecl *CD = Construct->getConstructor();
+ if (!CD->isTrivial())
+ return false;
+ }
+ }
+ }
+ }
+
+ // TODO: __attribute__((unused)) templates?
+ }
+
+ return true;
+}
+
+static void GenerateFixForUnusedDecl(const NamedDecl *D, ASTContext &Ctx,
+ FixItHint &Hint) {
+ if (isa<LabelDecl>(D)) {
+ SourceLocation AfterColon = Lexer::findLocationAfterToken(D->getLocEnd(),
+ tok::colon, Ctx.getSourceManager(), Ctx.getLangOpts(), true);
+ if (AfterColon.isInvalid())
+ return;
+ Hint = FixItHint::CreateRemoval(CharSourceRange::
+ getCharRange(D->getLocStart(), AfterColon));
+ }
+ return;
+}
+
+/// DiagnoseUnusedDecl - Emit warnings about declarations that are not used
+/// unless they are marked attr(unused).
+void Sema::DiagnoseUnusedDecl(const NamedDecl *D) {
+ FixItHint Hint;
+ if (!ShouldDiagnoseUnusedDecl(D))
+ return;
+
+ GenerateFixForUnusedDecl(D, Context, Hint);
+
+ unsigned DiagID;
+ if (isa<VarDecl>(D) && cast<VarDecl>(D)->isExceptionVariable())
+ DiagID = diag::warn_unused_exception_param;
+ else if (isa<LabelDecl>(D))
+ DiagID = diag::warn_unused_label;
+ else
+ DiagID = diag::warn_unused_variable;
+
+ Diag(D->getLocation(), DiagID) << D->getDeclName() << Hint;
+}
+
+static void CheckPoppedLabel(LabelDecl *L, Sema &S) {
+ // Verify that we have no forward references left. If so, there was a goto
+ // or address of a label taken, but no definition of it. A label that is
+ // still only forward-declared is indicated by a null substmt.
+ if (L->getStmt() == 0)
+ S.Diag(L->getLocation(), diag::err_undeclared_label_use) <<L->getDeclName();
+}
+
+void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
+ if (S->decl_empty()) return;
+ assert((S->getFlags() & (Scope::DeclScope | Scope::TemplateParamScope)) &&
+ "Scope shouldn't contain decls!");
+
+ for (Scope::decl_iterator I = S->decl_begin(), E = S->decl_end();
+ I != E; ++I) {
+ Decl *TmpD = (*I);
+ assert(TmpD && "This decl didn't get pushed??");
+
+ assert(isa<NamedDecl>(TmpD) && "Decl isn't NamedDecl?");
+ NamedDecl *D = cast<NamedDecl>(TmpD);
+
+ if (!D->getDeclName()) continue;
+
+ // Diagnose unused variables in this scope.
+ if (!S->hasErrorOccurred())
+ DiagnoseUnusedDecl(D);
+
+ // If this was a forward reference to a label, verify it was defined.
+ if (LabelDecl *LD = dyn_cast<LabelDecl>(D))
+ CheckPoppedLabel(LD, *this);
+
+ // Remove this name from our lexical scope.
+ IdResolver.RemoveDecl(D);
+ }
+}
+
+void Sema::ActOnStartFunctionDeclarator() {
+ ++InFunctionDeclarator;
+}
+
+void Sema::ActOnEndFunctionDeclarator() {
+ assert(InFunctionDeclarator);
+ --InFunctionDeclarator;
+}
+
+/// \brief Look for an Objective-C class in the translation unit.
+///
+/// \param Id The name of the Objective-C class we're looking for. If
+/// typo-correction fixes this name, the Id will be updated
+/// to the fixed name.
+///
+/// \param IdLoc The location of the name in the translation unit.
+///
+/// \param DoTypoCorrection If true, this routine will attempt typo correction
+/// if there is no class with the given name.
+///
+/// \returns The declaration of the named Objective-C class, or NULL if the
+/// class could not be found.
+ObjCInterfaceDecl *Sema::getObjCInterfaceDecl(IdentifierInfo *&Id,
+ SourceLocation IdLoc,
+ bool DoTypoCorrection) {
+ // We aren't enabling lazy built-in creation from this context, so an
+ // ordinary-name lookup is sufficient here.
+ NamedDecl *IDecl = LookupSingleName(TUScope, Id, IdLoc, LookupOrdinaryName);
+
+ if (!IDecl && DoTypoCorrection) {
+ // Perform typo correction at the given location, but only if we
+ // find an Objective-C class name.
+ DeclFilterCCC<ObjCInterfaceDecl> Validator;
+ if (TypoCorrection C = CorrectTypo(DeclarationNameInfo(Id, IdLoc),
+ LookupOrdinaryName, TUScope, NULL,
+ Validator)) {
+ IDecl = C.getCorrectionDeclAs<ObjCInterfaceDecl>();
+ Diag(IdLoc, diag::err_undef_interface_suggest)
+ << Id << IDecl->getDeclName()
+ << FixItHint::CreateReplacement(IdLoc, IDecl->getNameAsString());
+ Diag(IDecl->getLocation(), diag::note_previous_decl)
+ << IDecl->getDeclName();
+
+ Id = IDecl->getIdentifier();
+ }
+ }
+ ObjCInterfaceDecl *Def = dyn_cast_or_null<ObjCInterfaceDecl>(IDecl);
+ // This routine must always return a class definition, if any.
+ if (Def && Def->getDefinition())
+ Def = Def->getDefinition();
+ return Def;
+}
+
+/// getNonFieldDeclScope - Retrieves the innermost scope, starting
+/// from S, where a non-field would be declared. This routine copes
+/// with the difference between C and C++ scoping rules in structs and
+/// unions. For example, the following code is well-formed in C but
+/// ill-formed in C++:
+/// @code
+/// struct S6 {
+/// enum { BAR } e;
+/// };
+///
+/// void test_S6() {
+/// struct S6 a;
+/// a.e = BAR;
+/// }
+/// @endcode
+/// For the declaration of BAR, this routine will return a different
+/// scope. The scope S will be the scope of the unnamed enumeration
+/// within S6. In C++, this routine will return the scope associated
+/// with S6, because the enumeration's scope is a transparent
+/// context but structures can contain non-field names. In C, this
+/// routine will return the translation unit scope, since the
+/// enumeration's scope is a transparent context and structures cannot
+/// contain non-field names.
+Scope *Sema::getNonFieldDeclScope(Scope *S) {
+ while (((S->getFlags() & Scope::DeclScope) == 0) ||
+ (S->getEntity() &&
+ ((DeclContext *)S->getEntity())->isTransparentContext()) ||
+ (S->isClassScope() && !getLangOpts().CPlusPlus))
+ S = S->getParent();
+ return S;
+}
+
+/// LazilyCreateBuiltin - The specified Builtin-ID was first used at
+/// file scope, so lazily create a decl for it. ForRedeclaration is true
+/// if we're creating this built-in in anticipation of redeclaring the
+/// built-in.
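+///
+/// For example (an illustrative sketch), in C a call to 'printf' with no
+/// declaration in scope and no #include <stdio.h> reaches this path and
+/// lazily creates the library built-in:
+/// @code
+///   int main(void) { printf("hi\n"); return 0; }
+/// @endcode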
+NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned bid,
+ Scope *S, bool ForRedeclaration,
+ SourceLocation Loc) {
+ Builtin::ID BID = (Builtin::ID)bid;
+
+ ASTContext::GetBuiltinTypeError Error;
+ QualType R = Context.GetBuiltinType(BID, Error);
+ switch (Error) {
+ case ASTContext::GE_None:
+ // Okay
+ break;
+
+ case ASTContext::GE_Missing_stdio:
+ if (ForRedeclaration)
+ Diag(Loc, diag::warn_implicit_decl_requires_stdio)
+ << Context.BuiltinInfo.GetName(BID);
+ return 0;
+
+ case ASTContext::GE_Missing_setjmp:
+ if (ForRedeclaration)
+ Diag(Loc, diag::warn_implicit_decl_requires_setjmp)
+ << Context.BuiltinInfo.GetName(BID);
+ return 0;
+
+ case ASTContext::GE_Missing_ucontext:
+ if (ForRedeclaration)
+ Diag(Loc, diag::warn_implicit_decl_requires_ucontext)
+ << Context.BuiltinInfo.GetName(BID);
+ return 0;
+ }
+
+ if (!ForRedeclaration && Context.BuiltinInfo.isPredefinedLibFunction(BID)) {
+ Diag(Loc, diag::ext_implicit_lib_function_decl)
+ << Context.BuiltinInfo.GetName(BID)
+ << R;
+ if (Context.BuiltinInfo.getHeaderName(BID) &&
+ Diags.getDiagnosticLevel(diag::ext_implicit_lib_function_decl, Loc)
+ != DiagnosticsEngine::Ignored)
+ Diag(Loc, diag::note_please_include_header)
+ << Context.BuiltinInfo.getHeaderName(BID)
+ << Context.BuiltinInfo.GetName(BID);
+ }
+
+ FunctionDecl *New = FunctionDecl::Create(Context,
+ Context.getTranslationUnitDecl(),
+ Loc, Loc, II, R, /*TInfo=*/0,
+ SC_Extern,
+ SC_None, false,
+ /*hasPrototype=*/true);
+ New->setImplicit();
+
+ // Create Decl objects for each parameter, adding them to the
+ // FunctionDecl.
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(R)) {
+ SmallVector<ParmVarDecl*, 16> Params;
+ for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
+ ParmVarDecl *parm =
+ ParmVarDecl::Create(Context, New, SourceLocation(),
+ SourceLocation(), 0,
+ FT->getArgType(i), /*TInfo=*/0,
+ SC_None, SC_None, 0);
+ parm->setScopeInfo(0, i);
+ Params.push_back(parm);
+ }
+ New->setParams(Params);
+ }
+
+ AddKnownFunctionAttributes(New);
+
+ // TUScope is the translation-unit scope to insert this function into.
+ // FIXME: This is hideous. We need to teach PushOnScopeChains to
+ // relate Scopes to DeclContexts, and probably eliminate CurContext
+ // entirely, but we're not there yet.
+ DeclContext *SavedContext = CurContext;
+ CurContext = Context.getTranslationUnitDecl();
+ PushOnScopeChains(New, TUScope);
+ CurContext = SavedContext;
+ return New;
+}
+
+bool Sema::isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New) {
+ QualType OldType;
+ if (TypedefNameDecl *OldTypedef = dyn_cast<TypedefNameDecl>(Old))
+ OldType = OldTypedef->getUnderlyingType();
+ else
+ OldType = Context.getTypeDeclType(Old);
+ QualType NewType = New->getUnderlyingType();
+
+ if (NewType->isVariablyModifiedType()) {
+ // Must not redefine a typedef with a variably-modified type.
+ int Kind = isa<TypeAliasDecl>(Old) ? 1 : 0;
+ Diag(New->getLocation(), diag::err_redefinition_variably_modified_typedef)
+ << Kind << NewType;
+ if (Old->getLocation().isValid())
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ New->setInvalidDecl();
+ return true;
+ }
+
+ if (OldType != NewType &&
+ !OldType->isDependentType() &&
+ !NewType->isDependentType() &&
+ !Context.hasSameType(OldType, NewType)) {
+ int Kind = isa<TypeAliasDecl>(Old) ? 1 : 0;
+ Diag(New->getLocation(), diag::err_redefinition_different_typedef)
+ << Kind << NewType << OldType;
+ if (Old->getLocation().isValid())
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ New->setInvalidDecl();
+ return true;
+ }
+ return false;
+}
+
+/// MergeTypedefNameDecl - We just parsed a typedef 'New' which has the
+/// same name and scope as a previous declaration 'Old'. Figure out
+/// how to resolve this situation, merging decls or emitting
+/// diagnostics as appropriate. If there was an error, set New to be invalid.
+///
+void Sema::MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls) {
+ // If the new decl is known invalid already, don't bother doing any
+ // merging checks.
+ if (New->isInvalidDecl()) return;
+
+ // Allow multiple definitions for ObjC built-in typedefs.
+ // FIXME: Verify the underlying types are equivalent!
+ if (getLangOpts().ObjC1) {
+ const IdentifierInfo *TypeID = New->getIdentifier();
+ switch (TypeID->getLength()) {
+ default: break;
+ case 2:
+ if (!TypeID->isStr("id"))
+ break;
+ Context.setObjCIdRedefinitionType(New->getUnderlyingType());
+ // Install the built-in type for 'id', ignoring the current definition.
+ New->setTypeForDecl(Context.getObjCIdType().getTypePtr());
+ return;
+ case 5:
+ if (!TypeID->isStr("Class"))
+ break;
+ Context.setObjCClassRedefinitionType(New->getUnderlyingType());
+ // Install the built-in type for 'Class', ignoring the current definition.
+ New->setTypeForDecl(Context.getObjCClassType().getTypePtr());
+ return;
+ case 3:
+ if (!TypeID->isStr("SEL"))
+ break;
+ Context.setObjCSelRedefinitionType(New->getUnderlyingType());
+ // Install the built-in type for 'SEL', ignoring the current definition.
+ New->setTypeForDecl(Context.getObjCSelType().getTypePtr());
+ return;
+ }
+ // Fall through - the typedef name was not a builtin type.
+ }
+
+ // Verify the old decl was also a type.
+ TypeDecl *Old = OldDecls.getAsSingle<TypeDecl>();
+ if (!Old) {
+ Diag(New->getLocation(), diag::err_redefinition_different_kind)
+ << New->getDeclName();
+
+ NamedDecl *OldD = OldDecls.getRepresentativeDecl();
+ if (OldD->getLocation().isValid())
+ Diag(OldD->getLocation(), diag::note_previous_definition);
+
+ return New->setInvalidDecl();
+ }
+
+ // If the old declaration is invalid, just give up here.
+ if (Old->isInvalidDecl())
+ return New->setInvalidDecl();
+
+ // If the typedef types are not identical, reject them in all languages and
+ // with any extensions enabled.
+ if (isIncompatibleTypedef(Old, New))
+ return;
+
+ // The types match. Link up the redeclaration chain if the old
+ // declaration was a typedef.
+ if (TypedefNameDecl *Typedef = dyn_cast<TypedefNameDecl>(Old))
+ New->setPreviousDeclaration(Typedef);
+
+ if (getLangOpts().MicrosoftExt)
+ return;
+
+ if (getLangOpts().CPlusPlus) {
+ // C++ [dcl.typedef]p2:
+ // In a given non-class scope, a typedef specifier can be used to
+ // redefine the name of any type declared in that scope to refer
+ // to the type to which it already refers.
+ if (!isa<CXXRecordDecl>(CurContext))
+ return;
+
+ // C++0x [dcl.typedef]p4:
+ // In a given class scope, a typedef specifier can be used to redefine
+ // any class-name declared in that scope that is not also a typedef-name
+ // to refer to the type to which it already refers.
+ //
+ // This wording came in via DR424, which was a correction to the
+ // wording in DR56, which accidentally banned code like:
+ //
+ // struct S {
+ // typedef struct A { } A;
+ // };
+ //
+ // in the C++03 standard. We implement the C++0x semantics, which
+ // allow the above but disallow
+ //
+ // struct S {
+ // typedef int I;
+ // typedef int I;
+ // };
+ //
+ // since that was the intent of DR56.
+ if (!isa<TypedefNameDecl>(Old))
+ return;
+
+ Diag(New->getLocation(), diag::err_redefinition)
+ << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+
+ // Modules always permit redefinition of typedefs, as does C11.
+ if (getLangOpts().Modules || getLangOpts().C11)
+ return;
+
+ // If we have a redefinition of a typedef in C, emit a warning. This warning
+ // is normally mapped to an error, but can be controlled with
+ // -Wtypedef-redefinition. If either the original or the redefinition is
+ // in a system header, don't emit this for compatibility with GCC.
+ if (getDiagnostics().getSuppressSystemWarnings() &&
+ (Context.getSourceManager().isInSystemHeader(Old->getLocation()) ||
+ Context.getSourceManager().isInSystemHeader(New->getLocation())))
+ return;
+
+ Diag(New->getLocation(), diag::warn_redefinition_of_typedef)
+ << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return;
+}
+
+/// DeclHasAttr - returns true if the decl D already has the target
+/// attribute A.
+static bool
+DeclHasAttr(const Decl *D, const Attr *A) {
+ const OwnershipAttr *OA = dyn_cast<OwnershipAttr>(A);
+ const AnnotateAttr *Ann = dyn_cast<AnnotateAttr>(A);
+ for (Decl::attr_iterator i = D->attr_begin(), e = D->attr_end(); i != e; ++i)
+ if ((*i)->getKind() == A->getKind()) {
+ if (Ann) {
+ if (Ann->getAnnotation() == cast<AnnotateAttr>(*i)->getAnnotation())
+ return true;
+ continue;
+ }
+ // FIXME: Don't hardcode this check
+ if (OA && isa<OwnershipAttr>(*i))
+ return OA->getOwnKind() == cast<OwnershipAttr>(*i)->getOwnKind();
+ return true;
+ }
+
+ return false;
+}
+
+/// mergeDeclAttributes - Copy attributes from the Old decl to the New one.
+void Sema::mergeDeclAttributes(Decl *New, Decl *Old,
+ bool MergeDeprecation) {
+ if (!Old->hasAttrs())
+ return;
+
+ bool foundAny = New->hasAttrs();
+
+ // Ensure that any moving of objects within the allocated map is done before
+ // we process them.
+ if (!foundAny) New->setAttrs(AttrVec());
+
+ for (specific_attr_iterator<InheritableAttr>
+ i = Old->specific_attr_begin<InheritableAttr>(),
+ e = Old->specific_attr_end<InheritableAttr>();
+ i != e; ++i) {
+ // Ignore deprecated/unavailable/availability attributes if requested.
+ if (!MergeDeprecation &&
+ (isa<DeprecatedAttr>(*i) ||
+ isa<UnavailableAttr>(*i) ||
+ isa<AvailabilityAttr>(*i)))
+ continue;
+
+ if (!DeclHasAttr(New, *i)) {
+ InheritableAttr *newAttr = cast<InheritableAttr>((*i)->clone(Context));
+ newAttr->setInherited(true);
+ New->addAttr(newAttr);
+ foundAny = true;
+ }
+ }
+
+ if (!foundAny) New->dropAttrs();
+}
+
+/// mergeParamDeclAttributes - Copy attributes from the old parameter
+/// to the new one.
+static void mergeParamDeclAttributes(ParmVarDecl *newDecl,
+ const ParmVarDecl *oldDecl,
+ ASTContext &C) {
+ if (!oldDecl->hasAttrs())
+ return;
+
+ bool foundAny = newDecl->hasAttrs();
+
+ // Ensure that any moving of objects within the allocated map is
+ // done before we process them.
+ if (!foundAny) newDecl->setAttrs(AttrVec());
+
+ for (specific_attr_iterator<InheritableParamAttr>
+ i = oldDecl->specific_attr_begin<InheritableParamAttr>(),
+ e = oldDecl->specific_attr_end<InheritableParamAttr>(); i != e; ++i) {
+ if (!DeclHasAttr(newDecl, *i)) {
+ InheritableAttr *newAttr = cast<InheritableParamAttr>((*i)->clone(C));
+ newAttr->setInherited(true);
+ newDecl->addAttr(newAttr);
+ foundAny = true;
+ }
+ }
+
+ if (!foundAny) newDecl->dropAttrs();
+}
+
+namespace {
+
+/// Used in MergeFunctionDecl to keep track of function parameters in
+/// C.
+struct GNUCompatibleParamWarning {
+ ParmVarDecl *OldParm;
+ ParmVarDecl *NewParm;
+ QualType PromotedType;
+};
+
+}
+
+/// getSpecialMember - get the special member enum for a method.
+Sema::CXXSpecialMember Sema::getSpecialMember(const CXXMethodDecl *MD) {
+ if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
+ if (Ctor->isDefaultConstructor())
+ return Sema::CXXDefaultConstructor;
+
+ if (Ctor->isCopyConstructor())
+ return Sema::CXXCopyConstructor;
+
+ if (Ctor->isMoveConstructor())
+ return Sema::CXXMoveConstructor;
+ } else if (isa<CXXDestructorDecl>(MD)) {
+ return Sema::CXXDestructor;
+ } else if (MD->isCopyAssignmentOperator()) {
+ return Sema::CXXCopyAssignment;
+ } else if (MD->isMoveAssignmentOperator()) {
+ return Sema::CXXMoveAssignment;
+ }
+
+ return Sema::CXXInvalid;
+}
+
+/// canRedefineFunction - checks if a function can be redefined. Currently,
+/// only extern inline functions can be redefined, and even then only in
+/// GNU89 mode.
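+///
+/// A sketch of the GNU89 pattern this permits (hypothetical function name):
+/// @code
+///   extern inline int twice(int x) { return 2 * x; } // inline definition
+///   int twice(int x) { return x + x; }               // external definition
+/// @endcode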
+static bool canRedefineFunction(const FunctionDecl *FD,
+ const LangOptions& LangOpts) {
+ return ((FD->hasAttr<GNUInlineAttr>() || LangOpts.GNUInline) &&
+ !LangOpts.CPlusPlus &&
+ FD->isInlineSpecified() &&
+ FD->getStorageClass() == SC_Extern);
+}
+
+/// MergeFunctionDecl - We just parsed a function 'New' from
+/// declarator D which has the same name and scope as a previous
+/// declaration 'Old'. Figure out how to resolve this situation,
+/// merging decls or emitting diagnostics as appropriate.
+///
+/// In C++, New and Old must be declarations that are not
+/// overloaded. Use IsOverload to determine whether New and Old are
+/// overloaded, and to select the Old declaration that New should be
+/// merged with.
+///
+/// Returns true if there was an error, false otherwise.
+bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD, Scope *S) {
+ // Verify the old decl was also a function.
+ FunctionDecl *Old = 0;
+ if (FunctionTemplateDecl *OldFunctionTemplate
+ = dyn_cast<FunctionTemplateDecl>(OldD))
+ Old = OldFunctionTemplate->getTemplatedDecl();
+ else
+ Old = dyn_cast<FunctionDecl>(OldD);
+ if (!Old) {
+ if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(OldD)) {
+ Diag(New->getLocation(), diag::err_using_decl_conflict_reverse);
+ Diag(Shadow->getTargetDecl()->getLocation(),
+ diag::note_using_decl_target);
+ Diag(Shadow->getUsingDecl()->getLocation(),
+ diag::note_using_decl) << 0;
+ return true;
+ }
+
+ Diag(New->getLocation(), diag::err_redefinition_different_kind)
+ << New->getDeclName();
+ Diag(OldD->getLocation(), diag::note_previous_definition);
+ return true;
+ }
+
+ // Determine whether the previous declaration was a definition,
+ // implicit declaration, or a declaration.
+ diag::kind PrevDiag;
+ if (Old->isThisDeclarationADefinition())
+ PrevDiag = diag::note_previous_definition;
+ else if (Old->isImplicit())
+ PrevDiag = diag::note_previous_implicit_declaration;
+ else
+ PrevDiag = diag::note_previous_declaration;
+
+ QualType OldQType = Context.getCanonicalType(Old->getType());
+ QualType NewQType = Context.getCanonicalType(New->getType());
+
+ // Don't complain about this if we're in GNU89 mode and the old function
+ // is an extern inline function.
+ if (!isa<CXXMethodDecl>(New) && !isa<CXXMethodDecl>(Old) &&
+ New->getStorageClass() == SC_Static &&
+ Old->getStorageClass() != SC_Static &&
+ !canRedefineFunction(Old, getLangOpts())) {
+ if (getLangOpts().MicrosoftExt) {
+ Diag(New->getLocation(), diag::warn_static_non_static) << New;
+ Diag(Old->getLocation(), PrevDiag);
+ } else {
+ Diag(New->getLocation(), diag::err_static_non_static) << New;
+ Diag(Old->getLocation(), PrevDiag);
+ return true;
+ }
+ }
+
+ // If a function is first declared with a calling convention, but is
+ // later declared or defined without one, the second decl assumes the
+ // calling convention of the first.
+ //
+ // For the new decl, we have to look at the NON-canonical type to tell the
+ // difference between a function that really doesn't have a calling
+ // convention and one that is declared cdecl. That's because in
+ // canonicalization (see ASTContext.cpp), cdecl is canonicalized away
+ // because it is the default calling convention.
+ //
+ // Note also that we DO NOT return at this point, because we still have
+ // other tests to run.
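+ //
+ // For example (an illustrative sketch, on a target where __stdcall applies):
+ //   void __stdcall f(int);
+ //   void f(int);          // inherits __stdcall from the first declaration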
+ const FunctionType *OldType = cast<FunctionType>(OldQType);
+ const FunctionType *NewType = New->getType()->getAs<FunctionType>();
+ FunctionType::ExtInfo OldTypeInfo = OldType->getExtInfo();
+ FunctionType::ExtInfo NewTypeInfo = NewType->getExtInfo();
+ bool RequiresAdjustment = false;
+ if (OldTypeInfo.getCC() != CC_Default &&
+ NewTypeInfo.getCC() == CC_Default) {
+ NewTypeInfo = NewTypeInfo.withCallingConv(OldTypeInfo.getCC());
+ RequiresAdjustment = true;
+ } else if (!Context.isSameCallConv(OldTypeInfo.getCC(),
+ NewTypeInfo.getCC())) {
+ // Calling conventions really aren't compatible, so complain.
+ Diag(New->getLocation(), diag::err_cconv_change)
+ << FunctionType::getNameForCallConv(NewTypeInfo.getCC())
+ << (OldTypeInfo.getCC() == CC_Default)
+ << (OldTypeInfo.getCC() == CC_Default ? "" :
+ FunctionType::getNameForCallConv(OldTypeInfo.getCC()));
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ return true;
+ }
+
+ // FIXME: diagnose the other way around?
+ if (OldTypeInfo.getNoReturn() && !NewTypeInfo.getNoReturn()) {
+ NewTypeInfo = NewTypeInfo.withNoReturn(true);
+ RequiresAdjustment = true;
+ }
+
+ // Merge regparm attribute.
+ if (OldTypeInfo.getHasRegParm() != NewTypeInfo.getHasRegParm() ||
+ OldTypeInfo.getRegParm() != NewTypeInfo.getRegParm()) {
+ if (NewTypeInfo.getHasRegParm()) {
+ Diag(New->getLocation(), diag::err_regparm_mismatch)
+ << NewType->getRegParmType()
+ << OldType->getRegParmType();
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ return true;
+ }
+
+ NewTypeInfo = NewTypeInfo.withRegParm(OldTypeInfo.getRegParm());
+ RequiresAdjustment = true;
+ }
+
+ // Merge ns_returns_retained attribute.
+ if (OldTypeInfo.getProducesResult() != NewTypeInfo.getProducesResult()) {
+ if (NewTypeInfo.getProducesResult()) {
+ Diag(New->getLocation(), diag::err_returns_retained_mismatch);
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ return true;
+ }
+
+ NewTypeInfo = NewTypeInfo.withProducesResult(true);
+ RequiresAdjustment = true;
+ }
+
+ if (RequiresAdjustment) {
+ NewType = Context.adjustFunctionType(NewType, NewTypeInfo);
+ New->setType(QualType(NewType, 0));
+ NewQType = Context.getCanonicalType(New->getType());
+ }
+
+ if (getLangOpts().CPlusPlus) {
+ // (C++98 13.1p2):
+ // Certain function declarations cannot be overloaded:
+ // -- Function declarations that differ only in the return type
+ // cannot be overloaded.
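+ //
+ // For example (an illustrative sketch): 'int g();' followed by 'long g();'
+ // is diagnosed here rather than treated as an overload.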
+ QualType OldReturnType = OldType->getResultType();
+ QualType NewReturnType = cast<FunctionType>(NewQType)->getResultType();
+ QualType ResQT;
+ if (OldReturnType != NewReturnType) {
+ if (NewReturnType->isObjCObjectPointerType()
+ && OldReturnType->isObjCObjectPointerType())
+ ResQT = Context.mergeObjCGCQualifiers(NewQType, OldQType);
+ if (ResQT.isNull()) {
+ if (New->isCXXClassMember() && New->isOutOfLine())
+ Diag(New->getLocation(),
+ diag::err_member_def_does_not_match_ret_type) << New;
+ else
+ Diag(New->getLocation(), diag::err_ovl_diff_return_type);
+ Diag(Old->getLocation(), PrevDiag) << Old << Old->getType();
+ return true;
+ }
+ else
+ NewQType = ResQT;
+ }
+
+ const CXXMethodDecl* OldMethod = dyn_cast<CXXMethodDecl>(Old);
+ CXXMethodDecl* NewMethod = dyn_cast<CXXMethodDecl>(New);
+ if (OldMethod && NewMethod) {
+ // Preserve triviality.
+ NewMethod->setTrivial(OldMethod->isTrivial());
+
+ // MSVC allows explicit template specialization at class scope:
+ // two CXXMethodDecls referring to the same function will be injected.
+ // We don't want a redeclaration error.
+ bool IsClassScopeExplicitSpecialization =
+ OldMethod->isFunctionTemplateSpecialization() &&
+ NewMethod->isFunctionTemplateSpecialization();
+ bool isFriend = NewMethod->getFriendObjectKind();
+
+ if (!isFriend && NewMethod->getLexicalDeclContext()->isRecord() &&
+ !IsClassScopeExplicitSpecialization) {
+ // -- Member function declarations with the same name and the
+ // same parameter types cannot be overloaded if any of them
+ // is a static member function declaration.
+ if (OldMethod->isStatic() || NewMethod->isStatic()) {
+ Diag(New->getLocation(), diag::err_ovl_static_nonstatic_member);
+ Diag(Old->getLocation(), PrevDiag) << Old << Old->getType();
+ return true;
+ }
+
+ // C++ [class.mem]p1:
+ // [...] A member shall not be declared twice in the
+ // member-specification, except that a nested class or member
+ // class template can be declared and then later defined.
+ unsigned NewDiag;
+ if (isa<CXXConstructorDecl>(OldMethod))
+ NewDiag = diag::err_constructor_redeclared;
+ else if (isa<CXXDestructorDecl>(NewMethod))
+ NewDiag = diag::err_destructor_redeclared;
+ else if (isa<CXXConversionDecl>(NewMethod))
+ NewDiag = diag::err_conv_function_redeclared;
+ else
+ NewDiag = diag::err_member_redeclared;
+
+ Diag(New->getLocation(), NewDiag);
+ Diag(Old->getLocation(), PrevDiag) << Old << Old->getType();
+
+ // Complain if this is an explicit declaration of a special
+ // member that was initially declared implicitly.
+ //
+ // As an exception, it's okay to befriend such methods in order
+ // to permit the implicit constructor/destructor/operator calls.
+ } else if (OldMethod->isImplicit()) {
+ if (isFriend) {
+ NewMethod->setImplicit();
+ } else {
+ Diag(NewMethod->getLocation(),
+ diag::err_definition_of_implicitly_declared_member)
+ << New << getSpecialMember(OldMethod);
+ return true;
+ }
+ } else if (OldMethod->isExplicitlyDefaulted()) {
+ Diag(NewMethod->getLocation(),
+ diag::err_definition_of_explicitly_defaulted_member)
+ << getSpecialMember(OldMethod);
+ return true;
+ }
+ }
+
+ // (C++98 8.3.5p3):
+ // All declarations for a function shall agree exactly in both the
+ // return type and the parameter-type-list.
+ // We also want to respect all the extended bits except noreturn.
+
+ // noreturn should now match unless the old type info didn't have it.
+ QualType OldQTypeForComparison = OldQType;
+ if (!OldTypeInfo.getNoReturn() && NewTypeInfo.getNoReturn()) {
+ assert(OldQType == QualType(OldType, 0));
+ const FunctionType *OldTypeForComparison
+ = Context.adjustFunctionType(OldType, OldTypeInfo.withNoReturn(true));
+ OldQTypeForComparison = QualType(OldTypeForComparison, 0);
+ assert(OldQTypeForComparison.isCanonical());
+ }
+
+ if (OldQTypeForComparison == NewQType)
+ return MergeCompatibleFunctionDecls(New, Old, S);
+
+ // Fall through for conflicting redeclarations and redefinitions.
+ }
+
+ // C: Function types need to be compatible, not identical. This handles
+ // duplicate function decls like "void f(int); void f(enum X);" properly.
+ if (!getLangOpts().CPlusPlus &&
+ Context.typesAreCompatible(OldQType, NewQType)) {
+ const FunctionType *OldFuncType = OldQType->getAs<FunctionType>();
+ const FunctionType *NewFuncType = NewQType->getAs<FunctionType>();
+ const FunctionProtoType *OldProto = 0;
+ if (isa<FunctionNoProtoType>(NewFuncType) &&
+ (OldProto = dyn_cast<FunctionProtoType>(OldFuncType))) {
+ // The old declaration provided a function prototype, but the
+ // new declaration does not. Merge in the prototype.
+ assert(!OldProto->hasExceptionSpec() && "Exception spec in C");
+ SmallVector<QualType, 16> ParamTypes(OldProto->arg_type_begin(),
+ OldProto->arg_type_end());
+ NewQType = Context.getFunctionType(NewFuncType->getResultType(),
+ ParamTypes.data(), ParamTypes.size(),
+ OldProto->getExtProtoInfo());
+ New->setType(NewQType);
+ New->setHasInheritedPrototype();
+
+ // Synthesize a parameter for each argument type.
+ SmallVector<ParmVarDecl*, 16> Params;
+ for (FunctionProtoType::arg_type_iterator
+ ParamType = OldProto->arg_type_begin(),
+ ParamEnd = OldProto->arg_type_end();
+ ParamType != ParamEnd; ++ParamType) {
+ ParmVarDecl *Param = ParmVarDecl::Create(Context, New,
+ SourceLocation(),
+ SourceLocation(), 0,
+ *ParamType, /*TInfo=*/0,
+ SC_None, SC_None,
+ 0);
+ Param->setScopeInfo(0, Params.size());
+ Param->setImplicit();
+ Params.push_back(Param);
+ }
+
+ New->setParams(Params);
+ }
+
+ return MergeCompatibleFunctionDecls(New, Old, S);
+ }
+
+ // GNU C permits a K&R definition to follow a prototype declaration
+ // if the declared types of the parameters in the K&R definition
+ // match the types in the prototype declaration, even when the
+ // promoted types of the parameters from the K&R definition differ
+ // from the types in the prototype. GCC then keeps the types from
+ // the prototype.
+ //
+ // If a variadic prototype is followed by a non-variadic K&R definition,
+ // the K&R definition becomes variadic. This is sort of an edge case, but
+ // it's legal per the standard depending on how you read C99 6.7.5.3p15 and
+ // C99 6.9.1p8.
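+ //
+ // For example (an illustrative sketch with a hypothetical function):
+ //   void f(short s);         /* prototype */
+ //   void f(s) short s; { }   /* K&R definition; 's' promotes to 'int' */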
+ if (!getLangOpts().CPlusPlus &&
+ Old->hasPrototype() && !New->hasPrototype() &&
+ New->getType()->getAs<FunctionProtoType>() &&
+ Old->getNumParams() == New->getNumParams()) {
+ SmallVector<QualType, 16> ArgTypes;
+ SmallVector<GNUCompatibleParamWarning, 16> Warnings;
+ const FunctionProtoType *OldProto
+ = Old->getType()->getAs<FunctionProtoType>();
+ const FunctionProtoType *NewProto
+ = New->getType()->getAs<FunctionProtoType>();
+
+ // Determine whether this is the GNU C extension.
+ QualType MergedReturn = Context.mergeTypes(OldProto->getResultType(),
+ NewProto->getResultType());
+ bool LooseCompatible = !MergedReturn.isNull();
+ for (unsigned Idx = 0, End = Old->getNumParams();
+ LooseCompatible && Idx != End; ++Idx) {
+ ParmVarDecl *OldParm = Old->getParamDecl(Idx);
+ ParmVarDecl *NewParm = New->getParamDecl(Idx);
+ if (Context.typesAreCompatible(OldParm->getType(),
+ NewProto->getArgType(Idx))) {
+ ArgTypes.push_back(NewParm->getType());
+ } else if (Context.typesAreCompatible(OldParm->getType(),
+ NewParm->getType(),
+ /*CompareUnqualified=*/true)) {
+ GNUCompatibleParamWarning Warn
+ = { OldParm, NewParm, NewProto->getArgType(Idx) };
+ Warnings.push_back(Warn);
+ ArgTypes.push_back(NewParm->getType());
+ } else
+ LooseCompatible = false;
+ }
+
+ if (LooseCompatible) {
+ for (unsigned Warn = 0; Warn < Warnings.size(); ++Warn) {
+ Diag(Warnings[Warn].NewParm->getLocation(),
+ diag::ext_param_promoted_not_compatible_with_prototype)
+ << Warnings[Warn].PromotedType
+ << Warnings[Warn].OldParm->getType();
+ if (Warnings[Warn].OldParm->getLocation().isValid())
+ Diag(Warnings[Warn].OldParm->getLocation(),
+ diag::note_previous_declaration);
+ }
+
+ New->setType(Context.getFunctionType(MergedReturn, &ArgTypes[0],
+ ArgTypes.size(),
+ OldProto->getExtProtoInfo()));
+ return MergeCompatibleFunctionDecls(New, Old, S);
+ }
+
+ // Fall through to diagnose conflicting types.
+ }
+
+ // A function that has already been declared has been redeclared or defined
+ // with a different type; show an appropriate diagnostic.
+ if (unsigned BuiltinID = Old->getBuiltinID()) {
+ // The user has declared a builtin function with an incompatible
+ // signature.
+ if (Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) {
+ // The function the user is redeclaring is a library-defined
+ // function like 'malloc' or 'printf'. Warn about the
+ // redeclaration, then pretend that we don't know about this
+ // library built-in.
+ Diag(New->getLocation(), diag::warn_redecl_library_builtin) << New;
+ Diag(Old->getLocation(), diag::note_previous_builtin_declaration)
+ << Old << Old->getType();
+ New->getIdentifier()->setBuiltinID(Builtin::NotBuiltin);
+ Old->setInvalidDecl();
+ return false;
+ }
+
+ PrevDiag = diag::note_previous_builtin_declaration;
+ }
+
+ Diag(New->getLocation(), diag::err_conflicting_types) << New->getDeclName();
+ Diag(Old->getLocation(), PrevDiag) << Old << Old->getType();
+ return true;
+}
+
+/// \brief Completes the merge of two function declarations that are
+/// known to be compatible.
+///
+/// This routine handles the merging of attributes and other
+/// properties of function declarations from the old declaration to
+/// the new declaration, once we know that New is in fact a
+/// redeclaration of Old.
+///
+/// \returns false
+bool Sema::MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
+ Scope *S) {
+ // Merge the attributes
+ mergeDeclAttributes(New, Old);
+
+ // Merge the storage class.
+ if (Old->getStorageClass() != SC_Extern &&
+ Old->getStorageClass() != SC_None)
+ New->setStorageClass(Old->getStorageClass());
+
+ // Merge "pure" flag.
+ if (Old->isPure())
+ New->setPure();
+
+ // Merge attributes from the parameters. These can mismatch with K&R
+ // declarations.
+ if (New->getNumParams() == Old->getNumParams())
+ for (unsigned i = 0, e = New->getNumParams(); i != e; ++i)
+ mergeParamDeclAttributes(New->getParamDecl(i), Old->getParamDecl(i),
+ Context);
+
+ if (getLangOpts().CPlusPlus)
+ return MergeCXXFunctionDecl(New, Old, S);
+
+ return false;
+}
+
+
+void Sema::mergeObjCMethodDecls(ObjCMethodDecl *newMethod,
+ ObjCMethodDecl *oldMethod) {
+ // We don't want to merge unavailable and deprecated attributes
+ // except from interface to implementation.
+ bool mergeDeprecation = isa<ObjCImplDecl>(newMethod->getDeclContext());
+
+ // Merge the attributes.
+ mergeDeclAttributes(newMethod, oldMethod, mergeDeprecation);
+
+ // Merge attributes from the parameters.
+ ObjCMethodDecl::param_const_iterator oi = oldMethod->param_begin();
+ for (ObjCMethodDecl::param_iterator
+ ni = newMethod->param_begin(), ne = newMethod->param_end();
+ ni != ne; ++ni, ++oi)
+ mergeParamDeclAttributes(*ni, *oi, Context);
+
+ CheckObjCMethodOverride(newMethod, oldMethod, true);
+}
+
+/// MergeVarDeclTypes - We parsed a variable 'New' which has the same name and
+/// scope as a previous declaration 'Old'. Figure out how to merge their types,
+/// emitting diagnostics as appropriate.
+///
+/// Declarations using the auto type specifier (C++ [decl.spec.auto]) call back
+/// to here in AddInitializerToDecl. We can't check them before the initializer
+/// is attached.
+void Sema::MergeVarDeclTypes(VarDecl *New, VarDecl *Old) {
+ if (New->isInvalidDecl() || Old->isInvalidDecl())
+ return;
+
+ QualType MergedT;
+ if (getLangOpts().CPlusPlus) {
+ AutoType *AT = New->getType()->getContainedAutoType();
+ if (AT && !AT->isDeduced()) {
+ // We don't know what the new type is until the initializer is attached.
+ return;
+ } else if (Context.hasSameType(New->getType(), Old->getType())) {
+ // These could still be something that needs exception specs checked.
+ return MergeVarDeclExceptionSpecs(New, Old);
+ }
+ // C++ [basic.link]p10:
+ // [...] the types specified by all declarations referring to a given
+ // object or function shall be identical, except that declarations for an
+ // array object can specify array types that differ by the presence or
+ // absence of a major array bound (8.3.4).
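+ //
+ // For example (an illustrative sketch):
+ //   extern int a[];   // incomplete array type
+ //   int a[10];        // OK: the merged type is int[10]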
+ else if (Old->getType()->isIncompleteArrayType() &&
+ New->getType()->isArrayType()) {
+ CanQual<ArrayType> OldArray
+ = Context.getCanonicalType(Old->getType())->getAs<ArrayType>();
+ CanQual<ArrayType> NewArray
+ = Context.getCanonicalType(New->getType())->getAs<ArrayType>();
+ if (OldArray->getElementType() == NewArray->getElementType())
+ MergedT = New->getType();
+ } else if (Old->getType()->isArrayType() &&
+ New->getType()->isIncompleteArrayType()) {
+ CanQual<ArrayType> OldArray
+ = Context.getCanonicalType(Old->getType())->getAs<ArrayType>();
+ CanQual<ArrayType> NewArray
+ = Context.getCanonicalType(New->getType())->getAs<ArrayType>();
+ if (OldArray->getElementType() == NewArray->getElementType())
+ MergedT = Old->getType();
+ } else if (New->getType()->isObjCObjectPointerType()
+ && Old->getType()->isObjCObjectPointerType()) {
+ MergedT = Context.mergeObjCGCQualifiers(New->getType(),
+ Old->getType());
+ }
+ } else {
+ MergedT = Context.mergeTypes(New->getType(), Old->getType());
+ }
+ if (MergedT.isNull()) {
+ Diag(New->getLocation(), diag::err_redefinition_different_type)
+ << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+ New->setType(MergedT);
+}
+
+/// MergeVarDecl - We just parsed a variable 'New' which has the same name
+/// and scope as a previous declaration 'Old'. Figure out how to resolve this
+/// situation, merging decls or emitting diagnostics as appropriate.
+///
+/// Tentative definition rules (C99 6.9.2p2) are checked by
+/// FinalizeDeclaratorGroup. Unfortunately, we can't analyze tentative
+/// definitions here, since the initializer hasn't been attached.
+///
+void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
+ // If the new decl is already invalid, don't do any other checking.
+ if (New->isInvalidDecl())
+ return;
+
+ // Verify the old decl was also a variable.
+ VarDecl *Old = 0;
+ if (!Previous.isSingleResult() ||
+ !(Old = dyn_cast<VarDecl>(Previous.getFoundDecl()))) {
+ Diag(New->getLocation(), diag::err_redefinition_different_kind)
+ << New->getDeclName();
+ Diag(Previous.getRepresentativeDecl()->getLocation(),
+ diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+
+ // C++ [class.mem]p1:
+ // A member shall not be declared twice in the member-specification [...]
+ //
+ // Here, we need only consider static data members.
+ if (Old->isStaticDataMember() && !New->isOutOfLine()) {
+ Diag(New->getLocation(), diag::err_duplicate_member)
+ << New->getIdentifier();
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ New->setInvalidDecl();
+ }
+
+ mergeDeclAttributes(New, Old);
+ // Warn if an already-declared variable is made a weak_import in a subsequent
+ // declaration.
+ if (New->getAttr<WeakImportAttr>() &&
+ Old->getStorageClass() == SC_None &&
+ !Old->getAttr<WeakImportAttr>()) {
+ Diag(New->getLocation(), diag::warn_weak_import) << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ // Remove weak_import attribute on new declaration.
+ New->dropAttr<WeakImportAttr>();
+ }
+
+ // Merge the types.
+ MergeVarDeclTypes(New, Old);
+ if (New->isInvalidDecl())
+ return;
+
+ // C99 6.2.2p4: Check if we have a static decl followed by a non-static.
+ if (New->getStorageClass() == SC_Static &&
+ (Old->getStorageClass() == SC_None || Old->hasExternalStorage())) {
+ Diag(New->getLocation(), diag::err_static_non_static) << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+ // C99 6.2.2p4:
+ // For an identifier declared with the storage-class specifier
+ // extern in a scope in which a prior declaration of that
+ // identifier is visible,23) if the prior declaration specifies
+ // internal or external linkage, the linkage of the identifier at
+ // the later declaration is the same as the linkage specified at
+ // the prior declaration. If no prior declaration is visible, or
+ // if the prior declaration specifies no linkage, then the
+ // identifier has external linkage.
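+ //
+ // For example (an illustrative sketch):
+ //   static int x;   // internal linkage
+ //   extern int x;   // OK: refers to the prior 'x'; linkage stays internal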
+ if (New->hasExternalStorage() && Old->hasLinkage())
+ /* Okay */;
+ else if (New->getStorageClass() != SC_Static &&
+ Old->getStorageClass() == SC_Static) {
+ Diag(New->getLocation(), diag::err_non_static_static) << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+
+ // Check if extern is followed by non-extern and vice-versa.
+ if (New->hasExternalStorage() &&
+ !Old->hasLinkage() && Old->isLocalVarDecl()) {
+ Diag(New->getLocation(), diag::err_extern_non_extern) << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+ if (Old->hasExternalStorage() &&
+ !New->hasLinkage() && New->isLocalVarDecl()) {
+ Diag(New->getLocation(), diag::err_non_extern_extern) << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+
+ // Variables with external linkage are analyzed in FinalizeDeclaratorGroup.
+
+ // FIXME: The test for external storage here seems wrong? We still
+ // need to check for mismatches.
+ if (!New->hasExternalStorage() && !New->isFileVarDecl() &&
+ // Don't complain about out-of-line definitions of static members.
+ !(Old->getLexicalDeclContext()->isRecord() &&
+ !New->getLexicalDeclContext()->isRecord())) {
+ Diag(New->getLocation(), diag::err_redefinition) << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ return New->setInvalidDecl();
+ }
+
+ if (New->isThreadSpecified() && !Old->isThreadSpecified()) {
+ Diag(New->getLocation(), diag::err_thread_non_thread) << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ } else if (!New->isThreadSpecified() && Old->isThreadSpecified()) {
+ Diag(New->getLocation(), diag::err_non_thread_thread) << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_previous_definition);
+ }
+
+ // C++ doesn't have tentative definitions, so go right ahead and check here.
+ const VarDecl *Def;
+ if (getLangOpts().CPlusPlus &&
+ New->isThisDeclarationADefinition() == VarDecl::Definition &&
+ (Def = Old->getDefinition())) {
+ Diag(New->getLocation(), diag::err_redefinition)
+ << New->getDeclName();
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ New->setInvalidDecl();
+ return;
+ }
+ // C99 6.2.2p4:
+ // For an identifier declared with the storage-class specifier extern in a
+ // scope in which a prior declaration of that identifier is visible, if
+ // the prior declaration specifies internal or external linkage, the linkage
+ // of the identifier at the later declaration is the same as the linkage
+ // specified at the prior declaration.
+ // FIXME: Revisit this code.
+ if (New->hasExternalStorage() &&
+ Old->getLinkage() == InternalLinkage &&
+ New->getDeclContext() == Old->getDeclContext())
+ New->setStorageClass(Old->getStorageClass());
+
+ // Keep a chain of previous declarations.
+ New->setPreviousDeclaration(Old);
+
+ // Inherit access appropriately.
+ New->setAccess(Old->getAccess());
+}
+
+/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
+/// no declarator (e.g. "struct foo;") is parsed.
+Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
+ DeclSpec &DS) {
+ return ParsedFreeStandingDeclSpec(S, AS, DS,
+ MultiTemplateParamsArg(*this, 0, 0));
+}
+
+/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
+/// no declarator (e.g. "struct foo;") is parsed. It also accepts template
+/// parameters to cope with template friend declarations.
+Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
+ DeclSpec &DS,
+ MultiTemplateParamsArg TemplateParams) {
+ Decl *TagD = 0;
+ TagDecl *Tag = 0;
+ if (DS.getTypeSpecType() == DeclSpec::TST_class ||
+ DS.getTypeSpecType() == DeclSpec::TST_struct ||
+ DS.getTypeSpecType() == DeclSpec::TST_union ||
+ DS.getTypeSpecType() == DeclSpec::TST_enum) {
+ TagD = DS.getRepAsDecl();
+
+ if (!TagD) // We probably had an error
+ return 0;
+
+ // Note that the above type specs guarantee that the
+ // type rep is a Decl, whereas in many of the others
+ // it's a Type.
+ if (isa<TagDecl>(TagD))
+ Tag = cast<TagDecl>(TagD);
+ else if (ClassTemplateDecl *CTD = dyn_cast<ClassTemplateDecl>(TagD))
+ Tag = CTD->getTemplatedDecl();
+ }
+
+ if (Tag) {
+ Tag->setFreeStanding();
+ if (Tag->isInvalidDecl())
+ return Tag;
+ }
+
+ if (unsigned TypeQuals = DS.getTypeQualifiers()) {
+ // Enforce C99 6.7.3p2: "Types other than pointer types derived from object
+ // or incomplete types shall not be restrict-qualified."
+ if (TypeQuals & DeclSpec::TQ_restrict)
+ Diag(DS.getRestrictSpecLoc(),
+ diag::err_typecheck_invalid_restrict_not_pointer_noarg)
+ << DS.getSourceRange();
+ }
+
+ if (DS.isConstexprSpecified()) {
+ // C++0x [dcl.constexpr]p1: constexpr can only be applied to declarations
+ // and definitions of functions and variables.
+ if (Tag)
+ Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_tag)
+ << (DS.getTypeSpecType() == DeclSpec::TST_class ? 0 :
+ DS.getTypeSpecType() == DeclSpec::TST_struct ? 1 :
+ DS.getTypeSpecType() == DeclSpec::TST_union ? 2 : 3);
+ else
+ Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_no_declarators);
+ // Don't emit warnings after this error.
+ return TagD;
+ }
+
+ if (DS.isFriendSpecified()) {
+ // If we're dealing with a decl but not a TagDecl, assume that
+ // whatever routines created it handled the friendship aspect.
+ if (TagD && !Tag)
+ return 0;
+ return ActOnFriendTypeDecl(S, DS, TemplateParams);
+ }
+
+ // Track whether we warned about the fact that there aren't any
+ // declarators.
+ bool emittedWarning = false;
+
+ if (RecordDecl *Record = dyn_cast_or_null<RecordDecl>(Tag)) {
+ if (!Record->getDeclName() && Record->isCompleteDefinition() &&
+ DS.getStorageClassSpec() != DeclSpec::SCS_typedef) {
+ if (getLangOpts().CPlusPlus ||
+ Record->getDeclContext()->isRecord())
+ return BuildAnonymousStructOrUnion(S, DS, AS, Record);
+
+ Diag(DS.getLocStart(), diag::ext_no_declarators)
+ << DS.getSourceRange();
+ emittedWarning = true;
+ }
+ }
+
+ // Check for Microsoft C extension: anonymous struct.
+ if (getLangOpts().MicrosoftExt && !getLangOpts().CPlusPlus &&
+ CurContext->isRecord() &&
+ DS.getStorageClassSpec() == DeclSpec::SCS_unspecified) {
+ // Handle 2 kinds of anonymous struct:
+ // struct STRUCT;
+ // and
+ // STRUCT_TYPE; <- where STRUCT_TYPE is a typedef struct.
+ RecordDecl *Record = dyn_cast_or_null<RecordDecl>(Tag);
+ if ((Record && Record->getDeclName() && !Record->isCompleteDefinition()) ||
+ (DS.getTypeSpecType() == DeclSpec::TST_typename &&
+ DS.getRepAsType().get()->isStructureType())) {
+ Diag(DS.getLocStart(), diag::ext_ms_anonymous_struct)
+ << DS.getSourceRange();
+ return BuildMicrosoftCAnonymousStruct(S, DS, Record);
+ }
+ }
+
+ if (getLangOpts().CPlusPlus &&
+ DS.getStorageClassSpec() != DeclSpec::SCS_typedef)
+ if (EnumDecl *Enum = dyn_cast_or_null<EnumDecl>(Tag))
+ if (Enum->enumerator_begin() == Enum->enumerator_end() &&
+ !Enum->getIdentifier() && !Enum->isInvalidDecl()) {
+ Diag(Enum->getLocation(), diag::ext_no_declarators)
+ << DS.getSourceRange();
+ emittedWarning = true;
+ }
+
+ // Skip all the checks below if we have a type error.
+ if (DS.getTypeSpecType() == DeclSpec::TST_error) return TagD;
+
+ if (!DS.isMissingDeclaratorOk()) {
+ // Warn about typedefs of enums without names, since this is an
+ // extension in both Microsoft and GNU.
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_typedef &&
+ Tag && isa<EnumDecl>(Tag)) {
+ Diag(DS.getLocStart(), diag::ext_typedef_without_a_name)
+ << DS.getSourceRange();
+ return Tag;
+ }
+
+ Diag(DS.getLocStart(), diag::ext_no_declarators)
+ << DS.getSourceRange();
+ emittedWarning = true;
+ }
+
+ // We're going to complain about a bunch of spurious specifiers;
+ // only do this if we're declaring a tag, because otherwise we
+ // should be getting diag::ext_no_declarators.
+ if (emittedWarning || (TagD && TagD->isInvalidDecl()))
+ return TagD;
+
+ // Note that a linkage-specification sets a storage class, but
+ // 'extern "C" struct foo;' is actually valid and not theoretically
+ // useless.
+ if (DeclSpec::SCS scs = DS.getStorageClassSpec())
+ if (!DS.isExternInLinkageSpec())
+ Diag(DS.getStorageClassSpecLoc(), diag::warn_standalone_specifier)
+ << DeclSpec::getSpecifierName(scs);
+
+ if (DS.isThreadSpecified())
+ Diag(DS.getThreadSpecLoc(), diag::warn_standalone_specifier) << "__thread";
+ if (DS.getTypeQualifiers()) {
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
+ Diag(DS.getConstSpecLoc(), diag::warn_standalone_specifier) << "const";
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_volatile)
+ Diag(DS.getConstSpecLoc(), diag::warn_standalone_specifier) << "volatile";
+ // Restrict is covered above.
+ }
+ if (DS.isInlineSpecified())
+ Diag(DS.getInlineSpecLoc(), diag::warn_standalone_specifier) << "inline";
+ if (DS.isVirtualSpecified())
+ Diag(DS.getVirtualSpecLoc(), diag::warn_standalone_specifier) << "virtual";
+ if (DS.isExplicitSpecified())
+ Diag(DS.getExplicitSpecLoc(), diag::warn_standalone_specifier) <<"explicit";
+
+ if (DS.isModulePrivateSpecified() &&
+ Tag && Tag->getDeclContext()->isFunctionOrMethod())
+ Diag(DS.getModulePrivateSpecLoc(), diag::err_module_private_local_class)
+ << Tag->getTagKind()
+ << FixItHint::CreateRemoval(DS.getModulePrivateSpecLoc());
+
+ // Warn about ignored type attributes, for example:
+ // __attribute__((aligned)) struct A;
+ // Attributes should be placed after the tag to apply to the type declaration.
+ if (!DS.getAttributes().empty()) {
+ DeclSpec::TST TypeSpecType = DS.getTypeSpecType();
+ if (TypeSpecType == DeclSpec::TST_class ||
+ TypeSpecType == DeclSpec::TST_struct ||
+ TypeSpecType == DeclSpec::TST_union ||
+ TypeSpecType == DeclSpec::TST_enum) {
+ AttributeList* attrs = DS.getAttributes().getList();
+ while (attrs) {
+ Diag(attrs->getScopeLoc(),
+ diag::warn_declspec_attribute_ignored)
+ << attrs->getName()
+ << (TypeSpecType == DeclSpec::TST_class ? 0 :
+ TypeSpecType == DeclSpec::TST_struct ? 1 :
+ TypeSpecType == DeclSpec::TST_union ? 2 : 3);
+ attrs = attrs->getNext();
+ }
+ }
+ }
+
+ return TagD;
+}
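+
+// For reference, a few free-standing decl-specs and the paths above that
+// handle them; the tag and enumerator names are purely illustrative, and the
+// diagnostics named are the ones emitted by the code above:
+//
+//   struct foo;                  // plain tag declaration, just returns TagD
+//   constexpr struct foo;        // err_constexpr_tag
+//   inline struct foo;           // warn_standalone_specifier << "inline"
+//   typedef enum { Red, Blue };  // ext_typedef_without_a_name
+//   int;                         // ext_no_declarators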
+
+/// We are trying to inject an anonymous member into the given scope;
+/// check if there's an existing declaration that can't be overloaded.
+///
+/// \return true if this is a forbidden redeclaration
+static bool CheckAnonMemberRedeclaration(Sema &SemaRef,
+ Scope *S,
+ DeclContext *Owner,
+ DeclarationName Name,
+ SourceLocation NameLoc,
+ unsigned diagnostic) {
+ LookupResult R(SemaRef, Name, NameLoc, Sema::LookupMemberName,
+ Sema::ForRedeclaration);
+ if (!SemaRef.LookupName(R, S)) return false;
+
+ if (R.getAsSingle<TagDecl>())
+ return false;
+
+ // Pick a representative declaration.
+ NamedDecl *PrevDecl = R.getRepresentativeDecl()->getUnderlyingDecl();
+ assert(PrevDecl && "Expected a non-null Decl");
+
+ if (!SemaRef.isDeclInScope(PrevDecl, Owner, S))
+ return false;
+
+ SemaRef.Diag(NameLoc, diagnostic) << Name;
+ SemaRef.Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
+
+ return true;
+}
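+
+// A minimal sketch of the redeclaration this rejects (names hypothetical):
+// lookup of 'x' in the owner finds a prior non-tag declaration in scope, so
+// the diagnostic passed in by the caller is emitted along with a note on the
+// previous declaration.
+//
+//   struct S {
+//     int x;
+//     union {
+//       float x;   // err_anonymous_union_member_redecl for 'x'
+//     };
+//   };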
+
+/// InjectAnonymousStructOrUnionMembers - Inject the members of the
+/// anonymous struct or union AnonRecord into the owning context Owner
+/// and scope S. This routine will be invoked just after we realize
+/// that an unnamed union or struct is actually an anonymous union or
+/// struct, e.g.,
+///
+/// @code
+/// union {
+/// int i;
+/// float f;
+/// }; // InjectAnonymousStructOrUnionMembers called here to inject i and
+/// // f into the surrounding scope.
+/// @endcode
+///
+/// This routine is recursive, injecting the names of nested anonymous
+/// structs/unions into the owning context and scope as well.
+static bool InjectAnonymousStructOrUnionMembers(Sema &SemaRef, Scope *S,
+ DeclContext *Owner,
+ RecordDecl *AnonRecord,
+ AccessSpecifier AS,
+ SmallVector<NamedDecl*, 2> &Chaining,
+ bool MSAnonStruct) {
+ unsigned diagKind
+ = AnonRecord->isUnion() ? diag::err_anonymous_union_member_redecl
+ : diag::err_anonymous_struct_member_redecl;
+
+ bool Invalid = false;
+
+ // Look at every FieldDecl and IndirectFieldDecl with a name.
+ for (RecordDecl::decl_iterator D = AnonRecord->decls_begin(),
+ DEnd = AnonRecord->decls_end();
+ D != DEnd; ++D) {
+ if ((isa<FieldDecl>(*D) || isa<IndirectFieldDecl>(*D)) &&
+ cast<NamedDecl>(*D)->getDeclName()) {
+ ValueDecl *VD = cast<ValueDecl>(*D);
+ if (CheckAnonMemberRedeclaration(SemaRef, S, Owner, VD->getDeclName(),
+ VD->getLocation(), diagKind)) {
+ // C++ [class.union]p2:
+ // The names of the members of an anonymous union shall be
+ // distinct from the names of any other entity in the
+ // scope in which the anonymous union is declared.
+ Invalid = true;
+ } else {
+ // C++ [class.union]p2:
+ // For the purpose of name lookup, after the anonymous union
+ // definition, the members of the anonymous union are
+ // considered to have been defined in the scope in which the
+ // anonymous union is declared.
+ unsigned OldChainingSize = Chaining.size();
+ if (IndirectFieldDecl *IF = dyn_cast<IndirectFieldDecl>(VD))
+ for (IndirectFieldDecl::chain_iterator PI = IF->chain_begin(),
+ PE = IF->chain_end(); PI != PE; ++PI)
+ Chaining.push_back(*PI);
+ else
+ Chaining.push_back(VD);
+
+ assert(Chaining.size() >= 2);
+ NamedDecl **NamedChain =
+ new (SemaRef.Context)NamedDecl*[Chaining.size()];
+ for (unsigned i = 0; i < Chaining.size(); i++)
+ NamedChain[i] = Chaining[i];
+
+ IndirectFieldDecl* IndirectField =
+ IndirectFieldDecl::Create(SemaRef.Context, Owner, VD->getLocation(),
+ VD->getIdentifier(), VD->getType(),
+ NamedChain, Chaining.size());
+
+ IndirectField->setAccess(AS);
+ IndirectField->setImplicit();
+ SemaRef.PushOnScopeChains(IndirectField, S);
+
+ // That includes picking up the appropriate access specifier.
+ if (AS != AS_none) IndirectField->setAccess(AS);
+
+ Chaining.resize(OldChainingSize);
+ }
+ }
+ }
+
+ return Invalid;
+}
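+
+// Sketch of the nested case (names hypothetical): by the time the outer
+// anonymous union is processed here, 'b' already exists in it as an
+// IndirectFieldDecl, so the chain built above runs through both anonymous
+// objects and 'o.a' / 'o.b' both resolve without naming either record.
+//
+//   struct Outer {
+//     union {
+//       int a;
+//       struct { int b; };   // GNU/C11 anonymous struct
+//     };
+//   };
+//   // Outer o; o.a = 1; o.b = 2;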
+
+/// StorageClassSpecToVarDeclStorageClass - Maps a DeclSpec::SCS to
+/// a VarDecl::StorageClass. Any error reporting is up to the caller:
+/// illegal input values are mapped to SC_None.
+static StorageClass
+StorageClassSpecToVarDeclStorageClass(DeclSpec::SCS StorageClassSpec) {
+ switch (StorageClassSpec) {
+ case DeclSpec::SCS_unspecified: return SC_None;
+ case DeclSpec::SCS_extern: return SC_Extern;
+ case DeclSpec::SCS_static: return SC_Static;
+ case DeclSpec::SCS_auto: return SC_Auto;
+ case DeclSpec::SCS_register: return SC_Register;
+ case DeclSpec::SCS_private_extern: return SC_PrivateExtern;
+ // Illegal SCSs map to None: error reporting is up to the caller.
+ case DeclSpec::SCS_mutable: // Fall through.
+ case DeclSpec::SCS_typedef: return SC_None;
+ }
+ llvm_unreachable("unknown storage class specifier");
+}
+
+/// StorageClassSpecToFunctionDeclStorageClass - Maps a DeclSpec::SCS to
+/// a StorageClass. Any error reporting is up to the caller:
+/// illegal input values are mapped to SC_None.
+static StorageClass
+StorageClassSpecToFunctionDeclStorageClass(DeclSpec::SCS StorageClassSpec) {
+ switch (StorageClassSpec) {
+ case DeclSpec::SCS_unspecified: return SC_None;
+ case DeclSpec::SCS_extern: return SC_Extern;
+ case DeclSpec::SCS_static: return SC_Static;
+ case DeclSpec::SCS_private_extern: return SC_PrivateExtern;
+ // Illegal SCSs map to None: error reporting is up to the caller.
+ case DeclSpec::SCS_auto: // Fall through.
+ case DeclSpec::SCS_mutable: // Fall through.
+ case DeclSpec::SCS_register: // Fall through.
+ case DeclSpec::SCS_typedef: return SC_None;
+ }
+ llvm_unreachable("unknown storage class specifier");
+}
+
+/// BuildAnonymousStructOrUnion - Handle the declaration of an
+/// anonymous structure or union. Anonymous unions are a C++ feature
+/// (C++ [class.union]) and a C11 feature; anonymous structures
+/// are a C11 feature and a GNU C++ extension.
+Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
+ AccessSpecifier AS,
+ RecordDecl *Record) {
+ DeclContext *Owner = Record->getDeclContext();
+
+ // Diagnose whether this anonymous struct/union is an extension.
+ if (Record->isUnion() && !getLangOpts().CPlusPlus && !getLangOpts().C11)
+ Diag(Record->getLocation(), diag::ext_anonymous_union);
+ else if (!Record->isUnion() && getLangOpts().CPlusPlus)
+ Diag(Record->getLocation(), diag::ext_gnu_anonymous_struct);
+ else if (!Record->isUnion() && !getLangOpts().C11)
+ Diag(Record->getLocation(), diag::ext_c11_anonymous_struct);
+
+ // C and C++ require different kinds of checks for anonymous
+ // structs/unions.
+ bool Invalid = false;
+ if (getLangOpts().CPlusPlus) {
+ const char* PrevSpec = 0;
+ unsigned DiagID;
+ if (Record->isUnion()) {
+ // C++ [class.union]p6:
+ // Anonymous unions declared in a named namespace or in the
+ // global namespace shall be declared static.
+ if (DS.getStorageClassSpec() != DeclSpec::SCS_static &&
+ (isa<TranslationUnitDecl>(Owner) ||
+ (isa<NamespaceDecl>(Owner) &&
+ cast<NamespaceDecl>(Owner)->getDeclName()))) {
+ Diag(Record->getLocation(), diag::err_anonymous_union_not_static)
+ << FixItHint::CreateInsertion(Record->getLocation(), "static ");
+
+ // Recover by adding 'static'.
+ DS.SetStorageClassSpec(*this, DeclSpec::SCS_static, SourceLocation(),
+ PrevSpec, DiagID);
+ }
+ // C++ [class.union]p6:
+ // A storage class is not allowed in a declaration of an
+ // anonymous union in a class scope.
+ else if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified &&
+ isa<RecordDecl>(Owner)) {
+ Diag(DS.getStorageClassSpecLoc(),
+ diag::err_anonymous_union_with_storage_spec)
+ << FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
+
+ // Recover by removing the storage specifier.
+ DS.SetStorageClassSpec(*this, DeclSpec::SCS_unspecified,
+ SourceLocation(),
+ PrevSpec, DiagID);
+ }
+ }
+
+ // Ignore const/volatile/restrict qualifiers.
+ if (DS.getTypeQualifiers()) {
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_const)
+ Diag(DS.getConstSpecLoc(), diag::ext_anonymous_struct_union_qualified)
+ << Record->isUnion() << 0
+ << FixItHint::CreateRemoval(DS.getConstSpecLoc());
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_volatile)
+ Diag(DS.getVolatileSpecLoc(),
+ diag::ext_anonymous_struct_union_qualified)
+ << Record->isUnion() << 1
+ << FixItHint::CreateRemoval(DS.getVolatileSpecLoc());
+ if (DS.getTypeQualifiers() & DeclSpec::TQ_restrict)
+ Diag(DS.getRestrictSpecLoc(),
+ diag::ext_anonymous_struct_union_qualified)
+ << Record->isUnion() << 2
+ << FixItHint::CreateRemoval(DS.getRestrictSpecLoc());
+
+ DS.ClearTypeQualifiers();
+ }
+
+ // C++ [class.union]p2:
+ // The member-specification of an anonymous union shall only
+ // define non-static data members. [Note: nested types and
+ // functions cannot be declared within an anonymous union. ]
+ for (DeclContext::decl_iterator Mem = Record->decls_begin(),
+ MemEnd = Record->decls_end();
+ Mem != MemEnd; ++Mem) {
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(*Mem)) {
+ // C++ [class.union]p3:
+ // An anonymous union shall not have private or protected
+ // members (clause 11).
+ assert(FD->getAccess() != AS_none);
+ if (FD->getAccess() != AS_public) {
+ Diag(FD->getLocation(), diag::err_anonymous_record_nonpublic_member)
+ << (int)Record->isUnion() << (int)(FD->getAccess() == AS_protected);
+ Invalid = true;
+ }
+
+ // C++ [class.union]p1
+ // An object of a class with a non-trivial constructor, a non-trivial
+ // copy constructor, a non-trivial destructor, or a non-trivial copy
+ // assignment operator cannot be a member of a union, nor can an
+ // array of such objects.
+ if (CheckNontrivialField(FD))
+ Invalid = true;
+ } else if ((*Mem)->isImplicit()) {
+ // Any implicit members are fine.
+ } else if (isa<TagDecl>(*Mem) && (*Mem)->getDeclContext() != Record) {
+ // This is a type that showed up in an
+ // elaborated-type-specifier inside the anonymous struct or
+ // union, but which actually declares a type outside of the
+ // anonymous struct or union. It's okay.
+ } else if (RecordDecl *MemRecord = dyn_cast<RecordDecl>(*Mem)) {
+ if (!MemRecord->isAnonymousStructOrUnion() &&
+ MemRecord->getDeclName()) {
+ // Visual C++ allows type definition in anonymous struct or union.
+ if (getLangOpts().MicrosoftExt)
+ Diag(MemRecord->getLocation(), diag::ext_anonymous_record_with_type)
+ << (int)Record->isUnion();
+ else {
+ // This is a nested type declaration.
+ Diag(MemRecord->getLocation(), diag::err_anonymous_record_with_type)
+ << (int)Record->isUnion();
+ Invalid = true;
+ }
+ }
+ } else if (isa<AccessSpecDecl>(*Mem)) {
+ // Any access specifier is fine.
+ } else {
+ // We have something that isn't a non-static data
+ // member. Complain about it.
+ unsigned DK = diag::err_anonymous_record_bad_member;
+ if (isa<TypeDecl>(*Mem))
+ DK = diag::err_anonymous_record_with_type;
+ else if (isa<FunctionDecl>(*Mem))
+ DK = diag::err_anonymous_record_with_function;
+ else if (isa<VarDecl>(*Mem))
+ DK = diag::err_anonymous_record_with_static;
+
+ // Visual C++ allows type definition in anonymous struct or union.
+ if (getLangOpts().MicrosoftExt &&
+ DK == diag::err_anonymous_record_with_type)
+ Diag((*Mem)->getLocation(), diag::ext_anonymous_record_with_type)
+ << (int)Record->isUnion();
+ else {
+ Diag((*Mem)->getLocation(), DK)
+ << (int)Record->isUnion();
+ Invalid = true;
+ }
+ }
+ }
+ }
+
+ if (!Record->isUnion() && !Owner->isRecord()) {
+ Diag(Record->getLocation(), diag::err_anonymous_struct_not_member)
+ << (int)getLangOpts().CPlusPlus;
+ Invalid = true;
+ }
+
+ // Mock up a declarator.
+ Declarator Dc(DS, Declarator::MemberContext);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
+ assert(TInfo && "couldn't build declarator info for anonymous struct/union");
+
+ // Create a declaration for this anonymous struct/union.
+ NamedDecl *Anon = 0;
+ if (RecordDecl *OwningClass = dyn_cast<RecordDecl>(Owner)) {
+ Anon = FieldDecl::Create(Context, OwningClass,
+ DS.getLocStart(),
+ Record->getLocation(),
+ /*IdentifierInfo=*/0,
+ Context.getTypeDeclType(Record),
+ TInfo,
+ /*BitWidth=*/0, /*Mutable=*/false,
+ /*HasInit=*/false);
+ Anon->setAccess(AS);
+ if (getLangOpts().CPlusPlus)
+ FieldCollector->Add(cast<FieldDecl>(Anon));
+ } else {
+ DeclSpec::SCS SCSpec = DS.getStorageClassSpec();
+ assert(SCSpec != DeclSpec::SCS_typedef &&
+ "Parser allowed 'typedef' as storage class VarDecl.");
+ VarDecl::StorageClass SC = StorageClassSpecToVarDeclStorageClass(SCSpec);
+ if (SCSpec == DeclSpec::SCS_mutable) {
+ // mutable can only appear on non-static class members, so it's always
+ // an error here
+ Diag(Record->getLocation(), diag::err_mutable_nonmember);
+ Invalid = true;
+ SC = SC_None;
+ }
+ SCSpec = DS.getStorageClassSpecAsWritten();
+ VarDecl::StorageClass SCAsWritten
+ = StorageClassSpecToVarDeclStorageClass(SCSpec);
+
+ Anon = VarDecl::Create(Context, Owner,
+ DS.getLocStart(),
+ Record->getLocation(), /*IdentifierInfo=*/0,
+ Context.getTypeDeclType(Record),
+ TInfo, SC, SCAsWritten);
+
+ // Default-initialize the implicit variable. This initialization will be
+ // trivial in almost all cases, except if a union member has an in-class
+ // initializer:
+ // union { int n = 0; };
+ ActOnUninitializedDecl(Anon, /*TypeMayContainAuto=*/false);
+ }
+ Anon->setImplicit();
+
+ // Add the anonymous struct/union object to the current
+ // context. We'll be referencing this object when we refer to one of
+ // its members.
+ Owner->addDecl(Anon);
+
+ // Inject the members of the anonymous struct/union into the owning
+ // context and into the identifier resolver chain for name lookup
+ // purposes.
+ SmallVector<NamedDecl*, 2> Chain;
+ Chain.push_back(Anon);
+
+ if (InjectAnonymousStructOrUnionMembers(*this, S, Owner, Record, AS,
+ Chain, false))
+ Invalid = true;
+
+ // Mark this as an anonymous struct/union type. Note that we do not
+ // do this until after we have already checked and injected the
+ // members of this anonymous struct/union type, because otherwise
+ // the members could be injected twice: once by DeclContext when it
+ // builds its lookup table, and once by
+ // InjectAnonymousStructOrUnionMembers.
+ Record->setAnonymousStructOrUnion(true);
+
+ if (Invalid)
+ Anon->setInvalidDecl();
+
+ return Anon;
+}
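+
+// Illustrative inputs for the checks above (all names hypothetical); the
+// diagnostics are the ones referenced in the code:
+//
+//   namespace N {
+//     union { int i; float f; };   // err_anonymous_union_not_static;
+//   }                              // recovered by inserting 'static'
+//
+//   class C {
+//     static union { int j; };     // err_anonymous_union_with_storage_spec
+//     union { private: int k; };   // err_anonymous_record_nonpublic_member
+//   };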
+
+/// BuildMicrosoftCAnonymousStruct - Handle the declaration of a
+/// Microsoft C anonymous structure.
+/// Ref: http://msdn.microsoft.com/en-us/library/z2cx9y4f.aspx
+/// Example:
+///
+/// struct A { int a; };
+/// struct B { struct A; int b; };
+///
+/// void foo() {
+/// B var;
+/// var.a = 3;
+/// }
+///
+Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
+ RecordDecl *Record) {
+
+ // If there is no Record, get the record via the typedef.
+ if (!Record)
+ Record = DS.getRepAsType().get()->getAsStructureType()->getDecl();
+
+ // Mock up a declarator.
+ Declarator Dc(DS, Declarator::TypeNameContext);
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
+ assert(TInfo && "couldn't build declarator info for anonymous struct");
+
+ // Create a declaration for this anonymous struct.
+ NamedDecl* Anon = FieldDecl::Create(Context,
+ cast<RecordDecl>(CurContext),
+ DS.getLocStart(),
+ DS.getLocStart(),
+ /*IdentifierInfo=*/0,
+ Context.getTypeDeclType(Record),
+ TInfo,
+ /*BitWidth=*/0, /*Mutable=*/false,
+ /*HasInit=*/false);
+ Anon->setImplicit();
+
+ // Add the anonymous struct object to the current context.
+ CurContext->addDecl(Anon);
+
+ // Inject the members of the anonymous struct into the current
+ // context and into the identifier resolver chain for name lookup
+ // purposes.
+ SmallVector<NamedDecl*, 2> Chain;
+ Chain.push_back(Anon);
+
+ RecordDecl *RecordDef = Record->getDefinition();
+ if (!RecordDef || InjectAnonymousStructOrUnionMembers(*this, S, CurContext,
+ RecordDef, AS_none,
+ Chain, true))
+ Anon->setInvalidDecl();
+
+ return Anon;
+}
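+
+// The typedef-based form mentioned earlier reaches this routine with a null
+// Record, which is then recovered from the DeclSpec's type. A hypothetical
+// example, valid only in C with Microsoft extensions enabled:
+//
+//   typedef struct { int a; } A_t;
+//   struct B {
+//     A_t;        // ext_ms_anonymous_struct; 'a' is injected into B
+//     int b;
+//   };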
+
+/// GetNameForDeclarator - Determine the full declaration name for the
+/// given Declarator.
+DeclarationNameInfo Sema::GetNameForDeclarator(Declarator &D) {
+ return GetNameFromUnqualifiedId(D.getName());
+}
+
+/// \brief Retrieves the declaration name from a parsed unqualified-id.
+DeclarationNameInfo
+Sema::GetNameFromUnqualifiedId(const UnqualifiedId &Name) {
+ DeclarationNameInfo NameInfo;
+ NameInfo.setLoc(Name.StartLocation);
+
+ switch (Name.getKind()) {
+
+ case UnqualifiedId::IK_ImplicitSelfParam:
+ case UnqualifiedId::IK_Identifier:
+ NameInfo.setName(Name.Identifier);
+ NameInfo.setLoc(Name.StartLocation);
+ return NameInfo;
+
+ case UnqualifiedId::IK_OperatorFunctionId:
+ NameInfo.setName(Context.DeclarationNames.getCXXOperatorName(
+ Name.OperatorFunctionId.Operator));
+ NameInfo.setLoc(Name.StartLocation);
+ NameInfo.getInfo().CXXOperatorName.BeginOpNameLoc
+ = Name.OperatorFunctionId.SymbolLocations[0];
+ NameInfo.getInfo().CXXOperatorName.EndOpNameLoc
+ = Name.EndLocation.getRawEncoding();
+ return NameInfo;
+
+ case UnqualifiedId::IK_LiteralOperatorId:
+ NameInfo.setName(Context.DeclarationNames.getCXXLiteralOperatorName(
+ Name.Identifier));
+ NameInfo.setLoc(Name.StartLocation);
+ NameInfo.setCXXLiteralOperatorNameLoc(Name.EndLocation);
+ return NameInfo;
+
+ case UnqualifiedId::IK_ConversionFunctionId: {
+ TypeSourceInfo *TInfo;
+ QualType Ty = GetTypeFromParser(Name.ConversionFunctionId, &TInfo);
+ if (Ty.isNull())
+ return DeclarationNameInfo();
+ NameInfo.setName(Context.DeclarationNames.getCXXConversionFunctionName(
+ Context.getCanonicalType(Ty)));
+ NameInfo.setLoc(Name.StartLocation);
+ NameInfo.setNamedTypeInfo(TInfo);
+ return NameInfo;
+ }
+
+ case UnqualifiedId::IK_ConstructorName: {
+ TypeSourceInfo *TInfo;
+ QualType Ty = GetTypeFromParser(Name.ConstructorName, &TInfo);
+ if (Ty.isNull())
+ return DeclarationNameInfo();
+ NameInfo.setName(Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(Ty)));
+ NameInfo.setLoc(Name.StartLocation);
+ NameInfo.setNamedTypeInfo(TInfo);
+ return NameInfo;
+ }
+
+ case UnqualifiedId::IK_ConstructorTemplateId: {
+ // In well-formed code, we can only have a constructor
+ // template-id that refers to the current context, so go there
+ // to find the actual type being constructed.
+ CXXRecordDecl *CurClass = dyn_cast<CXXRecordDecl>(CurContext);
+ if (!CurClass || CurClass->getIdentifier() != Name.TemplateId->Name)
+ return DeclarationNameInfo();
+
+ // Determine the type of the class being constructed.
+ QualType CurClassType = Context.getTypeDeclType(CurClass);
+
+ // FIXME: Check two things: that the template-id names the same type as
+ // CurClassType, and that the template-id does not occur when the name
+ // was qualified.
+
+ NameInfo.setName(Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(CurClassType)));
+ NameInfo.setLoc(Name.StartLocation);
+ // FIXME: should we retrieve TypeSourceInfo?
+ NameInfo.setNamedTypeInfo(0);
+ return NameInfo;
+ }
+
+ case UnqualifiedId::IK_DestructorName: {
+ TypeSourceInfo *TInfo;
+ QualType Ty = GetTypeFromParser(Name.DestructorName, &TInfo);
+ if (Ty.isNull())
+ return DeclarationNameInfo();
+ NameInfo.setName(Context.DeclarationNames.getCXXDestructorName(
+ Context.getCanonicalType(Ty)));
+ NameInfo.setLoc(Name.StartLocation);
+ NameInfo.setNamedTypeInfo(TInfo);
+ return NameInfo;
+ }
+
+ case UnqualifiedId::IK_TemplateId: {
+ TemplateName TName = Name.TemplateId->Template.get();
+ SourceLocation TNameLoc = Name.TemplateId->TemplateNameLoc;
+ return Context.getNameForTemplate(TName, TNameLoc);
+ }
+
+ } // switch (Name.getKind())
+
+ llvm_unreachable("Unknown name kind");
+}
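+
+// A few declarator-ids and the DeclarationName kinds produced above; 'T' and
+// the literal suffix are illustrative only. The conversion, constructor, and
+// destructor names are all keyed on the canonical type of T:
+//
+//   void f(int);                    // IK_Identifier         -> identifier 'f'
+//   T operator+(T, T);              // IK_OperatorFunctionId -> CXXOperatorName
+//   T operator"" _w(const char *);  // IK_LiteralOperatorId  -> CXXLiteralOperatorName
+//   struct T {
+//     operator int();   // IK_ConversionFunctionId -> CXXConversionFunctionName
+//     T();              // IK_ConstructorName      -> CXXConstructorName
+//     ~T();             // IK_DestructorName       -> CXXDestructorName
+//   };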
+
+static QualType getCoreType(QualType Ty) {
+ do {
+ if (Ty->isPointerType() || Ty->isReferenceType())
+ Ty = Ty->getPointeeType();
+ else if (Ty->isArrayType())
+ Ty = Ty->castAsArrayTypeUnsafe()->getElementType();
+ else
+ return Ty.withoutLocalFastQualifiers();
+ } while (true);
+}
+
+/// hasSimilarParameters - Determine whether the C++ functions Declaration
+/// and Definition have "nearly" matching parameters. This heuristic is
+/// used to improve diagnostics in the case where an out-of-line function
+/// definition doesn't match any declaration within the class or namespace.
+/// Also sets Params to the list of indices to the parameters that differ
+/// between the declaration and the definition. If hasSimilarParameters
+/// returns true and Params is empty, then all of the parameters match.
+static bool hasSimilarParameters(ASTContext &Context,
+ FunctionDecl *Declaration,
+ FunctionDecl *Definition,
+ llvm::SmallVectorImpl<unsigned> &Params) {
+ Params.clear();
+ if (Declaration->param_size() != Definition->param_size())
+ return false;
+ for (unsigned Idx = 0; Idx < Declaration->param_size(); ++Idx) {
+ QualType DeclParamTy = Declaration->getParamDecl(Idx)->getType();
+ QualType DefParamTy = Definition->getParamDecl(Idx)->getType();
+
+ // The parameter types are identical
+ if (Context.hasSameType(DefParamTy, DeclParamTy))
+ continue;
+
+ QualType DeclParamBaseTy = getCoreType(DeclParamTy);
+ QualType DefParamBaseTy = getCoreType(DefParamTy);
+ const IdentifierInfo *DeclTyName = DeclParamBaseTy.getBaseTypeIdentifier();
+ const IdentifierInfo *DefTyName = DefParamBaseTy.getBaseTypeIdentifier();
+
+ if (Context.hasSameUnqualifiedType(DeclParamBaseTy, DefParamBaseTy) ||
+ (DeclTyName && DeclTyName == DefTyName))
+ Params.push_back(Idx);
+ else // The two parameters aren't even close
+ return false;
+ }
+
+ return true;
+}
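+
+// Hypothetical example of the heuristic: the out-of-line definition below
+// matches no declaration of S::f exactly, but parameter 0 differs only in
+// how it refers to X, so this returns true with Params == {0} and the caller
+// can point at the near-miss instead of a generic "no member" error.
+//
+//   struct X {};
+//   struct S { void f(X *p); };
+//   void S::f(X &p) {}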
+
+/// RebuildDeclaratorInCurrentInstantiation - Checks whether the given
+/// declarator needs to be rebuilt in the current instantiation and, if so,
+/// rebuilds it in place, returning true on error.
+/// Any bits of declarator which appear before the name are valid for
+/// consideration here. That's specifically the type in the decl spec
+/// and the base type in any member-pointer chunks.
+static bool RebuildDeclaratorInCurrentInstantiation(Sema &S, Declarator &D,
+ DeclarationName Name) {
+ // The types we specifically need to rebuild are:
+ // - typenames, typeofs, and decltypes
+ // - types which will become injected class names
+ // Of course, we also need to rebuild any type referencing such a
+ // type. It's safest to just say "dependent", but we call out a
+ // few cases here.
+
+ DeclSpec &DS = D.getMutableDeclSpec();
+ switch (DS.getTypeSpecType()) {
+ case DeclSpec::TST_typename:
+ case DeclSpec::TST_typeofType:
+ case DeclSpec::TST_decltype:
+ case DeclSpec::TST_underlyingType:
+ case DeclSpec::TST_atomic: {
+ // Grab the type from the parser.
+ TypeSourceInfo *TSI = 0;
+ QualType T = S.GetTypeFromParser(DS.getRepAsType(), &TSI);
+ if (T.isNull() || !T->isDependentType()) break;
+
+ // Make sure there's a type source info. This isn't really much
+ // of a waste; most dependent types should have type source info
+ // attached already.
+ if (!TSI)
+ TSI = S.Context.getTrivialTypeSourceInfo(T, DS.getTypeSpecTypeLoc());
+
+ // Rebuild the type in the current instantiation.
+ TSI = S.RebuildTypeInCurrentInstantiation(TSI, D.getIdentifierLoc(), Name);
+ if (!TSI) return true;
+
+ // Store the new type back in the decl spec.
+ ParsedType LocType = S.CreateParsedType(TSI->getType(), TSI);
+ DS.UpdateTypeRep(LocType);
+ break;
+ }
+
+ case DeclSpec::TST_typeofExpr: {
+ Expr *E = DS.getRepAsExpr();
+ ExprResult Result = S.RebuildExprInCurrentInstantiation(E);
+ if (Result.isInvalid()) return true;
+ DS.UpdateExprRep(Result.get());
+ break;
+ }
+
+ default:
+ // Nothing to do for these decl specs.
+ break;
+ }
+
+ // It doesn't matter what order we do this in.
+ for (unsigned I = 0, E = D.getNumTypeObjects(); I != E; ++I) {
+ DeclaratorChunk &Chunk = D.getTypeObject(I);
+
+ // The only type information in the declarator which can come
+ // before the declaration name is the base type of a member
+ // pointer.
+ if (Chunk.Kind != DeclaratorChunk::MemberPointer)
+ continue;
+
+ // Rebuild the scope specifier in-place.
+ CXXScopeSpec &SS = Chunk.Mem.Scope();
+ if (S.RebuildNestedNameSpecifierInCurrentInstantiation(SS))
+ return true;
+ }
+
+ return false;
+}
+
+Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
+ D.setFunctionDefinitionKind(FDK_Declaration);
+ Decl *Dcl = HandleDeclarator(S, D, MultiTemplateParamsArg(*this));
+
+ if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer() &&
+ Dcl->getDeclContext()->isFileContext())
+ Dcl->setTopLevelDeclInObjCContainer();
+
+ return Dcl;
+}
+
+/// DiagnoseClassNameShadow - Implement C++ [class.mem]p13:
+/// If T is the name of a class, then each of the following shall have a
+/// name different from T:
+/// - every static data member of class T;
+/// - every member function of class T
+/// - every member of class T that is itself a type;
+/// \returns true if the declaration name violates these rules.
+bool Sema::DiagnoseClassNameShadow(DeclContext *DC,
+ DeclarationNameInfo NameInfo) {
+ DeclarationName Name = NameInfo.getName();
+
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC))
+ if (Record->getIdentifier() && Record->getDeclName() == Name) {
+ Diag(NameInfo.getLoc(), diag::err_member_name_of_class) << Name;
+ return true;
+ }
+
+ return false;
+}
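+
+// C++ [class.mem]p13 in practice (class names illustrative):
+//
+//   class X {
+//     int X;            // err_member_name_of_class
+//   };
+//
+//   class Y {
+//     typedef int Y;    // also err_member_name_of_class; for a typedef the
+//   };                  // declaration is then dropped (see HandleDeclarator)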
+
+/// \brief Diagnose a declaration whose declarator-id has the given
+/// nested-name-specifier.
+///
+/// \param SS The nested-name-specifier of the declarator-id.
+///
+/// \param DC The declaration context to which the nested-name-specifier
+/// resolves.
+///
+/// \param Name The name of the entity being declared.
+///
+/// \param Loc The location of the name of the entity being declared.
+///
+/// \returns true if we cannot safely recover from this error, false otherwise.
+bool Sema::diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
+ DeclarationName Name,
+ SourceLocation Loc) {
+ DeclContext *Cur = CurContext;
+ while (isa<LinkageSpecDecl>(Cur))
+ Cur = Cur->getParent();
+
+ // C++ [dcl.meaning]p1:
+ // A declarator-id shall not be qualified except for the definition
+ // of a member function (9.3) or static data member (9.4) outside of
+ // its class, the definition or explicit instantiation of a function
+ // or variable member of a namespace outside of its namespace, or the
+ // definition of an explicit specialization outside of its namespace,
+ // or the declaration of a friend function that is a member of
+ // another class or namespace (11.3). [...]
+
+ // The user provided a superfluous scope specifier that refers back to the
+ // class or namespaces in which the entity is already declared.
+ //
+ // class X {
+ // void X::f();
+ // };
+ if (Cur->Equals(DC)) {
+ Diag(Loc, diag::warn_member_extra_qualification)
+ << Name << FixItHint::CreateRemoval(SS.getRange());
+ SS.clear();
+ return false;
+ }
+
+ // Check whether the qualifying scope encloses the scope of the original
+ // declaration.
+ if (!Cur->Encloses(DC)) {
+ if (Cur->isRecord())
+ Diag(Loc, diag::err_member_qualification)
+ << Name << SS.getRange();
+ else if (isa<TranslationUnitDecl>(DC))
+ Diag(Loc, diag::err_invalid_declarator_global_scope)
+ << Name << SS.getRange();
+ else if (isa<FunctionDecl>(Cur))
+ Diag(Loc, diag::err_invalid_declarator_in_function)
+ << Name << SS.getRange();
+ else
+ Diag(Loc, diag::err_invalid_declarator_scope)
+ << Name << cast<NamedDecl>(Cur) << cast<NamedDecl>(DC) << SS.getRange();
+
+ return true;
+ }
+
+ if (Cur->isRecord()) {
+ // Cannot qualify members within a class.
+ Diag(Loc, diag::err_member_qualification)
+ << Name << SS.getRange();
+ SS.clear();
+
+ // C++ constructors and destructors with incorrect scopes can break
+ // our AST invariants by having the wrong underlying types. If
+ // that's the case, then drop this declaration entirely.
+ if ((Name.getNameKind() == DeclarationName::CXXConstructorName ||
+ Name.getNameKind() == DeclarationName::CXXDestructorName) &&
+ !Context.hasSameType(Name.getCXXNameType(),
+ Context.getTypeDeclType(cast<CXXRecordDecl>(Cur))))
+ return true;
+
+ return false;
+ }
+
+ // C++11 [dcl.meaning]p1:
+ // [...] "The nested-name-specifier of the qualified declarator-id shall
+ // not begin with a decltype-specifier"
+ NestedNameSpecifierLoc SpecLoc(SS.getScopeRep(), SS.location_data());
+ while (SpecLoc.getPrefix())
+ SpecLoc = SpecLoc.getPrefix();
+ if (dyn_cast_or_null<DecltypeType>(
+ SpecLoc.getNestedNameSpecifier()->getAsType()))
+ Diag(Loc, diag::err_decltype_in_declarator)
+ << SpecLoc.getTypeLoc().getSourceRange();
+
+ return false;
+}
+
+Decl *Sema::HandleDeclarator(Scope *S, Declarator &D,
+ MultiTemplateParamsArg TemplateParamLists) {
+ // TODO: consider using NameInfo for diagnostic.
+ DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
+ DeclarationName Name = NameInfo.getName();
+
+ // All of these full declarators require an identifier. If it doesn't have
+ // one, the ParsedFreeStandingDeclSpec action should be used.
+ if (!Name) {
+ if (!D.isInvalidType()) // Reject this if we think it is valid.
+ Diag(D.getDeclSpec().getLocStart(),
+ diag::err_declarator_need_ident)
+ << D.getDeclSpec().getSourceRange() << D.getSourceRange();
+ return 0;
+ } else if (DiagnoseUnexpandedParameterPack(NameInfo, UPPC_DeclarationType))
+ return 0;
+
+ // The scope passed in may not be a decl scope. Zip up the scope tree until
+ // we find one that is.
+ while ((S->getFlags() & Scope::DeclScope) == 0 ||
+ (S->getFlags() & Scope::TemplateParamScope) != 0)
+ S = S->getParent();
+
+ DeclContext *DC = CurContext;
+ if (D.getCXXScopeSpec().isInvalid())
+ D.setInvalidType();
+ else if (D.getCXXScopeSpec().isSet()) {
+ if (DiagnoseUnexpandedParameterPack(D.getCXXScopeSpec(),
+ UPPC_DeclarationQualifier))
+ return 0;
+
+ bool EnteringContext = !D.getDeclSpec().isFriendSpecified();
+ DC = computeDeclContext(D.getCXXScopeSpec(), EnteringContext);
+ if (!DC) {
+ // If we could not compute the declaration context, it's because the
+ // declaration context is dependent but does not refer to a class,
+ // class template, or class template partial specialization. Complain
+ // and return early, to avoid the coming semantic disaster.
+ Diag(D.getIdentifierLoc(),
+ diag::err_template_qualified_declarator_no_match)
+ << (NestedNameSpecifier*)D.getCXXScopeSpec().getScopeRep()
+ << D.getCXXScopeSpec().getRange();
+ return 0;
+ }
+ bool IsDependentContext = DC->isDependentContext();
+
+ if (!IsDependentContext &&
+ RequireCompleteDeclContext(D.getCXXScopeSpec(), DC))
+ return 0;
+
+ if (isa<CXXRecordDecl>(DC) && !cast<CXXRecordDecl>(DC)->hasDefinition()) {
+ Diag(D.getIdentifierLoc(),
+ diag::err_member_def_undefined_record)
+ << Name << DC << D.getCXXScopeSpec().getRange();
+ D.setInvalidType();
+ } else if (!D.getDeclSpec().isFriendSpecified()) {
+ if (diagnoseQualifiedDeclaration(D.getCXXScopeSpec(), DC,
+ Name, D.getIdentifierLoc())) {
+ if (DC->isRecord())
+ return 0;
+
+ D.setInvalidType();
+ }
+ }
+
+ // Check whether we need to rebuild the type of the given
+ // declaration in the current instantiation.
+ if (EnteringContext && IsDependentContext &&
+ TemplateParamLists.size() != 0) {
+ ContextRAII SavedContext(*this, DC);
+ if (RebuildDeclaratorInCurrentInstantiation(*this, D, Name))
+ D.setInvalidType();
+ }
+ }
+
+ if (DiagnoseClassNameShadow(DC, NameInfo))
+ // If this is a typedef, we'll end up spewing multiple diagnostics.
+ // Just return early; it's safer.
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
+ return 0;
+
+ NamedDecl *New;
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType R = TInfo->getType();
+
+ if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
+ UPPC_DeclarationType))
+ D.setInvalidType();
+
+ LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
+ ForRedeclaration);
+
+ // See if this is a redefinition of a variable in the same scope.
+ if (!D.getCXXScopeSpec().isSet()) {
+ bool IsLinkageLookup = false;
+
+ // If the declaration we're planning to build will be a function
+ // or object with linkage, then look for another declaration with
+ // linkage (C99 6.2.2p4-5 and C++ [basic.link]p6).
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
+ /* Do nothing*/;
+ else if (R->isFunctionType()) {
+ if (CurContext->isFunctionOrMethod() ||
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static)
+ IsLinkageLookup = true;
+ } else if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_extern)
+ IsLinkageLookup = true;
+ else if (CurContext->getRedeclContext()->isTranslationUnit() &&
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static)
+ IsLinkageLookup = true;
+
+ if (IsLinkageLookup)
+ Previous.clear(LookupRedeclarationWithLinkage);
+
+ LookupName(Previous, S, /* CreateBuiltins = */ IsLinkageLookup);
+ } else { // Something like "int foo::x;"
+ LookupQualifiedName(Previous, DC);
+
+ // C++ [dcl.meaning]p1:
+ // When the declarator-id is qualified, the declaration shall refer to a
+ // previously declared member of the class or namespace to which the
+ // qualifier refers (or, in the case of a namespace, of an element of the
+ // inline namespace set of that namespace (7.3.1)) or to a specialization
+ // thereof; [...]
+ //
+ // Note that we already checked the context above, and that we do not have
+ // enough information to make sure that Previous contains the declaration
+ // we want to match. For example, given:
+ //
+ // class X {
+ // void f();
+ // void f(float);
+ // };
+ //
+ // void X::f(int) { } // ill-formed
+ //
+ // In this case, Previous will point to the overload set
+ // containing the two f's declared in X, but neither of them
+ // matches.
+
+ // C++ [dcl.meaning]p1:
+ // [...] the member shall not merely have been introduced by a
+ // using-declaration in the scope of the class or namespace nominated by
+ // the nested-name-specifier of the declarator-id.
+ RemoveUsingDecls(Previous);
+ }
+
+ if (Previous.isSingleResult() &&
+ Previous.getFoundDecl()->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ if (!D.isInvalidType())
+ DiagnoseTemplateParameterShadow(D.getIdentifierLoc(),
+ Previous.getFoundDecl());
+
+ // Just pretend that we didn't see the previous declaration.
+ Previous.clear();
+ }
+
+ // In C++, the previous declaration we find might be a tag type
+ // (class or enum). In this case, the new declaration will hide the
+ // tag type. Note that this does not apply if we're declaring a
+ // typedef (C++ [dcl.typedef]p4).
+ if (Previous.isSingleTagDecl() &&
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef)
+ Previous.clear();
+
+ bool AddToScope = true;
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ if (TemplateParamLists.size()) {
+ Diag(D.getIdentifierLoc(), diag::err_template_typedef);
+ return 0;
+ }
+
+ New = ActOnTypedefDeclarator(S, D, DC, TInfo, Previous);
+ } else if (R->isFunctionType()) {
+ New = ActOnFunctionDeclarator(S, D, DC, TInfo, Previous,
+ move(TemplateParamLists),
+ AddToScope);
+ } else {
+ New = ActOnVariableDeclarator(S, D, DC, TInfo, Previous,
+ move(TemplateParamLists));
+ }
+
+ if (New == 0)
+ return 0;
+
+ // If this has an identifier and is not an invalid redeclaration or
+ // function template specialization, add it to the scope stack.
+ if (New->getDeclName() && AddToScope &&
+ !(D.isRedeclaration() && New->isInvalidDecl()))
+ PushOnScopeChains(New, S);
+
+ return New;
+}
+
+/// TryToFixInvalidVariablyModifiedType - Helper method to turn variable array
+/// types into constant array types in certain situations which would otherwise
+/// be errors (for GCC compatibility).
+static QualType TryToFixInvalidVariablyModifiedType(QualType T,
+ ASTContext &Context,
+ bool &SizeIsNegative,
+ llvm::APSInt &Oversized) {
+ // This method tries to turn a variable array into a constant
+ // array even when the size isn't an ICE. This is necessary
+ // for compatibility with code that depends on gcc's buggy
+ // constant expression folding, like struct {char x[(int)(char*)2];}
+ SizeIsNegative = false;
+ Oversized = 0;
+
+ if (T->isDependentType())
+ return QualType();
+
+ QualifierCollector Qs;
+ const Type *Ty = Qs.strip(T);
+
+ if (const PointerType* PTy = dyn_cast<PointerType>(Ty)) {
+ QualType Pointee = PTy->getPointeeType();
+ QualType FixedType =
+ TryToFixInvalidVariablyModifiedType(Pointee, Context, SizeIsNegative,
+ Oversized);
+ if (FixedType.isNull()) return FixedType;
+ FixedType = Context.getPointerType(FixedType);
+ return Qs.apply(Context, FixedType);
+ }
+ if (const ParenType* PTy = dyn_cast<ParenType>(Ty)) {
+ QualType Inner = PTy->getInnerType();
+ QualType FixedType =
+ TryToFixInvalidVariablyModifiedType(Inner, Context, SizeIsNegative,
+ Oversized);
+ if (FixedType.isNull()) return FixedType;
+ FixedType = Context.getParenType(FixedType);
+ return Qs.apply(Context, FixedType);
+ }
+
+ const VariableArrayType* VLATy = dyn_cast<VariableArrayType>(T);
+ if (!VLATy)
+ return QualType();
+ // FIXME: We should probably handle this case
+ if (VLATy->getElementType()->isVariablyModifiedType())
+ return QualType();
+
+ llvm::APSInt Res;
+ if (!VLATy->getSizeExpr() ||
+ !VLATy->getSizeExpr()->EvaluateAsInt(Res, Context))
+ return QualType();
+
+ // Check whether the array size is negative.
+ if (Res.isSigned() && Res.isNegative()) {
+ SizeIsNegative = true;
+ return QualType();
+ }
+
+ // Check whether the array is too large to be addressed.
+ unsigned ActiveSizeBits
+ = ConstantArrayType::getNumAddressingBits(Context, VLATy->getElementType(),
+ Res);
+ if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) {
+ Oversized = Res;
+ return QualType();
+ }
+
+ return Context.getConstantArrayType(VLATy->getElementType(),
+ Res, ArrayType::Normal, 0);
+}
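+
+// Sketch of the fixup: a size that is not an ICE but still folds to a
+// non-negative, addressable constant becomes a ConstantArrayType; anything
+// else is left as-is and reported through the SizeIsNegative / Oversized
+// out parameters for the caller to diagnose.
+//
+//   char x[(int)(char*)2];   // folds to 2 -> fixed to char[2]
+//   extern int n;
+//   char y[n];               // genuine VLA, no fold -> QualType()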
+
+/// \brief Register the given locally-scoped external C declaration so
+/// that it can be found later for redeclarations
+void
+Sema::RegisterLocallyScopedExternCDecl(NamedDecl *ND,
+ const LookupResult &Previous,
+ Scope *S) {
+ assert(ND->getLexicalDeclContext()->isFunctionOrMethod() &&
+ "Decl is not a locally-scoped decl!");
+ // Note that we have a locally-scoped external with this name.
+ LocallyScopedExternalDecls[ND->getDeclName()] = ND;
+
+ if (!Previous.isSingleResult())
+ return;
+
+ NamedDecl *PrevDecl = Previous.getFoundDecl();
+
+ // If there was a previous declaration of this variable, it may be
+ // in our identifier chain. Update the identifier chain with the new
+ // declaration.
+ if (S && IdResolver.ReplaceDecl(PrevDecl, ND)) {
+ // The previous declaration was found on the identifier resolver
+ // chain, so remove it from its scope.
+
+ if (S->isDeclScope(PrevDecl)) {
+ // Special case for redeclarations in the SAME scope.
+ // Because this declaration is going to be added to the identifier chain
+ // later, we should temporarily take it OFF the chain.
+ IdResolver.RemoveDecl(ND);
+
+ } else {
+ // Find the scope for the original declaration.
+ while (S && !S->isDeclScope(PrevDecl))
+ S = S->getParent();
+ }
+
+ if (S)
+ S->RemoveDecl(PrevDecl);
+ }
+}
+
+llvm::DenseMap<DeclarationName, NamedDecl *>::iterator
+Sema::findLocallyScopedExternalDecl(DeclarationName Name) {
+ if (ExternalSource) {
+ // Load locally-scoped external decls from the external source.
+ SmallVector<NamedDecl *, 4> Decls;
+ ExternalSource->ReadLocallyScopedExternalDecls(Decls);
+ for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
+ llvm::DenseMap<DeclarationName, NamedDecl *>::iterator Pos
+ = LocallyScopedExternalDecls.find(Decls[I]->getDeclName());
+ if (Pos == LocallyScopedExternalDecls.end())
+ LocallyScopedExternalDecls[Decls[I]->getDeclName()] = Decls[I];
+ }
+ }
+
+ return LocallyScopedExternalDecls.find(Name);
+}
+
+/// \brief Diagnose function specifiers on a declaration of an identifier that
+/// does not identify a function.
+void Sema::DiagnoseFunctionSpecifiers(Declarator& D) {
+ // FIXME: We should probably indicate the identifier in question to avoid
+ // confusion for constructs like "inline int a(), b;"
+ if (D.getDeclSpec().isInlineSpecified())
+ Diag(D.getDeclSpec().getInlineSpecLoc(),
+ diag::err_inline_non_function);
+
+ if (D.getDeclSpec().isVirtualSpecified())
+ Diag(D.getDeclSpec().getVirtualSpecLoc(),
+ diag::err_virtual_non_function);
+
+ if (D.getDeclSpec().isExplicitSpecified())
+ Diag(D.getDeclSpec().getExplicitSpecLoc(),
+ diag::err_explicit_non_function);
+}
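+
+// What this rejects, illustrated (declarator names hypothetical); 'explicit'
+// on a non-function gets the analogous err_explicit_non_function:
+//
+//   inline int i;                  // err_inline_non_function
+//   struct S { virtual int m; };   // err_virtual_non_function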
+
+NamedDecl*
+Sema::ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
+ TypeSourceInfo *TInfo, LookupResult &Previous) {
+ // Typedef declarators cannot be qualified (C++ [dcl.meaning]p1).
+ if (D.getCXXScopeSpec().isSet()) {
+ Diag(D.getIdentifierLoc(), diag::err_qualified_typedef_declarator)
+ << D.getCXXScopeSpec().getRange();
+ D.setInvalidType();
+ // Pretend we didn't see the scope specifier.
+ DC = CurContext;
+ Previous.clear();
+ }
+
+ if (getLangOpts().CPlusPlus) {
+ // Check that there are no default arguments (C++ only).
+ CheckExtraCXXDefaultArguments(D);
+ }
+
+ DiagnoseFunctionSpecifiers(D);
+
+ if (D.getDeclSpec().isThreadSpecified())
+ Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_invalid_thread);
+ if (D.getDeclSpec().isConstexprSpecified())
+ Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_invalid_constexpr)
+ << 1;
+
+ if (D.getName().Kind != UnqualifiedId::IK_Identifier) {
+ Diag(D.getName().StartLocation, diag::err_typedef_not_identifier)
+ << D.getName().getSourceRange();
+ return 0;
+ }
+
+ TypedefDecl *NewTD = ParseTypedefDecl(S, D, TInfo->getType(), TInfo);
+ if (!NewTD) return 0;
+
+ // Handle attributes prior to checking for duplicates in MergeVarDecl
+ ProcessDeclAttributes(S, NewTD, D);
+
+ CheckTypedefForVariablyModifiedType(S, NewTD);
+
+ bool Redeclaration = D.isRedeclaration();
+ NamedDecl *ND = ActOnTypedefNameDecl(S, DC, NewTD, Previous, Redeclaration);
+ D.setRedeclaration(Redeclaration);
+ return ND;
+}
+
+void
+Sema::CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *NewTD) {
+ // C99 6.7.7p2: If a typedef name specifies a variably modified type
+ // then it shall have block scope.
+ // Note that variably modified types must be fixed before merging the decl so
+ // that redeclarations will match.
+ QualType T = NewTD->getUnderlyingType();
+ if (T->isVariablyModifiedType()) {
+ getCurFunction()->setHasBranchProtectedScope();
+
+ if (S->getFnParent() == 0) {
+ bool SizeIsNegative;
+ llvm::APSInt Oversized;
+ QualType FixedTy =
+ TryToFixInvalidVariablyModifiedType(T, Context, SizeIsNegative,
+ Oversized);
+ if (!FixedTy.isNull()) {
+ Diag(NewTD->getLocation(), diag::warn_illegal_constant_array_size);
+ NewTD->setTypeSourceInfo(Context.getTrivialTypeSourceInfo(FixedTy));
+ } else {
+ if (SizeIsNegative)
+ Diag(NewTD->getLocation(), diag::err_typecheck_negative_array_size);
+ else if (T->isVariableArrayType())
+ Diag(NewTD->getLocation(), diag::err_vla_decl_in_file_scope);
+ else if (Oversized.getBoolValue())
+ Diag(NewTD->getLocation(), diag::err_array_too_large)
+ << Oversized.toString(10);
+ else
+ Diag(NewTD->getLocation(), diag::err_vm_decl_in_file_scope);
+ NewTD->setInvalidDecl();
+ }
+ }
+ }
+}
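+
+// Sketch of C99 6.7.7p2 as enforced above (typedef names hypothetical):
+//
+//   void f(int n) {
+//     typedef int VLA[n];    // block scope: fine, scope is branch-protected
+//   }
+//
+//   extern int n;
+//   typedef int  BadVLA[n];              // file scope: err_vla_decl_in_file_scope
+//   typedef char Fixed[(int)(char*)2];   // folds to 2: fixed to char[2] with
+//                                        // warn_illegal_constant_array_size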
+
+
+/// ActOnTypedefNameDecl - Perform semantic checking for a declaration which
+/// declares a typedef-name, either using the 'typedef' type specifier or via
+/// a C++0x [dcl.typedef]p2 alias-declaration: 'using T = A;'.
+NamedDecl*
+Sema::ActOnTypedefNameDecl(Scope *S, DeclContext *DC, TypedefNameDecl *NewTD,
+ LookupResult &Previous, bool &Redeclaration) {
+ // Merge the decl with the existing one if appropriate. If the decl is
+ // in an outer scope, it isn't the same thing.
+ FilterLookupForScope(Previous, DC, S, /*ConsiderLinkage*/ false,
+ /*ExplicitInstantiationOrSpecialization=*/false);
+ if (!Previous.empty()) {
+ Redeclaration = true;
+ MergeTypedefNameDecl(NewTD, Previous);
+ }
+
+ // If this is the C FILE type, notify the AST context.
+ if (IdentifierInfo *II = NewTD->getIdentifier())
+ if (!NewTD->isInvalidDecl() &&
+ NewTD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
+ if (II->isStr("FILE"))
+ Context.setFILEDecl(NewTD);
+ else if (II->isStr("jmp_buf"))
+ Context.setjmp_bufDecl(NewTD);
+ else if (II->isStr("sigjmp_buf"))
+ Context.setsigjmp_bufDecl(NewTD);
+ else if (II->isStr("ucontext_t"))
+ Context.setucontext_tDecl(NewTD);
+ else if (II->isStr("__builtin_va_list"))
+ Context.setBuiltinVaListType(Context.getTypedefType(NewTD));
+ }
+
+ return NewTD;
+}
+
+/// \brief Determines whether the given declaration is an out-of-scope
+/// previous declaration.
+///
+/// This routine should be invoked when name lookup has found a
+/// previous declaration (PrevDecl) that is not in the scope where a
+/// new declaration by the same name is being introduced. If the new
+/// declaration occurs in a local scope, previous declarations with
+/// linkage may still be considered previous declarations (C99
+/// 6.2.2p4-5, C++ [basic.link]p6).
+///
+/// \param PrevDecl the previous declaration found by name
+/// lookup
+///
+/// \param DC the context in which the new declaration is being
+/// declared.
+///
+/// \returns true if PrevDecl is an out-of-scope previous declaration
+/// for a new declaration with the same name.
+static bool
+isOutOfScopePreviousDeclaration(NamedDecl *PrevDecl, DeclContext *DC,
+ ASTContext &Context) {
+ if (!PrevDecl)
+ return false;
+
+ if (!PrevDecl->hasLinkage())
+ return false;
+
+ if (Context.getLangOpts().CPlusPlus) {
+ // C++ [basic.link]p6:
+ // If there is a visible declaration of an entity with linkage
+ // having the same name and type, ignoring entities declared
+ // outside the innermost enclosing namespace scope, the block
+ // scope declaration declares that same entity and receives the
+ // linkage of the previous declaration.
+ DeclContext *OuterContext = DC->getRedeclContext();
+ if (!OuterContext->isFunctionOrMethod())
+ // This rule only applies to block-scope declarations.
+ return false;
+
+ DeclContext *PrevOuterContext = PrevDecl->getDeclContext();
+ if (PrevOuterContext->isRecord())
+ // We found a member function: ignore it.
+ return false;
+
+ // Find the innermost enclosing namespace for the new and
+ // previous declarations.
+ OuterContext = OuterContext->getEnclosingNamespaceContext();
+ PrevOuterContext = PrevOuterContext->getEnclosingNamespaceContext();
+
+ // The previous declaration is in a different namespace, so it
+ // isn't the same function.
+ if (!OuterContext->Equals(PrevOuterContext))
+ return false;
+ }
+
+ return true;
+}
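+
+// The C++ [basic.link]p6 rule this encodes, with hypothetical names; the
+// using-directive is only there so unqualified lookup can find M::h at all:
+//
+//   void g();                    // ::g, external linkage
+//   namespace M { void h(); }
+//   using namespace M;
+//
+//   void caller() {
+//     extern void g();   // out-of-scope previous declaration of ::g
+//     extern void h();   // M::h is in a different enclosing namespace, so
+//   }                    // this is not treated as a redeclaration of it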
+
+static void SetNestedNameSpecifier(DeclaratorDecl *DD, Declarator &D) {
+ CXXScopeSpec &SS = D.getCXXScopeSpec();
+ if (!SS.isSet()) return;
+ DD->setQualifierInfo(SS.getWithLocInContext(DD->getASTContext()));
+}
+
+bool Sema::inferObjCARCLifetime(ValueDecl *decl) {
+ QualType type = decl->getType();
+ Qualifiers::ObjCLifetime lifetime = type.getObjCLifetime();
+ if (lifetime == Qualifiers::OCL_Autoreleasing) {
+ // Various kinds of declaration aren't allowed to be __autoreleasing.
+ unsigned kind = -1U;
+ if (VarDecl *var = dyn_cast<VarDecl>(decl)) {
+ if (var->hasAttr<BlocksAttr>())
+ kind = 0; // __block
+ else if (!var->hasLocalStorage())
+ kind = 1; // global
+ } else if (isa<ObjCIvarDecl>(decl)) {
+ kind = 3; // ivar
+ } else if (isa<FieldDecl>(decl)) {
+ kind = 2; // field
+ }
+
+ if (kind != -1U) {
+ Diag(decl->getLocation(), diag::err_arc_autoreleasing_var)
+ << kind;
+ }
+ } else if (lifetime == Qualifiers::OCL_None) {
+ // Try to infer lifetime.
+ if (!type->isObjCLifetimeType())
+ return false;
+
+ lifetime = type->getObjCARCImplicitLifetime();
+ type = Context.getLifetimeQualifiedType(type, lifetime);
+ decl->setType(type);
+ }
+
+ if (VarDecl *var = dyn_cast<VarDecl>(decl)) {
+ // Thread-local variables cannot have lifetime.
+ if (lifetime && lifetime != Qualifiers::OCL_ExplicitNone &&
+ var->isThreadSpecified()) {
+ Diag(var->getLocation(), diag::err_arc_thread_ownership)
+ << var->getType();
+ return true;
+ }
+ }
+
+ return false;
+}
+
+NamedDecl*
+Sema::ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
+ TypeSourceInfo *TInfo, LookupResult &Previous,
+ MultiTemplateParamsArg TemplateParamLists) {
+ QualType R = TInfo->getType();
+ DeclarationName Name = GetNameForDeclarator(D).getName();
+
+ // Check that there are no default arguments (C++ only).
+ if (getLangOpts().CPlusPlus)
+ CheckExtraCXXDefaultArguments(D);
+
+ DeclSpec::SCS SCSpec = D.getDeclSpec().getStorageClassSpec();
+ assert(SCSpec != DeclSpec::SCS_typedef &&
+ "Parser allowed 'typedef' as storage class VarDecl.");
+ VarDecl::StorageClass SC = StorageClassSpecToVarDeclStorageClass(SCSpec);
+ if (SCSpec == DeclSpec::SCS_mutable) {
+ // mutable can only appear on non-static class members, so it's always
+ // an error here
+ Diag(D.getIdentifierLoc(), diag::err_mutable_nonmember);
+ D.setInvalidType();
+ SC = SC_None;
+ }
+ SCSpec = D.getDeclSpec().getStorageClassSpecAsWritten();
+ VarDecl::StorageClass SCAsWritten
+ = StorageClassSpecToVarDeclStorageClass(SCSpec);
+
+ IdentifierInfo *II = Name.getAsIdentifierInfo();
+ if (!II) {
+ Diag(D.getIdentifierLoc(), diag::err_bad_variable_name)
+ << Name;
+ return 0;
+ }
+
+ DiagnoseFunctionSpecifiers(D);
+
+ if (!DC->isRecord() && S->getFnParent() == 0) {
+ // C99 6.9p2: The storage-class specifiers auto and register shall not
+ // appear in the declaration specifiers in an external declaration.
+ if (SC == SC_Auto || SC == SC_Register) {
+
+ // If this is a register variable with an asm label specified, then this
+ // is a GNU extension.
+ if (SC == SC_Register && D.getAsmLabel())
+ Diag(D.getIdentifierLoc(), diag::err_unsupported_global_register);
+ else
+ Diag(D.getIdentifierLoc(), diag::err_typecheck_sclass_fscope);
+ D.setInvalidType();
+ }
+ }
+
+ if (getLangOpts().OpenCL) {
+ // Set up the special work-group-local storage class for variables in the
+ // OpenCL __local address space.
+ if (R.getAddressSpace() == LangAS::opencl_local)
+ SC = SC_OpenCLWorkGroupLocal;
+ }
+
+ bool isExplicitSpecialization = false;
+ VarDecl *NewVD;
+ if (!getLangOpts().CPlusPlus) {
+ NewVD = VarDecl::Create(Context, DC, D.getLocStart(),
+ D.getIdentifierLoc(), II,
+ R, TInfo, SC, SCAsWritten);
+
+ if (D.isInvalidType())
+ NewVD->setInvalidDecl();
+ } else {
+ if (DC->isRecord() && !CurContext->isRecord()) {
+ // This is an out-of-line definition of a static data member.
+ if (SC == SC_Static) {
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::err_static_out_of_line)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
+ } else if (SC == SC_None)
+ SC = SC_Static;
+ }
+ if (SC == SC_Static && CurContext->isRecord()) {
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
+ if (RD->isLocalClass())
+ Diag(D.getIdentifierLoc(),
+ diag::err_static_data_member_not_allowed_in_local_class)
+ << Name << RD->getDeclName();
+
+ // C++98 [class.union]p1: If a union contains a static data member,
+ // the program is ill-formed. C++11 drops this restriction.
+ if (RD->isUnion())
+ Diag(D.getIdentifierLoc(),
+ getLangOpts().CPlusPlus0x
+ ? diag::warn_cxx98_compat_static_data_member_in_union
+ : diag::ext_static_data_member_in_union) << Name;
+ // We conservatively disallow static data members in anonymous structs.
+ else if (!RD->getDeclName())
+ Diag(D.getIdentifierLoc(),
+ diag::err_static_data_member_not_allowed_in_anon_struct)
+ << Name << RD->isUnion();
+ }
+ }
+
+ // Match up the template parameter lists with the scope specifier, then
+ // determine whether we have a template or a template specialization.
+ isExplicitSpecialization = false;
+ bool Invalid = false;
+ if (TemplateParameterList *TemplateParams
+ = MatchTemplateParametersToScopeSpecifier(
+ D.getDeclSpec().getLocStart(),
+ D.getIdentifierLoc(),
+ D.getCXXScopeSpec(),
+ TemplateParamLists.get(),
+ TemplateParamLists.size(),
+ /*never a friend*/ false,
+ isExplicitSpecialization,
+ Invalid)) {
+ if (TemplateParams->size() > 0) {
+ // There is no such thing as a variable template.
+ Diag(D.getIdentifierLoc(), diag::err_template_variable)
+ << II
+ << SourceRange(TemplateParams->getTemplateLoc(),
+ TemplateParams->getRAngleLoc());
+ return 0;
+ } else {
+ // There is an extraneous 'template<>' for this variable. Complain
+ // about it, but allow the declaration of the variable.
+ Diag(TemplateParams->getTemplateLoc(),
+ diag::err_template_variable_noparams)
+ << II
+ << SourceRange(TemplateParams->getTemplateLoc(),
+ TemplateParams->getRAngleLoc());
+ }
+ }
+
+ NewVD = VarDecl::Create(Context, DC, D.getLocStart(),
+ D.getIdentifierLoc(), II,
+ R, TInfo, SC, SCAsWritten);
+
+ // If this decl has an auto type in need of deduction, make a note of the
+ // Decl so we can diagnose uses of it in its own initializer.
+ if (D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto &&
+ R->getContainedAutoType())
+ ParsingInitForAutoVars.insert(NewVD);
+
+ if (D.isInvalidType() || Invalid)
+ NewVD->setInvalidDecl();
+
+ SetNestedNameSpecifier(NewVD, D);
+
+ if (TemplateParamLists.size() > 0 && D.getCXXScopeSpec().isSet()) {
+ NewVD->setTemplateParameterListsInfo(Context,
+ TemplateParamLists.size(),
+ TemplateParamLists.release());
+ }
+
+ if (D.getDeclSpec().isConstexprSpecified())
+ NewVD->setConstexpr(true);
+ }
+
+ // Set the lexical context. If the declarator has a C++ scope specifier, the
+ // lexical context will be different from the semantic context.
+ NewVD->setLexicalDeclContext(CurContext);
+
+ if (D.getDeclSpec().isThreadSpecified()) {
+ if (NewVD->hasLocalStorage())
+ Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_thread_non_global);
+ else if (!Context.getTargetInfo().isTLSSupported())
+ Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_thread_unsupported);
+ else
+ NewVD->setThreadSpecified(true);
+ }
+
+ if (D.getDeclSpec().isModulePrivateSpecified()) {
+ if (isExplicitSpecialization)
+ Diag(NewVD->getLocation(), diag::err_module_private_specialization)
+ << 2
+ << FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
+ else if (NewVD->hasLocalStorage())
+ Diag(NewVD->getLocation(), diag::err_module_private_local)
+ << 0 << NewVD->getDeclName()
+ << SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
+ << FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
+ else
+ NewVD->setModulePrivate();
+ }
+
+ // Handle attributes prior to checking for duplicates in MergeVarDecl
+ ProcessDeclAttributes(S, NewVD, D);
+
+ // In auto-retain/release, infer strong retention for variables of
+ // retainable type.
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewVD))
+ NewVD->setInvalidDecl();
+
+ // Handle GNU asm-label extension (encoded as an attribute).
+ if (Expr *E = (Expr*)D.getAsmLabel()) {
+ // The parser guarantees this is a string.
+ StringLiteral *SE = cast<StringLiteral>(E);
+ StringRef Label = SE->getString();
+ if (S->getFnParent() != 0) {
+ switch (SC) {
+ case SC_None:
+ case SC_Auto:
+ Diag(E->getExprLoc(), diag::warn_asm_label_on_auto_decl) << Label;
+ break;
+ case SC_Register:
+ if (!Context.getTargetInfo().isValidGCCRegisterName(Label))
+ Diag(E->getExprLoc(), diag::err_asm_unknown_register_name) << Label;
+ break;
+ case SC_Static:
+ case SC_Extern:
+ case SC_PrivateExtern:
+ case SC_OpenCLWorkGroupLocal:
+ break;
+ }
+ }
+
+ NewVD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0),
+ Context, Label));
+ } else if (!ExtnameUndeclaredIdentifiers.empty()) {
+ llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
+ ExtnameUndeclaredIdentifiers.find(NewVD->getIdentifier());
+ if (I != ExtnameUndeclaredIdentifiers.end()) {
+ NewVD->addAttr(I->second);
+ ExtnameUndeclaredIdentifiers.erase(I);
+ }
+ }
+
+ // Diagnose shadowed variables before filtering for scope.
+ if (!D.getCXXScopeSpec().isSet())
+ CheckShadow(S, NewVD, Previous);
+
+ // Don't consider existing declarations that are in a different
+ // scope and are out-of-semantic-context declarations (if the new
+ // declaration has linkage).
+ FilterLookupForScope(Previous, DC, S, NewVD->hasLinkage(),
+ isExplicitSpecialization);
+
+ if (!getLangOpts().CPlusPlus) {
+ D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
+ } else {
+ // Merge the decl with the existing one if appropriate.
+ if (!Previous.empty()) {
+ if (Previous.isSingleResult() &&
+ isa<FieldDecl>(Previous.getFoundDecl()) &&
+ D.getCXXScopeSpec().isSet()) {
+ // The user tried to define a non-static data member
+ // out-of-line (C++ [dcl.meaning]p1).
+ Diag(NewVD->getLocation(), diag::err_nonstatic_member_out_of_line)
+ << D.getCXXScopeSpec().getRange();
+ Previous.clear();
+ NewVD->setInvalidDecl();
+ }
+ } else if (D.getCXXScopeSpec().isSet()) {
+ // No previous declaration in the qualifying scope.
+ Diag(D.getIdentifierLoc(), diag::err_no_member)
+ << Name << computeDeclContext(D.getCXXScopeSpec(), true)
+ << D.getCXXScopeSpec().getRange();
+ NewVD->setInvalidDecl();
+ }
+
+ D.setRedeclaration(CheckVariableDeclaration(NewVD, Previous));
+
+ // This is an explicit specialization of a static data member. Check it.
+ if (isExplicitSpecialization && !NewVD->isInvalidDecl() &&
+ CheckMemberSpecialization(NewVD, Previous))
+ NewVD->setInvalidDecl();
+ }
+
+ // Attributes declared post-definition are currently ignored.
+ // FIXME: This should be handled in attribute merging, not
+ // here.
+ if (Previous.isSingleResult()) {
+ VarDecl *Def = dyn_cast<VarDecl>(Previous.getFoundDecl());
+ if (Def && (Def = Def->getDefinition()) &&
+ Def != NewVD && D.hasAttributes()) {
+ Diag(NewVD->getLocation(), diag::warn_attribute_precede_definition);
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ }
+ }
+
+ // If this is a locally-scoped extern C variable, update the map of
+ // such variables.
+ if (CurContext->isFunctionOrMethod() && NewVD->isExternC() &&
+ !NewVD->isInvalidDecl())
+ RegisterLocallyScopedExternCDecl(NewVD, Previous, S);
+
+ // If there's a #pragma GCC visibility in scope, and this isn't a class
+ // member, set the visibility of this variable.
+ if (NewVD->getLinkage() == ExternalLinkage && !DC->isRecord())
+ AddPushedVisibilityAttribute(NewVD);
+
+ MarkUnusedFileScopedDecl(NewVD);
+
+ return NewVD;
+}
+
+/// \brief Diagnose variable or built-in function shadowing. Implements
+/// -Wshadow.
+///
+/// This method is called whenever a VarDecl is added to a "useful"
+/// scope.
+///
+/// \param S the scope in which the shadowing name is being declared
+/// \param R the lookup of the name
+///
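+/// A case this warning targets (illustrative, assumed example, with -Wshadow
+/// enabled):
+/// \code
+///   int global;
+///   void f() { int global; }  // warns: declaration shadows the global
+/// \endcode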
+void Sema::CheckShadow(Scope *S, VarDecl *D, const LookupResult& R) {
+ // Return if warning is ignored.
+ if (Diags.getDiagnosticLevel(diag::warn_decl_shadow, R.getNameLoc()) ==
+ DiagnosticsEngine::Ignored)
+ return;
+
+ // Don't diagnose declarations at file scope.
+ if (D->hasGlobalStorage())
+ return;
+
+ DeclContext *NewDC = D->getDeclContext();
+
+ // Only diagnose if we're shadowing an unambiguous field or variable.
+ if (R.getResultKind() != LookupResult::Found)
+ return;
+
+ NamedDecl* ShadowedDecl = R.getFoundDecl();
+ if (!isa<VarDecl>(ShadowedDecl) && !isa<FieldDecl>(ShadowedDecl))
+ return;
+
+ // Fields are not shadowed by variables in C++ static methods.
+ if (isa<FieldDecl>(ShadowedDecl))
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewDC))
+ if (MD->isStatic())
+ return;
+
+ if (VarDecl *shadowedVar = dyn_cast<VarDecl>(ShadowedDecl))
+ if (shadowedVar->isExternC()) {
+ // For shadowing external vars, make sure that we point to the global
+ // declaration, not a locally scoped extern declaration.
+ for (VarDecl::redecl_iterator
+ I = shadowedVar->redecls_begin(), E = shadowedVar->redecls_end();
+ I != E; ++I)
+ if (I->isFileVarDecl()) {
+ ShadowedDecl = *I;
+ break;
+ }
+ }
+
+ DeclContext *OldDC = ShadowedDecl->getDeclContext();
+
+ // Only warn about certain kinds of shadowing for class members.
+ if (NewDC && NewDC->isRecord()) {
+ // In particular, don't warn about shadowing non-class members.
+ if (!OldDC->isRecord())
+ return;
+
+ // TODO: should we warn about static data members shadowing
+ // static data members from base classes?
+
+ // TODO: don't diagnose for inaccessible shadowed members.
+ // This is hard to do perfectly because we might friend the
+ // shadowing context, but that's just a false negative.
+ }
+
+ // Determine what kind of declaration we're shadowing.
+ unsigned Kind;
+ if (isa<RecordDecl>(OldDC)) {
+ if (isa<FieldDecl>(ShadowedDecl))
+ Kind = 3; // field
+ else
+ Kind = 2; // static data member
+ } else if (OldDC->isFileContext())
+ Kind = 1; // global
+ else
+ Kind = 0; // local
+
+ DeclarationName Name = R.getLookupName();
+
+ // Emit warning and note.
+ Diag(R.getNameLoc(), diag::warn_decl_shadow) << Name << Kind << OldDC;
+ Diag(ShadowedDecl->getLocation(), diag::note_previous_declaration);
+}
+
+/// \brief Check -Wshadow without the advantage of a previous lookup.
+void Sema::CheckShadow(Scope *S, VarDecl *D) {
+ if (Diags.getDiagnosticLevel(diag::warn_decl_shadow, D->getLocation()) ==
+ DiagnosticsEngine::Ignored)
+ return;
+
+ LookupResult R(*this, D->getDeclName(), D->getLocation(),
+ Sema::LookupOrdinaryName, Sema::ForRedeclaration);
+ LookupName(R, S);
+ CheckShadow(S, D, R);
+}
+
+/// \brief Perform semantic checking on a newly-created variable
+/// declaration.
+///
+/// This routine performs all of the type-checking required for a
+/// variable declaration once it has been built. It is used both to
+/// check variables after they have been parsed and their declarators
+/// have been translated into a declaration, and to check variables
+/// that have been instantiated from a template.
+///
+/// Sets NewVD->isInvalidDecl() if an error was encountered.
+///
+/// Returns true if the variable declaration is a redeclaration.
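+///
+/// Illustrative (assumed) examples of declarations it rejects:
+/// \code
+///   void v;                              // error: incomplete type 'void'
+///   void g(int n) { static int a[n]; }   // error: VLA with static storage
+/// \endcode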
+bool Sema::CheckVariableDeclaration(VarDecl *NewVD,
+ LookupResult &Previous) {
+ // If the decl is already known invalid, don't check it.
+ if (NewVD->isInvalidDecl())
+ return false;
+
+ QualType T = NewVD->getType();
+
+ if (T->isObjCObjectType()) {
+ Diag(NewVD->getLocation(), diag::err_statically_allocated_object)
+ << FixItHint::CreateInsertion(NewVD->getLocation(), "*");
+ T = Context.getObjCObjectPointerType(T);
+ NewVD->setType(T);
+ }
+
+ // Emit an error if an address space was applied to a decl with local storage.
+ // This includes arrays of objects with address space qualifiers, but not
+ // automatic variables that point to other address spaces.
+ // ISO/IEC TR 18037 S5.1.2
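+ // e.g. (illustrative, Clang extension syntax):
+ //   void f() { __attribute__((address_space(1))) int x; }  // rejected here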
+ if (NewVD->hasLocalStorage() && T.getAddressSpace() != 0) {
+ Diag(NewVD->getLocation(), diag::err_as_qualified_auto_decl);
+ NewVD->setInvalidDecl();
+ return false;
+ }
+
+ if (NewVD->hasLocalStorage() && T.isObjCGCWeak()
+ && !NewVD->hasAttr<BlocksAttr>()) {
+ if (getLangOpts().getGC() != LangOptions::NonGC)
+ Diag(NewVD->getLocation(), diag::warn_gc_attribute_weak_on_local);
+ else
+ Diag(NewVD->getLocation(), diag::warn_attribute_weak_on_local);
+ }
+
+ bool isVM = T->isVariablyModifiedType();
+ if (isVM || NewVD->hasAttr<CleanupAttr>() ||
+ NewVD->hasAttr<BlocksAttr>())
+ getCurFunction()->setHasBranchProtectedScope();
+
+ if ((isVM && NewVD->hasLinkage()) ||
+ (T->isVariableArrayType() && NewVD->hasGlobalStorage())) {
+ bool SizeIsNegative;
+ llvm::APSInt Oversized;
+ QualType FixedTy =
+ TryToFixInvalidVariablyModifiedType(T, Context, SizeIsNegative,
+ Oversized);
+
+ if (FixedTy.isNull() && T->isVariableArrayType()) {
+ const VariableArrayType *VAT = Context.getAsVariableArrayType(T);
+ // FIXME: This won't give the correct result for
+ // int a[10][n];
+ SourceRange SizeRange = VAT->getSizeExpr()->getSourceRange();
+
+ if (NewVD->isFileVarDecl())
+ Diag(NewVD->getLocation(), diag::err_vla_decl_in_file_scope)
+ << SizeRange;
+ else if (NewVD->getStorageClass() == SC_Static)
+ Diag(NewVD->getLocation(), diag::err_vla_decl_has_static_storage)
+ << SizeRange;
+ else
+ Diag(NewVD->getLocation(), diag::err_vla_decl_has_extern_linkage)
+ << SizeRange;
+ NewVD->setInvalidDecl();
+ return false;
+ }
+
+ if (FixedTy.isNull()) {
+ if (NewVD->isFileVarDecl())
+ Diag(NewVD->getLocation(), diag::err_vm_decl_in_file_scope);
+ else
+ Diag(NewVD->getLocation(), diag::err_vm_decl_has_extern_linkage);
+ NewVD->setInvalidDecl();
+ return false;
+ }
+
+ Diag(NewVD->getLocation(), diag::warn_illegal_constant_array_size);
+ NewVD->setType(FixedTy);
+ }
+
+ if (Previous.empty() && NewVD->isExternC()) {
+ // Since we did not find anything by this name and we're declaring
+ // an extern "C" variable, look for a non-visible extern "C"
+ // declaration with the same name.
+ llvm::DenseMap<DeclarationName, NamedDecl *>::iterator Pos
+ = findLocallyScopedExternalDecl(NewVD->getDeclName());
+ if (Pos != LocallyScopedExternalDecls.end())
+ Previous.addDecl(Pos->second);
+ }
+
+ if (T->isVoidType() && !NewVD->hasExternalStorage()) {
+ Diag(NewVD->getLocation(), diag::err_typecheck_decl_incomplete_type)
+ << T;
+ NewVD->setInvalidDecl();
+ return false;
+ }
+
+ if (!NewVD->hasLocalStorage() && NewVD->hasAttr<BlocksAttr>()) {
+ Diag(NewVD->getLocation(), diag::err_block_on_nonlocal);
+ NewVD->setInvalidDecl();
+ return false;
+ }
+
+ if (isVM && NewVD->hasAttr<BlocksAttr>()) {
+ Diag(NewVD->getLocation(), diag::err_block_on_vm);
+ NewVD->setInvalidDecl();
+ return false;
+ }
+
+ if (NewVD->isConstexpr() && !T->isDependentType() &&
+ RequireLiteralType(NewVD->getLocation(), T,
+ PDiag(diag::err_constexpr_var_non_literal))) {
+ NewVD->setInvalidDecl();
+ return false;
+ }
+
+ if (!Previous.empty()) {
+ MergeVarDecl(NewVD, Previous);
+ return true;
+ }
+ return false;
+}
+
+/// \brief Data used with FindOverriddenMethod
+struct FindOverriddenMethodData {
+ Sema *S;
+ CXXMethodDecl *Method;
+};
+
+/// \brief Member lookup function that determines whether a given C++
+/// method overrides a method in a base class, to be used with
+/// CXXRecordDecl::lookupInBases().
+static bool FindOverriddenMethod(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ void *UserData) {
+ RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
+
+ FindOverriddenMethodData *Data
+ = reinterpret_cast<FindOverriddenMethodData*>(UserData);
+
+ DeclarationName Name = Data->Method->getDeclName();
+
+ // FIXME: Do we care about other names here too?
+ if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
+ // We really want to find the base class destructor here.
+ QualType T = Data->S->Context.getTypeDeclType(BaseRecord);
+ CanQualType CT = Data->S->Context.getCanonicalType(T);
+
+ Name = Data->S->Context.DeclarationNames.getCXXDestructorName(CT);
+ }
+
+ for (Path.Decls = BaseRecord->lookup(Name);
+ Path.Decls.first != Path.Decls.second;
+ ++Path.Decls.first) {
+ NamedDecl *D = *Path.Decls.first;
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (MD->isVirtual() && !Data->S->IsOverload(Data->Method, MD, false))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// AddOverriddenMethods - See if a method overrides any in the base classes,
+/// and if so, check that it's a valid override and remember it.
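+///
+/// For example (illustrative):
+/// \code
+///   struct B { virtual void f(); };
+///   struct D : B { void f(); };  // records B::f() as overridden by D::f()
+/// \endcode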
+bool Sema::AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
+ // Look for virtual methods in base classes that this method might override.
+ CXXBasePaths Paths;
+ FindOverriddenMethodData Data;
+ Data.Method = MD;
+ Data.S = this;
+ bool AddedAny = false;
+ if (DC->lookupInBases(&FindOverriddenMethod, &Data, Paths)) {
+ for (CXXBasePaths::decl_iterator I = Paths.found_decls_begin(),
+ E = Paths.found_decls_end(); I != E; ++I) {
+ if (CXXMethodDecl *OldMD = dyn_cast<CXXMethodDecl>(*I)) {
+ MD->addOverriddenMethod(OldMD->getCanonicalDecl());
+ if (!CheckOverridingFunctionReturnType(MD, OldMD) &&
+ !CheckOverridingFunctionExceptionSpec(MD, OldMD) &&
+ !CheckIfOverriddenFunctionIsMarkedFinal(MD, OldMD)) {
+ AddedAny = true;
+ }
+ }
+ }
+ }
+
+ return AddedAny;
+}
+
+namespace {
+ // Struct for holding all of the extra arguments needed by
+ // DiagnoseInvalidRedeclaration to call Sema::ActOnFunctionDeclarator.
+ struct ActOnFDArgs {
+ Scope *S;
+ Declarator &D;
+ MultiTemplateParamsArg TemplateParamLists;
+ bool AddToScope;
+ };
+}
+
+namespace {
+
+// Callback to only accept typo corrections that have a non-zero edit distance.
+// Also only accept corrections that have the same parent decl.
+class DifferentNameValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ DifferentNameValidatorCCC(CXXRecordDecl *Parent)
+ : ExpectedParent(Parent ? Parent->getCanonicalDecl() : 0) {}
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ if (candidate.getEditDistance() == 0)
+ return false;
+
+ if (CXXMethodDecl *MD = candidate.getCorrectionDeclAs<CXXMethodDecl>()) {
+ CXXRecordDecl *Parent = MD->getParent();
+ return Parent && Parent->getCanonicalDecl() == ExpectedParent;
+ }
+
+ return !ExpectedParent;
+ }
+
+ private:
+ CXXRecordDecl *ExpectedParent;
+};
+
+}
+
+/// \brief Generate diagnostics for an invalid function redeclaration.
+///
+/// This routine handles generating the diagnostic messages for an invalid
+/// function redeclaration, including finding possible similar declarations
+/// or performing typo correction if there are no previous declarations with
+/// the same name.
+///
+/// Returns a NamedDecl iff typo correction was performed and substituting in
+/// the new declaration name does not cause new errors.
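+///
+/// A hypothetical case this handles:
+/// \code
+///   struct S { void foo(int); };
+///   void S::fol(int) {}  // no member named 'fol'; typo correction offers 'foo'
+/// \endcode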
+static NamedDecl* DiagnoseInvalidRedeclaration(
+ Sema &SemaRef, LookupResult &Previous, FunctionDecl *NewFD,
+ ActOnFDArgs &ExtraArgs) {
+ NamedDecl *Result = NULL;
+ DeclarationName Name = NewFD->getDeclName();
+ DeclContext *NewDC = NewFD->getDeclContext();
+ LookupResult Prev(SemaRef, Name, NewFD->getLocation(),
+ Sema::LookupOrdinaryName, Sema::ForRedeclaration);
+ llvm::SmallVector<unsigned, 1> MismatchedParams;
+ llvm::SmallVector<std::pair<FunctionDecl*, unsigned>, 1> NearMatches;
+ TypoCorrection Correction;
+ bool isFriendDecl = (SemaRef.getLangOpts().CPlusPlus &&
+ ExtraArgs.D.getDeclSpec().isFriendSpecified());
+ unsigned DiagMsg = isFriendDecl ? diag::err_no_matching_local_friend
+ : diag::err_member_def_does_not_match;
+
+ NewFD->setInvalidDecl();
+ SemaRef.LookupQualifiedName(Prev, NewDC);
+ assert(!Prev.isAmbiguous() &&
+ "Cannot have an ambiguity in previous-declaration lookup");
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
+ DifferentNameValidatorCCC Validator(MD ? MD->getParent() : 0);
+ if (!Prev.empty()) {
+ for (LookupResult::iterator Func = Prev.begin(), FuncEnd = Prev.end();
+ Func != FuncEnd; ++Func) {
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(*Func);
+ if (FD &&
+ hasSimilarParameters(SemaRef.Context, FD, NewFD, MismatchedParams)) {
+ // Add 1 to the index so that 0 can mean the mismatch didn't
+ // involve a parameter
+ unsigned ParamNum =
+ MismatchedParams.empty() ? 0 : MismatchedParams.front() + 1;
+ NearMatches.push_back(std::make_pair(FD, ParamNum));
+ }
+ }
+ // If the qualified name lookup yielded nothing, try typo correction
+ } else if ((Correction = SemaRef.CorrectTypo(Prev.getLookupNameInfo(),
+ Prev.getLookupKind(), 0, 0,
+ Validator, NewDC))) {
+ // Trap errors.
+ Sema::SFINAETrap Trap(SemaRef);
+
+ // Set up everything for the call to ActOnFunctionDeclarator
+ ExtraArgs.D.SetIdentifier(Correction.getCorrectionAsIdentifierInfo(),
+ ExtraArgs.D.getIdentifierLoc());
+ Previous.clear();
+ Previous.setLookupName(Correction.getCorrection());
+ for (TypoCorrection::decl_iterator CDecl = Correction.begin(),
+ CDeclEnd = Correction.end();
+ CDecl != CDeclEnd; ++CDecl) {
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(*CDecl);
+ if (FD && hasSimilarParameters(SemaRef.Context, FD, NewFD,
+ MismatchedParams)) {
+ Previous.addDecl(FD);
+ }
+ }
+ bool wasRedeclaration = ExtraArgs.D.isRedeclaration();
+ // TODO: Refactor ActOnFunctionDeclarator so that we can call only the
+ // pieces needed to verify the typo-corrected C++ declaration and hopefully
+ // eliminate the need for the parameter pack ExtraArgs.
+ Result = SemaRef.ActOnFunctionDeclarator(
+ ExtraArgs.S, ExtraArgs.D,
+ Correction.getCorrectionDecl()->getDeclContext(),
+ NewFD->getTypeSourceInfo(), Previous, ExtraArgs.TemplateParamLists,
+ ExtraArgs.AddToScope);
+ if (Trap.hasErrorOccurred()) {
+ // Pretend the typo correction never occurred
+ ExtraArgs.D.SetIdentifier(Name.getAsIdentifierInfo(),
+ ExtraArgs.D.getIdentifierLoc());
+ ExtraArgs.D.setRedeclaration(wasRedeclaration);
+ Previous.clear();
+ Previous.setLookupName(Name);
+ Result = NULL;
+ } else {
+ for (LookupResult::iterator Func = Previous.begin(),
+ FuncEnd = Previous.end();
+ Func != FuncEnd; ++Func) {
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*Func))
+ NearMatches.push_back(std::make_pair(FD, 0));
+ }
+ }
+ if (NearMatches.empty()) {
+ // Ignore the correction if it didn't yield any close FunctionDecl matches
+ Correction = TypoCorrection();
+ } else {
+ DiagMsg = isFriendDecl ? diag::err_no_matching_local_friend_suggest
+ : diag::err_member_def_does_not_match_suggest;
+ }
+ }
+
+ if (Correction)
+ SemaRef.Diag(NewFD->getLocation(), DiagMsg)
+ << Name << NewDC << Correction.getQuoted(SemaRef.getLangOpts())
+ << FixItHint::CreateReplacement(
+ NewFD->getLocation(),
+ Correction.getAsString(SemaRef.getLangOpts()));
+ else
+ SemaRef.Diag(NewFD->getLocation(), DiagMsg)
+ << Name << NewDC << NewFD->getLocation();
+
+ bool NewFDisConst = false;
+ if (CXXMethodDecl *NewMD = dyn_cast<CXXMethodDecl>(NewFD))
+ NewFDisConst = NewMD->getTypeQualifiers() & Qualifiers::Const;
+
+ for (llvm::SmallVector<std::pair<FunctionDecl*, unsigned>, 1>::iterator
+ NearMatch = NearMatches.begin(), NearMatchEnd = NearMatches.end();
+ NearMatch != NearMatchEnd; ++NearMatch) {
+ FunctionDecl *FD = NearMatch->first;
+ bool FDisConst = false;
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
+ FDisConst = MD->getTypeQualifiers() & Qualifiers::Const;
+
+ if (unsigned Idx = NearMatch->second) {
+ ParmVarDecl *FDParam = FD->getParamDecl(Idx-1);
+ SourceLocation Loc = FDParam->getTypeSpecStartLoc();
+ if (Loc.isInvalid()) Loc = FD->getLocation();
+ SemaRef.Diag(Loc, diag::note_member_def_close_param_match)
+ << Idx << FDParam->getType() << NewFD->getParamDecl(Idx-1)->getType();
+ } else if (Correction) {
+ SemaRef.Diag(FD->getLocation(), diag::note_previous_decl)
+ << Correction.getQuoted(SemaRef.getLangOpts());
+ } else if (FDisConst != NewFDisConst) {
+ SemaRef.Diag(FD->getLocation(), diag::note_member_def_close_const_match)
+ << NewFDisConst << FD->getSourceRange().getEnd();
+ } else
+ SemaRef.Diag(FD->getLocation(), diag::note_member_def_close_match);
+ }
+ return Result;
+}
+
+static FunctionDecl::StorageClass getFunctionStorageClass(Sema &SemaRef,
+ Declarator &D) {
+ switch (D.getDeclSpec().getStorageClassSpec()) {
+ default: llvm_unreachable("Unknown storage class!");
+ case DeclSpec::SCS_auto:
+ case DeclSpec::SCS_register:
+ case DeclSpec::SCS_mutable:
+ SemaRef.Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::err_typecheck_sclass_func);
+ D.setInvalidType();
+ break;
+ case DeclSpec::SCS_unspecified: break;
+ case DeclSpec::SCS_extern: return SC_Extern;
+ case DeclSpec::SCS_static: {
+ if (SemaRef.CurContext->getRedeclContext()->isFunctionOrMethod()) {
+ // C99 6.7.1p5:
+ // The declaration of an identifier for a function that has
+ // block scope shall have no explicit storage-class specifier
+ // other than extern
+ // See also (C++ [dcl.stc]p4).
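+ // e.g. (illustrative): void g() { static void h(); }  // diagnosed here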
+ SemaRef.Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::err_static_block_func);
+ break;
+ } else
+ return SC_Static;
+ }
+ case DeclSpec::SCS_private_extern: return SC_PrivateExtern;
+ }
+
+ // No explicit storage class was returned above, so default to SC_None.
+ return SC_None;
+}
+
+static FunctionDecl* CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
+ DeclContext *DC, QualType &R,
+ TypeSourceInfo *TInfo,
+ FunctionDecl::StorageClass SC,
+ bool &IsVirtualOkay) {
+ DeclarationNameInfo NameInfo = SemaRef.GetNameForDeclarator(D);
+ DeclarationName Name = NameInfo.getName();
+
+ FunctionDecl *NewFD = 0;
+ bool isInline = D.getDeclSpec().isInlineSpecified();
+ DeclSpec::SCS SCSpec = D.getDeclSpec().getStorageClassSpecAsWritten();
+ FunctionDecl::StorageClass SCAsWritten
+ = StorageClassSpecToFunctionDeclStorageClass(SCSpec);
+
+ if (!SemaRef.getLangOpts().CPlusPlus) {
+ // Determine whether the function was written with a
+ // prototype. This is true when:
+ // - there is a prototype in the declarator, or
+ // - the type R of the function is some kind of typedef or other reference
+ // to a type name (which eventually refers to a function type).
+ bool HasPrototype =
+ (D.isFunctionDeclarator() && D.getFunctionTypeInfo().hasPrototype) ||
+ (!isa<FunctionType>(R.getTypePtr()) && R->isFunctionProtoType());
+
+ NewFD = FunctionDecl::Create(SemaRef.Context, DC,
+ D.getLocStart(), NameInfo, R,
+ TInfo, SC, SCAsWritten, isInline,
+ HasPrototype);
+ if (D.isInvalidType())
+ NewFD->setInvalidDecl();
+
+ // Set the lexical context.
+ NewFD->setLexicalDeclContext(SemaRef.CurContext);
+
+ return NewFD;
+ }
+
+ bool isExplicit = D.getDeclSpec().isExplicitSpecified();
+ bool isConstexpr = D.getDeclSpec().isConstexprSpecified();
+
+ // Check that the return type is not an abstract class type.
+ // For record types, this is done by the AbstractClassUsageDiagnoser once
+ // the class has been completely parsed.
+ if (!DC->isRecord() &&
+ SemaRef.RequireNonAbstractType(D.getIdentifierLoc(),
+ R->getAs<FunctionType>()->getResultType(),
+ diag::err_abstract_type_in_decl,
+ SemaRef.AbstractReturnType))
+ D.setInvalidType();
+
+ if (Name.getNameKind() == DeclarationName::CXXConstructorName) {
+ // This is a C++ constructor declaration.
+ assert(DC->isRecord() &&
+ "Constructors can only be declared in a member context");
+
+ R = SemaRef.CheckConstructorDeclarator(D, R, SC);
+ return CXXConstructorDecl::Create(SemaRef.Context, cast<CXXRecordDecl>(DC),
+ D.getLocStart(), NameInfo,
+ R, TInfo, isExplicit, isInline,
+ /*isImplicitlyDeclared=*/false,
+ isConstexpr);
+
+ } else if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
+ // This is a C++ destructor declaration.
+ if (DC->isRecord()) {
+ R = SemaRef.CheckDestructorDeclarator(D, R, SC);
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
+ CXXDestructorDecl *NewDD = CXXDestructorDecl::Create(
+ SemaRef.Context, Record,
+ D.getLocStart(),
+ NameInfo, R, TInfo, isInline,
+ /*isImplicitlyDeclared=*/false);
+
+ // If the class is complete, then we now create the implicit exception
+ // specification. If the class is incomplete or dependent, we can't do
+ // it yet.
+ if (SemaRef.getLangOpts().CPlusPlus0x && !Record->isDependentType() &&
+ Record->getDefinition() && !Record->isBeingDefined() &&
+ R->getAs<FunctionProtoType>()->getExceptionSpecType() == EST_None) {
+ SemaRef.AdjustDestructorExceptionSpec(Record, NewDD);
+ }
+
+ IsVirtualOkay = true;
+ return NewDD;
+
+ } else {
+ SemaRef.Diag(D.getIdentifierLoc(), diag::err_destructor_not_member);
+ D.setInvalidType();
+
+ // Create a FunctionDecl to satisfy the function definition parsing
+ // code path.
+ return FunctionDecl::Create(SemaRef.Context, DC,
+ D.getLocStart(),
+ D.getIdentifierLoc(), Name, R, TInfo,
+ SC, SCAsWritten, isInline,
+ /*hasPrototype=*/true, isConstexpr);
+ }
+
+ } else if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName) {
+ if (!DC->isRecord()) {
+ SemaRef.Diag(D.getIdentifierLoc(),
+ diag::err_conv_function_not_member);
+ return 0;
+ }
+
+ SemaRef.CheckConversionDeclarator(D, R, SC);
+ IsVirtualOkay = true;
+ return CXXConversionDecl::Create(SemaRef.Context, cast<CXXRecordDecl>(DC),
+ D.getLocStart(), NameInfo,
+ R, TInfo, isInline, isExplicit,
+ isConstexpr, SourceLocation());
+
+ } else if (DC->isRecord()) {
+ // If the name of the function is the same as the name of the record,
+ // then this must be an invalid constructor that has a return type.
+ // (The parser checks for a return type and makes the declarator a
+ // constructor if it has no return type).
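+    // e.g. (illustrative): struct S { int S(); };  // error: constructor
+    // cannot have a return type.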
+ if (Name.getAsIdentifierInfo() &&
+ Name.getAsIdentifierInfo() == cast<CXXRecordDecl>(DC)->getIdentifier()){
+ SemaRef.Diag(D.getIdentifierLoc(), diag::err_constructor_return_type)
+ << SourceRange(D.getDeclSpec().getTypeSpecTypeLoc())
+ << SourceRange(D.getIdentifierLoc());
+ return 0;
+ }
+
+ bool isStatic = SC == SC_Static;
+
+ // [class.free]p1:
+ // Any allocation function for a class T is a static member
+ // (even if not explicitly declared static).
+ if (Name.getCXXOverloadedOperator() == OO_New ||
+ Name.getCXXOverloadedOperator() == OO_Array_New)
+ isStatic = true;
+
+ // [class.free]p6 Any deallocation function for a class X is a static member
+ // (even if not explicitly declared static).
+ if (Name.getCXXOverloadedOperator() == OO_Delete ||
+ Name.getCXXOverloadedOperator() == OO_Array_Delete)
+ isStatic = true;
+
+ IsVirtualOkay = !isStatic;
+
+ // This is a C++ method declaration.
+ return CXXMethodDecl::Create(SemaRef.Context, cast<CXXRecordDecl>(DC),
+ D.getLocStart(), NameInfo, R,
+ TInfo, isStatic, SCAsWritten, isInline,
+ isConstexpr, SourceLocation());
+
+ } else {
+ // Determine whether the function was written with a
+ // prototype. This is true when:
+ //   - we're in C++ (where every function has a prototype).
+ return FunctionDecl::Create(SemaRef.Context, DC,
+ D.getLocStart(),
+ NameInfo, R, TInfo, SC, SCAsWritten, isInline,
+ true/*HasPrototype*/, isConstexpr);
+ }
+}
+
+NamedDecl*
+Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
+ TypeSourceInfo *TInfo, LookupResult &Previous,
+ MultiTemplateParamsArg TemplateParamLists,
+ bool &AddToScope) {
+ QualType R = TInfo->getType();
+
+ assert(R.getTypePtr()->isFunctionType());
+
+ // TODO: consider using NameInfo for diagnostic.
+ DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
+ DeclarationName Name = NameInfo.getName();
+ FunctionDecl::StorageClass SC = getFunctionStorageClass(*this, D);
+
+ if (D.getDeclSpec().isThreadSpecified())
+ Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_invalid_thread);
+
+ // Do not allow returning an Objective-C interface by value.
+ if (R->getAs<FunctionType>()->getResultType()->isObjCObjectType()) {
+ Diag(D.getIdentifierLoc(),
+ diag::err_object_cannot_be_passed_returned_by_value) << 0
+ << R->getAs<FunctionType>()->getResultType()
+ << FixItHint::CreateInsertion(D.getIdentifierLoc(), "*");
+
+ QualType T = R->getAs<FunctionType>()->getResultType();
+ T = Context.getObjCObjectPointerType(T);
+ if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(R)) {
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ R = Context.getFunctionType(T, FPT->arg_type_begin(),
+ FPT->getNumArgs(), EPI);
+ }
+ else if (isa<FunctionNoProtoType>(R))
+ R = Context.getFunctionNoProtoType(T);
+ }
+
+ bool isFriend = false;
+ FunctionTemplateDecl *FunctionTemplate = 0;
+ bool isExplicitSpecialization = false;
+ bool isFunctionTemplateSpecialization = false;
+ bool isDependentClassScopeExplicitSpecialization = false;
+ bool isVirtualOkay = false;
+
+ FunctionDecl *NewFD = CreateNewFunctionDecl(*this, D, DC, R, TInfo, SC,
+ isVirtualOkay);
+ if (!NewFD) return 0;
+
+ if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer())
+ NewFD->setTopLevelDeclInObjCContainer();
+
+ if (getLangOpts().CPlusPlus) {
+ bool isInline = D.getDeclSpec().isInlineSpecified();
+ bool isVirtual = D.getDeclSpec().isVirtualSpecified();
+ bool isExplicit = D.getDeclSpec().isExplicitSpecified();
+ bool isConstexpr = D.getDeclSpec().isConstexprSpecified();
+ isFriend = D.getDeclSpec().isFriendSpecified();
+ if (isFriend && !isInline && D.isFunctionDefinition()) {
+ // C++ [class.friend]p5
+ // A function can be defined in a friend declaration of a
+ // class . . . . Such a function is implicitly inline.
+ NewFD->setImplicitlyInline();
+ }
+
+ SetNestedNameSpecifier(NewFD, D);
+ isExplicitSpecialization = false;
+ isFunctionTemplateSpecialization = false;
+ if (D.isInvalidType())
+ NewFD->setInvalidDecl();
+
+ // Set the lexical context. If the declarator has a C++
+ // scope specifier, or is the object of a friend declaration, the
+ // lexical context will be different from the semantic context.
+ NewFD->setLexicalDeclContext(CurContext);
+
+ // Match up the template parameter lists with the scope specifier, then
+ // determine whether we have a template or a template specialization.
+ bool Invalid = false;
+ if (TemplateParameterList *TemplateParams
+ = MatchTemplateParametersToScopeSpecifier(
+ D.getDeclSpec().getLocStart(),
+ D.getIdentifierLoc(),
+ D.getCXXScopeSpec(),
+ TemplateParamLists.get(),
+ TemplateParamLists.size(),
+ isFriend,
+ isExplicitSpecialization,
+ Invalid)) {
+ if (TemplateParams->size() > 0) {
+ // This is a function template
+
+ // Check that we can declare a template here.
+ if (CheckTemplateDeclScope(S, TemplateParams))
+ return 0;
+
+ // A destructor cannot be a template.
+ if (Name.getNameKind() == DeclarationName::CXXDestructorName) {
+ Diag(NewFD->getLocation(), diag::err_destructor_template);
+ return 0;
+ }
+
+ // If we're adding a template to a dependent context, we may need to
+ // rebuild some of the types used within the template parameter list,
+ // now that we know what the current instantiation is.
+ if (DC->isDependentContext()) {
+ ContextRAII SavedContext(*this, DC);
+ if (RebuildTemplateParamsInCurrentInstantiation(TemplateParams))
+ Invalid = true;
+ }
+
+ FunctionTemplate = FunctionTemplateDecl::Create(Context, DC,
+ NewFD->getLocation(),
+ Name, TemplateParams,
+ NewFD);
+ FunctionTemplate->setLexicalDeclContext(CurContext);
+ NewFD->setDescribedFunctionTemplate(FunctionTemplate);
+
+ // For source fidelity, store the other template param lists.
+ if (TemplateParamLists.size() > 1) {
+ NewFD->setTemplateParameterListsInfo(Context,
+ TemplateParamLists.size() - 1,
+ TemplateParamLists.release());
+ }
+ } else {
+ // This is a function template specialization.
+ isFunctionTemplateSpecialization = true;
+ // For source fidelity, store all the template param lists.
+ NewFD->setTemplateParameterListsInfo(Context,
+ TemplateParamLists.size(),
+ TemplateParamLists.release());
+
+ // C++0x [temp.expl.spec]p20 forbids "template<> friend void foo(int);".
+ if (isFriend) {
+ // We want to remove the "template<>", found here.
+ SourceRange RemoveRange = TemplateParams->getSourceRange();
+
+ // If we remove the template<> and the name is not a
+ // template-id, we're actually silently creating a problem:
+ // the friend declaration will refer to an untemplated decl,
+ // and clearly the user wants a template specialization. So
+ // we need to insert '<>' after the name.
+ SourceLocation InsertLoc;
+ if (D.getName().getKind() != UnqualifiedId::IK_TemplateId) {
+ InsertLoc = D.getName().getSourceRange().getEnd();
+ InsertLoc = PP.getLocForEndOfToken(InsertLoc);
+ }
+
+ Diag(D.getIdentifierLoc(), diag::err_template_spec_decl_friend)
+ << Name << RemoveRange
+ << FixItHint::CreateRemoval(RemoveRange)
+ << FixItHint::CreateInsertion(InsertLoc, "<>");
+ }
+ }
+ }
+ else {
+ // All template param lists were matched against the scope specifier:
+ // this is NOT (an explicit specialization of) a template.
+ if (TemplateParamLists.size() > 0)
+ // For source fidelity, store all the template param lists.
+ NewFD->setTemplateParameterListsInfo(Context,
+ TemplateParamLists.size(),
+ TemplateParamLists.release());
+ }
+
+ if (Invalid) {
+ NewFD->setInvalidDecl();
+ if (FunctionTemplate)
+ FunctionTemplate->setInvalidDecl();
+ }
+
+ // If we see "T var();" at block scope, where T is a class type, it is
+ // probably an attempt to initialize a variable, not a function declaration.
+ // We don't catch this case earlier, since there is no ambiguity here.
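+    // e.g. (illustrative): struct T {}; void g() { T var(); }  // declares a
+    // function named 'var' rather than a default-initialized variable.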
+ if (!FunctionTemplate && D.getFunctionDefinitionKind() == FDK_Declaration &&
+ CurContext->isFunctionOrMethod() &&
+ D.getNumTypeObjects() == 1 && D.isFunctionDeclarator() &&
+ D.getDeclSpec().getStorageClassSpecAsWritten()
+ == DeclSpec::SCS_unspecified) {
+ QualType T = R->getAs<FunctionType>()->getResultType();
+ DeclaratorChunk &C = D.getTypeObject(0);
+ if (!T->isVoidType() && C.Fun.NumArgs == 0 && !C.Fun.isVariadic &&
+ !C.Fun.TrailingReturnType &&
+ C.Fun.getExceptionSpecType() == EST_None) {
+ SourceRange ParenRange(C.Loc, C.EndLoc);
+ Diag(C.Loc, diag::warn_empty_parens_are_function_decl) << ParenRange;
+
+ // If the declaration looks like:
+ // T var1,
+ // f();
+ // and name lookup finds a function named 'f', then the ',' was
+ // probably intended to be a ';'.
+ if (!D.isFirstDeclarator() && D.getIdentifier()) {
+ FullSourceLoc Comma(D.getCommaLoc(), SourceMgr);
+ FullSourceLoc Name(D.getIdentifierLoc(), SourceMgr);
+ if (Comma.getFileID() != Name.getFileID() ||
+ Comma.getSpellingLineNumber() != Name.getSpellingLineNumber()) {
+ LookupResult Result(*this, D.getIdentifier(), SourceLocation(),
+ LookupOrdinaryName);
+ if (LookupName(Result, S))
+ Diag(D.getCommaLoc(), diag::note_empty_parens_function_call)
+ << FixItHint::CreateReplacement(D.getCommaLoc(), ";") << NewFD;
+ }
+ }
+ const CXXRecordDecl *RD = T->getAsCXXRecordDecl();
+ // Empty parens mean value-initialization, and no parens mean default
+ // initialization. These are equivalent if the default constructor is
+ // user-provided, or if zero-initialization is a no-op.
+ if (RD && RD->hasDefinition() &&
+ (RD->isEmpty() || RD->hasUserProvidedDefaultConstructor()))
+ Diag(C.Loc, diag::note_empty_parens_default_ctor)
+ << FixItHint::CreateRemoval(ParenRange);
+ else if (const char *Init = getFixItZeroInitializerForType(T))
+ Diag(C.Loc, diag::note_empty_parens_zero_initialize)
+ << FixItHint::CreateReplacement(ParenRange, Init);
+ else if (LangOpts.CPlusPlus0x)
+ Diag(C.Loc, diag::note_empty_parens_zero_initialize)
+ << FixItHint::CreateReplacement(ParenRange, "{}");
+ }
+ }
+
+ // C++ [dcl.fct.spec]p5:
+ // The virtual specifier shall only be used in declarations of
+ // nonstatic class member functions that appear within a
+ // member-specification of a class declaration; see 10.3.
+ //
+ if (isVirtual && !NewFD->isInvalidDecl()) {
+ if (!isVirtualOkay) {
+ Diag(D.getDeclSpec().getVirtualSpecLoc(),
+ diag::err_virtual_non_function);
+ } else if (!CurContext->isRecord()) {
+ // 'virtual' was specified outside of the class.
+ Diag(D.getDeclSpec().getVirtualSpecLoc(),
+ diag::err_virtual_out_of_class)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getVirtualSpecLoc());
+ } else if (NewFD->getDescribedFunctionTemplate()) {
+ // C++ [temp.mem]p3:
+ // A member function template shall not be virtual.
+ Diag(D.getDeclSpec().getVirtualSpecLoc(),
+ diag::err_virtual_member_function_template)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getVirtualSpecLoc());
+ } else {
+ // Okay: Add virtual to the method.
+ NewFD->setVirtualAsWritten(true);
+ }
+ }
+
+ // C++ [dcl.fct.spec]p3:
+ // The inline specifier shall not appear on a block scope function
+ // declaration.
+ if (isInline && !NewFD->isInvalidDecl()) {
+ if (CurContext->isFunctionOrMethod()) {
+ // 'inline' is not allowed on block scope function declaration.
+ Diag(D.getDeclSpec().getInlineSpecLoc(),
+ diag::err_inline_declaration_block_scope) << Name
+ << FixItHint::CreateRemoval(D.getDeclSpec().getInlineSpecLoc());
+ }
+ }
+
+ // C++ [dcl.fct.spec]p6:
+ // The explicit specifier shall be used only in the declaration of a
+ // constructor or conversion function within its class definition;
+ // see 12.3.1 and 12.3.2.
+ if (isExplicit && !NewFD->isInvalidDecl()) {
+ if (!CurContext->isRecord()) {
+ // 'explicit' was specified outside of the class.
+ Diag(D.getDeclSpec().getExplicitSpecLoc(),
+ diag::err_explicit_out_of_class)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecLoc());
+ } else if (!isa<CXXConstructorDecl>(NewFD) &&
+ !isa<CXXConversionDecl>(NewFD)) {
+ // 'explicit' was specified on a function that wasn't a constructor
+ // or conversion function.
+ Diag(D.getDeclSpec().getExplicitSpecLoc(),
+ diag::err_explicit_non_ctor_or_conv_function)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getExplicitSpecLoc());
+ }
+ }
+
+ if (isConstexpr) {
+ // C++0x [dcl.constexpr]p2: constexpr functions and constexpr constructors
+ // are implicitly inline.
+ NewFD->setImplicitlyInline();
+
+ // C++0x [dcl.constexpr]p3: functions declared constexpr are required to
+ // be either constructors or to return a literal type. Therefore,
+ // destructors cannot be declared constexpr.
+ if (isa<CXXDestructorDecl>(NewFD))
+ Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_constexpr_dtor);
+ }
+
+ // If __module_private__ was specified, mark the function accordingly.
+ if (D.getDeclSpec().isModulePrivateSpecified()) {
+ if (isFunctionTemplateSpecialization) {
+ SourceLocation ModulePrivateLoc
+ = D.getDeclSpec().getModulePrivateSpecLoc();
+ Diag(ModulePrivateLoc, diag::err_module_private_specialization)
+ << 0
+ << FixItHint::CreateRemoval(ModulePrivateLoc);
+ } else {
+ NewFD->setModulePrivate();
+ if (FunctionTemplate)
+ FunctionTemplate->setModulePrivate();
+ }
+ }
+
+ if (isFriend) {
+ // For now, claim that the objects have no previous declaration.
+ if (FunctionTemplate) {
+ FunctionTemplate->setObjectOfFriendDecl(false);
+ FunctionTemplate->setAccess(AS_public);
+ }
+ NewFD->setObjectOfFriendDecl(false);
+ NewFD->setAccess(AS_public);
+ }
+
+ // If a function is defined as defaulted or deleted, mark it as such now.
+ switch (D.getFunctionDefinitionKind()) {
+ case FDK_Declaration:
+ case FDK_Definition:
+ break;
+
+ case FDK_Defaulted:
+ NewFD->setDefaulted();
+ break;
+
+ case FDK_Deleted:
+ NewFD->setDeletedAsWritten();
+ break;
+ }
+
+ if (isa<CXXMethodDecl>(NewFD) && DC == CurContext &&
+ D.isFunctionDefinition()) {
+ // C++ [class.mfct]p2:
+ // A member function may be defined (8.4) in its class definition, in
+ // which case it is an inline member function (7.1.2)
+ NewFD->setImplicitlyInline();
+ }
+
+ if (SC == SC_Static && isa<CXXMethodDecl>(NewFD) &&
+ !CurContext->isRecord()) {
+ // C++ [class.static]p1:
+ // A data or function member of a class may be declared static
+ // in a class definition, in which case it is a static member of
+ // the class.
+
+ // Complain about the 'static' specifier if it's on an out-of-line
+ // member function definition.
+ Diag(D.getDeclSpec().getStorageClassSpecLoc(),
+ diag::err_static_out_of_line)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
+ }
+ }
+
+ // Filter out previous declarations that don't match the scope.
+ FilterLookupForScope(Previous, DC, S, NewFD->hasLinkage(),
+ isExplicitSpecialization ||
+ isFunctionTemplateSpecialization);
+
+ // Handle GNU asm-label extension (encoded as an attribute).
+ if (Expr *E = (Expr*) D.getAsmLabel()) {
+ // The parser guarantees this is a string.
+ StringLiteral *SE = cast<StringLiteral>(E);
+ NewFD->addAttr(::new (Context) AsmLabelAttr(SE->getStrTokenLoc(0), Context,
+ SE->getString()));
+ } else if (!ExtnameUndeclaredIdentifiers.empty()) {
+ llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*>::iterator I =
+ ExtnameUndeclaredIdentifiers.find(NewFD->getIdentifier());
+ if (I != ExtnameUndeclaredIdentifiers.end()) {
+ NewFD->addAttr(I->second);
+ ExtnameUndeclaredIdentifiers.erase(I);
+ }
+ }
+
+ // Copy the parameter declarations from the declarator D to the function
+ // declaration NewFD, if they are available. First scavenge them into Params.
+ SmallVector<ParmVarDecl*, 16> Params;
+ if (D.isFunctionDeclarator()) {
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+
+ // Check for C99 6.7.5.3p10 - foo(void) is a non-varargs
+ // function that takes no arguments, not a function that takes a
+ // single void argument.
+ // We let through "const void" here because Sema::GetTypeForDeclarator
+ // already checks for that case.
+ if (FTI.NumArgs == 1 && !FTI.isVariadic && FTI.ArgInfo[0].Ident == 0 &&
+ FTI.ArgInfo[0].Param &&
+ cast<ParmVarDecl>(FTI.ArgInfo[0].Param)->getType()->isVoidType()) {
+ // Empty arg list, don't push any params.
+ ParmVarDecl *Param = cast<ParmVarDecl>(FTI.ArgInfo[0].Param);
+
+ // In C++, the empty parameter-type-list must be spelled "void"; a
+ // typedef of void is not permitted.
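+      // e.g. (illustrative): typedef void V; void f(V);  // error in C++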
+ if (getLangOpts().CPlusPlus &&
+ Param->getType().getUnqualifiedType() != Context.VoidTy) {
+ bool IsTypeAlias = false;
+ if (const TypedefType *TT = Param->getType()->getAs<TypedefType>())
+ IsTypeAlias = isa<TypeAliasDecl>(TT->getDecl());
+ else if (const TemplateSpecializationType *TST =
+ Param->getType()->getAs<TemplateSpecializationType>())
+ IsTypeAlias = TST->isTypeAlias();
+ Diag(Param->getLocation(), diag::err_param_typedef_of_void)
+ << IsTypeAlias;
+ }
+ } else if (FTI.NumArgs > 0 && FTI.ArgInfo[0].Param != 0) {
+ for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i) {
+ ParmVarDecl *Param = cast<ParmVarDecl>(FTI.ArgInfo[i].Param);
+ assert(Param->getDeclContext() != NewFD && "Was set before ?");
+ Param->setDeclContext(NewFD);
+ Params.push_back(Param);
+
+ if (Param->isInvalidDecl())
+ NewFD->setInvalidDecl();
+ }
+ }
+
+ } else if (const FunctionProtoType *FT = R->getAs<FunctionProtoType>()) {
+ // When we're declaring a function with a typedef, typeof, etc., as in the
+ // following example, we'll need to synthesize (unnamed)
+ // parameters for use in the declaration.
+ //
+ // @code
+ // typedef void fn(int);
+ // fn f;
+ // @endcode
+
+ // Synthesize a parameter for each argument type.
+ for (FunctionProtoType::arg_type_iterator AI = FT->arg_type_begin(),
+ AE = FT->arg_type_end(); AI != AE; ++AI) {
+ ParmVarDecl *Param =
+ BuildParmVarDeclForTypedef(NewFD, D.getIdentifierLoc(), *AI);
+ Param->setScopeInfo(0, Params.size());
+ Params.push_back(Param);
+ }
+ } else {
+ assert(R->isFunctionNoProtoType() && NewFD->getNumParams() == 0 &&
+ "Should not need args for typedef of non-prototype fn");
+ }
+
+ // Finally, we know we have the right number of parameters; install them.
+ NewFD->setParams(Params);
+
+ // Find all anonymous symbols defined during the declaration of this function
+ // and add them to NewFD. This lets us track decls such as 'enum Y' in:
+ //
+ // void f(enum Y {AA} x) {}
+ //
+ // which would otherwise incorrectly end up in the translation unit scope.
+ NewFD->setDeclsInPrototypeScope(DeclsInPrototypeScope);
+ DeclsInPrototypeScope.clear();
+
+ // Process the non-inheritable attributes on this declaration.
+ ProcessDeclAttributes(S, NewFD, D,
+ /*NonInheritable=*/true, /*Inheritable=*/false);
+
+ // Functions returning a variably modified type violate C99 6.7.5.2p2
+ // because all functions have linkage.
+ if (!NewFD->isInvalidDecl() &&
+ NewFD->getResultType()->isVariablyModifiedType()) {
+ Diag(NewFD->getLocation(), diag::err_vm_func_decl);
+ NewFD->setInvalidDecl();
+ }
+
+ if (!getLangOpts().CPlusPlus) {
+ // Perform semantic checking on the function declaration.
+    bool isExplicitSpecialization = false;
+ if (!NewFD->isInvalidDecl()) {
+ if (NewFD->isMain())
+ CheckMain(NewFD, D.getDeclSpec());
+ D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous,
+ isExplicitSpecialization));
+ }
+ assert((NewFD->isInvalidDecl() || !D.isRedeclaration() ||
+ Previous.getResultKind() != LookupResult::FoundOverloaded) &&
+ "previous declaration set still overloaded");
+ } else {
+ // If the declarator is a template-id, translate the parser's template
+ // argument list into our AST format.
+ bool HasExplicitTemplateArgs = false;
+ TemplateArgumentListInfo TemplateArgs;
+ if (D.getName().getKind() == UnqualifiedId::IK_TemplateId) {
+ TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
+ TemplateArgs.setLAngleLoc(TemplateId->LAngleLoc);
+ TemplateArgs.setRAngleLoc(TemplateId->RAngleLoc);
+ ASTTemplateArgsPtr TemplateArgsPtr(*this,
+ TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+ translateTemplateArguments(TemplateArgsPtr,
+ TemplateArgs);
+ TemplateArgsPtr.release();
+
+ HasExplicitTemplateArgs = true;
+
+ if (NewFD->isInvalidDecl()) {
+ HasExplicitTemplateArgs = false;
+ } else if (FunctionTemplate) {
+ // Function template with explicit template arguments.
+ Diag(D.getIdentifierLoc(), diag::err_function_template_partial_spec)
+ << SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc);
+
+ HasExplicitTemplateArgs = false;
+ } else if (!isFunctionTemplateSpecialization &&
+ !D.getDeclSpec().isFriendSpecified()) {
+ // We have encountered something that the user meant to be a
+ // specialization (because it has explicitly-specified template
+ // arguments) but that was not introduced with a "template<>" (or had
+ // too few of them).
+ Diag(D.getIdentifierLoc(), diag::err_template_spec_needs_header)
+ << SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc)
+ << FixItHint::CreateInsertion(
+ D.getDeclSpec().getLocStart(),
+ "template<> ");
+ isFunctionTemplateSpecialization = true;
+ } else {
+ // "friend void foo<>(int);" is an implicit specialization decl.
+ isFunctionTemplateSpecialization = true;
+ }
+ } else if (isFriend && isFunctionTemplateSpecialization) {
+ // This combination is only possible in a recovery case; the user
+ // wrote something like:
+ // template <> friend void foo(int);
+ // which we're recovering from as if the user had written:
+ // friend void foo<>(int);
+ // Go ahead and fake up a template id.
+ HasExplicitTemplateArgs = true;
+ TemplateArgs.setLAngleLoc(D.getIdentifierLoc());
+ TemplateArgs.setRAngleLoc(D.getIdentifierLoc());
+ }
+
+ // If it's a friend (and only if it's a friend), it's possible
+ // that either the specialized function type or the specialized
+ // template is dependent, and therefore matching will fail. In
+ // this case, don't check the specialization yet.
+ bool InstantiationDependent = false;
+ if (isFunctionTemplateSpecialization && isFriend &&
+ (NewFD->getType()->isDependentType() || DC->isDependentContext() ||
+ TemplateSpecializationType::anyDependentTemplateArguments(
+ TemplateArgs.getArgumentArray(), TemplateArgs.size(),
+ InstantiationDependent))) {
+ assert(HasExplicitTemplateArgs &&
+ "friend function specialization without template args");
+ if (CheckDependentFunctionTemplateSpecialization(NewFD, TemplateArgs,
+ Previous))
+ NewFD->setInvalidDecl();
+ } else if (isFunctionTemplateSpecialization) {
+ if (CurContext->isDependentContext() && CurContext->isRecord()
+ && !isFriend) {
+ isDependentClassScopeExplicitSpecialization = true;
+ Diag(NewFD->getLocation(), getLangOpts().MicrosoftExt ?
+ diag::ext_function_specialization_in_class :
+ diag::err_function_specialization_in_class)
+ << NewFD->getDeclName();
+ } else if (CheckFunctionTemplateSpecialization(NewFD,
+ (HasExplicitTemplateArgs ? &TemplateArgs : 0),
+ Previous))
+ NewFD->setInvalidDecl();
+
+ // C++ [dcl.stc]p1:
+ // A storage-class-specifier shall not be specified in an explicit
+ // specialization (14.7.3)
+ if (SC != SC_None) {
+ if (SC != NewFD->getStorageClass())
+ Diag(NewFD->getLocation(),
+ diag::err_explicit_specialization_inconsistent_storage_class)
+ << SC
+ << FixItHint::CreateRemoval(
+ D.getDeclSpec().getStorageClassSpecLoc());
+
+ else
+ Diag(NewFD->getLocation(),
+ diag::ext_explicit_specialization_storage_class)
+ << FixItHint::CreateRemoval(
+ D.getDeclSpec().getStorageClassSpecLoc());
+ }
+
+ } else if (isExplicitSpecialization && isa<CXXMethodDecl>(NewFD)) {
+ if (CheckMemberSpecialization(NewFD, Previous))
+ NewFD->setInvalidDecl();
+ }
+
+ // Perform semantic checking on the function declaration.
+ if (!isDependentClassScopeExplicitSpecialization) {
+ if (NewFD->isInvalidDecl()) {
+ // If this is a class member, mark the class invalid immediately.
+ // This avoids some consistency errors later.
+ if (CXXMethodDecl* methodDecl = dyn_cast<CXXMethodDecl>(NewFD))
+ methodDecl->getParent()->setInvalidDecl();
+ } else {
+ if (NewFD->isMain())
+ CheckMain(NewFD, D.getDeclSpec());
+ D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous,
+ isExplicitSpecialization));
+ }
+ }
+
+ assert((NewFD->isInvalidDecl() || !D.isRedeclaration() ||
+ Previous.getResultKind() != LookupResult::FoundOverloaded) &&
+ "previous declaration set still overloaded");
+
+ NamedDecl *PrincipalDecl = (FunctionTemplate
+ ? cast<NamedDecl>(FunctionTemplate)
+ : NewFD);
+
+ if (isFriend && D.isRedeclaration()) {
+ AccessSpecifier Access = AS_public;
+ if (!NewFD->isInvalidDecl())
+ Access = NewFD->getPreviousDecl()->getAccess();
+
+ NewFD->setAccess(Access);
+ if (FunctionTemplate) FunctionTemplate->setAccess(Access);
+
+ PrincipalDecl->setObjectOfFriendDecl(true);
+ }
+
+ if (NewFD->isOverloadedOperator() && !DC->isRecord() &&
+ PrincipalDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ PrincipalDecl->setNonMemberOperator();
+
+ // If we have a function template, check the template parameter
+ // list. This will check and merge default template arguments.
+ if (FunctionTemplate) {
+ FunctionTemplateDecl *PrevTemplate =
+ FunctionTemplate->getPreviousDecl();
+ CheckTemplateParameterList(FunctionTemplate->getTemplateParameters(),
+ PrevTemplate ? PrevTemplate->getTemplateParameters() : 0,
+ D.getDeclSpec().isFriendSpecified()
+ ? (D.isFunctionDefinition()
+ ? TPC_FriendFunctionTemplateDefinition
+ : TPC_FriendFunctionTemplate)
+ : (D.getCXXScopeSpec().isSet() &&
+ DC && DC->isRecord() &&
+ DC->isDependentContext())
+ ? TPC_ClassTemplateMember
+ : TPC_FunctionTemplate);
+ }
+
+ if (NewFD->isInvalidDecl()) {
+ // Ignore all the rest of this.
+ } else if (!D.isRedeclaration()) {
+ struct ActOnFDArgs ExtraArgs = { S, D, TemplateParamLists,
+ AddToScope };
+ // Fake up an access specifier if it's supposed to be a class member.
+ if (isa<CXXRecordDecl>(NewFD->getDeclContext()))
+ NewFD->setAccess(AS_public);
+
+ // Qualified decls generally require a previous declaration.
+ if (D.getCXXScopeSpec().isSet()) {
+ // ...with the major exception of templated-scope or
+ // dependent-scope friend declarations.
+
+ // TODO: we currently also suppress this check in dependent
+ // contexts because (1) the parameter depth will be off when
+ // matching friend templates and (2) we might actually be
+ // selecting a friend based on a dependent factor. But there
+ // are situations where these conditions don't apply and we
+ // can actually do this check immediately.
+ if (isFriend &&
+ (TemplateParamLists.size() ||
+ D.getCXXScopeSpec().getScopeRep()->isDependent() ||
+ CurContext->isDependentContext())) {
+ // ignore these
+ } else {
+ // The user tried to provide an out-of-line definition for a
+ // function that is a member of a class or namespace, but there
+ // was no such member function declared (C++ [class.mfct]p2,
+ // C++ [namespace.memdef]p2). For example:
+ //
+ // class X {
+ // void f() const;
+ // };
+ //
+ // void X::f() { } // ill-formed
+ //
+ // Complain about this problem, and attempt to suggest close
+ // matches (e.g., those that differ only in cv-qualifiers and
+ // whether the parameter types are references).
+
+ if (NamedDecl *Result = DiagnoseInvalidRedeclaration(*this, Previous,
+ NewFD,
+ ExtraArgs)) {
+ AddToScope = ExtraArgs.AddToScope;
+ return Result;
+ }
+ }
+
+ // Unqualified local friend declarations are required to resolve
+ // to something.
+ } else if (isFriend && cast<CXXRecordDecl>(CurContext)->isLocalClass()) {
+ if (NamedDecl *Result = DiagnoseInvalidRedeclaration(*this, Previous,
+ NewFD,
+ ExtraArgs)) {
+ AddToScope = ExtraArgs.AddToScope;
+ return Result;
+ }
+ }
+
+ } else if (!D.isFunctionDefinition() && D.getCXXScopeSpec().isSet() &&
+ !isFriend && !isFunctionTemplateSpecialization &&
+ !isExplicitSpecialization) {
+ // An out-of-line member function declaration must also be a
+ // definition (C++ [dcl.meaning]p1).
+ // Note that this is not the case for explicit specializations of
+ // function templates or member functions of class templates, per
+ // C++ [temp.expl.spec]p2. We also allow these declarations as an
+ // extension for compatibility with old SWIG code which likes to
+ // generate them.
+ Diag(NewFD->getLocation(), diag::ext_out_of_line_declaration)
+ << D.getCXXScopeSpec().getRange();
+ }
+ }
+
+ // Handle attributes. We need to have merged decls when handling attributes
+ // (for example to check for conflicts, etc).
+ // FIXME: This needs to happen before we merge declarations. Then,
+ // let attribute merging cope with attribute conflicts.
+ ProcessDeclAttributes(S, NewFD, D,
+ /*NonInheritable=*/false, /*Inheritable=*/true);
+
+ // Attributes declared post-definition are currently ignored.
+ // FIXME: This should happen during attribute merging
+ if (D.isRedeclaration() && Previous.isSingleResult()) {
+ const FunctionDecl *Def;
+ FunctionDecl *PrevFD = dyn_cast<FunctionDecl>(Previous.getFoundDecl());
+ if (PrevFD && PrevFD->isDefined(Def) && D.hasAttributes()) {
+ Diag(NewFD->getLocation(), diag::warn_attribute_precede_definition);
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ }
+ }
+
+ AddKnownFunctionAttributes(NewFD);
+
+ if (NewFD->hasAttr<OverloadableAttr>() &&
+ !NewFD->getType()->getAs<FunctionProtoType>()) {
+ Diag(NewFD->getLocation(),
+ diag::err_attribute_overloadable_no_prototype)
+ << NewFD;
+
+ // Turn this into a variadic function with no parameters.
+ const FunctionType *FT = NewFD->getType()->getAs<FunctionType>();
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = true;
+ EPI.ExtInfo = FT->getExtInfo();
+
+ QualType R = Context.getFunctionType(FT->getResultType(), 0, 0, EPI);
+ NewFD->setType(R);
+ }
+
+ // If there's a #pragma GCC visibility in scope, and this isn't a class
+ // member, set the visibility of this function.
+ if (NewFD->getLinkage() == ExternalLinkage && !DC->isRecord())
+ AddPushedVisibilityAttribute(NewFD);
+
+ // If there's a #pragma clang arc_cf_code_audited in scope, consider
+ // marking the function.
+ AddCFAuditedAttribute(NewFD);
+
+ // If this is a locally-scoped extern C function, update the
+ // map of such names.
+ if (CurContext->isFunctionOrMethod() && NewFD->isExternC()
+ && !NewFD->isInvalidDecl())
+ RegisterLocallyScopedExternCDecl(NewFD, Previous, S);
+
+ // Set this FunctionDecl's range up to the right paren.
+ NewFD->setRangeEnd(D.getSourceRange().getEnd());
+
+ if (getLangOpts().CPlusPlus) {
+ if (FunctionTemplate) {
+ if (NewFD->isInvalidDecl())
+ FunctionTemplate->setInvalidDecl();
+ return FunctionTemplate;
+ }
+ }
+
+ MarkUnusedFileScopedDecl(NewFD);
+
+ if (getLangOpts().CUDA)
+ if (IdentifierInfo *II = NewFD->getIdentifier())
+ if (!NewFD->isInvalidDecl() &&
+ NewFD->getDeclContext()->getRedeclContext()->isTranslationUnit()) {
+ if (II->isStr("cudaConfigureCall")) {
+ if (!R->getAs<FunctionType>()->getResultType()->isScalarType())
+ Diag(NewFD->getLocation(), diag::err_config_scalar_return);
+
+ Context.setcudaConfigureCallDecl(NewFD);
+ }
+ }
+
+ // Here we have a function template explicit specialization at class scope.
+ // The actual specialization will be postponed to template instantiation
+ // time via the ClassScopeFunctionSpecializationDecl node.
+ if (isDependentClassScopeExplicitSpecialization) {
+ ClassScopeFunctionSpecializationDecl *NewSpec =
+ ClassScopeFunctionSpecializationDecl::Create(
+ Context, CurContext, SourceLocation(),
+ cast<CXXMethodDecl>(NewFD));
+ CurContext->addDecl(NewSpec);
+ AddToScope = false;
+ }
+
+ return NewFD;
+}
+
+/// \brief Perform semantic checking of a new function declaration.
+///
+/// Performs semantic analysis of the new function declaration
+/// NewFD. This routine performs all semantic checking that does not
+/// require the actual declarator involved in the declaration, and is
+/// used both for the declaration of functions as they are parsed
+/// (called via ActOnDeclarator) and for the declaration of functions
+/// that have been instantiated via C++ template instantiation (called
+/// via InstantiateDecl).
+///
+/// \param IsExplicitSpecialization whether this new function declaration is
+/// an explicit specialization of the previous declaration.
+///
+/// This sets NewFD->isInvalidDecl() to true if there was an error.
+///
+/// Returns true if the function declaration is a redeclaration.
+bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
+ LookupResult &Previous,
+ bool IsExplicitSpecialization) {
+ assert(!NewFD->getResultType()->isVariablyModifiedType()
+ && "Variably modified return types are not handled here");
+
+ // Check for a previous declaration of this name.
+ if (Previous.empty() && NewFD->isExternC()) {
+ // Since we did not find anything by this name and we're declaring
+ // an extern "C" function, look for a non-visible extern "C"
+ // declaration with the same name.
+ llvm::DenseMap<DeclarationName, NamedDecl *>::iterator Pos
+ = findLocallyScopedExternalDecl(NewFD->getDeclName());
+ if (Pos != LocallyScopedExternalDecls.end())
+ Previous.addDecl(Pos->second);
+ }
+
+ bool Redeclaration = false;
+
+ // Merge or overload the declaration with an existing declaration of
+ // the same name, if appropriate.
+ if (!Previous.empty()) {
+ // Determine whether NewFD is an overload of PrevDecl or
+ // a declaration that requires merging. If it's an overload,
+ // there's no more work to do here; we'll just add the new
+ // function to the scope.
+
+ NamedDecl *OldDecl = 0;
+ if (!AllowOverloadingOfFunction(Previous, Context)) {
+ Redeclaration = true;
+ OldDecl = Previous.getFoundDecl();
+ } else {
+ switch (CheckOverload(S, NewFD, Previous, OldDecl,
+ /*NewIsUsingDecl*/ false)) {
+ case Ovl_Match:
+ Redeclaration = true;
+ break;
+
+ case Ovl_NonFunction:
+ Redeclaration = true;
+ break;
+
+ case Ovl_Overload:
+ Redeclaration = false;
+ break;
+ }
+
+ if (!getLangOpts().CPlusPlus && !NewFD->hasAttr<OverloadableAttr>()) {
+ // If a function name is overloadable in C, then every function
+ // with that name must be marked "overloadable".
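+ // For example (illustrative), in C:
+ //   void f(int) __attribute__((overloadable));
+ //   void f(float); // error: must also be declared 'overloadable'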
+ Diag(NewFD->getLocation(), diag::err_attribute_overloadable_missing)
+ << Redeclaration << NewFD;
+ NamedDecl *OverloadedDecl = 0;
+ if (Redeclaration)
+ OverloadedDecl = OldDecl;
+ else if (!Previous.empty())
+ OverloadedDecl = Previous.getRepresentativeDecl();
+ if (OverloadedDecl)
+ Diag(OverloadedDecl->getLocation(),
+ diag::note_attribute_overloadable_prev_overload);
+ NewFD->addAttr(::new (Context) OverloadableAttr(SourceLocation(),
+ Context));
+ }
+ }
+
+ if (Redeclaration) {
+ // NewFD and OldDecl represent declarations that need to be
+ // merged.
+ if (MergeFunctionDecl(NewFD, OldDecl, S)) {
+ NewFD->setInvalidDecl();
+ return Redeclaration;
+ }
+
+ Previous.clear();
+ Previous.addDecl(OldDecl);
+
+ if (FunctionTemplateDecl *OldTemplateDecl
+ = dyn_cast<FunctionTemplateDecl>(OldDecl)) {
+ NewFD->setPreviousDeclaration(OldTemplateDecl->getTemplatedDecl());
+ FunctionTemplateDecl *NewTemplateDecl
+ = NewFD->getDescribedFunctionTemplate();
+ assert(NewTemplateDecl && "Template/non-template mismatch");
+ if (CXXMethodDecl *Method
+ = dyn_cast<CXXMethodDecl>(NewTemplateDecl->getTemplatedDecl())) {
+ Method->setAccess(OldTemplateDecl->getAccess());
+ NewTemplateDecl->setAccess(OldTemplateDecl->getAccess());
+ }
+
+ // If this is an explicit specialization of a member that is a function
+ // template, mark it as a member specialization.
+ if (IsExplicitSpecialization &&
+ NewTemplateDecl->getInstantiatedFromMemberTemplate()) {
+ NewTemplateDecl->setMemberSpecialization();
+ assert(OldTemplateDecl->isMemberSpecialization());
+ }
+
+ } else {
+ if (isa<CXXMethodDecl>(NewFD)) // Set access for out-of-line definitions
+ NewFD->setAccess(OldDecl->getAccess());
+ NewFD->setPreviousDeclaration(cast<FunctionDecl>(OldDecl));
+ }
+ }
+ }
+
+ // Semantic checking for this function declaration (in isolation).
+ if (getLangOpts().CPlusPlus) {
+ // C++-specific checks.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(NewFD)) {
+ CheckConstructor(Constructor);
+ } else if (CXXDestructorDecl *Destructor =
+ dyn_cast<CXXDestructorDecl>(NewFD)) {
+ CXXRecordDecl *Record = Destructor->getParent();
+ QualType ClassType = Context.getTypeDeclType(Record);
+
+ // FIXME: Shouldn't we be able to perform this check even when the class
+ // type is dependent? Both gcc and edg can handle that.
+ if (!ClassType->isDependentType()) {
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXDestructorName(
+ Context.getCanonicalType(ClassType));
+ if (NewFD->getDeclName() != Name) {
+ Diag(NewFD->getLocation(), diag::err_destructor_name);
+ NewFD->setInvalidDecl();
+ return Redeclaration;
+ }
+ }
+ } else if (CXXConversionDecl *Conversion
+ = dyn_cast<CXXConversionDecl>(NewFD)) {
+ ActOnConversionDeclarator(Conversion);
+ }
+
+ // Find any virtual functions that this function overrides.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(NewFD)) {
+ if (!Method->isFunctionTemplateSpecialization() &&
+ !Method->getDescribedFunctionTemplate()) {
+ if (AddOverriddenMethods(Method->getParent(), Method)) {
+ // If the function was marked as "static", we have a problem.
+ if (NewFD->getStorageClass() == SC_Static) {
+ Diag(NewFD->getLocation(), diag::err_static_overrides_virtual)
+ << NewFD->getDeclName();
+ for (CXXMethodDecl::method_iterator
+ Overridden = Method->begin_overridden_methods(),
+ OverriddenEnd = Method->end_overridden_methods();
+ Overridden != OverriddenEnd;
+ ++Overridden) {
+ Diag((*Overridden)->getLocation(),
+ diag::note_overridden_virtual_function);
+ }
+ }
+ }
+ }
+ }
+
+ // Extra checking for C++ overloaded operators (C++ [over.oper]).
+ if (NewFD->isOverloadedOperator() &&
+ CheckOverloadedOperatorDeclaration(NewFD)) {
+ NewFD->setInvalidDecl();
+ return Redeclaration;
+ }
+
+ // Extra checking for C++0x literal operators (C++0x [over.literal]).
+ if (NewFD->getLiteralIdentifier() &&
+ CheckLiteralOperatorDeclaration(NewFD)) {
+ NewFD->setInvalidDecl();
+ return Redeclaration;
+ }
+
+ // In C++, check default arguments now that we have merged decls, unless
+ // the lexical context is the class, in which case this is done during
+ // delayed parsing anyway.
+ if (!CurContext->isRecord())
+ CheckCXXDefaultArguments(NewFD);
+
+ // If this function declares a builtin function, check the type of this
+ // declaration against the expected type for the builtin.
+ if (unsigned BuiltinID = NewFD->getBuiltinID()) {
+ ASTContext::GetBuiltinTypeError Error;
+ QualType T = Context.GetBuiltinType(BuiltinID, Error);
+ if (!T.isNull() && !Context.hasSameType(T, NewFD->getType())) {
+ // The type of this function differs from the type of the builtin,
+ // so forget about the builtin entirely.
+ Context.BuiltinInfo.ForgetBuiltin(BuiltinID, Context.Idents);
+ }
+ }
+
+ // If this function is declared as being extern "C", then check to see if
+ // the function returns a UDT (class, struct, or union type) that is not C
+ // compatible, and if it does, warn the user.
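+ // For example (illustrative): 'extern "C" std::string f();' returns a
+ // non-POD class type and triggers this warning.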
+ if (NewFD->isExternC()) {
+ QualType R = NewFD->getResultType();
+ if (!R.isPODType(Context) &&
+ !R->isVoidType())
+ Diag(NewFD->getLocation(), diag::warn_return_value_udt)
+ << NewFD << R;
+ }
+ }
+ return Redeclaration;
+}
+
+void Sema::CheckMain(FunctionDecl* FD, const DeclSpec& DS) {
+ // C++11 [basic.start.main]p3: A program that declares main to be inline,
+ // static or constexpr is ill-formed.
+ // C99 6.7.4p4: In a hosted environment, the inline function specifier
+ // shall not appear in a declaration of main.
+ // static main is not an error under C99, but we should warn about it.
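+ // For example (illustrative): 'static int main(void)' is only a warning in
+ // C99 but an error in C++, while 'inline int main(void)' is always an error.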
+ if (FD->getStorageClass() == SC_Static)
+ Diag(DS.getStorageClassSpecLoc(), getLangOpts().CPlusPlus
+ ? diag::err_static_main : diag::warn_static_main)
+ << FixItHint::CreateRemoval(DS.getStorageClassSpecLoc());
+ if (FD->isInlineSpecified())
+ Diag(DS.getInlineSpecLoc(), diag::err_inline_main)
+ << FixItHint::CreateRemoval(DS.getInlineSpecLoc());
+ if (FD->isConstexpr()) {
+ Diag(DS.getConstexprSpecLoc(), diag::err_constexpr_main)
+ << FixItHint::CreateRemoval(DS.getConstexprSpecLoc());
+ FD->setConstexpr(false);
+ }
+
+ QualType T = FD->getType();
+ assert(T->isFunctionType() && "function decl is not of function type");
+ const FunctionType* FT = T->castAs<FunctionType>();
+
+ // All the standards say that main() should return 'int'.
+ if (Context.hasSameUnqualifiedType(FT->getResultType(), Context.IntTy)) {
+ // In C and C++, main magically returns 0 if you fall off the end;
+ // set the flag which tells us that.
+ // This is C++ [basic.start.main]p5 and C99 5.1.2.2.3.
+ FD->setHasImplicitReturnZero(true);
+
+ // In C with GNU extensions we allow main() to have non-integer return
+ // type, but we should warn about the extension, and we disable the
+ // implicit-return-zero rule.
+ } else if (getLangOpts().GNUMode && !getLangOpts().CPlusPlus) {
+ Diag(FD->getTypeSpecStartLoc(), diag::ext_main_returns_nonint);
+
+ // Otherwise, this is just a flat-out error.
+ } else {
+ Diag(FD->getTypeSpecStartLoc(), diag::err_main_returns_nonint);
+ FD->setInvalidDecl(true);
+ }
+
+ // Treat protoless main() as nullary.
+ if (isa<FunctionNoProtoType>(FT)) return;
+
+ const FunctionProtoType* FTP = cast<const FunctionProtoType>(FT);
+ unsigned nparams = FTP->getNumArgs();
+ assert(FD->getNumParams() == nparams);
+
+ bool HasExtraParameters = (nparams > 3);
+
+ // Darwin passes an undocumented fourth argument of type char**. If
+ // other platforms start sprouting these, the logic below will start
+ // getting shifty.
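+ // For example (illustrative), Darwin accepts:
+ //   int main(int argc, char **argv, char **envp, char **apple);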
+ if (nparams == 4 && Context.getTargetInfo().getTriple().isOSDarwin())
+ HasExtraParameters = false;
+
+ if (HasExtraParameters) {
+ Diag(FD->getLocation(), diag::err_main_surplus_args) << nparams;
+ FD->setInvalidDecl(true);
+ nparams = 3;
+ }
+
+ // FIXME: a lot of the following diagnostics would be improved
+ // if we had some location information about types.
+
+ QualType CharPP =
+ Context.getPointerType(Context.getPointerType(Context.CharTy));
+ QualType Expected[] = { Context.IntTy, CharPP, CharPP, CharPP };
+
+ for (unsigned i = 0; i < nparams; ++i) {
+ QualType AT = FTP->getArgType(i);
+
+ bool mismatch = true;
+
+ if (Context.hasSameUnqualifiedType(AT, Expected[i]))
+ mismatch = false;
+ else if (Expected[i] == CharPP) {
+ // As an extension, the following forms are okay:
+ // char const **
+ // char const * const *
+ // char * const *
+
+ QualifierCollector qs;
+ const PointerType* PT;
+ if ((PT = qs.strip(AT)->getAs<PointerType>()) &&
+ (PT = qs.strip(PT->getPointeeType())->getAs<PointerType>()) &&
+ (QualType(qs.strip(PT->getPointeeType()), 0) == Context.CharTy)) {
+ qs.removeConst();
+ mismatch = !qs.empty();
+ }
+ }
+
+ if (mismatch) {
+ Diag(FD->getLocation(), diag::err_main_arg_wrong) << i << Expected[i];
+ // TODO: suggest replacing given type with expected type
+ FD->setInvalidDecl(true);
+ }
+ }
+
+ if (nparams == 1 && !FD->isInvalidDecl()) {
+ Diag(FD->getLocation(), diag::warn_main_one_arg);
+ }
+
+ if (!FD->isInvalidDecl() && FD->getDescribedFunctionTemplate()) {
+ Diag(FD->getLocation(), diag::err_main_template_decl);
+ FD->setInvalidDecl();
+ }
+}
+
+bool Sema::CheckForConstantInitializer(Expr *Init, QualType DclT) {
+ // FIXME: Need strict checking. In C89, we need to check for
+ // any assignment, increment, decrement, function-calls, or
+ // commas outside of a sizeof. In C99, it's the same list,
+ // except that the aforementioned are allowed in unevaluated
+ // expressions. Everything else falls under the
+ // "may accept other forms of constant expressions" exception.
+ // (We never end up here for C++, so the constant expression
+ // rules there don't matter.)
+ if (Init->isConstantInitializer(Context, false))
+ return false;
+ Diag(Init->getExprLoc(), diag::err_init_element_not_constant)
+ << Init->getSourceRange();
+ return true;
+}
+
+namespace {
+ // Visits an initialization expression to see if OrigDecl is evaluated in
+ // its own initialization and emits a warning if it is.
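+ // For example (illustrative): 'int x = x + 1;' at file scope references
+ // 'x' in its own initializer and triggers this warning.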
+ class SelfReferenceChecker
+ : public EvaluatedExprVisitor<SelfReferenceChecker> {
+ Sema &S;
+ Decl *OrigDecl;
+ bool isRecordType;
+ bool isPODType;
+
+ public:
+ typedef EvaluatedExprVisitor<SelfReferenceChecker> Inherited;
+
+ SelfReferenceChecker(Sema &S, Decl *OrigDecl) : Inherited(S.Context),
+ S(S), OrigDecl(OrigDecl) {
+ isPODType = false;
+ isRecordType = false;
+ if (ValueDecl *VD = dyn_cast<ValueDecl>(OrigDecl)) {
+ isPODType = VD->getType().isPODType(S.Context);
+ isRecordType = VD->getType()->isRecordType();
+ }
+ }
+
+ void VisitExpr(Expr *E) {
+ if (isa<ObjCMessageExpr>(*E)) return;
+ if (isRecordType) {
+ Expr *expr = E;
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ ValueDecl *VD = ME->getMemberDecl();
+ if (isa<EnumConstantDecl>(VD) || isa<VarDecl>(VD)) return;
+ expr = ME->getBase();
+ }
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(expr)) {
+ HandleDeclRefExpr(DRE);
+ return;
+ }
+ }
+ Inherited::VisitExpr(E);
+ }
+
+ void VisitMemberExpr(MemberExpr *E) {
+ if (E->getType()->canDecayToPointerType()) return;
+ ValueDecl *VD = E->getMemberDecl();
+ if (isa<FieldDecl>(VD) || isa<CXXMethodDecl>(VD))
+ if (DeclRefExpr *DRE
+ = dyn_cast<DeclRefExpr>(E->getBase()->IgnoreParenImpCasts())) {
+ HandleDeclRefExpr(DRE);
+ return;
+ }
+ Inherited::VisitMemberExpr(E);
+ }
+
+ void VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ if ((!isRecordType && E->getCastKind() == CK_LValueToRValue) ||
+ (isRecordType && E->getCastKind() == CK_NoOp)) {
+ Expr* SubExpr = E->getSubExpr()->IgnoreParenImpCasts();
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(SubExpr))
+ SubExpr = ME->getBase()->IgnoreParenImpCasts();
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(SubExpr)) {
+ HandleDeclRefExpr(DRE);
+ return;
+ }
+ }
+ Inherited::VisitImplicitCastExpr(E);
+ }
+
+ void VisitUnaryOperator(UnaryOperator *E) {
+ // For a POD record type, taking the address of its own members is
+ // well-defined.
+ if (isRecordType && isPODType) return;
+ Inherited::VisitUnaryOperator(E);
+ }
+
+ void HandleDeclRefExpr(DeclRefExpr *DRE) {
+ Decl* ReferenceDecl = DRE->getDecl();
+ if (OrigDecl != ReferenceDecl) return;
+ LookupResult Result(S, DRE->getNameInfo(), Sema::LookupOrdinaryName,
+ Sema::NotForRedeclaration);
+ S.DiagRuntimeBehavior(DRE->getLocStart(), DRE,
+ S.PDiag(diag::warn_uninit_self_reference_in_init)
+ << Result.getLookupName()
+ << OrigDecl->getLocation()
+ << DRE->getSourceRange());
+ }
+ };
+}
+
+/// CheckSelfReference - Warns if OrigDecl is used in expression E.
+void Sema::CheckSelfReference(Decl* OrigDecl, Expr *E) {
+ SelfReferenceChecker(*this, OrigDecl).VisitExpr(E);
+}
+
+/// AddInitializerToDecl - Adds the initializer Init to the
+/// declaration dcl. If DirectInit is true, this is C++ direct
+/// initialization rather than copy initialization.
+void Sema::AddInitializerToDecl(Decl *RealDecl, Expr *Init,
+ bool DirectInit, bool TypeMayContainAuto) {
+ // If there is no declaration, there was an error parsing it. Just ignore
+ // the initializer.
+ if (RealDecl == 0 || RealDecl->isInvalidDecl())
+ return;
+
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(RealDecl)) {
+ // With declarators parsed the way they are, the parser cannot
+ // distinguish between a normal initializer and a pure-specifier.
+ // Thus this grotesque test.
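+ // For example (illustrative), for 'virtual void f() = 0;' the '0' arrives
+ // here as an initializer and is recognized as a pure-specifier below.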
+ IntegerLiteral *IL;
+ if ((IL = dyn_cast<IntegerLiteral>(Init)) && IL->getValue() == 0 &&
+ Context.getCanonicalType(IL->getType()) == Context.IntTy)
+ CheckPureMethod(Method, Init->getSourceRange());
+ else {
+ Diag(Method->getLocation(), diag::err_member_function_initialization)
+ << Method->getDeclName() << Init->getSourceRange();
+ Method->setInvalidDecl();
+ }
+ return;
+ }
+
+ VarDecl *VDecl = dyn_cast<VarDecl>(RealDecl);
+ if (!VDecl) {
+ assert(!isa<FieldDecl>(RealDecl) && "field init shouldn't get here");
+ Diag(RealDecl->getLocation(), diag::err_illegal_initializer);
+ RealDecl->setInvalidDecl();
+ return;
+ }
+
+ // Check for self-references within variable initializers.
+ // Variables declared within a function/method body are handled
+ // by a dataflow analysis.
+ if (!VDecl->hasLocalStorage() && !VDecl->isStaticLocal())
+ CheckSelfReference(RealDecl, Init);
+
+ ParenListExpr *CXXDirectInit = dyn_cast<ParenListExpr>(Init);
+
+ // C++11 [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
+ if (TypeMayContainAuto && VDecl->getType()->getContainedAutoType()) {
+ Expr *DeduceInit = Init;
+ // Initializer could be a C++ direct-initializer. Deduction only works if it
+ // contains exactly one expression.
+ if (CXXDirectInit) {
+ if (CXXDirectInit->getNumExprs() == 0) {
+ // It isn't possible to write this directly, but it is possible to
+ // end up in this situation with "auto x(some_pack...);"
+ Diag(CXXDirectInit->getLocStart(),
+ diag::err_auto_var_init_no_expression)
+ << VDecl->getDeclName() << VDecl->getType()
+ << VDecl->getSourceRange();
+ RealDecl->setInvalidDecl();
+ return;
+ } else if (CXXDirectInit->getNumExprs() > 1) {
+ Diag(CXXDirectInit->getExpr(1)->getLocStart(),
+ diag::err_auto_var_init_multiple_expressions)
+ << VDecl->getDeclName() << VDecl->getType()
+ << VDecl->getSourceRange();
+ RealDecl->setInvalidDecl();
+ return;
+ } else {
+ DeduceInit = CXXDirectInit->getExpr(0);
+ }
+ }
+ TypeSourceInfo *DeducedType = 0;
+ if (DeduceAutoType(VDecl->getTypeSourceInfo(), DeduceInit, DeducedType) ==
+ DAR_Failed)
+ DiagnoseAutoDeductionFailure(VDecl, DeduceInit);
+ if (!DeducedType) {
+ RealDecl->setInvalidDecl();
+ return;
+ }
+ VDecl->setTypeSourceInfo(DeducedType);
+ VDecl->setType(DeducedType->getType());
+ VDecl->ClearLinkageCache();
+
+ // In ARC, infer lifetime.
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(VDecl))
+ VDecl->setInvalidDecl();
+
+ // If this is a redeclaration, check that the type we just deduced matches
+ // the previously declared type.
+ if (VarDecl *Old = VDecl->getPreviousDecl())
+ MergeVarDeclTypes(VDecl, Old);
+ }
+
+ if (VDecl->isLocalVarDecl() && VDecl->hasExternalStorage()) {
+ // C99 6.7.8p5. C++ has no such restriction, but that is a defect.
+ Diag(VDecl->getLocation(), diag::err_block_extern_cant_init);
+ VDecl->setInvalidDecl();
+ return;
+ }
+
+ if (!VDecl->getType()->isDependentType()) {
+ // A definition must end up with a complete type, which means it must be
+ // complete with the restriction that an array type might be completed by
+ // the initializer; note that later code assumes this restriction.
+ QualType BaseDeclType = VDecl->getType();
+ if (const ArrayType *Array = Context.getAsIncompleteArrayType(BaseDeclType))
+ BaseDeclType = Array->getElementType();
+ if (RequireCompleteType(VDecl->getLocation(), BaseDeclType,
+ diag::err_typecheck_decl_incomplete_type)) {
+ RealDecl->setInvalidDecl();
+ return;
+ }
+
+ // The variable cannot have an abstract class type.
+ if (RequireNonAbstractType(VDecl->getLocation(), VDecl->getType(),
+ diag::err_abstract_type_in_decl,
+ AbstractVariableType))
+ VDecl->setInvalidDecl();
+ }
+
+ const VarDecl *Def;
+ if ((Def = VDecl->getDefinition()) && Def != VDecl) {
+ Diag(VDecl->getLocation(), diag::err_redefinition)
+ << VDecl->getDeclName();
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ VDecl->setInvalidDecl();
+ return;
+ }
+
+ const VarDecl* PrevInit = 0;
+ if (getLangOpts().CPlusPlus) {
+ // C++ [class.static.data]p4
+ // If a static data member is of const integral or const
+ // enumeration type, its declaration in the class definition can
+ // specify a constant-initializer which shall be an integral
+ // constant expression (5.19). In that case, the member can appear
+ // in integral constant expressions. The member shall still be
+ // defined in a namespace scope if it is used in the program and the
+ // namespace scope definition shall not contain an initializer.
+ //
+ // We already performed a redefinition check above, but for static
+ // data members we also need to check whether there was an in-class
+ // declaration with an initializer.
+ if (VDecl->isStaticDataMember() && VDecl->getAnyInitializer(PrevInit)) {
+ Diag(VDecl->getLocation(), diag::err_redefinition)
+ << VDecl->getDeclName();
+ Diag(PrevInit->getLocation(), diag::note_previous_definition);
+ return;
+ }
+
+ if (VDecl->hasLocalStorage())
+ getCurFunction()->setHasBranchProtectedScope();
+
+ if (DiagnoseUnexpandedParameterPack(Init, UPPC_Initializer)) {
+ VDecl->setInvalidDecl();
+ return;
+ }
+ }
+
+ // OpenCL 1.1 6.5.2: "Variables allocated in the __local address space inside
+ // a kernel function cannot be initialized."
+ if (VDecl->getStorageClass() == SC_OpenCLWorkGroupLocal) {
+ Diag(VDecl->getLocation(), diag::err_local_cant_init);
+ VDecl->setInvalidDecl();
+ return;
+ }
+
+ // Get the decl's type and save a reference for later, since
+ // CheckInitializerTypes may change it.
+ QualType DclT = VDecl->getType(), SavT = DclT;
+
+ // Top-level message sends default to 'id' when we're in a debugger
+ // and we are assigning it to a variable of 'id' type.
+ if (getLangOpts().DebuggerCastResultToId && DclT->isObjCIdType())
+ if (Init->getType() == Context.UnknownAnyTy && isa<ObjCMessageExpr>(Init)) {
+ ExprResult Result = forceUnknownAnyToType(Init, Context.getObjCIdType());
+ if (Result.isInvalid()) {
+ VDecl->setInvalidDecl();
+ return;
+ }
+ Init = Result.take();
+ }
+
+ // Perform the initialization.
+ if (!VDecl->isInvalidDecl()) {
+ InitializedEntity Entity = InitializedEntity::InitializeVariable(VDecl);
+ InitializationKind Kind
+ = DirectInit ?
+ CXXDirectInit ? InitializationKind::CreateDirect(VDecl->getLocation(),
+ Init->getLocStart(),
+ Init->getLocEnd())
+ : InitializationKind::CreateDirectList(
+ VDecl->getLocation())
+ : InitializationKind::CreateCopy(VDecl->getLocation(),
+ Init->getLocStart());
+
+ Expr **Args = &Init;
+ unsigned NumArgs = 1;
+ if (CXXDirectInit) {
+ Args = CXXDirectInit->getExprs();
+ NumArgs = CXXDirectInit->getNumExprs();
+ }
+ InitializationSequence InitSeq(*this, Entity, Kind, Args, NumArgs);
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind,
+ MultiExprArg(*this, Args,NumArgs),
+ &DclT);
+ if (Result.isInvalid()) {
+ VDecl->setInvalidDecl();
+ return;
+ }
+
+ Init = Result.takeAs<Expr>();
+ }
+
+ // If the type changed, it means we had an incomplete type that was
+ // completed by the initializer. For example:
+ // int ary[] = { 1, 3, 5 };
+ // "ary" transitions from an IncompleteArrayType to a ConstantArrayType.
+ if (!VDecl->isInvalidDecl() && (DclT != SavT))
+ VDecl->setType(DclT);
+
+ // Check any implicit conversions within the expression.
+ CheckImplicitConversions(Init, VDecl->getLocation());
+
+ if (!VDecl->isInvalidDecl())
+ checkUnsafeAssigns(VDecl->getLocation(), VDecl->getType(), Init);
+
+ Init = MaybeCreateExprWithCleanups(Init);
+ // Attach the initializer to the decl.
+ VDecl->setInit(Init);
+
+ if (VDecl->isLocalVarDecl()) {
+ // C99 6.7.8p4: All the expressions in an initializer for an object that has
+ // static storage duration shall be constant expressions or string literals.
+ // C++ does not have this restriction.
+ if (!getLangOpts().CPlusPlus && !VDecl->isInvalidDecl() &&
+ VDecl->getStorageClass() == SC_Static)
+ CheckForConstantInitializer(Init, DclT);
+ } else if (VDecl->isStaticDataMember() &&
+ VDecl->getLexicalDeclContext()->isRecord()) {
+ // This is an in-class initialization for a static data member, e.g.,
+ //
+ // struct S {
+ // static const int value = 17;
+ // };
+
+ // C++ [class.mem]p4:
+ // A member-declarator can contain a constant-initializer only
+ // if it declares a static member (9.4) of const integral or
+ // const enumeration type, see 9.4.2.
+ //
+ // C++11 [class.static.data]p3:
+ // If a non-volatile const static data member is of integral or
+ // enumeration type, its declaration in the class definition can
+ // specify a brace-or-equal-initializer in which every initializer-clause
+ // that is an assignment-expression is a constant expression. A static
+ // data member of literal type can be declared in the class definition
+ // with the constexpr specifier; if so, its declaration shall specify a
+ // brace-or-equal-initializer in which every initializer-clause that is
+ // an assignment-expression is a constant expression.
+
+ // Do nothing on dependent types.
+ if (DclT->isDependentType()) {
+
+ // Allow any 'static constexpr' members, whether or not they are of literal
+ // type. We separately check that every constexpr variable is of literal
+ // type.
+ } else if (VDecl->isConstexpr()) {
+
+ // Require constness.
+ } else if (!DclT.isConstQualified()) {
+ Diag(VDecl->getLocation(), diag::err_in_class_initializer_non_const)
+ << Init->getSourceRange();
+ VDecl->setInvalidDecl();
+
+ // We allow integer constant expressions in all cases.
+ } else if (DclT->isIntegralOrEnumerationType()) {
+ // Check whether the expression is a constant expression.
+ SourceLocation Loc;
+ if (getLangOpts().CPlusPlus0x && DclT.isVolatileQualified())
+ // In C++11, a non-constexpr const static data member with an
+ // in-class initializer cannot be volatile.
+ Diag(VDecl->getLocation(), diag::err_in_class_initializer_volatile);
+ else if (Init->isValueDependent())
+ ; // Nothing to check.
+ else if (Init->isIntegerConstantExpr(Context, &Loc))
+ ; // Ok, it's an ICE!
+ else if (Init->isEvaluatable(Context)) {
+ // If we can constant fold the initializer through heroics, accept it,
+ // but report this as a use of an extension for -pedantic.
+ Diag(Loc, diag::ext_in_class_initializer_non_constant)
+ << Init->getSourceRange();
+ } else {
+ // Otherwise, this is some crazy unknown case. Report the issue at the
+ // location provided by the failed isIntegerConstantExpr check.
+ Diag(Loc, diag::err_in_class_initializer_non_constant)
+ << Init->getSourceRange();
+ VDecl->setInvalidDecl();
+ }
+
+ // We allow foldable floating-point constants as an extension.
+ } else if (DclT->isFloatingType()) { // also permits complex, which is ok
+ Diag(VDecl->getLocation(), diag::ext_in_class_initializer_float_type)
+ << DclT << Init->getSourceRange();
+ if (getLangOpts().CPlusPlus0x)
+ Diag(VDecl->getLocation(),
+ diag::note_in_class_initializer_float_type_constexpr)
+ << FixItHint::CreateInsertion(VDecl->getLocStart(), "constexpr ");
+
+ if (!Init->isValueDependent() && !Init->isEvaluatable(Context)) {
+ Diag(Init->getExprLoc(), diag::err_in_class_initializer_non_constant)
+ << Init->getSourceRange();
+ VDecl->setInvalidDecl();
+ }
+
+ // Suggest adding 'constexpr' in C++11 for literal types.
+ } else if (getLangOpts().CPlusPlus0x && DclT->isLiteralType()) {
+ Diag(VDecl->getLocation(), diag::err_in_class_initializer_literal_type)
+ << DclT << Init->getSourceRange()
+ << FixItHint::CreateInsertion(VDecl->getLocStart(), "constexpr ");
+ VDecl->setConstexpr(true);
+
+ } else {
+ Diag(VDecl->getLocation(), diag::err_in_class_initializer_bad_type)
+ << DclT << Init->getSourceRange();
+ VDecl->setInvalidDecl();
+ }
+ } else if (VDecl->isFileVarDecl()) {
+ if (VDecl->getStorageClassAsWritten() == SC_Extern &&
+ (!getLangOpts().CPlusPlus ||
+ !Context.getBaseElementType(VDecl->getType()).isConstQualified()))
+ Diag(VDecl->getLocation(), diag::warn_extern_init);
+
+ // C99 6.7.8p4. All file scoped initializers need to be constant.
+ if (!getLangOpts().CPlusPlus && !VDecl->isInvalidDecl())
+ CheckForConstantInitializer(Init, DclT);
+ }
+
+ // We will represent direct-initialization similarly to copy-initialization:
+ // int x(1); -as-> int x = 1;
+ // ClassType x(a,b,c); -as-> ClassType x = ClassType(a,b,c);
+ //
+ // Clients that want to distinguish between the two forms can check for a
+ // direct initializer using VarDecl::getInitStyle().
+ // A major benefit is that clients that don't particularly care which form
+ // it was (like CodeGen) can handle both cases without special-case code.
+
+ // C++ 8.5p11:
+ // The form of initialization (using parentheses or '=') is generally
+ // insignificant, but does matter when the entity being initialized has a
+ // class type.
+ if (CXXDirectInit) {
+ assert(DirectInit && "Call-style initializer must be direct init.");
+ VDecl->setInitStyle(VarDecl::CallInit);
+ } else if (DirectInit) {
+ // This must be list-initialization. No other way is direct-initialization.
+ VDecl->setInitStyle(VarDecl::ListInit);
+ }
+
+ CheckCompleteVariableDeclaration(VDecl);
+}
+
+/// ActOnInitializerError - Given that there was an error parsing an
+/// initializer for the given declaration, try to return to some form
+/// of sanity.
+void Sema::ActOnInitializerError(Decl *D) {
+ // Our main concern here is re-establishing invariants like "a
+ // variable's type is either dependent or complete".
+ if (!D || D->isInvalidDecl()) return;
+
+ VarDecl *VD = dyn_cast<VarDecl>(D);
+ if (!VD) return;
+
+ // Auto types are meaningless if we can't make sense of the initializer.
+ if (ParsingInitForAutoVars.count(D)) {
+ D->setInvalidDecl();
+ return;
+ }
+
+ QualType Ty = VD->getType();
+ if (Ty->isDependentType()) return;
+
+ // Require a complete type.
+ if (RequireCompleteType(VD->getLocation(),
+ Context.getBaseElementType(Ty),
+ diag::err_typecheck_decl_incomplete_type)) {
+ VD->setInvalidDecl();
+ return;
+ }
+
+ // Require a non-abstract type.
+ if (RequireNonAbstractType(VD->getLocation(), Ty,
+ diag::err_abstract_type_in_decl,
+ AbstractVariableType)) {
+ VD->setInvalidDecl();
+ return;
+ }
+
+ // Don't bother complaining about constructors or destructors,
+ // though.
+}
+
+void Sema::ActOnUninitializedDecl(Decl *RealDecl,
+ bool TypeMayContainAuto) {
+ // If there is no declaration, there was an error parsing it. Just ignore it.
+ if (RealDecl == 0)
+ return;
+
+ if (VarDecl *Var = dyn_cast<VarDecl>(RealDecl)) {
+ QualType Type = Var->getType();
+
+ // C++11 [dcl.spec.auto]p3
+ if (TypeMayContainAuto && Type->getContainedAutoType()) {
+ Diag(Var->getLocation(), diag::err_auto_var_requires_init)
+ << Var->getDeclName() << Type;
+ Var->setInvalidDecl();
+ return;
+ }
+
+ // C++11 [class.static.data]p3: A static data member can be declared with
+ // the constexpr specifier; if so, its declaration shall specify
+ // a brace-or-equal-initializer.
+ // C++11 [dcl.constexpr]p1: The constexpr specifier shall be applied only to
+ // the definition of a variable [...] or the declaration of a static data
+ // member.
+ if (Var->isConstexpr() && !Var->isThisDeclarationADefinition()) {
+ if (Var->isStaticDataMember())
+ Diag(Var->getLocation(),
+ diag::err_constexpr_static_mem_var_requires_init)
+ << Var->getDeclName();
+ else
+ Diag(Var->getLocation(), diag::err_invalid_constexpr_var_decl);
+ Var->setInvalidDecl();
+ return;
+ }
+
+ switch (Var->isThisDeclarationADefinition()) {
+ case VarDecl::Definition:
+ if (!Var->isStaticDataMember() || !Var->getAnyInitializer())
+ break;
+
+ // We have an out-of-line definition of a static data member
+ // that has an in-class initializer, so we type-check this like
+ // a declaration.
+ //
+ // Fall through
+
+ case VarDecl::DeclarationOnly:
+ // It's only a declaration.
+
+ // Block scope. C99 6.7p7: If an identifier for an object is
+ // declared with no linkage (C99 6.2.2p6), the type for the
+ // object shall be complete.
+ if (!Type->isDependentType() && Var->isLocalVarDecl() &&
+ !Var->getLinkage() && !Var->isInvalidDecl() &&
+ RequireCompleteType(Var->getLocation(), Type,
+ diag::err_typecheck_decl_incomplete_type))
+ Var->setInvalidDecl();
+
+ // Make sure that the type is not abstract.
+ if (!Type->isDependentType() && !Var->isInvalidDecl() &&
+ RequireNonAbstractType(Var->getLocation(), Type,
+ diag::err_abstract_type_in_decl,
+ AbstractVariableType))
+ Var->setInvalidDecl();
+ return;
+
+ case VarDecl::TentativeDefinition:
+ // File scope. C99 6.9.2p2: A declaration of an identifier for an
+ // object that has file scope without an initializer, and without a
+ // storage-class specifier or with the storage-class specifier "static",
+ // constitutes a tentative definition. Note: A tentative definition with
+ // external linkage is valid (C99 6.2.2p5).
+ if (!Var->isInvalidDecl()) {
+ if (const IncompleteArrayType *ArrayT
+ = Context.getAsIncompleteArrayType(Type)) {
+ if (RequireCompleteType(Var->getLocation(),
+ ArrayT->getElementType(),
+ diag::err_illegal_decl_array_incomplete_type))
+ Var->setInvalidDecl();
+ } else if (Var->getStorageClass() == SC_Static) {
+ // C99 6.9.2p3: If the declaration of an identifier for an object is
+ // a tentative definition and has internal linkage (C99 6.2.2p3), the
+ // declared type shall not be an incomplete type.
+ // NOTE: code such as the following
+ // static struct s;
+ // struct s { int a; };
+ // is accepted by gcc. Hence here we issue a warning instead of
+ // an error and we do not invalidate the static declaration.
+ // NOTE: to avoid multiple warnings, only check the first declaration.
+ if (Var->getPreviousDecl() == 0)
+ RequireCompleteType(Var->getLocation(), Type,
+ diag::ext_typecheck_decl_incomplete_type);
+ }
+ }
+
+ // Record the tentative definition; we're done.
+ if (!Var->isInvalidDecl())
+ TentativeDefinitions.push_back(Var);
+ return;
+ }
+
+ // Provide a specific diagnostic for uninitialized variable
+ // definitions with incomplete array type.
+ if (Type->isIncompleteArrayType()) {
+ Diag(Var->getLocation(),
+ diag::err_typecheck_incomplete_array_needs_initializer);
+ Var->setInvalidDecl();
+ return;
+ }
+
+ // Provide a specific diagnostic for uninitialized variable
+ // definitions with reference type.
+ if (Type->isReferenceType()) {
+ Diag(Var->getLocation(), diag::err_reference_var_requires_init)
+ << Var->getDeclName()
+ << SourceRange(Var->getLocation(), Var->getLocation());
+ Var->setInvalidDecl();
+ return;
+ }
+
+ // Do not attempt to type-check the default initializer for a
+ // variable with dependent type.
+ if (Type->isDependentType())
+ return;
+
+ if (Var->isInvalidDecl())
+ return;
+
+ if (RequireCompleteType(Var->getLocation(),
+ Context.getBaseElementType(Type),
+ diag::err_typecheck_decl_incomplete_type)) {
+ Var->setInvalidDecl();
+ return;
+ }
+
+ // The variable cannot have an abstract class type.
+ if (RequireNonAbstractType(Var->getLocation(), Type,
+ diag::err_abstract_type_in_decl,
+ AbstractVariableType)) {
+ Var->setInvalidDecl();
+ return;
+ }
+
+ // Check for jumps past the implicit initializer. C++0x
+ // clarifies that this applies to a "variable with automatic
+ // storage duration", not a "local variable".
+ // C++11 [stmt.dcl]p3
+ // A program that jumps from a point where a variable with automatic
+ // storage duration is not in scope to a point where it is in scope is
+ // ill-formed unless the variable has scalar type, class type with a
+ // trivial default constructor and a trivial destructor, a cv-qualified
+ // version of one of these types, or an array of one of the preceding
+ // types and is declared without an initializer.
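+ // For example (illustrative), a 'goto' past the declaration of a local
+ // 'std::string s;' is ill-formed because its default constructor and
+ // destructor are non-trivial.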
+ if (getLangOpts().CPlusPlus && Var->hasLocalStorage()) {
+ if (const RecordType *Record
+ = Context.getBaseElementType(Type)->getAs<RecordType>()) {
+ CXXRecordDecl *CXXRecord = cast<CXXRecordDecl>(Record->getDecl());
+ // Mark the function for further checking even if the looser rules of
+ // C++11 do not require such checks, so that we can diagnose
+ // incompatibilities with C++98.
+ if (!CXXRecord->isPOD())
+ getCurFunction()->setHasBranchProtectedScope();
+ }
+ }
+
+ // C++03 [dcl.init]p9:
+ // If no initializer is specified for an object, and the
+ // object is of (possibly cv-qualified) non-POD class type (or
+ // array thereof), the object shall be default-initialized; if
+ // the object is of const-qualified type, the underlying class
+ // type shall have a user-declared default
+ // constructor. Otherwise, if no initializer is specified for
+ // a non-static object, the object and its subobjects, if
+ // any, have an indeterminate initial value; if the object
+ // or any of its subobjects are of const-qualified type, the
+ // program is ill-formed.
+ // C++0x [dcl.init]p11:
+ // If no initializer is specified for an object, the object is
+ // default-initialized; [...].
+ InitializedEntity Entity = InitializedEntity::InitializeVariable(Var);
+ InitializationKind Kind
+ = InitializationKind::CreateDefault(Var->getLocation());
+
+ InitializationSequence InitSeq(*this, Entity, Kind, 0, 0);
+ ExprResult Init = InitSeq.Perform(*this, Entity, Kind,
+ MultiExprArg(*this, 0, 0));
+ if (Init.isInvalid())
+ Var->setInvalidDecl();
+ else if (Init.get()) {
+ Var->setInit(MaybeCreateExprWithCleanups(Init.get()));
+ // This is important for template substitution.
+ Var->setInitStyle(VarDecl::CallInit);
+ }
+
+ CheckCompleteVariableDeclaration(Var);
+ }
+}
+
+void Sema::ActOnCXXForRangeDecl(Decl *D) {
+ VarDecl *VD = dyn_cast<VarDecl>(D);
+ if (!VD) {
+ Diag(D->getLocation(), diag::err_for_range_decl_must_be_var);
+ D->setInvalidDecl();
+ return;
+ }
+
+ VD->setCXXForRangeDecl(true);
+
+ // for-range-declaration cannot be given a storage class specifier.
+ int Error = -1;
+ switch (VD->getStorageClassAsWritten()) {
+ case SC_None:
+ break;
+ case SC_Extern:
+ Error = 0;
+ break;
+ case SC_Static:
+ Error = 1;
+ break;
+ case SC_PrivateExtern:
+ Error = 2;
+ break;
+ case SC_Auto:
+ Error = 3;
+ break;
+ case SC_Register:
+ Error = 4;
+ break;
+ case SC_OpenCLWorkGroupLocal:
+ llvm_unreachable("Unexpected storage class");
+ }
+ if (VD->isConstexpr())
+ Error = 5;
+ if (Error != -1) {
+ Diag(VD->getOuterLocStart(), diag::err_for_range_storage_class)
+ << VD->getDeclName() << Error;
+ D->setInvalidDecl();
+ }
+}
+
+void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
+ if (var->isInvalidDecl()) return;
+
+ // In ARC, don't allow jumps past the implicit initialization of a
+ // local retaining variable.
+ if (getLangOpts().ObjCAutoRefCount &&
+ var->hasLocalStorage()) {
+ switch (var->getType().getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Strong:
+ getCurFunction()->setHasBranchProtectedScope();
+ break;
+ }
+ }
+
+ // All the following checks are C++ only.
+ if (!getLangOpts().CPlusPlus) return;
+
+ QualType baseType = Context.getBaseElementType(var->getType());
+ if (baseType->isDependentType()) return;
+
+ // __block variables might require us to capture a copy-initializer.
+ if (var->hasAttr<BlocksAttr>()) {
+ // It's currently invalid to ever have a __block variable with an
+ // array type; should we diagnose that here?
+
+ // Regardless, we don't want to ignore array nesting when
+ // constructing this copy.
+ QualType type = var->getType();
+
+ if (type->isStructureOrClassType()) {
+ SourceLocation poi = var->getLocation();
+ Expr *varRef = new (Context) DeclRefExpr(var, false, type, VK_LValue, poi);
+ ExprResult result =
+ PerformCopyInitialization(
+ InitializedEntity::InitializeBlock(poi, type, false),
+ poi, Owned(varRef));
+ if (!result.isInvalid()) {
+ result = MaybeCreateExprWithCleanups(result);
+ Expr *init = result.takeAs<Expr>();
+ Context.setBlockVarCopyInits(var, init);
+ }
+ }
+ }
+
+ Expr *Init = var->getInit();
+ bool IsGlobal = var->hasGlobalStorage() && !var->isStaticLocal();
+
+ if (!var->getDeclContext()->isDependentContext() && Init) {
+ if (IsGlobal && !var->isConstexpr() &&
+ getDiagnostics().getDiagnosticLevel(diag::warn_global_constructor,
+ var->getLocation())
+ != DiagnosticsEngine::Ignored &&
+ !Init->isConstantInitializer(Context, baseType->isReferenceType()))
+ Diag(var->getLocation(), diag::warn_global_constructor)
+ << Init->getSourceRange();
+
+ if (var->isConstexpr()) {
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ if (!var->evaluateValue(Notes) || !var->isInitICE()) {
+ SourceLocation DiagLoc = var->getLocation();
+ // If the note doesn't add any useful information other than a source
+ // location, fold it into the primary diagnostic.
+ if (Notes.size() == 1 && Notes[0].second.getDiagID() ==
+ diag::note_invalid_subexpr_in_const_expr) {
+ DiagLoc = Notes[0].first;
+ Notes.clear();
+ }
+ Diag(DiagLoc, diag::err_constexpr_var_requires_const_init)
+ << var << Init->getSourceRange();
+ for (unsigned I = 0, N = Notes.size(); I != N; ++I)
+ Diag(Notes[I].first, Notes[I].second);
+ }
+ } else if (var->isUsableInConstantExpressions(Context)) {
+ // Check whether the initializer of a const variable of integral or
+ // enumeration type is an ICE now, since we can't tell whether it was
+ // initialized by a constant expression if we check later.
+ var->checkInitIsICE();
+ }
+ }
+
+ // Require the destructor.
+ if (const RecordType *recordType = baseType->getAs<RecordType>())
+ FinalizeVarWithDestructor(var, recordType);
+}
+
+/// FinalizeDeclaration - called by ParseDeclarationAfterDeclarator to perform
+/// any semantic actions necessary after any initializer has been attached.
+void
+Sema::FinalizeDeclaration(Decl *ThisDecl) {
+ // Note that we are no longer parsing the initializer for this declaration.
+ ParsingInitForAutoVars.erase(ThisDecl);
+}
+
+Sema::DeclGroupPtrTy
+Sema::FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
+ Decl **Group, unsigned NumDecls) {
+ SmallVector<Decl*, 8> Decls;
+
+ if (DS.isTypeSpecOwned())
+ Decls.push_back(DS.getRepAsDecl());
+
+ for (unsigned i = 0; i != NumDecls; ++i)
+ if (Decl *D = Group[i])
+ Decls.push_back(D);
+
+ return BuildDeclaratorGroup(Decls.data(), Decls.size(),
+ DS.getTypeSpecType() == DeclSpec::TST_auto);
+}
+
+/// BuildDeclaratorGroup - convert a list of declarations into a declaration
+/// group, performing any necessary semantic checking.
+Sema::DeclGroupPtrTy
+Sema::BuildDeclaratorGroup(Decl **Group, unsigned NumDecls,
+ bool TypeMayContainAuto) {
+ // C++0x [dcl.spec.auto]p7:
+ // If the type deduced for the template parameter U is not the same in each
+ // deduction, the program is ill-formed.
+ // FIXME: When initializer-list support is added, a distinction is needed
+ // between the deduced type U and the deduced type which 'auto' stands for.
+ // auto a = 0, b = { 1, 2, 3 };
+ // is legal because the deduced type U is 'int' in both cases.
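+ // For example (illustrative): 'auto a = 1, b = 2.5;' is ill-formed because
+ // 'auto' deduces to 'int' for 'a' but to 'double' for 'b'.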
+ if (TypeMayContainAuto && NumDecls > 1) {
+ QualType Deduced;
+ CanQualType DeducedCanon;
+ VarDecl *DeducedDecl = 0;
+ for (unsigned i = 0; i != NumDecls; ++i) {
+ if (VarDecl *D = dyn_cast<VarDecl>(Group[i])) {
+ AutoType *AT = D->getType()->getContainedAutoType();
+ // Don't reissue diagnostics when instantiating a template.
+ if (AT && D->isInvalidDecl())
+ break;
+ if (AT && AT->isDeduced()) {
+ QualType U = AT->getDeducedType();
+ CanQualType UCanon = Context.getCanonicalType(U);
+ if (Deduced.isNull()) {
+ Deduced = U;
+ DeducedCanon = UCanon;
+ DeducedDecl = D;
+ } else if (DeducedCanon != UCanon) {
+ Diag(D->getTypeSourceInfo()->getTypeLoc().getBeginLoc(),
+ diag::err_auto_different_deductions)
+ << Deduced << DeducedDecl->getDeclName()
+ << U << D->getDeclName()
+ << DeducedDecl->getInit()->getSourceRange()
+ << D->getInit()->getSourceRange();
+ D->setInvalidDecl();
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ return DeclGroupPtrTy::make(DeclGroupRef::Create(Context, Group, NumDecls));
+}
+
+
+/// ActOnParamDeclarator - Called from Parser::ParseFunctionDeclarator()
+/// to introduce parameters into function prototype scope.
+Decl *Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
+ const DeclSpec &DS = D.getDeclSpec();
+
+ // Verify C99 6.7.5.3p2: The only SCS allowed is 'register'.
+ // C++03 [dcl.stc]p2 also permits 'auto'.
+ VarDecl::StorageClass StorageClass = SC_None;
+ VarDecl::StorageClass StorageClassAsWritten = SC_None;
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_register) {
+ StorageClass = SC_Register;
+ StorageClassAsWritten = SC_Register;
+ } else if (getLangOpts().CPlusPlus &&
+ DS.getStorageClassSpec() == DeclSpec::SCS_auto) {
+ StorageClass = SC_Auto;
+ StorageClassAsWritten = SC_Auto;
+ } else if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified) {
+ Diag(DS.getStorageClassSpecLoc(),
+ diag::err_invalid_storage_class_in_func_decl);
+ D.getMutableDeclSpec().ClearStorageClassSpecs();
+ }
+
+ if (D.getDeclSpec().isThreadSpecified())
+ Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_invalid_thread);
+ if (D.getDeclSpec().isConstexprSpecified())
+ Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_invalid_constexpr)
+ << 0;
+
+ DiagnoseFunctionSpecifiers(D);
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType parmDeclType = TInfo->getType();
+
+ if (getLangOpts().CPlusPlus) {
+ // Check that there are no default arguments inside the type of this
+ // parameter.
+ CheckExtraCXXDefaultArguments(D);
+
+ // Parameter declarators cannot be qualified (C++ [dcl.meaning]p1).
+ if (D.getCXXScopeSpec().isSet()) {
+ Diag(D.getIdentifierLoc(), diag::err_qualified_param_declarator)
+ << D.getCXXScopeSpec().getRange();
+ D.getCXXScopeSpec().clear();
+ }
+ }
+
+ // Ensure we have a valid name
+ IdentifierInfo *II = 0;
+ if (D.hasName()) {
+ II = D.getIdentifier();
+ if (!II) {
+ Diag(D.getIdentifierLoc(), diag::err_bad_parameter_name)
+ << GetNameForDeclarator(D).getName().getAsString();
+ D.setInvalidType(true);
+ }
+ }
+
+ // Check for redeclaration of parameters, e.g. int foo(int x, int x);
+ if (II) {
+ LookupResult R(*this, II, D.getIdentifierLoc(), LookupOrdinaryName,
+ ForRedeclaration);
+ LookupName(R, S);
+ if (R.isSingleResult()) {
+ NamedDecl *PrevDecl = R.getFoundDecl();
+ if (PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
+ // Just pretend that we didn't see the previous declaration.
+ PrevDecl = 0;
+ } else if (S->isDeclScope(PrevDecl)) {
+ Diag(D.getIdentifierLoc(), diag::err_param_redefinition) << II;
+ Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
+
+ // Recover by removing the name
+ II = 0;
+ D.SetIdentifier(0, D.getIdentifierLoc());
+ D.setInvalidType(true);
+ }
+ }
+ }
+
+ // Temporarily put parameter variables in the translation unit, not
+ // the enclosing context. This prevents them from accidentally
+ // looking like class members in C++.
+ ParmVarDecl *New = CheckParameter(Context.getTranslationUnitDecl(),
+ D.getLocStart(),
+ D.getIdentifierLoc(), II,
+ parmDeclType, TInfo,
+ StorageClass, StorageClassAsWritten);
+
+ if (D.isInvalidType())
+ New->setInvalidDecl();
+
+ assert(S->isFunctionPrototypeScope());
+ assert(S->getFunctionPrototypeDepth() >= 1);
+ New->setScopeInfo(S->getFunctionPrototypeDepth() - 1,
+ S->getNextFunctionPrototypeIndex());
+
+ // Add the parameter declaration into this scope.
+ S->AddDecl(New);
+ if (II)
+ IdResolver.AddDecl(New);
+
+ ProcessDeclAttributes(S, New, D);
+
+ if (D.getDeclSpec().isModulePrivateSpecified())
+ Diag(New->getLocation(), diag::err_module_private_local)
+ << 1 << New->getDeclName()
+ << SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
+ << FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
+
+ if (New->hasAttr<BlocksAttr>()) {
+ Diag(New->getLocation(), diag::err_block_on_nonlocal);
+ }
+ return New;
+}
+
+/// \brief Synthesizes a variable for a parameter arising from a
+/// typedef.
+ParmVarDecl *Sema::BuildParmVarDeclForTypedef(DeclContext *DC,
+ SourceLocation Loc,
+ QualType T) {
+ /* FIXME: setting StartLoc == Loc.
+ Would it be worthwhile to modify callers so as to provide a proper source
+ location for the unnamed parameters, embedding the parameter's type? */
+ ParmVarDecl *Param = ParmVarDecl::Create(Context, DC, Loc, Loc, 0,
+ T, Context.getTrivialTypeSourceInfo(T, Loc),
+ SC_None, SC_None, 0);
+ Param->setImplicit();
+ return Param;
+}
+
+void Sema::DiagnoseUnusedParameters(ParmVarDecl * const *Param,
+ ParmVarDecl * const *ParamEnd) {
+ // Don't diagnose unused-parameter errors in template instantiations; we
+ // will already have done so in the template itself.
+ if (!ActiveTemplateInstantiations.empty())
+ return;
+
+ for (; Param != ParamEnd; ++Param) {
+ if (!(*Param)->isReferenced() && (*Param)->getDeclName() &&
+ !(*Param)->hasAttr<UnusedAttr>()) {
+ Diag((*Param)->getLocation(), diag::warn_unused_parameter)
+ << (*Param)->getDeclName();
+ }
+ }
+}
+
+void Sema::DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Param,
+ ParmVarDecl * const *ParamEnd,
+ QualType ReturnTy,
+ NamedDecl *D) {
+ if (LangOpts.NumLargeByValueCopy == 0) // No check.
+ return;
+
+ // Warn if the return value is pass-by-value and larger than the specified
+ // threshold.
+ if (!ReturnTy->isDependentType() && ReturnTy.isPODType(Context)) {
+ unsigned Size = Context.getTypeSizeInChars(ReturnTy).getQuantity();
+ if (Size > LangOpts.NumLargeByValueCopy)
+ Diag(D->getLocation(), diag::warn_return_value_size)
+ << D->getDeclName() << Size;
+ }
+
+ // Warn if any parameter is pass-by-value and larger than the specified
+ // threshold.
+ for (; Param != ParamEnd; ++Param) {
+ QualType T = (*Param)->getType();
+ if (T->isDependentType() || !T.isPODType(Context))
+ continue;
+ unsigned Size = Context.getTypeSizeInChars(T).getQuantity();
+ if (Size > LangOpts.NumLargeByValueCopy)
+ Diag((*Param)->getLocation(), diag::warn_parameter_size)
+ << (*Param)->getDeclName() << Size;
+ }
+}
+
+ParmVarDecl *Sema::CheckParameter(DeclContext *DC, SourceLocation StartLoc,
+ SourceLocation NameLoc, IdentifierInfo *Name,
+ QualType T, TypeSourceInfo *TSInfo,
+ VarDecl::StorageClass StorageClass,
+ VarDecl::StorageClass StorageClassAsWritten) {
+ // In ARC, infer a lifetime qualifier for appropriate parameter types.
+ if (getLangOpts().ObjCAutoRefCount &&
+ T.getObjCLifetime() == Qualifiers::OCL_None &&
+ T->isObjCLifetimeType()) {
+
+ Qualifiers::ObjCLifetime lifetime;
+
+ // Special cases for arrays:
+ // - if it's const, use __unsafe_unretained
+ // - otherwise, it's an error
+ if (T->isArrayType()) {
+ if (!T.isConstQualified()) {
+ DelayedDiagnostics.add(
+ sema::DelayedDiagnostic::makeForbiddenType(
+ NameLoc, diag::err_arc_array_param_no_ownership, T, false));
+ }
+ lifetime = Qualifiers::OCL_ExplicitNone;
+ } else {
+ lifetime = T->getObjCARCImplicitLifetime();
+ }
+ T = Context.getLifetimeQualifiedType(T, lifetime);
+ }
+
+ ParmVarDecl *New = ParmVarDecl::Create(Context, DC, StartLoc, NameLoc, Name,
+ Context.getAdjustedParameterType(T),
+ TSInfo,
+ StorageClass, StorageClassAsWritten,
+ 0);
+
+ // Parameters cannot be abstract class types.
+ // For record types, this is done by the AbstractClassUsageDiagnoser once
+ // the class has been completely parsed.
+ if (!CurContext->isRecord() &&
+ RequireNonAbstractType(NameLoc, T, diag::err_abstract_type_in_decl,
+ AbstractParamType))
+ New->setInvalidDecl();
+
+ // Parameter declarators cannot be interface types. All ObjC objects are
+ // passed by reference.
+ if (T->isObjCObjectType()) {
+ Diag(NameLoc,
+ diag::err_object_cannot_be_passed_returned_by_value) << 1 << T
+ << FixItHint::CreateInsertion(NameLoc, "*");
+ T = Context.getObjCObjectPointerType(T);
+ New->setType(T);
+ }
+
+ // ISO/IEC TR 18037 S6.7.3: "The type of an object with automatic storage
+ // duration shall not be qualified by an address-space qualifier."
+ // Since all parameters have automatic storage duration, they cannot have
+ // an address space.
+ if (T.getAddressSpace() != 0) {
+ Diag(NameLoc, diag::err_arg_with_address_space);
+ New->setInvalidDecl();
+ }
+
+ return New;
+}
+
+void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
+ SourceLocation LocAfterDecls) {
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+
+ // Verify 6.9.1p6: 'every identifier in the identifier list shall be declared'
+ // for a K&R function.
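+ // For example (illustrative), in 'int f(a) { return a; }' the parameter 'a'
+ // is not declared, so it is implicitly given type 'int' below.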
+ if (!FTI.hasPrototype) {
+ for (int i = FTI.NumArgs; i != 0; /* decrement in loop */) {
+ --i;
+ if (FTI.ArgInfo[i].Param == 0) {
+ SmallString<256> Code;
+ llvm::raw_svector_ostream(Code) << " int "
+ << FTI.ArgInfo[i].Ident->getName()
+ << ";\n";
+ Diag(FTI.ArgInfo[i].IdentLoc, diag::ext_param_not_declared)
+ << FTI.ArgInfo[i].Ident
+ << FixItHint::CreateInsertion(LocAfterDecls, Code.str());
+
+ // Implicitly declare the argument as type 'int' for lack of a better
+ // type.
+ AttributeFactory attrs;
+ DeclSpec DS(attrs);
+ const char* PrevSpec; // unused
+ unsigned DiagID; // unused
+ DS.SetTypeSpecType(DeclSpec::TST_int, FTI.ArgInfo[i].IdentLoc,
+ PrevSpec, DiagID);
+ Declarator ParamD(DS, Declarator::KNRTypeListContext);
+ ParamD.SetIdentifier(FTI.ArgInfo[i].Ident, FTI.ArgInfo[i].IdentLoc);
+ FTI.ArgInfo[i].Param = ActOnParamDeclarator(S, ParamD);
+ }
+ }
+ }
+}
+
+Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope,
+ Declarator &D) {
+ assert(getCurFunctionDecl() == 0 && "Function parsing confused");
+ assert(D.isFunctionDeclarator() && "Not a function declarator!");
+ Scope *ParentScope = FnBodyScope->getParent();
+
+ D.setFunctionDefinitionKind(FDK_Definition);
+ Decl *DP = HandleDeclarator(ParentScope, D,
+ MultiTemplateParamsArg(*this));
+ return ActOnStartOfFunctionDef(FnBodyScope, DP);
+}
+
+static bool ShouldWarnAboutMissingPrototype(const FunctionDecl *FD) {
+ // Don't warn about invalid declarations.
+ if (FD->isInvalidDecl())
+ return false;
+
+ // Or declarations that aren't global.
+ if (!FD->isGlobal())
+ return false;
+
+ // Don't warn about C++ member functions.
+ if (isa<CXXMethodDecl>(FD))
+ return false;
+
+ // Don't warn about 'main'.
+ if (FD->isMain())
+ return false;
+
+ // Don't warn about inline functions.
+ if (FD->isInlined())
+ return false;
+
+ // Don't warn about function templates.
+ if (FD->getDescribedFunctionTemplate())
+ return false;
+
+ // Don't warn about function template specializations.
+ if (FD->isFunctionTemplateSpecialization())
+ return false;
+
+ bool MissingPrototype = true;
+ for (const FunctionDecl *Prev = FD->getPreviousDecl();
+ Prev; Prev = Prev->getPreviousDecl()) {
+ // Ignore any declarations that occur in function or method
+ // scope, because they aren't visible from the header.
+ if (Prev->getDeclContext()->isFunctionOrMethod())
+ continue;
+
+ MissingPrototype = !Prev->getType()->isFunctionProtoType();
+ break;
+ }
+
+ return MissingPrototype;
+}
+
+void Sema::CheckForFunctionRedefinition(FunctionDecl *FD) {
+ // Don't complain if we're in GNU89 mode and the previous definition
+ // was an extern inline function.
+ const FunctionDecl *Definition;
+ if (FD->isDefined(Definition) &&
+ !canRedefineFunction(Definition, getLangOpts())) {
+ if (getLangOpts().GNUMode && Definition->isInlineSpecified() &&
+ Definition->getStorageClass() == SC_Extern)
+ Diag(FD->getLocation(), diag::err_redefinition_extern_inline)
+ << FD->getDeclName() << getLangOpts().CPlusPlus;
+ else
+ Diag(FD->getLocation(), diag::err_redefinition) << FD->getDeclName();
+ Diag(Definition->getLocation(), diag::note_previous_definition);
+ }
+}
+
+Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D) {
+ // Clear the last template instantiation error context.
+ LastTemplateInstantiationErrorContext = ActiveTemplateInstantiation();
+
+ if (!D)
+ return D;
+ FunctionDecl *FD = 0;
+
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D))
+ FD = FunTmpl->getTemplatedDecl();
+ else
+ FD = cast<FunctionDecl>(D);
+
+ // Enter a new function scope
+ PushFunctionScope();
+
+ // See if this is a redefinition.
+ if (!FD->isLateTemplateParsed())
+ CheckForFunctionRedefinition(FD);
+
+ // Builtin functions cannot be defined.
+ if (unsigned BuiltinID = FD->getBuiltinID()) {
+ if (!Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID)) {
+ Diag(FD->getLocation(), diag::err_builtin_definition) << FD;
+ FD->setInvalidDecl();
+ }
+ }
+
+ // The return type of a function definition must be complete
+ // (C99 6.9.1p3, C++ [dcl.fct]p6).
+ QualType ResultType = FD->getResultType();
+ if (!ResultType->isDependentType() && !ResultType->isVoidType() &&
+ !FD->isInvalidDecl() &&
+ RequireCompleteType(FD->getLocation(), ResultType,
+ diag::err_func_def_incomplete_result))
+ FD->setInvalidDecl();
+
+ // GNU warning -Wmissing-prototypes:
+ // Warn if a global function is defined without a previous
+ // prototype declaration. This warning is issued even if the
+ // definition itself provides a prototype. The aim is to detect
+ // global functions that fail to be declared in header files.
+ if (ShouldWarnAboutMissingPrototype(FD))
+ Diag(FD->getLocation(), diag::warn_missing_prototype) << FD;
+
+ if (FnBodyScope)
+ PushDeclContext(FnBodyScope, FD);
+
+ // Check the validity of our function parameters
+ CheckParmsForFunctionDef(FD->param_begin(), FD->param_end(),
+ /*CheckParameterNames=*/true);
+
+ // Introduce our parameters into the function scope
+ for (unsigned p = 0, NumParams = FD->getNumParams(); p < NumParams; ++p) {
+ ParmVarDecl *Param = FD->getParamDecl(p);
+ Param->setOwningFunction(FD);
+
+ // If this has an identifier, add it to the scope stack.
+ if (Param->getIdentifier() && FnBodyScope) {
+ CheckShadow(FnBodyScope, Param);
+
+ PushOnScopeChains(Param, FnBodyScope);
+ }
+ }
+
+ // If we had any tags defined in the function prototype,
+ // introduce them into the function scope.
+ if (FnBodyScope) {
+ for (llvm::ArrayRef<NamedDecl*>::iterator I = FD->getDeclsInPrototypeScope().begin(),
+ E = FD->getDeclsInPrototypeScope().end(); I != E; ++I) {
+ NamedDecl *D = *I;
+
+ // Some of these decls (like enums) may have been pinned to the translation unit
+ // for lack of a real context earlier. If so, remove them from the
+ // translation unit and reattach them to the current context.
+ if (D->getLexicalDeclContext() == Context.getTranslationUnitDecl()) {
+ // Is the decl actually in the context?
+ for (DeclContext::decl_iterator DI = Context.getTranslationUnitDecl()->decls_begin(),
+ DE = Context.getTranslationUnitDecl()->decls_end(); DI != DE; ++DI) {
+ if (*DI == D) {
+ Context.getTranslationUnitDecl()->removeDecl(D);
+ break;
+ }
+ }
+ // Either way, reassign the lexical decl context to our FunctionDecl.
+ D->setLexicalDeclContext(CurContext);
+ }
+
+ // If the decl has a non-null name, make it accessible in the current scope.
+ if (!D->getName().empty())
+ PushOnScopeChains(D, FnBodyScope, /*AddToContext=*/false);
+
+ // Similarly, dive into enums and fish their constants out, making them
+ // accessible in this scope.
+ if (EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
+ for (EnumDecl::enumerator_iterator EI = ED->enumerator_begin(),
+ EE = ED->enumerator_end(); EI != EE; ++EI)
+ PushOnScopeChains(*EI, FnBodyScope, /*AddToContext=*/false);
+ }
+ }
+ }
+
+ // Check the dllimport attribute on the current function definition.
+ DLLImportAttr *DA = FD->getAttr<DLLImportAttr>();
+ if (DA && (!FD->getAttr<DLLExportAttr>())) {
+ // dllimport attribute cannot be directly applied to definition.
+ // Microsoft accepts dllimport for functions defined within class scope.
+ if (!DA->isInherited() &&
+ !(LangOpts.MicrosoftExt && FD->getLexicalDeclContext()->isRecord())) {
+ Diag(FD->getLocation(),
+ diag::err_attribute_can_be_applied_only_to_symbol_declaration)
+ << "dllimport";
+ FD->setInvalidDecl();
+ return FD;
+ }
+
+ // Visual C++ does not appear to consider this an issue, so we only issue
+ // a warning when Microsoft extensions are disabled.
+ if (!LangOpts.MicrosoftExt) {
+ // If a symbol previously declared dllimport is later defined, the
+ // attribute is ignored in subsequent references, and a warning is
+ // emitted.
+ Diag(FD->getLocation(),
+ diag::warn_redeclaration_without_attribute_prev_attribute_ignored)
+ << FD->getName() << "dllimport";
+ }
+ }
+ return FD;
+}
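+
+ // Illustrative sketch (not part of the upstream source): the dllimport
+ // handling above rejects, for example,
+ //
+ //   __declspec(dllimport) int imported(void) { return 0; }
+ //   // error: dllimport can only be applied to a symbol declaration
+ //
+ // while under Microsoft extensions a dllimport function defined within a
+ // class is accepted, matching MSVC behavior.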
+
+/// \brief Given the set of return statements within a function body,
+/// compute the variables that are subject to the named return value
+/// optimization.
+///
+/// Each of the variables that is subject to the named return value
+/// optimization will be marked as NRVO variables in the AST, and any
+/// return statement that has a marked NRVO variable as its NRVO candidate can
+/// use the named return value optimization.
+///
+/// This function applies a very simplistic algorithm for NRVO: if every return
+/// statement in the function has the same NRVO candidate, that candidate is
+/// the NRVO variable.
+///
+/// FIXME: Employ a smarter algorithm that accounts for multiple return
+/// statements and the lifetimes of the NRVO candidates. We should be able to
+/// find a maximal set of NRVO variables.
+void Sema::computeNRVO(Stmt *Body, FunctionScopeInfo *Scope) {
+ ReturnStmt **Returns = Scope->Returns.data();
+
+ const VarDecl *NRVOCandidate = 0;
+ for (unsigned I = 0, E = Scope->Returns.size(); I != E; ++I) {
+ if (!Returns[I]->getNRVOCandidate())
+ return;
+
+ if (!NRVOCandidate)
+ NRVOCandidate = Returns[I]->getNRVOCandidate();
+ else if (NRVOCandidate != Returns[I]->getNRVOCandidate())
+ return;
+ }
+
+ if (NRVOCandidate)
+ const_cast<VarDecl*>(NRVOCandidate)->setNRVOVariable(true);
+}
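+
+ // Illustrative sketch (not part of the upstream source) of the simple NRVO
+ // rule above, using hypothetical C++ (the names are made up):
+ //
+ //   Widget make(bool b) {
+ //     Widget w;
+ //     if (b) return w;   // every return has the same candidate, so 'w'
+ //     return w;          // is marked as the NRVO variable
+ //   }
+ //
+ //   Widget pick(bool b) {
+ //     Widget a, c;
+ //     if (b) return a;   // two different candidates: nothing is marked,
+ //     return c;          // and NRVO is not applied here
+ //   }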
+
+Decl *Sema::ActOnFinishFunctionBody(Decl *D, Stmt *BodyArg) {
+ return ActOnFinishFunctionBody(D, move(BodyArg), false);
+}
+
+Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
+ bool IsInstantiation) {
+ FunctionDecl *FD = 0;
+ FunctionTemplateDecl *FunTmpl = dyn_cast_or_null<FunctionTemplateDecl>(dcl);
+ if (FunTmpl)
+ FD = FunTmpl->getTemplatedDecl();
+ else
+ FD = dyn_cast_or_null<FunctionDecl>(dcl);
+
+ sema::AnalysisBasedWarnings::Policy WP = AnalysisWarnings.getDefaultPolicy();
+ sema::AnalysisBasedWarnings::Policy *ActivePolicy = 0;
+
+ if (FD) {
+ FD->setBody(Body);
+
+ // If the function implicitly returns zero (like 'main') or is naked,
+ // don't complain about missing return statements.
+ if (FD->hasImplicitReturnZero() || FD->hasAttr<NakedAttr>())
+ WP.disableCheckFallThrough();
+
+ // MSVC permits the use of a pure specifier (= 0) on a function definition
+ // at class scope; warn about this non-standard construct.
+ if (getLangOpts().MicrosoftExt && FD->isPure())
+ Diag(FD->getLocation(), diag::warn_pure_function_definition);
+
+ if (!FD->isInvalidDecl()) {
+ DiagnoseUnusedParameters(FD->param_begin(), FD->param_end());
+ DiagnoseSizeOfParametersAndReturnValue(FD->param_begin(), FD->param_end(),
+ FD->getResultType(), FD);
+
+ // If this is a constructor, we need a vtable.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(FD))
+ MarkVTableUsed(FD->getLocation(), Constructor->getParent());
+
+ computeNRVO(Body, getCurFunction());
+ }
+
+ assert((FD == getCurFunctionDecl() || getCurLambda()->CallOperator == FD) &&
+ "Function parsing confused");
+ } else if (ObjCMethodDecl *MD = dyn_cast_or_null<ObjCMethodDecl>(dcl)) {
+ assert(MD == getCurMethodDecl() && "Method parsing confused");
+ MD->setBody(Body);
+ if (Body)
+ MD->setEndLoc(Body->getLocEnd());
+ if (!MD->isInvalidDecl()) {
+ DiagnoseUnusedParameters(MD->param_begin(), MD->param_end());
+ DiagnoseSizeOfParametersAndReturnValue(MD->param_begin(), MD->param_end(),
+ MD->getResultType(), MD);
+
+ if (Body)
+ computeNRVO(Body, getCurFunction());
+ }
+ if (ObjCShouldCallSuperDealloc) {
+ Diag(MD->getLocEnd(), diag::warn_objc_missing_super_dealloc);
+ ObjCShouldCallSuperDealloc = false;
+ }
+ if (ObjCShouldCallSuperFinalize) {
+ Diag(MD->getLocEnd(), diag::warn_objc_missing_super_finalize);
+ ObjCShouldCallSuperFinalize = false;
+ }
+ } else {
+ return 0;
+ }
+
+ assert(!ObjCShouldCallSuperDealloc && "This should only be set for "
+ "ObjC methods, which should have been handled in the block above.");
+ assert(!ObjCShouldCallSuperFinalize && "This should only be set for "
+ "ObjC methods, which should have been handled in the block above.");
+
+ // Verify and clean out per-function state.
+ if (Body) {
+ // C++ constructors that have function-try-blocks can't have return
+ // statements in the handlers of that block. (C++ [except.handle]p14)
+ // Verify this.
+ if (FD && isa<CXXConstructorDecl>(FD) && isa<CXXTryStmt>(Body))
+ DiagnoseReturnInConstructorExceptionHandler(cast<CXXTryStmt>(Body));
+
+ // Verify that gotos and switch cases don't jump into scopes illegally.
+ if (getCurFunction()->NeedsScopeChecking() &&
+ !dcl->isInvalidDecl() &&
+ !hasAnyUnrecoverableErrorsInThisFunction())
+ DiagnoseInvalidJumps(Body);
+
+ if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(dcl)) {
+ if (!Destructor->getParent()->isDependentType())
+ CheckDestructor(Destructor);
+
+ MarkBaseAndMemberDestructorsReferenced(Destructor->getLocation(),
+ Destructor->getParent());
+ }
+
+ // If any errors have occurred, clear out any temporaries that may have
+ // been left over. This ensures that these temporaries won't be picked up for
+ // deletion in some later function.
+ if (PP.getDiagnostics().hasErrorOccurred() ||
+ PP.getDiagnostics().getSuppressAllDiagnostics()) {
+ DiscardCleanupsInEvaluationContext();
+ } else if (!isa<FunctionTemplateDecl>(dcl)) {
+ // Since the body is valid, issue any analysis-based warnings that are
+ // enabled.
+ ActivePolicy = &WP;
+ }
+
+ if (!IsInstantiation && FD && FD->isConstexpr() && !FD->isInvalidDecl() &&
+ (!CheckConstexprFunctionDecl(FD) ||
+ !CheckConstexprFunctionBody(FD, Body)))
+ FD->setInvalidDecl();
+
+ assert(ExprCleanupObjects.empty() && "Leftover temporaries in function");
+ assert(!ExprNeedsCleanups && "Unaccounted cleanups in function");
+ assert(MaybeODRUseExprs.empty() &&
+ "Leftover expressions for odr-use checking");
+ }
+
+ if (!IsInstantiation)
+ PopDeclContext();
+
+ PopFunctionScopeInfo(ActivePolicy, dcl);
+
+ // If any errors have occurred, clear out any temporaries that may have
+ // been left over. This ensures that these temporaries won't be picked up for
+ // deletion in some later function.
+ if (getDiagnostics().hasErrorOccurred()) {
+ DiscardCleanupsInEvaluationContext();
+ }
+
+ return dcl;
+}
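+
+ // Illustrative sketch (not part of the upstream source): one of the checks
+ // performed above, per C++ [except.handle]p14, rejects a return statement
+ // in a handler of a constructor's function-try-block:
+ //
+ //   struct S {
+ //     int m;
+ //     S() try : m(0) {
+ //     } catch (...) {
+ //       return;   // error: diagnosed by
+ //     }           // DiagnoseReturnInConstructorExceptionHandler
+ //   };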
+
+
+/// When we finish delayed parsing of an attribute, we must attach it to the
+/// relevant Decl.
+void Sema::ActOnFinishDelayedAttribute(Scope *S, Decl *D,
+ ParsedAttributes &Attrs) {
+ // Always attach attributes to the underlying decl.
+ if (TemplateDecl *TD = dyn_cast<TemplateDecl>(D))
+ D = TD->getTemplatedDecl();
+ ProcessDeclAttributeList(S, D, Attrs.getList());
+}
+
+
+/// ImplicitlyDefineFunction - An undeclared identifier was used in a function
+/// call, forming a call to an implicitly defined function (per C99 6.5.1p2).
+NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
+ IdentifierInfo &II, Scope *S) {
+ // Before we produce a declaration for an implicitly defined
+ // function, see whether there was a locally-scoped declaration of
+ // this name as a function or variable. If so, use that
+ // (non-visible) declaration, and complain about it.
+ llvm::DenseMap<DeclarationName, NamedDecl *>::iterator Pos
+ = findLocallyScopedExternalDecl(&II);
+ if (Pos != LocallyScopedExternalDecls.end()) {
+ Diag(Loc, diag::warn_use_out_of_scope_declaration) << Pos->second;
+ Diag(Pos->second->getLocation(), diag::note_previous_declaration);
+ return Pos->second;
+ }
+
+ // Extension in C99. Legal in C90, but warn about it.
+ unsigned diag_id;
+ if (II.getName().startswith("__builtin_"))
+ diag_id = diag::warn_builtin_unknown;
+ else if (getLangOpts().C99)
+ diag_id = diag::ext_implicit_function_decl;
+ else
+ diag_id = diag::warn_implicit_function_decl;
+ Diag(Loc, diag_id) << &II;
+
+ // Because typo correction is expensive, only do it if the implicit
+ // function declaration is going to be treated as an error.
+ if (Diags.getDiagnosticLevel(diag_id, Loc) >= DiagnosticsEngine::Error) {
+ TypoCorrection Corrected;
+ DeclFilterCCC<FunctionDecl> Validator;
+ if (S && (Corrected = CorrectTypo(DeclarationNameInfo(&II, Loc),
+ LookupOrdinaryName, S, 0, Validator))) {
+ std::string CorrectedStr = Corrected.getAsString(getLangOpts());
+ std::string CorrectedQuotedStr = Corrected.getQuoted(getLangOpts());
+ FunctionDecl *Func = Corrected.getCorrectionDeclAs<FunctionDecl>();
+
+ Diag(Loc, diag::note_function_suggestion) << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(Loc, CorrectedStr);
+
+ if (Func->getLocation().isValid()
+ && !II.getName().startswith("__builtin_"))
+ Diag(Func->getLocation(), diag::note_previous_decl)
+ << CorrectedQuotedStr;
+ }
+ }
+
+ // Set a Declarator for the implicit definition: int foo();
+ const char *Dummy;
+ AttributeFactory attrFactory;
+ DeclSpec DS(attrFactory);
+ unsigned DiagID;
+ bool Error = DS.SetTypeSpecType(DeclSpec::TST_int, Loc, Dummy, DiagID);
+ (void)Error; // Silence warning.
+ assert(!Error && "Error setting up implicit decl!");
+ Declarator D(DS, Declarator::BlockContext);
+ D.AddTypeInfo(DeclaratorChunk::getFunction(false, false, SourceLocation(), 0,
+ 0, 0, true, SourceLocation(),
+ SourceLocation(), SourceLocation(),
+ SourceLocation(),
+ EST_None, SourceLocation(),
+ 0, 0, 0, 0, Loc, Loc, D),
+ DS.getAttributes(),
+ SourceLocation());
+ D.SetIdentifier(&II, Loc);
+
+ // Insert this function into translation-unit scope.
+
+ DeclContext *PrevDC = CurContext;
+ CurContext = Context.getTranslationUnitDecl();
+
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(ActOnDeclarator(TUScope, D));
+ FD->setImplicit();
+
+ CurContext = PrevDC;
+
+ AddKnownFunctionAttributes(FD);
+
+ return FD;
+}
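+
+ // Illustrative sketch (not part of the upstream source): the kind of C call
+ // that reaches ImplicitlyDefineFunction, with a made-up name:
+ //
+ //   int main(void) {
+ //     return frobnicate(42);   // no declaration in scope: implicitly
+ //   }                          // declared as 'int frobnicate()', an
+ //                              // extension in C99 and a warning in C90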
+
+/// \brief Adds any function attributes that we know a priori based on
+/// the declaration of this function.
+///
+/// These attributes can apply both to implicitly-declared builtins
+ /// (like __builtin___printf_chk) and to library-declared functions
+/// like NSLog or printf.
+///
+/// We need to check for duplicate attributes both here and where user-written
+/// attributes are applied to declarations.
+void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
+ if (FD->isInvalidDecl())
+ return;
+
+ // If this is a built-in function, map its builtin attributes to
+ // actual attributes.
+ if (unsigned BuiltinID = FD->getBuiltinID()) {
+ // Handle printf-formatting attributes.
+ unsigned FormatIdx;
+ bool HasVAListArg;
+ if (Context.BuiltinInfo.isPrintfLike(BuiltinID, FormatIdx, HasVAListArg)) {
+ if (!FD->getAttr<FormatAttr>()) {
+ const char *fmt = "printf";
+ unsigned int NumParams = FD->getNumParams();
+ if (FormatIdx < NumParams && // NumParams may be 0 (e.g. vfprintf)
+ FD->getParamDecl(FormatIdx)->getType()->isObjCObjectPointerType())
+ fmt = "NSString";
+ FD->addAttr(::new (Context) FormatAttr(FD->getLocation(), Context,
+ fmt, FormatIdx+1,
+ HasVAListArg ? 0 : FormatIdx+2));
+ }
+ }
+ if (Context.BuiltinInfo.isScanfLike(BuiltinID, FormatIdx,
+ HasVAListArg)) {
+ if (!FD->getAttr<FormatAttr>())
+ FD->addAttr(::new (Context) FormatAttr(FD->getLocation(), Context,
+ "scanf", FormatIdx+1,
+ HasVAListArg ? 0 : FormatIdx+2));
+ }
+
+ // Mark const if we don't care about errno and that is the only
+ // thing preventing the function from being const. This allows
+ // IRgen to use LLVM intrinsics for such functions.
+ if (!getLangOpts().MathErrno &&
+ Context.BuiltinInfo.isConstWithoutErrno(BuiltinID)) {
+ if (!FD->getAttr<ConstAttr>())
+ FD->addAttr(::new (Context) ConstAttr(FD->getLocation(), Context));
+ }
+
+ if (Context.BuiltinInfo.isReturnsTwice(BuiltinID) &&
+ !FD->getAttr<ReturnsTwiceAttr>())
+ FD->addAttr(::new (Context) ReturnsTwiceAttr(FD->getLocation(), Context));
+ if (Context.BuiltinInfo.isNoThrow(BuiltinID) && !FD->getAttr<NoThrowAttr>())
+ FD->addAttr(::new (Context) NoThrowAttr(FD->getLocation(), Context));
+ if (Context.BuiltinInfo.isConst(BuiltinID) && !FD->getAttr<ConstAttr>())
+ FD->addAttr(::new (Context) ConstAttr(FD->getLocation(), Context));
+ }
+
+ IdentifierInfo *Name = FD->getIdentifier();
+ if (!Name)
+ return;
+ if ((!getLangOpts().CPlusPlus &&
+ FD->getDeclContext()->isTranslationUnit()) ||
+ (isa<LinkageSpecDecl>(FD->getDeclContext()) &&
+ cast<LinkageSpecDecl>(FD->getDeclContext())->getLanguage() ==
+ LinkageSpecDecl::lang_c)) {
+ // Okay: this could be a libc/libm/Objective-C function we know
+ // about.
+ } else
+ return;
+
+ if (Name->isStr("asprintf") || Name->isStr("vasprintf")) {
+ // FIXME: asprintf and vasprintf aren't C99 functions. Should they be
+ // target-specific builtins, perhaps?
+ if (!FD->getAttr<FormatAttr>())
+ FD->addAttr(::new (Context) FormatAttr(FD->getLocation(), Context,
+ "printf", 2,
+ Name->isStr("vasprintf") ? 0 : 3));
+ }
+}
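+
+ // Illustrative sketch (not part of the upstream source): because 'asprintf'
+ // is given an implicit printf format attribute above, a C caller such as
+ //
+ //   char *s;
+ //   asprintf(&s, "%s", 42);   // -Wformat fires even though the user never
+ //                             // wrote __attribute__((format(printf, 2, 3)))
+ //
+ // is checked as if the attribute had been written explicitly.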
+
+TypedefDecl *Sema::ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
+ TypeSourceInfo *TInfo) {
+ assert(D.getIdentifier() && "Wrong callback for declspec without declarator");
+ assert(!T.isNull() && "GetTypeForDeclarator() returned null type");
+
+ if (!TInfo) {
+ assert(D.isInvalidType() && "no declarator info for valid type");
+ TInfo = Context.getTrivialTypeSourceInfo(T);
+ }
+
+ // Scope manipulation handled by caller.
+ TypedefDecl *NewTD = TypedefDecl::Create(Context, CurContext,
+ D.getLocStart(),
+ D.getIdentifierLoc(),
+ D.getIdentifier(),
+ TInfo);
+
+ // Bail out immediately if we have an invalid declaration.
+ if (D.isInvalidType()) {
+ NewTD->setInvalidDecl();
+ return NewTD;
+ }
+
+ if (D.getDeclSpec().isModulePrivateSpecified()) {
+ if (CurContext->isFunctionOrMethod())
+ Diag(NewTD->getLocation(), diag::err_module_private_local)
+ << 2 << NewTD->getDeclName()
+ << SourceRange(D.getDeclSpec().getModulePrivateSpecLoc())
+ << FixItHint::CreateRemoval(D.getDeclSpec().getModulePrivateSpecLoc());
+ else
+ NewTD->setModulePrivate();
+ }
+
+ // C++ [dcl.typedef]p8:
+ // If the typedef declaration defines an unnamed class (or
+ // enum), the first typedef-name declared by the declaration
+ // to be that class type (or enum type) is used to denote the
+ // class type (or enum type) for linkage purposes only.
+ // We need to check whether the type was declared in the declaration.
+ switch (D.getDeclSpec().getTypeSpecType()) {
+ case TST_enum:
+ case TST_struct:
+ case TST_union:
+ case TST_class: {
+ TagDecl *tagFromDeclSpec = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
+
+ // Do nothing if the tag is not anonymous or already has an
+ // associated typedef (from an earlier typedef in this decl group).
+ if (tagFromDeclSpec->getIdentifier()) break;
+ if (tagFromDeclSpec->getTypedefNameForAnonDecl()) break;
+
+ // A well-formed anonymous tag must always be a TUK_Definition.
+ assert(tagFromDeclSpec->isThisDeclarationADefinition());
+
+ // The type must match the tag exactly; no qualifiers allowed.
+ if (!Context.hasSameType(T, Context.getTagDeclType(tagFromDeclSpec)))
+ break;
+
+ // Otherwise, set this as the anon-decl typedef for the tag.
+ tagFromDeclSpec->setTypedefNameForAnonDecl(NewTD);
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ return NewTD;
+}
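+
+ // Illustrative sketch (not part of the upstream source) of the
+ // C++ [dcl.typedef]p8 handling above:
+ //
+ //   typedef struct { int x; } Point;    // the struct is anonymous, so 'Point'
+ //                                       // denotes it for linkage purposes
+ //   typedef const struct { int y; } CQ; // type differs from the tag by a
+ //                                       // qualifier, so no name is attached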
+
+
+/// \brief Check that this is a valid underlying type for an enum declaration.
+bool Sema::CheckEnumUnderlyingType(TypeSourceInfo *TI) {
+ SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc();
+ QualType T = TI->getType();
+
+ if (T->isDependentType() || T->isIntegralType(Context))
+ return false;
+
+ Diag(UnderlyingLoc, diag::err_enum_invalid_underlying) << T;
+ return true;
+}
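+
+ // Illustrative sketch (not part of the upstream source): the check above
+ // accepts integral (or dependent) underlying types and rejects the rest:
+ //
+ //   enum class Color : unsigned char { Red, Green };  // OK: integral
+ //   enum class Bad : float { One };                   // error: invalid
+ //                                                     // underlying type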
+
+/// Check whether this is a valid redeclaration of a previous enumeration.
+/// \return true if the redeclaration was invalid.
+bool Sema::CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
+ QualType EnumUnderlyingTy,
+ const EnumDecl *Prev) {
+ bool IsFixed = !EnumUnderlyingTy.isNull();
+
+ if (IsScoped != Prev->isScoped()) {
+ Diag(EnumLoc, diag::err_enum_redeclare_scoped_mismatch)
+ << Prev->isScoped();
+ Diag(Prev->getLocation(), diag::note_previous_use);
+ return true;
+ }
+
+ if (IsFixed && Prev->isFixed()) {
+ if (!EnumUnderlyingTy->isDependentType() &&
+ !Prev->getIntegerType()->isDependentType() &&
+ !Context.hasSameUnqualifiedType(EnumUnderlyingTy,
+ Prev->getIntegerType())) {
+ Diag(EnumLoc, diag::err_enum_redeclare_type_mismatch)
+ << EnumUnderlyingTy << Prev->getIntegerType();
+ Diag(Prev->getLocation(), diag::note_previous_use);
+ return true;
+ }
+ } else if (IsFixed != Prev->isFixed()) {
+ Diag(EnumLoc, diag::err_enum_redeclare_fixed_mismatch)
+ << Prev->isFixed();
+ Diag(Prev->getLocation(), diag::note_previous_use);
+ return true;
+ }
+
+ return false;
+}
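+
+ // Illustrative sketch (not part of the upstream source) of the mismatches
+ // diagnosed above when an enumeration is redeclared:
+ //
+ //   enum class E : int;    // scoped, fixed underlying type 'int'
+ //   enum E;                // error: scoped/unscoped mismatch
+ //   enum class E : long;   // error: underlying type differs from 'int'
+ //
+ //   enum F : unsigned;     // unscoped, but with a fixed underlying type
+ //   enum F;                // error: previously fixed, now not fixed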
+
+/// \brief Determine whether a tag with a given kind is acceptable
+/// as a redeclaration of the given tag declaration.
+///
+/// \returns true if the new tag kind is acceptable, false otherwise.
+bool Sema::isAcceptableTagRedeclaration(const TagDecl *Previous,
+ TagTypeKind NewTag, bool isDefinition,
+ SourceLocation NewTagLoc,
+ const IdentifierInfo &Name) {
+ // C++ [dcl.type.elab]p3:
+ // The class-key or enum keyword present in the
+ // elaborated-type-specifier shall agree in kind with the
+ // declaration to which the name in the elaborated-type-specifier
+ // refers. This rule also applies to the form of
+ // elaborated-type-specifier that declares a class-name or
+ // friend class since it can be construed as referring to the
+ // definition of the class. Thus, in any
+ // elaborated-type-specifier, the enum keyword shall be used to
+ // refer to an enumeration (7.2), the union class-key shall be
+ // used to refer to a union (clause 9), and either the class or
+ // struct class-key shall be used to refer to a class (clause 9)
+ // declared using the class or struct class-key.
+ TagTypeKind OldTag = Previous->getTagKind();
+ if (!isDefinition || (NewTag != TTK_Class && NewTag != TTK_Struct))
+ if (OldTag == NewTag)
+ return true;
+
+ if ((OldTag == TTK_Struct || OldTag == TTK_Class) &&
+ (NewTag == TTK_Struct || NewTag == TTK_Class)) {
+ // Warn about the struct/class tag mismatch.
+ bool isTemplate = false;
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Previous))
+ isTemplate = Record->getDescribedClassTemplate();
+
+ if (!ActiveTemplateInstantiations.empty()) {
+ // In a template instantiation, do not offer fix-its for tag mismatches
+ // since they usually mess up the template instead of fixing the problem.
+ Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch)
+ << (NewTag == TTK_Class) << isTemplate << &Name;
+ return true;
+ }
+
+ if (isDefinition) {
+ // On definitions, check previous tags and issue a fix-it for each
+ // one that doesn't match the current tag.
+ if (Previous->getDefinition()) {
+ // Don't suggest fix-its for redefinitions.
+ return true;
+ }
+
+ bool previousMismatch = false;
+ for (TagDecl::redecl_iterator I(Previous->redecls_begin()),
+ E(Previous->redecls_end()); I != E; ++I) {
+ if (I->getTagKind() != NewTag) {
+ if (!previousMismatch) {
+ previousMismatch = true;
+ Diag(NewTagLoc, diag::warn_struct_class_previous_tag_mismatch)
+ << (NewTag == TTK_Class) << isTemplate << &Name;
+ }
+ Diag(I->getInnerLocStart(), diag::note_struct_class_suggestion)
+ << (NewTag == TTK_Class)
+ << FixItHint::CreateReplacement(I->getInnerLocStart(),
+ NewTag == TTK_Class?
+ "class" : "struct");
+ }
+ }
+ return true;
+ }
+
+ // Check for a previous definition. If the current tag and the definition
+ // use the same tag kind, do nothing. If there is no definition but the new
+ // tag kind disagrees with a previous declaration, give a warning but no
+ // fix-it.
+ const TagDecl *Redecl = Previous->getDefinition() ?
+ Previous->getDefinition() : Previous;
+ if (Redecl->getTagKind() == NewTag) {
+ return true;
+ }
+
+ Diag(NewTagLoc, diag::warn_struct_class_tag_mismatch)
+ << (NewTag == TTK_Class)
+ << isTemplate << &Name;
+ Diag(Redecl->getLocation(), diag::note_previous_use);
+
+ // If there is a previous definition, suggest a fix-it.
+ if (Previous->getDefinition()) {
+ Diag(NewTagLoc, diag::note_struct_class_suggestion)
+ << (Redecl->getTagKind() == TTK_Class)
+ << FixItHint::CreateReplacement(SourceRange(NewTagLoc),
+ Redecl->getTagKind() == TTK_Class? "class" : "struct");
+ }
+
+ return true;
+ }
+ return false;
+}
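+
+ // Illustrative sketch (not part of the upstream source): the struct/class
+ // handling above, for hypothetical C++ such as
+ //
+ //   class Widget;              // declared with 'class'
+ //   struct Widget { int x; };  // warning: previously declared as 'class',
+ //                              // with a fix-it on the earlier declaration
+ //
+ // A mismatch involving a non-class tag (e.g. 'struct' where an 'enum' was
+ // declared) is instead a hard error, diagnosed from ActOnTag.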
+
+/// ActOnTag - This is invoked when we see 'struct foo' or 'struct {'. In the
+ /// former case, Name will be non-null. In the latter case, Name will be null.
+/// TagSpec indicates what kind of tag this is. TUK indicates whether this is a
+/// reference/declaration/definition of a tag.
+Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
+ SourceLocation KWLoc, CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ AttributeList *Attr, AccessSpecifier AS,
+ SourceLocation ModulePrivateLoc,
+ MultiTemplateParamsArg TemplateParameterLists,
+ bool &OwnedDecl, bool &IsDependent,
+ SourceLocation ScopedEnumKWLoc,
+ bool ScopedEnumUsesClassTag,
+ TypeResult UnderlyingType) {
+ // If this is not a definition, it must have a name.
+ IdentifierInfo *OrigName = Name;
+ assert((Name != 0 || TUK == TUK_Definition) &&
+ "Nameless record must be a definition!");
+ assert(TemplateParameterLists.size() == 0 || TUK != TUK_Reference);
+
+ OwnedDecl = false;
+ TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
+ bool ScopedEnum = ScopedEnumKWLoc.isValid();
+
+ // FIXME: Check explicit specializations more carefully.
+ bool isExplicitSpecialization = false;
+ bool Invalid = false;
+
+ // We only need to do this matching if we have template parameters
+ // or a scope specifier, which also conveniently avoids this work
+ // for non-C++ cases.
+ if (TemplateParameterLists.size() > 0 ||
+ (SS.isNotEmpty() && TUK != TUK_Reference)) {
+ if (TemplateParameterList *TemplateParams
+ = MatchTemplateParametersToScopeSpecifier(KWLoc, NameLoc, SS,
+ TemplateParameterLists.get(),
+ TemplateParameterLists.size(),
+ TUK == TUK_Friend,
+ isExplicitSpecialization,
+ Invalid)) {
+ if (TemplateParams->size() > 0) {
+ // This is a declaration or definition of a class template (which may
+ // be a member of another template).
+
+ if (Invalid)
+ return 0;
+
+ OwnedDecl = false;
+ DeclResult Result = CheckClassTemplate(S, TagSpec, TUK, KWLoc,
+ SS, Name, NameLoc, Attr,
+ TemplateParams, AS,
+ ModulePrivateLoc,
+ TemplateParameterLists.size() - 1,
+ (TemplateParameterList**) TemplateParameterLists.release());
+ return Result.get();
+ } else {
+ // The "template<>" header is extraneous.
+ Diag(TemplateParams->getTemplateLoc(), diag::err_template_tag_noparams)
+ << TypeWithKeyword::getTagTypeKindName(Kind) << Name;
+ isExplicitSpecialization = true;
+ }
+ }
+ }
+
+ // Figure out the underlying type if this is an enum declaration. We need to do
+ // this early, because it's needed to detect if this is an incompatible
+ // redeclaration.
+ llvm::PointerUnion<const Type*, TypeSourceInfo*> EnumUnderlying;
+
+ if (Kind == TTK_Enum) {
+ if (UnderlyingType.isInvalid() || (!UnderlyingType.get() && ScopedEnum))
+ // No underlying type explicitly specified, or we failed to parse the
+ // type, default to int.
+ EnumUnderlying = Context.IntTy.getTypePtr();
+ else if (UnderlyingType.get()) {
+ // C++0x 7.2p2: The type-specifier-seq of an enum-base shall name an
+ // integral type; any cv-qualification is ignored.
+ TypeSourceInfo *TI = 0;
+ GetTypeFromParser(UnderlyingType.get(), &TI);
+ EnumUnderlying = TI;
+
+ if (CheckEnumUnderlyingType(TI))
+ // Recover by falling back to int.
+ EnumUnderlying = Context.IntTy.getTypePtr();
+
+ if (DiagnoseUnexpandedParameterPack(TI->getTypeLoc().getBeginLoc(), TI,
+ UPPC_FixedUnderlyingType))
+ EnumUnderlying = Context.IntTy.getTypePtr();
+
+ } else if (getLangOpts().MicrosoftMode)
+ // Microsoft enums are always of int type.
+ EnumUnderlying = Context.IntTy.getTypePtr();
+ }
+
+ DeclContext *SearchDC = CurContext;
+ DeclContext *DC = CurContext;
+ bool isStdBadAlloc = false;
+
+ RedeclarationKind Redecl = ForRedeclaration;
+ if (TUK == TUK_Friend || TUK == TUK_Reference)
+ Redecl = NotForRedeclaration;
+
+ LookupResult Previous(*this, Name, NameLoc, LookupTagName, Redecl);
+
+ if (Name && SS.isNotEmpty()) {
+ // We have a nested-name tag ('struct foo::bar').
+
+ // Check for invalid 'foo::'.
+ if (SS.isInvalid()) {
+ Name = 0;
+ goto CreateNewDecl;
+ }
+
+ // If this is a friend or a reference to a class in a dependent
+ // context, don't try to make a decl for it.
+ if (TUK == TUK_Friend || TUK == TUK_Reference) {
+ DC = computeDeclContext(SS, false);
+ if (!DC) {
+ IsDependent = true;
+ return 0;
+ }
+ } else {
+ DC = computeDeclContext(SS, true);
+ if (!DC) {
+ Diag(SS.getRange().getBegin(), diag::err_dependent_nested_name_spec)
+ << SS.getRange();
+ return 0;
+ }
+ }
+
+ if (RequireCompleteDeclContext(SS, DC))
+ return 0;
+
+ SearchDC = DC;
+ // Look-up name inside 'foo::'.
+ LookupQualifiedName(Previous, DC);
+
+ if (Previous.isAmbiguous())
+ return 0;
+
+ if (Previous.empty()) {
+ // Name lookup did not find anything. However, if the
+ // nested-name-specifier refers to the current instantiation,
+ // and that current instantiation has any dependent base
+ // classes, we might find something at instantiation time: treat
+ // this as a dependent elaborated-type-specifier.
+ // But this only makes any sense for reference-like lookups.
+ if (Previous.wasNotFoundInCurrentInstantiation() &&
+ (TUK == TUK_Reference || TUK == TUK_Friend)) {
+ IsDependent = true;
+ return 0;
+ }
+
+ // A tag 'foo::bar' must already exist.
+ Diag(NameLoc, diag::err_not_tag_in_scope)
+ << Kind << Name << DC << SS.getRange();
+ Name = 0;
+ Invalid = true;
+ goto CreateNewDecl;
+ }
+ } else if (Name) {
+ // If this is a named struct, check to see if there was a previous forward
+ // declaration or definition.
+ // FIXME: We're looking into outer scopes here, even when we
+ // shouldn't be. Doing so can result in ambiguities that we
+ // shouldn't be diagnosing.
+ LookupName(Previous, S);
+
+ if (Previous.isAmbiguous() &&
+ (TUK == TUK_Definition || TUK == TUK_Declaration)) {
+ LookupResult::Filter F = Previous.makeFilter();
+ while (F.hasNext()) {
+ NamedDecl *ND = F.next();
+ if (ND->getDeclContext()->getRedeclContext() != SearchDC)
+ F.erase();
+ }
+ F.done();
+ }
+
+ // Note: there used to be some attempt at recovery here.
+ if (Previous.isAmbiguous())
+ return 0;
+
+ if (!getLangOpts().CPlusPlus && TUK != TUK_Reference) {
+ // FIXME: This makes sure that we ignore the contexts associated
+ // with C structs, unions, and enums when looking for a matching
+ // tag declaration or definition. See the similar lookup tweak
+ // in Sema::LookupName; is there a better way to deal with this?
+ while (isa<RecordDecl>(SearchDC) || isa<EnumDecl>(SearchDC))
+ SearchDC = SearchDC->getParent();
+ }
+ } else if (S->isFunctionPrototypeScope()) {
+ // If this is an enum declaration in function prototype scope, set its
+ // initial context to the translation unit.
+ // FIXME: [citation needed]
+ SearchDC = Context.getTranslationUnitDecl();
+ }
+
+ if (Previous.isSingleResult() &&
+ Previous.getFoundDecl()->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(NameLoc, Previous.getFoundDecl());
+ // Just pretend that we didn't see the previous declaration.
+ Previous.clear();
+ }
+
+ if (getLangOpts().CPlusPlus && Name && DC && StdNamespace &&
+ DC->Equals(getStdNamespace()) && Name->isStr("bad_alloc")) {
+ // This is a declaration of or a reference to "std::bad_alloc".
+ isStdBadAlloc = true;
+
+ if (Previous.empty() && StdBadAlloc) {
+ // std::bad_alloc has been implicitly declared (but made invisible to
+ // name lookup). Fill in this implicit declaration as the previous
+ // declaration, so that the declarations get chained appropriately.
+ Previous.addDecl(getStdBadAlloc());
+ }
+ }
+
+ // If we didn't find a previous declaration, and this is a reference
+ // (or friend reference), move to the correct scope. In C++, we
+ // also need to do a redeclaration lookup there, just in case
+ // there's a shadow friend decl.
+ if (Name && Previous.empty() &&
+ (TUK == TUK_Reference || TUK == TUK_Friend)) {
+ if (Invalid) goto CreateNewDecl;
+ assert(SS.isEmpty());
+
+ if (TUK == TUK_Reference) {
+ // C++ [basic.scope.pdecl]p5:
+ // -- for an elaborated-type-specifier of the form
+ //
+ // class-key identifier
+ //
+ // if the elaborated-type-specifier is used in the
+ // decl-specifier-seq or parameter-declaration-clause of a
+ // function defined in namespace scope, the identifier is
+ // declared as a class-name in the namespace that contains
+ // the declaration; otherwise, except as a friend
+ // declaration, the identifier is declared in the smallest
+ // non-class, non-function-prototype scope that contains the
+ // declaration.
+ //
+ // C99 6.7.2.3p8 has a similar (but not identical!) provision for
+ // C structs and unions.
+ //
+ // It is an error in C++ to declare (rather than define) an enum
+ // type, including via an elaborated type specifier. We'll
+ // diagnose that later; for now, declare the enum in the same
+ // scope as we would have picked for any other tag type.
+ //
+ // GNU C also supports this behavior as part of its incomplete
+ // enum types extension, while GNU C++ does not.
+ //
+ // Find the context where we'll be declaring the tag.
+ // FIXME: We would like to maintain the current DeclContext as the
+ // lexical context,
+ while (!SearchDC->isFileContext() && !SearchDC->isFunctionOrMethod())
+ SearchDC = SearchDC->getParent();
+
+ // Find the scope where we'll be declaring the tag.
+ while (S->isClassScope() ||
+ (getLangOpts().CPlusPlus &&
+ S->isFunctionPrototypeScope()) ||
+ ((S->getFlags() & Scope::DeclScope) == 0) ||
+ (S->getEntity() &&
+ ((DeclContext *)S->getEntity())->isTransparentContext()))
+ S = S->getParent();
+ } else {
+ assert(TUK == TUK_Friend);
+ // C++ [namespace.memdef]p3:
+ // If a friend declaration in a non-local class first declares a
+ // class or function, the friend class or function is a member of
+ // the innermost enclosing namespace.
+ SearchDC = SearchDC->getEnclosingNamespaceContext();
+ }
+
+ // In C++, we need to do a redeclaration lookup to properly
+ // diagnose some problems.
+ if (getLangOpts().CPlusPlus) {
+ Previous.setRedeclarationKind(ForRedeclaration);
+ LookupQualifiedName(Previous, SearchDC);
+ }
+ }
+
+ if (!Previous.empty()) {
+ NamedDecl *PrevDecl = (*Previous.begin())->getUnderlyingDecl();
+
+ // It's okay to have a tag decl in the same scope as a typedef
+ // which hides a tag decl in the same scope. Finding this
+ // insanity with a redeclaration lookup can only actually happen
+ // in C++.
+ //
+ // This is also okay for elaborated-type-specifiers, even though this is
+ // technically forbidden by the current standard; it is okay according
+ // to the likely resolution of an open issue;
+ // see http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#407
+ if (getLangOpts().CPlusPlus) {
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(PrevDecl)) {
+ if (const TagType *TT = TD->getUnderlyingType()->getAs<TagType>()) {
+ TagDecl *Tag = TT->getDecl();
+ if (Tag->getDeclName() == Name &&
+ Tag->getDeclContext()->getRedeclContext()
+ ->Equals(TD->getDeclContext()->getRedeclContext())) {
+ PrevDecl = Tag;
+ Previous.clear();
+ Previous.addDecl(Tag);
+ Previous.resolveKind();
+ }
+ }
+ }
+ }
+
+ if (TagDecl *PrevTagDecl = dyn_cast<TagDecl>(PrevDecl)) {
+ // If this is a use of a previous tag, or if the tag is already declared
+ // in the same scope (so that the definition/declaration completes or
+ // re-mentions the tag), reuse the decl.
+ if (TUK == TUK_Reference || TUK == TUK_Friend ||
+ isDeclInScope(PrevDecl, SearchDC, S, isExplicitSpecialization)) {
+ // Make sure that this wasn't declared as an enum and now used as a
+ // struct or something similar.
+ if (!isAcceptableTagRedeclaration(PrevTagDecl, Kind,
+ TUK == TUK_Definition, KWLoc,
+ *Name)) {
+ bool SafeToContinue
+ = (PrevTagDecl->getTagKind() != TTK_Enum &&
+ Kind != TTK_Enum);
+ if (SafeToContinue)
+ Diag(KWLoc, diag::err_use_with_wrong_tag)
+ << Name
+ << FixItHint::CreateReplacement(SourceRange(KWLoc),
+ PrevTagDecl->getKindName());
+ else
+ Diag(KWLoc, diag::err_use_with_wrong_tag) << Name;
+ Diag(PrevTagDecl->getLocation(), diag::note_previous_use);
+
+ if (SafeToContinue)
+ Kind = PrevTagDecl->getTagKind();
+ else {
+ // Recover by making this an anonymous redefinition.
+ Name = 0;
+ Previous.clear();
+ Invalid = true;
+ }
+ }
+
+ if (Kind == TTK_Enum && PrevTagDecl->getTagKind() == TTK_Enum) {
+ const EnumDecl *PrevEnum = cast<EnumDecl>(PrevTagDecl);
+
+ // If this is an elaborated-type-specifier for a scoped enumeration,
+ // the 'class' keyword is not necessary and not permitted.
+ if (TUK == TUK_Reference || TUK == TUK_Friend) {
+ if (ScopedEnum)
+ Diag(ScopedEnumKWLoc, diag::err_enum_class_reference)
+ << PrevEnum->isScoped()
+ << FixItHint::CreateRemoval(ScopedEnumKWLoc);
+ return PrevTagDecl;
+ }
+
+ QualType EnumUnderlyingTy;
+ if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>())
+ EnumUnderlyingTy = TI->getType();
+ else if (const Type *T = EnumUnderlying.dyn_cast<const Type*>())
+ EnumUnderlyingTy = QualType(T, 0);
+
+ // All conflicts with previous declarations are recovered by
+ // returning the previous declaration, unless this is a definition,
+ // in which case we want the caller to bail out.
+ if (CheckEnumRedeclaration(NameLoc.isValid() ? NameLoc : KWLoc,
+ ScopedEnum, EnumUnderlyingTy, PrevEnum))
+ return TUK == TUK_Declaration ? PrevTagDecl : 0;
+ }
+
+ if (!Invalid) {
+ // If this is a use, just return the declaration we found.
+
+ // FIXME: In the future, return a variant or some other clue
+ // for the consumer of this Decl to know it doesn't own it.
+ // For our current ASTs this shouldn't be a problem, but will
+ // need to be changed with DeclGroups.
+ if ((TUK == TUK_Reference && (!PrevTagDecl->getFriendObjectKind() ||
+ getLangOpts().MicrosoftExt)) || TUK == TUK_Friend)
+ return PrevTagDecl;
+
+ // Diagnose attempts to redefine a tag.
+ if (TUK == TUK_Definition) {
+ if (TagDecl *Def = PrevTagDecl->getDefinition()) {
+ // If we're defining a specialization and the previous definition
+ // is from an implicit instantiation, don't emit an error
+ // here; we'll catch this in the general case below.
+ bool IsExplicitSpecializationAfterInstantiation = false;
+ if (isExplicitSpecialization) {
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Def))
+ IsExplicitSpecializationAfterInstantiation =
+ RD->getTemplateSpecializationKind() !=
+ TSK_ExplicitSpecialization;
+ else if (EnumDecl *ED = dyn_cast<EnumDecl>(Def))
+ IsExplicitSpecializationAfterInstantiation =
+ ED->getTemplateSpecializationKind() !=
+ TSK_ExplicitSpecialization;
+ }
+
+ if (!IsExplicitSpecializationAfterInstantiation) {
+ // A redeclaration in function prototype scope in C isn't
+ // visible elsewhere, so merely issue a warning.
+ if (!getLangOpts().CPlusPlus && S->containedInPrototypeScope())
+ Diag(NameLoc, diag::warn_redefinition_in_param_list) << Name;
+ else
+ Diag(NameLoc, diag::err_redefinition) << Name;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ // If this is a redefinition, recover by making this
+ // struct be anonymous, which will make any later
+ // references get the previous definition.
+ Name = 0;
+ Previous.clear();
+ Invalid = true;
+ }
+ } else {
+ // If the type is currently being defined, complain
+ // about a nested redefinition.
+ const TagType *Tag
+ = cast<TagType>(Context.getTagDeclType(PrevTagDecl));
+ if (Tag->isBeingDefined()) {
+ Diag(NameLoc, diag::err_nested_redefinition) << Name;
+ Diag(PrevTagDecl->getLocation(),
+ diag::note_previous_definition);
+ Name = 0;
+ Previous.clear();
+ Invalid = true;
+ }
+ }
+
+ // Okay, this is definition of a previously declared or referenced
+ // tag PrevDecl. We're going to create a new Decl for it.
+ }
+ }
+ // If we get here we have (another) forward declaration or we
+ // have a definition. Just create a new decl.
+
+ } else {
+ // If we get here, this is a definition of a new tag type in a nested
+ // scope, e.g. "struct foo; void bar() { struct foo; }", just create a
+ // new decl/type. We set PrevDecl to NULL so that the entities
+ // have distinct types.
+ Previous.clear();
+ }
+ // If we get here, we're going to create a new Decl. If PrevDecl
+ // is non-NULL, it's a definition of the tag declared by
+ // PrevDecl. If it's NULL, we have a new definition.
+
+
+ // Otherwise, PrevDecl is not a tag, but was found with tag
+ // lookup. This is only actually possible in C++, where a few
+ // things like templates still live in the tag namespace.
+ } else {
+ // Use a better diagnostic if an elaborated-type-specifier
+ // found the wrong kind of type on the first
+ // (non-redeclaration) lookup.
+ if ((TUK == TUK_Reference || TUK == TUK_Friend) &&
+ !Previous.isForRedeclaration()) {
+ unsigned Kind = 0;
+ if (isa<TypedefDecl>(PrevDecl)) Kind = 1;
+ else if (isa<TypeAliasDecl>(PrevDecl)) Kind = 2;
+ else if (isa<ClassTemplateDecl>(PrevDecl)) Kind = 3;
+ Diag(NameLoc, diag::err_tag_reference_non_tag) << Kind;
+ Diag(PrevDecl->getLocation(), diag::note_declared_at);
+ Invalid = true;
+
+ // Otherwise, only diagnose if the declaration is in scope.
+ } else if (!isDeclInScope(PrevDecl, SearchDC, S,
+ isExplicitSpecialization)) {
+ // do nothing
+
+ // Diagnose implicit declarations introduced by elaborated types.
+ } else if (TUK == TUK_Reference || TUK == TUK_Friend) {
+ unsigned Kind = 0;
+ if (isa<TypedefDecl>(PrevDecl)) Kind = 1;
+ else if (isa<TypeAliasDecl>(PrevDecl)) Kind = 2;
+ else if (isa<ClassTemplateDecl>(PrevDecl)) Kind = 3;
+ Diag(NameLoc, diag::err_tag_reference_conflict) << Kind;
+ Diag(PrevDecl->getLocation(), diag::note_previous_decl) << PrevDecl;
+ Invalid = true;
+
+ // Otherwise it's a declaration. Call out a particularly common
+ // case here.
+ } else if (TypedefNameDecl *TND = dyn_cast<TypedefNameDecl>(PrevDecl)) {
+ unsigned Kind = 0;
+ if (isa<TypeAliasDecl>(PrevDecl)) Kind = 1;
+ Diag(NameLoc, diag::err_tag_definition_of_typedef)
+ << Name << Kind << TND->getUnderlyingType();
+ Diag(PrevDecl->getLocation(), diag::note_previous_decl) << PrevDecl;
+ Invalid = true;
+
+ // Otherwise, diagnose.
+ } else {
+ // The tag name clashes with something else in the target scope,
+ // issue an error and recover by making this tag be anonymous.
+ Diag(NameLoc, diag::err_redefinition_different_kind) << Name;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ Name = 0;
+ Invalid = true;
+ }
+
+ // The existing declaration isn't relevant to us; we're in a
+ // new scope, so clear out the previous declaration.
+ Previous.clear();
+ }
+ }
+
+CreateNewDecl:
+
+ TagDecl *PrevDecl = 0;
+ if (Previous.isSingleResult())
+ PrevDecl = cast<TagDecl>(Previous.getFoundDecl());
+
+ // If there is an identifier, use the location of the identifier as the
+ // location of the decl; otherwise, use the location of the struct/union
+ // keyword.
+ SourceLocation Loc = NameLoc.isValid() ? NameLoc : KWLoc;
+
+ // Otherwise, create a new declaration. If there is a previous
+ // declaration of the same entity, the two will be linked via
+ // PrevDecl.
+ TagDecl *New;
+
+ bool IsForwardReference = false;
+ if (Kind == TTK_Enum) {
+ // FIXME: Tag decls should be chained to any simultaneous vardecls, e.g.:
+ // enum X { A, B, C } D; D should chain to X.
+ New = EnumDecl::Create(Context, SearchDC, KWLoc, Loc, Name,
+ cast_or_null<EnumDecl>(PrevDecl), ScopedEnum,
+ ScopedEnumUsesClassTag, !EnumUnderlying.isNull());
+ // If this is an undefined enum, warn.
+ if (TUK != TUK_Definition && !Invalid) {
+ TagDecl *Def;
+ if (getLangOpts().CPlusPlus0x && cast<EnumDecl>(New)->isFixed()) {
+ // C++0x: 7.2p2: opaque-enum-declaration.
+ // Conflicts are diagnosed above. Do nothing.
+ }
+ else if (PrevDecl && (Def = cast<EnumDecl>(PrevDecl)->getDefinition())) {
+ Diag(Loc, diag::ext_forward_ref_enum_def)
+ << New;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ } else {
+ unsigned DiagID = diag::ext_forward_ref_enum;
+ if (getLangOpts().MicrosoftMode)
+ DiagID = diag::ext_ms_forward_ref_enum;
+ else if (getLangOpts().CPlusPlus)
+ DiagID = diag::err_forward_ref_enum;
+ Diag(Loc, DiagID);
+
+ // If this is a forward-declared reference to an enumeration, make a
+ // note of it; we won't actually be introducing the declaration into
+ // the declaration context.
+ if (TUK == TUK_Reference)
+ IsForwardReference = true;
+ }
+ }
+
+ if (EnumUnderlying) {
+ EnumDecl *ED = cast<EnumDecl>(New);
+ if (TypeSourceInfo *TI = EnumUnderlying.dyn_cast<TypeSourceInfo*>())
+ ED->setIntegerTypeSourceInfo(TI);
+ else
+ ED->setIntegerType(QualType(EnumUnderlying.get<const Type*>(), 0));
+ ED->setPromotionType(ED->getIntegerType());
+ }
+
+ } else {
+ // struct/union/class
+
+ // FIXME: Tag decls should be chained to any simultaneous vardecls, e.g.:
+ // struct X { int A; } D; D should chain to X.
+ if (getLangOpts().CPlusPlus) {
+ // FIXME: Look for a way to use RecordDecl for simple structs.
+ New = CXXRecordDecl::Create(Context, Kind, SearchDC, KWLoc, Loc, Name,
+ cast_or_null<CXXRecordDecl>(PrevDecl));
+
+ if (isStdBadAlloc && (!StdBadAlloc || getStdBadAlloc()->isImplicit()))
+ StdBadAlloc = cast<CXXRecordDecl>(New);
+ } else
+ New = RecordDecl::Create(Context, Kind, SearchDC, KWLoc, Loc, Name,
+ cast_or_null<RecordDecl>(PrevDecl));
+ }
+
+ // Maybe add qualifier info.
+ if (SS.isNotEmpty()) {
+ if (SS.isSet()) {
+ // If this is either a declaration or a definition, check the
+ // nested-name-specifier against the current context. We don't do this
+ // for explicit specializations, because they have similar checking
+ // (with more specific diagnostics) in the call to
+ // CheckMemberSpecialization, below.
+ if (!isExplicitSpecialization &&
+ (TUK == TUK_Definition || TUK == TUK_Declaration) &&
+ diagnoseQualifiedDeclaration(SS, DC, OrigName, NameLoc))
+ Invalid = true;
+
+ New->setQualifierInfo(SS.getWithLocInContext(Context));
+ if (TemplateParameterLists.size() > 0) {
+ New->setTemplateParameterListsInfo(Context,
+ TemplateParameterLists.size(),
+ (TemplateParameterList**) TemplateParameterLists.release());
+ }
+ }
+ else
+ Invalid = true;
+ }
+
+ if (RecordDecl *RD = dyn_cast<RecordDecl>(New)) {
+ // Add alignment attributes if necessary; these attributes are checked when
+ // the ASTContext lays out the structure.
+ //
+ // It is important for implementing the correct semantics that this
+ // happen here (in ActOnTag). The #pragma pack stack is
+ // maintained as a result of parser callbacks which can occur at
+ // many points during the parsing of a struct declaration (because
+ // the #pragma tokens are effectively skipped over during the
+ // parsing of the struct).
+ AddAlignmentAttributesForRecord(RD);
+
+ AddMsStructLayoutForRecord(RD);
+ }
+
+ if (ModulePrivateLoc.isValid()) {
+ if (isExplicitSpecialization)
+ Diag(New->getLocation(), diag::err_module_private_specialization)
+ << 2
+ << FixItHint::CreateRemoval(ModulePrivateLoc);
+ // __module_private__ does not apply to local classes. However, we only
+ // diagnose this as an error when the declaration specifiers are
+ // freestanding. Here, we just ignore the __module_private__.
+ else if (!SearchDC->isFunctionOrMethod())
+ New->setModulePrivate();
+ }
+
+ // If this is a specialization of a member class (of a class template),
+ // check the specialization.
+ if (isExplicitSpecialization && CheckMemberSpecialization(New, Previous))
+ Invalid = true;
+
+ if (Invalid)
+ New->setInvalidDecl();
+
+ if (Attr)
+ ProcessDeclAttributeList(S, New, Attr);
+
+ // If we're declaring or defining a tag in function prototype scope
+ // in C, note that this type can only be used within the function.
+ if (Name && S->isFunctionPrototypeScope() && !getLangOpts().CPlusPlus)
+ Diag(Loc, diag::warn_decl_in_param_list) << Context.getTagDeclType(New);
+
+ // Set the lexical context. If the tag has a C++ scope specifier, the
+ // lexical context will be different from the semantic context.
+ New->setLexicalDeclContext(CurContext);
+
+ // Mark this as a friend decl if applicable.
+ // In Microsoft mode, a friend declaration also acts as a forward
+ // declaration so we always pass true to setObjectOfFriendDecl to make
+ // the tag name visible.
+ if (TUK == TUK_Friend)
+ New->setObjectOfFriendDecl(/* PreviouslyDeclared = */ !Previous.empty() ||
+ getLangOpts().MicrosoftExt);
+
+ // Set the access specifier.
+ if (!Invalid && SearchDC->isRecord())
+ SetMemberAccessSpecifier(New, PrevDecl, AS);
+
+ if (TUK == TUK_Definition)
+ New->startDefinition();
+
+ // If this has an identifier, add it to the scope stack.
+ if (TUK == TUK_Friend) {
+ // We might be replacing an existing declaration in the lookup tables;
+ // if so, borrow its access specifier.
+ if (PrevDecl)
+ New->setAccess(PrevDecl->getAccess());
+
+ DeclContext *DC = New->getDeclContext()->getRedeclContext();
+ DC->makeDeclVisibleInContext(New);
+ if (Name) // can be null along some error paths
+ if (Scope *EnclosingScope = getScopeForDeclContext(S, DC))
+ PushOnScopeChains(New, EnclosingScope, /* AddToContext = */ false);
+ } else if (Name) {
+ S = getNonFieldDeclScope(S);
+ PushOnScopeChains(New, S, !IsForwardReference);
+ if (IsForwardReference)
+ SearchDC->makeDeclVisibleInContext(New);
+
+ } else {
+ CurContext->addDecl(New);
+ }
+
+ // If this is the C FILE type, notify the AST context.
+ if (IdentifierInfo *II = New->getIdentifier())
+ if (!New->isInvalidDecl() &&
+ New->getDeclContext()->getRedeclContext()->isTranslationUnit() &&
+ II->isStr("FILE"))
+ Context.setFILEDecl(New);
+
+ // If we were in function prototype scope (and not in C++ mode), add this
+ // tag to the list of decls to inject into the function definition scope.
+ if (S->isFunctionPrototypeScope() && !getLangOpts().CPlusPlus &&
+ InFunctionDeclarator && Name)
+ DeclsInPrototypeScope.push_back(New);
+
+ OwnedDecl = true;
+ return New;
+}
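+
+ // Illustrative sketch (not part of the upstream source): the forward-declared
+ // enum handling above, in rough terms:
+ //
+ //   enum Color;        // C: accepted as an extension (warning)
+ //                      // C++: err_forward_ref_enum
+ //                      // Microsoft mode: accepted with its own extension warning
+ //   enum class State;  // C++0x: OK, an opaque-enum-declaration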
+
+void Sema::ActOnTagStartDefinition(Scope *S, Decl *TagD) {
+ AdjustDeclIfTemplate(TagD);
+ TagDecl *Tag = cast<TagDecl>(TagD);
+
+ // Enter the tag context.
+ PushDeclContext(S, Tag);
+}
+
+Decl *Sema::ActOnObjCContainerStartDefinition(Decl *IDecl) {
+ assert(isa<ObjCContainerDecl>(IDecl) &&
+ "ActOnObjCContainerStartDefinition - Not ObjCContainerDecl");
+ DeclContext *OCD = cast<DeclContext>(IDecl);
+ assert(getContainingDC(OCD) == CurContext &&
+ "The next DeclContext should be lexically contained in the current one.");
+ CurContext = OCD;
+ return IDecl;
+}
+
+void Sema::ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagD,
+ SourceLocation FinalLoc,
+ SourceLocation LBraceLoc) {
+ AdjustDeclIfTemplate(TagD);
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(TagD);
+
+ FieldCollector->StartClass();
+
+ if (!Record->getIdentifier())
+ return;
+
+ if (FinalLoc.isValid())
+ Record->addAttr(new (Context) FinalAttr(FinalLoc, Context));
+
+ // C++ [class]p2:
+ // [...] The class-name is also inserted into the scope of the
+ // class itself; this is known as the injected-class-name. For
+ // purposes of access checking, the injected-class-name is treated
+ // as if it were a public member name.
+ CXXRecordDecl *InjectedClassName
+ = CXXRecordDecl::Create(Context, Record->getTagKind(), CurContext,
+ Record->getLocStart(), Record->getLocation(),
+ Record->getIdentifier(),
+ /*PrevDecl=*/0,
+ /*DelayTypeCreation=*/true);
+ Context.getTypeDeclType(InjectedClassName, Record);
+ InjectedClassName->setImplicit();
+ InjectedClassName->setAccess(AS_public);
+ if (ClassTemplateDecl *Template = Record->getDescribedClassTemplate())
+ InjectedClassName->setDescribedClassTemplate(Template);
+ PushOnScopeChains(InjectedClassName, S);
+ assert(InjectedClassName->isInjectedClassName() &&
+ "Broken injected-class-name");
+}
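+
+ // Illustrative sketch (not part of the upstream source): the effect of the
+ // injected-class-name created above is that, inside the class, the class
+ // name refers to the class itself:
+ //
+ //   template <typename T> struct Node {
+ //     Node *next;   // 'Node' is the injected-class-name and denotes
+ //   };              // 'Node<T>' without explicit template arguments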
+
+void Sema::ActOnTagFinishDefinition(Scope *S, Decl *TagD,
+ SourceLocation RBraceLoc) {
+ AdjustDeclIfTemplate(TagD);
+ TagDecl *Tag = cast<TagDecl>(TagD);
+ Tag->setRBraceLoc(RBraceLoc);
+
+ // Make sure we "complete" the definition even it is invalid.
+ if (Tag->isBeingDefined()) {
+ assert(Tag->isInvalidDecl() && "We should already have completed it");
+ if (RecordDecl *RD = dyn_cast<RecordDecl>(Tag))
+ RD->completeDefinition();
+ }
+
+ if (isa<CXXRecordDecl>(Tag))
+ FieldCollector->FinishClass();
+
+ // Exit the scope of this tag's definition.
+ PopDeclContext();
+
+ // Notify the consumer that we've defined a tag.
+ Consumer.HandleTagDeclDefinition(Tag);
+}
+
+void Sema::ActOnObjCContainerFinishDefinition() {
+ // Exit the scope of this interface definition.
+ PopDeclContext();
+}
+
+void Sema::ActOnObjCTemporaryExitContainerContext(DeclContext *DC) {
+ assert(DC == CurContext && "Mismatch of container contexts");
+ OriginalLexicalContext = DC;
+ ActOnObjCContainerFinishDefinition();
+}
+
+void Sema::ActOnObjCReenterContainerContext(DeclContext *DC) {
+ ActOnObjCContainerStartDefinition(cast<Decl>(DC));
+ OriginalLexicalContext = 0;
+}
+
+void Sema::ActOnTagDefinitionError(Scope *S, Decl *TagD) {
+ AdjustDeclIfTemplate(TagD);
+ TagDecl *Tag = cast<TagDecl>(TagD);
+ Tag->setInvalidDecl();
+
+ // Make sure we "complete" the definition even it is invalid.
+ if (Tag->isBeingDefined()) {
+ if (RecordDecl *RD = dyn_cast<RecordDecl>(Tag))
+ RD->completeDefinition();
+ }
+
+ // We're undoing ActOnTagStartDefinition here, not
+ // ActOnStartCXXMemberDeclarations, so we don't have to mess with
+ // the FieldCollector.
+
+ PopDeclContext();
+}
+
+// Note that FieldName may be null for anonymous bitfields.
+ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
+ IdentifierInfo *FieldName,
+ QualType FieldTy, Expr *BitWidth,
+ bool *ZeroWidth) {
+ // Default to true; that shouldn't confuse checks for emptiness
+ if (ZeroWidth)
+ *ZeroWidth = true;
+
+ // C99 6.7.2.1p4 - verify the field type.
+ // C++ 9.6p3: A bit-field shall have integral or enumeration type.
+ if (!FieldTy->isDependentType() && !FieldTy->isIntegralOrEnumerationType()) {
+ // Handle incomplete types with specific error.
+ if (RequireCompleteType(FieldLoc, FieldTy, diag::err_field_incomplete))
+ return ExprError();
+ if (FieldName)
+ return Diag(FieldLoc, diag::err_not_integral_type_bitfield)
+ << FieldName << FieldTy << BitWidth->getSourceRange();
+ return Diag(FieldLoc, diag::err_not_integral_type_anon_bitfield)
+ << FieldTy << BitWidth->getSourceRange();
+ } else if (DiagnoseUnexpandedParameterPack(const_cast<Expr *>(BitWidth),
+ UPPC_BitFieldWidth))
+ return ExprError();
+
+ // If the bit-width is type- or value-dependent, don't try to check
+ // it now.
+ if (BitWidth->isValueDependent() || BitWidth->isTypeDependent())
+ return Owned(BitWidth);
+
+ llvm::APSInt Value;
+ ExprResult ICE = VerifyIntegerConstantExpression(BitWidth, &Value);
+ if (ICE.isInvalid())
+ return ICE;
+ BitWidth = ICE.take();
+
+ if (Value != 0 && ZeroWidth)
+ *ZeroWidth = false;
+
+ // A zero-width bit-field is OK for an anonymous field.
+ if (Value == 0 && FieldName)
+ return Diag(FieldLoc, diag::err_bitfield_has_zero_width) << FieldName;
+
+ if (Value.isSigned() && Value.isNegative()) {
+ if (FieldName)
+ return Diag(FieldLoc, diag::err_bitfield_has_negative_width)
+ << FieldName << Value.toString(10);
+ return Diag(FieldLoc, diag::err_anon_bitfield_has_negative_width)
+ << Value.toString(10);
+ }
+
+ if (!FieldTy->isDependentType()) {
+ uint64_t TypeSize = Context.getTypeSize(FieldTy);
+ if (Value.getZExtValue() > TypeSize) {
+ if (!getLangOpts().CPlusPlus) {
+ if (FieldName)
+ return Diag(FieldLoc, diag::err_bitfield_width_exceeds_type_size)
+ << FieldName << (unsigned)Value.getZExtValue()
+ << (unsigned)TypeSize;
+
+ return Diag(FieldLoc, diag::err_anon_bitfield_width_exceeds_type_size)
+ << (unsigned)Value.getZExtValue() << (unsigned)TypeSize;
+ }
+
+ if (FieldName)
+ Diag(FieldLoc, diag::warn_bitfield_width_exceeds_type_size)
+ << FieldName << (unsigned)Value.getZExtValue()
+ << (unsigned)TypeSize;
+ else
+ Diag(FieldLoc, diag::warn_anon_bitfield_width_exceeds_type_size)
+ << (unsigned)Value.getZExtValue() << (unsigned)TypeSize;
+ }
+ }
+
+ return Owned(BitWidth);
+}
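+
+ // Illustrative sketch (not part of the upstream source): the checks above
+ // applied to a hypothetical struct:
+ //
+ //   struct Flags {
+ //     float f : 3;   // error: bit-field has non-integral type
+ //     int   a : 0;   // error: named bit-field with zero width
+ //     int     : 0;   // OK: anonymous zero-width bit-field
+ //     int   b : -1;  // error: negative width
+ //     int   c : 99;  // width exceeds the type size: error in C, warning in C++
+ //   };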
+
+/// ActOnField - Each field of a C struct/union is passed into this in order
+/// to create a FieldDecl object for it.
+Decl *Sema::ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
+ Declarator &D, Expr *BitfieldWidth) {
+ FieldDecl *Res = HandleField(S, cast_or_null<RecordDecl>(TagD),
+ DeclStart, D, static_cast<Expr*>(BitfieldWidth),
+ /*HasInit=*/false, AS_public);
+ return Res;
+}
+
+/// HandleField - Analyze a field of a C struct or a C++ data member.
+///
+FieldDecl *Sema::HandleField(Scope *S, RecordDecl *Record,
+ SourceLocation DeclStart,
+ Declarator &D, Expr *BitWidth, bool HasInit,
+ AccessSpecifier AS) {
+ IdentifierInfo *II = D.getIdentifier();
+ SourceLocation Loc = DeclStart;
+ if (II) Loc = D.getIdentifierLoc();
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType T = TInfo->getType();
+ if (getLangOpts().CPlusPlus) {
+ CheckExtraCXXDefaultArguments(D);
+
+ if (DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
+ UPPC_DataMemberType)) {
+ D.setInvalidType();
+ T = Context.IntTy;
+ TInfo = Context.getTrivialTypeSourceInfo(T, Loc);
+ }
+ }
+
+ DiagnoseFunctionSpecifiers(D);
+
+ if (D.getDeclSpec().isThreadSpecified())
+ Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_invalid_thread);
+ if (D.getDeclSpec().isConstexprSpecified())
+ Diag(D.getDeclSpec().getConstexprSpecLoc(), diag::err_invalid_constexpr)
+ << 2;
+
+ // Check to see if this name was declared as a member previously
+ NamedDecl *PrevDecl = 0;
+ LookupResult Previous(*this, II, Loc, LookupMemberName, ForRedeclaration);
+ LookupName(Previous, S);
+ switch (Previous.getResultKind()) {
+ case LookupResult::Found:
+ case LookupResult::FoundUnresolvedValue:
+ PrevDecl = Previous.getAsSingle<NamedDecl>();
+ break;
+
+ case LookupResult::FoundOverloaded:
+ PrevDecl = Previous.getRepresentativeDecl();
+ break;
+
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ case LookupResult::Ambiguous:
+ break;
+ }
+ Previous.suppressDiagnostics();
+
+ if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
+ // Just pretend that we didn't see the previous declaration.
+ PrevDecl = 0;
+ }
+
+ if (PrevDecl && !isDeclInScope(PrevDecl, Record, S))
+ PrevDecl = 0;
+
+ bool Mutable
+ = (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_mutable);
+ SourceLocation TSSL = D.getLocStart();
+ FieldDecl *NewFD
+ = CheckFieldDecl(II, T, TInfo, Record, Loc, Mutable, BitWidth, HasInit,
+ TSSL, AS, PrevDecl, &D);
+
+ if (NewFD->isInvalidDecl())
+ Record->setInvalidDecl();
+
+ if (D.getDeclSpec().isModulePrivateSpecified())
+ NewFD->setModulePrivate();
+
+ if (NewFD->isInvalidDecl() && PrevDecl) {
+ // Don't introduce NewFD into scope; there's already something
+ // with the same name in the same scope.
+ } else if (II) {
+ PushOnScopeChains(NewFD, S);
+ } else
+ Record->addDecl(NewFD);
+
+ return NewFD;
+}
+
+/// \brief Build a new FieldDecl and check its well-formedness.
+///
+/// This routine builds a new FieldDecl given the field's name, type,
+/// record, etc. \p PrevDecl should refer to any previous declaration
+/// with the same name and in the same scope as the field to be
+/// created.
+///
+/// \returns a new FieldDecl.
+///
+/// \todo The Declarator argument is a hack. It will be removed once
+FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
+ TypeSourceInfo *TInfo,
+ RecordDecl *Record, SourceLocation Loc,
+ bool Mutable, Expr *BitWidth, bool HasInit,
+ SourceLocation TSSL,
+ AccessSpecifier AS, NamedDecl *PrevDecl,
+ Declarator *D) {
+ IdentifierInfo *II = Name.getAsIdentifierInfo();
+ bool InvalidDecl = false;
+ if (D) InvalidDecl = D->isInvalidType();
+
+ // If we receive a broken type, recover by assuming 'int' and
+ // marking this declaration as invalid.
+ if (T.isNull()) {
+ InvalidDecl = true;
+ T = Context.IntTy;
+ }
+
+ QualType EltTy = Context.getBaseElementType(T);
+ if (!EltTy->isDependentType()) {
+ if (RequireCompleteType(Loc, EltTy, diag::err_field_incomplete)) {
+ // Fields of incomplete type force their record to be invalid.
+ Record->setInvalidDecl();
+ InvalidDecl = true;
+ } else {
+ NamedDecl *Def;
+ EltTy->isIncompleteType(&Def);
+ if (Def && Def->isInvalidDecl()) {
+ Record->setInvalidDecl();
+ InvalidDecl = true;
+ }
+ }
+ }
+
+ // C99 6.7.2.1p8: A member of a structure or union may have any type other
+ // than a variably modified type.
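+  // For illustration only (hypothetical input, assuming a non-constant 'n'
+  // is in scope):
+  //   struct S { int a[n]; };   // rejected: variably modified field type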
+ if (!InvalidDecl && T->isVariablyModifiedType()) {
+ bool SizeIsNegative;
+ llvm::APSInt Oversized;
+ QualType FixedTy = TryToFixInvalidVariablyModifiedType(T, Context,
+ SizeIsNegative,
+ Oversized);
+ if (!FixedTy.isNull()) {
+ Diag(Loc, diag::warn_illegal_constant_array_size);
+ T = FixedTy;
+ } else {
+ if (SizeIsNegative)
+ Diag(Loc, diag::err_typecheck_negative_array_size);
+ else if (Oversized.getBoolValue())
+ Diag(Loc, diag::err_array_too_large)
+ << Oversized.toString(10);
+ else
+ Diag(Loc, diag::err_typecheck_field_variable_size);
+ InvalidDecl = true;
+ }
+ }
+
+  // Fields cannot have abstract class types.
+ if (!InvalidDecl && RequireNonAbstractType(Loc, T,
+ diag::err_abstract_type_in_decl,
+ AbstractFieldType))
+ InvalidDecl = true;
+
+ bool ZeroWidth = false;
+ // If this is declared as a bit-field, check the bit-field.
+ if (!InvalidDecl && BitWidth) {
+ BitWidth = VerifyBitField(Loc, II, T, BitWidth, &ZeroWidth).take();
+ if (!BitWidth) {
+ InvalidDecl = true;
+ BitWidth = 0;
+ ZeroWidth = false;
+ }
+ }
+
+ // Check that 'mutable' is consistent with the type of the declaration.
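+  // For illustration only (hypothetical inputs): member declarations such as
+  // 'mutable int &r;' and 'mutable const int c;' are both rejected here.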
+ if (!InvalidDecl && Mutable) {
+ unsigned DiagID = 0;
+ if (T->isReferenceType())
+ DiagID = diag::err_mutable_reference;
+ else if (T.isConstQualified())
+ DiagID = diag::err_mutable_const;
+
+ if (DiagID) {
+ SourceLocation ErrLoc = Loc;
+ if (D && D->getDeclSpec().getStorageClassSpecLoc().isValid())
+ ErrLoc = D->getDeclSpec().getStorageClassSpecLoc();
+ Diag(ErrLoc, DiagID);
+ Mutable = false;
+ InvalidDecl = true;
+ }
+ }
+
+ FieldDecl *NewFD = FieldDecl::Create(Context, Record, TSSL, Loc, II, T, TInfo,
+ BitWidth, Mutable, HasInit);
+ if (InvalidDecl)
+ NewFD->setInvalidDecl();
+
+ if (PrevDecl && !isa<TagDecl>(PrevDecl)) {
+ Diag(Loc, diag::err_duplicate_member) << II;
+ Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
+ NewFD->setInvalidDecl();
+ }
+
+ if (!InvalidDecl && getLangOpts().CPlusPlus) {
+ if (Record->isUnion()) {
+ if (const RecordType *RT = EltTy->getAs<RecordType>()) {
+ CXXRecordDecl* RDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (RDecl->getDefinition()) {
+ // C++ [class.union]p1: An object of a class with a non-trivial
+ // constructor, a non-trivial copy constructor, a non-trivial
+ // destructor, or a non-trivial copy assignment operator
+ // cannot be a member of a union, nor can an array of such
+ // objects.
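+          // For illustration only (hypothetical C++03 input):
+          //   struct NT { NT(); };
+          //   union U { NT m; };  // rejected: 'NT' has a non-trivial
+          //                       //           default constructor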
+ if (CheckNontrivialField(NewFD))
+ NewFD->setInvalidDecl();
+ }
+ }
+
+ // C++ [class.union]p1: If a union contains a member of reference type,
+ // the program is ill-formed.
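+      // For illustration only (hypothetical input):
+      //   union U { int &r; };  // ill-formed: reference member in a union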
+ if (EltTy->isReferenceType()) {
+ Diag(NewFD->getLocation(), diag::err_union_member_of_reference_type)
+ << NewFD->getDeclName() << EltTy;
+ NewFD->setInvalidDecl();
+ }
+ }
+ }
+
+ // FIXME: We need to pass in the attributes given an AST
+ // representation, not a parser representation.
+ if (D)
+ // FIXME: What to pass instead of TUScope?
+ ProcessDeclAttributes(TUScope, NewFD, *D);
+
+  // In auto-retain/release, infer strong retention for fields of
+ // retainable type.
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewFD))
+ NewFD->setInvalidDecl();
+
+ if (T.isObjCGCWeak())
+ Diag(Loc, diag::warn_attribute_weak_on_field);
+
+ NewFD->setAccess(AS);
+ return NewFD;
+}
+
+bool Sema::CheckNontrivialField(FieldDecl *FD) {
+ assert(FD);
+ assert(getLangOpts().CPlusPlus && "valid check only for C++");
+
+ if (FD->isInvalidDecl())
+ return true;
+
+ QualType EltTy = Context.getBaseElementType(FD->getType());
+ if (const RecordType *RT = EltTy->getAs<RecordType>()) {
+ CXXRecordDecl* RDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (RDecl->getDefinition()) {
+      // We check for copy constructors before default constructors
+      // because otherwise we'd never get complaints about
+      // copy constructors.
+
+ CXXSpecialMember member = CXXInvalid;
+ if (!RDecl->hasTrivialCopyConstructor())
+ member = CXXCopyConstructor;
+ else if (!RDecl->hasTrivialDefaultConstructor())
+ member = CXXDefaultConstructor;
+ else if (!RDecl->hasTrivialCopyAssignment())
+ member = CXXCopyAssignment;
+ else if (!RDecl->hasTrivialDestructor())
+ member = CXXDestructor;
+
+ if (member != CXXInvalid) {
+ if (!getLangOpts().CPlusPlus0x &&
+ getLangOpts().ObjCAutoRefCount && RDecl->hasObjectMember()) {
+ // Objective-C++ ARC: it is an error to have a non-trivial field of
+ // a union. However, system headers in Objective-C programs
+ // occasionally have Objective-C lifetime objects within unions,
+ // and rather than cause the program to fail, we make those
+ // members unavailable.
+ SourceLocation Loc = FD->getLocation();
+ if (getSourceManager().isInSystemHeader(Loc)) {
+ if (!FD->hasAttr<UnavailableAttr>())
+ FD->addAttr(new (Context) UnavailableAttr(Loc, Context,
+ "this system field has retaining ownership"));
+ return false;
+ }
+ }
+
+ Diag(FD->getLocation(), getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_nontrivial_union_or_anon_struct_member :
+ diag::err_illegal_union_or_anon_struct_member)
+ << (int)FD->getParent()->isUnion() << FD->getDeclName() << member;
+ DiagnoseNontrivial(RT, member);
+ return !getLangOpts().CPlusPlus0x;
+ }
+ }
+ }
+
+ return false;
+}
+
+/// If the given constructor is user-provided, produce a diagnostic explaining
+/// that it makes the class non-trivial.
+static bool DiagnoseNontrivialUserProvidedCtor(Sema &S, QualType QT,
+ CXXConstructorDecl *CD,
+ Sema::CXXSpecialMember CSM) {
+ if (!CD->isUserProvided())
+ return false;
+
+ SourceLocation CtorLoc = CD->getLocation();
+ S.Diag(CtorLoc, diag::note_nontrivial_user_defined) << QT << CSM;
+ return true;
+}
+
+/// DiagnoseNontrivial - Given that a class has a non-trivial
+/// special member, figure out why.
+void Sema::DiagnoseNontrivial(const RecordType* T, CXXSpecialMember member) {
+ QualType QT(T, 0U);
+ CXXRecordDecl* RD = cast<CXXRecordDecl>(T->getDecl());
+
+ // Check whether the member was user-declared.
+ switch (member) {
+ case CXXInvalid:
+ break;
+
+ case CXXDefaultConstructor:
+ if (RD->hasUserDeclaredConstructor()) {
+ typedef CXXRecordDecl::ctor_iterator ctor_iter;
+ for (ctor_iter CI = RD->ctor_begin(), CE = RD->ctor_end(); CI != CE; ++CI)
+ if (DiagnoseNontrivialUserProvidedCtor(*this, QT, *CI, member))
+ return;
+
+ // No user-provided constructors; look for constructor templates.
+ typedef CXXRecordDecl::specific_decl_iterator<FunctionTemplateDecl>
+ tmpl_iter;
+ for (tmpl_iter TI(RD->decls_begin()), TE(RD->decls_end());
+ TI != TE; ++TI) {
+ CXXConstructorDecl *CD =
+ dyn_cast<CXXConstructorDecl>(TI->getTemplatedDecl());
+ if (CD && DiagnoseNontrivialUserProvidedCtor(*this, QT, CD, member))
+ return;
+ }
+ }
+ break;
+
+ case CXXCopyConstructor:
+ if (RD->hasUserDeclaredCopyConstructor()) {
+ SourceLocation CtorLoc =
+ RD->getCopyConstructor(0)->getLocation();
+ Diag(CtorLoc, diag::note_nontrivial_user_defined) << QT << member;
+ return;
+ }
+ break;
+
+ case CXXMoveConstructor:
+ if (RD->hasUserDeclaredMoveConstructor()) {
+ SourceLocation CtorLoc = RD->getMoveConstructor()->getLocation();
+ Diag(CtorLoc, diag::note_nontrivial_user_defined) << QT << member;
+ return;
+ }
+ break;
+
+ case CXXCopyAssignment:
+ if (RD->hasUserDeclaredCopyAssignment()) {
+ // FIXME: this should use the location of the copy
+ // assignment, not the type.
+ SourceLocation TyLoc = RD->getLocStart();
+ Diag(TyLoc, diag::note_nontrivial_user_defined) << QT << member;
+ return;
+ }
+ break;
+
+ case CXXMoveAssignment:
+ if (RD->hasUserDeclaredMoveAssignment()) {
+ SourceLocation AssignLoc = RD->getMoveAssignmentOperator()->getLocation();
+ Diag(AssignLoc, diag::note_nontrivial_user_defined) << QT << member;
+ return;
+ }
+ break;
+
+ case CXXDestructor:
+ if (RD->hasUserDeclaredDestructor()) {
+ SourceLocation DtorLoc = LookupDestructor(RD)->getLocation();
+ Diag(DtorLoc, diag::note_nontrivial_user_defined) << QT << member;
+ return;
+ }
+ break;
+ }
+
+ typedef CXXRecordDecl::base_class_iterator base_iter;
+
+ // Virtual bases and members inhibit trivial copying/construction,
+ // but not trivial destruction.
+ if (member != CXXDestructor) {
+ // Check for virtual bases. vbases includes indirect virtual bases,
+ // so we just iterate through the direct bases.
+ for (base_iter bi = RD->bases_begin(), be = RD->bases_end(); bi != be; ++bi)
+ if (bi->isVirtual()) {
+ SourceLocation BaseLoc = bi->getLocStart();
+ Diag(BaseLoc, diag::note_nontrivial_has_virtual) << QT << 1;
+ return;
+ }
+
+ // Check for virtual methods.
+ typedef CXXRecordDecl::method_iterator meth_iter;
+ for (meth_iter mi = RD->method_begin(), me = RD->method_end(); mi != me;
+ ++mi) {
+ if (mi->isVirtual()) {
+ SourceLocation MLoc = mi->getLocStart();
+ Diag(MLoc, diag::note_nontrivial_has_virtual) << QT << 0;
+ return;
+ }
+ }
+ }
+
+ bool (CXXRecordDecl::*hasTrivial)() const;
+ switch (member) {
+ case CXXDefaultConstructor:
+ hasTrivial = &CXXRecordDecl::hasTrivialDefaultConstructor; break;
+ case CXXCopyConstructor:
+ hasTrivial = &CXXRecordDecl::hasTrivialCopyConstructor; break;
+ case CXXCopyAssignment:
+ hasTrivial = &CXXRecordDecl::hasTrivialCopyAssignment; break;
+ case CXXDestructor:
+ hasTrivial = &CXXRecordDecl::hasTrivialDestructor; break;
+ default:
+ llvm_unreachable("unexpected special member");
+ }
+
+ // Check for nontrivial bases (and recurse).
+ for (base_iter bi = RD->bases_begin(), be = RD->bases_end(); bi != be; ++bi) {
+ const RecordType *BaseRT = bi->getType()->getAs<RecordType>();
+ assert(BaseRT && "Don't know how to handle dependent bases");
+ CXXRecordDecl *BaseRecTy = cast<CXXRecordDecl>(BaseRT->getDecl());
+ if (!(BaseRecTy->*hasTrivial)()) {
+ SourceLocation BaseLoc = bi->getLocStart();
+ Diag(BaseLoc, diag::note_nontrivial_has_nontrivial) << QT << 1 << member;
+ DiagnoseNontrivial(BaseRT, member);
+ return;
+ }
+ }
+
+ // Check for nontrivial members (and recurse).
+ typedef RecordDecl::field_iterator field_iter;
+ for (field_iter fi = RD->field_begin(), fe = RD->field_end(); fi != fe;
+ ++fi) {
+ QualType EltTy = Context.getBaseElementType((*fi)->getType());
+ if (const RecordType *EltRT = EltTy->getAs<RecordType>()) {
+ CXXRecordDecl* EltRD = cast<CXXRecordDecl>(EltRT->getDecl());
+
+ if (!(EltRD->*hasTrivial)()) {
+ SourceLocation FLoc = (*fi)->getLocation();
+ Diag(FLoc, diag::note_nontrivial_has_nontrivial) << QT << 0 << member;
+ DiagnoseNontrivial(EltRT, member);
+ return;
+ }
+ }
+
+ if (EltTy->isObjCLifetimeType()) {
+ switch (EltTy.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ break;
+
+ case Qualifiers::OCL_Autoreleasing:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Strong:
+ Diag((*fi)->getLocation(), diag::note_nontrivial_objc_ownership)
+ << QT << EltTy.getObjCLifetime();
+ return;
+ }
+ }
+ }
+
+ llvm_unreachable("found no explanation for non-trivial member");
+}
+
+/// TranslateIvarVisibility - Translate visibility from a token ID to an
+/// AST enum value.
+static ObjCIvarDecl::AccessControl
+TranslateIvarVisibility(tok::ObjCKeywordKind ivarVisibility) {
+ switch (ivarVisibility) {
+ default: llvm_unreachable("Unknown visitibility kind");
+ case tok::objc_private: return ObjCIvarDecl::Private;
+ case tok::objc_public: return ObjCIvarDecl::Public;
+ case tok::objc_protected: return ObjCIvarDecl::Protected;
+ case tok::objc_package: return ObjCIvarDecl::Package;
+ }
+}
+
+/// ActOnIvar - Each ivar field of an Objective-C class is passed into this
+/// in order to create an IvarDecl object for it.
+Decl *Sema::ActOnIvar(Scope *S,
+ SourceLocation DeclStart,
+ Declarator &D, Expr *BitfieldWidth,
+ tok::ObjCKeywordKind Visibility) {
+
+ IdentifierInfo *II = D.getIdentifier();
+ Expr *BitWidth = (Expr*)BitfieldWidth;
+ SourceLocation Loc = DeclStart;
+ if (II) Loc = D.getIdentifierLoc();
+
+ // FIXME: Unnamed fields can be handled in various different ways, for
+ // example, unnamed unions inject all members into the struct namespace!
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType T = TInfo->getType();
+
+ if (BitWidth) {
+ // 6.7.2.1p3, 6.7.2.1p4
+ BitWidth = VerifyBitField(Loc, II, T, BitWidth).take();
+ if (!BitWidth)
+ D.setInvalidType();
+ } else {
+ // Not a bitfield.
+
+ // validate II.
+
+ }
+ if (T->isReferenceType()) {
+ Diag(Loc, diag::err_ivar_reference_type);
+ D.setInvalidType();
+ }
+ // C99 6.7.2.1p8: A member of a structure or union may have any type other
+ // than a variably modified type.
+ else if (T->isVariablyModifiedType()) {
+ Diag(Loc, diag::err_typecheck_ivar_variable_size);
+ D.setInvalidType();
+ }
+
+ // Get the visibility (access control) for this ivar.
+ ObjCIvarDecl::AccessControl ac =
+ Visibility != tok::objc_not_keyword ? TranslateIvarVisibility(Visibility)
+ : ObjCIvarDecl::None;
+ // Must set ivar's DeclContext to its enclosing interface.
+ ObjCContainerDecl *EnclosingDecl = cast<ObjCContainerDecl>(CurContext);
+ if (!EnclosingDecl || EnclosingDecl->isInvalidDecl())
+ return 0;
+ ObjCContainerDecl *EnclosingContext;
+ if (ObjCImplementationDecl *IMPDecl =
+ dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
+ if (!LangOpts.ObjCNonFragileABI2) {
+ // Case of ivar declared in an implementation. Context is that of its class.
+ EnclosingContext = IMPDecl->getClassInterface();
+ assert(EnclosingContext && "Implementation has no class interface!");
+ }
+ else
+ EnclosingContext = EnclosingDecl;
+ } else {
+ if (ObjCCategoryDecl *CDecl =
+ dyn_cast<ObjCCategoryDecl>(EnclosingDecl)) {
+ if (!LangOpts.ObjCNonFragileABI2 || !CDecl->IsClassExtension()) {
+ Diag(Loc, diag::err_misplaced_ivar) << CDecl->IsClassExtension();
+ return 0;
+ }
+ }
+ EnclosingContext = EnclosingDecl;
+ }
+
+ // Construct the decl.
+ ObjCIvarDecl *NewID = ObjCIvarDecl::Create(Context, EnclosingContext,
+ DeclStart, Loc, II, T,
+ TInfo, ac, (Expr *)BitfieldWidth);
+
+ if (II) {
+ NamedDecl *PrevDecl = LookupSingleName(S, II, Loc, LookupMemberName,
+ ForRedeclaration);
+ if (PrevDecl && isDeclInScope(PrevDecl, EnclosingContext, S)
+ && !isa<TagDecl>(PrevDecl)) {
+ Diag(Loc, diag::err_duplicate_member) << II;
+ Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
+ NewID->setInvalidDecl();
+ }
+ }
+
+ // Process attributes attached to the ivar.
+ ProcessDeclAttributes(S, NewID, D);
+
+ if (D.isInvalidType())
+ NewID->setInvalidDecl();
+
+ // In ARC, infer 'retaining' for ivars of retainable type.
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(NewID))
+ NewID->setInvalidDecl();
+
+ if (D.getDeclSpec().isModulePrivateSpecified())
+ NewID->setModulePrivate();
+
+ if (II) {
+ // FIXME: When interfaces are DeclContexts, we'll need to add
+ // these to the interface.
+ S->AddDecl(NewID);
+ IdResolver.AddDecl(NewID);
+ }
+
+ return NewID;
+}
+
+/// ActOnLastBitfield - This routine handles synthesized bitfield rules for
+/// class and class extensions. For every class @interface and class
+/// extension @interface, if the last ivar is a bitfield of any type,
+/// then add an implicit `char :0` ivar to the end of that interface.
+void Sema::ActOnLastBitfield(SourceLocation DeclLoc,
+ SmallVectorImpl<Decl *> &AllIvarDecls) {
+ if (!LangOpts.ObjCNonFragileABI2 || AllIvarDecls.empty())
+ return;
+
+ Decl *ivarDecl = AllIvarDecls[AllIvarDecls.size()-1];
+ ObjCIvarDecl *Ivar = cast<ObjCIvarDecl>(ivarDecl);
+
+ if (!Ivar->isBitField() || Ivar->getBitWidthValue(Context) == 0)
+ return;
+ ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(CurContext);
+ if (!ID) {
+ if (ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(CurContext)) {
+ if (!CD->IsClassExtension())
+ return;
+ }
+ // No need to add this to end of @implementation.
+ else
+ return;
+ }
+ // All conditions are met. Add a new bitfield to the tail end of ivars.
+ llvm::APInt Zero(Context.getTypeSize(Context.IntTy), 0);
+ Expr * BW = IntegerLiteral::Create(Context, Zero, Context.IntTy, DeclLoc);
+
+ Ivar = ObjCIvarDecl::Create(Context, cast<ObjCContainerDecl>(CurContext),
+ DeclLoc, DeclLoc, 0,
+ Context.CharTy,
+ Context.getTrivialTypeSourceInfo(Context.CharTy,
+ DeclLoc),
+ ObjCIvarDecl::Private, BW,
+ true);
+ AllIvarDecls.push_back(Ivar);
+}
+
+void Sema::ActOnFields(Scope* S,
+ SourceLocation RecLoc, Decl *EnclosingDecl,
+ llvm::ArrayRef<Decl *> Fields,
+ SourceLocation LBrac, SourceLocation RBrac,
+ AttributeList *Attr) {
+ assert(EnclosingDecl && "missing record or interface decl");
+
+ // If the decl this is being inserted into is invalid, then it may be a
+ // redeclaration or some other bogus case. Don't try to add fields to it.
+ if (EnclosingDecl->isInvalidDecl())
+ return;
+
+ RecordDecl *Record = dyn_cast<RecordDecl>(EnclosingDecl);
+
+ // Start counting up the number of named members; make sure to include
+ // members of anonymous structs and unions in the total.
+ unsigned NumNamedMembers = 0;
+ if (Record) {
+ for (RecordDecl::decl_iterator i = Record->decls_begin(),
+ e = Record->decls_end(); i != e; i++) {
+ if (IndirectFieldDecl *IFD = dyn_cast<IndirectFieldDecl>(*i))
+ if (IFD->getDeclName())
+ ++NumNamedMembers;
+ }
+ }
+
+ // Verify that all the fields are okay.
+ SmallVector<FieldDecl*, 32> RecFields;
+
+ bool ARCErrReported = false;
+ for (llvm::ArrayRef<Decl *>::iterator i = Fields.begin(), end = Fields.end();
+ i != end; ++i) {
+ FieldDecl *FD = cast<FieldDecl>(*i);
+
+ // Get the type for the field.
+ const Type *FDTy = FD->getType().getTypePtr();
+
+ if (!FD->isAnonymousStructOrUnion()) {
+ // Remember all fields written by the user.
+ RecFields.push_back(FD);
+ }
+
+ // If the field is already invalid for some reason, don't emit more
+ // diagnostics about it.
+ if (FD->isInvalidDecl()) {
+ EnclosingDecl->setInvalidDecl();
+ continue;
+ }
+
+ // C99 6.7.2.1p2:
+ // A structure or union shall not contain a member with
+ // incomplete or function type (hence, a structure shall not
+ // contain an instance of itself, but may contain a pointer to
+ // an instance of itself), except that the last member of a
+ // structure with more than one named member may have incomplete
+ // array type; such a structure (and any union containing,
+ // possibly recursively, a member that is such a structure)
+ // shall not be a member of a structure or an element of an
+ // array.
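+    // For illustration only (hypothetical inputs, not from the original
+    // source):
+    //   typedef void Fn(void);
+    //   struct B { Fn f; };              // error: field declared as a function
+    //   struct C { int n; int tail[]; }; // OK (C99): flexible array member
+    //   struct D { struct D d; };        // error: field has incomplete type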
+ if (FDTy->isFunctionType()) {
+ // Field declared as a function.
+ Diag(FD->getLocation(), diag::err_field_declared_as_function)
+ << FD->getDeclName();
+ FD->setInvalidDecl();
+ EnclosingDecl->setInvalidDecl();
+ continue;
+ } else if (FDTy->isIncompleteArrayType() && Record &&
+ ((i + 1 == Fields.end() && !Record->isUnion()) ||
+ ((getLangOpts().MicrosoftExt ||
+ getLangOpts().CPlusPlus) &&
+ (i + 1 == Fields.end() || Record->isUnion())))) {
+ // Flexible array member.
+      // Microsoft and g++ are more permissive regarding flexible arrays;
+      // they accept a flexible array in a union and also
+      // as the sole element of a struct/class.
+ if (getLangOpts().MicrosoftExt) {
+ if (Record->isUnion())
+ Diag(FD->getLocation(), diag::ext_flexible_array_union_ms)
+ << FD->getDeclName();
+ else if (Fields.size() == 1)
+ Diag(FD->getLocation(), diag::ext_flexible_array_empty_aggregate_ms)
+ << FD->getDeclName() << Record->getTagKind();
+ } else if (getLangOpts().CPlusPlus) {
+ if (Record->isUnion())
+ Diag(FD->getLocation(), diag::ext_flexible_array_union_gnu)
+ << FD->getDeclName();
+ else if (Fields.size() == 1)
+ Diag(FD->getLocation(), diag::ext_flexible_array_empty_aggregate_gnu)
+ << FD->getDeclName() << Record->getTagKind();
+ } else if (!getLangOpts().C99) {
+ if (Record->isUnion())
+ Diag(FD->getLocation(), diag::ext_flexible_array_union_gnu)
+ << FD->getDeclName();
+ else
+ Diag(FD->getLocation(), diag::ext_c99_flexible_array_member)
+ << FD->getDeclName() << Record->getTagKind();
+ } else if (NumNamedMembers < 1) {
+ Diag(FD->getLocation(), diag::err_flexible_array_empty_struct)
+ << FD->getDeclName();
+ FD->setInvalidDecl();
+ EnclosingDecl->setInvalidDecl();
+ continue;
+ }
+ if (!FD->getType()->isDependentType() &&
+ !Context.getBaseElementType(FD->getType()).isPODType(Context)) {
+ Diag(FD->getLocation(), diag::err_flexible_array_has_nonpod_type)
+ << FD->getDeclName() << FD->getType();
+ FD->setInvalidDecl();
+ EnclosingDecl->setInvalidDecl();
+ continue;
+ }
+ // Okay, we have a legal flexible array member at the end of the struct.
+ if (Record)
+ Record->setHasFlexibleArrayMember(true);
+ } else if (!FDTy->isDependentType() &&
+ RequireCompleteType(FD->getLocation(), FD->getType(),
+ diag::err_field_incomplete)) {
+ // Incomplete type
+ FD->setInvalidDecl();
+ EnclosingDecl->setInvalidDecl();
+ continue;
+ } else if (const RecordType *FDTTy = FDTy->getAs<RecordType>()) {
+ if (FDTTy->getDecl()->hasFlexibleArrayMember()) {
+        // If this is a member of a union, then the entire union becomes
+        // "flexible".
+ if (Record && Record->isUnion()) {
+ Record->setHasFlexibleArrayMember(true);
+ } else {
+ // If this is a struct/class and this is not the last element, reject
+ // it. Note that GCC supports variable sized arrays in the middle of
+ // structures.
+ if (i + 1 != Fields.end())
+ Diag(FD->getLocation(), diag::ext_variable_sized_type_in_struct)
+ << FD->getDeclName() << FD->getType();
+ else {
+ // We support flexible arrays at the end of structs in
+ // other structs as an extension.
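+            // For illustration only (hypothetical input): given
+            //   struct Flex { int n; int tail[]; };
+            // a field 'struct Flex f;' that is the last field of another
+            // struct is accepted here as an extension.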
+ Diag(FD->getLocation(), diag::ext_flexible_array_in_struct)
+ << FD->getDeclName();
+ if (Record)
+ Record->setHasFlexibleArrayMember(true);
+ }
+ }
+ }
+ if (Record && FDTTy->getDecl()->hasObjectMember())
+ Record->setHasObjectMember(true);
+ } else if (FDTy->isObjCObjectType()) {
+      // A field cannot be an Objective-C object.
+ Diag(FD->getLocation(), diag::err_statically_allocated_object)
+ << FixItHint::CreateInsertion(FD->getLocation(), "*");
+ QualType T = Context.getObjCObjectPointerType(FD->getType());
+ FD->setType(T);
+ }
+ else if (!getLangOpts().CPlusPlus) {
+ if (getLangOpts().ObjCAutoRefCount && Record && !ARCErrReported) {
+ // It's an error in ARC if a field has lifetime.
+ // We don't want to report this in a system header, though,
+ // so we just make the field unavailable.
+ // FIXME: that's really not sufficient; we need to make the type
+ // itself invalid to, say, initialize or copy.
+ QualType T = FD->getType();
+ Qualifiers::ObjCLifetime lifetime = T.getObjCLifetime();
+ if (lifetime && lifetime != Qualifiers::OCL_ExplicitNone) {
+ SourceLocation loc = FD->getLocation();
+ if (getSourceManager().isInSystemHeader(loc)) {
+ if (!FD->hasAttr<UnavailableAttr>()) {
+ FD->addAttr(new (Context) UnavailableAttr(loc, Context,
+ "this system field has retaining ownership"));
+ }
+ } else {
+ Diag(FD->getLocation(), diag::err_arc_objc_object_in_struct)
+ << T->isBlockPointerType();
+ }
+ ARCErrReported = true;
+ }
+ }
+ else if (getLangOpts().ObjC1 &&
+ getLangOpts().getGC() != LangOptions::NonGC &&
+ Record && !Record->hasObjectMember()) {
+ if (FD->getType()->isObjCObjectPointerType() ||
+ FD->getType().isObjCGCStrong())
+ Record->setHasObjectMember(true);
+ else if (Context.getAsArrayType(FD->getType())) {
+ QualType BaseType = Context.getBaseElementType(FD->getType());
+ if (BaseType->isRecordType() &&
+ BaseType->getAs<RecordType>()->getDecl()->hasObjectMember())
+ Record->setHasObjectMember(true);
+ else if (BaseType->isObjCObjectPointerType() ||
+ BaseType.isObjCGCStrong())
+ Record->setHasObjectMember(true);
+ }
+ }
+ }
+ // Keep track of the number of named members.
+ if (FD->getIdentifier())
+ ++NumNamedMembers;
+ }
+
+ // Okay, we successfully defined 'Record'.
+ if (Record) {
+ bool Completed = false;
+ if (CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(Record)) {
+ if (!CXXRecord->isInvalidDecl()) {
+ // Set access bits correctly on the directly-declared conversions.
+ UnresolvedSetImpl *Convs = CXXRecord->getConversionFunctions();
+ for (UnresolvedSetIterator I = Convs->begin(), E = Convs->end();
+ I != E; ++I)
+ Convs->setAccess(I, (*I)->getAccess());
+
+ if (!CXXRecord->isDependentType()) {
+ // Objective-C Automatic Reference Counting:
+ // If a class has a non-static data member of Objective-C pointer
+ // type (or array thereof), it is a non-POD type and its
+ // default constructor (if any), copy constructor, copy assignment
+ // operator, and destructor are non-trivial.
+ //
+ // This rule is also handled by CXXRecordDecl::completeDefinition().
+ // However, here we check whether this particular class is only
+ // non-POD because of the presence of an Objective-C pointer member.
+          // If so, objects of this type cannot be shared between code compiled
+          // with automatic reference counting and code compiled with manual
+          // retain/release.
+ if (getLangOpts().ObjCAutoRefCount &&
+ CXXRecord->hasObjectMember() &&
+ CXXRecord->getLinkage() == ExternalLinkage) {
+ if (CXXRecord->isPOD()) {
+ Diag(CXXRecord->getLocation(),
+ diag::warn_arc_non_pod_class_with_object_member)
+ << CXXRecord;
+ } else {
+ // FIXME: Fix-Its would be nice here, but finding a good location
+ // for them is going to be tricky.
+ if (CXXRecord->hasTrivialCopyConstructor())
+ Diag(CXXRecord->getLocation(),
+ diag::warn_arc_trivial_member_function_with_object_member)
+ << CXXRecord << 0;
+ if (CXXRecord->hasTrivialCopyAssignment())
+ Diag(CXXRecord->getLocation(),
+ diag::warn_arc_trivial_member_function_with_object_member)
+ << CXXRecord << 1;
+ if (CXXRecord->hasTrivialDestructor())
+ Diag(CXXRecord->getLocation(),
+ diag::warn_arc_trivial_member_function_with_object_member)
+ << CXXRecord << 2;
+ }
+ }
+
+ // Adjust user-defined destructor exception spec.
+ if (getLangOpts().CPlusPlus0x &&
+ CXXRecord->hasUserDeclaredDestructor())
+ AdjustDestructorExceptionSpec(CXXRecord,CXXRecord->getDestructor());
+
+ // Add any implicitly-declared members to this class.
+ AddImplicitlyDeclaredMembersToClass(CXXRecord);
+
+ // If we have virtual base classes, we may end up finding multiple
+ // final overriders for a given virtual function. Check for this
+ // problem now.
+ if (CXXRecord->getNumVBases()) {
+ CXXFinalOverriderMap FinalOverriders;
+ CXXRecord->getFinalOverriders(FinalOverriders);
+
+ for (CXXFinalOverriderMap::iterator M = FinalOverriders.begin(),
+ MEnd = FinalOverriders.end();
+ M != MEnd; ++M) {
+ for (OverridingMethods::iterator SO = M->second.begin(),
+ SOEnd = M->second.end();
+ SO != SOEnd; ++SO) {
+ assert(SO->second.size() > 0 &&
+                     "Virtual function without overriding functions?");
+ if (SO->second.size() == 1)
+ continue;
+
+ // C++ [class.virtual]p2:
+ // In a derived class, if a virtual member function of a base
+ // class subobject has more than one final overrider the
+ // program is ill-formed.
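+              // For illustration only (hypothetical input):
+              //   struct V { virtual void f(); };
+              //   struct A : virtual V { void f(); };
+              //   struct B : virtual V { void f(); };
+              //   struct D : A, B {};  // error: V::f() has two final
+              //                        //        overriders in D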
+ Diag(Record->getLocation(), diag::err_multiple_final_overriders)
+ << (NamedDecl *)M->first << Record;
+ Diag(M->first->getLocation(),
+ diag::note_overridden_virtual_function);
+ for (OverridingMethods::overriding_iterator
+ OM = SO->second.begin(),
+ OMEnd = SO->second.end();
+ OM != OMEnd; ++OM)
+ Diag(OM->Method->getLocation(), diag::note_final_overrider)
+ << (NamedDecl *)M->first << OM->Method->getParent();
+
+ Record->setInvalidDecl();
+ }
+ }
+ CXXRecord->completeDefinition(&FinalOverriders);
+ Completed = true;
+ }
+ }
+ }
+ }
+
+ if (!Completed)
+ Record->completeDefinition();
+
+ // Now that the record is complete, do any delayed exception spec checks
+ // we were missing.
+ while (!DelayedDestructorExceptionSpecChecks.empty()) {
+ const CXXDestructorDecl *Dtor =
+ DelayedDestructorExceptionSpecChecks.back().first;
+ if (Dtor->getParent() != Record)
+ break;
+
+ assert(!Dtor->getParent()->isDependentType() &&
+ "Should not ever add destructors of templates into the list.");
+ CheckOverridingFunctionExceptionSpec(Dtor,
+ DelayedDestructorExceptionSpecChecks.back().second);
+ DelayedDestructorExceptionSpecChecks.pop_back();
+ }
+
+ } else {
+ ObjCIvarDecl **ClsFields =
+ reinterpret_cast<ObjCIvarDecl**>(RecFields.data());
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(EnclosingDecl)) {
+ ID->setEndOfDefinitionLoc(RBrac);
+      // Add ivars to the class's DeclContext.
+ for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+ ClsFields[i]->setLexicalDeclContext(ID);
+ ID->addDecl(ClsFields[i]);
+ }
+ // Must enforce the rule that ivars in the base classes may not be
+ // duplicates.
+ if (ID->getSuperClass())
+ DiagnoseDuplicateIvars(ID, ID->getSuperClass());
+ } else if (ObjCImplementationDecl *IMPDecl =
+ dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
+ assert(IMPDecl && "ActOnFields - missing ObjCImplementationDecl");
+ for (unsigned I = 0, N = RecFields.size(); I != N; ++I)
+        // An ivar declared in an @implementation never belongs to the
+        // implementation; it is only in the implementation's lexical context.
+ ClsFields[I]->setLexicalDeclContext(IMPDecl);
+ CheckImplementationIvars(IMPDecl, ClsFields, RecFields.size(), RBrac);
+ IMPDecl->setIvarLBraceLoc(LBrac);
+ IMPDecl->setIvarRBraceLoc(RBrac);
+ } else if (ObjCCategoryDecl *CDecl =
+ dyn_cast<ObjCCategoryDecl>(EnclosingDecl)) {
+      // Case of ivars in a class extension; all other cases have been
+      // reported as errors elsewhere.
+      // FIXME: Class extension does not have a LocEnd field.
+      // CDecl->setLocEnd(RBrac);
+      // Add ivars to the class extension's DeclContext.
+ // Diagnose redeclaration of private ivars.
+ ObjCInterfaceDecl *IDecl = CDecl->getClassInterface();
+ for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+ if (IDecl) {
+ if (const ObjCIvarDecl *ClsIvar =
+ IDecl->getIvarDecl(ClsFields[i]->getIdentifier())) {
+ Diag(ClsFields[i]->getLocation(),
+ diag::err_duplicate_ivar_declaration);
+ Diag(ClsIvar->getLocation(), diag::note_previous_definition);
+ continue;
+ }
+ for (const ObjCCategoryDecl *ClsExtDecl =
+ IDecl->getFirstClassExtension();
+ ClsExtDecl; ClsExtDecl = ClsExtDecl->getNextClassExtension()) {
+ if (const ObjCIvarDecl *ClsExtIvar =
+ ClsExtDecl->getIvarDecl(ClsFields[i]->getIdentifier())) {
+ Diag(ClsFields[i]->getLocation(),
+ diag::err_duplicate_ivar_declaration);
+ Diag(ClsExtIvar->getLocation(), diag::note_previous_definition);
+ continue;
+ }
+ }
+ }
+ ClsFields[i]->setLexicalDeclContext(CDecl);
+ CDecl->addDecl(ClsFields[i]);
+ }
+ CDecl->setIvarLBraceLoc(LBrac);
+ CDecl->setIvarRBraceLoc(RBrac);
+ }
+ }
+
+ if (Attr)
+ ProcessDeclAttributeList(S, Record, Attr);
+
+ // If there's a #pragma GCC visibility in scope, and this isn't a subclass,
+ // set the visibility of this record.
+ if (Record && !Record->getDeclContext()->isRecord())
+ AddPushedVisibilityAttribute(Record);
+}
+
+/// \brief Determine whether the given integral value is representable within
+/// the given type T.
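+///
+/// For illustration only (worked example, not from the original source): the
+/// value 255 has 8 active bits, so it fits in 'unsigned char' (8 value bits)
+/// but not in 'signed char' (7 value bits once the sign bit is subtracted).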
+static bool isRepresentableIntegerValue(ASTContext &Context,
+ llvm::APSInt &Value,
+ QualType T) {
+ assert(T->isIntegralType(Context) && "Integral type required!");
+ unsigned BitWidth = Context.getIntWidth(T);
+
+ if (Value.isUnsigned() || Value.isNonNegative()) {
+ if (T->isSignedIntegerOrEnumerationType())
+ --BitWidth;
+ return Value.getActiveBits() <= BitWidth;
+ }
+ return Value.getMinSignedBits() <= BitWidth;
+}
+
+/// \brief Given an integral type, return the next larger integral type
+/// (or a NULL type if no such type exists).
+static QualType getNextLargerIntegralType(ASTContext &Context, QualType T) {
+ // FIXME: Int128/UInt128 support, which also needs to be introduced into
+ // enum checking below.
+ assert(T->isIntegralType(Context) && "Integral type required!");
+ const unsigned NumTypes = 4;
+ QualType SignedIntegralTypes[NumTypes] = {
+ Context.ShortTy, Context.IntTy, Context.LongTy, Context.LongLongTy
+ };
+ QualType UnsignedIntegralTypes[NumTypes] = {
+ Context.UnsignedShortTy, Context.UnsignedIntTy, Context.UnsignedLongTy,
+ Context.UnsignedLongLongTy
+ };
+
+ unsigned BitWidth = Context.getTypeSize(T);
+ QualType *Types = T->isSignedIntegerOrEnumerationType()? SignedIntegralTypes
+ : UnsignedIntegralTypes;
+ for (unsigned I = 0; I != NumTypes; ++I)
+ if (Context.getTypeSize(Types[I]) > BitWidth)
+ return Types[I];
+
+ return QualType();
+}
+
+EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
+ EnumConstantDecl *LastEnumConst,
+ SourceLocation IdLoc,
+ IdentifierInfo *Id,
+ Expr *Val) {
+ unsigned IntWidth = Context.getTargetInfo().getIntWidth();
+ llvm::APSInt EnumVal(IntWidth);
+ QualType EltTy;
+
+ if (Val && DiagnoseUnexpandedParameterPack(Val, UPPC_EnumeratorValue))
+ Val = 0;
+
+ if (Val)
+ Val = DefaultLvalueConversion(Val).take();
+
+ if (Val) {
+ if (Enum->isDependentType() || Val->isTypeDependent())
+ EltTy = Context.DependentTy;
+ else {
+ SourceLocation ExpLoc;
+ if (getLangOpts().CPlusPlus0x && Enum->isFixed() &&
+ !getLangOpts().MicrosoftMode) {
+ // C++11 [dcl.enum]p5: If the underlying type is fixed, [...] the
+ // constant-expression in the enumerator-definition shall be a converted
+ // constant expression of the underlying type.
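+        // For illustration only (hypothetical C++11 input):
+        //   enum E : char { Big = 1000 };  // error: 1000 cannot be converted
+        //                                  //        to 'char' without narrowing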
+ EltTy = Enum->getIntegerType();
+ ExprResult Converted =
+ CheckConvertedConstantExpression(Val, EltTy, EnumVal,
+ CCEK_Enumerator);
+ if (Converted.isInvalid())
+ Val = 0;
+ else
+ Val = Converted.take();
+ } else if (!Val->isValueDependent() &&
+ !(Val = VerifyIntegerConstantExpression(Val,
+ &EnumVal).take())) {
+ // C99 6.7.2.2p2: Make sure we have an integer constant expression.
+ } else {
+ if (Enum->isFixed()) {
+ EltTy = Enum->getIntegerType();
+
+ // In Obj-C and Microsoft mode, require the enumeration value to be
+ // representable in the underlying type of the enumeration. In C++11,
+ // we perform a non-narrowing conversion as part of converted constant
+ // expression checking.
+ if (!isRepresentableIntegerValue(Context, EnumVal, EltTy)) {
+ if (getLangOpts().MicrosoftMode) {
+ Diag(IdLoc, diag::ext_enumerator_too_large) << EltTy;
+ Val = ImpCastExprToType(Val, EltTy, CK_IntegralCast).take();
+ } else
+ Diag(IdLoc, diag::err_enumerator_too_large) << EltTy;
+ } else
+ Val = ImpCastExprToType(Val, EltTy, CK_IntegralCast).take();
+ } else if (getLangOpts().CPlusPlus) {
+ // C++11 [dcl.enum]p5:
+ // If the underlying type is not fixed, the type of each enumerator
+ // is the type of its initializing value:
+ // - If an initializer is specified for an enumerator, the
+ // initializing value has the same type as the expression.
+ EltTy = Val->getType();
+ } else {
+ // C99 6.7.2.2p2:
+ // The expression that defines the value of an enumeration constant
+ // shall be an integer constant expression that has a value
+ // representable as an int.
+
+ // Complain if the value is not representable in an int.
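+          // For illustration only (hypothetical input on a target with
+          // 32-bit 'int'):
+          //   enum { Big = 0x100000000 };  // extension: value does not fit
+          //                                //            in 'int'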
+ if (!isRepresentableIntegerValue(Context, EnumVal, Context.IntTy))
+ Diag(IdLoc, diag::ext_enum_value_not_int)
+ << EnumVal.toString(10) << Val->getSourceRange()
+ << (EnumVal.isUnsigned() || EnumVal.isNonNegative());
+ else if (!Context.hasSameType(Val->getType(), Context.IntTy)) {
+ // Force the type of the expression to 'int'.
+ Val = ImpCastExprToType(Val, Context.IntTy, CK_IntegralCast).take();
+ }
+ EltTy = Val->getType();
+ }
+ }
+ }
+ }
+
+ if (!Val) {
+ if (Enum->isDependentType())
+ EltTy = Context.DependentTy;
+ else if (!LastEnumConst) {
+ // C++0x [dcl.enum]p5:
+ // If the underlying type is not fixed, the type of each enumerator
+ // is the type of its initializing value:
+ // - If no initializer is specified for the first enumerator, the
+ // initializing value has an unspecified integral type.
+ //
+ // GCC uses 'int' for its unspecified integral type, as does
+ // C99 6.7.2.2p3.
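+      //
+      // For illustration only (hypothetical input): in 'enum E { A };' the
+      // first enumerator 'A' is given type 'int' here unless the underlying
+      // type is fixed.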
+ if (Enum->isFixed()) {
+ EltTy = Enum->getIntegerType();
+ }
+ else {
+ EltTy = Context.IntTy;
+ }
+ } else {
+ // Assign the last value + 1.
+ EnumVal = LastEnumConst->getInitVal();
+ ++EnumVal;
+ EltTy = LastEnumConst->getType();
+
+ // Check for overflow on increment.
+ if (EnumVal < LastEnumConst->getInitVal()) {
+ // C++0x [dcl.enum]p5:
+ // If the underlying type is not fixed, the type of each enumerator
+ // is the type of its initializing value:
+ //
+ // - Otherwise the type of the initializing value is the same as
+ // the type of the initializing value of the preceding enumerator
+ // unless the incremented value is not representable in that type,
+ // in which case the type is an unspecified integral type
+ // sufficient to contain the incremented value. If no such type
+ // exists, the program is ill-formed.
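+        //
+        // For illustration only (hypothetical C input, assuming INT_MAX from
+        // <limits.h>): in 'enum { A = INT_MAX, B };' the incremented value
+        // for 'B' overflows 'int', so a wider integral type is chosen for it
+        // (or a diagnostic is issued if none exists).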
+ QualType T = getNextLargerIntegralType(Context, EltTy);
+ if (T.isNull() || Enum->isFixed()) {
+          // There is no integral type large enough to represent this
+ // value. Complain, then allow the value to wrap around.
+ EnumVal = LastEnumConst->getInitVal();
+ EnumVal = EnumVal.zext(EnumVal.getBitWidth() * 2);
+ ++EnumVal;
+ if (Enum->isFixed())
+ // When the underlying type is fixed, this is ill-formed.
+ Diag(IdLoc, diag::err_enumerator_wrapped)
+ << EnumVal.toString(10)
+ << EltTy;
+ else
+ Diag(IdLoc, diag::warn_enumerator_too_large)
+ << EnumVal.toString(10);
+ } else {
+ EltTy = T;
+ }
+
+        // Retrieve the last enumerator's value, extend that type to the
+ // type that is supposed to be large enough to represent the incremented
+ // value, then increment.
+ EnumVal = LastEnumConst->getInitVal();
+ EnumVal.setIsSigned(EltTy->isSignedIntegerOrEnumerationType());
+ EnumVal = EnumVal.zextOrTrunc(Context.getIntWidth(EltTy));
+ ++EnumVal;
+
+ // If we're not in C++, diagnose the overflow of enumerator values,
+ // which in C99 means that the enumerator value is not representable in
+ // an int (C99 6.7.2.2p2). However, we support GCC's extension that
+ // permits enumerator values that are representable in some larger
+ // integral type.
+ if (!getLangOpts().CPlusPlus && !T.isNull())
+ Diag(IdLoc, diag::warn_enum_value_overflow);
+ } else if (!getLangOpts().CPlusPlus &&
+ !isRepresentableIntegerValue(Context, EnumVal, EltTy)) {
+ // Enforce C99 6.7.2.2p2 even when we compute the next value.
+ Diag(IdLoc, diag::ext_enum_value_not_int)
+ << EnumVal.toString(10) << 1;
+ }
+ }
+ }
+
+ if (!EltTy->isDependentType()) {
+ // Make the enumerator value match the signedness and size of the
+ // enumerator's type.
+ EnumVal = EnumVal.extOrTrunc(Context.getIntWidth(EltTy));
+ EnumVal.setIsSigned(EltTy->isSignedIntegerOrEnumerationType());
+ }
+
+ return EnumConstantDecl::Create(Context, Enum, IdLoc, Id, EltTy,
+ Val, EnumVal);
+}
+
+
+Decl *Sema::ActOnEnumConstant(Scope *S, Decl *theEnumDecl, Decl *lastEnumConst,
+ SourceLocation IdLoc, IdentifierInfo *Id,
+ AttributeList *Attr,
+ SourceLocation EqualLoc, Expr *Val) {
+ EnumDecl *TheEnumDecl = cast<EnumDecl>(theEnumDecl);
+ EnumConstantDecl *LastEnumConst =
+ cast_or_null<EnumConstantDecl>(lastEnumConst);
+
+ // The scope passed in may not be a decl scope. Zip up the scope tree until
+ // we find one that is.
+ S = getNonFieldDeclScope(S);
+
+ // Verify that there isn't already something declared with this name in this
+ // scope.
+ NamedDecl *PrevDecl = LookupSingleName(S, Id, IdLoc, LookupOrdinaryName,
+ ForRedeclaration);
+ if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(IdLoc, PrevDecl);
+ // Just pretend that we didn't see the previous declaration.
+ PrevDecl = 0;
+ }
+
+ if (PrevDecl) {
+ // When in C++, we may get a TagDecl with the same name; in this case the
+ // enum constant will 'hide' the tag.
+ assert((getLangOpts().CPlusPlus || !isa<TagDecl>(PrevDecl)) &&
+ "Received TagDecl when not in C++!");
+ if (!isa<TagDecl>(PrevDecl) && isDeclInScope(PrevDecl, CurContext, S)) {
+ if (isa<EnumConstantDecl>(PrevDecl))
+ Diag(IdLoc, diag::err_redefinition_of_enumerator) << Id;
+ else
+ Diag(IdLoc, diag::err_redefinition) << Id;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ return 0;
+ }
+ }
+
+ // C++ [class.mem]p13:
+ // If T is the name of a class, then each of the following shall have a
+ // name different from T:
+ // - every enumerator of every member of class T that is an enumerated
+ // type
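+  //
+  // For illustration only (hypothetical input):
+  //   struct S { enum E { S }; };  // error: enumerator 'S' has the same name
+  //                                //        as its enclosing class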
+ if (CXXRecordDecl *Record
+ = dyn_cast<CXXRecordDecl>(
+ TheEnumDecl->getDeclContext()->getRedeclContext()))
+ if (Record->getIdentifier() && Record->getIdentifier() == Id)
+ Diag(IdLoc, diag::err_member_name_of_class) << Id;
+
+ EnumConstantDecl *New =
+ CheckEnumConstant(TheEnumDecl, LastEnumConst, IdLoc, Id, Val);
+
+ if (New) {
+ // Process attributes.
+ if (Attr) ProcessDeclAttributeList(S, New, Attr);
+
+ // Register this decl in the current scope stack.
+ New->setAccess(TheEnumDecl->getAccess());
+ PushOnScopeChains(New, S);
+ }
+
+ return New;
+}
+
+void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc,
+ SourceLocation RBraceLoc, Decl *EnumDeclX,
+ Decl **Elements, unsigned NumElements,
+ Scope *S, AttributeList *Attr) {
+ EnumDecl *Enum = cast<EnumDecl>(EnumDeclX);
+ QualType EnumType = Context.getTypeDeclType(Enum);
+
+ if (Attr)
+ ProcessDeclAttributeList(S, Enum, Attr);
+
+ if (Enum->isDependentType()) {
+ for (unsigned i = 0; i != NumElements; ++i) {
+ EnumConstantDecl *ECD =
+ cast_or_null<EnumConstantDecl>(Elements[i]);
+ if (!ECD) continue;
+
+ ECD->setType(EnumType);
+ }
+
+ Enum->completeDefinition(Context.DependentTy, Context.DependentTy, 0, 0);
+ return;
+ }
+
+ // TODO: If the result value doesn't fit in an int, it must be a long or long
+  // long value. ISO C does not support this, but GCC does as an extension;
+ // emit a warning.
+ unsigned IntWidth = Context.getTargetInfo().getIntWidth();
+ unsigned CharWidth = Context.getTargetInfo().getCharWidth();
+ unsigned ShortWidth = Context.getTargetInfo().getShortWidth();
+
+  // Verify that all the values are okay and compute the size (in bits) of the
+  // values.
+ unsigned NumNegativeBits = 0;
+ unsigned NumPositiveBits = 0;
+
+ // Keep track of whether all elements have type int.
+ bool AllElementsInt = true;
+
+ for (unsigned i = 0; i != NumElements; ++i) {
+ EnumConstantDecl *ECD =
+ cast_or_null<EnumConstantDecl>(Elements[i]);
+ if (!ECD) continue; // Already issued a diagnostic.
+
+ const llvm::APSInt &InitVal = ECD->getInitVal();
+
+ // Keep track of the size of positive and negative values.
+ if (InitVal.isUnsigned() || InitVal.isNonNegative())
+ NumPositiveBits = std::max(NumPositiveBits,
+ (unsigned)InitVal.getActiveBits());
+ else
+ NumNegativeBits = std::max(NumNegativeBits,
+ (unsigned)InitVal.getMinSignedBits());
+
+    // Keep track of whether every enum element has type int (very common).
+ if (AllElementsInt)
+ AllElementsInt = ECD->getType() == Context.IntTy;
+ }
+
+ // Figure out the type that should be used for this enum.
+ QualType BestType;
+ unsigned BestWidth;
+
+ // C++0x N3000 [conv.prom]p3:
+ // An rvalue of an unscoped enumeration type whose underlying
+ // type is not fixed can be converted to an rvalue of the first
+ // of the following types that can represent all the values of
+ // the enumeration: int, unsigned int, long int, unsigned long
+ // int, long long int, or unsigned long long int.
+ // C99 6.4.4.3p2:
+ // An identifier declared as an enumeration constant has type int.
+  //   The C99 rule is modified by a GCC extension.
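+  //
+  // For illustration only (hypothetical inputs):
+  //   enum A { X = 1, Y = 2 };          // every value fits in 'int'
+  //   enum B { M = -1, N = 1LL << 40 }; // needs a type wider than 'int'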
+ QualType BestPromotionType;
+
+  bool Packed = Enum->hasAttr<PackedAttr>();
+  // -fshort-enums is equivalent to specifying the packed attribute on all
+ // enum definitions.
+ if (LangOpts.ShortEnums)
+ Packed = true;
+
+ if (Enum->isFixed()) {
+ BestType = Enum->getIntegerType();
+ if (BestType->isPromotableIntegerType())
+ BestPromotionType = Context.getPromotedIntegerType(BestType);
+ else
+ BestPromotionType = BestType;
+ // We don't need to set BestWidth, because BestType is going to be the type
+ // of the enumerators, but we do anyway because otherwise some compilers
+ // warn that it might be used uninitialized.
+ BestWidth = CharWidth;
+ }
+ else if (NumNegativeBits) {
+ // If there is a negative value, figure out the smallest integer type (of
+ // int/long/longlong) that fits.
+ // If it's packed, check also if it fits a char or a short.
+ if (Packed && NumNegativeBits <= CharWidth && NumPositiveBits < CharWidth) {
+ BestType = Context.SignedCharTy;
+ BestWidth = CharWidth;
+ } else if (Packed && NumNegativeBits <= ShortWidth &&
+ NumPositiveBits < ShortWidth) {
+ BestType = Context.ShortTy;
+ BestWidth = ShortWidth;
+ } else if (NumNegativeBits <= IntWidth && NumPositiveBits < IntWidth) {
+ BestType = Context.IntTy;
+ BestWidth = IntWidth;
+ } else {
+ BestWidth = Context.getTargetInfo().getLongWidth();
+
+ if (NumNegativeBits <= BestWidth && NumPositiveBits < BestWidth) {
+ BestType = Context.LongTy;
+ } else {
+ BestWidth = Context.getTargetInfo().getLongLongWidth();
+
+ if (NumNegativeBits > BestWidth || NumPositiveBits >= BestWidth)
+ Diag(Enum->getLocation(), diag::warn_enum_too_large);
+ BestType = Context.LongLongTy;
+ }
+ }
+ BestPromotionType = (BestWidth <= IntWidth ? Context.IntTy : BestType);
+ } else {
+ // If there is no negative value, figure out the smallest type that fits
+ // all of the enumerator values.
+ // If it's packed, check also if it fits a char or a short.
+ if (Packed && NumPositiveBits <= CharWidth) {
+ BestType = Context.UnsignedCharTy;
+ BestPromotionType = Context.IntTy;
+ BestWidth = CharWidth;
+ } else if (Packed && NumPositiveBits <= ShortWidth) {
+ BestType = Context.UnsignedShortTy;
+ BestPromotionType = Context.IntTy;
+ BestWidth = ShortWidth;
+ } else if (NumPositiveBits <= IntWidth) {
+ BestType = Context.UnsignedIntTy;
+ BestWidth = IntWidth;
+ BestPromotionType
+ = (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus)
+ ? Context.UnsignedIntTy : Context.IntTy;
+ } else if (NumPositiveBits <=
+ (BestWidth = Context.getTargetInfo().getLongWidth())) {
+ BestType = Context.UnsignedLongTy;
+ BestPromotionType
+ = (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus)
+ ? Context.UnsignedLongTy : Context.LongTy;
+ } else {
+ BestWidth = Context.getTargetInfo().getLongLongWidth();
+ assert(NumPositiveBits <= BestWidth &&
+ "How could an initializer get larger than ULL?");
+ BestType = Context.UnsignedLongLongTy;
+ BestPromotionType
+ = (NumPositiveBits == BestWidth || !getLangOpts().CPlusPlus)
+ ? Context.UnsignedLongLongTy : Context.LongLongTy;
+ }
+ }
+
+ // Loop over all of the enumerator constants, changing their types to match
+ // the type of the enum if needed.
+ for (unsigned i = 0; i != NumElements; ++i) {
+ EnumConstantDecl *ECD = cast_or_null<EnumConstantDecl>(Elements[i]);
+ if (!ECD) continue; // Already issued a diagnostic.
+
+ // Standard C says the enumerators have int type, but we allow, as an
+ // extension, the enumerators to be larger than int size. If each
+ // enumerator value fits in an int, type it as an int, otherwise type it the
+ // same as the enumerator decl itself. This means that in "enum { X = 1U }"
+ // that X has type 'int', not 'unsigned'.
+
+ // Determine whether the value fits into an int.
+ llvm::APSInt InitVal = ECD->getInitVal();
+
+ // If it fits into an integer type, force it. Otherwise force it to match
+ // the enum decl type.
+ QualType NewTy;
+ unsigned NewWidth;
+ bool NewSign;
+ if (!getLangOpts().CPlusPlus &&
+ !Enum->isFixed() &&
+ isRepresentableIntegerValue(Context, InitVal, Context.IntTy)) {
+ NewTy = Context.IntTy;
+ NewWidth = IntWidth;
+ NewSign = true;
+ } else if (ECD->getType() == BestType) {
+ // Already the right type!
+ if (getLangOpts().CPlusPlus)
+ // C++ [dcl.enum]p4: Following the closing brace of an
+ // enum-specifier, each enumerator has the type of its
+ // enumeration.
+ ECD->setType(EnumType);
+ continue;
+ } else {
+ NewTy = BestType;
+ NewWidth = BestWidth;
+ NewSign = BestType->isSignedIntegerOrEnumerationType();
+ }
+
+ // Adjust the APSInt value.
+ InitVal = InitVal.extOrTrunc(NewWidth);
+ InitVal.setIsSigned(NewSign);
+ ECD->setInitVal(InitVal);
+
+ // Adjust the Expr initializer and type.
+ if (ECD->getInitExpr() &&
+ !Context.hasSameType(NewTy, ECD->getInitExpr()->getType()))
+ ECD->setInitExpr(ImplicitCastExpr::Create(Context, NewTy,
+ CK_IntegralCast,
+ ECD->getInitExpr(),
+ /*base paths*/ 0,
+ VK_RValue));
+ if (getLangOpts().CPlusPlus)
+ // C++ [dcl.enum]p4: Following the closing brace of an
+ // enum-specifier, each enumerator has the type of its
+ // enumeration.
+ ECD->setType(EnumType);
+ else
+ ECD->setType(NewTy);
+ }
+
+ Enum->completeDefinition(BestType, BestPromotionType,
+ NumPositiveBits, NumNegativeBits);
+
+ // If we're declaring a function, ensure this decl isn't forgotten about -
+ // it needs to go into the function scope.
+ if (InFunctionDeclarator)
+ DeclsInPrototypeScope.push_back(Enum);
+
+}
+
+Decl *Sema::ActOnFileScopeAsmDecl(Expr *expr,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ StringLiteral *AsmString = cast<StringLiteral>(expr);
+
+ FileScopeAsmDecl *New = FileScopeAsmDecl::Create(Context, CurContext,
+ AsmString, StartLoc,
+ EndLoc);
+ CurContext->addDecl(New);
+ return New;
+}
+
+DeclResult Sema::ActOnModuleImport(SourceLocation AtLoc,
+ SourceLocation ImportLoc,
+ ModuleIdPath Path) {
+ Module *Mod = PP.getModuleLoader().loadModule(ImportLoc, Path,
+ Module::AllVisible,
+ /*IsIncludeDirective=*/false);
+ if (!Mod)
+ return true;
+
+ llvm::SmallVector<SourceLocation, 2> IdentifierLocs;
+ Module *ModCheck = Mod;
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ // If we've run out of module parents, just drop the remaining identifiers.
+ // We need the length to be consistent.
+ if (!ModCheck)
+ break;
+ ModCheck = ModCheck->Parent;
+
+ IdentifierLocs.push_back(Path[I].second);
+ }
+
+ ImportDecl *Import = ImportDecl::Create(Context,
+ Context.getTranslationUnitDecl(),
+ AtLoc.isValid()? AtLoc : ImportLoc,
+ Mod, IdentifierLocs);
+ Context.getTranslationUnitDecl()->addDecl(Import);
+ return Import;
+}
+
+void Sema::ActOnPragmaRedefineExtname(IdentifierInfo* Name,
+ IdentifierInfo* AliasName,
+ SourceLocation PragmaLoc,
+ SourceLocation NameLoc,
+ SourceLocation AliasNameLoc) {
+ Decl *PrevDecl = LookupSingleName(TUScope, Name, NameLoc,
+ LookupOrdinaryName);
+ AsmLabelAttr *Attr =
+ ::new (Context) AsmLabelAttr(AliasNameLoc, Context, AliasName->getName());
+
+ if (PrevDecl)
+ PrevDecl->addAttr(Attr);
+ else
+ (void)ExtnameUndeclaredIdentifiers.insert(
+ std::pair<IdentifierInfo*,AsmLabelAttr*>(Name, Attr));
+}
+
+void Sema::ActOnPragmaWeakID(IdentifierInfo* Name,
+ SourceLocation PragmaLoc,
+ SourceLocation NameLoc) {
+ Decl *PrevDecl = LookupSingleName(TUScope, Name, NameLoc, LookupOrdinaryName);
+
+ if (PrevDecl) {
+ PrevDecl->addAttr(::new (Context) WeakAttr(PragmaLoc, Context));
+ } else {
+ (void)WeakUndeclaredIdentifiers.insert(
+ std::pair<IdentifierInfo*,WeakInfo>
+ (Name, WeakInfo((IdentifierInfo*)0, NameLoc)));
+ }
+}
+
+void Sema::ActOnPragmaWeakAlias(IdentifierInfo* Name,
+ IdentifierInfo* AliasName,
+ SourceLocation PragmaLoc,
+ SourceLocation NameLoc,
+ SourceLocation AliasNameLoc) {
+ Decl *PrevDecl = LookupSingleName(TUScope, AliasName, AliasNameLoc,
+ LookupOrdinaryName);
+ WeakInfo W = WeakInfo(Name, NameLoc);
+
+ if (PrevDecl) {
+ if (!PrevDecl->hasAttr<AliasAttr>())
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(PrevDecl))
+ DeclApplyPragmaWeak(TUScope, ND, W);
+ } else {
+ (void)WeakUndeclaredIdentifiers.insert(
+ std::pair<IdentifierInfo*,WeakInfo>(AliasName, W));
+ }
+}
+
+Decl *Sema::getObjCDeclContext() const {
+ return (dyn_cast_or_null<ObjCContainerDecl>(CurContext));
+}
+
+AvailabilityResult Sema::getCurContextAvailability() const {
+ const Decl *D = cast<Decl>(getCurLexicalContext());
+ // A category implicitly has the availability of the interface.
+ if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(D))
+ D = CatD->getClassInterface();
+
+ return D->getAvailability();
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
new file mode 100644
index 0000000..5c6ddd2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclAttr.cpp
@@ -0,0 +1,4171 @@
+//===--- SemaDeclAttr.cpp - Declaration Attribute Handling ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements decl-related attribute processing.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "TargetAttributesSema.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/Lookup.h"
+#include "llvm/ADT/StringExtras.h"
+using namespace clang;
+using namespace sema;
+
+/// These constants match the enumerated choices of
+/// warn_attribute_wrong_decl_type and err_attribute_wrong_decl_type.
+enum AttributeDeclKind {
+ ExpectedFunction,
+ ExpectedUnion,
+ ExpectedVariableOrFunction,
+ ExpectedFunctionOrMethod,
+ ExpectedParameter,
+ ExpectedFunctionMethodOrBlock,
+ ExpectedFunctionMethodOrParameter,
+ ExpectedClass,
+ ExpectedVariable,
+ ExpectedMethod,
+ ExpectedVariableFunctionOrLabel,
+ ExpectedFieldOrGlobalVar,
+ ExpectedStruct
+};
+
+//===----------------------------------------------------------------------===//
+// Helper functions
+//===----------------------------------------------------------------------===//
+
+static const FunctionType *getFunctionType(const Decl *D,
+ bool blocksToo = true) {
+ QualType Ty;
+ if (const ValueDecl *decl = dyn_cast<ValueDecl>(D))
+ Ty = decl->getType();
+ else if (const FieldDecl *decl = dyn_cast<FieldDecl>(D))
+ Ty = decl->getType();
+ else if (const TypedefNameDecl* decl = dyn_cast<TypedefNameDecl>(D))
+ Ty = decl->getUnderlyingType();
+ else
+ return 0;
+
+ if (Ty->isFunctionPointerType())
+ Ty = Ty->getAs<PointerType>()->getPointeeType();
+ else if (blocksToo && Ty->isBlockPointerType())
+ Ty = Ty->getAs<BlockPointerType>()->getPointeeType();
+
+ return Ty->getAs<FunctionType>();
+}
+
+// FIXME: We should provide an abstraction around a method or function
+// to provide the following bits of information.
+
+/// isFunction - Return true if the given decl has function
+/// type (function or function-typed variable).
+static bool isFunction(const Decl *D) {
+ return getFunctionType(D, false) != NULL;
+}
+
+/// isFunctionOrMethod - Return true if the given decl has function
+/// type (function or function-typed variable) or an Objective-C
+/// method.
+static bool isFunctionOrMethod(const Decl *D) {
+  return isFunction(D) || isa<ObjCMethodDecl>(D);
+}
+
+/// isFunctionOrMethodOrBlock - Return true if the given decl has function
+/// type (function or function-typed variable) or an Objective-C
+/// method or a block.
+static bool isFunctionOrMethodOrBlock(const Decl *D) {
+ if (isFunctionOrMethod(D))
+ return true;
+  // Checking for a block is more involved.
+ if (const VarDecl *V = dyn_cast<VarDecl>(D)) {
+ QualType Ty = V->getType();
+ return Ty->isBlockPointerType();
+ }
+ return isa<BlockDecl>(D);
+}
+
+/// Return true if the given decl has a declarator that should have
+/// been processed by Sema::GetTypeForDeclarator.
+static bool hasDeclarator(const Decl *D) {
+ // In some sense, TypedefDecl really *ought* to be a DeclaratorDecl.
+ return isa<DeclaratorDecl>(D) || isa<BlockDecl>(D) || isa<TypedefNameDecl>(D) ||
+ isa<ObjCPropertyDecl>(D);
+}
+
+/// hasFunctionProto - Return true if the given decl has argument
+/// information. This decl should have already passed
+/// isFunctionOrMethod or isFunctionOrMethodOrBlock.
+static bool hasFunctionProto(const Decl *D) {
+ if (const FunctionType *FnTy = getFunctionType(D))
+ return isa<FunctionProtoType>(FnTy);
+ else {
+ assert(isa<ObjCMethodDecl>(D) || isa<BlockDecl>(D));
+ return true;
+ }
+}
+
+/// getFunctionOrMethodNumArgs - Return number of function or method
+/// arguments. It is an error to call this on a K&R function (use
+/// hasFunctionProto first).
+static unsigned getFunctionOrMethodNumArgs(const Decl *D) {
+ if (const FunctionType *FnTy = getFunctionType(D))
+ return cast<FunctionProtoType>(FnTy)->getNumArgs();
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ return BD->getNumParams();
+ return cast<ObjCMethodDecl>(D)->param_size();
+}
+
+static QualType getFunctionOrMethodArgType(const Decl *D, unsigned Idx) {
+ if (const FunctionType *FnTy = getFunctionType(D))
+ return cast<FunctionProtoType>(FnTy)->getArgType(Idx);
+ if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ return BD->getParamDecl(Idx)->getType();
+
+ return cast<ObjCMethodDecl>(D)->param_begin()[Idx]->getType();
+}
+
+static QualType getFunctionOrMethodResultType(const Decl *D) {
+ if (const FunctionType *FnTy = getFunctionType(D))
+ return cast<FunctionProtoType>(FnTy)->getResultType();
+ return cast<ObjCMethodDecl>(D)->getResultType();
+}
+
+static bool isFunctionOrMethodVariadic(const Decl *D) {
+ if (const FunctionType *FnTy = getFunctionType(D)) {
+ const FunctionProtoType *proto = cast<FunctionProtoType>(FnTy);
+ return proto->isVariadic();
+ } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ return BD->isVariadic();
+ else {
+ return cast<ObjCMethodDecl>(D)->isVariadic();
+ }
+}
+
+static bool isInstanceMethod(const Decl *D) {
+ if (const CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(D))
+ return MethodDecl->isInstance();
+ return false;
+}
+
+static inline bool isNSStringType(QualType T, ASTContext &Ctx) {
+ const ObjCObjectPointerType *PT = T->getAs<ObjCObjectPointerType>();
+ if (!PT)
+ return false;
+
+ ObjCInterfaceDecl *Cls = PT->getObjectType()->getInterface();
+ if (!Cls)
+ return false;
+
+ IdentifierInfo* ClsName = Cls->getIdentifier();
+
+ // FIXME: Should we walk the chain of classes?
+ return ClsName == &Ctx.Idents.get("NSString") ||
+ ClsName == &Ctx.Idents.get("NSMutableString");
+}
+
+static inline bool isCFStringType(QualType T, ASTContext &Ctx) {
+ const PointerType *PT = T->getAs<PointerType>();
+ if (!PT)
+ return false;
+
+ const RecordType *RT = PT->getPointeeType()->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ const RecordDecl *RD = RT->getDecl();
+ if (RD->getTagKind() != TTK_Struct)
+ return false;
+
+ return RD->getIdentifier() == &Ctx.Idents.get("__CFString");
+}
+
+/// \brief Check if the attribute has exactly as many args as Num. May
+/// output an error.
+static bool checkAttributeNumArgs(Sema &S, const AttributeList &Attr,
+ unsigned int Num) {
+ if (Attr.getNumArgs() != Num) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << Num;
+ return false;
+ }
+
+ return true;
+}
+
+
+/// \brief Check if the attribute has at least as many args as Num. May
+/// output an error.
+static bool checkAttributeAtLeastNumArgs(Sema &S, const AttributeList &Attr,
+ unsigned int Num) {
+ if (Attr.getNumArgs() < Num) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_too_few_arguments) << Num;
+ return false;
+ }
+
+ return true;
+}
+
+///
+/// \brief Check if passed in Decl is a field or potentially shared global var
+/// \return true if the Decl is a field or potentially shared global variable
+///
+static bool mayBeSharedVariable(const Decl *D) {
+ if (isa<FieldDecl>(D))
+ return true;
+ if (const VarDecl *vd = dyn_cast<VarDecl>(D))
+ return (vd->hasGlobalStorage() && !(vd->isThreadSpecified()));
+
+ return false;
+}
+
+/// \brief Check if the passed-in expression is of type int or bool.
+static bool isIntOrBool(Expr *Exp) {
+ QualType QT = Exp->getType();
+ return QT->isBooleanType() || QT->isIntegerType();
+}
+
+///
+/// \brief Check if passed in Decl is a pointer type.
+/// Note that this function may produce an error message.
+/// \return true if the Decl is a pointer type; false otherwise
+///
+static bool checkIsPointer(Sema &S, const Decl *D, const AttributeList &Attr) {
+ if (const ValueDecl *vd = dyn_cast<ValueDecl>(D)) {
+ QualType QT = vd->getType();
+ if (QT->isAnyPointerType())
+ return true;
+ S.Diag(Attr.getLoc(), diag::warn_pointer_attribute_wrong_type)
+ << Attr.getName()->getName() << QT;
+ } else {
+ S.Diag(Attr.getLoc(), diag::err_attribute_can_be_applied_only_to_value_decl)
+ << Attr.getName();
+ }
+ return false;
+}
+
+/// \brief Checks that the passed in QualType either is of RecordType or points
+/// to RecordType. Returns the relevant RecordType, null if it does not exist.
+static const RecordType *getRecordType(QualType QT) {
+ if (const RecordType *RT = QT->getAs<RecordType>())
+ return RT;
+
+ // Now check if we point to record type.
+ if (const PointerType *PT = QT->getAs<PointerType>())
+ return PT->getPointeeType()->getAs<RecordType>();
+
+ return 0;
+}
+
+/// \brief Thread Safety Analysis: Checks that the passed in RecordType
+/// resolves to a lockable object. May flag an error.
+static void checkForLockableRecord(Sema &S, Decl *D, const AttributeList &Attr,
+ QualType Ty) {
+ const RecordType *RT = getRecordType(Ty);
+
+  // Warn if we could not get a record type for this argument.
+ if (!RT) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_argument_not_class)
+ << Attr.getName() << Ty.getAsString();
+ return;
+ }
+ // Don't check for lockable if the class hasn't been defined yet.
+ if (RT->isIncompleteType())
+ return;
+ // Warn if the type is not lockable.
+ if (!RT->getDecl()->getAttr<LockableAttr>()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_argument_not_lockable)
+ << Attr.getName() << Ty.getAsString();
+ return;
+ }
+}
+
+/// \brief Thread Safety Analysis: Checks that all attribute arguments, starting
+/// from Sidx, resolve to a lockable object. May flag an error.
+/// \param Sidx The attribute argument index to start checking with.
+/// \param ParamIdxOk Whether an argument can be indexing into a function
+/// parameter list.
+static bool checkAttrArgsAreLockableObjs(Sema &S, Decl *D,
+ const AttributeList &Attr,
+ SmallVectorImpl<Expr*> &Args,
+ int Sidx = 0,
+ bool ParamIdxOk = false) {
+ for(unsigned Idx = Sidx; Idx < Attr.getNumArgs(); ++Idx) {
+ Expr *ArgExp = Attr.getArg(Idx);
+
+ if (ArgExp->isTypeDependent()) {
+      // FIXME -- need to process this again on template instantiation
+ Args.push_back(ArgExp);
+ continue;
+ }
+
+ QualType ArgTy = ArgExp->getType();
+
+ // First see if we can just cast to record type, or point to record type.
+ const RecordType *RT = getRecordType(ArgTy);
+
+ // Now check if we index into a record type function param.
+ if(!RT && ParamIdxOk) {
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ IntegerLiteral *IL = dyn_cast<IntegerLiteral>(ArgExp);
+ if(FD && IL) {
+ unsigned int NumParams = FD->getNumParams();
+ llvm::APInt ArgValue = IL->getValue();
+ uint64_t ParamIdxFromOne = ArgValue.getZExtValue();
+ uint64_t ParamIdxFromZero = ParamIdxFromOne - 1;
+ if(!ArgValue.isStrictlyPositive() || ParamIdxFromOne > NumParams) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_range)
+ << Attr.getName() << Idx + 1 << NumParams;
+ return false;
+ }
+ ArgTy = FD->getParamDecl(ParamIdxFromZero)->getType();
+ }
+ }
+
+ checkForLockableRecord(S, D, Attr, ArgTy);
+
+ Args.push_back(ArgExp);
+ }
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Attribute Implementations
+//===----------------------------------------------------------------------===//
+
+// FIXME: All this manual attribute parsing code is gross. At the
+// least add some helper functions to check most argument patterns (#
+// and types of args).
+
+static void handleGuardedVarAttr(Sema &S, Decl *D, const AttributeList &Attr,
+ bool pointer = false) {
+ assert(!Attr.isInvalid());
+
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ // D must be either a member field or global (potentially shared) variable.
+ if (!mayBeSharedVariable(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFieldOrGlobalVar;
+ return;
+ }
+
+ if (pointer && !checkIsPointer(S, D, Attr))
+ return;
+
+ if (pointer)
+ D->addAttr(::new (S.Context) PtGuardedVarAttr(Attr.getRange(), S.Context));
+ else
+ D->addAttr(::new (S.Context) GuardedVarAttr(Attr.getRange(), S.Context));
+}
+
+static void handleGuardedByAttr(Sema &S, Decl *D, const AttributeList &Attr,
+ bool pointer = false) {
+ assert(!Attr.isInvalid());
+
+ if (!checkAttributeNumArgs(S, Attr, 1))
+ return;
+
+ Expr *Arg = Attr.getArg(0);
+
+ // D must be either a member field or global (potentially shared) variable.
+ if (!mayBeSharedVariable(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFieldOrGlobalVar;
+ return;
+ }
+
+ if (pointer && !checkIsPointer(S, D, Attr))
+ return;
+
+ if (!Arg->isTypeDependent()) {
+ checkForLockableRecord(S, D, Attr, Arg->getType());
+ }
+
+ if (pointer)
+ D->addAttr(::new (S.Context) PtGuardedByAttr(Attr.getRange(),
+ S.Context, Arg));
+ else
+ D->addAttr(::new (S.Context) GuardedByAttr(Attr.getRange(), S.Context, Arg));
+}
+
+
+static void handleLockableAttr(Sema &S, Decl *D, const AttributeList &Attr,
+ bool scoped = false) {
+ assert(!Attr.isInvalid());
+
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ // FIXME: Lockable structs for C code.
+ if (!isa<CXXRecordDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedClass;
+ return;
+ }
+
+ if (scoped)
+ D->addAttr(::new (S.Context) ScopedLockableAttr(Attr.getRange(), S.Context));
+ else
+ D->addAttr(::new (S.Context) LockableAttr(Attr.getRange(), S.Context));
+}
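+
+// Illustrative usage that the guarded/lockable handlers above are meant to
+// accept (a sketch only; Mutex, mu and balance are hypothetical names):
+//
+//   class __attribute__((lockable)) Mutex { /* ... */ };
+//   Mutex mu;
+//   int balance __attribute__((guarded_by(mu)));
+//   int *shared __attribute__((pt_guarded_by(mu)));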
+
+static void handleNoThreadSafetyAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ assert(!Attr.isInvalid());
+
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) NoThreadSafetyAnalysisAttr(Attr.getRange(),
+ S.Context));
+}
+
+static void handleNoAddressSafetyAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ assert(!Attr.isInvalid());
+
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) NoAddressSafetyAnalysisAttr(Attr.getRange(),
+ S.Context));
+}
+
+static void handleAcquireOrderAttr(Sema &S, Decl *D, const AttributeList &Attr,
+ bool before) {
+ assert(!Attr.isInvalid());
+
+ if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ return;
+
+ // D must be either a member field or global (potentially shared) variable.
+ ValueDecl *VD = dyn_cast<ValueDecl>(D);
+ if (!VD || !mayBeSharedVariable(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFieldOrGlobalVar;
+ return;
+ }
+
+ // Check that this attribute only applies to lockable types
+ QualType QT = VD->getType();
+ if (!QT->isDependentType()) {
+ const RecordType *RT = getRecordType(QT);
+ if (!RT || !RT->getDecl()->getAttr<LockableAttr>()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_decl_not_lockable)
+ << Attr.getName();
+ return;
+ }
+ }
+
+ SmallVector<Expr*, 1> Args;
+ // check that all arguments are lockable objects
+ if (!checkAttrArgsAreLockableObjs(S, D, Attr, Args))
+ return;
+
+ unsigned Size = Args.size();
+ assert(Size == Attr.getNumArgs());
+ Expr **StartArg = Size == 0 ? 0 : &Args[0];
+
+ if (before)
+ D->addAttr(::new (S.Context) AcquiredBeforeAttr(Attr.getRange(), S.Context,
+ StartArg, Size));
+ else
+ D->addAttr(::new (S.Context) AcquiredAfterAttr(Attr.getRange(), S.Context,
+ StartArg, Size));
+}
+
+static void handleLockFunAttr(Sema &S, Decl *D, const AttributeList &Attr,
+ bool exclusive = false) {
+ assert(!Attr.isInvalid());
+
+ // zero or more arguments ok
+
+ // check that the attribute is applied to a function
+ if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ // check that all arguments are lockable objects
+ SmallVector<Expr*, 1> Args;
+ if (!checkAttrArgsAreLockableObjs(S, D, Attr, Args, 0, /*ParamIdxOk=*/true))
+ return;
+
+ unsigned Size = Args.size();
+ assert(Size == Attr.getNumArgs());
+ Expr **StartArg = Size == 0 ? 0 : &Args[0];
+
+ if (exclusive)
+ D->addAttr(::new (S.Context) ExclusiveLockFunctionAttr(Attr.getRange(),
+ S.Context, StartArg,
+ Size));
+ else
+ D->addAttr(::new (S.Context) SharedLockFunctionAttr(Attr.getRange(),
+ S.Context, StartArg,
+ Size));
+}
+
+static void handleTrylockFunAttr(Sema &S, Decl *D, const AttributeList &Attr,
+ bool exclusive = false) {
+ assert(!Attr.isInvalid());
+
+ if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ return;
+
+
+ if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ if (!isIntOrBool(Attr.getArg(0))) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_first_argument_not_int_or_bool)
+ << Attr.getName();
+ return;
+ }
+
+ SmallVector<Expr*, 2> Args;
+ // check that all arguments are lockable objects
+ if (!checkAttrArgsAreLockableObjs(S, D, Attr, Args, 1))
+ return;
+
+ unsigned Size = Args.size();
+ Expr **StartArg = Size == 0 ? 0 : &Args[0];
+
+ if (exclusive)
+ D->addAttr(::new (S.Context) ExclusiveTrylockFunctionAttr(Attr.getRange(),
+ S.Context,
+ Attr.getArg(0),
+ StartArg, Size));
+ else
+ D->addAttr(::new (S.Context) SharedTrylockFunctionAttr(Attr.getRange(),
+ S.Context,
+ Attr.getArg(0),
+ StartArg, Size));
+}
+
+static void handleLocksRequiredAttr(Sema &S, Decl *D, const AttributeList &Attr,
+ bool exclusive = false) {
+ assert(!Attr.isInvalid());
+
+ if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ return;
+
+ if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ // check that all arguments are lockable objects
+ SmallVector<Expr*, 1> Args;
+ if (!checkAttrArgsAreLockableObjs(S, D, Attr, Args))
+ return;
+
+ unsigned Size = Args.size();
+ assert(Size == Attr.getNumArgs());
+ Expr **StartArg = Size == 0 ? 0 : &Args[0];
+
+ if (exclusive)
+ D->addAttr(::new (S.Context) ExclusiveLocksRequiredAttr(Attr.getRange(),
+ S.Context, StartArg,
+ Size));
+ else
+ D->addAttr(::new (S.Context) SharedLocksRequiredAttr(Attr.getRange(),
+ S.Context, StartArg,
+ Size));
+}
+
+static void handleUnlockFunAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ assert(!Attr.isInvalid());
+
+ // zero or more arguments ok
+
+ if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ // check that all arguments are lockable objects
+ SmallVector<Expr*, 1> Args;
+ if (!checkAttrArgsAreLockableObjs(S, D, Attr, Args, 0, /*ParamIdxOk=*/true))
+ return;
+
+ unsigned Size = Args.size();
+ assert(Size == Attr.getNumArgs());
+ Expr **StartArg = Size == 0 ? 0 : &Args[0];
+
+ D->addAttr(::new (S.Context) UnlockFunctionAttr(Attr.getRange(), S.Context,
+ StartArg, Size));
+}
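+
+// Illustrative usage of the lock-function attributes handled above (a sketch
+// only; Mutex and its member functions are hypothetical):
+//
+//   class __attribute__((lockable)) Mutex {
+//   public:
+//     void Lock()    __attribute__((exclusive_lock_function));
+//     bool TryLock() __attribute__((exclusive_trylock_function(true)));
+//     void Unlock()  __attribute__((unlock_function));
+//   };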
+
+static void handleLockReturnedAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ assert(!Attr.isInvalid());
+
+ if (!checkAttributeNumArgs(S, Attr, 1))
+ return;
+ Expr *Arg = Attr.getArg(0);
+
+ if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ if (Arg->isTypeDependent())
+ return;
+
+  // Check that the argument is a lockable object.
+ checkForLockableRecord(S, D, Attr, Arg->getType());
+
+ D->addAttr(::new (S.Context) LockReturnedAttr(Attr.getRange(), S.Context, Arg));
+}
+
+static void handleLocksExcludedAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ assert(!Attr.isInvalid());
+
+ if (!checkAttributeAtLeastNumArgs(S, Attr, 1))
+ return;
+
+ if (!isa<FunctionDecl>(D) && !isa<FunctionTemplateDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ // check that all arguments are lockable objects
+ SmallVector<Expr*, 1> Args;
+ if (!checkAttrArgsAreLockableObjs(S, D, Attr, Args))
+ return;
+
+ unsigned Size = Args.size();
+ assert(Size == Attr.getNumArgs());
+ Expr **StartArg = Size == 0 ? 0 : &Args[0];
+
+ D->addAttr(::new (S.Context) LocksExcludedAttr(Attr.getRange(), S.Context,
+ StartArg, Size));
+}
+
+
+static void handleExtVectorTypeAttr(Sema &S, Scope *scope, Decl *D,
+ const AttributeList &Attr) {
+ TypedefNameDecl *tDecl = dyn_cast<TypedefNameDecl>(D);
+ if (tDecl == 0) {
+ S.Diag(Attr.getLoc(), diag::err_typecheck_ext_vector_not_typedef);
+ return;
+ }
+
+ QualType curType = tDecl->getUnderlyingType();
+
+ Expr *sizeExpr;
+
+ // Special case where the argument is a template id.
+ if (Attr.getParameterName()) {
+ CXXScopeSpec SS;
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId id;
+ id.setIdentifier(Attr.getParameterName(), Attr.getLoc());
+
+ ExprResult Size = S.ActOnIdExpression(scope, SS, TemplateKWLoc, id,
+ false, false);
+ if (Size.isInvalid())
+ return;
+
+ sizeExpr = Size.get();
+ } else {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 1))
+ return;
+
+ sizeExpr = Attr.getArg(0);
+ }
+
+ // Instantiate/Install the vector type, and let Sema build the type for us.
+  // This will run the required checks.
+ QualType T = S.BuildExtVectorType(curType, sizeExpr, Attr.getLoc());
+ if (!T.isNull()) {
+ // FIXME: preserve the old source info.
+ tDecl->setTypeSourceInfo(S.Context.getTrivialTypeSourceInfo(T));
+
+ // Remember this typedef decl, we will need it later for diagnostics.
+ S.ExtVectorDecls.push_back(tDecl);
+ }
+}
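+
+// Illustrative usage (sketch only): ext_vector_type is applied to a typedef,
+// e.g.
+//
+//   typedef float float4 __attribute__((ext_vector_type(4)));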
+
+static void handlePackedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ if (TagDecl *TD = dyn_cast<TagDecl>(D))
+ TD->addAttr(::new (S.Context) PackedAttr(Attr.getRange(), S.Context));
+ else if (FieldDecl *FD = dyn_cast<FieldDecl>(D)) {
+ // If the alignment is less than or equal to 8 bits, the packed attribute
+ // has no effect.
+ if (!FD->getType()->isIncompleteType() &&
+ S.Context.getTypeAlign(FD->getType()) <= 8)
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored_for_field_of_type)
+ << Attr.getName() << FD->getType();
+ else
+ FD->addAttr(::new (S.Context) PackedAttr(Attr.getRange(), S.Context));
+ } else
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+}
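+
+// Illustrative usage (sketch only): packed may annotate a whole tag or a
+// single field.
+//
+//   struct __attribute__((packed)) Header { char tag; int length; };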
+
+static void handleMsStructAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (TagDecl *TD = dyn_cast<TagDecl>(D))
+ TD->addAttr(::new (S.Context) MsStructAttr(Attr.getRange(), S.Context));
+ else
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+}
+
+static void handleIBAction(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ // The IBAction attributes only apply to instance methods.
+ if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ if (MD->isInstanceMethod()) {
+ D->addAttr(::new (S.Context) IBActionAttr(Attr.getRange(), S.Context));
+ return;
+ }
+
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ibaction) << Attr.getName();
+}
+
+static bool checkIBOutletCommon(Sema &S, Decl *D, const AttributeList &Attr) {
+ // The IBOutlet/IBOutletCollection attributes only apply to instance
+ // variables or properties of Objective-C classes. The outlet must also
+ // have an object reference type.
+ if (const ObjCIvarDecl *VD = dyn_cast<ObjCIvarDecl>(D)) {
+ if (!VD->getType()->getAs<ObjCObjectPointerType>()) {
+ S.Diag(Attr.getLoc(), diag::warn_iboutlet_object_type)
+ << Attr.getName() << VD->getType() << 0;
+ return false;
+ }
+ }
+ else if (const ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D)) {
+ if (!PD->getType()->getAs<ObjCObjectPointerType>()) {
+ S.Diag(Attr.getLoc(), diag::warn_iboutlet_object_type)
+ << Attr.getName() << PD->getType() << 1;
+ return false;
+ }
+ }
+ else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_iboutlet) << Attr.getName();
+ return false;
+ }
+
+ return true;
+}
+
+static void handleIBOutlet(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ if (!checkIBOutletCommon(S, D, Attr))
+ return;
+
+ D->addAttr(::new (S.Context) IBOutletAttr(Attr.getRange(), S.Context));
+}
+
+static void handleIBOutletCollection(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+
+ // The iboutletcollection attribute can have zero or one arguments.
+ if (Attr.getParameterName() && Attr.getNumArgs() > 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+
+ if (!checkIBOutletCommon(S, D, Attr))
+ return;
+
+ IdentifierInfo *II = Attr.getParameterName();
+ if (!II)
+ II = &S.Context.Idents.get("NSObject");
+
+ ParsedType TypeRep = S.getTypeName(*II, Attr.getLoc(),
+ S.getScopeForContext(D->getDeclContext()->getParent()));
+ if (!TypeRep) {
+ S.Diag(Attr.getLoc(), diag::err_iboutletcollection_type) << II;
+ return;
+ }
+ QualType QT = TypeRep.get();
+ // Diagnose use of non-object type in iboutletcollection attribute.
+ // FIXME. Gnu attribute extension ignores use of builtin types in
+ // attributes. So, __attribute__((iboutletcollection(char))) will be
+ // treated as __attribute__((iboutletcollection())).
+ if (!QT->isObjCIdType() && !QT->isObjCObjectType()) {
+ S.Diag(Attr.getLoc(), diag::err_iboutletcollection_type) << II;
+ return;
+ }
+ D->addAttr(::new (S.Context) IBOutletCollectionAttr(Attr.getRange(),S.Context,
+ QT, Attr.getParameterLoc()));
+}
+
+static void possibleTransparentUnionPointerType(QualType &T) {
+ if (const RecordType *UT = T->getAsUnionType())
+ if (UT && UT->getDecl()->hasAttr<TransparentUnionAttr>()) {
+ RecordDecl *UD = UT->getDecl();
+ for (RecordDecl::field_iterator it = UD->field_begin(),
+ itend = UD->field_end(); it != itend; ++it) {
+ QualType QT = it->getType();
+ if (QT->isAnyPointerType() || QT->isBlockPointerType()) {
+ T = QT;
+ return;
+ }
+ }
+ }
+}
+
+static void handleNonNullAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // GCC ignores the nonnull attribute on K&R style function prototypes, so we
+ // ignore it as well
+ if (!isFunctionOrMethod(D) || !hasFunctionProto(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunction;
+ return;
+ }
+
+  // In C++ the implicit 'this' function parameter also counts; arguments
+  // are counted from one.
+ bool HasImplicitThisParam = isInstanceMethod(D);
+ unsigned NumArgs = getFunctionOrMethodNumArgs(D) + HasImplicitThisParam;
+
+ // The nonnull attribute only applies to pointers.
+ SmallVector<unsigned, 10> NonNullArgs;
+
+ for (AttributeList::arg_iterator I=Attr.arg_begin(),
+ E=Attr.arg_end(); I!=E; ++I) {
+
+
+ // The argument must be an integer constant expression.
+ Expr *Ex = *I;
+ llvm::APSInt ArgNum(32);
+ if (Ex->isTypeDependent() || Ex->isValueDependent() ||
+ !Ex->isIntegerConstantExpr(ArgNum, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ << "nonnull" << Ex->getSourceRange();
+ return;
+ }
+
+ unsigned x = (unsigned) ArgNum.getZExtValue();
+
+ if (x < 1 || x > NumArgs) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << "nonnull" << I.getArgNum() << Ex->getSourceRange();
+ return;
+ }
+
+ --x;
+ if (HasImplicitThisParam) {
+ if (x == 0) {
+ S.Diag(Attr.getLoc(),
+ diag::err_attribute_invalid_implicit_this_argument)
+ << "nonnull" << Ex->getSourceRange();
+ return;
+ }
+ --x;
+ }
+
+ // Is the function argument a pointer type?
+ QualType T = getFunctionOrMethodArgType(D, x).getNonReferenceType();
+ possibleTransparentUnionPointerType(T);
+
+ if (!T->isAnyPointerType() && !T->isBlockPointerType()) {
+ // FIXME: Should also highlight argument in decl.
+ S.Diag(Attr.getLoc(), diag::warn_nonnull_pointers_only)
+ << "nonnull" << Ex->getSourceRange();
+ continue;
+ }
+
+ NonNullArgs.push_back(x);
+ }
+
+ // If no arguments were specified to __attribute__((nonnull)) then all pointer
+ // arguments have a nonnull attribute.
+ if (NonNullArgs.empty()) {
+ for (unsigned I = 0, E = getFunctionOrMethodNumArgs(D); I != E; ++I) {
+ QualType T = getFunctionOrMethodArgType(D, I).getNonReferenceType();
+ possibleTransparentUnionPointerType(T);
+ if (T->isAnyPointerType() || T->isBlockPointerType())
+ NonNullArgs.push_back(I);
+ }
+
+ // No pointer arguments?
+ if (NonNullArgs.empty()) {
+      // Warn about the trivial case only if the attribute is not coming
+      // from a macro instantiation.
+ if (Attr.getLoc().isFileID())
+ S.Diag(Attr.getLoc(), diag::warn_attribute_nonnull_no_pointers);
+ return;
+ }
+ }
+
+ unsigned* start = &NonNullArgs[0];
+ unsigned size = NonNullArgs.size();
+ llvm::array_pod_sort(start, start + size);
+ D->addAttr(::new (S.Context) NonNullAttr(Attr.getRange(), S.Context, start,
+ size));
+}
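+
+// Illustrative usage (sketch only; the indexes are 1-based and name the
+// pointer parameters that must not be null):
+//
+//   void copy_bytes(void *dst, const void *src, unsigned n)
+//       __attribute__((nonnull(1, 2)));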
+
+static void handleOwnershipAttr(Sema &S, Decl *D, const AttributeList &AL) {
+ // This attribute must be applied to a function declaration.
+ // The first argument to the attribute must be a string,
+ // the name of the resource, for example "malloc".
+ // The following arguments must be argument indexes, the arguments must be
+ // of integer type for Returns, otherwise of pointer type.
+ // The difference between Holds and Takes is that a pointer may still be used
+ // after being held. free() should be __attribute((ownership_takes)), whereas
+ // a list append function may well be __attribute((ownership_holds)).
+
+ if (!AL.getParameterName()) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_n_not_string)
+ << AL.getName()->getName() << 1;
+ return;
+ }
+ // Figure out our Kind, and check arguments while we're at it.
+ OwnershipAttr::OwnershipKind K;
+ switch (AL.getKind()) {
+ case AttributeList::AT_ownership_takes:
+ K = OwnershipAttr::Takes;
+ if (AL.getNumArgs() < 1) {
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments) << 2;
+ return;
+ }
+ break;
+ case AttributeList::AT_ownership_holds:
+ K = OwnershipAttr::Holds;
+ if (AL.getNumArgs() < 1) {
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments) << 2;
+ return;
+ }
+ break;
+ case AttributeList::AT_ownership_returns:
+ K = OwnershipAttr::Returns;
+ if (AL.getNumArgs() > 1) {
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments)
+ << AL.getNumArgs() + 1;
+ return;
+ }
+ break;
+ default:
+ // This should never happen given how we are called.
+ llvm_unreachable("Unknown ownership attribute");
+ }
+
+ if (!isFunction(D) || !hasFunctionProto(D)) {
+ S.Diag(AL.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << AL.getName() << ExpectedFunction;
+ return;
+ }
+
+  // In C++ the implicit 'this' function parameter also counts; arguments
+  // are counted from one.
+ bool HasImplicitThisParam = isInstanceMethod(D);
+ unsigned NumArgs = getFunctionOrMethodNumArgs(D) + HasImplicitThisParam;
+
+ StringRef Module = AL.getParameterName()->getName();
+
+ // Normalize the argument, __foo__ becomes foo.
+ if (Module.startswith("__") && Module.endswith("__"))
+ Module = Module.substr(2, Module.size() - 4);
+
+ SmallVector<unsigned, 10> OwnershipArgs;
+
+ for (AttributeList::arg_iterator I = AL.arg_begin(), E = AL.arg_end(); I != E;
+ ++I) {
+
+ Expr *IdxExpr = *I;
+ llvm::APSInt ArgNum(32);
+ if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent()
+ || !IdxExpr->isIntegerConstantExpr(ArgNum, S.Context)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_not_int)
+ << AL.getName()->getName() << IdxExpr->getSourceRange();
+ continue;
+ }
+
+ unsigned x = (unsigned) ArgNum.getZExtValue();
+
+ if (x > NumArgs || x < 1) {
+ S.Diag(AL.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << AL.getName()->getName() << x << IdxExpr->getSourceRange();
+ continue;
+ }
+ --x;
+ if (HasImplicitThisParam) {
+ if (x == 0) {
+ S.Diag(AL.getLoc(), diag::err_attribute_invalid_implicit_this_argument)
+ << "ownership" << IdxExpr->getSourceRange();
+ return;
+ }
+ --x;
+ }
+
+ switch (K) {
+ case OwnershipAttr::Takes:
+ case OwnershipAttr::Holds: {
+ // Is the function argument a pointer type?
+ QualType T = getFunctionOrMethodArgType(D, x);
+ if (!T->isAnyPointerType() && !T->isBlockPointerType()) {
+ // FIXME: Should also highlight argument in decl.
+ S.Diag(AL.getLoc(), diag::err_ownership_type)
+ << ((K==OwnershipAttr::Takes)?"ownership_takes":"ownership_holds")
+ << "pointer"
+ << IdxExpr->getSourceRange();
+ continue;
+ }
+ break;
+ }
+ case OwnershipAttr::Returns: {
+ if (AL.getNumArgs() > 1) {
+ // Is the function argument an integer type?
+ Expr *IdxExpr = AL.getArg(0);
+ llvm::APSInt ArgNum(32);
+ if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent()
+ || !IdxExpr->isIntegerConstantExpr(ArgNum, S.Context)) {
+ S.Diag(AL.getLoc(), diag::err_ownership_type)
+ << "ownership_returns" << "integer"
+ << IdxExpr->getSourceRange();
+ return;
+ }
+ }
+ break;
+ }
+ } // switch
+
+ // Check we don't have a conflict with another ownership attribute.
+ for (specific_attr_iterator<OwnershipAttr>
+ i = D->specific_attr_begin<OwnershipAttr>(),
+ e = D->specific_attr_end<OwnershipAttr>();
+ i != e; ++i) {
+ if ((*i)->getOwnKind() != K) {
+ for (const unsigned *I = (*i)->args_begin(), *E = (*i)->args_end();
+ I!=E; ++I) {
+ if (x == *I) {
+ S.Diag(AL.getLoc(), diag::err_attributes_are_not_compatible)
+ << AL.getName()->getName() << "ownership_*";
+ }
+ }
+ }
+ }
+ OwnershipArgs.push_back(x);
+ }
+
+ unsigned* start = OwnershipArgs.data();
+ unsigned size = OwnershipArgs.size();
+ llvm::array_pod_sort(start, start + size);
+
+ if (K != OwnershipAttr::Returns && OwnershipArgs.empty()) {
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_number_arguments) << 2;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) OwnershipAttr(AL.getLoc(), S.Context, K, Module,
+ start, size));
+}
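+
+// Illustrative usage of the ownership attributes (sketch only; the function
+// names and List are hypothetical, "malloc" names the resource module, and
+// the trailing indexes are 1-based parameter positions):
+//
+//   void *my_alloc(size_t n) __attribute__((ownership_returns(malloc)));
+//   void my_free(void *p) __attribute__((ownership_takes(malloc, 1)));
+//   void my_hold(List *l, void *p) __attribute__((ownership_holds(malloc, 2)));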
+
+/// Whether this declaration has internal linkage for the purposes of
+/// checks that want to complain about entities that lack internal linkage.
+static bool hasEffectivelyInternalLinkage(NamedDecl *D) {
+ switch (D->getLinkage()) {
+ case NoLinkage:
+ case InternalLinkage:
+ return true;
+
+ // Template instantiations that go from external to unique-external
+ // shouldn't get diagnosed.
+ case UniqueExternalLinkage:
+ return true;
+
+ case ExternalLinkage:
+ return false;
+ }
+ llvm_unreachable("unknown linkage kind!");
+}
+
+static void handleWeakRefAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() > 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+
+ if (!isa<VarDecl>(D) && !isa<FunctionDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedVariableOrFunction;
+ return;
+ }
+
+ NamedDecl *nd = cast<NamedDecl>(D);
+
+ // gcc rejects
+ // class c {
+ // static int a __attribute__((weakref ("v2")));
+ // static int b() __attribute__((weakref ("f3")));
+ // };
+ // and ignores the attributes of
+ // void f(void) {
+ // static int a __attribute__((weakref ("v2")));
+ // }
+ // we reject them
+ const DeclContext *Ctx = D->getDeclContext()->getRedeclContext();
+ if (!Ctx->isFileContext()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_weakref_not_global_context) <<
+ nd->getNameAsString();
+ return;
+ }
+
+ // The GCC manual says
+ //
+ // At present, a declaration to which `weakref' is attached can only
+ // be `static'.
+ //
+ // It also says
+ //
+ // Without a TARGET,
+ // given as an argument to `weakref' or to `alias', `weakref' is
+ // equivalent to `weak'.
+ //
+ // gcc 4.4.1 will accept
+ // int a7 __attribute__((weakref));
+ // as
+ // int a7 __attribute__((weak));
+ // This looks like a bug in gcc. We reject that for now. We should revisit
+ // it if this behaviour is actually used.
+
+ if (!hasEffectivelyInternalLinkage(nd)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_weakref_not_static);
+ return;
+ }
+
+ // GCC rejects
+ // static ((alias ("y"), weakref)).
+ // Should we? How to check that weakref is before or after alias?
+
+ if (Attr.getNumArgs() == 1) {
+ Expr *Arg = Attr.getArg(0);
+ Arg = Arg->IgnoreParenCasts();
+ StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
+
+ if (!Str || !Str->isAscii()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "weakref" << 1;
+ return;
+ }
+ // GCC will accept anything as the argument of weakref. Should we
+ // check for an existing decl?
+ D->addAttr(::new (S.Context) AliasAttr(Attr.getRange(), S.Context,
+ Str->getString()));
+ }
+
+ D->addAttr(::new (S.Context) WeakRefAttr(Attr.getRange(), S.Context));
+}
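+
+// Illustrative usage (sketch only): weakref must appear on a declaration
+// with internal linkage and normally names its target explicitly.
+//
+//   static int helper(void) __attribute__((weakref("real_helper")));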
+
+static void handleAliasAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+
+ Expr *Arg = Attr.getArg(0);
+ Arg = Arg->IgnoreParenCasts();
+ StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
+
+ if (!Str || !Str->isAscii()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "alias" << 1;
+ return;
+ }
+
+ if (S.Context.getTargetInfo().getTriple().isOSDarwin()) {
+ S.Diag(Attr.getLoc(), diag::err_alias_not_supported_on_darwin);
+ return;
+ }
+
+ // FIXME: check if target symbol exists in current file
+
+ D->addAttr(::new (S.Context) AliasAttr(Attr.getRange(), S.Context,
+ Str->getString()));
+}
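+
+// Illustrative usage (sketch only; rejected on Darwin by the check above):
+//
+//   void impl(void) { }
+//   void api(void) __attribute__((alias("impl")));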
+
+static void handleNakedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // Check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ if (!isa<FunctionDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunction;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) NakedAttr(Attr.getRange(), S.Context));
+}
+
+static void handleAlwaysInlineAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ // Check the attribute arguments.
+ if (Attr.hasParameterOrArguments()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<FunctionDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunction;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) AlwaysInlineAttr(Attr.getRange(), S.Context));
+}
+
+static void handleMallocAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // Check the attribute arguments.
+ if (Attr.hasParameterOrArguments()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ QualType RetTy = FD->getResultType();
+ if (RetTy->isAnyPointerType() || RetTy->isBlockPointerType()) {
+ D->addAttr(::new (S.Context) MallocAttr(Attr.getRange(), S.Context));
+ return;
+ }
+ }
+
+ S.Diag(Attr.getLoc(), diag::warn_attribute_malloc_pointer_only);
+}
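+
+// Illustrative usage (sketch only): malloc is only honoured on functions
+// that return a pointer, as checked above.
+//
+//   void *pool_alloc(unsigned n) __attribute__((malloc));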
+
+static void handleMayAliasAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ D->addAttr(::new (S.Context) MayAliasAttr(Attr.getRange(), S.Context));
+}
+
+static void handleNoCommonAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ assert(!Attr.isInvalid());
+ if (isa<VarDecl>(D))
+ D->addAttr(::new (S.Context) NoCommonAttr(Attr.getRange(), S.Context));
+ else
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedVariable;
+}
+
+static void handleCommonAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ assert(!Attr.isInvalid());
+ if (isa<VarDecl>(D))
+ D->addAttr(::new (S.Context) CommonAttr(Attr.getRange(), S.Context));
+ else
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedVariable;
+}
+
+static void handleNoReturnAttr(Sema &S, Decl *D, const AttributeList &attr) {
+ if (hasDeclarator(D)) return;
+
+ if (S.CheckNoReturnAttr(attr)) return;
+
+ if (!isa<ObjCMethodDecl>(D)) {
+ S.Diag(attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) NoReturnAttr(attr.getRange(), S.Context));
+}
+
+bool Sema::CheckNoReturnAttr(const AttributeList &attr) {
+ if (attr.hasParameterOrArguments()) {
+ Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ attr.setInvalid();
+ return true;
+ }
+
+ return false;
+}
+
+static void handleAnalyzerNoReturnAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+
+ // The checking path for 'noreturn' and 'analyzer_noreturn' are different
+ // because 'analyzer_noreturn' does not impact the type.
+
+ if(!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ if (!isFunctionOrMethod(D) && !isa<BlockDecl>(D)) {
+ ValueDecl *VD = dyn_cast<ValueDecl>(D);
+ if (VD == 0 || (!VD->getType()->isBlockPointerType()
+ && !VD->getType()->isFunctionPointerType())) {
+ S.Diag(Attr.getLoc(),
+ Attr.isCXX0XAttribute() ? diag::err_attribute_wrong_decl_type
+ : diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionMethodOrBlock;
+ return;
+ }
+ }
+
+ D->addAttr(::new (S.Context) AnalyzerNoReturnAttr(Attr.getRange(), S.Context));
+}
+
+// PS3 PPU-specific.
+static void handleVecReturnAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+/*
+ Returning a Vector Class in Registers
+
+ According to the PPU ABI specifications, a class with a single member of
+ vector type is returned in memory when used as the return value of a function.
+ This results in inefficient code when implementing vector classes. To return
+ the value in a single vector register, add the vecreturn attribute to the
+ class definition. This attribute is also applicable to struct types.
+
+ Example:
+
+ struct Vector
+ {
+ __vector float xyzw;
+ } __attribute__((vecreturn));
+
+ Vector Add(Vector lhs, Vector rhs)
+ {
+ Vector result;
+ result.xyzw = vec_add(lhs.xyzw, rhs.xyzw);
+ return result; // This will be returned in a register
+ }
+*/
+ if (!isa<RecordDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedClass;
+ return;
+ }
+
+ if (D->getAttr<VecReturnAttr>()) {
+ S.Diag(Attr.getLoc(), diag::err_repeat_attribute) << "vecreturn";
+ return;
+ }
+
+ RecordDecl *record = cast<RecordDecl>(D);
+ int count = 0;
+
+ if (!isa<CXXRecordDecl>(record)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_vecreturn_only_vector_member);
+ return;
+ }
+
+ if (!cast<CXXRecordDecl>(record)->isPOD()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_vecreturn_only_pod_record);
+ return;
+ }
+
+ for (RecordDecl::field_iterator iter = record->field_begin();
+ iter != record->field_end(); iter++) {
+ if ((count == 1) || !iter->getType()->isVectorType()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_vecreturn_only_vector_member);
+ return;
+ }
+ count++;
+ }
+
+ D->addAttr(::new (S.Context) VecReturnAttr(Attr.getRange(), S.Context));
+}
+
+static void handleDependencyAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!isFunctionOrMethod(D) && !isa<ParmVarDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionMethodOrParameter;
+ return;
+ }
+ // FIXME: Actually store the attribute on the declaration
+}
+
+static void handleUnusedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (Attr.hasParameterOrArguments()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<VarDecl>(D) && !isa<ObjCIvarDecl>(D) && !isFunctionOrMethod(D) &&
+ !isa<TypeDecl>(D) && !isa<LabelDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedVariableFunctionOrLabel;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) UnusedAttr(Attr.getRange(), S.Context));
+}
+
+static void handleReturnsTwiceAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (Attr.hasParameterOrArguments()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<FunctionDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunction;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) ReturnsTwiceAttr(Attr.getRange(), S.Context));
+}
+
+static void handleUsedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (Attr.hasParameterOrArguments()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ if (VD->hasLocalStorage() || VD->hasExternalStorage()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "used";
+ return;
+ }
+ } else if (!isFunctionOrMethod(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedVariableOrFunction;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) UsedAttr(Attr.getRange(), S.Context));
+}
+
+static void handleConstructorAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() > 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 1;
+ return;
+ }
+
+ int priority = 65535; // FIXME: Do not hardcode such constants.
+ if (Attr.getNumArgs() > 0) {
+ Expr *E = Attr.getArg(0);
+ llvm::APSInt Idx(32);
+ if (E->isTypeDependent() || E->isValueDependent() ||
+ !E->isIntegerConstantExpr(Idx, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "constructor" << 1 << E->getSourceRange();
+ return;
+ }
+ priority = Idx.getZExtValue();
+ }
+
+ if (!isa<FunctionDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunction;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) ConstructorAttr(Attr.getRange(), S.Context,
+ priority));
+}
+
+static void handleDestructorAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() > 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 1;
+ return;
+ }
+
+ int priority = 65535; // FIXME: Do not hardcode such constants.
+ if (Attr.getNumArgs() > 0) {
+ Expr *E = Attr.getArg(0);
+ llvm::APSInt Idx(32);
+ if (E->isTypeDependent() || E->isValueDependent() ||
+ !E->isIntegerConstantExpr(Idx, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "destructor" << 1 << E->getSourceRange();
+ return;
+ }
+ priority = Idx.getZExtValue();
+ }
+
+ if (!isa<FunctionDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunction;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) DestructorAttr(Attr.getRange(), S.Context,
+ priority));
+}
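+
+// Illustrative usage (sketch only; the optional argument is a priority and
+// defaults to 65535 as above):
+//
+//   static void init_table(void)  __attribute__((constructor(200)));
+//   static void flush_table(void) __attribute__((destructor));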
+
+static void handleDeprecatedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ unsigned NumArgs = Attr.getNumArgs();
+ if (NumArgs > 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 1;
+ return;
+ }
+
+ // Handle the case where deprecated attribute has a text message.
+ StringRef Str;
+ if (NumArgs == 1) {
+ StringLiteral *SE = dyn_cast<StringLiteral>(Attr.getArg(0));
+ if (!SE) {
+ S.Diag(Attr.getArg(0)->getLocStart(), diag::err_attribute_not_string)
+ << "deprecated";
+ return;
+ }
+ Str = SE->getString();
+ }
+
+ D->addAttr(::new (S.Context) DeprecatedAttr(Attr.getRange(), S.Context, Str));
+}
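+
+// Illustrative usage (sketch only): the optional string becomes part of the
+// diagnostic text.
+//
+//   int old_api(void) __attribute__((deprecated("use new_api instead")));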
+
+static void handleUnavailableAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ unsigned NumArgs = Attr.getNumArgs();
+ if (NumArgs > 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 1;
+ return;
+ }
+
+ // Handle the case where unavailable attribute has a text message.
+ StringRef Str;
+ if (NumArgs == 1) {
+ StringLiteral *SE = dyn_cast<StringLiteral>(Attr.getArg(0));
+ if (!SE) {
+ S.Diag(Attr.getArg(0)->getLocStart(),
+ diag::err_attribute_not_string) << "unavailable";
+ return;
+ }
+ Str = SE->getString();
+ }
+ D->addAttr(::new (S.Context) UnavailableAttr(Attr.getRange(), S.Context, Str));
+}
+
+static void handleArcWeakrefUnavailableAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ unsigned NumArgs = Attr.getNumArgs();
+ if (NumArgs > 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 0;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) ArcWeakrefUnavailableAttr(
+ Attr.getRange(), S.Context));
+}
+
+static void handleObjCRootClassAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!isa<ObjCInterfaceDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_requires_objc_interface);
+ return;
+ }
+
+ unsigned NumArgs = Attr.getNumArgs();
+ if (NumArgs > 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 0;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) ObjCRootClassAttr(Attr.getRange(), S.Context));
+}
+
+static void handleObjCRequiresPropertyDefsAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!isa<ObjCInterfaceDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::err_suppress_autosynthesis);
+ return;
+ }
+
+ unsigned NumArgs = Attr.getNumArgs();
+ if (NumArgs > 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 0;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) ObjCRequiresPropertyDefsAttr(
+ Attr.getRange(), S.Context));
+}
+
+static void handleAvailabilityAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ IdentifierInfo *Platform = Attr.getParameterName();
+ SourceLocation PlatformLoc = Attr.getParameterLoc();
+
+ StringRef PlatformName
+ = AvailabilityAttr::getPrettyPlatformName(Platform->getName());
+ if (PlatformName.empty()) {
+ S.Diag(PlatformLoc, diag::warn_availability_unknown_platform)
+ << Platform;
+
+ PlatformName = Platform->getName();
+ }
+
+ AvailabilityChange Introduced = Attr.getAvailabilityIntroduced();
+ AvailabilityChange Deprecated = Attr.getAvailabilityDeprecated();
+ AvailabilityChange Obsoleted = Attr.getAvailabilityObsoleted();
+ bool IsUnavailable = Attr.getUnavailableLoc().isValid();
+
+ // Ensure that Introduced <= Deprecated <= Obsoleted (although not all
+ // of these steps are needed).
+ if (Introduced.isValid() && Deprecated.isValid() &&
+ !(Introduced.Version <= Deprecated.Version)) {
+ S.Diag(Introduced.KeywordLoc, diag::warn_availability_version_ordering)
+ << 1 << PlatformName << Deprecated.Version.getAsString()
+ << 0 << Introduced.Version.getAsString();
+ return;
+ }
+
+ if (Introduced.isValid() && Obsoleted.isValid() &&
+ !(Introduced.Version <= Obsoleted.Version)) {
+ S.Diag(Introduced.KeywordLoc, diag::warn_availability_version_ordering)
+ << 2 << PlatformName << Obsoleted.Version.getAsString()
+ << 0 << Introduced.Version.getAsString();
+ return;
+ }
+
+ if (Deprecated.isValid() && Obsoleted.isValid() &&
+ !(Deprecated.Version <= Obsoleted.Version)) {
+ S.Diag(Deprecated.KeywordLoc, diag::warn_availability_version_ordering)
+ << 2 << PlatformName << Obsoleted.Version.getAsString()
+ << 1 << Deprecated.Version.getAsString();
+ return;
+ }
+
+ StringRef Str;
+ const StringLiteral *SE =
+ dyn_cast_or_null<const StringLiteral>(Attr.getMessageExpr());
+ if (SE)
+ Str = SE->getString();
+
+ D->addAttr(::new (S.Context) AvailabilityAttr(Attr.getRange(), S.Context,
+ Platform,
+ Introduced.Version,
+ Deprecated.Version,
+ Obsoleted.Version,
+ IsUnavailable,
+ Str));
+}
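+
+// Illustrative usage (sketch only; the platform name and version numbers are
+// hypothetical):
+//
+//   void f(void) __attribute__((availability(macosx, introduced=10.4,
+//                                            deprecated=10.6,
+//                                            obsoleted=10.7)));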
+
+static void handleVisibilityAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if(!checkAttributeNumArgs(S, Attr, 1))
+ return;
+
+ Expr *Arg = Attr.getArg(0);
+ Arg = Arg->IgnoreParenCasts();
+ StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
+
+ if (!Str || !Str->isAscii()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "visibility" << 1;
+ return;
+ }
+
+ StringRef TypeStr = Str->getString();
+ VisibilityAttr::VisibilityType type;
+
+ if (TypeStr == "default")
+ type = VisibilityAttr::Default;
+ else if (TypeStr == "hidden")
+ type = VisibilityAttr::Hidden;
+ else if (TypeStr == "internal")
+ type = VisibilityAttr::Hidden; // FIXME
+ else if (TypeStr == "protected") {
+ // Complain about attempts to use protected visibility on targets
+ // (like Darwin) that don't support it.
+ if (!S.Context.getTargetInfo().hasProtectedVisibility()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_protected_visibility);
+ type = VisibilityAttr::Default;
+ } else {
+ type = VisibilityAttr::Protected;
+ }
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_unknown_visibility) << TypeStr;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) VisibilityAttr(Attr.getRange(), S.Context, type));
+}
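+
+// Illustrative usage (sketch only): the argument must be one of the strings
+// matched above.
+//
+//   int internal_fn(void) __attribute__((visibility("hidden")));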
+
+static void handleObjCMethodFamilyAttr(Sema &S, Decl *decl,
+ const AttributeList &Attr) {
+ ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(decl);
+ if (!method) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_decl_type)
+ << ExpectedMethod;
+ return;
+ }
+
+ if (Attr.getNumArgs() != 0 || !Attr.getParameterName()) {
+ if (!Attr.getParameterName() && Attr.getNumArgs() == 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "objc_method_family" << 1;
+ } else {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ }
+ Attr.setInvalid();
+ return;
+ }
+
+ StringRef param = Attr.getParameterName()->getName();
+ ObjCMethodFamilyAttr::FamilyKind family;
+ if (param == "none")
+ family = ObjCMethodFamilyAttr::OMF_None;
+ else if (param == "alloc")
+ family = ObjCMethodFamilyAttr::OMF_alloc;
+ else if (param == "copy")
+ family = ObjCMethodFamilyAttr::OMF_copy;
+ else if (param == "init")
+ family = ObjCMethodFamilyAttr::OMF_init;
+ else if (param == "mutableCopy")
+ family = ObjCMethodFamilyAttr::OMF_mutableCopy;
+ else if (param == "new")
+ family = ObjCMethodFamilyAttr::OMF_new;
+ else {
+ // Just warn and ignore it. This is future-proof against new
+ // families being used in system headers.
+ S.Diag(Attr.getParameterLoc(), diag::warn_unknown_method_family);
+ return;
+ }
+
+ if (family == ObjCMethodFamilyAttr::OMF_init &&
+ !method->getResultType()->isObjCObjectPointerType()) {
+ S.Diag(method->getLocation(), diag::err_init_method_bad_return_type)
+ << method->getResultType();
+ // Ignore the attribute.
+ return;
+ }
+
+ method->addAttr(new (S.Context) ObjCMethodFamilyAttr(Attr.getRange(),
+ S.Context, family));
+}
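+
+// Illustrative usage (sketch only): the parameter selects one of the
+// families matched above, e.g. to opt a method out of the inferred 'init'
+// family.
+//
+//   - (NSString *)initialValue __attribute__((objc_method_family(none)));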
+
+static void handleObjCExceptionAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ ObjCInterfaceDecl *OCI = dyn_cast<ObjCInterfaceDecl>(D);
+ if (OCI == 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_requires_objc_interface);
+ return;
+ }
+
+ D->addAttr(::new (S.Context) ObjCExceptionAttr(Attr.getRange(), S.Context));
+}
+
+static void handleObjCNSObject(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ QualType T = TD->getUnderlyingType();
+ if (!T->isPointerType() ||
+ !T->getAs<PointerType>()->getPointeeType()->isRecordType()) {
+ S.Diag(TD->getLocation(), diag::err_nsobject_attribute);
+ return;
+ }
+ }
+ else if (!isa<ObjCPropertyDecl>(D)) {
+ // It is okay to include this attribute on properties, e.g.:
+ //
+ // @property (retain, nonatomic) struct Bork *Q __attribute__((NSObject));
+ //
+ // In this case it follows tradition and suppresses an error in the above
+ // case.
+ S.Diag(D->getLocation(), diag::warn_nsobject_attribute);
+ }
+ D->addAttr(::new (S.Context) ObjCNSObjectAttr(Attr.getRange(), S.Context));
+}
+
+static void
+handleOverloadableAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+
+ if (!isa<FunctionDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_overloadable_not_function);
+ return;
+ }
+
+ D->addAttr(::new (S.Context) OverloadableAttr(Attr.getRange(), S.Context));
+}
+
+static void handleBlocksAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!Attr.getParameterName()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "blocks" << 1;
+ return;
+ }
+
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+
+ BlocksAttr::BlockType type;
+ if (Attr.getParameterName()->isStr("byref"))
+ type = BlocksAttr::ByRef;
+ else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
+ << "blocks" << Attr.getParameterName();
+ return;
+ }
+
+ D->addAttr(::new (S.Context) BlocksAttr(Attr.getRange(), S.Context, type));
+}
+
+static void handleSentinelAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() > 2) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 2;
+ return;
+ }
+
+ unsigned sentinel = 0;
+ if (Attr.getNumArgs() > 0) {
+ Expr *E = Attr.getArg(0);
+ llvm::APSInt Idx(32);
+ if (E->isTypeDependent() || E->isValueDependent() ||
+ !E->isIntegerConstantExpr(Idx, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "sentinel" << 1 << E->getSourceRange();
+ return;
+ }
+
+ if (Idx.isSigned() && Idx.isNegative()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_sentinel_less_than_zero)
+ << E->getSourceRange();
+ return;
+ }
+
+ sentinel = Idx.getZExtValue();
+ }
+
+ unsigned nullPos = 0;
+ if (Attr.getNumArgs() > 1) {
+ Expr *E = Attr.getArg(1);
+ llvm::APSInt Idx(32);
+ if (E->isTypeDependent() || E->isValueDependent() ||
+ !E->isIntegerConstantExpr(Idx, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "sentinel" << 2 << E->getSourceRange();
+ return;
+ }
+ nullPos = Idx.getZExtValue();
+
+ if ((Idx.isSigned() && Idx.isNegative()) || nullPos > 1) {
+ // FIXME: This error message could be improved, it would be nice
+ // to say what the bounds actually are.
+ S.Diag(Attr.getLoc(), diag::err_attribute_sentinel_not_zero_or_one)
+ << E->getSourceRange();
+ return;
+ }
+ }
+
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ const FunctionType *FT = FD->getType()->castAs<FunctionType>();
+ if (isa<FunctionNoProtoType>(FT)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_named_arguments);
+ return;
+ }
+
+ if (!cast<FunctionProtoType>(FT)->isVariadic()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 0;
+ return;
+ }
+ } else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ if (!MD->isVariadic()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 0;
+ return;
+ }
+ } else if (BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ if (!BD->isVariadic()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << 1;
+ return;
+ }
+ } else if (const VarDecl *V = dyn_cast<VarDecl>(D)) {
+ QualType Ty = V->getType();
+ if (Ty->isBlockPointerType() || Ty->isFunctionPointerType()) {
+ const FunctionType *FT = Ty->isFunctionPointerType() ? getFunctionType(D)
+ : Ty->getAs<BlockPointerType>()->getPointeeType()->getAs<FunctionType>();
+ if (!cast<FunctionProtoType>(FT)->isVariadic()) {
+ int m = Ty->isFunctionPointerType() ? 0 : 1;
+ S.Diag(Attr.getLoc(), diag::warn_attribute_sentinel_not_variadic) << m;
+ return;
+ }
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionMethodOrBlock;
+ return;
+ }
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionMethodOrBlock;
+ return;
+ }
+ D->addAttr(::new (S.Context) SentinelAttr(Attr.getRange(), S.Context, sentinel,
+ nullPos));
+}
+
+static void handleWarnUnusedResult(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ if (!isFunction(D) && !isa<ObjCMethodDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ if (isFunction(D) && getFunctionType(D)->getResultType()->isVoidType()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_void_function_method)
+ << Attr.getName() << 0;
+ return;
+ }
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ if (MD->getResultType()->isVoidType()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_void_function_method)
+ << Attr.getName() << 1;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) WarnUnusedResultAttr(Attr.getRange(), S.Context));
+}
+
+static void handleWeakAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (Attr.hasParameterOrArguments()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<VarDecl>(D) && !isa<FunctionDecl>(D)) {
+ if (isa<CXXRecordDecl>(D)) {
+ D->addAttr(::new (S.Context) WeakAttr(Attr.getRange(), S.Context));
+ return;
+ }
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedVariableOrFunction;
+ return;
+ }
+
+ NamedDecl *nd = cast<NamedDecl>(D);
+
+ // 'weak' only applies to declarations with external linkage.
+ if (hasEffectivelyInternalLinkage(nd)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_weak_static);
+ return;
+ }
+
+ nd->addAttr(::new (S.Context) WeakAttr(Attr.getRange(), S.Context));
+}
+
+static void handleWeakImportAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+
+ // weak_import only applies to variable & function declarations.
+ bool isDef = false;
+ if (!D->canBeWeakImported(isDef)) {
+ if (isDef)
+ S.Diag(Attr.getLoc(),
+ diag::warn_attribute_weak_import_invalid_on_definition)
+ << "weak_import" << 2 /*variable and function*/;
+ else if (isa<ObjCPropertyDecl>(D) || isa<ObjCMethodDecl>(D) ||
+ (S.Context.getTargetInfo().getTriple().isOSDarwin() &&
+ (isa<ObjCInterfaceDecl>(D) || isa<EnumDecl>(D)))) {
+ // Nothing to warn about here.
+ } else
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedVariableOrFunction;
+
+ return;
+ }
+
+ D->addAttr(::new (S.Context) WeakImportAttr(Attr.getRange(), S.Context));
+}
+
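+// Handles the OpenCL reqd_work_group_size attribute.  Illustrative example
+// (assumed OpenCL C source, not part of this change):
+//
+//   __kernel __attribute__((reqd_work_group_size(64, 1, 1)))
+//   void k(__global int *p);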
+static void handleReqdWorkGroupSize(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ // Attribute has 3 arguments.
+ if (!checkAttributeNumArgs(S, Attr, 3))
+ return;
+
+ unsigned WGSize[3];
+ for (unsigned i = 0; i < 3; ++i) {
+ Expr *E = Attr.getArg(i);
+ llvm::APSInt ArgNum(32);
+ if (E->isTypeDependent() || E->isValueDependent() ||
+ !E->isIntegerConstantExpr(ArgNum, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ << "reqd_work_group_size" << E->getSourceRange();
+ return;
+ }
+ WGSize[i] = (unsigned) ArgNum.getZExtValue();
+ }
+ D->addAttr(::new (S.Context) ReqdWorkGroupSizeAttr(Attr.getRange(), S.Context,
+ WGSize[0], WGSize[1],
+ WGSize[2]));
+}
+
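+// Illustrative example (hypothetical, not part of this change) of the usage
+// checked by handleSectionAttr below:
+//
+//   static int counters[16] __attribute__((section("counters")));
+//
+// The single argument must be a string literal naming a section specifier
+// that the target accepts.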
+static void handleSectionAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+  // Attribute has exactly one argument.
+ if (!checkAttributeNumArgs(S, Attr, 1))
+ return;
+
+  // Make sure that there is a string literal as the section's single
+ // argument.
+ Expr *ArgExpr = Attr.getArg(0);
+ StringLiteral *SE = dyn_cast<StringLiteral>(ArgExpr);
+ if (!SE) {
+ S.Diag(ArgExpr->getLocStart(), diag::err_attribute_not_string) << "section";
+ return;
+ }
+
+ // If the target wants to validate the section specifier, make it happen.
+ std::string Error = S.Context.getTargetInfo().isValidSectionSpecifier(SE->getString());
+ if (!Error.empty()) {
+ S.Diag(SE->getLocStart(), diag::err_attribute_section_invalid_for_target)
+ << Error;
+ return;
+ }
+
+ // This attribute cannot be applied to local variables.
+ if (isa<VarDecl>(D) && cast<VarDecl>(D)->hasLocalStorage()) {
+ S.Diag(SE->getLocStart(), diag::err_attribute_section_local_variable);
+ return;
+ }
+
+ D->addAttr(::new (S.Context) SectionAttr(Attr.getRange(), S.Context,
+ SE->getString()));
+}
+
+
+static void handleNothrowAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (Attr.hasParameterOrArguments()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (NoThrowAttr *Existing = D->getAttr<NoThrowAttr>()) {
+ if (Existing->getLocation().isInvalid())
+ Existing->setRange(Attr.getRange());
+ } else {
+ D->addAttr(::new (S.Context) NoThrowAttr(Attr.getRange(), S.Context));
+ }
+}
+
+static void handleConstAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (Attr.hasParameterOrArguments()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (ConstAttr *Existing = D->getAttr<ConstAttr>()) {
+ if (Existing->getLocation().isInvalid())
+ Existing->setRange(Attr.getRange());
+ } else {
+ D->addAttr(::new (S.Context) ConstAttr(Attr.getRange(), S.Context));
+ }
+}
+
+static void handlePureAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ D->addAttr(::new (S.Context) PureAttr(Attr.getRange(), S.Context));
+}
+
+static void handleCleanupAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!Attr.getParameterName()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+
+ VarDecl *VD = dyn_cast<VarDecl>(D);
+
+ if (!VD || !VD->hasLocalStorage()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "cleanup";
+ return;
+ }
+
+ // Look up the function
+ // FIXME: Lookup probably isn't looking in the right place
+ NamedDecl *CleanupDecl
+ = S.LookupSingleName(S.TUScope, Attr.getParameterName(),
+ Attr.getParameterLoc(), Sema::LookupOrdinaryName);
+ if (!CleanupDecl) {
+ S.Diag(Attr.getParameterLoc(), diag::err_attribute_cleanup_arg_not_found) <<
+ Attr.getParameterName();
+ return;
+ }
+
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(CleanupDecl);
+ if (!FD) {
+ S.Diag(Attr.getParameterLoc(),
+ diag::err_attribute_cleanup_arg_not_function)
+ << Attr.getParameterName();
+ return;
+ }
+
+ if (FD->getNumParams() != 1) {
+ S.Diag(Attr.getParameterLoc(),
+ diag::err_attribute_cleanup_func_must_take_one_arg)
+ << Attr.getParameterName();
+ return;
+ }
+
+ // We're currently more strict than GCC about what function types we accept.
+ // If this ever proves to be a problem it should be easy to fix.
+ QualType Ty = S.Context.getPointerType(VD->getType());
+ QualType ParamTy = FD->getParamDecl(0)->getType();
+ if (S.CheckAssignmentConstraints(FD->getParamDecl(0)->getLocation(),
+ ParamTy, Ty) != Sema::Compatible) {
+ S.Diag(Attr.getParameterLoc(),
+ diag::err_attribute_cleanup_func_arg_incompatible_type) <<
+ Attr.getParameterName() << ParamTy << Ty;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) CleanupAttr(Attr.getRange(), S.Context, FD));
+ S.MarkFunctionReferenced(Attr.getParameterLoc(), FD);
+}
+
+/// Handle __attribute__((format_arg((idx)))) attribute based on
+/// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
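+///
+/// Illustrative example (hypothetical declaration modelled on the GCC
+/// manual, not part of this change):
+///
+///   extern char *my_dgettext(char *domain, const char *fmt)
+///       __attribute__((format_arg(2)));
+///
+/// Argument 2 is a format string that the function passes through, so calls
+/// that use the result as a printf-style format are still checked.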
+static void handleFormatArgAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (!checkAttributeNumArgs(S, Attr, 1))
+ return;
+
+ if (!isFunctionOrMethod(D) || !hasFunctionProto(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunction;
+ return;
+ }
+
+  // In C++ the implicit 'this' function parameter also counts, and arguments
+  // are counted from one.
+ bool HasImplicitThisParam = isInstanceMethod(D);
+ unsigned NumArgs = getFunctionOrMethodNumArgs(D) + HasImplicitThisParam;
+ unsigned FirstIdx = 1;
+
+ // checks for the 2nd argument
+ Expr *IdxExpr = Attr.getArg(0);
+ llvm::APSInt Idx(32);
+ if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent() ||
+ !IdxExpr->isIntegerConstantExpr(Idx, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "format" << 2 << IdxExpr->getSourceRange();
+ return;
+ }
+
+ if (Idx.getZExtValue() < FirstIdx || Idx.getZExtValue() > NumArgs) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << "format" << 2 << IdxExpr->getSourceRange();
+ return;
+ }
+
+ unsigned ArgIdx = Idx.getZExtValue() - 1;
+
+ if (HasImplicitThisParam) {
+ if (ArgIdx == 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_invalid_implicit_this_argument)
+ << "format_arg" << IdxExpr->getSourceRange();
+ return;
+ }
+ ArgIdx--;
+ }
+
+ // make sure the format string is really a string
+ QualType Ty = getFunctionOrMethodArgType(D, ArgIdx);
+
+ bool not_nsstring_type = !isNSStringType(Ty, S.Context);
+ if (not_nsstring_type &&
+ !isCFStringType(Ty, S.Context) &&
+ (!Ty->isPointerType() ||
+ !Ty->getAs<PointerType>()->getPointeeType()->isCharType())) {
+ // FIXME: Should highlight the actual expression that has the wrong type.
+ S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
+ << (not_nsstring_type ? "a string type" : "an NSString")
+ << IdxExpr->getSourceRange();
+ return;
+ }
+ Ty = getFunctionOrMethodResultType(D);
+ if (!isNSStringType(Ty, S.Context) &&
+ !isCFStringType(Ty, S.Context) &&
+ (!Ty->isPointerType() ||
+ !Ty->getAs<PointerType>()->getPointeeType()->isCharType())) {
+ // FIXME: Should highlight the actual expression that has the wrong type.
+ S.Diag(Attr.getLoc(), diag::err_format_attribute_result_not)
+ << (not_nsstring_type ? "string type" : "NSString")
+ << IdxExpr->getSourceRange();
+ return;
+ }
+
+ D->addAttr(::new (S.Context) FormatArgAttr(Attr.getRange(), S.Context,
+ Idx.getZExtValue()));
+}
+
+enum FormatAttrKind {
+ CFStringFormat,
+ NSStringFormat,
+ StrftimeFormat,
+ SupportedFormat,
+ IgnoredFormat,
+ InvalidFormat
+};
+
+/// getFormatAttrKind - Map from format attribute names to supported format
+/// types.
+static FormatAttrKind getFormatAttrKind(StringRef Format) {
+ // Check for formats that get handled specially.
+ if (Format == "NSString")
+ return NSStringFormat;
+ if (Format == "CFString")
+ return CFStringFormat;
+ if (Format == "strftime")
+ return StrftimeFormat;
+
+ // Otherwise, check for supported formats.
+ if (Format == "scanf" || Format == "printf" || Format == "printf0" ||
+ Format == "strfmon" || Format == "cmn_err" || Format == "vcmn_err" ||
+ Format == "zcmn_err" ||
+ Format == "kprintf") // OpenBSD.
+ return SupportedFormat;
+
+ if (Format == "gcc_diag" || Format == "gcc_cdiag" ||
+ Format == "gcc_cxxdiag" || Format == "gcc_tdiag")
+ return IgnoredFormat;
+
+ return InvalidFormat;
+}
+
+/// Handle __attribute__((init_priority(priority))) attributes based on
+/// http://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Attributes.html
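+///
+/// Illustrative example (hypothetical, not part of this change):
+///
+///   struct Logger { Logger(); };
+///   Logger TheLogger __attribute__((init_priority(2000)));
+///
+/// Lower priorities are constructed earlier; the handler below accepts
+/// values in [101, 65535].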
+static void handleInitPriorityAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!S.getLangOpts().CPlusPlus) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
+ return;
+ }
+
+ if (!isa<VarDecl>(D) || S.getCurFunctionOrMethodDecl()) {
+ S.Diag(Attr.getLoc(), diag::err_init_priority_object_attr);
+ Attr.setInvalid();
+ return;
+ }
+ QualType T = dyn_cast<VarDecl>(D)->getType();
+ if (S.Context.getAsArrayType(T))
+ T = S.Context.getBaseElementType(T);
+ if (!T->getAs<RecordType>()) {
+ S.Diag(Attr.getLoc(), diag::err_init_priority_object_attr);
+ Attr.setInvalid();
+ return;
+ }
+
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ Attr.setInvalid();
+ return;
+ }
+ Expr *priorityExpr = Attr.getArg(0);
+
+ llvm::APSInt priority(32);
+ if (priorityExpr->isTypeDependent() || priorityExpr->isValueDependent() ||
+ !priorityExpr->isIntegerConstantExpr(priority, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ << "init_priority" << priorityExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+ unsigned prioritynum = priority.getZExtValue();
+ if (prioritynum < 101 || prioritynum > 65535) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_outof_range)
+ << priorityExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+ D->addAttr(::new (S.Context) InitPriorityAttr(Attr.getRange(), S.Context,
+ prioritynum));
+}
+
+/// Handle __attribute__((format(type,idx,firstarg))) attributes based on
+/// http://gcc.gnu.org/onlinedocs/gcc/Function-Attributes.html
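+///
+/// Illustrative example (hypothetical, not part of this change):
+///
+///   void log_error(int level, const char *fmt, ...)
+///       __attribute__((format(printf, 2, 3)));
+///
+/// Here 'fmt' is parameter 2 and the checked variadic arguments begin at
+/// parameter 3, matching the idx/firstarg validation performed below.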
+static void handleFormatAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+
+ if (!Attr.getParameterName()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "format" << 1;
+ return;
+ }
+
+ if (Attr.getNumArgs() != 2) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 3;
+ return;
+ }
+
+ if (!isFunctionOrMethodOrBlock(D) || !hasFunctionProto(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunction;
+ return;
+ }
+
+  // In C++ the implicit 'this' function parameter also counts, and arguments
+  // are counted from one.
+ bool HasImplicitThisParam = isInstanceMethod(D);
+ unsigned NumArgs = getFunctionOrMethodNumArgs(D) + HasImplicitThisParam;
+ unsigned FirstIdx = 1;
+
+ StringRef Format = Attr.getParameterName()->getName();
+
+ // Normalize the argument, __foo__ becomes foo.
+ if (Format.startswith("__") && Format.endswith("__"))
+ Format = Format.substr(2, Format.size() - 4);
+
+ // Check for supported formats.
+ FormatAttrKind Kind = getFormatAttrKind(Format);
+
+ if (Kind == IgnoredFormat)
+ return;
+
+ if (Kind == InvalidFormat) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_type_not_supported)
+ << "format" << Attr.getParameterName()->getName();
+ return;
+ }
+
+ // checks for the 2nd argument
+ Expr *IdxExpr = Attr.getArg(0);
+ llvm::APSInt Idx(32);
+ if (IdxExpr->isTypeDependent() || IdxExpr->isValueDependent() ||
+ !IdxExpr->isIntegerConstantExpr(Idx, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "format" << 2 << IdxExpr->getSourceRange();
+ return;
+ }
+
+ if (Idx.getZExtValue() < FirstIdx || Idx.getZExtValue() > NumArgs) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << "format" << 2 << IdxExpr->getSourceRange();
+ return;
+ }
+
+ // FIXME: Do we need to bounds check?
+ unsigned ArgIdx = Idx.getZExtValue() - 1;
+
+ if (HasImplicitThisParam) {
+ if (ArgIdx == 0) {
+ S.Diag(Attr.getLoc(),
+ diag::err_format_attribute_implicit_this_format_string)
+ << IdxExpr->getSourceRange();
+ return;
+ }
+ ArgIdx--;
+ }
+
+ // make sure the format string is really a string
+ QualType Ty = getFunctionOrMethodArgType(D, ArgIdx);
+
+ if (Kind == CFStringFormat) {
+ if (!isCFStringType(Ty, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
+ << "a CFString" << IdxExpr->getSourceRange();
+ return;
+ }
+ } else if (Kind == NSStringFormat) {
+ // FIXME: do we need to check if the type is NSString*? What are the
+ // semantics?
+ if (!isNSStringType(Ty, S.Context)) {
+ // FIXME: Should highlight the actual expression that has the wrong type.
+ S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
+ << "an NSString" << IdxExpr->getSourceRange();
+ return;
+ }
+ } else if (!Ty->isPointerType() ||
+ !Ty->getAs<PointerType>()->getPointeeType()->isCharType()) {
+ // FIXME: Should highlight the actual expression that has the wrong type.
+ S.Diag(Attr.getLoc(), diag::err_format_attribute_not)
+ << "a string type" << IdxExpr->getSourceRange();
+ return;
+ }
+
+ // check the 3rd argument
+ Expr *FirstArgExpr = Attr.getArg(1);
+ llvm::APSInt FirstArg(32);
+ if (FirstArgExpr->isTypeDependent() || FirstArgExpr->isValueDependent() ||
+ !FirstArgExpr->isIntegerConstantExpr(FirstArg, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "format" << 3 << FirstArgExpr->getSourceRange();
+ return;
+ }
+
+  // Check whether the function is variadic if the 3rd argument is non-zero.
+ if (FirstArg != 0) {
+ if (isFunctionOrMethodVariadic(D)) {
+ ++NumArgs; // +1 for ...
+ } else {
+ S.Diag(D->getLocation(), diag::err_format_attribute_requires_variadic);
+ return;
+ }
+ }
+
+  // strftime requires FirstArg to be 0 because it doesn't read from any
+  // variable; the input is just the current time plus the format string.
+ if (Kind == StrftimeFormat) {
+ if (FirstArg != 0) {
+ S.Diag(Attr.getLoc(), diag::err_format_strftime_third_parameter)
+ << FirstArgExpr->getSourceRange();
+ return;
+ }
+    // If 0, parameter checking is disabled (e.g. for use with va_list).
+ } else if (FirstArg != 0 && FirstArg != NumArgs) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << "format" << 3 << FirstArgExpr->getSourceRange();
+ return;
+ }
+
+ // Check whether we already have an equivalent format attribute.
+ for (specific_attr_iterator<FormatAttr>
+ i = D->specific_attr_begin<FormatAttr>(),
+ e = D->specific_attr_end<FormatAttr>();
+ i != e ; ++i) {
+ FormatAttr *f = *i;
+ if (f->getType() == Format &&
+ f->getFormatIdx() == (int)Idx.getZExtValue() &&
+ f->getFirstArg() == (int)FirstArg.getZExtValue()) {
+ // If we don't have a valid location for this attribute, adopt the
+ // location.
+ if (f->getLocation().isInvalid())
+ f->setRange(Attr.getRange());
+ return;
+ }
+ }
+
+ D->addAttr(::new (S.Context) FormatAttr(Attr.getRange(), S.Context, Format,
+ Idx.getZExtValue(),
+ FirstArg.getZExtValue()));
+}
+
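+// Illustrative example (hypothetical, not part of this change) of the usage
+// validated by handleTransparentUnionAttr below:
+//
+//   typedef union {
+//     int *ip;
+//     union wait *wp;
+//   } wait_status_ptr_t __attribute__((transparent_union));
+//
+// All members must share the size and alignment of the first member.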
+static void handleTransparentUnionAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+
+ // Try to find the underlying union declaration.
+ RecordDecl *RD = 0;
+ TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D);
+ if (TD && TD->getUnderlyingType()->isUnionType())
+ RD = TD->getUnderlyingType()->getAsUnionType()->getDecl();
+ else
+ RD = dyn_cast<RecordDecl>(D);
+
+ if (!RD || !RD->isUnion()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedUnion;
+ return;
+ }
+
+ if (!RD->isCompleteDefinition()) {
+ S.Diag(Attr.getLoc(),
+ diag::warn_transparent_union_attribute_not_definition);
+ return;
+ }
+
+ RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end();
+ if (Field == FieldEnd) {
+ S.Diag(Attr.getLoc(), diag::warn_transparent_union_attribute_zero_fields);
+ return;
+ }
+
+ FieldDecl *FirstField = *Field;
+ QualType FirstType = FirstField->getType();
+ if (FirstType->hasFloatingRepresentation() || FirstType->isVectorType()) {
+ S.Diag(FirstField->getLocation(),
+ diag::warn_transparent_union_attribute_floating)
+ << FirstType->isVectorType() << FirstType;
+ return;
+ }
+
+ uint64_t FirstSize = S.Context.getTypeSize(FirstType);
+ uint64_t FirstAlign = S.Context.getTypeAlign(FirstType);
+ for (; Field != FieldEnd; ++Field) {
+ QualType FieldType = Field->getType();
+ if (S.Context.getTypeSize(FieldType) != FirstSize ||
+ S.Context.getTypeAlign(FieldType) != FirstAlign) {
+ // Warn if we drop the attribute.
+ bool isSize = S.Context.getTypeSize(FieldType) != FirstSize;
+ unsigned FieldBits = isSize? S.Context.getTypeSize(FieldType)
+ : S.Context.getTypeAlign(FieldType);
+ S.Diag(Field->getLocation(),
+ diag::warn_transparent_union_attribute_field_size_align)
+ << isSize << Field->getDeclName() << FieldBits;
+ unsigned FirstBits = isSize? FirstSize : FirstAlign;
+ S.Diag(FirstField->getLocation(),
+ diag::note_transparent_union_first_field_size_align)
+ << isSize << FirstBits;
+ return;
+ }
+ }
+
+ RD->addAttr(::new (S.Context) TransparentUnionAttr(Attr.getRange(), S.Context));
+}
+
+static void handleAnnotateAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 1))
+ return;
+
+ Expr *ArgExpr = Attr.getArg(0);
+ StringLiteral *SE = dyn_cast<StringLiteral>(ArgExpr);
+
+ // Make sure that there is a string literal as the annotation's single
+ // argument.
+ if (!SE) {
+ S.Diag(ArgExpr->getLocStart(), diag::err_attribute_not_string) <<"annotate";
+ return;
+ }
+
+ // Don't duplicate annotations that are already set.
+ for (specific_attr_iterator<AnnotateAttr>
+ i = D->specific_attr_begin<AnnotateAttr>(),
+ e = D->specific_attr_end<AnnotateAttr>(); i != e; ++i) {
+ if ((*i)->getAnnotation() == SE->getString())
+ return;
+ }
+ D->addAttr(::new (S.Context) AnnotateAttr(Attr.getRange(), S.Context,
+ SE->getString()));
+}
+
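+// Illustrative example (hypothetical, not part of this change) of the forms
+// accepted by handleAlignedAttr/AddAlignedAttr below:
+//
+//   int x __attribute__((aligned(16)));   // explicit power-of-two alignment
+//   short y __attribute__((aligned));     // maximum useful alignment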
+static void handleAlignedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() > 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+
+  // FIXME: The C++0x version of this attribute has more limited applicability
+ // than GNU's, and should error out when it is used to specify a
+ // weaker alignment, rather than being silently ignored.
+
+ if (Attr.getNumArgs() == 0) {
+ D->addAttr(::new (S.Context) AlignedAttr(Attr.getRange(), S.Context, true, 0));
+ return;
+ }
+
+ S.AddAlignedAttr(Attr.getRange(), D, Attr.getArg(0));
+}
+
+void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E) {
+ // FIXME: Handle pack-expansions here.
+ if (DiagnoseUnexpandedParameterPack(E))
+ return;
+
+ if (E->isTypeDependent() || E->isValueDependent()) {
+ // Save dependent expressions in the AST to be instantiated.
+ D->addAttr(::new (Context) AlignedAttr(AttrRange, Context, true, E));
+ return;
+ }
+
+ SourceLocation AttrLoc = AttrRange.getBegin();
+ // FIXME: Cache the number on the Attr object?
+ llvm::APSInt Alignment(32);
+ ExprResult ICE =
+ VerifyIntegerConstantExpression(E, &Alignment,
+ PDiag(diag::err_attribute_argument_not_int) << "aligned",
+ /*AllowFold*/ false);
+ if (ICE.isInvalid())
+ return;
+ if (!llvm::isPowerOf2_64(Alignment.getZExtValue())) {
+ Diag(AttrLoc, diag::err_attribute_aligned_not_power_of_two)
+ << E->getSourceRange();
+ return;
+ }
+
+ D->addAttr(::new (Context) AlignedAttr(AttrRange, Context, true, ICE.take()));
+}
+
+void Sema::AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *TS) {
+ // FIXME: Cache the number on the Attr object if non-dependent?
+ // FIXME: Perform checking of type validity
+ D->addAttr(::new (Context) AlignedAttr(AttrRange, Context, false, TS));
+ return;
+}
+
+/// handleModeAttr - This attribute modifies the width of a decl with primitive
+/// type.
+///
+/// Despite what would be logical, the mode attribute is a decl attribute, not a
+/// type attribute: 'int ** __attribute((mode(HI))) *G;' tries to make 'G' be
+/// HImode, not an intermediate pointer.
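+///
+/// Illustrative example (hypothetical, not part of this change):
+///
+///   typedef int si_int __attribute__((mode(SI)));  // 32-bit integer type
+///   typedef int di_int __attribute__((mode(DI)));  // 64-bit integer type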
+static void handleModeAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // This attribute isn't documented, but glibc uses it. It changes
+ // the width of an int or unsigned int to the specified size.
+
+ // Check that there aren't any arguments
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+
+ IdentifierInfo *Name = Attr.getParameterName();
+ if (!Name) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_missing_parameter_name);
+ return;
+ }
+
+ StringRef Str = Attr.getParameterName()->getName();
+
+ // Normalize the attribute name, __foo__ becomes foo.
+ if (Str.startswith("__") && Str.endswith("__"))
+ Str = Str.substr(2, Str.size() - 4);
+
+ unsigned DestWidth = 0;
+ bool IntegerMode = true;
+ bool ComplexMode = false;
+ switch (Str.size()) {
+ case 2:
+ switch (Str[0]) {
+ case 'Q': DestWidth = 8; break;
+ case 'H': DestWidth = 16; break;
+ case 'S': DestWidth = 32; break;
+ case 'D': DestWidth = 64; break;
+ case 'X': DestWidth = 96; break;
+ case 'T': DestWidth = 128; break;
+ }
+ if (Str[1] == 'F') {
+ IntegerMode = false;
+ } else if (Str[1] == 'C') {
+ IntegerMode = false;
+ ComplexMode = true;
+ } else if (Str[1] != 'I') {
+ DestWidth = 0;
+ }
+ break;
+ case 4:
+ // FIXME: glibc uses 'word' to define register_t; this is narrower than a
+ // pointer on PIC16 and other embedded platforms.
+ if (Str == "word")
+ DestWidth = S.Context.getTargetInfo().getPointerWidth(0);
+ else if (Str == "byte")
+ DestWidth = S.Context.getTargetInfo().getCharWidth();
+ break;
+ case 7:
+ if (Str == "pointer")
+ DestWidth = S.Context.getTargetInfo().getPointerWidth(0);
+ break;
+ }
+
+ QualType OldTy;
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D))
+ OldTy = TD->getUnderlyingType();
+ else if (ValueDecl *VD = dyn_cast<ValueDecl>(D))
+ OldTy = VD->getType();
+ else {
+ S.Diag(D->getLocation(), diag::err_attr_wrong_decl)
+ << "mode" << Attr.getRange();
+ return;
+ }
+
+ if (!OldTy->getAs<BuiltinType>() && !OldTy->isComplexType())
+ S.Diag(Attr.getLoc(), diag::err_mode_not_primitive);
+ else if (IntegerMode) {
+ if (!OldTy->isIntegralOrEnumerationType())
+ S.Diag(Attr.getLoc(), diag::err_mode_wrong_type);
+ } else if (ComplexMode) {
+ if (!OldTy->isComplexType())
+ S.Diag(Attr.getLoc(), diag::err_mode_wrong_type);
+ } else {
+ if (!OldTy->isFloatingType())
+ S.Diag(Attr.getLoc(), diag::err_mode_wrong_type);
+ }
+
+ // FIXME: Sync this with InitializePredefinedMacros; we need to match int8_t
+ // and friends, at least with glibc.
+ // FIXME: Make sure 32/64-bit integers don't get defined to types of the wrong
+ // width on unusual platforms.
+ // FIXME: Make sure floating-point mappings are accurate
+ // FIXME: Support XF and TF types
+ QualType NewTy;
+ switch (DestWidth) {
+ case 0:
+ S.Diag(Attr.getLoc(), diag::err_unknown_machine_mode) << Name;
+ return;
+ default:
+ S.Diag(Attr.getLoc(), diag::err_unsupported_machine_mode) << Name;
+ return;
+ case 8:
+ if (!IntegerMode) {
+ S.Diag(Attr.getLoc(), diag::err_unsupported_machine_mode) << Name;
+ return;
+ }
+ if (OldTy->isSignedIntegerType())
+ NewTy = S.Context.SignedCharTy;
+ else
+ NewTy = S.Context.UnsignedCharTy;
+ break;
+ case 16:
+ if (!IntegerMode) {
+ S.Diag(Attr.getLoc(), diag::err_unsupported_machine_mode) << Name;
+ return;
+ }
+ if (OldTy->isSignedIntegerType())
+ NewTy = S.Context.ShortTy;
+ else
+ NewTy = S.Context.UnsignedShortTy;
+ break;
+ case 32:
+ if (!IntegerMode)
+ NewTy = S.Context.FloatTy;
+ else if (OldTy->isSignedIntegerType())
+ NewTy = S.Context.IntTy;
+ else
+ NewTy = S.Context.UnsignedIntTy;
+ break;
+ case 64:
+ if (!IntegerMode)
+ NewTy = S.Context.DoubleTy;
+ else if (OldTy->isSignedIntegerType())
+ if (S.Context.getTargetInfo().getLongWidth() == 64)
+ NewTy = S.Context.LongTy;
+ else
+ NewTy = S.Context.LongLongTy;
+ else
+ if (S.Context.getTargetInfo().getLongWidth() == 64)
+ NewTy = S.Context.UnsignedLongTy;
+ else
+ NewTy = S.Context.UnsignedLongLongTy;
+ break;
+ case 96:
+ NewTy = S.Context.LongDoubleTy;
+ break;
+ case 128:
+ if (!IntegerMode) {
+ S.Diag(Attr.getLoc(), diag::err_unsupported_machine_mode) << Name;
+ return;
+ }
+ if (OldTy->isSignedIntegerType())
+ NewTy = S.Context.Int128Ty;
+ else
+ NewTy = S.Context.UnsignedInt128Ty;
+ break;
+ }
+
+ if (ComplexMode) {
+ NewTy = S.Context.getComplexType(NewTy);
+ }
+
+ // Install the new type.
+ if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ // FIXME: preserve existing source info.
+ TD->setTypeSourceInfo(S.Context.getTrivialTypeSourceInfo(NewTy));
+ } else
+ cast<ValueDecl>(D)->setType(NewTy);
+}
+
+static void handleNoDebugAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ if (!isFunctionOrMethod(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunction;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) NoDebugAttr(Attr.getRange(), S.Context));
+}
+
+static void handleNoInlineAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+
+ if (!isa<FunctionDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunction;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) NoInlineAttr(Attr.getRange(), S.Context));
+}
+
+static void handleNoInstrumentFunctionAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+
+ if (!isa<FunctionDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunction;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) NoInstrumentFunctionAttr(Attr.getRange(),
+ S.Context));
+}
+
+static void handleConstantAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (S.LangOpts.CUDA) {
+ // check the attribute arguments.
+ if (Attr.hasParameterOrArguments()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<VarDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedVariable;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) CUDAConstantAttr(Attr.getRange(), S.Context));
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "constant";
+ }
+}
+
+static void handleDeviceAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (S.LangOpts.CUDA) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ if (!isa<FunctionDecl>(D) && !isa<VarDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedVariableOrFunction;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) CUDADeviceAttr(Attr.getRange(), S.Context));
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "device";
+ }
+}
+
+static void handleGlobalAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (S.LangOpts.CUDA) {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ if (!isa<FunctionDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunction;
+ return;
+ }
+
+ FunctionDecl *FD = cast<FunctionDecl>(D);
+ if (!FD->getResultType()->isVoidType()) {
+ TypeLoc TL = FD->getTypeSourceInfo()->getTypeLoc().IgnoreParens();
+ if (FunctionTypeLoc* FTL = dyn_cast<FunctionTypeLoc>(&TL)) {
+ S.Diag(FD->getTypeSpecStartLoc(), diag::err_kern_type_not_void_return)
+ << FD->getType()
+ << FixItHint::CreateReplacement(FTL->getResultLoc().getSourceRange(),
+ "void");
+ } else {
+ S.Diag(FD->getTypeSpecStartLoc(), diag::err_kern_type_not_void_return)
+ << FD->getType();
+ }
+ return;
+ }
+
+ D->addAttr(::new (S.Context) CUDAGlobalAttr(Attr.getRange(), S.Context));
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "global";
+ }
+}
+
+static void handleHostAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (S.LangOpts.CUDA) {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+
+ if (!isa<FunctionDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunction;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) CUDAHostAttr(Attr.getRange(), S.Context));
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "host";
+ }
+}
+
+static void handleSharedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (S.LangOpts.CUDA) {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+
+ if (!isa<VarDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedVariable;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) CUDASharedAttr(Attr.getRange(), S.Context));
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "shared";
+ }
+}
+
+static void handleGNUInlineAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 0))
+ return;
+
+ FunctionDecl *Fn = dyn_cast<FunctionDecl>(D);
+ if (Fn == 0) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunction;
+ return;
+ }
+
+ if (!Fn->isInlineSpecified()) {
+ S.Diag(Attr.getLoc(), diag::warn_gnu_inline_attribute_requires_inline);
+ return;
+ }
+
+ D->addAttr(::new (S.Context) GNUInlineAttr(Attr.getRange(), S.Context));
+}
+
+static void handleCallConvAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (hasDeclarator(D)) return;
+
+ // Diagnostic is emitted elsewhere: here we store the (valid) Attr
+ // in the Decl node for syntactic reasoning, e.g., pretty-printing.
+ CallingConv CC;
+ if (S.CheckCallingConvAttr(Attr, CC))
+ return;
+
+ if (!isa<ObjCMethodDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ switch (Attr.getKind()) {
+ case AttributeList::AT_fastcall:
+ D->addAttr(::new (S.Context) FastCallAttr(Attr.getRange(), S.Context));
+ return;
+ case AttributeList::AT_stdcall:
+ D->addAttr(::new (S.Context) StdCallAttr(Attr.getRange(), S.Context));
+ return;
+ case AttributeList::AT_thiscall:
+ D->addAttr(::new (S.Context) ThisCallAttr(Attr.getRange(), S.Context));
+ return;
+ case AttributeList::AT_cdecl:
+ D->addAttr(::new (S.Context) CDeclAttr(Attr.getRange(), S.Context));
+ return;
+ case AttributeList::AT_pascal:
+ D->addAttr(::new (S.Context) PascalAttr(Attr.getRange(), S.Context));
+ return;
+ case AttributeList::AT_pcs: {
+ Expr *Arg = Attr.getArg(0);
+ StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
+ if (!Str || !Str->isAscii()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "pcs" << 1;
+ Attr.setInvalid();
+ return;
+ }
+
+ StringRef StrRef = Str->getString();
+ PcsAttr::PCSType PCS;
+ if (StrRef == "aapcs")
+ PCS = PcsAttr::AAPCS;
+ else if (StrRef == "aapcs-vfp")
+ PCS = PcsAttr::AAPCS_VFP;
+ else {
+ S.Diag(Attr.getLoc(), diag::err_invalid_pcs);
+ Attr.setInvalid();
+ return;
+ }
+
+    D->addAttr(::new (S.Context) PcsAttr(Attr.getRange(), S.Context, PCS));
+    return;
+  }
+ default:
+ llvm_unreachable("unexpected attribute kind");
+ }
+}
+
+static void handleOpenCLKernelAttr(Sema &S, Decl *D, const AttributeList &Attr){
+ assert(!Attr.isInvalid());
+ D->addAttr(::new (S.Context) OpenCLKernelAttr(Attr.getRange(), S.Context));
+}
+
+bool Sema::CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC) {
+ if (attr.isInvalid())
+ return true;
+
+ if ((attr.getNumArgs() != 0 &&
+ !(attr.getKind() == AttributeList::AT_pcs && attr.getNumArgs() == 1)) ||
+ attr.getParameterName()) {
+ Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ attr.setInvalid();
+ return true;
+ }
+
+ // TODO: diagnose uses of these conventions on the wrong target. Or, better
+ // move to TargetAttributesSema one day.
+ switch (attr.getKind()) {
+ case AttributeList::AT_cdecl: CC = CC_C; break;
+ case AttributeList::AT_fastcall: CC = CC_X86FastCall; break;
+ case AttributeList::AT_stdcall: CC = CC_X86StdCall; break;
+ case AttributeList::AT_thiscall: CC = CC_X86ThisCall; break;
+ case AttributeList::AT_pascal: CC = CC_X86Pascal; break;
+ case AttributeList::AT_pcs: {
+ Expr *Arg = attr.getArg(0);
+ StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
+ if (!Str || !Str->isAscii()) {
+ Diag(attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "pcs" << 1;
+ attr.setInvalid();
+ return true;
+ }
+
+ StringRef StrRef = Str->getString();
+ if (StrRef == "aapcs") {
+ CC = CC_AAPCS;
+ break;
+ } else if (StrRef == "aapcs-vfp") {
+ CC = CC_AAPCS_VFP;
+ break;
+ }
+ // FALLS THROUGH
+ }
+ default: llvm_unreachable("unexpected attribute kind");
+ }
+
+ return false;
+}
+
+static void handleRegparmAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (hasDeclarator(D)) return;
+
+ unsigned numParams;
+ if (S.CheckRegparmAttr(Attr, numParams))
+ return;
+
+ if (!isa<ObjCMethodDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) RegparmAttr(Attr.getRange(), S.Context, numParams));
+}
+
+/// Checks a regparm attribute, returning true if it is ill-formed and
+/// otherwise setting numParams to the appropriate value.
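+///
+/// Illustrative example (hypothetical, not part of this change):
+///
+///   int add(int a, int b) __attribute__((regparm(3)));
+///
+/// which requests that up to three integer arguments be passed in registers;
+/// the value is bounded by the target's RegParmMax, as checked below.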
+bool Sema::CheckRegparmAttr(const AttributeList &Attr, unsigned &numParams) {
+ if (Attr.isInvalid())
+ return true;
+
+ if (Attr.getNumArgs() != 1) {
+ Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ Attr.setInvalid();
+ return true;
+ }
+
+ Expr *NumParamsExpr = Attr.getArg(0);
+ llvm::APSInt NumParams(32);
+ if (NumParamsExpr->isTypeDependent() || NumParamsExpr->isValueDependent() ||
+ !NumParamsExpr->isIntegerConstantExpr(NumParams, Context)) {
+ Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ << "regparm" << NumParamsExpr->getSourceRange();
+ Attr.setInvalid();
+ return true;
+ }
+
+ if (Context.getTargetInfo().getRegParmMax() == 0) {
+ Diag(Attr.getLoc(), diag::err_attribute_regparm_wrong_platform)
+ << NumParamsExpr->getSourceRange();
+ Attr.setInvalid();
+ return true;
+ }
+
+ numParams = NumParams.getZExtValue();
+ if (numParams > Context.getTargetInfo().getRegParmMax()) {
+ Diag(Attr.getLoc(), diag::err_attribute_regparm_invalid_number)
+ << Context.getTargetInfo().getRegParmMax() << NumParamsExpr->getSourceRange();
+ Attr.setInvalid();
+ return true;
+ }
+
+ return false;
+}
+
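+// Illustrative example (assumed CUDA source, not part of this change) of the
+// usage validated by handleLaunchBoundsAttr below:
+//
+//   __global__ void
+//   __launch_bounds__(256 /*maxThreadsPerBlock*/, 2 /*minBlocksPerSM*/)
+//   kern(float *p);
+//
+// The CUDA headers typically expand __launch_bounds__ to this attribute.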
+static void handleLaunchBoundsAttr(Sema &S, Decl *D, const AttributeList &Attr){
+ if (S.LangOpts.CUDA) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 1 && Attr.getNumArgs() != 2) {
+ // FIXME: 0 is not okay.
+ S.Diag(Attr.getLoc(), diag::err_attribute_too_many_arguments) << 2;
+ return;
+ }
+
+ if (!isFunctionOrMethod(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ Expr *MaxThreadsExpr = Attr.getArg(0);
+ llvm::APSInt MaxThreads(32);
+ if (MaxThreadsExpr->isTypeDependent() ||
+ MaxThreadsExpr->isValueDependent() ||
+ !MaxThreadsExpr->isIntegerConstantExpr(MaxThreads, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "launch_bounds" << 1 << MaxThreadsExpr->getSourceRange();
+ return;
+ }
+
+ llvm::APSInt MinBlocks(32);
+ if (Attr.getNumArgs() > 1) {
+ Expr *MinBlocksExpr = Attr.getArg(1);
+ if (MinBlocksExpr->isTypeDependent() ||
+ MinBlocksExpr->isValueDependent() ||
+ !MinBlocksExpr->isIntegerConstantExpr(MinBlocks, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_int)
+ << "launch_bounds" << 2 << MinBlocksExpr->getSourceRange();
+ return;
+ }
+ }
+
+ D->addAttr(::new (S.Context) CUDALaunchBoundsAttr(Attr.getRange(), S.Context,
+ MaxThreads.getZExtValue(),
+ MinBlocks.getZExtValue()));
+ } else {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "launch_bounds";
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Checker-specific attribute handlers.
+//===----------------------------------------------------------------------===//
+
+static bool isValidSubjectOfNSAttribute(Sema &S, QualType type) {
+ return type->isDependentType() ||
+ type->isObjCObjectPointerType() ||
+ S.Context.isObjCNSObjectType(type);
+}
+static bool isValidSubjectOfCFAttribute(Sema &S, QualType type) {
+ return type->isDependentType() ||
+ type->isPointerType() ||
+ isValidSubjectOfNSAttribute(S, type);
+}
+
+static void handleNSConsumedAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ ParmVarDecl *param = dyn_cast<ParmVarDecl>(D);
+ if (!param) {
+ S.Diag(D->getLocStart(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getRange() << Attr.getName() << ExpectedParameter;
+ return;
+ }
+
+ bool typeOK, cf;
+ if (Attr.getKind() == AttributeList::AT_ns_consumed) {
+ typeOK = isValidSubjectOfNSAttribute(S, param->getType());
+ cf = false;
+ } else {
+ typeOK = isValidSubjectOfCFAttribute(S, param->getType());
+ cf = true;
+ }
+
+ if (!typeOK) {
+ S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_parameter_type)
+ << Attr.getRange() << Attr.getName() << cf;
+ return;
+ }
+
+ if (cf)
+ param->addAttr(::new (S.Context) CFConsumedAttr(Attr.getRange(), S.Context));
+ else
+ param->addAttr(::new (S.Context) NSConsumedAttr(Attr.getRange(), S.Context));
+}
+
+static void handleNSConsumesSelfAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!isa<ObjCMethodDecl>(D)) {
+ S.Diag(D->getLocStart(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getRange() << Attr.getName() << ExpectedMethod;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) NSConsumesSelfAttr(Attr.getRange(), S.Context));
+}
+
+static void handleNSReturnsRetainedAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+
+ QualType returnType;
+
+ if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ returnType = MD->getResultType();
+ else if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(D))
+ returnType = PD->getType();
+ else if (S.getLangOpts().ObjCAutoRefCount && hasDeclarator(D) &&
+ (Attr.getKind() == AttributeList::AT_ns_returns_retained))
+ return; // ignore: was handled as a type attribute
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ returnType = FD->getResultType();
+ else {
+ S.Diag(D->getLocStart(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getRange() << Attr.getName()
+ << ExpectedFunctionOrMethod;
+ return;
+ }
+
+ bool typeOK;
+ bool cf;
+ switch (Attr.getKind()) {
+ default: llvm_unreachable("invalid ownership attribute");
+ case AttributeList::AT_ns_returns_autoreleased:
+ case AttributeList::AT_ns_returns_retained:
+ case AttributeList::AT_ns_returns_not_retained:
+ typeOK = isValidSubjectOfNSAttribute(S, returnType);
+ cf = false;
+ break;
+
+ case AttributeList::AT_cf_returns_retained:
+ case AttributeList::AT_cf_returns_not_retained:
+ typeOK = isValidSubjectOfCFAttribute(S, returnType);
+ cf = true;
+ break;
+ }
+
+ if (!typeOK) {
+ S.Diag(D->getLocStart(), diag::warn_ns_attribute_wrong_return_type)
+ << Attr.getRange() << Attr.getName() << isa<ObjCMethodDecl>(D) << cf;
+ return;
+ }
+
+ switch (Attr.getKind()) {
+ default:
+ llvm_unreachable("invalid ownership attribute");
+ case AttributeList::AT_ns_returns_autoreleased:
+ D->addAttr(::new (S.Context) NSReturnsAutoreleasedAttr(Attr.getRange(),
+ S.Context));
+ return;
+ case AttributeList::AT_cf_returns_not_retained:
+ D->addAttr(::new (S.Context) CFReturnsNotRetainedAttr(Attr.getRange(),
+ S.Context));
+ return;
+ case AttributeList::AT_ns_returns_not_retained:
+ D->addAttr(::new (S.Context) NSReturnsNotRetainedAttr(Attr.getRange(),
+ S.Context));
+ return;
+ case AttributeList::AT_cf_returns_retained:
+ D->addAttr(::new (S.Context) CFReturnsRetainedAttr(Attr.getRange(),
+ S.Context));
+ return;
+ case AttributeList::AT_ns_returns_retained:
+ D->addAttr(::new (S.Context) NSReturnsRetainedAttr(Attr.getRange(),
+ S.Context));
+ return;
+  }
+}
+
+static void handleObjCReturnsInnerPointerAttr(Sema &S, Decl *D,
+ const AttributeList &attr) {
+ SourceLocation loc = attr.getLoc();
+
+ ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(D);
+
+  if (!method) {
+    S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
+      << SourceRange(loc, loc) << attr.getName() << ExpectedMethod;
+ return;
+ }
+
+ // Check that the method returns a normal pointer.
+ QualType resultType = method->getResultType();
+
+ if (!resultType->isReferenceType() &&
+ (!resultType->isPointerType() || resultType->isObjCRetainableType())) {
+ S.Diag(method->getLocStart(), diag::warn_ns_attribute_wrong_return_type)
+ << SourceRange(loc)
+ << attr.getName() << /*method*/ 1 << /*non-retainable pointer*/ 2;
+
+ // Drop the attribute.
+ return;
+ }
+
+ method->addAttr(
+ ::new (S.Context) ObjCReturnsInnerPointerAttr(attr.getRange(), S.Context));
+}
+
+/// Handle cf_audited_transfer and cf_unknown_transfer.
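+///
+/// Illustrative example (hypothetical, not part of this change):
+///
+///   CFStringRef MyCopyName(void) __attribute__((cf_audited_transfer));
+///
+/// marks the function as following the audited Core Foundation ownership
+/// conventions; cf_unknown_transfer asserts the opposite, and the two are
+/// rejected as incompatible below.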
+static void handleCFTransferAttr(Sema &S, Decl *D, const AttributeList &A) {
+ if (!isa<FunctionDecl>(D)) {
+ S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
+ << A.getRange() << A.getName() << ExpectedFunction;
+ return;
+ }
+
+ bool IsAudited = (A.getKind() == AttributeList::AT_cf_audited_transfer);
+
+ // Check whether there's a conflicting attribute already present.
+ Attr *Existing;
+ if (IsAudited) {
+ Existing = D->getAttr<CFUnknownTransferAttr>();
+ } else {
+ Existing = D->getAttr<CFAuditedTransferAttr>();
+ }
+ if (Existing) {
+ S.Diag(D->getLocStart(), diag::err_attributes_are_not_compatible)
+ << A.getName()
+ << (IsAudited ? "cf_unknown_transfer" : "cf_audited_transfer")
+ << A.getRange() << Existing->getRange();
+ return;
+ }
+
+ // All clear; add the attribute.
+ if (IsAudited) {
+ D->addAttr(
+ ::new (S.Context) CFAuditedTransferAttr(A.getRange(), S.Context));
+ } else {
+ D->addAttr(
+ ::new (S.Context) CFUnknownTransferAttr(A.getRange(), S.Context));
+ }
+}
+
+static void handleNSBridgedAttr(Sema &S, Scope *Sc, Decl *D,
+ const AttributeList &Attr) {
+ RecordDecl *RD = dyn_cast<RecordDecl>(D);
+ if (!RD || RD->isUnion()) {
+ S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
+ << Attr.getRange() << Attr.getName() << ExpectedStruct;
+ }
+
+ IdentifierInfo *ParmName = Attr.getParameterName();
+
+ // In Objective-C, verify that the type names an Objective-C type.
+ // We don't want to check this outside of ObjC because people sometimes
+ // do crazy C declarations of Objective-C types.
+ if (ParmName && S.getLangOpts().ObjC1) {
+ // Check for an existing type with this name.
+ LookupResult R(S, DeclarationName(ParmName), Attr.getParameterLoc(),
+ Sema::LookupOrdinaryName);
+ if (S.LookupName(R, Sc)) {
+ NamedDecl *Target = R.getFoundDecl();
+ if (Target && !isa<ObjCInterfaceDecl>(Target)) {
+ S.Diag(D->getLocStart(), diag::err_ns_bridged_not_interface);
+ S.Diag(Target->getLocStart(), diag::note_declared_at);
+ }
+ }
+ }
+
+ D->addAttr(::new (S.Context) NSBridgedAttr(Attr.getRange(), S.Context,
+ ParmName));
+}
+
+static void handleObjCOwnershipAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (hasDeclarator(D)) return;
+
+ S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
+ << Attr.getRange() << Attr.getName() << ExpectedVariable;
+}
+
+static void handleObjCPreciseLifetimeAttr(Sema &S, Decl *D,
+ const AttributeList &Attr) {
+ if (!isa<VarDecl>(D) && !isa<FieldDecl>(D)) {
+ S.Diag(D->getLocStart(), diag::err_attribute_wrong_decl_type)
+ << Attr.getRange() << Attr.getName() << ExpectedVariable;
+ return;
+ }
+
+ ValueDecl *vd = cast<ValueDecl>(D);
+ QualType type = vd->getType();
+
+ if (!type->isDependentType() &&
+ !type->isObjCLifetimeType()) {
+ S.Diag(Attr.getLoc(), diag::err_objc_precise_lifetime_bad_type)
+ << type;
+ return;
+ }
+
+ Qualifiers::ObjCLifetime lifetime = type.getObjCLifetime();
+
+ // If we have no lifetime yet, check the lifetime we're presumably
+ // going to infer.
+ if (lifetime == Qualifiers::OCL_None && !type->isDependentType())
+ lifetime = type->getObjCARCImplicitLifetime();
+
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ assert(type->isDependentType() &&
+ "didn't infer lifetime for non-dependent type?");
+ break;
+
+ case Qualifiers::OCL_Weak: // meaningful
+ case Qualifiers::OCL_Strong: // meaningful
+ break;
+
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ S.Diag(Attr.getLoc(), diag::warn_objc_precise_lifetime_meaningless)
+ << (lifetime == Qualifiers::OCL_Autoreleasing);
+ break;
+ }
+
+ D->addAttr(::new (S.Context)
+ ObjCPreciseLifetimeAttr(Attr.getRange(), S.Context));
+}
+
+static bool isKnownDeclSpecAttr(const AttributeList &Attr) {
+ switch (Attr.getKind()) {
+ default:
+ return false;
+ case AttributeList::AT_dllimport:
+ case AttributeList::AT_dllexport:
+ case AttributeList::AT_uuid:
+ case AttributeList::AT_deprecated:
+ case AttributeList::AT_noreturn:
+ case AttributeList::AT_nothrow:
+ case AttributeList::AT_naked:
+ case AttributeList::AT_noinline:
+ return true;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Microsoft specific attribute handlers.
+//===----------------------------------------------------------------------===//
+
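+// Illustrative example (not part of this change) of the Microsoft uuid
+// attribute validated below, using the well-known IUnknown GUID:
+//
+//   struct __declspec(uuid("00000000-0000-0000-C000-000000000046")) IUnknown;
+//
+// The GUID may also be written with surrounding braces.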
+static void handleUuidAttr(Sema &S, Decl *D, const AttributeList &Attr) {
+ if (S.LangOpts.MicrosoftExt || S.LangOpts.Borland) {
+ // check the attribute arguments.
+ if (!checkAttributeNumArgs(S, Attr, 1))
+ return;
+
+ Expr *Arg = Attr.getArg(0);
+ StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
+ if (!Str || !Str->isAscii()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "uuid" << 1;
+ return;
+ }
+
+ StringRef StrRef = Str->getString();
+
+ bool IsCurly = StrRef.size() > 1 && StrRef.front() == '{' &&
+ StrRef.back() == '}';
+
+ // Validate GUID length.
+ if (IsCurly && StrRef.size() != 38) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_uuid_malformed_guid);
+ return;
+ }
+ if (!IsCurly && StrRef.size() != 36) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_uuid_malformed_guid);
+ return;
+ }
+
+ // GUID format is "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX" or
+ // "{XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX}"
+ StringRef::iterator I = StrRef.begin();
+ if (IsCurly) // Skip the optional '{'
+ ++I;
+
+ for (int i = 0; i < 36; ++i) {
+ if (i == 8 || i == 13 || i == 18 || i == 23) {
+ if (*I != '-') {
+ S.Diag(Attr.getLoc(), diag::err_attribute_uuid_malformed_guid);
+ return;
+ }
+ } else if (!isxdigit(*I)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_uuid_malformed_guid);
+ return;
+ }
+ I++;
+ }
+
+ D->addAttr(::new (S.Context) UuidAttr(Attr.getRange(), S.Context,
+ Str->getString()));
+ } else
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "uuid";
+}
+
+//===----------------------------------------------------------------------===//
+// Top Level Sema Entry Points
+//===----------------------------------------------------------------------===//
+
+static void ProcessNonInheritableDeclAttr(Sema &S, Scope *scope, Decl *D,
+ const AttributeList &Attr) {
+ switch (Attr.getKind()) {
+ case AttributeList::AT_device: handleDeviceAttr (S, D, Attr); break;
+ case AttributeList::AT_host: handleHostAttr (S, D, Attr); break;
+ case AttributeList::AT_overloadable:handleOverloadableAttr(S, D, Attr); break;
+ default:
+ break;
+ }
+}
+
+static void ProcessInheritableDeclAttr(Sema &S, Scope *scope, Decl *D,
+ const AttributeList &Attr) {
+ switch (Attr.getKind()) {
+ case AttributeList::AT_ibaction: handleIBAction(S, D, Attr); break;
+ case AttributeList::AT_iboutlet: handleIBOutlet(S, D, Attr); break;
+ case AttributeList::AT_iboutletcollection:
+ handleIBOutletCollection(S, D, Attr); break;
+ case AttributeList::AT_address_space:
+ case AttributeList::AT_opencl_image_access:
+ case AttributeList::AT_objc_gc:
+ case AttributeList::AT_vector_size:
+ case AttributeList::AT_neon_vector_type:
+ case AttributeList::AT_neon_polyvector_type:
+    // Ignore these; they are type attributes, handled by
+    // ProcessTypeAttributes.
+ break;
+ case AttributeList::AT_device:
+ case AttributeList::AT_host:
+ case AttributeList::AT_overloadable:
+    // Ignore; this is a non-inheritable attribute, handled
+    // by ProcessNonInheritableDeclAttr.
+ break;
+ case AttributeList::AT_alias: handleAliasAttr (S, D, Attr); break;
+ case AttributeList::AT_aligned: handleAlignedAttr (S, D, Attr); break;
+ case AttributeList::AT_always_inline:
+ handleAlwaysInlineAttr (S, D, Attr); break;
+ case AttributeList::AT_analyzer_noreturn:
+ handleAnalyzerNoReturnAttr (S, D, Attr); break;
+ case AttributeList::AT_annotate: handleAnnotateAttr (S, D, Attr); break;
+ case AttributeList::AT_availability:handleAvailabilityAttr(S, D, Attr); break;
+ case AttributeList::AT_carries_dependency:
+ handleDependencyAttr (S, D, Attr); break;
+ case AttributeList::AT_common: handleCommonAttr (S, D, Attr); break;
+ case AttributeList::AT_constant: handleConstantAttr (S, D, Attr); break;
+ case AttributeList::AT_constructor: handleConstructorAttr (S, D, Attr); break;
+ case AttributeList::AT_deprecated: handleDeprecatedAttr (S, D, Attr); break;
+ case AttributeList::AT_destructor: handleDestructorAttr (S, D, Attr); break;
+ case AttributeList::AT_ext_vector_type:
+ handleExtVectorTypeAttr(S, scope, D, Attr);
+ break;
+ case AttributeList::AT_format: handleFormatAttr (S, D, Attr); break;
+ case AttributeList::AT_format_arg: handleFormatArgAttr (S, D, Attr); break;
+ case AttributeList::AT_global: handleGlobalAttr (S, D, Attr); break;
+ case AttributeList::AT_gnu_inline: handleGNUInlineAttr (S, D, Attr); break;
+ case AttributeList::AT_launch_bounds:
+ handleLaunchBoundsAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_mode: handleModeAttr (S, D, Attr); break;
+ case AttributeList::AT_malloc: handleMallocAttr (S, D, Attr); break;
+ case AttributeList::AT_may_alias: handleMayAliasAttr (S, D, Attr); break;
+ case AttributeList::AT_nocommon: handleNoCommonAttr (S, D, Attr); break;
+ case AttributeList::AT_nonnull: handleNonNullAttr (S, D, Attr); break;
+ case AttributeList::AT_ownership_returns:
+ case AttributeList::AT_ownership_takes:
+ case AttributeList::AT_ownership_holds:
+ handleOwnershipAttr (S, D, Attr); break;
+ case AttributeList::AT_naked: handleNakedAttr (S, D, Attr); break;
+ case AttributeList::AT_noreturn: handleNoReturnAttr (S, D, Attr); break;
+ case AttributeList::AT_nothrow: handleNothrowAttr (S, D, Attr); break;
+ case AttributeList::AT_shared: handleSharedAttr (S, D, Attr); break;
+ case AttributeList::AT_vecreturn: handleVecReturnAttr (S, D, Attr); break;
+
+ case AttributeList::AT_objc_ownership:
+ handleObjCOwnershipAttr(S, D, Attr); break;
+ case AttributeList::AT_objc_precise_lifetime:
+ handleObjCPreciseLifetimeAttr(S, D, Attr); break;
+
+ case AttributeList::AT_objc_returns_inner_pointer:
+ handleObjCReturnsInnerPointerAttr(S, D, Attr); break;
+
+ case AttributeList::AT_ns_bridged:
+ handleNSBridgedAttr(S, scope, D, Attr); break;
+
+ case AttributeList::AT_cf_audited_transfer:
+ case AttributeList::AT_cf_unknown_transfer:
+ handleCFTransferAttr(S, D, Attr); break;
+
+ // Checker-specific.
+ case AttributeList::AT_cf_consumed:
+ case AttributeList::AT_ns_consumed: handleNSConsumedAttr (S, D, Attr); break;
+ case AttributeList::AT_ns_consumes_self:
+ handleNSConsumesSelfAttr(S, D, Attr); break;
+
+ case AttributeList::AT_ns_returns_autoreleased:
+ case AttributeList::AT_ns_returns_not_retained:
+ case AttributeList::AT_cf_returns_not_retained:
+ case AttributeList::AT_ns_returns_retained:
+ case AttributeList::AT_cf_returns_retained:
+ handleNSReturnsRetainedAttr(S, D, Attr); break;
+
+ case AttributeList::AT_reqd_work_group_size:
+ handleReqdWorkGroupSize(S, D, Attr); break;
+
+ case AttributeList::AT_init_priority:
+ handleInitPriorityAttr(S, D, Attr); break;
+
+ case AttributeList::AT_packed: handlePackedAttr (S, D, Attr); break;
+ case AttributeList::AT_ms_struct: handleMsStructAttr (S, D, Attr); break;
+ case AttributeList::AT_section: handleSectionAttr (S, D, Attr); break;
+ case AttributeList::AT_unavailable: handleUnavailableAttr (S, D, Attr); break;
+ case AttributeList::AT_objc_arc_weak_reference_unavailable:
+ handleArcWeakrefUnavailableAttr (S, D, Attr);
+ break;
+ case AttributeList::AT_objc_root_class:
+ handleObjCRootClassAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_objc_requires_property_definitions:
+ handleObjCRequiresPropertyDefsAttr (S, D, Attr);
+ break;
+ case AttributeList::AT_unused: handleUnusedAttr (S, D, Attr); break;
+ case AttributeList::AT_returns_twice:
+ handleReturnsTwiceAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_used: handleUsedAttr (S, D, Attr); break;
+ case AttributeList::AT_visibility: handleVisibilityAttr (S, D, Attr); break;
+ case AttributeList::AT_warn_unused_result: handleWarnUnusedResult(S, D, Attr);
+ break;
+ case AttributeList::AT_weak: handleWeakAttr (S, D, Attr); break;
+ case AttributeList::AT_weakref: handleWeakRefAttr (S, D, Attr); break;
+ case AttributeList::AT_weak_import: handleWeakImportAttr (S, D, Attr); break;
+ case AttributeList::AT_transparent_union:
+ handleTransparentUnionAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_objc_exception:
+ handleObjCExceptionAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_objc_method_family:
+ handleObjCMethodFamilyAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_NSObject: handleObjCNSObject (S, D, Attr); break;
+ case AttributeList::AT_blocks: handleBlocksAttr (S, D, Attr); break;
+ case AttributeList::AT_sentinel: handleSentinelAttr (S, D, Attr); break;
+ case AttributeList::AT_const: handleConstAttr (S, D, Attr); break;
+ case AttributeList::AT_pure: handlePureAttr (S, D, Attr); break;
+ case AttributeList::AT_cleanup: handleCleanupAttr (S, D, Attr); break;
+ case AttributeList::AT_nodebug: handleNoDebugAttr (S, D, Attr); break;
+ case AttributeList::AT_noinline: handleNoInlineAttr (S, D, Attr); break;
+ case AttributeList::AT_regparm: handleRegparmAttr (S, D, Attr); break;
+ case AttributeList::IgnoredAttribute:
+ // Just ignore
+ break;
+ case AttributeList::AT_no_instrument_function: // Interacts with -pg.
+ handleNoInstrumentFunctionAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_stdcall:
+ case AttributeList::AT_cdecl:
+ case AttributeList::AT_fastcall:
+ case AttributeList::AT_thiscall:
+ case AttributeList::AT_pascal:
+ case AttributeList::AT_pcs:
+ handleCallConvAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_opencl_kernel_function:
+ handleOpenCLKernelAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_uuid:
+ handleUuidAttr(S, D, Attr);
+ break;
+
+ // Thread safety attributes:
+ case AttributeList::AT_guarded_var:
+ handleGuardedVarAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_pt_guarded_var:
+ handleGuardedVarAttr(S, D, Attr, /*pointer = */true);
+ break;
+ case AttributeList::AT_scoped_lockable:
+ handleLockableAttr(S, D, Attr, /*scoped = */true);
+ break;
+ case AttributeList::AT_no_address_safety_analysis:
+ handleNoAddressSafetyAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_no_thread_safety_analysis:
+ handleNoThreadSafetyAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_lockable:
+ handleLockableAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_guarded_by:
+ handleGuardedByAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_pt_guarded_by:
+ handleGuardedByAttr(S, D, Attr, /*pointer = */true);
+ break;
+ case AttributeList::AT_exclusive_lock_function:
+ handleLockFunAttr(S, D, Attr, /*exclusive = */true);
+ break;
+ case AttributeList::AT_exclusive_locks_required:
+ handleLocksRequiredAttr(S, D, Attr, /*exclusive = */true);
+ break;
+ case AttributeList::AT_exclusive_trylock_function:
+ handleTrylockFunAttr(S, D, Attr, /*exclusive = */true);
+ break;
+ case AttributeList::AT_lock_returned:
+ handleLockReturnedAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_locks_excluded:
+ handleLocksExcludedAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_shared_lock_function:
+ handleLockFunAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_shared_locks_required:
+ handleLocksRequiredAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_shared_trylock_function:
+ handleTrylockFunAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_unlock_function:
+ handleUnlockFunAttr(S, D, Attr);
+ break;
+ case AttributeList::AT_acquired_before:
+ handleAcquireOrderAttr(S, D, Attr, /*before = */true);
+ break;
+ case AttributeList::AT_acquired_after:
+ handleAcquireOrderAttr(S, D, Attr, /*before = */false);
+ break;
+
+ default:
+ // Ask target about the attribute.
+ const TargetAttributesSema &TargetAttrs = S.getTargetAttributesSema();
+ if (!TargetAttrs.ProcessDeclAttribute(scope, D, Attr, S))
+ S.Diag(Attr.getLoc(), diag::warn_unknown_attribute_ignored)
+ << Attr.getName();
+ break;
+ }
+}
+
+/// ProcessDeclAttribute - Apply the specific attribute to the specified decl if
+/// the attribute applies to decls. If the attribute is a type attribute,
+/// silently ignore it if it is a GNU attribute. FIXME: Applying a C++0x
+/// attribute to the wrong thing is illegal (C++0x [dcl.attr.grammar]/4).
+static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
+ const AttributeList &Attr,
+ bool NonInheritable, bool Inheritable) {
+ if (Attr.isInvalid())
+ return;
+
+ if (Attr.isDeclspecAttribute() && !isKnownDeclSpecAttr(Attr))
+ // FIXME: Try to deal with other __declspec attributes!
+ return;
+
+ if (NonInheritable)
+ ProcessNonInheritableDeclAttr(S, scope, D, Attr);
+
+ if (Inheritable)
+ ProcessInheritableDeclAttr(S, scope, D, Attr);
+}
+
+/// ProcessDeclAttributeList - Apply all the decl attributes in the specified
+/// attribute list to the specified decl, ignoring any type attributes.
+void Sema::ProcessDeclAttributeList(Scope *S, Decl *D,
+ const AttributeList *AttrList,
+ bool NonInheritable, bool Inheritable) {
+ for (const AttributeList* l = AttrList; l; l = l->getNext()) {
+ ProcessDeclAttribute(*this, S, D, *l, NonInheritable, Inheritable);
+ }
+
+ // GCC accepts
+ // static int a9 __attribute__((weakref));
+ // but that looks really pointless. We reject it.
+ if (Inheritable && D->hasAttr<WeakRefAttr>() && !D->hasAttr<AliasAttr>()) {
+ Diag(AttrList->getLoc(), diag::err_attribute_weakref_without_alias) <<
+ dyn_cast<NamedDecl>(D)->getNameAsString();
+ return;
+ }
+}
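+
+// Illustrative forms (added for exposition, not from the original source)
+// for the weakref check above: a weakref must name its target, either
+// directly or via alias:
+//   static void foo(void) __attribute__((weakref("bar")));         // OK
+//   static void baz(void) __attribute__((weakref, alias("bar")));  // OK
+//   static int a9 __attribute__((weakref));   // rejected: no alias target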
+
+// Annotation attributes are the only attributes allowed after an access
+// specifier.
+bool Sema::ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
+ const AttributeList *AttrList) {
+ for (const AttributeList* l = AttrList; l; l = l->getNext()) {
+ if (l->getKind() == AttributeList::AT_annotate) {
+ handleAnnotateAttr(*this, ASDecl, *l);
+ } else {
+ Diag(l->getLoc(), diag::err_only_annotate_after_access_spec);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// checkUnusedDeclAttributes - Check a list of attributes to see if it
+/// contains any decl attributes that we should warn about.
+static void checkUnusedDeclAttributes(Sema &S, const AttributeList *A) {
+ for ( ; A; A = A->getNext()) {
+ // Only warn if the attribute is an unignored, non-type attribute.
+ if (A->isUsedAsTypeAttr()) continue;
+ if (A->getKind() == AttributeList::IgnoredAttribute) continue;
+
+ if (A->getKind() == AttributeList::UnknownAttribute) {
+ S.Diag(A->getLoc(), diag::warn_unknown_attribute_ignored)
+ << A->getName() << A->getRange();
+ } else {
+ S.Diag(A->getLoc(), diag::warn_attribute_not_on_decl)
+ << A->getName() << A->getRange();
+ }
+ }
+}
+
+/// checkUnusedDeclAttributes - Given a declarator which is not being
+/// used to build a declaration, complain about any decl attributes
+/// which might be lying around on it.
+void Sema::checkUnusedDeclAttributes(Declarator &D) {
+ ::checkUnusedDeclAttributes(*this, D.getDeclSpec().getAttributes().getList());
+ ::checkUnusedDeclAttributes(*this, D.getAttributes());
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i)
+ ::checkUnusedDeclAttributes(*this, D.getTypeObject(i).getAttrs());
+}
+
+/// DeclClonePragmaWeak - Clone an existing decl (which may be a definition);
+/// #pragma weak needs a non-definition decl and the source may not have one.
+NamedDecl * Sema::DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
+ SourceLocation Loc) {
+ assert(isa<FunctionDecl>(ND) || isa<VarDecl>(ND));
+ NamedDecl *NewD = 0;
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(ND)) {
+ FunctionDecl *NewFD;
+ // FIXME: Missing call to CheckFunctionDeclaration().
+ // FIXME: Mangling?
+ // FIXME: Is the qualifier info correct?
+ // FIXME: Is the DeclContext correct?
+ NewFD = FunctionDecl::Create(FD->getASTContext(), FD->getDeclContext(),
+ Loc, Loc, DeclarationName(II),
+ FD->getType(), FD->getTypeSourceInfo(),
+ SC_None, SC_None,
+ false/*isInlineSpecified*/,
+ FD->hasPrototype(),
+ false/*isConstexprSpecified*/);
+ NewD = NewFD;
+
+ if (FD->getQualifier())
+ NewFD->setQualifierInfo(FD->getQualifierLoc());
+
+ // Fake up parameter variables; they are declared as if this were
+ // a typedef.
+ QualType FDTy = FD->getType();
+ if (const FunctionProtoType *FT = FDTy->getAs<FunctionProtoType>()) {
+ SmallVector<ParmVarDecl*, 16> Params;
+ for (FunctionProtoType::arg_type_iterator AI = FT->arg_type_begin(),
+ AE = FT->arg_type_end(); AI != AE; ++AI) {
+ ParmVarDecl *Param = BuildParmVarDeclForTypedef(NewFD, Loc, *AI);
+ Param->setScopeInfo(0, Params.size());
+ Params.push_back(Param);
+ }
+ NewFD->setParams(Params);
+ }
+ } else if (VarDecl *VD = dyn_cast<VarDecl>(ND)) {
+ NewD = VarDecl::Create(VD->getASTContext(), VD->getDeclContext(),
+ VD->getInnerLocStart(), VD->getLocation(), II,
+ VD->getType(), VD->getTypeSourceInfo(),
+ VD->getStorageClass(),
+ VD->getStorageClassAsWritten());
+ if (VD->getQualifier()) {
+ VarDecl *NewVD = cast<VarDecl>(NewD);
+ NewVD->setQualifierInfo(VD->getQualifierLoc());
+ }
+ }
+ return NewD;
+}
+
+/// DeclApplyPragmaWeak - A declaration (maybe definition) needs #pragma weak
+/// applied to it, possibly with an alias.
+void Sema::DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W) {
+ if (W.getUsed()) return; // only do this once
+ W.setUsed(true);
+ if (W.getAlias()) { // clone decl, impersonate __attribute(weak,alias(...))
+ IdentifierInfo *NDId = ND->getIdentifier();
+ NamedDecl *NewD = DeclClonePragmaWeak(ND, W.getAlias(), W.getLocation());
+ NewD->addAttr(::new (Context) AliasAttr(W.getLocation(), Context,
+ NDId->getName()));
+ NewD->addAttr(::new (Context) WeakAttr(W.getLocation(), Context));
+ WeakTopLevelDecl.push_back(NewD);
+ // FIXME: "hideous" code from Sema::LazilyCreateBuiltin
+ // to insert Decl at TU scope, sorry.
+ DeclContext *SavedContext = CurContext;
+ CurContext = Context.getTranslationUnitDecl();
+ PushOnScopeChains(NewD, S);
+ CurContext = SavedContext;
+ } else { // just add weak to existing
+ ND->addAttr(::new (Context) WeakAttr(W.getLocation(), Context));
+ }
+}
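+
+// For illustration (not from the original source): when a weak-alias pragma
+// applies to ND, the clone created above is a fresh declaration named
+// W.getAlias() that behaves as if it had been written with
+//   __attribute__((alias("<ND's name>"), weak))
+// and is pushed onto the translation-unit scope.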
+
+/// ProcessDeclAttributes - Given a declarator (PD) with attributes indicated in
+/// it, apply them to D. This is a bit tricky because PD can have attributes
+/// specified in many different places, and we need to find and apply them all.
+void Sema::ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD,
+ bool NonInheritable, bool Inheritable) {
+  // It's valid to "forward-declare" #pragma weak, in which case we
+  // have to check here whether this decl matches a pending weak identifier.
+ if (Inheritable) {
+ LoadExternalWeakUndeclaredIdentifiers();
+ if (!WeakUndeclaredIdentifiers.empty()) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
+ if (IdentifierInfo *Id = ND->getIdentifier()) {
+ llvm::DenseMap<IdentifierInfo*,WeakInfo>::iterator I
+ = WeakUndeclaredIdentifiers.find(Id);
+ if (I != WeakUndeclaredIdentifiers.end() && ND->hasLinkage()) {
+ WeakInfo W = I->second;
+ DeclApplyPragmaWeak(S, ND, W);
+ WeakUndeclaredIdentifiers[Id] = W;
+ }
+ }
+ }
+ }
+ }
+
+ // Apply decl attributes from the DeclSpec if present.
+ if (const AttributeList *Attrs = PD.getDeclSpec().getAttributes().getList())
+ ProcessDeclAttributeList(S, D, Attrs, NonInheritable, Inheritable);
+
+ // Walk the declarator structure, applying decl attributes that were in a type
+ // position to the decl itself. This handles cases like:
+ // int *__attr__(x)** D;
+  //   when x is a decl attribute.
+ for (unsigned i = 0, e = PD.getNumTypeObjects(); i != e; ++i)
+ if (const AttributeList *Attrs = PD.getTypeObject(i).getAttrs())
+ ProcessDeclAttributeList(S, D, Attrs, NonInheritable, Inheritable);
+
+ // Finally, apply any attributes on the decl itself.
+ if (const AttributeList *Attrs = PD.getAttributes())
+ ProcessDeclAttributeList(S, D, Attrs, NonInheritable, Inheritable);
+}
+
+/// Is the given declaration allowed to use a forbidden type?
+static bool isForbiddenTypeAllowed(Sema &S, Decl *decl) {
+ // Private ivars are always okay. Unfortunately, people don't
+ // always properly make their ivars private, even in system headers.
+ // Plus we need to make fields okay, too.
+ // Function declarations in sys headers will be marked unavailable.
+ if (!isa<FieldDecl>(decl) && !isa<ObjCPropertyDecl>(decl) &&
+ !isa<FunctionDecl>(decl))
+ return false;
+
+ // Require it to be declared in a system header.
+ return S.Context.getSourceManager().isInSystemHeader(decl->getLocation());
+}
+
+/// Handle a delayed forbidden-type diagnostic.
+static void handleDelayedForbiddenType(Sema &S, DelayedDiagnostic &diag,
+ Decl *decl) {
+ if (decl && isForbiddenTypeAllowed(S, decl)) {
+ decl->addAttr(new (S.Context) UnavailableAttr(diag.Loc, S.Context,
+ "this system declaration uses an unsupported type"));
+ return;
+ }
+ if (S.getLangOpts().ObjCAutoRefCount)
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(decl)) {
+      // FIXME: we may want to suppress diagnostics for all
+      // kinds of forbidden type messages on unavailable functions.
+ if (FD->hasAttr<UnavailableAttr>() &&
+ diag.getForbiddenTypeDiagnostic() ==
+ diag::err_arc_array_param_no_ownership) {
+ diag.Triggered = true;
+ return;
+ }
+ }
+
+ S.Diag(diag.Loc, diag.getForbiddenTypeDiagnostic())
+ << diag.getForbiddenTypeOperand() << diag.getForbiddenTypeArgument();
+ diag.Triggered = true;
+}
+
+// This duplicates a vector push_back but hides the need to know the
+// size of the type.
+void Sema::DelayedDiagnostics::add(const DelayedDiagnostic &diag) {
+ assert(StackSize <= StackCapacity);
+
+ // Grow the stack if necessary.
+ if (StackSize == StackCapacity) {
+ unsigned newCapacity = 2 * StackCapacity + 2;
+ char *newBuffer = new char[newCapacity * sizeof(DelayedDiagnostic)];
+ const char *oldBuffer = (const char*) Stack;
+
+ if (StackCapacity)
+ memcpy(newBuffer, oldBuffer, StackCapacity * sizeof(DelayedDiagnostic));
+
+ delete[] oldBuffer;
+ Stack = reinterpret_cast<sema::DelayedDiagnostic*>(newBuffer);
+ StackCapacity = newCapacity;
+ }
+
+ assert(StackSize < StackCapacity);
+ new (&Stack[StackSize++]) DelayedDiagnostic(diag);
+}
+
+void Sema::DelayedDiagnostics::popParsingDecl(Sema &S, ParsingDeclState state,
+ Decl *decl) {
+ DelayedDiagnostics &DD = S.DelayedDiagnostics;
+
+ // Check the invariants.
+ assert(DD.StackSize >= state.SavedStackSize);
+ assert(state.SavedStackSize >= DD.ActiveStackBase);
+ assert(DD.ParsingDepth > 0);
+
+ // Drop the parsing depth.
+ DD.ParsingDepth--;
+
+ // If there are no active diagnostics, we're done.
+ if (DD.StackSize == DD.ActiveStackBase)
+ return;
+
+ // We only want to actually emit delayed diagnostics when we
+ // successfully parsed a decl.
+ if (decl) {
+ // We emit all the active diagnostics, not just those starting
+ // from the saved state. The idea is this: we get one push for a
+ // decl spec and another for each declarator; in a decl group like:
+ // deprecated_typedef foo, *bar, baz();
+ // only the declarator pops will be passed decls. This is correct;
+ // we really do need to consider delayed diagnostics from the decl spec
+ // for each of the different declarations.
+ for (unsigned i = DD.ActiveStackBase, e = DD.StackSize; i != e; ++i) {
+ DelayedDiagnostic &diag = DD.Stack[i];
+ if (diag.Triggered)
+ continue;
+
+ switch (diag.Kind) {
+ case DelayedDiagnostic::Deprecation:
+ // Don't bother giving deprecation diagnostics if the decl is invalid.
+ if (!decl->isInvalidDecl())
+ S.HandleDelayedDeprecationCheck(diag, decl);
+ break;
+
+ case DelayedDiagnostic::Access:
+ S.HandleDelayedAccessCheck(diag, decl);
+ break;
+
+ case DelayedDiagnostic::ForbiddenType:
+ handleDelayedForbiddenType(S, diag, decl);
+ break;
+ }
+ }
+ }
+
+ // Destroy all the delayed diagnostics we're about to pop off.
+ for (unsigned i = state.SavedStackSize, e = DD.StackSize; i != e; ++i)
+ DD.Stack[i].Destroy();
+
+ DD.StackSize = state.SavedStackSize;
+}
+
+static bool isDeclDeprecated(Decl *D) {
+ do {
+ if (D->isDeprecated())
+ return true;
+ // A category implicitly has the availability of the interface.
+ if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(D))
+ return CatD->getClassInterface()->isDeprecated();
+ } while ((D = cast_or_null<Decl>(D->getDeclContext())));
+ return false;
+}
+
+void Sema::HandleDelayedDeprecationCheck(DelayedDiagnostic &DD,
+ Decl *Ctx) {
+ if (isDeclDeprecated(Ctx))
+ return;
+
+ DD.Triggered = true;
+ if (!DD.getDeprecationMessage().empty())
+ Diag(DD.Loc, diag::warn_deprecated_message)
+ << DD.getDeprecationDecl()->getDeclName()
+ << DD.getDeprecationMessage();
+ else if (DD.getUnknownObjCClass()) {
+ Diag(DD.Loc, diag::warn_deprecated_fwdclass_message)
+ << DD.getDeprecationDecl()->getDeclName();
+ Diag(DD.getUnknownObjCClass()->getLocation(), diag::note_forward_class);
+ }
+ else
+ Diag(DD.Loc, diag::warn_deprecated)
+ << DD.getDeprecationDecl()->getDeclName();
+}
+
+void Sema::EmitDeprecationWarning(NamedDecl *D, StringRef Message,
+ SourceLocation Loc,
+ const ObjCInterfaceDecl *UnknownObjCClass) {
+ // Delay if we're currently parsing a declaration.
+ if (DelayedDiagnostics.shouldDelayDiagnostics()) {
+ DelayedDiagnostics.add(DelayedDiagnostic::makeDeprecation(Loc, D,
+ UnknownObjCClass,
+ Message));
+ return;
+ }
+
+ // Otherwise, don't warn if our current context is deprecated.
+ if (isDeclDeprecated(cast<Decl>(getCurLexicalContext())))
+ return;
+ if (!Message.empty())
+ Diag(Loc, diag::warn_deprecated_message) << D->getDeclName()
+ << Message;
+ else {
+ if (!UnknownObjCClass)
+ Diag(Loc, diag::warn_deprecated) << D->getDeclName();
+ else {
+ Diag(Loc, diag::warn_deprecated_fwdclass_message) << D->getDeclName();
+ Diag(UnknownObjCClass->getLocation(), diag::note_forward_class);
+ }
+ }
+}
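+
+// Hypothetical uses (added for exposition) that reach the two paths above:
+//   void legacy(void) __attribute__((deprecated("use shiny() instead")));
+//   void legacy2(void) __attribute__((deprecated));
+// A use of legacy() produces warn_deprecated_message with the given string;
+// a use of legacy2() produces the plain warn_deprecated diagnostic.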
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
new file mode 100644
index 0000000..847f03c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclCXX.cpp
@@ -0,0 +1,11095 @@
+//===------ SemaDeclCXX.cpp - Semantic Analysis for C++ Declarations ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for C++ declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/CXXFieldCollector.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/TypeOrdering.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
+#include <map>
+#include <set>
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// CheckDefaultArgumentVisitor
+//===----------------------------------------------------------------------===//
+
+namespace {
+  /// CheckDefaultArgumentVisitor - Per C++ [dcl.fct.default], traverses
+  /// the default argument of a parameter to determine whether it
+ /// contains any ill-formed subexpressions. For example, this will
+ /// diagnose the use of local variables or parameters within the
+ /// default argument expression.
+ class CheckDefaultArgumentVisitor
+ : public StmtVisitor<CheckDefaultArgumentVisitor, bool> {
+ Expr *DefaultArg;
+ Sema *S;
+
+ public:
+ CheckDefaultArgumentVisitor(Expr *defarg, Sema *s)
+ : DefaultArg(defarg), S(s) {}
+
+ bool VisitExpr(Expr *Node);
+ bool VisitDeclRefExpr(DeclRefExpr *DRE);
+ bool VisitCXXThisExpr(CXXThisExpr *ThisE);
+ bool VisitLambdaExpr(LambdaExpr *Lambda);
+ };
+
+ /// VisitExpr - Visit all of the children of this expression.
+ bool CheckDefaultArgumentVisitor::VisitExpr(Expr *Node) {
+ bool IsInvalid = false;
+ for (Stmt::child_range I = Node->children(); I; ++I)
+ IsInvalid |= Visit(*I);
+ return IsInvalid;
+ }
+
+ /// VisitDeclRefExpr - Visit a reference to a declaration, to
+ /// determine whether this declaration can be used in the default
+ /// argument expression.
+ bool CheckDefaultArgumentVisitor::VisitDeclRefExpr(DeclRefExpr *DRE) {
+ NamedDecl *Decl = DRE->getDecl();
+ if (ParmVarDecl *Param = dyn_cast<ParmVarDecl>(Decl)) {
+ // C++ [dcl.fct.default]p9
+ // Default arguments are evaluated each time the function is
+ // called. The order of evaluation of function arguments is
+ // unspecified. Consequently, parameters of a function shall not
+ // be used in default argument expressions, even if they are not
+ // evaluated. Parameters of a function declared before a default
+ // argument expression are in scope and can hide namespace and
+ // class member names.
+ return S->Diag(DRE->getLocStart(),
+ diag::err_param_default_argument_references_param)
+ << Param->getDeclName() << DefaultArg->getSourceRange();
+ } else if (VarDecl *VDecl = dyn_cast<VarDecl>(Decl)) {
+ // C++ [dcl.fct.default]p7
+ // Local variables shall not be used in default argument
+ // expressions.
+ if (VDecl->isLocalVarDecl())
+ return S->Diag(DRE->getLocStart(),
+ diag::err_param_default_argument_references_local)
+ << VDecl->getDeclName() << DefaultArg->getSourceRange();
+ }
+
+ return false;
+ }
+
+ /// VisitCXXThisExpr - Visit a C++ "this" expression.
+ bool CheckDefaultArgumentVisitor::VisitCXXThisExpr(CXXThisExpr *ThisE) {
+ // C++ [dcl.fct.default]p8:
+ // The keyword this shall not be used in a default argument of a
+ // member function.
+ return S->Diag(ThisE->getLocStart(),
+ diag::err_param_default_argument_references_this)
+ << ThisE->getSourceRange();
+ }
+
+ bool CheckDefaultArgumentVisitor::VisitLambdaExpr(LambdaExpr *Lambda) {
+ // C++11 [expr.lambda.prim]p13:
+ // A lambda-expression appearing in a default argument shall not
+ // implicitly or explicitly capture any entity.
+ if (Lambda->capture_begin() == Lambda->capture_end())
+ return false;
+
+ return S->Diag(Lambda->getLocStart(),
+ diag::err_lambda_capture_default_arg);
+ }
+}
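+
+// Example default arguments (added for exposition) that the visitor above
+// diagnoses:
+//   void f(int a, int b = a);          // another parameter in a default arg
+//   void g() {
+//     int i;
+//     extern void h(int x = i);        // local variable in a default arg
+//   }
+//   struct S { int n; void m(int x = this->n); };  // 'this' in a default arg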
+
+void Sema::ImplicitExceptionSpecification::CalledDecl(CXXMethodDecl *Method) {
+ assert(Context && "ImplicitExceptionSpecification without an ASTContext");
+ // If we have an MSAny or unknown spec already, don't bother.
+ if (!Method || ComputedEST == EST_MSAny || ComputedEST == EST_Delayed)
+ return;
+
+ const FunctionProtoType *Proto
+ = Method->getType()->getAs<FunctionProtoType>();
+
+ ExceptionSpecificationType EST = Proto->getExceptionSpecType();
+
+ // If this function can throw any exceptions, make a note of that.
+ if (EST == EST_Delayed || EST == EST_MSAny || EST == EST_None) {
+ ClearExceptions();
+ ComputedEST = EST;
+ return;
+ }
+
+ // FIXME: If the call to this decl is using any of its default arguments, we
+ // need to search them for potentially-throwing calls.
+
+ // If this function has a basic noexcept, it doesn't affect the outcome.
+ if (EST == EST_BasicNoexcept)
+ return;
+
+ // If we have a throw-all spec at this point, ignore the function.
+ if (ComputedEST == EST_None)
+ return;
+
+  // If we're still at noexcept(true) and there's a throw() callee,
+ // change to that specification.
+ if (EST == EST_DynamicNone) {
+ if (ComputedEST == EST_BasicNoexcept)
+ ComputedEST = EST_DynamicNone;
+ return;
+ }
+
+ // Check out noexcept specs.
+ if (EST == EST_ComputedNoexcept) {
+ FunctionProtoType::NoexceptResult NR = Proto->getNoexceptSpec(*Context);
+ assert(NR != FunctionProtoType::NR_NoNoexcept &&
+ "Must have noexcept result for EST_ComputedNoexcept.");
+ assert(NR != FunctionProtoType::NR_Dependent &&
+ "Should not generate implicit declarations for dependent cases, "
+ "and don't know how to handle them anyway.");
+
+ // noexcept(false) -> no spec on the new function
+ if (NR == FunctionProtoType::NR_Throw) {
+ ClearExceptions();
+ ComputedEST = EST_None;
+ }
+ // noexcept(true) won't change anything either.
+ return;
+ }
+
+ assert(EST == EST_Dynamic && "EST case not considered earlier.");
+ assert(ComputedEST != EST_None &&
+ "Shouldn't collect exceptions when throw-all is guaranteed.");
+ ComputedEST = EST_Dynamic;
+ // Record the exceptions in this function's exception specification.
+ for (FunctionProtoType::exception_iterator E = Proto->exception_begin(),
+ EEnd = Proto->exception_end();
+ E != EEnd; ++E)
+ if (ExceptionsSeen.insert(Context->getCanonicalType(*E)))
+ Exceptions.push_back(*E);
+}
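+
+// Illustrative combination (added for exposition): if an implicitly declared
+// special member calls one subobject constructor declared noexcept(true) and
+// another declared throw(std::bad_alloc), the computed specification above
+// becomes the dynamic spec throw(std::bad_alloc); if any callee has no
+// exception specification at all, it degrades to EST_None (may throw
+// anything).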
+
+void Sema::ImplicitExceptionSpecification::CalledExpr(Expr *E) {
+ if (!E || ComputedEST == EST_MSAny || ComputedEST == EST_Delayed)
+ return;
+
+ // FIXME:
+ //
+ // C++0x [except.spec]p14:
+ // [An] implicit exception-specification specifies the type-id T if and
+ // only if T is allowed by the exception-specification of a function directly
+ // invoked by f's implicit definition; f shall allow all exceptions if any
+ // function it directly invokes allows all exceptions, and f shall allow no
+ // exceptions if every function it directly invokes allows no exceptions.
+ //
+ // Note in particular that if an implicit exception-specification is generated
+ // for a function containing a throw-expression, that specification can still
+ // be noexcept(true).
+ //
+ // Note also that 'directly invoked' is not defined in the standard, and there
+ // is no indication that we should only consider potentially-evaluated calls.
+ //
+ // Ultimately we should implement the intent of the standard: the exception
+ // specification should be the set of exceptions which can be thrown by the
+ // implicit definition. For now, we assume that any non-nothrow expression can
+ // throw any exception.
+
+ if (E->CanThrow(*Context))
+ ComputedEST = EST_None;
+}
+
+bool
+Sema::SetParamDefaultArgument(ParmVarDecl *Param, Expr *Arg,
+ SourceLocation EqualLoc) {
+ if (RequireCompleteType(Param->getLocation(), Param->getType(),
+ diag::err_typecheck_decl_incomplete_type)) {
+ Param->setInvalidDecl();
+ return true;
+ }
+
+ // C++ [dcl.fct.default]p5
+ // A default argument expression is implicitly converted (clause
+ // 4) to the parameter type. The default argument expression has
+ // the same semantic constraints as the initializer expression in
+ // a declaration of a variable of the parameter type, using the
+ // copy-initialization semantics (8.5).
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
+ Param);
+ InitializationKind Kind = InitializationKind::CreateCopy(Param->getLocation(),
+ EqualLoc);
+ InitializationSequence InitSeq(*this, Entity, Kind, &Arg, 1);
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind,
+ MultiExprArg(*this, &Arg, 1));
+ if (Result.isInvalid())
+ return true;
+ Arg = Result.takeAs<Expr>();
+
+ CheckImplicitConversions(Arg, EqualLoc);
+ Arg = MaybeCreateExprWithCleanups(Arg);
+
+ // Okay: add the default argument to the parameter
+ Param->setDefaultArg(Arg);
+
+ // We have already instantiated this parameter; provide each of the
+ // instantiations with the uninstantiated default argument.
+ UnparsedDefaultArgInstantiationsMap::iterator InstPos
+ = UnparsedDefaultArgInstantiations.find(Param);
+ if (InstPos != UnparsedDefaultArgInstantiations.end()) {
+ for (unsigned I = 0, N = InstPos->second.size(); I != N; ++I)
+ InstPos->second[I]->setUninstantiatedDefaultArg(Arg);
+
+ // We're done tracking this parameter's instantiations.
+ UnparsedDefaultArgInstantiations.erase(InstPos);
+ }
+
+ return false;
+}
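+
+// For illustration (not from the original source): the copy-initialization
+// performed above is what converts, e.g.,
+//   void f(std::string s = "hello");
+// so the default argument is checked exactly as the declaration
+//   std::string s = "hello";
+// would be.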
+
+/// ActOnParamDefaultArgument - Check whether the default argument
+/// provided for a function parameter is well-formed. If so, attach it
+/// to the parameter declaration.
+void
+Sema::ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc,
+ Expr *DefaultArg) {
+ if (!param || !DefaultArg)
+ return;
+
+ ParmVarDecl *Param = cast<ParmVarDecl>(param);
+ UnparsedDefaultArgLocs.erase(Param);
+
+ // Default arguments are only permitted in C++
+ if (!getLangOpts().CPlusPlus) {
+ Diag(EqualLoc, diag::err_param_default_argument)
+ << DefaultArg->getSourceRange();
+ Param->setInvalidDecl();
+ return;
+ }
+
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(DefaultArg, UPPC_DefaultArgument)) {
+ Param->setInvalidDecl();
+ return;
+ }
+
+ // Check that the default argument is well-formed
+ CheckDefaultArgumentVisitor DefaultArgChecker(DefaultArg, this);
+ if (DefaultArgChecker.Visit(DefaultArg)) {
+ Param->setInvalidDecl();
+ return;
+ }
+
+ SetParamDefaultArgument(Param, DefaultArg, EqualLoc);
+}
+
+/// ActOnParamUnparsedDefaultArgument - We've seen a default
+/// argument for a function parameter, but we can't parse it yet
+/// because we're inside a class definition. Note that this default
+/// argument will be parsed later.
+void Sema::ActOnParamUnparsedDefaultArgument(Decl *param,
+ SourceLocation EqualLoc,
+ SourceLocation ArgLoc) {
+ if (!param)
+ return;
+
+ ParmVarDecl *Param = cast<ParmVarDecl>(param);
+ if (Param)
+ Param->setUnparsedDefaultArg();
+
+ UnparsedDefaultArgLocs[Param] = ArgLoc;
+}
+
+/// ActOnParamDefaultArgumentError - Parsing or semantic analysis of
+/// the default argument for the parameter param failed.
+void Sema::ActOnParamDefaultArgumentError(Decl *param) {
+ if (!param)
+ return;
+
+ ParmVarDecl *Param = cast<ParmVarDecl>(param);
+
+ Param->setInvalidDecl();
+
+ UnparsedDefaultArgLocs.erase(Param);
+}
+
+/// CheckExtraCXXDefaultArguments - Check for any extra default
+/// arguments in the declarator, which is not a function declaration
+/// or definition and therefore is not permitted to have default
+/// arguments. This routine should be invoked for every declarator
+/// that is not a function declaration or definition.
+void Sema::CheckExtraCXXDefaultArguments(Declarator &D) {
+ // C++ [dcl.fct.default]p3
+ // A default argument expression shall be specified only in the
+ // parameter-declaration-clause of a function declaration or in a
+ // template-parameter (14.1). It shall not be specified for a
+ // parameter pack. If it is specified in a
+ // parameter-declaration-clause, it shall not occur within a
+ // declarator or abstract-declarator of a parameter-declaration.
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
+ DeclaratorChunk &chunk = D.getTypeObject(i);
+ if (chunk.Kind == DeclaratorChunk::Function) {
+ for (unsigned argIdx = 0, e = chunk.Fun.NumArgs; argIdx != e; ++argIdx) {
+ ParmVarDecl *Param =
+ cast<ParmVarDecl>(chunk.Fun.ArgInfo[argIdx].Param);
+ if (Param->hasUnparsedDefaultArg()) {
+ CachedTokens *Toks = chunk.Fun.ArgInfo[argIdx].DefaultArgTokens;
+ Diag(Param->getLocation(), diag::err_param_default_argument_nonfunc)
+ << SourceRange((*Toks)[1].getLocation(), Toks->back().getLocation());
+ delete Toks;
+ chunk.Fun.ArgInfo[argIdx].DefaultArgTokens = 0;
+ } else if (Param->getDefaultArg()) {
+ Diag(Param->getLocation(), diag::err_param_default_argument_nonfunc)
+ << Param->getDefaultArg()->getSourceRange();
+ Param->setDefaultArg(0);
+ }
+ }
+ }
+ }
+}
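+
+// Example declarators (added for exposition) rejected above because they are
+// not function declarations or definitions:
+//   void (*pf)(int x = 10);           // default argument on a function
+//                                     // pointer declarator
+//   typedef void fn_t(int x = 10);    // default argument in a typedef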
+
+// MergeCXXFunctionDecl - Merge two declarations of the same C++
+// function, once we already know that they have the same
+// type. Subroutine of MergeFunctionDecl. Returns true if there was an
+// error, false otherwise.
+bool Sema::MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old,
+ Scope *S) {
+ bool Invalid = false;
+
+ // C++ [dcl.fct.default]p4:
+ // For non-template functions, default arguments can be added in
+ // later declarations of a function in the same
+ // scope. Declarations in different scopes have completely
+ // distinct sets of default arguments. That is, declarations in
+ // inner scopes do not acquire default arguments from
+ // declarations in outer scopes, and vice versa. In a given
+ // function declaration, all parameters subsequent to a
+ // parameter with a default argument shall have default
+ // arguments supplied in this or previous declarations. A
+ // default argument shall not be redefined by a later
+ // declaration (not even to the same value).
+ //
+ // C++ [dcl.fct.default]p6:
+ // Except for member functions of class templates, the default arguments
+ // in a member function definition that appears outside of the class
+ // definition are added to the set of default arguments provided by the
+ // member function declaration in the class definition.
+ for (unsigned p = 0, NumParams = Old->getNumParams(); p < NumParams; ++p) {
+ ParmVarDecl *OldParam = Old->getParamDecl(p);
+ ParmVarDecl *NewParam = New->getParamDecl(p);
+
+ bool OldParamHasDfl = OldParam->hasDefaultArg();
+ bool NewParamHasDfl = NewParam->hasDefaultArg();
+
+ NamedDecl *ND = Old;
+ if (S && !isDeclInScope(ND, New->getDeclContext(), S))
+ // Ignore default parameters of old decl if they are not in
+ // the same scope.
+ OldParamHasDfl = false;
+
+ if (OldParamHasDfl && NewParamHasDfl) {
+
+ unsigned DiagDefaultParamID =
+ diag::err_param_default_argument_redefinition;
+
+      // MSVC allows default parameters to be redefined for member functions
+      // of class templates. The new default parameter's value is ignored.
+ Invalid = true;
+ if (getLangOpts().MicrosoftExt) {
+ CXXMethodDecl* MD = dyn_cast<CXXMethodDecl>(New);
+ if (MD && MD->getParent()->getDescribedClassTemplate()) {
+ // Merge the old default argument into the new parameter.
+ NewParam->setHasInheritedDefaultArg();
+ if (OldParam->hasUninstantiatedDefaultArg())
+ NewParam->setUninstantiatedDefaultArg(
+ OldParam->getUninstantiatedDefaultArg());
+ else
+ NewParam->setDefaultArg(OldParam->getInit());
+ DiagDefaultParamID = diag::warn_param_default_argument_redefinition;
+ Invalid = false;
+ }
+ }
+
+ // FIXME: If we knew where the '=' was, we could easily provide a fix-it
+ // hint here. Alternatively, we could walk the type-source information
+ // for NewParam to find the last source location in the type... but it
+ // isn't worth the effort right now. This is the kind of test case that
+ // is hard to get right:
+ // int f(int);
+ // void g(int (*fp)(int) = f);
+ // void g(int (*fp)(int) = &f);
+ Diag(NewParam->getLocation(), DiagDefaultParamID)
+ << NewParam->getDefaultArgRange();
+
+ // Look for the function declaration where the default argument was
+ // actually written, which may be a declaration prior to Old.
+ for (FunctionDecl *Older = Old->getPreviousDecl();
+ Older; Older = Older->getPreviousDecl()) {
+ if (!Older->getParamDecl(p)->hasDefaultArg())
+ break;
+
+ OldParam = Older->getParamDecl(p);
+ }
+
+ Diag(OldParam->getLocation(), diag::note_previous_definition)
+ << OldParam->getDefaultArgRange();
+ } else if (OldParamHasDfl) {
+ // Merge the old default argument into the new parameter.
+ // It's important to use getInit() here; getDefaultArg()
+ // strips off any top-level ExprWithCleanups.
+ NewParam->setHasInheritedDefaultArg();
+ if (OldParam->hasUninstantiatedDefaultArg())
+ NewParam->setUninstantiatedDefaultArg(
+ OldParam->getUninstantiatedDefaultArg());
+ else
+ NewParam->setDefaultArg(OldParam->getInit());
+ } else if (NewParamHasDfl) {
+ if (New->getDescribedFunctionTemplate()) {
+ // Paragraph 4, quoted above, only applies to non-template functions.
+ Diag(NewParam->getLocation(),
+ diag::err_param_default_argument_template_redecl)
+ << NewParam->getDefaultArgRange();
+ Diag(Old->getLocation(), diag::note_template_prev_declaration)
+ << false;
+ } else if (New->getTemplateSpecializationKind()
+ != TSK_ImplicitInstantiation &&
+ New->getTemplateSpecializationKind() != TSK_Undeclared) {
+        // C++ [temp.expl.spec]p21:
+ // Default function arguments shall not be specified in a declaration
+ // or a definition for one of the following explicit specializations:
+ // - the explicit specialization of a function template;
+ // - the explicit specialization of a member function template;
+ // - the explicit specialization of a member function of a class
+ // template where the class template specialization to which the
+ // member function specialization belongs is implicitly
+ // instantiated.
+ Diag(NewParam->getLocation(), diag::err_template_spec_default_arg)
+ << (New->getTemplateSpecializationKind() ==TSK_ExplicitSpecialization)
+ << New->getDeclName()
+ << NewParam->getDefaultArgRange();
+ } else if (New->getDeclContext()->isDependentContext()) {
+ // C++ [dcl.fct.default]p6 (DR217):
+ // Default arguments for a member function of a class template shall
+ // be specified on the initial declaration of the member function
+ // within the class template.
+ //
+ // Reading the tea leaves a bit in DR217 and its reference to DR205
+ // leads me to the conclusion that one cannot add default function
+ // arguments for an out-of-line definition of a member function of a
+ // dependent type.
+ int WhichKind = 2;
+ if (CXXRecordDecl *Record
+ = dyn_cast<CXXRecordDecl>(New->getDeclContext())) {
+ if (Record->getDescribedClassTemplate())
+ WhichKind = 0;
+ else if (isa<ClassTemplatePartialSpecializationDecl>(Record))
+ WhichKind = 1;
+ else
+ WhichKind = 2;
+ }
+
+ Diag(NewParam->getLocation(),
+ diag::err_param_default_argument_member_template_redecl)
+ << WhichKind
+ << NewParam->getDefaultArgRange();
+ } else if (CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(New)) {
+ CXXSpecialMember NewSM = getSpecialMember(Ctor),
+ OldSM = getSpecialMember(cast<CXXConstructorDecl>(Old));
+ if (NewSM != OldSM) {
+ Diag(NewParam->getLocation(),diag::warn_default_arg_makes_ctor_special)
+ << NewParam->getDefaultArgRange() << NewSM;
+ Diag(Old->getLocation(), diag::note_previous_declaration_special)
+ << OldSM;
+ }
+ }
+ }
+ }
+
+ // C++11 [dcl.constexpr]p1: If any declaration of a function or function
+ // template has a constexpr specifier then all its declarations shall
+ // contain the constexpr specifier.
+ if (New->isConstexpr() != Old->isConstexpr()) {
+ Diag(New->getLocation(), diag::err_constexpr_redecl_mismatch)
+ << New << New->isConstexpr();
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ Invalid = true;
+ }
+
+ if (CheckEquivalentExceptionSpec(Old, New))
+ Invalid = true;
+
+ return Invalid;
+}
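+
+// Illustrative redeclarations (added for exposition) for the default-argument
+// merging above:
+//   void f(int a, int b = 2);   // #1
+//   void f(int a = 1, int b);   // #2: OK, 'b' inherits its default from #1
+//   void f(int a, int b = 2);   // #3: error, default for 'b' redefined
+//                               //     (even though the value is the same)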
+
+/// \brief Merge the exception specifications of two variable declarations.
+///
+/// This is called when there's a redeclaration of a VarDecl. The function
+/// checks if the redeclaration might have an exception specification and
+/// validates compatibility and merges the specs if necessary.
+void Sema::MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old) {
+ // Shortcut if exceptions are disabled.
+ if (!getLangOpts().CXXExceptions)
+ return;
+
+ assert(Context.hasSameType(New->getType(), Old->getType()) &&
+ "Should only be called if types are otherwise the same.");
+
+ QualType NewType = New->getType();
+ QualType OldType = Old->getType();
+
+ // We're only interested in pointers and references to functions, as well
+ // as pointers to member functions.
+ if (const ReferenceType *R = NewType->getAs<ReferenceType>()) {
+ NewType = R->getPointeeType();
+ OldType = OldType->getAs<ReferenceType>()->getPointeeType();
+ } else if (const PointerType *P = NewType->getAs<PointerType>()) {
+ NewType = P->getPointeeType();
+ OldType = OldType->getAs<PointerType>()->getPointeeType();
+ } else if (const MemberPointerType *M = NewType->getAs<MemberPointerType>()) {
+ NewType = M->getPointeeType();
+ OldType = OldType->getAs<MemberPointerType>()->getPointeeType();
+ }
+
+ if (!NewType->isFunctionProtoType())
+ return;
+
+  // There are lots of special cases for functions. For function pointers,
+  // system libraries are hopefully not as broken, so we don't need those
+  // workarounds here.
+ if (CheckEquivalentExceptionSpec(
+ OldType->getAs<FunctionProtoType>(), Old->getLocation(),
+ NewType->getAs<FunctionProtoType>(), New->getLocation())) {
+ New->setInvalidDecl();
+ }
+}
+
+/// CheckCXXDefaultArguments - Verify that the default arguments for a
+/// function declaration are well-formed according to C++
+/// [dcl.fct.default].
+void Sema::CheckCXXDefaultArguments(FunctionDecl *FD) {
+ unsigned NumParams = FD->getNumParams();
+ unsigned p;
+
+ bool IsLambda = FD->getOverloadedOperator() == OO_Call &&
+ isa<CXXMethodDecl>(FD) &&
+ cast<CXXMethodDecl>(FD)->getParent()->isLambda();
+
+ // Find first parameter with a default argument
+ for (p = 0; p < NumParams; ++p) {
+ ParmVarDecl *Param = FD->getParamDecl(p);
+ if (Param->hasDefaultArg()) {
+ // C++11 [expr.prim.lambda]p5:
+ // [...] Default arguments (8.3.6) shall not be specified in the
+ // parameter-declaration-clause of a lambda-declarator.
+ //
+      // FIXME: Core issue 974 strikes this sentence; we only provide an
+      // extension warning.
+ if (IsLambda)
+ Diag(Param->getLocation(), diag::ext_lambda_default_arguments)
+ << Param->getDefaultArgRange();
+ break;
+ }
+ }
+
+ // C++ [dcl.fct.default]p4:
+ // In a given function declaration, all parameters
+ // subsequent to a parameter with a default argument shall
+ // have default arguments supplied in this or previous
+ // declarations. A default argument shall not be redefined
+ // by a later declaration (not even to the same value).
+ unsigned LastMissingDefaultArg = 0;
+ for (; p < NumParams; ++p) {
+ ParmVarDecl *Param = FD->getParamDecl(p);
+ if (!Param->hasDefaultArg()) {
+ if (Param->isInvalidDecl())
+ /* We already complained about this parameter. */;
+ else if (Param->getIdentifier())
+ Diag(Param->getLocation(),
+ diag::err_param_default_argument_missing_name)
+ << Param->getIdentifier();
+ else
+ Diag(Param->getLocation(),
+ diag::err_param_default_argument_missing);
+
+ LastMissingDefaultArg = p;
+ }
+ }
+
+ if (LastMissingDefaultArg > 0) {
+ // Some default arguments were missing. Clear out all of the
+ // default arguments up to (and including) the last missing
+ // default argument, so that we leave the function parameters
+ // in a semantically valid state.
+ for (p = 0; p <= LastMissingDefaultArg; ++p) {
+ ParmVarDecl *Param = FD->getParamDecl(p);
+ if (Param->hasDefaultArg()) {
+ Param->setDefaultArg(0);
+ }
+ }
+ }
+}
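+
+// Illustrative example (added for exposition) for the check above, assuming
+// no earlier declaration supplies a default for 'b':
+//   void f(int a = 1, int b);   // error: 'b' is missing a default argument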
+
+// CheckConstexprParameterTypes - Check whether a function's parameter types
+// are all literal types. If so, return true. If not, produce a suitable
+// diagnostic and return false.
+static bool CheckConstexprParameterTypes(Sema &SemaRef,
+ const FunctionDecl *FD) {
+ unsigned ArgIndex = 0;
+ const FunctionProtoType *FT = FD->getType()->getAs<FunctionProtoType>();
+ for (FunctionProtoType::arg_type_iterator i = FT->arg_type_begin(),
+ e = FT->arg_type_end(); i != e; ++i, ++ArgIndex) {
+ const ParmVarDecl *PD = FD->getParamDecl(ArgIndex);
+ SourceLocation ParamLoc = PD->getLocation();
+ if (!(*i)->isDependentType() &&
+ SemaRef.RequireLiteralType(ParamLoc, *i,
+ SemaRef.PDiag(diag::err_constexpr_non_literal_param)
+ << ArgIndex+1 << PD->getSourceRange()
+ << isa<CXXConstructorDecl>(FD)))
+ return false;
+ }
+ return true;
+}
+
+// CheckConstexprFunctionDecl - Check whether a function declaration satisfies
+// the requirements of a constexpr function definition or a constexpr
+// constructor definition. If so, return true. If not, produce appropriate
+// diagnostics and return false.
+//
+// This implements C++11 [dcl.constexpr]p3,4, as amended by DR1360.
+bool Sema::CheckConstexprFunctionDecl(const FunctionDecl *NewFD) {
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(NewFD);
+ if (MD && MD->isInstance()) {
+ // C++11 [dcl.constexpr]p4:
+ // The definition of a constexpr constructor shall satisfy the following
+ // constraints:
+ // - the class shall not have any virtual base classes;
+ const CXXRecordDecl *RD = MD->getParent();
+ if (RD->getNumVBases()) {
+ Diag(NewFD->getLocation(), diag::err_constexpr_virtual_base)
+ << isa<CXXConstructorDecl>(NewFD) << RD->isStruct()
+ << RD->getNumVBases();
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I)
+ Diag(I->getLocStart(),
+ diag::note_constexpr_virtual_base_here) << I->getSourceRange();
+ return false;
+ }
+ }
+
+ if (!isa<CXXConstructorDecl>(NewFD)) {
+ // C++11 [dcl.constexpr]p3:
+ // The definition of a constexpr function shall satisfy the following
+ // constraints:
+ // - it shall not be virtual;
+ const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(NewFD);
+ if (Method && Method->isVirtual()) {
+ Diag(NewFD->getLocation(), diag::err_constexpr_virtual);
+
+ // If it's not obvious why this function is virtual, find an overridden
+ // function which uses the 'virtual' keyword.
+ const CXXMethodDecl *WrittenVirtual = Method;
+ while (!WrittenVirtual->isVirtualAsWritten())
+ WrittenVirtual = *WrittenVirtual->begin_overridden_methods();
+ if (WrittenVirtual != Method)
+ Diag(WrittenVirtual->getLocation(),
+ diag::note_overridden_virtual_function);
+ return false;
+ }
+
+ // - its return type shall be a literal type;
+ QualType RT = NewFD->getResultType();
+ if (!RT->isDependentType() &&
+ RequireLiteralType(NewFD->getLocation(), RT,
+ PDiag(diag::err_constexpr_non_literal_return)))
+ return false;
+ }
+
+ // - each of its parameter types shall be a literal type;
+ if (!CheckConstexprParameterTypes(*this, NewFD))
+ return false;
+
+ return true;
+}
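+
+// Example declarations (added for exposition) rejected above under the C++11
+// rules:
+//   struct NonLiteral { ~NonLiteral(); };           // non-literal type
+//   constexpr NonLiteral f();              // non-literal return type
+//   constexpr int g(NonLiteral n);         // non-literal parameter type
+//   struct B { virtual constexpr int h() const; };  // constexpr + virtual
+//   struct V : virtual B { constexpr V(); };        // virtual base class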
+
+/// Check that the given declaration statement is legal within a constexpr
+/// function body. C++0x [dcl.constexpr]p3,p4.
+///
+/// \return true if the body is OK, false if we have diagnosed a problem.
+static bool CheckConstexprDeclStmt(Sema &SemaRef, const FunctionDecl *Dcl,
+ DeclStmt *DS) {
+ // C++0x [dcl.constexpr]p3 and p4:
+ // The definition of a constexpr function(p3) or constructor(p4) [...] shall
+ // contain only
+ for (DeclStmt::decl_iterator DclIt = DS->decl_begin(),
+ DclEnd = DS->decl_end(); DclIt != DclEnd; ++DclIt) {
+ switch ((*DclIt)->getKind()) {
+ case Decl::StaticAssert:
+ case Decl::Using:
+ case Decl::UsingShadow:
+ case Decl::UsingDirective:
+ case Decl::UnresolvedUsingTypename:
+ // - static_assert-declarations
+ // - using-declarations,
+ // - using-directives,
+ continue;
+
+ case Decl::Typedef:
+ case Decl::TypeAlias: {
+ // - typedef declarations and alias-declarations that do not define
+ // classes or enumerations,
+ TypedefNameDecl *TN = cast<TypedefNameDecl>(*DclIt);
+ if (TN->getUnderlyingType()->isVariablyModifiedType()) {
+ // Don't allow variably-modified types in constexpr functions.
+ TypeLoc TL = TN->getTypeSourceInfo()->getTypeLoc();
+ SemaRef.Diag(TL.getBeginLoc(), diag::err_constexpr_vla)
+ << TL.getSourceRange() << TL.getType()
+ << isa<CXXConstructorDecl>(Dcl);
+ return false;
+ }
+ continue;
+ }
+
+ case Decl::Enum:
+ case Decl::CXXRecord:
+ // As an extension, we allow the declaration (but not the definition) of
+ // classes and enumerations in all declarations, not just in typedef and
+ // alias declarations.
+ if (cast<TagDecl>(*DclIt)->isThisDeclarationADefinition()) {
+ SemaRef.Diag(DS->getLocStart(), diag::err_constexpr_type_definition)
+ << isa<CXXConstructorDecl>(Dcl);
+ return false;
+ }
+ continue;
+
+ case Decl::Var:
+ SemaRef.Diag(DS->getLocStart(), diag::err_constexpr_var_declaration)
+ << isa<CXXConstructorDecl>(Dcl);
+ return false;
+
+ default:
+ SemaRef.Diag(DS->getLocStart(), diag::err_constexpr_body_invalid_stmt)
+ << isa<CXXConstructorDecl>(Dcl);
+ return false;
+ }
+ }
+
+ return true;
+}
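+
+// Illustrative constexpr body (added for exposition) for the declaration
+// checks above, under the C++11 rules:
+//   constexpr int f(int n) {
+//     static_assert(sizeof(int) >= sizeof(char), ""); // OK: static_assert
+//     typedef int T;                                   // OK: typedef
+//     int k = n;                          // error: variable declaration
+//     return n;
+//   }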
+
+/// Check that the given field is initialized within a constexpr constructor.
+///
+/// \param Dcl The constexpr constructor being checked.
+/// \param Field The field being checked. This may be a member of an anonymous
+/// struct or union nested within the class being checked.
+/// \param Inits All declarations, including anonymous struct/union members and
+/// indirect members, for which any initialization was provided.
+/// \param Diagnosed Set to true if an error is produced.
+static void CheckConstexprCtorInitializer(Sema &SemaRef,
+ const FunctionDecl *Dcl,
+ FieldDecl *Field,
+ llvm::SmallSet<Decl*, 16> &Inits,
+ bool &Diagnosed) {
+ if (Field->isUnnamedBitfield())
+ return;
+
+ if (Field->isAnonymousStructOrUnion() &&
+ Field->getType()->getAsCXXRecordDecl()->isEmpty())
+ return;
+
+ if (!Inits.count(Field)) {
+ if (!Diagnosed) {
+ SemaRef.Diag(Dcl->getLocation(), diag::err_constexpr_ctor_missing_init);
+ Diagnosed = true;
+ }
+ SemaRef.Diag(Field->getLocation(), diag::note_constexpr_ctor_missing_init);
+ } else if (Field->isAnonymousStructOrUnion()) {
+ const RecordDecl *RD = Field->getType()->castAs<RecordType>()->getDecl();
+ for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I)
+ // If an anonymous union contains an anonymous struct of which any member
+ // is initialized, all members must be initialized.
+ if (!RD->isUnion() || Inits.count(*I))
+ CheckConstexprCtorInitializer(SemaRef, Dcl, *I, Inits, Diagnosed);
+ }
+}
+
+/// Check that the body of the given constexpr function declaration contains
+/// only the permitted kinds of statement. C++11 [dcl.constexpr]p3,p4.
+///
+/// \return true if the body is OK, false if we have diagnosed a problem.
+bool Sema::CheckConstexprFunctionBody(const FunctionDecl *Dcl, Stmt *Body) {
+ if (isa<CXXTryStmt>(Body)) {
+ // C++11 [dcl.constexpr]p3:
+ // The definition of a constexpr function shall satisfy the following
+ // constraints: [...]
+ // - its function-body shall be = delete, = default, or a
+ // compound-statement
+ //
+ // C++11 [dcl.constexpr]p4:
+ // In the definition of a constexpr constructor, [...]
+ // - its function-body shall not be a function-try-block;
+ Diag(Body->getLocStart(), diag::err_constexpr_function_try_block)
+ << isa<CXXConstructorDecl>(Dcl);
+ return false;
+ }
+
+ // - its function-body shall be [...] a compound-statement that contains only
+ CompoundStmt *CompBody = cast<CompoundStmt>(Body);
+
+ llvm::SmallVector<SourceLocation, 4> ReturnStmts;
+ for (CompoundStmt::body_iterator BodyIt = CompBody->body_begin(),
+ BodyEnd = CompBody->body_end(); BodyIt != BodyEnd; ++BodyIt) {
+ switch ((*BodyIt)->getStmtClass()) {
+ case Stmt::NullStmtClass:
+ // - null statements,
+ continue;
+
+ case Stmt::DeclStmtClass:
+ // - static_assert-declarations
+ // - using-declarations,
+ // - using-directives,
+ // - typedef declarations and alias-declarations that do not define
+ // classes or enumerations,
+ if (!CheckConstexprDeclStmt(*this, Dcl, cast<DeclStmt>(*BodyIt)))
+ return false;
+ continue;
+
+ case Stmt::ReturnStmtClass:
+ // - and exactly one return statement;
+ if (isa<CXXConstructorDecl>(Dcl))
+ break;
+
+ ReturnStmts.push_back((*BodyIt)->getLocStart());
+ continue;
+
+ default:
+ break;
+ }
+
+ Diag((*BodyIt)->getLocStart(), diag::err_constexpr_body_invalid_stmt)
+ << isa<CXXConstructorDecl>(Dcl);
+ return false;
+ }
+
+ if (const CXXConstructorDecl *Constructor
+ = dyn_cast<CXXConstructorDecl>(Dcl)) {
+ const CXXRecordDecl *RD = Constructor->getParent();
+ // DR1359:
+ // - every non-variant non-static data member and base class sub-object
+ // shall be initialized;
+ // - if the class is a non-empty union, or for each non-empty anonymous
+ // union member of a non-union class, exactly one non-static data member
+ // shall be initialized;
+ if (RD->isUnion()) {
+ if (Constructor->getNumCtorInitializers() == 0 && !RD->isEmpty()) {
+ Diag(Dcl->getLocation(), diag::err_constexpr_union_ctor_no_init);
+ return false;
+ }
+ } else if (!Constructor->isDependentContext() &&
+ !Constructor->isDelegatingConstructor()) {
+ assert(RD->getNumVBases() == 0 && "constexpr ctor with virtual bases");
+
+      // Skip detailed checking if we have enough initializers, since at most
+      // one initializer per member is allowed.
+ bool AnyAnonStructUnionMembers = false;
+ unsigned Fields = 0;
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I, ++Fields) {
+ if ((*I)->isAnonymousStructOrUnion()) {
+ AnyAnonStructUnionMembers = true;
+ break;
+ }
+ }
+ if (AnyAnonStructUnionMembers ||
+ Constructor->getNumCtorInitializers() != RD->getNumBases() + Fields) {
+ // Check initialization of non-static data members. Base classes are
+ // always initialized so do not need to be checked. Dependent bases
+ // might not have initializers in the member initializer list.
+ llvm::SmallSet<Decl*, 16> Inits;
+ for (CXXConstructorDecl::init_const_iterator
+ I = Constructor->init_begin(), E = Constructor->init_end();
+ I != E; ++I) {
+ if (FieldDecl *FD = (*I)->getMember())
+ Inits.insert(FD);
+ else if (IndirectFieldDecl *ID = (*I)->getIndirectMember())
+ Inits.insert(ID->chain_begin(), ID->chain_end());
+ }
+
+ bool Diagnosed = false;
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I)
+ CheckConstexprCtorInitializer(*this, Dcl, *I, Inits, Diagnosed);
+ if (Diagnosed)
+ return false;
+ }
+ }
+ } else {
+ if (ReturnStmts.empty()) {
+ Diag(Dcl->getLocation(), diag::err_constexpr_body_no_return);
+ return false;
+ }
+ if (ReturnStmts.size() > 1) {
+ Diag(ReturnStmts.back(), diag::err_constexpr_body_multiple_return);
+ for (unsigned I = 0; I < ReturnStmts.size() - 1; ++I)
+ Diag(ReturnStmts[I], diag::note_constexpr_body_previous_return);
+ return false;
+ }
+ }
+
+ // C++11 [dcl.constexpr]p5:
+ // if no function argument values exist such that the function invocation
+ // substitution would produce a constant expression, the program is
+ // ill-formed; no diagnostic required.
+ // C++11 [dcl.constexpr]p3:
+ // - every constructor call and implicit conversion used in initializing the
+ // return value shall be one of those allowed in a constant expression.
+ // C++11 [dcl.constexpr]p4:
+ // - every constructor involved in initializing non-static data members and
+ // base class sub-objects shall be a constexpr constructor.
+ llvm::SmallVector<PartialDiagnosticAt, 8> Diags;
+ if (!Expr::isPotentialConstantExpr(Dcl, Diags)) {
+ Diag(Dcl->getLocation(), diag::err_constexpr_function_never_constant_expr)
+ << isa<CXXConstructorDecl>(Dcl);
+ for (size_t I = 0, N = Diags.size(); I != N; ++I)
+ Diag(Diags[I].first, Diags[I].second);
+ return false;
+ }
+
+ return true;
+}
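+
+// Illustrative bodies (added for exposition) for the statement checks above,
+// under the C++11 rules:
+//   constexpr int my_abs(int n) {
+//     if (n < 0) return -n;     // error: 'if' is not an allowed statement
+//     return n;
+//   }
+//   constexpr int my_abs2(int n) { return n < 0 ? -n : n; }   // OK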
+
+/// isCurrentClassName - Determine whether the identifier II is the
+/// name of the class type currently being defined. In the case of
+/// nested classes, this will only return true if II is the name of
+/// the innermost class.
+bool Sema::isCurrentClassName(const IdentifierInfo &II, Scope *,
+ const CXXScopeSpec *SS) {
+ assert(getLangOpts().CPlusPlus && "No class names in C!");
+
+ CXXRecordDecl *CurDecl;
+ if (SS && SS->isSet() && !SS->isInvalid()) {
+ DeclContext *DC = computeDeclContext(*SS, true);
+ CurDecl = dyn_cast_or_null<CXXRecordDecl>(DC);
+ } else
+ CurDecl = dyn_cast_or_null<CXXRecordDecl>(CurContext);
+
+ if (CurDecl && CurDecl->getIdentifier())
+ return &II == CurDecl->getIdentifier();
+ else
+ return false;
+}
+
+/// \brief Check the validity of a C++ base class specifier.
+///
+/// \returns a new CXXBaseSpecifier if well-formed, emits diagnostics
+/// and returns NULL otherwise.
+CXXBaseSpecifier *
+Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
+ SourceRange SpecifierRange,
+ bool Virtual, AccessSpecifier Access,
+ TypeSourceInfo *TInfo,
+ SourceLocation EllipsisLoc) {
+ QualType BaseType = TInfo->getType();
+
+ // C++ [class.union]p1:
+ // A union shall not have base classes.
+ if (Class->isUnion()) {
+ Diag(Class->getLocation(), diag::err_base_clause_on_union)
+ << SpecifierRange;
+ return 0;
+ }
+
+ if (EllipsisLoc.isValid() &&
+ !TInfo->getType()->containsUnexpandedParameterPack()) {
+ Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << TInfo->getTypeLoc().getSourceRange();
+ EllipsisLoc = SourceLocation();
+ }
+
+ if (BaseType->isDependentType())
+ return new (Context) CXXBaseSpecifier(SpecifierRange, Virtual,
+ Class->getTagKind() == TTK_Class,
+ Access, TInfo, EllipsisLoc);
+
+ SourceLocation BaseLoc = TInfo->getTypeLoc().getBeginLoc();
+
+ // Base specifiers must be record types.
+ if (!BaseType->isRecordType()) {
+ Diag(BaseLoc, diag::err_base_must_be_class) << SpecifierRange;
+ return 0;
+ }
+
+ // C++ [class.union]p1:
+ // A union shall not be used as a base class.
+ if (BaseType->isUnionType()) {
+ Diag(BaseLoc, diag::err_union_as_base_class) << SpecifierRange;
+ return 0;
+ }
+
+ // C++ [class.derived]p2:
+ // The class-name in a base-specifier shall not be an incompletely
+ // defined class.
+ if (RequireCompleteType(BaseLoc, BaseType,
+ PDiag(diag::err_incomplete_base_class)
+ << SpecifierRange)) {
+ Class->setInvalidDecl();
+ return 0;
+ }
+
+ // If the base class is polymorphic or isn't empty, the new one is/isn't, too.
+ RecordDecl *BaseDecl = BaseType->getAs<RecordType>()->getDecl();
+ assert(BaseDecl && "Record type has no declaration");
+ BaseDecl = BaseDecl->getDefinition();
+ assert(BaseDecl && "Base type is not incomplete, but has no definition");
+ CXXRecordDecl * CXXBaseDecl = cast<CXXRecordDecl>(BaseDecl);
+ assert(CXXBaseDecl && "Base type is not a C++ type");
+
+ // C++ [class]p3:
+ // If a class is marked final and it appears as a base-type-specifier in
+ // base-clause, the program is ill-formed.
+ if (CXXBaseDecl->hasAttr<FinalAttr>()) {
+ Diag(BaseLoc, diag::err_class_marked_final_used_as_base)
+ << CXXBaseDecl->getDeclName();
+ Diag(CXXBaseDecl->getLocation(), diag::note_previous_decl)
+ << CXXBaseDecl->getDeclName();
+ return 0;
+ }
+
+ if (BaseDecl->isInvalidDecl())
+ Class->setInvalidDecl();
+
+ // Create the base specifier.
+ return new (Context) CXXBaseSpecifier(SpecifierRange, Virtual,
+ Class->getTagKind() == TTK_Class,
+ Access, TInfo, EllipsisLoc);
+}
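+// Illustrative (assumed) examples of base specifiers the routine above rejects:
+//   union U {};
+//   struct A : U {};          // error: a union cannot be used as a base class
+//   struct F final {};
+//   struct G : F {};          // error: F is marked 'final'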
+
+/// ActOnBaseSpecifier - Parsed a base specifier. A base specifier is
+/// one entry in the base class list of a class specifier, for
+/// example:
+/// class foo : public bar, virtual private baz {
+/// 'public bar' and 'virtual private baz' are each base-specifiers.
+BaseResult
+Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
+ bool Virtual, AccessSpecifier Access,
+ ParsedType basetype, SourceLocation BaseLoc,
+ SourceLocation EllipsisLoc) {
+ if (!classdecl)
+ return true;
+
+ AdjustDeclIfTemplate(classdecl);
+ CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(classdecl);
+ if (!Class)
+ return true;
+
+ TypeSourceInfo *TInfo = 0;
+ GetTypeFromParser(basetype, &TInfo);
+
+ if (EllipsisLoc.isInvalid() &&
+ DiagnoseUnexpandedParameterPack(SpecifierRange.getBegin(), TInfo,
+ UPPC_BaseType))
+ return true;
+
+ if (CXXBaseSpecifier *BaseSpec = CheckBaseSpecifier(Class, SpecifierRange,
+ Virtual, Access, TInfo,
+ EllipsisLoc))
+ return BaseSpec;
+
+ return true;
+}
+
+/// \brief Performs the actual work of attaching the given base class
+/// specifiers to a C++ class.
+bool Sema::AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases,
+ unsigned NumBases) {
+ if (NumBases == 0)
+ return false;
+
+ // Used to keep track of which base types we have already seen, so
+ // that we can properly diagnose redundant direct base types. Note
+ // that the key is always the unqualified canonical type of the base
+ // class.
+ std::map<QualType, CXXBaseSpecifier*, QualTypeOrdering> KnownBaseTypes;
+
+ // Copy non-redundant base specifiers into permanent storage.
+ unsigned NumGoodBases = 0;
+ bool Invalid = false;
+ for (unsigned idx = 0; idx < NumBases; ++idx) {
+ QualType NewBaseType
+ = Context.getCanonicalType(Bases[idx]->getType());
+ NewBaseType = NewBaseType.getLocalUnqualifiedType();
+
+ CXXBaseSpecifier *&KnownBase = KnownBaseTypes[NewBaseType];
+ if (KnownBase) {
+ // C++ [class.mi]p3:
+ // A class shall not be specified as a direct base class of a
+ // derived class more than once.
+ Diag(Bases[idx]->getLocStart(),
+ diag::err_duplicate_base_class)
+ << KnownBase->getType()
+ << Bases[idx]->getSourceRange();
+
+ // Delete the duplicate base class specifier; we're going to
+ // overwrite its pointer later.
+ Context.Deallocate(Bases[idx]);
+
+ Invalid = true;
+ } else {
+ // Okay, add this new base class.
+ KnownBase = Bases[idx];
+ Bases[NumGoodBases++] = Bases[idx];
+ if (const RecordType *Record = NewBaseType->getAs<RecordType>())
+ if (const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl()))
+ if (RD->hasAttr<WeakAttr>())
+ Class->addAttr(::new (Context) WeakAttr(SourceRange(), Context));
+ }
+ }
+
+ // Attach the remaining base class specifiers to the derived class.
+ Class->setBases(Bases, NumGoodBases);
+
+ // Delete the remaining (good) base class specifiers, since their
+ // data has been copied into the CXXRecordDecl.
+ for (unsigned idx = 0; idx < NumGoodBases; ++idx)
+ Context.Deallocate(Bases[idx]);
+
+ return Invalid;
+}
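+// A minimal sketch (assumed) of the redundant-base case diagnosed above:
+//   struct A {};
+//   struct B : A, A {};       // error: 'A' specified as a direct base more than once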
+
+/// ActOnBaseSpecifiers - Attach the given base specifiers to the
+/// class, after checking whether there are any duplicate base
+/// classes.
+void Sema::ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases,
+ unsigned NumBases) {
+ if (!ClassDecl || !Bases || !NumBases)
+ return;
+
+ AdjustDeclIfTemplate(ClassDecl);
+ AttachBaseSpecifiers(cast<CXXRecordDecl>(ClassDecl),
+ (CXXBaseSpecifier**)(Bases), NumBases);
+}
+
+static CXXRecordDecl *GetClassForType(QualType T) {
+ if (const RecordType *RT = T->getAs<RecordType>())
+ return cast<CXXRecordDecl>(RT->getDecl());
+ else if (const InjectedClassNameType *ICT = T->getAs<InjectedClassNameType>())
+ return ICT->getDecl();
+ else
+ return 0;
+}
+
+/// \brief Determine whether the type \p Derived is a C++ class that is
+/// derived from the type \p Base.
+bool Sema::IsDerivedFrom(QualType Derived, QualType Base) {
+ if (!getLangOpts().CPlusPlus)
+ return false;
+
+ CXXRecordDecl *DerivedRD = GetClassForType(Derived);
+ if (!DerivedRD)
+ return false;
+
+ CXXRecordDecl *BaseRD = GetClassForType(Base);
+ if (!BaseRD)
+ return false;
+
+ // FIXME: instantiate DerivedRD if necessary. We need a PoI for this.
+ return DerivedRD->hasDefinition() && DerivedRD->isDerivedFrom(BaseRD);
+}
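+// Hypothetical usage sketch: given
+//   struct A {};
+//   struct B : A {};
+// IsDerivedFrom(B, A) is true and IsDerivedFrom(A, B) is false; the overload
+// below additionally records the inheritance paths it explored.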
+
+/// \brief Determine whether the type \p Derived is a C++ class that is
+/// derived from the type \p Base.
+bool Sema::IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths) {
+ if (!getLangOpts().CPlusPlus)
+ return false;
+
+ CXXRecordDecl *DerivedRD = GetClassForType(Derived);
+ if (!DerivedRD)
+ return false;
+
+ CXXRecordDecl *BaseRD = GetClassForType(Base);
+ if (!BaseRD)
+ return false;
+
+ return DerivedRD->isDerivedFrom(BaseRD, Paths);
+}
+
+void Sema::BuildBasePathArray(const CXXBasePaths &Paths,
+ CXXCastPath &BasePathArray) {
+ assert(BasePathArray.empty() && "Base path array must be empty!");
+ assert(Paths.isRecordingPaths() && "Must record paths!");
+
+ const CXXBasePath &Path = Paths.front();
+
+ // We first go backward and check if we have a virtual base.
+ // FIXME: It would be better if CXXBasePath had the base specifier for
+ // the nearest virtual base.
+ unsigned Start = 0;
+ for (unsigned I = Path.size(); I != 0; --I) {
+ if (Path[I - 1].Base->isVirtual()) {
+ Start = I - 1;
+ break;
+ }
+ }
+
+ // Now add all bases.
+ for (unsigned I = Start, E = Path.size(); I != E; ++I)
+ BasePathArray.push_back(const_cast<CXXBaseSpecifier*>(Path[I].Base));
+}
+
+/// \brief Determine whether the given base path includes a virtual
+/// base class.
+bool Sema::BasePathInvolvesVirtualBase(const CXXCastPath &BasePath) {
+ for (CXXCastPath::const_iterator B = BasePath.begin(),
+ BEnd = BasePath.end();
+ B != BEnd; ++B)
+ if ((*B)->isVirtual())
+ return true;
+
+ return false;
+}
+
+/// CheckDerivedToBaseConversion - Check whether the Derived-to-Base
+/// conversion (where Derived and Base are class types) is
+/// well-formed, meaning that the conversion is unambiguous (and
+/// that all of the base classes are accessible). Returns true
+/// and emits a diagnostic if the code is ill-formed, returns false
+/// otherwise. Loc is the location where this routine should point to
+/// if there is an error, and Range is the source range to highlight
+/// if there is an error.
+bool
+Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
+ unsigned InaccessibleBaseID,
+ unsigned AmbigiousBaseConvID,
+ SourceLocation Loc, SourceRange Range,
+ DeclarationName Name,
+ CXXCastPath *BasePath) {
+ // First, determine whether the path from Derived to Base is
+ // ambiguous. This is slightly more expensive than checking whether
+ // the Derived to Base conversion exists, because here we need to
+ // explore multiple paths to determine if there is an ambiguity.
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ bool DerivationOkay = IsDerivedFrom(Derived, Base, Paths);
+ assert(DerivationOkay &&
+ "Can only be used with a derived-to-base conversion");
+ (void)DerivationOkay;
+
+ if (!Paths.isAmbiguous(Context.getCanonicalType(Base).getUnqualifiedType())) {
+ if (InaccessibleBaseID) {
+ // Check that the base class can be accessed.
+ switch (CheckBaseClassAccess(Loc, Base, Derived, Paths.front(),
+ InaccessibleBaseID)) {
+ case AR_inaccessible:
+ return true;
+ case AR_accessible:
+ case AR_dependent:
+ case AR_delayed:
+ break;
+ }
+ }
+
+ // Build a base path if necessary.
+ if (BasePath)
+ BuildBasePathArray(Paths, *BasePath);
+ return false;
+ }
+
+ // We know that the derived-to-base conversion is ambiguous, and
+ // we're going to produce a diagnostic. Perform the derived-to-base
+ // search just one more time to compute all of the possible paths so
+ // that we can print them out. This is more expensive than any of
+ // the previous derived-to-base checks we've done, but at this point
+ // performance isn't as much of an issue.
+ Paths.clear();
+ Paths.setRecordingPaths(true);
+ bool StillOkay = IsDerivedFrom(Derived, Base, Paths);
+ assert(StillOkay && "Can only be used with a derived-to-base conversion");
+ (void)StillOkay;
+
+ // Build up a textual representation of the ambiguous paths, e.g.,
+ // D -> B -> A, that will be used to illustrate the ambiguous
+ // conversions in the diagnostic. We only print one of the paths
+ // to each base class subobject.
+ std::string PathDisplayStr = getAmbiguousPathsDisplayString(Paths);
+
+ Diag(Loc, AmbigiousBaseConvID)
+ << Derived << Base << PathDisplayStr << Range << Name;
+ return true;
+}
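+// Illustrative (assumed) example of the ambiguity diagnosed above:
+//   struct A {};
+//   struct B : A {};
+//   struct C : A {};
+//   struct D : B, C {};
+//   A *p = new D;             // error: ambiguous conversion from 'D*' to 'A*'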
+
+bool
+Sema::CheckDerivedToBaseConversion(QualType Derived, QualType Base,
+ SourceLocation Loc, SourceRange Range,
+ CXXCastPath *BasePath,
+ bool IgnoreAccess) {
+ return CheckDerivedToBaseConversion(Derived, Base,
+ IgnoreAccess ? 0
+ : diag::err_upcast_to_inaccessible_base,
+ diag::err_ambiguous_derived_to_base_conv,
+ Loc, Range, DeclarationName(),
+ BasePath);
+}
+
+
+/// @brief Builds a string representing ambiguous paths from a
+/// specific derived class to different subobjects of the same base
+/// class.
+///
+/// This function builds a string that can be used in error messages
+/// to show the different paths that one can take through the
+/// inheritance hierarchy to go from the derived class to different
+/// subobjects of a base class. The result looks something like this:
+/// @code
+/// struct D -> struct B -> struct A
+/// struct D -> struct C -> struct A
+/// @endcode
+std::string Sema::getAmbiguousPathsDisplayString(CXXBasePaths &Paths) {
+ std::string PathDisplayStr;
+ std::set<unsigned> DisplayedPaths;
+ for (CXXBasePaths::paths_iterator Path = Paths.begin();
+ Path != Paths.end(); ++Path) {
+ if (DisplayedPaths.insert(Path->back().SubobjectNumber).second) {
+ // We haven't displayed a path to this particular base
+ // class subobject yet.
+ PathDisplayStr += "\n ";
+ PathDisplayStr += Context.getTypeDeclType(Paths.getOrigin()).getAsString();
+ for (CXXBasePath::const_iterator Element = Path->begin();
+ Element != Path->end(); ++Element)
+ PathDisplayStr += " -> " + Element->Base->getType().getAsString();
+ }
+ }
+
+ return PathDisplayStr;
+}
+
+//===----------------------------------------------------------------------===//
+// C++ class member Handling
+//===----------------------------------------------------------------------===//
+
+/// ActOnAccessSpecifier - Parsed an access specifier followed by a colon.
+bool Sema::ActOnAccessSpecifier(AccessSpecifier Access,
+ SourceLocation ASLoc,
+ SourceLocation ColonLoc,
+ AttributeList *Attrs) {
+ assert(Access != AS_none && "Invalid kind for syntactic access specifier!");
+ AccessSpecDecl *ASDecl = AccessSpecDecl::Create(Context, Access, CurContext,
+ ASLoc, ColonLoc);
+ CurContext->addHiddenDecl(ASDecl);
+ return ProcessAccessDeclAttributeList(ASDecl, Attrs);
+}
+
+/// CheckOverrideControl - Check C++0x override control semantics.
+void Sema::CheckOverrideControl(const Decl *D) {
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D);
+ if (!MD || !MD->isVirtual())
+ return;
+
+ if (MD->isDependentContext())
+ return;
+
+ // C++0x [class.virtual]p3:
+ // If a virtual function is marked with the virt-specifier override and does
+ // not override a member function of a base class,
+ // the program is ill-formed.
+ bool HasOverriddenMethods =
+ MD->begin_overridden_methods() != MD->end_overridden_methods();
+ if (MD->hasAttr<OverrideAttr>() && !HasOverriddenMethods) {
+ Diag(MD->getLocation(),
+ diag::err_function_marked_override_not_overriding)
+ << MD->getDeclName();
+ return;
+ }
+}
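+// A short sketch (assumed) of the ill-formed case above:
+//   struct Base { virtual void f(); };
+//   struct Derived : Base {
+//     virtual void f(int) override;  // error: marked 'override' but overrides nothing
+//   };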
+
+/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
+/// function overrides a virtual member function marked 'final', according to
+/// C++0x [class.virtual]p3.
+bool Sema::CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
+ const CXXMethodDecl *Old) {
+ if (!Old->hasAttr<FinalAttr>())
+ return false;
+
+ Diag(New->getLocation(), diag::err_final_function_overridden)
+ << New->getDeclName();
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function);
+ return true;
+}
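+// Illustrative (assumed) example:
+//   struct B { virtual void f() final; };
+//   struct D : B { void f(); };      // error: overrides a function marked 'final'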
+
+/// ActOnCXXMemberDeclarator - This is invoked when a C++ class member
+/// declarator is parsed. 'AS' is the access specifier, 'BW' specifies the
+/// bitfield width if there is one, and 'HasDeferredInit' is true if an
+/// initializer is present but parsing of it has been deferred.
+Decl *
+Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
+ MultiTemplateParamsArg TemplateParameterLists,
+ Expr *BW, const VirtSpecifiers &VS,
+ bool HasDeferredInit) {
+ const DeclSpec &DS = D.getDeclSpec();
+ DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
+ DeclarationName Name = NameInfo.getName();
+ SourceLocation Loc = NameInfo.getLoc();
+
+ // For anonymous bitfields, the location should point to the type.
+ if (Loc.isInvalid())
+ Loc = D.getLocStart();
+
+ Expr *BitWidth = static_cast<Expr*>(BW);
+
+ assert(isa<CXXRecordDecl>(CurContext));
+ assert(!DS.isFriendSpecified());
+
+ bool isFunc = D.isDeclarationOfFunction();
+
+ // C++ 9.2p6: A member shall not be declared to have automatic storage
+ // duration (auto, register) or with the extern storage-class-specifier.
+ // C++ 7.1.1p8: The mutable specifier can be applied only to names of class
+ // data members and cannot be applied to names declared const or static,
+ // and cannot be applied to reference members.
+ switch (DS.getStorageClassSpec()) {
+ case DeclSpec::SCS_unspecified:
+ case DeclSpec::SCS_typedef:
+ case DeclSpec::SCS_static:
+ // FALL THROUGH.
+ break;
+ case DeclSpec::SCS_mutable:
+ if (isFunc) {
+ if (DS.getStorageClassSpecLoc().isValid())
+ Diag(DS.getStorageClassSpecLoc(), diag::err_mutable_function);
+ else
+ Diag(DS.getThreadSpecLoc(), diag::err_mutable_function);
+
+ // FIXME: It would be nicer if the keyword was ignored only for this
+ // declarator. Otherwise we could get follow-up errors.
+ D.getMutableDeclSpec().ClearStorageClassSpecs();
+ }
+ break;
+ default:
+ if (DS.getStorageClassSpecLoc().isValid())
+ Diag(DS.getStorageClassSpecLoc(),
+ diag::err_storageclass_invalid_for_member);
+ else
+ Diag(DS.getThreadSpecLoc(), diag::err_storageclass_invalid_for_member);
+ D.getMutableDeclSpec().ClearStorageClassSpecs();
+ }
+
+ bool isInstField = ((DS.getStorageClassSpec() == DeclSpec::SCS_unspecified ||
+ DS.getStorageClassSpec() == DeclSpec::SCS_mutable) &&
+ !isFunc);
+
+ Decl *Member;
+ if (isInstField) {
+ CXXScopeSpec &SS = D.getCXXScopeSpec();
+
+ // Data members must have identifiers for names.
+ if (Name.getNameKind() != DeclarationName::Identifier) {
+ Diag(Loc, diag::err_bad_variable_name)
+ << Name;
+ return 0;
+ }
+
+ IdentifierInfo *II = Name.getAsIdentifierInfo();
+
+    // A data member cannot be declared with the 'template' keyword, so
+    // TemplateParameterLists should be empty in this case.
+ if (TemplateParameterLists.size()) {
+ TemplateParameterList* TemplateParams = TemplateParameterLists.get()[0];
+ if (TemplateParams->size()) {
+ // There is no such thing as a member field template.
+ Diag(D.getIdentifierLoc(), diag::err_template_member)
+ << II
+ << SourceRange(TemplateParams->getTemplateLoc(),
+ TemplateParams->getRAngleLoc());
+ } else {
+ // There is an extraneous 'template<>' for this member.
+ Diag(TemplateParams->getTemplateLoc(),
+ diag::err_template_member_noparams)
+ << II
+ << SourceRange(TemplateParams->getTemplateLoc(),
+ TemplateParams->getRAngleLoc());
+ }
+ return 0;
+ }
+
+ if (SS.isSet() && !SS.isInvalid()) {
+ // The user provided a superfluous scope specifier inside a class
+ // definition:
+ //
+ // class X {
+ // int X::member;
+ // };
+ if (DeclContext *DC = computeDeclContext(SS, false))
+ diagnoseQualifiedDeclaration(SS, DC, Name, D.getIdentifierLoc());
+ else
+ Diag(D.getIdentifierLoc(), diag::err_member_qualification)
+ << Name << SS.getRange();
+
+ SS.clear();
+ }
+
+ Member = HandleField(S, cast<CXXRecordDecl>(CurContext), Loc, D, BitWidth,
+ HasDeferredInit, AS);
+ assert(Member && "HandleField never returns null");
+ } else {
+ assert(!HasDeferredInit);
+
+ Member = HandleDeclarator(S, D, move(TemplateParameterLists));
+ if (!Member) {
+ return 0;
+ }
+
+ // Non-instance-fields can't have a bitfield.
+ if (BitWidth) {
+ if (Member->isInvalidDecl()) {
+ // don't emit another diagnostic.
+ } else if (isa<VarDecl>(Member)) {
+ // C++ 9.6p3: A bit-field shall not be a static member.
+ // "static member 'A' cannot be a bit-field"
+ Diag(Loc, diag::err_static_not_bitfield)
+ << Name << BitWidth->getSourceRange();
+ } else if (isa<TypedefDecl>(Member)) {
+ // "typedef member 'x' cannot be a bit-field"
+ Diag(Loc, diag::err_typedef_not_bitfield)
+ << Name << BitWidth->getSourceRange();
+ } else {
+ // A function typedef ("typedef int f(); f a;").
+ // C++ 9.6p3: A bit-field shall have integral or enumeration type.
+ Diag(Loc, diag::err_not_integral_type_bitfield)
+ << Name << cast<ValueDecl>(Member)->getType()
+ << BitWidth->getSourceRange();
+ }
+
+ BitWidth = 0;
+ Member->setInvalidDecl();
+ }
+
+ Member->setAccess(AS);
+
+ // If we have declared a member function template, set the access of the
+ // templated declaration as well.
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(Member))
+ FunTmpl->getTemplatedDecl()->setAccess(AS);
+ }
+
+ if (VS.isOverrideSpecified()) {
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Member);
+ if (!MD || !MD->isVirtual()) {
+ Diag(Member->getLocStart(),
+ diag::override_keyword_only_allowed_on_virtual_member_functions)
+ << "override" << FixItHint::CreateRemoval(VS.getOverrideLoc());
+ } else
+ MD->addAttr(new (Context) OverrideAttr(VS.getOverrideLoc(), Context));
+ }
+ if (VS.isFinalSpecified()) {
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Member);
+ if (!MD || !MD->isVirtual()) {
+ Diag(Member->getLocStart(),
+ diag::override_keyword_only_allowed_on_virtual_member_functions)
+ << "final" << FixItHint::CreateRemoval(VS.getFinalLoc());
+ } else
+ MD->addAttr(new (Context) FinalAttr(VS.getFinalLoc(), Context));
+ }
+
+ if (VS.getLastLocation().isValid()) {
+ // Update the end location of a method that has a virt-specifiers.
+ if (CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(Member))
+ MD->setRangeEnd(VS.getLastLocation());
+ }
+
+ CheckOverrideControl(Member);
+
+ assert((Name || isInstField) && "No identifier for non-field ?");
+
+ if (isInstField)
+ FieldCollector->Add(cast<FieldDecl>(Member));
+ return Member;
+}
+
+/// ActOnCXXInClassMemberInitializer - This is invoked after parsing an
+/// in-class initializer for a non-static C++ class member, and after
+/// instantiating an in-class initializer in a class template. Such actions
+/// are deferred until the class is complete.
+void
+Sema::ActOnCXXInClassMemberInitializer(Decl *D, SourceLocation EqualLoc,
+ Expr *InitExpr) {
+ FieldDecl *FD = cast<FieldDecl>(D);
+
+ if (!InitExpr) {
+ FD->setInvalidDecl();
+ FD->removeInClassInitializer();
+ return;
+ }
+
+ if (DiagnoseUnexpandedParameterPack(InitExpr, UPPC_Initializer)) {
+ FD->setInvalidDecl();
+ FD->removeInClassInitializer();
+ return;
+ }
+
+ ExprResult Init = InitExpr;
+ if (!FD->getType()->isDependentType() && !InitExpr->isTypeDependent()) {
+ if (isa<InitListExpr>(InitExpr) && isStdInitializerList(FD->getType(), 0)) {
+ Diag(FD->getLocation(), diag::warn_dangling_std_initializer_list)
+ << /*at end of ctor*/1 << InitExpr->getSourceRange();
+ }
+ Expr **Inits = &InitExpr;
+ unsigned NumInits = 1;
+ InitializedEntity Entity = InitializedEntity::InitializeMember(FD);
+ InitializationKind Kind = EqualLoc.isInvalid()
+ ? InitializationKind::CreateDirectList(InitExpr->getLocStart())
+ : InitializationKind::CreateCopy(InitExpr->getLocStart(), EqualLoc);
+ InitializationSequence Seq(*this, Entity, Kind, Inits, NumInits);
+ Init = Seq.Perform(*this, Entity, Kind, MultiExprArg(Inits, NumInits));
+ if (Init.isInvalid()) {
+ FD->setInvalidDecl();
+ return;
+ }
+
+ CheckImplicitConversions(Init.get(), EqualLoc);
+ }
+
+ // C++0x [class.base.init]p7:
+ // The initialization of each base and member constitutes a
+ // full-expression.
+ Init = MaybeCreateExprWithCleanups(Init);
+ if (Init.isInvalid()) {
+ FD->setInvalidDecl();
+ return;
+ }
+
+ InitExpr = Init.release();
+
+ FD->setInClassInitializer(InitExpr);
+}
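+// A minimal sketch (assumed) of the two in-class initializer forms handled above:
+//   struct S {
+//     int x = 42;   // copy form: EqualLoc is valid
+//     int y{7};     // direct-list form: EqualLoc is invalid
+//   };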
+
+/// \brief Find the direct and/or virtual base specifiers that
+/// correspond to the given base type, for use in base initialization
+/// within a constructor.
+static bool FindBaseInitializer(Sema &SemaRef,
+ CXXRecordDecl *ClassDecl,
+ QualType BaseType,
+ const CXXBaseSpecifier *&DirectBaseSpec,
+ const CXXBaseSpecifier *&VirtualBaseSpec) {
+ // First, check for a direct base class.
+ DirectBaseSpec = 0;
+ for (CXXRecordDecl::base_class_const_iterator Base
+ = ClassDecl->bases_begin();
+ Base != ClassDecl->bases_end(); ++Base) {
+ if (SemaRef.Context.hasSameUnqualifiedType(BaseType, Base->getType())) {
+ // We found a direct base of this type. That's what we're
+ // initializing.
+ DirectBaseSpec = &*Base;
+ break;
+ }
+ }
+
+ // Check for a virtual base class.
+ // FIXME: We might be able to short-circuit this if we know in advance that
+ // there are no virtual bases.
+ VirtualBaseSpec = 0;
+ if (!DirectBaseSpec || !DirectBaseSpec->isVirtual()) {
+ // We haven't found a base yet; search the class hierarchy for a
+ // virtual base class.
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ if (SemaRef.IsDerivedFrom(SemaRef.Context.getTypeDeclType(ClassDecl),
+ BaseType, Paths)) {
+ for (CXXBasePaths::paths_iterator Path = Paths.begin();
+ Path != Paths.end(); ++Path) {
+ if (Path->back().Base->isVirtual()) {
+ VirtualBaseSpec = Path->back().Base;
+ break;
+ }
+ }
+ }
+ }
+
+ return DirectBaseSpec || VirtualBaseSpec;
+}
+
+/// \brief Handle a C++ member initializer using braced-init-list syntax.
+MemInitResult
+Sema::ActOnMemInitializer(Decl *ConstructorD,
+ Scope *S,
+ CXXScopeSpec &SS,
+ IdentifierInfo *MemberOrBase,
+ ParsedType TemplateTypeTy,
+ const DeclSpec &DS,
+ SourceLocation IdLoc,
+ Expr *InitList,
+ SourceLocation EllipsisLoc) {
+ return BuildMemInitializer(ConstructorD, S, SS, MemberOrBase, TemplateTypeTy,
+ DS, IdLoc, InitList,
+ EllipsisLoc);
+}
+
+/// \brief Handle a C++ member initializer using parentheses syntax.
+MemInitResult
+Sema::ActOnMemInitializer(Decl *ConstructorD,
+ Scope *S,
+ CXXScopeSpec &SS,
+ IdentifierInfo *MemberOrBase,
+ ParsedType TemplateTypeTy,
+ const DeclSpec &DS,
+ SourceLocation IdLoc,
+ SourceLocation LParenLoc,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation RParenLoc,
+ SourceLocation EllipsisLoc) {
+ Expr *List = new (Context) ParenListExpr(Context, LParenLoc, Args, NumArgs,
+ RParenLoc);
+ return BuildMemInitializer(ConstructorD, S, SS, MemberOrBase, TemplateTypeTy,
+ DS, IdLoc, List, EllipsisLoc);
+}
+
+namespace {
+
+// Callback to only accept typo corrections that can be a valid C++ member
+// initializer: either a non-static data member or a base class.
+class MemInitializerValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ explicit MemInitializerValidatorCCC(CXXRecordDecl *ClassDecl)
+ : ClassDecl(ClassDecl) {}
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ if (NamedDecl *ND = candidate.getCorrectionDecl()) {
+ if (FieldDecl *Member = dyn_cast<FieldDecl>(ND))
+ return Member->getDeclContext()->getRedeclContext()->Equals(ClassDecl);
+ else
+ return isa<TypeDecl>(ND);
+ }
+ return false;
+ }
+
+ private:
+ CXXRecordDecl *ClassDecl;
+};
+
+}
+
+/// \brief Handle a C++ member initializer.
+MemInitResult
+Sema::BuildMemInitializer(Decl *ConstructorD,
+ Scope *S,
+ CXXScopeSpec &SS,
+ IdentifierInfo *MemberOrBase,
+ ParsedType TemplateTypeTy,
+ const DeclSpec &DS,
+ SourceLocation IdLoc,
+ Expr *Init,
+ SourceLocation EllipsisLoc) {
+ if (!ConstructorD)
+ return true;
+
+ AdjustDeclIfTemplate(ConstructorD);
+
+ CXXConstructorDecl *Constructor
+ = dyn_cast<CXXConstructorDecl>(ConstructorD);
+ if (!Constructor) {
+ // The user wrote a constructor initializer on a function that is
+ // not a C++ constructor. Ignore the error for now, because we may
+ // have more member initializers coming; we'll diagnose it just
+ // once in ActOnMemInitializers.
+ return true;
+ }
+
+ CXXRecordDecl *ClassDecl = Constructor->getParent();
+
+ // C++ [class.base.init]p2:
+ // Names in a mem-initializer-id are looked up in the scope of the
+ // constructor's class and, if not found in that scope, are looked
+ // up in the scope containing the constructor's definition.
+ // [Note: if the constructor's class contains a member with the
+ // same name as a direct or virtual base class of the class, a
+ // mem-initializer-id naming the member or base class and composed
+ // of a single identifier refers to the class member. A
+ // mem-initializer-id for the hidden base class may be specified
+ // using a qualified name. ]
+ if (!SS.getScopeRep() && !TemplateTypeTy) {
+ // Look for a member, first.
+ DeclContext::lookup_result Result
+ = ClassDecl->lookup(MemberOrBase);
+ if (Result.first != Result.second) {
+ ValueDecl *Member;
+ if ((Member = dyn_cast<FieldDecl>(*Result.first)) ||
+ (Member = dyn_cast<IndirectFieldDecl>(*Result.first))) {
+ if (EllipsisLoc.isValid())
+ Diag(EllipsisLoc, diag::err_pack_expansion_member_init)
+ << MemberOrBase
+ << SourceRange(IdLoc, Init->getSourceRange().getEnd());
+
+ return BuildMemberInitializer(Member, Init, IdLoc);
+ }
+ }
+ }
+ // It didn't name a member, so see if it names a class.
+ QualType BaseType;
+ TypeSourceInfo *TInfo = 0;
+
+ if (TemplateTypeTy) {
+ BaseType = GetTypeFromParser(TemplateTypeTy, &TInfo);
+ } else if (DS.getTypeSpecType() == TST_decltype) {
+ BaseType = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
+ } else {
+ LookupResult R(*this, MemberOrBase, IdLoc, LookupOrdinaryName);
+ LookupParsedName(R, S, &SS);
+
+ TypeDecl *TyD = R.getAsSingle<TypeDecl>();
+ if (!TyD) {
+ if (R.isAmbiguous()) return true;
+
+ // We don't want access-control diagnostics here.
+ R.suppressDiagnostics();
+
+ if (SS.isSet() && isDependentScopeSpecifier(SS)) {
+ bool NotUnknownSpecialization = false;
+ DeclContext *DC = computeDeclContext(SS, false);
+ if (CXXRecordDecl *Record = dyn_cast_or_null<CXXRecordDecl>(DC))
+ NotUnknownSpecialization = !Record->hasAnyDependentBases();
+
+ if (!NotUnknownSpecialization) {
+ // When the scope specifier can refer to a member of an unknown
+ // specialization, we take it as a type name.
+ BaseType = CheckTypenameType(ETK_None, SourceLocation(),
+ SS.getWithLocInContext(Context),
+ *MemberOrBase, IdLoc);
+ if (BaseType.isNull())
+ return true;
+
+ R.clear();
+ R.setLookupName(MemberOrBase);
+ }
+ }
+
+ // If no results were found, try to correct typos.
+ TypoCorrection Corr;
+ MemInitializerValidatorCCC Validator(ClassDecl);
+ if (R.empty() && BaseType.isNull() &&
+ (Corr = CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(), S, &SS,
+ Validator, ClassDecl))) {
+ std::string CorrectedStr(Corr.getAsString(getLangOpts()));
+ std::string CorrectedQuotedStr(Corr.getQuoted(getLangOpts()));
+ if (FieldDecl *Member = Corr.getCorrectionDeclAs<FieldDecl>()) {
+ // We have found a non-static data member with a similar
+ // name to what was typed; complain and initialize that
+ // member.
+ Diag(R.getNameLoc(), diag::err_mem_init_not_member_or_class_suggest)
+ << MemberOrBase << true << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(R.getNameLoc(), CorrectedStr);
+ Diag(Member->getLocation(), diag::note_previous_decl)
+ << CorrectedQuotedStr;
+
+ return BuildMemberInitializer(Member, Init, IdLoc);
+ } else if (TypeDecl *Type = Corr.getCorrectionDeclAs<TypeDecl>()) {
+ const CXXBaseSpecifier *DirectBaseSpec;
+ const CXXBaseSpecifier *VirtualBaseSpec;
+ if (FindBaseInitializer(*this, ClassDecl,
+ Context.getTypeDeclType(Type),
+ DirectBaseSpec, VirtualBaseSpec)) {
+ // We have found a direct or virtual base class with a
+ // similar name to what was typed; complain and initialize
+ // that base class.
+ Diag(R.getNameLoc(), diag::err_mem_init_not_member_or_class_suggest)
+ << MemberOrBase << false << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(R.getNameLoc(), CorrectedStr);
+
+ const CXXBaseSpecifier *BaseSpec = DirectBaseSpec? DirectBaseSpec
+ : VirtualBaseSpec;
+ Diag(BaseSpec->getLocStart(),
+ diag::note_base_class_specified_here)
+ << BaseSpec->getType()
+ << BaseSpec->getSourceRange();
+
+ TyD = Type;
+ }
+ }
+ }
+
+ if (!TyD && BaseType.isNull()) {
+ Diag(IdLoc, diag::err_mem_init_not_member_or_class)
+ << MemberOrBase << SourceRange(IdLoc,Init->getSourceRange().getEnd());
+ return true;
+ }
+ }
+
+ if (BaseType.isNull()) {
+ BaseType = Context.getTypeDeclType(TyD);
+ if (SS.isSet()) {
+ NestedNameSpecifier *Qualifier =
+ static_cast<NestedNameSpecifier*>(SS.getScopeRep());
+
+ // FIXME: preserve source range information
+ BaseType = Context.getElaboratedType(ETK_None, Qualifier, BaseType);
+ }
+ }
+ }
+
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(BaseType, IdLoc);
+
+ return BuildBaseInitializer(BaseType, TInfo, Init, ClassDecl, EllipsisLoc);
+}
+
+/// Checks a member initializer expression for cases where reference (or
+/// pointer) members are bound to by-value parameters (or their addresses).
+static void CheckForDanglingReferenceOrPointer(Sema &S, ValueDecl *Member,
+ Expr *Init,
+ SourceLocation IdLoc) {
+ QualType MemberTy = Member->getType();
+
+ // We only handle pointers and references currently.
+ // FIXME: Would this be relevant for ObjC object pointers? Or block pointers?
+ if (!MemberTy->isReferenceType() && !MemberTy->isPointerType())
+ return;
+
+ const bool IsPointer = MemberTy->isPointerType();
+ if (IsPointer) {
+ if (const UnaryOperator *Op
+ = dyn_cast<UnaryOperator>(Init->IgnoreParenImpCasts())) {
+ // The only case we're worried about with pointers requires taking the
+ // address.
+ if (Op->getOpcode() != UO_AddrOf)
+ return;
+
+ Init = Op->getSubExpr();
+ } else {
+ // We only handle address-of expression initializers for pointers.
+ return;
+ }
+ }
+
+ if (isa<MaterializeTemporaryExpr>(Init->IgnoreParens())) {
+ // Taking the address of a temporary will be diagnosed as a hard error.
+ if (IsPointer)
+ return;
+
+ S.Diag(Init->getExprLoc(), diag::warn_bind_ref_member_to_temporary)
+ << Member << Init->getSourceRange();
+ } else if (const DeclRefExpr *DRE
+ = dyn_cast<DeclRefExpr>(Init->IgnoreParens())) {
+ // We only warn when referring to a non-reference parameter declaration.
+ const ParmVarDecl *Parameter = dyn_cast<ParmVarDecl>(DRE->getDecl());
+ if (!Parameter || Parameter->getType()->isReferenceType())
+ return;
+
+ S.Diag(Init->getExprLoc(),
+ IsPointer ? diag::warn_init_ptr_member_to_parameter_addr
+ : diag::warn_bind_ref_member_to_parameter)
+ << Member << Parameter << Init->getSourceRange();
+ } else {
+ // Other initializers are fine.
+ return;
+ }
+
+ S.Diag(Member->getLocation(), diag::note_ref_or_ptr_member_declared_here)
+ << (unsigned)IsPointer;
+}
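+// Illustrative (assumed) example of the warnings issued above:
+//   struct S {
+//     S(int v) : r(v), p(&v) {}  // binds reference member 'r' to a by-value
+//                                // parameter; 'p' stores the parameter's address
+//     const int &r;
+//     int *p;
+//   };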
+
+/// Checks an initializer expression for use of uninitialized fields, such as
+/// containing the field that is being initialized. Returns true if an
+/// uninitialized field was used and updates the SourceLocation parameter;
+/// returns false otherwise.
+static bool InitExprContainsUninitializedFields(const Stmt *S,
+ const ValueDecl *LhsField,
+ SourceLocation *L) {
+ assert(isa<FieldDecl>(LhsField) || isa<IndirectFieldDecl>(LhsField));
+
+ if (isa<CallExpr>(S)) {
+ // Do not descend into function calls or constructors, as the use
+ // of an uninitialized field may be valid. One would have to inspect
+ // the contents of the function/ctor to determine if it is safe or not.
+ // i.e. Pass-by-value is never safe, but pass-by-reference and pointers
+ // may be safe, depending on what the function/ctor does.
+ return false;
+ }
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(S)) {
+ const NamedDecl *RhsField = ME->getMemberDecl();
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(RhsField)) {
+ // The member expression points to a static data member.
+ assert(VD->isStaticDataMember() &&
+ "Member points to non-static data member!");
+ (void)VD;
+ return false;
+ }
+
+ if (isa<EnumConstantDecl>(RhsField)) {
+ // The member expression points to an enum.
+ return false;
+ }
+
+ if (RhsField == LhsField) {
+ // Initializing a field with itself. Throw a warning.
+ // But wait; there are exceptions!
+ // Exception #1: The field may not belong to this record.
+ // e.g. Foo(const Foo& rhs) : A(rhs.A) {}
+ const Expr *base = ME->getBase();
+ if (base != NULL && !isa<CXXThisExpr>(base->IgnoreParenCasts())) {
+ // Even though the field matches, it does not belong to this record.
+ return false;
+ }
+ // None of the exceptions triggered; return true to indicate an
+ // uninitialized field was used.
+ *L = ME->getMemberLoc();
+ return true;
+ }
+ } else if (isa<UnaryExprOrTypeTraitExpr>(S)) {
+ // sizeof/alignof doesn't reference contents, do not warn.
+ return false;
+ } else if (const UnaryOperator *UOE = dyn_cast<UnaryOperator>(S)) {
+ // address-of doesn't reference contents (the pointer may be dereferenced
+ // in the same expression but it would be rare; and weird).
+ if (UOE->getOpcode() == UO_AddrOf)
+ return false;
+ }
+ for (Stmt::const_child_range it = S->children(); it; ++it) {
+ if (!*it) {
+ // An expression such as 'member(arg ?: "")' may trigger this.
+ continue;
+ }
+ if (InitExprContainsUninitializedFields(*it, LhsField, L))
+ return true;
+ }
+ return false;
+}
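+// For illustration (assumed example), the walk above flags self-initialization:
+//   struct S {
+//     int a;
+//     S() : a(a) {}                    // warning: field 'a' is uninitialized here
+//     S(const S &rhs) : a(rhs.a) {}    // fine: 'a' belongs to a different object
+//   };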
+
+MemInitResult
+Sema::BuildMemberInitializer(ValueDecl *Member, Expr *Init,
+ SourceLocation IdLoc) {
+ FieldDecl *DirectMember = dyn_cast<FieldDecl>(Member);
+ IndirectFieldDecl *IndirectMember = dyn_cast<IndirectFieldDecl>(Member);
+ assert((DirectMember || IndirectMember) &&
+ "Member must be a FieldDecl or IndirectFieldDecl");
+
+ if (DiagnoseUnexpandedParameterPack(Init, UPPC_Initializer))
+ return true;
+
+ if (Member->isInvalidDecl())
+ return true;
+
+ // Diagnose value-uses of fields to initialize themselves, e.g.
+ // foo(foo)
+ // where foo is not also a parameter to the constructor.
+ // TODO: implement -Wuninitialized and fold this into that framework.
+ Expr **Args;
+ unsigned NumArgs;
+ if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
+ Args = ParenList->getExprs();
+ NumArgs = ParenList->getNumExprs();
+ } else {
+ InitListExpr *InitList = cast<InitListExpr>(Init);
+ Args = InitList->getInits();
+ NumArgs = InitList->getNumInits();
+ }
+ for (unsigned i = 0; i < NumArgs; ++i) {
+ SourceLocation L;
+ if (InitExprContainsUninitializedFields(Args[i], Member, &L)) {
+      // FIXME: Return true in the case when other fields are used before being
+      // initialized. For example, let this field be the i'th field. When
+ // initializing the i'th field, throw a warning if any of the >= i'th
+ // fields are used, as they are not yet initialized.
+ // Right now we are only handling the case where the i'th field uses
+ // itself in its initializer.
+ Diag(L, diag::warn_field_is_uninit);
+ }
+ }
+
+ SourceRange InitRange = Init->getSourceRange();
+
+ if (Member->getType()->isDependentType() || Init->isTypeDependent()) {
+ // Can't check initialization for a member of dependent type or when
+ // any of the arguments are type-dependent expressions.
+ DiscardCleanupsInEvaluationContext();
+ } else {
+ bool InitList = false;
+ if (isa<InitListExpr>(Init)) {
+ InitList = true;
+ Args = &Init;
+ NumArgs = 1;
+
+ if (isStdInitializerList(Member->getType(), 0)) {
+ Diag(IdLoc, diag::warn_dangling_std_initializer_list)
+ << /*at end of ctor*/1 << InitRange;
+ }
+ }
+
+ // Initialize the member.
+ InitializedEntity MemberEntity =
+ DirectMember ? InitializedEntity::InitializeMember(DirectMember, 0)
+ : InitializedEntity::InitializeMember(IndirectMember, 0);
+ InitializationKind Kind =
+ InitList ? InitializationKind::CreateDirectList(IdLoc)
+ : InitializationKind::CreateDirect(IdLoc, InitRange.getBegin(),
+ InitRange.getEnd());
+
+ InitializationSequence InitSeq(*this, MemberEntity, Kind, Args, NumArgs);
+ ExprResult MemberInit = InitSeq.Perform(*this, MemberEntity, Kind,
+ MultiExprArg(*this, Args, NumArgs),
+ 0);
+ if (MemberInit.isInvalid())
+ return true;
+
+ CheckImplicitConversions(MemberInit.get(),
+ InitRange.getBegin());
+
+ // C++0x [class.base.init]p7:
+ // The initialization of each base and member constitutes a
+ // full-expression.
+ MemberInit = MaybeCreateExprWithCleanups(MemberInit);
+ if (MemberInit.isInvalid())
+ return true;
+
+ // If we are in a dependent context, template instantiation will
+ // perform this type-checking again. Just save the arguments that we
+ // received.
+ // FIXME: This isn't quite ideal, since our ASTs don't capture all
+ // of the information that we have about the member
+ // initializer. However, deconstructing the ASTs is a dicey process,
+ // and this approach is far more likely to get the corner cases right.
+ if (CurContext->isDependentContext()) {
+ // The existing Init will do fine.
+ } else {
+ Init = MemberInit.get();
+ CheckForDanglingReferenceOrPointer(*this, Member, Init, IdLoc);
+ }
+ }
+
+ if (DirectMember) {
+ return new (Context) CXXCtorInitializer(Context, DirectMember, IdLoc,
+ InitRange.getBegin(), Init,
+ InitRange.getEnd());
+ } else {
+ return new (Context) CXXCtorInitializer(Context, IndirectMember, IdLoc,
+ InitRange.getBegin(), Init,
+ InitRange.getEnd());
+ }
+}
+
+MemInitResult
+Sema::BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init,
+ CXXRecordDecl *ClassDecl) {
+ SourceLocation NameLoc = TInfo->getTypeLoc().getLocalSourceRange().getBegin();
+ if (!LangOpts.CPlusPlus0x)
+ return Diag(NameLoc, diag::err_delegating_ctor)
+ << TInfo->getTypeLoc().getLocalSourceRange();
+ Diag(NameLoc, diag::warn_cxx98_compat_delegating_ctor);
+
+ bool InitList = true;
+ Expr **Args = &Init;
+ unsigned NumArgs = 1;
+ if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
+ InitList = false;
+ Args = ParenList->getExprs();
+ NumArgs = ParenList->getNumExprs();
+ }
+
+ SourceRange InitRange = Init->getSourceRange();
+ // Initialize the object.
+ InitializedEntity DelegationEntity = InitializedEntity::InitializeDelegation(
+ QualType(ClassDecl->getTypeForDecl(), 0));
+ InitializationKind Kind =
+ InitList ? InitializationKind::CreateDirectList(NameLoc)
+ : InitializationKind::CreateDirect(NameLoc, InitRange.getBegin(),
+ InitRange.getEnd());
+ InitializationSequence InitSeq(*this, DelegationEntity, Kind, Args, NumArgs);
+ ExprResult DelegationInit = InitSeq.Perform(*this, DelegationEntity, Kind,
+ MultiExprArg(*this, Args,NumArgs),
+ 0);
+ if (DelegationInit.isInvalid())
+ return true;
+
+ assert(cast<CXXConstructExpr>(DelegationInit.get())->getConstructor() &&
+ "Delegating constructor with no target?");
+
+ CheckImplicitConversions(DelegationInit.get(), InitRange.getBegin());
+
+ // C++0x [class.base.init]p7:
+ // The initialization of each base and member constitutes a
+ // full-expression.
+ DelegationInit = MaybeCreateExprWithCleanups(DelegationInit);
+ if (DelegationInit.isInvalid())
+ return true;
+
+ return new (Context) CXXCtorInitializer(Context, TInfo, InitRange.getBegin(),
+ DelegationInit.takeAs<Expr>(),
+ InitRange.getEnd());
+}
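+// A hypothetical C++11 sketch of the delegating constructors handled above:
+//   struct S {
+//     S(int n);
+//     S() : S(0) {}   // delegates to S(int); rejected with err_delegating_ctor
+//                     // when C++11 support is disabled
+//   };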
+
+MemInitResult
+Sema::BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo,
+ Expr *Init, CXXRecordDecl *ClassDecl,
+ SourceLocation EllipsisLoc) {
+ SourceLocation BaseLoc
+ = BaseTInfo->getTypeLoc().getLocalSourceRange().getBegin();
+
+ if (!BaseType->isDependentType() && !BaseType->isRecordType())
+ return Diag(BaseLoc, diag::err_base_init_does_not_name_class)
+ << BaseType << BaseTInfo->getTypeLoc().getLocalSourceRange();
+
+ // C++ [class.base.init]p2:
+ // [...] Unless the mem-initializer-id names a nonstatic data
+ // member of the constructor's class or a direct or virtual base
+ // of that class, the mem-initializer is ill-formed. A
+ // mem-initializer-list can initialize a base class using any
+ // name that denotes that base class type.
+ bool Dependent = BaseType->isDependentType() || Init->isTypeDependent();
+
+ SourceRange InitRange = Init->getSourceRange();
+ if (EllipsisLoc.isValid()) {
+ // This is a pack expansion.
+ if (!BaseType->containsUnexpandedParameterPack()) {
+ Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << SourceRange(BaseLoc, InitRange.getEnd());
+
+ EllipsisLoc = SourceLocation();
+ }
+ } else {
+ // Check for any unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(BaseLoc, BaseTInfo, UPPC_Initializer))
+ return true;
+
+ if (DiagnoseUnexpandedParameterPack(Init, UPPC_Initializer))
+ return true;
+ }
+
+ // Check for direct and virtual base classes.
+ const CXXBaseSpecifier *DirectBaseSpec = 0;
+ const CXXBaseSpecifier *VirtualBaseSpec = 0;
+ if (!Dependent) {
+ if (Context.hasSameUnqualifiedType(QualType(ClassDecl->getTypeForDecl(),0),
+ BaseType))
+ return BuildDelegatingInitializer(BaseTInfo, Init, ClassDecl);
+
+ FindBaseInitializer(*this, ClassDecl, BaseType, DirectBaseSpec,
+ VirtualBaseSpec);
+
+ // C++ [base.class.init]p2:
+ // Unless the mem-initializer-id names a nonstatic data member of the
+ // constructor's class or a direct or virtual base of that class, the
+ // mem-initializer is ill-formed.
+ if (!DirectBaseSpec && !VirtualBaseSpec) {
+ // If the class has any dependent bases, then it's possible that
+ // one of those types will resolve to the same type as
+ // BaseType. Therefore, just treat this as a dependent base
+ // class initialization. FIXME: Should we try to check the
+ // initialization anyway? It seems odd.
+ if (ClassDecl->hasAnyDependentBases())
+ Dependent = true;
+ else
+ return Diag(BaseLoc, diag::err_not_direct_base_or_virtual)
+ << BaseType << Context.getTypeDeclType(ClassDecl)
+ << BaseTInfo->getTypeLoc().getLocalSourceRange();
+ }
+ }
+
+ if (Dependent) {
+ DiscardCleanupsInEvaluationContext();
+
+ return new (Context) CXXCtorInitializer(Context, BaseTInfo,
+ /*IsVirtual=*/false,
+ InitRange.getBegin(), Init,
+ InitRange.getEnd(), EllipsisLoc);
+ }
+
+ // C++ [base.class.init]p2:
+ // If a mem-initializer-id is ambiguous because it designates both
+ // a direct non-virtual base class and an inherited virtual base
+ // class, the mem-initializer is ill-formed.
+ if (DirectBaseSpec && VirtualBaseSpec)
+ return Diag(BaseLoc, diag::err_base_init_direct_and_virtual)
+ << BaseType << BaseTInfo->getTypeLoc().getLocalSourceRange();
+
+ CXXBaseSpecifier *BaseSpec = const_cast<CXXBaseSpecifier *>(DirectBaseSpec);
+ if (!BaseSpec)
+ BaseSpec = const_cast<CXXBaseSpecifier *>(VirtualBaseSpec);
+
+ // Initialize the base.
+ bool InitList = true;
+ Expr **Args = &Init;
+ unsigned NumArgs = 1;
+ if (ParenListExpr *ParenList = dyn_cast<ParenListExpr>(Init)) {
+ InitList = false;
+ Args = ParenList->getExprs();
+ NumArgs = ParenList->getNumExprs();
+ }
+
+ InitializedEntity BaseEntity =
+ InitializedEntity::InitializeBase(Context, BaseSpec, VirtualBaseSpec);
+ InitializationKind Kind =
+ InitList ? InitializationKind::CreateDirectList(BaseLoc)
+ : InitializationKind::CreateDirect(BaseLoc, InitRange.getBegin(),
+ InitRange.getEnd());
+ InitializationSequence InitSeq(*this, BaseEntity, Kind, Args, NumArgs);
+ ExprResult BaseInit = InitSeq.Perform(*this, BaseEntity, Kind,
+ MultiExprArg(*this, Args, NumArgs),
+ 0);
+ if (BaseInit.isInvalid())
+ return true;
+
+ CheckImplicitConversions(BaseInit.get(), InitRange.getBegin());
+
+ // C++0x [class.base.init]p7:
+ // The initialization of each base and member constitutes a
+ // full-expression.
+ BaseInit = MaybeCreateExprWithCleanups(BaseInit);
+ if (BaseInit.isInvalid())
+ return true;
+
+ // If we are in a dependent context, template instantiation will
+ // perform this type-checking again. Just save the arguments that we
+ // received in a ParenListExpr.
+ // FIXME: This isn't quite ideal, since our ASTs don't capture all
+ // of the information that we have about the base
+ // initializer. However, deconstructing the ASTs is a dicey process,
+ // and this approach is far more likely to get the corner cases right.
+ if (CurContext->isDependentContext())
+ BaseInit = Owned(Init);
+
+ return new (Context) CXXCtorInitializer(Context, BaseTInfo,
+ BaseSpec->isVirtual(),
+ InitRange.getBegin(),
+ BaseInit.takeAs<Expr>(),
+ InitRange.getEnd(), EllipsisLoc);
+}
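+// Illustrative (assumed) example of the direct/virtual ambiguity rejected above:
+//   struct V {};
+//   struct A : virtual V {};
+//   struct B : A, V {
+//     B() : V() {}    // error: 'V' names both a direct non-virtual base and an
+//                     // inherited virtual base
+//   };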
+
+// Create a static_cast\<T&&>(expr).
+static Expr *CastForMoving(Sema &SemaRef, Expr *E) {
+ QualType ExprType = E->getType();
+ QualType TargetType = SemaRef.Context.getRValueReferenceType(ExprType);
+ SourceLocation ExprLoc = E->getLocStart();
+ TypeSourceInfo *TargetLoc = SemaRef.Context.getTrivialTypeSourceInfo(
+ TargetType, ExprLoc);
+
+ return SemaRef.BuildCXXNamedCast(ExprLoc, tok::kw_static_cast, TargetLoc, E,
+ SourceRange(ExprLoc, ExprLoc),
+ E->getSourceRange()).take();
+}
+
+/// ImplicitInitializerKind - How an implicit base or member initializer should
+/// initialize its base or member.
+enum ImplicitInitializerKind {
+ IIK_Default,
+ IIK_Copy,
+ IIK_Move
+};
+
+static bool
+BuildImplicitBaseInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
+ ImplicitInitializerKind ImplicitInitKind,
+ CXXBaseSpecifier *BaseSpec,
+ bool IsInheritedVirtualBase,
+ CXXCtorInitializer *&CXXBaseInit) {
+ InitializedEntity InitEntity
+ = InitializedEntity::InitializeBase(SemaRef.Context, BaseSpec,
+ IsInheritedVirtualBase);
+
+ ExprResult BaseInit;
+
+ switch (ImplicitInitKind) {
+ case IIK_Default: {
+ InitializationKind InitKind
+ = InitializationKind::CreateDefault(Constructor->getLocation());
+ InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, 0, 0);
+ BaseInit = InitSeq.Perform(SemaRef, InitEntity, InitKind,
+ MultiExprArg(SemaRef, 0, 0));
+ break;
+ }
+
+ case IIK_Move:
+ case IIK_Copy: {
+ bool Moving = ImplicitInitKind == IIK_Move;
+ ParmVarDecl *Param = Constructor->getParamDecl(0);
+ QualType ParamType = Param->getType().getNonReferenceType();
+
+ Expr *CopyCtorArg =
+ DeclRefExpr::Create(SemaRef.Context, NestedNameSpecifierLoc(),
+ SourceLocation(), Param, false,
+ Constructor->getLocation(), ParamType,
+ VK_LValue, 0);
+
+ SemaRef.MarkDeclRefReferenced(cast<DeclRefExpr>(CopyCtorArg));
+
+ // Cast to the base class to avoid ambiguities.
+ QualType ArgTy =
+ SemaRef.Context.getQualifiedType(BaseSpec->getType().getUnqualifiedType(),
+ ParamType.getQualifiers());
+
+ if (Moving) {
+ CopyCtorArg = CastForMoving(SemaRef, CopyCtorArg);
+ }
+
+ CXXCastPath BasePath;
+ BasePath.push_back(BaseSpec);
+ CopyCtorArg = SemaRef.ImpCastExprToType(CopyCtorArg, ArgTy,
+ CK_UncheckedDerivedToBase,
+ Moving ? VK_XValue : VK_LValue,
+ &BasePath).take();
+
+ InitializationKind InitKind
+ = InitializationKind::CreateDirect(Constructor->getLocation(),
+ SourceLocation(), SourceLocation());
+ InitializationSequence InitSeq(SemaRef, InitEntity, InitKind,
+ &CopyCtorArg, 1);
+ BaseInit = InitSeq.Perform(SemaRef, InitEntity, InitKind,
+ MultiExprArg(&CopyCtorArg, 1));
+ break;
+ }
+ }
+
+ BaseInit = SemaRef.MaybeCreateExprWithCleanups(BaseInit);
+ if (BaseInit.isInvalid())
+ return true;
+
+ CXXBaseInit =
+ new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context,
+ SemaRef.Context.getTrivialTypeSourceInfo(BaseSpec->getType(),
+ SourceLocation()),
+ BaseSpec->isVirtual(),
+ SourceLocation(),
+ BaseInit.takeAs<Expr>(),
+ SourceLocation(),
+ SourceLocation());
+
+ return false;
+}
+
+static bool RefersToRValueRef(Expr *MemRef) {
+ ValueDecl *Referenced = cast<MemberExpr>(MemRef)->getMemberDecl();
+ return Referenced->getType()->isRValueReferenceType();
+}
+
+static bool
+BuildImplicitMemberInitializer(Sema &SemaRef, CXXConstructorDecl *Constructor,
+ ImplicitInitializerKind ImplicitInitKind,
+ FieldDecl *Field, IndirectFieldDecl *Indirect,
+ CXXCtorInitializer *&CXXMemberInit) {
+ if (Field->isInvalidDecl())
+ return true;
+
+ SourceLocation Loc = Constructor->getLocation();
+
+ if (ImplicitInitKind == IIK_Copy || ImplicitInitKind == IIK_Move) {
+ bool Moving = ImplicitInitKind == IIK_Move;
+ ParmVarDecl *Param = Constructor->getParamDecl(0);
+ QualType ParamType = Param->getType().getNonReferenceType();
+
+ // Suppress copying zero-width bitfields.
+ if (Field->isBitField() && Field->getBitWidthValue(SemaRef.Context) == 0)
+ return false;
+
+ Expr *MemberExprBase =
+ DeclRefExpr::Create(SemaRef.Context, NestedNameSpecifierLoc(),
+ SourceLocation(), Param, false,
+ Loc, ParamType, VK_LValue, 0);
+
+ SemaRef.MarkDeclRefReferenced(cast<DeclRefExpr>(MemberExprBase));
+
+ if (Moving) {
+ MemberExprBase = CastForMoving(SemaRef, MemberExprBase);
+ }
+
+ // Build a reference to this field within the parameter.
+ CXXScopeSpec SS;
+ LookupResult MemberLookup(SemaRef, Field->getDeclName(), Loc,
+ Sema::LookupMemberName);
+ MemberLookup.addDecl(Indirect ? cast<ValueDecl>(Indirect)
+ : cast<ValueDecl>(Field), AS_public);
+ MemberLookup.resolveKind();
+ ExprResult CtorArg
+ = SemaRef.BuildMemberReferenceExpr(MemberExprBase,
+ ParamType, Loc,
+ /*IsArrow=*/false,
+ SS,
+ /*TemplateKWLoc=*/SourceLocation(),
+ /*FirstQualifierInScope=*/0,
+ MemberLookup,
+ /*TemplateArgs=*/0);
+ if (CtorArg.isInvalid())
+ return true;
+
+ // C++11 [class.copy]p15:
+ // - if a member m has rvalue reference type T&&, it is direct-initialized
+ // with static_cast<T&&>(x.m);
+ if (RefersToRValueRef(CtorArg.get())) {
+ CtorArg = CastForMoving(SemaRef, CtorArg.take());
+ }
+
+ // When the field we are copying is an array, create index variables for
+ // each dimension of the array. We use these index variables to subscript
+ // the source array, and other clients (e.g., CodeGen) will perform the
+ // necessary iteration with these index variables.
+ SmallVector<VarDecl *, 4> IndexVariables;
+ QualType BaseType = Field->getType();
+ QualType SizeType = SemaRef.Context.getSizeType();
+ bool InitializingArray = false;
+ while (const ConstantArrayType *Array
+ = SemaRef.Context.getAsConstantArrayType(BaseType)) {
+ InitializingArray = true;
+ // Create the iteration variable for this array index.
+ IdentifierInfo *IterationVarName = 0;
+ {
+ SmallString<8> Str;
+ llvm::raw_svector_ostream OS(Str);
+ OS << "__i" << IndexVariables.size();
+ IterationVarName = &SemaRef.Context.Idents.get(OS.str());
+ }
+ VarDecl *IterationVar
+ = VarDecl::Create(SemaRef.Context, SemaRef.CurContext, Loc, Loc,
+ IterationVarName, SizeType,
+ SemaRef.Context.getTrivialTypeSourceInfo(SizeType, Loc),
+ SC_None, SC_None);
+ IndexVariables.push_back(IterationVar);
+
+ // Create a reference to the iteration variable.
+ ExprResult IterationVarRef
+ = SemaRef.BuildDeclRefExpr(IterationVar, SizeType, VK_LValue, Loc);
+ assert(!IterationVarRef.isInvalid() &&
+ "Reference to invented variable cannot fail!");
+ IterationVarRef = SemaRef.DefaultLvalueConversion(IterationVarRef.take());
+ assert(!IterationVarRef.isInvalid() &&
+ "Conversion of invented variable cannot fail!");
+
+ // Subscript the array with this iteration variable.
+ CtorArg = SemaRef.CreateBuiltinArraySubscriptExpr(CtorArg.take(), Loc,
+ IterationVarRef.take(),
+ Loc);
+ if (CtorArg.isInvalid())
+ return true;
+
+ BaseType = Array->getElementType();
+ }
+
+ // The array subscript expression is an lvalue, which is wrong for moving.
+ if (Moving && InitializingArray)
+ CtorArg = CastForMoving(SemaRef, CtorArg.take());
+
+ // Construct the entity that we will be initializing. For an array, this
+ // will be first element in the array, which may require several levels
+ // of array-subscript entities.
+ SmallVector<InitializedEntity, 4> Entities;
+ Entities.reserve(1 + IndexVariables.size());
+ if (Indirect)
+ Entities.push_back(InitializedEntity::InitializeMember(Indirect));
+ else
+ Entities.push_back(InitializedEntity::InitializeMember(Field));
+ for (unsigned I = 0, N = IndexVariables.size(); I != N; ++I)
+ Entities.push_back(InitializedEntity::InitializeElement(SemaRef.Context,
+ 0,
+ Entities.back()));
+
+ // Direct-initialize to use the copy constructor.
+ InitializationKind InitKind =
+ InitializationKind::CreateDirect(Loc, SourceLocation(), SourceLocation());
+
+ Expr *CtorArgE = CtorArg.takeAs<Expr>();
+ InitializationSequence InitSeq(SemaRef, Entities.back(), InitKind,
+ &CtorArgE, 1);
+
+ ExprResult MemberInit
+ = InitSeq.Perform(SemaRef, Entities.back(), InitKind,
+ MultiExprArg(&CtorArgE, 1));
+ MemberInit = SemaRef.MaybeCreateExprWithCleanups(MemberInit);
+ if (MemberInit.isInvalid())
+ return true;
+
+ if (Indirect) {
+ assert(IndexVariables.size() == 0 &&
+ "Indirect field improperly initialized");
+ CXXMemberInit
+ = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context, Indirect,
+ Loc, Loc,
+ MemberInit.takeAs<Expr>(),
+ Loc);
+ } else
+ CXXMemberInit = CXXCtorInitializer::Create(SemaRef.Context, Field, Loc,
+ Loc, MemberInit.takeAs<Expr>(),
+ Loc,
+ IndexVariables.data(),
+ IndexVariables.size());
+ return false;
+ }
+
+ assert(ImplicitInitKind == IIK_Default && "Unhandled implicit init kind!");
+
+ QualType FieldBaseElementType =
+ SemaRef.Context.getBaseElementType(Field->getType());
+
+ if (FieldBaseElementType->isRecordType()) {
+ InitializedEntity InitEntity
+ = Indirect? InitializedEntity::InitializeMember(Indirect)
+ : InitializedEntity::InitializeMember(Field);
+ InitializationKind InitKind =
+ InitializationKind::CreateDefault(Loc);
+
+ InitializationSequence InitSeq(SemaRef, InitEntity, InitKind, 0, 0);
+ ExprResult MemberInit =
+ InitSeq.Perform(SemaRef, InitEntity, InitKind, MultiExprArg());
+
+ MemberInit = SemaRef.MaybeCreateExprWithCleanups(MemberInit);
+ if (MemberInit.isInvalid())
+ return true;
+
+ if (Indirect)
+ CXXMemberInit = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context,
+ Indirect, Loc,
+ Loc,
+ MemberInit.get(),
+ Loc);
+ else
+ CXXMemberInit = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context,
+ Field, Loc, Loc,
+ MemberInit.get(),
+ Loc);
+ return false;
+ }
+
+ if (!Field->getParent()->isUnion()) {
+ if (FieldBaseElementType->isReferenceType()) {
+ SemaRef.Diag(Constructor->getLocation(),
+ diag::err_uninitialized_member_in_ctor)
+ << (int)Constructor->isImplicit()
+ << SemaRef.Context.getTagDeclType(Constructor->getParent())
+ << 0 << Field->getDeclName();
+ SemaRef.Diag(Field->getLocation(), diag::note_declared_at);
+ return true;
+ }
+
+ if (FieldBaseElementType.isConstQualified()) {
+ SemaRef.Diag(Constructor->getLocation(),
+ diag::err_uninitialized_member_in_ctor)
+ << (int)Constructor->isImplicit()
+ << SemaRef.Context.getTagDeclType(Constructor->getParent())
+ << 1 << Field->getDeclName();
+ SemaRef.Diag(Field->getLocation(), diag::note_declared_at);
+ return true;
+ }
+ }
+
+ if (SemaRef.getLangOpts().ObjCAutoRefCount &&
+ FieldBaseElementType->isObjCRetainableType() &&
+ FieldBaseElementType.getObjCLifetime() != Qualifiers::OCL_None &&
+ FieldBaseElementType.getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
+ // Instant objects:
+ // Default-initialize Objective-C pointers to NULL.
+ CXXMemberInit
+ = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context, Field,
+ Loc, Loc,
+ new (SemaRef.Context) ImplicitValueInitExpr(Field->getType()),
+ Loc);
+ return false;
+ }
+
+ // Nothing to initialize.
+ CXXMemberInit = 0;
+ return false;
+}
+
+namespace {
+struct BaseAndFieldInfo {
+ Sema &S;
+ CXXConstructorDecl *Ctor;
+ bool AnyErrorsInInits;
+ ImplicitInitializerKind IIK;
+ llvm::DenseMap<const void *, CXXCtorInitializer*> AllBaseFields;
+ SmallVector<CXXCtorInitializer*, 8> AllToInit;
+
+ BaseAndFieldInfo(Sema &S, CXXConstructorDecl *Ctor, bool ErrorsInInits)
+ : S(S), Ctor(Ctor), AnyErrorsInInits(ErrorsInInits) {
+ bool Generated = Ctor->isImplicit() || Ctor->isDefaulted();
+ if (Generated && Ctor->isCopyConstructor())
+ IIK = IIK_Copy;
+ else if (Generated && Ctor->isMoveConstructor())
+ IIK = IIK_Move;
+ else
+ IIK = IIK_Default;
+ }
+
+ bool isImplicitCopyOrMove() const {
+ switch (IIK) {
+ case IIK_Copy:
+ case IIK_Move:
+ return true;
+
+ case IIK_Default:
+ return false;
+ }
+
+ llvm_unreachable("Invalid ImplicitInitializerKind!");
+ }
+};
+}
+
+/// \brief Determine whether the given indirect field declaration is somewhere
+/// within an anonymous union.
+static bool isWithinAnonymousUnion(IndirectFieldDecl *F) {
+ for (IndirectFieldDecl::chain_iterator C = F->chain_begin(),
+ CEnd = F->chain_end();
+ C != CEnd; ++C)
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>((*C)->getDeclContext()))
+ if (Record->isUnion())
+ return true;
+
+ return false;
+}
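+// e.g. (assumed sketch): in
+//   struct S { union { int a; }; };
+// the IndirectFieldDecl chain for 'a' passes through the anonymous union, so the
+// check above returns true.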
+
+/// \brief Determine whether the given type is an incomplete or zero-length
+/// array type.
+static bool isIncompleteOrZeroLengthArrayType(ASTContext &Context, QualType T) {
+ if (T->isIncompleteArrayType())
+ return true;
+
+ while (const ConstantArrayType *ArrayT = Context.getAsConstantArrayType(T)) {
+ if (!ArrayT->getSize())
+ return true;
+
+ T = ArrayT->getElementType();
+ }
+
+ return false;
+}
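+
+// Illustration: this covers both C99-style flexible array members and the GNU
+// zero-length-array extension, e.g.
+//   struct S { int n; char data[]; }; // incomplete array type
+//   struct T { int pad[0]; };         // zero-length array
+// Callers skip such fields when building initializers and destructor calls.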
+
+static bool CollectFieldInitializer(Sema &SemaRef, BaseAndFieldInfo &Info,
+ FieldDecl *Field,
+ IndirectFieldDecl *Indirect = 0) {
+
+ // Overwhelmingly common case: we have a direct initializer for this field.
+ if (CXXCtorInitializer *Init = Info.AllBaseFields.lookup(Field)) {
+ Info.AllToInit.push_back(Init);
+ return false;
+ }
+
+ // C++0x [class.base.init]p8: if the entity is a non-static data member that
+ // has a brace-or-equal-initializer, the entity is initialized as specified
+ // in [dcl.init].
+ if (Field->hasInClassInitializer() && !Info.isImplicitCopyOrMove()) {
+ CXXCtorInitializer *Init;
+ if (Indirect)
+ Init = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context, Indirect,
+ SourceLocation(),
+ SourceLocation(), 0,
+ SourceLocation());
+ else
+ Init = new (SemaRef.Context) CXXCtorInitializer(SemaRef.Context, Field,
+ SourceLocation(),
+ SourceLocation(), 0,
+ SourceLocation());
+ Info.AllToInit.push_back(Init);
+ return false;
+ }
+
+ // Don't build an implicit initializer for union members if none was
+ // explicitly specified.
+ if (Field->getParent()->isUnion() ||
+ (Indirect && isWithinAnonymousUnion(Indirect)))
+ return false;
+
+ // Don't initialize incomplete or zero-length arrays.
+ if (isIncompleteOrZeroLengthArrayType(SemaRef.Context, Field->getType()))
+ return false;
+
+ // Don't try to build an implicit initializer if there were semantic
+ // errors in any of the initializers (and therefore we might be
+ // missing some that the user actually wrote).
+ if (Info.AnyErrorsInInits || Field->isInvalidDecl())
+ return false;
+
+ CXXCtorInitializer *Init = 0;
+ if (BuildImplicitMemberInitializer(Info.S, Info.Ctor, Info.IIK, Field,
+ Indirect, Init))
+ return true;
+
+ if (Init)
+ Info.AllToInit.push_back(Init);
+
+ return false;
+}
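+
+// Illustration: for an in-class initializer such as
+//   struct S { int x = 42; S() {} };
+// the branch above records a CXXCtorInitializer with a null expression for
+// 'x'; the stored brace-or-equal-initializer is used when emitting the body.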
+
+bool
+Sema::SetDelegatingInitializer(CXXConstructorDecl *Constructor,
+ CXXCtorInitializer *Initializer) {
+ assert(Initializer->isDelegatingInitializer());
+ Constructor->setNumCtorInitializers(1);
+ CXXCtorInitializer **initializer =
+ new (Context) CXXCtorInitializer*[1];
+ memcpy(initializer, &Initializer, sizeof (CXXCtorInitializer*));
+ Constructor->setCtorInitializers(initializer);
+
+ if (CXXDestructorDecl *Dtor = LookupDestructor(Constructor->getParent())) {
+ MarkFunctionReferenced(Initializer->getSourceLocation(), Dtor);
+ DiagnoseUseOfDecl(Dtor, Initializer->getSourceLocation());
+ }
+
+ DelegatingCtorDecls.push_back(Constructor);
+
+ return false;
+}
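+
+// Illustration: a delegating constructor such as
+//   struct S { S(int); S() : S(0) {} };
+// takes this path. The destructor is referenced because a fully constructed S
+// must be destroyed if the delegating constructor's body throws afterwards.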
+
+bool Sema::SetCtorInitializers(CXXConstructorDecl *Constructor,
+ CXXCtorInitializer **Initializers,
+ unsigned NumInitializers,
+ bool AnyErrors) {
+ if (Constructor->isDependentContext()) {
+ // Just store the initializers as written, they will be checked during
+ // instantiation.
+ if (NumInitializers > 0) {
+ Constructor->setNumCtorInitializers(NumInitializers);
+ CXXCtorInitializer **baseOrMemberInitializers =
+ new (Context) CXXCtorInitializer*[NumInitializers];
+ memcpy(baseOrMemberInitializers, Initializers,
+ NumInitializers * sizeof(CXXCtorInitializer*));
+ Constructor->setCtorInitializers(baseOrMemberInitializers);
+ }
+
+ return false;
+ }
+
+ BaseAndFieldInfo Info(*this, Constructor, AnyErrors);
+
+  // We need to build the initializer AST according to the order of
+  // construction, not the order in which the user wrote the Initializers list.
+ CXXRecordDecl *ClassDecl = Constructor->getParent()->getDefinition();
+ if (!ClassDecl)
+ return true;
+
+ bool HadError = false;
+
+ for (unsigned i = 0; i < NumInitializers; i++) {
+ CXXCtorInitializer *Member = Initializers[i];
+
+ if (Member->isBaseInitializer())
+ Info.AllBaseFields[Member->getBaseClass()->getAs<RecordType>()] = Member;
+ else
+ Info.AllBaseFields[Member->getAnyMember()] = Member;
+ }
+
+ // Keep track of the direct virtual bases.
+ llvm::SmallPtrSet<CXXBaseSpecifier *, 16> DirectVBases;
+ for (CXXRecordDecl::base_class_iterator I = ClassDecl->bases_begin(),
+ E = ClassDecl->bases_end(); I != E; ++I) {
+ if (I->isVirtual())
+ DirectVBases.insert(I);
+ }
+
+ // Push virtual bases before others.
+ for (CXXRecordDecl::base_class_iterator VBase = ClassDecl->vbases_begin(),
+ E = ClassDecl->vbases_end(); VBase != E; ++VBase) {
+
+ if (CXXCtorInitializer *Value
+ = Info.AllBaseFields.lookup(VBase->getType()->getAs<RecordType>())) {
+ Info.AllToInit.push_back(Value);
+ } else if (!AnyErrors) {
+ bool IsInheritedVirtualBase = !DirectVBases.count(VBase);
+ CXXCtorInitializer *CXXBaseInit;
+ if (BuildImplicitBaseInitializer(*this, Constructor, Info.IIK,
+ VBase, IsInheritedVirtualBase,
+ CXXBaseInit)) {
+ HadError = true;
+ continue;
+ }
+
+ Info.AllToInit.push_back(CXXBaseInit);
+ }
+ }
+
+ // Non-virtual bases.
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
+ E = ClassDecl->bases_end(); Base != E; ++Base) {
+ // Virtuals are in the virtual base list and already constructed.
+ if (Base->isVirtual())
+ continue;
+
+ if (CXXCtorInitializer *Value
+ = Info.AllBaseFields.lookup(Base->getType()->getAs<RecordType>())) {
+ Info.AllToInit.push_back(Value);
+ } else if (!AnyErrors) {
+ CXXCtorInitializer *CXXBaseInit;
+ if (BuildImplicitBaseInitializer(*this, Constructor, Info.IIK,
+ Base, /*IsInheritedVirtualBase=*/false,
+ CXXBaseInit)) {
+ HadError = true;
+ continue;
+ }
+
+ Info.AllToInit.push_back(CXXBaseInit);
+ }
+ }
+
+ // Fields.
+ for (DeclContext::decl_iterator Mem = ClassDecl->decls_begin(),
+ MemEnd = ClassDecl->decls_end();
+ Mem != MemEnd; ++Mem) {
+ if (FieldDecl *F = dyn_cast<FieldDecl>(*Mem)) {
+ // C++ [class.bit]p2:
+ // A declaration for a bit-field that omits the identifier declares an
+ // unnamed bit-field. Unnamed bit-fields are not members and cannot be
+ // initialized.
+ if (F->isUnnamedBitfield())
+ continue;
+
+ // If we're not generating the implicit copy/move constructor, then we'll
+ // handle anonymous struct/union fields based on their individual
+ // indirect fields.
+ if (F->isAnonymousStructOrUnion() && Info.IIK == IIK_Default)
+ continue;
+
+ if (CollectFieldInitializer(*this, Info, F))
+ HadError = true;
+ continue;
+ }
+
+ // Beyond this point, we only consider default initialization.
+ if (Info.IIK != IIK_Default)
+ continue;
+
+ if (IndirectFieldDecl *F = dyn_cast<IndirectFieldDecl>(*Mem)) {
+ if (F->getType()->isIncompleteArrayType()) {
+ assert(ClassDecl->hasFlexibleArrayMember() &&
+ "Incomplete array type is not valid");
+ continue;
+ }
+
+ // Initialize each field of an anonymous struct individually.
+ if (CollectFieldInitializer(*this, Info, F->getAnonField(), F))
+ HadError = true;
+
+ continue;
+ }
+ }
+
+ NumInitializers = Info.AllToInit.size();
+ if (NumInitializers > 0) {
+ Constructor->setNumCtorInitializers(NumInitializers);
+ CXXCtorInitializer **baseOrMemberInitializers =
+ new (Context) CXXCtorInitializer*[NumInitializers];
+ memcpy(baseOrMemberInitializers, Info.AllToInit.data(),
+ NumInitializers * sizeof(CXXCtorInitializer*));
+ Constructor->setCtorInitializers(baseOrMemberInitializers);
+
+ // Constructors implicitly reference the base and member
+ // destructors.
+ MarkBaseAndMemberDestructorsReferenced(Constructor->getLocation(),
+ Constructor->getParent());
+ }
+
+ return HadError;
+}
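+
+// Illustration: for
+//   struct V {}; struct A : virtual V {};
+//   struct B : A { int i; B() : i(0), A() {} };
+// the list built above follows construction order (virtual base V, then
+// direct base A, then member i), regardless of the order written.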
+
+static void *GetKeyForTopLevelField(FieldDecl *Field) {
+ // For anonymous unions, use the class declaration as the key.
+ if (const RecordType *RT = Field->getType()->getAs<RecordType>()) {
+ if (RT->getDecl()->isAnonymousStructOrUnion())
+ return static_cast<void *>(RT->getDecl());
+ }
+ return static_cast<void *>(Field);
+}
+
+static void *GetKeyForBase(ASTContext &Context, QualType BaseType) {
+ return const_cast<Type*>(Context.getCanonicalType(BaseType).getTypePtr());
+}
+
+static void *GetKeyForMember(ASTContext &Context,
+ CXXCtorInitializer *Member) {
+ if (!Member->isAnyMemberInitializer())
+ return GetKeyForBase(Context, QualType(Member->getBaseClass(), 0));
+
+ // For fields injected into the class via declaration of an anonymous union,
+ // use its anonymous union class declaration as the unique key.
+ FieldDecl *Field = Member->getAnyMember();
+
+ // If the field is a member of an anonymous struct or union, our key
+ // is the anonymous record decl that's a direct child of the class.
+ RecordDecl *RD = Field->getParent();
+ if (RD->isAnonymousStructOrUnion()) {
+ while (true) {
+ RecordDecl *Parent = cast<RecordDecl>(RD->getDeclContext());
+ if (Parent->isAnonymousStructOrUnion())
+ RD = Parent;
+ else
+ break;
+ }
+
+ return static_cast<void *>(RD);
+ }
+
+ return static_cast<void *>(Field);
+}
+
+static void
+DiagnoseBaseOrMemInitializerOrder(Sema &SemaRef,
+ const CXXConstructorDecl *Constructor,
+ CXXCtorInitializer **Inits,
+ unsigned NumInits) {
+ if (Constructor->getDeclContext()->isDependentContext())
+ return;
+
+  // Don't check initializer order unless the warning is enabled at the
+  // location of at least one initializer.
+ bool ShouldCheckOrder = false;
+ for (unsigned InitIndex = 0; InitIndex != NumInits; ++InitIndex) {
+ CXXCtorInitializer *Init = Inits[InitIndex];
+ if (SemaRef.Diags.getDiagnosticLevel(diag::warn_initializer_out_of_order,
+ Init->getSourceLocation())
+ != DiagnosticsEngine::Ignored) {
+ ShouldCheckOrder = true;
+ break;
+ }
+ }
+ if (!ShouldCheckOrder)
+ return;
+
+ // Build the list of bases and members in the order that they'll
+ // actually be initialized. The explicit initializers should be in
+ // this same order but may be missing things.
+ SmallVector<const void*, 32> IdealInitKeys;
+
+ const CXXRecordDecl *ClassDecl = Constructor->getParent();
+
+ // 1. Virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator VBase =
+ ClassDecl->vbases_begin(),
+ E = ClassDecl->vbases_end(); VBase != E; ++VBase)
+ IdealInitKeys.push_back(GetKeyForBase(SemaRef.Context, VBase->getType()));
+
+ // 2. Non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator Base = ClassDecl->bases_begin(),
+ E = ClassDecl->bases_end(); Base != E; ++Base) {
+ if (Base->isVirtual())
+ continue;
+ IdealInitKeys.push_back(GetKeyForBase(SemaRef.Context, Base->getType()));
+ }
+
+ // 3. Direct fields.
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ E = ClassDecl->field_end(); Field != E; ++Field) {
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ IdealInitKeys.push_back(GetKeyForTopLevelField(*Field));
+ }
+
+ unsigned NumIdealInits = IdealInitKeys.size();
+ unsigned IdealIndex = 0;
+
+ CXXCtorInitializer *PrevInit = 0;
+ for (unsigned InitIndex = 0; InitIndex != NumInits; ++InitIndex) {
+ CXXCtorInitializer *Init = Inits[InitIndex];
+ void *InitKey = GetKeyForMember(SemaRef.Context, Init);
+
+ // Scan forward to try to find this initializer in the idealized
+ // initializers list.
+ for (; IdealIndex != NumIdealInits; ++IdealIndex)
+ if (InitKey == IdealInitKeys[IdealIndex])
+ break;
+
+ // If we didn't find this initializer, it must be because we
+ // scanned past it on a previous iteration. That can only
+ // happen if we're out of order; emit a warning.
+ if (IdealIndex == NumIdealInits && PrevInit) {
+ Sema::SemaDiagnosticBuilder D =
+ SemaRef.Diag(PrevInit->getSourceLocation(),
+ diag::warn_initializer_out_of_order);
+
+ if (PrevInit->isAnyMemberInitializer())
+ D << 0 << PrevInit->getAnyMember()->getDeclName();
+ else
+ D << 1 << PrevInit->getTypeSourceInfo()->getType();
+
+ if (Init->isAnyMemberInitializer())
+ D << 0 << Init->getAnyMember()->getDeclName();
+ else
+ D << 1 << Init->getTypeSourceInfo()->getType();
+
+ // Move back to the initializer's location in the ideal list.
+ for (IdealIndex = 0; IdealIndex != NumIdealInits; ++IdealIndex)
+ if (InitKey == IdealInitKeys[IdealIndex])
+ break;
+
+ assert(IdealIndex != NumIdealInits &&
+ "initializer not found in initializer list");
+ }
+
+ PrevInit = Init;
+ }
+}
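+
+// Illustration: with -Wreorder enabled,
+//   struct S { int a, b; S() : b(1), a(b) {} };
+// warns that 'b' will be initialized after 'a', since members are constructed
+// in declaration order; here 'a(b)' reads 'b' before it has a value.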
+
+namespace {
+bool CheckRedundantInit(Sema &S,
+ CXXCtorInitializer *Init,
+ CXXCtorInitializer *&PrevInit) {
+ if (!PrevInit) {
+ PrevInit = Init;
+ return false;
+ }
+
+ if (FieldDecl *Field = Init->getMember())
+ S.Diag(Init->getSourceLocation(),
+ diag::err_multiple_mem_initialization)
+ << Field->getDeclName()
+ << Init->getSourceRange();
+ else {
+ const Type *BaseClass = Init->getBaseClass();
+ assert(BaseClass && "neither field nor base");
+ S.Diag(Init->getSourceLocation(),
+ diag::err_multiple_base_initialization)
+ << QualType(BaseClass, 0)
+ << Init->getSourceRange();
+ }
+ S.Diag(PrevInit->getSourceLocation(), diag::note_previous_initializer)
+ << 0 << PrevInit->getSourceRange();
+
+ return true;
+}
+
+typedef std::pair<NamedDecl *, CXXCtorInitializer *> UnionEntry;
+typedef llvm::DenseMap<RecordDecl*, UnionEntry> RedundantUnionMap;
+
+bool CheckRedundantUnionInit(Sema &S,
+ CXXCtorInitializer *Init,
+ RedundantUnionMap &Unions) {
+ FieldDecl *Field = Init->getAnyMember();
+ RecordDecl *Parent = Field->getParent();
+ NamedDecl *Child = Field;
+
+ while (Parent->isAnonymousStructOrUnion() || Parent->isUnion()) {
+ if (Parent->isUnion()) {
+ UnionEntry &En = Unions[Parent];
+ if (En.first && En.first != Child) {
+ S.Diag(Init->getSourceLocation(),
+ diag::err_multiple_mem_union_initialization)
+ << Field->getDeclName()
+ << Init->getSourceRange();
+ S.Diag(En.second->getSourceLocation(), diag::note_previous_initializer)
+ << 0 << En.second->getSourceRange();
+ return true;
+ }
+ if (!En.first) {
+ En.first = Child;
+ En.second = Init;
+ }
+ if (!Parent->isAnonymousStructOrUnion())
+ return false;
+ }
+
+ Child = Parent;
+ Parent = cast<RecordDecl>(Parent->getDeclContext());
+ }
+
+ return false;
+}
+}
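+
+// Illustration: the checks above reject, for example,
+//   struct S { int x; S() : x(1), x(2) {} };                   // member repeated
+//   struct U { union { int a; int b; }; U() : a(1), b(2) {} }; // two union members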
+
+/// ActOnMemInitializers - Handle the member initializers for a constructor.
+void Sema::ActOnMemInitializers(Decl *ConstructorDecl,
+ SourceLocation ColonLoc,
+ CXXCtorInitializer **meminits,
+ unsigned NumMemInits,
+ bool AnyErrors) {
+ if (!ConstructorDecl)
+ return;
+
+ AdjustDeclIfTemplate(ConstructorDecl);
+
+ CXXConstructorDecl *Constructor
+ = dyn_cast<CXXConstructorDecl>(ConstructorDecl);
+
+ if (!Constructor) {
+ Diag(ColonLoc, diag::err_only_constructors_take_base_inits);
+ return;
+ }
+
+ CXXCtorInitializer **MemInits =
+ reinterpret_cast<CXXCtorInitializer **>(meminits);
+
+ // Mapping for the duplicate initializers check.
+ // For member initializers, this is keyed with a FieldDecl*.
+ // For base initializers, this is keyed with a Type*.
+ llvm::DenseMap<void*, CXXCtorInitializer *> Members;
+
+ // Mapping for the inconsistent anonymous-union initializers check.
+ RedundantUnionMap MemberUnions;
+
+ bool HadError = false;
+ for (unsigned i = 0; i < NumMemInits; i++) {
+ CXXCtorInitializer *Init = MemInits[i];
+
+ // Set the source order index.
+ Init->setSourceOrder(i);
+
+ if (Init->isAnyMemberInitializer()) {
+ FieldDecl *Field = Init->getAnyMember();
+ if (CheckRedundantInit(*this, Init, Members[Field]) ||
+ CheckRedundantUnionInit(*this, Init, MemberUnions))
+ HadError = true;
+ } else if (Init->isBaseInitializer()) {
+ void *Key = GetKeyForBase(Context, QualType(Init->getBaseClass(), 0));
+ if (CheckRedundantInit(*this, Init, Members[Key]))
+ HadError = true;
+ } else {
+ assert(Init->isDelegatingInitializer());
+ // This must be the only initializer
+ if (i != 0 || NumMemInits > 1) {
+ Diag(MemInits[0]->getSourceLocation(),
+ diag::err_delegating_initializer_alone)
+ << MemInits[0]->getSourceRange();
+ HadError = true;
+ // We will treat this as being the only initializer.
+ }
+ SetDelegatingInitializer(Constructor, MemInits[i]);
+ // Return immediately as the initializer is set.
+ return;
+ }
+ }
+
+ if (HadError)
+ return;
+
+ DiagnoseBaseOrMemInitializerOrder(*this, Constructor, MemInits, NumMemInits);
+
+ SetCtorInitializers(Constructor, MemInits, NumMemInits, AnyErrors);
+}
+
+void
+Sema::MarkBaseAndMemberDestructorsReferenced(SourceLocation Location,
+ CXXRecordDecl *ClassDecl) {
+ // Ignore dependent contexts. Also ignore unions, since their members never
+ // have destructors implicitly called.
+ if (ClassDecl->isDependentContext() || ClassDecl->isUnion())
+ return;
+
+ // FIXME: all the access-control diagnostics are positioned on the
+ // field/base declaration. That's probably good; that said, the
+ // user might reasonably want to know why the destructor is being
+ // emitted, and we currently don't say.
+
+ // Non-static data members.
+ for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
+ E = ClassDecl->field_end(); I != E; ++I) {
+ FieldDecl *Field = *I;
+ if (Field->isInvalidDecl())
+ continue;
+
+ // Don't destroy incomplete or zero-length arrays.
+ if (isIncompleteOrZeroLengthArrayType(Context, Field->getType()))
+ continue;
+
+ QualType FieldType = Context.getBaseElementType(Field->getType());
+
+ const RecordType* RT = FieldType->getAs<RecordType>();
+ if (!RT)
+ continue;
+
+ CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (FieldClassDecl->isInvalidDecl())
+ continue;
+ if (FieldClassDecl->hasIrrelevantDestructor())
+ continue;
+ // The destructor for an implicit anonymous union member is never invoked.
+ if (FieldClassDecl->isUnion() && FieldClassDecl->isAnonymousStructOrUnion())
+ continue;
+
+ CXXDestructorDecl *Dtor = LookupDestructor(FieldClassDecl);
+ assert(Dtor && "No dtor found for FieldClassDecl!");
+ CheckDestructorAccess(Field->getLocation(), Dtor,
+ PDiag(diag::err_access_dtor_field)
+ << Field->getDeclName()
+ << FieldType);
+
+ MarkFunctionReferenced(Location, const_cast<CXXDestructorDecl*>(Dtor));
+ DiagnoseUseOfDecl(Dtor, Location);
+ }
+
+ llvm::SmallPtrSet<const RecordType *, 8> DirectVirtualBases;
+
+ // Bases.
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
+ E = ClassDecl->bases_end(); Base != E; ++Base) {
+ // Bases are always records in a well-formed non-dependent class.
+ const RecordType *RT = Base->getType()->getAs<RecordType>();
+
+ // Remember direct virtual bases.
+ if (Base->isVirtual())
+ DirectVirtualBases.insert(RT);
+
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ // If our base class is invalid, we probably can't get its dtor anyway.
+ if (BaseClassDecl->isInvalidDecl())
+ continue;
+ if (BaseClassDecl->hasIrrelevantDestructor())
+ continue;
+
+ CXXDestructorDecl *Dtor = LookupDestructor(BaseClassDecl);
+ assert(Dtor && "No dtor found for BaseClassDecl!");
+
+ // FIXME: caret should be on the start of the class name
+ CheckDestructorAccess(Base->getLocStart(), Dtor,
+ PDiag(diag::err_access_dtor_base)
+ << Base->getType()
+ << Base->getSourceRange(),
+ Context.getTypeDeclType(ClassDecl));
+
+ MarkFunctionReferenced(Location, const_cast<CXXDestructorDecl*>(Dtor));
+ DiagnoseUseOfDecl(Dtor, Location);
+ }
+
+ // Virtual bases.
+ for (CXXRecordDecl::base_class_iterator VBase = ClassDecl->vbases_begin(),
+ E = ClassDecl->vbases_end(); VBase != E; ++VBase) {
+
+ // Bases are always records in a well-formed non-dependent class.
+ const RecordType *RT = VBase->getType()->castAs<RecordType>();
+
+ // Ignore direct virtual bases.
+ if (DirectVirtualBases.count(RT))
+ continue;
+
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ // If our base class is invalid, we probably can't get its dtor anyway.
+ if (BaseClassDecl->isInvalidDecl())
+ continue;
+ if (BaseClassDecl->hasIrrelevantDestructor())
+ continue;
+
+ CXXDestructorDecl *Dtor = LookupDestructor(BaseClassDecl);
+ assert(Dtor && "No dtor found for BaseClassDecl!");
+ CheckDestructorAccess(ClassDecl->getLocation(), Dtor,
+ PDiag(diag::err_access_dtor_vbase)
+ << VBase->getType(),
+ Context.getTypeDeclType(ClassDecl));
+
+ MarkFunctionReferenced(Location, const_cast<CXXDestructorDecl*>(Dtor));
+ DiagnoseUseOfDecl(Dtor, Location);
+ }
+}
+
+void Sema::ActOnDefaultCtorInitializers(Decl *CDtorDecl) {
+ if (!CDtorDecl)
+ return;
+
+ if (CXXConstructorDecl *Constructor
+ = dyn_cast<CXXConstructorDecl>(CDtorDecl))
+ SetCtorInitializers(Constructor, 0, 0, /*AnyErrors=*/false);
+}
+
+bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
+ unsigned DiagID, AbstractDiagSelID SelID) {
+ if (SelID == -1)
+ return RequireNonAbstractType(Loc, T, PDiag(DiagID));
+ else
+ return RequireNonAbstractType(Loc, T, PDiag(DiagID) << SelID);
+}
+
+bool Sema::RequireNonAbstractType(SourceLocation Loc, QualType T,
+ const PartialDiagnostic &PD) {
+ if (!getLangOpts().CPlusPlus)
+ return false;
+
+ if (const ArrayType *AT = Context.getAsArrayType(T))
+ return RequireNonAbstractType(Loc, AT->getElementType(), PD);
+
+ if (const PointerType *PT = T->getAs<PointerType>()) {
+ // Find the innermost pointer type.
+ while (const PointerType *T = PT->getPointeeType()->getAs<PointerType>())
+ PT = T;
+
+ if (const ArrayType *AT = Context.getAsArrayType(PT->getPointeeType()))
+ return RequireNonAbstractType(Loc, AT->getElementType(), PD);
+ }
+
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ // We can't answer whether something is abstract until it has a
+ // definition. If it's currently being defined, we'll walk back
+ // over all the declarations when we have a full definition.
+ const CXXRecordDecl *Def = RD->getDefinition();
+ if (!Def || Def->isBeingDefined())
+ return false;
+
+ if (!RD->isAbstract())
+ return false;
+
+ Diag(Loc, PD) << RD->getDeclName();
+ DiagnoseAbstractType(RD);
+
+ return true;
+}
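+
+// Illustration: given
+//   struct Shape { virtual void draw() = 0; };
+// this check rejects declarations such as 'Shape s;' or 'Shape arr[3];',
+// while 'Shape *' and 'Shape &' remain valid since only the pointee or
+// referent type may be abstract.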
+
+void Sema::DiagnoseAbstractType(const CXXRecordDecl *RD) {
+ // Check if we've already emitted the list of pure virtual functions
+ // for this class.
+ if (PureVirtualClassDiagSet && PureVirtualClassDiagSet->count(RD))
+ return;
+
+ CXXFinalOverriderMap FinalOverriders;
+ RD->getFinalOverriders(FinalOverriders);
+
+ // Keep a set of seen pure methods so we won't diagnose the same method
+ // more than once.
+ llvm::SmallPtrSet<const CXXMethodDecl *, 8> SeenPureMethods;
+
+ for (CXXFinalOverriderMap::iterator M = FinalOverriders.begin(),
+ MEnd = FinalOverriders.end();
+ M != MEnd;
+ ++M) {
+ for (OverridingMethods::iterator SO = M->second.begin(),
+ SOEnd = M->second.end();
+ SO != SOEnd; ++SO) {
+ // C++ [class.abstract]p4:
+ // A class is abstract if it contains or inherits at least one
+ // pure virtual function for which the final overrider is pure
+ // virtual.
+
+ if (SO->second.size() != 1)
+ continue;
+
+ if (!SO->second.front().Method->isPure())
+ continue;
+
+ if (!SeenPureMethods.insert(SO->second.front().Method))
+ continue;
+
+ Diag(SO->second.front().Method->getLocation(),
+ diag::note_pure_virtual_function)
+ << SO->second.front().Method->getDeclName() << RD->getDeclName();
+ }
+ }
+
+ if (!PureVirtualClassDiagSet)
+ PureVirtualClassDiagSet.reset(new RecordDeclSetTy);
+ PureVirtualClassDiagSet->insert(RD);
+}
+
+namespace {
+struct AbstractUsageInfo {
+ Sema &S;
+ CXXRecordDecl *Record;
+ CanQualType AbstractType;
+ bool Invalid;
+
+ AbstractUsageInfo(Sema &S, CXXRecordDecl *Record)
+ : S(S), Record(Record),
+ AbstractType(S.Context.getCanonicalType(
+ S.Context.getTypeDeclType(Record))),
+ Invalid(false) {}
+
+ void DiagnoseAbstractType() {
+ if (Invalid) return;
+ S.DiagnoseAbstractType(Record);
+ Invalid = true;
+ }
+
+ void CheckType(const NamedDecl *D, TypeLoc TL, Sema::AbstractDiagSelID Sel);
+};
+
+struct CheckAbstractUsage {
+ AbstractUsageInfo &Info;
+ const NamedDecl *Ctx;
+
+ CheckAbstractUsage(AbstractUsageInfo &Info, const NamedDecl *Ctx)
+ : Info(Info), Ctx(Ctx) {}
+
+ void Visit(TypeLoc TL, Sema::AbstractDiagSelID Sel) {
+ switch (TL.getTypeLocClass()) {
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ case TypeLoc::CLASS: Check(cast<CLASS##TypeLoc>(TL), Sel); break;
+#include "clang/AST/TypeLocNodes.def"
+ }
+ }
+
+ void Check(FunctionProtoTypeLoc TL, Sema::AbstractDiagSelID Sel) {
+ Visit(TL.getResultLoc(), Sema::AbstractReturnType);
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
+ if (!TL.getArg(I))
+ continue;
+
+ TypeSourceInfo *TSI = TL.getArg(I)->getTypeSourceInfo();
+ if (TSI) Visit(TSI->getTypeLoc(), Sema::AbstractParamType);
+ }
+ }
+
+ void Check(ArrayTypeLoc TL, Sema::AbstractDiagSelID Sel) {
+ Visit(TL.getElementLoc(), Sema::AbstractArrayType);
+ }
+
+ void Check(TemplateSpecializationTypeLoc TL, Sema::AbstractDiagSelID Sel) {
+ // Visit the type parameters from a permissive context.
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I) {
+ TemplateArgumentLoc TAL = TL.getArgLoc(I);
+ if (TAL.getArgument().getKind() == TemplateArgument::Type)
+ if (TypeSourceInfo *TSI = TAL.getTypeSourceInfo())
+ Visit(TSI->getTypeLoc(), Sema::AbstractNone);
+ // TODO: other template argument types?
+ }
+ }
+
+ // Visit pointee types from a permissive context.
+#define CheckPolymorphic(Type) \
+ void Check(Type TL, Sema::AbstractDiagSelID Sel) { \
+ Visit(TL.getNextTypeLoc(), Sema::AbstractNone); \
+ }
+ CheckPolymorphic(PointerTypeLoc)
+ CheckPolymorphic(ReferenceTypeLoc)
+ CheckPolymorphic(MemberPointerTypeLoc)
+ CheckPolymorphic(BlockPointerTypeLoc)
+ CheckPolymorphic(AtomicTypeLoc)
+
+ /// Handle all the types we haven't given a more specific
+ /// implementation for above.
+ void Check(TypeLoc TL, Sema::AbstractDiagSelID Sel) {
+ // Every other kind of type that we haven't called out already
+ // that has an inner type is either (1) sugar or (2) contains that
+ // inner type in some way as a subobject.
+ if (TypeLoc Next = TL.getNextTypeLoc())
+ return Visit(Next, Sel);
+
+ // If there's no inner type and we're in a permissive context,
+ // don't diagnose.
+ if (Sel == Sema::AbstractNone) return;
+
+ // Check whether the type matches the abstract type.
+ QualType T = TL.getType();
+ if (T->isArrayType()) {
+ Sel = Sema::AbstractArrayType;
+ T = Info.S.Context.getBaseElementType(T);
+ }
+ CanQualType CT = T->getCanonicalTypeUnqualified().getUnqualifiedType();
+ if (CT != Info.AbstractType) return;
+
+ // It matched; do some magic.
+ if (Sel == Sema::AbstractArrayType) {
+ Info.S.Diag(Ctx->getLocation(), diag::err_array_of_abstract_type)
+ << T << TL.getSourceRange();
+ } else {
+ Info.S.Diag(Ctx->getLocation(), diag::err_abstract_type_in_decl)
+ << Sel << T << TL.getSourceRange();
+ }
+ Info.DiagnoseAbstractType();
+ }
+};
+
+void AbstractUsageInfo::CheckType(const NamedDecl *D, TypeLoc TL,
+ Sema::AbstractDiagSelID Sel) {
+ CheckAbstractUsage(*this, D).Visit(TL, Sel);
+}
+
+}
+
+/// Check for invalid uses of an abstract type in a method declaration.
+static void CheckAbstractClassUsage(AbstractUsageInfo &Info,
+ CXXMethodDecl *MD) {
+ // No need to do the check on definitions, which require that
+ // the return/param types be complete.
+ if (MD->doesThisDeclarationHaveABody())
+ return;
+
+ // For safety's sake, just ignore it if we don't have type source
+ // information. This should never happen for non-implicit methods,
+ // but...
+ if (TypeSourceInfo *TSI = MD->getTypeSourceInfo())
+ Info.CheckType(MD, TSI->getTypeLoc(), Sema::AbstractNone);
+}
+
+/// Check for invalid uses of an abstract type within a class definition.
+static void CheckAbstractClassUsage(AbstractUsageInfo &Info,
+ CXXRecordDecl *RD) {
+ for (CXXRecordDecl::decl_iterator
+ I = RD->decls_begin(), E = RD->decls_end(); I != E; ++I) {
+ Decl *D = *I;
+ if (D->isImplicit()) continue;
+
+ // Methods and method templates.
+ if (isa<CXXMethodDecl>(D)) {
+ CheckAbstractClassUsage(Info, cast<CXXMethodDecl>(D));
+ } else if (isa<FunctionTemplateDecl>(D)) {
+ FunctionDecl *FD = cast<FunctionTemplateDecl>(D)->getTemplatedDecl();
+ CheckAbstractClassUsage(Info, cast<CXXMethodDecl>(FD));
+
+ // Fields and static variables.
+ } else if (isa<FieldDecl>(D)) {
+ FieldDecl *FD = cast<FieldDecl>(D);
+ if (TypeSourceInfo *TSI = FD->getTypeSourceInfo())
+ Info.CheckType(FD, TSI->getTypeLoc(), Sema::AbstractFieldType);
+ } else if (isa<VarDecl>(D)) {
+ VarDecl *VD = cast<VarDecl>(D);
+ if (TypeSourceInfo *TSI = VD->getTypeSourceInfo())
+ Info.CheckType(VD, TSI->getTypeLoc(), Sema::AbstractVariableType);
+
+ // Nested classes and class templates.
+ } else if (isa<CXXRecordDecl>(D)) {
+ CheckAbstractClassUsage(Info, cast<CXXRecordDecl>(D));
+ } else if (isa<ClassTemplateDecl>(D)) {
+ CheckAbstractClassUsage(Info,
+ cast<ClassTemplateDecl>(D)->getTemplatedDecl());
+ }
+ }
+}
+
+/// \brief Perform semantic checks on a class definition that has been
+/// completing, introducing implicitly-declared members, checking for
+/// abstract types, etc.
+void Sema::CheckCompletedCXXClass(CXXRecordDecl *Record) {
+ if (!Record)
+ return;
+
+ if (Record->isAbstract() && !Record->isInvalidDecl()) {
+ AbstractUsageInfo Info(*this, Record);
+ CheckAbstractClassUsage(Info, Record);
+ }
+
+ // If this is not an aggregate type and has no user-declared constructor,
+ // complain about any non-static data members of reference or const scalar
+ // type, since they will never get initializers.
+ if (!Record->isInvalidDecl() && !Record->isDependentType() &&
+ !Record->isAggregate() && !Record->hasUserDeclaredConstructor() &&
+ !Record->isLambda()) {
+ bool Complained = false;
+ for (RecordDecl::field_iterator F = Record->field_begin(),
+ FEnd = Record->field_end();
+ F != FEnd; ++F) {
+ if (F->hasInClassInitializer() || F->isUnnamedBitfield())
+ continue;
+
+ if (F->getType()->isReferenceType() ||
+ (F->getType().isConstQualified() && F->getType()->isScalarType())) {
+ if (!Complained) {
+ Diag(Record->getLocation(), diag::warn_no_constructor_for_refconst)
+ << Record->getTagKind() << Record;
+ Complained = true;
+ }
+
+ Diag(F->getLocation(), diag::note_refconst_member_not_initialized)
+ << F->getType()->isReferenceType()
+ << F->getDeclName();
+ }
+ }
+ }
+
+ if (Record->isDynamicClass() && !Record->isDependentType())
+ DynamicClasses.push_back(Record);
+
+ if (Record->getIdentifier()) {
+ // C++ [class.mem]p13:
+ // If T is the name of a class, then each of the following shall have a
+ // name different from T:
+ // - every member of every anonymous union that is a member of class T.
+ //
+ // C++ [class.mem]p14:
+ // In addition, if class T has a user-declared constructor (12.1), every
+ // non-static data member of class T shall have a name different from T.
+ for (DeclContext::lookup_result R = Record->lookup(Record->getDeclName());
+ R.first != R.second; ++R.first) {
+ NamedDecl *D = *R.first;
+ if ((isa<FieldDecl>(D) && Record->hasUserDeclaredConstructor()) ||
+ isa<IndirectFieldDecl>(D)) {
+ Diag(D->getLocation(), diag::err_member_name_of_class)
+ << D->getDeclName();
+ break;
+ }
+ }
+ }
+
+  // Warn if the class has virtual methods but a non-virtual public destructor.
+ if (Record->isPolymorphic() && !Record->isDependentType()) {
+ CXXDestructorDecl *dtor = Record->getDestructor();
+ if (!dtor || (!dtor->isVirtual() && dtor->getAccess() == AS_public))
+ Diag(dtor ? dtor->getLocation() : Record->getLocation(),
+ diag::warn_non_virtual_dtor) << Context.getRecordType(Record);
+ }
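+
+  // Illustration: with -Wnon-virtual-dtor, a class such as
+  //   struct Base { virtual void f(); ~Base() {} };
+  // is flagged here, since deleting a derived object through a 'Base*' would
+  // not run the derived class's destructor.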
+
+  // See if a method overloads virtual methods in a base
+  // class without overriding any.
+ if (!Record->isDependentType()) {
+ for (CXXRecordDecl::method_iterator M = Record->method_begin(),
+ MEnd = Record->method_end();
+ M != MEnd; ++M) {
+ if (!(*M)->isStatic())
+ DiagnoseHiddenVirtualMethods(Record, *M);
+ }
+ }
+
+ // C++0x [dcl.constexpr]p8: A constexpr specifier for a non-static member
+ // function that is not a constructor declares that member function to be
+ // const. [...] The class of which that function is a member shall be
+ // a literal type.
+ //
+ // If the class has virtual bases, any constexpr members will already have
+ // been diagnosed by the checks performed on the member declaration, so
+ // suppress this (less useful) diagnostic.
+ if (LangOpts.CPlusPlus0x && !Record->isDependentType() &&
+ !Record->isLiteral() && !Record->getNumVBases()) {
+ for (CXXRecordDecl::method_iterator M = Record->method_begin(),
+ MEnd = Record->method_end();
+ M != MEnd; ++M) {
+ if (M->isConstexpr() && M->isInstance() && !isa<CXXConstructorDecl>(*M)) {
+ switch (Record->getTemplateSpecializationKind()) {
+ case TSK_ImplicitInstantiation:
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ExplicitInstantiationDefinition:
+ // If a template instantiates to a non-literal type, but its members
+ // instantiate to constexpr functions, the template is technically
+ // ill-formed, but we allow it for sanity.
+ continue;
+
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ RequireLiteralType((*M)->getLocation(), Context.getRecordType(Record),
+ PDiag(diag::err_constexpr_method_non_literal));
+ break;
+ }
+
+ // Only produce one error per class.
+ break;
+ }
+ }
+ }
+
+ // Declare inherited constructors. We do this eagerly here because:
+ // - The standard requires an eager diagnostic for conflicting inherited
+ // constructors from different classes.
+ // - The lazy declaration of the other implicit constructors is so as to not
+ // waste space and performance on classes that are not meant to be
+ // instantiated (e.g. meta-functions). This doesn't apply to classes that
+ // have inherited constructors.
+ DeclareInheritedConstructors(Record);
+
+ if (!Record->isDependentType())
+ CheckExplicitlyDefaultedMethods(Record);
+}
+
+void Sema::CheckExplicitlyDefaultedMethods(CXXRecordDecl *Record) {
+ for (CXXRecordDecl::method_iterator MI = Record->method_begin(),
+ ME = Record->method_end();
+ MI != ME; ++MI) {
+ if (!MI->isInvalidDecl() && MI->isExplicitlyDefaulted()) {
+ switch (getSpecialMember(*MI)) {
+ case CXXDefaultConstructor:
+ CheckExplicitlyDefaultedDefaultConstructor(
+ cast<CXXConstructorDecl>(*MI));
+ break;
+
+ case CXXDestructor:
+ CheckExplicitlyDefaultedDestructor(cast<CXXDestructorDecl>(*MI));
+ break;
+
+ case CXXCopyConstructor:
+ CheckExplicitlyDefaultedCopyConstructor(cast<CXXConstructorDecl>(*MI));
+ break;
+
+ case CXXCopyAssignment:
+ CheckExplicitlyDefaultedCopyAssignment(*MI);
+ break;
+
+ case CXXMoveConstructor:
+ CheckExplicitlyDefaultedMoveConstructor(cast<CXXConstructorDecl>(*MI));
+ break;
+
+ case CXXMoveAssignment:
+ CheckExplicitlyDefaultedMoveAssignment(*MI);
+ break;
+
+ case CXXInvalid:
+ llvm_unreachable("non-special member explicitly defaulted!");
+ }
+ }
+ }
+
+}
+
+void Sema::CheckExplicitlyDefaultedDefaultConstructor(CXXConstructorDecl *CD) {
+ assert(CD->isExplicitlyDefaulted() && CD->isDefaultConstructor());
+
+ // Whether this was the first-declared instance of the constructor.
+ // This affects whether we implicitly add an exception spec (and, eventually,
+ // constexpr). It is also ill-formed to explicitly default a constructor such
+ // that it would be deleted. (C++0x [decl.fct.def.default])
+ bool First = CD == CD->getCanonicalDecl();
+
+ bool HadError = false;
+ if (CD->getNumParams() != 0) {
+ Diag(CD->getLocation(), diag::err_defaulted_default_ctor_params)
+ << CD->getSourceRange();
+ HadError = true;
+ }
+
+ ImplicitExceptionSpecification Spec
+ = ComputeDefaultedDefaultCtorExceptionSpec(CD->getParent());
+ FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
+ if (EPI.ExceptionSpecType == EST_Delayed) {
+ // Exception specification depends on some deferred part of the class. We'll
+ // try again when the class's definition has been fully processed.
+ return;
+ }
+ const FunctionProtoType *CtorType = CD->getType()->getAs<FunctionProtoType>(),
+ *ExceptionType = Context.getFunctionType(
+ Context.VoidTy, 0, 0, EPI)->getAs<FunctionProtoType>();
+
+ // C++11 [dcl.fct.def.default]p2:
+ // An explicitly-defaulted function may be declared constexpr only if it
+ // would have been implicitly declared as constexpr,
+ // Do not apply this rule to templates, since core issue 1358 makes such
+ // functions always instantiate to constexpr functions.
+ if (CD->isConstexpr() &&
+ CD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) {
+ if (!CD->getParent()->defaultedDefaultConstructorIsConstexpr()) {
+ Diag(CD->getLocStart(), diag::err_incorrect_defaulted_constexpr)
+ << CXXDefaultConstructor;
+ HadError = true;
+ }
+ }
+ // and may have an explicit exception-specification only if it is compatible
+ // with the exception-specification on the implicit declaration.
+ if (CtorType->hasExceptionSpec()) {
+ if (CheckEquivalentExceptionSpec(
+ PDiag(diag::err_incorrect_defaulted_exception_spec)
+ << CXXDefaultConstructor,
+ PDiag(),
+ ExceptionType, SourceLocation(),
+ CtorType, CD->getLocation())) {
+ HadError = true;
+ }
+ }
+
+ // If a function is explicitly defaulted on its first declaration,
+ if (First) {
+ // -- it is implicitly considered to be constexpr if the implicit
+ // definition would be,
+ CD->setConstexpr(CD->getParent()->defaultedDefaultConstructorIsConstexpr());
+
+ // -- it is implicitly considered to have the same
+ // exception-specification as if it had been implicitly declared
+ //
+ // FIXME: a compatible, but different, explicit exception specification
+ // will be silently overridden. We should issue a warning if this happens.
+ EPI.ExtInfo = CtorType->getExtInfo();
+
+ // Such a function is also trivial if the implicitly-declared function
+ // would have been.
+ CD->setTrivial(CD->getParent()->hasTrivialDefaultConstructor());
+ }
+
+ if (HadError) {
+ CD->setInvalidDecl();
+ return;
+ }
+
+ if (ShouldDeleteSpecialMember(CD, CXXDefaultConstructor)) {
+ if (First) {
+ CD->setDeletedAsWritten();
+ } else {
+ Diag(CD->getLocation(), diag::err_out_of_line_default_deletes)
+ << CXXDefaultConstructor;
+ CD->setInvalidDecl();
+ }
+ }
+}
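+
+// Illustration: given
+//   struct NoDef { NoDef(int); };
+//   struct S { NoDef n; S(); };
+// an out-of-line 'S::S() = default;' is rejected above because the defaulted
+// definition would be deleted; an in-class 'S() = default;' is instead marked
+// deleted as written.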
+
+void Sema::CheckExplicitlyDefaultedCopyConstructor(CXXConstructorDecl *CD) {
+ assert(CD->isExplicitlyDefaulted() && CD->isCopyConstructor());
+
+ // Whether this was the first-declared instance of the constructor.
+ bool First = CD == CD->getCanonicalDecl();
+
+ bool HadError = false;
+ if (CD->getNumParams() != 1) {
+ Diag(CD->getLocation(), diag::err_defaulted_copy_ctor_params)
+ << CD->getSourceRange();
+ HadError = true;
+ }
+
+ ImplicitExceptionSpecification Spec(Context);
+ bool Const;
+ llvm::tie(Spec, Const) =
+ ComputeDefaultedCopyCtorExceptionSpecAndConst(CD->getParent());
+
+ FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
+ const FunctionProtoType *CtorType = CD->getType()->getAs<FunctionProtoType>(),
+ *ExceptionType = Context.getFunctionType(
+ Context.VoidTy, 0, 0, EPI)->getAs<FunctionProtoType>();
+
+ // Check for parameter type matching.
+ // This is a copy ctor so we know it's a cv-qualified reference to T.
+ QualType ArgType = CtorType->getArgType(0);
+ if (ArgType->getPointeeType().isVolatileQualified()) {
+ Diag(CD->getLocation(), diag::err_defaulted_copy_ctor_volatile_param);
+ HadError = true;
+ }
+ if (ArgType->getPointeeType().isConstQualified() && !Const) {
+ Diag(CD->getLocation(), diag::err_defaulted_copy_ctor_const_param);
+ HadError = true;
+ }
+
+ // C++11 [dcl.fct.def.default]p2:
+ // An explicitly-defaulted function may be declared constexpr only if it
+ // would have been implicitly declared as constexpr,
+ // Do not apply this rule to templates, since core issue 1358 makes such
+ // functions always instantiate to constexpr functions.
+ if (CD->isConstexpr() &&
+ CD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) {
+ if (!CD->getParent()->defaultedCopyConstructorIsConstexpr()) {
+ Diag(CD->getLocStart(), diag::err_incorrect_defaulted_constexpr)
+ << CXXCopyConstructor;
+ HadError = true;
+ }
+ }
+ // and may have an explicit exception-specification only if it is compatible
+ // with the exception-specification on the implicit declaration.
+ if (CtorType->hasExceptionSpec()) {
+ if (CheckEquivalentExceptionSpec(
+ PDiag(diag::err_incorrect_defaulted_exception_spec)
+ << CXXCopyConstructor,
+ PDiag(),
+ ExceptionType, SourceLocation(),
+ CtorType, CD->getLocation())) {
+ HadError = true;
+ }
+ }
+
+ // If a function is explicitly defaulted on its first declaration,
+ if (First) {
+ // -- it is implicitly considered to be constexpr if the implicit
+ // definition would be,
+ CD->setConstexpr(CD->getParent()->defaultedCopyConstructorIsConstexpr());
+
+ // -- it is implicitly considered to have the same
+ // exception-specification as if it had been implicitly declared, and
+ //
+ // FIXME: a compatible, but different, explicit exception specification
+ // will be silently overridden. We should issue a warning if this happens.
+ EPI.ExtInfo = CtorType->getExtInfo();
+
+ // -- [...] it shall have the same parameter type as if it had been
+ // implicitly declared.
+ CD->setType(Context.getFunctionType(Context.VoidTy, &ArgType, 1, EPI));
+
+ // Such a function is also trivial if the implicitly-declared function
+ // would have been.
+ CD->setTrivial(CD->getParent()->hasTrivialCopyConstructor());
+ }
+
+ if (HadError) {
+ CD->setInvalidDecl();
+ return;
+ }
+
+ if (ShouldDeleteSpecialMember(CD, CXXCopyConstructor)) {
+ if (First) {
+ CD->setDeletedAsWritten();
+ } else {
+ Diag(CD->getLocation(), diag::err_out_of_line_default_deletes)
+ << CXXCopyConstructor;
+ CD->setInvalidDecl();
+ }
+ }
+}
+
+void Sema::CheckExplicitlyDefaultedCopyAssignment(CXXMethodDecl *MD) {
+ assert(MD->isExplicitlyDefaulted());
+
+ // Whether this was the first-declared instance of the operator
+ bool First = MD == MD->getCanonicalDecl();
+
+ bool HadError = false;
+ if (MD->getNumParams() != 1) {
+ Diag(MD->getLocation(), diag::err_defaulted_copy_assign_params)
+ << MD->getSourceRange();
+ HadError = true;
+ }
+
+ QualType ReturnType =
+ MD->getType()->getAs<FunctionType>()->getResultType();
+ if (!ReturnType->isLValueReferenceType() ||
+ !Context.hasSameType(
+ Context.getCanonicalType(ReturnType->getPointeeType()),
+ Context.getCanonicalType(Context.getTypeDeclType(MD->getParent())))) {
+ Diag(MD->getLocation(), diag::err_defaulted_copy_assign_return_type);
+ HadError = true;
+ }
+
+ ImplicitExceptionSpecification Spec(Context);
+ bool Const;
+ llvm::tie(Spec, Const) =
+ ComputeDefaultedCopyCtorExceptionSpecAndConst(MD->getParent());
+
+ FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
+ const FunctionProtoType *OperType = MD->getType()->getAs<FunctionProtoType>(),
+ *ExceptionType = Context.getFunctionType(
+ Context.VoidTy, 0, 0, EPI)->getAs<FunctionProtoType>();
+
+ QualType ArgType = OperType->getArgType(0);
+ if (!ArgType->isLValueReferenceType()) {
+ Diag(MD->getLocation(), diag::err_defaulted_copy_assign_not_ref);
+ HadError = true;
+ } else {
+ if (ArgType->getPointeeType().isVolatileQualified()) {
+ Diag(MD->getLocation(), diag::err_defaulted_copy_assign_volatile_param);
+ HadError = true;
+ }
+ if (ArgType->getPointeeType().isConstQualified() && !Const) {
+ Diag(MD->getLocation(), diag::err_defaulted_copy_assign_const_param);
+ HadError = true;
+ }
+ }
+
+ if (OperType->getTypeQuals()) {
+ Diag(MD->getLocation(), diag::err_defaulted_copy_assign_quals);
+ HadError = true;
+ }
+
+ if (OperType->hasExceptionSpec()) {
+ if (CheckEquivalentExceptionSpec(
+ PDiag(diag::err_incorrect_defaulted_exception_spec)
+ << CXXCopyAssignment,
+ PDiag(),
+ ExceptionType, SourceLocation(),
+ OperType, MD->getLocation())) {
+ HadError = true;
+ }
+ }
+ if (First) {
+ // We set the declaration to have the computed exception spec here.
+ // We duplicate the one parameter type.
+ EPI.RefQualifier = OperType->getRefQualifier();
+ EPI.ExtInfo = OperType->getExtInfo();
+ MD->setType(Context.getFunctionType(ReturnType, &ArgType, 1, EPI));
+
+ // Such a function is also trivial if the implicitly-declared function
+ // would have been.
+ MD->setTrivial(MD->getParent()->hasTrivialCopyAssignment());
+ }
+
+ if (HadError) {
+ MD->setInvalidDecl();
+ return;
+ }
+
+ if (ShouldDeleteSpecialMember(MD, CXXCopyAssignment)) {
+ if (First) {
+ MD->setDeletedAsWritten();
+ } else {
+ Diag(MD->getLocation(), diag::err_out_of_line_default_deletes)
+ << CXXCopyAssignment;
+ MD->setInvalidDecl();
+ }
+ }
+}
+
+void Sema::CheckExplicitlyDefaultedMoveConstructor(CXXConstructorDecl *CD) {
+ assert(CD->isExplicitlyDefaulted() && CD->isMoveConstructor());
+
+ // Whether this was the first-declared instance of the constructor.
+ bool First = CD == CD->getCanonicalDecl();
+
+ bool HadError = false;
+ if (CD->getNumParams() != 1) {
+ Diag(CD->getLocation(), diag::err_defaulted_move_ctor_params)
+ << CD->getSourceRange();
+ HadError = true;
+ }
+
+ ImplicitExceptionSpecification Spec(
+ ComputeDefaultedMoveCtorExceptionSpec(CD->getParent()));
+
+ FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
+ const FunctionProtoType *CtorType = CD->getType()->getAs<FunctionProtoType>(),
+ *ExceptionType = Context.getFunctionType(
+ Context.VoidTy, 0, 0, EPI)->getAs<FunctionProtoType>();
+
+ // Check for parameter type matching.
+ // This is a move ctor so we know it's a cv-qualified rvalue reference to T.
+ QualType ArgType = CtorType->getArgType(0);
+ if (ArgType->getPointeeType().isVolatileQualified()) {
+ Diag(CD->getLocation(), diag::err_defaulted_move_ctor_volatile_param);
+ HadError = true;
+ }
+ if (ArgType->getPointeeType().isConstQualified()) {
+ Diag(CD->getLocation(), diag::err_defaulted_move_ctor_const_param);
+ HadError = true;
+ }
+
+ // C++11 [dcl.fct.def.default]p2:
+ // An explicitly-defaulted function may be declared constexpr only if it
+ // would have been implicitly declared as constexpr,
+ // Do not apply this rule to templates, since core issue 1358 makes such
+ // functions always instantiate to constexpr functions.
+ if (CD->isConstexpr() &&
+ CD->getTemplatedKind() == FunctionDecl::TK_NonTemplate) {
+ if (!CD->getParent()->defaultedMoveConstructorIsConstexpr()) {
+ Diag(CD->getLocStart(), diag::err_incorrect_defaulted_constexpr)
+ << CXXMoveConstructor;
+ HadError = true;
+ }
+ }
+ // and may have an explicit exception-specification only if it is compatible
+ // with the exception-specification on the implicit declaration.
+ if (CtorType->hasExceptionSpec()) {
+ if (CheckEquivalentExceptionSpec(
+ PDiag(diag::err_incorrect_defaulted_exception_spec)
+ << CXXMoveConstructor,
+ PDiag(),
+ ExceptionType, SourceLocation(),
+ CtorType, CD->getLocation())) {
+ HadError = true;
+ }
+ }
+
+ // If a function is explicitly defaulted on its first declaration,
+ if (First) {
+ // -- it is implicitly considered to be constexpr if the implicit
+ // definition would be,
+ CD->setConstexpr(CD->getParent()->defaultedMoveConstructorIsConstexpr());
+
+ // -- it is implicitly considered to have the same
+ // exception-specification as if it had been implicitly declared, and
+ //
+ // FIXME: a compatible, but different, explicit exception specification
+ // will be silently overridden. We should issue a warning if this happens.
+ EPI.ExtInfo = CtorType->getExtInfo();
+
+ // -- [...] it shall have the same parameter type as if it had been
+ // implicitly declared.
+ CD->setType(Context.getFunctionType(Context.VoidTy, &ArgType, 1, EPI));
+
+ // Such a function is also trivial if the implicitly-declared function
+ // would have been.
+ CD->setTrivial(CD->getParent()->hasTrivialMoveConstructor());
+ }
+
+ if (HadError) {
+ CD->setInvalidDecl();
+ return;
+ }
+
+ if (ShouldDeleteSpecialMember(CD, CXXMoveConstructor)) {
+ if (First) {
+ CD->setDeletedAsWritten();
+ } else {
+ Diag(CD->getLocation(), diag::err_out_of_line_default_deletes)
+ << CXXMoveConstructor;
+ CD->setInvalidDecl();
+ }
+ }
+}
+
+void Sema::CheckExplicitlyDefaultedMoveAssignment(CXXMethodDecl *MD) {
+ assert(MD->isExplicitlyDefaulted());
+
+ // Whether this was the first-declared instance of the operator
+ bool First = MD == MD->getCanonicalDecl();
+
+ bool HadError = false;
+ if (MD->getNumParams() != 1) {
+ Diag(MD->getLocation(), diag::err_defaulted_move_assign_params)
+ << MD->getSourceRange();
+ HadError = true;
+ }
+
+ QualType ReturnType =
+ MD->getType()->getAs<FunctionType>()->getResultType();
+ if (!ReturnType->isLValueReferenceType() ||
+ !Context.hasSameType(
+ Context.getCanonicalType(ReturnType->getPointeeType()),
+ Context.getCanonicalType(Context.getTypeDeclType(MD->getParent())))) {
+ Diag(MD->getLocation(), diag::err_defaulted_move_assign_return_type);
+ HadError = true;
+ }
+
+ ImplicitExceptionSpecification Spec(
+ ComputeDefaultedMoveCtorExceptionSpec(MD->getParent()));
+
+ FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
+ const FunctionProtoType *OperType = MD->getType()->getAs<FunctionProtoType>(),
+ *ExceptionType = Context.getFunctionType(
+ Context.VoidTy, 0, 0, EPI)->getAs<FunctionProtoType>();
+
+ QualType ArgType = OperType->getArgType(0);
+ if (!ArgType->isRValueReferenceType()) {
+ Diag(MD->getLocation(), diag::err_defaulted_move_assign_not_ref);
+ HadError = true;
+ } else {
+ if (ArgType->getPointeeType().isVolatileQualified()) {
+ Diag(MD->getLocation(), diag::err_defaulted_move_assign_volatile_param);
+ HadError = true;
+ }
+ if (ArgType->getPointeeType().isConstQualified()) {
+ Diag(MD->getLocation(), diag::err_defaulted_move_assign_const_param);
+ HadError = true;
+ }
+ }
+
+ if (OperType->getTypeQuals()) {
+ Diag(MD->getLocation(), diag::err_defaulted_move_assign_quals);
+ HadError = true;
+ }
+
+ if (OperType->hasExceptionSpec()) {
+ if (CheckEquivalentExceptionSpec(
+ PDiag(diag::err_incorrect_defaulted_exception_spec)
+ << CXXMoveAssignment,
+ PDiag(),
+ ExceptionType, SourceLocation(),
+ OperType, MD->getLocation())) {
+ HadError = true;
+ }
+ }
+ if (First) {
+ // We set the declaration to have the computed exception spec here.
+ // We duplicate the one parameter type.
+ EPI.RefQualifier = OperType->getRefQualifier();
+ EPI.ExtInfo = OperType->getExtInfo();
+ MD->setType(Context.getFunctionType(ReturnType, &ArgType, 1, EPI));
+
+ // Such a function is also trivial if the implicitly-declared function
+ // would have been.
+ MD->setTrivial(MD->getParent()->hasTrivialMoveAssignment());
+ }
+
+ if (HadError) {
+ MD->setInvalidDecl();
+ return;
+ }
+
+ if (ShouldDeleteSpecialMember(MD, CXXMoveAssignment)) {
+ if (First) {
+ MD->setDeletedAsWritten();
+ } else {
+ Diag(MD->getLocation(), diag::err_out_of_line_default_deletes)
+ << CXXMoveAssignment;
+ MD->setInvalidDecl();
+ }
+ }
+}
+
+void Sema::CheckExplicitlyDefaultedDestructor(CXXDestructorDecl *DD) {
+ assert(DD->isExplicitlyDefaulted());
+
+ // Whether this was the first-declared instance of the destructor.
+ bool First = DD == DD->getCanonicalDecl();
+
+ ImplicitExceptionSpecification Spec
+ = ComputeDefaultedDtorExceptionSpec(DD->getParent());
+ FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
+ const FunctionProtoType *DtorType = DD->getType()->getAs<FunctionProtoType>(),
+ *ExceptionType = Context.getFunctionType(
+ Context.VoidTy, 0, 0, EPI)->getAs<FunctionProtoType>();
+
+ if (DtorType->hasExceptionSpec()) {
+ if (CheckEquivalentExceptionSpec(
+ PDiag(diag::err_incorrect_defaulted_exception_spec)
+ << CXXDestructor,
+ PDiag(),
+ ExceptionType, SourceLocation(),
+ DtorType, DD->getLocation())) {
+ DD->setInvalidDecl();
+ return;
+ }
+ }
+ if (First) {
+ // We set the declaration to have the computed exception spec here.
+ // There are no parameters.
+ EPI.ExtInfo = DtorType->getExtInfo();
+ DD->setType(Context.getFunctionType(Context.VoidTy, 0, 0, EPI));
+
+ // Such a function is also trivial if the implicitly-declared function
+ // would have been.
+ DD->setTrivial(DD->getParent()->hasTrivialDestructor());
+ }
+
+ if (ShouldDeleteSpecialMember(DD, CXXDestructor)) {
+ if (First) {
+ DD->setDeletedAsWritten();
+ } else {
+ Diag(DD->getLocation(), diag::err_out_of_line_default_deletes)
+ << CXXDestructor;
+ DD->setInvalidDecl();
+ }
+ }
+}
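+
+// Illustration: for
+//   struct Member { ~Member() noexcept(false); };
+//   struct S { Member m; ~S() noexcept = default; };
+// the written exception specification is rejected above as incompatible with
+// the one the implicit destructor would have had.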
+
+namespace {
+struct SpecialMemberDeletionInfo {
+ Sema &S;
+ CXXMethodDecl *MD;
+ Sema::CXXSpecialMember CSM;
+ bool Diagnose;
+
+ // Properties of the special member, computed for convenience.
+ bool IsConstructor, IsAssignment, IsMove, ConstArg, VolatileArg;
+ SourceLocation Loc;
+
+ bool AllFieldsAreConst;
+
+ SpecialMemberDeletionInfo(Sema &S, CXXMethodDecl *MD,
+ Sema::CXXSpecialMember CSM, bool Diagnose)
+ : S(S), MD(MD), CSM(CSM), Diagnose(Diagnose),
+ IsConstructor(false), IsAssignment(false), IsMove(false),
+ ConstArg(false), VolatileArg(false), Loc(MD->getLocation()),
+ AllFieldsAreConst(true) {
+ switch (CSM) {
+ case Sema::CXXDefaultConstructor:
+ case Sema::CXXCopyConstructor:
+ IsConstructor = true;
+ break;
+ case Sema::CXXMoveConstructor:
+ IsConstructor = true;
+ IsMove = true;
+ break;
+ case Sema::CXXCopyAssignment:
+ IsAssignment = true;
+ break;
+ case Sema::CXXMoveAssignment:
+ IsAssignment = true;
+ IsMove = true;
+ break;
+ case Sema::CXXDestructor:
+ break;
+ case Sema::CXXInvalid:
+ llvm_unreachable("invalid special member kind");
+ }
+
+ if (MD->getNumParams()) {
+ ConstArg = MD->getParamDecl(0)->getType().isConstQualified();
+ VolatileArg = MD->getParamDecl(0)->getType().isVolatileQualified();
+ }
+ }
+
+ bool inUnion() const { return MD->getParent()->isUnion(); }
+
+ /// Look up the corresponding special member in the given class.
+ Sema::SpecialMemberOverloadResult *lookupIn(CXXRecordDecl *Class) {
+ unsigned TQ = MD->getTypeQualifiers();
+ return S.LookupSpecialMember(Class, CSM, ConstArg, VolatileArg,
+ MD->getRefQualifier() == RQ_RValue,
+ TQ & Qualifiers::Const,
+ TQ & Qualifiers::Volatile);
+ }
+
+ typedef llvm::PointerUnion<CXXBaseSpecifier*, FieldDecl*> Subobject;
+
+ bool shouldDeleteForBase(CXXBaseSpecifier *Base);
+ bool shouldDeleteForField(FieldDecl *FD);
+ bool shouldDeleteForAllConstMembers();
+
+ bool shouldDeleteForClassSubobject(CXXRecordDecl *Class, Subobject Subobj);
+ bool shouldDeleteForSubobjectCall(Subobject Subobj,
+ Sema::SpecialMemberOverloadResult *SMOR,
+ bool IsDtorCallInCtor);
+
+ bool isAccessible(Subobject Subobj, CXXMethodDecl *D);
+};
+}
+
+/// Determine whether the given special member is accessible when used on the
+/// given sub-object.
+bool SpecialMemberDeletionInfo::isAccessible(Subobject Subobj,
+ CXXMethodDecl *target) {
+ /// If we're operating on a base class, the object type is the
+ /// type of this special member.
+ QualType objectTy;
+ AccessSpecifier access = target->getAccess();;
+ if (CXXBaseSpecifier *base = Subobj.dyn_cast<CXXBaseSpecifier*>()) {
+ objectTy = S.Context.getTypeDeclType(MD->getParent());
+ access = CXXRecordDecl::MergeAccess(base->getAccessSpecifier(), access);
+
+ // If we're operating on a field, the object type is the type of the field.
+ } else {
+ objectTy = S.Context.getTypeDeclType(target->getParent());
+ }
+
+ return S.isSpecialMemberAccessibleForDeletion(target, access, objectTy);
+}
+
+/// Check whether we should delete a special member due to the implicit
+/// definition containing a call to a special member of a subobject.
+bool SpecialMemberDeletionInfo::shouldDeleteForSubobjectCall(
+ Subobject Subobj, Sema::SpecialMemberOverloadResult *SMOR,
+ bool IsDtorCallInCtor) {
+ CXXMethodDecl *Decl = SMOR->getMethod();
+ FieldDecl *Field = Subobj.dyn_cast<FieldDecl*>();
+
+ int DiagKind = -1;
+
+ if (SMOR->getKind() == Sema::SpecialMemberOverloadResult::NoMemberOrDeleted)
+ DiagKind = !Decl ? 0 : 1;
+ else if (SMOR->getKind() == Sema::SpecialMemberOverloadResult::Ambiguous)
+ DiagKind = 2;
+ else if (!isAccessible(Subobj, Decl))
+ DiagKind = 3;
+ else if (!IsDtorCallInCtor && Field && Field->getParent()->isUnion() &&
+ !Decl->isTrivial()) {
+ // A member of a union must have a trivial corresponding special member.
+ // As a weird special case, a destructor call from a union's constructor
+ // must be accessible and non-deleted, but need not be trivial. Such a
+ // destructor is never actually called, but is semantically checked as
+ // if it were.
+ DiagKind = 4;
+ }
+
+ if (DiagKind == -1)
+ return false;
+
+ if (Diagnose) {
+ if (Field) {
+ S.Diag(Field->getLocation(),
+ diag::note_deleted_special_member_class_subobject)
+ << CSM << MD->getParent() << /*IsField*/true
+ << Field << DiagKind << IsDtorCallInCtor;
+ } else {
+ CXXBaseSpecifier *Base = Subobj.get<CXXBaseSpecifier*>();
+ S.Diag(Base->getLocStart(),
+ diag::note_deleted_special_member_class_subobject)
+ << CSM << MD->getParent() << /*IsField*/false
+ << Base->getType() << DiagKind << IsDtorCallInCtor;
+ }
+
+ if (DiagKind == 1)
+ S.NoteDeletedFunction(Decl);
+ // FIXME: Explain inaccessibility if DiagKind == 3.
+ }
+
+ return true;
+}
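+
+// Illustration: DiagKind 4 above covers C++11 unrestricted unions such as
+//   union U { std::string s; };
+// whose implicit default constructor is deleted because the variant member
+// 's' has a non-trivial default constructor.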
+
+/// Check whether we should delete a special member function due to having a
+/// direct or virtual base class or non-static data member of class type M.
+bool SpecialMemberDeletionInfo::shouldDeleteForClassSubobject(
+ CXXRecordDecl *Class, Subobject Subobj) {
+ FieldDecl *Field = Subobj.dyn_cast<FieldDecl*>();
+
+ // C++11 [class.ctor]p5:
+ // -- any direct or virtual base class, or non-static data member with no
+ // brace-or-equal-initializer, has class type M (or array thereof) and
+ // either M has no default constructor or overload resolution as applied
+ // to M's default constructor results in an ambiguity or in a function
+ // that is deleted or inaccessible
+ // C++11 [class.copy]p11, C++11 [class.copy]p23:
+ // -- a direct or virtual base class B that cannot be copied/moved because
+ // overload resolution, as applied to B's corresponding special member,
+ // results in an ambiguity or a function that is deleted or inaccessible
+ // from the defaulted special member
+ // C++11 [class.dtor]p5:
+ // -- any direct or virtual base class [...] has a type with a destructor
+ // that is deleted or inaccessible
+ if (!(CSM == Sema::CXXDefaultConstructor &&
+ Field && Field->hasInClassInitializer()) &&
+ shouldDeleteForSubobjectCall(Subobj, lookupIn(Class), false))
+ return true;
+
+ // C++11 [class.ctor]p5, C++11 [class.copy]p11:
+ // -- any direct or virtual base class or non-static data member has a
+ // type with a destructor that is deleted or inaccessible
+ if (IsConstructor) {
+ Sema::SpecialMemberOverloadResult *SMOR =
+ S.LookupSpecialMember(Class, Sema::CXXDestructor,
+ false, false, false, false, false);
+ if (shouldDeleteForSubobjectCall(Subobj, SMOR, true))
+ return true;
+ }
+
+ return false;
+}
+
+/// Check whether we should delete a special member function due to the class
+/// having a particular direct or virtual base class.
+bool SpecialMemberDeletionInfo::shouldDeleteForBase(CXXBaseSpecifier *Base) {
+ CXXRecordDecl *BaseClass = Base->getType()->getAsCXXRecordDecl();
+ return shouldDeleteForClassSubobject(BaseClass, Base);
+}
+
+/// Check whether we should delete a special member function due to the class
+/// having a particular non-static data member.
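+///
+/// For example (illustrative):
+///   struct S {
+///     int &R;       // reference member with no initializer: deletes S::S()
+///     const int C;  // const member with no initializer: deletes S::S()
+///   };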
+bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
+ QualType FieldType = S.Context.getBaseElementType(FD->getType());
+ CXXRecordDecl *FieldRecord = FieldType->getAsCXXRecordDecl();
+
+ if (CSM == Sema::CXXDefaultConstructor) {
+ // For a default constructor, all references must be initialized in-class
+ // and, if a union, it must have a non-const member.
+ if (FieldType->isReferenceType() && !FD->hasInClassInitializer()) {
+ if (Diagnose)
+ S.Diag(FD->getLocation(), diag::note_deleted_default_ctor_uninit_field)
+ << MD->getParent() << FD << FieldType << /*Reference*/0;
+ return true;
+ }
+ // C++11 [class.ctor]p5: any non-variant non-static data member of
+ // const-qualified type (or array thereof) with no
+ // brace-or-equal-initializer does not have a user-provided default
+ // constructor.
+ if (!inUnion() && FieldType.isConstQualified() &&
+ !FD->hasInClassInitializer() &&
+ (!FieldRecord || !FieldRecord->hasUserProvidedDefaultConstructor())) {
+ if (Diagnose)
+ S.Diag(FD->getLocation(), diag::note_deleted_default_ctor_uninit_field)
+ << MD->getParent() << FD << FieldType << /*Const*/1;
+ return true;
+ }
+
+ if (inUnion() && !FieldType.isConstQualified())
+ AllFieldsAreConst = false;
+ } else if (CSM == Sema::CXXCopyConstructor) {
+ // For a copy constructor, data members must not be of rvalue reference
+ // type.
+ if (FieldType->isRValueReferenceType()) {
+ if (Diagnose)
+ S.Diag(FD->getLocation(), diag::note_deleted_copy_ctor_rvalue_reference)
+ << MD->getParent() << FD << FieldType;
+ return true;
+ }
+ } else if (IsAssignment) {
+ // For an assignment operator, data members must not be of reference type.
+ if (FieldType->isReferenceType()) {
+ if (Diagnose)
+ S.Diag(FD->getLocation(), diag::note_deleted_assign_field)
+ << IsMove << MD->getParent() << FD << FieldType << /*Reference*/0;
+ return true;
+ }
+ if (!FieldRecord && FieldType.isConstQualified()) {
+ // C++11 [class.copy]p23:
+ // -- a non-static data member of const non-class type (or array thereof)
+ if (Diagnose)
+ S.Diag(FD->getLocation(), diag::note_deleted_assign_field)
+ << IsMove << MD->getParent() << FD << FieldType << /*Const*/1;
+ return true;
+ }
+ }
+
+ if (FieldRecord) {
+ // Some additional restrictions exist on the variant members.
+ if (!inUnion() && FieldRecord->isUnion() &&
+ FieldRecord->isAnonymousStructOrUnion()) {
+ bool AllVariantFieldsAreConst = true;
+
+ // FIXME: Handle anonymous unions declared within anonymous unions.
+ for (CXXRecordDecl::field_iterator UI = FieldRecord->field_begin(),
+ UE = FieldRecord->field_end();
+ UI != UE; ++UI) {
+ QualType UnionFieldType = S.Context.getBaseElementType(UI->getType());
+
+ if (!UnionFieldType.isConstQualified())
+ AllVariantFieldsAreConst = false;
+
+ CXXRecordDecl *UnionFieldRecord = UnionFieldType->getAsCXXRecordDecl();
+ if (UnionFieldRecord &&
+ shouldDeleteForClassSubobject(UnionFieldRecord, *UI))
+ return true;
+ }
+
+ // At least one member in each anonymous union must be non-const
+ if (CSM == Sema::CXXDefaultConstructor && AllVariantFieldsAreConst &&
+ FieldRecord->field_begin() != FieldRecord->field_end()) {
+ if (Diagnose)
+ S.Diag(FieldRecord->getLocation(),
+ diag::note_deleted_default_ctor_all_const)
+ << MD->getParent() << /*anonymous union*/1;
+ return true;
+ }
+
+ // Don't check the implicit member of the anonymous union type.
+ // This is technically non-conformant, but sanity demands it.
+ return false;
+ }
+
+ if (shouldDeleteForClassSubobject(FieldRecord, FD))
+ return true;
+ }
+
+ return false;
+}
+
+/// C++11 [class.ctor] p5:
+/// A defaulted default constructor for a class X is defined as deleted if
+/// X is a union and all of its variant members are of const-qualified type.
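+///
+/// For example (illustrative):
+///   union U { const int a; const float b; };  // U's defaulted default
+///                                             // constructor is deleted.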
+bool SpecialMemberDeletionInfo::shouldDeleteForAllConstMembers() {
+ // This is a silly definition, because it gives an empty union a deleted
+ // default constructor. Don't do that.
+ if (CSM == Sema::CXXDefaultConstructor && inUnion() && AllFieldsAreConst &&
+ (MD->getParent()->field_begin() != MD->getParent()->field_end())) {
+ if (Diagnose)
+ S.Diag(MD->getParent()->getLocation(),
+ diag::note_deleted_default_ctor_all_const)
+ << MD->getParent() << /*not anonymous union*/0;
+ return true;
+ }
+ return false;
+}
+
+/// Determine whether a defaulted special member function should be defined as
+/// deleted, as specified in C++11 [class.ctor]p5, C++11 [class.copy]p11,
+/// C++11 [class.copy]p23, and C++11 [class.dtor]p5.
+bool Sema::ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
+ bool Diagnose) {
+ assert(!MD->isInvalidDecl());
+ CXXRecordDecl *RD = MD->getParent();
+ assert(!RD->isDependentType() && "do deletion after instantiation");
+ if (!LangOpts.CPlusPlus0x || RD->isInvalidDecl())
+ return false;
+
+ // C++11 [expr.prim.lambda]p19:
+ // The closure type associated with a lambda-expression has a
+ // deleted (8.4.3) default constructor and a deleted copy
+ // assignment operator.
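+ // For example:
+ //   auto L = []{};
+ //   decltype(L) L2;  // error: uses the closure type's deleted default
+ //                    //        constructor
+ //   L = L;           // error: uses the closure type's deleted copy
+ //                    //        assignment operator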
+ if (RD->isLambda() &&
+ (CSM == CXXDefaultConstructor || CSM == CXXCopyAssignment)) {
+ if (Diagnose)
+ Diag(RD->getLocation(), diag::note_lambda_decl);
+ return true;
+ }
+
+ // For an anonymous struct or union, the copy and assignment special members
+ // will never be used, so skip the check. For an anonymous union declared at
+ // namespace scope, the constructor and destructor are used.
+ if (CSM != CXXDefaultConstructor && CSM != CXXDestructor &&
+ RD->isAnonymousStructOrUnion())
+ return false;
+
+ // C++11 [class.copy]p7, p18:
+ // If the class definition declares a move constructor or move assignment
+ // operator, an implicitly declared copy constructor or copy assignment
+ // operator is defined as deleted.
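+ // For example:
+ //   struct S { S(S&&); };  // S's implicitly declared copy constructor and
+ //                          // copy assignment operator are deleted.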
+ if (MD->isImplicit() &&
+ (CSM == CXXCopyConstructor || CSM == CXXCopyAssignment)) {
+ CXXMethodDecl *UserDeclaredMove = 0;
+
+ // In Microsoft mode, a user-declared move only causes the deletion of the
+ // corresponding copy operation, not both copy operations.
+ if (RD->hasUserDeclaredMoveConstructor() &&
+ (!getLangOpts().MicrosoftMode || CSM == CXXCopyConstructor)) {
+ if (!Diagnose) return true;
+ UserDeclaredMove = RD->getMoveConstructor();
+ assert(UserDeclaredMove);
+ } else if (RD->hasUserDeclaredMoveAssignment() &&
+ (!getLangOpts().MicrosoftMode || CSM == CXXCopyAssignment)) {
+ if (!Diagnose) return true;
+ UserDeclaredMove = RD->getMoveAssignmentOperator();
+ assert(UserDeclaredMove);
+ }
+
+ if (UserDeclaredMove) {
+ Diag(UserDeclaredMove->getLocation(),
+ diag::note_deleted_copy_user_declared_move)
+ << (CSM == CXXCopyAssignment) << RD
+ << UserDeclaredMove->isMoveAssignmentOperator();
+ return true;
+ }
+ }
+
+ // Do access control from the special member function
+ ContextRAII MethodContext(*this, MD);
+
+ // C++11 [class.dtor]p5:
+ // -- for a virtual destructor, lookup of the non-array deallocation function
+ // results in an ambiguity or in a function that is deleted or inaccessible
+ if (CSM == CXXDestructor && MD->isVirtual()) {
+ FunctionDecl *OperatorDelete = 0;
+ DeclarationName Name =
+ Context.DeclarationNames.getCXXOperatorName(OO_Delete);
+ if (FindDeallocationFunction(MD->getLocation(), MD->getParent(), Name,
+ OperatorDelete, false)) {
+ if (Diagnose)
+ Diag(RD->getLocation(), diag::note_deleted_dtor_no_operator_delete);
+ return true;
+ }
+ }
+
+ SpecialMemberDeletionInfo SMI(*this, MD, CSM, Diagnose);
+
+ for (CXXRecordDecl::base_class_iterator BI = RD->bases_begin(),
+ BE = RD->bases_end(); BI != BE; ++BI)
+ if (!BI->isVirtual() &&
+ SMI.shouldDeleteForBase(BI))
+ return true;
+
+ for (CXXRecordDecl::base_class_iterator BI = RD->vbases_begin(),
+ BE = RD->vbases_end(); BI != BE; ++BI)
+ if (SMI.shouldDeleteForBase(BI))
+ return true;
+
+ for (CXXRecordDecl::field_iterator FI = RD->field_begin(),
+ FE = RD->field_end(); FI != FE; ++FI)
+ if (!FI->isInvalidDecl() && !FI->isUnnamedBitfield() &&
+ SMI.shouldDeleteForField(*FI))
+ return true;
+
+ if (SMI.shouldDeleteForAllConstMembers())
+ return true;
+
+ return false;
+}
+
+/// \brief Data used with FindHiddenVirtualMethod
+namespace {
+ struct FindHiddenVirtualMethodData {
+ Sema *S;
+ CXXMethodDecl *Method;
+ llvm::SmallPtrSet<const CXXMethodDecl *, 8> OverridenAndUsingBaseMethods;
+ SmallVector<CXXMethodDecl *, 8> OverloadedMethods;
+ };
+}
+
+/// \brief Member lookup function that determines whether a given C++
+/// method overloads virtual methods in a base class without overriding any,
+/// to be used with CXXRecordDecl::lookupInBases().
+static bool FindHiddenVirtualMethod(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ void *UserData) {
+ RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
+
+ FindHiddenVirtualMethodData &Data
+ = *static_cast<FindHiddenVirtualMethodData*>(UserData);
+
+ DeclarationName Name = Data.Method->getDeclName();
+ assert(Name.getNameKind() == DeclarationName::Identifier);
+
+ bool foundSameNameMethod = false;
+ SmallVector<CXXMethodDecl *, 8> overloadedMethods;
+ for (Path.Decls = BaseRecord->lookup(Name);
+ Path.Decls.first != Path.Decls.second;
+ ++Path.Decls.first) {
+ NamedDecl *D = *Path.Decls.first;
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ MD = MD->getCanonicalDecl();
+ foundSameNameMethod = true;
+ // Interested only in hidden virtual methods.
+ if (!MD->isVirtual())
+ continue;
+ // If the method we are checking overrides a method from its base
+ // class, don't warn about the other overloaded methods.
+ if (!Data.S->IsOverload(Data.Method, MD, false))
+ return true;
+ // Collect the overload only if it's hidden.
+ if (!Data.OverridenAndUsingBaseMethods.count(MD))
+ overloadedMethods.push_back(MD);
+ }
+ }
+
+ if (foundSameNameMethod)
+ Data.OverloadedMethods.append(overloadedMethods.begin(),
+ overloadedMethods.end());
+ return foundSameNameMethod;
+}
+
+/// \brief See if a method overloads virtual methods in a base class without
+/// overriding any.
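+///
+/// For example (illustrative):
+///   struct Base { virtual void f(int); };
+///   struct Derived : Base {
+///     void f(double);  // warns: hides Base::f(int) instead of overriding it
+///   };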
+void Sema::DiagnoseHiddenVirtualMethods(CXXRecordDecl *DC, CXXMethodDecl *MD) {
+ if (Diags.getDiagnosticLevel(diag::warn_overloaded_virtual,
+ MD->getLocation()) == DiagnosticsEngine::Ignored)
+ return;
+ if (MD->getDeclName().getNameKind() != DeclarationName::Identifier)
+ return;
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, // true to look in all bases.
+ /*bool RecordPaths=*/false,
+ /*bool DetectVirtual=*/false);
+ FindHiddenVirtualMethodData Data;
+ Data.Method = MD;
+ Data.S = this;
+
+ // Keep the base methods that were overridden or introduced in the subclass
+ // by 'using' in a set. A base method not in this set is hidden.
+ for (DeclContext::lookup_result res = DC->lookup(MD->getDeclName());
+ res.first != res.second; ++res.first) {
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(*res.first))
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods();
+ I != E; ++I)
+ Data.OverridenAndUsingBaseMethods.insert((*I)->getCanonicalDecl());
+ if (UsingShadowDecl *shad = dyn_cast<UsingShadowDecl>(*res.first))
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(shad->getTargetDecl()))
+ Data.OverridenAndUsingBaseMethods.insert(MD->getCanonicalDecl());
+ }
+
+ if (DC->lookupInBases(&FindHiddenVirtualMethod, &Data, Paths) &&
+ !Data.OverloadedMethods.empty()) {
+ Diag(MD->getLocation(), diag::warn_overloaded_virtual)
+ << MD << (Data.OverloadedMethods.size() > 1);
+
+ for (unsigned i = 0, e = Data.OverloadedMethods.size(); i != e; ++i) {
+ CXXMethodDecl *overloadedMD = Data.OverloadedMethods[i];
+ Diag(overloadedMD->getLocation(),
+ diag::note_hidden_overloaded_virtual_declared_here) << overloadedMD;
+ }
+ }
+}
+
+void Sema::ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc,
+ Decl *TagDecl,
+ SourceLocation LBrac,
+ SourceLocation RBrac,
+ AttributeList *AttrList) {
+ if (!TagDecl)
+ return;
+
+ AdjustDeclIfTemplate(TagDecl);
+
+ ActOnFields(S, RLoc, TagDecl, llvm::makeArrayRef(
+ // strict aliasing violation!
+ reinterpret_cast<Decl**>(FieldCollector->getCurFields()),
+ FieldCollector->getCurNumFields()), LBrac, RBrac, AttrList);
+
+ CheckCompletedCXXClass(
+ dyn_cast_or_null<CXXRecordDecl>(TagDecl));
+}
+
+/// AddImplicitlyDeclaredMembersToClass - Adds any implicitly-declared
+/// special functions, such as the default constructor, copy
+/// constructor, or destructor, to the given C++ class (C++
+/// [special]p1). This routine can only be executed just before the
+/// definition of the class is complete.
+void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
+ if (!ClassDecl->hasUserDeclaredConstructor())
+ ++ASTContext::NumImplicitDefaultConstructors;
+
+ if (!ClassDecl->hasUserDeclaredCopyConstructor())
+ ++ASTContext::NumImplicitCopyConstructors;
+
+ if (getLangOpts().CPlusPlus0x && ClassDecl->needsImplicitMoveConstructor())
+ ++ASTContext::NumImplicitMoveConstructors;
+
+ if (!ClassDecl->hasUserDeclaredCopyAssignment()) {
+ ++ASTContext::NumImplicitCopyAssignmentOperators;
+
+ // If we have a dynamic class, then the copy assignment operator may be
+ // virtual, so we have to declare it immediately. This ensures that, e.g.,
+ // it shows up in the right place in the vtable and that we diagnose
+ // problems with the implicit exception specification.
+ if (ClassDecl->isDynamicClass())
+ DeclareImplicitCopyAssignment(ClassDecl);
+ }
+
+ if (getLangOpts().CPlusPlus0x && ClassDecl->needsImplicitMoveAssignment()) {
+ ++ASTContext::NumImplicitMoveAssignmentOperators;
+
+ // Likewise for the move assignment operator.
+ if (ClassDecl->isDynamicClass())
+ DeclareImplicitMoveAssignment(ClassDecl);
+ }
+
+ if (!ClassDecl->hasUserDeclaredDestructor()) {
+ ++ASTContext::NumImplicitDestructors;
+
+ // If we have a dynamic class, then the destructor may be virtual, so we
+ // have to declare the destructor immediately. This ensures that, e.g., it
+ // shows up in the right place in the vtable and that we diagnose problems
+ // with the implicit exception specification.
+ if (ClassDecl->isDynamicClass())
+ DeclareImplicitDestructor(ClassDecl);
+ }
+}
+
+void Sema::ActOnReenterDeclaratorTemplateScope(Scope *S, DeclaratorDecl *D) {
+ if (!D)
+ return;
+
+ int NumParamList = D->getNumTemplateParameterLists();
+ for (int i = 0; i < NumParamList; i++) {
+ TemplateParameterList* Params = D->getTemplateParameterList(i);
+ for (TemplateParameterList::iterator Param = Params->begin(),
+ ParamEnd = Params->end();
+ Param != ParamEnd; ++Param) {
+ NamedDecl *Named = cast<NamedDecl>(*Param);
+ if (Named->getDeclName()) {
+ S->AddDecl(Named);
+ IdResolver.AddDecl(Named);
+ }
+ }
+ }
+}
+
+void Sema::ActOnReenterTemplateScope(Scope *S, Decl *D) {
+ if (!D)
+ return;
+
+ TemplateParameterList *Params = 0;
+ if (TemplateDecl *Template = dyn_cast<TemplateDecl>(D))
+ Params = Template->getTemplateParameters();
+ else if (ClassTemplatePartialSpecializationDecl *PartialSpec
+ = dyn_cast<ClassTemplatePartialSpecializationDecl>(D))
+ Params = PartialSpec->getTemplateParameters();
+ else
+ return;
+
+ for (TemplateParameterList::iterator Param = Params->begin(),
+ ParamEnd = Params->end();
+ Param != ParamEnd; ++Param) {
+ NamedDecl *Named = cast<NamedDecl>(*Param);
+ if (Named->getDeclName()) {
+ S->AddDecl(Named);
+ IdResolver.AddDecl(Named);
+ }
+ }
+}
+
+void Sema::ActOnStartDelayedMemberDeclarations(Scope *S, Decl *RecordD) {
+ if (!RecordD) return;
+ AdjustDeclIfTemplate(RecordD);
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordD);
+ PushDeclContext(S, Record);
+}
+
+void Sema::ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *RecordD) {
+ if (!RecordD) return;
+ PopDeclContext();
+}
+
+/// ActOnStartDelayedCXXMethodDeclaration - We have completed
+/// parsing a top-level (non-nested) C++ class, and we are now
+/// parsing those parts of the given Method declaration that could
+/// not be parsed earlier (C++ [class.mem]p2), such as default
+/// arguments. This action should enter the scope of the given
+/// Method declaration as if we had just parsed the qualified method
+/// name. However, it should not bring the parameters into scope;
+/// that will be performed by ActOnDelayedCXXMethodParameter.
+void Sema::ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *MethodD) {
+}
+
+/// ActOnDelayedCXXMethodParameter - We've already started a delayed
+/// C++ method declaration. We're (re-)introducing the given
+/// function parameter into scope for use in parsing later parts of
+/// the method declaration. For example, we could see an
+/// ActOnParamDefaultArgument event for this parameter.
+void Sema::ActOnDelayedCXXMethodParameter(Scope *S, Decl *ParamD) {
+ if (!ParamD)
+ return;
+
+ ParmVarDecl *Param = cast<ParmVarDecl>(ParamD);
+
+ // If this parameter has an unparsed default argument, clear it out
+ // to make way for the parsed default argument.
+ if (Param->hasUnparsedDefaultArg())
+ Param->setDefaultArg(0);
+
+ S->AddDecl(Param);
+ if (Param->getDeclName())
+ IdResolver.AddDecl(Param);
+}
+
+/// ActOnFinishDelayedCXXMethodDeclaration - We have finished
+/// processing the delayed method declaration for Method. The method
+/// declaration is now considered finished. There may be a separate
+/// ActOnStartOfFunctionDef action later (not necessarily
+/// immediately!) for this method, if it was also defined inside the
+/// class body.
+void Sema::ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *MethodD) {
+ if (!MethodD)
+ return;
+
+ AdjustDeclIfTemplate(MethodD);
+
+ FunctionDecl *Method = cast<FunctionDecl>(MethodD);
+
+ // Now that we have our default arguments, check the constructor
+ // again. It could produce additional diagnostics or affect whether
+ // the class has implicitly-declared destructors, among other
+ // things.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Method))
+ CheckConstructor(Constructor);
+
+ // Check the default arguments, which we may have added.
+ if (!Method->isInvalidDecl())
+ CheckCXXDefaultArguments(Method);
+}
+
+/// CheckConstructorDeclarator - Called by ActOnDeclarator to check
+/// the well-formedness of the constructor declarator @p D with type @p
+/// R. If there are any errors in the declarator, this routine will
+/// emit diagnostics and set the invalid bit to true. In any case, the type
+/// will be updated to reflect a well-formed type for the constructor and
+/// returned.
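+///
+/// For example, each of these constructor declarations is diagnosed here:
+///   struct X {
+///     virtual X();      // error: constructor cannot be virtual
+///     static X(int);    // error: constructor cannot be static
+///     X(double) const;  // error: constructor cannot be const-qualified
+///   };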
+QualType Sema::CheckConstructorDeclarator(Declarator &D, QualType R,
+ StorageClass &SC) {
+ bool isVirtual = D.getDeclSpec().isVirtualSpecified();
+
+ // C++ [class.ctor]p3:
+ // A constructor shall not be virtual (10.3) or static (9.4). A
+ // constructor can be invoked for a const, volatile or const
+ // volatile object. A constructor shall not be declared const,
+ // volatile, or const volatile (9.3.2).
+ if (isVirtual) {
+ if (!D.isInvalidType())
+ Diag(D.getIdentifierLoc(), diag::err_constructor_cannot_be)
+ << "virtual" << SourceRange(D.getDeclSpec().getVirtualSpecLoc())
+ << SourceRange(D.getIdentifierLoc());
+ D.setInvalidType();
+ }
+ if (SC == SC_Static) {
+ if (!D.isInvalidType())
+ Diag(D.getIdentifierLoc(), diag::err_constructor_cannot_be)
+ << "static" << SourceRange(D.getDeclSpec().getStorageClassSpecLoc())
+ << SourceRange(D.getIdentifierLoc());
+ D.setInvalidType();
+ SC = SC_None;
+ }
+
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+ if (FTI.TypeQuals != 0) {
+ if (FTI.TypeQuals & Qualifiers::Const)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_constructor)
+ << "const" << SourceRange(D.getIdentifierLoc());
+ if (FTI.TypeQuals & Qualifiers::Volatile)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_constructor)
+ << "volatile" << SourceRange(D.getIdentifierLoc());
+ if (FTI.TypeQuals & Qualifiers::Restrict)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_constructor)
+ << "restrict" << SourceRange(D.getIdentifierLoc());
+ D.setInvalidType();
+ }
+
+ // C++0x [class.ctor]p4:
+ // A constructor shall not be declared with a ref-qualifier.
+ if (FTI.hasRefQualifier()) {
+ Diag(FTI.getRefQualifierLoc(), diag::err_ref_qualifier_constructor)
+ << FTI.RefQualifierIsLValueRef
+ << FixItHint::CreateRemoval(FTI.getRefQualifierLoc());
+ D.setInvalidType();
+ }
+
+ // Rebuild the function type "R" without any type qualifiers (in
+ // case any of the errors above fired) and with "void" as the
+ // return type, since constructors don't have return types.
+ const FunctionProtoType *Proto = R->getAs<FunctionProtoType>();
+ if (Proto->getResultType() == Context.VoidTy && !D.isInvalidType())
+ return R;
+
+ FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
+ EPI.TypeQuals = 0;
+ EPI.RefQualifier = RQ_None;
+
+ return Context.getFunctionType(Context.VoidTy, Proto->arg_type_begin(),
+ Proto->getNumArgs(), EPI);
+}
+
+/// CheckConstructor - Checks a fully-formed constructor for
+/// well-formedness, issuing any diagnostics required. If the constructor is
+/// found to be ill-formed, it is marked invalid.
+void Sema::CheckConstructor(CXXConstructorDecl *Constructor) {
+ CXXRecordDecl *ClassDecl
+ = dyn_cast<CXXRecordDecl>(Constructor->getDeclContext());
+ if (!ClassDecl)
+ return Constructor->setInvalidDecl();
+
+ // C++ [class.copy]p3:
+ // A declaration of a constructor for a class X is ill-formed if
+ // its first parameter is of type (optionally cv-qualified) X and
+ // either there are no other parameters or else all other
+ // parameters have default arguments.
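+ // For example:
+ //   struct X {
+ //     X(X x);  // ill-formed: such a constructor would take its argument
+ //              // by value, requiring a copy of X to construct an X.
+ //   };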
+ if (!Constructor->isInvalidDecl() &&
+ ((Constructor->getNumParams() == 1) ||
+ (Constructor->getNumParams() > 1 &&
+ Constructor->getParamDecl(1)->hasDefaultArg())) &&
+ Constructor->getTemplateSpecializationKind()
+ != TSK_ImplicitInstantiation) {
+ QualType ParamType = Constructor->getParamDecl(0)->getType();
+ QualType ClassTy = Context.getTagDeclType(ClassDecl);
+ if (Context.getCanonicalType(ParamType).getUnqualifiedType() == ClassTy) {
+ SourceLocation ParamLoc = Constructor->getParamDecl(0)->getLocation();
+ const char *ConstRef
+ = Constructor->getParamDecl(0)->getIdentifier() ? "const &"
+ : " const &";
+ Diag(ParamLoc, diag::err_constructor_byvalue_arg)
+ << FixItHint::CreateInsertion(ParamLoc, ConstRef);
+
+ // FIXME: Rather than making the constructor invalid, we should endeavor
+ // to fix the type.
+ Constructor->setInvalidDecl();
+ }
+ }
+}
+
+/// CheckDestructor - Checks a fully-formed destructor definition for
+/// well-formedness, issuing any diagnostics required. Returns true
+/// on error.
+bool Sema::CheckDestructor(CXXDestructorDecl *Destructor) {
+ CXXRecordDecl *RD = Destructor->getParent();
+
+ if (Destructor->isVirtual()) {
+ SourceLocation Loc;
+
+ if (!Destructor->isImplicit())
+ Loc = Destructor->getLocation();
+ else
+ Loc = RD->getLocation();
+
+ // If we have a virtual destructor, look up the deallocation function
+ FunctionDecl *OperatorDelete = 0;
+ DeclarationName Name =
+ Context.DeclarationNames.getCXXOperatorName(OO_Delete);
+ if (FindDeallocationFunction(Loc, RD, Name, OperatorDelete))
+ return true;
+
+ MarkFunctionReferenced(Loc, OperatorDelete);
+
+ Destructor->setOperatorDelete(OperatorDelete);
+ }
+
+ return false;
+}
+
+static inline bool
+FTIHasSingleVoidArgument(DeclaratorChunk::FunctionTypeInfo &FTI) {
+ return (FTI.NumArgs == 1 && !FTI.isVariadic && FTI.ArgInfo[0].Ident == 0 &&
+ FTI.ArgInfo[0].Param &&
+ cast<ParmVarDecl>(FTI.ArgInfo[0].Param)->getType()->isVoidType());
+}
+
+/// CheckDestructorDeclarator - Called by ActOnDeclarator to check
+/// the well-formedness of the destructor declarator @p D with type @p
+/// R. If there are any errors in the declarator, this routine will
+/// emit diagnostics and set the declarator to invalid. Even if this happens,
+/// the type will be updated to reflect a well-formed type for the destructor
+/// and returned.
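+///
+/// For example:
+///   struct X {
+///     ~X(int);  // error: a destructor cannot have parameters
+///   };
+/// The same path diagnoses 'static', cv-qualified, ref-qualified, and
+/// variadic destructor declarators.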
+QualType Sema::CheckDestructorDeclarator(Declarator &D, QualType R,
+ StorageClass& SC) {
+ // C++ [class.dtor]p1:
+ // [...] A typedef-name that names a class is a class-name
+ // (7.1.3); however, a typedef-name that names a class shall not
+ // be used as the identifier in the declarator for a destructor
+ // declaration.
+ QualType DeclaratorType = GetTypeFromParser(D.getName().DestructorName);
+ if (const TypedefType *TT = DeclaratorType->getAs<TypedefType>())
+ Diag(D.getIdentifierLoc(), diag::err_destructor_typedef_name)
+ << DeclaratorType << isa<TypeAliasDecl>(TT->getDecl());
+ else if (const TemplateSpecializationType *TST =
+ DeclaratorType->getAs<TemplateSpecializationType>())
+ if (TST->isTypeAlias())
+ Diag(D.getIdentifierLoc(), diag::err_destructor_typedef_name)
+ << DeclaratorType << 1;
+
+ // C++ [class.dtor]p2:
+ // A destructor is used to destroy objects of its class type. A
+ // destructor takes no parameters, and no return type can be
+ // specified for it (not even void). The address of a destructor
+ // shall not be taken. A destructor shall not be static. A
+ // destructor can be invoked for a const, volatile or const
+ // volatile object. A destructor shall not be declared const,
+ // volatile or const volatile (9.3.2).
+ if (SC == SC_Static) {
+ if (!D.isInvalidType())
+ Diag(D.getIdentifierLoc(), diag::err_destructor_cannot_be)
+ << "static" << SourceRange(D.getDeclSpec().getStorageClassSpecLoc())
+ << SourceRange(D.getIdentifierLoc())
+ << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
+
+ SC = SC_None;
+ }
+ if (D.getDeclSpec().hasTypeSpecifier() && !D.isInvalidType()) {
+ // Destructors don't have return types, but the parser will
+ // happily parse something like:
+ //
+ // class X {
+ // float ~X();
+ // };
+ //
+ // The return type will be eliminated later.
+ Diag(D.getIdentifierLoc(), diag::err_destructor_return_type)
+ << SourceRange(D.getDeclSpec().getTypeSpecTypeLoc())
+ << SourceRange(D.getIdentifierLoc());
+ }
+
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+ if (FTI.TypeQuals != 0 && !D.isInvalidType()) {
+ if (FTI.TypeQuals & Qualifiers::Const)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_destructor)
+ << "const" << SourceRange(D.getIdentifierLoc());
+ if (FTI.TypeQuals & Qualifiers::Volatile)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_destructor)
+ << "volatile" << SourceRange(D.getIdentifierLoc());
+ if (FTI.TypeQuals & Qualifiers::Restrict)
+ Diag(D.getIdentifierLoc(), diag::err_invalid_qualified_destructor)
+ << "restrict" << SourceRange(D.getIdentifierLoc());
+ D.setInvalidType();
+ }
+
+ // C++0x [class.dtor]p2:
+ // A destructor shall not be declared with a ref-qualifier.
+ if (FTI.hasRefQualifier()) {
+ Diag(FTI.getRefQualifierLoc(), diag::err_ref_qualifier_destructor)
+ << FTI.RefQualifierIsLValueRef
+ << FixItHint::CreateRemoval(FTI.getRefQualifierLoc());
+ D.setInvalidType();
+ }
+
+ // Make sure we don't have any parameters.
+ if (FTI.NumArgs > 0 && !FTIHasSingleVoidArgument(FTI)) {
+ Diag(D.getIdentifierLoc(), diag::err_destructor_with_params);
+
+ // Delete the parameters.
+ FTI.freeArgs();
+ D.setInvalidType();
+ }
+
+ // Make sure the destructor isn't variadic.
+ if (FTI.isVariadic) {
+ Diag(D.getIdentifierLoc(), diag::err_destructor_variadic);
+ D.setInvalidType();
+ }
+
+ // Rebuild the function type "R" without any type qualifiers or
+ // parameters (in case any of the errors above fired) and with
+ // "void" as the return type, since destructors don't have return
+ // types.
+ if (!D.isInvalidType())
+ return R;
+
+ const FunctionProtoType *Proto = R->getAs<FunctionProtoType>();
+ FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
+ EPI.Variadic = false;
+ EPI.TypeQuals = 0;
+ EPI.RefQualifier = RQ_None;
+ return Context.getFunctionType(Context.VoidTy, 0, 0, EPI);
+}
+
+/// CheckConversionDeclarator - Called by ActOnDeclarator to check the
+/// well-formedness of the conversion function declarator @p D with
+/// type @p R. If there are any errors in the declarator, this routine
+/// will emit diagnostics and mark the declarator as invalid. Either way,
+/// the type @p R will be updated to reflect a well-formed type for the
+/// conversion operator.
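+///
+/// For example (illustrative):
+///   typedef int Arr[4];
+///   struct X {
+///     operator int(float);  // error: conversion function cannot have
+///                           //        parameters
+///     operator Arr();       // error: conversion function cannot convert
+///                           //        to an array type
+///   };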
+void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
+ StorageClass& SC) {
+ // C++ [class.conv.fct]p1:
+ // Neither parameter types nor return type can be specified. The
+ // type of a conversion function (8.3.5) is "function taking no
+ // parameter returning conversion-type-id."
+ if (SC == SC_Static) {
+ if (!D.isInvalidType())
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_not_member)
+ << "static" << SourceRange(D.getDeclSpec().getStorageClassSpecLoc())
+ << SourceRange(D.getIdentifierLoc());
+ D.setInvalidType();
+ SC = SC_None;
+ }
+
+ QualType ConvType = GetTypeFromParser(D.getName().ConversionFunctionId);
+
+ if (D.getDeclSpec().hasTypeSpecifier() && !D.isInvalidType()) {
+ // Conversion functions don't have return types, but the parser will
+ // happily parse something like:
+ //
+ // class X {
+ // float operator bool();
+ // };
+ //
+ // The return type will be changed later anyway.
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_return_type)
+ << SourceRange(D.getDeclSpec().getTypeSpecTypeLoc())
+ << SourceRange(D.getIdentifierLoc());
+ D.setInvalidType();
+ }
+
+ const FunctionProtoType *Proto = R->getAs<FunctionProtoType>();
+
+ // Make sure we don't have any parameters.
+ if (Proto->getNumArgs() > 0) {
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_with_params);
+
+ // Delete the parameters.
+ D.getFunctionTypeInfo().freeArgs();
+ D.setInvalidType();
+ } else if (Proto->isVariadic()) {
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_variadic);
+ D.setInvalidType();
+ }
+
+ // Diagnose "&operator bool()" and other such nonsense. This
+ // is actually a gcc extension which we don't support.
+ if (Proto->getResultType() != ConvType) {
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_with_complex_decl)
+ << Proto->getResultType();
+ D.setInvalidType();
+ ConvType = Proto->getResultType();
+ }
+
+ // C++ [class.conv.fct]p4:
+ // The conversion-type-id shall not represent a function type nor
+ // an array type.
+ if (ConvType->isArrayType()) {
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_to_array);
+ ConvType = Context.getPointerType(ConvType);
+ D.setInvalidType();
+ } else if (ConvType->isFunctionType()) {
+ Diag(D.getIdentifierLoc(), diag::err_conv_function_to_function);
+ ConvType = Context.getPointerType(ConvType);
+ D.setInvalidType();
+ }
+
+ // Rebuild the function type "R" without any parameters (in case any
+ // of the errors above fired) and with the conversion type as the
+ // return type.
+ if (D.isInvalidType())
+ R = Context.getFunctionType(ConvType, 0, 0, Proto->getExtProtoInfo());
+
+ // C++0x explicit conversion operators.
+ if (D.getDeclSpec().isExplicitSpecified())
+ Diag(D.getDeclSpec().getExplicitSpecLoc(),
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_explicit_conversion_functions :
+ diag::ext_explicit_conversion_functions)
+ << SourceRange(D.getDeclSpec().getExplicitSpecLoc());
+}
+
+/// ActOnConversionDeclarator - Called by ActOnDeclarator to complete
+/// the declaration of the given C++ conversion function. This routine
+/// is responsible for recording the conversion function in the C++
+/// class, if possible.
+Decl *Sema::ActOnConversionDeclarator(CXXConversionDecl *Conversion) {
+ assert(Conversion && "Expected to receive a conversion function declaration");
+
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Conversion->getDeclContext());
+
+ // Make sure we aren't redeclaring the conversion function.
+ QualType ConvType = Context.getCanonicalType(Conversion->getConversionType());
+
+ // C++ [class.conv.fct]p1:
+ // [...] A conversion function is never used to convert a
+ // (possibly cv-qualified) object to the (possibly cv-qualified)
+ // same object type (or a reference to it), to a (possibly
+ // cv-qualified) base class of that type (or a reference to it),
+ // or to (possibly cv-qualified) void.
+ // FIXME: Suppress this warning if the conversion function ends up being a
+ // virtual function that overrides a virtual function in a base class.
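+ // For example (illustrative):
+ //   struct B {};
+ //   struct D : B {
+ //     operator D&();       // warning: will never be used
+ //     operator B() const;  // warning: will never be used
+ //     operator void();     // warning: will never be used
+ //   };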
+ QualType ClassType
+ = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
+ if (const ReferenceType *ConvTypeRef = ConvType->getAs<ReferenceType>())
+ ConvType = ConvTypeRef->getPointeeType();
+ if (Conversion->getTemplateSpecializationKind() != TSK_Undeclared &&
+ Conversion->getTemplateSpecializationKind() != TSK_ExplicitSpecialization)
+ /* Suppress diagnostics for instantiations. */;
+ else if (ConvType->isRecordType()) {
+ ConvType = Context.getCanonicalType(ConvType).getUnqualifiedType();
+ if (ConvType == ClassType)
+ Diag(Conversion->getLocation(), diag::warn_conv_to_self_not_used)
+ << ClassType;
+ else if (IsDerivedFrom(ClassType, ConvType))
+ Diag(Conversion->getLocation(), diag::warn_conv_to_base_not_used)
+ << ClassType << ConvType;
+ } else if (ConvType->isVoidType()) {
+ Diag(Conversion->getLocation(), diag::warn_conv_to_void_not_used)
+ << ClassType << ConvType;
+ }
+
+ if (FunctionTemplateDecl *ConversionTemplate
+ = Conversion->getDescribedFunctionTemplate())
+ return ConversionTemplate;
+
+ return Conversion;
+}
+
+//===----------------------------------------------------------------------===//
+// Namespace Handling
+//===----------------------------------------------------------------------===//
+
+
+
+/// ActOnStartNamespaceDef - This is called at the start of a namespace
+/// definition.
+Decl *Sema::ActOnStartNamespaceDef(Scope *NamespcScope,
+ SourceLocation InlineLoc,
+ SourceLocation NamespaceLoc,
+ SourceLocation IdentLoc,
+ IdentifierInfo *II,
+ SourceLocation LBrace,
+ AttributeList *AttrList) {
+ SourceLocation StartLoc = InlineLoc.isValid() ? InlineLoc : NamespaceLoc;
+ // For anonymous namespace, take the location of the left brace.
+ SourceLocation Loc = II ? IdentLoc : LBrace;
+ bool IsInline = InlineLoc.isValid();
+ bool IsInvalid = false;
+ bool IsStd = false;
+ bool AddToKnown = false;
+ Scope *DeclRegionScope = NamespcScope->getParent();
+
+ NamespaceDecl *PrevNS = 0;
+ if (II) {
+ // C++ [namespace.def]p2:
+ // The identifier in an original-namespace-definition shall not
+ // have been previously defined in the declarative region in
+ // which the original-namespace-definition appears. The
+ // identifier in an original-namespace-definition is the name of
+ // the namespace. Subsequently in that declarative region, it is
+ // treated as an original-namespace-name.
+ //
+ // Since namespace names are unique in their scope, and we don't
+ // look through using directives, just look for any ordinary names.
+
+ const unsigned IDNS = Decl::IDNS_Ordinary | Decl::IDNS_Member |
+ Decl::IDNS_Type | Decl::IDNS_Using | Decl::IDNS_Tag |
+ Decl::IDNS_Namespace;
+ NamedDecl *PrevDecl = 0;
+ for (DeclContext::lookup_result R
+ = CurContext->getRedeclContext()->lookup(II);
+ R.first != R.second; ++R.first) {
+ if ((*R.first)->getIdentifierNamespace() & IDNS) {
+ PrevDecl = *R.first;
+ break;
+ }
+ }
+
+ PrevNS = dyn_cast_or_null<NamespaceDecl>(PrevDecl);
+
+ if (PrevNS) {
+ // This is an extended namespace definition.
+ if (IsInline != PrevNS->isInline()) {
+ // inline-ness must match
+ if (PrevNS->isInline()) {
+ // The user probably just forgot the 'inline', so suggest that it
+ // be added back.
+ Diag(Loc, diag::warn_inline_namespace_reopened_noninline)
+ << FixItHint::CreateInsertion(NamespaceLoc, "inline ");
+ } else {
+ Diag(Loc, diag::err_inline_namespace_mismatch)
+ << IsInline;
+ }
+ Diag(PrevNS->getLocation(), diag::note_previous_definition);
+
+ IsInline = PrevNS->isInline();
+ }
+ } else if (PrevDecl) {
+ // This is an invalid name redefinition.
+ Diag(Loc, diag::err_redefinition_different_kind)
+ << II;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ IsInvalid = true;
+ // Continue on to push Namespc as current DeclContext and return it.
+ } else if (II->isStr("std") &&
+ CurContext->getRedeclContext()->isTranslationUnit()) {
+ // This is the first "real" definition of the namespace "std", so update
+ // our cache of the "std" namespace to point at this definition.
+ PrevNS = getStdNamespace();
+ IsStd = true;
+ AddToKnown = !IsInline;
+ } else {
+ // We've seen this namespace for the first time.
+ AddToKnown = !IsInline;
+ }
+ } else {
+ // Anonymous namespaces.
+
+ // Determine whether the parent already has an anonymous namespace.
+ DeclContext *Parent = CurContext->getRedeclContext();
+ if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(Parent)) {
+ PrevNS = TU->getAnonymousNamespace();
+ } else {
+ NamespaceDecl *ND = cast<NamespaceDecl>(Parent);
+ PrevNS = ND->getAnonymousNamespace();
+ }
+
+ if (PrevNS && IsInline != PrevNS->isInline()) {
+ // inline-ness must match
+ Diag(Loc, diag::err_inline_namespace_mismatch)
+ << IsInline;
+ Diag(PrevNS->getLocation(), diag::note_previous_definition);
+
+ // Recover by ignoring the new namespace's inline status.
+ IsInline = PrevNS->isInline();
+ }
+ }
+
+ NamespaceDecl *Namespc = NamespaceDecl::Create(Context, CurContext, IsInline,
+ StartLoc, Loc, II, PrevNS);
+ if (IsInvalid)
+ Namespc->setInvalidDecl();
+
+ ProcessDeclAttributeList(DeclRegionScope, Namespc, AttrList);
+
+ // FIXME: Should we be merging attributes?
+ if (const VisibilityAttr *Attr = Namespc->getAttr<VisibilityAttr>())
+ PushNamespaceVisibilityAttr(Attr, Loc);
+
+ if (IsStd)
+ StdNamespace = Namespc;
+ if (AddToKnown)
+ KnownNamespaces[Namespc] = false;
+
+ if (II) {
+ PushOnScopeChains(Namespc, DeclRegionScope);
+ } else {
+ // Link the anonymous namespace into its parent.
+ DeclContext *Parent = CurContext->getRedeclContext();
+ if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(Parent)) {
+ TU->setAnonymousNamespace(Namespc);
+ } else {
+ cast<NamespaceDecl>(Parent)->setAnonymousNamespace(Namespc);
+ }
+
+ CurContext->addDecl(Namespc);
+
+ // C++ [namespace.unnamed]p1. An unnamed-namespace-definition
+ // behaves as if it were replaced by
+ // namespace unique { /* empty body */ }
+ // using namespace unique;
+ // namespace unique { namespace-body }
+ // where all occurrences of 'unique' in a translation unit are
+ // replaced by the same identifier and this identifier differs
+ // from all other identifiers in the entire program.
+
+ // We just create the namespace with an empty name and then add an
+ // implicit using declaration, just like the standard suggests.
+ //
+ // CodeGen enforces the "universally unique" aspect by giving all
+ // declarations semantically contained within an anonymous
+ // namespace internal linkage.
+
+ if (!PrevNS) {
+ UsingDirectiveDecl* UD
+ = UsingDirectiveDecl::Create(Context, CurContext,
+ /* 'using' */ LBrace,
+ /* 'namespace' */ SourceLocation(),
+ /* qualifier */ NestedNameSpecifierLoc(),
+ /* identifier */ SourceLocation(),
+ Namespc,
+ /* Ancestor */ CurContext);
+ UD->setImplicit();
+ CurContext->addDecl(UD);
+ }
+ }
+
+ // Although we could have an invalid decl (i.e. the namespace name is a
+ // redefinition), push it as current DeclContext and try to continue parsing.
+ // FIXME: We should be able to push Namespc here, so that each DeclContext
+ // for the namespace has the declarations that showed up in that particular
+ // namespace definition.
+ PushDeclContext(NamespcScope, Namespc);
+ return Namespc;
+}
+
+/// getNamespaceDecl - Returns the namespace a decl represents. If the decl
+/// is a namespace alias, returns the namespace it points to.
+static inline NamespaceDecl *getNamespaceDecl(NamedDecl *D) {
+ if (NamespaceAliasDecl *AD = dyn_cast_or_null<NamespaceAliasDecl>(D))
+ return AD->getNamespace();
+ return dyn_cast_or_null<NamespaceDecl>(D);
+}
+
+/// ActOnFinishNamespaceDef - This callback is called after a namespace is
+/// exited. Decl is the DeclTy returned by ActOnStartNamespaceDef.
+void Sema::ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace) {
+ NamespaceDecl *Namespc = dyn_cast_or_null<NamespaceDecl>(Dcl);
+ assert(Namespc && "Invalid parameter, expected NamespaceDecl");
+ Namespc->setRBraceLoc(RBrace);
+ PopDeclContext();
+ if (Namespc->hasAttr<VisibilityAttr>())
+ PopPragmaVisibility(true, RBrace);
+}
+
+CXXRecordDecl *Sema::getStdBadAlloc() const {
+ return cast_or_null<CXXRecordDecl>(
+ StdBadAlloc.get(Context.getExternalSource()));
+}
+
+NamespaceDecl *Sema::getStdNamespace() const {
+ return cast_or_null<NamespaceDecl>(
+ StdNamespace.get(Context.getExternalSource()));
+}
+
+/// \brief Retrieve the special "std" namespace, which may require us to
+/// implicitly define the namespace.
+NamespaceDecl *Sema::getOrCreateStdNamespace() {
+ if (!StdNamespace) {
+ // The "std" namespace has not yet been defined, so build one implicitly.
+ StdNamespace = NamespaceDecl::Create(Context,
+ Context.getTranslationUnitDecl(),
+ /*Inline=*/false,
+ SourceLocation(), SourceLocation(),
+ &PP.getIdentifierTable().get("std"),
+ /*PrevDecl=*/0);
+ getStdNamespace()->setImplicit(true);
+ }
+
+ return getStdNamespace();
+}
+
+bool Sema::isStdInitializerList(QualType Ty, QualType *Element) {
+ assert(getLangOpts().CPlusPlus &&
+ "Looking for std::initializer_list outside of C++.");
+
+ // We're looking for implicit instantiations of
+ // template <typename E> class std::initializer_list.
+
+ if (!StdNamespace) // If we haven't seen namespace std yet, this can't be it.
+ return false;
+
+ ClassTemplateDecl *Template = 0;
+ const TemplateArgument *Arguments = 0;
+
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+
+ ClassTemplateSpecializationDecl *Specialization =
+ dyn_cast<ClassTemplateSpecializationDecl>(RT->getDecl());
+ if (!Specialization)
+ return false;
+
+ Template = Specialization->getSpecializedTemplate();
+ Arguments = Specialization->getTemplateArgs().data();
+ } else if (const TemplateSpecializationType *TST =
+ Ty->getAs<TemplateSpecializationType>()) {
+ Template = dyn_cast_or_null<ClassTemplateDecl>(
+ TST->getTemplateName().getAsTemplateDecl());
+ Arguments = TST->getArgs();
+ }
+ if (!Template)
+ return false;
+
+ if (!StdInitializerList) {
+ // Haven't recognized std::initializer_list yet, maybe this is it.
+ CXXRecordDecl *TemplateClass = Template->getTemplatedDecl();
+ if (TemplateClass->getIdentifier() !=
+ &PP.getIdentifierTable().get("initializer_list") ||
+ !getStdNamespace()->InEnclosingNamespaceSetOf(
+ TemplateClass->getDeclContext()))
+ return false;
+ // This is a template called std::initializer_list, but is it the right
+ // template?
+ TemplateParameterList *Params = Template->getTemplateParameters();
+ if (Params->getMinRequiredArguments() != 1)
+ return false;
+ if (!isa<TemplateTypeParmDecl>(Params->getParam(0)))
+ return false;
+
+ // It's the right template.
+ StdInitializerList = Template;
+ }
+
+ if (Template != StdInitializerList)
+ return false;
+
+ // This is an instance of std::initializer_list. Find the argument type.
+ if (Element)
+ *Element = Arguments[0].getAsType();
+ return true;
+}
+
+static ClassTemplateDecl *LookupStdInitializerList(Sema &S, SourceLocation Loc){
+ NamespaceDecl *Std = S.getStdNamespace();
+ if (!Std) {
+ S.Diag(Loc, diag::err_implied_std_initializer_list_not_found);
+ return 0;
+ }
+
+ LookupResult Result(S, &S.PP.getIdentifierTable().get("initializer_list"),
+ Loc, Sema::LookupOrdinaryName);
+ if (!S.LookupQualifiedName(Result, Std)) {
+ S.Diag(Loc, diag::err_implied_std_initializer_list_not_found);
+ return 0;
+ }
+ ClassTemplateDecl *Template = Result.getAsSingle<ClassTemplateDecl>();
+ if (!Template) {
+ Result.suppressDiagnostics();
+ // We found something weird. Complain about the first thing we found.
+ NamedDecl *Found = *Result.begin();
+ S.Diag(Found->getLocation(), diag::err_malformed_std_initializer_list);
+ return 0;
+ }
+
+ // We found some template called std::initializer_list. Now verify that it's
+ // correct.
+ TemplateParameterList *Params = Template->getTemplateParameters();
+ if (Params->getMinRequiredArguments() != 1 ||
+ !isa<TemplateTypeParmDecl>(Params->getParam(0))) {
+ S.Diag(Template->getLocation(), diag::err_malformed_std_initializer_list);
+ return 0;
+ }
+
+ return Template;
+}
+
+QualType Sema::BuildStdInitializerList(QualType Element, SourceLocation Loc) {
+ if (!StdInitializerList) {
+ StdInitializerList = LookupStdInitializerList(*this, Loc);
+ if (!StdInitializerList)
+ return QualType();
+ }
+
+ TemplateArgumentListInfo Args(Loc, Loc);
+ Args.addArgument(TemplateArgumentLoc(TemplateArgument(Element),
+ Context.getTrivialTypeSourceInfo(Element,
+ Loc)));
+ return Context.getCanonicalType(
+ CheckTemplateIdType(TemplateName(StdInitializerList), Loc, Args));
+}
+
+bool Sema::isInitListConstructor(const CXXConstructorDecl* Ctor) {
+ // C++ [dcl.init.list]p2:
+ // A constructor is an initializer-list constructor if its first parameter
+ // is of type std::initializer_list<E> or reference to possibly cv-qualified
+ // std::initializer_list<E> for some type E, and either there are no other
+ // parameters or else all other parameters have default arguments.
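+ // For example:
+ //   struct S {
+ //     S(std::initializer_list<int>);             // initializer-list ctor
+ //     S(std::initializer_list<int>, int x = 0);  // initializer-list ctor
+ //     S(std::initializer_list<int>, int x);      // not an init-list ctor
+ //   };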
+ if (Ctor->getNumParams() < 1 ||
+ (Ctor->getNumParams() > 1 && !Ctor->getParamDecl(1)->hasDefaultArg()))
+ return false;
+
+ QualType ArgType = Ctor->getParamDecl(0)->getType();
+ if (const ReferenceType *RT = ArgType->getAs<ReferenceType>())
+ ArgType = RT->getPointeeType().getUnqualifiedType();
+
+ return isStdInitializerList(ArgType, 0);
+}
+
+/// \brief Determine whether a using directive is in a context where it will
+/// apply in all contexts.
+static bool IsUsingDirectiveInToplevelContext(DeclContext *CurContext) {
+ switch (CurContext->getDeclKind()) {
+ case Decl::TranslationUnit:
+ return true;
+ case Decl::LinkageSpec:
+ return IsUsingDirectiveInToplevelContext(CurContext->getParent());
+ default:
+ return false;
+ }
+}
+
+namespace {
+
+// Callback to only accept typo corrections that are namespaces.
+class NamespaceValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ if (NamedDecl *ND = candidate.getCorrectionDecl()) {
+ return isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND);
+ }
+ return false;
+ }
+};
+
+}
+
+static bool TryNamespaceTypoCorrection(Sema &S, LookupResult &R, Scope *Sc,
+ CXXScopeSpec &SS,
+ SourceLocation IdentLoc,
+ IdentifierInfo *Ident) {
+ NamespaceValidatorCCC Validator;
+ R.clear();
+ if (TypoCorrection Corrected = S.CorrectTypo(R.getLookupNameInfo(),
+ R.getLookupKind(), Sc, &SS,
+ Validator)) {
+ std::string CorrectedStr(Corrected.getAsString(S.getLangOpts()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(S.getLangOpts()));
+ if (DeclContext *DC = S.computeDeclContext(SS, false))
+ S.Diag(IdentLoc, diag::err_using_directive_member_suggest)
+ << Ident << DC << CorrectedQuotedStr << SS.getRange()
+ << FixItHint::CreateReplacement(IdentLoc, CorrectedStr);
+ else
+ S.Diag(IdentLoc, diag::err_using_directive_suggest)
+ << Ident << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(IdentLoc, CorrectedStr);
+
+ S.Diag(Corrected.getCorrectionDecl()->getLocation(),
+ diag::note_namespace_defined_here) << CorrectedQuotedStr;
+
+ R.addDecl(Corrected.getCorrectionDecl());
+ return true;
+ }
+ return false;
+}
+
+Decl *Sema::ActOnUsingDirective(Scope *S,
+ SourceLocation UsingLoc,
+ SourceLocation NamespcLoc,
+ CXXScopeSpec &SS,
+ SourceLocation IdentLoc,
+ IdentifierInfo *NamespcName,
+ AttributeList *AttrList) {
+ assert(!SS.isInvalid() && "Invalid CXXScopeSpec.");
+ assert(NamespcName && "Invalid NamespcName.");
+ assert(IdentLoc.isValid() && "Invalid NamespcName location.");
+
+ // This can only happen along a recovery path.
+ while (S->getFlags() & Scope::TemplateParamScope)
+ S = S->getParent();
+ assert(S->getFlags() & Scope::DeclScope && "Invalid Scope.");
+
+ UsingDirectiveDecl *UDir = 0;
+ NestedNameSpecifier *Qualifier = 0;
+ if (SS.isSet())
+ Qualifier = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+
+ // Lookup namespace name.
+ LookupResult R(*this, NamespcName, IdentLoc, LookupNamespaceName);
+ LookupParsedName(R, S, &SS);
+ if (R.isAmbiguous())
+ return 0;
+
+ if (R.empty()) {
+ R.clear();
+ // Allow "using namespace std;" or "using namespace ::std;" even if
+ // "std" hasn't been defined yet, for GCC compatibility.
+ if ((!Qualifier || Qualifier->getKind() == NestedNameSpecifier::Global) &&
+ NamespcName->isStr("std")) {
+ Diag(IdentLoc, diag::ext_using_undefined_std);
+ R.addDecl(getOrCreateStdNamespace());
+ R.resolveKind();
+ }
+ // Otherwise, attempt typo correction.
+ else TryNamespaceTypoCorrection(*this, R, S, SS, IdentLoc, NamespcName);
+ }
+
+ if (!R.empty()) {
+ NamedDecl *Named = R.getFoundDecl();
+ assert((isa<NamespaceDecl>(Named) || isa<NamespaceAliasDecl>(Named))
+ && "expected namespace decl");
+ // C++ [namespace.udir]p1:
+ // A using-directive specifies that the names in the nominated
+ // namespace can be used in the scope in which the
+ // using-directive appears after the using-directive. During
+ // unqualified name lookup (3.4.1), the names appear as if they
+ // were declared in the nearest enclosing namespace which
+ // contains both the using-directive and the nominated
+ // namespace. [Note: in this context, "contains" means "contains
+ // directly or indirectly". ]
+
+ // Find enclosing context containing both using-directive and
+ // nominated namespace.
+ NamespaceDecl *NS = getNamespaceDecl(Named);
+ DeclContext *CommonAncestor = cast<DeclContext>(NS);
+ while (CommonAncestor && !CommonAncestor->Encloses(CurContext))
+ CommonAncestor = CommonAncestor->getParent();
+
+ UDir = UsingDirectiveDecl::Create(Context, CurContext, UsingLoc, NamespcLoc,
+ SS.getWithLocInContext(Context),
+ IdentLoc, Named, CommonAncestor);
+
+ if (IsUsingDirectiveInToplevelContext(CurContext) &&
+ !SourceMgr.isFromMainFile(SourceMgr.getExpansionLoc(IdentLoc))) {
+ Diag(IdentLoc, diag::warn_using_directive_in_header);
+ }
+
+ PushUsingDirective(S, UDir);
+ } else {
+ Diag(IdentLoc, diag::err_expected_namespace_name) << SS.getRange();
+ }
+
+ // FIXME: We ignore attributes for now.
+ return UDir;
+}
+
+void Sema::PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir) {
+ // If the scope has an associated entity and the using directive is at
+ // namespace or translation unit scope, add the UsingDirectiveDecl into
+ // its lookup structure so qualified name lookup can find it.
+ DeclContext *Ctx = static_cast<DeclContext*>(S->getEntity());
+ if (Ctx && !Ctx->isFunctionOrMethod())
+ Ctx->addDecl(UDir);
+ else
+ // Otherwise, it is at block scope. The using-directives will affect lookup
+ // only to the end of the scope.
+ S->PushUsingDirective(UDir);
+}
+
+
+Decl *Sema::ActOnUsingDeclaration(Scope *S,
+ AccessSpecifier AS,
+ bool HasUsingKeyword,
+ SourceLocation UsingLoc,
+ CXXScopeSpec &SS,
+ UnqualifiedId &Name,
+ AttributeList *AttrList,
+ bool IsTypeName,
+ SourceLocation TypenameLoc) {
+ assert(S->getFlags() & Scope::DeclScope && "Invalid Scope.");
+
+ switch (Name.getKind()) {
+ case UnqualifiedId::IK_ImplicitSelfParam:
+ case UnqualifiedId::IK_Identifier:
+ case UnqualifiedId::IK_OperatorFunctionId:
+ case UnqualifiedId::IK_LiteralOperatorId:
+ case UnqualifiedId::IK_ConversionFunctionId:
+ break;
+
+ case UnqualifiedId::IK_ConstructorName:
+ case UnqualifiedId::IK_ConstructorTemplateId:
+ // C++0x inherited constructors.
+ Diag(Name.getLocStart(),
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_using_decl_constructor :
+ diag::err_using_decl_constructor)
+ << SS.getRange();
+
+ if (getLangOpts().CPlusPlus0x) break;
+
+ return 0;
+
+ case UnqualifiedId::IK_DestructorName:
+ Diag(Name.getLocStart(), diag::err_using_decl_destructor)
+ << SS.getRange();
+ return 0;
+
+ case UnqualifiedId::IK_TemplateId:
+ Diag(Name.getLocStart(), diag::err_using_decl_template_id)
+ << SourceRange(Name.TemplateId->LAngleLoc, Name.TemplateId->RAngleLoc);
+ return 0;
+ }
+
+ DeclarationNameInfo TargetNameInfo = GetNameFromUnqualifiedId(Name);
+ DeclarationName TargetName = TargetNameInfo.getName();
+ if (!TargetName)
+ return 0;
+
+ // Warn about using declarations.
+ // TODO: store that the declaration was written without 'using' and
+ // talk about access decls instead of using decls in the
+ // diagnostics.
+ if (!HasUsingKeyword) {
+ UsingLoc = Name.getLocStart();
+
+ Diag(UsingLoc, diag::warn_access_decl_deprecated)
+ << FixItHint::CreateInsertion(SS.getRange().getBegin(), "using ");
+ }
+
+ if (DiagnoseUnexpandedParameterPack(SS, UPPC_UsingDeclaration) ||
+ DiagnoseUnexpandedParameterPack(TargetNameInfo, UPPC_UsingDeclaration))
+ return 0;
+
+ NamedDecl *UD = BuildUsingDeclaration(S, AS, UsingLoc, SS,
+ TargetNameInfo, AttrList,
+ /* IsInstantiation */ false,
+ IsTypeName, TypenameLoc);
+ if (UD)
+ PushOnScopeChains(UD, S, /*AddToContext*/ false);
+
+ return UD;
+}
+
+/// \brief Determine whether a using declaration considers the given
+/// declarations as "equivalent", e.g., if they are redeclarations of
+/// the same entity or are both typedefs of the same type.
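+///
+/// For example (illustrative):
+///   typedef int T;
+///   namespace N { typedef int T; }
+///   using N::T;  // OK: both declarations of T are typedefs for the same
+///                // type, so this is not treated as a conflict.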
+static bool
+IsEquivalentForUsingDecl(ASTContext &Context, NamedDecl *D1, NamedDecl *D2,
+ bool &SuppressRedeclaration) {
+ if (D1->getCanonicalDecl() == D2->getCanonicalDecl()) {
+ SuppressRedeclaration = false;
+ return true;
+ }
+
+ if (TypedefNameDecl *TD1 = dyn_cast<TypedefNameDecl>(D1))
+ if (TypedefNameDecl *TD2 = dyn_cast<TypedefNameDecl>(D2)) {
+ SuppressRedeclaration = true;
+ return Context.hasSameType(TD1->getUnderlyingType(),
+ TD2->getUnderlyingType());
+ }
+
+ return false;
+}
+
+
+/// Determines whether to create a using shadow decl for a particular
+/// decl, given the set of decls existing prior to this using lookup.
+bool Sema::CheckUsingShadowDecl(UsingDecl *Using, NamedDecl *Orig,
+ const LookupResult &Previous) {
+ // Diagnose finding a decl which is not from a base class of the
+ // current class. We do this now because there are cases where this
+ // function will silently decide not to build a shadow decl, which
+ // will pre-empt further diagnostics.
+ //
+ // We don't need to do this in C++0x because we do the check once on
+ // the qualifier.
+ //
+ // FIXME: diagnose the following if we care enough:
+ // struct A { int foo; };
+ // struct B : A { using A::foo; };
+ // template <class T> struct C : A {};
+ // template <class T> struct D : C<T> { using B::foo; } // <---
+ // This is invalid (during instantiation) in C++03 because B::foo
+ // resolves to the using decl in B, which is not a base class of D<T>.
+ // We can't diagnose it immediately because C<T> is an unknown
+ // specialization. The UsingShadowDecl in D<T> then points directly
+ // to A::foo, which will look well-formed when we instantiate.
+ // The right solution is to not collapse the shadow-decl chain.
+ if (!getLangOpts().CPlusPlus0x && CurContext->isRecord()) {
+ DeclContext *OrigDC = Orig->getDeclContext();
+
+ // Handle enums and anonymous structs.
+ if (isa<EnumDecl>(OrigDC)) OrigDC = OrigDC->getParent();
+ CXXRecordDecl *OrigRec = cast<CXXRecordDecl>(OrigDC);
+ while (OrigRec->isAnonymousStructOrUnion())
+ OrigRec = cast<CXXRecordDecl>(OrigRec->getDeclContext());
+
+ if (cast<CXXRecordDecl>(CurContext)->isProvablyNotDerivedFrom(OrigRec)) {
+ if (OrigDC == CurContext) {
+ Diag(Using->getLocation(),
+ diag::err_using_decl_nested_name_specifier_is_current_class)
+ << Using->getQualifierLoc().getSourceRange();
+ Diag(Orig->getLocation(), diag::note_using_decl_target);
+ return true;
+ }
+
+ Diag(Using->getQualifierLoc().getBeginLoc(),
+ diag::err_using_decl_nested_name_specifier_is_not_base_class)
+ << Using->getQualifier()
+ << cast<CXXRecordDecl>(CurContext)
+ << Using->getQualifierLoc().getSourceRange();
+ Diag(Orig->getLocation(), diag::note_using_decl_target);
+ return true;
+ }
+ }
+
+ if (Previous.empty()) return false;
+
+ NamedDecl *Target = Orig;
+ if (isa<UsingShadowDecl>(Target))
+ Target = cast<UsingShadowDecl>(Target)->getTargetDecl();
+
+ // If the target happens to be one of the previous declarations, we
+ // don't have a conflict.
+ //
+ // FIXME: but we might be increasing its access, in which case we
+ // should redeclare it.
+ NamedDecl *NonTag = 0, *Tag = 0;
+ for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
+ I != E; ++I) {
+ NamedDecl *D = (*I)->getUnderlyingDecl();
+ bool Result;
+ if (IsEquivalentForUsingDecl(Context, D, Target, Result))
+ return Result;
+
+ (isa<TagDecl>(D) ? Tag : NonTag) = D;
+ }
+
+ if (Target->isFunctionOrFunctionTemplate()) {
+ FunctionDecl *FD;
+ if (isa<FunctionTemplateDecl>(Target))
+ FD = cast<FunctionTemplateDecl>(Target)->getTemplatedDecl();
+ else
+ FD = cast<FunctionDecl>(Target);
+
+ NamedDecl *OldDecl = 0;
+ switch (CheckOverload(0, FD, Previous, OldDecl, /*IsForUsingDecl*/ true)) {
+ case Ovl_Overload:
+ return false;
+
+ case Ovl_NonFunction:
+ Diag(Using->getLocation(), diag::err_using_decl_conflict);
+ break;
+
+ // We found a decl with the exact signature.
+ case Ovl_Match:
+ // If we're in a record, we want to hide the target, so we
+ // return true (without a diagnostic) to tell the caller not to
+ // build a shadow decl.
+ if (CurContext->isRecord())
+ return true;
+
+ // If we're not in a record, this is an error.
+ Diag(Using->getLocation(), diag::err_using_decl_conflict);
+ break;
+ }
+
+ Diag(Target->getLocation(), diag::note_using_decl_target);
+ Diag(OldDecl->getLocation(), diag::note_using_decl_conflict);
+ return true;
+ }
+
+ // Target is not a function.
+
+ if (isa<TagDecl>(Target)) {
+ // No conflict between a tag and a non-tag.
+ if (!Tag) return false;
+
+ Diag(Using->getLocation(), diag::err_using_decl_conflict);
+ Diag(Target->getLocation(), diag::note_using_decl_target);
+ Diag(Tag->getLocation(), diag::note_using_decl_conflict);
+ return true;
+ }
+
+ // No conflict between a tag and a non-tag.
+ if (!NonTag) return false;
+
+ Diag(Using->getLocation(), diag::err_using_decl_conflict);
+ Diag(Target->getLocation(), diag::note_using_decl_target);
+ Diag(NonTag->getLocation(), diag::note_using_decl_conflict);
+ return true;
+}
+
+/// Builds a shadow declaration corresponding to a 'using' declaration.
+UsingShadowDecl *Sema::BuildUsingShadowDecl(Scope *S,
+ UsingDecl *UD,
+ NamedDecl *Orig) {
+
+ // If we resolved to another shadow declaration, just coalesce them.
+ NamedDecl *Target = Orig;
+ if (isa<UsingShadowDecl>(Target)) {
+ Target = cast<UsingShadowDecl>(Target)->getTargetDecl();
+ assert(!isa<UsingShadowDecl>(Target) && "nested shadow declaration");
+ }
+
+ UsingShadowDecl *Shadow
+ = UsingShadowDecl::Create(Context, CurContext,
+ UD->getLocation(), UD, Target);
+ UD->addShadowDecl(Shadow);
+
+ Shadow->setAccess(UD->getAccess());
+ if (Orig->isInvalidDecl() || UD->isInvalidDecl())
+ Shadow->setInvalidDecl();
+
+ if (S)
+ PushOnScopeChains(Shadow, S);
+ else
+ CurContext->addDecl(Shadow);
+
+ return Shadow;
+}
+
+/// Hides a using shadow declaration. This is required by the current
+/// using-decl implementation when a resolvable using declaration in a
+/// class is followed by a declaration which would hide or override
+/// one or more of the using decl's targets; for example:
+///
+/// struct Base { void foo(int); };
+/// struct Derived : Base {
+/// using Base::foo;
+/// void foo(int);
+/// };
+///
+/// The governing language is C++03 [namespace.udecl]p12:
+///
+/// When a using-declaration brings names from a base class into a
+/// derived class scope, member functions in the derived class
+/// override and/or hide member functions with the same name and
+/// parameter types in a base class (rather than conflicting).
+///
+/// There are two ways to implement this:
+/// (1) optimistically create shadow decls when they're not hidden
+/// by existing declarations, or
+/// (2) don't create any shadow decls (or at least don't make them
+/// visible) until we've fully parsed/instantiated the class.
+/// The problem with (1) is that we might have to retroactively remove
+/// a shadow decl, which requires several O(n) operations because the
+/// decl structures are (very reasonably) not designed for removal.
+/// (2) avoids this but is very fiddly and phase-dependent.
+void Sema::HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow) {
+ if (Shadow->getDeclName().getNameKind() ==
+ DeclarationName::CXXConversionFunctionName)
+ cast<CXXRecordDecl>(Shadow->getDeclContext())->removeConversion(Shadow);
+
+ // Remove it from the DeclContext...
+ Shadow->getDeclContext()->removeDecl(Shadow);
+
+ // ...and the scope, if applicable...
+ if (S) {
+ S->RemoveDecl(Shadow);
+ IdResolver.RemoveDecl(Shadow);
+ }
+
+ // ...and the using decl.
+ Shadow->getUsingDecl()->removeShadowDecl(Shadow);
+
+ // TODO: complain somehow if Shadow was used. It shouldn't
+ // be possible for this to happen, because...?
+}
+
+/// Builds a using declaration.
+///
+/// \param IsInstantiation - Whether this call arises from an
+/// instantiation of an unresolved using declaration. We treat
+/// the lookup differently for these declarations.
+NamedDecl *Sema::BuildUsingDeclaration(Scope *S, AccessSpecifier AS,
+ SourceLocation UsingLoc,
+ CXXScopeSpec &SS,
+ const DeclarationNameInfo &NameInfo,
+ AttributeList *AttrList,
+ bool IsInstantiation,
+ bool IsTypeName,
+ SourceLocation TypenameLoc) {
+ assert(!SS.isInvalid() && "Invalid CXXScopeSpec.");
+ SourceLocation IdentLoc = NameInfo.getLoc();
+ assert(IdentLoc.isValid() && "Invalid TargetName location.");
+
+ // FIXME: We ignore attributes for now.
+
+ if (SS.isEmpty()) {
+ Diag(IdentLoc, diag::err_using_requires_qualname);
+ return 0;
+ }
+
+ // Do the redeclaration lookup in the current scope.
+ LookupResult Previous(*this, NameInfo, LookupUsingDeclName,
+ ForRedeclaration);
+ Previous.setHideTags(false);
+ if (S) {
+ LookupName(Previous, S);
+
+ // It is unfortunate that we have to do this: redeclaration lookup can
+ // return declarations from enclosing scopes, so filter those out.
+ LookupResult::Filter F = Previous.makeFilter();
+ while (F.hasNext()) {
+ NamedDecl *D = F.next();
+ if (!isDeclInScope(D, CurContext, S))
+ F.erase();
+ }
+ F.done();
+ } else {
+ assert(IsInstantiation && "no scope in non-instantiation");
+ assert(CurContext->isRecord() && "scope not record in instantiation");
+ LookupQualifiedName(Previous, CurContext);
+ }
+
+ // Check for invalid redeclarations.
+ if (CheckUsingDeclRedeclaration(UsingLoc, IsTypeName, SS, IdentLoc, Previous))
+ return 0;
+
+ // Check for bad qualifiers.
+ if (CheckUsingDeclQualifier(UsingLoc, SS, IdentLoc))
+ return 0;
+
+ DeclContext *LookupContext = computeDeclContext(SS);
+ NamedDecl *D;
+ NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
+ if (!LookupContext) {
+ if (IsTypeName) {
+ // FIXME: not all declaration name kinds are legal here
+ D = UnresolvedUsingTypenameDecl::Create(Context, CurContext,
+ UsingLoc, TypenameLoc,
+ QualifierLoc,
+ IdentLoc, NameInfo.getName());
+ } else {
+ D = UnresolvedUsingValueDecl::Create(Context, CurContext, UsingLoc,
+ QualifierLoc, NameInfo);
+ }
+ } else {
+ D = UsingDecl::Create(Context, CurContext, UsingLoc, QualifierLoc,
+ NameInfo, IsTypeName);
+ }
+ D->setAccess(AS);
+ CurContext->addDecl(D);
+
+ if (!LookupContext) return D;
+ UsingDecl *UD = cast<UsingDecl>(D);
+
+ if (RequireCompleteDeclContext(SS, LookupContext)) {
+ UD->setInvalidDecl();
+ return UD;
+ }
+
+ // The normal rules do not apply to inheriting constructor declarations.
+ if (NameInfo.getName().getNameKind() == DeclarationName::CXXConstructorName) {
+ if (CheckInheritingConstructorUsingDecl(UD))
+ UD->setInvalidDecl();
+ return UD;
+ }
+
+ // Otherwise, look up the target name.
+
+ LookupResult R(*this, NameInfo, LookupOrdinaryName);
+
+ // Unlike most lookups, we don't always want to hide tag
+ // declarations: tag names are visible through the using declaration
+ // even if hidden by ordinary names, *except* in a dependent context
+ // where it's important for the sanity of two-phase lookup.
+ if (!IsInstantiation)
+ R.setHideTags(false);
+
+ // For the purposes of this lookup, we have a base object type
+ // equal to that of the current context.
+ if (CurContext->isRecord()) {
+ R.setBaseObjectType(
+ Context.getTypeDeclType(cast<CXXRecordDecl>(CurContext)));
+ }
+
+ LookupQualifiedName(R, LookupContext);
+
+ if (R.empty()) {
+ Diag(IdentLoc, diag::err_no_member)
+ << NameInfo.getName() << LookupContext << SS.getRange();
+ UD->setInvalidDecl();
+ return UD;
+ }
+
+ if (R.isAmbiguous()) {
+ UD->setInvalidDecl();
+ return UD;
+ }
+
+ if (IsTypeName) {
+ // If we asked for a typename and got a non-type decl, error out.
+ if (!R.getAsSingle<TypeDecl>()) {
+ Diag(IdentLoc, diag::err_using_typename_non_type);
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
+ Diag((*I)->getUnderlyingDecl()->getLocation(),
+ diag::note_using_decl_target);
+ UD->setInvalidDecl();
+ return UD;
+ }
+ } else {
+ // If we asked for a non-typename and we got a type, error out,
+ // but only if this is an instantiation of an unresolved using
+ // decl. Otherwise just silently find the type name.
+ if (IsInstantiation && R.getAsSingle<TypeDecl>()) {
+ Diag(IdentLoc, diag::err_using_dependent_value_is_type);
+ Diag(R.getFoundDecl()->getLocation(), diag::note_using_decl_target);
+ UD->setInvalidDecl();
+ return UD;
+ }
+ }
+
+ // C++0x N2914 [namespace.udecl]p6:
+ // A using-declaration shall not name a namespace.
+ if (R.getAsSingle<NamespaceDecl>()) {
+ Diag(IdentLoc, diag::err_using_decl_can_not_refer_to_namespace)
+ << SS.getRange();
+ UD->setInvalidDecl();
+ return UD;
+ }
+
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
+ if (!CheckUsingShadowDecl(UD, *I, Previous))
+ BuildUsingShadowDecl(S, UD, *I);
+ }
+
+ return UD;
+}
+
+/// Additional checks for a using declaration referring to a constructor name.
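+ /// For example, in 'struct D : B { using B::B; };' the nested-name
+ /// specifier B must name a direct base class of D.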
+bool Sema::CheckInheritingConstructorUsingDecl(UsingDecl *UD) {
+ assert(!UD->isTypeName() && "expecting a constructor name");
+
+ const Type *SourceType = UD->getQualifier()->getAsType();
+ assert(SourceType &&
+ "Using decl naming constructor doesn't have type in scope spec.");
+ CXXRecordDecl *TargetClass = cast<CXXRecordDecl>(CurContext);
+
+ // Check whether the named type is a direct base class.
+ CanQualType CanonicalSourceType = SourceType->getCanonicalTypeUnqualified();
+ CXXRecordDecl::base_class_iterator BaseIt, BaseE;
+ for (BaseIt = TargetClass->bases_begin(), BaseE = TargetClass->bases_end();
+ BaseIt != BaseE; ++BaseIt) {
+ CanQualType BaseType = BaseIt->getType()->getCanonicalTypeUnqualified();
+ if (CanonicalSourceType == BaseType)
+ break;
+ if (BaseIt->getType()->isDependentType())
+ break;
+ }
+
+ if (BaseIt == BaseE) {
+ // Did not find SourceType in the bases.
+ Diag(UD->getUsingLocation(),
+ diag::err_using_decl_constructor_not_in_direct_base)
+ << UD->getNameInfo().getSourceRange()
+ << QualType(SourceType, 0) << TargetClass;
+ return true;
+ }
+
+ if (!CurContext->isDependentContext())
+ BaseIt->setInheritConstructors();
+
+ return false;
+}
+
+/// Checks that the given using declaration is not an invalid
+/// redeclaration. Note that this is checking only for the using decl
+/// itself, not for any ill-formedness among the UsingShadowDecls.
+bool Sema::CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
+ bool isTypeName,
+ const CXXScopeSpec &SS,
+ SourceLocation NameLoc,
+ const LookupResult &Prev) {
+ // C++03 [namespace.udecl]p8:
+ // C++0x [namespace.udecl]p10:
+ // A using-declaration is a declaration and can therefore be used
+ // repeatedly where (and only where) multiple declarations are
+ // allowed.
+ //
+ // That's in non-member contexts.
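+ //
+ // For example, 'using N::f; using N::f;' at namespace scope is valid,
+ // whereas repeating the same using-declaration inside a class definition
+ // is diagnosed below.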
+ if (!CurContext->getRedeclContext()->isRecord())
+ return false;
+
+ NestedNameSpecifier *Qual
+ = static_cast<NestedNameSpecifier*>(SS.getScopeRep());
+
+ for (LookupResult::iterator I = Prev.begin(), E = Prev.end(); I != E; ++I) {
+ NamedDecl *D = *I;
+
+ bool DTypename;
+ NestedNameSpecifier *DQual;
+ if (UsingDecl *UD = dyn_cast<UsingDecl>(D)) {
+ DTypename = UD->isTypeName();
+ DQual = UD->getQualifier();
+ } else if (UnresolvedUsingValueDecl *UD
+ = dyn_cast<UnresolvedUsingValueDecl>(D)) {
+ DTypename = false;
+ DQual = UD->getQualifier();
+ } else if (UnresolvedUsingTypenameDecl *UD
+ = dyn_cast<UnresolvedUsingTypenameDecl>(D)) {
+ DTypename = true;
+ DQual = UD->getQualifier();
+ } else continue;
+
+ // using decls differ if one says 'typename' and the other doesn't.
+ // FIXME: non-dependent using decls?
+ if (isTypeName != DTypename) continue;
+
+ // using decls differ if they name different scopes (but note that
+ // template instantiation can cause this check to trigger when it
+ // didn't before instantiation).
+ if (Context.getCanonicalNestedNameSpecifier(Qual) !=
+ Context.getCanonicalNestedNameSpecifier(DQual))
+ continue;
+
+ Diag(NameLoc, diag::err_using_decl_redeclaration) << SS.getRange();
+ Diag(D->getLocation(), diag::note_using_decl) << 1;
+ return true;
+ }
+
+ return false;
+}
+
+/// Checks that the given nested-name qualifier used in a using decl
+/// in the current context is appropriately related to the current
+/// scope. If an error is found, diagnoses it and returns true.
+bool Sema::CheckUsingDeclQualifier(SourceLocation UsingLoc,
+ const CXXScopeSpec &SS,
+ SourceLocation NameLoc) {
+ DeclContext *NamedContext = computeDeclContext(SS);
+
+ if (!CurContext->isRecord()) {
+ // C++03 [namespace.udecl]p3:
+ // C++0x [namespace.udecl]p8:
+ // A using-declaration for a class member shall be a member-declaration.
+
+ // If we weren't able to compute a valid scope, it must be a
+ // dependent class scope.
+ if (!NamedContext || NamedContext->isRecord()) {
+ Diag(NameLoc, diag::err_using_decl_can_not_refer_to_class_member)
+ << SS.getRange();
+ return true;
+ }
+
+ // Otherwise, everything is known to be fine.
+ return false;
+ }
+
+ // The current scope is a record.
+
+ // If the named context is dependent, we can't decide much.
+ if (!NamedContext) {
+ // FIXME: in C++0x, we can diagnose if we can prove that the
+ // nested-name-specifier does not refer to a base class, which is
+ // still possible in some cases.
+
+ // Otherwise we have to conservatively report that things might be
+ // okay.
+ return false;
+ }
+
+ if (!NamedContext->isRecord()) {
+ // Ideally this would point at the last name in the specifier,
+ // but we don't have that level of source info.
+ Diag(SS.getRange().getBegin(),
+ diag::err_using_decl_nested_name_specifier_is_not_class)
+ << (NestedNameSpecifier*) SS.getScopeRep() << SS.getRange();
+ return true;
+ }
+
+ if (!NamedContext->isDependentContext() &&
+ RequireCompleteDeclContext(const_cast<CXXScopeSpec&>(SS), NamedContext))
+ return true;
+
+ if (getLangOpts().CPlusPlus0x) {
+ // C++0x [namespace.udecl]p3:
+ // In a using-declaration used as a member-declaration, the
+ // nested-name-specifier shall name a base class of the class
+ // being defined.
+
+ if (cast<CXXRecordDecl>(CurContext)->isProvablyNotDerivedFrom(
+ cast<CXXRecordDecl>(NamedContext))) {
+ if (CurContext == NamedContext) {
+ Diag(NameLoc,
+ diag::err_using_decl_nested_name_specifier_is_current_class)
+ << SS.getRange();
+ return true;
+ }
+
+ Diag(SS.getRange().getBegin(),
+ diag::err_using_decl_nested_name_specifier_is_not_base_class)
+ << (NestedNameSpecifier*) SS.getScopeRep()
+ << cast<CXXRecordDecl>(CurContext)
+ << SS.getRange();
+ return true;
+ }
+
+ return false;
+ }
+
+ // C++03 [namespace.udecl]p4:
+ // A using-declaration used as a member-declaration shall refer
+ // to a member of a base class of the class being defined [etc.].
+
+ // Salient point: SS doesn't have to name a base class as long as
+ // lookup only finds members from base classes. Therefore we can
+ // diagnose here only if we can prove that that can't happen,
+ // i.e. if the class hierarchies provably don't intersect.
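+ //
+ // For example, given 'struct A { int x; }; struct B { int x; };', a
+ // member-declaration 'using A::x;' inside a class derived only from B can
+ // be rejected even in C++03: the hierarchies of A and B provably do not
+ // intersect, so lookup could never find a member of a base class.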
+
+ // TODO: it would be nice if "definitely valid" results were cached
+ // in the UsingDecl and UsingShadowDecl so that these checks didn't
+ // need to be repeated.
+
+ struct UserData {
+ llvm::SmallPtrSet<const CXXRecordDecl*, 4> Bases;
+
+ static bool collect(const CXXRecordDecl *Base, void *OpaqueData) {
+ UserData *Data = reinterpret_cast<UserData*>(OpaqueData);
+ Data->Bases.insert(Base);
+ return true;
+ }
+
+ bool hasDependentBases(const CXXRecordDecl *Class) {
+ return !Class->forallBases(collect, this);
+ }
+
+ /// Returns true if the base is dependent or is one of the
+ /// accumulated base classes.
+ static bool doesNotContain(const CXXRecordDecl *Base, void *OpaqueData) {
+ UserData *Data = reinterpret_cast<UserData*>(OpaqueData);
+ return !Data->Bases.count(Base);
+ }
+
+ bool mightShareBases(const CXXRecordDecl *Class) {
+ return Bases.count(Class) || !Class->forallBases(doesNotContain, this);
+ }
+ };
+
+ UserData Data;
+
+ // Returns false if we find a dependent base.
+ if (Data.hasDependentBases(cast<CXXRecordDecl>(CurContext)))
+ return false;
+
+ // Returns false if the class has a dependent base or if it or one
+ // of its bases is present in the base set of the current context.
+ if (Data.mightShareBases(cast<CXXRecordDecl>(NamedContext)))
+ return false;
+
+ Diag(SS.getRange().getBegin(),
+ diag::err_using_decl_nested_name_specifier_is_not_base_class)
+ << (NestedNameSpecifier*) SS.getScopeRep()
+ << cast<CXXRecordDecl>(CurContext)
+ << SS.getRange();
+
+ return true;
+}
+
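+ /// Handles a C++0x alias-declaration or alias template, for example:
+ ///
+ ///   using IntPtr = int *;
+ ///   template<typename T> using Vec = std::vector<T>;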
+Decl *Sema::ActOnAliasDeclaration(Scope *S,
+ AccessSpecifier AS,
+ MultiTemplateParamsArg TemplateParamLists,
+ SourceLocation UsingLoc,
+ UnqualifiedId &Name,
+ TypeResult Type) {
+ // Skip up to the relevant declaration scope.
+ while (S->getFlags() & Scope::TemplateParamScope)
+ S = S->getParent();
+ assert((S->getFlags() & Scope::DeclScope) &&
+ "got alias-declaration outside of declaration scope");
+
+ if (Type.isInvalid())
+ return 0;
+
+ bool Invalid = false;
+ DeclarationNameInfo NameInfo = GetNameFromUnqualifiedId(Name);
+ TypeSourceInfo *TInfo = 0;
+ GetTypeFromParser(Type.get(), &TInfo);
+
+ if (DiagnoseClassNameShadow(CurContext, NameInfo))
+ return 0;
+
+ if (DiagnoseUnexpandedParameterPack(Name.StartLocation, TInfo,
+ UPPC_DeclarationType)) {
+ Invalid = true;
+ TInfo = Context.getTrivialTypeSourceInfo(Context.IntTy,
+ TInfo->getTypeLoc().getBeginLoc());
+ }
+
+ LookupResult Previous(*this, NameInfo, LookupOrdinaryName, ForRedeclaration);
+ LookupName(Previous, S);
+
+ // Warn about shadowing the name of a template parameter.
+ if (Previous.isSingleResult() &&
+ Previous.getFoundDecl()->isTemplateParameter()) {
+ DiagnoseTemplateParameterShadow(Name.StartLocation,Previous.getFoundDecl());
+ Previous.clear();
+ }
+
+ assert(Name.Kind == UnqualifiedId::IK_Identifier &&
+ "name in alias declaration must be an identifier");
+ TypeAliasDecl *NewTD = TypeAliasDecl::Create(Context, CurContext, UsingLoc,
+ Name.StartLocation,
+ Name.Identifier, TInfo);
+
+ NewTD->setAccess(AS);
+
+ if (Invalid)
+ NewTD->setInvalidDecl();
+
+ CheckTypedefForVariablyModifiedType(S, NewTD);
+ Invalid |= NewTD->isInvalidDecl();
+
+ bool Redeclaration = false;
+
+ NamedDecl *NewND;
+ if (TemplateParamLists.size()) {
+ TypeAliasTemplateDecl *OldDecl = 0;
+ TemplateParameterList *OldTemplateParams = 0;
+
+ if (TemplateParamLists.size() != 1) {
+ Diag(UsingLoc, diag::err_alias_template_extra_headers)
+ << SourceRange(TemplateParamLists.get()[1]->getTemplateLoc(),
+ TemplateParamLists.get()[TemplateParamLists.size()-1]->getRAngleLoc());
+ }
+ TemplateParameterList *TemplateParams = TemplateParamLists.get()[0];
+
+ // Only consider previous declarations in the same scope.
+ FilterLookupForScope(Previous, CurContext, S, /*ConsiderLinkage*/false,
+ /*ExplicitInstantiationOrSpecialization*/false);
+ if (!Previous.empty()) {
+ Redeclaration = true;
+
+ OldDecl = Previous.getAsSingle<TypeAliasTemplateDecl>();
+ if (!OldDecl && !Invalid) {
+ Diag(UsingLoc, diag::err_redefinition_different_kind)
+ << Name.Identifier;
+
+ NamedDecl *OldD = Previous.getRepresentativeDecl();
+ if (OldD->getLocation().isValid())
+ Diag(OldD->getLocation(), diag::note_previous_definition);
+
+ Invalid = true;
+ }
+
+ if (!Invalid && OldDecl && !OldDecl->isInvalidDecl()) {
+ if (TemplateParameterListsAreEqual(TemplateParams,
+ OldDecl->getTemplateParameters(),
+ /*Complain=*/true,
+ TPL_TemplateMatch))
+ OldTemplateParams = OldDecl->getTemplateParameters();
+ else
+ Invalid = true;
+
+ TypeAliasDecl *OldTD = OldDecl->getTemplatedDecl();
+ if (!Invalid &&
+ !Context.hasSameType(OldTD->getUnderlyingType(),
+ NewTD->getUnderlyingType())) {
+ // FIXME: The C++0x standard does not clearly say this is ill-formed,
+ // but we can't reasonably accept it.
+ Diag(NewTD->getLocation(), diag::err_redefinition_different_typedef)
+ << 2 << NewTD->getUnderlyingType() << OldTD->getUnderlyingType();
+ if (OldTD->getLocation().isValid())
+ Diag(OldTD->getLocation(), diag::note_previous_definition);
+ Invalid = true;
+ }
+ }
+ }
+
+ // Merge any previous default template arguments into our parameters,
+ // and check the parameter list.
+ if (CheckTemplateParameterList(TemplateParams, OldTemplateParams,
+ TPC_TypeAliasTemplate))
+ return 0;
+
+ TypeAliasTemplateDecl *NewDecl =
+ TypeAliasTemplateDecl::Create(Context, CurContext, UsingLoc,
+ Name.Identifier, TemplateParams,
+ NewTD);
+
+ NewDecl->setAccess(AS);
+
+ if (Invalid)
+ NewDecl->setInvalidDecl();
+ else if (OldDecl)
+ NewDecl->setPreviousDeclaration(OldDecl);
+
+ NewND = NewDecl;
+ } else {
+ ActOnTypedefNameDecl(S, CurContext, NewTD, Previous, Redeclaration);
+ NewND = NewTD;
+ }
+
+ if (!Redeclaration)
+ PushOnScopeChains(NewND, S);
+
+ return NewND;
+}
+
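+ /// Handles a namespace-alias-definition, e.g. 'namespace M = A::B;'.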
+Decl *Sema::ActOnNamespaceAliasDef(Scope *S,
+ SourceLocation NamespaceLoc,
+ SourceLocation AliasLoc,
+ IdentifierInfo *Alias,
+ CXXScopeSpec &SS,
+ SourceLocation IdentLoc,
+ IdentifierInfo *Ident) {
+
+ // Lookup the namespace name.
+ LookupResult R(*this, Ident, IdentLoc, LookupNamespaceName);
+ LookupParsedName(R, S, &SS);
+
+ // Check if we have a previous declaration with the same name.
+ NamedDecl *PrevDecl
+ = LookupSingleName(S, Alias, AliasLoc, LookupOrdinaryName,
+ ForRedeclaration);
+ if (PrevDecl && !isDeclInScope(PrevDecl, CurContext, S))
+ PrevDecl = 0;
+
+ if (PrevDecl) {
+ if (NamespaceAliasDecl *AD = dyn_cast<NamespaceAliasDecl>(PrevDecl)) {
+ // We already have an alias with the same name that points to the same
+ // namespace, so don't create a new one.
+ // FIXME: At some point, we'll want to create the (redundant)
+ // declaration to maintain better source information.
+ if (!R.isAmbiguous() && !R.empty() &&
+ AD->getNamespace()->Equals(getNamespaceDecl(R.getFoundDecl())))
+ return 0;
+ }
+
+ unsigned DiagID = isa<NamespaceDecl>(PrevDecl) ? diag::err_redefinition :
+ diag::err_redefinition_different_kind;
+ Diag(AliasLoc, DiagID) << Alias;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ return 0;
+ }
+
+ if (R.isAmbiguous())
+ return 0;
+
+ if (R.empty()) {
+ if (!TryNamespaceTypoCorrection(*this, R, S, SS, IdentLoc, Ident)) {
+ Diag(IdentLoc, diag::err_expected_namespace_name) << SS.getRange();
+ return 0;
+ }
+ }
+
+ NamespaceAliasDecl *AliasDecl =
+ NamespaceAliasDecl::Create(Context, CurContext, NamespaceLoc, AliasLoc,
+ Alias, SS.getWithLocInContext(Context),
+ IdentLoc, R.getFoundDecl());
+
+ PushOnScopeChains(AliasDecl, S);
+ return AliasDecl;
+}
+
+namespace {
+ /// \brief Scoped object used to handle the state changes required in Sema
+ /// to implicitly define the body of a C++ member function.
+ class ImplicitlyDefinedFunctionScope {
+ Sema &S;
+ Sema::ContextRAII SavedContext;
+
+ public:
+ ImplicitlyDefinedFunctionScope(Sema &S, CXXMethodDecl *Method)
+ : S(S), SavedContext(S, Method)
+ {
+ S.PushFunctionScope();
+ S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
+ }
+
+ ~ImplicitlyDefinedFunctionScope() {
+ S.PopExpressionEvaluationContext();
+ S.PopFunctionScopeInfo();
+ }
+ };
+}
+
+Sema::ImplicitExceptionSpecification
+Sema::ComputeDefaultedDefaultCtorExceptionSpec(CXXRecordDecl *ClassDecl) {
+ // C++ [except.spec]p14:
+ // An implicitly declared special member function (Clause 12) shall have an
+ // exception-specification. [...]
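+ //
+ // The specification computed here permits the exceptions that the
+ // base-class, virtual-base, and member default constructors (and any
+ // in-class initializers) invoked below are themselves allowed to throw.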
+ ImplicitExceptionSpecification ExceptSpec(Context);
+ if (ClassDecl->isInvalidDecl())
+ return ExceptSpec;
+
+ // Direct base-class constructors.
+ for (CXXRecordDecl::base_class_iterator B = ClassDecl->bases_begin(),
+ BEnd = ClassDecl->bases_end();
+ B != BEnd; ++B) {
+ if (B->isVirtual()) // Handled below.
+ continue;
+
+ if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) {
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ CXXConstructorDecl *Constructor = LookupDefaultConstructor(BaseClassDecl);
+ // If this is a deleted function, add it anyway. Whether the standard
+ // requires this is unclear, and it may not matter.
+ if (Constructor)
+ ExceptSpec.CalledDecl(Constructor);
+ }
+ }
+
+ // Virtual base-class constructors.
+ for (CXXRecordDecl::base_class_iterator B = ClassDecl->vbases_begin(),
+ BEnd = ClassDecl->vbases_end();
+ B != BEnd; ++B) {
+ if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) {
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ CXXConstructorDecl *Constructor = LookupDefaultConstructor(BaseClassDecl);
+ // If this is a deleted function, add it anyway. Whether the standard
+ // requires this is unclear, and it may not matter.
+ if (Constructor)
+ ExceptSpec.CalledDecl(Constructor);
+ }
+ }
+
+ // Field constructors.
+ for (RecordDecl::field_iterator F = ClassDecl->field_begin(),
+ FEnd = ClassDecl->field_end();
+ F != FEnd; ++F) {
+ if (F->hasInClassInitializer()) {
+ if (Expr *E = F->getInClassInitializer())
+ ExceptSpec.CalledExpr(E);
+ else if (!F->isInvalidDecl())
+ ExceptSpec.SetDelayed();
+ } else if (const RecordType *RecordTy
+ = Context.getBaseElementType(F->getType())->getAs<RecordType>()) {
+ CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
+ CXXConstructorDecl *Constructor = LookupDefaultConstructor(FieldRecDecl);
+ // If this is a deleted function, add it anyway. Whether the standard
+ // requires this is unclear, and it may not matter: the function in
+ // question never gets called, so the program might simply be ill-formed
+ // for referring to a deleted function here.
+ if (Constructor)
+ ExceptSpec.CalledDecl(Constructor);
+ }
+ }
+
+ return ExceptSpec;
+}
+
+CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
+ CXXRecordDecl *ClassDecl) {
+ // C++ [class.ctor]p5:
+ // A default constructor for a class X is a constructor of class X
+ // that can be called without an argument. If there is no
+ // user-declared constructor for class X, a default constructor is
+ // implicitly declared. An implicitly-declared default constructor
+ // is an inline public member of its class.
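+ //
+ // For example, 'struct S { std::string Name; };' has no user-declared
+ // constructor, so an inline public 'S()' is declared here; its body is
+ // only synthesized later, if and when the constructor is actually used.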
+ assert(!ClassDecl->hasUserDeclaredConstructor() &&
+ "Should not build implicit default constructor!");
+
+ ImplicitExceptionSpecification Spec =
+ ComputeDefaultedDefaultCtorExceptionSpec(ClassDecl);
+ FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
+
+ // Create the actual constructor declaration.
+ CanQualType ClassType
+ = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
+ SourceLocation ClassLoc = ClassDecl->getLocation();
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXConstructorName(ClassType);
+ DeclarationNameInfo NameInfo(Name, ClassLoc);
+ CXXConstructorDecl *DefaultCon = CXXConstructorDecl::Create(
+ Context, ClassDecl, ClassLoc, NameInfo,
+ Context.getFunctionType(Context.VoidTy, 0, 0, EPI), /*TInfo=*/0,
+ /*isExplicit=*/false, /*isInline=*/true, /*isImplicitlyDeclared=*/true,
+ /*isConstexpr=*/ClassDecl->defaultedDefaultConstructorIsConstexpr() &&
+ getLangOpts().CPlusPlus0x);
+ DefaultCon->setAccess(AS_public);
+ DefaultCon->setDefaulted();
+ DefaultCon->setImplicit();
+ DefaultCon->setTrivial(ClassDecl->hasTrivialDefaultConstructor());
+
+ // Note that we have declared this constructor.
+ ++ASTContext::NumImplicitDefaultConstructorsDeclared;
+
+ if (Scope *S = getScopeForContext(ClassDecl))
+ PushOnScopeChains(DefaultCon, S, false);
+ ClassDecl->addDecl(DefaultCon);
+
+ if (ShouldDeleteSpecialMember(DefaultCon, CXXDefaultConstructor))
+ DefaultCon->setDeletedAsWritten();
+
+ return DefaultCon;
+}
+
+void Sema::DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
+ CXXConstructorDecl *Constructor) {
+ assert((Constructor->isDefaulted() && Constructor->isDefaultConstructor() &&
+ !Constructor->doesThisDeclarationHaveABody() &&
+ !Constructor->isDeleted()) &&
+ "DefineImplicitDefaultConstructor - call it for implicit default ctor");
+
+ CXXRecordDecl *ClassDecl = Constructor->getParent();
+ assert(ClassDecl && "DefineImplicitDefaultConstructor - invalid constructor");
+
+ ImplicitlyDefinedFunctionScope Scope(*this, Constructor);
+ DiagnosticErrorTrap Trap(Diags);
+ if (SetCtorInitializers(Constructor, 0, 0, /*AnyErrors=*/false) ||
+ Trap.hasErrorOccurred()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXDefaultConstructor << Context.getTagDeclType(ClassDecl);
+ Constructor->setInvalidDecl();
+ return;
+ }
+
+ SourceLocation Loc = Constructor->getLocation();
+ Constructor->setBody(new (Context) CompoundStmt(Context, 0, 0, Loc, Loc));
+
+ Constructor->setUsed();
+ MarkVTableUsed(CurrentLocation, ClassDecl);
+
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(Constructor);
+ }
+}
+
+/// Get any existing defaulted default constructor for the given class. Do not
+/// implicitly define one if it does not exist.
+static CXXConstructorDecl *getDefaultedDefaultConstructorUnsafe(Sema &Self,
+ CXXRecordDecl *D) {
+ ASTContext &Context = Self.Context;
+ QualType ClassType = Context.getTypeDeclType(D);
+ DeclarationName ConstructorName
+ = Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(ClassType.getUnqualifiedType()));
+
+ DeclContext::lookup_const_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd) = D->lookup(ConstructorName);
+ Con != ConEnd; ++Con) {
+ // A function template cannot be defaulted.
+ if (isa<FunctionTemplateDecl>(*Con))
+ continue;
+
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
+ if (Constructor->isDefaultConstructor())
+ return Constructor->isDefaulted() ? Constructor : 0;
+ }
+ return 0;
+}
+
+void Sema::ActOnFinishDelayedMemberInitializers(Decl *D) {
+ if (!D) return;
+ AdjustDeclIfTemplate(D);
+
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(D);
+ CXXConstructorDecl *CtorDecl
+ = getDefaultedDefaultConstructorUnsafe(*this, ClassDecl);
+
+ if (!CtorDecl) return;
+
+ // Compute the exception specification for the default constructor.
+ const FunctionProtoType *CtorTy =
+ CtorDecl->getType()->castAs<FunctionProtoType>();
+ if (CtorTy->getExceptionSpecType() == EST_Delayed) {
+ ImplicitExceptionSpecification Spec =
+ ComputeDefaultedDefaultCtorExceptionSpec(ClassDecl);
+ FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
+ assert(EPI.ExceptionSpecType != EST_Delayed);
+
+ CtorDecl->setType(Context.getFunctionType(Context.VoidTy, 0, 0, EPI));
+ }
+
+ // If the default constructor is explicitly defaulted, checking the exception
+ // specification is deferred until now.
+ if (!CtorDecl->isInvalidDecl() && CtorDecl->isExplicitlyDefaulted() &&
+ !ClassDecl->isDependentType())
+ CheckExplicitlyDefaultedDefaultConstructor(CtorDecl);
+}
+
+void Sema::DeclareInheritedConstructors(CXXRecordDecl *ClassDecl) {
+ // We start with an initial pass over the base classes to collect those that
+ // inherit constructors from. If there are none, we can forgo all further
+ // processing.
+ typedef SmallVector<const RecordType *, 4> BasesVector;
+ BasesVector BasesToInheritFrom;
+ for (CXXRecordDecl::base_class_iterator BaseIt = ClassDecl->bases_begin(),
+ BaseE = ClassDecl->bases_end();
+ BaseIt != BaseE; ++BaseIt) {
+ if (BaseIt->getInheritConstructors()) {
+ QualType Base = BaseIt->getType();
+ if (Base->isDependentType()) {
+ // If we inherit constructors from anything that is dependent, just
+ // abort processing altogether. We'll get another chance for the
+ // instantiations.
+ return;
+ }
+ BasesToInheritFrom.push_back(Base->castAs<RecordType>());
+ }
+ }
+ if (BasesToInheritFrom.empty())
+ return;
+
+ // Now collect the constructors that we already have in the current class.
+ // Those take precedence over inherited constructors.
+ // C++0x [class.inhctor]p3: [...] a constructor is implicitly declared [...]
+ // unless there is a user-declared constructor with the same signature in
+ // the class where the using-declaration appears.
+ llvm::SmallSet<const Type *, 8> ExistingConstructors;
+ for (CXXRecordDecl::ctor_iterator CtorIt = ClassDecl->ctor_begin(),
+ CtorE = ClassDecl->ctor_end();
+ CtorIt != CtorE; ++CtorIt) {
+ ExistingConstructors.insert(
+ Context.getCanonicalType(CtorIt->getType()).getTypePtr());
+ }
+
+ DeclarationName CreatedCtorName =
+ Context.DeclarationNames.getCXXConstructorName(
+ ClassDecl->getTypeForDecl()->getCanonicalTypeUnqualified());
+
+ // Now comes the true work.
+ // First, we keep a map from constructor types to the base that introduced
+ // them. Needed for finding conflicting constructors. We also keep the
+ // actually inserted declarations in there, for pretty diagnostics.
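+ //
+ // For example, if a class inherits constructors from two bases B1 and B2
+ // that each declare a constructor taking (int), the second inherited
+ // '(int)' signature is reported as a conflict below.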
+ typedef std::pair<CanQualType, CXXConstructorDecl *> ConstructorInfo;
+ typedef llvm::DenseMap<const Type *, ConstructorInfo> ConstructorToSourceMap;
+ ConstructorToSourceMap InheritedConstructors;
+ for (BasesVector::iterator BaseIt = BasesToInheritFrom.begin(),
+ BaseE = BasesToInheritFrom.end();
+ BaseIt != BaseE; ++BaseIt) {
+ const RecordType *Base = *BaseIt;
+ CanQualType CanonicalBase = Base->getCanonicalTypeUnqualified();
+ CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(Base->getDecl());
+ for (CXXRecordDecl::ctor_iterator CtorIt = BaseDecl->ctor_begin(),
+ CtorE = BaseDecl->ctor_end();
+ CtorIt != CtorE; ++CtorIt) {
+ // Find the using declaration for inheriting this base's constructors.
+ // FIXME: Don't perform name lookup just to obtain a source location!
+ DeclarationName Name =
+ Context.DeclarationNames.getCXXConstructorName(CanonicalBase);
+ LookupResult Result(*this, Name, SourceLocation(), LookupUsingDeclName);
+ LookupQualifiedName(Result, CurContext);
+ UsingDecl *UD = Result.getAsSingle<UsingDecl>();
+ SourceLocation UsingLoc = UD ? UD->getLocation() :
+ ClassDecl->getLocation();
+
+ // C++0x [class.inhctor]p1: The candidate set of inherited constructors
+ // from the class X named in the using-declaration consists of actual
+ // constructors and notional constructors that result from the
+ // transformation of defaulted parameters as follows:
+ // - all non-template default constructors of X, and
+ // - for each non-template constructor of X that has at least one
+ // parameter with a default argument, the set of constructors that
+ // results from omitting any ellipsis parameter specification and
+ // successively omitting parameters with a default argument from the
+ // end of the parameter-type-list.
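+ //
+ // For example, a base constructor 'B(int, double = 0.0)' contributes the
+ // notional signatures 'B(int)' and 'B(int, double)'; the loop below walks
+ // exactly those parameter counts.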
+ CXXConstructorDecl *BaseCtor = *CtorIt;
+ bool CanBeCopyOrMove = BaseCtor->isCopyOrMoveConstructor();
+ const FunctionProtoType *BaseCtorType =
+ BaseCtor->getType()->getAs<FunctionProtoType>();
+
+ for (unsigned params = BaseCtor->getMinRequiredArguments(),
+ maxParams = BaseCtor->getNumParams();
+ params <= maxParams; ++params) {
+ // Skip default constructors. They're never inherited.
+ if (params == 0)
+ continue;
+ // Skip copy and move constructors for the same reason.
+ if (CanBeCopyOrMove && params == 1)
+ continue;
+
+ // Build up a function type for this particular constructor.
+ // FIXME: The working paper does not consider that the exception spec
+ // for the inheriting constructor might be larger than that of the
+ // source. This code doesn't yet, either. When it does, this code will
+ // need to be delayed until after exception specifications and in-class
+ // member initializers are attached.
+ const Type *NewCtorType;
+ if (params == maxParams)
+ NewCtorType = BaseCtorType;
+ else {
+ SmallVector<QualType, 16> Args;
+ for (unsigned i = 0; i < params; ++i) {
+ Args.push_back(BaseCtorType->getArgType(i));
+ }
+ FunctionProtoType::ExtProtoInfo ExtInfo =
+ BaseCtorType->getExtProtoInfo();
+ ExtInfo.Variadic = false;
+ NewCtorType = Context.getFunctionType(BaseCtorType->getResultType(),
+ Args.data(), params, ExtInfo)
+ .getTypePtr();
+ }
+ const Type *CanonicalNewCtorType =
+ Context.getCanonicalType(NewCtorType);
+
+ // Now that we have the type, first check if the class already has a
+ // constructor with this signature.
+ if (ExistingConstructors.count(CanonicalNewCtorType))
+ continue;
+
+ // Then we check if we have already declared an inherited constructor
+ // with this signature.
+ std::pair<ConstructorToSourceMap::iterator, bool> result =
+ InheritedConstructors.insert(std::make_pair(
+ CanonicalNewCtorType,
+ std::make_pair(CanonicalBase, (CXXConstructorDecl*)0)));
+ if (!result.second) {
+ // Already in the map. If it came from a different class, that's an
+ // error. Not if it's from the same.
+ CanQualType PreviousBase = result.first->second.first;
+ if (CanonicalBase != PreviousBase) {
+ const CXXConstructorDecl *PrevCtor = result.first->second.second;
+ const CXXConstructorDecl *PrevBaseCtor =
+ PrevCtor->getInheritedConstructor();
+ assert(PrevBaseCtor && "Conflicting constructor was not inherited");
+
+ Diag(UsingLoc, diag::err_using_decl_constructor_conflict);
+ Diag(BaseCtor->getLocation(),
+ diag::note_using_decl_constructor_conflict_current_ctor);
+ Diag(PrevBaseCtor->getLocation(),
+ diag::note_using_decl_constructor_conflict_previous_ctor);
+ Diag(PrevCtor->getLocation(),
+ diag::note_using_decl_constructor_conflict_previous_using);
+ }
+ continue;
+ }
+
+ // OK, we're there, now add the constructor.
+ // C++0x [class.inhctor]p8: [...] that would be performed by a
+ // user-written inline constructor [...]
+ DeclarationNameInfo DNI(CreatedCtorName, UsingLoc);
+ CXXConstructorDecl *NewCtor = CXXConstructorDecl::Create(
+ Context, ClassDecl, UsingLoc, DNI, QualType(NewCtorType, 0),
+ /*TInfo=*/0, BaseCtor->isExplicit(), /*Inline=*/true,
+ /*ImplicitlyDeclared=*/true,
+ // FIXME: Due to a defect in the standard, we treat inherited
+ // constructors as constexpr even if that makes them ill-formed.
+ /*Constexpr=*/BaseCtor->isConstexpr());
+ NewCtor->setAccess(BaseCtor->getAccess());
+
+ // Build up the parameter decls and add them.
+ SmallVector<ParmVarDecl *, 16> ParamDecls;
+ for (unsigned i = 0; i < params; ++i) {
+ ParamDecls.push_back(ParmVarDecl::Create(Context, NewCtor,
+ UsingLoc, UsingLoc,
+ /*IdentifierInfo=*/0,
+ BaseCtorType->getArgType(i),
+ /*TInfo=*/0, SC_None,
+ SC_None, /*DefaultArg=*/0));
+ }
+ NewCtor->setParams(ParamDecls);
+ NewCtor->setInheritedConstructor(BaseCtor);
+
+ ClassDecl->addDecl(NewCtor);
+ result.first->second.second = NewCtor;
+ }
+ }
+ }
+}
+
+Sema::ImplicitExceptionSpecification
+Sema::ComputeDefaultedDtorExceptionSpec(CXXRecordDecl *ClassDecl) {
+ // C++ [except.spec]p14:
+ // An implicitly declared special member function (Clause 12) shall have
+ // an exception-specification.
+ ImplicitExceptionSpecification ExceptSpec(Context);
+ if (ClassDecl->isInvalidDecl())
+ return ExceptSpec;
+
+ // Direct base-class destructors.
+ for (CXXRecordDecl::base_class_iterator B = ClassDecl->bases_begin(),
+ BEnd = ClassDecl->bases_end();
+ B != BEnd; ++B) {
+ if (B->isVirtual()) // Handled below.
+ continue;
+
+ if (const RecordType *BaseType = B->getType()->getAs<RecordType>())
+ ExceptSpec.CalledDecl(
+ LookupDestructor(cast<CXXRecordDecl>(BaseType->getDecl())));
+ }
+
+ // Virtual base-class destructors.
+ for (CXXRecordDecl::base_class_iterator B = ClassDecl->vbases_begin(),
+ BEnd = ClassDecl->vbases_end();
+ B != BEnd; ++B) {
+ if (const RecordType *BaseType = B->getType()->getAs<RecordType>())
+ ExceptSpec.CalledDecl(
+ LookupDestructor(cast<CXXRecordDecl>(BaseType->getDecl())));
+ }
+
+ // Field destructors.
+ for (RecordDecl::field_iterator F = ClassDecl->field_begin(),
+ FEnd = ClassDecl->field_end();
+ F != FEnd; ++F) {
+ if (const RecordType *RecordTy
+ = Context.getBaseElementType(F->getType())->getAs<RecordType>())
+ ExceptSpec.CalledDecl(
+ LookupDestructor(cast<CXXRecordDecl>(RecordTy->getDecl())));
+ }
+
+ return ExceptSpec;
+}
+
+CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
+ // C++ [class.dtor]p2:
+ // If a class has no user-declared destructor, a destructor is
+ // declared implicitly. An implicitly-declared destructor is an
+ // inline public member of its class.
+
+ ImplicitExceptionSpecification Spec =
+ ComputeDefaultedDtorExceptionSpec(ClassDecl);
+ FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
+
+ // Create the actual destructor declaration.
+ QualType Ty = Context.getFunctionType(Context.VoidTy, 0, 0, EPI);
+
+ CanQualType ClassType
+ = Context.getCanonicalType(Context.getTypeDeclType(ClassDecl));
+ SourceLocation ClassLoc = ClassDecl->getLocation();
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXDestructorName(ClassType);
+ DeclarationNameInfo NameInfo(Name, ClassLoc);
+ CXXDestructorDecl *Destructor
+ = CXXDestructorDecl::Create(Context, ClassDecl, ClassLoc, NameInfo, Ty, 0,
+ /*isInline=*/true,
+ /*isImplicitlyDeclared=*/true);
+ Destructor->setAccess(AS_public);
+ Destructor->setDefaulted();
+ Destructor->setImplicit();
+ Destructor->setTrivial(ClassDecl->hasTrivialDestructor());
+
+ // Note that we have declared this destructor.
+ ++ASTContext::NumImplicitDestructorsDeclared;
+
+ // Introduce this destructor into its scope.
+ if (Scope *S = getScopeForContext(ClassDecl))
+ PushOnScopeChains(Destructor, S, false);
+ ClassDecl->addDecl(Destructor);
+
+ // This could be uniqued if it ever proves significant.
+ Destructor->setTypeSourceInfo(Context.getTrivialTypeSourceInfo(Ty));
+
+ AddOverriddenMethods(ClassDecl, Destructor);
+
+ if (ShouldDeleteSpecialMember(Destructor, CXXDestructor))
+ Destructor->setDeletedAsWritten();
+
+ return Destructor;
+}
+
+void Sema::DefineImplicitDestructor(SourceLocation CurrentLocation,
+ CXXDestructorDecl *Destructor) {
+ assert((Destructor->isDefaulted() &&
+ !Destructor->doesThisDeclarationHaveABody() &&
+ !Destructor->isDeleted()) &&
+ "DefineImplicitDestructor - call it for implicit default dtor");
+ CXXRecordDecl *ClassDecl = Destructor->getParent();
+ assert(ClassDecl && "DefineImplicitDestructor - invalid destructor");
+
+ if (Destructor->isInvalidDecl())
+ return;
+
+ ImplicitlyDefinedFunctionScope Scope(*this, Destructor);
+
+ DiagnosticErrorTrap Trap(Diags);
+ MarkBaseAndMemberDestructorsReferenced(Destructor->getLocation(),
+ Destructor->getParent());
+
+ if (CheckDestructor(Destructor) || Trap.hasErrorOccurred()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXDestructor << Context.getTagDeclType(ClassDecl);
+
+ Destructor->setInvalidDecl();
+ return;
+ }
+
+ SourceLocation Loc = Destructor->getLocation();
+ Destructor->setBody(new (Context) CompoundStmt(Context, 0, 0, Loc, Loc));
+ Destructor->setImplicitlyDefined(true);
+ Destructor->setUsed();
+ MarkVTableUsed(CurrentLocation, ClassDecl);
+
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(Destructor);
+ }
+}
+
+void Sema::AdjustDestructorExceptionSpec(CXXRecordDecl *classDecl,
+ CXXDestructorDecl *destructor) {
+ // C++11 [class.dtor]p3:
+ // A declaration of a destructor that does not have an exception-
+ // specification is implicitly considered to have the same exception-
+ // specification as an implicit declaration.
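+ //
+ // For example, a user-declared '~S();' written without an
+ // exception-specification receives the same specification that an
+ // implicitly declared '~S()' would have had.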
+ const FunctionProtoType *dtorType = destructor->getType()->
+ getAs<FunctionProtoType>();
+ if (dtorType->hasExceptionSpec())
+ return;
+
+ ImplicitExceptionSpecification exceptSpec =
+ ComputeDefaultedDtorExceptionSpec(classDecl);
+
+ // Replace the destructor's type, building off the existing one. Fortunately,
+ // the only thing of interest in the destructor type is its extended info.
+ // The return and arguments are fixed.
+ FunctionProtoType::ExtProtoInfo epi = dtorType->getExtProtoInfo();
+ epi.ExceptionSpecType = exceptSpec.getExceptionSpecType();
+ epi.NumExceptions = exceptSpec.size();
+ epi.Exceptions = exceptSpec.data();
+ QualType ty = Context.getFunctionType(Context.VoidTy, 0, 0, epi);
+
+ destructor->setType(ty);
+
+ // FIXME: If the destructor has a body that could throw, and the newly created
+ // spec doesn't allow exceptions, we should emit a warning, because this
+ // change in behavior can break conforming C++03 programs at runtime.
+ // However, we don't have a body yet, so it needs to be done somewhere else.
+}
+
+/// \brief Builds a statement that copies/moves the given entity from \p From to
+/// \c To.
+///
+/// This routine is used to copy/move the members of a class with an
+/// implicitly-declared copy/move assignment operator. When the entities being
+/// copied are arrays, this routine builds for loops to copy them.
+///
+/// \param S The Sema object used for type-checking.
+///
+/// \param Loc The location where the implicit copy/move is being generated.
+///
+/// \param T The type of the expressions being copied/moved. Both expressions
+/// must have this type.
+///
+/// \param To The expression we are copying/moving to.
+///
+/// \param From The expression we are copying/moving from.
+///
+/// \param CopyingBaseSubobject Whether we're copying/moving a base subobject.
+/// Otherwise, it's a non-static member subobject.
+///
+/// \param Copying Whether we're copying or moving.
+///
+/// \param Depth Internal parameter recording the depth of the recursion.
+///
+/// \returns A statement or a loop that copies the expressions.
+static StmtResult
+BuildSingleCopyAssign(Sema &S, SourceLocation Loc, QualType T,
+ Expr *To, Expr *From,
+ bool CopyingBaseSubobject, bool Copying,
+ unsigned Depth = 0) {
+ // C++0x [class.copy]p28:
+ // Each subobject is assigned in the manner appropriate to its type:
+ //
+ // - if the subobject is of class type, as if by a call to operator= with
+ // the subobject as the object expression and the corresponding
+ // subobject of x as a single function argument (as if by explicit
+ // qualification; that is, ignoring any possible virtual overriding
+ // functions in more derived classes);
+ if (const RecordType *RecordTy = T->getAs<RecordType>()) {
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
+
+ // Look for operator=.
+ DeclarationName Name
+ = S.Context.DeclarationNames.getCXXOperatorName(OO_Equal);
+ LookupResult OpLookup(S, Name, Loc, Sema::LookupOrdinaryName);
+ S.LookupQualifiedName(OpLookup, ClassDecl, false);
+
+ // Filter out any result that isn't a copy/move-assignment operator.
+ LookupResult::Filter F = OpLookup.makeFilter();
+ while (F.hasNext()) {
+ NamedDecl *D = F.next();
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ if (Method->isCopyAssignmentOperator() ||
+ (!Copying && Method->isMoveAssignmentOperator()))
+ continue;
+
+ F.erase();
+ }
+ F.done();
+
+ // Suppress the protected check (C++ [class.protected]) for each of the
+ // assignment operators we found. This strange dance is required when
+ // we're assigning via a base class's copy-assignment operator. To
+ // ensure that we're getting the right base class subobject (without
+ // ambiguities), we need to cast "this" to that subobject type; to
+ // ensure that we don't go through the virtual call mechanism, we need
+ // to qualify the operator= name with the base class (see below). However,
+ // this means that if the base class has a protected copy assignment
+ // operator, the protected member access check will fail. So, we
+ // rewrite "protected" access to "public" access in this case, since we
+ // know by construction that we're calling from a derived class.
+ if (CopyingBaseSubobject) {
+ for (LookupResult::iterator L = OpLookup.begin(), LEnd = OpLookup.end();
+ L != LEnd; ++L) {
+ if (L.getAccess() == AS_protected)
+ L.setAccess(AS_public);
+ }
+ }
+
+ // Create the nested-name-specifier that will be used to qualify the
+ // reference to operator=; this is required to suppress the virtual
+ // call mechanism.
+ CXXScopeSpec SS;
+ const Type *CanonicalT = S.Context.getCanonicalType(T.getTypePtr());
+ SS.MakeTrivial(S.Context,
+ NestedNameSpecifier::Create(S.Context, 0, false,
+ CanonicalT),
+ Loc);
+
+ // Create the reference to operator=.
+ ExprResult OpEqualRef
+ = S.BuildMemberReferenceExpr(To, T, Loc, /*isArrow=*/false, SS,
+ /*TemplateKWLoc=*/SourceLocation(),
+ /*FirstQualifierInScope=*/0,
+ OpLookup,
+ /*TemplateArgs=*/0,
+ /*SuppressQualifierCheck=*/true);
+ if (OpEqualRef.isInvalid())
+ return StmtError();
+
+ // Build the call to the assignment operator.
+
+ ExprResult Call = S.BuildCallToMemberFunction(/*Scope=*/0,
+ OpEqualRef.takeAs<Expr>(),
+ Loc, &From, 1, Loc);
+ if (Call.isInvalid())
+ return StmtError();
+
+ return S.Owned(Call.takeAs<Stmt>());
+ }
+
+ // - if the subobject is of scalar type, the built-in assignment
+ // operator is used.
+ const ConstantArrayType *ArrayTy = S.Context.getAsConstantArrayType(T);
+ if (!ArrayTy) {
+ ExprResult Assignment = S.CreateBuiltinBinOp(Loc, BO_Assign, To, From);
+ if (Assignment.isInvalid())
+ return StmtError();
+
+ return S.Owned(Assignment.takeAs<Stmt>());
+ }
+
+ // - if the subobject is an array, each element is assigned, in the
+ // manner appropriate to the element type;
+
+ // Construct a loop over the array bounds, e.g.,
+ //
+ // for (__SIZE_TYPE__ i0 = 0; i0 != array-size; ++i0)
+ //
+ // that will copy each of the array elements.
+ QualType SizeType = S.Context.getSizeType();
+
+ // Create the iteration variable.
+ IdentifierInfo *IterationVarName = 0;
+ {
+ SmallString<8> Str;
+ llvm::raw_svector_ostream OS(Str);
+ OS << "__i" << Depth;
+ IterationVarName = &S.Context.Idents.get(OS.str());
+ }
+ VarDecl *IterationVar = VarDecl::Create(S.Context, S.CurContext, Loc, Loc,
+ IterationVarName, SizeType,
+ S.Context.getTrivialTypeSourceInfo(SizeType, Loc),
+ SC_None, SC_None);
+
+ // Initialize the iteration variable to zero.
+ llvm::APInt Zero(S.Context.getTypeSize(SizeType), 0);
+ IterationVar->setInit(IntegerLiteral::Create(S.Context, Zero, SizeType, Loc));
+
+ // Create a reference to the iteration variable; we'll use this several
+ // times throughout.
+ Expr *IterationVarRef
+ = S.BuildDeclRefExpr(IterationVar, SizeType, VK_LValue, Loc).take();
+ assert(IterationVarRef && "Reference to invented variable cannot fail!");
+ Expr *IterationVarRefRVal = S.DefaultLvalueConversion(IterationVarRef).take();
+ assert(IterationVarRefRVal && "Conversion of invented variable cannot fail!");
+
+ // Create the DeclStmt that holds the iteration variable.
+ Stmt *InitStmt = new (S.Context) DeclStmt(DeclGroupRef(IterationVar),Loc,Loc);
+
+ // Create the comparison against the array bound.
+ llvm::APInt Upper
+ = ArrayTy->getSize().zextOrTrunc(S.Context.getTypeSize(SizeType));
+ Expr *Comparison
+ = new (S.Context) BinaryOperator(IterationVarRefRVal,
+ IntegerLiteral::Create(S.Context, Upper, SizeType, Loc),
+ BO_NE, S.Context.BoolTy,
+ VK_RValue, OK_Ordinary, Loc);
+
+ // Create the pre-increment of the iteration variable.
+ Expr *Increment
+ = new (S.Context) UnaryOperator(IterationVarRef, UO_PreInc, SizeType,
+ VK_LValue, OK_Ordinary, Loc);
+
+ // Subscript the "from" and "to" expressions with the iteration variable.
+ From = AssertSuccess(S.CreateBuiltinArraySubscriptExpr(From, Loc,
+ IterationVarRefRVal,
+ Loc));
+ To = AssertSuccess(S.CreateBuiltinArraySubscriptExpr(To, Loc,
+ IterationVarRefRVal,
+ Loc));
+ if (!Copying) // Cast to rvalue
+ From = CastForMoving(S, From);
+
+ // Build the copy/move for an individual element of the array.
+ StmtResult Copy = BuildSingleCopyAssign(S, Loc, ArrayTy->getElementType(),
+ To, From, CopyingBaseSubobject,
+ Copying, Depth + 1);
+ if (Copy.isInvalid())
+ return StmtError();
+
+ // Construct the loop that copies all elements of this array.
+ return S.ActOnForStmt(Loc, Loc, InitStmt,
+ S.MakeFullExpr(Comparison),
+ 0, S.MakeFullExpr(Increment),
+ Loc, Copy.take());
+}
+
+std::pair<Sema::ImplicitExceptionSpecification, bool>
+Sema::ComputeDefaultedCopyAssignmentExceptionSpecAndConst(
+ CXXRecordDecl *ClassDecl) {
+ if (ClassDecl->isInvalidDecl())
+ return std::make_pair(ImplicitExceptionSpecification(Context), false);
+
+ // C++ [class.copy]p10:
+ // If the class definition does not explicitly declare a copy
+ // assignment operator, one is declared implicitly.
+ // The implicitly-defined copy assignment operator for a class X
+ // will have the form
+ //
+ // X& X::operator=(const X&)
+ //
+ // if
+ bool HasConstCopyAssignment = true;
+
+ // -- each direct base class B of X has a copy assignment operator
+ // whose parameter is of type const B&, const volatile B& or B,
+ // and
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
+ BaseEnd = ClassDecl->bases_end();
+ HasConstCopyAssignment && Base != BaseEnd; ++Base) {
+ // We'll handle this below
+ if (LangOpts.CPlusPlus0x && Base->isVirtual())
+ continue;
+
+ assert(!Base->getType()->isDependentType() &&
+ "Cannot generate implicit members for class with dependent bases.");
+ CXXRecordDecl *BaseClassDecl = Base->getType()->getAsCXXRecordDecl();
+ LookupCopyingAssignment(BaseClassDecl, Qualifiers::Const, false, 0,
+ &HasConstCopyAssignment);
+ }
+
+ // In C++11, the above citation has "or virtual" added
+ if (LangOpts.CPlusPlus0x) {
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
+ BaseEnd = ClassDecl->vbases_end();
+ HasConstCopyAssignment && Base != BaseEnd; ++Base) {
+ assert(!Base->getType()->isDependentType() &&
+ "Cannot generate implicit members for class with dependent bases.");
+ CXXRecordDecl *BaseClassDecl = Base->getType()->getAsCXXRecordDecl();
+ LookupCopyingAssignment(BaseClassDecl, Qualifiers::Const, false, 0,
+ &HasConstCopyAssignment);
+ }
+ }
+
+ // -- for all the nonstatic data members of X that are of a class
+ // type M (or array thereof), each such class type has a copy
+ // assignment operator whose parameter is of type const M&,
+ // const volatile M& or M.
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ HasConstCopyAssignment && Field != FieldEnd;
+ ++Field) {
+ QualType FieldType = Context.getBaseElementType((*Field)->getType());
+ if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
+ LookupCopyingAssignment(FieldClassDecl, Qualifiers::Const, false, 0,
+ &HasConstCopyAssignment);
+ }
+ }
+
+ // Otherwise, the implicitly declared copy assignment operator will
+ // have the form
+ //
+ // X& X::operator=(X&)
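+ //
+ // For example, 'struct M { M &operator=(M &); }; struct X { M m; };'
+ // leaves HasConstCopyAssignment false above, so X's implicit copy
+ // assignment operator is declared as 'X &X::operator=(X &)'.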
+
+ // C++ [except.spec]p14:
+ // An implicitly declared special member function (Clause 12) shall have an
+ // exception-specification. [...]
+
+ // It is unspecified whether an implicit copy assignment operator
+ // deduplicates the calls it makes to assignment operators of virtual
+ // bases, so this exception specification is effectively unspecified as
+ // well. Based on a similar decision made for constness in C++0x, we err
+ // on the side of assuming such calls are made regardless of whether they
+ // actually happen.
+ ImplicitExceptionSpecification ExceptSpec(Context);
+ unsigned ArgQuals = HasConstCopyAssignment ? Qualifiers::Const : 0;
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
+ BaseEnd = ClassDecl->bases_end();
+ Base != BaseEnd; ++Base) {
+ if (Base->isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (CXXMethodDecl *CopyAssign = LookupCopyingAssignment(BaseClassDecl,
+ ArgQuals, false, 0))
+ ExceptSpec.CalledDecl(CopyAssign);
+ }
+
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
+ BaseEnd = ClassDecl->vbases_end();
+ Base != BaseEnd; ++Base) {
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (CXXMethodDecl *CopyAssign = LookupCopyingAssignment(BaseClassDecl,
+ ArgQuals, false, 0))
+ ExceptSpec.CalledDecl(CopyAssign);
+ }
+
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ Field != FieldEnd;
+ ++Field) {
+ QualType FieldType = Context.getBaseElementType((*Field)->getType());
+ if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
+ if (CXXMethodDecl *CopyAssign =
+ LookupCopyingAssignment(FieldClassDecl, ArgQuals, false, 0))
+ ExceptSpec.CalledDecl(CopyAssign);
+ }
+ }
+
+ return std::make_pair(ExceptSpec, HasConstCopyAssignment);
+}
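
// Editorial sketch (not part of this change): the const-vs-non-const rule
// computed above, in source terms for two hypothetical classes. If every base
// and member copy assignment can take a const reference, the implicit
// operator= takes "const X&"; otherwise it falls back to "X&".
struct HasConstAssign {
  HasConstAssign &operator=(const HasConstAssign &);
};
struct HasNonConstAssign {
  HasNonConstAssign &operator=(HasNonConstAssign &);
};

struct A { HasConstAssign m; };    // implicit: A &A::operator=(const A &)
struct B { HasNonConstAssign m; }; // implicit: B &B::operator=(B &)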
+
+CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
+ // Note: The following rules are largely analogous to the copy
+ // constructor rules. Note that virtual bases are not taken into account
+ // for determining the argument type of the operator. Note also that
+ // operators taking an object instead of a reference are allowed.
+
+ ImplicitExceptionSpecification Spec(Context);
+ bool Const;
+ llvm::tie(Spec, Const) =
+ ComputeDefaultedCopyAssignmentExceptionSpecAndConst(ClassDecl);
+
+ QualType ArgType = Context.getTypeDeclType(ClassDecl);
+ QualType RetType = Context.getLValueReferenceType(ArgType);
+ if (Const)
+ ArgType = ArgType.withConst();
+ ArgType = Context.getLValueReferenceType(ArgType);
+
+ // An implicitly-declared copy assignment operator is an inline public
+ // member of its class.
+ FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
+ DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
+ SourceLocation ClassLoc = ClassDecl->getLocation();
+ DeclarationNameInfo NameInfo(Name, ClassLoc);
+ CXXMethodDecl *CopyAssignment
+ = CXXMethodDecl::Create(Context, ClassDecl, ClassLoc, NameInfo,
+ Context.getFunctionType(RetType, &ArgType, 1, EPI),
+ /*TInfo=*/0, /*isStatic=*/false,
+ /*StorageClassAsWritten=*/SC_None,
+ /*isInline=*/true, /*isConstexpr=*/false,
+ SourceLocation());
+ CopyAssignment->setAccess(AS_public);
+ CopyAssignment->setDefaulted();
+ CopyAssignment->setImplicit();
+ CopyAssignment->setTrivial(ClassDecl->hasTrivialCopyAssignment());
+
+ // Add the parameter to the operator.
+ ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyAssignment,
+ ClassLoc, ClassLoc, /*Id=*/0,
+ ArgType, /*TInfo=*/0,
+ SC_None,
+ SC_None, 0);
+ CopyAssignment->setParams(FromParam);
+
+ // Note that we have added this copy-assignment operator.
+ ++ASTContext::NumImplicitCopyAssignmentOperatorsDeclared;
+
+ if (Scope *S = getScopeForContext(ClassDecl))
+ PushOnScopeChains(CopyAssignment, S, false);
+ ClassDecl->addDecl(CopyAssignment);
+
+ // C++0x [class.copy]p19:
+ // .... If the class definition does not explicitly declare a copy
+ // assignment operator, there is no user-declared move constructor, and
+ // there is no user-declared move assignment operator, a copy assignment
+ // operator is implicitly declared as defaulted.
+ if (ShouldDeleteSpecialMember(CopyAssignment, CXXCopyAssignment))
+ CopyAssignment->setDeletedAsWritten();
+
+ AddOverriddenMethods(ClassDecl, CopyAssignment);
+ return CopyAssignment;
+}
+
+void Sema::DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
+ CXXMethodDecl *CopyAssignOperator) {
+ assert((CopyAssignOperator->isDefaulted() &&
+ CopyAssignOperator->isOverloadedOperator() &&
+ CopyAssignOperator->getOverloadedOperator() == OO_Equal &&
+ !CopyAssignOperator->doesThisDeclarationHaveABody() &&
+ !CopyAssignOperator->isDeleted()) &&
+ "DefineImplicitCopyAssignment called for wrong function");
+
+ CXXRecordDecl *ClassDecl = CopyAssignOperator->getParent();
+
+ if (ClassDecl->isInvalidDecl() || CopyAssignOperator->isInvalidDecl()) {
+ CopyAssignOperator->setInvalidDecl();
+ return;
+ }
+
+ CopyAssignOperator->setUsed();
+
+ ImplicitlyDefinedFunctionScope Scope(*this, CopyAssignOperator);
+ DiagnosticErrorTrap Trap(Diags);
+
+ // C++0x [class.copy]p30:
+ // The implicitly-defined or explicitly-defaulted copy assignment operator
+ // for a non-union class X performs memberwise copy assignment of its
+ // subobjects. The direct base classes of X are assigned first, in the
+ // order of their declaration in the base-specifier-list, and then the
+ // immediate non-static data members of X are assigned, in the order in
+ // which they were declared in the class definition.
+
+ // The statements that form the synthesized function body.
+ ASTOwningVector<Stmt*> Statements(*this);
+
+ // The parameter for the "other" object, which we are copying from.
+ ParmVarDecl *Other = CopyAssignOperator->getParamDecl(0);
+ Qualifiers OtherQuals = Other->getType().getQualifiers();
+ QualType OtherRefType = Other->getType();
+ if (const LValueReferenceType *OtherRef
+ = OtherRefType->getAs<LValueReferenceType>()) {
+ OtherRefType = OtherRef->getPointeeType();
+ OtherQuals = OtherRefType.getQualifiers();
+ }
+
+ // Our location for everything implicitly-generated.
+ SourceLocation Loc = CopyAssignOperator->getLocation();
+
+ // Construct a reference to the "other" object. We'll be using this
+ // throughout the generated ASTs.
+ Expr *OtherRef = BuildDeclRefExpr(Other, OtherRefType, VK_LValue, Loc).take();
+ assert(OtherRef && "Reference to parameter cannot fail!");
+
+ // Construct the "this" pointer. We'll be using this throughout the generated
+ // ASTs.
+ Expr *This = ActOnCXXThis(Loc).takeAs<Expr>();
+ assert(This && "Reference to this cannot fail!");
+
+ // Assign base classes.
+ bool Invalid = false;
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
+ E = ClassDecl->bases_end(); Base != E; ++Base) {
+ // Form the assignment:
+ // static_cast<Base*>(this)->Base::operator=(static_cast<Base&>(other));
+ QualType BaseType = Base->getType().getUnqualifiedType();
+ if (!BaseType->isRecordType()) {
+ Invalid = true;
+ continue;
+ }
+
+ CXXCastPath BasePath;
+ BasePath.push_back(Base);
+
+ // Construct the "from" expression, which is an implicit cast to the
+ // appropriately-qualified base type.
+ Expr *From = OtherRef;
+ From = ImpCastExprToType(From, Context.getQualifiedType(BaseType, OtherQuals),
+ CK_UncheckedDerivedToBase,
+ VK_LValue, &BasePath).take();
+
+ // Dereference "this".
+ ExprResult To = CreateBuiltinUnaryOp(Loc, UO_Deref, This);
+
+ // Implicitly cast "this" to the appropriately-qualified base type.
+ To = ImpCastExprToType(To.take(),
+ Context.getCVRQualifiedType(BaseType,
+ CopyAssignOperator->getTypeQualifiers()),
+ CK_UncheckedDerivedToBase,
+ VK_LValue, &BasePath);
+
+ // Build the copy.
+ StmtResult Copy = BuildSingleCopyAssign(*this, Loc, BaseType,
+ To.get(), From,
+ /*CopyingBaseSubobject=*/true,
+ /*Copying=*/true);
+ if (Copy.isInvalid()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXCopyAssignment << Context.getTagDeclType(ClassDecl);
+ CopyAssignOperator->setInvalidDecl();
+ return;
+ }
+
+ // Success! Record the copy.
+ Statements.push_back(Copy.takeAs<Expr>());
+ }
+
+ // \brief Reference to the __builtin_memcpy function.
+ Expr *BuiltinMemCpyRef = 0;
+ // \brief Reference to the __builtin_objc_memmove_collectable function.
+ Expr *CollectableMemCpyRef = 0;
+
+ // Assign non-static members.
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ Field != FieldEnd; ++Field) {
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ // Check for members of reference type; we can't copy those.
+ if (Field->getType()->isReferenceType()) {
+ Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign)
+ << Context.getTagDeclType(ClassDecl) << 0 << Field->getDeclName();
+ Diag(Field->getLocation(), diag::note_declared_at);
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXCopyAssignment << Context.getTagDeclType(ClassDecl);
+ Invalid = true;
+ continue;
+ }
+
+ // Check for members of const-qualified, non-class type.
+ QualType BaseType = Context.getBaseElementType(Field->getType());
+ if (!BaseType->getAs<RecordType>() && BaseType.isConstQualified()) {
+ Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign)
+ << Context.getTagDeclType(ClassDecl) << 1 << Field->getDeclName();
+ Diag(Field->getLocation(), diag::note_declared_at);
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXCopyAssignment << Context.getTagDeclType(ClassDecl);
+ Invalid = true;
+ continue;
+ }
+
+ // Suppress assigning zero-width bitfields.
+ if (Field->isBitField() && Field->getBitWidthValue(Context) == 0)
+ continue;
+
+ QualType FieldType = Field->getType().getNonReferenceType();
+ if (FieldType->isIncompleteArrayType()) {
+ assert(ClassDecl->hasFlexibleArrayMember() &&
+ "Incomplete array type is not valid");
+ continue;
+ }
+
+ // Build references to the field in the object we're copying from and to.
+ CXXScopeSpec SS; // Intentionally empty
+ LookupResult MemberLookup(*this, Field->getDeclName(), Loc,
+ LookupMemberName);
+ MemberLookup.addDecl(*Field);
+ MemberLookup.resolveKind();
+ ExprResult From = BuildMemberReferenceExpr(OtherRef, OtherRefType,
+ Loc, /*IsArrow=*/false,
+ SS, SourceLocation(), 0,
+ MemberLookup, 0);
+ ExprResult To = BuildMemberReferenceExpr(This, This->getType(),
+ Loc, /*IsArrow=*/true,
+ SS, SourceLocation(), 0,
+ MemberLookup, 0);
+ assert(!From.isInvalid() && "Implicit field reference cannot fail");
+ assert(!To.isInvalid() && "Implicit field reference cannot fail");
+
+ // If the field should be copied with __builtin_memcpy rather than via
+ // explicit assignments, do so. This optimization only applies for arrays
+ // of scalars and arrays of class type with trivial copy-assignment
+ // operators.
+ if (FieldType->isArrayType() && !FieldType.isVolatileQualified()
+ && BaseType.hasTrivialAssignment(Context, /*Copying=*/true)) {
+ // Compute the size of the memory buffer to be copied.
+ QualType SizeType = Context.getSizeType();
+ llvm::APInt Size(Context.getTypeSize(SizeType),
+ Context.getTypeSizeInChars(BaseType).getQuantity());
+ for (const ConstantArrayType *Array
+ = Context.getAsConstantArrayType(FieldType);
+ Array;
+ Array = Context.getAsConstantArrayType(Array->getElementType())) {
+ llvm::APInt ArraySize
+ = Array->getSize().zextOrTrunc(Size.getBitWidth());
+ Size *= ArraySize;
+ }
+
+ // Take the address of the field references for "from" and "to".
+ From = CreateBuiltinUnaryOp(Loc, UO_AddrOf, From.get());
+ To = CreateBuiltinUnaryOp(Loc, UO_AddrOf, To.get());
+
+ bool NeedsCollectableMemCpy =
+ (BaseType->isRecordType() &&
+ BaseType->getAs<RecordType>()->getDecl()->hasObjectMember());
+
+ if (NeedsCollectableMemCpy) {
+ if (!CollectableMemCpyRef) {
+ // Create a reference to the __builtin_objc_memmove_collectable function.
+ LookupResult R(*this,
+ &Context.Idents.get("__builtin_objc_memmove_collectable"),
+ Loc, LookupOrdinaryName);
+ LookupName(R, TUScope, true);
+
+ FunctionDecl *CollectableMemCpy = R.getAsSingle<FunctionDecl>();
+ if (!CollectableMemCpy) {
+ // Something went horribly wrong earlier, and we will have
+ // complained about it.
+ Invalid = true;
+ continue;
+ }
+
+ CollectableMemCpyRef = BuildDeclRefExpr(CollectableMemCpy,
+ CollectableMemCpy->getType(),
+ VK_LValue, Loc, 0).take();
+ assert(CollectableMemCpyRef && "Builtin reference cannot fail");
+ }
+ }
+ // Create a reference to the __builtin_memcpy builtin function.
+ else if (!BuiltinMemCpyRef) {
+ LookupResult R(*this, &Context.Idents.get("__builtin_memcpy"), Loc,
+ LookupOrdinaryName);
+ LookupName(R, TUScope, true);
+
+ FunctionDecl *BuiltinMemCpy = R.getAsSingle<FunctionDecl>();
+ if (!BuiltinMemCpy) {
+ // Something went horribly wrong earlier, and we will have complained
+ // about it.
+ Invalid = true;
+ continue;
+ }
+
+ BuiltinMemCpyRef = BuildDeclRefExpr(BuiltinMemCpy,
+ BuiltinMemCpy->getType(),
+ VK_LValue, Loc, 0).take();
+ assert(BuiltinMemCpyRef && "Builtin reference cannot fail");
+ }
+
+ ASTOwningVector<Expr*> CallArgs(*this);
+ CallArgs.push_back(To.takeAs<Expr>());
+ CallArgs.push_back(From.takeAs<Expr>());
+ CallArgs.push_back(IntegerLiteral::Create(Context, Size, SizeType, Loc));
+ ExprResult Call = ExprError();
+ if (NeedsCollectableMemCpy)
+ Call = ActOnCallExpr(/*Scope=*/0,
+ CollectableMemCpyRef,
+ Loc, move_arg(CallArgs),
+ Loc);
+ else
+ Call = ActOnCallExpr(/*Scope=*/0,
+ BuiltinMemCpyRef,
+ Loc, move_arg(CallArgs),
+ Loc);
+
+ assert(!Call.isInvalid() && "Call to __builtin_memcpy cannot fail!");
+ Statements.push_back(Call.takeAs<Expr>());
+ continue;
+ }
+
+ // Build the copy of this field.
+ StmtResult Copy = BuildSingleCopyAssign(*this, Loc, FieldType,
+ To.get(), From.get(),
+ /*CopyingBaseSubobject=*/false,
+ /*Copying=*/true);
+ if (Copy.isInvalid()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXCopyAssignment << Context.getTagDeclType(ClassDecl);
+ CopyAssignOperator->setInvalidDecl();
+ return;
+ }
+
+ // Success! Record the copy.
+ Statements.push_back(Copy.takeAs<Stmt>());
+ }
+
+ if (!Invalid) {
+ // Add a "return *this;"
+ ExprResult ThisObj = CreateBuiltinUnaryOp(Loc, UO_Deref, This);
+
+ StmtResult Return = ActOnReturnStmt(Loc, ThisObj.get());
+ if (Return.isInvalid())
+ Invalid = true;
+ else {
+ Statements.push_back(Return.takeAs<Stmt>());
+
+ if (Trap.hasErrorOccurred()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXCopyAssignment << Context.getTagDeclType(ClassDecl);
+ Invalid = true;
+ }
+ }
+ }
+
+ if (Invalid) {
+ CopyAssignOperator->setInvalidDecl();
+ return;
+ }
+
+ StmtResult Body;
+ {
+ CompoundScopeRAII CompoundScope(*this);
+ Body = ActOnCompoundStmt(Loc, Loc, move_arg(Statements),
+ /*isStmtExpr=*/false);
+ assert(!Body.isInvalid() && "Compound statement creation cannot fail");
+ }
+ CopyAssignOperator->setBody(Body.takeAs<Stmt>());
+
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(CopyAssignOperator);
+ }
+}
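
// Editorial sketch (not part of this change): roughly the body synthesized
// above, written out by hand for a hypothetical class D. Direct bases are
// assigned first, in declaration order, then non-static data members, and
// the body ends with "return *this;". Arrays of scalars (like "arr") are
// copied with __builtin_memcpy by the code above rather than element-wise.
struct DBase {
  DBase &operator=(const DBase &) { return *this; }
};
struct DMember {
  DMember &operator=(const DMember &) { return *this; }
};

struct D : DBase {
  DMember m;
  int arr[4];
  D &operator=(const D &other) {
    DBase::operator=(other);          // base subobjects first
    m = other.m;                      // then members, in declaration order
    for (int i = 0; i != 4; ++i)      // stands in for the memcpy fast path
      arr[i] = other.arr[i];
    return *this;                     // and finally "return *this;"
  }
};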
+
+Sema::ImplicitExceptionSpecification
+Sema::ComputeDefaultedMoveAssignmentExceptionSpec(CXXRecordDecl *ClassDecl) {
+ ImplicitExceptionSpecification ExceptSpec(Context);
+
+ if (ClassDecl->isInvalidDecl())
+ return ExceptSpec;
+
+ // C++0x [except.spec]p14:
+ // An implicitly declared special member function (Clause 12) shall have an
+ // exception-specification. [...]
+
+ // It is unspecified whether an implicit move assignment operator attempts
+ // to deduplicate calls to assignment operators of virtual bases. As such,
+ // this exception specification is effectively unspecified.
+ // Based on a similar decision made for constness in C++0x, we're erring on
+ // the side of assuming such calls to be made regardless of whether they
+ // actually happen.
+ // Note that a move constructor is not implicitly declared when there are
+ // virtual bases, but it can still be user-declared and explicitly defaulted.
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
+ BaseEnd = ClassDecl->bases_end();
+ Base != BaseEnd; ++Base) {
+ if (Base->isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (CXXMethodDecl *MoveAssign = LookupMovingAssignment(BaseClassDecl,
+ false, 0))
+ ExceptSpec.CalledDecl(MoveAssign);
+ }
+
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
+ BaseEnd = ClassDecl->vbases_end();
+ Base != BaseEnd; ++Base) {
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (CXXMethodDecl *MoveAssign = LookupMovingAssignment(BaseClassDecl,
+ false, 0))
+ ExceptSpec.CalledDecl(MoveAssign);
+ }
+
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ Field != FieldEnd;
+ ++Field) {
+ QualType FieldType = Context.getBaseElementType((*Field)->getType());
+ if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
+ if (CXXMethodDecl *MoveAssign = LookupMovingAssignment(FieldClassDecl,
+ false, 0))
+ ExceptSpec.CalledDecl(MoveAssign);
+ }
+ }
+
+ return ExceptSpec;
+}
+
+/// Determine whether the class type has any direct or indirect virtual base
+/// classes which have a non-trivial move assignment operator.
+static bool
+hasVirtualBaseWithNonTrivialMoveAssignment(Sema &S, CXXRecordDecl *ClassDecl) {
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
+ BaseEnd = ClassDecl->vbases_end();
+ Base != BaseEnd; ++Base) {
+ CXXRecordDecl *BaseClass =
+ cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+
+ // Try to declare the move assignment. If it would be deleted, then the
+ // class does not have a non-trivial move assignment.
+ if (BaseClass->needsImplicitMoveAssignment())
+ S.DeclareImplicitMoveAssignment(BaseClass);
+
+ // If the class has both a trivial move assignment and a non-trivial move
+ // assignment, hasTrivialMoveAssignment() is false.
+ if (BaseClass->hasDeclaredMoveAssignment() &&
+ !BaseClass->hasTrivialMoveAssignment())
+ return true;
+ }
+
+ return false;
+}
+
+/// Determine whether the given type either has a move constructor or is
+/// trivially copyable.
+static bool
+hasMoveOrIsTriviallyCopyable(Sema &S, QualType Type, bool IsConstructor) {
+ Type = S.Context.getBaseElementType(Type);
+
+ // FIXME: Technically, non-trivially-copyable non-class types, such as
+ // reference types, are supposed to return false here, but that appears
+ // to be a standard defect.
+ CXXRecordDecl *ClassDecl = Type->getAsCXXRecordDecl();
+ if (!ClassDecl)
+ return true;
+
+ if (Type.isTriviallyCopyableType(S.Context))
+ return true;
+
+ if (IsConstructor) {
+ if (ClassDecl->needsImplicitMoveConstructor())
+ S.DeclareImplicitMoveConstructor(ClassDecl);
+ return ClassDecl->hasDeclaredMoveConstructor();
+ }
+
+ if (ClassDecl->needsImplicitMoveAssignment())
+ S.DeclareImplicitMoveAssignment(ClassDecl);
+ return ClassDecl->hasDeclaredMoveAssignment();
+}
+
+/// Determine whether all non-static data members and direct or virtual bases
+/// of class \p ClassDecl have either a move operation, or are trivially
+/// copyable.
+static bool subobjectsHaveMoveOrTrivialCopy(Sema &S, CXXRecordDecl *ClassDecl,
+ bool IsConstructor) {
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
+ BaseEnd = ClassDecl->bases_end();
+ Base != BaseEnd; ++Base) {
+ if (Base->isVirtual())
+ continue;
+
+ if (!hasMoveOrIsTriviallyCopyable(S, Base->getType(), IsConstructor))
+ return false;
+ }
+
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
+ BaseEnd = ClassDecl->vbases_end();
+ Base != BaseEnd; ++Base) {
+ if (!hasMoveOrIsTriviallyCopyable(S, Base->getType(), IsConstructor))
+ return false;
+ }
+
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ Field != FieldEnd; ++Field) {
+ if (!hasMoveOrIsTriviallyCopyable(S, (*Field)->getType(), IsConstructor))
+ return false;
+ }
+
+ return true;
+}
+
+CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
+ // C++11 [class.copy]p20:
+ // If the definition of a class X does not explicitly declare a move
+ // assignment operator, one will be implicitly declared as defaulted
+ // if and only if:
+ //
+ // - [first 4 bullets]
+ assert(ClassDecl->needsImplicitMoveAssignment());
+
+ // [Checked after we build the declaration]
+ // - the move assignment operator would not be implicitly defined as
+ // deleted,
+
+ // [DR1402]:
+ // - X has no direct or indirect virtual base class with a non-trivial
+ // move assignment operator, and
+ // - each of X's non-static data members and direct or virtual base classes
+ // has a type that either has a move assignment operator or is trivially
+ // copyable.
+ if (hasVirtualBaseWithNonTrivialMoveAssignment(*this, ClassDecl) ||
+ !subobjectsHaveMoveOrTrivialCopy(*this, ClassDecl,/*Constructor*/false)) {
+ ClassDecl->setFailedImplicitMoveAssignment();
+ return 0;
+ }
+
+ // Note: The following rules are largely analogous to the move
+ // constructor rules.
+
+ ImplicitExceptionSpecification Spec(
+ ComputeDefaultedMoveAssignmentExceptionSpec(ClassDecl));
+
+ QualType ArgType = Context.getTypeDeclType(ClassDecl);
+ QualType RetType = Context.getLValueReferenceType(ArgType);
+ ArgType = Context.getRValueReferenceType(ArgType);
+
+ // An implicitly-declared move assignment operator is an inline public
+ // member of its class.
+ FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
+ DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
+ SourceLocation ClassLoc = ClassDecl->getLocation();
+ DeclarationNameInfo NameInfo(Name, ClassLoc);
+ CXXMethodDecl *MoveAssignment
+ = CXXMethodDecl::Create(Context, ClassDecl, ClassLoc, NameInfo,
+ Context.getFunctionType(RetType, &ArgType, 1, EPI),
+ /*TInfo=*/0, /*isStatic=*/false,
+ /*StorageClassAsWritten=*/SC_None,
+ /*isInline=*/true,
+ /*isConstexpr=*/false,
+ SourceLocation());
+ MoveAssignment->setAccess(AS_public);
+ MoveAssignment->setDefaulted();
+ MoveAssignment->setImplicit();
+ MoveAssignment->setTrivial(ClassDecl->hasTrivialMoveAssignment());
+
+ // Add the parameter to the operator.
+ ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveAssignment,
+ ClassLoc, ClassLoc, /*Id=*/0,
+ ArgType, /*TInfo=*/0,
+ SC_None,
+ SC_None, 0);
+ MoveAssignment->setParams(FromParam);
+
+ // Note that we have added this move-assignment operator.
+ ++ASTContext::NumImplicitMoveAssignmentOperatorsDeclared;
+
+ // C++0x [class.copy]p9:
+ // If the definition of a class X does not explicitly declare a move
+ // assignment operator, one will be implicitly declared as defaulted if and
+ // only if:
+ // [...]
+ // - the move assignment operator would not be implicitly defined as
+ // deleted.
+ if (ShouldDeleteSpecialMember(MoveAssignment, CXXMoveAssignment)) {
+ // Cache this result so that we don't try to generate this over and over
+ // on every lookup, leaking memory and wasting time.
+ ClassDecl->setFailedImplicitMoveAssignment();
+ return 0;
+ }
+
+ if (Scope *S = getScopeForContext(ClassDecl))
+ PushOnScopeChains(MoveAssignment, S, false);
+ ClassDecl->addDecl(MoveAssignment);
+
+ AddOverriddenMethods(ClassDecl, MoveAssignment);
+ return MoveAssignment;
+}
+
+void Sema::DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
+ CXXMethodDecl *MoveAssignOperator) {
+ assert((MoveAssignOperator->isDefaulted() &&
+ MoveAssignOperator->isOverloadedOperator() &&
+ MoveAssignOperator->getOverloadedOperator() == OO_Equal &&
+ !MoveAssignOperator->doesThisDeclarationHaveABody() &&
+ !MoveAssignOperator->isDeleted()) &&
+ "DefineImplicitMoveAssignment called for wrong function");
+
+ CXXRecordDecl *ClassDecl = MoveAssignOperator->getParent();
+
+ if (ClassDecl->isInvalidDecl() || MoveAssignOperator->isInvalidDecl()) {
+ MoveAssignOperator->setInvalidDecl();
+ return;
+ }
+
+ MoveAssignOperator->setUsed();
+
+ ImplicitlyDefinedFunctionScope Scope(*this, MoveAssignOperator);
+ DiagnosticErrorTrap Trap(Diags);
+
+ // C++0x [class.copy]p28:
+ // The implicitly-defined or explicitly-defaulted move assignment operator
+ // for a non-union class X performs memberwise move assignment of its
+ // subobjects. The direct base classes of X are assigned first, in the
+ // order of their declaration in the base-specifier-list, and then the
+ // immediate non-static data members of X are assigned, in the order in
+ // which they were declared in the class definition.
+
+ // The statements that form the synthesized function body.
+ ASTOwningVector<Stmt*> Statements(*this);
+
+ // The parameter for the "other" object, which we are moving from.
+ ParmVarDecl *Other = MoveAssignOperator->getParamDecl(0);
+ QualType OtherRefType = Other->getType()->
+ getAs<RValueReferenceType>()->getPointeeType();
+ assert(OtherRefType.getQualifiers() == 0 &&
+ "Bad argument type of defaulted move assignment");
+
+ // Our location for everything implicitly-generated.
+ SourceLocation Loc = MoveAssignOperator->getLocation();
+
+ // Construct a reference to the "other" object. We'll be using this
+ // throughout the generated ASTs.
+ Expr *OtherRef = BuildDeclRefExpr(Other, OtherRefType, VK_LValue, Loc).take();
+ assert(OtherRef && "Reference to parameter cannot fail!");
+ // Cast to rvalue.
+ OtherRef = CastForMoving(*this, OtherRef);
+
+ // Construct the "this" pointer. We'll be using this throughout the generated
+ // ASTs.
+ Expr *This = ActOnCXXThis(Loc).takeAs<Expr>();
+ assert(This && "Reference to this cannot fail!");
+
+ // Assign base classes.
+ bool Invalid = false;
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
+ E = ClassDecl->bases_end(); Base != E; ++Base) {
+ // Form the assignment:
+ // static_cast<Base*>(this)->Base::operator=(static_cast<Base&&>(other));
+ QualType BaseType = Base->getType().getUnqualifiedType();
+ if (!BaseType->isRecordType()) {
+ Invalid = true;
+ continue;
+ }
+
+ CXXCastPath BasePath;
+ BasePath.push_back(Base);
+
+ // Construct the "from" expression, which is an implicit cast to the
+ // appropriately-qualified base type.
+ Expr *From = OtherRef;
+ From = ImpCastExprToType(From, BaseType, CK_UncheckedDerivedToBase,
+ VK_XValue, &BasePath).take();
+
+ // Dereference "this".
+ ExprResult To = CreateBuiltinUnaryOp(Loc, UO_Deref, This);
+
+ // Implicitly cast "this" to the appropriately-qualified base type.
+ To = ImpCastExprToType(To.take(),
+ Context.getCVRQualifiedType(BaseType,
+ MoveAssignOperator->getTypeQualifiers()),
+ CK_UncheckedDerivedToBase,
+ VK_LValue, &BasePath);
+
+ // Build the move.
+ StmtResult Move = BuildSingleCopyAssign(*this, Loc, BaseType,
+ To.get(), From,
+ /*CopyingBaseSubobject=*/true,
+ /*Copying=*/false);
+ if (Move.isInvalid()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXMoveAssignment << Context.getTagDeclType(ClassDecl);
+ MoveAssignOperator->setInvalidDecl();
+ return;
+ }
+
+ // Success! Record the move.
+ Statements.push_back(Move.takeAs<Expr>());
+ }
+
+ // \brief Reference to the __builtin_memcpy function.
+ Expr *BuiltinMemCpyRef = 0;
+ // \brief Reference to the __builtin_objc_memmove_collectable function.
+ Expr *CollectableMemCpyRef = 0;
+
+ // Assign non-static members.
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ Field != FieldEnd; ++Field) {
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ // Check for members of reference type; we can't move those.
+ if (Field->getType()->isReferenceType()) {
+ Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign)
+ << Context.getTagDeclType(ClassDecl) << 0 << Field->getDeclName();
+ Diag(Field->getLocation(), diag::note_declared_at);
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXMoveAssignment << Context.getTagDeclType(ClassDecl);
+ Invalid = true;
+ continue;
+ }
+
+ // Check for members of const-qualified, non-class type.
+ QualType BaseType = Context.getBaseElementType(Field->getType());
+ if (!BaseType->getAs<RecordType>() && BaseType.isConstQualified()) {
+ Diag(ClassDecl->getLocation(), diag::err_uninitialized_member_for_assign)
+ << Context.getTagDeclType(ClassDecl) << 1 << Field->getDeclName();
+ Diag(Field->getLocation(), diag::note_declared_at);
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXMoveAssignment << Context.getTagDeclType(ClassDecl);
+ Invalid = true;
+ continue;
+ }
+
+ // Suppress assigning zero-width bitfields.
+ if (Field->isBitField() && Field->getBitWidthValue(Context) == 0)
+ continue;
+
+ QualType FieldType = Field->getType().getNonReferenceType();
+ if (FieldType->isIncompleteArrayType()) {
+ assert(ClassDecl->hasFlexibleArrayMember() &&
+ "Incomplete array type is not valid");
+ continue;
+ }
+
+ // Build references to the field in the object we're copying from and to.
+ CXXScopeSpec SS; // Intentionally empty
+ LookupResult MemberLookup(*this, Field->getDeclName(), Loc,
+ LookupMemberName);
+ MemberLookup.addDecl(*Field);
+ MemberLookup.resolveKind();
+ ExprResult From = BuildMemberReferenceExpr(OtherRef, OtherRefType,
+ Loc, /*IsArrow=*/false,
+ SS, SourceLocation(), 0,
+ MemberLookup, 0);
+ ExprResult To = BuildMemberReferenceExpr(This, This->getType(),
+ Loc, /*IsArrow=*/true,
+ SS, SourceLocation(), 0,
+ MemberLookup, 0);
+ assert(!From.isInvalid() && "Implicit field reference cannot fail");
+ assert(!To.isInvalid() && "Implicit field reference cannot fail");
+
+ assert(!From.get()->isLValue() && // could be xvalue or prvalue
+ "Member reference with rvalue base must be rvalue except for reference "
+ "members, which aren't allowed for move assignment.");
+
+ // If the field should be copied with __builtin_memcpy rather than via
+ // explicit assignments, do so. This optimization only applies for arrays
+ // of scalars and arrays of class type with trivial move-assignment
+ // operators.
+ if (FieldType->isArrayType() && !FieldType.isVolatileQualified()
+ && BaseType.hasTrivialAssignment(Context, /*Copying=*/false)) {
+ // Compute the size of the memory buffer to be copied.
+ QualType SizeType = Context.getSizeType();
+ llvm::APInt Size(Context.getTypeSize(SizeType),
+ Context.getTypeSizeInChars(BaseType).getQuantity());
+ for (const ConstantArrayType *Array
+ = Context.getAsConstantArrayType(FieldType);
+ Array;
+ Array = Context.getAsConstantArrayType(Array->getElementType())) {
+ llvm::APInt ArraySize
+ = Array->getSize().zextOrTrunc(Size.getBitWidth());
+ Size *= ArraySize;
+ }
+
+ // Take the address of the field references for "from" and "to". We
+ // directly construct UnaryOperators here because semantic analysis
+ // does not permit us to take the address of an xvalue.
+ From = new (Context) UnaryOperator(From.get(), UO_AddrOf,
+ Context.getPointerType(From.get()->getType()),
+ VK_RValue, OK_Ordinary, Loc);
+ To = new (Context) UnaryOperator(To.get(), UO_AddrOf,
+ Context.getPointerType(To.get()->getType()),
+ VK_RValue, OK_Ordinary, Loc);
+
+ bool NeedsCollectableMemCpy =
+ (BaseType->isRecordType() &&
+ BaseType->getAs<RecordType>()->getDecl()->hasObjectMember());
+
+ if (NeedsCollectableMemCpy) {
+ if (!CollectableMemCpyRef) {
+ // Create a reference to the __builtin_objc_memmove_collectable function.
+ LookupResult R(*this,
+ &Context.Idents.get("__builtin_objc_memmove_collectable"),
+ Loc, LookupOrdinaryName);
+ LookupName(R, TUScope, true);
+
+ FunctionDecl *CollectableMemCpy = R.getAsSingle<FunctionDecl>();
+ if (!CollectableMemCpy) {
+ // Something went horribly wrong earlier, and we will have
+ // complained about it.
+ Invalid = true;
+ continue;
+ }
+
+ CollectableMemCpyRef = BuildDeclRefExpr(CollectableMemCpy,
+ CollectableMemCpy->getType(),
+ VK_LValue, Loc, 0).take();
+ assert(CollectableMemCpyRef && "Builtin reference cannot fail");
+ }
+ }
+ // Create a reference to the __builtin_memcpy builtin function.
+ else if (!BuiltinMemCpyRef) {
+ LookupResult R(*this, &Context.Idents.get("__builtin_memcpy"), Loc,
+ LookupOrdinaryName);
+ LookupName(R, TUScope, true);
+
+ FunctionDecl *BuiltinMemCpy = R.getAsSingle<FunctionDecl>();
+ if (!BuiltinMemCpy) {
+ // Something went horribly wrong earlier, and we will have complained
+ // about it.
+ Invalid = true;
+ continue;
+ }
+
+ BuiltinMemCpyRef = BuildDeclRefExpr(BuiltinMemCpy,
+ BuiltinMemCpy->getType(),
+ VK_LValue, Loc, 0).take();
+ assert(BuiltinMemCpyRef && "Builtin reference cannot fail");
+ }
+
+ ASTOwningVector<Expr*> CallArgs(*this);
+ CallArgs.push_back(To.takeAs<Expr>());
+ CallArgs.push_back(From.takeAs<Expr>());
+ CallArgs.push_back(IntegerLiteral::Create(Context, Size, SizeType, Loc));
+ ExprResult Call = ExprError();
+ if (NeedsCollectableMemCpy)
+ Call = ActOnCallExpr(/*Scope=*/0,
+ CollectableMemCpyRef,
+ Loc, move_arg(CallArgs),
+ Loc);
+ else
+ Call = ActOnCallExpr(/*Scope=*/0,
+ BuiltinMemCpyRef,
+ Loc, move_arg(CallArgs),
+ Loc);
+
+ assert(!Call.isInvalid() && "Call to __builtin_memcpy cannot fail!");
+ Statements.push_back(Call.takeAs<Expr>());
+ continue;
+ }
+
+ // Build the move of this field.
+ StmtResult Move = BuildSingleCopyAssign(*this, Loc, FieldType,
+ To.get(), From.get(),
+ /*CopyingBaseSubobject=*/false,
+ /*Copying=*/false);
+ if (Move.isInvalid()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXMoveAssignment << Context.getTagDeclType(ClassDecl);
+ MoveAssignOperator->setInvalidDecl();
+ return;
+ }
+
+ // Success! Record the copy.
+ Statements.push_back(Move.takeAs<Stmt>());
+ }
+
+ if (!Invalid) {
+ // Add a "return *this;"
+ ExprResult ThisObj = CreateBuiltinUnaryOp(Loc, UO_Deref, This);
+
+ StmtResult Return = ActOnReturnStmt(Loc, ThisObj.get());
+ if (Return.isInvalid())
+ Invalid = true;
+ else {
+ Statements.push_back(Return.takeAs<Stmt>());
+
+ if (Trap.hasErrorOccurred()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXMoveAssignment << Context.getTagDeclType(ClassDecl);
+ Invalid = true;
+ }
+ }
+ }
+
+ if (Invalid) {
+ MoveAssignOperator->setInvalidDecl();
+ return;
+ }
+
+ StmtResult Body;
+ {
+ CompoundScopeRAII CompoundScope(*this);
+ Body = ActOnCompoundStmt(Loc, Loc, move_arg(Statements),
+ /*isStmtExpr=*/false);
+ assert(!Body.isInvalid() && "Compound statement creation cannot fail");
+ }
+ MoveAssignOperator->setBody(Body.takeAs<Stmt>());
+
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(MoveAssignOperator);
+ }
+}
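
// Editorial sketch (not part of this change): the shape of the body
// synthesized above, spelled out by hand for a hypothetical class MD. Each
// base and member is assigned from an xvalue, mirroring the
// "static_cast<Base&&>(other)" casts built via CastForMoving above.
#include <utility>

struct MBase {
  MBase &operator=(MBase &&) { return *this; }
};
struct MMember {
  MMember &operator=(MMember &&) { return *this; }
};

struct MD : MBase {
  MMember m;
  MD &operator=(MD &&other) {
    MBase::operator=(std::move(other)); // direct bases first, as rvalues
    m = std::move(other.m);             // then members, in declaration order
    return *this;                       // and "return *this;" at the end
  }
};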
+
+std::pair<Sema::ImplicitExceptionSpecification, bool>
+Sema::ComputeDefaultedCopyCtorExceptionSpecAndConst(CXXRecordDecl *ClassDecl) {
+ if (ClassDecl->isInvalidDecl())
+ return std::make_pair(ImplicitExceptionSpecification(Context), false);
+
+ // C++ [class.copy]p5:
+ // The implicitly-declared copy constructor for a class X will
+ // have the form
+ //
+ // X::X(const X&)
+ //
+ // if
+ // FIXME: It ought to be possible to store this on the record.
+ bool HasConstCopyConstructor = true;
+
+ // -- each direct or virtual base class B of X has a copy
+ // constructor whose first parameter is of type const B& or
+ // const volatile B&, and
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
+ BaseEnd = ClassDecl->bases_end();
+ HasConstCopyConstructor && Base != BaseEnd;
+ ++Base) {
+ // Virtual bases are handled below.
+ if (Base->isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ LookupCopyingConstructor(BaseClassDecl, Qualifiers::Const,
+ &HasConstCopyConstructor);
+ }
+
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
+ BaseEnd = ClassDecl->vbases_end();
+ HasConstCopyConstructor && Base != BaseEnd;
+ ++Base) {
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ LookupCopyingConstructor(BaseClassDecl, Qualifiers::Const,
+ &HasConstCopyConstructor);
+ }
+
+ // -- for all the nonstatic data members of X that are of a
+ // class type M (or array thereof), each such class type
+ // has a copy constructor whose first parameter is of type
+ // const M& or const volatile M&.
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ HasConstCopyConstructor && Field != FieldEnd;
+ ++Field) {
+ QualType FieldType = Context.getBaseElementType((*Field)->getType());
+ if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
+ LookupCopyingConstructor(FieldClassDecl, Qualifiers::Const,
+ &HasConstCopyConstructor);
+ }
+ }
+ // Otherwise, the implicitly declared copy constructor will have
+ // the form
+ //
+ // X::X(X&)
+
+ // C++ [except.spec]p14:
+ // An implicitly declared special member function (Clause 12) shall have an
+ // exception-specification. [...]
+ ImplicitExceptionSpecification ExceptSpec(Context);
+ unsigned Quals = HasConstCopyConstructor? Qualifiers::Const : 0;
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->bases_begin(),
+ BaseEnd = ClassDecl->bases_end();
+ Base != BaseEnd;
+ ++Base) {
+ // Virtual bases are handled below.
+ if (Base->isVirtual())
+ continue;
+
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (CXXConstructorDecl *CopyConstructor =
+ LookupCopyingConstructor(BaseClassDecl, Quals))
+ ExceptSpec.CalledDecl(CopyConstructor);
+ }
+ for (CXXRecordDecl::base_class_iterator Base = ClassDecl->vbases_begin(),
+ BaseEnd = ClassDecl->vbases_end();
+ Base != BaseEnd;
+ ++Base) {
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base->getType()->getAs<RecordType>()->getDecl());
+ if (CXXConstructorDecl *CopyConstructor =
+ LookupCopyingConstructor(BaseClassDecl, Quals))
+ ExceptSpec.CalledDecl(CopyConstructor);
+ }
+ for (CXXRecordDecl::field_iterator Field = ClassDecl->field_begin(),
+ FieldEnd = ClassDecl->field_end();
+ Field != FieldEnd;
+ ++Field) {
+ QualType FieldType = Context.getBaseElementType((*Field)->getType());
+ if (CXXRecordDecl *FieldClassDecl = FieldType->getAsCXXRecordDecl()) {
+ if (CXXConstructorDecl *CopyConstructor =
+ LookupCopyingConstructor(FieldClassDecl, Quals))
+ ExceptSpec.CalledDecl(CopyConstructor);
+ }
+ }
+
+ return std::make_pair(ExceptSpec, HasConstCopyConstructor);
+}
+
+CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
+ CXXRecordDecl *ClassDecl) {
+ // C++ [class.copy]p4:
+ // If the class definition does not explicitly declare a copy
+ // constructor, one is declared implicitly.
+
+ ImplicitExceptionSpecification Spec(Context);
+ bool Const;
+ llvm::tie(Spec, Const) =
+ ComputeDefaultedCopyCtorExceptionSpecAndConst(ClassDecl);
+
+ QualType ClassType = Context.getTypeDeclType(ClassDecl);
+ QualType ArgType = ClassType;
+ if (Const)
+ ArgType = ArgType.withConst();
+ ArgType = Context.getLValueReferenceType(ArgType);
+
+ FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
+
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(ClassType));
+ SourceLocation ClassLoc = ClassDecl->getLocation();
+ DeclarationNameInfo NameInfo(Name, ClassLoc);
+
+ // An implicitly-declared copy constructor is an inline public
+ // member of its class.
+ CXXConstructorDecl *CopyConstructor = CXXConstructorDecl::Create(
+ Context, ClassDecl, ClassLoc, NameInfo,
+ Context.getFunctionType(Context.VoidTy, &ArgType, 1, EPI), /*TInfo=*/0,
+ /*isExplicit=*/false, /*isInline=*/true, /*isImplicitlyDeclared=*/true,
+ /*isConstexpr=*/ClassDecl->defaultedCopyConstructorIsConstexpr() &&
+ getLangOpts().CPlusPlus0x);
+ CopyConstructor->setAccess(AS_public);
+ CopyConstructor->setDefaulted();
+ CopyConstructor->setTrivial(ClassDecl->hasTrivialCopyConstructor());
+
+ // Note that we have declared this constructor.
+ ++ASTContext::NumImplicitCopyConstructorsDeclared;
+
+ // Add the parameter to the constructor.
+ ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyConstructor,
+ ClassLoc, ClassLoc,
+ /*IdentifierInfo=*/0,
+ ArgType, /*TInfo=*/0,
+ SC_None,
+ SC_None, 0);
+ CopyConstructor->setParams(FromParam);
+
+ if (Scope *S = getScopeForContext(ClassDecl))
+ PushOnScopeChains(CopyConstructor, S, false);
+ ClassDecl->addDecl(CopyConstructor);
+
+ // C++11 [class.copy]p8:
+ // ... If the class definition does not explicitly declare a copy
+ // constructor, there is no user-declared move constructor, and there is no
+ // user-declared move assignment operator, a copy constructor is implicitly
+ // declared as defaulted.
+ if (ShouldDeleteSpecialMember(CopyConstructor, CXXCopyConstructor))
+ CopyConstructor->setDeletedAsWritten();
+
+ return CopyConstructor;
+}
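
// Editorial sketch (not part of this change): the deletion case handled by
// the ShouldDeleteSpecialMember call above, for a hypothetical class. A
// user-declared move constructor causes the implicitly declared copy
// constructor to be defined as deleted.
struct MoveOnly {
  MoveOnly() = default;
  MoveOnly(MoveOnly &&) = default; // user-declared move constructor
  // MoveOnly a;
  // MoveOnly b(a);                // error: copy constructor is deleted
};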
+
+void Sema::DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
+ CXXConstructorDecl *CopyConstructor) {
+ assert((CopyConstructor->isDefaulted() &&
+ CopyConstructor->isCopyConstructor() &&
+ !CopyConstructor->doesThisDeclarationHaveABody() &&
+ !CopyConstructor->isDeleted()) &&
+ "DefineImplicitCopyConstructor - call it for implicit copy ctor");
+
+ CXXRecordDecl *ClassDecl = CopyConstructor->getParent();
+ assert(ClassDecl && "DefineImplicitCopyConstructor - invalid constructor");
+
+ ImplicitlyDefinedFunctionScope Scope(*this, CopyConstructor);
+ DiagnosticErrorTrap Trap(Diags);
+
+ if (SetCtorInitializers(CopyConstructor, 0, 0, /*AnyErrors=*/false) ||
+ Trap.hasErrorOccurred()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXCopyConstructor << Context.getTagDeclType(ClassDecl);
+ CopyConstructor->setInvalidDecl();
+ } else {
+ Sema::CompoundScopeRAII CompoundScope(*this);
+ CopyConstructor->setBody(ActOnCompoundStmt(CopyConstructor->getLocation(),
+ CopyConstructor->getLocation(),
+ MultiStmtArg(*this, 0, 0),
+ /*isStmtExpr=*/false)
+ .takeAs<Stmt>());
+ CopyConstructor->setImplicitlyDefined(true);
+ }
+
+ CopyConstructor->setUsed();
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(CopyConstructor);
+ }
+}
+
+Sema::ImplicitExceptionSpecification
+Sema::ComputeDefaultedMoveCtorExceptionSpec(CXXRecordDecl *ClassDecl) {
+ // C++ [except.spec]p14:
+ // An implicitly declared special member function (Clause 12) shall have an
+ // exception-specification. [...]
+ ImplicitExceptionSpecification ExceptSpec(Context);
+ if (ClassDecl->isInvalidDecl())
+ return ExceptSpec;
+
+ // Direct base-class constructors.
+ for (CXXRecordDecl::base_class_iterator B = ClassDecl->bases_begin(),
+ BEnd = ClassDecl->bases_end();
+ B != BEnd; ++B) {
+ if (B->isVirtual()) // Handled below.
+ continue;
+
+ if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) {
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ CXXConstructorDecl *Constructor = LookupMovingConstructor(BaseClassDecl);
+ // If this is a deleted function, add it anyway; whether doing so is
+ // conformant with the standard is unclear, but it is unlikely to matter.
+ if (Constructor)
+ ExceptSpec.CalledDecl(Constructor);
+ }
+ }
+
+ // Virtual base-class constructors.
+ for (CXXRecordDecl::base_class_iterator B = ClassDecl->vbases_begin(),
+ BEnd = ClassDecl->vbases_end();
+ B != BEnd; ++B) {
+ if (const RecordType *BaseType = B->getType()->getAs<RecordType>()) {
+ CXXRecordDecl *BaseClassDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ CXXConstructorDecl *Constructor = LookupMovingConstructor(BaseClassDecl);
+ // If this is a deleted function, add it anyway; whether doing so is
+ // conformant with the standard is unclear, but it is unlikely to matter.
+ if (Constructor)
+ ExceptSpec.CalledDecl(Constructor);
+ }
+ }
+
+ // Field constructors.
+ for (RecordDecl::field_iterator F = ClassDecl->field_begin(),
+ FEnd = ClassDecl->field_end();
+ F != FEnd; ++F) {
+ if (const RecordType *RecordTy
+ = Context.getBaseElementType(F->getType())->getAs<RecordType>()) {
+ CXXRecordDecl *FieldRecDecl = cast<CXXRecordDecl>(RecordTy->getDecl());
+ CXXConstructorDecl *Constructor = LookupMovingConstructor(FieldRecDecl);
+ // If this is a deleted function, add it anyway; whether doing so is
+ // conformant with the standard is unclear, but it is unlikely to matter.
+ // In particular, the deleted function never actually gets called; the only
+ // concern is that referring to a deleted function here might itself make
+ // the program ill-formed.
+ if (Constructor)
+ ExceptSpec.CalledDecl(Constructor);
+ }
+ }
+
+ return ExceptSpec;
+}
+
+CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
+ CXXRecordDecl *ClassDecl) {
+ // C++11 [class.copy]p9:
+ // If the definition of a class X does not explicitly declare a move
+ // constructor, one will be implicitly declared as defaulted if and only if:
+ //
+ // - [first 4 bullets]
+ assert(ClassDecl->needsImplicitMoveConstructor());
+
+ // [Checked after we build the declaration]
+ // - the move constructor would not be implicitly defined as
+ // deleted,
+
+ // [DR1402]:
+ // - each of X's non-static data members and direct or virtual base classes
+ // has a type that either has a move constructor or is trivially copyable.
+ if (!subobjectsHaveMoveOrTrivialCopy(*this, ClassDecl, /*Constructor*/true)) {
+ ClassDecl->setFailedImplicitMoveConstructor();
+ return 0;
+ }
+
+ ImplicitExceptionSpecification Spec(
+ ComputeDefaultedMoveCtorExceptionSpec(ClassDecl));
+
+ QualType ClassType = Context.getTypeDeclType(ClassDecl);
+ QualType ArgType = Context.getRValueReferenceType(ClassType);
+
+ FunctionProtoType::ExtProtoInfo EPI = Spec.getEPI();
+
+ DeclarationName Name
+ = Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(ClassType));
+ SourceLocation ClassLoc = ClassDecl->getLocation();
+ DeclarationNameInfo NameInfo(Name, ClassLoc);
+
+ // C++0x [class.copy]p11:
+ // An implicitly-declared copy/move constructor is an inline public
+ // member of its class.
+ CXXConstructorDecl *MoveConstructor = CXXConstructorDecl::Create(
+ Context, ClassDecl, ClassLoc, NameInfo,
+ Context.getFunctionType(Context.VoidTy, &ArgType, 1, EPI), /*TInfo=*/0,
+ /*isExplicit=*/false, /*isInline=*/true, /*isImplicitlyDeclared=*/true,
+ /*isConstexpr=*/ClassDecl->defaultedMoveConstructorIsConstexpr() &&
+ getLangOpts().CPlusPlus0x);
+ MoveConstructor->setAccess(AS_public);
+ MoveConstructor->setDefaulted();
+ MoveConstructor->setTrivial(ClassDecl->hasTrivialMoveConstructor());
+
+ // Add the parameter to the constructor.
+ ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveConstructor,
+ ClassLoc, ClassLoc,
+ /*IdentifierInfo=*/0,
+ ArgType, /*TInfo=*/0,
+ SC_None,
+ SC_None, 0);
+ MoveConstructor->setParams(FromParam);
+
+ // C++0x [class.copy]p9:
+ // If the definition of a class X does not explicitly declare a move
+ // constructor, one will be implicitly declared as defaulted if and only if:
+ // [...]
+ // - the move constructor would not be implicitly defined as deleted.
+ if (ShouldDeleteSpecialMember(MoveConstructor, CXXMoveConstructor)) {
+ // Cache this result so that we don't try to generate this over and over
+ // on every lookup, leaking memory and wasting time.
+ ClassDecl->setFailedImplicitMoveConstructor();
+ return 0;
+ }
+
+ // Note that we have declared this constructor.
+ ++ASTContext::NumImplicitMoveConstructorsDeclared;
+
+ if (Scope *S = getScopeForContext(ClassDecl))
+ PushOnScopeChains(MoveConstructor, S, false);
+ ClassDecl->addDecl(MoveConstructor);
+
+ return MoveConstructor;
+}
+
+void Sema::DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
+ CXXConstructorDecl *MoveConstructor) {
+ assert((MoveConstructor->isDefaulted() &&
+ MoveConstructor->isMoveConstructor() &&
+ !MoveConstructor->doesThisDeclarationHaveABody() &&
+ !MoveConstructor->isDeleted()) &&
+ "DefineImplicitMoveConstructor - call it for implicit move ctor");
+
+ CXXRecordDecl *ClassDecl = MoveConstructor->getParent();
+ assert(ClassDecl && "DefineImplicitMoveConstructor - invalid constructor");
+
+ ImplicitlyDefinedFunctionScope Scope(*this, MoveConstructor);
+ DiagnosticErrorTrap Trap(Diags);
+
+ if (SetCtorInitializers(MoveConstructor, 0, 0, /*AnyErrors=*/false) ||
+ Trap.hasErrorOccurred()) {
+ Diag(CurrentLocation, diag::note_member_synthesized_at)
+ << CXXMoveConstructor << Context.getTagDeclType(ClassDecl);
+ MoveConstructor->setInvalidDecl();
+ } else {
+ Sema::CompoundScopeRAII CompoundScope(*this);
+ MoveConstructor->setBody(ActOnCompoundStmt(MoveConstructor->getLocation(),
+ MoveConstructor->getLocation(),
+ MultiStmtArg(*this, 0, 0),
+ /*isStmtExpr=*/false)
+ .takeAs<Stmt>());
+ MoveConstructor->setImplicitlyDefined(true);
+ }
+
+ MoveConstructor->setUsed();
+
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(MoveConstructor);
+ }
+}
+
+bool Sema::isImplicitlyDeleted(FunctionDecl *FD) {
+ return FD->isDeleted() &&
+ (FD->isDefaulted() || FD->isImplicit()) &&
+ isa<CXXMethodDecl>(FD);
+}
+
+/// \brief Mark the call operator of the given lambda closure type as "used".
+static void markLambdaCallOperatorUsed(Sema &S, CXXRecordDecl *Lambda) {
+ CXXMethodDecl *CallOperator
+ = cast<CXXMethodDecl>(
+ *Lambda->lookup(
+ S.Context.DeclarationNames.getCXXOperatorName(OO_Call)).first);
+ CallOperator->setReferenced();
+ CallOperator->setUsed();
+}
+
+void Sema::DefineImplicitLambdaToFunctionPointerConversion(
+ SourceLocation CurrentLocation,
+ CXXConversionDecl *Conv)
+{
+ CXXRecordDecl *Lambda = Conv->getParent();
+
+ // Make sure that the lambda call operator is marked used.
+ markLambdaCallOperatorUsed(*this, Lambda);
+
+ Conv->setUsed();
+
+ ImplicitlyDefinedFunctionScope Scope(*this, Conv);
+ DiagnosticErrorTrap Trap(Diags);
+
+ // Return the address of the __invoke function.
+ DeclarationName InvokeName = &Context.Idents.get("__invoke");
+ CXXMethodDecl *Invoke
+ = cast<CXXMethodDecl>(*Lambda->lookup(InvokeName).first);
+ Expr *FunctionRef = BuildDeclRefExpr(Invoke, Invoke->getType(),
+ VK_LValue, Conv->getLocation()).take();
+ assert(FunctionRef && "Can't refer to __invoke function?");
+ Stmt *Return = ActOnReturnStmt(Conv->getLocation(), FunctionRef).take();
+ Conv->setBody(new (Context) CompoundStmt(Context, &Return, 1,
+ Conv->getLocation(),
+ Conv->getLocation()));
+
+ // Fill in the __invoke function with a dummy implementation. IR generation
+ // will fill in the actual details.
+ Invoke->setUsed();
+ Invoke->setReferenced();
+ Invoke->setBody(new (Context) CompoundStmt(Context, 0, 0, Conv->getLocation(),
+ Conv->getLocation()));
+
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(Conv);
+ L->CompletedImplicitDefinition(Invoke);
+ }
+}
+
+void Sema::DefineImplicitLambdaToBlockPointerConversion(
+ SourceLocation CurrentLocation,
+ CXXConversionDecl *Conv)
+{
+ Conv->setUsed();
+
+ ImplicitlyDefinedFunctionScope Scope(*this, Conv);
+ DiagnosticErrorTrap Trap(Diags);
+
+ // Copy-initialize the lambda object as needed to capture it.
+ Expr *This = ActOnCXXThis(CurrentLocation).take();
+ Expr *DerefThis = CreateBuiltinUnaryOp(CurrentLocation, UO_Deref, This).take();
+
+ ExprResult BuildBlock = BuildBlockForLambdaConversion(CurrentLocation,
+ Conv->getLocation(),
+ Conv, DerefThis);
+
+ // If we're not under ARC, make sure we still get the _Block_copy/autorelease
+ // behavior. Note that only the general conversion function does this
+ // (since it's unusable otherwise); in the case where we inline the
+ // block literal, it has block literal lifetime semantics.
+ if (!BuildBlock.isInvalid() && !getLangOpts().ObjCAutoRefCount)
+ BuildBlock = ImplicitCastExpr::Create(Context, BuildBlock.get()->getType(),
+ CK_CopyAndAutoreleaseBlockObject,
+ BuildBlock.get(), 0, VK_RValue);
+
+ if (BuildBlock.isInvalid()) {
+ Diag(CurrentLocation, diag::note_lambda_to_block_conv);
+ Conv->setInvalidDecl();
+ return;
+ }
+
+ // Create the return statement that returns the block from the conversion
+ // function.
+ StmtResult Return = ActOnReturnStmt(Conv->getLocation(), BuildBlock.get());
+ if (Return.isInvalid()) {
+ Diag(CurrentLocation, diag::note_lambda_to_block_conv);
+ Conv->setInvalidDecl();
+ return;
+ }
+
+ // Set the body of the conversion function.
+ Stmt *ReturnS = Return.take();
+ Conv->setBody(new (Context) CompoundStmt(Context, &ReturnS, 1,
+ Conv->getLocation(),
+ Conv->getLocation()));
+
+ // We're done; notify the mutation listener, if any.
+ if (ASTMutationListener *L = getASTMutationListener()) {
+ L->CompletedImplicitDefinition(Conv);
+ }
+}
+
+ /// \brief Determine whether the given list of arguments contains exactly one
+/// "real" (non-default) argument.
+static bool hasOneRealArgument(MultiExprArg Args) {
+ switch (Args.size()) {
+ case 0:
+ return false;
+
+ default:
+ if (!Args.get()[1]->isDefaultArgument())
+ return false;
+
+ // fall through
+ case 1:
+ return !Args.get()[0]->isDefaultArgument();
+ }
+
+ return false;
+}
+
+ExprResult
+Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
+ CXXConstructorDecl *Constructor,
+ MultiExprArg ExprArgs,
+ bool HadMultipleCandidates,
+ bool RequiresZeroInit,
+ unsigned ConstructKind,
+ SourceRange ParenRange) {
+ bool Elidable = false;
+
+ // C++0x [class.copy]p34:
+ // When certain criteria are met, an implementation is allowed to
+ // omit the copy/move construction of a class object, even if the
+ // copy/move constructor and/or destructor for the object have
+ // side effects. [...]
+ // - when a temporary class object that has not been bound to a
+ // reference (12.2) would be copied/moved to a class object
+ // with the same cv-unqualified type, the copy/move operation
+ // can be omitted by constructing the temporary object
+ // directly into the target of the omitted copy/move
+ if (ConstructKind == CXXConstructExpr::CK_Complete &&
+ Constructor->isCopyOrMoveConstructor() && hasOneRealArgument(ExprArgs)) {
+ Expr *SubExpr = ((Expr **)ExprArgs.get())[0];
+ Elidable = SubExpr->isTemporaryObject(Context, Constructor->getParent());
+ }
+
+ return BuildCXXConstructExpr(ConstructLoc, DeclInitType, Constructor,
+ Elidable, move(ExprArgs), HadMultipleCandidates,
+ RequiresZeroInit, ConstructKind, ParenRange);
+}
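
// Editorial sketch (not part of this change): the elision case detected
// above, for a hypothetical class. When the single real constructor argument
// is a temporary of the same class type, the copy/move construction is
// marked elidable and the temporary may be built directly into the target.
struct Widget {
  Widget() {}
  Widget(const Widget &) {}   // may never run for the cases below
};

Widget makeWidget() {
  return Widget();            // temporary returned: copy is elidable
}
Widget w = Widget();          // temporary initializes w: copy is elidable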
+
+/// BuildCXXConstructExpr - Creates a complete call to a constructor,
+/// including handling of its default argument expressions.
+ExprResult
+Sema::BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
+ CXXConstructorDecl *Constructor, bool Elidable,
+ MultiExprArg ExprArgs,
+ bool HadMultipleCandidates,
+ bool RequiresZeroInit,
+ unsigned ConstructKind,
+ SourceRange ParenRange) {
+ unsigned NumExprs = ExprArgs.size();
+ Expr **Exprs = (Expr **)ExprArgs.release();
+
+ for (specific_attr_iterator<NonNullAttr>
+ i = Constructor->specific_attr_begin<NonNullAttr>(),
+ e = Constructor->specific_attr_end<NonNullAttr>(); i != e; ++i) {
+ const NonNullAttr *NonNull = *i;
+ CheckNonNullArguments(NonNull, ExprArgs.get(), ConstructLoc);
+ }
+
+ MarkFunctionReferenced(ConstructLoc, Constructor);
+ return Owned(CXXConstructExpr::Create(Context, DeclInitType, ConstructLoc,
+ Constructor, Elidable, Exprs, NumExprs,
+ HadMultipleCandidates, /*FIXME*/false,
+ RequiresZeroInit,
+ static_cast<CXXConstructExpr::ConstructionKind>(ConstructKind),
+ ParenRange));
+}
+
+bool Sema::InitializeVarWithConstructor(VarDecl *VD,
+ CXXConstructorDecl *Constructor,
+ MultiExprArg Exprs,
+ bool HadMultipleCandidates) {
+ // FIXME: Provide the correct paren SourceRange when available.
+ ExprResult TempResult =
+ BuildCXXConstructExpr(VD->getLocation(), VD->getType(), Constructor,
+ move(Exprs), HadMultipleCandidates, false,
+ CXXConstructExpr::CK_Complete, SourceRange());
+ if (TempResult.isInvalid())
+ return true;
+
+ Expr *Temp = TempResult.takeAs<Expr>();
+ CheckImplicitConversions(Temp, VD->getLocation());
+ MarkFunctionReferenced(VD->getLocation(), Constructor);
+ Temp = MaybeCreateExprWithCleanups(Temp);
+ VD->setInit(Temp);
+
+ return false;
+}
+
+void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
+ if (VD->isInvalidDecl()) return;
+
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Record->getDecl());
+ if (ClassDecl->isInvalidDecl()) return;
+ if (ClassDecl->hasIrrelevantDestructor()) return;
+ if (ClassDecl->isDependentContext()) return;
+
+ CXXDestructorDecl *Destructor = LookupDestructor(ClassDecl);
+ MarkFunctionReferenced(VD->getLocation(), Destructor);
+ CheckDestructorAccess(VD->getLocation(), Destructor,
+ PDiag(diag::err_access_dtor_var)
+ << VD->getDeclName()
+ << VD->getType());
+ DiagnoseUseOfDecl(Destructor, VD->getLocation());
+
+ if (!VD->hasGlobalStorage()) return;
+
+ // Emit warning for non-trivial dtor in global scope (a real global,
+ // class-static, function-static).
+ Diag(VD->getLocation(), diag::warn_exit_time_destructor);
+
+ // TODO: this should be re-enabled for static locals by !CXAAtExit
+ if (!VD->isStaticLocal())
+ Diag(VD->getLocation(), diag::warn_global_destructor);
+}
+
+/// \brief Given a constructor and the set of arguments provided for the
+/// constructor, convert the arguments and add any required default arguments
+/// to form a proper call to this constructor.
+///
+/// \returns true if an error occurred, false otherwise.
+bool
+Sema::CompleteConstructorCall(CXXConstructorDecl *Constructor,
+ MultiExprArg ArgsPtr,
+ SourceLocation Loc,
+ ASTOwningVector<Expr*> &ConvertedArgs,
+ bool AllowExplicit) {
+ // FIXME: This duplicates a lot of code from Sema::ConvertArgumentsForCall.
+ unsigned NumArgs = ArgsPtr.size();
+ Expr **Args = (Expr **)ArgsPtr.get();
+
+ const FunctionProtoType *Proto
+ = Constructor->getType()->getAs<FunctionProtoType>();
+ assert(Proto && "Constructor without a prototype?");
+ unsigned NumArgsInProto = Proto->getNumArgs();
+
+ // If too few arguments are available, we'll fill in the rest with defaults.
+ if (NumArgs < NumArgsInProto)
+ ConvertedArgs.reserve(NumArgsInProto);
+ else
+ ConvertedArgs.reserve(NumArgs);
+
+ VariadicCallType CallType =
+ Proto->isVariadic() ? VariadicConstructor : VariadicDoesNotApply;
+ SmallVector<Expr *, 8> AllArgs;
+ bool Invalid = GatherArgumentsForCall(Loc, Constructor,
+ Proto, 0, Args, NumArgs, AllArgs,
+ CallType, AllowExplicit);
+ ConvertedArgs.append(AllArgs.begin(), AllArgs.end());
+
+ DiagnoseSentinelCalls(Constructor, Loc, AllArgs.data(), AllArgs.size());
+
+ // FIXME: Missing call to CheckFunctionCall or equivalent
+
+ return Invalid;
+}
+
+static inline bool
+CheckOperatorNewDeleteDeclarationScope(Sema &SemaRef,
+ const FunctionDecl *FnDecl) {
+ const DeclContext *DC = FnDecl->getDeclContext()->getRedeclContext();
+ if (isa<NamespaceDecl>(DC)) {
+ return SemaRef.Diag(FnDecl->getLocation(),
+ diag::err_operator_new_delete_declared_in_namespace)
+ << FnDecl->getDeclName();
+ }
+
+ if (isa<TranslationUnitDecl>(DC) &&
+ FnDecl->getStorageClass() == SC_Static) {
+ return SemaRef.Diag(FnDecl->getLocation(),
+ diag::err_operator_new_delete_declared_static)
+ << FnDecl->getDeclName();
+ }
+
+ return false;
+}
+
+static inline bool
+CheckOperatorNewDeleteTypes(Sema &SemaRef, const FunctionDecl *FnDecl,
+ CanQualType ExpectedResultType,
+ CanQualType ExpectedFirstParamType,
+ unsigned DependentParamTypeDiag,
+ unsigned InvalidParamTypeDiag) {
+ QualType ResultType =
+ FnDecl->getType()->getAs<FunctionType>()->getResultType();
+
+ // Check that the result type is not dependent.
+ if (ResultType->isDependentType())
+ return SemaRef.Diag(FnDecl->getLocation(),
+ diag::err_operator_new_delete_dependent_result_type)
+ << FnDecl->getDeclName() << ExpectedResultType;
+
+ // Check that the result type is what we expect.
+ if (SemaRef.Context.getCanonicalType(ResultType) != ExpectedResultType)
+ return SemaRef.Diag(FnDecl->getLocation(),
+ diag::err_operator_new_delete_invalid_result_type)
+ << FnDecl->getDeclName() << ExpectedResultType;
+
+ // A function template must have at least 2 parameters.
+ if (FnDecl->getDescribedFunctionTemplate() && FnDecl->getNumParams() < 2)
+ return SemaRef.Diag(FnDecl->getLocation(),
+ diag::err_operator_new_delete_template_too_few_parameters)
+ << FnDecl->getDeclName();
+
+ // The function decl must have at least 1 parameter.
+ if (FnDecl->getNumParams() == 0)
+ return SemaRef.Diag(FnDecl->getLocation(),
+ diag::err_operator_new_delete_too_few_parameters)
+ << FnDecl->getDeclName();
+
+ // Check that the first parameter type is not dependent.
+ QualType FirstParamType = FnDecl->getParamDecl(0)->getType();
+ if (FirstParamType->isDependentType())
+ return SemaRef.Diag(FnDecl->getLocation(), DependentParamTypeDiag)
+ << FnDecl->getDeclName() << ExpectedFirstParamType;
+
+ // Check that the first parameter type is what we expect.
+ if (SemaRef.Context.getCanonicalType(FirstParamType).getUnqualifiedType() !=
+ ExpectedFirstParamType)
+ return SemaRef.Diag(FnDecl->getLocation(), InvalidParamTypeDiag)
+ << FnDecl->getDeclName() << ExpectedFirstParamType;
+
+ return false;
+}
+
+static bool
+CheckOperatorNewDeclaration(Sema &SemaRef, const FunctionDecl *FnDecl) {
+ // C++ [basic.stc.dynamic.allocation]p1:
+ // A program is ill-formed if an allocation function is declared in a
+ // namespace scope other than global scope or declared static in global
+ // scope.
+ if (CheckOperatorNewDeleteDeclarationScope(SemaRef, FnDecl))
+ return true;
+
+ CanQualType SizeTy =
+ SemaRef.Context.getCanonicalType(SemaRef.Context.getSizeType());
+
+ // C++ [basic.stc.dynamic.allocation]p1:
+ // The return type shall be void*. The first parameter shall have type
+ // std::size_t.
+ if (CheckOperatorNewDeleteTypes(SemaRef, FnDecl, SemaRef.Context.VoidPtrTy,
+ SizeTy,
+ diag::err_operator_new_dependent_param_type,
+ diag::err_operator_new_param_type))
+ return true;
+
+ // C++ [basic.stc.dynamic.allocation]p1:
+ // The first parameter shall not have an associated default argument.
+ if (FnDecl->getParamDecl(0)->hasDefaultArg())
+ return SemaRef.Diag(FnDecl->getLocation(),
+ diag::err_operator_new_default_arg)
+ << FnDecl->getDeclName() << FnDecl->getParamDecl(0)->getDefaultArgRange();
+
+ return false;
+}
+
+static bool
+CheckOperatorDeleteDeclaration(Sema &SemaRef, const FunctionDecl *FnDecl) {
+ // C++ [basic.stc.dynamic.deallocation]p1:
+ // A program is ill-formed if deallocation functions are declared in a
+ // namespace scope other than global scope or declared static in global
+ // scope.
+ if (CheckOperatorNewDeleteDeclarationScope(SemaRef, FnDecl))
+ return true;
+
+ // C++ [basic.stc.dynamic.deallocation]p2:
+ // Each deallocation function shall return void and its first parameter
+ // shall be void*.
+ if (CheckOperatorNewDeleteTypes(SemaRef, FnDecl, SemaRef.Context.VoidTy,
+ SemaRef.Context.VoidPtrTy,
+ diag::err_operator_delete_dependent_param_type,
+ diag::err_operator_delete_param_type))
+ return true;
+
+ return false;
+}
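
// Illustrative sketch (editorial example, not part of this patch; 'Pool' and the
// parameter names are invented): declarations that satisfy the two checks above.
#include <cstddef>

struct Pool {
  void *operator new(std::size_t size);   // OK: returns void*, first parameter is size_t
  void operator delete(void *ptr);        // OK: returns void, first parameter is void*
};

// Rejected forms:
//   namespace N { void *operator new(std::size_t); }  // declared in namespace scope
//   static void *operator new(std::size_t);           // declared static at global scope
//   void *operator new(std::size_t n = 32);           // default argument on the first parameter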
+
+/// CheckOverloadedOperatorDeclaration - Check whether the declaration
+/// of this overloaded operator is well-formed. If so, returns false;
+/// otherwise, emits appropriate diagnostics and returns true.
+bool Sema::CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl) {
+ assert(FnDecl && FnDecl->isOverloadedOperator() &&
+ "Expected an overloaded operator declaration");
+
+ OverloadedOperatorKind Op = FnDecl->getOverloadedOperator();
+
+ // C++ [over.oper]p5:
+ // The allocation and deallocation functions, operator new,
+ // operator new[], operator delete and operator delete[], are
+ // described completely in 3.7.3. The attributes and restrictions
+ // found in the rest of this subclause do not apply to them unless
+ // explicitly stated in 3.7.3.
+ if (Op == OO_Delete || Op == OO_Array_Delete)
+ return CheckOperatorDeleteDeclaration(*this, FnDecl);
+
+ if (Op == OO_New || Op == OO_Array_New)
+ return CheckOperatorNewDeclaration(*this, FnDecl);
+
+ // C++ [over.oper]p6:
+ // An operator function shall either be a non-static member
+ // function or be a non-member function and have at least one
+ // parameter whose type is a class, a reference to a class, an
+ // enumeration, or a reference to an enumeration.
+ if (CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(FnDecl)) {
+ if (MethodDecl->isStatic())
+ return Diag(FnDecl->getLocation(),
+ diag::err_operator_overload_static) << FnDecl->getDeclName();
+ } else {
+ bool ClassOrEnumParam = false;
+ for (FunctionDecl::param_iterator Param = FnDecl->param_begin(),
+ ParamEnd = FnDecl->param_end();
+ Param != ParamEnd; ++Param) {
+ QualType ParamType = (*Param)->getType().getNonReferenceType();
+ if (ParamType->isDependentType() || ParamType->isRecordType() ||
+ ParamType->isEnumeralType()) {
+ ClassOrEnumParam = true;
+ break;
+ }
+ }
+
+ if (!ClassOrEnumParam)
+ return Diag(FnDecl->getLocation(),
+ diag::err_operator_overload_needs_class_or_enum)
+ << FnDecl->getDeclName();
+ }
+
+ // C++ [over.oper]p8:
+ // An operator function cannot have default arguments (8.3.6),
+ // except where explicitly stated below.
+ //
+ // Only the function-call operator allows default arguments
+ // (C++ [over.call]p1).
+ if (Op != OO_Call) {
+ for (FunctionDecl::param_iterator Param = FnDecl->param_begin();
+ Param != FnDecl->param_end(); ++Param) {
+ if ((*Param)->hasDefaultArg())
+ return Diag((*Param)->getLocation(),
+ diag::err_operator_overload_default_arg)
+ << FnDecl->getDeclName() << (*Param)->getDefaultArgRange();
+ }
+ }
+
+ static const bool OperatorUses[NUM_OVERLOADED_OPERATORS][3] = {
+ { false, false, false }
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ , { Unary, Binary, MemberOnly }
+#include "clang/Basic/OperatorKinds.def"
+ };
+
+ bool CanBeUnaryOperator = OperatorUses[Op][0];
+ bool CanBeBinaryOperator = OperatorUses[Op][1];
+ bool MustBeMemberOperator = OperatorUses[Op][2];
+
+ // C++ [over.oper]p8:
+ // [...] Operator functions cannot have more or fewer parameters
+ // than the number required for the corresponding operator, as
+ // described in the rest of this subclause.
+ unsigned NumParams = FnDecl->getNumParams()
+ + (isa<CXXMethodDecl>(FnDecl)? 1 : 0);
+ if (Op != OO_Call &&
+ ((NumParams == 1 && !CanBeUnaryOperator) ||
+ (NumParams == 2 && !CanBeBinaryOperator) ||
+ (NumParams < 1) || (NumParams > 2))) {
+ // We have the wrong number of parameters.
+ unsigned ErrorKind;
+ if (CanBeUnaryOperator && CanBeBinaryOperator) {
+ ErrorKind = 2; // 2 -> unary or binary.
+ } else if (CanBeUnaryOperator) {
+ ErrorKind = 0; // 0 -> unary
+ } else {
+ assert(CanBeBinaryOperator &&
+ "All non-call overloaded operators are unary or binary!");
+ ErrorKind = 1; // 1 -> binary
+ }
+
+ return Diag(FnDecl->getLocation(), diag::err_operator_overload_must_be)
+ << FnDecl->getDeclName() << NumParams << ErrorKind;
+ }
+
+ // Overloaded operators other than operator() cannot be variadic.
+ if (Op != OO_Call &&
+ FnDecl->getType()->getAs<FunctionProtoType>()->isVariadic()) {
+ return Diag(FnDecl->getLocation(), diag::err_operator_overload_variadic)
+ << FnDecl->getDeclName();
+ }
+
+ // Some operators must be non-static member functions.
+ if (MustBeMemberOperator && !isa<CXXMethodDecl>(FnDecl)) {
+ return Diag(FnDecl->getLocation(),
+ diag::err_operator_overload_must_be_member)
+ << FnDecl->getDeclName();
+ }
+
+ // C++ [over.inc]p1:
+ // The user-defined function called operator++ implements the
+ // prefix and postfix ++ operator. If this function is a member
+ // function with no parameters, or a non-member function with one
+ // parameter of class or enumeration type, it defines the prefix
+ // increment operator ++ for objects of that type. If the function
+ // is a member function with one parameter (which shall be of type
+ // int) or a non-member function with two parameters (the second
+ // of which shall be of type int), it defines the postfix
+ // increment operator ++ for objects of that type.
+ if ((Op == OO_PlusPlus || Op == OO_MinusMinus) && NumParams == 2) {
+ ParmVarDecl *LastParam = FnDecl->getParamDecl(FnDecl->getNumParams() - 1);
+ bool ParamIsInt = false;
+ if (const BuiltinType *BT = LastParam->getType()->getAs<BuiltinType>())
+ ParamIsInt = BT->getKind() == BuiltinType::Int;
+
+ if (!ParamIsInt)
+ return Diag(LastParam->getLocation(),
+ diag::err_operator_overload_post_incdec_must_be_int)
+ << LastParam->getType() << (Op == OO_MinusMinus);
+ }
+
+ return false;
+}
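
// Illustrative sketch (editorial example, not part of this patch; 'Int' is an
// invented type): operator declarations that obey the restrictions enforced above.
struct Int {
  Int &operator++();              // prefix ++: member taking no parameters
  Int operator++(int);            // postfix ++: the extra parameter must have type int
  Int &operator=(const Int &);    // operator= must be a non-static member function
  int operator()(int x = 0);      // only operator() may have default arguments
};
Int operator+(Int lhs, Int rhs);  // non-member: needs a class or enumeration parameter

// Rejected forms:
//   int operator+(int, int);        // no parameter of class or enumeration type
//   Int operator+(Int, Int, Int);   // wrong parameter count for a binary operator
//   Int operator-(Int, int n = 0);  // default argument on an operator other than ()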
+
+/// CheckLiteralOperatorDeclaration - Check whether the declaration
+/// of this literal operator function is well-formed. If so, returns
+/// false; otherwise, emits appropriate diagnostics and returns true.
+bool Sema::CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl) {
+ if (isa<CXXMethodDecl>(FnDecl)) {
+ Diag(FnDecl->getLocation(), diag::err_literal_operator_outside_namespace)
+ << FnDecl->getDeclName();
+ return true;
+ }
+
+ if (FnDecl->isExternC()) {
+ Diag(FnDecl->getLocation(), diag::err_literal_operator_extern_c);
+ return true;
+ }
+
+ bool Valid = false;
+
+ // This might be the definition of a literal operator template.
+ FunctionTemplateDecl *TpDecl = FnDecl->getDescribedFunctionTemplate();
+ // This might be a specialization of a literal operator template.
+ if (!TpDecl)
+ TpDecl = FnDecl->getPrimaryTemplate();
+
+ // template <char...> type operator "" name() is the only valid template
+ // signature, and the only valid signature with no parameters.
+ if (TpDecl) {
+ if (FnDecl->param_size() == 0) {
+ // Must have only one template parameter
+ TemplateParameterList *Params = TpDecl->getTemplateParameters();
+ if (Params->size() == 1) {
+ NonTypeTemplateParmDecl *PmDecl =
+ cast<NonTypeTemplateParmDecl>(Params->getParam(0));
+
+ // The template parameter must be a char parameter pack.
+ if (PmDecl && PmDecl->isTemplateParameterPack() &&
+ Context.hasSameType(PmDecl->getType(), Context.CharTy))
+ Valid = true;
+ }
+ }
+ } else if (FnDecl->param_size()) {
+ // Check the first parameter
+ FunctionDecl::param_iterator Param = FnDecl->param_begin();
+
+ QualType T = (*Param)->getType().getUnqualifiedType();
+
+ // unsigned long long int, long double, and any character type are allowed
+ // as the only parameters.
+ if (Context.hasSameType(T, Context.UnsignedLongLongTy) ||
+ Context.hasSameType(T, Context.LongDoubleTy) ||
+ Context.hasSameType(T, Context.CharTy) ||
+ Context.hasSameType(T, Context.WCharTy) ||
+ Context.hasSameType(T, Context.Char16Ty) ||
+ Context.hasSameType(T, Context.Char32Ty)) {
+ if (++Param == FnDecl->param_end())
+ Valid = true;
+ goto FinishedParams;
+ }
+
+ // Otherwise it must be a pointer to const; let's strip those qualifiers.
+ const PointerType *PT = T->getAs<PointerType>();
+ if (!PT)
+ goto FinishedParams;
+ T = PT->getPointeeType();
+ if (!T.isConstQualified() || T.isVolatileQualified())
+ goto FinishedParams;
+ T = T.getUnqualifiedType();
+
+ // Move on to the second parameter.
+ ++Param;
+
+ // If there is no second parameter, the first must be a const char *
+ if (Param == FnDecl->param_end()) {
+ if (Context.hasSameType(T, Context.CharTy))
+ Valid = true;
+ goto FinishedParams;
+ }
+
+ // const char *, const wchar_t*, const char16_t*, and const char32_t*
+ // are allowed as the first parameter to a two-parameter function
+ if (!(Context.hasSameType(T, Context.CharTy) ||
+ Context.hasSameType(T, Context.WCharTy) ||
+ Context.hasSameType(T, Context.Char16Ty) ||
+ Context.hasSameType(T, Context.Char32Ty)))
+ goto FinishedParams;
+
+ // The second and final parameter must be an std::size_t
+ T = (*Param)->getType().getUnqualifiedType();
+ if (Context.hasSameType(T, Context.getSizeType()) &&
+ ++Param == FnDecl->param_end())
+ Valid = true;
+ }
+
+ // FIXME: This diagnostic is absolutely terrible.
+FinishedParams:
+ if (!Valid) {
+ Diag(FnDecl->getLocation(), diag::err_literal_operator_params)
+ << FnDecl->getDeclName();
+ return true;
+ }
+
+ // A parameter-declaration-clause containing a default argument is not
+ // equivalent to any of the permitted forms.
+ for (FunctionDecl::param_iterator Param = FnDecl->param_begin(),
+ ParamEnd = FnDecl->param_end();
+ Param != ParamEnd; ++Param) {
+ if ((*Param)->hasDefaultArg()) {
+ Diag((*Param)->getDefaultArgRange().getBegin(),
+ diag::err_literal_operator_default_argument)
+ << (*Param)->getDefaultArgRange();
+ break;
+ }
+ }
+
+ StringRef LiteralName
+ = FnDecl->getDeclName().getCXXLiteralIdentifier()->getName();
+ if (LiteralName[0] != '_') {
+ // C++11 [usrlit.suffix]p1:
+ // Literal suffix identifiers that do not start with an underscore
+ // are reserved for future standardization.
+ Diag(FnDecl->getLocation(), diag::warn_user_literal_reserved);
+ }
+
+ return false;
+}
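
// Illustrative sketch (editorial example, not part of this patch; the suffix
// names are invented): parameter lists accepted by the check above (C++11).
#include <cstddef>

namespace suffixes {
  unsigned long long operator"" _num(unsigned long long);  // integer literals
  long double operator"" _fp(long double);                  // floating literals
  char operator"" _ch(char);                                // character literals
  int operator"" _raw(const char *);                        // raw literal operator
  int operator"" _len(const char *, std::size_t);           // string literals
  template <char...> int operator"" _bits();                // the only valid template form
}
// Members, extern "C" declarations, default arguments, and suffixes that do not
// begin with '_' are diagnosed by the code above.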
+
+/// ActOnStartLinkageSpecification - Parsed the beginning of a C++
+/// linkage specification, including the language and (if present)
+/// the '{'. ExternLoc is the location of the 'extern', LangLoc is
+/// the location of the language string literal, which is provided
+/// by Lang. LBraceLoc, if valid, provides the location of
+/// the '{' brace. Otherwise, this linkage specification does not
+/// have any braces.
+Decl *Sema::ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
+ SourceLocation LangLoc,
+ StringRef Lang,
+ SourceLocation LBraceLoc) {
+ LinkageSpecDecl::LanguageIDs Language;
+ if (Lang == "\"C\"")
+ Language = LinkageSpecDecl::lang_c;
+ else if (Lang == "\"C++\"")
+ Language = LinkageSpecDecl::lang_cxx;
+ else {
+ Diag(LangLoc, diag::err_bad_language);
+ return 0;
+ }
+
+ // FIXME: Add all the various semantics of linkage specifications
+
+ LinkageSpecDecl *D = LinkageSpecDecl::Create(Context, CurContext,
+ ExternLoc, LangLoc, Language);
+ CurContext->addDecl(D);
+ PushDeclContext(S, D);
+ return D;
+}
+
+/// ActOnFinishLinkageSpecification - Complete the definition of
+/// the C++ linkage specification LinkageSpec. If RBraceLoc is
+/// valid, it's the position of the closing '}' brace in a linkage
+/// specification that uses braces.
+Decl *Sema::ActOnFinishLinkageSpecification(Scope *S,
+ Decl *LinkageSpec,
+ SourceLocation RBraceLoc) {
+ if (LinkageSpec) {
+ if (RBraceLoc.isValid()) {
+ LinkageSpecDecl* LSDecl = cast<LinkageSpecDecl>(LinkageSpec);
+ LSDecl->setRBraceLoc(RBraceLoc);
+ }
+ PopDeclContext();
+ }
+ return LinkageSpec;
+}
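
// Illustrative sketch (editorial example, not part of this patch): the two
// linkage-specification forms handled by the callbacks above.
extern "C" int puts(const char *);   // single declaration, no braces
extern "C" {                         // braced form; the '{' and '}' locations are recorded
  int remove(const char *);
}
// extern "Pascal" void f();         // rejected with err_bad_language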
+
+/// \brief Perform semantic analysis for the variable declaration that
+/// occurs within a C++ catch clause, returning the newly-created
+/// variable.
+VarDecl *Sema::BuildExceptionDeclaration(Scope *S,
+ TypeSourceInfo *TInfo,
+ SourceLocation StartLoc,
+ SourceLocation Loc,
+ IdentifierInfo *Name) {
+ bool Invalid = false;
+ QualType ExDeclType = TInfo->getType();
+
+ // Arrays and functions decay.
+ if (ExDeclType->isArrayType())
+ ExDeclType = Context.getArrayDecayedType(ExDeclType);
+ else if (ExDeclType->isFunctionType())
+ ExDeclType = Context.getPointerType(ExDeclType);
+
+ // C++ 15.3p1: The exception-declaration shall not denote an incomplete type.
+ // The exception-declaration shall not denote a pointer or reference to an
+ // incomplete type, other than [cv] void*.
+ // N2844 forbids rvalue references.
+ if (!ExDeclType->isDependentType() && ExDeclType->isRValueReferenceType()) {
+ Diag(Loc, diag::err_catch_rvalue_ref);
+ Invalid = true;
+ }
+
+ QualType BaseType = ExDeclType;
+ int Mode = 0; // 0 for direct type, 1 for pointer, 2 for reference
+ unsigned DK = diag::err_catch_incomplete;
+ if (const PointerType *Ptr = BaseType->getAs<PointerType>()) {
+ BaseType = Ptr->getPointeeType();
+ Mode = 1;
+ DK = diag::err_catch_incomplete_ptr;
+ } else if (const ReferenceType *Ref = BaseType->getAs<ReferenceType>()) {
+ // For the purpose of error recovery, we treat rvalue refs like lvalue refs.
+ BaseType = Ref->getPointeeType();
+ Mode = 2;
+ DK = diag::err_catch_incomplete_ref;
+ }
+ if (!Invalid && (Mode == 0 || !BaseType->isVoidType()) &&
+ !BaseType->isDependentType() && RequireCompleteType(Loc, BaseType, DK))
+ Invalid = true;
+
+ if (!Invalid && !ExDeclType->isDependentType() &&
+ RequireNonAbstractType(Loc, ExDeclType,
+ diag::err_abstract_type_in_decl,
+ AbstractVariableType))
+ Invalid = true;
+
+ // Only the non-fragile NeXT runtime currently supports C++ catches
+ // of ObjC types, and no runtime supports catching ObjC types by value.
+ if (!Invalid && getLangOpts().ObjC1) {
+ QualType T = ExDeclType;
+ if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ T = RT->getPointeeType();
+
+ if (T->isObjCObjectType()) {
+ Diag(Loc, diag::err_objc_object_catch);
+ Invalid = true;
+ } else if (T->isObjCObjectPointerType()) {
+ if (!getLangOpts().ObjCNonFragileABI)
+ Diag(Loc, diag::warn_objc_pointer_cxx_catch_fragile);
+ }
+ }
+
+ VarDecl *ExDecl = VarDecl::Create(Context, CurContext, StartLoc, Loc, Name,
+ ExDeclType, TInfo, SC_None, SC_None);
+ ExDecl->setExceptionVariable(true);
+
+ // In ARC, infer 'retaining' for variables of retainable type.
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(ExDecl))
+ Invalid = true;
+
+ if (!Invalid && !ExDeclType->isDependentType()) {
+ if (const RecordType *recordType = ExDeclType->getAs<RecordType>()) {
+ // C++ [except.handle]p16:
+ // The object declared in an exception-declaration or, if the
+ // exception-declaration does not specify a name, a temporary (12.2) is
+ // copy-initialized (8.5) from the exception object. [...]
+ // The object is destroyed when the handler exits, after the destruction
+ // of any automatic objects initialized within the handler.
+ //
+ // We just pretend to initialize the object with itself, then make sure
+ // it can be destroyed later.
+ QualType initType = ExDeclType;
+
+ InitializedEntity entity =
+ InitializedEntity::InitializeVariable(ExDecl);
+ InitializationKind initKind =
+ InitializationKind::CreateCopy(Loc, SourceLocation());
+
+ Expr *opaqueValue =
+ new (Context) OpaqueValueExpr(Loc, initType, VK_LValue, OK_Ordinary);
+ InitializationSequence sequence(*this, entity, initKind, &opaqueValue, 1);
+ ExprResult result = sequence.Perform(*this, entity, initKind,
+ MultiExprArg(&opaqueValue, 1));
+ if (result.isInvalid())
+ Invalid = true;
+ else {
+ // If the constructor used was non-trivial, set this as the
+ // "initializer".
+ CXXConstructExpr *construct = cast<CXXConstructExpr>(result.take());
+ if (!construct->getConstructor()->isTrivial()) {
+ Expr *init = MaybeCreateExprWithCleanups(construct);
+ ExDecl->setInit(init);
+ }
+
+ // And make sure it's destructible.
+ FinalizeVarWithDestructor(ExDecl, recordType);
+ }
+ }
+ }
+
+ if (Invalid)
+ ExDecl->setInvalidDecl();
+
+ return ExDecl;
+}
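
// Illustrative sketch (editorial example, not part of this patch; 'Error' is an
// invented type): handlers exercising the exception-declaration rules above.
struct Error { int code; };

void handle() {
  try {
    throw Error();
  } catch (const Error &e) {   // OK: reference to a complete class type
  } catch (void *p) {          // OK: cv void* is explicitly permitted
  } catch (...) {
  }
}
// Rejected forms:
//   catch (Error &&e)              // err_catch_rvalue_ref
//   catch (class Incomplete obj)   // err_catch_incomplete: incomplete type caught by value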
+
+/// ActOnExceptionDeclarator - Parsed the exception-declarator in a C++ catch
+/// handler.
+Decl *Sema::ActOnExceptionDeclarator(Scope *S, Declarator &D) {
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ bool Invalid = D.isInvalidType();
+
+ // Check for unexpanded parameter packs.
+ if (TInfo && DiagnoseUnexpandedParameterPack(D.getIdentifierLoc(), TInfo,
+ UPPC_ExceptionType)) {
+ TInfo = Context.getTrivialTypeSourceInfo(Context.IntTy,
+ D.getIdentifierLoc());
+ Invalid = true;
+ }
+
+ IdentifierInfo *II = D.getIdentifier();
+ if (NamedDecl *PrevDecl = LookupSingleName(S, II, D.getIdentifierLoc(),
+ LookupOrdinaryName,
+ ForRedeclaration)) {
+ // The scope should be freshly made just for us. There is just no way
+ // it contains any previous declaration.
+ assert(!S->isDeclScope(PrevDecl));
+ if (PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
+ PrevDecl = 0;
+ }
+ }
+
+ if (D.getCXXScopeSpec().isSet() && !Invalid) {
+ Diag(D.getIdentifierLoc(), diag::err_qualified_catch_declarator)
+ << D.getCXXScopeSpec().getRange();
+ Invalid = true;
+ }
+
+ VarDecl *ExDecl = BuildExceptionDeclaration(S, TInfo,
+ D.getLocStart(),
+ D.getIdentifierLoc(),
+ D.getIdentifier());
+ if (Invalid)
+ ExDecl->setInvalidDecl();
+
+ // Add the exception declaration into this scope.
+ if (II)
+ PushOnScopeChains(ExDecl, S);
+ else
+ CurContext->addDecl(ExDecl);
+
+ ProcessDeclAttributes(S, ExDecl, D);
+ return ExDecl;
+}
+
+Decl *Sema::ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
+ Expr *AssertExpr,
+ Expr *AssertMessageExpr_,
+ SourceLocation RParenLoc) {
+ StringLiteral *AssertMessage = cast<StringLiteral>(AssertMessageExpr_);
+
+ if (!AssertExpr->isTypeDependent() && !AssertExpr->isValueDependent()) {
+ // In a static_assert-declaration, the constant-expression shall be a
+ // constant expression that can be contextually converted to bool.
+ ExprResult Converted = PerformContextuallyConvertToBool(AssertExpr);
+ if (Converted.isInvalid())
+ return 0;
+
+ llvm::APSInt Cond;
+ if (VerifyIntegerConstantExpression(Converted.get(), &Cond,
+ PDiag(diag::err_static_assert_expression_is_not_constant),
+ /*AllowFold=*/false).isInvalid())
+ return 0;
+
+ if (!Cond) {
+ llvm::SmallString<256> MsgBuffer;
+ llvm::raw_svector_ostream Msg(MsgBuffer);
+ AssertMessage->printPretty(Msg, Context, 0, getPrintingPolicy());
+ Diag(StaticAssertLoc, diag::err_static_assert_failed)
+ << Msg.str() << AssertExpr->getSourceRange();
+ }
+ }
+
+ if (DiagnoseUnexpandedParameterPack(AssertExpr, UPPC_StaticAssertExpression))
+ return 0;
+
+ Decl *Decl = StaticAssertDecl::Create(Context, CurContext, StaticAssertLoc,
+ AssertExpr, AssertMessage, RParenLoc);
+
+ CurContext->addDecl(Decl);
+ return Decl;
+}
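
// Illustrative sketch (editorial example, not part of this patch; 'Slot' is an
// invented type): the condition is contextually converted to bool; dependent
// conditions are deferred until instantiation, as the code above shows.
static_assert(sizeof(long) >= sizeof(int), "long must be at least as wide as int");

template <typename T>
struct Slot {
  static_assert(sizeof(T) <= 64, "dependent condition, checked at instantiation");
};
// static_assert(sizeof(int) == 0, "always fails");   // err_static_assert_failed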
+
+/// \brief Perform semantic analysis of the given friend type declaration.
+///
+/// \returns The friend declaration created for the given type.
+FriendDecl *Sema::CheckFriendTypeDecl(SourceLocation Loc,
+ SourceLocation FriendLoc,
+ TypeSourceInfo *TSInfo) {
+ assert(TSInfo && "NULL TypeSourceInfo for friend type declaration");
+
+ QualType T = TSInfo->getType();
+ SourceRange TypeRange = TSInfo->getTypeLoc().getLocalSourceRange();
+
+ // C++03 [class.friend]p2:
+ // An elaborated-type-specifier shall be used in a friend declaration
+ // for a class.*
+ //
+ // * The class-key of the elaborated-type-specifier is required.
+ if (!ActiveTemplateInstantiations.empty()) {
+ // Do not complain about the form of friend template types during
+ // template instantiation; we will already have complained when the
+ // template was declared.
+ } else if (!T->isElaboratedTypeSpecifier()) {
+ // If we evaluated the type to a record type, suggest putting
+ // a tag in front.
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ RecordDecl *RD = RT->getDecl();
+
+ std::string InsertionText = std::string(" ") + RD->getKindName();
+
+ Diag(TypeRange.getBegin(),
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_unelaborated_friend_type :
+ diag::ext_unelaborated_friend_type)
+ << (unsigned) RD->getTagKind()
+ << T
+ << FixItHint::CreateInsertion(PP.getLocForEndOfToken(FriendLoc),
+ InsertionText);
+ } else {
+ Diag(FriendLoc,
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_nonclass_type_friend :
+ diag::ext_nonclass_type_friend)
+ << T
+ << SourceRange(FriendLoc, TypeRange.getEnd());
+ }
+ } else if (T->getAs<EnumType>()) {
+ Diag(FriendLoc,
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_enum_friend :
+ diag::ext_enum_friend)
+ << T
+ << SourceRange(FriendLoc, TypeRange.getEnd());
+ }
+
+ // C++0x [class.friend]p3:
+ // If the type specifier in a friend declaration designates a (possibly
+ // cv-qualified) class type, that class is declared as a friend; otherwise,
+ // the friend declaration is ignored.
+
+ // FIXME: C++0x has some syntactic restrictions on friend type declarations
+ // in [class.friend]p3 that we do not implement.
+
+ return FriendDecl::Create(Context, CurContext, Loc, TSInfo, FriendLoc);
+}
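
// Illustrative sketch (editorial example, not part of this patch; 'Widget' and
// 'Window' are invented types): friend type declarations as discussed above.
class Widget;

class Window {
  friend class Widget;   // OK: elaborated-type-specifier with a class-key
  // friend Widget;      // C++98 extension (ext_unelaborated_friend_type); fix-it adds 'class'
  // friend int;         // C++98 extension (ext_nonclass_type_friend); the friendship is ignored
};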
+
+/// Handle a friend tag declaration where the scope specifier was
+/// templated.
+Decl *Sema::ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
+ unsigned TagSpec, SourceLocation TagLoc,
+ CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ AttributeList *Attr,
+ MultiTemplateParamsArg TempParamLists) {
+ TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
+
+ bool isExplicitSpecialization = false;
+ bool Invalid = false;
+
+ if (TemplateParameterList *TemplateParams
+ = MatchTemplateParametersToScopeSpecifier(TagLoc, NameLoc, SS,
+ TempParamLists.get(),
+ TempParamLists.size(),
+ /*friend*/ true,
+ isExplicitSpecialization,
+ Invalid)) {
+ if (TemplateParams->size() > 0) {
+ // This is a declaration of a class template.
+ if (Invalid)
+ return 0;
+
+ return CheckClassTemplate(S, TagSpec, TUK_Friend, TagLoc,
+ SS, Name, NameLoc, Attr,
+ TemplateParams, AS_public,
+ /*ModulePrivateLoc=*/SourceLocation(),
+ TempParamLists.size() - 1,
+ (TemplateParameterList**) TempParamLists.release()).take();
+ } else {
+ // The "template<>" header is extraneous.
+ Diag(TemplateParams->getTemplateLoc(), diag::err_template_tag_noparams)
+ << TypeWithKeyword::getTagTypeKindName(Kind) << Name;
+ isExplicitSpecialization = true;
+ }
+ }
+
+ if (Invalid) return 0;
+
+ bool isAllExplicitSpecializations = true;
+ for (unsigned I = TempParamLists.size(); I-- > 0; ) {
+ if (TempParamLists.get()[I]->size()) {
+ isAllExplicitSpecializations = false;
+ break;
+ }
+ }
+
+ // FIXME: don't ignore attributes.
+
+ // If it's explicit specializations all the way down, just forget
+ // about the template header and build an appropriate non-templated
+ // friend. TODO: for source fidelity, remember the headers.
+ if (isAllExplicitSpecializations) {
+ if (SS.isEmpty()) {
+ bool Owned = false;
+ bool IsDependent = false;
+ return ActOnTag(S, TagSpec, TUK_Friend, TagLoc, SS, Name, NameLoc,
+ Attr, AS_public,
+ /*ModulePrivateLoc=*/SourceLocation(),
+ MultiTemplateParamsArg(), Owned, IsDependent,
+ /*ScopedEnumKWLoc=*/SourceLocation(),
+ /*ScopedEnumUsesClassTag=*/false,
+ /*UnderlyingType=*/TypeResult());
+ }
+
+ NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
+ ElaboratedTypeKeyword Keyword
+ = TypeWithKeyword::getKeywordForTagTypeKind(Kind);
+ QualType T = CheckTypenameType(Keyword, TagLoc, QualifierLoc,
+ *Name, NameLoc);
+ if (T.isNull())
+ return 0;
+
+ TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
+ if (isa<DependentNameType>(T)) {
+ DependentNameTypeLoc TL = cast<DependentNameTypeLoc>(TSI->getTypeLoc());
+ TL.setElaboratedKeywordLoc(TagLoc);
+ TL.setQualifierLoc(QualifierLoc);
+ TL.setNameLoc(NameLoc);
+ } else {
+ ElaboratedTypeLoc TL = cast<ElaboratedTypeLoc>(TSI->getTypeLoc());
+ TL.setElaboratedKeywordLoc(TagLoc);
+ TL.setQualifierLoc(QualifierLoc);
+ cast<TypeSpecTypeLoc>(TL.getNamedTypeLoc()).setNameLoc(NameLoc);
+ }
+
+ FriendDecl *Friend = FriendDecl::Create(Context, CurContext, NameLoc,
+ TSI, FriendLoc);
+ Friend->setAccess(AS_public);
+ CurContext->addDecl(Friend);
+ return Friend;
+ }
+
+ assert(SS.isNotEmpty() && "valid templated tag with no SS and no direct?");
+
+ // Handle the case of a templated-scope friend class. e.g.
+ // template <class T> class A<T>::B;
+ // FIXME: we don't support these right now.
+ ElaboratedTypeKeyword ETK = TypeWithKeyword::getKeywordForTagTypeKind(Kind);
+ QualType T = Context.getDependentNameType(ETK, SS.getScopeRep(), Name);
+ TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
+ DependentNameTypeLoc TL = cast<DependentNameTypeLoc>(TSI->getTypeLoc());
+ TL.setElaboratedKeywordLoc(TagLoc);
+ TL.setQualifierLoc(SS.getWithLocInContext(Context));
+ TL.setNameLoc(NameLoc);
+
+ FriendDecl *Friend = FriendDecl::Create(Context, CurContext, NameLoc,
+ TSI, FriendLoc);
+ Friend->setAccess(AS_public);
+ Friend->setUnsupportedFriend(true);
+ CurContext->addDecl(Friend);
+ return Friend;
+}
+
+
+/// Handle a friend type declaration. This works in tandem with
+/// ActOnTag.
+///
+/// Notes on friend class templates:
+///
+/// We generally treat friend class declarations as if they were
+/// declaring a class. So, for example, the elaborated type specifier
+/// in a friend declaration is required to obey the restrictions of a
+/// class-head (i.e. no typedefs in the scope chain), template
+/// parameters are required to match up with simple template-ids, &c.
+/// However, unlike when declaring a template specialization, it's
+/// okay to refer to a template specialization without an empty
+/// template parameter declaration, e.g.
+/// friend class A<T>::B<unsigned>;
+/// We permit this as a special case; if there are any template
+/// parameters present at all, require proper matching, i.e.
+/// template <> template <class T> friend class A<int>::B;
+Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
+ MultiTemplateParamsArg TempParams) {
+ SourceLocation Loc = DS.getLocStart();
+
+ assert(DS.isFriendSpecified());
+ assert(DS.getStorageClassSpec() == DeclSpec::SCS_unspecified);
+
+ // Try to convert the decl specifier to a type. This works for
+ // friend templates because ActOnTag never produces a ClassTemplateDecl
+ // for a TUK_Friend.
+ Declarator TheDeclarator(DS, Declarator::MemberContext);
+ TypeSourceInfo *TSI = GetTypeForDeclarator(TheDeclarator, S);
+ QualType T = TSI->getType();
+ if (TheDeclarator.isInvalidType())
+ return 0;
+
+ if (DiagnoseUnexpandedParameterPack(Loc, TSI, UPPC_FriendDeclaration))
+ return 0;
+
+ // This is definitely an error in C++98. It's probably meant to
+ // be forbidden in C++0x, too, but the specification is just
+ // poorly written.
+ //
+ // The problem is with declarations like the following:
+ // template <T> friend A<T>::foo;
+ // where deciding whether a class C is a friend or not now hinges
+ // on whether there exists an instantiation of A that causes
+ // 'foo' to equal C. There are restrictions on class-heads
+ // (which we declare (by fiat) elaborated friend declarations to
+ // be) that make this tractable.
+ //
+ // FIXME: handle "template <> friend class A<T>;", which
+ // is possibly well-formed? Who even knows?
+ if (TempParams.size() && !T->isElaboratedTypeSpecifier()) {
+ Diag(Loc, diag::err_tagless_friend_type_template)
+ << DS.getSourceRange();
+ return 0;
+ }
+
+ // C++98 [class.friend]p1: A friend of a class is a function
+ // or class that is not a member of the class . . .
+ // This is fixed in DR77, which just barely didn't make the C++03
+ // deadline. It's also a very silly restriction that seriously
+ // affects inner classes and which nobody else seems to implement;
+ // thus we never diagnose it, not even in -pedantic.
+ //
+ // But note that we could warn about it: it's always useless to
+ // friend one of your own members (it's not, however, worthless to
+ // friend a member of an arbitrary specialization of your template).
+
+ Decl *D;
+ if (unsigned NumTempParamLists = TempParams.size())
+ D = FriendTemplateDecl::Create(Context, CurContext, Loc,
+ NumTempParamLists,
+ TempParams.release(),
+ TSI,
+ DS.getFriendSpecLoc());
+ else
+ D = CheckFriendTypeDecl(Loc, DS.getFriendSpecLoc(), TSI);
+
+ if (!D)
+ return 0;
+
+ D->setAccess(AS_public);
+ CurContext->addDecl(D);
+
+ return D;
+}
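
// Illustrative sketch (editorial example, not part of this patch; the names are
// invented): friend declarations carrying template headers, per the notes above.
template <typename T> class Container;

class Registry {
  template <typename T> friend class Container;   // befriend the class template
  friend class Container<int>;                     // befriend one specialization, no header
};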
+
+Decl *Sema::ActOnFriendFunctionDecl(Scope *S, Declarator &D,
+ MultiTemplateParamsArg TemplateParams) {
+ const DeclSpec &DS = D.getDeclSpec();
+
+ assert(DS.isFriendSpecified());
+ assert(DS.getStorageClassSpec() == DeclSpec::SCS_unspecified);
+
+ SourceLocation Loc = D.getIdentifierLoc();
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+
+ // C++ [class.friend]p1
+ // A friend of a class is a function or class....
+ // Note that this sees through typedefs, which is intended.
+ // It *doesn't* see through dependent types, which is correct
+ // according to [temp.arg.type]p3:
+ // If a declaration acquires a function type through a
+ // type dependent on a template-parameter and this causes
+ // a declaration that does not use the syntactic form of a
+ // function declarator to have a function type, the program
+ // is ill-formed.
+ if (!TInfo->getType()->isFunctionType()) {
+ Diag(Loc, diag::err_unexpected_friend);
+
+ // It might be worthwhile to try to recover by creating an
+ // appropriate declaration.
+ return 0;
+ }
+
+ // C++ [namespace.memdef]p3
+ // - If a friend declaration in a non-local class first declares a
+ // class or function, the friend class or function is a member
+ // of the innermost enclosing namespace.
+ // - The name of the friend is not found by simple name lookup
+ // until a matching declaration is provided in that namespace
+ // scope (either before or after the class declaration granting
+ // friendship).
+ // - If a friend function is called, its name may be found by the
+ // name lookup that considers functions from namespaces and
+ // classes associated with the types of the function arguments.
+ // - When looking for a prior declaration of a class or a function
+ // declared as a friend, scopes outside the innermost enclosing
+ // namespace scope are not considered.
+
+ CXXScopeSpec &SS = D.getCXXScopeSpec();
+ DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
+ DeclarationName Name = NameInfo.getName();
+ assert(Name);
+
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(Loc, TInfo, UPPC_FriendDeclaration) ||
+ DiagnoseUnexpandedParameterPack(NameInfo, UPPC_FriendDeclaration) ||
+ DiagnoseUnexpandedParameterPack(SS, UPPC_FriendDeclaration))
+ return 0;
+
+ // The context we found the declaration in, or in which we should
+ // create the declaration.
+ DeclContext *DC;
+ Scope *DCScope = S;
+ LookupResult Previous(*this, NameInfo, LookupOrdinaryName,
+ ForRedeclaration);
+
+ // FIXME: there are different rules in local classes
+
+ // There are four cases here.
+ // - There's no scope specifier, in which case we just go to the
+ // appropriate scope and look for a function or function template
+ // there as appropriate.
+ // Recover from invalid scope qualifiers as if they just weren't there.
+ if (SS.isInvalid() || !SS.isSet()) {
+ // C++0x [namespace.memdef]p3:
+ // If the name in a friend declaration is neither qualified nor
+ // a template-id and the declaration is a function or an
+ // elaborated-type-specifier, the lookup to determine whether
+ // the entity has been previously declared shall not consider
+ // any scopes outside the innermost enclosing namespace.
+ // C++0x [class.friend]p11:
+ // If a friend declaration appears in a local class and the name
+ // specified is an unqualified name, a prior declaration is
+ // looked up without considering scopes that are outside the
+ // innermost enclosing non-class scope. For a friend function
+ // declaration, if there is no prior declaration, the program is
+ // ill-formed.
+ bool isLocal = cast<CXXRecordDecl>(CurContext)->isLocalClass();
+ bool isTemplateId = D.getName().getKind() == UnqualifiedId::IK_TemplateId;
+
+ // Find the appropriate context according to the above.
+ DC = CurContext;
+ while (true) {
+ // Skip class contexts. If someone can cite chapter and verse
+ // for this behavior, that would be nice --- it's what GCC and
+ // EDG do, and it seems like a reasonable intent, but the spec
+ // really only says that checks for unqualified existing
+ // declarations should stop at the nearest enclosing namespace,
+ // not that they should only consider the nearest enclosing
+ // namespace.
+ while (DC->isRecord() || DC->isTransparentContext())
+ DC = DC->getParent();
+
+ LookupQualifiedName(Previous, DC);
+
+ // TODO: decide what we think about using declarations.
+ if (isLocal || !Previous.empty())
+ break;
+
+ if (isTemplateId) {
+ if (isa<TranslationUnitDecl>(DC)) break;
+ } else {
+ if (DC->isFileContext()) break;
+ }
+ DC = DC->getParent();
+ }
+
+ // C++ [class.friend]p1: A friend of a class is a function or
+ // class that is not a member of the class . . .
+ // C++11 changes this for both friend types and functions.
+ // Most C++ 98 compilers do seem to give an error here, so
+ // we do, too.
+ if (!Previous.empty() && DC->Equals(CurContext))
+ Diag(DS.getFriendSpecLoc(),
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_friend_is_member :
+ diag::err_friend_is_member);
+
+ DCScope = getScopeForDeclContext(S, DC);
+
+ // C++ [class.friend]p6:
+ // A function can be defined in a friend declaration of a class if and
+ // only if the class is a non-local class (9.8), the function name is
+ // unqualified, and the function has namespace scope.
+ if (isLocal && D.isFunctionDefinition()) {
+ Diag(NameInfo.getBeginLoc(), diag::err_friend_def_in_local_class);
+ }
+
+ // - There's a non-dependent scope specifier, in which case we
+ // compute it and do a previous lookup there for a function
+ // or function template.
+ } else if (!SS.getScopeRep()->isDependent()) {
+ DC = computeDeclContext(SS);
+ if (!DC) return 0;
+
+ if (RequireCompleteDeclContext(SS, DC)) return 0;
+
+ LookupQualifiedName(Previous, DC);
+
+ // Ignore things found implicitly in the wrong scope.
+ // TODO: better diagnostics for this case. Suggesting the right
+ // qualified scope would be nice...
+ LookupResult::Filter F = Previous.makeFilter();
+ while (F.hasNext()) {
+ NamedDecl *D = F.next();
+ if (!DC->InEnclosingNamespaceSetOf(
+ D->getDeclContext()->getRedeclContext()))
+ F.erase();
+ }
+ F.done();
+
+ if (Previous.empty()) {
+ D.setInvalidType();
+ Diag(Loc, diag::err_qualified_friend_not_found)
+ << Name << TInfo->getType();
+ return 0;
+ }
+
+ // C++ [class.friend]p1: A friend of a class is a function or
+ // class that is not a member of the class . . .
+ if (DC->Equals(CurContext))
+ Diag(DS.getFriendSpecLoc(),
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_friend_is_member :
+ diag::err_friend_is_member);
+
+ if (D.isFunctionDefinition()) {
+ // C++ [class.friend]p6:
+ // A function can be defined in a friend declaration of a class if and
+ // only if the class is a non-local class (9.8), the function name is
+ // unqualified, and the function has namespace scope.
+ SemaDiagnosticBuilder DB
+ = Diag(SS.getRange().getBegin(), diag::err_qualified_friend_def);
+
+ DB << SS.getScopeRep();
+ if (DC->isFileContext())
+ DB << FixItHint::CreateRemoval(SS.getRange());
+ SS.clear();
+ }
+
+ // - There's a scope specifier that does not match any template
+ // parameter lists, in which case we use some arbitrary context,
+ // create a method or method template, and wait for instantiation.
+ // - There's a scope specifier that does match some template
+ // parameter lists, which we don't handle right now.
+ } else {
+ if (D.isFunctionDefinition()) {
+ // C++ [class.friend]p6:
+ // A function can be defined in a friend declaration of a class if and
+ // only if the class is a non-local class (9.8), the function name is
+ // unqualified, and the function has namespace scope.
+ Diag(SS.getRange().getBegin(), diag::err_qualified_friend_def)
+ << SS.getScopeRep();
+ }
+
+ DC = CurContext;
+ assert(isa<CXXRecordDecl>(DC) && "friend declaration not in class?");
+ }
+
+ if (!DC->isRecord()) {
+ // This implies that it has to be an operator or function.
+ if (D.getName().getKind() == UnqualifiedId::IK_ConstructorName ||
+ D.getName().getKind() == UnqualifiedId::IK_DestructorName ||
+ D.getName().getKind() == UnqualifiedId::IK_ConversionFunctionId) {
+ Diag(Loc, diag::err_introducing_special_friend) <<
+ (D.getName().getKind() == UnqualifiedId::IK_ConstructorName ? 0 :
+ D.getName().getKind() == UnqualifiedId::IK_DestructorName ? 1 : 2);
+ return 0;
+ }
+ }
+
+ // FIXME: This is an egregious hack to cope with cases where the scope stack
+ // does not contain the declaration context, i.e., in an out-of-line
+ // definition of a class.
+ Scope FakeDCScope(S, Scope::DeclScope, Diags);
+ if (!DCScope) {
+ FakeDCScope.setEntity(DC);
+ DCScope = &FakeDCScope;
+ }
+
+ bool AddToScope = true;
+ NamedDecl *ND = ActOnFunctionDeclarator(DCScope, D, DC, TInfo, Previous,
+ move(TemplateParams), AddToScope);
+ if (!ND) return 0;
+
+ assert(ND->getDeclContext() == DC);
+ assert(ND->getLexicalDeclContext() == CurContext);
+
+ // Add the function declaration to the appropriate lookup tables,
+ // adjusting the redeclarations list as necessary. We don't
+ // want to do this yet if the friending class is dependent.
+ //
+ // Also update the scope-based lookup if the target context's
+ // lookup context is in lexical scope.
+ if (!CurContext->isDependentContext()) {
+ DC = DC->getRedeclContext();
+ DC->makeDeclVisibleInContext(ND);
+ if (Scope *EnclosingScope = getScopeForDeclContext(S, DC))
+ PushOnScopeChains(ND, EnclosingScope, /*AddToContext=*/ false);
+ }
+
+ FriendDecl *FrD = FriendDecl::Create(Context, CurContext,
+ D.getIdentifierLoc(), ND,
+ DS.getFriendSpecLoc());
+ FrD->setAccess(AS_public);
+ CurContext->addDecl(FrD);
+
+ if (ND->isInvalidDecl())
+ FrD->setInvalidDecl();
+ else {
+ FunctionDecl *FD;
+ if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(ND))
+ FD = FTD->getTemplatedDecl();
+ else
+ FD = cast<FunctionDecl>(ND);
+
+ // Mark templated-scope function declarations as unsupported.
+ if (FD->getNumTemplateParameterLists())
+ FrD->setUnsupportedFriend(true);
+ }
+
+ return ND;
+}
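
// Illustrative sketch (editorial example, not part of this patch; the names are
// invented): friend function declarations resolved by the lookup logic above.
namespace lib {
  class Matrix {
    friend Matrix add(const Matrix &, const Matrix &);  // unqualified: declares lib::add
    friend void dump(const Matrix &);                   // later found via argument-dependent lookup
    int rows;
  };
}
// A qualified friend must name a previously declared function:
//   friend void other::helper(Matrix &);   // err_qualified_friend_not_found if absent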
+
+void Sema::SetDeclDeleted(Decl *Dcl, SourceLocation DelLoc) {
+ AdjustDeclIfTemplate(Dcl);
+
+ FunctionDecl *Fn = dyn_cast<FunctionDecl>(Dcl);
+ if (!Fn) {
+ Diag(DelLoc, diag::err_deleted_non_function);
+ return;
+ }
+ if (const FunctionDecl *Prev = Fn->getPreviousDecl()) {
+ Diag(DelLoc, diag::err_deleted_decl_not_first);
+ Diag(Prev->getLocation(), diag::note_previous_declaration);
+ // If the declaration wasn't the first, we delete the function anyway for
+ // recovery.
+ }
+ Fn->setDeletedAsWritten();
+
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Dcl);
+ if (!MD)
+ return;
+
+ // A deleted special member function is trivial if the corresponding
+ // implicitly-declared function would have been.
+ switch (getSpecialMember(MD)) {
+ case CXXInvalid:
+ break;
+ case CXXDefaultConstructor:
+ MD->setTrivial(MD->getParent()->hasTrivialDefaultConstructor());
+ break;
+ case CXXCopyConstructor:
+ MD->setTrivial(MD->getParent()->hasTrivialCopyConstructor());
+ break;
+ case CXXMoveConstructor:
+ MD->setTrivial(MD->getParent()->hasTrivialMoveConstructor());
+ break;
+ case CXXCopyAssignment:
+ MD->setTrivial(MD->getParent()->hasTrivialCopyAssignment());
+ break;
+ case CXXMoveAssignment:
+ MD->setTrivial(MD->getParent()->hasTrivialMoveAssignment());
+ break;
+ case CXXDestructor:
+ MD->setTrivial(MD->getParent()->hasTrivialDestructor());
+ break;
+ }
+}
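
// Illustrative sketch (editorial example, not part of this patch; 'NonCopyable'
// is an invented type): '= delete' must appear on the first declaration, and a
// deleted special member keeps the triviality its implicit version would have had.
struct NonCopyable {
  NonCopyable() = default;
  NonCopyable(const NonCopyable &) = delete;             // deleted copy constructor
  NonCopyable &operator=(const NonCopyable &) = delete;  // deleted copy assignment
};
// void f();
// void f() = delete;   // err_deleted_decl_not_first: not the first declaration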
+
+void Sema::SetDeclDefaulted(Decl *Dcl, SourceLocation DefaultLoc) {
+ CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Dcl);
+
+ if (MD) {
+ if (MD->getParent()->isDependentType()) {
+ MD->setDefaulted();
+ MD->setExplicitlyDefaulted();
+ return;
+ }
+
+ CXXSpecialMember Member = getSpecialMember(MD);
+ if (Member == CXXInvalid) {
+ Diag(DefaultLoc, diag::err_default_special_members);
+ return;
+ }
+
+ MD->setDefaulted();
+ MD->setExplicitlyDefaulted();
+
+ // If this definition appears within the record, do the checking when
+ // the record is complete.
+ const FunctionDecl *Primary = MD;
+ if (MD->getTemplatedKind() != FunctionDecl::TK_NonTemplate)
+ // Find the uninstantiated declaration that actually had the '= default'
+ // on it.
+ MD->getTemplateInstantiationPattern()->isDefined(Primary);
+
+ if (Primary == Primary->getCanonicalDecl())
+ return;
+
+ switch (Member) {
+ case CXXDefaultConstructor: {
+ CXXConstructorDecl *CD = cast<CXXConstructorDecl>(MD);
+ CheckExplicitlyDefaultedDefaultConstructor(CD);
+ if (!CD->isInvalidDecl())
+ DefineImplicitDefaultConstructor(DefaultLoc, CD);
+ break;
+ }
+
+ case CXXCopyConstructor: {
+ CXXConstructorDecl *CD = cast<CXXConstructorDecl>(MD);
+ CheckExplicitlyDefaultedCopyConstructor(CD);
+ if (!CD->isInvalidDecl())
+ DefineImplicitCopyConstructor(DefaultLoc, CD);
+ break;
+ }
+
+ case CXXCopyAssignment: {
+ CheckExplicitlyDefaultedCopyAssignment(MD);
+ if (!MD->isInvalidDecl())
+ DefineImplicitCopyAssignment(DefaultLoc, MD);
+ break;
+ }
+
+ case CXXDestructor: {
+ CXXDestructorDecl *DD = cast<CXXDestructorDecl>(MD);
+ CheckExplicitlyDefaultedDestructor(DD);
+ if (!DD->isInvalidDecl())
+ DefineImplicitDestructor(DefaultLoc, DD);
+ break;
+ }
+
+ case CXXMoveConstructor: {
+ CXXConstructorDecl *CD = cast<CXXConstructorDecl>(MD);
+ CheckExplicitlyDefaultedMoveConstructor(CD);
+ if (!CD->isInvalidDecl())
+ DefineImplicitMoveConstructor(DefaultLoc, CD);
+ break;
+ }
+
+ case CXXMoveAssignment: {
+ CheckExplicitlyDefaultedMoveAssignment(MD);
+ if (!MD->isInvalidDecl())
+ DefineImplicitMoveAssignment(DefaultLoc, MD);
+ break;
+ }
+
+ case CXXInvalid:
+ llvm_unreachable("Invalid special member.");
+ }
+ } else {
+ Diag(DefaultLoc, diag::err_default_special_members);
+ }
+}
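
// Illustrative sketch (editorial example, not part of this patch; 'Buffer' is an
// invented type): only the special member functions may be explicitly defaulted.
struct Buffer {
  Buffer() = default;                       // defaulted default constructor
  Buffer(const Buffer &) = default;         // defaulted copy constructor
  Buffer &operator=(Buffer &&) = default;   // defaulted move assignment
  ~Buffer() = default;                      // defaulted destructor
};
// void reset() = default;   // err_default_special_members: not a special member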
+
+static void SearchForReturnInStmt(Sema &Self, Stmt *S) {
+ for (Stmt::child_range CI = S->children(); CI; ++CI) {
+ Stmt *SubStmt = *CI;
+ if (!SubStmt)
+ continue;
+ if (isa<ReturnStmt>(SubStmt))
+ Self.Diag(SubStmt->getLocStart(),
+ diag::err_return_in_constructor_handler);
+ if (!isa<Expr>(SubStmt))
+ SearchForReturnInStmt(Self, SubStmt);
+ }
+}
+
+void Sema::DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock) {
+ for (unsigned I = 0, E = TryBlock->getNumHandlers(); I != E; ++I) {
+ CXXCatchStmt *Handler = TryBlock->getHandler(I);
+ SearchForReturnInStmt(*this, Handler);
+ }
+}
+
+bool Sema::CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
+ const CXXMethodDecl *Old) {
+ QualType NewTy = New->getType()->getAs<FunctionType>()->getResultType();
+ QualType OldTy = Old->getType()->getAs<FunctionType>()->getResultType();
+
+ if (Context.hasSameType(NewTy, OldTy) ||
+ NewTy->isDependentType() || OldTy->isDependentType())
+ return false;
+
+ // Check if the return types are covariant
+ QualType NewClassTy, OldClassTy;
+
+ /// Both types must be pointers or references to classes.
+ if (const PointerType *NewPT = NewTy->getAs<PointerType>()) {
+ if (const PointerType *OldPT = OldTy->getAs<PointerType>()) {
+ NewClassTy = NewPT->getPointeeType();
+ OldClassTy = OldPT->getPointeeType();
+ }
+ } else if (const ReferenceType *NewRT = NewTy->getAs<ReferenceType>()) {
+ if (const ReferenceType *OldRT = OldTy->getAs<ReferenceType>()) {
+ if (NewRT->getTypeClass() == OldRT->getTypeClass()) {
+ NewClassTy = NewRT->getPointeeType();
+ OldClassTy = OldRT->getPointeeType();
+ }
+ }
+ }
+
+ // The return types are not both pointers to, or references to, a class type.
+ if (NewClassTy.isNull()) {
+ Diag(New->getLocation(),
+ diag::err_different_return_type_for_overriding_virtual_function)
+ << New->getDeclName() << NewTy << OldTy;
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function);
+
+ return true;
+ }
+
+ // C++ [class.virtual]p6:
+ // If the return type of D::f differs from the return type of B::f, the
+ // class type in the return type of D::f shall be complete at the point of
+ // declaration of D::f or shall be the class type D.
+ if (const RecordType *RT = NewClassTy->getAs<RecordType>()) {
+ if (!RT->isBeingDefined() &&
+ RequireCompleteType(New->getLocation(), NewClassTy,
+ PDiag(diag::err_covariant_return_incomplete)
+ << New->getDeclName()))
+ return true;
+ }
+
+ if (!Context.hasSameUnqualifiedType(NewClassTy, OldClassTy)) {
+ // Check if the new class derives from the old class.
+ if (!IsDerivedFrom(NewClassTy, OldClassTy)) {
+ Diag(New->getLocation(),
+ diag::err_covariant_return_not_derived)
+ << New->getDeclName() << NewTy << OldTy;
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function);
+ return true;
+ }
+
+ // Check if the conversion from derived to base is valid.
+ if (CheckDerivedToBaseConversion(NewClassTy, OldClassTy,
+ diag::err_covariant_return_inaccessible_base,
+ diag::err_covariant_return_ambiguous_derived_to_base_conv,
+ // FIXME: Should this point to the return type?
+ New->getLocation(), SourceRange(), New->getDeclName(), 0)) {
+ // FIXME: this note won't trigger for delayed access control
+ // diagnostics, and it's impossible to get an undelayed error
+ // here from access control during the original parse because
+ // the ParsingDeclSpec/ParsingDeclarator are still in scope.
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function);
+ return true;
+ }
+ }
+
+ // The qualifiers of the return types must be the same.
+ if (NewTy.getLocalCVRQualifiers() != OldTy.getLocalCVRQualifiers()) {
+ Diag(New->getLocation(),
+ diag::err_covariant_return_type_different_qualifications)
+ << New->getDeclName() << NewTy << OldTy;
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function);
+ return true;
+ }
+
+ // The new class type must have the same or fewer qualifiers than the old type.
+ if (NewClassTy.isMoreQualifiedThan(OldClassTy)) {
+ Diag(New->getLocation(),
+ diag::err_covariant_return_type_class_type_more_qualified)
+ << New->getDeclName() << NewTy << OldTy;
+ Diag(Old->getLocation(), diag::note_overridden_virtual_function);
+ return true;
+ }
+
+ return false;
+}
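
// Illustrative sketch (editorial example, not part of this patch; the types are
// invented): return types accepted as covariant by the check above.
struct Base { virtual Base *clone() const; virtual ~Base(); };
struct Derived : Base {
  Derived *clone() const;   // OK: Derived* is covariant with Base*
};
// struct Unrelated { };
// struct Bad : Base {
//   Unrelated *clone() const;   // err_covariant_return_not_derived
// };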
+
+/// \brief Mark the given method pure.
+///
+/// \param Method the method to be marked pure.
+///
+/// \param InitRange the source range that covers the "0" initializer.
+bool Sema::CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange) {
+ SourceLocation EndLoc = InitRange.getEnd();
+ if (EndLoc.isValid())
+ Method->setRangeEnd(EndLoc);
+
+ if (Method->isVirtual() || Method->getParent()->isDependentContext()) {
+ Method->setPure();
+ return false;
+ }
+
+ if (!Method->isInvalidDecl())
+ Diag(Method->getLocation(), diag::err_non_virtual_pure)
+ << Method->getDeclName() << InitRange;
+ return true;
+}
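
// Illustrative sketch (editorial example, not part of this patch; 'Shape' is an
// invented type): the '= 0' pure-specifier is only valid on virtual functions.
struct Shape {
  virtual double area() const = 0;   // OK: pure virtual member function
  virtual ~Shape();
};
// struct Bad { double area() const = 0; };   // err_non_virtual_pure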
+
+/// \brief Determine whether the given declaration is a static data member.
+static bool isStaticDataMember(Decl *D) {
+ VarDecl *Var = dyn_cast_or_null<VarDecl>(D);
+ if (!Var)
+ return false;
+
+ return Var->isStaticDataMember();
+}
+/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse
+/// an initializer for the out-of-line declaration 'D'. The scope
+/// is a fresh scope pushed for just this purpose.
+///
+/// After this method is called, according to [C++ 3.4.1p13], if 'D' is a
+/// static data member of class X, names should be looked up in the scope of
+/// class X.
+void Sema::ActOnCXXEnterDeclInitializer(Scope *S, Decl *D) {
+ // If there is no declaration, there was an error parsing it.
+ if (D == 0 || D->isInvalidDecl()) return;
+
+ // We should only get called for declarations with scope specifiers, like:
+ // int foo::bar;
+ assert(D->isOutOfLine());
+ EnterDeclaratorContext(S, D->getDeclContext());
+
+ // If we are parsing the initializer for a static data member, push a
+ // new expression evaluation context that is associated with this static
+ // data member.
+ if (isStaticDataMember(D))
+ PushExpressionEvaluationContext(PotentiallyEvaluated, D);
+}
+
+/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
+/// initializer for the out-of-line declaration 'D'.
+void Sema::ActOnCXXExitDeclInitializer(Scope *S, Decl *D) {
+ // If there is no declaration, there was an error parsing it.
+ if (D == 0 || D->isInvalidDecl()) return;
+
+ if (isStaticDataMember(D))
+ PopExpressionEvaluationContext();
+
+ assert(D->isOutOfLine());
+ ExitDeclaratorContext(S);
+}
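
// Illustrative sketch (editorial example, not part of this patch; 'Config' is an
// invented type): inside the out-of-line initializers below, unqualified names
// are looked up in the scope of the class, as described above.
struct Config {
  static const int Default;
  static int Limit;
};
const int Config::Default = 8;
int Config::Limit = Default * 2;   // 'Default' is found through Config's scope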
+
+/// ActOnCXXConditionDeclarationExpr - Parsed a condition declaration of a
+/// C++ if/switch/while/for statement.
+/// e.g: "if (int x = f()) {...}"
+DeclResult Sema::ActOnCXXConditionDeclaration(Scope *S, Declarator &D) {
+ // C++ 6.4p2:
+ // The declarator shall not specify a function or an array.
+ // The type-specifier-seq shall not contain typedef and shall not declare a
+ // new class or enumeration.
+ assert(D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_typedef &&
+ "Parser allowed 'typedef' as storage class of condition decl.");
+
+ Decl *Dcl = ActOnDeclarator(S, D);
+ if (!Dcl)
+ return true;
+
+ if (isa<FunctionDecl>(Dcl)) { // The declarator shall not specify a function.
+ Diag(Dcl->getLocation(), diag::err_invalid_use_of_function_type)
+ << D.getSourceRange();
+ return true;
+ }
+
+ return Dcl;
+}
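
// Illustrative sketch (editorial example, not part of this patch; the functions
// are invented): condition declarations handled by the routine above.
int next() { static int n = 3; return n--; }

void drain() {
  while (int value = next()) {   // OK: the declared variable is the condition
    (void)value;
  }
  if (int x = next()) { (void)x; }
}
// if (int f()) { }   // err_invalid_use_of_function_type: declares a function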
+
+void Sema::LoadExternalVTableUses() {
+ if (!ExternalSource)
+ return;
+
+ SmallVector<ExternalVTableUse, 4> VTables;
+ ExternalSource->ReadUsedVTables(VTables);
+ SmallVector<VTableUse, 4> NewUses;
+ for (unsigned I = 0, N = VTables.size(); I != N; ++I) {
+ llvm::DenseMap<CXXRecordDecl *, bool>::iterator Pos
+ = VTablesUsed.find(VTables[I].Record);
+ // Even if a definition wasn't required before, it may be required now.
+ if (Pos != VTablesUsed.end()) {
+ if (!Pos->second && VTables[I].DefinitionRequired)
+ Pos->second = true;
+ continue;
+ }
+
+ VTablesUsed[VTables[I].Record] = VTables[I].DefinitionRequired;
+ NewUses.push_back(VTableUse(VTables[I].Record, VTables[I].Location));
+ }
+
+ VTableUses.insert(VTableUses.begin(), NewUses.begin(), NewUses.end());
+}
+
+void Sema::MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
+ bool DefinitionRequired) {
+ // Ignore any vtable uses in unevaluated operands or for classes that do
+ // not have a vtable.
+ if (!Class->isDynamicClass() || Class->isDependentContext() ||
+ CurContext->isDependentContext() ||
+ ExprEvalContexts.back().Context == Unevaluated)
+ return;
+
+ // Try to insert this class into the map.
+ LoadExternalVTableUses();
+ Class = cast<CXXRecordDecl>(Class->getCanonicalDecl());
+ std::pair<llvm::DenseMap<CXXRecordDecl *, bool>::iterator, bool>
+ Pos = VTablesUsed.insert(std::make_pair(Class, DefinitionRequired));
+ if (!Pos.second) {
+ // If we already had an entry, check to see if we are promoting this vtable
+ // to require a definition. If so, we need to reappend to the VTableUses
+ // list, since we may have already processed the first entry.
+ if (DefinitionRequired && !Pos.first->second) {
+ Pos.first->second = true;
+ } else {
+ // Otherwise, we can early exit.
+ return;
+ }
+ }
+
+ // Local classes need to have their virtual members marked
+ // immediately. For all other classes, we mark their virtual members
+ // at the end of the translation unit.
+ if (Class->isLocalClass())
+ MarkVirtualMembersReferenced(Loc, Class);
+ else
+ VTableUses.push_back(std::make_pair(Class, Loc));
+}
+
+bool Sema::DefineUsedVTables() {
+ LoadExternalVTableUses();
+ if (VTableUses.empty())
+ return false;
+
+ // Note: The VTableUses vector could grow as a result of marking
+ // the members of a class as "used", so we check the size each
+ // time through the loop and prefer indices (which are stable) to
+ // iterators (which are not).
+ bool DefinedAnything = false;
+ for (unsigned I = 0; I != VTableUses.size(); ++I) {
+ CXXRecordDecl *Class = VTableUses[I].first->getDefinition();
+ if (!Class)
+ continue;
+
+ SourceLocation Loc = VTableUses[I].second;
+
+ // If this class has a key function, but that key function is
+ // defined in another translation unit, we don't need to emit the
+ // vtable even though we're using it.
+ const CXXMethodDecl *KeyFunction = Context.getKeyFunction(Class);
+ if (KeyFunction && !KeyFunction->hasBody()) {
+ switch (KeyFunction->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ case TSK_ExplicitInstantiationDeclaration:
+ // The key function is in another translation unit.
+ continue;
+
+ case TSK_ExplicitInstantiationDefinition:
+ case TSK_ImplicitInstantiation:
+ // We will be instantiating the key function.
+ break;
+ }
+ } else if (!KeyFunction) {
+ // If we have a class with no key function that is the subject
+ // of an explicit instantiation declaration, suppress the
+ // vtable; it will live with the explicit instantiation
+ // definition.
+ bool IsExplicitInstantiationDeclaration
+ = Class->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration;
+ for (TagDecl::redecl_iterator R = Class->redecls_begin(),
+ REnd = Class->redecls_end();
+ R != REnd; ++R) {
+ TemplateSpecializationKind TSK
+ = cast<CXXRecordDecl>(*R)->getTemplateSpecializationKind();
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
+ IsExplicitInstantiationDeclaration = true;
+ else if (TSK == TSK_ExplicitInstantiationDefinition) {
+ IsExplicitInstantiationDeclaration = false;
+ break;
+ }
+ }
+
+ if (IsExplicitInstantiationDeclaration)
+ continue;
+ }
+
+ // Mark all of the virtual members of this class as referenced, so
+ // that we can build a vtable. Then, tell the AST consumer that a
+ // vtable for this class is required.
+ DefinedAnything = true;
+ MarkVirtualMembersReferenced(Loc, Class);
+ CXXRecordDecl *Canonical = cast<CXXRecordDecl>(Class->getCanonicalDecl());
+ Consumer.HandleVTable(Class, VTablesUsed[Canonical]);
+
+ // Optionally warn if we're emitting a weak vtable.
+ if (Class->getLinkage() == ExternalLinkage &&
+ Class->getTemplateSpecializationKind() != TSK_ImplicitInstantiation) {
+ const FunctionDecl *KeyFunctionDef = 0;
+ if (!KeyFunction ||
+ (KeyFunction->hasBody(KeyFunctionDef) &&
+ KeyFunctionDef->isInlined()))
+ Diag(Class->getLocation(), Class->getTemplateSpecializationKind() ==
+ TSK_ExplicitInstantiationDefinition
+ ? diag::warn_weak_template_vtable : diag::warn_weak_vtable)
+ << Class;
+ }
+ }
+ VTableUses.clear();
+
+ return DefinedAnything;
+}
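+
+// For illustration only (hypothetical class S): when the key function is
+// defined in another translation unit, the vtable is not emitted here even
+// though it is used.
+//
+//   struct S { virtual void key(); virtual ~S(); };  // key() defined in S.cpp
+//   void f(S *s) { delete s; }   // uses S's vtable, but emission is deferred
+//                                // to the TU that defines the key function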
+
+void Sema::MarkVirtualMembersReferenced(SourceLocation Loc,
+ const CXXRecordDecl *RD) {
+ for (CXXRecordDecl::method_iterator i = RD->method_begin(),
+ e = RD->method_end(); i != e; ++i) {
+ CXXMethodDecl *MD = *i;
+
+ // C++ [basic.def.odr]p2:
+ // [...] A virtual member function is used if it is not pure. [...]
+ if (MD->isVirtual() && !MD->isPure())
+ MarkFunctionReferenced(Loc, MD);
+ }
+
+ // Only classes that have virtual bases need a VTT.
+ if (RD->getNumVBases() == 0)
+ return;
+
+ for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
+ e = RD->bases_end(); i != e; ++i) {
+ const CXXRecordDecl *Base =
+ cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
+ if (Base->getNumVBases() == 0)
+ continue;
+ MarkVirtualMembersReferenced(Loc, Base);
+ }
+}
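+
+// Illustrative sketch (hypothetical classes): the VTT recursion above only
+// descends into bases that themselves have virtual bases.
+//
+//   struct A { virtual ~A(); };
+//   struct B : virtual A { };   // B needs a VTT
+//   struct C : B { };           // C needs a VTT too; marking C recurses into
+//                               // B because B has a virtual base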
+
+/// SetIvarInitializers - This routine builds initialization ASTs for the
+/// Objective-C implementation whose ivars need to be initialized.
+void Sema::SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation) {
+ if (!getLangOpts().CPlusPlus)
+ return;
+ if (ObjCInterfaceDecl *OID = ObjCImplementation->getClassInterface()) {
+ SmallVector<ObjCIvarDecl*, 8> ivars;
+ CollectIvarsToConstructOrDestruct(OID, ivars);
+ if (ivars.empty())
+ return;
+ SmallVector<CXXCtorInitializer*, 32> AllToInit;
+ for (unsigned i = 0; i < ivars.size(); i++) {
+ FieldDecl *Field = ivars[i];
+ if (Field->isInvalidDecl())
+ continue;
+
+ CXXCtorInitializer *Member;
+ InitializedEntity InitEntity = InitializedEntity::InitializeMember(Field);
+ InitializationKind InitKind =
+ InitializationKind::CreateDefault(ObjCImplementation->getLocation());
+
+ InitializationSequence InitSeq(*this, InitEntity, InitKind, 0, 0);
+ ExprResult MemberInit =
+ InitSeq.Perform(*this, InitEntity, InitKind, MultiExprArg());
+ MemberInit = MaybeCreateExprWithCleanups(MemberInit);
+ // Note, MemberInit could actually come back empty if no initialization
+ // is required (e.g., because it would call a trivial default constructor)
+ if (!MemberInit.get() || MemberInit.isInvalid())
+ continue;
+
+ Member =
+ new (Context) CXXCtorInitializer(Context, Field, SourceLocation(),
+ SourceLocation(),
+ MemberInit.takeAs<Expr>(),
+ SourceLocation());
+ AllToInit.push_back(Member);
+
+ // Be sure that the destructor is accessible and is marked as referenced.
+ if (const RecordType *RecordTy
+ = Context.getBaseElementType(Field->getType())
+ ->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+ if (CXXDestructorDecl *Destructor = LookupDestructor(RD)) {
+ MarkFunctionReferenced(Field->getLocation(), Destructor);
+ CheckDestructorAccess(Field->getLocation(), Destructor,
+ PDiag(diag::err_access_dtor_ivar)
+ << Context.getBaseElementType(Field->getType()));
+ }
+ }
+ }
+ ObjCImplementation->setIvarInitializers(Context,
+ AllToInit.data(), AllToInit.size());
+ }
+}
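+
+// Illustrative case (hypothetical ivar): in Objective-C++, an ivar of C++
+// class type may need non-trivial construction and destruction; the
+// initializers built above cover it.
+//
+//   @implementation Widget {
+//     std::string title;   // default-constructed via the initializer built here
+//   }
+//   @end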
+
+static
+void DelegatingCycleHelper(CXXConstructorDecl* Ctor,
+ llvm::SmallSet<CXXConstructorDecl*, 4> &Valid,
+ llvm::SmallSet<CXXConstructorDecl*, 4> &Invalid,
+ llvm::SmallSet<CXXConstructorDecl*, 4> &Current,
+ Sema &S) {
+ llvm::SmallSet<CXXConstructorDecl*, 4>::iterator CI = Current.begin(),
+ CE = Current.end();
+ if (Ctor->isInvalidDecl())
+ return;
+
+ const FunctionDecl *FNTarget = 0;
+ CXXConstructorDecl *Target;
+
+ // We ignore the result here since if we don't have a body, Target will be
+ // null below.
+ (void)Ctor->getTargetConstructor()->hasBody(FNTarget);
+ Target
+= const_cast<CXXConstructorDecl*>(cast_or_null<CXXConstructorDecl>(FNTarget));
+
+ CXXConstructorDecl *Canonical = Ctor->getCanonicalDecl(),
+ // Avoid dereferencing a null pointer here.
+ *TCanonical = Target ? Target->getCanonicalDecl() : 0;
+
+ if (!Current.insert(Canonical))
+ return;
+
+ // We know that beyond here, we aren't chaining into a cycle.
+ if (!Target || !Target->isDelegatingConstructor() ||
+ Target->isInvalidDecl() || Valid.count(TCanonical)) {
+ for (CI = Current.begin(), CE = Current.end(); CI != CE; ++CI)
+ Valid.insert(*CI);
+ Current.clear();
+ // We've hit a cycle.
+ } else if (TCanonical == Canonical || Invalid.count(TCanonical) ||
+ Current.count(TCanonical)) {
+ // If we haven't diagnosed this cycle yet, do so now.
+ if (!Invalid.count(TCanonical)) {
+ S.Diag((*Ctor->init_begin())->getSourceLocation(),
+ diag::warn_delegating_ctor_cycle)
+ << Ctor;
+
+ // Don't add a note for a function delegating directly to itself.
+ if (TCanonical != Canonical)
+ S.Diag(Target->getLocation(), diag::note_it_delegates_to);
+
+ CXXConstructorDecl *C = Target;
+ while (C->getCanonicalDecl() != Canonical) {
+ (void)C->getTargetConstructor()->hasBody(FNTarget);
+ assert(FNTarget && "Ctor cycle through bodiless function");
+
+ C
+ = const_cast<CXXConstructorDecl*>(cast<CXXConstructorDecl>(FNTarget));
+ S.Diag(C->getLocation(), diag::note_which_delegates_to);
+ }
+ }
+
+ for (CI = Current.begin(), CE = Current.end(); CI != CE; ++CI)
+ Invalid.insert(*CI);
+ Current.clear();
+ } else {
+ DelegatingCycleHelper(Target, Valid, Invalid, Current, S);
+ }
+}
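+
+// Illustrative cycle (hypothetical struct X) of the kind detected above and
+// reported via warn_delegating_ctor_cycle:
+//
+//   struct X {
+//     X() : X(0) {}      // delegates to X(int)
+//     X(int) : X() {}    // delegates back to X(): a cycle
+//   };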
+
+
+void Sema::CheckDelegatingCtorCycles() {
+ llvm::SmallSet<CXXConstructorDecl*, 4> Valid, Invalid, Current;
+
+ llvm::SmallSet<CXXConstructorDecl*, 4>::iterator CI = Current.begin(),
+ CE = Current.end();
+
+ for (DelegatingCtorDeclsType::iterator
+ I = DelegatingCtorDecls.begin(ExternalSource),
+ E = DelegatingCtorDecls.end();
+ I != E; ++I) {
+ DelegatingCycleHelper(*I, Valid, Invalid, Current, *this);
+ }
+
+ for (CI = Invalid.begin(), CE = Invalid.end(); CI != CE; ++CI)
+ (*CI)->setInvalidDecl();
+}
+
+/// IdentifyCUDATarget - Determine the CUDA compilation target for this function
+Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D) {
+ // Implicitly declared functions (e.g. copy constructors) are
+ // __host__ __device__
+ if (D->isImplicit())
+ return CFT_HostDevice;
+
+ if (D->hasAttr<CUDAGlobalAttr>())
+ return CFT_Global;
+
+ if (D->hasAttr<CUDADeviceAttr>()) {
+ if (D->hasAttr<CUDAHostAttr>())
+ return CFT_HostDevice;
+ else
+ return CFT_Device;
+ }
+
+ return CFT_Host;
+}
+
+bool Sema::CheckCUDATarget(CUDAFunctionTarget CallerTarget,
+ CUDAFunctionTarget CalleeTarget) {
+ // CUDA B.1.1 "The __device__ qualifier declares a function that is...
+ // Callable from the device only."
+ if (CallerTarget == CFT_Host && CalleeTarget == CFT_Device)
+ return true;
+
+ // CUDA B.1.2 "The __global__ qualifier declares a function that is...
+ // Callable from the host only."
+ // CUDA B.1.3 "The __host__ qualifier declares a function that is...
+ // Callable from the host only."
+ if ((CallerTarget == CFT_Device || CallerTarget == CFT_Global) &&
+ (CalleeTarget == CFT_Host || CalleeTarget == CFT_Global))
+ return true;
+
+ if (CallerTarget == CFT_HostDevice && CalleeTarget != CFT_HostDevice)
+ return true;
+
+ return false;
+}
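+
+// Sketch of the effect of the checks above ("rejected" means CheckCUDATarget
+// returns true):
+//
+//   __host__              -> __device__            rejected (B.1.1)
+//   __device__/__global__ -> __host__/__global__   rejected (B.1.2, B.1.3)
+//   __host__ __device__   -> anything else         rejected
+//   __host__              -> __global__            allowed (kernel launch)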
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
new file mode 100644
index 0000000..a942d49
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaDeclObjC.cpp
@@ -0,0 +1,3121 @@
+//===--- SemaDeclObjC.cpp - Semantic Analysis for ObjC Declarations -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for Objective C declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/DenseSet.h"
+
+using namespace clang;
+
+/// Check whether the given method, which must be in the 'init'
+/// family, is a valid member of that family.
+///
+/// \param receiverTypeIfCall - if null, check this as if declaring it;
+/// if non-null, check this as if making a call to it with the given
+/// receiver type
+///
+/// \return true to indicate that there was an error and appropriate
+/// actions were taken
+bool Sema::checkInitMethod(ObjCMethodDecl *method,
+ QualType receiverTypeIfCall) {
+ if (method->isInvalidDecl()) return true;
+
+ // This castAs is safe: methods that don't return an object
+ // pointer won't be inferred as inits and will reject an explicit
+ // objc_method_family(init).
+
+ // We ignore protocols here. Should we? What about Class?
+
+ const ObjCObjectType *result = method->getResultType()
+ ->castAs<ObjCObjectPointerType>()->getObjectType();
+
+ if (result->isObjCId()) {
+ return false;
+ } else if (result->isObjCClass()) {
+ // fall through: always an error
+ } else {
+ ObjCInterfaceDecl *resultClass = result->getInterface();
+ assert(resultClass && "unexpected object type!");
+
+ // It's okay for the result type to still be a forward declaration
+ // if we're checking an interface declaration.
+ if (!resultClass->hasDefinition()) {
+ if (receiverTypeIfCall.isNull() &&
+ !isa<ObjCImplementationDecl>(method->getDeclContext()))
+ return false;
+
+ // Otherwise, we try to compare class types.
+ } else {
+ // If this method was declared in a protocol, we can't check
+ // anything unless we have a receiver type that's an interface.
+ const ObjCInterfaceDecl *receiverClass = 0;
+ if (isa<ObjCProtocolDecl>(method->getDeclContext())) {
+ if (receiverTypeIfCall.isNull())
+ return false;
+
+ receiverClass = receiverTypeIfCall->castAs<ObjCObjectPointerType>()
+ ->getInterfaceDecl();
+
+ // This can be null for calls to e.g. id<Foo>.
+ if (!receiverClass) return false;
+ } else {
+ receiverClass = method->getClassInterface();
+ assert(receiverClass && "method not associated with a class!");
+ }
+
+ // If either class is a subclass of the other, it's fine.
+ if (receiverClass->isSuperClassOf(resultClass) ||
+ resultClass->isSuperClassOf(receiverClass))
+ return false;
+ }
+ }
+
+ SourceLocation loc = method->getLocation();
+
+ // If we're in a system header, and this is not a call, just make
+ // the method unusable.
+ if (receiverTypeIfCall.isNull() && getSourceManager().isInSystemHeader(loc)) {
+ method->addAttr(new (Context) UnavailableAttr(loc, Context,
+ "init method returns a type unrelated to its receiver type"));
+ return true;
+ }
+
+ // Otherwise, it's an error.
+ Diag(loc, diag::err_arc_init_method_unrelated_result_type);
+ method->setInvalidDecl();
+ return true;
+}
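+
+// For illustration (hypothetical interfaces): the check above flags an init
+// whose result type is unrelated to its receiver's class.
+//
+//   @interface Foo : NSObject
+//   - (NSString *)init;   // result type unrelated to Foo: diagnosed here
+//   @end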
+
+void Sema::CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
+ const ObjCMethodDecl *Overridden,
+ bool IsImplementation) {
+ if (Overridden->hasRelatedResultType() &&
+ !NewMethod->hasRelatedResultType()) {
+ // This can only happen when the method follows a naming convention that
+ // implies a related result type, and the original (overridden) method has
+ // a suitable return type, but the new (overriding) method does not have
+ // a suitable return type.
+ QualType ResultType = NewMethod->getResultType();
+ SourceRange ResultTypeRange;
+ if (const TypeSourceInfo *ResultTypeInfo
+ = NewMethod->getResultTypeSourceInfo())
+ ResultTypeRange = ResultTypeInfo->getTypeLoc().getSourceRange();
+
+ // Figure out which class this method is part of, if any.
+ ObjCInterfaceDecl *CurrentClass
+ = dyn_cast<ObjCInterfaceDecl>(NewMethod->getDeclContext());
+ if (!CurrentClass) {
+ DeclContext *DC = NewMethod->getDeclContext();
+ if (ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(DC))
+ CurrentClass = Cat->getClassInterface();
+ else if (ObjCImplDecl *Impl = dyn_cast<ObjCImplDecl>(DC))
+ CurrentClass = Impl->getClassInterface();
+ else if (ObjCCategoryImplDecl *CatImpl
+ = dyn_cast<ObjCCategoryImplDecl>(DC))
+ CurrentClass = CatImpl->getClassInterface();
+ }
+
+ if (CurrentClass) {
+ Diag(NewMethod->getLocation(),
+ diag::warn_related_result_type_compatibility_class)
+ << Context.getObjCInterfaceType(CurrentClass)
+ << ResultType
+ << ResultTypeRange;
+ } else {
+ Diag(NewMethod->getLocation(),
+ diag::warn_related_result_type_compatibility_protocol)
+ << ResultType
+ << ResultTypeRange;
+ }
+
+ if (ObjCMethodFamily Family = Overridden->getMethodFamily())
+ Diag(Overridden->getLocation(),
+ diag::note_related_result_type_overridden_family)
+ << Family;
+ else
+ Diag(Overridden->getLocation(),
+ diag::note_related_result_type_overridden);
+ }
+ if (getLangOpts().ObjCAutoRefCount) {
+ if ((NewMethod->hasAttr<NSReturnsRetainedAttr>() !=
+ Overridden->hasAttr<NSReturnsRetainedAttr>())) {
+ Diag(NewMethod->getLocation(),
+ diag::err_nsreturns_retained_attribute_mismatch) << 1;
+ Diag(Overridden->getLocation(), diag::note_previous_decl)
+ << "method";
+ }
+ if ((NewMethod->hasAttr<NSReturnsNotRetainedAttr>() !=
+ Overridden->hasAttr<NSReturnsNotRetainedAttr>())) {
+ Diag(NewMethod->getLocation(),
+ diag::err_nsreturns_retained_attribute_mismatch) << 0;
+ Diag(Overridden->getLocation(), diag::note_previous_decl)
+ << "method";
+ }
+ ObjCMethodDecl::param_const_iterator oi = Overridden->param_begin();
+ for (ObjCMethodDecl::param_iterator
+ ni = NewMethod->param_begin(), ne = NewMethod->param_end();
+ ni != ne; ++ni, ++oi) {
+ const ParmVarDecl *oldDecl = (*oi);
+ ParmVarDecl *newDecl = (*ni);
+ if (newDecl->hasAttr<NSConsumedAttr>() !=
+ oldDecl->hasAttr<NSConsumedAttr>()) {
+ Diag(newDecl->getLocation(),
+ diag::err_nsconsumed_attribute_mismatch);
+ Diag(oldDecl->getLocation(), diag::note_previous_decl)
+ << "parameter";
+ }
+ }
+ }
+}
+
+/// \brief Check a method declaration for compatibility with the Objective-C
+/// ARC conventions.
+static bool CheckARCMethodDecl(Sema &S, ObjCMethodDecl *method) {
+ ObjCMethodFamily family = method->getMethodFamily();
+ switch (family) {
+ case OMF_None:
+ case OMF_dealloc:
+ case OMF_finalize:
+ case OMF_retain:
+ case OMF_release:
+ case OMF_autorelease:
+ case OMF_retainCount:
+ case OMF_self:
+ case OMF_performSelector:
+ return false;
+
+ case OMF_init:
+ // If the method doesn't obey the init rules, don't bother annotating it.
+ if (S.checkInitMethod(method, QualType()))
+ return true;
+
+ method->addAttr(new (S.Context) NSConsumesSelfAttr(SourceLocation(),
+ S.Context));
+
+ // Don't add a second copy of this attribute, but otherwise don't
+ // let it be suppressed.
+ if (method->hasAttr<NSReturnsRetainedAttr>())
+ return false;
+ break;
+
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ case OMF_new:
+ if (method->hasAttr<NSReturnsRetainedAttr>() ||
+ method->hasAttr<NSReturnsNotRetainedAttr>() ||
+ method->hasAttr<NSReturnsAutoreleasedAttr>())
+ return false;
+ break;
+ }
+
+ method->addAttr(new (S.Context) NSReturnsRetainedAttr(SourceLocation(),
+ S.Context));
+ return false;
+}
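+
+// Sketch of what the code above implies under ARC (hypothetical selector):
+//
+//   - (id)initWithName:(NSString *)name;
+//
+// is treated as if marked ns_consumes_self and ns_returns_retained, while the
+// alloc/copy/mutableCopy/new families only receive the implicit
+// ns_returns_retained when no explicit returns-ownership attribute is present.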
+
+static void DiagnoseObjCImplementedDeprecations(Sema &S,
+ NamedDecl *ND,
+ SourceLocation ImplLoc,
+ int select) {
+ if (ND && ND->isDeprecated()) {
+ S.Diag(ImplLoc, diag::warn_deprecated_def) << select;
+ if (select == 0)
+ S.Diag(ND->getLocation(), diag::note_method_declared_at)
+ << ND->getDeclName();
+ else
+ S.Diag(ND->getLocation(), diag::note_previous_decl) << "class";
+ }
+}
+
+/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
+/// pool.
+void Sema::AddAnyMethodToGlobalPool(Decl *D) {
+ ObjCMethodDecl *MDecl = dyn_cast_or_null<ObjCMethodDecl>(D);
+
+ // If we don't have a valid method decl, simply return.
+ if (!MDecl)
+ return;
+ if (MDecl->isInstanceMethod())
+ AddInstanceMethodToGlobalPool(MDecl, true);
+ else
+ AddFactoryMethodToGlobalPool(MDecl, true);
+}
+
+/// ActOnStartOfObjCMethodDef - This routine sets up the parameters, both
+/// invisible and user-declared, in the method definition's AST.
+void Sema::ActOnStartOfObjCMethodDef(Scope *FnBodyScope, Decl *D) {
+ assert(getCurMethodDecl() == 0 && "Method parsing confused");
+ ObjCMethodDecl *MDecl = dyn_cast_or_null<ObjCMethodDecl>(D);
+
+ // If we don't have a valid method decl, simply return.
+ if (!MDecl)
+ return;
+
+ // Allow all of Sema to see that we are entering a method definition.
+ PushDeclContext(FnBodyScope, MDecl);
+ PushFunctionScope();
+
+ // Create Decl objects for each parameter, entering them in the scope for
+ // binding to their use.
+
+ // Insert the invisible arguments, self and _cmd!
+ MDecl->createImplicitParams(Context, MDecl->getClassInterface());
+
+ PushOnScopeChains(MDecl->getSelfDecl(), FnBodyScope);
+ PushOnScopeChains(MDecl->getCmdDecl(), FnBodyScope);
+
+ // Introduce all of the other parameters into this scope.
+ for (ObjCMethodDecl::param_iterator PI = MDecl->param_begin(),
+ E = MDecl->param_end(); PI != E; ++PI) {
+ ParmVarDecl *Param = (*PI);
+ if (!Param->isInvalidDecl() &&
+ RequireCompleteType(Param->getLocation(), Param->getType(),
+ diag::err_typecheck_decl_incomplete_type))
+ Param->setInvalidDecl();
+ if ((*PI)->getIdentifier())
+ PushOnScopeChains(*PI, FnBodyScope);
+ }
+
+ // In ARC, disallow definition of retain/release/autorelease/retainCount
+ if (getLangOpts().ObjCAutoRefCount) {
+ switch (MDecl->getMethodFamily()) {
+ case OMF_retain:
+ case OMF_retainCount:
+ case OMF_release:
+ case OMF_autorelease:
+ Diag(MDecl->getLocation(), diag::err_arc_illegal_method_def)
+ << MDecl->getSelector();
+ break;
+
+ case OMF_None:
+ case OMF_dealloc:
+ case OMF_finalize:
+ case OMF_alloc:
+ case OMF_init:
+ case OMF_mutableCopy:
+ case OMF_copy:
+ case OMF_new:
+ case OMF_self:
+ case OMF_performSelector:
+ break;
+ }
+ }
+
+ // Warn on deprecated methods under -Wdeprecated-implementations,
+ // and prepare for warning on missing super calls.
+ if (ObjCInterfaceDecl *IC = MDecl->getClassInterface()) {
+ if (ObjCMethodDecl *IMD =
+ IC->lookupMethod(MDecl->getSelector(), MDecl->isInstanceMethod()))
+ DiagnoseObjCImplementedDeprecations(*this,
+ dyn_cast<NamedDecl>(IMD),
+ MDecl->getLocation(), 0);
+
+ // If this is "dealloc" or "finalize", set some bit here.
+ // Then in ActOnSuperMessage() (SemaExprObjC), set it back to false.
+ // Finally, in ActOnFinishFunctionBody() (SemaDecl), warn if flag is set.
+ // Only do this if the current class actually has a superclass.
+ if (IC->getSuperClass()) {
+ ObjCShouldCallSuperDealloc =
+ !(Context.getLangOpts().ObjCAutoRefCount ||
+ Context.getLangOpts().getGC() == LangOptions::GCOnly) &&
+ MDecl->getMethodFamily() == OMF_dealloc;
+ ObjCShouldCallSuperFinalize =
+ Context.getLangOpts().getGC() != LangOptions::NonGC &&
+ MDecl->getMethodFamily() == OMF_finalize;
+ }
+ }
+}
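+
+// Sketch of the implicit parameters introduced above: inside an instance
+// method of a (hypothetical) class Foo, the body behaves as if it began with
+//
+//   Foo *self;   // the receiver ('Class self' in a class method)
+//   SEL _cmd;    // the selector being invoked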
+
+namespace {
+
+// Callback to only accept typo corrections that are Objective-C classes.
+// If an ObjCInterfaceDecl* is given to the constructor, then the validation
+// function will reject corrections to that class.
+class ObjCInterfaceValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ ObjCInterfaceValidatorCCC() : CurrentIDecl(0) {}
+ explicit ObjCInterfaceValidatorCCC(ObjCInterfaceDecl *IDecl)
+ : CurrentIDecl(IDecl) {}
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ ObjCInterfaceDecl *ID = candidate.getCorrectionDeclAs<ObjCInterfaceDecl>();
+ return ID && !declaresSameEntity(ID, CurrentIDecl);
+ }
+
+ private:
+ ObjCInterfaceDecl *CurrentIDecl;
+};
+
+}
+
+Decl *Sema::
+ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
+ IdentifierInfo *ClassName, SourceLocation ClassLoc,
+ IdentifierInfo *SuperName, SourceLocation SuperLoc,
+ Decl * const *ProtoRefs, unsigned NumProtoRefs,
+ const SourceLocation *ProtoLocs,
+ SourceLocation EndProtoLoc, AttributeList *AttrList) {
+ assert(ClassName && "Missing class identifier");
+
+ // Check for another declaration kind with the same name.
+ NamedDecl *PrevDecl = LookupSingleName(TUScope, ClassName, ClassLoc,
+ LookupOrdinaryName, ForRedeclaration);
+
+ if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
+ Diag(ClassLoc, diag::err_redefinition_different_kind) << ClassName;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ }
+
+ // Create a declaration to describe this @interface.
+ ObjCInterfaceDecl* PrevIDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
+ ObjCInterfaceDecl *IDecl
+ = ObjCInterfaceDecl::Create(Context, CurContext, AtInterfaceLoc, ClassName,
+ PrevIDecl, ClassLoc);
+
+ if (PrevIDecl) {
+ // Class already seen. Was it a definition?
+ if (ObjCInterfaceDecl *Def = PrevIDecl->getDefinition()) {
+ Diag(AtInterfaceLoc, diag::err_duplicate_class_def)
+ << PrevIDecl->getDeclName();
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ IDecl->setInvalidDecl();
+ }
+ }
+
+ if (AttrList)
+ ProcessDeclAttributeList(TUScope, IDecl, AttrList);
+ PushOnScopeChains(IDecl, TUScope);
+
+ // Start the definition of this class. If we're in a redefinition case, there
+ // may already be a definition, so we'll end up adding to it.
+ if (!IDecl->hasDefinition())
+ IDecl->startDefinition();
+
+ if (SuperName) {
+ // Check if a different kind of symbol declared in this scope.
+ PrevDecl = LookupSingleName(TUScope, SuperName, SuperLoc,
+ LookupOrdinaryName);
+
+ if (!PrevDecl) {
+ // Try to correct for a typo in the superclass name without correcting
+ // to the class we're defining.
+ ObjCInterfaceValidatorCCC Validator(IDecl);
+ if (TypoCorrection Corrected = CorrectTypo(
+ DeclarationNameInfo(SuperName, SuperLoc), LookupOrdinaryName, TUScope,
+ NULL, Validator)) {
+ PrevDecl = Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>();
+ Diag(SuperLoc, diag::err_undef_superclass_suggest)
+ << SuperName << ClassName << PrevDecl->getDeclName();
+ Diag(PrevDecl->getLocation(), diag::note_previous_decl)
+ << PrevDecl->getDeclName();
+ }
+ }
+
+ if (declaresSameEntity(PrevDecl, IDecl)) {
+ Diag(SuperLoc, diag::err_recursive_superclass)
+ << SuperName << ClassName << SourceRange(AtInterfaceLoc, ClassLoc);
+ IDecl->setEndOfDefinitionLoc(ClassLoc);
+ } else {
+ ObjCInterfaceDecl *SuperClassDecl =
+ dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
+
+ // Diagnose classes that inherit from deprecated classes.
+ if (SuperClassDecl)
+ (void)DiagnoseUseOfDecl(SuperClassDecl, SuperLoc);
+
+ if (PrevDecl && SuperClassDecl == 0) {
+ // The previous declaration was not a class decl. Check if we have a
+ // typedef. If we do, get the underlying class type.
+ if (const TypedefNameDecl *TDecl =
+ dyn_cast_or_null<TypedefNameDecl>(PrevDecl)) {
+ QualType T = TDecl->getUnderlyingType();
+ if (T->isObjCObjectType()) {
+ if (NamedDecl *IDecl = T->getAs<ObjCObjectType>()->getInterface())
+ SuperClassDecl = dyn_cast<ObjCInterfaceDecl>(IDecl);
+ }
+ }
+
+ // This handles the following case:
+ //
+ // typedef int SuperClass;
+ // @interface MyClass : SuperClass {} @end
+ //
+ if (!SuperClassDecl) {
+ Diag(SuperLoc, diag::err_redefinition_different_kind) << SuperName;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ }
+ }
+
+ if (!dyn_cast_or_null<TypedefNameDecl>(PrevDecl)) {
+ if (!SuperClassDecl)
+ Diag(SuperLoc, diag::err_undef_superclass)
+ << SuperName << ClassName << SourceRange(AtInterfaceLoc, ClassLoc);
+ else if (RequireCompleteType(SuperLoc,
+ Context.getObjCInterfaceType(SuperClassDecl),
+ PDiag(diag::err_forward_superclass)
+ << SuperClassDecl->getDeclName()
+ << ClassName
+ << SourceRange(AtInterfaceLoc, ClassLoc))) {
+ SuperClassDecl = 0;
+ }
+ }
+ IDecl->setSuperClass(SuperClassDecl);
+ IDecl->setSuperClassLoc(SuperLoc);
+ IDecl->setEndOfDefinitionLoc(SuperLoc);
+ }
+ } else { // we have a root class.
+ IDecl->setEndOfDefinitionLoc(ClassLoc);
+ }
+
+ // Check then save referenced protocols.
+ if (NumProtoRefs) {
+ IDecl->setProtocolList((ObjCProtocolDecl**)ProtoRefs, NumProtoRefs,
+ ProtoLocs, Context);
+ IDecl->setEndOfDefinitionLoc(EndProtoLoc);
+ }
+
+ CheckObjCDeclScope(IDecl);
+ return ActOnObjCContainerStartDefinition(IDecl);
+}
+
+/// ActOnCompatiblityAlias - This action is called after complete parsing of
+/// a @compatibility_alias declaration. It sets up the alias relationships.
+Decl *Sema::ActOnCompatiblityAlias(SourceLocation AtLoc,
+ IdentifierInfo *AliasName,
+ SourceLocation AliasLocation,
+ IdentifierInfo *ClassName,
+ SourceLocation ClassLocation) {
+ // Look for previous declaration of alias name
+ NamedDecl *ADecl = LookupSingleName(TUScope, AliasName, AliasLocation,
+ LookupOrdinaryName, ForRedeclaration);
+ if (ADecl) {
+ if (isa<ObjCCompatibleAliasDecl>(ADecl))
+ Diag(AliasLocation, diag::warn_previous_alias_decl);
+ else
+ Diag(AliasLocation, diag::err_conflicting_aliasing_type) << AliasName;
+ Diag(ADecl->getLocation(), diag::note_previous_declaration);
+ return 0;
+ }
+ // Check for class declaration
+ NamedDecl *CDeclU = LookupSingleName(TUScope, ClassName, ClassLocation,
+ LookupOrdinaryName, ForRedeclaration);
+ if (const TypedefNameDecl *TDecl =
+ dyn_cast_or_null<TypedefNameDecl>(CDeclU)) {
+ QualType T = TDecl->getUnderlyingType();
+ if (T->isObjCObjectType()) {
+ if (NamedDecl *IDecl = T->getAs<ObjCObjectType>()->getInterface()) {
+ ClassName = IDecl->getIdentifier();
+ CDeclU = LookupSingleName(TUScope, ClassName, ClassLocation,
+ LookupOrdinaryName, ForRedeclaration);
+ }
+ }
+ }
+ ObjCInterfaceDecl *CDecl = dyn_cast_or_null<ObjCInterfaceDecl>(CDeclU);
+ if (CDecl == 0) {
+ Diag(ClassLocation, diag::warn_undef_interface) << ClassName;
+ if (CDeclU)
+ Diag(CDeclU->getLocation(), diag::note_previous_declaration);
+ return 0;
+ }
+
+ // Everything checked out, instantiate a new alias declaration AST.
+ ObjCCompatibleAliasDecl *AliasDecl =
+ ObjCCompatibleAliasDecl::Create(Context, CurContext, AtLoc, AliasName, CDecl);
+
+ if (!CheckObjCDeclScope(AliasDecl))
+ PushOnScopeChains(AliasDecl, TUScope);
+
+ return AliasDecl;
+}
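+
+// The construct handled above, for illustration (hypothetical names):
+//
+//   @interface NewWidget : NSObject @end
+//   @compatibility_alias OldWidget NewWidget;   // OldWidget now names NewWidget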
+
+bool Sema::CheckForwardProtocolDeclarationForCircularDependency(
+ IdentifierInfo *PName,
+ SourceLocation &Ploc, SourceLocation PrevLoc,
+ const ObjCList<ObjCProtocolDecl> &PList) {
+
+ bool res = false;
+ for (ObjCList<ObjCProtocolDecl>::iterator I = PList.begin(),
+ E = PList.end(); I != E; ++I) {
+ if (ObjCProtocolDecl *PDecl = LookupProtocol((*I)->getIdentifier(),
+ Ploc)) {
+ if (PDecl->getIdentifier() == PName) {
+ Diag(Ploc, diag::err_protocol_has_circular_dependency);
+ Diag(PrevLoc, diag::note_previous_definition);
+ res = true;
+ }
+
+ if (!PDecl->hasDefinition())
+ continue;
+
+ if (CheckForwardProtocolDeclarationForCircularDependency(PName, Ploc,
+ PDecl->getLocation(), PDecl->getReferencedProtocols()))
+ res = true;
+ }
+ }
+ return res;
+}
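+
+// Illustrative cycle (hypothetical protocols) diagnosed by the walk above:
+//
+//   @protocol A;
+//   @protocol B <A> @end
+//   @protocol A <B> @end   // err_protocol_has_circular_dependency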
+
+Decl *
+Sema::ActOnStartProtocolInterface(SourceLocation AtProtoInterfaceLoc,
+ IdentifierInfo *ProtocolName,
+ SourceLocation ProtocolLoc,
+ Decl * const *ProtoRefs,
+ unsigned NumProtoRefs,
+ const SourceLocation *ProtoLocs,
+ SourceLocation EndProtoLoc,
+ AttributeList *AttrList) {
+ bool err = false;
+ // FIXME: Deal with AttrList.
+ assert(ProtocolName && "Missing protocol identifier");
+ ObjCProtocolDecl *PrevDecl = LookupProtocol(ProtocolName, ProtocolLoc,
+ ForRedeclaration);
+ ObjCProtocolDecl *PDecl = 0;
+ if (ObjCProtocolDecl *Def = PrevDecl? PrevDecl->getDefinition() : 0) {
+ // If we already have a definition, complain.
+ Diag(ProtocolLoc, diag::warn_duplicate_protocol_def) << ProtocolName;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+
+ // Create a new protocol that is completely distinct from previous
+ // declarations, and do not make this protocol available for name lookup.
+ // That way, we'll end up completely ignoring the duplicate.
+ // FIXME: Can we turn this into an error?
+ PDecl = ObjCProtocolDecl::Create(Context, CurContext, ProtocolName,
+ ProtocolLoc, AtProtoInterfaceLoc,
+ /*PrevDecl=*/0);
+ PDecl->startDefinition();
+ } else {
+ if (PrevDecl) {
+ // Check for circular dependencies among protocol declarations. This can
+ // only happen if this protocol was forward-declared.
+ ObjCList<ObjCProtocolDecl> PList;
+ PList.set((ObjCProtocolDecl *const*)ProtoRefs, NumProtoRefs, Context);
+ err = CheckForwardProtocolDeclarationForCircularDependency(
+ ProtocolName, ProtocolLoc, PrevDecl->getLocation(), PList);
+ }
+
+ // Create the new declaration.
+ PDecl = ObjCProtocolDecl::Create(Context, CurContext, ProtocolName,
+ ProtocolLoc, AtProtoInterfaceLoc,
+ /*PrevDecl=*/PrevDecl);
+
+ PushOnScopeChains(PDecl, TUScope);
+ PDecl->startDefinition();
+ }
+
+ if (AttrList)
+ ProcessDeclAttributeList(TUScope, PDecl, AttrList);
+
+ // Merge attributes from previous declarations.
+ if (PrevDecl)
+ mergeDeclAttributes(PDecl, PrevDecl);
+
+ if (!err && NumProtoRefs ) {
+ /// Check then save referenced protocols.
+ PDecl->setProtocolList((ObjCProtocolDecl**)ProtoRefs, NumProtoRefs,
+ ProtoLocs, Context);
+ }
+
+ CheckObjCDeclScope(PDecl);
+ return ActOnObjCContainerStartDefinition(PDecl);
+}
+
+/// FindProtocolDeclaration - This routine looks up protocols and
+/// issues an error if they are not declared. It returns the list of
+/// protocol declarations in its 'Protocols' argument.
+void
+Sema::FindProtocolDeclaration(bool WarnOnDeclarations,
+ const IdentifierLocPair *ProtocolId,
+ unsigned NumProtocols,
+ SmallVectorImpl<Decl *> &Protocols) {
+ for (unsigned i = 0; i != NumProtocols; ++i) {
+ ObjCProtocolDecl *PDecl = LookupProtocol(ProtocolId[i].first,
+ ProtocolId[i].second);
+ if (!PDecl) {
+ DeclFilterCCC<ObjCProtocolDecl> Validator;
+ TypoCorrection Corrected = CorrectTypo(
+ DeclarationNameInfo(ProtocolId[i].first, ProtocolId[i].second),
+ LookupObjCProtocolName, TUScope, NULL, Validator);
+ if ((PDecl = Corrected.getCorrectionDeclAs<ObjCProtocolDecl>())) {
+ Diag(ProtocolId[i].second, diag::err_undeclared_protocol_suggest)
+ << ProtocolId[i].first << Corrected.getCorrection();
+ Diag(PDecl->getLocation(), diag::note_previous_decl)
+ << PDecl->getDeclName();
+ }
+ }
+
+ if (!PDecl) {
+ Diag(ProtocolId[i].second, diag::err_undeclared_protocol)
+ << ProtocolId[i].first;
+ continue;
+ }
+
+ (void)DiagnoseUseOfDecl(PDecl, ProtocolId[i].second);
+
+ // If this is a forward declaration and we are supposed to warn in this
+ // case, do it.
+ if (WarnOnDeclarations && !PDecl->hasDefinition())
+ Diag(ProtocolId[i].second, diag::warn_undef_protocolref)
+ << ProtocolId[i].first;
+ Protocols.push_back(PDecl);
+ }
+}
+
+/// DiagnoseClassExtensionDupMethods - Check for duplicate declaration of
+/// a class method in its extension.
+///
+void Sema::DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
+ ObjCInterfaceDecl *ID) {
+ if (!ID)
+ return; // Possibly due to previous error
+
+ llvm::DenseMap<Selector, const ObjCMethodDecl*> MethodMap;
+ for (ObjCInterfaceDecl::method_iterator i = ID->meth_begin(),
+ e = ID->meth_end(); i != e; ++i) {
+ ObjCMethodDecl *MD = *i;
+ MethodMap[MD->getSelector()] = MD;
+ }
+
+ if (MethodMap.empty())
+ return;
+ for (ObjCCategoryDecl::method_iterator i = CAT->meth_begin(),
+ e = CAT->meth_end(); i != e; ++i) {
+ ObjCMethodDecl *Method = *i;
+ const ObjCMethodDecl *&PrevMethod = MethodMap[Method->getSelector()];
+ if (PrevMethod && !MatchTwoMethodDeclarations(Method, PrevMethod)) {
+ Diag(Method->getLocation(), diag::err_duplicate_method_decl)
+ << Method->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ }
+ }
+}
+
+/// ActOnForwardProtocolDeclaration - Handle @protocol foo;
+Sema::DeclGroupPtrTy
+Sema::ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
+ const IdentifierLocPair *IdentList,
+ unsigned NumElts,
+ AttributeList *attrList) {
+ SmallVector<Decl *, 8> DeclsInGroup;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ IdentifierInfo *Ident = IdentList[i].first;
+ ObjCProtocolDecl *PrevDecl = LookupProtocol(Ident, IdentList[i].second,
+ ForRedeclaration);
+ ObjCProtocolDecl *PDecl
+ = ObjCProtocolDecl::Create(Context, CurContext, Ident,
+ IdentList[i].second, AtProtocolLoc,
+ PrevDecl);
+
+ PushOnScopeChains(PDecl, TUScope);
+ CheckObjCDeclScope(PDecl);
+
+ if (attrList)
+ ProcessDeclAttributeList(TUScope, PDecl, attrList);
+
+ if (PrevDecl)
+ mergeDeclAttributes(PDecl, PrevDecl);
+
+ DeclsInGroup.push_back(PDecl);
+ }
+
+ return BuildDeclaratorGroup(DeclsInGroup.data(), DeclsInGroup.size(), false);
+}
+
+Decl *Sema::
+ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
+ IdentifierInfo *ClassName, SourceLocation ClassLoc,
+ IdentifierInfo *CategoryName,
+ SourceLocation CategoryLoc,
+ Decl * const *ProtoRefs,
+ unsigned NumProtoRefs,
+ const SourceLocation *ProtoLocs,
+ SourceLocation EndProtoLoc) {
+ ObjCCategoryDecl *CDecl;
+ ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName, ClassLoc, true);
+
+ /// Check that class of this category is already completely declared.
+
+ if (!IDecl
+ || RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
+ PDiag(diag::err_category_forward_interface)
+ << (CategoryName == 0))) {
+ // Create an invalid ObjCCategoryDecl to serve as context for
+ // the enclosing method declarations. We mark the decl invalid
+ // to make it clear that this isn't a valid AST.
+ CDecl = ObjCCategoryDecl::Create(Context, CurContext, AtInterfaceLoc,
+ ClassLoc, CategoryLoc, CategoryName,IDecl);
+ CDecl->setInvalidDecl();
+ CurContext->addDecl(CDecl);
+
+ if (!IDecl)
+ Diag(ClassLoc, diag::err_undef_interface) << ClassName;
+ return ActOnObjCContainerStartDefinition(CDecl);
+ }
+
+ if (!CategoryName && IDecl->getImplementation()) {
+ Diag(ClassLoc, diag::err_class_extension_after_impl) << ClassName;
+ Diag(IDecl->getImplementation()->getLocation(),
+ diag::note_implementation_declared);
+ }
+
+ if (CategoryName) {
+ /// Check for duplicate interface declaration for this category
+ ObjCCategoryDecl *CDeclChain;
+ for (CDeclChain = IDecl->getCategoryList(); CDeclChain;
+ CDeclChain = CDeclChain->getNextClassCategory()) {
+ if (CDeclChain->getIdentifier() == CategoryName) {
+ // Class extensions can be declared multiple times.
+ Diag(CategoryLoc, diag::warn_dup_category_def)
+ << ClassName << CategoryName;
+ Diag(CDeclChain->getLocation(), diag::note_previous_definition);
+ break;
+ }
+ }
+ }
+
+ CDecl = ObjCCategoryDecl::Create(Context, CurContext, AtInterfaceLoc,
+ ClassLoc, CategoryLoc, CategoryName, IDecl);
+ // FIXME: PushOnScopeChains?
+ CurContext->addDecl(CDecl);
+
+ if (NumProtoRefs) {
+ CDecl->setProtocolList((ObjCProtocolDecl**)ProtoRefs, NumProtoRefs,
+ ProtoLocs, Context);
+ // Protocols in the class extension belong to the class.
+ if (CDecl->IsClassExtension())
+ IDecl->mergeClassExtensionProtocolList((ObjCProtocolDecl**)ProtoRefs,
+ NumProtoRefs, Context);
+ }
+
+ CheckObjCDeclScope(CDecl);
+ return ActOnObjCContainerStartDefinition(CDecl);
+}
+
+/// ActOnStartCategoryImplementation - Perform semantic checks on the
+/// category implementation declaration and build an ObjCCategoryImplDecl
+/// object.
+Decl *Sema::ActOnStartCategoryImplementation(
+ SourceLocation AtCatImplLoc,
+ IdentifierInfo *ClassName, SourceLocation ClassLoc,
+ IdentifierInfo *CatName, SourceLocation CatLoc) {
+ ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName, ClassLoc, true);
+ ObjCCategoryDecl *CatIDecl = 0;
+ if (IDecl && IDecl->hasDefinition()) {
+ CatIDecl = IDecl->FindCategoryDeclaration(CatName);
+ if (!CatIDecl) {
+ // Category @implementation with no corresponding @interface.
+ // Create and install one.
+ CatIDecl = ObjCCategoryDecl::Create(Context, CurContext, AtCatImplLoc,
+ ClassLoc, CatLoc,
+ CatName, IDecl);
+ CatIDecl->setImplicit();
+ }
+ }
+
+ ObjCCategoryImplDecl *CDecl =
+ ObjCCategoryImplDecl::Create(Context, CurContext, CatName, IDecl,
+ ClassLoc, AtCatImplLoc, CatLoc);
+ /// Check that class of this category is already completely declared.
+ if (!IDecl) {
+ Diag(ClassLoc, diag::err_undef_interface) << ClassName;
+ CDecl->setInvalidDecl();
+ } else if (RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
+ diag::err_undef_interface)) {
+ CDecl->setInvalidDecl();
+ }
+
+ // FIXME: PushOnScopeChains?
+ CurContext->addDecl(CDecl);
+
+ // If the interface is deprecated/unavailable, warn/error about it.
+ if (IDecl)
+ DiagnoseUseOfDecl(IDecl, ClassLoc);
+
+ /// Check that CatName, category name, is not used in another implementation.
+ if (CatIDecl) {
+ if (CatIDecl->getImplementation()) {
+ Diag(ClassLoc, diag::err_dup_implementation_category) << ClassName
+ << CatName;
+ Diag(CatIDecl->getImplementation()->getLocation(),
+ diag::note_previous_definition);
+ } else {
+ CatIDecl->setImplementation(CDecl);
+ // Warn on implementing a category of a deprecated class under
+ // -Wdeprecated-implementations flag.
+ DiagnoseObjCImplementedDeprecations(*this,
+ dyn_cast<NamedDecl>(IDecl),
+ CDecl->getLocation(), 2);
+ }
+ }
+
+ CheckObjCDeclScope(CDecl);
+ return ActOnObjCContainerStartDefinition(CDecl);
+}
+
+Decl *Sema::ActOnStartClassImplementation(
+ SourceLocation AtClassImplLoc,
+ IdentifierInfo *ClassName, SourceLocation ClassLoc,
+ IdentifierInfo *SuperClassname,
+ SourceLocation SuperClassLoc) {
+ ObjCInterfaceDecl* IDecl = 0;
+ // Check for another declaration kind with the same name.
+ NamedDecl *PrevDecl
+ = LookupSingleName(TUScope, ClassName, ClassLoc, LookupOrdinaryName,
+ ForRedeclaration);
+ if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
+ Diag(ClassLoc, diag::err_redefinition_different_kind) << ClassName;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ } else if ((IDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl))) {
+ RequireCompleteType(ClassLoc, Context.getObjCInterfaceType(IDecl),
+ diag::warn_undef_interface);
+ } else {
+ // We did not find anything with the name ClassName; try to correct for
+ // typos in the class name.
+ ObjCInterfaceValidatorCCC Validator;
+ if (TypoCorrection Corrected = CorrectTypo(
+ DeclarationNameInfo(ClassName, ClassLoc), LookupOrdinaryName, TUScope,
+ NULL, Validator)) {
+ // Suggest the (potentially) correct interface name, but put the
+ // fix-it hint itself in a separate note, since changing the name in
+ // the warning would make the fix-it change semantics. Don't
+ // provide a code-modification hint or use the typo name for recovery,
+ // because this is just a warning; the program may actually be correct.
+ IDecl = Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>();
+ DeclarationName CorrectedName = Corrected.getCorrection();
+ Diag(ClassLoc, diag::warn_undef_interface_suggest)
+ << ClassName << CorrectedName;
+ Diag(IDecl->getLocation(), diag::note_previous_decl) << CorrectedName
+ << FixItHint::CreateReplacement(ClassLoc, CorrectedName.getAsString());
+ IDecl = 0;
+ } else {
+ Diag(ClassLoc, diag::warn_undef_interface) << ClassName;
+ }
+ }
+
+ // Check that super class name is valid class name
+ ObjCInterfaceDecl* SDecl = 0;
+ if (SuperClassname) {
+ // Check if a different kind of symbol declared in this scope.
+ PrevDecl = LookupSingleName(TUScope, SuperClassname, SuperClassLoc,
+ LookupOrdinaryName);
+ if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
+ Diag(SuperClassLoc, diag::err_redefinition_different_kind)
+ << SuperClassname;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ } else {
+ SDecl = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
+ if (SDecl && !SDecl->hasDefinition())
+ SDecl = 0;
+ if (!SDecl)
+ Diag(SuperClassLoc, diag::err_undef_superclass)
+ << SuperClassname << ClassName;
+ else if (IDecl && !declaresSameEntity(IDecl->getSuperClass(), SDecl)) {
+ // This implementation and its interface do not have the same
+ // super class.
+ Diag(SuperClassLoc, diag::err_conflicting_super_class)
+ << SDecl->getDeclName();
+ Diag(SDecl->getLocation(), diag::note_previous_definition);
+ }
+ }
+ }
+
+ if (!IDecl) {
+ // Legacy case of @implementation with no corresponding @interface.
+ // Build, chain & install the interface decl into the identifier.
+
+ // FIXME: Do we support attributes on the @implementation? If so we should
+ // copy them over.
+ IDecl = ObjCInterfaceDecl::Create(Context, CurContext, AtClassImplLoc,
+ ClassName, /*PrevDecl=*/0, ClassLoc,
+ true);
+ IDecl->startDefinition();
+ if (SDecl) {
+ IDecl->setSuperClass(SDecl);
+ IDecl->setSuperClassLoc(SuperClassLoc);
+ IDecl->setEndOfDefinitionLoc(SuperClassLoc);
+ } else {
+ IDecl->setEndOfDefinitionLoc(ClassLoc);
+ }
+
+ PushOnScopeChains(IDecl, TUScope);
+ } else {
+ // Mark the interface as being completed, even if it was only a
+ //   @class ...;
+ // declaration: the user cannot reopen it.
+ if (!IDecl->hasDefinition())
+ IDecl->startDefinition();
+ }
+
+ ObjCImplementationDecl* IMPDecl =
+ ObjCImplementationDecl::Create(Context, CurContext, IDecl, SDecl,
+ ClassLoc, AtClassImplLoc);
+
+ if (CheckObjCDeclScope(IMPDecl))
+ return ActOnObjCContainerStartDefinition(IMPDecl);
+
+ // Check that there is no duplicate implementation of this class.
+ if (IDecl->getImplementation()) {
+ // FIXME: Don't leak everything!
+ Diag(ClassLoc, diag::err_dup_implementation_class) << ClassName;
+ Diag(IDecl->getImplementation()->getLocation(),
+ diag::note_previous_definition);
+ } else { // add it to the list.
+ IDecl->setImplementation(IMPDecl);
+ PushOnScopeChains(IMPDecl, TUScope);
+ // Warn on implementing a deprecated class under
+ // -Wdeprecated-implementations flag.
+ DiagnoseObjCImplementedDeprecations(*this,
+ dyn_cast<NamedDecl>(IDecl),
+ IMPDecl->getLocation(), 1);
+ }
+ return ActOnObjCContainerStartDefinition(IMPDecl);
+}
+
+Sema::DeclGroupPtrTy
+Sema::ActOnFinishObjCImplementation(Decl *ObjCImpDecl, ArrayRef<Decl *> Decls) {
+ SmallVector<Decl *, 64> DeclsInGroup;
+ DeclsInGroup.reserve(Decls.size() + 1);
+
+ for (unsigned i = 0, e = Decls.size(); i != e; ++i) {
+ Decl *Dcl = Decls[i];
+ if (!Dcl)
+ continue;
+ if (Dcl->getDeclContext()->isFileContext())
+ Dcl->setTopLevelDeclInObjCContainer();
+ DeclsInGroup.push_back(Dcl);
+ }
+
+ DeclsInGroup.push_back(ObjCImpDecl);
+
+ return BuildDeclaratorGroup(DeclsInGroup.data(), DeclsInGroup.size(), false);
+}
+
+void Sema::CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
+ ObjCIvarDecl **ivars, unsigned numIvars,
+ SourceLocation RBrace) {
+ assert(ImpDecl && "missing implementation decl");
+ ObjCInterfaceDecl* IDecl = ImpDecl->getClassInterface();
+ if (!IDecl)
+ return;
+ /// Check the case of a non-existing @interface decl
+ /// (a legacy Objective-C @implementation decl without an @interface decl):
+ /// add the implementation's ivars to the synthesized class's ivar list.
+ if (IDecl->isImplicitInterfaceDecl()) {
+ IDecl->setEndOfDefinitionLoc(RBrace);
+ // Add ivar's to class's DeclContext.
+ for (unsigned i = 0, e = numIvars; i != e; ++i) {
+ ivars[i]->setLexicalDeclContext(ImpDecl);
+ IDecl->makeDeclVisibleInContext(ivars[i]);
+ ImpDecl->addDecl(ivars[i]);
+ }
+
+ return;
+ }
+ // If implementation has empty ivar list, just return.
+ if (numIvars == 0)
+ return;
+
+ assert(ivars && "missing @implementation ivars");
+ if (LangOpts.ObjCNonFragileABI2) {
+ if (ImpDecl->getSuperClass())
+ Diag(ImpDecl->getLocation(), diag::warn_on_superclass_use);
+ for (unsigned i = 0; i < numIvars; i++) {
+ ObjCIvarDecl* ImplIvar = ivars[i];
+ if (const ObjCIvarDecl *ClsIvar =
+ IDecl->getIvarDecl(ImplIvar->getIdentifier())) {
+ Diag(ImplIvar->getLocation(), diag::err_duplicate_ivar_declaration);
+ Diag(ClsIvar->getLocation(), diag::note_previous_definition);
+ continue;
+ }
+ // Instance ivar to Implementation's DeclContext.
+ ImplIvar->setLexicalDeclContext(ImpDecl);
+ IDecl->makeDeclVisibleInContext(ImplIvar);
+ ImpDecl->addDecl(ImplIvar);
+ }
+ return;
+ }
+ // Check the interface's ivar list against those in the implementation;
+ // names and types must match.
+ //
+ unsigned j = 0;
+ ObjCInterfaceDecl::ivar_iterator
+ IVI = IDecl->ivar_begin(), IVE = IDecl->ivar_end();
+ for (; numIvars > 0 && IVI != IVE; ++IVI) {
+ ObjCIvarDecl* ImplIvar = ivars[j++];
+ ObjCIvarDecl* ClsIvar = *IVI;
+ assert (ImplIvar && "missing implementation ivar");
+ assert (ClsIvar && "missing class ivar");
+
+ // First, make sure the types match.
+ if (!Context.hasSameType(ImplIvar->getType(), ClsIvar->getType())) {
+ Diag(ImplIvar->getLocation(), diag::err_conflicting_ivar_type)
+ << ImplIvar->getIdentifier()
+ << ImplIvar->getType() << ClsIvar->getType();
+ Diag(ClsIvar->getLocation(), diag::note_previous_definition);
+ } else if (ImplIvar->isBitField() && ClsIvar->isBitField() &&
+ ImplIvar->getBitWidthValue(Context) !=
+ ClsIvar->getBitWidthValue(Context)) {
+ Diag(ImplIvar->getBitWidth()->getLocStart(),
+ diag::err_conflicting_ivar_bitwidth) << ImplIvar->getIdentifier();
+ Diag(ClsIvar->getBitWidth()->getLocStart(),
+ diag::note_previous_definition);
+ }
+ // Make sure the names are identical.
+ if (ImplIvar->getIdentifier() != ClsIvar->getIdentifier()) {
+ Diag(ImplIvar->getLocation(), diag::err_conflicting_ivar_name)
+ << ImplIvar->getIdentifier() << ClsIvar->getIdentifier();
+ Diag(ClsIvar->getLocation(), diag::note_previous_definition);
+ }
+ --numIvars;
+ }
+
+ if (numIvars > 0)
+ Diag(ivars[j]->getLocation(), diag::err_inconsistant_ivar_count);
+ else if (IVI != IVE)
+ Diag((*IVI)->getLocation(), diag::err_inconsistant_ivar_count);
+}
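+
+// For illustration (hypothetical class): under the fragile ABI the
+// @implementation must repeat the @interface's ivars with matching names,
+// types, and bit-widths.
+//
+//   @interface Foo : NSObject { int count; } @end
+//   @implementation Foo { long count; }   // err_conflicting_ivar_type
+//   @end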
+
+void Sema::WarnUndefinedMethod(SourceLocation ImpLoc, ObjCMethodDecl *method,
+ bool &IncompleteImpl, unsigned DiagID) {
+ // No point warning no definition of method which is 'unavailable'.
+ if (method->hasAttr<UnavailableAttr>())
+ return;
+ if (!IncompleteImpl) {
+ Diag(ImpLoc, diag::warn_incomplete_impl);
+ IncompleteImpl = true;
+ }
+ if (DiagID == diag::warn_unimplemented_protocol_method)
+ Diag(ImpLoc, DiagID) << method->getDeclName();
+ else
+ Diag(method->getLocation(), DiagID) << method->getDeclName();
+}
+
+/// Determines if type B can be substituted for type A. Returns true if we can
+/// guarantee that anything that the user will do to an object of type A can
+/// also be done to an object of type B. This is trivially true if the two
+/// types are the same, or if B is a subclass of A. It becomes more complex
+/// in cases where protocols are involved.
+///
+/// Object types in Objective-C describe the minimum requirements for an
+/// object, rather than providing a complete description of a type. For
+/// example, if A is a subclass of B, then B* may refer to an instance of A.
+/// The principle of substitutability means that we may use an instance of A
+/// anywhere that we may use an instance of B - it will implement all of the
+/// ivars of B and all of the methods of B.
+///
+/// This substitutability is important when type checking methods, because
+/// the implementation may have stricter type definitions than the interface.
+/// The interface specifies minimum requirements, but the implementation may
+/// have more accurate ones. For example, a method may privately accept
+/// instances of B, but only publish that it accepts instances of A. Any
+/// object passed to it will be type checked against B, and so will implicitly
+/// be a valid A*. Similarly, a method may return a subclass of the class that
+/// it is declared as returning.
+///
+/// This is most important when considering subclassing. A method in a
+/// subclass must accept any object as an argument that its superclass's
+/// implementation accepts. It may, however, accept a more general type
+/// without breaking substitutability (i.e. you can still use the subclass
+/// anywhere that you can use the superclass, but not vice versa). The
+/// converse requirement applies to return types: the return type for a
+/// subclass method must be a valid object of the kind that the superclass
+/// advertises, but it may be specified more accurately. This avoids the need
+/// for explicit down-casting by callers.
+///
+/// Note: This is a stricter requirement than for assignment.
+static bool isObjCTypeSubstitutable(ASTContext &Context,
+ const ObjCObjectPointerType *A,
+ const ObjCObjectPointerType *B,
+ bool rejectId) {
+ // Reject a protocol-unqualified id.
+ if (rejectId && B->isObjCIdType()) return false;
+
+ // If B is a qualified id, then A must also be a qualified id and it must
+ // implement all of the protocols in B. It may not be a qualified class.
+ // For example, MyClass<A> can be assigned to id<A>, but MyClass<A> is a
+ // stricter definition so it is not substitutable for id<A>.
+ if (B->isObjCQualifiedIdType()) {
+ return A->isObjCQualifiedIdType() &&
+ Context.ObjCQualifiedIdTypesAreCompatible(QualType(A, 0),
+ QualType(B,0),
+ false);
+ }
+
+ /*
+ // id is a special type that bypasses type checking completely. We want a
+ // warning when it is used in one place but not another.
+ if (C.isObjCIdType(A) || C.isObjCIdType(B)) return false;
+
+
+ // If B is a qualified id, then A must also be a qualified id (which it isn't
+ // if we've got this far)
+ if (B->isObjCQualifiedIdType()) return false;
+ */
+
+ // Now we know that A and B are (potentially-qualified) class types. The
+ // normal rules for assignment apply.
+ return Context.canAssignObjCInterfaces(A, B);
+}
+
+static SourceRange getTypeRange(TypeSourceInfo *TSI) {
+ return (TSI ? TSI->getTypeLoc().getSourceRange() : SourceRange());
+}
+
+static bool CheckMethodOverrideReturn(Sema &S,
+ ObjCMethodDecl *MethodImpl,
+ ObjCMethodDecl *MethodDecl,
+ bool IsProtocolMethodDecl,
+ bool IsOverridingMode,
+ bool Warn) {
+ if (IsProtocolMethodDecl &&
+ (MethodDecl->getObjCDeclQualifier() !=
+ MethodImpl->getObjCDeclQualifier())) {
+ if (Warn) {
+ S.Diag(MethodImpl->getLocation(),
+ (IsOverridingMode ?
+ diag::warn_conflicting_overriding_ret_type_modifiers
+ : diag::warn_conflicting_ret_type_modifiers))
+ << MethodImpl->getDeclName()
+ << getTypeRange(MethodImpl->getResultTypeSourceInfo());
+ S.Diag(MethodDecl->getLocation(), diag::note_previous_declaration)
+ << getTypeRange(MethodDecl->getResultTypeSourceInfo());
+ }
+ else
+ return false;
+ }
+
+ if (S.Context.hasSameUnqualifiedType(MethodImpl->getResultType(),
+ MethodDecl->getResultType()))
+ return true;
+ if (!Warn)
+ return false;
+
+ unsigned DiagID =
+ IsOverridingMode ? diag::warn_conflicting_overriding_ret_types
+ : diag::warn_conflicting_ret_types;
+
+ // Mismatches between ObjC pointers go into a different warning
+ // category, and sometimes they're even completely whitelisted.
+ if (const ObjCObjectPointerType *ImplPtrTy =
+ MethodImpl->getResultType()->getAs<ObjCObjectPointerType>()) {
+ if (const ObjCObjectPointerType *IfacePtrTy =
+ MethodDecl->getResultType()->getAs<ObjCObjectPointerType>()) {
+ // Allow non-matching return types as long as they don't violate
+ // the principle of substitutability. Specifically, we permit
+ // return types that are subclasses of the declared return type,
+ // or that are more-qualified versions of the declared type.
+ if (isObjCTypeSubstitutable(S.Context, IfacePtrTy, ImplPtrTy, false))
+ return false;
+
+ DiagID =
+ IsOverridingMode ? diag::warn_non_covariant_overriding_ret_types
+ : diag::warn_non_covariant_ret_types;
+ }
+ }
+
+ S.Diag(MethodImpl->getLocation(), DiagID)
+ << MethodImpl->getDeclName()
+ << MethodDecl->getResultType()
+ << MethodImpl->getResultType()
+ << getTypeRange(MethodImpl->getResultTypeSourceInfo());
+ S.Diag(MethodDecl->getLocation(),
+ IsOverridingMode ? diag::note_previous_declaration
+ : diag::note_previous_definition)
+ << getTypeRange(MethodDecl->getResultTypeSourceInfo());
+ return false;
+}
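+
+// Illustrative covariance allowance (hypothetical classes, Sub a subclass of
+// Base): an implementation may narrow the declared return type without a
+// warning, since a Sub* is usable wherever a Base* is expected.
+//
+//   // @interface declares:      - (Base *)maker;
+//   // @implementation defines:  - (Sub *)maker { ... }   // accepted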
+
+static bool CheckMethodOverrideParam(Sema &S,
+ ObjCMethodDecl *MethodImpl,
+ ObjCMethodDecl *MethodDecl,
+ ParmVarDecl *ImplVar,
+ ParmVarDecl *IfaceVar,
+ bool IsProtocolMethodDecl,
+ bool IsOverridingMode,
+ bool Warn) {
+ if (IsProtocolMethodDecl &&
+ (ImplVar->getObjCDeclQualifier() !=
+ IfaceVar->getObjCDeclQualifier())) {
+ if (Warn) {
+ if (IsOverridingMode)
+ S.Diag(ImplVar->getLocation(),
+ diag::warn_conflicting_overriding_param_modifiers)
+ << getTypeRange(ImplVar->getTypeSourceInfo())
+ << MethodImpl->getDeclName();
+ else S.Diag(ImplVar->getLocation(),
+ diag::warn_conflicting_param_modifiers)
+ << getTypeRange(ImplVar->getTypeSourceInfo())
+ << MethodImpl->getDeclName();
+ S.Diag(IfaceVar->getLocation(), diag::note_previous_declaration)
+ << getTypeRange(IfaceVar->getTypeSourceInfo());
+ }
+ else
+ return false;
+ }
+
+ QualType ImplTy = ImplVar->getType();
+ QualType IfaceTy = IfaceVar->getType();
+
+ if (S.Context.hasSameUnqualifiedType(ImplTy, IfaceTy))
+ return true;
+
+ if (!Warn)
+ return false;
+ unsigned DiagID =
+ IsOverridingMode ? diag::warn_conflicting_overriding_param_types
+ : diag::warn_conflicting_param_types;
+
+ // Mismatches between ObjC pointers go into a different warning
+ // category, and sometimes they're even completely whitelisted.
+ if (const ObjCObjectPointerType *ImplPtrTy =
+ ImplTy->getAs<ObjCObjectPointerType>()) {
+ if (const ObjCObjectPointerType *IfacePtrTy =
+ IfaceTy->getAs<ObjCObjectPointerType>()) {
+ // Allow non-matching argument types as long as they don't
+ // violate the principle of substitutability. Specifically, the
+ // implementation must accept any objects that the superclass
+ // accepts, however it may also accept others.
+ if (isObjCTypeSubstitutable(S.Context, ImplPtrTy, IfacePtrTy, true))
+ return false;
+
+ DiagID =
+ IsOverridingMode ? diag::warn_non_contravariant_overriding_param_types
+ : diag::warn_non_contravariant_param_types;
+ }
+ }
+
+ S.Diag(ImplVar->getLocation(), DiagID)
+ << getTypeRange(ImplVar->getTypeSourceInfo())
+ << MethodImpl->getDeclName() << IfaceTy << ImplTy;
+ S.Diag(IfaceVar->getLocation(),
+ (IsOverridingMode ? diag::note_previous_declaration
+ : diag::note_previous_definition))
+ << getTypeRange(IfaceVar->getTypeSourceInfo());
+ return false;
+}
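+
+// Illustrative contravariance allowance (hypothetical classes, Sub a subclass
+// of Base): the implementation may accept a more general parameter type than
+// the interface declares.
+//
+//   // @interface declares:      - (void)take:(Sub *)x;
+//   // @implementation defines:  - (void)take:(Base *)x { ... }   // accepted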
+
+/// In ARC, check whether the conventional meanings of the two methods
+/// match. If they don't, it's a hard error.
+static bool checkMethodFamilyMismatch(Sema &S, ObjCMethodDecl *impl,
+ ObjCMethodDecl *decl) {
+ ObjCMethodFamily implFamily = impl->getMethodFamily();
+ ObjCMethodFamily declFamily = decl->getMethodFamily();
+ if (implFamily == declFamily) return false;
+
+ // Since conventions are sorted by selector, the only possibility is
+ // that the types differ enough to cause one selector or the other
+ // to fall out of the family.
+ assert(implFamily == OMF_None || declFamily == OMF_None);
+
+ // No further diagnostics required on invalid declarations.
+ if (impl->isInvalidDecl() || decl->isInvalidDecl()) return true;
+
+ const ObjCMethodDecl *unmatched = impl;
+ ObjCMethodFamily family = declFamily;
+ unsigned errorID = diag::err_arc_lost_method_convention;
+ unsigned noteID = diag::note_arc_lost_method_convention;
+ if (declFamily == OMF_None) {
+ unmatched = decl;
+ family = implFamily;
+ errorID = diag::err_arc_gained_method_convention;
+ noteID = diag::note_arc_gained_method_convention;
+ }
+
+ // Indexes into a %select clause in the diagnostic.
+ enum FamilySelector {
+ F_alloc, F_copy, F_mutableCopy = F_copy, F_init, F_new
+ };
+ FamilySelector familySelector = FamilySelector();
+
+ switch (family) {
+ case OMF_None: llvm_unreachable("logic error, no method convention");
+ case OMF_retain:
+ case OMF_release:
+ case OMF_autorelease:
+ case OMF_dealloc:
+ case OMF_finalize:
+ case OMF_retainCount:
+ case OMF_self:
+ case OMF_performSelector:
+ // Mismatches for these methods don't change ownership
+ // conventions, so we don't care.
+ return false;
+
+ case OMF_init: familySelector = F_init; break;
+ case OMF_alloc: familySelector = F_alloc; break;
+ case OMF_copy: familySelector = F_copy; break;
+ case OMF_mutableCopy: familySelector = F_mutableCopy; break;
+ case OMF_new: familySelector = F_new; break;
+ }
+
+ enum ReasonSelector { R_NonObjectReturn, R_UnrelatedReturn };
+ ReasonSelector reasonSelector;
+
+ // The only reason these methods don't fall within their families is
+ // their unusual result types.
+ if (unmatched->getResultType()->isObjCObjectPointerType()) {
+ reasonSelector = R_UnrelatedReturn;
+ } else {
+ reasonSelector = R_NonObjectReturn;
+ }
+
+ S.Diag(impl->getLocation(), errorID) << familySelector << reasonSelector;
+ S.Diag(decl->getLocation(), noteID) << familySelector << reasonSelector;
+
+ return true;
+}
+
+void Sema::WarnConflictingTypedMethods(ObjCMethodDecl *ImpMethodDecl,
+ ObjCMethodDecl *MethodDecl,
+ bool IsProtocolMethodDecl) {
+ if (getLangOpts().ObjCAutoRefCount &&
+ checkMethodFamilyMismatch(*this, ImpMethodDecl, MethodDecl))
+ return;
+
+ CheckMethodOverrideReturn(*this, ImpMethodDecl, MethodDecl,
+ IsProtocolMethodDecl, false,
+ true);
+
+ for (ObjCMethodDecl::param_iterator IM = ImpMethodDecl->param_begin(),
+ IF = MethodDecl->param_begin(), EM = ImpMethodDecl->param_end();
+ IM != EM; ++IM, ++IF) {
+ CheckMethodOverrideParam(*this, ImpMethodDecl, MethodDecl, *IM, *IF,
+ IsProtocolMethodDecl, false, true);
+ }
+
+ if (ImpMethodDecl->isVariadic() != MethodDecl->isVariadic()) {
+ Diag(ImpMethodDecl->getLocation(),
+ diag::warn_conflicting_variadic);
+ Diag(MethodDecl->getLocation(), diag::note_previous_declaration);
+ }
+}
+
+void Sema::CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
+ ObjCMethodDecl *Overridden,
+ bool IsProtocolMethodDecl) {
+
+ CheckMethodOverrideReturn(*this, Method, Overridden,
+ IsProtocolMethodDecl, true,
+ true);
+
+ for (ObjCMethodDecl::param_iterator IM = Method->param_begin(),
+ IF = Overridden->param_begin(), EM = Method->param_end();
+ IM != EM; ++IM, ++IF) {
+ CheckMethodOverrideParam(*this, Method, Overridden, *IM, *IF,
+ IsProtocolMethodDecl, true, true);
+ }
+
+ if (Method->isVariadic() != Overridden->isVariadic()) {
+ Diag(Method->getLocation(),
+ diag::warn_conflicting_overriding_variadic);
+ Diag(Overridden->getLocation(), diag::note_previous_declaration);
+ }
+}
+
+/// WarnExactTypedMethods - This routine issues a warning if a method
+/// implementation exactly matches the declaration it implements.
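+///
+/// For illustration (hypothetical names): if the primary class declares
+///   - (void)reload;
+/// and a category on that class also implements
+///   - (void)reload { ... }
+/// with exactly the same type, the category is re-implementing a method of
+/// its primary class, which is what the warning issued here flags.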
+void Sema::WarnExactTypedMethods(ObjCMethodDecl *ImpMethodDecl,
+ ObjCMethodDecl *MethodDecl,
+ bool IsProtocolMethodDecl) {
+ // Don't issue a warning when the protocol method is optional, because the
+ // primary class is not required to implement it and it is safe for a
+ // category to implement it.
+ if (MethodDecl->getImplementationControl() == ObjCMethodDecl::Optional)
+ return;
+ // Don't issue a warning when the primary class's method is
+ // deprecated/unavailable.
+ if (MethodDecl->hasAttr<UnavailableAttr>() ||
+ MethodDecl->hasAttr<DeprecatedAttr>())
+ return;
+
+ bool match = CheckMethodOverrideReturn(*this, ImpMethodDecl, MethodDecl,
+ IsProtocolMethodDecl, false, false);
+ if (match)
+ for (ObjCMethodDecl::param_iterator IM = ImpMethodDecl->param_begin(),
+ IF = MethodDecl->param_begin(), EM = ImpMethodDecl->param_end();
+ IM != EM; ++IM, ++IF) {
+ match = CheckMethodOverrideParam(*this, ImpMethodDecl, MethodDecl,
+ *IM, *IF,
+ IsProtocolMethodDecl, false, false);
+ if (!match)
+ break;
+ }
+ if (match)
+ match = (ImpMethodDecl->isVariadic() == MethodDecl->isVariadic());
+ if (match)
+ match = !(MethodDecl->isClassMethod() &&
+ MethodDecl->getSelector() == GetNullarySelector("load", Context));
+
+ if (match) {
+ Diag(ImpMethodDecl->getLocation(),
+ diag::warn_category_method_impl_match);
+ Diag(MethodDecl->getLocation(), diag::note_method_declared_at)
+ << MethodDecl->getDeclName();
+ }
+}
+
+/// FIXME: Type hierarchies in Objective-C can be deep. We could most likely
+/// improve the efficiency of selector lookups and type checking by associating
+/// with each protocol / interface / category the flattened instance tables. If
+/// we used an immutable set to keep the table then it wouldn't add significant
+/// memory cost and it would be handy for lookups.
+
+/// CheckProtocolMethodDefs - This routine checks for unimplemented methods
+/// declared in the protocol, and in the protocols it references.
+void Sema::CheckProtocolMethodDefs(SourceLocation ImpLoc,
+ ObjCProtocolDecl *PDecl,
+ bool& IncompleteImpl,
+ const llvm::DenseSet<Selector> &InsMap,
+ const llvm::DenseSet<Selector> &ClsMap,
+ ObjCContainerDecl *CDecl) {
+ ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl);
+ ObjCInterfaceDecl *IDecl = C ? C->getClassInterface()
+ : dyn_cast<ObjCInterfaceDecl>(CDecl);
+ assert (IDecl && "CheckProtocolMethodDefs - IDecl is null");
+
+ ObjCInterfaceDecl *Super = IDecl->getSuperClass();
+ ObjCInterfaceDecl *NSIDecl = 0;
+ if (getLangOpts().NeXTRuntime) {
+ // Check whether the class implements the forwardInvocation: method and
+ // whether objects of this class are derived from 'NSProxy', so that
+ // requests can be forwarded from one object to another.
+ // Under such conditions every possible method is effectively implemented
+ // by the class, so we should not issue "Method definition not found"
+ // warnings.
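+ // For illustration (hypothetical class), a forwarding proxy such as
+ //   @interface MyProxy : NSProxy ... @end
+ //   @implementation MyProxy
+ //   - (void)forwardInvocation:(NSInvocation *)invocation { ... }
+ //   @end
+ // can satisfy any protocol method at run time, so the warnings would only
+ // be noise.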
+ // FIXME: Use a general GetUnarySelector method for this.
+ IdentifierInfo* II = &Context.Idents.get("forwardInvocation");
+ Selector fISelector = Context.Selectors.getSelector(1, &II);
+ if (InsMap.count(fISelector))
+ // Is IDecl derived from 'NSProxy'? If so, no instance methods
+ // need be implemented in the implementation.
+ NSIDecl = IDecl->lookupInheritedClass(&Context.Idents.get("NSProxy"));
+ }
+
+ // If a method lookup fails locally we still need to look and see if
+ // the method was implemented by a base class or an inherited
+ // protocol. This lookup is slow, but occurs rarely in correct code
+ // and otherwise would terminate in a warning.
+
+ // check unimplemented instance methods.
+ if (!NSIDecl)
+ for (ObjCProtocolDecl::instmeth_iterator I = PDecl->instmeth_begin(),
+ E = PDecl->instmeth_end(); I != E; ++I) {
+ ObjCMethodDecl *method = *I;
+ if (method->getImplementationControl() != ObjCMethodDecl::Optional &&
+ !method->isSynthesized() && !InsMap.count(method->getSelector()) &&
+ (!Super ||
+ !Super->lookupInstanceMethod(method->getSelector()))) {
+ // If a method is not implemented in the category implementation but
+ // has been declared in its primary class, superclass, or in one of
+ // their protocols, there is no need to issue the warning, because the
+ // method will be implemented in the primary class or in one of its
+ // superclass implementations.
+
+ // Ugly, but necessary. A method declared in a protocol might have
+ // been synthesized due to a property declared in the class which
+ // uses the protocol.
+ if (ObjCMethodDecl *MethodInClass =
+ IDecl->lookupInstanceMethod(method->getSelector(),
+ true /*shallowCategoryLookup*/))
+ if (C || MethodInClass->isSynthesized())
+ continue;
+ unsigned DIAG = diag::warn_unimplemented_protocol_method;
+ if (Diags.getDiagnosticLevel(DIAG, ImpLoc)
+ != DiagnosticsEngine::Ignored) {
+ WarnUndefinedMethod(ImpLoc, method, IncompleteImpl, DIAG);
+ Diag(method->getLocation(), diag::note_method_declared_at)
+ << method->getDeclName();
+ Diag(CDecl->getLocation(), diag::note_required_for_protocol_at)
+ << PDecl->getDeclName();
+ }
+ }
+ }
+ // check unimplemented class methods
+ for (ObjCProtocolDecl::classmeth_iterator
+ I = PDecl->classmeth_begin(), E = PDecl->classmeth_end();
+ I != E; ++I) {
+ ObjCMethodDecl *method = *I;
+ if (method->getImplementationControl() != ObjCMethodDecl::Optional &&
+ !ClsMap.count(method->getSelector()) &&
+ (!Super || !Super->lookupClassMethod(method->getSelector()))) {
+ // See above comment for instance method lookups.
+ if (C && IDecl->lookupClassMethod(method->getSelector(),
+ true /*shallowCategoryLookup*/))
+ continue;
+ unsigned DIAG = diag::warn_unimplemented_protocol_method;
+ if (Diags.getDiagnosticLevel(DIAG, ImpLoc) !=
+ DiagnosticsEngine::Ignored) {
+ WarnUndefinedMethod(ImpLoc, method, IncompleteImpl, DIAG);
+ Diag(method->getLocation(), diag::note_method_declared_at)
+ << method->getDeclName();
+ Diag(IDecl->getLocation(), diag::note_required_for_protocol_at) <<
+ PDecl->getDeclName();
+ }
+ }
+ }
+ // Check this protocol's referenced protocols, recursively.
+ for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(),
+ E = PDecl->protocol_end(); PI != E; ++PI)
+ CheckProtocolMethodDefs(ImpLoc, *PI, IncompleteImpl, InsMap, ClsMap, CDecl);
+}
+
+/// MatchAllMethodDeclarations - Check methods declared in interface
+/// or protocol against those declared in their implementations.
+///
+void Sema::MatchAllMethodDeclarations(const llvm::DenseSet<Selector> &InsMap,
+ const llvm::DenseSet<Selector> &ClsMap,
+ llvm::DenseSet<Selector> &InsMapSeen,
+ llvm::DenseSet<Selector> &ClsMapSeen,
+ ObjCImplDecl* IMPDecl,
+ ObjCContainerDecl* CDecl,
+ bool &IncompleteImpl,
+ bool ImmediateClass,
+ bool WarnCategoryMethodImpl) {
+ // Check whether instance methods in the class interface have been
+ // implemented in the implementation class and, if so, that their types match.
+ for (ObjCInterfaceDecl::instmeth_iterator I = CDecl->instmeth_begin(),
+ E = CDecl->instmeth_end(); I != E; ++I) {
+ if (InsMapSeen.count((*I)->getSelector()))
+ continue;
+ InsMapSeen.insert((*I)->getSelector());
+ if (!(*I)->isSynthesized() &&
+ !InsMap.count((*I)->getSelector())) {
+ if (ImmediateClass)
+ WarnUndefinedMethod(IMPDecl->getLocation(), *I, IncompleteImpl,
+ diag::note_undef_method_impl);
+ continue;
+ } else {
+ ObjCMethodDecl *ImpMethodDecl =
+ IMPDecl->getInstanceMethod((*I)->getSelector());
+ assert(CDecl->getInstanceMethod((*I)->getSelector()) &&
+ "Expected to find the method through lookup as well");
+ ObjCMethodDecl *MethodDecl = *I;
+ // ImpMethodDecl may be null as in a @dynamic property.
+ if (ImpMethodDecl) {
+ if (!WarnCategoryMethodImpl)
+ WarnConflictingTypedMethods(ImpMethodDecl, MethodDecl,
+ isa<ObjCProtocolDecl>(CDecl));
+ else if (!MethodDecl->isSynthesized())
+ WarnExactTypedMethods(ImpMethodDecl, MethodDecl,
+ isa<ObjCProtocolDecl>(CDecl));
+ }
+ }
+ }
+
+ // Check whether class methods in the class interface have been
+ // implemented in the implementation class and, if so, that their types match.
+ for (ObjCInterfaceDecl::classmeth_iterator
+ I = CDecl->classmeth_begin(), E = CDecl->classmeth_end(); I != E; ++I) {
+ if (ClsMapSeen.count((*I)->getSelector()))
+ continue;
+ ClsMapSeen.insert((*I)->getSelector());
+ if (!ClsMap.count((*I)->getSelector())) {
+ if (ImmediateClass)
+ WarnUndefinedMethod(IMPDecl->getLocation(), *I, IncompleteImpl,
+ diag::note_undef_method_impl);
+ } else {
+ ObjCMethodDecl *ImpMethodDecl =
+ IMPDecl->getClassMethod((*I)->getSelector());
+ assert(CDecl->getClassMethod((*I)->getSelector()) &&
+ "Expected to find the method through lookup as well");
+ ObjCMethodDecl *MethodDecl = *I;
+ if (!WarnCategoryMethodImpl)
+ WarnConflictingTypedMethods(ImpMethodDecl, MethodDecl,
+ isa<ObjCProtocolDecl>(CDecl));
+ else
+ WarnExactTypedMethods(ImpMethodDecl, MethodDecl,
+ isa<ObjCProtocolDecl>(CDecl));
+ }
+ }
+
+ if (ObjCInterfaceDecl *I = dyn_cast<ObjCInterfaceDecl> (CDecl)) {
+ // Methods in class extensions also need to be looked at next.
+ for (const ObjCCategoryDecl *ClsExtDecl = I->getFirstClassExtension();
+ ClsExtDecl; ClsExtDecl = ClsExtDecl->getNextClassExtension())
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ IMPDecl,
+ const_cast<ObjCCategoryDecl *>(ClsExtDecl),
+ IncompleteImpl, false,
+ WarnCategoryMethodImpl);
+
+ // Check for any implementation of methods declared in protocols.
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ PI = I->all_referenced_protocol_begin(),
+ E = I->all_referenced_protocol_end(); PI != E; ++PI)
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ IMPDecl,
+ (*PI), IncompleteImpl, false,
+ WarnCategoryMethodImpl);
+
+ // FIXME: For now, we are not checking for an exact match of methods
+ // in a category implementation and its primary class's superclass.
+ if (!WarnCategoryMethodImpl && I->getSuperClass())
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ IMPDecl,
+ I->getSuperClass(), IncompleteImpl, false);
+ }
+}
+
+/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
+/// category match those implemented in its primary class, and warns each
+/// time an exact match is found.
+void Sema::CheckCategoryVsClassMethodMatches(
+ ObjCCategoryImplDecl *CatIMPDecl) {
+ llvm::DenseSet<Selector> InsMap, ClsMap;
+
+ for (ObjCImplementationDecl::instmeth_iterator
+ I = CatIMPDecl->instmeth_begin(),
+ E = CatIMPDecl->instmeth_end(); I!=E; ++I)
+ InsMap.insert((*I)->getSelector());
+
+ for (ObjCImplementationDecl::classmeth_iterator
+ I = CatIMPDecl->classmeth_begin(),
+ E = CatIMPDecl->classmeth_end(); I != E; ++I)
+ ClsMap.insert((*I)->getSelector());
+ if (InsMap.empty() && ClsMap.empty())
+ return;
+
+ // Get category's primary class.
+ ObjCCategoryDecl *CatDecl = CatIMPDecl->getCategoryDecl();
+ if (!CatDecl)
+ return;
+ ObjCInterfaceDecl *IDecl = CatDecl->getClassInterface();
+ if (!IDecl)
+ return;
+ llvm::DenseSet<Selector> InsMapSeen, ClsMapSeen;
+ bool IncompleteImpl = false;
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ CatIMPDecl, IDecl,
+ IncompleteImpl, false,
+ true /*WarnCategoryMethodImpl*/);
+}
+
+void Sema::ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
+ ObjCContainerDecl* CDecl,
+ bool IncompleteImpl) {
+ llvm::DenseSet<Selector> InsMap;
+ // Check and see if instance methods in class interface have been
+ // implemented in the implementation class.
+ for (ObjCImplementationDecl::instmeth_iterator
+ I = IMPDecl->instmeth_begin(), E = IMPDecl->instmeth_end(); I!=E; ++I)
+ InsMap.insert((*I)->getSelector());
+
+ // Check that each property declared in the interface either 1) has an
+ // implementation or 2) has a @synthesize/@dynamic implementation of the
+ // property in the @implementation.
+ if (const ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl))
+ if (!(LangOpts.ObjCDefaultSynthProperties && LangOpts.ObjCNonFragileABI2) ||
+ IDecl->isObjCRequiresPropertyDefs())
+ DiagnoseUnimplementedProperties(S, IMPDecl, CDecl, InsMap);
+
+ llvm::DenseSet<Selector> ClsMap;
+ for (ObjCImplementationDecl::classmeth_iterator
+ I = IMPDecl->classmeth_begin(),
+ E = IMPDecl->classmeth_end(); I != E; ++I)
+ ClsMap.insert((*I)->getSelector());
+
+ // Check for type conflicts between methods declared in a class/protocol
+ // and its implementation, if any.
+ llvm::DenseSet<Selector> InsMapSeen, ClsMapSeen;
+ MatchAllMethodDeclarations(InsMap, ClsMap, InsMapSeen, ClsMapSeen,
+ IMPDecl, CDecl,
+ IncompleteImpl, true);
+
+ // Check all methods implemented in the category against those declared
+ // in its primary class.
+ if (ObjCCategoryImplDecl *CatDecl =
+ dyn_cast<ObjCCategoryImplDecl>(IMPDecl))
+ CheckCategoryVsClassMethodMatches(CatDecl);
+
+ // Check the protocol list for unimplemented methods in the @implementation
+ // class.
+ // Check and see if class methods in class interface have been
+ // implemented in the implementation class.
+
+ if (ObjCInterfaceDecl *I = dyn_cast<ObjCInterfaceDecl> (CDecl)) {
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ PI = I->all_referenced_protocol_begin(),
+ E = I->all_referenced_protocol_end(); PI != E; ++PI)
+ CheckProtocolMethodDefs(IMPDecl->getLocation(), *PI, IncompleteImpl,
+ InsMap, ClsMap, I);
+ // Check class extensions (unnamed categories)
+ for (const ObjCCategoryDecl *Categories = I->getFirstClassExtension();
+ Categories; Categories = Categories->getNextClassExtension())
+ ImplMethodsVsClassMethods(S, IMPDecl,
+ const_cast<ObjCCategoryDecl*>(Categories),
+ IncompleteImpl);
+ } else if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(CDecl)) {
+ // For an extended class, unimplemented methods in its protocols will
+ // be reported in the primary class.
+ if (!C->IsClassExtension()) {
+ for (ObjCCategoryDecl::protocol_iterator PI = C->protocol_begin(),
+ E = C->protocol_end(); PI != E; ++PI)
+ CheckProtocolMethodDefs(IMPDecl->getLocation(), *PI, IncompleteImpl,
+ InsMap, ClsMap, CDecl);
+ // Report unimplemented properties in the category as well.
+ // When reporting on missing setter/getters, do not report when
+ // setter/getter is implemented in category's primary class
+ // implementation.
+ if (ObjCInterfaceDecl *ID = C->getClassInterface())
+ if (ObjCImplDecl *IMP = ID->getImplementation()) {
+ for (ObjCImplementationDecl::instmeth_iterator
+ I = IMP->instmeth_begin(), E = IMP->instmeth_end(); I!=E; ++I)
+ InsMap.insert((*I)->getSelector());
+ }
+ DiagnoseUnimplementedProperties(S, IMPDecl, CDecl, InsMap);
+ }
+ } else
+ llvm_unreachable("invalid ObjCContainerDecl type.");
+}
+
+/// ActOnForwardClassDeclaration - Handle a forward class declaration (@class).
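+/// For illustration: a directive such as '@class Foo, Bar;' reaches this
+/// callback with two identifiers; each one becomes a forward
+/// ObjCInterfaceDecl unless it conflicts with an existing declaration of a
+/// different kind.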
+Sema::DeclGroupPtrTy
+Sema::ActOnForwardClassDeclaration(SourceLocation AtClassLoc,
+ IdentifierInfo **IdentList,
+ SourceLocation *IdentLocs,
+ unsigned NumElts) {
+ SmallVector<Decl *, 8> DeclsInGroup;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ // Check for another declaration kind with the same name.
+ NamedDecl *PrevDecl
+ = LookupSingleName(TUScope, IdentList[i], IdentLocs[i],
+ LookupOrdinaryName, ForRedeclaration);
+ if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(AtClassLoc, PrevDecl);
+ // Just pretend that we didn't see the previous declaration.
+ PrevDecl = 0;
+ }
+
+ if (PrevDecl && !isa<ObjCInterfaceDecl>(PrevDecl)) {
+ // GCC apparently allows the following idiom:
+ //
+ // typedef NSObject < XCElementTogglerP > XCElementToggler;
+ // @class XCElementToggler;
+ //
+ // Here we have chosen to ignore the forward class declaration
+ // with a warning, since this is the implied behavior.
+ TypedefNameDecl *TDD = dyn_cast<TypedefNameDecl>(PrevDecl);
+ if (!TDD || !TDD->getUnderlyingType()->isObjCObjectType()) {
+ Diag(AtClassLoc, diag::err_redefinition_different_kind) << IdentList[i];
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ } else {
+ // A forward class declaration matching a typedef name of a class refers
+ // to the underlying class. Just ignore the forward class with a warning,
+ // as this forces the intended behavior, which is to look up the typedef
+ // name.
+ if (isa<ObjCObjectType>(TDD->getUnderlyingType())) {
+ Diag(AtClassLoc, diag::warn_forward_class_redefinition) << IdentList[i];
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ continue;
+ }
+ }
+ }
+
+ // Create a declaration to describe this forward declaration.
+ ObjCInterfaceDecl *PrevIDecl
+ = dyn_cast_or_null<ObjCInterfaceDecl>(PrevDecl);
+ ObjCInterfaceDecl *IDecl
+ = ObjCInterfaceDecl::Create(Context, CurContext, AtClassLoc,
+ IdentList[i], PrevIDecl, IdentLocs[i]);
+ IDecl->setAtEndRange(IdentLocs[i]);
+
+ PushOnScopeChains(IDecl, TUScope);
+ CheckObjCDeclScope(IDecl);
+ DeclsInGroup.push_back(IDecl);
+ }
+
+ return BuildDeclaratorGroup(DeclsInGroup.data(), DeclsInGroup.size(), false);
+}
+
+static bool tryMatchRecordTypes(ASTContext &Context,
+ Sema::MethodMatchStrategy strategy,
+ const Type *left, const Type *right);
+
+static bool matchTypes(ASTContext &Context, Sema::MethodMatchStrategy strategy,
+ QualType leftQT, QualType rightQT) {
+ const Type *left =
+ Context.getCanonicalType(leftQT).getUnqualifiedType().getTypePtr();
+ const Type *right =
+ Context.getCanonicalType(rightQT).getUnqualifiedType().getTypePtr();
+
+ if (left == right) return true;
+
+ // If we're doing a strict match, the types have to match exactly.
+ if (strategy == Sema::MMS_strict) return false;
+
+ if (left->isIncompleteType() || right->isIncompleteType()) return false;
+
+ // Otherwise, use this absurdly complicated algorithm to try to
+ // validate the basic, low-level compatibility of the two types.
+
+ // As a minimum, require the sizes and alignments to match.
+ if (Context.getTypeInfo(left) != Context.getTypeInfo(right))
+ return false;
+
+ // Consider all the kinds of non-dependent canonical types:
+ // - functions and arrays aren't possible as return and parameter types
+
+ // - vector types of equal size can be arbitrarily mixed
+ if (isa<VectorType>(left)) return isa<VectorType>(right);
+ if (isa<VectorType>(right)) return false;
+
+ // - references should only match references of identical type
+ // - structs, unions, and Objective-C objects must match more-or-less
+ // exactly
+ // - everything else should be a scalar
+ if (!left->isScalarType() || !right->isScalarType())
+ return tryMatchRecordTypes(Context, strategy, left, right);
+
+ // Make scalars agree in kind, except count bools as chars, and group
+ // all non-member pointers together.
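+ // For illustration: under the loose strategy a result type of BOOL (or C99
+ // _Bool) in one declaration still matches char in another, and a parameter
+ // typed 'id' in one declaration matches a block or C pointer parameter in
+ // the other, since all three are grouped below as object-pointer-like
+ // scalars of the same size.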
+ Type::ScalarTypeKind leftSK = left->getScalarTypeKind();
+ Type::ScalarTypeKind rightSK = right->getScalarTypeKind();
+ if (leftSK == Type::STK_Bool) leftSK = Type::STK_Integral;
+ if (rightSK == Type::STK_Bool) rightSK = Type::STK_Integral;
+ if (leftSK == Type::STK_CPointer || leftSK == Type::STK_BlockPointer)
+ leftSK = Type::STK_ObjCObjectPointer;
+ if (rightSK == Type::STK_CPointer || rightSK == Type::STK_BlockPointer)
+ rightSK = Type::STK_ObjCObjectPointer;
+
+ // Note that data member pointers and function member pointers don't
+ // intermix because of the size differences.
+
+ return (leftSK == rightSK);
+}
+
+static bool tryMatchRecordTypes(ASTContext &Context,
+ Sema::MethodMatchStrategy strategy,
+ const Type *lt, const Type *rt) {
+ assert(lt && rt && lt != rt);
+
+ if (!isa<RecordType>(lt) || !isa<RecordType>(rt)) return false;
+ RecordDecl *left = cast<RecordType>(lt)->getDecl();
+ RecordDecl *right = cast<RecordType>(rt)->getDecl();
+
+ // Require union-hood to match.
+ if (left->isUnion() != right->isUnion()) return false;
+
+ // Require an exact match if either is non-POD.
+ if ((isa<CXXRecordDecl>(left) && !cast<CXXRecordDecl>(left)->isPOD()) ||
+ (isa<CXXRecordDecl>(right) && !cast<CXXRecordDecl>(right)->isPOD()))
+ return false;
+
+ // Require size and alignment to match.
+ if (Context.getTypeInfo(lt) != Context.getTypeInfo(rt)) return false;
+
+ // Require fields to match.
+ RecordDecl::field_iterator li = left->field_begin(), le = left->field_end();
+ RecordDecl::field_iterator ri = right->field_begin(), re = right->field_end();
+ for (; li != le && ri != re; ++li, ++ri) {
+ if (!matchTypes(Context, strategy, li->getType(), ri->getType()))
+ return false;
+ }
+ return (li == le && ri == re);
+}
+
+/// MatchTwoMethodDeclarations - Checks whether two methods have matching
+/// types and returns true or false accordingly.
+/// TODO: Handle protocol lists, such as id<p1,p2>, in type comparisons.
+bool Sema::MatchTwoMethodDeclarations(const ObjCMethodDecl *left,
+ const ObjCMethodDecl *right,
+ MethodMatchStrategy strategy) {
+ if (!matchTypes(Context, strategy,
+ left->getResultType(), right->getResultType()))
+ return false;
+
+ if (getLangOpts().ObjCAutoRefCount &&
+ (left->hasAttr<NSReturnsRetainedAttr>()
+ != right->hasAttr<NSReturnsRetainedAttr>() ||
+ left->hasAttr<NSConsumesSelfAttr>()
+ != right->hasAttr<NSConsumesSelfAttr>()))
+ return false;
+
+ ObjCMethodDecl::param_const_iterator
+ li = left->param_begin(), le = left->param_end(), ri = right->param_begin();
+
+ for (; li != le; ++li, ++ri) {
+ assert(ri != right->param_end() && "Param mismatch");
+ const ParmVarDecl *lparm = *li, *rparm = *ri;
+
+ if (!matchTypes(Context, strategy, lparm->getType(), rparm->getType()))
+ return false;
+
+ if (getLangOpts().ObjCAutoRefCount &&
+ lparm->hasAttr<NSConsumedAttr>() != rparm->hasAttr<NSConsumedAttr>())
+ return false;
+ }
+ return true;
+}
+
+void Sema::addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method) {
+ // If the list is empty, make it a singleton list.
+ if (List->Method == 0) {
+ List->Method = Method;
+ List->Next = 0;
+ return;
+ }
+
+ // We've seen a method with this name, see if we have already seen this type
+ // signature.
+ ObjCMethodList *Previous = List;
+ for (; List; Previous = List, List = List->Next) {
+ if (!MatchTwoMethodDeclarations(Method, List->Method))
+ continue;
+
+ ObjCMethodDecl *PrevObjCMethod = List->Method;
+
+ // Propagate the 'defined' bit.
+ if (Method->isDefined())
+ PrevObjCMethod->setDefined(true);
+
+ // If a method is deprecated, push it into the global pool.
+ // This is used for better diagnostics.
+ if (Method->isDeprecated()) {
+ if (!PrevObjCMethod->isDeprecated())
+ List->Method = Method;
+ }
+ // If the new method is unavailable, push it into the global pool
+ // unless the previous one is deprecated.
+ if (Method->isUnavailable()) {
+ if (PrevObjCMethod->getAvailability() < AR_Deprecated)
+ List->Method = Method;
+ }
+
+ return;
+ }
+
+ // We have a new signature for an existing method - add it.
+ // This is extremely rare. Only 1% of Cocoa selectors are "overloaded".
+ ObjCMethodList *Mem = BumpAlloc.Allocate<ObjCMethodList>();
+ Previous->Next = new (Mem) ObjCMethodList(Method, 0);
+}
+
+/// \brief Read the contents of the method pool for a given selector from
+/// external storage.
+void Sema::ReadMethodPool(Selector Sel) {
+ assert(ExternalSource && "We need an external AST source");
+ ExternalSource->ReadMethodPool(Sel);
+}
+
+void Sema::AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl,
+ bool instance) {
+ // Ignore methods of invalid containers.
+ if (cast<Decl>(Method->getDeclContext())->isInvalidDecl())
+ return;
+
+ if (ExternalSource)
+ ReadMethodPool(Method->getSelector());
+
+ GlobalMethodPool::iterator Pos = MethodPool.find(Method->getSelector());
+ if (Pos == MethodPool.end())
+ Pos = MethodPool.insert(std::make_pair(Method->getSelector(),
+ GlobalMethods())).first;
+
+ Method->setDefined(impl);
+
+ ObjCMethodList &Entry = instance ? Pos->second.first : Pos->second.second;
+ addMethodToGlobalList(&Entry, Method);
+}
+
+/// Determines if this is an "acceptable" loose mismatch in the global
+/// method pool. This exists mostly as a hack to get around certain
+/// global mismatches which we can't afford to make warnings / errors.
+/// Really, what we want is a way to take a method out of the global
+/// method pool.
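+///
+/// For illustration: many unrelated classes declare a '-length' method, so
+/// the pool for that selector can mix, say, an NSUInteger-returning
+/// declaration with a differently typed one (hypothetical); as long as the
+/// method we picked returns an integer type, the mismatch is tolerated.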
+static bool isAcceptableMethodMismatch(ObjCMethodDecl *chosen,
+ ObjCMethodDecl *other) {
+ if (!chosen->isInstanceMethod())
+ return false;
+
+ Selector sel = chosen->getSelector();
+ if (!sel.isUnarySelector() || sel.getNameForSlot(0) != "length")
+ return false;
+
+ // Don't complain about mismatches for -length if the method we
+ // chose has an integral result type.
+ return (chosen->getResultType()->isIntegerType());
+}
+
+ObjCMethodDecl *Sema::LookupMethodInGlobalPool(Selector Sel, SourceRange R,
+ bool receiverIdOrClass,
+ bool warn, bool instance) {
+ if (ExternalSource)
+ ReadMethodPool(Sel);
+
+ GlobalMethodPool::iterator Pos = MethodPool.find(Sel);
+ if (Pos == MethodPool.end())
+ return 0;
+
+ ObjCMethodList &MethList = instance ? Pos->second.first : Pos->second.second;
+
+ if (warn && MethList.Method && MethList.Next) {
+ bool issueDiagnostic = false, issueError = false;
+
+ // We support a warning which complains about *any* difference in
+ // method signature.
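+ // For illustration (hypothetical declarations): if both
+ //   - (void)setValue:(int)value;
+ //   - (void)setValue:(NSString *)value;
+ // are in the pool for the same selector and the receiver is 'id', the
+ // declarations conflict and one of the diagnostics below may be emitted.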
+ bool strictSelectorMatch =
+ (receiverIdOrClass && warn &&
+ (Diags.getDiagnosticLevel(diag::warn_strict_multiple_method_decl,
+ R.getBegin()) !=
+ DiagnosticsEngine::Ignored));
+ if (strictSelectorMatch)
+ for (ObjCMethodList *Next = MethList.Next; Next; Next = Next->Next) {
+ if (!MatchTwoMethodDeclarations(MethList.Method, Next->Method,
+ MMS_strict)) {
+ issueDiagnostic = true;
+ break;
+ }
+ }
+
+ // If we didn't see any strict differences, we won't see any loose
+ // differences. In ARC, however, we also need to check for loose
+ // mismatches, because most of them are errors.
+ if (!strictSelectorMatch ||
+ (issueDiagnostic && getLangOpts().ObjCAutoRefCount))
+ for (ObjCMethodList *Next = MethList.Next; Next; Next = Next->Next) {
+ // This checks whether the two methods have mismatched types.
+ if (!MatchTwoMethodDeclarations(MethList.Method, Next->Method,
+ MMS_loose) &&
+ !isAcceptableMethodMismatch(MethList.Method, Next->Method)) {
+ issueDiagnostic = true;
+ if (getLangOpts().ObjCAutoRefCount)
+ issueError = true;
+ break;
+ }
+ }
+
+ if (issueDiagnostic) {
+ if (issueError)
+ Diag(R.getBegin(), diag::err_arc_multiple_method_decl) << Sel << R;
+ else if (strictSelectorMatch)
+ Diag(R.getBegin(), diag::warn_strict_multiple_method_decl) << Sel << R;
+ else
+ Diag(R.getBegin(), diag::warn_multiple_method_decl) << Sel << R;
+
+ Diag(MethList.Method->getLocStart(),
+ issueError ? diag::note_possibility : diag::note_using)
+ << MethList.Method->getSourceRange();
+ for (ObjCMethodList *Next = MethList.Next; Next; Next = Next->Next)
+ Diag(Next->Method->getLocStart(), diag::note_also_found)
+ << Next->Method->getSourceRange();
+ }
+ }
+ return MethList.Method;
+}
+
+ObjCMethodDecl *Sema::LookupImplementedMethodInGlobalPool(Selector Sel) {
+ GlobalMethodPool::iterator Pos = MethodPool.find(Sel);
+ if (Pos == MethodPool.end())
+ return 0;
+
+ GlobalMethods &Methods = Pos->second;
+
+ if (Methods.first.Method && Methods.first.Method->isDefined())
+ return Methods.first.Method;
+ if (Methods.second.Method && Methods.second.Method->isDefined())
+ return Methods.second.Method;
+ return 0;
+}
+
+/// CompareMethodParamsInBaseAndSuper - This routine compares methods with
+/// identical selector names in the current class and its superclasses, and
+/// issues a warning if any of their argument types are incompatible.
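+///
+/// For illustration (hypothetical names): if a superclass declares
+///   - (void)configure:(NSDictionary *)options;
+/// and a subclass redeclares the same selector as
+///   - (void)configure:(int)options;
+/// the incompatible parameter types are diagnosed here.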
+void Sema::CompareMethodParamsInBaseAndSuper(Decl *ClassDecl,
+ ObjCMethodDecl *Method,
+ bool IsInstance) {
+ ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(ClassDecl);
+ if (ID == 0) return;
+
+ while (ObjCInterfaceDecl *SD = ID->getSuperClass()) {
+ ObjCMethodDecl *SuperMethodDecl =
+ SD->lookupMethod(Method->getSelector(), IsInstance);
+ if (SuperMethodDecl == 0) {
+ ID = SD;
+ continue;
+ }
+ ObjCMethodDecl::param_iterator ParamI = Method->param_begin(),
+ E = Method->param_end();
+ ObjCMethodDecl::param_iterator PrevI = SuperMethodDecl->param_begin();
+ for (; ParamI != E; ++ParamI, ++PrevI) {
+ // The number of parameters is the same; this is guaranteed by the selector match.
+ assert(PrevI != SuperMethodDecl->param_end() && "Param mismatch");
+ QualType T1 = Context.getCanonicalType((*ParamI)->getType());
+ QualType T2 = Context.getCanonicalType((*PrevI)->getType());
+ // If the type of an argument of the method in this class does not match
+ // the respective argument type in the superclass method, issue a warning.
+ if (!Context.typesAreCompatible(T1, T2)) {
+ Diag((*ParamI)->getLocation(), diag::ext_typecheck_base_super)
+ << T1 << T2;
+ Diag(SuperMethodDecl->getLocation(), diag::note_previous_declaration);
+ return;
+ }
+ }
+ ID = SD;
+ }
+}
+
+/// DiagnoseDuplicateIvars -
+/// Check for duplicate ivars in the entire class at the start of
+/// @implementation. This becomes necessary because class extensions can
+/// add ivars to a class in any order, which will not be known until the
+/// class's @implementation is seen.
+void Sema::DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID,
+ ObjCInterfaceDecl *SID) {
+ for (ObjCInterfaceDecl::ivar_iterator IVI = ID->ivar_begin(),
+ IVE = ID->ivar_end(); IVI != IVE; ++IVI) {
+ ObjCIvarDecl* Ivar = (*IVI);
+ if (Ivar->isInvalidDecl())
+ continue;
+ if (IdentifierInfo *II = Ivar->getIdentifier()) {
+ ObjCIvarDecl* prevIvar = SID->lookupInstanceVariable(II);
+ if (prevIvar) {
+ Diag(Ivar->getLocation(), diag::err_duplicate_member) << II;
+ Diag(prevIvar->getLocation(), diag::note_previous_declaration);
+ Ivar->setInvalidDecl();
+ }
+ }
+ }
+}
+
+Sema::ObjCContainerKind Sema::getObjCContainerKind() const {
+ switch (CurContext->getDeclKind()) {
+ case Decl::ObjCInterface:
+ return Sema::OCK_Interface;
+ case Decl::ObjCProtocol:
+ return Sema::OCK_Protocol;
+ case Decl::ObjCCategory:
+ if (dyn_cast<ObjCCategoryDecl>(CurContext)->IsClassExtension())
+ return Sema::OCK_ClassExtension;
+ else
+ return Sema::OCK_Category;
+ case Decl::ObjCImplementation:
+ return Sema::OCK_Implementation;
+ case Decl::ObjCCategoryImpl:
+ return Sema::OCK_CategoryImplementation;
+
+ default:
+ return Sema::OCK_None;
+ }
+}
+
+// Note: For class/category implementations, allMethods/allProperties are
+// always null.
+Decl *Sema::ActOnAtEnd(Scope *S, SourceRange AtEnd,
+ Decl **allMethods, unsigned allNum,
+ Decl **allProperties, unsigned pNum,
+ DeclGroupPtrTy *allTUVars, unsigned tuvNum) {
+
+ if (getObjCContainerKind() == Sema::OCK_None)
+ return 0;
+
+ assert(AtEnd.isValid() && "Invalid location for '@end'");
+
+ ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext);
+ Decl *ClassDecl = cast<Decl>(OCD);
+
+ bool isInterfaceDeclKind =
+ isa<ObjCInterfaceDecl>(ClassDecl) || isa<ObjCCategoryDecl>(ClassDecl)
+ || isa<ObjCProtocolDecl>(ClassDecl);
+ bool checkIdenticalMethods = isa<ObjCImplementationDecl>(ClassDecl);
+
+ // FIXME: Remove these and use the ObjCContainerDecl/DeclContext.
+ llvm::DenseMap<Selector, const ObjCMethodDecl*> InsMap;
+ llvm::DenseMap<Selector, const ObjCMethodDecl*> ClsMap;
+
+ for (unsigned i = 0; i < allNum; i++ ) {
+ ObjCMethodDecl *Method =
+ cast_or_null<ObjCMethodDecl>(allMethods[i]);
+
+ if (!Method) continue; // Already issued a diagnostic.
+ if (Method->isInstanceMethod()) {
+ /// Check for instance method of the same name with incompatible types
+ const ObjCMethodDecl *&PrevMethod = InsMap[Method->getSelector()];
+ bool match = PrevMethod ? MatchTwoMethodDeclarations(Method, PrevMethod)
+ : false;
+ if ((isInterfaceDeclKind && PrevMethod && !match)
+ || (checkIdenticalMethods && match)) {
+ Diag(Method->getLocation(), diag::err_duplicate_method_decl)
+ << Method->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ Method->setInvalidDecl();
+ } else {
+ if (PrevMethod) {
+ Method->setAsRedeclaration(PrevMethod);
+ if (!Context.getSourceManager().isInSystemHeader(
+ Method->getLocation()))
+ Diag(Method->getLocation(), diag::warn_duplicate_method_decl)
+ << Method->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ }
+ InsMap[Method->getSelector()] = Method;
+ /// The following allows us to typecheck messages to "id".
+ AddInstanceMethodToGlobalPool(Method);
+ // Verify that the instance method conforms to the same definition as
+ // parent methods if it shadows one.
+ CompareMethodParamsInBaseAndSuper(ClassDecl, Method, true);
+ }
+ } else {
+ /// Check for class method of the same name with incompatible types
+ const ObjCMethodDecl *&PrevMethod = ClsMap[Method->getSelector()];
+ bool match = PrevMethod ? MatchTwoMethodDeclarations(Method, PrevMethod)
+ : false;
+ if ((isInterfaceDeclKind && PrevMethod && !match)
+ || (checkIdenticalMethods && match)) {
+ Diag(Method->getLocation(), diag::err_duplicate_method_decl)
+ << Method->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ Method->setInvalidDecl();
+ } else {
+ if (PrevMethod) {
+ Method->setAsRedeclaration(PrevMethod);
+ if (!Context.getSourceManager().isInSystemHeader(
+ Method->getLocation()))
+ Diag(Method->getLocation(), diag::warn_duplicate_method_decl)
+ << Method->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ }
+ ClsMap[Method->getSelector()] = Method;
+ /// The following allows us to typecheck messages to "Class".
+ AddFactoryMethodToGlobalPool(Method);
+ // Verify that the class method conforms to the same definition as
+ // parent methods if it shadows one.
+ CompareMethodParamsInBaseAndSuper(ClassDecl, Method, false);
+ }
+ }
+ }
+ if (ObjCInterfaceDecl *I = dyn_cast<ObjCInterfaceDecl>(ClassDecl)) {
+ // Compares properties declared in this class to those of its
+ // super class.
+ ComparePropertiesInBaseAndSuper(I);
+ CompareProperties(I, I);
+ } else if (ObjCCategoryDecl *C = dyn_cast<ObjCCategoryDecl>(ClassDecl)) {
+ // Categories are used to extend the class by declaring new methods.
+ // By the same token, they are also used to add new properties. No
+ // need to compare the added property to those in the class.
+
+ // Compare protocol properties with those in category
+ CompareProperties(C, C);
+ if (C->IsClassExtension()) {
+ ObjCInterfaceDecl *CCPrimary = C->getClassInterface();
+ DiagnoseClassExtensionDupMethods(C, CCPrimary);
+ }
+ }
+ if (ObjCContainerDecl *CDecl = dyn_cast<ObjCContainerDecl>(ClassDecl)) {
+ if (CDecl->getIdentifier())
+ // ProcessPropertyDecl is responsible for diagnosing conflicts with any
+ // user-defined setter/getter. It also synthesizes setter/getter methods
+ // and adds them to the DeclContext and global method pools.
+ for (ObjCContainerDecl::prop_iterator I = CDecl->prop_begin(),
+ E = CDecl->prop_end();
+ I != E; ++I)
+ ProcessPropertyDecl(*I, CDecl);
+ CDecl->setAtEndRange(AtEnd);
+ }
+ if (ObjCImplementationDecl *IC=dyn_cast<ObjCImplementationDecl>(ClassDecl)) {
+ IC->setAtEndRange(AtEnd);
+ if (ObjCInterfaceDecl* IDecl = IC->getClassInterface()) {
+ // Any property declared in a class extension might have a user-declared
+ // setter or getter in the current class extension or in one of the other
+ // class extensions. Mark them as synthesized, as the property will be
+ // synthesized when a property with the same name is seen in the
+ // @implementation.
+ for (const ObjCCategoryDecl *ClsExtDecl =
+ IDecl->getFirstClassExtension();
+ ClsExtDecl; ClsExtDecl = ClsExtDecl->getNextClassExtension()) {
+ for (ObjCContainerDecl::prop_iterator I = ClsExtDecl->prop_begin(),
+ E = ClsExtDecl->prop_end(); I != E; ++I) {
+ ObjCPropertyDecl *Property = (*I);
+ // Skip over properties declared @dynamic
+ if (const ObjCPropertyImplDecl *PIDecl
+ = IC->FindPropertyImplDecl(Property->getIdentifier()))
+ if (PIDecl->getPropertyImplementation()
+ == ObjCPropertyImplDecl::Dynamic)
+ continue;
+
+ for (const ObjCCategoryDecl *CExtDecl =
+ IDecl->getFirstClassExtension();
+ CExtDecl; CExtDecl = CExtDecl->getNextClassExtension()) {
+ if (ObjCMethodDecl *GetterMethod =
+ CExtDecl->getInstanceMethod(Property->getGetterName()))
+ GetterMethod->setSynthesized(true);
+ if (!Property->isReadOnly())
+ if (ObjCMethodDecl *SetterMethod =
+ CExtDecl->getInstanceMethod(Property->getSetterName()))
+ SetterMethod->setSynthesized(true);
+ }
+ }
+ }
+ ImplMethodsVsClassMethods(S, IC, IDecl);
+ AtomicPropertySetterGetterRules(IC, IDecl);
+ DiagnoseOwningPropertyGetterSynthesis(IC);
+
+ bool HasRootClassAttr = IDecl->hasAttr<ObjCRootClassAttr>();
+ if (IDecl->getSuperClass() == NULL) {
+ // This class has no superclass, so check that it has been marked with
+ // __attribute__((objc_root_class)).
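+ // For illustration (hypothetical class): a root class declared as
+ //   @interface MyRoot @end
+ // with no superclass and without the objc_root_class attribute gets the
+ // warning below when its @implementation is closed, and if NSObject is
+ // visible a fix-it suggests '@interface MyRoot : NSObject'.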
+ if (!HasRootClassAttr) {
+ SourceLocation DeclLoc(IDecl->getLocation());
+ SourceLocation SuperClassLoc(PP.getLocForEndOfToken(DeclLoc));
+ Diag(DeclLoc, diag::warn_objc_root_class_missing)
+ << IDecl->getIdentifier();
+ // See if NSObject is in the current scope, and if it is, suggest
+ // adding " : NSObject " to the class declaration.
+ NamedDecl *IF = LookupSingleName(TUScope,
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSObject),
+ DeclLoc, LookupOrdinaryName);
+ ObjCInterfaceDecl *NSObjectDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
+ if (NSObjectDecl && NSObjectDecl->getDefinition()) {
+ Diag(SuperClassLoc, diag::note_objc_needs_superclass)
+ << FixItHint::CreateInsertion(SuperClassLoc, " : NSObject ");
+ } else {
+ Diag(SuperClassLoc, diag::note_objc_needs_superclass);
+ }
+ }
+ } else if (HasRootClassAttr) {
+ // Complain that only root classes may have this attribute.
+ Diag(IDecl->getLocation(), diag::err_objc_root_class_subclass);
+ }
+
+ if (LangOpts.ObjCNonFragileABI2) {
+ while (IDecl->getSuperClass()) {
+ DiagnoseDuplicateIvars(IDecl, IDecl->getSuperClass());
+ IDecl = IDecl->getSuperClass();
+ }
+ }
+ }
+ SetIvarInitializers(IC);
+ } else if (ObjCCategoryImplDecl* CatImplClass =
+ dyn_cast<ObjCCategoryImplDecl>(ClassDecl)) {
+ CatImplClass->setAtEndRange(AtEnd);
+
+ // Find category interface decl and then check that all methods declared
+ // in this interface are implemented in the category @implementation.
+ if (ObjCInterfaceDecl* IDecl = CatImplClass->getClassInterface()) {
+ for (ObjCCategoryDecl *Categories = IDecl->getCategoryList();
+ Categories; Categories = Categories->getNextClassCategory()) {
+ if (Categories->getIdentifier() == CatImplClass->getIdentifier()) {
+ ImplMethodsVsClassMethods(S, CatImplClass, Categories);
+ break;
+ }
+ }
+ }
+ }
+ if (isInterfaceDeclKind) {
+ // Reject invalid vardecls.
+ for (unsigned i = 0; i != tuvNum; i++) {
+ DeclGroupRef DG = allTUVars[i].getAsVal<DeclGroupRef>();
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
+ if (VarDecl *VDecl = dyn_cast<VarDecl>(*I)) {
+ if (!VDecl->hasExternalStorage())
+ Diag(VDecl->getLocation(), diag::err_objc_var_decl_inclass);
+ }
+ }
+ }
+ ActOnObjCContainerFinishDefinition();
+
+ for (unsigned i = 0; i != tuvNum; i++) {
+ DeclGroupRef DG = allTUVars[i].getAsVal<DeclGroupRef>();
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I)
+ (*I)->setTopLevelDeclInObjCContainer();
+ Consumer.HandleTopLevelDeclInObjCContainer(DG);
+ }
+
+ return ClassDecl;
+}
+
+
+/// CvtQTToAstBitMask - Utility routine to produce an AST bitmask for an
+/// Objective-C type qualifier from the parser version of the same info.
+static Decl::ObjCDeclQualifier
+CvtQTToAstBitMask(ObjCDeclSpec::ObjCDeclQualifier PQTVal) {
+ return (Decl::ObjCDeclQualifier) (unsigned) PQTVal;
+}
+
+static inline
+bool containsInvalidMethodImplAttribute(ObjCMethodDecl *IMD,
+ const AttrVec &A) {
+ // If the method is only declared in the implementation (a private method),
+ // there is no need to issue any diagnostics on a method definition with
+ // attributes.
+ if (!IMD)
+ return false;
+
+ // The method declared in the interface has no attributes, but the
+ // implementation has attributes. This is invalid.
+ if (!IMD->hasAttrs())
+ return true;
+
+ const AttrVec &D = IMD->getAttrs();
+ if (D.size() != A.size())
+ return true;
+
+ // Attributes on the method declaration and definition must match exactly.
+ // Note that we have at most a couple of attributes on methods, so this
+ // n*n search is good enough.
+ for (AttrVec::const_iterator i = A.begin(), e = A.end(); i != e; ++i) {
+ bool match = false;
+ for (AttrVec::const_iterator i1 = D.begin(), e1 = D.end(); i1 != e1; ++i1) {
+ if ((*i)->getKind() == (*i1)->getKind()) {
+ match = true;
+ break;
+ }
+ }
+ if (!match)
+ return true;
+ }
+ return false;
+}
+
+namespace {
+ /// \brief Describes the compatibility of a result type with its method.
+ enum ResultTypeCompatibilityKind {
+ RTC_Compatible,
+ RTC_Incompatible,
+ RTC_Unknown
+ };
+}
+
+/// \brief Check whether the declared result type of the given Objective-C
+/// method declaration is compatible with the method's class.
+///
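+/// For illustration (hypothetical class): inside @interface MyWidget, methods
+/// declared as '- (id)init' or '- (MyWidget *)init' have a compatible
+/// declared result type, while '- (NSString *)init' does not and therefore
+/// cannot inherit a related result type.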
+static ResultTypeCompatibilityKind
+CheckRelatedResultTypeCompatibility(Sema &S, ObjCMethodDecl *Method,
+ ObjCInterfaceDecl *CurrentClass) {
+ QualType ResultType = Method->getResultType();
+
+ // If an Objective-C method inherits its related result type, then its
+ // declared result type must be compatible with its own class type. The
+ // declared result type is compatible if:
+ if (const ObjCObjectPointerType *ResultObjectType
+ = ResultType->getAs<ObjCObjectPointerType>()) {
+ // - it is id or qualified id, or
+ if (ResultObjectType->isObjCIdType() ||
+ ResultObjectType->isObjCQualifiedIdType())
+ return RTC_Compatible;
+
+ if (CurrentClass) {
+ if (ObjCInterfaceDecl *ResultClass
+ = ResultObjectType->getInterfaceDecl()) {
+ // - it is the same as the method's class type, or
+ if (declaresSameEntity(CurrentClass, ResultClass))
+ return RTC_Compatible;
+
+ // - it is a superclass of the method's class type
+ if (ResultClass->isSuperClassOf(CurrentClass))
+ return RTC_Compatible;
+ }
+ } else {
+ // Any Objective-C pointer type might be acceptable for a protocol
+ // method; we just don't know.
+ return RTC_Unknown;
+ }
+ }
+
+ return RTC_Incompatible;
+}
+
+namespace {
+/// A helper class for searching for methods which a particular method
+/// overrides.
+class OverrideSearch {
+public:
+ Sema &S;
+ ObjCMethodDecl *Method;
+ llvm::SmallPtrSet<ObjCContainerDecl*, 128> Searched;
+ llvm::SmallPtrSet<ObjCMethodDecl*, 4> Overridden;
+ bool Recursive;
+
+public:
+ OverrideSearch(Sema &S, ObjCMethodDecl *method) : S(S), Method(method) {
+ Selector selector = method->getSelector();
+
+ // Bypass this search if we've never seen an instance/class method
+ // with this selector before.
+ Sema::GlobalMethodPool::iterator it = S.MethodPool.find(selector);
+ if (it == S.MethodPool.end()) {
+ if (!S.ExternalSource) return;
+ S.ReadMethodPool(selector);
+
+ it = S.MethodPool.find(selector);
+ if (it == S.MethodPool.end())
+ return;
+ }
+ ObjCMethodList &list =
+ method->isInstanceMethod() ? it->second.first : it->second.second;
+ if (!list.Method) return;
+
+ ObjCContainerDecl *container
+ = cast<ObjCContainerDecl>(method->getDeclContext());
+
+ // Prevent the search from reaching this container again. This is
+ // important with categories, which override methods from the
+ // interface and each other.
+ Searched.insert(container);
+ searchFromContainer(container);
+ }
+
+ typedef llvm::SmallPtrSet<ObjCMethodDecl*, 128>::iterator iterator;
+ iterator begin() const { return Overridden.begin(); }
+ iterator end() const { return Overridden.end(); }
+
+private:
+ void searchFromContainer(ObjCContainerDecl *container) {
+ if (container->isInvalidDecl()) return;
+
+ switch (container->getDeclKind()) {
+#define OBJCCONTAINER(type, base) \
+ case Decl::type: \
+ searchFrom(cast<type##Decl>(container)); \
+ break;
+#define ABSTRACT_DECL(expansion)
+#define DECL(type, base) \
+ case Decl::type:
+#include "clang/AST/DeclNodes.inc"
+ llvm_unreachable("not an ObjC container!");
+ }
+ }
+
+ void searchFrom(ObjCProtocolDecl *protocol) {
+ if (!protocol->hasDefinition())
+ return;
+
+ // A method in a protocol declaration overrides declarations from
+ // referenced ("parent") protocols.
+ search(protocol->getReferencedProtocols());
+ }
+
+ void searchFrom(ObjCCategoryDecl *category) {
+ // A method in a category declaration overrides declarations from
+ // the main class and from protocols the category references.
+ search(category->getClassInterface());
+ search(category->getReferencedProtocols());
+ }
+
+ void searchFrom(ObjCCategoryImplDecl *impl) {
+ // A method in a category definition that has a category
+ // declaration overrides declarations from the category
+ // declaration.
+ if (ObjCCategoryDecl *category = impl->getCategoryDecl()) {
+ search(category);
+
+ // Otherwise it overrides declarations from the class.
+ } else {
+ search(impl->getClassInterface());
+ }
+ }
+
+ void searchFrom(ObjCInterfaceDecl *iface) {
+ // A method in a class declaration overrides declarations from
+ if (!iface->hasDefinition())
+ return;
+
+ // - categories,
+ for (ObjCCategoryDecl *category = iface->getCategoryList();
+ category; category = category->getNextClassCategory())
+ search(category);
+
+ // - the super class, and
+ if (ObjCInterfaceDecl *super = iface->getSuperClass())
+ search(super);
+
+ // - any referenced protocols.
+ search(iface->getReferencedProtocols());
+ }
+
+ void searchFrom(ObjCImplementationDecl *impl) {
+ // A method in a class implementation overrides declarations from
+ // the class interface.
+ search(impl->getClassInterface());
+ }
+
+
+ void search(const ObjCProtocolList &protocols) {
+ for (ObjCProtocolList::iterator i = protocols.begin(), e = protocols.end();
+ i != e; ++i)
+ search(*i);
+ }
+
+ void search(ObjCContainerDecl *container) {
+ // Abort if we've already searched this container.
+ if (!Searched.insert(container)) return;
+
+ // Check for a method in this container which matches this selector.
+ ObjCMethodDecl *meth = container->getMethod(Method->getSelector(),
+ Method->isInstanceMethod());
+
+ // If we find one, record it and bail out.
+ if (meth) {
+ Overridden.insert(meth);
+ return;
+ }
+
+ // Otherwise, search for methods that a hypothetical method here
+ // would have overridden.
+
+ // Note that we're now in a recursive case.
+ Recursive = true;
+
+ searchFromContainer(container);
+ }
+};
+}
+
+Decl *Sema::ActOnMethodDeclaration(
+ Scope *S,
+ SourceLocation MethodLoc, SourceLocation EndLoc,
+ tok::TokenKind MethodType,
+ ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
+ ArrayRef<SourceLocation> SelectorLocs,
+ Selector Sel,
+ // Optional arguments. The number of types/arguments is obtained
+ // from Sel.getNumArgs().
+ ObjCArgInfo *ArgInfo,
+ DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
+ AttributeList *AttrList, tok::ObjCKeywordKind MethodDeclKind,
+ bool isVariadic, bool MethodDefinition) {
+ // Make sure we can establish a context for the method.
+ if (!CurContext->isObjCContainer()) {
+ Diag(MethodLoc, diag::error_missing_method_context);
+ return 0;
+ }
+ ObjCContainerDecl *OCD = dyn_cast<ObjCContainerDecl>(CurContext);
+ Decl *ClassDecl = cast<Decl>(OCD);
+ QualType resultDeclType;
+
+ bool HasRelatedResultType = false;
+ TypeSourceInfo *ResultTInfo = 0;
+ if (ReturnType) {
+ resultDeclType = GetTypeFromParser(ReturnType, &ResultTInfo);
+
+ // Methods cannot return interface types. All ObjC objects are
+ // passed by reference.
+ if (resultDeclType->isObjCObjectType()) {
+ Diag(MethodLoc, diag::err_object_cannot_be_passed_returned_by_value)
+ << 0 << resultDeclType;
+ return 0;
+ }
+
+ HasRelatedResultType = (resultDeclType == Context.getObjCInstanceType());
+ } else { // get the type for "id".
+ resultDeclType = Context.getObjCIdType();
+ Diag(MethodLoc, diag::warn_missing_method_return_type)
+ << FixItHint::CreateInsertion(SelectorLocs.front(), "(id)");
+ }
+
+ ObjCMethodDecl* ObjCMethod =
+ ObjCMethodDecl::Create(Context, MethodLoc, EndLoc, Sel,
+ resultDeclType,
+ ResultTInfo,
+ CurContext,
+ MethodType == tok::minus, isVariadic,
+ /*isSynthesized=*/false,
+ /*isImplicitlyDeclared=*/false, /*isDefined=*/false,
+ MethodDeclKind == tok::objc_optional
+ ? ObjCMethodDecl::Optional
+ : ObjCMethodDecl::Required,
+ HasRelatedResultType);
+
+ SmallVector<ParmVarDecl*, 16> Params;
+
+ for (unsigned i = 0, e = Sel.getNumArgs(); i != e; ++i) {
+ QualType ArgType;
+ TypeSourceInfo *DI;
+
+ if (ArgInfo[i].Type == 0) {
+ ArgType = Context.getObjCIdType();
+ DI = 0;
+ } else {
+ ArgType = GetTypeFromParser(ArgInfo[i].Type, &DI);
+ // Perform the default array/function conversions (C99 6.7.5.3p[7,8]).
+ ArgType = Context.getAdjustedParameterType(ArgType);
+ }
+
+ LookupResult R(*this, ArgInfo[i].Name, ArgInfo[i].NameLoc,
+ LookupOrdinaryName, ForRedeclaration);
+ LookupName(R, S);
+ if (R.isSingleResult()) {
+ NamedDecl *PrevDecl = R.getFoundDecl();
+ if (S->isDeclScope(PrevDecl)) {
+ Diag(ArgInfo[i].NameLoc,
+ (MethodDefinition ? diag::warn_method_param_redefinition
+ : diag::warn_method_param_declaration))
+ << ArgInfo[i].Name;
+ Diag(PrevDecl->getLocation(),
+ diag::note_previous_declaration);
+ }
+ }
+
+ SourceLocation StartLoc = DI
+ ? DI->getTypeLoc().getBeginLoc()
+ : ArgInfo[i].NameLoc;
+
+ ParmVarDecl* Param = CheckParameter(ObjCMethod, StartLoc,
+ ArgInfo[i].NameLoc, ArgInfo[i].Name,
+ ArgType, DI, SC_None, SC_None);
+
+ Param->setObjCMethodScopeInfo(i);
+
+ Param->setObjCDeclQualifier(
+ CvtQTToAstBitMask(ArgInfo[i].DeclSpec.getObjCDeclQualifier()));
+
+ // Apply the attributes to the parameter.
+ ProcessDeclAttributeList(TUScope, Param, ArgInfo[i].ArgAttrs);
+
+ if (Param->hasAttr<BlocksAttr>()) {
+ Diag(Param->getLocation(), diag::err_block_on_nonlocal);
+ Param->setInvalidDecl();
+ }
+ S->AddDecl(Param);
+ IdResolver.AddDecl(Param);
+
+ Params.push_back(Param);
+ }
+
+ for (unsigned i = 0, e = CNumArgs; i != e; ++i) {
+ ParmVarDecl *Param = cast<ParmVarDecl>(CParamInfo[i].Param);
+ QualType ArgType = Param->getType();
+ if (ArgType.isNull())
+ ArgType = Context.getObjCIdType();
+ else
+ // Perform the default array/function conversions (C99 6.7.5.3p[7,8]).
+ ArgType = Context.getAdjustedParameterType(ArgType);
+ if (ArgType->isObjCObjectType()) {
+ Diag(Param->getLocation(),
+ diag::err_object_cannot_be_passed_returned_by_value)
+ << 1 << ArgType;
+ Param->setInvalidDecl();
+ }
+ Param->setDeclContext(ObjCMethod);
+
+ Params.push_back(Param);
+ }
+
+ ObjCMethod->setMethodParams(Context, Params, SelectorLocs);
+ ObjCMethod->setObjCDeclQualifier(
+ CvtQTToAstBitMask(ReturnQT.getObjCDeclQualifier()));
+
+ if (AttrList)
+ ProcessDeclAttributeList(TUScope, ObjCMethod, AttrList);
+
+ // Add the method now.
+ const ObjCMethodDecl *PrevMethod = 0;
+ if (ObjCImplDecl *ImpDecl = dyn_cast<ObjCImplDecl>(ClassDecl)) {
+ if (MethodType == tok::minus) {
+ PrevMethod = ImpDecl->getInstanceMethod(Sel);
+ ImpDecl->addInstanceMethod(ObjCMethod);
+ } else {
+ PrevMethod = ImpDecl->getClassMethod(Sel);
+ ImpDecl->addClassMethod(ObjCMethod);
+ }
+
+ ObjCMethodDecl *IMD = 0;
+ if (ObjCInterfaceDecl *IDecl = ImpDecl->getClassInterface())
+ IMD = IDecl->lookupMethod(ObjCMethod->getSelector(),
+ ObjCMethod->isInstanceMethod());
+ if (ObjCMethod->hasAttrs() &&
+ containsInvalidMethodImplAttribute(IMD, ObjCMethod->getAttrs())) {
+ SourceLocation MethodLoc = IMD->getLocation();
+ if (!getSourceManager().isInSystemHeader(MethodLoc)) {
+ Diag(EndLoc, diag::warn_attribute_method_def);
+ Diag(MethodLoc, diag::note_method_declared_at)
+ << ObjCMethod->getDeclName();
+ }
+ }
+ } else {
+ cast<DeclContext>(ClassDecl)->addDecl(ObjCMethod);
+ }
+
+ if (PrevMethod) {
+ // You can never have two method definitions with the same name.
+ Diag(ObjCMethod->getLocation(), diag::err_duplicate_method_decl)
+ << ObjCMethod->getDeclName();
+ Diag(PrevMethod->getLocation(), diag::note_previous_declaration);
+ }
+
+ // If this Objective-C method does not have a related result type, but we
+ // are allowed to infer related result types, try to do so based on the
+ // method family.
+ ObjCInterfaceDecl *CurrentClass = dyn_cast<ObjCInterfaceDecl>(ClassDecl);
+ if (!CurrentClass) {
+ if (ObjCCategoryDecl *Cat = dyn_cast<ObjCCategoryDecl>(ClassDecl))
+ CurrentClass = Cat->getClassInterface();
+ else if (ObjCImplDecl *Impl = dyn_cast<ObjCImplDecl>(ClassDecl))
+ CurrentClass = Impl->getClassInterface();
+ else if (ObjCCategoryImplDecl *CatImpl
+ = dyn_cast<ObjCCategoryImplDecl>(ClassDecl))
+ CurrentClass = CatImpl->getClassInterface();
+ }
+
+ ResultTypeCompatibilityKind RTC
+ = CheckRelatedResultTypeCompatibility(*this, ObjCMethod, CurrentClass);
+
+ // Search for overridden methods and merge information down from them.
+ OverrideSearch overrides(*this, ObjCMethod);
+ for (OverrideSearch::iterator
+ i = overrides.begin(), e = overrides.end(); i != e; ++i) {
+ ObjCMethodDecl *overridden = *i;
+
+ // Propagate down the 'related result type' bit from overridden methods.
+ if (RTC != RTC_Incompatible && overridden->hasRelatedResultType())
+ ObjCMethod->SetRelatedResultType();
+
+ // Then merge the declarations.
+ mergeObjCMethodDecls(ObjCMethod, overridden);
+
+ // Check for overriding methods
+ if (isa<ObjCInterfaceDecl>(ObjCMethod->getDeclContext()) ||
+ isa<ObjCImplementationDecl>(ObjCMethod->getDeclContext()))
+ CheckConflictingOverridingMethod(ObjCMethod, overridden,
+ isa<ObjCProtocolDecl>(overridden->getDeclContext()));
+ }
+
+ bool ARCError = false;
+ if (getLangOpts().ObjCAutoRefCount)
+ ARCError = CheckARCMethodDecl(*this, ObjCMethod);
+
+ // Infer the related result type when possible.
+ if (!ARCError && RTC == RTC_Compatible &&
+ !ObjCMethod->hasRelatedResultType() &&
+ LangOpts.ObjCInferRelatedResultType) {
+ bool InferRelatedResultType = false;
+ switch (ObjCMethod->getMethodFamily()) {
+ case OMF_None:
+ case OMF_copy:
+ case OMF_dealloc:
+ case OMF_finalize:
+ case OMF_mutableCopy:
+ case OMF_release:
+ case OMF_retainCount:
+ case OMF_performSelector:
+ break;
+
+ case OMF_alloc:
+ case OMF_new:
+ InferRelatedResultType = ObjCMethod->isClassMethod();
+ break;
+
+ case OMF_init:
+ case OMF_autorelease:
+ case OMF_retain:
+ case OMF_self:
+ InferRelatedResultType = ObjCMethod->isInstanceMethod();
+ break;
+ }
+
+ if (InferRelatedResultType)
+ ObjCMethod->SetRelatedResultType();
+ }
+
+ return ObjCMethod;
+}
+
+bool Sema::CheckObjCDeclScope(Decl *D) {
+ // The following is also an error, but it is caused by a missing @end,
+ // and the diagnostic is issued elsewhere.
+ if (isa<ObjCContainerDecl>(CurContext->getRedeclContext()))
+ return false;
+
+ // If we switched context to the translation unit while we are still lexically
+ // in an Objective-C container, it means the parser missed emitting an error.
+ if (isa<TranslationUnitDecl>(getCurLexicalContext()->getRedeclContext()))
+ return false;
+
+ Diag(D->getLocation(), diag::err_objc_decls_may_only_appear_in_global_scope);
+ D->setInvalidDecl();
+
+ return true;
+}
+
+/// Called whenever @defs(ClassName) is encountered in the source. Inserts the
+/// instance variables of ClassName into Decls.
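+///
+/// For illustration (hypothetical names, fragile ABI only):
+///   struct MyClassLayout { @defs(MyClass) };
+/// pulls MyClass's instance variables into the struct; each collected ivar
+/// becomes an ObjCAtDefsFieldDecl below.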
+void Sema::ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
+ IdentifierInfo *ClassName,
+ SmallVectorImpl<Decl*> &Decls) {
+ // Check that ClassName is a valid class
+ ObjCInterfaceDecl *Class = getObjCInterfaceDecl(ClassName, DeclStart);
+ if (!Class) {
+ Diag(DeclStart, diag::err_undef_interface) << ClassName;
+ return;
+ }
+ if (LangOpts.ObjCNonFragileABI) {
+ Diag(DeclStart, diag::err_atdef_nonfragile_interface);
+ return;
+ }
+
+ // Collect the instance variables
+ SmallVector<const ObjCIvarDecl*, 32> Ivars;
+ Context.DeepCollectObjCIvars(Class, true, Ivars);
+ // For each ivar, create a fresh ObjCAtDefsFieldDecl.
+ for (unsigned i = 0; i < Ivars.size(); i++) {
+ const FieldDecl* ID = cast<FieldDecl>(Ivars[i]);
+ RecordDecl *Record = dyn_cast<RecordDecl>(TagD);
+ Decl *FD = ObjCAtDefsFieldDecl::Create(Context, Record,
+ /*FIXME: StartL=*/ID->getLocation(),
+ ID->getLocation(),
+ ID->getIdentifier(), ID->getType(),
+ ID->getBitWidth());
+ Decls.push_back(FD);
+ }
+
+ // Introduce all of these fields into the appropriate scope.
+ for (SmallVectorImpl<Decl*>::iterator D = Decls.begin();
+ D != Decls.end(); ++D) {
+ FieldDecl *FD = cast<FieldDecl>(*D);
+ if (getLangOpts().CPlusPlus)
+ PushOnScopeChains(cast<FieldDecl>(FD), S);
+ else if (RecordDecl *Record = dyn_cast<RecordDecl>(TagD))
+ Record->addDecl(FD);
+ }
+}
+
+/// \brief Build and type-check a new Objective-C exception variable
+/// declaration.
+VarDecl *Sema::BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType T,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ IdentifierInfo *Id,
+ bool Invalid) {
+ // ISO/IEC TR 18037 S6.7.3: "The type of an object with automatic storage
+ // duration shall not be qualified by an address-space qualifier."
+  // Since all parameters have automatic storage duration, they cannot have
+  // an address space.
+ if (T.getAddressSpace() != 0) {
+ Diag(IdLoc, diag::err_arg_with_address_space);
+ Invalid = true;
+ }
+
+ // An @catch parameter must be an unqualified object pointer type;
+ // FIXME: Recover from "NSObject foo" by inserting the * in "NSObject *foo"?
+ if (Invalid) {
+ // Don't do any further checking.
+ } else if (T->isDependentType()) {
+ // Okay: we don't know what this type will instantiate to.
+ } else if (!T->isObjCObjectPointerType()) {
+ Invalid = true;
+    Diag(IdLoc, diag::err_catch_param_not_objc_type);
+ } else if (T->isObjCQualifiedIdType()) {
+ Invalid = true;
+ Diag(IdLoc, diag::err_illegal_qualifiers_on_catch_parm);
+ }
+
+ VarDecl *New = VarDecl::Create(Context, CurContext, StartLoc, IdLoc, Id,
+ T, TInfo, SC_None, SC_None);
+ New->setExceptionVariable(true);
+
+ // In ARC, infer 'retaining' for variables of retainable type.
+ if (getLangOpts().ObjCAutoRefCount && inferObjCARCLifetime(New))
+ Invalid = true;
+
+ if (Invalid)
+ New->setInvalidDecl();
+ return New;
+}
+
+Decl *Sema::ActOnObjCExceptionDecl(Scope *S, Declarator &D) {
+ const DeclSpec &DS = D.getDeclSpec();
+
+ // We allow the "register" storage class on exception variables because
+ // GCC did, but we drop it completely. Any other storage class is an error.
+ if (DS.getStorageClassSpec() == DeclSpec::SCS_register) {
+ Diag(DS.getStorageClassSpecLoc(), diag::warn_register_objc_catch_parm)
+ << FixItHint::CreateRemoval(SourceRange(DS.getStorageClassSpecLoc()));
+ } else if (DS.getStorageClassSpec() != DeclSpec::SCS_unspecified) {
+ Diag(DS.getStorageClassSpecLoc(), diag::err_storage_spec_on_catch_parm)
+ << DS.getStorageClassSpec();
+ }
+ if (D.getDeclSpec().isThreadSpecified())
+ Diag(D.getDeclSpec().getThreadSpecLoc(), diag::err_invalid_thread);
+ D.getMutableDeclSpec().ClearStorageClassSpecs();
+
+ DiagnoseFunctionSpecifiers(D);
+
+ // Check that there are no default arguments inside the type of this
+ // exception object (C++ only).
+ if (getLangOpts().CPlusPlus)
+ CheckExtraCXXDefaultArguments(D);
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType ExceptionType = TInfo->getType();
+
+ VarDecl *New = BuildObjCExceptionDecl(TInfo, ExceptionType,
+ D.getSourceRange().getBegin(),
+ D.getIdentifierLoc(),
+ D.getIdentifier(),
+ D.isInvalidType());
+
+ // Parameter declarators cannot be qualified (C++ [dcl.meaning]p1).
+ if (D.getCXXScopeSpec().isSet()) {
+ Diag(D.getIdentifierLoc(), diag::err_qualified_objc_catch_parm)
+ << D.getCXXScopeSpec().getRange();
+ New->setInvalidDecl();
+ }
+
+ // Add the parameter declaration into this scope.
+ S->AddDecl(New);
+ if (D.getIdentifier())
+ IdResolver.AddDecl(New);
+
+ ProcessDeclAttributes(S, New, D);
+
+ if (New->hasAttr<BlocksAttr>())
+ Diag(New->getLocation(), diag::err_block_on_nonlocal);
+ return New;
+}
+
+/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
+/// construction or destruction.
+void Sema::CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
+ SmallVectorImpl<ObjCIvarDecl*> &Ivars) {
+ for (ObjCIvarDecl *Iv = OI->all_declared_ivar_begin(); Iv;
+ Iv= Iv->getNextIvar()) {
+ QualType QT = Context.getBaseElementType(Iv->getType());
+ if (QT->isRecordType())
+ Ivars.push_back(Iv);
+ }
+}
+
+void Sema::DiagnoseUseOfUnimplementedSelectors() {
+ // Load referenced selectors from the external source.
+ if (ExternalSource) {
+ SmallVector<std::pair<Selector, SourceLocation>, 4> Sels;
+ ExternalSource->ReadReferencedSelectors(Sels);
+ for (unsigned I = 0, N = Sels.size(); I != N; ++I)
+ ReferencedSelectors[Sels[I].first] = Sels[I].second;
+ }
+
+  // The warning is issued only when a selector table is
+  // generated (which means there is at least one implementation
+  // in the TU). This matches gcc's behavior.
+ if (ReferencedSelectors.empty() ||
+ !Context.AnyObjCImplementation())
+ return;
+ for (llvm::DenseMap<Selector, SourceLocation>::iterator S =
+ ReferencedSelectors.begin(),
+ E = ReferencedSelectors.end(); S != E; ++S) {
+ Selector Sel = (*S).first;
+ if (!LookupImplementedMethodInGlobalPool(Sel))
+ Diag((*S).second, diag::warn_unimplemented_selector) << Sel;
+ }
+ return;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
new file mode 100644
index 0000000..42221f8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -0,0 +1,729 @@
+//===--- SemaExceptionSpec.cpp - C++ Exception Specifications ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Sema routines for C++ exception specification testing.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+
+namespace clang {
+
+static const FunctionProtoType *GetUnderlyingFunction(QualType T)
+{
+ if (const PointerType *PtrTy = T->getAs<PointerType>())
+ T = PtrTy->getPointeeType();
+ else if (const ReferenceType *RefTy = T->getAs<ReferenceType>())
+ T = RefTy->getPointeeType();
+ else if (const MemberPointerType *MPTy = T->getAs<MemberPointerType>())
+ T = MPTy->getPointeeType();
+ return T->getAs<FunctionProtoType>();
+}
+
+/// CheckSpecifiedExceptionType - Check if the given type is valid in an
+/// exception specification. Incomplete types, or pointers to incomplete types
+/// other than void are not allowed.
+bool Sema::CheckSpecifiedExceptionType(QualType T, const SourceRange &Range) {
+
+  // This check (and the similar one below) deals with core issue 437, which
+  // changes C++ 9.2p2 this way:
+ // Within the class member-specification, the class is regarded as complete
+ // within function bodies, default arguments, exception-specifications, and
+ // constructor ctor-initializers (including such things in nested classes).
+ if (T->isRecordType() && T->getAs<RecordType>()->isBeingDefined())
+ return false;
+
+ // C++ 15.4p2: A type denoted in an exception-specification shall not denote
+ // an incomplete type.
+ if (RequireCompleteType(Range.getBegin(), T,
+ PDiag(diag::err_incomplete_in_exception_spec) << /*direct*/0 << Range))
+ return true;
+
+ // C++ 15.4p2: A type denoted in an exception-specification shall not denote
+  // an incomplete type, or a pointer or reference to an incomplete type, other
+ // than (cv) void*.
+ int kind;
+ if (const PointerType* IT = T->getAs<PointerType>()) {
+ T = IT->getPointeeType();
+ kind = 1;
+ } else if (const ReferenceType* IT = T->getAs<ReferenceType>()) {
+ T = IT->getPointeeType();
+ kind = 2;
+ } else
+ return false;
+
+  // Again, as above, a class that is still being defined counts as complete.
+ if (T->isRecordType() && T->getAs<RecordType>()->isBeingDefined())
+ return false;
+
+ if (!T->isVoidType() && RequireCompleteType(Range.getBegin(), T,
+ PDiag(diag::err_incomplete_in_exception_spec) << kind << Range))
+ return true;
+
+ return false;
+}
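For illustration, a minimal sketch of the rule enforced above, using hypothetical user code (not part of this patch); it assumes C++98/03 dynamic exception specifications and the core issue 437 behavior described in the comments:

    struct Incomplete;                // declared but never defined
    void f() throw(Incomplete);       // error: incomplete type in the spec
    void g() throw(Incomplete*);      // error: pointer to incomplete type
    void h() throw(void*);            // OK: (cv) void* is explicitly allowed
    struct S {
      void m() throw(S);              // OK: S is treated as complete here
    };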
+
+/// CheckDistantExceptionSpec - Check if the given type is a pointer or pointer
+/// to member to a function with an exception specification. This means that
+/// it is invalid to add another level of indirection.
+bool Sema::CheckDistantExceptionSpec(QualType T) {
+ if (const PointerType *PT = T->getAs<PointerType>())
+ T = PT->getPointeeType();
+ else if (const MemberPointerType *PT = T->getAs<MemberPointerType>())
+ T = PT->getPointeeType();
+ else
+ return false;
+
+ const FunctionProtoType *FnT = T->getAs<FunctionProtoType>();
+ if (!FnT)
+ return false;
+
+ return FnT->hasExceptionSpec();
+}
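A hedged sketch of what this helper detects, again with hypothetical declarations: an exception specification may sit directly on a function, pointer-to-function, reference-to-function, or pointer-to-member declarator, but not behind a further level of indirection.

    void (*p)() throw(int);     // OK: pointer to function with a spec
    void (**pp)() throw(int);   // error: an extra level of indirection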
+
+bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
+ OverloadedOperatorKind OO = New->getDeclName().getCXXOverloadedOperator();
+ bool IsOperatorNew = OO == OO_New || OO == OO_Array_New;
+ bool MissingExceptionSpecification = false;
+ bool MissingEmptyExceptionSpecification = false;
+ unsigned DiagID = diag::err_mismatched_exception_spec;
+ if (getLangOpts().MicrosoftExt)
+ DiagID = diag::warn_mismatched_exception_spec;
+
+ if (!CheckEquivalentExceptionSpec(PDiag(DiagID),
+ PDiag(diag::note_previous_declaration),
+ Old->getType()->getAs<FunctionProtoType>(),
+ Old->getLocation(),
+ New->getType()->getAs<FunctionProtoType>(),
+ New->getLocation(),
+ &MissingExceptionSpecification,
+ &MissingEmptyExceptionSpecification,
+ /*AllowNoexceptAllMatchWithNoSpec=*/true,
+ IsOperatorNew))
+ return false;
+
+ // The failure was something other than an empty exception
+ // specification; return an error.
+ if (!MissingExceptionSpecification && !MissingEmptyExceptionSpecification)
+ return true;
+
+ const FunctionProtoType *NewProto
+ = New->getType()->getAs<FunctionProtoType>();
+
+ // The new function declaration is only missing an empty exception
+ // specification "throw()". If the throw() specification came from a
+ // function in a system header that has C linkage, just add an empty
+ // exception specification to the "new" declaration. This is an
+ // egregious workaround for glibc, which adds throw() specifications
+ // to many libc functions as an optimization. Unfortunately, that
+ // optimization isn't permitted by the C++ standard, so we're forced
+ // to work around it here.
+ if (MissingEmptyExceptionSpecification && NewProto &&
+ (Old->getLocation().isInvalid() ||
+ Context.getSourceManager().isInSystemHeader(Old->getLocation())) &&
+ Old->isExternC()) {
+ FunctionProtoType::ExtProtoInfo EPI = NewProto->getExtProtoInfo();
+ EPI.ExceptionSpecType = EST_DynamicNone;
+ QualType NewType = Context.getFunctionType(NewProto->getResultType(),
+ NewProto->arg_type_begin(),
+ NewProto->getNumArgs(),
+ EPI);
+ New->setType(NewType);
+ return false;
+ }
+
+ if (MissingExceptionSpecification && NewProto) {
+ const FunctionProtoType *OldProto
+ = Old->getType()->getAs<FunctionProtoType>();
+
+ FunctionProtoType::ExtProtoInfo EPI = NewProto->getExtProtoInfo();
+ EPI.ExceptionSpecType = OldProto->getExceptionSpecType();
+ if (EPI.ExceptionSpecType == EST_Dynamic) {
+ EPI.NumExceptions = OldProto->getNumExceptions();
+ EPI.Exceptions = OldProto->exception_begin();
+ } else if (EPI.ExceptionSpecType == EST_ComputedNoexcept) {
+ // FIXME: We can't just take the expression from the old prototype. It
+ // likely contains references to the old prototype's parameters.
+ }
+
+ // Update the type of the function with the appropriate exception
+ // specification.
+ QualType NewType = Context.getFunctionType(NewProto->getResultType(),
+ NewProto->arg_type_begin(),
+ NewProto->getNumArgs(),
+ EPI);
+ New->setType(NewType);
+
+ // If exceptions are disabled, suppress the warning about missing
+ // exception specifications for new and delete operators.
+ if (!getLangOpts().CXXExceptions) {
+ switch (New->getDeclName().getCXXOverloadedOperator()) {
+ case OO_New:
+ case OO_Array_New:
+ case OO_Delete:
+ case OO_Array_Delete:
+ if (New->getDeclContext()->isTranslationUnit())
+ return false;
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ // Warn about the lack of exception specification.
+ SmallString<128> ExceptionSpecString;
+ llvm::raw_svector_ostream OS(ExceptionSpecString);
+ switch (OldProto->getExceptionSpecType()) {
+ case EST_DynamicNone:
+ OS << "throw()";
+ break;
+
+ case EST_Dynamic: {
+ OS << "throw(";
+ bool OnFirstException = true;
+ for (FunctionProtoType::exception_iterator E = OldProto->exception_begin(),
+ EEnd = OldProto->exception_end();
+ E != EEnd;
+ ++E) {
+ if (OnFirstException)
+ OnFirstException = false;
+ else
+ OS << ", ";
+
+ OS << E->getAsString(getPrintingPolicy());
+ }
+ OS << ")";
+ break;
+ }
+
+ case EST_BasicNoexcept:
+ OS << "noexcept";
+ break;
+
+ case EST_ComputedNoexcept:
+ OS << "noexcept(";
+ OldProto->getNoexceptExpr()->printPretty(OS, Context, 0,
+ getPrintingPolicy());
+ OS << ")";
+ break;
+
+ default:
+ llvm_unreachable("This spec type is compatible with none.");
+ }
+ OS.flush();
+
+ SourceLocation FixItLoc;
+ if (TypeSourceInfo *TSInfo = New->getTypeSourceInfo()) {
+ TypeLoc TL = TSInfo->getTypeLoc().IgnoreParens();
+ if (const FunctionTypeLoc *FTLoc = dyn_cast<FunctionTypeLoc>(&TL))
+ FixItLoc = PP.getLocForEndOfToken(FTLoc->getLocalRangeEnd());
+ }
+
+ if (FixItLoc.isInvalid())
+ Diag(New->getLocation(), diag::warn_missing_exception_specification)
+ << New << OS.str();
+ else {
+ // FIXME: This will get more complicated with C++0x
+ // late-specified return types.
+ Diag(New->getLocation(), diag::warn_missing_exception_specification)
+ << New << OS.str()
+ << FixItHint::CreateInsertion(FixItLoc, " " + OS.str().str());
+ }
+
+ if (!Old->getLocation().isInvalid())
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+
+ return false;
+ }
+
+ Diag(New->getLocation(), DiagID);
+ Diag(Old->getLocation(), diag::note_previous_declaration);
+ return true;
+}
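To make the glibc workaround above concrete, a small hypothetical example (user code, not from this patch): a C-linkage function whose system-header declaration carries an empty dynamic exception specification may be redeclared without it, and the redeclaration silently inherits throw() instead of triggering the mismatch diagnostic.

    // Assume this first declaration comes from a C system header:
    extern "C" int isalpha(int) throw();

    // Normally a redeclaration with a different exception specification is
    // diagnosed; under the workaround above this one is accepted and its
    // type is adjusted to include throw() again.
    extern "C" int isalpha(int);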
+
+/// CheckEquivalentExceptionSpec - Check if the two types have equivalent
+/// exception specifications. Exception specifications are equivalent if
+/// they allow exactly the same set of exception types. It does not matter how
+/// that is achieved. See C++ [except.spec]p2.
+bool Sema::CheckEquivalentExceptionSpec(
+ const FunctionProtoType *Old, SourceLocation OldLoc,
+ const FunctionProtoType *New, SourceLocation NewLoc) {
+ unsigned DiagID = diag::err_mismatched_exception_spec;
+ if (getLangOpts().MicrosoftExt)
+ DiagID = diag::warn_mismatched_exception_spec;
+ return CheckEquivalentExceptionSpec(
+ PDiag(DiagID),
+ PDiag(diag::note_previous_declaration),
+ Old, OldLoc, New, NewLoc);
+}
+
+/// CheckEquivalentExceptionSpec - Check if the two types have compatible
+/// exception specifications. See C++ [except.spec]p3.
+bool Sema::CheckEquivalentExceptionSpec(const PartialDiagnostic &DiagID,
+ const PartialDiagnostic & NoteID,
+ const FunctionProtoType *Old,
+ SourceLocation OldLoc,
+ const FunctionProtoType *New,
+ SourceLocation NewLoc,
+ bool *MissingExceptionSpecification,
+                                        bool *MissingEmptyExceptionSpecification,
+ bool AllowNoexceptAllMatchWithNoSpec,
+ bool IsOperatorNew) {
+ // Just completely ignore this under -fno-exceptions.
+ if (!getLangOpts().CXXExceptions)
+ return false;
+
+ if (MissingExceptionSpecification)
+ *MissingExceptionSpecification = false;
+
+ if (MissingEmptyExceptionSpecification)
+ *MissingEmptyExceptionSpecification = false;
+
+ // C++0x [except.spec]p3: Two exception-specifications are compatible if:
+ // - both are non-throwing, regardless of their form,
+ // - both have the form noexcept(constant-expression) and the constant-
+ // expressions are equivalent,
+ // - both are dynamic-exception-specifications that have the same set of
+ // adjusted types.
+ //
+  // C++0x [except.spec]p12: An exception-specification is non-throwing if it
+  // is of the form throw(), noexcept, or noexcept(constant-expression) where
+  // the constant-expression yields true.
+ //
+ // C++0x [except.spec]p4: If any declaration of a function has an exception-
+ // specifier that is not a noexcept-specification allowing all exceptions,
+ // all declarations [...] of that function shall have a compatible
+ // exception-specification.
+ //
+ // That last point basically means that noexcept(false) matches no spec.
+ // It's considered when AllowNoexceptAllMatchWithNoSpec is true.
+
+ ExceptionSpecificationType OldEST = Old->getExceptionSpecType();
+ ExceptionSpecificationType NewEST = New->getExceptionSpecType();
+
+ assert(OldEST != EST_Delayed && NewEST != EST_Delayed &&
+ "Shouldn't see unknown exception specifications here");
+
+ // Shortcut the case where both have no spec.
+ if (OldEST == EST_None && NewEST == EST_None)
+ return false;
+
+ FunctionProtoType::NoexceptResult OldNR = Old->getNoexceptSpec(Context);
+ FunctionProtoType::NoexceptResult NewNR = New->getNoexceptSpec(Context);
+ if (OldNR == FunctionProtoType::NR_BadNoexcept ||
+ NewNR == FunctionProtoType::NR_BadNoexcept)
+ return false;
+
+  // Dependent noexcept specifiers are compatible with each other, but nothing
+  // else. One noexcept specifier is compatible with another if their
+  // arguments are equivalent.
+ if (OldNR == NewNR &&
+ OldNR != FunctionProtoType::NR_NoNoexcept &&
+ NewNR != FunctionProtoType::NR_NoNoexcept)
+ return false;
+ if (OldNR != NewNR &&
+ OldNR != FunctionProtoType::NR_NoNoexcept &&
+ NewNR != FunctionProtoType::NR_NoNoexcept) {
+ Diag(NewLoc, DiagID);
+ if (NoteID.getDiagID() != 0)
+ Diag(OldLoc, NoteID);
+ return true;
+ }
+
+ // The MS extension throw(...) is compatible with itself.
+ if (OldEST == EST_MSAny && NewEST == EST_MSAny)
+ return false;
+
+ // It's also compatible with no spec.
+ if ((OldEST == EST_None && NewEST == EST_MSAny) ||
+ (OldEST == EST_MSAny && NewEST == EST_None))
+ return false;
+
+ // It's also compatible with noexcept(false).
+ if (OldEST == EST_MSAny && NewNR == FunctionProtoType::NR_Throw)
+ return false;
+ if (NewEST == EST_MSAny && OldNR == FunctionProtoType::NR_Throw)
+ return false;
+
+ // As described above, noexcept(false) matches no spec only for functions.
+ if (AllowNoexceptAllMatchWithNoSpec) {
+ if (OldEST == EST_None && NewNR == FunctionProtoType::NR_Throw)
+ return false;
+ if (NewEST == EST_None && OldNR == FunctionProtoType::NR_Throw)
+ return false;
+ }
+
+ // Any non-throwing specifications are compatible.
+ bool OldNonThrowing = OldNR == FunctionProtoType::NR_Nothrow ||
+ OldEST == EST_DynamicNone;
+ bool NewNonThrowing = NewNR == FunctionProtoType::NR_Nothrow ||
+ NewEST == EST_DynamicNone;
+ if (OldNonThrowing && NewNonThrowing)
+ return false;
+
+ // As a special compatibility feature, under C++0x we accept no spec and
+ // throw(std::bad_alloc) as equivalent for operator new and operator new[].
+  // This is because the implicit declaration changed in C++0x, but old code
+  // written against the C++98 declaration should not break.
+ if (getLangOpts().CPlusPlus0x && IsOperatorNew) {
+ const FunctionProtoType *WithExceptions = 0;
+ if (OldEST == EST_None && NewEST == EST_Dynamic)
+ WithExceptions = New;
+ else if (OldEST == EST_Dynamic && NewEST == EST_None)
+ WithExceptions = Old;
+ if (WithExceptions && WithExceptions->getNumExceptions() == 1) {
+ // One has no spec, the other throw(something). If that something is
+ // std::bad_alloc, all conditions are met.
+ QualType Exception = *WithExceptions->exception_begin();
+ if (CXXRecordDecl *ExRecord = Exception->getAsCXXRecordDecl()) {
+ IdentifierInfo* Name = ExRecord->getIdentifier();
+ if (Name && Name->getName() == "bad_alloc") {
+ // It's called bad_alloc, but is it in std?
+ DeclContext* DC = ExRecord->getDeclContext();
+ DC = DC->getEnclosingNamespaceContext();
+ if (NamespaceDecl* NS = dyn_cast<NamespaceDecl>(DC)) {
+ IdentifierInfo* NSName = NS->getIdentifier();
+ DC = DC->getParent();
+ if (NSName && NSName->getName() == "std" &&
+ DC->getEnclosingNamespaceContext()->isTranslationUnit()) {
+ return false;
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // At this point, the only remaining valid case is two matching dynamic
+ // specifications. We return here unless both specifications are dynamic.
+ if (OldEST != EST_Dynamic || NewEST != EST_Dynamic) {
+ if (MissingExceptionSpecification && Old->hasExceptionSpec() &&
+ !New->hasExceptionSpec()) {
+ // The old type has an exception specification of some sort, but
+ // the new type does not.
+ *MissingExceptionSpecification = true;
+
+ if (MissingEmptyExceptionSpecification && OldNonThrowing) {
+ // The old type has a throw() or noexcept(true) exception specification
+ // and the new type has no exception specification, and the caller asked
+ // to handle this itself.
+ *MissingEmptyExceptionSpecification = true;
+ }
+
+ return true;
+ }
+
+ Diag(NewLoc, DiagID);
+ if (NoteID.getDiagID() != 0)
+ Diag(OldLoc, NoteID);
+ return true;
+ }
+
+ assert(OldEST == EST_Dynamic && NewEST == EST_Dynamic &&
+ "Exception compatibility logic error: non-dynamic spec slipped through.");
+
+ bool Success = true;
+ // Both have a dynamic exception spec. Collect the first set, then compare
+ // to the second.
+ llvm::SmallPtrSet<CanQualType, 8> OldTypes, NewTypes;
+ for (FunctionProtoType::exception_iterator I = Old->exception_begin(),
+ E = Old->exception_end(); I != E; ++I)
+ OldTypes.insert(Context.getCanonicalType(*I).getUnqualifiedType());
+
+ for (FunctionProtoType::exception_iterator I = New->exception_begin(),
+ E = New->exception_end(); I != E && Success; ++I) {
+ CanQualType TypePtr = Context.getCanonicalType(*I).getUnqualifiedType();
+    if (OldTypes.count(TypePtr))
+ NewTypes.insert(TypePtr);
+ else
+ Success = false;
+ }
+
+ Success = Success && OldTypes.size() == NewTypes.size();
+
+ if (Success) {
+ return false;
+ }
+ Diag(NewLoc, DiagID);
+ if (NoteID.getDiagID() != 0)
+ Diag(OldLoc, NoteID);
+ return true;
+}
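A hedged sketch of the equivalence rules checked above, with hypothetical declarations compiled as C++11 (so both the throw() and noexcept forms are available):

    #include <new>

    void a() throw();
    void a() noexcept;             // OK: both forms are non-throwing

    void b() throw(int, char);
    void b() throw(char, int);     // OK: same set of adjusted types

    void c() throw(int);
    void c() throw(int, float);    // error: the sets of types differ

    // Accepted in C++11 mode as the special operator new compatibility case,
    // even though the implicit declaration no longer has a dynamic spec.
    void *operator new(std::size_t size) throw(std::bad_alloc);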
+
+/// CheckExceptionSpecSubset - Check whether the second function type's
+/// exception specification is a subset (or equivalent) of the first function
+/// type. This is used by override and pointer assignment checks.
+bool Sema::CheckExceptionSpecSubset(
+ const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
+ const FunctionProtoType *Superset, SourceLocation SuperLoc,
+ const FunctionProtoType *Subset, SourceLocation SubLoc) {
+
+ // Just auto-succeed under -fno-exceptions.
+ if (!getLangOpts().CXXExceptions)
+ return false;
+
+  // FIXME: As usual, we could be more specific in our error messages, but
+  // that is better left until we've got types with source locations.
+
+ if (!SubLoc.isValid())
+ SubLoc = SuperLoc;
+
+ ExceptionSpecificationType SuperEST = Superset->getExceptionSpecType();
+
+ // If superset contains everything, we're done.
+ if (SuperEST == EST_None || SuperEST == EST_MSAny)
+ return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc);
+
+ // If there are dependent noexcept specs, assume everything is fine. Unlike
+ // with the equivalency check, this is safe in this case, because we don't
+ // want to merge declarations. Checks after instantiation will catch any
+ // omissions we make here.
+ // We also shortcut checking if a noexcept expression was bad.
+
+  FunctionProtoType::NoexceptResult SuperNR =
+      Superset->getNoexceptSpec(Context);
+ if (SuperNR == FunctionProtoType::NR_BadNoexcept ||
+ SuperNR == FunctionProtoType::NR_Dependent)
+ return false;
+
+ // Another case of the superset containing everything.
+ if (SuperNR == FunctionProtoType::NR_Throw)
+ return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc);
+
+ ExceptionSpecificationType SubEST = Subset->getExceptionSpecType();
+
+ assert(SuperEST != EST_Delayed && SubEST != EST_Delayed &&
+ "Shouldn't see unknown exception specifications here");
+
+  // The superset doesn't contain everything. If the subset does, we've failed.
+ if (SubEST == EST_None || SubEST == EST_MSAny) {
+ Diag(SubLoc, DiagID);
+ if (NoteID.getDiagID() != 0)
+ Diag(SuperLoc, NoteID);
+ return true;
+ }
+
+ FunctionProtoType::NoexceptResult SubNR = Subset->getNoexceptSpec(Context);
+ if (SubNR == FunctionProtoType::NR_BadNoexcept ||
+ SubNR == FunctionProtoType::NR_Dependent)
+ return false;
+
+ // Another case of the subset containing everything.
+ if (SubNR == FunctionProtoType::NR_Throw) {
+ Diag(SubLoc, DiagID);
+ if (NoteID.getDiagID() != 0)
+ Diag(SuperLoc, NoteID);
+ return true;
+ }
+
+ // If the subset contains nothing, we're done.
+ if (SubEST == EST_DynamicNone || SubNR == FunctionProtoType::NR_Nothrow)
+ return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc);
+
+ // Otherwise, if the superset contains nothing, we've failed.
+ if (SuperEST == EST_DynamicNone || SuperNR == FunctionProtoType::NR_Nothrow) {
+ Diag(SubLoc, DiagID);
+ if (NoteID.getDiagID() != 0)
+ Diag(SuperLoc, NoteID);
+ return true;
+ }
+
+ assert(SuperEST == EST_Dynamic && SubEST == EST_Dynamic &&
+ "Exception spec subset: non-dynamic case slipped through.");
+
+ // Neither contains everything or nothing. Do a proper comparison.
+ for (FunctionProtoType::exception_iterator SubI = Subset->exception_begin(),
+ SubE = Subset->exception_end(); SubI != SubE; ++SubI) {
+ // Take one type from the subset.
+ QualType CanonicalSubT = Context.getCanonicalType(*SubI);
+ // Unwrap pointers and references so that we can do checks within a class
+ // hierarchy. Don't unwrap member pointers; they don't have hierarchy
+ // conversions on the pointee.
+ bool SubIsPointer = false;
+ if (const ReferenceType *RefTy = CanonicalSubT->getAs<ReferenceType>())
+ CanonicalSubT = RefTy->getPointeeType();
+ if (const PointerType *PtrTy = CanonicalSubT->getAs<PointerType>()) {
+ CanonicalSubT = PtrTy->getPointeeType();
+ SubIsPointer = true;
+ }
+ bool SubIsClass = CanonicalSubT->isRecordType();
+ CanonicalSubT = CanonicalSubT.getLocalUnqualifiedType();
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+
+ bool Contained = false;
+ // Make sure it's in the superset.
+ for (FunctionProtoType::exception_iterator SuperI =
+ Superset->exception_begin(), SuperE = Superset->exception_end();
+ SuperI != SuperE; ++SuperI) {
+ QualType CanonicalSuperT = Context.getCanonicalType(*SuperI);
+ // SubT must be SuperT or derived from it, or pointer or reference to
+ // such types.
+ if (const ReferenceType *RefTy = CanonicalSuperT->getAs<ReferenceType>())
+ CanonicalSuperT = RefTy->getPointeeType();
+ if (SubIsPointer) {
+ if (const PointerType *PtrTy = CanonicalSuperT->getAs<PointerType>())
+ CanonicalSuperT = PtrTy->getPointeeType();
+ else {
+ continue;
+ }
+ }
+ CanonicalSuperT = CanonicalSuperT.getLocalUnqualifiedType();
+ // If the types are the same, move on to the next type in the subset.
+ if (CanonicalSubT == CanonicalSuperT) {
+ Contained = true;
+ break;
+ }
+
+ // Otherwise we need to check the inheritance.
+ if (!SubIsClass || !CanonicalSuperT->isRecordType())
+ continue;
+
+ Paths.clear();
+ if (!IsDerivedFrom(CanonicalSubT, CanonicalSuperT, Paths))
+ continue;
+
+ if (Paths.isAmbiguous(Context.getCanonicalType(CanonicalSuperT)))
+ continue;
+
+ // Do this check from a context without privileges.
+ switch (CheckBaseClassAccess(SourceLocation(),
+ CanonicalSuperT, CanonicalSubT,
+ Paths.front(),
+ /*Diagnostic*/ 0,
+ /*ForceCheck*/ true,
+ /*ForceUnprivileged*/ true)) {
+ case AR_accessible: break;
+ case AR_inaccessible: continue;
+ case AR_dependent:
+ llvm_unreachable("access check dependent for unprivileged context");
+ case AR_delayed:
+ llvm_unreachable("access check delayed in non-declaration");
+ }
+
+ Contained = true;
+ break;
+ }
+ if (!Contained) {
+ Diag(SubLoc, DiagID);
+ if (NoteID.getDiagID() != 0)
+ Diag(SuperLoc, NoteID);
+ return true;
+ }
+ }
+ // We've run half the gauntlet.
+ return CheckParamExceptionSpec(NoteID, Superset, SuperLoc, Subset, SubLoc);
+}
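A hedged sketch of the subset relation this routine checks, using hypothetical classes: an override may only allow exceptions that its base's specification also allows, and a publicly derived class counts as covered by its base.

    struct Base {};
    struct Derived : Base {};

    struct A {
      virtual void f() throw(Base);
      virtual void g() throw(Base);
    };
    struct B : A {
      void f() throw(Derived);   // OK: Derived is covered by Base
      void g() throw(int);       // error: int is not allowed by A::g's spec
    };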
+
+static bool CheckSpecForTypesEquivalent(Sema &S,
+ const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
+ QualType Target, SourceLocation TargetLoc,
+ QualType Source, SourceLocation SourceLoc)
+{
+ const FunctionProtoType *TFunc = GetUnderlyingFunction(Target);
+ if (!TFunc)
+ return false;
+ const FunctionProtoType *SFunc = GetUnderlyingFunction(Source);
+ if (!SFunc)
+ return false;
+
+ return S.CheckEquivalentExceptionSpec(DiagID, NoteID, TFunc, TargetLoc,
+ SFunc, SourceLoc);
+}
+
+/// CheckParamExceptionSpec - Check if the parameter and return types of the
+/// two functions have equivalent exception specs. This is part of the
+/// assignment and override compatibility check. We do not check the parameters
+/// of parameter function pointers recursively, as no sane programmer would
+/// even be able to write such a function type.
+bool Sema::CheckParamExceptionSpec(const PartialDiagnostic & NoteID,
+ const FunctionProtoType *Target, SourceLocation TargetLoc,
+ const FunctionProtoType *Source, SourceLocation SourceLoc)
+{
+ if (CheckSpecForTypesEquivalent(*this,
+ PDiag(diag::err_deep_exception_specs_differ) << 0,
+ PDiag(),
+ Target->getResultType(), TargetLoc,
+ Source->getResultType(), SourceLoc))
+ return true;
+
+ // We shouldn't even be testing this unless the arguments are otherwise
+ // compatible.
+ assert(Target->getNumArgs() == Source->getNumArgs() &&
+ "Functions have different argument counts.");
+ for (unsigned i = 0, E = Target->getNumArgs(); i != E; ++i) {
+ if (CheckSpecForTypesEquivalent(*this,
+ PDiag(diag::err_deep_exception_specs_differ) << 1,
+ PDiag(),
+ Target->getArgType(i), TargetLoc,
+ Source->getArgType(i), SourceLoc))
+ return true;
+ }
+ return false;
+}
+
+bool Sema::CheckExceptionSpecCompatibility(Expr *From, QualType ToType)
+{
+ // First we check for applicability.
+ // Target type must be a function, function pointer or function reference.
+ const FunctionProtoType *ToFunc = GetUnderlyingFunction(ToType);
+ if (!ToFunc)
+ return false;
+
+ // SourceType must be a function or function pointer.
+ const FunctionProtoType *FromFunc = GetUnderlyingFunction(From->getType());
+ if (!FromFunc)
+ return false;
+
+ // Now we've got the correct types on both sides, check their compatibility.
+ // This means that the source of the conversion can only throw a subset of
+ // the exceptions of the target, and any exception specs on arguments or
+ // return types must be equivalent.
+ return CheckExceptionSpecSubset(PDiag(diag::err_incompatible_exception_specs),
+ PDiag(), ToFunc,
+ From->getSourceRange().getBegin(),
+ FromFunc, SourceLocation());
+}
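Similarly, a hypothetical sketch of the pointer-assignment compatibility checked here: the source function may only throw a subset of what the target's type allows.

    void strict() throw();
    void loose() throw(int);

    void (*p)() throw(int) = &strict;   // OK: throw() is a subset of throw(int)
    void (*q)() throw()    = &loose;    // error: loose may throw an int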
+
+bool Sema::CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
+ const CXXMethodDecl *Old) {
+ if (getLangOpts().CPlusPlus0x && isa<CXXDestructorDecl>(New)) {
+ // Don't check uninstantiated template destructors at all. We can only
+ // synthesize correct specs after the template is instantiated.
+ if (New->getParent()->isDependentType())
+ return false;
+ if (New->getParent()->isBeingDefined()) {
+ // The destructor might be updated once the definition is finished. So
+ // remember it and check later.
+ DelayedDestructorExceptionSpecChecks.push_back(std::make_pair(
+ cast<CXXDestructorDecl>(New), cast<CXXDestructorDecl>(Old)));
+ return false;
+ }
+ }
+ unsigned DiagID = diag::err_override_exception_spec;
+ if (getLangOpts().MicrosoftExt)
+ DiagID = diag::warn_override_exception_spec;
+ return CheckExceptionSpecSubset(PDiag(DiagID),
+ PDiag(diag::note_overridden_virtual_function),
+ Old->getType()->getAs<FunctionProtoType>(),
+ Old->getLocation(),
+ New->getType()->getAs<FunctionProtoType>(),
+ New->getLocation());
+}
+
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
new file mode 100644
index 0000000..0d0f2f5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExpr.cpp
@@ -0,0 +1,11289 @@
+//===--- SemaExpr.cpp - Semantic Analysis for Expressions -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/AnalysisBasedWarnings.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Designator.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/SemaFixItUtils.h"
+#include "clang/Sema/Template.h"
+#include "TreeTransform.h"
+using namespace clang;
+using namespace sema;
+
+/// \brief Determine whether the use of this declaration is valid, without
+/// emitting diagnostics.
+bool Sema::CanUseDecl(NamedDecl *D) {
+ // See if this is an auto-typed variable whose initializer we are parsing.
+ if (ParsingInitForAutoVars.count(D))
+ return false;
+
+ // See if this is a deleted function.
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isDeleted())
+ return false;
+ }
+
+ // See if this function is unavailable.
+ if (D->getAvailability() == AR_Unavailable &&
+ cast<Decl>(CurContext)->getAvailability() != AR_Unavailable)
+ return false;
+
+ return true;
+}
+
+static AvailabilityResult DiagnoseAvailabilityOfDecl(Sema &S,
+ NamedDecl *D, SourceLocation Loc,
+ const ObjCInterfaceDecl *UnknownObjCClass) {
+ // See if this declaration is unavailable or deprecated.
+ std::string Message;
+ AvailabilityResult Result = D->getAvailability(&Message);
+ if (const EnumConstantDecl *ECD = dyn_cast<EnumConstantDecl>(D))
+ if (Result == AR_Available) {
+ const DeclContext *DC = ECD->getDeclContext();
+ if (const EnumDecl *TheEnumDecl = dyn_cast<EnumDecl>(DC))
+ Result = TheEnumDecl->getAvailability(&Message);
+ }
+
+ switch (Result) {
+ case AR_Available:
+ case AR_NotYetIntroduced:
+ break;
+
+ case AR_Deprecated:
+ S.EmitDeprecationWarning(D, Message, Loc, UnknownObjCClass);
+ break;
+
+ case AR_Unavailable:
+ if (S.getCurContextAvailability() != AR_Unavailable) {
+ if (Message.empty()) {
+ if (!UnknownObjCClass)
+ S.Diag(Loc, diag::err_unavailable) << D->getDeclName();
+ else
+ S.Diag(Loc, diag::warn_unavailable_fwdclass_message)
+ << D->getDeclName();
+ }
+ else
+ S.Diag(Loc, diag::err_unavailable_message)
+ << D->getDeclName() << Message;
+ S.Diag(D->getLocation(), diag::note_unavailable_here)
+ << isa<FunctionDecl>(D) << false;
+ }
+ break;
+ }
+ return Result;
+}
+
+/// \brief Emit a note explaining that this function is deleted or unavailable.
+void Sema::NoteDeletedFunction(FunctionDecl *Decl) {
+ CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Decl);
+
+ if (Method && Method->isDeleted() && !Method->isDeletedAsWritten()) {
+ // If the method was explicitly defaulted, point at that declaration.
+ if (!Method->isImplicit())
+ Diag(Decl->getLocation(), diag::note_implicitly_deleted);
+
+ // Try to diagnose why this special member function was implicitly
+ // deleted. This might fail, if that reason no longer applies.
+ CXXSpecialMember CSM = getSpecialMember(Method);
+ if (CSM != CXXInvalid)
+ ShouldDeleteSpecialMember(Method, CSM, /*Diagnose=*/true);
+
+ return;
+ }
+
+ Diag(Decl->getLocation(), diag::note_unavailable_here)
+ << 1 << Decl->isDeleted();
+}
+
+/// \brief Determine whether the use of this declaration is valid, and
+/// emit any corresponding diagnostics.
+///
+/// This routine diagnoses various problems with referencing
+/// declarations that can occur when using a declaration. For example,
+/// it might warn if a deprecated or unavailable declaration is being
+/// used, or produce an error (and return true) if a C++0x deleted
+/// function is being used.
+///
+/// \returns true if there was an error (this declaration cannot be
+/// referenced), false otherwise.
+///
+bool Sema::DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc,
+ const ObjCInterfaceDecl *UnknownObjCClass) {
+ if (getLangOpts().CPlusPlus && isa<FunctionDecl>(D)) {
+ // If there were any diagnostics suppressed by template argument deduction,
+ // emit them now.
+ llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >::iterator
+ Pos = SuppressedDiagnostics.find(D->getCanonicalDecl());
+ if (Pos != SuppressedDiagnostics.end()) {
+ SmallVectorImpl<PartialDiagnosticAt> &Suppressed = Pos->second;
+ for (unsigned I = 0, N = Suppressed.size(); I != N; ++I)
+ Diag(Suppressed[I].first, Suppressed[I].second);
+
+ // Clear out the list of suppressed diagnostics, so that we don't emit
+ // them again for this specialization. However, we don't obsolete this
+ // entry from the table, because we want to avoid ever emitting these
+ // diagnostics again.
+ Suppressed.clear();
+ }
+ }
+
+ // See if this is an auto-typed variable whose initializer we are parsing.
+ if (ParsingInitForAutoVars.count(D)) {
+ Diag(Loc, diag::err_auto_variable_cannot_appear_in_own_initializer)
+ << D->getDeclName();
+ return true;
+ }
+
+ // See if this is a deleted function.
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->isDeleted()) {
+ Diag(Loc, diag::err_deleted_function_use);
+ NoteDeletedFunction(FD);
+ return true;
+ }
+ }
+ DiagnoseAvailabilityOfDecl(*this, D, Loc, UnknownObjCClass);
+
+ // Warn if this is used but marked unused.
+ if (D->hasAttr<UnusedAttr>())
+ Diag(Loc, diag::warn_used_but_marked_unused) << D->getDeclName();
+ return false;
+}
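Two of the situations this routine diagnoses, sketched with hypothetical C++11 code:

    void f(int) = delete;

    void caller() {
      f(0);            // error: attempt to use a deleted function
    }

    auto x = x + 1;    // error: variable appears in its own initializer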
+
+/// \brief Retrieve the message suffix that should be added to a
+/// diagnostic complaining about the given function being deleted or
+/// unavailable.
+std::string Sema::getDeletedOrUnavailableSuffix(const FunctionDecl *FD) {
+ // FIXME: C++0x implicitly-deleted special member functions could be
+ // detected here so that we could improve diagnostics to say, e.g.,
+ // "base class 'A' had a deleted copy constructor".
+ if (FD->isDeleted())
+ return std::string();
+
+ std::string Message;
+ if (FD->getAvailability(&Message))
+ return ": " + Message;
+
+ return std::string();
+}
+
+/// DiagnoseSentinelCalls - This routine checks whether a call or
+/// message-send is to a declaration with the sentinel attribute, and
+/// if so, it checks that the requirements of the sentinel are
+/// satisfied.
+void Sema::DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
+ Expr **args, unsigned numArgs) {
+ const SentinelAttr *attr = D->getAttr<SentinelAttr>();
+ if (!attr)
+ return;
+
+ // The number of formal parameters of the declaration.
+ unsigned numFormalParams;
+
+ // The kind of declaration. This is also an index into a %select in
+ // the diagnostic.
+ enum CalleeType { CT_Function, CT_Method, CT_Block } calleeType;
+
+ if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ numFormalParams = MD->param_size();
+ calleeType = CT_Method;
+ } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ numFormalParams = FD->param_size();
+ calleeType = CT_Function;
+ } else if (isa<VarDecl>(D)) {
+ QualType type = cast<ValueDecl>(D)->getType();
+ const FunctionType *fn = 0;
+ if (const PointerType *ptr = type->getAs<PointerType>()) {
+ fn = ptr->getPointeeType()->getAs<FunctionType>();
+ if (!fn) return;
+ calleeType = CT_Function;
+ } else if (const BlockPointerType *ptr = type->getAs<BlockPointerType>()) {
+ fn = ptr->getPointeeType()->castAs<FunctionType>();
+ calleeType = CT_Block;
+ } else {
+ return;
+ }
+
+ if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fn)) {
+ numFormalParams = proto->getNumArgs();
+ } else {
+ numFormalParams = 0;
+ }
+ } else {
+ return;
+ }
+
+ // "nullPos" is the number of formal parameters at the end which
+ // effectively count as part of the variadic arguments. This is
+ // useful if you would prefer to not have *any* formal parameters,
+ // but the language forces you to have at least one.
+ unsigned nullPos = attr->getNullPos();
+ assert((nullPos == 0 || nullPos == 1) && "invalid null position on sentinel");
+ numFormalParams = (nullPos > numFormalParams ? 0 : numFormalParams - nullPos);
+
+ // The number of arguments which should follow the sentinel.
+ unsigned numArgsAfterSentinel = attr->getSentinel();
+
+ // If there aren't enough arguments for all the formal parameters,
+ // the sentinel, and the args after the sentinel, complain.
+ if (numArgs < numFormalParams + numArgsAfterSentinel + 1) {
+ Diag(Loc, diag::warn_not_enough_argument) << D->getDeclName();
+ Diag(D->getLocation(), diag::note_sentinel_here) << calleeType;
+ return;
+ }
+
+ // Otherwise, find the sentinel expression.
+ Expr *sentinelExpr = args[numArgs - numArgsAfterSentinel - 1];
+ if (!sentinelExpr) return;
+ if (sentinelExpr->isValueDependent()) return;
+ if (Context.isSentinelNullExpr(sentinelExpr)) return;
+
+ // Pick a reasonable string to insert. Optimistically use 'nil' or
+ // 'NULL' if those are actually defined in the context. Only use
+ // 'nil' for ObjC methods, where it's much more likely that the
+ // variadic arguments form a list of object pointers.
+ SourceLocation MissingNilLoc
+ = PP.getLocForEndOfToken(sentinelExpr->getLocEnd());
+ std::string NullValue;
+ if (calleeType == CT_Method &&
+ PP.getIdentifierInfo("nil")->hasMacroDefinition())
+ NullValue = "nil";
+ else if (PP.getIdentifierInfo("NULL")->hasMacroDefinition())
+ NullValue = "NULL";
+ else
+ NullValue = "(void*) 0";
+
+ if (MissingNilLoc.isInvalid())
+ Diag(Loc, diag::warn_missing_sentinel) << calleeType;
+ else
+ Diag(MissingNilLoc, diag::warn_missing_sentinel)
+ << calleeType
+ << FixItHint::CreateInsertion(MissingNilLoc, ", " + NullValue);
+ Diag(D->getLocation(), diag::note_sentinel_here) << calleeType;
+}
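A hypothetical sketch of the sentinel attribute this routine enforces (the GNU sentinel attribute, usable from C and C++):

    #include <stddef.h>

    // The variadic arguments must be terminated by a null pointer.
    void make_list(const char *first, ...) __attribute__((sentinel));

    void use() {
      make_list("a", "b", NULL);   // OK: the call ends with the sentinel
      make_list("a", "b");         // warning: missing sentinel
    }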
+
+SourceRange Sema::getExprRange(Expr *E) const {
+ return E ? E->getSourceRange() : SourceRange();
+}
+
+//===----------------------------------------------------------------------===//
+// Standard Promotions and Conversions
+//===----------------------------------------------------------------------===//
+
+/// DefaultFunctionArrayConversion (C99 6.3.2.1p3, C99 6.3.2.1p4).
+ExprResult Sema::DefaultFunctionArrayConversion(Expr *E) {
+ // Handle any placeholder expressions which made it here.
+ if (E->getType()->isPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(E);
+ if (result.isInvalid()) return ExprError();
+ E = result.take();
+ }
+
+ QualType Ty = E->getType();
+ assert(!Ty.isNull() && "DefaultFunctionArrayConversion - missing type");
+
+ if (Ty->isFunctionType())
+ E = ImpCastExprToType(E, Context.getPointerType(Ty),
+ CK_FunctionToPointerDecay).take();
+ else if (Ty->isArrayType()) {
+ // In C90 mode, arrays only promote to pointers if the array expression is
+ // an lvalue. The relevant legalese is C90 6.2.2.1p3: "an lvalue that has
+ // type 'array of type' is converted to an expression that has type 'pointer
+ // to type'...". In C99 this was changed to: C99 6.3.2.1p3: "an expression
+ // that has type 'array of type' ...". The relevant change is "an lvalue"
+ // (C90) to "an expression" (C99).
+ //
+ // C++ 4.2p1:
+ // An lvalue or rvalue of type "array of N T" or "array of unknown bound of
+ // T" can be converted to an rvalue of type "pointer to T".
+ //
+ if (getLangOpts().C99 || getLangOpts().CPlusPlus || E->isLValue())
+ E = ImpCastExprToType(E, Context.getArrayDecayedType(Ty),
+ CK_ArrayToPointerDecay).take();
+ }
+ return Owned(E);
+}
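A minimal hypothetical sketch of the decay performed above:

    void callee(int);

    void caller() {
      int arr[4];
      int *p = arr;                // array-to-pointer decay: int[4] -> int *
      void (*fp)(int) = callee;    // function-to-pointer decay
    }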
+
+static void CheckForNullPointerDereference(Sema &S, Expr *E) {
+ // Check to see if we are dereferencing a null pointer. If so,
+ // and if not volatile-qualified, this is undefined behavior that the
+ // optimizer will delete, so warn about it. People sometimes try to use this
+ // to get a deterministic trap and are surprised by clang's behavior. This
+ // only handles the pattern "*null", which is a very syntactic check.
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E->IgnoreParenCasts()))
+ if (UO->getOpcode() == UO_Deref &&
+ UO->getSubExpr()->IgnoreParenCasts()->
+ isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull) &&
+ !UO->getType().isVolatileQualified()) {
+ S.DiagRuntimeBehavior(UO->getOperatorLoc(), UO,
+ S.PDiag(diag::warn_indirection_through_null)
+ << UO->getSubExpr()->getSourceRange());
+ S.DiagRuntimeBehavior(UO->getOperatorLoc(), UO,
+ S.PDiag(diag::note_indirection_through_null));
+ }
+}
+
+ExprResult Sema::DefaultLvalueConversion(Expr *E) {
+ // Handle any placeholder expressions which made it here.
+ if (E->getType()->isPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(E);
+ if (result.isInvalid()) return ExprError();
+ E = result.take();
+ }
+
+ // C++ [conv.lval]p1:
+ // A glvalue of a non-function, non-array type T can be
+ // converted to a prvalue.
+ if (!E->isGLValue()) return Owned(E);
+
+ QualType T = E->getType();
+ assert(!T.isNull() && "r-value conversion on typeless expression?");
+
+ // We don't want to throw lvalue-to-rvalue casts on top of
+ // expressions of certain types in C++.
+ if (getLangOpts().CPlusPlus &&
+ (E->getType() == Context.OverloadTy ||
+ T->isDependentType() ||
+ T->isRecordType()))
+ return Owned(E);
+
+ // The C standard is actually really unclear on this point, and
+ // DR106 tells us what the result should be but not why. It's
+  // generally best to say that void types just don't undergo
+ // lvalue-to-rvalue at all. Note that expressions of unqualified
+ // 'void' type are never l-values, but qualified void can be.
+ if (T->isVoidType())
+ return Owned(E);
+
+ CheckForNullPointerDereference(*this, E);
+
+ // C++ [conv.lval]p1:
+ // [...] If T is a non-class type, the type of the prvalue is the
+ // cv-unqualified version of T. Otherwise, the type of the
+ // rvalue is T.
+ //
+ // C99 6.3.2.1p2:
+ // If the lvalue has qualified type, the value has the unqualified
+ // version of the type of the lvalue; otherwise, the value has the
+ // type of the lvalue.
+ if (T.hasQualifiers())
+ T = T.getUnqualifiedType();
+
+ UpdateMarkingForLValueToRValue(E);
+
+ ExprResult Res = Owned(ImplicitCastExpr::Create(Context, T, CK_LValueToRValue,
+ E, 0, VK_RValue));
+
+ // C11 6.3.2.1p2:
+ // ... if the lvalue has atomic type, the value has the non-atomic version
+ // of the type of the lvalue ...
+ if (const AtomicType *Atomic = T->getAs<AtomicType>()) {
+ T = Atomic->getValueType().getUnqualifiedType();
+ Res = Owned(ImplicitCastExpr::Create(Context, T, CK_AtomicToNonAtomic,
+ Res.get(), 0, VK_RValue));
+ }
+
+ return Res;
+}
+
+ExprResult Sema::DefaultFunctionArrayLvalueConversion(Expr *E) {
+ ExprResult Res = DefaultFunctionArrayConversion(E);
+ if (Res.isInvalid())
+ return ExprError();
+ Res = DefaultLvalueConversion(Res.take());
+ if (Res.isInvalid())
+ return ExprError();
+ return move(Res);
+}
+
+
+/// UsualUnaryConversions - Performs various conversions that are common to most
+/// operators (C99 6.3). The conversions of array and function types are
+/// sometimes suppressed. For example, the array->pointer conversion doesn't
+/// apply if the array is an argument to the sizeof or address (&) operators.
+/// In these instances, this routine should *not* be called.
+ExprResult Sema::UsualUnaryConversions(Expr *E) {
+ // First, convert to an r-value.
+ ExprResult Res = DefaultFunctionArrayLvalueConversion(E);
+ if (Res.isInvalid())
+ return Owned(E);
+ E = Res.take();
+
+ QualType Ty = E->getType();
+ assert(!Ty.isNull() && "UsualUnaryConversions - missing type");
+
+ // Half FP is a bit different: it's a storage-only type, meaning that any
+ // "use" of it should be promoted to float.
+ if (Ty->isHalfType())
+ return ImpCastExprToType(Res.take(), Context.FloatTy, CK_FloatingCast);
+
+ // Try to perform integral promotions if the object has a theoretically
+ // promotable type.
+ if (Ty->isIntegralOrUnscopedEnumerationType()) {
+ // C99 6.3.1.1p2:
+ //
+ // The following may be used in an expression wherever an int or
+ // unsigned int may be used:
+ // - an object or expression with an integer type whose integer
+ // conversion rank is less than or equal to the rank of int
+ // and unsigned int.
+ // - A bit-field of type _Bool, int, signed int, or unsigned int.
+ //
+ // If an int can represent all values of the original type, the
+ // value is converted to an int; otherwise, it is converted to an
+ // unsigned int. These are called the integer promotions. All
+ // other types are unchanged by the integer promotions.
+
+ QualType PTy = Context.isPromotableBitField(E);
+ if (!PTy.isNull()) {
+ E = ImpCastExprToType(E, PTy, CK_IntegralCast).take();
+ return Owned(E);
+ }
+ if (Ty->isPromotableIntegerType()) {
+ QualType PT = Context.getPromotedIntegerType(Ty);
+ E = ImpCastExprToType(E, PT, CK_IntegralCast).take();
+ return Owned(E);
+ }
+ }
+ return Owned(E);
+}
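A hypothetical sketch of the integer promotions applied above:

    void promote() {
      short a = 1, b = 2;
      int sum = a + b;        // both operands promote to int; a + b has type int

      unsigned char c = 255;
      int widened = c + 1;    // c promotes to int, so the result is 256
    }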
+
+/// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
+/// do not have a prototype. Arguments that have type float are promoted to
+/// double. All other argument types are converted by UsualUnaryConversions().
+ExprResult Sema::DefaultArgumentPromotion(Expr *E) {
+ QualType Ty = E->getType();
+ assert(!Ty.isNull() && "DefaultArgumentPromotion - missing type");
+
+ ExprResult Res = UsualUnaryConversions(E);
+ if (Res.isInvalid())
+ return Owned(E);
+ E = Res.take();
+
+ // If this is a 'float' (CVR qualified or typedef) promote to double.
+ if (Ty->isSpecificBuiltinType(BuiltinType::Float))
+ E = ImpCastExprToType(E, Context.DoubleTy, CK_FloatingCast).take();
+
+ // C++ performs lvalue-to-rvalue conversion as a default argument
+ // promotion, even on class types, but note:
+ // C++11 [conv.lval]p2:
+ // When an lvalue-to-rvalue conversion occurs in an unevaluated
+ // operand or a subexpression thereof the value contained in the
+ // referenced object is not accessed. Otherwise, if the glvalue
+ // has a class type, the conversion copy-initializes a temporary
+ // of type T from the glvalue and the result of the conversion
+ // is a prvalue for the temporary.
+ // FIXME: add some way to gate this entire thing for correctness in
+ // potentially potentially evaluated contexts.
+ if (getLangOpts().CPlusPlus && E->isGLValue() &&
+ ExprEvalContexts.back().Context != Unevaluated) {
+ ExprResult Temp = PerformCopyInitialization(
+ InitializedEntity::InitializeTemporary(E->getType()),
+ E->getExprLoc(),
+ Owned(E));
+ if (Temp.isInvalid())
+ return ExprError();
+ E = Temp.get();
+ }
+
+ return Owned(E);
+}
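A hypothetical sketch of default argument promotion as seen through a variadic call: a float argument is promoted to double before being passed through the ellipsis.

    #include <cstdio>

    void show(float f) {
      // f undergoes default argument promotion to double when passed through
      // '...', which is why the %f conversion expects a double here.
      std::printf("%f\n", f);
    }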
+
+/// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
+/// will warn if the resulting type is not a POD type, and rejects ObjC
+/// interfaces passed by value.
+ExprResult Sema::DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
+ FunctionDecl *FDecl) {
+ if (const BuiltinType *PlaceholderTy = E->getType()->getAsPlaceholderType()) {
+ // Strip the unbridged-cast placeholder expression off, if applicable.
+ if (PlaceholderTy->getKind() == BuiltinType::ARCUnbridgedCast &&
+ (CT == VariadicMethod ||
+ (FDecl && FDecl->hasAttr<CFAuditedTransferAttr>()))) {
+ E = stripARCUnbridgedCast(E);
+
+ // Otherwise, do normal placeholder checking.
+ } else {
+ ExprResult ExprRes = CheckPlaceholderExpr(E);
+ if (ExprRes.isInvalid())
+ return ExprError();
+ E = ExprRes.take();
+ }
+ }
+
+ ExprResult ExprRes = DefaultArgumentPromotion(E);
+ if (ExprRes.isInvalid())
+ return ExprError();
+ E = ExprRes.take();
+
+ // Don't allow one to pass an Objective-C interface to a vararg.
+ if (E->getType()->isObjCObjectType() &&
+ DiagRuntimeBehavior(E->getLocStart(), 0,
+ PDiag(diag::err_cannot_pass_objc_interface_to_vararg)
+ << E->getType() << CT))
+ return ExprError();
+
+ // Complain about passing non-POD types through varargs. However, don't
+ // perform this check for incomplete types, which we can get here when we're
+ // in an unevaluated context.
+ if (!E->getType()->isIncompleteType() && !E->getType().isPODType(Context)) {
+ // C++0x [expr.call]p7:
+ // Passing a potentially-evaluated argument of class type (Clause 9)
+ // having a non-trivial copy constructor, a non-trivial move constructor,
+ // or a non-trivial destructor, with no corresponding parameter,
+ // is conditionally-supported with implementation-defined semantics.
+ bool TrivialEnough = false;
+ if (getLangOpts().CPlusPlus0x && !E->getType()->isDependentType()) {
+ if (CXXRecordDecl *Record = E->getType()->getAsCXXRecordDecl()) {
+ if (Record->hasTrivialCopyConstructor() &&
+ Record->hasTrivialMoveConstructor() &&
+ Record->hasTrivialDestructor()) {
+ DiagRuntimeBehavior(E->getLocStart(), 0,
+ PDiag(diag::warn_cxx98_compat_pass_non_pod_arg_to_vararg)
+ << E->getType() << CT);
+ TrivialEnough = true;
+ }
+ }
+ }
+
+ if (!TrivialEnough &&
+ getLangOpts().ObjCAutoRefCount &&
+ E->getType()->isObjCLifetimeType())
+ TrivialEnough = true;
+
+ if (TrivialEnough) {
+ // Nothing to diagnose. This is okay.
+ } else if (DiagRuntimeBehavior(E->getLocStart(), 0,
+ PDiag(diag::warn_cannot_pass_non_pod_arg_to_vararg)
+ << getLangOpts().CPlusPlus0x << E->getType()
+ << CT)) {
+ // Turn this into a trap.
+ CXXScopeSpec SS;
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId Name;
+ Name.setIdentifier(PP.getIdentifierInfo("__builtin_trap"),
+ E->getLocStart());
+ ExprResult TrapFn = ActOnIdExpression(TUScope, SS, TemplateKWLoc, Name,
+ true, false);
+ if (TrapFn.isInvalid())
+ return ExprError();
+
+ ExprResult Call = ActOnCallExpr(TUScope, TrapFn.get(), E->getLocStart(),
+ MultiExprArg(), E->getLocEnd());
+ if (Call.isInvalid())
+ return ExprError();
+
+ ExprResult Comma = ActOnBinOp(TUScope, E->getLocStart(), tok::comma,
+ Call.get(), E);
+ if (Comma.isInvalid())
+ return ExprError();
+ E = Comma.get();
+ }
+ }
+  // C++ rules are enforced elsewhere.
+ if (!getLangOpts().CPlusPlus &&
+ RequireCompleteType(E->getExprLoc(), E->getType(),
+ diag::err_call_incomplete_argument))
+ return ExprError();
+
+ return Owned(E);
+}
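A hypothetical sketch of the non-POD check above: passing a non-trivial class type through the ellipsis is diagnosed, and the usual fix is to pass a plain representation instead.

    #include <cstdio>
    #include <string>

    void log_name(const std::string &name) {
      std::printf("%s\n", name);          // diagnosed: non-POD type via '...'
      std::printf("%s\n", name.c_str());  // OK: passes a const char *
    }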
+
+/// \brief Converts an integer to complex float type. Helper function of
+/// UsualArithmeticConversions()
+///
+/// \return false if the integer expression is an integer type and is
+/// successfully converted to the complex type.
+static bool handleIntegerToComplexFloatConversion(Sema &S, ExprResult &IntExpr,
+ ExprResult &ComplexExpr,
+ QualType IntTy,
+ QualType ComplexTy,
+ bool SkipCast) {
+ if (IntTy->isComplexType() || IntTy->isRealFloatingType()) return true;
+ if (SkipCast) return false;
+ if (IntTy->isIntegerType()) {
+ QualType fpTy = cast<ComplexType>(ComplexTy)->getElementType();
+ IntExpr = S.ImpCastExprToType(IntExpr.take(), fpTy, CK_IntegralToFloating);
+ IntExpr = S.ImpCastExprToType(IntExpr.take(), ComplexTy,
+ CK_FloatingRealToComplex);
+ } else {
+ assert(IntTy->isComplexIntegerType());
+ IntExpr = S.ImpCastExprToType(IntExpr.take(), ComplexTy,
+ CK_IntegralComplexToFloatingComplex);
+ }
+ return false;
+}
+
+/// \brief Takes two complex float types and converts them to the same type.
+/// Helper function of UsualArithmeticConversions()
+static QualType
+handleComplexFloatToComplexFloatConverstion(Sema &S, ExprResult &LHS,
+ ExprResult &RHS, QualType LHSType,
+ QualType RHSType,
+ bool IsCompAssign) {
+ int order = S.Context.getFloatingTypeOrder(LHSType, RHSType);
+
+ if (order < 0) {
+ // _Complex float -> _Complex double
+ if (!IsCompAssign)
+ LHS = S.ImpCastExprToType(LHS.take(), RHSType, CK_FloatingComplexCast);
+ return RHSType;
+ }
+ if (order > 0)
+ // _Complex float -> _Complex double
+ RHS = S.ImpCastExprToType(RHS.take(), LHSType, CK_FloatingComplexCast);
+ return LHSType;
+}
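+
+// For example, _Complex float combined with _Complex double yields
+// _Complex double; the narrower operand gets a CK_FloatingComplexCast
+// (skipped for the LHS of a compound assignment).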
+
+/// \brief Converts otherExpr to complex float and promotes complexExpr if
+/// necessary. Helper function of UsualArithmeticConversions()
+static QualType handleOtherComplexFloatConversion(Sema &S,
+ ExprResult &ComplexExpr,
+ ExprResult &OtherExpr,
+ QualType ComplexTy,
+ QualType OtherTy,
+ bool ConvertComplexExpr,
+ bool ConvertOtherExpr) {
+ int order = S.Context.getFloatingTypeOrder(ComplexTy, OtherTy);
+
+ // If just the complexExpr is complex, the otherExpr needs to be converted,
+ // and the complexExpr might need to be promoted.
+ if (order > 0) { // complexExpr is wider
+ // float -> _Complex double
+ if (ConvertOtherExpr) {
+ QualType fp = cast<ComplexType>(ComplexTy)->getElementType();
+ OtherExpr = S.ImpCastExprToType(OtherExpr.take(), fp, CK_FloatingCast);
+ OtherExpr = S.ImpCastExprToType(OtherExpr.take(), ComplexTy,
+ CK_FloatingRealToComplex);
+ }
+ return ComplexTy;
+ }
+
+ // otherTy is at least as wide. Find its corresponding complex type.
+ QualType result = (order == 0 ? ComplexTy :
+ S.Context.getComplexType(OtherTy));
+
+ // double -> _Complex double
+ if (ConvertOtherExpr)
+ OtherExpr = S.ImpCastExprToType(OtherExpr.take(), result,
+ CK_FloatingRealToComplex);
+
+ // _Complex float -> _Complex double
+ if (ConvertComplexExpr && order < 0)
+ ComplexExpr = S.ImpCastExprToType(ComplexExpr.take(), result,
+ CK_FloatingComplexCast);
+
+ return result;
+}
+
+/// \brief Handle arithmetic conversion with complex types. Helper function of
+/// UsualArithmeticConversions()
+static QualType handleComplexFloatConversion(Sema &S, ExprResult &LHS,
+ ExprResult &RHS, QualType LHSType,
+ QualType RHSType,
+ bool IsCompAssign) {
+ // if we have an integer operand, the result is the complex type.
+ if (!handleIntegerToComplexFloatConversion(S, RHS, LHS, RHSType, LHSType,
+ /*skipCast*/false))
+ return LHSType;
+ if (!handleIntegerToComplexFloatConversion(S, LHS, RHS, LHSType, RHSType,
+ /*skipCast*/IsCompAssign))
+ return RHSType;
+
+ // This handles complex/complex, complex/float, or float/complex.
+ // When both operands are complex, the shorter operand is converted to the
+ // type of the longer, and that is the type of the result. This corresponds
+ // to what is done when combining two real floating-point operands.
+  // The fun begins when size promotions occur across type domains.
+  // From H&S 6.3.4: When one operand is complex and the other is a real
+  // floating-point type, the less precise type is converted, within its
+ // real or complex domain, to the precision of the other type. For example,
+ // when combining a "long double" with a "double _Complex", the
+ // "double _Complex" is promoted to "long double _Complex".
+
+ bool LHSComplexFloat = LHSType->isComplexType();
+ bool RHSComplexFloat = RHSType->isComplexType();
+
+ // If both are complex, just cast to the more precise type.
+ if (LHSComplexFloat && RHSComplexFloat)
+    return handleComplexFloatToComplexFloatConversion(S, LHS, RHS,
+                                                      LHSType, RHSType,
+                                                      IsCompAssign);
+
+ // If only one operand is complex, promote it if necessary and convert the
+ // other operand to complex.
+ if (LHSComplexFloat)
+ return handleOtherComplexFloatConversion(
+ S, LHS, RHS, LHSType, RHSType, /*convertComplexExpr*/!IsCompAssign,
+ /*convertOtherExpr*/ true);
+
+ assert(RHSComplexFloat);
+ return handleOtherComplexFloatConversion(
+ S, RHS, LHS, RHSType, LHSType, /*convertComplexExpr*/true,
+ /*convertOtherExpr*/ !IsCompAssign);
+}
+
+/// \brief Handle arithmetic conversion from integer to float. Helper function
+/// of UsualArithmeticConversions()
+static QualType handleIntToFloatConversion(Sema &S, ExprResult &FloatExpr,
+ ExprResult &IntExpr,
+ QualType FloatTy, QualType IntTy,
+ bool ConvertFloat, bool ConvertInt) {
+ if (IntTy->isIntegerType()) {
+ if (ConvertInt)
+ // Convert intExpr to the lhs floating point type.
+ IntExpr = S.ImpCastExprToType(IntExpr.take(), FloatTy,
+ CK_IntegralToFloating);
+ return FloatTy;
+ }
+
+ // Convert both sides to the appropriate complex float.
+ assert(IntTy->isComplexIntegerType());
+ QualType result = S.Context.getComplexType(FloatTy);
+
+ // _Complex int -> _Complex float
+ if (ConvertInt)
+ IntExpr = S.ImpCastExprToType(IntExpr.take(), result,
+ CK_IntegralComplexToFloatingComplex);
+
+ // float -> _Complex float
+ if (ConvertFloat)
+ FloatExpr = S.ImpCastExprToType(FloatExpr.take(), result,
+ CK_FloatingRealToComplex);
+
+ return result;
+}
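+
+// For example:
+//   int          + float  ->  float           (CK_IntegralToFloating)
+//   _Complex int + float  ->  _Complex float  (both operands converted)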
+
+/// \brief Handle arithmetic conversion with floating point types. Helper
+/// function of UsualArithmeticConversions()
+static QualType handleFloatConversion(Sema &S, ExprResult &LHS,
+ ExprResult &RHS, QualType LHSType,
+ QualType RHSType, bool IsCompAssign) {
+ bool LHSFloat = LHSType->isRealFloatingType();
+ bool RHSFloat = RHSType->isRealFloatingType();
+
+ // If we have two real floating types, convert the smaller operand
+ // to the bigger result.
+ if (LHSFloat && RHSFloat) {
+ int order = S.Context.getFloatingTypeOrder(LHSType, RHSType);
+ if (order > 0) {
+ RHS = S.ImpCastExprToType(RHS.take(), LHSType, CK_FloatingCast);
+ return LHSType;
+ }
+
+ assert(order < 0 && "illegal float comparison");
+ if (!IsCompAssign)
+ LHS = S.ImpCastExprToType(LHS.take(), RHSType, CK_FloatingCast);
+ return RHSType;
+ }
+
+ if (LHSFloat)
+ return handleIntToFloatConversion(S, LHS, RHS, LHSType, RHSType,
+ /*convertFloat=*/!IsCompAssign,
+ /*convertInt=*/ true);
+ assert(RHSFloat);
+  return handleIntToFloatConversion(S, RHS, LHS, RHSType, LHSType,
+                                    /*convertFloat=*/ true,
+                                    /*convertInt=*/!IsCompAssign);
+}
+
+/// \brief Handle conversions with GCC complex int extension. Helper function
+/// of UsualArithmeticConversions()
+// FIXME: if the operands are (int, _Complex long), we currently
+// don't promote the complex. Also, signedness?
+static QualType handleComplexIntConversion(Sema &S, ExprResult &LHS,
+ ExprResult &RHS, QualType LHSType,
+ QualType RHSType,
+ bool IsCompAssign) {
+ const ComplexType *LHSComplexInt = LHSType->getAsComplexIntegerType();
+ const ComplexType *RHSComplexInt = RHSType->getAsComplexIntegerType();
+
+ if (LHSComplexInt && RHSComplexInt) {
+ int order = S.Context.getIntegerTypeOrder(LHSComplexInt->getElementType(),
+ RHSComplexInt->getElementType());
+    assert(order && "unequal types with equal element ordering");
+ if (order > 0) {
+ // _Complex int -> _Complex long
+ RHS = S.ImpCastExprToType(RHS.take(), LHSType, CK_IntegralComplexCast);
+ return LHSType;
+ }
+
+ if (!IsCompAssign)
+ LHS = S.ImpCastExprToType(LHS.take(), RHSType, CK_IntegralComplexCast);
+ return RHSType;
+ }
+
+ if (LHSComplexInt) {
+ // int -> _Complex int
+ // FIXME: This needs to take integer ranks into account
+ RHS = S.ImpCastExprToType(RHS.take(), LHSComplexInt->getElementType(),
+ CK_IntegralCast);
+ RHS = S.ImpCastExprToType(RHS.take(), LHSType, CK_IntegralRealToComplex);
+ return LHSType;
+ }
+
+ assert(RHSComplexInt);
+ // int -> _Complex int
+ // FIXME: This needs to take integer ranks into account
+ if (!IsCompAssign) {
+ LHS = S.ImpCastExprToType(LHS.take(), RHSComplexInt->getElementType(),
+ CK_IntegralCast);
+ LHS = S.ImpCastExprToType(LHS.take(), RHSType, CK_IntegralRealToComplex);
+ }
+ return RHSType;
+}
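+
+// For example, with the GCC complex integer extension:
+//   _Complex int + _Complex long  ->  _Complex long  (CK_IntegralComplexCast)
+//   int          + _Complex int   ->  _Complex int   (real -> complex)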
+
+/// \brief Handle integer arithmetic conversions. Helper function of
+/// UsualArithmeticConversions()
+static QualType handleIntegerConversion(Sema &S, ExprResult &LHS,
+ ExprResult &RHS, QualType LHSType,
+ QualType RHSType, bool IsCompAssign) {
+ // The rules for this case are in C99 6.3.1.8
+ int order = S.Context.getIntegerTypeOrder(LHSType, RHSType);
+ bool LHSSigned = LHSType->hasSignedIntegerRepresentation();
+ bool RHSSigned = RHSType->hasSignedIntegerRepresentation();
+ if (LHSSigned == RHSSigned) {
+ // Same signedness; use the higher-ranked type
+ if (order >= 0) {
+ RHS = S.ImpCastExprToType(RHS.take(), LHSType, CK_IntegralCast);
+ return LHSType;
+ } else if (!IsCompAssign)
+ LHS = S.ImpCastExprToType(LHS.take(), RHSType, CK_IntegralCast);
+ return RHSType;
+ } else if (order != (LHSSigned ? 1 : -1)) {
+ // The unsigned type has greater than or equal rank to the
+ // signed type, so use the unsigned type
+ if (RHSSigned) {
+ RHS = S.ImpCastExprToType(RHS.take(), LHSType, CK_IntegralCast);
+ return LHSType;
+ } else if (!IsCompAssign)
+ LHS = S.ImpCastExprToType(LHS.take(), RHSType, CK_IntegralCast);
+ return RHSType;
+ } else if (S.Context.getIntWidth(LHSType) != S.Context.getIntWidth(RHSType)) {
+ // The two types are different widths; if we are here, that
+ // means the signed type is larger than the unsigned type, so
+ // use the signed type.
+ if (LHSSigned) {
+ RHS = S.ImpCastExprToType(RHS.take(), LHSType, CK_IntegralCast);
+ return LHSType;
+ } else if (!IsCompAssign)
+ LHS = S.ImpCastExprToType(LHS.take(), RHSType, CK_IntegralCast);
+ return RHSType;
+ } else {
+ // The signed type is higher-ranked than the unsigned type,
+ // but isn't actually any bigger (like unsigned int and long
+ // on most 32-bit systems). Use the unsigned type corresponding
+ // to the signed type.
+ QualType result =
+ S.Context.getCorrespondingUnsignedType(LHSSigned ? LHSType : RHSType);
+ RHS = S.ImpCastExprToType(RHS.take(), result, CK_IntegralCast);
+ if (!IsCompAssign)
+ LHS = S.ImpCastExprToType(LHS.take(), result, CK_IntegralCast);
+ return result;
+ }
+}
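+
+// For example, on a typical ILP32 target:
+//   int      + long       ->  long          (same signedness, higher rank)
+//   int      + unsigned   ->  unsigned      (unsigned rank >= signed rank)
+//   unsigned + long long  ->  long long     (signed type is strictly wider)
+//   unsigned + long       ->  unsigned long (same width: use the unsigned
+//                                            counterpart of 'long')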
+
+/// UsualArithmeticConversions - Performs various conversions that are common to
+/// binary operators (C99 6.3.1.8). If either operand is not arithmetic, this
+/// routine simply returns the LHS type. The client is
+/// responsible for emitting appropriate error diagnostics.
+/// FIXME: verify the conversion rules for "complex int" are consistent with
+/// GCC.
+QualType Sema::UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
+ bool IsCompAssign) {
+ if (!IsCompAssign) {
+ LHS = UsualUnaryConversions(LHS.take());
+ if (LHS.isInvalid())
+ return QualType();
+ }
+
+ RHS = UsualUnaryConversions(RHS.take());
+ if (RHS.isInvalid())
+ return QualType();
+
+ // For conversion purposes, we ignore any qualifiers.
+ // For example, "const float" and "float" are equivalent.
+ QualType LHSType =
+ Context.getCanonicalType(LHS.get()->getType()).getUnqualifiedType();
+ QualType RHSType =
+ Context.getCanonicalType(RHS.get()->getType()).getUnqualifiedType();
+
+ // If both types are identical, no conversion is needed.
+ if (LHSType == RHSType)
+ return LHSType;
+
+ // If either side is a non-arithmetic type (e.g. a pointer), we are done.
+ // The caller can deal with this (e.g. pointer + int).
+ if (!LHSType->isArithmeticType() || !RHSType->isArithmeticType())
+ return LHSType;
+
+ // Apply unary and bitfield promotions to the LHS's type.
+ QualType LHSUnpromotedType = LHSType;
+ if (LHSType->isPromotableIntegerType())
+ LHSType = Context.getPromotedIntegerType(LHSType);
+ QualType LHSBitfieldPromoteTy = Context.isPromotableBitField(LHS.get());
+ if (!LHSBitfieldPromoteTy.isNull())
+ LHSType = LHSBitfieldPromoteTy;
+ if (LHSType != LHSUnpromotedType && !IsCompAssign)
+ LHS = ImpCastExprToType(LHS.take(), LHSType, CK_IntegralCast);
+
+ // If both types are identical, no conversion is needed.
+ if (LHSType == RHSType)
+ return LHSType;
+
+ // At this point, we have two different arithmetic types.
+
+ // Handle complex types first (C99 6.3.1.8p1).
+ if (LHSType->isComplexType() || RHSType->isComplexType())
+ return handleComplexFloatConversion(*this, LHS, RHS, LHSType, RHSType,
+ IsCompAssign);
+
+ // Now handle "real" floating types (i.e. float, double, long double).
+ if (LHSType->isRealFloatingType() || RHSType->isRealFloatingType())
+ return handleFloatConversion(*this, LHS, RHS, LHSType, RHSType,
+ IsCompAssign);
+
+ // Handle GCC complex int extension.
+ if (LHSType->isComplexIntegerType() || RHSType->isComplexIntegerType())
+ return handleComplexIntConversion(*this, LHS, RHS, LHSType, RHSType,
+ IsCompAssign);
+
+ // Finally, we have two differing integer types.
+ return handleIntegerConversion(*this, LHS, RHS, LHSType, RHSType,
+ IsCompAssign);
+}
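+
+// For example, for the compound assignment
+//   short s; float f;
+//   s *= f;
+// the promotable LHS type is widened to 'int' here (the lvalue itself is
+// left uncast because IsCompAssign is true) and the computation type
+// returned is 'float'; the assignment later narrows the result back to
+// 'short'.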
+
+//===----------------------------------------------------------------------===//
+// Semantic Analysis for various Expression Types
+//===----------------------------------------------------------------------===//
+
+
+ExprResult
+Sema::ActOnGenericSelectionExpr(SourceLocation KeyLoc,
+ SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ Expr *ControllingExpr,
+ MultiTypeArg ArgTypes,
+ MultiExprArg ArgExprs) {
+ unsigned NumAssocs = ArgTypes.size();
+ assert(NumAssocs == ArgExprs.size());
+
+ ParsedType *ParsedTypes = ArgTypes.release();
+ Expr **Exprs = ArgExprs.release();
+
+ TypeSourceInfo **Types = new TypeSourceInfo*[NumAssocs];
+ for (unsigned i = 0; i < NumAssocs; ++i) {
+ if (ParsedTypes[i])
+ (void) GetTypeFromParser(ParsedTypes[i], &Types[i]);
+ else
+ Types[i] = 0;
+ }
+
+ ExprResult ER = CreateGenericSelectionExpr(KeyLoc, DefaultLoc, RParenLoc,
+ ControllingExpr, Types, Exprs,
+ NumAssocs);
+ delete [] Types;
+ return ER;
+}
+
+ExprResult
+Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
+ SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ Expr *ControllingExpr,
+ TypeSourceInfo **Types,
+ Expr **Exprs,
+ unsigned NumAssocs) {
+ bool TypeErrorFound = false,
+ IsResultDependent = ControllingExpr->isTypeDependent(),
+ ContainsUnexpandedParameterPack
+ = ControllingExpr->containsUnexpandedParameterPack();
+
+ for (unsigned i = 0; i < NumAssocs; ++i) {
+ if (Exprs[i]->containsUnexpandedParameterPack())
+ ContainsUnexpandedParameterPack = true;
+
+ if (Types[i]) {
+ if (Types[i]->getType()->containsUnexpandedParameterPack())
+ ContainsUnexpandedParameterPack = true;
+
+ if (Types[i]->getType()->isDependentType()) {
+ IsResultDependent = true;
+ } else {
+ // C11 6.5.1.1p2 "The type name in a generic association shall specify a
+ // complete object type other than a variably modified type."
+ unsigned D = 0;
+ if (Types[i]->getType()->isIncompleteType())
+ D = diag::err_assoc_type_incomplete;
+ else if (!Types[i]->getType()->isObjectType())
+ D = diag::err_assoc_type_nonobject;
+ else if (Types[i]->getType()->isVariablyModifiedType())
+ D = diag::err_assoc_type_variably_modified;
+
+ if (D != 0) {
+ Diag(Types[i]->getTypeLoc().getBeginLoc(), D)
+ << Types[i]->getTypeLoc().getSourceRange()
+ << Types[i]->getType();
+ TypeErrorFound = true;
+ }
+
+ // C11 6.5.1.1p2 "No two generic associations in the same generic
+ // selection shall specify compatible types."
+ for (unsigned j = i+1; j < NumAssocs; ++j)
+ if (Types[j] && !Types[j]->getType()->isDependentType() &&
+ Context.typesAreCompatible(Types[i]->getType(),
+ Types[j]->getType())) {
+ Diag(Types[j]->getTypeLoc().getBeginLoc(),
+ diag::err_assoc_compatible_types)
+ << Types[j]->getTypeLoc().getSourceRange()
+ << Types[j]->getType()
+ << Types[i]->getType();
+ Diag(Types[i]->getTypeLoc().getBeginLoc(),
+ diag::note_compat_assoc)
+ << Types[i]->getTypeLoc().getSourceRange()
+ << Types[i]->getType();
+ TypeErrorFound = true;
+ }
+ }
+ }
+ }
+ if (TypeErrorFound)
+ return ExprError();
+
+ // If we determined that the generic selection is result-dependent, don't
+ // try to compute the result expression.
+ if (IsResultDependent)
+ return Owned(new (Context) GenericSelectionExpr(
+ Context, KeyLoc, ControllingExpr,
+ Types, Exprs, NumAssocs, DefaultLoc,
+ RParenLoc, ContainsUnexpandedParameterPack));
+
+ SmallVector<unsigned, 1> CompatIndices;
+ unsigned DefaultIndex = -1U;
+ for (unsigned i = 0; i < NumAssocs; ++i) {
+ if (!Types[i])
+ DefaultIndex = i;
+ else if (Context.typesAreCompatible(ControllingExpr->getType(),
+ Types[i]->getType()))
+ CompatIndices.push_back(i);
+ }
+
+ // C11 6.5.1.1p2 "The controlling expression of a generic selection shall have
+ // type compatible with at most one of the types named in its generic
+ // association list."
+ if (CompatIndices.size() > 1) {
+ // We strip parens here because the controlling expression is typically
+ // parenthesized in macro definitions.
+ ControllingExpr = ControllingExpr->IgnoreParens();
+ Diag(ControllingExpr->getLocStart(), diag::err_generic_sel_multi_match)
+ << ControllingExpr->getSourceRange() << ControllingExpr->getType()
+ << (unsigned) CompatIndices.size();
+ for (SmallVector<unsigned, 1>::iterator I = CompatIndices.begin(),
+ E = CompatIndices.end(); I != E; ++I) {
+ Diag(Types[*I]->getTypeLoc().getBeginLoc(),
+ diag::note_compat_assoc)
+ << Types[*I]->getTypeLoc().getSourceRange()
+ << Types[*I]->getType();
+ }
+ return ExprError();
+ }
+
+ // C11 6.5.1.1p2 "If a generic selection has no default generic association,
+ // its controlling expression shall have type compatible with exactly one of
+ // the types named in its generic association list."
+ if (DefaultIndex == -1U && CompatIndices.size() == 0) {
+ // We strip parens here because the controlling expression is typically
+ // parenthesized in macro definitions.
+ ControllingExpr = ControllingExpr->IgnoreParens();
+ Diag(ControllingExpr->getLocStart(), diag::err_generic_sel_no_match)
+ << ControllingExpr->getSourceRange() << ControllingExpr->getType();
+ return ExprError();
+ }
+
+ // C11 6.5.1.1p3 "If a generic selection has a generic association with a
+ // type name that is compatible with the type of the controlling expression,
+ // then the result expression of the generic selection is the expression
+ // in that generic association. Otherwise, the result expression of the
+ // generic selection is the expression in the default generic association."
+ unsigned ResultIndex =
+ CompatIndices.size() ? CompatIndices[0] : DefaultIndex;
+
+ return Owned(new (Context) GenericSelectionExpr(
+ Context, KeyLoc, ControllingExpr,
+ Types, Exprs, NumAssocs, DefaultLoc,
+ RParenLoc, ContainsUnexpandedParameterPack,
+ ResultIndex));
+}
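+
+// For example, the classic C11 usage
+//   #define cbrt(X) _Generic((X),                \
+//                            long double: cbrtl, \
+//                            default: cbrt,      \
+//                            float: cbrtf)(X)
+// is checked here: every named type must be a complete object type that is
+// not variably modified, no two associations may name compatible types, and
+// the controlling expression may be compatible with at most one of them
+// (falling back to 'default' when there is no match).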
+
+/// getUDSuffixLoc - Create a SourceLocation for a ud-suffix, given the
+/// location of the token and the offset of the ud-suffix within it.
+static SourceLocation getUDSuffixLoc(Sema &S, SourceLocation TokLoc,
+ unsigned Offset) {
+ return Lexer::AdvanceToTokenCharacter(TokLoc, Offset, S.getSourceManager(),
+ S.getLangOpts());
+}
+
+/// BuildCookedLiteralOperatorCall - A user-defined literal was found. Look up
+/// the corresponding cooked (non-raw) literal operator, and build a call to it.
+static ExprResult BuildCookedLiteralOperatorCall(Sema &S, Scope *Scope,
+ IdentifierInfo *UDSuffix,
+ SourceLocation UDSuffixLoc,
+ ArrayRef<Expr*> Args,
+ SourceLocation LitEndLoc) {
+ assert(Args.size() <= 2 && "too many arguments for literal operator");
+
+ QualType ArgTy[2];
+ for (unsigned ArgIdx = 0; ArgIdx != Args.size(); ++ArgIdx) {
+ ArgTy[ArgIdx] = Args[ArgIdx]->getType();
+ if (ArgTy[ArgIdx]->isArrayType())
+ ArgTy[ArgIdx] = S.Context.getArrayDecayedType(ArgTy[ArgIdx]);
+ }
+
+ DeclarationName OpName =
+ S.Context.DeclarationNames.getCXXLiteralOperatorName(UDSuffix);
+ DeclarationNameInfo OpNameInfo(OpName, UDSuffixLoc);
+ OpNameInfo.setCXXLiteralOperatorNameLoc(UDSuffixLoc);
+
+ LookupResult R(S, OpName, UDSuffixLoc, Sema::LookupOrdinaryName);
+ if (S.LookupLiteralOperator(Scope, R, llvm::makeArrayRef(ArgTy, Args.size()),
+ /*AllowRawAndTemplate*/false) == Sema::LOLR_Error)
+ return ExprError();
+
+ return S.BuildLiteralOperatorCall(R, OpNameInfo, Args, LitEndLoc);
+}
+
+/// ActOnStringLiteral - The specified tokens were lexed as pasted string
+/// fragments (e.g. "foo" "bar" L"baz"). The result string has to handle string
+/// concatenation ([C99 5.1.1.2, translation phase #6]), so it may come from
+/// multiple tokens. However, the common case is that StringToks points to one
+/// string.
+///
+ExprResult
+Sema::ActOnStringLiteral(const Token *StringToks, unsigned NumStringToks,
+ Scope *UDLScope) {
+ assert(NumStringToks && "Must have at least one string!");
+
+ StringLiteralParser Literal(StringToks, NumStringToks, PP);
+ if (Literal.hadError)
+ return ExprError();
+
+ SmallVector<SourceLocation, 4> StringTokLocs;
+ for (unsigned i = 0; i != NumStringToks; ++i)
+ StringTokLocs.push_back(StringToks[i].getLocation());
+
+ QualType StrTy = Context.CharTy;
+ if (Literal.isWide())
+ StrTy = Context.getWCharType();
+ else if (Literal.isUTF16())
+ StrTy = Context.Char16Ty;
+ else if (Literal.isUTF32())
+ StrTy = Context.Char32Ty;
+ else if (Literal.isPascal())
+ StrTy = Context.UnsignedCharTy;
+
+ StringLiteral::StringKind Kind = StringLiteral::Ascii;
+ if (Literal.isWide())
+ Kind = StringLiteral::Wide;
+ else if (Literal.isUTF8())
+ Kind = StringLiteral::UTF8;
+ else if (Literal.isUTF16())
+ Kind = StringLiteral::UTF16;
+ else if (Literal.isUTF32())
+ Kind = StringLiteral::UTF32;
+
+ // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
+ if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
+ StrTy.addConst();
+
+ // Get an array type for the string, according to C99 6.4.5. This includes
+ // the nul terminator character as well as the string length for pascal
+ // strings.
+ StrTy = Context.getConstantArrayType(StrTy,
+ llvm::APInt(32, Literal.GetNumStringChars()+1),
+ ArrayType::Normal, 0);
+
+ // Pass &StringTokLocs[0], StringTokLocs.size() to factory!
+ StringLiteral *Lit = StringLiteral::Create(Context, Literal.GetString(),
+ Kind, Literal.Pascal, StrTy,
+ &StringTokLocs[0],
+ StringTokLocs.size());
+ if (Literal.getUDSuffix().empty())
+ return Owned(Lit);
+
+ // We're building a user-defined literal.
+ IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix());
+ SourceLocation UDSuffixLoc =
+ getUDSuffixLoc(*this, StringTokLocs[Literal.getUDSuffixToken()],
+ Literal.getUDSuffixOffset());
+
+ // Make sure we're allowed user-defined literals here.
+ if (!UDLScope)
+ return ExprError(Diag(UDSuffixLoc, diag::err_invalid_string_udl));
+
+ // C++11 [lex.ext]p5: The literal L is treated as a call of the form
+ // operator "" X (str, len)
+ QualType SizeType = Context.getSizeType();
+ llvm::APInt Len(Context.getIntWidth(SizeType), Literal.GetNumStringChars());
+ IntegerLiteral *LenArg = IntegerLiteral::Create(Context, Len, SizeType,
+ StringTokLocs[0]);
+ Expr *Args[] = { Lit, LenArg };
+ return BuildCookedLiteralOperatorCall(*this, UDLScope, UDSuffix, UDSuffixLoc,
+ Args, StringTokLocs.back());
+}
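+
+// For example, given a literal operator such as
+//   MyString operator "" _str(const char *, size_t);
+// the literal "hi"_str is built here as the cooked call
+//   operator "" _str("hi", 2)
+// i.e. the string plus its length, per C++11 [lex.ext]p5.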
+
+ExprResult
+Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
+ SourceLocation Loc,
+ const CXXScopeSpec *SS) {
+ DeclarationNameInfo NameInfo(D->getDeclName(), Loc);
+ return BuildDeclRefExpr(D, Ty, VK, NameInfo, SS);
+}
+
+/// BuildDeclRefExpr - Build an expression that references a
+/// declaration that does not require a closure capture.
+ExprResult
+Sema::BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
+ const DeclarationNameInfo &NameInfo,
+ const CXXScopeSpec *SS) {
+ if (getLangOpts().CUDA)
+ if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
+ if (const FunctionDecl *Callee = dyn_cast<FunctionDecl>(D)) {
+ CUDAFunctionTarget CallerTarget = IdentifyCUDATarget(Caller),
+ CalleeTarget = IdentifyCUDATarget(Callee);
+ if (CheckCUDATarget(CallerTarget, CalleeTarget)) {
+ Diag(NameInfo.getLoc(), diag::err_ref_bad_target)
+ << CalleeTarget << D->getIdentifier() << CallerTarget;
+ Diag(D->getLocation(), diag::note_previous_decl)
+ << D->getIdentifier();
+ return ExprError();
+ }
+ }
+
+ bool refersToEnclosingScope =
+ (CurContext != D->getDeclContext() &&
+ D->getDeclContext()->isFunctionOrMethod());
+
+ DeclRefExpr *E = DeclRefExpr::Create(Context,
+ SS ? SS->getWithLocInContext(Context)
+ : NestedNameSpecifierLoc(),
+ SourceLocation(),
+ D, refersToEnclosingScope,
+ NameInfo, Ty, VK);
+
+ MarkDeclRefReferenced(E);
+
+ // Just in case we're building an illegal pointer-to-member.
+ FieldDecl *FD = dyn_cast<FieldDecl>(D);
+ if (FD && FD->isBitField())
+ E->setObjectKind(OK_BitField);
+
+ return Owned(E);
+}
+
+/// Decomposes the given name into a DeclarationNameInfo, its location, and
+/// possibly a list of template arguments.
+///
+/// If this produces template arguments, it is permitted to call
+/// DecomposeTemplateName.
+///
+/// This actually loses a lot of source location information for
+/// non-standard name kinds; we should consider preserving that in
+/// some way.
+void
+Sema::DecomposeUnqualifiedId(const UnqualifiedId &Id,
+ TemplateArgumentListInfo &Buffer,
+ DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *&TemplateArgs) {
+ if (Id.getKind() == UnqualifiedId::IK_TemplateId) {
+ Buffer.setLAngleLoc(Id.TemplateId->LAngleLoc);
+ Buffer.setRAngleLoc(Id.TemplateId->RAngleLoc);
+
+ ASTTemplateArgsPtr TemplateArgsPtr(*this,
+ Id.TemplateId->getTemplateArgs(),
+ Id.TemplateId->NumArgs);
+ translateTemplateArguments(TemplateArgsPtr, Buffer);
+ TemplateArgsPtr.release();
+
+ TemplateName TName = Id.TemplateId->Template.get();
+ SourceLocation TNameLoc = Id.TemplateId->TemplateNameLoc;
+ NameInfo = Context.getNameForTemplate(TName, TNameLoc);
+ TemplateArgs = &Buffer;
+ } else {
+ NameInfo = GetNameFromUnqualifiedId(Id);
+ TemplateArgs = 0;
+ }
+}
+
+/// Diagnose an empty lookup.
+///
+/// \return false if new lookup candidates were found
+bool Sema::DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
+ CorrectionCandidateCallback &CCC,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ llvm::ArrayRef<Expr *> Args) {
+ DeclarationName Name = R.getLookupName();
+
+ unsigned diagnostic = diag::err_undeclared_var_use;
+ unsigned diagnostic_suggest = diag::err_undeclared_var_use_suggest;
+ if (Name.getNameKind() == DeclarationName::CXXOperatorName ||
+ Name.getNameKind() == DeclarationName::CXXLiteralOperatorName ||
+ Name.getNameKind() == DeclarationName::CXXConversionFunctionName) {
+ diagnostic = diag::err_undeclared_use;
+ diagnostic_suggest = diag::err_undeclared_use_suggest;
+ }
+
+ // If the original lookup was an unqualified lookup, fake an
+ // unqualified lookup. This is useful when (for example) the
+ // original lookup would not have found something because it was a
+ // dependent name.
+ DeclContext *DC = SS.isEmpty() ? CurContext : 0;
+ while (DC) {
+ if (isa<CXXRecordDecl>(DC)) {
+ LookupQualifiedName(R, DC);
+
+ if (!R.empty()) {
+ // Don't give errors about ambiguities in this lookup.
+ R.suppressDiagnostics();
+
+ // During a default argument instantiation the CurContext points
+ // to a CXXMethodDecl; but we can't apply a this-> fixit inside a
+ // function parameter list, hence add an explicit check.
+ bool isDefaultArgument = !ActiveTemplateInstantiations.empty() &&
+ ActiveTemplateInstantiations.back().Kind ==
+ ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation;
+ CXXMethodDecl *CurMethod = dyn_cast<CXXMethodDecl>(CurContext);
+ bool isInstance = CurMethod &&
+ CurMethod->isInstance() &&
+ DC == CurMethod->getParent() && !isDefaultArgument;
+
+
+ // Give a code modification hint to insert 'this->'.
+ // TODO: fixit for inserting 'Base<T>::' in the other cases.
+ // Actually quite difficult!
+ if (isInstance) {
+ UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(
+ CallsUndergoingInstantiation.back()->getCallee());
+ CXXMethodDecl *DepMethod = cast_or_null<CXXMethodDecl>(
+ CurMethod->getInstantiatedFromMemberFunction());
+ if (DepMethod) {
+ if (getLangOpts().MicrosoftMode)
+ diagnostic = diag::warn_found_via_dependent_bases_lookup;
+ Diag(R.getNameLoc(), diagnostic) << Name
+ << FixItHint::CreateInsertion(R.getNameLoc(), "this->");
+ QualType DepThisType = DepMethod->getThisType(Context);
+ CheckCXXThisCapture(R.getNameLoc());
+ CXXThisExpr *DepThis = new (Context) CXXThisExpr(
+ R.getNameLoc(), DepThisType, false);
+ TemplateArgumentListInfo TList;
+ if (ULE->hasExplicitTemplateArgs())
+ ULE->copyTemplateArgumentsInto(TList);
+
+ CXXScopeSpec SS;
+ SS.Adopt(ULE->getQualifierLoc());
+ CXXDependentScopeMemberExpr *DepExpr =
+ CXXDependentScopeMemberExpr::Create(
+ Context, DepThis, DepThisType, true, SourceLocation(),
+ SS.getWithLocInContext(Context),
+ ULE->getTemplateKeywordLoc(), 0,
+ R.getLookupNameInfo(),
+ ULE->hasExplicitTemplateArgs() ? &TList : 0);
+ CallsUndergoingInstantiation.back()->setCallee(DepExpr);
+ } else {
+ // FIXME: we should be able to handle this case too. It is correct
+ // to add this-> here. This is a workaround for PR7947.
+ Diag(R.getNameLoc(), diagnostic) << Name;
+ }
+ } else {
+ if (getLangOpts().MicrosoftMode)
+ diagnostic = diag::warn_found_via_dependent_bases_lookup;
+ Diag(R.getNameLoc(), diagnostic) << Name;
+ }
+
+ // Do we really want to note all of these?
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
+ Diag((*I)->getLocation(), diag::note_dependent_var_use);
+
+      // Return true if we are inside a default argument instantiation
+      // and the found name refers to an instance member function; otherwise
+      // the function calling DiagnoseEmptyLookup will try to create an
+      // implicit member call, which is wrong for a default argument.
+ if (isDefaultArgument && ((*R.begin())->isCXXInstanceMember())) {
+ Diag(R.getNameLoc(), diag::err_member_call_without_object);
+ return true;
+ }
+
+ // Tell the callee to try to recover.
+ return false;
+ }
+
+ R.clear();
+ }
+
+ // In Microsoft mode, if we are performing lookup from within a friend
+ // function definition declared at class scope then we must set
+ // DC to the lexical parent to be able to search into the parent
+ // class.
+ if (getLangOpts().MicrosoftMode && isa<FunctionDecl>(DC) &&
+ cast<FunctionDecl>(DC)->getFriendObjectKind() &&
+ DC->getLexicalParent()->isRecord())
+ DC = DC->getLexicalParent();
+ else
+ DC = DC->getParent();
+ }
+
+ // We didn't find anything, so try to correct for a typo.
+ TypoCorrection Corrected;
+ if (S && (Corrected = CorrectTypo(R.getLookupNameInfo(), R.getLookupKind(),
+ S, &SS, CCC))) {
+ std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOpts()));
+ R.setLookupName(Corrected.getCorrection());
+
+ if (NamedDecl *ND = Corrected.getCorrectionDecl()) {
+ if (Corrected.isOverloaded()) {
+ OverloadCandidateSet OCS(R.getNameLoc());
+ OverloadCandidateSet::iterator Best;
+ for (TypoCorrection::decl_iterator CD = Corrected.begin(),
+ CDEnd = Corrected.end();
+ CD != CDEnd; ++CD) {
+ if (FunctionTemplateDecl *FTD =
+ dyn_cast<FunctionTemplateDecl>(*CD))
+ AddTemplateOverloadCandidate(
+ FTD, DeclAccessPair::make(FTD, AS_none), ExplicitTemplateArgs,
+ Args, OCS);
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*CD))
+ if (!ExplicitTemplateArgs || ExplicitTemplateArgs->size() == 0)
+ AddOverloadCandidate(FD, DeclAccessPair::make(FD, AS_none),
+ Args, OCS);
+ }
+ switch (OCS.BestViableFunction(*this, R.getNameLoc(), Best)) {
+ case OR_Success:
+ ND = Best->Function;
+ break;
+ default:
+ break;
+ }
+ }
+ R.addDecl(ND);
+ if (isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND)) {
+ if (SS.isEmpty())
+ Diag(R.getNameLoc(), diagnostic_suggest) << Name << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(R.getNameLoc(), CorrectedStr);
+ else
+ Diag(R.getNameLoc(), diag::err_no_member_suggest)
+ << Name << computeDeclContext(SS, false) << CorrectedQuotedStr
+ << SS.getRange()
+ << FixItHint::CreateReplacement(R.getNameLoc(), CorrectedStr);
+ if (ND)
+ Diag(ND->getLocation(), diag::note_previous_decl)
+ << CorrectedQuotedStr;
+
+ // Tell the callee to try to recover.
+ return false;
+ }
+
+ if (isa<TypeDecl>(ND) || isa<ObjCInterfaceDecl>(ND)) {
+ // FIXME: If we ended up with a typo for a type name or
+ // Objective-C class name, we're in trouble because the parser
+ // is in the wrong place to recover. Suggest the typo
+ // correction, but don't make it a fix-it since we're not going
+ // to recover well anyway.
+ if (SS.isEmpty())
+ Diag(R.getNameLoc(), diagnostic_suggest)
+ << Name << CorrectedQuotedStr;
+ else
+ Diag(R.getNameLoc(), diag::err_no_member_suggest)
+ << Name << computeDeclContext(SS, false) << CorrectedQuotedStr
+ << SS.getRange();
+
+ // Don't try to recover; it won't work.
+ return true;
+ }
+ } else {
+ // FIXME: We found a keyword. Suggest it, but don't provide a fix-it
+ // because we aren't able to recover.
+ if (SS.isEmpty())
+ Diag(R.getNameLoc(), diagnostic_suggest) << Name << CorrectedQuotedStr;
+ else
+ Diag(R.getNameLoc(), diag::err_no_member_suggest)
+ << Name << computeDeclContext(SS, false) << CorrectedQuotedStr
+ << SS.getRange();
+ return true;
+ }
+ }
+ R.clear();
+
+ // Emit a special diagnostic for failed member lookups.
+ // FIXME: computing the declaration context might fail here (?)
+ if (!SS.isEmpty()) {
+ Diag(R.getNameLoc(), diag::err_no_member)
+ << Name << computeDeclContext(SS, false)
+ << SS.getRange();
+ return true;
+ }
+
+ // Give up, we can't recover.
+ Diag(R.getNameLoc(), diagnostic) << Name;
+ return true;
+}
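+
+// The 'this->' fix-it above covers the classic two-phase lookup case, e.g.
+//   template<typename T> struct Base { void f(); };
+//   template<typename T> struct Derived : Base<T> {
+//     void g() { f(); }   // not found by unqualified lookup; suggest this->f()
+//   };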
+
+ExprResult Sema::ActOnIdExpression(Scope *S,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ UnqualifiedId &Id,
+ bool HasTrailingLParen,
+ bool IsAddressOfOperand,
+ CorrectionCandidateCallback *CCC) {
+ assert(!(IsAddressOfOperand && HasTrailingLParen) &&
+ "cannot be direct & operand and have a trailing lparen");
+
+ if (SS.isInvalid())
+ return ExprError();
+
+ TemplateArgumentListInfo TemplateArgsBuffer;
+
+ // Decompose the UnqualifiedId into the following data.
+ DeclarationNameInfo NameInfo;
+ const TemplateArgumentListInfo *TemplateArgs;
+ DecomposeUnqualifiedId(Id, TemplateArgsBuffer, NameInfo, TemplateArgs);
+
+ DeclarationName Name = NameInfo.getName();
+ IdentifierInfo *II = Name.getAsIdentifierInfo();
+ SourceLocation NameLoc = NameInfo.getLoc();
+
+ // C++ [temp.dep.expr]p3:
+ // An id-expression is type-dependent if it contains:
+ // -- an identifier that was declared with a dependent type,
+ // (note: handled after lookup)
+ // -- a template-id that is dependent,
+ // (note: handled in BuildTemplateIdExpr)
+ // -- a conversion-function-id that specifies a dependent type,
+ // -- a nested-name-specifier that contains a class-name that
+ // names a dependent type.
+ // Determine whether this is a member of an unknown specialization;
+ // we need to handle these differently.
+ bool DependentID = false;
+ if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName &&
+ Name.getCXXNameType()->isDependentType()) {
+ DependentID = true;
+ } else if (SS.isSet()) {
+ if (DeclContext *DC = computeDeclContext(SS, false)) {
+ if (RequireCompleteDeclContext(SS, DC))
+ return ExprError();
+ } else {
+ DependentID = true;
+ }
+ }
+
+ if (DependentID)
+ return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
+ IsAddressOfOperand, TemplateArgs);
+
+ // Perform the required lookup.
+ LookupResult R(*this, NameInfo,
+ (Id.getKind() == UnqualifiedId::IK_ImplicitSelfParam)
+ ? LookupObjCImplicitSelfParam : LookupOrdinaryName);
+ if (TemplateArgs) {
+ // Lookup the template name again to correctly establish the context in
+ // which it was found. This is really unfortunate as we already did the
+ // lookup to determine that it was a template name in the first place. If
+ // this becomes a performance hit, we can work harder to preserve those
+ // results until we get here but it's likely not worth it.
+ bool MemberOfUnknownSpecialization;
+ LookupTemplateName(R, S, SS, QualType(), /*EnteringContext=*/false,
+ MemberOfUnknownSpecialization);
+
+ if (MemberOfUnknownSpecialization ||
+ (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation))
+ return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
+ IsAddressOfOperand, TemplateArgs);
+ } else {
+ bool IvarLookupFollowUp = II && !SS.isSet() && getCurMethodDecl();
+ LookupParsedName(R, S, &SS, !IvarLookupFollowUp);
+
+ // If the result might be in a dependent base class, this is a dependent
+ // id-expression.
+ if (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation)
+ return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
+ IsAddressOfOperand, TemplateArgs);
+
+ // If this reference is in an Objective-C method, then we need to do
+ // some special Objective-C lookup, too.
+ if (IvarLookupFollowUp) {
+ ExprResult E(LookupInObjCMethod(R, S, II, true));
+ if (E.isInvalid())
+ return ExprError();
+
+ if (Expr *Ex = E.takeAs<Expr>())
+ return Owned(Ex);
+ }
+ }
+
+ if (R.isAmbiguous())
+ return ExprError();
+
+ // Determine whether this name might be a candidate for
+ // argument-dependent lookup.
+ bool ADL = UseArgumentDependentLookup(SS, R, HasTrailingLParen);
+
+ if (R.empty() && !ADL) {
+ // Otherwise, this could be an implicitly declared function reference (legal
+ // in C90, extension in C99, forbidden in C++).
+ if (HasTrailingLParen && II && !getLangOpts().CPlusPlus) {
+ NamedDecl *D = ImplicitlyDefineFunction(NameLoc, *II, S);
+ if (D) R.addDecl(D);
+ }
+
+ // If this name wasn't predeclared and if this is not a function
+ // call, diagnose the problem.
+ if (R.empty()) {
+
+ // In Microsoft mode, if we are inside a template class member function
+ // and we can't resolve an identifier then assume the identifier is type
+ // dependent. The goal is to postpone name lookup to instantiation time
+ // to be able to search into type dependent base classes.
+ if (getLangOpts().MicrosoftMode && CurContext->isDependentContext() &&
+ isa<CXXMethodDecl>(CurContext))
+ return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
+ IsAddressOfOperand, TemplateArgs);
+
+ CorrectionCandidateCallback DefaultValidator;
+ if (DiagnoseEmptyLookup(S, SS, R, CCC ? *CCC : DefaultValidator))
+ return ExprError();
+
+ assert(!R.empty() &&
+ "DiagnoseEmptyLookup returned false but added no results");
+
+ // If we found an Objective-C instance variable, let
+ // LookupInObjCMethod build the appropriate expression to
+ // reference the ivar.
+ if (ObjCIvarDecl *Ivar = R.getAsSingle<ObjCIvarDecl>()) {
+ R.clear();
+ ExprResult E(LookupInObjCMethod(R, S, Ivar->getIdentifier()));
+        // In hopelessly buggy code, Objective-C instance variable
+ // lookup fails and no expression will be built to reference it.
+ if (!E.isInvalid() && !E.get())
+ return ExprError();
+ return move(E);
+ }
+ }
+ }
+
+ // This is guaranteed from this point on.
+ assert(!R.empty() || ADL);
+
+ // Check whether this might be a C++ implicit instance member access.
+ // C++ [class.mfct.non-static]p3:
+ // When an id-expression that is not part of a class member access
+ // syntax and not used to form a pointer to member is used in the
+ // body of a non-static member function of class X, if name lookup
+ // resolves the name in the id-expression to a non-static non-type
+ // member of some class C, the id-expression is transformed into a
+ // class member access expression using (*this) as the
+ // postfix-expression to the left of the . operator.
+ //
+ // But we don't actually need to do this for '&' operands if R
+ // resolved to a function or overloaded function set, because the
+ // expression is ill-formed if it actually works out to be a
+ // non-static member function:
+ //
+ // C++ [expr.ref]p4:
+ // Otherwise, if E1.E2 refers to a non-static member function. . .
+ // [t]he expression can be used only as the left-hand operand of a
+ // member function call.
+ //
+ // There are other safeguards against such uses, but it's important
+ // to get this right here so that we don't end up making a
+ // spuriously dependent expression if we're inside a dependent
+ // instance method.
+ if (!R.empty() && (*R.begin())->isCXXClassMember()) {
+ bool MightBeImplicitMember;
+ if (!IsAddressOfOperand)
+ MightBeImplicitMember = true;
+ else if (!SS.isEmpty())
+ MightBeImplicitMember = false;
+ else if (R.isOverloadedResult())
+ MightBeImplicitMember = false;
+ else if (R.isUnresolvableResult())
+ MightBeImplicitMember = true;
+ else
+ MightBeImplicitMember = isa<FieldDecl>(R.getFoundDecl()) ||
+ isa<IndirectFieldDecl>(R.getFoundDecl());
+
+ if (MightBeImplicitMember)
+ return BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc,
+ R, TemplateArgs);
+ }
+
+ if (TemplateArgs || TemplateKWLoc.isValid())
+ return BuildTemplateIdExpr(SS, TemplateKWLoc, R, ADL, TemplateArgs);
+
+ return BuildDeclarationNameExpr(SS, R, ADL);
+}
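+
+// For example, inside a non-static member function an id-expression naming a
+// field becomes an implicit member access:
+//   struct X {
+//     int n;
+//     void zero() { n = 0; }   // handled as (*this).n = 0
+//   };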
+
+/// BuildQualifiedDeclarationNameExpr - Build a C++ qualified
+/// declaration name, generally during template instantiation.
+/// There's a large number of things which don't need to be done along
+/// this path.
+ExprResult
+Sema::BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
+ const DeclarationNameInfo &NameInfo) {
+ DeclContext *DC;
+ if (!(DC = computeDeclContext(SS, false)) || DC->isDependentContext())
+ return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
+ NameInfo, /*TemplateArgs=*/0);
+
+ if (RequireCompleteDeclContext(SS, DC))
+ return ExprError();
+
+ LookupResult R(*this, NameInfo, LookupOrdinaryName);
+ LookupQualifiedName(R, DC);
+
+ if (R.isAmbiguous())
+ return ExprError();
+
+ if (R.empty()) {
+ Diag(NameInfo.getLoc(), diag::err_no_member)
+ << NameInfo.getName() << DC << SS.getRange();
+ return ExprError();
+ }
+
+ return BuildDeclarationNameExpr(SS, R, /*ADL*/ false);
+}
+
+/// LookupInObjCMethod - The parser has read a name in, and Sema has
+/// detected that we're currently inside an ObjC method. Perform some
+/// additional lookup.
+///
+/// Ideally, most of this would be done by lookup, but there's
+/// actually quite a lot of extra work involved.
+///
+/// Returns a null sentinel to indicate trivial success.
+ExprResult
+Sema::LookupInObjCMethod(LookupResult &Lookup, Scope *S,
+ IdentifierInfo *II, bool AllowBuiltinCreation) {
+ SourceLocation Loc = Lookup.getNameLoc();
+ ObjCMethodDecl *CurMethod = getCurMethodDecl();
+
+ // There are two cases to handle here. 1) scoped lookup could have failed,
+ // in which case we should look for an ivar. 2) scoped lookup could have
+ // found a decl, but that decl is outside the current instance method (i.e.
+  // a global variable). In both cases we do a lookup for an ivar with this
+  // name; if that lookup succeeds, we use the ivar instead.
+
+ // If we're in a class method, we don't normally want to look for
+ // ivars. But if we don't find anything else, and there's an
+ // ivar, that's an error.
+ bool IsClassMethod = CurMethod->isClassMethod();
+
+ bool LookForIvars;
+ if (Lookup.empty())
+ LookForIvars = true;
+ else if (IsClassMethod)
+ LookForIvars = false;
+ else
+ LookForIvars = (Lookup.isSingleResult() &&
+ Lookup.getFoundDecl()->isDefinedOutsideFunctionOrMethod());
+ ObjCInterfaceDecl *IFace = 0;
+ if (LookForIvars) {
+ IFace = CurMethod->getClassInterface();
+ ObjCInterfaceDecl *ClassDeclared;
+ ObjCIvarDecl *IV = 0;
+ if (IFace && (IV = IFace->lookupInstanceVariable(II, ClassDeclared))) {
+ // Diagnose using an ivar in a class method.
+ if (IsClassMethod)
+ return ExprError(Diag(Loc, diag::error_ivar_use_in_class_method)
+ << IV->getDeclName());
+
+ // If we're referencing an invalid decl, just return this as a silent
+ // error node. The error diagnostic was already emitted on the decl.
+ if (IV->isInvalidDecl())
+ return ExprError();
+
+ // Check if referencing a field with __attribute__((deprecated)).
+ if (DiagnoseUseOfDecl(IV, Loc))
+ return ExprError();
+
+ // Diagnose the use of an ivar outside of the declaring class.
+ if (IV->getAccessControl() == ObjCIvarDecl::Private &&
+ !declaresSameEntity(ClassDeclared, IFace) &&
+ !getLangOpts().DebuggerSupport)
+ Diag(Loc, diag::error_private_ivar_access) << IV->getDeclName();
+
+ // FIXME: This should use a new expr for a direct reference, don't
+ // turn this into Self->ivar, just return a BareIVarExpr or something.
+ IdentifierInfo &II = Context.Idents.get("self");
+ UnqualifiedId SelfName;
+ SelfName.setIdentifier(&II, SourceLocation());
+ SelfName.setKind(UnqualifiedId::IK_ImplicitSelfParam);
+ CXXScopeSpec SelfScopeSpec;
+ SourceLocation TemplateKWLoc;
+ ExprResult SelfExpr = ActOnIdExpression(S, SelfScopeSpec, TemplateKWLoc,
+ SelfName, false, false);
+ if (SelfExpr.isInvalid())
+ return ExprError();
+
+ SelfExpr = DefaultLvalueConversion(SelfExpr.take());
+ if (SelfExpr.isInvalid())
+ return ExprError();
+
+ MarkAnyDeclReferenced(Loc, IV);
+ return Owned(new (Context)
+ ObjCIvarRefExpr(IV, IV->getType(), Loc,
+ SelfExpr.take(), true, true));
+ }
+ } else if (CurMethod->isInstanceMethod()) {
+ // We should warn if a local variable hides an ivar.
+ if (ObjCInterfaceDecl *IFace = CurMethod->getClassInterface()) {
+ ObjCInterfaceDecl *ClassDeclared;
+ if (ObjCIvarDecl *IV = IFace->lookupInstanceVariable(II, ClassDeclared)) {
+ if (IV->getAccessControl() != ObjCIvarDecl::Private ||
+ declaresSameEntity(IFace, ClassDeclared))
+ Diag(Loc, diag::warn_ivar_use_hidden) << IV->getDeclName();
+ }
+ }
+ } else if (Lookup.isSingleResult() &&
+ Lookup.getFoundDecl()->isDefinedOutsideFunctionOrMethod()) {
+ // If accessing a stand-alone ivar in a class method, this is an error.
+ if (const ObjCIvarDecl *IV = dyn_cast<ObjCIvarDecl>(Lookup.getFoundDecl()))
+ return ExprError(Diag(Loc, diag::error_ivar_use_in_class_method)
+ << IV->getDeclName());
+ }
+
+ if (Lookup.empty() && II && AllowBuiltinCreation) {
+ // FIXME. Consolidate this with similar code in LookupName.
+ if (unsigned BuiltinID = II->getBuiltinID()) {
+ if (!(getLangOpts().CPlusPlus &&
+ Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))) {
+ NamedDecl *D = LazilyCreateBuiltin((IdentifierInfo *)II, BuiltinID,
+ S, Lookup.isForRedeclaration(),
+ Lookup.getNameLoc());
+ if (D) Lookup.addDecl(D);
+ }
+ }
+ }
+ // Sentinel value saying that we didn't do anything special.
+ return Owned((Expr*) 0);
+}
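+
+// For example, in an Objective-C instance method a bare ivar reference is
+// rewritten into an implicit access through 'self':
+//   @implementation Counter
+//   - (int)value { return _count; }   // built as self->_count
+//   @end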
+
+/// \brief Cast a base object to a member's actual type.
+///
+/// Logically this happens in three phases:
+///
+/// * First we cast from the base type to the naming class.
+/// The naming class is the class into which we were looking
+/// when we found the member; it's the qualifier type if a
+/// qualifier was provided, and otherwise it's the base type.
+///
+/// * Next we cast from the naming class to the declaring class.
+/// If the member we found was brought into a class's scope by
+/// a using declaration, this is that class; otherwise it's
+/// the class declaring the member.
+///
+/// * Finally we cast from the declaring class to the "true"
+/// declaring class of the member. This conversion does not
+/// obey access control.
+ExprResult
+Sema::PerformObjectMemberConversion(Expr *From,
+ NestedNameSpecifier *Qualifier,
+ NamedDecl *FoundDecl,
+ NamedDecl *Member) {
+ CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(Member->getDeclContext());
+ if (!RD)
+ return Owned(From);
+
+ QualType DestRecordType;
+ QualType DestType;
+ QualType FromRecordType;
+ QualType FromType = From->getType();
+ bool PointerConversions = false;
+ if (isa<FieldDecl>(Member)) {
+ DestRecordType = Context.getCanonicalType(Context.getTypeDeclType(RD));
+
+ if (FromType->getAs<PointerType>()) {
+ DestType = Context.getPointerType(DestRecordType);
+ FromRecordType = FromType->getPointeeType();
+ PointerConversions = true;
+ } else {
+ DestType = DestRecordType;
+ FromRecordType = FromType;
+ }
+ } else if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member)) {
+ if (Method->isStatic())
+ return Owned(From);
+
+ DestType = Method->getThisType(Context);
+ DestRecordType = DestType->getPointeeType();
+
+ if (FromType->getAs<PointerType>()) {
+ FromRecordType = FromType->getPointeeType();
+ PointerConversions = true;
+ } else {
+ FromRecordType = FromType;
+ DestType = DestRecordType;
+ }
+ } else {
+ // No conversion necessary.
+ return Owned(From);
+ }
+
+ if (DestType->isDependentType() || FromType->isDependentType())
+ return Owned(From);
+
+ // If the unqualified types are the same, no conversion is necessary.
+ if (Context.hasSameUnqualifiedType(FromRecordType, DestRecordType))
+ return Owned(From);
+
+ SourceRange FromRange = From->getSourceRange();
+ SourceLocation FromLoc = FromRange.getBegin();
+
+ ExprValueKind VK = From->getValueKind();
+
+ // C++ [class.member.lookup]p8:
+ // [...] Ambiguities can often be resolved by qualifying a name with its
+ // class name.
+ //
+  // If the member was a qualified name and the qualifier referred to a
+ // specific base subobject type, we'll cast to that intermediate type
+ // first and then to the object in which the member is declared. That allows
+ // one to resolve ambiguities in, e.g., a diamond-shaped hierarchy such as:
+ //
+ // class Base { public: int x; };
+ // class Derived1 : public Base { };
+ // class Derived2 : public Base { };
+ // class VeryDerived : public Derived1, public Derived2 { void f(); };
+ //
+ // void VeryDerived::f() {
+ // x = 17; // error: ambiguous base subobjects
+ // Derived1::x = 17; // okay, pick the Base subobject of Derived1
+ // }
+ if (Qualifier) {
+ QualType QType = QualType(Qualifier->getAsType(), 0);
+ assert(!QType.isNull() && "lookup done with dependent qualifier?");
+ assert(QType->isRecordType() && "lookup done with non-record type");
+
+ QualType QRecordType = QualType(QType->getAs<RecordType>(), 0);
+
+ // In C++98, the qualifier type doesn't actually have to be a base
+ // type of the object type, in which case we just ignore it.
+ // Otherwise build the appropriate casts.
+ if (IsDerivedFrom(FromRecordType, QRecordType)) {
+ CXXCastPath BasePath;
+ if (CheckDerivedToBaseConversion(FromRecordType, QRecordType,
+ FromLoc, FromRange, &BasePath))
+ return ExprError();
+
+ if (PointerConversions)
+ QType = Context.getPointerType(QType);
+ From = ImpCastExprToType(From, QType, CK_UncheckedDerivedToBase,
+ VK, &BasePath).take();
+
+ FromType = QType;
+ FromRecordType = QRecordType;
+
+ // If the qualifier type was the same as the destination type,
+ // we're done.
+ if (Context.hasSameUnqualifiedType(FromRecordType, DestRecordType))
+ return Owned(From);
+ }
+ }
+
+ bool IgnoreAccess = false;
+
+ // If we actually found the member through a using declaration, cast
+ // down to the using declaration's type.
+ //
+ // Pointer equality is fine here because only one declaration of a
+ // class ever has member declarations.
+ if (FoundDecl->getDeclContext() != Member->getDeclContext()) {
+ assert(isa<UsingShadowDecl>(FoundDecl));
+ QualType URecordType = Context.getTypeDeclType(
+ cast<CXXRecordDecl>(FoundDecl->getDeclContext()));
+
+ // We only need to do this if the naming-class to declaring-class
+ // conversion is non-trivial.
+ if (!Context.hasSameUnqualifiedType(FromRecordType, URecordType)) {
+ assert(IsDerivedFrom(FromRecordType, URecordType));
+ CXXCastPath BasePath;
+ if (CheckDerivedToBaseConversion(FromRecordType, URecordType,
+ FromLoc, FromRange, &BasePath))
+ return ExprError();
+
+ QualType UType = URecordType;
+ if (PointerConversions)
+ UType = Context.getPointerType(UType);
+ From = ImpCastExprToType(From, UType, CK_UncheckedDerivedToBase,
+ VK, &BasePath).take();
+ FromType = UType;
+ FromRecordType = URecordType;
+ }
+
+ // We don't do access control for the conversion from the
+ // declaring class to the true declaring class.
+ IgnoreAccess = true;
+ }
+
+ CXXCastPath BasePath;
+ if (CheckDerivedToBaseConversion(FromRecordType, DestRecordType,
+ FromLoc, FromRange, &BasePath,
+ IgnoreAccess))
+ return ExprError();
+
+ return ImpCastExprToType(From, DestType, CK_UncheckedDerivedToBase,
+ VK, &BasePath);
+}
+
+bool Sema::UseArgumentDependentLookup(const CXXScopeSpec &SS,
+ const LookupResult &R,
+ bool HasTrailingLParen) {
+ // Only when used directly as the postfix-expression of a call.
+ if (!HasTrailingLParen)
+ return false;
+
+ // Never if a scope specifier was provided.
+ if (SS.isSet())
+ return false;
+
+ // Only in C++ or ObjC++.
+ if (!getLangOpts().CPlusPlus)
+ return false;
+
+ // Turn off ADL when we find certain kinds of declarations during
+ // normal lookup:
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
+ NamedDecl *D = *I;
+
+ // C++0x [basic.lookup.argdep]p3:
+ // -- a declaration of a class member
+ // Since using decls preserve this property, we check this on the
+ // original decl.
+ if (D->isCXXClassMember())
+ return false;
+
+ // C++0x [basic.lookup.argdep]p3:
+ // -- a block-scope function declaration that is not a
+ // using-declaration
+ // NOTE: we also trigger this for function templates (in fact, we
+ // don't check the decl type at all, since all other decl types
+ // turn off ADL anyway).
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+ else if (D->getDeclContext()->isFunctionOrMethod())
+ return false;
+
+ // C++0x [basic.lookup.argdep]p3:
+    //     -- a declaration that is neither a function nor a function
+ // template
+ // And also for builtin functions.
+ if (isa<FunctionDecl>(D)) {
+ FunctionDecl *FDecl = cast<FunctionDecl>(D);
+
+ // But also builtin functions.
+ if (FDecl->getBuiltinID() && FDecl->isImplicit())
+ return false;
+ } else if (!isa<FunctionTemplateDecl>(D))
+ return false;
+ }
+
+ return true;
+}
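+
+// For example:
+//   namespace N { struct S {}; void f(S); }
+//   void g(N::S s) {
+//     f(s);      // unqualified call: ADL is used and finds N::f
+//     N::f(s);   // qualified name: ADL is not used
+//   }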
+
+
+/// Diagnoses obvious problems with the use of the given declaration
+/// as an expression. This is only actually called for lookups that
+/// were not overloaded, and it doesn't promise that the declaration
+/// will in fact be used.
+static bool CheckDeclInExpr(Sema &S, SourceLocation Loc, NamedDecl *D) {
+ if (isa<TypedefNameDecl>(D)) {
+ S.Diag(Loc, diag::err_unexpected_typedef) << D->getDeclName();
+ return true;
+ }
+
+ if (isa<ObjCInterfaceDecl>(D)) {
+ S.Diag(Loc, diag::err_unexpected_interface) << D->getDeclName();
+ return true;
+ }
+
+ if (isa<NamespaceDecl>(D)) {
+ S.Diag(Loc, diag::err_unexpected_namespace) << D->getDeclName();
+ return true;
+ }
+
+ return false;
+}
+
+ExprResult
+Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
+ LookupResult &R,
+ bool NeedsADL) {
+ // If this is a single, fully-resolved result and we don't need ADL,
+ // just build an ordinary singleton decl ref.
+ if (!NeedsADL && R.isSingleResult() && !R.getAsSingle<FunctionTemplateDecl>())
+ return BuildDeclarationNameExpr(SS, R.getLookupNameInfo(),
+ R.getFoundDecl());
+
+ // We only need to check the declaration if there's exactly one
+ // result, because in the overloaded case the results can only be
+ // functions and function templates.
+ if (R.isSingleResult() &&
+ CheckDeclInExpr(*this, R.getNameLoc(), R.getFoundDecl()))
+ return ExprError();
+
+ // Otherwise, just build an unresolved lookup expression. Suppress
+ // any lookup-related diagnostics; we'll hash these out later, when
+ // we've picked a target.
+ R.suppressDiagnostics();
+
+ UnresolvedLookupExpr *ULE
+ = UnresolvedLookupExpr::Create(Context, R.getNamingClass(),
+ SS.getWithLocInContext(Context),
+ R.getLookupNameInfo(),
+ NeedsADL, R.isOverloadedResult(),
+ R.begin(), R.end());
+
+ return Owned(ULE);
+}
+
+/// \brief Complete semantic analysis for a reference to the given declaration.
+ExprResult
+Sema::BuildDeclarationNameExpr(const CXXScopeSpec &SS,
+ const DeclarationNameInfo &NameInfo,
+ NamedDecl *D) {
+ assert(D && "Cannot refer to a NULL declaration");
+ assert(!isa<FunctionTemplateDecl>(D) &&
+ "Cannot refer unambiguously to a function template");
+
+ SourceLocation Loc = NameInfo.getLoc();
+ if (CheckDeclInExpr(*this, Loc, D))
+ return ExprError();
+
+ if (TemplateDecl *Template = dyn_cast<TemplateDecl>(D)) {
+ // Specifically diagnose references to class templates that are missing
+ // a template argument list.
+ Diag(Loc, diag::err_template_decl_ref)
+ << Template << SS.getRange();
+ Diag(Template->getLocation(), diag::note_template_decl_here);
+ return ExprError();
+ }
+
+ // Make sure that we're referring to a value.
+ ValueDecl *VD = dyn_cast<ValueDecl>(D);
+ if (!VD) {
+ Diag(Loc, diag::err_ref_non_value)
+ << D << SS.getRange();
+ Diag(D->getLocation(), diag::note_declared_at);
+ return ExprError();
+ }
+
+ // Check whether this declaration can be used. Note that we suppress
+ // this check when we're going to perform argument-dependent lookup
+ // on this function name, because this might not be the function
+ // that overload resolution actually selects.
+ if (DiagnoseUseOfDecl(VD, Loc))
+ return ExprError();
+
+ // Only create DeclRefExpr's for valid Decl's.
+ if (VD->isInvalidDecl())
+ return ExprError();
+
+ // Handle members of anonymous structs and unions. If we got here,
+ // and the reference is to a class member indirect field, then this
+ // must be the subject of a pointer-to-member expression.
+ if (IndirectFieldDecl *indirectField = dyn_cast<IndirectFieldDecl>(VD))
+ if (!indirectField->isCXXClassMember())
+ return BuildAnonymousStructUnionMemberReference(SS, NameInfo.getLoc(),
+ indirectField);
+
+ {
+ QualType type = VD->getType();
+ ExprValueKind valueKind = VK_RValue;
+
+ switch (D->getKind()) {
+ // Ignore all the non-ValueDecl kinds.
+#define ABSTRACT_DECL(kind)
+#define VALUE(type, base)
+#define DECL(type, base) \
+ case Decl::type:
+#include "clang/AST/DeclNodes.inc"
+ llvm_unreachable("invalid value decl kind");
+
+ // These shouldn't make it here.
+ case Decl::ObjCAtDefsField:
+ case Decl::ObjCIvar:
+ llvm_unreachable("forming non-member reference to ivar?");
+
+ // Enum constants are always r-values and never references.
+ // Unresolved using declarations are dependent.
+ case Decl::EnumConstant:
+ case Decl::UnresolvedUsingValue:
+ valueKind = VK_RValue;
+ break;
+
+ // Fields and indirect fields that got here must be for
+ // pointer-to-member expressions; we just call them l-values for
+ // internal consistency, because this subexpression doesn't really
+ // exist in the high-level semantics.
+ case Decl::Field:
+ case Decl::IndirectField:
+ assert(getLangOpts().CPlusPlus &&
+ "building reference to field in C?");
+
+ // These can't have reference type in well-formed programs, but
+ // for internal consistency we do this anyway.
+ type = type.getNonReferenceType();
+ valueKind = VK_LValue;
+ break;
+
+ // Non-type template parameters are either l-values or r-values
+ // depending on the type.
+ case Decl::NonTypeTemplateParm: {
+ if (const ReferenceType *reftype = type->getAs<ReferenceType>()) {
+ type = reftype->getPointeeType();
+ valueKind = VK_LValue; // even if the parameter is an r-value reference
+ break;
+ }
+
+ // For non-references, we need to strip qualifiers just in case
+ // the template parameter was declared as 'const int' or whatever.
+ valueKind = VK_RValue;
+ type = type.getUnqualifiedType();
+ break;
+ }
+
+ case Decl::Var:
+ // In C, "extern void blah;" is valid and is an r-value.
+ if (!getLangOpts().CPlusPlus &&
+ !type.hasQualifiers() &&
+ type->isVoidType()) {
+ valueKind = VK_RValue;
+ break;
+ }
+ // fallthrough
+
+ case Decl::ImplicitParam:
+ case Decl::ParmVar: {
+ // These are always l-values.
+ valueKind = VK_LValue;
+ type = type.getNonReferenceType();
+
+ // FIXME: Does the addition of const really only apply in
+ // potentially-evaluated contexts? Since the variable isn't actually
+ // captured in an unevaluated context, it seems that the answer is no.
+ if (ExprEvalContexts.back().Context != Sema::Unevaluated) {
+ QualType CapturedType = getCapturedDeclRefType(cast<VarDecl>(VD), Loc);
+ if (!CapturedType.isNull())
+ type = CapturedType;
+ }
+
+ break;
+ }
+
+ case Decl::Function: {
+ const FunctionType *fty = type->castAs<FunctionType>();
+
+ // If we're referring to a function with an __unknown_anytype
+ // result type, make the entire expression __unknown_anytype.
+ if (fty->getResultType() == Context.UnknownAnyTy) {
+ type = Context.UnknownAnyTy;
+ valueKind = VK_RValue;
+ break;
+ }
+
+ // Functions are l-values in C++.
+ if (getLangOpts().CPlusPlus) {
+ valueKind = VK_LValue;
+ break;
+ }
+
+ // C99 DR 316 says that, if a function type comes from a
+ // function definition (without a prototype), that type is only
+ // used for checking compatibility. Therefore, when referencing
+ // the function, we pretend that we don't have the full function
+ // type.
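+ // For example, for a K&R-style definition 'int f(a) int a; { ... }', a
+ // reference to 'f' here behaves as if 'f' had the unprototyped type 'int ()'.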
+ if (!cast<FunctionDecl>(VD)->hasPrototype() &&
+ isa<FunctionProtoType>(fty))
+ type = Context.getFunctionNoProtoType(fty->getResultType(),
+ fty->getExtInfo());
+
+ // Functions are r-values in C.
+ valueKind = VK_RValue;
+ break;
+ }
+
+ case Decl::CXXMethod:
+ // If we're referring to a method with an __unknown_anytype
+ // result type, make the entire expression __unknown_anytype.
+ // This should only be possible with a type written directly.
+ if (const FunctionProtoType *proto
+ = dyn_cast<FunctionProtoType>(VD->getType()))
+ if (proto->getResultType() == Context.UnknownAnyTy) {
+ type = Context.UnknownAnyTy;
+ valueKind = VK_RValue;
+ break;
+ }
+
+ // C++ methods are l-values if static, r-values if non-static.
+ if (cast<CXXMethodDecl>(VD)->isStatic()) {
+ valueKind = VK_LValue;
+ break;
+ }
+ // fallthrough
+
+ case Decl::CXXConversion:
+ case Decl::CXXDestructor:
+ case Decl::CXXConstructor:
+ valueKind = VK_RValue;
+ break;
+ }
+
+ return BuildDeclRefExpr(VD, type, valueKind, NameInfo, &SS);
+ }
+}
+
+ExprResult Sema::ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind) {
+ PredefinedExpr::IdentType IT;
+
+ switch (Kind) {
+ default: llvm_unreachable("Unknown simple primary expr!");
+ case tok::kw___func__: IT = PredefinedExpr::Func; break; // [C99 6.4.2.2]
+ case tok::kw___FUNCTION__: IT = PredefinedExpr::Function; break;
+ case tok::kw___PRETTY_FUNCTION__: IT = PredefinedExpr::PrettyFunction; break;
+ }
+
+ // Pre-defined identifiers are of type char[x], where x is the length of the
+ // string.
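+ // For example, inside 'void foo()', __func__ has type 'const char [4]',
+ // i.e. "foo" plus the terminating null character.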
+
+ Decl *currentDecl = getCurFunctionOrMethodDecl();
+ if (!currentDecl && getCurBlock())
+ currentDecl = getCurBlock()->TheDecl;
+ if (!currentDecl) {
+ Diag(Loc, diag::ext_predef_outside_function);
+ currentDecl = Context.getTranslationUnitDecl();
+ }
+
+ QualType ResTy;
+ if (cast<DeclContext>(currentDecl)->isDependentContext()) {
+ ResTy = Context.DependentTy;
+ } else {
+ unsigned Length = PredefinedExpr::ComputeName(IT, currentDecl).length();
+
+ llvm::APInt LengthI(32, Length + 1);
+ ResTy = Context.CharTy.withConst();
+ ResTy = Context.getConstantArrayType(ResTy, LengthI, ArrayType::Normal, 0);
+ }
+ return Owned(new (Context) PredefinedExpr(Loc, ResTy, IT));
+}
+
+ExprResult Sema::ActOnCharacterConstant(const Token &Tok, Scope *UDLScope) {
+ SmallString<16> CharBuffer;
+ bool Invalid = false;
+ StringRef ThisTok = PP.getSpelling(Tok, CharBuffer, &Invalid);
+ if (Invalid)
+ return ExprError();
+
+ CharLiteralParser Literal(ThisTok.begin(), ThisTok.end(), Tok.getLocation(),
+ PP, Tok.getKind());
+ if (Literal.hadError())
+ return ExprError();
+
+ QualType Ty;
+ if (Literal.isWide())
+ Ty = Context.WCharTy; // L'x' -> wchar_t in C and C++.
+ else if (Literal.isUTF16())
+ Ty = Context.Char16Ty; // u'x' -> char16_t in C11 and C++11.
+ else if (Literal.isUTF32())
+ Ty = Context.Char32Ty; // U'x' -> char32_t in C11 and C++11.
+ else if (!getLangOpts().CPlusPlus || Literal.isMultiChar())
+ Ty = Context.IntTy; // 'x' -> int in C, 'wxyz' -> int in C++.
+ else
+ Ty = Context.CharTy; // 'x' -> char in C++
+
+ CharacterLiteral::CharacterKind Kind = CharacterLiteral::Ascii;
+ if (Literal.isWide())
+ Kind = CharacterLiteral::Wide;
+ else if (Literal.isUTF16())
+ Kind = CharacterLiteral::UTF16;
+ else if (Literal.isUTF32())
+ Kind = CharacterLiteral::UTF32;
+
+ Expr *Lit = new (Context) CharacterLiteral(Literal.getValue(), Kind, Ty,
+ Tok.getLocation());
+
+ if (Literal.getUDSuffix().empty())
+ return Owned(Lit);
+
+ // We're building a user-defined literal.
+ IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix());
+ SourceLocation UDSuffixLoc =
+ getUDSuffixLoc(*this, Tok.getLocation(), Literal.getUDSuffixOffset());
+
+ // Make sure we're allowed user-defined literals here.
+ if (!UDLScope)
+ return ExprError(Diag(UDSuffixLoc, diag::err_invalid_character_udl));
+
+ // C++11 [lex.ext]p6: The literal L is treated as a call of the form
+ // operator "" X (ch)
+ return BuildCookedLiteralOperatorCall(*this, UDLScope, UDSuffix, UDSuffixLoc,
+ llvm::makeArrayRef(&Lit, 1),
+ Tok.getLocation());
+}
+
+ExprResult Sema::ActOnIntegerConstant(SourceLocation Loc, uint64_t Val) {
+ unsigned IntSize = Context.getTargetInfo().getIntWidth();
+ return Owned(IntegerLiteral::Create(Context, llvm::APInt(IntSize, Val),
+ Context.IntTy, Loc));
+}
+
+static Expr *BuildFloatingLiteral(Sema &S, NumericLiteralParser &Literal,
+ QualType Ty, SourceLocation Loc) {
+ const llvm::fltSemantics &Format = S.Context.getFloatTypeSemantics(Ty);
+
+ using llvm::APFloat;
+ APFloat Val(Format);
+
+ APFloat::opStatus result = Literal.GetFloatValue(Val);
+
+ // Overflow is always an error, but underflow is only an error if
+ // we underflowed to zero (APFloat reports denormals as underflow).
+ if ((result & APFloat::opOverflow) ||
+ ((result & APFloat::opUnderflow) && Val.isZero())) {
+ unsigned diagnostic;
+ SmallString<20> buffer;
+ if (result & APFloat::opOverflow) {
+ diagnostic = diag::warn_float_overflow;
+ APFloat::getLargest(Format).toString(buffer);
+ } else {
+ diagnostic = diag::warn_float_underflow;
+ APFloat::getSmallest(Format).toString(buffer);
+ }
+
+ S.Diag(Loc, diagnostic)
+ << Ty
+ << StringRef(buffer.data(), buffer.size());
+ }
+
+ bool isExact = (result == APFloat::opOK);
+ return FloatingLiteral::Create(S.Context, Val, isExact, Ty, Loc);
+}
+
+ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
+ // Fast path for a single digit (which is quite common). A single digit
+ // cannot have a trigraph, escaped newline, radix prefix, or suffix.
+ if (Tok.getLength() == 1) {
+ const char Val = PP.getSpellingOfSingleCharacterNumericConstant(Tok);
+ return ActOnIntegerConstant(Tok.getLocation(), Val-'0');
+ }
+
+ SmallString<512> IntegerBuffer;
+ // Add padding so that NumericLiteralParser can overread by one character.
+ IntegerBuffer.resize(Tok.getLength()+1);
+ const char *ThisTokBegin = &IntegerBuffer[0];
+
+ // Get the spelling of the token, which eliminates trigraphs, etc.
+ bool Invalid = false;
+ unsigned ActualLength = PP.getSpelling(Tok, ThisTokBegin, &Invalid);
+ if (Invalid)
+ return ExprError();
+
+ NumericLiteralParser Literal(ThisTokBegin, ThisTokBegin+ActualLength,
+ Tok.getLocation(), PP);
+ if (Literal.hadError)
+ return ExprError();
+
+ if (Literal.hasUDSuffix()) {
+ // We're building a user-defined literal.
+ IdentifierInfo *UDSuffix = &Context.Idents.get(Literal.getUDSuffix());
+ SourceLocation UDSuffixLoc =
+ getUDSuffixLoc(*this, Tok.getLocation(), Literal.getUDSuffixOffset());
+
+ // Make sure we're allowed user-defined literals here.
+ if (!UDLScope)
+ return ExprError(Diag(UDSuffixLoc, diag::err_invalid_numeric_udl));
+
+ QualType CookedTy;
+ if (Literal.isFloatingLiteral()) {
+ // C++11 [lex.ext]p4: If S contains a literal operator with parameter type
+ // long double, the literal is treated as a call of the form
+ // operator "" X (f L)
+ CookedTy = Context.LongDoubleTy;
+ } else {
+ // C++11 [lex.ext]p3: If S contains a literal operator with parameter type
+ // unsigned long long, the literal is treated as a call of the form
+ // operator "" X (n ULL)
+ CookedTy = Context.UnsignedLongLongTy;
+ }
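+ // For example, for a suffix '_x' with only a cooked literal operator,
+ // 1.5_x becomes operator "" _x(1.5L) and 12_x becomes operator "" _x(12ULL).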
+
+ DeclarationName OpName =
+ Context.DeclarationNames.getCXXLiteralOperatorName(UDSuffix);
+ DeclarationNameInfo OpNameInfo(OpName, UDSuffixLoc);
+ OpNameInfo.setCXXLiteralOperatorNameLoc(UDSuffixLoc);
+
+ // Perform literal operator lookup to determine if we're building a raw
+ // literal or a cooked one.
+ LookupResult R(*this, OpName, UDSuffixLoc, LookupOrdinaryName);
+ switch (LookupLiteralOperator(UDLScope, R, llvm::makeArrayRef(&CookedTy, 1),
+ /*AllowRawAndTemplate*/true)) {
+ case LOLR_Error:
+ return ExprError();
+
+ case LOLR_Cooked: {
+ Expr *Lit;
+ if (Literal.isFloatingLiteral()) {
+ Lit = BuildFloatingLiteral(*this, Literal, CookedTy, Tok.getLocation());
+ } else {
+ llvm::APInt ResultVal(Context.getTargetInfo().getLongLongWidth(), 0);
+ if (Literal.GetIntegerValue(ResultVal))
+ Diag(Tok.getLocation(), diag::warn_integer_too_large);
+ Lit = IntegerLiteral::Create(Context, ResultVal, CookedTy,
+ Tok.getLocation());
+ }
+ return BuildLiteralOperatorCall(R, OpNameInfo,
+ llvm::makeArrayRef(&Lit, 1),
+ Tok.getLocation());
+ }
+
+ case LOLR_Raw: {
+ // C++11 [lex.ext]p3, p4: If S contains a raw literal operator, the
+ // literal is treated as a call of the form
+ // operator "" X ("n")
+ SourceLocation TokLoc = Tok.getLocation();
+ unsigned Length = Literal.getUDSuffixOffset();
+ QualType StrTy = Context.getConstantArrayType(
+ Context.CharTy, llvm::APInt(32, Length + 1),
+ ArrayType::Normal, 0);
+ Expr *Lit = StringLiteral::Create(
+ Context, StringRef(ThisTokBegin, Length), StringLiteral::Ascii,
+ /*Pascal*/false, StrTy, &TokLoc, 1);
+ return BuildLiteralOperatorCall(R, OpNameInfo,
+ llvm::makeArrayRef(&Lit, 1), TokLoc);
+ }
+
+ case LOLR_Template:
+ // C++11 [lex.ext]p3, p4: Otherwise (S contains a literal operator
+ // template), L is treated as a call of the form
+ // operator "" X <'c1', 'c2', ... 'ck'>()
+ // where n is the source character sequence c1 c2 ... ck.
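+ // For example, for a literal operator template with suffix '_x', the literal
+ // 123_x becomes the call operator "" _x<'1', '2', '3'>().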
+ TemplateArgumentListInfo ExplicitArgs;
+ unsigned CharBits = Context.getIntWidth(Context.CharTy);
+ bool CharIsUnsigned = Context.CharTy->isUnsignedIntegerType();
+ llvm::APSInt Value(CharBits, CharIsUnsigned);
+ for (unsigned I = 0, N = Literal.getUDSuffixOffset(); I != N; ++I) {
+ Value = ThisTokBegin[I];
+ TemplateArgument Arg(Value, Context.CharTy);
+ TemplateArgumentLocInfo ArgInfo;
+ ExplicitArgs.addArgument(TemplateArgumentLoc(Arg, ArgInfo));
+ }
+ return BuildLiteralOperatorCall(R, OpNameInfo, ArrayRef<Expr*>(),
+ Tok.getLocation(), &ExplicitArgs);
+ }
+
+ llvm_unreachable("unexpected literal operator lookup result");
+ }
+
+ Expr *Res;
+
+ if (Literal.isFloatingLiteral()) {
+ QualType Ty;
+ if (Literal.isFloat)
+ Ty = Context.FloatTy;
+ else if (!Literal.isLong)
+ Ty = Context.DoubleTy;
+ else
+ Ty = Context.LongDoubleTy;
+
+ Res = BuildFloatingLiteral(*this, Literal, Ty, Tok.getLocation());
+
+ if (Ty == Context.DoubleTy) {
+ if (getLangOpts().SinglePrecisionConstants) {
+ Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).take();
+ } else if (getLangOpts().OpenCL && !getOpenCLOptions().cl_khr_fp64) {
+ Diag(Tok.getLocation(), diag::warn_double_const_requires_fp64);
+ Res = ImpCastExprToType(Res, Context.FloatTy, CK_FloatingCast).take();
+ }
+ }
+ } else if (!Literal.isIntegerLiteral()) {
+ return ExprError();
+ } else {
+ QualType Ty;
+
+ // long long is a C99 feature.
+ if (!getLangOpts().C99 && Literal.isLongLong)
+ Diag(Tok.getLocation(),
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_longlong : diag::ext_longlong);
+
+ // Get the value in the widest-possible width.
+ llvm::APInt ResultVal(Context.getTargetInfo().getIntMaxTWidth(), 0);
+
+ if (Literal.GetIntegerValue(ResultVal)) {
+ // If this value didn't fit into uintmax_t, warn and force to ull.
+ Diag(Tok.getLocation(), diag::warn_integer_too_large);
+ Ty = Context.UnsignedLongLongTy;
+ assert(Context.getTypeSize(Ty) == ResultVal.getBitWidth() &&
+ "long long is not intmax_t?");
+ } else {
+ // If this value fits into a ULL, try to figure out what else it fits into
+ // according to the rules of C99 6.4.4.1p5.
+
+ // Octal, Hexadecimal, and integers with a U suffix are allowed to
+ // be an unsigned int.
+ bool AllowUnsigned = Literal.isUnsigned || Literal.getRadix() != 10;
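+ // For example, on a target with 32-bit int and long, 0xFFFFFFFF becomes
+ // 'unsigned int', while the decimal literal 4294967295 (no suffix) may not
+ // become unsigned and so ends up as 'long long'.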
+
+ // Check from smallest to largest, picking the smallest type we can.
+ unsigned Width = 0;
+ if (!Literal.isLong && !Literal.isLongLong) {
+ // Are int/unsigned possibilities?
+ unsigned IntSize = Context.getTargetInfo().getIntWidth();
+
+ // Does it fit in an unsigned int?
+ if (ResultVal.isIntN(IntSize)) {
+ // Does it fit in a signed int?
+ if (!Literal.isUnsigned && ResultVal[IntSize-1] == 0)
+ Ty = Context.IntTy;
+ else if (AllowUnsigned)
+ Ty = Context.UnsignedIntTy;
+ Width = IntSize;
+ }
+ }
+
+ // Are long/unsigned long possibilities?
+ if (Ty.isNull() && !Literal.isLongLong) {
+ unsigned LongSize = Context.getTargetInfo().getLongWidth();
+
+ // Does it fit in an unsigned long?
+ if (ResultVal.isIntN(LongSize)) {
+ // Does it fit in a signed long?
+ if (!Literal.isUnsigned && ResultVal[LongSize-1] == 0)
+ Ty = Context.LongTy;
+ else if (AllowUnsigned)
+ Ty = Context.UnsignedLongTy;
+ Width = LongSize;
+ }
+ }
+
+ // Finally, check long long if needed.
+ if (Ty.isNull()) {
+ unsigned LongLongSize = Context.getTargetInfo().getLongLongWidth();
+
+ // Does it fit in an unsigned long long?
+ if (ResultVal.isIntN(LongLongSize)) {
+ // Does it fit in a signed long long?
+ // To be compatible with MSVC, hex integer literals ending with the
+ // LL or i64 suffix are always signed in Microsoft mode.
+ if (!Literal.isUnsigned && (ResultVal[LongLongSize-1] == 0 ||
+ (getLangOpts().MicrosoftExt && Literal.isLongLong)))
+ Ty = Context.LongLongTy;
+ else if (AllowUnsigned)
+ Ty = Context.UnsignedLongLongTy;
+ Width = LongLongSize;
+ }
+ }
+
+ // If we still couldn't decide a type, we probably have something that
+ // does not fit in a signed long long, but has no U suffix.
+ if (Ty.isNull()) {
+ Diag(Tok.getLocation(), diag::warn_integer_too_large_for_signed);
+ Ty = Context.UnsignedLongLongTy;
+ Width = Context.getTargetInfo().getLongLongWidth();
+ }
+
+ if (ResultVal.getBitWidth() != Width)
+ ResultVal = ResultVal.trunc(Width);
+ }
+ Res = IntegerLiteral::Create(Context, ResultVal, Ty, Tok.getLocation());
+ }
+
+ // If this is an imaginary literal, create the ImaginaryLiteral wrapper.
+ if (Literal.isImaginary)
+ Res = new (Context) ImaginaryLiteral(Res,
+ Context.getComplexType(Res->getType()));
+
+ return Owned(Res);
+}
+
+ExprResult Sema::ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E) {
+ assert((E != 0) && "ActOnParenExpr() missing expr");
+ return Owned(new (Context) ParenExpr(L, R, E));
+}
+
+static bool CheckVecStepTraitOperandType(Sema &S, QualType T,
+ SourceLocation Loc,
+ SourceRange ArgRange) {
+ // [OpenCL 1.1 6.11.12] "The vec_step built-in function takes a built-in
+ // scalar or vector data type argument..."
+ // Every built-in scalar type (OpenCL 1.1 6.1.1) is either an arithmetic
+ // type (C99 6.2.5p18) or void.
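+ // For example, vec_step(float4) is 4 and vec_step(int) is 1; vec_step(float3)
+ // is 4, since 3-component vectors are sized like 4-component vectors.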
+ if (!(T->isArithmeticType() || T->isVoidType() || T->isVectorType())) {
+ S.Diag(Loc, diag::err_vecstep_non_scalar_vector_type)
+ << T << ArgRange;
+ return true;
+ }
+
+ assert((T->isVoidType() || !T->isIncompleteType()) &&
+ "Scalar types should always be complete");
+ return false;
+}
+
+static bool CheckExtensionTraitOperandType(Sema &S, QualType T,
+ SourceLocation Loc,
+ SourceRange ArgRange,
+ UnaryExprOrTypeTrait TraitKind) {
+ // C99 6.5.3.4p1:
+ if (T->isFunctionType()) {
+ // alignof(function) is allowed as an extension.
+ if (TraitKind == UETT_SizeOf)
+ S.Diag(Loc, diag::ext_sizeof_function_type) << ArgRange;
+ return false;
+ }
+
+ // Allow sizeof(void)/alignof(void) as an extension.
+ if (T->isVoidType()) {
+ S.Diag(Loc, diag::ext_sizeof_void_type) << TraitKind << ArgRange;
+ return false;
+ }
+
+ return true;
+}
+
+static bool CheckObjCTraitOperandConstraints(Sema &S, QualType T,
+ SourceLocation Loc,
+ SourceRange ArgRange,
+ UnaryExprOrTypeTrait TraitKind) {
+ // Reject sizeof(interface) and sizeof(interface<proto>) in 64-bit mode.
+ if (S.LangOpts.ObjCNonFragileABI && T->isObjCObjectType()) {
+ S.Diag(Loc, diag::err_sizeof_nonfragile_interface)
+ << T << (TraitKind == UETT_SizeOf)
+ << ArgRange;
+ return true;
+ }
+
+ return false;
+}
+
+ /// \brief Check the constraints on expression operands to unary type expression
+ /// and type traits.
+///
+/// Completes any types necessary and validates the constraints on the operand
+/// expression. The logic mostly mirrors the type-based overload, but may modify
+/// the expression as it completes the type for that expression through template
+/// instantiation, etc.
+bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
+ UnaryExprOrTypeTrait ExprKind) {
+ QualType ExprTy = E->getType();
+
+ // C++ [expr.sizeof]p2: "When applied to a reference or a reference type,
+ // the result is the size of the referenced type."
+ // C++ [expr.alignof]p3: "When alignof is applied to a reference type, the
+ // result shall be the alignment of the referenced type."
+ if (const ReferenceType *Ref = ExprTy->getAs<ReferenceType>())
+ ExprTy = Ref->getPointeeType();
+
+ if (ExprKind == UETT_VecStep)
+ return CheckVecStepTraitOperandType(*this, ExprTy, E->getExprLoc(),
+ E->getSourceRange());
+
+ // Whitelist some types as extensions
+ if (!CheckExtensionTraitOperandType(*this, ExprTy, E->getExprLoc(),
+ E->getSourceRange(), ExprKind))
+ return false;
+
+ if (RequireCompleteExprType(E,
+ PDiag(diag::err_sizeof_alignof_incomplete_type)
+ << ExprKind << E->getSourceRange(),
+ std::make_pair(SourceLocation(), PDiag(0))))
+ return true;
+
+ // Completing the expression's type may have changed it.
+ ExprTy = E->getType();
+ if (const ReferenceType *Ref = ExprTy->getAs<ReferenceType>())
+ ExprTy = Ref->getPointeeType();
+
+ if (CheckObjCTraitOperandConstraints(*this, ExprTy, E->getExprLoc(),
+ E->getSourceRange(), ExprKind))
+ return true;
+
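+ // Warn on 'sizeof' of a parameter that was declared with array syntax, since
+ // such a parameter really has pointer type: in 'void f(int a[10])',
+ // sizeof(a) is sizeof(int *), not 10 * sizeof(int).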
+ if (ExprKind == UETT_SizeOf) {
+ if (DeclRefExpr *DeclRef = dyn_cast<DeclRefExpr>(E->IgnoreParens())) {
+ if (ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(DeclRef->getFoundDecl())) {
+ QualType OType = PVD->getOriginalType();
+ QualType Type = PVD->getType();
+ if (Type->isPointerType() && OType->isArrayType()) {
+ Diag(E->getExprLoc(), diag::warn_sizeof_array_param)
+ << Type << OType;
+ Diag(PVD->getLocation(), diag::note_declared_at);
+ }
+ }
+ }
+ }
+
+ return false;
+}
+
+/// \brief Check the constraints on operands to unary expression and type
+/// traits.
+///
+/// This will complete any types necessary, and validate the various constraints
+/// on those operands.
+///
+/// The UsualUnaryConversions() function is *not* called by this routine.
+/// C99 6.3.2.1p[2-4] all state:
+/// Except when it is the operand of the sizeof operator ...
+///
+/// C++ [expr.sizeof]p4
+/// The lvalue-to-rvalue, array-to-pointer, and function-to-pointer
+/// standard conversions are not applied to the operand of sizeof.
+///
+/// This policy is followed for all of the unary trait expressions.
+bool Sema::CheckUnaryExprOrTypeTraitOperand(QualType ExprType,
+ SourceLocation OpLoc,
+ SourceRange ExprRange,
+ UnaryExprOrTypeTrait ExprKind) {
+ if (ExprType->isDependentType())
+ return false;
+
+ // C++ [expr.sizeof]p2: "When applied to a reference or a reference type,
+ // the result is the size of the referenced type."
+ // C++ [expr.alignof]p3: "When alignof is applied to a reference type, the
+ // result shall be the alignment of the referenced type."
+ if (const ReferenceType *Ref = ExprType->getAs<ReferenceType>())
+ ExprType = Ref->getPointeeType();
+
+ if (ExprKind == UETT_VecStep)
+ return CheckVecStepTraitOperandType(*this, ExprType, OpLoc, ExprRange);
+
+ // Whitelist some types as extensions
+ if (!CheckExtensionTraitOperandType(*this, ExprType, OpLoc, ExprRange,
+ ExprKind))
+ return false;
+
+ if (RequireCompleteType(OpLoc, ExprType,
+ PDiag(diag::err_sizeof_alignof_incomplete_type)
+ << ExprKind << ExprRange))
+ return true;
+
+ if (CheckObjCTraitOperandConstraints(*this, ExprType, OpLoc, ExprRange,
+ ExprKind))
+ return true;
+
+ return false;
+}
+
+static bool CheckAlignOfExpr(Sema &S, Expr *E) {
+ E = E->IgnoreParens();
+
+ // alignof decl is always ok.
+ if (isa<DeclRefExpr>(E))
+ return false;
+
+ // Cannot know anything else if the expression is dependent.
+ if (E->isTypeDependent())
+ return false;
+
+ if (E->getBitField()) {
+ S.Diag(E->getExprLoc(), diag::err_sizeof_alignof_bitfield)
+ << 1 << E->getSourceRange();
+ return true;
+ }
+
+ // Alignment of a field access is always okay, so long as it isn't a
+ // bit-field.
+ if (MemberExpr *ME = dyn_cast<MemberExpr>(E))
+ if (isa<FieldDecl>(ME->getMemberDecl()))
+ return false;
+
+ return S.CheckUnaryExprOrTypeTraitOperand(E, UETT_AlignOf);
+}
+
+bool Sema::CheckVecStepExpr(Expr *E) {
+ E = E->IgnoreParens();
+
+ // Cannot know anything else if the expression is dependent.
+ if (E->isTypeDependent())
+ return false;
+
+ return CheckUnaryExprOrTypeTraitOperand(E, UETT_VecStep);
+}
+
+/// \brief Build a sizeof or alignof expression given a type operand.
+ExprResult
+Sema::CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
+ SourceLocation OpLoc,
+ UnaryExprOrTypeTrait ExprKind,
+ SourceRange R) {
+ if (!TInfo)
+ return ExprError();
+
+ QualType T = TInfo->getType();
+
+ if (!T->isDependentType() &&
+ CheckUnaryExprOrTypeTraitOperand(T, OpLoc, R, ExprKind))
+ return ExprError();
+
+ // C99 6.5.3.4p4: the type (an unsigned integer type) is size_t.
+ return Owned(new (Context) UnaryExprOrTypeTraitExpr(ExprKind, TInfo,
+ Context.getSizeType(),
+ OpLoc, R.getEnd()));
+}
+
+/// \brief Build a sizeof or alignof expression given an expression
+/// operand.
+ExprResult
+Sema::CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
+ UnaryExprOrTypeTrait ExprKind) {
+ ExprResult PE = CheckPlaceholderExpr(E);
+ if (PE.isInvalid())
+ return ExprError();
+
+ E = PE.get();
+
+ // Verify that the operand is valid.
+ bool isInvalid = false;
+ if (E->isTypeDependent()) {
+ // Delay type-checking for type-dependent expressions.
+ } else if (ExprKind == UETT_AlignOf) {
+ isInvalid = CheckAlignOfExpr(*this, E);
+ } else if (ExprKind == UETT_VecStep) {
+ isInvalid = CheckVecStepExpr(E);
+ } else if (E->getBitField()) { // C99 6.5.3.4p1.
+ Diag(E->getExprLoc(), diag::err_sizeof_alignof_bitfield) << 0;
+ isInvalid = true;
+ } else {
+ isInvalid = CheckUnaryExprOrTypeTraitOperand(E, UETT_SizeOf);
+ }
+
+ if (isInvalid)
+ return ExprError();
+
+ if (ExprKind == UETT_SizeOf && E->getType()->isVariableArrayType()) {
+ PE = TransformToPotentiallyEvaluated(E);
+ if (PE.isInvalid()) return ExprError();
+ E = PE.take();
+ }
+
+ // C99 6.5.3.4p4: the type (an unsigned integer type) is size_t.
+ return Owned(new (Context) UnaryExprOrTypeTraitExpr(
+ ExprKind, E, Context.getSizeType(), OpLoc,
+ E->getSourceRange().getEnd()));
+}
+
+/// ActOnUnaryExprOrTypeTraitExpr - Handle @c sizeof(type) and @c sizeof @c
+/// expr and the same for @c alignof and @c __alignof
+/// Note that the ArgRange is invalid if isType is false.
+ExprResult
+Sema::ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
+ UnaryExprOrTypeTrait ExprKind, bool IsType,
+ void *TyOrEx, const SourceRange &ArgRange) {
+ // If error parsing type, ignore.
+ if (TyOrEx == 0) return ExprError();
+
+ if (IsType) {
+ TypeSourceInfo *TInfo;
+ (void) GetTypeFromParser(ParsedType::getFromOpaquePtr(TyOrEx), &TInfo);
+ return CreateUnaryExprOrTypeTraitExpr(TInfo, OpLoc, ExprKind, ArgRange);
+ }
+
+ Expr *ArgEx = (Expr *)TyOrEx;
+ ExprResult Result = CreateUnaryExprOrTypeTraitExpr(ArgEx, OpLoc, ExprKind);
+ return move(Result);
+}
+
+static QualType CheckRealImagOperand(Sema &S, ExprResult &V, SourceLocation Loc,
+ bool IsReal) {
+ if (V.get()->isTypeDependent())
+ return S.Context.DependentTy;
+
+ // _Real and _Imag are only l-values for normal l-values.
+ if (V.get()->getObjectKind() != OK_Ordinary) {
+ V = S.DefaultLvalueConversion(V.take());
+ if (V.isInvalid())
+ return QualType();
+ }
+
+ // These operators return the element type of a complex type.
+ if (const ComplexType *CT = V.get()->getType()->getAs<ComplexType>())
+ return CT->getElementType();
+
+ // Otherwise they pass through real integer and floating point types here.
+ if (V.get()->getType()->isArithmeticType())
+ return V.get()->getType();
+
+ // Test for placeholders.
+ ExprResult PR = S.CheckPlaceholderExpr(V.get());
+ if (PR.isInvalid()) return QualType();
+ if (PR.get() != V.get()) {
+ V = move(PR);
+ return CheckRealImagOperand(S, V, Loc, IsReal);
+ }
+
+ // Reject anything else.
+ S.Diag(Loc, diag::err_realimag_invalid_type) << V.get()->getType()
+ << (IsReal ? "__real" : "__imag");
+ return QualType();
+}
+
+
+
+ExprResult
+Sema::ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
+ tok::TokenKind Kind, Expr *Input) {
+ UnaryOperatorKind Opc;
+ switch (Kind) {
+ default: llvm_unreachable("Unknown unary op!");
+ case tok::plusplus: Opc = UO_PostInc; break;
+ case tok::minusminus: Opc = UO_PostDec; break;
+ }
+
+ // Since this might be a postfix expression, get rid of ParenListExprs.
+ ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Input);
+ if (Result.isInvalid()) return ExprError();
+ Input = Result.take();
+
+ return BuildUnaryOp(S, OpLoc, Opc, Input);
+}
+
+ExprResult
+Sema::ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
+ Expr *Idx, SourceLocation RLoc) {
+ // Since this might be a postfix expression, get rid of ParenListExprs.
+ ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Base);
+ if (Result.isInvalid()) return ExprError();
+ Base = Result.take();
+
+ Expr *LHSExp = Base, *RHSExp = Idx;
+
+ if (getLangOpts().CPlusPlus &&
+ (LHSExp->isTypeDependent() || RHSExp->isTypeDependent())) {
+ return Owned(new (Context) ArraySubscriptExpr(LHSExp, RHSExp,
+ Context.DependentTy,
+ VK_LValue, OK_Ordinary,
+ RLoc));
+ }
+
+ if (getLangOpts().CPlusPlus &&
+ (LHSExp->getType()->isRecordType() ||
+ LHSExp->getType()->isEnumeralType() ||
+ RHSExp->getType()->isRecordType() ||
+ RHSExp->getType()->isEnumeralType()) &&
+ !LHSExp->getType()->isObjCObjectPointerType()) {
+ return CreateOverloadedArraySubscriptExpr(LLoc, RLoc, Base, Idx);
+ }
+
+ return CreateBuiltinArraySubscriptExpr(Base, LLoc, Idx, RLoc);
+}
+
+
+ExprResult
+Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
+ Expr *Idx, SourceLocation RLoc) {
+ Expr *LHSExp = Base;
+ Expr *RHSExp = Idx;
+
+ // Perform default conversions.
+ if (!LHSExp->getType()->getAs<VectorType>()) {
+ ExprResult Result = DefaultFunctionArrayLvalueConversion(LHSExp);
+ if (Result.isInvalid())
+ return ExprError();
+ LHSExp = Result.take();
+ }
+ ExprResult Result = DefaultFunctionArrayLvalueConversion(RHSExp);
+ if (Result.isInvalid())
+ return ExprError();
+ RHSExp = Result.take();
+
+ QualType LHSTy = LHSExp->getType(), RHSTy = RHSExp->getType();
+ ExprValueKind VK = VK_LValue;
+ ExprObjectKind OK = OK_Ordinary;
+
+ // C99 6.5.2.1p2: the expression e1[e2] is by definition precisely equivalent
+ // to the expression *((e1)+(e2)). This means the array "Base" may actually be
+ // in the subscript position. As a result, we need to derive the array base
+ // and index from the expression types.
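+ // For example, given 'int *p', both p[4] and 4[p] are valid and equivalent
+ // to *(p + 4).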
+ Expr *BaseExpr, *IndexExpr;
+ QualType ResultType;
+ if (LHSTy->isDependentType() || RHSTy->isDependentType()) {
+ BaseExpr = LHSExp;
+ IndexExpr = RHSExp;
+ ResultType = Context.DependentTy;
+ } else if (const PointerType *PTy = LHSTy->getAs<PointerType>()) {
+ BaseExpr = LHSExp;
+ IndexExpr = RHSExp;
+ ResultType = PTy->getPointeeType();
+ } else if (const ObjCObjectPointerType *PTy =
+ LHSTy->getAs<ObjCObjectPointerType>()) {
+ BaseExpr = LHSExp;
+ IndexExpr = RHSExp;
+ Result = BuildObjCSubscriptExpression(RLoc, BaseExpr, IndexExpr, 0, 0);
+ if (!Result.isInvalid())
+ return Owned(Result.take());
+ ResultType = PTy->getPointeeType();
+ } else if (const PointerType *PTy = RHSTy->getAs<PointerType>()) {
+ // Handle the uncommon case of "123[Ptr]".
+ BaseExpr = RHSExp;
+ IndexExpr = LHSExp;
+ ResultType = PTy->getPointeeType();
+ } else if (const ObjCObjectPointerType *PTy =
+ RHSTy->getAs<ObjCObjectPointerType>()) {
+ // Handle the uncommon case of "123[Ptr]".
+ BaseExpr = RHSExp;
+ IndexExpr = LHSExp;
+ ResultType = PTy->getPointeeType();
+ } else if (const VectorType *VTy = LHSTy->getAs<VectorType>()) {
+ BaseExpr = LHSExp; // vectors: V[123]
+ IndexExpr = RHSExp;
+ VK = LHSExp->getValueKind();
+ if (VK != VK_RValue)
+ OK = OK_VectorComponent;
+
+ // FIXME: need to deal with const...
+ ResultType = VTy->getElementType();
+ } else if (LHSTy->isArrayType()) {
+ // If we see an array that wasn't promoted by
+ // DefaultFunctionArrayLvalueConversion, it must be an array that
+ // wasn't promoted because of the C90 rule that doesn't
+ // allow promoting non-lvalue arrays. Warn, then
+ // force the promotion here.
+ Diag(LHSExp->getLocStart(), diag::ext_subscript_non_lvalue) <<
+ LHSExp->getSourceRange();
+ LHSExp = ImpCastExprToType(LHSExp, Context.getArrayDecayedType(LHSTy),
+ CK_ArrayToPointerDecay).take();
+ LHSTy = LHSExp->getType();
+
+ BaseExpr = LHSExp;
+ IndexExpr = RHSExp;
+ ResultType = LHSTy->getAs<PointerType>()->getPointeeType();
+ } else if (RHSTy->isArrayType()) {
+ // Same as previous, except for 123[f().a] case
+ Diag(RHSExp->getLocStart(), diag::ext_subscript_non_lvalue) <<
+ RHSExp->getSourceRange();
+ RHSExp = ImpCastExprToType(RHSExp, Context.getArrayDecayedType(RHSTy),
+ CK_ArrayToPointerDecay).take();
+ RHSTy = RHSExp->getType();
+
+ BaseExpr = RHSExp;
+ IndexExpr = LHSExp;
+ ResultType = RHSTy->getAs<PointerType>()->getPointeeType();
+ } else {
+ return ExprError(Diag(LLoc, diag::err_typecheck_subscript_value)
+ << LHSExp->getSourceRange() << RHSExp->getSourceRange());
+ }
+ // C99 6.5.2.1p1
+ if (!IndexExpr->getType()->isIntegerType() && !IndexExpr->isTypeDependent())
+ return ExprError(Diag(LLoc, diag::err_typecheck_subscript_not_integer)
+ << IndexExpr->getSourceRange());
+
+ if ((IndexExpr->getType()->isSpecificBuiltinType(BuiltinType::Char_S) ||
+ IndexExpr->getType()->isSpecificBuiltinType(BuiltinType::Char_U))
+ && !IndexExpr->isTypeDependent())
+ Diag(LLoc, diag::warn_subscript_is_char) << IndexExpr->getSourceRange();
+
+ // C99 6.5.2.1p1: "shall have type "pointer to *object* type". Similarly,
+ // C++ [expr.sub]p1: The type "T" shall be a completely-defined object
+ // type. Note that Functions are not objects, and that (in C99 parlance)
+ // incomplete types are not object types.
+ if (ResultType->isFunctionType()) {
+ Diag(BaseExpr->getLocStart(), diag::err_subscript_function_type)
+ << ResultType << BaseExpr->getSourceRange();
+ return ExprError();
+ }
+
+ if (ResultType->isVoidType() && !getLangOpts().CPlusPlus) {
+ // GNU extension: subscripting on pointer to void
+ Diag(LLoc, diag::ext_gnu_subscript_void_type)
+ << BaseExpr->getSourceRange();
+
+ // C forbids expressions of unqualified void type from being l-values.
+ // See IsCForbiddenLValueType.
+ if (!ResultType.hasQualifiers()) VK = VK_RValue;
+ } else if (!ResultType->isDependentType() &&
+ RequireCompleteType(LLoc, ResultType,
+ PDiag(diag::err_subscript_incomplete_type)
+ << BaseExpr->getSourceRange()))
+ return ExprError();
+
+ // Diagnose bad cases where we step over interface counts.
+ if (ResultType->isObjCObjectType() && LangOpts.ObjCNonFragileABI) {
+ Diag(LLoc, diag::err_subscript_nonfragile_interface)
+ << ResultType << BaseExpr->getSourceRange();
+ return ExprError();
+ }
+
+ assert(VK == VK_RValue || LangOpts.CPlusPlus ||
+ !ResultType.isCForbiddenLValueType());
+
+ return Owned(new (Context) ArraySubscriptExpr(LHSExp, RHSExp,
+ ResultType, VK, OK, RLoc));
+}
+
+ExprResult Sema::BuildCXXDefaultArgExpr(SourceLocation CallLoc,
+ FunctionDecl *FD,
+ ParmVarDecl *Param) {
+ if (Param->hasUnparsedDefaultArg()) {
+ Diag(CallLoc,
+ diag::err_use_of_default_argument_to_function_declared_later) <<
+ FD << cast<CXXRecordDecl>(FD->getDeclContext())->getDeclName();
+ Diag(UnparsedDefaultArgLocs[Param],
+ diag::note_default_argument_declared_here);
+ return ExprError();
+ }
+
+ if (Param->hasUninstantiatedDefaultArg()) {
+ Expr *UninstExpr = Param->getUninstantiatedDefaultArg();
+
+ // Instantiate the expression.
+ MultiLevelTemplateArgumentList ArgList
+ = getTemplateInstantiationArgs(FD, 0, /*RelativeToPrimary=*/true);
+
+ std::pair<const TemplateArgument *, unsigned> Innermost
+ = ArgList.getInnermost();
+ InstantiatingTemplate Inst(*this, CallLoc, Param, Innermost.first,
+ Innermost.second);
+
+ ExprResult Result;
+ {
+ // C++ [dcl.fct.default]p5:
+ // The names in the [default argument] expression are bound, and
+ // the semantic constraints are checked, at the point where the
+ // default argument expression appears.
+ ContextRAII SavedContext(*this, FD);
+ LocalInstantiationScope Local(*this);
+ Result = SubstExpr(UninstExpr, ArgList);
+ }
+ if (Result.isInvalid())
+ return ExprError();
+
+ // Check the expression as an initializer for the parameter.
+ InitializedEntity Entity
+ = InitializedEntity::InitializeParameter(Context, Param);
+ InitializationKind Kind
+ = InitializationKind::CreateCopy(Param->getLocation(),
+ /*FIXME:EqualLoc*/UninstExpr->getLocStart());
+ Expr *ResultE = Result.takeAs<Expr>();
+
+ InitializationSequence InitSeq(*this, Entity, Kind, &ResultE, 1);
+ Result = InitSeq.Perform(*this, Entity, Kind,
+ MultiExprArg(*this, &ResultE, 1));
+ if (Result.isInvalid())
+ return ExprError();
+
+ // Build the default argument expression.
+ return Owned(CXXDefaultArgExpr::Create(Context, CallLoc, Param,
+ Result.takeAs<Expr>()));
+ }
+
+ // If the default expression creates temporaries, we need to
+ // push them to the current stack of expression temporaries so they'll
+ // be properly destroyed.
+ // FIXME: We should really be rebuilding the default argument with new
+ // bound temporaries; see the comment in PR5810.
+ // We don't need to do that with block decls, though, because
+ // blocks in default argument expressions can never capture anything.
+ if (isa<ExprWithCleanups>(Param->getInit())) {
+ // Set the "needs cleanups" bit regardless of whether there are
+ // any explicit objects.
+ ExprNeedsCleanups = true;
+
+ // Append all the objects to the cleanup list. Right now, this
+ // should always be a no-op, because blocks in default argument
+ // expressions should never be able to capture anything.
+ assert(!cast<ExprWithCleanups>(Param->getInit())->getNumObjects() &&
+ "default argument expression has capturing blocks?");
+ }
+
+ // We already type-checked the argument, so we know it works.
+ // Just mark all of the declarations in this potentially-evaluated expression
+ // as being "referenced".
+ MarkDeclarationsReferencedInExpr(Param->getDefaultArg(),
+ /*SkipLocalVariables=*/true);
+ return Owned(CXXDefaultArgExpr::Create(Context, CallLoc, Param));
+}
+
+/// ConvertArgumentsForCall - Converts the arguments specified in
+/// Args/NumArgs to the parameter types of the function FDecl with
+/// function prototype Proto. Call is the call expression itself, and
+/// Fn is the function expression. For a C++ member function, this
+/// routine does not attempt to convert the object argument. Returns
+/// true if the call is ill-formed.
+bool
+Sema::ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
+ FunctionDecl *FDecl,
+ const FunctionProtoType *Proto,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation RParenLoc,
+ bool IsExecConfig) {
+ // Bail out early if calling a builtin with custom typechecking.
+ if (FDecl)
+ if (unsigned ID = FDecl->getBuiltinID())
+ if (Context.BuiltinInfo.hasCustomTypechecking(ID))
+ return false;
+
+ // C99 6.5.2.2p7 - the arguments are implicitly converted, as if by
+ // assignment, to the types of the corresponding parameter, ...
+ unsigned NumArgsInProto = Proto->getNumArgs();
+ bool Invalid = false;
+ unsigned MinArgs = FDecl ? FDecl->getMinRequiredArguments() : NumArgsInProto;
+ unsigned FnKind = Fn->getType()->isBlockPointerType()
+ ? 1 /* block */
+ : (IsExecConfig ? 3 /* kernel function (exec config) */
+ : 0 /* function */);
+
+ // If too few arguments are available (and we don't have default
+ // arguments for the remaining parameters), don't make the call.
+ if (NumArgs < NumArgsInProto) {
+ if (NumArgs < MinArgs) {
+ Diag(RParenLoc, MinArgs == NumArgsInProto
+ ? diag::err_typecheck_call_too_few_args
+ : diag::err_typecheck_call_too_few_args_at_least)
+ << FnKind
+ << MinArgs << NumArgs << Fn->getSourceRange();
+
+ // Emit the location of the prototype.
+ if (FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
+ Diag(FDecl->getLocStart(), diag::note_callee_decl)
+ << FDecl;
+
+ return true;
+ }
+ Call->setNumArgs(Context, NumArgsInProto);
+ }
+
+ // If too many are passed and not variadic, error on the extras and drop
+ // them.
+ if (NumArgs > NumArgsInProto) {
+ if (!Proto->isVariadic()) {
+ Diag(Args[NumArgsInProto]->getLocStart(),
+ MinArgs == NumArgsInProto
+ ? diag::err_typecheck_call_too_many_args
+ : diag::err_typecheck_call_too_many_args_at_most)
+ << FnKind
+ << NumArgsInProto << NumArgs << Fn->getSourceRange()
+ << SourceRange(Args[NumArgsInProto]->getLocStart(),
+ Args[NumArgs-1]->getLocEnd());
+
+ // Emit the location of the prototype.
+ if (FDecl && !FDecl->getBuiltinID() && !IsExecConfig)
+ Diag(FDecl->getLocStart(), diag::note_callee_decl)
+ << FDecl;
+
+ // This deletes the extra arguments.
+ Call->setNumArgs(Context, NumArgsInProto);
+ return true;
+ }
+ }
+ SmallVector<Expr *, 8> AllArgs;
+ VariadicCallType CallType =
+ Proto->isVariadic() ? VariadicFunction : VariadicDoesNotApply;
+ if (Fn->getType()->isBlockPointerType())
+ CallType = VariadicBlock; // Block
+ else if (isa<MemberExpr>(Fn))
+ CallType = VariadicMethod;
+ Invalid = GatherArgumentsForCall(Call->getLocStart(), FDecl,
+ Proto, 0, Args, NumArgs, AllArgs, CallType);
+ if (Invalid)
+ return true;
+ unsigned TotalNumArgs = AllArgs.size();
+ for (unsigned i = 0; i < TotalNumArgs; ++i)
+ Call->setArg(i, AllArgs[i]);
+
+ return false;
+}
+
+bool Sema::GatherArgumentsForCall(SourceLocation CallLoc,
+ FunctionDecl *FDecl,
+ const FunctionProtoType *Proto,
+ unsigned FirstProtoArg,
+ Expr **Args, unsigned NumArgs,
+ SmallVector<Expr *, 8> &AllArgs,
+ VariadicCallType CallType,
+ bool AllowExplicit) {
+ unsigned NumArgsInProto = Proto->getNumArgs();
+ unsigned NumArgsToCheck = NumArgs;
+ bool Invalid = false;
+ if (NumArgs != NumArgsInProto)
+ // Use default arguments for missing arguments
+ NumArgsToCheck = NumArgsInProto;
+ unsigned ArgIx = 0;
+ // Continue to check argument types (even if we have too few/many args).
+ for (unsigned i = FirstProtoArg; i != NumArgsToCheck; i++) {
+ QualType ProtoArgType = Proto->getArgType(i);
+
+ Expr *Arg;
+ ParmVarDecl *Param;
+ if (ArgIx < NumArgs) {
+ Arg = Args[ArgIx++];
+
+ if (RequireCompleteType(Arg->getLocStart(),
+ ProtoArgType,
+ PDiag(diag::err_call_incomplete_argument)
+ << Arg->getSourceRange()))
+ return true;
+
+ // Pass the argument
+ Param = 0;
+ if (FDecl && i < FDecl->getNumParams())
+ Param = FDecl->getParamDecl(i);
+
+ // Strip the unbridged-cast placeholder expression off, if applicable.
+ if (Arg->getType() == Context.ARCUnbridgedCastTy &&
+ FDecl && FDecl->hasAttr<CFAuditedTransferAttr>() &&
+ (!Param || !Param->hasAttr<CFConsumedAttr>()))
+ Arg = stripARCUnbridgedCast(Arg);
+
+ InitializedEntity Entity =
+ Param? InitializedEntity::InitializeParameter(Context, Param)
+ : InitializedEntity::InitializeParameter(Context, ProtoArgType,
+ Proto->isArgConsumed(i));
+ ExprResult ArgE = PerformCopyInitialization(Entity,
+ SourceLocation(),
+ Owned(Arg),
+ /*TopLevelOfInitList=*/false,
+ AllowExplicit);
+ if (ArgE.isInvalid())
+ return true;
+
+ Arg = ArgE.takeAs<Expr>();
+ } else {
+ Param = FDecl->getParamDecl(i);
+
+ ExprResult ArgExpr =
+ BuildCXXDefaultArgExpr(CallLoc, FDecl, Param);
+ if (ArgExpr.isInvalid())
+ return true;
+
+ Arg = ArgExpr.takeAs<Expr>();
+ }
+
+ // Check for array bounds violations for each argument to the call. This
+ // check only triggers warnings when the argument isn't a more complex Expr
+ // with its own checking, such as a BinaryOperator.
+ CheckArrayAccess(Arg);
+
+ // Check for violations of C99 static array rules (C99 6.7.5.3p7).
+ CheckStaticArrayArgument(CallLoc, Param, Arg);
+
+ AllArgs.push_back(Arg);
+ }
+
+ // If this is a variadic call, handle args passed through "...".
+ if (CallType != VariadicDoesNotApply) {
+
+ // Assume that extern "C" functions with variadic arguments that
+ // return __unknown_anytype aren't *really* variadic.
+ if (Proto->getResultType() == Context.UnknownAnyTy &&
+ FDecl && FDecl->isExternC()) {
+ for (unsigned i = ArgIx; i != NumArgs; ++i) {
+ ExprResult arg;
+ if (isa<ExplicitCastExpr>(Args[i]->IgnoreParens()))
+ arg = DefaultFunctionArrayLvalueConversion(Args[i]);
+ else
+ arg = DefaultVariadicArgumentPromotion(Args[i], CallType, FDecl);
+ Invalid |= arg.isInvalid();
+ AllArgs.push_back(arg.take());
+ }
+
+ // Otherwise do argument promotion (C99 6.5.2.2p7).
+ } else {
+ for (unsigned i = ArgIx; i != NumArgs; ++i) {
+ ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], CallType,
+ FDecl);
+ Invalid |= Arg.isInvalid();
+ AllArgs.push_back(Arg.take());
+ }
+ }
+
+ // Check for array bounds violations.
+ for (unsigned i = ArgIx; i != NumArgs; ++i)
+ CheckArrayAccess(Args[i]);
+ }
+ return Invalid;
+}
+
+static void DiagnoseCalleeStaticArrayParam(Sema &S, ParmVarDecl *PVD) {
+ TypeLoc TL = PVD->getTypeSourceInfo()->getTypeLoc();
+ if (ArrayTypeLoc *ATL = dyn_cast<ArrayTypeLoc>(&TL))
+ S.Diag(PVD->getLocation(), diag::note_callee_static_array)
+ << ATL->getLocalSourceRange();
+}
+
+/// CheckStaticArrayArgument - If the given argument corresponds to a static
+/// array parameter, check that it is non-null, and that if it is formed by
+/// array-to-pointer decay, the underlying array is sufficiently large.
+///
+/// C99 6.7.5.3p7: If the keyword static also appears within the [ and ] of the
+/// array type derivation, then for each call to the function, the value of the
+/// corresponding actual argument shall provide access to the first element of
+/// an array with at least as many elements as specified by the size expression.
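+ ///
+ /// For example, for a parameter declared as 'int a[static 10]', passing a null
+ /// pointer or an array known to have fewer than 10 elements is diagnosed.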
+void
+Sema::CheckStaticArrayArgument(SourceLocation CallLoc,
+ ParmVarDecl *Param,
+ const Expr *ArgExpr) {
+ // Static array parameters are not supported in C++.
+ if (!Param || getLangOpts().CPlusPlus)
+ return;
+
+ QualType OrigTy = Param->getOriginalType();
+
+ const ArrayType *AT = Context.getAsArrayType(OrigTy);
+ if (!AT || AT->getSizeModifier() != ArrayType::Static)
+ return;
+
+ if (ArgExpr->isNullPointerConstant(Context,
+ Expr::NPC_NeverValueDependent)) {
+ Diag(CallLoc, diag::warn_null_arg) << ArgExpr->getSourceRange();
+ DiagnoseCalleeStaticArrayParam(*this, Param);
+ return;
+ }
+
+ const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT);
+ if (!CAT)
+ return;
+
+ const ConstantArrayType *ArgCAT =
+ Context.getAsConstantArrayType(ArgExpr->IgnoreParenImpCasts()->getType());
+ if (!ArgCAT)
+ return;
+
+ if (ArgCAT->getSize().ult(CAT->getSize())) {
+ Diag(CallLoc, diag::warn_static_array_too_small)
+ << ArgExpr->getSourceRange()
+ << (unsigned) ArgCAT->getSize().getZExtValue()
+ << (unsigned) CAT->getSize().getZExtValue();
+ DiagnoseCalleeStaticArrayParam(*this, Param);
+ }
+}
+
+/// Given a function expression of unknown-any type, try to rebuild it
+/// to have a function type.
+static ExprResult rebuildUnknownAnyFunction(Sema &S, Expr *fn);
+
+/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
+/// This provides the location of the left/right parens and a list of comma
+/// locations.
+ExprResult
+Sema::ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
+ MultiExprArg ArgExprs, SourceLocation RParenLoc,
+ Expr *ExecConfig, bool IsExecConfig) {
+ unsigned NumArgs = ArgExprs.size();
+
+ // Since this might be a postfix expression, get rid of ParenListExprs.
+ ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Fn);
+ if (Result.isInvalid()) return ExprError();
+ Fn = Result.take();
+
+ Expr **Args = ArgExprs.release();
+
+ if (getLangOpts().CPlusPlus) {
+ // If this is a pseudo-destructor expression, build the call immediately.
+ if (isa<CXXPseudoDestructorExpr>(Fn)) {
+ if (NumArgs > 0) {
+ // Pseudo-destructor calls should not have any arguments.
+ Diag(Fn->getLocStart(), diag::err_pseudo_dtor_call_with_args)
+ << FixItHint::CreateRemoval(
+ SourceRange(Args[0]->getLocStart(),
+ Args[NumArgs-1]->getLocEnd()));
+ }
+
+ return Owned(new (Context) CallExpr(Context, Fn, 0, 0, Context.VoidTy,
+ VK_RValue, RParenLoc));
+ }
+
+ // Determine whether this is a dependent call inside a C++ template,
+ // in which case we won't do any semantic analysis now.
+ // FIXME: Will need to cache the results of name lookup (including ADL) in
+ // Fn.
+ bool Dependent = false;
+ if (Fn->isTypeDependent())
+ Dependent = true;
+ else if (Expr::hasAnyTypeDependentArguments(
+ llvm::makeArrayRef(Args, NumArgs)))
+ Dependent = true;
+
+ if (Dependent) {
+ if (ExecConfig) {
+ return Owned(new (Context) CUDAKernelCallExpr(
+ Context, Fn, cast<CallExpr>(ExecConfig), Args, NumArgs,
+ Context.DependentTy, VK_RValue, RParenLoc));
+ } else {
+ return Owned(new (Context) CallExpr(Context, Fn, Args, NumArgs,
+ Context.DependentTy, VK_RValue,
+ RParenLoc));
+ }
+ }
+
+ // Determine whether this is a call to an object (C++ [over.call.object]).
+ if (Fn->getType()->isRecordType())
+ return Owned(BuildCallToObjectOfClassType(S, Fn, LParenLoc, Args, NumArgs,
+ RParenLoc));
+
+ if (Fn->getType() == Context.UnknownAnyTy) {
+ ExprResult result = rebuildUnknownAnyFunction(*this, Fn);
+ if (result.isInvalid()) return ExprError();
+ Fn = result.take();
+ }
+
+ if (Fn->getType() == Context.BoundMemberTy) {
+ return BuildCallToMemberFunction(S, Fn, LParenLoc, Args, NumArgs,
+ RParenLoc);
+ }
+ }
+
+ // Check for overloaded calls. This can happen even in C due to extensions.
+ if (Fn->getType() == Context.OverloadTy) {
+ OverloadExpr::FindResult find = OverloadExpr::find(Fn);
+
+ // We aren't supposed to apply this logic if there's an '&' involved.
+ if (!find.HasFormOfMemberPointer) {
+ OverloadExpr *ovl = find.Expression;
+ if (isa<UnresolvedLookupExpr>(ovl)) {
+ UnresolvedLookupExpr *ULE = cast<UnresolvedLookupExpr>(ovl);
+ return BuildOverloadedCallExpr(S, Fn, ULE, LParenLoc, Args, NumArgs,
+ RParenLoc, ExecConfig);
+ } else {
+ return BuildCallToMemberFunction(S, Fn, LParenLoc, Args, NumArgs,
+ RParenLoc);
+ }
+ }
+ }
+
+ // If we're directly calling a function, get the appropriate declaration.
+ if (Fn->getType() == Context.UnknownAnyTy) {
+ ExprResult result = rebuildUnknownAnyFunction(*this, Fn);
+ if (result.isInvalid()) return ExprError();
+ Fn = result.take();
+ }
+
+ Expr *NakedFn = Fn->IgnoreParens();
+
+ NamedDecl *NDecl = 0;
+ if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(NakedFn))
+ if (UnOp->getOpcode() == UO_AddrOf)
+ NakedFn = UnOp->getSubExpr()->IgnoreParens();
+
+ if (isa<DeclRefExpr>(NakedFn))
+ NDecl = cast<DeclRefExpr>(NakedFn)->getDecl();
+ else if (isa<MemberExpr>(NakedFn))
+ NDecl = cast<MemberExpr>(NakedFn)->getMemberDecl();
+
+ return BuildResolvedCallExpr(Fn, NDecl, LParenLoc, Args, NumArgs, RParenLoc,
+ ExecConfig, IsExecConfig);
+}
+
+ExprResult
+Sema::ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
+ MultiExprArg ExecConfig, SourceLocation GGGLoc) {
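+ // Build the execution configuration of a CUDA kernel call, e.g. the
+ // '<<<grid, block>>>' part of 'kernel<<<grid, block>>>(args)', as a call to
+ // cudaConfigureCall.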
+ FunctionDecl *ConfigDecl = Context.getcudaConfigureCallDecl();
+ if (!ConfigDecl)
+ return ExprError(Diag(LLLLoc, diag::err_undeclared_var_use)
+ << "cudaConfigureCall");
+ QualType ConfigQTy = ConfigDecl->getType();
+
+ DeclRefExpr *ConfigDR = new (Context) DeclRefExpr(
+ ConfigDecl, false, ConfigQTy, VK_LValue, LLLLoc);
+ MarkFunctionReferenced(LLLLoc, ConfigDecl);
+
+ return ActOnCallExpr(S, ConfigDR, LLLLoc, ExecConfig, GGGLoc, 0,
+ /*IsExecConfig=*/true);
+}
+
+/// ActOnAsTypeExpr - create a new asType (bitcast) from the arguments.
+///
+/// __builtin_astype( value, dst type )
+///
+ExprResult Sema::ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
+ SourceLocation BuiltinLoc,
+ SourceLocation RParenLoc) {
+ ExprValueKind VK = VK_RValue;
+ ExprObjectKind OK = OK_Ordinary;
+ QualType DstTy = GetTypeFromParser(ParsedDestTy);
+ QualType SrcTy = E->getType();
+ if (Context.getTypeSize(DstTy) != Context.getTypeSize(SrcTy))
+ return ExprError(Diag(BuiltinLoc,
+ diag::err_invalid_astype_of_different_size)
+ << DstTy
+ << SrcTy
+ << E->getSourceRange());
+ return Owned(new (Context) AsTypeExpr(E, DstTy, VK, OK, BuiltinLoc,
+ RParenLoc));
+}
+
+/// BuildResolvedCallExpr - Build a call to a resolved expression,
+/// i.e. an expression not of \p OverloadTy. The expression should
+/// unary-convert to an expression of function-pointer or
+/// block-pointer type.
+///
+/// \param NDecl the declaration being called, if available
+ExprResult
+Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
+ SourceLocation LParenLoc,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation RParenLoc,
+ Expr *Config, bool IsExecConfig) {
+ FunctionDecl *FDecl = dyn_cast_or_null<FunctionDecl>(NDecl);
+
+ // Promote the function operand.
+ ExprResult Result = UsualUnaryConversions(Fn);
+ if (Result.isInvalid())
+ return ExprError();
+ Fn = Result.take();
+
+ // Make the call expr early, before semantic checks. This guarantees cleanup
+ // of arguments and function on error.
+ CallExpr *TheCall;
+ if (Config) {
+ TheCall = new (Context) CUDAKernelCallExpr(Context, Fn,
+ cast<CallExpr>(Config),
+ Args, NumArgs,
+ Context.BoolTy,
+ VK_RValue,
+ RParenLoc);
+ } else {
+ TheCall = new (Context) CallExpr(Context, Fn,
+ Args, NumArgs,
+ Context.BoolTy,
+ VK_RValue,
+ RParenLoc);
+ }
+
+ unsigned BuiltinID = (FDecl ? FDecl->getBuiltinID() : 0);
+
+ // Bail out early if calling a builtin with custom typechecking.
+ if (BuiltinID && Context.BuiltinInfo.hasCustomTypechecking(BuiltinID))
+ return CheckBuiltinFunctionCall(BuiltinID, TheCall);
+
+ retry:
+ const FunctionType *FuncT;
+ if (const PointerType *PT = Fn->getType()->getAs<PointerType>()) {
+ // C99 6.5.2.2p1 - "The expression that denotes the called function shall
+ // have type pointer to function".
+ FuncT = PT->getPointeeType()->getAs<FunctionType>();
+ if (FuncT == 0)
+ return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function)
+ << Fn->getType() << Fn->getSourceRange());
+ } else if (const BlockPointerType *BPT =
+ Fn->getType()->getAs<BlockPointerType>()) {
+ FuncT = BPT->getPointeeType()->castAs<FunctionType>();
+ } else {
+ // Handle calls to expressions of unknown-any type.
+ if (Fn->getType() == Context.UnknownAnyTy) {
+ ExprResult rewrite = rebuildUnknownAnyFunction(*this, Fn);
+ if (rewrite.isInvalid()) return ExprError();
+ Fn = rewrite.take();
+ TheCall->setCallee(Fn);
+ goto retry;
+ }
+
+ return ExprError(Diag(LParenLoc, diag::err_typecheck_call_not_function)
+ << Fn->getType() << Fn->getSourceRange());
+ }
+
+ if (getLangOpts().CUDA) {
+ if (Config) {
+ // CUDA: Kernel calls must be to global functions
+ if (FDecl && !FDecl->hasAttr<CUDAGlobalAttr>())
+ return ExprError(Diag(LParenLoc,diag::err_kern_call_not_global_function)
+ << FDecl->getName() << Fn->getSourceRange());
+
+ // CUDA: Kernel function must have 'void' return type
+ if (!FuncT->getResultType()->isVoidType())
+ return ExprError(Diag(LParenLoc, diag::err_kern_type_not_void_return)
+ << Fn->getType() << Fn->getSourceRange());
+ } else {
+ // CUDA: Calls to global functions must be configured
+ if (FDecl && FDecl->hasAttr<CUDAGlobalAttr>())
+ return ExprError(Diag(LParenLoc, diag::err_global_call_not_config)
+ << FDecl->getName() << Fn->getSourceRange());
+ }
+ }
+
+ // Check for a valid return type
+ if (CheckCallReturnType(FuncT->getResultType(),
+ Fn->getLocStart(), TheCall,
+ FDecl))
+ return ExprError();
+
+ // We know the result type of the call, set it.
+ TheCall->setType(FuncT->getCallResultType(Context));
+ TheCall->setValueKind(Expr::getValueKindForType(FuncT->getResultType()));
+
+ if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FuncT)) {
+ if (ConvertArgumentsForCall(TheCall, Fn, FDecl, Proto, Args, NumArgs,
+ RParenLoc, IsExecConfig))
+ return ExprError();
+ } else {
+ assert(isa<FunctionNoProtoType>(FuncT) && "Unknown FunctionType!");
+
+ if (FDecl) {
+ // Check if we have too few/too many arguments, based
+ // on our knowledge of the function definition.
+ const FunctionDecl *Def = 0;
+ if (FDecl->hasBody(Def) && NumArgs != Def->param_size()) {
+ const FunctionProtoType *Proto
+ = Def->getType()->getAs<FunctionProtoType>();
+ if (!Proto || !(Proto->isVariadic() && NumArgs >= Def->param_size()))
+ Diag(RParenLoc, diag::warn_call_wrong_number_of_arguments)
+ << (NumArgs > Def->param_size()) << FDecl << Fn->getSourceRange();
+ }
+
+ // If the function we're calling wasn't declared with a prototype, but we
+ // have a prototype from a prior declaration, use that prototype.
+ if (!FDecl->hasPrototype())
+ Proto = FDecl->getType()->getAs<FunctionProtoType>();
+ }
+
+ // Promote the arguments (C99 6.5.2.2p6).
+ for (unsigned i = 0; i != NumArgs; i++) {
+ Expr *Arg = Args[i];
+
+ if (Proto && i < Proto->getNumArgs()) {
+ InitializedEntity Entity
+ = InitializedEntity::InitializeParameter(Context,
+ Proto->getArgType(i),
+ Proto->isArgConsumed(i));
+ ExprResult ArgE = PerformCopyInitialization(Entity,
+ SourceLocation(),
+ Owned(Arg));
+ if (ArgE.isInvalid())
+ return true;
+
+ Arg = ArgE.takeAs<Expr>();
+
+ } else {
+ ExprResult ArgE = DefaultArgumentPromotion(Arg);
+
+ if (ArgE.isInvalid())
+ return true;
+
+ Arg = ArgE.takeAs<Expr>();
+ }
+
+ if (RequireCompleteType(Arg->getLocStart(),
+ Arg->getType(),
+ PDiag(diag::err_call_incomplete_argument)
+ << Arg->getSourceRange()))
+ return ExprError();
+
+ TheCall->setArg(i, Arg);
+ }
+ }
+
+ if (CXXMethodDecl *Method = dyn_cast_or_null<CXXMethodDecl>(FDecl))
+ if (!Method->isStatic())
+ return ExprError(Diag(LParenLoc, diag::err_member_call_without_object)
+ << Fn->getSourceRange());
+
+ // Check for sentinels
+ if (NDecl)
+ DiagnoseSentinelCalls(NDecl, LParenLoc, Args, NumArgs);
+
+ // Do special checking on direct calls to functions.
+ if (FDecl) {
+ if (CheckFunctionCall(FDecl, TheCall))
+ return ExprError();
+
+ if (BuiltinID)
+ return CheckBuiltinFunctionCall(BuiltinID, TheCall);
+ } else if (NDecl) {
+ if (CheckBlockCall(NDecl, TheCall))
+ return ExprError();
+ }
+
+ return MaybeBindToTemporary(TheCall);
+}
+
+ExprResult
+Sema::ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty,
+ SourceLocation RParenLoc, Expr *InitExpr) {
+ assert((Ty != 0) && "ActOnCompoundLiteral(): missing type");
+ // FIXME: put back this assert when initializers are worked out.
+ //assert((InitExpr != 0) && "ActOnCompoundLiteral(): missing expression");
+
+ TypeSourceInfo *TInfo;
+ QualType literalType = GetTypeFromParser(Ty, &TInfo);
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(literalType);
+
+ return BuildCompoundLiteralExpr(LParenLoc, TInfo, RParenLoc, InitExpr);
+}
+
+ExprResult
+Sema::BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo,
+ SourceLocation RParenLoc, Expr *LiteralExpr) {
+ QualType literalType = TInfo->getType();
+
+ if (literalType->isArrayType()) {
+ if (RequireCompleteType(LParenLoc, Context.getBaseElementType(literalType),
+ PDiag(diag::err_illegal_decl_array_incomplete_type)
+ << SourceRange(LParenLoc,
+ LiteralExpr->getSourceRange().getEnd())))
+ return ExprError();
+ if (literalType->isVariableArrayType())
+ return ExprError(Diag(LParenLoc, diag::err_variable_object_no_init)
+ << SourceRange(LParenLoc, LiteralExpr->getSourceRange().getEnd()));
+ } else if (!literalType->isDependentType() &&
+ RequireCompleteType(LParenLoc, literalType,
+ PDiag(diag::err_typecheck_decl_incomplete_type)
+ << SourceRange(LParenLoc,
+ LiteralExpr->getSourceRange().getEnd())))
+ return ExprError();
+
+ InitializedEntity Entity
+ = InitializedEntity::InitializeTemporary(literalType);
+ InitializationKind Kind
+ = InitializationKind::CreateCStyleCast(LParenLoc,
+ SourceRange(LParenLoc, RParenLoc),
+ /*InitList=*/true);
+ InitializationSequence InitSeq(*this, Entity, Kind, &LiteralExpr, 1);
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind,
+ MultiExprArg(*this, &LiteralExpr, 1),
+ &literalType);
+ if (Result.isInvalid())
+ return ExprError();
+ LiteralExpr = Result.get();
+
+ bool isFileScope = getCurFunctionOrMethodDecl() == 0;
+ if (isFileScope) { // 6.5.2.5p3
+ if (CheckForConstantInitializer(LiteralExpr, literalType))
+ return ExprError();
+ }
+
+ // In C, compound literals are l-values for some reason.
+ ExprValueKind VK = getLangOpts().CPlusPlus ? VK_RValue : VK_LValue;
+
+ return MaybeBindToTemporary(
+ new (Context) CompoundLiteralExpr(LParenLoc, TInfo, literalType,
+ VK, LiteralExpr, isFileScope));
+}
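+
+// For illustration (sketch): the checks above accept '(int[]){1, 2, 3}',
+// reject '(int[n]){0}' (a variable-length array compound literal) and
+// compound literals of incomplete non-array types, and at file scope
+// additionally require a constant initializer (C99 6.5.2.5p3).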
+
+ExprResult
+Sema::ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList,
+ SourceLocation RBraceLoc) {
+ unsigned NumInit = InitArgList.size();
+ Expr **InitList = InitArgList.release();
+
+ // Immediately handle non-overload placeholders. Overloads can be
+ // resolved contextually, but everything else here can't.
+ for (unsigned I = 0; I != NumInit; ++I) {
+ if (InitList[I]->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(InitList[I]);
+
+ // Ignore failures; dropping the entire initializer list because
+ // of one failure would be terrible for indexing/etc.
+ if (result.isInvalid()) continue;
+
+ InitList[I] = result.take();
+ }
+ }
+
+ // Semantic analysis for initializers is done by ActOnDeclarator() and
+ // CheckInitializer() - it requires knowledge of the object being initialized.
+
+ InitListExpr *E = new (Context) InitListExpr(Context, LBraceLoc, InitList,
+ NumInit, RBraceLoc);
+ E->setType(Context.VoidTy); // FIXME: just a place holder for now.
+ return Owned(E);
+}
+
+/// Do an explicit extend of the given block pointer if we're in ARC.
+static void maybeExtendBlockObject(Sema &S, ExprResult &E) {
+ assert(E.get()->getType()->isBlockPointerType());
+ assert(E.get()->isRValue());
+
+ // Only do this under ARC; otherwise leave the block pointer alone.
+ if (!S.getLangOpts().ObjCAutoRefCount) return;
+
+ E = ImplicitCastExpr::Create(S.Context, E.get()->getType(),
+ CK_ARCExtendBlockObject, E.get(),
+ /*base path*/ 0, VK_RValue);
+ S.ExprNeedsCleanups = true;
+}
+
+/// Prepare a conversion of the given expression to an ObjC object
+/// pointer type.
+CastKind Sema::PrepareCastToObjCObjectPointer(ExprResult &E) {
+ QualType type = E.get()->getType();
+ if (type->isObjCObjectPointerType()) {
+ return CK_BitCast;
+ } else if (type->isBlockPointerType()) {
+ maybeExtendBlockObject(*this, E);
+ return CK_BlockPointerToObjCPointerCast;
+ } else {
+ assert(type->isPointerType());
+ return CK_CPointerToObjCPointerCast;
+ }
+}
+
+/// Prepares for a scalar cast, performing all the necessary stages
+/// except the final cast and returning the kind required.
+CastKind Sema::PrepareScalarCast(ExprResult &Src, QualType DestTy) {
+ // Both Src and Dest are scalar types, i.e. arithmetic or pointer.
+ // Also, callers should have filtered out the invalid cases with
+ // pointers. Everything else should be possible.
+
+ QualType SrcTy = Src.get()->getType();
+ if (const AtomicType *SrcAtomicTy = SrcTy->getAs<AtomicType>())
+ SrcTy = SrcAtomicTy->getValueType();
+ if (const AtomicType *DestAtomicTy = DestTy->getAs<AtomicType>())
+ DestTy = DestAtomicTy->getValueType();
+
+ if (Context.hasSameUnqualifiedType(SrcTy, DestTy))
+ return CK_NoOp;
+
+ switch (Type::ScalarTypeKind SrcKind = SrcTy->getScalarTypeKind()) {
+ case Type::STK_MemberPointer:
+ llvm_unreachable("member pointer type in C");
+
+ case Type::STK_CPointer:
+ case Type::STK_BlockPointer:
+ case Type::STK_ObjCObjectPointer:
+ switch (DestTy->getScalarTypeKind()) {
+ case Type::STK_CPointer:
+ return CK_BitCast;
+ case Type::STK_BlockPointer:
+ return (SrcKind == Type::STK_BlockPointer
+ ? CK_BitCast : CK_AnyPointerToBlockPointerCast);
+ case Type::STK_ObjCObjectPointer:
+ if (SrcKind == Type::STK_ObjCObjectPointer)
+ return CK_BitCast;
+ if (SrcKind == Type::STK_CPointer)
+ return CK_CPointerToObjCPointerCast;
+ maybeExtendBlockObject(*this, Src);
+ return CK_BlockPointerToObjCPointerCast;
+ case Type::STK_Bool:
+ return CK_PointerToBoolean;
+ case Type::STK_Integral:
+ return CK_PointerToIntegral;
+ case Type::STK_Floating:
+ case Type::STK_FloatingComplex:
+ case Type::STK_IntegralComplex:
+ case Type::STK_MemberPointer:
+ llvm_unreachable("illegal cast from pointer");
+ }
+ llvm_unreachable("Should have returned before this");
+
+ case Type::STK_Bool: // casting from bool is like casting from an integer
+ case Type::STK_Integral:
+ switch (DestTy->getScalarTypeKind()) {
+ case Type::STK_CPointer:
+ case Type::STK_ObjCObjectPointer:
+ case Type::STK_BlockPointer:
+ if (Src.get()->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull))
+ return CK_NullToPointer;
+ return CK_IntegralToPointer;
+ case Type::STK_Bool:
+ return CK_IntegralToBoolean;
+ case Type::STK_Integral:
+ return CK_IntegralCast;
+ case Type::STK_Floating:
+ return CK_IntegralToFloating;
+ case Type::STK_IntegralComplex:
+ Src = ImpCastExprToType(Src.take(),
+ DestTy->castAs<ComplexType>()->getElementType(),
+ CK_IntegralCast);
+ return CK_IntegralRealToComplex;
+ case Type::STK_FloatingComplex:
+ Src = ImpCastExprToType(Src.take(),
+ DestTy->castAs<ComplexType>()->getElementType(),
+ CK_IntegralToFloating);
+ return CK_FloatingRealToComplex;
+ case Type::STK_MemberPointer:
+ llvm_unreachable("member pointer type in C");
+ }
+ llvm_unreachable("Should have returned before this");
+
+ case Type::STK_Floating:
+ switch (DestTy->getScalarTypeKind()) {
+ case Type::STK_Floating:
+ return CK_FloatingCast;
+ case Type::STK_Bool:
+ return CK_FloatingToBoolean;
+ case Type::STK_Integral:
+ return CK_FloatingToIntegral;
+ case Type::STK_FloatingComplex:
+ Src = ImpCastExprToType(Src.take(),
+ DestTy->castAs<ComplexType>()->getElementType(),
+ CK_FloatingCast);
+ return CK_FloatingRealToComplex;
+ case Type::STK_IntegralComplex:
+ Src = ImpCastExprToType(Src.take(),
+ DestTy->castAs<ComplexType>()->getElementType(),
+ CK_FloatingToIntegral);
+ return CK_IntegralRealToComplex;
+ case Type::STK_CPointer:
+ case Type::STK_ObjCObjectPointer:
+ case Type::STK_BlockPointer:
+ llvm_unreachable("valid float->pointer cast?");
+ case Type::STK_MemberPointer:
+ llvm_unreachable("member pointer type in C");
+ }
+ llvm_unreachable("Should have returned before this");
+
+ case Type::STK_FloatingComplex:
+ switch (DestTy->getScalarTypeKind()) {
+ case Type::STK_FloatingComplex:
+ return CK_FloatingComplexCast;
+ case Type::STK_IntegralComplex:
+ return CK_FloatingComplexToIntegralComplex;
+ case Type::STK_Floating: {
+ QualType ET = SrcTy->castAs<ComplexType>()->getElementType();
+ if (Context.hasSameType(ET, DestTy))
+ return CK_FloatingComplexToReal;
+ Src = ImpCastExprToType(Src.take(), ET, CK_FloatingComplexToReal);
+ return CK_FloatingCast;
+ }
+ case Type::STK_Bool:
+ return CK_FloatingComplexToBoolean;
+ case Type::STK_Integral:
+ Src = ImpCastExprToType(Src.take(),
+ SrcTy->castAs<ComplexType>()->getElementType(),
+ CK_FloatingComplexToReal);
+ return CK_FloatingToIntegral;
+ case Type::STK_CPointer:
+ case Type::STK_ObjCObjectPointer:
+ case Type::STK_BlockPointer:
+ llvm_unreachable("valid complex float->pointer cast?");
+ case Type::STK_MemberPointer:
+ llvm_unreachable("member pointer type in C");
+ }
+ llvm_unreachable("Should have returned before this");
+
+ case Type::STK_IntegralComplex:
+ switch (DestTy->getScalarTypeKind()) {
+ case Type::STK_FloatingComplex:
+ return CK_IntegralComplexToFloatingComplex;
+ case Type::STK_IntegralComplex:
+ return CK_IntegralComplexCast;
+ case Type::STK_Integral: {
+ QualType ET = SrcTy->castAs<ComplexType>()->getElementType();
+ if (Context.hasSameType(ET, DestTy))
+ return CK_IntegralComplexToReal;
+ Src = ImpCastExprToType(Src.take(), ET, CK_IntegralComplexToReal);
+ return CK_IntegralCast;
+ }
+ case Type::STK_Bool:
+ return CK_IntegralComplexToBoolean;
+ case Type::STK_Floating:
+ Src = ImpCastExprToType(Src.take(),
+ SrcTy->castAs<ComplexType>()->getElementType(),
+ CK_IntegralComplexToReal);
+ return CK_IntegralToFloating;
+ case Type::STK_CPointer:
+ case Type::STK_ObjCObjectPointer:
+ case Type::STK_BlockPointer:
+ llvm_unreachable("valid complex int->pointer cast?");
+ case Type::STK_MemberPointer:
+ llvm_unreachable("member pointer type in C");
+ }
+ llvm_unreachable("Should have returned before this");
+ }
+
+ llvm_unreachable("Unhandled scalar cast");
+}
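+
+// For illustration (sketch): casting a 'float' to '_Complex int' is staged
+// as an implicit CK_FloatingToIntegral cast to the element type followed by
+// CK_IntegralRealToComplex, while the reverse direction first peels off the
+// real part with CK_IntegralComplexToReal and then converts it with
+// CK_IntegralToFloating.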
+
+bool Sema::CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
+ CastKind &Kind) {
+ assert(VectorTy->isVectorType() && "Not a vector type!");
+
+ if (Ty->isVectorType() || Ty->isIntegerType()) {
+ if (Context.getTypeSize(VectorTy) != Context.getTypeSize(Ty))
+ return Diag(R.getBegin(),
+ Ty->isVectorType() ?
+ diag::err_invalid_conversion_between_vectors :
+ diag::err_invalid_conversion_between_vector_and_integer)
+ << VectorTy << Ty << R;
+ } else
+ return Diag(R.getBegin(),
+ diag::err_invalid_conversion_between_vector_and_scalar)
+ << VectorTy << Ty << R;
+
+ Kind = CK_BitCast;
+ return false;
+}
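+
+// For illustration (sketch): with a 16-byte 'vector_size' typedef such as
+// 'typedef int v4si __attribute__((vector_size(16)));', casting a 128-bit
+// integer to 'v4si' is a CK_BitCast, casting a 32-bit 'int' is rejected for
+// the size mismatch, and casting a 'float' is rejected as a vector/scalar
+// conversion.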
+
+ExprResult Sema::CheckExtVectorCast(SourceRange R, QualType DestTy,
+ Expr *CastExpr, CastKind &Kind) {
+ assert(DestTy->isExtVectorType() && "Not an extended vector type!");
+
+ QualType SrcTy = CastExpr->getType();
+
+ // If SrcTy is a VectorType, the total size must match to explicitly cast to
+ // an ExtVectorType.
+ // In OpenCL, casts between vectors of different types are not allowed.
+ // (See OpenCL 6.2).
+ if (SrcTy->isVectorType()) {
+ if (Context.getTypeSize(DestTy) != Context.getTypeSize(SrcTy)
+ || (getLangOpts().OpenCL &&
+ (DestTy.getCanonicalType() != SrcTy.getCanonicalType()))) {
+ Diag(R.getBegin(),diag::err_invalid_conversion_between_ext_vectors)
+ << DestTy << SrcTy << R;
+ return ExprError();
+ }
+ Kind = CK_BitCast;
+ return Owned(CastExpr);
+ }
+
+ // All non-pointer scalars can be cast to ExtVector type. The appropriate
+ // conversion will take place first from scalar to elt type, and then
+ // splat from elt type to vector.
+ if (SrcTy->isPointerType())
+ return Diag(R.getBegin(),
+ diag::err_invalid_conversion_between_vector_and_scalar)
+ << DestTy << SrcTy << R;
+
+ QualType DestElemTy = DestTy->getAs<ExtVectorType>()->getElementType();
+ ExprResult CastExprRes = Owned(CastExpr);
+ CastKind CK = PrepareScalarCast(CastExprRes, DestElemTy);
+ if (CastExprRes.isInvalid())
+ return ExprError();
+ CastExpr = ImpCastExprToType(CastExprRes.take(), DestElemTy, CK).take();
+
+ Kind = CK_VectorSplat;
+ return Owned(CastExpr);
+}
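+
+// For illustration (sketch): given 'typedef float float4
+// __attribute__((ext_vector_type(4)));', the cast '(float4)2' converts 2
+// to 'float' and then splats it (CK_VectorSplat), a same-size vector
+// source becomes a CK_BitCast, and a pointer source is rejected.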
+
+ExprResult
+Sema::ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
+ Declarator &D, ParsedType &Ty,
+ SourceLocation RParenLoc, Expr *CastExpr) {
+ assert(!D.isInvalidType() && (CastExpr != 0) &&
+ "ActOnCastExpr(): missing type or expr");
+
+ TypeSourceInfo *castTInfo = GetTypeForDeclaratorCast(D, CastExpr->getType());
+ if (D.isInvalidType())
+ return ExprError();
+
+ if (getLangOpts().CPlusPlus) {
+ // Check that there are no default arguments (C++ only).
+ CheckExtraCXXDefaultArguments(D);
+ }
+
+ checkUnusedDeclAttributes(D);
+
+ QualType castType = castTInfo->getType();
+ Ty = CreateParsedType(castType, castTInfo);
+
+ bool isVectorLiteral = false;
+
+ // Check for an altivec or OpenCL literal,
+ // i.e. all the elements are integer constants.
+ ParenExpr *PE = dyn_cast<ParenExpr>(CastExpr);
+ ParenListExpr *PLE = dyn_cast<ParenListExpr>(CastExpr);
+ if ((getLangOpts().AltiVec || getLangOpts().OpenCL)
+ && castType->isVectorType() && (PE || PLE)) {
+ if (PLE && PLE->getNumExprs() == 0) {
+ Diag(PLE->getExprLoc(), diag::err_altivec_empty_initializer);
+ return ExprError();
+ }
+ if (PE || PLE->getNumExprs() == 1) {
+ Expr *E = (PE ? PE->getSubExpr() : PLE->getExpr(0));
+ if (!E->getType()->isVectorType())
+ isVectorLiteral = true;
+ }
+ else
+ isVectorLiteral = true;
+ }
+
+ // If this is a vector initializer, '(' type ')' '(' init, ..., init ')'
+ // then handle it as such.
+ if (isVectorLiteral)
+ return BuildVectorLiteral(LParenLoc, RParenLoc, CastExpr, castTInfo);
+
+ // If the Expr being casted is a ParenListExpr, handle it specially.
+ // This is not an AltiVec-style cast, so turn the ParenListExpr into a
+ // sequence of BinOp comma operators.
+ if (isa<ParenListExpr>(CastExpr)) {
+ ExprResult Result = MaybeConvertParenListExprToParenExpr(S, CastExpr);
+ if (Result.isInvalid()) return ExprError();
+ CastExpr = Result.take();
+ }
+
+ return BuildCStyleCastExpr(LParenLoc, castTInfo, RParenLoc, CastExpr);
+}
+
+ExprResult Sema::BuildVectorLiteral(SourceLocation LParenLoc,
+ SourceLocation RParenLoc, Expr *E,
+ TypeSourceInfo *TInfo) {
+ assert((isa<ParenListExpr>(E) || isa<ParenExpr>(E)) &&
+ "Expected paren or paren list expression");
+
+ Expr **exprs;
+ unsigned numExprs;
+ Expr *subExpr;
+ if (ParenListExpr *PE = dyn_cast<ParenListExpr>(E)) {
+ exprs = PE->getExprs();
+ numExprs = PE->getNumExprs();
+ } else {
+ subExpr = cast<ParenExpr>(E)->getSubExpr();
+ exprs = &subExpr;
+ numExprs = 1;
+ }
+
+ QualType Ty = TInfo->getType();
+ assert(Ty->isVectorType() && "Expected vector type");
+
+ SmallVector<Expr *, 8> initExprs;
+ const VectorType *VTy = Ty->getAs<VectorType>();
+ unsigned numElems = Ty->getAs<VectorType>()->getNumElements();
+
+ // '(...)' form of vector initialization in AltiVec: the number of
+ // initializers must be one or must match the size of the vector.
+ // If a single value is specified in the initializer then it will be
+ // replicated to all the components of the vector.
+ if (VTy->getVectorKind() == VectorType::AltiVecVector) {
+ if (numExprs == 1) {
+ QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
+ ExprResult Literal = DefaultLvalueConversion(exprs[0]);
+ if (Literal.isInvalid())
+ return ExprError();
+ Literal = ImpCastExprToType(Literal.take(), ElemTy,
+ PrepareScalarCast(Literal, ElemTy));
+ return BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, Literal.take());
+ }
+ else if (numExprs < numElems) {
+ Diag(E->getExprLoc(),
+ diag::err_incorrect_number_of_vector_initializers);
+ return ExprError();
+ }
+ else
+ initExprs.append(exprs, exprs + numExprs);
+ }
+ else {
+ // For OpenCL, when the number of initializers is a single value,
+ // it will be replicated to all components of the vector.
+ if (getLangOpts().OpenCL &&
+ VTy->getVectorKind() == VectorType::GenericVector &&
+ numExprs == 1) {
+ QualType ElemTy = Ty->getAs<VectorType>()->getElementType();
+ ExprResult Literal = DefaultLvalueConversion(exprs[0]);
+ if (Literal.isInvalid())
+ return ExprError();
+ Literal = ImpCastExprToType(Literal.take(), ElemTy,
+ PrepareScalarCast(Literal, ElemTy));
+ return BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc, Literal.take());
+ }
+
+ initExprs.append(exprs, exprs + numExprs);
+ }
+ // FIXME: This means that pretty-printing the final AST will produce curly
+ // braces instead of the original commas.
+ InitListExpr *initE = new (Context) InitListExpr(Context, LParenLoc,
+ &initExprs[0],
+ initExprs.size(), RParenLoc);
+ initE->setType(Ty);
+ return BuildCompoundLiteralExpr(LParenLoc, TInfo, RParenLoc, initE);
+}
+
+/// This is not an AltiVec-style cast or C++ direct-initialization, so turn
+/// the ParenListExpr into a sequence of comma binary operators.
+ExprResult
+Sema::MaybeConvertParenListExprToParenExpr(Scope *S, Expr *OrigExpr) {
+ ParenListExpr *E = dyn_cast<ParenListExpr>(OrigExpr);
+ if (!E)
+ return Owned(OrigExpr);
+
+ ExprResult Result(E->getExpr(0));
+
+ for (unsigned i = 1, e = E->getNumExprs(); i != e && !Result.isInvalid(); ++i)
+ Result = ActOnBinOp(S, E->getExprLoc(), tok::comma, Result.get(),
+ E->getExpr(i));
+
+ if (Result.isInvalid()) return ExprError();
+
+ return ActOnParenExpr(E->getLParenLoc(), E->getRParenLoc(), Result.get());
+}
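+
+// For illustration (sketch): a cast such as '(int)(a, b, c)' that arrives
+// here as a ParenListExpr is rebuilt as the parenthesized comma expression
+// '((a , b) , c)' before the C-style cast is applied.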
+
+ExprResult Sema::ActOnParenListExpr(SourceLocation L,
+ SourceLocation R,
+ MultiExprArg Val) {
+ unsigned nexprs = Val.size();
+ Expr **exprs = reinterpret_cast<Expr**>(Val.release());
+ assert((exprs != 0) && "ActOnParenListExpr() missing expr list");
+ Expr *expr = new (Context) ParenListExpr(Context, L, exprs, nexprs, R);
+ return Owned(expr);
+}
+
+/// \brief Emit a specialized diagnostic when one expression is a null pointer
+/// constant and the other is not a pointer. Returns true if a diagnostic is
+/// emitted.
+bool Sema::DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
+ SourceLocation QuestionLoc) {
+ Expr *NullExpr = LHSExpr;
+ Expr *NonPointerExpr = RHSExpr;
+ Expr::NullPointerConstantKind NullKind =
+ NullExpr->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNotNull);
+
+ if (NullKind == Expr::NPCK_NotNull) {
+ NullExpr = RHSExpr;
+ NonPointerExpr = LHSExpr;
+ NullKind =
+ NullExpr->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNotNull);
+ }
+
+ if (NullKind == Expr::NPCK_NotNull)
+ return false;
+
+ if (NullKind == Expr::NPCK_ZeroInteger) {
+ // In this case, check to make sure that we got here from a "NULL"
+ // string in the source code.
+ NullExpr = NullExpr->IgnoreParenImpCasts();
+ SourceLocation loc = NullExpr->getExprLoc();
+ if (!findMacroSpelling(loc, "NULL"))
+ return false;
+ }
+
+ int DiagType = (NullKind == Expr::NPCK_CXX0X_nullptr);
+ Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands_null)
+ << NonPointerExpr->getType() << DiagType
+ << NonPointerExpr->getSourceRange();
+ return true;
+}
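+
+// For illustration (sketch): given 'struct S s;', the conditional
+// 'cond ? NULL : s' triggers this diagnostic, since one arm is a null
+// pointer constant spelled with the NULL macro and the other arm is not a
+// pointer; the likely intent was 'cond ? NULL : &s'.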
+
+/// \brief Return false if the condition expression is valid, true otherwise.
+static bool checkCondition(Sema &S, Expr *Cond) {
+ QualType CondTy = Cond->getType();
+
+ // C99 6.5.15p2
+ if (CondTy->isScalarType()) return false;
+
+ // OpenCL: Sec 6.3.i says the condition is allowed to be a vector or scalar.
+ if (S.getLangOpts().OpenCL && CondTy->isVectorType())
+ return false;
+
+ // Emit the proper error message.
+ S.Diag(Cond->getLocStart(), S.getLangOpts().OpenCL ?
+ diag::err_typecheck_cond_expect_scalar :
+ diag::err_typecheck_cond_expect_scalar_or_vector)
+ << CondTy;
+ return true;
+}
+
+/// \brief Return false if the two expressions can be converted to a vector,
+/// true otherwise
+static bool checkConditionalConvertScalarsToVectors(Sema &S, ExprResult &LHS,
+ ExprResult &RHS,
+ QualType CondTy) {
+ // Both operands should be of scalar type.
+ if (!LHS.get()->getType()->isScalarType()) {
+ S.Diag(LHS.get()->getLocStart(), diag::err_typecheck_cond_expect_scalar)
+ << CondTy;
+ return true;
+ }
+ if (!RHS.get()->getType()->isScalarType()) {
+ S.Diag(RHS.get()->getLocStart(), diag::err_typecheck_cond_expect_scalar)
+ << CondTy;
+ return true;
+ }
+
+ // Implicitly convert these scalars to the type of the condition.
+ LHS = S.ImpCastExprToType(LHS.take(), CondTy, CK_IntegralCast);
+ RHS = S.ImpCastExprToType(RHS.take(), CondTy, CK_IntegralCast);
+ return false;
+}
+
+/// \brief Handle when one or both operands are void type.
+static QualType checkConditionalVoidType(Sema &S, ExprResult &LHS,
+ ExprResult &RHS) {
+ Expr *LHSExpr = LHS.get();
+ Expr *RHSExpr = RHS.get();
+
+ if (!LHSExpr->getType()->isVoidType())
+ S.Diag(RHSExpr->getLocStart(), diag::ext_typecheck_cond_one_void)
+ << RHSExpr->getSourceRange();
+ if (!RHSExpr->getType()->isVoidType())
+ S.Diag(LHSExpr->getLocStart(), diag::ext_typecheck_cond_one_void)
+ << LHSExpr->getSourceRange();
+ LHS = S.ImpCastExprToType(LHS.take(), S.Context.VoidTy, CK_ToVoid);
+ RHS = S.ImpCastExprToType(RHS.take(), S.Context.VoidTy, CK_ToVoid);
+ return S.Context.VoidTy;
+}
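+
+// For illustration (sketch): 'cond ? (void)0 : 1' is accepted with the
+// ext_typecheck_cond_one_void extension diagnostic; both arms are
+// implicitly cast to 'void' and the whole conditional has type 'void'.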
+
+/// \brief Return false if the NullExpr can be promoted to PointerTy,
+/// true otherwise.
+static bool checkConditionalNullPointer(Sema &S, ExprResult &NullExpr,
+ QualType PointerTy) {
+ if ((!PointerTy->isAnyPointerType() && !PointerTy->isBlockPointerType()) ||
+ !NullExpr.get()->isNullPointerConstant(S.Context,
+ Expr::NPC_ValueDependentIsNull))
+ return true;
+
+ NullExpr = S.ImpCastExprToType(NullExpr.take(), PointerTy, CK_NullToPointer);
+ return false;
+}
+
+/// \brief Checks compatibility between two pointers and returns the resulting
+/// type.
+static QualType checkConditionalPointerCompatibility(Sema &S, ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation Loc) {
+ QualType LHSTy = LHS.get()->getType();
+ QualType RHSTy = RHS.get()->getType();
+
+ if (S.Context.hasSameType(LHSTy, RHSTy)) {
+ // Two identical pointer types are always compatible.
+ return LHSTy;
+ }
+
+ QualType lhptee, rhptee;
+
+ // Get the pointee types.
+ if (const BlockPointerType *LHSBTy = LHSTy->getAs<BlockPointerType>()) {
+ lhptee = LHSBTy->getPointeeType();
+ rhptee = RHSTy->castAs<BlockPointerType>()->getPointeeType();
+ } else {
+ lhptee = LHSTy->castAs<PointerType>()->getPointeeType();
+ rhptee = RHSTy->castAs<PointerType>()->getPointeeType();
+ }
+
+ // C99 6.5.15p6: If both operands are pointers to compatible types or to
+ // differently qualified versions of compatible types, the result type is
+ // a pointer to an appropriately qualified version of the composite
+ // type.
+
+ // Only CVR-qualifiers exist in the standard, and the differently-qualified
+ // clause doesn't make sense for our extensions. E.g. address space 2 should
+ // be incompatible with address space 3: they may live on different devices,
+ // for instance.
+ Qualifiers lhQual = lhptee.getQualifiers();
+ Qualifiers rhQual = rhptee.getQualifiers();
+
+ unsigned MergedCVRQual = lhQual.getCVRQualifiers() | rhQual.getCVRQualifiers();
+ lhQual.removeCVRQualifiers();
+ rhQual.removeCVRQualifiers();
+
+ lhptee = S.Context.getQualifiedType(lhptee.getUnqualifiedType(), lhQual);
+ rhptee = S.Context.getQualifiedType(rhptee.getUnqualifiedType(), rhQual);
+
+ QualType CompositeTy = S.Context.mergeTypes(lhptee, rhptee);
+
+ if (CompositeTy.isNull()) {
+ S.Diag(Loc, diag::warn_typecheck_cond_incompatible_pointers)
+ << LHSTy << RHSTy << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ // In this situation, we assume void* type. No especially good
+ // reason, but this is what gcc does, and we do have to pick
+ // to get a consistent AST.
+ QualType incompatTy = S.Context.getPointerType(S.Context.VoidTy);
+ LHS = S.ImpCastExprToType(LHS.take(), incompatTy, CK_BitCast);
+ RHS = S.ImpCastExprToType(RHS.take(), incompatTy, CK_BitCast);
+ return incompatTy;
+ }
+
+ // The pointer types are compatible.
+ QualType ResultTy = CompositeTy.withCVRQualifiers(MergedCVRQual);
+ ResultTy = S.Context.getPointerType(ResultTy);
+
+ LHS = S.ImpCastExprToType(LHS.take(), ResultTy, CK_BitCast);
+ RHS = S.ImpCastExprToType(RHS.take(), ResultTy, CK_BitCast);
+ return ResultTy;
+}
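+
+// For illustration (sketch): 'cond ? (const int *)p : (volatile int *)q'
+// merges the CVR qualifiers and yields 'const volatile int *', while
+// 'cond ? (int *)p : (float *)q' has no composite type, so it warns and
+// falls back to 'void *'.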
+
+/// \brief Return the resulting type when the operands are both block pointers.
+static QualType checkConditionalBlockPointerCompatibility(Sema &S,
+ ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation Loc) {
+ QualType LHSTy = LHS.get()->getType();
+ QualType RHSTy = RHS.get()->getType();
+
+ if (!LHSTy->isBlockPointerType() || !RHSTy->isBlockPointerType()) {
+ if (LHSTy->isVoidPointerType() || RHSTy->isVoidPointerType()) {
+ QualType destType = S.Context.getPointerType(S.Context.VoidTy);
+ LHS = S.ImpCastExprToType(LHS.take(), destType, CK_BitCast);
+ RHS = S.ImpCastExprToType(RHS.take(), destType, CK_BitCast);
+ return destType;
+ }
+ S.Diag(Loc, diag::err_typecheck_cond_incompatible_operands)
+ << LHSTy << RHSTy << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ // We have 2 block pointer types.
+ return checkConditionalPointerCompatibility(S, LHS, RHS, Loc);
+}
+
+/// \brief Return the resulting type when the operands are both pointers.
+static QualType
+checkConditionalObjectPointersCompatibility(Sema &S, ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation Loc) {
+ // get the pointer types
+ QualType LHSTy = LHS.get()->getType();
+ QualType RHSTy = RHS.get()->getType();
+
+ // get the "pointed to" types
+ QualType lhptee = LHSTy->getAs<PointerType>()->getPointeeType();
+ QualType rhptee = RHSTy->getAs<PointerType>()->getPointeeType();
+
+ // ignore qualifiers on void (C99 6.5.15p3, clause 6)
+ if (lhptee->isVoidType() && rhptee->isIncompleteOrObjectType()) {
+ // Figure out necessary qualifiers (C99 6.5.15p6)
+ QualType destPointee
+ = S.Context.getQualifiedType(lhptee, rhptee.getQualifiers());
+ QualType destType = S.Context.getPointerType(destPointee);
+ // Add qualifiers if necessary.
+ LHS = S.ImpCastExprToType(LHS.take(), destType, CK_NoOp);
+ // Promote to void*.
+ RHS = S.ImpCastExprToType(RHS.take(), destType, CK_BitCast);
+ return destType;
+ }
+ if (rhptee->isVoidType() && lhptee->isIncompleteOrObjectType()) {
+ QualType destPointee
+ = S.Context.getQualifiedType(rhptee, lhptee.getQualifiers());
+ QualType destType = S.Context.getPointerType(destPointee);
+ // Add qualifiers if necessary.
+ RHS = S.ImpCastExprToType(RHS.take(), destType, CK_NoOp);
+ // Promote to void*.
+ LHS = S.ImpCastExprToType(LHS.take(), destType, CK_BitCast);
+ return destType;
+ }
+
+ return checkConditionalPointerCompatibility(S, LHS, RHS, Loc);
+}
+
+/// \brief Diagnose a pointer/integer mismatch: return true (after warning and
+/// converting the integer operand to the pointer type) if Int is an integer
+/// expression and PointerExpr has pointer type, false otherwise.
+static bool checkPointerIntegerMismatch(Sema &S, ExprResult &Int,
+ Expr* PointerExpr, SourceLocation Loc,
+ bool IsIntFirstExpr) {
+ if (!PointerExpr->getType()->isPointerType() ||
+ !Int.get()->getType()->isIntegerType())
+ return false;
+
+ Expr *Expr1 = IsIntFirstExpr ? Int.get() : PointerExpr;
+ Expr *Expr2 = IsIntFirstExpr ? PointerExpr : Int.get();
+
+ S.Diag(Loc, diag::warn_typecheck_cond_pointer_integer_mismatch)
+ << Expr1->getType() << Expr2->getType()
+ << Expr1->getSourceRange() << Expr2->getSourceRange();
+ Int = S.ImpCastExprToType(Int.take(), PointerExpr->getType(),
+ CK_IntegralToPointer);
+ return true;
+}
+
+/// Note that LHS is not null here, even if this is the gnu "x ?: y" extension.
+/// In that case, LHS = cond.
+/// C99 6.5.15
+QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
+ ExprResult &RHS, ExprValueKind &VK,
+ ExprObjectKind &OK,
+ SourceLocation QuestionLoc) {
+
+ ExprResult LHSResult = CheckPlaceholderExpr(LHS.get());
+ if (!LHSResult.isUsable()) return QualType();
+ LHS = move(LHSResult);
+
+ ExprResult RHSResult = CheckPlaceholderExpr(RHS.get());
+ if (!RHSResult.isUsable()) return QualType();
+ RHS = move(RHSResult);
+
+ // C++ is sufficiently different to merit its own checker.
+ if (getLangOpts().CPlusPlus)
+ return CXXCheckConditionalOperands(Cond, LHS, RHS, VK, OK, QuestionLoc);
+
+ VK = VK_RValue;
+ OK = OK_Ordinary;
+
+ Cond = UsualUnaryConversions(Cond.take());
+ if (Cond.isInvalid())
+ return QualType();
+ LHS = UsualUnaryConversions(LHS.take());
+ if (LHS.isInvalid())
+ return QualType();
+ RHS = UsualUnaryConversions(RHS.take());
+ if (RHS.isInvalid())
+ return QualType();
+
+ QualType CondTy = Cond.get()->getType();
+ QualType LHSTy = LHS.get()->getType();
+ QualType RHSTy = RHS.get()->getType();
+
+ // first, check the condition.
+ if (checkCondition(*this, Cond.get()))
+ return QualType();
+
+ // Now check the two expressions.
+ if (LHSTy->isVectorType() || RHSTy->isVectorType())
+ return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false);
+
+ // OpenCL: If the condition is a vector, and both operands are scalar,
+ // attempt to implicitly convert them to the vector type to act like the
+ // built-in select.
+ if (getLangOpts().OpenCL && CondTy->isVectorType())
+ if (checkConditionalConvertScalarsToVectors(*this, LHS, RHS, CondTy))
+ return QualType();
+
+ // If both operands have arithmetic type, do the usual arithmetic conversions
+ // to find a common type: C99 6.5.15p3,5.
+ if (LHSTy->isArithmeticType() && RHSTy->isArithmeticType()) {
+ UsualArithmeticConversions(LHS, RHS);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+ return LHS.get()->getType();
+ }
+
+ // If both operands are the same structure or union type, the result is that
+ // type.
+ if (const RecordType *LHSRT = LHSTy->getAs<RecordType>()) { // C99 6.5.15p3
+ if (const RecordType *RHSRT = RHSTy->getAs<RecordType>())
+ if (LHSRT->getDecl() == RHSRT->getDecl())
+ // "If both the operands have structure or union type, the result has
+ // that type." This implies that CV qualifiers are dropped.
+ return LHSTy.getUnqualifiedType();
+ // FIXME: Type of conditional expression must be complete in C mode.
+ }
+
+ // C99 6.5.15p5: "If both operands have void type, the result has void type."
+ // The following || allows only one side to be void (a GCC-ism).
+ if (LHSTy->isVoidType() || RHSTy->isVoidType()) {
+ return checkConditionalVoidType(*this, LHS, RHS);
+ }
+
+ // C99 6.5.15p6 - "if one operand is a null pointer constant, the result has
+ // the type of the other operand."
+ if (!checkConditionalNullPointer(*this, RHS, LHSTy)) return LHSTy;
+ if (!checkConditionalNullPointer(*this, LHS, RHSTy)) return RHSTy;
+
+ // All objective-c pointer type analysis is done here.
+ QualType compositeType = FindCompositeObjCPointerType(LHS, RHS,
+ QuestionLoc);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+ if (!compositeType.isNull())
+ return compositeType;
+
+ // Handle block pointer types.
+ if (LHSTy->isBlockPointerType() || RHSTy->isBlockPointerType())
+ return checkConditionalBlockPointerCompatibility(*this, LHS, RHS,
+ QuestionLoc);
+
+ // Check constraints for C object pointers types (C99 6.5.15p3,6).
+ if (LHSTy->isPointerType() && RHSTy->isPointerType())
+ return checkConditionalObjectPointersCompatibility(*this, LHS, RHS,
+ QuestionLoc);
+
+ // GCC compatibility: soften pointer/integer mismatch. Note that
+ // null pointers have been filtered out by this point.
+ if (checkPointerIntegerMismatch(*this, LHS, RHS.get(), QuestionLoc,
+ /*isIntFirstExpr=*/true))
+ return RHSTy;
+ if (checkPointerIntegerMismatch(*this, RHS, LHS.get(), QuestionLoc,
+ /*isIntFirstExpr=*/false))
+ return LHSTy;
+
+ // Emit a better diagnostic if one of the expressions is a null pointer
+ // constant and the other is not a pointer type. In this case, the user most
+ // likely forgot to take the address of the other expression.
+ if (DiagnoseConditionalForNull(LHS.get(), RHS.get(), QuestionLoc))
+ return QualType();
+
+ // Otherwise, the operands are not compatible.
+ Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands)
+ << LHSTy << RHSTy << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
+}
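+
+// For illustration (sketch): in C, 'cond ? ptr : 42' is accepted for GCC
+// compatibility but warns (warn_typecheck_cond_pointer_integer_mismatch)
+// and converts 42 to the pointer type; a literal 0 never reaches that
+// check because it is handled earlier as a null pointer constant.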
+
+/// FindCompositeObjCPointerType - Helper method to find the composite type
+/// of the Objective-C pointer types of the two input expressions.
+QualType Sema::FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation QuestionLoc) {
+ QualType LHSTy = LHS.get()->getType();
+ QualType RHSTy = RHS.get()->getType();
+
+ // Handle things like Class and struct objc_class*. Here we cast the result
+ // to the pseudo-builtin, because that will be implicitly cast back to the
+ // redefinition type if an attempt is made to access its fields.
+ if (LHSTy->isObjCClassType() &&
+ (Context.hasSameType(RHSTy, Context.getObjCClassRedefinitionType()))) {
+ RHS = ImpCastExprToType(RHS.take(), LHSTy, CK_CPointerToObjCPointerCast);
+ return LHSTy;
+ }
+ if (RHSTy->isObjCClassType() &&
+ (Context.hasSameType(LHSTy, Context.getObjCClassRedefinitionType()))) {
+ LHS = ImpCastExprToType(LHS.take(), RHSTy, CK_CPointerToObjCPointerCast);
+ return RHSTy;
+ }
+ // And the same for struct objc_object* / id
+ if (LHSTy->isObjCIdType() &&
+ (Context.hasSameType(RHSTy, Context.getObjCIdRedefinitionType()))) {
+ RHS = ImpCastExprToType(RHS.take(), LHSTy, CK_CPointerToObjCPointerCast);
+ return LHSTy;
+ }
+ if (RHSTy->isObjCIdType() &&
+ (Context.hasSameType(LHSTy, Context.getObjCIdRedefinitionType()))) {
+ LHS = ImpCastExprToType(LHS.take(), RHSTy, CK_CPointerToObjCPointerCast);
+ return RHSTy;
+ }
+ // And the same for struct objc_selector* / SEL
+ if (Context.isObjCSelType(LHSTy) &&
+ (Context.hasSameType(RHSTy, Context.getObjCSelRedefinitionType()))) {
+ RHS = ImpCastExprToType(RHS.take(), LHSTy, CK_BitCast);
+ return LHSTy;
+ }
+ if (Context.isObjCSelType(RHSTy) &&
+ (Context.hasSameType(LHSTy, Context.getObjCSelRedefinitionType()))) {
+ LHS = ImpCastExprToType(LHS.take(), RHSTy, CK_BitCast);
+ return RHSTy;
+ }
+ // Check constraints for Objective-C object pointers types.
+ if (LHSTy->isObjCObjectPointerType() && RHSTy->isObjCObjectPointerType()) {
+
+ if (Context.getCanonicalType(LHSTy) == Context.getCanonicalType(RHSTy)) {
+ // Two identical object pointer types are always compatible.
+ return LHSTy;
+ }
+ const ObjCObjectPointerType *LHSOPT = LHSTy->castAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *RHSOPT = RHSTy->castAs<ObjCObjectPointerType>();
+ QualType compositeType = LHSTy;
+
+ // If both operands are interfaces and either operand can be
+ // assigned to the other, use that type as the composite
+ // type. This allows
+ // xxx ? (A*) a : (B*) b
+ // where B is a subclass of A.
+ //
+ // Additionally, as for assignment, if either type is 'id'
+ // allow silent coercion. Finally, if the types are
+ // incompatible then make sure to use 'id' as the composite
+ // type so the result is acceptable for sending messages to.
+
+ // FIXME: Consider unifying with 'areComparableObjCPointerTypes'.
+ // It could return the composite type.
+ if (Context.canAssignObjCInterfaces(LHSOPT, RHSOPT)) {
+ compositeType = RHSOPT->isObjCBuiltinType() ? RHSTy : LHSTy;
+ } else if (Context.canAssignObjCInterfaces(RHSOPT, LHSOPT)) {
+ compositeType = LHSOPT->isObjCBuiltinType() ? LHSTy : RHSTy;
+ } else if ((LHSTy->isObjCQualifiedIdType() ||
+ RHSTy->isObjCQualifiedIdType()) &&
+ Context.ObjCQualifiedIdTypesAreCompatible(LHSTy, RHSTy, true)) {
+ // Need to handle "id<xx>" explicitly.
+ // GCC allows qualified id and any Objective-C type to devolve to
+ // id. This is handled here for now, until it is clear whether it should
+ // be part of ObjCQualifiedIdTypesAreCompatible.
+ compositeType = Context.getObjCIdType();
+ } else if (LHSTy->isObjCIdType() || RHSTy->isObjCIdType()) {
+ compositeType = Context.getObjCIdType();
+ } else if (!(compositeType =
+ Context.areCommonBaseCompatible(LHSOPT, RHSOPT)).isNull())
+ ;
+ else {
+ Diag(QuestionLoc, diag::ext_typecheck_cond_incompatible_operands)
+ << LHSTy << RHSTy
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ QualType incompatTy = Context.getObjCIdType();
+ LHS = ImpCastExprToType(LHS.take(), incompatTy, CK_BitCast);
+ RHS = ImpCastExprToType(RHS.take(), incompatTy, CK_BitCast);
+ return incompatTy;
+ }
+ // The object pointer types are compatible.
+ LHS = ImpCastExprToType(LHS.take(), compositeType, CK_BitCast);
+ RHS = ImpCastExprToType(RHS.take(), compositeType, CK_BitCast);
+ return compositeType;
+ }
+ // Check Objective-C object pointer types and 'void *'
+ if (LHSTy->isVoidPointerType() && RHSTy->isObjCObjectPointerType()) {
+ if (getLangOpts().ObjCAutoRefCount) {
+ // ARC forbids the implicit conversion of object pointers to 'void *',
+ // so these types are not compatible.
+ Diag(QuestionLoc, diag::err_cond_voidptr_arc) << LHSTy << RHSTy
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ LHS = RHS = true;
+ return QualType();
+ }
+ QualType lhptee = LHSTy->getAs<PointerType>()->getPointeeType();
+ QualType rhptee = RHSTy->getAs<ObjCObjectPointerType>()->getPointeeType();
+ QualType destPointee
+ = Context.getQualifiedType(lhptee, rhptee.getQualifiers());
+ QualType destType = Context.getPointerType(destPointee);
+ // Add qualifiers if necessary.
+ LHS = ImpCastExprToType(LHS.take(), destType, CK_NoOp);
+ // Promote to void*.
+ RHS = ImpCastExprToType(RHS.take(), destType, CK_BitCast);
+ return destType;
+ }
+ if (LHSTy->isObjCObjectPointerType() && RHSTy->isVoidPointerType()) {
+ if (getLangOpts().ObjCAutoRefCount) {
+ // ARC forbids the implicit conversion of object pointers to 'void *',
+ // so these types are not compatible.
+ Diag(QuestionLoc, diag::err_cond_voidptr_arc) << LHSTy << RHSTy
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ LHS = RHS = true;
+ return QualType();
+ }
+ QualType lhptee = LHSTy->getAs<ObjCObjectPointerType>()->getPointeeType();
+ QualType rhptee = RHSTy->getAs<PointerType>()->getPointeeType();
+ QualType destPointee
+ = Context.getQualifiedType(rhptee, lhptee.getQualifiers());
+ QualType destType = Context.getPointerType(destPointee);
+ // Add qualifiers if necessary.
+ RHS = ImpCastExprToType(RHS.take(), destType, CK_NoOp);
+ // Promote to void*.
+ LHS = ImpCastExprToType(LHS.take(), destType, CK_BitCast);
+ return destType;
+ }
+ return QualType();
+}
+
+/// SuggestParentheses - Emit a note with a fixit hint that wraps
+/// ParenRange in parentheses.
+static void SuggestParentheses(Sema &Self, SourceLocation Loc,
+ const PartialDiagnostic &Note,
+ SourceRange ParenRange) {
+ SourceLocation EndLoc = Self.PP.getLocForEndOfToken(ParenRange.getEnd());
+ if (ParenRange.getBegin().isFileID() && ParenRange.getEnd().isFileID() &&
+ EndLoc.isValid()) {
+ Self.Diag(Loc, Note)
+ << FixItHint::CreateInsertion(ParenRange.getBegin(), "(")
+ << FixItHint::CreateInsertion(EndLoc, ")");
+ } else {
+ // We can't display the parentheses, so just show the bare note.
+ Self.Diag(Loc, Note) << ParenRange;
+ }
+}
+
+static bool IsArithmeticOp(BinaryOperatorKind Opc) {
+ return Opc >= BO_Mul && Opc <= BO_Shr;
+}
+
+/// IsArithmeticBinaryExpr - Returns true if E is an arithmetic binary
+/// expression, either using a built-in or overloaded operator,
+/// and sets *OpCode to the opcode and *RHSExprs to the right-hand side
+/// expression.
+static bool IsArithmeticBinaryExpr(Expr *E, BinaryOperatorKind *Opcode,
+ Expr **RHSExprs) {
+ // Don't strip parentheses: we should not warn if E is parenthesized.
+ E = E->IgnoreImpCasts();
+ E = E->IgnoreConversionOperator();
+ E = E->IgnoreImpCasts();
+
+ // Built-in binary operator.
+ if (BinaryOperator *OP = dyn_cast<BinaryOperator>(E)) {
+ if (IsArithmeticOp(OP->getOpcode())) {
+ *Opcode = OP->getOpcode();
+ *RHSExprs = OP->getRHS();
+ return true;
+ }
+ }
+
+ // Overloaded operator.
+ if (CXXOperatorCallExpr *Call = dyn_cast<CXXOperatorCallExpr>(E)) {
+ if (Call->getNumArgs() != 2)
+ return false;
+
+ // Make sure this is really a binary operator that is safe to pass into
+ // BinaryOperator::getOverloadedOpcode(), e.g. it's not a subscript op.
+ OverloadedOperatorKind OO = Call->getOperator();
+ if (OO < OO_Plus || OO > OO_Arrow)
+ return false;
+
+ BinaryOperatorKind OpKind = BinaryOperator::getOverloadedOpcode(OO);
+ if (IsArithmeticOp(OpKind)) {
+ *Opcode = OpKind;
+ *RHSExprs = Call->getArg(1);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool IsLogicOp(BinaryOperatorKind Opc) {
+ return (Opc >= BO_LT && Opc <= BO_NE) || (Opc >= BO_LAnd && Opc <= BO_LOr);
+}
+
+/// ExprLooksBoolean - Returns true if E looks boolean, i.e. it has boolean type
+/// or is a logical expression such as (x==y) which has int type, but is
+/// commonly interpreted as boolean.
+static bool ExprLooksBoolean(Expr *E) {
+ E = E->IgnoreParenImpCasts();
+
+ if (E->getType()->isBooleanType())
+ return true;
+ if (BinaryOperator *OP = dyn_cast<BinaryOperator>(E))
+ return IsLogicOp(OP->getOpcode());
+ if (UnaryOperator *OP = dyn_cast<UnaryOperator>(E))
+ return OP->getOpcode() == UO_LNot;
+
+ return false;
+}
+
+/// DiagnoseConditionalPrecedence - Emit a warning when a conditional operator
+/// and binary operator are mixed in a way that suggests the programmer assumed
+/// the conditional operator has higher precedence, for example:
+/// "int x = a + someBinaryCondition ? 1 : 2".
+static void DiagnoseConditionalPrecedence(Sema &Self,
+ SourceLocation OpLoc,
+ Expr *Condition,
+ Expr *LHSExpr,
+ Expr *RHSExpr) {
+ BinaryOperatorKind CondOpcode;
+ Expr *CondRHS;
+
+ if (!IsArithmeticBinaryExpr(Condition, &CondOpcode, &CondRHS))
+ return;
+ if (!ExprLooksBoolean(CondRHS))
+ return;
+
+ // The condition is an arithmetic binary expression, with a right-
+ // hand side that looks boolean, so warn.
+
+ Self.Diag(OpLoc, diag::warn_precedence_conditional)
+ << Condition->getSourceRange()
+ << BinaryOperator::getOpcodeStr(CondOpcode);
+
+ SuggestParentheses(Self, OpLoc,
+ Self.PDiag(diag::note_precedence_conditional_silence)
+ << BinaryOperator::getOpcodeStr(CondOpcode),
+ SourceRange(Condition->getLocStart(), Condition->getLocEnd()));
+
+ SuggestParentheses(Self, OpLoc,
+ Self.PDiag(diag::note_precedence_conditional_first),
+ SourceRange(CondRHS->getLocStart(), RHSExpr->getLocEnd()));
+}
+
+/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
+/// in the case of the GNU conditional expr extension.
+ExprResult Sema::ActOnConditionalOp(SourceLocation QuestionLoc,
+ SourceLocation ColonLoc,
+ Expr *CondExpr, Expr *LHSExpr,
+ Expr *RHSExpr) {
+ // If this is the gnu "x ?: y" extension, analyze the types as though the LHS
+ // was the condition.
+ OpaqueValueExpr *opaqueValue = 0;
+ Expr *commonExpr = 0;
+ if (LHSExpr == 0) {
+ commonExpr = CondExpr;
+
+ // We usually want to apply unary conversions *before* saving, except
+ // in the special case of a C++ l-value conditional.
+ if (!(getLangOpts().CPlusPlus
+ && !commonExpr->isTypeDependent()
+ && commonExpr->getValueKind() == RHSExpr->getValueKind()
+ && commonExpr->isGLValue()
+ && commonExpr->isOrdinaryOrBitFieldObject()
+ && RHSExpr->isOrdinaryOrBitFieldObject()
+ && Context.hasSameType(commonExpr->getType(), RHSExpr->getType()))) {
+ ExprResult commonRes = UsualUnaryConversions(commonExpr);
+ if (commonRes.isInvalid())
+ return ExprError();
+ commonExpr = commonRes.take();
+ }
+
+ opaqueValue = new (Context) OpaqueValueExpr(commonExpr->getExprLoc(),
+ commonExpr->getType(),
+ commonExpr->getValueKind(),
+ commonExpr->getObjectKind(),
+ commonExpr);
+ LHSExpr = CondExpr = opaqueValue;
+ }
+
+ ExprValueKind VK = VK_RValue;
+ ExprObjectKind OK = OK_Ordinary;
+ ExprResult Cond = Owned(CondExpr), LHS = Owned(LHSExpr), RHS = Owned(RHSExpr);
+ QualType result = CheckConditionalOperands(Cond, LHS, RHS,
+ VK, OK, QuestionLoc);
+ if (result.isNull() || Cond.isInvalid() || LHS.isInvalid() ||
+ RHS.isInvalid())
+ return ExprError();
+
+ DiagnoseConditionalPrecedence(*this, QuestionLoc, Cond.get(), LHS.get(),
+ RHS.get());
+
+ if (!commonExpr)
+ return Owned(new (Context) ConditionalOperator(Cond.take(), QuestionLoc,
+ LHS.take(), ColonLoc,
+ RHS.take(), result, VK, OK));
+
+ return Owned(new (Context)
+ BinaryConditionalOperator(commonExpr, opaqueValue, Cond.take(), LHS.take(),
+ RHS.take(), QuestionLoc, ColonLoc, result, VK,
+ OK));
+}
+
+// checkPointerTypesForAssignment - This is a very tricky routine (despite
+// being closely modeled after the C99 spec:-). The odd characteristic of this
+// routine is that it effectively ignores the qualifiers on the top-level pointee.
+// This circumvents the usual type rules specified in 6.2.7p1 & 6.7.5.[1-3].
+// FIXME: add a couple examples in this comment.
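+// For illustration (a sketch of typical results): assigning a 'char *'
+// value to a 'const char *' lvalue is Compatible, the reverse direction
+// yields CompatiblePointerDiscardsQualifiers, 'short *' to 'int *' yields
+// IncompatiblePointer, and 'void *' to a function pointer type yields the
+// FunctionVoidPointer extension result.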
+static Sema::AssignConvertType
+checkPointerTypesForAssignment(Sema &S, QualType LHSType, QualType RHSType) {
+ assert(LHSType.isCanonical() && "LHS not canonicalized!");
+ assert(RHSType.isCanonical() && "RHS not canonicalized!");
+
+ // get the "pointed to" type (ignoring qualifiers at the top level)
+ const Type *lhptee, *rhptee;
+ Qualifiers lhq, rhq;
+ llvm::tie(lhptee, lhq) = cast<PointerType>(LHSType)->getPointeeType().split();
+ llvm::tie(rhptee, rhq) = cast<PointerType>(RHSType)->getPointeeType().split();
+
+ Sema::AssignConvertType ConvTy = Sema::Compatible;
+
+ // C99 6.5.16.1p1: The following citation is common to constraints
+ // 3 & 4 (below). ...and the type *pointed to* by the left has all the
+ // qualifiers of the type *pointed to* by the right;
+ Qualifiers lq;
+
+ // As a special case, 'non-__weak A *' -> 'non-__weak const *' is okay.
+ if (lhq.getObjCLifetime() != rhq.getObjCLifetime() &&
+ lhq.compatiblyIncludesObjCLifetime(rhq)) {
+ // Ignore lifetime for further calculation.
+ lhq.removeObjCLifetime();
+ rhq.removeObjCLifetime();
+ }
+
+ if (!lhq.compatiblyIncludes(rhq)) {
+ // Treat address-space mismatches as fatal. TODO: address subspaces
+ if (lhq.getAddressSpace() != rhq.getAddressSpace())
+ ConvTy = Sema::IncompatiblePointerDiscardsQualifiers;
+
+ // It's okay to add or remove GC or lifetime qualifiers when converting to
+ // and from void*.
+ else if (lhq.withoutObjCGCAttr().withoutObjCLifetime()
+ .compatiblyIncludes(
+ rhq.withoutObjCGCAttr().withoutObjCLifetime())
+ && (lhptee->isVoidType() || rhptee->isVoidType()))
+ ; // keep old
+
+ // Treat lifetime mismatches as fatal.
+ else if (lhq.getObjCLifetime() != rhq.getObjCLifetime())
+ ConvTy = Sema::IncompatiblePointerDiscardsQualifiers;
+
+ // For GCC compatibility, other qualifier mismatches are treated
+ // as still compatible in C.
+ else ConvTy = Sema::CompatiblePointerDiscardsQualifiers;
+ }
+
+ // C99 6.5.16.1p1 (constraint 4): If one operand is a pointer to an object or
+ // incomplete type and the other is a pointer to a qualified or unqualified
+ // version of void...
+ if (lhptee->isVoidType()) {
+ if (rhptee->isIncompleteOrObjectType())
+ return ConvTy;
+
+ // As an extension, we allow cast to/from void* to function pointer.
+ assert(rhptee->isFunctionType());
+ return Sema::FunctionVoidPointer;
+ }
+
+ if (rhptee->isVoidType()) {
+ if (lhptee->isIncompleteOrObjectType())
+ return ConvTy;
+
+ // As an extension, we allow cast to/from void* to function pointer.
+ assert(lhptee->isFunctionType());
+ return Sema::FunctionVoidPointer;
+ }
+
+ // C99 6.5.16.1p1 (constraint 3): both operands are pointers to qualified or
+ // unqualified versions of compatible types, ...
+ QualType ltrans = QualType(lhptee, 0), rtrans = QualType(rhptee, 0);
+ if (!S.Context.typesAreCompatible(ltrans, rtrans)) {
+ // Check if the pointee types are compatible ignoring the sign.
+ // We explicitly check for char so that we catch "char" vs
+ // "unsigned char" on systems where "char" is unsigned.
+ if (lhptee->isCharType())
+ ltrans = S.Context.UnsignedCharTy;
+ else if (lhptee->hasSignedIntegerRepresentation())
+ ltrans = S.Context.getCorrespondingUnsignedType(ltrans);
+
+ if (rhptee->isCharType())
+ rtrans = S.Context.UnsignedCharTy;
+ else if (rhptee->hasSignedIntegerRepresentation())
+ rtrans = S.Context.getCorrespondingUnsignedType(rtrans);
+
+ if (ltrans == rtrans) {
+ // Types are compatible ignoring the sign. Qualifier incompatibility
+ // takes priority over sign incompatibility because the sign
+ // warning can be disabled.
+ if (ConvTy != Sema::Compatible)
+ return ConvTy;
+
+ return Sema::IncompatiblePointerSign;
+ }
+
+ // If we are a multi-level pointer, it's possible that our issue is simply
+ // one of qualification - e.g. char ** -> const char ** is not allowed. If
+ // the eventual target type is the same and the pointers have the same
+ // level of indirection, this must be the issue.
+ if (isa<PointerType>(lhptee) && isa<PointerType>(rhptee)) {
+ do {
+ lhptee = cast<PointerType>(lhptee)->getPointeeType().getTypePtr();
+ rhptee = cast<PointerType>(rhptee)->getPointeeType().getTypePtr();
+ } while (isa<PointerType>(lhptee) && isa<PointerType>(rhptee));
+
+ if (lhptee == rhptee)
+ return Sema::IncompatibleNestedPointerQualifiers;
+ }
+
+ // General pointer incompatibility takes priority over qualifiers.
+ return Sema::IncompatiblePointer;
+ }
+ if (!S.getLangOpts().CPlusPlus &&
+ S.IsNoReturnConversion(ltrans, rtrans, ltrans))
+ return Sema::IncompatiblePointer;
+ return ConvTy;
+}
+
+/// checkBlockPointerTypesForAssignment - This routine determines whether two
+/// block pointer types are compatible or whether a block and normal pointer
+/// are compatible. It is more restrictive than comparing two function pointer
+/// types.
+static Sema::AssignConvertType
+checkBlockPointerTypesForAssignment(Sema &S, QualType LHSType,
+ QualType RHSType) {
+ assert(LHSType.isCanonical() && "LHS not canonicalized!");
+ assert(RHSType.isCanonical() && "RHS not canonicalized!");
+
+ QualType lhptee, rhptee;
+
+ // get the "pointed to" type (ignoring qualifiers at the top level)
+ lhptee = cast<BlockPointerType>(LHSType)->getPointeeType();
+ rhptee = cast<BlockPointerType>(RHSType)->getPointeeType();
+
+ // In C++, the types have to match exactly.
+ if (S.getLangOpts().CPlusPlus)
+ return Sema::IncompatibleBlockPointer;
+
+ Sema::AssignConvertType ConvTy = Sema::Compatible;
+
+ // For blocks we enforce that qualifiers are identical.
+ if (lhptee.getLocalQualifiers() != rhptee.getLocalQualifiers())
+ ConvTy = Sema::CompatiblePointerDiscardsQualifiers;
+
+ if (!S.Context.typesAreBlockPointerCompatible(LHSType, RHSType))
+ return Sema::IncompatibleBlockPointer;
+
+ return ConvTy;
+}
+
+/// checkObjCPointerTypesForAssignment - Compares two objective-c pointer types
+/// for assignment compatibility.
+static Sema::AssignConvertType
+checkObjCPointerTypesForAssignment(Sema &S, QualType LHSType,
+ QualType RHSType) {
+ assert(LHSType.isCanonical() && "LHS was not canonicalized!");
+ assert(RHSType.isCanonical() && "RHS was not canonicalized!");
+
+ if (LHSType->isObjCBuiltinType()) {
+ // Class is not compatible with ObjC object pointers.
+ if (LHSType->isObjCClassType() && !RHSType->isObjCBuiltinType() &&
+ !RHSType->isObjCQualifiedClassType())
+ return Sema::IncompatiblePointer;
+ return Sema::Compatible;
+ }
+ if (RHSType->isObjCBuiltinType()) {
+ if (RHSType->isObjCClassType() && !LHSType->isObjCBuiltinType() &&
+ !LHSType->isObjCQualifiedClassType())
+ return Sema::IncompatiblePointer;
+ return Sema::Compatible;
+ }
+ QualType lhptee = LHSType->getAs<ObjCObjectPointerType>()->getPointeeType();
+ QualType rhptee = RHSType->getAs<ObjCObjectPointerType>()->getPointeeType();
+
+ if (!lhptee.isAtLeastAsQualifiedAs(rhptee) &&
+ // make an exception for id<P>
+ !LHSType->isObjCQualifiedIdType())
+ return Sema::CompatiblePointerDiscardsQualifiers;
+
+ if (S.Context.typesAreCompatible(LHSType, RHSType))
+ return Sema::Compatible;
+ if (LHSType->isObjCQualifiedIdType() || RHSType->isObjCQualifiedIdType())
+ return Sema::IncompatibleObjCQualifiedId;
+ return Sema::IncompatiblePointer;
+}
+
+Sema::AssignConvertType
+Sema::CheckAssignmentConstraints(SourceLocation Loc,
+ QualType LHSType, QualType RHSType) {
+ // Fake up an opaque expression. We don't actually care about what
+ // cast operations are required, so if CheckAssignmentConstraints
+ // adds casts to this they'll be wasted, but fortunately that doesn't
+ // usually happen on valid code.
+ OpaqueValueExpr RHSExpr(Loc, RHSType, VK_RValue);
+ ExprResult RHSPtr = &RHSExpr;
+ CastKind K = CK_Invalid;
+
+ return CheckAssignmentConstraints(LHSType, RHSPtr, K);
+}
+
+/// CheckAssignmentConstraints (C99 6.5.16) - This routine currently
+/// has code to accommodate several GCC extensions when type checking
+/// pointers. Here are some objectionable examples that GCC considers warnings:
+///
+/// int a, *pint;
+/// short *pshort;
+/// struct foo *pfoo;
+///
+/// pint = pshort; // warning: assignment from incompatible pointer type
+/// a = pint; // warning: assignment makes integer from pointer without a cast
+/// pint = a; // warning: assignment makes pointer from integer without a cast
+/// pint = pfoo; // warning: assignment from incompatible pointer type
+///
+/// As a result, the code for dealing with pointers is more complex than the
+/// C99 spec dictates.
+///
+/// Sets 'Kind' for any result kind except Incompatible.
+Sema::AssignConvertType
+Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
+ CastKind &Kind) {
+ QualType RHSType = RHS.get()->getType();
+ QualType OrigLHSType = LHSType;
+
+ // Get canonical types. We're not formatting these types, just comparing
+ // them.
+ LHSType = Context.getCanonicalType(LHSType).getUnqualifiedType();
+ RHSType = Context.getCanonicalType(RHSType).getUnqualifiedType();
+
+ // Common case: no conversion required.
+ if (LHSType == RHSType) {
+ Kind = CK_NoOp;
+ return Compatible;
+ }
+
+ if (const AtomicType *AtomicTy = dyn_cast<AtomicType>(LHSType)) {
+ if (AtomicTy->getValueType() == RHSType) {
+ Kind = CK_NonAtomicToAtomic;
+ return Compatible;
+ }
+ }
+
+ if (const AtomicType *AtomicTy = dyn_cast<AtomicType>(RHSType)) {
+ if (AtomicTy->getValueType() == LHSType) {
+ Kind = CK_AtomicToNonAtomic;
+ return Compatible;
+ }
+ }
+
+ // If the left-hand side is a reference type, then we are in a
+ // (rare!) case where we've allowed the use of references in C,
+ // e.g., as a parameter type in a built-in function. In this case,
+ // just make sure that the type referenced is compatible with the
+ // right-hand side type. The caller is responsible for adjusting
+ // LHSType so that the resulting expression does not have reference
+ // type.
+ if (const ReferenceType *LHSTypeRef = LHSType->getAs<ReferenceType>()) {
+ if (Context.typesAreCompatible(LHSTypeRef->getPointeeType(), RHSType)) {
+ Kind = CK_LValueBitCast;
+ return Compatible;
+ }
+ return Incompatible;
+ }
+
+ // Allow scalar to ExtVector assignments, and assignments of an ExtVector type
+ // to the same ExtVector type.
+ if (LHSType->isExtVectorType()) {
+ if (RHSType->isExtVectorType())
+ return Incompatible;
+ if (RHSType->isArithmeticType()) {
+ // CK_VectorSplat does T -> vector T, so first cast to the
+ // element type.
+ QualType elType = cast<ExtVectorType>(LHSType)->getElementType();
+ if (elType != RHSType) {
+ Kind = PrepareScalarCast(RHS, elType);
+ RHS = ImpCastExprToType(RHS.take(), elType, Kind);
+ }
+ Kind = CK_VectorSplat;
+ return Compatible;
+ }
+ }
+
+ // Conversions to or from vector type.
+ if (LHSType->isVectorType() || RHSType->isVectorType()) {
+ if (LHSType->isVectorType() && RHSType->isVectorType()) {
+ // Allow assignments of an AltiVec vector type to an equivalent GCC
+ // vector type and vice versa
+ if (Context.areCompatibleVectorTypes(LHSType, RHSType)) {
+ Kind = CK_BitCast;
+ return Compatible;
+ }
+
+ // If we are allowing lax vector conversions, and LHS and RHS are both
+ // vectors, the total size only needs to be the same. This is a bitcast;
+ // no bits are changed but the result type is different.
+ if (getLangOpts().LaxVectorConversions &&
+ (Context.getTypeSize(LHSType) == Context.getTypeSize(RHSType))) {
+ Kind = CK_BitCast;
+ return IncompatibleVectors;
+ }
+ }
+ return Incompatible;
+ }
+
+ // Arithmetic conversions.
+ if (LHSType->isArithmeticType() && RHSType->isArithmeticType() &&
+ !(getLangOpts().CPlusPlus && LHSType->isEnumeralType())) {
+ Kind = PrepareScalarCast(RHS, LHSType);
+ return Compatible;
+ }
+
+ // Conversions to normal pointers.
+ if (const PointerType *LHSPointer = dyn_cast<PointerType>(LHSType)) {
+ // U* -> T*
+ if (isa<PointerType>(RHSType)) {
+ Kind = CK_BitCast;
+ return checkPointerTypesForAssignment(*this, LHSType, RHSType);
+ }
+
+ // int -> T*
+ if (RHSType->isIntegerType()) {
+ Kind = CK_IntegralToPointer; // FIXME: null?
+ return IntToPointer;
+ }
+
+ // C pointers are not compatible with ObjC object pointers,
+ // with two exceptions:
+ if (isa<ObjCObjectPointerType>(RHSType)) {
+ // - conversions to void*
+ if (LHSPointer->getPointeeType()->isVoidType()) {
+ Kind = CK_BitCast;
+ return Compatible;
+ }
+
+ // - conversions from 'Class' to the redefinition type
+ if (RHSType->isObjCClassType() &&
+ Context.hasSameType(LHSType,
+ Context.getObjCClassRedefinitionType())) {
+ Kind = CK_BitCast;
+ return Compatible;
+ }
+
+ Kind = CK_BitCast;
+ return IncompatiblePointer;
+ }
+
+ // U^ -> void*
+ if (RHSType->getAs<BlockPointerType>()) {
+ if (LHSPointer->getPointeeType()->isVoidType()) {
+ Kind = CK_BitCast;
+ return Compatible;
+ }
+ }
+
+ return Incompatible;
+ }
+
+ // Conversions to block pointers.
+ if (isa<BlockPointerType>(LHSType)) {
+ // U^ -> T^
+ if (RHSType->isBlockPointerType()) {
+ Kind = CK_BitCast;
+ return checkBlockPointerTypesForAssignment(*this, LHSType, RHSType);
+ }
+
+ // int or null -> T^
+ if (RHSType->isIntegerType()) {
+ Kind = CK_IntegralToPointer; // FIXME: null
+ return IntToBlockPointer;
+ }
+
+ // id -> T^
+ if (getLangOpts().ObjC1 && RHSType->isObjCIdType()) {
+ Kind = CK_AnyPointerToBlockPointerCast;
+ return Compatible;
+ }
+
+ // void* -> T^
+ if (const PointerType *RHSPT = RHSType->getAs<PointerType>())
+ if (RHSPT->getPointeeType()->isVoidType()) {
+ Kind = CK_AnyPointerToBlockPointerCast;
+ return Compatible;
+ }
+
+ return Incompatible;
+ }
+
+ // Conversions to Objective-C pointers.
+ if (isa<ObjCObjectPointerType>(LHSType)) {
+ // A* -> B*
+ if (RHSType->isObjCObjectPointerType()) {
+ Kind = CK_BitCast;
+ Sema::AssignConvertType result =
+ checkObjCPointerTypesForAssignment(*this, LHSType, RHSType);
+ if (getLangOpts().ObjCAutoRefCount &&
+ result == Compatible &&
+ !CheckObjCARCUnavailableWeakConversion(OrigLHSType, RHSType))
+ result = IncompatibleObjCWeakRef;
+ return result;
+ }
+
+ // int or null -> A*
+ if (RHSType->isIntegerType()) {
+ Kind = CK_IntegralToPointer; // FIXME: null
+ return IntToPointer;
+ }
+
+ // In general, C pointers are not compatible with ObjC object pointers,
+ // with two exceptions:
+ if (isa<PointerType>(RHSType)) {
+ Kind = CK_CPointerToObjCPointerCast;
+
+ // - conversions from 'void*'
+ if (RHSType->isVoidPointerType()) {
+ return Compatible;
+ }
+
+ // - conversions to 'Class' from its redefinition type
+ if (LHSType->isObjCClassType() &&
+ Context.hasSameType(RHSType,
+ Context.getObjCClassRedefinitionType())) {
+ return Compatible;
+ }
+
+ return IncompatiblePointer;
+ }
+
+ // T^ -> A*
+ if (RHSType->isBlockPointerType()) {
+ maybeExtendBlockObject(*this, RHS);
+ Kind = CK_BlockPointerToObjCPointerCast;
+ return Compatible;
+ }
+
+ return Incompatible;
+ }
+
+ // Conversions from pointers that are not covered by the above.
+ if (isa<PointerType>(RHSType)) {
+ // T* -> _Bool
+ if (LHSType == Context.BoolTy) {
+ Kind = CK_PointerToBoolean;
+ return Compatible;
+ }
+
+ // T* -> int
+ if (LHSType->isIntegerType()) {
+ Kind = CK_PointerToIntegral;
+ return PointerToInt;
+ }
+
+ return Incompatible;
+ }
+
+ // Conversions from Objective-C pointers that are not covered by the above.
+ if (isa<ObjCObjectPointerType>(RHSType)) {
+ // T* -> _Bool
+ if (LHSType == Context.BoolTy) {
+ Kind = CK_PointerToBoolean;
+ return Compatible;
+ }
+
+ // T* -> int
+ if (LHSType->isIntegerType()) {
+ Kind = CK_PointerToIntegral;
+ return PointerToInt;
+ }
+
+ return Incompatible;
+ }
+
+ // struct A -> struct B
+ if (isa<TagType>(LHSType) && isa<TagType>(RHSType)) {
+ if (Context.typesAreCompatible(LHSType, RHSType)) {
+ Kind = CK_NoOp;
+ return Compatible;
+ }
+ }
+
+ return Incompatible;
+}
+
+/// \brief Constructs a transparent union from an expression that is
+/// used to initialize the transparent union.
+static void ConstructTransparentUnion(Sema &S, ASTContext &C,
+ ExprResult &EResult, QualType UnionType,
+ FieldDecl *Field) {
+ // Build an initializer list that designates the appropriate member
+ // of the transparent union.
+ Expr *E = EResult.take();
+ InitListExpr *Initializer = new (C) InitListExpr(C, SourceLocation(),
+ &E, 1,
+ SourceLocation());
+ Initializer->setType(UnionType);
+ Initializer->setInitializedFieldInUnion(Field);
+
+ // Build a compound literal constructing a value of the transparent
+ // union type from this initializer list.
+ TypeSourceInfo *unionTInfo = C.getTrivialTypeSourceInfo(UnionType);
+ EResult = S.Owned(
+ new (C) CompoundLiteralExpr(SourceLocation(), unionTInfo, UnionType,
+ VK_RValue, Initializer, false));
+}
+
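+/// Illustrative example of the GCC transparent_union extension handled by
+/// CheckTransparentUnionArgumentConstraints below (type names are hypothetical):
+///   typedef union { int *ip; float *fp; } U __attribute__((transparent_union));
+///   void f(U u);
+///   int *p; f(p);   // 'p' initializes the 'ip' member of the union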
+Sema::AssignConvertType
+Sema::CheckTransparentUnionArgumentConstraints(QualType ArgType,
+ ExprResult &RHS) {
+ QualType RHSType = RHS.get()->getType();
+
+ // If the ArgType is a Union type, we want to handle a potential
+ // transparent_union GCC extension.
+ const RecordType *UT = ArgType->getAsUnionType();
+ if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ return Incompatible;
+
+ // The field to initialize within the transparent union.
+ RecordDecl *UD = UT->getDecl();
+ FieldDecl *InitField = 0;
+ // It's compatible if the expression matches any of the fields.
+ for (RecordDecl::field_iterator it = UD->field_begin(),
+ itend = UD->field_end();
+ it != itend; ++it) {
+ if (it->getType()->isPointerType()) {
+ // If the transparent union contains a pointer type, we allow:
+ // 1) void pointer
+ // 2) null pointer constant
+ if (RHSType->isPointerType())
+ if (RHSType->castAs<PointerType>()->getPointeeType()->isVoidType()) {
+ RHS = ImpCastExprToType(RHS.take(), it->getType(), CK_BitCast);
+ InitField = *it;
+ break;
+ }
+
+ if (RHS.get()->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull)) {
+ RHS = ImpCastExprToType(RHS.take(), it->getType(),
+ CK_NullToPointer);
+ InitField = *it;
+ break;
+ }
+ }
+
+ CastKind Kind = CK_Invalid;
+ if (CheckAssignmentConstraints(it->getType(), RHS, Kind)
+ == Compatible) {
+ RHS = ImpCastExprToType(RHS.take(), it->getType(), Kind);
+ InitField = *it;
+ break;
+ }
+ }
+
+ if (!InitField)
+ return Incompatible;
+
+ ConstructTransparentUnion(*this, Context, RHS, ArgType, InitField);
+ return Compatible;
+}
+
+Sema::AssignConvertType
+Sema::CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS,
+ bool Diagnose) {
+ if (getLangOpts().CPlusPlus) {
+ if (!LHSType->isRecordType() && !LHSType->isAtomicType()) {
+ // C++ 5.17p3: If the left operand is not of class type, the
+ // expression is implicitly converted (C++ 4) to the
+ // cv-unqualified type of the left operand.
+ ExprResult Res;
+ if (Diagnose) {
+ Res = PerformImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(),
+ AA_Assigning);
+ } else {
+ ImplicitConversionSequence ICS =
+ TryImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(),
+ /*SuppressUserConversions=*/false,
+ /*AllowExplicit=*/false,
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false,
+ /*AllowObjCWritebackConversion=*/false);
+ if (ICS.isFailure())
+ return Incompatible;
+ Res = PerformImplicitConversion(RHS.get(), LHSType.getUnqualifiedType(),
+ ICS, AA_Assigning);
+ }
+ if (Res.isInvalid())
+ return Incompatible;
+ Sema::AssignConvertType result = Compatible;
+ if (getLangOpts().ObjCAutoRefCount &&
+ !CheckObjCARCUnavailableWeakConversion(LHSType,
+ RHS.get()->getType()))
+ result = IncompatibleObjCWeakRef;
+ RHS = move(Res);
+ return result;
+ }
+
+ // FIXME: Currently, we fall through and treat C++ classes like C
+ // structures.
+ // FIXME: We also fall through for atomics; not sure what should
+ // happen there, though.
+ }
+
+ // C99 6.5.16.1p1: the left operand is a pointer and the right is
+ // a null pointer constant.
+ if ((LHSType->isPointerType() ||
+ LHSType->isObjCObjectPointerType() ||
+ LHSType->isBlockPointerType())
+ && RHS.get()->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull)) {
+ RHS = ImpCastExprToType(RHS.take(), LHSType, CK_NullToPointer);
+ return Compatible;
+ }
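+  // e.g. for 'int *p;', the assignment 'p = 0;' takes this path: the null
+  // pointer constant is converted with CK_NullToPointer.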
+
+  // This check seems unnatural; however, it is necessary to ensure the proper
+ // conversion of functions/arrays. If the conversion were done for all
+ // DeclExpr's (created by ActOnIdExpression), it would mess up the unary
+ // expressions that suppress this implicit conversion (&, sizeof).
+ //
+ // Suppress this for references: C++ 8.5.3p5.
+ if (!LHSType->isReferenceType()) {
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.take());
+ if (RHS.isInvalid())
+ return Incompatible;
+ }
+
+ CastKind Kind = CK_Invalid;
+ Sema::AssignConvertType result =
+ CheckAssignmentConstraints(LHSType, RHS, Kind);
+
+ // C99 6.5.16.1p2: The value of the right operand is converted to the
+ // type of the assignment expression.
+ // CheckAssignmentConstraints allows the left-hand side to be a reference,
+ // so that we can use references in built-in functions even in C.
+ // The getNonReferenceType() call makes sure that the resulting expression
+ // does not have reference type.
+ if (result != Incompatible && RHS.get()->getType() != LHSType)
+ RHS = ImpCastExprToType(RHS.take(),
+ LHSType.getNonLValueExprType(Context), Kind);
+ return result;
+}
+
+QualType Sema::InvalidOperands(SourceLocation Loc, ExprResult &LHS,
+ ExprResult &RHS) {
+ Diag(Loc, diag::err_typecheck_invalid_operands)
+ << LHS.get()->getType() << RHS.get()->getType()
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return QualType();
+}
+
+QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, bool IsCompAssign) {
+ if (!IsCompAssign) {
+ LHS = DefaultFunctionArrayLvalueConversion(LHS.take());
+ if (LHS.isInvalid())
+ return QualType();
+ }
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.take());
+ if (RHS.isInvalid())
+ return QualType();
+
+ // For conversion purposes, we ignore any qualifiers.
+ // For example, "const float" and "float" are equivalent.
+ QualType LHSType =
+ Context.getCanonicalType(LHS.get()->getType()).getUnqualifiedType();
+ QualType RHSType =
+ Context.getCanonicalType(RHS.get()->getType()).getUnqualifiedType();
+
+ // If the vector types are identical, return.
+ if (LHSType == RHSType)
+ return LHSType;
+
+ // Handle the case of equivalent AltiVec and GCC vector types
+ if (LHSType->isVectorType() && RHSType->isVectorType() &&
+ Context.areCompatibleVectorTypes(LHSType, RHSType)) {
+ if (LHSType->isExtVectorType()) {
+ RHS = ImpCastExprToType(RHS.take(), LHSType, CK_BitCast);
+ return LHSType;
+ }
+
+ if (!IsCompAssign)
+ LHS = ImpCastExprToType(LHS.take(), RHSType, CK_BitCast);
+ return RHSType;
+ }
+
+ if (getLangOpts().LaxVectorConversions &&
+ Context.getTypeSize(LHSType) == Context.getTypeSize(RHSType)) {
+ // If we are allowing lax vector conversions, and LHS and RHS are both
+ // vectors, the total size only needs to be the same. This is a
+ // bitcast; no bits are changed but the result type is different.
+ // FIXME: Should we really be allowing this?
+ RHS = ImpCastExprToType(RHS.take(), LHSType, CK_BitCast);
+ return LHSType;
+ }
+
+  // Canonicalize the ExtVector to the LHS; remember if we swapped so we can
+  // swap back (so that we don't reverse the inputs to a subtract, for
+  // instance).
+ bool swapped = false;
+ if (RHSType->isExtVectorType() && !IsCompAssign) {
+ swapped = true;
+ std::swap(RHS, LHS);
+ std::swap(RHSType, LHSType);
+ }
+
+ // Handle the case of an ext vector and scalar.
+ if (const ExtVectorType *LV = LHSType->getAs<ExtVectorType>()) {
+ QualType EltTy = LV->getElementType();
+ if (EltTy->isIntegralType(Context) && RHSType->isIntegralType(Context)) {
+ int order = Context.getIntegerTypeOrder(EltTy, RHSType);
+ if (order > 0)
+ RHS = ImpCastExprToType(RHS.take(), EltTy, CK_IntegralCast);
+ if (order >= 0) {
+ RHS = ImpCastExprToType(RHS.take(), LHSType, CK_VectorSplat);
+ if (swapped) std::swap(RHS, LHS);
+ return LHSType;
+ }
+ }
+ if (EltTy->isRealFloatingType() && RHSType->isScalarType() &&
+ RHSType->isRealFloatingType()) {
+ int order = Context.getFloatingTypeOrder(EltTy, RHSType);
+ if (order > 0)
+ RHS = ImpCastExprToType(RHS.take(), EltTy, CK_FloatingCast);
+ if (order >= 0) {
+ RHS = ImpCastExprToType(RHS.take(), LHSType, CK_VectorSplat);
+ if (swapped) std::swap(RHS, LHS);
+ return LHSType;
+ }
+ }
+ }
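+  // e.g. for an ext vector 'float4 v', the scalar in 'v + 2.0f' is splatted
+  // to float4 (CK_VectorSplat) so that both operands have the vector type.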
+
+ // Vectors of different size or scalar and non-ext-vector are errors.
+ if (swapped) std::swap(RHS, LHS);
+ Diag(Loc, diag::err_typecheck_vector_not_convertable)
+ << LHS.get()->getType() << RHS.get()->getType()
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return QualType();
+}
+
+// checkArithmeticNull - Detect when a NULL constant is used improperly in an
+// expression. These are mainly cases where the null pointer is used as an
+// integer instead of a pointer.
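+// For example, '1 + NULL' (where NULL expands to GNU __null) is flagged by
+// the arithmetic warning below.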
+static void checkArithmeticNull(Sema &S, ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, bool IsCompare) {
+ // The canonical way to check for a GNU null is with isNullPointerConstant,
+ // but we use a bit of a hack here for speed; this is a relatively
+ // hot path, and isNullPointerConstant is slow.
+ bool LHSNull = isa<GNUNullExpr>(LHS.get()->IgnoreParenImpCasts());
+ bool RHSNull = isa<GNUNullExpr>(RHS.get()->IgnoreParenImpCasts());
+
+ QualType NonNullType = LHSNull ? RHS.get()->getType() : LHS.get()->getType();
+
+ // Avoid analyzing cases where the result will either be invalid (and
+ // diagnosed as such) or entirely valid and not something to warn about.
+ if ((!LHSNull && !RHSNull) || NonNullType->isBlockPointerType() ||
+ NonNullType->isMemberPointerType() || NonNullType->isFunctionType())
+ return;
+
+  // Arithmetic (non-comparison) operations involving a null pointer constant
+  // are always suspicious, no matter what the other operand is.
+ if (!IsCompare) {
+ S.Diag(Loc, diag::warn_null_in_arithmetic_operation)
+ << (LHSNull ? LHS.get()->getSourceRange() : SourceRange())
+ << (RHSNull ? RHS.get()->getSourceRange() : SourceRange());
+ return;
+ }
+
+ // The rest of the operations only make sense with a null pointer
+ // if the other expression is a pointer.
+ if (LHSNull == RHSNull || NonNullType->isAnyPointerType() ||
+ NonNullType->canDecayToPointerType())
+ return;
+
+ S.Diag(Loc, diag::warn_null_in_comparison_operation)
+ << LHSNull /* LHS is NULL */ << NonNullType
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+}
+
+QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ bool IsCompAssign, bool IsDiv) {
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType())
+ return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign);
+
+ QualType compType = UsualArithmeticConversions(LHS, RHS, IsCompAssign);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+
+ if (!LHS.get()->getType()->isArithmeticType() ||
+ !RHS.get()->getType()->isArithmeticType()) {
+ if (IsCompAssign &&
+ LHS.get()->getType()->isAtomicType() &&
+ RHS.get()->getType()->isArithmeticType())
+ return compType;
+ return InvalidOperands(Loc, LHS, RHS);
+ }
+
+ // Check for division by zero.
+ if (IsDiv &&
+ RHS.get()->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNotNull))
+ DiagRuntimeBehavior(Loc, RHS.get(), PDiag(diag::warn_division_by_zero)
+ << RHS.get()->getSourceRange());
+
+ return compType;
+}
+
+QualType Sema::CheckRemainderOperands(
+ ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign) {
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType()) {
+ if (LHS.get()->getType()->hasIntegerRepresentation() &&
+ RHS.get()->getType()->hasIntegerRepresentation())
+ return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign);
+ return InvalidOperands(Loc, LHS, RHS);
+ }
+
+ QualType compType = UsualArithmeticConversions(LHS, RHS, IsCompAssign);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+
+ if (!LHS.get()->getType()->isIntegerType() ||
+ !RHS.get()->getType()->isIntegerType())
+ return InvalidOperands(Loc, LHS, RHS);
+
+ // Check for remainder by zero.
+ if (RHS.get()->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNotNull))
+ DiagRuntimeBehavior(Loc, RHS.get(), PDiag(diag::warn_remainder_by_zero)
+ << RHS.get()->getSourceRange());
+
+ return compType;
+}
+
+/// \brief Diagnose invalid arithmetic on two void pointers.
+static void diagnoseArithmeticOnTwoVoidPointers(Sema &S, SourceLocation Loc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ S.Diag(Loc, S.getLangOpts().CPlusPlus
+ ? diag::err_typecheck_pointer_arith_void_type
+ : diag::ext_gnu_void_ptr)
+ << 1 /* two pointers */ << LHSExpr->getSourceRange()
+ << RHSExpr->getSourceRange();
+}
+
+/// \brief Diagnose invalid arithmetic on a void pointer.
+static void diagnoseArithmeticOnVoidPointer(Sema &S, SourceLocation Loc,
+ Expr *Pointer) {
+ S.Diag(Loc, S.getLangOpts().CPlusPlus
+ ? diag::err_typecheck_pointer_arith_void_type
+ : diag::ext_gnu_void_ptr)
+ << 0 /* one pointer */ << Pointer->getSourceRange();
+}
+
+/// \brief Diagnose invalid arithmetic on two function pointers.
+static void diagnoseArithmeticOnTwoFunctionPointers(Sema &S, SourceLocation Loc,
+ Expr *LHS, Expr *RHS) {
+ assert(LHS->getType()->isAnyPointerType());
+ assert(RHS->getType()->isAnyPointerType());
+ S.Diag(Loc, S.getLangOpts().CPlusPlus
+ ? diag::err_typecheck_pointer_arith_function_type
+ : diag::ext_gnu_ptr_func_arith)
+ << 1 /* two pointers */ << LHS->getType()->getPointeeType()
+ // We only show the second type if it differs from the first.
+ << (unsigned)!S.Context.hasSameUnqualifiedType(LHS->getType(),
+ RHS->getType())
+ << RHS->getType()->getPointeeType()
+ << LHS->getSourceRange() << RHS->getSourceRange();
+}
+
+/// \brief Diagnose invalid arithmetic on a function pointer.
+static void diagnoseArithmeticOnFunctionPointer(Sema &S, SourceLocation Loc,
+ Expr *Pointer) {
+ assert(Pointer->getType()->isAnyPointerType());
+ S.Diag(Loc, S.getLangOpts().CPlusPlus
+ ? diag::err_typecheck_pointer_arith_function_type
+ : diag::ext_gnu_ptr_func_arith)
+ << 0 /* one pointer */ << Pointer->getType()->getPointeeType()
+ << 0 /* one pointer, so only one type */
+ << Pointer->getSourceRange();
+}
+
+/// \brief Emit error if Operand is incomplete pointer type
+///
+/// \returns True if pointer has incomplete type
+static bool checkArithmeticIncompletePointerType(Sema &S, SourceLocation Loc,
+ Expr *Operand) {
+ if ((Operand->getType()->isPointerType() &&
+ !Operand->getType()->isDependentType()) ||
+ Operand->getType()->isObjCObjectPointerType()) {
+ QualType PointeeTy = Operand->getType()->getPointeeType();
+ if (S.RequireCompleteType(
+ Loc, PointeeTy,
+ S.PDiag(diag::err_typecheck_arithmetic_incomplete_type)
+ << PointeeTy << Operand->getSourceRange()))
+ return true;
+ }
+ return false;
+}
+
+/// \brief Check the validity of an arithmetic pointer operand.
+///
+/// If the operand has pointer type, this code will check for pointer types
+/// which are invalid in arithmetic operations. These will be diagnosed
+/// appropriately, including whether or not the use is supported as an
+/// extension.
+///
+/// \returns True when the operand is valid to use (even if as an extension).
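+///
+/// For example, 'void *p; p + 1;' is accepted as a GNU extension in C
+/// (ext_gnu_void_ptr) but is an error in C++
+/// (err_typecheck_pointer_arith_void_type).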
+static bool checkArithmeticOpPointerOperand(Sema &S, SourceLocation Loc,
+ Expr *Operand) {
+ if (!Operand->getType()->isAnyPointerType()) return true;
+
+ QualType PointeeTy = Operand->getType()->getPointeeType();
+ if (PointeeTy->isVoidType()) {
+ diagnoseArithmeticOnVoidPointer(S, Loc, Operand);
+ return !S.getLangOpts().CPlusPlus;
+ }
+ if (PointeeTy->isFunctionType()) {
+ diagnoseArithmeticOnFunctionPointer(S, Loc, Operand);
+ return !S.getLangOpts().CPlusPlus;
+ }
+
+ if (checkArithmeticIncompletePointerType(S, Loc, Operand)) return false;
+
+ return true;
+}
+
+/// \brief Check the validity of a binary arithmetic operation w.r.t. pointer
+/// operands.
+///
+/// This routine will diagnose any invalid arithmetic on pointer operands much
+/// like \see checkArithmeticOpPointerOperand. However, it has special logic
+/// for emitting a single diagnostic even for operations where both LHS and RHS
+/// are (potentially problematic) pointers.
+///
+/// \returns True when the operand is valid to use (even if as an extension).
+static bool checkArithmeticBinOpPointerOperands(Sema &S, SourceLocation Loc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ bool isLHSPointer = LHSExpr->getType()->isAnyPointerType();
+ bool isRHSPointer = RHSExpr->getType()->isAnyPointerType();
+ if (!isLHSPointer && !isRHSPointer) return true;
+
+ QualType LHSPointeeTy, RHSPointeeTy;
+ if (isLHSPointer) LHSPointeeTy = LHSExpr->getType()->getPointeeType();
+ if (isRHSPointer) RHSPointeeTy = RHSExpr->getType()->getPointeeType();
+
+ // Check for arithmetic on pointers to incomplete types.
+ bool isLHSVoidPtr = isLHSPointer && LHSPointeeTy->isVoidType();
+ bool isRHSVoidPtr = isRHSPointer && RHSPointeeTy->isVoidType();
+ if (isLHSVoidPtr || isRHSVoidPtr) {
+ if (!isRHSVoidPtr) diagnoseArithmeticOnVoidPointer(S, Loc, LHSExpr);
+ else if (!isLHSVoidPtr) diagnoseArithmeticOnVoidPointer(S, Loc, RHSExpr);
+ else diagnoseArithmeticOnTwoVoidPointers(S, Loc, LHSExpr, RHSExpr);
+
+ return !S.getLangOpts().CPlusPlus;
+ }
+
+ bool isLHSFuncPtr = isLHSPointer && LHSPointeeTy->isFunctionType();
+ bool isRHSFuncPtr = isRHSPointer && RHSPointeeTy->isFunctionType();
+ if (isLHSFuncPtr || isRHSFuncPtr) {
+ if (!isRHSFuncPtr) diagnoseArithmeticOnFunctionPointer(S, Loc, LHSExpr);
+ else if (!isLHSFuncPtr) diagnoseArithmeticOnFunctionPointer(S, Loc,
+ RHSExpr);
+ else diagnoseArithmeticOnTwoFunctionPointers(S, Loc, LHSExpr, RHSExpr);
+
+ return !S.getLangOpts().CPlusPlus;
+ }
+
+ if (checkArithmeticIncompletePointerType(S, Loc, LHSExpr)) return false;
+ if (checkArithmeticIncompletePointerType(S, Loc, RHSExpr)) return false;
+
+ return true;
+}
+
+/// \brief Check for arithmetic on pointers to Objective-C interfaces, which
+/// is not allowed under the non-fragile ABI (the interface size is not fixed).
+static bool checkArithmethicPointerOnNonFragileABI(Sema &S,
+ SourceLocation OpLoc,
+ Expr *Op) {
+ assert(Op->getType()->isAnyPointerType());
+ QualType PointeeTy = Op->getType()->getPointeeType();
+ if (!PointeeTy->isObjCObjectType() || !S.LangOpts.ObjCNonFragileABI)
+ return true;
+
+ S.Diag(OpLoc, diag::err_arithmetic_nonfragile_interface)
+ << PointeeTy << Op->getSourceRange();
+ return false;
+}
+
+/// diagnoseStringPlusInt - Emit a warning when adding an integer to a string
+/// literal.
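+///
+/// For example, '"abc" + 5' indexes past the end of the literal and produces
+/// warn_string_plus_int, with a fix-it suggesting the array form '&"abc"[5]'.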
+static void diagnoseStringPlusInt(Sema &Self, SourceLocation OpLoc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ StringLiteral* StrExpr = dyn_cast<StringLiteral>(LHSExpr->IgnoreImpCasts());
+ Expr* IndexExpr = RHSExpr;
+ if (!StrExpr) {
+ StrExpr = dyn_cast<StringLiteral>(RHSExpr->IgnoreImpCasts());
+ IndexExpr = LHSExpr;
+ }
+
+ bool IsStringPlusInt = StrExpr &&
+ IndexExpr->getType()->isIntegralOrUnscopedEnumerationType();
+ if (!IsStringPlusInt)
+ return;
+
+ llvm::APSInt index;
+ if (IndexExpr->EvaluateAsInt(index, Self.getASTContext())) {
+ unsigned StrLenWithNull = StrExpr->getLength() + 1;
+ if (index.isNonNegative() &&
+ index <= llvm::APSInt(llvm::APInt(index.getBitWidth(), StrLenWithNull),
+ index.isUnsigned()))
+ return;
+ }
+
+ SourceRange DiagRange(LHSExpr->getLocStart(), RHSExpr->getLocEnd());
+ Self.Diag(OpLoc, diag::warn_string_plus_int)
+ << DiagRange << IndexExpr->IgnoreImpCasts()->getType();
+
+ // Only print a fixit for "str" + int, not for int + "str".
+ if (IndexExpr == RHSExpr) {
+ SourceLocation EndLoc = Self.PP.getLocForEndOfToken(RHSExpr->getLocEnd());
+ Self.Diag(OpLoc, diag::note_string_plus_int_silence)
+ << FixItHint::CreateInsertion(LHSExpr->getLocStart(), "&")
+ << FixItHint::CreateReplacement(SourceRange(OpLoc), "[")
+ << FixItHint::CreateInsertion(EndLoc, "]");
+ } else
+ Self.Diag(OpLoc, diag::note_string_plus_int_silence);
+}
+
+/// \brief Emit error when two pointers are incompatible.
+static void diagnosePointerIncompatibility(Sema &S, SourceLocation Loc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ assert(LHSExpr->getType()->isAnyPointerType());
+ assert(RHSExpr->getType()->isAnyPointerType());
+ S.Diag(Loc, diag::err_typecheck_sub_ptr_compatible)
+ << LHSExpr->getType() << RHSExpr->getType() << LHSExpr->getSourceRange()
+ << RHSExpr->getSourceRange();
+}
+
+QualType Sema::CheckAdditionOperands( // C99 6.5.6
+ ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc,
+ QualType* CompLHSTy) {
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType()) {
+ QualType compType = CheckVectorOperands(LHS, RHS, Loc, CompLHSTy);
+ if (CompLHSTy) *CompLHSTy = compType;
+ return compType;
+ }
+
+ QualType compType = UsualArithmeticConversions(LHS, RHS, CompLHSTy);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+
+ // Diagnose "string literal" '+' int.
+ if (Opc == BO_Add)
+ diagnoseStringPlusInt(*this, Loc, LHS.get(), RHS.get());
+
+  // Handle the common case first (both operands are arithmetic).
+ if (LHS.get()->getType()->isArithmeticType() &&
+ RHS.get()->getType()->isArithmeticType()) {
+ if (CompLHSTy) *CompLHSTy = compType;
+ return compType;
+ }
+
+ if (LHS.get()->getType()->isAtomicType() &&
+ RHS.get()->getType()->isArithmeticType()) {
+    if (CompLHSTy) *CompLHSTy = LHS.get()->getType();
+ return compType;
+ }
+
+ // Put any potential pointer into PExp
+ Expr* PExp = LHS.get(), *IExp = RHS.get();
+ if (IExp->getType()->isAnyPointerType())
+ std::swap(PExp, IExp);
+
+ if (!PExp->getType()->isAnyPointerType())
+ return InvalidOperands(Loc, LHS, RHS);
+
+ if (!IExp->getType()->isIntegerType())
+ return InvalidOperands(Loc, LHS, RHS);
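+  // (e.g. 'p + 1.5' for a pointer 'p' is rejected above, since the non-pointer
+  // operand must have integer type.)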
+
+ if (!checkArithmeticOpPointerOperand(*this, Loc, PExp))
+ return QualType();
+
+ // Diagnose bad cases where we step over interface counts.
+ if (!checkArithmethicPointerOnNonFragileABI(*this, Loc, PExp))
+ return QualType();
+
+  // Check array bounds for pointer arithmetic
+ CheckArrayAccess(PExp, IExp);
+
+ if (CompLHSTy) {
+ QualType LHSTy = Context.isPromotableBitField(LHS.get());
+ if (LHSTy.isNull()) {
+ LHSTy = LHS.get()->getType();
+ if (LHSTy->isPromotableIntegerType())
+ LHSTy = Context.getPromotedIntegerType(LHSTy);
+ }
+ *CompLHSTy = LHSTy;
+ }
+
+ return PExp->getType();
+}
+
+// C99 6.5.6
+QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ QualType* CompLHSTy) {
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType()) {
+ QualType compType = CheckVectorOperands(LHS, RHS, Loc, CompLHSTy);
+ if (CompLHSTy) *CompLHSTy = compType;
+ return compType;
+ }
+
+ QualType compType = UsualArithmeticConversions(LHS, RHS, CompLHSTy);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+
+ // Enforce type constraints: C99 6.5.6p3.
+
+ // Handle the common case first (both operands are arithmetic).
+ if (LHS.get()->getType()->isArithmeticType() &&
+ RHS.get()->getType()->isArithmeticType()) {
+ if (CompLHSTy) *CompLHSTy = compType;
+ return compType;
+ }
+
+ if (LHS.get()->getType()->isAtomicType() &&
+ RHS.get()->getType()->isArithmeticType()) {
+    if (CompLHSTy) *CompLHSTy = LHS.get()->getType();
+ return compType;
+ }
+
+ // Either ptr - int or ptr - ptr.
+ if (LHS.get()->getType()->isAnyPointerType()) {
+ QualType lpointee = LHS.get()->getType()->getPointeeType();
+
+ // Diagnose bad cases where we step over interface counts.
+ if (!checkArithmethicPointerOnNonFragileABI(*this, Loc, LHS.get()))
+ return QualType();
+
+ // The result type of a pointer-int computation is the pointer type.
+ if (RHS.get()->getType()->isIntegerType()) {
+ if (!checkArithmeticOpPointerOperand(*this, Loc, LHS.get()))
+ return QualType();
+
+      // Check array bounds for pointer arithmetic
+ CheckArrayAccess(LHS.get(), RHS.get(), /*ArraySubscriptExpr*/0,
+ /*AllowOnePastEnd*/true, /*IndexNegated*/true);
+
+ if (CompLHSTy) *CompLHSTy = LHS.get()->getType();
+ return LHS.get()->getType();
+ }
+
+ // Handle pointer-pointer subtractions.
+ if (const PointerType *RHSPTy
+ = RHS.get()->getType()->getAs<PointerType>()) {
+ QualType rpointee = RHSPTy->getPointeeType();
+
+ if (getLangOpts().CPlusPlus) {
+ // Pointee types must be the same: C++ [expr.add]
+ if (!Context.hasSameUnqualifiedType(lpointee, rpointee)) {
+ diagnosePointerIncompatibility(*this, Loc, LHS.get(), RHS.get());
+ }
+ } else {
+ // Pointee types must be compatible C99 6.5.6p3
+ if (!Context.typesAreCompatible(
+ Context.getCanonicalType(lpointee).getUnqualifiedType(),
+ Context.getCanonicalType(rpointee).getUnqualifiedType())) {
+ diagnosePointerIncompatibility(*this, Loc, LHS.get(), RHS.get());
+ return QualType();
+ }
+ }
+
+ if (!checkArithmeticBinOpPointerOperands(*this, Loc,
+ LHS.get(), RHS.get()))
+ return QualType();
+
+ if (CompLHSTy) *CompLHSTy = LHS.get()->getType();
+ return Context.getPointerDiffType();
+ }
+ }
+
+ return InvalidOperands(Loc, LHS, RHS);
+}
+
+static bool isScopedEnumerationType(QualType T) {
+ if (const EnumType *ET = dyn_cast<EnumType>(T))
+ return ET->getDecl()->isScoped();
+ return false;
+}
+
+static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, unsigned Opc,
+ QualType LHSType) {
+ llvm::APSInt Right;
+ // Check right/shifter operand
+ if (RHS.get()->isValueDependent() ||
+ !RHS.get()->isIntegerConstantExpr(Right, S.Context))
+ return;
+
+ if (Right.isNegative()) {
+ S.DiagRuntimeBehavior(Loc, RHS.get(),
+ S.PDiag(diag::warn_shift_negative)
+ << RHS.get()->getSourceRange());
+ return;
+ }
+ llvm::APInt LeftBits(Right.getBitWidth(),
+ S.Context.getTypeSize(LHS.get()->getType()));
+ if (Right.uge(LeftBits)) {
+ S.DiagRuntimeBehavior(Loc, RHS.get(),
+ S.PDiag(diag::warn_shift_gt_typewidth)
+ << RHS.get()->getSourceRange());
+ return;
+ }
+ if (Opc != BO_Shl)
+ return;
+
+ // When left shifting an ICE which is signed, we can check for overflow which
+ // according to C++ has undefined behavior ([expr.shift] 5.8/2). Unsigned
+ // integers have defined behavior modulo one more than the maximum value
+ // representable in the result type, so never warn for those.
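+  // For example, with a 32-bit int, '1 << 31' merely sets the sign bit
+  // (warn_shift_result_sets_sign_bit below), while '2 << 31' does not fit at
+  // all (warn_shift_result_gt_typewidth).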
+ llvm::APSInt Left;
+ if (LHS.get()->isValueDependent() ||
+ !LHS.get()->isIntegerConstantExpr(Left, S.Context) ||
+ LHSType->hasUnsignedIntegerRepresentation())
+ return;
+ llvm::APInt ResultBits =
+ static_cast<llvm::APInt&>(Right) + Left.getMinSignedBits();
+ if (LeftBits.uge(ResultBits))
+ return;
+ llvm::APSInt Result = Left.extend(ResultBits.getLimitedValue());
+ Result = Result.shl(Right);
+
+ // Print the bit representation of the signed integer as an unsigned
+ // hexadecimal number.
+ SmallString<40> HexResult;
+ Result.toString(HexResult, 16, /*Signed =*/false, /*Literal =*/true);
+
+ // If we are only missing a sign bit, this is less likely to result in actual
+ // bugs -- if the result is cast back to an unsigned type, it will have the
+ // expected value. Thus we place this behind a different warning that can be
+ // turned off separately if needed.
+ if (LeftBits == ResultBits - 1) {
+ S.Diag(Loc, diag::warn_shift_result_sets_sign_bit)
+ << HexResult.str() << LHSType
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return;
+ }
+
+ S.Diag(Loc, diag::warn_shift_result_gt_typewidth)
+ << HexResult.str() << Result.getMinSignedBits() << LHSType
+ << Left.getBitWidth() << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+}
+
+// C99 6.5.7
+QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, unsigned Opc,
+ bool IsCompAssign) {
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+
+ // C99 6.5.7p2: Each of the operands shall have integer type.
+ if (!LHS.get()->getType()->hasIntegerRepresentation() ||
+ !RHS.get()->getType()->hasIntegerRepresentation())
+ return InvalidOperands(Loc, LHS, RHS);
+
+  // C++0x: Don't allow scoped enums. FIXME: Use something better than
+  // hasIntegerRepresentation() above, rather than this separate check.
+ if (isScopedEnumerationType(LHS.get()->getType()) ||
+ isScopedEnumerationType(RHS.get()->getType())) {
+ return InvalidOperands(Loc, LHS, RHS);
+ }
+
+ // Vector shifts promote their scalar inputs to vector type.
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType())
+ return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign);
+
+  // Shifts don't perform usual arithmetic conversions; they just do integer
+ // promotions on each operand. C99 6.5.7p3
+
+ // For the LHS, do usual unary conversions, but then reset them away
+ // if this is a compound assignment.
+ ExprResult OldLHS = LHS;
+ LHS = UsualUnaryConversions(LHS.take());
+ if (LHS.isInvalid())
+ return QualType();
+ QualType LHSType = LHS.get()->getType();
+ if (IsCompAssign) LHS = OldLHS;
+
+ // The RHS is simpler.
+ RHS = UsualUnaryConversions(RHS.take());
+ if (RHS.isInvalid())
+ return QualType();
+
+ // Sanity-check shift operands
+ DiagnoseBadShiftValues(*this, LHS, RHS, Loc, Opc, LHSType);
+
+ // "The type of the result is that of the promoted left operand."
+ return LHSType;
+}
+
+static bool IsWithinTemplateSpecialization(Decl *D) {
+ if (DeclContext *DC = D->getDeclContext()) {
+ if (isa<ClassTemplateSpecializationDecl>(DC))
+ return true;
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(DC))
+ return FD->isFunctionTemplateSpecialization();
+ }
+ return false;
+}
+
+/// If two different enums are compared, raise a warning.
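+/// e.g. comparing an 'enum Color' value with an 'enum Fruit' value produces
+/// warn_comparison_of_mixed_enum_types (the enum names are illustrative).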
+static void checkEnumComparison(Sema &S, SourceLocation Loc, ExprResult &LHS,
+ ExprResult &RHS) {
+ QualType LHSStrippedType = LHS.get()->IgnoreParenImpCasts()->getType();
+ QualType RHSStrippedType = RHS.get()->IgnoreParenImpCasts()->getType();
+
+ const EnumType *LHSEnumType = LHSStrippedType->getAs<EnumType>();
+ if (!LHSEnumType)
+ return;
+ const EnumType *RHSEnumType = RHSStrippedType->getAs<EnumType>();
+ if (!RHSEnumType)
+ return;
+
+ // Ignore anonymous enums.
+ if (!LHSEnumType->getDecl()->getIdentifier())
+ return;
+ if (!RHSEnumType->getDecl()->getIdentifier())
+ return;
+
+ if (S.Context.hasSameUnqualifiedType(LHSStrippedType, RHSStrippedType))
+ return;
+
+ S.Diag(Loc, diag::warn_comparison_of_mixed_enum_types)
+ << LHSStrippedType << RHSStrippedType
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+}
+
+/// \brief Diagnose bad pointer comparisons.
+static void diagnoseDistinctPointerComparison(Sema &S, SourceLocation Loc,
+ ExprResult &LHS, ExprResult &RHS,
+ bool IsError) {
+ S.Diag(Loc, IsError ? diag::err_typecheck_comparison_of_distinct_pointers
+ : diag::ext_typecheck_comparison_of_distinct_pointers)
+ << LHS.get()->getType() << RHS.get()->getType()
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+}
+
+/// \brief Returns false if the pointers are converted to a composite type,
+/// true otherwise.
+static bool convertPointersToCompositeType(Sema &S, SourceLocation Loc,
+ ExprResult &LHS, ExprResult &RHS) {
+ // C++ [expr.rel]p2:
+ // [...] Pointer conversions (4.10) and qualification
+ // conversions (4.4) are performed on pointer operands (or on
+ // a pointer operand and a null pointer constant) to bring
+ // them to their composite pointer type. [...]
+ //
+ // C++ [expr.eq]p1 uses the same notion for (in)equality
+ // comparisons of pointers.
+
+ // C++ [expr.eq]p2:
+ // In addition, pointers to members can be compared, or a pointer to
+ // member and a null pointer constant. Pointer to member conversions
+ // (4.11) and qualification conversions (4.4) are performed to bring
+ // them to a common type. If one operand is a null pointer constant,
+ // the common type is the type of the other operand. Otherwise, the
+ // common type is a pointer to member type similar (4.4) to the type
+ // of one of the operands, with a cv-qualification signature (4.4)
+ // that is the union of the cv-qualification signatures of the operand
+ // types.
+
+ QualType LHSType = LHS.get()->getType();
+ QualType RHSType = RHS.get()->getType();
+ assert((LHSType->isPointerType() && RHSType->isPointerType()) ||
+ (LHSType->isMemberPointerType() && RHSType->isMemberPointerType()));
+
+ bool NonStandardCompositeType = false;
+ bool *BoolPtr = S.isSFINAEContext() ? 0 : &NonStandardCompositeType;
+ QualType T = S.FindCompositePointerType(Loc, LHS, RHS, BoolPtr);
+ if (T.isNull()) {
+ diagnoseDistinctPointerComparison(S, Loc, LHS, RHS, /*isError*/true);
+ return true;
+ }
+
+ if (NonStandardCompositeType)
+ S.Diag(Loc, diag::ext_typecheck_comparison_of_distinct_pointers_nonstandard)
+ << LHSType << RHSType << T << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+
+ LHS = S.ImpCastExprToType(LHS.take(), T, CK_BitCast);
+ RHS = S.ImpCastExprToType(RHS.take(), T, CK_BitCast);
+ return false;
+}
+
+static void diagnoseFunctionPointerToVoidComparison(Sema &S, SourceLocation Loc,
+ ExprResult &LHS,
+ ExprResult &RHS,
+ bool IsError) {
+ S.Diag(Loc, IsError ? diag::err_typecheck_comparison_of_fptr_to_void
+ : diag::ext_typecheck_comparison_of_fptr_to_void)
+ << LHS.get()->getType() << RHS.get()->getType()
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+}
+
+// C99 6.5.8, C++ [expr.rel]
+QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc, unsigned OpaqueOpc,
+ bool IsRelational) {
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/true);
+
+ BinaryOperatorKind Opc = (BinaryOperatorKind) OpaqueOpc;
+
+ // Handle vector comparisons separately.
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType())
+ return CheckVectorCompareOperands(LHS, RHS, Loc, IsRelational);
+
+ QualType LHSType = LHS.get()->getType();
+ QualType RHSType = RHS.get()->getType();
+
+ Expr *LHSStripped = LHS.get()->IgnoreParenImpCasts();
+ Expr *RHSStripped = RHS.get()->IgnoreParenImpCasts();
+
+ checkEnumComparison(*this, Loc, LHS, RHS);
+
+ if (!LHSType->hasFloatingRepresentation() &&
+ !(LHSType->isBlockPointerType() && IsRelational) &&
+ !LHS.get()->getLocStart().isMacroID() &&
+ !RHS.get()->getLocStart().isMacroID()) {
+ // For non-floating point types, check for self-comparisons of the form
+ // x == x, x != x, x < x, etc. These always evaluate to a constant, and
+ // often indicate logic errors in the program.
+ //
+ // NOTE: Don't warn about comparison expressions resulting from macro
+ // expansion. Also don't warn about comparisons which are only self
+ // comparisons within a template specialization. The warnings should catch
+ // obvious cases in the definition of the template anyways. The idea is to
+ // warn when the typed comparison operator will always evaluate to the same
+ // result.
+ if (DeclRefExpr* DRL = dyn_cast<DeclRefExpr>(LHSStripped)) {
+ if (DeclRefExpr* DRR = dyn_cast<DeclRefExpr>(RHSStripped)) {
+ if (DRL->getDecl() == DRR->getDecl() &&
+ !IsWithinTemplateSpecialization(DRL->getDecl())) {
+ DiagRuntimeBehavior(Loc, 0, PDiag(diag::warn_comparison_always)
+ << 0 // self-
+ << (Opc == BO_EQ
+ || Opc == BO_LE
+ || Opc == BO_GE));
+ } else if (LHSType->isArrayType() && RHSType->isArrayType() &&
+ !DRL->getDecl()->getType()->isReferenceType() &&
+ !DRR->getDecl()->getType()->isReferenceType()) {
+ // what is it always going to eval to?
+ char always_evals_to;
+ switch(Opc) {
+ case BO_EQ: // e.g. array1 == array2
+ always_evals_to = 0; // false
+ break;
+ case BO_NE: // e.g. array1 != array2
+ always_evals_to = 1; // true
+ break;
+ default:
+ // best we can say is 'a constant'
+ always_evals_to = 2; // e.g. array1 <= array2
+ break;
+ }
+ DiagRuntimeBehavior(Loc, 0, PDiag(diag::warn_comparison_always)
+ << 1 // array
+ << always_evals_to);
+ }
+ }
+ }
+
+ if (isa<CastExpr>(LHSStripped))
+ LHSStripped = LHSStripped->IgnoreParenCasts();
+ if (isa<CastExpr>(RHSStripped))
+ RHSStripped = RHSStripped->IgnoreParenCasts();
+
+ // Warn about comparisons against a string constant (unless the other
+ // operand is null), the user probably wants strcmp.
+ Expr *literalString = 0;
+ Expr *literalStringStripped = 0;
+ if ((isa<StringLiteral>(LHSStripped) || isa<ObjCEncodeExpr>(LHSStripped)) &&
+ !RHSStripped->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull)) {
+ literalString = LHS.get();
+ literalStringStripped = LHSStripped;
+ } else if ((isa<StringLiteral>(RHSStripped) ||
+ isa<ObjCEncodeExpr>(RHSStripped)) &&
+ !LHSStripped->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull)) {
+ literalString = RHS.get();
+ literalStringStripped = RHSStripped;
+ }
+
+ if (literalString) {
+ std::string resultComparison;
+ switch (Opc) {
+ case BO_LT: resultComparison = ") < 0"; break;
+ case BO_GT: resultComparison = ") > 0"; break;
+ case BO_LE: resultComparison = ") <= 0"; break;
+ case BO_GE: resultComparison = ") >= 0"; break;
+ case BO_EQ: resultComparison = ") == 0"; break;
+ case BO_NE: resultComparison = ") != 0"; break;
+ default: llvm_unreachable("Invalid comparison operator");
+ }
+
+ DiagRuntimeBehavior(Loc, 0,
+ PDiag(diag::warn_stringcompare)
+ << isa<ObjCEncodeExpr>(literalStringStripped)
+ << literalString->getSourceRange());
+ }
+ }
+
+ // C99 6.5.8p3 / C99 6.5.9p4
+ if (LHS.get()->getType()->isArithmeticType() &&
+ RHS.get()->getType()->isArithmeticType()) {
+ UsualArithmeticConversions(LHS, RHS);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+ }
+ else {
+ LHS = UsualUnaryConversions(LHS.take());
+ if (LHS.isInvalid())
+ return QualType();
+
+ RHS = UsualUnaryConversions(RHS.take());
+ if (RHS.isInvalid())
+ return QualType();
+ }
+
+ LHSType = LHS.get()->getType();
+ RHSType = RHS.get()->getType();
+
+ // The result of comparisons is 'bool' in C++, 'int' in C.
+ QualType ResultTy = Context.getLogicalOperationType();
+
+ if (IsRelational) {
+ if (LHSType->isRealType() && RHSType->isRealType())
+ return ResultTy;
+ } else {
+ // Check for comparisons of floating point operands using != and ==.
+ if (LHSType->hasFloatingRepresentation())
+ CheckFloatComparison(Loc, LHS.get(), RHS.get());
+
+ if (LHSType->isArithmeticType() && RHSType->isArithmeticType())
+ return ResultTy;
+ }
+
+ bool LHSIsNull = LHS.get()->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull);
+ bool RHSIsNull = RHS.get()->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull);
+
+ // All of the following pointer-related warnings are GCC extensions, except
+ // when handling null pointer constants.
+ if (LHSType->isPointerType() && RHSType->isPointerType()) { // C99 6.5.8p2
+ QualType LCanPointeeTy =
+ LHSType->castAs<PointerType>()->getPointeeType().getCanonicalType();
+ QualType RCanPointeeTy =
+ RHSType->castAs<PointerType>()->getPointeeType().getCanonicalType();
+
+ if (getLangOpts().CPlusPlus) {
+ if (LCanPointeeTy == RCanPointeeTy)
+ return ResultTy;
+ if (!IsRelational &&
+ (LCanPointeeTy->isVoidType() || RCanPointeeTy->isVoidType())) {
+ // Valid unless comparison between non-null pointer and function pointer
+ // This is a gcc extension compatibility comparison.
+ // In a SFINAE context, we treat this as a hard error to maintain
+ // conformance with the C++ standard.
+ if ((LCanPointeeTy->isFunctionType() || RCanPointeeTy->isFunctionType())
+ && !LHSIsNull && !RHSIsNull) {
+ diagnoseFunctionPointerToVoidComparison(
+ *this, Loc, LHS, RHS, /*isError*/ isSFINAEContext());
+
+ if (isSFINAEContext())
+ return QualType();
+
+ RHS = ImpCastExprToType(RHS.take(), LHSType, CK_BitCast);
+ return ResultTy;
+ }
+ }
+
+ if (convertPointersToCompositeType(*this, Loc, LHS, RHS))
+ return QualType();
+ else
+ return ResultTy;
+ }
+ // C99 6.5.9p2 and C99 6.5.8p2
+ if (Context.typesAreCompatible(LCanPointeeTy.getUnqualifiedType(),
+ RCanPointeeTy.getUnqualifiedType())) {
+ // Valid unless a relational comparison of function pointers
+ if (IsRelational && LCanPointeeTy->isFunctionType()) {
+ Diag(Loc, diag::ext_typecheck_ordered_comparison_of_function_pointers)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ }
+ } else if (!IsRelational &&
+ (LCanPointeeTy->isVoidType() || RCanPointeeTy->isVoidType())) {
+ // Valid unless comparison between non-null pointer and function pointer
+ if ((LCanPointeeTy->isFunctionType() || RCanPointeeTy->isFunctionType())
+ && !LHSIsNull && !RHSIsNull)
+ diagnoseFunctionPointerToVoidComparison(*this, Loc, LHS, RHS,
+ /*isError*/false);
+ } else {
+ // Invalid
+ diagnoseDistinctPointerComparison(*this, Loc, LHS, RHS, /*isError*/false);
+ }
+ if (LCanPointeeTy != RCanPointeeTy) {
+ if (LHSIsNull && !RHSIsNull)
+ LHS = ImpCastExprToType(LHS.take(), RHSType, CK_BitCast);
+ else
+ RHS = ImpCastExprToType(RHS.take(), LHSType, CK_BitCast);
+ }
+ return ResultTy;
+ }
+
+ if (getLangOpts().CPlusPlus) {
+ // Comparison of nullptr_t with itself.
+ if (LHSType->isNullPtrType() && RHSType->isNullPtrType())
+ return ResultTy;
+
+ // Comparison of pointers with null pointer constants and equality
+ // comparisons of member pointers to null pointer constants.
+ if (RHSIsNull &&
+ ((LHSType->isAnyPointerType() || LHSType->isNullPtrType()) ||
+ (!IsRelational &&
+ (LHSType->isMemberPointerType() || LHSType->isBlockPointerType())))) {
+ RHS = ImpCastExprToType(RHS.take(), LHSType,
+ LHSType->isMemberPointerType()
+ ? CK_NullToMemberPointer
+ : CK_NullToPointer);
+ return ResultTy;
+ }
+ if (LHSIsNull &&
+ ((RHSType->isAnyPointerType() || RHSType->isNullPtrType()) ||
+ (!IsRelational &&
+ (RHSType->isMemberPointerType() || RHSType->isBlockPointerType())))) {
+ LHS = ImpCastExprToType(LHS.take(), RHSType,
+ RHSType->isMemberPointerType()
+ ? CK_NullToMemberPointer
+ : CK_NullToPointer);
+ return ResultTy;
+ }
+
+ // Comparison of member pointers.
+ if (!IsRelational &&
+ LHSType->isMemberPointerType() && RHSType->isMemberPointerType()) {
+ if (convertPointersToCompositeType(*this, Loc, LHS, RHS))
+ return QualType();
+ else
+ return ResultTy;
+ }
+
+ // Handle scoped enumeration types specifically, since they don't promote
+ // to integers.
+ if (LHS.get()->getType()->isEnumeralType() &&
+ Context.hasSameUnqualifiedType(LHS.get()->getType(),
+ RHS.get()->getType()))
+ return ResultTy;
+ }
+
+ // Handle block pointer types.
+ if (!IsRelational && LHSType->isBlockPointerType() &&
+ RHSType->isBlockPointerType()) {
+ QualType lpointee = LHSType->castAs<BlockPointerType>()->getPointeeType();
+ QualType rpointee = RHSType->castAs<BlockPointerType>()->getPointeeType();
+
+ if (!LHSIsNull && !RHSIsNull &&
+ !Context.typesAreCompatible(lpointee, rpointee)) {
+ Diag(Loc, diag::err_typecheck_comparison_of_distinct_blocks)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ }
+ RHS = ImpCastExprToType(RHS.take(), LHSType, CK_BitCast);
+ return ResultTy;
+ }
+
+ // Allow block pointers to be compared with null pointer constants.
+ if (!IsRelational
+ && ((LHSType->isBlockPointerType() && RHSType->isPointerType())
+ || (LHSType->isPointerType() && RHSType->isBlockPointerType()))) {
+ if (!LHSIsNull && !RHSIsNull) {
+ if (!((RHSType->isPointerType() && RHSType->castAs<PointerType>()
+ ->getPointeeType()->isVoidType())
+ || (LHSType->isPointerType() && LHSType->castAs<PointerType>()
+ ->getPointeeType()->isVoidType())))
+ Diag(Loc, diag::err_typecheck_comparison_of_distinct_blocks)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ }
+ if (LHSIsNull && !RHSIsNull)
+ LHS = ImpCastExprToType(LHS.take(), RHSType,
+ RHSType->isPointerType() ? CK_BitCast
+ : CK_AnyPointerToBlockPointerCast);
+ else
+ RHS = ImpCastExprToType(RHS.take(), LHSType,
+ LHSType->isPointerType() ? CK_BitCast
+ : CK_AnyPointerToBlockPointerCast);
+ return ResultTy;
+ }
+
+ if (LHSType->isObjCObjectPointerType() ||
+ RHSType->isObjCObjectPointerType()) {
+ const PointerType *LPT = LHSType->getAs<PointerType>();
+ const PointerType *RPT = RHSType->getAs<PointerType>();
+ if (LPT || RPT) {
+ bool LPtrToVoid = LPT ? LPT->getPointeeType()->isVoidType() : false;
+ bool RPtrToVoid = RPT ? RPT->getPointeeType()->isVoidType() : false;
+
+ if (!LPtrToVoid && !RPtrToVoid &&
+ !Context.typesAreCompatible(LHSType, RHSType)) {
+ diagnoseDistinctPointerComparison(*this, Loc, LHS, RHS,
+ /*isError*/false);
+ }
+ if (LHSIsNull && !RHSIsNull)
+ LHS = ImpCastExprToType(LHS.take(), RHSType,
+ RPT ? CK_BitCast :CK_CPointerToObjCPointerCast);
+ else
+ RHS = ImpCastExprToType(RHS.take(), LHSType,
+ LPT ? CK_BitCast :CK_CPointerToObjCPointerCast);
+ return ResultTy;
+ }
+ if (LHSType->isObjCObjectPointerType() &&
+ RHSType->isObjCObjectPointerType()) {
+ if (!Context.areComparableObjCPointerTypes(LHSType, RHSType))
+ diagnoseDistinctPointerComparison(*this, Loc, LHS, RHS,
+ /*isError*/false);
+ if (LHSIsNull && !RHSIsNull)
+ LHS = ImpCastExprToType(LHS.take(), RHSType, CK_BitCast);
+ else
+ RHS = ImpCastExprToType(RHS.take(), LHSType, CK_BitCast);
+ return ResultTy;
+ }
+ }
+ if ((LHSType->isAnyPointerType() && RHSType->isIntegerType()) ||
+ (LHSType->isIntegerType() && RHSType->isAnyPointerType())) {
+ unsigned DiagID = 0;
+ bool isError = false;
+ if ((LHSIsNull && LHSType->isIntegerType()) ||
+ (RHSIsNull && RHSType->isIntegerType())) {
+ if (IsRelational && !getLangOpts().CPlusPlus)
+ DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_and_zero;
+ } else if (IsRelational && !getLangOpts().CPlusPlus)
+ DiagID = diag::ext_typecheck_ordered_comparison_of_pointer_integer;
+ else if (getLangOpts().CPlusPlus) {
+ DiagID = diag::err_typecheck_comparison_of_pointer_integer;
+ isError = true;
+ } else
+ DiagID = diag::ext_typecheck_comparison_of_pointer_integer;
+
+ if (DiagID) {
+ Diag(Loc, DiagID)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ if (isError)
+ return QualType();
+ }
+
+ if (LHSType->isIntegerType())
+ LHS = ImpCastExprToType(LHS.take(), RHSType,
+ LHSIsNull ? CK_NullToPointer : CK_IntegralToPointer);
+ else
+ RHS = ImpCastExprToType(RHS.take(), LHSType,
+ RHSIsNull ? CK_NullToPointer : CK_IntegralToPointer);
+ return ResultTy;
+ }
+
+ // Handle block pointers.
+ if (!IsRelational && RHSIsNull
+ && LHSType->isBlockPointerType() && RHSType->isIntegerType()) {
+ RHS = ImpCastExprToType(RHS.take(), LHSType, CK_NullToPointer);
+ return ResultTy;
+ }
+ if (!IsRelational && LHSIsNull
+ && LHSType->isIntegerType() && RHSType->isBlockPointerType()) {
+ LHS = ImpCastExprToType(LHS.take(), RHSType, CK_NullToPointer);
+ return ResultTy;
+ }
+
+ return InvalidOperands(Loc, LHS, RHS);
+}
+
+// Return a signed type that is of identical size and number of elements.
+// For floating point vectors, return an integer type of identical size
+// and number of elements.
+QualType Sema::GetSignedVectorType(QualType V) {
+ const VectorType *VTy = V->getAs<VectorType>();
+ unsigned TypeSize = Context.getTypeSize(VTy->getElementType());
+ if (TypeSize == Context.getTypeSize(Context.CharTy))
+ return Context.getExtVectorType(Context.CharTy, VTy->getNumElements());
+ else if (TypeSize == Context.getTypeSize(Context.ShortTy))
+ return Context.getExtVectorType(Context.ShortTy, VTy->getNumElements());
+ else if (TypeSize == Context.getTypeSize(Context.IntTy))
+ return Context.getExtVectorType(Context.IntTy, VTy->getNumElements());
+ else if (TypeSize == Context.getTypeSize(Context.LongTy))
+ return Context.getExtVectorType(Context.LongTy, VTy->getNumElements());
+ assert(TypeSize == Context.getTypeSize(Context.LongLongTy) &&
+ "Unhandled vector element size in vector compare");
+ return Context.getExtVectorType(Context.LongLongTy, VTy->getNumElements());
+}
+
+/// CheckVectorCompareOperands - vector comparisons are a clang extension that
+/// operates on extended vector types. Instead of producing an IntTy result,
+/// like a scalar comparison, a vector comparison produces a vector of integer
+/// types.
+QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ bool IsRelational) {
+ // Check to make sure we're operating on vectors of the same type and width,
+  // allowing one side to be a scalar of the element type.
+ QualType vType = CheckVectorOperands(LHS, RHS, Loc, /*isCompAssign*/false);
+ if (vType.isNull())
+ return vType;
+
+ QualType LHSType = LHS.get()->getType();
+
+ // If AltiVec, the comparison results in a numeric type, i.e.
+ // bool for C++, int for C
+ if (vType->getAs<VectorType>()->getVectorKind() == VectorType::AltiVecVector)
+ return Context.getLogicalOperationType();
+
+ // For non-floating point types, check for self-comparisons of the form
+ // x == x, x != x, x < x, etc. These always evaluate to a constant, and
+ // often indicate logic errors in the program.
+ if (!LHSType->hasFloatingRepresentation()) {
+ if (DeclRefExpr* DRL
+ = dyn_cast<DeclRefExpr>(LHS.get()->IgnoreParenImpCasts()))
+ if (DeclRefExpr* DRR
+ = dyn_cast<DeclRefExpr>(RHS.get()->IgnoreParenImpCasts()))
+ if (DRL->getDecl() == DRR->getDecl())
+ DiagRuntimeBehavior(Loc, 0,
+ PDiag(diag::warn_comparison_always)
+ << 0 // self-
+ << 2 // "a constant"
+ );
+ }
+
+ // Check for comparisons of floating point operands using != and ==.
+ if (!IsRelational && LHSType->hasFloatingRepresentation()) {
+ assert (RHS.get()->getType()->hasFloatingRepresentation());
+ CheckFloatComparison(Loc, LHS.get(), RHS.get());
+ }
+
+ // Return a signed type for the vector.
+ return GetSignedVectorType(LHSType);
+}
+
+QualType Sema::CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc) {
+ // Ensure that either both operands are of the same vector type, or
+ // one operand is of a vector type and the other is of its element type.
+ QualType vType = CheckVectorOperands(LHS, RHS, Loc, false);
+ if (vType.isNull() || vType->isFloatingType())
+ return InvalidOperands(Loc, LHS, RHS);
+
+ return GetSignedVectorType(LHS.get()->getType());
+}
+
+inline QualType Sema::CheckBitwiseOperands(
+ ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign) {
+ checkArithmeticNull(*this, LHS, RHS, Loc, /*isCompare=*/false);
+
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType()) {
+ if (LHS.get()->getType()->hasIntegerRepresentation() &&
+ RHS.get()->getType()->hasIntegerRepresentation())
+ return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign);
+
+ return InvalidOperands(Loc, LHS, RHS);
+ }
+
+ ExprResult LHSResult = Owned(LHS), RHSResult = Owned(RHS);
+ QualType compType = UsualArithmeticConversions(LHSResult, RHSResult,
+ IsCompAssign);
+ if (LHSResult.isInvalid() || RHSResult.isInvalid())
+ return QualType();
+ LHS = LHSResult.take();
+ RHS = RHSResult.take();
+
+ if (LHS.get()->getType()->isIntegralOrUnscopedEnumerationType() &&
+ RHS.get()->getType()->isIntegralOrUnscopedEnumerationType())
+ return compType;
+ return InvalidOperands(Loc, LHS, RHS);
+}
+
+inline QualType Sema::CheckLogicalOperands( // C99 6.5.[13,14]
+ ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc) {
+
+ // Check vector operands differently.
+ if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType())
+ return CheckVectorLogicalOperands(LHS, RHS, Loc);
+
+  // Diagnose cases where the user writes a logical and/or but probably meant a
+ // bitwise one. We do this when the LHS is a non-bool integer and the RHS
+ // is a constant.
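+  // e.g. 'x && 0xFF' (for an integral, non-bool 'x') is diagnosed below, with
+  // fix-its suggesting 'x & 0xFF' or removing the constant.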
+ if (LHS.get()->getType()->isIntegerType() &&
+ !LHS.get()->getType()->isBooleanType() &&
+ RHS.get()->getType()->isIntegerType() && !RHS.get()->isValueDependent() &&
+ // Don't warn in macros or template instantiations.
+ !Loc.isMacroID() && ActiveTemplateInstantiations.empty()) {
+ // If the RHS can be constant folded, and if it constant folds to something
+ // that isn't 0 or 1 (which indicate a potential logical operation that
+ // happened to fold to true/false) then warn.
+ // Parens on the RHS are ignored.
+ llvm::APSInt Result;
+ if (RHS.get()->EvaluateAsInt(Result, Context))
+ if ((getLangOpts().Bool && !RHS.get()->getType()->isBooleanType()) ||
+ (Result != 0 && Result != 1)) {
+ Diag(Loc, diag::warn_logical_instead_of_bitwise)
+ << RHS.get()->getSourceRange()
+ << (Opc == BO_LAnd ? "&&" : "||");
+ // Suggest replacing the logical operator with the bitwise version
+ Diag(Loc, diag::note_logical_instead_of_bitwise_change_operator)
+ << (Opc == BO_LAnd ? "&" : "|")
+ << FixItHint::CreateReplacement(SourceRange(
+ Loc, Lexer::getLocForEndOfToken(Loc, 0, getSourceManager(),
+ getLangOpts())),
+ Opc == BO_LAnd ? "&" : "|");
+ if (Opc == BO_LAnd)
+ // Suggest replacing "Foo() && kNonZero" with "Foo()"
+ Diag(Loc, diag::note_logical_instead_of_bitwise_remove_constant)
+ << FixItHint::CreateRemoval(
+ SourceRange(
+ Lexer::getLocForEndOfToken(LHS.get()->getLocEnd(),
+ 0, getSourceManager(),
+ getLangOpts()),
+ RHS.get()->getLocEnd()));
+ }
+ }
+
+ if (!Context.getLangOpts().CPlusPlus) {
+ LHS = UsualUnaryConversions(LHS.take());
+ if (LHS.isInvalid())
+ return QualType();
+
+ RHS = UsualUnaryConversions(RHS.take());
+ if (RHS.isInvalid())
+ return QualType();
+
+ if (!LHS.get()->getType()->isScalarType() ||
+ !RHS.get()->getType()->isScalarType())
+ return InvalidOperands(Loc, LHS, RHS);
+
+ return Context.IntTy;
+ }
+
+ // The following is safe because we only use this method for
+ // non-overloadable operands.
+
+ // C++ [expr.log.and]p1
+ // C++ [expr.log.or]p1
+ // The operands are both contextually converted to type bool.
+ ExprResult LHSRes = PerformContextuallyConvertToBool(LHS.get());
+ if (LHSRes.isInvalid())
+ return InvalidOperands(Loc, LHS, RHS);
+ LHS = move(LHSRes);
+
+ ExprResult RHSRes = PerformContextuallyConvertToBool(RHS.get());
+ if (RHSRes.isInvalid())
+ return InvalidOperands(Loc, LHS, RHS);
+ RHS = move(RHSRes);
+
+ // C++ [expr.log.and]p2
+ // C++ [expr.log.or]p2
+ // The result is a bool.
+ return Context.BoolTy;
+}
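For readers of this import, the "logical instead of bitwise" check above fires when the RHS of '&&'/'||' constant-folds to an integer other than 0 or 1. A minimal, hypothetical user-code sketch (illustration only, not part of this change) of what triggers warn_logical_instead_of_bitwise and its fix-its:

    // Hypothetical user code, for illustration only.
    unsigned flags = 0x12;

    bool hasFlag() {
      // The RHS folds to 4, so Sema suggests replacing '&&' with '&'
      // (or, for '&&', simply dropping the constant operand).
      return flags && 0x4;        // warn_logical_instead_of_bitwise
      // Intended: return flags & 0x4;
    }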
+
+/// IsReadonlyProperty - Verify that an otherwise valid l-value expression
+/// is a read-only property; return true if so. A readonly property expression
+/// depends on various declarations and thus must be treated specially.
+///
+static bool IsReadonlyProperty(Expr *E, Sema &S) {
+ const ObjCPropertyRefExpr *PropExpr = dyn_cast<ObjCPropertyRefExpr>(E);
+ if (!PropExpr) return false;
+ if (PropExpr->isImplicitProperty()) return false;
+
+ ObjCPropertyDecl *PDecl = PropExpr->getExplicitProperty();
+ QualType BaseType = PropExpr->isSuperReceiver() ?
+ PropExpr->getSuperReceiverType() :
+ PropExpr->getBase()->getType();
+
+ if (const ObjCObjectPointerType *OPT =
+ BaseType->getAsObjCInterfacePointerType())
+ if (ObjCInterfaceDecl *IFace = OPT->getInterfaceDecl())
+ if (S.isPropertyReadonly(PDecl, IFace))
+ return true;
+ return false;
+}
+
+static bool IsReadonlyMessage(Expr *E, Sema &S) {
+ const MemberExpr *ME = dyn_cast<MemberExpr>(E);
+ if (!ME) return false;
+ if (!isa<FieldDecl>(ME->getMemberDecl())) return false;
+ ObjCMessageExpr *Base =
+ dyn_cast<ObjCMessageExpr>(ME->getBase()->IgnoreParenImpCasts());
+ if (!Base) return false;
+ return Base->getMethodDecl() != 0;
+}
+
+/// Is the given expression (which must be 'const') a reference to a
+/// variable which was originally non-const, but which has become
+/// 'const' due to being captured within a block or lambda?
+enum NonConstCaptureKind { NCCK_None, NCCK_Block, NCCK_Lambda };
+static NonConstCaptureKind isReferenceToNonConstCapture(Sema &S, Expr *E) {
+ assert(E->isLValue() && E->getType().isConstQualified());
+ E = E->IgnoreParens();
+
+ // Must be a reference to a declaration from an enclosing scope.
+ DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
+ if (!DRE) return NCCK_None;
+ if (!DRE->refersToEnclosingLocal()) return NCCK_None;
+
+ // The declaration must be a variable which is not declared 'const'.
+ VarDecl *var = dyn_cast<VarDecl>(DRE->getDecl());
+ if (!var) return NCCK_None;
+ if (var->getType().isConstQualified()) return NCCK_None;
+ assert(var->hasLocalStorage() && "capture added 'const' to non-local?");
+
+ // Decide whether the first capture was for a block or a lambda.
+ DeclContext *DC = S.CurContext;
+ while (DC->getParent() != var->getDeclContext())
+ DC = DC->getParent();
+ return (isa<BlockDecl>(DC) ? NCCK_Block : NCCK_Lambda);
+}
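For context, a minimal C++ sketch (illustration only, not part of the diff) of the situation isReferenceToNonConstCapture() distinguishes: a by-copy lambda capture is implicitly const inside the call operator, so assigning to it gets the lambda-specific diagnostic rather than the generic read-only one, and 'mutable' removes the implicit const.

    // Hypothetical user code, for illustration only.
    void f() {
      int n = 0;
      auto g = [n]()         { /* n = 1; */ };  // error if uncommented:
                                                // err_lambda_decl_ref_not_modifiable_lvalue
      auto h = [n]() mutable { n = 1; };        // OK: the captured copy is modifiable
      (void)g; (void)h;
    }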
+
+/// CheckForModifiableLvalue - Verify that E is a modifiable lvalue. If not,
+/// emit an error and return true. If so, return false.
+static bool CheckForModifiableLvalue(Expr *E, SourceLocation Loc, Sema &S) {
+ assert(!E->hasPlaceholderType(BuiltinType::PseudoObject));
+ SourceLocation OrigLoc = Loc;
+ Expr::isModifiableLvalueResult IsLV = E->isModifiableLvalue(S.Context,
+ &Loc);
+ if (IsLV == Expr::MLV_Valid && IsReadonlyProperty(E, S))
+ IsLV = Expr::MLV_ReadonlyProperty;
+ else if (IsLV == Expr::MLV_ClassTemporary && IsReadonlyMessage(E, S))
+ IsLV = Expr::MLV_InvalidMessageExpression;
+ if (IsLV == Expr::MLV_Valid)
+ return false;
+
+ unsigned Diag = 0;
+ bool NeedType = false;
+ switch (IsLV) { // C99 6.5.16p2
+ case Expr::MLV_ConstQualified:
+ Diag = diag::err_typecheck_assign_const;
+
+ // Use a specialized diagnostic when we're assigning to an object
+ // from an enclosing function or block.
+ if (NonConstCaptureKind NCCK = isReferenceToNonConstCapture(S, E)) {
+ if (NCCK == NCCK_Block)
+ Diag = diag::err_block_decl_ref_not_modifiable_lvalue;
+ else
+ Diag = diag::err_lambda_decl_ref_not_modifiable_lvalue;
+ break;
+ }
+
+ // In ARC, use some specialized diagnostics for occasions where we
+ // infer 'const'. These are always pseudo-strong variables.
+ if (S.getLangOpts().ObjCAutoRefCount) {
+ DeclRefExpr *declRef = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts());
+ if (declRef && isa<VarDecl>(declRef->getDecl())) {
+ VarDecl *var = cast<VarDecl>(declRef->getDecl());
+
+ // Use the normal diagnostic if it's pseudo-__strong but the
+ // user actually wrote 'const'.
+ if (var->isARCPseudoStrong() &&
+ (!var->getTypeSourceInfo() ||
+ !var->getTypeSourceInfo()->getType().isConstQualified())) {
+ // There are two pseudo-strong cases:
+ // - self
+ ObjCMethodDecl *method = S.getCurMethodDecl();
+ if (method && var == method->getSelfDecl())
+ Diag = method->isClassMethod()
+ ? diag::err_typecheck_arc_assign_self_class_method
+ : diag::err_typecheck_arc_assign_self;
+
+ // - fast enumeration variables
+ else
+ Diag = diag::err_typecheck_arr_assign_enumeration;
+
+ SourceRange Assign;
+ if (Loc != OrigLoc)
+ Assign = SourceRange(OrigLoc, OrigLoc);
+ S.Diag(Loc, Diag) << E->getSourceRange() << Assign;
+ // We need to preserve the AST regardless, so the migration tool
+ // can do its job.
+ return false;
+ }
+ }
+ }
+
+ break;
+ case Expr::MLV_ArrayType:
+ Diag = diag::err_typecheck_array_not_modifiable_lvalue;
+ NeedType = true;
+ break;
+ case Expr::MLV_NotObjectType:
+ Diag = diag::err_typecheck_non_object_not_modifiable_lvalue;
+ NeedType = true;
+ break;
+ case Expr::MLV_LValueCast:
+ Diag = diag::err_typecheck_lvalue_casts_not_supported;
+ break;
+ case Expr::MLV_Valid:
+ llvm_unreachable("did not take early return for MLV_Valid");
+ case Expr::MLV_InvalidExpression:
+ case Expr::MLV_MemberFunction:
+ case Expr::MLV_ClassTemporary:
+ Diag = diag::err_typecheck_expression_not_modifiable_lvalue;
+ break;
+ case Expr::MLV_IncompleteType:
+ case Expr::MLV_IncompleteVoidType:
+ return S.RequireCompleteType(Loc, E->getType(),
+ S.PDiag(diag::err_typecheck_incomplete_type_not_modifiable_lvalue)
+ << E->getSourceRange());
+ case Expr::MLV_DuplicateVectorComponents:
+ Diag = diag::err_typecheck_duplicate_vector_components_not_mlvalue;
+ break;
+ case Expr::MLV_ReadonlyProperty:
+ case Expr::MLV_NoSetterProperty:
+ llvm_unreachable("readonly properties should be processed differently");
+ case Expr::MLV_InvalidMessageExpression:
+ Diag = diag::error_readonly_message_assignment;
+ break;
+ case Expr::MLV_SubObjCPropertySetting:
+ Diag = diag::error_no_subobject_property_setting;
+ break;
+ }
+
+ SourceRange Assign;
+ if (Loc != OrigLoc)
+ Assign = SourceRange(OrigLoc, OrigLoc);
+ if (NeedType)
+ S.Diag(Loc, Diag) << E->getType() << E->getSourceRange() << Assign;
+ else
+ S.Diag(Loc, Diag) << E->getSourceRange() << Assign;
+ return true;
+}
+
+
+
+// C99 6.5.16.1
+QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
+ SourceLocation Loc,
+ QualType CompoundType) {
+ assert(!LHSExpr->hasPlaceholderType(BuiltinType::PseudoObject));
+
+ // Verify that LHS is a modifiable lvalue, and emit error if not.
+ if (CheckForModifiableLvalue(LHSExpr, Loc, *this))
+ return QualType();
+
+ QualType LHSType = LHSExpr->getType();
+ QualType RHSType = CompoundType.isNull() ? RHS.get()->getType() :
+ CompoundType;
+ AssignConvertType ConvTy;
+ if (CompoundType.isNull()) {
+ QualType LHSTy(LHSType);
+ ConvTy = CheckSingleAssignmentConstraints(LHSTy, RHS);
+ if (RHS.isInvalid())
+ return QualType();
+ // Special case of NSObject attributes on c-style pointer types.
+ if (ConvTy == IncompatiblePointer &&
+ ((Context.isObjCNSObjectType(LHSType) &&
+ RHSType->isObjCObjectPointerType()) ||
+ (Context.isObjCNSObjectType(RHSType) &&
+ LHSType->isObjCObjectPointerType())))
+ ConvTy = Compatible;
+
+ if (ConvTy == Compatible &&
+ LHSType->isObjCObjectType())
+ Diag(Loc, diag::err_objc_object_assignment)
+ << LHSType;
+
+ // If the RHS is a unary plus or minus, check to see if the '=' and the
+ // '+'/'-' are right next to each other. If so, the user may have typo'd
+ // "x =+ 4" instead of "x += 4".
+ Expr *RHSCheck = RHS.get();
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(RHSCheck))
+ RHSCheck = ICE->getSubExpr();
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(RHSCheck)) {
+ if ((UO->getOpcode() == UO_Plus ||
+ UO->getOpcode() == UO_Minus) &&
+ Loc.isFileID() && UO->getOperatorLoc().isFileID() &&
+ // Only if the two operators are exactly adjacent.
+ Loc.getLocWithOffset(1) == UO->getOperatorLoc() &&
+ // And there is a space or other character before the subexpr of the
+ // unary +/-. We don't want to warn on "x=-1".
+ Loc.getLocWithOffset(2) != UO->getSubExpr()->getLocStart() &&
+ UO->getSubExpr()->getLocStart().isFileID()) {
+ Diag(Loc, diag::warn_not_compound_assign)
+ << (UO->getOpcode() == UO_Plus ? "+" : "-")
+ << SourceRange(UO->getOperatorLoc(), UO->getOperatorLoc());
+ }
+ }
+
+ if (ConvTy == Compatible) {
+ if (LHSType.getObjCLifetime() == Qualifiers::OCL_Strong)
+ checkRetainCycles(LHSExpr, RHS.get());
+ else if (getLangOpts().ObjCAutoRefCount)
+ checkUnsafeExprAssigns(Loc, LHSExpr, RHS.get());
+ }
+ } else {
+ // Compound assignment "x += y"
+ ConvTy = CheckAssignmentConstraints(Loc, LHSType, RHSType);
+ }
+
+ if (DiagnoseAssignmentResult(ConvTy, Loc, LHSType, RHSType,
+ RHS.get(), AA_Assigning))
+ return QualType();
+
+ CheckForNullPointerDereference(*this, LHSExpr);
+
+ // C99 6.5.16p3: The type of an assignment expression is the type of the
+ // left operand unless the left operand has qualified type, in which case
+ // it is the unqualified version of the type of the left operand.
+ // C99 6.5.16.1p2: In simple assignment, the value of the right operand
+ // is converted to the type of the assignment expression (above).
+ // C++ 5.17p1: the type of the assignment expression is that of its left
+ // operand.
+ return (getLangOpts().CPlusPlus
+ ? LHSType : LHSType.getUnqualifiedType());
+}
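A short, hypothetical snippet (illustration only) of the '=+' typo that the adjacency check above is looking for:

    // Hypothetical user code, for illustration only.
    int bump(int x) {
      x =+ 4;   // warn_not_compound_assign: parsed as 'x = (+4)';
                // did you mean 'x += 4'?
      x = -1;   // no warning: '=' and '-' are not adjacent
      x =-1;    // no warning: the operand immediately follows the '-'
      return x;
    }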
+
+// C99 6.5.17
+static QualType CheckCommaOperands(Sema &S, ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc) {
+ S.DiagnoseUnusedExprResult(LHS.get());
+
+ LHS = S.CheckPlaceholderExpr(LHS.take());
+ RHS = S.CheckPlaceholderExpr(RHS.take());
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+
+ // C's comma performs lvalue conversion (C99 6.3.2.1) on both its
+ // operands, but not unary promotions.
+ // C++'s comma does not do any conversions at all (C++ [expr.comma]p1).
+
+ // So we treat the LHS as an ignored value, and in C++ we allow the
+ // containing site to determine what should be done with the RHS.
+ LHS = S.IgnoredValueConversions(LHS.take());
+ if (LHS.isInvalid())
+ return QualType();
+
+ if (!S.getLangOpts().CPlusPlus) {
+ RHS = S.DefaultFunctionArrayLvalueConversion(RHS.take());
+ if (RHS.isInvalid())
+ return QualType();
+ if (!RHS.get()->getType()->isVoidType())
+ S.RequireCompleteType(Loc, RHS.get()->getType(),
+ diag::err_incomplete_type);
+ }
+
+ return RHS.get()->getType();
+}
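To make the C/C++ difference described in CheckCommaOperands concrete, a hypothetical sketch (illustration only):

    // Hypothetical user code, for illustration only.
    void f() {
      int a[4];
      // C: both operands undergo lvalue conversion, so '(0, a)' decays to 'int *'.
      // C++: the comma performs no conversions, so '(0, a)' is an lvalue of
      //      type 'int[4]' and sizeof((0, a)) == sizeof(int[4]).
      int *p = (0, a);
      (void)p;
    }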
+
+/// CheckIncrementDecrementOperand - unlike most "Check" methods, this routine
+/// doesn't need to call UsualUnaryConversions or UsualArithmeticConversions.
+static QualType CheckIncrementDecrementOperand(Sema &S, Expr *Op,
+ ExprValueKind &VK,
+ SourceLocation OpLoc,
+ bool IsInc, bool IsPrefix) {
+ if (Op->isTypeDependent())
+ return S.Context.DependentTy;
+
+ QualType ResType = Op->getType();
+ // Atomic types can be used for increment / decrement where the non-atomic
+ // versions can, so ignore the _Atomic() specifier for the purpose of
+ // checking.
+ if (const AtomicType *ResAtomicType = ResType->getAs<AtomicType>())
+ ResType = ResAtomicType->getValueType();
+
+ assert(!ResType.isNull() && "no type for increment/decrement expression");
+
+ if (S.getLangOpts().CPlusPlus && ResType->isBooleanType()) {
+ // Decrement of bool is not allowed.
+ if (!IsInc) {
+ S.Diag(OpLoc, diag::err_decrement_bool) << Op->getSourceRange();
+ return QualType();
+ }
+ // Increment of bool sets it to true, but is deprecated.
+ S.Diag(OpLoc, diag::warn_increment_bool) << Op->getSourceRange();
+ } else if (ResType->isRealType()) {
+ // OK!
+ } else if (ResType->isAnyPointerType()) {
+ // C99 6.5.2.4p2, 6.5.6p2
+ if (!checkArithmeticOpPointerOperand(S, OpLoc, Op))
+ return QualType();
+
+ // Diagnose bad cases where we step over interface counts.
+ else if (!checkArithmethicPointerOnNonFragileABI(S, OpLoc, Op))
+ return QualType();
+ } else if (ResType->isAnyComplexType()) {
+ // C99 does not support ++/-- on complex types; we allow it as an extension.
+ S.Diag(OpLoc, diag::ext_integer_increment_complex)
+ << ResType << Op->getSourceRange();
+ } else if (ResType->isPlaceholderType()) {
+ ExprResult PR = S.CheckPlaceholderExpr(Op);
+ if (PR.isInvalid()) return QualType();
+ return CheckIncrementDecrementOperand(S, PR.take(), VK, OpLoc,
+ IsInc, IsPrefix);
+ } else if (S.getLangOpts().AltiVec && ResType->isVectorType()) {
+ // OK! ( C/C++ Language Extensions for CBEA(Version 2.6) 10.3 )
+ } else {
+ S.Diag(OpLoc, diag::err_typecheck_illegal_increment_decrement)
+ << ResType << int(IsInc) << Op->getSourceRange();
+ return QualType();
+ }
+ // At this point, we know we have a real, complex or pointer type.
+ // Now make sure the operand is a modifiable lvalue.
+ if (CheckForModifiableLvalue(Op, OpLoc, S))
+ return QualType();
+ // In C++, the result of a prefix increment has the same type as the
+ // operand. Otherwise (in C, or with a postfix operator), the result has
+ // the unqualified type of the operand.
+ if (IsPrefix && S.getLangOpts().CPlusPlus) {
+ VK = VK_LValue;
+ return ResType;
+ } else {
+ VK = VK_RValue;
+ return ResType.getUnqualifiedType();
+ }
+}
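A hypothetical C++ snippet (illustration only) exercising the increment/decrement rules enforced above:

    // Hypothetical user code, for illustration only (C++).
    void f(bool b, int *p) {
      ++b;     // accepted but deprecated: warn_increment_bool
      // --b;  // error: err_decrement_bool
      ++p;     // fine: pointer arithmetic, subject to the pointer checks above
      // In C++ a prefix '++p' is an lvalue of the operand's type; a postfix
      // 'p++' (and any increment in C) is an rvalue of the unqualified type.
    }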
+
+
+/// getPrimaryDecl - Helper function for CheckAddressOfOperand().
+/// This routine allows us to typecheck complex/recursive expressions
+/// where the declaration is needed for type checking. We only need to
+/// handle cases when the expression references a function designator
+/// or is an lvalue. Here are some examples:
+/// - &(x) => x
+/// - &*****f => f for f a function designator.
+/// - &s.xx => s
+/// - &s.zz[1].yy -> s, if zz is an array
+/// - *(x + 1) -> x, if x is an array
+/// - &"123"[2] -> 0
+/// - & __real__ x -> x
+static ValueDecl *getPrimaryDecl(Expr *E) {
+ switch (E->getStmtClass()) {
+ case Stmt::DeclRefExprClass:
+ return cast<DeclRefExpr>(E)->getDecl();
+ case Stmt::MemberExprClass:
+ // If this is an arrow operator, the address is an offset from
+ // the base's value, so the object the base refers to is
+ // irrelevant.
+ if (cast<MemberExpr>(E)->isArrow())
+ return 0;
+ // Otherwise, the expression refers to a part of the base
+ return getPrimaryDecl(cast<MemberExpr>(E)->getBase());
+ case Stmt::ArraySubscriptExprClass: {
+ // FIXME: This code shouldn't be necessary! We should catch the implicit
+ // promotion of register arrays earlier.
+ Expr* Base = cast<ArraySubscriptExpr>(E)->getBase();
+ if (ImplicitCastExpr* ICE = dyn_cast<ImplicitCastExpr>(Base)) {
+ if (ICE->getSubExpr()->getType()->isArrayType())
+ return getPrimaryDecl(ICE->getSubExpr());
+ }
+ return 0;
+ }
+ case Stmt::UnaryOperatorClass: {
+ UnaryOperator *UO = cast<UnaryOperator>(E);
+
+ switch(UO->getOpcode()) {
+ case UO_Real:
+ case UO_Imag:
+ case UO_Extension:
+ return getPrimaryDecl(UO->getSubExpr());
+ default:
+ return 0;
+ }
+ }
+ case Stmt::ParenExprClass:
+ return getPrimaryDecl(cast<ParenExpr>(E)->getSubExpr());
+ case Stmt::ImplicitCastExprClass:
+ // If the result of an implicit cast is an l-value, we care about
+ // the sub-expression; otherwise, the result here doesn't matter.
+ return getPrimaryDecl(cast<ImplicitCastExpr>(E)->getSubExpr());
+ default:
+ return 0;
+ }
+}
+
+namespace {
+ enum {
+ AO_Bit_Field = 0,
+ AO_Vector_Element = 1,
+ AO_Property_Expansion = 2,
+ AO_Register_Variable = 3,
+ AO_No_Error = 4
+ };
+}
+/// \brief Diagnose invalid operand for address of operations.
+///
+/// \param Type The type of operand which cannot have its address taken.
+static void diagnoseAddressOfInvalidType(Sema &S, SourceLocation Loc,
+ Expr *E, unsigned Type) {
+ S.Diag(Loc, diag::err_typecheck_address_of) << Type << E->getSourceRange();
+}
+
+/// CheckAddressOfOperand - The operand of & must be either a function
+/// designator or an lvalue designating an object. If it is an lvalue, the
+/// object cannot be declared with storage class register or be a bit field.
+/// Note: The usual conversions are *not* applied to the operand of the &
+/// operator (C99 6.3.2.1p[2-4]), and its result is never an lvalue.
+/// In C++, the operand might be an overloaded function name, in which case
+/// we allow the '&' but retain the overloaded-function type.
+static QualType CheckAddressOfOperand(Sema &S, ExprResult &OrigOp,
+ SourceLocation OpLoc) {
+ if (const BuiltinType *PTy = OrigOp.get()->getType()->getAsPlaceholderType()){
+ if (PTy->getKind() == BuiltinType::Overload) {
+ if (!isa<OverloadExpr>(OrigOp.get()->IgnoreParens())) {
+ S.Diag(OpLoc, diag::err_typecheck_invalid_lvalue_addrof)
+ << OrigOp.get()->getSourceRange();
+ return QualType();
+ }
+
+ return S.Context.OverloadTy;
+ }
+
+ if (PTy->getKind() == BuiltinType::UnknownAny)
+ return S.Context.UnknownAnyTy;
+
+ if (PTy->getKind() == BuiltinType::BoundMember) {
+ S.Diag(OpLoc, diag::err_invalid_form_pointer_member_function)
+ << OrigOp.get()->getSourceRange();
+ return QualType();
+ }
+
+ OrigOp = S.CheckPlaceholderExpr(OrigOp.take());
+ if (OrigOp.isInvalid()) return QualType();
+ }
+
+ if (OrigOp.get()->isTypeDependent())
+ return S.Context.DependentTy;
+
+ assert(!OrigOp.get()->getType()->isPlaceholderType());
+
+ // Make sure to ignore parentheses in subsequent checks
+ Expr *op = OrigOp.get()->IgnoreParens();
+
+ if (S.getLangOpts().C99) {
+ // Implement C99-only parts of addressof rules.
+ if (UnaryOperator* uOp = dyn_cast<UnaryOperator>(op)) {
+ if (uOp->getOpcode() == UO_Deref)
+ // Per C99 6.5.3.2, the address of a deref always returns a valid result
+ // (assuming the deref expression is valid).
+ return uOp->getSubExpr()->getType();
+ }
+ // Technically, there should be a check for array subscript
+ // expressions here, but the result of one is always an lvalue anyway.
+ }
+ ValueDecl *dcl = getPrimaryDecl(op);
+ Expr::LValueClassification lval = op->ClassifyLValue(S.Context);
+ unsigned AddressOfError = AO_No_Error;
+
+ if (lval == Expr::LV_ClassTemporary) {
+ bool sfinae = S.isSFINAEContext();
+ S.Diag(OpLoc, sfinae ? diag::err_typecheck_addrof_class_temporary
+ : diag::ext_typecheck_addrof_class_temporary)
+ << op->getType() << op->getSourceRange();
+ if (sfinae)
+ return QualType();
+ } else if (isa<ObjCSelectorExpr>(op)) {
+ return S.Context.getPointerType(op->getType());
+ } else if (lval == Expr::LV_MemberFunction) {
+ // If it's an instance method, make a member pointer.
+ // The expression must have exactly the form &A::foo.
+
+ // If the underlying expression isn't a decl ref, give up.
+ if (!isa<DeclRefExpr>(op)) {
+ S.Diag(OpLoc, diag::err_invalid_form_pointer_member_function)
+ << OrigOp.get()->getSourceRange();
+ return QualType();
+ }
+ DeclRefExpr *DRE = cast<DeclRefExpr>(op);
+ CXXMethodDecl *MD = cast<CXXMethodDecl>(DRE->getDecl());
+
+ // The id-expression was parenthesized.
+ if (OrigOp.get() != DRE) {
+ S.Diag(OpLoc, diag::err_parens_pointer_member_function)
+ << OrigOp.get()->getSourceRange();
+
+ // The method was named without a qualifier.
+ } else if (!DRE->getQualifier()) {
+ S.Diag(OpLoc, diag::err_unqualified_pointer_member_function)
+ << op->getSourceRange();
+ }
+
+ return S.Context.getMemberPointerType(op->getType(),
+ S.Context.getTypeDeclType(MD->getParent()).getTypePtr());
+ } else if (lval != Expr::LV_Valid && lval != Expr::LV_IncompleteVoidType) {
+ // C99 6.5.3.2p1
+ // The operand must be either an l-value or a function designator
+ if (!op->getType()->isFunctionType()) {
+ // Use a special diagnostic for loads from property references.
+ if (isa<PseudoObjectExpr>(op)) {
+ AddressOfError = AO_Property_Expansion;
+ } else {
+ // FIXME: emit more specific diag...
+ S.Diag(OpLoc, diag::err_typecheck_invalid_lvalue_addrof)
+ << op->getSourceRange();
+ return QualType();
+ }
+ }
+ } else if (op->getObjectKind() == OK_BitField) { // C99 6.5.3.2p1
+ // The operand cannot be a bit-field
+ AddressOfError = AO_Bit_Field;
+ } else if (op->getObjectKind() == OK_VectorComponent) {
+ // The operand cannot be an element of a vector
+ AddressOfError = AO_Vector_Element;
+ } else if (dcl) { // C99 6.5.3.2p1
+ // We have an lvalue with a decl. Make sure the decl is not declared
+ // with the register storage-class specifier.
+ if (const VarDecl *vd = dyn_cast<VarDecl>(dcl)) {
+ // In C++ it is not an error to take the address of a register
+ // variable (C++03 7.1.1p3).
+ if (vd->getStorageClass() == SC_Register &&
+ !S.getLangOpts().CPlusPlus) {
+ AddressOfError = AO_Register_Variable;
+ }
+ } else if (isa<FunctionTemplateDecl>(dcl)) {
+ return S.Context.OverloadTy;
+ } else if (isa<FieldDecl>(dcl) || isa<IndirectFieldDecl>(dcl)) {
+ // Okay: we can take the address of a field.
+ // Could be a pointer to member, though, if there is an explicit
+ // scope qualifier for the class.
+ if (isa<DeclRefExpr>(op) && cast<DeclRefExpr>(op)->getQualifier()) {
+ DeclContext *Ctx = dcl->getDeclContext();
+ if (Ctx && Ctx->isRecord()) {
+ if (dcl->getType()->isReferenceType()) {
+ S.Diag(OpLoc,
+ diag::err_cannot_form_pointer_to_member_of_reference_type)
+ << dcl->getDeclName() << dcl->getType();
+ return QualType();
+ }
+
+ while (cast<RecordDecl>(Ctx)->isAnonymousStructOrUnion())
+ Ctx = Ctx->getParent();
+ return S.Context.getMemberPointerType(op->getType(),
+ S.Context.getTypeDeclType(cast<RecordDecl>(Ctx)).getTypePtr());
+ }
+ }
+ } else if (!isa<FunctionDecl>(dcl) && !isa<NonTypeTemplateParmDecl>(dcl))
+ llvm_unreachable("Unknown/unexpected decl type");
+ }
+
+ if (AddressOfError != AO_No_Error) {
+ diagnoseAddressOfInvalidType(S, OpLoc, op, AddressOfError);
+ return QualType();
+ }
+
+ if (lval == Expr::LV_IncompleteVoidType) {
+ // Taking the address of a void variable is technically illegal, but we
+ // allow it in cases which are otherwise valid.
+ // Example: "extern void x; void* y = &x;".
+ S.Diag(OpLoc, diag::ext_typecheck_addrof_void) << op->getSourceRange();
+ }
+
+ // If the operand has type "type", the result has type "pointer to type".
+ if (op->getType()->isObjCObjectType())
+ return S.Context.getObjCObjectPointerType(op->getType());
+ return S.Context.getPointerType(op->getType());
+}
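For readers unfamiliar with the pointer-to-member rules enforced above, a hypothetical sketch (illustration only): the operand of '&' must be exactly a qualified id of the form &A::foo, with the class qualifier and without parentheses.

    // Hypothetical user code, for illustration only.
    struct A {
      int  m;
      void foo();
      void g() {
        int A::*pm       = &A::m;    // OK: pointer to data member
        void (A::*pmf)() = &A::foo;  // OK: pointer to member function
        // void (A::*e1)() = &(A::foo); // error: err_parens_pointer_member_function
        // void (A::*e2)() = &foo;      // error: err_unqualified_pointer_member_function
      }
    };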
+
+/// CheckIndirectionOperand - Type check unary indirection (prefix '*').
+static QualType CheckIndirectionOperand(Sema &S, Expr *Op, ExprValueKind &VK,
+ SourceLocation OpLoc) {
+ if (Op->isTypeDependent())
+ return S.Context.DependentTy;
+
+ ExprResult ConvResult = S.UsualUnaryConversions(Op);
+ if (ConvResult.isInvalid())
+ return QualType();
+ Op = ConvResult.take();
+ QualType OpTy = Op->getType();
+ QualType Result;
+
+ if (isa<CXXReinterpretCastExpr>(Op)) {
+ QualType OpOrigType = Op->IgnoreParenCasts()->getType();
+ S.CheckCompatibleReinterpretCast(OpOrigType, OpTy, /*IsDereference*/true,
+ Op->getSourceRange());
+ }
+
+ // Note that per both C89 and C99, indirection is always legal, even if OpTy
+ // is an incomplete type or void. It would be possible to warn about
+ // dereferencing a void pointer, but it's completely well-defined, and such a
+ // warning is unlikely to catch any mistakes.
+ if (const PointerType *PT = OpTy->getAs<PointerType>())
+ Result = PT->getPointeeType();
+ else if (const ObjCObjectPointerType *OPT =
+ OpTy->getAs<ObjCObjectPointerType>())
+ Result = OPT->getPointeeType();
+ else {
+ ExprResult PR = S.CheckPlaceholderExpr(Op);
+ if (PR.isInvalid()) return QualType();
+ if (PR.take() != Op)
+ return CheckIndirectionOperand(S, PR.take(), VK, OpLoc);
+ }
+
+ if (Result.isNull()) {
+ S.Diag(OpLoc, diag::err_typecheck_indirection_requires_pointer)
+ << OpTy << Op->getSourceRange();
+ return QualType();
+ }
+
+ // Dereferences are usually l-values...
+ VK = VK_LValue;
+
+ // ...except that certain expressions are never l-values in C.
+ if (!S.getLangOpts().CPlusPlus && Result.isCForbiddenLValueType())
+ VK = VK_RValue;
+
+ return Result;
+}
+
+static inline BinaryOperatorKind ConvertTokenKindToBinaryOpcode(
+ tok::TokenKind Kind) {
+ BinaryOperatorKind Opc;
+ switch (Kind) {
+ default: llvm_unreachable("Unknown binop!");
+ case tok::periodstar: Opc = BO_PtrMemD; break;
+ case tok::arrowstar: Opc = BO_PtrMemI; break;
+ case tok::star: Opc = BO_Mul; break;
+ case tok::slash: Opc = BO_Div; break;
+ case tok::percent: Opc = BO_Rem; break;
+ case tok::plus: Opc = BO_Add; break;
+ case tok::minus: Opc = BO_Sub; break;
+ case tok::lessless: Opc = BO_Shl; break;
+ case tok::greatergreater: Opc = BO_Shr; break;
+ case tok::lessequal: Opc = BO_LE; break;
+ case tok::less: Opc = BO_LT; break;
+ case tok::greaterequal: Opc = BO_GE; break;
+ case tok::greater: Opc = BO_GT; break;
+ case tok::exclaimequal: Opc = BO_NE; break;
+ case tok::equalequal: Opc = BO_EQ; break;
+ case tok::amp: Opc = BO_And; break;
+ case tok::caret: Opc = BO_Xor; break;
+ case tok::pipe: Opc = BO_Or; break;
+ case tok::ampamp: Opc = BO_LAnd; break;
+ case tok::pipepipe: Opc = BO_LOr; break;
+ case tok::equal: Opc = BO_Assign; break;
+ case tok::starequal: Opc = BO_MulAssign; break;
+ case tok::slashequal: Opc = BO_DivAssign; break;
+ case tok::percentequal: Opc = BO_RemAssign; break;
+ case tok::plusequal: Opc = BO_AddAssign; break;
+ case tok::minusequal: Opc = BO_SubAssign; break;
+ case tok::lesslessequal: Opc = BO_ShlAssign; break;
+ case tok::greatergreaterequal: Opc = BO_ShrAssign; break;
+ case tok::ampequal: Opc = BO_AndAssign; break;
+ case tok::caretequal: Opc = BO_XorAssign; break;
+ case tok::pipeequal: Opc = BO_OrAssign; break;
+ case tok::comma: Opc = BO_Comma; break;
+ }
+ return Opc;
+}
+
+static inline UnaryOperatorKind ConvertTokenKindToUnaryOpcode(
+ tok::TokenKind Kind) {
+ UnaryOperatorKind Opc;
+ switch (Kind) {
+ default: llvm_unreachable("Unknown unary op!");
+ case tok::plusplus: Opc = UO_PreInc; break;
+ case tok::minusminus: Opc = UO_PreDec; break;
+ case tok::amp: Opc = UO_AddrOf; break;
+ case tok::star: Opc = UO_Deref; break;
+ case tok::plus: Opc = UO_Plus; break;
+ case tok::minus: Opc = UO_Minus; break;
+ case tok::tilde: Opc = UO_Not; break;
+ case tok::exclaim: Opc = UO_LNot; break;
+ case tok::kw___real: Opc = UO_Real; break;
+ case tok::kw___imag: Opc = UO_Imag; break;
+ case tok::kw___extension__: Opc = UO_Extension; break;
+ }
+ return Opc;
+}
+
+/// DiagnoseSelfAssignment - Emits a warning if a value is assigned to itself.
+/// This warning is only emitted for builtin assignment operations. It is also
+/// suppressed in the event of macro expansions.
+static void DiagnoseSelfAssignment(Sema &S, Expr *LHSExpr, Expr *RHSExpr,
+ SourceLocation OpLoc) {
+ if (!S.ActiveTemplateInstantiations.empty())
+ return;
+ if (OpLoc.isInvalid() || OpLoc.isMacroID())
+ return;
+ LHSExpr = LHSExpr->IgnoreParenImpCasts();
+ RHSExpr = RHSExpr->IgnoreParenImpCasts();
+ const DeclRefExpr *LHSDeclRef = dyn_cast<DeclRefExpr>(LHSExpr);
+ const DeclRefExpr *RHSDeclRef = dyn_cast<DeclRefExpr>(RHSExpr);
+ if (!LHSDeclRef || !RHSDeclRef ||
+ LHSDeclRef->getLocation().isMacroID() ||
+ RHSDeclRef->getLocation().isMacroID())
+ return;
+ const ValueDecl *LHSDecl =
+ cast<ValueDecl>(LHSDeclRef->getDecl()->getCanonicalDecl());
+ const ValueDecl *RHSDecl =
+ cast<ValueDecl>(RHSDeclRef->getDecl()->getCanonicalDecl());
+ if (LHSDecl != RHSDecl)
+ return;
+ if (LHSDecl->getType().isVolatileQualified())
+ return;
+ if (const ReferenceType *RefTy = LHSDecl->getType()->getAs<ReferenceType>())
+ if (RefTy->getPointeeType().isVolatileQualified())
+ return;
+
+ S.Diag(OpLoc, diag::warn_self_assignment)
+ << LHSDeclRef->getType()
+ << LHSExpr->getSourceRange() << RHSExpr->getSourceRange();
+}
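A hypothetical example (illustration only) of what DiagnoseSelfAssignment above catches, and the volatile carve-out:

    // Hypothetical user code, for illustration only.
    void f(int x, volatile int v) {
      x = x;   // warn_self_assignment
      v = v;   // not diagnosed: volatile accesses are deliberate side effects
    }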
+
+/// CreateBuiltinBinOp - Creates a new built-in binary operation with
+/// operator @p Opc at location @c TokLoc. This routine only supports
+/// built-in operations; ActOnBinOp handles overloaded operators.
+ExprResult Sema::CreateBuiltinBinOp(SourceLocation OpLoc,
+ BinaryOperatorKind Opc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ if (getLangOpts().CPlusPlus0x && isa<InitListExpr>(RHSExpr)) {
+ // The syntax only allows initializer lists on the RHS of assignment,
+ // so we don't need to worry about accepting invalid code for
+ // non-assignment operators.
+ // C++11 5.17p9:
+ // The meaning of x = {v} [...] is that of x = T(v) [...]. The meaning
+ // of x = {} is x = T().
+ InitializationKind Kind =
+ InitializationKind::CreateDirectList(RHSExpr->getLocStart());
+ InitializedEntity Entity =
+ InitializedEntity::InitializeTemporary(LHSExpr->getType());
+ InitializationSequence InitSeq(*this, Entity, Kind, &RHSExpr, 1);
+ ExprResult Init = InitSeq.Perform(*this, Entity, Kind,
+ MultiExprArg(&RHSExpr, 1));
+ if (Init.isInvalid())
+ return Init;
+ RHSExpr = Init.take();
+ }
+
+ ExprResult LHS = Owned(LHSExpr), RHS = Owned(RHSExpr);
+ QualType ResultTy; // Result type of the binary operator.
+ // The following two variables are used for compound assignment operators
+ QualType CompLHSTy; // Type of LHS after promotions for computation
+ QualType CompResultTy; // Type of computation result
+ ExprValueKind VK = VK_RValue;
+ ExprObjectKind OK = OK_Ordinary;
+
+ switch (Opc) {
+ case BO_Assign:
+ ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, QualType());
+ if (getLangOpts().CPlusPlus &&
+ LHS.get()->getObjectKind() != OK_ObjCProperty) {
+ VK = LHS.get()->getValueKind();
+ OK = LHS.get()->getObjectKind();
+ }
+ if (!ResultTy.isNull())
+ DiagnoseSelfAssignment(*this, LHS.get(), RHS.get(), OpLoc);
+ break;
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ ResultTy = CheckPointerToMemberOperands(LHS, RHS, VK, OpLoc,
+ Opc == BO_PtrMemI);
+ break;
+ case BO_Mul:
+ case BO_Div:
+ ResultTy = CheckMultiplyDivideOperands(LHS, RHS, OpLoc, false,
+ Opc == BO_Div);
+ break;
+ case BO_Rem:
+ ResultTy = CheckRemainderOperands(LHS, RHS, OpLoc);
+ break;
+ case BO_Add:
+ ResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, Opc);
+ break;
+ case BO_Sub:
+ ResultTy = CheckSubtractionOperands(LHS, RHS, OpLoc);
+ break;
+ case BO_Shl:
+ case BO_Shr:
+ ResultTy = CheckShiftOperands(LHS, RHS, OpLoc, Opc);
+ break;
+ case BO_LE:
+ case BO_LT:
+ case BO_GE:
+ case BO_GT:
+ ResultTy = CheckCompareOperands(LHS, RHS, OpLoc, Opc, true);
+ break;
+ case BO_EQ:
+ case BO_NE:
+ ResultTy = CheckCompareOperands(LHS, RHS, OpLoc, Opc, false);
+ break;
+ case BO_And:
+ case BO_Xor:
+ case BO_Or:
+ ResultTy = CheckBitwiseOperands(LHS, RHS, OpLoc);
+ break;
+ case BO_LAnd:
+ case BO_LOr:
+ ResultTy = CheckLogicalOperands(LHS, RHS, OpLoc, Opc);
+ break;
+ case BO_MulAssign:
+ case BO_DivAssign:
+ CompResultTy = CheckMultiplyDivideOperands(LHS, RHS, OpLoc, true,
+ Opc == BO_DivAssign);
+ CompLHSTy = CompResultTy;
+ if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
+ ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ break;
+ case BO_RemAssign:
+ CompResultTy = CheckRemainderOperands(LHS, RHS, OpLoc, true);
+ CompLHSTy = CompResultTy;
+ if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
+ ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ break;
+ case BO_AddAssign:
+ CompResultTy = CheckAdditionOperands(LHS, RHS, OpLoc, Opc, &CompLHSTy);
+ if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
+ ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ break;
+ case BO_SubAssign:
+ CompResultTy = CheckSubtractionOperands(LHS, RHS, OpLoc, &CompLHSTy);
+ if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
+ ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ break;
+ case BO_ShlAssign:
+ case BO_ShrAssign:
+ CompResultTy = CheckShiftOperands(LHS, RHS, OpLoc, Opc, true);
+ CompLHSTy = CompResultTy;
+ if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
+ ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ break;
+ case BO_AndAssign:
+ case BO_XorAssign:
+ case BO_OrAssign:
+ CompResultTy = CheckBitwiseOperands(LHS, RHS, OpLoc, true);
+ CompLHSTy = CompResultTy;
+ if (!CompResultTy.isNull() && !LHS.isInvalid() && !RHS.isInvalid())
+ ResultTy = CheckAssignmentOperands(LHS.get(), RHS, OpLoc, CompResultTy);
+ break;
+ case BO_Comma:
+ ResultTy = CheckCommaOperands(*this, LHS, RHS, OpLoc);
+ if (getLangOpts().CPlusPlus && !RHS.isInvalid()) {
+ VK = RHS.get()->getValueKind();
+ OK = RHS.get()->getObjectKind();
+ }
+ break;
+ }
+ if (ResultTy.isNull() || LHS.isInvalid() || RHS.isInvalid())
+ return ExprError();
+
+ // Check for array bounds violations for both sides of the BinaryOperator
+ CheckArrayAccess(LHS.get());
+ CheckArrayAccess(RHS.get());
+
+ if (CompResultTy.isNull())
+ return Owned(new (Context) BinaryOperator(LHS.take(), RHS.take(), Opc,
+ ResultTy, VK, OK, OpLoc));
+ if (getLangOpts().CPlusPlus && LHS.get()->getObjectKind() !=
+ OK_ObjCProperty) {
+ VK = VK_LValue;
+ OK = LHS.get()->getObjectKind();
+ }
+ return Owned(new (Context) CompoundAssignOperator(LHS.take(), RHS.take(), Opc,
+ ResultTy, VK, OK, CompLHSTy,
+ CompResultTy, OpLoc));
+}
+
+/// DiagnoseBitwisePrecedence - Emit a warning when bitwise and comparison
+/// operators are mixed in a way that suggests that the programmer forgot that
+/// comparison operators have higher precedence. The most typical example of
+/// such code is "flags & 0x0020 != 0", which is equivalent to "flags & 1".
+static void DiagnoseBitwisePrecedence(Sema &Self, BinaryOperatorKind Opc,
+ SourceLocation OpLoc, Expr *LHSExpr,
+ Expr *RHSExpr) {
+ typedef BinaryOperator BinOp;
+ BinOp::Opcode LHSopc = static_cast<BinOp::Opcode>(-1),
+ RHSopc = static_cast<BinOp::Opcode>(-1);
+ if (BinOp *BO = dyn_cast<BinOp>(LHSExpr))
+ LHSopc = BO->getOpcode();
+ if (BinOp *BO = dyn_cast<BinOp>(RHSExpr))
+ RHSopc = BO->getOpcode();
+
+ // Neither sub-expression is itself a binary operator; nothing to check.
+ if (LHSopc == -1 && RHSopc == -1)
+ return;
+
+ // Bitwise operations are sometimes used as eager logical ops.
+ // Don't diagnose this.
+ if ((BinOp::isComparisonOp(LHSopc) || BinOp::isBitwiseOp(LHSopc)) &&
+ (BinOp::isComparisonOp(RHSopc) || BinOp::isBitwiseOp(RHSopc)))
+ return;
+
+ bool isLeftComp = BinOp::isComparisonOp(LHSopc);
+ bool isRightComp = BinOp::isComparisonOp(RHSopc);
+ if (!isLeftComp && !isRightComp) return;
+
+ SourceRange DiagRange = isLeftComp ? SourceRange(LHSExpr->getLocStart(),
+ OpLoc)
+ : SourceRange(OpLoc, RHSExpr->getLocEnd());
+ std::string OpStr = isLeftComp ? BinOp::getOpcodeStr(LHSopc)
+ : BinOp::getOpcodeStr(RHSopc);
+ SourceRange ParensRange = isLeftComp ?
+ SourceRange(cast<BinOp>(LHSExpr)->getRHS()->getLocStart(),
+ RHSExpr->getLocEnd())
+ : SourceRange(LHSExpr->getLocStart(),
+ cast<BinOp>(RHSExpr)->getLHS()->getLocStart());
+
+ Self.Diag(OpLoc, diag::warn_precedence_bitwise_rel)
+ << DiagRange << BinOp::getOpcodeStr(Opc) << OpStr;
+ SuggestParentheses(Self, OpLoc,
+ Self.PDiag(diag::note_precedence_bitwise_silence) << OpStr,
+ RHSExpr->getSourceRange());
+ SuggestParentheses(Self, OpLoc,
+ Self.PDiag(diag::note_precedence_bitwise_first) << BinOp::getOpcodeStr(Opc),
+ ParensRange);
+}
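A hypothetical snippet (illustration only) showing the precedence mistake DiagnoseBitwisePrecedence warns about, together with the parenthesization its notes suggest:

    // Hypothetical user code, for illustration only.
    bool test(unsigned flags) {
      // '!=' binds tighter than '&', so this means 'flags & (0x0020 != 0)',
      // i.e. 'flags & 1'.
      bool wrong = flags & 0x0020 != 0;    // warn_precedence_bitwise_rel
      bool right = (flags & 0x0020) != 0;  // what the fix-it note suggests
      return wrong == right;
    }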
+
+/// \brief Given a '&' expression that appears inside a '|' expression,
+/// emit a diagnostic together with a fixit hint that wraps the '&' expression
+/// in parentheses.
+static void
+EmitDiagnosticForBitwiseAndInBitwiseOr(Sema &Self, SourceLocation OpLoc,
+ BinaryOperator *Bop) {
+ assert(Bop->getOpcode() == BO_And);
+ Self.Diag(Bop->getOperatorLoc(), diag::warn_bitwise_and_in_bitwise_or)
+ << Bop->getSourceRange() << OpLoc;
+ SuggestParentheses(Self, Bop->getOperatorLoc(),
+ Self.PDiag(diag::note_bitwise_and_in_bitwise_or_silence),
+ Bop->getSourceRange());
+}
+
+/// \brief Given a '&&' expression that appears inside a '||' expression,
+/// emit a diagnostic together with a fixit hint that wraps the '&&' expression
+/// in parentheses.
+static void
+EmitDiagnosticForLogicalAndInLogicalOr(Sema &Self, SourceLocation OpLoc,
+ BinaryOperator *Bop) {
+ assert(Bop->getOpcode() == BO_LAnd);
+ Self.Diag(Bop->getOperatorLoc(), diag::warn_logical_and_in_logical_or)
+ << Bop->getSourceRange() << OpLoc;
+ SuggestParentheses(Self, Bop->getOperatorLoc(),
+ Self.PDiag(diag::note_logical_and_in_logical_or_silence),
+ Bop->getSourceRange());
+}
+
+/// \brief Returns true if the given expression can be evaluated as a constant
+/// 'true'.
+static bool EvaluatesAsTrue(Sema &S, Expr *E) {
+ bool Res;
+ return E->EvaluateAsBooleanCondition(Res, S.getASTContext()) && Res;
+}
+
+/// \brief Returns true if the given expression can be evaluated as a constant
+/// 'false'.
+static bool EvaluatesAsFalse(Sema &S, Expr *E) {
+ bool Res;
+ return E->EvaluateAsBooleanCondition(Res, S.getASTContext()) && !Res;
+}
+
+/// \brief Look for '&&' in the left hand of a '||' expr.
+static void DiagnoseLogicalAndInLogicalOrLHS(Sema &S, SourceLocation OpLoc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(LHSExpr)) {
+ if (Bop->getOpcode() == BO_LAnd) {
+ // If it's "a && b || 0" don't warn since the precedence doesn't matter.
+ if (EvaluatesAsFalse(S, RHSExpr))
+ return;
+ // If it's "1 && a || b" don't warn since the precedence doesn't matter.
+ if (!EvaluatesAsTrue(S, Bop->getLHS()))
+ return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, Bop);
+ } else if (Bop->getOpcode() == BO_LOr) {
+ if (BinaryOperator *RBop = dyn_cast<BinaryOperator>(Bop->getRHS())) {
+ // If it's "a || b && 1 || c" we didn't warn earlier for
+ // "a || b && 1", but warn now.
+ if (RBop->getOpcode() == BO_LAnd && EvaluatesAsTrue(S, RBop->getRHS()))
+ return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, RBop);
+ }
+ }
+ }
+}
+
+/// \brief Look for '&&' in the right hand of a '||' expr.
+static void DiagnoseLogicalAndInLogicalOrRHS(Sema &S, SourceLocation OpLoc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(RHSExpr)) {
+ if (Bop->getOpcode() == BO_LAnd) {
+ // If it's "0 || a && b" don't warn since the precedence doesn't matter.
+ if (EvaluatesAsFalse(S, LHSExpr))
+ return;
+ // If it's "a || b && 1" don't warn since the precedence doesn't matter.
+ if (!EvaluatesAsTrue(S, Bop->getRHS()))
+ return EmitDiagnosticForLogicalAndInLogicalOr(S, OpLoc, Bop);
+ }
+ }
+}
+
+/// \brief Look for '&' in the left or right hand of a '|' expr.
+static void DiagnoseBitwiseAndInBitwiseOr(Sema &S, SourceLocation OpLoc,
+ Expr *OrArg) {
+ if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(OrArg)) {
+ if (Bop->getOpcode() == BO_And)
+ return EmitDiagnosticForBitwiseAndInBitwiseOr(S, OpLoc, Bop);
+ }
+}
+
+/// DiagnoseBinOpPrecedence - Emit warnings for expressions with tricky
+/// precedence.
+static void DiagnoseBinOpPrecedence(Sema &Self, BinaryOperatorKind Opc,
+ SourceLocation OpLoc, Expr *LHSExpr,
+ Expr *RHSExpr){
+ // Diagnose "arg1 'bitwise' arg2 'eq' arg3".
+ if (BinaryOperator::isBitwiseOp(Opc))
+ DiagnoseBitwisePrecedence(Self, Opc, OpLoc, LHSExpr, RHSExpr);
+
+ // Diagnose "arg1 & arg2 | arg3"
+ if (Opc == BO_Or && !OpLoc.isMacroID()/* Don't warn in macros. */) {
+ DiagnoseBitwiseAndInBitwiseOr(Self, OpLoc, LHSExpr);
+ DiagnoseBitwiseAndInBitwiseOr(Self, OpLoc, RHSExpr);
+ }
+
+ // Warn about arg1 || arg2 && arg3, as GCC 4.3+ does.
+ // We don't warn for 'assert(a || b && "bad")' since this is safe.
+ if (Opc == BO_LOr && !OpLoc.isMacroID()/* Don't warn in macros. */) {
+ DiagnoseLogicalAndInLogicalOrLHS(Self, OpLoc, LHSExpr, RHSExpr);
+ DiagnoseLogicalAndInLogicalOrRHS(Self, OpLoc, LHSExpr, RHSExpr);
+ }
+}
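And a hypothetical snippet (illustration only) for the '&&'-inside-'||' case handled just above, including the constant-operand exemption that keeps assert() quiet:

    // Hypothetical user code, for illustration only.
    #include <cassert>
    bool f(bool a, bool b, bool c) {
      bool r = a || b && c;            // warn_logical_and_in_logical_or;
                                       // the note suggests wrapping 'b && c'
                                       // in parentheses
      assert(a || b && "b expected");  // not diagnosed: the string literal is
                                       // constant-true, so precedence is moot
      return r;
    }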
+
+// Binary Operators. 'Tok' is the token for the operator.
+ExprResult Sema::ActOnBinOp(Scope *S, SourceLocation TokLoc,
+ tok::TokenKind Kind,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ BinaryOperatorKind Opc = ConvertTokenKindToBinaryOpcode(Kind);
+ assert((LHSExpr != 0) && "ActOnBinOp(): missing left expression");
+ assert((RHSExpr != 0) && "ActOnBinOp(): missing right expression");
+
+ // Emit warnings for tricky precedence issues, e.g. "bitfield & 0x4 == 0"
+ DiagnoseBinOpPrecedence(*this, Opc, TokLoc, LHSExpr, RHSExpr);
+
+ return BuildBinOp(S, TokLoc, Opc, LHSExpr, RHSExpr);
+}
+
+/// Build an overloaded binary operator expression in the given scope.
+static ExprResult BuildOverloadedBinOp(Sema &S, Scope *Sc, SourceLocation OpLoc,
+ BinaryOperatorKind Opc,
+ Expr *LHS, Expr *RHS) {
+ // Find all of the overloaded operators visible from this
+ // point. We perform both an operator-name lookup from the local
+ // scope and an argument-dependent lookup based on the types of
+ // the arguments.
+ UnresolvedSet<16> Functions;
+ OverloadedOperatorKind OverOp
+ = BinaryOperator::getOverloadedOperator(Opc);
+ if (Sc && OverOp != OO_None)
+ S.LookupOverloadedOperatorName(OverOp, Sc, LHS->getType(),
+ RHS->getType(), Functions);
+
+ // Build the (potentially-overloaded, potentially-dependent)
+ // binary operation.
+ return S.CreateOverloadedBinOp(OpLoc, Opc, Functions, LHS, RHS);
+}
+
+ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc,
+ BinaryOperatorKind Opc,
+ Expr *LHSExpr, Expr *RHSExpr) {
+ // We want to end up calling one of checkPseudoObjectAssignment
+ // (if the LHS is a pseudo-object), BuildOverloadedBinOp (if
+ // both expressions are overloadable or either is type-dependent),
+ // or CreateBuiltinBinOp (in any other case). We also want to get
+ // any placeholder types out of the way.
+
+ // Handle pseudo-objects in the LHS.
+ if (const BuiltinType *pty = LHSExpr->getType()->getAsPlaceholderType()) {
+ // Assignments with a pseudo-object l-value need special analysis.
+ if (pty->getKind() == BuiltinType::PseudoObject &&
+ BinaryOperator::isAssignmentOp(Opc))
+ return checkPseudoObjectAssignment(S, OpLoc, Opc, LHSExpr, RHSExpr);
+
+ // Don't resolve overloads if the other type is overloadable.
+ if (pty->getKind() == BuiltinType::Overload) {
+ // We can't actually test that if we still have a placeholder,
+ // though. Fortunately, none of the exceptions we see in that
+ // code below are valid when the LHS is an overload set. Note
+ // that an overload set can be dependently-typed, but it never
+ // instantiates to having an overloadable type.
+ ExprResult resolvedRHS = CheckPlaceholderExpr(RHSExpr);
+ if (resolvedRHS.isInvalid()) return ExprError();
+ RHSExpr = resolvedRHS.take();
+
+ if (RHSExpr->isTypeDependent() ||
+ RHSExpr->getType()->isOverloadableType())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
+ }
+
+ ExprResult LHS = CheckPlaceholderExpr(LHSExpr);
+ if (LHS.isInvalid()) return ExprError();
+ LHSExpr = LHS.take();
+ }
+
+ // Handle pseudo-objects in the RHS.
+ if (const BuiltinType *pty = RHSExpr->getType()->getAsPlaceholderType()) {
+ // An overload in the RHS can potentially be resolved by the type
+ // being assigned to.
+ if (Opc == BO_Assign && pty->getKind() == BuiltinType::Overload) {
+ if (LHSExpr->isTypeDependent() || RHSExpr->isTypeDependent())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
+
+ if (LHSExpr->getType()->isOverloadableType())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
+
+ return CreateBuiltinBinOp(OpLoc, Opc, LHSExpr, RHSExpr);
+ }
+
+ // Don't resolve overloads if the other type is overloadable.
+ if (pty->getKind() == BuiltinType::Overload &&
+ LHSExpr->getType()->isOverloadableType())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
+
+ ExprResult resolvedRHS = CheckPlaceholderExpr(RHSExpr);
+ if (!resolvedRHS.isUsable()) return ExprError();
+ RHSExpr = resolvedRHS.take();
+ }
+
+ if (getLangOpts().CPlusPlus) {
+ // If either expression is type-dependent, always build an
+ // overloaded op.
+ if (LHSExpr->isTypeDependent() || RHSExpr->isTypeDependent())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
+
+ // Otherwise, build an overloaded op if either expression has an
+ // overloadable type.
+ if (LHSExpr->getType()->isOverloadableType() ||
+ RHSExpr->getType()->isOverloadableType())
+ return BuildOverloadedBinOp(*this, S, OpLoc, Opc, LHSExpr, RHSExpr);
+ }
+
+ // Build a built-in binary operation.
+ return CreateBuiltinBinOp(OpLoc, Opc, LHSExpr, RHSExpr);
+}
+
+ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
+ UnaryOperatorKind Opc,
+ Expr *InputExpr) {
+ ExprResult Input = Owned(InputExpr);
+ ExprValueKind VK = VK_RValue;
+ ExprObjectKind OK = OK_Ordinary;
+ QualType resultType;
+ switch (Opc) {
+ case UO_PreInc:
+ case UO_PreDec:
+ case UO_PostInc:
+ case UO_PostDec:
+ resultType = CheckIncrementDecrementOperand(*this, Input.get(), VK, OpLoc,
+ Opc == UO_PreInc ||
+ Opc == UO_PostInc,
+ Opc == UO_PreInc ||
+ Opc == UO_PreDec);
+ break;
+ case UO_AddrOf:
+ resultType = CheckAddressOfOperand(*this, Input, OpLoc);
+ break;
+ case UO_Deref: {
+ Input = DefaultFunctionArrayLvalueConversion(Input.take());
+ resultType = CheckIndirectionOperand(*this, Input.get(), VK, OpLoc);
+ break;
+ }
+ case UO_Plus:
+ case UO_Minus:
+ Input = UsualUnaryConversions(Input.take());
+ if (Input.isInvalid()) return ExprError();
+ resultType = Input.get()->getType();
+ if (resultType->isDependentType())
+ break;
+ if (resultType->isArithmeticType() || // C99 6.5.3.3p1
+ resultType->isVectorType())
+ break;
+ else if (getLangOpts().CPlusPlus && // C++ [expr.unary.op]p6-7
+ resultType->isEnumeralType())
+ break;
+ else if (getLangOpts().CPlusPlus && // C++ [expr.unary.op]p6
+ Opc == UO_Plus &&
+ resultType->isPointerType())
+ break;
+
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
+
+ case UO_Not: // bitwise complement
+ Input = UsualUnaryConversions(Input.take());
+ if (Input.isInvalid()) return ExprError();
+ resultType = Input.get()->getType();
+ if (resultType->isDependentType())
+ break;
+ // C99 6.5.3.3p1. We allow complex int and float as a GCC extension.
+ if (resultType->isComplexType() || resultType->isComplexIntegerType())
+ // C99 does not support '~' for complex conjugation.
+ Diag(OpLoc, diag::ext_integer_complement_complex)
+ << resultType << Input.get()->getSourceRange();
+ else if (resultType->hasIntegerRepresentation())
+ break;
+ else {
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
+ }
+ break;
+
+ case UO_LNot: // logical negation
+ // Unlike +/-/~, integer promotions aren't done here (C99 6.5.3.3p5).
+ Input = DefaultFunctionArrayLvalueConversion(Input.take());
+ if (Input.isInvalid()) return ExprError();
+ resultType = Input.get()->getType();
+
+ // Though we still have to promote half FP to float...
+ if (resultType->isHalfType()) {
+ Input = ImpCastExprToType(Input.take(), Context.FloatTy, CK_FloatingCast).take();
+ resultType = Context.FloatTy;
+ }
+
+ if (resultType->isDependentType())
+ break;
+ if (resultType->isScalarType()) {
+ // C99 6.5.3.3p1: ok, fallthrough;
+ if (Context.getLangOpts().CPlusPlus) {
+ // C++03 [expr.unary.op]p8, C++0x [expr.unary.op]p9:
+ // operand contextually converted to bool.
+ Input = ImpCastExprToType(Input.take(), Context.BoolTy,
+ ScalarTypeToBooleanCastKind(resultType));
+ }
+ } else if (resultType->isExtVectorType()) {
+ // Vector logical not returns the signed variant of the operand type.
+ resultType = GetSignedVectorType(resultType);
+ break;
+ } else {
+ return ExprError(Diag(OpLoc, diag::err_typecheck_unary_expr)
+ << resultType << Input.get()->getSourceRange());
+ }
+
+ // LNot always has type int. C99 6.5.3.3p5.
+ // In C++, it's bool. C++ 5.3.1p8
+ resultType = Context.getLogicalOperationType();
+ break;
+ case UO_Real:
+ case UO_Imag:
+ resultType = CheckRealImagOperand(*this, Input, OpLoc, Opc == UO_Real);
+ // _Real maps ordinary l-values into ordinary l-values. _Imag maps ordinary
+ // complex l-values to ordinary l-values and all other values to r-values.
+ if (Input.isInvalid()) return ExprError();
+ if (Opc == UO_Real || Input.get()->getType()->isAnyComplexType()) {
+ if (Input.get()->getValueKind() != VK_RValue &&
+ Input.get()->getObjectKind() == OK_Ordinary)
+ VK = Input.get()->getValueKind();
+ } else if (!getLangOpts().CPlusPlus) {
+ // In C, a volatile scalar is read by __imag. In C++, it is not.
+ Input = DefaultLvalueConversion(Input.take());
+ }
+ break;
+ case UO_Extension:
+ resultType = Input.get()->getType();
+ VK = Input.get()->getValueKind();
+ OK = Input.get()->getObjectKind();
+ break;
+ }
+ if (resultType.isNull() || Input.isInvalid())
+ return ExprError();
+
+ // Check for array bounds violations in the operand of the UnaryOperator,
+ // except for the '*' and '&' operators that have to be handled specially
+ // by CheckArrayAccess (as there are special cases like &array[arraysize]
+ // that are explicitly defined as valid by the standard).
+ if (Opc != UO_AddrOf && Opc != UO_Deref)
+ CheckArrayAccess(Input.get());
+
+ return Owned(new (Context) UnaryOperator(Input.take(), Opc, resultType,
+ VK, OK, OpLoc));
+}
+
+/// \brief Determine whether the given expression is a qualified member
+/// access expression, of a form that could be turned into a pointer to member
+/// with the address-of operator.
+static bool isQualifiedMemberAccess(Expr *E) {
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (!DRE->getQualifier())
+ return false;
+
+ ValueDecl *VD = DRE->getDecl();
+ if (!VD->isCXXClassMember())
+ return false;
+
+ if (isa<FieldDecl>(VD) || isa<IndirectFieldDecl>(VD))
+ return true;
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(VD))
+ return Method->isInstance();
+
+ return false;
+ }
+
+ if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
+ if (!ULE->getQualifier())
+ return false;
+
+ for (UnresolvedLookupExpr::decls_iterator D = ULE->decls_begin(),
+ DEnd = ULE->decls_end();
+ D != DEnd; ++D) {
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(*D)) {
+ if (Method->isInstance())
+ return true;
+ } else {
+ // Overload set does not contain methods.
+ break;
+ }
+ }
+
+ return false;
+ }
+
+ return false;
+}
+
+ExprResult Sema::BuildUnaryOp(Scope *S, SourceLocation OpLoc,
+ UnaryOperatorKind Opc, Expr *Input) {
+ // First things first: handle placeholders so that the
+ // overloaded-operator check considers the right type.
+ if (const BuiltinType *pty = Input->getType()->getAsPlaceholderType()) {
+ // Increment and decrement of pseudo-object references.
+ if (pty->getKind() == BuiltinType::PseudoObject &&
+ UnaryOperator::isIncrementDecrementOp(Opc))
+ return checkPseudoObjectIncDec(S, OpLoc, Opc, Input);
+
+ // __extension__ is always a builtin operator.
+ if (Opc == UO_Extension)
+ return CreateBuiltinUnaryOp(OpLoc, Opc, Input);
+
+ // & gets special logic for several kinds of placeholder.
+ // The builtin code knows what to do.
+ if (Opc == UO_AddrOf &&
+ (pty->getKind() == BuiltinType::Overload ||
+ pty->getKind() == BuiltinType::UnknownAny ||
+ pty->getKind() == BuiltinType::BoundMember))
+ return CreateBuiltinUnaryOp(OpLoc, Opc, Input);
+
+ // Anything else needs to be handled now.
+ ExprResult Result = CheckPlaceholderExpr(Input);
+ if (Result.isInvalid()) return ExprError();
+ Input = Result.take();
+ }
+
+ if (getLangOpts().CPlusPlus && Input->getType()->isOverloadableType() &&
+ UnaryOperator::getOverloadedOperator(Opc) != OO_None &&
+ !(Opc == UO_AddrOf && isQualifiedMemberAccess(Input))) {
+ // Find all of the overloaded operators visible from this
+ // point. We perform both an operator-name lookup from the local
+ // scope and an argument-dependent lookup based on the types of
+ // the arguments.
+ UnresolvedSet<16> Functions;
+ OverloadedOperatorKind OverOp = UnaryOperator::getOverloadedOperator(Opc);
+ if (S && OverOp != OO_None)
+ LookupOverloadedOperatorName(OverOp, S, Input->getType(), QualType(),
+ Functions);
+
+ return CreateOverloadedUnaryOp(OpLoc, Opc, Functions, Input);
+ }
+
+ return CreateBuiltinUnaryOp(OpLoc, Opc, Input);
+}
+
+// Unary Operators. 'Tok' is the token for the operator.
+ExprResult Sema::ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
+ tok::TokenKind Op, Expr *Input) {
+ return BuildUnaryOp(S, OpLoc, ConvertTokenKindToUnaryOpcode(Op), Input);
+}
+
+/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
+ExprResult Sema::ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
+ LabelDecl *TheDecl) {
+ TheDecl->setUsed();
+ // Create the AST node. The address of a label always has type 'void*'.
+ return Owned(new (Context) AddrLabelExpr(OpLoc, LabLoc, TheDecl,
+ Context.getPointerType(Context.VoidTy)));
+}
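ActOnAddrLabel implements the GNU '&&label' extension; a hypothetical sketch (illustration only) of how it is typically paired with a computed goto:

    // Hypothetical user code, for illustration only (GNU extension).
    void dispatch(int n) {
      void *target = n ? &&odd : &&even;  // '&&label' has type 'void *'
      goto *target;                       // GNU computed goto
    odd:
      return;
    even:
      return;
    }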
+
+/// Given the last statement in a statement-expression, check whether
+/// the result is a producing expression (like a call to an
+/// ns_returns_retained function) and, if so, rebuild it to hoist the
+/// release out of the full-expression. Otherwise, return null.
+/// Cannot fail.
+static Expr *maybeRebuildARCConsumingStmt(Stmt *Statement) {
+ // Should always be wrapped with one of these.
+ ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(Statement);
+ if (!cleanups) return 0;
+
+ ImplicitCastExpr *cast = dyn_cast<ImplicitCastExpr>(cleanups->getSubExpr());
+ if (!cast || cast->getCastKind() != CK_ARCConsumeObject)
+ return 0;
+
+ // Splice out the cast. This shouldn't modify any interesting
+ // features of the statement.
+ Expr *producer = cast->getSubExpr();
+ assert(producer->getType() == cast->getType());
+ assert(producer->getValueKind() == cast->getValueKind());
+ cleanups->setSubExpr(producer);
+ return cleanups;
+}
+
+void Sema::ActOnStartStmtExpr() {
+ PushExpressionEvaluationContext(ExprEvalContexts.back().Context);
+}
+
+void Sema::ActOnStmtExprError() {
+ // Note that this function is also called by TreeTransform when leaving a
+ // StmtExpr scope without rebuilding anything.
+
+ DiscardCleanupsInEvaluationContext();
+ PopExpressionEvaluationContext();
+}
+
+ExprResult
+Sema::ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
+ SourceLocation RPLoc) { // "({..})"
+ assert(SubStmt && isa<CompoundStmt>(SubStmt) && "Invalid action invocation!");
+ CompoundStmt *Compound = cast<CompoundStmt>(SubStmt);
+
+ if (hasAnyUnrecoverableErrorsInThisFunction())
+ DiscardCleanupsInEvaluationContext();
+ assert(!ExprNeedsCleanups && "cleanups within StmtExpr not correctly bound!");
+ PopExpressionEvaluationContext();
+
+ bool isFileScope
+ = (getCurFunctionOrMethodDecl() == 0) && (getCurBlock() == 0);
+ if (isFileScope)
+ return ExprError(Diag(LPLoc, diag::err_stmtexpr_file_scope));
+
+ // FIXME: there are a variety of strange constraints to enforce here, for
+ // example, it is not possible to goto into a stmt expression apparently.
+ // More semantic analysis is needed.
+
+ // If there are sub stmts in the compound stmt, take the type of the last one
+ // as the type of the stmtexpr.
+ QualType Ty = Context.VoidTy;
+ bool StmtExprMayBindToTemp = false;
+ if (!Compound->body_empty()) {
+ Stmt *LastStmt = Compound->body_back();
+ LabelStmt *LastLabelStmt = 0;
+ // If LastStmt is a label, skip down through into the body.
+ while (LabelStmt *Label = dyn_cast<LabelStmt>(LastStmt)) {
+ LastLabelStmt = Label;
+ LastStmt = Label->getSubStmt();
+ }
+
+ if (Expr *LastE = dyn_cast<Expr>(LastStmt)) {
+ // Do function/array conversion on the last expression, but not
+ // lvalue-to-rvalue. However, initialize an unqualified type.
+ ExprResult LastExpr = DefaultFunctionArrayConversion(LastE);
+ if (LastExpr.isInvalid())
+ return ExprError();
+ Ty = LastExpr.get()->getType().getUnqualifiedType();
+
+ if (!Ty->isDependentType() && !LastExpr.get()->isTypeDependent()) {
+ // In ARC, if the final expression ends in a consume, splice
+ // the consume out and bind it later. In the alternate case
+ // (when dealing with a retainable type), the result
+ // initialization will create a produce. In both cases the
+ // result will be +1, and we'll need to balance that out with
+ // a bind.
+ if (Expr *rebuiltLastStmt
+ = maybeRebuildARCConsumingStmt(LastExpr.get())) {
+ LastExpr = rebuiltLastStmt;
+ } else {
+ LastExpr = PerformCopyInitialization(
+ InitializedEntity::InitializeResult(LPLoc,
+ Ty,
+ false),
+ SourceLocation(),
+ LastExpr);
+ }
+
+ if (LastExpr.isInvalid())
+ return ExprError();
+ if (LastExpr.get() != 0) {
+ if (!LastLabelStmt)
+ Compound->setLastStmt(LastExpr.take());
+ else
+ LastLabelStmt->setSubStmt(LastExpr.take());
+ StmtExprMayBindToTemp = true;
+ }
+ }
+ }
+ }
+
+ // FIXME: Check that expression type is complete/non-abstract; statement
+ // expressions are not lvalues.
+ Expr *ResStmtExpr = new (Context) StmtExpr(Compound, Ty, LPLoc, RPLoc);
+ if (StmtExprMayBindToTemp)
+ return MaybeBindToTemporary(ResStmtExpr);
+ return Owned(ResStmtExpr);
+}
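A hypothetical GNU statement expression (illustration only) of the kind ActOnStmtExpr builds: the value and type come from the last statement of the compound body, and the construct is rejected at file scope (err_stmtexpr_file_scope).

    // Hypothetical user code, for illustration only (GNU extension).
    int twice_plus_one(int x) {
      int y = ({ int t = x * 2; t + 1; });  // the '({ ... })' has type int
      return y;
    }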
+
+ExprResult Sema::BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
+ TypeSourceInfo *TInfo,
+ OffsetOfComponent *CompPtr,
+ unsigned NumComponents,
+ SourceLocation RParenLoc) {
+ QualType ArgTy = TInfo->getType();
+ bool Dependent = ArgTy->isDependentType();
+ SourceRange TypeRange = TInfo->getTypeLoc().getLocalSourceRange();
+
+ // We must have at least one component that refers to the type, and the first
+ // one is known to be a field designator. Verify that the ArgTy represents
+ // a struct/union/class.
+ if (!Dependent && !ArgTy->isRecordType())
+ return ExprError(Diag(BuiltinLoc, diag::err_offsetof_record_type)
+ << ArgTy << TypeRange);
+
+ // The type must be complete per C99 7.17p3, because declaring a variable
+ // with an incomplete type would be ill-formed.
+ if (!Dependent
+ && RequireCompleteType(BuiltinLoc, ArgTy,
+ PDiag(diag::err_offsetof_incomplete_type)
+ << TypeRange))
+ return ExprError();
+
+ // offsetof with non-identifier designators (e.g. "offsetof(x, a.b[c])") is a
+ // GCC extension; diagnose it.
+ // FIXME: This diagnostic isn't actually visible because the location is in
+ // a system header!
+ if (NumComponents != 1)
+ Diag(BuiltinLoc, diag::ext_offsetof_extended_field_designator)
+ << SourceRange(CompPtr[1].LocStart, CompPtr[NumComponents-1].LocEnd);
+
+ bool DidWarnAboutNonPOD = false;
+ QualType CurrentType = ArgTy;
+ typedef OffsetOfExpr::OffsetOfNode OffsetOfNode;
+ SmallVector<OffsetOfNode, 4> Comps;
+ SmallVector<Expr*, 4> Exprs;
+ for (unsigned i = 0; i != NumComponents; ++i) {
+ const OffsetOfComponent &OC = CompPtr[i];
+ if (OC.isBrackets) {
+ // Offset of an array sub-field. TODO: Should we allow vector elements?
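+ // For example, this handles the '[1]' component in
+ // __builtin_offsetof(struct S, arr[1]).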
+ if (!CurrentType->isDependentType()) {
+ const ArrayType *AT = Context.getAsArrayType(CurrentType);
+ if (!AT)
+ return ExprError(Diag(OC.LocEnd, diag::err_offsetof_array_type)
+ << CurrentType);
+ CurrentType = AT->getElementType();
+ } else
+ CurrentType = Context.DependentTy;
+
+ ExprResult IdxRval = DefaultLvalueConversion(static_cast<Expr*>(OC.U.E));
+ if (IdxRval.isInvalid())
+ return ExprError();
+ Expr *Idx = IdxRval.take();
+
+ // The expression must be an integral expression.
+ // FIXME: An integral constant expression?
+ if (!Idx->isTypeDependent() && !Idx->isValueDependent() &&
+ !Idx->getType()->isIntegerType())
+ return ExprError(Diag(Idx->getLocStart(),
+ diag::err_typecheck_subscript_not_integer)
+ << Idx->getSourceRange());
+
+ // Record this array index.
+ Comps.push_back(OffsetOfNode(OC.LocStart, Exprs.size(), OC.LocEnd));
+ Exprs.push_back(Idx);
+ continue;
+ }
+
+ // Offset of a field.
+ if (CurrentType->isDependentType()) {
+ // We have the offset of a field, but we can't look into the dependent
+ // type. Just record the identifier of the field.
+ Comps.push_back(OffsetOfNode(OC.LocStart, OC.U.IdentInfo, OC.LocEnd));
+ CurrentType = Context.DependentTy;
+ continue;
+ }
+
+ // We need to have a complete type to look into.
+ if (RequireCompleteType(OC.LocStart, CurrentType,
+ diag::err_offsetof_incomplete_type))
+ return ExprError();
+
+ // Look for the designated field.
+ const RecordType *RC = CurrentType->getAs<RecordType>();
+ if (!RC)
+ return ExprError(Diag(OC.LocEnd, diag::err_offsetof_record_type)
+ << CurrentType);
+ RecordDecl *RD = RC->getDecl();
+
+ // C++ [lib.support.types]p5:
+ // The macro offsetof accepts a restricted set of type arguments in this
+ // International Standard. type shall be a POD structure or a POD union
+ // (clause 9).
+ if (CXXRecordDecl *CRD = dyn_cast<CXXRecordDecl>(RD)) {
+ if (!CRD->isPOD() && !DidWarnAboutNonPOD &&
+ DiagRuntimeBehavior(BuiltinLoc, 0,
+ PDiag(diag::warn_offsetof_non_pod_type)
+ << SourceRange(CompPtr[0].LocStart, OC.LocEnd)
+ << CurrentType))
+ DidWarnAboutNonPOD = true;
+ }
+
+ // Look for the field.
+ LookupResult R(*this, OC.U.IdentInfo, OC.LocStart, LookupMemberName);
+ LookupQualifiedName(R, RD);
+ FieldDecl *MemberDecl = R.getAsSingle<FieldDecl>();
+ IndirectFieldDecl *IndirectMemberDecl = 0;
+ if (!MemberDecl) {
+ if ((IndirectMemberDecl = R.getAsSingle<IndirectFieldDecl>()))
+ MemberDecl = IndirectMemberDecl->getAnonField();
+ }
+
+ if (!MemberDecl)
+ return ExprError(Diag(BuiltinLoc, diag::err_no_member)
+ << OC.U.IdentInfo << RD << SourceRange(OC.LocStart,
+ OC.LocEnd));
+
+ // C99 7.17p3:
+ // (If the specified member is a bit-field, the behavior is undefined.)
+ //
+ // We diagnose this as an error.
+ if (MemberDecl->isBitField()) {
+ Diag(OC.LocEnd, diag::err_offsetof_bitfield)
+ << MemberDecl->getDeclName()
+ << SourceRange(BuiltinLoc, RParenLoc);
+ Diag(MemberDecl->getLocation(), diag::note_bitfield_decl);
+ return ExprError();
+ }
+
+ RecordDecl *Parent = MemberDecl->getParent();
+ if (IndirectMemberDecl)
+ Parent = cast<RecordDecl>(IndirectMemberDecl->getDeclContext());
+
+ // If the member was found in a base class, introduce OffsetOfNodes for
+ // the base class indirections.
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ if (IsDerivedFrom(CurrentType, Context.getTypeDeclType(Parent), Paths)) {
+ CXXBasePath &Path = Paths.front();
+ for (CXXBasePath::iterator B = Path.begin(), BEnd = Path.end();
+ B != BEnd; ++B)
+ Comps.push_back(OffsetOfNode(B->Base));
+ }
+
+ if (IndirectMemberDecl) {
+ for (IndirectFieldDecl::chain_iterator FI =
+ IndirectMemberDecl->chain_begin(),
+ FEnd = IndirectMemberDecl->chain_end(); FI != FEnd; ++FI) {
+ assert(isa<FieldDecl>(*FI));
+ Comps.push_back(OffsetOfNode(OC.LocStart,
+ cast<FieldDecl>(*FI), OC.LocEnd));
+ }
+ } else
+ Comps.push_back(OffsetOfNode(OC.LocStart, MemberDecl, OC.LocEnd));
+
+ CurrentType = MemberDecl->getType().getNonReferenceType();
+ }
+
+ return Owned(OffsetOfExpr::Create(Context, Context.getSizeType(), BuiltinLoc,
+ TInfo, Comps.data(), Comps.size(),
+ Exprs.data(), Exprs.size(), RParenLoc));
+}
+
+ExprResult Sema::ActOnBuiltinOffsetOf(Scope *S,
+ SourceLocation BuiltinLoc,
+ SourceLocation TypeLoc,
+ ParsedType ParsedArgTy,
+ OffsetOfComponent *CompPtr,
+ unsigned NumComponents,
+ SourceLocation RParenLoc) {
+
+ TypeSourceInfo *ArgTInfo;
+ QualType ArgTy = GetTypeFromParser(ParsedArgTy, &ArgTInfo);
+ if (ArgTy.isNull())
+ return ExprError();
+
+ if (!ArgTInfo)
+ ArgTInfo = Context.getTrivialTypeSourceInfo(ArgTy, TypeLoc);
+
+ return BuildBuiltinOffsetOf(BuiltinLoc, ArgTInfo, CompPtr, NumComponents,
+ RParenLoc);
+}
+
+
+ExprResult Sema::ActOnChooseExpr(SourceLocation BuiltinLoc,
+ Expr *CondExpr,
+ Expr *LHSExpr, Expr *RHSExpr,
+ SourceLocation RPLoc) {
+ assert((CondExpr && LHSExpr && RHSExpr) && "Missing type argument(s)");
+
+ ExprValueKind VK = VK_RValue;
+ ExprObjectKind OK = OK_Ordinary;
+ QualType resType;
+ bool ValueDependent = false;
+ if (CondExpr->isTypeDependent() || CondExpr->isValueDependent()) {
+ resType = Context.DependentTy;
+ ValueDependent = true;
+ } else {
+ // The conditional expression is required to be a constant expression.
+ llvm::APSInt condEval(32);
+ ExprResult CondICE = VerifyIntegerConstantExpression(CondExpr, &condEval,
+ PDiag(diag::err_typecheck_choose_expr_requires_constant), false);
+ if (CondICE.isInvalid())
+ return ExprError();
+ CondExpr = CondICE.take();
+
+ // If the condition is nonzero, the AST type is the same as that of LHSExpr;
+ // otherwise it matches RHSExpr.
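+ // For example, __builtin_choose_expr(1, a, b) has the type, value kind, and
+ // object kind of 'a'; only the chosen operand determines the result.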
+ Expr *ActiveExpr = condEval.getZExtValue() ? LHSExpr : RHSExpr;
+
+ resType = ActiveExpr->getType();
+ ValueDependent = ActiveExpr->isValueDependent();
+ VK = ActiveExpr->getValueKind();
+ OK = ActiveExpr->getObjectKind();
+ }
+
+ return Owned(new (Context) ChooseExpr(BuiltinLoc, CondExpr, LHSExpr, RHSExpr,
+ resType, VK, OK, RPLoc,
+ resType->isDependentType(),
+ ValueDependent));
+}
+
+//===----------------------------------------------------------------------===//
+// Clang Extensions.
+//===----------------------------------------------------------------------===//
+
+/// ActOnBlockStart - This callback is invoked when a block literal is started.
+void Sema::ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope) {
+ BlockDecl *Block = BlockDecl::Create(Context, CurContext, CaretLoc);
+ PushBlockScope(CurScope, Block);
+ CurContext->addDecl(Block);
+ if (CurScope)
+ PushDeclContext(CurScope, Block);
+ else
+ CurContext = Block;
+
+ getCurBlock()->HasImplicitReturnType = true;
+
+ // Enter a new evaluation context to insulate the block from any
+ // cleanups from the enclosing full-expression.
+ PushExpressionEvaluationContext(PotentiallyEvaluated);
+}
+
+void Sema::ActOnBlockArguments(Declarator &ParamInfo, Scope *CurScope) {
+ assert(ParamInfo.getIdentifier()==0 && "block-id should have no identifier!");
+ assert(ParamInfo.getContext() == Declarator::BlockLiteralContext);
+ BlockScopeInfo *CurBlock = getCurBlock();
+
+ TypeSourceInfo *Sig = GetTypeForDeclarator(ParamInfo, CurScope);
+ QualType T = Sig->getType();
+
+ // GetTypeForDeclarator always produces a function type for a block
+ // literal signature. Furthermore, it is always a FunctionProtoType
+ // unless the function was written with a typedef.
+ assert(T->isFunctionType() &&
+ "GetTypeForDeclarator made a non-function block signature");
+
+ // Look for an explicit signature in that function type.
+ FunctionProtoTypeLoc ExplicitSignature;
+
+ TypeLoc tmp = Sig->getTypeLoc().IgnoreParens();
+ if (isa<FunctionProtoTypeLoc>(tmp)) {
+ ExplicitSignature = cast<FunctionProtoTypeLoc>(tmp);
+
+ // Check whether that explicit signature was synthesized by
+ // GetTypeForDeclarator. If so, don't save that as part of the
+ // written signature.
+ if (ExplicitSignature.getLocalRangeBegin() ==
+ ExplicitSignature.getLocalRangeEnd()) {
+ // This would be much cheaper if we stored TypeLocs instead of
+ // TypeSourceInfos.
+ TypeLoc Result = ExplicitSignature.getResultLoc();
+ unsigned Size = Result.getFullDataSize();
+ Sig = Context.CreateTypeSourceInfo(Result.getType(), Size);
+ Sig->getTypeLoc().initializeFullCopy(Result, Size);
+
+ ExplicitSignature = FunctionProtoTypeLoc();
+ }
+ }
+
+ CurBlock->TheDecl->setSignatureAsWritten(Sig);
+ CurBlock->FunctionType = T;
+
+ const FunctionType *Fn = T->getAs<FunctionType>();
+ QualType RetTy = Fn->getResultType();
+ bool isVariadic =
+ (isa<FunctionProtoType>(Fn) && cast<FunctionProtoType>(Fn)->isVariadic());
+
+ CurBlock->TheDecl->setIsVariadic(isVariadic);
+
+ // Don't allow returning an Objective-C interface by value.
+ if (RetTy->isObjCObjectType()) {
+ Diag(ParamInfo.getLocStart(),
+ diag::err_object_cannot_be_passed_returned_by_value) << 0 << RetTy;
+ return;
+ }
+
+ // Context.DependentTy is used as a placeholder for a missing block
+ // return type. TODO: what should we do with declarators like:
+ // ^ * { ... }
+ // If the answer is "apply template argument deduction"....
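+ // For example, ^(int x){ return x; } has no written return type, so RetTy
+ // is still Context.DependentTy at this point and the actual return type is
+ // inferred later from the block's return statements.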
+ if (RetTy != Context.DependentTy) {
+ CurBlock->ReturnType = RetTy;
+ CurBlock->TheDecl->setBlockMissingReturnType(false);
+ CurBlock->HasImplicitReturnType = false;
+ }
+
+ // Push block parameters from the declarator if we had them.
+ SmallVector<ParmVarDecl*, 8> Params;
+ if (ExplicitSignature) {
+ for (unsigned I = 0, E = ExplicitSignature.getNumArgs(); I != E; ++I) {
+ ParmVarDecl *Param = ExplicitSignature.getArg(I);
+ if (Param->getIdentifier() == 0 &&
+ !Param->isImplicit() &&
+ !Param->isInvalidDecl() &&
+ !getLangOpts().CPlusPlus)
+ Diag(Param->getLocation(), diag::err_parameter_name_omitted);
+ Params.push_back(Param);
+ }
+
+ // Fake up parameter variables if we have a typedef, like
+ // ^ fntype { ... }
+ } else if (const FunctionProtoType *Fn = T->getAs<FunctionProtoType>()) {
+ for (FunctionProtoType::arg_type_iterator
+ I = Fn->arg_type_begin(), E = Fn->arg_type_end(); I != E; ++I) {
+ ParmVarDecl *Param =
+ BuildParmVarDeclForTypedef(CurBlock->TheDecl,
+ ParamInfo.getLocStart(),
+ *I);
+ Params.push_back(Param);
+ }
+ }
+
+ // Set the parameters on the block decl.
+ if (!Params.empty()) {
+ CurBlock->TheDecl->setParams(Params);
+ CheckParmsForFunctionDef(CurBlock->TheDecl->param_begin(),
+ CurBlock->TheDecl->param_end(),
+ /*CheckParameterNames=*/false);
+ }
+
+ // Finally we can process decl attributes.
+ ProcessDeclAttributes(CurScope, CurBlock->TheDecl, ParamInfo);
+
+ // Put the parameter variables in scope. We can bail out immediately
+ // if we don't have any.
+ if (Params.empty())
+ return;
+
+ for (BlockDecl::param_iterator AI = CurBlock->TheDecl->param_begin(),
+ E = CurBlock->TheDecl->param_end(); AI != E; ++AI) {
+ (*AI)->setOwningFunction(CurBlock->TheDecl);
+
+ // If this has an identifier, add it to the scope stack.
+ if ((*AI)->getIdentifier()) {
+ CheckShadow(CurBlock->TheScope, *AI);
+
+ PushOnScopeChains(*AI, CurBlock->TheScope);
+ }
+ }
+}
+
+/// ActOnBlockError - If there is an error parsing a block, this callback
+/// is invoked to pop the information about the block from the action impl.
+void Sema::ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope) {
+ // Leave the expression-evaluation context.
+ DiscardCleanupsInEvaluationContext();
+ PopExpressionEvaluationContext();
+
+ // Pop off CurBlock, handle nested blocks.
+ PopDeclContext();
+ PopFunctionScopeInfo();
+}
+
+/// ActOnBlockStmtExpr - This is called when the body of a block statement
+/// literal was successfully completed. ^(int x){...}
+ExprResult Sema::ActOnBlockStmtExpr(SourceLocation CaretLoc,
+ Stmt *Body, Scope *CurScope) {
+ // If blocks are disabled, emit an error.
+ if (!LangOpts.Blocks)
+ Diag(CaretLoc, diag::err_blocks_disable);
+
+ // Leave the expression-evaluation context.
+ if (hasAnyUnrecoverableErrorsInThisFunction())
+ DiscardCleanupsInEvaluationContext();
+ assert(!ExprNeedsCleanups && "cleanups within block not correctly bound!");
+ PopExpressionEvaluationContext();
+
+ BlockScopeInfo *BSI = cast<BlockScopeInfo>(FunctionScopes.back());
+
+ PopDeclContext();
+
+ QualType RetTy = Context.VoidTy;
+ if (!BSI->ReturnType.isNull())
+ RetTy = BSI->ReturnType;
+
+ bool NoReturn = BSI->TheDecl->getAttr<NoReturnAttr>();
+ QualType BlockTy;
+
+ // Set the captured variables on the block.
+ // FIXME: Share capture structure between BlockDecl and CapturingScopeInfo!
+ SmallVector<BlockDecl::Capture, 4> Captures;
+ for (unsigned i = 0, e = BSI->Captures.size(); i != e; i++) {
+ CapturingScopeInfo::Capture &Cap = BSI->Captures[i];
+ if (Cap.isThisCapture())
+ continue;
+ BlockDecl::Capture NewCap(Cap.getVariable(), Cap.isBlockCapture(),
+ Cap.isNested(), Cap.getCopyExpr());
+ Captures.push_back(NewCap);
+ }
+ BSI->TheDecl->setCaptures(Context, Captures.begin(), Captures.end(),
+ BSI->CXXThisCaptureIndex != 0);
+
+ // If the user wrote a function type in some form, try to use that.
+ if (!BSI->FunctionType.isNull()) {
+ const FunctionType *FTy = BSI->FunctionType->getAs<FunctionType>();
+
+ FunctionType::ExtInfo Ext = FTy->getExtInfo();
+ if (NoReturn && !Ext.getNoReturn()) Ext = Ext.withNoReturn(true);
+
+ // Turn protoless block types into nullary block types.
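+ // For example, with 'typedef int fn();', a literal written as '^fn { ... }'
+ // would carry a prototype-less signature and is given a zero-argument
+ // prototype here.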
+ if (isa<FunctionNoProtoType>(FTy)) {
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExtInfo = Ext;
+ BlockTy = Context.getFunctionType(RetTy, 0, 0, EPI);
+
+ // Otherwise, if we don't need to change anything about the function type,
+ // preserve its sugar structure.
+ } else if (FTy->getResultType() == RetTy &&
+ (!NoReturn || FTy->getNoReturnAttr())) {
+ BlockTy = BSI->FunctionType;
+
+ // Otherwise, make the minimal modifications to the function type.
+ } else {
+ const FunctionProtoType *FPT = cast<FunctionProtoType>(FTy);
+ FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
+ EPI.TypeQuals = 0; // FIXME: silently?
+ EPI.ExtInfo = Ext;
+ BlockTy = Context.getFunctionType(RetTy,
+ FPT->arg_type_begin(),
+ FPT->getNumArgs(),
+ EPI);
+ }
+
+ // If we don't have a function type, just build one from nothing.
+ } else {
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExtInfo = FunctionType::ExtInfo().withNoReturn(NoReturn);
+ BlockTy = Context.getFunctionType(RetTy, 0, 0, EPI);
+ }
+
+ DiagnoseUnusedParameters(BSI->TheDecl->param_begin(),
+ BSI->TheDecl->param_end());
+ BlockTy = Context.getBlockPointerType(BlockTy);
+
+ // If needed, diagnose invalid gotos and switches in the block.
+ if (getCurFunction()->NeedsScopeChecking() &&
+ !hasAnyUnrecoverableErrorsInThisFunction())
+ DiagnoseInvalidJumps(cast<CompoundStmt>(Body));
+
+ BSI->TheDecl->setBody(cast<CompoundStmt>(Body));
+
+ computeNRVO(Body, getCurBlock());
+
+ BlockExpr *Result = new (Context) BlockExpr(BSI->TheDecl, BlockTy);
+ const AnalysisBasedWarnings::Policy &WP = AnalysisWarnings.getDefaultPolicy();
+ PopFunctionScopeInfo(&WP, Result->getBlockDecl(), Result);
+
+ // If the block isn't obviously global, i.e. it captures anything at
+ // all, then we need to do a few things in the surrounding context:
+ if (Result->getBlockDecl()->hasCaptures()) {
+ // First, this expression has a new cleanup object.
+ ExprCleanupObjects.push_back(Result->getBlockDecl());
+ ExprNeedsCleanups = true;
+
+ // It also gets a branch-protected scope if any of the captured
+ // variables needs destruction.
+ for (BlockDecl::capture_const_iterator
+ ci = Result->getBlockDecl()->capture_begin(),
+ ce = Result->getBlockDecl()->capture_end(); ci != ce; ++ci) {
+ const VarDecl *var = ci->getVariable();
+ if (var->getType().isDestructedType() != QualType::DK_none) {
+ getCurFunction()->setHasBranchProtectedScope();
+ break;
+ }
+ }
+ }
+
+ return Owned(Result);
+}
+
+ExprResult Sema::ActOnVAArg(SourceLocation BuiltinLoc,
+ Expr *E, ParsedType Ty,
+ SourceLocation RPLoc) {
+ TypeSourceInfo *TInfo;
+ GetTypeFromParser(Ty, &TInfo);
+ return BuildVAArgExpr(BuiltinLoc, E, TInfo, RPLoc);
+}
+
+ExprResult Sema::BuildVAArgExpr(SourceLocation BuiltinLoc,
+ Expr *E, TypeSourceInfo *TInfo,
+ SourceLocation RPLoc) {
+ Expr *OrigExpr = E;
+
+ // Get the va_list type
+ QualType VaListType = Context.getBuiltinVaListType();
+ if (VaListType->isArrayType()) {
+ // Deal with implicit array decay; for example, on x86-64,
+ // va_list is an array, but it's supposed to decay to
+ // a pointer for va_arg.
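+ // For example, with 'va_list ap;' on such a target, passing 'ap' to
+ // __builtin_va_arg uses the decayed pointer type, mirroring what happens
+ // when 'ap' is passed to a function.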
+ VaListType = Context.getArrayDecayedType(VaListType);
+ // Make sure the input expression also decays appropriately.
+ ExprResult Result = UsualUnaryConversions(E);
+ if (Result.isInvalid())
+ return ExprError();
+ E = Result.take();
+ } else {
+ // Otherwise, the va_list argument must be an l-value because
+ // it is modified by va_arg.
+ if (!E->isTypeDependent() &&
+ CheckForModifiableLvalue(E, BuiltinLoc, *this))
+ return ExprError();
+ }
+
+ if (!E->isTypeDependent() &&
+ !Context.hasSameType(VaListType, E->getType())) {
+ return ExprError(Diag(E->getLocStart(),
+ diag::err_first_argument_to_va_arg_not_of_type_va_list)
+ << OrigExpr->getType() << E->getSourceRange());
+ }
+
+ if (!TInfo->getType()->isDependentType()) {
+ if (RequireCompleteType(TInfo->getTypeLoc().getBeginLoc(), TInfo->getType(),
+ PDiag(diag::err_second_parameter_to_va_arg_incomplete)
+ << TInfo->getTypeLoc().getSourceRange()))
+ return ExprError();
+
+ if (RequireNonAbstractType(TInfo->getTypeLoc().getBeginLoc(),
+ TInfo->getType(),
+ PDiag(diag::err_second_parameter_to_va_arg_abstract)
+ << TInfo->getTypeLoc().getSourceRange()))
+ return ExprError();
+
+ if (!TInfo->getType().isPODType(Context)) {
+ Diag(TInfo->getTypeLoc().getBeginLoc(),
+ TInfo->getType()->isObjCLifetimeType()
+ ? diag::warn_second_parameter_to_va_arg_ownership_qualified
+ : diag::warn_second_parameter_to_va_arg_not_pod)
+ << TInfo->getType()
+ << TInfo->getTypeLoc().getSourceRange();
+ }
+
+ // Check for va_arg where arguments of the given type will be promoted
+ // (i.e. this va_arg is guaranteed to have undefined behavior).
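+ // For example, va_arg(ap, char) and va_arg(ap, float) can never match the
+ // actual promoted argument, which has type 'int' or 'double' respectively.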
+ QualType PromoteType;
+ if (TInfo->getType()->isPromotableIntegerType()) {
+ PromoteType = Context.getPromotedIntegerType(TInfo->getType());
+ if (Context.typesAreCompatible(PromoteType, TInfo->getType()))
+ PromoteType = QualType();
+ }
+ if (TInfo->getType()->isSpecificBuiltinType(BuiltinType::Float))
+ PromoteType = Context.DoubleTy;
+ if (!PromoteType.isNull())
+ Diag(TInfo->getTypeLoc().getBeginLoc(),
+ diag::warn_second_parameter_to_va_arg_never_compatible)
+ << TInfo->getType()
+ << PromoteType
+ << TInfo->getTypeLoc().getSourceRange();
+ }
+
+ QualType T = TInfo->getType().getNonLValueExprType(Context);
+ return Owned(new (Context) VAArgExpr(BuiltinLoc, E, TInfo, RPLoc, T));
+}
+
+ExprResult Sema::ActOnGNUNullExpr(SourceLocation TokenLoc) {
+ // The type of __null will be int, long, or long long, depending on the
+ // size of pointers on the target.
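+ // For example, on a typical LP64 target __null has type 'long', while on an
+ // ILP32 target it has type 'int'.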
+ QualType Ty;
+ unsigned pw = Context.getTargetInfo().getPointerWidth(0);
+ if (pw == Context.getTargetInfo().getIntWidth())
+ Ty = Context.IntTy;
+ else if (pw == Context.getTargetInfo().getLongWidth())
+ Ty = Context.LongTy;
+ else if (pw == Context.getTargetInfo().getLongLongWidth())
+ Ty = Context.LongLongTy;
+ else {
+ llvm_unreachable("I don't know size of pointer!");
+ }
+
+ return Owned(new (Context) GNUNullExpr(Ty, TokenLoc));
+}
+
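+/// For an assignment such as 'NSString *str = "hello";', this produces a
+/// fix-it hint that inserts '@' to form the Objective-C literal @"hello".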
+static void MakeObjCStringLiteralFixItHint(Sema& SemaRef, QualType DstType,
+ Expr *SrcExpr, FixItHint &Hint) {
+ if (!SemaRef.getLangOpts().ObjC1)
+ return;
+
+ const ObjCObjectPointerType *PT = DstType->getAs<ObjCObjectPointerType>();
+ if (!PT)
+ return;
+
+ // Check if the destination is of type 'id'.
+ if (!PT->isObjCIdType()) {
+ // Check if the destination is the 'NSString' interface.
+ const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
+ if (!ID || !ID->getIdentifier()->isStr("NSString"))
+ return;
+ }
+
+ // Ignore any parens, implicit casts (should only be
+ // array-to-pointer decays), and not-so-opaque values. The last is
+ // important for making this trigger for property assignments.
+ SrcExpr = SrcExpr->IgnoreParenImpCasts();
+ if (OpaqueValueExpr *OV = dyn_cast<OpaqueValueExpr>(SrcExpr))
+ if (OV->getSourceExpr())
+ SrcExpr = OV->getSourceExpr()->IgnoreParenImpCasts();
+
+ StringLiteral *SL = dyn_cast<StringLiteral>(SrcExpr);
+ if (!SL || !SL->isAscii())
+ return;
+
+ Hint = FixItHint::CreateInsertion(SL->getLocStart(), "@");
+}
+
+bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
+ SourceLocation Loc,
+ QualType DstType, QualType SrcType,
+ Expr *SrcExpr, AssignmentAction Action,
+ bool *Complained) {
+ if (Complained)
+ *Complained = false;
+
+ // Decode the result (notice that ASTs are still created for extensions).
+ bool CheckInferredResultType = false;
+ bool isInvalid = false;
+ unsigned DiagKind = 0;
+ FixItHint Hint;
+ ConversionFixItGenerator ConvHints;
+ bool MayHaveConvFixit = false;
+ bool MayHaveFunctionDiff = false;
+
+ switch (ConvTy) {
+ case Compatible: return false;
+ case PointerToInt:
+ DiagKind = diag::ext_typecheck_convert_pointer_int;
+ ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
+ MayHaveConvFixit = true;
+ break;
+ case IntToPointer:
+ DiagKind = diag::ext_typecheck_convert_int_pointer;
+ ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
+ MayHaveConvFixit = true;
+ break;
+ case IncompatiblePointer:
+ MakeObjCStringLiteralFixItHint(*this, DstType, SrcExpr, Hint);
+ DiagKind = diag::ext_typecheck_convert_incompatible_pointer;
+ CheckInferredResultType = DstType->isObjCObjectPointerType() &&
+ SrcType->isObjCObjectPointerType();
+ if (Hint.isNull() && !CheckInferredResultType) {
+ ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
+ }
+ MayHaveConvFixit = true;
+ break;
+ case IncompatiblePointerSign:
+ DiagKind = diag::ext_typecheck_convert_incompatible_pointer_sign;
+ break;
+ case FunctionVoidPointer:
+ DiagKind = diag::ext_typecheck_convert_pointer_void_func;
+ break;
+ case IncompatiblePointerDiscardsQualifiers: {
+ // Perform array-to-pointer decay if necessary.
+ if (SrcType->isArrayType()) SrcType = Context.getArrayDecayedType(SrcType);
+
+ Qualifiers lhq = SrcType->getPointeeType().getQualifiers();
+ Qualifiers rhq = DstType->getPointeeType().getQualifiers();
+ if (lhq.getAddressSpace() != rhq.getAddressSpace()) {
+ DiagKind = diag::err_typecheck_incompatible_address_space;
+ break;
+ } else if (lhq.getObjCLifetime() != rhq.getObjCLifetime()) {
+ DiagKind = diag::err_typecheck_incompatible_ownership;
+ break;
+ }
+
+ llvm_unreachable("unknown error case for discarding qualifiers!");
+ // fallthrough
+ }
+ case CompatiblePointerDiscardsQualifiers:
+ // If the qualifiers lost were because we were applying the
+ // (deprecated) C++ conversion from a string literal to a char*
+ // (or wchar_t*), then there was no error (C++ 4.2p2). FIXME:
+ // Ideally, this check would be performed in
+ // checkPointerTypesForAssignment. However, that would require a
+ // bit of refactoring (so that the second argument is an
+ // expression, rather than a type), which should be done as part
+ // of a larger effort to fix checkPointerTypesForAssignment for
+ // C++ semantics.
+ if (getLangOpts().CPlusPlus &&
+ IsStringLiteralToNonConstPointerConversion(SrcExpr, DstType))
+ return false;
+ DiagKind = diag::ext_typecheck_convert_discards_qualifiers;
+ break;
+ case IncompatibleNestedPointerQualifiers:
+ DiagKind = diag::ext_nested_pointer_qualifier_mismatch;
+ break;
+ case IntToBlockPointer:
+ DiagKind = diag::err_int_to_block_pointer;
+ break;
+ case IncompatibleBlockPointer:
+ DiagKind = diag::err_typecheck_convert_incompatible_block_pointer;
+ break;
+ case IncompatibleObjCQualifiedId:
+ // FIXME: Diagnose the problem in ObjCQualifiedIdTypesAreCompatible, since
+ // it can give a more specific diagnostic.
+ DiagKind = diag::warn_incompatible_qualified_id;
+ break;
+ case IncompatibleVectors:
+ DiagKind = diag::warn_incompatible_vectors;
+ break;
+ case IncompatibleObjCWeakRef:
+ DiagKind = diag::err_arc_weak_unavailable_assign;
+ break;
+ case Incompatible:
+ DiagKind = diag::err_typecheck_convert_incompatible;
+ ConvHints.tryToFixConversion(SrcExpr, SrcType, DstType, *this);
+ MayHaveConvFixit = true;
+ isInvalid = true;
+ MayHaveFunctionDiff = true;
+ break;
+ }
+
+ QualType FirstType, SecondType;
+ switch (Action) {
+ case AA_Assigning:
+ case AA_Initializing:
+ // The destination type comes first.
+ FirstType = DstType;
+ SecondType = SrcType;
+ break;
+
+ case AA_Returning:
+ case AA_Passing:
+ case AA_Converting:
+ case AA_Sending:
+ case AA_Casting:
+ // The source type comes first.
+ FirstType = SrcType;
+ SecondType = DstType;
+ break;
+ }
+
+ PartialDiagnostic FDiag = PDiag(DiagKind);
+ FDiag << FirstType << SecondType << Action << SrcExpr->getSourceRange();
+
+ // If we can fix the conversion, suggest the FixIts.
+ assert(ConvHints.isNull() || Hint.isNull());
+ if (!ConvHints.isNull()) {
+ for (std::vector<FixItHint>::iterator HI = ConvHints.Hints.begin(),
+ HE = ConvHints.Hints.end(); HI != HE; ++HI)
+ FDiag << *HI;
+ } else {
+ FDiag << Hint;
+ }
+ if (MayHaveConvFixit) { FDiag << (unsigned) (ConvHints.Kind); }
+
+ if (MayHaveFunctionDiff)
+ HandleFunctionTypeMismatch(FDiag, SecondType, FirstType);
+
+ Diag(Loc, FDiag);
+
+ if (SecondType == Context.OverloadTy)
+ NoteAllOverloadCandidates(OverloadExpr::find(SrcExpr).Expression,
+ FirstType);
+
+ if (CheckInferredResultType)
+ EmitRelatedResultTypeNote(SrcExpr);
+
+ if (Complained)
+ *Complained = true;
+ return isInvalid;
+}
+
+ExprResult Sema::VerifyIntegerConstantExpression(Expr *E,
+ llvm::APSInt *Result) {
+ return VerifyIntegerConstantExpression(E, Result,
+ PDiag(diag::err_expr_not_ice) << LangOpts.CPlusPlus);
+}
+
+ExprResult Sema::VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
+ PartialDiagnostic NotIceDiag,
+ bool AllowFold,
+ PartialDiagnostic FoldDiag) {
+ SourceLocation DiagLoc = E->getLocStart();
+
+ if (getLangOpts().CPlusPlus0x) {
+ // C++11 [expr.const]p5:
+ // If an expression of literal class type is used in a context where an
+ // integral constant expression is required, then that class type shall
+ // have a single non-explicit conversion function to an integral or
+ // unscoped enumeration type
+ ExprResult Converted;
+ if (NotIceDiag.getDiagID()) {
+ Converted = ConvertToIntegralOrEnumerationType(
+ DiagLoc, E,
+ PDiag(diag::err_ice_not_integral),
+ PDiag(diag::err_ice_incomplete_type),
+ PDiag(diag::err_ice_explicit_conversion),
+ PDiag(diag::note_ice_conversion_here),
+ PDiag(diag::err_ice_ambiguous_conversion),
+ PDiag(diag::note_ice_conversion_here),
+ PDiag(0),
+ /*AllowScopedEnumerations*/ false);
+ } else {
+ // The caller wants to silently enquire whether this is an ICE. Don't
+ // produce any diagnostics if it isn't.
+ Converted = ConvertToIntegralOrEnumerationType(
+ DiagLoc, E, PDiag(), PDiag(), PDiag(), PDiag(),
+ PDiag(), PDiag(), PDiag(), false);
+ }
+ if (Converted.isInvalid())
+ return Converted;
+ E = Converted.take();
+ if (!E->getType()->isIntegralOrUnscopedEnumerationType())
+ return ExprError();
+ } else if (!E->getType()->isIntegralOrUnscopedEnumerationType()) {
+ // An ICE must be of integral or unscoped enumeration type.
+ if (NotIceDiag.getDiagID())
+ Diag(DiagLoc, NotIceDiag) << E->getSourceRange();
+ return ExprError();
+ }
+
+ // Circumvent ICE checking in C++11 to avoid evaluating the expression twice
+ // in the non-ICE case.
+ if (!getLangOpts().CPlusPlus0x && E->isIntegerConstantExpr(Context)) {
+ if (Result)
+ *Result = E->EvaluateKnownConstInt(Context);
+ return Owned(E);
+ }
+
+ Expr::EvalResult EvalResult;
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ EvalResult.Diag = &Notes;
+
+ // Try to evaluate the expression, and produce diagnostics explaining why it's
+ // not a constant expression as a side-effect.
+ bool Folded = E->EvaluateAsRValue(EvalResult, Context) &&
+ EvalResult.Val.isInt() && !EvalResult.HasSideEffects;
+
+ // In C++11, we can rely on diagnostics being produced for any expression
+ // which is not a constant expression. If no diagnostics were produced, then
+ // this is a constant expression.
+ if (Folded && getLangOpts().CPlusPlus0x && Notes.empty()) {
+ if (Result)
+ *Result = EvalResult.Val.getInt();
+ return Owned(E);
+ }
+
+ // If our only note is the usual "invalid subexpression" note, just point
+ // the caret at its location rather than producing an essentially
+ // redundant note.
+ if (Notes.size() == 1 && Notes[0].second.getDiagID() ==
+ diag::note_invalid_subexpr_in_const_expr) {
+ DiagLoc = Notes[0].first;
+ Notes.clear();
+ }
+
+ if (!Folded || !AllowFold) {
+ if (NotIceDiag.getDiagID()) {
+ Diag(DiagLoc, NotIceDiag) << E->getSourceRange();
+ for (unsigned I = 0, N = Notes.size(); I != N; ++I)
+ Diag(Notes[I].first, Notes[I].second);
+ }
+
+ return ExprError();
+ }
+
+ if (FoldDiag.getDiagID())
+ Diag(DiagLoc, FoldDiag) << E->getSourceRange();
+ else
+ Diag(DiagLoc, diag::ext_expr_not_ice)
+ << E->getSourceRange() << LangOpts.CPlusPlus;
+ for (unsigned I = 0, N = Notes.size(); I != N; ++I)
+ Diag(Notes[I].first, Notes[I].second);
+
+ if (Result)
+ *Result = EvalResult.Val.getInt();
+ return Owned(E);
+}
+
+namespace {
+ // Handle the case where we conclude that an expression which we
+ // speculatively considered to be unevaluated is actually evaluated.
+ class TransformToPE : public TreeTransform<TransformToPE> {
+ typedef TreeTransform<TransformToPE> BaseTransform;
+
+ public:
+ TransformToPE(Sema &SemaRef) : BaseTransform(SemaRef) { }
+
+ // Make sure we redo semantic analysis
+ bool AlwaysRebuild() { return true; }
+
+ // Make sure we handle LabelStmts correctly.
+ // FIXME: This does the right thing, but maybe we need a more general
+ // fix to TreeTransform?
+ StmtResult TransformLabelStmt(LabelStmt *S) {
+ S->getDecl()->setStmt(0);
+ return BaseTransform::TransformLabelStmt(S);
+ }
+
+ // We need to special-case DeclRefExprs referring to FieldDecls which
+ // are not part of a member pointer formation; normal TreeTransforming
+ // doesn't catch this case because of the way we represent them in the AST.
+ // FIXME: This is a bit ugly; is it really the best way to handle this
+ // case?
+ //
+ // Error on DeclRefExprs referring to FieldDecls.
+ ExprResult TransformDeclRefExpr(DeclRefExpr *E) {
+ if (isa<FieldDecl>(E->getDecl()) &&
+ SemaRef.ExprEvalContexts.back().Context != Sema::Unevaluated)
+ return SemaRef.Diag(E->getLocation(),
+ diag::err_invalid_non_static_member_use)
+ << E->getDecl() << E->getSourceRange();
+
+ return BaseTransform::TransformDeclRefExpr(E);
+ }
+
+ // Exception: filter out member pointer formation
+ ExprResult TransformUnaryOperator(UnaryOperator *E) {
+ if (E->getOpcode() == UO_AddrOf && E->getType()->isMemberPointerType())
+ return E;
+
+ return BaseTransform::TransformUnaryOperator(E);
+ }
+
+ ExprResult TransformLambdaExpr(LambdaExpr *E) {
+ // Lambdas never need to be transformed.
+ return E;
+ }
+ };
+}
+
+ExprResult Sema::TranformToPotentiallyEvaluated(Expr *E) {
+ assert(ExprEvalContexts.back().Context == Unevaluated &&
+ "Should only transform unevaluated expressions");
+ ExprEvalContexts.back().Context =
+ ExprEvalContexts[ExprEvalContexts.size()-2].Context;
+ if (ExprEvalContexts.back().Context == Unevaluated)
+ return E;
+ return TransformToPE(*this).TransformExpr(E);
+}
+
+void
+Sema::PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext,
+ Decl *LambdaContextDecl,
+ bool IsDecltype) {
+ ExprEvalContexts.push_back(
+ ExpressionEvaluationContextRecord(NewContext,
+ ExprCleanupObjects.size(),
+ ExprNeedsCleanups,
+ LambdaContextDecl,
+ IsDecltype));
+ ExprNeedsCleanups = false;
+ if (!MaybeODRUseExprs.empty())
+ std::swap(MaybeODRUseExprs, ExprEvalContexts.back().SavedMaybeODRUseExprs);
+}
+
+void Sema::PopExpressionEvaluationContext() {
+ ExpressionEvaluationContextRecord& Rec = ExprEvalContexts.back();
+
+ if (!Rec.Lambdas.empty()) {
+ if (Rec.Context == Unevaluated) {
+ // C++11 [expr.prim.lambda]p2:
+ // A lambda-expression shall not appear in an unevaluated operand
+ // (Clause 5).
+ for (unsigned I = 0, N = Rec.Lambdas.size(); I != N; ++I)
+ Diag(Rec.Lambdas[I]->getLocStart(),
+ diag::err_lambda_unevaluated_operand);
+ } else {
+ // Mark the capture expressions odr-used. This was deferred
+ // during lambda expression creation.
+ for (unsigned I = 0, N = Rec.Lambdas.size(); I != N; ++I) {
+ LambdaExpr *Lambda = Rec.Lambdas[I];
+ for (LambdaExpr::capture_init_iterator
+ C = Lambda->capture_init_begin(),
+ CEnd = Lambda->capture_init_end();
+ C != CEnd; ++C) {
+ MarkDeclarationsReferencedInExpr(*C);
+ }
+ }
+ }
+ }
+
+ // When we are coming out of an unevaluated (or constant-evaluated) context,
+ // clear out any temporaries that we may have created as part of the
+ // evaluation of the expression in that context: they aren't relevant because
+ // they will never be constructed.
+ if (Rec.Context == Unevaluated || Rec.Context == ConstantEvaluated) {
+ ExprCleanupObjects.erase(ExprCleanupObjects.begin() + Rec.NumCleanupObjects,
+ ExprCleanupObjects.end());
+ ExprNeedsCleanups = Rec.ParentNeedsCleanups;
+ CleanupVarDeclMarking();
+ std::swap(MaybeODRUseExprs, Rec.SavedMaybeODRUseExprs);
+ // Otherwise, merge the contexts together.
+ } else {
+ ExprNeedsCleanups |= Rec.ParentNeedsCleanups;
+ MaybeODRUseExprs.insert(Rec.SavedMaybeODRUseExprs.begin(),
+ Rec.SavedMaybeODRUseExprs.end());
+ }
+
+ // Pop the current expression evaluation context off the stack.
+ ExprEvalContexts.pop_back();
+}
+
+void Sema::DiscardCleanupsInEvaluationContext() {
+ ExprCleanupObjects.erase(
+ ExprCleanupObjects.begin() + ExprEvalContexts.back().NumCleanupObjects,
+ ExprCleanupObjects.end());
+ ExprNeedsCleanups = false;
+ MaybeODRUseExprs.clear();
+}
+
+ExprResult Sema::HandleExprEvaluationContextForTypeof(Expr *E) {
+ if (!E->getType()->isVariablyModifiedType())
+ return E;
+ return TranformToPotentiallyEvaluated(E);
+}
+
+static bool IsPotentiallyEvaluatedContext(Sema &SemaRef) {
+ // Do not mark anything as "used" within a dependent context; wait for
+ // an instantiation.
+ if (SemaRef.CurContext->isDependentContext())
+ return false;
+
+ switch (SemaRef.ExprEvalContexts.back().Context) {
+ case Sema::Unevaluated:
+ // We are in an expression that is not potentially evaluated; do nothing.
+ // (Depending on how you read the standard, we actually do need to do
+ // something here for null pointer constants, but the standard's
+ // definition of a null pointer constant is completely crazy.)
+ return false;
+
+ case Sema::ConstantEvaluated:
+ case Sema::PotentiallyEvaluated:
+ // We are in a potentially evaluated expression (or a constant-expression
+ // in C++03); we need to do implicit template instantiation, implicitly
+ // define class members, and mark most declarations as used.
+ return true;
+
+ case Sema::PotentiallyEvaluatedIfUsed:
+ // Referenced declarations will only be used if the construct in the
+ // containing expression is used.
+ return false;
+ }
+ llvm_unreachable("Invalid context");
+}
+
+/// \brief Mark a function referenced, and check whether it is odr-used
+/// (C++ [basic.def.odr]p2, C99 6.9p3)
+void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func) {
+ assert(Func && "No function?");
+
+ Func->setReferenced();
+
+ // Don't mark this function as used multiple times, unless it's a constexpr
+ // function which we need to instantiate.
+ if (Func->isUsed(false) &&
+ !(Func->isConstexpr() && !Func->getBody() &&
+ Func->isImplicitlyInstantiable()))
+ return;
+
+ if (!IsPotentiallyEvaluatedContext(*this))
+ return;
+
+ // Note that this declaration has been used.
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Func)) {
+ if (Constructor->isDefaulted() && !Constructor->isDeleted()) {
+ if (Constructor->isDefaultConstructor()) {
+ if (Constructor->isTrivial())
+ return;
+ if (!Constructor->isUsed(false))
+ DefineImplicitDefaultConstructor(Loc, Constructor);
+ } else if (Constructor->isCopyConstructor()) {
+ if (!Constructor->isUsed(false))
+ DefineImplicitCopyConstructor(Loc, Constructor);
+ } else if (Constructor->isMoveConstructor()) {
+ if (!Constructor->isUsed(false))
+ DefineImplicitMoveConstructor(Loc, Constructor);
+ }
+ }
+
+ MarkVTableUsed(Loc, Constructor->getParent());
+ } else if (CXXDestructorDecl *Destructor =
+ dyn_cast<CXXDestructorDecl>(Func)) {
+ if (Destructor->isDefaulted() && !Destructor->isDeleted() &&
+ !Destructor->isUsed(false))
+ DefineImplicitDestructor(Loc, Destructor);
+ if (Destructor->isVirtual())
+ MarkVTableUsed(Loc, Destructor->getParent());
+ } else if (CXXMethodDecl *MethodDecl = dyn_cast<CXXMethodDecl>(Func)) {
+ if (MethodDecl->isDefaulted() && !MethodDecl->isDeleted() &&
+ MethodDecl->isOverloadedOperator() &&
+ MethodDecl->getOverloadedOperator() == OO_Equal) {
+ if (!MethodDecl->isUsed(false)) {
+ if (MethodDecl->isCopyAssignmentOperator())
+ DefineImplicitCopyAssignment(Loc, MethodDecl);
+ else
+ DefineImplicitMoveAssignment(Loc, MethodDecl);
+ }
+ } else if (isa<CXXConversionDecl>(MethodDecl) &&
+ MethodDecl->getParent()->isLambda()) {
+ CXXConversionDecl *Conversion = cast<CXXConversionDecl>(MethodDecl);
+ if (Conversion->isLambdaToBlockPointerConversion())
+ DefineImplicitLambdaToBlockPointerConversion(Loc, Conversion);
+ else
+ DefineImplicitLambdaToFunctionPointerConversion(Loc, Conversion);
+ } else if (MethodDecl->isVirtual())
+ MarkVTableUsed(Loc, MethodDecl->getParent());
+ }
+
+ // Recursive functions should be marked when used from another function.
+ // FIXME: Is this really right?
+ if (CurContext == Func) return;
+
+ // Implicit instantiation of function templates and member functions of
+ // class templates.
+ if (Func->isImplicitlyInstantiable()) {
+ bool AlreadyInstantiated = false;
+ SourceLocation PointOfInstantiation = Loc;
+ if (FunctionTemplateSpecializationInfo *SpecInfo
+ = Func->getTemplateSpecializationInfo()) {
+ if (SpecInfo->getPointOfInstantiation().isInvalid())
+ SpecInfo->setPointOfInstantiation(Loc);
+ else if (SpecInfo->getTemplateSpecializationKind()
+ == TSK_ImplicitInstantiation) {
+ AlreadyInstantiated = true;
+ PointOfInstantiation = SpecInfo->getPointOfInstantiation();
+ }
+ } else if (MemberSpecializationInfo *MSInfo
+ = Func->getMemberSpecializationInfo()) {
+ if (MSInfo->getPointOfInstantiation().isInvalid())
+ MSInfo->setPointOfInstantiation(Loc);
+ else if (MSInfo->getTemplateSpecializationKind()
+ == TSK_ImplicitInstantiation) {
+ AlreadyInstantiated = true;
+ PointOfInstantiation = MSInfo->getPointOfInstantiation();
+ }
+ }
+
+ if (!AlreadyInstantiated || Func->isConstexpr()) {
+ if (isa<CXXRecordDecl>(Func->getDeclContext()) &&
+ cast<CXXRecordDecl>(Func->getDeclContext())->isLocalClass())
+ PendingLocalImplicitInstantiations.push_back(
+ std::make_pair(Func, PointOfInstantiation));
+ else if (Func->isConstexpr())
+ // Do not defer instantiations of constexpr functions, to avoid the
+ // expression evaluator needing to call back into Sema if it sees a
+ // call to such a function.
+ InstantiateFunctionDefinition(PointOfInstantiation, Func);
+ else {
+ PendingInstantiations.push_back(std::make_pair(Func,
+ PointOfInstantiation));
+ // Notify the consumer that a function was implicitly instantiated.
+ Consumer.HandleCXXImplicitFunctionInstantiation(Func);
+ }
+ }
+ } else {
+ // Walk the redeclaration chain, as some of the redeclarations may be
+ // instantiable.
+ for (FunctionDecl::redecl_iterator i(Func->redecls_begin()),
+ e(Func->redecls_end()); i != e; ++i) {
+ if (!i->isUsed(false) && i->isImplicitlyInstantiable())
+ MarkFunctionReferenced(Loc, *i);
+ }
+ }
+
+ // Keep track of used but undefined functions.
+ if (!Func->isPure() && !Func->hasBody() &&
+ Func->getLinkage() != ExternalLinkage) {
+ SourceLocation &old = UndefinedInternals[Func->getCanonicalDecl()];
+ if (old.isInvalid()) old = Loc;
+ }
+
+ Func->setUsed(true);
+}
+
+static void
+diagnoseUncapturableValueReference(Sema &S, SourceLocation loc,
+ VarDecl *var, DeclContext *DC) {
+ DeclContext *VarDC = var->getDeclContext();
+
+ // If the parameter still belongs to the translation unit, then
+ // we're actually just using one parameter in the declaration of
+ // the next.
+ if (isa<ParmVarDecl>(var) &&
+ isa<TranslationUnitDecl>(VarDC))
+ return;
+
+ // For C code, don't diagnose about capture if we're not actually in code
+ // right now; it's impossible to write a non-constant expression outside of
+ // function context, so we'll get other (more useful) diagnostics later.
+ //
+ // For C++, things get a bit more nasty... it would be nice to suppress this
+ // diagnostic for certain cases like using a local variable in an array bound
+ // for a member of a local class, but the correct predicate is not obvious.
+ if (!S.getLangOpts().CPlusPlus && !S.CurContext->isFunctionOrMethod())
+ return;
+
+ if (isa<CXXMethodDecl>(VarDC) &&
+ cast<CXXRecordDecl>(VarDC->getParent())->isLambda()) {
+ S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_lambda)
+ << var->getIdentifier();
+ } else if (FunctionDecl *fn = dyn_cast<FunctionDecl>(VarDC)) {
+ S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_function)
+ << var->getIdentifier() << fn->getDeclName();
+ } else if (isa<BlockDecl>(VarDC)) {
+ S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_block)
+ << var->getIdentifier();
+ } else {
+ // FIXME: Is there any other context where a local variable can be
+ // declared?
+ S.Diag(loc, diag::err_reference_to_local_var_in_enclosing_context)
+ << var->getIdentifier();
+ }
+
+ S.Diag(var->getLocation(), diag::note_local_variable_declared_here)
+ << var->getIdentifier();
+
+ // FIXME: Add additional diagnostic info about class etc. which prevents
+ // capture.
+}
+
+/// \brief Capture the given variable in the given lambda expression.
+static ExprResult captureInLambda(Sema &S, LambdaScopeInfo *LSI,
+ VarDecl *Var, QualType FieldType,
+ QualType DeclRefType,
+ SourceLocation Loc) {
+ CXXRecordDecl *Lambda = LSI->Lambda;
+
+ // Build the non-static data member.
+ FieldDecl *Field
+ = FieldDecl::Create(S.Context, Lambda, Loc, Loc, 0, FieldType,
+ S.Context.getTrivialTypeSourceInfo(FieldType, Loc),
+ 0, false, false);
+ Field->setImplicit(true);
+ Field->setAccess(AS_private);
+ Lambda->addDecl(Field);
+
+ // C++11 [expr.prim.lambda]p21:
+ // When the lambda-expression is evaluated, the entities that
+ // are captured by copy are used to direct-initialize each
+ // corresponding non-static data member of the resulting closure
+ // object. (For array members, the array elements are
+ // direct-initialized in increasing subscript order.) These
+ // initializations are performed in the (unspecified) order in
+ // which the non-static data members are declared.
+
+ // Introduce a new evaluation context for the initialization, so
+ // that temporaries introduced as part of the capture are retained
+ // to be re-"exported" from the lambda expression itself.
+ S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
+
+ // C++ [expr.prim.lambda]p12:
+ // An entity captured by a lambda-expression is odr-used (3.2) in
+ // the scope containing the lambda-expression.
+ Expr *Ref = new (S.Context) DeclRefExpr(Var, false, DeclRefType,
+ VK_LValue, Loc);
+ Var->setReferenced(true);
+ Var->setUsed(true);
+
+ // When the field has array type, create index variables for each
+ // dimension of the array. We use these index variables to subscript
+ // the source array, and other clients (e.g., CodeGen) will perform
+ // the necessary iteration with these index variables.
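+ // For example, capturing 'int a[2][3]' by copy creates the index variables
+ // __i0 and __i1, which are used to copy each element.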
+ SmallVector<VarDecl *, 4> IndexVariables;
+ QualType BaseType = FieldType;
+ QualType SizeType = S.Context.getSizeType();
+ LSI->ArrayIndexStarts.push_back(LSI->ArrayIndexVars.size());
+ while (const ConstantArrayType *Array
+ = S.Context.getAsConstantArrayType(BaseType)) {
+ // Create the iteration variable for this array index.
+ IdentifierInfo *IterationVarName = 0;
+ {
+ SmallString<8> Str;
+ llvm::raw_svector_ostream OS(Str);
+ OS << "__i" << IndexVariables.size();
+ IterationVarName = &S.Context.Idents.get(OS.str());
+ }
+ VarDecl *IterationVar
+ = VarDecl::Create(S.Context, S.CurContext, Loc, Loc,
+ IterationVarName, SizeType,
+ S.Context.getTrivialTypeSourceInfo(SizeType, Loc),
+ SC_None, SC_None);
+ IndexVariables.push_back(IterationVar);
+ LSI->ArrayIndexVars.push_back(IterationVar);
+
+ // Create a reference to the iteration variable.
+ ExprResult IterationVarRef
+ = S.BuildDeclRefExpr(IterationVar, SizeType, VK_LValue, Loc);
+ assert(!IterationVarRef.isInvalid() &&
+ "Reference to invented variable cannot fail!");
+ IterationVarRef = S.DefaultLvalueConversion(IterationVarRef.take());
+ assert(!IterationVarRef.isInvalid() &&
+ "Conversion of invented variable cannot fail!");
+
+ // Subscript the array with this iteration variable.
+ ExprResult Subscript = S.CreateBuiltinArraySubscriptExpr(
+ Ref, Loc, IterationVarRef.take(), Loc);
+ if (Subscript.isInvalid()) {
+ S.CleanupVarDeclMarking();
+ S.DiscardCleanupsInEvaluationContext();
+ S.PopExpressionEvaluationContext();
+ return ExprError();
+ }
+
+ Ref = Subscript.take();
+ BaseType = Array->getElementType();
+ }
+
+ // Construct the entity that we will be initializing. For an array, this
+ // will be the first element in the array, which may require several levels
+ // of array-subscript entities.
+ SmallVector<InitializedEntity, 4> Entities;
+ Entities.reserve(1 + IndexVariables.size());
+ Entities.push_back(
+ InitializedEntity::InitializeLambdaCapture(Var, Field, Loc));
+ for (unsigned I = 0, N = IndexVariables.size(); I != N; ++I)
+ Entities.push_back(InitializedEntity::InitializeElement(S.Context,
+ 0,
+ Entities.back()));
+
+ InitializationKind InitKind
+ = InitializationKind::CreateDirect(Loc, Loc, Loc);
+ InitializationSequence Init(S, Entities.back(), InitKind, &Ref, 1);
+ ExprResult Result(true);
+ if (!Init.Diagnose(S, Entities.back(), InitKind, &Ref, 1))
+ Result = Init.Perform(S, Entities.back(), InitKind,
+ MultiExprArg(S, &Ref, 1));
+
+ // If this initialization requires any cleanups (e.g., due to a
+ // default argument to a copy constructor), note that for the
+ // lambda.
+ if (S.ExprNeedsCleanups)
+ LSI->ExprNeedsCleanups = true;
+
+ // Exit the expression evaluation context used for the capture.
+ S.CleanupVarDeclMarking();
+ S.DiscardCleanupsInEvaluationContext();
+ S.PopExpressionEvaluationContext();
+ return Result;
+}
+
+bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
+ TryCaptureKind Kind, SourceLocation EllipsisLoc,
+ bool BuildAndDiagnose,
+ QualType &CaptureType,
+ QualType &DeclRefType) {
+ bool Nested = false;
+
+ DeclContext *DC = CurContext;
+ if (Var->getDeclContext() == DC) return true;
+ if (!Var->hasLocalStorage()) return true;
+
+ bool HasBlocksAttr = Var->hasAttr<BlocksAttr>();
+
+ // Walk up the stack to determine whether we can capture the variable,
+ // performing the "simple" checks that don't depend on type. We stop when
+ // we've either hit the declared scope of the variable or find an existing
+ // capture of that variable.
+ CaptureType = Var->getType();
+ DeclRefType = CaptureType.getNonReferenceType();
+ bool Explicit = (Kind != TryCapture_Implicit);
+ unsigned FunctionScopesIndex = FunctionScopes.size() - 1;
+ do {
+ // Only block literals and lambda expressions can capture; other
+ // scopes don't work.
+ DeclContext *ParentDC;
+ if (isa<BlockDecl>(DC))
+ ParentDC = DC->getParent();
+ else if (isa<CXXMethodDecl>(DC) &&
+ cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call &&
+ cast<CXXRecordDecl>(DC->getParent())->isLambda())
+ ParentDC = DC->getParent()->getParent();
+ else {
+ if (BuildAndDiagnose)
+ diagnoseUncapturableValueReference(*this, Loc, Var, DC);
+ return true;
+ }
+
+ CapturingScopeInfo *CSI =
+ cast<CapturingScopeInfo>(FunctionScopes[FunctionScopesIndex]);
+
+ // Check whether we've already captured it.
+ if (CSI->CaptureMap.count(Var)) {
+ // If we found a capture, any subcaptures are nested.
+ Nested = true;
+
+ // Retrieve the capture type for this variable.
+ CaptureType = CSI->getCapture(Var).getCaptureType();
+
+ // Compute the type of an expression that refers to this variable.
+ DeclRefType = CaptureType.getNonReferenceType();
+
+ const CapturingScopeInfo::Capture &Cap = CSI->getCapture(Var);
+ if (Cap.isCopyCapture() &&
+ !(isa<LambdaScopeInfo>(CSI) && cast<LambdaScopeInfo>(CSI)->Mutable))
+ DeclRefType.addConst();
+ break;
+ }
+
+ bool IsBlock = isa<BlockScopeInfo>(CSI);
+ bool IsLambda = !IsBlock;
+
+ // Lambdas are not allowed to capture unnamed variables
+ // (e.g. anonymous unions).
+ // FIXME: The C++11 rules don't actually state this explicitly, but I'm
+ // assuming that's the intent.
+ if (IsLambda && !Var->getDeclName()) {
+ if (BuildAndDiagnose) {
+ Diag(Loc, diag::err_lambda_capture_anonymous_var);
+ Diag(Var->getLocation(), diag::note_declared_at);
+ }
+ return true;
+ }
+
+ // Prohibit variably-modified types; they're difficult to deal with.
+ if (Var->getType()->isVariablyModifiedType()) {
+ if (BuildAndDiagnose) {
+ if (IsBlock)
+ Diag(Loc, diag::err_ref_vm_type);
+ else
+ Diag(Loc, diag::err_lambda_capture_vm_type) << Var->getDeclName();
+ Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ }
+ return true;
+ }
+
+ // Lambdas are not allowed to capture __block variables; they don't
+ // support the expected semantics.
+ if (IsLambda && HasBlocksAttr) {
+ if (BuildAndDiagnose) {
+ Diag(Loc, diag::err_lambda_capture_block)
+ << Var->getDeclName();
+ Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ }
+ return true;
+ }
+
+ if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_None && !Explicit) {
+ // No capture-default
+ if (BuildAndDiagnose) {
+ Diag(Loc, diag::err_lambda_impcap) << Var->getDeclName();
+ Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ Diag(cast<LambdaScopeInfo>(CSI)->Lambda->getLocStart(),
+ diag::note_lambda_decl);
+ }
+ return true;
+ }
+
+ FunctionScopesIndex--;
+ DC = ParentDC;
+ Explicit = false;
+ } while (!Var->getDeclContext()->Equals(DC));
+
+ // Walk back down the scope stack, computing the type of the capture at
+ // each step, checking type-specific requirements, and adding captures if
+ // requested.
+ for (unsigned I = ++FunctionScopesIndex, N = FunctionScopes.size(); I != N;
+ ++I) {
+ CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FunctionScopes[I]);
+
+ // Compute the type of the capture and of a reference to the capture within
+ // this scope.
+ if (isa<BlockScopeInfo>(CSI)) {
+ Expr *CopyExpr = 0;
+ bool ByRef = false;
+
+ // Blocks are not allowed to capture arrays.
+ if (CaptureType->isArrayType()) {
+ if (BuildAndDiagnose) {
+ Diag(Loc, diag::err_ref_array_type);
+ Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ }
+ return true;
+ }
+
+ // Forbid the block-capture of autoreleasing variables.
+ if (CaptureType.getObjCLifetime() == Qualifiers::OCL_Autoreleasing) {
+ if (BuildAndDiagnose) {
+ Diag(Loc, diag::err_arc_autoreleasing_capture)
+ << /*block*/ 0;
+ Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ }
+ return true;
+ }
+
+ if (HasBlocksAttr || CaptureType->isReferenceType()) {
+ // Block capture by reference does not change the capture or
+ // declaration reference types.
+ ByRef = true;
+ } else {
+ // Block capture by copy introduces 'const'.
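+ // For example, an 'int' variable captured by a block is seen inside the
+ // block as an lvalue of type 'const int'.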
+ CaptureType = CaptureType.getNonReferenceType().withConst();
+ DeclRefType = CaptureType;
+
+ if (getLangOpts().CPlusPlus && BuildAndDiagnose) {
+ if (const RecordType *Record = DeclRefType->getAs<RecordType>()) {
+ // The capture logic needs the destructor, so make sure we mark it.
+ // Usually this is unnecessary because most local variables have
+ // their destructors marked at declaration time, but parameters are
+ // an exception because it's technically only the call site that
+ // actually requires the destructor.
+ if (isa<ParmVarDecl>(Var))
+ FinalizeVarWithDestructor(Var, Record);
+
+ // According to the blocks spec, the capture of a variable from
+ // the stack requires a const copy constructor. This is not true
+ // of the copy/move done to move a __block variable to the heap.
+ Expr *DeclRef = new (Context) DeclRefExpr(Var, false,
+ DeclRefType.withConst(),
+ VK_LValue, Loc);
+ ExprResult Result
+ = PerformCopyInitialization(
+ InitializedEntity::InitializeBlock(Var->getLocation(),
+ CaptureType, false),
+ Loc, Owned(DeclRef));
+
+ // Build a full-expression copy expression if initialization
+ // succeeded and used a non-trivial constructor. Recover from
+ // errors by pretending that the copy isn't necessary.
+ if (!Result.isInvalid() &&
+ !cast<CXXConstructExpr>(Result.get())->getConstructor()
+ ->isTrivial()) {
+ Result = MaybeCreateExprWithCleanups(Result);
+ CopyExpr = Result.take();
+ }
+ }
+ }
+ }
+
+ // Actually capture the variable.
+ if (BuildAndDiagnose)
+ CSI->addCapture(Var, HasBlocksAttr, ByRef, Nested, Loc,
+ SourceLocation(), CaptureType, CopyExpr);
+ Nested = true;
+ continue;
+ }
+
+ LambdaScopeInfo *LSI = cast<LambdaScopeInfo>(CSI);
+
+ // Determine whether we are capturing by reference or by value.
+ bool ByRef = false;
+ if (I == N - 1 && Kind != TryCapture_Implicit) {
+ ByRef = (Kind == TryCapture_ExplicitByRef);
+ } else {
+ ByRef = (LSI->ImpCaptureStyle == LambdaScopeInfo::ImpCap_LambdaByref);
+ }
+
+ // Compute the type of the field that will capture this variable.
+ if (ByRef) {
+ // C++11 [expr.prim.lambda]p15:
+ // An entity is captured by reference if it is implicitly or
+ // explicitly captured but not captured by copy. It is
+ // unspecified whether additional unnamed non-static data
+ // members are declared in the closure type for entities
+ // captured by reference.
+ //
+ // FIXME: It is not clear whether we want to build an lvalue reference
+ // to the DeclRefType or to CaptureType.getNonReferenceType(). GCC appears
+ // to do the former, while EDG does the latter. Core issue 1249 will
+ // clarify, but for now we follow GCC because it's a more permissive and
+ // easily defensible position.
+ CaptureType = Context.getLValueReferenceType(DeclRefType);
+ } else {
+ // C++11 [expr.prim.lambda]p14:
+ // For each entity captured by copy, an unnamed non-static
+ // data member is declared in the closure type. The
+ // declaration order of these members is unspecified. The type
+ // of such a data member is the type of the corresponding
+ // captured entity if the entity is not a reference to an
+ // object, or the referenced type otherwise. [Note: If the
+ // captured entity is a reference to a function, the
+ // corresponding data member is also a reference to a
+ // function. - end note ]
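+ // For example, capturing a variable declared as 'int &r' by copy gives the
+ // closure a data member of type 'int', not 'int &'.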
+ if (const ReferenceType *RefType = CaptureType->getAs<ReferenceType>()){
+ if (!RefType->getPointeeType()->isFunctionType())
+ CaptureType = RefType->getPointeeType();
+ }
+
+ // Forbid the lambda copy-capture of autoreleasing variables.
+ if (CaptureType.getObjCLifetime() == Qualifiers::OCL_Autoreleasing) {
+ if (BuildAndDiagnose) {
+ Diag(Loc, diag::err_arc_autoreleasing_capture) << /*lambda*/ 1;
+ Diag(Var->getLocation(), diag::note_previous_decl)
+ << Var->getDeclName();
+ }
+ return true;
+ }
+ }
+
+ // Capture this variable in the lambda.
+ Expr *CopyExpr = 0;
+ if (BuildAndDiagnose) {
+ ExprResult Result = captureInLambda(*this, LSI, Var, CaptureType,
+ DeclRefType, Loc);
+ if (!Result.isInvalid())
+ CopyExpr = Result.take();
+ }
+
+ // Compute the type of a reference to this captured variable.
+ if (ByRef)
+ DeclRefType = CaptureType.getNonReferenceType();
+ else {
+ // C++ [expr.prim.lambda]p5:
+ // The closure type for a lambda-expression has a public inline
+ // function call operator [...]. This function call operator is
+ // declared const (9.3.1) if and only if the lambda-expression's
+ // parameter-declaration-clause is not followed by mutable.
+ DeclRefType = CaptureType.getNonReferenceType();
+ if (!LSI->Mutable && !CaptureType->isReferenceType())
+ DeclRefType.addConst();
+ }
+
+ // Add the capture.
+ if (BuildAndDiagnose)
+ CSI->addCapture(Var, /*IsBlock=*/false, ByRef, Nested, Loc,
+ EllipsisLoc, CaptureType, CopyExpr);
+ Nested = true;
+ }
+
+ return false;
+}
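A minimal user-level sketch of the capture rules implemented above (illustrative code, not part of the patch): capturing a reference by copy captures the referenced type, the copy is const through a non-mutable lambda's call operator, and a by-reference capture is modeled as an lvalue reference.

#include <type_traits>

int main() {
  int n = 0;
  int &r = n;

  auto byCopy = [r]   { return r; };   // by-copy capture of a reference: field type is int
  auto byRef  = [&n]  { return n; };   // by-reference capture; modeled above as int&
  auto readOnly = [n] { /* ++n; ill-formed: 'n' is const in a non-mutable lambda */ return n; };

  static_assert(std::is_same<decltype(byCopy()), int>::value,
                "copy-capture of a reference captures the referenced type");
  return byRef() + readOnly();
}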
+
+bool Sema::tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
+ TryCaptureKind Kind, SourceLocation EllipsisLoc) {
+ QualType CaptureType;
+ QualType DeclRefType;
+ return tryCaptureVariable(Var, Loc, Kind, EllipsisLoc,
+ /*BuildAndDiagnose=*/true, CaptureType,
+ DeclRefType);
+}
+
+QualType Sema::getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc) {
+ QualType CaptureType;
+ QualType DeclRefType;
+
+ // Determine whether we can capture this variable.
+ if (tryCaptureVariable(Var, Loc, TryCapture_Implicit, SourceLocation(),
+ /*BuildAndDiagnose=*/false, CaptureType, DeclRefType))
+ return QualType();
+
+ return DeclRefType;
+}
+
+static void MarkVarDeclODRUsed(Sema &SemaRef, VarDecl *Var,
+ SourceLocation Loc) {
+ // Keep track of used but undefined variables.
+ // FIXME: We shouldn't suppress this warning for static data members.
+ if (Var->hasDefinition(SemaRef.Context) == VarDecl::DeclarationOnly &&
+ Var->getLinkage() != ExternalLinkage &&
+ !(Var->isStaticDataMember() && Var->hasInit())) {
+ SourceLocation &old = SemaRef.UndefinedInternals[Var->getCanonicalDecl()];
+ if (old.isInvalid()) old = Loc;
+ }
+
+ SemaRef.tryCaptureVariable(Var, Loc);
+
+ Var->setUsed(true);
+}
+
+void Sema::UpdateMarkingForLValueToRValue(Expr *E) {
+ // Per C++11 [basic.def.odr], a variable is odr-used "unless it is
+ // an object that satisfies the requirements for appearing in a
+ // constant expression (5.19) and the lvalue-to-rvalue conversion (4.1)
+ // is immediately applied." This function handles the lvalue-to-rvalue
+ // conversion part.
+ MaybeODRUseExprs.erase(E->IgnoreParens());
+}
+
+ExprResult Sema::ActOnConstantExpression(ExprResult Res) {
+ if (!Res.isUsable())
+ return Res;
+
+ // If a constant-expression is a reference to a variable where we delay
+ // deciding whether it is an odr-use, just assume we will apply the
+ // lvalue-to-rvalue conversion. In the one case where this doesn't happen
+ // (a non-type template argument), we have special handling anyway.
+ UpdateMarkingForLValueToRValue(Res.get());
+ return Res;
+}
+
+void Sema::CleanupVarDeclMarking() {
+ for (llvm::SmallPtrSetIterator<Expr*> i = MaybeODRUseExprs.begin(),
+ e = MaybeODRUseExprs.end();
+ i != e; ++i) {
+ VarDecl *Var;
+ SourceLocation Loc;
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(*i)) {
+ Var = cast<VarDecl>(DRE->getDecl());
+ Loc = DRE->getLocation();
+ } else if (MemberExpr *ME = dyn_cast<MemberExpr>(*i)) {
+ Var = cast<VarDecl>(ME->getMemberDecl());
+ Loc = ME->getMemberLoc();
+ } else {
+ llvm_unreachable("Unexpcted expression");
+ }
+
+ MarkVarDeclODRUsed(*this, Var, Loc);
+ }
+
+ MaybeODRUseExprs.clear();
+}
+
+// Mark a VarDecl referenced, and perform the necessary handling to compute
+// odr-uses.
+static void DoMarkVarDeclReferenced(Sema &SemaRef, SourceLocation Loc,
+ VarDecl *Var, Expr *E) {
+ Var->setReferenced();
+
+ if (!IsPotentiallyEvaluatedContext(SemaRef))
+ return;
+
+ // Implicit instantiation of static data members of class templates.
+ if (Var->isStaticDataMember() && Var->getInstantiatedFromStaticDataMember()) {
+ MemberSpecializationInfo *MSInfo = Var->getMemberSpecializationInfo();
+ assert(MSInfo && "Missing member specialization information?");
+ bool AlreadyInstantiated = !MSInfo->getPointOfInstantiation().isInvalid();
+ if (MSInfo->getTemplateSpecializationKind() == TSK_ImplicitInstantiation &&
+ (!AlreadyInstantiated ||
+ Var->isUsableInConstantExpressions(SemaRef.Context))) {
+ if (!AlreadyInstantiated) {
+ // This is a modification of an existing AST node. Notify listeners.
+ if (ASTMutationListener *L = SemaRef.getASTMutationListener())
+ L->StaticDataMemberInstantiated(Var);
+ MSInfo->setPointOfInstantiation(Loc);
+ }
+ SourceLocation PointOfInstantiation = MSInfo->getPointOfInstantiation();
+ if (Var->isUsableInConstantExpressions(SemaRef.Context))
+ // Do not defer instantiations of variables which could be used in a
+ // constant expression.
+ SemaRef.InstantiateStaticDataMemberDefinition(PointOfInstantiation,Var);
+ else
+ SemaRef.PendingInstantiations.push_back(
+ std::make_pair(Var, PointOfInstantiation));
+ }
+ }
+
+ // Per C++11 [basic.def.odr], a variable is odr-used "unless it is
+ // an object that satisfies the requirements for appearing in a
+ // constant expression (5.19) and the lvalue-to-rvalue conversion (4.1)
+ // is immediately applied." We check the first part here, and
+ // Sema::UpdateMarkingForLValueToRValue deals with the second part.
+ // Note that we use the C++11 definition everywhere because nothing in
+ // C++03 depends on whether we get the C++03 version correct. This does not
+ // apply to references, since they are not objects.
+ const VarDecl *DefVD;
+ if (E && !isa<ParmVarDecl>(Var) && !Var->getType()->isReferenceType() &&
+ Var->isUsableInConstantExpressions(SemaRef.Context) &&
+ Var->getAnyInitializer(DefVD) && DefVD->checkInitIsICE())
+ SemaRef.MaybeODRUseExprs.insert(E);
+ else
+ MarkVarDeclODRUsed(SemaRef, Var, Loc);
+}
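To make the odr-use bookkeeping above concrete, a small illustrative example (user code, not from the patch): reading a constant where the lvalue-to-rvalue conversion applies immediately is not an odr-use, but taking its address is.

struct S {
  static constexpr int value = 42;   // in-class declaration with initializer
};
constexpr int S::value;              // out-of-line definition, required once 'value' is odr-used

int read_it() {
  return S::value;                   // lvalue-to-rvalue conversion applied immediately: not an odr-use
}

const int *take_address() {
  return &S::value;                  // odr-use: this is why the definition above is needed (pre-C++17)
}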
+
+/// \brief Mark a variable referenced, and check whether it is odr-used
+/// (C++ [basic.def.odr]p2, C99 6.9p3). Note that this should not be
+/// used directly for normal expressions referring to VarDecl.
+void Sema::MarkVariableReferenced(SourceLocation Loc, VarDecl *Var) {
+ DoMarkVarDeclReferenced(*this, Loc, Var, 0);
+}
+
+static void MarkExprReferenced(Sema &SemaRef, SourceLocation Loc,
+ Decl *D, Expr *E) {
+ if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
+ DoMarkVarDeclReferenced(SemaRef, Loc, Var, E);
+ return;
+ }
+
+ SemaRef.MarkAnyDeclReferenced(Loc, D);
+}
+
+/// \brief Perform reference-marking and odr-use handling for a DeclRefExpr.
+void Sema::MarkDeclRefReferenced(DeclRefExpr *E) {
+ MarkExprReferenced(*this, E->getLocation(), E->getDecl(), E);
+}
+
+/// \brief Perform reference-marking and odr-use handling for a MemberExpr.
+void Sema::MarkMemberReferenced(MemberExpr *E) {
+ MarkExprReferenced(*this, E->getMemberLoc(), E->getMemberDecl(), E);
+}
+
+/// \brief Perform marking for a reference to an arbitrary declaration. It
+/// marks the declaration referenced, and performs odr-use checking for functions
+/// and variables. This method should not be used when building a normal
+/// expression which refers to a variable.
+void Sema::MarkAnyDeclReferenced(SourceLocation Loc, Decl *D) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(D))
+ MarkVariableReferenced(Loc, VD);
+ else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ MarkFunctionReferenced(Loc, FD);
+ else
+ D->setReferenced();
+}
+
+namespace {
+ // Mark all of the declarations referenced
+ // FIXME: Not fully implemented yet! We need to have a better understanding
+ // of when we're entering
+ class MarkReferencedDecls : public RecursiveASTVisitor<MarkReferencedDecls> {
+ Sema &S;
+ SourceLocation Loc;
+
+ public:
+ typedef RecursiveASTVisitor<MarkReferencedDecls> Inherited;
+
+ MarkReferencedDecls(Sema &S, SourceLocation Loc) : S(S), Loc(Loc) { }
+
+ bool TraverseTemplateArgument(const TemplateArgument &Arg);
+ bool TraverseRecordType(RecordType *T);
+ };
+}
+
+bool MarkReferencedDecls::TraverseTemplateArgument(
+ const TemplateArgument &Arg) {
+ if (Arg.getKind() == TemplateArgument::Declaration) {
+ if (Decl *D = Arg.getAsDecl())
+ S.MarkAnyDeclReferenced(Loc, D);
+ }
+
+ return Inherited::TraverseTemplateArgument(Arg);
+}
+
+bool MarkReferencedDecls::TraverseRecordType(RecordType *T) {
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(T->getDecl())) {
+ const TemplateArgumentList &Args = Spec->getTemplateArgs();
+ return TraverseTemplateArguments(Args.data(), Args.size());
+ }
+
+ return true;
+}
+
+void Sema::MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T) {
+ MarkReferencedDecls Marker(*this, Loc);
+ Marker.TraverseType(Context.getCanonicalType(T));
+}
+
+namespace {
+ /// \brief Helper class that marks all of the declarations referenced by
+ /// potentially-evaluated subexpressions as "referenced".
+ class EvaluatedExprMarker : public EvaluatedExprVisitor<EvaluatedExprMarker> {
+ Sema &S;
+ bool SkipLocalVariables;
+
+ public:
+ typedef EvaluatedExprVisitor<EvaluatedExprMarker> Inherited;
+
+ EvaluatedExprMarker(Sema &S, bool SkipLocalVariables)
+ : Inherited(S.Context), S(S), SkipLocalVariables(SkipLocalVariables) { }
+
+ void VisitDeclRefExpr(DeclRefExpr *E) {
+ // If we were asked not to visit local variables, don't.
+ if (SkipLocalVariables) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(E->getDecl()))
+ if (VD->hasLocalStorage())
+ return;
+ }
+
+ S.MarkDeclRefReferenced(E);
+ }
+
+ void VisitMemberExpr(MemberExpr *E) {
+ S.MarkMemberReferenced(E);
+ Inherited::VisitMemberExpr(E);
+ }
+
+ void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ S.MarkFunctionReferenced(E->getLocStart(),
+ const_cast<CXXDestructorDecl*>(E->getTemporary()->getDestructor()));
+ Visit(E->getSubExpr());
+ }
+
+ void VisitCXXNewExpr(CXXNewExpr *E) {
+ if (E->getOperatorNew())
+ S.MarkFunctionReferenced(E->getLocStart(), E->getOperatorNew());
+ if (E->getOperatorDelete())
+ S.MarkFunctionReferenced(E->getLocStart(), E->getOperatorDelete());
+ Inherited::VisitCXXNewExpr(E);
+ }
+
+ void VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ if (E->getOperatorDelete())
+ S.MarkFunctionReferenced(E->getLocStart(), E->getOperatorDelete());
+ QualType Destroyed = S.Context.getBaseElementType(E->getDestroyedType());
+ if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
+ S.MarkFunctionReferenced(E->getLocStart(),
+ S.LookupDestructor(Record));
+ }
+
+ Inherited::VisitCXXDeleteExpr(E);
+ }
+
+ void VisitCXXConstructExpr(CXXConstructExpr *E) {
+ S.MarkFunctionReferenced(E->getLocStart(), E->getConstructor());
+ Inherited::VisitCXXConstructExpr(E);
+ }
+
+ void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
+ Visit(E->getExpr());
+ }
+
+ void VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ Inherited::VisitImplicitCastExpr(E);
+
+ if (E->getCastKind() == CK_LValueToRValue)
+ S.UpdateMarkingForLValueToRValue(E->getSubExpr());
+ }
+ };
+}
+
+/// \brief Mark any declarations that appear within this expression or any
+/// potentially-evaluated subexpressions as "referenced".
+///
+/// \param SkipLocalVariables If true, don't mark local variables as
+/// 'referenced'.
+void Sema::MarkDeclarationsReferencedInExpr(Expr *E,
+ bool SkipLocalVariables) {
+ EvaluatedExprMarker(*this, SkipLocalVariables).Visit(E);
+}
+
+/// \brief Emit a diagnostic that describes an effect on the run-time behavior
+/// of the program being compiled.
+///
+/// This routine emits the given diagnostic when the code currently being
+/// type-checked is "potentially evaluated", meaning that there is a
+/// possibility that the code will actually be executable. Code in sizeof()
+/// expressions, code used only during overload resolution, etc., are not
+/// potentially evaluated. This routine will suppress such diagnostics or,
+/// in the absolutely nutty case of potentially potentially evaluated
+/// expressions (C++ typeid), queue the diagnostic to potentially emit it
+/// later.
+///
+/// This routine should be used for all diagnostics that describe the run-time
+/// behavior of a program, such as passing a non-POD value through an ellipsis.
+/// Failure to do so will likely result in spurious diagnostics or failures
+/// during overload resolution or within sizeof/alignof/typeof/typeid.
+bool Sema::DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
+ const PartialDiagnostic &PD) {
+ switch (ExprEvalContexts.back().Context) {
+ case Unevaluated:
+ // The argument will never be evaluated, so don't complain.
+ break;
+
+ case ConstantEvaluated:
+ // Relevant diagnostics should be produced by constant evaluation.
+ break;
+
+ case PotentiallyEvaluated:
+ case PotentiallyEvaluatedIfUsed:
+ if (Statement && getCurFunctionOrMethodDecl()) {
+ FunctionScopes.back()->PossiblyUnreachableDiags.
+ push_back(sema::PossiblyUnreachableDiag(PD, Loc, Statement));
+ }
+ else
+ Diag(Loc, PD);
+
+ return true;
+ }
+
+ return false;
+}
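As a hedged illustration of the context check above (user code, not from the patch): a call whose runtime behavior would normally be diagnosed, such as passing a non-POD value through an ellipsis, draws no diagnostic when it appears only as an unevaluated operand.

#include <cstddef>
#include <cstdio>
#include <string>

std::size_t demo(std::string s) {
  // An evaluated call like std::printf("%s\n", s) would be diagnosed, because a
  // non-POD value is passed through the ellipsis. Inside sizeof the call is an
  // unevaluated operand, so DiagRuntimeBehavior suppresses the diagnostic.
  return sizeof(std::printf("%s\n", s));
}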
+
+bool Sema::CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
+ CallExpr *CE, FunctionDecl *FD) {
+ if (ReturnType->isVoidType() || !ReturnType->isIncompleteType())
+ return false;
+
+ // If we're inside a decltype's expression, don't check for a valid return
+ // type or construct temporaries until we know whether this is the last call.
+ if (ExprEvalContexts.back().IsDecltype) {
+ ExprEvalContexts.back().DelayedDecltypeCalls.push_back(CE);
+ return false;
+ }
+
+ PartialDiagnostic Note =
+ FD ? PDiag(diag::note_function_with_incomplete_return_type_declared_here)
+ << FD->getDeclName() : PDiag();
+ SourceLocation NoteLoc = FD ? FD->getLocation() : SourceLocation();
+
+ if (RequireCompleteType(Loc, ReturnType,
+ FD ?
+ PDiag(diag::err_call_function_incomplete_return)
+ << CE->getSourceRange() << FD->getDeclName() :
+ PDiag(diag::err_call_incomplete_return)
+ << CE->getSourceRange(),
+ std::make_pair(NoteLoc, Note)))
+ return true;
+
+ return false;
+}
+
+// Diagnose the s/=/==/ and s/\|=/!=/ typos. Note that adding parentheses
+// will prevent this condition from triggering, which is what we want.
+void Sema::DiagnoseAssignmentAsCondition(Expr *E) {
+ SourceLocation Loc;
+
+ unsigned diagnostic = diag::warn_condition_is_assignment;
+ bool IsOrAssign = false;
+
+ if (BinaryOperator *Op = dyn_cast<BinaryOperator>(E)) {
+ if (Op->getOpcode() != BO_Assign && Op->getOpcode() != BO_OrAssign)
+ return;
+
+ IsOrAssign = Op->getOpcode() == BO_OrAssign;
+
+ // Greylist some idioms by putting them into a warning subcategory.
+ if (ObjCMessageExpr *ME
+ = dyn_cast<ObjCMessageExpr>(Op->getRHS()->IgnoreParenCasts())) {
+ Selector Sel = ME->getSelector();
+
+ // self = [<foo> init...]
+ if (isSelfExpr(Op->getLHS()) && Sel.getNameForSlot(0).startswith("init"))
+ diagnostic = diag::warn_condition_is_idiomatic_assignment;
+
+ // <foo> = [<bar> nextObject]
+ else if (Sel.isUnarySelector() && Sel.getNameForSlot(0) == "nextObject")
+ diagnostic = diag::warn_condition_is_idiomatic_assignment;
+ }
+
+ Loc = Op->getOperatorLoc();
+ } else if (CXXOperatorCallExpr *Op = dyn_cast<CXXOperatorCallExpr>(E)) {
+ if (Op->getOperator() != OO_Equal && Op->getOperator() != OO_PipeEqual)
+ return;
+
+ IsOrAssign = Op->getOperator() == OO_PipeEqual;
+ Loc = Op->getOperatorLoc();
+ } else {
+ // Not an assignment.
+ return;
+ }
+
+ Diag(Loc, diagnostic) << E->getSourceRange();
+
+ SourceLocation Open = E->getLocStart();
+ SourceLocation Close = PP.getLocForEndOfToken(E->getSourceRange().getEnd());
+ Diag(Loc, diag::note_condition_assign_silence)
+ << FixItHint::CreateInsertion(Open, "(")
+ << FixItHint::CreateInsertion(Close, ")");
+
+ if (IsOrAssign)
+ Diag(Loc, diag::note_condition_or_assign_to_comparison)
+ << FixItHint::CreateReplacement(Loc, "!=");
+ else
+ Diag(Loc, diag::note_condition_assign_to_comparison)
+ << FixItHint::CreateReplacement(Loc, "==");
+}
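An illustrative example of the conditions this routine flags and the two fix-it directions it offers (user code, not from the patch):

void check(int x, int y) {
  if (x = y) { }     // warning: assignment used as a condition
  if ((x = y)) { }   // extra parentheses silence the warning (assignment intended)
  if (x == y) { }    // or use '==', as the replacement fix-it suggests
}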
+
+/// \brief Redundant parentheses over an equality comparison can indicate
+/// that the user intended an assignment used as condition.
+void Sema::DiagnoseEqualityWithExtraParens(ParenExpr *ParenE) {
+ // Don't warn if the parens came from a macro.
+ SourceLocation parenLoc = ParenE->getLocStart();
+ if (parenLoc.isInvalid() || parenLoc.isMacroID())
+ return;
+ // Don't warn for dependent expressions.
+ if (ParenE->isTypeDependent())
+ return;
+
+ Expr *E = ParenE->IgnoreParens();
+
+ if (BinaryOperator *opE = dyn_cast<BinaryOperator>(E))
+ if (opE->getOpcode() == BO_EQ &&
+ opE->getLHS()->IgnoreParenImpCasts()->isModifiableLvalue(Context)
+ == Expr::MLV_Valid) {
+ SourceLocation Loc = opE->getOperatorLoc();
+
+ Diag(Loc, diag::warn_equality_with_extra_parens) << E->getSourceRange();
+ SourceRange ParenERange = ParenE->getSourceRange();
+ Diag(Loc, diag::note_equality_comparison_silence)
+ << FixItHint::CreateRemoval(ParenERange.getBegin())
+ << FixItHint::CreateRemoval(ParenERange.getEnd());
+ Diag(Loc, diag::note_equality_comparison_to_assign)
+ << FixItHint::CreateReplacement(Loc, "=");
+ }
+}
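And a matching sketch for the redundant-parentheses case (user code, not from the patch):

void check2(int x) {
  if ((x == 4)) { }  // warning: equality comparison with extraneous parentheses
  if (x == 4) { }    // removing the parentheses silences the warning
  // 'if (x = 4)' is what the '=' fix-it would produce, if assignment was intended
}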
+
+ExprResult Sema::CheckBooleanCondition(Expr *E, SourceLocation Loc) {
+ DiagnoseAssignmentAsCondition(E);
+ if (ParenExpr *parenE = dyn_cast<ParenExpr>(E))
+ DiagnoseEqualityWithExtraParens(parenE);
+
+ ExprResult result = CheckPlaceholderExpr(E);
+ if (result.isInvalid()) return ExprError();
+ E = result.take();
+
+ if (!E->isTypeDependent()) {
+ if (getLangOpts().CPlusPlus)
+ return CheckCXXBooleanCondition(E); // C++ 6.4p4
+
+ ExprResult ERes = DefaultFunctionArrayLvalueConversion(E);
+ if (ERes.isInvalid())
+ return ExprError();
+ E = ERes.take();
+
+ QualType T = E->getType();
+ if (!T->isScalarType()) { // C99 6.8.4.1p1
+ Diag(Loc, diag::err_typecheck_statement_requires_scalar)
+ << T << E->getSourceRange();
+ return ExprError();
+ }
+ }
+
+ return Owned(E);
+}
+
+ExprResult Sema::ActOnBooleanCondition(Scope *S, SourceLocation Loc,
+ Expr *SubExpr) {
+ if (!SubExpr)
+ return ExprError();
+
+ return CheckBooleanCondition(SubExpr, Loc);
+}
+
+namespace {
+ /// A visitor for rebuilding a call to an __unknown_any expression
+ /// to have an appropriate type.
+ struct RebuildUnknownAnyFunction
+ : StmtVisitor<RebuildUnknownAnyFunction, ExprResult> {
+
+ Sema &S;
+
+ RebuildUnknownAnyFunction(Sema &S) : S(S) {}
+
+ ExprResult VisitStmt(Stmt *S) {
+ llvm_unreachable("unexpected statement!");
+ }
+
+ ExprResult VisitExpr(Expr *E) {
+ S.Diag(E->getExprLoc(), diag::err_unsupported_unknown_any_call)
+ << E->getSourceRange();
+ return ExprError();
+ }
+
+    /// Rebuild an expression that semantically just wraps another
+    /// expression, sharing its type and value kind.
+ template <class T> ExprResult rebuildSugarExpr(T *E) {
+ ExprResult SubResult = Visit(E->getSubExpr());
+ if (SubResult.isInvalid()) return ExprError();
+
+ Expr *SubExpr = SubResult.take();
+ E->setSubExpr(SubExpr);
+ E->setType(SubExpr->getType());
+ E->setValueKind(SubExpr->getValueKind());
+ assert(E->getObjectKind() == OK_Ordinary);
+ return E;
+ }
+
+ ExprResult VisitParenExpr(ParenExpr *E) {
+ return rebuildSugarExpr(E);
+ }
+
+ ExprResult VisitUnaryExtension(UnaryOperator *E) {
+ return rebuildSugarExpr(E);
+ }
+
+ ExprResult VisitUnaryAddrOf(UnaryOperator *E) {
+ ExprResult SubResult = Visit(E->getSubExpr());
+ if (SubResult.isInvalid()) return ExprError();
+
+ Expr *SubExpr = SubResult.take();
+ E->setSubExpr(SubExpr);
+ E->setType(S.Context.getPointerType(SubExpr->getType()));
+ assert(E->getValueKind() == VK_RValue);
+ assert(E->getObjectKind() == OK_Ordinary);
+ return E;
+ }
+
+ ExprResult resolveDecl(Expr *E, ValueDecl *VD) {
+ if (!isa<FunctionDecl>(VD)) return VisitExpr(E);
+
+ E->setType(VD->getType());
+
+ assert(E->getValueKind() == VK_RValue);
+ if (S.getLangOpts().CPlusPlus &&
+ !(isa<CXXMethodDecl>(VD) &&
+ cast<CXXMethodDecl>(VD)->isInstance()))
+ E->setValueKind(VK_LValue);
+
+ return E;
+ }
+
+ ExprResult VisitMemberExpr(MemberExpr *E) {
+ return resolveDecl(E, E->getMemberDecl());
+ }
+
+ ExprResult VisitDeclRefExpr(DeclRefExpr *E) {
+ return resolveDecl(E, E->getDecl());
+ }
+ };
+}
+
+/// Given a function expression of unknown-any type, try to rebuild it
+/// to have a function type.
+static ExprResult rebuildUnknownAnyFunction(Sema &S, Expr *FunctionExpr) {
+ ExprResult Result = RebuildUnknownAnyFunction(S).Visit(FunctionExpr);
+ if (Result.isInvalid()) return ExprError();
+ return S.DefaultFunctionArrayConversion(Result.take());
+}
+
+namespace {
+ /// A visitor for rebuilding an expression of type __unknown_anytype
+ /// into one which resolves the type directly on the referring
+ /// expression. Strict preservation of the original source
+ /// structure is not a goal.
+ struct RebuildUnknownAnyExpr
+ : StmtVisitor<RebuildUnknownAnyExpr, ExprResult> {
+
+ Sema &S;
+
+ /// The current destination type.
+ QualType DestType;
+
+ RebuildUnknownAnyExpr(Sema &S, QualType CastType)
+ : S(S), DestType(CastType) {}
+
+ ExprResult VisitStmt(Stmt *S) {
+ llvm_unreachable("unexpected statement!");
+ }
+
+ ExprResult VisitExpr(Expr *E) {
+ S.Diag(E->getExprLoc(), diag::err_unsupported_unknown_any_expr)
+ << E->getSourceRange();
+ return ExprError();
+ }
+
+ ExprResult VisitCallExpr(CallExpr *E);
+ ExprResult VisitObjCMessageExpr(ObjCMessageExpr *E);
+
+    /// Rebuild an expression that semantically just wraps another
+    /// expression, sharing its type and value kind.
+ template <class T> ExprResult rebuildSugarExpr(T *E) {
+ ExprResult SubResult = Visit(E->getSubExpr());
+ if (SubResult.isInvalid()) return ExprError();
+ Expr *SubExpr = SubResult.take();
+ E->setSubExpr(SubExpr);
+ E->setType(SubExpr->getType());
+ E->setValueKind(SubExpr->getValueKind());
+ assert(E->getObjectKind() == OK_Ordinary);
+ return E;
+ }
+
+ ExprResult VisitParenExpr(ParenExpr *E) {
+ return rebuildSugarExpr(E);
+ }
+
+ ExprResult VisitUnaryExtension(UnaryOperator *E) {
+ return rebuildSugarExpr(E);
+ }
+
+ ExprResult VisitUnaryAddrOf(UnaryOperator *E) {
+ const PointerType *Ptr = DestType->getAs<PointerType>();
+ if (!Ptr) {
+ S.Diag(E->getOperatorLoc(), diag::err_unknown_any_addrof)
+ << E->getSourceRange();
+ return ExprError();
+ }
+ assert(E->getValueKind() == VK_RValue);
+ assert(E->getObjectKind() == OK_Ordinary);
+ E->setType(DestType);
+
+ // Build the sub-expression as if it were an object of the pointee type.
+ DestType = Ptr->getPointeeType();
+ ExprResult SubResult = Visit(E->getSubExpr());
+ if (SubResult.isInvalid()) return ExprError();
+ E->setSubExpr(SubResult.take());
+ return E;
+ }
+
+ ExprResult VisitImplicitCastExpr(ImplicitCastExpr *E);
+
+ ExprResult resolveDecl(Expr *E, ValueDecl *VD);
+
+ ExprResult VisitMemberExpr(MemberExpr *E) {
+ return resolveDecl(E, E->getMemberDecl());
+ }
+
+ ExprResult VisitDeclRefExpr(DeclRefExpr *E) {
+ return resolveDecl(E, E->getDecl());
+ }
+ };
+}
+
+/// Rebuilds a call expression which yielded __unknown_anytype.
+ExprResult RebuildUnknownAnyExpr::VisitCallExpr(CallExpr *E) {
+ Expr *CalleeExpr = E->getCallee();
+
+ enum FnKind {
+ FK_MemberFunction,
+ FK_FunctionPointer,
+ FK_BlockPointer
+ };
+
+ FnKind Kind;
+ QualType CalleeType = CalleeExpr->getType();
+ if (CalleeType == S.Context.BoundMemberTy) {
+ assert(isa<CXXMemberCallExpr>(E) || isa<CXXOperatorCallExpr>(E));
+ Kind = FK_MemberFunction;
+ CalleeType = Expr::findBoundMemberType(CalleeExpr);
+ } else if (const PointerType *Ptr = CalleeType->getAs<PointerType>()) {
+ CalleeType = Ptr->getPointeeType();
+ Kind = FK_FunctionPointer;
+ } else {
+ CalleeType = CalleeType->castAs<BlockPointerType>()->getPointeeType();
+ Kind = FK_BlockPointer;
+ }
+ const FunctionType *FnType = CalleeType->castAs<FunctionType>();
+
+ // Verify that this is a legal result type of a function.
+ if (DestType->isArrayType() || DestType->isFunctionType()) {
+ unsigned diagID = diag::err_func_returning_array_function;
+ if (Kind == FK_BlockPointer)
+ diagID = diag::err_block_returning_array_function;
+
+ S.Diag(E->getExprLoc(), diagID)
+ << DestType->isFunctionType() << DestType;
+ return ExprError();
+ }
+
+ // Otherwise, go ahead and set DestType as the call's result.
+ E->setType(DestType.getNonLValueExprType(S.Context));
+ E->setValueKind(Expr::getValueKindForType(DestType));
+ assert(E->getObjectKind() == OK_Ordinary);
+
+ // Rebuild the function type, replacing the result type with DestType.
+ if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FnType))
+ DestType = S.Context.getFunctionType(DestType,
+ Proto->arg_type_begin(),
+ Proto->getNumArgs(),
+ Proto->getExtProtoInfo());
+ else
+ DestType = S.Context.getFunctionNoProtoType(DestType,
+ FnType->getExtInfo());
+
+ // Rebuild the appropriate pointer-to-function type.
+ switch (Kind) {
+ case FK_MemberFunction:
+ // Nothing to do.
+ break;
+
+ case FK_FunctionPointer:
+ DestType = S.Context.getPointerType(DestType);
+ break;
+
+ case FK_BlockPointer:
+ DestType = S.Context.getBlockPointerType(DestType);
+ break;
+ }
+
+ // Finally, we can recurse.
+ ExprResult CalleeResult = Visit(CalleeExpr);
+ if (!CalleeResult.isUsable()) return ExprError();
+ E->setCallee(CalleeResult.take());
+
+ // Bind a temporary if necessary.
+ return S.MaybeBindToTemporary(E);
+}
+
+ExprResult RebuildUnknownAnyExpr::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ // Verify that this is a legal result type of a call.
+ if (DestType->isArrayType() || DestType->isFunctionType()) {
+ S.Diag(E->getExprLoc(), diag::err_func_returning_array_function)
+ << DestType->isFunctionType() << DestType;
+ return ExprError();
+ }
+
+ // Rewrite the method result type if available.
+ if (ObjCMethodDecl *Method = E->getMethodDecl()) {
+ assert(Method->getResultType() == S.Context.UnknownAnyTy);
+ Method->setResultType(DestType);
+ }
+
+ // Change the type of the message.
+ E->setType(DestType.getNonReferenceType());
+ E->setValueKind(Expr::getValueKindForType(DestType));
+
+ return S.MaybeBindToTemporary(E);
+}
+
+ExprResult RebuildUnknownAnyExpr::VisitImplicitCastExpr(ImplicitCastExpr *E) {
+  // The only casts we should see here are function-to-pointer decay and
+  // lvalue-to-rvalue loads of block pointers.
+ if (E->getCastKind() == CK_FunctionToPointerDecay) {
+ assert(E->getValueKind() == VK_RValue);
+ assert(E->getObjectKind() == OK_Ordinary);
+
+ E->setType(DestType);
+
+ // Rebuild the sub-expression as the pointee (function) type.
+ DestType = DestType->castAs<PointerType>()->getPointeeType();
+
+ ExprResult Result = Visit(E->getSubExpr());
+ if (!Result.isUsable()) return ExprError();
+
+ E->setSubExpr(Result.take());
+ return S.Owned(E);
+ } else if (E->getCastKind() == CK_LValueToRValue) {
+ assert(E->getValueKind() == VK_RValue);
+ assert(E->getObjectKind() == OK_Ordinary);
+
+ assert(isa<BlockPointerType>(E->getType()));
+
+ E->setType(DestType);
+
+    // The sub-expression has to be an lvalue reference, so rebuild it as such.
+ DestType = S.Context.getLValueReferenceType(DestType);
+
+ ExprResult Result = Visit(E->getSubExpr());
+ if (!Result.isUsable()) return ExprError();
+
+ E->setSubExpr(Result.take());
+ return S.Owned(E);
+ } else {
+ llvm_unreachable("Unhandled cast type!");
+ }
+}
+
+ExprResult RebuildUnknownAnyExpr::resolveDecl(Expr *E, ValueDecl *VD) {
+ ExprValueKind ValueKind = VK_LValue;
+ QualType Type = DestType;
+
+ // We know how to make this work for certain kinds of decls:
+
+ // - functions
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(VD)) {
+ if (const PointerType *Ptr = Type->getAs<PointerType>()) {
+ DestType = Ptr->getPointeeType();
+ ExprResult Result = resolveDecl(E, VD);
+ if (Result.isInvalid()) return ExprError();
+ return S.ImpCastExprToType(Result.take(), Type,
+ CK_FunctionToPointerDecay, VK_RValue);
+ }
+
+ if (!Type->isFunctionType()) {
+ S.Diag(E->getExprLoc(), diag::err_unknown_any_function)
+ << VD << E->getSourceRange();
+ return ExprError();
+ }
+
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
+ if (MD->isInstance()) {
+ ValueKind = VK_RValue;
+ Type = S.Context.BoundMemberTy;
+ }
+
+ // Function references aren't l-values in C.
+ if (!S.getLangOpts().CPlusPlus)
+ ValueKind = VK_RValue;
+
+ // - variables
+ } else if (isa<VarDecl>(VD)) {
+ if (const ReferenceType *RefTy = Type->getAs<ReferenceType>()) {
+ Type = RefTy->getPointeeType();
+ } else if (Type->isFunctionType()) {
+ S.Diag(E->getExprLoc(), diag::err_unknown_any_var_function_type)
+ << VD << E->getSourceRange();
+ return ExprError();
+ }
+
+ // - nothing else
+ } else {
+ S.Diag(E->getExprLoc(), diag::err_unsupported_unknown_any_decl)
+ << VD << E->getSourceRange();
+ return ExprError();
+ }
+
+ VD->setType(DestType);
+ E->setType(Type);
+ E->setValueKind(ValueKind);
+ return S.Owned(E);
+}
+
+/// Check a cast of an unknown-any type. We intentionally only
+/// trigger this for C-style casts.
+ExprResult Sema::checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
+ Expr *CastExpr, CastKind &CastKind,
+ ExprValueKind &VK, CXXCastPath &Path) {
+ // Rewrite the casted expression from scratch.
+ ExprResult result = RebuildUnknownAnyExpr(*this, CastType).Visit(CastExpr);
+ if (!result.isUsable()) return ExprError();
+
+ CastExpr = result.take();
+ VK = CastExpr->getValueKind();
+ CastKind = CK_NoOp;
+
+ return CastExpr;
+}
+
+ExprResult Sema::forceUnknownAnyToType(Expr *E, QualType ToType) {
+ return RebuildUnknownAnyExpr(*this, ToType).Visit(E);
+}
+
+static ExprResult diagnoseUnknownAnyExpr(Sema &S, Expr *E) {
+ Expr *orig = E;
+ unsigned diagID = diag::err_uncasted_use_of_unknown_any;
+ while (true) {
+ E = E->IgnoreParenImpCasts();
+ if (CallExpr *call = dyn_cast<CallExpr>(E)) {
+ E = call->getCallee();
+ diagID = diag::err_uncasted_call_of_unknown_any;
+ } else {
+ break;
+ }
+ }
+
+ SourceLocation loc;
+ NamedDecl *d;
+ if (DeclRefExpr *ref = dyn_cast<DeclRefExpr>(E)) {
+ loc = ref->getLocation();
+ d = ref->getDecl();
+ } else if (MemberExpr *mem = dyn_cast<MemberExpr>(E)) {
+ loc = mem->getMemberLoc();
+ d = mem->getMemberDecl();
+ } else if (ObjCMessageExpr *msg = dyn_cast<ObjCMessageExpr>(E)) {
+ diagID = diag::err_uncasted_call_of_unknown_any;
+ loc = msg->getSelectorStartLoc();
+ d = msg->getMethodDecl();
+ if (!d) {
+ S.Diag(loc, diag::err_uncasted_send_to_unknown_any_method)
+ << static_cast<unsigned>(msg->isClassMessage()) << msg->getSelector()
+ << orig->getSourceRange();
+ return ExprError();
+ }
+ } else {
+ S.Diag(E->getExprLoc(), diag::err_unsupported_unknown_any_expr)
+ << E->getSourceRange();
+ return ExprError();
+ }
+
+ S.Diag(loc, diagID) << d << orig->getSourceRange();
+
+ // Never recoverable.
+ return ExprError();
+}
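A rough sketch of the user-facing behavior handled here, assuming Clang's -funknown-anytype extension (the declarations below are hypothetical and not part of the patch): every use of a __unknown_anytype entity must be cast so its type can be rebuilt.

// Compile with: clang -funknown-anytype (extension used mainly by debuggers).
extern __unknown_anytype opaque_fn();
extern __unknown_anytype opaque_var;

int use() {
  int a = (int)opaque_fn();    // C-style cast lets the rebuild logic resolve the call's type
  int b = (int)opaque_var;     // likewise for a variable reference
  // return opaque_var;        // uncasted use: err_uncasted_use_of_unknown_any
  return a + b;
}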
+
+/// Check for operands with placeholder types and complain if found.
+/// Returns ExprError() if there was an error and no recovery was possible.
+ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
+ const BuiltinType *placeholderType = E->getType()->getAsPlaceholderType();
+ if (!placeholderType) return Owned(E);
+
+ switch (placeholderType->getKind()) {
+
+ // Overloaded expressions.
+ case BuiltinType::Overload: {
+ // Try to resolve a single function template specialization.
+ // This is obligatory.
+ ExprResult result = Owned(E);
+ if (ResolveAndFixSingleFunctionTemplateSpecialization(result, false)) {
+ return result;
+
+ // If that failed, try to recover with a call.
+ } else {
+ tryToRecoverWithCall(result, PDiag(diag::err_ovl_unresolvable),
+ /*complain*/ true);
+ return result;
+ }
+ }
+
+ // Bound member functions.
+ case BuiltinType::BoundMember: {
+ ExprResult result = Owned(E);
+ tryToRecoverWithCall(result, PDiag(diag::err_bound_member_function),
+ /*complain*/ true);
+ return result;
+ }
+
+ // ARC unbridged casts.
+ case BuiltinType::ARCUnbridgedCast: {
+ Expr *realCast = stripARCUnbridgedCast(E);
+ diagnoseARCUnbridgedCast(realCast);
+ return Owned(realCast);
+ }
+
+ // Expressions of unknown type.
+ case BuiltinType::UnknownAny:
+ return diagnoseUnknownAnyExpr(*this, E);
+
+ // Pseudo-objects.
+ case BuiltinType::PseudoObject:
+ return checkPseudoObjectRValue(E);
+
+ // Everything else should be impossible.
+#define BUILTIN_TYPE(Id, SingletonId) \
+ case BuiltinType::Id:
+#define PLACEHOLDER_TYPE(Id, SingletonId)
+#include "clang/AST/BuiltinTypes.def"
+ break;
+ }
+
+ llvm_unreachable("invalid placeholder type!");
+}
+
+bool Sema::CheckCaseExpression(Expr *E) {
+ if (E->isTypeDependent())
+ return true;
+ if (E->isValueDependent() || E->isIntegerConstantExpr(Context))
+ return E->getType()->isIntegralOrEnumerationType();
+ return false;
+}
+
+/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
+ExprResult
+Sema::ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
+ assert((Kind == tok::kw___objc_yes || Kind == tok::kw___objc_no) &&
+ "Unknown Objective-C Boolean value!");
+ QualType ObjCBoolLiteralQT = Context.ObjCBuiltinBoolTy;
+  // signed char is the default type for boolean literals. Use 'BOOL'
+  // instead if a BOOL typedef is visible in this scope.
+ Decl *TD =
+ LookupSingleName(TUScope, &Context.Idents.get("BOOL"),
+ SourceLocation(), LookupOrdinaryName);
+ if (TypedefDecl *BoolTD = dyn_cast_or_null<TypedefDecl>(TD)) {
+ QualType QT = BoolTD->getUnderlyingType();
+ if (!QT->isIntegralOrUnscopedEnumerationType()) {
+ Diag(OpLoc, diag::warn_bool_for_boolean_literal) << QT;
+ Diag(BoolTD->getLocation(), diag::note_previous_declaration);
+ }
+ else
+ ObjCBoolLiteralQT = QT;
+ }
+
+ return Owned(new (Context) ObjCBoolLiteralExpr(Kind == tok::kw___objc_yes,
+ ObjCBoolLiteralQT, OpLoc));
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
new file mode 100644
index 0000000..31a8115
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprCXX.cpp
@@ -0,0 +1,5315 @@
+//===--- SemaExprCXX.cpp - Semantic Analysis for Expressions --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for C++ expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/TemplateDeduction.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "TypeLocBuilder.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+using namespace sema;
+
+ParsedType Sema::getDestructorName(SourceLocation TildeLoc,
+ IdentifierInfo &II,
+ SourceLocation NameLoc,
+ Scope *S, CXXScopeSpec &SS,
+ ParsedType ObjectTypePtr,
+ bool EnteringContext) {
+ // Determine where to perform name lookup.
+
+ // FIXME: This area of the standard is very messy, and the current
+ // wording is rather unclear about which scopes we search for the
+ // destructor name; see core issues 399 and 555. Issue 399 in
+ // particular shows where the current description of destructor name
+ // lookup is completely out of line with existing practice, e.g.,
+ // this appears to be ill-formed:
+ //
+ // namespace N {
+ // template <typename T> struct S {
+ // ~S();
+ // };
+ // }
+ //
+ // void f(N::S<int>* s) {
+ // s->N::S<int>::~S();
+ // }
+ //
+ // See also PR6358 and PR6359.
+ // For this reason, we're currently only doing the C++03 version of this
+ // code; the C++0x version has to wait until we get a proper spec.
+ QualType SearchType;
+ DeclContext *LookupCtx = 0;
+ bool isDependent = false;
+ bool LookInScope = false;
+
+ // If we have an object type, it's because we are in a
+ // pseudo-destructor-expression or a member access expression, and
+ // we know what type we're looking for.
+ if (ObjectTypePtr)
+ SearchType = GetTypeFromParser(ObjectTypePtr);
+
+ if (SS.isSet()) {
+ NestedNameSpecifier *NNS = (NestedNameSpecifier *)SS.getScopeRep();
+
+ bool AlreadySearched = false;
+ bool LookAtPrefix = true;
+ // C++ [basic.lookup.qual]p6:
+ // If a pseudo-destructor-name (5.2.4) contains a nested-name-specifier,
+ // the type-names are looked up as types in the scope designated by the
+ // nested-name-specifier. In a qualified-id of the form:
+ //
+ // ::[opt] nested-name-specifier ~ class-name
+ //
+ // where the nested-name-specifier designates a namespace scope, and in
+ // a qualified-id of the form:
+ //
+ // ::opt nested-name-specifier class-name :: ~ class-name
+ //
+ // the class-names are looked up as types in the scope designated by
+ // the nested-name-specifier.
+ //
+ // Here, we check the first case (completely) and determine whether the
+ // code below is permitted to look at the prefix of the
+ // nested-name-specifier.
+ DeclContext *DC = computeDeclContext(SS, EnteringContext);
+ if (DC && DC->isFileContext()) {
+ AlreadySearched = true;
+ LookupCtx = DC;
+ isDependent = false;
+ } else if (DC && isa<CXXRecordDecl>(DC))
+ LookAtPrefix = false;
+
+ // The second case from the C++03 rules quoted further above.
+ NestedNameSpecifier *Prefix = 0;
+ if (AlreadySearched) {
+ // Nothing left to do.
+ } else if (LookAtPrefix && (Prefix = NNS->getPrefix())) {
+ CXXScopeSpec PrefixSS;
+ PrefixSS.Adopt(NestedNameSpecifierLoc(Prefix, SS.location_data()));
+ LookupCtx = computeDeclContext(PrefixSS, EnteringContext);
+ isDependent = isDependentScopeSpecifier(PrefixSS);
+ } else if (ObjectTypePtr) {
+ LookupCtx = computeDeclContext(SearchType);
+ isDependent = SearchType->isDependentType();
+ } else {
+ LookupCtx = computeDeclContext(SS, EnteringContext);
+ isDependent = LookupCtx && LookupCtx->isDependentContext();
+ }
+
+ LookInScope = false;
+ } else if (ObjectTypePtr) {
+ // C++ [basic.lookup.classref]p3:
+ // If the unqualified-id is ~type-name, the type-name is looked up
+ // in the context of the entire postfix-expression. If the type T
+ // of the object expression is of a class type C, the type-name is
+ // also looked up in the scope of class C. At least one of the
+ // lookups shall find a name that refers to (possibly
+ // cv-qualified) T.
+ LookupCtx = computeDeclContext(SearchType);
+ isDependent = SearchType->isDependentType();
+ assert((isDependent || !SearchType->isIncompleteType()) &&
+ "Caller should have completed object type");
+
+ LookInScope = true;
+ } else {
+ // Perform lookup into the current scope (only).
+ LookInScope = true;
+ }
+
+ TypeDecl *NonMatchingTypeDecl = 0;
+ LookupResult Found(*this, &II, NameLoc, LookupOrdinaryName);
+ for (unsigned Step = 0; Step != 2; ++Step) {
+ // Look for the name first in the computed lookup context (if we
+ // have one) and, if that fails to find a match, in the scope (if
+ // we're allowed to look there).
+ Found.clear();
+ if (Step == 0 && LookupCtx)
+ LookupQualifiedName(Found, LookupCtx);
+ else if (Step == 1 && LookInScope && S)
+ LookupName(Found, S);
+ else
+ continue;
+
+ // FIXME: Should we be suppressing ambiguities here?
+ if (Found.isAmbiguous())
+ return ParsedType();
+
+ if (TypeDecl *Type = Found.getAsSingle<TypeDecl>()) {
+ QualType T = Context.getTypeDeclType(Type);
+
+ if (SearchType.isNull() || SearchType->isDependentType() ||
+ Context.hasSameUnqualifiedType(T, SearchType)) {
+ // We found our type!
+
+ return ParsedType::make(T);
+ }
+
+ if (!SearchType.isNull())
+ NonMatchingTypeDecl = Type;
+ }
+
+ // If the name that we found is a class template name, and it is
+ // the same name as the template name in the last part of the
+ // nested-name-specifier (if present) or the object type, then
+ // this is the destructor for that class.
+ // FIXME: This is a workaround until we get real drafting for core
+ // issue 399, for which there isn't even an obvious direction.
+ if (ClassTemplateDecl *Template = Found.getAsSingle<ClassTemplateDecl>()) {
+ QualType MemberOfType;
+ if (SS.isSet()) {
+ if (DeclContext *Ctx = computeDeclContext(SS, EnteringContext)) {
+ // Figure out the type of the context, if it has one.
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Ctx))
+ MemberOfType = Context.getTypeDeclType(Record);
+ }
+ }
+ if (MemberOfType.isNull())
+ MemberOfType = SearchType;
+
+ if (MemberOfType.isNull())
+ continue;
+
+ // We're referring into a class template specialization. If the
+ // class template we found is the same as the template being
+ // specialized, we found what we are looking for.
+ if (const RecordType *Record = MemberOfType->getAs<RecordType>()) {
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Record->getDecl())) {
+ if (Spec->getSpecializedTemplate()->getCanonicalDecl() ==
+ Template->getCanonicalDecl())
+ return ParsedType::make(MemberOfType);
+ }
+
+ continue;
+ }
+
+ // We're referring to an unresolved class template
+      // specialization. Determine whether the class template we found
+ // is the same as the template being specialized or, if we don't
+ // know which template is being specialized, that it at least
+ // has the same name.
+ if (const TemplateSpecializationType *SpecType
+ = MemberOfType->getAs<TemplateSpecializationType>()) {
+ TemplateName SpecName = SpecType->getTemplateName();
+
+ // The class template we found is the same template being
+ // specialized.
+ if (TemplateDecl *SpecTemplate = SpecName.getAsTemplateDecl()) {
+ if (SpecTemplate->getCanonicalDecl() == Template->getCanonicalDecl())
+ return ParsedType::make(MemberOfType);
+
+ continue;
+ }
+
+ // The class template we found has the same name as the
+ // (dependent) template name being specialized.
+ if (DependentTemplateName *DepTemplate
+ = SpecName.getAsDependentTemplateName()) {
+ if (DepTemplate->isIdentifier() &&
+ DepTemplate->getIdentifier() == Template->getIdentifier())
+ return ParsedType::make(MemberOfType);
+
+ continue;
+ }
+ }
+ }
+ }
+
+ if (isDependent) {
+ // We didn't find our type, but that's okay: it's dependent
+ // anyway.
+
+ // FIXME: What if we have no nested-name-specifier?
+ QualType T = CheckTypenameType(ETK_None, SourceLocation(),
+ SS.getWithLocInContext(Context),
+ II, NameLoc);
+ return ParsedType::make(T);
+ }
+
+ if (NonMatchingTypeDecl) {
+ QualType T = Context.getTypeDeclType(NonMatchingTypeDecl);
+ Diag(NameLoc, diag::err_destructor_expr_type_mismatch)
+ << T << SearchType;
+ Diag(NonMatchingTypeDecl->getLocation(), diag::note_destructor_type_here)
+ << T;
+ } else if (ObjectTypePtr)
+ Diag(NameLoc, diag::err_ident_in_dtor_not_a_type)
+ << &II;
+ else
+ Diag(NameLoc, diag::err_destructor_class_name);
+
+ return ParsedType();
+}
+
+ParsedType Sema::getDestructorType(const DeclSpec& DS, ParsedType ObjectType) {
+ if (DS.getTypeSpecType() == DeclSpec::TST_error || !ObjectType)
+ return ParsedType();
+ assert(DS.getTypeSpecType() == DeclSpec::TST_decltype
+ && "only get destructor types from declspecs");
+ QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
+ QualType SearchType = GetTypeFromParser(ObjectType);
+ if (SearchType->isDependentType() || Context.hasSameUnqualifiedType(SearchType, T)) {
+ return ParsedType::make(T);
+ }
+
+ Diag(DS.getTypeSpecTypeLoc(), diag::err_destructor_expr_type_mismatch)
+ << T << SearchType;
+ return ParsedType();
+}
+
+/// \brief Build a C++ typeid expression with a type operand.
+ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ TypeSourceInfo *Operand,
+ SourceLocation RParenLoc) {
+ // C++ [expr.typeid]p4:
+ // The top-level cv-qualifiers of the lvalue expression or the type-id
+ // that is the operand of typeid are always ignored.
+ // If the type of the type-id is a class type or a reference to a class
+ // type, the class shall be completely-defined.
+ Qualifiers Quals;
+ QualType T
+ = Context.getUnqualifiedArrayType(Operand->getType().getNonReferenceType(),
+ Quals);
+ if (T->getAs<RecordType>() &&
+ RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid))
+ return ExprError();
+
+ return Owned(new (Context) CXXTypeidExpr(TypeInfoType.withConst(),
+ Operand,
+ SourceRange(TypeidLoc, RParenLoc)));
+}
+
+/// \brief Build a C++ typeid expression with an expression operand.
+ExprResult Sema::BuildCXXTypeId(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ Expr *E,
+ SourceLocation RParenLoc) {
+ if (E && !E->isTypeDependent()) {
+ if (E->getType()->isPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(E);
+ if (result.isInvalid()) return ExprError();
+ E = result.take();
+ }
+
+ QualType T = E->getType();
+ if (const RecordType *RecordT = T->getAs<RecordType>()) {
+ CXXRecordDecl *RecordD = cast<CXXRecordDecl>(RecordT->getDecl());
+ // C++ [expr.typeid]p3:
+ // [...] If the type of the expression is a class type, the class
+ // shall be completely-defined.
+ if (RequireCompleteType(TypeidLoc, T, diag::err_incomplete_typeid))
+ return ExprError();
+
+ // C++ [expr.typeid]p3:
+      //   When typeid is applied to an expression other than a glvalue of a
+ // polymorphic class type [...] [the] expression is an unevaluated
+ // operand. [...]
+ if (RecordD->isPolymorphic() && E->Classify(Context).isGLValue()) {
+ // The subexpression is potentially evaluated; switch the context
+ // and recheck the subexpression.
+ ExprResult Result = TranformToPotentiallyEvaluated(E);
+ if (Result.isInvalid()) return ExprError();
+ E = Result.take();
+
+ // We require a vtable to query the type at run time.
+ MarkVTableUsed(TypeidLoc, RecordD);
+ }
+ }
+
+ // C++ [expr.typeid]p4:
+ // [...] If the type of the type-id is a reference to a possibly
+ // cv-qualified type, the result of the typeid expression refers to a
+ // std::type_info object representing the cv-unqualified referenced
+ // type.
+ Qualifiers Quals;
+ QualType UnqualT = Context.getUnqualifiedArrayType(T, Quals);
+ if (!Context.hasSameType(T, UnqualT)) {
+ T = UnqualT;
+ E = ImpCastExprToType(E, UnqualT, CK_NoOp, E->getValueKind()).take();
+ }
+ }
+
+ return Owned(new (Context) CXXTypeidExpr(TypeInfoType.withConst(),
+ E,
+ SourceRange(TypeidLoc, RParenLoc)));
+}
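A short user-level example of the evaluation rule quoted above (not from the patch): typeid on a glvalue of polymorphic class type is a potentially-evaluated operand and consults the vtable, while other operands stay unevaluated.

#include <cstdio>
#include <typeinfo>

struct Base { virtual ~Base() {} };
struct Plain { int n; };

void describe(Base &b, Plain &p) {
  std::puts(typeid(b).name());  // polymorphic glvalue: evaluated at run time via the vtable
  std::puts(typeid(p).name());  // non-polymorphic: unevaluated, the static type is used
}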
+
+/// ActOnCXXTypeid - Parse typeid( type-id ) or typeid (expression);
+ExprResult
+Sema::ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc,
+ bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
+ // Find the std::type_info type.
+ if (!getStdNamespace())
+ return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));
+
+ if (!CXXTypeInfoDecl) {
+ IdentifierInfo *TypeInfoII = &PP.getIdentifierTable().get("type_info");
+ LookupResult R(*this, TypeInfoII, SourceLocation(), LookupTagName);
+ LookupQualifiedName(R, getStdNamespace());
+ CXXTypeInfoDecl = R.getAsSingle<RecordDecl>();
+ if (!CXXTypeInfoDecl)
+ return ExprError(Diag(OpLoc, diag::err_need_header_before_typeid));
+ }
+
+ QualType TypeInfoType = Context.getTypeDeclType(CXXTypeInfoDecl);
+
+ if (isType) {
+ // The operand is a type; handle it as such.
+ TypeSourceInfo *TInfo = 0;
+ QualType T = GetTypeFromParser(ParsedType::getFromOpaquePtr(TyOrExpr),
+ &TInfo);
+ if (T.isNull())
+ return ExprError();
+
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc);
+
+ return BuildCXXTypeId(TypeInfoType, OpLoc, TInfo, RParenLoc);
+ }
+
+ // The operand is an expression.
+ return BuildCXXTypeId(TypeInfoType, OpLoc, (Expr*)TyOrExpr, RParenLoc);
+}
+
+/// Retrieve the UuidAttr associated with QT.
+static UuidAttr *GetUuidAttrOfType(QualType QT) {
+ // Optionally remove one level of pointer, reference or array indirection.
+  const Type *Ty = QT.getTypePtr();
+ if (QT->isPointerType() || QT->isReferenceType())
+ Ty = QT->getPointeeType().getTypePtr();
+ else if (QT->isArrayType())
+ Ty = cast<ArrayType>(QT)->getElementType().getTypePtr();
+
+  // Loop over all record redeclarations looking for a uuid attribute. Bail
+  // out if the type is not a C++ record at all.
+  CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
+  if (!RD)
+    return 0;
+  for (CXXRecordDecl::redecl_iterator I = RD->redecls_begin(),
+       E = RD->redecls_end(); I != E; ++I) {
+ if (UuidAttr *Uuid = I->getAttr<UuidAttr>())
+ return Uuid;
+ }
+
+ return 0;
+}
+
+/// \brief Build a Microsoft __uuidof expression with a type operand.
+ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ TypeSourceInfo *Operand,
+ SourceLocation RParenLoc) {
+ if (!Operand->getType()->isDependentType()) {
+ if (!GetUuidAttrOfType(Operand->getType()))
+ return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
+ }
+
+ // FIXME: add __uuidof semantic analysis for type operand.
+ return Owned(new (Context) CXXUuidofExpr(TypeInfoType.withConst(),
+ Operand,
+ SourceRange(TypeidLoc, RParenLoc)));
+}
+
+/// \brief Build a Microsoft __uuidof expression with an expression operand.
+ExprResult Sema::BuildCXXUuidof(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ Expr *E,
+ SourceLocation RParenLoc) {
+ if (!E->getType()->isDependentType()) {
+ if (!GetUuidAttrOfType(E->getType()) &&
+ !E->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull))
+ return ExprError(Diag(TypeidLoc, diag::err_uuidof_without_guid));
+ }
+ // FIXME: add __uuidof semantic analysis for type operand.
+ return Owned(new (Context) CXXUuidofExpr(TypeInfoType.withConst(),
+ E,
+ SourceRange(TypeidLoc, RParenLoc)));
+}
+
+/// ActOnCXXUuidof - Parse __uuidof( type-id ) or __uuidof (expression);
+ExprResult
+Sema::ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc,
+ bool isType, void *TyOrExpr, SourceLocation RParenLoc) {
+ // If MSVCGuidDecl has not been cached, do the lookup.
+ if (!MSVCGuidDecl) {
+ IdentifierInfo *GuidII = &PP.getIdentifierTable().get("_GUID");
+ LookupResult R(*this, GuidII, SourceLocation(), LookupTagName);
+ LookupQualifiedName(R, Context.getTranslationUnitDecl());
+ MSVCGuidDecl = R.getAsSingle<RecordDecl>();
+ if (!MSVCGuidDecl)
+ return ExprError(Diag(OpLoc, diag::err_need_header_before_ms_uuidof));
+ }
+
+ QualType GuidType = Context.getTypeDeclType(MSVCGuidDecl);
+
+ if (isType) {
+ // The operand is a type; handle it as such.
+ TypeSourceInfo *TInfo = 0;
+ QualType T = GetTypeFromParser(ParsedType::getFromOpaquePtr(TyOrExpr),
+ &TInfo);
+ if (T.isNull())
+ return ExprError();
+
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(T, OpLoc);
+
+ return BuildCXXUuidof(GuidType, OpLoc, TInfo, RParenLoc);
+ }
+
+ // The operand is an expression.
+ return BuildCXXUuidof(GuidType, OpLoc, (Expr*)TyOrExpr, RParenLoc);
+}
+
+/// ActOnCXXBoolLiteral - Parse {true,false} literals.
+ExprResult
+Sema::ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind) {
+ assert((Kind == tok::kw_true || Kind == tok::kw_false) &&
+ "Unknown C++ Boolean value!");
+ return Owned(new (Context) CXXBoolLiteralExpr(Kind == tok::kw_true,
+ Context.BoolTy, OpLoc));
+}
+
+/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
+ExprResult
+Sema::ActOnCXXNullPtrLiteral(SourceLocation Loc) {
+ return Owned(new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc));
+}
+
+/// ActOnCXXThrow - Parse throw expressions.
+ExprResult
+Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) {
+ bool IsThrownVarInScope = false;
+ if (Ex) {
+ // C++0x [class.copymove]p31:
+ // When certain criteria are met, an implementation is allowed to omit the
+ // copy/move construction of a class object [...]
+ //
+ // - in a throw-expression, when the operand is the name of a
+ // non-volatile automatic object (other than a function or catch-
+ // clause parameter) whose scope does not extend beyond the end of the
+ // innermost enclosing try-block (if there is one), the copy/move
+ // operation from the operand to the exception object (15.1) can be
+ // omitted by constructing the automatic object directly into the
+ // exception object
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Ex->IgnoreParens()))
+ if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (Var->hasLocalStorage() && !Var->getType().isVolatileQualified()) {
+ for( ; S; S = S->getParent()) {
+ if (S->isDeclScope(Var)) {
+ IsThrownVarInScope = true;
+ break;
+ }
+
+ if (S->getFlags() &
+ (Scope::FnScope | Scope::ClassScope | Scope::BlockScope |
+ Scope::FunctionPrototypeScope | Scope::ObjCMethodScope |
+ Scope::TryScope))
+ break;
+ }
+ }
+ }
+ }
+
+ return BuildCXXThrow(OpLoc, Ex, IsThrownVarInScope);
+}
+
+ExprResult Sema::BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
+ bool IsThrownVarInScope) {
+ // Don't report an error if 'throw' is used in system headers.
+ if (!getLangOpts().CXXExceptions &&
+ !getSourceManager().isInSystemHeader(OpLoc))
+ Diag(OpLoc, diag::err_exceptions_disabled) << "throw";
+
+ if (Ex && !Ex->isTypeDependent()) {
+ ExprResult ExRes = CheckCXXThrowOperand(OpLoc, Ex, IsThrownVarInScope);
+ if (ExRes.isInvalid())
+ return ExprError();
+ Ex = ExRes.take();
+ }
+
+ return Owned(new (Context) CXXThrowExpr(Ex, Context.VoidTy, OpLoc,
+ IsThrownVarInScope));
+}
+
+/// CheckCXXThrowOperand - Validate the operand of a throw.
+ExprResult Sema::CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E,
+ bool IsThrownVarInScope) {
+ // C++ [except.throw]p3:
+ // A throw-expression initializes a temporary object, called the exception
+ // object, the type of which is determined by removing any top-level
+ // cv-qualifiers from the static type of the operand of throw and adjusting
+ // the type from "array of T" or "function returning T" to "pointer to T"
+ // or "pointer to function returning T", [...]
+ if (E->getType().hasQualifiers())
+ E = ImpCastExprToType(E, E->getType().getUnqualifiedType(), CK_NoOp,
+ E->getValueKind()).take();
+
+ ExprResult Res = DefaultFunctionArrayConversion(E);
+ if (Res.isInvalid())
+ return ExprError();
+ E = Res.take();
+
+ // If the type of the exception would be an incomplete type or a pointer
+ // to an incomplete type other than (cv) void the program is ill-formed.
+ QualType Ty = E->getType();
+ bool isPointer = false;
+ if (const PointerType* Ptr = Ty->getAs<PointerType>()) {
+ Ty = Ptr->getPointeeType();
+ isPointer = true;
+ }
+ if (!isPointer || !Ty->isVoidType()) {
+ if (RequireCompleteType(ThrowLoc, Ty,
+ PDiag(isPointer ? diag::err_throw_incomplete_ptr
+ : diag::err_throw_incomplete)
+ << E->getSourceRange()))
+ return ExprError();
+
+ if (RequireNonAbstractType(ThrowLoc, E->getType(),
+ PDiag(diag::err_throw_abstract_type)
+ << E->getSourceRange()))
+ return ExprError();
+ }
+
+ // Initialize the exception result. This implicitly weeds out
+ // abstract types or types with inaccessible copy constructors.
+
+ // C++0x [class.copymove]p31:
+ // When certain criteria are met, an implementation is allowed to omit the
+ // copy/move construction of a class object [...]
+ //
+ // - in a throw-expression, when the operand is the name of a
+ // non-volatile automatic object (other than a function or catch-clause
+ // parameter) whose scope does not extend beyond the end of the
+ // innermost enclosing try-block (if there is one), the copy/move
+ // operation from the operand to the exception object (15.1) can be
+ // omitted by constructing the automatic object directly into the
+ // exception object
+ const VarDecl *NRVOVariable = 0;
+ if (IsThrownVarInScope)
+ NRVOVariable = getCopyElisionCandidate(QualType(), E, false);
+
+ InitializedEntity Entity =
+ InitializedEntity::InitializeException(ThrowLoc, E->getType(),
+ /*NRVO=*/NRVOVariable != 0);
+ Res = PerformMoveOrCopyInitialization(Entity, NRVOVariable,
+ QualType(), E,
+ IsThrownVarInScope);
+ if (Res.isInvalid())
+ return ExprError();
+ E = Res.take();
+
+ // If the exception has class type, we need additional handling.
+ const RecordType *RecordTy = Ty->getAs<RecordType>();
+ if (!RecordTy)
+ return Owned(E);
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
+
+ // If we are throwing a polymorphic class type or pointer thereof,
+ // exception handling will make use of the vtable.
+ MarkVTableUsed(ThrowLoc, RD);
+
+ // If a pointer is thrown, the referenced object will not be destroyed.
+ if (isPointer)
+ return Owned(E);
+
+ // If the class has a destructor, we must be able to call it.
+ if (RD->hasIrrelevantDestructor())
+ return Owned(E);
+
+ CXXDestructorDecl *Destructor = LookupDestructor(RD);
+ if (!Destructor)
+ return Owned(E);
+
+ MarkFunctionReferenced(E->getExprLoc(), Destructor);
+ CheckDestructorAccess(E->getExprLoc(), Destructor,
+ PDiag(diag::err_access_dtor_exception) << Ty);
+ DiagnoseUseOfDecl(Destructor, E->getExprLoc());
+ return Owned(E);
+}
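A small example of the copy-elision rule quoted above (user code, not from the patch): when the thrown operand names a local object whose scope does not extend past the enclosing try-block, the copy or move into the exception object may be omitted.

#include <string>

void raise() {
  std::string msg = "lookup failed";
  // 'msg' is a non-volatile automatic object whose scope does not extend past
  // the innermost enclosing try-block (there is none here), so it may be
  // constructed directly into the exception object.
  throw msg;
}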
+
+QualType Sema::getCurrentThisType() {
+ DeclContext *DC = getFunctionLevelDeclContext();
+ QualType ThisTy;
+ if (CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(DC)) {
+ if (method && method->isInstance())
+ ThisTy = method->getThisType(Context);
+ } else if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
+ // C++0x [expr.prim]p4:
+ // Otherwise, if a member-declarator declares a non-static data member
+ // of a class X, the expression this is a prvalue of type "pointer to X"
+ // within the optional brace-or-equal-initializer.
+ Scope *S = getScopeForContext(DC);
+ if (!S || S->getFlags() & Scope::ThisScope)
+ ThisTy = Context.getPointerType(Context.getRecordType(RD));
+ }
+
+ return ThisTy;
+}
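A brief example of the non-static data member initializer case described above (user code, not from the patch): inside a brace-or-equal-initializer, 'this' is a prvalue of type pointer-to-the-enclosing-class.

struct Widget {
  static int next_id(Widget *) { return 1; }
  int id = next_id(this);   // 'this' has type Widget* inside the default member initializer
};

int main() { return Widget().id; }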
+
+void Sema::CheckCXXThisCapture(SourceLocation Loc, bool Explicit) {
+ // We don't need to capture this in an unevaluated context.
+ if (ExprEvalContexts.back().Context == Unevaluated && !Explicit)
+ return;
+
+ // Otherwise, check that we can capture 'this'.
+ unsigned NumClosures = 0;
+ for (unsigned idx = FunctionScopes.size() - 1; idx != 0; idx--) {
+ if (CapturingScopeInfo *CSI =
+ dyn_cast<CapturingScopeInfo>(FunctionScopes[idx])) {
+ if (CSI->CXXThisCaptureIndex != 0) {
+ // 'this' is already being captured; there isn't anything more to do.
+ break;
+ }
+
+ if (CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByref ||
+ CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_LambdaByval ||
+ CSI->ImpCaptureStyle == CapturingScopeInfo::ImpCap_Block ||
+ Explicit) {
+ // This closure can capture 'this'; continue looking upwards.
+ NumClosures++;
+ Explicit = false;
+ continue;
+ }
+ // This context can't implicitly capture 'this'; fail out.
+ Diag(Loc, diag::err_this_capture) << Explicit;
+ return;
+ }
+ break;
+ }
+
+ // Mark that we're implicitly capturing 'this' in all the scopes we skipped.
+ // FIXME: We need to delay this marking in PotentiallyPotentiallyEvaluated
+ // contexts.
+ for (unsigned idx = FunctionScopes.size() - 1;
+ NumClosures; --idx, --NumClosures) {
+ CapturingScopeInfo *CSI = cast<CapturingScopeInfo>(FunctionScopes[idx]);
+ Expr *ThisExpr = 0;
+ QualType ThisTy = getCurrentThisType();
+ if (LambdaScopeInfo *LSI = dyn_cast<LambdaScopeInfo>(CSI)) {
+ // For lambda expressions, build a field and an initializing expression.
+ CXXRecordDecl *Lambda = LSI->Lambda;
+ FieldDecl *Field
+ = FieldDecl::Create(Context, Lambda, Loc, Loc, 0, ThisTy,
+ Context.getTrivialTypeSourceInfo(ThisTy, Loc),
+ 0, false, false);
+ Field->setImplicit(true);
+ Field->setAccess(AS_private);
+ Lambda->addDecl(Field);
+ ThisExpr = new (Context) CXXThisExpr(Loc, ThisTy, /*isImplicit=*/true);
+ }
+ bool isNested = NumClosures > 1;
+ CSI->addThisCapture(isNested, Loc, ThisTy, ThisExpr);
+ }
+}
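// Illustration (not part of this change; hypothetical names): the loop above
// marks 'this' as captured by each enclosing closure that is allowed to capture
// it, which is what makes member accesses inside nested lambdas work, e.g.:
//
//   struct Counter {                 // hypothetical class
//     int n;
//     int snapshot() {
//       auto outer = [this] {
//         auto inner = [this] { return n; };   // nested capture of 'this'
//         return inner();
//       };
//       return outer();
//     }
//   };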
+
+ExprResult Sema::ActOnCXXThis(SourceLocation Loc) {
+ /// C++ 9.3.2: In the body of a non-static member function, the keyword this
+ /// is a non-lvalue expression whose value is the address of the object for
+ /// which the function is called.
+
+ QualType ThisTy = getCurrentThisType();
+ if (ThisTy.isNull()) return Diag(Loc, diag::err_invalid_this_use);
+
+ CheckCXXThisCapture(Loc);
+ return Owned(new (Context) CXXThisExpr(Loc, ThisTy, /*isImplicit=*/false));
+}
+
+ExprResult
+Sema::ActOnCXXTypeConstructExpr(ParsedType TypeRep,
+ SourceLocation LParenLoc,
+ MultiExprArg exprs,
+ SourceLocation RParenLoc) {
+ if (!TypeRep)
+ return ExprError();
+
+ TypeSourceInfo *TInfo;
+ QualType Ty = GetTypeFromParser(TypeRep, &TInfo);
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(Ty, SourceLocation());
+
+ return BuildCXXTypeConstructExpr(TInfo, LParenLoc, exprs, RParenLoc);
+}
+
+/// BuildCXXTypeConstructExpr - Build an expression that constructs a value of
+/// the specified type. Can be interpreted either as function-style casting
+/// ("int(x)") or class type construction ("ClassType(x,y,z)")
+/// or creation of a value-initialized type ("int()").
+ExprResult
+Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
+ SourceLocation LParenLoc,
+ MultiExprArg exprs,
+ SourceLocation RParenLoc) {
+ QualType Ty = TInfo->getType();
+ unsigned NumExprs = exprs.size();
+ Expr **Exprs = (Expr**)exprs.get();
+ SourceLocation TyBeginLoc = TInfo->getTypeLoc().getBeginLoc();
+
+ if (Ty->isDependentType() ||
+ CallExpr::hasAnyTypeDependentArguments(
+ llvm::makeArrayRef(Exprs, NumExprs))) {
+ exprs.release();
+
+ return Owned(CXXUnresolvedConstructExpr::Create(Context, TInfo,
+ LParenLoc,
+ Exprs, NumExprs,
+ RParenLoc));
+ }
+
+ bool ListInitialization = LParenLoc.isInvalid();
+ assert((!ListInitialization || (NumExprs == 1 && isa<InitListExpr>(Exprs[0])))
+ && "List initialization must have initializer list as expression.");
+ SourceRange FullRange = SourceRange(TyBeginLoc,
+ ListInitialization ? Exprs[0]->getSourceRange().getEnd() : RParenLoc);
+
+ // C++ [expr.type.conv]p1:
+ // If the expression list is a single expression, the type conversion
+ // expression is equivalent (in definedness, and if defined in meaning) to the
+ // corresponding cast expression.
+ if (NumExprs == 1 && !ListInitialization) {
+ Expr *Arg = Exprs[0];
+ exprs.release();
+ return BuildCXXFunctionalCastExpr(TInfo, LParenLoc, Arg, RParenLoc);
+ }
+
+ QualType ElemTy = Ty;
+ if (Ty->isArrayType()) {
+ if (!ListInitialization)
+ return ExprError(Diag(TyBeginLoc,
+ diag::err_value_init_for_array_type) << FullRange);
+ ElemTy = Context.getBaseElementType(Ty);
+ }
+
+ if (!Ty->isVoidType() &&
+ RequireCompleteType(TyBeginLoc, ElemTy,
+ PDiag(diag::err_invalid_incomplete_type_use)
+ << FullRange))
+ return ExprError();
+
+ if (RequireNonAbstractType(TyBeginLoc, Ty,
+ diag::err_allocation_of_abstract_type))
+ return ExprError();
+
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(TInfo);
+ InitializationKind Kind
+ = NumExprs ? ListInitialization
+ ? InitializationKind::CreateDirectList(TyBeginLoc)
+ : InitializationKind::CreateDirect(TyBeginLoc,
+ LParenLoc, RParenLoc)
+ : InitializationKind::CreateValue(TyBeginLoc,
+ LParenLoc, RParenLoc);
+ InitializationSequence InitSeq(*this, Entity, Kind, Exprs, NumExprs);
+ ExprResult Result = InitSeq.Perform(*this, Entity, Kind, move(exprs));
+
+ if (!Result.isInvalid() && ListInitialization &&
+ isa<InitListExpr>(Result.get())) {
+ // If the list-initialization doesn't involve a constructor call, we'll get
+ // the initializer-list (with corrected type) back, but that's not what we
+ // want, since it will be treated as an initializer list in further
+ // processing. Explicitly insert a cast here.
+ InitListExpr *List = cast<InitListExpr>(Result.take());
+ Result = Owned(CXXFunctionalCastExpr::Create(Context, List->getType(),
+ Expr::getValueKindForType(TInfo->getType()),
+ TInfo, TyBeginLoc, CK_NoOp,
+ List, /*Path=*/0, RParenLoc));
+ }
+
+ // FIXME: Improve AST representation?
+ return move(Result);
+}
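// Illustration (not part of this change; hypothetical names): the three forms
// handled above, in source terms:
//
//   struct Point { Point(int, int) {} };   // hypothetical class
//   double d = 3.9;
//   int a = int(d);                        // single expression: same as a cast
//   Point p = Point(1, 2);                 // class type construction
//   int z = int();                         // value-initialization, yields 0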
+
+/// doesUsualArrayDeleteWantSize - Answers whether the usual
+/// operator delete[] for the given type has a size_t parameter.
+static bool doesUsualArrayDeleteWantSize(Sema &S, SourceLocation loc,
+ QualType allocType) {
+ const RecordType *record =
+ allocType->getBaseElementTypeUnsafe()->getAs<RecordType>();
+ if (!record) return false;
+
+ // Try to find an operator delete[] in class scope.
+
+ DeclarationName deleteName =
+ S.Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete);
+ LookupResult ops(S, deleteName, loc, Sema::LookupOrdinaryName);
+ S.LookupQualifiedName(ops, record->getDecl());
+
+ // We're just doing this for information.
+ ops.suppressDiagnostics();
+
+ // Very likely: there's no operator delete[].
+ if (ops.empty()) return false;
+
+ // If it's ambiguous, it should be illegal to call operator delete[]
+ // on this thing, so it doesn't matter if we allocate extra space or not.
+ if (ops.isAmbiguous()) return false;
+
+ LookupResult::Filter filter = ops.makeFilter();
+ while (filter.hasNext()) {
+ NamedDecl *del = filter.next()->getUnderlyingDecl();
+
+ // C++0x [basic.stc.dynamic.deallocation]p2:
+ // A template instance is never a usual deallocation function,
+ // regardless of its signature.
+ if (isa<FunctionTemplateDecl>(del)) {
+ filter.erase();
+ continue;
+ }
+
+ // C++0x [basic.stc.dynamic.deallocation]p2:
+ // If class T does not declare [an operator delete[] with one
+ // parameter] but does declare a member deallocation function
+ // named operator delete[] with exactly two parameters, the
+ // second of which has type std::size_t, then this function
+ // is a usual deallocation function.
+ if (!cast<CXXMethodDecl>(del)->isUsualDeallocationFunction()) {
+ filter.erase();
+ continue;
+ }
+ }
+ filter.done();
+
+ if (!ops.isSingleResult()) return false;
+
+ const FunctionDecl *del = cast<FunctionDecl>(ops.getFoundDecl());
+ return (del->getNumParams() == 2);
+}
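// Illustration (not part of this change; hypothetical names): a class whose
// two-parameter operator delete[] is a usual deallocation function, so array
// new must also allocate room for the element count:
//
//   #include <cstddef>
//   struct Chunk {                                          // hypothetical class
//     void operator delete[](void *p, std::size_t bytes);   // "wants size"
//   };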
+
+/// \brief Parsed a C++ 'new' expression (C++ 5.3.4).
+///
+/// E.g.:
+/// @code new (memory) int[size][4] @endcode
+/// or
+/// @code ::new Foo(23, "hello") @endcode
+///
+/// \param StartLoc The first location of the expression.
+/// \param UseGlobal True if 'new' was prefixed with '::'.
+/// \param PlacementLParen Opening paren of the placement arguments.
+/// \param PlacementArgs Placement new arguments.
+/// \param PlacementRParen Closing paren of the placement arguments.
+/// \param TypeIdParens If the type is in parens, the source range.
+/// \param D The type to be allocated, as well as array dimensions.
+/// \param Initializer The initializing expression or initializer-list, or null
+/// if there is none.
+ExprResult
+Sema::ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
+ SourceLocation PlacementLParen, MultiExprArg PlacementArgs,
+ SourceLocation PlacementRParen, SourceRange TypeIdParens,
+ Declarator &D, Expr *Initializer) {
+ bool TypeContainsAuto = D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto;
+
+ Expr *ArraySize = 0;
+ // If the specified type is an array, unwrap it and save the expression.
+ if (D.getNumTypeObjects() > 0 &&
+ D.getTypeObject(0).Kind == DeclaratorChunk::Array) {
+ DeclaratorChunk &Chunk = D.getTypeObject(0);
+ if (TypeContainsAuto)
+ return ExprError(Diag(Chunk.Loc, diag::err_new_array_of_auto)
+ << D.getSourceRange());
+ if (Chunk.Arr.hasStatic)
+ return ExprError(Diag(Chunk.Loc, diag::err_static_illegal_in_new)
+ << D.getSourceRange());
+ if (!Chunk.Arr.NumElts)
+ return ExprError(Diag(Chunk.Loc, diag::err_array_new_needs_size)
+ << D.getSourceRange());
+
+ ArraySize = static_cast<Expr*>(Chunk.Arr.NumElts);
+ D.DropFirstTypeObject();
+ }
+
+ // Every dimension shall be of constant size.
+ if (ArraySize) {
+ for (unsigned I = 0, N = D.getNumTypeObjects(); I < N; ++I) {
+ if (D.getTypeObject(I).Kind != DeclaratorChunk::Array)
+ break;
+
+ DeclaratorChunk::ArrayTypeInfo &Array = D.getTypeObject(I).Arr;
+ if (Expr *NumElts = (Expr *)Array.NumElts) {
+ if (!NumElts->isTypeDependent() && !NumElts->isValueDependent()) {
+ Array.NumElts = VerifyIntegerConstantExpression(NumElts, 0,
+ PDiag(diag::err_new_array_nonconst)).take();
+ if (!Array.NumElts)
+ return ExprError();
+ }
+ }
+ }
+ }
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, /*Scope=*/0);
+ QualType AllocType = TInfo->getType();
+ if (D.isInvalidType())
+ return ExprError();
+
+ SourceRange DirectInitRange;
+ if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer))
+ DirectInitRange = List->getSourceRange();
+
+ return BuildCXXNew(StartLoc, UseGlobal,
+ PlacementLParen,
+ move(PlacementArgs),
+ PlacementRParen,
+ TypeIdParens,
+ AllocType,
+ TInfo,
+ ArraySize,
+ DirectInitRange,
+ Initializer,
+ TypeContainsAuto);
+}
+
+static bool isLegalArrayNewInitializer(CXXNewExpr::InitializationStyle Style,
+ Expr *Init) {
+ if (!Init)
+ return true;
+ if (ParenListExpr *PLE = dyn_cast<ParenListExpr>(Init))
+ return PLE->getNumExprs() == 0;
+ if (isa<ImplicitValueInitExpr>(Init))
+ return true;
+ else if (CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Init))
+ return !CCE->isListInitialization() &&
+ CCE->getConstructor()->isDefaultConstructor();
+ else if (Style == CXXNewExpr::ListInit) {
+ assert(isa<InitListExpr>(Init) &&
+ "Shouldn't create list CXXConstructExprs for arrays.");
+ return true;
+ }
+ return false;
+}
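// Illustration (not part of this change): what the check above accepts and
// rejects for array new (the braced form is C++11):
//
//   int *a = new int[3];          // no initializer: OK
//   int *b = new int[3]();        // empty parentheses: OK, value-initializes
//   int *c = new int[3]{1, 2};    // initializer list: OK in C++11
//   // int *d = new int[3](7);    // parenthesized arguments: rejected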
+
+ExprResult
+Sema::BuildCXXNew(SourceLocation StartLoc, bool UseGlobal,
+ SourceLocation PlacementLParen,
+ MultiExprArg PlacementArgs,
+ SourceLocation PlacementRParen,
+ SourceRange TypeIdParens,
+ QualType AllocType,
+ TypeSourceInfo *AllocTypeInfo,
+ Expr *ArraySize,
+ SourceRange DirectInitRange,
+ Expr *Initializer,
+ bool TypeMayContainAuto) {
+ SourceRange TypeRange = AllocTypeInfo->getTypeLoc().getSourceRange();
+
+ CXXNewExpr::InitializationStyle initStyle;
+ if (DirectInitRange.isValid()) {
+ assert(Initializer && "Have parens but no initializer.");
+ initStyle = CXXNewExpr::CallInit;
+ } else if (Initializer && isa<InitListExpr>(Initializer))
+ initStyle = CXXNewExpr::ListInit;
+ else {
+ // In template instantiation, the initializer could be a CXXDefaultArgExpr
+ // unwrapped from a CXXConstructExpr that was implicitly built. There is no
+ // particularly sane way we can handle this (especially since it can even
+ // occur for array new), so we throw the initializer away and have it be
+ // rebuilt.
+ if (Initializer && isa<CXXDefaultArgExpr>(Initializer))
+ Initializer = 0;
+ assert((!Initializer || isa<ImplicitValueInitExpr>(Initializer) ||
+ isa<CXXConstructExpr>(Initializer)) &&
+ "Initializer expression that cannot have been implicitly created.");
+ initStyle = CXXNewExpr::NoInit;
+ }
+
+ Expr **Inits = &Initializer;
+ unsigned NumInits = Initializer ? 1 : 0;
+ if (initStyle == CXXNewExpr::CallInit) {
+ if (ParenListExpr *List = dyn_cast<ParenListExpr>(Initializer)) {
+ Inits = List->getExprs();
+ NumInits = List->getNumExprs();
+ } else if (CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(Initializer)){
+ if (!isa<CXXTemporaryObjectExpr>(CCE)) {
+ // Can happen in template instantiation. Since this is just an implicit
+ // construction, we just take it apart and rebuild it.
+ Inits = CCE->getArgs();
+ NumInits = CCE->getNumArgs();
+ }
+ }
+ }
+
+ // C++0x [decl.spec.auto]p6. Deduce the type which 'auto' stands in for.
+ if (TypeMayContainAuto && AllocType->getContainedAutoType()) {
+ if (initStyle == CXXNewExpr::NoInit || NumInits == 0)
+ return ExprError(Diag(StartLoc, diag::err_auto_new_requires_ctor_arg)
+ << AllocType << TypeRange);
+ if (initStyle == CXXNewExpr::ListInit)
+ return ExprError(Diag(Inits[0]->getLocStart(),
+ diag::err_auto_new_requires_parens)
+ << AllocType << TypeRange);
+ if (NumInits > 1) {
+ Expr *FirstBad = Inits[1];
+ return ExprError(Diag(FirstBad->getLocStart(),
+ diag::err_auto_new_ctor_multiple_expressions)
+ << AllocType << TypeRange);
+ }
+ Expr *Deduce = Inits[0];
+ TypeSourceInfo *DeducedType = 0;
+ if (DeduceAutoType(AllocTypeInfo, Deduce, DeducedType) ==
+ DAR_Failed)
+ return ExprError(Diag(StartLoc, diag::err_auto_new_deduction_failure)
+ << AllocType << Deduce->getType()
+ << TypeRange << Deduce->getSourceRange());
+ if (!DeducedType)
+ return ExprError();
+
+ AllocTypeInfo = DeducedType;
+ AllocType = AllocTypeInfo->getType();
+ }
+
+ // Per C++0x [expr.new]p5, the type being constructed may be a
+ // typedef of an array type.
+ if (!ArraySize) {
+ if (const ConstantArrayType *Array
+ = Context.getAsConstantArrayType(AllocType)) {
+ ArraySize = IntegerLiteral::Create(Context, Array->getSize(),
+ Context.getSizeType(),
+ TypeRange.getEnd());
+ AllocType = Array->getElementType();
+ }
+ }
+
+ if (CheckAllocatedType(AllocType, TypeRange.getBegin(), TypeRange))
+ return ExprError();
+
+ if (initStyle == CXXNewExpr::ListInit && isStdInitializerList(AllocType, 0)) {
+ Diag(AllocTypeInfo->getTypeLoc().getBeginLoc(),
+ diag::warn_dangling_std_initializer_list)
+ << /*at end of FE*/0 << Inits[0]->getSourceRange();
+ }
+
+  // In ARC, infer a 'retaining' lifetime for the allocated object.
+ if (getLangOpts().ObjCAutoRefCount &&
+ AllocType.getObjCLifetime() == Qualifiers::OCL_None &&
+ AllocType->isObjCLifetimeType()) {
+ AllocType = Context.getLifetimeQualifiedType(AllocType,
+ AllocType->getObjCARCImplicitLifetime());
+ }
+
+ QualType ResultType = Context.getPointerType(AllocType);
+
+ // C++98 5.3.4p6: "The expression in a direct-new-declarator shall have
+ // integral or enumeration type with a non-negative value."
+ // C++11 [expr.new]p6: The expression [...] shall be of integral or unscoped
+ // enumeration type, or a class type for which a single non-explicit
+ // conversion function to integral or unscoped enumeration type exists.
+ if (ArraySize && !ArraySize->isTypeDependent()) {
+ ExprResult ConvertedSize = ConvertToIntegralOrEnumerationType(
+ StartLoc, ArraySize,
+ PDiag(diag::err_array_size_not_integral) << getLangOpts().CPlusPlus0x,
+ PDiag(diag::err_array_size_incomplete_type)
+ << ArraySize->getSourceRange(),
+ PDiag(diag::err_array_size_explicit_conversion),
+ PDiag(diag::note_array_size_conversion),
+ PDiag(diag::err_array_size_ambiguous_conversion),
+ PDiag(diag::note_array_size_conversion),
+ PDiag(getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_array_size_conversion :
+ diag::ext_array_size_conversion),
+ /*AllowScopedEnumerations*/ false);
+ if (ConvertedSize.isInvalid())
+ return ExprError();
+
+ ArraySize = ConvertedSize.take();
+ QualType SizeType = ArraySize->getType();
+ if (!SizeType->isIntegralOrUnscopedEnumerationType())
+ return ExprError();
+
+ // C++98 [expr.new]p7:
+ // The expression in a direct-new-declarator shall have integral type
+ // with a non-negative value.
+ //
+ // Let's see if this is a constant < 0. If so, we reject it out of
+ // hand. Otherwise, if it's not a constant, we must have an unparenthesized
+ // array type.
+ //
+ // Note: such a construct has well-defined semantics in C++11: it throws
+ // std::bad_array_new_length.
+ if (!ArraySize->isValueDependent()) {
+ llvm::APSInt Value;
+ // We've already performed any required implicit conversion to integer or
+ // unscoped enumeration type.
+ if (ArraySize->isIntegerConstantExpr(Value, Context)) {
+ if (Value < llvm::APSInt(
+ llvm::APInt::getNullValue(Value.getBitWidth()),
+ Value.isUnsigned())) {
+ if (getLangOpts().CPlusPlus0x)
+ Diag(ArraySize->getLocStart(),
+ diag::warn_typecheck_negative_array_new_size)
+ << ArraySize->getSourceRange();
+ else
+ return ExprError(Diag(ArraySize->getLocStart(),
+ diag::err_typecheck_negative_array_size)
+ << ArraySize->getSourceRange());
+ } else if (!AllocType->isDependentType()) {
+ unsigned ActiveSizeBits =
+ ConstantArrayType::getNumAddressingBits(Context, AllocType, Value);
+ if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context)) {
+ if (getLangOpts().CPlusPlus0x)
+ Diag(ArraySize->getLocStart(),
+ diag::warn_array_new_too_large)
+ << Value.toString(10)
+ << ArraySize->getSourceRange();
+ else
+ return ExprError(Diag(ArraySize->getLocStart(),
+ diag::err_array_too_large)
+ << Value.toString(10)
+ << ArraySize->getSourceRange());
+ }
+ }
+ } else if (TypeIdParens.isValid()) {
+ // Can't have dynamic array size when the type-id is in parentheses.
+ Diag(ArraySize->getLocStart(), diag::ext_new_paren_array_nonconst)
+ << ArraySize->getSourceRange()
+ << FixItHint::CreateRemoval(TypeIdParens.getBegin())
+ << FixItHint::CreateRemoval(TypeIdParens.getEnd());
+
+ TypeIdParens = SourceRange();
+ }
+ }
+
+ // ARC: warn about ABI issues.
+ if (getLangOpts().ObjCAutoRefCount) {
+ QualType BaseAllocType = Context.getBaseElementType(AllocType);
+ if (BaseAllocType.hasStrongOrWeakObjCLifetime())
+ Diag(StartLoc, diag::warn_err_new_delete_object_array)
+ << 0 << BaseAllocType;
+ }
+
+ // Note that we do *not* convert the argument in any way. It can
+ // be signed, larger than size_t, whatever.
+ }
+
+ FunctionDecl *OperatorNew = 0;
+ FunctionDecl *OperatorDelete = 0;
+ Expr **PlaceArgs = (Expr**)PlacementArgs.get();
+ unsigned NumPlaceArgs = PlacementArgs.size();
+
+ if (!AllocType->isDependentType() &&
+ !Expr::hasAnyTypeDependentArguments(
+ llvm::makeArrayRef(PlaceArgs, NumPlaceArgs)) &&
+ FindAllocationFunctions(StartLoc,
+ SourceRange(PlacementLParen, PlacementRParen),
+ UseGlobal, AllocType, ArraySize, PlaceArgs,
+ NumPlaceArgs, OperatorNew, OperatorDelete))
+ return ExprError();
+
+ // If this is an array allocation, compute whether the usual array
+ // deallocation function for the type has a size_t parameter.
+ bool UsualArrayDeleteWantsSize = false;
+ if (ArraySize && !AllocType->isDependentType())
+ UsualArrayDeleteWantsSize
+ = doesUsualArrayDeleteWantSize(*this, StartLoc, AllocType);
+
+ SmallVector<Expr *, 8> AllPlaceArgs;
+ if (OperatorNew) {
+ // Add default arguments, if any.
+ const FunctionProtoType *Proto =
+ OperatorNew->getType()->getAs<FunctionProtoType>();
+ VariadicCallType CallType =
+ Proto->isVariadic() ? VariadicFunction : VariadicDoesNotApply;
+
+ if (GatherArgumentsForCall(PlacementLParen, OperatorNew,
+ Proto, 1, PlaceArgs, NumPlaceArgs,
+ AllPlaceArgs, CallType))
+ return ExprError();
+
+ NumPlaceArgs = AllPlaceArgs.size();
+ if (NumPlaceArgs > 0)
+ PlaceArgs = &AllPlaceArgs[0];
+
+ DiagnoseSentinelCalls(OperatorNew, PlacementLParen,
+ PlaceArgs, NumPlaceArgs);
+
+ // FIXME: Missing call to CheckFunctionCall or equivalent
+ }
+
+ // Warn if the type is over-aligned and is being allocated by global operator
+ // new.
+ if (NumPlaceArgs == 0 && OperatorNew &&
+ (OperatorNew->isImplicit() ||
+ getSourceManager().isInSystemHeader(OperatorNew->getLocStart()))) {
+ if (unsigned Align = Context.getPreferredTypeAlign(AllocType.getTypePtr())){
+ unsigned SuitableAlign = Context.getTargetInfo().getSuitableAlign();
+ if (Align > SuitableAlign)
+ Diag(StartLoc, diag::warn_overaligned_type)
+ << AllocType
+ << unsigned(Align / Context.getCharWidth())
+ << unsigned(SuitableAlign / Context.getCharWidth());
+ }
+ }
+
+ QualType InitType = AllocType;
+ // Array 'new' can't have any initializers except empty parentheses.
+ // Initializer lists are also allowed, in C++11. Rely on the parser for the
+ // dialect distinction.
+ if (ResultType->isArrayType() || ArraySize) {
+ if (!isLegalArrayNewInitializer(initStyle, Initializer)) {
+ SourceRange InitRange(Inits[0]->getLocStart(),
+ Inits[NumInits - 1]->getLocEnd());
+ Diag(StartLoc, diag::err_new_array_init_args) << InitRange;
+ return ExprError();
+ }
+ if (InitListExpr *ILE = dyn_cast_or_null<InitListExpr>(Initializer)) {
+ // We do the initialization typechecking against the array type
+ // corresponding to the number of initializers + 1 (to also check
+ // default-initialization).
+ unsigned NumElements = ILE->getNumInits() + 1;
+ InitType = Context.getConstantArrayType(AllocType,
+ llvm::APInt(Context.getTypeSize(Context.getSizeType()), NumElements),
+ ArrayType::Normal, 0);
+ }
+ }
+
+ if (!AllocType->isDependentType() &&
+ !Expr::hasAnyTypeDependentArguments(
+ llvm::makeArrayRef(Inits, NumInits))) {
+ // C++11 [expr.new]p15:
+ // A new-expression that creates an object of type T initializes that
+ // object as follows:
+ InitializationKind Kind
+ // - If the new-initializer is omitted, the object is default-
+ // initialized (8.5); if no initialization is performed,
+ // the object has indeterminate value
+ = initStyle == CXXNewExpr::NoInit
+ ? InitializationKind::CreateDefault(TypeRange.getBegin())
+ // - Otherwise, the new-initializer is interpreted according to the
+ // initialization rules of 8.5 for direct-initialization.
+ : initStyle == CXXNewExpr::ListInit
+ ? InitializationKind::CreateDirectList(TypeRange.getBegin())
+ : InitializationKind::CreateDirect(TypeRange.getBegin(),
+ DirectInitRange.getBegin(),
+ DirectInitRange.getEnd());
+
+ InitializedEntity Entity
+ = InitializedEntity::InitializeNew(StartLoc, InitType);
+ InitializationSequence InitSeq(*this, Entity, Kind, Inits, NumInits);
+ ExprResult FullInit = InitSeq.Perform(*this, Entity, Kind,
+ MultiExprArg(Inits, NumInits));
+ if (FullInit.isInvalid())
+ return ExprError();
+
+ // FullInit is our initializer; strip off CXXBindTemporaryExprs, because
+ // we don't want the initialized object to be destructed.
+ if (CXXBindTemporaryExpr *Binder =
+ dyn_cast_or_null<CXXBindTemporaryExpr>(FullInit.get()))
+ FullInit = Owned(Binder->getSubExpr());
+
+ Initializer = FullInit.take();
+ }
+
+ // Mark the new and delete operators as referenced.
+ if (OperatorNew)
+ MarkFunctionReferenced(StartLoc, OperatorNew);
+ if (OperatorDelete)
+ MarkFunctionReferenced(StartLoc, OperatorDelete);
+
+ // C++0x [expr.new]p17:
+ // If the new expression creates an array of objects of class type,
+ // access and ambiguity control are done for the destructor.
+ QualType BaseAllocType = Context.getBaseElementType(AllocType);
+ if (ArraySize && !BaseAllocType->isDependentType()) {
+ if (const RecordType *BaseRecordType = BaseAllocType->getAs<RecordType>()) {
+ if (CXXDestructorDecl *dtor = LookupDestructor(
+ cast<CXXRecordDecl>(BaseRecordType->getDecl()))) {
+ MarkFunctionReferenced(StartLoc, dtor);
+ CheckDestructorAccess(StartLoc, dtor,
+ PDiag(diag::err_access_dtor)
+ << BaseAllocType);
+ DiagnoseUseOfDecl(dtor, StartLoc);
+ }
+ }
+ }
+
+ PlacementArgs.release();
+
+ return Owned(new (Context) CXXNewExpr(Context, UseGlobal, OperatorNew,
+ OperatorDelete,
+ UsualArrayDeleteWantsSize,
+ PlaceArgs, NumPlaceArgs, TypeIdParens,
+ ArraySize, initStyle, Initializer,
+ ResultType, AllocTypeInfo,
+ StartLoc, DirectInitRange));
+}
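// Illustration (not part of this change; hypothetical names): the 'auto'
// handling in BuildCXXNew requires exactly one parenthesized initializer from
// which the allocated type is deduced (C++11):
//
//   void demo() {
//     auto *p = new auto(42);       // allocated type deduced as int
//     // auto *q = new auto;        // error: nothing to deduce from
//     // auto *r = new auto(1, 2);  // error: multiple expressions
//     delete p;
//   }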
+
+/// \brief Checks that a type is suitable as the allocated type
+/// in a new-expression.
+bool Sema::CheckAllocatedType(QualType AllocType, SourceLocation Loc,
+ SourceRange R) {
+ // C++ 5.3.4p1: "[The] type shall be a complete object type, but not an
+  // abstract class type or array thereof."
+ if (AllocType->isFunctionType())
+ return Diag(Loc, diag::err_bad_new_type)
+ << AllocType << 0 << R;
+ else if (AllocType->isReferenceType())
+ return Diag(Loc, diag::err_bad_new_type)
+ << AllocType << 1 << R;
+ else if (!AllocType->isDependentType() &&
+ RequireCompleteType(Loc, AllocType,
+ PDiag(diag::err_new_incomplete_type)
+ << R))
+ return true;
+ else if (RequireNonAbstractType(Loc, AllocType,
+ diag::err_allocation_of_abstract_type))
+ return true;
+ else if (AllocType->isVariablyModifiedType())
+ return Diag(Loc, diag::err_variably_modified_new_type)
+ << AllocType;
+ else if (unsigned AddressSpace = AllocType.getAddressSpace())
+ return Diag(Loc, diag::err_address_space_qualified_new)
+ << AllocType.getUnqualifiedType() << AddressSpace;
+ else if (getLangOpts().ObjCAutoRefCount) {
+ if (const ArrayType *AT = Context.getAsArrayType(AllocType)) {
+ QualType BaseAllocType = Context.getBaseElementType(AT);
+ if (BaseAllocType.getObjCLifetime() == Qualifiers::OCL_None &&
+ BaseAllocType->isObjCLifetimeType())
+ return Diag(Loc, diag::err_arc_new_array_without_ownership)
+ << BaseAllocType;
+ }
+ }
+
+ return false;
+}
+
+/// \brief Determine whether the given function is a non-placement
+/// deallocation function.
+static bool isNonPlacementDeallocationFunction(FunctionDecl *FD) {
+ if (FD->isInvalidDecl())
+ return false;
+
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FD))
+ return Method->isUsualDeallocationFunction();
+
+ return ((FD->getOverloadedOperator() == OO_Delete ||
+ FD->getOverloadedOperator() == OO_Array_Delete) &&
+ FD->getNumParams() == 1);
+}
+
+/// FindAllocationFunctions - Finds the overloads of operator new and delete
+/// that are appropriate for the allocation.
+bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
+ bool UseGlobal, QualType AllocType,
+ bool IsArray, Expr **PlaceArgs,
+ unsigned NumPlaceArgs,
+ FunctionDecl *&OperatorNew,
+ FunctionDecl *&OperatorDelete) {
+ // --- Choosing an allocation function ---
+ // C++ 5.3.4p8 - 14 & 18
+ // 1) If UseGlobal is true, only look in the global scope. Else, also look
+ // in the scope of the allocated class.
+ // 2) If an array size is given, look for operator new[], else look for
+ // operator new.
+ // 3) The first argument is always size_t. Append the arguments from the
+ // placement form.
+
+ SmallVector<Expr*, 8> AllocArgs(1 + NumPlaceArgs);
+ // We don't care about the actual value of this argument.
+ // FIXME: Should the Sema create the expression and embed it in the syntax
+ // tree? Or should the consumer just recalculate the value?
+ IntegerLiteral Size(Context, llvm::APInt::getNullValue(
+ Context.getTargetInfo().getPointerWidth(0)),
+ Context.getSizeType(),
+ SourceLocation());
+ AllocArgs[0] = &Size;
+ std::copy(PlaceArgs, PlaceArgs + NumPlaceArgs, AllocArgs.begin() + 1);
+
+ // C++ [expr.new]p8:
+ // If the allocated type is a non-array type, the allocation
+ // function's name is operator new and the deallocation function's
+ // name is operator delete. If the allocated type is an array
+ // type, the allocation function's name is operator new[] and the
+ // deallocation function's name is operator delete[].
+ DeclarationName NewName = Context.DeclarationNames.getCXXOperatorName(
+ IsArray ? OO_Array_New : OO_New);
+ DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
+ IsArray ? OO_Array_Delete : OO_Delete);
+
+ QualType AllocElemType = Context.getBaseElementType(AllocType);
+
+ if (AllocElemType->isRecordType() && !UseGlobal) {
+ CXXRecordDecl *Record
+ = cast<CXXRecordDecl>(AllocElemType->getAs<RecordType>()->getDecl());
+ if (FindAllocationOverload(StartLoc, Range, NewName, &AllocArgs[0],
+ AllocArgs.size(), Record, /*AllowMissing=*/true,
+ OperatorNew))
+ return true;
+ }
+ if (!OperatorNew) {
+ // Didn't find a member overload. Look for a global one.
+ DeclareGlobalNewDelete();
+ DeclContext *TUDecl = Context.getTranslationUnitDecl();
+ if (FindAllocationOverload(StartLoc, Range, NewName, &AllocArgs[0],
+ AllocArgs.size(), TUDecl, /*AllowMissing=*/false,
+ OperatorNew))
+ return true;
+ }
+
+ // We don't need an operator delete if we're running under
+ // -fno-exceptions.
+ if (!getLangOpts().Exceptions) {
+ OperatorDelete = 0;
+ return false;
+ }
+
+ // FindAllocationOverload can change the passed in arguments, so we need to
+ // copy them back.
+ if (NumPlaceArgs > 0)
+ std::copy(&AllocArgs[1], AllocArgs.end(), PlaceArgs);
+
+ // C++ [expr.new]p19:
+ //
+ // If the new-expression begins with a unary :: operator, the
+ // deallocation function's name is looked up in the global
+ // scope. Otherwise, if the allocated type is a class type T or an
+ // array thereof, the deallocation function's name is looked up in
+ // the scope of T. If this lookup fails to find the name, or if
+ // the allocated type is not a class type or array thereof, the
+ // deallocation function's name is looked up in the global scope.
+ LookupResult FoundDelete(*this, DeleteName, StartLoc, LookupOrdinaryName);
+ if (AllocElemType->isRecordType() && !UseGlobal) {
+ CXXRecordDecl *RD
+ = cast<CXXRecordDecl>(AllocElemType->getAs<RecordType>()->getDecl());
+ LookupQualifiedName(FoundDelete, RD);
+ }
+ if (FoundDelete.isAmbiguous())
+ return true; // FIXME: clean up expressions?
+
+ if (FoundDelete.empty()) {
+ DeclareGlobalNewDelete();
+ LookupQualifiedName(FoundDelete, Context.getTranslationUnitDecl());
+ }
+
+ FoundDelete.suppressDiagnostics();
+
+ SmallVector<std::pair<DeclAccessPair,FunctionDecl*>, 2> Matches;
+
+ // Whether we're looking for a placement operator delete is dictated
+ // by whether we selected a placement operator new, not by whether
+ // we had explicit placement arguments. This matters for things like
+ // struct A { void *operator new(size_t, int = 0); ... };
+ // A *a = new A()
+ bool isPlacementNew = (NumPlaceArgs > 0 || OperatorNew->param_size() != 1);
+
+ if (isPlacementNew) {
+ // C++ [expr.new]p20:
+ // A declaration of a placement deallocation function matches the
+ // declaration of a placement allocation function if it has the
+ // same number of parameters and, after parameter transformations
+ // (8.3.5), all parameter types except the first are
+ // identical. [...]
+ //
+ // To perform this comparison, we compute the function type that
+ // the deallocation function should have, and use that type both
+ // for template argument deduction and for comparison purposes.
+ //
+ // FIXME: this comparison should ignore CC and the like.
+ QualType ExpectedFunctionType;
+ {
+ const FunctionProtoType *Proto
+ = OperatorNew->getType()->getAs<FunctionProtoType>();
+
+ SmallVector<QualType, 4> ArgTypes;
+ ArgTypes.push_back(Context.VoidPtrTy);
+ for (unsigned I = 1, N = Proto->getNumArgs(); I < N; ++I)
+ ArgTypes.push_back(Proto->getArgType(I));
+
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = Proto->isVariadic();
+
+ ExpectedFunctionType
+ = Context.getFunctionType(Context.VoidTy, ArgTypes.data(),
+ ArgTypes.size(), EPI);
+ }
+
+ for (LookupResult::iterator D = FoundDelete.begin(),
+ DEnd = FoundDelete.end();
+ D != DEnd; ++D) {
+ FunctionDecl *Fn = 0;
+ if (FunctionTemplateDecl *FnTmpl
+ = dyn_cast<FunctionTemplateDecl>((*D)->getUnderlyingDecl())) {
+ // Perform template argument deduction to try to match the
+ // expected function type.
+ TemplateDeductionInfo Info(Context, StartLoc);
+ if (DeduceTemplateArguments(FnTmpl, 0, ExpectedFunctionType, Fn, Info))
+ continue;
+ } else
+ Fn = cast<FunctionDecl>((*D)->getUnderlyingDecl());
+
+ if (Context.hasSameType(Fn->getType(), ExpectedFunctionType))
+ Matches.push_back(std::make_pair(D.getPair(), Fn));
+ }
+ } else {
+ // C++ [expr.new]p20:
+ // [...] Any non-placement deallocation function matches a
+ // non-placement allocation function. [...]
+ for (LookupResult::iterator D = FoundDelete.begin(),
+ DEnd = FoundDelete.end();
+ D != DEnd; ++D) {
+ if (FunctionDecl *Fn = dyn_cast<FunctionDecl>((*D)->getUnderlyingDecl()))
+ if (isNonPlacementDeallocationFunction(Fn))
+ Matches.push_back(std::make_pair(D.getPair(), Fn));
+ }
+ }
+
+ // C++ [expr.new]p20:
+ // [...] If the lookup finds a single matching deallocation
+ // function, that function will be called; otherwise, no
+ // deallocation function will be called.
+ if (Matches.size() == 1) {
+ OperatorDelete = Matches[0].second;
+
+ // C++0x [expr.new]p20:
+ // If the lookup finds the two-parameter form of a usual
+ // deallocation function (3.7.4.2) and that function, considered
+ // as a placement deallocation function, would have been
+ // selected as a match for the allocation function, the program
+ // is ill-formed.
+ if (NumPlaceArgs && getLangOpts().CPlusPlus0x &&
+ isNonPlacementDeallocationFunction(OperatorDelete)) {
+ Diag(StartLoc, diag::err_placement_new_non_placement_delete)
+ << SourceRange(PlaceArgs[0]->getLocStart(),
+ PlaceArgs[NumPlaceArgs - 1]->getLocEnd());
+ Diag(OperatorDelete->getLocation(), diag::note_previous_decl)
+ << DeleteName;
+ } else {
+ CheckAllocationAccess(StartLoc, Range, FoundDelete.getNamingClass(),
+ Matches[0].first);
+ }
+ }
+
+ return false;
+}
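// Illustration (not part of this change; hypothetical names): the placement
// matching above pairs a placement operator new with an operator delete whose
// trailing parameters are identical; that is the form called if the
// constructor throws:
//
//   #include <cstddef>
//   struct Arena;                                          // hypothetical allocator
//   struct Node {                                          // hypothetical class
//     void *operator new(std::size_t bytes, Arena &a);
//     void operator delete(void *p, Arena &a);             // matching placement delete
//   };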
+
+/// FindAllocationOverload - Find a fitting overload for the allocation
+/// function in the specified scope.
+bool Sema::FindAllocationOverload(SourceLocation StartLoc, SourceRange Range,
+ DeclarationName Name, Expr** Args,
+ unsigned NumArgs, DeclContext *Ctx,
+ bool AllowMissing, FunctionDecl *&Operator,
+ bool Diagnose) {
+ LookupResult R(*this, Name, StartLoc, LookupOrdinaryName);
+ LookupQualifiedName(R, Ctx);
+ if (R.empty()) {
+ if (AllowMissing || !Diagnose)
+ return false;
+ return Diag(StartLoc, diag::err_ovl_no_viable_function_in_call)
+ << Name << Range;
+ }
+
+ if (R.isAmbiguous())
+ return true;
+
+ R.suppressDiagnostics();
+
+ OverloadCandidateSet Candidates(StartLoc);
+ for (LookupResult::iterator Alloc = R.begin(), AllocEnd = R.end();
+ Alloc != AllocEnd; ++Alloc) {
+ // Even member operator new/delete are implicitly treated as
+ // static, so don't use AddMemberCandidate.
+ NamedDecl *D = (*Alloc)->getUnderlyingDecl();
+
+ if (FunctionTemplateDecl *FnTemplate = dyn_cast<FunctionTemplateDecl>(D)) {
+ AddTemplateOverloadCandidate(FnTemplate, Alloc.getPair(),
+ /*ExplicitTemplateArgs=*/0,
+ llvm::makeArrayRef(Args, NumArgs),
+ Candidates,
+ /*SuppressUserConversions=*/false);
+ continue;
+ }
+
+ FunctionDecl *Fn = cast<FunctionDecl>(D);
+ AddOverloadCandidate(Fn, Alloc.getPair(),
+ llvm::makeArrayRef(Args, NumArgs), Candidates,
+ /*SuppressUserConversions=*/false);
+ }
+
+ // Do the resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (Candidates.BestViableFunction(*this, StartLoc, Best)) {
+ case OR_Success: {
+ // Got one!
+ FunctionDecl *FnDecl = Best->Function;
+ MarkFunctionReferenced(StartLoc, FnDecl);
+ // The first argument is size_t, and the first parameter must be size_t,
+ // too. This is checked on declaration and can be assumed. (It can't be
+ // asserted on, though, since invalid decls are left in there.)
+ // Watch out for variadic allocator function.
+ unsigned NumArgsInFnDecl = FnDecl->getNumParams();
+ for (unsigned i = 0; (i < NumArgs && i < NumArgsInFnDecl); ++i) {
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
+ FnDecl->getParamDecl(i));
+
+ if (!Diagnose && !CanPerformCopyInitialization(Entity, Owned(Args[i])))
+ return true;
+
+ ExprResult Result
+ = PerformCopyInitialization(Entity, SourceLocation(), Owned(Args[i]));
+ if (Result.isInvalid())
+ return true;
+
+ Args[i] = Result.takeAs<Expr>();
+ }
+
+ Operator = FnDecl;
+
+ if (CheckAllocationAccess(StartLoc, Range, R.getNamingClass(),
+ Best->FoundDecl, Diagnose) == AR_inaccessible)
+ return true;
+
+ return false;
+ }
+
+ case OR_No_Viable_Function:
+ if (Diagnose) {
+ Diag(StartLoc, diag::err_ovl_no_viable_function_in_call)
+ << Name << Range;
+ Candidates.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
+ }
+ return true;
+
+ case OR_Ambiguous:
+ if (Diagnose) {
+ Diag(StartLoc, diag::err_ovl_ambiguous_call)
+ << Name << Range;
+ Candidates.NoteCandidates(*this, OCD_ViableCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
+ }
+ return true;
+
+ case OR_Deleted: {
+ if (Diagnose) {
+ Diag(StartLoc, diag::err_ovl_deleted_call)
+ << Best->Function->isDeleted()
+ << Name
+ << getDeletedOrUnavailableSuffix(Best->Function)
+ << Range;
+ Candidates.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
+ }
+ return true;
+ }
+ }
+ llvm_unreachable("Unreachable, bad result from BestViableFunction");
+}
+
+
+/// DeclareGlobalNewDelete - Declare the global forms of operator new and
+/// delete. These are:
+/// @code
+/// // C++03:
+/// void* operator new(std::size_t) throw(std::bad_alloc);
+/// void* operator new[](std::size_t) throw(std::bad_alloc);
+/// void operator delete(void *) throw();
+/// void operator delete[](void *) throw();
+/// // C++0x:
+/// void* operator new(std::size_t);
+/// void* operator new[](std::size_t);
+/// void operator delete(void *);
+/// void operator delete[](void *);
+/// @endcode
+/// C++0x operator delete is implicitly noexcept.
+/// Note that the placement and nothrow forms of new are *not* implicitly
+/// declared. Their use requires including \<new\>.
+void Sema::DeclareGlobalNewDelete() {
+ if (GlobalNewDeleteDeclared)
+ return;
+
+ // C++ [basic.std.dynamic]p2:
+ // [...] The following allocation and deallocation functions (18.4) are
+ // implicitly declared in global scope in each translation unit of a
+ // program
+ //
+ // C++03:
+ // void* operator new(std::size_t) throw(std::bad_alloc);
+ // void* operator new[](std::size_t) throw(std::bad_alloc);
+ // void operator delete(void*) throw();
+ // void operator delete[](void*) throw();
+ // C++0x:
+ // void* operator new(std::size_t);
+ // void* operator new[](std::size_t);
+ // void operator delete(void*);
+ // void operator delete[](void*);
+ //
+ // These implicit declarations introduce only the function names operator
+ // new, operator new[], operator delete, operator delete[].
+ //
+ // Here, we need to refer to std::bad_alloc, so we will implicitly declare
+ // "std" or "bad_alloc" as necessary to form the exception specification.
+ // However, we do not make these implicit declarations visible to name
+ // lookup.
+ // Note that the C++0x versions of operator delete are deallocation functions,
+ // and thus are implicitly noexcept.
+ if (!StdBadAlloc && !getLangOpts().CPlusPlus0x) {
+ // The "std::bad_alloc" class has not yet been declared, so build it
+ // implicitly.
+ StdBadAlloc = CXXRecordDecl::Create(Context, TTK_Class,
+ getOrCreateStdNamespace(),
+ SourceLocation(), SourceLocation(),
+ &PP.getIdentifierTable().get("bad_alloc"),
+ 0);
+ getStdBadAlloc()->setImplicit(true);
+ }
+
+ GlobalNewDeleteDeclared = true;
+
+ QualType VoidPtr = Context.getPointerType(Context.VoidTy);
+ QualType SizeT = Context.getSizeType();
+ bool AssumeSaneOperatorNew = getLangOpts().AssumeSaneOperatorNew;
+
+ DeclareGlobalAllocationFunction(
+ Context.DeclarationNames.getCXXOperatorName(OO_New),
+ VoidPtr, SizeT, AssumeSaneOperatorNew);
+ DeclareGlobalAllocationFunction(
+ Context.DeclarationNames.getCXXOperatorName(OO_Array_New),
+ VoidPtr, SizeT, AssumeSaneOperatorNew);
+ DeclareGlobalAllocationFunction(
+ Context.DeclarationNames.getCXXOperatorName(OO_Delete),
+ Context.VoidTy, VoidPtr);
+ DeclareGlobalAllocationFunction(
+ Context.DeclarationNames.getCXXOperatorName(OO_Array_Delete),
+ Context.VoidTy, VoidPtr);
+}
+
+/// DeclareGlobalAllocationFunction - Declares a single implicit global
+/// allocation function if it doesn't already exist.
+void Sema::DeclareGlobalAllocationFunction(DeclarationName Name,
+ QualType Return, QualType Argument,
+ bool AddMallocAttr) {
+ DeclContext *GlobalCtx = Context.getTranslationUnitDecl();
+
+ // Check if this function is already declared.
+ {
+ DeclContext::lookup_iterator Alloc, AllocEnd;
+ for (llvm::tie(Alloc, AllocEnd) = GlobalCtx->lookup(Name);
+ Alloc != AllocEnd; ++Alloc) {
+ // Only look at non-template functions, as it is the predefined,
+ // non-templated allocation function we are trying to declare here.
+ if (FunctionDecl *Func = dyn_cast<FunctionDecl>(*Alloc)) {
+ QualType InitialParamType =
+ Context.getCanonicalType(
+ Func->getParamDecl(0)->getType().getUnqualifiedType());
+ // FIXME: Do we need to check for default arguments here?
+ if (Func->getNumParams() == 1 && InitialParamType == Argument) {
+ if(AddMallocAttr && !Func->hasAttr<MallocAttr>())
+ Func->addAttr(::new (Context) MallocAttr(SourceLocation(), Context));
+ return;
+ }
+ }
+ }
+ }
+
+ QualType BadAllocType;
+ bool HasBadAllocExceptionSpec
+ = (Name.getCXXOverloadedOperator() == OO_New ||
+ Name.getCXXOverloadedOperator() == OO_Array_New);
+ if (HasBadAllocExceptionSpec && !getLangOpts().CPlusPlus0x) {
+ assert(StdBadAlloc && "Must have std::bad_alloc declared");
+ BadAllocType = Context.getTypeDeclType(getStdBadAlloc());
+ }
+
+ FunctionProtoType::ExtProtoInfo EPI;
+ if (HasBadAllocExceptionSpec) {
+ if (!getLangOpts().CPlusPlus0x) {
+ EPI.ExceptionSpecType = EST_Dynamic;
+ EPI.NumExceptions = 1;
+ EPI.Exceptions = &BadAllocType;
+ }
+ } else {
+ EPI.ExceptionSpecType = getLangOpts().CPlusPlus0x ?
+ EST_BasicNoexcept : EST_DynamicNone;
+ }
+
+ QualType FnType = Context.getFunctionType(Return, &Argument, 1, EPI);
+ FunctionDecl *Alloc =
+ FunctionDecl::Create(Context, GlobalCtx, SourceLocation(),
+ SourceLocation(), Name,
+ FnType, /*TInfo=*/0, SC_None,
+ SC_None, false, true);
+ Alloc->setImplicit();
+
+ if (AddMallocAttr)
+ Alloc->addAttr(::new (Context) MallocAttr(SourceLocation(), Context));
+
+ ParmVarDecl *Param = ParmVarDecl::Create(Context, Alloc, SourceLocation(),
+ SourceLocation(), 0,
+ Argument, /*TInfo=*/0,
+ SC_None, SC_None, 0);
+ Alloc->setParams(Param);
+
+ // FIXME: Also add this declaration to the IdentifierResolver, but
+ // make sure it is at the end of the chain to coincide with the
+ // global scope.
+ Context.getTranslationUnitDecl()->addDecl(Alloc);
+}
+
+bool Sema::FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
+ DeclarationName Name,
+ FunctionDecl* &Operator, bool Diagnose) {
+ LookupResult Found(*this, Name, StartLoc, LookupOrdinaryName);
+ // Try to find operator delete/operator delete[] in class scope.
+ LookupQualifiedName(Found, RD);
+
+ if (Found.isAmbiguous())
+ return true;
+
+ Found.suppressDiagnostics();
+
+ SmallVector<DeclAccessPair,4> Matches;
+ for (LookupResult::iterator F = Found.begin(), FEnd = Found.end();
+ F != FEnd; ++F) {
+ NamedDecl *ND = (*F)->getUnderlyingDecl();
+
+ // Ignore template operator delete members from the check for a usual
+ // deallocation function.
+ if (isa<FunctionTemplateDecl>(ND))
+ continue;
+
+ if (cast<CXXMethodDecl>(ND)->isUsualDeallocationFunction())
+ Matches.push_back(F.getPair());
+ }
+
+ // There's exactly one suitable operator; pick it.
+ if (Matches.size() == 1) {
+ Operator = cast<CXXMethodDecl>(Matches[0]->getUnderlyingDecl());
+
+ if (Operator->isDeleted()) {
+ if (Diagnose) {
+ Diag(StartLoc, diag::err_deleted_function_use);
+ NoteDeletedFunction(Operator);
+ }
+ return true;
+ }
+
+ if (CheckAllocationAccess(StartLoc, SourceRange(), Found.getNamingClass(),
+ Matches[0], Diagnose) == AR_inaccessible)
+ return true;
+
+ return false;
+
+ // We found multiple suitable operators; complain about the ambiguity.
+ } else if (!Matches.empty()) {
+ if (Diagnose) {
+ Diag(StartLoc, diag::err_ambiguous_suitable_delete_member_function_found)
+ << Name << RD;
+
+ for (SmallVectorImpl<DeclAccessPair>::iterator
+ F = Matches.begin(), FEnd = Matches.end(); F != FEnd; ++F)
+ Diag((*F)->getUnderlyingDecl()->getLocation(),
+ diag::note_member_declared_here) << Name;
+ }
+ return true;
+ }
+
+ // We did find operator delete/operator delete[] declarations, but
+ // none of them were suitable.
+ if (!Found.empty()) {
+ if (Diagnose) {
+ Diag(StartLoc, diag::err_no_suitable_delete_member_function_found)
+ << Name << RD;
+
+ for (LookupResult::iterator F = Found.begin(), FEnd = Found.end();
+ F != FEnd; ++F)
+ Diag((*F)->getUnderlyingDecl()->getLocation(),
+ diag::note_member_declared_here) << Name;
+ }
+ return true;
+ }
+
+ // Look for a global declaration.
+ DeclareGlobalNewDelete();
+ DeclContext *TUDecl = Context.getTranslationUnitDecl();
+
+ CXXNullPtrLiteralExpr Null(Context.VoidPtrTy, SourceLocation());
+ Expr* DeallocArgs[1];
+ DeallocArgs[0] = &Null;
+ if (FindAllocationOverload(StartLoc, SourceRange(), Name,
+ DeallocArgs, 1, TUDecl, !Diagnose,
+ Operator, Diagnose))
+ return true;
+
+ assert(Operator && "Did not find a deallocation function!");
+ return false;
+}
+
+/// ActOnCXXDelete - Parsed a C++ 'delete' expression (C++ 5.3.5), as in:
+/// @code ::delete ptr; @endcode
+/// or
+/// @code delete [] ptr; @endcode
+ExprResult
+Sema::ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal,
+ bool ArrayForm, Expr *ExE) {
+ // C++ [expr.delete]p1:
+ // The operand shall have a pointer type, or a class type having a single
+ // conversion function to a pointer type. The result has type void.
+ //
+ // DR599 amends "pointer type" to "pointer to object type" in both cases.
+
+ ExprResult Ex = Owned(ExE);
+ FunctionDecl *OperatorDelete = 0;
+ bool ArrayFormAsWritten = ArrayForm;
+ bool UsualArrayDeleteWantsSize = false;
+
+ if (!Ex.get()->isTypeDependent()) {
+ // Perform lvalue-to-rvalue cast, if needed.
+ Ex = DefaultLvalueConversion(Ex.take());
+
+ QualType Type = Ex.get()->getType();
+
+ if (const RecordType *Record = Type->getAs<RecordType>()) {
+ if (RequireCompleteType(StartLoc, Type,
+ PDiag(diag::err_delete_incomplete_class_type)))
+ return ExprError();
+
+ SmallVector<CXXConversionDecl*, 4> ObjectPtrConversions;
+
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ const UnresolvedSetImpl *Conversions = RD->getVisibleConversionFunctions();
+ for (UnresolvedSetImpl::iterator I = Conversions->begin(),
+ E = Conversions->end(); I != E; ++I) {
+ NamedDecl *D = I.getDecl();
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ // Skip over templated conversion functions; they aren't considered.
+ if (isa<FunctionTemplateDecl>(D))
+ continue;
+
+ CXXConversionDecl *Conv = cast<CXXConversionDecl>(D);
+
+ QualType ConvType = Conv->getConversionType().getNonReferenceType();
+ if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>())
+ if (ConvPtrType->getPointeeType()->isIncompleteOrObjectType())
+ ObjectPtrConversions.push_back(Conv);
+ }
+ if (ObjectPtrConversions.size() == 1) {
+ // We have a single conversion to a pointer-to-object type. Perform
+ // that conversion.
+ // TODO: don't redo the conversion calculation.
+ ExprResult Res =
+ PerformImplicitConversion(Ex.get(),
+ ObjectPtrConversions.front()->getConversionType(),
+ AA_Converting);
+ if (Res.isUsable()) {
+ Ex = move(Res);
+ Type = Ex.get()->getType();
+ }
+ }
+ else if (ObjectPtrConversions.size() > 1) {
+ Diag(StartLoc, diag::err_ambiguous_delete_operand)
+ << Type << Ex.get()->getSourceRange();
+ for (unsigned i= 0; i < ObjectPtrConversions.size(); i++)
+ NoteOverloadCandidate(ObjectPtrConversions[i]);
+ return ExprError();
+ }
+ }
+
+ if (!Type->isPointerType())
+ return ExprError(Diag(StartLoc, diag::err_delete_operand)
+ << Type << Ex.get()->getSourceRange());
+
+ QualType Pointee = Type->getAs<PointerType>()->getPointeeType();
+ QualType PointeeElem = Context.getBaseElementType(Pointee);
+
+ if (unsigned AddressSpace = Pointee.getAddressSpace())
+ return Diag(Ex.get()->getLocStart(),
+ diag::err_address_space_qualified_delete)
+ << Pointee.getUnqualifiedType() << AddressSpace;
+
+ CXXRecordDecl *PointeeRD = 0;
+ if (Pointee->isVoidType() && !isSFINAEContext()) {
+ // The C++ standard bans deleting a pointer to a non-object type, which
+ // effectively bans deletion of "void*". However, most compilers support
+ // this, so we treat it as a warning unless we're in a SFINAE context.
+ Diag(StartLoc, diag::ext_delete_void_ptr_operand)
+ << Type << Ex.get()->getSourceRange();
+ } else if (Pointee->isFunctionType() || Pointee->isVoidType()) {
+ return ExprError(Diag(StartLoc, diag::err_delete_operand)
+ << Type << Ex.get()->getSourceRange());
+ } else if (!Pointee->isDependentType()) {
+ if (!RequireCompleteType(StartLoc, Pointee,
+ PDiag(diag::warn_delete_incomplete)
+ << Ex.get()->getSourceRange())) {
+ if (const RecordType *RT = PointeeElem->getAs<RecordType>())
+ PointeeRD = cast<CXXRecordDecl>(RT->getDecl());
+ }
+ }
+
+ // C++ [expr.delete]p2:
+ // [Note: a pointer to a const type can be the operand of a
+ // delete-expression; it is not necessary to cast away the constness
+ // (5.2.11) of the pointer expression before it is used as the operand
+ // of the delete-expression. ]
+ if (!Context.hasSameType(Ex.get()->getType(), Context.VoidPtrTy))
+ Ex = Owned(ImplicitCastExpr::Create(Context, Context.VoidPtrTy,
+ CK_BitCast, Ex.take(), 0, VK_RValue));
+
+ if (Pointee->isArrayType() && !ArrayForm) {
+ Diag(StartLoc, diag::warn_delete_array_type)
+ << Type << Ex.get()->getSourceRange()
+ << FixItHint::CreateInsertion(PP.getLocForEndOfToken(StartLoc), "[]");
+ ArrayForm = true;
+ }
+
+ DeclarationName DeleteName = Context.DeclarationNames.getCXXOperatorName(
+ ArrayForm ? OO_Array_Delete : OO_Delete);
+
+ if (PointeeRD) {
+ if (!UseGlobal &&
+ FindDeallocationFunction(StartLoc, PointeeRD, DeleteName,
+ OperatorDelete))
+ return ExprError();
+
+ // If we're allocating an array of records, check whether the
+ // usual operator delete[] has a size_t parameter.
+ if (ArrayForm) {
+ // If the user specifically asked to use the global allocator,
+ // we'll need to do the lookup into the class.
+ if (UseGlobal)
+ UsualArrayDeleteWantsSize =
+ doesUsualArrayDeleteWantSize(*this, StartLoc, PointeeElem);
+
+ // Otherwise, the usual operator delete[] should be the
+ // function we just found.
+ else if (isa<CXXMethodDecl>(OperatorDelete))
+ UsualArrayDeleteWantsSize = (OperatorDelete->getNumParams() == 2);
+ }
+
+ if (!PointeeRD->hasIrrelevantDestructor())
+ if (CXXDestructorDecl *Dtor = LookupDestructor(PointeeRD)) {
+ MarkFunctionReferenced(StartLoc,
+ const_cast<CXXDestructorDecl*>(Dtor));
+ DiagnoseUseOfDecl(Dtor, StartLoc);
+ }
+
+ // C++ [expr.delete]p3:
+ // In the first alternative (delete object), if the static type of the
+ // object to be deleted is different from its dynamic type, the static
+ // type shall be a base class of the dynamic type of the object to be
+ // deleted and the static type shall have a virtual destructor or the
+ // behavior is undefined.
+ //
+      // Note: a final class cannot be derived from, so there is no issue there.
+ if (PointeeRD->isPolymorphic() && !PointeeRD->hasAttr<FinalAttr>()) {
+ CXXDestructorDecl *dtor = PointeeRD->getDestructor();
+ if (dtor && !dtor->isVirtual()) {
+ if (PointeeRD->isAbstract()) {
+ // If the class is abstract, we warn by default, because we're
+ // sure the code has undefined behavior.
+ Diag(StartLoc, diag::warn_delete_abstract_non_virtual_dtor)
+ << PointeeElem;
+ } else if (!ArrayForm) {
+ // Otherwise, if this is not an array delete, it's a bit suspect,
+ // but not necessarily wrong.
+ Diag(StartLoc, diag::warn_delete_non_virtual_dtor) << PointeeElem;
+ }
+ }
+ }
+
+ } else if (getLangOpts().ObjCAutoRefCount &&
+ PointeeElem->isObjCLifetimeType() &&
+ (PointeeElem.getObjCLifetime() == Qualifiers::OCL_Strong ||
+ PointeeElem.getObjCLifetime() == Qualifiers::OCL_Weak) &&
+ ArrayForm) {
+ Diag(StartLoc, diag::warn_err_new_delete_object_array)
+ << 1 << PointeeElem;
+ }
+
+ if (!OperatorDelete) {
+ // Look for a global declaration.
+ DeclareGlobalNewDelete();
+ DeclContext *TUDecl = Context.getTranslationUnitDecl();
+ Expr *Arg = Ex.get();
+ if (FindAllocationOverload(StartLoc, SourceRange(), DeleteName,
+ &Arg, 1, TUDecl, /*AllowMissing=*/false,
+ OperatorDelete))
+ return ExprError();
+ }
+
+ MarkFunctionReferenced(StartLoc, OperatorDelete);
+
+ // Check access and ambiguity of operator delete and destructor.
+ if (PointeeRD) {
+ if (CXXDestructorDecl *Dtor = LookupDestructor(PointeeRD)) {
+ CheckDestructorAccess(Ex.get()->getExprLoc(), Dtor,
+ PDiag(diag::err_access_dtor) << PointeeElem);
+ }
+ }
+
+ }
+
+ return Owned(new (Context) CXXDeleteExpr(Context.VoidTy, UseGlobal, ArrayForm,
+ ArrayFormAsWritten,
+ UsualArrayDeleteWantsSize,
+ OperatorDelete, Ex.take(), StartLoc));
+}
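// Illustration (not part of this change; hypothetical names): one of the cases
// diagnosed above is deleting through a pointer to a polymorphic class whose
// destructor is not virtual:
//
//   struct Base { virtual void f(); ~Base() {} };   // polymorphic, non-virtual dtor
//   struct Derived : Base {};
//   void destroy(Base *b) {
//     delete b;   // warned: undefined behavior if *b is actually a Derived
//   }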
+
+/// \brief Check the use of the given variable as a C++ condition in an if,
+/// while, do-while, or switch statement.
+ExprResult Sema::CheckConditionVariable(VarDecl *ConditionVar,
+ SourceLocation StmtLoc,
+ bool ConvertToBoolean) {
+ QualType T = ConditionVar->getType();
+
+ // C++ [stmt.select]p2:
+ // The declarator shall not specify a function or an array.
+ if (T->isFunctionType())
+ return ExprError(Diag(ConditionVar->getLocation(),
+ diag::err_invalid_use_of_function_type)
+ << ConditionVar->getSourceRange());
+ else if (T->isArrayType())
+ return ExprError(Diag(ConditionVar->getLocation(),
+ diag::err_invalid_use_of_array_type)
+ << ConditionVar->getSourceRange());
+
+ ExprResult Condition =
+ Owned(DeclRefExpr::Create(Context, NestedNameSpecifierLoc(),
+ SourceLocation(),
+ ConditionVar,
+ /*enclosing*/ false,
+ ConditionVar->getLocation(),
+ ConditionVar->getType().getNonReferenceType(),
+ VK_LValue));
+
+ MarkDeclRefReferenced(cast<DeclRefExpr>(Condition.get()));
+
+ if (ConvertToBoolean) {
+ Condition = CheckBooleanCondition(Condition.take(), StmtLoc);
+ if (Condition.isInvalid())
+ return ExprError();
+ }
+
+ return move(Condition);
+}
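// Illustration (not part of this change; hypothetical names): a condition
// variable of the kind checked above; it is implicitly converted to bool to
// control the branch:
//
//   int *lookup(int key);                 // hypothetical function
//   void use(int key) {
//     if (int *p = lookup(key)) {         // 'p' converted to bool here
//       *p += 1;
//     }
//   }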
+
+/// CheckCXXBooleanCondition - Convert the given condition expression to bool,
+/// returning an invalid result if the conversion is ill-formed.
+ExprResult Sema::CheckCXXBooleanCondition(Expr *CondExpr) {
+ // C++ 6.4p4:
+ // The value of a condition that is an initialized declaration in a statement
+ // other than a switch statement is the value of the declared variable
+ // implicitly converted to type bool. If that conversion is ill-formed, the
+ // program is ill-formed.
+ // The value of a condition that is an expression is the value of the
+ // expression, implicitly converted to bool.
+ //
+ return PerformContextuallyConvertToBool(CondExpr);
+}
+
+/// Helper function to determine whether this is the (deprecated) C++
+/// conversion from a string literal to a pointer to non-const char or
+/// non-const wchar_t (for narrow and wide string literals,
+/// respectively).
+bool
+Sema::IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType) {
+ // Look inside the implicit cast, if it exists.
+ if (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(From))
+ From = Cast->getSubExpr();
+
+ // A string literal (2.13.4) that is not a wide string literal can
+ // be converted to an rvalue of type "pointer to char"; a wide
+ // string literal can be converted to an rvalue of type "pointer
+ // to wchar_t" (C++ 4.2p2).
+ if (StringLiteral *StrLit = dyn_cast<StringLiteral>(From->IgnoreParens()))
+ if (const PointerType *ToPtrType = ToType->getAs<PointerType>())
+ if (const BuiltinType *ToPointeeType
+ = ToPtrType->getPointeeType()->getAs<BuiltinType>()) {
+ // This conversion is considered only when there is an
+ // explicit appropriate pointer target type (C++ 4.2p2).
+ if (!ToPtrType->getPointeeType().hasQualifiers()) {
+ switch (StrLit->getKind()) {
+ case StringLiteral::UTF8:
+ case StringLiteral::UTF16:
+ case StringLiteral::UTF32:
+ // We don't allow UTF literals to be implicitly converted
+ break;
+ case StringLiteral::Ascii:
+ return (ToPointeeType->getKind() == BuiltinType::Char_U ||
+ ToPointeeType->getKind() == BuiltinType::Char_S);
+ case StringLiteral::Wide:
+ return ToPointeeType->isWideCharType();
+ }
+ }
+ }
+
+ return false;
+}
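// Illustration (not part of this change): the deprecated conversion this
// predicate detects:
//
//   char *s = "hello";         // string literal to non-const char* (deprecated in C++03)
//   wchar_t *w = L"hello";     // wide string literal to non-const wchar_t*
//   // char *u = u8"hello";    // UTF-8 (and UTF-16/32) literals are never converted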
+
+static ExprResult BuildCXXCastArgument(Sema &S,
+ SourceLocation CastLoc,
+ QualType Ty,
+ CastKind Kind,
+ CXXMethodDecl *Method,
+ DeclAccessPair FoundDecl,
+ bool HadMultipleCandidates,
+ Expr *From) {
+ switch (Kind) {
+ default: llvm_unreachable("Unhandled cast kind!");
+ case CK_ConstructorConversion: {
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Method);
+ ASTOwningVector<Expr*> ConstructorArgs(S);
+
+ if (S.CompleteConstructorCall(Constructor,
+ MultiExprArg(&From, 1),
+ CastLoc, ConstructorArgs))
+ return ExprError();
+
+ S.CheckConstructorAccess(CastLoc, Constructor,
+ InitializedEntity::InitializeTemporary(Ty),
+ Constructor->getAccess());
+
+ ExprResult Result
+ = S.BuildCXXConstructExpr(CastLoc, Ty, cast<CXXConstructorDecl>(Method),
+ move_arg(ConstructorArgs),
+ HadMultipleCandidates, /*ZeroInit*/ false,
+ CXXConstructExpr::CK_Complete, SourceRange());
+ if (Result.isInvalid())
+ return ExprError();
+
+ return S.MaybeBindToTemporary(Result.takeAs<Expr>());
+ }
+
+ case CK_UserDefinedConversion: {
+ assert(!From->getType()->isPointerType() && "Arg can't have pointer type!");
+
+ // Create an implicit call expr that calls it.
+ CXXConversionDecl *Conv = cast<CXXConversionDecl>(Method);
+ ExprResult Result = S.BuildCXXMemberCallExpr(From, FoundDecl, Conv,
+ HadMultipleCandidates);
+ if (Result.isInvalid())
+ return ExprError();
+ // Record usage of conversion in an implicit cast.
+ Result = S.Owned(ImplicitCastExpr::Create(S.Context,
+ Result.get()->getType(),
+ CK_UserDefinedConversion,
+ Result.get(), 0,
+ Result.get()->getValueKind()));
+
+ S.CheckMemberOperatorAccess(CastLoc, From, /*arg*/ 0, FoundDecl);
+
+ return S.MaybeBindToTemporary(Result.get());
+ }
+ }
+}
+
+/// PerformImplicitConversion - Perform an implicit conversion of the
+/// expression From to the type ToType using the pre-computed implicit
+/// conversion sequence ICS. Returns the converted
+/// expression. Action is the kind of conversion we're performing,
+/// used in the error message.
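+///
+/// For illustration (A is a hypothetical class), a user-defined sequence like
+///
+///   struct A { A(long); };
+///   A a = 1;   // Before: int -> long, user-defined: A(long), After: identity
+///
+/// is performed as the Before and After standard pieces around the
+/// user-defined step built by BuildCXXCastArgument below.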
+ExprResult
+Sema::PerformImplicitConversion(Expr *From, QualType ToType,
+ const ImplicitConversionSequence &ICS,
+ AssignmentAction Action,
+ CheckedConversionKind CCK) {
+ switch (ICS.getKind()) {
+ case ImplicitConversionSequence::StandardConversion: {
+ ExprResult Res = PerformImplicitConversion(From, ToType, ICS.Standard,
+ Action, CCK);
+ if (Res.isInvalid())
+ return ExprError();
+ From = Res.take();
+ break;
+ }
+
+ case ImplicitConversionSequence::UserDefinedConversion: {
+
+ FunctionDecl *FD = ICS.UserDefined.ConversionFunction;
+ CastKind CastKind;
+ QualType BeforeToType;
+ assert(FD && "FIXME: aggregate initialization from init list");
+ if (const CXXConversionDecl *Conv = dyn_cast<CXXConversionDecl>(FD)) {
+ CastKind = CK_UserDefinedConversion;
+
+ // If the user-defined conversion is specified by a conversion function,
+ // the initial standard conversion sequence converts the source type to
+ // the implicit object parameter of the conversion function.
+ BeforeToType = Context.getTagDeclType(Conv->getParent());
+ } else {
+ const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(FD);
+ CastKind = CK_ConstructorConversion;
+      // If the argument was matched by an ellipsis parameter, there is no
+      // initial standard conversion to perform.
+ if (!ICS.UserDefined.EllipsisConversion) {
+ // If the user-defined conversion is specified by a constructor, the
+ // initial standard conversion sequence converts the source type to the
+ // type required by the argument of the constructor
+ BeforeToType = Ctor->getParamDecl(0)->getType().getNonReferenceType();
+ }
+ }
+    // Watch out for ellipsis conversions.
+ if (!ICS.UserDefined.EllipsisConversion) {
+ ExprResult Res =
+ PerformImplicitConversion(From, BeforeToType,
+ ICS.UserDefined.Before, AA_Converting,
+ CCK);
+ if (Res.isInvalid())
+ return ExprError();
+ From = Res.take();
+ }
+
+ ExprResult CastArg
+ = BuildCXXCastArgument(*this,
+ From->getLocStart(),
+ ToType.getNonReferenceType(),
+ CastKind, cast<CXXMethodDecl>(FD),
+ ICS.UserDefined.FoundConversionFunction,
+ ICS.UserDefined.HadMultipleCandidates,
+ From);
+
+ if (CastArg.isInvalid())
+ return ExprError();
+
+ From = CastArg.take();
+
+ return PerformImplicitConversion(From, ToType, ICS.UserDefined.After,
+ AA_Converting, CCK);
+ }
+
+ case ImplicitConversionSequence::AmbiguousConversion:
+ ICS.DiagnoseAmbiguousConversion(*this, From->getExprLoc(),
+ PDiag(diag::err_typecheck_ambiguous_condition)
+ << From->getSourceRange());
+ return ExprError();
+
+ case ImplicitConversionSequence::EllipsisConversion:
+ llvm_unreachable("Cannot perform an ellipsis conversion");
+
+ case ImplicitConversionSequence::BadConversion:
+ return ExprError();
+ }
+
+ // Everything went well.
+ return Owned(From);
+}
+
+/// PerformImplicitConversion - Perform an implicit conversion of the
+/// expression From to the type ToType by following the standard
+/// conversion sequence SCS. Returns the converted
+/// expression. Action is the kind of conversion we're performing,
+/// used in error messages.
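+///
+/// For illustration (buf is a hypothetical array), the initialization
+///
+///   char buf[4];
+///   const char *p = buf;
+///
+/// decomposes into SCS.First = array-to-pointer, SCS.Second = identity and
+/// SCS.Third = qualification, the three steps performed in order below.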
+ExprResult
+Sema::PerformImplicitConversion(Expr *From, QualType ToType,
+ const StandardConversionSequence& SCS,
+ AssignmentAction Action,
+ CheckedConversionKind CCK) {
+ bool CStyle = (CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast);
+
+ // Overall FIXME: we are recomputing too many types here and doing far too
+ // much extra work. What this means is that we need to keep track of more
+ // information that is computed when we try the implicit conversion initially,
+ // so that we don't need to recompute anything here.
+ QualType FromType = From->getType();
+
+ if (SCS.CopyConstructor) {
+ // FIXME: When can ToType be a reference type?
+ assert(!ToType->isReferenceType());
+ if (SCS.Second == ICK_Derived_To_Base) {
+ ASTOwningVector<Expr*> ConstructorArgs(*this);
+ if (CompleteConstructorCall(cast<CXXConstructorDecl>(SCS.CopyConstructor),
+ MultiExprArg(*this, &From, 1),
+ /*FIXME:ConstructLoc*/SourceLocation(),
+ ConstructorArgs))
+ return ExprError();
+ return BuildCXXConstructExpr(/*FIXME:ConstructLoc*/SourceLocation(),
+ ToType, SCS.CopyConstructor,
+ move_arg(ConstructorArgs),
+ /*HadMultipleCandidates*/ false,
+ /*ZeroInit*/ false,
+ CXXConstructExpr::CK_Complete,
+ SourceRange());
+ }
+ return BuildCXXConstructExpr(/*FIXME:ConstructLoc*/SourceLocation(),
+ ToType, SCS.CopyConstructor,
+ MultiExprArg(*this, &From, 1),
+ /*HadMultipleCandidates*/ false,
+ /*ZeroInit*/ false,
+ CXXConstructExpr::CK_Complete,
+ SourceRange());
+ }
+
+ // Resolve overloaded function references.
+ if (Context.hasSameType(FromType, Context.OverloadTy)) {
+ DeclAccessPair Found;
+ FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(From, ToType,
+ true, Found);
+ if (!Fn)
+ return ExprError();
+
+ if (DiagnoseUseOfDecl(Fn, From->getLocStart()))
+ return ExprError();
+
+ From = FixOverloadedFunctionReference(From, Found, Fn);
+ FromType = From->getType();
+ }
+
+ // Perform the first implicit conversion.
+ switch (SCS.First) {
+ case ICK_Identity:
+ // Nothing to do.
+ break;
+
+ case ICK_Lvalue_To_Rvalue: {
+ assert(From->getObjectKind() != OK_ObjCProperty);
+ FromType = FromType.getUnqualifiedType();
+ ExprResult FromRes = DefaultLvalueConversion(From);
+ assert(!FromRes.isInvalid() && "Can't perform deduced conversion?!");
+ From = FromRes.take();
+ break;
+ }
+
+ case ICK_Array_To_Pointer:
+ FromType = Context.getArrayDecayedType(FromType);
+ From = ImpCastExprToType(From, FromType, CK_ArrayToPointerDecay,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ break;
+
+ case ICK_Function_To_Pointer:
+ FromType = Context.getPointerType(FromType);
+ From = ImpCastExprToType(From, FromType, CK_FunctionToPointerDecay,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ break;
+
+ default:
+ llvm_unreachable("Improper first standard conversion");
+ }
+
+ // Perform the second implicit conversion
+ switch (SCS.Second) {
+ case ICK_Identity:
+ // If both sides are functions (or pointers/references to them), there could
+ // be incompatible exception declarations.
+ if (CheckExceptionSpecCompatibility(From, ToType))
+ return ExprError();
+ // Nothing else to do.
+ break;
+
+ case ICK_NoReturn_Adjustment:
+ // If both sides are functions (or pointers/references to them), there could
+ // be incompatible exception declarations.
+ if (CheckExceptionSpecCompatibility(From, ToType))
+ return ExprError();
+
+ From = ImpCastExprToType(From, ToType, CK_NoOp,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ break;
+
+ case ICK_Integral_Promotion:
+ case ICK_Integral_Conversion:
+ From = ImpCastExprToType(From, ToType, CK_IntegralCast,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ break;
+
+ case ICK_Floating_Promotion:
+ case ICK_Floating_Conversion:
+ From = ImpCastExprToType(From, ToType, CK_FloatingCast,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ break;
+
+ case ICK_Complex_Promotion:
+ case ICK_Complex_Conversion: {
+ QualType FromEl = From->getType()->getAs<ComplexType>()->getElementType();
+ QualType ToEl = ToType->getAs<ComplexType>()->getElementType();
+ CastKind CK;
+ if (FromEl->isRealFloatingType()) {
+ if (ToEl->isRealFloatingType())
+ CK = CK_FloatingComplexCast;
+ else
+ CK = CK_FloatingComplexToIntegralComplex;
+ } else if (ToEl->isRealFloatingType()) {
+ CK = CK_IntegralComplexToFloatingComplex;
+ } else {
+ CK = CK_IntegralComplexCast;
+ }
+ From = ImpCastExprToType(From, ToType, CK,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ break;
+ }
+
+ case ICK_Floating_Integral:
+ if (ToType->isRealFloatingType())
+ From = ImpCastExprToType(From, ToType, CK_IntegralToFloating,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ else
+ From = ImpCastExprToType(From, ToType, CK_FloatingToIntegral,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ break;
+
+ case ICK_Compatible_Conversion:
+ From = ImpCastExprToType(From, ToType, CK_NoOp,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ break;
+
+ case ICK_Writeback_Conversion:
+ case ICK_Pointer_Conversion: {
+ if (SCS.IncompatibleObjC && Action != AA_Casting) {
+ // Diagnose incompatible Objective-C conversions
+ if (Action == AA_Initializing || Action == AA_Assigning)
+ Diag(From->getLocStart(),
+ diag::ext_typecheck_convert_incompatible_pointer)
+ << ToType << From->getType() << Action
+ << From->getSourceRange() << 0;
+ else
+ Diag(From->getLocStart(),
+ diag::ext_typecheck_convert_incompatible_pointer)
+ << From->getType() << ToType << Action
+ << From->getSourceRange() << 0;
+
+ if (From->getType()->isObjCObjectPointerType() &&
+ ToType->isObjCObjectPointerType())
+ EmitRelatedResultTypeNote(From);
+ }
+ else if (getLangOpts().ObjCAutoRefCount &&
+ !CheckObjCARCUnavailableWeakConversion(ToType,
+ From->getType())) {
+ if (Action == AA_Initializing)
+ Diag(From->getLocStart(),
+ diag::err_arc_weak_unavailable_assign);
+ else
+ Diag(From->getLocStart(),
+ diag::err_arc_convesion_of_weak_unavailable)
+ << (Action == AA_Casting) << From->getType() << ToType
+ << From->getSourceRange();
+ }
+
+ CastKind Kind = CK_Invalid;
+ CXXCastPath BasePath;
+ if (CheckPointerConversion(From, ToType, Kind, BasePath, CStyle))
+ return ExprError();
+
+ // Make sure we extend blocks if necessary.
+ // FIXME: doing this here is really ugly.
+ if (Kind == CK_BlockPointerToObjCPointerCast) {
+ ExprResult E = From;
+ (void) PrepareCastToObjCObjectPointer(E);
+ From = E.take();
+ }
+
+ From = ImpCastExprToType(From, ToType, Kind, VK_RValue, &BasePath, CCK)
+ .take();
+ break;
+ }
+
+ case ICK_Pointer_Member: {
+ CastKind Kind = CK_Invalid;
+ CXXCastPath BasePath;
+ if (CheckMemberPointerConversion(From, ToType, Kind, BasePath, CStyle))
+ return ExprError();
+ if (CheckExceptionSpecCompatibility(From, ToType))
+ return ExprError();
+ From = ImpCastExprToType(From, ToType, Kind, VK_RValue, &BasePath, CCK)
+ .take();
+ break;
+ }
+
+ case ICK_Boolean_Conversion:
+ // Perform half-to-boolean conversion via float.
+ if (From->getType()->isHalfType()) {
+ From = ImpCastExprToType(From, Context.FloatTy, CK_FloatingCast).take();
+ FromType = Context.FloatTy;
+ }
+
+ From = ImpCastExprToType(From, Context.BoolTy,
+ ScalarTypeToBooleanCastKind(FromType),
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ break;
+
+ case ICK_Derived_To_Base: {
+ CXXCastPath BasePath;
+ if (CheckDerivedToBaseConversion(From->getType(),
+ ToType.getNonReferenceType(),
+ From->getLocStart(),
+ From->getSourceRange(),
+ &BasePath,
+ CStyle))
+ return ExprError();
+
+ From = ImpCastExprToType(From, ToType.getNonReferenceType(),
+ CK_DerivedToBase, From->getValueKind(),
+ &BasePath, CCK).take();
+ break;
+ }
+
+ case ICK_Vector_Conversion:
+ From = ImpCastExprToType(From, ToType, CK_BitCast,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ break;
+
+ case ICK_Vector_Splat:
+ From = ImpCastExprToType(From, ToType, CK_VectorSplat,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ break;
+
+ case ICK_Complex_Real:
+ // Case 1. x -> _Complex y
+ if (const ComplexType *ToComplex = ToType->getAs<ComplexType>()) {
+ QualType ElType = ToComplex->getElementType();
+ bool isFloatingComplex = ElType->isRealFloatingType();
+
+ // x -> y
+ if (Context.hasSameUnqualifiedType(ElType, From->getType())) {
+ // do nothing
+ } else if (From->getType()->isRealFloatingType()) {
+ From = ImpCastExprToType(From, ElType,
+ isFloatingComplex ? CK_FloatingCast : CK_FloatingToIntegral).take();
+ } else {
+ assert(From->getType()->isIntegerType());
+ From = ImpCastExprToType(From, ElType,
+ isFloatingComplex ? CK_IntegralToFloating : CK_IntegralCast).take();
+ }
+ // y -> _Complex y
+ From = ImpCastExprToType(From, ToType,
+ isFloatingComplex ? CK_FloatingRealToComplex
+ : CK_IntegralRealToComplex).take();
+
+ // Case 2. _Complex x -> y
+ } else {
+ const ComplexType *FromComplex = From->getType()->getAs<ComplexType>();
+ assert(FromComplex);
+
+ QualType ElType = FromComplex->getElementType();
+ bool isFloatingComplex = ElType->isRealFloatingType();
+
+ // _Complex x -> x
+ From = ImpCastExprToType(From, ElType,
+ isFloatingComplex ? CK_FloatingComplexToReal
+ : CK_IntegralComplexToReal,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+
+ // x -> y
+ if (Context.hasSameUnqualifiedType(ElType, ToType)) {
+ // do nothing
+ } else if (ToType->isRealFloatingType()) {
+ From = ImpCastExprToType(From, ToType,
+ isFloatingComplex ? CK_FloatingCast : CK_IntegralToFloating,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ } else {
+ assert(ToType->isIntegerType());
+ From = ImpCastExprToType(From, ToType,
+ isFloatingComplex ? CK_FloatingToIntegral : CK_IntegralCast,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ }
+ }
+ break;
+
+ case ICK_Block_Pointer_Conversion: {
+ From = ImpCastExprToType(From, ToType.getUnqualifiedType(), CK_BitCast,
+ VK_RValue, /*BasePath=*/0, CCK).take();
+ break;
+ }
+
+ case ICK_TransparentUnionConversion: {
+ ExprResult FromRes = Owned(From);
+ Sema::AssignConvertType ConvTy =
+ CheckTransparentUnionArgumentConstraints(ToType, FromRes);
+ if (FromRes.isInvalid())
+ return ExprError();
+ From = FromRes.take();
+ assert ((ConvTy == Sema::Compatible) &&
+ "Improper transparent union conversion");
+ (void)ConvTy;
+ break;
+ }
+
+ case ICK_Lvalue_To_Rvalue:
+ case ICK_Array_To_Pointer:
+ case ICK_Function_To_Pointer:
+ case ICK_Qualification:
+ case ICK_Num_Conversion_Kinds:
+ llvm_unreachable("Improper second standard conversion");
+ }
+
+ switch (SCS.Third) {
+ case ICK_Identity:
+ // Nothing to do.
+ break;
+
+ case ICK_Qualification: {
+    // The qualification conversion keeps the value category of the inner
+    // expression when the target type is a reference; otherwise the result
+    // is an rvalue.
+ ExprValueKind VK = ToType->isReferenceType() ?
+ From->getValueKind() : VK_RValue;
+ From = ImpCastExprToType(From, ToType.getNonLValueExprType(Context),
+ CK_NoOp, VK, /*BasePath=*/0, CCK).take();
+
+ if (SCS.DeprecatedStringLiteralToCharPtr &&
+ !getLangOpts().WritableStrings)
+ Diag(From->getLocStart(), diag::warn_deprecated_string_literal_conversion)
+ << ToType.getNonReferenceType();
+
+ break;
+ }
+
+ default:
+ llvm_unreachable("Improper third standard conversion");
+ }
+
+ // If this conversion sequence involved a scalar -> atomic conversion, perform
+ // that conversion now.
+ if (const AtomicType *ToAtomic = ToType->getAs<AtomicType>())
+ if (Context.hasSameType(ToAtomic->getValueType(), From->getType()))
+ From = ImpCastExprToType(From, ToType, CK_NonAtomicToAtomic, VK_RValue, 0,
+ CCK).take();
+
+ return Owned(From);
+}
+
+ExprResult Sema::ActOnUnaryTypeTrait(UnaryTypeTrait UTT,
+ SourceLocation KWLoc,
+ ParsedType Ty,
+ SourceLocation RParen) {
+ TypeSourceInfo *TSInfo;
+ QualType T = GetTypeFromParser(Ty, &TSInfo);
+
+ if (!TSInfo)
+ TSInfo = Context.getTrivialTypeSourceInfo(T);
+ return BuildUnaryTypeTrait(UTT, KWLoc, TSInfo, RParen);
+}
+
+/// \brief Check the completeness of a type in a unary type trait.
+///
+/// If the particular type trait requires a complete type, tries to complete
+/// it. If completing the type fails, a diagnostic is emitted and false is
+/// returned. If completing the type succeeds or no completion was required,
+/// returns true.
+static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S,
+ UnaryTypeTrait UTT,
+ SourceLocation Loc,
+ QualType ArgTy) {
+ // C++0x [meta.unary.prop]p3:
+ // For all of the class templates X declared in this Clause, instantiating
+ // that template with a template argument that is a class template
+ // specialization may result in the implicit instantiation of the template
+ // argument if and only if the semantics of X require that the argument
+ // must be a complete type.
+ // We apply this rule to all the type trait expressions used to implement
+ // these class templates. We also try to follow any GCC documented behavior
+ // in these expressions to ensure portability of standard libraries.
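+  //
+  // For illustration (Fwd is a hypothetical incomplete class):
+  //
+  //   struct Fwd;
+  //   __is_class(Fwd)   // fine: completeness cannot change the answer
+  //   __is_pod(Fwd)     // error: __is_pod requires a complete type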
+ switch (UTT) {
+ // is_complete_type somewhat obviously cannot require a complete type.
+ case UTT_IsCompleteType:
+ // Fall-through
+
+ // These traits are modeled on the type predicates in C++0x
+ // [meta.unary.cat] and [meta.unary.comp]. They are not specified as
+ // requiring a complete type, as whether or not they return true cannot be
+ // impacted by the completeness of the type.
+ case UTT_IsVoid:
+ case UTT_IsIntegral:
+ case UTT_IsFloatingPoint:
+ case UTT_IsArray:
+ case UTT_IsPointer:
+ case UTT_IsLvalueReference:
+ case UTT_IsRvalueReference:
+ case UTT_IsMemberFunctionPointer:
+ case UTT_IsMemberObjectPointer:
+ case UTT_IsEnum:
+ case UTT_IsUnion:
+ case UTT_IsClass:
+ case UTT_IsFunction:
+ case UTT_IsReference:
+ case UTT_IsArithmetic:
+ case UTT_IsFundamental:
+ case UTT_IsObject:
+ case UTT_IsScalar:
+ case UTT_IsCompound:
+ case UTT_IsMemberPointer:
+ // Fall-through
+
+  // These traits are modeled on type predicates in C++0x [meta.unary.prop],
+  // which requires a complete type for some of its traits. However, the
+  // completeness of the type cannot affect these traits' semantics, so they
+  // don't require it. This matches the comments on these traits in Table 49.
+ case UTT_IsConst:
+ case UTT_IsVolatile:
+ case UTT_IsSigned:
+ case UTT_IsUnsigned:
+ return true;
+
+ // C++0x [meta.unary.prop] Table 49 requires the following traits to be
+ // applied to a complete type.
+ case UTT_IsTrivial:
+ case UTT_IsTriviallyCopyable:
+ case UTT_IsStandardLayout:
+ case UTT_IsPOD:
+ case UTT_IsLiteral:
+ case UTT_IsEmpty:
+ case UTT_IsPolymorphic:
+ case UTT_IsAbstract:
+ // Fall-through
+
+ // These traits require a complete type.
+ case UTT_IsFinal:
+
+  // These trait expressions are designed to help implement predicates in
+  // [meta.unary.prop] despite not being named the same. They are specified
+  // by both GCC and the Embarcadero C++ compiler, and require a complete
+  // type because the C++0x type predicates they help implement require one.
+ case UTT_HasNothrowAssign:
+ case UTT_HasNothrowConstructor:
+ case UTT_HasNothrowCopy:
+ case UTT_HasTrivialAssign:
+ case UTT_HasTrivialDefaultConstructor:
+ case UTT_HasTrivialCopy:
+ case UTT_HasTrivialDestructor:
+ case UTT_HasVirtualDestructor:
+ // Arrays of unknown bound are expressly allowed.
+ QualType ElTy = ArgTy;
+ if (ArgTy->isIncompleteArrayType())
+ ElTy = S.Context.getAsArrayType(ArgTy)->getElementType();
+
+ // The void type is expressly allowed.
+ if (ElTy->isVoidType())
+ return true;
+
+ return !S.RequireCompleteType(
+ Loc, ElTy, diag::err_incomplete_type_used_in_type_trait_expr);
+ }
+ llvm_unreachable("Type trait not handled by switch");
+}
+
+static bool EvaluateUnaryTypeTrait(Sema &Self, UnaryTypeTrait UTT,
+ SourceLocation KeyLoc, QualType T) {
+ assert(!T->isDependentType() && "Cannot evaluate traits of dependent type");
+
+ ASTContext &C = Self.Context;
+ switch(UTT) {
+ // Type trait expressions corresponding to the primary type category
+ // predicates in C++0x [meta.unary.cat].
+ case UTT_IsVoid:
+ return T->isVoidType();
+ case UTT_IsIntegral:
+ return T->isIntegralType(C);
+ case UTT_IsFloatingPoint:
+ return T->isFloatingType();
+ case UTT_IsArray:
+ return T->isArrayType();
+ case UTT_IsPointer:
+ return T->isPointerType();
+ case UTT_IsLvalueReference:
+ return T->isLValueReferenceType();
+ case UTT_IsRvalueReference:
+ return T->isRValueReferenceType();
+ case UTT_IsMemberFunctionPointer:
+ return T->isMemberFunctionPointerType();
+ case UTT_IsMemberObjectPointer:
+ return T->isMemberDataPointerType();
+ case UTT_IsEnum:
+ return T->isEnumeralType();
+ case UTT_IsUnion:
+ return T->isUnionType();
+ case UTT_IsClass:
+ return T->isClassType() || T->isStructureType();
+ case UTT_IsFunction:
+ return T->isFunctionType();
+
+ // Type trait expressions which correspond to the convenient composition
+ // predicates in C++0x [meta.unary.comp].
+ case UTT_IsReference:
+ return T->isReferenceType();
+ case UTT_IsArithmetic:
+ return T->isArithmeticType() && !T->isEnumeralType();
+ case UTT_IsFundamental:
+ return T->isFundamentalType();
+ case UTT_IsObject:
+ return T->isObjectType();
+ case UTT_IsScalar:
+    // Note: semantic analysis depends on Objective-C lifetime types being
+ // considered scalar types. However, such types do not actually behave
+ // like scalar types at run time (since they may require retain/release
+ // operations), so we report them as non-scalar.
+ if (T->isObjCLifetimeType()) {
+ switch (T.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ return true;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ case Qualifiers::OCL_Autoreleasing:
+ return false;
+ }
+ }
+
+ return T->isScalarType();
+ case UTT_IsCompound:
+ return T->isCompoundType();
+ case UTT_IsMemberPointer:
+ return T->isMemberPointerType();
+
+ // Type trait expressions which correspond to the type property predicates
+ // in C++0x [meta.unary.prop].
+ case UTT_IsConst:
+ return T.isConstQualified();
+ case UTT_IsVolatile:
+ return T.isVolatileQualified();
+ case UTT_IsTrivial:
+ return T.isTrivialType(Self.Context);
+ case UTT_IsTriviallyCopyable:
+ return T.isTriviallyCopyableType(Self.Context);
+ case UTT_IsStandardLayout:
+ return T->isStandardLayoutType();
+ case UTT_IsPOD:
+ return T.isPODType(Self.Context);
+ case UTT_IsLiteral:
+ return T->isLiteralType();
+ case UTT_IsEmpty:
+ if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ return !RD->isUnion() && RD->isEmpty();
+ return false;
+ case UTT_IsPolymorphic:
+ if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ return RD->isPolymorphic();
+ return false;
+ case UTT_IsAbstract:
+ if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ return RD->isAbstract();
+ return false;
+ case UTT_IsFinal:
+ if (const CXXRecordDecl *RD = T->getAsCXXRecordDecl())
+ return RD->hasAttr<FinalAttr>();
+ return false;
+ case UTT_IsSigned:
+ return T->isSignedIntegerType();
+ case UTT_IsUnsigned:
+ return T->isUnsignedIntegerType();
+
+ // Type trait expressions which query classes regarding their construction,
+ // destruction, and copying. Rather than being based directly on the
+ // related type predicates in the standard, they are specified by both
+ // GCC[1] and the Embarcadero C++ compiler[2], and Clang implements those
+ // specifications.
+ //
+  //   1: http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html
+ // 2: http://docwiki.embarcadero.com/RADStudio/XE/en/Type_Trait_Functions_(C%2B%2B0x)_Index
+ case UTT_HasTrivialDefaultConstructor:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If __is_pod (type) is true then the trait is true, else if type is
+ // a cv class or union type (or array thereof) with a trivial default
+ // constructor ([class.ctor]) then the trait is true, else it is false.
+ if (T.isPODType(Self.Context))
+ return true;
+ if (const RecordType *RT =
+ C.getBaseElementType(T)->getAs<RecordType>())
+ return cast<CXXRecordDecl>(RT->getDecl())->hasTrivialDefaultConstructor();
+ return false;
+ case UTT_HasTrivialCopy:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If __is_pod (type) is true or type is a reference type then
+ // the trait is true, else if type is a cv class or union type
+ // with a trivial copy constructor ([class.copy]) then the trait
+ // is true, else it is false.
+ if (T.isPODType(Self.Context) || T->isReferenceType())
+ return true;
+ if (const RecordType *RT = T->getAs<RecordType>())
+ return cast<CXXRecordDecl>(RT->getDecl())->hasTrivialCopyConstructor();
+ return false;
+ case UTT_HasTrivialAssign:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If type is const qualified or is a reference type then the
+ // trait is false. Otherwise if __is_pod (type) is true then the
+ // trait is true, else if type is a cv class or union type with
+ // a trivial copy assignment ([class.copy]) then the trait is
+ // true, else it is false.
+ // Note: the const and reference restrictions are interesting,
+ // given that const and reference members don't prevent a class
+ // from having a trivial copy assignment operator (but do cause
+ // errors if the copy assignment operator is actually used, q.v.
+ // [class.copy]p12).
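+    //
+    // For illustration:
+    //
+    //   __has_trivial_assign(int)         // true
+    //   __has_trivial_assign(const int)   // false: const-qualified type
+    //   __has_trivial_assign(int&)        // false: reference type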
+
+ if (C.getBaseElementType(T).isConstQualified())
+ return false;
+ if (T.isPODType(Self.Context))
+ return true;
+ if (const RecordType *RT = T->getAs<RecordType>())
+ return cast<CXXRecordDecl>(RT->getDecl())->hasTrivialCopyAssignment();
+ return false;
+ case UTT_HasTrivialDestructor:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If __is_pod (type) is true or type is a reference type
+ // then the trait is true, else if type is a cv class or union
+ // type (or array thereof) with a trivial destructor
+ // ([class.dtor]) then the trait is true, else it is
+ // false.
+ if (T.isPODType(Self.Context) || T->isReferenceType())
+ return true;
+
+ // Objective-C++ ARC: autorelease types don't require destruction.
+ if (T->isObjCLifetimeType() &&
+ T.getObjCLifetime() == Qualifiers::OCL_Autoreleasing)
+ return true;
+
+ if (const RecordType *RT =
+ C.getBaseElementType(T)->getAs<RecordType>())
+ return cast<CXXRecordDecl>(RT->getDecl())->hasTrivialDestructor();
+ return false;
+ // TODO: Propagate nothrowness for implicitly declared special members.
+ case UTT_HasNothrowAssign:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If type is const qualified or is a reference type then the
+ // trait is false. Otherwise if __has_trivial_assign (type)
+ // is true then the trait is true, else if type is a cv class
+ // or union type with copy assignment operators that are known
+ // not to throw an exception then the trait is true, else it is
+ // false.
+ if (C.getBaseElementType(T).isConstQualified())
+ return false;
+ if (T->isReferenceType())
+ return false;
+ if (T.isPODType(Self.Context) || T->isObjCLifetimeType())
+ return true;
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ CXXRecordDecl* RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasTrivialCopyAssignment())
+ return true;
+
+ bool FoundAssign = false;
+ DeclarationName Name = C.DeclarationNames.getCXXOperatorName(OO_Equal);
+ LookupResult Res(Self, DeclarationNameInfo(Name, KeyLoc),
+ Sema::LookupOrdinaryName);
+ if (Self.LookupQualifiedName(Res, RD)) {
+ Res.suppressDiagnostics();
+ for (LookupResult::iterator Op = Res.begin(), OpEnd = Res.end();
+ Op != OpEnd; ++Op) {
+ if (isa<FunctionTemplateDecl>(*Op))
+ continue;
+
+ CXXMethodDecl *Operator = cast<CXXMethodDecl>(*Op);
+ if (Operator->isCopyAssignmentOperator()) {
+ FoundAssign = true;
+ const FunctionProtoType *CPT
+ = Operator->getType()->getAs<FunctionProtoType>();
+ if (CPT->getExceptionSpecType() == EST_Delayed)
+ return false;
+ if (!CPT->isNothrow(Self.Context))
+ return false;
+ }
+ }
+ }
+
+ return FoundAssign;
+ }
+ return false;
+ case UTT_HasNothrowCopy:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If __has_trivial_copy (type) is true then the trait is true, else
+ // if type is a cv class or union type with copy constructors that are
+ // known not to throw an exception then the trait is true, else it is
+ // false.
+ if (T.isPODType(C) || T->isReferenceType() || T->isObjCLifetimeType())
+ return true;
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasTrivialCopyConstructor())
+ return true;
+
+ bool FoundConstructor = false;
+ unsigned FoundTQs;
+ DeclContext::lookup_const_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd) = Self.LookupConstructors(RD);
+ Con != ConEnd; ++Con) {
+ // A template constructor is never a copy constructor.
+        // FIXME: However, it may actually be selected at the point of
+        // overload resolution.
+ if (isa<FunctionTemplateDecl>(*Con))
+ continue;
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
+ if (Constructor->isCopyConstructor(FoundTQs)) {
+ FoundConstructor = true;
+ const FunctionProtoType *CPT
+ = Constructor->getType()->getAs<FunctionProtoType>();
+ if (CPT->getExceptionSpecType() == EST_Delayed)
+ return false;
+ // FIXME: check whether evaluating default arguments can throw.
+ // For now, we'll be conservative and assume that they can throw.
+ if (!CPT->isNothrow(Self.Context) || CPT->getNumArgs() > 1)
+ return false;
+ }
+ }
+
+ return FoundConstructor;
+ }
+ return false;
+ case UTT_HasNothrowConstructor:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If __has_trivial_constructor (type) is true then the trait is
+ // true, else if type is a cv class or union type (or array
+ // thereof) with a default constructor that is known not to
+ // throw an exception then the trait is true, else it is false.
+ if (T.isPODType(C) || T->isObjCLifetimeType())
+ return true;
+ if (const RecordType *RT = C.getBaseElementType(T)->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasTrivialDefaultConstructor())
+ return true;
+
+ DeclContext::lookup_const_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd) = Self.LookupConstructors(RD);
+ Con != ConEnd; ++Con) {
+ // FIXME: In C++0x, a constructor template can be a default constructor.
+ if (isa<FunctionTemplateDecl>(*Con))
+ continue;
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
+ if (Constructor->isDefaultConstructor()) {
+ const FunctionProtoType *CPT
+ = Constructor->getType()->getAs<FunctionProtoType>();
+ if (CPT->getExceptionSpecType() == EST_Delayed)
+ return false;
+ // TODO: check whether evaluating default arguments can throw.
+ // For now, we'll be conservative and assume that they can throw.
+ return CPT->isNothrow(Self.Context) && CPT->getNumArgs() == 0;
+ }
+ }
+ }
+ return false;
+ case UTT_HasVirtualDestructor:
+ // http://gcc.gnu.org/onlinedocs/gcc/Type-Traits.html:
+ // If type is a class type with a virtual destructor ([class.dtor])
+ // then the trait is true, else it is false.
+ if (const RecordType *Record = T->getAs<RecordType>()) {
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ if (CXXDestructorDecl *Destructor = Self.LookupDestructor(RD))
+ return Destructor->isVirtual();
+ }
+ return false;
+
+ // These type trait expressions are modeled on the specifications for the
+ // Embarcadero C++0x type trait functions:
+ // http://docwiki.embarcadero.com/RADStudio/XE/en/Type_Trait_Functions_(C%2B%2B0x)_Index
+ case UTT_IsCompleteType:
+ // http://docwiki.embarcadero.com/RADStudio/XE/en/Is_complete_type_(typename_T_):
+ // Returns True if and only if T is a complete type at the point of the
+ // function call.
+ return !T->isIncompleteType();
+ }
+ llvm_unreachable("Type trait not covered by switch");
+}
+
+ExprResult Sema::BuildUnaryTypeTrait(UnaryTypeTrait UTT,
+ SourceLocation KWLoc,
+ TypeSourceInfo *TSInfo,
+ SourceLocation RParen) {
+ QualType T = TSInfo->getType();
+ if (!CheckUnaryTypeTraitTypeCompleteness(*this, UTT, KWLoc, T))
+ return ExprError();
+
+ bool Value = false;
+ if (!T->isDependentType())
+ Value = EvaluateUnaryTypeTrait(*this, UTT, KWLoc, T);
+
+ return Owned(new (Context) UnaryTypeTraitExpr(KWLoc, UTT, TSInfo, Value,
+ RParen, Context.BoolTy));
+}
+
+ExprResult Sema::ActOnBinaryTypeTrait(BinaryTypeTrait BTT,
+ SourceLocation KWLoc,
+ ParsedType LhsTy,
+ ParsedType RhsTy,
+ SourceLocation RParen) {
+ TypeSourceInfo *LhsTSInfo;
+ QualType LhsT = GetTypeFromParser(LhsTy, &LhsTSInfo);
+ if (!LhsTSInfo)
+ LhsTSInfo = Context.getTrivialTypeSourceInfo(LhsT);
+
+ TypeSourceInfo *RhsTSInfo;
+ QualType RhsT = GetTypeFromParser(RhsTy, &RhsTSInfo);
+ if (!RhsTSInfo)
+ RhsTSInfo = Context.getTrivialTypeSourceInfo(RhsT);
+
+ return BuildBinaryTypeTrait(BTT, KWLoc, LhsTSInfo, RhsTSInfo, RParen);
+}
+
+static bool evaluateTypeTrait(Sema &S, TypeTrait Kind, SourceLocation KWLoc,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc) {
+ switch (Kind) {
+ case clang::TT_IsTriviallyConstructible: {
+ // C++11 [meta.unary.prop]:
+ // is_trivially_constructible is defined as:
+ //
+ // is_constructible<T, Args...>::value is true and the variable
+ // definition for is_constructible, as defined below, is known to call no
+ // operation that is not trivial.
+ //
+ // The predicate condition for a template specialization
+ // is_constructible<T, Args...> shall be satisfied if and only if the
+ // following variable definition would be well-formed for some invented
+ // variable t:
+ //
+ // T t(create<Args>()...);
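+    //
+    // For illustration (X is a hypothetical class):
+    //
+    //   struct X { X(int); };
+    //   __is_trivially_constructible(int, int)   // true
+    //   __is_trivially_constructible(X, int)     // false: calls X(int)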
+ if (Args.empty()) {
+ S.Diag(KWLoc, diag::err_type_trait_arity)
+ << 1 << 1 << 1 << (int)Args.size();
+ return false;
+ }
+
+ bool SawVoid = false;
+ for (unsigned I = 0, N = Args.size(); I != N; ++I) {
+ if (Args[I]->getType()->isVoidType()) {
+ SawVoid = true;
+ continue;
+ }
+
+ if (!Args[I]->getType()->isIncompleteType() &&
+ S.RequireCompleteType(KWLoc, Args[I]->getType(),
+ diag::err_incomplete_type_used_in_type_trait_expr))
+ return false;
+ }
+
+ // If any argument was 'void', of course it won't type-check.
+ if (SawVoid)
+ return false;
+
+ llvm::SmallVector<OpaqueValueExpr, 2> OpaqueArgExprs;
+ llvm::SmallVector<Expr *, 2> ArgExprs;
+ ArgExprs.reserve(Args.size() - 1);
+ for (unsigned I = 1, N = Args.size(); I != N; ++I) {
+ QualType T = Args[I]->getType();
+ if (T->isObjectType() || T->isFunctionType())
+ T = S.Context.getRValueReferenceType(T);
+ OpaqueArgExprs.push_back(
+ OpaqueValueExpr(Args[I]->getTypeLoc().getLocStart(),
+ T.getNonLValueExprType(S.Context),
+ Expr::getValueKindForType(T)));
+ ArgExprs.push_back(&OpaqueArgExprs.back());
+ }
+
+ // Perform the initialization in an unevaluated context within a SFINAE
+ // trap at translation unit scope.
+ EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
+ Sema::SFINAETrap SFINAE(S, /*AccessCheckingSFINAE=*/true);
+ Sema::ContextRAII TUContext(S, S.Context.getTranslationUnitDecl());
+ InitializedEntity To(InitializedEntity::InitializeTemporary(Args[0]));
+ InitializationKind InitKind(InitializationKind::CreateDirect(KWLoc, KWLoc,
+ RParenLoc));
+ InitializationSequence Init(S, To, InitKind,
+ ArgExprs.begin(), ArgExprs.size());
+ if (Init.Failed())
+ return false;
+
+ ExprResult Result = Init.Perform(S, To, InitKind,
+ MultiExprArg(ArgExprs.data(),
+ ArgExprs.size()));
+ if (Result.isInvalid() || SFINAE.hasErrorOccurred())
+ return false;
+
+    // The initialization succeeded; now make sure there are no non-trivial
+ // calls.
+ return !Result.get()->hasNonTrivialCall(S.Context);
+ }
+ }
+
+ return false;
+}
+
+ExprResult Sema::BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc) {
+ bool Dependent = false;
+ for (unsigned I = 0, N = Args.size(); I != N; ++I) {
+ if (Args[I]->getType()->isDependentType()) {
+ Dependent = true;
+ break;
+ }
+ }
+
+ bool Value = false;
+ if (!Dependent)
+ Value = evaluateTypeTrait(*this, Kind, KWLoc, Args, RParenLoc);
+
+ return TypeTraitExpr::Create(Context, Context.BoolTy, KWLoc, Kind,
+ Args, RParenLoc, Value);
+}
+
+ExprResult Sema::ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
+ ArrayRef<ParsedType> Args,
+ SourceLocation RParenLoc) {
+ llvm::SmallVector<TypeSourceInfo *, 4> ConvertedArgs;
+ ConvertedArgs.reserve(Args.size());
+
+ for (unsigned I = 0, N = Args.size(); I != N; ++I) {
+ TypeSourceInfo *TInfo;
+ QualType T = GetTypeFromParser(Args[I], &TInfo);
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(T, KWLoc);
+
+ ConvertedArgs.push_back(TInfo);
+ }
+
+ return BuildTypeTrait(Kind, KWLoc, ConvertedArgs, RParenLoc);
+}
+
+static bool EvaluateBinaryTypeTrait(Sema &Self, BinaryTypeTrait BTT,
+ QualType LhsT, QualType RhsT,
+ SourceLocation KeyLoc) {
+ assert(!LhsT->isDependentType() && !RhsT->isDependentType() &&
+ "Cannot evaluate traits of dependent types");
+
+ switch(BTT) {
+ case BTT_IsBaseOf: {
+ // C++0x [meta.rel]p2
+ // Base is a base class of Derived without regard to cv-qualifiers or
+ // Base and Derived are not unions and name the same class type without
+ // regard to cv-qualifiers.
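+    //
+    // For illustration (B and D are hypothetical classes):
+    //
+    //   struct B {}; struct D : B {};
+    //   __is_base_of(B, D)   // true
+    //   __is_base_of(D, B)   // false
+    //   __is_base_of(B, B)   // true: same non-union class type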
+
+ const RecordType *lhsRecord = LhsT->getAs<RecordType>();
+ if (!lhsRecord) return false;
+
+ const RecordType *rhsRecord = RhsT->getAs<RecordType>();
+ if (!rhsRecord) return false;
+
+ assert(Self.Context.hasSameUnqualifiedType(LhsT, RhsT)
+ == (lhsRecord == rhsRecord));
+
+ if (lhsRecord == rhsRecord)
+ return !lhsRecord->getDecl()->isUnion();
+
+ // C++0x [meta.rel]p2:
+ // If Base and Derived are class types and are different types
+ // (ignoring possible cv-qualifiers) then Derived shall be a
+ // complete type.
+ if (Self.RequireCompleteType(KeyLoc, RhsT,
+ diag::err_incomplete_type_used_in_type_trait_expr))
+ return false;
+
+ return cast<CXXRecordDecl>(rhsRecord->getDecl())
+ ->isDerivedFrom(cast<CXXRecordDecl>(lhsRecord->getDecl()));
+ }
+ case BTT_IsSame:
+ return Self.Context.hasSameType(LhsT, RhsT);
+ case BTT_TypeCompatible:
+ return Self.Context.typesAreCompatible(LhsT.getUnqualifiedType(),
+ RhsT.getUnqualifiedType());
+ case BTT_IsConvertible:
+ case BTT_IsConvertibleTo: {
+ // C++0x [meta.rel]p4:
+ // Given the following function prototype:
+ //
+ // template <class T>
+ // typename add_rvalue_reference<T>::type create();
+ //
+ // the predicate condition for a template specialization
+ // is_convertible<From, To> shall be satisfied if and only if
+ // the return expression in the following code would be
+ // well-formed, including any implicit conversions to the return
+ // type of the function:
+ //
+ // To test() {
+ // return create<From>();
+ // }
+ //
+ // Access checking is performed as if in a context unrelated to To and
+ // From. Only the validity of the immediate context of the expression
+ // of the return-statement (including conversions to the return type)
+ // is considered.
+ //
+ // We model the initialization as a copy-initialization of a temporary
+ // of the appropriate type, which for this expression is identical to the
+ // return statement (since NRVO doesn't apply).
+ if (LhsT->isObjectType() || LhsT->isFunctionType())
+ LhsT = Self.Context.getRValueReferenceType(LhsT);
+
+ InitializedEntity To(InitializedEntity::InitializeTemporary(RhsT));
+ OpaqueValueExpr From(KeyLoc, LhsT.getNonLValueExprType(Self.Context),
+ Expr::getValueKindForType(LhsT));
+ Expr *FromPtr = &From;
+ InitializationKind Kind(InitializationKind::CreateCopy(KeyLoc,
+ SourceLocation()));
+
+ // Perform the initialization in an unevaluated context within a SFINAE
+ // trap at translation unit scope.
+ EnterExpressionEvaluationContext Unevaluated(Self, Sema::Unevaluated);
+ Sema::SFINAETrap SFINAE(Self, /*AccessCheckingSFINAE=*/true);
+ Sema::ContextRAII TUContext(Self, Self.Context.getTranslationUnitDecl());
+ InitializationSequence Init(Self, To, Kind, &FromPtr, 1);
+ if (Init.Failed())
+ return false;
+
+ ExprResult Result = Init.Perform(Self, To, Kind, MultiExprArg(&FromPtr, 1));
+ return !Result.isInvalid() && !SFINAE.hasErrorOccurred();
+ }
+
+ case BTT_IsTriviallyAssignable: {
+ // C++11 [meta.unary.prop]p3:
+ // is_trivially_assignable is defined as:
+ // is_assignable<T, U>::value is true and the assignment, as defined by
+ // is_assignable, is known to call no operation that is not trivial
+ //
+ // is_assignable is defined as:
+ // The expression declval<T>() = declval<U>() is well-formed when
+ // treated as an unevaluated operand (Clause 5).
+ //
+ // For both, T and U shall be complete types, (possibly cv-qualified)
+ // void, or arrays of unknown bound.
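+    //
+    // For illustration (S is a hypothetical class):
+    //
+    //   struct S { S &operator=(const S&); };
+    //   __is_trivially_assignable(int&, int)   // true
+    //   __is_trivially_assignable(S&, S)       // false: user-provided operator=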
+ if (!LhsT->isVoidType() && !LhsT->isIncompleteArrayType() &&
+ Self.RequireCompleteType(KeyLoc, LhsT,
+ diag::err_incomplete_type_used_in_type_trait_expr))
+ return false;
+ if (!RhsT->isVoidType() && !RhsT->isIncompleteArrayType() &&
+ Self.RequireCompleteType(KeyLoc, RhsT,
+ diag::err_incomplete_type_used_in_type_trait_expr))
+ return false;
+
+ // cv void is never assignable.
+ if (LhsT->isVoidType() || RhsT->isVoidType())
+ return false;
+
+ // Build expressions that emulate the effect of declval<T>() and
+ // declval<U>().
+ if (LhsT->isObjectType() || LhsT->isFunctionType())
+ LhsT = Self.Context.getRValueReferenceType(LhsT);
+ if (RhsT->isObjectType() || RhsT->isFunctionType())
+ RhsT = Self.Context.getRValueReferenceType(RhsT);
+ OpaqueValueExpr Lhs(KeyLoc, LhsT.getNonLValueExprType(Self.Context),
+ Expr::getValueKindForType(LhsT));
+ OpaqueValueExpr Rhs(KeyLoc, RhsT.getNonLValueExprType(Self.Context),
+ Expr::getValueKindForType(RhsT));
+
+ // Attempt the assignment in an unevaluated context within a SFINAE
+ // trap at translation unit scope.
+ EnterExpressionEvaluationContext Unevaluated(Self, Sema::Unevaluated);
+ Sema::SFINAETrap SFINAE(Self, /*AccessCheckingSFINAE=*/true);
+ Sema::ContextRAII TUContext(Self, Self.Context.getTranslationUnitDecl());
+ ExprResult Result = Self.BuildBinOp(/*S=*/0, KeyLoc, BO_Assign, &Lhs, &Rhs);
+ if (Result.isInvalid() || SFINAE.hasErrorOccurred())
+ return false;
+
+ return !Result.get()->hasNonTrivialCall(Self.Context);
+ }
+ }
+ llvm_unreachable("Unknown type trait or not implemented");
+}
+
+ExprResult Sema::BuildBinaryTypeTrait(BinaryTypeTrait BTT,
+ SourceLocation KWLoc,
+ TypeSourceInfo *LhsTSInfo,
+ TypeSourceInfo *RhsTSInfo,
+ SourceLocation RParen) {
+ QualType LhsT = LhsTSInfo->getType();
+ QualType RhsT = RhsTSInfo->getType();
+
+ if (BTT == BTT_TypeCompatible) {
+ if (getLangOpts().CPlusPlus) {
+ Diag(KWLoc, diag::err_types_compatible_p_in_cplusplus)
+ << SourceRange(KWLoc, RParen);
+ return ExprError();
+ }
+ }
+
+ bool Value = false;
+ if (!LhsT->isDependentType() && !RhsT->isDependentType())
+ Value = EvaluateBinaryTypeTrait(*this, BTT, LhsT, RhsT, KWLoc);
+
+ // Select trait result type.
+ QualType ResultType;
+ switch (BTT) {
+ case BTT_IsBaseOf: ResultType = Context.BoolTy; break;
+ case BTT_IsConvertible: ResultType = Context.BoolTy; break;
+ case BTT_IsSame: ResultType = Context.BoolTy; break;
+ case BTT_TypeCompatible: ResultType = Context.IntTy; break;
+ case BTT_IsConvertibleTo: ResultType = Context.BoolTy; break;
+ case BTT_IsTriviallyAssignable: ResultType = Context.BoolTy;
+ }
+
+ return Owned(new (Context) BinaryTypeTraitExpr(KWLoc, BTT, LhsTSInfo,
+ RhsTSInfo, Value, RParen,
+ ResultType));
+}
+
+ExprResult Sema::ActOnArrayTypeTrait(ArrayTypeTrait ATT,
+ SourceLocation KWLoc,
+ ParsedType Ty,
+ Expr* DimExpr,
+ SourceLocation RParen) {
+ TypeSourceInfo *TSInfo;
+ QualType T = GetTypeFromParser(Ty, &TSInfo);
+ if (!TSInfo)
+ TSInfo = Context.getTrivialTypeSourceInfo(T);
+
+ return BuildArrayTypeTrait(ATT, KWLoc, TSInfo, DimExpr, RParen);
+}
+
+static uint64_t EvaluateArrayTypeTrait(Sema &Self, ArrayTypeTrait ATT,
+ QualType T, Expr *DimExpr,
+ SourceLocation KeyLoc) {
+ assert(!T->isDependentType() && "Cannot evaluate traits of dependent type");
+
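+  // For illustration (A is a hypothetical typedef):
+  //
+  //   typedef int A[2][3];
+  //   __array_rank(A)         // 2
+  //   __array_extent(A, 0)    // 2
+  //   __array_extent(A, 1)    // 3
+  //   __array_extent(int, 0)  // 0: not an array type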
+ switch(ATT) {
+ case ATT_ArrayRank:
+ if (T->isArrayType()) {
+ unsigned Dim = 0;
+ while (const ArrayType *AT = Self.Context.getAsArrayType(T)) {
+ ++Dim;
+ T = AT->getElementType();
+ }
+ return Dim;
+ }
+ return 0;
+
+ case ATT_ArrayExtent: {
+ llvm::APSInt Value;
+ uint64_t Dim;
+ if (Self.VerifyIntegerConstantExpression(DimExpr, &Value,
+ Self.PDiag(diag::err_dimension_expr_not_constant_integer),
+ false).isInvalid())
+ return 0;
+ if (Value.isSigned() && Value.isNegative()) {
+ Self.Diag(KeyLoc, diag::err_dimension_expr_not_constant_integer)
+ << DimExpr->getSourceRange();
+ return 0;
+ }
+ Dim = Value.getLimitedValue();
+
+ if (T->isArrayType()) {
+ unsigned D = 0;
+ bool Matched = false;
+ while (const ArrayType *AT = Self.Context.getAsArrayType(T)) {
+ if (Dim == D) {
+ Matched = true;
+ break;
+ }
+ ++D;
+ T = AT->getElementType();
+ }
+
+ if (Matched && T->isArrayType()) {
+ if (const ConstantArrayType *CAT = Self.Context.getAsConstantArrayType(T))
+ return CAT->getSize().getLimitedValue();
+ }
+ }
+ return 0;
+ }
+ }
+ llvm_unreachable("Unknown type trait or not implemented");
+}
+
+ExprResult Sema::BuildArrayTypeTrait(ArrayTypeTrait ATT,
+ SourceLocation KWLoc,
+ TypeSourceInfo *TSInfo,
+ Expr* DimExpr,
+ SourceLocation RParen) {
+ QualType T = TSInfo->getType();
+
+ // FIXME: This should likely be tracked as an APInt to remove any host
+ // assumptions about the width of size_t on the target.
+ uint64_t Value = 0;
+ if (!T->isDependentType())
+ Value = EvaluateArrayTypeTrait(*this, ATT, T, DimExpr, KWLoc);
+
+ // While the specification for these traits from the Embarcadero C++
+ // compiler's documentation says the return type is 'unsigned int', Clang
+ // returns 'size_t'. On Windows, the primary platform for the Embarcadero
+ // compiler, there is no difference. On several other platforms this is an
+ // important distinction.
+ return Owned(new (Context) ArrayTypeTraitExpr(KWLoc, ATT, TSInfo, Value,
+ DimExpr, RParen,
+ Context.getSizeType()));
+}
+
+ExprResult Sema::ActOnExpressionTrait(ExpressionTrait ET,
+ SourceLocation KWLoc,
+ Expr *Queried,
+ SourceLocation RParen) {
+  // If there was an error parsing the expression, ignore it.
+ if (!Queried)
+ return ExprError();
+
+ ExprResult Result = BuildExpressionTrait(ET, KWLoc, Queried, RParen);
+
+ return move(Result);
+}
+
+static bool EvaluateExpressionTrait(ExpressionTrait ET, Expr *E) {
+ switch (ET) {
+ case ET_IsLValueExpr: return E->isLValue();
+ case ET_IsRValueExpr: return E->isRValue();
+ }
+ llvm_unreachable("Expression trait not covered by switch");
+}
+
+ExprResult Sema::BuildExpressionTrait(ExpressionTrait ET,
+ SourceLocation KWLoc,
+ Expr *Queried,
+ SourceLocation RParen) {
+ if (Queried->isTypeDependent()) {
+ // Delay type-checking for type-dependent expressions.
+ } else if (Queried->getType()->isPlaceholderType()) {
+ ExprResult PE = CheckPlaceholderExpr(Queried);
+ if (PE.isInvalid()) return ExprError();
+ return BuildExpressionTrait(ET, KWLoc, PE.take(), RParen);
+ }
+
+ bool Value = EvaluateExpressionTrait(ET, Queried);
+
+ return Owned(new (Context) ExpressionTraitExpr(KWLoc, ET, Queried, Value,
+ RParen, Context.BoolTy));
+}
+
+QualType Sema::CheckPointerToMemberOperands(ExprResult &LHS, ExprResult &RHS,
+ ExprValueKind &VK,
+ SourceLocation Loc,
+ bool isIndirect) {
+ assert(!LHS.get()->getType()->isPlaceholderType() &&
+ !RHS.get()->getType()->isPlaceholderType() &&
+ "placeholders should have been weeded out by now");
+
+ // The LHS undergoes lvalue conversions if this is ->*.
+ if (isIndirect) {
+ LHS = DefaultLvalueConversion(LHS.take());
+ if (LHS.isInvalid()) return QualType();
+ }
+
+ // The RHS always undergoes lvalue conversions.
+ RHS = DefaultLvalueConversion(RHS.take());
+ if (RHS.isInvalid()) return QualType();
+
+ const char *OpSpelling = isIndirect ? "->*" : ".*";
+ // C++ 5.5p2
+ // The binary operator .* [p3: ->*] binds its second operand, which shall
+ // be of type "pointer to member of T" (where T is a completely-defined
+ // class type) [...]
+ QualType RHSType = RHS.get()->getType();
+ const MemberPointerType *MemPtr = RHSType->getAs<MemberPointerType>();
+ if (!MemPtr) {
+ Diag(Loc, diag::err_bad_memptr_rhs)
+ << OpSpelling << RHSType << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ QualType Class(MemPtr->getClass(), 0);
+
+ // Note: C++ [expr.mptr.oper]p2-3 says that the class type into which the
+ // member pointer points must be completely-defined. However, there is no
+ // reason for this semantic distinction, and the rule is not enforced by
+ // other compilers. Therefore, we do not check this property, as it is
+ // likely to be considered a defect.
+
+ // C++ 5.5p2
+ // [...] to its first operand, which shall be of class T or of a class of
+ // which T is an unambiguous and accessible base class. [p3: a pointer to
+ // such a class]
+ QualType LHSType = LHS.get()->getType();
+ if (isIndirect) {
+ if (const PointerType *Ptr = LHSType->getAs<PointerType>())
+ LHSType = Ptr->getPointeeType();
+ else {
+ Diag(Loc, diag::err_bad_memptr_lhs)
+ << OpSpelling << 1 << LHSType
+ << FixItHint::CreateReplacement(SourceRange(Loc), ".*");
+ return QualType();
+ }
+ }
+
+ if (!Context.hasSameUnqualifiedType(Class, LHSType)) {
+ // If we want to check the hierarchy, we need a complete type.
+ if (RequireCompleteType(Loc, LHSType, PDiag(diag::err_bad_memptr_lhs)
+ << OpSpelling << (int)isIndirect)) {
+ return QualType();
+ }
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/false);
+ // FIXME: Would it be useful to print full ambiguity paths, or is that
+ // overkill?
+ if (!IsDerivedFrom(LHSType, Class, Paths) ||
+ Paths.isAmbiguous(Context.getCanonicalType(Class))) {
+ Diag(Loc, diag::err_bad_memptr_lhs) << OpSpelling
+ << (int)isIndirect << LHS.get()->getType();
+ return QualType();
+ }
+ // Cast LHS to type of use.
+ QualType UseType = isIndirect ? Context.getPointerType(Class) : Class;
+ ExprValueKind VK = isIndirect ? VK_RValue : LHS.get()->getValueKind();
+
+ CXXCastPath BasePath;
+ BuildBasePathArray(Paths, BasePath);
+ LHS = ImpCastExprToType(LHS.take(), UseType, CK_DerivedToBase, VK,
+ &BasePath);
+ }
+
+ if (isa<CXXScalarValueInitExpr>(RHS.get()->IgnoreParens())) {
+    // Diagnose the use of a pointer-to-member type as the functional cast
+    // in a pointer-to-member expression.
+ Diag(Loc, diag::err_pointer_to_member_type) << isIndirect;
+ return QualType();
+ }
+
+ // C++ 5.5p2
+ // The result is an object or a function of the type specified by the
+ // second operand.
+ // The cv qualifiers are the union of those in the pointer and the left side,
+ // in accordance with 5.5p5 and 5.2.5.
+ QualType Result = MemPtr->getPointeeType();
+ Result = Context.getCVRQualifiedType(Result, LHSType.getCVRQualifiers());
+
+ // C++0x [expr.mptr.oper]p6:
+ // In a .* expression whose object expression is an rvalue, the program is
+ // ill-formed if the second operand is a pointer to member function with
+ // ref-qualifier &. In a ->* expression or in a .* expression whose object
+ // expression is an lvalue, the program is ill-formed if the second operand
+ // is a pointer to member function with ref-qualifier &&.
+ if (const FunctionProtoType *Proto = Result->getAs<FunctionProtoType>()) {
+ switch (Proto->getRefQualifier()) {
+ case RQ_None:
+ // Do nothing
+ break;
+
+ case RQ_LValue:
+ if (!isIndirect && !LHS.get()->Classify(Context).isLValue())
+ Diag(Loc, diag::err_pointer_to_member_oper_value_classify)
+ << RHSType << 1 << LHS.get()->getSourceRange();
+ break;
+
+ case RQ_RValue:
+ if (isIndirect || !LHS.get()->Classify(Context).isRValue())
+ Diag(Loc, diag::err_pointer_to_member_oper_value_classify)
+ << RHSType << 0 << LHS.get()->getSourceRange();
+ break;
+ }
+ }
+
+ // C++ [expr.mptr.oper]p6:
+ // The result of a .* expression whose second operand is a pointer
+ // to a data member is of the same value category as its
+ // first operand. The result of a .* expression whose second
+ // operand is a pointer to a member function is a prvalue. The
+ // result of an ->* expression is an lvalue if its second operand
+ // is a pointer to data member and a prvalue otherwise.
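+  //
+  // For illustration (C is a hypothetical class):
+  //
+  //   struct C { int m; void f(); };
+  //   C c; C *p = &c;
+  //   c.*&C::m    // lvalue of type int
+  //   p->*&C::m   // lvalue of type int
+  //   c.*&C::f    // prvalue; usable only as the callee of a call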
+ if (Result->isFunctionType()) {
+ VK = VK_RValue;
+ return Context.BoundMemberTy;
+ } else if (isIndirect) {
+ VK = VK_LValue;
+ } else {
+ VK = LHS.get()->getValueKind();
+ }
+
+ return Result;
+}
+
+/// \brief Try to convert a type to another according to C++0x 5.16p3.
+///
+/// This is part of the parameter validation for the ? operator. If either
+/// value operand is a class type, the two operands are attempted to be
+/// converted to each other. This function does the conversion in one direction.
+/// It returns true if the program is ill-formed and has already been diagnosed
+/// as such.
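+///
+/// For illustration (Base, Derived and cond are hypothetical):
+///
+///   struct Base {}; struct Derived : Base {};
+///   Derived d; Base b;
+///   cond ? d : b;   // d converts to match b: Base& binds directly to d;
+///                   // the reverse direction (b matching d) fails.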
+static bool TryClassUnification(Sema &Self, Expr *From, Expr *To,
+ SourceLocation QuestionLoc,
+ bool &HaveConversion,
+ QualType &ToType) {
+ HaveConversion = false;
+ ToType = To->getType();
+
+ InitializationKind Kind = InitializationKind::CreateCopy(To->getLocStart(),
+ SourceLocation());
+ // C++0x 5.16p3
+ // The process for determining whether an operand expression E1 of type T1
+ // can be converted to match an operand expression E2 of type T2 is defined
+ // as follows:
+ // -- If E2 is an lvalue:
+ bool ToIsLvalue = To->isLValue();
+ if (ToIsLvalue) {
+ // E1 can be converted to match E2 if E1 can be implicitly converted to
+ // type "lvalue reference to T2", subject to the constraint that in the
+ // conversion the reference must bind directly to E1.
+ QualType T = Self.Context.getLValueReferenceType(ToType);
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(T);
+
+ InitializationSequence InitSeq(Self, Entity, Kind, &From, 1);
+ if (InitSeq.isDirectReferenceBinding()) {
+ ToType = T;
+ HaveConversion = true;
+ return false;
+ }
+
+ if (InitSeq.isAmbiguous())
+ return InitSeq.Diagnose(Self, Entity, Kind, &From, 1);
+ }
+
+ // -- If E2 is an rvalue, or if the conversion above cannot be done:
+ // -- if E1 and E2 have class type, and the underlying class types are
+ // the same or one is a base class of the other:
+ QualType FTy = From->getType();
+ QualType TTy = To->getType();
+ const RecordType *FRec = FTy->getAs<RecordType>();
+ const RecordType *TRec = TTy->getAs<RecordType>();
+ bool FDerivedFromT = FRec && TRec && FRec != TRec &&
+ Self.IsDerivedFrom(FTy, TTy);
+ if (FRec && TRec &&
+ (FRec == TRec || FDerivedFromT || Self.IsDerivedFrom(TTy, FTy))) {
+ // E1 can be converted to match E2 if the class of T2 is the
+ // same type as, or a base class of, the class of T1, and
+ // [cv2 > cv1].
+ if (FRec == TRec || FDerivedFromT) {
+ if (TTy.isAtLeastAsQualifiedAs(FTy)) {
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(TTy);
+ InitializationSequence InitSeq(Self, Entity, Kind, &From, 1);
+ if (InitSeq) {
+ HaveConversion = true;
+ return false;
+ }
+
+ if (InitSeq.isAmbiguous())
+ return InitSeq.Diagnose(Self, Entity, Kind, &From, 1);
+ }
+ }
+
+ return false;
+ }
+
+ // -- Otherwise: E1 can be converted to match E2 if E1 can be
+ // implicitly converted to the type that expression E2 would have
+ // if E2 were converted to an rvalue (or the type it has, if E2 is
+ // an rvalue).
+ //
+ // This actually refers very narrowly to the lvalue-to-rvalue conversion, not
+ // to the array-to-pointer or function-to-pointer conversions.
+ if (!TTy->getAs<TagType>())
+ TTy = TTy.getUnqualifiedType();
+
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(TTy);
+ InitializationSequence InitSeq(Self, Entity, Kind, &From, 1);
+ HaveConversion = !InitSeq.Failed();
+ ToType = TTy;
+ if (InitSeq.isAmbiguous())
+ return InitSeq.Diagnose(Self, Entity, Kind, &From, 1);
+
+ return false;
+}
+
+/// \brief Try to find a common type for two operands according to C++0x
+/// 5.16p5.
+///
+/// This is part of the parameter validation for the ? operator. If either
+/// value operand is a class type, overload resolution is used to find a
+/// conversion to a common type.
+static bool FindConditionalOverload(Sema &Self, ExprResult &LHS, ExprResult &RHS,
+ SourceLocation QuestionLoc) {
+ Expr *Args[2] = { LHS.get(), RHS.get() };
+ OverloadCandidateSet CandidateSet(QuestionLoc);
+ Self.AddBuiltinOperatorCandidates(OO_Conditional, QuestionLoc, Args, 2,
+ CandidateSet);
+
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(Self, QuestionLoc, Best)) {
+ case OR_Success: {
+ // We found a match. Perform the conversions on the arguments and move on.
+ ExprResult LHSRes =
+ Self.PerformImplicitConversion(LHS.get(), Best->BuiltinTypes.ParamTypes[0],
+ Best->Conversions[0], Sema::AA_Converting);
+ if (LHSRes.isInvalid())
+ break;
+ LHS = move(LHSRes);
+
+ ExprResult RHSRes =
+ Self.PerformImplicitConversion(RHS.get(), Best->BuiltinTypes.ParamTypes[1],
+ Best->Conversions[1], Sema::AA_Converting);
+ if (RHSRes.isInvalid())
+ break;
+ RHS = move(RHSRes);
+ if (Best->Function)
+ Self.MarkFunctionReferenced(QuestionLoc, Best->Function);
+ return false;
+ }
+
+ case OR_No_Viable_Function:
+
+ // Emit a better diagnostic if one of the expressions is a null pointer
+ // constant and the other is a pointer type. In this case, the user most
+ // likely forgot to take the address of the other expression.
+ if (Self.DiagnoseConditionalForNull(LHS.get(), RHS.get(), QuestionLoc))
+ return true;
+
+ Self.Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands)
+ << LHS.get()->getType() << RHS.get()->getType()
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return true;
+
+ case OR_Ambiguous:
+ Self.Diag(QuestionLoc, diag::err_conditional_ambiguous_ovl)
+ << LHS.get()->getType() << RHS.get()->getType()
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ // FIXME: Print the possible common types by printing the return types of
+ // the viable candidates.
+ break;
+
+ case OR_Deleted:
+ llvm_unreachable("Conditional operator has only built-in overloads");
+ }
+ return true;
+}
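+
+// Illustrative sketch (hypothetical code): the built-in candidates above cover
+// cases such as
+//   struct A { operator int() const; };
+//   struct B { operator int() const; };
+//   c ? A() : B();   // both operands convert to int; the result has type int
+// where neither class type converts to the other, but both share a built-in
+// candidate parameter type.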
+
+/// \brief Perform an "extended" implicit conversion as returned by
+/// TryClassUnification.
+static bool ConvertForConditional(Sema &Self, ExprResult &E, QualType T) {
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(T);
+ InitializationKind Kind = InitializationKind::CreateCopy(E.get()->getLocStart(),
+ SourceLocation());
+ Expr *Arg = E.take();
+ InitializationSequence InitSeq(Self, Entity, Kind, &Arg, 1);
+ ExprResult Result = InitSeq.Perform(Self, Entity, Kind, MultiExprArg(&Arg, 1));
+ if (Result.isInvalid())
+ return true;
+
+ E = Result;
+ return false;
+}
+
+/// \brief Check the operands of ?: under C++ semantics.
+///
+/// See C++ [expr.cond]. Note that LHS is never null, even for the GNU x ?: y
+/// extension. In this case, LHS == Cond. (But they're not aliases.)
+QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
+ ExprValueKind &VK, ExprObjectKind &OK,
+ SourceLocation QuestionLoc) {
+ // FIXME: Handle C99's complex types, vector types, block pointers and Obj-C++
+ // interface pointers.
+
+ // C++0x 5.16p1
+ // The first expression is contextually converted to bool.
+ if (!Cond.get()->isTypeDependent()) {
+ ExprResult CondRes = CheckCXXBooleanCondition(Cond.take());
+ if (CondRes.isInvalid())
+ return QualType();
+ Cond = move(CondRes);
+ }
+
+ // Assume r-value.
+ VK = VK_RValue;
+ OK = OK_Ordinary;
+
+ // Either of the arguments dependent?
+ if (LHS.get()->isTypeDependent() || RHS.get()->isTypeDependent())
+ return Context.DependentTy;
+
+ // C++0x 5.16p2
+ // If either the second or the third operand has type (cv) void, ...
+ QualType LTy = LHS.get()->getType();
+ QualType RTy = RHS.get()->getType();
+ bool LVoid = LTy->isVoidType();
+ bool RVoid = RTy->isVoidType();
+ if (LVoid || RVoid) {
+ // ... then the [l2r] conversions are performed on the second and third
+ // operands ...
+ LHS = DefaultFunctionArrayLvalueConversion(LHS.take());
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.take());
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+ LTy = LHS.get()->getType();
+ RTy = RHS.get()->getType();
+
+ // ... and one of the following shall hold:
+ // -- The second or the third operand (but not both) is a throw-
+ // expression; the result is of the type of the other and is an rvalue.
+ bool LThrow = isa<CXXThrowExpr>(LHS.get());
+ bool RThrow = isa<CXXThrowExpr>(RHS.get());
+ if (LThrow && !RThrow)
+ return RTy;
+ if (RThrow && !LThrow)
+ return LTy;
+
+ // -- Both the second and third operands have type void; the result is of
+ // type void and is an rvalue.
+ if (LVoid && RVoid)
+ return Context.VoidTy;
+
+ // Neither holds, error.
+ Diag(QuestionLoc, diag::err_conditional_void_nonvoid)
+ << (LVoid ? RTy : LTy) << (LVoid ? 0 : 1)
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return QualType();
+ }
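+
+ // Illustrative examples (hypothetical code):
+ //   c ? throw 0 : 42   // well-formed; exactly one throw-expression, type int
+ //   c ? (void)0 : 42   // ill-formed; one operand is void, the other is not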
+
+ // Neither is void.
+
+ // C++0x 5.16p3
+ // Otherwise, if the second and third operand have different types, and
+ // either has (cv) class type, an attempt is made to convert each of those
+ // operands to the other.
+ if (!Context.hasSameType(LTy, RTy) &&
+ (LTy->isRecordType() || RTy->isRecordType())) {
+ ImplicitConversionSequence ICSLeftToRight, ICSRightToLeft;
+ // These return true if a single direction is already ambiguous.
+ QualType L2RType, R2LType;
+ bool HaveL2R, HaveR2L;
+ if (TryClassUnification(*this, LHS.get(), RHS.get(), QuestionLoc, HaveL2R, L2RType))
+ return QualType();
+ if (TryClassUnification(*this, RHS.get(), LHS.get(), QuestionLoc, HaveR2L, R2LType))
+ return QualType();
+
+ // If both can be converted, [...] the program is ill-formed.
+ if (HaveL2R && HaveR2L) {
+ Diag(QuestionLoc, diag::err_conditional_ambiguous)
+ << LTy << RTy << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ // If exactly one conversion is possible, that conversion is applied to
+ // the chosen operand and the converted operands are used in place of the
+ // original operands for the remainder of this section.
+ if (HaveL2R) {
+ if (ConvertForConditional(*this, LHS, L2RType) || LHS.isInvalid())
+ return QualType();
+ LTy = LHS.get()->getType();
+ } else if (HaveR2L) {
+ if (ConvertForConditional(*this, RHS, R2LType) || RHS.isInvalid())
+ return QualType();
+ RTy = RHS.get()->getType();
+ }
+ }
+
+ // C++0x 5.16p4
+ // If the second and third operands are glvalues of the same value
+ // category and have the same type, the result is of that type and
+ // value category and it is a bit-field if the second or the third
+ // operand is a bit-field, or if both are bit-fields.
+ // We only extend this to bitfields, not to the crazy other kinds of
+ // l-values.
+ bool Same = Context.hasSameType(LTy, RTy);
+ if (Same &&
+ LHS.get()->isGLValue() &&
+ LHS.get()->getValueKind() == RHS.get()->getValueKind() &&
+ LHS.get()->isOrdinaryOrBitFieldObject() &&
+ RHS.get()->isOrdinaryOrBitFieldObject()) {
+ VK = LHS.get()->getValueKind();
+ if (LHS.get()->getObjectKind() == OK_BitField ||
+ RHS.get()->getObjectKind() == OK_BitField)
+ OK = OK_BitField;
+ return LTy;
+ }
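+
+ // For example (hypothetical code): with 'int x, y;', 'c ? x : y' is an lvalue
+ // of type int under this rule, so '(c ? x : y) = 0' is well-formed.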
+
+ // C++0x 5.16p5
+ // Otherwise, the result is an rvalue. If the second and third operands
+ // do not have the same type, and either has (cv) class type, ...
+ if (!Same && (LTy->isRecordType() || RTy->isRecordType())) {
+ // ... overload resolution is used to determine the conversions (if any)
+ // to be applied to the operands. If the overload resolution fails, the
+ // program is ill-formed.
+ if (FindConditionalOverload(*this, LHS, RHS, QuestionLoc))
+ return QualType();
+ }
+
+ // C++0x 5.16p6
+ // LValue-to-rvalue, array-to-pointer, and function-to-pointer standard
+ // conversions are performed on the second and third operands.
+ LHS = DefaultFunctionArrayLvalueConversion(LHS.take());
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.take());
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+ LTy = LHS.get()->getType();
+ RTy = RHS.get()->getType();
+
+ // After those conversions, one of the following shall hold:
+ // -- The second and third operands have the same type; the result
+ // is of that type. If the operands have class type, the result
+ // is a prvalue temporary of the result type, which is
+ // copy-initialized from either the second operand or the third
+ // operand depending on the value of the first operand.
+ if (Context.getCanonicalType(LTy) == Context.getCanonicalType(RTy)) {
+ if (LTy->isRecordType()) {
+ // The operands have class type. Make a temporary copy.
+ InitializedEntity Entity = InitializedEntity::InitializeTemporary(LTy);
+ ExprResult LHSCopy = PerformCopyInitialization(Entity,
+ SourceLocation(),
+ LHS);
+ if (LHSCopy.isInvalid())
+ return QualType();
+
+ ExprResult RHSCopy = PerformCopyInitialization(Entity,
+ SourceLocation(),
+ RHS);
+ if (RHSCopy.isInvalid())
+ return QualType();
+
+ LHS = LHSCopy;
+ RHS = RHSCopy;
+ }
+
+ return LTy;
+ }
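+
+ // For example (hypothetical code): 'c ? std::string("a") : std::string("b")'
+ // yields a prvalue std::string copy-initialized from whichever operand is
+ // selected at run time.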
+
+ // Extension: conditional operator involving vector types.
+ if (LTy->isVectorType() || RTy->isVectorType())
+ return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false);
+
+ // -- The second and third operands have arithmetic or enumeration type;
+ // the usual arithmetic conversions are performed to bring them to a
+ // common type, and the result is of that type.
+ if (LTy->isArithmeticType() && RTy->isArithmeticType()) {
+ UsualArithmeticConversions(LHS, RHS);
+ if (LHS.isInvalid() || RHS.isInvalid())
+ return QualType();
+ return LHS.get()->getType();
+ }
+
+ // -- The second and third operands have pointer type, or one has pointer
+ // type and the other is a null pointer constant; pointer conversions
+ // and qualification conversions are performed to bring them to their
+ // composite pointer type. The result is of the composite pointer type.
+ // -- The second and third operands have pointer to member type, or one has
+ // pointer to member type and the other is a null pointer constant;
+ // pointer to member conversions and qualification conversions are
+ // performed to bring them to a common type, whose cv-qualification
+ // shall match the cv-qualification of either the second or the third
+ // operand. The result is of the common type.
+ bool NonStandardCompositeType = false;
+ QualType Composite = FindCompositePointerType(QuestionLoc, LHS, RHS,
+ isSFINAEContext()? 0 : &NonStandardCompositeType);
+ if (!Composite.isNull()) {
+ if (NonStandardCompositeType)
+ Diag(QuestionLoc,
+ diag::ext_typecheck_cond_incompatible_operands_nonstandard)
+ << LTy << RTy << Composite
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+
+ return Composite;
+ }
+
+ // Similarly, attempt to find composite type of two objective-c pointers.
+ Composite = FindCompositeObjCPointerType(LHS, RHS, QuestionLoc);
+ if (!Composite.isNull())
+ return Composite;
+
+ // Check if we are using a null with a non-pointer type.
+ if (DiagnoseConditionalForNull(LHS.get(), RHS.get(), QuestionLoc))
+ return QualType();
+
+ Diag(QuestionLoc, diag::err_typecheck_cond_incompatible_operands)
+ << LHS.get()->getType() << RHS.get()->getType()
+ << LHS.get()->getSourceRange() << RHS.get()->getSourceRange();
+ return QualType();
+}
+
+/// \brief Find a merged pointer type and convert the two expressions to it.
+///
+/// This finds the composite pointer type (or member pointer type) for @p E1
+/// and @p E2 according to C++0x 5.9p2. It converts both expressions to this
+/// type and returns it.
+/// It does not emit diagnostics.
+///
+/// \param Loc The location of the operator requiring these two expressions to
+/// be converted to the composite pointer type.
+///
+/// If \p NonStandardCompositeType is non-NULL, then we are permitted to find
+/// a non-standard (but still sane) composite type to which both expressions
+/// can be converted. When such a type is chosen, \c *NonStandardCompositeType
+/// will be set true.
+QualType Sema::FindCompositePointerType(SourceLocation Loc,
+ Expr *&E1, Expr *&E2,
+ bool *NonStandardCompositeType) {
+ if (NonStandardCompositeType)
+ *NonStandardCompositeType = false;
+
+ assert(getLangOpts().CPlusPlus && "This function assumes C++");
+ QualType T1 = E1->getType(), T2 = E2->getType();
+
+ if (!T1->isAnyPointerType() && !T1->isMemberPointerType() &&
+ !T2->isAnyPointerType() && !T2->isMemberPointerType())
+ return QualType();
+
+ // C++0x 5.9p2
+ // Pointer conversions and qualification conversions are performed on
+ // pointer operands to bring them to their composite pointer type. If
+ // one operand is a null pointer constant, the composite pointer type is
+ // the type of the other operand.
+ if (E1->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
+ if (T2->isMemberPointerType())
+ E1 = ImpCastExprToType(E1, T2, CK_NullToMemberPointer).take();
+ else
+ E1 = ImpCastExprToType(E1, T2, CK_NullToPointer).take();
+ return T2;
+ }
+ if (E2->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull)) {
+ if (T1->isMemberPointerType())
+ E2 = ImpCastExprToType(E2, T1, CK_NullToMemberPointer).take();
+ else
+ E2 = ImpCastExprToType(E2, T1, CK_NullToPointer).take();
+ return T1;
+ }
+
+ // Now both have to be pointers or member pointers.
+ if ((!T1->isPointerType() && !T1->isMemberPointerType()) ||
+ (!T2->isPointerType() && !T2->isMemberPointerType()))
+ return QualType();
+
+ // Otherwise, if one of the operands has type "pointer to cv1 void," then
+ // the other has type "pointer to cv2 T" and the composite pointer type is
+ // "pointer to cv12 void," where cv12 is the union of cv1 and cv2.
+ // Otherwise, the composite pointer type is a pointer type similar to the
+ // type of one of the operands, with a cv-qualification signature that is
+ // the union of the cv-qualification signatures of the operand types.
+ // In practice, the first part here is redundant; it's subsumed by the second.
+ // What we do here is, we build the two possible composite types, and try the
+ // conversions in both directions. If only one works, or if the two composite
+ // types are the same, we have succeeded.
+ // FIXME: extended qualifiers?
+ typedef SmallVector<unsigned, 4> QualifierVector;
+ QualifierVector QualifierUnion;
+ typedef SmallVector<std::pair<const Type *, const Type *>, 4>
+ ContainingClassVector;
+ ContainingClassVector MemberOfClass;
+ QualType Composite1 = Context.getCanonicalType(T1),
+ Composite2 = Context.getCanonicalType(T2);
+ unsigned NeedConstBefore = 0;
+ do {
+ const PointerType *Ptr1, *Ptr2;
+ if ((Ptr1 = Composite1->getAs<PointerType>()) &&
+ (Ptr2 = Composite2->getAs<PointerType>())) {
+ Composite1 = Ptr1->getPointeeType();
+ Composite2 = Ptr2->getPointeeType();
+
+ // If we're allowed to create a non-standard composite type, keep track
+ // of where we need to fill in additional 'const' qualifiers.
+ if (NonStandardCompositeType &&
+ Composite1.getCVRQualifiers() != Composite2.getCVRQualifiers())
+ NeedConstBefore = QualifierUnion.size();
+
+ QualifierUnion.push_back(
+ Composite1.getCVRQualifiers() | Composite2.getCVRQualifiers());
+ MemberOfClass.push_back(std::make_pair((const Type *)0, (const Type *)0));
+ continue;
+ }
+
+ const MemberPointerType *MemPtr1, *MemPtr2;
+ if ((MemPtr1 = Composite1->getAs<MemberPointerType>()) &&
+ (MemPtr2 = Composite2->getAs<MemberPointerType>())) {
+ Composite1 = MemPtr1->getPointeeType();
+ Composite2 = MemPtr2->getPointeeType();
+
+ // If we're allowed to create a non-standard composite type, keep track
+ // of where we need to fill in additional 'const' qualifiers.
+ if (NonStandardCompositeType &&
+ Composite1.getCVRQualifiers() != Composite2.getCVRQualifiers())
+ NeedConstBefore = QualifierUnion.size();
+
+ QualifierUnion.push_back(
+ Composite1.getCVRQualifiers() | Composite2.getCVRQualifiers());
+ MemberOfClass.push_back(std::make_pair(MemPtr1->getClass(),
+ MemPtr2->getClass()));
+ continue;
+ }
+
+ // FIXME: block pointer types?
+
+ // Cannot unwrap any more types.
+ break;
+ } while (true);
+
+ if (NeedConstBefore && NonStandardCompositeType) {
+ // Extension: Add 'const' to qualifiers that come before the first qualifier
+ // mismatch, so that our (non-standard!) composite type meets the
+ // requirements of C++ [conv.qual]p4 bullet 3.
+ for (unsigned I = 0; I != NeedConstBefore; ++I) {
+ if ((QualifierUnion[I] & Qualifiers::Const) == 0) {
+ QualifierUnion[I] = QualifierUnion[I] | Qualifiers::Const;
+ *NonStandardCompositeType = true;
+ }
+ }
+ }
+
+ // Rewrap the composites as pointers or member pointers with the union CVRs.
+ ContainingClassVector::reverse_iterator MOC
+ = MemberOfClass.rbegin();
+ for (QualifierVector::reverse_iterator
+ I = QualifierUnion.rbegin(),
+ E = QualifierUnion.rend();
+ I != E; (void)++I, ++MOC) {
+ Qualifiers Quals = Qualifiers::fromCVRMask(*I);
+ if (MOC->first && MOC->second) {
+ // Rebuild member pointer type
+ Composite1 = Context.getMemberPointerType(
+ Context.getQualifiedType(Composite1, Quals),
+ MOC->first);
+ Composite2 = Context.getMemberPointerType(
+ Context.getQualifiedType(Composite2, Quals),
+ MOC->second);
+ } else {
+ // Rebuild pointer type
+ Composite1
+ = Context.getPointerType(Context.getQualifiedType(Composite1, Quals));
+ Composite2
+ = Context.getPointerType(Context.getQualifiedType(Composite2, Quals));
+ }
+ }
+
+ // Try to convert to the first composite pointer type.
+ InitializedEntity Entity1
+ = InitializedEntity::InitializeTemporary(Composite1);
+ InitializationKind Kind
+ = InitializationKind::CreateCopy(Loc, SourceLocation());
+ InitializationSequence E1ToC1(*this, Entity1, Kind, &E1, 1);
+ InitializationSequence E2ToC1(*this, Entity1, Kind, &E2, 1);
+
+ if (E1ToC1 && E2ToC1) {
+ // Conversion to Composite1 is viable.
+ if (!Context.hasSameType(Composite1, Composite2)) {
+ // Composite2 is a different type from Composite1. Check whether
+ // Composite2 is also viable.
+ InitializedEntity Entity2
+ = InitializedEntity::InitializeTemporary(Composite2);
+ InitializationSequence E1ToC2(*this, Entity2, Kind, &E1, 1);
+ InitializationSequence E2ToC2(*this, Entity2, Kind, &E2, 1);
+ if (E1ToC2 && E2ToC2) {
+ // Both Composite1 and Composite2 are viable and are different;
+ // this is an ambiguity.
+ return QualType();
+ }
+ }
+
+ // Convert E1 to Composite1
+ ExprResult E1Result
+ = E1ToC1.Perform(*this, Entity1, Kind, MultiExprArg(*this,&E1,1));
+ if (E1Result.isInvalid())
+ return QualType();
+ E1 = E1Result.takeAs<Expr>();
+
+ // Convert E2 to Composite1
+ ExprResult E2Result
+ = E2ToC1.Perform(*this, Entity1, Kind, MultiExprArg(*this,&E2,1));
+ if (E2Result.isInvalid())
+ return QualType();
+ E2 = E2Result.takeAs<Expr>();
+
+ return Composite1;
+ }
+
+ // Check whether Composite2 is viable.
+ InitializedEntity Entity2
+ = InitializedEntity::InitializeTemporary(Composite2);
+ InitializationSequence E1ToC2(*this, Entity2, Kind, &E1, 1);
+ InitializationSequence E2ToC2(*this, Entity2, Kind, &E2, 1);
+ if (!E1ToC2 || !E2ToC2)
+ return QualType();
+
+ // Convert E1 to Composite2
+ ExprResult E1Result
+ = E1ToC2.Perform(*this, Entity2, Kind, MultiExprArg(*this, &E1, 1));
+ if (E1Result.isInvalid())
+ return QualType();
+ E1 = E1Result.takeAs<Expr>();
+
+ // Convert E2 to Composite2
+ ExprResult E2Result
+ = E2ToC2.Perform(*this, Entity2, Kind, MultiExprArg(*this, &E2, 1));
+ if (E2Result.isInvalid())
+ return QualType();
+ E2 = E2Result.takeAs<Expr>();
+
+ return Composite2;
+}
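+
+// Illustrative examples (hypothetical code):
+//   const int *p; volatile int *q;
+//   c ? p : q;        // composite pointer type: 'const volatile int *'
+//   int **a; const int **b;
+//   c ? a : b;        // no standard composite type; with the extension above,
+//                     // 'const int *const *' is used and
+//                     // *NonStandardCompositeType is set to true.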
+
+ExprResult Sema::MaybeBindToTemporary(Expr *E) {
+ if (!E)
+ return ExprError();
+
+ assert(!isa<CXXBindTemporaryExpr>(E) && "Double-bound temporary?");
+
+ // If the result is a glvalue, we shouldn't bind it.
+ if (!E->isRValue())
+ return Owned(E);
+
+ // In ARC, calls that return a retainable type can return retained,
+ // in which case we have to insert a consuming cast.
+ if (getLangOpts().ObjCAutoRefCount &&
+ E->getType()->isObjCRetainableType()) {
+
+ bool ReturnsRetained;
+
+ // For actual calls, we compute this by examining the type of the
+ // called value.
+ if (CallExpr *Call = dyn_cast<CallExpr>(E)) {
+ Expr *Callee = Call->getCallee()->IgnoreParens();
+ QualType T = Callee->getType();
+
+ if (T == Context.BoundMemberTy) {
+ // Handle pointer-to-members.
+ if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Callee))
+ T = BinOp->getRHS()->getType();
+ else if (MemberExpr *Mem = dyn_cast<MemberExpr>(Callee))
+ T = Mem->getMemberDecl()->getType();
+ }
+
+ if (const PointerType *Ptr = T->getAs<PointerType>())
+ T = Ptr->getPointeeType();
+ else if (const BlockPointerType *Ptr = T->getAs<BlockPointerType>())
+ T = Ptr->getPointeeType();
+ else if (const MemberPointerType *MemPtr = T->getAs<MemberPointerType>())
+ T = MemPtr->getPointeeType();
+
+ const FunctionType *FTy = T->getAs<FunctionType>();
+ assert(FTy && "call to value not of function type?");
+ ReturnsRetained = FTy->getExtInfo().getProducesResult();
+
+ // ActOnStmtExpr arranges things so that StmtExprs of retainable
+ // type always produce a +1 object.
+ } else if (isa<StmtExpr>(E)) {
+ ReturnsRetained = true;
+
+ // We hit this case with the lambda conversion-to-block optimization;
+ // we don't want any extra casts here.
+ } else if (isa<CastExpr>(E) &&
+ isa<BlockExpr>(cast<CastExpr>(E)->getSubExpr())) {
+ return Owned(E);
+
+ // For message sends and property references, we try to find an
+ // actual method. FIXME: we should infer retention by selector in
+ // cases where we don't have an actual method.
+ } else {
+ ObjCMethodDecl *D = 0;
+ if (ObjCMessageExpr *Send = dyn_cast<ObjCMessageExpr>(E)) {
+ D = Send->getMethodDecl();
+ } else if (ObjCNumericLiteral *NumLit = dyn_cast<ObjCNumericLiteral>(E)) {
+ D = NumLit->getObjCNumericLiteralMethod();
+ } else if (ObjCArrayLiteral *ArrayLit = dyn_cast<ObjCArrayLiteral>(E)) {
+ D = ArrayLit->getArrayWithObjectsMethod();
+ } else if (ObjCDictionaryLiteral *DictLit
+ = dyn_cast<ObjCDictionaryLiteral>(E)) {
+ D = DictLit->getDictWithObjectsMethod();
+ }
+
+ ReturnsRetained = (D && D->hasAttr<NSReturnsRetainedAttr>());
+
+ // Don't do reclaims on performSelector calls; despite their
+ // return type, the invoked method doesn't necessarily actually
+ // return an object.
+ if (!ReturnsRetained &&
+ D && D->getMethodFamily() == OMF_performSelector)
+ return Owned(E);
+ }
+
+ // Don't reclaim an object of Class type.
+ if (!ReturnsRetained && E->getType()->isObjCARCImplicitlyUnretainedType())
+ return Owned(E);
+
+ ExprNeedsCleanups = true;
+
+ CastKind ck = (ReturnsRetained ? CK_ARCConsumeObject
+ : CK_ARCReclaimReturnedObject);
+ return Owned(ImplicitCastExpr::Create(Context, E->getType(), ck, E, 0,
+ VK_RValue));
+ }
+
+ if (!getLangOpts().CPlusPlus)
+ return Owned(E);
+
+ // Search for the base element type (cf. ASTContext::getBaseElementType) with
+ // a fast path for the common case that the type is directly a RecordType.
+ const Type *T = Context.getCanonicalType(E->getType().getTypePtr());
+ const RecordType *RT = 0;
+ while (!RT) {
+ switch (T->getTypeClass()) {
+ case Type::Record:
+ RT = cast<RecordType>(T);
+ break;
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ T = cast<ArrayType>(T)->getElementType().getTypePtr();
+ break;
+ default:
+ return Owned(E);
+ }
+ }
+
+ // That should be enough to guarantee that this type is complete, if we're
+ // not processing a decltype expression.
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->isInvalidDecl() || RD->isDependentContext())
+ return Owned(E);
+
+ bool IsDecltype = ExprEvalContexts.back().IsDecltype;
+ CXXDestructorDecl *Destructor = IsDecltype ? 0 : LookupDestructor(RD);
+
+ if (Destructor) {
+ MarkFunctionReferenced(E->getExprLoc(), Destructor);
+ CheckDestructorAccess(E->getExprLoc(), Destructor,
+ PDiag(diag::err_access_dtor_temp)
+ << E->getType());
+ DiagnoseUseOfDecl(Destructor, E->getExprLoc());
+
+ // If destructor is trivial, we can avoid the extra copy.
+ if (Destructor->isTrivial())
+ return Owned(E);
+
+ // We need a cleanup, but we don't need to remember the temporary.
+ ExprNeedsCleanups = true;
+ }
+
+ CXXTemporary *Temp = CXXTemporary::Create(Context, Destructor);
+ CXXBindTemporaryExpr *Bind = CXXBindTemporaryExpr::Create(Context, Temp, E);
+
+ if (IsDecltype)
+ ExprEvalContexts.back().DelayedDecltypeBinds.push_back(Bind);
+
+ return Owned(Bind);
+}
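+
+// For example (hypothetical code): a full-expression operand such as
+// 'std::string("x")' is wrapped in a CXXBindTemporaryExpr here so that the
+// (non-trivial) destructor runs at the end of the full-expression.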
+
+ExprResult
+Sema::MaybeCreateExprWithCleanups(ExprResult SubExpr) {
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ return Owned(MaybeCreateExprWithCleanups(SubExpr.take()));
+}
+
+Expr *Sema::MaybeCreateExprWithCleanups(Expr *SubExpr) {
+ assert(SubExpr && "sub expression can't be null!");
+
+ CleanupVarDeclMarking();
+
+ unsigned FirstCleanup = ExprEvalContexts.back().NumCleanupObjects;
+ assert(ExprCleanupObjects.size() >= FirstCleanup);
+ assert(ExprNeedsCleanups || ExprCleanupObjects.size() == FirstCleanup);
+ if (!ExprNeedsCleanups)
+ return SubExpr;
+
+ ArrayRef<ExprWithCleanups::CleanupObject> Cleanups
+ = llvm::makeArrayRef(ExprCleanupObjects.begin() + FirstCleanup,
+ ExprCleanupObjects.size() - FirstCleanup);
+
+ Expr *E = ExprWithCleanups::Create(Context, SubExpr, Cleanups);
+ DiscardCleanupsInEvaluationContext();
+
+ return E;
+}
+
+Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) {
+ assert(SubStmt && "sub statement can't be null!");
+
+ CleanupVarDeclMarking();
+
+ if (!ExprNeedsCleanups)
+ return SubStmt;
+
+ // FIXME: In order to attach the temporaries, wrap the statement into
+ // a StmtExpr; currently this is only used for asm statements.
+ // This is hacky, either create a new CXXStmtWithTemporaries statement or
+ // a new AsmStmtWithTemporaries.
+ CompoundStmt *CompStmt = new (Context) CompoundStmt(Context, &SubStmt, 1,
+ SourceLocation(),
+ SourceLocation());
+ Expr *E = new (Context) StmtExpr(CompStmt, Context.VoidTy, SourceLocation(),
+ SourceLocation());
+ return MaybeCreateExprWithCleanups(E);
+}
+
+/// Process the expression contained within a decltype. For such expressions,
+/// certain semantic checks on temporaries are delayed until this point, and
+/// are omitted for the 'topmost' call in the decltype expression. If the
+/// topmost call bound a temporary, strip that temporary off the expression.
+ExprResult Sema::ActOnDecltypeExpression(Expr *E) {
+ ExpressionEvaluationContextRecord &Rec = ExprEvalContexts.back();
+ assert(Rec.IsDecltype && "not in a decltype expression");
+
+ // C++11 [expr.call]p11:
+ // If a function call is a prvalue of object type,
+ // -- if the function call is either
+ // -- the operand of a decltype-specifier, or
+ // -- the right operand of a comma operator that is the operand of a
+ // decltype-specifier,
+ // a temporary object is not introduced for the prvalue.
+
+ // Recursively rebuild ParenExprs and comma expressions to strip out the
+ // outermost CXXBindTemporaryExpr, if any.
+ if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ ExprResult SubExpr = ActOnDecltypeExpression(PE->getSubExpr());
+ if (SubExpr.isInvalid())
+ return ExprError();
+ if (SubExpr.get() == PE->getSubExpr())
+ return Owned(E);
+ return ActOnParenExpr(PE->getLParen(), PE->getRParen(), SubExpr.take());
+ }
+ if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (BO->getOpcode() == BO_Comma) {
+ ExprResult RHS = ActOnDecltypeExpression(BO->getRHS());
+ if (RHS.isInvalid())
+ return ExprError();
+ if (RHS.get() == BO->getRHS())
+ return Owned(E);
+ return Owned(new (Context) BinaryOperator(BO->getLHS(), RHS.take(),
+ BO_Comma, BO->getType(),
+ BO->getValueKind(),
+ BO->getObjectKind(),
+ BO->getOperatorLoc()));
+ }
+ }
+
+ CXXBindTemporaryExpr *TopBind = dyn_cast<CXXBindTemporaryExpr>(E);
+ if (TopBind)
+ E = TopBind->getSubExpr();
+
+ // Disable the special decltype handling now.
+ Rec.IsDecltype = false;
+
+ // Perform the semantic checks we delayed until this point.
+ CallExpr *TopCall = dyn_cast<CallExpr>(E);
+ for (unsigned I = 0, N = Rec.DelayedDecltypeCalls.size(); I != N; ++I) {
+ CallExpr *Call = Rec.DelayedDecltypeCalls[I];
+ if (Call == TopCall)
+ continue;
+
+ if (CheckCallReturnType(Call->getCallReturnType(),
+ Call->getLocStart(),
+ Call, Call->getDirectCallee()))
+ return ExprError();
+ }
+
+ // Now all relevant types are complete, check the destructors are accessible
+ // and non-deleted, and annotate them on the temporaries.
+ for (unsigned I = 0, N = Rec.DelayedDecltypeBinds.size(); I != N; ++I) {
+ CXXBindTemporaryExpr *Bind = Rec.DelayedDecltypeBinds[I];
+ if (Bind == TopBind)
+ continue;
+
+ CXXTemporary *Temp = Bind->getTemporary();
+
+ CXXRecordDecl *RD =
+ Bind->getType()->getBaseElementTypeUnsafe()->getAsCXXRecordDecl();
+ CXXDestructorDecl *Destructor = LookupDestructor(RD);
+ Temp->setDestructor(Destructor);
+
+ MarkFunctionReferenced(E->getExprLoc(), Destructor);
+ CheckDestructorAccess(E->getExprLoc(), Destructor,
+ PDiag(diag::err_access_dtor_temp)
+ << E->getType());
+ DiagnoseUseOfDecl(Destructor, E->getExprLoc());
+
+ // We need a cleanup, but we don't need to remember the temporary.
+ ExprNeedsCleanups = true;
+ }
+
+ // Possibly strip off the top CXXBindTemporaryExpr.
+ return Owned(E);
+}
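+
+// Illustrative example (hypothetical code): given
+//   struct Incomplete;        // never defined
+//   Incomplete make();
+// 'decltype(make())' is well-formed because no temporary is introduced for the
+// topmost call, so the delayed return-type and destructor checks are skipped
+// for it.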
+
+ExprResult
+Sema::ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc,
+ tok::TokenKind OpKind, ParsedType &ObjectType,
+ bool &MayBePseudoDestructor) {
+ // Since this might be a postfix expression, get rid of ParenListExprs.
+ ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Base);
+ if (Result.isInvalid()) return ExprError();
+ Base = Result.get();
+
+ Result = CheckPlaceholderExpr(Base);
+ if (Result.isInvalid()) return ExprError();
+ Base = Result.take();
+
+ QualType BaseType = Base->getType();
+ MayBePseudoDestructor = false;
+ if (BaseType->isDependentType()) {
+ // If we have a pointer to a dependent type and are using the -> operator,
+ // the object type is the type that the pointer points to. We might still
+ // have enough information about that type to do something useful.
+ if (OpKind == tok::arrow)
+ if (const PointerType *Ptr = BaseType->getAs<PointerType>())
+ BaseType = Ptr->getPointeeType();
+
+ ObjectType = ParsedType::make(BaseType);
+ MayBePseudoDestructor = true;
+ return Owned(Base);
+ }
+
+ // C++ [over.match.oper]p8:
+ // [...] When operator-> returns, the operator-> is applied to the value
+ // returned, with the original second operand.
+ if (OpKind == tok::arrow) {
+ // The set of types we've considered so far.
+ llvm::SmallPtrSet<CanQualType,8> CTypes;
+ SmallVector<SourceLocation, 8> Locations;
+ CTypes.insert(Context.getCanonicalType(BaseType));
+
+ while (BaseType->isRecordType()) {
+ Result = BuildOverloadedArrowExpr(S, Base, OpLoc);
+ if (Result.isInvalid())
+ return ExprError();
+ Base = Result.get();
+ if (CXXOperatorCallExpr *OpCall = dyn_cast<CXXOperatorCallExpr>(Base))
+ Locations.push_back(OpCall->getDirectCallee()->getLocation());
+ BaseType = Base->getType();
+ CanQualType CBaseType = Context.getCanonicalType(BaseType);
+ if (!CTypes.insert(CBaseType)) {
+ Diag(OpLoc, diag::err_operator_arrow_circular);
+ for (unsigned i = 0; i < Locations.size(); i++)
+ Diag(Locations[i], diag::note_declared_at);
+ return ExprError();
+ }
+ }
+
+ if (BaseType->isPointerType() || BaseType->isObjCObjectPointerType())
+ BaseType = BaseType->getPointeeType();
+ }
+
+ // Objective-C properties allow "." access on Objective-C pointer types,
+ // so adjust the base type to the object type itself.
+ if (BaseType->isObjCObjectPointerType())
+ BaseType = BaseType->getPointeeType();
+
+ // C++ [basic.lookup.classref]p2:
+ // [...] If the type of the object expression is of pointer to scalar
+ // type, the unqualified-id is looked up in the context of the complete
+ // postfix-expression.
+ //
+ // This also indicates that we could be parsing a pseudo-destructor-name.
+ // Note that Objective-C class and object types can be pseudo-destructor
+ // expressions or normal member (ivar or property) access expressions.
+ if (BaseType->isObjCObjectOrInterfaceType()) {
+ MayBePseudoDestructor = true;
+ } else if (!BaseType->isRecordType()) {
+ ObjectType = ParsedType();
+ MayBePseudoDestructor = true;
+ return Owned(Base);
+ }
+
+ // The object type must be complete (or dependent).
+ if (!BaseType->isDependentType() &&
+ RequireCompleteType(OpLoc, BaseType,
+ PDiag(diag::err_incomplete_member_access)))
+ return ExprError();
+
+ // C++ [basic.lookup.classref]p2:
+ // If the id-expression in a class member access (5.2.5) is an
+ // unqualified-id, and the type of the object expression is of a class
+ // type C (or of pointer to a class type C), the unqualified-id is looked
+ // up in the scope of class C. [...]
+ ObjectType = ParsedType::make(BaseType);
+ return move(Base);
+}
+
+ExprResult Sema::DiagnoseDtorReference(SourceLocation NameLoc,
+ Expr *MemExpr) {
+ SourceLocation ExpectedLParenLoc = PP.getLocForEndOfToken(NameLoc);
+ Diag(MemExpr->getLocStart(), diag::err_dtor_expr_without_call)
+ << isa<CXXPseudoDestructorExpr>(MemExpr)
+ << FixItHint::CreateInsertion(ExpectedLParenLoc, "()");
+
+ return ActOnCallExpr(/*Scope*/ 0,
+ MemExpr,
+ /*LPLoc*/ ExpectedLParenLoc,
+ MultiExprArg(),
+ /*RPLoc*/ ExpectedLParenLoc);
+}
+
+static bool CheckArrow(Sema& S, QualType& ObjectType, Expr *&Base,
+ tok::TokenKind& OpKind, SourceLocation OpLoc) {
+ if (Base->hasPlaceholderType()) {
+ ExprResult result = S.CheckPlaceholderExpr(Base);
+ if (result.isInvalid()) return true;
+ Base = result.take();
+ }
+ ObjectType = Base->getType();
+
+ // C++ [expr.pseudo]p2:
+ // The left-hand side of the dot operator shall be of scalar type. The
+ // left-hand side of the arrow operator shall be of pointer to scalar type.
+ // This scalar type is the object type.
+ // Note that this is rather different from the normal handling for the
+ // arrow operator.
+ if (OpKind == tok::arrow) {
+ if (const PointerType *Ptr = ObjectType->getAs<PointerType>()) {
+ ObjectType = Ptr->getPointeeType();
+ } else if (!Base->isTypeDependent()) {
+ // The user wrote "p->" when she probably meant "p."; fix it.
+ S.Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
+ << ObjectType << true
+ << FixItHint::CreateReplacement(OpLoc, ".");
+ if (S.isSFINAEContext())
+ return true;
+
+ OpKind = tok::period;
+ }
+ }
+
+ return false;
+}
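+
+// For example (hypothetical code): with 'typedef int T; T n;', writing
+// 'n->~T()' is diagnosed with a fix-it suggesting 'n.~T()', and parsing
+// continues as if '.' had been written (except in a SFINAE context).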
+
+ExprResult Sema::BuildPseudoDestructorExpr(Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ const CXXScopeSpec &SS,
+ TypeSourceInfo *ScopeTypeInfo,
+ SourceLocation CCLoc,
+ SourceLocation TildeLoc,
+ PseudoDestructorTypeStorage Destructed,
+ bool HasTrailingLParen) {
+ TypeSourceInfo *DestructedTypeInfo = Destructed.getTypeSourceInfo();
+
+ QualType ObjectType;
+ if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc))
+ return ExprError();
+
+ if (!ObjectType->isDependentType() && !ObjectType->isScalarType()) {
+ if (getLangOpts().MicrosoftMode && ObjectType->isVoidType())
+ Diag(OpLoc, diag::ext_pseudo_dtor_on_void) << Base->getSourceRange();
+ else
+ Diag(OpLoc, diag::err_pseudo_dtor_base_not_scalar)
+ << ObjectType << Base->getSourceRange();
+ return ExprError();
+ }
+
+ // C++ [expr.pseudo]p2:
+ // [...] The cv-unqualified versions of the object type and of the type
+ // designated by the pseudo-destructor-name shall be the same type.
+ if (DestructedTypeInfo) {
+ QualType DestructedType = DestructedTypeInfo->getType();
+ SourceLocation DestructedTypeStart
+ = DestructedTypeInfo->getTypeLoc().getLocalSourceRange().getBegin();
+ if (!DestructedType->isDependentType() && !ObjectType->isDependentType()) {
+ if (!Context.hasSameUnqualifiedType(DestructedType, ObjectType)) {
+ Diag(DestructedTypeStart, diag::err_pseudo_dtor_type_mismatch)
+ << ObjectType << DestructedType << Base->getSourceRange()
+ << DestructedTypeInfo->getTypeLoc().getLocalSourceRange();
+
+ // Recover by setting the destructed type to the object type.
+ DestructedType = ObjectType;
+ DestructedTypeInfo = Context.getTrivialTypeSourceInfo(ObjectType,
+ DestructedTypeStart);
+ Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
+ } else if (DestructedType.getObjCLifetime() !=
+ ObjectType.getObjCLifetime()) {
+
+ if (DestructedType.getObjCLifetime() == Qualifiers::OCL_None) {
+ // Okay: just pretend that the user provided the correctly-qualified
+ // type.
+ } else {
+ Diag(DestructedTypeStart, diag::err_arc_pseudo_dtor_inconstant_quals)
+ << ObjectType << DestructedType << Base->getSourceRange()
+ << DestructedTypeInfo->getTypeLoc().getLocalSourceRange();
+ }
+
+ // Recover by setting the destructed type to the object type.
+ DestructedType = ObjectType;
+ DestructedTypeInfo = Context.getTrivialTypeSourceInfo(ObjectType,
+ DestructedTypeStart);
+ Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
+ }
+ }
+ }
+
+ // C++ [expr.pseudo]p2:
+ // [...] Furthermore, the two type-names in a pseudo-destructor-name of the
+ // form
+ //
+ // ::[opt] nested-name-specifier[opt] type-name :: ~ type-name
+ //
+ // shall designate the same scalar type.
+ if (ScopeTypeInfo) {
+ QualType ScopeType = ScopeTypeInfo->getType();
+ if (!ScopeType->isDependentType() && !ObjectType->isDependentType() &&
+ !Context.hasSameUnqualifiedType(ScopeType, ObjectType)) {
+
+ Diag(ScopeTypeInfo->getTypeLoc().getLocalSourceRange().getBegin(),
+ diag::err_pseudo_dtor_type_mismatch)
+ << ObjectType << ScopeType << Base->getSourceRange()
+ << ScopeTypeInfo->getTypeLoc().getLocalSourceRange();
+
+ ScopeType = QualType();
+ ScopeTypeInfo = 0;
+ }
+ }
+
+ Expr *Result
+ = new (Context) CXXPseudoDestructorExpr(Context, Base,
+ OpKind == tok::arrow, OpLoc,
+ SS.getWithLocInContext(Context),
+ ScopeTypeInfo,
+ CCLoc,
+ TildeLoc,
+ Destructed);
+
+ if (HasTrailingLParen)
+ return Owned(Result);
+
+ return DiagnoseDtorReference(Destructed.getLocation(), Result);
+}
+
+ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ CXXScopeSpec &SS,
+ UnqualifiedId &FirstTypeName,
+ SourceLocation CCLoc,
+ SourceLocation TildeLoc,
+ UnqualifiedId &SecondTypeName,
+ bool HasTrailingLParen) {
+ assert((FirstTypeName.getKind() == UnqualifiedId::IK_TemplateId ||
+ FirstTypeName.getKind() == UnqualifiedId::IK_Identifier) &&
+ "Invalid first type name in pseudo-destructor");
+ assert((SecondTypeName.getKind() == UnqualifiedId::IK_TemplateId ||
+ SecondTypeName.getKind() == UnqualifiedId::IK_Identifier) &&
+ "Invalid second type name in pseudo-destructor");
+
+ QualType ObjectType;
+ if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc))
+ return ExprError();
+
+ // Compute the object type that we should use for name lookup purposes. Only
+ // record types and dependent types matter.
+ ParsedType ObjectTypePtrForLookup;
+ if (!SS.isSet()) {
+ if (ObjectType->isRecordType())
+ ObjectTypePtrForLookup = ParsedType::make(ObjectType);
+ else if (ObjectType->isDependentType())
+ ObjectTypePtrForLookup = ParsedType::make(Context.DependentTy);
+ }
+
+ // Convert the name of the type being destructed (following the ~) into a
+ // type (with source-location information).
+ QualType DestructedType;
+ TypeSourceInfo *DestructedTypeInfo = 0;
+ PseudoDestructorTypeStorage Destructed;
+ if (SecondTypeName.getKind() == UnqualifiedId::IK_Identifier) {
+ ParsedType T = getTypeName(*SecondTypeName.Identifier,
+ SecondTypeName.StartLocation,
+ S, &SS, true, false, ObjectTypePtrForLookup);
+ if (!T &&
+ ((SS.isSet() && !computeDeclContext(SS, false)) ||
+ (!SS.isSet() && ObjectType->isDependentType()))) {
+ // The name of the type being destroyed is a dependent name, and we
+ // couldn't find anything useful in scope. Just store the identifier and
+ // its location, and we'll perform (qualified) name lookup again at
+ // template instantiation time.
+ Destructed = PseudoDestructorTypeStorage(SecondTypeName.Identifier,
+ SecondTypeName.StartLocation);
+ } else if (!T) {
+ Diag(SecondTypeName.StartLocation,
+ diag::err_pseudo_dtor_destructor_non_type)
+ << SecondTypeName.Identifier << ObjectType;
+ if (isSFINAEContext())
+ return ExprError();
+
+ // Recover by assuming we had the right type all along.
+ DestructedType = ObjectType;
+ } else
+ DestructedType = GetTypeFromParser(T, &DestructedTypeInfo);
+ } else {
+ // Resolve the template-id to a type.
+ TemplateIdAnnotation *TemplateId = SecondTypeName.TemplateId;
+ ASTTemplateArgsPtr TemplateArgsPtr(*this,
+ TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+ TypeResult T = ActOnTemplateIdType(TemplateId->SS,
+ TemplateId->TemplateKWLoc,
+ TemplateId->Template,
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->RAngleLoc);
+ if (T.isInvalid() || !T.get()) {
+ // Recover by assuming we had the right type all along.
+ DestructedType = ObjectType;
+ } else
+ DestructedType = GetTypeFromParser(T.get(), &DestructedTypeInfo);
+ }
+
+ // If we've performed some kind of recovery, (re-)build the type source
+ // information.
+ if (!DestructedType.isNull()) {
+ if (!DestructedTypeInfo)
+ DestructedTypeInfo = Context.getTrivialTypeSourceInfo(DestructedType,
+ SecondTypeName.StartLocation);
+ Destructed = PseudoDestructorTypeStorage(DestructedTypeInfo);
+ }
+
+ // Convert the name of the scope type (the type prior to '::') into a type.
+ TypeSourceInfo *ScopeTypeInfo = 0;
+ QualType ScopeType;
+ if (FirstTypeName.getKind() == UnqualifiedId::IK_TemplateId ||
+ FirstTypeName.Identifier) {
+ if (FirstTypeName.getKind() == UnqualifiedId::IK_Identifier) {
+ ParsedType T = getTypeName(*FirstTypeName.Identifier,
+ FirstTypeName.StartLocation,
+ S, &SS, true, false, ObjectTypePtrForLookup);
+ if (!T) {
+ Diag(FirstTypeName.StartLocation,
+ diag::err_pseudo_dtor_destructor_non_type)
+ << FirstTypeName.Identifier << ObjectType;
+
+ if (isSFINAEContext())
+ return ExprError();
+
+ // Just drop this type. It's unnecessary anyway.
+ ScopeType = QualType();
+ } else
+ ScopeType = GetTypeFromParser(T, &ScopeTypeInfo);
+ } else {
+ // Resolve the template-id to a type.
+ TemplateIdAnnotation *TemplateId = FirstTypeName.TemplateId;
+ ASTTemplateArgsPtr TemplateArgsPtr(*this,
+ TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+ TypeResult T = ActOnTemplateIdType(TemplateId->SS,
+ TemplateId->TemplateKWLoc,
+ TemplateId->Template,
+ TemplateId->TemplateNameLoc,
+ TemplateId->LAngleLoc,
+ TemplateArgsPtr,
+ TemplateId->RAngleLoc);
+ if (T.isInvalid() || !T.get()) {
+ // Recover by dropping this type.
+ ScopeType = QualType();
+ } else
+ ScopeType = GetTypeFromParser(T.get(), &ScopeTypeInfo);
+ }
+ }
+
+ if (!ScopeType.isNull() && !ScopeTypeInfo)
+ ScopeTypeInfo = Context.getTrivialTypeSourceInfo(ScopeType,
+ FirstTypeName.StartLocation);
+
+ return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, SS,
+ ScopeTypeInfo, CCLoc, TildeLoc,
+ Destructed, HasTrailingLParen);
+}
+
+ExprResult Sema::ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ SourceLocation TildeLoc,
+ const DeclSpec& DS,
+ bool HasTrailingLParen) {
+ QualType ObjectType;
+ if (CheckArrow(*this, ObjectType, Base, OpKind, OpLoc))
+ return ExprError();
+
+ QualType T = BuildDecltypeType(DS.getRepAsExpr(), DS.getTypeSpecTypeLoc());
+
+ TypeLocBuilder TLB;
+ DecltypeTypeLoc DecltypeTL = TLB.push<DecltypeTypeLoc>(T);
+ DecltypeTL.setNameLoc(DS.getTypeSpecTypeLoc());
+ TypeSourceInfo *DestructedTypeInfo = TLB.getTypeSourceInfo(Context, T);
+ PseudoDestructorTypeStorage Destructed(DestructedTypeInfo);
+
+ return BuildPseudoDestructorExpr(Base, OpLoc, OpKind, CXXScopeSpec(),
+ 0, SourceLocation(), TildeLoc,
+ Destructed, HasTrailingLParen);
+}
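+
+// Illustrative example (hypothetical code): with 'int n;', the expression
+// 'n.~decltype(n)()' reaches this overload; decltype(n) is int, which matches
+// the object type, so the pseudo-destructor call is accepted.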
+
+ExprResult Sema::BuildCXXMemberCallExpr(Expr *E, NamedDecl *FoundDecl,
+ CXXConversionDecl *Method,
+ bool HadMultipleCandidates) {
+ if (Method->getParent()->isLambda() &&
+ Method->getConversionType()->isBlockPointerType()) {
+ // This is a lambda conversion to block pointer; check if the argument
+ // is a LambdaExpr.
+ Expr *SubE = E;
+ CastExpr *CE = dyn_cast<CastExpr>(SubE);
+ if (CE && CE->getCastKind() == CK_NoOp)
+ SubE = CE->getSubExpr();
+ SubE = SubE->IgnoreParens();
+ if (CXXBindTemporaryExpr *BE = dyn_cast<CXXBindTemporaryExpr>(SubE))
+ SubE = BE->getSubExpr();
+ if (isa<LambdaExpr>(SubE)) {
+ // For the conversion to block pointer on a lambda expression, we
+ // construct a special BlockLiteral instead; this doesn't really make
+ // a difference in ARC, but outside of ARC the resulting block literal
+ // follows the normal lifetime rules for block literals instead of being
+ // autoreleased.
+ DiagnosticErrorTrap Trap(Diags);
+ ExprResult Exp = BuildBlockForLambdaConversion(E->getExprLoc(),
+ E->getExprLoc(),
+ Method, E);
+ if (Exp.isInvalid())
+ Diag(E->getExprLoc(), diag::note_lambda_to_block_conv);
+ return Exp;
+ }
+ }
+
+ ExprResult Exp = PerformObjectArgumentInitialization(E, /*Qualifier=*/0,
+ FoundDecl, Method);
+ if (Exp.isInvalid())
+ return true;
+
+ MemberExpr *ME =
+ new (Context) MemberExpr(Exp.take(), /*IsArrow=*/false, Method,
+ SourceLocation(), Context.BoundMemberTy,
+ VK_RValue, OK_Ordinary);
+ if (HadMultipleCandidates)
+ ME->setHadMultipleCandidates(true);
+
+ QualType ResultType = Method->getResultType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultType);
+ ResultType = ResultType.getNonLValueExprType(Context);
+
+ MarkFunctionReferenced(Exp.get()->getLocStart(), Method);
+ CXXMemberCallExpr *CE =
+ new (Context) CXXMemberCallExpr(Context, ME, 0, 0, ResultType, VK,
+ Exp.get()->getLocEnd());
+ return CE;
+}
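+
+// For example (hypothetical code, assuming block support is enabled):
+// initializing a block from a lambda, as in 'void (^blk)(void) = []{};', takes
+// the BuildBlockForLambdaConversion path above rather than synthesizing an
+// autoreleased block through the conversion function.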
+
+ExprResult Sema::BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
+ SourceLocation RParen) {
+ return Owned(new (Context) CXXNoexceptExpr(Context.BoolTy, Operand,
+ Operand->CanThrow(Context),
+ KeyLoc, RParen));
+}
+
+ExprResult Sema::ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation,
+ Expr *Operand, SourceLocation RParen) {
+ return BuildCXXNoexceptExpr(KeyLoc, Operand, RParen);
+}
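+
+// For example (hypothetical code): 'noexcept(1 + 1)' evaluates to true, while
+// 'noexcept(throw 0)' evaluates to false, since CanThrow reports that the
+// operand can throw.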
+
+/// Perform the conversions required for an expression used in a
+/// context that ignores the result.
+ExprResult Sema::IgnoredValueConversions(Expr *E) {
+ if (E->hasPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(E);
+ if (result.isInvalid()) return Owned(E);
+ E = result.take();
+ }
+
+ // C99 6.3.2.1:
+ // [Except in specific positions,] an lvalue that does not have
+ // array type is converted to the value stored in the
+ // designated object (and is no longer an lvalue).
+ if (E->isRValue()) {
+ // In C, function designators (i.e. expressions of function type)
+ // are r-values, but we still want to do function-to-pointer decay
+ // on them. This is both technically correct and convenient for
+ // some clients.
+ if (!getLangOpts().CPlusPlus && E->getType()->isFunctionType())
+ return DefaultFunctionArrayConversion(E);
+
+ return Owned(E);
+ }
+
+ // Otherwise, this rule does not apply in C++, at least not for the moment.
+ if (getLangOpts().CPlusPlus) return Owned(E);
+
+ // GCC seems to also exclude expressions of incomplete enum type.
+ if (const EnumType *T = E->getType()->getAs<EnumType>()) {
+ if (!T->getDecl()->isComplete()) {
+ // FIXME: stupid workaround for a codegen bug!
+ E = ImpCastExprToType(E, Context.VoidTy, CK_ToVoid).take();
+ return Owned(E);
+ }
+ }
+
+ ExprResult Res = DefaultFunctionArrayLvalueConversion(E);
+ if (Res.isInvalid())
+ return Owned(E);
+ E = Res.take();
+
+ if (!E->getType()->isVoidType())
+ RequireCompleteType(E->getExprLoc(), E->getType(),
+ diag::err_incomplete_type);
+ return Owned(E);
+}
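+
+// For example (hypothetical C code): after 'void f(void);', the statement
+// 'f;' decays to a function pointer here even though the value is discarded,
+// whereas in C++ the expression is returned unchanged.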
+
+ExprResult Sema::ActOnFinishFullExpr(Expr *FE) {
+ ExprResult FullExpr = Owned(FE);
+
+ if (!FullExpr.get())
+ return ExprError();
+
+ if (DiagnoseUnexpandedParameterPack(FullExpr.get()))
+ return ExprError();
+
+ // Top-level message sends default to 'id' when we're in a debugger.
+ if (getLangOpts().DebuggerCastResultToId &&
+ FullExpr.get()->getType() == Context.UnknownAnyTy &&
+ isa<ObjCMessageExpr>(FullExpr.get())) {
+ FullExpr = forceUnknownAnyToType(FullExpr.take(), Context.getObjCIdType());
+ if (FullExpr.isInvalid())
+ return ExprError();
+ }
+
+ FullExpr = CheckPlaceholderExpr(FullExpr.take());
+ if (FullExpr.isInvalid())
+ return ExprError();
+
+ FullExpr = IgnoredValueConversions(FullExpr.take());
+ if (FullExpr.isInvalid())
+ return ExprError();
+
+ CheckImplicitConversions(FullExpr.get(), FullExpr.get()->getExprLoc());
+ return MaybeCreateExprWithCleanups(FullExpr);
+}
+
+StmtResult Sema::ActOnFinishFullStmt(Stmt *FullStmt) {
+ if (!FullStmt) return StmtError();
+
+ return MaybeCreateStmtWithCleanups(FullStmt);
+}
+
+Sema::IfExistsResult
+Sema::CheckMicrosoftIfExistsSymbol(Scope *S,
+ CXXScopeSpec &SS,
+ const DeclarationNameInfo &TargetNameInfo) {
+ DeclarationName TargetName = TargetNameInfo.getName();
+ if (!TargetName)
+ return IER_DoesNotExist;
+
+ // If the name itself is dependent, then the result is dependent.
+ if (TargetName.isDependentName())
+ return IER_Dependent;
+
+ // Do the redeclaration lookup in the current scope.
+ LookupResult R(*this, TargetNameInfo, Sema::LookupAnyName,
+ Sema::NotForRedeclaration);
+ LookupParsedName(R, S, &SS);
+ R.suppressDiagnostics();
+
+ switch (R.getResultKind()) {
+ case LookupResult::Found:
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue:
+ case LookupResult::Ambiguous:
+ return IER_Exists;
+
+ case LookupResult::NotFound:
+ return IER_DoesNotExist;
+
+ case LookupResult::NotFoundInCurrentInstantiation:
+ return IER_Dependent;
+ }
+
+ llvm_unreachable("Invalid LookupResult Kind!");
+}
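+
+// For example (hypothetical code, Microsoft mode):
+//   __if_exists(N::name) { /* used only if N::name is found */ }
+// A dependent name yields IER_Dependent and is re-evaluated at instantiation.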
+
+Sema::IfExistsResult
+Sema::CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
+ bool IsIfExists, CXXScopeSpec &SS,
+ UnqualifiedId &Name) {
+ DeclarationNameInfo TargetNameInfo = GetNameFromUnqualifiedId(Name);
+
+ // Check for unexpanded parameter packs.
+ SmallVector<UnexpandedParameterPack, 4> Unexpanded;
+ collectUnexpandedParameterPacks(SS, Unexpanded);
+ collectUnexpandedParameterPacks(TargetNameInfo, Unexpanded);
+ if (!Unexpanded.empty()) {
+ DiagnoseUnexpandedParameterPacks(KeywordLoc,
+ IsIfExists? UPPC_IfExists
+ : UPPC_IfNotExists,
+ Unexpanded);
+ return IER_Error;
+ }
+
+ return CheckMicrosoftIfExistsSymbol(S, SS, TargetNameInfo);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp
new file mode 100644
index 0000000..26b88a2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprMember.cpp
@@ -0,0 +1,1625 @@
+//===--- SemaExprMember.cpp - Semantic Analysis for Expressions -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for member access expressions.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Scope.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Lex/Preprocessor.h"
+
+using namespace clang;
+using namespace sema;
+
+/// Determines if the given class is provably not derived from all of
+/// the prospective base classes.
+static bool IsProvablyNotDerivedFrom(Sema &SemaRef,
+ CXXRecordDecl *Record,
+ const llvm::SmallPtrSet<CXXRecordDecl*, 4> &Bases) {
+ if (Bases.count(Record->getCanonicalDecl()))
+ return false;
+
+ RecordDecl *RD = Record->getDefinition();
+ if (!RD) return false;
+ Record = cast<CXXRecordDecl>(RD);
+
+ for (CXXRecordDecl::base_class_iterator I = Record->bases_begin(),
+ E = Record->bases_end(); I != E; ++I) {
+ CanQualType BaseT = SemaRef.Context.getCanonicalType((*I).getType());
+ CanQual<RecordType> BaseRT = BaseT->getAs<RecordType>();
+ if (!BaseRT) return false;
+
+ CXXRecordDecl *BaseRecord = cast<CXXRecordDecl>(BaseRT->getDecl());
+ if (!IsProvablyNotDerivedFrom(SemaRef, BaseRecord, Bases))
+ return false;
+ }
+
+ return true;
+}
+
+enum IMAKind {
+ /// The reference is definitely not an instance member access.
+ IMA_Static,
+
+ /// The reference may be an implicit instance member access.
+ IMA_Mixed,
+
+ /// The reference may be to an instance member, but it might be invalid if
+ /// so, because the context is not an instance method.
+ IMA_Mixed_StaticContext,
+
+ /// The reference may be to an instance member, but it is invalid if
+ /// so, because the context is from an unrelated class.
+ IMA_Mixed_Unrelated,
+
+ /// The reference is definitely an implicit instance member access.
+ IMA_Instance,
+
+ /// The reference may be to an unresolved using declaration.
+ IMA_Unresolved,
+
+ /// The reference may be to an unresolved using declaration and the
+ /// context is not an instance method.
+ IMA_Unresolved_StaticContext,
+
+ /// The reference refers to a field which is not a member of the containing
+ /// class, which is allowed because we're in C++11 mode and the context is
+ /// unevaluated.
+ IMA_Field_Uneval_Context,
+
+ /// All possible referents are instance members and the current
+ /// context is not an instance method.
+ IMA_Error_StaticContext,
+
+ /// All possible referents are instance members of an unrelated
+ /// class.
+ IMA_Error_Unrelated
+};
+
+/// The given lookup names class member(s) and is not being used for
+/// an address-of-member expression. Classify the type of access
+/// according to whether it's possible that this reference names an
+/// instance member. This is best-effort in dependent contexts; it is okay to
+/// conservatively answer "yes", in which case some errors will simply
+/// not be caught until template-instantiation.
+static IMAKind ClassifyImplicitMemberAccess(Sema &SemaRef,
+ Scope *CurScope,
+ const LookupResult &R) {
+ assert(!R.empty() && (*R.begin())->isCXXClassMember());
+
+ DeclContext *DC = SemaRef.getFunctionLevelDeclContext();
+
+ bool isStaticContext =
+ (!isa<CXXMethodDecl>(DC) ||
+ cast<CXXMethodDecl>(DC)->isStatic());
+
+ // C++0x [expr.prim]p4:
+ // Otherwise, if a member-declarator declares a non-static data member
+ // of a class X, the expression this is a prvalue of type "pointer to X"
+ // within the optional brace-or-equal-initializer.
+ if (CurScope->getFlags() & Scope::ThisScope)
+ isStaticContext = false;
+
+ if (R.isUnresolvableResult())
+ return isStaticContext ? IMA_Unresolved_StaticContext : IMA_Unresolved;
+
+ // Collect all the declaring classes of instance members we find.
+ bool hasNonInstance = false;
+ bool isField = false;
+ llvm::SmallPtrSet<CXXRecordDecl*, 4> Classes;
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
+ NamedDecl *D = *I;
+
+ if (D->isCXXInstanceMember()) {
+ if (dyn_cast<FieldDecl>(D))
+ isField = true;
+
+ CXXRecordDecl *R = cast<CXXRecordDecl>(D->getDeclContext());
+ Classes.insert(R->getCanonicalDecl());
+ }
+ else
+ hasNonInstance = true;
+ }
+
+ // If we didn't find any instance members, it can't be an implicit
+ // member reference.
+ if (Classes.empty())
+ return IMA_Static;
+
+ bool IsCXX11UnevaluatedField = false;
+ if (SemaRef.getLangOpts().CPlusPlus0x && isField) {
+ // C++11 [expr.prim.general]p12:
+ // An id-expression that denotes a non-static data member or non-static
+ // member function of a class can only be used:
+ // (...)
+ // - if that id-expression denotes a non-static data member and it
+ // appears in an unevaluated operand.
+ const Sema::ExpressionEvaluationContextRecord& record
+ = SemaRef.ExprEvalContexts.back();
+ if (record.Context == Sema::Unevaluated)
+ IsCXX11UnevaluatedField = true;
+ }
+
+ // If the current context is not an instance method, it can't be
+ // an implicit member reference.
+ if (isStaticContext) {
+ if (hasNonInstance)
+ return IMA_Mixed_StaticContext;
+
+ return IsCXX11UnevaluatedField ? IMA_Field_Uneval_Context
+ : IMA_Error_StaticContext;
+ }
+
+ CXXRecordDecl *contextClass;
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DC))
+ contextClass = MD->getParent()->getCanonicalDecl();
+ else
+ contextClass = cast<CXXRecordDecl>(DC);
+
+ // [class.mfct.non-static]p3:
+ // ...is used in the body of a non-static member function of class X,
+ // if name lookup (3.4.1) resolves the name in the id-expression to a
+ // non-static non-type member of some class C [...]
+ // ...if C is not X or a base class of X, the class member access expression
+ // is ill-formed.
+ if (R.getNamingClass() &&
+ contextClass->getCanonicalDecl() !=
+ R.getNamingClass()->getCanonicalDecl() &&
+ contextClass->isProvablyNotDerivedFrom(R.getNamingClass()))
+ return hasNonInstance ? IMA_Mixed_Unrelated :
+ IsCXX11UnevaluatedField ? IMA_Field_Uneval_Context :
+ IMA_Error_Unrelated;
+
+ // If we can prove that the current context is unrelated to all the
+ // declaring classes, it can't be an implicit member reference (in
+ // which case it's an error if any of those members are selected).
+ if (IsProvablyNotDerivedFrom(SemaRef, contextClass, Classes))
+ return hasNonInstance ? IMA_Mixed_Unrelated :
+ IsCXX11UnevaluatedField ? IMA_Field_Uneval_Context :
+ IMA_Error_Unrelated;
+
+ return (hasNonInstance ? IMA_Mixed : IMA_Instance);
+}
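+
+// Illustrative example (hypothetical code):
+//   struct S {
+//     int field;
+//     static int f() { return field; }           // IMA_Error_StaticContext
+//     static int g() { return sizeof(field); }   // IMA_Field_Uneval_Context
+//   };                                           //   (C++11 only)
+// In the unevaluated sizeof operand, C++11 allows naming the non-static data
+// member, so only a C++98-compatibility warning is produced.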
+
+/// Diagnose a reference to a field with no object available.
+static void diagnoseInstanceReference(Sema &SemaRef,
+ const CXXScopeSpec &SS,
+ NamedDecl *Rep,
+ const DeclarationNameInfo &nameInfo) {
+ SourceLocation Loc = nameInfo.getLoc();
+ SourceRange Range(Loc);
+ if (SS.isSet()) Range.setBegin(SS.getRange().getBegin());
+
+ DeclContext *FunctionLevelDC = SemaRef.getFunctionLevelDeclContext();
+ CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FunctionLevelDC);
+ CXXRecordDecl *ContextClass = Method ? Method->getParent() : 0;
+ CXXRecordDecl *RepClass = dyn_cast<CXXRecordDecl>(Rep->getDeclContext());
+
+ bool InStaticMethod = Method && Method->isStatic();
+ bool IsField = isa<FieldDecl>(Rep) || isa<IndirectFieldDecl>(Rep);
+
+ if (IsField && InStaticMethod)
+ // "invalid use of member 'x' in static member function"
+ SemaRef.Diag(Loc, diag::err_invalid_member_use_in_static_method)
+ << Range << nameInfo.getName();
+ else if (ContextClass && RepClass && SS.isEmpty() && !InStaticMethod &&
+ !RepClass->Equals(ContextClass) && RepClass->Encloses(ContextClass))
+ // Unqualified lookup in a non-static member function found a member of an
+ // enclosing class.
+ SemaRef.Diag(Loc, diag::err_nested_non_static_member_use)
+ << IsField << RepClass << nameInfo.getName() << ContextClass << Range;
+ else if (IsField)
+ SemaRef.Diag(Loc, diag::err_invalid_non_static_member_use)
+ << nameInfo.getName() << Range;
+ else
+ SemaRef.Diag(Loc, diag::err_member_call_without_object)
+ << Range;
+}
+
+/// Builds an expression which might be an implicit member expression.
+ExprResult
+Sema::BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ LookupResult &R,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ switch (ClassifyImplicitMemberAccess(*this, CurScope, R)) {
+ case IMA_Instance:
+ return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, true);
+
+ case IMA_Mixed:
+ case IMA_Mixed_Unrelated:
+ case IMA_Unresolved:
+ return BuildImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs, false);
+
+ case IMA_Field_Uneval_Context:
+ Diag(R.getNameLoc(), diag::warn_cxx98_compat_non_static_member_use)
+ << R.getLookupNameInfo().getName();
+ // Fall through.
+ case IMA_Static:
+ case IMA_Mixed_StaticContext:
+ case IMA_Unresolved_StaticContext:
+ if (TemplateArgs || TemplateKWLoc.isValid())
+ return BuildTemplateIdExpr(SS, TemplateKWLoc, R, false, TemplateArgs);
+ return BuildDeclarationNameExpr(SS, R, false);
+
+ case IMA_Error_StaticContext:
+ case IMA_Error_Unrelated:
+ diagnoseInstanceReference(*this, SS, R.getRepresentativeDecl(),
+ R.getLookupNameInfo());
+ return ExprError();
+ }
+
+ llvm_unreachable("unexpected instance member access kind");
+}
+
+/// Check an ext-vector component access expression.
+///
+/// VK should be set in advance to the value kind of the base
+/// expression.
+static QualType
+CheckExtVectorComponent(Sema &S, QualType baseType, ExprValueKind &VK,
+ SourceLocation OpLoc, const IdentifierInfo *CompName,
+ SourceLocation CompLoc) {
+ // FIXME: Share logic with ExtVectorElementExpr::containsDuplicateElements,
+ // see FIXME there.
+ //
+ // FIXME: This logic can be greatly simplified by splitting it along
+ // halving/not halving and reworking the component checking.
+ const ExtVectorType *vecType = baseType->getAs<ExtVectorType>();
+
+ // The vector accessor can't exceed the number of elements.
+ const char *compStr = CompName->getNameStart();
+
+ // This flag determines whether or not the component is one of the four
+ // special names that indicate a subset of exactly half the elements are
+ // to be selected.
+ bool HalvingSwizzle = false;
+
+ // This flag determines whether or not CompName has an 's' char prefix,
+ // indicating that it is a string of hex values to be used as vector indices.
+ bool HexSwizzle = *compStr == 's' || *compStr == 'S';
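+ // e.g. (illustrative, assuming an ext_vector 'float4 v'): 'v.xyz' uses the
+ // point accessors, 'v.s012' uses hex indices, and 'v.hi' / 'v.even' are the
+ // halving swizzles handled below.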
+
+ bool HasRepeated = false;
+ bool HasIndex[16] = {};
+
+ int Idx;
+
+ // Check that we've found one of the special components, or that the
+ // component names all come from the same set.
+ if (!strcmp(compStr, "hi") || !strcmp(compStr, "lo") ||
+ !strcmp(compStr, "even") || !strcmp(compStr, "odd")) {
+ HalvingSwizzle = true;
+ } else if (!HexSwizzle &&
+ (Idx = vecType->getPointAccessorIdx(*compStr)) != -1) {
+ do {
+ if (HasIndex[Idx]) HasRepeated = true;
+ HasIndex[Idx] = true;
+ compStr++;
+ } while (*compStr && (Idx = vecType->getPointAccessorIdx(*compStr)) != -1);
+ } else {
+ if (HexSwizzle) compStr++;
+ while ((Idx = vecType->getNumericAccessorIdx(*compStr)) != -1) {
+ if (HasIndex[Idx]) HasRepeated = true;
+ HasIndex[Idx] = true;
+ compStr++;
+ }
+ }
+
+ if (!HalvingSwizzle && *compStr) {
+ // We didn't get to the end of the string. This means the component names
+ // didn't come from the same set *or* we encountered an illegal name.
+ S.Diag(OpLoc, diag::err_ext_vector_component_name_illegal)
+ << StringRef(compStr, 1) << SourceRange(CompLoc);
+ return QualType();
+ }
+
+ // Ensure no component accessor exceeds the width of the vector type it
+ // operates on.
+ if (!HalvingSwizzle) {
+ compStr = CompName->getNameStart();
+
+ if (HexSwizzle)
+ compStr++;
+
+ while (*compStr) {
+ if (!vecType->isAccessorWithinNumElements(*compStr++)) {
+ S.Diag(OpLoc, diag::err_ext_vector_component_exceeds_length)
+ << baseType << SourceRange(CompLoc);
+ return QualType();
+ }
+ }
+ }
+
+ // The component accessor looks fine - now we need to compute the actual type.
+ // The vector type is implied by the component accessor. For example,
+ // vec4.b is a float, vec4.xy is a vec2, vec4.rgb is a vec3, etc.
+ // vec4.s0 is a float, vec4.s23 is a vec3, etc.
+ // vec4.hi, vec4.lo, vec4.e, and vec4.o all return vec2.
+ unsigned CompSize = HalvingSwizzle ? (vecType->getNumElements() + 1) / 2
+ : CompName->getLength();
+ if (HexSwizzle)
+ CompSize--;
+
+ if (CompSize == 1)
+ return vecType->getElementType();
+
+ if (HasRepeated) VK = VK_RValue;
+
+ QualType VT = S.Context.getExtVectorType(vecType->getElementType(), CompSize);
+ // Now look up the TypedefDecl from the vector type. Without this,
+ // diagnostics look bad. We want extended vector types to appear built-in.
+ for (Sema::ExtVectorDeclsType::iterator
+ I = S.ExtVectorDecls.begin(S.ExternalSource),
+ E = S.ExtVectorDecls.end();
+ I != E; ++I) {
+ if ((*I)->getUnderlyingType() == VT)
+ return S.Context.getTypedefType(*I);
+ }
+
+ return VT; // should never get here (a typedef type should always be found).
+}
+
+static Decl *FindGetterSetterNameDeclFromProtocolList(const ObjCProtocolDecl*PDecl,
+ IdentifierInfo *Member,
+ const Selector &Sel,
+ ASTContext &Context) {
+ if (Member)
+ if (ObjCPropertyDecl *PD = PDecl->FindPropertyDeclaration(Member))
+ return PD;
+ if (ObjCMethodDecl *OMD = PDecl->getInstanceMethod(Sel))
+ return OMD;
+
+ for (ObjCProtocolDecl::protocol_iterator I = PDecl->protocol_begin(),
+ E = PDecl->protocol_end(); I != E; ++I) {
+ if (Decl *D = FindGetterSetterNameDeclFromProtocolList(*I, Member, Sel,
+ Context))
+ return D;
+ }
+ return 0;
+}
+
+static Decl *FindGetterSetterNameDecl(const ObjCObjectPointerType *QIdTy,
+ IdentifierInfo *Member,
+ const Selector &Sel,
+ ASTContext &Context) {
+ // Check protocols on qualified interfaces.
+ Decl *GDecl = 0;
+ for (ObjCObjectPointerType::qual_iterator I = QIdTy->qual_begin(),
+ E = QIdTy->qual_end(); I != E; ++I) {
+ if (Member)
+ if (ObjCPropertyDecl *PD = (*I)->FindPropertyDeclaration(Member)) {
+ GDecl = PD;
+ break;
+ }
+ // Also must look for a getter or setter name which uses property syntax.
+ if (ObjCMethodDecl *OMD = (*I)->getInstanceMethod(Sel)) {
+ GDecl = OMD;
+ break;
+ }
+ }
+ if (!GDecl) {
+ for (ObjCObjectPointerType::qual_iterator I = QIdTy->qual_begin(),
+ E = QIdTy->qual_end(); I != E; ++I) {
+ // Search in the protocol-qualifier list of current protocol.
+ GDecl = FindGetterSetterNameDeclFromProtocolList(*I, Member, Sel,
+ Context);
+ if (GDecl)
+ return GDecl;
+ }
+ }
+ return GDecl;
+}
+
+ExprResult
+Sema::ActOnDependentMemberExpr(Expr *BaseExpr, QualType BaseType,
+ bool IsArrow, SourceLocation OpLoc,
+ const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierInScope,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ // Even in dependent contexts, try to diagnose base expressions with
+ // obviously wrong types, e.g.:
+ //
+ // T* t;
+ // t.f;
+ //
+ // In Obj-C++, however, the above expression is valid, since it could be
+ // accessing the 'f' property if T is an Obj-C interface. The extra check
+ // allows this, while still reporting an error if T is a struct pointer.
+ if (!IsArrow) {
+ const PointerType *PT = BaseType->getAs<PointerType>();
+ if (PT && (!getLangOpts().ObjC1 ||
+ PT->getPointeeType()->isRecordType())) {
+ assert(BaseExpr && "cannot happen with implicit member accesses");
+ Diag(NameInfo.getLoc(), diag::err_typecheck_member_reference_struct_union)
+ << BaseType << BaseExpr->getSourceRange();
+ return ExprError();
+ }
+ }
+
+ assert(BaseType->isDependentType() ||
+ NameInfo.getName().isDependentName() ||
+ isDependentScopeSpecifier(SS));
+
+ // Get the type being accessed in BaseType. If this is an arrow, the BaseExpr
+ // must have pointer type, and the accessed type is the pointee.
+ return Owned(CXXDependentScopeMemberExpr::Create(Context, BaseExpr, BaseType,
+ IsArrow, OpLoc,
+ SS.getWithLocInContext(Context),
+ TemplateKWLoc,
+ FirstQualifierInScope,
+ NameInfo, TemplateArgs));
+}
+
+/// We know that the given qualified member reference points only to
+/// declarations which do not belong to the static type of the base
+/// expression. Diagnose the problem.
+static void DiagnoseQualifiedMemberReference(Sema &SemaRef,
+ Expr *BaseExpr,
+ QualType BaseType,
+ const CXXScopeSpec &SS,
+ NamedDecl *rep,
+ const DeclarationNameInfo &nameInfo) {
+ // If this is an implicit member access, use a different set of
+ // diagnostics.
+ if (!BaseExpr)
+ return diagnoseInstanceReference(SemaRef, SS, rep, nameInfo);
+
+ SemaRef.Diag(nameInfo.getLoc(), diag::err_qualified_member_of_unrelated)
+ << SS.getRange() << rep << BaseType;
+}
+
+// Check whether the declarations we found through a nested-name
+// specifier in a member expression are actually members of the base
+// type. The restriction here is:
+//
+// C++ [expr.ref]p2:
+// ... In these cases, the id-expression shall name a
+// member of the class or of one of its base classes.
+//
+// So it's perfectly legitimate for the nested-name specifier to name
+// an unrelated class, and for us to find an overload set including
+// decls from classes which are not superclasses, as long as the decl
+// we actually pick through overload resolution is from a superclass.
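+//
+// Hypothetical illustration:
+// struct A { int x; };
+// struct B : A { };
+// struct C { int x; };
+// B b;
+// b.A::x; // fine: A is a base of B
+// b.C::x; // ill-formed: C is unrelated to B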
+bool Sema::CheckQualifiedMemberReference(Expr *BaseExpr,
+ QualType BaseType,
+ const CXXScopeSpec &SS,
+ const LookupResult &R) {
+ const RecordType *BaseRT = BaseType->getAs<RecordType>();
+ if (!BaseRT) {
+ // We can't check this yet because the base type is still
+ // dependent.
+ assert(BaseType->isDependentType());
+ return false;
+ }
+ CXXRecordDecl *BaseRecord = cast<CXXRecordDecl>(BaseRT->getDecl());
+
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
+ // If this is an implicit member reference and we find a
+ // non-instance member, it's not an error.
+ if (!BaseExpr && !(*I)->isCXXInstanceMember())
+ return false;
+
+ // Note that we use the DC of the decl, not the underlying decl.
+ DeclContext *DC = (*I)->getDeclContext();
+ while (DC->isTransparentContext())
+ DC = DC->getParent();
+
+ if (!DC->isRecord())
+ continue;
+
+ llvm::SmallPtrSet<CXXRecordDecl*,4> MemberRecord;
+ MemberRecord.insert(cast<CXXRecordDecl>(DC)->getCanonicalDecl());
+
+ if (!IsProvablyNotDerivedFrom(*this, BaseRecord, MemberRecord))
+ return false;
+ }
+
+ DiagnoseQualifiedMemberReference(*this, BaseExpr, BaseType, SS,
+ R.getRepresentativeDecl(),
+ R.getLookupNameInfo());
+ return true;
+}
+
+namespace {
+
+// Callback to only accept typo corrections that are either a ValueDecl or a
+// FunctionTemplateDecl.
+class RecordMemberExprValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ NamedDecl *ND = candidate.getCorrectionDecl();
+ return ND && (isa<ValueDecl>(ND) || isa<FunctionTemplateDecl>(ND));
+ }
+};
+
+}
+
+static bool
+LookupMemberExprInRecord(Sema &SemaRef, LookupResult &R,
+ SourceRange BaseRange, const RecordType *RTy,
+ SourceLocation OpLoc, CXXScopeSpec &SS,
+ bool HasTemplateArgs) {
+ RecordDecl *RDecl = RTy->getDecl();
+ if (SemaRef.RequireCompleteType(OpLoc, QualType(RTy, 0),
+ SemaRef.PDiag(diag::err_typecheck_incomplete_tag)
+ << BaseRange))
+ return true;
+
+ if (HasTemplateArgs) {
+ // LookupTemplateName doesn't expect these both to exist simultaneously.
+ QualType ObjectType = SS.isSet() ? QualType() : QualType(RTy, 0);
+
+ bool MOUS;
+ SemaRef.LookupTemplateName(R, 0, SS, ObjectType, false, MOUS);
+ return false;
+ }
+
+ DeclContext *DC = RDecl;
+ if (SS.isSet()) {
+ // If the member name was a qualified-id, look into the
+ // nested-name-specifier.
+ DC = SemaRef.computeDeclContext(SS, false);
+
+ if (SemaRef.RequireCompleteDeclContext(SS, DC)) {
+ SemaRef.Diag(SS.getRange().getEnd(), diag::err_typecheck_incomplete_tag)
+ << SS.getRange() << DC;
+ return true;
+ }
+
+ assert(DC && "Cannot handle non-computable dependent contexts in lookup");
+
+ if (!isa<TypeDecl>(DC)) {
+ SemaRef.Diag(R.getNameLoc(), diag::err_qualified_member_nonclass)
+ << DC << SS.getRange();
+ return true;
+ }
+ }
+
+ // The record definition is complete, now look up the member.
+ SemaRef.LookupQualifiedName(R, DC);
+
+ if (!R.empty())
+ return false;
+
+ // We didn't find anything with the given name, so try to correct
+ // for typos.
+ DeclarationName Name = R.getLookupName();
+ RecordMemberExprValidatorCCC Validator;
+ TypoCorrection Corrected = SemaRef.CorrectTypo(R.getLookupNameInfo(),
+ R.getLookupKind(), NULL,
+ &SS, Validator, DC);
+ R.clear();
+ if (NamedDecl *ND = Corrected.getCorrectionDecl()) {
+ std::string CorrectedStr(
+ Corrected.getAsString(SemaRef.getLangOpts()));
+ std::string CorrectedQuotedStr(
+ Corrected.getQuoted(SemaRef.getLangOpts()));
+ R.setLookupName(Corrected.getCorrection());
+ R.addDecl(ND);
+ SemaRef.Diag(R.getNameLoc(), diag::err_no_member_suggest)
+ << Name << DC << CorrectedQuotedStr << SS.getRange()
+ << FixItHint::CreateReplacement(R.getNameLoc(), CorrectedStr);
+ SemaRef.Diag(ND->getLocation(), diag::note_previous_decl)
+ << ND->getDeclName();
+ }
+
+ return false;
+}
+
+ExprResult
+Sema::BuildMemberReferenceExpr(Expr *Base, QualType BaseType,
+ SourceLocation OpLoc, bool IsArrow,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierInScope,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ if (BaseType->isDependentType() ||
+ (SS.isSet() && isDependentScopeSpecifier(SS)))
+ return ActOnDependentMemberExpr(Base, BaseType,
+ IsArrow, OpLoc,
+ SS, TemplateKWLoc, FirstQualifierInScope,
+ NameInfo, TemplateArgs);
+
+ LookupResult R(*this, NameInfo, LookupMemberName);
+
+ // Implicit member accesses.
+ if (!Base) {
+ QualType RecordTy = BaseType;
+ if (IsArrow) RecordTy = RecordTy->getAs<PointerType>()->getPointeeType();
+ if (LookupMemberExprInRecord(*this, R, SourceRange(),
+ RecordTy->getAs<RecordType>(),
+ OpLoc, SS, TemplateArgs != 0))
+ return ExprError();
+
+ // Explicit member accesses.
+ } else {
+ ExprResult BaseResult = Owned(Base);
+ ExprResult Result =
+ LookupMemberExpr(R, BaseResult, IsArrow, OpLoc,
+ SS, /*ObjCImpDecl*/ 0, TemplateArgs != 0);
+
+ if (BaseResult.isInvalid())
+ return ExprError();
+ Base = BaseResult.take();
+
+ if (Result.isInvalid()) {
+ Owned(Base);
+ return ExprError();
+ }
+
+ if (Result.get())
+ return move(Result);
+
+ // LookupMemberExpr can modify Base, and thus change BaseType
+ BaseType = Base->getType();
+ }
+
+ return BuildMemberReferenceExpr(Base, BaseType,
+ OpLoc, IsArrow, SS, TemplateKWLoc,
+ FirstQualifierInScope, R, TemplateArgs);
+}
+
+static ExprResult
+BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
+ const CXXScopeSpec &SS, FieldDecl *Field,
+ DeclAccessPair FoundDecl,
+ const DeclarationNameInfo &MemberNameInfo);
+
+ExprResult
+Sema::BuildAnonymousStructUnionMemberReference(const CXXScopeSpec &SS,
+ SourceLocation loc,
+ IndirectFieldDecl *indirectField,
+ Expr *baseObjectExpr,
+ SourceLocation opLoc) {
+ // First, build the expression that refers to the base object.
+
+ bool baseObjectIsPointer = false;
+ Qualifiers baseQuals;
+
+ // Case 1: the base of the indirect field is not a field.
+ VarDecl *baseVariable = indirectField->getVarDecl();
+ CXXScopeSpec EmptySS;
+ if (baseVariable) {
+ assert(baseVariable->getType()->isRecordType());
+
+ // In principle we could have a member access expression that
+ // accesses an anonymous struct/union that's a static member of
+ // the base object's class. However, under the current standard,
+ // static data members cannot be anonymous structs or unions.
+ // Supporting this is as easy as building a MemberExpr here.
+ assert(!baseObjectExpr && "anonymous struct/union is static data member?");
+
+ DeclarationNameInfo baseNameInfo(DeclarationName(), loc);
+
+ ExprResult result
+ = BuildDeclarationNameExpr(EmptySS, baseNameInfo, baseVariable);
+ if (result.isInvalid()) return ExprError();
+
+ baseObjectExpr = result.take();
+ baseObjectIsPointer = false;
+ baseQuals = baseObjectExpr->getType().getQualifiers();
+
+ // Case 2: the base of the indirect field is a field and the user
+ // wrote a member expression.
+ } else if (baseObjectExpr) {
+ // The caller provided the base object expression. Determine
+ // whether it's a pointer and whether it adds any qualifiers to the
+ // anonymous struct/union fields we're looking into.
+ QualType objectType = baseObjectExpr->getType();
+
+ if (const PointerType *ptr = objectType->getAs<PointerType>()) {
+ baseObjectIsPointer = true;
+ objectType = ptr->getPointeeType();
+ } else {
+ baseObjectIsPointer = false;
+ }
+ baseQuals = objectType.getQualifiers();
+
+ // Case 3: the base of the indirect field is a field and we should
+ // build an implicit member access.
+ } else {
+ // We've found a member of an anonymous struct/union that is
+ // inside a non-anonymous struct/union, so in a well-formed
+ // program our base object expression is "this".
+ QualType ThisTy = getCurrentThisType();
+ if (ThisTy.isNull()) {
+ Diag(loc, diag::err_invalid_member_use_in_static_method)
+ << indirectField->getDeclName();
+ return ExprError();
+ }
+
+ // Our base object expression is "this".
+ CheckCXXThisCapture(loc);
+ baseObjectExpr
+ = new (Context) CXXThisExpr(loc, ThisTy, /*isImplicit=*/ true);
+ baseObjectIsPointer = true;
+ baseQuals = ThisTy->castAs<PointerType>()->getPointeeType().getQualifiers();
+ }
+
+ // Build the implicit member references to the field of the
+ // anonymous struct/union.
+ Expr *result = baseObjectExpr;
+ IndirectFieldDecl::chain_iterator
+ FI = indirectField->chain_begin(), FEnd = indirectField->chain_end();
+
+ // Build the first member access in the chain with full information.
+ if (!baseVariable) {
+ FieldDecl *field = cast<FieldDecl>(*FI);
+
+ // FIXME: use the real found-decl info!
+ DeclAccessPair foundDecl = DeclAccessPair::make(field, field->getAccess());
+
+ // Make a nameInfo that properly uses the anonymous name.
+ DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
+
+ result = BuildFieldReferenceExpr(*this, result, baseObjectIsPointer,
+ EmptySS, field, foundDecl,
+ memberNameInfo).take();
+ baseObjectIsPointer = false;
+
+ // FIXME: check qualified member access
+ }
+
+ // In all cases, we should now skip the first declaration in the chain.
+ ++FI;
+
+ while (FI != FEnd) {
+ FieldDecl *field = cast<FieldDecl>(*FI++);
+
+ // FIXME: these are somewhat meaningless
+ DeclarationNameInfo memberNameInfo(field->getDeclName(), loc);
+ DeclAccessPair foundDecl = DeclAccessPair::make(field, field->getAccess());
+
+ result = BuildFieldReferenceExpr(*this, result, /*isarrow*/ false,
+ (FI == FEnd? SS : EmptySS), field,
+ foundDecl, memberNameInfo).take();
+ }
+
+ return Owned(result);
+}
+
+/// \brief Build a MemberExpr AST node.
+static MemberExpr *BuildMemberExpr(Sema &SemaRef,
+ ASTContext &C, Expr *Base, bool isArrow,
+ const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ ValueDecl *Member,
+ DeclAccessPair FoundDecl,
+ const DeclarationNameInfo &MemberNameInfo,
+ QualType Ty,
+ ExprValueKind VK, ExprObjectKind OK,
+ const TemplateArgumentListInfo *TemplateArgs = 0) {
+ assert((!isArrow || Base->isRValue()) && "-> base must be a pointer rvalue");
+ MemberExpr *E =
+ MemberExpr::Create(C, Base, isArrow, SS.getWithLocInContext(C),
+ TemplateKWLoc, Member, FoundDecl, MemberNameInfo,
+ TemplateArgs, Ty, VK, OK);
+ SemaRef.MarkMemberReferenced(E);
+ return E;
+}
+
+ExprResult
+Sema::BuildMemberReferenceExpr(Expr *BaseExpr, QualType BaseExprType,
+ SourceLocation OpLoc, bool IsArrow,
+ const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierInScope,
+ LookupResult &R,
+ const TemplateArgumentListInfo *TemplateArgs,
+ bool SuppressQualifierCheck) {
+ QualType BaseType = BaseExprType;
+ if (IsArrow) {
+ assert(BaseType->isPointerType());
+ BaseType = BaseType->castAs<PointerType>()->getPointeeType();
+ }
+ R.setBaseObjectType(BaseType);
+
+ const DeclarationNameInfo &MemberNameInfo = R.getLookupNameInfo();
+ DeclarationName MemberName = MemberNameInfo.getName();
+ SourceLocation MemberLoc = MemberNameInfo.getLoc();
+
+ if (R.isAmbiguous())
+ return ExprError();
+
+ if (R.empty()) {
+ // Rederive where we looked up.
+ DeclContext *DC = (SS.isSet()
+ ? computeDeclContext(SS, false)
+ : BaseType->getAs<RecordType>()->getDecl());
+
+ Diag(R.getNameLoc(), diag::err_no_member)
+ << MemberName << DC
+ << (BaseExpr ? BaseExpr->getSourceRange() : SourceRange());
+ return ExprError();
+ }
+
+ // Diagnose lookups that find only declarations from a non-base
+ // type. This is possible for either qualified lookups (which may
+ // have been qualified with an unrelated type) or implicit member
+ // expressions (which were found with unqualified lookup and thus
+ // may have come from an enclosing scope). Note that it's okay for
+ // lookup to find declarations from a non-base type as long as those
+ // aren't the ones picked by overload resolution.
+ if ((SS.isSet() || !BaseExpr ||
+ (isa<CXXThisExpr>(BaseExpr) &&
+ cast<CXXThisExpr>(BaseExpr)->isImplicit())) &&
+ !SuppressQualifierCheck &&
+ CheckQualifiedMemberReference(BaseExpr, BaseType, SS, R))
+ return ExprError();
+
+ // Construct an unresolved result if we in fact got an unresolved
+ // result.
+ if (R.isOverloadedResult() || R.isUnresolvableResult()) {
+ // Suppress any lookup-related diagnostics; we'll do these when we
+ // pick a member.
+ R.suppressDiagnostics();
+
+ UnresolvedMemberExpr *MemExpr
+ = UnresolvedMemberExpr::Create(Context, R.isUnresolvableResult(),
+ BaseExpr, BaseExprType,
+ IsArrow, OpLoc,
+ SS.getWithLocInContext(Context),
+ TemplateKWLoc, MemberNameInfo,
+ TemplateArgs, R.begin(), R.end());
+
+ return Owned(MemExpr);
+ }
+
+ assert(R.isSingleResult());
+ DeclAccessPair FoundDecl = R.begin().getPair();
+ NamedDecl *MemberDecl = R.getFoundDecl();
+
+ // FIXME: diagnose the presence of template arguments now.
+
+ // If the decl being referenced had an error, return an error for this
+ // sub-expr without emitting another error, in order to avoid cascading
+ // error cases.
+ if (MemberDecl->isInvalidDecl())
+ return ExprError();
+
+ // Handle the implicit-member-access case.
+ if (!BaseExpr) {
+ // If this is not an instance member, convert to a non-member access.
+ if (!MemberDecl->isCXXInstanceMember())
+ return BuildDeclarationNameExpr(SS, R.getLookupNameInfo(), MemberDecl);
+
+ SourceLocation Loc = R.getNameLoc();
+ if (SS.getRange().isValid())
+ Loc = SS.getRange().getBegin();
+ CheckCXXThisCapture(Loc);
+ BaseExpr = new (Context) CXXThisExpr(Loc, BaseExprType,/*isImplicit=*/true);
+ }
+
+ bool ShouldCheckUse = true;
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MemberDecl)) {
+ // Don't diagnose the use of a virtual member function unless it's
+ // explicitly qualified.
+ if (MD->isVirtual() && !SS.isSet())
+ ShouldCheckUse = false;
+ }
+
+ // Check the use of this member.
+ if (ShouldCheckUse && DiagnoseUseOfDecl(MemberDecl, MemberLoc)) {
+ Owned(BaseExpr);
+ return ExprError();
+ }
+
+ if (FieldDecl *FD = dyn_cast<FieldDecl>(MemberDecl))
+ return BuildFieldReferenceExpr(*this, BaseExpr, IsArrow,
+ SS, FD, FoundDecl, MemberNameInfo);
+
+ if (IndirectFieldDecl *FD = dyn_cast<IndirectFieldDecl>(MemberDecl))
+ // We may have found a field within an anonymous union or struct
+ // (C++ [class.union]).
+ return BuildAnonymousStructUnionMemberReference(SS, MemberLoc, FD,
+ BaseExpr, OpLoc);
+
+ if (VarDecl *Var = dyn_cast<VarDecl>(MemberDecl)) {
+ return Owned(BuildMemberExpr(*this, Context, BaseExpr, IsArrow, SS,
+ TemplateKWLoc, Var, FoundDecl, MemberNameInfo,
+ Var->getType().getNonReferenceType(),
+ VK_LValue, OK_Ordinary));
+ }
+
+ if (CXXMethodDecl *MemberFn = dyn_cast<CXXMethodDecl>(MemberDecl)) {
+ ExprValueKind valueKind;
+ QualType type;
+ if (MemberFn->isInstance()) {
+ valueKind = VK_RValue;
+ type = Context.BoundMemberTy;
+ } else {
+ valueKind = VK_LValue;
+ type = MemberFn->getType();
+ }
+
+ return Owned(BuildMemberExpr(*this, Context, BaseExpr, IsArrow, SS,
+ TemplateKWLoc, MemberFn, FoundDecl,
+ MemberNameInfo, type, valueKind,
+ OK_Ordinary));
+ }
+ assert(!isa<FunctionDecl>(MemberDecl) && "member function not C++ method?");
+
+ if (EnumConstantDecl *Enum = dyn_cast<EnumConstantDecl>(MemberDecl)) {
+ return Owned(BuildMemberExpr(*this, Context, BaseExpr, IsArrow, SS,
+ TemplateKWLoc, Enum, FoundDecl, MemberNameInfo,
+ Enum->getType(), VK_RValue, OK_Ordinary));
+ }
+
+ Owned(BaseExpr);
+
+ // We found something that we didn't expect. Complain.
+ if (isa<TypeDecl>(MemberDecl))
+ Diag(MemberLoc, diag::err_typecheck_member_reference_type)
+ << MemberName << BaseType << int(IsArrow);
+ else
+ Diag(MemberLoc, diag::err_typecheck_member_reference_unknown)
+ << MemberName << BaseType << int(IsArrow);
+
+ Diag(MemberDecl->getLocation(), diag::note_member_declared_here)
+ << MemberName;
+ R.suppressDiagnostics();
+ return ExprError();
+}
+
+/// Given that normal member access failed on the given expression,
+/// and given that the expression's type involves builtin-id or
+/// builtin-Class, decide whether substituting in the redefinition
+/// types would be profitable. The redefinition type is whatever
+/// this translation unit tried to typedef to id/Class; we store
+/// it to the side and then re-use it in places like this.
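+///
+/// For illustration (hypothetical): if this translation unit declares
+/// 'typedef struct objc_object { Class isa; } *id;', a failed member access
+/// on a value of builtin 'id' type can be retried against
+/// 'struct objc_object *', where a lookup such as '->isa' can succeed.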
+static bool ShouldTryAgainWithRedefinitionType(Sema &S, ExprResult &base) {
+ const ObjCObjectPointerType *opty
+ = base.get()->getType()->getAs<ObjCObjectPointerType>();
+ if (!opty) return false;
+
+ const ObjCObjectType *ty = opty->getObjectType();
+
+ QualType redef;
+ if (ty->isObjCId()) {
+ redef = S.Context.getObjCIdRedefinitionType();
+ } else if (ty->isObjCClass()) {
+ redef = S.Context.getObjCClassRedefinitionType();
+ } else {
+ return false;
+ }
+
+ // Do the substitution as long as the redefinition type isn't just a
+ // possibly-qualified pointer to builtin-id or builtin-Class again.
+ opty = redef->getAs<ObjCObjectPointerType>();
+ if (opty && !opty->getObjectType()->getInterface())
+ return false;
+
+ base = S.ImpCastExprToType(base.take(), redef, CK_BitCast);
+ return true;
+}
+
+static bool isRecordType(QualType T) {
+ return T->isRecordType();
+}
+static bool isPointerToRecordType(QualType T) {
+ if (const PointerType *PT = T->getAs<PointerType>())
+ return PT->getPointeeType()->isRecordType();
+ return false;
+}
+
+/// Perform conversions on the LHS of a member access expression.
+ExprResult
+Sema::PerformMemberExprBaseConversion(Expr *Base, bool IsArrow) {
+ if (IsArrow && !Base->getType()->isFunctionType())
+ return DefaultFunctionArrayLvalueConversion(Base);
+
+ return CheckPlaceholderExpr(Base);
+}
+
+/// Look up the given member of the given non-type-dependent
+/// expression. This can return in one of two ways:
+/// * If it returns a sentinel null-but-valid result, the caller will
+/// assume that lookup was performed and the results written into
+/// the provided structure. It will take over from there.
+/// * Otherwise, the returned expression will be produced in place of
+/// an ordinary member expression.
+///
+/// The ObjCImpDecl bit is a gross hack that will need to be properly
+/// fixed for ObjC++.
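+///
+/// A typical caller pattern (paraphrasing the callers in this file):
+/// ExprResult Res = LookupMemberExpr(R, Base, IsArrow, OpLoc, SS, ...);
+/// if (Res.isInvalid()) return ExprError();
+/// if (Res.get()) return Res; // a complete expression was produced
+/// // otherwise R was filled in; continue with BuildMemberReferenceExpr.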
+ExprResult
+Sema::LookupMemberExpr(LookupResult &R, ExprResult &BaseExpr,
+ bool &IsArrow, SourceLocation OpLoc,
+ CXXScopeSpec &SS,
+ Decl *ObjCImpDecl, bool HasTemplateArgs) {
+ assert(BaseExpr.get() && "no base expression");
+
+ // Perform default conversions.
+ BaseExpr = PerformMemberExprBaseConversion(BaseExpr.take(), IsArrow);
+ if (BaseExpr.isInvalid())
+ return ExprError();
+
+ QualType BaseType = BaseExpr.get()->getType();
+ assert(!BaseType->isDependentType());
+
+ DeclarationName MemberName = R.getLookupName();
+ SourceLocation MemberLoc = R.getNameLoc();
+
+ // For later type-checking purposes, turn arrow accesses into dot
+ // accesses. The only access type we support that doesn't follow
+ // the C equivalence "a->b === (*a).b" is ObjC property accesses,
+ // and those never use arrows, so this is unaffected.
+ if (IsArrow) {
+ if (const PointerType *Ptr = BaseType->getAs<PointerType>())
+ BaseType = Ptr->getPointeeType();
+ else if (const ObjCObjectPointerType *Ptr
+ = BaseType->getAs<ObjCObjectPointerType>())
+ BaseType = Ptr->getPointeeType();
+ else if (BaseType->isRecordType()) {
+ // Recover from arrow accesses to records, e.g.:
+ // struct MyRecord foo;
+ // foo->bar
+ // This is actually well-formed in C++ if MyRecord has an
+ // overloaded operator->, but that should have been dealt with
+ // by now.
+ Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
+ << BaseType << int(IsArrow) << BaseExpr.get()->getSourceRange()
+ << FixItHint::CreateReplacement(OpLoc, ".");
+ IsArrow = false;
+ } else if (BaseType->isFunctionType()) {
+ goto fail;
+ } else {
+ Diag(MemberLoc, diag::err_typecheck_member_reference_arrow)
+ << BaseType << BaseExpr.get()->getSourceRange();
+ return ExprError();
+ }
+ }
+
+ // Handle field access to simple records.
+ if (const RecordType *RTy = BaseType->getAs<RecordType>()) {
+ if (LookupMemberExprInRecord(*this, R, BaseExpr.get()->getSourceRange(),
+ RTy, OpLoc, SS, HasTemplateArgs))
+ return ExprError();
+
+ // Returning valid-but-null is how we indicate to the caller that
+ // the lookup result was filled in.
+ return Owned((Expr*) 0);
+ }
+
+ // Handle ivar access to Objective-C objects.
+ if (const ObjCObjectType *OTy = BaseType->getAs<ObjCObjectType>()) {
+ if (!SS.isEmpty() && !SS.isInvalid()) {
+ Diag(SS.getRange().getBegin(), diag::err_qualified_objc_access)
+ << 1 << SS.getScopeRep()
+ << FixItHint::CreateRemoval(SS.getRange());
+ SS.clear();
+ }
+
+ IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
+
+ // There are three cases for the base type:
+ // - builtin id (qualified or unqualified)
+ // - builtin Class (qualified or unqualified)
+ // - an interface
+ ObjCInterfaceDecl *IDecl = OTy->getInterface();
+ if (!IDecl) {
+ if (getLangOpts().ObjCAutoRefCount &&
+ (OTy->isObjCId() || OTy->isObjCClass()))
+ goto fail;
+ // There's an implicit 'isa' ivar on all objects.
+ // But we only actually find it this way on objects of type 'id',
+ // apparently.
+ if (OTy->isObjCId() && Member->isStr("isa")) {
+ Diag(MemberLoc, diag::warn_objc_isa_use);
+ return Owned(new (Context) ObjCIsaExpr(BaseExpr.take(), IsArrow, MemberLoc,
+ Context.getObjCClassType()));
+ }
+
+ if (ShouldTryAgainWithRedefinitionType(*this, BaseExpr))
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+ goto fail;
+ }
+
+ if (RequireCompleteType(OpLoc, BaseType,
+ PDiag(diag::err_typecheck_incomplete_tag)
+ << BaseExpr.get()->getSourceRange()))
+ return ExprError();
+
+ ObjCInterfaceDecl *ClassDeclared = 0;
+ ObjCIvarDecl *IV = IDecl->lookupInstanceVariable(Member, ClassDeclared);
+
+ if (!IV) {
+ // Attempt to correct for typos in ivar names.
+ DeclFilterCCC<ObjCIvarDecl> Validator;
+ Validator.IsObjCIvarLookup = IsArrow;
+ if (TypoCorrection Corrected = CorrectTypo(R.getLookupNameInfo(),
+ LookupMemberName, NULL, NULL,
+ Validator, IDecl)) {
+ IV = Corrected.getCorrectionDeclAs<ObjCIvarDecl>();
+ Diag(R.getNameLoc(),
+ diag::err_typecheck_member_reference_ivar_suggest)
+ << IDecl->getDeclName() << MemberName << IV->getDeclName()
+ << FixItHint::CreateReplacement(R.getNameLoc(),
+ IV->getNameAsString());
+ Diag(IV->getLocation(), diag::note_previous_decl)
+ << IV->getDeclName();
+
+ // Figure out the class that declares the ivar.
+ assert(!ClassDeclared);
+ Decl *D = cast<Decl>(IV->getDeclContext());
+ if (ObjCCategoryDecl *CAT = dyn_cast<ObjCCategoryDecl>(D))
+ D = CAT->getClassInterface();
+ ClassDeclared = cast<ObjCInterfaceDecl>(D);
+ } else {
+ if (IsArrow && IDecl->FindPropertyDeclaration(Member)) {
+ Diag(MemberLoc,
+ diag::err_property_found_suggest)
+ << Member << BaseExpr.get()->getType()
+ << FixItHint::CreateReplacement(OpLoc, ".");
+ return ExprError();
+ }
+
+ Diag(MemberLoc, diag::err_typecheck_member_reference_ivar)
+ << IDecl->getDeclName() << MemberName
+ << BaseExpr.get()->getSourceRange();
+ return ExprError();
+ }
+ }
+
+ assert(ClassDeclared);
+
+ // If the decl being referenced had an error, return an error for this
+ // sub-expr without emitting another error, in order to avoid cascading
+ // error cases.
+ if (IV->isInvalidDecl())
+ return ExprError();
+
+ // Check whether we can reference this field.
+ if (DiagnoseUseOfDecl(IV, MemberLoc))
+ return ExprError();
+ if (IV->getAccessControl() != ObjCIvarDecl::Public &&
+ IV->getAccessControl() != ObjCIvarDecl::Package) {
+ ObjCInterfaceDecl *ClassOfMethodDecl = 0;
+ if (ObjCMethodDecl *MD = getCurMethodDecl())
+ ClassOfMethodDecl = MD->getClassInterface();
+ else if (ObjCImpDecl && getCurFunctionDecl()) {
+ // Case of a c-function declared inside an objc implementation.
+ // FIXME: For a c-style function nested inside an objc implementation
+ // class, there is no implementation context available, so we pass
+ // down the context as argument to this routine. Ideally, this context
+ // need be passed down in the AST node and somehow calculated from the
+ // AST for a function decl.
+ if (ObjCImplementationDecl *IMPD =
+ dyn_cast<ObjCImplementationDecl>(ObjCImpDecl))
+ ClassOfMethodDecl = IMPD->getClassInterface();
+ else if (ObjCCategoryImplDecl* CatImplClass =
+ dyn_cast<ObjCCategoryImplDecl>(ObjCImpDecl))
+ ClassOfMethodDecl = CatImplClass->getClassInterface();
+ }
+ if (!getLangOpts().DebuggerSupport) {
+ if (IV->getAccessControl() == ObjCIvarDecl::Private) {
+ if (!declaresSameEntity(ClassDeclared, IDecl) ||
+ !declaresSameEntity(ClassOfMethodDecl, ClassDeclared))
+ Diag(MemberLoc, diag::error_private_ivar_access)
+ << IV->getDeclName();
+ } else if (!IDecl->isSuperClassOf(ClassOfMethodDecl))
+ // @protected
+ Diag(MemberLoc, diag::error_protected_ivar_access)
+ << IV->getDeclName();
+ }
+ }
+ if (getLangOpts().ObjCAutoRefCount) {
+ Expr *BaseExp = BaseExpr.get()->IgnoreParenImpCasts();
+ if (UnaryOperator *UO = dyn_cast<UnaryOperator>(BaseExp))
+ if (UO->getOpcode() == UO_Deref)
+ BaseExp = UO->getSubExpr()->IgnoreParenCasts();
+
+ if (DeclRefExpr *DE = dyn_cast<DeclRefExpr>(BaseExp))
+ if (DE->getType().getObjCLifetime() == Qualifiers::OCL_Weak)
+ Diag(DE->getLocation(), diag::error_arc_weak_ivar_access);
+ }
+
+ return Owned(new (Context) ObjCIvarRefExpr(IV, IV->getType(),
+ MemberLoc, BaseExpr.take(),
+ IsArrow));
+ }
+
+ // Objective-C property access.
+ const ObjCObjectPointerType *OPT;
+ if (!IsArrow && (OPT = BaseType->getAs<ObjCObjectPointerType>())) {
+ if (!SS.isEmpty() && !SS.isInvalid()) {
+ Diag(SS.getRange().getBegin(), diag::err_qualified_objc_access)
+ << 0 << SS.getScopeRep()
+ << FixItHint::CreateRemoval(SS.getRange());
+ SS.clear();
+ }
+
+ // This actually uses the base as an r-value.
+ BaseExpr = DefaultLvalueConversion(BaseExpr.take());
+ if (BaseExpr.isInvalid())
+ return ExprError();
+
+ assert(Context.hasSameUnqualifiedType(BaseType, BaseExpr.get()->getType()));
+
+ IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
+
+ const ObjCObjectType *OT = OPT->getObjectType();
+
+ // id, with and without qualifiers.
+ if (OT->isObjCId()) {
+ // Check protocols on qualified interfaces.
+ Selector Sel = PP.getSelectorTable().getNullarySelector(Member);
+ if (Decl *PMDecl = FindGetterSetterNameDecl(OPT, Member, Sel, Context)) {
+ if (ObjCPropertyDecl *PD = dyn_cast<ObjCPropertyDecl>(PMDecl)) {
+ // Check the use of this declaration
+ if (DiagnoseUseOfDecl(PD, MemberLoc))
+ return ExprError();
+
+ return Owned(new (Context) ObjCPropertyRefExpr(PD,
+ Context.PseudoObjectTy,
+ VK_LValue,
+ OK_ObjCProperty,
+ MemberLoc,
+ BaseExpr.take()));
+ }
+
+ if (ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(PMDecl)) {
+ // Check the use of this method.
+ if (DiagnoseUseOfDecl(OMD, MemberLoc))
+ return ExprError();
+ Selector SetterSel =
+ SelectorTable::constructSetterName(PP.getIdentifierTable(),
+ PP.getSelectorTable(), Member);
+ ObjCMethodDecl *SMD = 0;
+ if (Decl *SDecl = FindGetterSetterNameDecl(OPT, /*Property id*/0,
+ SetterSel, Context))
+ SMD = dyn_cast<ObjCMethodDecl>(SDecl);
+
+ return Owned(new (Context) ObjCPropertyRefExpr(OMD, SMD,
+ Context.PseudoObjectTy,
+ VK_LValue, OK_ObjCProperty,
+ MemberLoc, BaseExpr.take()));
+ }
+ }
+ // Use of id.member can only be for a property reference. Do not
+ // use the 'id' redefinition in this case.
+ if (IsArrow && ShouldTryAgainWithRedefinitionType(*this, BaseExpr))
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+
+ return ExprError(Diag(MemberLoc, diag::err_property_not_found)
+ << MemberName << BaseType);
+ }
+
+ // 'Class', unqualified only.
+ if (OT->isObjCClass()) {
+ // Only works in a method declaration (??!).
+ ObjCMethodDecl *MD = getCurMethodDecl();
+ if (!MD) {
+ if (ShouldTryAgainWithRedefinitionType(*this, BaseExpr))
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+
+ goto fail;
+ }
+
+ // Also must look for a getter name which uses property syntax.
+ Selector Sel = PP.getSelectorTable().getNullarySelector(Member);
+ ObjCInterfaceDecl *IFace = MD->getClassInterface();
+ ObjCMethodDecl *Getter;
+ if ((Getter = IFace->lookupClassMethod(Sel))) {
+ // Check the use of this method.
+ if (DiagnoseUseOfDecl(Getter, MemberLoc))
+ return ExprError();
+ } else
+ Getter = IFace->lookupPrivateMethod(Sel, false);
+ // If we found a getter then this may be a valid dot-reference, we
+ // will look for the matching setter, in case it is needed.
+ Selector SetterSel =
+ SelectorTable::constructSetterName(PP.getIdentifierTable(),
+ PP.getSelectorTable(), Member);
+ ObjCMethodDecl *Setter = IFace->lookupClassMethod(SetterSel);
+ if (!Setter) {
+ // If this reference is in an @implementation, also check for 'private'
+ // methods.
+ Setter = IFace->lookupPrivateMethod(SetterSel, false);
+ }
+ // Look through local category implementations associated with the class.
+ if (!Setter)
+ Setter = IFace->getCategoryClassMethod(SetterSel);
+
+ if (Setter && DiagnoseUseOfDecl(Setter, MemberLoc))
+ return ExprError();
+
+ if (Getter || Setter) {
+ return Owned(new (Context) ObjCPropertyRefExpr(Getter, Setter,
+ Context.PseudoObjectTy,
+ VK_LValue, OK_ObjCProperty,
+ MemberLoc, BaseExpr.take()));
+ }
+
+ if (ShouldTryAgainWithRedefinitionType(*this, BaseExpr))
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+
+ return ExprError(Diag(MemberLoc, diag::err_property_not_found)
+ << MemberName << BaseType);
+ }
+
+ // Normal property access.
+ return HandleExprPropertyRefExpr(OPT, BaseExpr.get(), OpLoc,
+ MemberName, MemberLoc,
+ SourceLocation(), QualType(), false);
+ }
+
+ // Handle 'field access' to vectors, such as 'V.xx'.
+ if (BaseType->isExtVectorType()) {
+ // FIXME: this expr should store IsArrow.
+ IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
+ ExprValueKind VK = (IsArrow ? VK_LValue : BaseExpr.get()->getValueKind());
+ QualType ret = CheckExtVectorComponent(*this, BaseType, VK, OpLoc,
+ Member, MemberLoc);
+ if (ret.isNull())
+ return ExprError();
+
+ return Owned(new (Context) ExtVectorElementExpr(ret, VK, BaseExpr.take(),
+ *Member, MemberLoc));
+ }
+
+ // Adjust builtin-sel to the appropriate redefinition type if that's
+ // not just a pointer to builtin-sel again.
+ if (IsArrow &&
+ BaseType->isSpecificBuiltinType(BuiltinType::ObjCSel) &&
+ !Context.getObjCSelRedefinitionType()->isObjCSelType()) {
+ BaseExpr = ImpCastExprToType(BaseExpr.take(),
+ Context.getObjCSelRedefinitionType(),
+ CK_BitCast);
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+ }
+
+ // Failure cases.
+ fail:
+
+ // Recover from dot accesses to pointers, e.g.:
+ // type *foo;
+ // foo.bar
+ // This is actually well-formed in two cases:
+ // - 'type' is an Objective C type
+ // - 'bar' is a pseudo-destructor name which happens to refer to
+ // the appropriate pointer type
+ if (const PointerType *Ptr = BaseType->getAs<PointerType>()) {
+ if (!IsArrow && Ptr->getPointeeType()->isRecordType() &&
+ MemberName.getNameKind() != DeclarationName::CXXDestructorName) {
+ Diag(OpLoc, diag::err_typecheck_member_reference_suggestion)
+ << BaseType << int(IsArrow) << BaseExpr.get()->getSourceRange()
+ << FixItHint::CreateReplacement(OpLoc, "->");
+
+ // Recurse as an -> access.
+ IsArrow = true;
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+ }
+ }
+
+ // If the user is trying to apply -> or . to a function name, it's probably
+ // because they forgot parentheses to call that function.
+ if (tryToRecoverWithCall(BaseExpr,
+ PDiag(diag::err_member_reference_needs_call),
+ /*complain*/ false,
+ IsArrow ? &isPointerToRecordType : &isRecordType)) {
+ if (BaseExpr.isInvalid())
+ return ExprError();
+ BaseExpr = DefaultFunctionArrayConversion(BaseExpr.take());
+ return LookupMemberExpr(R, BaseExpr, IsArrow, OpLoc, SS,
+ ObjCImpDecl, HasTemplateArgs);
+ }
+
+ Diag(MemberLoc, diag::err_typecheck_member_reference_struct_union)
+ << BaseType << BaseExpr.get()->getSourceRange();
+
+ return ExprError();
+}
+
+/// The main callback when the parser finds something like
+/// expression . [nested-name-specifier] identifier
+/// expression -> [nested-name-specifier] identifier
+/// where 'identifier' encompasses a fairly broad spectrum of
+/// possibilities, including destructor and operator references.
+///
+/// \param OpKind either tok::arrow or tok::period
+/// \param HasTrailingLParen whether the next token is '(', which
+/// is used to diagnose mis-uses of special members that can
+/// only be called
+/// \param ObjCImpDecl the current ObjC @implementation decl;
+/// this is an ugly hack around the fact that ObjC @implementations
+/// aren't properly put in the context chain
+ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ UnqualifiedId &Id,
+ Decl *ObjCImpDecl,
+ bool HasTrailingLParen) {
+ if (SS.isSet() && SS.isInvalid())
+ return ExprError();
+
+ // Warn about the explicit constructor calls Microsoft extension.
+ if (getLangOpts().MicrosoftExt &&
+ Id.getKind() == UnqualifiedId::IK_ConstructorName)
+ Diag(Id.getSourceRange().getBegin(),
+ diag::ext_ms_explicit_constructor_call);
+
+ TemplateArgumentListInfo TemplateArgsBuffer;
+
+ // Decompose the name into its component parts.
+ DeclarationNameInfo NameInfo;
+ const TemplateArgumentListInfo *TemplateArgs;
+ DecomposeUnqualifiedId(Id, TemplateArgsBuffer,
+ NameInfo, TemplateArgs);
+
+ DeclarationName Name = NameInfo.getName();
+ bool IsArrow = (OpKind == tok::arrow);
+
+ NamedDecl *FirstQualifierInScope
+ = (!SS.isSet() ? 0 : FindFirstQualifierInScope(S,
+ static_cast<NestedNameSpecifier*>(SS.getScopeRep())));
+
+ // This is a postfix expression, so get rid of ParenListExprs.
+ ExprResult Result = MaybeConvertParenListExprToParenExpr(S, Base);
+ if (Result.isInvalid()) return ExprError();
+ Base = Result.take();
+
+ if (Base->getType()->isDependentType() || Name.isDependentName() ||
+ isDependentScopeSpecifier(SS)) {
+ Result = ActOnDependentMemberExpr(Base, Base->getType(),
+ IsArrow, OpLoc,
+ SS, TemplateKWLoc, FirstQualifierInScope,
+ NameInfo, TemplateArgs);
+ } else {
+ LookupResult R(*this, NameInfo, LookupMemberName);
+ ExprResult BaseResult = Owned(Base);
+ Result = LookupMemberExpr(R, BaseResult, IsArrow, OpLoc,
+ SS, ObjCImpDecl, TemplateArgs != 0);
+ if (BaseResult.isInvalid())
+ return ExprError();
+ Base = BaseResult.take();
+
+ if (Result.isInvalid()) {
+ Owned(Base);
+ return ExprError();
+ }
+
+ if (Result.get()) {
+ // The only way a reference to a destructor can be used is to
+ // immediately call it, which falls into this case. If the
+ // next token is not a '(', produce a diagnostic and build the
+ // call now.
+ if (!HasTrailingLParen &&
+ Id.getKind() == UnqualifiedId::IK_DestructorName)
+ return DiagnoseDtorReference(NameInfo.getLoc(), Result.get());
+
+ return move(Result);
+ }
+
+ Result = BuildMemberReferenceExpr(Base, Base->getType(),
+ OpLoc, IsArrow, SS, TemplateKWLoc,
+ FirstQualifierInScope, R, TemplateArgs);
+ }
+
+ return move(Result);
+}
+
+static ExprResult
+BuildFieldReferenceExpr(Sema &S, Expr *BaseExpr, bool IsArrow,
+ const CXXScopeSpec &SS, FieldDecl *Field,
+ DeclAccessPair FoundDecl,
+ const DeclarationNameInfo &MemberNameInfo) {
+ // x.a is an l-value if 'a' has a reference type. Otherwise:
+ // x.a is an l-value/x-value/pr-value if the base is (and note
+ // that *x is always an l-value), except that if the base isn't
+ // an ordinary object then we must have an rvalue.
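+ // For illustration (hypothetical): given 'struct S { int a; }; S x;',
+ // 'x.a' is an l-value like 'x', while 'S().a' is not an l-value because the
+ // base is a temporary; a member of reference type is always an l-value.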
+ ExprValueKind VK = VK_LValue;
+ ExprObjectKind OK = OK_Ordinary;
+ if (!IsArrow) {
+ if (BaseExpr->getObjectKind() == OK_Ordinary)
+ VK = BaseExpr->getValueKind();
+ else
+ VK = VK_RValue;
+ }
+ if (VK != VK_RValue && Field->isBitField())
+ OK = OK_BitField;
+
+ // Figure out the type of the member; see C99 6.5.2.3p3, C++ [expr.ref]
+ QualType MemberType = Field->getType();
+ if (const ReferenceType *Ref = MemberType->getAs<ReferenceType>()) {
+ MemberType = Ref->getPointeeType();
+ VK = VK_LValue;
+ } else {
+ QualType BaseType = BaseExpr->getType();
+ if (IsArrow) BaseType = BaseType->getAs<PointerType>()->getPointeeType();
+
+ Qualifiers BaseQuals = BaseType.getQualifiers();
+
+ // GC attributes are never picked up by members.
+ BaseQuals.removeObjCGCAttr();
+
+ // CVR attributes from the base are picked up by members,
+ // except that 'mutable' members don't pick up 'const'.
+ if (Field->isMutable()) BaseQuals.removeConst();
+
+ Qualifiers MemberQuals
+ = S.Context.getCanonicalType(MemberType).getQualifiers();
+
+ // TR 18037 does not allow fields to be declared with address spaces.
+ assert(!MemberQuals.hasAddressSpace());
+
+ Qualifiers Combined = BaseQuals + MemberQuals;
+ if (Combined != MemberQuals)
+ MemberType = S.Context.getQualifiedType(MemberType, Combined);
+ }
+
+ ExprResult Base =
+ S.PerformObjectMemberConversion(BaseExpr, SS.getScopeRep(),
+ FoundDecl, Field);
+ if (Base.isInvalid())
+ return ExprError();
+ return S.Owned(BuildMemberExpr(S, S.Context, Base.take(), IsArrow, SS,
+ /*TemplateKWLoc=*/SourceLocation(),
+ Field, FoundDecl, MemberNameInfo,
+ MemberType, VK, OK));
+}
+
+/// Builds an implicit member access expression. The current context
+/// is known to be an instance method, and the given unqualified lookup
+/// set is known to contain only instance members, at least one of which
+/// is from an appropriate type.
+ExprResult
+Sema::BuildImplicitMemberExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ LookupResult &R,
+ const TemplateArgumentListInfo *TemplateArgs,
+ bool IsKnownInstance) {
+ assert(!R.empty() && !R.isAmbiguous());
+
+ SourceLocation loc = R.getNameLoc();
+
+ // We may have found a field within an anonymous union or struct
+ // (C++ [class.union]).
+ // FIXME: template-ids inside anonymous structs?
+ if (IndirectFieldDecl *FD = R.getAsSingle<IndirectFieldDecl>())
+ return BuildAnonymousStructUnionMemberReference(SS, R.getNameLoc(), FD);
+
+ // If this is known to be an instance access, go ahead and build an
+ // implicit 'this' expression now.
+ QualType ThisTy = getCurrentThisType();
+ assert(!ThisTy.isNull() && "didn't correctly pre-flight capture of 'this'");
+
+ Expr *baseExpr = 0; // null signifies implicit access
+ if (IsKnownInstance) {
+ SourceLocation Loc = R.getNameLoc();
+ if (SS.getRange().isValid())
+ Loc = SS.getRange().getBegin();
+ CheckCXXThisCapture(Loc);
+ baseExpr = new (Context) CXXThisExpr(loc, ThisTy, /*isImplicit=*/true);
+ }
+
+ return BuildMemberReferenceExpr(baseExpr, ThisTy,
+ /*OpLoc*/ SourceLocation(),
+ /*IsArrow*/ true,
+ SS, TemplateKWLoc,
+ /*FirstQualifierInScope*/ 0,
+ R, TemplateArgs);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
new file mode 100644
index 0000000..b62d56e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaExprObjC.cpp
@@ -0,0 +1,3049 @@
+//===--- SemaExprObjC.cpp - Semantic Analysis for ObjC Expressions --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for Objective-C expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/Edit/Rewriters.h"
+#include "clang/Edit/Commit.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeLoc.h"
+#include "llvm/ADT/SmallString.h"
+#include "clang/Lex/Preprocessor.h"
+
+using namespace clang;
+using namespace sema;
+using llvm::makeArrayRef;
+
+ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
+ Expr **strings,
+ unsigned NumStrings) {
+ StringLiteral **Strings = reinterpret_cast<StringLiteral**>(strings);
+
+ // Most ObjC strings are formed out of a single piece. However, we *can*
+ // have strings formed out of multiple @ strings with multiple pptokens in
+ // each one, e.g. @"foo" "bar" @"baz" "qux" which need to be turned into one
+ // StringLiteral for ObjCStringLiteral to hold onto.
+ StringLiteral *S = Strings[0];
+
+ // If we have a multi-part string, merge it all together.
+ if (NumStrings != 1) {
+ // Concatenate objc strings.
+ SmallString<128> StrBuf;
+ SmallVector<SourceLocation, 8> StrLocs;
+
+ for (unsigned i = 0; i != NumStrings; ++i) {
+ S = Strings[i];
+
+ // ObjC strings can't be wide or UTF.
+ if (!S->isAscii()) {
+ Diag(S->getLocStart(), diag::err_cfstring_literal_not_string_constant)
+ << S->getSourceRange();
+ return true;
+ }
+
+ // Append the string.
+ StrBuf += S->getString();
+
+ // Get the locations of the string tokens.
+ StrLocs.append(S->tokloc_begin(), S->tokloc_end());
+ }
+
+ // Create the aggregate string with the appropriate content and location
+ // information.
+ S = StringLiteral::Create(Context, StrBuf,
+ StringLiteral::Ascii, /*Pascal=*/false,
+ Context.getPointerType(Context.CharTy),
+ &StrLocs[0], StrLocs.size());
+ }
+
+ return BuildObjCStringLiteral(AtLocs[0], S);
+}
+
+ExprResult Sema::BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S){
+ // Verify that this composite string is acceptable for ObjC strings.
+ if (CheckObjCString(S))
+ return true;
+
+ // Initialize the constant string interface lazily. This assumes
+ // the NSString interface is seen in this translation unit. Note: We
+ // don't use NSConstantString, since the runtime team considers this
+ // interface private (even though it appears in the header files).
+ QualType Ty = Context.getObjCConstantStringInterface();
+ if (!Ty.isNull()) {
+ Ty = Context.getObjCObjectPointerType(Ty);
+ } else if (getLangOpts().NoConstantCFStrings) {
+ IdentifierInfo *NSIdent=0;
+ std::string StringClass(getLangOpts().ObjCConstantStringClass);
+
+ if (StringClass.empty())
+ NSIdent = &Context.Idents.get("NSConstantString");
+ else
+ NSIdent = &Context.Idents.get(StringClass);
+
+ NamedDecl *IF = LookupSingleName(TUScope, NSIdent, AtLoc,
+ LookupOrdinaryName);
+ if (ObjCInterfaceDecl *StrIF = dyn_cast_or_null<ObjCInterfaceDecl>(IF)) {
+ Context.setObjCConstantStringInterface(StrIF);
+ Ty = Context.getObjCConstantStringInterface();
+ Ty = Context.getObjCObjectPointerType(Ty);
+ } else {
+ // If there is no NSConstantString interface defined then treat this
+ // as error and recover from it.
+ Diag(S->getLocStart(), diag::err_no_nsconstant_string_class) << NSIdent
+ << S->getSourceRange();
+ Ty = Context.getObjCIdType();
+ }
+ } else {
+ IdentifierInfo *NSIdent = &Context.Idents.get("NSString");
+ NamedDecl *IF = LookupSingleName(TUScope, NSIdent, AtLoc,
+ LookupOrdinaryName);
+ if (ObjCInterfaceDecl *StrIF = dyn_cast_or_null<ObjCInterfaceDecl>(IF)) {
+ Context.setObjCConstantStringInterface(StrIF);
+ Ty = Context.getObjCConstantStringInterface();
+ Ty = Context.getObjCObjectPointerType(Ty);
+ } else {
+ // If there is no NSString interface defined, implicitly declare
+ // a @class NSString; and use that instead. This is to make sure the
+ // type of an NSString literal is represented correctly, instead of
+ // being an 'id' type.
+ Ty = Context.getObjCNSStringType();
+ if (Ty.isNull()) {
+ ObjCInterfaceDecl *NSStringIDecl =
+ ObjCInterfaceDecl::Create (Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(), NSIdent,
+ 0, SourceLocation());
+ Ty = Context.getObjCInterfaceType(NSStringIDecl);
+ Context.setObjCNSStringType(Ty);
+ }
+ Ty = Context.getObjCObjectPointerType(Ty);
+ }
+ }
+
+ return new (Context) ObjCStringLiteral(S, Ty, AtLoc);
+}
+
+/// \brief Retrieve the NSNumber factory method that should be used to create
+/// an Objective-C literal for the given type.
+static ObjCMethodDecl *getNSNumberFactoryMethod(Sema &S, SourceLocation Loc,
+ QualType T, QualType ReturnType,
+ SourceRange Range) {
+ llvm::Optional<NSAPI::NSNumberLiteralMethodKind> Kind
+ = S.NSAPIObj->getNSNumberFactoryMethodKind(T);
+
+ if (!Kind) {
+ S.Diag(Loc, diag::err_invalid_nsnumber_type)
+ << T << Range;
+ return 0;
+ }
+
+ // If we already looked up this method, we're done.
+ if (S.NSNumberLiteralMethods[*Kind])
+ return S.NSNumberLiteralMethods[*Kind];
+
+ Selector Sel = S.NSAPIObj->getNSNumberLiteralSelector(*Kind,
+ /*Instance=*/false);
+
+ // Look for the appropriate method within NSNumber.
+ ObjCMethodDecl *Method = S.NSNumberDecl->lookupClassMethod(Sel);
+ if (!Method && S.getLangOpts().DebuggerObjCLiteral) {
+ TypeSourceInfo *ResultTInfo = 0;
+ Method = ObjCMethodDecl::Create(S.Context, SourceLocation(), SourceLocation(), Sel,
+ ReturnType,
+ ResultTInfo,
+ S.Context.getTranslationUnitDecl(),
+ false /*Instance*/, false/*isVariadic*/,
+ /*isSynthesized=*/false,
+ /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+ ObjCMethodDecl::Required,
+ false);
+ ParmVarDecl *value = ParmVarDecl::Create(S.Context, Method,
+ SourceLocation(), SourceLocation(),
+ &S.Context.Idents.get("value"),
+ T, /*TInfo=*/0, SC_None, SC_None, 0);
+ Method->setMethodParams(S.Context, value, ArrayRef<SourceLocation>());
+ }
+
+ if (!Method) {
+ S.Diag(Loc, diag::err_undeclared_nsnumber_method) << Sel;
+ return 0;
+ }
+
+ // Make sure the return type is reasonable.
+ if (!Method->getResultType()->isObjCObjectPointerType()) {
+ S.Diag(Loc, diag::err_objc_literal_method_sig)
+ << Sel;
+ S.Diag(Method->getLocation(), diag::note_objc_literal_method_return)
+ << Method->getResultType();
+ return 0;
+ }
+
+ // Note: if the parameter type is out-of-line, we'll catch it later in the
+ // implicit conversion.
+
+ S.NSNumberLiteralMethods[*Kind] = Method;
+ return Method;
+}
+
+/// BuildObjCNumericLiteral - builds an ObjCNumericLiteral AST node for the
+/// numeric literal expression. Type of the expression will be "NSNumber *"
+/// or "id" if NSNumber is unavailable.
+ExprResult Sema::BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number) {
+ // Look up the NSNumber class, if we haven't done so already.
+ if (!NSNumberDecl) {
+ NamedDecl *IF = LookupSingleName(TUScope,
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSNumber),
+ AtLoc, LookupOrdinaryName);
+ NSNumberDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
+
+ if (!NSNumberDecl && getLangOpts().DebuggerObjCLiteral)
+ NSNumberDecl = ObjCInterfaceDecl::Create (Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(),
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSNumber),
+ 0, SourceLocation());
+ if (!NSNumberDecl) {
+ Diag(AtLoc, diag::err_undeclared_nsnumber);
+ return ExprError();
+ }
+ }
+
+ // Determine the type of the literal.
+ QualType NumberType = Number->getType();
+ if (CharacterLiteral *Char = dyn_cast<CharacterLiteral>(Number)) {
+ // In C, character literals have type 'int'. That's not the type we want
+ // to use to determine the Objective-C literal kind.
+ switch (Char->getKind()) {
+ case CharacterLiteral::Ascii:
+ NumberType = Context.CharTy;
+ break;
+
+ case CharacterLiteral::Wide:
+ NumberType = Context.getWCharType();
+ break;
+
+ case CharacterLiteral::UTF16:
+ NumberType = Context.Char16Ty;
+ break;
+
+ case CharacterLiteral::UTF32:
+ NumberType = Context.Char32Ty;
+ break;
+ }
+ }
+
+ ObjCMethodDecl *Method = 0;
+ // Look for the appropriate method within NSNumber.
+ // Construct the literal.
+ QualType Ty
+ = Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(NSNumberDecl));
+ Method = getNSNumberFactoryMethod(*this, AtLoc,
+ NumberType, Ty,
+ Number->getSourceRange());
+
+ if (!Method)
+ return ExprError();
+
+ // Convert the number to the type that the parameter expects.
+ QualType ElementT = Method->param_begin()[0]->getType();
+ ExprResult ConvertedNumber = PerformImplicitConversion(Number, ElementT,
+ AA_Sending);
+ if (ConvertedNumber.isInvalid())
+ return ExprError();
+ Number = ConvertedNumber.get();
+
+ return MaybeBindToTemporary(
+ new (Context) ObjCNumericLiteral(Number, Ty, Method, AtLoc));
+}
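+
+// For example (assuming NSNumber is declared; variable names are hypothetical):
+//
+//   NSNumber *count = @42;     // boxed through the +numberWithInt: factory
+//   NSNumber *ch    = @'a';    // the CharacterLiteral is re-typed as 'char'
+//                              // above, so +numberWithChar: is chosen even
+//                              // though a C character literal has type 'int'
+//
+// Both expressions receive the 'NSNumber *' type built just above.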
+
+ExprResult Sema::ActOnObjCBoolLiteral(SourceLocation AtLoc,
+ SourceLocation ValueLoc,
+ bool Value) {
+ ExprResult Inner;
+ if (getLangOpts().CPlusPlus) {
+ Inner = ActOnCXXBoolLiteral(ValueLoc, Value? tok::kw_true : tok::kw_false);
+ } else {
+ // C doesn't actually have a way to represent literal values of type
+ // _Bool. So, we'll use 0/1 and implicitly cast to _Bool.
+ Inner = ActOnIntegerConstant(ValueLoc, Value? 1 : 0);
+ Inner = ImpCastExprToType(Inner.get(), Context.BoolTy,
+ CK_IntegralToBoolean);
+ }
+
+ return BuildObjCNumericLiteral(AtLoc, Inner.get());
+}
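+
+// Sketch of the lowering done here: a boxed boolean literal such as '@YES' is
+// treated like any other boxed number. In C++ the inner expression is a real
+// 'bool' literal; in C it is the integer 0 or 1 implicitly cast to _Bool,
+// since C has no boolean literal syntax. Either way the result is handed to
+// BuildObjCNumericLiteral, which will typically pick +numberWithBool:.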
+
+/// \brief Check that the given expression is a valid element of an Objective-C
+/// collection literal.
+static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
+ QualType T) {
+ // If the expression is type-dependent, there's nothing for us to do.
+ if (Element->isTypeDependent())
+ return Element;
+
+ ExprResult Result = S.CheckPlaceholderExpr(Element);
+ if (Result.isInvalid())
+ return ExprError();
+ Element = Result.get();
+
+ // In C++, check for an implicit conversion to an Objective-C object pointer
+ // type.
+ if (S.getLangOpts().CPlusPlus && Element->getType()->isRecordType()) {
+ InitializedEntity Entity
+ = InitializedEntity::InitializeParameter(S.Context, T, /*Consumed=*/false);
+ InitializationKind Kind
+ = InitializationKind::CreateCopy(Element->getLocStart(), SourceLocation());
+ InitializationSequence Seq(S, Entity, Kind, &Element, 1);
+ if (!Seq.Failed())
+ return Seq.Perform(S, Entity, Kind, MultiExprArg(S, &Element, 1));
+ }
+
+ Expr *OrigElement = Element;
+
+ // Perform lvalue-to-rvalue conversion.
+ Result = S.DefaultLvalueConversion(Element);
+ if (Result.isInvalid())
+ return ExprError();
+ Element = Result.get();
+
+ // Make sure that we have an Objective-C pointer type or block.
+ if (!Element->getType()->isObjCObjectPointerType() &&
+ !Element->getType()->isBlockPointerType()) {
+ bool Recovered = false;
+
+ // If this is potentially an Objective-C numeric literal, add the '@'.
+ if (isa<IntegerLiteral>(OrigElement) ||
+ isa<CharacterLiteral>(OrigElement) ||
+ isa<FloatingLiteral>(OrigElement) ||
+ isa<ObjCBoolLiteralExpr>(OrigElement) ||
+ isa<CXXBoolLiteralExpr>(OrigElement)) {
+ if (S.NSAPIObj->getNSNumberFactoryMethodKind(OrigElement->getType())) {
+ int Which = isa<CharacterLiteral>(OrigElement) ? 1
+ : (isa<CXXBoolLiteralExpr>(OrigElement) ||
+ isa<ObjCBoolLiteralExpr>(OrigElement)) ? 2
+ : 3;
+
+ S.Diag(OrigElement->getLocStart(), diag::err_box_literal_collection)
+ << Which << OrigElement->getSourceRange()
+ << FixItHint::CreateInsertion(OrigElement->getLocStart(), "@");
+
+ Result = S.BuildObjCNumericLiteral(OrigElement->getLocStart(),
+ OrigElement);
+ if (Result.isInvalid())
+ return ExprError();
+
+ Element = Result.get();
+ Recovered = true;
+ }
+ }
+ // If this is potentially an Objective-C string literal, add the '@'.
+ else if (StringLiteral *String = dyn_cast<StringLiteral>(OrigElement)) {
+ if (String->isAscii()) {
+ S.Diag(OrigElement->getLocStart(), diag::err_box_literal_collection)
+ << 0 << OrigElement->getSourceRange()
+ << FixItHint::CreateInsertion(OrigElement->getLocStart(), "@");
+
+ Result = S.BuildObjCStringLiteral(OrigElement->getLocStart(), String);
+ if (Result.isInvalid())
+ return ExprError();
+
+ Element = Result.get();
+ Recovered = true;
+ }
+ }
+
+ if (!Recovered) {
+ S.Diag(Element->getLocStart(), diag::err_invalid_collection_element)
+ << Element->getType();
+ return ExprError();
+ }
+ }
+
+ // Make sure that the element has the type that the container factory
+ // function expects.
+ return S.PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(S.Context, T,
+ /*Consumed=*/false),
+ Element->getLocStart(), Element);
+}
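+
+// Illustrative recovery behavior (element values are hypothetical):
+//
+//   NSArray *a = @[ 17, "x" ];
+//
+// Neither element is an Objective-C object pointer, so both are diagnosed with
+// err_box_literal_collection together with fix-its that insert '@', recovering
+// as if the user had written @[ @17, @"x" ].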
+
+ExprResult Sema::BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
+ Expr *IndexExpr,
+ ObjCMethodDecl *getterMethod,
+ ObjCMethodDecl *setterMethod) {
+ // Objective-C subscripting is only supported under the modern (non-fragile) ABI.
+ if (!LangOpts.ObjCNonFragileABI)
+ return ExprError();
+ // If the expression is type-dependent, there's nothing for us to do.
+ assert ((!BaseExpr->isTypeDependent() && !IndexExpr->isTypeDependent()) &&
+ "base or index cannot have dependent type here");
+ ExprResult Result = CheckPlaceholderExpr(IndexExpr);
+ if (Result.isInvalid())
+ return ExprError();
+ IndexExpr = Result.get();
+
+ // Perform lvalue-to-rvalue conversion.
+ Result = DefaultLvalueConversion(BaseExpr);
+ if (Result.isInvalid())
+ return ExprError();
+ BaseExpr = Result.get();
+ return Owned(ObjCSubscriptRefExpr::Create(Context,
+ BaseExpr,
+ IndexExpr,
+ Context.PseudoObjectTy,
+ getterMethod,
+ setterMethod, RB));
+
+}
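+
+// Sketch of what this builds (the selector names below are the conventional
+// ones passed in as getterMethod/setterMethod; they are not chosen here):
+//
+//   id obj = container[0];      // typically objectAtIndexedSubscript:
+//   dict[key] = obj;            // typically setObject:forKeyedSubscript:
+//
+// The expression is given the pseudo-object type and is rewritten into the
+// appropriate getter or setter message send later on.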
+
+ExprResult Sema::BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements) {
+ // Look up the NSArray class, if we haven't done so already.
+ if (!NSArrayDecl) {
+ NamedDecl *IF = LookupSingleName(TUScope,
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSArray),
+ SR.getBegin(),
+ LookupOrdinaryName);
+ NSArrayDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
+ if (!NSArrayDecl && getLangOpts().DebuggerObjCLiteral)
+ NSArrayDecl = ObjCInterfaceDecl::Create (Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(),
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSArray),
+ 0, SourceLocation());
+
+ if (!NSArrayDecl) {
+ Diag(SR.getBegin(), diag::err_undeclared_nsarray);
+ return ExprError();
+ }
+ }
+
+ // Find the arrayWithObjects:count: method, if we haven't done so already.
+ QualType IdT = Context.getObjCIdType();
+ if (!ArrayWithObjectsMethod) {
+ Selector
+ Sel = NSAPIObj->getNSArraySelector(NSAPI::NSArr_arrayWithObjectsCount);
+ ArrayWithObjectsMethod = NSArrayDecl->lookupClassMethod(Sel);
+ if (!ArrayWithObjectsMethod && getLangOpts().DebuggerObjCLiteral) {
+ TypeSourceInfo *ResultTInfo = 0;
+ ArrayWithObjectsMethod =
+ ObjCMethodDecl::Create(Context,
+ SourceLocation(), SourceLocation(), Sel,
+ IdT,
+ ResultTInfo,
+ Context.getTranslationUnitDecl(),
+ false /*Instance*/, false/*isVariadic*/,
+ /*isSynthesized=*/false,
+ /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+ ObjCMethodDecl::Required,
+ false);
+ SmallVector<ParmVarDecl *, 2> Params;
+ ParmVarDecl *objects = ParmVarDecl::Create(Context, ArrayWithObjectsMethod,
+ SourceLocation(), SourceLocation(),
+ &Context.Idents.get("objects"),
+ Context.getPointerType(IdT),
+ /*TInfo=*/0,
+ SC_None,
+ SC_None,
+ 0);
+ Params.push_back(objects);
+ ParmVarDecl *cnt = ParmVarDecl::Create(Context, ArrayWithObjectsMethod,
+ SourceLocation(), SourceLocation(),
+ &Context.Idents.get("cnt"),
+ Context.UnsignedLongTy,
+ /*TInfo=*/0,
+ SC_None,
+ SC_None,
+ 0);
+ Params.push_back(cnt);
+ ArrayWithObjectsMethod->setMethodParams(Context, Params,
+ ArrayRef<SourceLocation>());
+
+ }
+
+ if (!ArrayWithObjectsMethod) {
+ Diag(SR.getBegin(), diag::err_undeclared_arraywithobjects) << Sel;
+ return ExprError();
+ }
+ }
+
+ // Make sure the return type is reasonable.
+ if (!ArrayWithObjectsMethod->getResultType()->isObjCObjectPointerType()) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << ArrayWithObjectsMethod->getSelector();
+ Diag(ArrayWithObjectsMethod->getLocation(),
+ diag::note_objc_literal_method_return)
+ << ArrayWithObjectsMethod->getResultType();
+ return ExprError();
+ }
+
+ // Dig out the type that all elements should be converted to.
+ QualType T = ArrayWithObjectsMethod->param_begin()[0]->getType();
+ const PointerType *PtrT = T->getAs<PointerType>();
+ if (!PtrT ||
+ !Context.hasSameUnqualifiedType(PtrT->getPointeeType(), IdT)) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << ArrayWithObjectsMethod->getSelector();
+ Diag(ArrayWithObjectsMethod->param_begin()[0]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 0 << T
+ << Context.getPointerType(IdT.withConst());
+ return ExprError();
+ }
+ T = PtrT->getPointeeType();
+
+ // Check that the 'count' parameter is integral.
+ if (!ArrayWithObjectsMethod->param_begin()[1]->getType()->isIntegerType()) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << ArrayWithObjectsMethod->getSelector();
+ Diag(ArrayWithObjectsMethod->param_begin()[1]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 1
+ << ArrayWithObjectsMethod->param_begin()[1]->getType()
+ << "integral";
+ return ExprError();
+ }
+
+ // Check that each of the elements provided is valid in a collection literal,
+ // performing conversions as necessary.
+ Expr **ElementsBuffer = Elements.get();
+ for (unsigned I = 0, N = Elements.size(); I != N; ++I) {
+ ExprResult Converted = CheckObjCCollectionLiteralElement(*this,
+ ElementsBuffer[I],
+ T);
+ if (Converted.isInvalid())
+ return ExprError();
+
+ ElementsBuffer[I] = Converted.get();
+ }
+
+ QualType Ty
+ = Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(NSArrayDecl));
+
+ return MaybeBindToTemporary(
+ ObjCArrayLiteral::Create(Context,
+ llvm::makeArrayRef(Elements.get(),
+ Elements.size()),
+ Ty, ArrayWithObjectsMethod, SR));
+}
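+
+// For example (assuming NSArray is declared):
+//
+//   NSArray *pair = @[ @1, @"two" ];
+//
+// is checked here roughly as if it were
+//
+//   [NSArray arrayWithObjects:objs count:2]
+//
+// where 'objs' stands for the temporary buffer of element expressions; each
+// element is converted to the 'id' parameter type dug out above, and the
+// literal's type is 'NSArray *'.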
+
+ExprResult Sema::BuildObjCDictionaryLiteral(SourceRange SR,
+ ObjCDictionaryElement *Elements,
+ unsigned NumElements) {
+ // Look up the NSDictionary class, if we haven't done so already.
+ if (!NSDictionaryDecl) {
+ NamedDecl *IF = LookupSingleName(TUScope,
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSDictionary),
+ SR.getBegin(), LookupOrdinaryName);
+ NSDictionaryDecl = dyn_cast_or_null<ObjCInterfaceDecl>(IF);
+ if (!NSDictionaryDecl && getLangOpts().DebuggerObjCLiteral)
+ NSDictionaryDecl = ObjCInterfaceDecl::Create (Context,
+ Context.getTranslationUnitDecl(),
+ SourceLocation(),
+ NSAPIObj->getNSClassId(NSAPI::ClassId_NSDictionary),
+ 0, SourceLocation());
+
+ if (!NSDictionaryDecl) {
+ Diag(SR.getBegin(), diag::err_undeclared_nsdictionary);
+ return ExprError();
+ }
+ }
+
+ // Find the dictionaryWithObjects:forKeys:count: method, if we haven't done
+ // so already.
+ QualType IdT = Context.getObjCIdType();
+ if (!DictionaryWithObjectsMethod) {
+ Selector Sel = NSAPIObj->getNSDictionarySelector(
+ NSAPI::NSDict_dictionaryWithObjectsForKeysCount);
+ DictionaryWithObjectsMethod = NSDictionaryDecl->lookupClassMethod(Sel);
+ if (!DictionaryWithObjectsMethod && getLangOpts().DebuggerObjCLiteral) {
+ DictionaryWithObjectsMethod =
+ ObjCMethodDecl::Create(Context,
+ SourceLocation(), SourceLocation(), Sel,
+ IdT,
+ 0 /*TypeSourceInfo */,
+ Context.getTranslationUnitDecl(),
+ false /*Instance*/, false/*isVariadic*/,
+ /*isSynthesized=*/false,
+ /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+ ObjCMethodDecl::Required,
+ false);
+ SmallVector<ParmVarDecl *, 3> Params;
+ ParmVarDecl *objects = ParmVarDecl::Create(Context, DictionaryWithObjectsMethod,
+ SourceLocation(), SourceLocation(),
+ &Context.Idents.get("objects"),
+ Context.getPointerType(IdT),
+ /*TInfo=*/0,
+ SC_None,
+ SC_None,
+ 0);
+ Params.push_back(objects);
+ ParmVarDecl *keys = ParmVarDecl::Create(Context, DictionaryWithObjectsMethod,
+ SourceLocation(), SourceLocation(),
+ &Context.Idents.get("keys"),
+ Context.getPointerType(IdT),
+ /*TInfo=*/0,
+ SC_None,
+ SC_None,
+ 0);
+ Params.push_back(keys);
+ ParmVarDecl *cnt = ParmVarDecl::Create(Context, DictionaryWithObjectsMethod,
+ SourceLocation(), SourceLocation(),
+ &Context.Idents.get("cnt"),
+ Context.UnsignedLongTy,
+ /*TInfo=*/0,
+ SC_None,
+ SC_None,
+ 0);
+ Params.push_back(cnt);
+ DictionaryWithObjectsMethod->setMethodParams(Context, Params,
+ ArrayRef<SourceLocation>());
+ }
+
+ if (!DictionaryWithObjectsMethod) {
+ Diag(SR.getBegin(), diag::err_undeclared_dictwithobjects) << Sel;
+ return ExprError();
+ }
+ }
+
+ // Make sure the return type is reasonable.
+ if (!DictionaryWithObjectsMethod->getResultType()->isObjCObjectPointerType()){
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << DictionaryWithObjectsMethod->getSelector();
+ Diag(DictionaryWithObjectsMethod->getLocation(),
+ diag::note_objc_literal_method_return)
+ << DictionaryWithObjectsMethod->getResultType();
+ return ExprError();
+ }
+
+ // Dig out the type that all values should be converted to.
+ QualType ValueT = DictionaryWithObjectsMethod->param_begin()[0]->getType();
+ const PointerType *PtrValue = ValueT->getAs<PointerType>();
+ if (!PtrValue ||
+ !Context.hasSameUnqualifiedType(PtrValue->getPointeeType(), IdT)) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << DictionaryWithObjectsMethod->getSelector();
+ Diag(DictionaryWithObjectsMethod->param_begin()[0]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 0 << ValueT
+ << Context.getPointerType(IdT.withConst());
+ return ExprError();
+ }
+ ValueT = PtrValue->getPointeeType();
+
+ // Dig out the type that all keys should be converted to.
+ QualType KeyT = DictionaryWithObjectsMethod->param_begin()[1]->getType();
+ const PointerType *PtrKey = KeyT->getAs<PointerType>();
+ if (!PtrKey ||
+ !Context.hasSameUnqualifiedType(PtrKey->getPointeeType(),
+ IdT)) {
+ bool err = true;
+ if (PtrKey) {
+ if (QIDNSCopying.isNull()) {
+ // Is the key argument of the selector typed as 'id<NSCopying>'?
+ if (ObjCProtocolDecl *NSCopyingPDecl =
+ LookupProtocol(&Context.Idents.get("NSCopying"), SR.getBegin())) {
+ ObjCProtocolDecl *PQ[] = {NSCopyingPDecl};
+ QIDNSCopying =
+ Context.getObjCObjectType(Context.ObjCBuiltinIdTy,
+ (ObjCProtocolDecl**) PQ,1);
+ QIDNSCopying = Context.getObjCObjectPointerType(QIDNSCopying);
+ }
+ }
+ if (!QIDNSCopying.isNull())
+ err = !Context.hasSameUnqualifiedType(PtrKey->getPointeeType(),
+ QIDNSCopying);
+ }
+
+ if (err) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << DictionaryWithObjectsMethod->getSelector();
+ Diag(DictionaryWithObjectsMethod->param_begin()[1]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 1 << KeyT
+ << Context.getPointerType(IdT.withConst());
+ return ExprError();
+ }
+ }
+ KeyT = PtrKey->getPointeeType();
+
+ // Check that the 'count' parameter is integral.
+ if (!DictionaryWithObjectsMethod->param_begin()[2]->getType()
+ ->isIntegerType()) {
+ Diag(SR.getBegin(), diag::err_objc_literal_method_sig)
+ << DictionaryWithObjectsMethod->getSelector();
+ Diag(DictionaryWithObjectsMethod->param_begin()[2]->getLocation(),
+ diag::note_objc_literal_method_param)
+ << 2
+ << DictionaryWithObjectsMethod->param_begin()[2]->getType()
+ << "integral";
+ return ExprError();
+ }
+
+ // Check that each of the keys and values provided is valid in a collection
+ // literal, performing conversions as necessary.
+ bool HasPackExpansions = false;
+ for (unsigned I = 0, N = NumElements; I != N; ++I) {
+ // Check the key.
+ ExprResult Key = CheckObjCCollectionLiteralElement(*this, Elements[I].Key,
+ KeyT);
+ if (Key.isInvalid())
+ return ExprError();
+
+ // Check the value.
+ ExprResult Value
+ = CheckObjCCollectionLiteralElement(*this, Elements[I].Value, ValueT);
+ if (Value.isInvalid())
+ return ExprError();
+
+ Elements[I].Key = Key.get();
+ Elements[I].Value = Value.get();
+
+ if (Elements[I].EllipsisLoc.isInvalid())
+ continue;
+
+ if (!Elements[I].Key->containsUnexpandedParameterPack() &&
+ !Elements[I].Value->containsUnexpandedParameterPack()) {
+ Diag(Elements[I].EllipsisLoc,
+ diag::err_pack_expansion_without_parameter_packs)
+ << SourceRange(Elements[I].Key->getLocStart(),
+ Elements[I].Value->getLocEnd());
+ return ExprError();
+ }
+
+ HasPackExpansions = true;
+ }
+
+
+ QualType Ty
+ = Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(NSDictionaryDecl));
+ return MaybeBindToTemporary(
+ ObjCDictionaryLiteral::Create(Context,
+ llvm::makeArrayRef(Elements,
+ NumElements),
+ HasPackExpansions,
+ Ty,
+ DictionaryWithObjectsMethod, SR));
+}
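+
+// For example (assuming NSDictionary is declared):
+//
+//   NSDictionary *d = @{ @"answer" : @42 };
+//
+// is checked roughly as a call to
+// +[NSDictionary dictionaryWithObjects:forKeys:count:], with each value
+// converted to 'id' and each key converted to 'id' (or to id<NSCopying>, when
+// the declared parameter uses that type, as handled above).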
+
+ExprResult Sema::BuildObjCEncodeExpression(SourceLocation AtLoc,
+ TypeSourceInfo *EncodedTypeInfo,
+ SourceLocation RParenLoc) {
+ QualType EncodedType = EncodedTypeInfo->getType();
+ QualType StrTy;
+ if (EncodedType->isDependentType())
+ StrTy = Context.DependentTy;
+ else {
+ if (!EncodedType->getAsArrayTypeUnsafe() && // Incomplete array is handled.
+ !EncodedType->isVoidType()) // void is handled too.
+ if (RequireCompleteType(AtLoc, EncodedType,
+ PDiag(diag::err_incomplete_type_objc_at_encode)
+ << EncodedTypeInfo->getTypeLoc().getSourceRange()))
+ return ExprError();
+
+ std::string Str;
+ Context.getObjCEncodingForType(EncodedType, Str);
+
+ // The type of @encode is the same as the type of the corresponding string,
+ // which is an array type.
+ StrTy = Context.CharTy;
+ // A C++ string literal has a const-qualified element type (C++ 2.13.4p1).
+ if (getLangOpts().CPlusPlus || getLangOpts().ConstStrings)
+ StrTy.addConst();
+ StrTy = Context.getConstantArrayType(StrTy, llvm::APInt(32, Str.size()+1),
+ ArrayType::Normal, 0);
+ }
+
+ return new (Context) ObjCEncodeExpr(StrTy, EncodedTypeInfo, AtLoc, RParenLoc);
+}
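+
+// For example:
+//
+//   const char *e = @encode(int);   // yields the string "i"
+//
+// The @encode expression itself has an array type, here 'char [2]' (with a
+// const element type in C++ or under -fconst-strings), just like the
+// corresponding string literal.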
+
+ExprResult Sema::ParseObjCEncodeExpression(SourceLocation AtLoc,
+ SourceLocation EncodeLoc,
+ SourceLocation LParenLoc,
+ ParsedType ty,
+ SourceLocation RParenLoc) {
+ // FIXME: Preserve type source info ?
+ TypeSourceInfo *TInfo;
+ QualType EncodedType = GetTypeFromParser(ty, &TInfo);
+ if (!TInfo)
+ TInfo = Context.getTrivialTypeSourceInfo(EncodedType,
+ PP.getLocForEndOfToken(LParenLoc));
+
+ return BuildObjCEncodeExpression(AtLoc, TInfo, RParenLoc);
+}
+
+ExprResult Sema::ParseObjCSelectorExpression(Selector Sel,
+ SourceLocation AtLoc,
+ SourceLocation SelLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
+ ObjCMethodDecl *Method = LookupInstanceMethodInGlobalPool(Sel,
+ SourceRange(LParenLoc, RParenLoc), false, false);
+ if (!Method)
+ Method = LookupFactoryMethodInGlobalPool(Sel,
+ SourceRange(LParenLoc, RParenLoc));
+ if (!Method)
+ Diag(SelLoc, diag::warn_undeclared_selector) << Sel;
+
+ if (!Method ||
+ Method->getImplementationControl() != ObjCMethodDecl::Optional) {
+ llvm::DenseMap<Selector, SourceLocation>::iterator Pos
+ = ReferencedSelectors.find(Sel);
+ if (Pos == ReferencedSelectors.end())
+ ReferencedSelectors.insert(std::make_pair(Sel, SelLoc));
+ }
+
+ // In ARC, forbid the user from using @selector for
+ // retain/release/autorelease/dealloc/retainCount.
+ if (getLangOpts().ObjCAutoRefCount) {
+ switch (Sel.getMethodFamily()) {
+ case OMF_retain:
+ case OMF_release:
+ case OMF_autorelease:
+ case OMF_retainCount:
+ case OMF_dealloc:
+ Diag(AtLoc, diag::err_arc_illegal_selector) <<
+ Sel << SourceRange(LParenLoc, RParenLoc);
+ break;
+
+ case OMF_None:
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_finalize:
+ case OMF_init:
+ case OMF_mutableCopy:
+ case OMF_new:
+ case OMF_self:
+ case OMF_performSelector:
+ break;
+ }
+ }
+ QualType Ty = Context.getObjCSelType();
+ return new (Context) ObjCSelectorExpr(Ty, Sel, AtLoc, RParenLoc);
+}
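+
+// Example of the ARC restriction enforced above:
+//
+//   SEL s = @selector(retain);     // error under -fobjc-arc
+//   SEL t = @selector(length);     // fine; also recorded as a referenced
+//                                  // selector for later diagnostics
+//
+// @selector expressions always have the type 'SEL' (Context.getObjCSelType()).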
+
+ExprResult Sema::ParseObjCProtocolExpression(IdentifierInfo *ProtocolId,
+ SourceLocation AtLoc,
+ SourceLocation ProtoLoc,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
+ ObjCProtocolDecl* PDecl = LookupProtocol(ProtocolId, ProtoLoc);
+ if (!PDecl) {
+ Diag(ProtoLoc, diag::err_undeclared_protocol) << ProtocolId;
+ return true;
+ }
+
+ QualType Ty = Context.getObjCProtoType();
+ if (Ty.isNull())
+ return true;
+ Ty = Context.getObjCObjectPointerType(Ty);
+ return new (Context) ObjCProtocolExpr(Ty, PDecl, AtLoc, RParenLoc);
+}
+
+/// Try to capture an implicit reference to 'self'.
+ObjCMethodDecl *Sema::tryCaptureObjCSelf(SourceLocation Loc) {
+ DeclContext *DC = getFunctionLevelDeclContext();
+
+ // If we're not in an ObjC method, error out. Note that, unlike the
+ // C++ case, we don't require an instance method --- class methods
+ // still have a 'self', and we really do still need to capture it!
+ ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(DC);
+ if (!method)
+ return 0;
+
+ tryCaptureVariable(method->getSelfDecl(), Loc);
+
+ return method;
+}
+
+static QualType stripObjCInstanceType(ASTContext &Context, QualType T) {
+ if (T == Context.getObjCInstanceType())
+ return Context.getObjCIdType();
+
+ return T;
+}
+
+QualType Sema::getMessageSendResultType(QualType ReceiverType,
+ ObjCMethodDecl *Method,
+ bool isClassMessage, bool isSuperMessage) {
+ assert(Method && "Must have a method");
+ if (!Method->hasRelatedResultType())
+ return Method->getSendResultType();
+
+ // If a method has a related return type:
+ // - if the method found is an instance method, but the message send
+ // was a class message send, T is the declared return type of the method
+ // found
+ if (Method->isInstanceMethod() && isClassMessage)
+ return stripObjCInstanceType(Context, Method->getSendResultType());
+
+ // - if the receiver is super, T is a pointer to the class of the
+ // enclosing method definition
+ if (isSuperMessage) {
+ if (ObjCMethodDecl *CurMethod = getCurMethodDecl())
+ if (ObjCInterfaceDecl *Class = CurMethod->getClassInterface())
+ return Context.getObjCObjectPointerType(
+ Context.getObjCInterfaceType(Class));
+ }
+
+ // - if the receiver is the name of a class U, T is a pointer to U
+ if (ReceiverType->getAs<ObjCInterfaceType>() ||
+ ReceiverType->isObjCQualifiedInterfaceType())
+ return Context.getObjCObjectPointerType(ReceiverType);
+ // - if the receiver is of type Class or qualified Class type,
+ // T is the declared return type of the method.
+ if (ReceiverType->isObjCClassType() ||
+ ReceiverType->isObjCQualifiedClassType())
+ return stripObjCInstanceType(Context, Method->getSendResultType());
+
+ // - if the receiver is id, qualified id, Class, or qualified Class, T
+ // is the receiver type, otherwise
+ // - T is the type of the receiver expression.
+ return ReceiverType;
+}
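+
+// A short example of the related-result-type rules spelled out above:
+//
+//   NSMutableArray *a = [[NSMutableArray alloc] init];
+//
+// -init is declared on the root class with a related result type, but because
+// the receiver expression has type 'NSMutableArray *', the message send is
+// given that same type rather than the declared 'id'.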
+
+void Sema::EmitRelatedResultTypeNote(const Expr *E) {
+ E = E->IgnoreParenImpCasts();
+ const ObjCMessageExpr *MsgSend = dyn_cast<ObjCMessageExpr>(E);
+ if (!MsgSend)
+ return;
+
+ const ObjCMethodDecl *Method = MsgSend->getMethodDecl();
+ if (!Method)
+ return;
+
+ if (!Method->hasRelatedResultType())
+ return;
+
+ if (Context.hasSameUnqualifiedType(Method->getResultType()
+ .getNonReferenceType(),
+ MsgSend->getType()))
+ return;
+
+ if (!Context.hasSameUnqualifiedType(Method->getResultType(),
+ Context.getObjCInstanceType()))
+ return;
+
+ Diag(Method->getLocation(), diag::note_related_result_type_inferred)
+ << Method->isInstanceMethod() << Method->getSelector()
+ << MsgSend->getType();
+}
+
+bool Sema::CheckMessageArgumentTypes(QualType ReceiverType,
+ Expr **Args, unsigned NumArgs,
+ Selector Sel, ObjCMethodDecl *Method,
+ bool isClassMessage, bool isSuperMessage,
+ SourceLocation lbrac, SourceLocation rbrac,
+ QualType &ReturnType, ExprValueKind &VK) {
+ if (!Method) {
+ // Apply default argument promotion as for (C99 6.5.2.2p6).
+ for (unsigned i = 0; i != NumArgs; i++) {
+ if (Args[i]->isTypeDependent())
+ continue;
+
+ ExprResult Result = DefaultArgumentPromotion(Args[i]);
+ if (Result.isInvalid())
+ return true;
+ Args[i] = Result.take();
+ }
+
+ unsigned DiagID;
+ if (getLangOpts().ObjCAutoRefCount)
+ DiagID = diag::err_arc_method_not_found;
+ else
+ DiagID = isClassMessage ? diag::warn_class_method_not_found
+ : diag::warn_inst_method_not_found;
+ if (!getLangOpts().DebuggerSupport)
+ Diag(lbrac, DiagID)
+ << Sel << isClassMessage << SourceRange(lbrac, rbrac);
+
+ // In debuggers, we want to use __unknown_anytype for these
+ // results so that clients can cast them.
+ if (getLangOpts().DebuggerSupport) {
+ ReturnType = Context.UnknownAnyTy;
+ } else {
+ ReturnType = Context.getObjCIdType();
+ }
+ VK = VK_RValue;
+ return false;
+ }
+
+ ReturnType = getMessageSendResultType(ReceiverType, Method, isClassMessage,
+ isSuperMessage);
+ VK = Expr::getValueKindForType(Method->getResultType());
+
+ unsigned NumNamedArgs = Sel.getNumArgs();
+ // The method might have more arguments than the selector indicates. This is
+ // due to the addition of C-style arguments to the method.
+ if (Method->param_size() > Sel.getNumArgs())
+ NumNamedArgs = Method->param_size();
+ // FIXME: This needs to be cleaned up.
+ if (NumArgs < NumNamedArgs) {
+ Diag(lbrac, diag::err_typecheck_call_too_few_args)
+ << 2 << NumNamedArgs << NumArgs;
+ return false;
+ }
+
+ bool IsError = false;
+ for (unsigned i = 0; i < NumNamedArgs; i++) {
+ // We can't do any type-checking on a type-dependent argument.
+ if (Args[i]->isTypeDependent())
+ continue;
+
+ Expr *argExpr = Args[i];
+
+ ParmVarDecl *param = Method->param_begin()[i];
+ assert(argExpr && "CheckMessageArgumentTypes(): missing expression");
+
+ // Strip the unbridged-cast placeholder expression off unless it's
+ // a consumed argument.
+ if (argExpr->hasPlaceholderType(BuiltinType::ARCUnbridgedCast) &&
+ !param->hasAttr<CFConsumedAttr>())
+ argExpr = stripARCUnbridgedCast(argExpr);
+
+ if (RequireCompleteType(argExpr->getSourceRange().getBegin(),
+ param->getType(),
+ PDiag(diag::err_call_incomplete_argument)
+ << argExpr->getSourceRange()))
+ return true;
+
+ InitializedEntity Entity = InitializedEntity::InitializeParameter(Context,
+ param);
+ ExprResult ArgE = PerformCopyInitialization(Entity, lbrac, Owned(argExpr));
+ if (ArgE.isInvalid())
+ IsError = true;
+ else
+ Args[i] = ArgE.takeAs<Expr>();
+ }
+
+ // Promote additional arguments to variadic methods.
+ if (Method->isVariadic()) {
+ for (unsigned i = NumNamedArgs; i < NumArgs; ++i) {
+ if (Args[i]->isTypeDependent())
+ continue;
+
+ ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod, 0);
+ IsError |= Arg.isInvalid();
+ Args[i] = Arg.take();
+ }
+ } else {
+ // Check for extra arguments to non-variadic methods.
+ if (NumArgs != NumNamedArgs) {
+ Diag(Args[NumNamedArgs]->getLocStart(),
+ diag::err_typecheck_call_too_many_args)
+ << 2 /*method*/ << NumNamedArgs << NumArgs
+ << Method->getSourceRange()
+ << SourceRange(Args[NumNamedArgs]->getLocStart(),
+ Args[NumArgs-1]->getLocEnd());
+ }
+ }
+
+ DiagnoseSentinelCalls(Method, lbrac, Args, NumArgs);
+
+ // Do additional checks on the method.
+ IsError |= CheckObjCMethodCall(Method, lbrac, Args, NumArgs);
+
+ return IsError;
+}
+
+bool Sema::isSelfExpr(Expr *receiver) {
+ // 'self' is only the Objective-C 'self' inside an Objective-C method.
+ ObjCMethodDecl *method =
+ dyn_cast<ObjCMethodDecl>(CurContext->getNonClosureAncestor());
+ if (!method) return false;
+
+ receiver = receiver->IgnoreParenLValueCasts();
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(receiver))
+ if (DRE->getDecl() == method->getSelfDecl())
+ return true;
+ return false;
+}
+
+// Helper method for ActOnClassMethod/ActOnInstanceMethod.
+// Searches "local" class/category implementations for a method declaration.
+// If that fails, we search the class's root for an instance method.
+// Returns 0 if no method is found.
+ObjCMethodDecl *Sema::LookupPrivateClassMethod(Selector Sel,
+ ObjCInterfaceDecl *ClassDecl) {
+ ObjCMethodDecl *Method = 0;
+ // lookup in class and all superclasses
+ while (ClassDecl && !Method) {
+ if (ObjCImplementationDecl *ImpDecl = ClassDecl->getImplementation())
+ Method = ImpDecl->getClassMethod(Sel);
+
+ // Look through local category implementations associated with the class.
+ if (!Method)
+ Method = ClassDecl->getCategoryClassMethod(Sel);
+
+ // Before we give up, check if the selector is an instance method.
+ // But only in the root. This matches gcc's behaviour and what the
+ // runtime expects.
+ if (!Method && !ClassDecl->getSuperClass()) {
+ Method = ClassDecl->lookupInstanceMethod(Sel);
+ // Look through local category implementations associated
+ // with the root class.
+ if (!Method)
+ Method = LookupPrivateInstanceMethod(Sel, ClassDecl);
+ }
+
+ ClassDecl = ClassDecl->getSuperClass();
+ }
+ return Method;
+}
+
+ObjCMethodDecl *Sema::LookupPrivateInstanceMethod(Selector Sel,
+ ObjCInterfaceDecl *ClassDecl) {
+ if (!ClassDecl->hasDefinition())
+ return 0;
+
+ ObjCMethodDecl *Method = 0;
+ while (ClassDecl && !Method) {
+ // If we have implementations in scope, check "private" methods.
+ if (ObjCImplementationDecl *ImpDecl = ClassDecl->getImplementation())
+ Method = ImpDecl->getInstanceMethod(Sel);
+
+ // Look through local category implementations associated with the class.
+ if (!Method)
+ Method = ClassDecl->getCategoryInstanceMethod(Sel);
+ ClassDecl = ClassDecl->getSuperClass();
+ }
+ return Method;
+}
+
+/// LookupMethodInObjectType - Look up a method in an ObjCObjectType.
+ObjCMethodDecl *Sema::LookupMethodInObjectType(Selector sel, QualType type,
+ bool isInstance) {
+ const ObjCObjectType *objType = type->castAs<ObjCObjectType>();
+ if (ObjCInterfaceDecl *iface = objType->getInterface()) {
+ // Look it up in the main interface (and categories, etc.)
+ if (ObjCMethodDecl *method = iface->lookupMethod(sel, isInstance))
+ return method;
+
+ // Okay, look for "private" methods declared in any
+ // @implementations we've seen.
+ if (isInstance) {
+ if (ObjCMethodDecl *method = LookupPrivateInstanceMethod(sel, iface))
+ return method;
+ } else {
+ if (ObjCMethodDecl *method = LookupPrivateClassMethod(sel, iface))
+ return method;
+ }
+ }
+
+ // Check qualifiers.
+ for (ObjCObjectType::qual_iterator
+ i = objType->qual_begin(), e = objType->qual_end(); i != e; ++i)
+ if (ObjCMethodDecl *method = (*i)->lookupMethod(sel, isInstance))
+ return method;
+
+ return 0;
+}
+
+/// LookupMethodInQualifiedType - Looks up a method in the protocol qualifier
+/// list of a qualified Objective-C object pointer type.
+ObjCMethodDecl *Sema::LookupMethodInQualifiedType(Selector Sel,
+ const ObjCObjectPointerType *OPT,
+ bool Instance)
+{
+ ObjCMethodDecl *MD = 0;
+ for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(),
+ E = OPT->qual_end(); I != E; ++I) {
+ ObjCProtocolDecl *PROTO = (*I);
+ if ((MD = PROTO->lookupMethod(Sel, Instance))) {
+ return MD;
+ }
+ }
+ return 0;
+}
+
+/// HandleExprPropertyRefExpr - Handle foo.bar where foo is a pointer to an
+/// Objective-C interface. This is a property reference expression.
+ExprResult Sema::
+HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
+ Expr *BaseExpr, SourceLocation OpLoc,
+ DeclarationName MemberName,
+ SourceLocation MemberLoc,
+ SourceLocation SuperLoc, QualType SuperType,
+ bool Super) {
+ const ObjCInterfaceType *IFaceT = OPT->getInterfaceType();
+ ObjCInterfaceDecl *IFace = IFaceT->getDecl();
+
+ if (MemberName.getNameKind() != DeclarationName::Identifier) {
+ Diag(MemberLoc, diag::err_invalid_property_name)
+ << MemberName << QualType(OPT, 0);
+ return ExprError();
+ }
+
+ IdentifierInfo *Member = MemberName.getAsIdentifierInfo();
+ SourceRange BaseRange = Super? SourceRange(SuperLoc)
+ : BaseExpr->getSourceRange();
+ if (RequireCompleteType(MemberLoc, OPT->getPointeeType(),
+ PDiag(diag::err_property_not_found_forward_class)
+ << MemberName << BaseRange))
+ return ExprError();
+
+ // Search for a declared property first.
+ if (ObjCPropertyDecl *PD = IFace->FindPropertyDeclaration(Member)) {
+ // Check whether we can reference this property.
+ if (DiagnoseUseOfDecl(PD, MemberLoc))
+ return ExprError();
+
+ if (Super)
+ return Owned(new (Context) ObjCPropertyRefExpr(PD, Context.PseudoObjectTy,
+ VK_LValue, OK_ObjCProperty,
+ MemberLoc,
+ SuperLoc, SuperType));
+ else
+ return Owned(new (Context) ObjCPropertyRefExpr(PD, Context.PseudoObjectTy,
+ VK_LValue, OK_ObjCProperty,
+ MemberLoc, BaseExpr));
+ }
+ // Check protocols on qualified interfaces.
+ for (ObjCObjectPointerType::qual_iterator I = OPT->qual_begin(),
+ E = OPT->qual_end(); I != E; ++I)
+ if (ObjCPropertyDecl *PD = (*I)->FindPropertyDeclaration(Member)) {
+ // Check whether we can reference this property.
+ if (DiagnoseUseOfDecl(PD, MemberLoc))
+ return ExprError();
+
+ if (Super)
+ return Owned(new (Context) ObjCPropertyRefExpr(PD,
+ Context.PseudoObjectTy,
+ VK_LValue,
+ OK_ObjCProperty,
+ MemberLoc,
+ SuperLoc, SuperType));
+ else
+ return Owned(new (Context) ObjCPropertyRefExpr(PD,
+ Context.PseudoObjectTy,
+ VK_LValue,
+ OK_ObjCProperty,
+ MemberLoc,
+ BaseExpr));
+ }
+ // If that failed, look for an "implicit" property by seeing if the nullary
+ // selector is implemented.
+
+ // FIXME: The logic for looking up nullary and unary selectors should be
+ // shared with the code in ActOnInstanceMessage.
+
+ Selector Sel = PP.getSelectorTable().getNullarySelector(Member);
+ ObjCMethodDecl *Getter = IFace->lookupInstanceMethod(Sel);
+
+ // The getter may be found in the protocol qualifier list.
+ if (!Getter)
+ Getter = LookupMethodInQualifiedType(Sel, OPT, true);
+
+ // If this reference is in an @implementation, check for 'private' methods.
+ if (!Getter)
+ Getter = IFace->lookupPrivateMethod(Sel);
+
+ // Look through local category implementations associated with the class.
+ if (!Getter)
+ Getter = IFace->getCategoryInstanceMethod(Sel);
+ if (Getter) {
+ // Check if we can reference this property.
+ if (DiagnoseUseOfDecl(Getter, MemberLoc))
+ return ExprError();
+ }
+ // If we found a getter, this may be a valid dot-reference; look for the
+ // matching setter, in case it is needed.
+ Selector SetterSel =
+ SelectorTable::constructSetterName(PP.getIdentifierTable(),
+ PP.getSelectorTable(), Member);
+ ObjCMethodDecl *Setter = IFace->lookupInstanceMethod(SetterSel);
+
+ // The setter may be found in the protocol qualifier list.
+ if (!Setter)
+ Setter = LookupMethodInQualifiedType(SetterSel, OPT, true);
+
+ if (!Setter) {
+ // If this reference is in an @implementation, also check for 'private'
+ // methods.
+ Setter = IFace->lookupPrivateMethod(SetterSel);
+ }
+ // Look through local category implementations associated with the class.
+ if (!Setter)
+ Setter = IFace->getCategoryInstanceMethod(SetterSel);
+
+ if (Setter && DiagnoseUseOfDecl(Setter, MemberLoc))
+ return ExprError();
+
+ if (Getter || Setter) {
+ if (Super)
+ return Owned(new (Context) ObjCPropertyRefExpr(Getter, Setter,
+ Context.PseudoObjectTy,
+ VK_LValue, OK_ObjCProperty,
+ MemberLoc,
+ SuperLoc, SuperType));
+ else
+ return Owned(new (Context) ObjCPropertyRefExpr(Getter, Setter,
+ Context.PseudoObjectTy,
+ VK_LValue, OK_ObjCProperty,
+ MemberLoc, BaseExpr));
+
+ }
+
+ // Attempt to correct for typos in property names.
+ DeclFilterCCC<ObjCPropertyDecl> Validator;
+ if (TypoCorrection Corrected = CorrectTypo(
+ DeclarationNameInfo(MemberName, MemberLoc), LookupOrdinaryName, NULL,
+ NULL, Validator, IFace, false, OPT)) {
+ ObjCPropertyDecl *Property =
+ Corrected.getCorrectionDeclAs<ObjCPropertyDecl>();
+ DeclarationName TypoResult = Corrected.getCorrection();
+ Diag(MemberLoc, diag::err_property_not_found_suggest)
+ << MemberName << QualType(OPT, 0) << TypoResult
+ << FixItHint::CreateReplacement(MemberLoc, TypoResult.getAsString());
+ Diag(Property->getLocation(), diag::note_previous_decl)
+ << Property->getDeclName();
+ return HandleExprPropertyRefExpr(OPT, BaseExpr, OpLoc,
+ TypoResult, MemberLoc,
+ SuperLoc, SuperType, Super);
+ }
+ ObjCInterfaceDecl *ClassDeclared;
+ if (ObjCIvarDecl *Ivar =
+ IFace->lookupInstanceVariable(Member, ClassDeclared)) {
+ QualType T = Ivar->getType();
+ if (const ObjCObjectPointerType * OBJPT =
+ T->getAsObjCInterfacePointerType()) {
+ if (RequireCompleteType(MemberLoc, OBJPT->getPointeeType(),
+ PDiag(diag::err_property_not_as_forward_class)
+ << MemberName << BaseExpr->getSourceRange()))
+ return ExprError();
+ }
+ Diag(MemberLoc,
+ diag::err_ivar_access_using_property_syntax_suggest)
+ << MemberName << QualType(OPT, 0) << Ivar->getDeclName()
+ << FixItHint::CreateReplacement(OpLoc, "->");
+ return ExprError();
+ }
+
+ Diag(MemberLoc, diag::err_property_not_found)
+ << MemberName << QualType(OPT, 0);
+ if (Setter)
+ Diag(Setter->getLocation(), diag::note_getter_unavailable)
+ << MemberName << BaseExpr->getSourceRange();
+ return ExprError();
+}
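+
+// For illustration (property and receiver names are hypothetical):
+//
+//   NSUInteger n = array.count;   // found via the declared property or the
+//                                 // nullary -count getter
+//   obj.name = @"x";              // pairs the -name getter with -setName:
+//
+// When neither a property nor getter/setter methods exist, the code above
+// tries typo correction, then suggests '->' if an ivar of that name exists,
+// and finally emits err_property_not_found.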
+
+ExprResult Sema::
+ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
+ IdentifierInfo &propertyName,
+ SourceLocation receiverNameLoc,
+ SourceLocation propertyNameLoc) {
+
+ IdentifierInfo *receiverNamePtr = &receiverName;
+ ObjCInterfaceDecl *IFace = getObjCInterfaceDecl(receiverNamePtr,
+ receiverNameLoc);
+
+ bool IsSuper = false;
+ if (IFace == 0) {
+ // If the "receiver" is 'super' in a method, handle it as an expression-like
+ // property reference.
+ if (receiverNamePtr->isStr("super")) {
+ IsSuper = true;
+
+ if (ObjCMethodDecl *CurMethod = tryCaptureObjCSelf(receiverNameLoc)) {
+ if (CurMethod->isInstanceMethod()) {
+ QualType T =
+ Context.getObjCInterfaceType(CurMethod->getClassInterface());
+ T = Context.getObjCObjectPointerType(T);
+
+ return HandleExprPropertyRefExpr(T->getAsObjCInterfacePointerType(),
+ /*BaseExpr*/0,
+ SourceLocation()/*OpLoc*/,
+ &propertyName,
+ propertyNameLoc,
+ receiverNameLoc, T, true);
+ }
+
+ // Otherwise, if this is a class method, try dispatching to our
+ // superclass.
+ IFace = CurMethod->getClassInterface()->getSuperClass();
+ }
+ }
+
+ if (IFace == 0) {
+ Diag(receiverNameLoc, diag::err_expected_ident_or_lparen);
+ return ExprError();
+ }
+ }
+
+ // Search for a declared property first.
+ Selector Sel = PP.getSelectorTable().getNullarySelector(&propertyName);
+ ObjCMethodDecl *Getter = IFace->lookupClassMethod(Sel);
+
+ // If this reference is in an @implementation, check for 'private' methods.
+ if (!Getter)
+ if (ObjCMethodDecl *CurMeth = getCurMethodDecl())
+ if (ObjCInterfaceDecl *ClassDecl = CurMeth->getClassInterface())
+ if (ObjCImplementationDecl *ImpDecl = ClassDecl->getImplementation())
+ Getter = ImpDecl->getClassMethod(Sel);
+
+ if (Getter) {
+ // FIXME: refactor/share with ActOnMemberReference().
+ // Check if we can reference this property.
+ if (DiagnoseUseOfDecl(Getter, propertyNameLoc))
+ return ExprError();
+ }
+
+ // Look for the matching setter, in case it is needed.
+ Selector SetterSel =
+ SelectorTable::constructSetterName(PP.getIdentifierTable(),
+ PP.getSelectorTable(), &propertyName);
+
+ ObjCMethodDecl *Setter = IFace->lookupClassMethod(SetterSel);
+ if (!Setter) {
+ // If this reference is in an @implementation, also check for 'private'
+ // methods.
+ if (ObjCMethodDecl *CurMeth = getCurMethodDecl())
+ if (ObjCInterfaceDecl *ClassDecl = CurMeth->getClassInterface())
+ if (ObjCImplementationDecl *ImpDecl = ClassDecl->getImplementation())
+ Setter = ImpDecl->getClassMethod(SetterSel);
+ }
+ // Look through local category implementations associated with the class.
+ if (!Setter)
+ Setter = IFace->getCategoryClassMethod(SetterSel);
+
+ if (Setter && DiagnoseUseOfDecl(Setter, propertyNameLoc))
+ return ExprError();
+
+ if (Getter || Setter) {
+ if (IsSuper)
+ return Owned(new (Context) ObjCPropertyRefExpr(Getter, Setter,
+ Context.PseudoObjectTy,
+ VK_LValue, OK_ObjCProperty,
+ propertyNameLoc,
+ receiverNameLoc,
+ Context.getObjCInterfaceType(IFace)));
+
+ return Owned(new (Context) ObjCPropertyRefExpr(Getter, Setter,
+ Context.PseudoObjectTy,
+ VK_LValue, OK_ObjCProperty,
+ propertyNameLoc,
+ receiverNameLoc, IFace));
+ }
+ return ExprError(Diag(propertyNameLoc, diag::err_property_not_found)
+ << &propertyName << Context.getObjCInterfaceType(IFace));
+}
+
+namespace {
+
+class ObjCInterfaceOrSuperCCC : public CorrectionCandidateCallback {
+ public:
+ ObjCInterfaceOrSuperCCC(ObjCMethodDecl *Method) {
+ // Determine whether "super" is acceptable in the current context.
+ if (Method && Method->getClassInterface())
+ WantObjCSuper = Method->getClassInterface()->getSuperClass();
+ }
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ return candidate.getCorrectionDeclAs<ObjCInterfaceDecl>() ||
+ candidate.isKeyword("super");
+ }
+};
+
+}
+
+Sema::ObjCMessageKind Sema::getObjCMessageKind(Scope *S,
+ IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ bool IsSuper,
+ bool HasTrailingDot,
+ ParsedType &ReceiverType) {
+ ReceiverType = ParsedType();
+
+ // If the identifier is "super" and there is no trailing dot, we're
+ // messaging super. If the identifier is "super" and there is a
+ // trailing dot, it's an instance message.
+ if (IsSuper && S->isInObjcMethodScope())
+ return HasTrailingDot? ObjCInstanceMessage : ObjCSuperMessage;
+
+ LookupResult Result(*this, Name, NameLoc, LookupOrdinaryName);
+ LookupName(Result, S);
+
+ switch (Result.getResultKind()) {
+ case LookupResult::NotFound:
+ // Normal name lookup didn't find anything. If we're in an
+ // Objective-C method, look for ivars. If we find one, we're done!
+ // FIXME: This is a hack. Ivar lookup should be part of normal
+ // lookup.
+ if (ObjCMethodDecl *Method = getCurMethodDecl()) {
+ if (!Method->getClassInterface()) {
+ // Fall back: let the parser try to parse it as an instance message.
+ return ObjCInstanceMessage;
+ }
+
+ ObjCInterfaceDecl *ClassDeclared;
+ if (Method->getClassInterface()->lookupInstanceVariable(Name,
+ ClassDeclared))
+ return ObjCInstanceMessage;
+ }
+
+ // Break out; we'll perform typo correction below.
+ break;
+
+ case LookupResult::NotFoundInCurrentInstantiation:
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue:
+ case LookupResult::Ambiguous:
+ Result.suppressDiagnostics();
+ return ObjCInstanceMessage;
+
+ case LookupResult::Found: {
+ // Whether or not the identifier names a class, a trailing dot means
+ // this is an instance message.
+ if (HasTrailingDot)
+ return ObjCInstanceMessage;
+ // We found something. If it's a type, then we have a class
+ // message. Otherwise, it's an instance message.
+ NamedDecl *ND = Result.getFoundDecl();
+ QualType T;
+ if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(ND))
+ T = Context.getObjCInterfaceType(Class);
+ else if (TypeDecl *Type = dyn_cast<TypeDecl>(ND))
+ T = Context.getTypeDeclType(Type);
+ else
+ return ObjCInstanceMessage;
+
+ // We have a class message, and T is the type we're
+ // messaging. Build source-location information for it.
+ TypeSourceInfo *TSInfo = Context.getTrivialTypeSourceInfo(T, NameLoc);
+ ReceiverType = CreateParsedType(T, TSInfo);
+ return ObjCClassMessage;
+ }
+ }
+
+ ObjCInterfaceOrSuperCCC Validator(getCurMethodDecl());
+ if (TypoCorrection Corrected = CorrectTypo(Result.getLookupNameInfo(),
+ Result.getLookupKind(), S, NULL,
+ Validator)) {
+ if (Corrected.isKeyword()) {
+ // If we've found the keyword "super" (the only keyword that would be
+ // returned by CorrectTypo), this is a send to super.
+ Diag(NameLoc, diag::err_unknown_receiver_suggest)
+ << Name << Corrected.getCorrection()
+ << FixItHint::CreateReplacement(SourceRange(NameLoc), "super");
+ return ObjCSuperMessage;
+ } else if (ObjCInterfaceDecl *Class =
+ Corrected.getCorrectionDeclAs<ObjCInterfaceDecl>()) {
+ // If we found a declaration, correct when it refers to an Objective-C
+ // class.
+ Diag(NameLoc, diag::err_unknown_receiver_suggest)
+ << Name << Corrected.getCorrection()
+ << FixItHint::CreateReplacement(SourceRange(NameLoc),
+ Class->getNameAsString());
+ Diag(Class->getLocation(), diag::note_previous_decl)
+ << Corrected.getCorrection();
+
+ QualType T = Context.getObjCInterfaceType(Class);
+ TypeSourceInfo *TSInfo = Context.getTrivialTypeSourceInfo(T, NameLoc);
+ ReceiverType = CreateParsedType(T, TSInfo);
+ return ObjCClassMessage;
+ }
+ }
+
+ // Fall back: let the parser try to parse it as an instance message.
+ return ObjCInstanceMessage;
+}
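+
+// Rough classification examples (identifiers other than Foundation class
+// names are hypothetical):
+//
+//   [NSString string]       // 'NSString' names a class  -> ObjCClassMessage
+//   [myObject description]  // 'myObject' is a value     -> ObjCInstanceMessage
+//   [super init]            // inside a method           -> ObjCSuperMessage
+//   [NSStrng string]        // unknown name: typo-corrected to 'NSString' with
+//                           // a fix-it, then treated as a class message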
+
+ExprResult Sema::ActOnSuperMessage(Scope *S,
+ SourceLocation SuperLoc,
+ Selector Sel,
+ SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg Args) {
+ // Determine whether we are inside a method or not.
+ ObjCMethodDecl *Method = tryCaptureObjCSelf(SuperLoc);
+ if (!Method) {
+ Diag(SuperLoc, diag::err_invalid_receiver_to_message_super);
+ return ExprError();
+ }
+
+ ObjCInterfaceDecl *Class = Method->getClassInterface();
+ if (!Class) {
+ Diag(SuperLoc, diag::error_no_super_class_message)
+ << Method->getDeclName();
+ return ExprError();
+ }
+
+ ObjCInterfaceDecl *Super = Class->getSuperClass();
+ if (!Super) {
+ // The current class does not have a superclass.
+ Diag(SuperLoc, diag::error_root_class_cannot_use_super)
+ << Class->getIdentifier();
+ return ExprError();
+ }
+
+ // We are in a method whose class has a superclass, so 'super'
+ // is acting as a keyword.
+ if (Method->isInstanceMethod()) {
+ if (Sel.getMethodFamily() == OMF_dealloc)
+ ObjCShouldCallSuperDealloc = false;
+ if (Sel.getMethodFamily() == OMF_finalize)
+ ObjCShouldCallSuperFinalize = false;
+
+ // Since we are in an instance method, this is an instance
+ // message to the superclass instance.
+ QualType SuperTy = Context.getObjCInterfaceType(Super);
+ SuperTy = Context.getObjCObjectPointerType(SuperTy);
+ return BuildInstanceMessage(0, SuperTy, SuperLoc,
+ Sel, /*Method=*/0,
+ LBracLoc, SelectorLocs, RBracLoc, move(Args));
+ }
+
+ // Since we are in a class method, this is a class message to
+ // the superclass.
+ return BuildClassMessage(/*ReceiverTypeInfo=*/0,
+ Context.getObjCInterfaceType(Super),
+ SuperLoc, Sel, /*Method=*/0,
+ LBracLoc, SelectorLocs, RBracLoc, move(Args));
+}
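+
+// For example, inside a subclass implementation (method bodies elided):
+//
+//   - (id)init         { self = [super init]; ... }  // instance message to the
+//                                                    // superclass instance
+//   + (void)initialize { [super initialize]; }       // class message to the
+//                                                    // superclass
+//
+// Using 'super' in a root class, or outside a method, is rejected above.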
+
+
+ExprResult Sema::BuildClassMessageImplicit(QualType ReceiverType,
+ bool isSuperReceiver,
+ SourceLocation Loc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ MultiExprArg Args) {
+ TypeSourceInfo *receiverTypeInfo = 0;
+ if (!ReceiverType.isNull())
+ receiverTypeInfo = Context.getTrivialTypeSourceInfo(ReceiverType);
+
+ return BuildClassMessage(receiverTypeInfo, ReceiverType,
+ /*SuperLoc=*/isSuperReceiver ? Loc : SourceLocation(),
+ Sel, Method, Loc, Loc, Loc, Args,
+ /*isImplicit=*/true);
+
+}
+
+static void applyCocoaAPICheck(Sema &S, const ObjCMessageExpr *Msg,
+ unsigned DiagID,
+ bool (*refactor)(const ObjCMessageExpr *,
+ const NSAPI &, edit::Commit &)) {
+ SourceLocation MsgLoc = Msg->getExprLoc();
+ if (S.Diags.getDiagnosticLevel(DiagID, MsgLoc) == DiagnosticsEngine::Ignored)
+ return;
+
+ SourceManager &SM = S.SourceMgr;
+ edit::Commit ECommit(SM, S.LangOpts);
+ if (refactor(Msg,*S.NSAPIObj, ECommit)) {
+ DiagnosticBuilder Builder = S.Diag(MsgLoc, DiagID)
+ << Msg->getSelector() << Msg->getSourceRange();
+ // FIXME: Don't emit the diagnostic at all if the fix-its are not committable.
+ if (!ECommit.isCommitable())
+ return;
+ for (edit::Commit::edit_iterator
+ I = ECommit.edit_begin(), E = ECommit.edit_end(); I != E; ++I) {
+ const edit::Commit::Edit &Edit = *I;
+ switch (Edit.Kind) {
+ case edit::Commit::Act_Insert:
+ Builder.AddFixItHint(FixItHint::CreateInsertion(Edit.OrigLoc,
+ Edit.Text,
+ Edit.BeforePrev));
+ break;
+ case edit::Commit::Act_InsertFromRange:
+ Builder.AddFixItHint(
+ FixItHint::CreateInsertionFromRange(Edit.OrigLoc,
+ Edit.getInsertFromRange(SM),
+ Edit.BeforePrev));
+ break;
+ case edit::Commit::Act_Remove:
+ Builder.AddFixItHint(FixItHint::CreateRemoval(Edit.getFileRange(SM)));
+ break;
+ }
+ }
+ }
+}
+
+static void checkCocoaAPI(Sema &S, const ObjCMessageExpr *Msg) {
+ applyCocoaAPICheck(S, Msg, diag::warn_objc_redundant_literal_use,
+ edit::rewriteObjCRedundantCallWithLiteral);
+}
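+
+// Example of the rewrite this check can suggest (one representative case; the
+// full set of patterns lives in edit::rewriteObjCRedundantCallWithLiteral):
+//
+//   [NSString stringWithString:@"hi"]   // warns and offers a fix-it to just
+//                                       // @"hi", since the call is redundant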
+
+/// \brief Build an Objective-C class message expression.
+///
+/// This routine takes care of both normal class messages and
+/// class messages to the superclass.
+///
+/// \param ReceiverTypeInfo Type source information that describes the
+/// receiver of this message. This may be NULL, in which case we are
+/// sending to the superclass and \p SuperLoc must be a valid source
+/// location.
+///
+/// \param ReceiverType The type of the object receiving the
+/// message. When \p ReceiverTypeInfo is non-NULL, this is the same
+/// type as that refers to. For a superclass send, this is the type of
+/// the superclass.
+///
+/// \param SuperLoc The location of the "super" keyword in a
+/// superclass message.
+///
+/// \param Sel The selector to which the message is being sent.
+///
+/// \param Method The method that this class message is invoking, if
+/// already known.
+///
+/// \param LBracLoc The location of the opening square bracket '['.
+///
+/// \param RBracLoc The location of the closing square bracket ']'.
+///
+/// \param Args The message arguments.
+ExprResult Sema::BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
+ QualType ReceiverType,
+ SourceLocation SuperLoc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg ArgsIn,
+ bool isImplicit) {
+ SourceLocation Loc = SuperLoc.isValid()? SuperLoc
+ : ReceiverTypeInfo->getTypeLoc().getSourceRange().getBegin();
+ if (LBracLoc.isInvalid()) {
+ Diag(Loc, diag::err_missing_open_square_message_send)
+ << FixItHint::CreateInsertion(Loc, "[");
+ LBracLoc = Loc;
+ }
+
+ if (ReceiverType->isDependentType()) {
+ // If the receiver type is dependent, we can't type-check anything
+ // at this point. Build a dependent expression.
+ unsigned NumArgs = ArgsIn.size();
+ Expr **Args = reinterpret_cast<Expr **>(ArgsIn.release());
+ assert(SuperLoc.isInvalid() && "Message to super with dependent type");
+ return Owned(ObjCMessageExpr::Create(Context, ReceiverType,
+ VK_RValue, LBracLoc, ReceiverTypeInfo,
+ Sel, SelectorLocs, /*Method=*/0,
+ makeArrayRef(Args, NumArgs),RBracLoc,
+ isImplicit));
+ }
+
+ // Find the class to which we are sending this message.
+ ObjCInterfaceDecl *Class = 0;
+ const ObjCObjectType *ClassType = ReceiverType->getAs<ObjCObjectType>();
+ if (!ClassType || !(Class = ClassType->getInterface())) {
+ Diag(Loc, diag::err_invalid_receiver_class_message)
+ << ReceiverType;
+ return ExprError();
+ }
+ assert(Class && "We don't know which class we're messaging?");
+ // objc++ diagnoses during typename annotation.
+ if (!getLangOpts().CPlusPlus)
+ (void)DiagnoseUseOfDecl(Class, Loc);
+ // Find the method we are messaging.
+ if (!Method) {
+ SourceRange TypeRange
+ = SuperLoc.isValid()? SourceRange(SuperLoc)
+ : ReceiverTypeInfo->getTypeLoc().getSourceRange();
+ if (RequireCompleteType(Loc, Context.getObjCInterfaceType(Class),
+ (getLangOpts().ObjCAutoRefCount
+ ? PDiag(diag::err_arc_receiver_forward_class)
+ : PDiag(diag::warn_receiver_forward_class))
+ << TypeRange)) {
+ // A forward class used in messaging is treated as a 'Class'
+ Method = LookupFactoryMethodInGlobalPool(Sel,
+ SourceRange(LBracLoc, RBracLoc));
+ if (Method && !getLangOpts().ObjCAutoRefCount)
+ Diag(Method->getLocation(), diag::note_method_sent_forward_class)
+ << Method->getDeclName();
+ }
+ if (!Method)
+ Method = Class->lookupClassMethod(Sel);
+
+ // If we have an implementation in scope, check "private" methods.
+ if (!Method)
+ Method = LookupPrivateClassMethod(Sel, Class);
+
+ if (Method && DiagnoseUseOfDecl(Method, Loc))
+ return ExprError();
+ }
+
+ // Check the argument types and determine the result type.
+ QualType ReturnType;
+ ExprValueKind VK = VK_RValue;
+
+ unsigned NumArgs = ArgsIn.size();
+ Expr **Args = reinterpret_cast<Expr **>(ArgsIn.release());
+ if (CheckMessageArgumentTypes(ReceiverType, Args, NumArgs, Sel, Method, true,
+ SuperLoc.isValid(), LBracLoc, RBracLoc,
+ ReturnType, VK))
+ return ExprError();
+
+ if (Method && !Method->getResultType()->isVoidType() &&
+ RequireCompleteType(LBracLoc, Method->getResultType(),
+ diag::err_illegal_message_expr_incomplete_type))
+ return ExprError();
+
+ // Construct the appropriate ObjCMessageExpr.
+ ObjCMessageExpr *Result;
+ if (SuperLoc.isValid())
+ Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
+ SuperLoc, /*IsInstanceSuper=*/false,
+ ReceiverType, Sel, SelectorLocs,
+ Method, makeArrayRef(Args, NumArgs),
+ RBracLoc, isImplicit);
+ else {
+ Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
+ ReceiverTypeInfo, Sel, SelectorLocs,
+ Method, makeArrayRef(Args, NumArgs),
+ RBracLoc, isImplicit);
+ if (!isImplicit)
+ checkCocoaAPI(*this, Result);
+ }
+ return MaybeBindToTemporary(Result);
+}
+
+// ActOnClassMessage - used for both unary and keyword messages.
+// Args is optional - if it is present, the number of expressions
+// is obtained from Sel.getNumArgs().
+ExprResult Sema::ActOnClassMessage(Scope *S,
+ ParsedType Receiver,
+ Selector Sel,
+ SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg Args) {
+ TypeSourceInfo *ReceiverTypeInfo;
+ QualType ReceiverType = GetTypeFromParser(Receiver, &ReceiverTypeInfo);
+ if (ReceiverType.isNull())
+ return ExprError();
+
+
+ if (!ReceiverTypeInfo)
+ ReceiverTypeInfo = Context.getTrivialTypeSourceInfo(ReceiverType, LBracLoc);
+
+ return BuildClassMessage(ReceiverTypeInfo, ReceiverType,
+ /*SuperLoc=*/SourceLocation(), Sel, /*Method=*/0,
+ LBracLoc, SelectorLocs, RBracLoc, move(Args));
+}
+
+ExprResult Sema::BuildInstanceMessageImplicit(Expr *Receiver,
+ QualType ReceiverType,
+ SourceLocation Loc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ MultiExprArg Args) {
+ return BuildInstanceMessage(Receiver, ReceiverType,
+ /*SuperLoc=*/!Receiver ? Loc : SourceLocation(),
+ Sel, Method, Loc, Loc, Loc, Args,
+ /*isImplicit=*/true);
+}
+
+/// \brief Build an Objective-C instance message expression.
+///
+/// This routine takes care of both normal instance messages and
+/// instance messages to the superclass instance.
+///
+/// \param Receiver The expression that computes the object that will
+/// receive this message. This may be empty, in which case we are
+/// sending to the superclass instance and \p SuperLoc must be a valid
+/// source location.
+///
+/// \param ReceiverType The (static) type of the object receiving the
+/// message. When a \p Receiver expression is provided, this is the
+/// same type as that expression. For a superclass instance send, this
+/// is a pointer to the type of the superclass.
+///
+/// \param SuperLoc The location of the "super" keyword in a
+/// superclass instance message.
+///
+/// \param Sel The selector to which the message is being sent.
+///
+/// \param Method The method that this instance message is invoking, if
+/// already known.
+///
+/// \param LBracLoc The location of the opening square bracket '['.
+///
+/// \param RBracLoc The location of the closing square bracket ']'.
+///
+/// \param Args The message arguments.
+ExprResult Sema::BuildInstanceMessage(Expr *Receiver,
+ QualType ReceiverType,
+ SourceLocation SuperLoc,
+ Selector Sel,
+ ObjCMethodDecl *Method,
+ SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg ArgsIn,
+ bool isImplicit) {
+ // The location of the receiver.
+ SourceLocation Loc = SuperLoc.isValid()? SuperLoc : Receiver->getLocStart();
+
+ if (LBracLoc.isInvalid()) {
+ Diag(Loc, diag::err_missing_open_square_message_send)
+ << FixItHint::CreateInsertion(Loc, "[");
+ LBracLoc = Loc;
+ }
+
+ // If we have a receiver expression, perform appropriate promotions
+ // and determine receiver type.
+ if (Receiver) {
+ if (Receiver->hasPlaceholderType()) {
+ ExprResult Result;
+ if (Receiver->getType() == Context.UnknownAnyTy)
+ Result = forceUnknownAnyToType(Receiver, Context.getObjCIdType());
+ else
+ Result = CheckPlaceholderExpr(Receiver);
+ if (Result.isInvalid()) return ExprError();
+ Receiver = Result.take();
+ }
+
+ if (Receiver->isTypeDependent()) {
+ // If the receiver is type-dependent, we can't type-check anything
+ // at this point. Build a dependent expression.
+ unsigned NumArgs = ArgsIn.size();
+ Expr **Args = reinterpret_cast<Expr **>(ArgsIn.release());
+ assert(SuperLoc.isInvalid() && "Message to super with dependent type");
+ return Owned(ObjCMessageExpr::Create(Context, Context.DependentTy,
+ VK_RValue, LBracLoc, Receiver, Sel,
+ SelectorLocs, /*Method=*/0,
+ makeArrayRef(Args, NumArgs),
+ RBracLoc, isImplicit));
+ }
+
+ // If necessary, apply function/array conversion to the receiver.
+ // C99 6.7.5.3p[7,8].
+ ExprResult Result = DefaultFunctionArrayLvalueConversion(Receiver);
+ if (Result.isInvalid())
+ return ExprError();
+ Receiver = Result.take();
+ ReceiverType = Receiver->getType();
+ }
+
+ if (!Method) {
+ // Handle messages to id.
+ bool receiverIsId = ReceiverType->isObjCIdType();
+ if (receiverIsId || ReceiverType->isBlockPointerType() ||
+ (Receiver && Context.isObjCNSObjectType(Receiver->getType()))) {
+ Method = LookupInstanceMethodInGlobalPool(Sel,
+ SourceRange(LBracLoc, RBracLoc),
+ receiverIsId);
+ if (!Method)
+ Method = LookupFactoryMethodInGlobalPool(Sel,
+ SourceRange(LBracLoc, RBracLoc),
+ receiverIsId);
+ } else if (ReceiverType->isObjCClassType() ||
+ ReceiverType->isObjCQualifiedClassType()) {
+ // Handle messages to Class.
+ // We allow sending a message to a qualified Class ("Class<foo>"), which
+ // is ok as long as one of the protocols implements the selector (if not, warn).
+ if (const ObjCObjectPointerType *QClassTy
+ = ReceiverType->getAsObjCQualifiedClassType()) {
+ // Search protocols for class methods.
+ Method = LookupMethodInQualifiedType(Sel, QClassTy, false);
+ if (!Method) {
+ Method = LookupMethodInQualifiedType(Sel, QClassTy, true);
+ // warn if instance method found for a Class message.
+ if (Method) {
+ Diag(Loc, diag::warn_instance_method_on_class_found)
+ << Method->getSelector() << Sel;
+ Diag(Method->getLocation(), diag::note_method_declared_at)
+ << Method->getDeclName();
+ }
+ }
+ } else {
+ if (ObjCMethodDecl *CurMeth = getCurMethodDecl()) {
+ if (ObjCInterfaceDecl *ClassDecl = CurMeth->getClassInterface()) {
+ // First check the public methods in the class interface.
+ Method = ClassDecl->lookupClassMethod(Sel);
+
+ if (!Method)
+ Method = LookupPrivateClassMethod(Sel, ClassDecl);
+ }
+ if (Method && DiagnoseUseOfDecl(Method, Loc))
+ return ExprError();
+ }
+ if (!Method) {
+ // If not messaging 'self', look for any factory method named 'Sel'.
+ if (!Receiver || !isSelfExpr(Receiver)) {
+ Method = LookupFactoryMethodInGlobalPool(Sel,
+ SourceRange(LBracLoc, RBracLoc),
+ true);
+ if (!Method) {
+ // If no class (factory) method was found, check if an _instance_
+ // method of the same name exists in the root class only.
+ Method = LookupInstanceMethodInGlobalPool(Sel,
+ SourceRange(LBracLoc, RBracLoc),
+ true);
+ if (Method)
+ if (const ObjCInterfaceDecl *ID =
+ dyn_cast<ObjCInterfaceDecl>(Method->getDeclContext())) {
+ if (ID->getSuperClass())
+ Diag(Loc, diag::warn_root_inst_method_not_found)
+ << Sel << SourceRange(LBracLoc, RBracLoc);
+ }
+ }
+ }
+ }
+ }
+ } else {
+ ObjCInterfaceDecl* ClassDecl = 0;
+
+ // We allow sending a message to a qualified ID ("id<foo>"), which is ok as
+ // long as one of the protocols implements the selector (if not, warn).
+ if (const ObjCObjectPointerType *QIdTy
+ = ReceiverType->getAsObjCQualifiedIdType()) {
+ // Search protocols for instance methods.
+ Method = LookupMethodInQualifiedType(Sel, QIdTy, true);
+ if (!Method)
+ Method = LookupMethodInQualifiedType(Sel, QIdTy, false);
+ } else if (const ObjCObjectPointerType *OCIType
+ = ReceiverType->getAsObjCInterfacePointerType()) {
+ // We allow sending a message to a pointer to an interface (an object).
+ ClassDecl = OCIType->getInterfaceDecl();
+
+ // Try to complete the type. Under ARC, this is a hard error from which
+ // we don't try to recover.
+ const ObjCInterfaceDecl *forwardClass = 0;
+ if (RequireCompleteType(Loc, OCIType->getPointeeType(),
+ getLangOpts().ObjCAutoRefCount
+ ? PDiag(diag::err_arc_receiver_forward_instance)
+ << (Receiver ? Receiver->getSourceRange()
+ : SourceRange(SuperLoc))
+ : PDiag(diag::warn_receiver_forward_instance)
+ << (Receiver ? Receiver->getSourceRange()
+ : SourceRange(SuperLoc)))) {
+ if (getLangOpts().ObjCAutoRefCount)
+ return ExprError();
+
+ forwardClass = OCIType->getInterfaceDecl();
+ Diag(Receiver ? Receiver->getLocStart()
+ : SuperLoc, diag::note_receiver_is_id);
+ Method = 0;
+ } else {
+ Method = ClassDecl->lookupInstanceMethod(Sel);
+ }
+
+ if (!Method)
+ // Search protocol qualifiers.
+ Method = LookupMethodInQualifiedType(Sel, OCIType, true);
+
+ if (!Method) {
+ // If we have implementations in scope, check "private" methods.
+ Method = LookupPrivateInstanceMethod(Sel, ClassDecl);
+
+ if (!Method && getLangOpts().ObjCAutoRefCount) {
+ Diag(Loc, diag::err_arc_may_not_respond)
+ << OCIType->getPointeeType() << Sel;
+ return ExprError();
+ }
+
+ if (!Method && (!Receiver || !isSelfExpr(Receiver))) {
+ // If we still haven't found a method, look in the global pool. This
+ // behavior isn't very desirable, but we need it for GCC
+ // compatibility. FIXME: should we deviate?
+ if (OCIType->qual_empty()) {
+ Method = LookupInstanceMethodInGlobalPool(Sel,
+ SourceRange(LBracLoc, RBracLoc));
+ if (Method && !forwardClass)
+ Diag(Loc, diag::warn_maynot_respond)
+ << OCIType->getInterfaceDecl()->getIdentifier() << Sel;
+ }
+ }
+ }
+ if (Method && DiagnoseUseOfDecl(Method, Loc, forwardClass))
+ return ExprError();
+ } else if (!getLangOpts().ObjCAutoRefCount &&
+ !Context.getObjCIdType().isNull() &&
+ (ReceiverType->isPointerType() ||
+ ReceiverType->isIntegerType())) {
+ // Implicitly convert integers and pointers to 'id', but emit a warning
+ // (this path is not taken in ARC).
+ Diag(Loc, diag::warn_bad_receiver_type)
+ << ReceiverType
+ << Receiver->getSourceRange();
+ if (ReceiverType->isPointerType())
+ Receiver = ImpCastExprToType(Receiver, Context.getObjCIdType(),
+ CK_CPointerToObjCPointerCast).take();
+ else {
+ // TODO: specialized warning on null receivers?
+ bool IsNull = Receiver->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull);
+ Receiver = ImpCastExprToType(Receiver, Context.getObjCIdType(),
+ IsNull ? CK_NullToPointer : CK_IntegralToPointer).take();
+ }
+ ReceiverType = Receiver->getType();
+ } else {
+ ExprResult ReceiverRes;
+ if (getLangOpts().CPlusPlus)
+ ReceiverRes = PerformContextuallyConvertToObjCPointer(Receiver);
+ if (ReceiverRes.isUsable()) {
+ Receiver = ReceiverRes.take();
+ return BuildInstanceMessage(Receiver,
+ ReceiverType,
+ SuperLoc,
+ Sel,
+ Method,
+ LBracLoc,
+ SelectorLocs,
+ RBracLoc,
+ move(ArgsIn));
+ } else {
+ // Reject other random receiver types (e.g. structs).
+ Diag(Loc, diag::err_bad_receiver_type)
+ << ReceiverType << Receiver->getSourceRange();
+ return ExprError();
+ }
+ }
+ }
+ }
+
+ // Check the message arguments.
+ unsigned NumArgs = ArgsIn.size();
+ Expr **Args = reinterpret_cast<Expr **>(ArgsIn.release());
+ QualType ReturnType;
+ ExprValueKind VK = VK_RValue;
+ bool ClassMessage = (ReceiverType->isObjCClassType() ||
+ ReceiverType->isObjCQualifiedClassType());
+ if (CheckMessageArgumentTypes(ReceiverType, Args, NumArgs, Sel, Method,
+ ClassMessage, SuperLoc.isValid(),
+ LBracLoc, RBracLoc, ReturnType, VK))
+ return ExprError();
+
+ if (Method && !Method->getResultType()->isVoidType() &&
+ RequireCompleteType(LBracLoc, Method->getResultType(),
+ diag::err_illegal_message_expr_incomplete_type))
+ return ExprError();
+
+ SourceLocation SelLoc = SelectorLocs.front();
+
+ // In ARC, forbid the user from sending messages to
+ // retain/release/autorelease/dealloc/retainCount explicitly.
+ if (getLangOpts().ObjCAutoRefCount) {
+ ObjCMethodFamily family =
+ (Method ? Method->getMethodFamily() : Sel.getMethodFamily());
+ switch (family) {
+ case OMF_init:
+ if (Method)
+ checkInitMethod(Method, ReceiverType);
+
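+ // fall through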
+ case OMF_None:
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_finalize:
+ case OMF_mutableCopy:
+ case OMF_new:
+ case OMF_self:
+ break;
+
+ case OMF_dealloc:
+ case OMF_retain:
+ case OMF_release:
+ case OMF_autorelease:
+ case OMF_retainCount:
+ Diag(Loc, diag::err_arc_illegal_explicit_message)
+ << Sel << SelLoc;
+ break;
+
+ case OMF_performSelector:
+ if (Method && NumArgs >= 1) {
+ if (ObjCSelectorExpr *SelExp = dyn_cast<ObjCSelectorExpr>(Args[0])) {
+ Selector ArgSel = SelExp->getSelector();
+ ObjCMethodDecl *SelMethod =
+ LookupInstanceMethodInGlobalPool(ArgSel,
+ SelExp->getSourceRange());
+ if (!SelMethod)
+ SelMethod =
+ LookupFactoryMethodInGlobalPool(ArgSel,
+ SelExp->getSourceRange());
+ if (SelMethod) {
+ ObjCMethodFamily SelFamily = SelMethod->getMethodFamily();
+ switch (SelFamily) {
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ case OMF_new:
+ case OMF_self:
+ case OMF_init:
+ // Issue error, unless ns_returns_not_retained.
+ if (!SelMethod->hasAttr<NSReturnsNotRetainedAttr>()) {
+ // selector names a +1 method
+ Diag(SelLoc,
+ diag::err_arc_perform_selector_retains);
+ Diag(SelMethod->getLocation(), diag::note_method_declared_at)
+ << SelMethod->getDeclName();
+ }
+ break;
+ default:
+ // +0 call. OK. unless ns_returns_retained.
+ if (SelMethod->hasAttr<NSReturnsRetainedAttr>()) {
+ // selector names a +1 method
+ Diag(SelLoc,
+ diag::err_arc_perform_selector_retains);
+ Diag(SelMethod->getLocation(), diag::note_method_declared_at)
+ << SelMethod->getDeclName();
+ }
+ break;
+ }
+ }
+ } else {
+ // The selector isn't known statically, so warn that this may leak.
+ Diag(SelLoc, diag::warn_arc_perform_selector_leaks);
+ Diag(Args[0]->getExprLoc(), diag::note_used_here);
+ }
+ }
+ break;
+ }
+ }
+
+ // Construct the appropriate ObjCMessageExpr instance.
+ ObjCMessageExpr *Result;
+ if (SuperLoc.isValid())
+ Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
+ SuperLoc, /*IsInstanceSuper=*/true,
+ ReceiverType, Sel, SelectorLocs, Method,
+ makeArrayRef(Args, NumArgs), RBracLoc,
+ isImplicit);
+ else {
+ Result = ObjCMessageExpr::Create(Context, ReturnType, VK, LBracLoc,
+ Receiver, Sel, SelectorLocs, Method,
+ makeArrayRef(Args, NumArgs), RBracLoc,
+ isImplicit);
+ if (!isImplicit)
+ checkCocoaAPI(*this, Result);
+ }
+
+ if (getLangOpts().ObjCAutoRefCount) {
+ if (Receiver &&
+ (Receiver->IgnoreParenImpCasts()->getType().getObjCLifetime()
+ == Qualifiers::OCL_Weak))
+ Diag(Receiver->getLocStart(), diag::warn_receiver_is_weak);
+
+ // In ARC, annotate delegate init calls.
+ if (Result->getMethodFamily() == OMF_init &&
+ (SuperLoc.isValid() || isSelfExpr(Receiver))) {
+ // Only consider init calls *directly* in init implementations,
+ // not within blocks.
+ ObjCMethodDecl *method = dyn_cast<ObjCMethodDecl>(CurContext);
+ if (method && method->getMethodFamily() == OMF_init) {
+ // The implicit assignment to self means we also don't want to
+ // consume the result.
+ Result->setDelegateInitCall(true);
+ return Owned(Result);
+ }
+ }
+
+ // In ARC, check for message sends which are likely to introduce
+ // retain cycles.
+ checkRetainCycles(Result);
+ }
+
+ return MaybeBindToTemporary(Result);
+}
+
+// ActOnInstanceMessage - used for both unary and keyword messages.
+// Args is optional - if it is present, the number of expressions
+// is obtained from Sel.getNumArgs().
+ExprResult Sema::ActOnInstanceMessage(Scope *S,
+ Expr *Receiver,
+ Selector Sel,
+ SourceLocation LBracLoc,
+ ArrayRef<SourceLocation> SelectorLocs,
+ SourceLocation RBracLoc,
+ MultiExprArg Args) {
+ if (!Receiver)
+ return ExprError();
+
+ return BuildInstanceMessage(Receiver, Receiver->getType(),
+ /*SuperLoc=*/SourceLocation(), Sel, /*Method=*/0,
+ LBracLoc, SelectorLocs, RBracLoc, move(Args));
+}
+
+enum ARCConversionTypeClass {
+ /// int, void, struct A
+ ACTC_none,
+
+ /// id, void (^)()
+ ACTC_retainable,
+
+ /// id*, id***, void (^*)(),
+ ACTC_indirectRetainable,
+
+ /// void* might be a normal C type, or it might be a CF type.
+ ACTC_voidPtr,
+
+ /// struct A*
+ ACTC_coreFoundation
+};
+static bool isAnyRetainable(ARCConversionTypeClass ACTC) {
+ return (ACTC == ACTC_retainable ||
+ ACTC == ACTC_coreFoundation ||
+ ACTC == ACTC_voidPtr);
+}
+static bool isAnyCLike(ARCConversionTypeClass ACTC) {
+ return ACTC == ACTC_none ||
+ ACTC == ACTC_voidPtr ||
+ ACTC == ACTC_coreFoundation;
+}
+
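+/// Classify a type for ARC bridging purposes. A few illustrative cases
+/// (assuming the usual Foundation/CoreFoundation typedefs): 'id',
+/// 'NSString *', and block pointers are ACTC_retainable; 'CFStringRef' (a
+/// pointer to a struct) is ACTC_coreFoundation; 'void *' is ACTC_voidPtr;
+/// and 'id *' or 'id **' are ACTC_indirectRetainable.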
+static ARCConversionTypeClass classifyTypeForARCConversion(QualType type) {
+ bool isIndirect = false;
+
+ // Ignore an outermost reference type.
+ if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
+ type = ref->getPointeeType();
+ isIndirect = true;
+ }
+
+ // Drill through pointers and arrays recursively.
+ while (true) {
+ if (const PointerType *ptr = type->getAs<PointerType>()) {
+ type = ptr->getPointeeType();
+
+ // The first level of pointer may be the innermost pointer on a CF type.
+ if (!isIndirect) {
+ if (type->isVoidType()) return ACTC_voidPtr;
+ if (type->isRecordType()) return ACTC_coreFoundation;
+ }
+ } else if (const ArrayType *array = type->getAsArrayTypeUnsafe()) {
+ type = QualType(array->getElementType()->getBaseElementTypeUnsafe(), 0);
+ } else {
+ break;
+ }
+ isIndirect = true;
+ }
+
+ if (isIndirect) {
+ if (type->isObjCARCBridgableType())
+ return ACTC_indirectRetainable;
+ return ACTC_none;
+ }
+
+ if (type->isObjCARCBridgableType())
+ return ACTC_retainable;
+
+ return ACTC_none;
+}
+
+namespace {
+ /// A result from the cast checker.
+ enum ACCResult {
+ /// Cannot be cast.
+ ACC_invalid,
+
+ /// Can be safely retained or not retained.
+ ACC_bottom,
+
+ /// Can be cast at +0.
+ ACC_plusZero,
+
+ /// Can be cast at +1.
+ ACC_plusOne
+ };
+ ACCResult merge(ACCResult left, ACCResult right) {
+ if (left == right) return left;
+ if (left == ACC_bottom) return right;
+ if (right == ACC_bottom) return left;
+ return ACC_invalid;
+ }
+
+ /// A checker which white-lists certain expressions whose conversion
+ /// to or from a retainable type would otherwise be forbidden in ARC.
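+ ///
+ /// For example (a sketch), casting the literal '@"x"' to a CF pointer type
+ /// is accepted outright because global strings are immune to retains, and
+ /// a message send whose result is a CF type is accepted following the
+ /// Cocoa conventions: +1 for the alloc/copy/mutableCopy/new families, +0
+ /// otherwise.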
+ class ARCCastChecker : public StmtVisitor<ARCCastChecker, ACCResult> {
+ typedef StmtVisitor<ARCCastChecker, ACCResult> super;
+
+ ASTContext &Context;
+ ARCConversionTypeClass SourceClass;
+ ARCConversionTypeClass TargetClass;
+
+ static bool isCFType(QualType type) {
+ // Someday this can use ns_bridged. For now, it has to do this.
+ return type->isCARCBridgableType();
+ }
+
+ public:
+ ARCCastChecker(ASTContext &Context, ARCConversionTypeClass source,
+ ARCConversionTypeClass target)
+ : Context(Context), SourceClass(source), TargetClass(target) {}
+
+ using super::Visit;
+ ACCResult Visit(Expr *e) {
+ return super::Visit(e->IgnoreParens());
+ }
+
+ ACCResult VisitStmt(Stmt *s) {
+ return ACC_invalid;
+ }
+
+ /// Null pointer constants can be cast however you please.
+ ACCResult VisitExpr(Expr *e) {
+ if (e->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull))
+ return ACC_bottom;
+ return ACC_invalid;
+ }
+
+ /// Objective-C string literals can be safely cast.
+ ACCResult VisitObjCStringLiteral(ObjCStringLiteral *e) {
+ // If we're casting to any retainable type, go ahead. Global
+ // strings are immune to retains, so this is bottom.
+ if (isAnyRetainable(TargetClass)) return ACC_bottom;
+
+ return ACC_invalid;
+ }
+
+ /// Look through certain implicit and explicit casts.
+ ACCResult VisitCastExpr(CastExpr *e) {
+ switch (e->getCastKind()) {
+ case CK_NullToPointer:
+ return ACC_bottom;
+
+ case CK_NoOp:
+ case CK_LValueToRValue:
+ case CK_BitCast:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ return Visit(e->getSubExpr());
+
+ default:
+ return ACC_invalid;
+ }
+ }
+
+ /// Look through unary extension.
+ ACCResult VisitUnaryExtension(UnaryOperator *e) {
+ return Visit(e->getSubExpr());
+ }
+
+ /// Ignore the LHS of a comma operator.
+ ACCResult VisitBinComma(BinaryOperator *e) {
+ return Visit(e->getRHS());
+ }
+
+ /// Conditional operators are okay if both sides are okay.
+ ACCResult VisitConditionalOperator(ConditionalOperator *e) {
+ ACCResult left = Visit(e->getTrueExpr());
+ if (left == ACC_invalid) return ACC_invalid;
+ return merge(left, Visit(e->getFalseExpr()));
+ }
+
+ /// Look through pseudo-objects.
+ ACCResult VisitPseudoObjectExpr(PseudoObjectExpr *e) {
+ // If we're getting here, we should always have a result.
+ return Visit(e->getResultExpr());
+ }
+
+ /// Statement expressions are okay if their result expression is okay.
+ ACCResult VisitStmtExpr(StmtExpr *e) {
+ return Visit(e->getSubStmt()->body_back());
+ }
+
+ /// Some declaration references are okay.
+ ACCResult VisitDeclRefExpr(DeclRefExpr *e) {
+ // References to global constants from system headers are okay.
+ // These are things like 'kCFStringTransformToLatin'. They can
+ // also be assumed to be immune to retains.
+ VarDecl *var = dyn_cast<VarDecl>(e->getDecl());
+ if (isAnyRetainable(TargetClass) &&
+ isAnyRetainable(SourceClass) &&
+ var &&
+ var->getStorageClass() == SC_Extern &&
+ var->getType().isConstQualified() &&
+ Context.getSourceManager().isInSystemHeader(var->getLocation())) {
+ return ACC_bottom;
+ }
+
+ // Nothing else.
+ return ACC_invalid;
+ }
+
+ /// Some calls are okay.
+ ACCResult VisitCallExpr(CallExpr *e) {
+ if (FunctionDecl *fn = e->getDirectCallee())
+ if (ACCResult result = checkCallToFunction(fn))
+ return result;
+
+ return super::VisitCallExpr(e);
+ }
+
+ ACCResult checkCallToFunction(FunctionDecl *fn) {
+ // Require a CF*Ref return type.
+ if (!isCFType(fn->getResultType()))
+ return ACC_invalid;
+
+ if (!isAnyRetainable(TargetClass))
+ return ACC_invalid;
+
+ // Honor an explicit 'not retained' attribute.
+ if (fn->hasAttr<CFReturnsNotRetainedAttr>())
+ return ACC_plusZero;
+
+ // Honor an explicit 'retained' attribute, except that for
+ // now we're not going to permit implicit handling of +1 results,
+ // because it's a bit frightening.
+ if (fn->hasAttr<CFReturnsRetainedAttr>())
+ return ACC_invalid; // ACC_plusOne if we start accepting this
+
+ // Recognize this specific builtin function, which is used by CFSTR.
+ unsigned builtinID = fn->getBuiltinID();
+ if (builtinID == Builtin::BI__builtin___CFStringMakeConstantString)
+ return ACC_bottom;
+
+ // Otherwise, don't do anything implicit with an unaudited function.
+ if (!fn->hasAttr<CFAuditedTransferAttr>())
+ return ACC_invalid;
+
+ // Otherwise, it's +0 unless it follows the create convention.
+ if (ento::coreFoundation::followsCreateRule(fn))
+ return ACC_invalid; // ACC_plusOne if we start accepting this
+
+ return ACC_plusZero;
+ }
+
+ ACCResult VisitObjCMessageExpr(ObjCMessageExpr *e) {
+ return checkCallToMethod(e->getMethodDecl());
+ }
+
+ ACCResult VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *e) {
+ ObjCMethodDecl *method;
+ if (e->isExplicitProperty())
+ method = e->getExplicitProperty()->getGetterMethodDecl();
+ else
+ method = e->getImplicitPropertyGetter();
+ return checkCallToMethod(method);
+ }
+
+ ACCResult checkCallToMethod(ObjCMethodDecl *method) {
+ if (!method) return ACC_invalid;
+
+ // Check for message sends to methods returning CF types. We
+ // just obey the Cocoa conventions with these, even though the
+ // return type is CF.
+ if (!isAnyRetainable(TargetClass) || !isCFType(method->getResultType()))
+ return ACC_invalid;
+
+ // If the method is explicitly marked not-retained, it's +0.
+ if (method->hasAttr<CFReturnsNotRetainedAttr>())
+ return ACC_plusZero;
+
+ // If the method is explicitly marked as returning retained, or its
+ // selector follows a +1 Cocoa convention, treat it as +1.
+ if (method->hasAttr<CFReturnsRetainedAttr>())
+ return ACC_plusOne;
+
+ switch (method->getSelector().getMethodFamily()) {
+ case OMF_alloc:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ case OMF_new:
+ return ACC_plusOne;
+
+ default:
+ // Otherwise, treat it as +0.
+ return ACC_plusZero;
+ }
+ }
+ };
+}
+
+static bool
+KnownName(Sema &S, const char *name) {
+ LookupResult R(S, &S.Context.Idents.get(name), SourceLocation(),
+ Sema::LookupOrdinaryName);
+ return S.LookupName(R, S.TUScope, false);
+}
+
+static void addFixitForObjCARCConversion(Sema &S,
+ DiagnosticBuilder &DiagB,
+ Sema::CheckedConversionKind CCK,
+ SourceLocation afterLParen,
+ QualType castType,
+ Expr *castExpr,
+ const char *bridgeKeyword,
+ const char *CFBridgeName) {
+ // We handle C-style and implicit casts here.
+ switch (CCK) {
+ case Sema::CCK_ImplicitConversion:
+ case Sema::CCK_CStyleCast:
+ break;
+ case Sema::CCK_FunctionalCast:
+ case Sema::CCK_OtherCast:
+ return;
+ }
+
+ if (CFBridgeName) {
+ Expr *castedE = castExpr;
+ if (CStyleCastExpr *CCE = dyn_cast<CStyleCastExpr>(castedE))
+ castedE = CCE->getSubExpr();
+ castedE = castedE->IgnoreImpCasts();
+ SourceRange range = castedE->getSourceRange();
+ if (isa<ParenExpr>(castedE)) {
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(),
+ CFBridgeName));
+ } else {
+ std::string namePlusParen = CFBridgeName;
+ namePlusParen += "(";
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(),
+ namePlusParen));
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(
+ S.PP.getLocForEndOfToken(range.getEnd()),
+ ")"));
+ }
+ return;
+ }
+
+ if (CCK == Sema::CCK_CStyleCast) {
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(afterLParen, bridgeKeyword));
+ } else {
+ std::string castCode = "(";
+ castCode += bridgeKeyword;
+ castCode += castType.getAsString();
+ castCode += ")";
+ Expr *castedE = castExpr->IgnoreImpCasts();
+ SourceRange range = castedE->getSourceRange();
+ if (isa<ParenExpr>(castedE)) {
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(),
+ castCode));
+ } else {
+ castCode += "(";
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(range.getBegin(),
+ castCode));
+ DiagB.AddFixItHint(FixItHint::CreateInsertion(
+ S.PP.getLocForEndOfToken(range.getEnd()),
+ ")"));
+ }
+ }
+}
+
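+/// Diagnose a conversion that requires an explicit bridge. As a sketch of
+/// the common case (assuming the usual CF/NS typedefs), converting
+/// 'CFStringRef' to 'NSString *' produces notes suggesting '__bridge' (no
+/// transfer of ownership) or '__bridge_transfer' / 'CFBridgingRelease'
+/// (transfer a +1 CF reference into ARC); the opposite direction suggests
+/// '__bridge' or '__bridge_retained' / 'CFBridgingRetain'.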
+static void
+diagnoseObjCARCConversion(Sema &S, SourceRange castRange,
+ QualType castType, ARCConversionTypeClass castACTC,
+ Expr *castExpr, ARCConversionTypeClass exprACTC,
+ Sema::CheckedConversionKind CCK) {
+ SourceLocation loc =
+ (castRange.isValid() ? castRange.getBegin() : castExpr->getExprLoc());
+
+ if (S.makeUnavailableInSystemHeader(loc,
+ "converts between Objective-C and C pointers in -fobjc-arc"))
+ return;
+
+ QualType castExprType = castExpr->getType();
+
+ unsigned srcKind = 0;
+ switch (exprACTC) {
+ case ACTC_none:
+ case ACTC_coreFoundation:
+ case ACTC_voidPtr:
+ srcKind = (castExprType->isPointerType() ? 1 : 0);
+ break;
+ case ACTC_retainable:
+ srcKind = (castExprType->isBlockPointerType() ? 2 : 3);
+ break;
+ case ACTC_indirectRetainable:
+ srcKind = 4;
+ break;
+ }
+
+ // Check whether this could be fixed with a bridge cast.
+ SourceLocation afterLParen = S.PP.getLocForEndOfToken(castRange.getBegin());
+ SourceLocation noteLoc = afterLParen.isValid() ? afterLParen : loc;
+
+ // Bridge from a CF type to an ARC type.
+ if (castACTC == ACTC_retainable && isAnyRetainable(exprACTC)) {
+
+ S.Diag(loc, diag::err_arc_cast_requires_bridge)
+ << unsigned(CCK == Sema::CCK_ImplicitConversion) // cast|implicit
+ << 2 // of C pointer type
+ << castExprType
+ << unsigned(castType->isBlockPointerType()) // to ObjC|block type
+ << castType
+ << castRange
+ << castExpr->getSourceRange();
+ bool br = KnownName(S, "CFBridgingRelease");
+ {
+ DiagnosticBuilder DiagB = S.Diag(noteLoc, diag::note_arc_bridge);
+ addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
+ castType, castExpr, "__bridge ", 0);
+ }
+ {
+ DiagnosticBuilder DiagB = S.Diag(noteLoc, diag::note_arc_bridge_transfer)
+ << castExprType << br;
+ addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
+ castType, castExpr, "__bridge_transfer ",
+ br ? "CFBridgingRelease" : 0);
+ }
+
+ return;
+ }
+
+ // Bridge from an ARC type to a CF type.
+ if (exprACTC == ACTC_retainable && isAnyRetainable(castACTC)) {
+ bool br = KnownName(S, "CFBridgingRetain");
+ S.Diag(loc, diag::err_arc_cast_requires_bridge)
+ << unsigned(CCK == Sema::CCK_ImplicitConversion) // cast|implicit
+ << unsigned(castExprType->isBlockPointerType()) // of ObjC|block type
+ << castExprType
+ << 2 // to C pointer type
+ << castType
+ << castRange
+ << castExpr->getSourceRange();
+
+ {
+ DiagnosticBuilder DiagB = S.Diag(noteLoc, diag::note_arc_bridge);
+ addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
+ castType, castExpr, "__bridge ", 0);
+ }
+ {
+ DiagnosticBuilder DiagB = S.Diag(noteLoc, diag::note_arc_bridge_retained)
+ << castType << br;
+ addFixitForObjCARCConversion(S, DiagB, CCK, afterLParen,
+ castType, castExpr, "__bridge_retained ",
+ br ? "CFBridgingRetain" : 0);
+ }
+
+ return;
+ }
+
+ S.Diag(loc, diag::err_arc_mismatched_cast)
+ << (CCK != Sema::CCK_ImplicitConversion)
+ << srcKind << castExprType << castType
+ << castRange << castExpr->getSourceRange();
+}
+
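+/// Check an ARC conversion between the type classes above. For a C-style
+/// cast such as '(CFStringRef)someNSString' (names illustrative), the
+/// complaint is deferred as ACR_unbridged so an enclosing construct may
+/// still accept the unbridged cast, whereas the same conversion performed
+/// implicitly is diagnosed immediately; white-listed +1 sources are
+/// accepted and an ARCConsumeObject cast is inserted.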
+Sema::ARCConversionResult
+Sema::CheckObjCARCConversion(SourceRange castRange, QualType castType,
+ Expr *&castExpr, CheckedConversionKind CCK) {
+ QualType castExprType = castExpr->getType();
+
+ // For the purposes of the classification, we assume reference types
+ // will bind to temporaries.
+ QualType effCastType = castType;
+ if (const ReferenceType *ref = castType->getAs<ReferenceType>())
+ effCastType = ref->getPointeeType();
+
+ ARCConversionTypeClass exprACTC = classifyTypeForARCConversion(castExprType);
+ ARCConversionTypeClass castACTC = classifyTypeForARCConversion(effCastType);
+ if (exprACTC == castACTC) {
+ // Check viability and diagnose casting an rvalue to a lifetime-qualified
+ // type, where the explicit ownership qualifier has no effect.
+ if ((castACTC == ACTC_retainable) &&
+ (CCK == CCK_CStyleCast || CCK == CCK_OtherCast) &&
+ (castType != castExprType)) {
+ const Type *DT = castType.getTypePtr();
+ QualType QDT = castType;
+ // We desugar some types but not others: we ignore those that cannot
+ // appear in a cast (e.g., 'auto') and those that should not be
+ // desugared (e.g., typedefs).
+ if (const ParenType *PT = dyn_cast<ParenType>(DT))
+ QDT = PT->desugar();
+ else if (const TypeOfType *TP = dyn_cast<TypeOfType>(DT))
+ QDT = TP->desugar();
+ else if (const AttributedType *AT = dyn_cast<AttributedType>(DT))
+ QDT = AT->desugar();
+ if (QDT != castType &&
+ QDT.getObjCLifetime() != Qualifiers::OCL_None) {
+ SourceLocation loc =
+ (castRange.isValid() ? castRange.getBegin()
+ : castExpr->getExprLoc());
+ Diag(loc, diag::err_arc_nolifetime_behavior);
+ }
+ }
+ return ACR_okay;
+ }
+
+ if (isAnyCLike(exprACTC) && isAnyCLike(castACTC)) return ACR_okay;
+
+ // Allow all of these types to be cast to integer types (but not
+ // vice-versa).
+ if (castACTC == ACTC_none && castType->isIntegralType(Context))
+ return ACR_okay;
+
+ // Allow casts between pointers to lifetime types (e.g., __strong id*)
+ // and pointers to void (e.g., cv void *). Casting from void* to lifetime*
+ // must be explicit.
+ if (exprACTC == ACTC_indirectRetainable && castACTC == ACTC_voidPtr)
+ return ACR_okay;
+ if (castACTC == ACTC_indirectRetainable && exprACTC == ACTC_voidPtr &&
+ CCK != CCK_ImplicitConversion)
+ return ACR_okay;
+
+ switch (ARCCastChecker(Context, exprACTC, castACTC).Visit(castExpr)) {
+ // For invalid casts, fall through.
+ case ACC_invalid:
+ break;
+
+ // Do nothing for both bottom and +0.
+ case ACC_bottom:
+ case ACC_plusZero:
+ return ACR_okay;
+
+ // If the result is +1, consume it here.
+ case ACC_plusOne:
+ castExpr = ImplicitCastExpr::Create(Context, castExpr->getType(),
+ CK_ARCConsumeObject, castExpr,
+ 0, VK_RValue);
+ ExprNeedsCleanups = true;
+ return ACR_okay;
+ }
+
+ // If this is a non-implicit cast from id or block type to a
+ // CoreFoundation type, delay complaining in case the cast is used
+ // in an acceptable context.
+ if (exprACTC == ACTC_retainable && isAnyRetainable(castACTC) &&
+ CCK != CCK_ImplicitConversion)
+ return ACR_unbridged;
+
+ diagnoseObjCARCConversion(*this, castRange, castType, castACTC,
+ castExpr, exprACTC, CCK);
+ return ACR_okay;
+}
+
+/// Given that we saw an expression with the ARCUnbridgedCastTy
+/// placeholder type, complain bitterly.
+void Sema::diagnoseARCUnbridgedCast(Expr *e) {
+ // We expect the spurious ImplicitCastExpr to already have been stripped.
+ assert(!e->hasPlaceholderType(BuiltinType::ARCUnbridgedCast));
+ CastExpr *realCast = cast<CastExpr>(e->IgnoreParens());
+
+ SourceRange castRange;
+ QualType castType;
+ CheckedConversionKind CCK;
+
+ if (CStyleCastExpr *cast = dyn_cast<CStyleCastExpr>(realCast)) {
+ castRange = SourceRange(cast->getLParenLoc(), cast->getRParenLoc());
+ castType = cast->getTypeAsWritten();
+ CCK = CCK_CStyleCast;
+ } else if (ExplicitCastExpr *cast = dyn_cast<ExplicitCastExpr>(realCast)) {
+ castRange = cast->getTypeInfoAsWritten()->getTypeLoc().getSourceRange();
+ castType = cast->getTypeAsWritten();
+ CCK = CCK_OtherCast;
+ } else {
+ castType = realCast->getType();
+ CCK = CCK_ImplicitConversion;
+ }
+
+ ARCConversionTypeClass castACTC =
+ classifyTypeForARCConversion(castType.getNonReferenceType());
+
+ Expr *castExpr = realCast->getSubExpr();
+ assert(classifyTypeForARCConversion(castExpr->getType()) == ACTC_retainable);
+
+ diagnoseObjCARCConversion(*this, castRange, castType, castACTC,
+ castExpr, ACTC_retainable, CCK);
+}
+
+/// stripARCUnbridgedCast - Given an expression of ARCUnbridgedCast
+/// type, remove the placeholder cast.
+Expr *Sema::stripARCUnbridgedCast(Expr *e) {
+ assert(e->hasPlaceholderType(BuiltinType::ARCUnbridgedCast));
+
+ if (ParenExpr *pe = dyn_cast<ParenExpr>(e)) {
+ Expr *sub = stripARCUnbridgedCast(pe->getSubExpr());
+ return new (Context) ParenExpr(pe->getLParen(), pe->getRParen(), sub);
+ } else if (UnaryOperator *uo = dyn_cast<UnaryOperator>(e)) {
+ assert(uo->getOpcode() == UO_Extension);
+ Expr *sub = stripARCUnbridgedCast(uo->getSubExpr());
+ return new (Context) UnaryOperator(sub, UO_Extension, sub->getType(),
+ sub->getValueKind(), sub->getObjectKind(),
+ uo->getOperatorLoc());
+ } else if (GenericSelectionExpr *gse = dyn_cast<GenericSelectionExpr>(e)) {
+ assert(!gse->isResultDependent());
+
+ unsigned n = gse->getNumAssocs();
+ SmallVector<Expr*, 4> subExprs(n);
+ SmallVector<TypeSourceInfo*, 4> subTypes(n);
+ for (unsigned i = 0; i != n; ++i) {
+ subTypes[i] = gse->getAssocTypeSourceInfo(i);
+ Expr *sub = gse->getAssocExpr(i);
+ if (i == gse->getResultIndex())
+ sub = stripARCUnbridgedCast(sub);
+ subExprs[i] = sub;
+ }
+
+ return new (Context) GenericSelectionExpr(Context, gse->getGenericLoc(),
+ gse->getControllingExpr(),
+ subTypes.data(), subExprs.data(),
+ n, gse->getDefaultLoc(),
+ gse->getRParenLoc(),
+ gse->containsUnexpandedParameterPack(),
+ gse->getResultIndex());
+ } else {
+ assert(isa<ImplicitCastExpr>(e) && "bad form of unbridged cast!");
+ return cast<ImplicitCastExpr>(e)->getSubExpr();
+ }
+}
+
+bool Sema::CheckObjCARCUnavailableWeakConversion(QualType castType,
+ QualType exprType) {
+ QualType canCastType =
+ Context.getCanonicalType(castType).getUnqualifiedType();
+ QualType canExprType =
+ Context.getCanonicalType(exprType).getUnqualifiedType();
+ if (isa<ObjCObjectPointerType>(canCastType) &&
+ castType.getObjCLifetime() == Qualifiers::OCL_Weak &&
+ canExprType->isObjCObjectPointerType()) {
+ if (const ObjCObjectPointerType *ObjT =
+ canExprType->getAs<ObjCObjectPointerType>())
+ if (ObjT->getInterfaceDecl()->isArcWeakrefUnavailable())
+ return false;
+ }
+ return true;
+}
+
+/// Look for an ObjCReclaimReturnedObject cast and destroy it.
+static Expr *maybeUndoReclaimObject(Expr *e) {
+ // For now, we just undo operands that are *immediately* reclaim
+ // expressions, which prevents the vast majority of potential
+ // problems here. To catch them all, we'd need to rebuild arbitrary
+ // value-propagating subexpressions --- we can't reliably rebuild
+ // in-place because of expression sharing.
+ if (ImplicitCastExpr *ice = dyn_cast<ImplicitCastExpr>(e))
+ if (ice->getCastKind() == CK_ARCReclaimReturnedObject)
+ return ice->getSubExpr();
+
+ return e;
+}
+
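+/// Build an Objective-C bridged cast, e.g. '(__bridge NSString *)cf' or
+/// '(__bridge_retained CFStringRef)str' (illustrative spellings). Bridging a
+/// CF value into ARC with __bridge_transfer marks the result to be consumed
+/// by ARC, while __bridge_retained on an ARC object produces a +1 reference
+/// before bit-casting it to the CF type; mismatched kinds are diagnosed with
+/// fix-its below.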
+ExprResult Sema::BuildObjCBridgedCast(SourceLocation LParenLoc,
+ ObjCBridgeCastKind Kind,
+ SourceLocation BridgeKeywordLoc,
+ TypeSourceInfo *TSInfo,
+ Expr *SubExpr) {
+ ExprResult SubResult = UsualUnaryConversions(SubExpr);
+ if (SubResult.isInvalid()) return ExprError();
+ SubExpr = SubResult.take();
+
+ QualType T = TSInfo->getType();
+ QualType FromType = SubExpr->getType();
+
+ CastKind CK;
+
+ bool MustConsume = false;
+ if (T->isDependentType() || SubExpr->isTypeDependent()) {
+ // Okay: we'll build a dependent expression type.
+ CK = CK_Dependent;
+ } else if (T->isObjCARCBridgableType() && FromType->isCARCBridgableType()) {
+ // Casting CF -> id
+ CK = (T->isBlockPointerType() ? CK_AnyPointerToBlockPointerCast
+ : CK_CPointerToObjCPointerCast);
+ switch (Kind) {
+ case OBC_Bridge:
+ break;
+
+ case OBC_BridgeRetained: {
+ bool br = KnownName(*this, "CFBridgingRelease");
+ Diag(BridgeKeywordLoc, diag::err_arc_bridge_cast_wrong_kind)
+ << 2
+ << FromType
+ << (T->isBlockPointerType()? 1 : 0)
+ << T
+ << SubExpr->getSourceRange()
+ << Kind;
+ Diag(BridgeKeywordLoc, diag::note_arc_bridge)
+ << FixItHint::CreateReplacement(BridgeKeywordLoc, "__bridge");
+ Diag(BridgeKeywordLoc, diag::note_arc_bridge_transfer)
+ << FromType << br
+ << FixItHint::CreateReplacement(BridgeKeywordLoc,
+ br ? "CFBridgingRelease "
+ : "__bridge_transfer ");
+
+ Kind = OBC_Bridge;
+ break;
+ }
+
+ case OBC_BridgeTransfer:
+ // We must consume the Objective-C object produced by the cast.
+ MustConsume = true;
+ break;
+ }
+ } else if (T->isCARCBridgableType() && FromType->isObjCARCBridgableType()) {
+ // Okay: id -> CF
+ CK = CK_BitCast;
+ switch (Kind) {
+ case OBC_Bridge:
+ // Reclaiming a value that's going to be __bridge-casted to CF
+ // is very dangerous, so we don't do it.
+ SubExpr = maybeUndoReclaimObject(SubExpr);
+ break;
+
+ case OBC_BridgeRetained:
+ // Produce the object before casting it.
+ SubExpr = ImplicitCastExpr::Create(Context, FromType,
+ CK_ARCProduceObject,
+ SubExpr, 0, VK_RValue);
+ break;
+
+ case OBC_BridgeTransfer: {
+ bool br = KnownName(*this, "CFBridgingRetain");
+ Diag(BridgeKeywordLoc, diag::err_arc_bridge_cast_wrong_kind)
+ << (FromType->isBlockPointerType()? 1 : 0)
+ << FromType
+ << 2
+ << T
+ << SubExpr->getSourceRange()
+ << Kind;
+
+ Diag(BridgeKeywordLoc, diag::note_arc_bridge)
+ << FixItHint::CreateReplacement(BridgeKeywordLoc, "__bridge ");
+ Diag(BridgeKeywordLoc, diag::note_arc_bridge_retained)
+ << T << br
+ << FixItHint::CreateReplacement(BridgeKeywordLoc,
+ br ? "CFBridgingRetain " : "__bridge_retained");
+
+ Kind = OBC_Bridge;
+ break;
+ }
+ }
+ } else {
+ Diag(LParenLoc, diag::err_arc_bridge_cast_incompatible)
+ << FromType << T << Kind
+ << SubExpr->getSourceRange()
+ << TSInfo->getTypeLoc().getSourceRange();
+ return ExprError();
+ }
+
+ Expr *Result = new (Context) ObjCBridgedCastExpr(LParenLoc, Kind, CK,
+ BridgeKeywordLoc,
+ TSInfo, SubExpr);
+
+ if (MustConsume) {
+ ExprNeedsCleanups = true;
+ Result = ImplicitCastExpr::Create(Context, T, CK_ARCConsumeObject, Result,
+ 0, VK_RValue);
+ }
+
+ return Result;
+}
+
+ExprResult Sema::ActOnObjCBridgedCast(Scope *S,
+ SourceLocation LParenLoc,
+ ObjCBridgeCastKind Kind,
+ SourceLocation BridgeKeywordLoc,
+ ParsedType Type,
+ SourceLocation RParenLoc,
+ Expr *SubExpr) {
+ TypeSourceInfo *TSInfo = 0;
+ QualType T = GetTypeFromParser(Type, &TSInfo);
+ if (!TSInfo)
+ TSInfo = Context.getTrivialTypeSourceInfo(T, LParenLoc);
+ return BuildObjCBridgedCast(LParenLoc, Kind, BridgeKeywordLoc, TSInfo,
+ SubExpr);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp
new file mode 100644
index 0000000..b78ea7d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaFixItUtils.cpp
@@ -0,0 +1,204 @@
+//===--- SemaFixItUtils.cpp - Sema FixIts ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines helper classes for generation of Sema FixItHints.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaFixItUtils.h"
+
+using namespace clang;
+
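+/// A loose convertibility check used when building fix-its: 'From' is
+/// considered convertible to 'To' when the (possibly pointed-to) types match
+/// or are related by derivation and 'To' is at least as qualified. For
+/// example (hypothetical classes), 'Derived *' converts to 'const Base *'.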
+bool ConversionFixItGenerator::compareTypesSimple(CanQualType From,
+ CanQualType To,
+ Sema &S,
+ SourceLocation Loc,
+ ExprValueKind FromVK) {
+ if (!To.isAtLeastAsQualifiedAs(From))
+ return false;
+
+ From = From.getNonReferenceType();
+ To = To.getNonReferenceType();
+
+ // If both are pointer types, work with the pointee types.
+ if (isa<PointerType>(From) && isa<PointerType>(To)) {
+ From = S.Context.getCanonicalType(
+ (cast<PointerType>(From))->getPointeeType());
+ To = S.Context.getCanonicalType(
+ (cast<PointerType>(To))->getPointeeType());
+ }
+
+ const CanQualType FromUnq = From.getUnqualifiedType();
+ const CanQualType ToUnq = To.getUnqualifiedType();
+
+ return (FromUnq == ToUnq || S.IsDerivedFrom(FromUnq, ToUnq)) &&
+ To.isAtLeastAsQualifiedAs(From);
+}
+
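+/// Try to fix a mismatched argument by adjusting its indirection. For
+/// instance (hypothetical arguments), passing 'int *p' where 'int' is
+/// expected suggests dereferencing with '*', passing 'int v' where 'int *'
+/// is expected suggests taking the address with '&', and arguments already
+/// written as '&x' or '*p' have the redundant operator removed instead.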
+bool ConversionFixItGenerator::tryToFixConversion(const Expr *FullExpr,
+ const QualType FromTy,
+ const QualType ToTy,
+ Sema &S) {
+ if (!FullExpr)
+ return false;
+
+ const CanQualType FromQTy = S.Context.getCanonicalType(FromTy);
+ const CanQualType ToQTy = S.Context.getCanonicalType(ToTy);
+ const SourceLocation Begin = FullExpr->getSourceRange().getBegin();
+ const SourceLocation End = S.PP.getLocForEndOfToken(FullExpr->getSourceRange()
+ .getEnd());
+
+ // Strip the implicit casts - those are implied by the compiler, not the
+ // original source code.
+ const Expr* Expr = FullExpr->IgnoreImpCasts();
+
+ bool NeedParen = true;
+ if (isa<ArraySubscriptExpr>(Expr) ||
+ isa<CallExpr>(Expr) ||
+ isa<DeclRefExpr>(Expr) ||
+ isa<CastExpr>(Expr) ||
+ isa<CXXNewExpr>(Expr) ||
+ isa<CXXConstructExpr>(Expr) ||
+ isa<CXXDeleteExpr>(Expr) ||
+ isa<CXXNoexceptExpr>(Expr) ||
+ isa<CXXPseudoDestructorExpr>(Expr) ||
+ isa<CXXScalarValueInitExpr>(Expr) ||
+ isa<CXXThisExpr>(Expr) ||
+ isa<CXXTypeidExpr>(Expr) ||
+ isa<CXXUnresolvedConstructExpr>(Expr) ||
+ isa<ObjCMessageExpr>(Expr) ||
+ isa<ObjCPropertyRefExpr>(Expr) ||
+ isa<ObjCProtocolExpr>(Expr) ||
+ isa<MemberExpr>(Expr) ||
+ isa<ParenExpr>(FullExpr) ||
+ isa<ParenListExpr>(Expr) ||
+ isa<SizeOfPackExpr>(Expr) ||
+ isa<UnaryOperator>(Expr))
+ NeedParen = false;
+
+ // Check if the argument needs to be dereferenced:
+ // (type * -> type) or (type * -> type &).
+ if (const PointerType *FromPtrTy = dyn_cast<PointerType>(FromQTy)) {
+ OverloadFixItKind FixKind = OFIK_Dereference;
+
+ bool CanConvert = CompareTypes(
+ S.Context.getCanonicalType(FromPtrTy->getPointeeType()), ToQTy,
+ S, Begin, VK_LValue);
+ if (CanConvert) {
+ // Do not suggest dereferencing a Null pointer.
+ if (Expr->IgnoreParenCasts()->
+ isNullPointerConstant(S.Context, Expr::NPC_ValueDependentIsNotNull))
+ return false;
+
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Expr)) {
+ if (UO->getOpcode() == UO_AddrOf) {
+ FixKind = OFIK_RemoveTakeAddress;
+ Hints.push_back(FixItHint::CreateRemoval(
+ CharSourceRange::getTokenRange(Begin, Begin)));
+ }
+ } else if (NeedParen) {
+ Hints.push_back(FixItHint::CreateInsertion(Begin, "*("));
+ Hints.push_back(FixItHint::CreateInsertion(End, ")"));
+ } else {
+ Hints.push_back(FixItHint::CreateInsertion(Begin, "*"));
+ }
+
+ NumConversionsFixed++;
+ if (NumConversionsFixed == 1)
+ Kind = FixKind;
+ return true;
+ }
+ }
+
+ // Check if the pointer to the argument needs to be passed:
+ // (type -> type *) or (type & -> type *).
+ if (isa<PointerType>(ToQTy)) {
+ bool CanConvert = false;
+ OverloadFixItKind FixKind = OFIK_TakeAddress;
+
+ // Only suggest taking address of L-values.
+ if (!Expr->isLValue() || Expr->getObjectKind() != OK_Ordinary)
+ return false;
+
+ CanConvert = CompareTypes(S.Context.getPointerType(FromQTy), ToQTy,
+ S, Begin, VK_RValue);
+ if (CanConvert) {
+
+ if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Expr)) {
+ if (UO->getOpcode() == UO_Deref) {
+ FixKind = OFIK_RemoveDereference;
+ Hints.push_back(FixItHint::CreateRemoval(
+ CharSourceRange::getTokenRange(Begin, Begin)));
+ }
+ } else if (NeedParen) {
+ Hints.push_back(FixItHint::CreateInsertion(Begin, "&("));
+ Hints.push_back(FixItHint::CreateInsertion(End, ")"));
+ } else {
+ Hints.push_back(FixItHint::CreateInsertion(Begin, "&"));
+ }
+
+ NumConversionsFixed++;
+ if (NumConversionsFixed == 1)
+ Kind = FixKind;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool isMacroDefined(const Sema &S, StringRef Name) {
+ return S.PP.getMacroInfo(&S.getASTContext().Idents.get(Name));
+}
+
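+/// Suggest a textual zero-initializer for the given type, or return null if
+/// none is appropriate. For instance (a sketch), plain scalars get " = 0",
+/// object pointers get " = nil" when that macro is defined, other pointers
+/// get " = nullptr" under C++11 (or " = NULL" when defined), and aggregate
+/// records get " = {}".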
+const char *Sema::getFixItZeroInitializerForType(QualType T) const {
+ if (T->isScalarType()) {
+ // Suggest " = 0" for non-enumeration scalar types, unless we can find a
+ // better initializer.
+ if (T->isEnumeralType())
+ return 0;
+ if ((T->isObjCObjectPointerType() || T->isBlockPointerType()) &&
+ isMacroDefined(*this, "nil"))
+ return " = nil";
+ if (T->isRealFloatingType())
+ return " = 0.0";
+ if (T->isBooleanType() && LangOpts.CPlusPlus)
+ return " = false";
+ if (T->isPointerType() || T->isMemberPointerType()) {
+ if (LangOpts.CPlusPlus0x)
+ return " = nullptr";
+ else if (isMacroDefined(*this, "NULL"))
+ return " = NULL";
+ }
+ if (T->isCharType())
+ return " = '\\0'";
+ if (T->isWideCharType())
+ return " = L'\\0'";
+ if (T->isChar16Type())
+ return " = u'\\0'";
+ if (T->isChar32Type())
+ return " = U'\\0'";
+ return " = 0";
+ }
+
+ const CXXRecordDecl *RD = T->getAsCXXRecordDecl();
+ if (!RD || !RD->hasDefinition())
+ return 0;
+ if (LangOpts.CPlusPlus0x && !RD->hasUserProvidedDefaultConstructor())
+ return "{}";
+ if (RD->isAggregate())
+ return " = {}";
+ return 0;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
new file mode 100644
index 0000000..a65b41f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaInit.cpp
@@ -0,0 +1,6167 @@
+//===--- SemaInit.cpp - Semantic Analysis for Initializers ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for initializers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/Designator.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/TypeLoc.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include <map>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Sema Initialization Checking
+//===----------------------------------------------------------------------===//
+
+static Expr *IsStringInit(Expr *Init, const ArrayType *AT,
+ ASTContext &Context) {
+ if (!isa<ConstantArrayType>(AT) && !isa<IncompleteArrayType>(AT))
+ return 0;
+
+ // See if this is a string literal or @encode.
+ Init = Init->IgnoreParens();
+
+ // Handle @encode, which is a narrow string.
+ if (isa<ObjCEncodeExpr>(Init) && AT->getElementType()->isCharType())
+ return Init;
+
+ // Otherwise we can only handle string literals.
+ StringLiteral *SL = dyn_cast<StringLiteral>(Init);
+ if (SL == 0) return 0;
+
+ QualType ElemTy = Context.getCanonicalType(AT->getElementType());
+
+ switch (SL->getKind()) {
+ case StringLiteral::Ascii:
+ case StringLiteral::UTF8:
+ // char array can be initialized with a narrow string.
+ // Only allow char x[] = "foo"; not char x[] = L"foo";
+ return ElemTy->isCharType() ? Init : 0;
+ case StringLiteral::UTF16:
+ return ElemTy->isChar16Type() ? Init : 0;
+ case StringLiteral::UTF32:
+ return ElemTy->isChar32Type() ? Init : 0;
+ case StringLiteral::Wide:
+ // wchar_t array can be initialized with a wide string: C99 6.7.8p15 (with
+ // correction from DR343): "An array with element type compatible with a
+ // qualified or unqualified version of wchar_t may be initialized by a wide
+ // string literal, optionally enclosed in braces."
+ if (Context.typesAreCompatible(Context.getWCharType(),
+ ElemTy.getUnqualifiedType()))
+ return Init;
+
+ return 0;
+ }
+
+ llvm_unreachable("missed a StringLiteral kind?");
+}
+
+static Expr *IsStringInit(Expr *init, QualType declType, ASTContext &Context) {
+ const ArrayType *arrayType = Context.getAsArrayType(declType);
+ if (!arrayType) return 0;
+
+ return IsStringInit(init, arrayType, Context);
+}
+
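+/// Check and adjust the type of a character array initialized from a string
+/// literal. For example, 'char x[] = "foo";' completes the array type to
+/// char[4] (C99 6.7.8p22), while an initializer longer than a fixed-size
+/// array is diagnosed (an error in C++, a warning in C).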
+static void CheckStringInit(Expr *Str, QualType &DeclT, const ArrayType *AT,
+ Sema &S) {
+ // Get the length of the string as parsed.
+ uint64_t StrLength =
+ cast<ConstantArrayType>(Str->getType())->getSize().getZExtValue();
+
+
+ if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) {
+ // C99 6.7.8p14. We have an array of character type with unknown size
+ // being initialized to a string literal.
+ llvm::APSInt ConstVal(32);
+ ConstVal = StrLength;
+ // Return a new array type (C99 6.7.8p22).
+ DeclT = S.Context.getConstantArrayType(IAT->getElementType(),
+ ConstVal,
+ ArrayType::Normal, 0);
+ return;
+ }
+
+ const ConstantArrayType *CAT = cast<ConstantArrayType>(AT);
+
+ // We have an array of character type with known size. However,
+ // the size may be smaller or larger than the string we are initializing.
+ // FIXME: Avoid truncation for 64-bit length strings.
+ if (S.getLangOpts().CPlusPlus) {
+ if (StringLiteral *SL = dyn_cast<StringLiteral>(Str)) {
+ // For Pascal strings it's OK to strip off the terminating null character,
+ // so the example below is valid:
+ //
+ // unsigned char a[2] = "\pa";
+ if (SL->isPascal())
+ StrLength--;
+ }
+
+ // [dcl.init.string]p2
+ if (StrLength > CAT->getSize().getZExtValue())
+ S.Diag(Str->getLocStart(),
+ diag::err_initializer_string_for_char_array_too_long)
+ << Str->getSourceRange();
+ } else {
+ // C99 6.7.8p14.
+ if (StrLength-1 > CAT->getSize().getZExtValue())
+ S.Diag(Str->getLocStart(),
+ diag::warn_initializer_string_for_char_array_too_long)
+ << Str->getSourceRange();
+ }
+
+ // Set the type to the actual size that we are initializing. If we have
+ // something like:
+ // char x[1] = "foo";
+ // then this will set the string literal's type to char[1].
+ Str->setType(DeclT);
+}
+
+//===----------------------------------------------------------------------===//
+// Semantic checking for initializer lists.
+//===----------------------------------------------------------------------===//
+
+/// @brief Semantic checking for initializer lists.
+///
+/// The InitListChecker class contains a set of routines that each
+/// handle the initialization of a certain kind of entity, e.g.,
+/// arrays, vectors, struct/union types, scalars, etc. The
+/// InitListChecker itself performs a recursive walk of the subobject
+/// structure of the type to be initialized, while stepping through
+/// the initializer list one element at a time. The IList and Index
+/// parameters to each of the Check* routines contain the active
+/// (syntactic) initializer list and the index into that initializer
+/// list that represents the current initializer. Each routine is
+/// responsible for moving that Index forward as it consumes elements.
+///
+/// Each Check* routine also has StructuredList/StructuredIndex
+/// arguments, which contain the current "structured" (semantic)
+/// initializer list and the index into that initializer list where we
+/// are copying initializers as we map them over to the semantic
+/// list. Once we have completed our recursive walk of the subobject
+/// structure, we will have constructed a full semantic initializer
+/// list.
+///
+/// C99 designators cause changes in the initializer list traversal,
+/// because they make the initialization "jump" into a specific
+/// subobject and then continue the initialization from that
+/// point. CheckDesignatedInitializer() recursively steps into the
+/// designated subobject and manages backing out the recursion to
+/// initialize the subobjects after the one designated.
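+///
+/// For example (a sketch), given 'struct S { int a[2]; int b; };', the
+/// braced initializer '{ 1, 2, 3 }' is walked into the fully structured
+/// form '{ { 1, 2 }, 3 }', with the elided braces diagnosed when the
+/// context does not permit brace elision.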
+namespace {
+class InitListChecker {
+ Sema &SemaRef;
+ bool hadError;
+ bool VerifyOnly; // no diagnostics, no structure building
+ bool AllowBraceElision;
+ llvm::DenseMap<InitListExpr *, InitListExpr *> SyntacticToSemantic;
+ InitListExpr *FullyStructuredList;
+
+ void CheckImplicitInitList(const InitializedEntity &Entity,
+ InitListExpr *ParentIList, QualType T,
+ unsigned &Index, InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ void CheckExplicitInitList(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType &T,
+ unsigned &Index, InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject = false);
+ void CheckListElementTypes(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType &DeclType,
+ bool SubobjectIsDesignatorContext,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject = false);
+ void CheckSubElementType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType ElemType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ void CheckComplexType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ void CheckScalarType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ void CheckReferenceType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ void CheckVectorType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType, unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ void CheckStructUnionTypes(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ RecordDecl::field_iterator Field,
+ bool SubobjectIsDesignatorContext, unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject = false);
+ void CheckArrayType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType &DeclType,
+ llvm::APSInt elementIndex,
+ bool SubobjectIsDesignatorContext, unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex);
+ bool CheckDesignatedInitializer(const InitializedEntity &Entity,
+ InitListExpr *IList, DesignatedInitExpr *DIE,
+ unsigned DesigIdx,
+ QualType &CurrentObjectType,
+ RecordDecl::field_iterator *NextField,
+ llvm::APSInt *NextElementIndex,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool FinishSubobjectInit,
+ bool TopLevelObject);
+ InitListExpr *getStructuredSubobjectInit(InitListExpr *IList, unsigned Index,
+ QualType CurrentObjectType,
+ InitListExpr *StructuredList,
+ unsigned StructuredIndex,
+ SourceRange InitRange);
+ void UpdateStructuredListElement(InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ Expr *expr);
+ int numArrayElements(QualType DeclType);
+ int numStructUnionElements(QualType DeclType);
+
+ void FillInValueInitForField(unsigned Init, FieldDecl *Field,
+ const InitializedEntity &ParentEntity,
+ InitListExpr *ILE, bool &RequiresSecondPass);
+ void FillInValueInitializations(const InitializedEntity &Entity,
+ InitListExpr *ILE, bool &RequiresSecondPass);
+ bool CheckFlexibleArrayInit(const InitializedEntity &Entity,
+ Expr *InitExpr, FieldDecl *Field,
+ bool TopLevelObject);
+ void CheckValueInitializable(const InitializedEntity &Entity);
+
+public:
+ InitListChecker(Sema &S, const InitializedEntity &Entity,
+ InitListExpr *IL, QualType &T, bool VerifyOnly,
+ bool AllowBraceElision);
+ bool HadError() { return hadError; }
+
+ /// @brief Retrieves the fully-structured initializer list used for
+ /// semantic analysis and code generation.
+ InitListExpr *getFullyStructuredList() const { return FullyStructuredList; }
+};
+} // end anonymous namespace
+
+void InitListChecker::CheckValueInitializable(const InitializedEntity &Entity) {
+ assert(VerifyOnly &&
+ "CheckValueInitializable is only inteded for verification mode.");
+
+ SourceLocation Loc;
+ InitializationKind Kind = InitializationKind::CreateValue(Loc, Loc, Loc,
+ true);
+ InitializationSequence InitSeq(SemaRef, Entity, Kind, 0, 0);
+ if (InitSeq.Failed())
+ hadError = true;
+}
+
+void InitListChecker::FillInValueInitForField(unsigned Init, FieldDecl *Field,
+ const InitializedEntity &ParentEntity,
+ InitListExpr *ILE,
+ bool &RequiresSecondPass) {
+ SourceLocation Loc = ILE->getLocStart();
+ unsigned NumInits = ILE->getNumInits();
+ InitializedEntity MemberEntity
+ = InitializedEntity::InitializeMember(Field, &ParentEntity);
+ if (Init >= NumInits || !ILE->getInit(Init)) {
+ // FIXME: We probably don't need to handle references
+ // specially here, since value-initialization of references is
+ // handled in InitializationSequence.
+ if (Field->getType()->isReferenceType()) {
+ // C++ [dcl.init.aggr]p9:
+ // If an incomplete or empty initializer-list leaves a
+ // member of reference type uninitialized, the program is
+ // ill-formed.
+ SemaRef.Diag(Loc, diag::err_init_reference_member_uninitialized)
+ << Field->getType()
+ << ILE->getSyntacticForm()->getSourceRange();
+ SemaRef.Diag(Field->getLocation(),
+ diag::note_uninit_reference_member);
+ hadError = true;
+ return;
+ }
+
+ InitializationKind Kind = InitializationKind::CreateValue(Loc, Loc, Loc,
+ true);
+ InitializationSequence InitSeq(SemaRef, MemberEntity, Kind, 0, 0);
+ if (!InitSeq) {
+ InitSeq.Diagnose(SemaRef, MemberEntity, Kind, 0, 0);
+ hadError = true;
+ return;
+ }
+
+ ExprResult MemberInit
+ = InitSeq.Perform(SemaRef, MemberEntity, Kind, MultiExprArg());
+ if (MemberInit.isInvalid()) {
+ hadError = true;
+ return;
+ }
+
+ if (hadError) {
+ // Do nothing
+ } else if (Init < NumInits) {
+ ILE->setInit(Init, MemberInit.takeAs<Expr>());
+ } else if (InitSeq.isConstructorInitialization()) {
+ // Value-initialization requires a constructor call, so
+ // extend the initializer list to include the constructor
+ // call and make a note that we'll need to take another pass
+ // through the initializer list.
+ ILE->updateInit(SemaRef.Context, Init, MemberInit.takeAs<Expr>());
+ RequiresSecondPass = true;
+ }
+ } else if (InitListExpr *InnerILE
+ = dyn_cast<InitListExpr>(ILE->getInit(Init)))
+ FillInValueInitializations(MemberEntity, InnerILE,
+ RequiresSecondPass);
+}
+
+/// Recursively replaces NULL values within the given initializer list
+/// with expressions that perform value-initialization of the
+/// appropriate type.
+void
+InitListChecker::FillInValueInitializations(const InitializedEntity &Entity,
+ InitListExpr *ILE,
+ bool &RequiresSecondPass) {
+ assert((ILE->getType() != SemaRef.Context.VoidTy) &&
+ "Should not have void type");
+ SourceLocation Loc = ILE->getLocStart();
+ if (ILE->getSyntacticForm())
+ Loc = ILE->getSyntacticForm()->getLocStart();
+
+ if (const RecordType *RType = ILE->getType()->getAs<RecordType>()) {
+ if (RType->getDecl()->isUnion() &&
+ ILE->getInitializedFieldInUnion())
+ FillInValueInitForField(0, ILE->getInitializedFieldInUnion(),
+ Entity, ILE, RequiresSecondPass);
+ else {
+ unsigned Init = 0;
+ for (RecordDecl::field_iterator
+ Field = RType->getDecl()->field_begin(),
+ FieldEnd = RType->getDecl()->field_end();
+ Field != FieldEnd; ++Field) {
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ if (hadError)
+ return;
+
+ FillInValueInitForField(Init, *Field, Entity, ILE, RequiresSecondPass);
+ if (hadError)
+ return;
+
+ ++Init;
+
+ // Only look at the first initialization of a union.
+ if (RType->getDecl()->isUnion())
+ break;
+ }
+ }
+
+ return;
+ }
+
+ QualType ElementType;
+
+ InitializedEntity ElementEntity = Entity;
+ unsigned NumInits = ILE->getNumInits();
+ unsigned NumElements = NumInits;
+ if (const ArrayType *AType = SemaRef.Context.getAsArrayType(ILE->getType())) {
+ ElementType = AType->getElementType();
+ if (const ConstantArrayType *CAType = dyn_cast<ConstantArrayType>(AType))
+ NumElements = CAType->getSize().getZExtValue();
+ ElementEntity = InitializedEntity::InitializeElement(SemaRef.Context,
+ 0, Entity);
+ } else if (const VectorType *VType = ILE->getType()->getAs<VectorType>()) {
+ ElementType = VType->getElementType();
+ NumElements = VType->getNumElements();
+ ElementEntity = InitializedEntity::InitializeElement(SemaRef.Context,
+ 0, Entity);
+ } else
+ ElementType = ILE->getType();
+
+
+ for (unsigned Init = 0; Init != NumElements; ++Init) {
+ if (hadError)
+ return;
+
+ if (ElementEntity.getKind() == InitializedEntity::EK_ArrayElement ||
+ ElementEntity.getKind() == InitializedEntity::EK_VectorElement)
+ ElementEntity.setElementIndex(Init);
+
+ Expr *InitExpr = (Init < NumInits ? ILE->getInit(Init) : 0);
+ if (!InitExpr && !ILE->hasArrayFiller()) {
+ InitializationKind Kind = InitializationKind::CreateValue(Loc, Loc, Loc,
+ true);
+ InitializationSequence InitSeq(SemaRef, ElementEntity, Kind, 0, 0);
+ if (!InitSeq) {
+ InitSeq.Diagnose(SemaRef, ElementEntity, Kind, 0, 0);
+ hadError = true;
+ return;
+ }
+
+ ExprResult ElementInit
+ = InitSeq.Perform(SemaRef, ElementEntity, Kind, MultiExprArg());
+ if (ElementInit.isInvalid()) {
+ hadError = true;
+ return;
+ }
+
+ if (hadError) {
+ // Do nothing
+ } else if (Init < NumInits) {
+ // For arrays, just set the expression used for value-initialization
+ // of the "holes" in the array.
+ if (ElementEntity.getKind() == InitializedEntity::EK_ArrayElement)
+ ILE->setArrayFiller(ElementInit.takeAs<Expr>());
+ else
+ ILE->setInit(Init, ElementInit.takeAs<Expr>());
+ } else {
+ // For arrays, just set the expression used for value-initialization
+ // of the rest of elements and exit.
+ if (ElementEntity.getKind() == InitializedEntity::EK_ArrayElement) {
+ ILE->setArrayFiller(ElementInit.takeAs<Expr>());
+ return;
+ }
+
+ if (InitSeq.isConstructorInitialization()) {
+ // Value-initialization requires a constructor call, so
+ // extend the initializer list to include the constructor
+ // call and make a note that we'll need to take another pass
+ // through the initializer list.
+ ILE->updateInit(SemaRef.Context, Init, ElementInit.takeAs<Expr>());
+ RequiresSecondPass = true;
+ }
+ }
+ } else if (InitListExpr *InnerILE
+ = dyn_cast_or_null<InitListExpr>(InitExpr))
+ FillInValueInitializations(ElementEntity, InnerILE, RequiresSecondPass);
+ }
+}
+
+
+InitListChecker::InitListChecker(Sema &S, const InitializedEntity &Entity,
+ InitListExpr *IL, QualType &T,
+ bool VerifyOnly, bool AllowBraceElision)
+ : SemaRef(S), VerifyOnly(VerifyOnly), AllowBraceElision(AllowBraceElision) {
+ hadError = false;
+
+ unsigned newIndex = 0;
+ unsigned newStructuredIndex = 0;
+ FullyStructuredList
+ = getStructuredSubobjectInit(IL, newIndex, T, 0, 0, IL->getSourceRange());
+ CheckExplicitInitList(Entity, IL, T, newIndex,
+ FullyStructuredList, newStructuredIndex,
+ /*TopLevelObject=*/true);
+
+ if (!hadError && !VerifyOnly) {
+ bool RequiresSecondPass = false;
+ FillInValueInitializations(Entity, FullyStructuredList, RequiresSecondPass);
+ if (RequiresSecondPass && !hadError)
+ FillInValueInitializations(Entity, FullyStructuredList,
+ RequiresSecondPass);
+ }
+}
+
+int InitListChecker::numArrayElements(QualType DeclType) {
+ // FIXME: use a proper constant
+ int maxElements = 0x7FFFFFFF;
+ if (const ConstantArrayType *CAT =
+ SemaRef.Context.getAsConstantArrayType(DeclType)) {
+ maxElements = static_cast<int>(CAT->getSize().getZExtValue());
+ }
+ return maxElements;
+}
+
+int InitListChecker::numStructUnionElements(QualType DeclType) {
+ RecordDecl *structDecl = DeclType->getAs<RecordType>()->getDecl();
+ int InitializableMembers = 0;
+ for (RecordDecl::field_iterator
+ Field = structDecl->field_begin(),
+ FieldEnd = structDecl->field_end();
+ Field != FieldEnd; ++Field) {
+ if (!Field->isUnnamedBitfield())
+ ++InitializableMembers;
+ }
+ if (structDecl->isUnion())
+ return std::min(InitializableMembers, 1);
+ return InitializableMembers - structDecl->hasFlexibleArrayMember();
+}
+
+void InitListChecker::CheckImplicitInitList(const InitializedEntity &Entity,
+ InitListExpr *ParentIList,
+ QualType T, unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ int maxElements = 0;
+
+ if (T->isArrayType())
+ maxElements = numArrayElements(T);
+ else if (T->isRecordType())
+ maxElements = numStructUnionElements(T);
+ else if (T->isVectorType())
+ maxElements = T->getAs<VectorType>()->getNumElements();
+ else
+ llvm_unreachable("CheckImplicitInitList(): Illegal type");
+
+ if (maxElements == 0) {
+ if (!VerifyOnly)
+ SemaRef.Diag(ParentIList->getInit(Index)->getLocStart(),
+ diag::err_implicit_empty_initializer);
+ ++Index;
+ hadError = true;
+ return;
+ }
+
+ // Build a structured initializer list corresponding to this subobject.
+ InitListExpr *StructuredSubobjectInitList
+ = getStructuredSubobjectInit(ParentIList, Index, T, StructuredList,
+ StructuredIndex,
+ SourceRange(ParentIList->getInit(Index)->getLocStart(),
+ ParentIList->getSourceRange().getEnd()));
+ unsigned StructuredSubobjectInitIndex = 0;
+
+ // Check the element types and build the structural subobject.
+ unsigned StartIndex = Index;
+ CheckListElementTypes(Entity, ParentIList, T,
+ /*SubobjectIsDesignatorContext=*/false, Index,
+ StructuredSubobjectInitList,
+ StructuredSubobjectInitIndex);
+
+ if (VerifyOnly) {
+ if (!AllowBraceElision && (T->isArrayType() || T->isRecordType()))
+ hadError = true;
+ } else {
+ StructuredSubobjectInitList->setType(T);
+
+ unsigned EndIndex = (Index == StartIndex? StartIndex : Index - 1);
+    // Update the structured sub-object initializer so that its ending
+    // range corresponds with the end of the last initializer it used.
+ if (EndIndex < ParentIList->getNumInits()) {
+ SourceLocation EndLoc
+ = ParentIList->getInit(EndIndex)->getSourceRange().getEnd();
+ StructuredSubobjectInitList->setRBraceLoc(EndLoc);
+ }
+
+ // Complain about missing braces.
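+    // For example, "struct { struct { int x, y; } p; } s = { 1, 2 };"
+    // elides the braces around the initializer of "p".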
+ if (T->isArrayType() || T->isRecordType()) {
+ SemaRef.Diag(StructuredSubobjectInitList->getLocStart(),
+ AllowBraceElision ? diag::warn_missing_braces :
+ diag::err_missing_braces)
+ << StructuredSubobjectInitList->getSourceRange()
+ << FixItHint::CreateInsertion(
+ StructuredSubobjectInitList->getLocStart(), "{")
+ << FixItHint::CreateInsertion(
+ SemaRef.PP.getLocForEndOfToken(
+ StructuredSubobjectInitList->getLocEnd()),
+ "}");
+ if (!AllowBraceElision)
+ hadError = true;
+ }
+ }
+}
+
+void InitListChecker::CheckExplicitInitList(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType &T,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject) {
+ assert(IList->isExplicit() && "Illegal Implicit InitListExpr");
+ if (!VerifyOnly) {
+ SyntacticToSemantic[IList] = StructuredList;
+ StructuredList->setSyntacticForm(IList);
+ }
+ CheckListElementTypes(Entity, IList, T, /*SubobjectIsDesignatorContext=*/true,
+ Index, StructuredList, StructuredIndex, TopLevelObject);
+ if (!VerifyOnly) {
+ QualType ExprTy = T;
+ if (!ExprTy->isArrayType())
+ ExprTy = ExprTy.getNonLValueExprType(SemaRef.Context);
+ IList->setType(ExprTy);
+ StructuredList->setType(ExprTy);
+ }
+ if (hadError)
+ return;
+
+ if (Index < IList->getNumInits()) {
+ // We have leftover initializers
+ if (VerifyOnly) {
+ if (SemaRef.getLangOpts().CPlusPlus ||
+ (SemaRef.getLangOpts().OpenCL &&
+ IList->getType()->isVectorType())) {
+ hadError = true;
+ }
+ return;
+ }
+
+ if (StructuredIndex == 1 &&
+ IsStringInit(StructuredList->getInit(0), T, SemaRef.Context)) {
+ unsigned DK = diag::warn_excess_initializers_in_char_array_initializer;
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ DK = diag::err_excess_initializers_in_char_array_initializer;
+ hadError = true;
+ }
+      // Special-case the excess-initializers diagnostic for a char array
+      // initialized from a string literal.
+ SemaRef.Diag(IList->getInit(Index)->getLocStart(), DK)
+ << IList->getInit(Index)->getSourceRange();
+ } else if (!T->isIncompleteType()) {
+ // Don't complain for incomplete types, since we'll get an error
+ // elsewhere
+ QualType CurrentObjectType = StructuredList->getType();
+ int initKind =
+ CurrentObjectType->isArrayType()? 0 :
+ CurrentObjectType->isVectorType()? 1 :
+ CurrentObjectType->isScalarType()? 2 :
+ CurrentObjectType->isUnionType()? 3 :
+ 4;
+
+ unsigned DK = diag::warn_excess_initializers;
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ DK = diag::err_excess_initializers;
+ hadError = true;
+ }
+ if (SemaRef.getLangOpts().OpenCL && initKind == 1) {
+ DK = diag::err_excess_initializers;
+ hadError = true;
+ }
+
+ SemaRef.Diag(IList->getInit(Index)->getLocStart(), DK)
+ << initKind << IList->getInit(Index)->getSourceRange();
+ }
+ }
+
+ if (!VerifyOnly && T->isScalarType() && IList->getNumInits() == 1 &&
+ !TopLevelObject)
+ SemaRef.Diag(IList->getLocStart(), diag::warn_braces_around_scalar_init)
+ << IList->getSourceRange()
+ << FixItHint::CreateRemoval(IList->getLocStart())
+ << FixItHint::CreateRemoval(IList->getLocEnd());
+}
+
+void InitListChecker::CheckListElementTypes(const InitializedEntity &Entity,
+ InitListExpr *IList,
+ QualType &DeclType,
+ bool SubobjectIsDesignatorContext,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject) {
+ if (DeclType->isAnyComplexType() && SubobjectIsDesignatorContext) {
+ // Explicitly braced initializer for complex type can be real+imaginary
+ // parts.
+ CheckComplexType(Entity, IList, DeclType, Index,
+ StructuredList, StructuredIndex);
+ } else if (DeclType->isScalarType()) {
+ CheckScalarType(Entity, IList, DeclType, Index,
+ StructuredList, StructuredIndex);
+ } else if (DeclType->isVectorType()) {
+ CheckVectorType(Entity, IList, DeclType, Index,
+ StructuredList, StructuredIndex);
+ } else if (DeclType->isAggregateType()) {
+ if (DeclType->isRecordType()) {
+ RecordDecl *RD = DeclType->getAs<RecordType>()->getDecl();
+ CheckStructUnionTypes(Entity, IList, DeclType, RD->field_begin(),
+ SubobjectIsDesignatorContext, Index,
+ StructuredList, StructuredIndex,
+ TopLevelObject);
+ } else if (DeclType->isArrayType()) {
+ llvm::APSInt Zero(
+ SemaRef.Context.getTypeSize(SemaRef.Context.getSizeType()),
+ false);
+ CheckArrayType(Entity, IList, DeclType, Zero,
+ SubobjectIsDesignatorContext, Index,
+ StructuredList, StructuredIndex);
+ } else
+ llvm_unreachable("Aggregate that isn't a structure or array?!");
+ } else if (DeclType->isVoidType() || DeclType->isFunctionType()) {
+ // This type is invalid, issue a diagnostic.
+ ++Index;
+ if (!VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(), diag::err_illegal_initializer_type)
+ << DeclType;
+ hadError = true;
+ } else if (DeclType->isRecordType()) {
+ // C++ [dcl.init]p14:
+ // [...] If the class is an aggregate (8.5.1), and the initializer
+ // is a brace-enclosed list, see 8.5.1.
+ //
+ // Note: 8.5.1 is handled below; here, we diagnose the case where
+ // we have an initializer list and a destination type that is not
+ // an aggregate.
+ // FIXME: In C++0x, this is yet another form of initialization.
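+    // For example, given "struct X { X(int); };", the declaration
+    // "X x = { 1 };" is diagnosed here in C++98 because X is not an
+    // aggregate.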
+ if (!VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(), diag::err_init_non_aggr_init_list)
+ << DeclType << IList->getSourceRange();
+ hadError = true;
+ } else if (DeclType->isReferenceType()) {
+ CheckReferenceType(Entity, IList, DeclType, Index,
+ StructuredList, StructuredIndex);
+ } else if (DeclType->isObjCObjectType()) {
+ if (!VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(), diag::err_init_objc_class)
+ << DeclType;
+ hadError = true;
+ } else {
+ if (!VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(), diag::err_illegal_initializer_type)
+ << DeclType;
+ hadError = true;
+ }
+}
+
+void InitListChecker::CheckSubElementType(const InitializedEntity &Entity,
+ InitListExpr *IList,
+ QualType ElemType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ Expr *expr = IList->getInit(Index);
+ if (InitListExpr *SubInitList = dyn_cast<InitListExpr>(expr)) {
+ unsigned newIndex = 0;
+ unsigned newStructuredIndex = 0;
+ InitListExpr *newStructuredList
+ = getStructuredSubobjectInit(IList, Index, ElemType,
+ StructuredList, StructuredIndex,
+ SubInitList->getSourceRange());
+ CheckExplicitInitList(Entity, SubInitList, ElemType, newIndex,
+ newStructuredList, newStructuredIndex);
+ ++StructuredIndex;
+ ++Index;
+ return;
+ } else if (ElemType->isScalarType()) {
+ return CheckScalarType(Entity, IList, ElemType, Index,
+ StructuredList, StructuredIndex);
+ } else if (ElemType->isReferenceType()) {
+ return CheckReferenceType(Entity, IList, ElemType, Index,
+ StructuredList, StructuredIndex);
+ }
+
+ if (const ArrayType *arrayType = SemaRef.Context.getAsArrayType(ElemType)) {
+ // arrayType can be incomplete if we're initializing a flexible
+ // array member. There's nothing we can do with the completed
+ // type here, though.
+
+ if (Expr *Str = IsStringInit(expr, arrayType, SemaRef.Context)) {
+ if (!VerifyOnly) {
+ CheckStringInit(Str, ElemType, arrayType, SemaRef);
+ UpdateStructuredListElement(StructuredList, StructuredIndex, Str);
+ }
+ ++Index;
+ return;
+ }
+
+ // Fall through for subaggregate initialization.
+
+ } else if (SemaRef.getLangOpts().CPlusPlus) {
+ // C++ [dcl.init.aggr]p12:
+ // All implicit type conversions (clause 4) are considered when
+ // initializing the aggregate member with an initializer from
+ // an initializer-list. If the initializer can initialize a
+ // member, the member is initialized. [...]
+
+ // FIXME: Better EqualLoc?
+ InitializationKind Kind =
+ InitializationKind::CreateCopy(expr->getLocStart(), SourceLocation());
+ InitializationSequence Seq(SemaRef, Entity, Kind, &expr, 1);
+
+ if (Seq) {
+ if (!VerifyOnly) {
+ ExprResult Result =
+ Seq.Perform(SemaRef, Entity, Kind, MultiExprArg(&expr, 1));
+ if (Result.isInvalid())
+ hadError = true;
+
+ UpdateStructuredListElement(StructuredList, StructuredIndex,
+ Result.takeAs<Expr>());
+ }
+ ++Index;
+ return;
+ }
+
+ // Fall through for subaggregate initialization
+ } else {
+ // C99 6.7.8p13:
+ //
+ // The initializer for a structure or union object that has
+ // automatic storage duration shall be either an initializer
+ // list as described below, or a single expression that has
+ // compatible structure or union type. In the latter case, the
+ // initial value of the object, including unnamed members, is
+ // that of the expression.
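+    // For example, given "struct P inner;", a member of type "struct P" in
+    // an enclosing aggregate may be initialized directly from "inner".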
+ ExprResult ExprRes = SemaRef.Owned(expr);
+ if ((ElemType->isRecordType() || ElemType->isVectorType()) &&
+ SemaRef.CheckSingleAssignmentConstraints(ElemType, ExprRes,
+ !VerifyOnly)
+ == Sema::Compatible) {
+ if (ExprRes.isInvalid())
+ hadError = true;
+ else {
+ ExprRes = SemaRef.DefaultFunctionArrayLvalueConversion(ExprRes.take());
+ if (ExprRes.isInvalid())
+ hadError = true;
+ }
+ UpdateStructuredListElement(StructuredList, StructuredIndex,
+ ExprRes.takeAs<Expr>());
+ ++Index;
+ return;
+ }
+ ExprRes.release();
+ // Fall through for subaggregate initialization
+ }
+
+ // C++ [dcl.init.aggr]p12:
+ //
+ // [...] Otherwise, if the member is itself a non-empty
+ // subaggregate, brace elision is assumed and the initializer is
+ // considered for the initialization of the first member of
+ // the subaggregate.
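+  // For example, "int a[2][2] = { 1, 2, 3, 4 };" relies on brace elision
+  // for the initializers of the inner arrays.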
+ if (!SemaRef.getLangOpts().OpenCL &&
+ (ElemType->isAggregateType() || ElemType->isVectorType())) {
+ CheckImplicitInitList(Entity, IList, ElemType, Index, StructuredList,
+ StructuredIndex);
+ ++StructuredIndex;
+ } else {
+ if (!VerifyOnly) {
+ // We cannot initialize this element, so let
+ // PerformCopyInitialization produce the appropriate diagnostic.
+ SemaRef.PerformCopyInitialization(Entity, SourceLocation(),
+ SemaRef.Owned(expr),
+ /*TopLevelOfInitList=*/true);
+ }
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ }
+}
+
+void InitListChecker::CheckComplexType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ assert(Index == 0 && "Index in explicit init list must be zero");
+
+ // As an extension, clang supports complex initializers, which initialize
+ // a complex number component-wise. When an explicit initializer list for
+  // a complex number contains exactly two initializers, this extension kicks
+  // in: it expects the initializer list to contain two elements convertible
+  // to the element type of the complex type. The first element initializes
+  // the real part, and the second element initializes the imaginary part.
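+  // For example, "_Complex double z = { 1.0, 2.0 };" initializes the real
+  // part to 1.0 and the imaginary part to 2.0.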
+
+ if (IList->getNumInits() != 2)
+ return CheckScalarType(Entity, IList, DeclType, Index, StructuredList,
+ StructuredIndex);
+
+ // This is an extension in C. (The builtin _Complex type does not exist
+ // in the C++ standard.)
+ if (!SemaRef.getLangOpts().CPlusPlus && !VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(), diag::ext_complex_component_init)
+ << IList->getSourceRange();
+
+ // Initialize the complex number.
+ QualType elementType = DeclType->getAs<ComplexType>()->getElementType();
+ InitializedEntity ElementEntity =
+ InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);
+
+ for (unsigned i = 0; i < 2; ++i) {
+ ElementEntity.setElementIndex(Index);
+ CheckSubElementType(ElementEntity, IList, elementType, Index,
+ StructuredList, StructuredIndex);
+ }
+}
+
+
+void InitListChecker::CheckScalarType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ if (Index >= IList->getNumInits()) {
+ if (!VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(),
+ SemaRef.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_empty_scalar_initializer :
+ diag::err_empty_scalar_initializer)
+ << IList->getSourceRange();
+ hadError = !SemaRef.getLangOpts().CPlusPlus0x;
+ ++Index;
+ ++StructuredIndex;
+ return;
+ }
+
+ Expr *expr = IList->getInit(Index);
+ if (InitListExpr *SubIList = dyn_cast<InitListExpr>(expr)) {
+ if (!VerifyOnly)
+ SemaRef.Diag(SubIList->getLocStart(),
+ diag::warn_many_braces_around_scalar_init)
+ << SubIList->getSourceRange();
+
+ CheckScalarType(Entity, SubIList, DeclType, Index, StructuredList,
+ StructuredIndex);
+ return;
+ } else if (isa<DesignatedInitExpr>(expr)) {
+ if (!VerifyOnly)
+ SemaRef.Diag(expr->getLocStart(),
+ diag::err_designator_for_scalar_init)
+ << DeclType << expr->getSourceRange();
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ return;
+ }
+
+ if (VerifyOnly) {
+ if (!SemaRef.CanPerformCopyInitialization(Entity, SemaRef.Owned(expr)))
+ hadError = true;
+ ++Index;
+ return;
+ }
+
+ ExprResult Result =
+ SemaRef.PerformCopyInitialization(Entity, expr->getLocStart(),
+ SemaRef.Owned(expr),
+ /*TopLevelOfInitList=*/true);
+
+ Expr *ResultExpr = 0;
+
+ if (Result.isInvalid())
+ hadError = true; // types weren't compatible.
+ else {
+ ResultExpr = Result.takeAs<Expr>();
+
+ if (ResultExpr != expr) {
+ // The type was promoted, update initializer list.
+ IList->setInit(Index, ResultExpr);
+ }
+ }
+ if (hadError)
+ ++StructuredIndex;
+ else
+ UpdateStructuredListElement(StructuredList, StructuredIndex, ResultExpr);
+ ++Index;
+}
+
+void InitListChecker::CheckReferenceType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ if (Index >= IList->getNumInits()) {
+ // FIXME: It would be wonderful if we could point at the actual member. In
+ // general, it would be useful to pass location information down the stack,
+ // so that we know the location (or decl) of the "current object" being
+ // initialized.
+ if (!VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(),
+ diag::err_init_reference_member_uninitialized)
+ << DeclType
+ << IList->getSourceRange();
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ return;
+ }
+
+ Expr *expr = IList->getInit(Index);
+ if (isa<InitListExpr>(expr) && !SemaRef.getLangOpts().CPlusPlus0x) {
+ if (!VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(), diag::err_init_non_aggr_init_list)
+ << DeclType << IList->getSourceRange();
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ return;
+ }
+
+ if (VerifyOnly) {
+ if (!SemaRef.CanPerformCopyInitialization(Entity, SemaRef.Owned(expr)))
+ hadError = true;
+ ++Index;
+ return;
+ }
+
+ ExprResult Result =
+ SemaRef.PerformCopyInitialization(Entity, expr->getLocStart(),
+ SemaRef.Owned(expr),
+ /*TopLevelOfInitList=*/true);
+
+ if (Result.isInvalid())
+ hadError = true;
+
+ expr = Result.takeAs<Expr>();
+ IList->setInit(Index, expr);
+
+ if (hadError)
+ ++StructuredIndex;
+ else
+ UpdateStructuredListElement(StructuredList, StructuredIndex, expr);
+ ++Index;
+}
+
+void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType DeclType,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ const VectorType *VT = DeclType->getAs<VectorType>();
+ unsigned maxElements = VT->getNumElements();
+ unsigned numEltsInit = 0;
+ QualType elementType = VT->getElementType();
+
+ if (Index >= IList->getNumInits()) {
+ // Make sure the element type can be value-initialized.
+ if (VerifyOnly)
+ CheckValueInitializable(
+ InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity));
+ return;
+ }
+
+ if (!SemaRef.getLangOpts().OpenCL) {
+ // If the initializing element is a vector, try to copy-initialize
+ // instead of breaking it apart (which is doomed to failure anyway).
+ Expr *Init = IList->getInit(Index);
+ if (!isa<InitListExpr>(Init) && Init->getType()->isVectorType()) {
+ if (VerifyOnly) {
+ if (!SemaRef.CanPerformCopyInitialization(Entity, SemaRef.Owned(Init)))
+ hadError = true;
+ ++Index;
+ return;
+ }
+
+ ExprResult Result =
+ SemaRef.PerformCopyInitialization(Entity, Init->getLocStart(),
+ SemaRef.Owned(Init),
+ /*TopLevelOfInitList=*/true);
+
+ Expr *ResultExpr = 0;
+ if (Result.isInvalid())
+ hadError = true; // types weren't compatible.
+ else {
+ ResultExpr = Result.takeAs<Expr>();
+
+ if (ResultExpr != Init) {
+ // The type was promoted, update initializer list.
+ IList->setInit(Index, ResultExpr);
+ }
+ }
+ if (hadError)
+ ++StructuredIndex;
+ else
+ UpdateStructuredListElement(StructuredList, StructuredIndex,
+ ResultExpr);
+ ++Index;
+ return;
+ }
+
+ InitializedEntity ElementEntity =
+ InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);
+
+ for (unsigned i = 0; i < maxElements; ++i, ++numEltsInit) {
+ // Don't attempt to go past the end of the init list
+ if (Index >= IList->getNumInits()) {
+ if (VerifyOnly)
+ CheckValueInitializable(ElementEntity);
+ break;
+ }
+
+ ElementEntity.setElementIndex(Index);
+ CheckSubElementType(ElementEntity, IList, elementType, Index,
+ StructuredList, StructuredIndex);
+ }
+ return;
+ }
+
+ InitializedEntity ElementEntity =
+ InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);
+
+  // OpenCL initializers allow vectors to be constructed from other vectors.
+ for (unsigned i = 0; i < maxElements; ++i) {
+ // Don't attempt to go past the end of the init list
+ if (Index >= IList->getNumInits())
+ break;
+
+ ElementEntity.setElementIndex(Index);
+
+ QualType IType = IList->getInit(Index)->getType();
+ if (!IType->isVectorType()) {
+ CheckSubElementType(ElementEntity, IList, elementType, Index,
+ StructuredList, StructuredIndex);
+ ++numEltsInit;
+ } else {
+ QualType VecType;
+ const VectorType *IVT = IType->getAs<VectorType>();
+ unsigned numIElts = IVT->getNumElements();
+
+ if (IType->isExtVectorType())
+ VecType = SemaRef.Context.getExtVectorType(elementType, numIElts);
+ else
+ VecType = SemaRef.Context.getVectorType(elementType, numIElts,
+ IVT->getVectorKind());
+ CheckSubElementType(ElementEntity, IList, VecType, Index,
+ StructuredList, StructuredIndex);
+ numEltsInit += numIElts;
+ }
+ }
+
+ // OpenCL requires all elements to be initialized.
+ if (numEltsInit != maxElements) {
+ if (!VerifyOnly)
+ SemaRef.Diag(IList->getLocStart(),
+ diag::err_vector_incorrect_num_initializers)
+ << (numEltsInit < maxElements) << maxElements << numEltsInit;
+ hadError = true;
+ }
+}
+
+void InitListChecker::CheckArrayType(const InitializedEntity &Entity,
+ InitListExpr *IList, QualType &DeclType,
+ llvm::APSInt elementIndex,
+ bool SubobjectIsDesignatorContext,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex) {
+ const ArrayType *arrayType = SemaRef.Context.getAsArrayType(DeclType);
+
+ // Check for the special-case of initializing an array with a string.
+ if (Index < IList->getNumInits()) {
+ if (Expr *Str = IsStringInit(IList->getInit(Index), arrayType,
+ SemaRef.Context)) {
+ // We place the string literal directly into the resulting
+ // initializer list. This is the only place where the structure
+ // of the structured initializer list doesn't match exactly,
+ // because doing so would involve allocating one character
+      // constant for each character of the string.
+ if (!VerifyOnly) {
+ CheckStringInit(Str, DeclType, arrayType, SemaRef);
+ UpdateStructuredListElement(StructuredList, StructuredIndex, Str);
+ StructuredList->resizeInits(SemaRef.Context, StructuredIndex);
+ }
+ ++Index;
+ return;
+ }
+ }
+ if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(arrayType)) {
+ // Check for VLAs; in standard C it would be possible to check this
+ // earlier, but I don't know where clang accepts VLAs (gcc accepts
+ // them in all sorts of strange places).
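+    // For example, "int n = 4; int a[n] = { 1, 2 };" is rejected because a
+    // variable length array may not be initialized.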
+ if (!VerifyOnly)
+ SemaRef.Diag(VAT->getSizeExpr()->getLocStart(),
+ diag::err_variable_object_no_init)
+ << VAT->getSizeExpr()->getSourceRange();
+ hadError = true;
+ ++Index;
+ ++StructuredIndex;
+ return;
+ }
+
+ // We might know the maximum number of elements in advance.
+ llvm::APSInt maxElements(elementIndex.getBitWidth(),
+ elementIndex.isUnsigned());
+ bool maxElementsKnown = false;
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(arrayType)) {
+ maxElements = CAT->getSize();
+ elementIndex = elementIndex.extOrTrunc(maxElements.getBitWidth());
+ elementIndex.setIsUnsigned(maxElements.isUnsigned());
+ maxElementsKnown = true;
+ }
+
+ QualType elementType = arrayType->getElementType();
+ while (Index < IList->getNumInits()) {
+ Expr *Init = IList->getInit(Index);
+ if (DesignatedInitExpr *DIE = dyn_cast<DesignatedInitExpr>(Init)) {
+ // If we're not the subobject that matches up with the '{' for
+ // the designator, we shouldn't be handling the
+ // designator. Return immediately.
+ if (!SubobjectIsDesignatorContext)
+ return;
+
+ // Handle this designated initializer. elementIndex will be
+ // updated to be the next array element we'll initialize.
+ if (CheckDesignatedInitializer(Entity, IList, DIE, 0,
+ DeclType, 0, &elementIndex, Index,
+ StructuredList, StructuredIndex, true,
+ false)) {
+ hadError = true;
+ continue;
+ }
+
+ if (elementIndex.getBitWidth() > maxElements.getBitWidth())
+ maxElements = maxElements.extend(elementIndex.getBitWidth());
+ else if (elementIndex.getBitWidth() < maxElements.getBitWidth())
+ elementIndex = elementIndex.extend(maxElements.getBitWidth());
+ elementIndex.setIsUnsigned(maxElements.isUnsigned());
+
+ // If the array is of incomplete type, keep track of the number of
+ // elements in the initializer.
+ if (!maxElementsKnown && elementIndex > maxElements)
+ maxElements = elementIndex;
+
+ continue;
+ }
+
+ // If we know the maximum number of elements, and we've already
+ // hit it, stop consuming elements in the initializer list.
+ if (maxElementsKnown && elementIndex == maxElements)
+ break;
+
+ InitializedEntity ElementEntity =
+ InitializedEntity::InitializeElement(SemaRef.Context, StructuredIndex,
+ Entity);
+ // Check this element.
+ CheckSubElementType(ElementEntity, IList, elementType, Index,
+ StructuredList, StructuredIndex);
+ ++elementIndex;
+
+ // If the array is of incomplete type, keep track of the number of
+ // elements in the initializer.
+ if (!maxElementsKnown && elementIndex > maxElements)
+ maxElements = elementIndex;
+ }
+ if (!hadError && DeclType->isIncompleteArrayType() && !VerifyOnly) {
+ // If this is an incomplete array type, the actual type needs to
+ // be calculated here.
+ llvm::APSInt Zero(maxElements.getBitWidth(), maxElements.isUnsigned());
+ if (maxElements == Zero) {
+ // Sizing an array implicitly to zero is not allowed by ISO C,
+ // but is supported by GNU.
+ SemaRef.Diag(IList->getLocStart(),
+ diag::ext_typecheck_zero_array_size);
+ }
+
+ DeclType = SemaRef.Context.getConstantArrayType(elementType, maxElements,
+ ArrayType::Normal, 0);
+ }
+ if (!hadError && VerifyOnly) {
+ // Check if there are any members of the array that get value-initialized.
+ // If so, check if doing that is possible.
+ // FIXME: This needs to detect holes left by designated initializers too.
+ if (maxElementsKnown && elementIndex < maxElements)
+ CheckValueInitializable(InitializedEntity::InitializeElement(
+ SemaRef.Context, 0, Entity));
+ }
+}
+
+bool InitListChecker::CheckFlexibleArrayInit(const InitializedEntity &Entity,
+ Expr *InitExpr,
+ FieldDecl *Field,
+ bool TopLevelObject) {
+ // Handle GNU flexible array initializers.
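+  // For example, given "struct S { int n; int tail[]; };", the file-scope
+  // definition "struct S s = { 1, { 2, 3 } };" initializes the flexible
+  // array member as a GNU extension.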
+ unsigned FlexArrayDiag;
+ if (isa<InitListExpr>(InitExpr) &&
+ cast<InitListExpr>(InitExpr)->getNumInits() == 0) {
+ // Empty flexible array init always allowed as an extension
+ FlexArrayDiag = diag::ext_flexible_array_init;
+ } else if (SemaRef.getLangOpts().CPlusPlus) {
+ // Disallow flexible array init in C++; it is not required for gcc
+ // compatibility, and it needs work to IRGen correctly in general.
+ FlexArrayDiag = diag::err_flexible_array_init;
+ } else if (!TopLevelObject) {
+ // Disallow flexible array init on non-top-level object
+ FlexArrayDiag = diag::err_flexible_array_init;
+ } else if (Entity.getKind() != InitializedEntity::EK_Variable) {
+ // Disallow flexible array init on anything which is not a variable.
+ FlexArrayDiag = diag::err_flexible_array_init;
+ } else if (cast<VarDecl>(Entity.getDecl())->hasLocalStorage()) {
+ // Disallow flexible array init on local variables.
+ FlexArrayDiag = diag::err_flexible_array_init;
+ } else {
+ // Allow other cases.
+ FlexArrayDiag = diag::ext_flexible_array_init;
+ }
+
+ if (!VerifyOnly) {
+ SemaRef.Diag(InitExpr->getLocStart(),
+ FlexArrayDiag)
+ << InitExpr->getLocStart();
+ SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member)
+ << Field;
+ }
+
+ return FlexArrayDiag != diag::ext_flexible_array_init;
+}
+
+void InitListChecker::CheckStructUnionTypes(const InitializedEntity &Entity,
+ InitListExpr *IList,
+ QualType DeclType,
+ RecordDecl::field_iterator Field,
+ bool SubobjectIsDesignatorContext,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool TopLevelObject) {
+ RecordDecl* structDecl = DeclType->getAs<RecordType>()->getDecl();
+
+  // If the record is invalid, some of its members are invalid. To avoid
+  // confusion, we forgo checking the initializer for the entire record.
+ if (structDecl->isInvalidDecl()) {
+ hadError = true;
+ return;
+ }
+
+ if (DeclType->isUnionType() && IList->getNumInits() == 0) {
+ // Value-initialize the first named member of the union.
+ RecordDecl *RD = DeclType->getAs<RecordType>()->getDecl();
+ for (RecordDecl::field_iterator FieldEnd = RD->field_end();
+ Field != FieldEnd; ++Field) {
+ if (Field->getDeclName()) {
+ if (VerifyOnly)
+ CheckValueInitializable(
+ InitializedEntity::InitializeMember(*Field, &Entity));
+ else
+ StructuredList->setInitializedFieldInUnion(*Field);
+ break;
+ }
+ }
+ return;
+ }
+
+ // If structDecl is a forward declaration, this loop won't do
+  // anything except look at designated initializers; that's okay,
+ // because an error should get printed out elsewhere. It might be
+ // worthwhile to skip over the rest of the initializer, though.
+ RecordDecl *RD = DeclType->getAs<RecordType>()->getDecl();
+ RecordDecl::field_iterator FieldEnd = RD->field_end();
+ bool InitializedSomething = false;
+ bool CheckForMissingFields = true;
+ while (Index < IList->getNumInits()) {
+ Expr *Init = IList->getInit(Index);
+
+ if (DesignatedInitExpr *DIE = dyn_cast<DesignatedInitExpr>(Init)) {
+ // If we're not the subobject that matches up with the '{' for
+ // the designator, we shouldn't be handling the
+ // designator. Return immediately.
+ if (!SubobjectIsDesignatorContext)
+ return;
+
+ // Handle this designated initializer. Field will be updated to
+ // the next field that we'll be initializing.
+ if (CheckDesignatedInitializer(Entity, IList, DIE, 0,
+ DeclType, &Field, 0, Index,
+ StructuredList, StructuredIndex,
+ true, TopLevelObject))
+ hadError = true;
+
+ InitializedSomething = true;
+
+ // Disable check for missing fields when designators are used.
+ // This matches gcc behaviour.
+ CheckForMissingFields = false;
+ continue;
+ }
+
+ if (Field == FieldEnd) {
+ // We've run out of fields. We're done.
+ break;
+ }
+
+ // We've already initialized a member of a union. We're done.
+ if (InitializedSomething && DeclType->isUnionType())
+ break;
+
+ // If we've hit the flexible array member at the end, we're done.
+ if (Field->getType()->isIncompleteArrayType())
+ break;
+
+ if (Field->isUnnamedBitfield()) {
+ // Don't initialize unnamed bitfields, e.g. "int : 20;"
+ ++Field;
+ continue;
+ }
+
+ // Make sure we can use this declaration.
+ bool InvalidUse;
+ if (VerifyOnly)
+ InvalidUse = !SemaRef.CanUseDecl(*Field);
+ else
+ InvalidUse = SemaRef.DiagnoseUseOfDecl(*Field,
+ IList->getInit(Index)->getLocStart());
+ if (InvalidUse) {
+ ++Index;
+ ++Field;
+ hadError = true;
+ continue;
+ }
+
+ InitializedEntity MemberEntity =
+ InitializedEntity::InitializeMember(*Field, &Entity);
+ CheckSubElementType(MemberEntity, IList, Field->getType(), Index,
+ StructuredList, StructuredIndex);
+ InitializedSomething = true;
+
+ if (DeclType->isUnionType() && !VerifyOnly) {
+ // Initialize the first field within the union.
+ StructuredList->setInitializedFieldInUnion(*Field);
+ }
+
+ ++Field;
+ }
+
+ // Emit warnings for missing struct field initializers.
+ if (!VerifyOnly && InitializedSomething && CheckForMissingFields &&
+ Field != FieldEnd && !Field->getType()->isIncompleteArrayType() &&
+ !DeclType->isUnionType()) {
+ // It is possible we have one or more unnamed bitfields remaining.
+ // Find first (if any) named field and emit warning.
+ for (RecordDecl::field_iterator it = Field, end = RD->field_end();
+ it != end; ++it) {
+ if (!it->isUnnamedBitfield()) {
+ SemaRef.Diag(IList->getSourceRange().getEnd(),
+ diag::warn_missing_field_initializers) << it->getName();
+ break;
+ }
+ }
+ }
+
+ // Check that any remaining fields can be value-initialized.
+ if (VerifyOnly && Field != FieldEnd && !DeclType->isUnionType() &&
+ !Field->getType()->isIncompleteArrayType()) {
+ // FIXME: Should check for holes left by designated initializers too.
+ for (; Field != FieldEnd && !hadError; ++Field) {
+ if (!Field->isUnnamedBitfield())
+ CheckValueInitializable(
+ InitializedEntity::InitializeMember(*Field, &Entity));
+ }
+ }
+
+ if (Field == FieldEnd || !Field->getType()->isIncompleteArrayType() ||
+ Index >= IList->getNumInits())
+ return;
+
+ if (CheckFlexibleArrayInit(Entity, IList->getInit(Index), *Field,
+ TopLevelObject)) {
+ hadError = true;
+ ++Index;
+ return;
+ }
+
+ InitializedEntity MemberEntity =
+ InitializedEntity::InitializeMember(*Field, &Entity);
+
+ if (isa<InitListExpr>(IList->getInit(Index)))
+ CheckSubElementType(MemberEntity, IList, Field->getType(), Index,
+ StructuredList, StructuredIndex);
+ else
+ CheckImplicitInitList(MemberEntity, IList, Field->getType(), Index,
+ StructuredList, StructuredIndex);
+}
+
+/// \brief Expand a field designator that refers to a member of an
+/// anonymous struct or union into a series of field designators that
+/// refers to the field within the appropriate subobject.
+///
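+/// For example, given "struct S { struct { int x; }; };", the designator in
+/// "struct S s = { .x = 1 };" is expanded into designators that name the
+/// anonymous struct member and then "x" within it.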
+static void ExpandAnonymousFieldDesignator(Sema &SemaRef,
+ DesignatedInitExpr *DIE,
+ unsigned DesigIdx,
+ IndirectFieldDecl *IndirectField) {
+ typedef DesignatedInitExpr::Designator Designator;
+
+ // Build the replacement designators.
+ SmallVector<Designator, 4> Replacements;
+ for (IndirectFieldDecl::chain_iterator PI = IndirectField->chain_begin(),
+ PE = IndirectField->chain_end(); PI != PE; ++PI) {
+ if (PI + 1 == PE)
+ Replacements.push_back(Designator((IdentifierInfo *)0,
+ DIE->getDesignator(DesigIdx)->getDotLoc(),
+ DIE->getDesignator(DesigIdx)->getFieldLoc()));
+ else
+ Replacements.push_back(Designator((IdentifierInfo *)0, SourceLocation(),
+ SourceLocation()));
+ assert(isa<FieldDecl>(*PI));
+ Replacements.back().setField(cast<FieldDecl>(*PI));
+ }
+
+ // Expand the current designator into the set of replacement
+ // designators, so we have a full subobject path down to where the
+ // member of the anonymous struct/union is actually stored.
+ DIE->ExpandDesignator(SemaRef.Context, DesigIdx, &Replacements[0],
+ &Replacements[0] + Replacements.size());
+}
+
+/// \brief Given an implicit anonymous field, search for the IndirectFieldDecl
+/// that corresponds to FieldName.
+static IndirectFieldDecl *FindIndirectFieldDesignator(FieldDecl *AnonField,
+ IdentifierInfo *FieldName) {
+ assert(AnonField->isAnonymousStructOrUnion());
+ Decl *NextDecl = AnonField->getNextDeclInContext();
+ while (IndirectFieldDecl *IF =
+ dyn_cast_or_null<IndirectFieldDecl>(NextDecl)) {
+ if (FieldName && FieldName == IF->getAnonField()->getIdentifier())
+ return IF;
+ NextDecl = NextDecl->getNextDeclInContext();
+ }
+ return 0;
+}
+
+static DesignatedInitExpr *CloneDesignatedInitExpr(Sema &SemaRef,
+ DesignatedInitExpr *DIE) {
+ unsigned NumIndexExprs = DIE->getNumSubExprs() - 1;
+ SmallVector<Expr*, 4> IndexExprs(NumIndexExprs);
+ for (unsigned I = 0; I < NumIndexExprs; ++I)
+ IndexExprs[I] = DIE->getSubExpr(I + 1);
+ return DesignatedInitExpr::Create(SemaRef.Context, DIE->designators_begin(),
+ DIE->size(), IndexExprs.data(),
+ NumIndexExprs, DIE->getEqualOrColonLoc(),
+ DIE->usesGNUSyntax(), DIE->getInit());
+}
+
+namespace {
+
+// Callback to only accept typo corrections that are for field members of
+// the given struct or union.
+class FieldInitializerValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ explicit FieldInitializerValidatorCCC(RecordDecl *RD)
+ : Record(RD) {}
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ FieldDecl *FD = candidate.getCorrectionDeclAs<FieldDecl>();
+ return FD && FD->getDeclContext()->getRedeclContext()->Equals(Record);
+ }
+
+ private:
+ RecordDecl *Record;
+};
+
+}
+
+/// @brief Check the well-formedness of a C99 designated initializer.
+///
+/// Determines whether the designated initializer @p DIE, which
+/// resides at the given @p Index within the initializer list @p
+/// IList, is well-formed for a current object of type @p DeclType
+/// (C99 6.7.8). The actual subobject that this designator refers to
+/// within the current subobject is returned in either
+/// @p NextField or @p NextElementIndex (whichever is appropriate).
+///
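+/// For example, in "struct Point p = { .y = 2, .x = 1 };" each designator
+/// selects the member to be initialized, regardless of declaration order.
+///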
+/// @param IList The initializer list in which this designated
+/// initializer occurs.
+///
+/// @param DIE The designated initializer expression.
+///
+/// @param DesigIdx The index of the current designator.
+///
+/// @param DeclType The type of the "current object" (C99 6.7.8p17),
+/// into which the designation in @p DIE should refer.
+///
+/// @param NextField If non-NULL and the first designator in @p DIE is
+/// a field, this will be set to the field declaration corresponding
+/// to the field named by the designator.
+///
+/// @param NextElementIndex If non-NULL and the first designator in @p
+/// DIE is an array designator or GNU array-range designator, this
+/// will be set to the last index initialized by this designator.
+///
+/// @param Index Index into @p IList where the designated initializer
+/// @p DIE occurs.
+///
+/// @param StructuredList The initializer list expression that
+/// describes all of the subobject initializers in the order they'll
+/// actually be initialized.
+///
+/// @returns true if there was an error, false otherwise.
+bool
+InitListChecker::CheckDesignatedInitializer(const InitializedEntity &Entity,
+ InitListExpr *IList,
+ DesignatedInitExpr *DIE,
+ unsigned DesigIdx,
+ QualType &CurrentObjectType,
+ RecordDecl::field_iterator *NextField,
+ llvm::APSInt *NextElementIndex,
+ unsigned &Index,
+ InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ bool FinishSubobjectInit,
+ bool TopLevelObject) {
+ if (DesigIdx == DIE->size()) {
+ // Check the actual initialization for the designated object type.
+ bool prevHadError = hadError;
+
+ // Temporarily remove the designator expression from the
+ // initializer list that the child calls see, so that we don't try
+ // to re-process the designator.
+ unsigned OldIndex = Index;
+ IList->setInit(OldIndex, DIE->getInit());
+
+ CheckSubElementType(Entity, IList, CurrentObjectType, Index,
+ StructuredList, StructuredIndex);
+
+ // Restore the designated initializer expression in the syntactic
+ // form of the initializer list.
+ if (IList->getInit(OldIndex) != DIE->getInit())
+ DIE->setInit(IList->getInit(OldIndex));
+ IList->setInit(OldIndex, DIE);
+
+ return hadError && !prevHadError;
+ }
+
+ DesignatedInitExpr::Designator *D = DIE->getDesignator(DesigIdx);
+ bool IsFirstDesignator = (DesigIdx == 0);
+ if (!VerifyOnly) {
+ assert((IsFirstDesignator || StructuredList) &&
+ "Need a non-designated initializer list to start from");
+
+ // Determine the structural initializer list that corresponds to the
+ // current subobject.
+ StructuredList = IsFirstDesignator? SyntacticToSemantic.lookup(IList)
+ : getStructuredSubobjectInit(IList, Index, CurrentObjectType,
+ StructuredList, StructuredIndex,
+ SourceRange(D->getStartLocation(),
+ DIE->getSourceRange().getEnd()));
+ assert(StructuredList && "Expected a structured initializer list");
+ }
+
+ if (D->isFieldDesignator()) {
+ // C99 6.7.8p7:
+ //
+ // If a designator has the form
+ //
+ // . identifier
+ //
+ // then the current object (defined below) shall have
+ // structure or union type and the identifier shall be the
+ // name of a member of that type.
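+    // For example, "struct T t = { .count = 3 };" requires "count" to name
+    // a member of "struct T".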
+ const RecordType *RT = CurrentObjectType->getAs<RecordType>();
+ if (!RT) {
+ SourceLocation Loc = D->getDotLoc();
+ if (Loc.isInvalid())
+ Loc = D->getFieldLoc();
+ if (!VerifyOnly)
+ SemaRef.Diag(Loc, diag::err_field_designator_non_aggr)
+ << SemaRef.getLangOpts().CPlusPlus << CurrentObjectType;
+ ++Index;
+ return true;
+ }
+
+ // Note: we perform a linear search of the fields here, despite
+ // the fact that we have a faster lookup method, because we always
+ // need to compute the field's index.
+ FieldDecl *KnownField = D->getField();
+ IdentifierInfo *FieldName = D->getFieldName();
+ unsigned FieldIndex = 0;
+ RecordDecl::field_iterator
+ Field = RT->getDecl()->field_begin(),
+ FieldEnd = RT->getDecl()->field_end();
+ for (; Field != FieldEnd; ++Field) {
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ // If we find a field representing an anonymous field, look in the
+      // IndirectFieldDecls that follow for the designated initializer.
+ if (!KnownField && Field->isAnonymousStructOrUnion()) {
+ if (IndirectFieldDecl *IF =
+ FindIndirectFieldDesignator(*Field, FieldName)) {
+ // In verify mode, don't modify the original.
+ if (VerifyOnly)
+ DIE = CloneDesignatedInitExpr(SemaRef, DIE);
+ ExpandAnonymousFieldDesignator(SemaRef, DIE, DesigIdx, IF);
+ D = DIE->getDesignator(DesigIdx);
+ break;
+ }
+ }
+ if (KnownField && KnownField == *Field)
+ break;
+ if (FieldName && FieldName == Field->getIdentifier())
+ break;
+
+ ++FieldIndex;
+ }
+
+ if (Field == FieldEnd) {
+ if (VerifyOnly) {
+ ++Index;
+ return true; // No typo correction when just trying this out.
+ }
+
+ // There was no normal field in the struct with the designated
+ // name. Perform another lookup for this name, which may find
+ // something that we can't designate (e.g., a member function),
+ // may find nothing, or may find a member of an anonymous
+ // struct/union.
+ DeclContext::lookup_result Lookup = RT->getDecl()->lookup(FieldName);
+ FieldDecl *ReplacementField = 0;
+ if (Lookup.first == Lookup.second) {
+ // Name lookup didn't find anything. Determine whether this
+ // was a typo for another field name.
+ FieldInitializerValidatorCCC Validator(RT->getDecl());
+ TypoCorrection Corrected = SemaRef.CorrectTypo(
+ DeclarationNameInfo(FieldName, D->getFieldLoc()),
+ Sema::LookupMemberName, /*Scope=*/0, /*SS=*/0, Validator,
+ RT->getDecl());
+ if (Corrected) {
+ std::string CorrectedStr(
+ Corrected.getAsString(SemaRef.getLangOpts()));
+ std::string CorrectedQuotedStr(
+ Corrected.getQuoted(SemaRef.getLangOpts()));
+ ReplacementField = Corrected.getCorrectionDeclAs<FieldDecl>();
+ SemaRef.Diag(D->getFieldLoc(),
+ diag::err_field_designator_unknown_suggest)
+ << FieldName << CurrentObjectType << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(D->getFieldLoc(), CorrectedStr);
+ SemaRef.Diag(ReplacementField->getLocation(),
+ diag::note_previous_decl) << CorrectedQuotedStr;
+ hadError = true;
+ } else {
+ SemaRef.Diag(D->getFieldLoc(), diag::err_field_designator_unknown)
+ << FieldName << CurrentObjectType;
+ ++Index;
+ return true;
+ }
+ }
+
+ if (!ReplacementField) {
+ // Name lookup found something, but it wasn't a field.
+ SemaRef.Diag(D->getFieldLoc(), diag::err_field_designator_nonfield)
+ << FieldName;
+ SemaRef.Diag((*Lookup.first)->getLocation(),
+ diag::note_field_designator_found);
+ ++Index;
+ return true;
+ }
+
+ if (!KnownField) {
+ // The replacement field comes from typo correction; find it
+ // in the list of fields.
+ FieldIndex = 0;
+ Field = RT->getDecl()->field_begin();
+ for (; Field != FieldEnd; ++Field) {
+ if (Field->isUnnamedBitfield())
+ continue;
+
+ if (ReplacementField == *Field ||
+ Field->getIdentifier() == ReplacementField->getIdentifier())
+ break;
+
+ ++FieldIndex;
+ }
+ }
+ }
+
+ // All of the fields of a union are located at the same place in
+ // the initializer list.
+ if (RT->getDecl()->isUnion()) {
+ FieldIndex = 0;
+ if (!VerifyOnly)
+ StructuredList->setInitializedFieldInUnion(*Field);
+ }
+
+ // Make sure we can use this declaration.
+ bool InvalidUse;
+ if (VerifyOnly)
+ InvalidUse = !SemaRef.CanUseDecl(*Field);
+ else
+ InvalidUse = SemaRef.DiagnoseUseOfDecl(*Field, D->getFieldLoc());
+ if (InvalidUse) {
+ ++Index;
+ return true;
+ }
+
+ if (!VerifyOnly) {
+ // Update the designator with the field declaration.
+ D->setField(*Field);
+
+ // Make sure that our non-designated initializer list has space
+ // for a subobject corresponding to this field.
+ if (FieldIndex >= StructuredList->getNumInits())
+ StructuredList->resizeInits(SemaRef.Context, FieldIndex + 1);
+ }
+
+ // This designator names a flexible array member.
+ if (Field->getType()->isIncompleteArrayType()) {
+ bool Invalid = false;
+ if ((DesigIdx + 1) != DIE->size()) {
+ // We can't designate an object within the flexible array
+ // member (because GCC doesn't allow it).
+ if (!VerifyOnly) {
+ DesignatedInitExpr::Designator *NextD
+ = DIE->getDesignator(DesigIdx + 1);
+ SemaRef.Diag(NextD->getStartLocation(),
+ diag::err_designator_into_flexible_array_member)
+ << SourceRange(NextD->getStartLocation(),
+ DIE->getSourceRange().getEnd());
+ SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member)
+ << *Field;
+ }
+ Invalid = true;
+ }
+
+ if (!hadError && !isa<InitListExpr>(DIE->getInit()) &&
+ !isa<StringLiteral>(DIE->getInit())) {
+ // The initializer is not an initializer list.
+ if (!VerifyOnly) {
+ SemaRef.Diag(DIE->getInit()->getLocStart(),
+ diag::err_flexible_array_init_needs_braces)
+ << DIE->getInit()->getSourceRange();
+ SemaRef.Diag(Field->getLocation(), diag::note_flexible_array_member)
+ << *Field;
+ }
+ Invalid = true;
+ }
+
+ // Check GNU flexible array initializer.
+ if (!Invalid && CheckFlexibleArrayInit(Entity, DIE->getInit(), *Field,
+ TopLevelObject))
+ Invalid = true;
+
+ if (Invalid) {
+ ++Index;
+ return true;
+ }
+
+ // Initialize the array.
+ bool prevHadError = hadError;
+ unsigned newStructuredIndex = FieldIndex;
+ unsigned OldIndex = Index;
+ IList->setInit(Index, DIE->getInit());
+
+ InitializedEntity MemberEntity =
+ InitializedEntity::InitializeMember(*Field, &Entity);
+ CheckSubElementType(MemberEntity, IList, Field->getType(), Index,
+ StructuredList, newStructuredIndex);
+
+ IList->setInit(OldIndex, DIE);
+ if (hadError && !prevHadError) {
+ ++Field;
+ ++FieldIndex;
+ if (NextField)
+ *NextField = Field;
+ StructuredIndex = FieldIndex;
+ return true;
+ }
+ } else {
+ // Recurse to check later designated subobjects.
+ QualType FieldType = (*Field)->getType();
+ unsigned newStructuredIndex = FieldIndex;
+
+ InitializedEntity MemberEntity =
+ InitializedEntity::InitializeMember(*Field, &Entity);
+ if (CheckDesignatedInitializer(MemberEntity, IList, DIE, DesigIdx + 1,
+ FieldType, 0, 0, Index,
+ StructuredList, newStructuredIndex,
+ true, false))
+ return true;
+ }
+
+ // Find the position of the next field to be initialized in this
+ // subobject.
+ ++Field;
+ ++FieldIndex;
+
+  // If this is the first designator, our caller will continue checking
+ // the rest of this struct/class/union subobject.
+ if (IsFirstDesignator) {
+ if (NextField)
+ *NextField = Field;
+ StructuredIndex = FieldIndex;
+ return false;
+ }
+
+ if (!FinishSubobjectInit)
+ return false;
+
+ // We've already initialized something in the union; we're done.
+ if (RT->getDecl()->isUnion())
+ return hadError;
+
+ // Check the remaining fields within this class/struct/union subobject.
+ bool prevHadError = hadError;
+
+ CheckStructUnionTypes(Entity, IList, CurrentObjectType, Field, false, Index,
+ StructuredList, FieldIndex);
+ return hadError && !prevHadError;
+ }
+
+ // C99 6.7.8p6:
+ //
+ // If a designator has the form
+ //
+ // [ constant-expression ]
+ //
+ // then the current object (defined below) shall have array
+ // type and the expression shall be an integer constant
+ // expression. If the array is of unknown size, any
+ // nonnegative value is valid.
+ //
+ // Additionally, cope with the GNU extension that permits
+ // designators of the form
+ //
+ // [ constant-expression ... constant-expression ]
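+  // For example: "int a[10] = { [2] = 1, [4 ... 6] = 2 };".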
+ const ArrayType *AT = SemaRef.Context.getAsArrayType(CurrentObjectType);
+ if (!AT) {
+ if (!VerifyOnly)
+ SemaRef.Diag(D->getLBracketLoc(), diag::err_array_designator_non_array)
+ << CurrentObjectType;
+ ++Index;
+ return true;
+ }
+
+ Expr *IndexExpr = 0;
+ llvm::APSInt DesignatedStartIndex, DesignatedEndIndex;
+ if (D->isArrayDesignator()) {
+ IndexExpr = DIE->getArrayIndex(*D);
+ DesignatedStartIndex = IndexExpr->EvaluateKnownConstInt(SemaRef.Context);
+ DesignatedEndIndex = DesignatedStartIndex;
+ } else {
+ assert(D->isArrayRangeDesignator() && "Need array-range designator");
+
+ DesignatedStartIndex =
+ DIE->getArrayRangeStart(*D)->EvaluateKnownConstInt(SemaRef.Context);
+ DesignatedEndIndex =
+ DIE->getArrayRangeEnd(*D)->EvaluateKnownConstInt(SemaRef.Context);
+ IndexExpr = DIE->getArrayRangeEnd(*D);
+
+ // Codegen can't handle evaluating array range designators that have side
+ // effects, because we replicate the AST value for each initialized element.
+ // As such, set the sawArrayRangeDesignator() bit if we initialize multiple
+ // elements with something that has a side effect, so codegen can emit an
+ // "error unsupported" error instead of miscompiling the app.
+ if (DesignatedStartIndex.getZExtValue()!=DesignatedEndIndex.getZExtValue()&&
+ DIE->getInit()->HasSideEffects(SemaRef.Context) && !VerifyOnly)
+ FullyStructuredList->sawArrayRangeDesignator();
+ }
+
+ if (isa<ConstantArrayType>(AT)) {
+ llvm::APSInt MaxElements(cast<ConstantArrayType>(AT)->getSize(), false);
+ DesignatedStartIndex
+ = DesignatedStartIndex.extOrTrunc(MaxElements.getBitWidth());
+ DesignatedStartIndex.setIsUnsigned(MaxElements.isUnsigned());
+ DesignatedEndIndex
+ = DesignatedEndIndex.extOrTrunc(MaxElements.getBitWidth());
+ DesignatedEndIndex.setIsUnsigned(MaxElements.isUnsigned());
+ if (DesignatedEndIndex >= MaxElements) {
+ if (!VerifyOnly)
+ SemaRef.Diag(IndexExpr->getLocStart(),
+ diag::err_array_designator_too_large)
+ << DesignatedEndIndex.toString(10) << MaxElements.toString(10)
+ << IndexExpr->getSourceRange();
+ ++Index;
+ return true;
+ }
+ } else {
+ // Make sure the bit-widths and signedness match.
+ if (DesignatedStartIndex.getBitWidth() > DesignatedEndIndex.getBitWidth())
+ DesignatedEndIndex
+ = DesignatedEndIndex.extend(DesignatedStartIndex.getBitWidth());
+ else if (DesignatedStartIndex.getBitWidth() <
+ DesignatedEndIndex.getBitWidth())
+ DesignatedStartIndex
+ = DesignatedStartIndex.extend(DesignatedEndIndex.getBitWidth());
+ DesignatedStartIndex.setIsUnsigned(true);
+ DesignatedEndIndex.setIsUnsigned(true);
+ }
+
+ // Make sure that our non-designated initializer list has space
+ // for a subobject corresponding to this array element.
+ if (!VerifyOnly &&
+ DesignatedEndIndex.getZExtValue() >= StructuredList->getNumInits())
+ StructuredList->resizeInits(SemaRef.Context,
+ DesignatedEndIndex.getZExtValue() + 1);
+
+ // Repeatedly perform subobject initializations in the range
+ // [DesignatedStartIndex, DesignatedEndIndex].
+
+ // Move to the next designator
+ unsigned ElementIndex = DesignatedStartIndex.getZExtValue();
+ unsigned OldIndex = Index;
+
+ InitializedEntity ElementEntity =
+ InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);
+
+ while (DesignatedStartIndex <= DesignatedEndIndex) {
+ // Recurse to check later designated subobjects.
+ QualType ElementType = AT->getElementType();
+ Index = OldIndex;
+
+ ElementEntity.setElementIndex(ElementIndex);
+ if (CheckDesignatedInitializer(ElementEntity, IList, DIE, DesigIdx + 1,
+ ElementType, 0, 0, Index,
+ StructuredList, ElementIndex,
+ (DesignatedStartIndex == DesignatedEndIndex),
+ false))
+ return true;
+
+ // Move to the next index in the array that we'll be initializing.
+ ++DesignatedStartIndex;
+ ElementIndex = DesignatedStartIndex.getZExtValue();
+ }
+
+  // If this is the first designator, our caller will continue checking
+ // the rest of this array subobject.
+ if (IsFirstDesignator) {
+ if (NextElementIndex)
+ *NextElementIndex = DesignatedStartIndex;
+ StructuredIndex = ElementIndex;
+ return false;
+ }
+
+ if (!FinishSubobjectInit)
+ return false;
+
+ // Check the remaining elements within this array subobject.
+ bool prevHadError = hadError;
+ CheckArrayType(Entity, IList, CurrentObjectType, DesignatedStartIndex,
+ /*SubobjectIsDesignatorContext=*/false, Index,
+ StructuredList, ElementIndex);
+ return hadError && !prevHadError;
+}
+
+// Get the structured initializer list for a subobject of type
+// @p CurrentObjectType.
+InitListExpr *
+InitListChecker::getStructuredSubobjectInit(InitListExpr *IList, unsigned Index,
+ QualType CurrentObjectType,
+ InitListExpr *StructuredList,
+ unsigned StructuredIndex,
+ SourceRange InitRange) {
+ if (VerifyOnly)
+ return 0; // No structured list in verification-only mode.
+ Expr *ExistingInit = 0;
+ if (!StructuredList)
+ ExistingInit = SyntacticToSemantic.lookup(IList);
+ else if (StructuredIndex < StructuredList->getNumInits())
+ ExistingInit = StructuredList->getInit(StructuredIndex);
+
+ if (InitListExpr *Result = dyn_cast_or_null<InitListExpr>(ExistingInit))
+ return Result;
+
+ if (ExistingInit) {
+ // We are creating an initializer list that initializes the
+ // subobjects of the current object, but there was already an
+ // initialization that completely initialized the current
+ // subobject, e.g., by a compound literal:
+ //
+ // struct X { int a, b; };
+ // struct X xs[] = { [0] = (struct X) { 1, 2 }, [0].b = 3 };
+ //
+ // Here, xs[0].a == 0 and xs[0].b == 3, since the second,
+ // designated initializer re-initializes the whole
+ // subobject [0], overwriting previous initializers.
+ SemaRef.Diag(InitRange.getBegin(),
+ diag::warn_subobject_initializer_overrides)
+ << InitRange;
+ SemaRef.Diag(ExistingInit->getLocStart(),
+ diag::note_previous_initializer)
+ << /*FIXME:has side effects=*/0
+ << ExistingInit->getSourceRange();
+ }
+
+ InitListExpr *Result
+ = new (SemaRef.Context) InitListExpr(SemaRef.Context,
+ InitRange.getBegin(), 0, 0,
+ InitRange.getEnd());
+
+ QualType ResultType = CurrentObjectType;
+ if (!ResultType->isArrayType())
+ ResultType = ResultType.getNonLValueExprType(SemaRef.Context);
+ Result->setType(ResultType);
+
+ // Pre-allocate storage for the structured initializer list.
+ unsigned NumElements = 0;
+ unsigned NumInits = 0;
+ bool GotNumInits = false;
+ if (!StructuredList) {
+ NumInits = IList->getNumInits();
+ GotNumInits = true;
+ } else if (Index < IList->getNumInits()) {
+ if (InitListExpr *SubList = dyn_cast<InitListExpr>(IList->getInit(Index))) {
+ NumInits = SubList->getNumInits();
+ GotNumInits = true;
+ }
+ }
+
+ if (const ArrayType *AType
+ = SemaRef.Context.getAsArrayType(CurrentObjectType)) {
+ if (const ConstantArrayType *CAType = dyn_cast<ConstantArrayType>(AType)) {
+ NumElements = CAType->getSize().getZExtValue();
+ // Simple heuristic so that we don't allocate a very large
+ // initializer with many empty entries at the end.
+ if (GotNumInits && NumElements > NumInits)
+ NumElements = 0;
+ }
+ } else if (const VectorType *VType = CurrentObjectType->getAs<VectorType>())
+ NumElements = VType->getNumElements();
+ else if (const RecordType *RType = CurrentObjectType->getAs<RecordType>()) {
+ RecordDecl *RDecl = RType->getDecl();
+ if (RDecl->isUnion())
+ NumElements = 1;
+ else
+ NumElements = std::distance(RDecl->field_begin(),
+ RDecl->field_end());
+ }
+
+ Result->reserveInits(SemaRef.Context, NumElements);
+
+ // Link this new initializer list into the structured initializer
+ // lists.
+ if (StructuredList)
+ StructuredList->updateInit(SemaRef.Context, StructuredIndex, Result);
+ else {
+ Result->setSyntacticForm(IList);
+ SyntacticToSemantic[IList] = Result;
+ }
+
+ return Result;
+}
+
+/// Update the initializer at index @p StructuredIndex within the
+/// structured initializer list to the value @p expr.
+void InitListChecker::UpdateStructuredListElement(InitListExpr *StructuredList,
+ unsigned &StructuredIndex,
+ Expr *expr) {
+ // No structured initializer list to update
+ if (!StructuredList)
+ return;
+
+ if (Expr *PrevInit = StructuredList->updateInit(SemaRef.Context,
+ StructuredIndex, expr)) {
+ // This initializer overwrites a previous initializer. Warn.
+ SemaRef.Diag(expr->getLocStart(),
+ diag::warn_initializer_overrides)
+ << expr->getSourceRange();
+ SemaRef.Diag(PrevInit->getLocStart(),
+ diag::note_previous_initializer)
+ << /*FIXME:has side effects=*/0
+ << PrevInit->getSourceRange();
+ }
+
+ ++StructuredIndex;
+}
+
+/// Check that the given Index expression is a valid array designator
+/// value. This is essentially just a wrapper around
+/// VerifyIntegerConstantExpression that also checks for negative values
+/// and produces a reasonable diagnostic if there is a
+/// failure. Returns the index expression, possibly with an implicit cast
+/// added, on success. If everything went okay, Value will receive the
+/// value of the constant expression.
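+///
+/// For example, this rejects
+///
+///   int a[] = { [-1] = 0 };
+///
+/// with err_array_designator_negative, since array designators must be
+/// non-negative integer constant expressions.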
+static ExprResult
+CheckArrayDesignatorExpr(Sema &S, Expr *Index, llvm::APSInt &Value) {
+ SourceLocation Loc = Index->getLocStart();
+
+ // Make sure this is an integer constant expression.
+ ExprResult Result = S.VerifyIntegerConstantExpression(Index, &Value);
+ if (Result.isInvalid())
+ return Result;
+
+ if (Value.isSigned() && Value.isNegative())
+ return S.Diag(Loc, diag::err_array_designator_negative)
+ << Value.toString(10) << Index->getSourceRange();
+
+ Value.setIsUnsigned(true);
+ return Result;
+}
+
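+// Builds a DesignatedInitExpr for a parsed designation, e.g.
+//
+//   struct Point { int x, y; };
+//   struct Point p = { .y = 2, .x = 1 };    // field designators
+//   int a[10] = { [0] = 1, [2 ... 4] = 7 }; // array and GNU range designators
+//
+// Array (and range) indices are checked, when not dependent, as non-negative
+// integer constant expressions by CheckArrayDesignatorExpr above; when the
+// language mode is not C99, the whole construct is additionally diagnosed as
+// an extension (ext_designated_init).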
+ExprResult Sema::ActOnDesignatedInitializer(Designation &Desig,
+ SourceLocation Loc,
+ bool GNUSyntax,
+ ExprResult Init) {
+ typedef DesignatedInitExpr::Designator ASTDesignator;
+
+ bool Invalid = false;
+ SmallVector<ASTDesignator, 32> Designators;
+ SmallVector<Expr *, 32> InitExpressions;
+
+ // Build designators and check array designator expressions.
+ for (unsigned Idx = 0; Idx < Desig.getNumDesignators(); ++Idx) {
+ const Designator &D = Desig.getDesignator(Idx);
+ switch (D.getKind()) {
+ case Designator::FieldDesignator:
+ Designators.push_back(ASTDesignator(D.getField(), D.getDotLoc(),
+ D.getFieldLoc()));
+ break;
+
+ case Designator::ArrayDesignator: {
+ Expr *Index = static_cast<Expr *>(D.getArrayIndex());
+ llvm::APSInt IndexValue;
+ if (!Index->isTypeDependent() && !Index->isValueDependent())
+ Index = CheckArrayDesignatorExpr(*this, Index, IndexValue).take();
+ if (!Index)
+ Invalid = true;
+ else {
+ Designators.push_back(ASTDesignator(InitExpressions.size(),
+ D.getLBracketLoc(),
+ D.getRBracketLoc()));
+ InitExpressions.push_back(Index);
+ }
+ break;
+ }
+
+ case Designator::ArrayRangeDesignator: {
+ Expr *StartIndex = static_cast<Expr *>(D.getArrayRangeStart());
+ Expr *EndIndex = static_cast<Expr *>(D.getArrayRangeEnd());
+ llvm::APSInt StartValue;
+ llvm::APSInt EndValue;
+ bool StartDependent = StartIndex->isTypeDependent() ||
+ StartIndex->isValueDependent();
+ bool EndDependent = EndIndex->isTypeDependent() ||
+ EndIndex->isValueDependent();
+ if (!StartDependent)
+ StartIndex =
+ CheckArrayDesignatorExpr(*this, StartIndex, StartValue).take();
+ if (!EndDependent)
+ EndIndex = CheckArrayDesignatorExpr(*this, EndIndex, EndValue).take();
+
+ if (!StartIndex || !EndIndex)
+ Invalid = true;
+ else {
+ // Make sure we're comparing values with the same bit width.
+ if (StartDependent || EndDependent) {
+ // Nothing to compute.
+ } else if (StartValue.getBitWidth() > EndValue.getBitWidth())
+ EndValue = EndValue.extend(StartValue.getBitWidth());
+ else if (StartValue.getBitWidth() < EndValue.getBitWidth())
+ StartValue = StartValue.extend(EndValue.getBitWidth());
+
+ if (!StartDependent && !EndDependent && EndValue < StartValue) {
+ Diag(D.getEllipsisLoc(), diag::err_array_designator_empty_range)
+ << StartValue.toString(10) << EndValue.toString(10)
+ << StartIndex->getSourceRange() << EndIndex->getSourceRange();
+ Invalid = true;
+ } else {
+ Designators.push_back(ASTDesignator(InitExpressions.size(),
+ D.getLBracketLoc(),
+ D.getEllipsisLoc(),
+ D.getRBracketLoc()));
+ InitExpressions.push_back(StartIndex);
+ InitExpressions.push_back(EndIndex);
+ }
+ }
+ break;
+ }
+ }
+ }
+
+ if (Invalid || Init.isInvalid())
+ return ExprError();
+
+ // Clear out the expressions within the designation.
+ Desig.ClearExprs(*this);
+
+ DesignatedInitExpr *DIE
+ = DesignatedInitExpr::Create(Context,
+ Designators.data(), Designators.size(),
+ InitExpressions.data(), InitExpressions.size(),
+ Loc, GNUSyntax, Init.takeAs<Expr>());
+
+ if (!getLangOpts().C99)
+ Diag(DIE->getLocStart(), diag::ext_designated_init)
+ << DIE->getSourceRange();
+
+ return Owned(DIE);
+}
+
+//===----------------------------------------------------------------------===//
+// Initialization entity
+//===----------------------------------------------------------------------===//
+
+InitializedEntity::InitializedEntity(ASTContext &Context, unsigned Index,
+ const InitializedEntity &Parent)
+ : Parent(&Parent), Index(Index)
+{
+ if (const ArrayType *AT = Context.getAsArrayType(Parent.getType())) {
+ Kind = EK_ArrayElement;
+ Type = AT->getElementType();
+ } else if (const VectorType *VT = Parent.getType()->getAs<VectorType>()) {
+ Kind = EK_VectorElement;
+ Type = VT->getElementType();
+ } else {
+ const ComplexType *CT = Parent.getType()->getAs<ComplexType>();
+ assert(CT && "Unexpected type");
+ Kind = EK_ComplexElement;
+ Type = CT->getElementType();
+ }
+}
+
+InitializedEntity InitializedEntity::InitializeBase(ASTContext &Context,
+ CXXBaseSpecifier *Base,
+ bool IsInheritedVirtualBase)
+{
+ InitializedEntity Result;
+ Result.Kind = EK_Base;
+ Result.Base = reinterpret_cast<uintptr_t>(Base);
+ if (IsInheritedVirtualBase)
+ Result.Base |= 0x01;
+
+ Result.Type = Base->getType();
+ return Result;
+}
+
+DeclarationName InitializedEntity::getName() const {
+ switch (getKind()) {
+ case EK_Parameter: {
+ ParmVarDecl *D = reinterpret_cast<ParmVarDecl*>(Parameter & ~0x1);
+ return (D ? D->getDeclName() : DeclarationName());
+ }
+
+ case EK_Variable:
+ case EK_Member:
+ return VariableOrMember->getDeclName();
+
+ case EK_LambdaCapture:
+ return Capture.Var->getDeclName();
+
+ case EK_Result:
+ case EK_Exception:
+ case EK_New:
+ case EK_Temporary:
+ case EK_Base:
+ case EK_Delegating:
+ case EK_ArrayElement:
+ case EK_VectorElement:
+ case EK_ComplexElement:
+ case EK_BlockElement:
+ return DeclarationName();
+ }
+
+ llvm_unreachable("Invalid EntityKind!");
+}
+
+DeclaratorDecl *InitializedEntity::getDecl() const {
+ switch (getKind()) {
+ case EK_Variable:
+ case EK_Member:
+ return VariableOrMember;
+
+ case EK_Parameter:
+ return reinterpret_cast<ParmVarDecl*>(Parameter & ~0x1);
+
+ case EK_Result:
+ case EK_Exception:
+ case EK_New:
+ case EK_Temporary:
+ case EK_Base:
+ case EK_Delegating:
+ case EK_ArrayElement:
+ case EK_VectorElement:
+ case EK_ComplexElement:
+ case EK_BlockElement:
+ case EK_LambdaCapture:
+ return 0;
+ }
+
+ llvm_unreachable("Invalid EntityKind!");
+}
+
+bool InitializedEntity::allowsNRVO() const {
+ switch (getKind()) {
+ case EK_Result:
+ case EK_Exception:
+ return LocAndNRVO.NRVO;
+
+ case EK_Variable:
+ case EK_Parameter:
+ case EK_Member:
+ case EK_New:
+ case EK_Temporary:
+ case EK_Base:
+ case EK_Delegating:
+ case EK_ArrayElement:
+ case EK_VectorElement:
+ case EK_ComplexElement:
+ case EK_BlockElement:
+ case EK_LambdaCapture:
+ break;
+ }
+
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Initialization sequence
+//===----------------------------------------------------------------------===//
+
+void InitializationSequence::Step::Destroy() {
+ switch (Kind) {
+ case SK_ResolveAddressOfOverloadedFunction:
+ case SK_CastDerivedToBaseRValue:
+ case SK_CastDerivedToBaseXValue:
+ case SK_CastDerivedToBaseLValue:
+ case SK_BindReference:
+ case SK_BindReferenceToTemporary:
+ case SK_ExtraneousCopyToTemporary:
+ case SK_UserConversion:
+ case SK_QualificationConversionRValue:
+ case SK_QualificationConversionXValue:
+ case SK_QualificationConversionLValue:
+ case SK_ListInitialization:
+ case SK_ListConstructorCall:
+ case SK_UnwrapInitList:
+ case SK_RewrapInitList:
+ case SK_ConstructorInitialization:
+ case SK_ZeroInitialization:
+ case SK_CAssignment:
+ case SK_StringInit:
+ case SK_ObjCObjectConversion:
+ case SK_ArrayInit:
+ case SK_ParenthesizedArrayInit:
+ case SK_PassByIndirectCopyRestore:
+ case SK_PassByIndirectRestore:
+ case SK_ProduceObjCObject:
+ case SK_StdInitializerList:
+ break;
+
+ case SK_ConversionSequence:
+ delete ICS;
+ }
+}
+
+bool InitializationSequence::isDirectReferenceBinding() const {
+ return !Steps.empty() && Steps.back().Kind == SK_BindReference;
+}
+
+bool InitializationSequence::isAmbiguous() const {
+ if (!Failed())
+ return false;
+
+ switch (getFailureKind()) {
+ case FK_TooManyInitsForReference:
+ case FK_ArrayNeedsInitList:
+ case FK_ArrayNeedsInitListOrStringLiteral:
+ case FK_AddressOfOverloadFailed: // FIXME: Could do better
+ case FK_NonConstLValueReferenceBindingToTemporary:
+ case FK_NonConstLValueReferenceBindingToUnrelated:
+ case FK_RValueReferenceBindingToLValue:
+ case FK_ReferenceInitDropsQualifiers:
+ case FK_ReferenceInitFailed:
+ case FK_ConversionFailed:
+ case FK_ConversionFromPropertyFailed:
+ case FK_TooManyInitsForScalar:
+ case FK_ReferenceBindingToInitList:
+ case FK_InitListBadDestinationType:
+ case FK_DefaultInitOfConst:
+ case FK_Incomplete:
+ case FK_ArrayTypeMismatch:
+ case FK_NonConstantArrayInit:
+ case FK_ListInitializationFailed:
+ case FK_VariableLengthArrayHasInitializer:
+ case FK_PlaceholderType:
+ case FK_InitListElementCopyFailure:
+ case FK_ExplicitConstructor:
+ return false;
+
+ case FK_ReferenceInitOverloadFailed:
+ case FK_UserConversionOverloadFailed:
+ case FK_ConstructorOverloadFailed:
+ case FK_ListConstructorOverloadFailed:
+ return FailedOverloadResult == OR_Ambiguous;
+ }
+
+ llvm_unreachable("Invalid EntityKind!");
+}
+
+bool InitializationSequence::isConstructorInitialization() const {
+ return !Steps.empty() && Steps.back().Kind == SK_ConstructorInitialization;
+}
+
+void
+InitializationSequence
+::AddAddressOverloadResolutionStep(FunctionDecl *Function,
+ DeclAccessPair Found,
+ bool HadMultipleCandidates) {
+ Step S;
+ S.Kind = SK_ResolveAddressOfOverloadedFunction;
+ S.Type = Function->getType();
+ S.Function.HadMultipleCandidates = HadMultipleCandidates;
+ S.Function.Function = Function;
+ S.Function.FoundDecl = Found;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddDerivedToBaseCastStep(QualType BaseType,
+ ExprValueKind VK) {
+ Step S;
+ switch (VK) {
+ case VK_RValue: S.Kind = SK_CastDerivedToBaseRValue; break;
+ case VK_XValue: S.Kind = SK_CastDerivedToBaseXValue; break;
+ case VK_LValue: S.Kind = SK_CastDerivedToBaseLValue; break;
+ }
+ S.Type = BaseType;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddReferenceBindingStep(QualType T,
+ bool BindingTemporary) {
+ Step S;
+ S.Kind = BindingTemporary? SK_BindReferenceToTemporary : SK_BindReference;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddExtraneousCopyToTemporary(QualType T) {
+ Step S;
+ S.Kind = SK_ExtraneousCopyToTemporary;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void
+InitializationSequence::AddUserConversionStep(FunctionDecl *Function,
+ DeclAccessPair FoundDecl,
+ QualType T,
+ bool HadMultipleCandidates) {
+ Step S;
+ S.Kind = SK_UserConversion;
+ S.Type = T;
+ S.Function.HadMultipleCandidates = HadMultipleCandidates;
+ S.Function.Function = Function;
+ S.Function.FoundDecl = FoundDecl;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddQualificationConversionStep(QualType Ty,
+ ExprValueKind VK) {
+ Step S;
+ S.Kind = SK_QualificationConversionRValue; // work around a gcc warning
+ switch (VK) {
+ case VK_RValue:
+ S.Kind = SK_QualificationConversionRValue;
+ break;
+ case VK_XValue:
+ S.Kind = SK_QualificationConversionXValue;
+ break;
+ case VK_LValue:
+ S.Kind = SK_QualificationConversionLValue;
+ break;
+ }
+ S.Type = Ty;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddConversionSequenceStep(
+ const ImplicitConversionSequence &ICS,
+ QualType T) {
+ Step S;
+ S.Kind = SK_ConversionSequence;
+ S.Type = T;
+ S.ICS = new ImplicitConversionSequence(ICS);
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddListInitializationStep(QualType T) {
+ Step S;
+ S.Kind = SK_ListInitialization;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void
+InitializationSequence
+::AddConstructorInitializationStep(CXXConstructorDecl *Constructor,
+ AccessSpecifier Access,
+ QualType T,
+ bool HadMultipleCandidates,
+ bool FromInitList, bool AsInitList) {
+ Step S;
+ S.Kind = FromInitList && !AsInitList ? SK_ListConstructorCall
+ : SK_ConstructorInitialization;
+ S.Type = T;
+ S.Function.HadMultipleCandidates = HadMultipleCandidates;
+ S.Function.Function = Constructor;
+ S.Function.FoundDecl = DeclAccessPair::make(Constructor, Access);
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddZeroInitializationStep(QualType T) {
+ Step S;
+ S.Kind = SK_ZeroInitialization;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddCAssignmentStep(QualType T) {
+ Step S;
+ S.Kind = SK_CAssignment;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddStringInitStep(QualType T) {
+ Step S;
+ S.Kind = SK_StringInit;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddObjCObjectConversionStep(QualType T) {
+ Step S;
+ S.Kind = SK_ObjCObjectConversion;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddArrayInitStep(QualType T) {
+ Step S;
+ S.Kind = SK_ArrayInit;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddParenthesizedArrayInitStep(QualType T) {
+ Step S;
+ S.Kind = SK_ParenthesizedArrayInit;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddPassByIndirectCopyRestoreStep(QualType type,
+ bool shouldCopy) {
+ Step s;
+ s.Kind = (shouldCopy ? SK_PassByIndirectCopyRestore
+ : SK_PassByIndirectRestore);
+ s.Type = type;
+ Steps.push_back(s);
+}
+
+void InitializationSequence::AddProduceObjCObjectStep(QualType T) {
+ Step S;
+ S.Kind = SK_ProduceObjCObject;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::AddStdInitializerListConstructionStep(QualType T) {
+ Step S;
+ S.Kind = SK_StdInitializerList;
+ S.Type = T;
+ Steps.push_back(S);
+}
+
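+// Used by TryReferenceListInitialization (below) when a reference is
+// list-initialized from a single-element list whose element can be bound
+// directly: the element is unwrapped for the binding steps already computed,
+// and the syntactic init list is wrapped back around the result.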
+void InitializationSequence::RewrapReferenceInitList(QualType T,
+ InitListExpr *Syntactic) {
+ assert(Syntactic->getNumInits() == 1 &&
+ "Can only rewrap trivial init lists.");
+ Step S;
+ S.Kind = SK_UnwrapInitList;
+ S.Type = Syntactic->getInit(0)->getType();
+ Steps.insert(Steps.begin(), S);
+
+ S.Kind = SK_RewrapInitList;
+ S.Type = T;
+ S.WrappingSyntacticList = Syntactic;
+ Steps.push_back(S);
+}
+
+void InitializationSequence::SetOverloadFailure(FailureKind Failure,
+ OverloadingResult Result) {
+ setSequenceKind(FailedSequence);
+ this->Failure = Failure;
+ this->FailedOverloadResult = Result;
+}
+
+//===----------------------------------------------------------------------===//
+// Attempt initialization
+//===----------------------------------------------------------------------===//
+
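+// Under ARC, a call such as
+//
+//   void take(__attribute__((ns_consumed)) id obj);
+//   take(x);
+//
+// transfers ownership of the argument to the callee, so the argument value
+// has to be produced (retained) at the call site; similarly, returning a
+// value of retainable type retains it immediately.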
+static void MaybeProduceObjCObject(Sema &S,
+ InitializationSequence &Sequence,
+ const InitializedEntity &Entity) {
+ if (!S.getLangOpts().ObjCAutoRefCount) return;
+
+ /// When initializing a parameter, produce the value if it's marked
+ /// __attribute__((ns_consumed)).
+ if (Entity.getKind() == InitializedEntity::EK_Parameter) {
+ if (!Entity.isParameterConsumed())
+ return;
+
+ assert(Entity.getType()->isObjCRetainableType() &&
+ "consuming an object of unretainable type?");
+ Sequence.AddProduceObjCObjectStep(Entity.getType());
+
+ /// When initializing a return value, if the return type is a
+  /// retainable type, the returned object needs to be retained
+  /// immediately. If an autorelease is required, it will be done at the
+ /// last instant.
+ } else if (Entity.getKind() == InitializedEntity::EK_Result) {
+ if (!Entity.getType()->isObjCRetainableType())
+ return;
+
+ Sequence.AddProduceObjCObjectStep(Entity.getType());
+ }
+}
+
+/// \brief When initializing from an init list via a constructor, deal with the
+/// empty init list and std::initializer_list special cases.
+///
+/// \return True if this was a special case, false otherwise.
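+///
+/// For example:
+///
+///   struct S { S(); };
+///   S s{};                                   // empty list: value-initialization
+///   std::initializer_list<int> il = {1, 2};  // built from a backing array of 2 ints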
+static bool TryListConstructionSpecialCases(Sema &S,
+ InitListExpr *List,
+ CXXRecordDecl *DestRecordDecl,
+ QualType DestType,
+ InitializationSequence &Sequence) {
+ // C++11 [dcl.init.list]p3:
+ // List-initialization of an object or reference of type T is defined as
+ // follows:
+ // - If T is an aggregate, aggregate initialization is performed.
+ if (DestType->isAggregateType())
+ return false;
+
+ // - Otherwise, if the initializer list has no elements and T is a class
+ // type with a default constructor, the object is value-initialized.
+ if (List->getNumInits() == 0) {
+ if (CXXConstructorDecl *DefaultConstructor =
+ S.LookupDefaultConstructor(DestRecordDecl)) {
+ if (DefaultConstructor->isDeleted() ||
+ S.isFunctionConsideredUnavailable(DefaultConstructor)) {
+ // Fake an overload resolution failure.
+ OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
+ DeclAccessPair FoundDecl = DeclAccessPair::make(DefaultConstructor,
+ DefaultConstructor->getAccess());
+ if (FunctionTemplateDecl *ConstructorTmpl =
+ dyn_cast<FunctionTemplateDecl>(DefaultConstructor))
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
+ /*ExplicitArgs*/ 0,
+ ArrayRef<Expr*>(), CandidateSet,
+ /*SuppressUserConversions*/ false);
+ else
+ S.AddOverloadCandidate(DefaultConstructor, FoundDecl,
+ ArrayRef<Expr*>(), CandidateSet,
+ /*SuppressUserConversions*/ false);
+ Sequence.SetOverloadFailure(
+ InitializationSequence::FK_ListConstructorOverloadFailed,
+ OR_Deleted);
+ } else
+ Sequence.AddConstructorInitializationStep(DefaultConstructor,
+ DefaultConstructor->getAccess(),
+ DestType,
+ /*MultipleCandidates=*/false,
+ /*FromInitList=*/true,
+ /*AsInitList=*/false);
+ return true;
+ }
+ }
+
+ // - Otherwise, if T is a specialization of std::initializer_list, [...]
+ QualType E;
+ if (S.isStdInitializerList(DestType, &E)) {
+ // Check that each individual element can be copy-constructed. But since we
+ // have no place to store further information, we'll recalculate everything
+ // later.
+ InitializedEntity HiddenArray = InitializedEntity::InitializeTemporary(
+ S.Context.getConstantArrayType(E,
+ llvm::APInt(S.Context.getTypeSize(S.Context.getSizeType()),
+ List->getNumInits()),
+ ArrayType::Normal, 0));
+ InitializedEntity Element = InitializedEntity::InitializeElement(S.Context,
+ 0, HiddenArray);
+ for (unsigned i = 0, n = List->getNumInits(); i < n; ++i) {
+ Element.setElementIndex(i);
+ if (!S.CanPerformCopyInitialization(Element, List->getInit(i))) {
+ Sequence.SetFailed(
+ InitializationSequence::FK_InitListElementCopyFailure);
+ return true;
+ }
+ }
+ Sequence.AddStdInitializerListConstructionStep(DestType);
+ return true;
+ }
+
+ // Not a special case.
+ return false;
+}
+
+static OverloadingResult
+ResolveConstructorOverload(Sema &S, SourceLocation DeclLoc,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet &CandidateSet,
+ DeclContext::lookup_iterator Con,
+ DeclContext::lookup_iterator ConEnd,
+ OverloadCandidateSet::iterator &Best,
+ bool CopyInitializing, bool AllowExplicit,
+ bool OnlyListConstructors, bool InitListSyntax) {
+ CandidateSet.clear();
+
+ for (; Con != ConEnd; ++Con) {
+ NamedDecl *D = *Con;
+ DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
+ bool SuppressUserConversions = false;
+
+ // Find the constructor (which may be a template).
+ CXXConstructorDecl *Constructor = 0;
+ FunctionTemplateDecl *ConstructorTmpl = dyn_cast<FunctionTemplateDecl>(D);
+ if (ConstructorTmpl)
+ Constructor = cast<CXXConstructorDecl>(
+ ConstructorTmpl->getTemplatedDecl());
+ else {
+ Constructor = cast<CXXConstructorDecl>(D);
+
+ // If we're performing copy initialization using a copy constructor, we
+ // suppress user-defined conversions on the arguments. We do the same for
+ // move constructors.
+ if ((CopyInitializing || (InitListSyntax && NumArgs == 1)) &&
+ Constructor->isCopyOrMoveConstructor())
+ SuppressUserConversions = true;
+ }
+
+ if (!Constructor->isInvalidDecl() &&
+ (AllowExplicit || !Constructor->isExplicit()) &&
+ (!OnlyListConstructors || S.isInitListConstructor(Constructor))) {
+ if (ConstructorTmpl)
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
+ /*ExplicitArgs*/ 0,
+ llvm::makeArrayRef(Args, NumArgs),
+ CandidateSet, SuppressUserConversions);
+ else {
+ // C++ [over.match.copy]p1:
+ // - When initializing a temporary to be bound to the first parameter
+ // of a constructor that takes a reference to possibly cv-qualified
+ // T as its first argument, called with a single argument in the
+ // context of direct-initialization, explicit conversion functions
+ // are also considered.
+ bool AllowExplicitConv = AllowExplicit && !CopyInitializing &&
+ NumArgs == 1 &&
+ Constructor->isCopyOrMoveConstructor();
+ S.AddOverloadCandidate(Constructor, FoundDecl,
+ llvm::makeArrayRef(Args, NumArgs), CandidateSet,
+ SuppressUserConversions,
+ /*PartialOverloading=*/false,
+ /*AllowExplicit=*/AllowExplicitConv);
+ }
+ }
+ }
+
+ // Perform overload resolution and return the result.
+ return CandidateSet.BestViableFunction(S, DeclLoc, Best);
+}
+
+/// \brief Attempt initialization by constructor (C++ [dcl.init]), which
+/// enumerates the constructors of the initialized entity and performs overload
+/// resolution to select the best.
+/// If InitListSyntax is true, this is list-initialization of a non-aggregate
+/// class type.
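+///
+/// For example, for
+///
+///   struct P { P(int, int); };
+///   P p{1, 2};
+///
+/// the first phase finds no initializer-list constructor, so the second phase
+/// considers all constructors with the unwrapped elements 1 and 2; a class
+/// with an initializer_list constructor would instead take the whole braced
+/// list as a single argument in the first phase.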
+static void TryConstructorInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr **Args, unsigned NumArgs,
+ QualType DestType,
+ InitializationSequence &Sequence,
+ bool InitListSyntax = false) {
+ assert((!InitListSyntax || (NumArgs == 1 && isa<InitListExpr>(Args[0]))) &&
+ "InitListSyntax must come with a single initializer list argument.");
+
+ // Check constructor arguments for self reference.
+ if (DeclaratorDecl *DD = Entity.getDecl())
+    // Parameter arguments are occasionally constructed with themselves,
+    // for instance, in recursive functions. Skip them.
+ if (!isa<ParmVarDecl>(DD))
+ for (unsigned i = 0; i < NumArgs; ++i)
+ S.CheckSelfReference(DD, Args[i]);
+
+ // The type we're constructing needs to be complete.
+ if (S.RequireCompleteType(Kind.getLocation(), DestType, 0)) {
+ Sequence.setIncompleteTypeFailure(DestType);
+ return;
+ }
+
+ const RecordType *DestRecordType = DestType->getAs<RecordType>();
+ assert(DestRecordType && "Constructor initialization requires record type");
+ CXXRecordDecl *DestRecordDecl
+ = cast<CXXRecordDecl>(DestRecordType->getDecl());
+
+ if (InitListSyntax &&
+ TryListConstructionSpecialCases(S, cast<InitListExpr>(Args[0]),
+ DestRecordDecl, DestType, Sequence))
+ return;
+
+ // Build the candidate set directly in the initialization sequence
+ // structure, so that it will persist if we fail.
+ OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
+
+ // Determine whether we are allowed to call explicit constructors or
+ // explicit conversion operators.
+ bool AllowExplicit = Kind.AllowExplicit() || InitListSyntax;
+ bool CopyInitialization = Kind.getKind() == InitializationKind::IK_Copy;
+
+ // - Otherwise, if T is a class type, constructors are considered. The
+ // applicable constructors are enumerated, and the best one is chosen
+ // through overload resolution.
+ DeclContext::lookup_iterator ConStart, ConEnd;
+ llvm::tie(ConStart, ConEnd) = S.LookupConstructors(DestRecordDecl);
+
+ OverloadingResult Result = OR_No_Viable_Function;
+ OverloadCandidateSet::iterator Best;
+ bool AsInitializerList = false;
+
+ // C++11 [over.match.list]p1:
+ // When objects of non-aggregate type T are list-initialized, overload
+ // resolution selects the constructor in two phases:
+ // - Initially, the candidate functions are the initializer-list
+ // constructors of the class T and the argument list consists of the
+ // initializer list as a single argument.
+ if (InitListSyntax) {
+ AsInitializerList = true;
+ Result = ResolveConstructorOverload(S, Kind.getLocation(), Args, NumArgs,
+ CandidateSet, ConStart, ConEnd, Best,
+ CopyInitialization, AllowExplicit,
+                                        /*OnlyListConstructors=*/true,
+ InitListSyntax);
+
+ // Time to unwrap the init list.
+ InitListExpr *ILE = cast<InitListExpr>(Args[0]);
+ Args = ILE->getInits();
+ NumArgs = ILE->getNumInits();
+ }
+
+ // C++11 [over.match.list]p1:
+ // - If no viable initializer-list constructor is found, overload resolution
+ // is performed again, where the candidate functions are all the
+  //   constructors of the class T and the argument list consists of the
+ // elements of the initializer list.
+ if (Result == OR_No_Viable_Function) {
+ AsInitializerList = false;
+ Result = ResolveConstructorOverload(S, Kind.getLocation(), Args, NumArgs,
+ CandidateSet, ConStart, ConEnd, Best,
+ CopyInitialization, AllowExplicit,
+ /*OnlyListConstructors=*/false,
+ InitListSyntax);
+ }
+ if (Result) {
+ Sequence.SetOverloadFailure(InitListSyntax ?
+ InitializationSequence::FK_ListConstructorOverloadFailed :
+ InitializationSequence::FK_ConstructorOverloadFailed,
+ Result);
+ return;
+ }
+
+ // C++0x [dcl.init]p6:
+ // If a program calls for the default initialization of an object
+ // of a const-qualified type T, T shall be a class type with a
+ // user-provided default constructor.
+ if (Kind.getKind() == InitializationKind::IK_Default &&
+ Entity.getType().isConstQualified() &&
+ cast<CXXConstructorDecl>(Best->Function)->isImplicit()) {
+ Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst);
+ return;
+ }
+
+ // C++11 [over.match.list]p1:
+ // In copy-list-initialization, if an explicit constructor is chosen, the
+ // initializer is ill-formed.
+ CXXConstructorDecl *CtorDecl = cast<CXXConstructorDecl>(Best->Function);
+ if (InitListSyntax && !Kind.AllowExplicit() && CtorDecl->isExplicit()) {
+ Sequence.SetFailed(InitializationSequence::FK_ExplicitConstructor);
+ return;
+ }
+
+ // Add the constructor initialization step. Any cv-qualification conversion is
+ // subsumed by the initialization.
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+ Sequence.AddConstructorInitializationStep(CtorDecl,
+ Best->FoundDecl.getAccess(),
+ DestType, HadMultipleCandidates,
+ InitListSyntax, AsInitializerList);
+}
+
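+// Handles initializers that name an overloaded function, e.g.
+//
+//   void f(int);
+//   void f(double);
+//   void (&fr)(int) = f;   // picks f(int) from the overload set
+//
+// If resolution fails and the target is not a class type, the whole
+// initialization sequence is marked as failed.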
+static bool
+ResolveOverloadedFunctionForReferenceBinding(Sema &S,
+ Expr *Initializer,
+ QualType &SourceType,
+ QualType &UnqualifiedSourceType,
+ QualType UnqualifiedTargetType,
+ InitializationSequence &Sequence) {
+ if (S.Context.getCanonicalType(UnqualifiedSourceType) ==
+ S.Context.OverloadTy) {
+ DeclAccessPair Found;
+ bool HadMultipleCandidates = false;
+ if (FunctionDecl *Fn
+ = S.ResolveAddressOfOverloadedFunction(Initializer,
+ UnqualifiedTargetType,
+ false, Found,
+ &HadMultipleCandidates)) {
+ Sequence.AddAddressOverloadResolutionStep(Fn, Found,
+ HadMultipleCandidates);
+ SourceType = Fn->getType();
+ UnqualifiedSourceType = SourceType.getUnqualifiedType();
+ } else if (!UnqualifiedTargetType->isRecordType()) {
+ Sequence.SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
+ return true;
+ }
+ }
+ return false;
+}
+
+static void TryReferenceInitializationCore(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr *Initializer,
+ QualType cv1T1, QualType T1,
+ Qualifiers T1Quals,
+ QualType cv2T2, QualType T2,
+ Qualifiers T2Quals,
+ InitializationSequence &Sequence);
+
+static void TryListInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ InitListExpr *InitList,
+ InitializationSequence &Sequence);
+
+/// \brief Attempt list initialization of a reference.
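+///
+/// For example:
+///
+///   int i = 0;
+///   int       &r1 = {i};    // single reference-related element: binds to i
+///   const int &r2 = {42};   // initializes a temporary and binds to it
+///   int       &r3 = {42};   // error: non-const lvalue reference to a temporary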
+static void TryReferenceListInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ InitListExpr *InitList,
+ InitializationSequence &Sequence)
+{
+ // First, catch C++03 where this isn't possible.
+ if (!S.getLangOpts().CPlusPlus0x) {
+ Sequence.SetFailed(InitializationSequence::FK_ReferenceBindingToInitList);
+ return;
+ }
+
+ QualType DestType = Entity.getType();
+ QualType cv1T1 = DestType->getAs<ReferenceType>()->getPointeeType();
+ Qualifiers T1Quals;
+ QualType T1 = S.Context.getUnqualifiedArrayType(cv1T1, T1Quals);
+
+ // Reference initialization via an initializer list works thus:
+ // If the initializer list consists of a single element that is
+ // reference-related to the referenced type, bind directly to that element
+ // (possibly creating temporaries).
+ // Otherwise, initialize a temporary with the initializer list and
+ // bind to that.
+ if (InitList->getNumInits() == 1) {
+ Expr *Initializer = InitList->getInit(0);
+ QualType cv2T2 = Initializer->getType();
+ Qualifiers T2Quals;
+ QualType T2 = S.Context.getUnqualifiedArrayType(cv2T2, T2Quals);
+
+ // If this fails, creating a temporary wouldn't work either.
+ if (ResolveOverloadedFunctionForReferenceBinding(S, Initializer, cv2T2, T2,
+ T1, Sequence))
+ return;
+
+ SourceLocation DeclLoc = Initializer->getLocStart();
+ bool dummy1, dummy2, dummy3;
+ Sema::ReferenceCompareResult RefRelationship
+ = S.CompareReferenceRelationship(DeclLoc, cv1T1, cv2T2, dummy1,
+ dummy2, dummy3);
+ if (RefRelationship >= Sema::Ref_Related) {
+ // Try to bind the reference here.
+ TryReferenceInitializationCore(S, Entity, Kind, Initializer, cv1T1, T1,
+ T1Quals, cv2T2, T2, T2Quals, Sequence);
+ if (Sequence)
+ Sequence.RewrapReferenceInitList(cv1T1, InitList);
+ return;
+ }
+ }
+
+ // Not reference-related. Create a temporary and bind to that.
+ InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(cv1T1);
+
+ TryListInitialization(S, TempEntity, Kind, InitList, Sequence);
+ if (Sequence) {
+ if (DestType->isRValueReferenceType() ||
+ (T1Quals.hasConst() && !T1Quals.hasVolatile()))
+ Sequence.AddReferenceBindingStep(cv1T1, /*bindingTemporary=*/true);
+ else
+ Sequence.SetFailed(
+ InitializationSequence::FK_NonConstLValueReferenceBindingToTemporary);
+ }
+}
+
+/// \brief Attempt list initialization (C++0x [dcl.init.list])
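+///
+/// For example:
+///
+///   int i = {1};                 // scalar with a single element
+///   int j = {1, 2};              // error: too many initializers for a scalar
+///   int a[] = {1, 2, 3};         // aggregate initialization
+///   std::vector<int> v = {1, 2}; // non-aggregate class: constructor call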
+static void TryListInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ InitListExpr *InitList,
+ InitializationSequence &Sequence) {
+ QualType DestType = Entity.getType();
+
+ // C++ doesn't allow scalar initialization with more than one argument.
+ // But C99 complex numbers are scalars and it makes sense there.
+ if (S.getLangOpts().CPlusPlus && DestType->isScalarType() &&
+ !DestType->isAnyComplexType() && InitList->getNumInits() > 1) {
+ Sequence.SetFailed(InitializationSequence::FK_TooManyInitsForScalar);
+ return;
+ }
+ if (DestType->isReferenceType()) {
+ TryReferenceListInitialization(S, Entity, Kind, InitList, Sequence);
+ return;
+ }
+ if (DestType->isRecordType()) {
+ if (S.RequireCompleteType(InitList->getLocStart(), DestType, S.PDiag())) {
+ Sequence.setIncompleteTypeFailure(DestType);
+ return;
+ }
+
+ if (!DestType->isAggregateType()) {
+ if (S.getLangOpts().CPlusPlus0x) {
+ Expr *Arg = InitList;
+ // A direct-initializer is not list-syntax, i.e. there's no special
+ // treatment of "A a({1, 2});".
+ TryConstructorInitialization(S, Entity, Kind, &Arg, 1, DestType,
+ Sequence,
+ Kind.getKind() != InitializationKind::IK_Direct);
+ } else
+ Sequence.SetFailed(
+ InitializationSequence::FK_InitListBadDestinationType);
+ return;
+ }
+ }
+
+ InitListChecker CheckInitList(S, Entity, InitList,
+ DestType, /*VerifyOnly=*/true,
+ Kind.getKind() != InitializationKind::IK_DirectList ||
+ !S.getLangOpts().CPlusPlus0x);
+ if (CheckInitList.HadError()) {
+ Sequence.SetFailed(InitializationSequence::FK_ListInitializationFailed);
+ return;
+ }
+
+ // Add the list initialization step with the built init list.
+ Sequence.AddListInitializationStep(DestType);
+}
+
+/// \brief Try a reference initialization that involves calling a conversion
+/// function.
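+///
+/// For example:
+///
+///   struct S { operator int&(); };
+///   int &r = S();         // S's conversion function yields the lvalue to bind
+///
+///   struct A {};
+///   struct B { B(const A&); };
+///   const B &b = A();     // B's converting constructor creates the bound temporary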
+static OverloadingResult TryRefInitWithConversionFunction(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr *Initializer,
+ bool AllowRValues,
+ InitializationSequence &Sequence) {
+ QualType DestType = Entity.getType();
+ QualType cv1T1 = DestType->getAs<ReferenceType>()->getPointeeType();
+ QualType T1 = cv1T1.getUnqualifiedType();
+ QualType cv2T2 = Initializer->getType();
+ QualType T2 = cv2T2.getUnqualifiedType();
+
+ bool DerivedToBase;
+ bool ObjCConversion;
+ bool ObjCLifetimeConversion;
+ assert(!S.CompareReferenceRelationship(Initializer->getLocStart(),
+ T1, T2, DerivedToBase,
+ ObjCConversion,
+ ObjCLifetimeConversion) &&
+ "Must have incompatible references when binding via conversion");
+ (void)DerivedToBase;
+ (void)ObjCConversion;
+ (void)ObjCLifetimeConversion;
+
+ // Build the candidate set directly in the initialization sequence
+ // structure, so that it will persist if we fail.
+ OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
+ CandidateSet.clear();
+
+ // Determine whether we are allowed to call explicit constructors or
+ // explicit conversion operators.
+ bool AllowExplicit = Kind.AllowExplicit();
+ bool AllowExplicitConvs = Kind.allowExplicitConversionFunctions();
+
+ const RecordType *T1RecordType = 0;
+ if (AllowRValues && (T1RecordType = T1->getAs<RecordType>()) &&
+ !S.RequireCompleteType(Kind.getLocation(), T1, 0)) {
+ // The type we're converting to is a class type. Enumerate its constructors
+ // to see if there is a suitable conversion.
+ CXXRecordDecl *T1RecordDecl = cast<CXXRecordDecl>(T1RecordType->getDecl());
+
+ DeclContext::lookup_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd) = S.LookupConstructors(T1RecordDecl);
+ Con != ConEnd; ++Con) {
+ NamedDecl *D = *Con;
+ DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
+
+ // Find the constructor (which may be a template).
+ CXXConstructorDecl *Constructor = 0;
+ FunctionTemplateDecl *ConstructorTmpl = dyn_cast<FunctionTemplateDecl>(D);
+ if (ConstructorTmpl)
+ Constructor = cast<CXXConstructorDecl>(
+ ConstructorTmpl->getTemplatedDecl());
+ else
+ Constructor = cast<CXXConstructorDecl>(D);
+
+ if (!Constructor->isInvalidDecl() &&
+ Constructor->isConvertingConstructor(AllowExplicit)) {
+ if (ConstructorTmpl)
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
+ /*ExplicitArgs*/ 0,
+ Initializer, CandidateSet,
+ /*SuppressUserConversions=*/true);
+ else
+ S.AddOverloadCandidate(Constructor, FoundDecl,
+ Initializer, CandidateSet,
+ /*SuppressUserConversions=*/true);
+ }
+ }
+ }
+ if (T1RecordType && T1RecordType->getDecl()->isInvalidDecl())
+ return OR_No_Viable_Function;
+
+ const RecordType *T2RecordType = 0;
+ if ((T2RecordType = T2->getAs<RecordType>()) &&
+ !S.RequireCompleteType(Kind.getLocation(), T2, 0)) {
+ // The type we're converting from is a class type, enumerate its conversion
+ // functions.
+ CXXRecordDecl *T2RecordDecl = cast<CXXRecordDecl>(T2RecordType->getDecl());
+
+ const UnresolvedSetImpl *Conversions
+ = T2RecordDecl->getVisibleConversionFunctions();
+ for (UnresolvedSetImpl::const_iterator I = Conversions->begin(),
+ E = Conversions->end(); I != E; ++I) {
+ NamedDecl *D = *I;
+ CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext());
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(D);
+ CXXConversionDecl *Conv;
+ if (ConvTemplate)
+ Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
+ else
+ Conv = cast<CXXConversionDecl>(D);
+
+ // If the conversion function doesn't return a reference type,
+ // it can't be considered for this conversion unless we're allowed to
+ // consider rvalues.
+ // FIXME: Do we need to make sure that we only consider conversion
+ // candidates with reference-compatible results? That might be needed to
+ // break recursion.
+ if ((AllowExplicitConvs || !Conv->isExplicit()) &&
+ (AllowRValues || Conv->getConversionType()->isLValueReferenceType())){
+ if (ConvTemplate)
+ S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(),
+ ActingDC, Initializer,
+ DestType, CandidateSet);
+ else
+ S.AddConversionCandidate(Conv, I.getPair(), ActingDC,
+ Initializer, DestType, CandidateSet);
+ }
+ }
+ }
+ if (T2RecordType && T2RecordType->getDecl()->isInvalidDecl())
+ return OR_No_Viable_Function;
+
+ SourceLocation DeclLoc = Initializer->getLocStart();
+
+ // Perform overload resolution. If it fails, return the failed result.
+ OverloadCandidateSet::iterator Best;
+ if (OverloadingResult Result
+ = CandidateSet.BestViableFunction(S, DeclLoc, Best, true))
+ return Result;
+
+ FunctionDecl *Function = Best->Function;
+
+ // This is the overload that will actually be used for the initialization, so
+ // mark it as used.
+ S.MarkFunctionReferenced(DeclLoc, Function);
+
+ // Compute the returned type of the conversion.
+ if (isa<CXXConversionDecl>(Function))
+ T2 = Function->getResultType();
+ else
+ T2 = cv1T1;
+
+ // Add the user-defined conversion step.
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+ Sequence.AddUserConversionStep(Function, Best->FoundDecl,
+ T2.getNonLValueExprType(S.Context),
+ HadMultipleCandidates);
+
+ // Determine whether we need to perform derived-to-base or
+ // cv-qualification adjustments.
+ ExprValueKind VK = VK_RValue;
+ if (T2->isLValueReferenceType())
+ VK = VK_LValue;
+ else if (const RValueReferenceType *RRef = T2->getAs<RValueReferenceType>())
+ VK = RRef->getPointeeType()->isFunctionType() ? VK_LValue : VK_XValue;
+
+ bool NewDerivedToBase = false;
+ bool NewObjCConversion = false;
+ bool NewObjCLifetimeConversion = false;
+ Sema::ReferenceCompareResult NewRefRelationship
+ = S.CompareReferenceRelationship(DeclLoc, T1,
+ T2.getNonLValueExprType(S.Context),
+ NewDerivedToBase, NewObjCConversion,
+ NewObjCLifetimeConversion);
+ if (NewRefRelationship == Sema::Ref_Incompatible) {
+ // If the type we've converted to is not reference-related to the
+ // type we're looking for, then there is another conversion step
+ // we need to perform to produce a temporary of the right type
+ // that we'll be binding to.
+ ImplicitConversionSequence ICS;
+ ICS.setStandard();
+ ICS.Standard = Best->FinalConversion;
+ T2 = ICS.Standard.getToType(2);
+ Sequence.AddConversionSequenceStep(ICS, T2);
+ } else if (NewDerivedToBase)
+ Sequence.AddDerivedToBaseCastStep(
+ S.Context.getQualifiedType(T1,
+ T2.getNonReferenceType().getQualifiers()),
+ VK);
+ else if (NewObjCConversion)
+ Sequence.AddObjCObjectConversionStep(
+ S.Context.getQualifiedType(T1,
+ T2.getNonReferenceType().getQualifiers()));
+
+ if (cv1T1.getQualifiers() != T2.getNonReferenceType().getQualifiers())
+ Sequence.AddQualificationConversionStep(cv1T1, VK);
+
+ Sequence.AddReferenceBindingStep(cv1T1, !T2->isReferenceType());
+ return OR_Success;
+}
+
+static void CheckCXX98CompatAccessibleCopy(Sema &S,
+ const InitializedEntity &Entity,
+ Expr *CurInitExpr);
+
+/// \brief Attempt reference initialization (C++0x [dcl.init.ref])
+static void TryReferenceInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr *Initializer,
+ InitializationSequence &Sequence) {
+ QualType DestType = Entity.getType();
+ QualType cv1T1 = DestType->getAs<ReferenceType>()->getPointeeType();
+ Qualifiers T1Quals;
+ QualType T1 = S.Context.getUnqualifiedArrayType(cv1T1, T1Quals);
+ QualType cv2T2 = Initializer->getType();
+ Qualifiers T2Quals;
+ QualType T2 = S.Context.getUnqualifiedArrayType(cv2T2, T2Quals);
+
+ // If the initializer is the address of an overloaded function, try
+ // to resolve the overloaded function. If all goes well, T2 is the
+ // type of the resulting function.
+ if (ResolveOverloadedFunctionForReferenceBinding(S, Initializer, cv2T2, T2,
+ T1, Sequence))
+ return;
+
+ // Delegate everything else to a subfunction.
+ TryReferenceInitializationCore(S, Entity, Kind, Initializer, cv1T1, T1,
+ T1Quals, cv2T2, T2, T2Quals, Sequence);
+}
+
+/// \brief Reference initialization without resolving overloaded functions.
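+///
+/// For example:
+///
+///   int i = 0;
+///   int        &a = i;    // direct binding to a reference-compatible lvalue
+///   const int  &b = 1.5;  // converts to int, binds to the resulting temporary
+///   int       &&c = i;    // error: rvalue reference cannot bind to an lvalue
+///   int        &d = 1;    // error: non-const lvalue reference to a temporary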
+static void TryReferenceInitializationCore(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr *Initializer,
+ QualType cv1T1, QualType T1,
+ Qualifiers T1Quals,
+ QualType cv2T2, QualType T2,
+ Qualifiers T2Quals,
+ InitializationSequence &Sequence) {
+ QualType DestType = Entity.getType();
+ SourceLocation DeclLoc = Initializer->getLocStart();
+ // Compute some basic properties of the types and the initializer.
+ bool isLValueRef = DestType->isLValueReferenceType();
+ bool isRValueRef = !isLValueRef;
+ bool DerivedToBase = false;
+ bool ObjCConversion = false;
+ bool ObjCLifetimeConversion = false;
+ Expr::Classification InitCategory = Initializer->Classify(S.Context);
+ Sema::ReferenceCompareResult RefRelationship
+ = S.CompareReferenceRelationship(DeclLoc, cv1T1, cv2T2, DerivedToBase,
+ ObjCConversion, ObjCLifetimeConversion);
+
+ // C++0x [dcl.init.ref]p5:
+ // A reference to type "cv1 T1" is initialized by an expression of type
+ // "cv2 T2" as follows:
+ //
+ // - If the reference is an lvalue reference and the initializer
+ // expression
+  //       Note the analogous bullet points for rvalue refs to functions. Because
+ // there are no function rvalues in C++, rvalue refs to functions are treated
+ // like lvalue refs.
+ OverloadingResult ConvOvlResult = OR_Success;
+ bool T1Function = T1->isFunctionType();
+ if (isLValueRef || T1Function) {
+ if (InitCategory.isLValue() &&
+ (RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification ||
+ (Kind.isCStyleOrFunctionalCast() &&
+ RefRelationship == Sema::Ref_Related))) {
+ // - is an lvalue (but is not a bit-field), and "cv1 T1" is
+ // reference-compatible with "cv2 T2," or
+ //
+ // Per C++ [over.best.ics]p2, we don't diagnose whether the lvalue is a
+ // bit-field when we're determining whether the reference initialization
+ // can occur. However, we do pay attention to whether it is a bit-field
+ // to decide whether we're actually binding to a temporary created from
+ // the bit-field.
+ if (DerivedToBase)
+ Sequence.AddDerivedToBaseCastStep(
+ S.Context.getQualifiedType(T1, T2Quals),
+ VK_LValue);
+ else if (ObjCConversion)
+ Sequence.AddObjCObjectConversionStep(
+ S.Context.getQualifiedType(T1, T2Quals));
+
+ if (T1Quals != T2Quals)
+ Sequence.AddQualificationConversionStep(cv1T1, VK_LValue);
+ bool BindingTemporary = T1Quals.hasConst() && !T1Quals.hasVolatile() &&
+ (Initializer->getBitField() || Initializer->refersToVectorElement());
+ Sequence.AddReferenceBindingStep(cv1T1, BindingTemporary);
+ return;
+ }
+
+ // - has a class type (i.e., T2 is a class type), where T1 is not
+ // reference-related to T2, and can be implicitly converted to an
+ // lvalue of type "cv3 T3," where "cv1 T1" is reference-compatible
+ // with "cv3 T3" (this conversion is selected by enumerating the
+ // applicable conversion functions (13.3.1.6) and choosing the best
+ // one through overload resolution (13.3)),
+ // If we have an rvalue ref to function type here, the rhs must be
+ // an rvalue.
+ if (RefRelationship == Sema::Ref_Incompatible && T2->isRecordType() &&
+ (isLValueRef || InitCategory.isRValue())) {
+ ConvOvlResult = TryRefInitWithConversionFunction(S, Entity, Kind,
+ Initializer,
+ /*AllowRValues=*/isRValueRef,
+ Sequence);
+ if (ConvOvlResult == OR_Success)
+ return;
+ if (ConvOvlResult != OR_No_Viable_Function) {
+ Sequence.SetOverloadFailure(
+ InitializationSequence::FK_ReferenceInitOverloadFailed,
+ ConvOvlResult);
+ }
+ }
+ }
+
+ // - Otherwise, the reference shall be an lvalue reference to a
+ // non-volatile const type (i.e., cv1 shall be const), or the reference
+ // shall be an rvalue reference.
+ if (isLValueRef && !(T1Quals.hasConst() && !T1Quals.hasVolatile())) {
+ if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy)
+ Sequence.SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
+ else if (ConvOvlResult && !Sequence.getFailedCandidateSet().empty())
+ Sequence.SetOverloadFailure(
+ InitializationSequence::FK_ReferenceInitOverloadFailed,
+ ConvOvlResult);
+ else
+ Sequence.SetFailed(InitCategory.isLValue()
+ ? (RefRelationship == Sema::Ref_Related
+ ? InitializationSequence::FK_ReferenceInitDropsQualifiers
+ : InitializationSequence::FK_NonConstLValueReferenceBindingToUnrelated)
+ : InitializationSequence::FK_NonConstLValueReferenceBindingToTemporary);
+
+ return;
+ }
+
+ // - If the initializer expression
+ // - is an xvalue, class prvalue, array prvalue, or function lvalue and
+ // "cv1 T1" is reference-compatible with "cv2 T2"
+ // Note: functions are handled below.
+ if (!T1Function &&
+ (RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification ||
+ (Kind.isCStyleOrFunctionalCast() &&
+ RefRelationship == Sema::Ref_Related)) &&
+ (InitCategory.isXValue() ||
+ (InitCategory.isPRValue() && T2->isRecordType()) ||
+ (InitCategory.isPRValue() && T2->isArrayType()))) {
+ ExprValueKind ValueKind = InitCategory.isXValue()? VK_XValue : VK_RValue;
+ if (InitCategory.isPRValue() && T2->isRecordType()) {
+ // The corresponding bullet in C++03 [dcl.init.ref]p5 gives the
+ // compiler the freedom to perform a copy here or bind to the
+ // object, while C++0x requires that we bind directly to the
+ // object. Hence, we always bind to the object without making an
+      // extra copy. However, C++03 requires that we check for the
+ // presence of a suitable copy constructor:
+ //
+ // The constructor that would be used to make the copy shall
+ // be callable whether or not the copy is actually done.
+ if (!S.getLangOpts().CPlusPlus0x && !S.getLangOpts().MicrosoftExt)
+ Sequence.AddExtraneousCopyToTemporary(cv2T2);
+ else if (S.getLangOpts().CPlusPlus0x)
+ CheckCXX98CompatAccessibleCopy(S, Entity, Initializer);
+ }
+
+ if (DerivedToBase)
+ Sequence.AddDerivedToBaseCastStep(S.Context.getQualifiedType(T1, T2Quals),
+ ValueKind);
+ else if (ObjCConversion)
+ Sequence.AddObjCObjectConversionStep(
+ S.Context.getQualifiedType(T1, T2Quals));
+
+ if (T1Quals != T2Quals)
+ Sequence.AddQualificationConversionStep(cv1T1, ValueKind);
+ Sequence.AddReferenceBindingStep(cv1T1,
+ /*bindingTemporary=*/InitCategory.isPRValue());
+ return;
+ }
+
+ // - has a class type (i.e., T2 is a class type), where T1 is not
+ // reference-related to T2, and can be implicitly converted to an
+ // xvalue, class prvalue, or function lvalue of type "cv3 T3",
+ // where "cv1 T1" is reference-compatible with "cv3 T3",
+ if (T2->isRecordType()) {
+ if (RefRelationship == Sema::Ref_Incompatible) {
+ ConvOvlResult = TryRefInitWithConversionFunction(S, Entity,
+ Kind, Initializer,
+ /*AllowRValues=*/true,
+ Sequence);
+ if (ConvOvlResult)
+ Sequence.SetOverloadFailure(
+ InitializationSequence::FK_ReferenceInitOverloadFailed,
+ ConvOvlResult);
+
+ return;
+ }
+
+ Sequence.SetFailed(InitializationSequence::FK_ReferenceInitDropsQualifiers);
+ return;
+ }
+
+ // - Otherwise, a temporary of type "cv1 T1" is created and initialized
+ // from the initializer expression using the rules for a non-reference
+ // copy initialization (8.5). The reference is then bound to the
+ // temporary. [...]
+
+ // Determine whether we are allowed to call explicit constructors or
+ // explicit conversion operators.
+ bool AllowExplicit = Kind.AllowExplicit();
+
+ InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(cv1T1);
+
+ ImplicitConversionSequence ICS
+ = S.TryImplicitConversion(Initializer, TempEntity.getType(),
+ /*SuppressUserConversions*/ false,
+ AllowExplicit,
+ /*FIXME:InOverloadResolution=*/false,
+ /*CStyle=*/Kind.isCStyleOrFunctionalCast(),
+ /*AllowObjCWritebackConversion=*/false);
+
+ if (ICS.isBad()) {
+ // FIXME: Use the conversion function set stored in ICS to turn
+ // this into an overloading ambiguity diagnostic. However, we need
+ // to keep that set as an OverloadCandidateSet rather than as some
+ // other kind of set.
+ if (ConvOvlResult && !Sequence.getFailedCandidateSet().empty())
+ Sequence.SetOverloadFailure(
+ InitializationSequence::FK_ReferenceInitOverloadFailed,
+ ConvOvlResult);
+ else if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy)
+ Sequence.SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
+ else
+ Sequence.SetFailed(InitializationSequence::FK_ReferenceInitFailed);
+ return;
+ } else {
+ Sequence.AddConversionSequenceStep(ICS, TempEntity.getType());
+ }
+
+ // [...] If T1 is reference-related to T2, cv1 must be the
+ // same cv-qualification as, or greater cv-qualification
+ // than, cv2; otherwise, the program is ill-formed.
+ unsigned T1CVRQuals = T1Quals.getCVRQualifiers();
+ unsigned T2CVRQuals = T2Quals.getCVRQualifiers();
+ if (RefRelationship == Sema::Ref_Related &&
+ (T1CVRQuals | T2CVRQuals) != T1CVRQuals) {
+ Sequence.SetFailed(InitializationSequence::FK_ReferenceInitDropsQualifiers);
+ return;
+ }
+
+ // [...] If T1 is reference-related to T2 and the reference is an rvalue
+ // reference, the initializer expression shall not be an lvalue.
+ if (RefRelationship >= Sema::Ref_Related && !isLValueRef &&
+ InitCategory.isLValue()) {
+ Sequence.SetFailed(
+ InitializationSequence::FK_RValueReferenceBindingToLValue);
+ return;
+ }
+
+ Sequence.AddReferenceBindingStep(cv1T1, /*bindingTemporary=*/true);
+ return;
+}
+
+/// \brief Attempt character array initialization from a string literal
+/// (C++ [dcl.init.string], C99 6.7.8).
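+///
+/// For example:
+///
+///   char    buf[6] = "hello";   // copies the five characters plus the terminator
+///   wchar_t wide[] = L"hi";     // array bound deduced from the literal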
+static void TryStringLiteralInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr *Initializer,
+ InitializationSequence &Sequence) {
+ Sequence.AddStringInitStep(Entity.getType());
+}
+
+/// \brief Attempt value initialization (C++ [dcl.init]p7).
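+///
+/// For example:
+///
+///   struct P { int x, y; };
+///   P p = P();   // zero-initialized: p.x == 0 && p.y == 0
+///
+///   struct S { S(); int x; };
+///   S s = S();   // user-provided default constructor is called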
+static void TryValueInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ InitializationSequence &Sequence) {
+ // C++98 [dcl.init]p5, C++11 [dcl.init]p7:
+ //
+ // To value-initialize an object of type T means:
+ QualType T = Entity.getType();
+
+ // -- if T is an array type, then each element is value-initialized;
+ T = S.Context.getBaseElementType(T);
+
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
+ // C++98:
+ // -- if T is a class type (clause 9) with a user-declared
+ // constructor (12.1), then the default constructor for T is
+ // called (and the initialization is ill-formed if T has no
+ // accessible default constructor);
+ if (!S.getLangOpts().CPlusPlus0x) {
+ if (ClassDecl->hasUserDeclaredConstructor())
+ // FIXME: we really want to refer to a single subobject of the array,
+ // but Entity doesn't have a way to capture that (yet).
+ return TryConstructorInitialization(S, Entity, Kind, 0, 0,
+ T, Sequence);
+ } else {
+ // C++11:
+ // -- if T is a class type (clause 9) with either no default constructor
+ // (12.1 [class.ctor]) or a default constructor that is user-provided
+ // or deleted, then the object is default-initialized;
+ CXXConstructorDecl *CD = S.LookupDefaultConstructor(ClassDecl);
+ if (!CD || !CD->getCanonicalDecl()->isDefaulted() || CD->isDeleted())
+ return TryConstructorInitialization(S, Entity, Kind, 0, 0,
+ T, Sequence);
+ }
+
+ // -- if T is a (possibly cv-qualified) non-union class type without a
+ // user-provided or deleted default constructor, then the object is
+ // zero-initialized and, if T has a non-trivial default constructor,
+ // default-initialized;
+ if ((ClassDecl->getTagKind() == TTK_Class ||
+ ClassDecl->getTagKind() == TTK_Struct)) {
+ Sequence.AddZeroInitializationStep(Entity.getType());
+ return TryConstructorInitialization(S, Entity, Kind, 0, 0, T, Sequence);
+ }
+ }
+ }
+
+ Sequence.AddZeroInitializationStep(Entity.getType());
+}
+
+/// \brief Attempt default initialization (C++ [dcl.init]p6).
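+///
+/// For example:
+///
+///   struct S { S(); };
+///   S s;           // default constructor is called
+///   int i;         // no initialization is performed
+///   const int c;   // error: default initialization of a const object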
+static void TryDefaultInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ InitializationSequence &Sequence) {
+ assert(Kind.getKind() == InitializationKind::IK_Default);
+
+ // C++ [dcl.init]p6:
+ // To default-initialize an object of type T means:
+ // - if T is an array type, each element is default-initialized;
+ QualType DestType = S.Context.getBaseElementType(Entity.getType());
+
+ // - if T is a (possibly cv-qualified) class type (Clause 9), the default
+ // constructor for T is called (and the initialization is ill-formed if
+ // T has no accessible default constructor);
+ if (DestType->isRecordType() && S.getLangOpts().CPlusPlus) {
+ TryConstructorInitialization(S, Entity, Kind, 0, 0, DestType, Sequence);
+ return;
+ }
+
+ // - otherwise, no initialization is performed.
+
+ // If a program calls for the default initialization of an object of
+ // a const-qualified type T, T shall be a class type with a user-provided
+ // default constructor.
+ if (DestType.isConstQualified() && S.getLangOpts().CPlusPlus) {
+ Sequence.SetFailed(InitializationSequence::FK_DefaultInitOfConst);
+ return;
+ }
+
+ // If the destination type has a lifetime property, zero-initialize it.
+ if (DestType.getQualifiers().hasObjCLifetime()) {
+ Sequence.AddZeroInitializationStep(Entity.getType());
+ return;
+ }
+}
+
+/// \brief Attempt a user-defined conversion between two types (C++ [dcl.init]),
+/// which enumerates all conversion functions and performs overload resolution
+/// to select the best.
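+///
+/// For example:
+///
+///   struct Meters { Meters(double); };
+///   struct Feet   { operator double() const; };
+///   Meters m = 3.0;    // converting constructor of the destination type
+///   double d = Feet(); // conversion function of the source class type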
+static void TryUserDefinedConversion(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr *Initializer,
+ InitializationSequence &Sequence) {
+ QualType DestType = Entity.getType();
+ assert(!DestType->isReferenceType() && "References are handled elsewhere");
+ QualType SourceType = Initializer->getType();
+ assert((DestType->isRecordType() || SourceType->isRecordType()) &&
+ "Must have a class type to perform a user-defined conversion");
+
+ // Build the candidate set directly in the initialization sequence
+ // structure, so that it will persist if we fail.
+ OverloadCandidateSet &CandidateSet = Sequence.getFailedCandidateSet();
+ CandidateSet.clear();
+
+ // Determine whether we are allowed to call explicit constructors or
+ // explicit conversion operators.
+ bool AllowExplicit = Kind.AllowExplicit();
+
+ if (const RecordType *DestRecordType = DestType->getAs<RecordType>()) {
+ // The type we're converting to is a class type. Enumerate its constructors
+ // to see if there is a suitable conversion.
+ CXXRecordDecl *DestRecordDecl
+ = cast<CXXRecordDecl>(DestRecordType->getDecl());
+
+ // Try to complete the type we're converting to.
+ if (!S.RequireCompleteType(Kind.getLocation(), DestType, 0)) {
+ DeclContext::lookup_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd) = S.LookupConstructors(DestRecordDecl);
+ Con != ConEnd; ++Con) {
+ NamedDecl *D = *Con;
+ DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
+
+ // Find the constructor (which may be a template).
+ CXXConstructorDecl *Constructor = 0;
+ FunctionTemplateDecl *ConstructorTmpl
+ = dyn_cast<FunctionTemplateDecl>(D);
+ if (ConstructorTmpl)
+ Constructor = cast<CXXConstructorDecl>(
+ ConstructorTmpl->getTemplatedDecl());
+ else
+ Constructor = cast<CXXConstructorDecl>(D);
+
+ if (!Constructor->isInvalidDecl() &&
+ Constructor->isConvertingConstructor(AllowExplicit)) {
+ if (ConstructorTmpl)
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
+ /*ExplicitArgs*/ 0,
+ Initializer, CandidateSet,
+ /*SuppressUserConversions=*/true);
+ else
+ S.AddOverloadCandidate(Constructor, FoundDecl,
+ Initializer, CandidateSet,
+ /*SuppressUserConversions=*/true);
+ }
+ }
+ }
+ }
+
+ SourceLocation DeclLoc = Initializer->getLocStart();
+
+ if (const RecordType *SourceRecordType = SourceType->getAs<RecordType>()) {
+ // The type we're converting from is a class type, enumerate its conversion
+ // functions.
+
+ // We can only enumerate the conversion functions for a complete type; if
+ // the type isn't complete, simply skip this step.
+ if (!S.RequireCompleteType(DeclLoc, SourceType, 0)) {
+ CXXRecordDecl *SourceRecordDecl
+ = cast<CXXRecordDecl>(SourceRecordType->getDecl());
+
+ const UnresolvedSetImpl *Conversions
+ = SourceRecordDecl->getVisibleConversionFunctions();
+ for (UnresolvedSetImpl::const_iterator I = Conversions->begin(),
+ E = Conversions->end();
+ I != E; ++I) {
+ NamedDecl *D = *I;
+ CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext());
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(D);
+ CXXConversionDecl *Conv;
+ if (ConvTemplate)
+ Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
+ else
+ Conv = cast<CXXConversionDecl>(D);
+
+ if (AllowExplicit || !Conv->isExplicit()) {
+ if (ConvTemplate)
+ S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(),
+ ActingDC, Initializer, DestType,
+ CandidateSet);
+ else
+ S.AddConversionCandidate(Conv, I.getPair(), ActingDC,
+ Initializer, DestType, CandidateSet);
+ }
+ }
+ }
+ }
+
+ // Perform overload resolution. If it fails, return the failed result.
+ OverloadCandidateSet::iterator Best;
+ if (OverloadingResult Result
+ = CandidateSet.BestViableFunction(S, DeclLoc, Best, true)) {
+ Sequence.SetOverloadFailure(
+ InitializationSequence::FK_UserConversionOverloadFailed,
+ Result);
+ return;
+ }
+
+ FunctionDecl *Function = Best->Function;
+ S.MarkFunctionReferenced(DeclLoc, Function);
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ if (isa<CXXConstructorDecl>(Function)) {
+ // Add the user-defined conversion step. Any cv-qualification conversion is
+ // subsumed by the initialization. Per DR5, the created temporary is of the
+ // cv-unqualified type of the destination.
+ Sequence.AddUserConversionStep(Function, Best->FoundDecl,
+ DestType.getUnqualifiedType(),
+ HadMultipleCandidates);
+ return;
+ }
+
+ // Add the user-defined conversion step that calls the conversion function.
+ QualType ConvType = Function->getCallResultType();
+ if (ConvType->getAs<RecordType>()) {
+ // If we're converting to a class type, there may be a copy of
+ // the resulting temporary object (possibly to create an object of
+ // a base class type). That copy is not a separate conversion, so
+ // we just make a note of the actual destination type (possibly a
+ // base class of the type returned by the conversion function) and
+ // let the user-defined conversion step handle the conversion.
+ Sequence.AddUserConversionStep(Function, Best->FoundDecl, DestType,
+ HadMultipleCandidates);
+ return;
+ }
+
+ Sequence.AddUserConversionStep(Function, Best->FoundDecl, ConvType,
+ HadMultipleCandidates);
+
+ // If the conversion following the call to the conversion function
+ // is interesting, add it as a separate step.
+ if (Best->FinalConversion.First || Best->FinalConversion.Second ||
+ Best->FinalConversion.Third) {
+ ImplicitConversionSequence ICS;
+ ICS.setStandard();
+ ICS.Standard = Best->FinalConversion;
+ Sequence.AddConversionSequenceStep(ICS, DestType);
+ }
+}
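+
+// Editor's illustrative note (a sketch, not part of this change; the type
+// names below are hypothetical): the two lookups above correspond to
+// converting constructors on the destination class and conversion functions
+// on the source class, e.g.
+//
+//   struct Dest { Dest(int); };              // converting constructor
+//   struct Src  { operator Dest() const; };  // conversion function
+//
+//   Dest d1 = 42;     // Dest's constructors are enumerated as candidates
+//   Dest d2 = Src();  // Src's conversion functions are also enumerated
+//
+// Overload resolution over the combined candidate set selects the conversion,
+// and a trailing standard conversion step is added only when the chosen
+// conversion function's result type still differs from the destination.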
+
+/// The non-zero enum values here are indexes into diagnostic alternatives.
+enum InvalidICRKind { IIK_okay, IIK_nonlocal, IIK_nonscalar };
+
+/// Determines whether this expression is an acceptable ICR source.
+static InvalidICRKind isInvalidICRSource(ASTContext &C, Expr *e,
+ bool isAddressOf) {
+ // Skip parens.
+ e = e->IgnoreParens();
+
+ // Skip address-of nodes.
+ if (UnaryOperator *op = dyn_cast<UnaryOperator>(e)) {
+ if (op->getOpcode() == UO_AddrOf)
+ return isInvalidICRSource(C, op->getSubExpr(), /*addressof*/ true);
+
+ // Skip certain casts.
+ } else if (CastExpr *ce = dyn_cast<CastExpr>(e)) {
+ switch (ce->getCastKind()) {
+ case CK_Dependent:
+ case CK_BitCast:
+ case CK_LValueBitCast:
+ case CK_NoOp:
+ return isInvalidICRSource(C, ce->getSubExpr(), isAddressOf);
+
+ case CK_ArrayToPointerDecay:
+ return IIK_nonscalar;
+
+ case CK_NullToPointer:
+ return IIK_okay;
+
+ default:
+ break;
+ }
+
+ // If we have a declaration reference, it had better be a local variable.
+ } else if (isa<DeclRefExpr>(e)) {
+ if (!isAddressOf) return IIK_nonlocal;
+
+ VarDecl *var = dyn_cast<VarDecl>(cast<DeclRefExpr>(e)->getDecl());
+ if (!var) return IIK_nonlocal;
+
+ return (var->hasLocalStorage() ? IIK_okay : IIK_nonlocal);
+
+ // If we have a conditional operator, check both sides.
+ } else if (ConditionalOperator *cond = dyn_cast<ConditionalOperator>(e)) {
+ if (InvalidICRKind iik = isInvalidICRSource(C, cond->getLHS(), isAddressOf))
+ return iik;
+
+ return isInvalidICRSource(C, cond->getRHS(), isAddressOf);
+
+ // These are never scalar.
+ } else if (isa<ArraySubscriptExpr>(e)) {
+ return IIK_nonscalar;
+
+ // Otherwise, it needs to be a null pointer constant.
+ } else {
+ return (e->isNullPointerConstant(C, Expr::NPC_ValueDependentIsNull)
+ ? IIK_okay : IIK_nonlocal);
+ }
+
+ return IIK_nonlocal;
+}
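+
+// Editor's illustrative note (a sketch, not part of this change; the names
+// below are hypothetical): under ARC, a writeback argument must ultimately
+// name the address of a local variable or be a null pointer constant. For a
+// parameter of type 'NSError *__autoreleasing *':
+//
+//   passing '&localError'   -> IIK_okay      (local storage)
+//   passing '&gGlobalError' -> IIK_nonlocal  (diagnosed by the caller below)
+//   passing 'errorArray'    -> IIK_nonscalar (array-to-pointer decay)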
+
+/// Check whether the given expression is a valid operand for an
+/// indirect copy/restore.
+static void checkIndirectCopyRestoreSource(Sema &S, Expr *src) {
+ assert(src->isRValue());
+
+ InvalidICRKind iik = isInvalidICRSource(S.Context, src, false);
+ if (iik == IIK_okay) return;
+
+ S.Diag(src->getExprLoc(), diag::err_arc_nonlocal_writeback)
+ << ((unsigned) iik - 1) // shift index into diagnostic explanations
+ << src->getSourceRange();
+}
+
+/// \brief Determine whether we have compatible array types for the
+/// purposes of GNU by-copy array initialization.
+static bool hasCompatibleArrayTypes(ASTContext &Context,
+ const ArrayType *Dest,
+ const ArrayType *Source) {
+ // If the source and destination array types are equivalent, we're
+ // done.
+ if (Context.hasSameType(QualType(Dest, 0), QualType(Source, 0)))
+ return true;
+
+ // Make sure that the element types are the same.
+ if (!Context.hasSameType(Dest->getElementType(), Source->getElementType()))
+ return false;
+
+ // The only mismatch we allow is when the destination is an
+ // incomplete array type and the source is a constant array type.
+ return Source->isConstantArrayType() && Dest->isIncompleteArrayType();
+}
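+
+// Editor's illustrative note (a sketch, not part of this change): the one
+// permitted mismatch covers the GNU C by-copy array case such as
+//
+//   int a[] = (int[3]){1, 2, 3};
+//
+// where the destination 'int[]' is incomplete and the source compound
+// literal has the constant array type 'int[3]' with the same element type.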
+
+static bool tryObjCWritebackConversion(Sema &S,
+ InitializationSequence &Sequence,
+ const InitializedEntity &Entity,
+ Expr *Initializer) {
+ bool ArrayDecay = false;
+ QualType ArgType = Initializer->getType();
+ QualType ArgPointee;
+ if (const ArrayType *ArgArrayType = S.Context.getAsArrayType(ArgType)) {
+ ArrayDecay = true;
+ ArgPointee = ArgArrayType->getElementType();
+ ArgType = S.Context.getPointerType(ArgPointee);
+ }
+
+ // Handle write-back conversion.
+ QualType ConvertedArgType;
+ if (!S.isObjCWritebackConversion(ArgType, Entity.getType(),
+ ConvertedArgType))
+ return false;
+
+ // We should copy unless we're passing to an argument explicitly
+ // marked 'out'.
+ bool ShouldCopy = true;
+ if (ParmVarDecl *param = cast_or_null<ParmVarDecl>(Entity.getDecl()))
+ ShouldCopy = (param->getObjCDeclQualifier() != ParmVarDecl::OBJC_TQ_Out);
+
+ // Do we need an lvalue conversion?
+ if (ArrayDecay || Initializer->isGLValue()) {
+ ImplicitConversionSequence ICS;
+ ICS.setStandard();
+ ICS.Standard.setAsIdentityConversion();
+
+ QualType ResultType;
+ if (ArrayDecay) {
+ ICS.Standard.First = ICK_Array_To_Pointer;
+ ResultType = S.Context.getPointerType(ArgPointee);
+ } else {
+ ICS.Standard.First = ICK_Lvalue_To_Rvalue;
+ ResultType = Initializer->getType().getNonLValueExprType(S.Context);
+ }
+
+ Sequence.AddConversionSequenceStep(ICS, ResultType);
+ }
+
+ Sequence.AddPassByIndirectCopyRestoreStep(Entity.getType(), ShouldCopy);
+ return true;
+}
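+
+// Editor's illustrative note (a sketch, not part of this change; declarations
+// are hypothetical): the typical writeback case is passing '&localError',
+// where 'NSError *localError' is a __strong local, to a parameter of type
+// 'NSError *__autoreleasing *'. Any array decay or lvalue-to-rvalue
+// adjustment is added as its own conversion step, and the argument is then
+// passed by indirect copy/restore: copied into a temporary before the call
+// and written back to the local afterwards, unless the parameter is marked
+// 'out', in which case no initial copy is made.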
+
+InitializationSequence::InitializationSequence(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr **Args,
+ unsigned NumArgs)
+ : FailedCandidateSet(Kind.getLocation()) {
+ ASTContext &Context = S.Context;
+
+ // C++0x [dcl.init]p16:
+ // The semantics of initializers are as follows. The destination type is
+ // the type of the object or reference being initialized and the source
+ // type is the type of the initializer expression. The source type is not
+ // defined when the initializer is a braced-init-list or when it is a
+ // parenthesized list of expressions.
+ QualType DestType = Entity.getType();
+
+ if (DestType->isDependentType() ||
+ Expr::hasAnyTypeDependentArguments(llvm::makeArrayRef(Args, NumArgs))) {
+ SequenceKind = DependentSequence;
+ return;
+ }
+
+ // Almost everything is a normal sequence.
+ setSequenceKind(NormalSequence);
+
+ for (unsigned I = 0; I != NumArgs; ++I)
+ if (Args[I]->getType()->isNonOverloadPlaceholderType()) {
+ // FIXME: should we be doing this here?
+ ExprResult result = S.CheckPlaceholderExpr(Args[I]);
+ if (result.isInvalid()) {
+ SetFailed(FK_PlaceholderType);
+ return;
+ }
+ Args[I] = result.take();
+ }
+
+ QualType SourceType;
+ Expr *Initializer = 0;
+ if (NumArgs == 1) {
+ Initializer = Args[0];
+ if (!isa<InitListExpr>(Initializer))
+ SourceType = Initializer->getType();
+ }
+
+ // - If the initializer is a (non-parenthesized) braced-init-list, the
+ // object is list-initialized (8.5.4).
+ if (Kind.getKind() != InitializationKind::IK_Direct) {
+ if (InitListExpr *InitList = dyn_cast_or_null<InitListExpr>(Initializer)) {
+ TryListInitialization(S, Entity, Kind, InitList, *this);
+ return;
+ }
+ }
+
+ // - If the destination type is a reference type, see 8.5.3.
+ if (DestType->isReferenceType()) {
+ // C++0x [dcl.init.ref]p1:
+ // A variable declared to be a T& or T&&, that is, "reference to type T"
+ // (8.3.2), shall be initialized by an object, or function, of type T or
+ // by an object that can be converted into a T.
+ // (Therefore, multiple arguments are not permitted.)
+ if (NumArgs != 1)
+ SetFailed(FK_TooManyInitsForReference);
+ else
+ TryReferenceInitialization(S, Entity, Kind, Args[0], *this);
+ return;
+ }
+
+ // - If the initializer is (), the object is value-initialized.
+ if (Kind.getKind() == InitializationKind::IK_Value ||
+ (Kind.getKind() == InitializationKind::IK_Direct && NumArgs == 0)) {
+ TryValueInitialization(S, Entity, Kind, *this);
+ return;
+ }
+
+ // Handle default initialization.
+ if (Kind.getKind() == InitializationKind::IK_Default) {
+ TryDefaultInitialization(S, Entity, Kind, *this);
+ return;
+ }
+
+ // - If the destination type is an array of characters, an array of
+ // char16_t, an array of char32_t, or an array of wchar_t, and the
+ // initializer is a string literal, see 8.5.2.
+ // - Otherwise, if the destination type is an array, the program is
+ // ill-formed.
+ if (const ArrayType *DestAT = Context.getAsArrayType(DestType)) {
+ if (Initializer && isa<VariableArrayType>(DestAT)) {
+ SetFailed(FK_VariableLengthArrayHasInitializer);
+ return;
+ }
+
+ if (Initializer && IsStringInit(Initializer, DestAT, Context)) {
+ TryStringLiteralInitialization(S, Entity, Kind, Initializer, *this);
+ return;
+ }
+
+ // Note: as a GNU C extension, we allow initialization of an
+ // array from a compound literal that creates an array of the same
+ // type, so long as the initializer has no side effects.
+ if (!S.getLangOpts().CPlusPlus && Initializer &&
+ isa<CompoundLiteralExpr>(Initializer->IgnoreParens()) &&
+ Initializer->getType()->isArrayType()) {
+ const ArrayType *SourceAT
+ = Context.getAsArrayType(Initializer->getType());
+ if (!hasCompatibleArrayTypes(S.Context, DestAT, SourceAT))
+ SetFailed(FK_ArrayTypeMismatch);
+ else if (Initializer->HasSideEffects(S.Context))
+ SetFailed(FK_NonConstantArrayInit);
+ else {
+ AddArrayInitStep(DestType);
+ }
+ }
+ // Note: as a GNU C++ extension, we allow initialization of a
+ // class member from a parenthesized initializer list.
+ else if (S.getLangOpts().CPlusPlus &&
+ Entity.getKind() == InitializedEntity::EK_Member &&
+ Initializer && isa<InitListExpr>(Initializer)) {
+ TryListInitialization(S, Entity, Kind, cast<InitListExpr>(Initializer),
+ *this);
+ AddParenthesizedArrayInitStep(DestType);
+ } else if (DestAT->getElementType()->isAnyCharacterType())
+ SetFailed(FK_ArrayNeedsInitListOrStringLiteral);
+ else
+ SetFailed(FK_ArrayNeedsInitList);
+
+ return;
+ }
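+
+ // Editor's illustrative note (a sketch, not part of this change; 'x' and
+ // 'src' below are hypothetical):
+ //   char buf[] = "hi";   // string-literal initialization (8.5.2)
+ //   int  arr[3] = x;     // FK_ArrayNeedsInitList (non-character elements)
+ //   char str[4] = src;   // FK_ArrayNeedsInitListOrStringLiteral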
+
+ // Determine whether we should consider writeback conversions for
+ // Objective-C ARC.
+ bool allowObjCWritebackConversion = S.getLangOpts().ObjCAutoRefCount &&
+ Entity.getKind() == InitializedEntity::EK_Parameter;
+
+ // We're at the end of the line for C: it's either a write-back conversion
+ // or it's a C assignment. There's no need to check anything else.
+ if (!S.getLangOpts().CPlusPlus) {
+ // If allowed, check whether this is an Objective-C writeback conversion.
+ if (allowObjCWritebackConversion &&
+ tryObjCWritebackConversion(S, *this, Entity, Initializer)) {
+ return;
+ }
+
+ // Handle initialization in C
+ AddCAssignmentStep(DestType);
+ MaybeProduceObjCObject(S, *this, Entity);
+ return;
+ }
+
+ assert(S.getLangOpts().CPlusPlus);
+
+ // - If the destination type is a (possibly cv-qualified) class type:
+ if (DestType->isRecordType()) {
+ // - If the initialization is direct-initialization, or if it is
+ // copy-initialization where the cv-unqualified version of the
+ // source type is the same class as, or a derived class of, the
+ // class of the destination, constructors are considered. [...]
+ if (Kind.getKind() == InitializationKind::IK_Direct ||
+ (Kind.getKind() == InitializationKind::IK_Copy &&
+ (Context.hasSameUnqualifiedType(SourceType, DestType) ||
+ S.IsDerivedFrom(SourceType, DestType))))
+ TryConstructorInitialization(S, Entity, Kind, Args, NumArgs,
+ Entity.getType(), *this);
+ // - Otherwise (i.e., for the remaining copy-initialization cases),
+ // user-defined conversion sequences that can convert from the source
+ // type to the destination type or (when a conversion function is
+ // used) to a derived class thereof are enumerated as described in
+ // 13.3.1.4, and the best one is chosen through overload resolution
+ // (13.3).
+ else
+ TryUserDefinedConversion(S, Entity, Kind, Initializer, *this);
+ return;
+ }
+
+ if (NumArgs > 1) {
+ SetFailed(FK_TooManyInitsForScalar);
+ return;
+ }
+ assert(NumArgs == 1 && "Zero-argument case handled above");
+
+ // - Otherwise, if the source type is a (possibly cv-qualified) class
+ // type, conversion functions are considered.
+ if (!SourceType.isNull() && SourceType->isRecordType()) {
+ TryUserDefinedConversion(S, Entity, Kind, Initializer, *this);
+ MaybeProduceObjCObject(S, *this, Entity);
+ return;
+ }
+
+ // - Otherwise, the initial value of the object being initialized is the
+ // (possibly converted) value of the initializer expression. Standard
+ // conversions (Clause 4) will be used, if necessary, to convert the
+ // initializer expression to the cv-unqualified version of the
+ // destination type; no user-defined conversions are considered.
+
+ ImplicitConversionSequence ICS
+ = S.TryImplicitConversion(Initializer, Entity.getType(),
+ /*SuppressUserConversions*/true,
+ /*AllowExplicitConversions*/ false,
+ /*InOverloadResolution*/ false,
+ /*CStyle=*/Kind.isCStyleOrFunctionalCast(),
+ allowObjCWritebackConversion);
+
+ if (ICS.isStandard() &&
+ ICS.Standard.Second == ICK_Writeback_Conversion) {
+ // Objective-C ARC writeback conversion.
+
+ // We should copy unless we're passing to an argument explicitly
+ // marked 'out'.
+ bool ShouldCopy = true;
+ if (ParmVarDecl *Param = cast_or_null<ParmVarDecl>(Entity.getDecl()))
+ ShouldCopy = (Param->getObjCDeclQualifier() != ParmVarDecl::OBJC_TQ_Out);
+
+ // If there was an lvalue adjustment, add it as a separate conversion.
+ if (ICS.Standard.First == ICK_Array_To_Pointer ||
+ ICS.Standard.First == ICK_Lvalue_To_Rvalue) {
+ ImplicitConversionSequence LvalueICS;
+ LvalueICS.setStandard();
+ LvalueICS.Standard.setAsIdentityConversion();
+ LvalueICS.Standard.setAllToTypes(ICS.Standard.getToType(0));
+ LvalueICS.Standard.First = ICS.Standard.First;
+ AddConversionSequenceStep(LvalueICS, ICS.Standard.getToType(0));
+ }
+
+ AddPassByIndirectCopyRestoreStep(Entity.getType(), ShouldCopy);
+ } else if (ICS.isBad()) {
+ DeclAccessPair dap;
+ if (Initializer->getType() == Context.OverloadTy &&
+ !S.ResolveAddressOfOverloadedFunction(Initializer
+ , DestType, false, dap))
+ SetFailed(InitializationSequence::FK_AddressOfOverloadFailed);
+ else
+ SetFailed(InitializationSequence::FK_ConversionFailed);
+ } else {
+ AddConversionSequenceStep(ICS, Entity.getType());
+
+ MaybeProduceObjCObject(S, *this, Entity);
+ }
+}
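+
+// Editor's illustrative note (a sketch, not part of this change; 'T', 'x',
+// 'a', and 'b' are hypothetical): a few initializations and the branch of
+// the constructor above that classifies them:
+//
+//   int &r = x;      // reference destination   -> TryReferenceInitialization
+//   T t = {1, 2};    // braced-init-list        -> TryListInitialization
+//   T()              // value-initialization    -> TryValueInitialization
+//   T t;             // default-initialization  -> TryDefaultInitialization
+//   T t(a, b);       // class, direct-init      -> TryConstructorInitialization
+//   int i = 1.0;     // scalar destination      -> standard conversion sequence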
+
+InitializationSequence::~InitializationSequence() {
+ for (SmallVectorImpl<Step>::iterator Step = Steps.begin(),
+ StepEnd = Steps.end();
+ Step != StepEnd; ++Step)
+ Step->Destroy();
+}
+
+//===----------------------------------------------------------------------===//
+// Perform initialization
+//===----------------------------------------------------------------------===//
+static Sema::AssignmentAction
+getAssignmentAction(const InitializedEntity &Entity) {
+ switch(Entity.getKind()) {
+ case InitializedEntity::EK_Variable:
+ case InitializedEntity::EK_New:
+ case InitializedEntity::EK_Exception:
+ case InitializedEntity::EK_Base:
+ case InitializedEntity::EK_Delegating:
+ return Sema::AA_Initializing;
+
+ case InitializedEntity::EK_Parameter:
+ if (Entity.getDecl() &&
+ isa<ObjCMethodDecl>(Entity.getDecl()->getDeclContext()))
+ return Sema::AA_Sending;
+
+ return Sema::AA_Passing;
+
+ case InitializedEntity::EK_Result:
+ return Sema::AA_Returning;
+
+ case InitializedEntity::EK_Temporary:
+ // FIXME: Can we tell apart casting vs. converting?
+ return Sema::AA_Casting;
+
+ case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_ArrayElement:
+ case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_ComplexElement:
+ case InitializedEntity::EK_BlockElement:
+ case InitializedEntity::EK_LambdaCapture:
+ return Sema::AA_Initializing;
+ }
+
+ llvm_unreachable("Invalid EntityKind!");
+}
+
+/// \brief Whether we should bind a created object as a temporary when
+/// initializing the given entity.
+static bool shouldBindAsTemporary(const InitializedEntity &Entity) {
+ switch (Entity.getKind()) {
+ case InitializedEntity::EK_ArrayElement:
+ case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_Result:
+ case InitializedEntity::EK_New:
+ case InitializedEntity::EK_Variable:
+ case InitializedEntity::EK_Base:
+ case InitializedEntity::EK_Delegating:
+ case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_ComplexElement:
+ case InitializedEntity::EK_Exception:
+ case InitializedEntity::EK_BlockElement:
+ case InitializedEntity::EK_LambdaCapture:
+ return false;
+
+ case InitializedEntity::EK_Parameter:
+ case InitializedEntity::EK_Temporary:
+ return true;
+ }
+
+ llvm_unreachable("missed an InitializedEntity kind?");
+}
+
+/// \brief Whether the given entity, when initialized with an object
+/// created for that initialization, requires destruction.
+static bool shouldDestroyTemporary(const InitializedEntity &Entity) {
+ switch (Entity.getKind()) {
+ case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_Result:
+ case InitializedEntity::EK_New:
+ case InitializedEntity::EK_Base:
+ case InitializedEntity::EK_Delegating:
+ case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_ComplexElement:
+ case InitializedEntity::EK_BlockElement:
+ case InitializedEntity::EK_LambdaCapture:
+ return false;
+
+ case InitializedEntity::EK_Variable:
+ case InitializedEntity::EK_Parameter:
+ case InitializedEntity::EK_Temporary:
+ case InitializedEntity::EK_ArrayElement:
+ case InitializedEntity::EK_Exception:
+ return true;
+ }
+
+ llvm_unreachable("missed an InitializedEntity kind?");
+}
+
+/// \brief Look for copy and move constructors and constructor templates, for
+/// copying an object via direct-initialization (per C++11 [dcl.init]p16).
+static void LookupCopyAndMoveConstructors(Sema &S,
+ OverloadCandidateSet &CandidateSet,
+ CXXRecordDecl *Class,
+ Expr *CurInitExpr) {
+ DeclContext::lookup_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd) = S.LookupConstructors(Class);
+ Con != ConEnd; ++Con) {
+ CXXConstructorDecl *Constructor = 0;
+
+ if ((Constructor = dyn_cast<CXXConstructorDecl>(*Con))) {
+ // Handle copy/move constructors only.
+ if (!Constructor || Constructor->isInvalidDecl() ||
+ !Constructor->isCopyOrMoveConstructor() ||
+ !Constructor->isConvertingConstructor(/*AllowExplicit=*/true))
+ continue;
+
+ DeclAccessPair FoundDecl
+ = DeclAccessPair::make(Constructor, Constructor->getAccess());
+ S.AddOverloadCandidate(Constructor, FoundDecl,
+ CurInitExpr, CandidateSet);
+ continue;
+ }
+
+ // Handle constructor templates.
+ FunctionTemplateDecl *ConstructorTmpl = cast<FunctionTemplateDecl>(*Con);
+ if (ConstructorTmpl->isInvalidDecl())
+ continue;
+
+ Constructor = cast<CXXConstructorDecl>(
+ ConstructorTmpl->getTemplatedDecl());
+ if (!Constructor->isConvertingConstructor(/*AllowExplicit=*/true))
+ continue;
+
+ // FIXME: Do we need to limit this to copy-constructor-like
+ // candidates?
+ DeclAccessPair FoundDecl
+ = DeclAccessPair::make(ConstructorTmpl, ConstructorTmpl->getAccess());
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl, 0,
+ CurInitExpr, CandidateSet, true);
+ }
+}
+
+/// \brief Get the location at which initialization diagnostics should appear.
+static SourceLocation getInitializationLoc(const InitializedEntity &Entity,
+ Expr *Initializer) {
+ switch (Entity.getKind()) {
+ case InitializedEntity::EK_Result:
+ return Entity.getReturnLoc();
+
+ case InitializedEntity::EK_Exception:
+ return Entity.getThrowLoc();
+
+ case InitializedEntity::EK_Variable:
+ return Entity.getDecl()->getLocation();
+
+ case InitializedEntity::EK_LambdaCapture:
+ return Entity.getCaptureLoc();
+
+ case InitializedEntity::EK_ArrayElement:
+ case InitializedEntity::EK_Member:
+ case InitializedEntity::EK_Parameter:
+ case InitializedEntity::EK_Temporary:
+ case InitializedEntity::EK_New:
+ case InitializedEntity::EK_Base:
+ case InitializedEntity::EK_Delegating:
+ case InitializedEntity::EK_VectorElement:
+ case InitializedEntity::EK_ComplexElement:
+ case InitializedEntity::EK_BlockElement:
+ return Initializer->getLocStart();
+ }
+ llvm_unreachable("missed an InitializedEntity kind?");
+}
+
+/// \brief Make a (potentially elidable) temporary copy of the object
+/// provided by the given initializer by calling the appropriate copy
+/// constructor.
+///
+/// \param S The Sema object used for type-checking.
+///
+/// \param T The type of the temporary object, which must either be
+/// the type of the initializer expression or a superclass thereof.
+///
+/// \param Entity The entity being initialized.
+///
+/// \param CurInit The initializer expression.
+///
+/// \param IsExtraneousCopy Whether this is an "extraneous" copy that
+/// is permitted in C++03 (but not C++0x) when binding a reference to
+/// an rvalue.
+///
+/// \returns An expression that copies the initializer expression into
+/// a temporary object, or an error expression if a copy could not be
+/// created.
+static ExprResult CopyObject(Sema &S,
+ QualType T,
+ const InitializedEntity &Entity,
+ ExprResult CurInit,
+ bool IsExtraneousCopy) {
+ // Determine which class type we're copying to.
+ Expr *CurInitExpr = (Expr *)CurInit.get();
+ CXXRecordDecl *Class = 0;
+ if (const RecordType *Record = T->getAs<RecordType>())
+ Class = cast<CXXRecordDecl>(Record->getDecl());
+ if (!Class)
+ return move(CurInit);
+
+ // C++0x [class.copy]p32:
+ // When certain criteria are met, an implementation is allowed to
+ // omit the copy/move construction of a class object, even if the
+ // copy/move constructor and/or destructor for the object have
+ // side effects. [...]
+ // - when a temporary class object that has not been bound to a
+ // reference (12.2) would be copied/moved to a class object
+ // with the same cv-unqualified type, the copy/move operation
+ // can be omitted by constructing the temporary object
+ // directly into the target of the omitted copy/move
+ //
+ // Note that the other three bullets are handled elsewhere. Copy
+ // elision for return statements and throw expressions is handled as part
+ // of constructor initialization, while copy elision for exception handlers
+ // is handled by the run-time.
+ bool Elidable = CurInitExpr->isTemporaryObject(S.Context, Class);
+ SourceLocation Loc = getInitializationLoc(Entity, CurInit.get());
+
+ // Make sure that the type we are copying is complete.
+ if (S.RequireCompleteType(Loc, T, S.PDiag(diag::err_temp_copy_incomplete)))
+ return move(CurInit);
+
+ // Perform overload resolution using the class's copy/move constructors.
+ // Only consider constructors and constructor templates. Per
+ // C++0x [dcl.init]p16, second bullet for class types, this initialization
+ // is direct-initialization.
+ OverloadCandidateSet CandidateSet(Loc);
+ LookupCopyAndMoveConstructors(S, CandidateSet, Class, CurInitExpr);
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(S, Loc, Best)) {
+ case OR_Success:
+ break;
+
+ case OR_No_Viable_Function:
+ S.Diag(Loc, IsExtraneousCopy && !S.isSFINAEContext()
+ ? diag::ext_rvalue_to_reference_temp_copy_no_viable
+ : diag::err_temp_copy_no_viable)
+ << (int)Entity.getKind() << CurInitExpr->getType()
+ << CurInitExpr->getSourceRange();
+ CandidateSet.NoteCandidates(S, OCD_AllCandidates, CurInitExpr);
+ if (!IsExtraneousCopy || S.isSFINAEContext())
+ return ExprError();
+ return move(CurInit);
+
+ case OR_Ambiguous:
+ S.Diag(Loc, diag::err_temp_copy_ambiguous)
+ << (int)Entity.getKind() << CurInitExpr->getType()
+ << CurInitExpr->getSourceRange();
+ CandidateSet.NoteCandidates(S, OCD_ViableCandidates, CurInitExpr);
+ return ExprError();
+
+ case OR_Deleted:
+ S.Diag(Loc, diag::err_temp_copy_deleted)
+ << (int)Entity.getKind() << CurInitExpr->getType()
+ << CurInitExpr->getSourceRange();
+ S.NoteDeletedFunction(Best->Function);
+ return ExprError();
+ }
+
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Best->Function);
+ ASTOwningVector<Expr*> ConstructorArgs(S);
+ CurInit.release(); // Ownership transferred into MultiExprArg, below.
+
+ S.CheckConstructorAccess(Loc, Constructor, Entity,
+ Best->FoundDecl.getAccess(), IsExtraneousCopy);
+
+ if (IsExtraneousCopy) {
+ // If this is a totally extraneous copy for C++03 reference
+ // binding purposes, just return the original initialization
+ // expression. We don't generate an (elided) copy operation here
+ // because doing so would require us to pass down a flag to avoid
+ // infinite recursion, where each step adds another extraneous,
+ // elidable copy.
+
+ // Instantiate the default arguments of any extra parameters in
+ // the selected copy constructor, as if we were going to create a
+ // proper call to the copy constructor.
+ for (unsigned I = 1, N = Constructor->getNumParams(); I != N; ++I) {
+ ParmVarDecl *Parm = Constructor->getParamDecl(I);
+ if (S.RequireCompleteType(Loc, Parm->getType(),
+ S.PDiag(diag::err_call_incomplete_argument)))
+ break;
+
+ // Build the default argument expression; we don't actually care
+ // if this succeeds or not, because this routine will complain
+ // if there was a problem.
+ S.BuildCXXDefaultArgExpr(Loc, Constructor, Parm);
+ }
+
+ return S.Owned(CurInitExpr);
+ }
+
+ S.MarkFunctionReferenced(Loc, Constructor);
+
+ // Determine the arguments required to actually perform the
+ // constructor call (we might have derived-to-base conversions, or
+ // the copy constructor may have default arguments).
+ if (S.CompleteConstructorCall(Constructor, MultiExprArg(&CurInitExpr, 1),
+ Loc, ConstructorArgs))
+ return ExprError();
+
+ // Actually perform the constructor call.
+ CurInit = S.BuildCXXConstructExpr(Loc, T, Constructor, Elidable,
+ move_arg(ConstructorArgs),
+ HadMultipleCandidates,
+ /*ZeroInit*/ false,
+ CXXConstructExpr::CK_Complete,
+ SourceRange());
+
+ // If we're supposed to bind temporaries, do so.
+ if (!CurInit.isInvalid() && shouldBindAsTemporary(Entity))
+ CurInit = S.MaybeBindToTemporary(CurInit.takeAs<Expr>());
+ return move(CurInit);
+}
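+
+// Editor's illustrative note (a sketch, not part of this change; 'X' is a
+// hypothetical class type): the IsExtraneousCopy case corresponds to the
+// C++03 rule that
+//
+//   const X &r = X();
+//
+// requires an accessible copy constructor even though the copy is elided and
+// never actually built here; C++11 drops that requirement, and the
+// -Wc++98-compat check below (CheckCXX98CompatAccessibleCopy) reports when
+// code relies on the relaxation.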
+
+/// \brief Check whether elidable copy construction for binding a reference to
+/// a temporary would have succeeded if we were building in C++98 mode, for
+/// -Wc++98-compat.
+static void CheckCXX98CompatAccessibleCopy(Sema &S,
+ const InitializedEntity &Entity,
+ Expr *CurInitExpr) {
+ assert(S.getLangOpts().CPlusPlus0x);
+
+ const RecordType *Record = CurInitExpr->getType()->getAs<RecordType>();
+ if (!Record)
+ return;
+
+ SourceLocation Loc = getInitializationLoc(Entity, CurInitExpr);
+ if (S.Diags.getDiagnosticLevel(diag::warn_cxx98_compat_temp_copy, Loc)
+ == DiagnosticsEngine::Ignored)
+ return;
+
+ // Find constructors which would have been considered.
+ OverloadCandidateSet CandidateSet(Loc);
+ LookupCopyAndMoveConstructors(
+ S, CandidateSet, cast<CXXRecordDecl>(Record->getDecl()), CurInitExpr);
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ OverloadingResult OR = CandidateSet.BestViableFunction(S, Loc, Best);
+
+ PartialDiagnostic Diag = S.PDiag(diag::warn_cxx98_compat_temp_copy)
+ << OR << (int)Entity.getKind() << CurInitExpr->getType()
+ << CurInitExpr->getSourceRange();
+
+ switch (OR) {
+ case OR_Success:
+ S.CheckConstructorAccess(Loc, cast<CXXConstructorDecl>(Best->Function),
+ Entity, Best->FoundDecl.getAccess(), Diag);
+ // FIXME: Check default arguments as far as that's possible.
+ break;
+
+ case OR_No_Viable_Function:
+ S.Diag(Loc, Diag);
+ CandidateSet.NoteCandidates(S, OCD_AllCandidates, CurInitExpr);
+ break;
+
+ case OR_Ambiguous:
+ S.Diag(Loc, Diag);
+ CandidateSet.NoteCandidates(S, OCD_ViableCandidates, CurInitExpr);
+ break;
+
+ case OR_Deleted:
+ S.Diag(Loc, Diag);
+ S.NoteDeletedFunction(Best->Function);
+ break;
+ }
+}
+
+void InitializationSequence::PrintInitLocationNote(Sema &S,
+ const InitializedEntity &Entity) {
+ if (Entity.getKind() == InitializedEntity::EK_Parameter && Entity.getDecl()) {
+ if (Entity.getDecl()->getLocation().isInvalid())
+ return;
+
+ if (Entity.getDecl()->getDeclName())
+ S.Diag(Entity.getDecl()->getLocation(), diag::note_parameter_named_here)
+ << Entity.getDecl()->getDeclName();
+ else
+ S.Diag(Entity.getDecl()->getLocation(), diag::note_parameter_here);
+ }
+}
+
+static bool isReferenceBinding(const InitializationSequence::Step &s) {
+ return s.Kind == InitializationSequence::SK_BindReference ||
+ s.Kind == InitializationSequence::SK_BindReferenceToTemporary;
+}
+
+static ExprResult
+PerformConstructorInitialization(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ MultiExprArg Args,
+ const InitializationSequence::Step& Step,
+ bool &ConstructorInitRequiresZeroInit) {
+ unsigned NumArgs = Args.size();
+ CXXConstructorDecl *Constructor
+ = cast<CXXConstructorDecl>(Step.Function.Function);
+ bool HadMultipleCandidates = Step.Function.HadMultipleCandidates;
+
+ // Build a call to the selected constructor.
+ ASTOwningVector<Expr*> ConstructorArgs(S);
+ SourceLocation Loc = (Kind.isCopyInit() && Kind.getEqualLoc().isValid())
+ ? Kind.getEqualLoc()
+ : Kind.getLocation();
+
+ if (Kind.getKind() == InitializationKind::IK_Default) {
+ // Force even a trivial, implicit default constructor to be
+ // semantically checked. We do this explicitly because we don't build
+ // the definition for completely trivial constructors.
+ assert(Constructor->getParent() && "No parent class for constructor.");
+ if (Constructor->isDefaulted() && Constructor->isDefaultConstructor() &&
+ Constructor->isTrivial() && !Constructor->isUsed(false))
+ S.DefineImplicitDefaultConstructor(Loc, Constructor);
+ }
+
+ ExprResult CurInit = S.Owned((Expr *)0);
+
+ // C++ [over.match.copy]p1:
+ // - When initializing a temporary to be bound to the first parameter
+ // of a constructor that takes a reference to possibly cv-qualified
+ // T as its first argument, called with a single argument in the
+ // context of direct-initialization, explicit conversion functions
+ // are also considered.
+ bool AllowExplicitConv = Kind.AllowExplicit() && !Kind.isCopyInit() &&
+ Args.size() == 1 &&
+ Constructor->isCopyOrMoveConstructor();
+
+ // Determine the arguments required to actually perform the constructor
+ // call.
+ if (S.CompleteConstructorCall(Constructor, move(Args),
+ Loc, ConstructorArgs,
+ AllowExplicitConv))
+ return ExprError();
+
+ if (Entity.getKind() == InitializedEntity::EK_Temporary &&
+ (Kind.getKind() == InitializationKind::IK_DirectList ||
+ (NumArgs != 1 && // FIXME: Hack to work around cast weirdness
+ (Kind.getKind() == InitializationKind::IK_Direct ||
+ Kind.getKind() == InitializationKind::IK_Value)))) {
+ // An explicitly-constructed temporary, e.g., X(1, 2).
+ unsigned NumExprs = ConstructorArgs.size();
+ Expr **Exprs = (Expr **)ConstructorArgs.take();
+ S.MarkFunctionReferenced(Loc, Constructor);
+ S.DiagnoseUseOfDecl(Constructor, Loc);
+
+ TypeSourceInfo *TSInfo = Entity.getTypeSourceInfo();
+ if (!TSInfo)
+ TSInfo = S.Context.getTrivialTypeSourceInfo(Entity.getType(), Loc);
+ SourceRange ParenRange;
+ if (Kind.getKind() != InitializationKind::IK_DirectList)
+ ParenRange = Kind.getParenRange();
+
+ CurInit = S.Owned(new (S.Context) CXXTemporaryObjectExpr(S.Context,
+ Constructor,
+ TSInfo,
+ Exprs,
+ NumExprs,
+ ParenRange,
+ HadMultipleCandidates,
+ ConstructorInitRequiresZeroInit));
+ } else {
+ CXXConstructExpr::ConstructionKind ConstructKind =
+ CXXConstructExpr::CK_Complete;
+
+ if (Entity.getKind() == InitializedEntity::EK_Base) {
+ ConstructKind = Entity.getBaseSpecifier()->isVirtual() ?
+ CXXConstructExpr::CK_VirtualBase :
+ CXXConstructExpr::CK_NonVirtualBase;
+ } else if (Entity.getKind() == InitializedEntity::EK_Delegating) {
+ ConstructKind = CXXConstructExpr::CK_Delegating;
+ }
+
+ // Only get the parenthesis range if it is a direct construction.
+ SourceRange parenRange =
+ Kind.getKind() == InitializationKind::IK_Direct ?
+ Kind.getParenRange() : SourceRange();
+
+ // If the entity allows NRVO, mark the construction as elidable
+ // unconditionally.
+ if (Entity.allowsNRVO())
+ CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(),
+ Constructor, /*Elidable=*/true,
+ move_arg(ConstructorArgs),
+ HadMultipleCandidates,
+ ConstructorInitRequiresZeroInit,
+ ConstructKind,
+ parenRange);
+ else
+ CurInit = S.BuildCXXConstructExpr(Loc, Entity.getType(),
+ Constructor,
+ move_arg(ConstructorArgs),
+ HadMultipleCandidates,
+ ConstructorInitRequiresZeroInit,
+ ConstructKind,
+ parenRange);
+ }
+ if (CurInit.isInvalid())
+ return ExprError();
+
+ // Only check access if all of that succeeded.
+ S.CheckConstructorAccess(Loc, Constructor, Entity,
+ Step.Function.FoundDecl.getAccess());
+ S.DiagnoseUseOfDecl(Step.Function.FoundDecl, Loc);
+
+ if (shouldBindAsTemporary(Entity))
+ CurInit = S.MaybeBindToTemporary(CurInit.takeAs<Expr>());
+
+ return move(CurInit);
+}
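+
+// Editor's illustrative note (a sketch, not part of this change; 'X' and 'Y'
+// are hypothetical): the AllowExplicitConv case above implements
+// [over.match.copy]p1, e.g.
+//
+//   struct X { X(const X&); };
+//   struct Y { explicit operator X() const; };
+//   Y y;
+//   X a(y);    // direct-init: the explicit conversion function is usable
+//   X b = y;   // copy-init: it is not considered, so this is ill-formed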
+
+ExprResult
+InitializationSequence::Perform(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ MultiExprArg Args,
+ QualType *ResultType) {
+ if (Failed()) {
+ unsigned NumArgs = Args.size();
+ Diagnose(S, Entity, Kind, (Expr **)Args.release(), NumArgs);
+ return ExprError();
+ }
+
+ if (getKind() == DependentSequence) {
+ // If the declaration has a non-dependent, incomplete array type
+ // that has an initializer, then its type will be completed once
+ // the initializer is instantiated.
+ if (ResultType && !Entity.getType()->isDependentType() &&
+ Args.size() == 1) {
+ QualType DeclType = Entity.getType();
+ if (const IncompleteArrayType *ArrayT
+ = S.Context.getAsIncompleteArrayType(DeclType)) {
+ // FIXME: We don't currently have the ability to accurately
+ // compute the length of an initializer list without
+ // performing full type-checking of the initializer list
+ // (since we have to determine where braces are implicitly
+ // introduced and such). So, we fall back to making the array
+ // type a dependently-sized array type with no specified
+ // bound.
+ if (isa<InitListExpr>((Expr *)Args.get()[0])) {
+ SourceRange Brackets;
+
+ // Scavenge the location of the brackets from the entity, if we can.
+ if (DeclaratorDecl *DD = Entity.getDecl()) {
+ if (TypeSourceInfo *TInfo = DD->getTypeSourceInfo()) {
+ TypeLoc TL = TInfo->getTypeLoc();
+ if (IncompleteArrayTypeLoc *ArrayLoc
+ = dyn_cast<IncompleteArrayTypeLoc>(&TL))
+ Brackets = ArrayLoc->getBracketsRange();
+ }
+ }
+
+ *ResultType
+ = S.Context.getDependentSizedArrayType(ArrayT->getElementType(),
+ /*NumElts=*/0,
+ ArrayT->getSizeModifier(),
+ ArrayT->getIndexTypeCVRQualifiers(),
+ Brackets);
+ }
+
+ }
+ }
+ if (Kind.getKind() == InitializationKind::IK_Direct &&
+ !Kind.isExplicitCast()) {
+ // Rebuild the ParenListExpr.
+ SourceRange ParenRange = Kind.getParenRange();
+ return S.ActOnParenListExpr(ParenRange.getBegin(), ParenRange.getEnd(),
+ move(Args));
+ }
+ assert(Kind.getKind() == InitializationKind::IK_Copy ||
+ Kind.isExplicitCast() ||
+ Kind.getKind() == InitializationKind::IK_DirectList);
+ return ExprResult(Args.release()[0]);
+ }
+
+ // No steps means no initialization.
+ if (Steps.empty())
+ return S.Owned((Expr *)0);
+
+ QualType DestType = Entity.getType().getNonReferenceType();
+ // FIXME: Ugly hack around the fact that Entity.getType() is not
+ // the same as Entity.getDecl()->getType() in cases involving type merging,
+ // and we want the latter when it makes sense.
+ if (ResultType)
+ *ResultType = Entity.getDecl() ? Entity.getDecl()->getType() :
+ Entity.getType();
+
+ ExprResult CurInit = S.Owned((Expr *)0);
+
+ // For initialization steps that start with a single initializer,
+ // grab the only argument out of Args and place it into the "current"
+ // initializer.
+ switch (Steps.front().Kind) {
+ case SK_ResolveAddressOfOverloadedFunction:
+ case SK_CastDerivedToBaseRValue:
+ case SK_CastDerivedToBaseXValue:
+ case SK_CastDerivedToBaseLValue:
+ case SK_BindReference:
+ case SK_BindReferenceToTemporary:
+ case SK_ExtraneousCopyToTemporary:
+ case SK_UserConversion:
+ case SK_QualificationConversionLValue:
+ case SK_QualificationConversionXValue:
+ case SK_QualificationConversionRValue:
+ case SK_ConversionSequence:
+ case SK_ListConstructorCall:
+ case SK_ListInitialization:
+ case SK_UnwrapInitList:
+ case SK_RewrapInitList:
+ case SK_CAssignment:
+ case SK_StringInit:
+ case SK_ObjCObjectConversion:
+ case SK_ArrayInit:
+ case SK_ParenthesizedArrayInit:
+ case SK_PassByIndirectCopyRestore:
+ case SK_PassByIndirectRestore:
+ case SK_ProduceObjCObject:
+ case SK_StdInitializerList: {
+ assert(Args.size() == 1);
+ CurInit = Args.get()[0];
+ if (!CurInit.get()) return ExprError();
+ break;
+ }
+
+ case SK_ConstructorInitialization:
+ case SK_ZeroInitialization:
+ break;
+ }
+
+ // Walk through the computed steps for the initialization sequence,
+ // performing the specified conversions along the way.
+ bool ConstructorInitRequiresZeroInit = false;
+ for (step_iterator Step = step_begin(), StepEnd = step_end();
+ Step != StepEnd; ++Step) {
+ if (CurInit.isInvalid())
+ return ExprError();
+
+ QualType SourceType = CurInit.get() ? CurInit.get()->getType() : QualType();
+
+ switch (Step->Kind) {
+ case SK_ResolveAddressOfOverloadedFunction:
+ // Overload resolution determined which function to invoke; update the
+ // initializer to reflect that choice.
+ S.CheckAddressOfMemberAccess(CurInit.get(), Step->Function.FoundDecl);
+ S.DiagnoseUseOfDecl(Step->Function.FoundDecl, Kind.getLocation());
+ CurInit = S.FixOverloadedFunctionReference(move(CurInit),
+ Step->Function.FoundDecl,
+ Step->Function.Function);
+ break;
+
+ case SK_CastDerivedToBaseRValue:
+ case SK_CastDerivedToBaseXValue:
+ case SK_CastDerivedToBaseLValue: {
+ // We have a derived-to-base cast that produces either an rvalue or an
+ // lvalue. Perform that cast.
+
+ CXXCastPath BasePath;
+
+ // Casts to inaccessible base classes are allowed with C-style casts.
+ bool IgnoreBaseAccess = Kind.isCStyleOrFunctionalCast();
+ if (S.CheckDerivedToBaseConversion(SourceType, Step->Type,
+ CurInit.get()->getLocStart(),
+ CurInit.get()->getSourceRange(),
+ &BasePath, IgnoreBaseAccess))
+ return ExprError();
+
+ if (S.BasePathInvolvesVirtualBase(BasePath)) {
+ QualType T = SourceType;
+ if (const PointerType *Pointer = T->getAs<PointerType>())
+ T = Pointer->getPointeeType();
+ if (const RecordType *RecordTy = T->getAs<RecordType>())
+ S.MarkVTableUsed(CurInit.get()->getLocStart(),
+ cast<CXXRecordDecl>(RecordTy->getDecl()));
+ }
+
+ ExprValueKind VK =
+ Step->Kind == SK_CastDerivedToBaseLValue ?
+ VK_LValue :
+ (Step->Kind == SK_CastDerivedToBaseXValue ?
+ VK_XValue :
+ VK_RValue);
+ CurInit = S.Owned(ImplicitCastExpr::Create(S.Context,
+ Step->Type,
+ CK_DerivedToBase,
+ CurInit.get(),
+ &BasePath, VK));
+ break;
+ }
+
+ case SK_BindReference:
+ if (FieldDecl *BitField = CurInit.get()->getBitField()) {
+ // References cannot bind to bit fields (C++ [dcl.init.ref]p5).
+ S.Diag(Kind.getLocation(), diag::err_reference_bind_to_bitfield)
+ << Entity.getType().isVolatileQualified()
+ << BitField->getDeclName()
+ << CurInit.get()->getSourceRange();
+ S.Diag(BitField->getLocation(), diag::note_bitfield_decl);
+ return ExprError();
+ }
+
+ if (CurInit.get()->refersToVectorElement()) {
+ // References cannot bind to vector elements.
+ S.Diag(Kind.getLocation(), diag::err_reference_bind_to_vector_element)
+ << Entity.getType().isVolatileQualified()
+ << CurInit.get()->getSourceRange();
+ PrintInitLocationNote(S, Entity);
+ return ExprError();
+ }
+
+ // Reference binding does not have any corresponding ASTs.
+
+ // Check exception specifications
+ if (S.CheckExceptionSpecCompatibility(CurInit.get(), DestType))
+ return ExprError();
+
+ break;
+
+ case SK_BindReferenceToTemporary:
+ // Check exception specifications
+ if (S.CheckExceptionSpecCompatibility(CurInit.get(), DestType))
+ return ExprError();
+
+ // Materialize the temporary into memory.
+ CurInit = new (S.Context) MaterializeTemporaryExpr(
+ Entity.getType().getNonReferenceType(),
+ CurInit.get(),
+ Entity.getType()->isLValueReferenceType());
+
+ // If we're binding to an Objective-C object that has lifetime, we
+ // need cleanups.
+ if (S.getLangOpts().ObjCAutoRefCount &&
+ CurInit.get()->getType()->isObjCLifetimeType())
+ S.ExprNeedsCleanups = true;
+
+ break;
+
+ case SK_ExtraneousCopyToTemporary:
+ CurInit = CopyObject(S, Step->Type, Entity, move(CurInit),
+ /*IsExtraneousCopy=*/true);
+ break;
+
+ case SK_UserConversion: {
+ // We have a user-defined conversion that invokes either a constructor
+ // or a conversion function.
+ CastKind CastKind;
+ bool IsCopy = false;
+ FunctionDecl *Fn = Step->Function.Function;
+ DeclAccessPair FoundFn = Step->Function.FoundDecl;
+ bool HadMultipleCandidates = Step->Function.HadMultipleCandidates;
+ bool CreatedObject = false;
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Fn)) {
+ // Build a call to the selected constructor.
+ ASTOwningVector<Expr*> ConstructorArgs(S);
+ SourceLocation Loc = CurInit.get()->getLocStart();
+ CurInit.release(); // Ownership transferred into MultiExprArg, below.
+
+ // Determine the arguments required to actually perform the constructor
+ // call.
+ Expr *Arg = CurInit.get();
+ if (S.CompleteConstructorCall(Constructor,
+ MultiExprArg(&Arg, 1),
+ Loc, ConstructorArgs))
+ return ExprError();
+
+ // Build an expression that constructs a temporary.
+ CurInit = S.BuildCXXConstructExpr(Loc, Step->Type, Constructor,
+ move_arg(ConstructorArgs),
+ HadMultipleCandidates,
+ /*ZeroInit*/ false,
+ CXXConstructExpr::CK_Complete,
+ SourceRange());
+ if (CurInit.isInvalid())
+ return ExprError();
+
+ S.CheckConstructorAccess(Kind.getLocation(), Constructor, Entity,
+ FoundFn.getAccess());
+ S.DiagnoseUseOfDecl(FoundFn, Kind.getLocation());
+
+ CastKind = CK_ConstructorConversion;
+ QualType Class = S.Context.getTypeDeclType(Constructor->getParent());
+ if (S.Context.hasSameUnqualifiedType(SourceType, Class) ||
+ S.IsDerivedFrom(SourceType, Class))
+ IsCopy = true;
+
+ CreatedObject = true;
+ } else {
+ // Build a call to the conversion function.
+ CXXConversionDecl *Conversion = cast<CXXConversionDecl>(Fn);
+ S.CheckMemberOperatorAccess(Kind.getLocation(), CurInit.get(), 0,
+ FoundFn);
+ S.DiagnoseUseOfDecl(FoundFn, Kind.getLocation());
+
+ // FIXME: Should we move this initialization into a separate
+ // derived-to-base conversion? I believe the answer is "no", because
+ // we don't want to turn off access control here for c-style casts.
+ ExprResult CurInitExprRes =
+ S.PerformObjectArgumentInitialization(CurInit.take(), /*Qualifier=*/0,
+ FoundFn, Conversion);
+ if(CurInitExprRes.isInvalid())
+ return ExprError();
+ CurInit = move(CurInitExprRes);
+
+ // Build the actual call to the conversion function.
+ CurInit = S.BuildCXXMemberCallExpr(CurInit.get(), FoundFn, Conversion,
+ HadMultipleCandidates);
+ if (CurInit.isInvalid() || !CurInit.get())
+ return ExprError();
+
+ CastKind = CK_UserDefinedConversion;
+
+ CreatedObject = Conversion->getResultType()->isRecordType();
+ }
+
+ bool RequiresCopy = !IsCopy && !isReferenceBinding(Steps.back());
+ bool MaybeBindToTemp = RequiresCopy || shouldBindAsTemporary(Entity);
+
+ if (!MaybeBindToTemp && CreatedObject && shouldDestroyTemporary(Entity)) {
+ QualType T = CurInit.get()->getType();
+ if (const RecordType *Record = T->getAs<RecordType>()) {
+ CXXDestructorDecl *Destructor
+ = S.LookupDestructor(cast<CXXRecordDecl>(Record->getDecl()));
+ S.CheckDestructorAccess(CurInit.get()->getLocStart(), Destructor,
+ S.PDiag(diag::err_access_dtor_temp) << T);
+ S.MarkFunctionReferenced(CurInit.get()->getLocStart(), Destructor);
+ S.DiagnoseUseOfDecl(Destructor, CurInit.get()->getLocStart());
+ }
+ }
+
+ CurInit = S.Owned(ImplicitCastExpr::Create(S.Context,
+ CurInit.get()->getType(),
+ CastKind, CurInit.get(), 0,
+ CurInit.get()->getValueKind()));
+ if (MaybeBindToTemp)
+ CurInit = S.MaybeBindToTemporary(CurInit.takeAs<Expr>());
+ if (RequiresCopy)
+ CurInit = CopyObject(S, Entity.getType().getNonReferenceType(), Entity,
+ move(CurInit), /*IsExtraneousCopy=*/false);
+ break;
+ }
+
+ case SK_QualificationConversionLValue:
+ case SK_QualificationConversionXValue:
+ case SK_QualificationConversionRValue: {
+ // Perform a qualification conversion; these can never go wrong.
+ ExprValueKind VK =
+ Step->Kind == SK_QualificationConversionLValue ?
+ VK_LValue :
+ (Step->Kind == SK_QualificationConversionXValue ?
+ VK_XValue :
+ VK_RValue);
+ CurInit = S.ImpCastExprToType(CurInit.take(), Step->Type, CK_NoOp, VK);
+ break;
+ }
+
+ case SK_ConversionSequence: {
+ Sema::CheckedConversionKind CCK
+ = Kind.isCStyleCast()? Sema::CCK_CStyleCast
+ : Kind.isFunctionalCast()? Sema::CCK_FunctionalCast
+ : Kind.isExplicitCast()? Sema::CCK_OtherCast
+ : Sema::CCK_ImplicitConversion;
+ ExprResult CurInitExprRes =
+ S.PerformImplicitConversion(CurInit.get(), Step->Type, *Step->ICS,
+ getAssignmentAction(Entity), CCK);
+ if (CurInitExprRes.isInvalid())
+ return ExprError();
+ CurInit = move(CurInitExprRes);
+ break;
+ }
+
+ case SK_ListInitialization: {
+ InitListExpr *InitList = cast<InitListExpr>(CurInit.get());
+ // Hack: We must pass *ResultType if available in order to set the type
+ // of arrays, e.g. in 'int ar[] = {1, 2, 3};'.
+ // But in 'const X &x = {1, 2, 3};' we're supposed to initialize a
+ // temporary, not a reference, so we should pass Ty.
+ // Worst case: 'const int (&arref)[] = {1, 2, 3};'.
+ // Since this step is never used for a reference directly, we explicitly
+ // unwrap references here and rewrap them afterwards.
+ // We also need to create an InitializeTemporary entity for this.
+ QualType Ty = ResultType ? ResultType->getNonReferenceType() : Step->Type;
+ bool IsTemporary = Entity.getType()->isReferenceType();
+ InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(Ty);
+ InitListChecker PerformInitList(S, IsTemporary ? TempEntity : Entity,
+ InitList, Ty, /*VerifyOnly=*/false,
+ Kind.getKind() != InitializationKind::IK_DirectList ||
+ !S.getLangOpts().CPlusPlus0x);
+ if (PerformInitList.HadError())
+ return ExprError();
+
+ if (ResultType) {
+ if ((*ResultType)->isRValueReferenceType())
+ Ty = S.Context.getRValueReferenceType(Ty);
+ else if ((*ResultType)->isLValueReferenceType())
+ Ty = S.Context.getLValueReferenceType(Ty,
+ (*ResultType)->getAs<LValueReferenceType>()->isSpelledAsLValue());
+ *ResultType = Ty;
+ }
+
+ InitListExpr *StructuredInitList =
+ PerformInitList.getFullyStructuredList();
+ CurInit.release();
+ CurInit = S.Owned(StructuredInitList);
+ break;
+ }
+
+ case SK_ListConstructorCall: {
+ // When an initializer list is passed for a parameter of type "reference
+ // to object", we don't get an EK_Temporary entity, but instead an
+ // EK_Parameter entity with reference type.
+ // FIXME: This is a hack. What we really should do is create a user
+ // conversion step for this case, but this makes it considerably more
+ // complicated. For now, this will do.
+ InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(
+ Entity.getType().getNonReferenceType());
+ bool UseTemporary = Entity.getType()->isReferenceType();
+ InitListExpr *InitList = cast<InitListExpr>(CurInit.get());
+ MultiExprArg Arg(InitList->getInits(), InitList->getNumInits());
+ CurInit = PerformConstructorInitialization(S, UseTemporary ? TempEntity :
+ Entity,
+ Kind, move(Arg), *Step,
+ ConstructorInitRequiresZeroInit);
+ break;
+ }
+
+ case SK_UnwrapInitList:
+ CurInit = S.Owned(cast<InitListExpr>(CurInit.take())->getInit(0));
+ break;
+
+ case SK_RewrapInitList: {
+ Expr *E = CurInit.take();
+ InitListExpr *Syntactic = Step->WrappingSyntacticList;
+ InitListExpr *ILE = new (S.Context) InitListExpr(S.Context,
+ Syntactic->getLBraceLoc(), &E, 1, Syntactic->getRBraceLoc());
+ ILE->setSyntacticForm(Syntactic);
+ ILE->setType(E->getType());
+ ILE->setValueKind(E->getValueKind());
+ CurInit = S.Owned(ILE);
+ break;
+ }
+
+ case SK_ConstructorInitialization: {
+ // When an initializer list is passed for a parameter of type "reference
+ // to object", we don't get an EK_Temporary entity, but instead an
+ // EK_Parameter entity with reference type.
+ // FIXME: This is a hack. What we really should do is create a user
+ // conversion step for this case, but this makes it considerably more
+ // complicated. For now, this will do.
+ InitializedEntity TempEntity = InitializedEntity::InitializeTemporary(
+ Entity.getType().getNonReferenceType());
+ bool UseTemporary = Entity.getType()->isReferenceType();
+ CurInit = PerformConstructorInitialization(S, UseTemporary ? TempEntity
+ : Entity,
+ Kind, move(Args), *Step,
+ ConstructorInitRequiresZeroInit);
+ break;
+ }
+
+ case SK_ZeroInitialization: {
+ step_iterator NextStep = Step;
+ ++NextStep;
+ if (NextStep != StepEnd &&
+ NextStep->Kind == SK_ConstructorInitialization) {
+ // The need for zero-initialization is recorded directly into
+ // the call to the object's constructor within the next step.
+ ConstructorInitRequiresZeroInit = true;
+ } else if (Kind.getKind() == InitializationKind::IK_Value &&
+ S.getLangOpts().CPlusPlus &&
+ !Kind.isImplicitValueInit()) {
+ TypeSourceInfo *TSInfo = Entity.getTypeSourceInfo();
+ if (!TSInfo)
+ TSInfo = S.Context.getTrivialTypeSourceInfo(Step->Type,
+ Kind.getRange().getBegin());
+
+ CurInit = S.Owned(new (S.Context) CXXScalarValueInitExpr(
+ TSInfo->getType().getNonLValueExprType(S.Context),
+ TSInfo,
+ Kind.getRange().getEnd()));
+ } else {
+ CurInit = S.Owned(new (S.Context) ImplicitValueInitExpr(Step->Type));
+ }
+ break;
+ }
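+
+ // Editor's illustrative note (a sketch, not part of this change): for a
+ // scalar value-initialization such as 'int()' the step above materializes
+ // a CXXScalarValueInitExpr, while for a class such as 'struct S { int i; };'
+ // value-initialized as 'S()' the zero-initialization is instead folded into
+ // the following SK_ConstructorInitialization step through
+ // ConstructorInitRequiresZeroInit.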
+
+ case SK_CAssignment: {
+ QualType SourceType = CurInit.get()->getType();
+ ExprResult Result = move(CurInit);
+ Sema::AssignConvertType ConvTy =
+ S.CheckSingleAssignmentConstraints(Step->Type, Result);
+ if (Result.isInvalid())
+ return ExprError();
+ CurInit = move(Result);
+
+ // If this is a call, allow conversion to a transparent union.
+ ExprResult CurInitExprRes = move(CurInit);
+ if (ConvTy != Sema::Compatible &&
+ Entity.getKind() == InitializedEntity::EK_Parameter &&
+ S.CheckTransparentUnionArgumentConstraints(Step->Type, CurInitExprRes)
+ == Sema::Compatible)
+ ConvTy = Sema::Compatible;
+ if (CurInitExprRes.isInvalid())
+ return ExprError();
+ CurInit = move(CurInitExprRes);
+
+ bool Complained;
+ if (S.DiagnoseAssignmentResult(ConvTy, Kind.getLocation(),
+ Step->Type, SourceType,
+ CurInit.get(),
+ getAssignmentAction(Entity),
+ &Complained)) {
+ PrintInitLocationNote(S, Entity);
+ return ExprError();
+ } else if (Complained)
+ PrintInitLocationNote(S, Entity);
+ break;
+ }
+
+ case SK_StringInit: {
+ QualType Ty = Step->Type;
+ CheckStringInit(CurInit.get(), ResultType ? *ResultType : Ty,
+ S.Context.getAsArrayType(Ty), S);
+ break;
+ }
+
+ case SK_ObjCObjectConversion:
+ CurInit = S.ImpCastExprToType(CurInit.take(), Step->Type,
+ CK_ObjCObjectLValueCast,
+ CurInit.get()->getValueKind());
+ break;
+
+ case SK_ArrayInit:
+ // Okay: we checked everything before creating this step. Note that
+ // this is a GNU extension.
+ S.Diag(Kind.getLocation(), diag::ext_array_init_copy)
+ << Step->Type << CurInit.get()->getType()
+ << CurInit.get()->getSourceRange();
+
+ // If the destination type is an incomplete array type, update the
+ // type accordingly.
+ if (ResultType) {
+ if (const IncompleteArrayType *IncompleteDest
+ = S.Context.getAsIncompleteArrayType(Step->Type)) {
+ if (const ConstantArrayType *ConstantSource
+ = S.Context.getAsConstantArrayType(CurInit.get()->getType())) {
+ *ResultType = S.Context.getConstantArrayType(
+ IncompleteDest->getElementType(),
+ ConstantSource->getSize(),
+ ArrayType::Normal, 0);
+ }
+ }
+ }
+ break;
+
+ case SK_ParenthesizedArrayInit:
+ // Okay: we checked everything before creating this step. Note that
+ // this is a GNU extension.
+ S.Diag(Kind.getLocation(), diag::ext_array_init_parens)
+ << CurInit.get()->getSourceRange();
+ break;
+
+ case SK_PassByIndirectCopyRestore:
+ case SK_PassByIndirectRestore:
+ checkIndirectCopyRestoreSource(S, CurInit.get());
+ CurInit = S.Owned(new (S.Context)
+ ObjCIndirectCopyRestoreExpr(CurInit.take(), Step->Type,
+ Step->Kind == SK_PassByIndirectCopyRestore));
+ break;
+
+ case SK_ProduceObjCObject:
+ CurInit = S.Owned(ImplicitCastExpr::Create(S.Context, Step->Type,
+ CK_ARCProduceObject,
+ CurInit.take(), 0, VK_RValue));
+ break;
+
+ case SK_StdInitializerList: {
+ QualType Dest = Step->Type;
+ QualType E;
+ bool Success = S.isStdInitializerList(Dest, &E);
+ (void)Success;
+ assert(Success && "Destination type changed?");
+
+ // If the element type has a destructor, check it.
+ if (CXXRecordDecl *RD = E->getAsCXXRecordDecl()) {
+ if (!RD->hasIrrelevantDestructor()) {
+ if (CXXDestructorDecl *Destructor = S.LookupDestructor(RD)) {
+ S.MarkFunctionReferenced(Kind.getLocation(), Destructor);
+ S.CheckDestructorAccess(Kind.getLocation(), Destructor,
+ S.PDiag(diag::err_access_dtor_temp) << E);
+ S.DiagnoseUseOfDecl(Destructor, Kind.getLocation());
+ }
+ }
+ }
+
+ InitListExpr *ILE = cast<InitListExpr>(CurInit.take());
+ unsigned NumInits = ILE->getNumInits();
+ SmallVector<Expr*, 16> Converted(NumInits);
+ InitializedEntity HiddenArray = InitializedEntity::InitializeTemporary(
+ S.Context.getConstantArrayType(E,
+ llvm::APInt(S.Context.getTypeSize(S.Context.getSizeType()),
+ NumInits),
+ ArrayType::Normal, 0));
+ InitializedEntity Element =InitializedEntity::InitializeElement(S.Context,
+ 0, HiddenArray);
+ for (unsigned i = 0; i < NumInits; ++i) {
+ Element.setElementIndex(i);
+ ExprResult Init = S.Owned(ILE->getInit(i));
+ ExprResult Res = S.PerformCopyInitialization(Element,
+ Init.get()->getExprLoc(),
+ Init);
+ assert(!Res.isInvalid() && "Result changed since try phase.");
+ Converted[i] = Res.take();
+ }
+ InitListExpr *Semantic = new (S.Context)
+ InitListExpr(S.Context, ILE->getLBraceLoc(),
+ Converted.data(), NumInits, ILE->getRBraceLoc());
+ Semantic->setSyntacticForm(ILE);
+ Semantic->setType(Dest);
+ Semantic->setInitializesStdInitializerList();
+ CurInit = S.Owned(Semantic);
+ break;
+ }
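+
+ // Editor's illustrative note (a sketch, not part of this change): for
+ //   std::initializer_list<int> il = {1, 2, 3};
+ // the step above copy-initializes each element of a hidden backing array of
+ // three ints and marks the resulting semantic InitListExpr with
+ // setInitializesStdInitializerList().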
+ }
+ }
+
+ // Diagnose non-fatal problems with the completed initialization.
+ if (Entity.getKind() == InitializedEntity::EK_Member &&
+ cast<FieldDecl>(Entity.getDecl())->isBitField())
+ S.CheckBitFieldInitialization(Kind.getLocation(),
+ cast<FieldDecl>(Entity.getDecl()),
+ CurInit.get());
+
+ return move(CurInit);
+}
+
+//===----------------------------------------------------------------------===//
+// Diagnose initialization failures
+//===----------------------------------------------------------------------===//
+bool InitializationSequence::Diagnose(Sema &S,
+ const InitializedEntity &Entity,
+ const InitializationKind &Kind,
+ Expr **Args, unsigned NumArgs) {
+ if (!Failed())
+ return false;
+
+ QualType DestType = Entity.getType();
+ switch (Failure) {
+ case FK_TooManyInitsForReference:
+ // FIXME: Customize for the initialized entity?
+ if (NumArgs == 0)
+ S.Diag(Kind.getLocation(), diag::err_reference_without_init)
+ << DestType.getNonReferenceType();
+ else // FIXME: diagnostic below could be better!
+ S.Diag(Kind.getLocation(), diag::err_reference_has_multiple_inits)
+ << SourceRange(Args[0]->getLocStart(), Args[NumArgs - 1]->getLocEnd());
+ break;
+
+ case FK_ArrayNeedsInitList:
+ case FK_ArrayNeedsInitListOrStringLiteral:
+ S.Diag(Kind.getLocation(), diag::err_array_init_not_init_list)
+ << (Failure == FK_ArrayNeedsInitListOrStringLiteral);
+ break;
+
+ case FK_ArrayTypeMismatch:
+ case FK_NonConstantArrayInit:
+ S.Diag(Kind.getLocation(),
+ (Failure == FK_ArrayTypeMismatch
+ ? diag::err_array_init_different_type
+ : diag::err_array_init_non_constant_array))
+ << DestType.getNonReferenceType()
+ << Args[0]->getType()
+ << Args[0]->getSourceRange();
+ break;
+
+ case FK_VariableLengthArrayHasInitializer:
+ S.Diag(Kind.getLocation(), diag::err_variable_object_no_init)
+ << Args[0]->getSourceRange();
+ break;
+
+ case FK_AddressOfOverloadFailed: {
+ DeclAccessPair Found;
+ S.ResolveAddressOfOverloadedFunction(Args[0],
+ DestType.getNonReferenceType(),
+ true,
+ Found);
+ break;
+ }
+
+ case FK_ReferenceInitOverloadFailed:
+ case FK_UserConversionOverloadFailed:
+ switch (FailedOverloadResult) {
+ case OR_Ambiguous:
+ if (Failure == FK_UserConversionOverloadFailed)
+ S.Diag(Kind.getLocation(), diag::err_typecheck_ambiguous_condition)
+ << Args[0]->getType() << DestType
+ << Args[0]->getSourceRange();
+ else
+ S.Diag(Kind.getLocation(), diag::err_ref_init_ambiguous)
+ << DestType << Args[0]->getType()
+ << Args[0]->getSourceRange();
+
+ FailedCandidateSet.NoteCandidates(S, OCD_ViableCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
+ break;
+
+ case OR_No_Viable_Function:
+ S.Diag(Kind.getLocation(), diag::err_typecheck_nonviable_condition)
+ << Args[0]->getType() << DestType.getNonReferenceType()
+ << Args[0]->getSourceRange();
+ FailedCandidateSet.NoteCandidates(S, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
+ break;
+
+ case OR_Deleted: {
+ S.Diag(Kind.getLocation(), diag::err_typecheck_deleted_function)
+ << Args[0]->getType() << DestType.getNonReferenceType()
+ << Args[0]->getSourceRange();
+ OverloadCandidateSet::iterator Best;
+ OverloadingResult Ovl
+ = FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best,
+ true);
+ if (Ovl == OR_Deleted) {
+ S.NoteDeletedFunction(Best->Function);
+ } else {
+ llvm_unreachable("Inconsistent overload resolution?");
+ }
+ break;
+ }
+
+ case OR_Success:
+ llvm_unreachable("Conversion did not fail!");
+ }
+ break;
+
+ case FK_NonConstLValueReferenceBindingToTemporary:
+ if (isa<InitListExpr>(Args[0])) {
+ S.Diag(Kind.getLocation(),
+ diag::err_lvalue_reference_bind_to_initlist)
+ << DestType.getNonReferenceType().isVolatileQualified()
+ << DestType.getNonReferenceType()
+ << Args[0]->getSourceRange();
+ break;
+ }
+ // Intentional fallthrough
+
+ case FK_NonConstLValueReferenceBindingToUnrelated:
+ S.Diag(Kind.getLocation(),
+ Failure == FK_NonConstLValueReferenceBindingToTemporary
+ ? diag::err_lvalue_reference_bind_to_temporary
+ : diag::err_lvalue_reference_bind_to_unrelated)
+ << DestType.getNonReferenceType().isVolatileQualified()
+ << DestType.getNonReferenceType()
+ << Args[0]->getType()
+ << Args[0]->getSourceRange();
+ break;
+
+ case FK_RValueReferenceBindingToLValue:
+ S.Diag(Kind.getLocation(), diag::err_lvalue_to_rvalue_ref)
+ << DestType.getNonReferenceType() << Args[0]->getType()
+ << Args[0]->getSourceRange();
+ break;
+
+ case FK_ReferenceInitDropsQualifiers:
+ S.Diag(Kind.getLocation(), diag::err_reference_bind_drops_quals)
+ << DestType.getNonReferenceType()
+ << Args[0]->getType()
+ << Args[0]->getSourceRange();
+ break;
+
+ case FK_ReferenceInitFailed:
+ S.Diag(Kind.getLocation(), diag::err_reference_bind_failed)
+ << DestType.getNonReferenceType()
+ << Args[0]->isLValue()
+ << Args[0]->getType()
+ << Args[0]->getSourceRange();
+ if (DestType.getNonReferenceType()->isObjCObjectPointerType() &&
+ Args[0]->getType()->isObjCObjectPointerType())
+ S.EmitRelatedResultTypeNote(Args[0]);
+ break;
+
+ case FK_ConversionFailed: {
+ QualType FromType = Args[0]->getType();
+ PartialDiagnostic PDiag = S.PDiag(diag::err_init_conversion_failed)
+ << (int)Entity.getKind()
+ << DestType
+ << Args[0]->isLValue()
+ << FromType
+ << Args[0]->getSourceRange();
+ S.HandleFunctionTypeMismatch(PDiag, FromType, DestType);
+ S.Diag(Kind.getLocation(), PDiag);
+ if (DestType.getNonReferenceType()->isObjCObjectPointerType() &&
+ Args[0]->getType()->isObjCObjectPointerType())
+ S.EmitRelatedResultTypeNote(Args[0]);
+ break;
+ }
+
+ case FK_ConversionFromPropertyFailed:
+ // No-op. This error has already been reported.
+ break;
+
+ case FK_TooManyInitsForScalar: {
+ SourceRange R;
+
+ if (InitListExpr *InitList = dyn_cast<InitListExpr>(Args[0]))
+ R = SourceRange(InitList->getInit(0)->getLocEnd(),
+ InitList->getLocEnd());
+ else
+ R = SourceRange(Args[0]->getLocEnd(), Args[NumArgs - 1]->getLocEnd());
+
+ R.setBegin(S.PP.getLocForEndOfToken(R.getBegin()));
+ if (Kind.isCStyleOrFunctionalCast())
+ S.Diag(Kind.getLocation(), diag::err_builtin_func_cast_more_than_one_arg)
+ << R;
+ else
+ S.Diag(Kind.getLocation(), diag::err_excess_initializers)
+ << /*scalar=*/2 << R;
+ break;
+ }
+
+ case FK_ReferenceBindingToInitList:
+ S.Diag(Kind.getLocation(), diag::err_reference_bind_init_list)
+ << DestType.getNonReferenceType() << Args[0]->getSourceRange();
+ break;
+
+ case FK_InitListBadDestinationType:
+ S.Diag(Kind.getLocation(), diag::err_init_list_bad_dest_type)
+ << (DestType->isRecordType()) << DestType << Args[0]->getSourceRange();
+ break;
+
+ case FK_ListConstructorOverloadFailed:
+ case FK_ConstructorOverloadFailed: {
+ SourceRange ArgsRange;
+ if (NumArgs)
+ ArgsRange = SourceRange(Args[0]->getLocStart(),
+ Args[NumArgs - 1]->getLocEnd());
+
+ if (Failure == FK_ListConstructorOverloadFailed) {
+ assert(NumArgs == 1 && "List construction from other than 1 argument.");
+ InitListExpr *InitList = cast<InitListExpr>(Args[0]);
+ Args = InitList->getInits();
+ NumArgs = InitList->getNumInits();
+ }
+
+ // FIXME: Using "DestType" for the entity we're printing is probably
+ // bad.
+ switch (FailedOverloadResult) {
+ case OR_Ambiguous:
+ S.Diag(Kind.getLocation(), diag::err_ovl_ambiguous_init)
+ << DestType << ArgsRange;
+ FailedCandidateSet.NoteCandidates(S, OCD_ViableCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
+ break;
+
+ case OR_No_Viable_Function:
+ if (Kind.getKind() == InitializationKind::IK_Default &&
+ (Entity.getKind() == InitializedEntity::EK_Base ||
+ Entity.getKind() == InitializedEntity::EK_Member) &&
+ isa<CXXConstructorDecl>(S.CurContext)) {
+ // This is implicit default initialization of a member or
+ // base within a constructor. If no viable function was
+ // found, notify the user that she needs to explicitly
+ // initialize this base/member.
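+ // e.g. (illustrative):
+ //   struct B { B(int); };
+ //   struct D : B { D() {} };   // error: base 'B' has no default constructor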
+ CXXConstructorDecl *Constructor
+ = cast<CXXConstructorDecl>(S.CurContext);
+ if (Entity.getKind() == InitializedEntity::EK_Base) {
+ S.Diag(Kind.getLocation(), diag::err_missing_default_ctor)
+ << Constructor->isImplicit()
+ << S.Context.getTypeDeclType(Constructor->getParent())
+ << /*base=*/0
+ << Entity.getType();
+
+ RecordDecl *BaseDecl
+ = Entity.getBaseSpecifier()->getType()->getAs<RecordType>()
+ ->getDecl();
+ S.Diag(BaseDecl->getLocation(), diag::note_previous_decl)
+ << S.Context.getTagDeclType(BaseDecl);
+ } else {
+ S.Diag(Kind.getLocation(), diag::err_missing_default_ctor)
+ << Constructor->isImplicit()
+ << S.Context.getTypeDeclType(Constructor->getParent())
+ << /*member=*/1
+ << Entity.getName();
+ S.Diag(Entity.getDecl()->getLocation(), diag::note_field_decl);
+
+ if (const RecordType *Record
+ = Entity.getType()->getAs<RecordType>())
+ S.Diag(Record->getDecl()->getLocation(),
+ diag::note_previous_decl)
+ << S.Context.getTagDeclType(Record->getDecl());
+ }
+ break;
+ }
+
+ S.Diag(Kind.getLocation(), diag::err_ovl_no_viable_function_in_init)
+ << DestType << ArgsRange;
+ FailedCandidateSet.NoteCandidates(S, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
+ break;
+
+ case OR_Deleted: {
+ OverloadCandidateSet::iterator Best;
+ OverloadingResult Ovl
+ = FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best);
+ if (Ovl != OR_Deleted) {
+ S.Diag(Kind.getLocation(), diag::err_ovl_deleted_init)
+ << true << DestType << ArgsRange;
+ llvm_unreachable("Inconsistent overload resolution?");
+ break;
+ }
+
+ // If this is a defaulted or implicitly-declared function, then
+ // it was implicitly deleted. Make it clear that the deletion was
+ // implicit.
+ if (S.isImplicitlyDeleted(Best->Function))
+ S.Diag(Kind.getLocation(), diag::err_ovl_deleted_special_init)
+ << S.getSpecialMember(cast<CXXMethodDecl>(Best->Function))
+ << DestType << ArgsRange;
+ else
+ S.Diag(Kind.getLocation(), diag::err_ovl_deleted_init)
+ << true << DestType << ArgsRange;
+
+ S.NoteDeletedFunction(Best->Function);
+ break;
+ }
+
+ case OR_Success:
+ llvm_unreachable("Conversion did not fail!");
+ }
+ }
+ break;
+
+ case FK_DefaultInitOfConst:
+ if (Entity.getKind() == InitializedEntity::EK_Member &&
+ isa<CXXConstructorDecl>(S.CurContext)) {
+ // This is implicit default-initialization of a const member in
+ // a constructor. Complain that it needs to be explicitly
+ // initialized.
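+ // e.g. (illustrative):
+ //   struct S { const int c; S() {} };   // error: const member 'c' is not initialized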
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(S.CurContext);
+ S.Diag(Kind.getLocation(), diag::err_uninitialized_member_in_ctor)
+ << Constructor->isImplicit()
+ << S.Context.getTypeDeclType(Constructor->getParent())
+ << /*const=*/1
+ << Entity.getName();
+ S.Diag(Entity.getDecl()->getLocation(), diag::note_previous_decl)
+ << Entity.getName();
+ } else {
+ S.Diag(Kind.getLocation(), diag::err_default_init_const)
+ << DestType << (bool)DestType->getAs<RecordType>();
+ }
+ break;
+
+ case FK_Incomplete:
+ S.RequireCompleteType(Kind.getLocation(), FailedIncompleteType,
+ diag::err_init_incomplete_type);
+ break;
+
+ case FK_ListInitializationFailed: {
+ // Run the init list checker again to emit diagnostics.
+ InitListExpr* InitList = cast<InitListExpr>(Args[0]);
+ QualType DestType = Entity.getType();
+ InitListChecker DiagnoseInitList(S, Entity, InitList,
+ DestType, /*VerifyOnly=*/false,
+ Kind.getKind() != InitializationKind::IK_DirectList ||
+ !S.getLangOpts().CPlusPlus0x);
+ assert(DiagnoseInitList.HadError() &&
+ "Inconsistent init list check result.");
+ break;
+ }
+
+ case FK_PlaceholderType: {
+ // FIXME: Already diagnosed!
+ break;
+ }
+
+ case FK_InitListElementCopyFailure: {
+ // Try to perform all copies again.
+ InitListExpr* InitList = cast<InitListExpr>(Args[0]);
+ unsigned NumInits = InitList->getNumInits();
+ QualType DestType = Entity.getType();
+ QualType E;
+ bool Success = S.isStdInitializerList(DestType, &E);
+ (void)Success;
+ assert(Success && "Where did the std::initializer_list go?");
+ InitializedEntity HiddenArray = InitializedEntity::InitializeTemporary(
+ S.Context.getConstantArrayType(E,
+ llvm::APInt(S.Context.getTypeSize(S.Context.getSizeType()),
+ NumInits),
+ ArrayType::Normal, 0));
+ InitializedEntity Element = InitializedEntity::InitializeElement(S.Context,
+ 0, HiddenArray);
+ // Show at most 3 errors. Otherwise, you'd get a flood of errors for cases
+ // where the init list type is wrong, e.g.
+ // std::initializer_list<void*> list = { 1, 2, 3, 4, 5, 6, 7, 8 };
+ // FIXME: Emit a note if we hit the limit?
+ int ErrorCount = 0;
+ for (unsigned i = 0; i < NumInits && ErrorCount < 3; ++i) {
+ Element.setElementIndex(i);
+ ExprResult Init = S.Owned(InitList->getInit(i));
+ if (S.PerformCopyInitialization(Element, Init.get()->getExprLoc(), Init)
+ .isInvalid())
+ ++ErrorCount;
+ }
+ break;
+ }
+
+ case FK_ExplicitConstructor: {
+ S.Diag(Kind.getLocation(), diag::err_selected_explicit_constructor)
+ << Args[0]->getSourceRange();
+ OverloadCandidateSet::iterator Best;
+ OverloadingResult Ovl
+ = FailedCandidateSet.BestViableFunction(S, Kind.getLocation(), Best);
+ (void)Ovl;
+ assert(Ovl == OR_Success && "Inconsistent overload resolution");
+ CXXConstructorDecl *CtorDecl = cast<CXXConstructorDecl>(Best->Function);
+ S.Diag(CtorDecl->getLocation(), diag::note_constructor_declared_here);
+ break;
+ }
+ }
+
+ PrintInitLocationNote(S, Entity);
+ return true;
+}
+
+void InitializationSequence::dump(raw_ostream &OS) const {
+ switch (SequenceKind) {
+ case FailedSequence: {
+ OS << "Failed sequence: ";
+ switch (Failure) {
+ case FK_TooManyInitsForReference:
+ OS << "too many initializers for reference";
+ break;
+
+ case FK_ArrayNeedsInitList:
+ OS << "array requires initializer list";
+ break;
+
+ case FK_ArrayNeedsInitListOrStringLiteral:
+ OS << "array requires initializer list or string literal";
+ break;
+
+ case FK_ArrayTypeMismatch:
+ OS << "array type mismatch";
+ break;
+
+ case FK_NonConstantArrayInit:
+ OS << "non-constant array initializer";
+ break;
+
+ case FK_AddressOfOverloadFailed:
+ OS << "address of overloaded function failed";
+ break;
+
+ case FK_ReferenceInitOverloadFailed:
+ OS << "overload resolution for reference initialization failed";
+ break;
+
+ case FK_NonConstLValueReferenceBindingToTemporary:
+ OS << "non-const lvalue reference bound to temporary";
+ break;
+
+ case FK_NonConstLValueReferenceBindingToUnrelated:
+ OS << "non-const lvalue reference bound to unrelated type";
+ break;
+
+ case FK_RValueReferenceBindingToLValue:
+ OS << "rvalue reference bound to an lvalue";
+ break;
+
+ case FK_ReferenceInitDropsQualifiers:
+ OS << "reference initialization drops qualifiers";
+ break;
+
+ case FK_ReferenceInitFailed:
+ OS << "reference initialization failed";
+ break;
+
+ case FK_ConversionFailed:
+ OS << "conversion failed";
+ break;
+
+ case FK_ConversionFromPropertyFailed:
+ OS << "conversion from property failed";
+ break;
+
+ case FK_TooManyInitsForScalar:
+ OS << "too many initializers for scalar";
+ break;
+
+ case FK_ReferenceBindingToInitList:
+ OS << "reference binding to initializer list";
+ break;
+
+ case FK_InitListBadDestinationType:
+ OS << "initializer list for non-aggregate, non-scalar type";
+ break;
+
+ case FK_UserConversionOverloadFailed:
+ OS << "overloading failed for user-defined conversion";
+ break;
+
+ case FK_ConstructorOverloadFailed:
+ OS << "constructor overloading failed";
+ break;
+
+ case FK_DefaultInitOfConst:
+ OS << "default initialization of a const variable";
+ break;
+
+ case FK_Incomplete:
+ OS << "initialization of incomplete type";
+ break;
+
+ case FK_ListInitializationFailed:
+ OS << "list initialization checker failure";
+ break;
+
+ case FK_VariableLengthArrayHasInitializer:
+ OS << "variable length array has an initializer";
+ break;
+
+ case FK_PlaceholderType:
+ OS << "initializer expression isn't contextually valid";
+ break;
+
+ case FK_ListConstructorOverloadFailed:
+ OS << "list constructor overloading failed";
+ break;
+
+ case FK_InitListElementCopyFailure:
+ OS << "copy construction of initializer list element failed";
+ break;
+
+ case FK_ExplicitConstructor:
+ OS << "list copy initialization chose explicit constructor";
+ break;
+ }
+ OS << '\n';
+ return;
+ }
+
+ case DependentSequence:
+ OS << "Dependent sequence\n";
+ return;
+
+ case NormalSequence:
+ OS << "Normal sequence: ";
+ break;
+ }
+
+ for (step_iterator S = step_begin(), SEnd = step_end(); S != SEnd; ++S) {
+ if (S != step_begin()) {
+ OS << " -> ";
+ }
+
+ switch (S->Kind) {
+ case SK_ResolveAddressOfOverloadedFunction:
+ OS << "resolve address of overloaded function";
+ break;
+
+ case SK_CastDerivedToBaseRValue:
+ OS << "derived-to-base cast (rvalue " << S->Type.getAsString() << ")";
+ break;
+
+ case SK_CastDerivedToBaseXValue:
+ OS << "derived-to-base cast (xvalue " << S->Type.getAsString() << ")";
+ break;
+
+ case SK_CastDerivedToBaseLValue:
+ OS << "derived-to-base cast (lvalue " << S->Type.getAsString() << ")";
+ break;
+
+ case SK_BindReference:
+ OS << "bind reference to lvalue";
+ break;
+
+ case SK_BindReferenceToTemporary:
+ OS << "bind reference to a temporary";
+ break;
+
+ case SK_ExtraneousCopyToTemporary:
+ OS << "extraneous C++03 copy to temporary";
+ break;
+
+ case SK_UserConversion:
+ OS << "user-defined conversion via " << *S->Function.Function;
+ break;
+
+ case SK_QualificationConversionRValue:
+ OS << "qualification conversion (rvalue)";
+ break;
+
+ case SK_QualificationConversionXValue:
+ OS << "qualification conversion (xvalue)";
+ break;
+
+ case SK_QualificationConversionLValue:
+ OS << "qualification conversion (lvalue)";
+ break;
+
+ case SK_ConversionSequence:
+ OS << "implicit conversion sequence (";
+ S->ICS->DebugPrint(); // FIXME: use OS
+ OS << ")";
+ break;
+
+ case SK_ListInitialization:
+ OS << "list aggregate initialization";
+ break;
+
+ case SK_ListConstructorCall:
+ OS << "list initialization via constructor";
+ break;
+
+ case SK_UnwrapInitList:
+ OS << "unwrap reference initializer list";
+ break;
+
+ case SK_RewrapInitList:
+ OS << "rewrap reference initializer list";
+ break;
+
+ case SK_ConstructorInitialization:
+ OS << "constructor initialization";
+ break;
+
+ case SK_ZeroInitialization:
+ OS << "zero initialization";
+ break;
+
+ case SK_CAssignment:
+ OS << "C assignment";
+ break;
+
+ case SK_StringInit:
+ OS << "string initialization";
+ break;
+
+ case SK_ObjCObjectConversion:
+ OS << "Objective-C object conversion";
+ break;
+
+ case SK_ArrayInit:
+ OS << "array initialization";
+ break;
+
+ case SK_ParenthesizedArrayInit:
+ OS << "parenthesized array initialization";
+ break;
+
+ case SK_PassByIndirectCopyRestore:
+ OS << "pass by indirect copy and restore";
+ break;
+
+ case SK_PassByIndirectRestore:
+ OS << "pass by indirect restore";
+ break;
+
+ case SK_ProduceObjCObject:
+ OS << "Objective-C object retention";
+ break;
+
+ case SK_StdInitializerList:
+ OS << "std::initializer_list from initializer list";
+ break;
+ }
+ }
+}
+
+void InitializationSequence::dump() const {
+ dump(llvm::errs());
+}
+
+static void DiagnoseNarrowingInInitList(Sema &S, InitializationSequence &Seq,
+ QualType EntityType,
+ const Expr *PreInit,
+ const Expr *PostInit) {
+ if (Seq.step_begin() == Seq.step_end() || PreInit->isValueDependent())
+ return;
+
+ // A narrowing conversion can only appear as the final implicit conversion in
+ // an initialization sequence.
+ const InitializationSequence::Step &LastStep = Seq.step_end()[-1];
+ if (LastStep.Kind != InitializationSequence::SK_ConversionSequence)
+ return;
+
+ const ImplicitConversionSequence &ICS = *LastStep.ICS;
+ const StandardConversionSequence *SCS = 0;
+ switch (ICS.getKind()) {
+ case ImplicitConversionSequence::StandardConversion:
+ SCS = &ICS.Standard;
+ break;
+ case ImplicitConversionSequence::UserDefinedConversion:
+ SCS = &ICS.UserDefined.After;
+ break;
+ case ImplicitConversionSequence::AmbiguousConversion:
+ case ImplicitConversionSequence::EllipsisConversion:
+ case ImplicitConversionSequence::BadConversion:
+ return;
+ }
+
+ // Determine the type prior to the narrowing conversion. If a conversion
+ // operator was used, this may be different from both the type of the entity
+ // and of the pre-initialization expression.
+ QualType PreNarrowingType = PreInit->getType();
+ if (Seq.step_begin() + 1 != Seq.step_end())
+ PreNarrowingType = Seq.step_end()[-2].Type;
+
+ // C++11 [dcl.init.list]p7: Check whether this is a narrowing conversion.
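+ // e.g. (illustrative):
+ //   int i{1.0};             // type narrowing (floating-point to integral)
+ //   char c{300};            // constant narrowing (value does not fit)
+ //   int n; char d{n};       // variable narrowing (value unknown at compile time)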
+ APValue ConstantValue;
+ QualType ConstantType;
+ switch (SCS->getNarrowingKind(S.Context, PostInit, ConstantValue,
+ ConstantType)) {
+ case NK_Not_Narrowing:
+ // No narrowing occurred.
+ return;
+
+ case NK_Type_Narrowing:
+ // This was a floating-to-integer conversion, which is always considered a
+ // narrowing conversion even if the value is a constant and can be
+ // represented exactly as an integer.
+ S.Diag(PostInit->getLocStart(),
+ S.getLangOpts().MicrosoftExt || !S.getLangOpts().CPlusPlus0x?
+ diag::warn_init_list_type_narrowing
+ : S.isSFINAEContext()?
+ diag::err_init_list_type_narrowing_sfinae
+ : diag::err_init_list_type_narrowing)
+ << PostInit->getSourceRange()
+ << PreNarrowingType.getLocalUnqualifiedType()
+ << EntityType.getLocalUnqualifiedType();
+ break;
+
+ case NK_Constant_Narrowing:
+ // A constant value was narrowed.
+ S.Diag(PostInit->getLocStart(),
+ S.getLangOpts().MicrosoftExt || !S.getLangOpts().CPlusPlus0x?
+ diag::warn_init_list_constant_narrowing
+ : S.isSFINAEContext()?
+ diag::err_init_list_constant_narrowing_sfinae
+ : diag::err_init_list_constant_narrowing)
+ << PostInit->getSourceRange()
+ << ConstantValue.getAsString(S.getASTContext(), ConstantType)
+ << EntityType.getLocalUnqualifiedType();
+ break;
+
+ case NK_Variable_Narrowing:
+ // A variable's value may have been narrowed.
+ S.Diag(PostInit->getLocStart(),
+ S.getLangOpts().MicrosoftExt || !S.getLangOpts().CPlusPlus0x?
+ diag::warn_init_list_variable_narrowing
+ : S.isSFINAEContext()?
+ diag::err_init_list_variable_narrowing_sfinae
+ : diag::err_init_list_variable_narrowing)
+ << PostInit->getSourceRange()
+ << PreNarrowingType.getLocalUnqualifiedType()
+ << EntityType.getLocalUnqualifiedType();
+ break;
+ }
+
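+ // Build a fixit that wraps the offending initializer in a static_cast to the
+ // destination type; e.g. (illustrative): 'int i{d};' -> 'int i{static_cast<int>(d)};'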
+ SmallString<128> StaticCast;
+ llvm::raw_svector_ostream OS(StaticCast);
+ OS << "static_cast<";
+ if (const TypedefType *TT = EntityType->getAs<TypedefType>()) {
+ // It's important to use the typedef's name if there is one so that the
+ // fixit doesn't break code using types like int64_t.
+ //
+ // FIXME: This will break if the typedef requires qualification. But
+ // getQualifiedNameAsString() includes non-machine-parsable components.
+ OS << *TT->getDecl();
+ } else if (const BuiltinType *BT = EntityType->getAs<BuiltinType>())
+ OS << BT->getName(S.getLangOpts());
+ else {
+ // Oops, we didn't find the actual type of the variable. Don't emit a fixit
+ // with a broken cast.
+ return;
+ }
+ OS << ">(";
+ S.Diag(PostInit->getLocStart(), diag::note_init_list_narrowing_override)
+ << PostInit->getSourceRange()
+ << FixItHint::CreateInsertion(PostInit->getLocStart(), OS.str())
+ << FixItHint::CreateInsertion(
+ S.getPreprocessor().getLocForEndOfToken(PostInit->getLocEnd()), ")");
+}
+
+//===----------------------------------------------------------------------===//
+// Initialization helper functions
+//===----------------------------------------------------------------------===//
+bool
+Sema::CanPerformCopyInitialization(const InitializedEntity &Entity,
+ ExprResult Init) {
+ if (Init.isInvalid())
+ return false;
+
+ Expr *InitE = Init.get();
+ assert(InitE && "No initialization expression");
+
+ InitializationKind Kind = InitializationKind::CreateCopy(SourceLocation(),
+ SourceLocation());
+ InitializationSequence Seq(*this, Entity, Kind, &InitE, 1);
+ return !Seq.Failed();
+}
+
+ExprResult
+Sema::PerformCopyInitialization(const InitializedEntity &Entity,
+ SourceLocation EqualLoc,
+ ExprResult Init,
+ bool TopLevelOfInitList,
+ bool AllowExplicit) {
+ if (Init.isInvalid())
+ return ExprError();
+
+ Expr *InitE = Init.get();
+ assert(InitE && "No initialization expression?");
+
+ if (EqualLoc.isInvalid())
+ EqualLoc = InitE->getLocStart();
+
+ InitializationKind Kind = InitializationKind::CreateCopy(InitE->getLocStart(),
+ EqualLoc,
+ AllowExplicit);
+ InitializationSequence Seq(*this, Entity, Kind, &InitE, 1);
+ Init.release();
+
+ ExprResult Result = Seq.Perform(*this, Entity, Kind, MultiExprArg(&InitE, 1));
+
+ if (!Result.isInvalid() && TopLevelOfInitList)
+ DiagnoseNarrowingInInitList(*this, Seq, Entity.getType(),
+ InitE, Result.get());
+
+ return Result;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp
new file mode 100644
index 0000000..6ef8d88
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaLambda.cpp
@@ -0,0 +1,820 @@
+//===--- SemaLambda.cpp - Semantic Analysis for C++11 Lambdas -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for C++ lambda expressions.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/AST/ExprCXX.h"
+using namespace clang;
+using namespace sema;
+
+CXXRecordDecl *Sema::createLambdaClosureType(SourceRange IntroducerRange,
+ bool KnownDependent) {
+ DeclContext *DC = CurContext;
+ while (!(DC->isFunctionOrMethod() || DC->isRecord() || DC->isFileContext()))
+ DC = DC->getParent();
+
+ // Start constructing the lambda class.
+ CXXRecordDecl *Class = CXXRecordDecl::CreateLambda(Context, DC,
+ IntroducerRange.getBegin(),
+ KnownDependent);
+ DC->addDecl(Class);
+
+ return Class;
+}
+
+/// \brief Determine whether the given context is or is enclosed in an inline
+/// function.
+static bool isInInlineFunction(const DeclContext *DC) {
+ while (!DC->isFileContext()) {
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(DC))
+ if (FD->isInlined())
+ return true;
+
+ DC = DC->getLexicalParent();
+ }
+
+ return false;
+}
+
+CXXMethodDecl *Sema::startLambdaDefinition(CXXRecordDecl *Class,
+ SourceRange IntroducerRange,
+ TypeSourceInfo *MethodType,
+ SourceLocation EndLoc,
+ llvm::ArrayRef<ParmVarDecl *> Params,
+ llvm::Optional<unsigned> ManglingNumber,
+ Decl *ContextDecl) {
+ // C++11 [expr.prim.lambda]p5:
+ // The closure type for a lambda-expression has a public inline function
+ // call operator (13.5.4) whose parameters and return type are described by
+ // the lambda-expression's parameter-declaration-clause and
+ // trailing-return-type respectively.
+ DeclarationName MethodName
+ = Context.DeclarationNames.getCXXOperatorName(OO_Call);
+ DeclarationNameLoc MethodNameLoc;
+ MethodNameLoc.CXXOperatorName.BeginOpNameLoc
+ = IntroducerRange.getBegin().getRawEncoding();
+ MethodNameLoc.CXXOperatorName.EndOpNameLoc
+ = IntroducerRange.getEnd().getRawEncoding();
+ CXXMethodDecl *Method
+ = CXXMethodDecl::Create(Context, Class, EndLoc,
+ DeclarationNameInfo(MethodName,
+ IntroducerRange.getBegin(),
+ MethodNameLoc),
+ MethodType->getType(), MethodType,
+ /*isStatic=*/false,
+ SC_None,
+ /*isInline=*/true,
+ /*isConstExpr=*/false,
+ EndLoc);
+ Method->setAccess(AS_public);
+
+ // Temporarily set the lexical declaration context to the current
+ // context, so that the Scope stack matches the lexical nesting.
+ Method->setLexicalDeclContext(CurContext);
+
+ // Add parameters.
+ if (!Params.empty()) {
+ Method->setParams(Params);
+ CheckParmsForFunctionDef(const_cast<ParmVarDecl **>(Params.begin()),
+ const_cast<ParmVarDecl **>(Params.end()),
+ /*CheckParameterNames=*/false);
+
+ for (CXXMethodDecl::param_iterator P = Method->param_begin(),
+ PEnd = Method->param_end();
+ P != PEnd; ++P)
+ (*P)->setOwningFunction(Method);
+ }
+
+ // If we don't already have a mangling number for this lambda expression,
+ // allocate one now.
+ if (!ManglingNumber) {
+ ContextDecl = ExprEvalContexts.back().LambdaContextDecl;
+
+ enum ContextKind {
+ Normal,
+ DefaultArgument,
+ DataMember,
+ StaticDataMember
+ } Kind = Normal;
+
+ // Default arguments of member function parameters that appear in a class
+ // definition, as well as the initializers of data members, receive special
+ // treatment. Identify them.
+ if (ContextDecl) {
+ if (ParmVarDecl *Param = dyn_cast<ParmVarDecl>(ContextDecl)) {
+ if (const DeclContext *LexicalDC
+ = Param->getDeclContext()->getLexicalParent())
+ if (LexicalDC->isRecord())
+ Kind = DefaultArgument;
+ } else if (VarDecl *Var = dyn_cast<VarDecl>(ContextDecl)) {
+ if (Var->getDeclContext()->isRecord())
+ Kind = StaticDataMember;
+ } else if (isa<FieldDecl>(ContextDecl)) {
+ Kind = DataMember;
+ }
+ }
+
+ switch (Kind) {
+ case Normal:
+ if (CurContext->isDependentContext() || isInInlineFunction(CurContext))
+ ManglingNumber = Context.getLambdaManglingNumber(Method);
+ else
+ ManglingNumber = 0;
+
+ // There is no special context for this lambda.
+ ContextDecl = 0;
+ break;
+
+ case StaticDataMember:
+ if (!CurContext->isDependentContext()) {
+ ManglingNumber = 0;
+ ContextDecl = 0;
+ break;
+ }
+ // Fall through to assign a mangling number.
+
+ case DataMember:
+ case DefaultArgument:
+ ManglingNumber = ExprEvalContexts.back().getLambdaMangleContext()
+ .getManglingNumber(Method);
+ break;
+ }
+ }
+
+ Class->setLambdaMangling(*ManglingNumber, ContextDecl);
+ return Method;
+}
+
+LambdaScopeInfo *Sema::enterLambdaScope(CXXMethodDecl *CallOperator,
+ SourceRange IntroducerRange,
+ LambdaCaptureDefault CaptureDefault,
+ bool ExplicitParams,
+ bool ExplicitResultType,
+ bool Mutable) {
+ PushLambdaScope(CallOperator->getParent(), CallOperator);
+ LambdaScopeInfo *LSI = getCurLambda();
+ if (CaptureDefault == LCD_ByCopy)
+ LSI->ImpCaptureStyle = LambdaScopeInfo::ImpCap_LambdaByval;
+ else if (CaptureDefault == LCD_ByRef)
+ LSI->ImpCaptureStyle = LambdaScopeInfo::ImpCap_LambdaByref;
+ LSI->IntroducerRange = IntroducerRange;
+ LSI->ExplicitParams = ExplicitParams;
+ LSI->Mutable = Mutable;
+
+ if (ExplicitResultType) {
+ LSI->ReturnType = CallOperator->getResultType();
+
+ if (!LSI->ReturnType->isDependentType() &&
+ !LSI->ReturnType->isVoidType()) {
+ if (RequireCompleteType(CallOperator->getLocStart(), LSI->ReturnType,
+ diag::err_lambda_incomplete_result)) {
+ // Do nothing.
+ } else if (LSI->ReturnType->isObjCObjectOrInterfaceType()) {
+ Diag(CallOperator->getLocStart(), diag::err_lambda_objc_object_result)
+ << LSI->ReturnType;
+ }
+ }
+ } else {
+ LSI->HasImplicitReturnType = true;
+ }
+
+ return LSI;
+}
+
+void Sema::finishLambdaExplicitCaptures(LambdaScopeInfo *LSI) {
+ LSI->finishedExplicitCaptures();
+}
+
+void Sema::addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope) {
+ // Introduce our parameters into the function scope
+ for (unsigned p = 0, NumParams = CallOperator->getNumParams();
+ p < NumParams; ++p) {
+ ParmVarDecl *Param = CallOperator->getParamDecl(p);
+
+ // If this has an identifier, add it to the scope stack.
+ if (CurScope && Param->getIdentifier()) {
+ CheckShadow(CurScope, Param);
+
+ PushOnScopeChains(Param, CurScope);
+ }
+ }
+}
+
+void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
+ Declarator &ParamInfo,
+ Scope *CurScope) {
+ // Determine if we're within a context where we know that the lambda will
+ // be dependent, because there are template parameters in scope.
+ bool KnownDependent = false;
+ if (Scope *TmplScope = CurScope->getTemplateParamParent())
+ if (!TmplScope->decl_empty())
+ KnownDependent = true;
+
+ CXXRecordDecl *Class = createLambdaClosureType(Intro.Range, KnownDependent);
+
+ // Determine the signature of the call operator.
+ TypeSourceInfo *MethodTyInfo;
+ bool ExplicitParams = true;
+ bool ExplicitResultType = true;
+ SourceLocation EndLoc;
+ llvm::ArrayRef<ParmVarDecl *> Params;
+ if (ParamInfo.getNumTypeObjects() == 0) {
+ // C++11 [expr.prim.lambda]p4:
+ // If a lambda-expression does not include a lambda-declarator, it is as
+ // if the lambda-declarator were ().
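+ // e.g. (illustrative): []{ return 0; } is treated as []() { return 0; }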
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.HasTrailingReturn = true;
+ EPI.TypeQuals |= DeclSpec::TQ_const;
+ QualType MethodTy = Context.getFunctionType(Context.DependentTy,
+ /*Args=*/0, /*NumArgs=*/0, EPI);
+ MethodTyInfo = Context.getTrivialTypeSourceInfo(MethodTy);
+ ExplicitParams = false;
+ ExplicitResultType = false;
+ EndLoc = Intro.Range.getEnd();
+ } else {
+ assert(ParamInfo.isFunctionDeclarator() &&
+ "lambda-declarator is a function");
+ DeclaratorChunk::FunctionTypeInfo &FTI = ParamInfo.getFunctionTypeInfo();
+
+ // C++11 [expr.prim.lambda]p5:
+ // This function call operator is declared const (9.3.1) if and only if
+ // the lambda-expression's parameter-declaration-clause is not followed
+ // by mutable. It is neither virtual nor declared volatile. [...]
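+ // e.g. (illustrative): [x]() mutable { ++x; } yields a non-const operator().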
+ if (!FTI.hasMutableQualifier())
+ FTI.TypeQuals |= DeclSpec::TQ_const;
+
+ MethodTyInfo = GetTypeForDeclarator(ParamInfo, CurScope);
+ assert(MethodTyInfo && "no type from lambda-declarator");
+ EndLoc = ParamInfo.getSourceRange().getEnd();
+
+ ExplicitResultType
+ = MethodTyInfo->getType()->getAs<FunctionType>()->getResultType()
+ != Context.DependentTy;
+
+ TypeLoc TL = MethodTyInfo->getTypeLoc();
+ FunctionProtoTypeLoc Proto = cast<FunctionProtoTypeLoc>(TL);
+ Params = llvm::ArrayRef<ParmVarDecl *>(Proto.getParmArray(),
+ Proto.getNumArgs());
+ }
+
+ CXXMethodDecl *Method = startLambdaDefinition(Class, Intro.Range,
+ MethodTyInfo, EndLoc, Params);
+
+ if (ExplicitParams)
+ CheckCXXDefaultArguments(Method);
+
+ // Attributes on the lambda apply to the method.
+ ProcessDeclAttributes(CurScope, Method, ParamInfo);
+
+ // Introduce the function call operator as the current declaration context.
+ PushDeclContext(CurScope, Method);
+
+ // Introduce the lambda scope.
+ LambdaScopeInfo *LSI
+ = enterLambdaScope(Method, Intro.Range, Intro.Default, ExplicitParams,
+ ExplicitResultType,
+ (Method->getTypeQualifiers() & Qualifiers::Const) == 0);
+
+ // Handle explicit captures.
+ SourceLocation PrevCaptureLoc
+ = Intro.Default == LCD_None? Intro.Range.getBegin() : Intro.DefaultLoc;
+ for (llvm::SmallVector<LambdaCapture, 4>::const_iterator
+ C = Intro.Captures.begin(),
+ E = Intro.Captures.end();
+ C != E;
+ PrevCaptureLoc = C->Loc, ++C) {
+ if (C->Kind == LCK_This) {
+ // C++11 [expr.prim.lambda]p8:
+ // An identifier or this shall not appear more than once in a
+ // lambda-capture.
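+ // e.g. (illustrative): [this, this] { }   // rejected here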
+ if (LSI->isCXXThisCaptured()) {
+ Diag(C->Loc, diag::err_capture_more_than_once)
+ << "'this'"
+ << SourceRange(LSI->getCXXThisCapture().getLocation())
+ << FixItHint::CreateRemoval(
+ SourceRange(PP.getLocForEndOfToken(PrevCaptureLoc), C->Loc));
+ continue;
+ }
+
+ // C++11 [expr.prim.lambda]p8:
+ // If a lambda-capture includes a capture-default that is =, the
+ // lambda-capture shall not contain this [...].
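+ // e.g. (illustrative): [=, this] { }   // ill-formed in C++11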
+ if (Intro.Default == LCD_ByCopy) {
+ Diag(C->Loc, diag::err_this_capture_with_copy_default)
+ << FixItHint::CreateRemoval(
+ SourceRange(PP.getLocForEndOfToken(PrevCaptureLoc), C->Loc));
+ continue;
+ }
+
+ // C++11 [expr.prim.lambda]p12:
+ // If this is captured by a local lambda expression, its nearest
+ // enclosing function shall be a non-static member function.
+ QualType ThisCaptureType = getCurrentThisType();
+ if (ThisCaptureType.isNull()) {
+ Diag(C->Loc, diag::err_this_capture) << true;
+ continue;
+ }
+
+ CheckCXXThisCapture(C->Loc, /*Explicit=*/true);
+ continue;
+ }
+
+ assert(C->Id && "missing identifier for capture");
+
+ // C++11 [expr.prim.lambda]p8:
+ // If a lambda-capture includes a capture-default that is &, the
+ // identifiers in the lambda-capture shall not be preceded by &.
+ // If a lambda-capture includes a capture-default that is =, [...]
+ // each identifier it contains shall be preceded by &.
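+ // e.g. (illustrative): [&, &x] { }   // error: '&x' is redundant with '&'
+ //                      [=, x] { }    // error: 'x' is redundant with '='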
+ if (C->Kind == LCK_ByRef && Intro.Default == LCD_ByRef) {
+ Diag(C->Loc, diag::err_reference_capture_with_reference_default)
+ << FixItHint::CreateRemoval(
+ SourceRange(PP.getLocForEndOfToken(PrevCaptureLoc), C->Loc));
+ continue;
+ } else if (C->Kind == LCK_ByCopy && Intro.Default == LCD_ByCopy) {
+ Diag(C->Loc, diag::err_copy_capture_with_copy_default)
+ << FixItHint::CreateRemoval(
+ SourceRange(PP.getLocForEndOfToken(PrevCaptureLoc), C->Loc));
+ continue;
+ }
+
+ DeclarationNameInfo Name(C->Id, C->Loc);
+ LookupResult R(*this, Name, LookupOrdinaryName);
+ LookupName(R, CurScope);
+ if (R.isAmbiguous())
+ continue;
+ if (R.empty()) {
+ // FIXME: Disable corrections that would add qualification?
+ CXXScopeSpec ScopeSpec;
+ DeclFilterCCC<VarDecl> Validator;
+ if (DiagnoseEmptyLookup(CurScope, ScopeSpec, R, Validator))
+ continue;
+ }
+
+ // C++11 [expr.prim.lambda]p10:
+ // The identifiers in a capture-list are looked up using the usual rules
+ // for unqualified name lookup (3.4.1); each such lookup shall find a
+ // variable with automatic storage duration declared in the reaching
+ // scope of the local lambda expression.
+ //
+ // Note that the 'reaching scope' check happens in tryCaptureVariable().
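+ // e.g. (illustrative): static int s; [s] { }   // error: 's' has static storage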
+ VarDecl *Var = R.getAsSingle<VarDecl>();
+ if (!Var) {
+ Diag(C->Loc, diag::err_capture_does_not_name_variable) << C->Id;
+ continue;
+ }
+
+ if (!Var->hasLocalStorage()) {
+ Diag(C->Loc, diag::err_capture_non_automatic_variable) << C->Id;
+ Diag(Var->getLocation(), diag::note_previous_decl) << C->Id;
+ continue;
+ }
+
+ // C++11 [expr.prim.lambda]p8:
+ // An identifier or this shall not appear more than once in a
+ // lambda-capture.
+ if (LSI->isCaptured(Var)) {
+ Diag(C->Loc, diag::err_capture_more_than_once)
+ << C->Id
+ << SourceRange(LSI->getCapture(Var).getLocation())
+ << FixItHint::CreateRemoval(
+ SourceRange(PP.getLocForEndOfToken(PrevCaptureLoc), C->Loc));
+ continue;
+ }
+
+ // C++11 [expr.prim.lambda]p23:
+ // A capture followed by an ellipsis is a pack expansion (14.5.3).
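+ // e.g. (illustrative): template<class... Ts> void f(Ts... ts) { [ts...] { }; }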
+ SourceLocation EllipsisLoc;
+ if (C->EllipsisLoc.isValid()) {
+ if (Var->isParameterPack()) {
+ EllipsisLoc = C->EllipsisLoc;
+ } else {
+ Diag(C->EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << SourceRange(C->Loc);
+
+ // Just ignore the ellipsis.
+ }
+ } else if (Var->isParameterPack()) {
+ Diag(C->Loc, diag::err_lambda_unexpanded_pack);
+ continue;
+ }
+
+ TryCaptureKind Kind = C->Kind == LCK_ByRef ? TryCapture_ExplicitByRef :
+ TryCapture_ExplicitByVal;
+ tryCaptureVariable(Var, C->Loc, Kind, EllipsisLoc);
+ }
+ finishLambdaExplicitCaptures(LSI);
+
+ // Add lambda parameters into scope.
+ addLambdaParameters(Method, CurScope);
+
+ // Enter a new evaluation context to insulate the lambda from any
+ // cleanups from the enclosing full-expression.
+ PushExpressionEvaluationContext(PotentiallyEvaluated);
+}
+
+void Sema::ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
+ bool IsInstantiation) {
+ // Leave the expression-evaluation context.
+ DiscardCleanupsInEvaluationContext();
+ PopExpressionEvaluationContext();
+
+ // Leave the context of the lambda.
+ if (!IsInstantiation)
+ PopDeclContext();
+
+ // Finalize the lambda.
+ LambdaScopeInfo *LSI = getCurLambda();
+ CXXRecordDecl *Class = LSI->Lambda;
+ Class->setInvalidDecl();
+ SmallVector<Decl*, 4> Fields(Class->field_begin(), Class->field_end());
+ ActOnFields(0, Class->getLocation(), Class, Fields,
+ SourceLocation(), SourceLocation(), 0);
+ CheckCompletedCXXClass(Class);
+
+ PopFunctionScopeInfo();
+}
+
+/// \brief Add a lambda's conversion to function pointer, as described in
+/// C++11 [expr.prim.lambda]p6.
+static void addFunctionPointerConversion(Sema &S,
+ SourceRange IntroducerRange,
+ CXXRecordDecl *Class,
+ CXXMethodDecl *CallOperator) {
+ // Add the conversion to function pointer.
+ const FunctionProtoType *Proto
+ = CallOperator->getType()->getAs<FunctionProtoType>();
+ QualType FunctionPtrTy;
+ QualType FunctionTy;
+ {
+ FunctionProtoType::ExtProtoInfo ExtInfo = Proto->getExtProtoInfo();
+ ExtInfo.TypeQuals = 0;
+ FunctionTy = S.Context.getFunctionType(Proto->getResultType(),
+ Proto->arg_type_begin(),
+ Proto->getNumArgs(),
+ ExtInfo);
+ FunctionPtrTy = S.Context.getPointerType(FunctionTy);
+ }
+
+ FunctionProtoType::ExtProtoInfo ExtInfo;
+ ExtInfo.TypeQuals = Qualifiers::Const;
+ QualType ConvTy = S.Context.getFunctionType(FunctionPtrTy, 0, 0, ExtInfo);
+
+ SourceLocation Loc = IntroducerRange.getBegin();
+ DeclarationName Name
+ = S.Context.DeclarationNames.getCXXConversionFunctionName(
+ S.Context.getCanonicalType(FunctionPtrTy));
+ DeclarationNameLoc NameLoc;
+ NameLoc.NamedType.TInfo = S.Context.getTrivialTypeSourceInfo(FunctionPtrTy,
+ Loc);
+ CXXConversionDecl *Conversion
+ = CXXConversionDecl::Create(S.Context, Class, Loc,
+ DeclarationNameInfo(Name, Loc, NameLoc),
+ ConvTy,
+ S.Context.getTrivialTypeSourceInfo(ConvTy,
+ Loc),
+ /*isInline=*/false, /*isExplicit=*/false,
+ /*isConstexpr=*/false,
+ CallOperator->getBody()->getLocEnd());
+ Conversion->setAccess(AS_public);
+ Conversion->setImplicit(true);
+ Class->addDecl(Conversion);
+
+ // Add a non-static member function "__invoke" that will be the result of
+ // the conversion.
+ Name = &S.Context.Idents.get("__invoke");
+ CXXMethodDecl *Invoke
+ = CXXMethodDecl::Create(S.Context, Class, Loc,
+ DeclarationNameInfo(Name, Loc), FunctionTy,
+ CallOperator->getTypeSourceInfo(),
+ /*IsStatic=*/true, SC_Static, /*IsInline=*/true,
+ /*IsConstexpr=*/false,
+ CallOperator->getBody()->getLocEnd());
+ SmallVector<ParmVarDecl *, 4> InvokeParams;
+ for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I) {
+ ParmVarDecl *From = CallOperator->getParamDecl(I);
+ InvokeParams.push_back(ParmVarDecl::Create(S.Context, Invoke,
+ From->getLocStart(),
+ From->getLocation(),
+ From->getIdentifier(),
+ From->getType(),
+ From->getTypeSourceInfo(),
+ From->getStorageClass(),
+ From->getStorageClassAsWritten(),
+ /*DefaultArg=*/0));
+ }
+ Invoke->setParams(InvokeParams);
+ Invoke->setAccess(AS_private);
+ Invoke->setImplicit(true);
+ Class->addDecl(Invoke);
+}
+
+/// \brief Add a lambda's conversion to block pointer.
+static void addBlockPointerConversion(Sema &S,
+ SourceRange IntroducerRange,
+ CXXRecordDecl *Class,
+ CXXMethodDecl *CallOperator) {
+ const FunctionProtoType *Proto
+ = CallOperator->getType()->getAs<FunctionProtoType>();
+ QualType BlockPtrTy;
+ {
+ FunctionProtoType::ExtProtoInfo ExtInfo = Proto->getExtProtoInfo();
+ ExtInfo.TypeQuals = 0;
+ QualType FunctionTy
+ = S.Context.getFunctionType(Proto->getResultType(),
+ Proto->arg_type_begin(),
+ Proto->getNumArgs(),
+ ExtInfo);
+ BlockPtrTy = S.Context.getBlockPointerType(FunctionTy);
+ }
+
+ FunctionProtoType::ExtProtoInfo ExtInfo;
+ ExtInfo.TypeQuals = Qualifiers::Const;
+ QualType ConvTy = S.Context.getFunctionType(BlockPtrTy, 0, 0, ExtInfo);
+
+ SourceLocation Loc = IntroducerRange.getBegin();
+ DeclarationName Name
+ = S.Context.DeclarationNames.getCXXConversionFunctionName(
+ S.Context.getCanonicalType(BlockPtrTy));
+ DeclarationNameLoc NameLoc;
+ NameLoc.NamedType.TInfo = S.Context.getTrivialTypeSourceInfo(BlockPtrTy, Loc);
+ CXXConversionDecl *Conversion
+ = CXXConversionDecl::Create(S.Context, Class, Loc,
+ DeclarationNameInfo(Name, Loc, NameLoc),
+ ConvTy,
+ S.Context.getTrivialTypeSourceInfo(ConvTy, Loc),
+ /*isInline=*/false, /*isExplicit=*/false,
+ /*isConstexpr=*/false,
+ CallOperator->getBody()->getLocEnd());
+ Conversion->setAccess(AS_public);
+ Conversion->setImplicit(true);
+ Class->addDecl(Conversion);
+}
+
+ExprResult Sema::ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
+ Scope *CurScope,
+ bool IsInstantiation) {
+ // Collect information from the lambda scope.
+ llvm::SmallVector<LambdaExpr::Capture, 4> Captures;
+ llvm::SmallVector<Expr *, 4> CaptureInits;
+ LambdaCaptureDefault CaptureDefault;
+ CXXRecordDecl *Class;
+ CXXMethodDecl *CallOperator;
+ SourceRange IntroducerRange;
+ bool ExplicitParams;
+ bool ExplicitResultType;
+ bool LambdaExprNeedsCleanups;
+ llvm::SmallVector<VarDecl *, 4> ArrayIndexVars;
+ llvm::SmallVector<unsigned, 4> ArrayIndexStarts;
+ {
+ LambdaScopeInfo *LSI = getCurLambda();
+ CallOperator = LSI->CallOperator;
+ Class = LSI->Lambda;
+ IntroducerRange = LSI->IntroducerRange;
+ ExplicitParams = LSI->ExplicitParams;
+ ExplicitResultType = !LSI->HasImplicitReturnType;
+ LambdaExprNeedsCleanups = LSI->ExprNeedsCleanups;
+ ArrayIndexVars.swap(LSI->ArrayIndexVars);
+ ArrayIndexStarts.swap(LSI->ArrayIndexStarts);
+
+ // Translate captures.
+ for (unsigned I = 0, N = LSI->Captures.size(); I != N; ++I) {
+ LambdaScopeInfo::Capture From = LSI->Captures[I];
+ assert(!From.isBlockCapture() && "Cannot capture __block variables");
+ bool IsImplicit = I >= LSI->NumExplicitCaptures;
+
+ // Handle 'this' capture.
+ if (From.isThisCapture()) {
+ Captures.push_back(LambdaExpr::Capture(From.getLocation(),
+ IsImplicit,
+ LCK_This));
+ CaptureInits.push_back(new (Context) CXXThisExpr(From.getLocation(),
+ getCurrentThisType(),
+ /*isImplicit=*/true));
+ continue;
+ }
+
+ VarDecl *Var = From.getVariable();
+ LambdaCaptureKind Kind = From.isCopyCapture()? LCK_ByCopy : LCK_ByRef;
+ Captures.push_back(LambdaExpr::Capture(From.getLocation(), IsImplicit,
+ Kind, Var, From.getEllipsisLoc()));
+ CaptureInits.push_back(From.getCopyExpr());
+ }
+
+ switch (LSI->ImpCaptureStyle) {
+ case CapturingScopeInfo::ImpCap_None:
+ CaptureDefault = LCD_None;
+ break;
+
+ case CapturingScopeInfo::ImpCap_LambdaByval:
+ CaptureDefault = LCD_ByCopy;
+ break;
+
+ case CapturingScopeInfo::ImpCap_LambdaByref:
+ CaptureDefault = LCD_ByRef;
+ break;
+
+ case CapturingScopeInfo::ImpCap_Block:
+ llvm_unreachable("block capture in lambda");
+ break;
+ }
+
+ // C++11 [expr.prim.lambda]p4:
+ // If a lambda-expression does not include a
+ // trailing-return-type, it is as if the trailing-return-type
+ // denotes the following type:
+ // FIXME: Assumes current resolution to core issue 975.
+ if (LSI->HasImplicitReturnType) {
+ // - if there are no return statements in the
+ // compound-statement, or all return statements return
+ // either an expression of type void or no expression or
+ // braced-init-list, the type void;
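+ // e.g. (illustrative): [] { } has return type void; [] { return 42; } deduces int.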
+ if (LSI->ReturnType.isNull()) {
+ LSI->ReturnType = Context.VoidTy;
+ } else {
+ // C++11 [expr.prim.lambda]p4:
+ // - if the compound-statement is of the form
+ //
+ // { attribute-specifier-seq[opt] return expression ; }
+ //
+ // the type of the returned expression after
+ // lvalue-to-rvalue conversion (4.1), array-to-pointer
+ // conversion (4.2), and function-to-pointer conversion (4.3);
+ //
+ // Since we're accepting the resolution to a post-C++11 core
+ // issue with a non-trivial extension, provide a warning (by
+ // default).
+ CompoundStmt *CompoundBody = cast<CompoundStmt>(Body);
+ if (!(CompoundBody->size() == 1 &&
+ isa<ReturnStmt>(*CompoundBody->body_begin())) &&
+ !Context.hasSameType(LSI->ReturnType, Context.VoidTy))
+ Diag(IntroducerRange.getBegin(),
+ diag::ext_lambda_implies_void_return);
+ }
+
+ // Create a function type with the inferred return type.
+ const FunctionProtoType *Proto
+ = CallOperator->getType()->getAs<FunctionProtoType>();
+ QualType FunctionTy
+ = Context.getFunctionType(LSI->ReturnType,
+ Proto->arg_type_begin(),
+ Proto->getNumArgs(),
+ Proto->getExtProtoInfo());
+ CallOperator->setType(FunctionTy);
+ }
+
+ // C++ [expr.prim.lambda]p7:
+ // The lambda-expression's compound-statement yields the
+ // function-body (8.4) of the function call operator [...].
+ ActOnFinishFunctionBody(CallOperator, Body, IsInstantiation);
+ CallOperator->setLexicalDeclContext(Class);
+ Class->addDecl(CallOperator);
+ PopExpressionEvaluationContext();
+
+ // C++11 [expr.prim.lambda]p6:
+ // The closure type for a lambda-expression with no lambda-capture
+ // has a public non-virtual non-explicit const conversion function
+ // to pointer to function having the same parameter and return
+ // types as the closure type's function call operator.
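+ // e.g. (illustrative): int (*fp)(int) = [](int x) { return x; };   // OK, no captures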
+ if (Captures.empty() && CaptureDefault == LCD_None)
+ addFunctionPointerConversion(*this, IntroducerRange, Class,
+ CallOperator);
+
+ // Objective-C++:
+ // The closure type for a lambda-expression has a public non-virtual
+ // non-explicit const conversion function to a block pointer having the
+ // same parameter and return types as the closure type's function call
+ // operator.
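+ // e.g. (illustrative, Objective-C++ with blocks): void (^b)(void) = [] { };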
+ if (getLangOpts().Blocks && getLangOpts().ObjC1)
+ addBlockPointerConversion(*this, IntroducerRange, Class, CallOperator);
+
+ // Finalize the lambda class.
+ SmallVector<Decl*, 4> Fields(Class->field_begin(), Class->field_end());
+ ActOnFields(0, Class->getLocation(), Class, Fields,
+ SourceLocation(), SourceLocation(), 0);
+ CheckCompletedCXXClass(Class);
+ }
+
+ if (LambdaExprNeedsCleanups)
+ ExprNeedsCleanups = true;
+
+ LambdaExpr *Lambda = LambdaExpr::Create(Context, Class, IntroducerRange,
+ CaptureDefault, Captures,
+ ExplicitParams, ExplicitResultType,
+ CaptureInits, ArrayIndexVars,
+ ArrayIndexStarts, Body->getLocEnd());
+
+ // C++11 [expr.prim.lambda]p2:
+ // A lambda-expression shall not appear in an unevaluated operand
+ // (Clause 5).
+ if (!CurContext->isDependentContext()) {
+ switch (ExprEvalContexts.back().Context) {
+ case Unevaluated:
+ // We don't actually diagnose this case immediately, because we
+ // could be within a context where we might find out later that
+ // the expression is potentially evaluated (e.g., for typeid).
+ ExprEvalContexts.back().Lambdas.push_back(Lambda);
+ break;
+
+ case ConstantEvaluated:
+ case PotentiallyEvaluated:
+ case PotentiallyEvaluatedIfUsed:
+ break;
+ }
+ }
+
+ return MaybeBindToTemporary(Lambda);
+}
+
+ExprResult Sema::BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
+ SourceLocation ConvLocation,
+ CXXConversionDecl *Conv,
+ Expr *Src) {
+ // Make sure that the lambda call operator is marked used.
+ CXXRecordDecl *Lambda = Conv->getParent();
+ CXXMethodDecl *CallOperator
+ = cast<CXXMethodDecl>(
+ *Lambda->lookup(
+ Context.DeclarationNames.getCXXOperatorName(OO_Call)).first);
+ CallOperator->setReferenced();
+ CallOperator->setUsed();
+
+ ExprResult Init = PerformCopyInitialization(
+ InitializedEntity::InitializeBlock(ConvLocation,
+ Src->getType(),
+ /*NRVO=*/false),
+ CurrentLocation, Src);
+ if (!Init.isInvalid())
+ Init = ActOnFinishFullExpr(Init.take());
+
+ if (Init.isInvalid())
+ return ExprError();
+
+ // Create the new block to be returned.
+ BlockDecl *Block = BlockDecl::Create(Context, CurContext, ConvLocation);
+
+ // Set the type information.
+ Block->setSignatureAsWritten(CallOperator->getTypeSourceInfo());
+ Block->setIsVariadic(CallOperator->isVariadic());
+ Block->setBlockMissingReturnType(false);
+
+ // Add parameters.
+ SmallVector<ParmVarDecl *, 4> BlockParams;
+ for (unsigned I = 0, N = CallOperator->getNumParams(); I != N; ++I) {
+ ParmVarDecl *From = CallOperator->getParamDecl(I);
+ BlockParams.push_back(ParmVarDecl::Create(Context, Block,
+ From->getLocStart(),
+ From->getLocation(),
+ From->getIdentifier(),
+ From->getType(),
+ From->getTypeSourceInfo(),
+ From->getStorageClass(),
+ From->getStorageClassAsWritten(),
+ /*DefaultArg=*/0));
+ }
+ Block->setParams(BlockParams);
+
+ Block->setIsConversionFromLambda(true);
+
+ // Add capture. The capture uses a fake variable, which doesn't correspond
+ // to any actual memory location. However, the initializer copy-initializes
+ // the lambda object.
+ TypeSourceInfo *CapVarTSI =
+ Context.getTrivialTypeSourceInfo(Src->getType());
+ VarDecl *CapVar = VarDecl::Create(Context, Block, ConvLocation,
+ ConvLocation, 0,
+ Src->getType(), CapVarTSI,
+ SC_None, SC_None);
+ BlockDecl::Capture Capture(/*Variable=*/CapVar, /*ByRef=*/false,
+ /*Nested=*/false, /*Copy=*/Init.take());
+ Block->setCaptures(Context, &Capture, &Capture + 1,
+ /*CapturesCXXThis=*/false);
+
+ // Add a fake function body to the block. IR generation is responsible
+ // for filling in the actual body, which cannot be expressed as an AST.
+ Block->setBody(new (Context) CompoundStmt(Context, 0, 0,
+ ConvLocation,
+ ConvLocation));
+
+ // Create the block literal expression.
+ Expr *BuildBlock = new (Context) BlockExpr(Block, Conv->getConversionType());
+ ExprCleanupObjects.push_back(Block);
+ ExprNeedsCleanups = true;
+
+ return BuildBlock;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
new file mode 100644
index 0000000..966eb90
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaLookup.cpp
@@ -0,0 +1,4090 @@
+//===--------------------- SemaLookup.cpp - Name Lookup ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements name lookup for C, C++, Objective-C, and
+// Objective-C++.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Overload.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/TemplateDeduction.h"
+#include "clang/Sema/ExternalSemaSource.h"
+#include "clang/Sema/TypoCorrection.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclLookups.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/ADT/SetVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/TinyPtrVector.h"
+#include "llvm/ADT/edit_distance.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <algorithm>
+#include <iterator>
+#include <limits>
+#include <list>
+#include <map>
+#include <set>
+#include <utility>
+#include <vector>
+
+using namespace clang;
+using namespace sema;
+
+namespace {
+ class UnqualUsingEntry {
+ const DeclContext *Nominated;
+ const DeclContext *CommonAncestor;
+
+ public:
+ UnqualUsingEntry(const DeclContext *Nominated,
+ const DeclContext *CommonAncestor)
+ : Nominated(Nominated), CommonAncestor(CommonAncestor) {
+ }
+
+ const DeclContext *getCommonAncestor() const {
+ return CommonAncestor;
+ }
+
+ const DeclContext *getNominatedNamespace() const {
+ return Nominated;
+ }
+
+ // Sort by the pointer value of the common ancestor.
+ struct Comparator {
+ bool operator()(const UnqualUsingEntry &L, const UnqualUsingEntry &R) {
+ return L.getCommonAncestor() < R.getCommonAncestor();
+ }
+
+ bool operator()(const UnqualUsingEntry &E, const DeclContext *DC) {
+ return E.getCommonAncestor() < DC;
+ }
+
+ bool operator()(const DeclContext *DC, const UnqualUsingEntry &E) {
+ return DC < E.getCommonAncestor();
+ }
+ };
+ };
+
+ /// A collection of using directives, as used by C++ unqualified
+ /// lookup.
+ class UnqualUsingDirectiveSet {
+ typedef SmallVector<UnqualUsingEntry, 8> ListTy;
+
+ ListTy list;
+ llvm::SmallPtrSet<DeclContext*, 8> visited;
+
+ public:
+ UnqualUsingDirectiveSet() {}
+
+ void visitScopeChain(Scope *S, Scope *InnermostFileScope) {
+ // C++ [namespace.udir]p1:
+ // During unqualified name lookup, the names appear as if they
+ // were declared in the nearest enclosing namespace which contains
+ // both the using-directive and the nominated namespace.
+ DeclContext *InnermostFileDC
+ = static_cast<DeclContext*>(InnermostFileScope->getEntity());
+ assert(InnermostFileDC && InnermostFileDC->isFileContext());
+
+ for (; S; S = S->getParent()) {
+ // C++ [namespace.udir]p1:
+ // A using-directive shall not appear in class scope, but may
+ // appear in namespace scope or in block scope.
+ DeclContext *Ctx = static_cast<DeclContext*>(S->getEntity());
+ if (Ctx && Ctx->isFileContext()) {
+ visit(Ctx, Ctx);
+ } else if (!Ctx || Ctx->isFunctionOrMethod()) {
+ Scope::udir_iterator I = S->using_directives_begin(),
+ End = S->using_directives_end();
+ for (; I != End; ++I)
+ visit(*I, InnermostFileDC);
+ }
+ }
+ }
+
+ // Visits a context and collects all of its using directives
+ // recursively. Treats all using directives as if they were
+ // declared in the context.
+ //
+ // A given context is only ever visited once, so it is important
+ // that contexts be visited from the inside out in order to get
+ // the effective DCs right.
+ void visit(DeclContext *DC, DeclContext *EffectiveDC) {
+ if (!visited.insert(DC))
+ return;
+
+ addUsingDirectives(DC, EffectiveDC);
+ }
+
+ // Visits a using directive and collects all of its using
+ // directives recursively. Treats all using directives as if they
+ // were declared in the effective DC.
+ void visit(UsingDirectiveDecl *UD, DeclContext *EffectiveDC) {
+ DeclContext *NS = UD->getNominatedNamespace();
+ if (!visited.insert(NS))
+ return;
+
+ addUsingDirective(UD, EffectiveDC);
+ addUsingDirectives(NS, EffectiveDC);
+ }
+
+ // Adds all the using directives in a context (and those nominated
+ // by its using directives, transitively) as if they appeared in
+ // the given effective context.
+ void addUsingDirectives(DeclContext *DC, DeclContext *EffectiveDC) {
+ SmallVector<DeclContext*,4> queue;
+ while (true) {
+ DeclContext::udir_iterator I, End;
+ for (llvm::tie(I, End) = DC->getUsingDirectives(); I != End; ++I) {
+ UsingDirectiveDecl *UD = *I;
+ DeclContext *NS = UD->getNominatedNamespace();
+ if (visited.insert(NS)) {
+ addUsingDirective(UD, EffectiveDC);
+ queue.push_back(NS);
+ }
+ }
+
+ if (queue.empty())
+ return;
+
+ DC = queue.back();
+ queue.pop_back();
+ }
+ }
+
+ // Add a using directive as if it had been declared in the given
+ // context. This helps implement C++ [namespace.udir]p3:
+ // The using-directive is transitive: if a scope contains a
+ // using-directive that nominates a second namespace that itself
+ // contains using-directives, the effect is as if the
+ // using-directives from the second namespace also appeared in
+ // the first.
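+ // e.g. (illustrative):
+ //   namespace A { int x; }
+ //   namespace B { using namespace A; }
+ //   using namespace B;   // unqualified lookup of 'x' now finds A::x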
+ void addUsingDirective(UsingDirectiveDecl *UD, DeclContext *EffectiveDC) {
+ // Find the common ancestor between the effective context and
+ // the nominated namespace.
+ DeclContext *Common = UD->getNominatedNamespace();
+ while (!Common->Encloses(EffectiveDC))
+ Common = Common->getParent();
+ Common = Common->getPrimaryContext();
+
+ list.push_back(UnqualUsingEntry(UD->getNominatedNamespace(), Common));
+ }
+
+ void done() {
+ std::sort(list.begin(), list.end(), UnqualUsingEntry::Comparator());
+ }
+
+ typedef ListTy::const_iterator const_iterator;
+
+ const_iterator begin() const { return list.begin(); }
+ const_iterator end() const { return list.end(); }
+
+ std::pair<const_iterator,const_iterator>
+ getNamespacesFor(DeclContext *DC) const {
+ return std::equal_range(begin(), end(), DC->getPrimaryContext(),
+ UnqualUsingEntry::Comparator());
+ }
+ };
+}
+
+// Retrieve the set of identifier namespaces that correspond to a
+// specific kind of name lookup.
+static inline unsigned getIDNS(Sema::LookupNameKind NameKind,
+ bool CPlusPlus,
+ bool Redeclaration) {
+ unsigned IDNS = 0;
+ switch (NameKind) {
+ case Sema::LookupObjCImplicitSelfParam:
+ case Sema::LookupOrdinaryName:
+ case Sema::LookupRedeclarationWithLinkage:
+ IDNS = Decl::IDNS_Ordinary;
+ if (CPlusPlus) {
+ IDNS |= Decl::IDNS_Tag | Decl::IDNS_Member | Decl::IDNS_Namespace;
+ if (Redeclaration)
+ IDNS |= Decl::IDNS_TagFriend | Decl::IDNS_OrdinaryFriend;
+ }
+ break;
+
+ case Sema::LookupOperatorName:
+ // Operator lookup is its own crazy thing; it is not the same
+ // as (e.g.) looking up an operator name for redeclaration.
+ assert(!Redeclaration && "cannot do redeclaration operator lookup");
+ IDNS = Decl::IDNS_NonMemberOperator;
+ break;
+
+ case Sema::LookupTagName:
+ if (CPlusPlus) {
+ IDNS = Decl::IDNS_Type;
+
+ // When looking for a redeclaration of a tag name, we add:
+ // 1) TagFriend to find undeclared friend decls
+ // 2) Namespace because they can't "overload" with tag decls.
+ // 3) Tag because it includes class templates, which can't
+ // "overload" with tag decls.
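+ //
+ // For example (illustrative, names made up):
+ //   namespace X { }
+ //   class X; // error: conflicts with the namespace of the same name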
+ if (Redeclaration)
+ IDNS |= Decl::IDNS_Tag | Decl::IDNS_TagFriend | Decl::IDNS_Namespace;
+ } else {
+ IDNS = Decl::IDNS_Tag;
+ }
+ break;
+ case Sema::LookupLabel:
+ IDNS = Decl::IDNS_Label;
+ break;
+
+ case Sema::LookupMemberName:
+ IDNS = Decl::IDNS_Member;
+ if (CPlusPlus)
+ IDNS |= Decl::IDNS_Tag | Decl::IDNS_Ordinary;
+ break;
+
+ case Sema::LookupNestedNameSpecifierName:
+ IDNS = Decl::IDNS_Type | Decl::IDNS_Namespace;
+ break;
+
+ case Sema::LookupNamespaceName:
+ IDNS = Decl::IDNS_Namespace;
+ break;
+
+ case Sema::LookupUsingDeclName:
+ IDNS = Decl::IDNS_Ordinary | Decl::IDNS_Tag
+ | Decl::IDNS_Member | Decl::IDNS_Using;
+ break;
+
+ case Sema::LookupObjCProtocolName:
+ IDNS = Decl::IDNS_ObjCProtocol;
+ break;
+
+ case Sema::LookupAnyName:
+ IDNS = Decl::IDNS_Ordinary | Decl::IDNS_Tag | Decl::IDNS_Member
+ | Decl::IDNS_Using | Decl::IDNS_Namespace | Decl::IDNS_ObjCProtocol
+ | Decl::IDNS_Type;
+ break;
+ }
+ return IDNS;
+}
+
+void LookupResult::configure() {
+ IDNS = getIDNS(LookupKind, SemaRef.getLangOpts().CPlusPlus,
+ isForRedeclaration());
+
+ // If we're looking for one of the allocation or deallocation
+ // operators, make sure that the implicitly-declared new and delete
+ // operators can be found.
+ if (!isForRedeclaration()) {
+ switch (NameInfo.getName().getCXXOverloadedOperator()) {
+ case OO_New:
+ case OO_Delete:
+ case OO_Array_New:
+ case OO_Array_Delete:
+ SemaRef.DeclareGlobalNewDelete();
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+void LookupResult::sanityImpl() const {
+ // Note that this function is never called in NDEBUG builds. See
+ // LookupResult::sanity().
+ assert(ResultKind != NotFound || Decls.size() == 0);
+ assert(ResultKind != Found || Decls.size() == 1);
+ assert(ResultKind != FoundOverloaded || Decls.size() > 1 ||
+ (Decls.size() == 1 &&
+ isa<FunctionTemplateDecl>((*begin())->getUnderlyingDecl())));
+ assert(ResultKind != FoundUnresolvedValue || sanityCheckUnresolved());
+ assert(ResultKind != Ambiguous || Decls.size() > 1 ||
+ (Decls.size() == 1 && (Ambiguity == AmbiguousBaseSubobjects ||
+ Ambiguity == AmbiguousBaseSubobjectTypes)));
+ assert((Paths != NULL) == (ResultKind == Ambiguous &&
+ (Ambiguity == AmbiguousBaseSubobjectTypes ||
+ Ambiguity == AmbiguousBaseSubobjects)));
+}
+
+// Necessary because CXXBasePaths is not complete in Sema.h
+void LookupResult::deletePaths(CXXBasePaths *Paths) {
+ delete Paths;
+}
+
+static NamedDecl *getVisibleDecl(NamedDecl *D);
+
+NamedDecl *LookupResult::getAcceptableDeclSlow(NamedDecl *D) const {
+ return getVisibleDecl(D);
+}
+
+/// Resolves the result kind of this lookup.
+void LookupResult::resolveKind() {
+ unsigned N = Decls.size();
+
+ // Fast case: no possible ambiguity.
+ if (N == 0) {
+ assert(ResultKind == NotFound || ResultKind == NotFoundInCurrentInstantiation);
+ return;
+ }
+
+ // If there's a single decl, we need to examine it to decide what
+ // kind of lookup this is.
+ if (N == 1) {
+ NamedDecl *D = (*Decls.begin())->getUnderlyingDecl();
+ if (isa<FunctionTemplateDecl>(D))
+ ResultKind = FoundOverloaded;
+ else if (isa<UnresolvedUsingValueDecl>(D))
+ ResultKind = FoundUnresolvedValue;
+ return;
+ }
+
+ // Don't do any extra resolution if we've already resolved as ambiguous.
+ if (ResultKind == Ambiguous) return;
+
+ llvm::SmallPtrSet<NamedDecl*, 16> Unique;
+ llvm::SmallPtrSet<QualType, 16> UniqueTypes;
+
+ bool Ambiguous = false;
+ bool HasTag = false, HasFunction = false, HasNonFunction = false;
+ bool HasFunctionTemplate = false, HasUnresolved = false;
+
+ unsigned UniqueTagIndex = 0;
+
+ unsigned I = 0;
+ while (I < N) {
+ NamedDecl *D = Decls[I]->getUnderlyingDecl();
+ D = cast<NamedDecl>(D->getCanonicalDecl());
+
+ // Redeclarations of types via typedef can occur both within a scope
+ // and, through using declarations and directives, across scopes. There is
+ // no ambiguity if they all refer to the same type, so unique based on the
+ // canonical type.
+ if (TypeDecl *TD = dyn_cast<TypeDecl>(D)) {
+ if (!TD->getDeclContext()->isRecord()) {
+ QualType T = SemaRef.Context.getTypeDeclType(TD);
+ if (!UniqueTypes.insert(SemaRef.Context.getCanonicalType(T))) {
+ // The type is not unique; pull something off the back and continue
+ // at this index.
+ Decls[I] = Decls[--N];
+ continue;
+ }
+ }
+ }
+
+ if (!Unique.insert(D)) {
+ // If it's not unique, pull something off the back (and
+ // continue at this index).
+ Decls[I] = Decls[--N];
+ continue;
+ }
+
+ // Otherwise, do some decl type analysis and then continue.
+
+ if (isa<UnresolvedUsingValueDecl>(D)) {
+ HasUnresolved = true;
+ } else if (isa<TagDecl>(D)) {
+ if (HasTag)
+ Ambiguous = true;
+ UniqueTagIndex = I;
+ HasTag = true;
+ } else if (isa<FunctionTemplateDecl>(D)) {
+ HasFunction = true;
+ HasFunctionTemplate = true;
+ } else if (isa<FunctionDecl>(D)) {
+ HasFunction = true;
+ } else {
+ if (HasNonFunction)
+ Ambiguous = true;
+ HasNonFunction = true;
+ }
+ I++;
+ }
+
+ // C++ [basic.scope.hiding]p2:
+ // A class name or enumeration name can be hidden by the name of
+ // an object, function, or enumerator declared in the same
+ // scope. If a class or enumeration name and an object, function,
+ // or enumerator are declared in the same scope (in any order)
+ // with the same name, the class or enumeration name is hidden
+ // wherever the object, function, or enumerator name is visible.
+ // But it's still an error if there are distinct tag types found,
+ // even if they're not visible. (ref?)
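+ //
+ // For example (illustrative, names made up):
+ //   struct S { };
+ //   int S; // the variable hides the class name: plain 'S' now names the
+ //          // variable, while 'struct S' still refers to the class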
+ if (HideTags && HasTag && !Ambiguous &&
+ (HasFunction || HasNonFunction || HasUnresolved)) {
+ if (Decls[UniqueTagIndex]->getDeclContext()->getRedeclContext()->Equals(
+ Decls[UniqueTagIndex? 0 : N-1]->getDeclContext()->getRedeclContext()))
+ Decls[UniqueTagIndex] = Decls[--N];
+ else
+ Ambiguous = true;
+ }
+
+ Decls.set_size(N);
+
+ if (HasNonFunction && (HasFunction || HasUnresolved))
+ Ambiguous = true;
+
+ if (Ambiguous)
+ setAmbiguous(LookupResult::AmbiguousReference);
+ else if (HasUnresolved)
+ ResultKind = LookupResult::FoundUnresolvedValue;
+ else if (N > 1 || HasFunctionTemplate)
+ ResultKind = LookupResult::FoundOverloaded;
+ else
+ ResultKind = LookupResult::Found;
+}
+
+void LookupResult::addDeclsFromBasePaths(const CXXBasePaths &P) {
+ CXXBasePaths::const_paths_iterator I, E;
+ DeclContext::lookup_iterator DI, DE;
+ for (I = P.begin(), E = P.end(); I != E; ++I)
+ for (llvm::tie(DI,DE) = I->Decls; DI != DE; ++DI)
+ addDecl(*DI);
+}
+
+void LookupResult::setAmbiguousBaseSubobjects(CXXBasePaths &P) {
+ Paths = new CXXBasePaths;
+ Paths->swap(P);
+ addDeclsFromBasePaths(*Paths);
+ resolveKind();
+ setAmbiguous(AmbiguousBaseSubobjects);
+}
+
+void LookupResult::setAmbiguousBaseSubobjectTypes(CXXBasePaths &P) {
+ Paths = new CXXBasePaths;
+ Paths->swap(P);
+ addDeclsFromBasePaths(*Paths);
+ resolveKind();
+ setAmbiguous(AmbiguousBaseSubobjectTypes);
+}
+
+void LookupResult::print(raw_ostream &Out) {
+ Out << Decls.size() << " result(s)";
+ if (isAmbiguous()) Out << ", ambiguous";
+ if (Paths) Out << ", base paths present";
+
+ for (iterator I = begin(), E = end(); I != E; ++I) {
+ Out << "\n";
+ (*I)->print(Out, 2);
+ }
+}
+
+/// \brief Lookup a builtin function, when name lookup would otherwise
+/// fail.
+static bool LookupBuiltin(Sema &S, LookupResult &R) {
+ Sema::LookupNameKind NameKind = R.getLookupKind();
+
+ // If we didn't find a use of this identifier, and if the identifier
+ // corresponds to a compiler builtin, create the decl object for the builtin
+ // now, injecting it into translation unit scope, and return it.
+ if (NameKind == Sema::LookupOrdinaryName ||
+ NameKind == Sema::LookupRedeclarationWithLinkage) {
+ IdentifierInfo *II = R.getLookupName().getAsIdentifierInfo();
+ if (II) {
+ // If this is a builtin on this (or all) targets, create the decl.
+ if (unsigned BuiltinID = II->getBuiltinID()) {
+ // In C++, we don't have any predefined library functions like
+ // 'malloc'. Instead, we'll just error.
+ if (S.getLangOpts().CPlusPlus &&
+ S.Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
+ return false;
+
+ if (NamedDecl *D = S.LazilyCreateBuiltin((IdentifierInfo *)II,
+ BuiltinID, S.TUScope,
+ R.isForRedeclaration(),
+ R.getNameLoc())) {
+ R.addDecl(D);
+ return true;
+ }
+
+ if (R.isForRedeclaration()) {
+ // If we're redeclaring this function anyway, forget that
+ // this was a builtin at all.
+ S.Context.BuiltinInfo.ForgetBuiltin(BuiltinID, S.Context.Idents);
+ }
+
+ return false;
+ }
+ }
+ }
+
+ return false;
+}
+
+/// \brief Determine whether we can declare a special member function within
+/// the class at this point.
+static bool CanDeclareSpecialMemberFunction(ASTContext &Context,
+ const CXXRecordDecl *Class) {
+ // We need to have a definition for the class.
+ if (!Class->getDefinition() || Class->isDependentContext())
+ return false;
+
+ // We can't be in the middle of defining the class.
+ if (const RecordType *RecordTy
+ = Context.getTypeDeclType(Class)->getAs<RecordType>())
+ return !RecordTy->isBeingDefined();
+
+ return false;
+}
+
+void Sema::ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class) {
+ if (!CanDeclareSpecialMemberFunction(Context, Class))
+ return;
+
+ // If the default constructor has not yet been declared, do so now.
+ if (Class->needsImplicitDefaultConstructor())
+ DeclareImplicitDefaultConstructor(Class);
+
+ // If the copy constructor has not yet been declared, do so now.
+ if (!Class->hasDeclaredCopyConstructor())
+ DeclareImplicitCopyConstructor(Class);
+
+ // If the copy assignment operator has not yet been declared, do so now.
+ if (!Class->hasDeclaredCopyAssignment())
+ DeclareImplicitCopyAssignment(Class);
+
+ if (getLangOpts().CPlusPlus0x) {
+ // If the move constructor has not yet been declared, do so now.
+ if (Class->needsImplicitMoveConstructor())
+ DeclareImplicitMoveConstructor(Class); // might not actually do it
+
+ // If the move assignment operator has not yet been declared, do so now.
+ if (Class->needsImplicitMoveAssignment())
+ DeclareImplicitMoveAssignment(Class); // might not actually do it
+ }
+
+ // If the destructor has not yet been declared, do so now.
+ if (!Class->hasDeclaredDestructor())
+ DeclareImplicitDestructor(Class);
+}
+
+/// \brief Determine whether this is the name of an implicitly-declared
+/// special member function.
+static bool isImplicitlyDeclaredMemberFunctionName(DeclarationName Name) {
+ switch (Name.getNameKind()) {
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ return true;
+
+ case DeclarationName::CXXOperatorName:
+ return Name.getCXXOverloadedOperator() == OO_Equal;
+
+ default:
+ break;
+ }
+
+ return false;
+}
+
+/// \brief If there are any implicit member functions with the given name
+/// that need to be declared in the given declaration context, do so.
+static void DeclareImplicitMemberFunctionsWithName(Sema &S,
+ DeclarationName Name,
+ const DeclContext *DC) {
+ if (!DC)
+ return;
+
+ switch (Name.getNameKind()) {
+ case DeclarationName::CXXConstructorName:
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC))
+ if (Record->getDefinition() &&
+ CanDeclareSpecialMemberFunction(S.Context, Record)) {
+ CXXRecordDecl *Class = const_cast<CXXRecordDecl *>(Record);
+ if (Record->needsImplicitDefaultConstructor())
+ S.DeclareImplicitDefaultConstructor(Class);
+ if (!Record->hasDeclaredCopyConstructor())
+ S.DeclareImplicitCopyConstructor(Class);
+ if (S.getLangOpts().CPlusPlus0x &&
+ Record->needsImplicitMoveConstructor())
+ S.DeclareImplicitMoveConstructor(Class);
+ }
+ break;
+
+ case DeclarationName::CXXDestructorName:
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC))
+ if (Record->getDefinition() && !Record->hasDeclaredDestructor() &&
+ CanDeclareSpecialMemberFunction(S.Context, Record))
+ S.DeclareImplicitDestructor(const_cast<CXXRecordDecl *>(Record));
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ if (Name.getCXXOverloadedOperator() != OO_Equal)
+ break;
+
+ if (const CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(DC)) {
+ if (Record->getDefinition() &&
+ CanDeclareSpecialMemberFunction(S.Context, Record)) {
+ CXXRecordDecl *Class = const_cast<CXXRecordDecl *>(Record);
+ if (!Record->hasDeclaredCopyAssignment())
+ S.DeclareImplicitCopyAssignment(Class);
+ if (S.getLangOpts().CPlusPlus0x &&
+ Record->needsImplicitMoveAssignment())
+ S.DeclareImplicitMoveAssignment(Class);
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+// Adds all qualifying matches for a name within a decl context to the
+// given lookup result. Returns true if any matches were found.
+static bool LookupDirect(Sema &S, LookupResult &R, const DeclContext *DC) {
+ bool Found = false;
+
+ // Lazily declare C++ special member functions.
+ if (S.getLangOpts().CPlusPlus)
+ DeclareImplicitMemberFunctionsWithName(S, R.getLookupName(), DC);
+
+ // Perform lookup into this declaration context.
+ DeclContext::lookup_const_iterator I, E;
+ for (llvm::tie(I, E) = DC->lookup(R.getLookupName()); I != E; ++I) {
+ NamedDecl *D = *I;
+ if ((D = R.getAcceptableDecl(D))) {
+ R.addDecl(D);
+ Found = true;
+ }
+ }
+
+ if (!Found && DC->isTranslationUnit() && LookupBuiltin(S, R))
+ return true;
+
+ if (R.getLookupName().getNameKind()
+ != DeclarationName::CXXConversionFunctionName ||
+ R.getLookupName().getCXXNameType()->isDependentType() ||
+ !isa<CXXRecordDecl>(DC))
+ return Found;
+
+ // C++ [temp.mem]p6:
+ // A specialization of a conversion function template is not found by
+ // name lookup. Instead, any conversion function templates visible in the
+ // context of the use are considered. [...]
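+ //
+ // For example (illustrative, names made up):
+ //   struct X { template<typename T> operator T*(); };
+ //   int *p = X().operator int*(); // deduction against 'int*' yields T = int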
+ const CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
+ if (!Record->isCompleteDefinition())
+ return Found;
+
+ const UnresolvedSetImpl *Unresolved = Record->getConversionFunctions();
+ for (UnresolvedSetImpl::iterator U = Unresolved->begin(),
+ UEnd = Unresolved->end(); U != UEnd; ++U) {
+ FunctionTemplateDecl *ConvTemplate = dyn_cast<FunctionTemplateDecl>(*U);
+ if (!ConvTemplate)
+ continue;
+
+ // When we're performing lookup for the purposes of redeclaration, just
+ // add the conversion function template. When we deduce template
+ // arguments for specializations, we'll end up unifying the return
+ // type of the new declaration with the type of the function template.
+ if (R.isForRedeclaration()) {
+ R.addDecl(ConvTemplate);
+ Found = true;
+ continue;
+ }
+
+ // C++ [temp.mem]p6:
+ // [...] For each such operator, if argument deduction succeeds
+ // (14.9.2.3), the resulting specialization is used as if found by
+ // name lookup.
+ //
+ // When referencing a conversion function for any purpose other than
+ // a redeclaration (such that we'll be building an expression with the
+ // result), perform template argument deduction and place the
+ // specialization into the result set. We do this to avoid forcing all
+ // callers to perform special deduction for conversion functions.
+ TemplateDeductionInfo Info(R.getSema().Context, R.getNameLoc());
+ FunctionDecl *Specialization = 0;
+
+ const FunctionProtoType *ConvProto
+ = ConvTemplate->getTemplatedDecl()->getType()->getAs<FunctionProtoType>();
+ assert(ConvProto && "Nonsensical conversion function template type");
+
+ // Compute the type of the function that we would expect the conversion
+ // function to have, if it were to match the name given.
+ // FIXME: Calling convention!
+ FunctionProtoType::ExtProtoInfo EPI = ConvProto->getExtProtoInfo();
+ EPI.ExtInfo = EPI.ExtInfo.withCallingConv(CC_Default);
+ EPI.ExceptionSpecType = EST_None;
+ EPI.NumExceptions = 0;
+ QualType ExpectedType
+ = R.getSema().Context.getFunctionType(R.getLookupName().getCXXNameType(),
+ 0, 0, EPI);
+
+ // Perform template argument deduction against the type that we would
+ // expect the function to have.
+ if (R.getSema().DeduceTemplateArguments(ConvTemplate, 0, ExpectedType,
+ Specialization, Info)
+ == Sema::TDK_Success) {
+ R.addDecl(Specialization);
+ Found = true;
+ }
+ }
+
+ return Found;
+}
+
+// Performs C++ unqualified lookup into the given file context.
+static bool
+CppNamespaceLookup(Sema &S, LookupResult &R, ASTContext &Context,
+ DeclContext *NS, UnqualUsingDirectiveSet &UDirs) {
+
+ assert(NS && NS->isFileContext() && "CppNamespaceLookup() requires namespace!");
+
+ // Perform direct name lookup into the LookupCtx.
+ bool Found = LookupDirect(S, R, NS);
+
+ // Perform direct name lookup into the namespaces nominated by the
+ // using directives whose common ancestor is this namespace.
+ UnqualUsingDirectiveSet::const_iterator UI, UEnd;
+ llvm::tie(UI, UEnd) = UDirs.getNamespacesFor(NS);
+
+ for (; UI != UEnd; ++UI)
+ if (LookupDirect(S, R, UI->getNominatedNamespace()))
+ Found = true;
+
+ R.resolveKind();
+
+ return Found;
+}
+
+static bool isNamespaceOrTranslationUnitScope(Scope *S) {
+ if (DeclContext *Ctx = static_cast<DeclContext*>(S->getEntity()))
+ return Ctx->isFileContext();
+ return false;
+}
+
+// Find the next outer declaration context from this scope. This
+// routine actually returns the semantic outer context, which may
+// differ from the lexical context (encoded directly in the Scope
+// stack) when we are parsing a member of a class template. In this
+// case, the second element of the pair will be true, to indicate that
+// name lookup should continue searching in this semantic context when
+// it leaves the current template parameter scope.
+static std::pair<DeclContext *, bool> findOuterContext(Scope *S) {
+ DeclContext *DC = static_cast<DeclContext *>(S->getEntity());
+ DeclContext *Lexical = 0;
+ for (Scope *OuterS = S->getParent(); OuterS;
+ OuterS = OuterS->getParent()) {
+ if (OuterS->getEntity()) {
+ Lexical = static_cast<DeclContext *>(OuterS->getEntity());
+ break;
+ }
+ }
+
+ // C++ [temp.local]p8:
+ // In the definition of a member of a class template that appears
+ // outside of the namespace containing the class template
+ // definition, the name of a template-parameter hides the name of
+ // a member of this namespace.
+ //
+ // Example:
+ //
+ // namespace N {
+ // class C { };
+ //
+ // template<class T> class B {
+ // void f(T);
+ // };
+ // }
+ //
+ // template<class C> void N::B<C>::f(C) {
+ // C b; // C is the template parameter, not N::C
+ // }
+ //
+ // In this example, the lexical context we return is the
+ // TranslationUnit, while the semantic context is the namespace N.
+ if (!Lexical || !DC || !S->getParent() ||
+ !S->getParent()->isTemplateParamScope())
+ return std::make_pair(Lexical, false);
+
+ // Find the outermost template parameter scope.
+ // For the example, this is the scope for the template parameters of
+ // template<class C>.
+ Scope *OutermostTemplateScope = S->getParent();
+ while (OutermostTemplateScope->getParent() &&
+ OutermostTemplateScope->getParent()->isTemplateParamScope())
+ OutermostTemplateScope = OutermostTemplateScope->getParent();
+
+ // Find the namespace context in which the original scope occurs. In
+ // the example, this is namespace N.
+ DeclContext *Semantic = DC;
+ while (!Semantic->isFileContext())
+ Semantic = Semantic->getParent();
+
+ // Find the declaration context just outside of the template
+ // parameter scope. This is the context in which the template is
+ // being lexically declared (a namespace context). In the
+ // example, this is the global scope.
+ if (Lexical->isFileContext() && !Lexical->Equals(Semantic) &&
+ Lexical->Encloses(Semantic))
+ return std::make_pair(Semantic, true);
+
+ return std::make_pair(Lexical, false);
+}
+
+bool Sema::CppLookupName(LookupResult &R, Scope *S) {
+ assert(getLangOpts().CPlusPlus && "Can perform only C++ lookup");
+
+ DeclarationName Name = R.getLookupName();
+
+ // If this is the name of an implicitly-declared special member function,
+ // go through the scope stack to implicitly declare it where needed.
+ if (isImplicitlyDeclaredMemberFunctionName(Name)) {
+ for (Scope *PreS = S; PreS; PreS = PreS->getParent())
+ if (DeclContext *DC = static_cast<DeclContext *>(PreS->getEntity()))
+ DeclareImplicitMemberFunctionsWithName(*this, Name, DC);
+ }
+
+ // Implicitly declare member functions with the name we're looking for, if in
+ // fact we are in a scope where it matters.
+
+ Scope *Initial = S;
+ IdentifierResolver::iterator
+ I = IdResolver.begin(Name),
+ IEnd = IdResolver.end();
+
+ // First we look up the local scope.
+ // We don't consider using-directives, as per 7.3.4.p1 [namespace.udir]
+ // ...During unqualified name lookup (3.4.1), the names appear as if
+ // they were declared in the nearest enclosing namespace which contains
+ // both the using-directive and the nominated namespace.
+ // [Note: in this context, "contains" means "contains directly or
+ // indirectly". ]
+ //
+ // For example:
+ // namespace A { int i; }
+ // void foo() {
+ // int i;
+ // {
+ // using namespace A;
+ // ++i; // finds local 'i', A::i appears at global scope
+ // }
+ // }
+ //
+ DeclContext *OutsideOfTemplateParamDC = 0;
+ for (; S && !isNamespaceOrTranslationUnitScope(S); S = S->getParent()) {
+ DeclContext *Ctx = static_cast<DeclContext*>(S->getEntity());
+
+ // Check whether the IdResolver has anything in this scope.
+ bool Found = false;
+ for (; I != IEnd && S->isDeclScope(*I); ++I) {
+ if (NamedDecl *ND = R.getAcceptableDecl(*I)) {
+ Found = true;
+ R.addDecl(ND);
+ }
+ }
+ if (Found) {
+ R.resolveKind();
+ if (S->isClassScope())
+ if (CXXRecordDecl *Record = dyn_cast_or_null<CXXRecordDecl>(Ctx))
+ R.setNamingClass(Record);
+ return true;
+ }
+
+ if (!Ctx && S->isTemplateParamScope() && OutsideOfTemplateParamDC &&
+ S->getParent() && !S->getParent()->isTemplateParamScope()) {
+ // We've just searched the last template parameter scope and
+ // found nothing, so look into the contexts between the
+ // lexical and semantic declaration contexts returned by
+ // findOuterContext(). This implements the name lookup behavior
+ // of C++ [temp.local]p8.
+ Ctx = OutsideOfTemplateParamDC;
+ OutsideOfTemplateParamDC = 0;
+ }
+
+ if (Ctx) {
+ DeclContext *OuterCtx;
+ bool SearchAfterTemplateScope;
+ llvm::tie(OuterCtx, SearchAfterTemplateScope) = findOuterContext(S);
+ if (SearchAfterTemplateScope)
+ OutsideOfTemplateParamDC = OuterCtx;
+
+ for (; Ctx && !Ctx->Equals(OuterCtx); Ctx = Ctx->getLookupParent()) {
+ // We do not directly look into transparent contexts, since
+ // those entities will be found in the nearest enclosing
+ // non-transparent context.
+ if (Ctx->isTransparentContext())
+ continue;
+
+ // We do not look directly into function or method contexts,
+ // since all of the local variables and parameters of the
+ // function/method are present within the Scope.
+ if (Ctx->isFunctionOrMethod()) {
+ // If we have an Objective-C instance method, look for ivars
+ // in the corresponding interface.
+ if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(Ctx)) {
+ if (Method->isInstanceMethod() && Name.getAsIdentifierInfo())
+ if (ObjCInterfaceDecl *Class = Method->getClassInterface()) {
+ ObjCInterfaceDecl *ClassDeclared;
+ if (ObjCIvarDecl *Ivar = Class->lookupInstanceVariable(
+ Name.getAsIdentifierInfo(),
+ ClassDeclared)) {
+ if (NamedDecl *ND = R.getAcceptableDecl(Ivar)) {
+ R.addDecl(ND);
+ R.resolveKind();
+ return true;
+ }
+ }
+ }
+ }
+
+ continue;
+ }
+
+ // Perform qualified name lookup into this context.
+ // FIXME: In some cases, we know that every name that could be found by
+ // this qualified name lookup will also be on the identifier chain. For
+ // example, inside a class without any base classes, we never need to
+ // perform qualified lookup because all of the members are on top of the
+ // identifier chain.
+ if (LookupQualifiedName(R, Ctx, /*InUnqualifiedLookup=*/true))
+ return true;
+ }
+ }
+ }
+
+ // Stop if we ran out of scopes.
+ // FIXME: This really, really shouldn't be happening.
+ if (!S) return false;
+
+ // If we are looking for members, no need to look into global/namespace scope.
+ if (R.getLookupKind() == LookupMemberName)
+ return false;
+
+ // Collect UsingDirectiveDecls in all scopes and, recursively, all
+ // namespaces nominated by those using-directives.
+ //
+ // FIXME: Cache this sorted list in Scope structure, and DeclContext, so we
+ // don't build it for each lookup!
+
+ UnqualUsingDirectiveSet UDirs;
+ UDirs.visitScopeChain(Initial, S);
+ UDirs.done();
+
+ // Look up namespace scope and global scope.
+ // Unqualified name lookup in C++ requires looking into scopes
+ // that aren't strictly lexical, and therefore we walk through the
+ // context as well as walking through the scopes.
+
+ for (; S; S = S->getParent()) {
+ // Check whether the IdResolver has anything in this scope.
+ bool Found = false;
+ for (; I != IEnd && S->isDeclScope(*I); ++I) {
+ if (NamedDecl *ND = R.getAcceptableDecl(*I)) {
+ // We found something. Look for anything else in our scope
+ // with this same name and in an acceptable identifier
+ // namespace, so that we can construct an overload set if we
+ // need to.
+ Found = true;
+ R.addDecl(ND);
+ }
+ }
+
+ if (Found && S->isTemplateParamScope()) {
+ R.resolveKind();
+ return true;
+ }
+
+ DeclContext *Ctx = static_cast<DeclContext *>(S->getEntity());
+ if (!Ctx && S->isTemplateParamScope() && OutsideOfTemplateParamDC &&
+ S->getParent() && !S->getParent()->isTemplateParamScope()) {
+ // We've just searched the last template parameter scope and
+ // found nothing, so look into the contexts between the
+ // lexical and semantic declaration contexts returned by
+ // findOuterContext(). This implements the name lookup behavior
+ // of C++ [temp.local]p8.
+ Ctx = OutsideOfTemplateParamDC;
+ OutsideOfTemplateParamDC = 0;
+ }
+
+ if (Ctx) {
+ DeclContext *OuterCtx;
+ bool SearchAfterTemplateScope;
+ llvm::tie(OuterCtx, SearchAfterTemplateScope) = findOuterContext(S);
+ if (SearchAfterTemplateScope)
+ OutsideOfTemplateParamDC = OuterCtx;
+
+ for (; Ctx && !Ctx->Equals(OuterCtx); Ctx = Ctx->getLookupParent()) {
+ // We do not directly look into transparent contexts, since
+ // those entities will be found in the nearest enclosing
+ // non-transparent context.
+ if (Ctx->isTransparentContext())
+ continue;
+
+ // If we have a context, and it's not a context stashed in the
+ // template parameter scope for an out-of-line definition, also
+ // look into that context.
+ if (!(Found && S && S->isTemplateParamScope())) {
+ assert(Ctx->isFileContext() &&
+ "We should have been looking only at file context here already.");
+
+ // Look into context considering using-directives.
+ if (CppNamespaceLookup(*this, R, Context, Ctx, UDirs))
+ Found = true;
+ }
+
+ if (Found) {
+ R.resolveKind();
+ return true;
+ }
+
+ if (R.isForRedeclaration() && !Ctx->isTransparentContext())
+ return false;
+ }
+ }
+
+ if (R.isForRedeclaration() && Ctx && !Ctx->isTransparentContext())
+ return false;
+ }
+
+ return !R.empty();
+}
+
+/// \brief Retrieve the visible declaration corresponding to D, if any.
+///
+/// This routine determines whether the declaration D is visible in the current
+/// module, with the current imports. If not, it checks whether any
+/// redeclaration of D is visible, and if so, returns that declaration.
+///
+/// \returns D, or a visible previous declaration of D, whichever is more recent
+/// and visible. If no declaration of D is visible, returns null.
+static NamedDecl *getVisibleDecl(NamedDecl *D) {
+ if (LookupResult::isVisible(D))
+ return D;
+
+ for (Decl::redecl_iterator RD = D->redecls_begin(), RDEnd = D->redecls_end();
+ RD != RDEnd; ++RD) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(*RD)) {
+ if (LookupResult::isVisible(ND))
+ return ND;
+ }
+ }
+
+ return 0;
+}
+
+/// @brief Perform unqualified name lookup starting from a given
+/// scope.
+///
+/// Unqualified name lookup (C++ [basic.lookup.unqual], C99 6.2.1) is
+/// used to find names within the current scope. For example, 'x' in
+/// @code
+/// int x;
+/// int f() {
+/// return x; // unqualified name lookup finds 'x' in the global scope
+/// }
+/// @endcode
+///
+/// Different lookup criteria can find different names. For example, a
+/// particular scope can have both a struct and a function of the same
+/// name, and each can be found by certain lookup criteria. For more
+/// information about lookup criteria, see the documentation for the
+/// class LookupCriteria.
+///
+/// @param S The scope from which unqualified name lookup will
+/// begin. If the lookup criteria permits, name lookup may also search
+/// in the parent scopes.
+///
+/// @param Name The name of the entity that we are searching for.
+///
+/// @param Loc If provided, the source location where we're performing
+/// name lookup. At present, this is only used to produce diagnostics when
+/// C library functions (like "malloc") are implicitly declared.
+///
+/// @returns The result of name lookup, which includes zero or more
+/// declarations and possibly additional information used to diagnose
+/// ambiguities.
+bool Sema::LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation) {
+ DeclarationName Name = R.getLookupName();
+ if (!Name) return false;
+
+ LookupNameKind NameKind = R.getLookupKind();
+
+ if (!getLangOpts().CPlusPlus) {
+ // Unqualified name lookup in C/Objective-C is purely lexical, so
+ // search in the declarations attached to the name.
+ if (NameKind == Sema::LookupRedeclarationWithLinkage) {
+ // Find the nearest non-transparent declaration scope.
+ while (!(S->getFlags() & Scope::DeclScope) ||
+ (S->getEntity() &&
+ static_cast<DeclContext *>(S->getEntity())
+ ->isTransparentContext()))
+ S = S->getParent();
+ }
+
+ unsigned IDNS = R.getIdentifierNamespace();
+
+ // Scan up the scope chain looking for a decl that matches this
+ // identifier that is in the appropriate namespace. This search
+ // should not take long, as shadowing of names is uncommon, and
+ // deep shadowing is extremely uncommon.
+ bool LeftStartingScope = false;
+
+ for (IdentifierResolver::iterator I = IdResolver.begin(Name),
+ IEnd = IdResolver.end();
+ I != IEnd; ++I)
+ if ((*I)->isInIdentifierNamespace(IDNS)) {
+ if (NameKind == LookupRedeclarationWithLinkage) {
+ // Determine whether this (or a previous) declaration is
+ // out-of-scope.
+ if (!LeftStartingScope && !S->isDeclScope(*I))
+ LeftStartingScope = true;
+
+ // If we found something outside of our starting scope that
+ // does not have linkage, skip it.
+ if (LeftStartingScope && !((*I)->hasLinkage()))
+ continue;
+ }
+ else if (NameKind == LookupObjCImplicitSelfParam &&
+ !isa<ImplicitParamDecl>(*I))
+ continue;
+
+ // If this declaration is module-private and it came from an AST
+ // file, we can't see it.
+ NamedDecl *D = R.isHiddenDeclarationVisible()? *I : getVisibleDecl(*I);
+ if (!D)
+ continue;
+
+ R.addDecl(D);
+
+ // Check whether there are any other declarations with the same name
+ // and in the same scope.
+ if (I != IEnd) {
+ // Find the scope in which this declaration was declared (if it
+ // actually exists in a Scope).
+ while (S && !S->isDeclScope(D))
+ S = S->getParent();
+
+ // If the scope containing the declaration is the translation unit,
+ // then we'll need to perform our checks based on the matching
+ // DeclContexts rather than matching scopes.
+ if (S && isNamespaceOrTranslationUnitScope(S))
+ S = 0;
+
+ // Compute the DeclContext, if we need it.
+ DeclContext *DC = 0;
+ if (!S)
+ DC = (*I)->getDeclContext()->getRedeclContext();
+
+ IdentifierResolver::iterator LastI = I;
+ for (++LastI; LastI != IEnd; ++LastI) {
+ if (S) {
+ // Match based on scope.
+ if (!S->isDeclScope(*LastI))
+ break;
+ } else {
+ // Match based on DeclContext.
+ DeclContext *LastDC
+ = (*LastI)->getDeclContext()->getRedeclContext();
+ if (!LastDC->Equals(DC))
+ break;
+ }
+
+ // If the declaration isn't in the right namespace, skip it.
+ if (!(*LastI)->isInIdentifierNamespace(IDNS))
+ continue;
+
+ D = R.isHiddenDeclarationVisible()? *LastI : getVisibleDecl(*LastI);
+ if (D)
+ R.addDecl(D);
+ }
+
+ R.resolveKind();
+ }
+ return true;
+ }
+ } else {
+ // Perform C++ unqualified name lookup.
+ if (CppLookupName(R, S))
+ return true;
+ }
+
+ // If we didn't find a use of this identifier, and if the identifier
+ // corresponds to a compiler builtin, create the decl object for the builtin
+ // now, injecting it into translation unit scope, and return it.
+ if (AllowBuiltinCreation && LookupBuiltin(*this, R))
+ return true;
+
+ // If we didn't find a use of this identifier, the ExternalSource
+ // may be able to handle the situation.
+ // Note: some lookup failures are expected!
+ // See e.g. R.isForRedeclaration().
+ return (ExternalSource && ExternalSource->LookupUnqualified(R, S));
+}
+
+/// @brief Perform qualified name lookup in the namespaces nominated by
+/// using directives in the given context.
+///
+/// C++98 [namespace.qual]p2:
+/// Given X::m (where X is a user-declared namespace), or given ::m
+/// (where X is the global namespace), let S be the set of all
+/// declarations of m in X and in the transitive closure of all
+/// namespaces nominated by using-directives in X and its used
+/// namespaces, except that using-directives are ignored in any
+/// namespace, including X, directly containing one or more
+/// declarations of m. No namespace is searched more than once in
+/// the lookup of a name. If S is the empty set, the program is
+/// ill-formed. Otherwise, if S has exactly one member, or if the
+/// context of the reference is a using-declaration
+/// (namespace.udecl), S is the required set of declarations of
+/// m. Otherwise if the use of m is not one that allows a unique
+/// declaration to be chosen from S, the program is ill-formed.
+/// C++98 [namespace.qual]p5:
+/// During the lookup of a qualified namespace member name, if the
+/// lookup finds more than one declaration of the member, and if one
+/// declaration introduces a class name or enumeration name and the
+/// other declarations either introduce the same object, the same
+/// enumerator or a set of functions, the non-type name hides the
+/// class or enumeration name if and only if the declarations are
+/// from the same namespace; otherwise (the declarations are from
+/// different namespaces), the program is ill-formed.
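+///
+/// For example (illustrative, names made up):
+///   namespace A { int i; }
+///   namespace B { using namespace A; }
+///   int x = B::i; // found: lookup of B::i also searches A, which is
+///                 // nominated by the using-directive in B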
+static bool LookupQualifiedNameInUsingDirectives(Sema &S, LookupResult &R,
+ DeclContext *StartDC) {
+ assert(StartDC->isFileContext() && "start context is not a file context");
+
+ DeclContext::udir_iterator I = StartDC->using_directives_begin();
+ DeclContext::udir_iterator E = StartDC->using_directives_end();
+
+ if (I == E) return false;
+
+ // We have at least added all these contexts to the queue.
+ llvm::SmallPtrSet<DeclContext*, 8> Visited;
+ Visited.insert(StartDC);
+
+ // We have not yet looked into these namespaces, much less added
+ // their "using-children" to the queue.
+ SmallVector<NamespaceDecl*, 8> Queue;
+
+ // We have already looked into the initial namespace; seed the queue
+ // with its using-children.
+ for (; I != E; ++I) {
+ NamespaceDecl *ND = (*I)->getNominatedNamespace()->getOriginalNamespace();
+ if (Visited.insert(ND))
+ Queue.push_back(ND);
+ }
+
+ // The easiest way to implement the restriction in [namespace.qual]p5
+ // is to check whether any of the individual results found a tag
+ // and, if so, to declare an ambiguity if the final result is not
+ // a tag.
+ bool FoundTag = false;
+ bool FoundNonTag = false;
+
+ LookupResult LocalR(LookupResult::Temporary, R);
+
+ bool Found = false;
+ while (!Queue.empty()) {
+ NamespaceDecl *ND = Queue.back();
+ Queue.pop_back();
+
+ // We go through some convolutions here to avoid copying results
+ // between LookupResults.
+ bool UseLocal = !R.empty();
+ LookupResult &DirectR = UseLocal ? LocalR : R;
+ bool FoundDirect = LookupDirect(S, DirectR, ND);
+
+ if (FoundDirect) {
+ // First do any local hiding.
+ DirectR.resolveKind();
+
+ // If the local result is a tag, remember that.
+ if (DirectR.isSingleTagDecl())
+ FoundTag = true;
+ else
+ FoundNonTag = true;
+
+ // Append the local results to the total results if necessary.
+ if (UseLocal) {
+ R.addAllDecls(LocalR);
+ LocalR.clear();
+ }
+ }
+
+ // If we find names in this namespace, ignore its using directives.
+ if (FoundDirect) {
+ Found = true;
+ continue;
+ }
+
+ for (llvm::tie(I,E) = ND->getUsingDirectives(); I != E; ++I) {
+ NamespaceDecl *Nom = (*I)->getNominatedNamespace();
+ if (Visited.insert(Nom))
+ Queue.push_back(Nom);
+ }
+ }
+
+ if (Found) {
+ if (FoundTag && FoundNonTag)
+ R.setAmbiguousQualifiedTagHiding();
+ else
+ R.resolveKind();
+ }
+
+ return Found;
+}
+
+/// \brief Callback that looks for any member of a class with the given name.
+static bool LookupAnyMember(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ void *Name) {
+ RecordDecl *BaseRecord = Specifier->getType()->getAs<RecordType>()->getDecl();
+
+ DeclarationName N = DeclarationName::getFromOpaquePtr(Name);
+ Path.Decls = BaseRecord->lookup(N);
+ return Path.Decls.first != Path.Decls.second;
+}
+
+/// \brief Determine whether the given set of member declarations contains only
+/// static members, nested types, and enumerators.
+template<typename InputIterator>
+static bool HasOnlyStaticMembers(InputIterator First, InputIterator Last) {
+ Decl *D = (*First)->getUnderlyingDecl();
+ if (isa<VarDecl>(D) || isa<TypeDecl>(D) || isa<EnumConstantDecl>(D))
+ return true;
+
+ if (isa<CXXMethodDecl>(D)) {
+ // Determine whether all of the methods are static.
+ bool AllMethodsAreStatic = true;
+ for(; First != Last; ++First) {
+ D = (*First)->getUnderlyingDecl();
+
+ if (!isa<CXXMethodDecl>(D)) {
+ assert(isa<TagDecl>(D) && "Non-function must be a tag decl");
+ break;
+ }
+
+ if (!cast<CXXMethodDecl>(D)->isStatic()) {
+ AllMethodsAreStatic = false;
+ break;
+ }
+ }
+
+ if (AllMethodsAreStatic)
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Perform qualified name lookup into a given context.
+///
+/// Qualified name lookup (C++ [basic.lookup.qual]) is used to find
+/// names when the context of those names is explicitly specified, e.g.,
+/// "std::vector" or "x->member", or as part of unqualified name lookup.
+///
+/// Different lookup criteria can find different names. For example, a
+/// particular scope can have both a struct and a function of the same
+/// name, and each can be found by certain lookup criteria. For more
+/// information about lookup criteria, see the documentation for the
+/// class LookupCriteria.
+///
+/// \param R captures both the lookup criteria and any lookup results found.
+///
+/// \param LookupCtx The context in which qualified name lookup will
+/// search. If the lookup criteria permits, name lookup may also search
+/// in the parent contexts or (for C++ classes) base classes.
+///
+/// \param InUnqualifiedLookup true if this is qualified name lookup that
+/// occurs as part of unqualified name lookup.
+///
+/// \returns true if lookup succeeded, false if it failed.
+bool Sema::LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
+ bool InUnqualifiedLookup) {
+ assert(LookupCtx && "Sema::LookupQualifiedName requires a lookup context");
+
+ if (!R.getLookupName())
+ return false;
+
+ // Make sure that the declaration context is complete.
+ assert((!isa<TagDecl>(LookupCtx) ||
+ LookupCtx->isDependentContext() ||
+ cast<TagDecl>(LookupCtx)->isCompleteDefinition() ||
+ cast<TagDecl>(LookupCtx)->isBeingDefined()) &&
+ "Declaration context must already be complete!");
+
+ // Perform qualified name lookup into the LookupCtx.
+ if (LookupDirect(*this, R, LookupCtx)) {
+ R.resolveKind();
+ if (isa<CXXRecordDecl>(LookupCtx))
+ R.setNamingClass(cast<CXXRecordDecl>(LookupCtx));
+ return true;
+ }
+
+ // Don't descend into implied contexts for redeclarations.
+ // C++98 [namespace.qual]p6:
+ // In a declaration for a namespace member in which the
+ // declarator-id is a qualified-id, given that the qualified-id
+ // for the namespace member has the form
+ // nested-name-specifier unqualified-id
+ // the unqualified-id shall name a member of the namespace
+ // designated by the nested-name-specifier.
+ // See also [class.mfct]p5 and [class.static.data]p2.
+ if (R.isForRedeclaration())
+ return false;
+
+ // If this is a namespace, look it up in the implied namespaces.
+ if (LookupCtx->isFileContext())
+ return LookupQualifiedNameInUsingDirectives(*this, R, LookupCtx);
+
+ // If this isn't a C++ class, we aren't allowed to look into base
+ // classes, we're done.
+ CXXRecordDecl *LookupRec = dyn_cast<CXXRecordDecl>(LookupCtx);
+ if (!LookupRec || !LookupRec->getDefinition())
+ return false;
+
+ // If we're performing qualified name lookup into a dependent class,
+ // then we are actually looking into a current instantiation. If we have any
+ // dependent base classes, then we either have to delay lookup until
+ // template instantiation time (at which point all bases will be available)
+ // or we have to fail.
+ if (!InUnqualifiedLookup && LookupRec->isDependentContext() &&
+ LookupRec->hasAnyDependentBases()) {
+ R.setNotFoundInCurrentInstantiation();
+ return false;
+ }
+
+ // Perform lookup into our base classes.
+ CXXBasePaths Paths;
+ Paths.setOrigin(LookupRec);
+
+ // Look for this member in our base classes
+ CXXRecordDecl::BaseMatchesCallback *BaseCallback = 0;
+ switch (R.getLookupKind()) {
+ case LookupObjCImplicitSelfParam:
+ case LookupOrdinaryName:
+ case LookupMemberName:
+ case LookupRedeclarationWithLinkage:
+ BaseCallback = &CXXRecordDecl::FindOrdinaryMember;
+ break;
+
+ case LookupTagName:
+ BaseCallback = &CXXRecordDecl::FindTagMember;
+ break;
+
+ case LookupAnyName:
+ BaseCallback = &LookupAnyMember;
+ break;
+
+ case LookupUsingDeclName:
+ // This lookup is for redeclarations only.
+
+ case LookupOperatorName:
+ case LookupNamespaceName:
+ case LookupObjCProtocolName:
+ case LookupLabel:
+ // These lookups will never find a member in a C++ class (or base class).
+ return false;
+
+ case LookupNestedNameSpecifierName:
+ BaseCallback = &CXXRecordDecl::FindNestedNameSpecifierMember;
+ break;
+ }
+
+ if (!LookupRec->lookupInBases(BaseCallback,
+ R.getLookupName().getAsOpaquePtr(), Paths))
+ return false;
+
+ R.setNamingClass(LookupRec);
+
+ // C++ [class.member.lookup]p2:
+ // [...] If the resulting set of declarations are not all from
+ // sub-objects of the same type, or the set has a nonstatic member
+ // and includes members from distinct sub-objects, there is an
+ // ambiguity and the program is ill-formed. Otherwise that set is
+ // the result of the lookup.
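+ //
+ // For example (illustrative, names made up):
+ //   struct A { int x; };
+ //   struct B : A { };
+ //   struct C : A { };
+ //   struct D : B, C { }; // lookup of 'x' in D is ambiguous: it names a
+ //                        // nonstatic member in two distinct A subobjects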
+ QualType SubobjectType;
+ int SubobjectNumber = 0;
+ AccessSpecifier SubobjectAccess = AS_none;
+
+ for (CXXBasePaths::paths_iterator Path = Paths.begin(), PathEnd = Paths.end();
+ Path != PathEnd; ++Path) {
+ const CXXBasePathElement &PathElement = Path->back();
+
+ // Pick the best (i.e. most permissive i.e. numerically lowest) access
+ // across all paths.
+ SubobjectAccess = std::min(SubobjectAccess, Path->Access);
+
+ // Determine whether we're looking at a distinct sub-object or not.
+ if (SubobjectType.isNull()) {
+ // This is the first subobject we've looked at. Record its type.
+ SubobjectType = Context.getCanonicalType(PathElement.Base->getType());
+ SubobjectNumber = PathElement.SubobjectNumber;
+ continue;
+ }
+
+ if (SubobjectType
+ != Context.getCanonicalType(PathElement.Base->getType())) {
+ // We found members of the given name in two subobjects of
+ // different types. If the declaration sets aren't the same, this
+ // lookup is ambiguous.
+ if (HasOnlyStaticMembers(Path->Decls.first, Path->Decls.second)) {
+ CXXBasePaths::paths_iterator FirstPath = Paths.begin();
+ DeclContext::lookup_iterator FirstD = FirstPath->Decls.first;
+ DeclContext::lookup_iterator CurrentD = Path->Decls.first;
+
+ while (FirstD != FirstPath->Decls.second &&
+ CurrentD != Path->Decls.second) {
+ if ((*FirstD)->getUnderlyingDecl()->getCanonicalDecl() !=
+ (*CurrentD)->getUnderlyingDecl()->getCanonicalDecl())
+ break;
+
+ ++FirstD;
+ ++CurrentD;
+ }
+
+ if (FirstD == FirstPath->Decls.second &&
+ CurrentD == Path->Decls.second)
+ continue;
+ }
+
+ R.setAmbiguousBaseSubobjectTypes(Paths);
+ return true;
+ }
+
+ if (SubobjectNumber != PathElement.SubobjectNumber) {
+ // We have a different subobject of the same type.
+
+ // C++ [class.member.lookup]p5:
+ // A static member, a nested type or an enumerator defined in
+ // a base class T can unambiguously be found even if an object
+ // has more than one base class subobject of type T.
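+ //
+ // For example (illustrative, names made up):
+ //   struct T { static int s; };
+ //   struct B1 : T { };
+ //   struct B2 : T { };
+ //   struct D : B1, B2 { };
+ //   int *p = &D::s; // OK: 's' is static, so the two T subobjects do not
+ //                   // make the lookup ambiguous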
+ if (HasOnlyStaticMembers(Path->Decls.first, Path->Decls.second))
+ continue;
+
+ // We have found a nonstatic member name in multiple, distinct
+ // subobjects. Name lookup is ambiguous.
+ R.setAmbiguousBaseSubobjects(Paths);
+ return true;
+ }
+ }
+
+ // Lookup in a base class succeeded; return these results.
+
+ DeclContext::lookup_iterator I, E;
+ for (llvm::tie(I,E) = Paths.front().Decls; I != E; ++I) {
+ NamedDecl *D = *I;
+ AccessSpecifier AS = CXXRecordDecl::MergeAccess(SubobjectAccess,
+ D->getAccess());
+ R.addDecl(D, AS);
+ }
+ R.resolveKind();
+ return true;
+}
+
+/// @brief Performs name lookup for a name that was parsed in the
+/// source code, and may contain a C++ scope specifier.
+///
+/// This routine is a convenience routine meant to be called from
+/// contexts that receive a name and an optional C++ scope specifier
+/// (e.g., "N::M::x"). It will then perform either qualified or
+/// unqualified name lookup (with LookupQualifiedName or LookupName,
+/// respectively) on the given name and return those results.
+///
+/// @param S The scope from which unqualified name lookup will
+/// begin.
+///
+/// @param SS An optional C++ scope-specifier, e.g., "::N::M".
+///
+/// @param EnteringContext Indicates whether we are going to enter the
+/// context of the scope-specifier SS (if present).
+///
+/// @returns True if any decls were found (but possibly ambiguous)
+bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
+ bool AllowBuiltinCreation, bool EnteringContext) {
+ if (SS && SS->isInvalid()) {
+ // When the scope specifier is invalid, don't even look for
+ // anything.
+ return false;
+ }
+
+ if (SS && SS->isSet()) {
+ if (DeclContext *DC = computeDeclContext(*SS, EnteringContext)) {
+ // We have resolved the scope specifier to a particular declaration
+ // context, and will perform name lookup in that context.
+ if (!DC->isDependentContext() && RequireCompleteDeclContext(*SS, DC))
+ return false;
+
+ R.setContextRange(SS->getRange());
+ return LookupQualifiedName(R, DC);
+ }
+
+ // We could not resolve the scope specifier to a specific declaration
+ // context, which means that SS refers to an unknown specialization.
+ // Name lookup can't find anything in this case.
+ R.setNotFoundInCurrentInstantiation();
+ R.setContextRange(SS->getRange());
+ return false;
+ }
+
+ // Perform unqualified name lookup starting in the given scope.
+ return LookupName(R, S, AllowBuiltinCreation);
+}
+
+
+/// @brief Produce a diagnostic describing the ambiguity that resulted
+/// from name lookup.
+///
+/// @param Result The ambiguous name lookup result.
+///
+/// @param Name The name of the entity that name lookup was
+/// searching for.
+///
+/// @param NameLoc The location of the name within the source code.
+///
+/// @param LookupRange A source range that provides more
+/// source-location information concerning the lookup itself. For
+/// example, this range might highlight a nested-name-specifier that
+/// precedes the name.
+///
+/// @returns true
+bool Sema::DiagnoseAmbiguousLookup(LookupResult &Result) {
+ assert(Result.isAmbiguous() && "Lookup result must be ambiguous");
+
+ DeclarationName Name = Result.getLookupName();
+ SourceLocation NameLoc = Result.getNameLoc();
+ SourceRange LookupRange = Result.getContextRange();
+
+ switch (Result.getAmbiguityKind()) {
+ case LookupResult::AmbiguousBaseSubobjects: {
+ CXXBasePaths *Paths = Result.getBasePaths();
+ QualType SubobjectType = Paths->front().back().Base->getType();
+ Diag(NameLoc, diag::err_ambiguous_member_multiple_subobjects)
+ << Name << SubobjectType << getAmbiguousPathsDisplayString(*Paths)
+ << LookupRange;
+
+ DeclContext::lookup_iterator Found = Paths->front().Decls.first;
+ while (isa<CXXMethodDecl>(*Found) &&
+ cast<CXXMethodDecl>(*Found)->isStatic())
+ ++Found;
+
+ Diag((*Found)->getLocation(), diag::note_ambiguous_member_found);
+
+ return true;
+ }
+
+ case LookupResult::AmbiguousBaseSubobjectTypes: {
+ Diag(NameLoc, diag::err_ambiguous_member_multiple_subobject_types)
+ << Name << LookupRange;
+
+ CXXBasePaths *Paths = Result.getBasePaths();
+ std::set<Decl *> DeclsPrinted;
+ for (CXXBasePaths::paths_iterator Path = Paths->begin(),
+ PathEnd = Paths->end();
+ Path != PathEnd; ++Path) {
+ Decl *D = *Path->Decls.first;
+ if (DeclsPrinted.insert(D).second)
+ Diag(D->getLocation(), diag::note_ambiguous_member_found);
+ }
+
+ return true;
+ }
+
+ case LookupResult::AmbiguousTagHiding: {
+ Diag(NameLoc, diag::err_ambiguous_tag_hiding) << Name << LookupRange;
+
+ llvm::SmallPtrSet<NamedDecl*,8> TagDecls;
+
+ LookupResult::iterator DI, DE = Result.end();
+ for (DI = Result.begin(); DI != DE; ++DI)
+ if (TagDecl *TD = dyn_cast<TagDecl>(*DI)) {
+ TagDecls.insert(TD);
+ Diag(TD->getLocation(), diag::note_hidden_tag);
+ }
+
+ for (DI = Result.begin(); DI != DE; ++DI)
+ if (!isa<TagDecl>(*DI))
+ Diag((*DI)->getLocation(), diag::note_hiding_object);
+
+ // For recovery purposes, go ahead and implement the hiding.
+ LookupResult::Filter F = Result.makeFilter();
+ while (F.hasNext()) {
+ if (TagDecls.count(F.next()))
+ F.erase();
+ }
+ F.done();
+
+ return true;
+ }
+
+ case LookupResult::AmbiguousReference: {
+ Diag(NameLoc, diag::err_ambiguous_reference) << Name << LookupRange;
+
+ LookupResult::iterator DI = Result.begin(), DE = Result.end();
+ for (; DI != DE; ++DI)
+ Diag((*DI)->getLocation(), diag::note_ambiguous_candidate) << *DI;
+
+ return true;
+ }
+ }
+
+ llvm_unreachable("unknown ambiguity kind");
+}
+
+namespace {
+ struct AssociatedLookup {
+ AssociatedLookup(Sema &S,
+ Sema::AssociatedNamespaceSet &Namespaces,
+ Sema::AssociatedClassSet &Classes)
+ : S(S), Namespaces(Namespaces), Classes(Classes) {
+ }
+
+ Sema &S;
+ Sema::AssociatedNamespaceSet &Namespaces;
+ Sema::AssociatedClassSet &Classes;
+ };
+}
+
+static void
+addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType T);
+
+static void CollectEnclosingNamespace(Sema::AssociatedNamespaceSet &Namespaces,
+ DeclContext *Ctx) {
+ // Add the associated namespace for this class.
+
+ // We don't use DeclContext::getEnclosingNamespaceContext() as this may
+ // be a locally scoped record.
+
+ // We skip out of inline namespaces. The innermost non-inline namespace
+ // contains all names of all its nested inline namespaces anyway, so we can
+ // replace the entire inline namespace tree with its root.
+ while (Ctx->isRecord() || Ctx->isTransparentContext() ||
+ Ctx->isInlineNamespace())
+ Ctx = Ctx->getParent();
+
+ if (Ctx->isFileContext())
+ Namespaces.insert(Ctx->getPrimaryContext());
+}
+
+// \brief Add the associated classes and namespaces for argument-dependent
+// lookup that involves a template argument (C++ [basic.lookup.koenig]p2).
+static void
+addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
+ const TemplateArgument &Arg) {
+ // C++ [basic.lookup.koenig]p2, last bullet:
+ // -- [...] ;
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ break;
+
+ case TemplateArgument::Type:
+ // [...] the namespaces and classes associated with the types of the
+ // template arguments provided for template type parameters (excluding
+ // template template parameters)
+ addAssociatedClassesAndNamespaces(Result, Arg.getAsType());
+ break;
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion: {
+ // [...] the namespaces in which any template template arguments are
+ // defined; and the classes in which any member templates used as
+ // template template arguments are defined.
+ TemplateName Template = Arg.getAsTemplateOrTemplatePattern();
+ if (ClassTemplateDecl *ClassTemplate
+ = dyn_cast<ClassTemplateDecl>(Template.getAsTemplateDecl())) {
+ DeclContext *Ctx = ClassTemplate->getDeclContext();
+ if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
+ Result.Classes.insert(EnclosingClass);
+ // Add the associated namespace for this class.
+ CollectEnclosingNamespace(Result.Namespaces, Ctx);
+ }
+ break;
+ }
+
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Expression:
+ // [Note: non-type template arguments do not contribute to the set of
+ // associated namespaces. ]
+ break;
+
+ case TemplateArgument::Pack:
+ for (TemplateArgument::pack_iterator P = Arg.pack_begin(),
+ PEnd = Arg.pack_end();
+ P != PEnd; ++P)
+ addAssociatedClassesAndNamespaces(Result, *P);
+ break;
+ }
+}
+
+// \brief Add the associated classes and namespaces for
+// argument-dependent lookup with an argument of class type
+// (C++ [basic.lookup.koenig]p2).
+static void
+addAssociatedClassesAndNamespaces(AssociatedLookup &Result,
+ CXXRecordDecl *Class) {
+
+ // Just silently ignore anything whose name is __va_list_tag.
+ if (Class->getDeclName() == Result.S.VAListTagName)
+ return;
+
+ // C++ [basic.lookup.koenig]p2:
+ // [...]
+ // -- If T is a class type (including unions), its associated
+ // classes are: the class itself; the class of which it is a
+ // member, if any; and its direct and indirect base
+ // classes. Its associated namespaces are the namespaces in
+ // which its associated classes are defined.
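+ //
+ // For example (illustrative, names made up):
+ //   namespace N { struct Outer { struct Inner { }; }; void f(Outer::Inner); }
+ //   void call(N::Outer::Inner i) { f(i); } // ADL finds N::f: Inner's associated
+ //                                          // classes include Outer, and their
+ //                                          // associated namespace is N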
+
+ // Add the class of which it is a member, if any.
+ DeclContext *Ctx = Class->getDeclContext();
+ if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
+ Result.Classes.insert(EnclosingClass);
+ // Add the associated namespace for this class.
+ CollectEnclosingNamespace(Result.Namespaces, Ctx);
+
+ // Add the class itself. If we've already seen this class, we don't
+ // need to visit base classes.
+ if (!Result.Classes.insert(Class))
+ return;
+
+ // -- If T is a template-id, its associated namespaces and classes are
+ // the namespace in which the template is defined; for member
+ // templates, the member template's class; the namespaces and classes
+ // associated with the types of the template arguments provided for
+ // template type parameters (excluding template template parameters); the
+ // namespaces in which any template template arguments are defined; and
+ // the classes in which any member templates used as template template
+ // arguments are defined. [Note: non-type template arguments do not
+ // contribute to the set of associated namespaces. ]
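+ //
+ // For example (illustrative, names made up):
+ //   namespace M { struct A { }; }
+ //   namespace N { template<typename T> struct Box { }; }
+ //   namespace M { void take(N::Box<A>); }
+ //   void call() { N::Box<M::A> b; take(b); } // ADL finds M::take: M is
+ //                                            // associated via the template
+ //                                            // argument M::A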
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Class)) {
+ DeclContext *Ctx = Spec->getSpecializedTemplate()->getDeclContext();
+ if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
+ Result.Classes.insert(EnclosingClass);
+ // Add the associated namespace for this class.
+ CollectEnclosingNamespace(Result.Namespaces, Ctx);
+
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ addAssociatedClassesAndNamespaces(Result, TemplateArgs[I]);
+ }
+
+ // Only recurse into base classes for complete types.
+ if (!Class->hasDefinition()) {
+ // FIXME: we might need to instantiate templates here
+ return;
+ }
+
+ // Add direct and indirect base classes along with their associated
+ // namespaces.
+ SmallVector<CXXRecordDecl *, 32> Bases;
+ Bases.push_back(Class);
+ while (!Bases.empty()) {
+ // Pop this class off the stack.
+ Class = Bases.back();
+ Bases.pop_back();
+
+ // Visit the base classes.
+ for (CXXRecordDecl::base_class_iterator Base = Class->bases_begin(),
+ BaseEnd = Class->bases_end();
+ Base != BaseEnd; ++Base) {
+ const RecordType *BaseType = Base->getType()->getAs<RecordType>();
+ // In dependent contexts, we do ADL twice, and the first time around,
+ // the base type might be a dependent TemplateSpecializationType, or a
+ // TemplateTypeParmType. If that happens, simply ignore it.
+ // FIXME: If we want to support export, we probably need to add the
+ // namespace of the template in a TemplateSpecializationType, or even
+ // the classes and namespaces of known non-dependent arguments.
+ if (!BaseType)
+ continue;
+ CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(BaseType->getDecl());
+ if (Result.Classes.insert(BaseDecl)) {
+ // Find the associated namespace for this base class.
+ DeclContext *BaseCtx = BaseDecl->getDeclContext();
+ CollectEnclosingNamespace(Result.Namespaces, BaseCtx);
+
+ // Make sure we visit the bases of this base class.
+ if (BaseDecl->bases_begin() != BaseDecl->bases_end())
+ Bases.push_back(BaseDecl);
+ }
+ }
+ }
+}
+
+/// \brief Add the associated classes and namespaces for
+/// argument-dependent lookup with an argument of type T
+/// (C++ [basic.lookup.koenig]p2).
+static void
+addAssociatedClassesAndNamespaces(AssociatedLookup &Result, QualType Ty) {
+ // C++ [basic.lookup.koenig]p2:
+ //
+ // For each argument type T in the function call, there is a set
+ // of zero or more associated namespaces and a set of zero or more
+ // associated classes to be considered. The sets of namespaces and
+ // classes is determined entirely by the types of the function
+ // arguments (and the namespace of any template template
+ // argument). Typedef names and using-declarations used to specify
+ // the types do not contribute to this set. The sets of namespaces
+ // and classes are determined in the following way:
+
+ SmallVector<const Type *, 16> Queue;
+ const Type *T = Ty->getCanonicalTypeInternal().getTypePtr();
+
+ while (true) {
+ switch (T->getTypeClass()) {
+
+#define TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define ABSTRACT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+ // T is canonical. We can also ignore dependent types because
+ // we don't need to do ADL at the definition point, but if we
+ // wanted to implement template export (or if we find some other
+ // use for associated classes and namespaces...) this would be
+ // wrong.
+ break;
+
+ // -- If T is a pointer to U or an array of U, its associated
+ // namespaces and classes are those associated with U.
+ case Type::Pointer:
+ T = cast<PointerType>(T)->getPointeeType().getTypePtr();
+ continue;
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ T = cast<ArrayType>(T)->getElementType().getTypePtr();
+ continue;
+
+ // -- If T is a fundamental type, its associated sets of
+ // namespaces and classes are both empty.
+ case Type::Builtin:
+ break;
+
+ // -- If T is a class type (including unions), its associated
+ // classes are: the class itself; the class of which it is a
+ // member, if any; and its direct and indirect base
+ // classes. Its associated namespaces are the namespaces in
+ // which its associated classes are defined.
+ case Type::Record: {
+ CXXRecordDecl *Class
+ = cast<CXXRecordDecl>(cast<RecordType>(T)->getDecl());
+ addAssociatedClassesAndNamespaces(Result, Class);
+ break;
+ }
+
+ // -- If T is an enumeration type, its associated namespace is
+    //        the namespace in which it is defined. If it is a class
+ // member, its associated class is the member's class; else
+ // it has no associated class.
+ case Type::Enum: {
+ EnumDecl *Enum = cast<EnumType>(T)->getDecl();
+
+ DeclContext *Ctx = Enum->getDeclContext();
+ if (CXXRecordDecl *EnclosingClass = dyn_cast<CXXRecordDecl>(Ctx))
+ Result.Classes.insert(EnclosingClass);
+
+ // Add the associated namespace for this class.
+ CollectEnclosingNamespace(Result.Namespaces, Ctx);
+
+ break;
+ }
+
+ // -- If T is a function type, its associated namespaces and
+ // classes are those associated with the function parameter
+ // types and those associated with the return type.
+ case Type::FunctionProto: {
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+ for (FunctionProtoType::arg_type_iterator Arg = Proto->arg_type_begin(),
+ ArgEnd = Proto->arg_type_end();
+ Arg != ArgEnd; ++Arg)
+ Queue.push_back(Arg->getTypePtr());
+ // fallthrough
+ }
+ case Type::FunctionNoProto: {
+ const FunctionType *FnType = cast<FunctionType>(T);
+ T = FnType->getResultType().getTypePtr();
+ continue;
+ }
+
+ // -- If T is a pointer to a member function of a class X, its
+ // associated namespaces and classes are those associated
+ // with the function parameter types and return type,
+ // together with those associated with X.
+ //
+ // -- If T is a pointer to a data member of class X, its
+ // associated namespaces and classes are those associated
+ // with the member type together with those associated with
+ // X.
+ case Type::MemberPointer: {
+ const MemberPointerType *MemberPtr = cast<MemberPointerType>(T);
+
+ // Queue up the class type into which this points.
+ Queue.push_back(MemberPtr->getClass());
+
+ // And directly continue with the pointee type.
+ T = MemberPtr->getPointeeType().getTypePtr();
+ continue;
+ }
+
+ // As an extension, treat this like a normal pointer.
+ case Type::BlockPointer:
+ T = cast<BlockPointerType>(T)->getPointeeType().getTypePtr();
+ continue;
+
+ // References aren't covered by the standard, but that's such an
+ // obvious defect that we cover them anyway.
+ case Type::LValueReference:
+ case Type::RValueReference:
+ T = cast<ReferenceType>(T)->getPointeeType().getTypePtr();
+ continue;
+
+ // These are fundamental types.
+ case Type::Vector:
+ case Type::ExtVector:
+ case Type::Complex:
+ break;
+
+ // If T is an Objective-C object or interface type, or a pointer to an
+ // object or interface type, the associated namespace is the global
+ // namespace.
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer:
+ Result.Namespaces.insert(Result.S.Context.getTranslationUnitDecl());
+ break;
+
+ // Atomic types are just wrappers; use the associations of the
+ // contained type.
+ case Type::Atomic:
+ T = cast<AtomicType>(T)->getValueType().getTypePtr();
+ continue;
+ }
+
+ if (Queue.empty()) break;
+ T = Queue.back();
+ Queue.pop_back();
+ }
+}
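+
+// For illustration, a hypothetical argument type and the associations the
+// routine above would compute for it (only a sketch):
+//
+//   namespace N {
+//     struct S {};
+//     enum E {};
+//   }
+//   void f(N::S (*fp)(N::E));   // parameter type: pointer to function
+//
+// Starting from the pointer type, the loop strips the pointer, queues the
+// parameter type N::E, and continues with the return type N::S, so both the
+// class N::S and the namespace N end up in the associated sets.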
+
+/// \brief Find the associated classes and namespaces for
+/// argument-dependent lookup for a call with the given set of
+/// arguments.
+///
+/// This routine computes the sets of associated classes and associated
+/// namespaces searched by argument-dependent lookup
+/// (C++ [basic.lookup.argdep]) for a given set of arguments.
+void
+Sema::FindAssociatedClassesAndNamespaces(llvm::ArrayRef<Expr *> Args,
+ AssociatedNamespaceSet &AssociatedNamespaces,
+ AssociatedClassSet &AssociatedClasses) {
+ AssociatedNamespaces.clear();
+ AssociatedClasses.clear();
+
+ AssociatedLookup Result(*this, AssociatedNamespaces, AssociatedClasses);
+
+ // C++ [basic.lookup.koenig]p2:
+ // For each argument type T in the function call, there is a set
+ // of zero or more associated namespaces and a set of zero or more
+ // associated classes to be considered. The sets of namespaces and
+ // classes is determined entirely by the types of the function
+ // arguments (and the namespace of any template template
+ // argument).
+ for (unsigned ArgIdx = 0; ArgIdx != Args.size(); ++ArgIdx) {
+ Expr *Arg = Args[ArgIdx];
+
+ if (Arg->getType() != Context.OverloadTy) {
+ addAssociatedClassesAndNamespaces(Result, Arg->getType());
+ continue;
+ }
+
+ // [...] In addition, if the argument is the name or address of a
+ // set of overloaded functions and/or function templates, its
+ // associated classes and namespaces are the union of those
+ // associated with each of the members of the set: the namespace
+ // in which the function or function template is defined and the
+ // classes and namespaces associated with its (non-dependent)
+ // parameter types and return type.
+ Arg = Arg->IgnoreParens();
+ if (UnaryOperator *unaryOp = dyn_cast<UnaryOperator>(Arg))
+ if (unaryOp->getOpcode() == UO_AddrOf)
+ Arg = unaryOp->getSubExpr();
+
+ UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(Arg);
+ if (!ULE) continue;
+
+ for (UnresolvedSetIterator I = ULE->decls_begin(), E = ULE->decls_end();
+ I != E; ++I) {
+ // Look through any using declarations to find the underlying function.
+ NamedDecl *Fn = (*I)->getUnderlyingDecl();
+
+ FunctionDecl *FDecl = dyn_cast<FunctionDecl>(Fn);
+ if (!FDecl)
+ FDecl = cast<FunctionTemplateDecl>(Fn)->getTemplatedDecl();
+
+ // Add the classes and namespaces associated with the parameter
+ // types and return type of this function.
+ addAssociatedClassesAndNamespaces(Result, FDecl->getType());
+ }
+ }
+}
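+
+// For illustration, a hypothetical call whose argument is the address of an
+// overload set, exercising the branch above (only a sketch):
+//
+//   namespace N {
+//     struct S {};
+//     void g(S);
+//     void g(int);
+//   }
+//   template<typename F> void call(F);
+//   void h() { call(&N::g); }   // &N::g has overload type
+//
+// For the argument &N::g, the code looks through the address-of operator,
+// walks the UnresolvedLookupExpr, and adds the associations of each
+// candidate's function type, so namespace N and class N::S are associated.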
+
+/// IsAcceptableNonMemberOperatorCandidate - Determine whether Fn is
+/// an acceptable non-member overloaded operator for a call whose
+/// arguments have types T1 (and, if non-empty, T2). This routine
+/// implements the check in C++ [over.match.oper]p3b2 concerning
+/// enumeration types.
+static bool
+IsAcceptableNonMemberOperatorCandidate(FunctionDecl *Fn,
+ QualType T1, QualType T2,
+ ASTContext &Context) {
+ if (T1->isDependentType() || (!T2.isNull() && T2->isDependentType()))
+ return true;
+
+ if (T1->isRecordType() || (!T2.isNull() && T2->isRecordType()))
+ return true;
+
+ const FunctionProtoType *Proto = Fn->getType()->getAs<FunctionProtoType>();
+ if (Proto->getNumArgs() < 1)
+ return false;
+
+ if (T1->isEnumeralType()) {
+ QualType ArgType = Proto->getArgType(0).getNonReferenceType();
+ if (Context.hasSameUnqualifiedType(T1, ArgType))
+ return true;
+ }
+
+ if (Proto->getNumArgs() < 2)
+ return false;
+
+ if (!T2.isNull() && T2->isEnumeralType()) {
+ QualType ArgType = Proto->getArgType(1).getNonReferenceType();
+ if (Context.hasSameUnqualifiedType(T2, ArgType))
+ return true;
+ }
+
+ return false;
+}
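+
+// For illustration, the enumeration filter above applied to hypothetical
+// candidates (only a sketch):
+//
+//   enum Color { Red };
+//   enum Mask  { None };
+//   Color operator|(Color, Color);   // first parameter is Color: acceptable
+//   Mask  operator|(Mask, Mask);     // parameters are a different enum: rejected
+//
+// For 'Red | Red' (T1 == T2 == Color, no class-type operand), only the first
+// candidate survives the parameter-type check above.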
+
+NamedDecl *Sema::LookupSingleName(Scope *S, DeclarationName Name,
+ SourceLocation Loc,
+ LookupNameKind NameKind,
+ RedeclarationKind Redecl) {
+ LookupResult R(*this, Name, Loc, NameKind, Redecl);
+ LookupName(R, S);
+ return R.getAsSingle<NamedDecl>();
+}
+
+/// \brief Find the protocol with the given name, if any.
+ObjCProtocolDecl *Sema::LookupProtocol(IdentifierInfo *II,
+ SourceLocation IdLoc,
+ RedeclarationKind Redecl) {
+ Decl *D = LookupSingleName(TUScope, II, IdLoc,
+ LookupObjCProtocolName, Redecl);
+ return cast_or_null<ObjCProtocolDecl>(D);
+}
+
+void Sema::LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
+ QualType T1, QualType T2,
+ UnresolvedSetImpl &Functions) {
+ // C++ [over.match.oper]p3:
+ // -- The set of non-member candidates is the result of the
+ // unqualified lookup of operator@ in the context of the
+ // expression according to the usual rules for name lookup in
+ // unqualified function calls (3.4.2) except that all member
+ // functions are ignored. However, if no operand has a class
+ // type, only those non-member functions in the lookup set
+ // that have a first parameter of type T1 or "reference to
+ // (possibly cv-qualified) T1", when T1 is an enumeration
+ // type, or (if there is a right operand) a second parameter
+ // of type T2 or "reference to (possibly cv-qualified) T2",
+ // when T2 is an enumeration type, are candidate functions.
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
+ LookupResult Operators(*this, OpName, SourceLocation(), LookupOperatorName);
+ LookupName(Operators, S);
+
+ assert(!Operators.isAmbiguous() && "Operator lookup cannot be ambiguous");
+
+ if (Operators.empty())
+ return;
+
+ for (LookupResult::iterator Op = Operators.begin(), OpEnd = Operators.end();
+ Op != OpEnd; ++Op) {
+ NamedDecl *Found = (*Op)->getUnderlyingDecl();
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(Found)) {
+ if (IsAcceptableNonMemberOperatorCandidate(FD, T1, T2, Context))
+ Functions.addDecl(*Op, Op.getAccess()); // FIXME: canonical FD
+ } else if (FunctionTemplateDecl *FunTmpl
+ = dyn_cast<FunctionTemplateDecl>(Found)) {
+ // FIXME: friend operators?
+ // FIXME: do we need to check IsAcceptableNonMemberOperatorCandidate,
+ // later?
+ if (!FunTmpl->getDeclContext()->isRecord())
+ Functions.addDecl(*Op, Op.getAccess());
+ }
+ }
+}
+
+Sema::SpecialMemberOverloadResult *Sema::LookupSpecialMember(CXXRecordDecl *RD,
+ CXXSpecialMember SM,
+ bool ConstArg,
+ bool VolatileArg,
+ bool RValueThis,
+ bool ConstThis,
+ bool VolatileThis) {
+ RD = RD->getDefinition();
+ assert((RD && !RD->isBeingDefined()) &&
+ "doing special member lookup into record that isn't fully complete");
+ if (RValueThis || ConstThis || VolatileThis)
+ assert((SM == CXXCopyAssignment || SM == CXXMoveAssignment) &&
+ "constructors and destructors always have unqualified lvalue this");
+ if (ConstArg || VolatileArg)
+ assert((SM != CXXDefaultConstructor && SM != CXXDestructor) &&
+ "parameter-less special members can't have qualified arguments");
+
+ llvm::FoldingSetNodeID ID;
+ ID.AddPointer(RD);
+ ID.AddInteger(SM);
+ ID.AddInteger(ConstArg);
+ ID.AddInteger(VolatileArg);
+ ID.AddInteger(RValueThis);
+ ID.AddInteger(ConstThis);
+ ID.AddInteger(VolatileThis);
+
+ void *InsertPoint;
+ SpecialMemberOverloadResult *Result =
+ SpecialMemberCache.FindNodeOrInsertPos(ID, InsertPoint);
+
+ // This was already cached
+ if (Result)
+ return Result;
+
+ Result = BumpAlloc.Allocate<SpecialMemberOverloadResult>();
+ Result = new (Result) SpecialMemberOverloadResult(ID);
+ SpecialMemberCache.InsertNode(Result, InsertPoint);
+
+ if (SM == CXXDestructor) {
+ if (!RD->hasDeclaredDestructor())
+ DeclareImplicitDestructor(RD);
+ CXXDestructorDecl *DD = RD->getDestructor();
+ assert(DD && "record without a destructor");
+ Result->setMethod(DD);
+ Result->setKind(DD->isDeleted() ?
+ SpecialMemberOverloadResult::NoMemberOrDeleted :
+ SpecialMemberOverloadResult::SuccessNonConst);
+ return Result;
+ }
+
+ // Prepare for overload resolution. Here we construct a synthetic argument
+ // if necessary and make sure that implicit functions are declared.
+ CanQualType CanTy = Context.getCanonicalType(Context.getTagDeclType(RD));
+ DeclarationName Name;
+ Expr *Arg = 0;
+ unsigned NumArgs;
+
+ if (SM == CXXDefaultConstructor) {
+ Name = Context.DeclarationNames.getCXXConstructorName(CanTy);
+ NumArgs = 0;
+ if (RD->needsImplicitDefaultConstructor())
+ DeclareImplicitDefaultConstructor(RD);
+ } else {
+ if (SM == CXXCopyConstructor || SM == CXXMoveConstructor) {
+ Name = Context.DeclarationNames.getCXXConstructorName(CanTy);
+ if (!RD->hasDeclaredCopyConstructor())
+ DeclareImplicitCopyConstructor(RD);
+ if (getLangOpts().CPlusPlus0x && RD->needsImplicitMoveConstructor())
+ DeclareImplicitMoveConstructor(RD);
+ } else {
+ Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
+ if (!RD->hasDeclaredCopyAssignment())
+ DeclareImplicitCopyAssignment(RD);
+ if (getLangOpts().CPlusPlus0x && RD->needsImplicitMoveAssignment())
+ DeclareImplicitMoveAssignment(RD);
+ }
+
+ QualType ArgType = CanTy;
+ if (ConstArg)
+ ArgType.addConst();
+ if (VolatileArg)
+ ArgType.addVolatile();
+
+ // This isn't /really/ specified by the standard, but it's implied
+ // we should be working from an RValue in the case of move to ensure
+ // that we prefer to bind to rvalue references, and an LValue in the
+ // case of copy to ensure we don't bind to rvalue references.
+ // Possibly an XValue is actually correct in the case of move, but
+ // there is no semantic difference for class types in this restricted
+ // case.
+ ExprValueKind VK;
+ if (SM == CXXCopyConstructor || SM == CXXCopyAssignment)
+ VK = VK_LValue;
+ else
+ VK = VK_RValue;
+
+ NumArgs = 1;
+ Arg = new (Context) OpaqueValueExpr(SourceLocation(), ArgType, VK);
+ }
+
+ // Create the object argument
+ QualType ThisTy = CanTy;
+ if (ConstThis)
+ ThisTy.addConst();
+ if (VolatileThis)
+ ThisTy.addVolatile();
+ Expr::Classification Classification =
+ (new (Context) OpaqueValueExpr(SourceLocation(), ThisTy,
+ RValueThis ? VK_RValue : VK_LValue))->
+ Classify(Context);
+
+ // Now we perform lookup on the name we computed earlier and do overload
+ // resolution. Lookup is only performed directly into the class since there
+ // will always be a (possibly implicit) declaration to shadow any others.
+ OverloadCandidateSet OCS((SourceLocation()));
+ DeclContext::lookup_iterator I, E;
+ SpecialMemberOverloadResult::Kind SuccessKind =
+ SpecialMemberOverloadResult::SuccessNonConst;
+
+ llvm::tie(I, E) = RD->lookup(Name);
+ assert((I != E) &&
+ "lookup for a constructor or assignment operator was empty");
+ for ( ; I != E; ++I) {
+ Decl *Cand = *I;
+
+ if (Cand->isInvalidDecl())
+ continue;
+
+ if (UsingShadowDecl *U = dyn_cast<UsingShadowDecl>(Cand)) {
+ // FIXME: [namespace.udecl]p15 says that we should only consider a
+ // using declaration here if it does not match a declaration in the
+ // derived class. We do not implement this correctly in other cases
+ // either.
+ Cand = U->getTargetDecl();
+
+ if (Cand->isInvalidDecl())
+ continue;
+ }
+
+ if (CXXMethodDecl *M = dyn_cast<CXXMethodDecl>(Cand)) {
+ if (SM == CXXCopyAssignment || SM == CXXMoveAssignment)
+ AddMethodCandidate(M, DeclAccessPair::make(M, AS_public), RD, ThisTy,
+ Classification, llvm::makeArrayRef(&Arg, NumArgs),
+ OCS, true);
+ else
+ AddOverloadCandidate(M, DeclAccessPair::make(M, AS_public),
+ llvm::makeArrayRef(&Arg, NumArgs), OCS, true);
+
+ // Here we're looking for a const parameter to speed up creation of
+ // implicit copy methods.
+ if ((SM == CXXCopyAssignment && M->isCopyAssignmentOperator()) ||
+ (SM == CXXCopyConstructor &&
+ cast<CXXConstructorDecl>(M)->isCopyConstructor())) {
+ QualType ArgType = M->getType()->getAs<FunctionProtoType>()->getArgType(0);
+ if (!ArgType->isReferenceType() ||
+ ArgType->getPointeeType().isConstQualified())
+ SuccessKind = SpecialMemberOverloadResult::SuccessConst;
+ }
+ } else if (FunctionTemplateDecl *Tmpl =
+ dyn_cast<FunctionTemplateDecl>(Cand)) {
+ if (SM == CXXCopyAssignment || SM == CXXMoveAssignment)
+ AddMethodTemplateCandidate(Tmpl, DeclAccessPair::make(Tmpl, AS_public),
+ RD, 0, ThisTy, Classification,
+ llvm::makeArrayRef(&Arg, NumArgs),
+ OCS, true);
+ else
+ AddTemplateOverloadCandidate(Tmpl, DeclAccessPair::make(Tmpl, AS_public),
+ 0, llvm::makeArrayRef(&Arg, NumArgs),
+ OCS, true);
+ } else {
+ assert(isa<UsingDecl>(Cand) && "illegal Kind of operator = Decl");
+ }
+ }
+
+ OverloadCandidateSet::iterator Best;
+ switch (OCS.BestViableFunction(*this, SourceLocation(), Best)) {
+ case OR_Success:
+ Result->setMethod(cast<CXXMethodDecl>(Best->Function));
+ Result->setKind(SuccessKind);
+ break;
+
+ case OR_Deleted:
+ Result->setMethod(cast<CXXMethodDecl>(Best->Function));
+ Result->setKind(SpecialMemberOverloadResult::NoMemberOrDeleted);
+ break;
+
+ case OR_Ambiguous:
+ Result->setMethod(0);
+ Result->setKind(SpecialMemberOverloadResult::Ambiguous);
+ break;
+
+ case OR_No_Viable_Function:
+ Result->setMethod(0);
+ Result->setKind(SpecialMemberOverloadResult::NoMemberOrDeleted);
+ break;
+ }
+
+ return Result;
+}
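+
+// For illustration, how the SuccessConst/SuccessNonConst distinction above
+// falls out for hypothetical classes (only a sketch):
+//
+//   struct A { A(const A &); };   // copy ctor parameter is const A&: SuccessConst
+//   struct B { B(B &); };         // copy ctor parameter is B&: SuccessNonConst
+//
+// Callers can read this distinction back through the ConstParamMatch flag of
+// LookupCopyingConstructor and LookupCopyingAssignment below.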
+
+/// \brief Look up the default constructor for the given class.
+CXXConstructorDecl *Sema::LookupDefaultConstructor(CXXRecordDecl *Class) {
+ SpecialMemberOverloadResult *Result =
+ LookupSpecialMember(Class, CXXDefaultConstructor, false, false, false,
+ false, false);
+
+ return cast_or_null<CXXConstructorDecl>(Result->getMethod());
+}
+
+/// \brief Look up the copying constructor for the given class.
+CXXConstructorDecl *Sema::LookupCopyingConstructor(CXXRecordDecl *Class,
+ unsigned Quals,
+ bool *ConstParamMatch) {
+ assert(!(Quals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
+ "non-const, non-volatile qualifiers for copy ctor arg");
+ SpecialMemberOverloadResult *Result =
+ LookupSpecialMember(Class, CXXCopyConstructor, Quals & Qualifiers::Const,
+ Quals & Qualifiers::Volatile, false, false, false);
+
+ if (ConstParamMatch)
+ *ConstParamMatch = Result->hasConstParamMatch();
+
+ return cast_or_null<CXXConstructorDecl>(Result->getMethod());
+}
+
+/// \brief Look up the moving constructor for the given class.
+CXXConstructorDecl *Sema::LookupMovingConstructor(CXXRecordDecl *Class) {
+ SpecialMemberOverloadResult *Result =
+ LookupSpecialMember(Class, CXXMoveConstructor, false,
+ false, false, false, false);
+
+ return cast_or_null<CXXConstructorDecl>(Result->getMethod());
+}
+
+/// \brief Look up the constructors for the given class.
+DeclContext::lookup_result Sema::LookupConstructors(CXXRecordDecl *Class) {
+ // If the implicit constructors have not yet been declared, do so now.
+ if (CanDeclareSpecialMemberFunction(Context, Class)) {
+ if (Class->needsImplicitDefaultConstructor())
+ DeclareImplicitDefaultConstructor(Class);
+ if (!Class->hasDeclaredCopyConstructor())
+ DeclareImplicitCopyConstructor(Class);
+ if (getLangOpts().CPlusPlus0x && Class->needsImplicitMoveConstructor())
+ DeclareImplicitMoveConstructor(Class);
+ }
+
+ CanQualType T = Context.getCanonicalType(Context.getTypeDeclType(Class));
+ DeclarationName Name = Context.DeclarationNames.getCXXConstructorName(T);
+ return Class->lookup(Name);
+}
+
+/// \brief Look up the copying assignment operator for the given class.
+CXXMethodDecl *Sema::LookupCopyingAssignment(CXXRecordDecl *Class,
+ unsigned Quals, bool RValueThis,
+ unsigned ThisQuals,
+ bool *ConstParamMatch) {
+ assert(!(Quals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
+ "non-const, non-volatile qualifiers for copy assignment arg");
+ assert(!(ThisQuals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
+ "non-const, non-volatile qualifiers for copy assignment this");
+ SpecialMemberOverloadResult *Result =
+ LookupSpecialMember(Class, CXXCopyAssignment, Quals & Qualifiers::Const,
+ Quals & Qualifiers::Volatile, RValueThis,
+ ThisQuals & Qualifiers::Const,
+ ThisQuals & Qualifiers::Volatile);
+
+ if (ConstParamMatch)
+ *ConstParamMatch = Result->hasConstParamMatch();
+
+ return Result->getMethod();
+}
+
+/// \brief Look up the moving assignment operator for the given class.
+CXXMethodDecl *Sema::LookupMovingAssignment(CXXRecordDecl *Class,
+ bool RValueThis,
+ unsigned ThisQuals) {
+ assert(!(ThisQuals & ~(Qualifiers::Const | Qualifiers::Volatile)) &&
+ "non-const, non-volatile qualifiers for copy assignment this");
+ SpecialMemberOverloadResult *Result =
+ LookupSpecialMember(Class, CXXMoveAssignment, false, false, RValueThis,
+ ThisQuals & Qualifiers::Const,
+ ThisQuals & Qualifiers::Volatile);
+
+ return Result->getMethod();
+}
+
+/// \brief Look for the destructor of the given class.
+///
+/// During semantic analysis, this routine should be used in lieu of
+/// CXXRecordDecl::getDestructor().
+///
+/// \returns The destructor for this class.
+CXXDestructorDecl *Sema::LookupDestructor(CXXRecordDecl *Class) {
+ return cast<CXXDestructorDecl>(LookupSpecialMember(Class, CXXDestructor,
+ false, false, false,
+ false, false)->getMethod());
+}
+
+/// LookupLiteralOperator - Determine which literal operator should be used for
+/// a user-defined literal, per C++11 [lex.ext].
+///
+/// Normal overload resolution is not used to select which literal operator to
+/// call for a user-defined literal. Look up the provided literal operator name,
+/// and filter the results to the appropriate set for the given argument types.
+Sema::LiteralOperatorLookupResult
+Sema::LookupLiteralOperator(Scope *S, LookupResult &R,
+ ArrayRef<QualType> ArgTys,
+ bool AllowRawAndTemplate) {
+ LookupName(R, S);
+ assert(R.getResultKind() != LookupResult::Ambiguous &&
+ "literal operator lookup can't be ambiguous");
+
+ // Filter the lookup results appropriately.
+ LookupResult::Filter F = R.makeFilter();
+
+ bool FoundTemplate = false;
+ bool FoundRaw = false;
+ bool FoundExactMatch = false;
+
+ while (F.hasNext()) {
+ Decl *D = F.next();
+ if (UsingShadowDecl *USD = dyn_cast<UsingShadowDecl>(D))
+ D = USD->getTargetDecl();
+
+ bool IsTemplate = isa<FunctionTemplateDecl>(D);
+ bool IsRaw = false;
+ bool IsExactMatch = false;
+
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (FD->getNumParams() == 1 &&
+ FD->getParamDecl(0)->getType()->getAs<PointerType>())
+ IsRaw = true;
+ else {
+ IsExactMatch = true;
+ for (unsigned ArgIdx = 0; ArgIdx != ArgTys.size(); ++ArgIdx) {
+ QualType ParamTy = FD->getParamDecl(ArgIdx)->getType();
+ if (!Context.hasSameUnqualifiedType(ArgTys[ArgIdx], ParamTy)) {
+ IsExactMatch = false;
+ break;
+ }
+ }
+ }
+ }
+
+ if (IsExactMatch) {
+ FoundExactMatch = true;
+ AllowRawAndTemplate = false;
+ if (FoundRaw || FoundTemplate) {
+ // Go through again and remove the raw and template decls we've
+ // already found.
+ F.restart();
+ FoundRaw = FoundTemplate = false;
+ }
+ } else if (AllowRawAndTemplate && (IsTemplate || IsRaw)) {
+ FoundTemplate |= IsTemplate;
+ FoundRaw |= IsRaw;
+ } else {
+ F.erase();
+ }
+ }
+
+ F.done();
+
+ // C++11 [lex.ext]p3, p4: If S contains a literal operator with a matching
+ // parameter type, that is used in preference to a raw literal operator
+ // or literal operator template.
+ if (FoundExactMatch)
+ return LOLR_Cooked;
+
+ // C++11 [lex.ext]p3, p4: S shall contain a raw literal operator or a literal
+ // operator template, but not both.
+ if (FoundRaw && FoundTemplate) {
+ Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call) << R.getLookupName();
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I) {
+ Decl *D = *I;
+ if (UsingShadowDecl *USD = dyn_cast<UsingShadowDecl>(D))
+ D = USD->getTargetDecl();
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D))
+ D = FunTmpl->getTemplatedDecl();
+ NoteOverloadCandidate(cast<FunctionDecl>(D));
+ }
+ return LOLR_Error;
+ }
+
+ if (FoundRaw)
+ return LOLR_Raw;
+
+ if (FoundTemplate)
+ return LOLR_Template;
+
+ // Didn't find anything we could use.
+ Diag(R.getNameLoc(), diag::err_ovl_no_viable_literal_operator)
+ << R.getLookupName() << (int)ArgTys.size() << ArgTys[0]
+ << (ArgTys.size() == 2 ? ArgTys[1] : QualType()) << AllowRawAndTemplate;
+ return LOLR_Error;
+}
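+
+// For illustration, the three successful outcomes above for a hypothetical
+// literal 1.5_u (only a sketch):
+//
+//   int operator"" _u(long double);          // matching parameter: LOLR_Cooked
+//   int operator"" _u(const char *);         // raw form:           LOLR_Raw
+//   template<char...> int operator"" _u();   // template form:      LOLR_Template
+//
+// A cooked match wins outright; otherwise the lookup set may contain a raw
+// operator or a literal operator template, but not both.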
+
+void ADLResult::insert(NamedDecl *New) {
+ NamedDecl *&Old = Decls[cast<NamedDecl>(New->getCanonicalDecl())];
+
+ // If we haven't yet seen a decl for this key, or the last decl
+ // was exactly this one, we're done.
+ if (Old == 0 || Old == New) {
+ Old = New;
+ return;
+ }
+
+ // Otherwise, decide which is a more recent redeclaration.
+ FunctionDecl *OldFD, *NewFD;
+ if (isa<FunctionTemplateDecl>(New)) {
+ OldFD = cast<FunctionTemplateDecl>(Old)->getTemplatedDecl();
+ NewFD = cast<FunctionTemplateDecl>(New)->getTemplatedDecl();
+ } else {
+ OldFD = cast<FunctionDecl>(Old);
+ NewFD = cast<FunctionDecl>(New);
+ }
+
+ FunctionDecl *Cursor = NewFD;
+ while (true) {
+ Cursor = Cursor->getPreviousDecl();
+
+ // If we got to the end without finding OldFD, OldFD is the newer
+ // declaration; leave things as they are.
+ if (!Cursor) return;
+
+ // If we do find OldFD, then NewFD is newer.
+ if (Cursor == OldFD) break;
+
+ // Otherwise, keep looking.
+ }
+
+ Old = New;
+}
+
+void Sema::ArgumentDependentLookup(DeclarationName Name, bool Operator,
+ SourceLocation Loc,
+ llvm::ArrayRef<Expr *> Args,
+ ADLResult &Result,
+ bool StdNamespaceIsAssociated) {
+ // Find all of the associated namespaces and classes based on the
+ // arguments we have.
+ AssociatedNamespaceSet AssociatedNamespaces;
+ AssociatedClassSet AssociatedClasses;
+ FindAssociatedClassesAndNamespaces(Args,
+ AssociatedNamespaces,
+ AssociatedClasses);
+ if (StdNamespaceIsAssociated && StdNamespace)
+ AssociatedNamespaces.insert(getStdNamespace());
+
+ QualType T1, T2;
+ if (Operator) {
+ T1 = Args[0]->getType();
+ if (Args.size() >= 2)
+ T2 = Args[1]->getType();
+ }
+
+ // Try to complete all associated classes, in case they contain a
+ // declaration of a friend function.
+ for (AssociatedClassSet::iterator C = AssociatedClasses.begin(),
+ CEnd = AssociatedClasses.end();
+ C != CEnd; ++C)
+ RequireCompleteType(Loc, Context.getRecordType(*C), 0);
+
+ // C++ [basic.lookup.argdep]p3:
+ // Let X be the lookup set produced by unqualified lookup (3.4.1)
+ // and let Y be the lookup set produced by argument dependent
+ // lookup (defined as follows). If X contains [...] then Y is
+ // empty. Otherwise Y is the set of declarations found in the
+ // namespaces associated with the argument types as described
+ // below. The set of declarations found by the lookup of the name
+ // is the union of X and Y.
+ //
+ // Here, we compute Y and add its members to the overloaded
+ // candidate set.
+ for (AssociatedNamespaceSet::iterator NS = AssociatedNamespaces.begin(),
+ NSEnd = AssociatedNamespaces.end();
+ NS != NSEnd; ++NS) {
+ // When considering an associated namespace, the lookup is the
+ // same as the lookup performed when the associated namespace is
+ // used as a qualifier (3.4.3.2) except that:
+ //
+ // -- Any using-directives in the associated namespace are
+ // ignored.
+ //
+ // -- Any namespace-scope friend functions declared in
+ // associated classes are visible within their respective
+ // namespaces even if they are not visible during an ordinary
+ // lookup (11.4).
+ DeclContext::lookup_iterator I, E;
+ for (llvm::tie(I, E) = (*NS)->lookup(Name); I != E; ++I) {
+ NamedDecl *D = *I;
+ // If the only declaration here is an ordinary friend, consider
+      // it only if it was declared in an associated class.
+ if (D->getIdentifierNamespace() == Decl::IDNS_OrdinaryFriend) {
+ DeclContext *LexDC = D->getLexicalDeclContext();
+ if (!AssociatedClasses.count(cast<CXXRecordDecl>(LexDC)))
+ continue;
+ }
+
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ if (isa<FunctionDecl>(D)) {
+ if (Operator &&
+ !IsAcceptableNonMemberOperatorCandidate(cast<FunctionDecl>(D),
+ T1, T2, Context))
+ continue;
+ } else if (!isa<FunctionTemplateDecl>(D))
+ continue;
+
+ Result.insert(D);
+ }
+ }
+}
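+
+// For illustration, the namespace-scope friend rule handled in the loop above
+// (hypothetical declarations, only a sketch):
+//
+//   namespace N {
+//     struct S {
+//       friend void swap(S &, S &) {}   // only declared inside the class
+//     };
+//   }
+//   void g(N::S &a, N::S &b) { swap(a, b); }
+//
+// Ordinary unqualified lookup does not see N::swap, but ADL does: N is an
+// associated namespace, and the friend's lexical DeclContext is the
+// associated class N::S, so the IDNS_OrdinaryFriend check accepts it.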
+
+//----------------------------------------------------------------------------
+// Search for all visible declarations.
+//----------------------------------------------------------------------------
+VisibleDeclConsumer::~VisibleDeclConsumer() { }
+
+namespace {
+
+class ShadowContextRAII;
+
+class VisibleDeclsRecord {
+public:
+ /// \brief An entry in the shadow map, which is optimized to store a
+ /// single declaration (the common case) but can also store a list
+ /// of declarations.
+ typedef llvm::TinyPtrVector<NamedDecl*> ShadowMapEntry;
+
+private:
+ /// \brief A mapping from declaration names to the declarations that have
+ /// this name within a particular scope.
+ typedef llvm::DenseMap<DeclarationName, ShadowMapEntry> ShadowMap;
+
+ /// \brief A list of shadow maps, which is used to model name hiding.
+ std::list<ShadowMap> ShadowMaps;
+
+ /// \brief The declaration contexts we have already visited.
+ llvm::SmallPtrSet<DeclContext *, 8> VisitedContexts;
+
+ friend class ShadowContextRAII;
+
+public:
+ /// \brief Determine whether we have already visited this context
+ /// (and, if not, note that we are going to visit that context now).
+ bool visitedContext(DeclContext *Ctx) {
+ return !VisitedContexts.insert(Ctx);
+ }
+
+ bool alreadyVisitedContext(DeclContext *Ctx) {
+ return VisitedContexts.count(Ctx);
+ }
+
+ /// \brief Determine whether the given declaration is hidden in the
+ /// current scope.
+ ///
+ /// \returns the declaration that hides the given declaration, or
+ /// NULL if no such declaration exists.
+ NamedDecl *checkHidden(NamedDecl *ND);
+
+ /// \brief Add a declaration to the current shadow map.
+ void add(NamedDecl *ND) {
+ ShadowMaps.back()[ND->getDeclName()].push_back(ND);
+ }
+};
+
+/// \brief RAII object that records when we've entered a shadow context.
+class ShadowContextRAII {
+ VisibleDeclsRecord &Visible;
+
+ typedef VisibleDeclsRecord::ShadowMap ShadowMap;
+
+public:
+ ShadowContextRAII(VisibleDeclsRecord &Visible) : Visible(Visible) {
+ Visible.ShadowMaps.push_back(ShadowMap());
+ }
+
+ ~ShadowContextRAII() {
+ Visible.ShadowMaps.pop_back();
+ }
+};
+
+} // end anonymous namespace
+
+NamedDecl *VisibleDeclsRecord::checkHidden(NamedDecl *ND) {
+ // Look through using declarations.
+ ND = ND->getUnderlyingDecl();
+
+ unsigned IDNS = ND->getIdentifierNamespace();
+ std::list<ShadowMap>::reverse_iterator SM = ShadowMaps.rbegin();
+ for (std::list<ShadowMap>::reverse_iterator SMEnd = ShadowMaps.rend();
+ SM != SMEnd; ++SM) {
+ ShadowMap::iterator Pos = SM->find(ND->getDeclName());
+ if (Pos == SM->end())
+ continue;
+
+ for (ShadowMapEntry::iterator I = Pos->second.begin(),
+ IEnd = Pos->second.end();
+ I != IEnd; ++I) {
+ // A tag declaration does not hide a non-tag declaration.
+ if ((*I)->hasTagIdentifierNamespace() &&
+ (IDNS & (Decl::IDNS_Member | Decl::IDNS_Ordinary |
+ Decl::IDNS_ObjCProtocol)))
+ continue;
+
+ // Protocols are in distinct namespaces from everything else.
+ if ((((*I)->getIdentifierNamespace() & Decl::IDNS_ObjCProtocol)
+ || (IDNS & Decl::IDNS_ObjCProtocol)) &&
+ (*I)->getIdentifierNamespace() != IDNS)
+ continue;
+
+ // Functions and function templates in the same scope overload
+ // rather than hide. FIXME: Look for hiding based on function
+ // signatures!
+ if ((*I)->isFunctionOrFunctionTemplate() &&
+ ND->isFunctionOrFunctionTemplate() &&
+ SM == ShadowMaps.rbegin())
+ continue;
+
+ // We've found a declaration that hides this one.
+ return *I;
+ }
+ }
+
+ return 0;
+}
+
+static void LookupVisibleDecls(DeclContext *Ctx, LookupResult &Result,
+ bool QualifiedNameLookup,
+ bool InBaseClass,
+ VisibleDeclConsumer &Consumer,
+ VisibleDeclsRecord &Visited) {
+ if (!Ctx)
+ return;
+
+ // Make sure we don't visit the same context twice.
+ if (Visited.visitedContext(Ctx->getPrimaryContext()))
+ return;
+
+ if (CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(Ctx))
+ Result.getSema().ForceDeclarationOfImplicitMembers(Class);
+
+ // Enumerate all of the results in this context.
+ for (DeclContext::all_lookups_iterator L = Ctx->lookups_begin(),
+ LEnd = Ctx->lookups_end();
+ L != LEnd; ++L) {
+ for (DeclContext::lookup_result R = *L; R.first != R.second; ++R.first) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(*R.first)) {
+ if ((ND = Result.getAcceptableDecl(ND))) {
+ Consumer.FoundDecl(ND, Visited.checkHidden(ND), Ctx, InBaseClass);
+ Visited.add(ND);
+ }
+ }
+ }
+ }
+
+ // Traverse using directives for qualified name lookup.
+ if (QualifiedNameLookup) {
+ ShadowContextRAII Shadow(Visited);
+ DeclContext::udir_iterator I, E;
+ for (llvm::tie(I, E) = Ctx->getUsingDirectives(); I != E; ++I) {
+ LookupVisibleDecls((*I)->getNominatedNamespace(), Result,
+ QualifiedNameLookup, InBaseClass, Consumer, Visited);
+ }
+ }
+
+ // Traverse the contexts of inherited C++ classes.
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Ctx)) {
+ if (!Record->hasDefinition())
+ return;
+
+ for (CXXRecordDecl::base_class_iterator B = Record->bases_begin(),
+ BEnd = Record->bases_end();
+ B != BEnd; ++B) {
+ QualType BaseType = B->getType();
+
+ // Don't look into dependent bases, because name lookup can't look
+ // there anyway.
+ if (BaseType->isDependentType())
+ continue;
+
+ const RecordType *Record = BaseType->getAs<RecordType>();
+ if (!Record)
+ continue;
+
+ // FIXME: It would be nice to be able to determine whether referencing
+ // a particular member would be ambiguous. For example, given
+ //
+ // struct A { int member; };
+ // struct B { int member; };
+ // struct C : A, B { };
+ //
+ // void f(C *c) { c->### }
+ //
+ // accessing 'member' would result in an ambiguity. However, we
+ // could be smart enough to qualify the member with the base
+ // class, e.g.,
+ //
+ // c->B::member
+ //
+ // or
+ //
+ // c->A::member
+
+ // Find results in this base class (and its bases).
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(Record->getDecl(), Result, QualifiedNameLookup,
+ true, Consumer, Visited);
+ }
+ }
+
+ // Traverse the contexts of Objective-C classes.
+ if (ObjCInterfaceDecl *IFace = dyn_cast<ObjCInterfaceDecl>(Ctx)) {
+ // Traverse categories.
+ for (ObjCCategoryDecl *Category = IFace->getCategoryList();
+ Category; Category = Category->getNextClassCategory()) {
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(Category, Result, QualifiedNameLookup, false,
+ Consumer, Visited);
+ }
+
+ // Traverse protocols.
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ I = IFace->all_referenced_protocol_begin(),
+ E = IFace->all_referenced_protocol_end(); I != E; ++I) {
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(*I, Result, QualifiedNameLookup, false, Consumer,
+ Visited);
+ }
+
+ // Traverse the superclass.
+ if (IFace->getSuperClass()) {
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(IFace->getSuperClass(), Result, QualifiedNameLookup,
+ true, Consumer, Visited);
+ }
+
+ // If there is an implementation, traverse it. We do this to find
+ // synthesized ivars.
+ if (IFace->getImplementation()) {
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(IFace->getImplementation(), Result,
+ QualifiedNameLookup, InBaseClass, Consumer, Visited);
+ }
+ } else if (ObjCProtocolDecl *Protocol = dyn_cast<ObjCProtocolDecl>(Ctx)) {
+ for (ObjCProtocolDecl::protocol_iterator I = Protocol->protocol_begin(),
+ E = Protocol->protocol_end(); I != E; ++I) {
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(*I, Result, QualifiedNameLookup, false, Consumer,
+ Visited);
+ }
+ } else if (ObjCCategoryDecl *Category = dyn_cast<ObjCCategoryDecl>(Ctx)) {
+ for (ObjCCategoryDecl::protocol_iterator I = Category->protocol_begin(),
+ E = Category->protocol_end(); I != E; ++I) {
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(*I, Result, QualifiedNameLookup, false, Consumer,
+ Visited);
+ }
+
+ // If there is an implementation, traverse it.
+ if (Category->getImplementation()) {
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(Category->getImplementation(), Result,
+ QualifiedNameLookup, true, Consumer, Visited);
+ }
+ }
+}
+
+static void LookupVisibleDecls(Scope *S, LookupResult &Result,
+ UnqualUsingDirectiveSet &UDirs,
+ VisibleDeclConsumer &Consumer,
+ VisibleDeclsRecord &Visited) {
+ if (!S)
+ return;
+
+ if (!S->getEntity() ||
+ (!S->getParent() &&
+ !Visited.alreadyVisitedContext((DeclContext *)S->getEntity())) ||
+ ((DeclContext *)S->getEntity())->isFunctionOrMethod()) {
+ // Walk through the declarations in this Scope.
+ for (Scope::decl_iterator D = S->decl_begin(), DEnd = S->decl_end();
+ D != DEnd; ++D) {
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(*D))
+ if ((ND = Result.getAcceptableDecl(ND))) {
+ Consumer.FoundDecl(ND, Visited.checkHidden(ND), 0, false);
+ Visited.add(ND);
+ }
+ }
+ }
+
+ // FIXME: C++ [temp.local]p8
+ DeclContext *Entity = 0;
+ if (S->getEntity()) {
+ // Look into this scope's declaration context, along with any of its
+ // parent lookup contexts (e.g., enclosing classes), up to the point
+ // where we hit the context stored in the next outer scope.
+ Entity = (DeclContext *)S->getEntity();
+ DeclContext *OuterCtx = findOuterContext(S).first; // FIXME
+
+ for (DeclContext *Ctx = Entity; Ctx && !Ctx->Equals(OuterCtx);
+ Ctx = Ctx->getLookupParent()) {
+ if (ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(Ctx)) {
+ if (Method->isInstanceMethod()) {
+ // For instance methods, look for ivars in the method's interface.
+ LookupResult IvarResult(Result.getSema(), Result.getLookupName(),
+ Result.getNameLoc(), Sema::LookupMemberName);
+ if (ObjCInterfaceDecl *IFace = Method->getClassInterface()) {
+ LookupVisibleDecls(IFace, IvarResult, /*QualifiedNameLookup=*/false,
+ /*InBaseClass=*/false, Consumer, Visited);
+ }
+ }
+
+ // We've already performed all of the name lookup that we need
+ // to for Objective-C methods; the next context will be the
+ // outer scope.
+ break;
+ }
+
+ if (Ctx->isFunctionOrMethod())
+ continue;
+
+ LookupVisibleDecls(Ctx, Result, /*QualifiedNameLookup=*/false,
+ /*InBaseClass=*/false, Consumer, Visited);
+ }
+ } else if (!S->getParent()) {
+ // Look into the translation unit scope. We walk through the translation
+ // unit's declaration context, because the Scope itself won't have all of
+ // the declarations if we loaded a precompiled header.
+ // FIXME: We would like the translation unit's Scope object to point to the
+ // translation unit, so we don't need this special "if" branch. However,
+ // doing so would force the normal C++ name-lookup code to look into the
+ // translation unit decl when the IdentifierInfo chains would suffice.
+ // Once we fix that problem (which is part of a more general "don't look
+ // in DeclContexts unless we have to" optimization), we can eliminate this.
+ Entity = Result.getSema().Context.getTranslationUnitDecl();
+ LookupVisibleDecls(Entity, Result, /*QualifiedNameLookup=*/false,
+ /*InBaseClass=*/false, Consumer, Visited);
+ }
+
+ if (Entity) {
+ // Lookup visible declarations in any namespaces found by using
+ // directives.
+ UnqualUsingDirectiveSet::const_iterator UI, UEnd;
+ llvm::tie(UI, UEnd) = UDirs.getNamespacesFor(Entity);
+ for (; UI != UEnd; ++UI)
+ LookupVisibleDecls(const_cast<DeclContext *>(UI->getNominatedNamespace()),
+ Result, /*QualifiedNameLookup=*/false,
+ /*InBaseClass=*/false, Consumer, Visited);
+ }
+
+ // Lookup names in the parent scope.
+ ShadowContextRAII Shadow(Visited);
+ LookupVisibleDecls(S->getParent(), Result, UDirs, Consumer, Visited);
+}
+
+void Sema::LookupVisibleDecls(Scope *S, LookupNameKind Kind,
+ VisibleDeclConsumer &Consumer,
+ bool IncludeGlobalScope) {
+ // Determine the set of using directives available during
+ // unqualified name lookup.
+ Scope *Initial = S;
+ UnqualUsingDirectiveSet UDirs;
+ if (getLangOpts().CPlusPlus) {
+ // Find the first namespace or translation-unit scope.
+ while (S && !isNamespaceOrTranslationUnitScope(S))
+ S = S->getParent();
+
+ UDirs.visitScopeChain(Initial, S);
+ }
+ UDirs.done();
+
+ // Look for visible declarations.
+ LookupResult Result(*this, DeclarationName(), SourceLocation(), Kind);
+ VisibleDeclsRecord Visited;
+ if (!IncludeGlobalScope)
+ Visited.visitedContext(Context.getTranslationUnitDecl());
+ ShadowContextRAII Shadow(Visited);
+ ::LookupVisibleDecls(Initial, Result, UDirs, Consumer, Visited);
+}
+
+void Sema::LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
+ VisibleDeclConsumer &Consumer,
+ bool IncludeGlobalScope) {
+ LookupResult Result(*this, DeclarationName(), SourceLocation(), Kind);
+ VisibleDeclsRecord Visited;
+ if (!IncludeGlobalScope)
+ Visited.visitedContext(Context.getTranslationUnitDecl());
+ ShadowContextRAII Shadow(Visited);
+ ::LookupVisibleDecls(Ctx, Result, /*QualifiedNameLookup=*/true,
+ /*InBaseClass=*/false, Consumer, Visited);
+}
+
+/// LookupOrCreateLabel - Do a name lookup of a label with the specified name.
+/// If GnuLabelLoc is a valid source location, then this is a definition
+/// of an __label__ label name; otherwise it is a normal label definition
+/// or use.
+LabelDecl *Sema::LookupOrCreateLabel(IdentifierInfo *II, SourceLocation Loc,
+ SourceLocation GnuLabelLoc) {
+ // Do a lookup to see if we have a label with this name already.
+ NamedDecl *Res = 0;
+
+ if (GnuLabelLoc.isValid()) {
+ // Local label definitions always shadow existing labels.
+ Res = LabelDecl::Create(Context, CurContext, Loc, II, GnuLabelLoc);
+ Scope *S = CurScope;
+ PushOnScopeChains(Res, S, true);
+ return cast<LabelDecl>(Res);
+ }
+
+ // Not a GNU local label.
+ Res = LookupSingleName(CurScope, II, Loc, LookupLabel, NotForRedeclaration);
+ // If we found a label, check to see if it is in the same context as us.
+ // When in a Block, we don't want to reuse a label in an enclosing function.
+ if (Res && Res->getDeclContext() != CurContext)
+ Res = 0;
+ if (Res == 0) {
+ // If not forward referenced or defined already, create the backing decl.
+ Res = LabelDecl::Create(Context, CurContext, Loc, II);
+ Scope *S = CurScope->getFnParent();
+ assert(S && "Not in a function?");
+ PushOnScopeChains(Res, S, true);
+ }
+ return cast<LabelDecl>(Res);
+}
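+
+// For illustration, the two paths above in a hypothetical function (only a
+// sketch):
+//
+//   void f() {
+//     __label__ retry;   // GnuLabelLoc is valid: always creates a fresh decl
+//   retry:
+//     goto done;         // ordinary use: creates the decl for 'done' here...
+//   done: ;              // ...and the definition reuses it via LookupLabel
+//   }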
+
+//===----------------------------------------------------------------------===//
+// Typo correction
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+typedef llvm::StringMap<TypoCorrection, llvm::BumpPtrAllocator> TypoResultsMap;
+typedef std::map<unsigned, TypoResultsMap *> TypoEditDistanceMap;
+
+static const unsigned MaxTypoDistanceResultSets = 5;
+
+class TypoCorrectionConsumer : public VisibleDeclConsumer {
+ /// \brief The name written that is a typo in the source.
+ StringRef Typo;
+
+ /// \brief The results found that have the smallest edit distance
+ /// found (so far) with the typo name.
+ ///
+ /// The pointer value being set to the current DeclContext indicates
+ /// whether there is a keyword with this name.
+ TypoEditDistanceMap BestResults;
+
+ Sema &SemaRef;
+
+public:
+ explicit TypoCorrectionConsumer(Sema &SemaRef, IdentifierInfo *Typo)
+ : Typo(Typo->getName()),
+ SemaRef(SemaRef) { }
+
+ ~TypoCorrectionConsumer() {
+ for (TypoEditDistanceMap::iterator I = BestResults.begin(),
+ IEnd = BestResults.end();
+ I != IEnd;
+ ++I)
+ delete I->second;
+ }
+
+ virtual void FoundDecl(NamedDecl *ND, NamedDecl *Hiding, DeclContext *Ctx,
+ bool InBaseClass);
+ void FoundName(StringRef Name);
+ void addKeywordResult(StringRef Keyword);
+ void addName(StringRef Name, NamedDecl *ND, unsigned Distance,
+ NestedNameSpecifier *NNS=NULL, bool isKeyword=false);
+ void addCorrection(TypoCorrection Correction);
+
+ typedef TypoResultsMap::iterator result_iterator;
+ typedef TypoEditDistanceMap::iterator distance_iterator;
+ distance_iterator begin() { return BestResults.begin(); }
+ distance_iterator end() { return BestResults.end(); }
+ void erase(distance_iterator I) { BestResults.erase(I); }
+ unsigned size() const { return BestResults.size(); }
+ bool empty() const { return BestResults.empty(); }
+
+ TypoCorrection &operator[](StringRef Name) {
+ return (*BestResults.begin()->second)[Name];
+ }
+
+ unsigned getBestEditDistance(bool Normalized) {
+ if (BestResults.empty())
+ return (std::numeric_limits<unsigned>::max)();
+
+ unsigned BestED = BestResults.begin()->first;
+ return Normalized ? TypoCorrection::NormalizeEditDistance(BestED) : BestED;
+ }
+};
+
+}
+
+void TypoCorrectionConsumer::FoundDecl(NamedDecl *ND, NamedDecl *Hiding,
+ DeclContext *Ctx, bool InBaseClass) {
+ // Don't consider hidden names for typo correction.
+ if (Hiding)
+ return;
+
+ // Only consider entities with identifiers for names, ignoring
+ // special names (constructors, overloaded operators, selectors,
+ // etc.).
+ IdentifierInfo *Name = ND->getIdentifier();
+ if (!Name)
+ return;
+
+ FoundName(Name->getName());
+}
+
+void TypoCorrectionConsumer::FoundName(StringRef Name) {
+ // Use a simple length-based heuristic to determine the minimum possible
+ // edit distance. If the minimum isn't good enough, bail out early.
+ unsigned MinED = abs((int)Name.size() - (int)Typo.size());
+ if (MinED && Typo.size() / MinED < 3)
+ return;
+
+ // Compute an upper bound on the allowable edit distance, so that the
+ // edit-distance algorithm can short-circuit.
+ unsigned UpperBound = (Typo.size() + 2) / 3;
+
+ // Compute the edit distance between the typo and the name of this
+ // entity, and add the identifier to the list of results.
+ addName(Name, NULL, Typo.edit_distance(Name, true, UpperBound));
+}
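+
+// For illustration, the two bounds above for the hypothetical typo "vecotr":
+//
+//   Name "vector":       MinED = |6 - 6| = 0, so there is no early bail; the
+//                        edit distance runs with UpperBound = (6 + 2) / 3 = 2
+//                        and finds a distance of 2 (two substitutions).
+//   Name "basic_string": MinED = |12 - 6| = 6 and 6 / 6 = 1 < 3, so the name
+//                        is rejected before any edit distance is computed.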
+
+void TypoCorrectionConsumer::addKeywordResult(StringRef Keyword) {
+ // Compute the edit distance between the typo and this keyword,
+ // and add the keyword to the list of results.
+ addName(Keyword, NULL, Typo.edit_distance(Keyword), NULL, true);
+}
+
+void TypoCorrectionConsumer::addName(StringRef Name,
+ NamedDecl *ND,
+ unsigned Distance,
+ NestedNameSpecifier *NNS,
+ bool isKeyword) {
+ TypoCorrection TC(&SemaRef.Context.Idents.get(Name), ND, NNS, Distance);
+ if (isKeyword) TC.makeKeyword();
+ addCorrection(TC);
+}
+
+void TypoCorrectionConsumer::addCorrection(TypoCorrection Correction) {
+ StringRef Name = Correction.getCorrectionAsIdentifierInfo()->getName();
+ TypoResultsMap *& Map = BestResults[Correction.getEditDistance(false)];
+ if (!Map)
+ Map = new TypoResultsMap;
+
+ TypoCorrection &CurrentCorrection = (*Map)[Name];
+ if (!CurrentCorrection ||
+ // FIXME: The following should be rolled up into an operator< on
+ // TypoCorrection with a more principled definition.
+ CurrentCorrection.isKeyword() < Correction.isKeyword() ||
+ Correction.getAsString(SemaRef.getLangOpts()) <
+ CurrentCorrection.getAsString(SemaRef.getLangOpts()))
+ CurrentCorrection = Correction;
+
+ while (BestResults.size() > MaxTypoDistanceResultSets) {
+ TypoEditDistanceMap::iterator Last = BestResults.end();
+ --Last;
+ delete Last->second;
+ BestResults.erase(Last);
+ }
+}
+
+// Fill the supplied vector with the IdentifierInfo pointers for each piece of
+// the given NestedNameSpecifier (i.e. given a NestedNameSpecifier "foo::bar::",
+// fill the vector with the IdentifierInfo pointers for "foo" and "bar").
+static void getNestedNameSpecifierIdentifiers(
+ NestedNameSpecifier *NNS,
+ SmallVectorImpl<const IdentifierInfo*> &Identifiers) {
+ if (NestedNameSpecifier *Prefix = NNS->getPrefix())
+ getNestedNameSpecifierIdentifiers(Prefix, Identifiers);
+ else
+ Identifiers.clear();
+
+ const IdentifierInfo *II = NULL;
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ II = NNS->getAsIdentifier();
+ break;
+
+ case NestedNameSpecifier::Namespace:
+ if (NNS->getAsNamespace()->isAnonymousNamespace())
+ return;
+ II = NNS->getAsNamespace()->getIdentifier();
+ break;
+
+ case NestedNameSpecifier::NamespaceAlias:
+ II = NNS->getAsNamespaceAlias()->getIdentifier();
+ break;
+
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ case NestedNameSpecifier::TypeSpec:
+ II = QualType(NNS->getAsType(), 0).getBaseTypeIdentifier();
+ break;
+
+ case NestedNameSpecifier::Global:
+ return;
+ }
+
+ if (II)
+ Identifiers.push_back(II);
+}
+
+namespace {
+
+class SpecifierInfo {
+ public:
+ DeclContext* DeclCtx;
+ NestedNameSpecifier* NameSpecifier;
+ unsigned EditDistance;
+
+ SpecifierInfo(DeclContext *Ctx, NestedNameSpecifier *NNS, unsigned ED)
+ : DeclCtx(Ctx), NameSpecifier(NNS), EditDistance(ED) {}
+};
+
+typedef SmallVector<DeclContext*, 4> DeclContextList;
+typedef SmallVector<SpecifierInfo, 16> SpecifierInfoList;
+
+class NamespaceSpecifierSet {
+ ASTContext &Context;
+ DeclContextList CurContextChain;
+ SmallVector<const IdentifierInfo*, 4> CurContextIdentifiers;
+ SmallVector<const IdentifierInfo*, 4> CurNameSpecifierIdentifiers;
+ bool isSorted;
+
+ SpecifierInfoList Specifiers;
+ llvm::SmallSetVector<unsigned, 4> Distances;
+ llvm::DenseMap<unsigned, SpecifierInfoList> DistanceMap;
+
+ /// \brief Helper for building the list of DeclContexts between the current
+  /// context and the top of the translation unit.
+ static DeclContextList BuildContextChain(DeclContext *Start);
+
+ void SortNamespaces();
+
+ public:
+ NamespaceSpecifierSet(ASTContext &Context, DeclContext *CurContext,
+ CXXScopeSpec *CurScopeSpec)
+ : Context(Context), CurContextChain(BuildContextChain(CurContext)),
+ isSorted(true) {
+ if (CurScopeSpec && CurScopeSpec->getScopeRep())
+ getNestedNameSpecifierIdentifiers(CurScopeSpec->getScopeRep(),
+ CurNameSpecifierIdentifiers);
+ // Build the list of identifiers that would be used for an absolute
+    // (from the global context) NestedNameSpecifier referring to the current
+ // context.
+ for (DeclContextList::reverse_iterator C = CurContextChain.rbegin(),
+ CEnd = CurContextChain.rend();
+ C != CEnd; ++C) {
+ if (NamespaceDecl *ND = dyn_cast_or_null<NamespaceDecl>(*C))
+ CurContextIdentifiers.push_back(ND->getIdentifier());
+ }
+ }
+
+ /// \brief Add the namespace to the set, computing the corresponding
+ /// NestedNameSpecifier and its distance in the process.
+ void AddNamespace(NamespaceDecl *ND);
+
+ typedef SpecifierInfoList::iterator iterator;
+ iterator begin() {
+ if (!isSorted) SortNamespaces();
+ return Specifiers.begin();
+ }
+ iterator end() { return Specifiers.end(); }
+};
+
+}
+
+DeclContextList NamespaceSpecifierSet::BuildContextChain(DeclContext *Start) {
+  assert(Start && "Building a context chain from a null context");
+ DeclContextList Chain;
+ for (DeclContext *DC = Start->getPrimaryContext(); DC != NULL;
+ DC = DC->getLookupParent()) {
+ NamespaceDecl *ND = dyn_cast_or_null<NamespaceDecl>(DC);
+ if (!DC->isInlineNamespace() && !DC->isTransparentContext() &&
+ !(ND && ND->isAnonymousNamespace()))
+ Chain.push_back(DC->getPrimaryContext());
+ }
+ return Chain;
+}
+
+void NamespaceSpecifierSet::SortNamespaces() {
+ SmallVector<unsigned, 4> sortedDistances;
+ sortedDistances.append(Distances.begin(), Distances.end());
+
+ if (sortedDistances.size() > 1)
+ std::sort(sortedDistances.begin(), sortedDistances.end());
+
+ Specifiers.clear();
+ for (SmallVector<unsigned, 4>::iterator DI = sortedDistances.begin(),
+ DIEnd = sortedDistances.end();
+ DI != DIEnd; ++DI) {
+ SpecifierInfoList &SpecList = DistanceMap[*DI];
+ Specifiers.append(SpecList.begin(), SpecList.end());
+ }
+
+ isSorted = true;
+}
+
+void NamespaceSpecifierSet::AddNamespace(NamespaceDecl *ND) {
+ DeclContext *Ctx = cast<DeclContext>(ND);
+ NestedNameSpecifier *NNS = NULL;
+ unsigned NumSpecifiers = 0;
+ DeclContextList NamespaceDeclChain(BuildContextChain(Ctx));
+ DeclContextList FullNamespaceDeclChain(NamespaceDeclChain);
+
+ // Eliminate common elements from the two DeclContext chains.
+ for (DeclContextList::reverse_iterator C = CurContextChain.rbegin(),
+ CEnd = CurContextChain.rend();
+ C != CEnd && !NamespaceDeclChain.empty() &&
+ NamespaceDeclChain.back() == *C; ++C) {
+ NamespaceDeclChain.pop_back();
+ }
+
+ // Add an explicit leading '::' specifier if needed.
+ if (NamespaceDecl *ND =
+ NamespaceDeclChain.empty() ? NULL :
+ dyn_cast_or_null<NamespaceDecl>(NamespaceDeclChain.back())) {
+ IdentifierInfo *Name = ND->getIdentifier();
+ if (std::find(CurContextIdentifiers.begin(), CurContextIdentifiers.end(),
+ Name) != CurContextIdentifiers.end() ||
+ std::find(CurNameSpecifierIdentifiers.begin(),
+ CurNameSpecifierIdentifiers.end(),
+ Name) != CurNameSpecifierIdentifiers.end()) {
+ NamespaceDeclChain = FullNamespaceDeclChain;
+ NNS = NestedNameSpecifier::GlobalSpecifier(Context);
+ }
+ }
+
+ // Build the NestedNameSpecifier from what is left of the NamespaceDeclChain
+ for (DeclContextList::reverse_iterator C = NamespaceDeclChain.rbegin(),
+ CEnd = NamespaceDeclChain.rend();
+ C != CEnd; ++C) {
+ NamespaceDecl *ND = dyn_cast_or_null<NamespaceDecl>(*C);
+ if (ND) {
+ NNS = NestedNameSpecifier::Create(Context, NNS, ND);
+ ++NumSpecifiers;
+ }
+ }
+
+ // If the built NestedNameSpecifier would be replacing an existing
+ // NestedNameSpecifier, use the number of component identifiers that
+ // would need to be changed as the edit distance instead of the number
+ // of components in the built NestedNameSpecifier.
+ if (NNS && !CurNameSpecifierIdentifiers.empty()) {
+ SmallVector<const IdentifierInfo*, 4> NewNameSpecifierIdentifiers;
+ getNestedNameSpecifierIdentifiers(NNS, NewNameSpecifierIdentifiers);
+ NumSpecifiers = llvm::ComputeEditDistance(
+ llvm::ArrayRef<const IdentifierInfo*>(CurNameSpecifierIdentifiers),
+ llvm::ArrayRef<const IdentifierInfo*>(NewNameSpecifierIdentifiers));
+ }
+
+ isSorted = false;
+ Distances.insert(NumSpecifiers);
+ DistanceMap[NumSpecifiers].push_back(SpecifierInfo(Ctx, NNS, NumSpecifiers));
+}
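+
+// For illustration, the distance recorded above for a hypothetical correction
+// (only a sketch):
+//
+//   namespace foo { namespace bar { void f(); } }
+//   namespace foo { namespace baz { void g(); } }
+//
+//   // At translation-unit scope the user wrote foo::bar::g, and foo::baz is
+//   // being considered as a replacement namespace.
+//
+// Because "foo" also appears in the written specifier, the leading '::' is
+// added and the specifier built here is "::foo::baz::"; the recorded distance,
+// however, is the edit distance between the identifier sequences {foo, bar}
+// and {foo, baz}, i.e. 1, not the component count of the new specifier.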
+
+/// \brief Perform name lookup for a possible result for typo correction.
+static void LookupPotentialTypoResult(Sema &SemaRef,
+ LookupResult &Res,
+ IdentifierInfo *Name,
+ Scope *S, CXXScopeSpec *SS,
+ DeclContext *MemberContext,
+ bool EnteringContext,
+ bool isObjCIvarLookup) {
+ Res.suppressDiagnostics();
+ Res.clear();
+ Res.setLookupName(Name);
+ if (MemberContext) {
+ if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(MemberContext)) {
+ if (isObjCIvarLookup) {
+ if (ObjCIvarDecl *Ivar = Class->lookupInstanceVariable(Name)) {
+ Res.addDecl(Ivar);
+ Res.resolveKind();
+ return;
+ }
+ }
+
+ if (ObjCPropertyDecl *Prop = Class->FindPropertyDeclaration(Name)) {
+ Res.addDecl(Prop);
+ Res.resolveKind();
+ return;
+ }
+ }
+
+ SemaRef.LookupQualifiedName(Res, MemberContext);
+ return;
+ }
+
+ SemaRef.LookupParsedName(Res, S, SS, /*AllowBuiltinCreation=*/false,
+ EnteringContext);
+
+ // Fake ivar lookup; this should really be part of
+ // LookupParsedName.
+ if (ObjCMethodDecl *Method = SemaRef.getCurMethodDecl()) {
+ if (Method->isInstanceMethod() && Method->getClassInterface() &&
+ (Res.empty() ||
+ (Res.isSingleResult() &&
+ Res.getFoundDecl()->isDefinedOutsideFunctionOrMethod()))) {
+ if (ObjCIvarDecl *IV
+ = Method->getClassInterface()->lookupInstanceVariable(Name)) {
+ Res.addDecl(IV);
+ Res.resolveKind();
+ }
+ }
+ }
+}
+
+/// \brief Add keywords to the consumer as possible typo corrections.
+static void AddKeywordsToConsumer(Sema &SemaRef,
+ TypoCorrectionConsumer &Consumer,
+ Scope *S, CorrectionCandidateCallback &CCC) {
+ if (CCC.WantObjCSuper)
+ Consumer.addKeywordResult("super");
+
+ if (CCC.WantTypeSpecifiers) {
+ // Add type-specifier keywords to the set of results.
+ const char *CTypeSpecs[] = {
+ "char", "const", "double", "enum", "float", "int", "long", "short",
+ "signed", "struct", "union", "unsigned", "void", "volatile",
+ "_Complex", "_Imaginary",
+ // storage-specifiers as well
+ "extern", "inline", "static", "typedef"
+ };
+
+ const unsigned NumCTypeSpecs = sizeof(CTypeSpecs) / sizeof(CTypeSpecs[0]);
+ for (unsigned I = 0; I != NumCTypeSpecs; ++I)
+ Consumer.addKeywordResult(CTypeSpecs[I]);
+
+ if (SemaRef.getLangOpts().C99)
+ Consumer.addKeywordResult("restrict");
+ if (SemaRef.getLangOpts().Bool || SemaRef.getLangOpts().CPlusPlus)
+ Consumer.addKeywordResult("bool");
+ else if (SemaRef.getLangOpts().C99)
+ Consumer.addKeywordResult("_Bool");
+
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ Consumer.addKeywordResult("class");
+ Consumer.addKeywordResult("typename");
+ Consumer.addKeywordResult("wchar_t");
+
+ if (SemaRef.getLangOpts().CPlusPlus0x) {
+ Consumer.addKeywordResult("char16_t");
+ Consumer.addKeywordResult("char32_t");
+ Consumer.addKeywordResult("constexpr");
+ Consumer.addKeywordResult("decltype");
+ Consumer.addKeywordResult("thread_local");
+ }
+ }
+
+ if (SemaRef.getLangOpts().GNUMode)
+ Consumer.addKeywordResult("typeof");
+ }
+
+ if (CCC.WantCXXNamedCasts && SemaRef.getLangOpts().CPlusPlus) {
+ Consumer.addKeywordResult("const_cast");
+ Consumer.addKeywordResult("dynamic_cast");
+ Consumer.addKeywordResult("reinterpret_cast");
+ Consumer.addKeywordResult("static_cast");
+ }
+
+ if (CCC.WantExpressionKeywords) {
+ Consumer.addKeywordResult("sizeof");
+ if (SemaRef.getLangOpts().Bool || SemaRef.getLangOpts().CPlusPlus) {
+ Consumer.addKeywordResult("false");
+ Consumer.addKeywordResult("true");
+ }
+
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ const char *CXXExprs[] = {
+ "delete", "new", "operator", "throw", "typeid"
+ };
+ const unsigned NumCXXExprs = sizeof(CXXExprs) / sizeof(CXXExprs[0]);
+ for (unsigned I = 0; I != NumCXXExprs; ++I)
+ Consumer.addKeywordResult(CXXExprs[I]);
+
+ if (isa<CXXMethodDecl>(SemaRef.CurContext) &&
+ cast<CXXMethodDecl>(SemaRef.CurContext)->isInstance())
+ Consumer.addKeywordResult("this");
+
+ if (SemaRef.getLangOpts().CPlusPlus0x) {
+ Consumer.addKeywordResult("alignof");
+ Consumer.addKeywordResult("nullptr");
+ }
+ }
+ }
+
+ if (CCC.WantRemainingKeywords) {
+ if (SemaRef.getCurFunctionOrMethodDecl() || SemaRef.getCurBlock()) {
+ // Statements.
+ const char *CStmts[] = {
+ "do", "else", "for", "goto", "if", "return", "switch", "while" };
+ const unsigned NumCStmts = sizeof(CStmts) / sizeof(CStmts[0]);
+ for (unsigned I = 0; I != NumCStmts; ++I)
+ Consumer.addKeywordResult(CStmts[I]);
+
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ Consumer.addKeywordResult("catch");
+ Consumer.addKeywordResult("try");
+ }
+
+ if (S && S->getBreakParent())
+ Consumer.addKeywordResult("break");
+
+ if (S && S->getContinueParent())
+ Consumer.addKeywordResult("continue");
+
+ if (!SemaRef.getCurFunction()->SwitchStack.empty()) {
+ Consumer.addKeywordResult("case");
+ Consumer.addKeywordResult("default");
+ }
+ } else {
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ Consumer.addKeywordResult("namespace");
+ Consumer.addKeywordResult("template");
+ }
+
+ if (S && S->isClassScope()) {
+ Consumer.addKeywordResult("explicit");
+ Consumer.addKeywordResult("friend");
+ Consumer.addKeywordResult("mutable");
+ Consumer.addKeywordResult("private");
+ Consumer.addKeywordResult("protected");
+ Consumer.addKeywordResult("public");
+ Consumer.addKeywordResult("virtual");
+ }
+ }
+
+ if (SemaRef.getLangOpts().CPlusPlus) {
+ Consumer.addKeywordResult("using");
+
+ if (SemaRef.getLangOpts().CPlusPlus0x)
+ Consumer.addKeywordResult("static_assert");
+ }
+ }
+}
+
+static bool isCandidateViable(CorrectionCandidateCallback &CCC,
+ TypoCorrection &Candidate) {
+ Candidate.setCallbackDistance(CCC.RankCandidate(Candidate));
+ return Candidate.getEditDistance(false) != TypoCorrection::InvalidDistance;
+}
+
+/// \brief Try to "correct" a typo in the source code by finding
+/// visible declarations whose names are similar to the name that was
+/// present in the source code.
+///
+/// \param TypoName the \c DeclarationNameInfo structure that contains
+/// the name that was present in the source code along with its location.
+///
+/// \param LookupKind the name-lookup criteria used to search for the name.
+///
+/// \param S the scope in which name lookup occurs.
+///
+/// \param SS the nested-name-specifier that precedes the name we're
+/// looking for, if present.
+///
+/// \param CCC A CorrectionCandidateCallback object that provides further
+/// validation of typo correction candidates. It also provides flags for
+/// determining the set of keywords permitted.
+///
+/// \param MemberContext if non-NULL, the context in which to look for
+/// a member access expression.
+///
+/// \param EnteringContext whether we're entering the context described by
+/// the nested-name-specifier SS.
+///
+/// \param OPT when non-NULL, the search for visible declarations will
+/// also walk the protocols in the qualified interfaces of \p OPT.
+///
+/// \returns a \c TypoCorrection containing the corrected name if the typo
+/// was corrected, along with information such as the \c NamedDecl where the
+/// corrected name was declared, and any additional \c NestedNameSpecifier
+/// needed to access it (C++ only). The \c TypoCorrection is empty if there
+/// is no correction.
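+///
+/// A minimal, purely illustrative call site (the names \c SemaRef, \c NameInfo,
+/// \c S and \c SS below are assumed to already exist in the caller) might be:
+/// \code
+///   // Default-constructed callback, assumed to accept every candidate.
+///   CorrectionCandidateCallback DefaultValidator;
+///   TypoCorrection Corrected =
+///       SemaRef.CorrectTypo(NameInfo, Sema::LookupOrdinaryName, S, SS,
+///                           DefaultValidator, /*MemberContext=*/0,
+///                           /*EnteringContext=*/false, /*OPT=*/0);
+///   std::string Suggestion;
+///   if (Corrected.getCorrectionAsIdentifierInfo())
+///     Suggestion = Corrected.getAsString(SemaRef.getLangOpts());
+/// \endcode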
+TypoCorrection Sema::CorrectTypo(const DeclarationNameInfo &TypoName,
+ Sema::LookupNameKind LookupKind,
+ Scope *S, CXXScopeSpec *SS,
+ CorrectionCandidateCallback &CCC,
+ DeclContext *MemberContext,
+ bool EnteringContext,
+ const ObjCObjectPointerType *OPT) {
+ if (Diags.hasFatalErrorOccurred() || !getLangOpts().SpellChecking)
+ return TypoCorrection();
+
+ // In Microsoft mode, don't perform typo correction in a template member
+ // function dependent context because it interferes with the "lookup into
+ // dependent bases of class templates" feature.
+ if (getLangOpts().MicrosoftMode && CurContext->isDependentContext() &&
+ isa<CXXMethodDecl>(CurContext))
+ return TypoCorrection();
+
+ // We only attempt to correct typos for identifiers.
+ IdentifierInfo *Typo = TypoName.getName().getAsIdentifierInfo();
+ if (!Typo)
+ return TypoCorrection();
+
+ // If the scope specifier itself was invalid, don't try to correct
+ // typos.
+ if (SS && SS->isInvalid())
+ return TypoCorrection();
+
+ // Never try to correct typos during template deduction or
+ // instantiation.
+ if (!ActiveTemplateInstantiations.empty())
+ return TypoCorrection();
+
+ NamespaceSpecifierSet Namespaces(Context, CurContext, SS);
+
+ TypoCorrectionConsumer Consumer(*this, Typo);
+
+ // If a callback object considers an empty typo correction candidate to be
+ // viable, assume it does not do any actual validation of the candidates.
+ TypoCorrection EmptyCorrection;
+ bool ValidatingCallback = !isCandidateViable(CCC, EmptyCorrection);
+
+ // Perform name lookup to find visible, similarly-named entities.
+ bool IsUnqualifiedLookup = false;
+ DeclContext *QualifiedDC = MemberContext;
+ if (MemberContext) {
+ LookupVisibleDecls(MemberContext, LookupKind, Consumer);
+
+ // Look in qualified interfaces.
+ if (OPT) {
+ for (ObjCObjectPointerType::qual_iterator
+ I = OPT->qual_begin(), E = OPT->qual_end();
+ I != E; ++I)
+ LookupVisibleDecls(*I, LookupKind, Consumer);
+ }
+ } else if (SS && SS->isSet()) {
+ QualifiedDC = computeDeclContext(*SS, EnteringContext);
+ if (!QualifiedDC)
+ return TypoCorrection();
+
+ // Provide a stopgap for files that are just seriously broken. Trying
+ // to correct all typos can turn into a HUGE performance penalty, causing
+ // some files to take minutes to get rejected by the parser.
+ if (TyposCorrected + UnqualifiedTyposCorrected.size() >= 20)
+ return TypoCorrection();
+ ++TyposCorrected;
+
+ LookupVisibleDecls(QualifiedDC, LookupKind, Consumer);
+ } else {
+ IsUnqualifiedLookup = true;
+ UnqualifiedTyposCorrectedMap::iterator Cached
+ = UnqualifiedTyposCorrected.find(Typo);
+ if (Cached != UnqualifiedTyposCorrected.end()) {
+ // Add the cached value, unless it's a keyword or fails validation. In the
+ // keyword case, we'll end up adding the keyword below.
+ if (Cached->second) {
+ if (!Cached->second.isKeyword() &&
+ isCandidateViable(CCC, Cached->second))
+ Consumer.addCorrection(Cached->second);
+ } else {
+ // Only honor no-correction cache hits when a callback that will validate
+ // correction candidates is not being used.
+ if (!ValidatingCallback)
+ return TypoCorrection();
+ }
+ }
+ if (Cached == UnqualifiedTyposCorrected.end()) {
+ // Provide a stopgap for files that are just seriously broken. Trying
+ // to correct all typos can turn into a HUGE performance penalty, causing
+ // some files to take minutes to get rejected by the parser.
+ if (TyposCorrected + UnqualifiedTyposCorrected.size() >= 20)
+ return TypoCorrection();
+ }
+ }
+
+ // Determine whether we are going to search in the various namespaces for
+ // corrections.
+ bool SearchNamespaces
+ = getLangOpts().CPlusPlus &&
+ (IsUnqualifiedLookup || (QualifiedDC && QualifiedDC->isNamespace()));
+
+ if (IsUnqualifiedLookup || SearchNamespaces) {
+ // For unqualified lookup, look through all of the names that we have
+ // seen in this translation unit.
+ // FIXME: Re-add the ability to skip very unlikely potential corrections.
+ for (IdentifierTable::iterator I = Context.Idents.begin(),
+ IEnd = Context.Idents.end();
+ I != IEnd; ++I)
+ Consumer.FoundName(I->getKey());
+
+ // Walk through identifiers in external identifier sources.
+ // FIXME: Re-add the ability to skip very unlikely potential corrections.
+ if (IdentifierInfoLookup *External
+ = Context.Idents.getExternalIdentifierLookup()) {
+ OwningPtr<IdentifierIterator> Iter(External->getIdentifiers());
+ do {
+ StringRef Name = Iter->Next();
+ if (Name.empty())
+ break;
+
+ Consumer.FoundName(Name);
+ } while (true);
+ }
+ }
+
+ AddKeywordsToConsumer(*this, Consumer, S, CCC);
+
+ // If we haven't found anything, we're done.
+ if (Consumer.empty()) {
+ // If this was an unqualified lookup, note that no correction was found.
+ if (IsUnqualifiedLookup)
+ (void)UnqualifiedTyposCorrected[Typo];
+
+ return TypoCorrection();
+ }
+
+ // Make sure that the user typed at least 3 characters for each correction
+ // made. Otherwise, we don't even bother looking at the results.
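+ // For example, a 5-character identifier corrected with an edit distance of 2
+ // yields 5 / 2 == 2, which is less than 3, so the correction is dropped.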
+ unsigned ED = Consumer.getBestEditDistance(true);
+ if (ED > 0 && Typo->getName().size() / ED < 3) {
+ // If this was an unqualified lookup, note that no correction was found.
+ if (IsUnqualifiedLookup)
+ (void)UnqualifiedTyposCorrected[Typo];
+
+ return TypoCorrection();
+ }
+
+ // Build the NestedNameSpecifiers for the KnownNamespaces, if we're going
+ // to search those namespaces.
+ if (SearchNamespaces) {
+ // Load any externally-known namespaces.
+ if (ExternalSource && !LoadedExternalKnownNamespaces) {
+ SmallVector<NamespaceDecl *, 4> ExternalKnownNamespaces;
+ LoadedExternalKnownNamespaces = true;
+ ExternalSource->ReadKnownNamespaces(ExternalKnownNamespaces);
+ for (unsigned I = 0, N = ExternalKnownNamespaces.size(); I != N; ++I)
+ KnownNamespaces[ExternalKnownNamespaces[I]] = true;
+ }
+
+ for (llvm::DenseMap<NamespaceDecl*, bool>::iterator
+ KNI = KnownNamespaces.begin(),
+ KNIEnd = KnownNamespaces.end();
+ KNI != KNIEnd; ++KNI)
+ Namespaces.AddNamespace(KNI->first);
+ }
+
+ // Weed out any names that could not be found by name lookup or, if a
+ // CorrectionCandidateCallback object was provided, failed validation.
+ llvm::SmallVector<TypoCorrection, 16> QualifiedResults;
+ LookupResult TmpRes(*this, TypoName, LookupKind);
+ TmpRes.suppressDiagnostics();
+ while (!Consumer.empty()) {
+ TypoCorrectionConsumer::distance_iterator DI = Consumer.begin();
+ unsigned ED = DI->first;
+ for (TypoCorrectionConsumer::result_iterator I = DI->second->begin(),
+ IEnd = DI->second->end();
+ I != IEnd; /* Increment in loop. */) {
+ // If the item already has been looked up or is a keyword, keep it.
+ // If a validator callback object was given, drop the correction
+ // unless it passes validation.
+ if (I->second.isResolved()) {
+ TypoCorrectionConsumer::result_iterator Prev = I;
+ ++I;
+ if (!isCandidateViable(CCC, Prev->second))
+ DI->second->erase(Prev);
+ continue;
+ }
+
+ // Perform name lookup on this name.
+ IdentifierInfo *Name = I->second.getCorrectionAsIdentifierInfo();
+ LookupPotentialTypoResult(*this, TmpRes, Name, S, SS, MemberContext,
+ EnteringContext, CCC.IsObjCIvarLookup);
+
+ switch (TmpRes.getResultKind()) {
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ case LookupResult::FoundUnresolvedValue:
+ QualifiedResults.push_back(I->second);
+ // We didn't find this name in our scope, or didn't like what we found;
+ // ignore it.
+ {
+ TypoCorrectionConsumer::result_iterator Next = I;
+ ++Next;
+ DI->second->erase(I);
+ I = Next;
+ }
+ break;
+
+ case LookupResult::Ambiguous:
+ // We don't deal with ambiguities.
+ return TypoCorrection();
+
+ case LookupResult::FoundOverloaded: {
+ TypoCorrectionConsumer::result_iterator Prev = I;
+ // Store all of the Decls for overloaded symbols
+ for (LookupResult::iterator TRD = TmpRes.begin(),
+ TRDEnd = TmpRes.end();
+ TRD != TRDEnd; ++TRD)
+ I->second.addCorrectionDecl(*TRD);
+ ++I;
+ if (!isCandidateViable(CCC, Prev->second))
+ DI->second->erase(Prev);
+ break;
+ }
+
+ case LookupResult::Found: {
+ TypoCorrectionConsumer::result_iterator Prev = I;
+ I->second.setCorrectionDecl(TmpRes.getAsSingle<NamedDecl>());
+ ++I;
+ if (!isCandidateViable(CCC, Prev->second))
+ DI->second->erase(Prev);
+ break;
+ }
+
+ }
+ }
+
+ if (DI->second->empty())
+ Consumer.erase(DI);
+ else if (!getLangOpts().CPlusPlus || QualifiedResults.empty() || !ED)
+ // If there are results in the closest possible bucket, stop
+ break;
+
+ // Only perform the qualified lookups for C++
+ if (SearchNamespaces) {
+ TmpRes.suppressDiagnostics();
+ for (llvm::SmallVector<TypoCorrection,
+ 16>::iterator QRI = QualifiedResults.begin(),
+ QRIEnd = QualifiedResults.end();
+ QRI != QRIEnd; ++QRI) {
+ for (NamespaceSpecifierSet::iterator NI = Namespaces.begin(),
+ NIEnd = Namespaces.end();
+ NI != NIEnd; ++NI) {
+ DeclContext *Ctx = NI->DeclCtx;
+
+ // FIXME: Stop searching once the namespaces are too far away to create
+ // acceptable corrections for this identifier (since the namespaces
+ // are sorted in ascending order by edit distance).
+
+ TmpRes.clear();
+ TmpRes.setLookupName(QRI->getCorrectionAsIdentifierInfo());
+ if (!LookupQualifiedName(TmpRes, Ctx)) continue;
+
+ // Any corrections added below will be validated in subsequent
+ // iterations of the main while() loop over the Consumer's contents.
+ switch (TmpRes.getResultKind()) {
+ case LookupResult::Found: {
+ TypoCorrection TC(*QRI);
+ TC.setCorrectionDecl(TmpRes.getAsSingle<NamedDecl>());
+ TC.setCorrectionSpecifier(NI->NameSpecifier);
+ TC.setQualifierDistance(NI->EditDistance);
+ Consumer.addCorrection(TC);
+ break;
+ }
+ case LookupResult::FoundOverloaded: {
+ TypoCorrection TC(*QRI);
+ TC.setCorrectionSpecifier(NI->NameSpecifier);
+ TC.setQualifierDistance(NI->EditDistance);
+ for (LookupResult::iterator TRD = TmpRes.begin(),
+ TRDEnd = TmpRes.end();
+ TRD != TRDEnd; ++TRD)
+ TC.addCorrectionDecl(*TRD);
+ Consumer.addCorrection(TC);
+ break;
+ }
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ case LookupResult::Ambiguous:
+ case LookupResult::FoundUnresolvedValue:
+ break;
+ }
+ }
+ }
+ }
+
+ QualifiedResults.clear();
+ }
+
+ // No corrections remain...
+ if (Consumer.empty()) return TypoCorrection();
+
+ TypoResultsMap &BestResults = *Consumer.begin()->second;
+ ED = TypoCorrection::NormalizeEditDistance(Consumer.begin()->first);
+
+ if (ED > 0 && Typo->getName().size() / ED < 3) {
+ // If this was an unqualified lookup and we believe the callback
+ // object wouldn't have filtered out possible corrections, note
+ // that no correction was found.
+ if (IsUnqualifiedLookup && !ValidatingCallback)
+ (void)UnqualifiedTyposCorrected[Typo];
+
+ return TypoCorrection();
+ }
+
+ // If only a single name remains, return that result.
+ if (BestResults.size() == 1) {
+ const llvm::StringMapEntry<TypoCorrection> &Correction = *(BestResults.begin());
+ const TypoCorrection &Result = Correction.second;
+
+ // Don't correct to a keyword that's the same as the typo; the keyword
+ // wasn't actually in scope.
+ if (ED == 0 && Result.isKeyword()) return TypoCorrection();
+
+ // Record the correction for unqualified lookup.
+ if (IsUnqualifiedLookup)
+ UnqualifiedTyposCorrected[Typo] = Result;
+
+ return Result;
+ }
+ else if (BestResults.size() > 1
+ // Ugly hack equivalent to CTC == CTC_ObjCMessageReceiver;
+ // WantObjCSuper is only true for CTC_ObjCMessageReceiver and for
+ // some instances of CTC_Unknown, while WantRemainingKeywords is true
+ // for CTC_Unknown but not for CTC_ObjCMessageReceiver.
+ && CCC.WantObjCSuper && !CCC.WantRemainingKeywords
+ && BestResults["super"].isKeyword()) {
+ // Prefer 'super' when we're completing in a message-receiver
+ // context.
+
+ // Don't correct to a keyword that's the same as the typo; the keyword
+ // wasn't actually in scope.
+ if (ED == 0) return TypoCorrection();
+
+ // Record the correction for unqualified lookup.
+ if (IsUnqualifiedLookup)
+ UnqualifiedTyposCorrected[Typo] = BestResults["super"];
+
+ return BestResults["super"];
+ }
+
+ // If this was an unqualified lookup and we believe the callback object did
+ // not filter out possible corrections, note that no correction was found.
+ if (IsUnqualifiedLookup && !ValidatingCallback)
+ (void)UnqualifiedTyposCorrected[Typo];
+
+ return TypoCorrection();
+}
+
+void TypoCorrection::addCorrectionDecl(NamedDecl *CDecl) {
+ if (!CDecl) return;
+
+ if (isKeyword())
+ CorrectionDecls.clear();
+
+ CorrectionDecls.push_back(CDecl);
+
+ if (!CorrectionName)
+ CorrectionName = CDecl->getDeclName();
+}
+
+std::string TypoCorrection::getAsString(const LangOptions &LO) const {
+ if (CorrectionNameSpec) {
+ std::string tmpBuffer;
+ llvm::raw_string_ostream PrefixOStream(tmpBuffer);
+ CorrectionNameSpec->print(PrefixOStream, PrintingPolicy(LO));
+ return PrefixOStream.str() + CorrectionName.getAsString();
+ }
+
+ return CorrectionName.getAsString();
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
new file mode 100644
index 0000000..5ece8f1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaObjCProperty.cpp
@@ -0,0 +1,1953 @@
+//===--- SemaObjCProperty.cpp - Semantic Analysis for ObjC @property ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for Objective-C @property and
+// @synthesize declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Grammar actions.
+//===----------------------------------------------------------------------===//
+
+/// getImpliedARCOwnership - Given a set of property attributes and a
+/// type, infer an expected lifetime. The type's ownership qualification
+/// is not considered.
+///
+/// Returns OCL_None if the attributes as stated do not imply an ownership.
+/// Never returns OCL_Autoreleasing.
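+///
+/// For example (illustrative): on a retainable type such as 'NSString *',
+/// OBJC_PR_copy or OBJC_PR_strong imply OCL_Strong, OBJC_PR_weak implies
+/// OCL_Weak, and OBJC_PR_assign implies OCL_ExplicitNone; OBJC_PR_assign on a
+/// non-retainable type such as 'int' implies no ownership (OCL_None).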
+static Qualifiers::ObjCLifetime getImpliedARCOwnership(
+ ObjCPropertyDecl::PropertyAttributeKind attrs,
+ QualType type) {
+ // retain, strong, copy, weak, and unsafe_unretained are only legal
+ // on properties of retainable pointer type.
+ if (attrs & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_strong |
+ ObjCPropertyDecl::OBJC_PR_copy)) {
+ return type->getObjCARCImplicitLifetime();
+ } else if (attrs & ObjCPropertyDecl::OBJC_PR_weak) {
+ return Qualifiers::OCL_Weak;
+ } else if (attrs & ObjCPropertyDecl::OBJC_PR_unsafe_unretained) {
+ return Qualifiers::OCL_ExplicitNone;
+ }
+
+ // assign can appear on other types, so we have to check the
+ // property type.
+ if (attrs & ObjCPropertyDecl::OBJC_PR_assign &&
+ type->isObjCRetainableType()) {
+ return Qualifiers::OCL_ExplicitNone;
+ }
+
+ return Qualifiers::OCL_None;
+}
+
+/// Check the internal consistency of a property declaration.
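+/// For example (illustrative), '@property (assign) __strong id x;' carries a
+/// __strong lifetime qualifier while its attributes imply OCL_ExplicitNone
+/// ownership, so it is diagnosed as having inconsistent ownership.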
+static void checkARCPropertyDecl(Sema &S, ObjCPropertyDecl *property) {
+ if (property->isInvalidDecl()) return;
+
+ ObjCPropertyDecl::PropertyAttributeKind propertyKind
+ = property->getPropertyAttributes();
+ Qualifiers::ObjCLifetime propertyLifetime
+ = property->getType().getObjCLifetime();
+
+ // Nothing to do if we don't have a lifetime.
+ if (propertyLifetime == Qualifiers::OCL_None) return;
+
+ Qualifiers::ObjCLifetime expectedLifetime
+ = getImpliedARCOwnership(propertyKind, property->getType());
+ if (!expectedLifetime) {
+ // We have a lifetime qualifier but no dominating property
+ // attribute. That's okay, but restore reasonable invariants by
+ // setting the property attribute according to the lifetime
+ // qualifier.
+ ObjCPropertyDecl::PropertyAttributeKind attr;
+ if (propertyLifetime == Qualifiers::OCL_Strong) {
+ attr = ObjCPropertyDecl::OBJC_PR_strong;
+ } else if (propertyLifetime == Qualifiers::OCL_Weak) {
+ attr = ObjCPropertyDecl::OBJC_PR_weak;
+ } else {
+ assert(propertyLifetime == Qualifiers::OCL_ExplicitNone);
+ attr = ObjCPropertyDecl::OBJC_PR_unsafe_unretained;
+ }
+ property->setPropertyAttributes(attr);
+ return;
+ }
+
+ if (propertyLifetime == expectedLifetime) return;
+
+ property->setInvalidDecl();
+ S.Diag(property->getLocation(),
+ diag::err_arc_inconsistent_property_ownership)
+ << property->getDeclName()
+ << expectedLifetime
+ << propertyLifetime;
+}
+
+Decl *Sema::ActOnProperty(Scope *S, SourceLocation AtLoc,
+ SourceLocation LParenLoc,
+ FieldDeclarator &FD,
+ ObjCDeclSpec &ODS,
+ Selector GetterSel,
+ Selector SetterSel,
+ bool *isOverridingProperty,
+ tok::ObjCKeywordKind MethodImplKind,
+ DeclContext *lexicalDC) {
+ unsigned Attributes = ODS.getPropertyAttributes();
+ TypeSourceInfo *TSI = GetTypeForDeclarator(FD.D, S);
+ QualType T = TSI->getType();
+ if ((getLangOpts().getGC() != LangOptions::NonGC &&
+ T.isObjCGCWeak()) ||
+ (getLangOpts().ObjCAutoRefCount &&
+ T.getObjCLifetime() == Qualifiers::OCL_Weak))
+ Attributes |= ObjCDeclSpec::DQ_PR_weak;
+
+ bool isReadWrite = ((Attributes & ObjCDeclSpec::DQ_PR_readwrite) ||
+ // default is readwrite!
+ !(Attributes & ObjCDeclSpec::DQ_PR_readonly));
+ // property is defaulted to 'assign' if it is readwrite and is
+ // not retain or copy
+ bool isAssign = ((Attributes & ObjCDeclSpec::DQ_PR_assign) ||
+ (isReadWrite &&
+ !(Attributes & ObjCDeclSpec::DQ_PR_retain) &&
+ !(Attributes & ObjCDeclSpec::DQ_PR_strong) &&
+ !(Attributes & ObjCDeclSpec::DQ_PR_copy) &&
+ !(Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) &&
+ !(Attributes & ObjCDeclSpec::DQ_PR_weak)));
+
+ // Proceed with constructing the ObjCPropertyDecls.
+ ObjCContainerDecl *ClassDecl = cast<ObjCContainerDecl>(CurContext);
+
+ if (ObjCCategoryDecl *CDecl = dyn_cast<ObjCCategoryDecl>(ClassDecl))
+ if (CDecl->IsClassExtension()) {
+ Decl *Res = HandlePropertyInClassExtension(S, AtLoc, LParenLoc,
+ FD, GetterSel, SetterSel,
+ isAssign, isReadWrite,
+ Attributes,
+ ODS.getPropertyAttributes(),
+ isOverridingProperty, TSI,
+ MethodImplKind);
+ if (Res) {
+ CheckObjCPropertyAttributes(Res, AtLoc, Attributes);
+ if (getLangOpts().ObjCAutoRefCount)
+ checkARCPropertyDecl(*this, cast<ObjCPropertyDecl>(Res));
+ }
+ return Res;
+ }
+
+ ObjCPropertyDecl *Res = CreatePropertyDecl(S, ClassDecl, AtLoc, LParenLoc, FD,
+ GetterSel, SetterSel,
+ isAssign, isReadWrite,
+ Attributes,
+ ODS.getPropertyAttributes(),
+ TSI, MethodImplKind);
+ if (lexicalDC)
+ Res->setLexicalDeclContext(lexicalDC);
+
+ // Validate the attributes on the @property.
+ CheckObjCPropertyAttributes(Res, AtLoc, Attributes);
+
+ if (getLangOpts().ObjCAutoRefCount)
+ checkARCPropertyDecl(*this, Res);
+
+ return Res;
+}
+
+static ObjCPropertyDecl::PropertyAttributeKind
+makePropertyAttributesAsWritten(unsigned Attributes) {
+ unsigned attributesAsWritten = 0;
+ if (Attributes & ObjCDeclSpec::DQ_PR_readonly)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_readonly;
+ if (Attributes & ObjCDeclSpec::DQ_PR_readwrite)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_readwrite;
+ if (Attributes & ObjCDeclSpec::DQ_PR_getter)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_getter;
+ if (Attributes & ObjCDeclSpec::DQ_PR_setter)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_setter;
+ if (Attributes & ObjCDeclSpec::DQ_PR_assign)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_assign;
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_retain;
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_strong;
+ if (Attributes & ObjCDeclSpec::DQ_PR_weak)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_weak;
+ if (Attributes & ObjCDeclSpec::DQ_PR_copy)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_copy;
+ if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_unsafe_unretained;
+ if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_nonatomic;
+ if (Attributes & ObjCDeclSpec::DQ_PR_atomic)
+ attributesAsWritten |= ObjCPropertyDecl::OBJC_PR_atomic;
+
+ return (ObjCPropertyDecl::PropertyAttributeKind)attributesAsWritten;
+}
+
+Decl *
+Sema::HandlePropertyInClassExtension(Scope *S,
+ SourceLocation AtLoc,
+ SourceLocation LParenLoc,
+ FieldDeclarator &FD,
+ Selector GetterSel, Selector SetterSel,
+ const bool isAssign,
+ const bool isReadWrite,
+ const unsigned Attributes,
+ const unsigned AttributesAsWritten,
+ bool *isOverridingProperty,
+ TypeSourceInfo *T,
+ tok::ObjCKeywordKind MethodImplKind) {
+ ObjCCategoryDecl *CDecl = cast<ObjCCategoryDecl>(CurContext);
+ // Diagnose if this property is already in continuation class.
+ DeclContext *DC = CurContext;
+ IdentifierInfo *PropertyId = FD.D.getIdentifier();
+ ObjCInterfaceDecl *CCPrimary = CDecl->getClassInterface();
+
+ if (CCPrimary)
+ // Check for duplicate declaration of this property in current and
+ // other class extensions.
+ for (const ObjCCategoryDecl *ClsExtDecl =
+ CCPrimary->getFirstClassExtension();
+ ClsExtDecl; ClsExtDecl = ClsExtDecl->getNextClassExtension()) {
+ if (ObjCPropertyDecl *prevDecl =
+ ObjCPropertyDecl::findPropertyDecl(ClsExtDecl, PropertyId)) {
+ Diag(AtLoc, diag::err_duplicate_property);
+ Diag(prevDecl->getLocation(), diag::note_property_declare);
+ return 0;
+ }
+ }
+
+ // Create a new ObjCPropertyDecl with the DeclContext being
+ // the class extension.
+ // FIXME. We should really be using CreatePropertyDecl for this.
+ ObjCPropertyDecl *PDecl =
+ ObjCPropertyDecl::Create(Context, DC, FD.D.getIdentifierLoc(),
+ PropertyId, AtLoc, LParenLoc, T);
+ PDecl->setPropertyAttributesAsWritten(
+ makePropertyAttributesAsWritten(AttributesAsWritten));
+ if (Attributes & ObjCDeclSpec::DQ_PR_readonly)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readonly);
+ if (Attributes & ObjCDeclSpec::DQ_PR_readwrite)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readwrite);
+ // Set setter/getter selector name. Needed later.
+ PDecl->setGetterName(GetterSel);
+ PDecl->setSetterName(SetterSel);
+ ProcessDeclAttributes(S, PDecl, FD.D);
+ DC->addDecl(PDecl);
+
+ // We need to look in the @interface to see if the @property was
+ // already declared.
+ if (!CCPrimary) {
+ Diag(CDecl->getLocation(), diag::err_continuation_class);
+ *isOverridingProperty = true;
+ return 0;
+ }
+
+ // Find the property in continuation class's primary class only.
+ ObjCPropertyDecl *PIDecl =
+ CCPrimary->FindPropertyVisibleInPrimaryClass(PropertyId);
+
+ if (!PIDecl) {
+ // No matching property found in the primary class. Just fall thru
+ // and add the property to the continuation class's primary class.
+ ObjCPropertyDecl *PrimaryPDecl =
+ CreatePropertyDecl(S, CCPrimary, AtLoc, LParenLoc,
+ FD, GetterSel, SetterSel, isAssign, isReadWrite,
+ Attributes,AttributesAsWritten, T, MethodImplKind, DC);
+
+ // A case of a continuation class adding a new property to the class. This
+ // is not what continuation classes are meant for; however, gcc supports it
+ // and so should we.
+ // Make sure setter/getters are declared here.
+ ProcessPropertyDecl(PrimaryPDecl, CCPrimary, /* redeclaredProperty = */ 0,
+ /* lexicalDC = */ CDecl);
+ PDecl->setGetterMethodDecl(PrimaryPDecl->getGetterMethodDecl());
+ PDecl->setSetterMethodDecl(PrimaryPDecl->getSetterMethodDecl());
+ if (ASTMutationListener *L = Context.getASTMutationListener())
+ L->AddedObjCPropertyInClassExtension(PrimaryPDecl, /*OrigProp=*/0, CDecl);
+ return PrimaryPDecl;
+ }
+ if (!Context.hasSameType(PIDecl->getType(), PDecl->getType())) {
+ bool IncompatibleObjC = false;
+ QualType ConvertedType;
+ // Relax the strict type matching for property type in continuation class.
+ // Allow property object type of continuation class to be different as long
+ // as it narrows the object type in its primary class property. Note that
+ // this conversion is safe only because the wider type is for a 'readonly'
+ // property in primary class and 'narrowed' type for a 'readwrite' property
+ // in continuation class.
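+ // For example (illustrative), a 'readonly' property of type 'id' in the
+ // primary class may be redeclared 'readwrite' with type 'NSString *' in
+ // the class extension, since 'NSString *' converts to 'id'.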
+ if (!isa<ObjCObjectPointerType>(PIDecl->getType()) ||
+ !isa<ObjCObjectPointerType>(PDecl->getType()) ||
+ (!isObjCPointerConversion(PDecl->getType(), PIDecl->getType(),
+ ConvertedType, IncompatibleObjC))
+ || IncompatibleObjC) {
+ Diag(AtLoc,
+ diag::err_type_mismatch_continuation_class) << PDecl->getType();
+ Diag(PIDecl->getLocation(), diag::note_property_declare);
+ }
+ }
+
+ // The readonly attribute of the property 'PIDecl' will be overridden
+ // by the continuation class's readwrite property attribute!
+ unsigned PIkind = PIDecl->getPropertyAttributesAsWritten();
+ if (isReadWrite && (PIkind & ObjCPropertyDecl::OBJC_PR_readonly)) {
+ unsigned retainCopyNonatomic =
+ (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_strong |
+ ObjCPropertyDecl::OBJC_PR_copy |
+ ObjCPropertyDecl::OBJC_PR_nonatomic);
+ if ((Attributes & retainCopyNonatomic) !=
+ (PIkind & retainCopyNonatomic)) {
+ Diag(AtLoc, diag::warn_property_attr_mismatch);
+ Diag(PIDecl->getLocation(), diag::note_property_declare);
+ }
+ DeclContext *DC = cast<DeclContext>(CCPrimary);
+ if (!ObjCPropertyDecl::findPropertyDecl(DC,
+ PIDecl->getDeclName().getAsIdentifierInfo())) {
+ // The property is not in the primary class. Must build one for it.
+ ObjCDeclSpec ProtocolPropertyODS;
+ // FIXME. Assuming that ObjCDeclSpec::ObjCPropertyAttributeKind
+ // and ObjCPropertyDecl::PropertyAttributeKind have identical
+ // values. Should consolidate both into one enum type.
+ ProtocolPropertyODS.
+ setPropertyAttributes((ObjCDeclSpec::ObjCPropertyAttributeKind)
+ PIkind);
+ // Must re-establish the context from class extension to primary
+ // class context.
+ ContextRAII SavedContext(*this, CCPrimary);
+
+ Decl *ProtocolPtrTy =
+ ActOnProperty(S, AtLoc, LParenLoc, FD, ProtocolPropertyODS,
+ PIDecl->getGetterName(),
+ PIDecl->getSetterName(),
+ isOverridingProperty,
+ MethodImplKind,
+ /* lexicalDC = */ CDecl);
+ PIDecl = cast<ObjCPropertyDecl>(ProtocolPtrTy);
+ }
+ PIDecl->makeitReadWriteAttribute();
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain)
+ PIDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_retain);
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong)
+ PIDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ if (Attributes & ObjCDeclSpec::DQ_PR_copy)
+ PIDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_copy);
+ PIDecl->setSetterName(SetterSel);
+ } else {
+ // Tailor the diagnostics for the common case where a readwrite
+ // property is declared both in the @interface and the continuation.
+ // This is a common error where the user often intended the original
+ // declaration to be readonly.
+ unsigned diag =
+ (Attributes & ObjCDeclSpec::DQ_PR_readwrite) &&
+ (PIkind & ObjCPropertyDecl::OBJC_PR_readwrite)
+ ? diag::err_use_continuation_class_redeclaration_readwrite
+ : diag::err_use_continuation_class;
+ Diag(AtLoc, diag)
+ << CCPrimary->getDeclName();
+ Diag(PIDecl->getLocation(), diag::note_property_declare);
+ }
+ *isOverridingProperty = true;
+ // Make sure setter decl is synthesized, and added to primary class's list.
+ ProcessPropertyDecl(PIDecl, CCPrimary, PDecl, CDecl);
+ PDecl->setGetterMethodDecl(PIDecl->getGetterMethodDecl());
+ PDecl->setSetterMethodDecl(PIDecl->getSetterMethodDecl());
+ if (ASTMutationListener *L = Context.getASTMutationListener())
+ L->AddedObjCPropertyInClassExtension(PDecl, PIDecl, CDecl);
+ return 0;
+}
+
+ObjCPropertyDecl *Sema::CreatePropertyDecl(Scope *S,
+ ObjCContainerDecl *CDecl,
+ SourceLocation AtLoc,
+ SourceLocation LParenLoc,
+ FieldDeclarator &FD,
+ Selector GetterSel,
+ Selector SetterSel,
+ const bool isAssign,
+ const bool isReadWrite,
+ const unsigned Attributes,
+ const unsigned AttributesAsWritten,
+ TypeSourceInfo *TInfo,
+ tok::ObjCKeywordKind MethodImplKind,
+ DeclContext *lexicalDC){
+ IdentifierInfo *PropertyId = FD.D.getIdentifier();
+ QualType T = TInfo->getType();
+
+ // Issue a warning if the property is 'assign' by default and its object,
+ // which is gc'able, conforms to the NSCopying protocol.
+ if (getLangOpts().getGC() != LangOptions::NonGC &&
+ isAssign && !(Attributes & ObjCDeclSpec::DQ_PR_assign))
+ if (const ObjCObjectPointerType *ObjPtrTy =
+ T->getAs<ObjCObjectPointerType>()) {
+ ObjCInterfaceDecl *IDecl = ObjPtrTy->getObjectType()->getInterface();
+ if (IDecl)
+ if (ObjCProtocolDecl* PNSCopying =
+ LookupProtocol(&Context.Idents.get("NSCopying"), AtLoc))
+ if (IDecl->ClassImplementsProtocol(PNSCopying, true))
+ Diag(AtLoc, diag::warn_implements_nscopying) << PropertyId;
+ }
+ if (T->isObjCObjectType())
+ Diag(FD.D.getIdentifierLoc(), diag::err_statically_allocated_object);
+
+ DeclContext *DC = cast<DeclContext>(CDecl);
+ ObjCPropertyDecl *PDecl = ObjCPropertyDecl::Create(Context, DC,
+ FD.D.getIdentifierLoc(),
+ PropertyId, AtLoc, LParenLoc, TInfo);
+
+ if (ObjCPropertyDecl *prevDecl =
+ ObjCPropertyDecl::findPropertyDecl(DC, PropertyId)) {
+ Diag(PDecl->getLocation(), diag::err_duplicate_property);
+ Diag(prevDecl->getLocation(), diag::note_property_declare);
+ PDecl->setInvalidDecl();
+ }
+ else {
+ DC->addDecl(PDecl);
+ if (lexicalDC)
+ PDecl->setLexicalDeclContext(lexicalDC);
+ }
+
+ if (T->isArrayType() || T->isFunctionType()) {
+ Diag(AtLoc, diag::err_property_type) << T;
+ PDecl->setInvalidDecl();
+ }
+
+ ProcessDeclAttributes(S, PDecl, FD.D);
+
+ // Regardless of setter/getter attribute, we save the default getter/setter
+ // selector names in anticipation of declaration of setter/getter methods.
+ PDecl->setGetterName(GetterSel);
+ PDecl->setSetterName(SetterSel);
+ PDecl->setPropertyAttributesAsWritten(
+ makePropertyAttributesAsWritten(AttributesAsWritten));
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_readonly)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readonly);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_getter)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_getter);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_setter)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_setter);
+
+ if (isReadWrite)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_readwrite);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_retain);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_weak)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_weak);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_copy)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_copy);
+
+ if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
+
+ if (isAssign)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_assign);
+
+ // In the semantic attributes, one of nonatomic or atomic is always set.
+ if (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_nonatomic);
+ else
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_atomic);
+
+ // 'unsafe_unretained' is an alias for 'assign'.
+ if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_assign);
+ if (isAssign)
+ PDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_unsafe_unretained);
+
+ if (MethodImplKind == tok::objc_required)
+ PDecl->setPropertyImplementation(ObjCPropertyDecl::Required);
+ else if (MethodImplKind == tok::objc_optional)
+ PDecl->setPropertyImplementation(ObjCPropertyDecl::Optional);
+
+ return PDecl;
+}
+
+static void checkARCPropertyImpl(Sema &S, SourceLocation propertyImplLoc,
+ ObjCPropertyDecl *property,
+ ObjCIvarDecl *ivar) {
+ if (property->isInvalidDecl() || ivar->isInvalidDecl()) return;
+
+ QualType ivarType = ivar->getType();
+ Qualifiers::ObjCLifetime ivarLifetime = ivarType.getObjCLifetime();
+
+ // The lifetime implied by the property's attributes.
+ Qualifiers::ObjCLifetime propertyLifetime =
+ getImpliedARCOwnership(property->getPropertyAttributes(),
+ property->getType());
+
+ // We're fine if they match.
+ if (propertyLifetime == ivarLifetime) return;
+
+ // These aren't valid lifetimes for object ivars; don't diagnose twice.
+ if (ivarLifetime == Qualifiers::OCL_None ||
+ ivarLifetime == Qualifiers::OCL_Autoreleasing)
+ return;
+
+ switch (propertyLifetime) {
+ case Qualifiers::OCL_Strong:
+ S.Diag(propertyImplLoc, diag::err_arc_strong_property_ownership)
+ << property->getDeclName()
+ << ivar->getDeclName()
+ << ivarLifetime;
+ break;
+
+ case Qualifiers::OCL_Weak:
+ S.Diag(propertyImplLoc, diag::error_weak_property)
+ << property->getDeclName()
+ << ivar->getDeclName();
+ break;
+
+ case Qualifiers::OCL_ExplicitNone:
+ S.Diag(propertyImplLoc, diag::err_arc_assign_property_ownership)
+ << property->getDeclName()
+ << ivar->getDeclName()
+ << ((property->getPropertyAttributesAsWritten()
+ & ObjCPropertyDecl::OBJC_PR_assign) != 0);
+ break;
+
+ case Qualifiers::OCL_Autoreleasing:
+ llvm_unreachable("properties cannot be autoreleasing");
+
+ case Qualifiers::OCL_None:
+ // Any other property should be ignored.
+ return;
+ }
+
+ S.Diag(property->getLocation(), diag::note_property_declare);
+}
+
+/// setImpliedPropertyAttributeForReadOnlyProperty -
+/// This routine evaluates the lifetime attributes for a 'readonly'
+/// property with no known lifetime of its own, using the backing
+/// ivar's attribute, if any. If there is no backing ivar, the property's
+/// lifetime is assumed to be 'strong'.
+static void setImpliedPropertyAttributeForReadOnlyProperty(
+ ObjCPropertyDecl *property, ObjCIvarDecl *ivar) {
+ Qualifiers::ObjCLifetime propertyLifetime =
+ getImpliedARCOwnership(property->getPropertyAttributes(),
+ property->getType());
+ if (propertyLifetime != Qualifiers::OCL_None)
+ return;
+
+ if (!ivar) {
+ // if no backing ivar, make property 'strong'.
+ property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ return;
+ }
+ // The property assumes the ownership of its backing ivar.
+ QualType ivarType = ivar->getType();
+ Qualifiers::ObjCLifetime ivarLifetime = ivarType.getObjCLifetime();
+ if (ivarLifetime == Qualifiers::OCL_Strong)
+ property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ else if (ivarLifetime == Qualifiers::OCL_Weak)
+ property->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_weak);
+ return;
+}
+
+/// ActOnPropertyImplDecl - This routine performs semantic checks and
+/// builds the AST node for a property implementation declaration, declared
+/// with either @synthesize or @dynamic.
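+/// For example, '@synthesize name = _name;' arrives here with \p Synthesize
+/// set, \p PropertyId 'name' and \p PropertyIvar '_name', while
+/// '@dynamic name;' arrives with \p Synthesize false and no ivar.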
+///
+Decl *Sema::ActOnPropertyImplDecl(Scope *S,
+ SourceLocation AtLoc,
+ SourceLocation PropertyLoc,
+ bool Synthesize,
+ IdentifierInfo *PropertyId,
+ IdentifierInfo *PropertyIvar,
+ SourceLocation PropertyIvarLoc) {
+ ObjCContainerDecl *ClassImpDecl =
+ dyn_cast<ObjCContainerDecl>(CurContext);
+ // Make sure we have a context for the property implementation declaration.
+ if (!ClassImpDecl) {
+ Diag(AtLoc, diag::error_missing_property_context);
+ return 0;
+ }
+ if (PropertyIvarLoc.isInvalid())
+ PropertyIvarLoc = PropertyLoc;
+ ObjCPropertyDecl *property = 0;
+ ObjCInterfaceDecl* IDecl = 0;
+ // Find the class or category class where this property must have
+ // a declaration.
+ ObjCImplementationDecl *IC = 0;
+ ObjCCategoryImplDecl* CatImplClass = 0;
+ if ((IC = dyn_cast<ObjCImplementationDecl>(ClassImpDecl))) {
+ IDecl = IC->getClassInterface();
+ // We always synthesize an interface for an implementation
+ // without an interface decl. So, IDecl is always non-zero.
+ assert(IDecl &&
+ "ActOnPropertyImplDecl - @implementation without @interface");
+
+ // Look for this property declaration in the @implementation's @interface
+ property = IDecl->FindPropertyDeclaration(PropertyId);
+ if (!property) {
+ Diag(PropertyLoc, diag::error_bad_property_decl) << IDecl->getDeclName();
+ return 0;
+ }
+ unsigned PIkind = property->getPropertyAttributesAsWritten();
+ if ((PIkind & (ObjCPropertyDecl::OBJC_PR_atomic |
+ ObjCPropertyDecl::OBJC_PR_nonatomic) ) == 0) {
+ if (AtLoc.isValid())
+ Diag(AtLoc, diag::warn_implicit_atomic_property);
+ else
+ Diag(IC->getLocation(), diag::warn_auto_implicit_atomic_property);
+ Diag(property->getLocation(), diag::note_property_declare);
+ }
+
+ if (const ObjCCategoryDecl *CD =
+ dyn_cast<ObjCCategoryDecl>(property->getDeclContext())) {
+ if (!CD->IsClassExtension()) {
+ Diag(PropertyLoc, diag::error_category_property) << CD->getDeclName();
+ Diag(property->getLocation(), diag::note_property_declare);
+ return 0;
+ }
+ }
+ } else if ((CatImplClass = dyn_cast<ObjCCategoryImplDecl>(ClassImpDecl))) {
+ if (Synthesize) {
+ Diag(AtLoc, diag::error_synthesize_category_decl);
+ return 0;
+ }
+ IDecl = CatImplClass->getClassInterface();
+ if (!IDecl) {
+ Diag(AtLoc, diag::error_missing_property_interface);
+ return 0;
+ }
+ ObjCCategoryDecl *Category =
+ IDecl->FindCategoryDeclaration(CatImplClass->getIdentifier());
+
+ // If the category for this implementation is not found, it is an error
+ // which has already been reported earlier.
+ if (!Category)
+ return 0;
+ // Look for this property declaration in @implementation's category
+ property = Category->FindPropertyDeclaration(PropertyId);
+ if (!property) {
+ Diag(PropertyLoc, diag::error_bad_category_property_decl)
+ << Category->getDeclName();
+ return 0;
+ }
+ } else {
+ Diag(AtLoc, diag::error_bad_property_context);
+ return 0;
+ }
+ ObjCIvarDecl *Ivar = 0;
+ // Check that we have a valid, previously declared ivar for @synthesize
+ if (Synthesize) {
+ // @synthesize
+ if (!PropertyIvar)
+ PropertyIvar = PropertyId;
+ // Check that this is a previously declared 'ivar' in 'IDecl' interface
+ ObjCInterfaceDecl *ClassDeclared;
+ Ivar = IDecl->lookupInstanceVariable(PropertyIvar, ClassDeclared);
+ QualType PropType = property->getType();
+ QualType PropertyIvarType = PropType.getNonReferenceType();
+
+ if (getLangOpts().ObjCAutoRefCount &&
+ (property->getPropertyAttributesAsWritten() &
+ ObjCPropertyDecl::OBJC_PR_readonly) &&
+ PropertyIvarType->isObjCRetainableType()) {
+ setImpliedPropertyAttributeForReadOnlyProperty(property, Ivar);
+ }
+
+ ObjCPropertyDecl::PropertyAttributeKind kind
+ = property->getPropertyAttributes();
+
+ // Add GC __weak to the ivar type if the property is weak.
+ if ((kind & ObjCPropertyDecl::OBJC_PR_weak) &&
+ getLangOpts().getGC() != LangOptions::NonGC) {
+ assert(!getLangOpts().ObjCAutoRefCount);
+ if (PropertyIvarType.isObjCGCStrong()) {
+ Diag(PropertyLoc, diag::err_gc_weak_property_strong_type);
+ Diag(property->getLocation(), diag::note_property_declare);
+ } else {
+ PropertyIvarType =
+ Context.getObjCGCQualType(PropertyIvarType, Qualifiers::Weak);
+ }
+ }
+
+ if (!Ivar) {
+ // In ARC, give the ivar a lifetime qualifier based on the
+ // property attributes.
+ if (getLangOpts().ObjCAutoRefCount &&
+ !PropertyIvarType.getObjCLifetime() &&
+ PropertyIvarType->isObjCRetainableType()) {
+
+ // It's an error if we have to do this and the user didn't
+ // explicitly write an ownership attribute on the property.
+ if (!property->hasWrittenStorageAttribute() &&
+ !(kind & ObjCPropertyDecl::OBJC_PR_strong)) {
+ Diag(PropertyLoc,
+ diag::err_arc_objc_property_default_assign_on_object);
+ Diag(property->getLocation(), diag::note_property_declare);
+ } else {
+ Qualifiers::ObjCLifetime lifetime =
+ getImpliedARCOwnership(kind, PropertyIvarType);
+ assert(lifetime && "no lifetime for property?");
+ if (lifetime == Qualifiers::OCL_Weak) {
+ bool err = false;
+ if (const ObjCObjectPointerType *ObjT =
+ PropertyIvarType->getAs<ObjCObjectPointerType>())
+ if (ObjT->getInterfaceDecl()->isArcWeakrefUnavailable()) {
+ Diag(PropertyLoc, diag::err_arc_weak_unavailable_property);
+ Diag(property->getLocation(), diag::note_property_declare);
+ err = true;
+ }
+ if (!err && !getLangOpts().ObjCRuntimeHasWeak) {
+ Diag(PropertyLoc, diag::err_arc_weak_no_runtime);
+ Diag(property->getLocation(), diag::note_property_declare);
+ }
+ }
+
+ Qualifiers qs;
+ qs.addObjCLifetime(lifetime);
+ PropertyIvarType = Context.getQualifiedType(PropertyIvarType, qs);
+ }
+ }
+
+ if (kind & ObjCPropertyDecl::OBJC_PR_weak &&
+ !getLangOpts().ObjCAutoRefCount &&
+ getLangOpts().getGC() == LangOptions::NonGC) {
+ Diag(PropertyLoc, diag::error_synthesize_weak_non_arc_or_gc);
+ Diag(property->getLocation(), diag::note_property_declare);
+ }
+
+ Ivar = ObjCIvarDecl::Create(Context, ClassImpDecl,
+ PropertyIvarLoc,PropertyIvarLoc, PropertyIvar,
+ PropertyIvarType, /*Dinfo=*/0,
+ ObjCIvarDecl::Private,
+ (Expr *)0, true);
+ ClassImpDecl->addDecl(Ivar);
+ IDecl->makeDeclVisibleInContext(Ivar);
+ property->setPropertyIvarDecl(Ivar);
+
+ if (!getLangOpts().ObjCNonFragileABI)
+ Diag(PropertyLoc, diag::error_missing_property_ivar_decl) << PropertyId;
+ // Note! I deliberately want it to fall thru so that we have a
+ // property implementation and avoid future warnings.
+ } else if (getLangOpts().ObjCNonFragileABI &&
+ !declaresSameEntity(ClassDeclared, IDecl)) {
+ Diag(PropertyLoc, diag::error_ivar_in_superclass_use)
+ << property->getDeclName() << Ivar->getDeclName()
+ << ClassDeclared->getDeclName();
+ Diag(Ivar->getLocation(), diag::note_previous_access_declaration)
+ << Ivar << Ivar->getName();
+ // Note! I deliberately want it to fall thru so more errors are caught.
+ }
+ QualType IvarType = Context.getCanonicalType(Ivar->getType());
+
+ // Check that type of property and its ivar are type compatible.
+ if (Context.getCanonicalType(PropertyIvarType) != IvarType) {
+ bool compat = false;
+ if (isa<ObjCObjectPointerType>(PropertyIvarType)
+ && isa<ObjCObjectPointerType>(IvarType))
+ compat =
+ Context.canAssignObjCInterfaces(
+ PropertyIvarType->getAs<ObjCObjectPointerType>(),
+ IvarType->getAs<ObjCObjectPointerType>());
+ else {
+ compat = (CheckAssignmentConstraints(PropertyIvarLoc, PropertyIvarType,
+ IvarType)
+ == Compatible);
+ }
+ if (!compat) {
+ Diag(PropertyLoc, diag::error_property_ivar_type)
+ << property->getDeclName() << PropType
+ << Ivar->getDeclName() << IvarType;
+ Diag(Ivar->getLocation(), diag::note_ivar_decl);
+ // Note! I deliberately want it to fall thru so that we have a
+ // property implementation and avoid future warnings.
+ }
+
+ // FIXME! Rules for properties are somewhat different from those
+ // for assignments. Use a new routine to consolidate all cases;
+ // specifically for property redeclarations as well as for ivars.
+ QualType lhsType =Context.getCanonicalType(PropertyIvarType).getUnqualifiedType();
+ QualType rhsType =Context.getCanonicalType(IvarType).getUnqualifiedType();
+ if (lhsType != rhsType &&
+ lhsType->isArithmeticType()) {
+ Diag(PropertyLoc, diag::error_property_ivar_type)
+ << property->getDeclName() << PropType
+ << Ivar->getDeclName() << IvarType;
+ Diag(Ivar->getLocation(), diag::note_ivar_decl);
+ // Fall thru - see previous comment
+ }
+ // __weak is explicit. So it works on Canonical type.
+ if ((PropType.isObjCGCWeak() && !IvarType.isObjCGCWeak() &&
+ getLangOpts().getGC() != LangOptions::NonGC)) {
+ Diag(PropertyLoc, diag::error_weak_property)
+ << property->getDeclName() << Ivar->getDeclName();
+ Diag(Ivar->getLocation(), diag::note_ivar_decl);
+ // Fall thru - see previous comment
+ }
+ // Fall thru - see previous comment
+ if ((property->getType()->isObjCObjectPointerType() ||
+ PropType.isObjCGCStrong()) && IvarType.isObjCGCWeak() &&
+ getLangOpts().getGC() != LangOptions::NonGC) {
+ Diag(PropertyLoc, diag::error_strong_property)
+ << property->getDeclName() << Ivar->getDeclName();
+ // Fall thru - see previous comment
+ }
+ }
+ if (getLangOpts().ObjCAutoRefCount)
+ checkARCPropertyImpl(*this, PropertyLoc, property, Ivar);
+ } else if (PropertyIvar)
+ // @dynamic
+ Diag(PropertyLoc, diag::error_dynamic_property_ivar_decl);
+
+ assert (property && "ActOnPropertyImplDecl - property declaration missing");
+ ObjCPropertyImplDecl *PIDecl =
+ ObjCPropertyImplDecl::Create(Context, CurContext, AtLoc, PropertyLoc,
+ property,
+ (Synthesize ?
+ ObjCPropertyImplDecl::Synthesize
+ : ObjCPropertyImplDecl::Dynamic),
+ Ivar, PropertyIvarLoc);
+ if (ObjCMethodDecl *getterMethod = property->getGetterMethodDecl()) {
+ getterMethod->createImplicitParams(Context, IDecl);
+ if (getLangOpts().CPlusPlus && Synthesize &&
+ Ivar->getType()->isRecordType()) {
+ // For Objective-C++, we need to synthesize the AST for the ivar object to be
+ // returned by the getter, as it must conform to C++'s copy-return rules.
+ // FIXME. Eventually we want to do this for Objective-C as well.
+ ImplicitParamDecl *SelfDecl = getterMethod->getSelfDecl();
+ DeclRefExpr *SelfExpr =
+ new (Context) DeclRefExpr(SelfDecl, false, SelfDecl->getType(),
+ VK_RValue, SourceLocation());
+ Expr *IvarRefExpr =
+ new (Context) ObjCIvarRefExpr(Ivar, Ivar->getType(), AtLoc,
+ SelfExpr, true, true);
+ ExprResult Res =
+ PerformCopyInitialization(InitializedEntity::InitializeResult(
+ SourceLocation(),
+ getterMethod->getResultType(),
+ /*NRVO=*/false),
+ SourceLocation(),
+ Owned(IvarRefExpr));
+ if (!Res.isInvalid()) {
+ Expr *ResExpr = Res.takeAs<Expr>();
+ if (ResExpr)
+ ResExpr = MaybeCreateExprWithCleanups(ResExpr);
+ PIDecl->setGetterCXXConstructor(ResExpr);
+ }
+ }
+ if (property->hasAttr<NSReturnsNotRetainedAttr>() &&
+ !getterMethod->hasAttr<NSReturnsNotRetainedAttr>()) {
+ Diag(getterMethod->getLocation(),
+ diag::warn_property_getter_owning_mismatch);
+ Diag(property->getLocation(), diag::note_property_declare);
+ }
+ }
+ if (ObjCMethodDecl *setterMethod = property->getSetterMethodDecl()) {
+ setterMethod->createImplicitParams(Context, IDecl);
+ if (getLangOpts().CPlusPlus && Synthesize
+ && Ivar->getType()->isRecordType()) {
+ // FIXME. Eventually we want to do this for Objective-C as well.
+ ImplicitParamDecl *SelfDecl = setterMethod->getSelfDecl();
+ DeclRefExpr *SelfExpr =
+ new (Context) DeclRefExpr(SelfDecl, false, SelfDecl->getType(),
+ VK_RValue, SourceLocation());
+ Expr *lhs =
+ new (Context) ObjCIvarRefExpr(Ivar, Ivar->getType(), AtLoc,
+ SelfExpr, true, true);
+ ObjCMethodDecl::param_iterator P = setterMethod->param_begin();
+ ParmVarDecl *Param = (*P);
+ QualType T = Param->getType().getNonReferenceType();
+ Expr *rhs = new (Context) DeclRefExpr(Param, false, T,
+ VK_LValue, SourceLocation());
+ ExprResult Res = BuildBinOp(S, lhs->getLocEnd(),
+ BO_Assign, lhs, rhs);
+ if (property->getPropertyAttributes() &
+ ObjCPropertyDecl::OBJC_PR_atomic) {
+ Expr *callExpr = Res.takeAs<Expr>();
+ if (const CXXOperatorCallExpr *CXXCE =
+ dyn_cast_or_null<CXXOperatorCallExpr>(callExpr))
+ if (const FunctionDecl *FuncDecl = CXXCE->getDirectCallee())
+ if (!FuncDecl->isTrivial())
+ if (property->getType()->isReferenceType()) {
+ Diag(PropertyLoc,
+ diag::err_atomic_property_nontrivial_assign_op)
+ << property->getType();
+ Diag(FuncDecl->getLocStart(),
+ diag::note_callee_decl) << FuncDecl;
+ }
+ }
+ PIDecl->setSetterCXXAssignment(Res.takeAs<Expr>());
+ }
+ }
+
+ if (IC) {
+ if (Synthesize)
+ if (ObjCPropertyImplDecl *PPIDecl =
+ IC->FindPropertyImplIvarDecl(PropertyIvar)) {
+ Diag(PropertyLoc, diag::error_duplicate_ivar_use)
+ << PropertyId << PPIDecl->getPropertyDecl()->getIdentifier()
+ << PropertyIvar;
+ Diag(PPIDecl->getLocation(), diag::note_previous_use);
+ }
+
+ if (ObjCPropertyImplDecl *PPIDecl
+ = IC->FindPropertyImplDecl(PropertyId)) {
+ Diag(PropertyLoc, diag::error_property_implemented) << PropertyId;
+ Diag(PPIDecl->getLocation(), diag::note_previous_declaration);
+ return 0;
+ }
+ IC->addPropertyImplementation(PIDecl);
+ if (getLangOpts().ObjCDefaultSynthProperties &&
+ getLangOpts().ObjCNonFragileABI2 &&
+ !IDecl->isObjCRequiresPropertyDefs()) {
+ // Diagnose if an ivar was lazily synthesized due to a previous use and if
+ // 1) the property is @dynamic or 2) the property is synthesized but
+ // requires an ivar of a different name.
+ ObjCInterfaceDecl *ClassDeclared=0;
+ ObjCIvarDecl *Ivar = 0;
+ if (!Synthesize)
+ Ivar = IDecl->lookupInstanceVariable(PropertyId, ClassDeclared);
+ else {
+ if (PropertyIvar && PropertyIvar != PropertyId)
+ Ivar = IDecl->lookupInstanceVariable(PropertyId, ClassDeclared);
+ }
+ // Issue diagnostics only if Ivar belongs to current class.
+ if (Ivar && Ivar->getSynthesize() &&
+ declaresSameEntity(IC->getClassInterface(), ClassDeclared)) {
+ Diag(Ivar->getLocation(), diag::err_undeclared_var_use)
+ << PropertyId;
+ Ivar->setInvalidDecl();
+ }
+ }
+ } else {
+ if (Synthesize)
+ if (ObjCPropertyImplDecl *PPIDecl =
+ CatImplClass->FindPropertyImplIvarDecl(PropertyIvar)) {
+ Diag(PropertyLoc, diag::error_duplicate_ivar_use)
+ << PropertyId << PPIDecl->getPropertyDecl()->getIdentifier()
+ << PropertyIvar;
+ Diag(PPIDecl->getLocation(), diag::note_previous_use);
+ }
+
+ if (ObjCPropertyImplDecl *PPIDecl =
+ CatImplClass->FindPropertyImplDecl(PropertyId)) {
+ Diag(PropertyLoc, diag::error_property_implemented) << PropertyId;
+ Diag(PPIDecl->getLocation(), diag::note_previous_declaration);
+ return 0;
+ }
+ CatImplClass->addPropertyImplementation(PIDecl);
+ }
+
+ return PIDecl;
+}
+
+//===----------------------------------------------------------------------===//
+// Helper methods.
+//===----------------------------------------------------------------------===//
+
+/// DiagnosePropertyMismatch - Compares two properties for their
+/// attributes and types and warns on a variety of inconsistencies.
+///
+void
+Sema::DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
+ ObjCPropertyDecl *SuperProperty,
+ const IdentifierInfo *inheritedName) {
+ ObjCPropertyDecl::PropertyAttributeKind CAttr =
+ Property->getPropertyAttributes();
+ ObjCPropertyDecl::PropertyAttributeKind SAttr =
+ SuperProperty->getPropertyAttributes();
+ if ((CAttr & ObjCPropertyDecl::OBJC_PR_readonly)
+ && (SAttr & ObjCPropertyDecl::OBJC_PR_readwrite))
+ Diag(Property->getLocation(), diag::warn_readonly_property)
+ << Property->getDeclName() << inheritedName;
+ if ((CAttr & ObjCPropertyDecl::OBJC_PR_copy)
+ != (SAttr & ObjCPropertyDecl::OBJC_PR_copy))
+ Diag(Property->getLocation(), diag::warn_property_attribute)
+ << Property->getDeclName() << "copy" << inheritedName;
+ else if (!(SAttr & ObjCPropertyDecl::OBJC_PR_readonly)){
+ unsigned CAttrRetain =
+ (CAttr &
+ (ObjCPropertyDecl::OBJC_PR_retain | ObjCPropertyDecl::OBJC_PR_strong));
+ unsigned SAttrRetain =
+ (SAttr &
+ (ObjCPropertyDecl::OBJC_PR_retain | ObjCPropertyDecl::OBJC_PR_strong));
+ bool CStrong = (CAttrRetain != 0);
+ bool SStrong = (SAttrRetain != 0);
+ if (CStrong != SStrong)
+ Diag(Property->getLocation(), diag::warn_property_attribute)
+ << Property->getDeclName() << "retain (or strong)" << inheritedName;
+ }
+
+ if ((CAttr & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ != (SAttr & ObjCPropertyDecl::OBJC_PR_nonatomic))
+ Diag(Property->getLocation(), diag::warn_property_attribute)
+ << Property->getDeclName() << "atomic" << inheritedName;
+ if (Property->getSetterName() != SuperProperty->getSetterName())
+ Diag(Property->getLocation(), diag::warn_property_attribute)
+ << Property->getDeclName() << "setter" << inheritedName;
+ if (Property->getGetterName() != SuperProperty->getGetterName())
+ Diag(Property->getLocation(), diag::warn_property_attribute)
+ << Property->getDeclName() << "getter" << inheritedName;
+
+ QualType LHSType =
+ Context.getCanonicalType(SuperProperty->getType());
+ QualType RHSType =
+ Context.getCanonicalType(Property->getType());
+
+ if (!Context.propertyTypesAreCompatible(LHSType, RHSType)) {
+    // Handle cases not covered above.
+    // FIXME: For future support of covariant property types, revisit this.
+ bool IncompatibleObjC = false;
+ QualType ConvertedType;
+ if (!isObjCPointerConversion(RHSType, LHSType,
+ ConvertedType, IncompatibleObjC) ||
+ IncompatibleObjC) {
+ Diag(Property->getLocation(), diag::warn_property_types_are_incompatible)
+ << Property->getType() << SuperProperty->getType() << inheritedName;
+ Diag(SuperProperty->getLocation(), diag::note_property_declare);
+ }
+ }
+}
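+
+// Illustrative example (hypothetical user code): a subclass redeclaration
+// such as
+//
+//   @interface Base : NSObject
+//   @property (nonatomic, copy) NSString *name;
+//   @end
+//
+//   @interface Sub : Base
+//   @property (nonatomic, retain) NSString *name;
+//   @end
+//
+// disagrees on the 'copy' attribute and is reported through the
+// warn_property_attribute diagnostic emitted above.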
+
+bool Sema::DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *property,
+ ObjCMethodDecl *GetterMethod,
+ SourceLocation Loc) {
+ if (GetterMethod &&
+ !Context.hasSameType(GetterMethod->getResultType().getNonReferenceType(),
+ property->getType().getNonReferenceType())) {
+ AssignConvertType result = Incompatible;
+ if (property->getType()->isObjCObjectPointerType())
+ result = CheckAssignmentConstraints(Loc, GetterMethod->getResultType(),
+ property->getType());
+ if (result != Compatible) {
+ Diag(Loc, diag::warn_accessor_property_type_mismatch)
+ << property->getDeclName()
+ << GetterMethod->getSelector();
+ Diag(GetterMethod->getLocation(), diag::note_declared_at);
+ return true;
+ }
+ }
+ return false;
+}
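+
+// Illustrative example (hypothetical user code): a getter whose result type
+// does not match the property type, such as
+//
+//   @property int count;
+//   - (float)count;
+//
+// is reported via warn_accessor_property_type_mismatch above.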
+
+/// ComparePropertiesInBaseAndSuper - This routine compares property
+/// declarations in a class and its super class, if any, and issues
+/// diagnostics for a variety of inconsistencies.
+///
+void Sema::ComparePropertiesInBaseAndSuper(ObjCInterfaceDecl *IDecl) {
+ ObjCInterfaceDecl *SDecl = IDecl->getSuperClass();
+ if (!SDecl)
+ return;
+ // FIXME: O(N^2)
+ for (ObjCInterfaceDecl::prop_iterator S = SDecl->prop_begin(),
+ E = SDecl->prop_end(); S != E; ++S) {
+ ObjCPropertyDecl *SuperPDecl = (*S);
+    // Does the property in the super class have a declaration in the
+    // current class?
+ for (ObjCInterfaceDecl::prop_iterator I = IDecl->prop_begin(),
+ E = IDecl->prop_end(); I != E; ++I) {
+ ObjCPropertyDecl *PDecl = (*I);
+ if (SuperPDecl->getIdentifier() == PDecl->getIdentifier())
+ DiagnosePropertyMismatch(PDecl, SuperPDecl,
+ SDecl->getIdentifier());
+ }
+ }
+}
+
+/// MatchOneProtocolPropertiesInClass - This routine goes through the list
+/// of properties declared in a protocol and compares their attributes against
+/// the same properties declared in the class or category.
+void
+Sema::MatchOneProtocolPropertiesInClass(Decl *CDecl,
+ ObjCProtocolDecl *PDecl) {
+ ObjCInterfaceDecl *IDecl = dyn_cast_or_null<ObjCInterfaceDecl>(CDecl);
+ if (!IDecl) {
+ // Category
+ ObjCCategoryDecl *CatDecl = static_cast<ObjCCategoryDecl*>(CDecl);
+ assert (CatDecl && "MatchOneProtocolPropertiesInClass");
+ if (!CatDecl->IsClassExtension())
+ for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
+ E = PDecl->prop_end(); P != E; ++P) {
+ ObjCPropertyDecl *Pr = (*P);
+ ObjCCategoryDecl::prop_iterator CP, CE;
+ // Is this property already in category's list of properties?
+ for (CP = CatDecl->prop_begin(), CE = CatDecl->prop_end(); CP!=CE; ++CP)
+ if ((*CP)->getIdentifier() == Pr->getIdentifier())
+ break;
+ if (CP != CE)
+        // The protocol property already exists in the class; diagnose any
+        // mismatch.
+ DiagnosePropertyMismatch((*CP), Pr, PDecl->getIdentifier());
+ }
+ return;
+ }
+ for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
+ E = PDecl->prop_end(); P != E; ++P) {
+ ObjCPropertyDecl *Pr = (*P);
+ ObjCInterfaceDecl::prop_iterator CP, CE;
+ // Is this property already in class's list of properties?
+ for (CP = IDecl->prop_begin(), CE = IDecl->prop_end(); CP != CE; ++CP)
+ if ((*CP)->getIdentifier() == Pr->getIdentifier())
+ break;
+ if (CP != CE)
+      // The protocol property already exists in the class; diagnose any
+      // mismatch.
+ DiagnosePropertyMismatch((*CP), Pr, PDecl->getIdentifier());
+ }
+}
+
+/// CompareProperties - This routine compares properties
+/// declared in 'ClassOrProtocol' (which can be a class or an inherited
+/// protocol) with the list of properties for the class/category 'CDecl'.
+///
+void Sema::CompareProperties(Decl *CDecl, Decl *ClassOrProtocol) {
+ Decl *ClassDecl = ClassOrProtocol;
+ ObjCInterfaceDecl *IDecl = dyn_cast_or_null<ObjCInterfaceDecl>(CDecl);
+
+ if (!IDecl) {
+ // Category
+ ObjCCategoryDecl *CatDecl = static_cast<ObjCCategoryDecl*>(CDecl);
+ assert (CatDecl && "CompareProperties");
+ if (ObjCCategoryDecl *MDecl = dyn_cast<ObjCCategoryDecl>(ClassDecl)) {
+ for (ObjCCategoryDecl::protocol_iterator P = MDecl->protocol_begin(),
+ E = MDecl->protocol_end(); P != E; ++P)
+ // Match properties of category with those of protocol (*P)
+ MatchOneProtocolPropertiesInClass(CatDecl, *P);
+
+      // Go through the list of protocols for this category and recursively
+      // match their properties with those in the category.
+ for (ObjCCategoryDecl::protocol_iterator P = CatDecl->protocol_begin(),
+ E = CatDecl->protocol_end(); P != E; ++P)
+ CompareProperties(CatDecl, *P);
+ } else {
+ ObjCProtocolDecl *MD = cast<ObjCProtocolDecl>(ClassDecl);
+ for (ObjCProtocolDecl::protocol_iterator P = MD->protocol_begin(),
+ E = MD->protocol_end(); P != E; ++P)
+ MatchOneProtocolPropertiesInClass(CatDecl, *P);
+ }
+ return;
+ }
+
+ if (ObjCInterfaceDecl *MDecl = dyn_cast<ObjCInterfaceDecl>(ClassDecl)) {
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ P = MDecl->all_referenced_protocol_begin(),
+ E = MDecl->all_referenced_protocol_end(); P != E; ++P)
+ // Match properties of class IDecl with those of protocol (*P).
+ MatchOneProtocolPropertiesInClass(IDecl, *P);
+
+    // Go through the list of protocols for this class and recursively match
+    // their properties with those declared in the class.
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ P = IDecl->all_referenced_protocol_begin(),
+ E = IDecl->all_referenced_protocol_end(); P != E; ++P)
+ CompareProperties(IDecl, *P);
+ } else {
+ ObjCProtocolDecl *MD = cast<ObjCProtocolDecl>(ClassDecl);
+ for (ObjCProtocolDecl::protocol_iterator P = MD->protocol_begin(),
+ E = MD->protocol_end(); P != E; ++P)
+ MatchOneProtocolPropertiesInClass(IDecl, *P);
+ }
+}
+
+/// isPropertyReadonly - Return true if property is readonly, by searching
+/// for the property in the class and in its categories and implementations
+///
+bool Sema::isPropertyReadonly(ObjCPropertyDecl *PDecl,
+ ObjCInterfaceDecl *IDecl) {
+ // by far the most common case.
+ if (!PDecl->isReadOnly())
+ return false;
+  // Even if the property is read-only, it is not considered read-only if the
+  // interface has a user-defined setter.
+ if (IDecl->getInstanceMethod(PDecl->getSetterName()))
+ return false;
+
+  // The main class has the property as 'readonly'. Search through the
+  // category list to see if the property's attribute has been overridden
+  // to 'readwrite'.
+ for (ObjCCategoryDecl *Category = IDecl->getCategoryList();
+ Category; Category = Category->getNextClassCategory()) {
+    // Even if the property is read-only, it is not considered read-only if a
+    // category has a user-defined setter.
+ if (Category->getInstanceMethod(PDecl->getSetterName()))
+ return false;
+ ObjCPropertyDecl *P =
+ Category->FindPropertyDeclaration(PDecl->getIdentifier());
+ if (P && !P->isReadOnly())
+ return false;
+ }
+
+ // Also, check for definition of a setter method in the implementation if
+ // all else failed.
+ if (ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(CurContext)) {
+ if (ObjCImplementationDecl *IMD =
+ dyn_cast<ObjCImplementationDecl>(OMD->getDeclContext())) {
+ if (IMD->getInstanceMethod(PDecl->getSetterName()))
+ return false;
+ } else if (ObjCCategoryImplDecl *CIMD =
+ dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext())) {
+ if (CIMD->getInstanceMethod(PDecl->getSetterName()))
+ return false;
+ }
+ }
+ // Lastly, look through the implementation (if one is in scope).
+ if (ObjCImplementationDecl *ImpDecl = IDecl->getImplementation())
+ if (ImpDecl->getInstanceMethod(PDecl->getSetterName()))
+ return false;
+  // If all else fails, look at the super class.
+ if (ObjCInterfaceDecl *SIDecl = IDecl->getSuperClass())
+ return isPropertyReadonly(PDecl, SIDecl);
+ return true;
+}
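+
+// Illustrative example (hypothetical user code): a class extension that
+// redeclares a readonly property as readwrite makes isPropertyReadonly()
+// return false, e.g.
+//
+//   @interface Foo : NSObject
+//   @property (readonly) NSString *title;
+//   @end
+//
+//   @interface Foo ()
+//   @property (readwrite, copy) NSString *title;
+//   @end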
+
+/// CollectImmediateProperties - This routine collects all properties in
+/// the class and its conforming protocols, but not those in its super class.
+void Sema::CollectImmediateProperties(ObjCContainerDecl *CDecl,
+ llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap,
+ llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& SuperPropMap) {
+ if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
+ for (ObjCContainerDecl::prop_iterator P = IDecl->prop_begin(),
+ E = IDecl->prop_end(); P != E; ++P) {
+ ObjCPropertyDecl *Prop = (*P);
+ PropMap[Prop->getIdentifier()] = Prop;
+ }
+ // scan through class's protocols.
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ PI = IDecl->all_referenced_protocol_begin(),
+ E = IDecl->all_referenced_protocol_end(); PI != E; ++PI)
+ CollectImmediateProperties((*PI), PropMap, SuperPropMap);
+ }
+ if (ObjCCategoryDecl *CATDecl = dyn_cast<ObjCCategoryDecl>(CDecl)) {
+ if (!CATDecl->IsClassExtension())
+ for (ObjCContainerDecl::prop_iterator P = CATDecl->prop_begin(),
+ E = CATDecl->prop_end(); P != E; ++P) {
+ ObjCPropertyDecl *Prop = (*P);
+ PropMap[Prop->getIdentifier()] = Prop;
+ }
+ // scan through class's protocols.
+ for (ObjCCategoryDecl::protocol_iterator PI = CATDecl->protocol_begin(),
+ E = CATDecl->protocol_end(); PI != E; ++PI)
+ CollectImmediateProperties((*PI), PropMap, SuperPropMap);
+ }
+ else if (ObjCProtocolDecl *PDecl = dyn_cast<ObjCProtocolDecl>(CDecl)) {
+ for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
+ E = PDecl->prop_end(); P != E; ++P) {
+ ObjCPropertyDecl *Prop = (*P);
+ ObjCPropertyDecl *PropertyFromSuper = SuperPropMap[Prop->getIdentifier()];
+      // Exclude the property if the class's super-class (or a protocol it
+      // conforms to) already declares it, as the super-class has to
+      // implement the property.
+ if (!PropertyFromSuper ||
+ PropertyFromSuper->getIdentifier() != Prop->getIdentifier()) {
+ ObjCPropertyDecl *&PropEntry = PropMap[Prop->getIdentifier()];
+ if (!PropEntry)
+ PropEntry = Prop;
+ }
+ }
+ // scan through protocol's protocols.
+ for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(),
+ E = PDecl->protocol_end(); PI != E; ++PI)
+ CollectImmediateProperties((*PI), PropMap, SuperPropMap);
+ }
+}
+
+/// CollectClassPropertyImplementations - This routine collects the list of
+/// properties to be implemented in the class. This includes the class's own
+/// properties and those of its conforming protocols.
+static void CollectClassPropertyImplementations(ObjCContainerDecl *CDecl,
+ llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap) {
+ if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl)) {
+ for (ObjCContainerDecl::prop_iterator P = IDecl->prop_begin(),
+ E = IDecl->prop_end(); P != E; ++P) {
+ ObjCPropertyDecl *Prop = (*P);
+ PropMap[Prop->getIdentifier()] = Prop;
+ }
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ PI = IDecl->all_referenced_protocol_begin(),
+ E = IDecl->all_referenced_protocol_end(); PI != E; ++PI)
+ CollectClassPropertyImplementations((*PI), PropMap);
+ }
+ else if (ObjCProtocolDecl *PDecl = dyn_cast<ObjCProtocolDecl>(CDecl)) {
+ for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
+ E = PDecl->prop_end(); P != E; ++P) {
+ ObjCPropertyDecl *Prop = (*P);
+ if (!PropMap.count(Prop->getIdentifier()))
+ PropMap[Prop->getIdentifier()] = Prop;
+ }
+ // scan through protocol's protocols.
+ for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(),
+ E = PDecl->protocol_end(); PI != E; ++PI)
+ CollectClassPropertyImplementations((*PI), PropMap);
+ }
+}
+
+/// CollectSuperClassPropertyImplementations - This routine collects the list
+/// of properties to be implemented in the super class(es), including those
+/// coming from their conforming protocols.
+static void CollectSuperClassPropertyImplementations(ObjCInterfaceDecl *CDecl,
+ llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>& PropMap) {
+ if (ObjCInterfaceDecl *SDecl = CDecl->getSuperClass()) {
+ while (SDecl) {
+ CollectClassPropertyImplementations(SDecl, PropMap);
+ SDecl = SDecl->getSuperClass();
+ }
+ }
+}
+
+/// LookupPropertyDecl - Looks up a property in the current class and all
+/// its protocols.
+ObjCPropertyDecl *Sema::LookupPropertyDecl(const ObjCContainerDecl *CDecl,
+ IdentifierInfo *II) {
+ if (const ObjCInterfaceDecl *IDecl =
+ dyn_cast<ObjCInterfaceDecl>(CDecl)) {
+ for (ObjCContainerDecl::prop_iterator P = IDecl->prop_begin(),
+ E = IDecl->prop_end(); P != E; ++P) {
+ ObjCPropertyDecl *Prop = (*P);
+ if (Prop->getIdentifier() == II)
+ return Prop;
+ }
+ // scan through class's protocols.
+ for (ObjCInterfaceDecl::all_protocol_iterator
+ PI = IDecl->all_referenced_protocol_begin(),
+ E = IDecl->all_referenced_protocol_end(); PI != E; ++PI) {
+ ObjCPropertyDecl *Prop = LookupPropertyDecl((*PI), II);
+ if (Prop)
+ return Prop;
+ }
+ }
+ else if (const ObjCProtocolDecl *PDecl =
+ dyn_cast<ObjCProtocolDecl>(CDecl)) {
+ for (ObjCProtocolDecl::prop_iterator P = PDecl->prop_begin(),
+ E = PDecl->prop_end(); P != E; ++P) {
+ ObjCPropertyDecl *Prop = (*P);
+ if (Prop->getIdentifier() == II)
+ return Prop;
+ }
+ // scan through protocol's protocols.
+ for (ObjCProtocolDecl::protocol_iterator PI = PDecl->protocol_begin(),
+ E = PDecl->protocol_end(); PI != E; ++PI) {
+ ObjCPropertyDecl *Prop = LookupPropertyDecl((*PI), II);
+ if (Prop)
+ return Prop;
+ }
+ }
+ return 0;
+}
+
+static IdentifierInfo * getDefaultSynthIvarName(ObjCPropertyDecl *Prop,
+ ASTContext &Ctx) {
+ SmallString<128> ivarName;
+ {
+ llvm::raw_svector_ostream os(ivarName);
+ os << '_' << Prop->getIdentifier()->getName();
+ }
+ return &Ctx.Idents.get(ivarName.str());
+}
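+
+// Illustrative example: under default synthesis, a property declared as
+//
+//   @property int count;
+//
+// is backed by an ivar named '_count', as produced by the helper above.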
+
+/// DefaultSynthesizeProperties - This routine default synthesizes all
+/// properties which must be synthesized in class's @implementation.
+void Sema::DefaultSynthesizeProperties(Scope *S, ObjCImplDecl* IMPDecl,
+ ObjCInterfaceDecl *IDecl) {
+
+ llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*> PropMap;
+ CollectClassPropertyImplementations(IDecl, PropMap);
+ if (PropMap.empty())
+ return;
+ llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*> SuperPropMap;
+ CollectSuperClassPropertyImplementations(IDecl, SuperPropMap);
+
+ for (llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>::iterator
+ P = PropMap.begin(), E = PropMap.end(); P != E; ++P) {
+ ObjCPropertyDecl *Prop = P->second;
+    // If the property is to be implemented in the super class, ignore it.
+ if (SuperPropMap[Prop->getIdentifier()])
+ continue;
+    // Is there a matching property @synthesize/@dynamic?
+ if (Prop->isInvalidDecl() ||
+ Prop->getPropertyImplementation() == ObjCPropertyDecl::Optional ||
+ IMPDecl->FindPropertyImplIvarDecl(Prop->getIdentifier()))
+ continue;
+ // Property may have been synthesized by user.
+ if (IMPDecl->FindPropertyImplDecl(Prop->getIdentifier()))
+ continue;
+ if (IMPDecl->getInstanceMethod(Prop->getGetterName())) {
+ if (Prop->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_readonly)
+ continue;
+ if (IMPDecl->getInstanceMethod(Prop->getSetterName()))
+ continue;
+ }
+ if (isa<ObjCProtocolDecl>(Prop->getDeclContext())) {
+ // We won't auto-synthesize properties declared in protocols.
+ Diag(IMPDecl->getLocation(),
+ diag::warn_auto_synthesizing_protocol_property);
+ Diag(Prop->getLocation(), diag::note_property_declare);
+ continue;
+ }
+
+ // We use invalid SourceLocations for the synthesized ivars since they
+ // aren't really synthesized at a particular location; they just exist.
+ // Saying that they are located at the @implementation isn't really going
+ // to help users.
+ ActOnPropertyImplDecl(S, SourceLocation(), SourceLocation(),
+ true,
+ /* property = */ Prop->getIdentifier(),
+ /* ivar = */ getDefaultSynthIvarName(Prop, Context),
+ SourceLocation());
+ }
+}
+
+void Sema::DefaultSynthesizeProperties(Scope *S, Decl *D) {
+ if (!LangOpts.ObjCDefaultSynthProperties || !LangOpts.ObjCNonFragileABI2)
+ return;
+ ObjCImplementationDecl *IC=dyn_cast_or_null<ObjCImplementationDecl>(D);
+ if (!IC)
+ return;
+ if (ObjCInterfaceDecl* IDecl = IC->getClassInterface())
+ if (!IDecl->isObjCRequiresPropertyDefs())
+ DefaultSynthesizeProperties(S, IC, IDecl);
+}
+
+void Sema::DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
+ ObjCContainerDecl *CDecl,
+ const llvm::DenseSet<Selector>& InsMap) {
+ llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*> SuperPropMap;
+ if (ObjCInterfaceDecl *IDecl = dyn_cast<ObjCInterfaceDecl>(CDecl))
+ CollectSuperClassPropertyImplementations(IDecl, SuperPropMap);
+
+ llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*> PropMap;
+ CollectImmediateProperties(CDecl, PropMap, SuperPropMap);
+ if (PropMap.empty())
+ return;
+
+ llvm::DenseSet<ObjCPropertyDecl *> PropImplMap;
+ for (ObjCImplDecl::propimpl_iterator
+ I = IMPDecl->propimpl_begin(),
+ EI = IMPDecl->propimpl_end(); I != EI; ++I)
+ PropImplMap.insert((*I)->getPropertyDecl());
+
+ for (llvm::DenseMap<IdentifierInfo *, ObjCPropertyDecl*>::iterator
+ P = PropMap.begin(), E = PropMap.end(); P != E; ++P) {
+ ObjCPropertyDecl *Prop = P->second;
+    // Is there a matching property @synthesize/@dynamic?
+ if (Prop->isInvalidDecl() ||
+ Prop->getPropertyImplementation() == ObjCPropertyDecl::Optional ||
+ PropImplMap.count(Prop) || Prop->hasAttr<UnavailableAttr>())
+ continue;
+ if (!InsMap.count(Prop->getGetterName())) {
+ Diag(IMPDecl->getLocation(),
+ isa<ObjCCategoryDecl>(CDecl) ?
+ diag::warn_setter_getter_impl_required_in_category :
+ diag::warn_setter_getter_impl_required)
+ << Prop->getDeclName() << Prop->getGetterName();
+ Diag(Prop->getLocation(),
+ diag::note_property_declare);
+ if (LangOpts.ObjCDefaultSynthProperties && LangOpts.ObjCNonFragileABI2)
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(CDecl))
+ if (const ObjCInterfaceDecl *RID = ID->isObjCRequiresPropertyDefs())
+ Diag(RID->getLocation(), diag::note_suppressed_class_declare);
+
+ }
+
+ if (!Prop->isReadOnly() && !InsMap.count(Prop->getSetterName())) {
+ Diag(IMPDecl->getLocation(),
+ isa<ObjCCategoryDecl>(CDecl) ?
+ diag::warn_setter_getter_impl_required_in_category :
+ diag::warn_setter_getter_impl_required)
+ << Prop->getDeclName() << Prop->getSetterName();
+ Diag(Prop->getLocation(),
+ diag::note_property_declare);
+ if (LangOpts.ObjCDefaultSynthProperties && LangOpts.ObjCNonFragileABI2)
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(CDecl))
+ if (const ObjCInterfaceDecl *RID = ID->isObjCRequiresPropertyDefs())
+ Diag(RID->getLocation(), diag::note_suppressed_class_declare);
+ }
+ }
+}
+
+void
+Sema::AtomicPropertySetterGetterRules (ObjCImplDecl* IMPDecl,
+ ObjCContainerDecl* IDecl) {
+ // Rules apply in non-GC mode only
+ if (getLangOpts().getGC() != LangOptions::NonGC)
+ return;
+ for (ObjCContainerDecl::prop_iterator I = IDecl->prop_begin(),
+ E = IDecl->prop_end();
+ I != E; ++I) {
+ ObjCPropertyDecl *Property = (*I);
+ ObjCMethodDecl *GetterMethod = 0;
+ ObjCMethodDecl *SetterMethod = 0;
+ bool LookedUpGetterSetter = false;
+
+ unsigned Attributes = Property->getPropertyAttributes();
+ unsigned AttributesAsWritten = Property->getPropertyAttributesAsWritten();
+
+ if (!(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_atomic) &&
+ !(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_nonatomic)) {
+ GetterMethod = IMPDecl->getInstanceMethod(Property->getGetterName());
+ SetterMethod = IMPDecl->getInstanceMethod(Property->getSetterName());
+ LookedUpGetterSetter = true;
+ if (GetterMethod) {
+ Diag(GetterMethod->getLocation(),
+ diag::warn_default_atomic_custom_getter_setter)
+ << Property->getIdentifier() << 0;
+ Diag(Property->getLocation(), diag::note_property_declare);
+ }
+ if (SetterMethod) {
+ Diag(SetterMethod->getLocation(),
+ diag::warn_default_atomic_custom_getter_setter)
+ << Property->getIdentifier() << 1;
+ Diag(Property->getLocation(), diag::note_property_declare);
+ }
+ }
+
+    // We only care about readwrite atomic properties.
+ if ((Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) ||
+ !(Attributes & ObjCPropertyDecl::OBJC_PR_readwrite))
+ continue;
+ if (const ObjCPropertyImplDecl *PIDecl
+ = IMPDecl->FindPropertyImplDecl(Property->getIdentifier())) {
+ if (PIDecl->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic)
+ continue;
+ if (!LookedUpGetterSetter) {
+ GetterMethod = IMPDecl->getInstanceMethod(Property->getGetterName());
+ SetterMethod = IMPDecl->getInstanceMethod(Property->getSetterName());
+ LookedUpGetterSetter = true;
+ }
+ if ((GetterMethod && !SetterMethod) || (!GetterMethod && SetterMethod)) {
+ SourceLocation MethodLoc =
+ (GetterMethod ? GetterMethod->getLocation()
+ : SetterMethod->getLocation());
+ Diag(MethodLoc, diag::warn_atomic_property_rule)
+ << Property->getIdentifier() << (GetterMethod != 0)
+ << (SetterMethod != 0);
+ // fixit stuff.
+ if (!AttributesAsWritten) {
+ if (Property->getLParenLoc().isValid()) {
+ // @property () ... case.
+ SourceRange PropSourceRange(Property->getAtLoc(),
+ Property->getLParenLoc());
+ Diag(Property->getLocation(), diag::note_atomic_property_fixup_suggest) <<
+ FixItHint::CreateReplacement(PropSourceRange, "@property (nonatomic");
+ }
+ else {
+ //@property id etc.
+ SourceLocation endLoc =
+ Property->getTypeSourceInfo()->getTypeLoc().getBeginLoc();
+ endLoc = endLoc.getLocWithOffset(-1);
+ SourceRange PropSourceRange(Property->getAtLoc(), endLoc);
+ Diag(Property->getLocation(), diag::note_atomic_property_fixup_suggest) <<
+ FixItHint::CreateReplacement(PropSourceRange, "@property (nonatomic) ");
+ }
+ }
+ else if (!(AttributesAsWritten & ObjCPropertyDecl::OBJC_PR_atomic)) {
+ // @property () ... case.
+ SourceLocation endLoc = Property->getLParenLoc();
+ SourceRange PropSourceRange(Property->getAtLoc(), endLoc);
+ Diag(Property->getLocation(), diag::note_atomic_property_fixup_suggest) <<
+ FixItHint::CreateReplacement(PropSourceRange, "@property (nonatomic, ");
+ }
+ else
+ Diag(MethodLoc, diag::note_atomic_property_fixup_suggest);
+ Diag(Property->getLocation(), diag::note_property_declare);
+ }
+ }
+ }
+}
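+
+// Illustrative example (hypothetical user code): pairing a synthesized
+// accessor with a hand-written one for a default-atomic, readwrite property,
+// e.g.
+//
+//   @property (readwrite) int value;   // atomic by default
+//   ...
+//   @synthesize value;
+//   - (void)setValue:(int)v { ... }
+//
+// is flagged by warn_atomic_property_rule above, and the fix-it suggests
+// rewriting the declaration as '@property (nonatomic, readwrite) int value;'.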
+
+void Sema::DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D) {
+ if (getLangOpts().getGC() == LangOptions::GCOnly)
+ return;
+
+ for (ObjCImplementationDecl::propimpl_iterator
+ i = D->propimpl_begin(), e = D->propimpl_end(); i != e; ++i) {
+ ObjCPropertyImplDecl *PID = *i;
+ if (PID->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
+ continue;
+
+ const ObjCPropertyDecl *PD = PID->getPropertyDecl();
+ if (PD && !PD->hasAttr<NSReturnsNotRetainedAttr>() &&
+ !D->getInstanceMethod(PD->getGetterName())) {
+ ObjCMethodDecl *method = PD->getGetterMethodDecl();
+ if (!method)
+ continue;
+ ObjCMethodFamily family = method->getMethodFamily();
+ if (family == OMF_alloc || family == OMF_copy ||
+ family == OMF_mutableCopy || family == OMF_new) {
+ if (getLangOpts().ObjCAutoRefCount)
+ Diag(PID->getLocation(), diag::err_ownin_getter_rule);
+ else
+ Diag(PID->getLocation(), diag::warn_owning_getter_rule);
+ Diag(PD->getLocation(), diag::note_property_declare);
+ }
+ }
+ }
+}
+
+/// AddPropertyAttrs - Propagates attributes from a property to the
+/// implicitly-declared getter or setter for that property.
+static void AddPropertyAttrs(Sema &S, ObjCMethodDecl *PropertyMethod,
+ ObjCPropertyDecl *Property) {
+ // Should we just clone all attributes over?
+ for (Decl::attr_iterator A = Property->attr_begin(),
+ AEnd = Property->attr_end();
+ A != AEnd; ++A) {
+ if (isa<DeprecatedAttr>(*A) ||
+ isa<UnavailableAttr>(*A) ||
+ isa<AvailabilityAttr>(*A))
+ PropertyMethod->addAttr((*A)->clone(S.Context));
+ }
+}
+
+/// ProcessPropertyDecl - Make sure that any user-defined setter/getter methods
+/// have the property type and issue diagnostics if they don't.
+/// Also synthesize a getter/setter method if none exist (and update the
+/// appropriate lookup tables). FIXME: Should reconsider whether adding
+/// synthesized methods is the "right" thing to do.
+void Sema::ProcessPropertyDecl(ObjCPropertyDecl *property,
+ ObjCContainerDecl *CD,
+ ObjCPropertyDecl *redeclaredProperty,
+ ObjCContainerDecl *lexicalDC) {
+
+ ObjCMethodDecl *GetterMethod, *SetterMethod;
+
+ GetterMethod = CD->getInstanceMethod(property->getGetterName());
+ SetterMethod = CD->getInstanceMethod(property->getSetterName());
+ DiagnosePropertyAccessorMismatch(property, GetterMethod,
+ property->getLocation());
+
+ if (SetterMethod) {
+ ObjCPropertyDecl::PropertyAttributeKind CAttr =
+ property->getPropertyAttributes();
+ if ((!(CAttr & ObjCPropertyDecl::OBJC_PR_readonly)) &&
+ Context.getCanonicalType(SetterMethod->getResultType()) !=
+ Context.VoidTy)
+ Diag(SetterMethod->getLocation(), diag::err_setter_type_void);
+ if (SetterMethod->param_size() != 1 ||
+ !Context.hasSameUnqualifiedType(
+ (*SetterMethod->param_begin())->getType().getNonReferenceType(),
+ property->getType().getNonReferenceType())) {
+ Diag(property->getLocation(),
+ diag::warn_accessor_property_type_mismatch)
+ << property->getDeclName()
+ << SetterMethod->getSelector();
+ Diag(SetterMethod->getLocation(), diag::note_declared_at);
+ }
+ }
+
+ // Synthesize getter/setter methods if none exist.
+  // Find the default getter and, if one is not found, add one.
+  // FIXME: The 'synthesized' bit we set here is misleading. We almost always
+  // synthesize these methods unless the user explicitly provided prototypes
+  // (which is odd, but allowed). Sema should be type-checking that the
+  // declarations agree in that situation (which it currently does not).
+ if (!GetterMethod) {
+ // No instance method of same name as property getter name was found.
+ // Declare a getter method and add it to the list of methods
+ // for this class.
+ SourceLocation Loc = redeclaredProperty ?
+ redeclaredProperty->getLocation() :
+ property->getLocation();
+
+ GetterMethod = ObjCMethodDecl::Create(Context, Loc, Loc,
+ property->getGetterName(),
+ property->getType(), 0, CD, /*isInstance=*/true,
+ /*isVariadic=*/false, /*isSynthesized=*/true,
+ /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+ (property->getPropertyImplementation() ==
+ ObjCPropertyDecl::Optional) ?
+ ObjCMethodDecl::Optional :
+ ObjCMethodDecl::Required);
+ CD->addDecl(GetterMethod);
+
+ AddPropertyAttrs(*this, GetterMethod, property);
+
+ // FIXME: Eventually this shouldn't be needed, as the lexical context
+ // and the real context should be the same.
+ if (lexicalDC)
+ GetterMethod->setLexicalDeclContext(lexicalDC);
+ if (property->hasAttr<NSReturnsNotRetainedAttr>())
+ GetterMethod->addAttr(
+ ::new (Context) NSReturnsNotRetainedAttr(Loc, Context));
+ } else
+    // A user-declared getter will be marked as synthesized when @synthesize
+    // of the property with the same name is seen in the @implementation.
+ GetterMethod->setSynthesized(true);
+ property->setGetterMethodDecl(GetterMethod);
+
+ // Skip setter if property is read-only.
+ if (!property->isReadOnly()) {
+    // Find the default setter and, if one is not found, add one.
+ if (!SetterMethod) {
+ // No instance method of same name as property setter name was found.
+ // Declare a setter method and add it to the list of methods
+ // for this class.
+ SourceLocation Loc = redeclaredProperty ?
+ redeclaredProperty->getLocation() :
+ property->getLocation();
+
+ SetterMethod =
+ ObjCMethodDecl::Create(Context, Loc, Loc,
+ property->getSetterName(), Context.VoidTy, 0,
+ CD, /*isInstance=*/true, /*isVariadic=*/false,
+ /*isSynthesized=*/true,
+ /*isImplicitlyDeclared=*/true,
+ /*isDefined=*/false,
+ (property->getPropertyImplementation() ==
+ ObjCPropertyDecl::Optional) ?
+ ObjCMethodDecl::Optional :
+ ObjCMethodDecl::Required);
+
+ // Invent the arguments for the setter. We don't bother making a
+ // nice name for the argument.
+ ParmVarDecl *Argument = ParmVarDecl::Create(Context, SetterMethod,
+ Loc, Loc,
+ property->getIdentifier(),
+ property->getType().getUnqualifiedType(),
+ /*TInfo=*/0,
+ SC_None,
+ SC_None,
+ 0);
+ SetterMethod->setMethodParams(Context, Argument,
+ ArrayRef<SourceLocation>());
+
+ AddPropertyAttrs(*this, SetterMethod, property);
+
+ CD->addDecl(SetterMethod);
+ // FIXME: Eventually this shouldn't be needed, as the lexical context
+ // and the real context should be the same.
+ if (lexicalDC)
+ SetterMethod->setLexicalDeclContext(lexicalDC);
+ } else
+      // A user-declared setter will be marked as synthesized when @synthesize
+      // of the property with the same name is seen in the @implementation.
+ SetterMethod->setSynthesized(true);
+ property->setSetterMethodDecl(SetterMethod);
+ }
+ // Add any synthesized methods to the global pool. This allows us to
+ // handle the following, which is supported by GCC (and part of the design).
+ //
+ // @interface Foo
+ // @property double bar;
+ // @end
+ //
+ // void thisIsUnfortunate() {
+ // id foo;
+ // double bar = [foo bar];
+ // }
+ //
+ if (GetterMethod)
+ AddInstanceMethodToGlobalPool(GetterMethod);
+ if (SetterMethod)
+ AddInstanceMethodToGlobalPool(SetterMethod);
+}
+
+void Sema::CheckObjCPropertyAttributes(Decl *PDecl,
+ SourceLocation Loc,
+ unsigned &Attributes) {
+ // FIXME: Improve the reported location.
+ if (!PDecl || PDecl->isInvalidDecl())
+ return;
+
+ ObjCPropertyDecl *PropertyDecl = cast<ObjCPropertyDecl>(PDecl);
+ QualType PropertyTy = PropertyDecl->getType();
+
+ if (getLangOpts().ObjCAutoRefCount &&
+ (Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
+ PropertyTy->isObjCRetainableType()) {
+    // A 'readonly' property with no obvious lifetime; its lifetime will be
+    // determined by its backing ivar.
+ unsigned rel = (ObjCDeclSpec::DQ_PR_unsafe_unretained |
+ ObjCDeclSpec::DQ_PR_copy |
+ ObjCDeclSpec::DQ_PR_retain |
+ ObjCDeclSpec::DQ_PR_strong |
+ ObjCDeclSpec::DQ_PR_weak |
+ ObjCDeclSpec::DQ_PR_assign);
+ if ((Attributes & rel) == 0)
+ return;
+ }
+
+ // readonly and readwrite/assign/retain/copy conflict.
+ if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
+ (Attributes & (ObjCDeclSpec::DQ_PR_readwrite |
+ ObjCDeclSpec::DQ_PR_assign |
+ ObjCDeclSpec::DQ_PR_unsafe_unretained |
+ ObjCDeclSpec::DQ_PR_copy |
+ ObjCDeclSpec::DQ_PR_retain |
+ ObjCDeclSpec::DQ_PR_strong))) {
+ const char * which = (Attributes & ObjCDeclSpec::DQ_PR_readwrite) ?
+ "readwrite" :
+ (Attributes & ObjCDeclSpec::DQ_PR_assign) ?
+ "assign" :
+ (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) ?
+ "unsafe_unretained" :
+ (Attributes & ObjCDeclSpec::DQ_PR_copy) ?
+ "copy" : "retain";
+
+ Diag(Loc, (Attributes & (ObjCDeclSpec::DQ_PR_readwrite)) ?
+ diag::err_objc_property_attr_mutually_exclusive :
+ diag::warn_objc_property_attr_mutually_exclusive)
+ << "readonly" << which;
+ }
+
+ // Check for copy or retain on non-object types.
+ if ((Attributes & (ObjCDeclSpec::DQ_PR_weak | ObjCDeclSpec::DQ_PR_copy |
+ ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong)) &&
+ !PropertyTy->isObjCRetainableType() &&
+ !PropertyDecl->getAttr<ObjCNSObjectAttr>()) {
+ Diag(Loc, diag::err_objc_property_requires_object)
+ << (Attributes & ObjCDeclSpec::DQ_PR_weak ? "weak" :
+ Attributes & ObjCDeclSpec::DQ_PR_copy ? "copy" : "retain (or strong)");
+ Attributes &= ~(ObjCDeclSpec::DQ_PR_weak | ObjCDeclSpec::DQ_PR_copy |
+ ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong);
+ PropertyDecl->setInvalidDecl();
+ }
+
+ // Check for more than one of { assign, copy, retain }.
+ if (Attributes & ObjCDeclSpec::DQ_PR_assign) {
+ if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "assign" << "copy";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_copy;
+ }
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "assign" << "retain";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
+ }
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "assign" << "strong";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_strong;
+ }
+ if (getLangOpts().ObjCAutoRefCount &&
+ (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "assign" << "weak";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ }
+ } else if (Attributes & ObjCDeclSpec::DQ_PR_unsafe_unretained) {
+ if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "unsafe_unretained" << "copy";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_copy;
+ }
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "unsafe_unretained" << "retain";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
+ }
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "unsafe_unretained" << "strong";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_strong;
+ }
+ if (getLangOpts().ObjCAutoRefCount &&
+ (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "unsafe_unretained" << "weak";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ }
+ } else if (Attributes & ObjCDeclSpec::DQ_PR_copy) {
+ if (Attributes & ObjCDeclSpec::DQ_PR_retain) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "copy" << "retain";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
+ }
+ if (Attributes & ObjCDeclSpec::DQ_PR_strong) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "copy" << "strong";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_strong;
+ }
+ if (Attributes & ObjCDeclSpec::DQ_PR_weak) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "copy" << "weak";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ }
+ }
+ else if ((Attributes & ObjCDeclSpec::DQ_PR_retain) &&
+ (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "retain" << "weak";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_retain;
+ }
+ else if ((Attributes & ObjCDeclSpec::DQ_PR_strong) &&
+ (Attributes & ObjCDeclSpec::DQ_PR_weak)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "strong" << "weak";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_weak;
+ }
+
+ if ((Attributes & ObjCDeclSpec::DQ_PR_atomic) &&
+ (Attributes & ObjCDeclSpec::DQ_PR_nonatomic)) {
+ Diag(Loc, diag::err_objc_property_attr_mutually_exclusive)
+ << "atomic" << "nonatomic";
+ Attributes &= ~ObjCDeclSpec::DQ_PR_atomic;
+ }
+
+  // Warn if the user supplied no assignment attribute, the property is
+  // readwrite, and this is an object type.
+ if (!(Attributes & (ObjCDeclSpec::DQ_PR_assign | ObjCDeclSpec::DQ_PR_copy |
+ ObjCDeclSpec::DQ_PR_unsafe_unretained |
+ ObjCDeclSpec::DQ_PR_retain | ObjCDeclSpec::DQ_PR_strong |
+ ObjCDeclSpec::DQ_PR_weak)) &&
+ PropertyTy->isObjCObjectPointerType()) {
+ if (getLangOpts().ObjCAutoRefCount)
+      // With ARC, @property definitions should default to (strong) when
+      // not specified, including when the property is 'readonly'.
+ PropertyDecl->setPropertyAttributes(ObjCPropertyDecl::OBJC_PR_strong);
+ else if (!(Attributes & ObjCDeclSpec::DQ_PR_readonly)) {
+ bool isAnyClassTy =
+ (PropertyTy->isObjCClassType() ||
+ PropertyTy->isObjCQualifiedClassType());
+      // In non-GC, non-ARC mode, 'Class' is treated as a 'void *'; no need
+      // to issue any warning.
+ if (isAnyClassTy && getLangOpts().getGC() == LangOptions::NonGC)
+ ;
+ else {
+ // Skip this warning in gc-only mode.
+ if (getLangOpts().getGC() != LangOptions::GCOnly)
+ Diag(Loc, diag::warn_objc_property_no_assignment_attribute);
+
+        // In non-GC code, warn that this is likely inappropriate.
+ if (getLangOpts().getGC() == LangOptions::NonGC)
+ Diag(Loc, diag::warn_objc_property_default_assign_on_object);
+ }
+ }
+
+ // FIXME: Implement warning dependent on NSCopying being
+ // implemented. See also:
+ // <rdar://5168496&4855821&5607453&5096644&4947311&5698469&4947014&5168496>
+ // (please trim this list while you are at it).
+ }
+
+ if (!(Attributes & ObjCDeclSpec::DQ_PR_copy)
+ &&!(Attributes & ObjCDeclSpec::DQ_PR_readonly)
+ && getLangOpts().getGC() == LangOptions::GCOnly
+ && PropertyTy->isBlockPointerType())
+ Diag(Loc, diag::warn_objc_property_copy_missing_on_block);
+ else if (getLangOpts().ObjCAutoRefCount &&
+ (Attributes & ObjCDeclSpec::DQ_PR_retain) &&
+ !(Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
+ !(Attributes & ObjCDeclSpec::DQ_PR_strong) &&
+ PropertyTy->isBlockPointerType())
+ Diag(Loc, diag::warn_objc_property_retain_of_block);
+
+ if ((Attributes & ObjCDeclSpec::DQ_PR_readonly) &&
+ (Attributes & ObjCDeclSpec::DQ_PR_setter))
+ Diag(Loc, diag::warn_objc_readonly_property_has_setter);
+
+}
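+
+// Illustrative example (hypothetical user code): mutually exclusive attribute
+// combinations such as
+//
+//   @property (readonly, readwrite) id obj;   // error: readonly vs readwrite
+//   @property (assign, copy) id other;        // error: assign vs copy
+//
+// are rejected by the checks above, and the offending attribute is dropped
+// where possible.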
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
new file mode 100644
index 0000000..284c8de
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaOverload.cpp
@@ -0,0 +1,11227 @@
+//===--- SemaOverload.cpp - C++ Overloading ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides Sema routines for C++ overloading.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Template.h"
+#include "clang/Sema/TemplateDeduction.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/TypeOrdering.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include <algorithm>
+
+namespace clang {
+using namespace sema;
+
+/// A convenience routine for creating a decayed reference to a
+/// function.
+static ExprResult
+CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn, bool HadMultipleCandidates,
+ SourceLocation Loc = SourceLocation(),
+ const DeclarationNameLoc &LocInfo = DeclarationNameLoc()){
+ DeclRefExpr *DRE = new (S.Context) DeclRefExpr(Fn, false, Fn->getType(),
+ VK_LValue, Loc, LocInfo);
+ if (HadMultipleCandidates)
+ DRE->setHadMultipleCandidates(true);
+ ExprResult E = S.Owned(DRE);
+ E = S.DefaultFunctionArrayConversion(E.take());
+ if (E.isInvalid())
+ return ExprError();
+ return move(E);
+}
+
+static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
+ bool InOverloadResolution,
+ StandardConversionSequence &SCS,
+ bool CStyle,
+ bool AllowObjCWritebackConversion);
+
+static bool IsTransparentUnionStandardConversion(Sema &S, Expr* From,
+ QualType &ToType,
+ bool InOverloadResolution,
+ StandardConversionSequence &SCS,
+ bool CStyle);
+static OverloadingResult
+IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
+ UserDefinedConversionSequence& User,
+ OverloadCandidateSet& Conversions,
+ bool AllowExplicit);
+
+
+static ImplicitConversionSequence::CompareKind
+CompareStandardConversionSequences(Sema &S,
+ const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2);
+
+static ImplicitConversionSequence::CompareKind
+CompareQualificationConversions(Sema &S,
+ const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2);
+
+static ImplicitConversionSequence::CompareKind
+CompareDerivedToBaseConversions(Sema &S,
+ const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2);
+
+
+
+/// GetConversionCategory - Retrieve the implicit conversion
+/// category corresponding to the given implicit conversion kind.
+ImplicitConversionCategory
+GetConversionCategory(ImplicitConversionKind Kind) {
+ static const ImplicitConversionCategory
+ Category[(int)ICK_Num_Conversion_Kinds] = {
+ ICC_Identity,
+ ICC_Lvalue_Transformation,
+ ICC_Lvalue_Transformation,
+ ICC_Lvalue_Transformation,
+ ICC_Identity,
+ ICC_Qualification_Adjustment,
+ ICC_Promotion,
+ ICC_Promotion,
+ ICC_Promotion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion,
+ ICC_Conversion
+ };
+ return Category[(int)Kind];
+}
+
+/// GetConversionRank - Retrieve the implicit conversion rank
+/// corresponding to the given implicit conversion kind.
+ImplicitConversionRank GetConversionRank(ImplicitConversionKind Kind) {
+ static const ImplicitConversionRank
+ Rank[(int)ICK_Num_Conversion_Kinds] = {
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Exact_Match,
+ ICR_Promotion,
+ ICR_Promotion,
+ ICR_Promotion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Complex_Real_Conversion,
+ ICR_Conversion,
+ ICR_Conversion,
+ ICR_Writeback_Conversion
+ };
+ return Rank[(int)Kind];
+}
+
+/// GetImplicitConversionName - Return the name of this kind of
+/// implicit conversion.
+const char* GetImplicitConversionName(ImplicitConversionKind Kind) {
+ static const char* const Name[(int)ICK_Num_Conversion_Kinds] = {
+ "No conversion",
+ "Lvalue-to-rvalue",
+ "Array-to-pointer",
+ "Function-to-pointer",
+ "Noreturn adjustment",
+ "Qualification",
+ "Integral promotion",
+ "Floating point promotion",
+ "Complex promotion",
+ "Integral conversion",
+ "Floating conversion",
+ "Complex conversion",
+ "Floating-integral conversion",
+ "Pointer conversion",
+ "Pointer-to-member conversion",
+ "Boolean conversion",
+ "Compatible-types conversion",
+ "Derived-to-base conversion",
+ "Vector conversion",
+ "Vector splat",
+ "Complex-real conversion",
+ "Block Pointer conversion",
+    "Transparent Union Conversion",
+    "Writeback conversion"
+ };
+ return Name[Kind];
+}
+
+/// StandardConversionSequence - Set the standard conversion
+/// sequence to the identity conversion.
+void StandardConversionSequence::setAsIdentityConversion() {
+ First = ICK_Identity;
+ Second = ICK_Identity;
+ Third = ICK_Identity;
+ DeprecatedStringLiteralToCharPtr = false;
+ QualificationIncludesObjCLifetime = false;
+ ReferenceBinding = false;
+ DirectBinding = false;
+ IsLvalueReference = true;
+ BindsToFunctionLvalue = false;
+ BindsToRvalue = false;
+ BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ ObjCLifetimeConversionBinding = false;
+ CopyConstructor = 0;
+}
+
+/// getRank - Retrieve the rank of this standard conversion sequence
+/// (C++ 13.3.3.1.1p3). The rank is the largest rank of each of the
+/// implicit conversions.
+ImplicitConversionRank StandardConversionSequence::getRank() const {
+ ImplicitConversionRank Rank = ICR_Exact_Match;
+ if (GetConversionRank(First) > Rank)
+ Rank = GetConversionRank(First);
+ if (GetConversionRank(Second) > Rank)
+ Rank = GetConversionRank(Second);
+ if (GetConversionRank(Third) > Rank)
+ Rank = GetConversionRank(Third);
+ return Rank;
+}
+
+/// isPointerConversionToBool - Determines whether this conversion is
+/// a conversion of a pointer or pointer-to-member to bool. This is
+/// used as part of the ranking of standard conversion sequences
+/// (C++ 13.3.3.2p4).
+bool StandardConversionSequence::isPointerConversionToBool() const {
+ // Note that FromType has not necessarily been transformed by the
+ // array-to-pointer or function-to-pointer implicit conversions, so
+ // check for their presence as well as checking whether FromType is
+ // a pointer.
+ if (getToType(1)->isBooleanType() &&
+ (getFromType()->isPointerType() ||
+ getFromType()->isObjCObjectPointerType() ||
+ getFromType()->isBlockPointerType() ||
+ getFromType()->isNullPtrType() ||
+ First == ICK_Array_To_Pointer || First == ICK_Function_To_Pointer))
+ return true;
+
+ return false;
+}
+
+/// isPointerConversionToVoidPointer - Determines whether this
+/// conversion is a conversion of a pointer to a void pointer. This is
+/// used as part of the ranking of standard conversion sequences (C++
+/// 13.3.3.2p4).
+bool
+StandardConversionSequence::
+isPointerConversionToVoidPointer(ASTContext& Context) const {
+ QualType FromType = getFromType();
+ QualType ToType = getToType(1);
+
+ // Note that FromType has not necessarily been transformed by the
+ // array-to-pointer implicit conversion, so check for its presence
+ // and redo the conversion to get a pointer.
+ if (First == ICK_Array_To_Pointer)
+ FromType = Context.getArrayDecayedType(FromType);
+
+ if (Second == ICK_Pointer_Conversion && FromType->isAnyPointerType())
+ if (const PointerType* ToPtrType = ToType->getAs<PointerType>())
+ return ToPtrType->getPointeeType()->isVoidType();
+
+ return false;
+}
+
+/// Skip any implicit casts which could be either part of a narrowing conversion
+/// or after one in an implicit conversion.
+static const Expr *IgnoreNarrowingConversion(const Expr *Converted) {
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Converted)) {
+ switch (ICE->getCastKind()) {
+ case CK_NoOp:
+ case CK_IntegralCast:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ Converted = ICE->getSubExpr();
+ continue;
+
+ default:
+ return Converted;
+ }
+ }
+
+ return Converted;
+}
+
+/// Check if this standard conversion sequence represents a narrowing
+/// conversion, according to C++11 [dcl.init.list]p7.
+///
+/// \param Ctx The AST context.
+/// \param Converted The result of applying this standard conversion sequence.
+/// \param ConstantValue If this is an NK_Constant_Narrowing conversion, the
+/// value of the expression prior to the narrowing conversion.
+/// \param ConstantType If this is an NK_Constant_Narrowing conversion, the
+/// type of the expression prior to the narrowing conversion.
+NarrowingKind
+StandardConversionSequence::getNarrowingKind(ASTContext &Ctx,
+ const Expr *Converted,
+ APValue &ConstantValue,
+ QualType &ConstantType) const {
+ assert(Ctx.getLangOpts().CPlusPlus && "narrowing check outside C++");
+
+ // C++11 [dcl.init.list]p7:
+ // A narrowing conversion is an implicit conversion ...
+ QualType FromType = getToType(0);
+ QualType ToType = getToType(1);
+ switch (Second) {
+ // -- from a floating-point type to an integer type, or
+ //
+ // -- from an integer type or unscoped enumeration type to a floating-point
+ // type, except where the source is a constant expression and the actual
+ // value after conversion will fit into the target type and will produce
+ // the original value when converted back to the original type, or
+ case ICK_Floating_Integral:
+ if (FromType->isRealFloatingType() && ToType->isIntegralType(Ctx)) {
+ return NK_Type_Narrowing;
+ } else if (FromType->isIntegralType(Ctx) && ToType->isRealFloatingType()) {
+ llvm::APSInt IntConstantValue;
+ const Expr *Initializer = IgnoreNarrowingConversion(Converted);
+ if (Initializer &&
+ Initializer->isIntegerConstantExpr(IntConstantValue, Ctx)) {
+ // Convert the integer to the floating type.
+ llvm::APFloat Result(Ctx.getFloatTypeSemantics(ToType));
+ Result.convertFromAPInt(IntConstantValue, IntConstantValue.isSigned(),
+ llvm::APFloat::rmNearestTiesToEven);
+ // And back.
+ llvm::APSInt ConvertedValue = IntConstantValue;
+ bool ignored;
+ Result.convertToInteger(ConvertedValue,
+ llvm::APFloat::rmTowardZero, &ignored);
+ // If the resulting value is different, this was a narrowing conversion.
+ if (IntConstantValue != ConvertedValue) {
+ ConstantValue = APValue(IntConstantValue);
+ ConstantType = Initializer->getType();
+ return NK_Constant_Narrowing;
+ }
+ } else {
+ // Variables are always narrowings.
+ return NK_Variable_Narrowing;
+ }
+ }
+ return NK_Not_Narrowing;
+
+ // -- from long double to double or float, or from double to float, except
+ // where the source is a constant expression and the actual value after
+ // conversion is within the range of values that can be represented (even
+ // if it cannot be represented exactly), or
+ case ICK_Floating_Conversion:
+ if (FromType->isRealFloatingType() && ToType->isRealFloatingType() &&
+ Ctx.getFloatingTypeOrder(FromType, ToType) == 1) {
+ // FromType is larger than ToType.
+ const Expr *Initializer = IgnoreNarrowingConversion(Converted);
+ if (Initializer->isCXX11ConstantExpr(Ctx, &ConstantValue)) {
+ // Constant!
+ assert(ConstantValue.isFloat());
+ llvm::APFloat FloatVal = ConstantValue.getFloat();
+ // Convert the source value into the target type.
+ bool ignored;
+ llvm::APFloat::opStatus ConvertStatus = FloatVal.convert(
+ Ctx.getFloatTypeSemantics(ToType),
+ llvm::APFloat::rmNearestTiesToEven, &ignored);
+ // If there was no overflow, the source value is within the range of
+ // values that can be represented.
+ if (ConvertStatus & llvm::APFloat::opOverflow) {
+ ConstantType = Initializer->getType();
+ return NK_Constant_Narrowing;
+ }
+ } else {
+ return NK_Variable_Narrowing;
+ }
+ }
+ return NK_Not_Narrowing;
+
+ // -- from an integer type or unscoped enumeration type to an integer type
+ // that cannot represent all the values of the original type, except where
+ // the source is a constant expression and the actual value after
+ // conversion will fit into the target type and will produce the original
+ // value when converted back to the original type.
+ case ICK_Boolean_Conversion: // Bools are integers too.
+ if (!FromType->isIntegralOrUnscopedEnumerationType()) {
+ // Boolean conversions can be from pointers and pointers to members
+ // [conv.bool], and those aren't considered narrowing conversions.
+ return NK_Not_Narrowing;
+ } // Otherwise, fall through to the integral case.
+ case ICK_Integral_Conversion: {
+ assert(FromType->isIntegralOrUnscopedEnumerationType());
+ assert(ToType->isIntegralOrUnscopedEnumerationType());
+ const bool FromSigned = FromType->isSignedIntegerOrEnumerationType();
+ const unsigned FromWidth = Ctx.getIntWidth(FromType);
+ const bool ToSigned = ToType->isSignedIntegerOrEnumerationType();
+ const unsigned ToWidth = Ctx.getIntWidth(ToType);
+
+ if (FromWidth > ToWidth ||
+ (FromWidth == ToWidth && FromSigned != ToSigned)) {
+ // Not all values of FromType can be represented in ToType.
+ llvm::APSInt InitializerValue;
+ const Expr *Initializer = IgnoreNarrowingConversion(Converted);
+ if (Initializer->isIntegerConstantExpr(InitializerValue, Ctx)) {
+ ConstantValue = APValue(InitializerValue);
+
+ // Add a bit to the InitializerValue so we don't have to worry about
+ // signed vs. unsigned comparisons.
+ InitializerValue = InitializerValue.extend(
+ InitializerValue.getBitWidth() + 1);
+ // Convert the initializer to and from the target width and signed-ness.
+ llvm::APSInt ConvertedValue = InitializerValue;
+ ConvertedValue = ConvertedValue.trunc(ToWidth);
+ ConvertedValue.setIsSigned(ToSigned);
+ ConvertedValue = ConvertedValue.extend(InitializerValue.getBitWidth());
+ ConvertedValue.setIsSigned(InitializerValue.isSigned());
+ // If the result is different, this was a narrowing conversion.
+ if (ConvertedValue != InitializerValue) {
+ ConstantType = Initializer->getType();
+ return NK_Constant_Narrowing;
+ }
+ } else {
+ // Variables are always narrowings.
+ return NK_Variable_Narrowing;
+ }
+ }
+ return NK_Not_Narrowing;
+ }
+
+ default:
+ // Other kinds of conversions are not narrowings.
+ return NK_Not_Narrowing;
+ }
+}
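+
+// Illustrative examples of the C++11 narrowing classification above
+// (hypothetical user code):
+//
+//   int    i{2.7};        // NK_Type_Narrowing: floating-point -> integer
+//   char   c{300};        // NK_Constant_Narrowing: 300 does not fit in an
+//                         // 8-bit char
+//   float  f{16777217};   // NK_Constant_Narrowing: value does not round-trip
+//   double d = getValue();
+//   float  g{d};          // NK_Variable_Narrowing: non-constant source
+//   char   k{65};         // NK_Not_Narrowing: value fits and round-trips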
+
+/// DebugPrint - Print this standard conversion sequence to standard
+/// error. Useful for debugging overloading issues.
+void StandardConversionSequence::DebugPrint() const {
+ raw_ostream &OS = llvm::errs();
+ bool PrintedSomething = false;
+ if (First != ICK_Identity) {
+ OS << GetImplicitConversionName(First);
+ PrintedSomething = true;
+ }
+
+ if (Second != ICK_Identity) {
+ if (PrintedSomething) {
+ OS << " -> ";
+ }
+ OS << GetImplicitConversionName(Second);
+
+ if (CopyConstructor) {
+ OS << " (by copy constructor)";
+ } else if (DirectBinding) {
+ OS << " (direct reference binding)";
+ } else if (ReferenceBinding) {
+ OS << " (reference binding)";
+ }
+ PrintedSomething = true;
+ }
+
+ if (Third != ICK_Identity) {
+ if (PrintedSomething) {
+ OS << " -> ";
+ }
+ OS << GetImplicitConversionName(Third);
+ PrintedSomething = true;
+ }
+
+ if (!PrintedSomething) {
+ OS << "No conversions required";
+ }
+}
+
+/// DebugPrint - Print this user-defined conversion sequence to standard
+/// error. Useful for debugging overloading issues.
+void UserDefinedConversionSequence::DebugPrint() const {
+ raw_ostream &OS = llvm::errs();
+ if (Before.First || Before.Second || Before.Third) {
+ Before.DebugPrint();
+ OS << " -> ";
+ }
+ if (ConversionFunction)
+ OS << '\'' << *ConversionFunction << '\'';
+ else
+ OS << "aggregate initialization";
+ if (After.First || After.Second || After.Third) {
+ OS << " -> ";
+ After.DebugPrint();
+ }
+}
+
+/// DebugPrint - Print this implicit conversion sequence to standard
+/// error. Useful for debugging overloading issues.
+void ImplicitConversionSequence::DebugPrint() const {
+ raw_ostream &OS = llvm::errs();
+ switch (ConversionKind) {
+ case StandardConversion:
+ OS << "Standard conversion: ";
+ Standard.DebugPrint();
+ break;
+ case UserDefinedConversion:
+ OS << "User-defined conversion: ";
+ UserDefined.DebugPrint();
+ break;
+ case EllipsisConversion:
+ OS << "Ellipsis conversion";
+ break;
+ case AmbiguousConversion:
+ OS << "Ambiguous conversion";
+ break;
+ case BadConversion:
+ OS << "Bad conversion";
+ break;
+ }
+
+ OS << "\n";
+}
+
+void AmbiguousConversionSequence::construct() {
+ new (&conversions()) ConversionSet();
+}
+
+void AmbiguousConversionSequence::destruct() {
+ conversions().~ConversionSet();
+}
+
+void
+AmbiguousConversionSequence::copyFrom(const AmbiguousConversionSequence &O) {
+ FromTypePtr = O.FromTypePtr;
+ ToTypePtr = O.ToTypePtr;
+ new (&conversions()) ConversionSet(O.conversions());
+}
+
+namespace {
+ // Structure used by OverloadCandidate::DeductionFailureInfo to store
+ // template parameter and template argument information.
+ struct DFIParamWithArguments {
+ TemplateParameter Param;
+ TemplateArgument FirstArg;
+ TemplateArgument SecondArg;
+ };
+}
+
+/// \brief Convert from Sema's representation of template deduction information
+/// to the form used in overload-candidate information.
+OverloadCandidate::DeductionFailureInfo
+static MakeDeductionFailureInfo(ASTContext &Context,
+ Sema::TemplateDeductionResult TDK,
+ TemplateDeductionInfo &Info) {
+ OverloadCandidate::DeductionFailureInfo Result;
+ Result.Result = static_cast<unsigned>(TDK);
+ Result.Data = 0;
+ switch (TDK) {
+ case Sema::TDK_Success:
+ case Sema::TDK_InstantiationDepth:
+ case Sema::TDK_TooManyArguments:
+ case Sema::TDK_TooFewArguments:
+ break;
+
+ case Sema::TDK_Incomplete:
+ case Sema::TDK_InvalidExplicitArguments:
+ Result.Data = Info.Param.getOpaqueValue();
+ break;
+
+ case Sema::TDK_Inconsistent:
+ case Sema::TDK_Underqualified: {
+ // FIXME: Should allocate from normal heap so that we can free this later.
+ DFIParamWithArguments *Saved = new (Context) DFIParamWithArguments;
+ Saved->Param = Info.Param;
+ Saved->FirstArg = Info.FirstArg;
+ Saved->SecondArg = Info.SecondArg;
+ Result.Data = Saved;
+ break;
+ }
+
+ case Sema::TDK_SubstitutionFailure:
+ Result.Data = Info.take();
+ break;
+
+ case Sema::TDK_NonDeducedMismatch:
+ case Sema::TDK_FailedOverloadResolution:
+ break;
+ }
+
+ return Result;
+}
+
+void OverloadCandidate::DeductionFailureInfo::Destroy() {
+ switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
+ case Sema::TDK_Success:
+ case Sema::TDK_InstantiationDepth:
+ case Sema::TDK_Incomplete:
+ case Sema::TDK_TooManyArguments:
+ case Sema::TDK_TooFewArguments:
+ case Sema::TDK_InvalidExplicitArguments:
+ break;
+
+ case Sema::TDK_Inconsistent:
+ case Sema::TDK_Underqualified:
+ // FIXME: Destroy the data?
+ Data = 0;
+ break;
+
+ case Sema::TDK_SubstitutionFailure:
+      // FIXME: Destroy the template argument list?
+ Data = 0;
+ break;
+
+ // Unhandled
+ case Sema::TDK_NonDeducedMismatch:
+ case Sema::TDK_FailedOverloadResolution:
+ break;
+ }
+}
+
+TemplateParameter
+OverloadCandidate::DeductionFailureInfo::getTemplateParameter() {
+ switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
+ case Sema::TDK_Success:
+ case Sema::TDK_InstantiationDepth:
+ case Sema::TDK_TooManyArguments:
+ case Sema::TDK_TooFewArguments:
+ case Sema::TDK_SubstitutionFailure:
+ return TemplateParameter();
+
+ case Sema::TDK_Incomplete:
+ case Sema::TDK_InvalidExplicitArguments:
+ return TemplateParameter::getFromOpaqueValue(Data);
+
+ case Sema::TDK_Inconsistent:
+ case Sema::TDK_Underqualified:
+ return static_cast<DFIParamWithArguments*>(Data)->Param;
+
+ // Unhandled
+ case Sema::TDK_NonDeducedMismatch:
+ case Sema::TDK_FailedOverloadResolution:
+ break;
+ }
+
+ return TemplateParameter();
+}
+
+TemplateArgumentList *
+OverloadCandidate::DeductionFailureInfo::getTemplateArgumentList() {
+ switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
+ case Sema::TDK_Success:
+ case Sema::TDK_InstantiationDepth:
+ case Sema::TDK_TooManyArguments:
+ case Sema::TDK_TooFewArguments:
+ case Sema::TDK_Incomplete:
+ case Sema::TDK_InvalidExplicitArguments:
+ case Sema::TDK_Inconsistent:
+ case Sema::TDK_Underqualified:
+ return 0;
+
+ case Sema::TDK_SubstitutionFailure:
+ return static_cast<TemplateArgumentList*>(Data);
+
+ // Unhandled
+ case Sema::TDK_NonDeducedMismatch:
+ case Sema::TDK_FailedOverloadResolution:
+ break;
+ }
+
+ return 0;
+}
+
+const TemplateArgument *OverloadCandidate::DeductionFailureInfo::getFirstArg() {
+ switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
+ case Sema::TDK_Success:
+ case Sema::TDK_InstantiationDepth:
+ case Sema::TDK_Incomplete:
+ case Sema::TDK_TooManyArguments:
+ case Sema::TDK_TooFewArguments:
+ case Sema::TDK_InvalidExplicitArguments:
+ case Sema::TDK_SubstitutionFailure:
+ return 0;
+
+ case Sema::TDK_Inconsistent:
+ case Sema::TDK_Underqualified:
+ return &static_cast<DFIParamWithArguments*>(Data)->FirstArg;
+
+ // Unhandled
+ case Sema::TDK_NonDeducedMismatch:
+ case Sema::TDK_FailedOverloadResolution:
+ break;
+ }
+
+ return 0;
+}
+
+const TemplateArgument *
+OverloadCandidate::DeductionFailureInfo::getSecondArg() {
+ switch (static_cast<Sema::TemplateDeductionResult>(Result)) {
+ case Sema::TDK_Success:
+ case Sema::TDK_InstantiationDepth:
+ case Sema::TDK_Incomplete:
+ case Sema::TDK_TooManyArguments:
+ case Sema::TDK_TooFewArguments:
+ case Sema::TDK_InvalidExplicitArguments:
+ case Sema::TDK_SubstitutionFailure:
+ return 0;
+
+ case Sema::TDK_Inconsistent:
+ case Sema::TDK_Underqualified:
+ return &static_cast<DFIParamWithArguments*>(Data)->SecondArg;
+
+ // Unhandled
+ case Sema::TDK_NonDeducedMismatch:
+ case Sema::TDK_FailedOverloadResolution:
+ break;
+ }
+
+ return 0;
+}
+
+void OverloadCandidateSet::clear() {
+ for (iterator i = begin(), e = end(); i != e; ++i)
+ for (unsigned ii = 0, ie = i->NumConversions; ii != ie; ++ii)
+ i->Conversions[ii].~ImplicitConversionSequence();
+ NumInlineSequences = 0;
+ Candidates.clear();
+ Functions.clear();
+}
+
+namespace {
+ class UnbridgedCastsSet {
+ struct Entry {
+ Expr **Addr;
+ Expr *Saved;
+ };
+ SmallVector<Entry, 2> Entries;
+
+ public:
+ void save(Sema &S, Expr *&E) {
+ assert(E->hasPlaceholderType(BuiltinType::ARCUnbridgedCast));
+ Entry entry = { &E, E };
+ Entries.push_back(entry);
+ E = S.stripARCUnbridgedCast(E);
+ }
+
+ void restore() {
+ for (SmallVectorImpl<Entry>::iterator
+ i = Entries.begin(), e = Entries.end(); i != e; ++i)
+ *i->Addr = i->Saved;
+ }
+ };
+}
+
+/// checkPlaceholderForOverload - Do any interesting placeholder-like
+/// preprocessing on the given expression.
+///
+/// \param unbridgedCasts a collection to which to add unbridged casts;
+/// without this, they will be immediately diagnosed as errors
+///
+/// Return true on unrecoverable error.
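+///
+/// For example, under ARC a cast with no bridge annotation, such as
+///   CFStringRef cf = (CFStringRef)someNSString;   // someNSString: an NSString*
+/// carries an ARCUnbridgedCast placeholder type; when \p unbridgedCasts is
+/// provided, the cast is saved and stripped here and restored afterwards
+/// instead of being diagnosed immediately.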
+static bool checkPlaceholderForOverload(Sema &S, Expr *&E,
+ UnbridgedCastsSet *unbridgedCasts = 0) {
+ if (const BuiltinType *placeholder = E->getType()->getAsPlaceholderType()) {
+ // We can't handle overloaded expressions here because overload
+ // resolution might reasonably tweak them.
+ if (placeholder->getKind() == BuiltinType::Overload) return false;
+
+ // If the context potentially accepts unbridged ARC casts, strip
+ // the unbridged cast and add it to the collection for later restoration.
+ if (placeholder->getKind() == BuiltinType::ARCUnbridgedCast &&
+ unbridgedCasts) {
+ unbridgedCasts->save(S, E);
+ return false;
+ }
+
+ // Go ahead and check everything else.
+ ExprResult result = S.CheckPlaceholderExpr(E);
+ if (result.isInvalid())
+ return true;
+
+ E = result.take();
+ return false;
+ }
+
+ // Nothing to do.
+ return false;
+}
+
+/// checkArgPlaceholdersForOverload - Check a set of call operands for
+/// placeholders.
+static bool checkArgPlaceholdersForOverload(Sema &S, Expr **args,
+ unsigned numArgs,
+ UnbridgedCastsSet &unbridged) {
+ for (unsigned i = 0; i != numArgs; ++i)
+ if (checkPlaceholderForOverload(S, args[i], &unbridged))
+ return true;
+
+ return false;
+}
+
+// CheckOverload - Determine whether the given New declaration is an
+// overload of the declarations in Old. This routine returns Ovl_Match
+// or Ovl_NonFunction if New and Old cannot be overloaded, e.g., if New
+// has the same signature as some function in Old (C++ 1.3.10) or if the
+// Old declarations aren't functions (or function templates) at all. In
+// those cases, Match will point to the decl that New cannot be
+// overloaded with. This decl may be a UsingShadowDecl on top of the
+// underlying declaration.
+//
+// Example: Given the following input:
+//
+// void f(int, float); // #1
+// void f(int, int); // #2
+// int f(int, int); // #3
+//
+// When we process #1, there is no previous declaration of "f",
+// so IsOverload will not be used.
+//
+// When we process #2, Old contains only the FunctionDecl for #1. By
+// comparing the parameter types, we see that #1 and #2 are overloaded
+// (since they have different signatures), so this routine returns
+// Ovl_Overload; Match is unchanged.
+//
+// When we process #3, Old is an overload set containing #1 and #2. We
+// compare the signatures of #3 to #1 (they're overloaded, so we do
+// nothing) and then #3 to #2. Since the signatures of #3 and #2 are
+// identical (return types of functions are not part of the
+// signature), CheckOverload returns Ovl_Match and Match will be set to
+// point to the FunctionDecl for #2.
+//
+// 'NewIsUsingDecl' indicates that 'New' is being introduced
+// into a class by a using declaration. The rules for whether to hide
+// shadow declarations ignore some properties which otherwise figure
+// into a function template's signature.
+Sema::OverloadKind
+Sema::CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &Old,
+ NamedDecl *&Match, bool NewIsUsingDecl) {
+ for (LookupResult::iterator I = Old.begin(), E = Old.end();
+ I != E; ++I) {
+ NamedDecl *OldD = *I;
+
+ bool OldIsUsingDecl = false;
+ if (isa<UsingShadowDecl>(OldD)) {
+ OldIsUsingDecl = true;
+
+ // We can always introduce two using declarations into the same
+ // context, even if they have identical signatures.
+ if (NewIsUsingDecl) continue;
+
+ OldD = cast<UsingShadowDecl>(OldD)->getTargetDecl();
+ }
+
+ // If either declaration was introduced by a using declaration,
+ // we'll need to use slightly different rules for matching.
+ // Essentially, these rules are the normal rules, except that
+ // function templates hide function templates with different
+ // return types or template parameter lists.
+ bool UseMemberUsingDeclRules =
+ (OldIsUsingDecl || NewIsUsingDecl) && CurContext->isRecord();
+
+ if (FunctionTemplateDecl *OldT = dyn_cast<FunctionTemplateDecl>(OldD)) {
+ if (!IsOverload(New, OldT->getTemplatedDecl(), UseMemberUsingDeclRules)) {
+ if (UseMemberUsingDeclRules && OldIsUsingDecl) {
+ HideUsingShadowDecl(S, cast<UsingShadowDecl>(*I));
+ continue;
+ }
+
+ Match = *I;
+ return Ovl_Match;
+ }
+ } else if (FunctionDecl *OldF = dyn_cast<FunctionDecl>(OldD)) {
+ if (!IsOverload(New, OldF, UseMemberUsingDeclRules)) {
+ if (UseMemberUsingDeclRules && OldIsUsingDecl) {
+ HideUsingShadowDecl(S, cast<UsingShadowDecl>(*I));
+ continue;
+ }
+
+ Match = *I;
+ return Ovl_Match;
+ }
+ } else if (isa<UsingDecl>(OldD)) {
+ // We can overload with these, which can show up when doing
+ // redeclaration checks for UsingDecls.
+ assert(Old.getLookupKind() == LookupUsingDeclName);
+ } else if (isa<TagDecl>(OldD)) {
+ // We can always overload with tags by hiding them.
+ } else if (isa<UnresolvedUsingValueDecl>(OldD)) {
+ // Optimistically assume that an unresolved using decl will
+ // overload; if it doesn't, we'll have to diagnose during
+ // template instantiation.
+ } else {
+ // (C++ 13p1):
+ // Only function declarations can be overloaded; object and type
+ // declarations cannot be overloaded.
+ Match = *I;
+ return Ovl_NonFunction;
+ }
+ }
+
+ return Ovl_Overload;
+}
+
+bool Sema::IsOverload(FunctionDecl *New, FunctionDecl *Old,
+ bool UseUsingDeclRules) {
+ // If both of the functions are extern "C", then they are not
+ // overloads.
+ if (Old->isExternC() && New->isExternC())
+ return false;
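+ // For example, at most one function with a given name can have C language
+ // linkage, so these do not form an overload set:
+ //   extern "C" void f(int);
+ //   extern "C" void f(float);   // error: conflicts with the first 'f'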
+
+ FunctionTemplateDecl *OldTemplate = Old->getDescribedFunctionTemplate();
+ FunctionTemplateDecl *NewTemplate = New->getDescribedFunctionTemplate();
+
+ // C++ [temp.fct]p2:
+ // A function template can be overloaded with other function templates
+ // and with normal (non-template) functions.
+ if ((OldTemplate == 0) != (NewTemplate == 0))
+ return true;
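+ // For example, a function template and a non-template function may
+ // coexist in one overload set:
+ //   template<typename T> void g(T);
+ //   void g(int);   // overloads the template, and is preferred for g(0)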
+
+ // Is the function New an overload of the function Old?
+ QualType OldQType = Context.getCanonicalType(Old->getType());
+ QualType NewQType = Context.getCanonicalType(New->getType());
+
+ // Compare the signatures (C++ 1.3.10) of the two functions to
+ // determine whether they are overloads. If we find any mismatch
+ // in the signature, they are overloads.
+
+ // If either of these functions is a K&R-style function (no
+ // prototype), then we consider them to have matching signatures.
+ if (isa<FunctionNoProtoType>(OldQType.getTypePtr()) ||
+ isa<FunctionNoProtoType>(NewQType.getTypePtr()))
+ return false;
+
+ const FunctionProtoType* OldType = cast<FunctionProtoType>(OldQType);
+ const FunctionProtoType* NewType = cast<FunctionProtoType>(NewQType);
+
+ // The signature of a function includes the types of its
+ // parameters (C++ 1.3.10), which includes the presence or absence
+ // of the ellipsis (see C++ DR 357).
+ if (OldQType != NewQType &&
+ (OldType->getNumArgs() != NewType->getNumArgs() ||
+ OldType->isVariadic() != NewType->isVariadic() ||
+ !FunctionArgTypesAreEqual(OldType, NewType)))
+ return true;
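+ // For example, these two declarations differ only in the presence of the
+ // ellipsis, and are therefore distinct overloads:
+ //   void h(int);
+ //   void h(int, ...);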
+
+ // C++ [temp.over.link]p4:
+ // The signature of a function template consists of its function
+ // signature, its return type and its template parameter list. The names
+ // of the template parameters are significant only for establishing the
+ // relationship between the template parameters and the rest of the
+ // signature.
+ //
+ // We check the return type and template parameter lists for function
+ // templates first; the remaining checks follow.
+ //
+ // However, we don't consider either of these when deciding whether
+ // a member introduced by a shadow declaration is hidden.
+ if (!UseUsingDeclRules && NewTemplate &&
+ (!TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(),
+ OldTemplate->getTemplateParameters(),
+ false, TPL_TemplateMatch) ||
+ OldType->getResultType() != NewType->getResultType()))
+ return true;
+
+ // If the function is a class member, its signature includes the
+ // cv-qualifiers (if any) and ref-qualifier (if any) on the function itself.
+ //
+ // As part of this, also check whether one of the member functions
+ // is static, in which case they are not overloads (C++
+ // 13.1p2). While not part of the definition of the signature,
+ // this check is important to determine whether these functions
+ // can be overloaded.
+ CXXMethodDecl* OldMethod = dyn_cast<CXXMethodDecl>(Old);
+ CXXMethodDecl* NewMethod = dyn_cast<CXXMethodDecl>(New);
+ if (OldMethod && NewMethod &&
+ !OldMethod->isStatic() && !NewMethod->isStatic() &&
+ (OldMethod->getTypeQualifiers() != NewMethod->getTypeQualifiers() ||
+ OldMethod->getRefQualifier() != NewMethod->getRefQualifier())) {
+ if (!UseUsingDeclRules &&
+ OldMethod->getRefQualifier() != NewMethod->getRefQualifier() &&
+ (OldMethod->getRefQualifier() == RQ_None ||
+ NewMethod->getRefQualifier() == RQ_None)) {
+ // C++0x [over.load]p2:
+ // - Member function declarations with the same name and the same
+ // parameter-type-list as well as member function template
+ // declarations with the same name, the same parameter-type-list, and
+ // the same template parameter lists cannot be overloaded if any of
+ // them, but not all, have a ref-qualifier (8.3.5).
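+ // For example, the following is rejected by this rule:
+ //   struct X {
+ //     void f() &;
+ //     void f();      // error: only one of the two has a ref-qualifier
+ //   };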
+ Diag(NewMethod->getLocation(), diag::err_ref_qualifier_overload)
+ << NewMethod->getRefQualifier() << OldMethod->getRefQualifier();
+ Diag(OldMethod->getLocation(), diag::note_previous_declaration);
+ }
+
+ return true;
+ }
+
+ // The signatures match; this is not an overload.
+ return false;
+}
+
+/// \brief Checks availability of the function depending on the current
+/// function context. Inside an unavailable function, unavailability is ignored.
+///
+/// \returns true if \arg FD is unavailable and current context is inside
+/// an available function, false otherwise.
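+///
+/// For example, given
+///   __attribute__((unavailable)) void old();
+///   __attribute__((unavailable)) void compat() { old(); }  // not diagnosed
+///   void user() { old(); }                                  // diagnosed
+/// the call inside 'compat' is ignored because 'compat' is itself
+/// unavailable, while the call inside 'user' is reported.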
+bool Sema::isFunctionConsideredUnavailable(FunctionDecl *FD) {
+ return FD->isUnavailable() && !cast<Decl>(CurContext)->isUnavailable();
+}
+
+/// \brief Tries a user-defined conversion from From to ToType.
+///
+/// Produces an implicit conversion sequence for when a standard conversion
+/// is not an option. See TryImplicitConversion for more information.
+static ImplicitConversionSequence
+TryUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
+ bool SuppressUserConversions,
+ bool AllowExplicit,
+ bool InOverloadResolution,
+ bool CStyle,
+ bool AllowObjCWritebackConversion) {
+ ImplicitConversionSequence ICS;
+
+ if (SuppressUserConversions) {
+ // User-defined conversions have been explicitly suppressed, so there
+ // is no conversion that we can perform.
+ ICS.setBad(BadConversionSequence::no_conversion, From, ToType);
+ return ICS;
+ }
+
+ // Attempt user-defined conversion.
+ OverloadCandidateSet Conversions(From->getExprLoc());
+ OverloadingResult UserDefResult
+ = IsUserDefinedConversion(S, From, ToType, ICS.UserDefined, Conversions,
+ AllowExplicit);
+
+ if (UserDefResult == OR_Success) {
+ ICS.setUserDefined();
+ // C++ [over.ics.user]p4:
+ // A conversion of an expression of class type to the same class
+ // type is given Exact Match rank, and a conversion of an
+ // expression of class type to a base class of that type is
+ // given Conversion rank, in spite of the fact that a copy
+ // constructor (i.e., a user-defined conversion function) is
+ // called for those cases.
+ if (CXXConstructorDecl *Constructor
+ = dyn_cast<CXXConstructorDecl>(ICS.UserDefined.ConversionFunction)) {
+ QualType FromCanon
+ = S.Context.getCanonicalType(From->getType().getUnqualifiedType());
+ QualType ToCanon
+ = S.Context.getCanonicalType(ToType).getUnqualifiedType();
+ if (Constructor->isCopyConstructor() &&
+ (FromCanon == ToCanon || S.IsDerivedFrom(FromCanon, ToCanon))) {
+ // Turn this into a "standard" conversion sequence, so that it
+ // gets ranked with standard conversion sequences.
+ ICS.setStandard();
+ ICS.Standard.setAsIdentityConversion();
+ ICS.Standard.setFromType(From->getType());
+ ICS.Standard.setAllToTypes(ToType);
+ ICS.Standard.CopyConstructor = Constructor;
+ if (ToCanon != FromCanon)
+ ICS.Standard.Second = ICK_Derived_To_Base;
+ }
+ }
+
+ // C++ [over.best.ics]p4:
+ // However, when considering the argument of a user-defined
+ // conversion function that is a candidate by 13.3.1.3 when
+ // invoked for the copying of the temporary in the second step
+ // of a class copy-initialization, or by 13.3.1.4, 13.3.1.5, or
+ // 13.3.1.6 in all cases, only standard conversion sequences and
+ // ellipsis conversion sequences are allowed.
+ if (SuppressUserConversions && ICS.isUserDefined()) {
+ ICS.setBad(BadConversionSequence::suppressed_user, From, ToType);
+ }
+ } else if (UserDefResult == OR_Ambiguous && !SuppressUserConversions) {
+ ICS.setAmbiguous();
+ ICS.Ambiguous.setFromType(From->getType());
+ ICS.Ambiguous.setToType(ToType);
+ for (OverloadCandidateSet::iterator Cand = Conversions.begin();
+ Cand != Conversions.end(); ++Cand)
+ if (Cand->Viable)
+ ICS.Ambiguous.addConversion(Cand->Function);
+ } else {
+ ICS.setBad(BadConversionSequence::no_conversion, From, ToType);
+ }
+
+ return ICS;
+}
+
+/// TryImplicitConversion - Attempt to perform an implicit conversion
+/// from the given expression (Expr) to the given type (ToType). This
+/// function returns an implicit conversion sequence that can be used
+/// to perform the initialization. Given
+///
+/// void f(float f);
+/// void g(int i) { f(i); }
+///
+/// this routine would produce an implicit conversion sequence to
+/// describe the initialization of f from i, which will be a standard
+/// conversion sequence containing an lvalue-to-rvalue conversion (C++
+/// 4.1) followed by a floating-integral conversion (C++ 4.9).
+///
+/// Note that this routine only determines how the conversion can be
+/// performed; it does not actually perform the conversion. As such,
+/// it will not produce any diagnostics if no conversion is available,
+/// but will instead return an implicit conversion sequence of kind
+/// "BadConversion".
+///
+/// If @p SuppressUserConversions, then user-defined conversions are
+/// not permitted.
+/// If @p AllowExplicit, then explicit user-defined conversions are
+/// permitted.
+///
+/// \param AllowObjCWritebackConversion Whether we allow the Objective-C
+/// writeback conversion, which allows __autoreleasing id* parameters to
+/// be initialized with __strong id* or __weak id* arguments.
+static ImplicitConversionSequence
+TryImplicitConversion(Sema &S, Expr *From, QualType ToType,
+ bool SuppressUserConversions,
+ bool AllowExplicit,
+ bool InOverloadResolution,
+ bool CStyle,
+ bool AllowObjCWritebackConversion) {
+ ImplicitConversionSequence ICS;
+ if (IsStandardConversion(S, From, ToType, InOverloadResolution,
+ ICS.Standard, CStyle, AllowObjCWritebackConversion)){
+ ICS.setStandard();
+ return ICS;
+ }
+
+ if (!S.getLangOpts().CPlusPlus) {
+ ICS.setBad(BadConversionSequence::no_conversion, From, ToType);
+ return ICS;
+ }
+
+ // C++ [over.ics.user]p4:
+ // A conversion of an expression of class type to the same class
+ // type is given Exact Match rank, and a conversion of an
+ // expression of class type to a base class of that type is
+ // given Conversion rank, in spite of the fact that a copy/move
+ // constructor (i.e., a user-defined conversion function) is
+ // called for those cases.
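+ // For example, initializing a base-class parameter from a derived-class
+ // argument forms a standard conversion sequence of Conversion rank, even
+ // though a copy/move constructor is ultimately invoked:
+ //   struct Base {};
+ //   struct Derived : Base {};
+ //   void f(Base);
+ //   void g(Derived d) { f(d); }   // derived-to-base, Conversion rank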
+ QualType FromType = From->getType();
+ if (ToType->getAs<RecordType>() && FromType->getAs<RecordType>() &&
+ (S.Context.hasSameUnqualifiedType(FromType, ToType) ||
+ S.IsDerivedFrom(FromType, ToType))) {
+ ICS.setStandard();
+ ICS.Standard.setAsIdentityConversion();
+ ICS.Standard.setFromType(FromType);
+ ICS.Standard.setAllToTypes(ToType);
+
+ // We don't actually check at this point whether there is a valid
+ // copy/move constructor, since overloading just assumes that it
+ // exists. When we actually perform initialization, we'll find the
+ // appropriate constructor to copy the returned object, if needed.
+ ICS.Standard.CopyConstructor = 0;
+
+ // Determine whether this is considered a derived-to-base conversion.
+ if (!S.Context.hasSameUnqualifiedType(FromType, ToType))
+ ICS.Standard.Second = ICK_Derived_To_Base;
+
+ return ICS;
+ }
+
+ return TryUserDefinedConversion(S, From, ToType, SuppressUserConversions,
+ AllowExplicit, InOverloadResolution, CStyle,
+ AllowObjCWritebackConversion);
+}
+
+ImplicitConversionSequence
+Sema::TryImplicitConversion(Expr *From, QualType ToType,
+ bool SuppressUserConversions,
+ bool AllowExplicit,
+ bool InOverloadResolution,
+ bool CStyle,
+ bool AllowObjCWritebackConversion) {
+ return clang::TryImplicitConversion(*this, From, ToType,
+ SuppressUserConversions, AllowExplicit,
+ InOverloadResolution, CStyle,
+ AllowObjCWritebackConversion);
+}
+
+/// PerformImplicitConversion - Perform an implicit conversion of the
+/// expression From to the type ToType. Returns the
+/// converted expression. Flavor is the kind of conversion we're
+/// performing, used in the error message. If @p AllowExplicit,
+/// explicit user-defined conversions are permitted.
+ExprResult
+Sema::PerformImplicitConversion(Expr *From, QualType ToType,
+ AssignmentAction Action, bool AllowExplicit) {
+ ImplicitConversionSequence ICS;
+ return PerformImplicitConversion(From, ToType, Action, AllowExplicit, ICS);
+}
+
+ExprResult
+Sema::PerformImplicitConversion(Expr *From, QualType ToType,
+ AssignmentAction Action, bool AllowExplicit,
+ ImplicitConversionSequence& ICS) {
+ if (checkPlaceholderForOverload(*this, From))
+ return ExprError();
+
+ // Objective-C ARC: Determine whether we will allow the writeback conversion.
+ bool AllowObjCWritebackConversion
+ = getLangOpts().ObjCAutoRefCount &&
+ (Action == AA_Passing || Action == AA_Sending);
+
+ ICS = clang::TryImplicitConversion(*this, From, ToType,
+ /*SuppressUserConversions=*/false,
+ AllowExplicit,
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false,
+ AllowObjCWritebackConversion);
+ return PerformImplicitConversion(From, ToType, ICS, Action);
+}
+
+/// \brief Determine whether the conversion from FromType to ToType is a valid
+/// conversion that strips "noreturn" off the nested function type.
+bool Sema::IsNoReturnConversion(QualType FromType, QualType ToType,
+ QualType &ResultTy) {
+ if (Context.hasSameUnqualifiedType(FromType, ToType))
+ return false;
+
+ // Permit the conversion F(t __attribute__((noreturn))) -> F(t)
+ // where F adds one of the following at most once:
+ // - a pointer
+ // - a member pointer
+ // - a block pointer
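+ // For example, this lets a noreturn function initialize a plain function
+ // pointer:
+ //   __attribute__((noreturn)) void fatal();
+ //   void (*fp)() = fatal;   // noreturn is stripped from the function type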
+ CanQualType CanTo = Context.getCanonicalType(ToType);
+ CanQualType CanFrom = Context.getCanonicalType(FromType);
+ Type::TypeClass TyClass = CanTo->getTypeClass();
+ if (TyClass != CanFrom->getTypeClass()) return false;
+ if (TyClass != Type::FunctionProto && TyClass != Type::FunctionNoProto) {
+ if (TyClass == Type::Pointer) {
+ CanTo = CanTo.getAs<PointerType>()->getPointeeType();
+ CanFrom = CanFrom.getAs<PointerType>()->getPointeeType();
+ } else if (TyClass == Type::BlockPointer) {
+ CanTo = CanTo.getAs<BlockPointerType>()->getPointeeType();
+ CanFrom = CanFrom.getAs<BlockPointerType>()->getPointeeType();
+ } else if (TyClass == Type::MemberPointer) {
+ CanTo = CanTo.getAs<MemberPointerType>()->getPointeeType();
+ CanFrom = CanFrom.getAs<MemberPointerType>()->getPointeeType();
+ } else {
+ return false;
+ }
+
+ TyClass = CanTo->getTypeClass();
+ if (TyClass != CanFrom->getTypeClass()) return false;
+ if (TyClass != Type::FunctionProto && TyClass != Type::FunctionNoProto)
+ return false;
+ }
+
+ const FunctionType *FromFn = cast<FunctionType>(CanFrom);
+ FunctionType::ExtInfo EInfo = FromFn->getExtInfo();
+ if (!EInfo.getNoReturn()) return false;
+
+ FromFn = Context.adjustFunctionType(FromFn, EInfo.withNoReturn(false));
+ assert(QualType(FromFn, 0).isCanonical());
+ if (QualType(FromFn, 0) != CanTo) return false;
+
+ ResultTy = ToType;
+ return true;
+}
+
+/// \brief Determine whether the conversion from FromType to ToType is a valid
+/// vector conversion.
+///
+/// \param ICK Will be set to the vector conversion kind, if this is a vector
+/// conversion.
+static bool IsVectorConversion(ASTContext &Context, QualType FromType,
+ QualType ToType, ImplicitConversionKind &ICK) {
+ // We need at least one of these types to be a vector type to have a vector
+ // conversion.
+ if (!ToType->isVectorType() && !FromType->isVectorType())
+ return false;
+
+ // Identical types require no conversions.
+ if (Context.hasSameUnqualifiedType(FromType, ToType))
+ return false;
+
+ // There are no conversions between extended vector types, only identity.
+ if (ToType->isExtVectorType()) {
+ // There are no conversions between extended vector types other than the
+ // identity conversion.
+ if (FromType->isExtVectorType())
+ return false;
+
+ // Vector splat from any arithmetic type to a vector.
+ if (FromType->isArithmeticType()) {
+ ICK = ICK_Vector_Splat;
+ return true;
+ }
+ }
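+ // For example, with a scalar initializer an ext_vector value is splatted:
+ //   typedef float float4 __attribute__((ext_vector_type(4)));
+ //   float4 v = 1.0f;   // ICK_Vector_Splat: the scalar fills every element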
+
+ // We can perform the conversion between vector types in the following cases:
+ // 1) vector types are equivalent AltiVec and GCC vector types
+ // 2) lax vector conversions are permitted and the vector types are of the
+ //    same size
+ if (ToType->isVectorType() && FromType->isVectorType()) {
+ if (Context.areCompatibleVectorTypes(FromType, ToType) ||
+ (Context.getLangOpts().LaxVectorConversions &&
+ (Context.getTypeSize(FromType) == Context.getTypeSize(ToType)))) {
+ ICK = ICK_Vector_Conversion;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool tryAtomicConversion(Sema &S, Expr *From, QualType ToType,
+ bool InOverloadResolution,
+ StandardConversionSequence &SCS,
+ bool CStyle);
+
+/// IsStandardConversion - Determines whether there is a standard
+/// conversion sequence (C++ [conv], C++ [over.ics.scs]) from the
+/// expression From to the type ToType. Standard conversion sequences
+/// only consider non-class types; for conversions that involve class
+/// types, use TryImplicitConversion. If a conversion exists, SCS will
+/// contain the standard conversion sequence required to perform this
+/// conversion and this routine will return true. Otherwise, this
+/// routine will return false and the value of SCS is unspecified.
+static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
+ bool InOverloadResolution,
+ StandardConversionSequence &SCS,
+ bool CStyle,
+ bool AllowObjCWritebackConversion) {
+ QualType FromType = From->getType();
+
+ // Standard conversions (C++ [conv])
+ SCS.setAsIdentityConversion();
+ SCS.DeprecatedStringLiteralToCharPtr = false;
+ SCS.IncompatibleObjC = false;
+ SCS.setFromType(FromType);
+ SCS.CopyConstructor = 0;
+
+ // There are no standard conversions for class types in C++, so
+ // abort early. When overloading in C, however, we do permit record
+ // operands and let them fall through to the compatible-type check below.
+ if (FromType->isRecordType() || ToType->isRecordType()) {
+ if (S.getLangOpts().CPlusPlus)
+ return false;
+
+ // When we're overloading in C, we allow compatible struct/union types
+ // here; they are handled as a "compatible-type" conversion below.
+ }
+
+ // The first conversion can be an lvalue-to-rvalue conversion,
+ // array-to-pointer conversion, or function-to-pointer conversion
+ // (C++ 4p1).
+
+ if (FromType == S.Context.OverloadTy) {
+ DeclAccessPair AccessPair;
+ if (FunctionDecl *Fn
+ = S.ResolveAddressOfOverloadedFunction(From, ToType, false,
+ AccessPair)) {
+ // We were able to resolve the address of the overloaded function,
+ // so we can convert to the type of that function.
+ FromType = Fn->getType();
+
+ // we can sometimes resolve &foo<int> regardless of ToType, so check
+ // if the type matches (identity) or we are converting to bool
+ if (!S.Context.hasSameUnqualifiedType(
+ S.ExtractUnqualifiedFunctionType(ToType), FromType)) {
+ QualType resultTy;
+ // if the function type matches except for [[noreturn]], it's ok
+ if (!S.IsNoReturnConversion(FromType,
+ S.ExtractUnqualifiedFunctionType(ToType), resultTy))
+ // otherwise, only a boolean conversion is standard
+ if (!ToType->isBooleanType())
+ return false;
+ }
+
+ // Check if the "from" expression is taking the address of an overloaded
+ // function and recompute the FromType accordingly. Take advantage of the
+ // fact that non-static member functions *must* have such an address-of
+ // expression.
+ CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn);
+ if (Method && !Method->isStatic()) {
+ assert(isa<UnaryOperator>(From->IgnoreParens()) &&
+ "Non-unary operator on non-static member address");
+ assert(cast<UnaryOperator>(From->IgnoreParens())->getOpcode()
+ == UO_AddrOf &&
+ "Non-address-of operator on non-static member address");
+ const Type *ClassType
+ = S.Context.getTypeDeclType(Method->getParent()).getTypePtr();
+ FromType = S.Context.getMemberPointerType(FromType, ClassType);
+ } else if (isa<UnaryOperator>(From->IgnoreParens())) {
+ assert(cast<UnaryOperator>(From->IgnoreParens())->getOpcode() ==
+ UO_AddrOf &&
+ "Non-address-of operator for overloaded function expression");
+ FromType = S.Context.getPointerType(FromType);
+ }
+
+ // Check that we've computed the proper type after overload resolution.
+ assert(S.Context.hasSameType(
+ FromType,
+ S.FixOverloadedFunctionReference(From, AccessPair, Fn)->getType()));
+ } else {
+ return false;
+ }
+ }
+ // Lvalue-to-rvalue conversion (C++11 4.1):
+ // A glvalue (3.10) of a non-function, non-array type T can
+ // be converted to a prvalue.
+ bool argIsLValue = From->isGLValue();
+ if (argIsLValue &&
+ !FromType->isFunctionType() && !FromType->isArrayType() &&
+ S.Context.getCanonicalType(FromType) != S.Context.OverloadTy) {
+ SCS.First = ICK_Lvalue_To_Rvalue;
+
+ // C11 6.3.2.1p2:
+ // ... if the lvalue has atomic type, the value has the non-atomic version
+ // of the type of the lvalue ...
+ if (const AtomicType *Atomic = FromType->getAs<AtomicType>())
+ FromType = Atomic->getValueType();
+
+ // If T is a non-class type, the type of the rvalue is the
+ // cv-unqualified version of T. Otherwise, the type of the rvalue
+ // is T (C++ 4.1p1). C++ can't get here with class types; in C, we
+ // just strip the qualifiers because they don't matter.
+ FromType = FromType.getUnqualifiedType();
+ } else if (FromType->isArrayType()) {
+ // Array-to-pointer conversion (C++ 4.2)
+ SCS.First = ICK_Array_To_Pointer;
+
+ // An lvalue or rvalue of type "array of N T" or "array of unknown
+ // bound of T" can be converted to an rvalue of type "pointer to
+ // T" (C++ 4.2p1).
+ FromType = S.Context.getArrayDecayedType(FromType);
+
+ if (S.IsStringLiteralToNonConstPointerConversion(From, ToType)) {
+ // This conversion is deprecated. (C++ D.4).
+ SCS.DeprecatedStringLiteralToCharPtr = true;
+
+ // For the purpose of ranking in overload resolution
+ // (13.3.3.1.1), this conversion is considered an
+ // array-to-pointer conversion followed by a qualification
+ // conversion (4.4). (C++ 4.2p2)
+ SCS.Second = ICK_Identity;
+ SCS.Third = ICK_Qualification;
+ SCS.QualificationIncludesObjCLifetime = false;
+ SCS.setAllToTypes(FromType);
+ return true;
+ }
+ } else if (FromType->isFunctionType() && argIsLValue) {
+ // Function-to-pointer conversion (C++ 4.3).
+ SCS.First = ICK_Function_To_Pointer;
+
+ // An lvalue of function type T can be converted to an rvalue of
+ // type "pointer to T." The result is a pointer to the
+ // function. (C++ 4.3p1).
+ FromType = S.Context.getPointerType(FromType);
+ } else {
+ // We don't require any conversions for the first step.
+ SCS.First = ICK_Identity;
+ }
+ SCS.setToType(0, FromType);
+
+ // The second conversion can be an integral promotion, floating
+ // point promotion, integral conversion, floating point conversion,
+ // floating-integral conversion, pointer conversion,
+ // pointer-to-member conversion, or boolean conversion (C++ 4p1).
+ // For overloading in C, this can also be a "compatible-type"
+ // conversion.
+ bool IncompatibleObjC = false;
+ ImplicitConversionKind SecondICK = ICK_Identity;
+ if (S.Context.hasSameUnqualifiedType(FromType, ToType)) {
+ // The unqualified versions of the types are the same: there's no
+ // conversion to do.
+ SCS.Second = ICK_Identity;
+ } else if (S.IsIntegralPromotion(From, FromType, ToType)) {
+ // Integral promotion (C++ 4.5).
+ SCS.Second = ICK_Integral_Promotion;
+ FromType = ToType.getUnqualifiedType();
+ } else if (S.IsFloatingPointPromotion(FromType, ToType)) {
+ // Floating point promotion (C++ 4.6).
+ SCS.Second = ICK_Floating_Promotion;
+ FromType = ToType.getUnqualifiedType();
+ } else if (S.IsComplexPromotion(FromType, ToType)) {
+ // Complex promotion (Clang extension)
+ SCS.Second = ICK_Complex_Promotion;
+ FromType = ToType.getUnqualifiedType();
+ } else if (ToType->isBooleanType() &&
+ (FromType->isArithmeticType() ||
+ FromType->isAnyPointerType() ||
+ FromType->isBlockPointerType() ||
+ FromType->isMemberPointerType() ||
+ FromType->isNullPtrType())) {
+ // Boolean conversions (C++ 4.12).
+ SCS.Second = ICK_Boolean_Conversion;
+ FromType = S.Context.BoolTy;
+ } else if (FromType->isIntegralOrUnscopedEnumerationType() &&
+ ToType->isIntegralType(S.Context)) {
+ // Integral conversions (C++ 4.7).
+ SCS.Second = ICK_Integral_Conversion;
+ FromType = ToType.getUnqualifiedType();
+ } else if (FromType->isAnyComplexType() && ToType->isComplexType()) {
+ // Complex conversions (C99 6.3.1.6)
+ SCS.Second = ICK_Complex_Conversion;
+ FromType = ToType.getUnqualifiedType();
+ } else if ((FromType->isAnyComplexType() && ToType->isArithmeticType()) ||
+ (ToType->isAnyComplexType() && FromType->isArithmeticType())) {
+ // Complex-real conversions (C99 6.3.1.7)
+ SCS.Second = ICK_Complex_Real;
+ FromType = ToType.getUnqualifiedType();
+ } else if (FromType->isRealFloatingType() && ToType->isRealFloatingType()) {
+ // Floating point conversions (C++ 4.8).
+ SCS.Second = ICK_Floating_Conversion;
+ FromType = ToType.getUnqualifiedType();
+ } else if ((FromType->isRealFloatingType() &&
+ ToType->isIntegralType(S.Context)) ||
+ (FromType->isIntegralOrUnscopedEnumerationType() &&
+ ToType->isRealFloatingType())) {
+ // Floating-integral conversions (C++ 4.9).
+ SCS.Second = ICK_Floating_Integral;
+ FromType = ToType.getUnqualifiedType();
+ } else if (S.IsBlockPointerConversion(FromType, ToType, FromType)) {
+ SCS.Second = ICK_Block_Pointer_Conversion;
+ } else if (AllowObjCWritebackConversion &&
+ S.isObjCWritebackConversion(FromType, ToType, FromType)) {
+ SCS.Second = ICK_Writeback_Conversion;
+ } else if (S.IsPointerConversion(From, FromType, ToType, InOverloadResolution,
+ FromType, IncompatibleObjC)) {
+ // Pointer conversions (C++ 4.10).
+ SCS.Second = ICK_Pointer_Conversion;
+ SCS.IncompatibleObjC = IncompatibleObjC;
+ FromType = FromType.getUnqualifiedType();
+ } else if (S.IsMemberPointerConversion(From, FromType, ToType,
+ InOverloadResolution, FromType)) {
+ // Pointer to member conversions (4.11).
+ SCS.Second = ICK_Pointer_Member;
+ } else if (IsVectorConversion(S.Context, FromType, ToType, SecondICK)) {
+ SCS.Second = SecondICK;
+ FromType = ToType.getUnqualifiedType();
+ } else if (!S.getLangOpts().CPlusPlus &&
+ S.Context.typesAreCompatible(ToType, FromType)) {
+ // Compatible conversions (Clang extension for C function overloading)
+ SCS.Second = ICK_Compatible_Conversion;
+ FromType = ToType.getUnqualifiedType();
+ } else if (S.IsNoReturnConversion(FromType, ToType, FromType)) {
+ // Treat a conversion that strips "noreturn" as an identity conversion.
+ SCS.Second = ICK_NoReturn_Adjustment;
+ } else if (IsTransparentUnionStandardConversion(S, From, ToType,
+ InOverloadResolution,
+ SCS, CStyle)) {
+ SCS.Second = ICK_TransparentUnionConversion;
+ FromType = ToType;
+ } else if (tryAtomicConversion(S, From, ToType, InOverloadResolution, SCS,
+ CStyle)) {
+ // tryAtomicConversion has updated the standard conversion sequence
+ // appropriately.
+ return true;
+ } else {
+ // No second conversion required.
+ SCS.Second = ICK_Identity;
+ }
+ SCS.setToType(1, FromType);
+
+ QualType CanonFrom;
+ QualType CanonTo;
+ // The third conversion can be a qualification conversion (C++ 4p1).
+ bool ObjCLifetimeConversion;
+ if (S.IsQualificationConversion(FromType, ToType, CStyle,
+ ObjCLifetimeConversion)) {
+ SCS.Third = ICK_Qualification;
+ SCS.QualificationIncludesObjCLifetime = ObjCLifetimeConversion;
+ FromType = ToType;
+ CanonFrom = S.Context.getCanonicalType(FromType);
+ CanonTo = S.Context.getCanonicalType(ToType);
+ } else {
+ // No conversion required
+ SCS.Third = ICK_Identity;
+
+ // C++ [over.best.ics]p6:
+ // [...] Any difference in top-level cv-qualification is
+ // subsumed by the initialization itself and does not constitute
+ // a conversion. [...]
+ CanonFrom = S.Context.getCanonicalType(FromType);
+ CanonTo = S.Context.getCanonicalType(ToType);
+ if (CanonFrom.getLocalUnqualifiedType()
+ == CanonTo.getLocalUnqualifiedType() &&
+ (CanonFrom.getLocalCVRQualifiers() != CanonTo.getLocalCVRQualifiers()
+ || CanonFrom.getObjCGCAttr() != CanonTo.getObjCGCAttr()
+ || CanonFrom.getObjCLifetime() != CanonTo.getObjCLifetime())) {
+ FromType = ToType;
+ CanonFrom = CanonTo;
+ }
+ }
+ SCS.setToType(2, FromType);
+
+ // If we have not converted the argument type to the parameter type,
+ // this is a bad conversion sequence.
+ if (CanonFrom != CanonTo)
+ return false;
+
+ return true;
+}
+
+static bool
+IsTransparentUnionStandardConversion(Sema &S, Expr* From,
+ QualType &ToType,
+ bool InOverloadResolution,
+ StandardConversionSequence &SCS,
+ bool CStyle) {
+
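+ // A transparent union parameter accepts an argument of any of its field
+ // types directly, e.g.:
+ //   typedef union {
+ //     int *ip;
+ //     float *fp;
+ //   } ptr_u __attribute__((transparent_union));
+ //   void f(ptr_u);
+ //   int i;
+ //   f(&i);   // &i initializes the 'int *' field of the transparent union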
+ const RecordType *UT = ToType->getAsUnionType();
+ if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ return false;
+ // The field to initialize within the transparent union.
+ RecordDecl *UD = UT->getDecl();
+ // It's compatible if the expression matches any of the fields.
+ for (RecordDecl::field_iterator it = UD->field_begin(),
+ itend = UD->field_end();
+ it != itend; ++it) {
+ if (IsStandardConversion(S, From, it->getType(), InOverloadResolution, SCS,
+ CStyle, /*ObjCWritebackConversion=*/false)) {
+ ToType = it->getType();
+ return true;
+ }
+ }
+ return false;
+}
+
+/// IsIntegralPromotion - Determines whether the conversion from the
+/// expression From (whose potentially-adjusted type is FromType) to
+/// ToType is an integral promotion (C++ 4.5). If so, returns true and
+/// sets PromotedType to the promoted type.
+bool Sema::IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType) {
+ const BuiltinType *To = ToType->getAs<BuiltinType>();
+ // All integers are built-in.
+ if (!To) {
+ return false;
+ }
+
+ // An rvalue of type char, signed char, unsigned char, short int, or
+ // unsigned short int can be converted to an rvalue of type int if
+ // int can represent all the values of the source type; otherwise,
+ // the source rvalue can be converted to an rvalue of type unsigned
+ // int (C++ 4.5p1).
+ if (FromType->isPromotableIntegerType() && !FromType->isBooleanType() &&
+ !FromType->isEnumeralType()) {
+ if (// We can promote any signed, promotable integer type to an int
+ (FromType->isSignedIntegerType() ||
+ // We can promote any unsigned integer type whose size is
+ // less than int to an int.
+ (!FromType->isSignedIntegerType() &&
+ Context.getTypeSize(FromType) < Context.getTypeSize(ToType)))) {
+ return To->getKind() == BuiltinType::Int;
+ }
+
+ return To->getKind() == BuiltinType::UInt;
+ }
+
+ // C++0x [conv.prom]p3:
+ // A prvalue of an unscoped enumeration type whose underlying type is not
+ // fixed (7.2) can be converted to a prvalue of the first of the
+ // following types that can represent all the values of the enumeration
+ // (i.e., the values in the range bmin to bmax as described in 7.2): int,
+ // unsigned int, long int, unsigned long int, long long int, or unsigned
+ // long long int. If none of the types in that list can represent all the
+ // values of the enumeration, a prvalue of an unscoped enumeration
+ // type can be converted to a prvalue of the extended integer type
+ // with lowest integer conversion rank (4.13) greater than the rank of long
+ // long in which all the values of the enumeration can be represented. If
+ // there are two such extended types, the signed one is chosen.
+ if (const EnumType *FromEnumType = FromType->getAs<EnumType>()) {
+ // C++0x 7.2p9: Note that this implicit enum to int conversion is not
+ // provided for a scoped enumeration.
+ if (FromEnumType->getDecl()->isScoped())
+ return false;
+
+ // We have already pre-calculated the promotion type, so this is trivial.
+ if (ToType->isIntegerType() &&
+ !RequireCompleteType(From->getLocStart(), FromType, PDiag()))
+ return Context.hasSameUnqualifiedType(ToType,
+ FromEnumType->getDecl()->getPromotionType());
+ }
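+ // For example:
+ //   enum Color { Red, Green, Blue };   // unscoped, no fixed underlying type
+ //   int i = Red;   // promotes via Color's precomputed promotion type (int)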
+
+ // C++0x [conv.prom]p2:
+ // A prvalue of type char16_t, char32_t, or wchar_t (3.9.1) can be converted
+ // to a prvalue of the first of the following types that can
+ // represent all the values of its underlying type: int, unsigned int,
+ // long int, unsigned long int, long long int, or unsigned long long int.
+ // If none of the types in that list can represent all the values of its
+ // underlying type, a prvalue of type char16_t, char32_t,
+ // or wchar_t can be converted to a prvalue of its underlying
+ // type.
+ if (FromType->isAnyCharacterType() && !FromType->isCharType() &&
+ ToType->isIntegerType()) {
+ // Determine whether the type we're converting from is signed or
+ // unsigned.
+ bool FromIsSigned = FromType->isSignedIntegerType();
+ uint64_t FromSize = Context.getTypeSize(FromType);
+
+ // The types we'll try to promote to, in the appropriate
+ // order. Try each of these types.
+ QualType PromoteTypes[6] = {
+ Context.IntTy, Context.UnsignedIntTy,
+ Context.LongTy, Context.UnsignedLongTy ,
+ Context.LongLongTy, Context.UnsignedLongLongTy
+ };
+ for (int Idx = 0; Idx < 6; ++Idx) {
+ uint64_t ToSize = Context.getTypeSize(PromoteTypes[Idx]);
+ if (FromSize < ToSize ||
+ (FromSize == ToSize &&
+ FromIsSigned == PromoteTypes[Idx]->isSignedIntegerType())) {
+ // We found the type that we can promote to. If this is the
+ // type we wanted, we have a promotion. Otherwise, no
+ // promotion.
+ return Context.hasSameUnqualifiedType(ToType, PromoteTypes[Idx]);
+ }
+ }
+ }
+
+ // An rvalue for an integral bit-field (9.6) can be converted to an
+ // rvalue of type int if int can represent all the values of the
+ // bit-field; otherwise, it can be converted to unsigned int if
+ // unsigned int can represent all the values of the bit-field. If
+ // the bit-field is larger yet, no integral promotion applies to
+ // it. If the bit-field has an enumerated type, it is treated as any
+ // other value of that type for promotion purposes (C++ 4.5p3).
+ // FIXME: We should delay checking of bit-fields until we actually perform the
+ // conversion.
+ using llvm::APSInt;
+ if (From)
+ if (FieldDecl *MemberDecl = From->getBitField()) {
+ APSInt BitWidth;
+ if (FromType->isIntegralType(Context) &&
+ MemberDecl->getBitWidth()->isIntegerConstantExpr(BitWidth, Context)) {
+ APSInt ToSize(BitWidth.getBitWidth(), BitWidth.isUnsigned());
+ ToSize = Context.getTypeSize(ToType);
+
+ // Are we promoting to an int from a bitfield that fits in an int?
+ if (BitWidth < ToSize ||
+ (FromType->isSignedIntegerType() && BitWidth <= ToSize)) {
+ return To->getKind() == BuiltinType::Int;
+ }
+
+ // Are we promoting to an unsigned int from an unsigned bitfield
+ // that fits into an unsigned int?
+ if (FromType->isUnsignedIntegerType() && BitWidth <= ToSize) {
+ return To->getKind() == BuiltinType::UInt;
+ }
+
+ return false;
+ }
+ }
+
+ // An rvalue of type bool can be converted to an rvalue of type int,
+ // with false becoming zero and true becoming one (C++ 4.5p4).
+ if (FromType->isBooleanType() && To->getKind() == BuiltinType::Int) {
+ return true;
+ }
+
+ return false;
+}
+
+/// IsFloatingPointPromotion - Determines whether the conversion from
+/// FromType to ToType is a floating point promotion (C++ 4.6). If so,
+/// returns true and sets PromotedType to the promoted type.
+bool Sema::IsFloatingPointPromotion(QualType FromType, QualType ToType) {
+ if (const BuiltinType *FromBuiltin = FromType->getAs<BuiltinType>())
+ if (const BuiltinType *ToBuiltin = ToType->getAs<BuiltinType>()) {
+ // An rvalue of type float can be converted to an rvalue of type
+ // double (C++ 4.6p1).
+ if (FromBuiltin->getKind() == BuiltinType::Float &&
+ ToBuiltin->getKind() == BuiltinType::Double)
+ return true;
+
+ // C99 6.3.1.5p1:
+ // When a float is promoted to double or long double, or a
+ // double is promoted to long double [...].
+ if (!getLangOpts().CPlusPlus &&
+ (FromBuiltin->getKind() == BuiltinType::Float ||
+ FromBuiltin->getKind() == BuiltinType::Double) &&
+ (ToBuiltin->getKind() == BuiltinType::LongDouble))
+ return true;
+
+ // Half can be promoted to float.
+ if (FromBuiltin->getKind() == BuiltinType::Half &&
+ ToBuiltin->getKind() == BuiltinType::Float)
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Determine if a conversion is a complex promotion.
+///
+/// A complex promotion is defined as a complex -> complex conversion
+/// where the conversion between the underlying real types is a
+/// floating-point or integral promotion.
+bool Sema::IsComplexPromotion(QualType FromType, QualType ToType) {
+ const ComplexType *FromComplex = FromType->getAs<ComplexType>();
+ if (!FromComplex)
+ return false;
+
+ const ComplexType *ToComplex = ToType->getAs<ComplexType>();
+ if (!ToComplex)
+ return false;
+
+ return IsFloatingPointPromotion(FromComplex->getElementType(),
+ ToComplex->getElementType()) ||
+ IsIntegralPromotion(0, FromComplex->getElementType(),
+ ToComplex->getElementType());
+}
+
+/// BuildSimilarlyQualifiedPointerType - In a pointer conversion from
+/// the pointer type FromPtr to a pointer to type ToPointee, with the
+/// same type qualifiers as FromPtr has on its pointee type. ToType,
+/// if non-null, will be a pointer to ToPointee that may or may not have
+/// the right set of qualifiers on its pointee.
+///
+static QualType
+BuildSimilarlyQualifiedPointerType(const Type *FromPtr,
+ QualType ToPointee, QualType ToType,
+ ASTContext &Context,
+ bool StripObjCLifetime = false) {
+ assert((FromPtr->getTypeClass() == Type::Pointer ||
+ FromPtr->getTypeClass() == Type::ObjCObjectPointer) &&
+ "Invalid similarly-qualified pointer type");
+
+ // Conversions to 'id' subsume cv-qualifier conversions.
+ if (ToType->isObjCIdType() || ToType->isObjCQualifiedIdType())
+ return ToType.getUnqualifiedType();
+
+ QualType CanonFromPointee
+ = Context.getCanonicalType(FromPtr->getPointeeType());
+ QualType CanonToPointee = Context.getCanonicalType(ToPointee);
+ Qualifiers Quals = CanonFromPointee.getQualifiers();
+
+ if (StripObjCLifetime)
+ Quals.removeObjCLifetime();
+
+ // Exact qualifier match -> return the pointer type we're converting to.
+ if (CanonToPointee.getLocalQualifiers() == Quals) {
+ // ToType is exactly what we need. Return it.
+ if (!ToType.isNull())
+ return ToType.getUnqualifiedType();
+
+ // Build a pointer to ToPointee. It has the right qualifiers
+ // already.
+ if (isa<ObjCObjectPointerType>(ToType))
+ return Context.getObjCObjectPointerType(ToPointee);
+ return Context.getPointerType(ToPointee);
+ }
+
+ // Just build a canonical type that has the right qualifiers.
+ QualType QualifiedCanonToPointee
+ = Context.getQualifiedType(CanonToPointee.getLocalUnqualifiedType(), Quals);
+
+ if (isa<ObjCObjectPointerType>(ToType))
+ return Context.getObjCObjectPointerType(QualifiedCanonToPointee);
+ return Context.getPointerType(QualifiedCanonToPointee);
+}
+
+static bool isNullPointerConstantForConversion(Expr *Expr,
+ bool InOverloadResolution,
+ ASTContext &Context) {
+ // Handle value-dependent integral null pointer constants correctly.
+ // http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#903
+ if (Expr->isValueDependent() && !Expr->isTypeDependent() &&
+ Expr->getType()->isIntegerType() && !Expr->getType()->isEnumeralType())
+ return !InOverloadResolution;
+
+ return Expr->isNullPointerConstant(Context,
+ InOverloadResolution? Expr::NPC_ValueDependentIsNotNull
+ : Expr::NPC_ValueDependentIsNull);
+}
+
+/// IsPointerConversion - Determines whether the conversion of the
+/// expression From, which has the (possibly adjusted) type FromType,
+/// can be converted to the type ToType via a pointer conversion (C++
+/// 4.10). If so, returns true and places the converted type (that
+/// might differ from ToType in its cv-qualifiers at some level) into
+/// ConvertedType.
+///
+/// This routine also supports conversions to and from block pointers
+/// and conversions with Objective-C's 'id', 'id<protocols...>', and
+/// pointers to interfaces. FIXME: Once we've determined the
+/// appropriate overloading rules for Objective-C, we may want to
+/// split the Objective-C checks into a different routine; however,
+/// GCC seems to consider all of these conversions to be pointer
+/// conversions, so for now they live here. IncompatibleObjC will be
+/// set if the conversion is an allowed Objective-C conversion that
+/// should result in a warning.
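+///
+/// For example (C++ 4.10):
+///   struct B {}; struct D : B {};
+///   void take_void(void *);
+///   void take_base(B *);
+///   void g(int *ip, D *dp) {
+///     take_void(ip);   // pointer to object -> pointer to void (4.10p2)
+///     take_base(dp);   // pointer to derived -> pointer to base (4.10p3)
+///   }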
+bool Sema::IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
+ bool InOverloadResolution,
+ QualType& ConvertedType,
+ bool &IncompatibleObjC) {
+ IncompatibleObjC = false;
+ if (isObjCPointerConversion(FromType, ToType, ConvertedType,
+ IncompatibleObjC))
+ return true;
+
+ // Conversion from a null pointer constant to any Objective-C pointer type.
+ if (ToType->isObjCObjectPointerType() &&
+ isNullPointerConstantForConversion(From, InOverloadResolution, Context)) {
+ ConvertedType = ToType;
+ return true;
+ }
+
+ // Blocks: Block pointers can be converted to void*.
+ if (FromType->isBlockPointerType() && ToType->isPointerType() &&
+ ToType->getAs<PointerType>()->getPointeeType()->isVoidType()) {
+ ConvertedType = ToType;
+ return true;
+ }
+ // Blocks: A null pointer constant can be converted to a block
+ // pointer type.
+ if (ToType->isBlockPointerType() &&
+ isNullPointerConstantForConversion(From, InOverloadResolution, Context)) {
+ ConvertedType = ToType;
+ return true;
+ }
+
+ // If the left-hand-side is nullptr_t, the right side can be a null
+ // pointer constant.
+ if (ToType->isNullPtrType() &&
+ isNullPointerConstantForConversion(From, InOverloadResolution, Context)) {
+ ConvertedType = ToType;
+ return true;
+ }
+
+ const PointerType* ToTypePtr = ToType->getAs<PointerType>();
+ if (!ToTypePtr)
+ return false;
+
+ // A null pointer constant can be converted to a pointer type (C++ 4.10p1).
+ if (isNullPointerConstantForConversion(From, InOverloadResolution, Context)) {
+ ConvertedType = ToType;
+ return true;
+ }
+
+ // Beyond this point, both types need to be pointers,
+ // including Objective-C pointers.
+ QualType ToPointeeType = ToTypePtr->getPointeeType();
+ if (FromType->isObjCObjectPointerType() && ToPointeeType->isVoidType() &&
+ !getLangOpts().ObjCAutoRefCount) {
+ ConvertedType = BuildSimilarlyQualifiedPointerType(
+ FromType->getAs<ObjCObjectPointerType>(),
+ ToPointeeType,
+ ToType, Context);
+ return true;
+ }
+ const PointerType *FromTypePtr = FromType->getAs<PointerType>();
+ if (!FromTypePtr)
+ return false;
+
+ QualType FromPointeeType = FromTypePtr->getPointeeType();
+
+ // If the unqualified pointee types are the same, this can't be a
+ // pointer conversion, so don't do all of the work below.
+ if (Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType))
+ return false;
+
+ // An rvalue of type "pointer to cv T," where T is an object type,
+ // can be converted to an rvalue of type "pointer to cv void" (C++
+ // 4.10p2).
+ if (FromPointeeType->isIncompleteOrObjectType() &&
+ ToPointeeType->isVoidType()) {
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
+ ToPointeeType,
+ ToType, Context,
+ /*StripObjCLifetime=*/true);
+ return true;
+ }
+
+ // MSVC allows implicit function to void* type conversion.
+ if (getLangOpts().MicrosoftExt && FromPointeeType->isFunctionType() &&
+ ToPointeeType->isVoidType()) {
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
+ ToPointeeType,
+ ToType, Context);
+ return true;
+ }
+
+ // When we're overloading in C, we allow a special kind of pointer
+ // conversion for compatible-but-not-identical pointee types.
+ if (!getLangOpts().CPlusPlus &&
+ Context.typesAreCompatible(FromPointeeType, ToPointeeType)) {
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
+ ToPointeeType,
+ ToType, Context);
+ return true;
+ }
+
+ // C++ [conv.ptr]p3:
+ //
+ // An rvalue of type "pointer to cv D," where D is a class type,
+ // can be converted to an rvalue of type "pointer to cv B," where
+ // B is a base class (clause 10) of D. If B is an inaccessible
+ // (clause 11) or ambiguous (10.2) base class of D, a program that
+ // necessitates this conversion is ill-formed. The result of the
+ // conversion is a pointer to the base class sub-object of the
+ // derived class object. The null pointer value is converted to
+ // the null pointer value of the destination type.
+ //
+ // Note that we do not check for ambiguity or inaccessibility
+ // here. That is handled by CheckPointerConversion.
+ if (getLangOpts().CPlusPlus &&
+ FromPointeeType->isRecordType() && ToPointeeType->isRecordType() &&
+ !Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType) &&
+ !RequireCompleteType(From->getLocStart(), FromPointeeType, PDiag()) &&
+ IsDerivedFrom(FromPointeeType, ToPointeeType)) {
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
+ ToPointeeType,
+ ToType, Context);
+ return true;
+ }
+
+ if (FromPointeeType->isVectorType() && ToPointeeType->isVectorType() &&
+ Context.areCompatibleVectorTypes(FromPointeeType, ToPointeeType)) {
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromTypePtr,
+ ToPointeeType,
+ ToType, Context);
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Adopt the given qualifiers for the given type.
+static QualType AdoptQualifiers(ASTContext &Context, QualType T, Qualifiers Qs){
+ Qualifiers TQs = T.getQualifiers();
+
+ // Check whether qualifiers already match.
+ if (TQs == Qs)
+ return T;
+
+ if (Qs.compatiblyIncludes(TQs))
+ return Context.getQualifiedType(T, Qs);
+
+ return Context.getQualifiedType(T.getUnqualifiedType(), Qs);
+}
+
+/// isObjCPointerConversion - Determines whether this is an
+/// Objective-C pointer conversion. Subroutine of IsPointerConversion,
+/// with the same arguments and return values.
+bool Sema::isObjCPointerConversion(QualType FromType, QualType ToType,
+ QualType& ConvertedType,
+ bool &IncompatibleObjC) {
+ if (!getLangOpts().ObjC1)
+ return false;
+
+ // The set of qualifiers on the type we're converting from.
+ Qualifiers FromQualifiers = FromType.getQualifiers();
+
+ // First, we handle all conversions on ObjC object pointer types.
+ const ObjCObjectPointerType* ToObjCPtr =
+ ToType->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *FromObjCPtr =
+ FromType->getAs<ObjCObjectPointerType>();
+
+ if (ToObjCPtr && FromObjCPtr) {
+ // If the pointee types are the same (ignoring qualifications),
+ // then this is not a pointer conversion.
+ if (Context.hasSameUnqualifiedType(ToObjCPtr->getPointeeType(),
+ FromObjCPtr->getPointeeType()))
+ return false;
+
+ // Objective C++: We're able to convert between "id" or "Class" and a
+ // pointer to any interface (in both directions).
+ if (ToObjCPtr->isObjCBuiltinType() && FromObjCPtr->isObjCBuiltinType()) {
+ ConvertedType = AdoptQualifiers(Context, ToType, FromQualifiers);
+ return true;
+ }
+ // Conversions with Objective-C's id<...>.
+ if ((FromObjCPtr->isObjCQualifiedIdType() ||
+ ToObjCPtr->isObjCQualifiedIdType()) &&
+ Context.ObjCQualifiedIdTypesAreCompatible(ToType, FromType,
+ /*compare=*/false)) {
+ ConvertedType = AdoptQualifiers(Context, ToType, FromQualifiers);
+ return true;
+ }
+ // Objective C++: We're able to convert from a pointer to an
+ // interface to a pointer to a different interface.
+ if (Context.canAssignObjCInterfaces(ToObjCPtr, FromObjCPtr)) {
+ const ObjCInterfaceType* LHS = ToObjCPtr->getInterfaceType();
+ const ObjCInterfaceType* RHS = FromObjCPtr->getInterfaceType();
+ if (getLangOpts().CPlusPlus && LHS && RHS &&
+ !ToObjCPtr->getPointeeType().isAtLeastAsQualifiedAs(
+ FromObjCPtr->getPointeeType()))
+ return false;
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromObjCPtr,
+ ToObjCPtr->getPointeeType(),
+ ToType, Context);
+ ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers);
+ return true;
+ }
+
+ if (Context.canAssignObjCInterfaces(FromObjCPtr, ToObjCPtr)) {
+ // Okay: this is some kind of implicit downcast of Objective-C
+ // interfaces, which is permitted. However, we're going to
+ // complain about it.
+ IncompatibleObjC = true;
+ ConvertedType = BuildSimilarlyQualifiedPointerType(FromObjCPtr,
+ ToObjCPtr->getPointeeType(),
+ ToType, Context);
+ ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers);
+ return true;
+ }
+ }
+ // Beyond this point, both types need to be C pointers or block pointers.
+ QualType ToPointeeType;
+ if (const PointerType *ToCPtr = ToType->getAs<PointerType>())
+ ToPointeeType = ToCPtr->getPointeeType();
+ else if (const BlockPointerType *ToBlockPtr =
+ ToType->getAs<BlockPointerType>()) {
+ // Objective C++: We're able to convert from a pointer to any object
+ // to a block pointer type.
+ if (FromObjCPtr && FromObjCPtr->isObjCBuiltinType()) {
+ ConvertedType = AdoptQualifiers(Context, ToType, FromQualifiers);
+ return true;
+ }
+ ToPointeeType = ToBlockPtr->getPointeeType();
+ }
+ else if (FromType->getAs<BlockPointerType>() &&
+ ToObjCPtr && ToObjCPtr->isObjCBuiltinType()) {
+ // Objective C++: We're able to convert from a block pointer type to a
+ // pointer to any object.
+ ConvertedType = AdoptQualifiers(Context, ToType, FromQualifiers);
+ return true;
+ }
+ else
+ return false;
+
+ QualType FromPointeeType;
+ if (const PointerType *FromCPtr = FromType->getAs<PointerType>())
+ FromPointeeType = FromCPtr->getPointeeType();
+ else if (const BlockPointerType *FromBlockPtr =
+ FromType->getAs<BlockPointerType>())
+ FromPointeeType = FromBlockPtr->getPointeeType();
+ else
+ return false;
+
+ // If we have pointers to pointers, recursively check whether this
+ // is an Objective-C conversion.
+ if (FromPointeeType->isPointerType() && ToPointeeType->isPointerType() &&
+ isObjCPointerConversion(FromPointeeType, ToPointeeType, ConvertedType,
+ IncompatibleObjC)) {
+ // We always complain about this conversion.
+ IncompatibleObjC = true;
+ ConvertedType = Context.getPointerType(ConvertedType);
+ ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers);
+ return true;
+ }
+ // Allow conversion when the pointee types are themselves Objective-C
+ // pointers that convert to one another, e.g., I** to id* via the pointee
+ // conversion I* to id.
+ if (FromPointeeType->getAs<ObjCObjectPointerType>() &&
+ ToPointeeType->getAs<ObjCObjectPointerType>() &&
+ isObjCPointerConversion(FromPointeeType, ToPointeeType, ConvertedType,
+ IncompatibleObjC)) {
+
+ ConvertedType = Context.getPointerType(ConvertedType);
+ ConvertedType = AdoptQualifiers(Context, ConvertedType, FromQualifiers);
+ return true;
+ }
+
+ // If we have pointers to functions or blocks, check whether the only
+ // differences in the argument and result types are in Objective-C
+ // pointer conversions. If so, we permit the conversion (but
+ // complain about it).
+ const FunctionProtoType *FromFunctionType
+ = FromPointeeType->getAs<FunctionProtoType>();
+ const FunctionProtoType *ToFunctionType
+ = ToPointeeType->getAs<FunctionProtoType>();
+ if (FromFunctionType && ToFunctionType) {
+ // If the function types are exactly the same, this isn't an
+ // Objective-C pointer conversion.
+ if (Context.getCanonicalType(FromPointeeType)
+ == Context.getCanonicalType(ToPointeeType))
+ return false;
+
+ // Perform the quick checks that will tell us whether these
+ // function types are obviously different.
+ if (FromFunctionType->getNumArgs() != ToFunctionType->getNumArgs() ||
+ FromFunctionType->isVariadic() != ToFunctionType->isVariadic() ||
+ FromFunctionType->getTypeQuals() != ToFunctionType->getTypeQuals())
+ return false;
+
+ bool HasObjCConversion = false;
+ if (Context.getCanonicalType(FromFunctionType->getResultType())
+ == Context.getCanonicalType(ToFunctionType->getResultType())) {
+ // Okay, the types match exactly. Nothing to do.
+ } else if (isObjCPointerConversion(FromFunctionType->getResultType(),
+ ToFunctionType->getResultType(),
+ ConvertedType, IncompatibleObjC)) {
+ // Okay, we have an Objective-C pointer conversion.
+ HasObjCConversion = true;
+ } else {
+ // Function types are too different. Abort.
+ return false;
+ }
+
+ // Check argument types.
+ for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumArgs();
+ ArgIdx != NumArgs; ++ArgIdx) {
+ QualType FromArgType = FromFunctionType->getArgType(ArgIdx);
+ QualType ToArgType = ToFunctionType->getArgType(ArgIdx);
+ if (Context.getCanonicalType(FromArgType)
+ == Context.getCanonicalType(ToArgType)) {
+ // Okay, the types match exactly. Nothing to do.
+ } else if (isObjCPointerConversion(FromArgType, ToArgType,
+ ConvertedType, IncompatibleObjC)) {
+ // Okay, we have an Objective-C pointer conversion.
+ HasObjCConversion = true;
+ } else {
+ // Argument types are too different. Abort.
+ return false;
+ }
+ }
+
+ if (HasObjCConversion) {
+ // We had an Objective-C conversion. Allow this pointer
+ // conversion, but complain about it.
+ ConvertedType = AdoptQualifiers(Context, ToType, FromQualifiers);
+ IncompatibleObjC = true;
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// \brief Determine whether this is an Objective-C writeback conversion,
+/// used for parameter passing when performing automatic reference counting.
+///
+/// \param FromType The type we're converting from.
+///
+/// \param ToType The type we're converting to.
+///
+/// \param ConvertedType The type that will be produced after applying
+/// this conversion.
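+///
+/// For example, under ARC, passing &err (where 'NSError *err' is implicitly
+/// __strong) to a parameter of type 'NSError * __autoreleasing *' relies on
+/// this writeback conversion.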
+bool Sema::isObjCWritebackConversion(QualType FromType, QualType ToType,
+ QualType &ConvertedType) {
+ if (!getLangOpts().ObjCAutoRefCount ||
+ Context.hasSameUnqualifiedType(FromType, ToType))
+ return false;
+
+ // Parameter must be a pointer to __autoreleasing (with no other qualifiers).
+ QualType ToPointee;
+ if (const PointerType *ToPointer = ToType->getAs<PointerType>())
+ ToPointee = ToPointer->getPointeeType();
+ else
+ return false;
+
+ Qualifiers ToQuals = ToPointee.getQualifiers();
+ if (!ToPointee->isObjCLifetimeType() ||
+ ToQuals.getObjCLifetime() != Qualifiers::OCL_Autoreleasing ||
+ !ToQuals.withoutObjCLifetime().empty())
+ return false;
+
+ // Argument must be a pointer to __strong or __weak.
+ QualType FromPointee;
+ if (const PointerType *FromPointer = FromType->getAs<PointerType>())
+ FromPointee = FromPointer->getPointeeType();
+ else
+ return false;
+
+ Qualifiers FromQuals = FromPointee.getQualifiers();
+ if (!FromPointee->isObjCLifetimeType() ||
+ (FromQuals.getObjCLifetime() != Qualifiers::OCL_Strong &&
+ FromQuals.getObjCLifetime() != Qualifiers::OCL_Weak))
+ return false;
+
+ // Make sure that we have compatible qualifiers.
+ FromQuals.setObjCLifetime(Qualifiers::OCL_Autoreleasing);
+ if (!ToQuals.compatiblyIncludes(FromQuals))
+ return false;
+
+ // Remove qualifiers from the pointee type we're converting from; they
+ // aren't used in the compatibility check below, and we'll be adding back
+ // qualifiers (with __autoreleasing) if the compatibility check succeeds.
+ FromPointee = FromPointee.getUnqualifiedType();
+
+ // The unqualified form of the pointee types must be compatible.
+ ToPointee = ToPointee.getUnqualifiedType();
+ bool IncompatibleObjC;
+ if (Context.typesAreCompatible(FromPointee, ToPointee))
+ FromPointee = ToPointee;
+ else if (!isObjCPointerConversion(FromPointee, ToPointee, FromPointee,
+ IncompatibleObjC))
+ return false;
+
+ // Construct the type we're converting to, which is a pointer to
+ // __autoreleasing pointee.
+ FromPointee = Context.getQualifiedType(FromPointee, FromQuals);
+ ConvertedType = Context.getPointerType(FromPointee);
+ return true;
+}
+
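+/// IsBlockPointerConversion - Determines whether FromType can be converted
+/// to the block pointer type ToType when the underlying block types differ
+/// only in Objective-C pointer conversions of their result and parameter
+/// types; for example, an 'NSString *(^)(void)' block converts to an
+/// 'id (^)(void)' block type.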
+bool Sema::IsBlockPointerConversion(QualType FromType, QualType ToType,
+ QualType& ConvertedType) {
+ QualType ToPointeeType;
+ if (const BlockPointerType *ToBlockPtr =
+ ToType->getAs<BlockPointerType>())
+ ToPointeeType = ToBlockPtr->getPointeeType();
+ else
+ return false;
+
+ QualType FromPointeeType;
+ if (const BlockPointerType *FromBlockPtr =
+ FromType->getAs<BlockPointerType>())
+ FromPointeeType = FromBlockPtr->getPointeeType();
+ else
+ return false;
+ // We have pointers to blocks; check whether the only
+ // differences in the argument and result types are in Objective-C
+ // pointer conversions. If so, we permit the conversion.
+
+ const FunctionProtoType *FromFunctionType
+ = FromPointeeType->getAs<FunctionProtoType>();
+ const FunctionProtoType *ToFunctionType
+ = ToPointeeType->getAs<FunctionProtoType>();
+
+ if (!FromFunctionType || !ToFunctionType)
+ return false;
+
+ if (Context.hasSameType(FromPointeeType, ToPointeeType))
+ return true;
+
+ // Perform the quick checks that will tell us whether these
+ // function types are obviously different.
+ if (FromFunctionType->getNumArgs() != ToFunctionType->getNumArgs() ||
+ FromFunctionType->isVariadic() != ToFunctionType->isVariadic())
+ return false;
+
+ FunctionType::ExtInfo FromEInfo = FromFunctionType->getExtInfo();
+ FunctionType::ExtInfo ToEInfo = ToFunctionType->getExtInfo();
+ if (FromEInfo != ToEInfo)
+ return false;
+
+ bool IncompatibleObjC = false;
+ if (Context.hasSameType(FromFunctionType->getResultType(),
+ ToFunctionType->getResultType())) {
+ // Okay, the types match exactly. Nothing to do.
+ } else {
+ QualType RHS = FromFunctionType->getResultType();
+ QualType LHS = ToFunctionType->getResultType();
+ if ((!getLangOpts().CPlusPlus || !RHS->isRecordType()) &&
+ !RHS.hasQualifiers() && LHS.hasQualifiers())
+ LHS = LHS.getUnqualifiedType();
+
+ if (Context.hasSameType(RHS,LHS)) {
+ // OK exact match.
+ } else if (isObjCPointerConversion(RHS, LHS,
+ ConvertedType, IncompatibleObjC)) {
+ if (IncompatibleObjC)
+ return false;
+ // Okay, we have an Objective-C pointer conversion.
+ }
+ else
+ return false;
+ }
+
+ // Check argument types.
+ for (unsigned ArgIdx = 0, NumArgs = FromFunctionType->getNumArgs();
+ ArgIdx != NumArgs; ++ArgIdx) {
+ IncompatibleObjC = false;
+ QualType FromArgType = FromFunctionType->getArgType(ArgIdx);
+ QualType ToArgType = ToFunctionType->getArgType(ArgIdx);
+ if (Context.hasSameType(FromArgType, ToArgType)) {
+ // Okay, the types match exactly. Nothing to do.
+ } else if (isObjCPointerConversion(ToArgType, FromArgType,
+ ConvertedType, IncompatibleObjC)) {
+ if (IncompatibleObjC)
+ return false;
+ // Okay, we have an Objective-C pointer conversion.
+ } else
+ // Argument types are too different. Abort.
+ return false;
+ }
+ if (LangOpts.ObjCAutoRefCount &&
+ !Context.FunctionTypesMatchOnNSConsumedAttrs(FromFunctionType,
+ ToFunctionType))
+ return false;
+
+ ConvertedType = ToType;
+ return true;
+}
+
+enum {
+ ft_default,
+ ft_different_class,
+ ft_parameter_arity,
+ ft_parameter_mismatch,
+ ft_return_type,
+ ft_qualifer_mismatch
+};
+
+/// HandleFunctionTypeMismatch - Gives diagnostic information for differing
+/// function types. Catches a different number of parameters, mismatches in
+/// parameter types, and different return types.
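+/// For example, when converting 'void (*)(int)' to 'void (*)(double)', the
+/// diagnostic is annotated with the mismatched parameter types.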
+void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
+ QualType FromType, QualType ToType) {
+ // If either type is not valid, include no extra info.
+ if (FromType.isNull() || ToType.isNull()) {
+ PDiag << ft_default;
+ return;
+ }
+
+ // Get the function type from the pointers.
+ if (FromType->isMemberPointerType() && ToType->isMemberPointerType()) {
+ const MemberPointerType *FromMember = FromType->getAs<MemberPointerType>(),
+ *ToMember = ToType->getAs<MemberPointerType>();
+ if (FromMember->getClass() != ToMember->getClass()) {
+ PDiag << ft_different_class << QualType(ToMember->getClass(), 0)
+ << QualType(FromMember->getClass(), 0);
+ return;
+ }
+ FromType = FromMember->getPointeeType();
+ ToType = ToMember->getPointeeType();
+ }
+
+ if (FromType->isPointerType())
+ FromType = FromType->getPointeeType();
+ if (ToType->isPointerType())
+ ToType = ToType->getPointeeType();
+
+ // Remove references.
+ FromType = FromType.getNonReferenceType();
+ ToType = ToType.getNonReferenceType();
+
+ // Don't print extra info for non-specialized template functions.
+ if (FromType->isInstantiationDependentType() &&
+ !FromType->getAs<TemplateSpecializationType>()) {
+ PDiag << ft_default;
+ return;
+ }
+
+ // No extra info for same types.
+ if (Context.hasSameType(FromType, ToType)) {
+ PDiag << ft_default;
+ return;
+ }
+
+ const FunctionProtoType *FromFunction = FromType->getAs<FunctionProtoType>(),
+ *ToFunction = ToType->getAs<FunctionProtoType>();
+
+ // Both types need to be function types.
+ if (!FromFunction || !ToFunction) {
+ PDiag << ft_default;
+ return;
+ }
+
+ if (FromFunction->getNumArgs() != ToFunction->getNumArgs()) {
+ PDiag << ft_parameter_arity << ToFunction->getNumArgs()
+ << FromFunction->getNumArgs();
+ return;
+ }
+
+ // Handle different parameter types.
+ unsigned ArgPos;
+ if (!FunctionArgTypesAreEqual(FromFunction, ToFunction, &ArgPos)) {
+ PDiag << ft_parameter_mismatch << ArgPos + 1
+ << ToFunction->getArgType(ArgPos)
+ << FromFunction->getArgType(ArgPos);
+ return;
+ }
+
+ // Handle different return type.
+ if (!Context.hasSameType(FromFunction->getResultType(),
+ ToFunction->getResultType())) {
+ PDiag << ft_return_type << ToFunction->getResultType()
+ << FromFunction->getResultType();
+ return;
+ }
+
+ unsigned FromQuals = FromFunction->getTypeQuals(),
+ ToQuals = ToFunction->getTypeQuals();
+ if (FromQuals != ToQuals) {
+ PDiag << ft_qualifer_mismatch << ToQuals << FromQuals;
+ return;
+ }
+
+ // Unable to find a difference, so add no extra info.
+ PDiag << ft_default;
+}
+
+/// FunctionArgTypesAreEqual - This routine checks two function proto types
+/// for equality of their argument types. Caller has already checked that
+/// they have same number of arguments. This routine assumes that Objective-C
+/// pointer types which only differ in their protocol qualifiers are equal.
+/// If the parameters are different, ArgPos will have the parameter index
+/// of the first different parameter.
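+/// For example, parameters of type 'id<NSCopying>' and 'id<NSCoding>' are
+/// treated as equal here, since they differ only in protocol qualifiers.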
+bool Sema::FunctionArgTypesAreEqual(const FunctionProtoType *OldType,
+ const FunctionProtoType *NewType,
+ unsigned *ArgPos) {
+ if (!getLangOpts().ObjC1) {
+ for (FunctionProtoType::arg_type_iterator O = OldType->arg_type_begin(),
+ N = NewType->arg_type_begin(),
+ E = OldType->arg_type_end(); O && (O != E); ++O, ++N) {
+ if (!Context.hasSameType(*O, *N)) {
+ if (ArgPos) *ArgPos = O - OldType->arg_type_begin();
+ return false;
+ }
+ }
+ return true;
+ }
+
+ for (FunctionProtoType::arg_type_iterator O = OldType->arg_type_begin(),
+ N = NewType->arg_type_begin(),
+ E = OldType->arg_type_end(); O && (O != E); ++O, ++N) {
+ QualType ToType = (*O);
+ QualType FromType = (*N);
+ if (!Context.hasSameType(ToType, FromType)) {
+ if (const PointerType *PTTo = ToType->getAs<PointerType>()) {
+ if (const PointerType *PTFr = FromType->getAs<PointerType>())
+ if ((PTTo->getPointeeType()->isObjCQualifiedIdType() &&
+ PTFr->getPointeeType()->isObjCQualifiedIdType()) ||
+ (PTTo->getPointeeType()->isObjCQualifiedClassType() &&
+ PTFr->getPointeeType()->isObjCQualifiedClassType()))
+ continue;
+ }
+ else if (const ObjCObjectPointerType *PTTo =
+ ToType->getAs<ObjCObjectPointerType>()) {
+ if (const ObjCObjectPointerType *PTFr =
+ FromType->getAs<ObjCObjectPointerType>())
+ if (Context.hasSameUnqualifiedType(
+ PTTo->getObjectType()->getBaseType(),
+ PTFr->getObjectType()->getBaseType()))
+ continue;
+ }
+ if (ArgPos) *ArgPos = O - OldType->arg_type_begin();
+ return false;
+ }
+ }
+ return true;
+}
+
+/// CheckPointerConversion - Check the pointer conversion from the
+/// expression From to the type ToType. This routine checks for
+/// ambiguous or inaccessible derived-to-base pointer
+/// conversions for which IsPointerConversion has already returned
+/// true. It returns true and produces a diagnostic if there was an
+/// error, or returns false otherwise.
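+///
+/// For example, given 'struct A {}; struct B : A {}; struct C : A {};
+/// struct D : B, C {};', the conversion from 'D*' to 'A*' is ambiguous and
+/// is diagnosed here.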
+bool Sema::CheckPointerConversion(Expr *From, QualType ToType,
+ CastKind &Kind,
+ CXXCastPath& BasePath,
+ bool IgnoreBaseAccess) {
+ QualType FromType = From->getType();
+ bool IsCStyleOrFunctionalCast = IgnoreBaseAccess;
+
+ Kind = CK_BitCast;
+
+ if (!IsCStyleOrFunctionalCast &&
+ Context.hasSameUnqualifiedType(From->getType(), Context.BoolTy) &&
+ From->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNotNull))
+ DiagRuntimeBehavior(From->getExprLoc(), From,
+ PDiag(diag::warn_impcast_bool_to_null_pointer)
+ << ToType << From->getSourceRange());
+
+ if (const PointerType *ToPtrType = ToType->getAs<PointerType>()) {
+ if (const PointerType *FromPtrType = FromType->getAs<PointerType>()) {
+ QualType FromPointeeType = FromPtrType->getPointeeType(),
+ ToPointeeType = ToPtrType->getPointeeType();
+
+ if (FromPointeeType->isRecordType() && ToPointeeType->isRecordType() &&
+ !Context.hasSameUnqualifiedType(FromPointeeType, ToPointeeType)) {
+ // We must have a derived-to-base conversion. Check an
+ // ambiguous or inaccessible conversion.
+ if (CheckDerivedToBaseConversion(FromPointeeType, ToPointeeType,
+ From->getExprLoc(),
+ From->getSourceRange(), &BasePath,
+ IgnoreBaseAccess))
+ return true;
+
+ // The conversion was successful.
+ Kind = CK_DerivedToBase;
+ }
+ }
+ } else if (const ObjCObjectPointerType *ToPtrType =
+ ToType->getAs<ObjCObjectPointerType>()) {
+ if (const ObjCObjectPointerType *FromPtrType =
+ FromType->getAs<ObjCObjectPointerType>()) {
+ // Objective-C++ conversions are always okay.
+ // FIXME: We should have a different class of conversions for the
+ // Objective-C++ implicit conversions.
+ if (FromPtrType->isObjCBuiltinType() || ToPtrType->isObjCBuiltinType())
+ return false;
+ } else if (FromType->isBlockPointerType()) {
+ Kind = CK_BlockPointerToObjCPointerCast;
+ } else {
+ Kind = CK_CPointerToObjCPointerCast;
+ }
+ } else if (ToType->isBlockPointerType()) {
+ if (!FromType->isBlockPointerType())
+ Kind = CK_AnyPointerToBlockPointerCast;
+ }
+
+ // We shouldn't fall into this case unless it's valid for other
+ // reasons.
+ if (From->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull))
+ Kind = CK_NullToPointer;
+
+ return false;
+}
+
+/// IsMemberPointerConversion - Determines whether the conversion of the
+/// expression From, which has the (possibly adjusted) type FromType, can be
+/// converted to the type ToType via a member pointer conversion (C++ 4.11).
+/// If so, returns true and places the converted type (that might differ from
+/// ToType in its cv-qualifiers at some level) into ConvertedType.
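+///
+/// For example, 'int B::*' converts to 'int D::*' when 'D' is derived from
+/// 'B'; note that member pointer conversions run opposite to the class
+/// hierarchy.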
+bool Sema::IsMemberPointerConversion(Expr *From, QualType FromType,
+ QualType ToType,
+ bool InOverloadResolution,
+ QualType &ConvertedType) {
+ const MemberPointerType *ToTypePtr = ToType->getAs<MemberPointerType>();
+ if (!ToTypePtr)
+ return false;
+
+ // A null pointer constant can be converted to a member pointer (C++ 4.11p1)
+ if (From->isNullPointerConstant(Context,
+ InOverloadResolution? Expr::NPC_ValueDependentIsNotNull
+ : Expr::NPC_ValueDependentIsNull)) {
+ ConvertedType = ToType;
+ return true;
+ }
+
+ // Otherwise, both types have to be member pointers.
+ const MemberPointerType *FromTypePtr = FromType->getAs<MemberPointerType>();
+ if (!FromTypePtr)
+ return false;
+
+ // A pointer to member of B can be converted to a pointer to member of D,
+ // where D is derived from B (C++ 4.11p2).
+ QualType FromClass(FromTypePtr->getClass(), 0);
+ QualType ToClass(ToTypePtr->getClass(), 0);
+
+ if (!Context.hasSameUnqualifiedType(FromClass, ToClass) &&
+ !RequireCompleteType(From->getLocStart(), ToClass, PDiag()) &&
+ IsDerivedFrom(ToClass, FromClass)) {
+ ConvertedType = Context.getMemberPointerType(FromTypePtr->getPointeeType(),
+ ToClass.getTypePtr());
+ return true;
+ }
+
+ return false;
+}
+
+/// CheckMemberPointerConversion - Check the member pointer conversion from the
+/// expression From to the type ToType. This routine checks for ambiguous or
+/// virtual or inaccessible base-to-derived member pointer conversions
+/// for which IsMemberPointerConversion has already returned true. It returns
+/// true and produces a diagnostic if there was an error, or returns false
+/// otherwise.
+bool Sema::CheckMemberPointerConversion(Expr *From, QualType ToType,
+ CastKind &Kind,
+ CXXCastPath &BasePath,
+ bool IgnoreBaseAccess) {
+ QualType FromType = From->getType();
+ const MemberPointerType *FromPtrType = FromType->getAs<MemberPointerType>();
+ if (!FromPtrType) {
+ // This must be a null pointer to member pointer conversion
+ assert(From->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull) &&
+ "Expr must be null pointer constant!");
+ Kind = CK_NullToMemberPointer;
+ return false;
+ }
+
+ const MemberPointerType *ToPtrType = ToType->getAs<MemberPointerType>();
+ assert(ToPtrType && "No member pointer cast has a target type "
+ "that is not a member pointer.");
+
+ QualType FromClass = QualType(FromPtrType->getClass(), 0);
+ QualType ToClass = QualType(ToPtrType->getClass(), 0);
+
+ // FIXME: What about dependent types?
+ assert(FromClass->isRecordType() && "Pointer into non-class.");
+ assert(ToClass->isRecordType() && "Pointer into non-class.");
+
+ CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
+ /*DetectVirtual=*/true);
+ bool DerivationOkay = IsDerivedFrom(ToClass, FromClass, Paths);
+ assert(DerivationOkay &&
+ "Should not have been called if derivation isn't OK.");
+ (void)DerivationOkay;
+
+ if (Paths.isAmbiguous(Context.getCanonicalType(FromClass).
+ getUnqualifiedType())) {
+ std::string PathDisplayStr = getAmbiguousPathsDisplayString(Paths);
+ Diag(From->getExprLoc(), diag::err_ambiguous_memptr_conv)
+ << 0 << FromClass << ToClass << PathDisplayStr << From->getSourceRange();
+ return true;
+ }
+
+ if (const RecordType *VBase = Paths.getDetectedVirtual()) {
+ Diag(From->getExprLoc(), diag::err_memptr_conv_via_virtual)
+ << FromClass << ToClass << QualType(VBase, 0)
+ << From->getSourceRange();
+ return true;
+ }
+
+ if (!IgnoreBaseAccess)
+ CheckBaseClassAccess(From->getExprLoc(), FromClass, ToClass,
+ Paths.front(),
+ diag::err_downcast_from_inaccessible_base);
+
+ // Must be a base to derived member conversion.
+ BuildBasePathArray(Paths, BasePath);
+ Kind = CK_BaseToDerivedMemberPointer;
+ return false;
+}
+
+/// IsQualificationConversion - Determines whether the conversion from
+/// an rvalue of type FromType to ToType is a qualification conversion
+/// (C++ 4.4).
+///
+/// \param ObjCLifetimeConversion Output parameter that will be set to indicate
+/// when the qualification conversion involves a change in the Objective-C
+/// object lifetime.
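+///
+/// For example, 'char**' -> 'const char* const*' is a qualification
+/// conversion, while 'char**' -> 'const char**' is not (C++ 4.4p4).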
+bool
+Sema::IsQualificationConversion(QualType FromType, QualType ToType,
+ bool CStyle, bool &ObjCLifetimeConversion) {
+ FromType = Context.getCanonicalType(FromType);
+ ToType = Context.getCanonicalType(ToType);
+ ObjCLifetimeConversion = false;
+
+ // If FromType and ToType are the same type, this is not a
+ // qualification conversion.
+ if (FromType.getUnqualifiedType() == ToType.getUnqualifiedType())
+ return false;
+
+ // (C++ 4.4p4):
+ // A conversion can add cv-qualifiers at levels other than the first
+ // in multi-level pointers, subject to the following rules: [...]
+ bool PreviousToQualsIncludeConst = true;
+ bool UnwrappedAnyPointer = false;
+ while (Context.UnwrapSimilarPointerTypes(FromType, ToType)) {
+ // Within each iteration of the loop, we check the qualifiers to
+ // determine if this still looks like a qualification
+ // conversion. Then, if all is well, we unwrap one more level of
+ // pointers or pointers-to-members and do it all again
+ // until there are no more pointers or pointers-to-members left to
+ // unwrap.
+ UnwrappedAnyPointer = true;
+
+ Qualifiers FromQuals = FromType.getQualifiers();
+ Qualifiers ToQuals = ToType.getQualifiers();
+
+ // Objective-C ARC:
+ // Check Objective-C lifetime conversions.
+ if (FromQuals.getObjCLifetime() != ToQuals.getObjCLifetime() &&
+ UnwrappedAnyPointer) {
+ if (ToQuals.compatiblyIncludesObjCLifetime(FromQuals)) {
+ ObjCLifetimeConversion = true;
+ FromQuals.removeObjCLifetime();
+ ToQuals.removeObjCLifetime();
+ } else {
+ // Qualification conversions cannot cast between different
+ // Objective-C lifetime qualifiers.
+ return false;
+ }
+ }
+
+ // Allow addition/removal of GC attributes but not changing GC attributes.
+ if (FromQuals.getObjCGCAttr() != ToQuals.getObjCGCAttr() &&
+ (!FromQuals.hasObjCGCAttr() || !ToQuals.hasObjCGCAttr())) {
+ FromQuals.removeObjCGCAttr();
+ ToQuals.removeObjCGCAttr();
+ }
+
+ // -- for every j > 0, if const is in cv 1,j then const is in cv
+ // 2,j, and similarly for volatile.
+ if (!CStyle && !ToQuals.compatiblyIncludes(FromQuals))
+ return false;
+
+ // -- if the cv 1,j and cv 2,j are different, then const is in
+ // every cv for 0 < k < j.
+ if (!CStyle && FromQuals.getCVRQualifiers() != ToQuals.getCVRQualifiers()
+ && !PreviousToQualsIncludeConst)
+ return false;
+
+ // Keep track of whether all prior cv-qualifiers in the "to" type
+ // include const.
+ PreviousToQualsIncludeConst
+ = PreviousToQualsIncludeConst && ToQuals.hasConst();
+ }
+
+ // We are left with FromType and ToType being the pointee types
+ // after unwrapping the original FromType and ToType the same number
+ // of times. If we unwrapped any pointers, and if FromType and
+ // ToType have the same unqualified type (since we checked
+ // qualifiers above), then this is a qualification conversion.
+ return UnwrappedAnyPointer && Context.hasSameUnqualifiedType(FromType,ToType);
+}
+
+/// \brief - Determine whether this is a conversion from a scalar type to an
+/// atomic type.
+///
+/// If successful, updates \c SCS's second and third steps in the conversion
+/// sequence to finish the conversion.
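+///
+/// For example, initializing an '_Atomic(float)' from an 'int' expression
+/// reuses the standard int-to-float conversion to fill in the second and
+/// third steps of the sequence.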
+static bool tryAtomicConversion(Sema &S, Expr *From, QualType ToType,
+ bool InOverloadResolution,
+ StandardConversionSequence &SCS,
+ bool CStyle) {
+ const AtomicType *ToAtomic = ToType->getAs<AtomicType>();
+ if (!ToAtomic)
+ return false;
+
+ StandardConversionSequence InnerSCS;
+ if (!IsStandardConversion(S, From, ToAtomic->getValueType(),
+ InOverloadResolution, InnerSCS,
+ CStyle, /*AllowObjCWritebackConversion=*/false))
+ return false;
+
+ SCS.Second = InnerSCS.Second;
+ SCS.setToType(1, InnerSCS.getToType(1));
+ SCS.Third = InnerSCS.Third;
+ SCS.QualificationIncludesObjCLifetime
+ = InnerSCS.QualificationIncludesObjCLifetime;
+ SCS.setToType(2, InnerSCS.getToType(2));
+ return true;
+}
+
+static bool isFirstArgumentCompatibleWithType(ASTContext &Context,
+ CXXConstructorDecl *Constructor,
+ QualType Type) {
+ const FunctionProtoType *CtorType =
+ Constructor->getType()->getAs<FunctionProtoType>();
+ if (CtorType->getNumArgs() > 0) {
+ QualType FirstArg = CtorType->getArgType(0);
+ if (Context.hasSameUnqualifiedType(Type, FirstArg.getNonReferenceType()))
+ return true;
+ }
+ return false;
+}
+
+static OverloadingResult
+IsInitializerListConstructorConversion(Sema &S, Expr *From, QualType ToType,
+ CXXRecordDecl *To,
+ UserDefinedConversionSequence &User,
+ OverloadCandidateSet &CandidateSet,
+ bool AllowExplicit) {
+ DeclContext::lookup_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd) = S.LookupConstructors(To);
+ Con != ConEnd; ++Con) {
+ NamedDecl *D = *Con;
+ DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
+
+ // Find the constructor (which may be a template).
+ CXXConstructorDecl *Constructor = 0;
+ FunctionTemplateDecl *ConstructorTmpl
+ = dyn_cast<FunctionTemplateDecl>(D);
+ if (ConstructorTmpl)
+ Constructor
+ = cast<CXXConstructorDecl>(ConstructorTmpl->getTemplatedDecl());
+ else
+ Constructor = cast<CXXConstructorDecl>(D);
+
+ bool Usable = !Constructor->isInvalidDecl() &&
+ S.isInitListConstructor(Constructor) &&
+ (AllowExplicit || !Constructor->isExplicit());
+ if (Usable) {
+ // If the first argument is (a reference to) the target type,
+ // suppress conversions.
+ bool SuppressUserConversions =
+ isFirstArgumentCompatibleWithType(S.Context, Constructor, ToType);
+ if (ConstructorTmpl)
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
+ /*ExplicitArgs*/ 0,
+ From, CandidateSet,
+ SuppressUserConversions);
+ else
+ S.AddOverloadCandidate(Constructor, FoundDecl,
+ From, CandidateSet,
+ SuppressUserConversions);
+ }
+ }
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(S, From->getLocStart(), Best, true)) {
+ case OR_Success: {
+ // Record the standard conversion we used and the conversion function.
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(Best->Function);
+ S.MarkFunctionReferenced(From->getLocStart(), Constructor);
+
+ QualType ThisType = Constructor->getThisType(S.Context);
+ // Initializer lists don't have conversions as such.
+ User.Before.setAsIdentityConversion();
+ User.HadMultipleCandidates = HadMultipleCandidates;
+ User.ConversionFunction = Constructor;
+ User.FoundConversionFunction = Best->FoundDecl;
+ User.After.setAsIdentityConversion();
+ User.After.setFromType(ThisType->getAs<PointerType>()->getPointeeType());
+ User.After.setAllToTypes(ToType);
+ return OR_Success;
+ }
+
+ case OR_No_Viable_Function:
+ return OR_No_Viable_Function;
+ case OR_Deleted:
+ return OR_Deleted;
+ case OR_Ambiguous:
+ return OR_Ambiguous;
+ }
+
+ llvm_unreachable("Invalid OverloadResult!");
+}
+
+/// Determines whether there is a user-defined conversion sequence
+/// (C++ [over.ics.user]) that converts expression From to the type
+/// ToType. If such a conversion exists, User will contain the
+/// user-defined conversion sequence that performs such a conversion
+/// and this routine will return true. Otherwise, this routine returns
+/// false and User is unspecified.
+///
+/// \param AllowExplicit true if the conversion should consider C++0x
+/// "explicit" conversion functions as well as non-explicit conversion
+/// functions (C++0x [class.conv.fct]p2).
+static OverloadingResult
+IsUserDefinedConversion(Sema &S, Expr *From, QualType ToType,
+ UserDefinedConversionSequence &User,
+ OverloadCandidateSet &CandidateSet,
+ bool AllowExplicit) {
+ // Whether we will only visit constructors.
+ bool ConstructorsOnly = false;
+
+ // If the type we are converting to is a class type, enumerate its
+ // constructors.
+ if (const RecordType *ToRecordType = ToType->getAs<RecordType>()) {
+ // C++ [over.match.ctor]p1:
+ // When objects of class type are direct-initialized (8.5), or
+ // copy-initialized from an expression of the same or a
+ // derived class type (8.5), overload resolution selects the
+ // constructor. [...] For copy-initialization, the candidate
+ // functions are all the converting constructors (12.3.1) of
+ // that class. The argument list is the expression-list within
+ // the parentheses of the initializer.
+ if (S.Context.hasSameUnqualifiedType(ToType, From->getType()) ||
+ (From->getType()->getAs<RecordType>() &&
+ S.IsDerivedFrom(From->getType(), ToType)))
+ ConstructorsOnly = true;
+
+ S.RequireCompleteType(From->getLocStart(), ToType, S.PDiag());
+ // RequireCompleteType may have returned true due to some invalid decl
+ // during template instantiation, but ToType may be complete enough now
+ // to try to recover.
+ if (ToType->isIncompleteType()) {
+ // We're not going to find any constructors.
+ } else if (CXXRecordDecl *ToRecordDecl
+ = dyn_cast<CXXRecordDecl>(ToRecordType->getDecl())) {
+
+ Expr **Args = &From;
+ unsigned NumArgs = 1;
+ bool ListInitializing = false;
+ if (InitListExpr *InitList = dyn_cast<InitListExpr>(From)) {
+ // But first, see if there is an init-list constructor that will work.
+ OverloadingResult Result = IsInitializerListConstructorConversion(
+ S, From, ToType, ToRecordDecl, User, CandidateSet, AllowExplicit);
+ if (Result != OR_No_Viable_Function)
+ return Result;
+ // Never mind.
+ CandidateSet.clear();
+
+ // If we're list-initializing, we pass the individual elements as
+ // arguments, not the entire list.
+ Args = InitList->getInits();
+ NumArgs = InitList->getNumInits();
+ ListInitializing = true;
+ }
+
+ DeclContext::lookup_iterator Con, ConEnd;
+ for (llvm::tie(Con, ConEnd) = S.LookupConstructors(ToRecordDecl);
+ Con != ConEnd; ++Con) {
+ NamedDecl *D = *Con;
+ DeclAccessPair FoundDecl = DeclAccessPair::make(D, D->getAccess());
+
+ // Find the constructor (which may be a template).
+ CXXConstructorDecl *Constructor = 0;
+ FunctionTemplateDecl *ConstructorTmpl
+ = dyn_cast<FunctionTemplateDecl>(D);
+ if (ConstructorTmpl)
+ Constructor
+ = cast<CXXConstructorDecl>(ConstructorTmpl->getTemplatedDecl());
+ else
+ Constructor = cast<CXXConstructorDecl>(D);
+
+ bool Usable = !Constructor->isInvalidDecl();
+ if (ListInitializing)
+ Usable = Usable && (AllowExplicit || !Constructor->isExplicit());
+ else
+ Usable = Usable && Constructor->isConvertingConstructor(AllowExplicit);
+ if (Usable) {
+ bool SuppressUserConversions = !ConstructorsOnly;
+ if (SuppressUserConversions && ListInitializing) {
+ SuppressUserConversions = false;
+ if (NumArgs == 1) {
+ // If the first argument is (a reference to) the target type,
+ // suppress conversions.
+ SuppressUserConversions = isFirstArgumentCompatibleWithType(
+ S.Context, Constructor, ToType);
+ }
+ }
+ if (ConstructorTmpl)
+ S.AddTemplateOverloadCandidate(ConstructorTmpl, FoundDecl,
+ /*ExplicitArgs*/ 0,
+ llvm::makeArrayRef(Args, NumArgs),
+ CandidateSet, SuppressUserConversions);
+ else
+ // Allow one user-defined conversion when user specifies a
+ // From->ToType conversion via a static cast (C-style, etc.).
+ S.AddOverloadCandidate(Constructor, FoundDecl,
+ llvm::makeArrayRef(Args, NumArgs),
+ CandidateSet, SuppressUserConversions);
+ }
+ }
+ }
+ }
+
+ // Enumerate conversion functions, if we're allowed to.
+ if (ConstructorsOnly || isa<InitListExpr>(From)) {
+ } else if (S.RequireCompleteType(From->getLocStart(), From->getType(),
+ S.PDiag(0) << From->getSourceRange())) {
+ // No conversion functions from incomplete types.
+ } else if (const RecordType *FromRecordType
+ = From->getType()->getAs<RecordType>()) {
+ if (CXXRecordDecl *FromRecordDecl
+ = dyn_cast<CXXRecordDecl>(FromRecordType->getDecl())) {
+ // Add all of the conversion functions as candidates.
+ const UnresolvedSetImpl *Conversions
+ = FromRecordDecl->getVisibleConversionFunctions();
+ for (UnresolvedSetImpl::iterator I = Conversions->begin(),
+ E = Conversions->end(); I != E; ++I) {
+ DeclAccessPair FoundDecl = I.getPair();
+ NamedDecl *D = FoundDecl.getDecl();
+ CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(D->getDeclContext());
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ CXXConversionDecl *Conv;
+ FunctionTemplateDecl *ConvTemplate;
+ if ((ConvTemplate = dyn_cast<FunctionTemplateDecl>(D)))
+ Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
+ else
+ Conv = cast<CXXConversionDecl>(D);
+
+ if (AllowExplicit || !Conv->isExplicit()) {
+ if (ConvTemplate)
+ S.AddTemplateConversionCandidate(ConvTemplate, FoundDecl,
+ ActingContext, From, ToType,
+ CandidateSet);
+ else
+ S.AddConversionCandidate(Conv, FoundDecl, ActingContext,
+ From, ToType, CandidateSet);
+ }
+ }
+ }
+ }
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(S, From->getLocStart(), Best, true)) {
+ case OR_Success:
+ // Record the standard conversion we used and the conversion function.
+ if (CXXConstructorDecl *Constructor
+ = dyn_cast<CXXConstructorDecl>(Best->Function)) {
+ S.MarkFunctionReferenced(From->getLocStart(), Constructor);
+
+ // C++ [over.ics.user]p1:
+ // If the user-defined conversion is specified by a
+ // constructor (12.3.1), the initial standard conversion
+ // sequence converts the source type to the type required by
+ // the argument of the constructor.
+ //
+ QualType ThisType = Constructor->getThisType(S.Context);
+ if (isa<InitListExpr>(From)) {
+ // Initializer lists don't have conversions as such.
+ User.Before.setAsIdentityConversion();
+ } else {
+ if (Best->Conversions[0].isEllipsis())
+ User.EllipsisConversion = true;
+ else {
+ User.Before = Best->Conversions[0].Standard;
+ User.EllipsisConversion = false;
+ }
+ }
+ User.HadMultipleCandidates = HadMultipleCandidates;
+ User.ConversionFunction = Constructor;
+ User.FoundConversionFunction = Best->FoundDecl;
+ User.After.setAsIdentityConversion();
+ User.After.setFromType(ThisType->getAs<PointerType>()->getPointeeType());
+ User.After.setAllToTypes(ToType);
+ return OR_Success;
+ }
+ if (CXXConversionDecl *Conversion
+ = dyn_cast<CXXConversionDecl>(Best->Function)) {
+ S.MarkFunctionReferenced(From->getLocStart(), Conversion);
+
+ // C++ [over.ics.user]p1:
+ //
+ // [...] If the user-defined conversion is specified by a
+ // conversion function (12.3.2), the initial standard
+ // conversion sequence converts the source type to the
+ // implicit object parameter of the conversion function.
+ User.Before = Best->Conversions[0].Standard;
+ User.HadMultipleCandidates = HadMultipleCandidates;
+ User.ConversionFunction = Conversion;
+ User.FoundConversionFunction = Best->FoundDecl;
+ User.EllipsisConversion = false;
+
+ // C++ [over.ics.user]p2:
+ // The second standard conversion sequence converts the
+ // result of the user-defined conversion to the target type
+ // for the sequence. Since an implicit conversion sequence
+ // is an initialization, the special rules for
+ // initialization by user-defined conversion apply when
+ // selecting the best user-defined conversion for a
+ // user-defined conversion sequence (see 13.3.3 and
+ // 13.3.3.1).
+ User.After = Best->FinalConversion;
+ return OR_Success;
+ }
+ llvm_unreachable("Not a constructor or conversion function?");
+
+ case OR_No_Viable_Function:
+ return OR_No_Viable_Function;
+ case OR_Deleted:
+ // No conversion here! We're done.
+ return OR_Deleted;
+
+ case OR_Ambiguous:
+ return OR_Ambiguous;
+ }
+
+ llvm_unreachable("Invalid OverloadResult!");
+}
+
+bool
+Sema::DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType) {
+ ImplicitConversionSequence ICS;
+ OverloadCandidateSet CandidateSet(From->getExprLoc());
+ OverloadingResult OvResult =
+ IsUserDefinedConversion(*this, From, ToType, ICS.UserDefined,
+ CandidateSet, false);
+ if (OvResult == OR_Ambiguous)
+ Diag(From->getLocStart(),
+ diag::err_typecheck_ambiguous_condition)
+ << From->getType() << ToType << From->getSourceRange();
+ else if (OvResult == OR_No_Viable_Function && !CandidateSet.empty())
+ Diag(From->getLocStart(),
+ diag::err_typecheck_nonviable_condition)
+ << From->getType() << ToType << From->getSourceRange();
+ else
+ return false;
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, From);
+ return true;
+}
+
+/// \brief Compare the user-defined conversion functions or constructors
+/// of two user-defined conversion sequences to determine whether any ordering
+/// is possible.
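+///
+/// For example, in Objective-C++ under C++11, a captureless lambda can
+/// convert both to a function pointer and to a block pointer; when both
+/// conversion functions are candidates, the conversion to a function pointer
+/// is preferred.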
+static ImplicitConversionSequence::CompareKind
+compareConversionFunctions(Sema &S,
+ FunctionDecl *Function1,
+ FunctionDecl *Function2) {
+ if (!S.getLangOpts().ObjC1 || !S.getLangOpts().CPlusPlus0x)
+ return ImplicitConversionSequence::Indistinguishable;
+
+ // Objective-C++:
+ // If both conversion functions are implicitly-declared conversions from
+ // a lambda closure type to a function pointer and a block pointer,
+ // respectively, always prefer the conversion to a function pointer,
+ // because the function pointer is more lightweight and is more likely
+ // to keep code working.
+ CXXConversionDecl *Conv1 = dyn_cast<CXXConversionDecl>(Function1);
+ if (!Conv1)
+ return ImplicitConversionSequence::Indistinguishable;
+
+ CXXConversionDecl *Conv2 = dyn_cast<CXXConversionDecl>(Function2);
+ if (!Conv2)
+ return ImplicitConversionSequence::Indistinguishable;
+
+ if (Conv1->getParent()->isLambda() && Conv2->getParent()->isLambda()) {
+ bool Block1 = Conv1->getConversionType()->isBlockPointerType();
+ bool Block2 = Conv2->getConversionType()->isBlockPointerType();
+ if (Block1 != Block2)
+ return Block1? ImplicitConversionSequence::Worse
+ : ImplicitConversionSequence::Better;
+ }
+
+ return ImplicitConversionSequence::Indistinguishable;
+}
+
+/// CompareImplicitConversionSequences - Compare two implicit
+/// conversion sequences to determine whether one is better than the
+/// other or if they are indistinguishable (C++ 13.3.3.2).
+static ImplicitConversionSequence::CompareKind
+CompareImplicitConversionSequences(Sema &S,
+ const ImplicitConversionSequence& ICS1,
+ const ImplicitConversionSequence& ICS2)
+{
+ // (C++ 13.3.3.2p2): When comparing the basic forms of implicit
+ // conversion sequences (as defined in 13.3.3.1)
+ // -- a standard conversion sequence (13.3.3.1.1) is a better
+ // conversion sequence than a user-defined conversion sequence or
+ // an ellipsis conversion sequence, and
+ // -- a user-defined conversion sequence (13.3.3.1.2) is a better
+ // conversion sequence than an ellipsis conversion sequence
+ // (13.3.3.1.3).
+ //
+ // C++0x [over.best.ics]p10:
+ // For the purpose of ranking implicit conversion sequences as
+ // described in 13.3.3.2, the ambiguous conversion sequence is
+ // treated as a user-defined sequence that is indistinguishable
+ // from any other user-defined conversion sequence.
+ if (ICS1.getKindRank() < ICS2.getKindRank())
+ return ImplicitConversionSequence::Better;
+ if (ICS2.getKindRank() < ICS1.getKindRank())
+ return ImplicitConversionSequence::Worse;
+
+ // The following checks require both conversion sequences to be of
+ // the same kind.
+ if (ICS1.getKind() != ICS2.getKind())
+ return ImplicitConversionSequence::Indistinguishable;
+
+ ImplicitConversionSequence::CompareKind Result =
+ ImplicitConversionSequence::Indistinguishable;
+
+ // Two implicit conversion sequences of the same form are
+ // indistinguishable conversion sequences unless one of the
+ // following rules applies (C++ 13.3.3.2p3):
+ if (ICS1.isStandard())
+ Result = CompareStandardConversionSequences(S,
+ ICS1.Standard, ICS2.Standard);
+ else if (ICS1.isUserDefined()) {
+ // User-defined conversion sequence U1 is a better conversion
+ // sequence than another user-defined conversion sequence U2 if
+ // they contain the same user-defined conversion function or
+ // constructor and if the second standard conversion sequence of
+ // U1 is better than the second standard conversion sequence of
+ // U2 (C++ 13.3.3.2p3).
+ if (ICS1.UserDefined.ConversionFunction ==
+ ICS2.UserDefined.ConversionFunction)
+ Result = CompareStandardConversionSequences(S,
+ ICS1.UserDefined.After,
+ ICS2.UserDefined.After);
+ else
+ Result = compareConversionFunctions(S,
+ ICS1.UserDefined.ConversionFunction,
+ ICS2.UserDefined.ConversionFunction);
+ }
+
+ // List-initialization sequence L1 is a better conversion sequence than
+ // list-initialization sequence L2 if L1 converts to std::initializer_list<X>
+ // for some X and L2 does not.
+ if (Result == ImplicitConversionSequence::Indistinguishable &&
+ !ICS1.isBad() &&
+ ICS1.isListInitializationSequence() &&
+ ICS2.isListInitializationSequence()) {
+ if (ICS1.isStdInitializerListElement() &&
+ !ICS2.isStdInitializerListElement())
+ return ImplicitConversionSequence::Better;
+ if (!ICS1.isStdInitializerListElement() &&
+ ICS2.isStdInitializerListElement())
+ return ImplicitConversionSequence::Worse;
+ }
+
+ return Result;
+}
+
+static bool hasSimilarType(ASTContext &Context, QualType T1, QualType T2) {
+ while (Context.UnwrapSimilarPointerTypes(T1, T2)) {
+ Qualifiers Quals;
+ T1 = Context.getUnqualifiedArrayType(T1, Quals);
+ T2 = Context.getUnqualifiedArrayType(T2, Quals);
+ }
+
+ return Context.hasSameUnqualifiedType(T1, T2);
+}
+
+// Per 13.3.3.2p3, compare the given standard conversion sequences to
+// determine if one is a proper subset of the other.
+static ImplicitConversionSequence::CompareKind
+compareStandardConversionSubsets(ASTContext &Context,
+ const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2) {
+ ImplicitConversionSequence::CompareKind Result
+ = ImplicitConversionSequence::Indistinguishable;
+
+ // the identity conversion sequence is considered to be a subsequence of
+ // any non-identity conversion sequence
+ if (SCS1.isIdentityConversion() && !SCS2.isIdentityConversion())
+ return ImplicitConversionSequence::Better;
+ else if (!SCS1.isIdentityConversion() && SCS2.isIdentityConversion())
+ return ImplicitConversionSequence::Worse;
+
+ if (SCS1.Second != SCS2.Second) {
+ if (SCS1.Second == ICK_Identity)
+ Result = ImplicitConversionSequence::Better;
+ else if (SCS2.Second == ICK_Identity)
+ Result = ImplicitConversionSequence::Worse;
+ else
+ return ImplicitConversionSequence::Indistinguishable;
+ } else if (!hasSimilarType(Context, SCS1.getToType(1), SCS2.getToType(1)))
+ return ImplicitConversionSequence::Indistinguishable;
+
+ if (SCS1.Third == SCS2.Third) {
+ return Context.hasSameType(SCS1.getToType(2), SCS2.getToType(2))? Result
+ : ImplicitConversionSequence::Indistinguishable;
+ }
+
+ if (SCS1.Third == ICK_Identity)
+ return Result == ImplicitConversionSequence::Worse
+ ? ImplicitConversionSequence::Indistinguishable
+ : ImplicitConversionSequence::Better;
+
+ if (SCS2.Third == ICK_Identity)
+ return Result == ImplicitConversionSequence::Better
+ ? ImplicitConversionSequence::Indistinguishable
+ : ImplicitConversionSequence::Worse;
+
+ return ImplicitConversionSequence::Indistinguishable;
+}
+
+/// \brief Determine whether one of the given reference bindings is better
+/// than the other based on what kind of bindings they are.
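+///
+/// For example, given 'f(int&&)' and 'f(const int&)', the call 'f(42)'
+/// prefers the rvalue reference binding; for a function lvalue argument, an
+/// lvalue reference binding is preferred over an rvalue reference binding.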
+static bool isBetterReferenceBindingKind(const StandardConversionSequence &SCS1,
+ const StandardConversionSequence &SCS2) {
+ // C++0x [over.ics.rank]p3b4:
+ // -- S1 and S2 are reference bindings (8.5.3) and neither refers to an
+ // implicit object parameter of a non-static member function declared
+ // without a ref-qualifier, and *either* S1 binds an rvalue reference
+ // to an rvalue and S2 binds an lvalue reference *or S1 binds an
+ // lvalue reference to a function lvalue and S2 binds an rvalue
+ // reference*.
+ //
+ // FIXME: Rvalue references. We're going rogue with the above edits,
+ // because the semantics in the current C++0x working paper (N3225 at the
+ // time of this writing) break the standard definition of std::forward
+ // and std::reference_wrapper when dealing with references to functions.
+ // Proposed wording changes submitted to CWG for consideration.
+ if (SCS1.BindsImplicitObjectArgumentWithoutRefQualifier ||
+ SCS2.BindsImplicitObjectArgumentWithoutRefQualifier)
+ return false;
+
+ return (!SCS1.IsLvalueReference && SCS1.BindsToRvalue &&
+ SCS2.IsLvalueReference) ||
+ (SCS1.IsLvalueReference && SCS1.BindsToFunctionLvalue &&
+ !SCS2.IsLvalueReference);
+}
+
+/// CompareStandardConversionSequences - Compare two standard
+/// conversion sequences to determine whether one is better than the
+/// other or if they are indistinguishable (C++ 13.3.3.2p3).
+static ImplicitConversionSequence::CompareKind
+CompareStandardConversionSequences(Sema &S,
+ const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2)
+{
+ // Standard conversion sequence S1 is a better conversion sequence
+ // than standard conversion sequence S2 if (C++ 13.3.3.2p3):
+
+ // -- S1 is a proper subsequence of S2 (comparing the conversion
+ // sequences in the canonical form defined by 13.3.3.1.1,
+ // excluding any Lvalue Transformation; the identity conversion
+ // sequence is considered to be a subsequence of any
+ // non-identity conversion sequence) or, if not that,
+ if (ImplicitConversionSequence::CompareKind CK
+ = compareStandardConversionSubsets(S.Context, SCS1, SCS2))
+ return CK;
+
+ // -- the rank of S1 is better than the rank of S2 (by the rules
+ // defined below), or, if not that,
+ ImplicitConversionRank Rank1 = SCS1.getRank();
+ ImplicitConversionRank Rank2 = SCS2.getRank();
+ if (Rank1 < Rank2)
+ return ImplicitConversionSequence::Better;
+ else if (Rank2 < Rank1)
+ return ImplicitConversionSequence::Worse;
+
+ // (C++ 13.3.3.2p4): Two conversion sequences with the same rank
+ // are indistinguishable unless one of the following rules
+ // applies:
+
+ // A conversion that is not a conversion of a pointer, or
+ // pointer to member, to bool is better than another conversion
+ // that is such a conversion.
+ if (SCS1.isPointerConversionToBool() != SCS2.isPointerConversionToBool())
+ return SCS2.isPointerConversionToBool()
+ ? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+
+ // C++ [over.ics.rank]p4b2:
+ //
+ // If class B is derived directly or indirectly from class A,
+ // conversion of B* to A* is better than conversion of B* to
+ // void*, and conversion of A* to void* is better than conversion
+ // of B* to void*.
+ bool SCS1ConvertsToVoid
+ = SCS1.isPointerConversionToVoidPointer(S.Context);
+ bool SCS2ConvertsToVoid
+ = SCS2.isPointerConversionToVoidPointer(S.Context);
+ if (SCS1ConvertsToVoid != SCS2ConvertsToVoid) {
+ // Exactly one of the conversion sequences is a conversion to
+ // a void pointer; it's the worse conversion.
+ return SCS2ConvertsToVoid ? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+ } else if (!SCS1ConvertsToVoid && !SCS2ConvertsToVoid) {
+ // Neither conversion sequence converts to a void pointer; compare
+ // their derived-to-base conversions.
+ if (ImplicitConversionSequence::CompareKind DerivedCK
+ = CompareDerivedToBaseConversions(S, SCS1, SCS2))
+ return DerivedCK;
+ } else if (SCS1ConvertsToVoid && SCS2ConvertsToVoid &&
+ !S.Context.hasSameType(SCS1.getFromType(), SCS2.getFromType())) {
+ // Both conversion sequences are conversions to void
+ // pointers. Compare the source types to determine if there's an
+ // inheritance relationship in their sources.
+ QualType FromType1 = SCS1.getFromType();
+ QualType FromType2 = SCS2.getFromType();
+
+ // Adjust the types we're converting from via the array-to-pointer
+ // conversion, if we need to.
+ if (SCS1.First == ICK_Array_To_Pointer)
+ FromType1 = S.Context.getArrayDecayedType(FromType1);
+ if (SCS2.First == ICK_Array_To_Pointer)
+ FromType2 = S.Context.getArrayDecayedType(FromType2);
+
+ QualType FromPointee1 = FromType1->getPointeeType().getUnqualifiedType();
+ QualType FromPointee2 = FromType2->getPointeeType().getUnqualifiedType();
+
+ if (S.IsDerivedFrom(FromPointee2, FromPointee1))
+ return ImplicitConversionSequence::Better;
+ else if (S.IsDerivedFrom(FromPointee1, FromPointee2))
+ return ImplicitConversionSequence::Worse;
+
+ // Objective-C++: If one interface is more specific than the
+ // other, it is the better one.
+ const ObjCObjectPointerType* FromObjCPtr1
+ = FromType1->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType* FromObjCPtr2
+ = FromType2->getAs<ObjCObjectPointerType>();
+ if (FromObjCPtr1 && FromObjCPtr2) {
+ bool AssignLeft = S.Context.canAssignObjCInterfaces(FromObjCPtr1,
+ FromObjCPtr2);
+ bool AssignRight = S.Context.canAssignObjCInterfaces(FromObjCPtr2,
+ FromObjCPtr1);
+ if (AssignLeft != AssignRight) {
+ return AssignLeft? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+ }
+ }
+ }
+
+ // Compare based on qualification conversions (C++ 13.3.3.2p3,
+ // bullet 3).
+ if (ImplicitConversionSequence::CompareKind QualCK
+ = CompareQualificationConversions(S, SCS1, SCS2))
+ return QualCK;
+
+ if (SCS1.ReferenceBinding && SCS2.ReferenceBinding) {
+ // Check for a better reference binding based on the kind of bindings.
+ if (isBetterReferenceBindingKind(SCS1, SCS2))
+ return ImplicitConversionSequence::Better;
+ else if (isBetterReferenceBindingKind(SCS2, SCS1))
+ return ImplicitConversionSequence::Worse;
+
+ // C++ [over.ics.rank]p3b4:
+ // -- S1 and S2 are reference bindings (8.5.3), and the types to
+ // which the references refer are the same type except for
+ // top-level cv-qualifiers, and the type to which the reference
+ // initialized by S2 refers is more cv-qualified than the type
+ // to which the reference initialized by S1 refers.
+ QualType T1 = SCS1.getToType(2);
+ QualType T2 = SCS2.getToType(2);
+ T1 = S.Context.getCanonicalType(T1);
+ T2 = S.Context.getCanonicalType(T2);
+ Qualifiers T1Quals, T2Quals;
+ QualType UnqualT1 = S.Context.getUnqualifiedArrayType(T1, T1Quals);
+ QualType UnqualT2 = S.Context.getUnqualifiedArrayType(T2, T2Quals);
+ if (UnqualT1 == UnqualT2) {
+ // Objective-C++ ARC: If the references refer to objects with different
+ // lifetimes, prefer bindings that don't change lifetime.
+ if (SCS1.ObjCLifetimeConversionBinding !=
+ SCS2.ObjCLifetimeConversionBinding) {
+ return SCS1.ObjCLifetimeConversionBinding
+ ? ImplicitConversionSequence::Worse
+ : ImplicitConversionSequence::Better;
+ }
+
+ // If the type is an array type, promote the element qualifiers to the
+ // type for comparison.
+ if (isa<ArrayType>(T1) && T1Quals)
+ T1 = S.Context.getQualifiedType(UnqualT1, T1Quals);
+ if (isa<ArrayType>(T2) && T2Quals)
+ T2 = S.Context.getQualifiedType(UnqualT2, T2Quals);
+ if (T2.isMoreQualifiedThan(T1))
+ return ImplicitConversionSequence::Better;
+ else if (T1.isMoreQualifiedThan(T2))
+ return ImplicitConversionSequence::Worse;
+ }
+ }
+
+ // In Microsoft mode, prefer an integral conversion to a
+ // floating-to-integral conversion if the integral conversion
+ // is between types of the same size.
+ // For example:
+ // void f(float);
+ // void f(int);
+ // int main() {
+ // long a;
+ // f(a);
+ // }
+ // Here, MSVC will call f(int) instead of generating a compile error
+ // as clang will do in standard mode.
+ if (S.getLangOpts().MicrosoftMode &&
+ SCS1.Second == ICK_Integral_Conversion &&
+ SCS2.Second == ICK_Floating_Integral &&
+ S.Context.getTypeSize(SCS1.getFromType()) ==
+ S.Context.getTypeSize(SCS1.getToType(2)))
+ return ImplicitConversionSequence::Better;
+
+ return ImplicitConversionSequence::Indistinguishable;
+}
+
+/// CompareQualificationConversions - Compares two standard conversion
+/// sequences to determine whether they can be ranked based on their
+/// qualification conversions (C++ 13.3.3.2p3 bullet 3).
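+///
+/// For example, with overloads 'f(const int*)' and 'f(const volatile int*)'
+/// and an 'int*' argument, the conversion to 'const int*' is better because
+/// its cv-qualification signature is a proper subset of the other's.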
+ImplicitConversionSequence::CompareKind
+CompareQualificationConversions(Sema &S,
+ const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2) {
+ // C++ 13.3.3.2p3:
+ // -- S1 and S2 differ only in their qualification conversion and
+ // yield similar types T1 and T2 (C++ 4.4), respectively, and the
+ // cv-qualification signature of type T1 is a proper subset of
+ // the cv-qualification signature of type T2, and S1 is not the
+ // deprecated string literal array-to-pointer conversion (4.2).
+ if (SCS1.First != SCS2.First || SCS1.Second != SCS2.Second ||
+ SCS1.Third != SCS2.Third || SCS1.Third != ICK_Qualification)
+ return ImplicitConversionSequence::Indistinguishable;
+
+ // FIXME: the example in the standard doesn't use a qualification
+ // conversion (!)
+ QualType T1 = SCS1.getToType(2);
+ QualType T2 = SCS2.getToType(2);
+ T1 = S.Context.getCanonicalType(T1);
+ T2 = S.Context.getCanonicalType(T2);
+ Qualifiers T1Quals, T2Quals;
+ QualType UnqualT1 = S.Context.getUnqualifiedArrayType(T1, T1Quals);
+ QualType UnqualT2 = S.Context.getUnqualifiedArrayType(T2, T2Quals);
+
+ // If the types are the same, we won't learn anything by unwrapping them.
+ if (UnqualT1 == UnqualT2)
+ return ImplicitConversionSequence::Indistinguishable;
+
+ // If the type is an array type, promote the element qualifiers to the type
+ // for comparison.
+ if (isa<ArrayType>(T1) && T1Quals)
+ T1 = S.Context.getQualifiedType(UnqualT1, T1Quals);
+ if (isa<ArrayType>(T2) && T2Quals)
+ T2 = S.Context.getQualifiedType(UnqualT2, T2Quals);
+
+ ImplicitConversionSequence::CompareKind Result
+ = ImplicitConversionSequence::Indistinguishable;
+
+ // Objective-C++ ARC:
+ // Prefer qualification conversions not involving a change in lifetime
+ // to qualification conversions that do change lifetime.
+ if (SCS1.QualificationIncludesObjCLifetime !=
+ SCS2.QualificationIncludesObjCLifetime) {
+ Result = SCS1.QualificationIncludesObjCLifetime
+ ? ImplicitConversionSequence::Worse
+ : ImplicitConversionSequence::Better;
+ }
+
+ while (S.Context.UnwrapSimilarPointerTypes(T1, T2)) {
+ // Within each iteration of the loop, we check the qualifiers to
+ // determine if this still looks like a qualification
+ // conversion. Then, if all is well, we unwrap one more level of
+ // pointers or pointers-to-members and do it all again
+ // until there are no more pointers or pointers-to-members left
+ // to unwrap. This essentially mimics what
+ // IsQualificationConversion does, but here we're checking for a
+ // strict subset of qualifiers.
+ if (T1.getCVRQualifiers() == T2.getCVRQualifiers())
+ // The qualifiers are the same, so this doesn't tell us anything
+ // about how the sequences rank.
+ ;
+ else if (T2.isMoreQualifiedThan(T1)) {
+ // T1 has fewer qualifiers, so it could be the better sequence.
+ if (Result == ImplicitConversionSequence::Worse)
+ // Neither has qualifiers that are a subset of the other's
+ // qualifiers.
+ return ImplicitConversionSequence::Indistinguishable;
+
+ Result = ImplicitConversionSequence::Better;
+ } else if (T1.isMoreQualifiedThan(T2)) {
+ // T2 has fewer qualifiers, so it could be the better sequence.
+ if (Result == ImplicitConversionSequence::Better)
+ // Neither has qualifiers that are a subset of the other's
+ // qualifiers.
+ return ImplicitConversionSequence::Indistinguishable;
+
+ Result = ImplicitConversionSequence::Worse;
+ } else {
+ // Qualifiers are disjoint.
+ return ImplicitConversionSequence::Indistinguishable;
+ }
+
+ // If the types after this point are equivalent, we're done.
+ if (S.Context.hasSameUnqualifiedType(T1, T2))
+ break;
+ }
+
+ // Check that the winning standard conversion sequence isn't using
+ // the deprecated string literal array to pointer conversion.
+ switch (Result) {
+ case ImplicitConversionSequence::Better:
+ if (SCS1.DeprecatedStringLiteralToCharPtr)
+ Result = ImplicitConversionSequence::Indistinguishable;
+ break;
+
+ case ImplicitConversionSequence::Indistinguishable:
+ break;
+
+ case ImplicitConversionSequence::Worse:
+ if (SCS2.DeprecatedStringLiteralToCharPtr)
+ Result = ImplicitConversionSequence::Indistinguishable;
+ break;
+ }
+
+ return Result;
+}
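+
+// Illustrative example: given candidates taking 'int *const *' and
+// 'int const *const *', an 'int **' argument needs a qualification
+// conversion for both; the qualifiers added for 'int *const *' are a
+// proper subset of those added for 'int const *const *', so the loop
+// above ranks the first sequence Better.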
+
+/// CompareDerivedToBaseConversions - Compares two standard conversion
+/// sequences to determine whether they can be ranked based on their
+/// various kinds of derived-to-base conversions (C++
+/// [over.ics.rank]p4b3). As part of these checks, we also look at
+/// conversions between Objective-C interface types.
+ImplicitConversionSequence::CompareKind
+CompareDerivedToBaseConversions(Sema &S,
+ const StandardConversionSequence& SCS1,
+ const StandardConversionSequence& SCS2) {
+ QualType FromType1 = SCS1.getFromType();
+ QualType ToType1 = SCS1.getToType(1);
+ QualType FromType2 = SCS2.getFromType();
+ QualType ToType2 = SCS2.getToType(1);
+
+ // Adjust the types we're converting from via the array-to-pointer
+ // conversion, if we need to.
+ if (SCS1.First == ICK_Array_To_Pointer)
+ FromType1 = S.Context.getArrayDecayedType(FromType1);
+ if (SCS2.First == ICK_Array_To_Pointer)
+ FromType2 = S.Context.getArrayDecayedType(FromType2);
+
+ // Canonicalize all of the types.
+ FromType1 = S.Context.getCanonicalType(FromType1);
+ ToType1 = S.Context.getCanonicalType(ToType1);
+ FromType2 = S.Context.getCanonicalType(FromType2);
+ ToType2 = S.Context.getCanonicalType(ToType2);
+
+ // C++ [over.ics.rank]p4b3:
+ //
+ // If class B is derived directly or indirectly from class A and
+ // class C is derived directly or indirectly from B,
+ //
+ // Compare based on pointer conversions.
+ if (SCS1.Second == ICK_Pointer_Conversion &&
+ SCS2.Second == ICK_Pointer_Conversion &&
+ /*FIXME: Remove if Objective-C id conversions get their own rank*/
+ FromType1->isPointerType() && FromType2->isPointerType() &&
+ ToType1->isPointerType() && ToType2->isPointerType()) {
+ QualType FromPointee1
+ = FromType1->getAs<PointerType>()->getPointeeType().getUnqualifiedType();
+ QualType ToPointee1
+ = ToType1->getAs<PointerType>()->getPointeeType().getUnqualifiedType();
+ QualType FromPointee2
+ = FromType2->getAs<PointerType>()->getPointeeType().getUnqualifiedType();
+ QualType ToPointee2
+ = ToType2->getAs<PointerType>()->getPointeeType().getUnqualifiedType();
+
+ // -- conversion of C* to B* is better than conversion of C* to A*,
+ if (FromPointee1 == FromPointee2 && ToPointee1 != ToPointee2) {
+ if (S.IsDerivedFrom(ToPointee1, ToPointee2))
+ return ImplicitConversionSequence::Better;
+ else if (S.IsDerivedFrom(ToPointee2, ToPointee1))
+ return ImplicitConversionSequence::Worse;
+ }
+
+ // -- conversion of B* to A* is better than conversion of C* to A*,
+ if (FromPointee1 != FromPointee2 && ToPointee1 == ToPointee2) {
+ if (S.IsDerivedFrom(FromPointee2, FromPointee1))
+ return ImplicitConversionSequence::Better;
+ else if (S.IsDerivedFrom(FromPointee1, FromPointee2))
+ return ImplicitConversionSequence::Worse;
+ }
+ } else if (SCS1.Second == ICK_Pointer_Conversion &&
+ SCS2.Second == ICK_Pointer_Conversion) {
+ const ObjCObjectPointerType *FromPtr1
+ = FromType1->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *FromPtr2
+ = FromType2->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *ToPtr1
+ = ToType1->getAs<ObjCObjectPointerType>();
+ const ObjCObjectPointerType *ToPtr2
+ = ToType2->getAs<ObjCObjectPointerType>();
+
+ if (FromPtr1 && FromPtr2 && ToPtr1 && ToPtr2) {
+ // Apply the same conversion ranking rules for Objective-C pointer types
+ // that we do for C++ pointers to class types. However, we employ the
+ // Objective-C pseudo-subtyping relationship used for assignment of
+ // Objective-C pointer types.
+ bool FromAssignLeft
+ = S.Context.canAssignObjCInterfaces(FromPtr1, FromPtr2);
+ bool FromAssignRight
+ = S.Context.canAssignObjCInterfaces(FromPtr2, FromPtr1);
+ bool ToAssignLeft
+ = S.Context.canAssignObjCInterfaces(ToPtr1, ToPtr2);
+ bool ToAssignRight
+ = S.Context.canAssignObjCInterfaces(ToPtr2, ToPtr1);
+
+ // A conversion to a non-id object pointer type or qualified 'id'
+ // type is better than a conversion to 'id'.
+ if (ToPtr1->isObjCIdType() &&
+ (ToPtr2->isObjCQualifiedIdType() || ToPtr2->getInterfaceDecl()))
+ return ImplicitConversionSequence::Worse;
+ if (ToPtr2->isObjCIdType() &&
+ (ToPtr1->isObjCQualifiedIdType() || ToPtr1->getInterfaceDecl()))
+ return ImplicitConversionSequence::Better;
+
+ // A conversion to a non-id object pointer type is better than a
+ // conversion to a qualified 'id' type
+ if (ToPtr1->isObjCQualifiedIdType() && ToPtr2->getInterfaceDecl())
+ return ImplicitConversionSequence::Worse;
+ if (ToPtr2->isObjCQualifiedIdType() && ToPtr1->getInterfaceDecl())
+ return ImplicitConversionSequence::Better;
+
+ // A conversion to a non-Class object pointer type or qualified 'Class'
+ // type is better than a conversion to 'Class'.
+ if (ToPtr1->isObjCClassType() &&
+ (ToPtr2->isObjCQualifiedClassType() || ToPtr2->getInterfaceDecl()))
+ return ImplicitConversionSequence::Worse;
+ if (ToPtr2->isObjCClassType() &&
+ (ToPtr1->isObjCQualifiedClassType() || ToPtr1->getInterfaceDecl()))
+ return ImplicitConversionSequence::Better;
+
+ // A conversion to a non-Class object pointer type is better than a
+ // conversion to a qualified 'Class' type.
+ if (ToPtr1->isObjCQualifiedClassType() && ToPtr2->getInterfaceDecl())
+ return ImplicitConversionSequence::Worse;
+ if (ToPtr2->isObjCQualifiedClassType() && ToPtr1->getInterfaceDecl())
+ return ImplicitConversionSequence::Better;
+
+ // -- "conversion of C* to B* is better than conversion of C* to A*,"
+ if (S.Context.hasSameType(FromType1, FromType2) &&
+ !FromPtr1->isObjCIdType() && !FromPtr1->isObjCClassType() &&
+ (ToAssignLeft != ToAssignRight))
+ return ToAssignLeft? ImplicitConversionSequence::Worse
+ : ImplicitConversionSequence::Better;
+
+ // -- "conversion of B* to A* is better than conversion of C* to A*,"
+ if (S.Context.hasSameUnqualifiedType(ToType1, ToType2) &&
+ (FromAssignLeft != FromAssignRight))
+ return FromAssignLeft? ImplicitConversionSequence::Better
+ : ImplicitConversionSequence::Worse;
+ }
+ }
+
+ // Ranking of member-pointer types.
+ if (SCS1.Second == ICK_Pointer_Member && SCS2.Second == ICK_Pointer_Member &&
+ FromType1->isMemberPointerType() && FromType2->isMemberPointerType() &&
+ ToType1->isMemberPointerType() && ToType2->isMemberPointerType()) {
+ const MemberPointerType * FromMemPointer1 =
+ FromType1->getAs<MemberPointerType>();
+ const MemberPointerType * ToMemPointer1 =
+ ToType1->getAs<MemberPointerType>();
+ const MemberPointerType * FromMemPointer2 =
+ FromType2->getAs<MemberPointerType>();
+ const MemberPointerType * ToMemPointer2 =
+ ToType2->getAs<MemberPointerType>();
+ const Type *FromPointeeType1 = FromMemPointer1->getClass();
+ const Type *ToPointeeType1 = ToMemPointer1->getClass();
+ const Type *FromPointeeType2 = FromMemPointer2->getClass();
+ const Type *ToPointeeType2 = ToMemPointer2->getClass();
+ QualType FromPointee1 = QualType(FromPointeeType1, 0).getUnqualifiedType();
+ QualType ToPointee1 = QualType(ToPointeeType1, 0).getUnqualifiedType();
+ QualType FromPointee2 = QualType(FromPointeeType2, 0).getUnqualifiedType();
+ QualType ToPointee2 = QualType(ToPointeeType2, 0).getUnqualifiedType();
+ // conversion of A::* to B::* is better than conversion of A::* to C::*,
+ if (FromPointee1 == FromPointee2 && ToPointee1 != ToPointee2) {
+ if (S.IsDerivedFrom(ToPointee1, ToPointee2))
+ return ImplicitConversionSequence::Worse;
+ else if (S.IsDerivedFrom(ToPointee2, ToPointee1))
+ return ImplicitConversionSequence::Better;
+ }
+ // conversion of B::* to C::* is better than conversion of A::* to C::*
+ if (ToPointee1 == ToPointee2 && FromPointee1 != FromPointee2) {
+ if (S.IsDerivedFrom(FromPointee1, FromPointee2))
+ return ImplicitConversionSequence::Better;
+ else if (S.IsDerivedFrom(FromPointee2, FromPointee1))
+ return ImplicitConversionSequence::Worse;
+ }
+ }
+
+ if (SCS1.Second == ICK_Derived_To_Base) {
+ // -- conversion of C to B is better than conversion of C to A,
+ // -- binding of an expression of type C to a reference of type
+ // B& is better than binding an expression of type C to a
+ // reference of type A&,
+ if (S.Context.hasSameUnqualifiedType(FromType1, FromType2) &&
+ !S.Context.hasSameUnqualifiedType(ToType1, ToType2)) {
+ if (S.IsDerivedFrom(ToType1, ToType2))
+ return ImplicitConversionSequence::Better;
+ else if (S.IsDerivedFrom(ToType2, ToType1))
+ return ImplicitConversionSequence::Worse;
+ }
+
+ // -- conversion of B to A is better than conversion of C to A.
+ // -- binding of an expression of type B to a reference of type
+ // A& is better than binding an expression of type C to a
+ // reference of type A&,
+ if (!S.Context.hasSameUnqualifiedType(FromType1, FromType2) &&
+ S.Context.hasSameUnqualifiedType(ToType1, ToType2)) {
+ if (S.IsDerivedFrom(FromType2, FromType1))
+ return ImplicitConversionSequence::Better;
+ else if (S.IsDerivedFrom(FromType1, FromType2))
+ return ImplicitConversionSequence::Worse;
+ }
+ }
+
+ return ImplicitConversionSequence::Indistinguishable;
+}
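+
+// Illustrative example: with 'struct A {}; struct B : A {}; struct C : B {};'
+// and candidates f(A*) and f(B*), a call with a 'C*' argument selects f(B*):
+// the conversion of C* to B* is ranked Better than the conversion of C* to A*
+// by the pointer-conversion checks above.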
+
+/// CompareReferenceRelationship - Compare the two types T1 and T2 to
+/// determine whether they are reference-related,
+/// reference-compatible, reference-compatible with added
+/// qualification, or incompatible, for use in C++ initialization by
+/// reference (C++ [dcl.ref.init]p4). Neither type can be a reference
+/// type, and the first type (T1) is the pointee type of the reference
+/// type being initialized.
+Sema::ReferenceCompareResult
+Sema::CompareReferenceRelationship(SourceLocation Loc,
+ QualType OrigT1, QualType OrigT2,
+ bool &DerivedToBase,
+ bool &ObjCConversion,
+ bool &ObjCLifetimeConversion) {
+ assert(!OrigT1->isReferenceType() &&
+ "T1 must be the pointee type of the reference type");
+ assert(!OrigT2->isReferenceType() && "T2 cannot be a reference type");
+
+ QualType T1 = Context.getCanonicalType(OrigT1);
+ QualType T2 = Context.getCanonicalType(OrigT2);
+ Qualifiers T1Quals, T2Quals;
+ QualType UnqualT1 = Context.getUnqualifiedArrayType(T1, T1Quals);
+ QualType UnqualT2 = Context.getUnqualifiedArrayType(T2, T2Quals);
+
+ // C++ [dcl.init.ref]p4:
+ // Given types "cv1 T1" and "cv2 T2," "cv1 T1" is
+ // reference-related to "cv2 T2" if T1 is the same type as T2, or
+ // T1 is a base class of T2.
+ DerivedToBase = false;
+ ObjCConversion = false;
+ ObjCLifetimeConversion = false;
+ if (UnqualT1 == UnqualT2) {
+ // Nothing to do.
+ } else if (!RequireCompleteType(Loc, OrigT2, PDiag()) &&
+ IsDerivedFrom(UnqualT2, UnqualT1))
+ DerivedToBase = true;
+ else if (UnqualT1->isObjCObjectOrInterfaceType() &&
+ UnqualT2->isObjCObjectOrInterfaceType() &&
+ Context.canBindObjCObjectType(UnqualT1, UnqualT2))
+ ObjCConversion = true;
+ else
+ return Ref_Incompatible;
+
+ // At this point, we know that T1 and T2 are reference-related (at
+ // least).
+
+ // If the type is an array type, promote the element qualifiers to the type
+ // for comparison.
+ if (isa<ArrayType>(T1) && T1Quals)
+ T1 = Context.getQualifiedType(UnqualT1, T1Quals);
+ if (isa<ArrayType>(T2) && T2Quals)
+ T2 = Context.getQualifiedType(UnqualT2, T2Quals);
+
+ // C++ [dcl.init.ref]p4:
+ // "cv1 T1" is reference-compatible with "cv2 T2" if T1 is
+ // reference-related to T2 and cv1 is the same cv-qualification
+ // as, or greater cv-qualification than, cv2. For purposes of
+ // overload resolution, cases for which cv1 is greater
+ // cv-qualification than cv2 are identified as
+ // reference-compatible with added qualification (see 13.3.3.2).
+ //
+ // Note that we also require equivalence of Objective-C GC and address-space
+ // qualifiers when performing these computations, so that e.g., an int in
+ // address space 1 is not reference-compatible with an int in address
+ // space 2.
+ if (T1Quals.getObjCLifetime() != T2Quals.getObjCLifetime() &&
+ T1Quals.compatiblyIncludesObjCLifetime(T2Quals)) {
+ T1Quals.removeObjCLifetime();
+ T2Quals.removeObjCLifetime();
+ ObjCLifetimeConversion = true;
+ }
+
+ if (T1Quals == T2Quals)
+ return Ref_Compatible;
+ else if (T1Quals.compatiblyIncludes(T2Quals))
+ return Ref_Compatible_With_Added_Qualification;
+ else
+ return Ref_Related;
+}
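+
+// Illustrative example: binding 'const int &' to an lvalue of type 'int'
+// compares T1 = 'const int' with T2 = 'int'; the types are reference-related
+// and, since 'const' compatibly includes the empty qualifier set, the result
+// is Ref_Compatible_With_Added_Qualification. Comparing 'const int' with
+// 'volatile int' instead yields only Ref_Related.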
+
+/// \brief Look for a user-defined conversion to a value reference-compatible
+/// with DeclType. Return true if something definite is found.
+static bool
+FindConversionForRefInit(Sema &S, ImplicitConversionSequence &ICS,
+ QualType DeclType, SourceLocation DeclLoc,
+ Expr *Init, QualType T2, bool AllowRvalues,
+ bool AllowExplicit) {
+ assert(T2->isRecordType() && "Can only find conversions of record types.");
+ CXXRecordDecl *T2RecordDecl
+ = dyn_cast<CXXRecordDecl>(T2->getAs<RecordType>()->getDecl());
+
+ OverloadCandidateSet CandidateSet(DeclLoc);
+ const UnresolvedSetImpl *Conversions
+ = T2RecordDecl->getVisibleConversionFunctions();
+ for (UnresolvedSetImpl::iterator I = Conversions->begin(),
+ E = Conversions->end(); I != E; ++I) {
+ NamedDecl *D = *I;
+ CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(D->getDeclContext());
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ FunctionTemplateDecl *ConvTemplate
+ = dyn_cast<FunctionTemplateDecl>(D);
+ CXXConversionDecl *Conv;
+ if (ConvTemplate)
+ Conv = cast<CXXConversionDecl>(ConvTemplate->getTemplatedDecl());
+ else
+ Conv = cast<CXXConversionDecl>(D);
+
+ // If this is an explicit conversion, and we're not allowed to consider
+ // explicit conversions, skip it.
+ if (!AllowExplicit && Conv->isExplicit())
+ continue;
+
+ if (AllowRvalues) {
+ bool DerivedToBase = false;
+ bool ObjCConversion = false;
+ bool ObjCLifetimeConversion = false;
+
+ // If we are initializing an rvalue reference, don't permit conversion
+ // functions that return lvalues.
+ if (!ConvTemplate && DeclType->isRValueReferenceType()) {
+ const ReferenceType *RefType
+ = Conv->getConversionType()->getAs<LValueReferenceType>();
+ if (RefType && !RefType->getPointeeType()->isFunctionType())
+ continue;
+ }
+
+ if (!ConvTemplate &&
+ S.CompareReferenceRelationship(
+ DeclLoc,
+ Conv->getConversionType().getNonReferenceType()
+ .getUnqualifiedType(),
+ DeclType.getNonReferenceType().getUnqualifiedType(),
+ DerivedToBase, ObjCConversion, ObjCLifetimeConversion) ==
+ Sema::Ref_Incompatible)
+ continue;
+ } else {
+ // If the conversion function doesn't return a reference type,
+ // it can't be considered for this conversion. An rvalue reference
+ // is only acceptable if its pointee type is a function type.
+
+ const ReferenceType *RefType =
+ Conv->getConversionType()->getAs<ReferenceType>();
+ if (!RefType ||
+ (!RefType->isLValueReferenceType() &&
+ !RefType->getPointeeType()->isFunctionType()))
+ continue;
+ }
+
+ if (ConvTemplate)
+ S.AddTemplateConversionCandidate(ConvTemplate, I.getPair(), ActingDC,
+ Init, DeclType, CandidateSet);
+ else
+ S.AddConversionCandidate(Conv, I.getPair(), ActingDC, Init,
+ DeclType, CandidateSet);
+ }
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(S, DeclLoc, Best, true)) {
+ case OR_Success:
+ // C++ [over.ics.ref]p1:
+ //
+ // [...] If the parameter binds directly to the result of
+ // applying a conversion function to the argument
+ // expression, the implicit conversion sequence is a
+ // user-defined conversion sequence (13.3.3.1.2), with the
+ // second standard conversion sequence either an identity
+ // conversion or, if the conversion function returns an
+ // entity of a type that is a derived class of the parameter
+ // type, a derived-to-base Conversion.
+ if (!Best->FinalConversion.DirectBinding)
+ return false;
+
+ if (Best->Function)
+ S.MarkFunctionReferenced(DeclLoc, Best->Function);
+ ICS.setUserDefined();
+ ICS.UserDefined.Before = Best->Conversions[0].Standard;
+ ICS.UserDefined.After = Best->FinalConversion;
+ ICS.UserDefined.HadMultipleCandidates = HadMultipleCandidates;
+ ICS.UserDefined.ConversionFunction = Best->Function;
+ ICS.UserDefined.FoundConversionFunction = Best->FoundDecl;
+ ICS.UserDefined.EllipsisConversion = false;
+ assert(ICS.UserDefined.After.ReferenceBinding &&
+ ICS.UserDefined.After.DirectBinding &&
+ "Expected a direct reference binding!");
+ return true;
+
+ case OR_Ambiguous:
+ ICS.setAmbiguous();
+ for (OverloadCandidateSet::iterator Cand = CandidateSet.begin();
+ Cand != CandidateSet.end(); ++Cand)
+ if (Cand->Viable)
+ ICS.Ambiguous.addConversion(Cand->Function);
+ return true;
+
+ case OR_No_Viable_Function:
+ case OR_Deleted:
+ // There was no suitable conversion, or we found a deleted
+ // conversion; continue with other checks.
+ return false;
+ }
+
+ llvm_unreachable("Invalid OverloadResult!");
+}
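+
+// Illustrative example: for a parameter of type 'const int &' and an argument
+// of type 'S' where 'struct S { operator int&(); };', the routine above finds
+// the conversion function and records a user-defined conversion sequence
+// whose second standard conversion sequence is a direct reference binding.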
+
+/// \brief Compute an implicit conversion sequence for reference
+/// initialization.
+static ImplicitConversionSequence
+TryReferenceInit(Sema &S, Expr *Init, QualType DeclType,
+ SourceLocation DeclLoc,
+ bool SuppressUserConversions,
+ bool AllowExplicit) {
+ assert(DeclType->isReferenceType() && "Reference init needs a reference");
+
+ // Most paths end in a failed conversion.
+ ImplicitConversionSequence ICS;
+ ICS.setBad(BadConversionSequence::no_conversion, Init, DeclType);
+
+ QualType T1 = DeclType->getAs<ReferenceType>()->getPointeeType();
+ QualType T2 = Init->getType();
+
+ // If the initializer is the address of an overloaded function, try
+ // to resolve the overloaded function. If all goes well, T2 is the
+ // type of the resulting function.
+ if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy) {
+ DeclAccessPair Found;
+ if (FunctionDecl *Fn = S.ResolveAddressOfOverloadedFunction(Init, DeclType,
+ false, Found))
+ T2 = Fn->getType();
+ }
+
+ // Compute some basic properties of the types and the initializer.
+ bool isRValRef = DeclType->isRValueReferenceType();
+ bool DerivedToBase = false;
+ bool ObjCConversion = false;
+ bool ObjCLifetimeConversion = false;
+ Expr::Classification InitCategory = Init->Classify(S.Context);
+ Sema::ReferenceCompareResult RefRelationship
+ = S.CompareReferenceRelationship(DeclLoc, T1, T2, DerivedToBase,
+ ObjCConversion, ObjCLifetimeConversion);
+
+ // C++0x [dcl.init.ref]p5:
+ // A reference to type "cv1 T1" is initialized by an expression
+ // of type "cv2 T2" as follows:
+
+ // -- If reference is an lvalue reference and the initializer expression
+ if (!isRValRef) {
+ // -- is an lvalue (but is not a bit-field), and "cv1 T1" is
+ // reference-compatible with "cv2 T2," or
+ //
+ // Per C++ [over.ics.ref]p4, we don't check the bit-field property here.
+ if (InitCategory.isLValue() &&
+ RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification) {
+ // C++ [over.ics.ref]p1:
+ // When a parameter of reference type binds directly (8.5.3)
+ // to an argument expression, the implicit conversion sequence
+ // is the identity conversion, unless the argument expression
+ // has a type that is a derived class of the parameter type,
+ // in which case the implicit conversion sequence is a
+ // derived-to-base Conversion (13.3.3.1).
+ ICS.setStandard();
+ ICS.Standard.First = ICK_Identity;
+ ICS.Standard.Second = DerivedToBase? ICK_Derived_To_Base
+ : ObjCConversion? ICK_Compatible_Conversion
+ : ICK_Identity;
+ ICS.Standard.Third = ICK_Identity;
+ ICS.Standard.FromTypePtr = T2.getAsOpaquePtr();
+ ICS.Standard.setToType(0, T2);
+ ICS.Standard.setToType(1, T1);
+ ICS.Standard.setToType(2, T1);
+ ICS.Standard.ReferenceBinding = true;
+ ICS.Standard.DirectBinding = true;
+ ICS.Standard.IsLvalueReference = !isRValRef;
+ ICS.Standard.BindsToFunctionLvalue = T2->isFunctionType();
+ ICS.Standard.BindsToRvalue = false;
+ ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ ICS.Standard.ObjCLifetimeConversionBinding = ObjCLifetimeConversion;
+ ICS.Standard.CopyConstructor = 0;
+
+ // Nothing more to do: the inaccessibility/ambiguity check for
+ // derived-to-base conversions is suppressed when we're
+ // computing the implicit conversion sequence (C++
+ // [over.best.ics]p2).
+ return ICS;
+ }
+
+ // -- has a class type (i.e., T2 is a class type), where T1 is
+ // not reference-related to T2, and can be implicitly
+ // converted to an lvalue of type "cv3 T3," where "cv1 T1"
+ // is reference-compatible with "cv3 T3" 92) (this
+ // conversion is selected by enumerating the applicable
+ // conversion functions (13.3.1.6) and choosing the best
+ // one through overload resolution (13.3)),
+ if (!SuppressUserConversions && T2->isRecordType() &&
+ !S.RequireCompleteType(DeclLoc, T2, 0) &&
+ RefRelationship == Sema::Ref_Incompatible) {
+ if (FindConversionForRefInit(S, ICS, DeclType, DeclLoc,
+ Init, T2, /*AllowRvalues=*/false,
+ AllowExplicit))
+ return ICS;
+ }
+ }
+
+ // -- Otherwise, the reference shall be an lvalue reference to a
+ // non-volatile const type (i.e., cv1 shall be const), or the reference
+ // shall be an rvalue reference.
+ //
+ // We actually handle one oddity of C++ [over.ics.ref] at this
+ // point, which is that, due to p2 (which short-circuits reference
+ // binding by only attempting a simple conversion for non-direct
+ // bindings) and p3's strange wording, we allow a const volatile
+ // reference to bind to an rvalue. Hence the check for the presence
+ // of "const" rather than checking for "const" being the only
+ // qualifier.
+ // This is also the point where rvalue references and lvalue inits no longer
+ // go together.
+ if (!isRValRef && !T1.isConstQualified())
+ return ICS;
+
+ // -- If the initializer expression
+ //
+ // -- is an xvalue, class prvalue, array prvalue or function
+ // lvalue and "cv1 T1" is reference-compatible with "cv2 T2", or
+ if (RefRelationship >= Sema::Ref_Compatible_With_Added_Qualification &&
+ (InitCategory.isXValue() ||
+ (InitCategory.isPRValue() && (T2->isRecordType() || T2->isArrayType())) ||
+ (InitCategory.isLValue() && T2->isFunctionType()))) {
+ ICS.setStandard();
+ ICS.Standard.First = ICK_Identity;
+ ICS.Standard.Second = DerivedToBase? ICK_Derived_To_Base
+ : ObjCConversion? ICK_Compatible_Conversion
+ : ICK_Identity;
+ ICS.Standard.Third = ICK_Identity;
+ ICS.Standard.FromTypePtr = T2.getAsOpaquePtr();
+ ICS.Standard.setToType(0, T2);
+ ICS.Standard.setToType(1, T1);
+ ICS.Standard.setToType(2, T1);
+ ICS.Standard.ReferenceBinding = true;
+ // In C++0x, this is always a direct binding. In C++98/03, it's a direct
+ // binding unless we're binding to a class prvalue.
+ // Note: Although xvalues wouldn't normally show up in C++98/03 code, we
+ // allow the use of rvalue references in C++98/03 for the benefit of
+ // standard library implementors; therefore, we need the xvalue check here.
+ ICS.Standard.DirectBinding =
+ S.getLangOpts().CPlusPlus0x ||
+ (InitCategory.isPRValue() && !T2->isRecordType());
+ ICS.Standard.IsLvalueReference = !isRValRef;
+ ICS.Standard.BindsToFunctionLvalue = T2->isFunctionType();
+ ICS.Standard.BindsToRvalue = InitCategory.isRValue();
+ ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ ICS.Standard.ObjCLifetimeConversionBinding = ObjCLifetimeConversion;
+ ICS.Standard.CopyConstructor = 0;
+ return ICS;
+ }
+
+ // -- has a class type (i.e., T2 is a class type), where T1 is not
+ // reference-related to T2, and can be implicitly converted to
+ // an xvalue, class prvalue, or function lvalue of type
+ // "cv3 T3", where "cv1 T1" is reference-compatible with
+ // "cv3 T3",
+ //
+ // then the reference is bound to the value of the initializer
+ // expression in the first case and to the result of the conversion
+ // in the second case (or, in either case, to an appropriate base
+ // class subobject).
+ if (!SuppressUserConversions && RefRelationship == Sema::Ref_Incompatible &&
+ T2->isRecordType() && !S.RequireCompleteType(DeclLoc, T2, 0) &&
+ FindConversionForRefInit(S, ICS, DeclType, DeclLoc,
+ Init, T2, /*AllowRvalues=*/true,
+ AllowExplicit)) {
+ // In the second case, if the reference is an rvalue reference
+ // and the second standard conversion sequence of the
+ // user-defined conversion sequence includes an lvalue-to-rvalue
+ // conversion, the program is ill-formed.
+ if (ICS.isUserDefined() && isRValRef &&
+ ICS.UserDefined.After.First == ICK_Lvalue_To_Rvalue)
+ ICS.setBad(BadConversionSequence::no_conversion, Init, DeclType);
+
+ return ICS;
+ }
+
+ // -- Otherwise, a temporary of type "cv1 T1" is created and
+ // initialized from the initializer expression using the
+ // rules for a non-reference copy initialization (8.5). The
+ // reference is then bound to the temporary. If T1 is
+ // reference-related to T2, cv1 must be the same
+ // cv-qualification as, or greater cv-qualification than,
+ // cv2; otherwise, the program is ill-formed.
+ if (RefRelationship == Sema::Ref_Related) {
+ // If cv1 == cv2 or cv1 is a greater cv-qualified than cv2, then
+ // we would be reference-compatible or reference-compatible with
+ // added qualification. But that wasn't the case, so the reference
+ // initialization fails.
+ //
+ // Note that we only want to check address spaces and cvr-qualifiers here.
+ // ObjC GC and lifetime qualifiers aren't important.
+ Qualifiers T1Quals = T1.getQualifiers();
+ Qualifiers T2Quals = T2.getQualifiers();
+ T1Quals.removeObjCGCAttr();
+ T1Quals.removeObjCLifetime();
+ T2Quals.removeObjCGCAttr();
+ T2Quals.removeObjCLifetime();
+ if (!T1Quals.compatiblyIncludes(T2Quals))
+ return ICS;
+ }
+
+ // If at least one of the types is a class type, the types are not
+ // related, and we aren't allowed any user conversions, the
+ // reference binding fails. This case is important for breaking
+ // recursion, since TryImplicitConversion below will attempt to
+ // create a temporary through the use of a copy constructor.
+ if (SuppressUserConversions && RefRelationship == Sema::Ref_Incompatible &&
+ (T1->isRecordType() || T2->isRecordType()))
+ return ICS;
+
+ // If T1 is reference-related to T2 and the reference is an rvalue
+ // reference, the initializer expression shall not be an lvalue.
+ if (RefRelationship >= Sema::Ref_Related &&
+ isRValRef && Init->Classify(S.Context).isLValue())
+ return ICS;
+
+ // C++ [over.ics.ref]p2:
+ // When a parameter of reference type is not bound directly to
+ // an argument expression, the conversion sequence is the one
+ // required to convert the argument expression to the
+ // underlying type of the reference according to
+ // 13.3.3.1. Conceptually, this conversion sequence corresponds
+ // to copy-initializing a temporary of the underlying type with
+ // the argument expression. Any difference in top-level
+ // cv-qualification is subsumed by the initialization itself
+ // and does not constitute a conversion.
+ ICS = TryImplicitConversion(S, Init, T1, SuppressUserConversions,
+ /*AllowExplicit=*/false,
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false,
+ /*AllowObjCWritebackConversion=*/false);
+
+ // Of course, that's still a reference binding.
+ if (ICS.isStandard()) {
+ ICS.Standard.ReferenceBinding = true;
+ ICS.Standard.IsLvalueReference = !isRValRef;
+ ICS.Standard.BindsToFunctionLvalue = T2->isFunctionType();
+ ICS.Standard.BindsToRvalue = true;
+ ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ ICS.Standard.ObjCLifetimeConversionBinding = false;
+ } else if (ICS.isUserDefined()) {
+ // Don't allow rvalue references to bind to lvalues.
+ if (DeclType->isRValueReferenceType()) {
+ if (const ReferenceType *RefType
+ = ICS.UserDefined.ConversionFunction->getResultType()
+ ->getAs<LValueReferenceType>()) {
+ if (!RefType->getPointeeType()->isFunctionType()) {
+ ICS.setBad(BadConversionSequence::lvalue_ref_to_rvalue, Init,
+ DeclType);
+ return ICS;
+ }
+ }
+ }
+
+ ICS.UserDefined.After.ReferenceBinding = true;
+ ICS.UserDefined.After.IsLvalueReference = !isRValRef;
+ ICS.UserDefined.After.BindsToFunctionLvalue = T2->isFunctionType();
+ ICS.UserDefined.After.BindsToRvalue = true;
+ ICS.UserDefined.After.BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ ICS.UserDefined.After.ObjCLifetimeConversionBinding = false;
+ }
+
+ return ICS;
+}
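+
+// Illustrative example: for a parameter of type 'X &&' and a class prvalue
+// argument 'X()', the branch above for xvalues, class prvalues, array
+// prvalues and function lvalues produces an identity conversion marked as a
+// reference binding, which is always a direct binding in C++11.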
+
+static ImplicitConversionSequence
+TryCopyInitialization(Sema &S, Expr *From, QualType ToType,
+ bool SuppressUserConversions,
+ bool InOverloadResolution,
+ bool AllowObjCWritebackConversion,
+ bool AllowExplicit = false);
+
+/// TryListConversion - Try to copy-initialize a value of type ToType from the
+/// initializer list From.
+static ImplicitConversionSequence
+TryListConversion(Sema &S, InitListExpr *From, QualType ToType,
+ bool SuppressUserConversions,
+ bool InOverloadResolution,
+ bool AllowObjCWritebackConversion) {
+ // C++11 [over.ics.list]p1:
+ // When an argument is an initializer list, it is not an expression and
+ // special rules apply for converting it to a parameter type.
+
+ ImplicitConversionSequence Result;
+ Result.setBad(BadConversionSequence::no_conversion, From, ToType);
+ Result.setListInitializationSequence();
+
+ // We need a complete type for what follows. Incomplete types can never be
+ // initialized from init lists.
+ if (S.RequireCompleteType(From->getLocStart(), ToType, S.PDiag()))
+ return Result;
+
+ // C++11 [over.ics.list]p2:
+ // If the parameter type is std::initializer_list<X> or "array of X" and
+ // all the elements can be implicitly converted to X, the implicit
+ // conversion sequence is the worst conversion necessary to convert an
+ // element of the list to X.
+ bool toStdInitializerList = false;
+ QualType X;
+ if (ToType->isArrayType())
+ X = S.Context.getBaseElementType(ToType);
+ else
+ toStdInitializerList = S.isStdInitializerList(ToType, &X);
+ if (!X.isNull()) {
+ for (unsigned i = 0, e = From->getNumInits(); i < e; ++i) {
+ Expr *Init = From->getInit(i);
+ ImplicitConversionSequence ICS =
+ TryCopyInitialization(S, Init, X, SuppressUserConversions,
+ InOverloadResolution,
+ AllowObjCWritebackConversion);
+ // If a single element isn't convertible, fail.
+ if (ICS.isBad()) {
+ Result = ICS;
+ break;
+ }
+ // Otherwise, look for the worst conversion.
+ if (Result.isBad() ||
+ CompareImplicitConversionSequences(S, ICS, Result) ==
+ ImplicitConversionSequence::Worse)
+ Result = ICS;
+ }
+
+ // For an empty list, we won't have computed any conversion sequence.
+ // Introduce the identity conversion sequence.
+ if (From->getNumInits() == 0) {
+ Result.setStandard();
+ Result.Standard.setAsIdentityConversion();
+ Result.Standard.setFromType(ToType);
+ Result.Standard.setAllToTypes(ToType);
+ }
+
+ Result.setListInitializationSequence();
+ Result.setStdInitializerListElement(toStdInitializerList);
+ return Result;
+ }
+
+ // C++11 [over.ics.list]p3:
+ // Otherwise, if the parameter is a non-aggregate class X and overload
+ // resolution chooses a single best constructor [...] the implicit
+ // conversion sequence is a user-defined conversion sequence. If multiple
+ // constructors are viable but none is better than the others, the
+ // implicit conversion sequence is a user-defined conversion sequence.
+ if (ToType->isRecordType() && !ToType->isAggregateType()) {
+ // This function can deal with initializer lists.
+ Result = TryUserDefinedConversion(S, From, ToType, SuppressUserConversions,
+ /*AllowExplicit=*/false,
+ InOverloadResolution, /*CStyle=*/false,
+ AllowObjCWritebackConversion);
+ Result.setListInitializationSequence();
+ return Result;
+ }
+
+ // C++11 [over.ics.list]p4:
+ // Otherwise, if the parameter has an aggregate type which can be
+ // initialized from the initializer list [...] the implicit conversion
+ // sequence is a user-defined conversion sequence.
+ if (ToType->isAggregateType()) {
+ // Type is an aggregate, argument is an init list. At this point it comes
+ // down to checking whether the initialization works.
+ // FIXME: Find out whether this parameter is consumed or not.
+ InitializedEntity Entity =
+ InitializedEntity::InitializeParameter(S.Context, ToType,
+ /*Consumed=*/false);
+ if (S.CanPerformCopyInitialization(Entity, S.Owned(From))) {
+ Result.setUserDefined();
+ Result.UserDefined.Before.setAsIdentityConversion();
+ // Initializer lists don't have a type.
+ Result.UserDefined.Before.setFromType(QualType());
+ Result.UserDefined.Before.setAllToTypes(QualType());
+
+ Result.UserDefined.After.setAsIdentityConversion();
+ Result.UserDefined.After.setFromType(ToType);
+ Result.UserDefined.After.setAllToTypes(ToType);
+ Result.UserDefined.ConversionFunction = 0;
+ }
+ return Result;
+ }
+
+ // C++11 [over.ics.list]p5:
+ // Otherwise, if the parameter is a reference, see 13.3.3.1.4.
+ if (ToType->isReferenceType()) {
+ // The standard is notoriously unclear here, since 13.3.3.1.4 doesn't
+ // mention initializer lists in any way. So we go by what list-
+ // initialization would do and try to extrapolate from that.
+
+ QualType T1 = ToType->getAs<ReferenceType>()->getPointeeType();
+
+ // If the initializer list has a single element that is reference-related
+ // to the parameter type, we initialize the reference from that.
+ if (From->getNumInits() == 1) {
+ Expr *Init = From->getInit(0);
+
+ QualType T2 = Init->getType();
+
+ // If the initializer is the address of an overloaded function, try
+ // to resolve the overloaded function. If all goes well, T2 is the
+ // type of the resulting function.
+ if (S.Context.getCanonicalType(T2) == S.Context.OverloadTy) {
+ DeclAccessPair Found;
+ if (FunctionDecl *Fn = S.ResolveAddressOfOverloadedFunction(
+ Init, ToType, false, Found))
+ T2 = Fn->getType();
+ }
+
+ // Compute some basic properties of the types and the initializer.
+ bool dummy1 = false;
+ bool dummy2 = false;
+ bool dummy3 = false;
+ Sema::ReferenceCompareResult RefRelationship
+ = S.CompareReferenceRelationship(From->getLocStart(), T1, T2, dummy1,
+ dummy2, dummy3);
+
+ if (RefRelationship >= Sema::Ref_Related)
+ return TryReferenceInit(S, Init, ToType,
+ /*FIXME:*/From->getLocStart(),
+ SuppressUserConversions,
+ /*AllowExplicit=*/false);
+ }
+
+ // Otherwise, we bind the reference to a temporary created from the
+ // initializer list.
+ Result = TryListConversion(S, From, T1, SuppressUserConversions,
+ InOverloadResolution,
+ AllowObjCWritebackConversion);
+ if (Result.isFailure())
+ return Result;
+ assert(!Result.isEllipsis() &&
+ "Sub-initialization cannot result in ellipsis conversion.");
+
+ // Can we even bind to a temporary?
+ if (ToType->isRValueReferenceType() ||
+ (T1.isConstQualified() && !T1.isVolatileQualified())) {
+ StandardConversionSequence &SCS = Result.isStandard() ? Result.Standard :
+ Result.UserDefined.After;
+ SCS.ReferenceBinding = true;
+ SCS.IsLvalueReference = ToType->isLValueReferenceType();
+ SCS.BindsToRvalue = true;
+ SCS.BindsToFunctionLvalue = false;
+ SCS.BindsImplicitObjectArgumentWithoutRefQualifier = false;
+ SCS.ObjCLifetimeConversionBinding = false;
+ } else
+ Result.setBad(BadConversionSequence::lvalue_ref_to_rvalue,
+ From, ToType);
+ return Result;
+ }
+
+ // C++11 [over.ics.list]p6:
+ // Otherwise, if the parameter type is not a class:
+ if (!ToType->isRecordType()) {
+ // - if the initializer list has one element, the implicit conversion
+ // sequence is the one required to convert the element to the
+ // parameter type.
+ unsigned NumInits = From->getNumInits();
+ if (NumInits == 1)
+ Result = TryCopyInitialization(S, From->getInit(0), ToType,
+ SuppressUserConversions,
+ InOverloadResolution,
+ AllowObjCWritebackConversion);
+ // - if the initializer list has no elements, the implicit conversion
+ // sequence is the identity conversion.
+ else if (NumInits == 0) {
+ Result.setStandard();
+ Result.Standard.setAsIdentityConversion();
+ Result.Standard.setFromType(ToType);
+ Result.Standard.setAllToTypes(ToType);
+ }
+ Result.setListInitializationSequence();
+ return Result;
+ }
+
+ // C++11 [over.ics.list]p7:
+ // In all cases other than those enumerated above, no conversion is possible
+ return Result;
+}
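+
+// Illustrative example: for a parameter of type 'int' and the argument '{1}',
+// the non-class case above yields the sequence that converts '1' to 'int';
+// for the empty list '{}' it yields the identity conversion.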
+
+/// TryCopyInitialization - Try to copy-initialize a value of type
+/// ToType from the expression From. Return the implicit conversion
+/// sequence required to pass this argument, which may be a bad
+/// conversion sequence (meaning that the argument cannot be passed to
+/// a parameter of this type). If @p SuppressUserConversions, then we
+/// do not permit any user-defined conversion sequences.
+static ImplicitConversionSequence
+TryCopyInitialization(Sema &S, Expr *From, QualType ToType,
+ bool SuppressUserConversions,
+ bool InOverloadResolution,
+ bool AllowObjCWritebackConversion,
+ bool AllowExplicit) {
+ if (InitListExpr *FromInitList = dyn_cast<InitListExpr>(From))
+ return TryListConversion(S, FromInitList, ToType, SuppressUserConversions,
+ InOverloadResolution,AllowObjCWritebackConversion);
+
+ if (ToType->isReferenceType())
+ return TryReferenceInit(S, From, ToType,
+ /*FIXME:*/From->getLocStart(),
+ SuppressUserConversions,
+ AllowExplicit);
+
+ return TryImplicitConversion(S, From, ToType,
+ SuppressUserConversions,
+ /*AllowExplicit=*/false,
+ InOverloadResolution,
+ /*CStyle=*/false,
+ AllowObjCWritebackConversion);
+}
+
+static bool TryCopyInitialization(const CanQualType FromQTy,
+ const CanQualType ToQTy,
+ Sema &S,
+ SourceLocation Loc,
+ ExprValueKind FromVK) {
+ OpaqueValueExpr TmpExpr(Loc, FromQTy, FromVK);
+ ImplicitConversionSequence ICS =
+ TryCopyInitialization(S, &TmpExpr, ToQTy, true, true, false);
+
+ return !ICS.isBad();
+}
+
+/// TryObjectArgumentInitialization - Try to initialize the object
+/// parameter of the given member function (@c Method) from an argument
+/// with the given type and value classification.
+static ImplicitConversionSequence
+TryObjectArgumentInitialization(Sema &S, QualType OrigFromType,
+ Expr::Classification FromClassification,
+ CXXMethodDecl *Method,
+ CXXRecordDecl *ActingContext) {
+ QualType ClassType = S.Context.getTypeDeclType(ActingContext);
+ // [class.dtor]p2: A destructor can be invoked for a const, volatile or
+ // const volatile object.
+ unsigned Quals = isa<CXXDestructorDecl>(Method) ?
+ Qualifiers::Const | Qualifiers::Volatile : Method->getTypeQualifiers();
+ QualType ImplicitParamType = S.Context.getCVRQualifiedType(ClassType, Quals);
+
+ // Set up the conversion sequence as a "bad" conversion, to allow us
+ // to exit early.
+ ImplicitConversionSequence ICS;
+
+ // We need to have an object of class type.
+ QualType FromType = OrigFromType;
+ if (const PointerType *PT = FromType->getAs<PointerType>()) {
+ FromType = PT->getPointeeType();
+
+ // When we had a pointer, it's implicitly dereferenced, so we'd
+ // better have an lvalue.
+ assert(FromClassification.isLValue());
+ }
+
+ assert(FromType->isRecordType());
+
+ // C++0x [over.match.funcs]p4:
+ // For non-static member functions, the type of the implicit object
+ // parameter is
+ //
+ // - "lvalue reference to cv X" for functions declared without a
+ // ref-qualifier or with the & ref-qualifier
+ // - "rvalue reference to cv X" for functions declared with the &&
+ // ref-qualifier
+ //
+ // where X is the class of which the function is a member and cv is the
+ // cv-qualification on the member function declaration.
+ //
+ // However, when finding an implicit conversion sequence for the argument, we
+ // are not allowed to create temporaries or perform user-defined conversions
+ // (C++ [over.match.funcs]p5). We perform a simplified version of
+ // reference binding here, that allows class rvalues to bind to
+ // non-constant references.
+
+ // First check the qualifiers.
+ QualType FromTypeCanon = S.Context.getCanonicalType(FromType);
+ if (ImplicitParamType.getCVRQualifiers()
+ != FromTypeCanon.getLocalCVRQualifiers() &&
+ !ImplicitParamType.isAtLeastAsQualifiedAs(FromTypeCanon)) {
+ ICS.setBad(BadConversionSequence::bad_qualifiers,
+ OrigFromType, ImplicitParamType);
+ return ICS;
+ }
+
+ // Check that we have either the same type or a derived type. It
+ // affects the conversion rank.
+ QualType ClassTypeCanon = S.Context.getCanonicalType(ClassType);
+ ImplicitConversionKind SecondKind;
+ if (ClassTypeCanon == FromTypeCanon.getLocalUnqualifiedType()) {
+ SecondKind = ICK_Identity;
+ } else if (S.IsDerivedFrom(FromType, ClassType))
+ SecondKind = ICK_Derived_To_Base;
+ else {
+ ICS.setBad(BadConversionSequence::unrelated_class,
+ FromType, ImplicitParamType);
+ return ICS;
+ }
+
+ // Check the ref-qualifier.
+ switch (Method->getRefQualifier()) {
+ case RQ_None:
+ // Do nothing; we don't care about lvalueness or rvalueness.
+ break;
+
+ case RQ_LValue:
+ if (!FromClassification.isLValue() && Quals != Qualifiers::Const) {
+ // non-const lvalue reference cannot bind to an rvalue
+ ICS.setBad(BadConversionSequence::lvalue_ref_to_rvalue, FromType,
+ ImplicitParamType);
+ return ICS;
+ }
+ break;
+
+ case RQ_RValue:
+ if (!FromClassification.isRValue()) {
+ // rvalue reference cannot bind to an lvalue
+ ICS.setBad(BadConversionSequence::rvalue_ref_to_lvalue, FromType,
+ ImplicitParamType);
+ return ICS;
+ }
+ break;
+ }
+
+ // Success. Mark this as a reference binding.
+ ICS.setStandard();
+ ICS.Standard.setAsIdentityConversion();
+ ICS.Standard.Second = SecondKind;
+ ICS.Standard.setFromType(FromType);
+ ICS.Standard.setAllToTypes(ImplicitParamType);
+ ICS.Standard.ReferenceBinding = true;
+ ICS.Standard.DirectBinding = true;
+ ICS.Standard.IsLvalueReference = Method->getRefQualifier() != RQ_RValue;
+ ICS.Standard.BindsToFunctionLvalue = false;
+ ICS.Standard.BindsToRvalue = FromClassification.isRValue();
+ ICS.Standard.BindsImplicitObjectArgumentWithoutRefQualifier
+ = (Method->getRefQualifier() == RQ_None);
+ return ICS;
+}
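+
+// Illustrative example: calling a non-const member function on a const object
+// fails the qualifier check above and produces a bad_qualifiers conversion,
+// which PerformObjectArgumentInitialization below turns into the
+// err_member_function_call_bad_cvr diagnostic.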
+
+/// PerformObjectArgumentInitialization - Perform initialization of
+/// the implicit object parameter for the given Method with the given
+/// expression.
+ExprResult
+Sema::PerformObjectArgumentInitialization(Expr *From,
+ NestedNameSpecifier *Qualifier,
+ NamedDecl *FoundDecl,
+ CXXMethodDecl *Method) {
+ QualType FromRecordType, DestType;
+ QualType ImplicitParamRecordType =
+ Method->getThisType(Context)->getAs<PointerType>()->getPointeeType();
+
+ Expr::Classification FromClassification;
+ if (const PointerType *PT = From->getType()->getAs<PointerType>()) {
+ FromRecordType = PT->getPointeeType();
+ DestType = Method->getThisType(Context);
+ FromClassification = Expr::Classification::makeSimpleLValue();
+ } else {
+ FromRecordType = From->getType();
+ DestType = ImplicitParamRecordType;
+ FromClassification = From->Classify(Context);
+ }
+
+ // Note that we always use the true parent context when performing
+ // the actual argument initialization.
+ ImplicitConversionSequence ICS
+ = TryObjectArgumentInitialization(*this, From->getType(), FromClassification,
+ Method, Method->getParent());
+ if (ICS.isBad()) {
+ if (ICS.Bad.Kind == BadConversionSequence::bad_qualifiers) {
+ Qualifiers FromQs = FromRecordType.getQualifiers();
+ Qualifiers ToQs = DestType.getQualifiers();
+ unsigned CVR = FromQs.getCVRQualifiers() & ~ToQs.getCVRQualifiers();
+ if (CVR) {
+ Diag(From->getLocStart(),
+ diag::err_member_function_call_bad_cvr)
+ << Method->getDeclName() << FromRecordType << (CVR - 1)
+ << From->getSourceRange();
+ Diag(Method->getLocation(), diag::note_previous_decl)
+ << Method->getDeclName();
+ return ExprError();
+ }
+ }
+
+ return Diag(From->getLocStart(),
+ diag::err_implicit_object_parameter_init)
+ << ImplicitParamRecordType << FromRecordType << From->getSourceRange();
+ }
+
+ if (ICS.Standard.Second == ICK_Derived_To_Base) {
+ ExprResult FromRes =
+ PerformObjectMemberConversion(From, Qualifier, FoundDecl, Method);
+ if (FromRes.isInvalid())
+ return ExprError();
+ From = FromRes.take();
+ }
+
+ if (!Context.hasSameType(From->getType(), DestType))
+ From = ImpCastExprToType(From, DestType, CK_NoOp,
+ From->getValueKind()).take();
+ return Owned(From);
+}
+
+/// TryContextuallyConvertToBool - Attempt to contextually convert the
+/// expression From to bool (C++0x [conv]p3).
+static ImplicitConversionSequence
+TryContextuallyConvertToBool(Sema &S, Expr *From) {
+ // FIXME: This is pretty broken.
+ return TryImplicitConversion(S, From, S.Context.BoolTy,
+ // FIXME: Are these flags correct?
+ /*SuppressUserConversions=*/false,
+ /*AllowExplicit=*/true,
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false,
+ /*AllowObjCWritebackConversion=*/false);
+}
+
+/// PerformContextuallyConvertToBool - Perform a contextual conversion
+/// of the expression From to bool (C++0x [conv]p3).
+ExprResult Sema::PerformContextuallyConvertToBool(Expr *From) {
+ if (checkPlaceholderForOverload(*this, From))
+ return ExprError();
+
+ ImplicitConversionSequence ICS = TryContextuallyConvertToBool(*this, From);
+ if (!ICS.isBad())
+ return PerformImplicitConversion(From, Context.BoolTy, ICS, AA_Converting);
+
+ if (!DiagnoseMultipleUserDefinedConversion(From, Context.BoolTy))
+ return Diag(From->getLocStart(),
+ diag::err_typecheck_bool_condition)
+ << From->getType() << From->getSourceRange();
+ return ExprError();
+}
+
+/// Check that the specified conversion is permitted in a converted constant
+/// expression, according to C++11 [expr.const]p3. Return true if the conversion
+/// is acceptable.
+static bool CheckConvertedConstantConversions(Sema &S,
+ StandardConversionSequence &SCS) {
+ // Since we know that the target type is an integral or unscoped enumeration
+ // type, most conversion kinds are impossible. All possible First and Third
+ // conversions are fine.
+ switch (SCS.Second) {
+ case ICK_Identity:
+ case ICK_Integral_Promotion:
+ case ICK_Integral_Conversion:
+ return true;
+
+ case ICK_Boolean_Conversion:
+ // Conversion from an integral or unscoped enumeration type to bool is
+ // classified as ICK_Boolean_Conversion, but it's also an integral
+ // conversion, so it's permitted in a converted constant expression.
+ return SCS.getFromType()->isIntegralOrUnscopedEnumerationType() &&
+ SCS.getToType(2)->isBooleanType();
+
+ case ICK_Floating_Integral:
+ case ICK_Complex_Real:
+ return false;
+
+ case ICK_Lvalue_To_Rvalue:
+ case ICK_Array_To_Pointer:
+ case ICK_Function_To_Pointer:
+ case ICK_NoReturn_Adjustment:
+ case ICK_Qualification:
+ case ICK_Compatible_Conversion:
+ case ICK_Vector_Conversion:
+ case ICK_Vector_Splat:
+ case ICK_Derived_To_Base:
+ case ICK_Pointer_Conversion:
+ case ICK_Pointer_Member:
+ case ICK_Block_Pointer_Conversion:
+ case ICK_Writeback_Conversion:
+ case ICK_Floating_Promotion:
+ case ICK_Complex_Promotion:
+ case ICK_Complex_Conversion:
+ case ICK_Floating_Conversion:
+ case ICK_TransparentUnionConversion:
+ llvm_unreachable("unexpected second conversion kind");
+
+ case ICK_Num_Conversion_Kinds:
+ break;
+ }
+
+ llvm_unreachable("unknown conversion kind");
+}
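+
+// Illustrative example: an integral promotion (say 'char' to 'int') or a
+// boolean conversion from an integral type passes the check above, while a
+// floating-integral conversion such as 'double' to 'int' is rejected by the
+// caller below with err_typecheck_converted_constant_expression_disallowed.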
+
+/// CheckConvertedConstantExpression - Check that the expression From is a
+/// converted constant expression of type T, perform the conversion and produce
+/// the converted expression, per C++11 [expr.const]p3.
+ExprResult Sema::CheckConvertedConstantExpression(Expr *From, QualType T,
+ llvm::APSInt &Value,
+ CCEKind CCE) {
+ assert(LangOpts.CPlusPlus0x && "converted constant expression outside C++11");
+ assert(T->isIntegralOrEnumerationType() && "unexpected converted const type");
+
+ if (checkPlaceholderForOverload(*this, From))
+ return ExprError();
+
+ // C++11 [expr.const]p3 with proposed wording fixes:
+ // A converted constant expression of type T is a core constant expression,
+ // implicitly converted to a prvalue of type T, where the converted
+ // expression is a literal constant expression and the implicit conversion
+ // sequence contains only user-defined conversions, lvalue-to-rvalue
+ // conversions, integral promotions, and integral conversions other than
+ // narrowing conversions.
+ ImplicitConversionSequence ICS =
+ TryImplicitConversion(From, T,
+ /*SuppressUserConversions=*/false,
+ /*AllowExplicit=*/false,
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false,
+ /*AllowObjcWritebackConversion=*/false);
+ StandardConversionSequence *SCS = 0;
+ switch (ICS.getKind()) {
+ case ImplicitConversionSequence::StandardConversion:
+ if (!CheckConvertedConstantConversions(*this, ICS.Standard))
+ return Diag(From->getLocStart(),
+ diag::err_typecheck_converted_constant_expression_disallowed)
+ << From->getType() << From->getSourceRange() << T;
+ SCS = &ICS.Standard;
+ break;
+ case ImplicitConversionSequence::UserDefinedConversion:
+ // We are converting from class type to an integral or enumeration type, so
+ // the Before sequence must be trivial.
+ if (!CheckConvertedConstantConversions(*this, ICS.UserDefined.After))
+ return Diag(From->getLocStart(),
+ diag::err_typecheck_converted_constant_expression_disallowed)
+ << From->getType() << From->getSourceRange() << T;
+ SCS = &ICS.UserDefined.After;
+ break;
+ case ImplicitConversionSequence::AmbiguousConversion:
+ case ImplicitConversionSequence::BadConversion:
+ if (!DiagnoseMultipleUserDefinedConversion(From, T))
+ return Diag(From->getLocStart(),
+ diag::err_typecheck_converted_constant_expression)
+ << From->getType() << From->getSourceRange() << T;
+ return ExprError();
+
+ case ImplicitConversionSequence::EllipsisConversion:
+ llvm_unreachable("ellipsis conversion in converted constant expression");
+ }
+
+ ExprResult Result = PerformImplicitConversion(From, T, ICS, AA_Converting);
+ if (Result.isInvalid())
+ return Result;
+
+ // Check for a narrowing implicit conversion.
+ APValue PreNarrowingValue;
+ QualType PreNarrowingType;
+ switch (SCS->getNarrowingKind(Context, Result.get(), PreNarrowingValue,
+ PreNarrowingType)) {
+ case NK_Variable_Narrowing:
+ // Implicit conversion to a narrower type, and the value is not a constant
+ // expression. We'll diagnose this in a moment.
+ case NK_Not_Narrowing:
+ break;
+
+ case NK_Constant_Narrowing:
+ Diag(From->getLocStart(),
+ isSFINAEContext() ? diag::err_cce_narrowing_sfinae :
+ diag::err_cce_narrowing)
+ << CCE << /*Constant*/1
+ << PreNarrowingValue.getAsString(Context, PreNarrowingType) << T;
+ break;
+
+ case NK_Type_Narrowing:
+ Diag(From->getLocStart(),
+ isSFINAEContext() ? diag::err_cce_narrowing_sfinae :
+ diag::err_cce_narrowing)
+ << CCE << /*Constant*/0 << From->getType() << T;
+ break;
+ }
+
+ // Check the expression is a constant expression.
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ Expr::EvalResult Eval;
+ Eval.Diag = &Notes;
+
+ if (!Result.get()->EvaluateAsRValue(Eval, Context)) {
+ // The expression can't be folded, so we can't keep it at this position in
+ // the AST.
+ Result = ExprError();
+ } else {
+ Value = Eval.Val.getInt();
+
+ if (Notes.empty()) {
+ // It's a constant expression.
+ return Result;
+ }
+ }
+
+ // It's not a constant expression. Produce an appropriate diagnostic.
+ if (Notes.size() == 1 &&
+ Notes[0].second.getDiagID() == diag::note_invalid_subexpr_in_const_expr)
+ Diag(Notes[0].first, diag::err_expr_not_cce) << CCE;
+ else {
+ Diag(From->getLocStart(), diag::err_expr_not_cce)
+ << CCE << From->getSourceRange();
+ for (unsigned I = 0; I < Notes.size(); ++I)
+ Diag(Notes[I].first, Notes[I].second);
+ }
+ return Result;
+}
+
+/// dropPointerConversion - If the given standard conversion sequence
+/// involves a pointer conversion, remove it. This may change
+/// the result type of the conversion sequence.
+static void dropPointerConversion(StandardConversionSequence &SCS) {
+ if (SCS.Second == ICK_Pointer_Conversion) {
+ SCS.Second = ICK_Identity;
+ SCS.Third = ICK_Identity;
+ SCS.ToTypePtrs[2] = SCS.ToTypePtrs[1] = SCS.ToTypePtrs[0];
+ }
+}
+
+/// TryContextuallyConvertToObjCPointer - Attempt to contextually
+/// convert the expression From to an Objective-C pointer type.
+static ImplicitConversionSequence
+TryContextuallyConvertToObjCPointer(Sema &S, Expr *From) {
+ // Do an implicit conversion to 'id'.
+ QualType Ty = S.Context.getObjCIdType();
+ ImplicitConversionSequence ICS
+ = TryImplicitConversion(S, From, Ty,
+ // FIXME: Are these flags correct?
+ /*SuppressUserConversions=*/false,
+ /*AllowExplicit=*/true,
+ /*InOverloadResolution=*/false,
+ /*CStyle=*/false,
+ /*AllowObjCWritebackConversion=*/false);
+
+ // Strip off any final conversions to 'id'.
+ switch (ICS.getKind()) {
+ case ImplicitConversionSequence::BadConversion:
+ case ImplicitConversionSequence::AmbiguousConversion:
+ case ImplicitConversionSequence::EllipsisConversion:
+ break;
+
+ case ImplicitConversionSequence::UserDefinedConversion:
+ dropPointerConversion(ICS.UserDefined.After);
+ break;
+
+ case ImplicitConversionSequence::StandardConversion:
+ dropPointerConversion(ICS.Standard);
+ break;
+ }
+
+ return ICS;
+}
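+
+// Note: dropPointerConversion above resets the second and third conversions
+// to the identity and collapses the to-types to the first to-type, so the
+// resulting sequence leaves the expression at its own Objective-C pointer
+// type rather than converting it all the way to 'id'.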
+
+/// PerformContextuallyConvertToObjCPointer - Perform a contextual
+/// conversion of the expression From to an Objective-C pointer type.
+ExprResult Sema::PerformContextuallyConvertToObjCPointer(Expr *From) {
+ if (checkPlaceholderForOverload(*this, From))
+ return ExprError();
+
+ QualType Ty = Context.getObjCIdType();
+ ImplicitConversionSequence ICS =
+ TryContextuallyConvertToObjCPointer(*this, From);
+ if (!ICS.isBad())
+ return PerformImplicitConversion(From, Ty, ICS, AA_Converting);
+ return ExprError();
+}
+
+/// Determine whether the provided type is an integral type, or an enumeration
+/// type of a permitted flavor.
+static bool isIntegralOrEnumerationType(QualType T, bool AllowScopedEnum) {
+ return AllowScopedEnum ? T->isIntegralOrEnumerationType()
+ : T->isIntegralOrUnscopedEnumerationType();
+}
+
+/// \brief Attempt to convert the given expression to an integral or
+/// enumeration type.
+///
+/// This routine will attempt to convert an expression of class type to an
+/// integral or enumeration type, if that class type only has a single
+/// conversion to an integral or enumeration type.
+///
+/// \param Loc The source location of the construct that requires the
+/// conversion.
+///
+/// \param FromE The expression we're converting from.
+///
+/// \param NotIntDiag The diagnostic to be emitted if the expression does not
+/// have integral or enumeration type.
+///
+/// \param IncompleteDiag The diagnostic to be emitted if the expression has
+/// incomplete class type.
+///
+/// \param ExplicitConvDiag The diagnostic to be emitted if we're calling an
+/// explicit conversion function (because no implicit conversion functions
+/// were available). This is a recovery mode.
+///
+/// \param ExplicitConvNote The note to be emitted with \p ExplicitConvDiag,
+/// showing which conversion was picked.
+///
+/// \param AmbigDiag The diagnostic to be emitted if there is more than one
+/// conversion function that could convert to integral or enumeration type.
+///
+/// \param AmbigNote The note to be emitted with \p AmbigDiag for each
+/// usable conversion function.
+///
+/// \param ConvDiag The diagnostic to be emitted if we are calling a conversion
+/// function, which may be an extension in this case.
+///
+/// \param AllowScopedEnumerations Specifies whether conversions to scoped
+/// enumerations should be considered.
+///
+/// \returns The expression, converted to an integral or enumeration type if
+/// successful.
+ExprResult
+Sema::ConvertToIntegralOrEnumerationType(SourceLocation Loc, Expr *From,
+ const PartialDiagnostic &NotIntDiag,
+ const PartialDiagnostic &IncompleteDiag,
+ const PartialDiagnostic &ExplicitConvDiag,
+ const PartialDiagnostic &ExplicitConvNote,
+ const PartialDiagnostic &AmbigDiag,
+ const PartialDiagnostic &AmbigNote,
+ const PartialDiagnostic &ConvDiag,
+ bool AllowScopedEnumerations) {
+ // We can't perform any more checking for type-dependent expressions.
+ if (From->isTypeDependent())
+ return Owned(From);
+
+ // Process placeholders immediately.
+ if (From->hasPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(From);
+ if (result.isInvalid()) return result;
+ From = result.take();
+ }
+
+ // If the expression already has integral or enumeration type, we're golden.
+ QualType T = From->getType();
+ if (isIntegralOrEnumerationType(T, AllowScopedEnumerations))
+ return DefaultLvalueConversion(From);
+
+ // FIXME: Check for missing '()' if T is a function type?
+
+ // If we don't have a class type in C++, there's no way we can get an
+ // expression of integral or enumeration type.
+ const RecordType *RecordTy = T->getAs<RecordType>();
+ if (!RecordTy || !getLangOpts().CPlusPlus) {
+ if (NotIntDiag.getDiagID())
+ Diag(Loc, NotIntDiag) << T << From->getSourceRange();
+ return Owned(From);
+ }
+
+ // We must have a complete class type.
+ if (RequireCompleteType(Loc, T, IncompleteDiag))
+ return Owned(From);
+
+ // Look for a conversion to an integral or enumeration type.
+ UnresolvedSet<4> ViableConversions;
+ UnresolvedSet<4> ExplicitConversions;
+ const UnresolvedSetImpl *Conversions
+ = cast<CXXRecordDecl>(RecordTy->getDecl())->getVisibleConversionFunctions();
+
+ bool HadMultipleCandidates = (Conversions->size() > 1);
+
+ for (UnresolvedSetImpl::iterator I = Conversions->begin(),
+ E = Conversions->end();
+ I != E;
+ ++I) {
+ if (CXXConversionDecl *Conversion
+ = dyn_cast<CXXConversionDecl>((*I)->getUnderlyingDecl())) {
+ if (isIntegralOrEnumerationType(
+ Conversion->getConversionType().getNonReferenceType(),
+ AllowScopedEnumerations)) {
+ if (Conversion->isExplicit())
+ ExplicitConversions.addDecl(I.getDecl(), I.getAccess());
+ else
+ ViableConversions.addDecl(I.getDecl(), I.getAccess());
+ }
+ }
+ }
+
+ switch (ViableConversions.size()) {
+ case 0:
+ if (ExplicitConversions.size() == 1 && ExplicitConvDiag.getDiagID()) {
+ DeclAccessPair Found = ExplicitConversions[0];
+ CXXConversionDecl *Conversion
+ = cast<CXXConversionDecl>(Found->getUnderlyingDecl());
+
+ // The user probably meant to invoke the given explicit
+ // conversion; use it.
+ QualType ConvTy
+ = Conversion->getConversionType().getNonReferenceType();
+ std::string TypeStr;
+ ConvTy.getAsStringInternal(TypeStr, getPrintingPolicy());
+
+ Diag(Loc, ExplicitConvDiag)
+ << T << ConvTy
+ << FixItHint::CreateInsertion(From->getLocStart(),
+ "static_cast<" + TypeStr + ">(")
+ << FixItHint::CreateInsertion(PP.getLocForEndOfToken(From->getLocEnd()),
+ ")");
+ Diag(Conversion->getLocation(), ExplicitConvNote)
+ << ConvTy->isEnumeralType() << ConvTy;
+
+ // If we aren't in a SFINAE context, build a call to the
+ // explicit conversion function.
+ if (isSFINAEContext())
+ return ExprError();
+
+ CheckMemberOperatorAccess(From->getExprLoc(), From, 0, Found);
+ ExprResult Result = BuildCXXMemberCallExpr(From, Found, Conversion,
+ HadMultipleCandidates);
+ if (Result.isInvalid())
+ return ExprError();
+ // Record usage of conversion in an implicit cast.
+ From = ImplicitCastExpr::Create(Context, Result.get()->getType(),
+ CK_UserDefinedConversion,
+ Result.get(), 0,
+ Result.get()->getValueKind());
+ }
+
+ // We'll complain below about a non-integral condition type.
+ break;
+
+ case 1: {
+ // Apply this conversion.
+ DeclAccessPair Found = ViableConversions[0];
+ CheckMemberOperatorAccess(From->getExprLoc(), From, 0, Found);
+
+ CXXConversionDecl *Conversion
+ = cast<CXXConversionDecl>(Found->getUnderlyingDecl());
+ QualType ConvTy
+ = Conversion->getConversionType().getNonReferenceType();
+ if (ConvDiag.getDiagID()) {
+ if (isSFINAEContext())
+ return ExprError();
+
+ Diag(Loc, ConvDiag)
+ << T << ConvTy->isEnumeralType() << ConvTy << From->getSourceRange();
+ }
+
+ ExprResult Result = BuildCXXMemberCallExpr(From, Found, Conversion,
+ HadMultipleCandidates);
+ if (Result.isInvalid())
+ return ExprError();
+ // Record usage of conversion in an implicit cast.
+ From = ImplicitCastExpr::Create(Context, Result.get()->getType(),
+ CK_UserDefinedConversion,
+ Result.get(), 0,
+ Result.get()->getValueKind());
+ break;
+ }
+
+ default:
+ if (!AmbigDiag.getDiagID())
+ return Owned(From);
+
+ Diag(Loc, AmbigDiag)
+ << T << From->getSourceRange();
+ for (unsigned I = 0, N = ViableConversions.size(); I != N; ++I) {
+ CXXConversionDecl *Conv
+ = cast<CXXConversionDecl>(ViableConversions[I]->getUnderlyingDecl());
+ QualType ConvTy = Conv->getConversionType().getNonReferenceType();
+ Diag(Conv->getLocation(), AmbigNote)
+ << ConvTy->isEnumeralType() << ConvTy;
+ }
+ return Owned(From);
+ }
+
+ if (!isIntegralOrEnumerationType(From->getType(), AllowScopedEnumerations) &&
+ NotIntDiag.getDiagID())
+ Diag(Loc, NotIntDiag) << From->getType() << From->getSourceRange();
+
+ return DefaultLvalueConversion(From);
+}
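+
+// Illustrative example (hypothetical class, for exposition only) of the
+// single-conversion case this routine handles, e.g. when a class-typed
+// expression appears where an integral value is required, such as a switch
+// condition:
+//
+//   struct Size { operator unsigned() const; };   // exactly one viable
+//                                                 // integral conversion
+//   void f(Size s) {
+//     switch (s) {        // 's' is converted via 'operator unsigned'
+//     default: break;
+//     }
+//   }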
+
+/// AddOverloadCandidate - Adds the given function to the set of
+/// candidate functions, using the given function call arguments. If
+/// @p SuppressUserConversions is true, don't allow user-defined
+/// conversions via constructors or conversion operators.
+///
+/// \param PartialOverloading true if we are performing "partial" overloading
+/// based on an incomplete set of function arguments. This feature is used by
+/// code completion.
+void
+Sema::AddOverloadCandidate(FunctionDecl *Function,
+ DeclAccessPair FoundDecl,
+ llvm::ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions,
+ bool PartialOverloading,
+ bool AllowExplicit) {
+ const FunctionProtoType* Proto
+ = dyn_cast<FunctionProtoType>(Function->getType()->getAs<FunctionType>());
+ assert(Proto && "Functions without a prototype cannot be overloaded");
+ assert(!Function->getDescribedFunctionTemplate() &&
+ "Use AddTemplateOverloadCandidate for function templates");
+
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Function)) {
+ if (!isa<CXXConstructorDecl>(Method)) {
+ // If we get here, it's because we're calling a member function
+ // that is named without a member access expression (e.g.,
+ // "this->f") that was either written explicitly or created
+ // implicitly. This can happen with a qualified call to a member
+ // function, e.g., X::f(). We use an empty type for the implied
+ // object argument (C++ [over.call.func]p3), and the acting context
+ // is irrelevant.
+ AddMethodCandidate(Method, FoundDecl, Method->getParent(),
+ QualType(), Expr::Classification::makeSimpleLValue(),
+ Args, CandidateSet, SuppressUserConversions);
+ return;
+ }
+ // We treat a constructor like a non-member function, since its object
+ // argument doesn't participate in overload resolution.
+ }
+
+ if (!CandidateSet.isNewCandidate(Function))
+ return;
+
+ // Overload resolution is always an unevaluated context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(Function)){
+ // C++ [class.copy]p3:
+ // A member function template is never instantiated to perform the copy
+ // of a class object to an object of its class type.
+ QualType ClassType = Context.getTypeDeclType(Constructor->getParent());
+ if (Args.size() == 1 &&
+ Constructor->isSpecializationCopyingObject() &&
+ (Context.hasSameUnqualifiedType(ClassType, Args[0]->getType()) ||
+ IsDerivedFrom(Args[0]->getType(), ClassType)))
+ return;
+ }
+
+ // Add this candidate
+ OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size());
+ Candidate.FoundDecl = FoundDecl;
+ Candidate.Function = Function;
+ Candidate.Viable = true;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.ExplicitCallArguments = Args.size();
+
+ unsigned NumArgsInProto = Proto->getNumArgs();
+
+ // (C++ 13.3.2p2): A candidate function having fewer than m
+ // parameters is viable only if it has an ellipsis in its parameter
+ // list (8.3.5).
+ if ((Args.size() + (PartialOverloading && Args.size())) > NumArgsInProto &&
+ !Proto->isVariadic()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_too_many_arguments;
+ return;
+ }
+
+ // (C++ 13.3.2p2): A candidate function having more than m parameters
+ // is viable only if the (m+1)st parameter has a default argument
+ // (8.3.6). For the purposes of overload resolution, the
+ // parameter list is truncated on the right, so that there are
+ // exactly m parameters.
+ unsigned MinRequiredArgs = Function->getMinRequiredArguments();
+ if (Args.size() < MinRequiredArgs && !PartialOverloading) {
+ // Not enough arguments.
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_too_few_arguments;
+ return;
+ }
+
+ // (CUDA B.1): Check for invalid calls between targets.
+ if (getLangOpts().CUDA)
+ if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
+ if (CheckCUDATarget(Caller, Function)) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_target;
+ return;
+ }
+
+ // Determine the implicit conversion sequences for each of the
+ // arguments.
+ for (unsigned ArgIdx = 0; ArgIdx < Args.size(); ++ArgIdx) {
+ if (ArgIdx < NumArgsInProto) {
+ // (C++ 13.3.2p3): for F to be a viable function, there shall
+ // exist for each argument an implicit conversion sequence
+ // (13.3.3.1) that converts that argument to the corresponding
+ // parameter of F.
+ QualType ParamType = Proto->getArgType(ArgIdx);
+ Candidate.Conversions[ArgIdx]
+ = TryCopyInitialization(*this, Args[ArgIdx], ParamType,
+ SuppressUserConversions,
+ /*InOverloadResolution=*/true,
+ /*AllowObjCWritebackConversion=*/
+ getLangOpts().ObjCAutoRefCount,
+ AllowExplicit);
+ if (Candidate.Conversions[ArgIdx].isBad()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_conversion;
+ break;
+ }
+ } else {
+ // (C++ 13.3.2p2): For the purposes of overload resolution, any
+ // argument for which there is no corresponding parameter is
+ // considered to ""match the ellipsis" (C+ 13.3.3.1.3).
+ Candidate.Conversions[ArgIdx].setEllipsis();
+ }
+ }
+}
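+
+// Illustrative example of the arity checks above (hypothetical declarations,
+// for exposition only):
+//
+//   void g(int);                    // non-viable for g(1, 2): too many args
+//   void g(int, ...);               // viable: extra argument matches the ellipsis
+//   void g(int, int = 0, int = 0);  // viable: defaulted parameters cover g(1, 2)
+//
+// For the call g(1, 2), the first candidate is marked non-viable with
+// ovl_fail_too_many_arguments; the other two pass the arity checks and
+// proceed to the per-argument conversion checks.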
+
+/// \brief Add all of the function declarations in the given function set to
+/// the overload candidate set.
+void Sema::AddFunctionCandidates(const UnresolvedSetImpl &Fns,
+ llvm::ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions,
+ TemplateArgumentListInfo *ExplicitTemplateArgs) {
+ for (UnresolvedSetIterator F = Fns.begin(), E = Fns.end(); F != E; ++F) {
+ NamedDecl *D = F.getDecl()->getUnderlyingDecl();
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (isa<CXXMethodDecl>(FD) && !cast<CXXMethodDecl>(FD)->isStatic())
+ AddMethodCandidate(cast<CXXMethodDecl>(FD), F.getPair(),
+ cast<CXXMethodDecl>(FD)->getParent(),
+ Args[0]->getType(), Args[0]->Classify(Context),
+ Args.slice(1), CandidateSet,
+ SuppressUserConversions);
+ else
+ AddOverloadCandidate(FD, F.getPair(), Args, CandidateSet,
+ SuppressUserConversions);
+ } else {
+ FunctionTemplateDecl *FunTmpl = cast<FunctionTemplateDecl>(D);
+ if (isa<CXXMethodDecl>(FunTmpl->getTemplatedDecl()) &&
+ !cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl())->isStatic())
+ AddMethodTemplateCandidate(FunTmpl, F.getPair(),
+ cast<CXXRecordDecl>(FunTmpl->getDeclContext()),
+ ExplicitTemplateArgs,
+ Args[0]->getType(),
+ Args[0]->Classify(Context), Args.slice(1),
+ CandidateSet, SuppressUserConversions);
+ else
+ AddTemplateOverloadCandidate(FunTmpl, F.getPair(),
+ ExplicitTemplateArgs, Args,
+ CandidateSet, SuppressUserConversions);
+ }
+ }
+}
+
+/// AddMethodCandidate - Adds a named decl (which is some kind of
+/// method) as a method candidate to the given overload set.
+void Sema::AddMethodCandidate(DeclAccessPair FoundDecl,
+ QualType ObjectType,
+ Expr::Classification ObjectClassification,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions) {
+ NamedDecl *Decl = FoundDecl.getDecl();
+ CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(Decl->getDeclContext());
+
+ if (isa<UsingShadowDecl>(Decl))
+ Decl = cast<UsingShadowDecl>(Decl)->getTargetDecl();
+
+ if (FunctionTemplateDecl *TD = dyn_cast<FunctionTemplateDecl>(Decl)) {
+ assert(isa<CXXMethodDecl>(TD->getTemplatedDecl()) &&
+ "Expected a member function template");
+ AddMethodTemplateCandidate(TD, FoundDecl, ActingContext,
+ /*ExplicitArgs*/ 0,
+ ObjectType, ObjectClassification,
+ llvm::makeArrayRef(Args, NumArgs), CandidateSet,
+ SuppressUserConversions);
+ } else {
+ AddMethodCandidate(cast<CXXMethodDecl>(Decl), FoundDecl, ActingContext,
+ ObjectType, ObjectClassification,
+ llvm::makeArrayRef(Args, NumArgs),
+ CandidateSet, SuppressUserConversions);
+ }
+}
+
+/// AddMethodCandidate - Adds the given C++ member function to the set
+/// of candidate functions, using the given function call arguments
+/// and the object argument (@c Object). For example, in a call
+/// @c o.f(a1,a2), @c Object will contain @c o and @c Args will contain
+/// both @c a1 and @c a2. If @p SuppressUserConversions is true, don't
+/// allow user-defined conversions via constructors or conversion
+/// operators.
+void
+Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingContext, QualType ObjectType,
+ Expr::Classification ObjectClassification,
+ llvm::ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions) {
+ const FunctionProtoType* Proto
+ = dyn_cast<FunctionProtoType>(Method->getType()->getAs<FunctionType>());
+ assert(Proto && "Methods without a prototype cannot be overloaded");
+ assert(!isa<CXXConstructorDecl>(Method) &&
+ "Use AddOverloadCandidate for constructors");
+
+ if (!CandidateSet.isNewCandidate(Method))
+ return;
+
+ // Overload resolution is always an unevaluated context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+
+ // Add this candidate
+ OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size() + 1);
+ Candidate.FoundDecl = FoundDecl;
+ Candidate.Function = Method;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.ExplicitCallArguments = Args.size();
+
+ unsigned NumArgsInProto = Proto->getNumArgs();
+
+ // (C++ 13.3.2p2): A candidate function having fewer than m
+ // parameters is viable only if it has an ellipsis in its parameter
+ // list (8.3.5).
+ if (Args.size() > NumArgsInProto && !Proto->isVariadic()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_too_many_arguments;
+ return;
+ }
+
+ // (C++ 13.3.2p2): A candidate function having more than m parameters
+ // is viable only if the (m+1)st parameter has a default argument
+ // (8.3.6). For the purposes of overload resolution, the
+ // parameter list is truncated on the right, so that there are
+ // exactly m parameters.
+ unsigned MinRequiredArgs = Method->getMinRequiredArguments();
+ if (Args.size() < MinRequiredArgs) {
+ // Not enough arguments.
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_too_few_arguments;
+ return;
+ }
+
+ Candidate.Viable = true;
+
+ if (Method->isStatic() || ObjectType.isNull())
+ // The implicit object argument is ignored.
+ Candidate.IgnoreObjectArgument = true;
+ else {
+ // Determine the implicit conversion sequence for the object
+ // parameter.
+ Candidate.Conversions[0]
+ = TryObjectArgumentInitialization(*this, ObjectType, ObjectClassification,
+ Method, ActingContext);
+ if (Candidate.Conversions[0].isBad()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_conversion;
+ return;
+ }
+ }
+
+ // Determine the implicit conversion sequences for each of the
+ // arguments.
+ for (unsigned ArgIdx = 0; ArgIdx < Args.size(); ++ArgIdx) {
+ if (ArgIdx < NumArgsInProto) {
+ // (C++ 13.3.2p3): for F to be a viable function, there shall
+ // exist for each argument an implicit conversion sequence
+ // (13.3.3.1) that converts that argument to the corresponding
+ // parameter of F.
+ QualType ParamType = Proto->getArgType(ArgIdx);
+ Candidate.Conversions[ArgIdx + 1]
+ = TryCopyInitialization(*this, Args[ArgIdx], ParamType,
+ SuppressUserConversions,
+ /*InOverloadResolution=*/true,
+ /*AllowObjCWritebackConversion=*/
+ getLangOpts().ObjCAutoRefCount);
+ if (Candidate.Conversions[ArgIdx + 1].isBad()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_conversion;
+ break;
+ }
+ } else {
+ // (C++ 13.3.2p2): For the purposes of overload resolution, any
+ // argument for which there is no corresponding parameter is
+ // considered to ""match the ellipsis" (C+ 13.3.3.1.3).
+ Candidate.Conversions[ArgIdx + 1].setEllipsis();
+ }
+ }
+}
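+
+// Illustrative example of the object-argument check above (hypothetical
+// types, for exposition only):
+//
+//   struct X { void f(); };            // non-const member function
+//   void call(const X &x) { x.f(); }
+//
+// TryObjectArgumentInitialization cannot bind the 'const X' object to the
+// implicit object parameter 'X &', so the candidate is marked non-viable
+// with ovl_fail_bad_conversion before the explicit arguments are examined.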
+
+/// \brief Add a C++ member function template as a candidate to the candidate
+/// set, using template argument deduction to produce an appropriate member
+/// function template specialization.
+void
+Sema::AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
+ DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingContext,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ QualType ObjectType,
+ Expr::Classification ObjectClassification,
+ llvm::ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions) {
+ if (!CandidateSet.isNewCandidate(MethodTmpl))
+ return;
+
+ // C++ [over.match.funcs]p7:
+ // In each case where a candidate is a function template, candidate
+ // function template specializations are generated using template argument
+ // deduction (14.8.3, 14.8.2). Those candidates are then handled as
+ // candidate functions in the usual way.113) A given name can refer to one
+ // or more function templates and also to a set of overloaded non-template
+ // functions. In such a case, the candidate functions generated from each
+ // function template are combined with the set of non-template candidate
+ // functions.
+ TemplateDeductionInfo Info(Context, CandidateSet.getLocation());
+ FunctionDecl *Specialization = 0;
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArguments(MethodTmpl, ExplicitTemplateArgs, Args,
+ Specialization, Info)) {
+ OverloadCandidate &Candidate = CandidateSet.addCandidate();
+ Candidate.FoundDecl = FoundDecl;
+ Candidate.Function = MethodTmpl->getTemplatedDecl();
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_deduction;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.ExplicitCallArguments = Args.size();
+ Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
+ Info);
+ return;
+ }
+
+ // Add the function template specialization produced by template argument
+ // deduction as a candidate.
+ assert(Specialization && "Missing member function template specialization?");
+ assert(isa<CXXMethodDecl>(Specialization) &&
+ "Specialization is not a member function?");
+ AddMethodCandidate(cast<CXXMethodDecl>(Specialization), FoundDecl,
+ ActingContext, ObjectType, ObjectClassification, Args,
+ CandidateSet, SuppressUserConversions);
+}
+
+/// \brief Add a C++ function template specialization as a candidate
+/// in the candidate set, using template argument deduction to produce
+/// an appropriate function template specialization.
+void
+Sema::AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate,
+ DeclAccessPair FoundDecl,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ llvm::ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet,
+ bool SuppressUserConversions) {
+ if (!CandidateSet.isNewCandidate(FunctionTemplate))
+ return;
+
+ // C++ [over.match.funcs]p7:
+ // In each case where a candidate is a function template, candidate
+ // function template specializations are generated using template argument
+ // deduction (14.8.3, 14.8.2). Those candidates are then handled as
+ // candidate functions in the usual way.113) A given name can refer to one
+ // or more function templates and also to a set of overloaded non-template
+ // functions. In such a case, the candidate functions generated from each
+ // function template are combined with the set of non-template candidate
+ // functions.
+ TemplateDeductionInfo Info(Context, CandidateSet.getLocation());
+ FunctionDecl *Specialization = 0;
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArguments(FunctionTemplate, ExplicitTemplateArgs, Args,
+ Specialization, Info)) {
+ OverloadCandidate &Candidate = CandidateSet.addCandidate();
+ Candidate.FoundDecl = FoundDecl;
+ Candidate.Function = FunctionTemplate->getTemplatedDecl();
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_deduction;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.ExplicitCallArguments = Args.size();
+ Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
+ Info);
+ return;
+ }
+
+ // Add the function template specialization produced by template argument
+ // deduction as a candidate.
+ assert(Specialization && "Missing function template specialization?");
+ AddOverloadCandidate(Specialization, FoundDecl, Args, CandidateSet,
+ SuppressUserConversions);
+}
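+
+// Illustrative example of a recorded deduction failure (hypothetical
+// template, for exposition only):
+//
+//   template <typename T> void h(T, T);
+//   void test() { h(1, 2.5); }   // T deduced as both 'int' and 'double'
+//
+// Deduction fails, so a non-viable candidate is added with
+// ovl_fail_bad_deduction and a DeductionFailure record that later lets
+// diagnostics explain why this template did not match.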
+
+/// AddConversionCandidate - Add a C++ conversion function as a
+/// candidate in the candidate set (C++ [over.match.conv],
+/// C++ [over.match.copy]). From is the expression we're converting from,
+/// and ToType is the type that we're eventually trying to convert to
+/// (which may or may not be the same type as the type that the
+/// conversion function produces).
+void
+Sema::AddConversionCandidate(CXXConversionDecl *Conversion,
+ DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingContext,
+ Expr *From, QualType ToType,
+ OverloadCandidateSet& CandidateSet) {
+ assert(!Conversion->getDescribedFunctionTemplate() &&
+ "Conversion function templates use AddTemplateConversionCandidate");
+ QualType ConvType = Conversion->getConversionType().getNonReferenceType();
+ if (!CandidateSet.isNewCandidate(Conversion))
+ return;
+
+ // Overload resolution is always an unevaluated context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+
+ // Add this candidate
+ OverloadCandidate &Candidate = CandidateSet.addCandidate(1);
+ Candidate.FoundDecl = FoundDecl;
+ Candidate.Function = Conversion;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.FinalConversion.setAsIdentityConversion();
+ Candidate.FinalConversion.setFromType(ConvType);
+ Candidate.FinalConversion.setAllToTypes(ToType);
+ Candidate.Viable = true;
+ Candidate.ExplicitCallArguments = 1;
+
+ // C++ [over.match.funcs]p4:
+ // For conversion functions, the function is considered to be a member of
+ // the class of the implied object argument for the purpose of
+ // defining the type of the implicit object parameter.
+ //
+ // Determine the implicit conversion sequence for the implicit
+ // object parameter.
+ QualType ImplicitParamType = From->getType();
+ if (const PointerType *FromPtrType = ImplicitParamType->getAs<PointerType>())
+ ImplicitParamType = FromPtrType->getPointeeType();
+ CXXRecordDecl *ConversionContext
+ = cast<CXXRecordDecl>(ImplicitParamType->getAs<RecordType>()->getDecl());
+
+ Candidate.Conversions[0]
+ = TryObjectArgumentInitialization(*this, From->getType(),
+ From->Classify(Context),
+ Conversion, ConversionContext);
+
+ if (Candidate.Conversions[0].isBad()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_conversion;
+ return;
+ }
+
+ // We won't go through a user-defined type conversion function to convert a
+ // derived type to a base type, as such conversions are given Conversion
+ // rank. They only go through a copy constructor. 13.3.3.1.2-p4 [over.ics.user]
+ QualType FromCanon
+ = Context.getCanonicalType(From->getType().getUnqualifiedType());
+ QualType ToCanon = Context.getCanonicalType(ToType).getUnqualifiedType();
+ if (FromCanon == ToCanon || IsDerivedFrom(FromCanon, ToCanon)) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_trivial_conversion;
+ return;
+ }
+
+ // To determine the conversion from the result of calling the
+ // conversion function to the type we're eventually trying to
+ // convert to (ToType), we need to synthesize a call to the
+ // conversion function and attempt copy initialization from it. This
+ // makes sure that we get the right semantics with respect to
+ // lvalues/rvalues and the type. Fortunately, we can allocate this
+ // call on the stack and we don't need its arguments to be
+ // well-formed.
+ DeclRefExpr ConversionRef(Conversion, false, Conversion->getType(),
+ VK_LValue, From->getLocStart());
+ ImplicitCastExpr ConversionFn(ImplicitCastExpr::OnStack,
+ Context.getPointerType(Conversion->getType()),
+ CK_FunctionToPointerDecay,
+ &ConversionRef, VK_RValue);
+
+ QualType ConversionType = Conversion->getConversionType();
+ if (RequireCompleteType(From->getLocStart(), ConversionType, 0)) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_final_conversion;
+ return;
+ }
+
+ ExprValueKind VK = Expr::getValueKindForType(ConversionType);
+
+ // Note that it is safe to allocate CallExpr on the stack here because
+ // there are 0 arguments (i.e., nothing is allocated using ASTContext's
+ // allocator).
+ QualType CallResultType = ConversionType.getNonLValueExprType(Context);
+ CallExpr Call(Context, &ConversionFn, 0, 0, CallResultType, VK,
+ From->getLocStart());
+ ImplicitConversionSequence ICS =
+ TryCopyInitialization(*this, &Call, ToType,
+ /*SuppressUserConversions=*/true,
+ /*InOverloadResolution=*/false,
+ /*AllowObjCWritebackConversion=*/false);
+
+ switch (ICS.getKind()) {
+ case ImplicitConversionSequence::StandardConversion:
+ Candidate.FinalConversion = ICS.Standard;
+
+ // C++ [over.ics.user]p3:
+ // If the user-defined conversion is specified by a specialization of a
+ // conversion function template, the second standard conversion sequence
+ // shall have exact match rank.
+ if (Conversion->getPrimaryTemplate() &&
+ GetConversionRank(ICS.Standard.Second) != ICR_Exact_Match) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_final_conversion_not_exact;
+ }
+
+ // C++0x [dcl.init.ref]p5:
+ // In the second case, if the reference is an rvalue reference and
+ // the second standard conversion sequence of the user-defined
+ // conversion sequence includes an lvalue-to-rvalue conversion, the
+ // program is ill-formed.
+ if (ToType->isRValueReferenceType() &&
+ ICS.Standard.First == ICK_Lvalue_To_Rvalue) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_final_conversion;
+ }
+ break;
+
+ case ImplicitConversionSequence::BadConversion:
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_final_conversion;
+ break;
+
+ default:
+ llvm_unreachable(
+ "Can only end up with a standard conversion sequence or failure");
+ }
+}
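+
+// Illustrative examples for the checks above (hypothetical types, for
+// exposition only):
+//
+//   struct S { operator int(); };
+//   long n = S();       // the "second standard conversion sequence" here is
+//                       // int -> long, computed via the synthesized call
+//
+//   struct B {}; struct D : B { operator B(); };
+//   B b = D();          // rejected above with ovl_fail_trivial_conversion;
+//                       // derived-to-base goes through the copy constructor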
+
+/// \brief Adds a conversion function template specialization
+/// candidate to the overload set, using template argument deduction
+/// to deduce the template arguments of the conversion function
+/// template from the type that we are converting to (C++
+/// [temp.deduct.conv]).
+void
+Sema::AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate,
+ DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingDC,
+ Expr *From, QualType ToType,
+ OverloadCandidateSet &CandidateSet) {
+ assert(isa<CXXConversionDecl>(FunctionTemplate->getTemplatedDecl()) &&
+ "Only conversion function templates permitted here");
+
+ if (!CandidateSet.isNewCandidate(FunctionTemplate))
+ return;
+
+ TemplateDeductionInfo Info(Context, CandidateSet.getLocation());
+ CXXConversionDecl *Specialization = 0;
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArguments(FunctionTemplate, ToType,
+ Specialization, Info)) {
+ OverloadCandidate &Candidate = CandidateSet.addCandidate();
+ Candidate.FoundDecl = FoundDecl;
+ Candidate.Function = FunctionTemplate->getTemplatedDecl();
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_deduction;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.ExplicitCallArguments = 1;
+ Candidate.DeductionFailure = MakeDeductionFailureInfo(Context, Result,
+ Info);
+ return;
+ }
+
+ // Add the conversion function template specialization produced by
+ // template argument deduction as a candidate.
+ assert(Specialization && "Missing function template specialization?");
+ AddConversionCandidate(Specialization, FoundDecl, ActingDC, From, ToType,
+ CandidateSet);
+}
+
+/// AddSurrogateCandidate - Adds a "surrogate" candidate function that
+/// converts the given @c Object to a function pointer via the
+/// conversion function @c Conversion, and then attempts to call it
+/// with the given arguments (C++ [over.call.object]p2-4). Proto is
+/// the type of function that we'll eventually be calling.
+void Sema::AddSurrogateCandidate(CXXConversionDecl *Conversion,
+ DeclAccessPair FoundDecl,
+ CXXRecordDecl *ActingContext,
+ const FunctionProtoType *Proto,
+ Expr *Object,
+ llvm::ArrayRef<Expr *> Args,
+ OverloadCandidateSet& CandidateSet) {
+ if (!CandidateSet.isNewCandidate(Conversion))
+ return;
+
+ // Overload resolution is always an unevaluated context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+
+ OverloadCandidate &Candidate = CandidateSet.addCandidate(Args.size() + 1);
+ Candidate.FoundDecl = FoundDecl;
+ Candidate.Function = 0;
+ Candidate.Surrogate = Conversion;
+ Candidate.Viable = true;
+ Candidate.IsSurrogate = true;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.ExplicitCallArguments = Args.size();
+
+ // Determine the implicit conversion sequence for the implicit
+ // object parameter.
+ ImplicitConversionSequence ObjectInit
+ = TryObjectArgumentInitialization(*this, Object->getType(),
+ Object->Classify(Context),
+ Conversion, ActingContext);
+ if (ObjectInit.isBad()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_conversion;
+ Candidate.Conversions[0] = ObjectInit;
+ return;
+ }
+
+ // The first conversion is actually a user-defined conversion whose
+ // first conversion is ObjectInit's standard conversion (which is
+ // effectively a reference binding). Record it as such.
+ Candidate.Conversions[0].setUserDefined();
+ Candidate.Conversions[0].UserDefined.Before = ObjectInit.Standard;
+ Candidate.Conversions[0].UserDefined.EllipsisConversion = false;
+ Candidate.Conversions[0].UserDefined.HadMultipleCandidates = false;
+ Candidate.Conversions[0].UserDefined.ConversionFunction = Conversion;
+ Candidate.Conversions[0].UserDefined.FoundConversionFunction = FoundDecl;
+ Candidate.Conversions[0].UserDefined.After
+ = Candidate.Conversions[0].UserDefined.Before;
+ Candidate.Conversions[0].UserDefined.After.setAsIdentityConversion();
+
+ // Check the number of call arguments against the function prototype.
+ unsigned NumArgsInProto = Proto->getNumArgs();
+
+ // (C++ 13.3.2p2): A candidate function having fewer than m
+ // parameters is viable only if it has an ellipsis in its parameter
+ // list (8.3.5).
+ if (Args.size() > NumArgsInProto && !Proto->isVariadic()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_too_many_arguments;
+ return;
+ }
+
+ // Function types don't have any default arguments, so just check if
+ // we have enough arguments.
+ if (Args.size() < NumArgsInProto) {
+ // Not enough arguments.
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_too_few_arguments;
+ return;
+ }
+
+ // Determine the implicit conversion sequences for each of the
+ // arguments.
+ for (unsigned ArgIdx = 0; ArgIdx < Args.size(); ++ArgIdx) {
+ if (ArgIdx < NumArgsInProto) {
+ // (C++ 13.3.2p3): for F to be a viable function, there shall
+ // exist for each argument an implicit conversion sequence
+ // (13.3.3.1) that converts that argument to the corresponding
+ // parameter of F.
+ QualType ParamType = Proto->getArgType(ArgIdx);
+ Candidate.Conversions[ArgIdx + 1]
+ = TryCopyInitialization(*this, Args[ArgIdx], ParamType,
+ /*SuppressUserConversions=*/false,
+ /*InOverloadResolution=*/false,
+ /*AllowObjCWritebackConversion=*/
+ getLangOpts().ObjCAutoRefCount);
+ if (Candidate.Conversions[ArgIdx + 1].isBad()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_conversion;
+ break;
+ }
+ } else {
+ // (C++ 13.3.2p2): For the purposes of overload resolution, any
+ // argument for which there is no corresponding parameter is
+ // considered to ""match the ellipsis" (C+ 13.3.3.1.3).
+ Candidate.Conversions[ArgIdx + 1].setEllipsis();
+ }
+ }
+}
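+
+// Illustrative example of a surrogate call (hypothetical type, for
+// exposition only):
+//
+//   typedef int (*Fn)(int);
+//   struct Callable { operator Fn(); };
+//   int use(Callable c) { return c(42); }
+//
+// For 'c(42)', 'operator Fn' yields a surrogate candidate: the object is
+// converted to the function pointer (the user-defined conversion recorded
+// above) and the call arguments are then checked against Fn's prototype.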
+
+/// \brief Add overload candidates for overloaded operators that are
+/// member functions.
+///
+/// Add the overloaded operator candidates that are member functions
+/// for the operator Op that was used in an operator expression such
+/// as "x Op y". , Args/NumArgs provides the operator arguments, and
+/// CandidateSet will store the added overload candidates. (C++
+/// [over.match.oper]).
+void Sema::AddMemberOperatorCandidates(OverloadedOperatorKind Op,
+ SourceLocation OpLoc,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ SourceRange OpRange) {
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
+
+ // C++ [over.match.oper]p3:
+ // For a unary operator @ with an operand of a type whose
+ // cv-unqualified version is T1, and for a binary operator @ with
+ // a left operand of a type whose cv-unqualified version is T1 and
+ // a right operand of a type whose cv-unqualified version is T2,
+ // three sets of candidate functions, designated member
+ // candidates, non-member candidates and built-in candidates, are
+ // constructed as follows:
+ QualType T1 = Args[0]->getType();
+
+ // -- If T1 is a class type, the set of member candidates is the
+ // result of the qualified lookup of T1::operator@
+ // (13.3.1.1.1); otherwise, the set of member candidates is
+ // empty.
+ if (const RecordType *T1Rec = T1->getAs<RecordType>()) {
+ // Complete the type if it can be completed. Otherwise, we're done.
+ if (RequireCompleteType(OpLoc, T1, PDiag()))
+ return;
+
+ LookupResult Operators(*this, OpName, OpLoc, LookupOrdinaryName);
+ LookupQualifiedName(Operators, T1Rec->getDecl());
+ Operators.suppressDiagnostics();
+
+ for (LookupResult::iterator Oper = Operators.begin(),
+ OperEnd = Operators.end();
+ Oper != OperEnd;
+ ++Oper)
+ AddMethodCandidate(Oper.getPair(), Args[0]->getType(),
+ Args[0]->Classify(Context), Args + 1, NumArgs - 1,
+ CandidateSet,
+ /* SuppressUserConversions = */ false);
+ }
+}
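+
+// Illustrative example of the member-candidate lookup above (hypothetical
+// type, for exposition only):
+//
+//   struct X { X operator+(int) const; };
+//   X add(X x) { return x + 1; }
+//
+// For 'x + 1', T1 is 'X', so the qualified lookup of X::operator+ yields the
+// member candidate; non-member and built-in candidates are collected
+// separately by the caller.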
+
+/// AddBuiltinCandidate - Add a candidate for a built-in
+/// operator. ResultTy and ParamTys are the result and parameter types
+/// of the built-in candidate, respectively. Args and NumArgs are the
+/// arguments being passed to the candidate. IsAssignmentOperator
+/// should be true when this built-in candidate is an assignment
+/// operator. NumContextualBoolArguments is the number of arguments
+/// (at the beginning of the argument list) that will be contextually
+/// converted to bool.
+void Sema::AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet,
+ bool IsAssignmentOperator,
+ unsigned NumContextualBoolArguments) {
+ // Overload resolution is always an unevaluated context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+
+ // Add this candidate
+ OverloadCandidate &Candidate = CandidateSet.addCandidate(NumArgs);
+ Candidate.FoundDecl = DeclAccessPair::make(0, AS_none);
+ Candidate.Function = 0;
+ Candidate.IsSurrogate = false;
+ Candidate.IgnoreObjectArgument = false;
+ Candidate.BuiltinTypes.ResultTy = ResultTy;
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx)
+ Candidate.BuiltinTypes.ParamTypes[ArgIdx] = ParamTys[ArgIdx];
+
+ // Determine the implicit conversion sequences for each of the
+ // arguments.
+ Candidate.Viable = true;
+ Candidate.ExplicitCallArguments = NumArgs;
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
+ // C++ [over.match.oper]p4:
+ // For the built-in assignment operators, conversions of the
+ // left operand are restricted as follows:
+ // -- no temporaries are introduced to hold the left operand, and
+ // -- no user-defined conversions are applied to the left
+ // operand to achieve a type match with the left-most
+ // parameter of a built-in candidate.
+ //
+ // We block these conversions by turning off user-defined
+ // conversions, since that is the only way that initialization of
+ // a reference to a non-class type can occur from something that
+ // is not of the same type.
+ if (ArgIdx < NumContextualBoolArguments) {
+ assert(ParamTys[ArgIdx] == Context.BoolTy &&
+ "Contextual conversion to bool requires bool type");
+ Candidate.Conversions[ArgIdx]
+ = TryContextuallyConvertToBool(*this, Args[ArgIdx]);
+ } else {
+ Candidate.Conversions[ArgIdx]
+ = TryCopyInitialization(*this, Args[ArgIdx], ParamTys[ArgIdx],
+ ArgIdx == 0 && IsAssignmentOperator,
+ /*InOverloadResolution=*/false,
+ /*AllowObjCWritebackConversion=*/
+ getLangOpts().ObjCAutoRefCount);
+ }
+ if (Candidate.Conversions[ArgIdx].isBad()) {
+ Candidate.Viable = false;
+ Candidate.FailureKind = ovl_fail_bad_conversion;
+ break;
+ }
+ }
+}
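+
+// Illustrative example of NumContextualBoolArguments (hypothetical type, for
+// exposition only; 'explicit operator bool' requires C++11):
+//
+//   struct Flag { explicit operator bool() const; };
+//   bool both(Flag a, Flag b) { return a && b; }
+//
+// A built-in candidate such as 'bool operator&&(bool, bool)' is added with
+// NumContextualBoolArguments == 2, so each operand is checked with
+// TryContextuallyConvertToBool rather than ordinary copy-initialization.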
+
+/// BuiltinCandidateTypeSet - A set of types that will be used for the
+/// candidate operator functions for built-in operators (C++
+/// [over.built]). The types are separated into pointer types and
+/// enumeration types.
+class BuiltinCandidateTypeSet {
+ /// TypeSet - A set of types.
+ typedef llvm::SmallPtrSet<QualType, 8> TypeSet;
+
+ /// PointerTypes - The set of pointer types that will be used in the
+ /// built-in candidates.
+ TypeSet PointerTypes;
+
+ /// MemberPointerTypes - The set of member pointer types that will be
+ /// used in the built-in candidates.
+ TypeSet MemberPointerTypes;
+
+ /// EnumerationTypes - The set of enumeration types that will be
+ /// used in the built-in candidates.
+ TypeSet EnumerationTypes;
+
+ /// \brief The set of vector types that will be used in the built-in
+ /// candidates.
+ TypeSet VectorTypes;
+
+ /// \brief A flag indicating whether non-record types are viable candidates.
+ bool HasNonRecordTypes;
+
+ /// \brief A flag indicating whether either arithmetic or enumeration types
+ /// were present in the candidate set.
+ bool HasArithmeticOrEnumeralTypes;
+
+ /// \brief A flag indicating whether the nullptr type was present in the
+ /// candidate set.
+ bool HasNullPtrType;
+
+ /// Sema - The semantic analysis instance where we are building the
+ /// candidate type set.
+ Sema &SemaRef;
+
+ /// Context - The AST context in which we will build the type sets.
+ ASTContext &Context;
+
+ bool AddPointerWithMoreQualifiedTypeVariants(QualType Ty,
+ const Qualifiers &VisibleQuals);
+ bool AddMemberPointerWithMoreQualifiedTypeVariants(QualType Ty);
+
+public:
+ /// iterator - Iterates through the types that are part of the set.
+ typedef TypeSet::iterator iterator;
+
+ BuiltinCandidateTypeSet(Sema &SemaRef)
+ : HasNonRecordTypes(false),
+ HasArithmeticOrEnumeralTypes(false),
+ HasNullPtrType(false),
+ SemaRef(SemaRef),
+ Context(SemaRef.Context) { }
+
+ void AddTypesConvertedFrom(QualType Ty,
+ SourceLocation Loc,
+ bool AllowUserConversions,
+ bool AllowExplicitConversions,
+ const Qualifiers &VisibleTypeConversionsQuals);
+
+ /// pointer_begin - First pointer type found;
+ iterator pointer_begin() { return PointerTypes.begin(); }
+
+ /// pointer_end - Past the last pointer type found;
+ iterator pointer_end() { return PointerTypes.end(); }
+
+ /// member_pointer_begin - First member pointer type found;
+ iterator member_pointer_begin() { return MemberPointerTypes.begin(); }
+
+ /// member_pointer_end - Past the last member pointer type found;
+ iterator member_pointer_end() { return MemberPointerTypes.end(); }
+
+ /// enumeration_begin - First enumeration type found;
+ iterator enumeration_begin() { return EnumerationTypes.begin(); }
+
+ /// enumeration_end - Past the last enumeration type found;
+ iterator enumeration_end() { return EnumerationTypes.end(); }
+
+ iterator vector_begin() { return VectorTypes.begin(); }
+ iterator vector_end() { return VectorTypes.end(); }
+
+ bool hasNonRecordTypes() { return HasNonRecordTypes; }
+ bool hasArithmeticOrEnumeralTypes() { return HasArithmeticOrEnumeralTypes; }
+ bool hasNullPtrType() const { return HasNullPtrType; }
+};
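+
+// Illustrative example of the type set collected (hypothetical type, for
+// exposition only):
+//
+//   enum E { e };
+//   struct P { operator int*(); operator E(); };
+//   int *q = P() + 1;
+//
+// For the '+' operands, AddTypesConvertedFrom records 'int*' among the
+// pointer types and 'E' among the enumeration types; those sets then seed
+// the built-in operator candidates.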
+
+/// AddPointerWithMoreQualifiedTypeVariants - Add the pointer type @p Ty to
+/// the set of pointer types along with any more-qualified variants of
+/// that type. For example, if @p Ty is "int const *", this routine
+/// will add "int const *", "int const volatile *", "int const
+/// restrict *", and "int const volatile restrict *" to the set of
+/// pointer types. Returns true if the add of @p Ty itself succeeded,
+/// false otherwise.
+///
+/// FIXME: what to do about extended qualifiers?
+bool
+BuiltinCandidateTypeSet::AddPointerWithMoreQualifiedTypeVariants(QualType Ty,
+ const Qualifiers &VisibleQuals) {
+
+ // Insert this type.
+ if (!PointerTypes.insert(Ty))
+ return false;
+
+ QualType PointeeTy;
+ const PointerType *PointerTy = Ty->getAs<PointerType>();
+ bool buildObjCPtr = false;
+ if (!PointerTy) {
+ if (const ObjCObjectPointerType *PTy = Ty->getAs<ObjCObjectPointerType>()) {
+ PointeeTy = PTy->getPointeeType();
+ buildObjCPtr = true;
+ }
+ else
+ llvm_unreachable("type was not a pointer type!");
+ }
+ else
+ PointeeTy = PointerTy->getPointeeType();
+
+ // Don't add qualified variants of arrays. For one, they're not allowed
+ // (the qualifier would sink to the element type), and for another, the
+ // only overload situation where it matters is subscript or pointer +- int,
+ // and those shouldn't have qualifier variants anyway.
+ if (PointeeTy->isArrayType())
+ return true;
+ unsigned BaseCVR = PointeeTy.getCVRQualifiers();
+ if (const ConstantArrayType *Array =Context.getAsConstantArrayType(PointeeTy))
+ BaseCVR = Array->getElementType().getCVRQualifiers();
+ bool hasVolatile = VisibleQuals.hasVolatile();
+ bool hasRestrict = VisibleQuals.hasRestrict();
+
+ // Iterate through all strict supersets of BaseCVR.
+ for (unsigned CVR = BaseCVR+1; CVR <= Qualifiers::CVRMask; ++CVR) {
+ if ((CVR | BaseCVR) != CVR) continue;
+ // Skip over Volatile/Restrict if no Volatile/Restrict found anywhere
+ // in the types.
+ if ((CVR & Qualifiers::Volatile) && !hasVolatile) continue;
+ if ((CVR & Qualifiers::Restrict) && !hasRestrict) continue;
+ QualType QPointeeTy = Context.getCVRQualifiedType(PointeeTy, CVR);
+ if (!buildObjCPtr)
+ PointerTypes.insert(Context.getPointerType(QPointeeTy));
+ else
+ PointerTypes.insert(Context.getObjCObjectPointerType(QPointeeTy));
+ }
+
+ return true;
+}
+
+/// AddMemberPointerWithMoreQualifiedTypeVariants - Add the member pointer
+/// type @p Ty to the set of member pointer types along with any
+/// more-qualified variants of that type. For example, if @p Ty is
+/// "int const X::*", this routine will add "int const X::*",
+/// "int const volatile X::*", "int const restrict X::*", and
+/// "int const volatile restrict X::*" to the set of member pointer types.
+/// Returns true if the add of @p Ty itself succeeded, false otherwise.
+///
+/// FIXME: what to do about extended qualifiers?
+bool
+BuiltinCandidateTypeSet::AddMemberPointerWithMoreQualifiedTypeVariants(
+ QualType Ty) {
+ // Insert this type.
+ if (!MemberPointerTypes.insert(Ty))
+ return false;
+
+ const MemberPointerType *PointerTy = Ty->getAs<MemberPointerType>();
+ assert(PointerTy && "type was not a member pointer type!");
+
+ QualType PointeeTy = PointerTy->getPointeeType();
+ // Don't add qualified variants of arrays. For one, they're not allowed
+ // (the qualifier would sink to the element type), and for another, the
+ // only overload situation where it matters is subscript or pointer +- int,
+ // and those shouldn't have qualifier variants anyway.
+ if (PointeeTy->isArrayType())
+ return true;
+ const Type *ClassTy = PointerTy->getClass();
+
+ // Iterate through all strict supersets of the pointee type's CVR
+ // qualifiers.
+ unsigned BaseCVR = PointeeTy.getCVRQualifiers();
+ for (unsigned CVR = BaseCVR+1; CVR <= Qualifiers::CVRMask; ++CVR) {
+ if ((CVR | BaseCVR) != CVR) continue;
+
+ QualType QPointeeTy = Context.getCVRQualifiedType(PointeeTy, CVR);
+ MemberPointerTypes.insert(
+ Context.getMemberPointerType(QPointeeTy, ClassTy));
+ }
+
+ return true;
+}
+
+/// AddTypesConvertedFrom - Add each of the types to which the type @p
+/// Ty can be implicitly converted to the given set of @p Types. We're
+/// primarily interested in pointer types and enumeration types. We also
+/// take member pointer types, for the conditional operator.
+/// AllowUserConversions is true if we should look at the conversion
+/// functions of a class type, and AllowExplicitConversions if we
+/// should also include the explicit conversion functions of a class
+/// type.
+void
+BuiltinCandidateTypeSet::AddTypesConvertedFrom(QualType Ty,
+ SourceLocation Loc,
+ bool AllowUserConversions,
+ bool AllowExplicitConversions,
+ const Qualifiers &VisibleQuals) {
+ // Only deal with canonical types.
+ Ty = Context.getCanonicalType(Ty);
+
+ // Look through reference types; they aren't part of the type of an
+ // expression for the purposes of conversions.
+ if (const ReferenceType *RefTy = Ty->getAs<ReferenceType>())
+ Ty = RefTy->getPointeeType();
+
+ // If we're dealing with an array type, decay to the pointer.
+ if (Ty->isArrayType())
+ Ty = SemaRef.Context.getArrayDecayedType(Ty);
+
+ // Otherwise, we don't care about qualifiers on the type.
+ Ty = Ty.getLocalUnqualifiedType();
+
+ // Flag if we ever add a non-record type.
+ const RecordType *TyRec = Ty->getAs<RecordType>();
+ HasNonRecordTypes = HasNonRecordTypes || !TyRec;
+
+ // Flag if we encounter an arithmetic type.
+ HasArithmeticOrEnumeralTypes =
+ HasArithmeticOrEnumeralTypes || Ty->isArithmeticType();
+
+ if (Ty->isObjCIdType() || Ty->isObjCClassType())
+ PointerTypes.insert(Ty);
+ else if (Ty->getAs<PointerType>() || Ty->getAs<ObjCObjectPointerType>()) {
+ // Insert our type, and its more-qualified variants, into the set
+ // of types.
+ if (!AddPointerWithMoreQualifiedTypeVariants(Ty, VisibleQuals))
+ return;
+ } else if (Ty->isMemberPointerType()) {
+ // Member pointers are far easier, since the pointee can't be converted.
+ if (!AddMemberPointerWithMoreQualifiedTypeVariants(Ty))
+ return;
+ } else if (Ty->isEnumeralType()) {
+ HasArithmeticOrEnumeralTypes = true;
+ EnumerationTypes.insert(Ty);
+ } else if (Ty->isVectorType()) {
+ // We treat vector types as arithmetic types in many contexts as an
+ // extension.
+ HasArithmeticOrEnumeralTypes = true;
+ VectorTypes.insert(Ty);
+ } else if (Ty->isNullPtrType()) {
+ HasNullPtrType = true;
+ } else if (AllowUserConversions && TyRec) {
+ // No conversion functions in incomplete types.
+ if (SemaRef.RequireCompleteType(Loc, Ty, 0))
+ return;
+
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(TyRec->getDecl());
+ const UnresolvedSetImpl *Conversions
+ = ClassDecl->getVisibleConversionFunctions();
+ for (UnresolvedSetImpl::iterator I = Conversions->begin(),
+ E = Conversions->end(); I != E; ++I) {
+ NamedDecl *D = I.getDecl();
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ // Skip conversion function templates; they don't tell us anything
+ // about which builtin types we can convert to.
+ if (isa<FunctionTemplateDecl>(D))
+ continue;
+
+ CXXConversionDecl *Conv = cast<CXXConversionDecl>(D);
+ if (AllowExplicitConversions || !Conv->isExplicit()) {
+ AddTypesConvertedFrom(Conv->getConversionType(), Loc, false, false,
+ VisibleQuals);
+ }
+ }
+ }
+}
+
+/// \brief Helper function for AddBuiltinOperatorCandidates() that adds
+/// the volatile- and non-volatile-qualified assignment operators for the
+/// given type to the candidate set.
+static void AddBuiltinAssignmentOperatorCandidates(Sema &S,
+ QualType T,
+ Expr **Args,
+ unsigned NumArgs,
+ OverloadCandidateSet &CandidateSet) {
+ QualType ParamTypes[2];
+
+ // T& operator=(T&, T)
+ ParamTypes[0] = S.Context.getLValueReferenceType(T);
+ ParamTypes[1] = T;
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+ /*IsAssignmentOperator=*/true);
+
+ if (!S.Context.getCanonicalType(T).isVolatileQualified()) {
+ // volatile T& operator=(volatile T&, T)
+ ParamTypes[0]
+ = S.Context.getLValueReferenceType(S.Context.getVolatileType(T));
+ ParamTypes[1] = T;
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+ /*IsAssignmentOperator=*/true);
+ }
+}
+
+/// CollectVRQualifiers - This routine returns Volatile/Restrict qualifiers,
+/// if any, found in visible type conversion functions found in ArgExpr's type.
+static Qualifiers CollectVRQualifiers(ASTContext &Context, Expr* ArgExpr) {
+ Qualifiers VRQuals;
+ const RecordType *TyRec;
+ if (const MemberPointerType *RHSMPType =
+ ArgExpr->getType()->getAs<MemberPointerType>())
+ TyRec = RHSMPType->getClass()->getAs<RecordType>();
+ else
+ TyRec = ArgExpr->getType()->getAs<RecordType>();
+ if (!TyRec) {
+ // Just to be safe, assume the worst case.
+ VRQuals.addVolatile();
+ VRQuals.addRestrict();
+ return VRQuals;
+ }
+
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(TyRec->getDecl());
+ if (!ClassDecl->hasDefinition())
+ return VRQuals;
+
+ const UnresolvedSetImpl *Conversions =
+ ClassDecl->getVisibleConversionFunctions();
+
+ for (UnresolvedSetImpl::iterator I = Conversions->begin(),
+ E = Conversions->end(); I != E; ++I) {
+ NamedDecl *D = I.getDecl();
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+ if (CXXConversionDecl *Conv = dyn_cast<CXXConversionDecl>(D)) {
+ QualType CanTy = Context.getCanonicalType(Conv->getConversionType());
+ if (const ReferenceType *ResTypeRef = CanTy->getAs<ReferenceType>())
+ CanTy = ResTypeRef->getPointeeType();
+ // Need to go down the pointer/member-pointer chain and add qualifiers
+ // as we see them.
+ bool done = false;
+ while (!done) {
+ if (const PointerType *ResTypePtr = CanTy->getAs<PointerType>())
+ CanTy = ResTypePtr->getPointeeType();
+ else if (const MemberPointerType *ResTypeMPtr =
+ CanTy->getAs<MemberPointerType>())
+ CanTy = ResTypeMPtr->getPointeeType();
+ else
+ done = true;
+ if (CanTy.isVolatileQualified())
+ VRQuals.addVolatile();
+ if (CanTy.isRestrictQualified())
+ VRQuals.addRestrict();
+ if (VRQuals.hasRestrict() && VRQuals.hasVolatile())
+ return VRQuals;
+ }
+ }
+ }
+ return VRQuals;
+}
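+
+// Illustrative example of why these qualifiers matter (hypothetical type,
+// for exposition only):
+//
+//   struct Q { operator volatile int *(); };
+//   void touch(Q q) { *q = 0; }
+//
+// Because a conversion to 'volatile int *' is visible, the builder below
+// must also emit volatile-qualified variants of the built-in candidates;
+// with no such conversion those variants are pruned to keep the set small.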
+
+namespace {
+
+/// \brief Helper class to manage the addition of builtin operator overload
+/// candidates. It provides shared state and utility methods used throughout
+/// the process, as well as a helper method to add each group of builtin
+/// operator overloads from the standard to a candidate set.
+class BuiltinOperatorOverloadBuilder {
+ // Common instance state available to all overload candidate addition methods.
+ Sema &S;
+ Expr **Args;
+ unsigned NumArgs;
+ Qualifiers VisibleTypeConversionsQuals;
+ bool HasArithmeticOrEnumeralCandidateType;
+ SmallVectorImpl<BuiltinCandidateTypeSet> &CandidateTypes;
+ OverloadCandidateSet &CandidateSet;
+
+ // Define some constants used to index and iterate over the arithmetic types
+ // provided via the getArithmeticType() method below.
+ // The "promoted arithmetic types" are the arithmetic
+ // types that are preserved by promotion (C++ [over.built]p2).
+ static const unsigned FirstIntegralType = 3;
+ static const unsigned LastIntegralType = 18;
+ static const unsigned FirstPromotedIntegralType = 3,
+ LastPromotedIntegralType = 9;
+ static const unsigned FirstPromotedArithmeticType = 0,
+ LastPromotedArithmeticType = 9;
+ static const unsigned NumArithmeticTypes = 18;
+
+ /// \brief Get the canonical type for a given arithmetic type index.
+ CanQualType getArithmeticType(unsigned index) {
+ assert(index < NumArithmeticTypes);
+ static CanQualType ASTContext::* const
+ ArithmeticTypes[NumArithmeticTypes] = {
+ // Start of promoted types.
+ &ASTContext::FloatTy,
+ &ASTContext::DoubleTy,
+ &ASTContext::LongDoubleTy,
+
+ // Start of integral types.
+ &ASTContext::IntTy,
+ &ASTContext::LongTy,
+ &ASTContext::LongLongTy,
+ &ASTContext::UnsignedIntTy,
+ &ASTContext::UnsignedLongTy,
+ &ASTContext::UnsignedLongLongTy,
+ // End of promoted types.
+
+ &ASTContext::BoolTy,
+ &ASTContext::CharTy,
+ &ASTContext::WCharTy,
+ &ASTContext::Char16Ty,
+ &ASTContext::Char32Ty,
+ &ASTContext::SignedCharTy,
+ &ASTContext::ShortTy,
+ &ASTContext::UnsignedCharTy,
+ &ASTContext::UnsignedShortTy,
+ // End of integral types.
+ // FIXME: What about complex?
+ };
+ return S.Context.*ArithmeticTypes[index];
+ }
+
+ /// \brief Gets the canonical type resulting from the usual arithmetic
+ /// conversions for the given arithmetic types.
+ CanQualType getUsualArithmeticConversions(unsigned L, unsigned R) {
+ // Accelerator table for performing the usual arithmetic conversions.
+ // The rules are basically:
+ // - if either is floating-point, use the wider floating-point
+ // - if same signedness, use the higher rank
+ // - if same size, use unsigned of the higher rank
+ // - use the larger type
+ // These rules, together with the axiom that higher ranks are
+ // never smaller, are sufficient to precompute all of these results
+ // *except* when dealing with signed types of higher rank.
+ // (we could precompute SLL x UI for all known platforms, but it's
+ // better not to make any assumptions).
+ enum PromotedType {
+ Flt, Dbl, LDbl, SI, SL, SLL, UI, UL, ULL, Dep=-1
+ };
+ static PromotedType ConversionsTable[LastPromotedArithmeticType]
+ [LastPromotedArithmeticType] = {
+ /* Flt*/ { Flt, Dbl, LDbl, Flt, Flt, Flt, Flt, Flt, Flt },
+ /* Dbl*/ { Dbl, Dbl, LDbl, Dbl, Dbl, Dbl, Dbl, Dbl, Dbl },
+ /*LDbl*/ { LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl, LDbl },
+ /* SI*/ { Flt, Dbl, LDbl, SI, SL, SLL, UI, UL, ULL },
+ /* SL*/ { Flt, Dbl, LDbl, SL, SL, SLL, Dep, UL, ULL },
+ /* SLL*/ { Flt, Dbl, LDbl, SLL, SLL, SLL, Dep, Dep, ULL },
+ /* UI*/ { Flt, Dbl, LDbl, UI, Dep, Dep, UI, UL, ULL },
+ /* UL*/ { Flt, Dbl, LDbl, UL, UL, Dep, UL, UL, ULL },
+ /* ULL*/ { Flt, Dbl, LDbl, ULL, ULL, ULL, ULL, ULL, ULL },
+ };
+
+ assert(L < LastPromotedArithmeticType);
+ assert(R < LastPromotedArithmeticType);
+ int Idx = ConversionsTable[L][R];
+
+ // Fast path: the table gives us a concrete answer.
+ if (Idx != Dep) return getArithmeticType(Idx);
+
+ // Slow path: we need to compare widths.
+ // An invariant is that the signed type has higher rank.
+ CanQualType LT = getArithmeticType(L),
+ RT = getArithmeticType(R);
+ unsigned LW = S.Context.getIntWidth(LT),
+ RW = S.Context.getIntWidth(RT);
+
+ // If they're different widths, use the signed type.
+ if (LW > RW) return LT;
+ else if (LW < RW) return RT;
+
+ // Otherwise, use the unsigned type of the signed type's rank.
+ if (L == SL || R == SL) return S.Context.UnsignedLongTy;
+ assert(L == SLL || R == SLL);
+ return S.Context.UnsignedLongLongTy;
+ }
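+
+ // Worked example for the 'Dep' slow path above (target-dependent, for
+ // exposition only): for 'signed long' vs. 'unsigned int', an LP64 target
+ // has LW == 64 > RW == 32, so the signed type 'long' wins; an ILP32 target
+ // has equal widths, so the result is the unsigned type of the signed rank,
+ // i.e. 'unsigned long'.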
+
+ /// \brief Helper method to factor out the common pattern of adding overloads
+ /// for '++' and '--' builtin operators.
+ void addPlusPlusMinusMinusStyleOverloads(QualType CandidateTy,
+ bool HasVolatile) {
+ QualType ParamTypes[2] = {
+ S.Context.getLValueReferenceType(CandidateTy),
+ S.Context.IntTy
+ };
+
+ // Non-volatile version.
+ if (NumArgs == 1)
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 1, CandidateSet);
+ else
+ S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, 2, CandidateSet);
+
+ // Use a heuristic to reduce the number of builtin candidates in the set:
+ // add volatile version only if there are conversions to a volatile type.
+ if (HasVolatile) {
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(
+ S.Context.getVolatileType(CandidateTy));
+ if (NumArgs == 1)
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 1, CandidateSet);
+ else
+ S.AddBuiltinCandidate(CandidateTy, ParamTypes, Args, 2, CandidateSet);
+ }
+ }
+
+public:
+ BuiltinOperatorOverloadBuilder(
+ Sema &S, Expr **Args, unsigned NumArgs,
+ Qualifiers VisibleTypeConversionsQuals,
+ bool HasArithmeticOrEnumeralCandidateType,
+ SmallVectorImpl<BuiltinCandidateTypeSet> &CandidateTypes,
+ OverloadCandidateSet &CandidateSet)
+ : S(S), Args(Args), NumArgs(NumArgs),
+ VisibleTypeConversionsQuals(VisibleTypeConversionsQuals),
+ HasArithmeticOrEnumeralCandidateType(
+ HasArithmeticOrEnumeralCandidateType),
+ CandidateTypes(CandidateTypes),
+ CandidateSet(CandidateSet) {
+ // Validate some of our static helper constants in debug builds.
+ assert(getArithmeticType(FirstPromotedIntegralType) == S.Context.IntTy &&
+ "Invalid first promoted integral type");
+ assert(getArithmeticType(LastPromotedIntegralType - 1)
+ == S.Context.UnsignedLongLongTy &&
+ "Invalid last promoted integral type");
+ assert(getArithmeticType(FirstPromotedArithmeticType)
+ == S.Context.FloatTy &&
+ "Invalid first promoted arithmetic type");
+ assert(getArithmeticType(LastPromotedArithmeticType - 1)
+ == S.Context.UnsignedLongLongTy &&
+ "Invalid last promoted arithmetic type");
+ }
+
+ // C++ [over.built]p3:
+ //
+ // For every pair (T, VQ), where T is an arithmetic type, and VQ
+ // is either volatile or empty, there exist candidate operator
+ // functions of the form
+ //
+ // VQ T& operator++(VQ T&);
+ // T operator++(VQ T&, int);
+ //
+ // C++ [over.built]p4:
+ //
+ // For every pair (T, VQ), where T is an arithmetic type other
+ // than bool, and VQ is either volatile or empty, there exist
+ // candidate operator functions of the form
+ //
+ // VQ T& operator--(VQ T&);
+ // T operator--(VQ T&, int);
+ void addPlusPlusMinusMinusArithmeticOverloads(OverloadedOperatorKind Op) {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
+
+ for (unsigned Arith = (Op == OO_PlusPlus? 0 : 1);
+ Arith < NumArithmeticTypes; ++Arith) {
+ addPlusPlusMinusMinusStyleOverloads(
+ getArithmeticType(Arith),
+ VisibleTypeConversionsQuals.hasVolatile());
+ }
+ }
+
+ // C++ [over.built]p5:
+ //
+ // For every pair (T, VQ), where T is a cv-qualified or
+ // cv-unqualified object type, and VQ is either volatile or
+ // empty, there exist candidate operator functions of the form
+ //
+ // T*VQ& operator++(T*VQ&);
+ // T*VQ& operator--(T*VQ&);
+ // T* operator++(T*VQ&, int);
+ // T* operator--(T*VQ&, int);
+ void addPlusPlusMinusMinusPointerOverloads() {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ // Skip pointer types that aren't pointers to object types.
+ if (!(*Ptr)->getPointeeType()->isObjectType())
+ continue;
+
+ addPlusPlusMinusMinusStyleOverloads(*Ptr,
+ (!S.Context.getCanonicalType(*Ptr).isVolatileQualified() &&
+ VisibleTypeConversionsQuals.hasVolatile()));
+ }
+ }
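+ // Illustrative sketch (hypothetical type 'Cursor', not from this file):
+ //   struct Cursor { operator int *&(); };
+ //   Cursor cur;
+ //   ++cur;   // built-in candidate 'int*& operator++(int*&)'
+ //   cur++;   // built-in candidate 'int* operator++(int*&, int)'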
+
+ // C++ [over.built]p6:
+ // For every cv-qualified or cv-unqualified object type T, there
+ // exist candidate operator functions of the form
+ //
+ // T& operator*(T*);
+ //
+ // C++ [over.built]p7:
+ // For every function type T that does not have cv-qualifiers or a
+ // ref-qualifier, there exist candidate operator functions of the form
+ // T& operator*(T*);
+ void addUnaryStarPointerOverloads() {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ QualType ParamTy = *Ptr;
+ QualType PointeeTy = ParamTy->getPointeeType();
+ if (!PointeeTy->isObjectType() && !PointeeTy->isFunctionType())
+ continue;
+
+ if (const FunctionProtoType *Proto = PointeeTy->getAs<FunctionProtoType>())
+ if (Proto->getTypeQuals() || Proto->getRefQualifier())
+ continue;
+
+ S.AddBuiltinCandidate(S.Context.getLValueReferenceType(PointeeTy),
+ &ParamTy, Args, 1, CandidateSet);
+ }
+ }
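+ // Illustrative sketch (hypothetical type 'Handle', not from this file):
+ //   struct Handle { operator int *(); };
+ //   Handle h;
+ //   *h;   // the built-in candidate 'int& operator*(int*)' converts 'h' to
+ //         // 'int*' and yields an lvalue of type 'int'.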
+
+ // C++ [over.built]p9:
+ // For every promoted arithmetic type T, there exist candidate
+ // operator functions of the form
+ //
+ // T operator+(T);
+ // T operator-(T);
+ void addUnaryPlusOrMinusArithmeticOverloads() {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
+
+ for (unsigned Arith = FirstPromotedArithmeticType;
+ Arith < LastPromotedArithmeticType; ++Arith) {
+ QualType ArithTy = getArithmeticType(Arith);
+ S.AddBuiltinCandidate(ArithTy, &ArithTy, Args, 1, CandidateSet);
+ }
+
+ // Extension: We also add these operators for vector types.
+ for (BuiltinCandidateTypeSet::iterator
+ Vec = CandidateTypes[0].vector_begin(),
+ VecEnd = CandidateTypes[0].vector_end();
+ Vec != VecEnd; ++Vec) {
+ QualType VecTy = *Vec;
+ S.AddBuiltinCandidate(VecTy, &VecTy, Args, 1, CandidateSet);
+ }
+ }
+
+ // C++ [over.built]p8:
+ // For every type T, there exist candidate operator functions of
+ // the form
+ //
+ // T* operator+(T*);
+ void addUnaryPlusPointerOverloads() {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ QualType ParamTy = *Ptr;
+ S.AddBuiltinCandidate(ParamTy, &ParamTy, Args, 1, CandidateSet);
+ }
+ }
+
+ // C++ [over.built]p10:
+ // For every promoted integral type T, there exist candidate
+ // operator functions of the form
+ //
+ // T operator~(T);
+ void addUnaryTildePromotedIntegralOverloads() {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
+
+ for (unsigned Int = FirstPromotedIntegralType;
+ Int < LastPromotedIntegralType; ++Int) {
+ QualType IntTy = getArithmeticType(Int);
+ S.AddBuiltinCandidate(IntTy, &IntTy, Args, 1, CandidateSet);
+ }
+
+ // Extension: We also add this operator for vector types.
+ for (BuiltinCandidateTypeSet::iterator
+ Vec = CandidateTypes[0].vector_begin(),
+ VecEnd = CandidateTypes[0].vector_end();
+ Vec != VecEnd; ++Vec) {
+ QualType VecTy = *Vec;
+ S.AddBuiltinCandidate(VecTy, &VecTy, Args, 1, CandidateSet);
+ }
+ }
+
+ // C++ [over.match.oper]p16:
+ // For every pointer to member type T, there exist candidate operator
+ // functions of the form
+ //
+ // bool operator==(T,T);
+ // bool operator!=(T,T);
+ void addEqualEqualOrNotEqualMemberPointerOverloads() {
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
+
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
+ for (BuiltinCandidateTypeSet::iterator
+ MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
+ MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
+ MemPtr != MemPtrEnd;
+ ++MemPtr) {
+ // Don't add the same builtin candidate twice.
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)))
+ continue;
+
+ QualType ParamTypes[2] = { *MemPtr, *MemPtr };
+ S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, 2,
+ CandidateSet);
+ }
+ }
+ }
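+ // Illustrative sketch (hypothetical types, not from this file):
+ //   struct S { int a, b; };
+ //   struct MemRef { operator int S::*(); };
+ //   MemRef m;
+ //   m == &S::a;   // built-in candidate 'bool operator==(int S::*, int S::*)'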
+
+ // C++ [over.built]p15:
+ //
+ // For every T, where T is an enumeration type, a pointer type, or
+ // std::nullptr_t, there exist candidate operator functions of the form
+ //
+ // bool operator<(T, T);
+ // bool operator>(T, T);
+ // bool operator<=(T, T);
+ // bool operator>=(T, T);
+ // bool operator==(T, T);
+ // bool operator!=(T, T);
+ void addRelationalPointerOrEnumeralOverloads() {
+ // C++ [over.built]p1:
+ // If there is a user-written candidate with the same name and parameter
+ // types as a built-in candidate operator function, the built-in operator
+ // function is hidden and is not included in the set of candidate
+ // functions.
+ //
+ // The text is actually in a note, but if we don't implement it then we end
+ // up with ambiguities when the user provides an overloaded operator for
+ // an enumeration type. Note that only enumeration types have this problem,
+ // so we track which enumeration types we've seen operators for. Also, the
+ // only other overloaded operator with enumeration arguments, operator=,
+ // cannot be overloaded for enumeration types, so this is the only place
+ // where we must suppress candidates like this.
+ llvm::DenseSet<std::pair<CanQualType, CanQualType> >
+ UserDefinedBinaryOperators;
+
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
+ if (CandidateTypes[ArgIdx].enumeration_begin() !=
+ CandidateTypes[ArgIdx].enumeration_end()) {
+ for (OverloadCandidateSet::iterator C = CandidateSet.begin(),
+ CEnd = CandidateSet.end();
+ C != CEnd; ++C) {
+ if (!C->Viable || !C->Function || C->Function->getNumParams() != 2)
+ continue;
+
+ QualType FirstParamType =
+ C->Function->getParamDecl(0)->getType().getUnqualifiedType();
+ QualType SecondParamType =
+ C->Function->getParamDecl(1)->getType().getUnqualifiedType();
+
+ // Skip if either parameter isn't of enumeral type.
+ if (!FirstParamType->isEnumeralType() ||
+ !SecondParamType->isEnumeralType())
+ continue;
+
+ // Add this operator to the set of known user-defined operators.
+ UserDefinedBinaryOperators.insert(
+ std::make_pair(S.Context.getCanonicalType(FirstParamType),
+ S.Context.getCanonicalType(SecondParamType)));
+ }
+ }
+ }
+
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
+
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[ArgIdx].pointer_begin(),
+ PtrEnd = CandidateTypes[ArgIdx].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ // Don't add the same builtin candidate twice.
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)))
+ continue;
+
+ QualType ParamTypes[2] = { *Ptr, *Ptr };
+ S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, 2,
+ CandidateSet);
+ }
+ for (BuiltinCandidateTypeSet::iterator
+ Enum = CandidateTypes[ArgIdx].enumeration_begin(),
+ EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
+ Enum != EnumEnd; ++Enum) {
+ CanQualType CanonType = S.Context.getCanonicalType(*Enum);
+
+ // Don't add the same builtin candidate twice, or if a user defined
+ // candidate exists.
+ if (!AddedTypes.insert(CanonType) ||
+ UserDefinedBinaryOperators.count(std::make_pair(CanonType,
+ CanonType)))
+ continue;
+
+ QualType ParamTypes[2] = { *Enum, *Enum };
+ S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, 2,
+ CandidateSet);
+ }
+
+ if (CandidateTypes[ArgIdx].hasNullPtrType()) {
+ CanQualType NullPtrTy = S.Context.getCanonicalType(S.Context.NullPtrTy);
+ if (AddedTypes.insert(NullPtrTy) &&
+ !UserDefinedBinaryOperators.count(std::make_pair(NullPtrTy,
+ NullPtrTy))) {
+ QualType ParamTypes[2] = { NullPtrTy, NullPtrTy };
+ S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, 2,
+ CandidateSet);
+ }
+ }
+ }
+ }
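+ // Illustrative sketch (hypothetical declarations, not from this file):
+ //   enum E { A, B };
+ //   bool operator<(E, E);          // user-written overload
+ //   struct W { operator E(); };
+ //   A < W();   // the built-in 'bool operator<(E, E)' is suppressed above, so
+ //              // the user-written overload wins without an ambiguity.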
+
+ // C++ [over.built]p13:
+ //
+ // For every cv-qualified or cv-unqualified object type T
+ // there exist candidate operator functions of the form
+ //
+ // T* operator+(T*, ptrdiff_t);
+ // T& operator[](T*, ptrdiff_t); [BELOW]
+ // T* operator-(T*, ptrdiff_t);
+ // T* operator+(ptrdiff_t, T*);
+ // T& operator[](ptrdiff_t, T*); [BELOW]
+ //
+ // C++ [over.built]p14:
+ //
+ // For every T, where T is a pointer to object type, there
+ // exist candidate operator functions of the form
+ //
+ // ptrdiff_t operator-(T, T);
+ void addBinaryPlusOrMinusPointerOverloads(OverloadedOperatorKind Op) {
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
+
+ for (int Arg = 0; Arg < 2; ++Arg) {
+ QualType AsymmetricParamTypes[2] = {
+ S.Context.getPointerDiffType(),
+ S.Context.getPointerDiffType(),
+ };
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[Arg].pointer_begin(),
+ PtrEnd = CandidateTypes[Arg].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ QualType PointeeTy = (*Ptr)->getPointeeType();
+ if (!PointeeTy->isObjectType())
+ continue;
+
+ AsymmetricParamTypes[Arg] = *Ptr;
+ if (Arg == 0 || Op == OO_Plus) {
+ // operator+(T*, ptrdiff_t) or operator-(T*, ptrdiff_t)
+ // T* operator+(ptrdiff_t, T*);
+ S.AddBuiltinCandidate(*Ptr, AsymmetricParamTypes, Args, 2,
+ CandidateSet);
+ }
+ if (Op == OO_Minus) {
+ // ptrdiff_t operator-(T, T);
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)))
+ continue;
+
+ QualType ParamTypes[2] = { *Ptr, *Ptr };
+ S.AddBuiltinCandidate(S.Context.getPointerDiffType(), ParamTypes,
+ Args, 2, CandidateSet);
+ }
+ }
+ }
+ }
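+ // Illustrative sketch (hypothetical type 'Buf', not from this file):
+ //   struct Buf { operator char *(); };
+ //   Buf b;
+ //   b + 4;   // built-in candidate 'char* operator+(char*, ptrdiff_t)'
+ //   4 + b;   // built-in candidate 'char* operator+(ptrdiff_t, char*)'
+ //   b - b;   // built-in candidate 'ptrdiff_t operator-(char*, char*)'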
+
+ // C++ [over.built]p12:
+ //
+ // For every pair of promoted arithmetic types L and R, there
+ // exist candidate operator functions of the form
+ //
+ // LR operator*(L, R);
+ // LR operator/(L, R);
+ // LR operator+(L, R);
+ // LR operator-(L, R);
+ // bool operator<(L, R);
+ // bool operator>(L, R);
+ // bool operator<=(L, R);
+ // bool operator>=(L, R);
+ // bool operator==(L, R);
+ // bool operator!=(L, R);
+ //
+ // where LR is the result of the usual arithmetic conversions
+ // between types L and R.
+ //
+ // C++ [over.built]p24:
+ //
+ // For every pair of promoted arithmetic types L and R, there exist
+ // candidate operator functions of the form
+ //
+ // LR operator?(bool, L, R);
+ //
+ // where LR is the result of the usual arithmetic conversions
+ // between types L and R.
+ // Our candidates ignore the first parameter.
+ void addGenericBinaryArithmeticOverloads(bool isComparison) {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
+
+ for (unsigned Left = FirstPromotedArithmeticType;
+ Left < LastPromotedArithmeticType; ++Left) {
+ for (unsigned Right = FirstPromotedArithmeticType;
+ Right < LastPromotedArithmeticType; ++Right) {
+ QualType LandR[2] = { getArithmeticType(Left),
+ getArithmeticType(Right) };
+ QualType Result =
+ isComparison ? S.Context.BoolTy
+ : getUsualArithmeticConversions(Left, Right);
+ S.AddBuiltinCandidate(Result, LandR, Args, 2, CandidateSet);
+ }
+ }
+
+ // Extension: Add the binary operators ==, !=, <, <=, >=, >, *, /, and the
+ // conditional operator for vector types.
+ for (BuiltinCandidateTypeSet::iterator
+ Vec1 = CandidateTypes[0].vector_begin(),
+ Vec1End = CandidateTypes[0].vector_end();
+ Vec1 != Vec1End; ++Vec1) {
+ for (BuiltinCandidateTypeSet::iterator
+ Vec2 = CandidateTypes[1].vector_begin(),
+ Vec2End = CandidateTypes[1].vector_end();
+ Vec2 != Vec2End; ++Vec2) {
+ QualType LandR[2] = { *Vec1, *Vec2 };
+ QualType Result = S.Context.BoolTy;
+ if (!isComparison) {
+ if ((*Vec1)->isExtVectorType() || !(*Vec2)->isExtVectorType())
+ Result = *Vec1;
+ else
+ Result = *Vec2;
+ }
+
+ S.AddBuiltinCandidate(Result, LandR, Args, 2, CandidateSet);
+ }
+ }
+ }
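+ // Illustrative sketch (hypothetical type 'Meters', not from this file):
+ //   struct Meters { operator double(); };
+ //   Meters m;
+ //   m * 2;     // best built-in match is 'double operator*(double, int)',
+ //              // where LR is 'double' after the usual arithmetic conversions
+ //   m < 2.5;   // best built-in match is 'bool operator<(double, double)'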
+
+ // C++ [over.built]p17:
+ //
+ // For every pair of promoted integral types L and R, there
+ // exist candidate operator functions of the form
+ //
+ // LR operator%(L, R);
+ // LR operator&(L, R);
+ // LR operator^(L, R);
+ // LR operator|(L, R);
+ // L operator<<(L, R);
+ // L operator>>(L, R);
+ //
+ // where LR is the result of the usual arithmetic conversions
+ // between types L and R.
+ void addBinaryBitwiseArithmeticOverloads(OverloadedOperatorKind Op) {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
+
+ for (unsigned Left = FirstPromotedIntegralType;
+ Left < LastPromotedIntegralType; ++Left) {
+ for (unsigned Right = FirstPromotedIntegralType;
+ Right < LastPromotedIntegralType; ++Right) {
+ QualType LandR[2] = { getArithmeticType(Left),
+ getArithmeticType(Right) };
+ QualType Result = (Op == OO_LessLess || Op == OO_GreaterGreater)
+ ? LandR[0]
+ : getUsualArithmeticConversions(Left, Right);
+ S.AddBuiltinCandidate(Result, LandR, Args, 2, CandidateSet);
+ }
+ }
+ }
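+ // Illustrative sketch (hypothetical type 'Bits', not from this file):
+ //   struct Bits { operator unsigned(); };
+ //   Bits b;
+ //   b << 3;   // best built-in match is 'unsigned operator<<(unsigned, int)';
+ //             // for shifts the result type is the left operand's type, not LR
+ //   b | 1u;   // best built-in match is 'unsigned operator|(unsigned, unsigned)'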
+
+ // C++ [over.built]p20:
+ //
+ // For every pair (T, VQ), where T is an enumeration or
+ // pointer to member type and VQ is either volatile or
+ // empty, there exist candidate operator functions of the form
+ //
+ // VQ T& operator=(VQ T&, T);
+ void addAssignmentMemberPointerOrEnumeralOverloads() {
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
+
+ for (unsigned ArgIdx = 0; ArgIdx < 2; ++ArgIdx) {
+ for (BuiltinCandidateTypeSet::iterator
+ Enum = CandidateTypes[ArgIdx].enumeration_begin(),
+ EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
+ Enum != EnumEnd; ++Enum) {
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Enum)))
+ continue;
+
+ AddBuiltinAssignmentOperatorCandidates(S, *Enum, Args, 2,
+ CandidateSet);
+ }
+
+ for (BuiltinCandidateTypeSet::iterator
+ MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
+ MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
+ MemPtr != MemPtrEnd; ++MemPtr) {
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)))
+ continue;
+
+ AddBuiltinAssignmentOperatorCandidates(S, *MemPtr, Args, 2,
+ CandidateSet);
+ }
+ }
+ }
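+ // Illustrative sketch (hypothetical declarations, not from this file):
+ //   enum E { A, B };
+ //   struct WE { operator E(); };
+ //   E e;
+ //   e = WE();   // built-in candidate 'E& operator=(E&, E)'; only the right
+ //               // operand may use a user-defined conversion here.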
+
+ // C++ [over.built]p19:
+ //
+ // For every pair (T, VQ), where T is any type and VQ is either
+ // volatile or empty, there exist candidate operator functions
+ // of the form
+ //
+ // T*VQ& operator=(T*VQ&, T*);
+ //
+ // C++ [over.built]p21:
+ //
+ // For every pair (T, VQ), where T is a cv-qualified or
+ // cv-unqualified object type and VQ is either volatile or
+ // empty, there exist candidate operator functions of the form
+ //
+ // T*VQ& operator+=(T*VQ&, ptrdiff_t);
+ // T*VQ& operator-=(T*VQ&, ptrdiff_t);
+ void addAssignmentPointerOverloads(bool isEqualOp) {
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
+
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ // If this is operator=, keep track of the builtin candidates we added.
+ if (isEqualOp)
+ AddedTypes.insert(S.Context.getCanonicalType(*Ptr));
+ else if (!(*Ptr)->getPointeeType()->isObjectType())
+ continue;
+
+ // non-volatile version
+ QualType ParamTypes[2] = {
+ S.Context.getLValueReferenceType(*Ptr),
+ isEqualOp ? *Ptr : S.Context.getPointerDiffType(),
+ };
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+ /*IsAssignmentOperator=*/ isEqualOp);
+
+ if (!S.Context.getCanonicalType(*Ptr).isVolatileQualified() &&
+ VisibleTypeConversionsQuals.hasVolatile()) {
+ // volatile version
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(S.Context.getVolatileType(*Ptr));
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+ /*IsAssignmentOperator=*/isEqualOp);
+ }
+ }
+
+ if (isEqualOp) {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[1].pointer_begin(),
+ PtrEnd = CandidateTypes[1].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ // Make sure we don't add the same candidate twice.
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)))
+ continue;
+
+ QualType ParamTypes[2] = {
+ S.Context.getLValueReferenceType(*Ptr),
+ *Ptr,
+ };
+
+ // non-volatile version
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+ /*IsAssignmentOperator=*/true);
+
+ if (!S.Context.getCanonicalType(*Ptr).isVolatileQualified() &&
+ VisibleTypeConversionsQuals.hasVolatile()) {
+ // volatile version
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(S.Context.getVolatileType(*Ptr));
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2,
+ CandidateSet, /*IsAssignmentOperator=*/true);
+ }
+ }
+ }
+ }
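+ // Illustrative sketch (hypothetical types, not from this file):
+ //   struct Src { operator int *(); };
+ //   struct Off { operator ptrdiff_t(); };
+ //   int *p;
+ //   p = Src();    // built-in candidate 'int*& operator=(int*&, int*)'
+ //   p += Off();   // built-in candidate 'int*& operator+=(int*&, ptrdiff_t)'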
+
+ // C++ [over.built]p18:
+ //
+ // For every triple (L, VQ, R), where L is an arithmetic type,
+ // VQ is either volatile or empty, and R is a promoted
+ // arithmetic type, there exist candidate operator functions of
+ // the form
+ //
+ // VQ L& operator=(VQ L&, R);
+ // VQ L& operator*=(VQ L&, R);
+ // VQ L& operator/=(VQ L&, R);
+ // VQ L& operator+=(VQ L&, R);
+ // VQ L& operator-=(VQ L&, R);
+ void addAssignmentArithmeticOverloads(bool isEqualOp) {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
+
+ for (unsigned Left = 0; Left < NumArithmeticTypes; ++Left) {
+ for (unsigned Right = FirstPromotedArithmeticType;
+ Right < LastPromotedArithmeticType; ++Right) {
+ QualType ParamTypes[2];
+ ParamTypes[1] = getArithmeticType(Right);
+
+ // Add this built-in operator as a candidate (VQ is empty).
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(getArithmeticType(Left));
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+ /*IsAssignmentOperator=*/isEqualOp);
+
+ // Add this built-in operator as a candidate (VQ is 'volatile').
+ if (VisibleTypeConversionsQuals.hasVolatile()) {
+ ParamTypes[0] =
+ S.Context.getVolatileType(getArithmeticType(Left));
+ ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2,
+ CandidateSet,
+ /*IsAssignmentOperator=*/isEqualOp);
+ }
+ }
+ }
+
+ // Extension: Add the binary operators =, +=, -=, *=, /= for vector types.
+ for (BuiltinCandidateTypeSet::iterator
+ Vec1 = CandidateTypes[0].vector_begin(),
+ Vec1End = CandidateTypes[0].vector_end();
+ Vec1 != Vec1End; ++Vec1) {
+ for (BuiltinCandidateTypeSet::iterator
+ Vec2 = CandidateTypes[1].vector_begin(),
+ Vec2End = CandidateTypes[1].vector_end();
+ Vec2 != Vec2End; ++Vec2) {
+ QualType ParamTypes[2];
+ ParamTypes[1] = *Vec2;
+ // Add this built-in operator as a candidate (VQ is empty).
+ ParamTypes[0] = S.Context.getLValueReferenceType(*Vec1);
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet,
+ /*IsAssignmentOperator=*/isEqualOp);
+
+ // Add this built-in operator as a candidate (VQ is 'volatile').
+ if (VisibleTypeConversionsQuals.hasVolatile()) {
+ ParamTypes[0] = S.Context.getVolatileType(*Vec1);
+ ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2,
+ CandidateSet,
+ /*IsAssignmentOperator=*/isEqualOp);
+ }
+ }
+ }
+ }
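+ // Illustrative sketch (hypothetical type 'Scale', not from this file):
+ //   struct Scale { operator double(); };
+ //   float f;
+ //   f *= Scale();   // best built-in match is 'float& operator*=(float&, double)';
+ //                   // the left operand binds directly, the right converts to 'double'.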
+
+ // C++ [over.built]p22:
+ //
+ // For every triple (L, VQ, R), where L is an integral type, VQ
+ // is either volatile or empty, and R is a promoted integral
+ // type, there exist candidate operator functions of the form
+ //
+ // VQ L& operator%=(VQ L&, R);
+ // VQ L& operator<<=(VQ L&, R);
+ // VQ L& operator>>=(VQ L&, R);
+ // VQ L& operator&=(VQ L&, R);
+ // VQ L& operator^=(VQ L&, R);
+ // VQ L& operator|=(VQ L&, R);
+ void addAssignmentIntegralOverloads() {
+ if (!HasArithmeticOrEnumeralCandidateType)
+ return;
+
+ for (unsigned Left = FirstIntegralType; Left < LastIntegralType; ++Left) {
+ for (unsigned Right = FirstPromotedIntegralType;
+ Right < LastPromotedIntegralType; ++Right) {
+ QualType ParamTypes[2];
+ ParamTypes[1] = getArithmeticType(Right);
+
+ // Add this built-in operator as a candidate (VQ is empty).
+ ParamTypes[0] =
+ S.Context.getLValueReferenceType(getArithmeticType(Left));
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2, CandidateSet);
+ if (VisibleTypeConversionsQuals.hasVolatile()) {
+ // Add this built-in operator as a candidate (VQ is 'volatile').
+ ParamTypes[0] = getArithmeticType(Left);
+ ParamTypes[0] = S.Context.getVolatileType(ParamTypes[0]);
+ ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
+ S.AddBuiltinCandidate(ParamTypes[0], ParamTypes, Args, 2,
+ CandidateSet);
+ }
+ }
+ }
+ }
+
+ // C++ [over.operator]p23:
+ //
+ // There also exist candidate operator functions of the form
+ //
+ // bool operator!(bool);
+ // bool operator&&(bool, bool);
+ // bool operator||(bool, bool);
+ void addExclaimOverload() {
+ QualType ParamTy = S.Context.BoolTy;
+ S.AddBuiltinCandidate(ParamTy, &ParamTy, Args, 1, CandidateSet,
+ /*IsAssignmentOperator=*/false,
+ /*NumContextualBoolArguments=*/1);
+ }
+ void addAmpAmpOrPipePipeOverload() {
+ QualType ParamTypes[2] = { S.Context.BoolTy, S.Context.BoolTy };
+ S.AddBuiltinCandidate(S.Context.BoolTy, ParamTypes, Args, 2, CandidateSet,
+ /*IsAssignmentOperator=*/false,
+ /*NumContextualBoolArguments=*/2);
+ }
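+ // Illustrative sketch (hypothetical type 'Flag', not from this file):
+ //   struct Flag { operator bool(); };
+ //   Flag f;
+ //   !f;       // built-in candidate 'bool operator!(bool)'
+ //   f && f;   // built-in candidate 'bool operator&&(bool, bool)'; the
+ //             // operands are contextually converted to bool.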
+
+ // C++ [over.built]p13:
+ //
+ // For every cv-qualified or cv-unqualified object type T there
+ // exist candidate operator functions of the form
+ //
+ // T* operator+(T*, ptrdiff_t); [ABOVE]
+ // T& operator[](T*, ptrdiff_t);
+ // T* operator-(T*, ptrdiff_t); [ABOVE]
+ // T* operator+(ptrdiff_t, T*); [ABOVE]
+ // T& operator[](ptrdiff_t, T*);
+ void addSubscriptOverloads() {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ QualType ParamTypes[2] = { *Ptr, S.Context.getPointerDiffType() };
+ QualType PointeeType = (*Ptr)->getPointeeType();
+ if (!PointeeType->isObjectType())
+ continue;
+
+ QualType ResultTy = S.Context.getLValueReferenceType(PointeeType);
+
+ // T& operator[](T*, ptrdiff_t)
+ S.AddBuiltinCandidate(ResultTy, ParamTypes, Args, 2, CandidateSet);
+ }
+
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[1].pointer_begin(),
+ PtrEnd = CandidateTypes[1].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ QualType ParamTypes[2] = { S.Context.getPointerDiffType(), *Ptr };
+ QualType PointeeType = (*Ptr)->getPointeeType();
+ if (!PointeeType->isObjectType())
+ continue;
+
+ QualType ResultTy = S.Context.getLValueReferenceType(PointeeType);
+
+ // T& operator[](ptrdiff_t, T*)
+ S.AddBuiltinCandidate(ResultTy, ParamTypes, Args, 2, CandidateSet);
+ }
+ }
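+ // Illustrative sketch (hypothetical type 'Buf', not from this file):
+ //   struct Buf { operator int *(); };
+ //   Buf b;
+ //   b[2];   // built-in candidate 'int& operator[](int*, ptrdiff_t)'
+ //   2[b];   // built-in candidate 'int& operator[](ptrdiff_t, int*)'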
+
+ // C++ [over.built]p11:
+ // For every quintuple (C1, C2, T, CV1, CV2), where C2 is a class type,
+ // C1 is the same type as C2 or is a derived class of C2, T is an object
+ // type or a function type, and CV1 and CV2 are cv-qualifier-seqs,
+ // there exist candidate operator functions of the form
+ //
+ // CV12 T& operator->*(CV1 C1*, CV2 T C2::*);
+ //
+ // where CV12 is the union of CV1 and CV2.
+ void addArrowStarOverloads() {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[0].pointer_begin(),
+ PtrEnd = CandidateTypes[0].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ QualType C1Ty = (*Ptr);
+ QualType C1;
+ QualifierCollector Q1;
+ C1 = QualType(Q1.strip(C1Ty->getPointeeType()), 0);
+ if (!isa<RecordType>(C1))
+ continue;
+ // Heuristic to reduce the number of builtin candidates in the set:
+ // add the volatile/restrict version only if there are conversions to a
+ // volatile/restrict type.
+ if (!VisibleTypeConversionsQuals.hasVolatile() && Q1.hasVolatile())
+ continue;
+ if (!VisibleTypeConversionsQuals.hasRestrict() && Q1.hasRestrict())
+ continue;
+ for (BuiltinCandidateTypeSet::iterator
+ MemPtr = CandidateTypes[1].member_pointer_begin(),
+ MemPtrEnd = CandidateTypes[1].member_pointer_end();
+ MemPtr != MemPtrEnd; ++MemPtr) {
+ const MemberPointerType *mptr = cast<MemberPointerType>(*MemPtr);
+ QualType C2 = QualType(mptr->getClass(), 0);
+ C2 = C2.getUnqualifiedType();
+ if (C1 != C2 && !S.IsDerivedFrom(C1, C2))
+ break;
+ QualType ParamTypes[2] = { *Ptr, *MemPtr };
+ // build CV12 T&
+ QualType T = mptr->getPointeeType();
+ if (!VisibleTypeConversionsQuals.hasVolatile() &&
+ T.isVolatileQualified())
+ continue;
+ if (!VisibleTypeConversionsQuals.hasRestrict() &&
+ T.isRestrictQualified())
+ continue;
+ T = Q1.apply(S.Context, T);
+ QualType ResultTy = S.Context.getLValueReferenceType(T);
+ S.AddBuiltinCandidate(ResultTy, ParamTypes, Args, 2, CandidateSet);
+ }
+ }
+ }
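+ // Illustrative sketch (hypothetical types, not from this file):
+ //   struct S { int n; };
+ //   struct SPtr { operator S *(); };
+ //   SPtr p;
+ //   p->*(&S::n);   // built-in candidate 'int& operator->*(S*, int S::*)'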
+
+ // Note that we don't consider the first argument, since it has been
+ // contextually converted to bool long ago. The candidates below are
+ // therefore added as binary.
+ //
+ // C++ [over.built]p25:
+ // For every type T, where T is a pointer, pointer-to-member, or scoped
+ // enumeration type, there exist candidate operator functions of the form
+ //
+ // T operator?(bool, T, T);
+ //
+ void addConditionalOperatorOverloads() {
+ /// Set of (canonical) types that we've already handled.
+ llvm::SmallPtrSet<QualType, 8> AddedTypes;
+
+ for (unsigned ArgIdx = 0; ArgIdx < 2; ++ArgIdx) {
+ for (BuiltinCandidateTypeSet::iterator
+ Ptr = CandidateTypes[ArgIdx].pointer_begin(),
+ PtrEnd = CandidateTypes[ArgIdx].pointer_end();
+ Ptr != PtrEnd; ++Ptr) {
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Ptr)))
+ continue;
+
+ QualType ParamTypes[2] = { *Ptr, *Ptr };
+ S.AddBuiltinCandidate(*Ptr, ParamTypes, Args, 2, CandidateSet);
+ }
+
+ for (BuiltinCandidateTypeSet::iterator
+ MemPtr = CandidateTypes[ArgIdx].member_pointer_begin(),
+ MemPtrEnd = CandidateTypes[ArgIdx].member_pointer_end();
+ MemPtr != MemPtrEnd; ++MemPtr) {
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*MemPtr)))
+ continue;
+
+ QualType ParamTypes[2] = { *MemPtr, *MemPtr };
+ S.AddBuiltinCandidate(*MemPtr, ParamTypes, Args, 2, CandidateSet);
+ }
+
+ if (S.getLangOpts().CPlusPlus0x) {
+ for (BuiltinCandidateTypeSet::iterator
+ Enum = CandidateTypes[ArgIdx].enumeration_begin(),
+ EnumEnd = CandidateTypes[ArgIdx].enumeration_end();
+ Enum != EnumEnd; ++Enum) {
+ if (!(*Enum)->getAs<EnumType>()->getDecl()->isScoped())
+ continue;
+
+ if (!AddedTypes.insert(S.Context.getCanonicalType(*Enum)))
+ continue;
+
+ QualType ParamTypes[2] = { *Enum, *Enum };
+ S.AddBuiltinCandidate(*Enum, ParamTypes, Args, 2, CandidateSet);
+ }
+ }
+ }
+ }
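+ // Illustrative sketch (hypothetical declarations, not from this file):
+ //   enum class Mode { On, Off };
+ //   struct A { operator Mode(); };
+ //   struct B { operator Mode(); };
+ //   bool c;
+ //   c ? A() : B();   // neither operand converts to the other's type, so the
+ //                    // built-in candidate 'Mode operator?(bool, Mode, Mode)'
+ //                    // lets this conditional yield a 'Mode' (C++11 only).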
+};
+
+} // end anonymous namespace
+
+/// AddBuiltinOperatorCandidates - Add the appropriate built-in
+/// operator overloads to the candidate set (C++ [over.built]), based
+/// on the operator @p Op and the arguments given. For example, if the
+/// operator is a binary '+', this routine might add "int
+/// operator+(int, int)" to cover integer addition.
+void
+Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
+ SourceLocation OpLoc,
+ Expr **Args, unsigned NumArgs,
+ OverloadCandidateSet& CandidateSet) {
+ // Find all of the types that the arguments can convert to, but only
+ // if the operator we're looking at has built-in operator candidates
+ // that make use of these types. Also record whether we encounter non-record
+ // candidate types and whether we encounter arithmetic or enumeral candidate
+ // types.
+ Qualifiers VisibleTypeConversionsQuals;
+ VisibleTypeConversionsQuals.addConst();
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx)
+ VisibleTypeConversionsQuals += CollectVRQualifiers(Context, Args[ArgIdx]);
+
+ bool HasNonRecordCandidateType = false;
+ bool HasArithmeticOrEnumeralCandidateType = false;
+ SmallVector<BuiltinCandidateTypeSet, 2> CandidateTypes;
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx) {
+ CandidateTypes.push_back(BuiltinCandidateTypeSet(*this));
+ CandidateTypes[ArgIdx].AddTypesConvertedFrom(Args[ArgIdx]->getType(),
+ OpLoc,
+ true,
+ (Op == OO_Exclaim ||
+ Op == OO_AmpAmp ||
+ Op == OO_PipePipe),
+ VisibleTypeConversionsQuals);
+ HasNonRecordCandidateType = HasNonRecordCandidateType ||
+ CandidateTypes[ArgIdx].hasNonRecordTypes();
+ HasArithmeticOrEnumeralCandidateType =
+ HasArithmeticOrEnumeralCandidateType ||
+ CandidateTypes[ArgIdx].hasArithmeticOrEnumeralTypes();
+ }
+
+ // Exit early when no non-record types have been added to the candidate set
+ // for any of the arguments to the operator.
+ //
+ // We can't exit early for !, ||, or &&, since there we always have
+ // 'bool' overloads.
+ if (!HasNonRecordCandidateType &&
+ !(Op == OO_Exclaim || Op == OO_AmpAmp || Op == OO_PipePipe))
+ return;
+
+ // Set up an object to manage the common state for building overloads.
+ BuiltinOperatorOverloadBuilder OpBuilder(*this, Args, NumArgs,
+ VisibleTypeConversionsQuals,
+ HasArithmeticOrEnumeralCandidateType,
+ CandidateTypes, CandidateSet);
+
+ // Dispatch over the operation to add in only those overloads which apply.
+ switch (Op) {
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ llvm_unreachable("Expected an overloaded operator");
+
+ case OO_New:
+ case OO_Delete:
+ case OO_Array_New:
+ case OO_Array_Delete:
+ case OO_Call:
+ llvm_unreachable(
+ "Special operators don't use AddBuiltinOperatorCandidates");
+
+ case OO_Comma:
+ case OO_Arrow:
+ // C++ [over.match.oper]p3:
+ // -- For the operator ',', the unary operator '&', or the
+ // operator '->', the built-in candidates set is empty.
+ break;
+
+ case OO_Plus: // '+' is either unary or binary
+ if (NumArgs == 1)
+ OpBuilder.addUnaryPlusPointerOverloads();
+ // Fall through.
+
+ case OO_Minus: // '-' is either unary or binary
+ if (NumArgs == 1) {
+ OpBuilder.addUnaryPlusOrMinusArithmeticOverloads();
+ } else {
+ OpBuilder.addBinaryPlusOrMinusPointerOverloads(Op);
+ OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/false);
+ }
+ break;
+
+ case OO_Star: // '*' is either unary or binary
+ if (NumArgs == 1)
+ OpBuilder.addUnaryStarPointerOverloads();
+ else
+ OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/false);
+ break;
+
+ case OO_Slash:
+ OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/false);
+ break;
+
+ case OO_PlusPlus:
+ case OO_MinusMinus:
+ OpBuilder.addPlusPlusMinusMinusArithmeticOverloads(Op);
+ OpBuilder.addPlusPlusMinusMinusPointerOverloads();
+ break;
+
+ case OO_EqualEqual:
+ case OO_ExclaimEqual:
+ OpBuilder.addEqualEqualOrNotEqualMemberPointerOverloads();
+ // Fall through.
+
+ case OO_Less:
+ case OO_Greater:
+ case OO_LessEqual:
+ case OO_GreaterEqual:
+ OpBuilder.addRelationalPointerOrEnumeralOverloads();
+ OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/true);
+ break;
+
+ case OO_Percent:
+ case OO_Caret:
+ case OO_Pipe:
+ case OO_LessLess:
+ case OO_GreaterGreater:
+ OpBuilder.addBinaryBitwiseArithmeticOverloads(Op);
+ break;
+
+ case OO_Amp: // '&' is either unary or binary
+ if (NumArgs == 1)
+ // C++ [over.match.oper]p3:
+ // -- For the operator ',', the unary operator '&', or the
+ // operator '->', the built-in candidates set is empty.
+ break;
+
+ OpBuilder.addBinaryBitwiseArithmeticOverloads(Op);
+ break;
+
+ case OO_Tilde:
+ OpBuilder.addUnaryTildePromotedIntegralOverloads();
+ break;
+
+ case OO_Equal:
+ OpBuilder.addAssignmentMemberPointerOrEnumeralOverloads();
+ // Fall through.
+
+ case OO_PlusEqual:
+ case OO_MinusEqual:
+ OpBuilder.addAssignmentPointerOverloads(Op == OO_Equal);
+ // Fall through.
+
+ case OO_StarEqual:
+ case OO_SlashEqual:
+ OpBuilder.addAssignmentArithmeticOverloads(Op == OO_Equal);
+ break;
+
+ case OO_PercentEqual:
+ case OO_LessLessEqual:
+ case OO_GreaterGreaterEqual:
+ case OO_AmpEqual:
+ case OO_CaretEqual:
+ case OO_PipeEqual:
+ OpBuilder.addAssignmentIntegralOverloads();
+ break;
+
+ case OO_Exclaim:
+ OpBuilder.addExclaimOverload();
+ break;
+
+ case OO_AmpAmp:
+ case OO_PipePipe:
+ OpBuilder.addAmpAmpOrPipePipeOverload();
+ break;
+
+ case OO_Subscript:
+ OpBuilder.addSubscriptOverloads();
+ break;
+
+ case OO_ArrowStar:
+ OpBuilder.addArrowStarOverloads();
+ break;
+
+ case OO_Conditional:
+ OpBuilder.addConditionalOperatorOverloads();
+ OpBuilder.addGenericBinaryArithmeticOverloads(/*isComparison=*/false);
+ break;
+ }
+}
+
+/// \brief Add function candidates found via argument-dependent lookup
+/// to the set of overloading candidates.
+///
+/// This routine performs argument-dependent name lookup based on the
+/// given function name (which may also be an operator name) and adds
+/// all of the overload candidates found by ADL to the overload
+/// candidate set (C++ [basic.lookup.argdep]).
+void
+Sema::AddArgumentDependentLookupCandidates(DeclarationName Name,
+ bool Operator, SourceLocation Loc,
+ llvm::ArrayRef<Expr *> Args,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ OverloadCandidateSet& CandidateSet,
+ bool PartialOverloading,
+ bool StdNamespaceIsAssociated) {
+ ADLResult Fns;
+
+ // FIXME: This approach for uniquing ADL results (and removing
+ // redundant candidates from the set) relies on pointer-equality,
+ // which means we need to key off the canonical decl. However,
+ // always going back to the canonical decl might not get us the
+ // right set of default arguments. What default arguments are
+ // we supposed to consider on ADL candidates, anyway?
+
+ // FIXME: Pass in the explicit template arguments?
+ ArgumentDependentLookup(Name, Operator, Loc, Args, Fns,
+ StdNamespaceIsAssociated);
+
+ // Erase all of the candidates we already knew about.
+ for (OverloadCandidateSet::iterator Cand = CandidateSet.begin(),
+ CandEnd = CandidateSet.end();
+ Cand != CandEnd; ++Cand)
+ if (Cand->Function) {
+ Fns.erase(Cand->Function);
+ if (FunctionTemplateDecl *FunTmpl = Cand->Function->getPrimaryTemplate())
+ Fns.erase(FunTmpl);
+ }
+
+ // For each of the ADL candidates we found, add it to the overload
+ // set.
+ for (ADLResult::iterator I = Fns.begin(), E = Fns.end(); I != E; ++I) {
+ DeclAccessPair FoundDecl = DeclAccessPair::make(*I, AS_none);
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
+ if (ExplicitTemplateArgs)
+ continue;
+
+ AddOverloadCandidate(FD, FoundDecl, Args, CandidateSet, false,
+ PartialOverloading);
+ } else
+ AddTemplateOverloadCandidate(cast<FunctionTemplateDecl>(*I),
+ FoundDecl, ExplicitTemplateArgs,
+ Args, CandidateSet);
+ }
+}
+
+/// isBetterOverloadCandidate - Determines whether the first overload
+/// candidate is a better candidate than the second (C++ 13.3.3p1).
+bool
+isBetterOverloadCandidate(Sema &S,
+ const OverloadCandidate &Cand1,
+ const OverloadCandidate &Cand2,
+ SourceLocation Loc,
+ bool UserDefinedConversion) {
+ // Define viable functions to be better candidates than non-viable
+ // functions.
+ if (!Cand2.Viable)
+ return Cand1.Viable;
+ else if (!Cand1.Viable)
+ return false;
+
+ // C++ [over.match.best]p1:
+ //
+ // -- if F is a static member function, ICS1(F) is defined such
+ // that ICS1(F) is neither better nor worse than ICS1(G) for
+ // any function G, and, symmetrically, ICS1(G) is neither
+ // better nor worse than ICS1(F).
+ unsigned StartArg = 0;
+ if (Cand1.IgnoreObjectArgument || Cand2.IgnoreObjectArgument)
+ StartArg = 1;
+
+ // C++ [over.match.best]p1:
+ // A viable function F1 is defined to be a better function than another
+ // viable function F2 if for all arguments i, ICSi(F1) is not a worse
+ // conversion sequence than ICSi(F2), and then...
+ unsigned NumArgs = Cand1.NumConversions;
+ assert(Cand2.NumConversions == NumArgs && "Overload candidate mismatch");
+ bool HasBetterConversion = false;
+ for (unsigned ArgIdx = StartArg; ArgIdx < NumArgs; ++ArgIdx) {
+ switch (CompareImplicitConversionSequences(S,
+ Cand1.Conversions[ArgIdx],
+ Cand2.Conversions[ArgIdx])) {
+ case ImplicitConversionSequence::Better:
+ // Cand1 has a better conversion sequence.
+ HasBetterConversion = true;
+ break;
+
+ case ImplicitConversionSequence::Worse:
+ // Cand1 can't be better than Cand2.
+ return false;
+
+ case ImplicitConversionSequence::Indistinguishable:
+ // Do nothing.
+ break;
+ }
+ }
+
+ // -- for some argument j, ICSj(F1) is a better conversion sequence than
+ // ICSj(F2), or, if not that,
+ if (HasBetterConversion)
+ return true;
+
+ // -- F1 is a non-template function and F2 is a function template
+ // specialization, or, if not that,
+ if ((!Cand1.Function || !Cand1.Function->getPrimaryTemplate()) &&
+ Cand2.Function && Cand2.Function->getPrimaryTemplate())
+ return true;
+
+ // -- F1 and F2 are function template specializations, and the function
+ // template for F1 is more specialized than the template for F2
+ // according to the partial ordering rules described in 14.5.5.2, or,
+ // if not that,
+ if (Cand1.Function && Cand1.Function->getPrimaryTemplate() &&
+ Cand2.Function && Cand2.Function->getPrimaryTemplate()) {
+ if (FunctionTemplateDecl *BetterTemplate
+ = S.getMoreSpecializedTemplate(Cand1.Function->getPrimaryTemplate(),
+ Cand2.Function->getPrimaryTemplate(),
+ Loc,
+ isa<CXXConversionDecl>(Cand1.Function)? TPOC_Conversion
+ : TPOC_Call,
+ Cand1.ExplicitCallArguments))
+ return BetterTemplate == Cand1.Function->getPrimaryTemplate();
+ }
+
+ // -- the context is an initialization by user-defined conversion
+ // (see 8.5, 13.3.1.5) and the standard conversion sequence
+ // from the return type of F1 to the destination type (i.e.,
+ // the type of the entity being initialized) is a better
+ // conversion sequence than the standard conversion sequence
+ // from the return type of F2 to the destination type.
+ if (UserDefinedConversion && Cand1.Function && Cand2.Function &&
+ isa<CXXConversionDecl>(Cand1.Function) &&
+ isa<CXXConversionDecl>(Cand2.Function)) {
+ // First check whether we prefer one of the conversion functions over the
+ // other. This only distinguishes the results in non-standard, extension
+ // cases such as the conversion from a lambda closure type to a function
+ // pointer or block.
+ ImplicitConversionSequence::CompareKind FuncResult
+ = compareConversionFunctions(S, Cand1.Function, Cand2.Function);
+ if (FuncResult != ImplicitConversionSequence::Indistinguishable)
+ return FuncResult;
+
+ switch (CompareStandardConversionSequences(S,
+ Cand1.FinalConversion,
+ Cand2.FinalConversion)) {
+ case ImplicitConversionSequence::Better:
+ // Cand1 has a better conversion sequence.
+ return true;
+
+ case ImplicitConversionSequence::Worse:
+ // Cand1 can't be better than Cand2.
+ return false;
+
+ case ImplicitConversionSequence::Indistinguishable:
+ // Do nothing
+ break;
+ }
+ }
+
+ return false;
+}
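+// Illustrative sketch (hypothetical declarations, not from this file):
+//   void f(int);                  // #1, non-template
+//   template<class T> void f(T);  // #2, deduces T = int
+//   f(42);   // both candidates give identity conversions, so the non-template
+//            // tie-break above makes #1 the better candidate.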
+
+/// \brief Computes the best viable function (C++ 13.3.3)
+/// within an overload candidate set.
+///
+/// \param CandidateSet the set of candidate functions.
+///
+/// \param Loc the location of the function name (or operator symbol) for
+/// which overload resolution occurs.
+///
+ /// \param Best If overload resolution was successful or found a deleted
+/// function, Best points to the candidate function found.
+///
+/// \returns The result of overload resolution.
+OverloadingResult
+OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
+ iterator &Best,
+ bool UserDefinedConversion) {
+ // Find the best viable function.
+ Best = end();
+ for (iterator Cand = begin(); Cand != end(); ++Cand) {
+ if (Cand->Viable)
+ if (Best == end() || isBetterOverloadCandidate(S, *Cand, *Best, Loc,
+ UserDefinedConversion))
+ Best = Cand;
+ }
+
+ // If we didn't find any viable functions, abort.
+ if (Best == end())
+ return OR_No_Viable_Function;
+
+ // Make sure that this function is better than every other viable
+ // function. If not, we have an ambiguity.
+ for (iterator Cand = begin(); Cand != end(); ++Cand) {
+ if (Cand->Viable &&
+ Cand != Best &&
+ !isBetterOverloadCandidate(S, *Best, *Cand, Loc,
+ UserDefinedConversion)) {
+ Best = end();
+ return OR_Ambiguous;
+ }
+ }
+
+ // Best is the best viable function.
+ if (Best->Function &&
+ (Best->Function->isDeleted() ||
+ S.isFunctionConsideredUnavailable(Best->Function)))
+ return OR_Deleted;
+
+ return OR_Success;
+}
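+// Illustrative sketch (hypothetical declarations, not from this file):
+//   void g(long);
+//   void g(unsigned);
+//   g(0);   // 'int'->'long' and 'int'->'unsigned' are indistinguishable
+//           // conversions, so neither candidate is better and the result
+//           // is OR_Ambiguous.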
+
+namespace {
+
+enum OverloadCandidateKind {
+ oc_function,
+ oc_method,
+ oc_constructor,
+ oc_function_template,
+ oc_method_template,
+ oc_constructor_template,
+ oc_implicit_default_constructor,
+ oc_implicit_copy_constructor,
+ oc_implicit_move_constructor,
+ oc_implicit_copy_assignment,
+ oc_implicit_move_assignment,
+ oc_implicit_inherited_constructor
+};
+
+OverloadCandidateKind ClassifyOverloadCandidate(Sema &S,
+ FunctionDecl *Fn,
+ std::string &Description) {
+ bool isTemplate = false;
+
+ if (FunctionTemplateDecl *FunTmpl = Fn->getPrimaryTemplate()) {
+ isTemplate = true;
+ Description = S.getTemplateArgumentBindingsText(
+ FunTmpl->getTemplateParameters(), *Fn->getTemplateSpecializationArgs());
+ }
+
+ if (CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(Fn)) {
+ if (!Ctor->isImplicit())
+ return isTemplate ? oc_constructor_template : oc_constructor;
+
+ if (Ctor->getInheritedConstructor())
+ return oc_implicit_inherited_constructor;
+
+ if (Ctor->isDefaultConstructor())
+ return oc_implicit_default_constructor;
+
+ if (Ctor->isMoveConstructor())
+ return oc_implicit_move_constructor;
+
+ assert(Ctor->isCopyConstructor() &&
+ "unexpected sort of implicit constructor");
+ return oc_implicit_copy_constructor;
+ }
+
+ if (CXXMethodDecl *Meth = dyn_cast<CXXMethodDecl>(Fn)) {
+ // This actually gets spelled 'candidate function' for now, but
+ // it doesn't hurt to split it out.
+ if (!Meth->isImplicit())
+ return isTemplate ? oc_method_template : oc_method;
+
+ if (Meth->isMoveAssignmentOperator())
+ return oc_implicit_move_assignment;
+
+ if (Meth->isCopyAssignmentOperator())
+ return oc_implicit_copy_assignment;
+
+ assert(isa<CXXConversionDecl>(Meth) && "expected conversion");
+ return oc_method;
+ }
+
+ return isTemplate ? oc_function_template : oc_function;
+}
+
+void MaybeEmitInheritedConstructorNote(Sema &S, FunctionDecl *Fn) {
+ const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(Fn);
+ if (!Ctor) return;
+
+ Ctor = Ctor->getInheritedConstructor();
+ if (!Ctor) return;
+
+ S.Diag(Ctor->getLocation(), diag::note_ovl_candidate_inherited_constructor);
+}
+
+} // end anonymous namespace
+
+// Notes the location of an overload candidate.
+void Sema::NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType) {
+ std::string FnDesc;
+ OverloadCandidateKind K = ClassifyOverloadCandidate(*this, Fn, FnDesc);
+ PartialDiagnostic PD = PDiag(diag::note_ovl_candidate)
+ << (unsigned) K << FnDesc;
+ HandleFunctionTypeMismatch(PD, Fn->getType(), DestType);
+ Diag(Fn->getLocation(), PD);
+ MaybeEmitInheritedConstructorNote(*this, Fn);
+}
+
+ // Notes the location of all overload candidates designated through
+ // OverloadedExpr.
+void Sema::NoteAllOverloadCandidates(Expr* OverloadedExpr, QualType DestType) {
+ assert(OverloadedExpr->getType() == Context.OverloadTy);
+
+ OverloadExpr::FindResult Ovl = OverloadExpr::find(OverloadedExpr);
+ OverloadExpr *OvlExpr = Ovl.Expression;
+
+ for (UnresolvedSetIterator I = OvlExpr->decls_begin(),
+ IEnd = OvlExpr->decls_end();
+ I != IEnd; ++I) {
+ if (FunctionTemplateDecl *FunTmpl =
+ dyn_cast<FunctionTemplateDecl>((*I)->getUnderlyingDecl()) ) {
+ NoteOverloadCandidate(FunTmpl->getTemplatedDecl(), DestType);
+ } else if (FunctionDecl *Fun
+ = dyn_cast<FunctionDecl>((*I)->getUnderlyingDecl()) ) {
+ NoteOverloadCandidate(Fun, DestType);
+ }
+ }
+}
+
+/// Diagnoses an ambiguous conversion. The partial diagnostic is the
+/// "lead" diagnostic; it will be given two arguments, the source and
+/// target types of the conversion.
+void ImplicitConversionSequence::DiagnoseAmbiguousConversion(
+ Sema &S,
+ SourceLocation CaretLoc,
+ const PartialDiagnostic &PDiag) const {
+ S.Diag(CaretLoc, PDiag)
+ << Ambiguous.getFromType() << Ambiguous.getToType();
+ for (AmbiguousConversionSequence::const_iterator
+ I = Ambiguous.begin(), E = Ambiguous.end(); I != E; ++I) {
+ S.NoteOverloadCandidate(*I);
+ }
+}
+
+namespace {
+
+void DiagnoseBadConversion(Sema &S, OverloadCandidate *Cand, unsigned I) {
+ const ImplicitConversionSequence &Conv = Cand->Conversions[I];
+ assert(Conv.isBad());
+ assert(Cand->Function && "for now, candidate must be a function");
+ FunctionDecl *Fn = Cand->Function;
+
+ // There's a conversion slot for the object argument if this is a
+ // non-constructor method. Note that 'I' corresponds to the
+ // conversion-slot index.
+ bool isObjectArgument = false;
+ if (isa<CXXMethodDecl>(Fn) && !isa<CXXConstructorDecl>(Fn)) {
+ if (I == 0)
+ isObjectArgument = true;
+ else
+ I--;
+ }
+
+ std::string FnDesc;
+ OverloadCandidateKind FnKind = ClassifyOverloadCandidate(S, Fn, FnDesc);
+
+ Expr *FromExpr = Conv.Bad.FromExpr;
+ QualType FromTy = Conv.Bad.getFromType();
+ QualType ToTy = Conv.Bad.getToType();
+
+ if (FromTy == S.Context.OverloadTy) {
+ assert(FromExpr && "overload set argument came from implicit argument?");
+ Expr *E = FromExpr->IgnoreParens();
+ if (isa<UnaryOperator>(E))
+ E = cast<UnaryOperator>(E)->getSubExpr()->IgnoreParens();
+ DeclarationName Name = cast<OverloadExpr>(E)->getName();
+
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_overload)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << ToTy << Name << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ // Do some hand-waving analysis to see if the non-viability is due
+ // to a qualifier mismatch.
+ CanQualType CFromTy = S.Context.getCanonicalType(FromTy);
+ CanQualType CToTy = S.Context.getCanonicalType(ToTy);
+ if (CanQual<ReferenceType> RT = CToTy->getAs<ReferenceType>())
+ CToTy = RT->getPointeeType();
+ else {
+ // TODO: detect and diagnose the full richness of const mismatches.
+ if (CanQual<PointerType> FromPT = CFromTy->getAs<PointerType>())
+ if (CanQual<PointerType> ToPT = CToTy->getAs<PointerType>())
+ CFromTy = FromPT->getPointeeType(), CToTy = ToPT->getPointeeType();
+ }
+
+ if (CToTy.getUnqualifiedType() == CFromTy.getUnqualifiedType() &&
+ !CToTy.isAtLeastAsQualifiedAs(CFromTy)) {
+ Qualifiers FromQs = CFromTy.getQualifiers();
+ Qualifiers ToQs = CToTy.getQualifiers();
+
+ if (FromQs.getAddressSpace() != ToQs.getAddressSpace()) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_addrspace)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy
+ << FromQs.getAddressSpace() << ToQs.getAddressSpace()
+ << (unsigned) isObjectArgument << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ if (FromQs.getObjCLifetime() != ToQs.getObjCLifetime()) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_ownership)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy
+ << FromQs.getObjCLifetime() << ToQs.getObjCLifetime()
+ << (unsigned) isObjectArgument << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ if (FromQs.getObjCGCAttr() != ToQs.getObjCGCAttr()) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_gc)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy
+ << FromQs.getObjCGCAttr() << ToQs.getObjCGCAttr()
+ << (unsigned) isObjectArgument << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ unsigned CVR = FromQs.getCVRQualifiers() & ~ToQs.getCVRQualifiers();
+ assert(CVR && "unexpected qualifiers mismatch");
+
+ if (isObjectArgument) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_cvr_this)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy << (CVR - 1);
+ } else {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_cvr)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy << (CVR - 1) << I+1;
+ }
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ // Special diagnostic for failure to convert an initializer list, since
+ // telling the user that it has type void is not useful.
+ if (FromExpr && isa<InitListExpr>(FromExpr)) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_list_argument)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy << ToTy << (unsigned) isObjectArgument << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ // Diagnose references or pointers to incomplete types differently,
+ // since it's far from impossible that the incompleteness triggered
+ // the failure.
+ QualType TempFromTy = FromTy.getNonReferenceType();
+ if (const PointerType *PTy = TempFromTy->getAs<PointerType>())
+ TempFromTy = PTy->getPointeeType();
+ if (TempFromTy->isIncompleteType()) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_conv_incomplete)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy << ToTy << (unsigned) isObjectArgument << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ // Diagnose base -> derived pointer conversions.
+ unsigned BaseToDerivedConversion = 0;
+ if (const PointerType *FromPtrTy = FromTy->getAs<PointerType>()) {
+ if (const PointerType *ToPtrTy = ToTy->getAs<PointerType>()) {
+ if (ToPtrTy->getPointeeType().isAtLeastAsQualifiedAs(
+ FromPtrTy->getPointeeType()) &&
+ !FromPtrTy->getPointeeType()->isIncompleteType() &&
+ !ToPtrTy->getPointeeType()->isIncompleteType() &&
+ S.IsDerivedFrom(ToPtrTy->getPointeeType(),
+ FromPtrTy->getPointeeType()))
+ BaseToDerivedConversion = 1;
+ }
+ } else if (const ObjCObjectPointerType *FromPtrTy
+ = FromTy->getAs<ObjCObjectPointerType>()) {
+ if (const ObjCObjectPointerType *ToPtrTy
+ = ToTy->getAs<ObjCObjectPointerType>())
+ if (const ObjCInterfaceDecl *FromIface = FromPtrTy->getInterfaceDecl())
+ if (const ObjCInterfaceDecl *ToIface = ToPtrTy->getInterfaceDecl())
+ if (ToPtrTy->getPointeeType().isAtLeastAsQualifiedAs(
+ FromPtrTy->getPointeeType()) &&
+ FromIface->isSuperClassOf(ToIface))
+ BaseToDerivedConversion = 2;
+ } else if (const ReferenceType *ToRefTy = ToTy->getAs<ReferenceType>()) {
+ if (ToRefTy->getPointeeType().isAtLeastAsQualifiedAs(FromTy) &&
+ !FromTy->isIncompleteType() &&
+ !ToRefTy->getPointeeType()->isIncompleteType() &&
+ S.IsDerivedFrom(ToRefTy->getPointeeType(), FromTy))
+ BaseToDerivedConversion = 3;
+ }
+
+ if (BaseToDerivedConversion) {
+ S.Diag(Fn->getLocation(),
+ diag::note_ovl_candidate_bad_base_to_derived_conv)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << (BaseToDerivedConversion - 1)
+ << FromTy << ToTy << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ if (isa<ObjCObjectPointerType>(CFromTy) &&
+ isa<PointerType>(CToTy)) {
+ Qualifiers FromQs = CFromTy.getQualifiers();
+ Qualifiers ToQs = CToTy.getQualifiers();
+ if (FromQs.getObjCLifetime() != ToQs.getObjCLifetime()) {
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_arc_conv)
+ << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy << ToTy << (unsigned) isObjectArgument << I+1;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+ }
+
+ // Emit the generic diagnostic and, optionally, add the hints to it.
+ PartialDiagnostic FDiag = S.PDiag(diag::note_ovl_candidate_bad_conv);
+ FDiag << (unsigned) FnKind << FnDesc
+ << (FromExpr ? FromExpr->getSourceRange() : SourceRange())
+ << FromTy << ToTy << (unsigned) isObjectArgument << I + 1
+ << (unsigned) (Cand->Fix.Kind);
+
+ // If we can fix the conversion, suggest the FixIts.
+ for (std::vector<FixItHint>::iterator HI = Cand->Fix.Hints.begin(),
+ HE = Cand->Fix.Hints.end(); HI != HE; ++HI)
+ FDiag << *HI;
+ S.Diag(Fn->getLocation(), FDiag);
+
+ MaybeEmitInheritedConstructorNote(S, Fn);
+}
+
+void DiagnoseArityMismatch(Sema &S, OverloadCandidate *Cand,
+ unsigned NumFormalArgs) {
+ // TODO: treat calls to a missing default constructor as a special case
+
+ FunctionDecl *Fn = Cand->Function;
+ const FunctionProtoType *FnTy = Fn->getType()->getAs<FunctionProtoType>();
+
+ unsigned MinParams = Fn->getMinRequiredArguments();
+
+ // With invalid overloaded operators, it's possible that we think we
+ // have an arity mismatch when in fact it looks like we have the
+ // right number of arguments, because only overloaded operators have
+ // the weird behavior of overloading member and non-member functions.
+ // Just don't report anything.
+ if (Fn->isInvalidDecl() &&
+ Fn->getDeclName().getNameKind() == DeclarationName::CXXOperatorName)
+ return;
+
+ // at least / at most / exactly
+ unsigned mode, modeCount;
+ if (NumFormalArgs < MinParams) {
+ assert((Cand->FailureKind == ovl_fail_too_few_arguments) ||
+ (Cand->FailureKind == ovl_fail_bad_deduction &&
+ Cand->DeductionFailure.Result == Sema::TDK_TooFewArguments));
+ if (MinParams != FnTy->getNumArgs() ||
+ FnTy->isVariadic() || FnTy->isTemplateVariadic())
+ mode = 0; // "at least"
+ else
+ mode = 2; // "exactly"
+ modeCount = MinParams;
+ } else {
+ assert((Cand->FailureKind == ovl_fail_too_many_arguments) ||
+ (Cand->FailureKind == ovl_fail_bad_deduction &&
+ Cand->DeductionFailure.Result == Sema::TDK_TooManyArguments));
+ if (MinParams != FnTy->getNumArgs())
+ mode = 1; // "at most"
+ else
+ mode = 2; // "exactly"
+ modeCount = FnTy->getNumArgs();
+ }
+
+ std::string Description;
+ OverloadCandidateKind FnKind = ClassifyOverloadCandidate(S, Fn, Description);
+
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_arity)
+ << (unsigned) FnKind << (Fn->getDescribedFunctionTemplate() != 0) << mode
+ << modeCount << NumFormalArgs;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+}
+
+/// Diagnose a failed template-argument deduction.
+void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
+ unsigned NumArgs) {
+ FunctionDecl *Fn = Cand->Function; // pattern
+
+ TemplateParameter Param = Cand->DeductionFailure.getTemplateParameter();
+ NamedDecl *ParamD;
+ (ParamD = Param.dyn_cast<TemplateTypeParmDecl*>()) ||
+ (ParamD = Param.dyn_cast<NonTypeTemplateParmDecl*>()) ||
+ (ParamD = Param.dyn_cast<TemplateTemplateParmDecl*>());
+ switch (Cand->DeductionFailure.Result) {
+ case Sema::TDK_Success:
+ llvm_unreachable("TDK_success while diagnosing bad deduction");
+
+ case Sema::TDK_Incomplete: {
+ assert(ParamD && "no parameter found for incomplete deduction result");
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_incomplete_deduction)
+ << ParamD->getDeclName();
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ case Sema::TDK_Underqualified: {
+ assert(ParamD && "no parameter found for bad qualifiers deduction result");
+ TemplateTypeParmDecl *TParam = cast<TemplateTypeParmDecl>(ParamD);
+
+ QualType Param = Cand->DeductionFailure.getFirstArg()->getAsType();
+
+ // Param will have been canonicalized, but it should just be a
+ // qualified version of ParamD, so move the qualifiers to that.
+ QualifierCollector Qs;
+ Qs.strip(Param);
+ QualType NonCanonParam = Qs.apply(S.Context, TParam->getTypeForDecl());
+ assert(S.Context.hasSameType(Param, NonCanonParam));
+
+ // Arg has also been canonicalized, but there's nothing we can do
+ // about that. It also doesn't matter as much, because it won't
+ // have any template parameters in it (because deduction isn't
+ // done on dependent types).
+ QualType Arg = Cand->DeductionFailure.getSecondArg()->getAsType();
+
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_underqualified)
+ << ParamD->getDeclName() << Arg << NonCanonParam;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ case Sema::TDK_Inconsistent: {
+ assert(ParamD && "no parameter found for inconsistent deduction result");
+ int which = 0;
+ if (isa<TemplateTypeParmDecl>(ParamD))
+ which = 0;
+ else if (isa<NonTypeTemplateParmDecl>(ParamD))
+ which = 1;
+ else {
+ which = 2;
+ }
+
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_inconsistent_deduction)
+ << which << ParamD->getDeclName()
+ << *Cand->DeductionFailure.getFirstArg()
+ << *Cand->DeductionFailure.getSecondArg();
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ case Sema::TDK_InvalidExplicitArguments:
+ assert(ParamD && "no parameter found for invalid explicit arguments");
+ if (ParamD->getDeclName())
+ S.Diag(Fn->getLocation(),
+ diag::note_ovl_candidate_explicit_arg_mismatch_named)
+ << ParamD->getDeclName();
+ else {
+ int index = 0;
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ParamD))
+ index = TTP->getIndex();
+ else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(ParamD))
+ index = NTTP->getIndex();
+ else
+ index = cast<TemplateTemplateParmDecl>(ParamD)->getIndex();
+ S.Diag(Fn->getLocation(),
+ diag::note_ovl_candidate_explicit_arg_mismatch_unnamed)
+ << (index + 1);
+ }
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+
+ case Sema::TDK_TooManyArguments:
+ case Sema::TDK_TooFewArguments:
+ DiagnoseArityMismatch(S, Cand, NumArgs);
+ return;
+
+ case Sema::TDK_InstantiationDepth:
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_instantiation_depth);
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+
+ case Sema::TDK_SubstitutionFailure: {
+ std::string ArgString;
+ if (TemplateArgumentList *Args
+ = Cand->DeductionFailure.getTemplateArgumentList())
+ ArgString = S.getTemplateArgumentBindingsText(
+ Fn->getDescribedFunctionTemplate()->getTemplateParameters(),
+ *Args);
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_substitution_failure)
+ << ArgString;
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ // TODO: diagnose these individually, then kill off
+ // note_ovl_candidate_bad_deduction, which is uselessly vague.
+ case Sema::TDK_NonDeducedMismatch:
+ case Sema::TDK_FailedOverloadResolution:
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_bad_deduction);
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+}
+
+/// CUDA: diagnose an invalid call across targets.
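+///
+/// For example, under the CUDA target rules, host code cannot call a
+/// __device__-only function (names below are illustrative):
+/// @code
+/// __device__ int dev_only();
+/// void host_fn() { dev_only(); } // bad target: __device__ callee, __host__ caller
+/// @endcode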
+void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) {
+ FunctionDecl *Caller = cast<FunctionDecl>(S.CurContext);
+ FunctionDecl *Callee = Cand->Function;
+
+ Sema::CUDAFunctionTarget CallerTarget = S.IdentifyCUDATarget(Caller),
+ CalleeTarget = S.IdentifyCUDATarget(Callee);
+
+ std::string FnDesc;
+ OverloadCandidateKind FnKind = ClassifyOverloadCandidate(S, Callee, FnDesc);
+
+ S.Diag(Callee->getLocation(), diag::note_ovl_candidate_bad_target)
+ << (unsigned) FnKind << CalleeTarget << CallerTarget;
+}
+
+/// Generates a 'note' diagnostic for an overload candidate. We've
+/// already generated a primary error at the call site.
+///
+/// It really does need to be a single diagnostic with its caret
+/// pointed at the candidate declaration. Yes, this creates some
+/// major challenges of technical writing. Yes, this makes pointing
+/// out problems with specific arguments quite awkward. It's still
+/// better than generating twenty screens of text for every failed
+/// overload.
+///
+/// It would be great to be able to express per-candidate problems
+/// more richly for those diagnostic clients that cared, but we'd
+/// still have to be just as careful with the default diagnostics.
+void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
+ unsigned NumArgs) {
+ FunctionDecl *Fn = Cand->Function;
+
+ // Note deleted candidates, but only if they're viable.
+ if (Cand->Viable && (Fn->isDeleted() ||
+ S.isFunctionConsideredUnavailable(Fn))) {
+ std::string FnDesc;
+ OverloadCandidateKind FnKind = ClassifyOverloadCandidate(S, Fn, FnDesc);
+
+ S.Diag(Fn->getLocation(), diag::note_ovl_candidate_deleted)
+ << FnKind << FnDesc
+ << (Fn->isDeleted() ? (Fn->isDeletedAsWritten() ? 1 : 2) : 0);
+ MaybeEmitInheritedConstructorNote(S, Fn);
+ return;
+ }
+
+ // We don't really have anything else to say about viable candidates.
+ if (Cand->Viable) {
+ S.NoteOverloadCandidate(Fn);
+ return;
+ }
+
+ switch (Cand->FailureKind) {
+ case ovl_fail_too_many_arguments:
+ case ovl_fail_too_few_arguments:
+ return DiagnoseArityMismatch(S, Cand, NumArgs);
+
+ case ovl_fail_bad_deduction:
+ return DiagnoseBadDeduction(S, Cand, NumArgs);
+
+ case ovl_fail_trivial_conversion:
+ case ovl_fail_bad_final_conversion:
+ case ovl_fail_final_conversion_not_exact:
+ return S.NoteOverloadCandidate(Fn);
+
+ case ovl_fail_bad_conversion: {
+ unsigned I = (Cand->IgnoreObjectArgument ? 1 : 0);
+ for (unsigned N = Cand->NumConversions; I != N; ++I)
+ if (Cand->Conversions[I].isBad())
+ return DiagnoseBadConversion(S, Cand, I);
+
+ // FIXME: this currently happens when we're called from SemaInit
+ // when user-conversion overload fails. Figure out how to handle
+ // those conditions and diagnose them well.
+ return S.NoteOverloadCandidate(Fn);
+ }
+
+ case ovl_fail_bad_target:
+ return DiagnoseBadTarget(S, Cand);
+ }
+}
+
+void NoteSurrogateCandidate(Sema &S, OverloadCandidate *Cand) {
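+ // A surrogate candidate comes from a conversion function that yields a
+ // pointer or reference to function (C++ [over.call.object]), e.g.
+ //   typedef void (*FP)(int);
+ //   struct S { operator FP() const; };
+ //   S s; s(0); // the surrogate 'void (*)(int)' competes as a call candidate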
+ // Desugar the type of the surrogate down to a function type,
+ // retaining as many typedefs as possible while still showing
+ // the function type (and, therefore, its parameter types).
+ QualType FnType = Cand->Surrogate->getConversionType();
+ bool isLValueReference = false;
+ bool isRValueReference = false;
+ bool isPointer = false;
+ if (const LValueReferenceType *FnTypeRef =
+ FnType->getAs<LValueReferenceType>()) {
+ FnType = FnTypeRef->getPointeeType();
+ isLValueReference = true;
+ } else if (const RValueReferenceType *FnTypeRef =
+ FnType->getAs<RValueReferenceType>()) {
+ FnType = FnTypeRef->getPointeeType();
+ isRValueReference = true;
+ }
+ if (const PointerType *FnTypePtr = FnType->getAs<PointerType>()) {
+ FnType = FnTypePtr->getPointeeType();
+ isPointer = true;
+ }
+ // Desugar down to a function type.
+ FnType = QualType(FnType->getAs<FunctionType>(), 0);
+ // Reconstruct the pointer/reference as appropriate.
+ if (isPointer) FnType = S.Context.getPointerType(FnType);
+ if (isRValueReference) FnType = S.Context.getRValueReferenceType(FnType);
+ if (isLValueReference) FnType = S.Context.getLValueReferenceType(FnType);
+
+ S.Diag(Cand->Surrogate->getLocation(), diag::note_ovl_surrogate_cand)
+ << FnType;
+ MaybeEmitInheritedConstructorNote(S, Cand->Surrogate);
+}
+
+void NoteBuiltinOperatorCandidate(Sema &S,
+ const char *Opc,
+ SourceLocation OpLoc,
+ OverloadCandidate *Cand) {
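+ // Builds a human-readable signature string such as "operator+(int, double)"
+ // or "operator!(bool)" and emits it in the note.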
+ assert(Cand->NumConversions <= 2 && "builtin operator is not binary");
+ std::string TypeStr("operator");
+ TypeStr += Opc;
+ TypeStr += "(";
+ TypeStr += Cand->BuiltinTypes.ParamTypes[0].getAsString();
+ if (Cand->NumConversions == 1) {
+ TypeStr += ")";
+ S.Diag(OpLoc, diag::note_ovl_builtin_unary_candidate) << TypeStr;
+ } else {
+ TypeStr += ", ";
+ TypeStr += Cand->BuiltinTypes.ParamTypes[1].getAsString();
+ TypeStr += ")";
+ S.Diag(OpLoc, diag::note_ovl_builtin_binary_candidate) << TypeStr;
+ }
+}
+
+void NoteAmbiguousUserConversions(Sema &S, SourceLocation OpLoc,
+ OverloadCandidate *Cand) {
+ unsigned NoOperands = Cand->NumConversions;
+ for (unsigned ArgIdx = 0; ArgIdx < NoOperands; ++ArgIdx) {
+ const ImplicitConversionSequence &ICS = Cand->Conversions[ArgIdx];
+ if (ICS.isBad()) break; // all meaningless after first invalid
+ if (!ICS.isAmbiguous()) continue;
+
+ ICS.DiagnoseAmbiguousConversion(S, OpLoc,
+ S.PDiag(diag::note_ambiguous_type_conversion));
+ }
+}
+
+SourceLocation GetLocationForCandidate(const OverloadCandidate *Cand) {
+ if (Cand->Function)
+ return Cand->Function->getLocation();
+ if (Cand->IsSurrogate)
+ return Cand->Surrogate->getLocation();
+ return SourceLocation();
+}
+
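+/// Rank a template deduction failure for diagnostic ordering; candidates with
+/// lower-ranked failures are listed earlier by
+/// CompareOverloadCandidatesForDisplay.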
+static unsigned
+RankDeductionFailure(const OverloadCandidate::DeductionFailureInfo &DFI) {
+ switch ((Sema::TemplateDeductionResult)DFI.Result) {
+ case Sema::TDK_Success:
+ llvm_unreachable("TDK_success while diagnosing bad deduction");
+
+ case Sema::TDK_Incomplete:
+ return 1;
+
+ case Sema::TDK_Underqualified:
+ case Sema::TDK_Inconsistent:
+ return 2;
+
+ case Sema::TDK_SubstitutionFailure:
+ case Sema::TDK_NonDeducedMismatch:
+ return 3;
+
+ case Sema::TDK_InstantiationDepth:
+ case Sema::TDK_FailedOverloadResolution:
+ return 4;
+
+ case Sema::TDK_InvalidExplicitArguments:
+ return 5;
+
+ case Sema::TDK_TooManyArguments:
+ case Sema::TDK_TooFewArguments:
+ return 6;
+ }
+ llvm_unreachable("Unhandled deduction result");
+}
+
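+/// Orders overload candidates for display in diagnostics: viable candidates
+/// come first (better candidates earlier), then non-viable candidates grouped
+/// roughly by how interesting their failure is, and finally by source
+/// location.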
+struct CompareOverloadCandidatesForDisplay {
+ Sema &S;
+ CompareOverloadCandidatesForDisplay(Sema &S) : S(S) {}
+
+ bool operator()(const OverloadCandidate *L,
+ const OverloadCandidate *R) {
+ // Fast-path this check.
+ if (L == R) return false;
+
+ // Order first by viability.
+ if (L->Viable) {
+ if (!R->Viable) return true;
+
+ // TODO: introduce a tri-valued comparison for overload
+ // candidates. Would be more worthwhile if we had a sort
+ // that could exploit it.
+ if (isBetterOverloadCandidate(S, *L, *R, SourceLocation())) return true;
+ if (isBetterOverloadCandidate(S, *R, *L, SourceLocation())) return false;
+ } else if (R->Viable)
+ return false;
+
+ assert(L->Viable == R->Viable);
+
+ // Criteria by which we can sort non-viable candidates:
+ if (!L->Viable) {
+ // 1. Arity mismatches come after other candidates.
+ if (L->FailureKind == ovl_fail_too_many_arguments ||
+ L->FailureKind == ovl_fail_too_few_arguments)
+ return false;
+ if (R->FailureKind == ovl_fail_too_many_arguments ||
+ R->FailureKind == ovl_fail_too_few_arguments)
+ return true;
+
+ // 2. Bad conversions come first and are ordered by the number
+ // of bad conversions and quality of good conversions.
+ if (L->FailureKind == ovl_fail_bad_conversion) {
+ if (R->FailureKind != ovl_fail_bad_conversion)
+ return true;
+
+ // The conversion that can be fixed with a smaller number of changes
+ // comes first.
+ unsigned numLFixes = L->Fix.NumConversionsFixed;
+ unsigned numRFixes = R->Fix.NumConversionsFixed;
+ numLFixes = (numLFixes == 0) ? UINT_MAX : numLFixes;
+ numRFixes = (numRFixes == 0) ? UINT_MAX : numRFixes;
+ if (numLFixes != numRFixes) {
+ if (numLFixes < numRFixes)
+ return true;
+ else
+ return false;
+ }
+
+ // If there's any ordering between the defined conversions...
+ // FIXME: this might not be transitive.
+ assert(L->NumConversions == R->NumConversions);
+
+ int leftBetter = 0;
+ unsigned I = (L->IgnoreObjectArgument || R->IgnoreObjectArgument);
+ for (unsigned E = L->NumConversions; I != E; ++I) {
+ switch (CompareImplicitConversionSequences(S,
+ L->Conversions[I],
+ R->Conversions[I])) {
+ case ImplicitConversionSequence::Better:
+ leftBetter++;
+ break;
+
+ case ImplicitConversionSequence::Worse:
+ leftBetter--;
+ break;
+
+ case ImplicitConversionSequence::Indistinguishable:
+ break;
+ }
+ }
+ if (leftBetter > 0) return true;
+ if (leftBetter < 0) return false;
+
+ } else if (R->FailureKind == ovl_fail_bad_conversion)
+ return false;
+
+ if (L->FailureKind == ovl_fail_bad_deduction) {
+ if (R->FailureKind != ovl_fail_bad_deduction)
+ return true;
+
+ if (L->DeductionFailure.Result != R->DeductionFailure.Result)
+ return RankDeductionFailure(L->DeductionFailure)
+ < RankDeductionFailure(R->DeductionFailure);
+ } else if (R->FailureKind == ovl_fail_bad_deduction)
+ return false;
+
+ // TODO: others?
+ }
+
+ // Sort everything else by location.
+ SourceLocation LLoc = GetLocationForCandidate(L);
+ SourceLocation RLoc = GetLocationForCandidate(R);
+
+ // Put candidates without locations (e.g. builtins) at the end.
+ if (LLoc.isInvalid()) return false;
+ if (RLoc.isInvalid()) return true;
+
+ return S.SourceMgr.isBeforeInTranslationUnit(LLoc, RLoc);
+ }
+};
+
+/// CompleteNonViableCandidate - Normally, overload resolution only
+/// computes up to the first bad conversion. Produces the FixIt set if possible.
+void CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
+ llvm::ArrayRef<Expr *> Args) {
+ assert(!Cand->Viable);
+
+ // Don't do anything on failures other than bad conversion.
+ if (Cand->FailureKind != ovl_fail_bad_conversion) return;
+
+ // We only want the FixIts if all the arguments can be corrected.
+ bool Unfixable = false;
+ // Use an implicit copy initialization to check conversion fixes.
+ Cand->Fix.setConversionChecker(TryCopyInitialization);
+
+ // Skip forward to the first bad conversion.
+ unsigned ConvIdx = (Cand->IgnoreObjectArgument ? 1 : 0);
+ unsigned ConvCount = Cand->NumConversions;
+ while (true) {
+ assert(ConvIdx != ConvCount && "no bad conversion in candidate");
+ ConvIdx++;
+ if (Cand->Conversions[ConvIdx - 1].isBad()) {
+ Unfixable = !Cand->TryToFixBadConversion(ConvIdx - 1, S);
+ break;
+ }
+ }
+
+ if (ConvIdx == ConvCount)
+ return;
+
+ assert(!Cand->Conversions[ConvIdx].isInitialized() &&
+ "remaining conversion is initialized?");
+
+ // FIXME: this should probably be preserved from the overload
+ // operation somehow.
+ bool SuppressUserConversions = false;
+
+ const FunctionProtoType* Proto;
+ unsigned ArgIdx = ConvIdx;
+
+ if (Cand->IsSurrogate) {
+ QualType ConvType
+ = Cand->Surrogate->getConversionType().getNonReferenceType();
+ if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>())
+ ConvType = ConvPtrType->getPointeeType();
+ Proto = ConvType->getAs<FunctionProtoType>();
+ ArgIdx--;
+ } else if (Cand->Function) {
+ Proto = Cand->Function->getType()->getAs<FunctionProtoType>();
+ if (isa<CXXMethodDecl>(Cand->Function) &&
+ !isa<CXXConstructorDecl>(Cand->Function))
+ ArgIdx--;
+ } else {
+ // Builtin binary operator with a bad first conversion.
+ assert(ConvCount <= 3);
+ for (; ConvIdx != ConvCount; ++ConvIdx)
+ Cand->Conversions[ConvIdx]
+ = TryCopyInitialization(S, Args[ConvIdx],
+ Cand->BuiltinTypes.ParamTypes[ConvIdx],
+ SuppressUserConversions,
+ /*InOverloadResolution*/ true,
+ /*AllowObjCWritebackConversion=*/
+ S.getLangOpts().ObjCAutoRefCount);
+ return;
+ }
+
+ // Fill in the rest of the conversions.
+ unsigned NumArgsInProto = Proto->getNumArgs();
+ for (; ConvIdx != ConvCount; ++ConvIdx, ++ArgIdx) {
+ if (ArgIdx < NumArgsInProto) {
+ Cand->Conversions[ConvIdx]
+ = TryCopyInitialization(S, Args[ArgIdx], Proto->getArgType(ArgIdx),
+ SuppressUserConversions,
+ /*InOverloadResolution=*/true,
+ /*AllowObjCWritebackConversion=*/
+ S.getLangOpts().ObjCAutoRefCount);
+ // Store the FixIt in the candidate if it exists.
+ if (!Unfixable && Cand->Conversions[ConvIdx].isBad())
+ Unfixable = !Cand->TryToFixBadConversion(ConvIdx, S);
+ }
+ else
+ Cand->Conversions[ConvIdx].setEllipsis();
+ }
+}
+
+} // end anonymous namespace
+
+/// NoteCandidates - When overload resolution fails, prints diagnostic
+/// messages containing the candidates in the candidate set.
+void OverloadCandidateSet::NoteCandidates(Sema &S,
+ OverloadCandidateDisplayKind OCD,
+ llvm::ArrayRef<Expr *> Args,
+ const char *Opc,
+ SourceLocation OpLoc) {
+ // Sort the candidates by viability and position. Sorting directly would
+ // be prohibitive, so we make a set of pointers and sort those.
+ SmallVector<OverloadCandidate*, 32> Cands;
+ if (OCD == OCD_AllCandidates) Cands.reserve(size());
+ for (iterator Cand = begin(), LastCand = end(); Cand != LastCand; ++Cand) {
+ if (Cand->Viable)
+ Cands.push_back(Cand);
+ else if (OCD == OCD_AllCandidates) {
+ CompleteNonViableCandidate(S, Cand, Args);
+ if (Cand->Function || Cand->IsSurrogate)
+ Cands.push_back(Cand);
+ // Otherwise, this is a non-viable builtin candidate. We do not, in general,
+ // want to list every possible builtin candidate.
+ }
+ }
+
+ std::sort(Cands.begin(), Cands.end(),
+ CompareOverloadCandidatesForDisplay(S));
+
+ bool ReportedAmbiguousConversions = false;
+
+ SmallVectorImpl<OverloadCandidate*>::iterator I, E;
+ const DiagnosticsEngine::OverloadsShown ShowOverloads =
+ S.Diags.getShowOverloads();
+ unsigned CandsShown = 0;
+ for (I = Cands.begin(), E = Cands.end(); I != E; ++I) {
+ OverloadCandidate *Cand = *I;
+
+ // Set an arbitrary limit on the number of candidate functions we'll spam
+ // the user with. FIXME: This limit should depend on details of the
+ // candidate list.
+ if (CandsShown >= 4 && ShowOverloads == DiagnosticsEngine::Ovl_Best) {
+ break;
+ }
+ ++CandsShown;
+
+ if (Cand->Function)
+ NoteFunctionCandidate(S, Cand, Args.size());
+ else if (Cand->IsSurrogate)
+ NoteSurrogateCandidate(S, Cand);
+ else {
+ assert(Cand->Viable &&
+ "Non-viable built-in candidates are not added to Cands.");
+ // Generally we only see ambiguities including viable builtin
+ // operators if overload resolution got screwed up by an
+ // ambiguous user-defined conversion.
+ //
+ // FIXME: It's quite possible for different conversions to see
+ // different ambiguities, though.
+ if (!ReportedAmbiguousConversions) {
+ NoteAmbiguousUserConversions(S, OpLoc, Cand);
+ ReportedAmbiguousConversions = true;
+ }
+
+ // If this is a viable builtin, print it.
+ NoteBuiltinOperatorCandidate(S, Opc, OpLoc, Cand);
+ }
+ }
+
+ if (I != E)
+ S.Diag(OpLoc, diag::note_ovl_too_many_candidates) << int(E - I);
+}
+
+// [PossiblyAFunctionType] --> [Return]
+// NonFunctionType --> NonFunctionType
+// R (A) --> R(A)
+// R (*)(A) --> R (A)
+// R (&)(A) --> R (A)
+// R (S::*)(A) --> R (A)
+QualType Sema::ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType) {
+ QualType Ret = PossiblyAFunctionType;
+ if (const PointerType *ToTypePtr =
+ PossiblyAFunctionType->getAs<PointerType>())
+ Ret = ToTypePtr->getPointeeType();
+ else if (const ReferenceType *ToTypeRef =
+ PossiblyAFunctionType->getAs<ReferenceType>())
+ Ret = ToTypeRef->getPointeeType();
+ else if (const MemberPointerType *MemTypePtr =
+ PossiblyAFunctionType->getAs<MemberPointerType>())
+ Ret = MemTypePtr->getPointeeType();
+ Ret =
+ Context.getCanonicalType(Ret).getUnqualifiedType();
+ return Ret;
+}
+
+// A helper class for address-of-function resolution; it saves us from
+// passing around all those ugly parameters.
+class AddressOfFunctionResolver
+{
+ Sema& S;
+ Expr* SourceExpr;
+ const QualType& TargetType;
+ QualType TargetFunctionType; // Extracted function type from target type
+
+ bool Complain;
+ //DeclAccessPair& ResultFunctionAccessPair;
+ ASTContext& Context;
+
+ bool TargetTypeIsNonStaticMemberFunction;
+ bool FoundNonTemplateFunction;
+
+ OverloadExpr::FindResult OvlExprInfo;
+ OverloadExpr *OvlExpr;
+ TemplateArgumentListInfo OvlExplicitTemplateArgs;
+ SmallVector<std::pair<DeclAccessPair, FunctionDecl*>, 4> Matches;
+
+public:
+ AddressOfFunctionResolver(Sema &S, Expr* SourceExpr,
+ const QualType& TargetType, bool Complain)
+ : S(S), SourceExpr(SourceExpr), TargetType(TargetType),
+ Complain(Complain), Context(S.getASTContext()),
+ TargetTypeIsNonStaticMemberFunction(
+ !!TargetType->getAs<MemberPointerType>()),
+ FoundNonTemplateFunction(false),
+ OvlExprInfo(OverloadExpr::find(SourceExpr)),
+ OvlExpr(OvlExprInfo.Expression)
+ {
+ ExtractUnqualifiedFunctionTypeFromTargetType();
+
+ if (!TargetFunctionType->isFunctionType()) {
+ if (OvlExpr->hasExplicitTemplateArgs()) {
+ DeclAccessPair dap;
+ if (FunctionDecl* Fn = S.ResolveSingleFunctionTemplateSpecialization(
+ OvlExpr, false, &dap) ) {
+
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn)) {
+ if (!Method->isStatic()) {
+ // If the target type is a non-function type and the function
+ // found is a non-static member function, pretend that was the
+ // target; it's the only possible type to end up with.
+ TargetTypeIsNonStaticMemberFunction = true;
+
+ // And skip adding the function if it's not in the proper form.
+ // We'll diagnose this due to an empty set of functions.
+ if (!OvlExprInfo.HasFormOfMemberPointer)
+ return;
+ }
+ }
+
+ Matches.push_back(std::make_pair(dap,Fn));
+ }
+ }
+ return;
+ }
+
+ if (OvlExpr->hasExplicitTemplateArgs())
+ OvlExpr->getExplicitTemplateArgs().copyInto(OvlExplicitTemplateArgs);
+
+ if (FindAllFunctionsThatMatchTargetTypeExactly()) {
+ // C++ [over.over]p4:
+ // If more than one function is selected, [...]
+ if (Matches.size() > 1) {
+ if (FoundNonTemplateFunction)
+ EliminateAllTemplateMatches();
+ else
+ EliminateAllExceptMostSpecializedTemplate();
+ }
+ }
+ }
+
+private:
+ bool isTargetTypeAFunction() const {
+ return TargetFunctionType->isFunctionType();
+ }
+
+ // [ToType] [Return]
+
+ // R (*)(A) --> R (A), IsNonStaticMemberFunction = false
+ // R (&)(A) --> R (A), IsNonStaticMemberFunction = false
+ // R (S::*)(A) --> R (A), IsNonStaticMemberFunction = true
+ void inline ExtractUnqualifiedFunctionTypeFromTargetType() {
+ TargetFunctionType = S.ExtractUnqualifiedFunctionType(TargetType);
+ }
+
+ // return true if any matching specializations were found
+ bool AddMatchingTemplateFunction(FunctionTemplateDecl* FunctionTemplate,
+ const DeclAccessPair& CurAccessFunPair) {
+ if (CXXMethodDecl *Method
+ = dyn_cast<CXXMethodDecl>(FunctionTemplate->getTemplatedDecl())) {
+ // Skip non-static function templates when converting to pointer, and
+ // static when converting to member pointer.
+ if (Method->isStatic() == TargetTypeIsNonStaticMemberFunction)
+ return false;
+ }
+ else if (TargetTypeIsNonStaticMemberFunction)
+ return false;
+
+ // C++ [over.over]p2:
+ // If the name is a function template, template argument deduction is
+ // done (14.8.2.2), and if the argument deduction succeeds, the
+ // resulting template argument list is used to generate a single
+ // function template specialization, which is added to the set of
+ // overloaded functions considered.
+ FunctionDecl *Specialization = 0;
+ TemplateDeductionInfo Info(Context, OvlExpr->getNameLoc());
+ if (Sema::TemplateDeductionResult Result
+ = S.DeduceTemplateArguments(FunctionTemplate,
+ &OvlExplicitTemplateArgs,
+ TargetFunctionType, Specialization,
+ Info)) {
+ // FIXME: make a note of the failed deduction for diagnostics.
+ (void)Result;
+ return false;
+ }
+
+ // Template argument deduction ensures that we have an exact match.
+ // This function template specialization works.
+ Specialization = cast<FunctionDecl>(Specialization->getCanonicalDecl());
+ assert(TargetFunctionType
+ == Context.getCanonicalType(Specialization->getType()));
+ Matches.push_back(std::make_pair(CurAccessFunPair, Specialization));
+ return true;
+ }
+
+ bool AddMatchingNonTemplateFunction(NamedDecl* Fn,
+ const DeclAccessPair& CurAccessFunPair) {
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn)) {
+ // Skip non-static functions when converting to pointer, and static
+ // when converting to member pointer.
+ if (Method->isStatic() == TargetTypeIsNonStaticMemberFunction)
+ return false;
+ }
+ else if (TargetTypeIsNonStaticMemberFunction)
+ return false;
+
+ if (FunctionDecl *FunDecl = dyn_cast<FunctionDecl>(Fn)) {
+ if (S.getLangOpts().CUDA)
+ if (FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext))
+ if (S.CheckCUDATarget(Caller, FunDecl))
+ return false;
+
+ QualType ResultTy;
+ if (Context.hasSameUnqualifiedType(TargetFunctionType,
+ FunDecl->getType()) ||
+ S.IsNoReturnConversion(FunDecl->getType(), TargetFunctionType,
+ ResultTy)) {
+ Matches.push_back(std::make_pair(CurAccessFunPair,
+ cast<FunctionDecl>(FunDecl->getCanonicalDecl())));
+ FoundNonTemplateFunction = true;
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ bool FindAllFunctionsThatMatchTargetTypeExactly() {
+ bool Ret = false;
+
+ // If the overload expression doesn't have the form of a pointer to
+ // member, don't try to convert it to a pointer-to-member type.
+ if (IsInvalidFormOfPointerToMemberFunction())
+ return false;
+
+ for (UnresolvedSetIterator I = OvlExpr->decls_begin(),
+ E = OvlExpr->decls_end();
+ I != E; ++I) {
+ // Look through any using declarations to find the underlying function.
+ NamedDecl *Fn = (*I)->getUnderlyingDecl();
+
+ // C++ [over.over]p3:
+ // Non-member functions and static member functions match
+ // targets of type "pointer-to-function" or "reference-to-function."
+ // Nonstatic member functions match targets of
+ // type "pointer-to-member-function."
+ // Note that according to DR 247, the containing class does not matter.
+ if (FunctionTemplateDecl *FunctionTemplate
+ = dyn_cast<FunctionTemplateDecl>(Fn)) {
+ if (AddMatchingTemplateFunction(FunctionTemplate, I.getPair()))
+ Ret = true;
+ }
+ // If we have explicit template arguments supplied, skip non-templates.
+ else if (!OvlExpr->hasExplicitTemplateArgs() &&
+ AddMatchingNonTemplateFunction(Fn, I.getPair()))
+ Ret = true;
+ }
+ assert(Ret || Matches.empty());
+ return Ret;
+ }
+
+ void EliminateAllExceptMostSpecializedTemplate() {
+ // [...] and any given function template specialization F1 is
+ // eliminated if the set contains a second function template
+ // specialization whose function template is more specialized
+ // than the function template of F1 according to the partial
+ // ordering rules of 14.5.5.2.
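+ //
+ // For example, given
+ //   template<class T> int f(T);   // #1
+ //   template<class T> int f(T*);  // #2
+ //   int (*p)(int*) = &f;          // both match; #2 is more specialized,
+ //                                 // so #1 is eliminated and #2 is chosen.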
+
+ // The algorithm specified above is quadratic. We instead use a
+ // two-pass algorithm (similar to the one used to identify the
+ // best viable function in an overload set) that identifies the
+ // best function template (if it exists).
+
+ UnresolvedSet<4> MatchesCopy; // TODO: avoid!
+ for (unsigned I = 0, E = Matches.size(); I != E; ++I)
+ MatchesCopy.addDecl(Matches[I].second, Matches[I].first.getAccess());
+
+ UnresolvedSetIterator Result =
+ S.getMostSpecialized(MatchesCopy.begin(), MatchesCopy.end(),
+ TPOC_Other, 0, SourceExpr->getLocStart(),
+ S.PDiag(),
+ S.PDiag(diag::err_addr_ovl_ambiguous)
+ << Matches[0].second->getDeclName(),
+ S.PDiag(diag::note_ovl_candidate)
+ << (unsigned) oc_function_template,
+ Complain, TargetFunctionType);
+
+ if (Result != MatchesCopy.end()) {
+ // Make it the first and only element
+ Matches[0].first = Matches[Result - MatchesCopy.begin()].first;
+ Matches[0].second = cast<FunctionDecl>(*Result);
+ Matches.resize(1);
+ }
+ }
+
+ void EliminateAllTemplateMatches() {
+ // [...] any function template specializations in the set are
+ // eliminated if the set also contains a non-template function, [...]
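+ //
+ // For example, given
+ //   int g(int);                  // non-template
+ //   template<class T> int g(T);  // function template
+ //   int (*q)(int) = &g;          // g<int> is eliminated; the non-template
+ //                                // g(int) is chosen.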
+ for (unsigned I = 0, N = Matches.size(); I != N; ) {
+ if (Matches[I].second->getPrimaryTemplate() == 0)
+ ++I;
+ else {
+ Matches[I] = Matches[--N];
+ Matches.set_size(N);
+ }
+ }
+ }
+
+public:
+ void ComplainNoMatchesFound() const {
+ assert(Matches.empty());
+ S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_no_viable)
+ << OvlExpr->getName() << TargetFunctionType
+ << OvlExpr->getSourceRange();
+ S.NoteAllOverloadCandidates(OvlExpr, TargetFunctionType);
+ }
+
+ bool IsInvalidFormOfPointerToMemberFunction() const {
+ return TargetTypeIsNonStaticMemberFunction &&
+ !OvlExprInfo.HasFormOfMemberPointer;
+ }
+
+ void ComplainIsInvalidFormOfPointerToMemberFunction() const {
+ // TODO: Should we condition this on whether any functions might
+ // have matched, or is it more appropriate to do that in callers?
+ // TODO: a fixit wouldn't hurt.
+ S.Diag(OvlExpr->getNameLoc(), diag::err_addr_ovl_no_qualifier)
+ << TargetType << OvlExpr->getSourceRange();
+ }
+
+ void ComplainOfInvalidConversion() const {
+ S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_not_func_ptrref)
+ << OvlExpr->getName() << TargetType;
+ }
+
+ void ComplainMultipleMatchesFound() const {
+ assert(Matches.size() > 1);
+ S.Diag(OvlExpr->getLocStart(), diag::err_addr_ovl_ambiguous)
+ << OvlExpr->getName()
+ << OvlExpr->getSourceRange();
+ S.NoteAllOverloadCandidates(OvlExpr, TargetFunctionType);
+ }
+
+ bool hadMultipleCandidates() const { return (OvlExpr->getNumDecls() > 1); }
+
+ int getNumMatches() const { return Matches.size(); }
+
+ FunctionDecl* getMatchingFunctionDecl() const {
+ if (Matches.size() != 1) return 0;
+ return Matches[0].second;
+ }
+
+ const DeclAccessPair* getMatchingFunctionAccessPair() const {
+ if (Matches.size() != 1) return 0;
+ return &Matches[0].first;
+ }
+};
+
+/// ResolveAddressOfOverloadedFunction - Try to resolve the address of
+/// an overloaded function (C++ [over.over]), where @p From is an
+/// expression with overloaded function type and @p ToType is the type
+/// we're trying to resolve to. For example:
+///
+/// @code
+/// int f(double);
+/// int f(int);
+///
+/// int (*pfd)(double) = f; // selects f(double)
+/// @endcode
+///
+/// This routine returns the resulting FunctionDecl if it could be
+/// resolved, and NULL otherwise. When @p Complain is true, this
+/// routine will emit diagnostics if there is an error.
+FunctionDecl *
+Sema::ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
+ QualType TargetType,
+ bool Complain,
+ DeclAccessPair &FoundResult,
+ bool *pHadMultipleCandidates) {
+ assert(AddressOfExpr->getType() == Context.OverloadTy);
+
+ AddressOfFunctionResolver Resolver(*this, AddressOfExpr, TargetType,
+ Complain);
+ int NumMatches = Resolver.getNumMatches();
+ FunctionDecl* Fn = 0;
+ if (NumMatches == 0 && Complain) {
+ if (Resolver.IsInvalidFormOfPointerToMemberFunction())
+ Resolver.ComplainIsInvalidFormOfPointerToMemberFunction();
+ else
+ Resolver.ComplainNoMatchesFound();
+ }
+ else if (NumMatches > 1 && Complain)
+ Resolver.ComplainMultipleMatchesFound();
+ else if (NumMatches == 1) {
+ Fn = Resolver.getMatchingFunctionDecl();
+ assert(Fn);
+ FoundResult = *Resolver.getMatchingFunctionAccessPair();
+ MarkFunctionReferenced(AddressOfExpr->getLocStart(), Fn);
+ if (Complain)
+ CheckAddressOfMemberAccess(AddressOfExpr, FoundResult);
+ }
+
+ if (pHadMultipleCandidates)
+ *pHadMultipleCandidates = Resolver.hadMultipleCandidates();
+ return Fn;
+}
+
+/// \brief Given an expression that refers to an overloaded function, try to
+/// resolve that overloaded function expression down to a single function.
+///
+/// This routine can only resolve template-ids that refer to a single function
+/// template whose template arguments are either provided by the template-id
+/// or have defaults, as described in C++0x [temp.arg.explicit]p3.
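+///
+/// For example:
+/// @code
+/// template<typename T> void f(T);
+///
+/// void (*p)(int) = &f<int>; // f<int>, with T given explicitly, identifies
+///                           // the single specialization f<int>(int)
+/// @endcode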
+FunctionDecl *
+Sema::ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
+ bool Complain,
+ DeclAccessPair *FoundResult) {
+ // C++ [over.over]p1:
+ // [...] [Note: any redundant set of parentheses surrounding the
+ // overloaded function name is ignored (5.1). ]
+ // C++ [over.over]p1:
+ // [...] The overloaded function name can be preceded by the &
+ // operator.
+
+ // If we didn't actually find any template-ids, we're done.
+ if (!ovl->hasExplicitTemplateArgs())
+ return 0;
+
+ TemplateArgumentListInfo ExplicitTemplateArgs;
+ ovl->getExplicitTemplateArgs().copyInto(ExplicitTemplateArgs);
+
+ // Look through all of the overloaded functions, searching for one
+ // whose type matches exactly.
+ FunctionDecl *Matched = 0;
+ for (UnresolvedSetIterator I = ovl->decls_begin(),
+ E = ovl->decls_end(); I != E; ++I) {
+ // C++0x [temp.arg.explicit]p3:
+ // [...] In contexts where deduction is done and fails, or in contexts
+ // where deduction is not done, if a template argument list is
+ // specified and it, along with any default template arguments,
+ // identifies a single function template specialization, then the
+ // template-id is an lvalue for the function template specialization.
+ FunctionTemplateDecl *FunctionTemplate
+ = cast<FunctionTemplateDecl>((*I)->getUnderlyingDecl());
+
+ // C++ [over.over]p2:
+ // If the name is a function template, template argument deduction is
+ // done (14.8.2.2), and if the argument deduction succeeds, the
+ // resulting template argument list is used to generate a single
+ // function template specialization, which is added to the set of
+ // overloaded functions considered.
+ FunctionDecl *Specialization = 0;
+ TemplateDeductionInfo Info(Context, ovl->getNameLoc());
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArguments(FunctionTemplate, &ExplicitTemplateArgs,
+ Specialization, Info)) {
+ // FIXME: make a note of the failed deduction for diagnostics.
+ (void)Result;
+ continue;
+ }
+
+ assert(Specialization && "no specialization and no error?");
+
+ // Multiple matches; we can't resolve to a single declaration.
+ if (Matched) {
+ if (Complain) {
+ Diag(ovl->getExprLoc(), diag::err_addr_ovl_ambiguous)
+ << ovl->getName();
+ NoteAllOverloadCandidates(ovl);
+ }
+ return 0;
+ }
+
+ Matched = Specialization;
+ if (FoundResult) *FoundResult = I.getPair();
+ }
+
+ return Matched;
+}
+
+// Resolve and fix an overloaded expression that can be resolved
+// because it identifies a single function template specialization.
+//
+// Last three arguments should only be supplied if Complain = true
+//
+// Return true if it was logically possible to so resolve the
+// expression, regardless of whether or not it succeeded. Always
+// returns true if 'complain' is set.
+bool Sema::ResolveAndFixSingleFunctionTemplateSpecialization(
+ ExprResult &SrcExpr, bool doFunctionPointerConverion,
+ bool complain, const SourceRange& OpRangeForComplaining,
+ QualType DestTypeForComplaining,
+ unsigned DiagIDForComplaining) {
+ assert(SrcExpr.get()->getType() == Context.OverloadTy);
+
+ OverloadExpr::FindResult ovl = OverloadExpr::find(SrcExpr.get());
+
+ DeclAccessPair found;
+ ExprResult SingleFunctionExpression;
+ if (FunctionDecl *fn = ResolveSingleFunctionTemplateSpecialization(
+ ovl.Expression, /*complain*/ false, &found)) {
+ if (DiagnoseUseOfDecl(fn, SrcExpr.get()->getLocStart())) {
+ SrcExpr = ExprError();
+ return true;
+ }
+
+ // It is only correct to resolve to an instance method if we're
+ // resolving a form that's permitted to be a pointer to member.
+ // Otherwise we'll end up making a bound member expression, which
+ // is illegal in all the contexts we resolve like this.
+ if (!ovl.HasFormOfMemberPointer &&
+ isa<CXXMethodDecl>(fn) &&
+ cast<CXXMethodDecl>(fn)->isInstance()) {
+ if (!complain) return false;
+
+ Diag(ovl.Expression->getExprLoc(),
+ diag::err_bound_member_function)
+ << 0 << ovl.Expression->getSourceRange();
+
+ // TODO: I believe we only end up here if there's a mix of
+ // static and non-static candidates (otherwise the expression
+ // would have 'bound member' type, not 'overload' type).
+ // Ideally we would note which candidate was chosen and why
+ // the static candidates were rejected.
+ SrcExpr = ExprError();
+ return true;
+ }
+
+ // Fix the expression to refer to 'fn'.
+ SingleFunctionExpression =
+ Owned(FixOverloadedFunctionReference(SrcExpr.take(), found, fn));
+
+ // If desired, do function-to-pointer decay.
+ if (doFunctionPointerConverion) {
+ SingleFunctionExpression =
+ DefaultFunctionArrayLvalueConversion(SingleFunctionExpression.take());
+ if (SingleFunctionExpression.isInvalid()) {
+ SrcExpr = ExprError();
+ return true;
+ }
+ }
+ }
+
+ if (!SingleFunctionExpression.isUsable()) {
+ if (complain) {
+ Diag(OpRangeForComplaining.getBegin(), DiagIDForComplaining)
+ << ovl.Expression->getName()
+ << DestTypeForComplaining
+ << OpRangeForComplaining
+ << ovl.Expression->getQualifierLoc().getSourceRange();
+ NoteAllOverloadCandidates(SrcExpr.get());
+
+ SrcExpr = ExprError();
+ return true;
+ }
+
+ return false;
+ }
+
+ SrcExpr = SingleFunctionExpression;
+ return true;
+}
+
+/// \brief Add a single candidate to the overload set.
+static void AddOverloadedCallCandidate(Sema &S,
+ DeclAccessPair FoundDecl,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ llvm::ArrayRef<Expr *> Args,
+ OverloadCandidateSet &CandidateSet,
+ bool PartialOverloading,
+ bool KnownValid) {
+ NamedDecl *Callee = FoundDecl.getDecl();
+ if (isa<UsingShadowDecl>(Callee))
+ Callee = cast<UsingShadowDecl>(Callee)->getTargetDecl();
+
+ if (FunctionDecl *Func = dyn_cast<FunctionDecl>(Callee)) {
+ if (ExplicitTemplateArgs) {
+ assert(!KnownValid && "Explicit template arguments?");
+ return;
+ }
+ S.AddOverloadCandidate(Func, FoundDecl, Args, CandidateSet, false,
+ PartialOverloading);
+ return;
+ }
+
+ if (FunctionTemplateDecl *FuncTemplate
+ = dyn_cast<FunctionTemplateDecl>(Callee)) {
+ S.AddTemplateOverloadCandidate(FuncTemplate, FoundDecl,
+ ExplicitTemplateArgs, Args, CandidateSet);
+ return;
+ }
+
+ assert(!KnownValid && "unhandled case in overloaded call candidate");
+}
+
+/// \brief Add the overload candidates named by callee and/or found by argument
+/// dependent lookup to the given overload set.
+void Sema::AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
+ llvm::ArrayRef<Expr *> Args,
+ OverloadCandidateSet &CandidateSet,
+ bool PartialOverloading) {
+
+#ifndef NDEBUG
+ // Verify that ArgumentDependentLookup is consistent with the rules
+ // in C++0x [basic.lookup.argdep]p3:
+ //
+ // Let X be the lookup set produced by unqualified lookup (3.4.1)
+ // and let Y be the lookup set produced by argument dependent
+ // lookup (defined as follows). If X contains
+ //
+ // -- a declaration of a class member, or
+ //
+ // -- a block-scope function declaration that is not a
+ // using-declaration, or
+ //
+ // -- a declaration that is neither a function nor a function
+ // template
+ //
+ // then Y is empty.
+
+ if (ULE->requiresADL()) {
+ for (UnresolvedLookupExpr::decls_iterator I = ULE->decls_begin(),
+ E = ULE->decls_end(); I != E; ++I) {
+ assert(!(*I)->getDeclContext()->isRecord());
+ assert(isa<UsingShadowDecl>(*I) ||
+ !(*I)->getDeclContext()->isFunctionOrMethod());
+ assert((*I)->getUnderlyingDecl()->isFunctionOrFunctionTemplate());
+ }
+ }
+#endif
+
+ // It would be nice to avoid this copy.
+ TemplateArgumentListInfo TABuffer;
+ TemplateArgumentListInfo *ExplicitTemplateArgs = 0;
+ if (ULE->hasExplicitTemplateArgs()) {
+ ULE->copyTemplateArgumentsInto(TABuffer);
+ ExplicitTemplateArgs = &TABuffer;
+ }
+
+ for (UnresolvedLookupExpr::decls_iterator I = ULE->decls_begin(),
+ E = ULE->decls_end(); I != E; ++I)
+ AddOverloadedCallCandidate(*this, I.getPair(), ExplicitTemplateArgs, Args,
+ CandidateSet, PartialOverloading,
+ /*KnownValid*/ true);
+
+ if (ULE->requiresADL())
+ AddArgumentDependentLookupCandidates(ULE->getName(), /*Operator*/ false,
+ ULE->getExprLoc(),
+ Args, ExplicitTemplateArgs,
+ CandidateSet, PartialOverloading,
+ ULE->isStdAssociatedNamespace());
+}
+
+/// Attempt to recover from an ill-formed use of a non-dependent name in a
+/// template, where the non-dependent name was declared after the template
+/// was defined. This is common in code written for compilers which do not
+/// correctly implement two-stage name lookup.
+///
+/// Returns true if a viable candidate was found and a diagnostic was issued.
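+///
+/// For example (the names are illustrative):
+/// @code
+/// template<typename T> void call() { frob(T()); } // dependent call; lookup deferred
+///
+/// void frob(int);            // declared only after the template
+/// template void call<int>(); // 'frob' is not found: it was not visible at
+///                            // the template definition and 'int' has no
+///                            // associated namespaces for ADL
+/// @endcode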
+static bool
+DiagnoseTwoPhaseLookup(Sema &SemaRef, SourceLocation FnLoc,
+ const CXXScopeSpec &SS, LookupResult &R,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ llvm::ArrayRef<Expr *> Args) {
+ if (SemaRef.ActiveTemplateInstantiations.empty() || !SS.isEmpty())
+ return false;
+
+ for (DeclContext *DC = SemaRef.CurContext; DC; DC = DC->getParent()) {
+ if (DC->isTransparentContext())
+ continue;
+
+ SemaRef.LookupQualifiedName(R, DC);
+
+ if (!R.empty()) {
+ R.suppressDiagnostics();
+
+ if (isa<CXXRecordDecl>(DC)) {
+ // Don't diagnose names we find in classes; we get much better
+ // diagnostics for these from DiagnoseEmptyLookup.
+ R.clear();
+ return false;
+ }
+
+ OverloadCandidateSet Candidates(FnLoc);
+ for (LookupResult::iterator I = R.begin(), E = R.end(); I != E; ++I)
+ AddOverloadedCallCandidate(SemaRef, I.getPair(),
+ ExplicitTemplateArgs, Args,
+ Candidates, false, /*KnownValid*/ false);
+
+ OverloadCandidateSet::iterator Best;
+ if (Candidates.BestViableFunction(SemaRef, FnLoc, Best) != OR_Success) {
+ // No viable functions. Don't bother the user with notes for functions
+ // which don't work and shouldn't be found anyway.
+ R.clear();
+ return false;
+ }
+
+ // Find the namespaces where ADL would have looked, and suggest
+ // declaring the function there instead.
+ Sema::AssociatedNamespaceSet AssociatedNamespaces;
+ Sema::AssociatedClassSet AssociatedClasses;
+ SemaRef.FindAssociatedClassesAndNamespaces(Args,
+ AssociatedNamespaces,
+ AssociatedClasses);
+ // Never suggest declaring a function within namespace 'std'.
+ Sema::AssociatedNamespaceSet SuggestedNamespaces;
+ if (DeclContext *Std = SemaRef.getStdNamespace()) {
+ for (Sema::AssociatedNamespaceSet::iterator
+ it = AssociatedNamespaces.begin(),
+ end = AssociatedNamespaces.end(); it != end; ++it) {
+ if (!Std->Encloses(*it))
+ SuggestedNamespaces.insert(*it);
+ }
+ } else {
+ // Lacking the 'std::' namespace, use all of the associated namespaces.
+ SuggestedNamespaces = AssociatedNamespaces;
+ }
+
+ SemaRef.Diag(R.getNameLoc(), diag::err_not_found_by_two_phase_lookup)
+ << R.getLookupName();
+ if (SuggestedNamespaces.empty()) {
+ SemaRef.Diag(Best->Function->getLocation(),
+ diag::note_not_found_by_two_phase_lookup)
+ << R.getLookupName() << 0;
+ } else if (SuggestedNamespaces.size() == 1) {
+ SemaRef.Diag(Best->Function->getLocation(),
+ diag::note_not_found_by_two_phase_lookup)
+ << R.getLookupName() << 1 << *SuggestedNamespaces.begin();
+ } else {
+ // FIXME: It would be useful to list the associated namespaces here,
+ // but the diagnostics infrastructure doesn't provide a way to produce
+ // a localized representation of a list of items.
+ SemaRef.Diag(Best->Function->getLocation(),
+ diag::note_not_found_by_two_phase_lookup)
+ << R.getLookupName() << 2;
+ }
+
+ // Try to recover by calling this function.
+ return true;
+ }
+
+ R.clear();
+ }
+
+ return false;
+}
+
+/// Attempt to recover from ill-formed use of a non-dependent operator in a
+/// template, where the non-dependent operator was declared after the template
+/// was defined.
+///
+/// Returns true if a viable candidate was found and a diagnostic was issued.
+static bool
+DiagnoseTwoPhaseOperatorLookup(Sema &SemaRef, OverloadedOperatorKind Op,
+ SourceLocation OpLoc,
+ llvm::ArrayRef<Expr *> Args) {
+ DeclarationName OpName =
+ SemaRef.Context.DeclarationNames.getCXXOperatorName(Op);
+ LookupResult R(SemaRef, OpName, OpLoc, Sema::LookupOperatorName);
+ return DiagnoseTwoPhaseLookup(SemaRef, OpLoc, CXXScopeSpec(), R,
+ /*ExplicitTemplateArgs=*/0, Args);
+}
+
+namespace {
+// Callback to limit the allowed keywords and to only accept typo corrections
+// that are keywords or whose decls refer to functions (or template functions)
+// that accept the given number of arguments.
+class RecoveryCallCCC : public CorrectionCandidateCallback {
+ public:
+ RecoveryCallCCC(Sema &SemaRef, unsigned NumArgs, bool HasExplicitTemplateArgs)
+ : NumArgs(NumArgs), HasExplicitTemplateArgs(HasExplicitTemplateArgs) {
+ WantTypeSpecifiers = SemaRef.getLangOpts().CPlusPlus;
+ WantRemainingKeywords = false;
+ }
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ if (!candidate.getCorrectionDecl())
+ return candidate.isKeyword();
+
+ for (TypoCorrection::const_decl_iterator DI = candidate.begin(),
+ DIEnd = candidate.end(); DI != DIEnd; ++DI) {
+ FunctionDecl *FD = 0;
+ NamedDecl *ND = (*DI)->getUnderlyingDecl();
+ if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(ND))
+ FD = FTD->getTemplatedDecl();
+ if (!HasExplicitTemplateArgs && !FD) {
+ if (!(FD = dyn_cast<FunctionDecl>(ND)) && isa<ValueDecl>(ND)) {
+ // If the Decl is neither a function nor a template function,
+ // determine if it is a pointer or reference to a function. If so,
+ // check against the number of arguments expected for the pointee.
+ QualType ValType = cast<ValueDecl>(ND)->getType();
+ if (ValType->isAnyPointerType() || ValType->isReferenceType())
+ ValType = ValType->getPointeeType();
+ if (const FunctionProtoType *FPT = ValType->getAs<FunctionProtoType>())
+ if (FPT->getNumArgs() == NumArgs)
+ return true;
+ }
+ }
+ if (FD && FD->getNumParams() >= NumArgs &&
+ FD->getMinRequiredArguments() <= NumArgs)
+ return true;
+ }
+ return false;
+ }
+
+ private:
+ unsigned NumArgs;
+ bool HasExplicitTemplateArgs;
+};
+
+// Callback that effectively disables typo correction.
+class NoTypoCorrectionCCC : public CorrectionCandidateCallback {
+ public:
+ NoTypoCorrectionCCC() {
+ WantTypeSpecifiers = false;
+ WantExpressionKeywords = false;
+ WantCXXNamedCasts = false;
+ WantRemainingKeywords = false;
+ }
+
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ return false;
+ }
+};
+}
+
+/// Attempts to recover from a call where no functions were found.
+///
+/// Returns the recovered call expression if successful, or ExprError()
+/// otherwise.
+static ExprResult
+BuildRecoveryCallExpr(Sema &SemaRef, Scope *S, Expr *Fn,
+ UnresolvedLookupExpr *ULE,
+ SourceLocation LParenLoc,
+ llvm::MutableArrayRef<Expr *> Args,
+ SourceLocation RParenLoc,
+ bool EmptyLookup, bool AllowTypoCorrection) {
+
+ CXXScopeSpec SS;
+ SS.Adopt(ULE->getQualifierLoc());
+ SourceLocation TemplateKWLoc = ULE->getTemplateKeywordLoc();
+
+ TemplateArgumentListInfo TABuffer;
+ TemplateArgumentListInfo *ExplicitTemplateArgs = 0;
+ if (ULE->hasExplicitTemplateArgs()) {
+ ULE->copyTemplateArgumentsInto(TABuffer);
+ ExplicitTemplateArgs = &TABuffer;
+ }
+
+ LookupResult R(SemaRef, ULE->getName(), ULE->getNameLoc(),
+ Sema::LookupOrdinaryName);
+ RecoveryCallCCC Validator(SemaRef, Args.size(), ExplicitTemplateArgs != 0);
+ NoTypoCorrectionCCC RejectAll;
+ CorrectionCandidateCallback *CCC = AllowTypoCorrection ?
+ (CorrectionCandidateCallback*)&Validator :
+ (CorrectionCandidateCallback*)&RejectAll;
+ if (!DiagnoseTwoPhaseLookup(SemaRef, Fn->getExprLoc(), SS, R,
+ ExplicitTemplateArgs, Args) &&
+ (!EmptyLookup ||
+ SemaRef.DiagnoseEmptyLookup(S, SS, R, *CCC,
+ ExplicitTemplateArgs, Args)))
+ return ExprError();
+
+ assert(!R.empty() && "lookup results empty despite recovery");
+
+ // Build an implicit member call if appropriate. Just drop the
+ // casts and such from the call, we don't really care.
+ ExprResult NewFn = ExprError();
+ if ((*R.begin())->isCXXClassMember())
+ NewFn = SemaRef.BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc,
+ R, ExplicitTemplateArgs);
+ else if (ExplicitTemplateArgs || TemplateKWLoc.isValid())
+ NewFn = SemaRef.BuildTemplateIdExpr(SS, TemplateKWLoc, R, false,
+ ExplicitTemplateArgs);
+ else
+ NewFn = SemaRef.BuildDeclarationNameExpr(SS, R, false);
+
+ if (NewFn.isInvalid())
+ return ExprError();
+
+ // This shouldn't cause an infinite loop because we're giving it
+ // an expression with viable lookup results, which should never
+ // end up here.
+ return SemaRef.ActOnCallExpr(/*Scope*/ 0, NewFn.take(), LParenLoc,
+ MultiExprArg(Args.data(), Args.size()),
+ RParenLoc);
+}
+
+/// BuildOverloadedCallExpr - Given the call expression that calls Fn and the
+/// call arguments Args/NumArgs, attempt to resolve the function call down to
+/// a specific function. If overload resolution succeeds, returns the call
+/// expression built for the selected function. Otherwise, emits diagnostics
+/// and returns ExprError(); calls to deleted or unavailable functions are
+/// diagnosed but kept in the AST.
+ExprResult
+Sema::BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
+ SourceLocation LParenLoc,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation RParenLoc,
+ Expr *ExecConfig,
+ bool AllowTypoCorrection) {
+#ifndef NDEBUG
+ if (ULE->requiresADL()) {
+ // To do ADL, we must have found an unqualified name.
+ assert(!ULE->getQualifier() && "qualified name with ADL");
+
+ // We don't perform ADL for implicit declarations of builtins.
+ // Verify that this was correctly set up.
+ FunctionDecl *F;
+ if (ULE->decls_begin() + 1 == ULE->decls_end() &&
+ (F = dyn_cast<FunctionDecl>(*ULE->decls_begin())) &&
+ F->getBuiltinID() && F->isImplicit())
+ llvm_unreachable("performing ADL for builtin");
+
+ // We don't perform ADL in C.
+ assert(getLangOpts().CPlusPlus && "ADL enabled in C");
+ } else
+ assert(!ULE->isStdAssociatedNamespace() &&
+ "std is associated namespace but not doing ADL");
+#endif
+
+ UnbridgedCastsSet UnbridgedCasts;
+ if (checkArgPlaceholdersForOverload(*this, Args, NumArgs, UnbridgedCasts))
+ return ExprError();
+
+ OverloadCandidateSet CandidateSet(Fn->getExprLoc());
+
+ // Add the functions denoted by the callee to the set of candidate
+ // functions, including those from argument-dependent lookup.
+ AddOverloadedCallCandidates(ULE, llvm::makeArrayRef(Args, NumArgs),
+ CandidateSet);
+
+ // If we found nothing, try to recover.
+ // BuildRecoveryCallExpr diagnoses the error itself, so we just bail
+ // out if it fails.
+ if (CandidateSet.empty()) {
+ // In Microsoft mode, if we are inside a template class member function then
+ // create a type dependent CallExpr. The goal is to postpone name lookup
+ // to instantiation time to be able to search into type dependent base
+ // classes.
+ if (getLangOpts().MicrosoftMode && CurContext->isDependentContext() &&
+ (isa<FunctionDecl>(CurContext) || isa<CXXRecordDecl>(CurContext))) {
+ CallExpr *CE = new (Context) CallExpr(Context, Fn, Args, NumArgs,
+ Context.DependentTy, VK_RValue,
+ RParenLoc);
+ CE->setTypeDependent(true);
+ return Owned(CE);
+ }
+ return BuildRecoveryCallExpr(*this, S, Fn, ULE, LParenLoc,
+ llvm::MutableArrayRef<Expr *>(Args, NumArgs),
+ RParenLoc, /*EmptyLookup=*/true,
+ AllowTypoCorrection);
+ }
+
+ UnbridgedCasts.restore();
+
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(*this, Fn->getLocStart(), Best)) {
+ case OR_Success: {
+ FunctionDecl *FDecl = Best->Function;
+ MarkFunctionReferenced(Fn->getExprLoc(), FDecl);
+ CheckUnresolvedLookupAccess(ULE, Best->FoundDecl);
+ DiagnoseUseOfDecl(FDecl, ULE->getNameLoc());
+ Fn = FixOverloadedFunctionReference(Fn, Best->FoundDecl, FDecl);
+ return BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, NumArgs, RParenLoc,
+ ExecConfig);
+ }
+
+ case OR_No_Viable_Function: {
+ // Try to recover by looking for viable functions which the user might
+ // have meant to call.
+ ExprResult Recovery = BuildRecoveryCallExpr(*this, S, Fn, ULE, LParenLoc,
+ llvm::MutableArrayRef<Expr *>(Args, NumArgs),
+ RParenLoc,
+ /*EmptyLookup=*/false,
+ AllowTypoCorrection);
+ if (!Recovery.isInvalid())
+ return Recovery;
+
+ Diag(Fn->getLocStart(),
+ diag::err_ovl_no_viable_function_in_call)
+ << ULE->getName() << Fn->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
+ break;
+ }
+
+ case OR_Ambiguous:
+ Diag(Fn->getLocStart(), diag::err_ovl_ambiguous_call)
+ << ULE->getName() << Fn->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
+ break;
+
+ case OR_Deleted:
+ {
+ Diag(Fn->getLocStart(), diag::err_ovl_deleted_call)
+ << Best->Function->isDeleted()
+ << ULE->getName()
+ << getDeletedOrUnavailableSuffix(Best->Function)
+ << Fn->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
+
+ // We emitted an error for the unavailable/deleted function call but keep
+ // the call in the AST.
+ FunctionDecl *FDecl = Best->Function;
+ Fn = FixOverloadedFunctionReference(Fn, Best->FoundDecl, FDecl);
+ return BuildResolvedCallExpr(Fn, FDecl, LParenLoc, Args, NumArgs,
+ RParenLoc, ExecConfig);
+ }
+ }
+
+ // Overload resolution failed.
+ return ExprError();
+}
+
+static bool IsOverloaded(const UnresolvedSetImpl &Functions) {
+ return Functions.size() > 1 ||
+ (Functions.size() == 1 && isa<FunctionTemplateDecl>(*Functions.begin()));
+}
+
+/// \brief Create a unary operation that may resolve to an overloaded
+/// operator.
+///
+/// \param OpLoc The location of the operator itself (e.g., '*').
+///
+/// \param OpcIn The UnaryOperator::Opcode that describes this
+/// operator.
+///
+/// \param Fns The set of non-member functions that will be
+/// considered by overload resolution. The caller needs to build this
+/// set based on the context using, e.g.,
+/// LookupOverloadedOperatorName() and ArgumentDependentLookup(). This
+/// set should not contain any member functions; those will be added
+/// by CreateOverloadedUnaryOp().
+///
+/// \param Input The input argument.
+ExprResult
+Sema::CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned OpcIn,
+ const UnresolvedSetImpl &Fns,
+ Expr *Input) {
+ UnaryOperator::Opcode Opc = static_cast<UnaryOperator::Opcode>(OpcIn);
+
+ OverloadedOperatorKind Op = UnaryOperator::getOverloadedOperator(Opc);
+ assert(Op != OO_None && "Invalid opcode for overloaded unary operator");
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
+ // TODO: provide better source location info.
+ DeclarationNameInfo OpNameInfo(OpName, OpLoc);
+
+ if (checkPlaceholderForOverload(*this, Input))
+ return ExprError();
+
+ Expr *Args[2] = { Input, 0 };
+ unsigned NumArgs = 1;
+
+ // For post-increment and post-decrement, add the implicit '0' as
+ // the second argument, so that we know this is a post-increment or
+ // post-decrement.
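+ // For example, for 'struct X { X operator++(int); }; X x;' the postfix
+ // expression 'x++' is treated as the call 'x.operator++(0)'.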
+ if (Opc == UO_PostInc || Opc == UO_PostDec) {
+ llvm::APSInt Zero(Context.getTypeSize(Context.IntTy), false);
+ Args[1] = IntegerLiteral::Create(Context, Zero, Context.IntTy,
+ SourceLocation());
+ NumArgs = 2;
+ }
+
+ if (Input->isTypeDependent()) {
+ if (Fns.empty())
+ return Owned(new (Context) UnaryOperator(Input,
+ Opc,
+ Context.DependentTy,
+ VK_RValue, OK_Ordinary,
+ OpLoc));
+
+ CXXRecordDecl *NamingClass = 0; // because lookup ignores member operators
+ UnresolvedLookupExpr *Fn
+ = UnresolvedLookupExpr::Create(Context, NamingClass,
+ NestedNameSpecifierLoc(), OpNameInfo,
+ /*ADL*/ true, IsOverloaded(Fns),
+ Fns.begin(), Fns.end());
+ return Owned(new (Context) CXXOperatorCallExpr(Context, Op, Fn,
+ &Args[0], NumArgs,
+ Context.DependentTy,
+ VK_RValue,
+ OpLoc));
+ }
+
+ // Build an empty overload set.
+ OverloadCandidateSet CandidateSet(OpLoc);
+
+ // Add the candidates from the given function set.
+ AddFunctionCandidates(Fns, llvm::makeArrayRef(Args, NumArgs), CandidateSet,
+ false);
+
+ // Add operator candidates that are member functions.
+ AddMemberOperatorCandidates(Op, OpLoc, &Args[0], NumArgs, CandidateSet);
+
+ // Add candidates from ADL.
+ AddArgumentDependentLookupCandidates(OpName, /*Operator*/ true,
+ OpLoc, llvm::makeArrayRef(Args, NumArgs),
+ /*ExplicitTemplateArgs*/ 0,
+ CandidateSet);
+
+ // Add builtin operator candidates.
+ AddBuiltinOperatorCandidates(Op, OpLoc, &Args[0], NumArgs, CandidateSet);
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(*this, OpLoc, Best)) {
+ case OR_Success: {
+ // We found a built-in operator or an overloaded operator.
+ FunctionDecl *FnDecl = Best->Function;
+
+ if (FnDecl) {
+ // We matched an overloaded operator. Build a call to that
+ // operator.
+
+ MarkFunctionReferenced(OpLoc, FnDecl);
+
+ // Convert the arguments.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FnDecl)) {
+ CheckMemberOperatorAccess(OpLoc, Args[0], 0, Best->FoundDecl);
+
+ ExprResult InputRes =
+ PerformObjectArgumentInitialization(Input, /*Qualifier=*/0,
+ Best->FoundDecl, Method);
+ if (InputRes.isInvalid())
+ return ExprError();
+ Input = InputRes.take();
+ } else {
+ // Convert the arguments.
+ ExprResult InputInit
+ = PerformCopyInitialization(InitializedEntity::InitializeParameter(
+ Context,
+ FnDecl->getParamDecl(0)),
+ SourceLocation(),
+ Input);
+ if (InputInit.isInvalid())
+ return ExprError();
+ Input = InputInit.take();
+ }
+
+ DiagnoseUseOfDecl(Best->FoundDecl, OpLoc);
+
+ // Determine the result type.
+ QualType ResultTy = FnDecl->getResultType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
+
+ // Build the actual expression node.
+ ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl,
+ HadMultipleCandidates, OpLoc);
+ if (FnExpr.isInvalid())
+ return ExprError();
+
+ Args[0] = Input;
+ CallExpr *TheCall =
+ new (Context) CXXOperatorCallExpr(Context, Op, FnExpr.take(),
+ Args, NumArgs, ResultTy, VK, OpLoc);
+
+ if (CheckCallReturnType(FnDecl->getResultType(), OpLoc, TheCall,
+ FnDecl))
+ return ExprError();
+
+ return MaybeBindToTemporary(TheCall);
+ } else {
+ // We matched a built-in operator. Convert the arguments, then
+ // break out so that we will build the appropriate built-in
+ // operator node.
+ ExprResult InputRes =
+ PerformImplicitConversion(Input, Best->BuiltinTypes.ParamTypes[0],
+ Best->Conversions[0], AA_Passing);
+ if (InputRes.isInvalid())
+ return ExprError();
+ Input = InputRes.take();
+ break;
+ }
+ }
+
+ case OR_No_Viable_Function:
+ // This is an erroneous use of an operator which can be overloaded by
+ // a non-member function. Check for non-member operators which were
+ // defined too late to be candidates.
+ if (DiagnoseTwoPhaseOperatorLookup(*this, Op, OpLoc,
+ llvm::makeArrayRef(Args, NumArgs)))
+ // FIXME: Recover by calling the found function.
+ return ExprError();
+
+ // No viable function; fall through to handling this as a
+ // built-in operator, which will produce an error message for us.
+ break;
+
+ case OR_Ambiguous:
+ Diag(OpLoc, diag::err_ovl_ambiguous_oper_unary)
+ << UnaryOperator::getOpcodeStr(Opc)
+ << Input->getType()
+ << Input->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates,
+ llvm::makeArrayRef(Args, NumArgs),
+ UnaryOperator::getOpcodeStr(Opc), OpLoc);
+ return ExprError();
+
+ case OR_Deleted:
+ Diag(OpLoc, diag::err_ovl_deleted_oper)
+ << Best->Function->isDeleted()
+ << UnaryOperator::getOpcodeStr(Opc)
+ << getDeletedOrUnavailableSuffix(Best->Function)
+ << Input->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs),
+ UnaryOperator::getOpcodeStr(Opc), OpLoc);
+ return ExprError();
+ }
+
+ // Either we found no viable overloaded operator or we matched a
+ // built-in operator. In either case, fall through to trying to
+ // build a built-in operation.
+ return CreateBuiltinUnaryOp(OpLoc, Opc, Input);
+}
+
+/// \brief Create a binary operation that may resolve to an overloaded
+/// operator.
+///
+/// \param OpLoc The location of the operator itself (e.g., '+').
+///
+/// \param OpcIn The BinaryOperator::Opcode that describes this
+/// operator.
+///
+/// \param Fns The set of non-member functions that will be
+/// considered by overload resolution. The caller needs to build this
+/// set based on the context using, e.g.,
+/// LookupOverloadedOperatorName() and ArgumentDependentLookup(). This
+/// set should not contain any member functions; those will be added
+/// by CreateOverloadedBinOp().
+///
+/// \param LHS Left-hand argument.
+/// \param RHS Right-hand argument.
+ExprResult
+Sema::CreateOverloadedBinOp(SourceLocation OpLoc,
+ unsigned OpcIn,
+ const UnresolvedSetImpl &Fns,
+ Expr *LHS, Expr *RHS) {
+ Expr *Args[2] = { LHS, RHS };
+ LHS = RHS = 0; // Use only Args from here on instead of the LHS/RHS pair.
+
+ BinaryOperator::Opcode Opc = static_cast<BinaryOperator::Opcode>(OpcIn);
+ OverloadedOperatorKind Op = BinaryOperator::getOverloadedOperator(Opc);
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(Op);
+
+ // If either side is type-dependent, create an appropriate dependent
+ // expression.
+ if (Args[0]->isTypeDependent() || Args[1]->isTypeDependent()) {
+ if (Fns.empty()) {
+ // If there are no functions to store, just build a dependent
+ // BinaryOperator or CompoundAssignment.
+ if (Opc <= BO_Assign || Opc > BO_OrAssign)
+ return Owned(new (Context) BinaryOperator(Args[0], Args[1], Opc,
+ Context.DependentTy,
+ VK_RValue, OK_Ordinary,
+ OpLoc));
+
+ return Owned(new (Context) CompoundAssignOperator(Args[0], Args[1], Opc,
+ Context.DependentTy,
+ VK_LValue,
+ OK_Ordinary,
+ Context.DependentTy,
+ Context.DependentTy,
+ OpLoc));
+ }
+
+ // FIXME: save results of ADL from here?
+ CXXRecordDecl *NamingClass = 0; // because lookup ignores member operators
+ // TODO: provide better source location info in DNLoc component.
+ DeclarationNameInfo OpNameInfo(OpName, OpLoc);
+ UnresolvedLookupExpr *Fn
+ = UnresolvedLookupExpr::Create(Context, NamingClass,
+ NestedNameSpecifierLoc(), OpNameInfo,
+ /*ADL*/ true, IsOverloaded(Fns),
+ Fns.begin(), Fns.end());
+ return Owned(new (Context) CXXOperatorCallExpr(Context, Op, Fn,
+ Args, 2,
+ Context.DependentTy,
+ VK_RValue,
+ OpLoc));
+ }
+
+ // Always do placeholder-like conversions on the RHS.
+ if (checkPlaceholderForOverload(*this, Args[1]))
+ return ExprError();
+
+ // Do placeholder-like conversion on the LHS; note that we should
+ // not get here with a PseudoObject LHS.
+ assert(Args[0]->getObjectKind() != OK_ObjCProperty);
+ if (checkPlaceholderForOverload(*this, Args[0]))
+ return ExprError();
+
+ // If this is the assignment operator, we only perform overload resolution
+ // if the left-hand side is a class or enumeration type. This is actually
+ // a hack. The standard requires that we do overload resolution between the
+ // various built-in candidates, but as DR507 points out, this can lead to
+ // problems. So we do it this way, which pretty much follows what GCC does.
+ // Note that we go the traditional code path for compound assignment forms.
+ if (Opc == BO_Assign && !Args[0]->getType()->isOverloadableType())
+ return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
+
+ // If this is the .* operator, which is not overloadable, just
+ // create a built-in binary operator.
+ if (Opc == BO_PtrMemD)
+ return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
+
+ // Build an empty overload set.
+ OverloadCandidateSet CandidateSet(OpLoc);
+
+ // Add the candidates from the given function set.
+ AddFunctionCandidates(Fns, Args, CandidateSet, false);
+
+ // Add operator candidates that are member functions.
+ AddMemberOperatorCandidates(Op, OpLoc, Args, 2, CandidateSet);
+
+ // Add candidates from ADL.
+ AddArgumentDependentLookupCandidates(OpName, /*Operator*/ true,
+ OpLoc, Args,
+ /*ExplicitTemplateArgs*/ 0,
+ CandidateSet);
+
+ // Add builtin operator candidates.
+ AddBuiltinOperatorCandidates(Op, OpLoc, Args, 2, CandidateSet);
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(*this, OpLoc, Best)) {
+ case OR_Success: {
+ // We found a built-in operator or an overloaded operator.
+ FunctionDecl *FnDecl = Best->Function;
+
+ if (FnDecl) {
+ // We matched an overloaded operator. Build a call to that
+ // operator.
+
+ MarkFunctionReferenced(OpLoc, FnDecl);
+
+ // Convert the arguments.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(FnDecl)) {
+ // Best->Access is only meaningful for class members.
+ CheckMemberOperatorAccess(OpLoc, Args[0], Args[1], Best->FoundDecl);
+
+ ExprResult Arg1 =
+ PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(Context,
+ FnDecl->getParamDecl(0)),
+ SourceLocation(), Owned(Args[1]));
+ if (Arg1.isInvalid())
+ return ExprError();
+
+ ExprResult Arg0 =
+ PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/0,
+ Best->FoundDecl, Method);
+ if (Arg0.isInvalid())
+ return ExprError();
+ Args[0] = Arg0.takeAs<Expr>();
+ Args[1] = RHS = Arg1.takeAs<Expr>();
+ } else {
+ // Convert the arguments.
+ ExprResult Arg0 = PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(Context,
+ FnDecl->getParamDecl(0)),
+ SourceLocation(), Owned(Args[0]));
+ if (Arg0.isInvalid())
+ return ExprError();
+
+ ExprResult Arg1 =
+ PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(Context,
+ FnDecl->getParamDecl(1)),
+ SourceLocation(), Owned(Args[1]));
+ if (Arg1.isInvalid())
+ return ExprError();
+ Args[0] = LHS = Arg0.takeAs<Expr>();
+ Args[1] = RHS = Arg1.takeAs<Expr>();
+ }
+
+ DiagnoseUseOfDecl(Best->FoundDecl, OpLoc);
+
+ // Determine the result type.
+ QualType ResultTy = FnDecl->getResultType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
+
+ // Build the actual expression node.
+ ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl,
+ HadMultipleCandidates, OpLoc);
+ if (FnExpr.isInvalid())
+ return ExprError();
+
+ CXXOperatorCallExpr *TheCall =
+ new (Context) CXXOperatorCallExpr(Context, Op, FnExpr.take(),
+ Args, 2, ResultTy, VK, OpLoc);
+
+ if (CheckCallReturnType(FnDecl->getResultType(), OpLoc, TheCall,
+ FnDecl))
+ return ExprError();
+
+ return MaybeBindToTemporary(TheCall);
+ } else {
+ // We matched a built-in operator. Convert the arguments, then
+ // break out so that we will build the appropriate built-in
+ // operator node.
+ ExprResult ArgsRes0 =
+ PerformImplicitConversion(Args[0], Best->BuiltinTypes.ParamTypes[0],
+ Best->Conversions[0], AA_Passing);
+ if (ArgsRes0.isInvalid())
+ return ExprError();
+ Args[0] = ArgsRes0.take();
+
+ ExprResult ArgsRes1 =
+ PerformImplicitConversion(Args[1], Best->BuiltinTypes.ParamTypes[1],
+ Best->Conversions[1], AA_Passing);
+ if (ArgsRes1.isInvalid())
+ return ExprError();
+ Args[1] = ArgsRes1.take();
+ break;
+ }
+ }
+
+ case OR_No_Viable_Function: {
+ // C++ [over.match.oper]p9:
+ // If the operator is the operator , [...] and there are no
+ // viable functions, then the operator is assumed to be the
+ // built-in operator and interpreted according to clause 5.
+ if (Opc == BO_Comma)
+ break;
+
+    // When the left operand of an assignment or compound-assignment
+    // operator has class type, do not fall through to the built-in
+    // handling; instead, report that no overloaded assignment operator
+    // was found.
+ ExprResult Result = ExprError();
+ if (Args[0]->getType()->isRecordType() &&
+ Opc >= BO_Assign && Opc <= BO_OrAssign) {
+ Diag(OpLoc, diag::err_ovl_no_viable_oper)
+ << BinaryOperator::getOpcodeStr(Opc)
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ } else {
+ // This is an erroneous use of an operator which can be overloaded by
+ // a non-member function. Check for non-member operators which were
+ // defined too late to be candidates.
+ if (DiagnoseTwoPhaseOperatorLookup(*this, Op, OpLoc, Args))
+ // FIXME: Recover by calling the found function.
+ return ExprError();
+
+ // No viable function; try to create a built-in operation, which will
+ // produce an error. Then, show the non-viable candidates.
+ Result = CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
+ }
+ assert(Result.isInvalid() &&
+ "C++ binary operator overloading is missing candidates!");
+ if (Result.isInvalid())
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
+ BinaryOperator::getOpcodeStr(Opc), OpLoc);
+ return move(Result);
+ }
+
+ case OR_Ambiguous:
+ Diag(OpLoc, diag::err_ovl_ambiguous_oper_binary)
+ << BinaryOperator::getOpcodeStr(Opc)
+ << Args[0]->getType() << Args[1]->getType()
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args,
+ BinaryOperator::getOpcodeStr(Opc), OpLoc);
+ return ExprError();
+
+ case OR_Deleted:
+ if (isImplicitlyDeleted(Best->Function)) {
+ CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function);
+ Diag(OpLoc, diag::err_ovl_deleted_special_oper)
+ << getSpecialMember(Method)
+ << BinaryOperator::getOpcodeStr(Opc)
+ << getDeletedOrUnavailableSuffix(Best->Function);
+
+ if (getSpecialMember(Method) != CXXInvalid) {
+ // The user probably meant to call this special member. Just
+ // explain why it's deleted.
+ NoteDeletedFunction(Method);
+ return ExprError();
+ }
+ } else {
+ Diag(OpLoc, diag::err_ovl_deleted_oper)
+ << Best->Function->isDeleted()
+ << BinaryOperator::getOpcodeStr(Opc)
+ << getDeletedOrUnavailableSuffix(Best->Function)
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ }
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
+ BinaryOperator::getOpcodeStr(Opc), OpLoc);
+ return ExprError();
+ }
+
+ // We matched a built-in operator; build it.
+ return CreateBuiltinBinOp(OpLoc, Opc, Args[0], Args[1]);
+}
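
For illustration only (editor's sketch, not part of the patch): the flavours of candidate that the binary-operator routine above collects for a '+' expression. All names below are invented.

// Hypothetical example: member, non-member, and built-in '+' candidates.
struct Meters {
  double value;
  Meters operator+(Meters rhs) const { return Meters{value + rhs.value}; }  // member
};

struct Feet { double value; };

Meters operator+(Meters lhs, Feet rhs) {           // non-member candidate
  return Meters{lhs.value + rhs.value * 0.3048};
}

double test_binary(Meters a, Meters b, Feet f) {
  Meters m1 = a + b;              // member operator+
  Meters m2 = a + f;              // non-member operator+
  double d  = a.value + f.value;  // built-in '+' on double; no class operands
  return m1.value + m2.value + d;
}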
+
+ExprResult
+Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
+ SourceLocation RLoc,
+ Expr *Base, Expr *Idx) {
+ Expr *Args[2] = { Base, Idx };
+ DeclarationName OpName =
+ Context.DeclarationNames.getCXXOperatorName(OO_Subscript);
+
+ // If either side is type-dependent, create an appropriate dependent
+ // expression.
+ if (Args[0]->isTypeDependent() || Args[1]->isTypeDependent()) {
+
+ CXXRecordDecl *NamingClass = 0; // because lookup ignores member operators
+ // CHECKME: no 'operator' keyword?
+ DeclarationNameInfo OpNameInfo(OpName, LLoc);
+ OpNameInfo.setCXXOperatorNameRange(SourceRange(LLoc, RLoc));
+ UnresolvedLookupExpr *Fn
+ = UnresolvedLookupExpr::Create(Context, NamingClass,
+ NestedNameSpecifierLoc(), OpNameInfo,
+ /*ADL*/ true, /*Overloaded*/ false,
+ UnresolvedSetIterator(),
+ UnresolvedSetIterator());
+ // Can't add any actual overloads yet
+
+ return Owned(new (Context) CXXOperatorCallExpr(Context, OO_Subscript, Fn,
+ Args, 2,
+ Context.DependentTy,
+ VK_RValue,
+ RLoc));
+ }
+
+ // Handle placeholders on both operands.
+ if (checkPlaceholderForOverload(*this, Args[0]))
+ return ExprError();
+ if (checkPlaceholderForOverload(*this, Args[1]))
+ return ExprError();
+
+ // Build an empty overload set.
+ OverloadCandidateSet CandidateSet(LLoc);
+
+ // Subscript can only be overloaded as a member function.
+
+ // Add operator candidates that are member functions.
+ AddMemberOperatorCandidates(OO_Subscript, LLoc, Args, 2, CandidateSet);
+
+ // Add builtin operator candidates.
+ AddBuiltinOperatorCandidates(OO_Subscript, LLoc, Args, 2, CandidateSet);
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(*this, LLoc, Best)) {
+ case OR_Success: {
+ // We found a built-in operator or an overloaded operator.
+ FunctionDecl *FnDecl = Best->Function;
+
+ if (FnDecl) {
+ // We matched an overloaded operator. Build a call to that
+ // operator.
+
+ MarkFunctionReferenced(LLoc, FnDecl);
+
+ CheckMemberOperatorAccess(LLoc, Args[0], Args[1], Best->FoundDecl);
+ DiagnoseUseOfDecl(Best->FoundDecl, LLoc);
+
+ // Convert the arguments.
+ CXXMethodDecl *Method = cast<CXXMethodDecl>(FnDecl);
+ ExprResult Arg0 =
+ PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/0,
+ Best->FoundDecl, Method);
+ if (Arg0.isInvalid())
+ return ExprError();
+ Args[0] = Arg0.take();
+
+ // Convert the arguments.
+ ExprResult InputInit
+ = PerformCopyInitialization(InitializedEntity::InitializeParameter(
+ Context,
+ FnDecl->getParamDecl(0)),
+ SourceLocation(),
+ Owned(Args[1]));
+ if (InputInit.isInvalid())
+ return ExprError();
+
+ Args[1] = InputInit.takeAs<Expr>();
+
+ // Determine the result type
+ QualType ResultTy = FnDecl->getResultType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
+
+ // Build the actual expression node.
+ DeclarationNameInfo OpLocInfo(OpName, LLoc);
+ OpLocInfo.setCXXOperatorNameRange(SourceRange(LLoc, RLoc));
+ ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl,
+ HadMultipleCandidates,
+ OpLocInfo.getLoc(),
+ OpLocInfo.getInfo());
+ if (FnExpr.isInvalid())
+ return ExprError();
+
+ CXXOperatorCallExpr *TheCall =
+ new (Context) CXXOperatorCallExpr(Context, OO_Subscript,
+ FnExpr.take(), Args, 2,
+ ResultTy, VK, RLoc);
+
+ if (CheckCallReturnType(FnDecl->getResultType(), LLoc, TheCall,
+ FnDecl))
+ return ExprError();
+
+ return MaybeBindToTemporary(TheCall);
+ } else {
+ // We matched a built-in operator. Convert the arguments, then
+ // break out so that we will build the appropriate built-in
+ // operator node.
+ ExprResult ArgsRes0 =
+ PerformImplicitConversion(Args[0], Best->BuiltinTypes.ParamTypes[0],
+ Best->Conversions[0], AA_Passing);
+ if (ArgsRes0.isInvalid())
+ return ExprError();
+ Args[0] = ArgsRes0.take();
+
+ ExprResult ArgsRes1 =
+ PerformImplicitConversion(Args[1], Best->BuiltinTypes.ParamTypes[1],
+ Best->Conversions[1], AA_Passing);
+ if (ArgsRes1.isInvalid())
+ return ExprError();
+ Args[1] = ArgsRes1.take();
+
+ break;
+ }
+ }
+
+ case OR_No_Viable_Function: {
+ if (CandidateSet.empty())
+ Diag(LLoc, diag::err_ovl_no_oper)
+ << Args[0]->getType() << /*subscript*/ 0
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ else
+ Diag(LLoc, diag::err_ovl_no_viable_subscript)
+ << Args[0]->getType()
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
+ "[]", LLoc);
+ return ExprError();
+ }
+
+ case OR_Ambiguous:
+ Diag(LLoc, diag::err_ovl_ambiguous_oper_binary)
+ << "[]"
+ << Args[0]->getType() << Args[1]->getType()
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args,
+ "[]", LLoc);
+ return ExprError();
+
+ case OR_Deleted:
+ Diag(LLoc, diag::err_ovl_deleted_oper)
+ << Best->Function->isDeleted() << "[]"
+ << getDeletedOrUnavailableSuffix(Best->Function)
+ << Args[0]->getSourceRange() << Args[1]->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args,
+ "[]", LLoc);
+ return ExprError();
+ }
+
+ // We matched a built-in operator; build it.
+ return CreateBuiltinArraySubscriptExpr(Args[0], LLoc, Args[1], RLoc);
+}
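
For illustration only (editor's sketch, not part of the patch): operator[] may only be overloaded as a member function, which is why the routine above adds only member candidates plus the built-ins. All names below are invented.

#include <cstddef>

// Hypothetical example: member operator[] overloads and the built-in form.
struct FixedBuffer {
  int data[16];
  int &operator[](std::size_t i) { return data[i]; }              // non-const overload
  const int &operator[](std::size_t i) const { return data[i]; }  // const overload
};

int test_subscript(FixedBuffer &buf, const FixedBuffer &cbuf, int *raw) {
  buf[0] = 42;      // overloaded member operator[]
  int a = cbuf[1];  // const overload chosen for a const object
  int b = raw[2];   // built-in subscript on a pointer
  return a + b;
}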
+
+/// BuildCallToMemberFunction - Build a call to a member
+/// function. MemExpr is the expression that refers to the member
+/// function (and includes the object parameter), Args/NumArgs are the
+/// arguments to the function call (not including the object
+/// parameter). The caller needs to validate that the member
+/// expression refers to a non-static member function or an overloaded
+/// member function.
+ExprResult
+Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
+ SourceLocation LParenLoc, Expr **Args,
+ unsigned NumArgs, SourceLocation RParenLoc) {
+ assert(MemExprE->getType() == Context.BoundMemberTy ||
+ MemExprE->getType() == Context.OverloadTy);
+
+ // Dig out the member expression. This holds both the object
+ // argument and the member function we're referring to.
+ Expr *NakedMemExpr = MemExprE->IgnoreParens();
+
+ // Determine whether this is a call to a pointer-to-member function.
+ if (BinaryOperator *op = dyn_cast<BinaryOperator>(NakedMemExpr)) {
+ assert(op->getType() == Context.BoundMemberTy);
+ assert(op->getOpcode() == BO_PtrMemD || op->getOpcode() == BO_PtrMemI);
+
+ QualType fnType =
+ op->getRHS()->getType()->castAs<MemberPointerType>()->getPointeeType();
+
+ const FunctionProtoType *proto = fnType->castAs<FunctionProtoType>();
+ QualType resultType = proto->getCallResultType(Context);
+ ExprValueKind valueKind = Expr::getValueKindForType(proto->getResultType());
+
+ // Check that the object type isn't more qualified than the
+ // member function we're calling.
+ Qualifiers funcQuals = Qualifiers::fromCVRMask(proto->getTypeQuals());
+
+ QualType objectType = op->getLHS()->getType();
+ if (op->getOpcode() == BO_PtrMemI)
+ objectType = objectType->castAs<PointerType>()->getPointeeType();
+ Qualifiers objectQuals = objectType.getQualifiers();
+
+ Qualifiers difference = objectQuals - funcQuals;
+ difference.removeObjCGCAttr();
+ difference.removeAddressSpace();
+ if (difference) {
+ std::string qualsString = difference.getAsString();
+ Diag(LParenLoc, diag::err_pointer_to_member_call_drops_quals)
+ << fnType.getUnqualifiedType()
+ << qualsString
+ << (qualsString.find(' ') == std::string::npos ? 1 : 2);
+ }
+
+ CXXMemberCallExpr *call
+ = new (Context) CXXMemberCallExpr(Context, MemExprE, Args, NumArgs,
+ resultType, valueKind, RParenLoc);
+
+ if (CheckCallReturnType(proto->getResultType(),
+ op->getRHS()->getLocStart(),
+ call, 0))
+ return ExprError();
+
+ if (ConvertArgumentsForCall(call, op, 0, proto, Args, NumArgs, RParenLoc))
+ return ExprError();
+
+ return MaybeBindToTemporary(call);
+ }
+
+ UnbridgedCastsSet UnbridgedCasts;
+ if (checkArgPlaceholdersForOverload(*this, Args, NumArgs, UnbridgedCasts))
+ return ExprError();
+
+ MemberExpr *MemExpr;
+ CXXMethodDecl *Method = 0;
+ DeclAccessPair FoundDecl = DeclAccessPair::make(0, AS_public);
+ NestedNameSpecifier *Qualifier = 0;
+ if (isa<MemberExpr>(NakedMemExpr)) {
+ MemExpr = cast<MemberExpr>(NakedMemExpr);
+ Method = cast<CXXMethodDecl>(MemExpr->getMemberDecl());
+ FoundDecl = MemExpr->getFoundDecl();
+ Qualifier = MemExpr->getQualifier();
+ UnbridgedCasts.restore();
+ } else {
+ UnresolvedMemberExpr *UnresExpr = cast<UnresolvedMemberExpr>(NakedMemExpr);
+ Qualifier = UnresExpr->getQualifier();
+
+ QualType ObjectType = UnresExpr->getBaseType();
+ Expr::Classification ObjectClassification
+ = UnresExpr->isArrow()? Expr::Classification::makeSimpleLValue()
+ : UnresExpr->getBase()->Classify(Context);
+
+ // Add overload candidates
+ OverloadCandidateSet CandidateSet(UnresExpr->getMemberLoc());
+
+ // FIXME: avoid copy.
+ TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = 0;
+ if (UnresExpr->hasExplicitTemplateArgs()) {
+ UnresExpr->copyTemplateArgumentsInto(TemplateArgsBuffer);
+ TemplateArgs = &TemplateArgsBuffer;
+ }
+
+ for (UnresolvedMemberExpr::decls_iterator I = UnresExpr->decls_begin(),
+ E = UnresExpr->decls_end(); I != E; ++I) {
+
+ NamedDecl *Func = *I;
+ CXXRecordDecl *ActingDC = cast<CXXRecordDecl>(Func->getDeclContext());
+ if (isa<UsingShadowDecl>(Func))
+ Func = cast<UsingShadowDecl>(Func)->getTargetDecl();
+
+
+ // Microsoft supports direct constructor calls.
+ if (getLangOpts().MicrosoftExt && isa<CXXConstructorDecl>(Func)) {
+ AddOverloadCandidate(cast<CXXConstructorDecl>(Func), I.getPair(),
+ llvm::makeArrayRef(Args, NumArgs), CandidateSet);
+ } else if ((Method = dyn_cast<CXXMethodDecl>(Func))) {
+ // If explicit template arguments were provided, we can't call a
+ // non-template member function.
+ if (TemplateArgs)
+ continue;
+
+ AddMethodCandidate(Method, I.getPair(), ActingDC, ObjectType,
+ ObjectClassification,
+ llvm::makeArrayRef(Args, NumArgs), CandidateSet,
+ /*SuppressUserConversions=*/false);
+ } else {
+ AddMethodTemplateCandidate(cast<FunctionTemplateDecl>(Func),
+ I.getPair(), ActingDC, TemplateArgs,
+ ObjectType, ObjectClassification,
+ llvm::makeArrayRef(Args, NumArgs),
+ CandidateSet,
+                                   /*SuppressUserConversions=*/false);
+ }
+ }
+
+ DeclarationName DeclName = UnresExpr->getMemberName();
+
+ UnbridgedCasts.restore();
+
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(*this, UnresExpr->getLocStart(),
+ Best)) {
+ case OR_Success:
+ Method = cast<CXXMethodDecl>(Best->Function);
+ MarkFunctionReferenced(UnresExpr->getMemberLoc(), Method);
+ FoundDecl = Best->FoundDecl;
+ CheckUnresolvedMemberAccess(UnresExpr, Best->FoundDecl);
+ DiagnoseUseOfDecl(Best->FoundDecl, UnresExpr->getNameLoc());
+ break;
+
+ case OR_No_Viable_Function:
+ Diag(UnresExpr->getMemberLoc(),
+ diag::err_ovl_no_viable_member_function_in_call)
+ << DeclName << MemExprE->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
+ // FIXME: Leaking incoming expressions!
+ return ExprError();
+
+ case OR_Ambiguous:
+ Diag(UnresExpr->getMemberLoc(), diag::err_ovl_ambiguous_member_call)
+ << DeclName << MemExprE->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
+ // FIXME: Leaking incoming expressions!
+ return ExprError();
+
+ case OR_Deleted:
+ Diag(UnresExpr->getMemberLoc(), diag::err_ovl_deleted_member_call)
+ << Best->Function->isDeleted()
+ << DeclName
+ << getDeletedOrUnavailableSuffix(Best->Function)
+ << MemExprE->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
+ // FIXME: Leaking incoming expressions!
+ return ExprError();
+ }
+
+ MemExprE = FixOverloadedFunctionReference(MemExprE, FoundDecl, Method);
+
+ // If overload resolution picked a static member, build a
+ // non-member call based on that function.
+ if (Method->isStatic()) {
+ return BuildResolvedCallExpr(MemExprE, Method, LParenLoc,
+ Args, NumArgs, RParenLoc);
+ }
+
+ MemExpr = cast<MemberExpr>(MemExprE->IgnoreParens());
+ }
+
+ QualType ResultType = Method->getResultType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultType);
+ ResultType = ResultType.getNonLValueExprType(Context);
+
+ assert(Method && "Member call to something that isn't a method?");
+ CXXMemberCallExpr *TheCall =
+ new (Context) CXXMemberCallExpr(Context, MemExprE, Args, NumArgs,
+ ResultType, VK, RParenLoc);
+
+ // Check for a valid return type.
+ if (CheckCallReturnType(Method->getResultType(), MemExpr->getMemberLoc(),
+ TheCall, Method))
+ return ExprError();
+
+ // Convert the object argument (for a non-static member function call).
+ // We only need to do this if there was actually an overload; otherwise
+ // it was done at lookup.
+ if (!Method->isStatic()) {
+ ExprResult ObjectArg =
+ PerformObjectArgumentInitialization(MemExpr->getBase(), Qualifier,
+ FoundDecl, Method);
+ if (ObjectArg.isInvalid())
+ return ExprError();
+ MemExpr->setBase(ObjectArg.take());
+ }
+
+ // Convert the rest of the arguments
+ const FunctionProtoType *Proto =
+ Method->getType()->getAs<FunctionProtoType>();
+ if (ConvertArgumentsForCall(TheCall, MemExpr, Method, Proto, Args, NumArgs,
+ RParenLoc))
+ return ExprError();
+
+ DiagnoseSentinelCalls(Method, LParenLoc, Args, NumArgs);
+
+ if (CheckFunctionCall(Method, TheCall))
+ return ExprError();
+
+ if ((isa<CXXConstructorDecl>(CurContext) ||
+ isa<CXXDestructorDecl>(CurContext)) &&
+ TheCall->getMethodDecl()->isPure()) {
+ const CXXMethodDecl *MD = TheCall->getMethodDecl();
+
+ if (isa<CXXThisExpr>(MemExpr->getBase()->IgnoreParenCasts())) {
+ Diag(MemExpr->getLocStart(),
+ diag::warn_call_to_pure_virtual_member_function_from_ctor_dtor)
+ << MD->getDeclName() << isa<CXXDestructorDecl>(CurContext)
+ << MD->getParent()->getDeclName();
+
+ Diag(MD->getLocStart(), diag::note_previous_decl) << MD->getDeclName();
+ }
+ }
+ return MaybeBindToTemporary(TheCall);
+}
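
For illustration only (editor's sketch, not part of the patch): two of the call shapes the member-call routine above handles, a pointer-to-member call and an overloaded member call that resolves to a static member (which is then built as a non-member call). All names below are invented.

// Hypothetical example: pointer-to-member call and static/non-static overloads.
struct Widget {
  int size() const { return 4; }
  static int describe(double) { return 1; }   // static overload
  int describe(int) { return 2; }             // non-static overload
};

int test_member_calls(Widget &w) {
  int (Widget::*pm)() const = &Widget::size;
  int a = (w.*pm)();         // call through a bound pointer to member function
  int b = w.describe(3.5);   // overload resolution picks the static member
  int c = w.describe(3);     // overload resolution picks the non-static member
  return a + b + c;
}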
+
+/// BuildCallToObjectOfClassType - Build a call to an object of class
+/// type (C++ [over.call.object]), which can end up invoking an
+/// overloaded function call operator (@c operator()) or performing a
+/// user-defined conversion on the object argument.
+ExprResult
+Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
+ SourceLocation LParenLoc,
+ Expr **Args, unsigned NumArgs,
+ SourceLocation RParenLoc) {
+ if (checkPlaceholderForOverload(*this, Obj))
+ return ExprError();
+ ExprResult Object = Owned(Obj);
+
+ UnbridgedCastsSet UnbridgedCasts;
+ if (checkArgPlaceholdersForOverload(*this, Args, NumArgs, UnbridgedCasts))
+ return ExprError();
+
+ assert(Object.get()->getType()->isRecordType() && "Requires object type argument");
+ const RecordType *Record = Object.get()->getType()->getAs<RecordType>();
+
+ // C++ [over.call.object]p1:
+ // If the primary-expression E in the function call syntax
+ // evaluates to a class object of type "cv T", then the set of
+ // candidate functions includes at least the function call
+ // operators of T. The function call operators of T are obtained by
+ // ordinary lookup of the name operator() in the context of
+ // (E).operator().
+ OverloadCandidateSet CandidateSet(LParenLoc);
+ DeclarationName OpName = Context.DeclarationNames.getCXXOperatorName(OO_Call);
+
+ if (RequireCompleteType(LParenLoc, Object.get()->getType(),
+ PDiag(diag::err_incomplete_object_call)
+ << Object.get()->getSourceRange()))
+ return true;
+
+ LookupResult R(*this, OpName, LParenLoc, LookupOrdinaryName);
+ LookupQualifiedName(R, Record->getDecl());
+ R.suppressDiagnostics();
+
+ for (LookupResult::iterator Oper = R.begin(), OperEnd = R.end();
+ Oper != OperEnd; ++Oper) {
+ AddMethodCandidate(Oper.getPair(), Object.get()->getType(),
+ Object.get()->Classify(Context), Args, NumArgs, CandidateSet,
+ /*SuppressUserConversions=*/ false);
+ }
+
+ // C++ [over.call.object]p2:
+ // In addition, for each (non-explicit in C++0x) conversion function
+ // declared in T of the form
+ //
+ // operator conversion-type-id () cv-qualifier;
+ //
+ // where cv-qualifier is the same cv-qualification as, or a
+ // greater cv-qualification than, cv, and where conversion-type-id
+ // denotes the type "pointer to function of (P1,...,Pn) returning
+ // R", or the type "reference to pointer to function of
+ // (P1,...,Pn) returning R", or the type "reference to function
+ // of (P1,...,Pn) returning R", a surrogate call function [...]
+ // is also considered as a candidate function. Similarly,
+ // surrogate call functions are added to the set of candidate
+ // functions for each conversion function declared in an
+ // accessible base class provided the function is not hidden
+ // within T by another intervening declaration.
+ const UnresolvedSetImpl *Conversions
+ = cast<CXXRecordDecl>(Record->getDecl())->getVisibleConversionFunctions();
+ for (UnresolvedSetImpl::iterator I = Conversions->begin(),
+ E = Conversions->end(); I != E; ++I) {
+ NamedDecl *D = *I;
+ CXXRecordDecl *ActingContext = cast<CXXRecordDecl>(D->getDeclContext());
+ if (isa<UsingShadowDecl>(D))
+ D = cast<UsingShadowDecl>(D)->getTargetDecl();
+
+ // Skip over templated conversion functions; they aren't
+ // surrogates.
+ if (isa<FunctionTemplateDecl>(D))
+ continue;
+
+ CXXConversionDecl *Conv = cast<CXXConversionDecl>(D);
+ if (!Conv->isExplicit()) {
+ // Strip the reference type (if any) and then the pointer type (if
+ // any) to get down to what might be a function type.
+ QualType ConvType = Conv->getConversionType().getNonReferenceType();
+ if (const PointerType *ConvPtrType = ConvType->getAs<PointerType>())
+ ConvType = ConvPtrType->getPointeeType();
+
+ if (const FunctionProtoType *Proto = ConvType->getAs<FunctionProtoType>())
+ {
+ AddSurrogateCandidate(Conv, I.getPair(), ActingContext, Proto,
+ Object.get(), llvm::makeArrayRef(Args, NumArgs),
+ CandidateSet);
+ }
+ }
+ }
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(*this, Object.get()->getLocStart(),
+ Best)) {
+ case OR_Success:
+ // Overload resolution succeeded; we'll build the appropriate call
+ // below.
+ break;
+
+ case OR_No_Viable_Function:
+ if (CandidateSet.empty())
+ Diag(Object.get()->getLocStart(), diag::err_ovl_no_oper)
+ << Object.get()->getType() << /*call*/ 1
+ << Object.get()->getSourceRange();
+ else
+ Diag(Object.get()->getLocStart(),
+ diag::err_ovl_no_viable_object_call)
+ << Object.get()->getType() << Object.get()->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
+ break;
+
+ case OR_Ambiguous:
+ Diag(Object.get()->getLocStart(),
+ diag::err_ovl_ambiguous_object_call)
+ << Object.get()->getType() << Object.get()->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
+ break;
+
+ case OR_Deleted:
+ Diag(Object.get()->getLocStart(),
+ diag::err_ovl_deleted_object_call)
+ << Best->Function->isDeleted()
+ << Object.get()->getType()
+ << getDeletedOrUnavailableSuffix(Best->Function)
+ << Object.get()->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates,
+ llvm::makeArrayRef(Args, NumArgs));
+ break;
+ }
+
+ if (Best == CandidateSet.end())
+ return true;
+
+ UnbridgedCasts.restore();
+
+ if (Best->Function == 0) {
+ // Since there is no function declaration, this is one of the
+ // surrogate candidates. Dig out the conversion function.
+ CXXConversionDecl *Conv
+ = cast<CXXConversionDecl>(
+ Best->Conversions[0].UserDefined.ConversionFunction);
+
+ CheckMemberOperatorAccess(LParenLoc, Object.get(), 0, Best->FoundDecl);
+ DiagnoseUseOfDecl(Best->FoundDecl, LParenLoc);
+
+ // We selected one of the surrogate functions that converts the
+ // object parameter to a function pointer. Perform the conversion
+ // on the object argument, then let ActOnCallExpr finish the job.
+
+    // Create an implicit member expr to refer to the conversion operator,
+    // and then call it.
+ ExprResult Call = BuildCXXMemberCallExpr(Object.get(), Best->FoundDecl,
+ Conv, HadMultipleCandidates);
+ if (Call.isInvalid())
+ return ExprError();
+ // Record usage of conversion in an implicit cast.
+ Call = Owned(ImplicitCastExpr::Create(Context, Call.get()->getType(),
+ CK_UserDefinedConversion,
+ Call.get(), 0, VK_RValue));
+
+ return ActOnCallExpr(S, Call.get(), LParenLoc, MultiExprArg(Args, NumArgs),
+ RParenLoc);
+ }
+
+ MarkFunctionReferenced(LParenLoc, Best->Function);
+ CheckMemberOperatorAccess(LParenLoc, Object.get(), 0, Best->FoundDecl);
+ DiagnoseUseOfDecl(Best->FoundDecl, LParenLoc);
+
+ // We found an overloaded operator(). Build a CXXOperatorCallExpr
+ // that calls this method, using Object for the implicit object
+ // parameter and passing along the remaining arguments.
+ CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function);
+ const FunctionProtoType *Proto =
+ Method->getType()->getAs<FunctionProtoType>();
+
+ unsigned NumArgsInProto = Proto->getNumArgs();
+ unsigned NumArgsToCheck = NumArgs;
+
+ // Build the full argument list for the method call (the
+ // implicit object parameter is placed at the beginning of the
+ // list).
+ Expr **MethodArgs;
+ if (NumArgs < NumArgsInProto) {
+ NumArgsToCheck = NumArgsInProto;
+ MethodArgs = new Expr*[NumArgsInProto + 1];
+ } else {
+ MethodArgs = new Expr*[NumArgs + 1];
+ }
+ MethodArgs[0] = Object.get();
+ for (unsigned ArgIdx = 0; ArgIdx < NumArgs; ++ArgIdx)
+ MethodArgs[ArgIdx + 1] = Args[ArgIdx];
+
+ DeclarationNameInfo OpLocInfo(
+ Context.DeclarationNames.getCXXOperatorName(OO_Call), LParenLoc);
+ OpLocInfo.setCXXOperatorNameRange(SourceRange(LParenLoc, RParenLoc));
+ ExprResult NewFn = CreateFunctionRefExpr(*this, Method,
+ HadMultipleCandidates,
+ OpLocInfo.getLoc(),
+ OpLocInfo.getInfo());
+ if (NewFn.isInvalid())
+ return true;
+
+ // Once we've built TheCall, all of the expressions are properly
+ // owned.
+ QualType ResultTy = Method->getResultType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
+
+ CXXOperatorCallExpr *TheCall =
+ new (Context) CXXOperatorCallExpr(Context, OO_Call, NewFn.take(),
+ MethodArgs, NumArgs + 1,
+ ResultTy, VK, RParenLoc);
+ delete [] MethodArgs;
+
+ if (CheckCallReturnType(Method->getResultType(), LParenLoc, TheCall,
+ Method))
+ return true;
+
+ // We may have default arguments. If so, we need to allocate more
+ // slots in the call for them.
+ if (NumArgs < NumArgsInProto)
+ TheCall->setNumArgs(Context, NumArgsInProto + 1);
+ else if (NumArgs > NumArgsInProto)
+ NumArgsToCheck = NumArgsInProto;
+
+ bool IsError = false;
+
+ // Initialize the implicit object parameter.
+ ExprResult ObjRes =
+ PerformObjectArgumentInitialization(Object.get(), /*Qualifier=*/0,
+ Best->FoundDecl, Method);
+ if (ObjRes.isInvalid())
+ IsError = true;
+ else
+ Object = move(ObjRes);
+ TheCall->setArg(0, Object.take());
+
+ // Check the argument types.
+ for (unsigned i = 0; i != NumArgsToCheck; i++) {
+ Expr *Arg;
+ if (i < NumArgs) {
+ Arg = Args[i];
+
+ // Pass the argument.
+
+ ExprResult InputInit
+ = PerformCopyInitialization(InitializedEntity::InitializeParameter(
+ Context,
+ Method->getParamDecl(i)),
+ SourceLocation(), Arg);
+
+ IsError |= InputInit.isInvalid();
+ Arg = InputInit.takeAs<Expr>();
+ } else {
+ ExprResult DefArg
+ = BuildCXXDefaultArgExpr(LParenLoc, Method, Method->getParamDecl(i));
+ if (DefArg.isInvalid()) {
+ IsError = true;
+ break;
+ }
+
+ Arg = DefArg.takeAs<Expr>();
+ }
+
+ TheCall->setArg(i + 1, Arg);
+ }
+
+ // If this is a variadic call, handle args passed through "...".
+ if (Proto->isVariadic()) {
+ // Promote the arguments (C99 6.5.2.2p7).
+ for (unsigned i = NumArgsInProto; i != NumArgs; i++) {
+ ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod, 0);
+ IsError |= Arg.isInvalid();
+ TheCall->setArg(i + 1, Arg.take());
+ }
+ }
+
+ if (IsError) return true;
+
+ DiagnoseSentinelCalls(Method, LParenLoc, Args, NumArgs);
+
+ if (CheckFunctionCall(Method, TheCall))
+ return true;
+
+ return MaybeBindToTemporary(TheCall);
+}
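
For illustration only (editor's sketch, not part of the patch): the two candidate kinds considered for a call "obj(args)" by the routine above, an overloaded operator() and a surrogate call function produced by a conversion to pointer-to-function. All names below are invented.

// Hypothetical example: call operator vs. surrogate call function.
struct Adder {
  int operator()(int a, int b) const { return a + b; }  // ordinary call operator
};

static int triple(int x) { return 3 * x; }

struct TripleProxy {
  typedef int (*Fn)(int);
  // The conversion to pointer-to-function makes a surrogate call function
  // available, so TripleProxy objects can be "called" directly.
  operator Fn() const { return &triple; }
};

int test_call_object() {
  Adder add;
  TripleProxy proxy;
  return add(1, 2) + proxy(5);  // operator() call vs. surrogate (convert, then call)
}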
+
+/// BuildOverloadedArrowExpr - Build a call to an overloaded @c operator->
+/// (if one exists), where @c Base is an expression of class type and
+/// @c Member is the name of the member we're trying to find.
+ExprResult
+Sema::BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc) {
+ assert(Base->getType()->isRecordType() &&
+ "left-hand side must have class type");
+
+ if (checkPlaceholderForOverload(*this, Base))
+ return ExprError();
+
+ SourceLocation Loc = Base->getExprLoc();
+
+ // C++ [over.ref]p1:
+ //
+ // [...] An expression x->m is interpreted as (x.operator->())->m
+ // for a class object x of type T if T::operator->() exists and if
+ // the operator is selected as the best match function by the
+ // overload resolution mechanism (13.3).
+ DeclarationName OpName =
+ Context.DeclarationNames.getCXXOperatorName(OO_Arrow);
+ OverloadCandidateSet CandidateSet(Loc);
+ const RecordType *BaseRecord = Base->getType()->getAs<RecordType>();
+
+ if (RequireCompleteType(Loc, Base->getType(),
+ PDiag(diag::err_typecheck_incomplete_tag)
+ << Base->getSourceRange()))
+ return ExprError();
+
+ LookupResult R(*this, OpName, OpLoc, LookupOrdinaryName);
+ LookupQualifiedName(R, BaseRecord->getDecl());
+ R.suppressDiagnostics();
+
+ for (LookupResult::iterator Oper = R.begin(), OperEnd = R.end();
+ Oper != OperEnd; ++Oper) {
+ AddMethodCandidate(Oper.getPair(), Base->getType(), Base->Classify(Context),
+ 0, 0, CandidateSet, /*SuppressUserConversions=*/false);
+ }
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ // Perform overload resolution.
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(*this, OpLoc, Best)) {
+ case OR_Success:
+ // Overload resolution succeeded; we'll build the call below.
+ break;
+
+ case OR_No_Viable_Function:
+ if (CandidateSet.empty())
+ Diag(OpLoc, diag::err_typecheck_member_reference_arrow)
+ << Base->getType() << Base->getSourceRange();
+ else
+ Diag(OpLoc, diag::err_ovl_no_viable_oper)
+ << "operator->" << Base->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Base);
+ return ExprError();
+
+ case OR_Ambiguous:
+ Diag(OpLoc, diag::err_ovl_ambiguous_oper_unary)
+ << "->" << Base->getType() << Base->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Base);
+ return ExprError();
+
+ case OR_Deleted:
+ Diag(OpLoc, diag::err_ovl_deleted_oper)
+ << Best->Function->isDeleted()
+ << "->"
+ << getDeletedOrUnavailableSuffix(Best->Function)
+ << Base->getSourceRange();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Base);
+ return ExprError();
+ }
+
+ MarkFunctionReferenced(OpLoc, Best->Function);
+ CheckMemberOperatorAccess(OpLoc, Base, 0, Best->FoundDecl);
+ DiagnoseUseOfDecl(Best->FoundDecl, OpLoc);
+
+ // Convert the object parameter.
+ CXXMethodDecl *Method = cast<CXXMethodDecl>(Best->Function);
+ ExprResult BaseResult =
+ PerformObjectArgumentInitialization(Base, /*Qualifier=*/0,
+ Best->FoundDecl, Method);
+ if (BaseResult.isInvalid())
+ return ExprError();
+ Base = BaseResult.take();
+
+ // Build the operator call.
+ ExprResult FnExpr = CreateFunctionRefExpr(*this, Method,
+ HadMultipleCandidates, OpLoc);
+ if (FnExpr.isInvalid())
+ return ExprError();
+
+ QualType ResultTy = Method->getResultType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
+ CXXOperatorCallExpr *TheCall =
+ new (Context) CXXOperatorCallExpr(Context, OO_Arrow, FnExpr.take(),
+ &Base, 1, ResultTy, VK, OpLoc);
+
+ if (CheckCallReturnType(Method->getResultType(), OpLoc, TheCall,
+ Method))
+ return ExprError();
+
+ return MaybeBindToTemporary(TheCall);
+}
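
For illustration only (editor's sketch, not part of the patch): the rewrite described in the [over.ref] comment above, where x->m on a class object becomes (x.operator->())->m. All names below are invented.

// Hypothetical example: overloaded operator-> on a smart-pointer-like class.
struct Payload { int field; };

struct SmartPtr {
  Payload *raw;
  // operator-> must yield a pointer (or another class with operator->).
  Payload *operator->() const { return raw; }
};

int test_arrow(SmartPtr p) {
  return p->field;  // becomes (p.operator->())->field
}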
+
+/// BuildLiteralOperatorCall - Build a UserDefinedLiteral by creating a call to
+/// a literal operator described by the provided lookup results.
+ExprResult Sema::BuildLiteralOperatorCall(LookupResult &R,
+ DeclarationNameInfo &SuffixInfo,
+ ArrayRef<Expr*> Args,
+ SourceLocation LitEndLoc,
+ TemplateArgumentListInfo *TemplateArgs) {
+ SourceLocation UDSuffixLoc = SuffixInfo.getCXXLiteralOperatorNameLoc();
+
+ OverloadCandidateSet CandidateSet(UDSuffixLoc);
+ AddFunctionCandidates(R.asUnresolvedSet(), Args, CandidateSet, true,
+ TemplateArgs);
+
+ bool HadMultipleCandidates = (CandidateSet.size() > 1);
+
+ // Perform overload resolution. This will usually be trivial, but might need
+ // to perform substitutions for a literal operator template.
+ OverloadCandidateSet::iterator Best;
+ switch (CandidateSet.BestViableFunction(*this, UDSuffixLoc, Best)) {
+ case OR_Success:
+ case OR_Deleted:
+ break;
+
+ case OR_No_Viable_Function:
+ Diag(UDSuffixLoc, diag::err_ovl_no_viable_function_in_call)
+ << R.getLookupName();
+ CandidateSet.NoteCandidates(*this, OCD_AllCandidates, Args);
+ return ExprError();
+
+ case OR_Ambiguous:
+ Diag(R.getNameLoc(), diag::err_ovl_ambiguous_call) << R.getLookupName();
+ CandidateSet.NoteCandidates(*this, OCD_ViableCandidates, Args);
+ return ExprError();
+ }
+
+ FunctionDecl *FD = Best->Function;
+ MarkFunctionReferenced(UDSuffixLoc, FD);
+ DiagnoseUseOfDecl(Best->FoundDecl, UDSuffixLoc);
+
+ ExprResult Fn = CreateFunctionRefExpr(*this, FD, HadMultipleCandidates,
+ SuffixInfo.getLoc(),
+ SuffixInfo.getInfo());
+ if (Fn.isInvalid())
+ return true;
+
+ // Check the argument types. This should almost always be a no-op, except
+ // that array-to-pointer decay is applied to string literals.
+ Expr *ConvArgs[2];
+ for (unsigned ArgIdx = 0; ArgIdx != Args.size(); ++ArgIdx) {
+ ExprResult InputInit = PerformCopyInitialization(
+ InitializedEntity::InitializeParameter(Context, FD->getParamDecl(ArgIdx)),
+ SourceLocation(), Args[ArgIdx]);
+ if (InputInit.isInvalid())
+ return true;
+ ConvArgs[ArgIdx] = InputInit.take();
+ }
+
+ QualType ResultTy = FD->getResultType();
+ ExprValueKind VK = Expr::getValueKindForType(ResultTy);
+ ResultTy = ResultTy.getNonLValueExprType(Context);
+
+ UserDefinedLiteral *UDL =
+ new (Context) UserDefinedLiteral(Context, Fn.take(), ConvArgs, Args.size(),
+ ResultTy, VK, LitEndLoc, UDSuffixLoc);
+
+ if (CheckCallReturnType(FD->getResultType(), UDSuffixLoc, UDL, FD))
+ return ExprError();
+
+ if (CheckFunctionCall(FD, UDL))
+ return ExprError();
+
+ return MaybeBindToTemporary(UDL);
+}
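
For illustration only (editor's sketch, not part of the patch): a user-defined literal whose suffix call is what the routine above constructs; requires C++11. The suffix name is invented.

// Hypothetical example: a literal operator and a use of its suffix.
constexpr long double operator"" _km(long double v) { return v * 1000.0L; }

constexpr long double distance_in_meters = 1.5_km;  // call to operator"" _km(1.5L)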
+
+/// FixOverloadedFunctionReference - E is an expression that refers to
+/// a C++ overloaded function (possibly with some parentheses and
+/// perhaps a '&' around it). We have resolved the overloaded function
+/// to the function declaration Fn, so patch up the expression E to
+/// refer (possibly indirectly) to Fn. Returns the new expr.
+Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
+ FunctionDecl *Fn) {
+ if (ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ Expr *SubExpr = FixOverloadedFunctionReference(PE->getSubExpr(),
+ Found, Fn);
+ if (SubExpr == PE->getSubExpr())
+ return PE;
+
+ return new (Context) ParenExpr(PE->getLParen(), PE->getRParen(), SubExpr);
+ }
+
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ Expr *SubExpr = FixOverloadedFunctionReference(ICE->getSubExpr(),
+ Found, Fn);
+ assert(Context.hasSameType(ICE->getSubExpr()->getType(),
+ SubExpr->getType()) &&
+ "Implicit cast type cannot be determined from overload");
+ assert(ICE->path_empty() && "fixing up hierarchy conversion?");
+ if (SubExpr == ICE->getSubExpr())
+ return ICE;
+
+ return ImplicitCastExpr::Create(Context, ICE->getType(),
+ ICE->getCastKind(),
+ SubExpr, 0,
+ ICE->getValueKind());
+ }
+
+ if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(E)) {
+ assert(UnOp->getOpcode() == UO_AddrOf &&
+ "Can only take the address of an overloaded function");
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn)) {
+ if (Method->isStatic()) {
+ // Do nothing: static member functions aren't any different
+ // from non-member functions.
+ } else {
+ // Fix the sub expression, which really has to be an
+ // UnresolvedLookupExpr holding an overloaded member function
+ // or template.
+ Expr *SubExpr = FixOverloadedFunctionReference(UnOp->getSubExpr(),
+ Found, Fn);
+ if (SubExpr == UnOp->getSubExpr())
+ return UnOp;
+
+ assert(isa<DeclRefExpr>(SubExpr)
+ && "fixed to something other than a decl ref");
+ assert(cast<DeclRefExpr>(SubExpr)->getQualifier()
+ && "fixed to a member ref with no nested name qualifier");
+
+ // We have taken the address of a pointer to member
+ // function. Perform the computation here so that we get the
+ // appropriate pointer to member type.
+ QualType ClassType
+ = Context.getTypeDeclType(cast<RecordDecl>(Method->getDeclContext()));
+ QualType MemPtrType
+ = Context.getMemberPointerType(Fn->getType(), ClassType.getTypePtr());
+
+ return new (Context) UnaryOperator(SubExpr, UO_AddrOf, MemPtrType,
+ VK_RValue, OK_Ordinary,
+ UnOp->getOperatorLoc());
+ }
+ }
+ Expr *SubExpr = FixOverloadedFunctionReference(UnOp->getSubExpr(),
+ Found, Fn);
+ if (SubExpr == UnOp->getSubExpr())
+ return UnOp;
+
+ return new (Context) UnaryOperator(SubExpr, UO_AddrOf,
+ Context.getPointerType(SubExpr->getType()),
+ VK_RValue, OK_Ordinary,
+ UnOp->getOperatorLoc());
+ }
+
+ if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
+ // FIXME: avoid copy.
+ TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = 0;
+ if (ULE->hasExplicitTemplateArgs()) {
+ ULE->copyTemplateArgumentsInto(TemplateArgsBuffer);
+ TemplateArgs = &TemplateArgsBuffer;
+ }
+
+ DeclRefExpr *DRE = DeclRefExpr::Create(Context,
+ ULE->getQualifierLoc(),
+ ULE->getTemplateKeywordLoc(),
+ Fn,
+ /*enclosing*/ false, // FIXME?
+ ULE->getNameLoc(),
+ Fn->getType(),
+ VK_LValue,
+ Found.getDecl(),
+ TemplateArgs);
+ DRE->setHadMultipleCandidates(ULE->getNumDecls() > 1);
+ return DRE;
+ }
+
+ if (UnresolvedMemberExpr *MemExpr = dyn_cast<UnresolvedMemberExpr>(E)) {
+ // FIXME: avoid copy.
+ TemplateArgumentListInfo TemplateArgsBuffer, *TemplateArgs = 0;
+ if (MemExpr->hasExplicitTemplateArgs()) {
+ MemExpr->copyTemplateArgumentsInto(TemplateArgsBuffer);
+ TemplateArgs = &TemplateArgsBuffer;
+ }
+
+ Expr *Base;
+
+ // If we're filling in a static method where we used to have an
+ // implicit member access, rewrite to a simple decl ref.
+ if (MemExpr->isImplicitAccess()) {
+ if (cast<CXXMethodDecl>(Fn)->isStatic()) {
+ DeclRefExpr *DRE = DeclRefExpr::Create(Context,
+ MemExpr->getQualifierLoc(),
+ MemExpr->getTemplateKeywordLoc(),
+ Fn,
+ /*enclosing*/ false,
+ MemExpr->getMemberLoc(),
+ Fn->getType(),
+ VK_LValue,
+ Found.getDecl(),
+ TemplateArgs);
+ DRE->setHadMultipleCandidates(MemExpr->getNumDecls() > 1);
+ return DRE;
+ } else {
+ SourceLocation Loc = MemExpr->getMemberLoc();
+ if (MemExpr->getQualifier())
+ Loc = MemExpr->getQualifierLoc().getBeginLoc();
+ CheckCXXThisCapture(Loc);
+ Base = new (Context) CXXThisExpr(Loc,
+ MemExpr->getBaseType(),
+ /*isImplicit=*/true);
+ }
+ } else
+ Base = MemExpr->getBase();
+
+ ExprValueKind valueKind;
+ QualType type;
+ if (cast<CXXMethodDecl>(Fn)->isStatic()) {
+ valueKind = VK_LValue;
+ type = Fn->getType();
+ } else {
+ valueKind = VK_RValue;
+ type = Context.BoundMemberTy;
+ }
+
+ MemberExpr *ME = MemberExpr::Create(Context, Base,
+ MemExpr->isArrow(),
+ MemExpr->getQualifierLoc(),
+ MemExpr->getTemplateKeywordLoc(),
+ Fn,
+ Found,
+ MemExpr->getMemberNameInfo(),
+ TemplateArgs,
+ type, valueKind, OK_Ordinary);
+ ME->setHadMultipleCandidates(true);
+ return ME;
+ }
+
+ llvm_unreachable("Invalid reference to overloaded function");
+}
+
+ExprResult Sema::FixOverloadedFunctionReference(ExprResult E,
+ DeclAccessPair Found,
+ FunctionDecl *Fn) {
+ return Owned(FixOverloadedFunctionReference((Expr *)E.get(), Found, Fn));
+}
+
+} // end namespace clang
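
For illustration only (editor's sketch, not part of the patch): contexts in which an overloaded function name is later fixed to a single declaration by FixOverloadedFunctionReference above, namely taking the address of an overloaded free function or member function with a known target type. All names below are invented.

// Hypothetical example: overloads resolved from the target type of '&f'.
int  clamp(int x)  { return x; }
long clamp(long x) { return x; }

struct Host {
  int member(int x)  { return x; }
  int member(double) { return 0; }
};

void test_fixup() {
  int (*fp)(int) = &clamp;               // overload picked by the target type
  int (Host::*mp)(int) = &Host::member;  // pointer to overloaded member function
  (void)fp;
  (void)mp;
}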
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp
new file mode 100644
index 0000000..d52c912
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaPseudoObject.cpp
@@ -0,0 +1,1351 @@
+//===--- SemaPseudoObject.cpp - Semantic Analysis for Pseudo-Objects ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for expressions involving
+// pseudo-object references. Pseudo-objects are conceptual objects
+// whose storage is entirely abstract and whose accesses are all
+// translated through some sort of abstraction barrier.
+//
+// For example, Objective-C objects can have "properties", either
+// declared or undeclared. A property may be accessed by writing
+// expr.prop
+// where 'expr' is an r-value of Objective-C pointer type and 'prop'
+// is the name of the property. If this expression is used in a context
+// needing an r-value, it is treated as if it were a message-send
+// of the associated 'getter' selector, typically:
+// [expr prop]
+// If it is used as the LHS of a simple assignment, it is treated
+// as a message-send of the associated 'setter' selector, typically:
+// [expr setProp: RHS]
+// If it is used as the LHS of a compound assignment, or as the operand
+// of a unary increment or decrement, both the getter and the setter are
+// required; for example,
+// 'expr.prop *= 100' would be translated to:
+// [expr setProp: [expr prop] * 100]
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Lex/Preprocessor.h"
+
+using namespace clang;
+using namespace sema;
+
+namespace {
+ // Basically just a very focused copy of TreeTransform.
+ template <class T> struct Rebuilder {
+ Sema &S;
+ Rebuilder(Sema &S) : S(S) {}
+
+ T &getDerived() { return static_cast<T&>(*this); }
+
+ Expr *rebuild(Expr *e) {
+ // Fast path: nothing to look through.
+ if (typename T::specific_type *specific
+ = dyn_cast<typename T::specific_type>(e))
+ return getDerived().rebuildSpecific(specific);
+
+ // Otherwise, we should look through and rebuild anything that
+ // IgnoreParens would.
+
+ if (ParenExpr *parens = dyn_cast<ParenExpr>(e)) {
+ e = rebuild(parens->getSubExpr());
+ return new (S.Context) ParenExpr(parens->getLParen(),
+ parens->getRParen(),
+ e);
+ }
+
+ if (UnaryOperator *uop = dyn_cast<UnaryOperator>(e)) {
+ assert(uop->getOpcode() == UO_Extension);
+ e = rebuild(uop->getSubExpr());
+ return new (S.Context) UnaryOperator(e, uop->getOpcode(),
+ uop->getType(),
+ uop->getValueKind(),
+ uop->getObjectKind(),
+ uop->getOperatorLoc());
+ }
+
+ if (GenericSelectionExpr *gse = dyn_cast<GenericSelectionExpr>(e)) {
+ assert(!gse->isResultDependent());
+ unsigned resultIndex = gse->getResultIndex();
+ unsigned numAssocs = gse->getNumAssocs();
+
+ SmallVector<Expr*, 8> assocs(numAssocs);
+ SmallVector<TypeSourceInfo*, 8> assocTypes(numAssocs);
+
+ for (unsigned i = 0; i != numAssocs; ++i) {
+ Expr *assoc = gse->getAssocExpr(i);
+ if (i == resultIndex) assoc = rebuild(assoc);
+ assocs[i] = assoc;
+ assocTypes[i] = gse->getAssocTypeSourceInfo(i);
+ }
+
+ return new (S.Context) GenericSelectionExpr(S.Context,
+ gse->getGenericLoc(),
+ gse->getControllingExpr(),
+ assocTypes.data(),
+ assocs.data(),
+ numAssocs,
+ gse->getDefaultLoc(),
+ gse->getRParenLoc(),
+ gse->containsUnexpandedParameterPack(),
+ resultIndex);
+ }
+
+ llvm_unreachable("bad expression to rebuild!");
+ }
+ };
+
+ struct ObjCPropertyRefRebuilder : Rebuilder<ObjCPropertyRefRebuilder> {
+ Expr *NewBase;
+ ObjCPropertyRefRebuilder(Sema &S, Expr *newBase)
+ : Rebuilder<ObjCPropertyRefRebuilder>(S), NewBase(newBase) {}
+
+ typedef ObjCPropertyRefExpr specific_type;
+ Expr *rebuildSpecific(ObjCPropertyRefExpr *refExpr) {
+ // Fortunately, the constraint that we're rebuilding something
+ // with a base limits the number of cases here.
+ assert(refExpr->getBase());
+
+ if (refExpr->isExplicitProperty()) {
+ return new (S.Context)
+ ObjCPropertyRefExpr(refExpr->getExplicitProperty(),
+ refExpr->getType(), refExpr->getValueKind(),
+ refExpr->getObjectKind(), refExpr->getLocation(),
+ NewBase);
+ }
+ return new (S.Context)
+ ObjCPropertyRefExpr(refExpr->getImplicitPropertyGetter(),
+ refExpr->getImplicitPropertySetter(),
+ refExpr->getType(), refExpr->getValueKind(),
+ refExpr->getObjectKind(),refExpr->getLocation(),
+ NewBase);
+ }
+ };
+
+ struct ObjCSubscriptRefRebuilder : Rebuilder<ObjCSubscriptRefRebuilder> {
+ Expr *NewBase;
+ Expr *NewKeyExpr;
+ ObjCSubscriptRefRebuilder(Sema &S, Expr *newBase, Expr *newKeyExpr)
+ : Rebuilder<ObjCSubscriptRefRebuilder>(S),
+ NewBase(newBase), NewKeyExpr(newKeyExpr) {}
+
+ typedef ObjCSubscriptRefExpr specific_type;
+ Expr *rebuildSpecific(ObjCSubscriptRefExpr *refExpr) {
+ assert(refExpr->getBaseExpr());
+ assert(refExpr->getKeyExpr());
+
+ return new (S.Context)
+ ObjCSubscriptRefExpr(NewBase,
+ NewKeyExpr,
+ refExpr->getType(), refExpr->getValueKind(),
+ refExpr->getObjectKind(),refExpr->getAtIndexMethodDecl(),
+ refExpr->setAtIndexMethodDecl(),
+ refExpr->getRBracket());
+ }
+ };
+
+ class PseudoOpBuilder {
+ public:
+ Sema &S;
+ unsigned ResultIndex;
+ SourceLocation GenericLoc;
+ SmallVector<Expr *, 4> Semantics;
+
+ PseudoOpBuilder(Sema &S, SourceLocation genericLoc)
+ : S(S), ResultIndex(PseudoObjectExpr::NoResult),
+ GenericLoc(genericLoc) {}
+
+ virtual ~PseudoOpBuilder() {}
+
+ /// Add a normal semantic expression.
+ void addSemanticExpr(Expr *semantic) {
+ Semantics.push_back(semantic);
+ }
+
+ /// Add the 'result' semantic expression.
+ void addResultSemanticExpr(Expr *resultExpr) {
+ assert(ResultIndex == PseudoObjectExpr::NoResult);
+ ResultIndex = Semantics.size();
+ Semantics.push_back(resultExpr);
+ }
+
+ ExprResult buildRValueOperation(Expr *op);
+ ExprResult buildAssignmentOperation(Scope *Sc,
+ SourceLocation opLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS);
+ ExprResult buildIncDecOperation(Scope *Sc, SourceLocation opLoc,
+ UnaryOperatorKind opcode,
+ Expr *op);
+
+ ExprResult complete(Expr *syntacticForm);
+
+ OpaqueValueExpr *capture(Expr *op);
+ OpaqueValueExpr *captureValueAsResult(Expr *op);
+
+ void setResultToLastSemantic() {
+ assert(ResultIndex == PseudoObjectExpr::NoResult);
+ ResultIndex = Semantics.size() - 1;
+ }
+
+ /// Return true if assignments have a non-void result.
+ virtual bool assignmentsHaveResult() { return true; }
+
+ virtual Expr *rebuildAndCaptureObject(Expr *) = 0;
+ virtual ExprResult buildGet() = 0;
+ virtual ExprResult buildSet(Expr *, SourceLocation,
+ bool captureSetValueAsResult) = 0;
+ };
+
+ /// A PseudoOpBuilder for Objective-C @properties.
+ class ObjCPropertyOpBuilder : public PseudoOpBuilder {
+ ObjCPropertyRefExpr *RefExpr;
+ ObjCPropertyRefExpr *SyntacticRefExpr;
+ OpaqueValueExpr *InstanceReceiver;
+ ObjCMethodDecl *Getter;
+
+ ObjCMethodDecl *Setter;
+ Selector SetterSelector;
+
+ public:
+ ObjCPropertyOpBuilder(Sema &S, ObjCPropertyRefExpr *refExpr) :
+ PseudoOpBuilder(S, refExpr->getLocation()), RefExpr(refExpr),
+ SyntacticRefExpr(0), InstanceReceiver(0), Getter(0), Setter(0) {
+ }
+
+ ExprResult buildRValueOperation(Expr *op);
+ ExprResult buildAssignmentOperation(Scope *Sc,
+ SourceLocation opLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS);
+ ExprResult buildIncDecOperation(Scope *Sc, SourceLocation opLoc,
+ UnaryOperatorKind opcode,
+ Expr *op);
+
+ bool tryBuildGetOfReference(Expr *op, ExprResult &result);
+ bool findSetter();
+ bool findGetter();
+
+ Expr *rebuildAndCaptureObject(Expr *syntacticBase);
+ ExprResult buildGet();
+ ExprResult buildSet(Expr *op, SourceLocation, bool);
+ };
+
+ /// A PseudoOpBuilder for Objective-C array/dictionary indexing.
+ class ObjCSubscriptOpBuilder : public PseudoOpBuilder {
+ ObjCSubscriptRefExpr *RefExpr;
+ OpaqueValueExpr *InstanceBase;
+ OpaqueValueExpr *InstanceKey;
+ ObjCMethodDecl *AtIndexGetter;
+ Selector AtIndexGetterSelector;
+
+ ObjCMethodDecl *AtIndexSetter;
+ Selector AtIndexSetterSelector;
+
+ public:
+ ObjCSubscriptOpBuilder(Sema &S, ObjCSubscriptRefExpr *refExpr) :
+ PseudoOpBuilder(S, refExpr->getSourceRange().getBegin()),
+ RefExpr(refExpr),
+ InstanceBase(0), InstanceKey(0),
+ AtIndexGetter(0), AtIndexSetter(0) { }
+
+ ExprResult buildRValueOperation(Expr *op);
+ ExprResult buildAssignmentOperation(Scope *Sc,
+ SourceLocation opLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS);
+ Expr *rebuildAndCaptureObject(Expr *syntacticBase);
+
+ bool findAtIndexGetter();
+ bool findAtIndexSetter();
+
+ ExprResult buildGet();
+ ExprResult buildSet(Expr *op, SourceLocation, bool);
+ };
+
+}
+
+/// Capture the given expression in an OpaqueValueExpr.
+OpaqueValueExpr *PseudoOpBuilder::capture(Expr *e) {
+ // Make a new OVE whose source is the given expression.
+ OpaqueValueExpr *captured =
+ new (S.Context) OpaqueValueExpr(GenericLoc, e->getType(),
+ e->getValueKind(), e->getObjectKind(),
+ e);
+
+ // Make sure we bind that in the semantics.
+ addSemanticExpr(captured);
+ return captured;
+}
+
+/// Capture the given expression as the result of this pseudo-object
+/// operation. This routine is safe against expressions which may
+/// already be captured.
+///
+/// \returns the captured expression, which will be the same as the
+/// input if the input was already captured.
+OpaqueValueExpr *PseudoOpBuilder::captureValueAsResult(Expr *e) {
+ assert(ResultIndex == PseudoObjectExpr::NoResult);
+
+  // If the expression hasn't already been captured, just capture it
+  // and make the newly-captured semantic expression the result.
+ if (!isa<OpaqueValueExpr>(e)) {
+ OpaqueValueExpr *cap = capture(e);
+ setResultToLastSemantic();
+ return cap;
+ }
+
+ // Otherwise, it must already be one of our semantic expressions;
+ // set ResultIndex to its index.
+ unsigned index = 0;
+ for (;; ++index) {
+ assert(index < Semantics.size() &&
+ "captured expression not found in semantics!");
+ if (e == Semantics[index]) break;
+ }
+ ResultIndex = index;
+ return cast<OpaqueValueExpr>(e);
+}
+
+/// The routine which creates the final PseudoObjectExpr.
+ExprResult PseudoOpBuilder::complete(Expr *syntactic) {
+ return PseudoObjectExpr::Create(S.Context, syntactic,
+ Semantics, ResultIndex);
+}
+
+/// The main skeleton for building an r-value operation.
+ExprResult PseudoOpBuilder::buildRValueOperation(Expr *op) {
+ Expr *syntacticBase = rebuildAndCaptureObject(op);
+
+ ExprResult getExpr = buildGet();
+ if (getExpr.isInvalid()) return ExprError();
+ addResultSemanticExpr(getExpr.take());
+
+ return complete(syntacticBase);
+}
+
+/// The basic skeleton for building a simple or compound
+/// assignment operation.
+ExprResult
+PseudoOpBuilder::buildAssignmentOperation(Scope *Sc, SourceLocation opcLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS) {
+ assert(BinaryOperator::isAssignmentOp(opcode));
+
+ Expr *syntacticLHS = rebuildAndCaptureObject(LHS);
+ OpaqueValueExpr *capturedRHS = capture(RHS);
+
+ Expr *syntactic;
+
+ ExprResult result;
+ if (opcode == BO_Assign) {
+ result = capturedRHS;
+ syntactic = new (S.Context) BinaryOperator(syntacticLHS, capturedRHS,
+ opcode, capturedRHS->getType(),
+ capturedRHS->getValueKind(),
+ OK_Ordinary, opcLoc);
+ } else {
+ ExprResult opLHS = buildGet();
+ if (opLHS.isInvalid()) return ExprError();
+
+ // Build an ordinary, non-compound operation.
+ BinaryOperatorKind nonCompound =
+ BinaryOperator::getOpForCompoundAssignment(opcode);
+ result = S.BuildBinOp(Sc, opcLoc, nonCompound,
+ opLHS.take(), capturedRHS);
+ if (result.isInvalid()) return ExprError();
+
+ syntactic =
+ new (S.Context) CompoundAssignOperator(syntacticLHS, capturedRHS, opcode,
+ result.get()->getType(),
+ result.get()->getValueKind(),
+ OK_Ordinary,
+ opLHS.get()->getType(),
+ result.get()->getType(),
+ opcLoc);
+ }
+
+ // The result of the assignment, if not void, is the value set into
+ // the l-value.
+ result = buildSet(result.take(), opcLoc, assignmentsHaveResult());
+ if (result.isInvalid()) return ExprError();
+ addSemanticExpr(result.take());
+
+ return complete(syntactic);
+}
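+
+// For illustration, with a hypothetical property 'count': a compound
+// assignment such as 'obj.count += 2' captures the receiver and the RHS in
+// OpaqueValueExprs, loads the old value with buildGet(), applies the
+// non-compound '+' to that load, and stores the sum with buildSet(); the
+// CompoundAssignOperator built above survives only as the syntactic form.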
+
+/// The basic skeleton for building an increment or decrement
+/// operation.
+ExprResult
+PseudoOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc,
+ UnaryOperatorKind opcode,
+ Expr *op) {
+ assert(UnaryOperator::isIncrementDecrementOp(opcode));
+
+ Expr *syntacticOp = rebuildAndCaptureObject(op);
+
+ // Load the value.
+ ExprResult result = buildGet();
+ if (result.isInvalid()) return ExprError();
+
+ QualType resultType = result.get()->getType();
+
+ // That's the postfix result.
+ if (UnaryOperator::isPostfix(opcode) && assignmentsHaveResult()) {
+ result = capture(result.take());
+ setResultToLastSemantic();
+ }
+
+ // Add or subtract a literal 1.
+ llvm::APInt oneV(S.Context.getTypeSize(S.Context.IntTy), 1);
+ Expr *one = IntegerLiteral::Create(S.Context, oneV, S.Context.IntTy,
+ GenericLoc);
+
+ if (UnaryOperator::isIncrementOp(opcode)) {
+ result = S.BuildBinOp(Sc, opcLoc, BO_Add, result.take(), one);
+ } else {
+ result = S.BuildBinOp(Sc, opcLoc, BO_Sub, result.take(), one);
+ }
+ if (result.isInvalid()) return ExprError();
+
+ // Store that back into the result. The value stored is the result
+ // of a prefix operation.
+ result = buildSet(result.take(), opcLoc,
+ UnaryOperator::isPrefix(opcode) && assignmentsHaveResult());
+ if (result.isInvalid()) return ExprError();
+ addSemanticExpr(result.take());
+
+ UnaryOperator *syntactic =
+ new (S.Context) UnaryOperator(syntacticOp, opcode, resultType,
+ VK_LValue, OK_Ordinary, opcLoc);
+ return complete(syntactic);
+}
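+
+// For illustration, with a hypothetical property 'count': 'obj.count++'
+// loads the value with the getter, captures that load as the (postfix)
+// result, adds a literal 1, and stores the sum back through the setter;
+// for prefix '++obj.count' the stored value is captured as the result
+// instead.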
+
+
+//===----------------------------------------------------------------------===//
+// Objective-C @property and implicit property references
+//===----------------------------------------------------------------------===//
+
+/// Look up a method in the receiver type of an Objective-C property
+/// reference.
+static ObjCMethodDecl *LookupMethodInReceiverType(Sema &S, Selector sel,
+ const ObjCPropertyRefExpr *PRE) {
+ if (PRE->isObjectReceiver()) {
+ const ObjCObjectPointerType *PT =
+ PRE->getBase()->getType()->castAs<ObjCObjectPointerType>();
+
+ // Special case for 'self' in class method implementations.
+ if (PT->isObjCClassType() &&
+ S.isSelfExpr(const_cast<Expr*>(PRE->getBase()))) {
+ // This cast is safe because isSelfExpr is only true within
+ // methods.
+ ObjCMethodDecl *method =
+ cast<ObjCMethodDecl>(S.CurContext->getNonClosureAncestor());
+ return S.LookupMethodInObjectType(sel,
+ S.Context.getObjCInterfaceType(method->getClassInterface()),
+ /*instance*/ false);
+ }
+
+ return S.LookupMethodInObjectType(sel, PT->getPointeeType(), true);
+ }
+
+ if (PRE->isSuperReceiver()) {
+ if (const ObjCObjectPointerType *PT =
+ PRE->getSuperReceiverType()->getAs<ObjCObjectPointerType>())
+ return S.LookupMethodInObjectType(sel, PT->getPointeeType(), true);
+
+ return S.LookupMethodInObjectType(sel, PRE->getSuperReceiverType(), false);
+ }
+
+ assert(PRE->isClassReceiver() && "Invalid expression");
+ QualType IT = S.Context.getObjCInterfaceType(PRE->getClassReceiver());
+ return S.LookupMethodInObjectType(sel, IT, false);
+}
+
+bool ObjCPropertyOpBuilder::findGetter() {
+ if (Getter) return true;
+
+ // For implicit properties, just trust the lookup we already did.
+ if (RefExpr->isImplicitProperty()) {
+ Getter = RefExpr->getImplicitPropertyGetter();
+ return (Getter != 0);
+ }
+
+ ObjCPropertyDecl *prop = RefExpr->getExplicitProperty();
+ Getter = LookupMethodInReceiverType(S, prop->getGetterName(), RefExpr);
+ return (Getter != 0);
+}
+
+/// Try to find the most accurate setter declaration for the property
+/// reference.
+///
+/// \return true if a setter was found, in which case Setter is set;
+/// SetterSelector is filled in either way.
+bool ObjCPropertyOpBuilder::findSetter() {
+ // For implicit properties, just trust the lookup we already did.
+ if (RefExpr->isImplicitProperty()) {
+ if (ObjCMethodDecl *setter = RefExpr->getImplicitPropertySetter()) {
+ Setter = setter;
+ SetterSelector = setter->getSelector();
+ return true;
+ } else {
+ IdentifierInfo *getterName =
+ RefExpr->getImplicitPropertyGetter()->getSelector()
+ .getIdentifierInfoForSlot(0);
+ SetterSelector =
+ SelectorTable::constructSetterName(S.PP.getIdentifierTable(),
+ S.PP.getSelectorTable(),
+ getterName);
+ return false;
+ }
+ }
+
+ // For explicit properties, this is more involved.
+ ObjCPropertyDecl *prop = RefExpr->getExplicitProperty();
+ SetterSelector = prop->getSetterName();
+
+ // Do a normal method lookup first.
+ if (ObjCMethodDecl *setter =
+ LookupMethodInReceiverType(S, SetterSelector, RefExpr)) {
+ Setter = setter;
+ return true;
+ }
+
+ // That can fail in the somewhat crazy situation that we're
+ // type-checking a message send within the @interface declaration
+ // that declared the @property. But it's not clear that that's
+ // valuable to support.
+
+ return false;
+}
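+
+// For illustration: even when only an implicit getter is known, a setter
+// selector is still derived from it so the caller's diagnostics can name it,
+// e.g. a getter selector 'value' yields the setter selector 'setValue:'.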
+
+/// Capture the base object of an Objective-C property expression.
+Expr *ObjCPropertyOpBuilder::rebuildAndCaptureObject(Expr *syntacticBase) {
+ assert(InstanceReceiver == 0);
+
+ // If we have a base, capture it in an OVE and rebuild the syntactic
+ // form to use the OVE as its base.
+ if (RefExpr->isObjectReceiver()) {
+ InstanceReceiver = capture(RefExpr->getBase());
+
+ syntacticBase =
+ ObjCPropertyRefRebuilder(S, InstanceReceiver).rebuild(syntacticBase);
+ }
+
+ if (ObjCPropertyRefExpr *
+ refE = dyn_cast<ObjCPropertyRefExpr>(syntacticBase->IgnoreParens()))
+ SyntacticRefExpr = refE;
+
+ return syntacticBase;
+}
+
+/// Load from an Objective-C property reference.
+ExprResult ObjCPropertyOpBuilder::buildGet() {
+ findGetter();
+ assert(Getter);
+
+ if (SyntacticRefExpr)
+ SyntacticRefExpr->setIsMessagingGetter();
+
+ QualType receiverType;
+ if (RefExpr->isClassReceiver()) {
+ receiverType = S.Context.getObjCInterfaceType(RefExpr->getClassReceiver());
+ } else if (RefExpr->isSuperReceiver()) {
+ receiverType = RefExpr->getSuperReceiverType();
+ } else {
+ assert(InstanceReceiver);
+ receiverType = InstanceReceiver->getType();
+ }
+
+ // Build a message-send.
+ ExprResult msg;
+ if (Getter->isInstanceMethod() || RefExpr->isObjectReceiver()) {
+ assert(InstanceReceiver || RefExpr->isSuperReceiver());
+ msg = S.BuildInstanceMessageImplicit(InstanceReceiver, receiverType,
+ GenericLoc, Getter->getSelector(),
+ Getter, MultiExprArg());
+ } else {
+ msg = S.BuildClassMessageImplicit(receiverType, RefExpr->isSuperReceiver(),
+ GenericLoc,
+ Getter->getSelector(), Getter,
+ MultiExprArg());
+ }
+ return msg;
+}
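+
+// For illustration, with a hypothetical property 'name': reading 'obj.name'
+// is lowered to the implicit message send '[obj name]'; with a class or
+// super receiver, the corresponding implicit message is built against that
+// receiver type instead.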
+
+/// Store to an Objective-C property reference.
+///
+/// \param captureSetValueAsResult - If true, capture the actual
+/// value being set as the value of the property operation.
+ExprResult ObjCPropertyOpBuilder::buildSet(Expr *op, SourceLocation opcLoc,
+ bool captureSetValueAsResult) {
+ bool hasSetter = findSetter();
+ assert(hasSetter); (void) hasSetter;
+
+ if (SyntacticRefExpr)
+ SyntacticRefExpr->setIsMessagingSetter();
+
+ QualType receiverType;
+ if (RefExpr->isClassReceiver()) {
+ receiverType = S.Context.getObjCInterfaceType(RefExpr->getClassReceiver());
+ } else if (RefExpr->isSuperReceiver()) {
+ receiverType = RefExpr->getSuperReceiverType();
+ } else {
+ assert(InstanceReceiver);
+ receiverType = InstanceReceiver->getType();
+ }
+
+ // Use assignment constraints when possible; they give us better
+ // diagnostics. "When possible" basically means anything except a
+ // C++ class type.
+ if (!S.getLangOpts().CPlusPlus || !op->getType()->isRecordType()) {
+ QualType paramType = (*Setter->param_begin())->getType();
+ if (!S.getLangOpts().CPlusPlus || !paramType->isRecordType()) {
+ ExprResult opResult = op;
+ Sema::AssignConvertType assignResult
+ = S.CheckSingleAssignmentConstraints(paramType, opResult);
+ if (S.DiagnoseAssignmentResult(assignResult, opcLoc, paramType,
+ op->getType(), opResult.get(),
+ Sema::AA_Assigning))
+ return ExprError();
+
+ op = opResult.take();
+ assert(op && "successful assignment left argument invalid?");
+ }
+ }
+
+ // Arguments.
+ Expr *args[] = { op };
+
+ // Build a message-send.
+ ExprResult msg;
+ if (Setter->isInstanceMethod() || RefExpr->isObjectReceiver()) {
+ msg = S.BuildInstanceMessageImplicit(InstanceReceiver, receiverType,
+ GenericLoc, SetterSelector, Setter,
+ MultiExprArg(args, 1));
+ } else {
+ msg = S.BuildClassMessageImplicit(receiverType, RefExpr->isSuperReceiver(),
+ GenericLoc,
+ SetterSelector, Setter,
+ MultiExprArg(args, 1));
+ }
+
+ if (!msg.isInvalid() && captureSetValueAsResult) {
+ ObjCMessageExpr *msgExpr =
+ cast<ObjCMessageExpr>(msg.get()->IgnoreImplicit());
+ Expr *arg = msgExpr->getArg(0);
+ msgExpr->setArg(0, captureValueAsResult(arg));
+ }
+
+ return msg;
+}
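+
+// For illustration, with a hypothetical property 'name': 'obj.name = n' is
+// lowered to the implicit message send '[obj setName: n]'; when the
+// assignment's value is needed, the setter argument is captured so the
+// enclosing PseudoObjectExpr can yield it as its result.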
+
+/// @property-specific behavior for doing lvalue-to-rvalue conversion.
+ExprResult ObjCPropertyOpBuilder::buildRValueOperation(Expr *op) {
+ // Explicit properties always have getters, but implicit ones don't.
+ // Check that before proceeding.
+ if (RefExpr->isImplicitProperty() &&
+ !RefExpr->getImplicitPropertyGetter()) {
+ S.Diag(RefExpr->getLocation(), diag::err_getter_not_found)
+ << RefExpr->getBase()->getType();
+ return ExprError();
+ }
+
+ ExprResult result = PseudoOpBuilder::buildRValueOperation(op);
+ if (result.isInvalid()) return ExprError();
+
+ if (RefExpr->isExplicitProperty() && !Getter->hasRelatedResultType())
+ S.DiagnosePropertyAccessorMismatch(RefExpr->getExplicitProperty(),
+ Getter, RefExpr->getLocation());
+
+ // As a special case, if the method returns 'id', try to get
+ // a better type from the property.
+ if (RefExpr->isExplicitProperty() && result.get()->isRValue() &&
+ result.get()->getType()->isObjCIdType()) {
+ QualType propType = RefExpr->getExplicitProperty()->getType();
+ if (const ObjCObjectPointerType *ptr
+ = propType->getAs<ObjCObjectPointerType>()) {
+ if (!ptr->isObjCIdType())
+ result = S.ImpCastExprToType(result.get(), propType, CK_BitCast);
+ }
+ }
+
+ return result;
+}
+
+/// Try to build this as a call to a getter that returns a reference.
+///
+/// \return true if it was possible, whether or not it actually
+/// succeeded
+bool ObjCPropertyOpBuilder::tryBuildGetOfReference(Expr *op,
+ ExprResult &result) {
+ if (!S.getLangOpts().CPlusPlus) return false;
+
+ findGetter();
+ assert(Getter && "property has no setter and no getter!");
+
+ // Only do this if the getter returns an l-value reference type.
+ QualType resultType = Getter->getResultType();
+ if (!resultType->isLValueReferenceType()) return false;
+
+ result = buildRValueOperation(op);
+ return true;
+}
+
+/// @property-specific behavior for doing assignments.
+ExprResult
+ObjCPropertyOpBuilder::buildAssignmentOperation(Scope *Sc,
+ SourceLocation opcLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS) {
+ assert(BinaryOperator::isAssignmentOp(opcode));
+
+ // If there's no setter, we have no choice but to try to assign to
+ // the result of the getter.
+ if (!findSetter()) {
+ ExprResult result;
+ if (tryBuildGetOfReference(LHS, result)) {
+ if (result.isInvalid()) return ExprError();
+ return S.BuildBinOp(Sc, opcLoc, opcode, result.take(), RHS);
+ }
+
+ // Otherwise, it's an error.
+ S.Diag(opcLoc, diag::err_nosetter_property_assignment)
+ << unsigned(RefExpr->isImplicitProperty())
+ << SetterSelector
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ return ExprError();
+ }
+
+ // If there is a setter, we definitely want to use it.
+
+ // Verify that we can do a compound assignment.
+ if (opcode != BO_Assign && !findGetter()) {
+ S.Diag(opcLoc, diag::err_nogetter_property_compound_assignment)
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ return ExprError();
+ }
+
+ ExprResult result =
+ PseudoOpBuilder::buildAssignmentOperation(Sc, opcLoc, opcode, LHS, RHS);
+ if (result.isInvalid()) return ExprError();
+
+ // Various warnings about property assignments in ARC.
+ if (S.getLangOpts().ObjCAutoRefCount && InstanceReceiver) {
+ S.checkRetainCycles(InstanceReceiver->getSourceExpr(), RHS);
+ S.checkUnsafeExprAssigns(opcLoc, LHS, RHS);
+ }
+
+ return result;
+}
+
+/// @property-specific behavior for doing increments and decrements.
+ExprResult
+ObjCPropertyOpBuilder::buildIncDecOperation(Scope *Sc, SourceLocation opcLoc,
+ UnaryOperatorKind opcode,
+ Expr *op) {
+ // If there's no setter, we have no choice but to try to assign to
+ // the result of the getter.
+ if (!findSetter()) {
+ ExprResult result;
+ if (tryBuildGetOfReference(op, result)) {
+ if (result.isInvalid()) return ExprError();
+ return S.BuildUnaryOp(Sc, opcLoc, opcode, result.take());
+ }
+
+ // Otherwise, it's an error.
+ S.Diag(opcLoc, diag::err_nosetter_property_incdec)
+ << unsigned(RefExpr->isImplicitProperty())
+ << unsigned(UnaryOperator::isDecrementOp(opcode))
+ << SetterSelector
+ << op->getSourceRange();
+ return ExprError();
+ }
+
+ // If there is a setter, we definitely want to use it.
+
+ // We also need a getter.
+ if (!findGetter()) {
+ assert(RefExpr->isImplicitProperty());
+ S.Diag(opcLoc, diag::err_nogetter_property_incdec)
+ << unsigned(UnaryOperator::isDecrementOp(opcode))
+ << RefExpr->getImplicitPropertyGetter()->getSelector() // FIXME!
+ << op->getSourceRange();
+ return ExprError();
+ }
+
+ return PseudoOpBuilder::buildIncDecOperation(Sc, opcLoc, opcode, op);
+}
+
+// ObjCSubscript build stuff.
+//
+
+/// objective-c subscripting-specific behavior for doing lvalue-to-rvalue
+/// conversion.
+/// FIXME: Remove this routine if it is proven that no additional
+/// specificity is needed.
+ExprResult ObjCSubscriptOpBuilder::buildRValueOperation(Expr *op) {
+ ExprResult result = PseudoOpBuilder::buildRValueOperation(op);
+ if (result.isInvalid()) return ExprError();
+ return result;
+}
+
+/// objective-c subscripting-specific behavior for doing assignments.
+ExprResult
+ObjCSubscriptOpBuilder::buildAssignmentOperation(Scope *Sc,
+ SourceLocation opcLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS) {
+ assert(BinaryOperator::isAssignmentOp(opcode));
+ // There must be a method to do the Index'ed assignment.
+ if (!findAtIndexSetter())
+ return ExprError();
+
+ // Verify that we can do a compound assignment.
+ if (opcode != BO_Assign && !findAtIndexGetter())
+ return ExprError();
+
+ ExprResult result =
+ PseudoOpBuilder::buildAssignmentOperation(Sc, opcLoc, opcode, LHS, RHS);
+ if (result.isInvalid()) return ExprError();
+
+ // Various warnings about objc Index'ed assignments in ARC.
+ if (S.getLangOpts().ObjCAutoRefCount && InstanceBase) {
+ S.checkRetainCycles(InstanceBase->getSourceExpr(), RHS);
+ S.checkUnsafeExprAssigns(opcLoc, LHS, RHS);
+ }
+
+ return result;
+}
+
+/// Capture the base object of an Objective-C Index'ed expression.
+Expr *ObjCSubscriptOpBuilder::rebuildAndCaptureObject(Expr *syntacticBase) {
+ assert(InstanceBase == 0);
+
+ // Capture base expression in an OVE and rebuild the syntactic
+ // form to use the OVE as its base expression.
+ InstanceBase = capture(RefExpr->getBaseExpr());
+ InstanceKey = capture(RefExpr->getKeyExpr());
+
+ syntacticBase =
+ ObjCSubscriptRefRebuilder(S, InstanceBase,
+ InstanceKey).rebuild(syntacticBase);
+
+ return syntacticBase;
+}
+
+/// CheckSubscriptingKind - This routine decides what kind of
+/// indexing the expression "FromE" is performing.
+Sema::ObjCSubscriptKind
+ Sema::CheckSubscriptingKind(Expr *FromE) {
+ // If the expression already has integral or enumeration type, we're golden.
+ QualType T = FromE->getType();
+ if (T->isIntegralOrEnumerationType())
+ return OS_Array;
+
+ // If we don't have a class type in C++, there's no way we can get an
+ // expression of integral or enumeration type.
+ const RecordType *RecordTy = T->getAs<RecordType>();
+ if (!RecordTy && T->isObjCObjectPointerType())
+ // All other scalar cases are assumed to be dictionary indexing which
+ // caller handles, with diagnostics if needed.
+ return OS_Dictionary;
+ if (!getLangOpts().CPlusPlus ||
+ !RecordTy || RecordTy->isIncompleteType()) {
+ // No indexing can be done. Issue diagnostics and quit.
+ const Expr *IndexExpr = FromE->IgnoreParenImpCasts();
+ if (isa<StringLiteral>(IndexExpr))
+ Diag(FromE->getExprLoc(), diag::err_objc_subscript_pointer)
+ << T << FixItHint::CreateInsertion(FromE->getExprLoc(), "@");
+ else
+ Diag(FromE->getExprLoc(), diag::err_objc_subscript_type_conversion)
+ << T;
+ return OS_Error;
+ }
+
+ // We must have a complete class type.
+ if (RequireCompleteType(FromE->getExprLoc(), T,
+ PDiag(diag::err_objc_index_incomplete_class_type)
+ << FromE->getSourceRange()))
+ return OS_Error;
+
+ // Look for a conversion to an integral, enumeration type, or
+ // objective-C pointer type.
+ UnresolvedSet<4> ViableConversions;
+ UnresolvedSet<4> ExplicitConversions;
+ const UnresolvedSetImpl *Conversions
+ = cast<CXXRecordDecl>(RecordTy->getDecl())->getVisibleConversionFunctions();
+
+  int NoIntegrals = 0, NoObjCIdPointers = 0;
+ SmallVector<CXXConversionDecl *, 4> ConversionDecls;
+
+ for (UnresolvedSetImpl::iterator I = Conversions->begin(),
+ E = Conversions->end();
+ I != E;
+ ++I) {
+ if (CXXConversionDecl *Conversion
+ = dyn_cast<CXXConversionDecl>((*I)->getUnderlyingDecl())) {
+ QualType CT = Conversion->getConversionType().getNonReferenceType();
+ if (CT->isIntegralOrEnumerationType()) {
+ ++NoIntegrals;
+ ConversionDecls.push_back(Conversion);
+ }
+      else if (CT->isObjCIdType() || CT->isBlockPointerType()) {
+ ++NoObjCIdPointers;
+ ConversionDecls.push_back(Conversion);
+ }
+ }
+ }
+  if (NoIntegrals == 1 && NoObjCIdPointers == 0)
+ return OS_Array;
+ if (NoIntegrals == 0 && NoObjCIdPointers == 1)
+ return OS_Dictionary;
+ if (NoIntegrals == 0 && NoObjCIdPointers == 0) {
+ // No conversion function was found. Issue diagnostic and return.
+ Diag(FromE->getExprLoc(), diag::err_objc_subscript_type_conversion)
+ << FromE->getType();
+ return OS_Error;
+ }
+ Diag(FromE->getExprLoc(), diag::err_objc_multiple_subscript_type_conversion)
+ << FromE->getType();
+  for (unsigned i = 0; i < ConversionDecls.size(); ++i)
+ Diag(ConversionDecls[i]->getLocation(), diag::not_conv_function_declared_at);
+
+ return OS_Error;
+}
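+
+// For illustration: an integral or enumeration key, as in 'container[4]',
+// is classified as array-style subscripting (OS_Array), while an Objective-C
+// object-pointer key, as in 'container[@"key"]', is classified as
+// dictionary-style subscripting (OS_Dictionary); a C++ class-type key is
+// classified through its visible conversion functions as done above.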
+
+bool ObjCSubscriptOpBuilder::findAtIndexGetter() {
+ if (AtIndexGetter)
+ return true;
+
+ Expr *BaseExpr = RefExpr->getBaseExpr();
+ QualType BaseT = BaseExpr->getType();
+
+ QualType ResultType;
+ if (const ObjCObjectPointerType *PTy =
+ BaseT->getAs<ObjCObjectPointerType>()) {
+ ResultType = PTy->getPointeeType();
+ if (const ObjCObjectType *iQFaceTy =
+ ResultType->getAsObjCQualifiedInterfaceType())
+ ResultType = iQFaceTy->getBaseType();
+ }
+ Sema::ObjCSubscriptKind Res =
+ S.CheckSubscriptingKind(RefExpr->getKeyExpr());
+ if (Res == Sema::OS_Error)
+ return false;
+ bool arrayRef = (Res == Sema::OS_Array);
+
+ if (ResultType.isNull()) {
+ S.Diag(BaseExpr->getExprLoc(), diag::err_objc_subscript_base_type)
+ << BaseExpr->getType() << arrayRef;
+ return false;
+ }
+ if (!arrayRef) {
+ // dictionary subscripting.
+ // - (id)objectForKeyedSubscript:(id)key;
+ IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("objectForKeyedSubscript")
+ };
+ AtIndexGetterSelector = S.Context.Selectors.getSelector(1, KeyIdents);
+ }
+ else {
+ // - (id)objectAtIndexedSubscript:(size_t)index;
+ IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("objectAtIndexedSubscript")
+ };
+
+ AtIndexGetterSelector = S.Context.Selectors.getSelector(1, KeyIdents);
+ }
+
+ AtIndexGetter = S.LookupMethodInObjectType(AtIndexGetterSelector, ResultType,
+ true /*instance*/);
+ bool receiverIdType = (BaseT->isObjCIdType() ||
+ BaseT->isObjCQualifiedIdType());
+
+ if (!AtIndexGetter && S.getLangOpts().DebuggerObjCLiteral) {
+ AtIndexGetter = ObjCMethodDecl::Create(S.Context, SourceLocation(),
+ SourceLocation(), AtIndexGetterSelector,
+ S.Context.getObjCIdType() /*ReturnType*/,
+ 0 /*TypeSourceInfo */,
+ S.Context.getTranslationUnitDecl(),
+ true /*Instance*/, false/*isVariadic*/,
+ /*isSynthesized=*/false,
+ /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+ ObjCMethodDecl::Required,
+ false);
+ ParmVarDecl *Argument = ParmVarDecl::Create(S.Context, AtIndexGetter,
+ SourceLocation(), SourceLocation(),
+ arrayRef ? &S.Context.Idents.get("index")
+ : &S.Context.Idents.get("key"),
+ arrayRef ? S.Context.UnsignedLongTy
+ : S.Context.getObjCIdType(),
+ /*TInfo=*/0,
+ SC_None,
+ SC_None,
+ 0);
+ AtIndexGetter->setMethodParams(S.Context, Argument,
+ ArrayRef<SourceLocation>());
+ }
+
+ if (!AtIndexGetter) {
+ if (!receiverIdType) {
+ S.Diag(BaseExpr->getExprLoc(), diag::err_objc_subscript_method_not_found)
+ << BaseExpr->getType() << 0 << arrayRef;
+ return false;
+ }
+ AtIndexGetter =
+ S.LookupInstanceMethodInGlobalPool(AtIndexGetterSelector,
+ RefExpr->getSourceRange(),
+ true, false);
+ }
+
+ if (AtIndexGetter) {
+ QualType T = AtIndexGetter->param_begin()[0]->getType();
+ if ((arrayRef && !T->isIntegralOrEnumerationType()) ||
+ (!arrayRef && !T->isObjCObjectPointerType())) {
+ S.Diag(RefExpr->getKeyExpr()->getExprLoc(),
+ arrayRef ? diag::err_objc_subscript_index_type
+ : diag::err_objc_subscript_key_type) << T;
+ S.Diag(AtIndexGetter->param_begin()[0]->getLocation(),
+ diag::note_parameter_type) << T;
+ return false;
+ }
+ QualType R = AtIndexGetter->getResultType();
+ if (!R->isObjCObjectPointerType()) {
+ S.Diag(RefExpr->getKeyExpr()->getExprLoc(),
+ diag::err_objc_indexing_method_result_type) << R << arrayRef;
+ S.Diag(AtIndexGetter->getLocation(), diag::note_method_declared_at) <<
+ AtIndexGetter->getDeclName();
+ }
+ }
+ return true;
+}
+
+bool ObjCSubscriptOpBuilder::findAtIndexSetter() {
+ if (AtIndexSetter)
+ return true;
+
+ Expr *BaseExpr = RefExpr->getBaseExpr();
+ QualType BaseT = BaseExpr->getType();
+
+ QualType ResultType;
+ if (const ObjCObjectPointerType *PTy =
+ BaseT->getAs<ObjCObjectPointerType>()) {
+ ResultType = PTy->getPointeeType();
+ if (const ObjCObjectType *iQFaceTy =
+ ResultType->getAsObjCQualifiedInterfaceType())
+ ResultType = iQFaceTy->getBaseType();
+ }
+
+ Sema::ObjCSubscriptKind Res =
+ S.CheckSubscriptingKind(RefExpr->getKeyExpr());
+ if (Res == Sema::OS_Error)
+ return false;
+ bool arrayRef = (Res == Sema::OS_Array);
+
+ if (ResultType.isNull()) {
+ S.Diag(BaseExpr->getExprLoc(), diag::err_objc_subscript_base_type)
+ << BaseExpr->getType() << arrayRef;
+ return false;
+ }
+
+ if (!arrayRef) {
+ // dictionary subscripting.
+ // - (void)setObject:(id)object forKeyedSubscript:(id)key;
+ IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("setObject"),
+ &S.Context.Idents.get("forKeyedSubscript")
+ };
+ AtIndexSetterSelector = S.Context.Selectors.getSelector(2, KeyIdents);
+ }
+ else {
+ // - (void)setObject:(id)object atIndexedSubscript:(NSInteger)index;
+ IdentifierInfo *KeyIdents[] = {
+ &S.Context.Idents.get("setObject"),
+ &S.Context.Idents.get("atIndexedSubscript")
+ };
+ AtIndexSetterSelector = S.Context.Selectors.getSelector(2, KeyIdents);
+ }
+ AtIndexSetter = S.LookupMethodInObjectType(AtIndexSetterSelector, ResultType,
+ true /*instance*/);
+
+ bool receiverIdType = (BaseT->isObjCIdType() ||
+ BaseT->isObjCQualifiedIdType());
+
+ if (!AtIndexSetter && S.getLangOpts().DebuggerObjCLiteral) {
+ TypeSourceInfo *ResultTInfo = 0;
+ QualType ReturnType = S.Context.VoidTy;
+ AtIndexSetter = ObjCMethodDecl::Create(S.Context, SourceLocation(),
+ SourceLocation(), AtIndexSetterSelector,
+ ReturnType,
+ ResultTInfo,
+ S.Context.getTranslationUnitDecl(),
+ true /*Instance*/, false/*isVariadic*/,
+ /*isSynthesized=*/false,
+ /*isImplicitlyDeclared=*/true, /*isDefined=*/false,
+ ObjCMethodDecl::Required,
+ false);
+ SmallVector<ParmVarDecl *, 2> Params;
+ ParmVarDecl *object = ParmVarDecl::Create(S.Context, AtIndexSetter,
+ SourceLocation(), SourceLocation(),
+ &S.Context.Idents.get("object"),
+ S.Context.getObjCIdType(),
+ /*TInfo=*/0,
+ SC_None,
+ SC_None,
+ 0);
+ Params.push_back(object);
+ ParmVarDecl *key = ParmVarDecl::Create(S.Context, AtIndexSetter,
+ SourceLocation(), SourceLocation(),
+ arrayRef ? &S.Context.Idents.get("index")
+ : &S.Context.Idents.get("key"),
+ arrayRef ? S.Context.UnsignedLongTy
+ : S.Context.getObjCIdType(),
+ /*TInfo=*/0,
+ SC_None,
+ SC_None,
+ 0);
+ Params.push_back(key);
+ AtIndexSetter->setMethodParams(S.Context, Params, ArrayRef<SourceLocation>());
+ }
+
+ if (!AtIndexSetter) {
+ if (!receiverIdType) {
+ S.Diag(BaseExpr->getExprLoc(),
+ diag::err_objc_subscript_method_not_found)
+ << BaseExpr->getType() << 1 << arrayRef;
+ return false;
+ }
+ AtIndexSetter =
+ S.LookupInstanceMethodInGlobalPool(AtIndexSetterSelector,
+ RefExpr->getSourceRange(),
+ true, false);
+ }
+
+ bool err = false;
+ if (AtIndexSetter && arrayRef) {
+ QualType T = AtIndexSetter->param_begin()[1]->getType();
+ if (!T->isIntegralOrEnumerationType()) {
+ S.Diag(RefExpr->getKeyExpr()->getExprLoc(),
+ diag::err_objc_subscript_index_type) << T;
+ S.Diag(AtIndexSetter->param_begin()[1]->getLocation(),
+ diag::note_parameter_type) << T;
+ err = true;
+ }
+ T = AtIndexSetter->param_begin()[0]->getType();
+ if (!T->isObjCObjectPointerType()) {
+ S.Diag(RefExpr->getBaseExpr()->getExprLoc(),
+ diag::err_objc_subscript_object_type) << T << arrayRef;
+ S.Diag(AtIndexSetter->param_begin()[0]->getLocation(),
+ diag::note_parameter_type) << T;
+ err = true;
+ }
+ }
+ else if (AtIndexSetter && !arrayRef)
+    for (unsigned i = 0; i < 2; ++i) {
+ QualType T = AtIndexSetter->param_begin()[i]->getType();
+ if (!T->isObjCObjectPointerType()) {
+ if (i == 1)
+ S.Diag(RefExpr->getKeyExpr()->getExprLoc(),
+ diag::err_objc_subscript_key_type) << T;
+ else
+ S.Diag(RefExpr->getBaseExpr()->getExprLoc(),
+ diag::err_objc_subscript_dic_object_type) << T;
+ S.Diag(AtIndexSetter->param_begin()[i]->getLocation(),
+ diag::note_parameter_type) << T;
+ err = true;
+ }
+ }
+
+ return !err;
+}
+
+/// Get the object at the "Index" position in the container by building
+/// this message send: [BaseExpr objectAtIndexedSubscript:IndexExpr];
+ExprResult ObjCSubscriptOpBuilder::buildGet() {
+ if (!findAtIndexGetter())
+ return ExprError();
+
+ QualType receiverType = InstanceBase->getType();
+
+ // Build a message-send.
+ ExprResult msg;
+ Expr *Index = InstanceKey;
+
+ // Arguments.
+ Expr *args[] = { Index };
+ assert(InstanceBase);
+ msg = S.BuildInstanceMessageImplicit(InstanceBase, receiverType,
+ GenericLoc,
+ AtIndexGetterSelector, AtIndexGetter,
+ MultiExprArg(args, 1));
+ return msg;
+}
+
+/// Store the "op" object into the container at the "Index"'ed location
+/// by building this messaging expression:
+/// - (void)setObject:(id)object atIndexedSubscript:(NSInteger)index;
+///
+/// \param captureSetValueAsResult - If true, capture the actual
+/// value being set as the value of the subscript operation.
+ExprResult ObjCSubscriptOpBuilder::buildSet(Expr *op, SourceLocation opcLoc,
+ bool captureSetValueAsResult) {
+ if (!findAtIndexSetter())
+ return ExprError();
+
+ QualType receiverType = InstanceBase->getType();
+ Expr *Index = InstanceKey;
+
+ // Arguments.
+ Expr *args[] = { op, Index };
+
+ // Build a message-send.
+ ExprResult msg = S.BuildInstanceMessageImplicit(InstanceBase, receiverType,
+ GenericLoc,
+ AtIndexSetterSelector,
+ AtIndexSetter,
+ MultiExprArg(args, 2));
+
+ if (!msg.isInvalid() && captureSetValueAsResult) {
+ ObjCMessageExpr *msgExpr =
+ cast<ObjCMessageExpr>(msg.get()->IgnoreImplicit());
+ Expr *arg = msgExpr->getArg(0);
+ msgExpr->setArg(0, captureValueAsResult(arg));
+ }
+
+ return msg;
+}
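+
+// For illustration, with hypothetical variables: 'array[idx] = obj' becomes
+//   [array setObject: obj atIndexedSubscript: idx]
+// and 'dict[key] = obj' becomes
+//   [dict setObject: obj forKeyedSubscript: key]
+// where the base and key come from the OpaqueValueExprs captured earlier.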
+
+//===----------------------------------------------------------------------===//
+// General Sema routines.
+//===----------------------------------------------------------------------===//
+
+ExprResult Sema::checkPseudoObjectRValue(Expr *E) {
+ Expr *opaqueRef = E->IgnoreParens();
+ if (ObjCPropertyRefExpr *refExpr
+ = dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
+ ObjCPropertyOpBuilder builder(*this, refExpr);
+ return builder.buildRValueOperation(E);
+ }
+ else if (ObjCSubscriptRefExpr *refExpr
+ = dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) {
+ ObjCSubscriptOpBuilder builder(*this, refExpr);
+ return builder.buildRValueOperation(E);
+ } else {
+ llvm_unreachable("unknown pseudo-object kind!");
+ }
+}
+
+/// Check an increment or decrement of a pseudo-object expression.
+ExprResult Sema::checkPseudoObjectIncDec(Scope *Sc, SourceLocation opcLoc,
+ UnaryOperatorKind opcode, Expr *op) {
+ // Do nothing if the operand is dependent.
+ if (op->isTypeDependent())
+ return new (Context) UnaryOperator(op, opcode, Context.DependentTy,
+ VK_RValue, OK_Ordinary, opcLoc);
+
+ assert(UnaryOperator::isIncrementDecrementOp(opcode));
+ Expr *opaqueRef = op->IgnoreParens();
+ if (ObjCPropertyRefExpr *refExpr
+ = dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
+ ObjCPropertyOpBuilder builder(*this, refExpr);
+ return builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
+ } else if (isa<ObjCSubscriptRefExpr>(opaqueRef)) {
+ Diag(opcLoc, diag::err_illegal_container_subscripting_op);
+ return ExprError();
+ } else {
+ llvm_unreachable("unknown pseudo-object kind!");
+ }
+}
+
+ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc,
+ BinaryOperatorKind opcode,
+ Expr *LHS, Expr *RHS) {
+ // Do nothing if either argument is dependent.
+ if (LHS->isTypeDependent() || RHS->isTypeDependent())
+ return new (Context) BinaryOperator(LHS, RHS, opcode, Context.DependentTy,
+ VK_RValue, OK_Ordinary, opcLoc);
+
+ // Filter out non-overload placeholder types in the RHS.
+ if (RHS->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(RHS);
+ if (result.isInvalid()) return ExprError();
+ RHS = result.take();
+ }
+
+ Expr *opaqueRef = LHS->IgnoreParens();
+ if (ObjCPropertyRefExpr *refExpr
+ = dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
+ ObjCPropertyOpBuilder builder(*this, refExpr);
+ return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
+ } else if (ObjCSubscriptRefExpr *refExpr
+ = dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) {
+ ObjCSubscriptOpBuilder builder(*this, refExpr);
+ return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
+ } else {
+ llvm_unreachable("unknown pseudo-object kind!");
+ }
+}
+
+/// Given a pseudo-object reference, rebuild it without the opaque
+/// values. Basically, undo the behavior of rebuildAndCaptureObject.
+/// This should never operate in-place.
+static Expr *stripOpaqueValuesFromPseudoObjectRef(Sema &S, Expr *E) {
+ Expr *opaqueRef = E->IgnoreParens();
+ if (ObjCPropertyRefExpr *refExpr
+ = dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
+ OpaqueValueExpr *baseOVE = cast<OpaqueValueExpr>(refExpr->getBase());
+ return ObjCPropertyRefRebuilder(S, baseOVE->getSourceExpr()).rebuild(E);
+ } else if (ObjCSubscriptRefExpr *refExpr
+ = dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) {
+ OpaqueValueExpr *baseOVE = cast<OpaqueValueExpr>(refExpr->getBaseExpr());
+ OpaqueValueExpr *keyOVE = cast<OpaqueValueExpr>(refExpr->getKeyExpr());
+ return ObjCSubscriptRefRebuilder(S, baseOVE->getSourceExpr(),
+ keyOVE->getSourceExpr()).rebuild(E);
+ } else {
+ llvm_unreachable("unknown pseudo-object kind!");
+ }
+}
+
+/// Given a pseudo-object expression, recreate what it looks like
+/// syntactically without the attendant OpaqueValueExprs.
+///
+/// This is a hack which should be removed when TreeTransform is
+/// capable of rebuilding a tree without stripping implicit
+/// operations.
+Expr *Sema::recreateSyntacticForm(PseudoObjectExpr *E) {
+ Expr *syntax = E->getSyntacticForm();
+ if (UnaryOperator *uop = dyn_cast<UnaryOperator>(syntax)) {
+ Expr *op = stripOpaqueValuesFromPseudoObjectRef(*this, uop->getSubExpr());
+ return new (Context) UnaryOperator(op, uop->getOpcode(), uop->getType(),
+ uop->getValueKind(), uop->getObjectKind(),
+ uop->getOperatorLoc());
+ } else if (CompoundAssignOperator *cop
+ = dyn_cast<CompoundAssignOperator>(syntax)) {
+ Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, cop->getLHS());
+ Expr *rhs = cast<OpaqueValueExpr>(cop->getRHS())->getSourceExpr();
+ return new (Context) CompoundAssignOperator(lhs, rhs, cop->getOpcode(),
+ cop->getType(),
+ cop->getValueKind(),
+ cop->getObjectKind(),
+ cop->getComputationLHSType(),
+ cop->getComputationResultType(),
+ cop->getOperatorLoc());
+ } else if (BinaryOperator *bop = dyn_cast<BinaryOperator>(syntax)) {
+ Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, bop->getLHS());
+ Expr *rhs = cast<OpaqueValueExpr>(bop->getRHS())->getSourceExpr();
+ return new (Context) BinaryOperator(lhs, rhs, bop->getOpcode(),
+ bop->getType(), bop->getValueKind(),
+ bop->getObjectKind(),
+ bop->getOperatorLoc());
+ } else {
+ assert(syntax->hasPlaceholderType(BuiltinType::PseudoObject));
+ return stripOpaqueValuesFromPseudoObjectRef(*this, syntax);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
new file mode 100644
index 0000000..97c8eb0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaStmt.cpp
@@ -0,0 +1,2654 @@
+//===--- SemaStmt.cpp - Semantic Analysis for Statements ------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for statements.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+using namespace clang;
+using namespace sema;
+
+StmtResult Sema::ActOnExprStmt(FullExprArg expr) {
+ Expr *E = expr.get();
+ if (!E) // FIXME: FullExprArg has no error state?
+ return StmtError();
+
+ // C99 6.8.3p2: The expression in an expression statement is evaluated as a
+ // void expression for its side effects. Conversion to void allows any
+ // operand, even incomplete types.
+
+ // Same thing in for stmt first clause (when expr) and third clause.
+ return Owned(static_cast<Stmt*>(E));
+}
+
+
+StmtResult Sema::ActOnNullStmt(SourceLocation SemiLoc,
+ bool HasLeadingEmptyMacro) {
+ return Owned(new (Context) NullStmt(SemiLoc, HasLeadingEmptyMacro));
+}
+
+StmtResult Sema::ActOnDeclStmt(DeclGroupPtrTy dg, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ DeclGroupRef DG = dg.getAsVal<DeclGroupRef>();
+
+ // If we have an invalid decl, just return an error.
+ if (DG.isNull()) return StmtError();
+
+ return Owned(new (Context) DeclStmt(DG, StartLoc, EndLoc));
+}
+
+void Sema::ActOnForEachDeclStmt(DeclGroupPtrTy dg) {
+ DeclGroupRef DG = dg.getAsVal<DeclGroupRef>();
+
+ // If we have an invalid decl, just return.
+ if (DG.isNull() || !DG.isSingleDecl()) return;
+ VarDecl *var = cast<VarDecl>(DG.getSingleDecl());
+
+ // suppress any potential 'unused variable' warning.
+ var->setUsed();
+
+  // foreach variables are never actually initialized by the expression
+  // the parser recorded as their initializer, so drop it.
+ var->setInit(0);
+
+ // In ARC, we don't need to retain the iteration variable of a fast
+ // enumeration loop. Rather than actually trying to catch that
+ // during declaration processing, we remove the consequences here.
+ if (getLangOpts().ObjCAutoRefCount) {
+ QualType type = var->getType();
+
+ // Only do this if we inferred the lifetime. Inferred lifetime
+ // will show up as a local qualifier because explicit lifetime
+ // should have shown up as an AttributedType instead.
+ if (type.getLocalQualifiers().getObjCLifetime() == Qualifiers::OCL_Strong) {
+ // Add 'const' and mark the variable as pseudo-strong.
+ var->setType(type.withConst());
+ var->setARCPseudoStrong(true);
+ }
+ }
+}
+
+/// \brief Diagnose unused '==' and '!=' as likely typos for '=' or '|='.
+///
+/// Adding a cast to void (or other expression wrappers) will prevent the
+/// warning from firing.
+static bool DiagnoseUnusedComparison(Sema &S, const Expr *E) {
+ SourceLocation Loc;
+ bool IsNotEqual, CanAssign;
+
+ if (const BinaryOperator *Op = dyn_cast<BinaryOperator>(E)) {
+ if (Op->getOpcode() != BO_EQ && Op->getOpcode() != BO_NE)
+ return false;
+
+ Loc = Op->getOperatorLoc();
+ IsNotEqual = Op->getOpcode() == BO_NE;
+ CanAssign = Op->getLHS()->IgnoreParenImpCasts()->isLValue();
+ } else if (const CXXOperatorCallExpr *Op = dyn_cast<CXXOperatorCallExpr>(E)) {
+ if (Op->getOperator() != OO_EqualEqual &&
+ Op->getOperator() != OO_ExclaimEqual)
+ return false;
+
+ Loc = Op->getOperatorLoc();
+ IsNotEqual = Op->getOperator() == OO_ExclaimEqual;
+ CanAssign = Op->getArg(0)->IgnoreParenImpCasts()->isLValue();
+ } else {
+ // Not a typo-prone comparison.
+ return false;
+ }
+
+ // Suppress warnings when the operator, suspicious as it may be, comes from
+ // a macro expansion.
+ if (Loc.isMacroID())
+ return false;
+
+ S.Diag(Loc, diag::warn_unused_comparison)
+ << (unsigned)IsNotEqual << E->getSourceRange();
+
+ // If the LHS is a plausible entity to assign to, provide a fixit hint to
+ // correct common typos.
+ if (CanAssign) {
+ if (IsNotEqual)
+ S.Diag(Loc, diag::note_inequality_comparison_to_or_assign)
+ << FixItHint::CreateReplacement(Loc, "|=");
+ else
+ S.Diag(Loc, diag::note_equality_comparison_to_assign)
+ << FixItHint::CreateReplacement(Loc, "=");
+ }
+
+ return true;
+}
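+
+// For illustration: an expression statement such as 'x == 42;' is diagnosed
+// with warn_unused_comparison, and when 'x' is assignable a fix-it offers
+// to replace '==' with '=' (or '!=' with '|=').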
+
+void Sema::DiagnoseUnusedExprResult(const Stmt *S) {
+ if (const LabelStmt *Label = dyn_cast_or_null<LabelStmt>(S))
+ return DiagnoseUnusedExprResult(Label->getSubStmt());
+
+ const Expr *E = dyn_cast_or_null<Expr>(S);
+ if (!E)
+ return;
+
+ SourceLocation Loc;
+ SourceRange R1, R2;
+ if (SourceMgr.isInSystemMacro(E->getExprLoc()) ||
+ !E->isUnusedResultAWarning(Loc, R1, R2, Context))
+ return;
+
+ // Okay, we have an unused result. Depending on what the base expression is,
+ // we might want to make a more specific diagnostic. Check for one of these
+ // cases now.
+ unsigned DiagID = diag::warn_unused_expr;
+ if (const ExprWithCleanups *Temps = dyn_cast<ExprWithCleanups>(E))
+ E = Temps->getSubExpr();
+ if (const CXXBindTemporaryExpr *TempExpr = dyn_cast<CXXBindTemporaryExpr>(E))
+ E = TempExpr->getSubExpr();
+
+ if (DiagnoseUnusedComparison(*this, E))
+ return;
+
+ E = E->IgnoreParenImpCasts();
+ if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+ if (E->getType()->isVoidType())
+ return;
+
+ // If the callee has attribute pure, const, or warn_unused_result, warn with
+ // a more specific message to make it clear what is happening.
+ if (const Decl *FD = CE->getCalleeDecl()) {
+ if (FD->getAttr<WarnUnusedResultAttr>()) {
+ Diag(Loc, diag::warn_unused_result) << R1 << R2;
+ return;
+ }
+ if (FD->getAttr<PureAttr>()) {
+ Diag(Loc, diag::warn_unused_call) << R1 << R2 << "pure";
+ return;
+ }
+ if (FD->getAttr<ConstAttr>()) {
+ Diag(Loc, diag::warn_unused_call) << R1 << R2 << "const";
+ return;
+ }
+ }
+ } else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(E)) {
+ if (getLangOpts().ObjCAutoRefCount && ME->isDelegateInitCall()) {
+ Diag(Loc, diag::err_arc_unused_init_message) << R1;
+ return;
+ }
+ const ObjCMethodDecl *MD = ME->getMethodDecl();
+ if (MD && MD->getAttr<WarnUnusedResultAttr>()) {
+ Diag(Loc, diag::warn_unused_result) << R1 << R2;
+ return;
+ }
+ } else if (const PseudoObjectExpr *POE = dyn_cast<PseudoObjectExpr>(E)) {
+ const Expr *Source = POE->getSyntacticForm();
+ if (isa<ObjCSubscriptRefExpr>(Source))
+ DiagID = diag::warn_unused_container_subscript_expr;
+ else
+ DiagID = diag::warn_unused_property_expr;
+ } else if (const CXXFunctionalCastExpr *FC
+ = dyn_cast<CXXFunctionalCastExpr>(E)) {
+ if (isa<CXXConstructExpr>(FC->getSubExpr()) ||
+ isa<CXXTemporaryObjectExpr>(FC->getSubExpr()))
+ return;
+ }
+ // Diagnose "(void*) blah" as a typo for "(void) blah".
+ else if (const CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(E)) {
+ TypeSourceInfo *TI = CE->getTypeInfoAsWritten();
+ QualType T = TI->getType();
+
+ // We really do want to use the non-canonical type here.
+ if (T == Context.VoidPtrTy) {
+ PointerTypeLoc TL = cast<PointerTypeLoc>(TI->getTypeLoc());
+
+ Diag(Loc, diag::warn_unused_voidptr)
+ << FixItHint::CreateRemoval(TL.getStarLoc());
+ return;
+ }
+ }
+
+ DiagRuntimeBehavior(Loc, 0, PDiag(DiagID) << R1 << R2);
+}
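+
+// For illustration: an unused cast to 'void*', e.g. the statement
+// '(void *)f();' with a hypothetical function 'f', is diagnosed as a likely
+// typo for '(void)f();' with a fix-it removing the '*'; an ignored call to
+// a function marked with warn_unused_result gets warn_unused_result rather
+// than the generic warn_unused_expr.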
+
+void Sema::ActOnStartOfCompoundStmt() {
+ PushCompoundScope();
+}
+
+void Sema::ActOnFinishOfCompoundStmt() {
+ PopCompoundScope();
+}
+
+sema::CompoundScopeInfo &Sema::getCurCompoundScope() const {
+ return getCurFunction()->CompoundScopes.back();
+}
+
+StmtResult
+Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
+ MultiStmtArg elts, bool isStmtExpr) {
+ unsigned NumElts = elts.size();
+ Stmt **Elts = reinterpret_cast<Stmt**>(elts.release());
+ // If we're in C89 mode, check that we don't have any decls after stmts. If
+ // so, emit an extension diagnostic.
+ if (!getLangOpts().C99 && !getLangOpts().CPlusPlus) {
+ // Note that __extension__ can be around a decl.
+ unsigned i = 0;
+ // Skip over all declarations.
+ for (; i != NumElts && isa<DeclStmt>(Elts[i]); ++i)
+ /*empty*/;
+
+ // We found the end of the list or a statement. Scan for another declstmt.
+ for (; i != NumElts && !isa<DeclStmt>(Elts[i]); ++i)
+ /*empty*/;
+
+ if (i != NumElts) {
+ Decl *D = *cast<DeclStmt>(Elts[i])->decl_begin();
+ Diag(D->getLocation(), diag::ext_mixed_decls_code);
+ }
+ }
+ // Warn about unused expressions in statements.
+ for (unsigned i = 0; i != NumElts; ++i) {
+ // Ignore statements that are last in a statement expression.
+ if (isStmtExpr && i == NumElts - 1)
+ continue;
+
+ DiagnoseUnusedExprResult(Elts[i]);
+ }
+
+ // Check for suspicious empty body (null statement) in `for' and `while'
+ // statements. Don't do anything for template instantiations, this just adds
+ // noise.
+ if (NumElts != 0 && !CurrentInstantiationScope &&
+ getCurCompoundScope().HasEmptyLoopBodies) {
+ for (unsigned i = 0; i != NumElts - 1; ++i)
+ DiagnoseEmptyLoopBody(Elts[i], Elts[i + 1]);
+ }
+
+ return Owned(new (Context) CompoundStmt(Context, Elts, NumElts, L, R));
+}
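+
+// For illustration: compiling '{ int i = 0; i++; int j = i; }' in C89 mode
+// emits the ext_mixed_decls_code extension diagnostic on 'int j', since a
+// declaration follows a statement; C99 and C++ accept the mixed ordering
+// without comment.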
+
+StmtResult
+Sema::ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal,
+ SourceLocation DotDotDotLoc, Expr *RHSVal,
+ SourceLocation ColonLoc) {
+ assert((LHSVal != 0) && "missing expression in case statement");
+
+ if (getCurFunction()->SwitchStack.empty()) {
+ Diag(CaseLoc, diag::err_case_not_in_switch);
+ return StmtError();
+ }
+
+ if (!getLangOpts().CPlusPlus0x) {
+ // C99 6.8.4.2p3: The expression shall be an integer constant.
+ // However, GCC allows any evaluatable integer expression.
+ if (!LHSVal->isTypeDependent() && !LHSVal->isValueDependent()) {
+ LHSVal = VerifyIntegerConstantExpression(LHSVal).take();
+ if (!LHSVal)
+ return StmtError();
+ }
+
+    // GCC case-range extension: the upper bound of the range shall also
+    // be an integer constant.
+
+ if (RHSVal && !RHSVal->isTypeDependent() && !RHSVal->isValueDependent()) {
+ RHSVal = VerifyIntegerConstantExpression(RHSVal).take();
+ // Recover from an error by just forgetting about it.
+ }
+ }
+
+ CaseStmt *CS = new (Context) CaseStmt(LHSVal, RHSVal, CaseLoc, DotDotDotLoc,
+ ColonLoc);
+ getCurFunction()->SwitchStack.back()->addSwitchCase(CS);
+ return Owned(CS);
+}
+
+/// ActOnCaseStmtBody - This installs a statement as the body of a case.
+void Sema::ActOnCaseStmtBody(Stmt *caseStmt, Stmt *SubStmt) {
+ DiagnoseUnusedExprResult(SubStmt);
+
+ CaseStmt *CS = static_cast<CaseStmt*>(caseStmt);
+ CS->setSubStmt(SubStmt);
+}
+
+StmtResult
+Sema::ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc,
+ Stmt *SubStmt, Scope *CurScope) {
+ DiagnoseUnusedExprResult(SubStmt);
+
+ if (getCurFunction()->SwitchStack.empty()) {
+ Diag(DefaultLoc, diag::err_default_not_in_switch);
+ return Owned(SubStmt);
+ }
+
+ DefaultStmt *DS = new (Context) DefaultStmt(DefaultLoc, ColonLoc, SubStmt);
+ getCurFunction()->SwitchStack.back()->addSwitchCase(DS);
+ return Owned(DS);
+}
+
+StmtResult
+Sema::ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
+ SourceLocation ColonLoc, Stmt *SubStmt) {
+
+ // If the label was multiply defined, reject it now.
+ if (TheDecl->getStmt()) {
+ Diag(IdentLoc, diag::err_redefinition_of_label) << TheDecl->getDeclName();
+ Diag(TheDecl->getLocation(), diag::note_previous_definition);
+ return Owned(SubStmt);
+ }
+
+ // Otherwise, things are good. Fill in the declaration and return it.
+ LabelStmt *LS = new (Context) LabelStmt(IdentLoc, TheDecl, SubStmt);
+ TheDecl->setStmt(LS);
+ if (!TheDecl->isGnuLocal())
+ TheDecl->setLocation(IdentLoc);
+ return Owned(LS);
+}
+
+StmtResult
+Sema::ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar,
+ Stmt *thenStmt, SourceLocation ElseLoc,
+ Stmt *elseStmt) {
+ ExprResult CondResult(CondVal.release());
+
+ VarDecl *ConditionVar = 0;
+ if (CondVar) {
+ ConditionVar = cast<VarDecl>(CondVar);
+ CondResult = CheckConditionVariable(ConditionVar, IfLoc, true);
+ if (CondResult.isInvalid())
+ return StmtError();
+ }
+ Expr *ConditionExpr = CondResult.takeAs<Expr>();
+ if (!ConditionExpr)
+ return StmtError();
+
+ DiagnoseUnusedExprResult(thenStmt);
+
+ if (!elseStmt) {
+ DiagnoseEmptyStmtBody(ConditionExpr->getLocEnd(), thenStmt,
+ diag::warn_empty_if_body);
+ }
+
+ DiagnoseUnusedExprResult(elseStmt);
+
+ return Owned(new (Context) IfStmt(Context, IfLoc, ConditionVar, ConditionExpr,
+ thenStmt, ElseLoc, elseStmt));
+}
+
+/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
+/// the specified width and sign. If an overflow occurs, detect it and emit
+/// the specified diagnostic.
+void Sema::ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &Val,
+ unsigned NewWidth, bool NewSign,
+ SourceLocation Loc,
+ unsigned DiagID) {
+ // Perform a conversion to the promoted condition type if needed.
+ if (NewWidth > Val.getBitWidth()) {
+ // If this is an extension, just do it.
+ Val = Val.extend(NewWidth);
+ Val.setIsSigned(NewSign);
+
+ // If the input was signed and negative and the output is
+ // unsigned, don't bother to warn: this is implementation-defined
+ // behavior.
+ // FIXME: Introduce a second, default-ignored warning for this case?
+ } else if (NewWidth < Val.getBitWidth()) {
+ // If this is a truncation, check for overflow.
+ llvm::APSInt ConvVal(Val);
+ ConvVal = ConvVal.trunc(NewWidth);
+ ConvVal.setIsSigned(NewSign);
+ ConvVal = ConvVal.extend(Val.getBitWidth());
+ ConvVal.setIsSigned(Val.isSigned());
+ if (ConvVal != Val)
+ Diag(Loc, DiagID) << Val.toString(10) << ConvVal.toString(10);
+
+ // Regardless of whether a diagnostic was emitted, really do the
+ // truncation.
+ Val = Val.trunc(NewWidth);
+ Val.setIsSigned(NewSign);
+ } else if (NewSign != Val.isSigned()) {
+ // Convert the sign to match the sign of the condition. This can cause
+ // overflow as well: unsigned(INTMIN)
+ // We don't diagnose this overflow, because it is implementation-defined
+ // behavior.
+ // FIXME: Introduce a second, default-ignored warning for this case?
+ llvm::APSInt OldVal(Val);
+ Val.setIsSigned(NewSign);
+ }
+}
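+
+// For illustration: converting the case value 300 to the 8-bit width of a
+// 'char' condition truncates it to 44, so the supplied diagnostic
+// (typically warn_case_value_overflow) is emitted with both values.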
+
+namespace {
+ struct CaseCompareFunctor {
+ bool operator()(const std::pair<llvm::APSInt, CaseStmt*> &LHS,
+ const llvm::APSInt &RHS) {
+ return LHS.first < RHS;
+ }
+ bool operator()(const std::pair<llvm::APSInt, CaseStmt*> &LHS,
+ const std::pair<llvm::APSInt, CaseStmt*> &RHS) {
+ return LHS.first < RHS.first;
+ }
+ bool operator()(const llvm::APSInt &LHS,
+ const std::pair<llvm::APSInt, CaseStmt*> &RHS) {
+ return LHS < RHS.first;
+ }
+ };
+}
+
+/// CmpCaseVals - Comparison predicate for sorting case values.
+///
+static bool CmpCaseVals(const std::pair<llvm::APSInt, CaseStmt*>& lhs,
+ const std::pair<llvm::APSInt, CaseStmt*>& rhs) {
+ if (lhs.first < rhs.first)
+ return true;
+
+ if (lhs.first == rhs.first &&
+ lhs.second->getCaseLoc().getRawEncoding()
+ < rhs.second->getCaseLoc().getRawEncoding())
+ return true;
+ return false;
+}
+
+/// CmpEnumVals - Comparison predicate for sorting enumeration values.
+///
+static bool CmpEnumVals(const std::pair<llvm::APSInt, EnumConstantDecl*>& lhs,
+ const std::pair<llvm::APSInt, EnumConstantDecl*>& rhs)
+{
+ return lhs.first < rhs.first;
+}
+
+/// EqEnumVals - Comparison predicate for uniquing enumeration values.
+///
+static bool EqEnumVals(const std::pair<llvm::APSInt, EnumConstantDecl*>& lhs,
+ const std::pair<llvm::APSInt, EnumConstantDecl*>& rhs)
+{
+ return lhs.first == rhs.first;
+}
+
+/// GetTypeBeforeIntegralPromotion - Returns the pre-promotion type of
+/// the potentially integral-promoted expression @p expr.
+static QualType GetTypeBeforeIntegralPromotion(Expr *&expr) {
+ if (ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(expr))
+ expr = cleanups->getSubExpr();
+ while (ImplicitCastExpr *impcast = dyn_cast<ImplicitCastExpr>(expr)) {
+ if (impcast->getCastKind() != CK_IntegralCast) break;
+ expr = impcast->getSubExpr();
+ }
+ return expr->getType();
+}
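+
+// For illustration: in 'switch ((char)c)' the controlling expression is
+// integrally promoted to 'int'; this helper strips the implicit
+// CK_IntegralCast and reports 'char', so case values are checked against
+// the pre-promotion width below.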
+
+StmtResult
+Sema::ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond,
+ Decl *CondVar) {
+ ExprResult CondResult;
+
+ VarDecl *ConditionVar = 0;
+ if (CondVar) {
+ ConditionVar = cast<VarDecl>(CondVar);
+ CondResult = CheckConditionVariable(ConditionVar, SourceLocation(), false);
+ if (CondResult.isInvalid())
+ return StmtError();
+
+ Cond = CondResult.release();
+ }
+
+ if (!Cond)
+ return StmtError();
+
+ CondResult
+ = ConvertToIntegralOrEnumerationType(SwitchLoc, Cond,
+ PDiag(diag::err_typecheck_statement_requires_integer),
+ PDiag(diag::err_switch_incomplete_class_type)
+ << Cond->getSourceRange(),
+ PDiag(diag::err_switch_explicit_conversion),
+ PDiag(diag::note_switch_conversion),
+ PDiag(diag::err_switch_multiple_conversions),
+ PDiag(diag::note_switch_conversion),
+ PDiag(0),
+ /*AllowScopedEnumerations*/ true);
+ if (CondResult.isInvalid()) return StmtError();
+ Cond = CondResult.take();
+
+ // C99 6.8.4.2p5 - Integer promotions are performed on the controlling expr.
+ CondResult = UsualUnaryConversions(Cond);
+ if (CondResult.isInvalid()) return StmtError();
+ Cond = CondResult.take();
+
+ if (!CondVar) {
+ CheckImplicitConversions(Cond, SwitchLoc);
+ CondResult = MaybeCreateExprWithCleanups(Cond);
+ if (CondResult.isInvalid())
+ return StmtError();
+ Cond = CondResult.take();
+ }
+
+ getCurFunction()->setHasBranchIntoScope();
+
+ SwitchStmt *SS = new (Context) SwitchStmt(Context, ConditionVar, Cond);
+ getCurFunction()->SwitchStack.push_back(SS);
+ return Owned(SS);
+}
+
+static void AdjustAPSInt(llvm::APSInt &Val, unsigned BitWidth, bool IsSigned) {
+ if (Val.getBitWidth() < BitWidth)
+ Val = Val.extend(BitWidth);
+ else if (Val.getBitWidth() > BitWidth)
+ Val = Val.trunc(BitWidth);
+ Val.setIsSigned(IsSigned);
+}
+
+StmtResult
+Sema::ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
+ Stmt *BodyStmt) {
+ SwitchStmt *SS = cast<SwitchStmt>(Switch);
+ assert(SS == getCurFunction()->SwitchStack.back() &&
+ "switch stack missing push/pop!");
+
+ SS->setBody(BodyStmt, SwitchLoc);
+ getCurFunction()->SwitchStack.pop_back();
+
+ Expr *CondExpr = SS->getCond();
+ if (!CondExpr) return StmtError();
+
+ QualType CondType = CondExpr->getType();
+
+ Expr *CondExprBeforePromotion = CondExpr;
+ QualType CondTypeBeforePromotion =
+ GetTypeBeforeIntegralPromotion(CondExprBeforePromotion);
+
+ // C++ 6.4.2.p2:
+ // Integral promotions are performed (on the switch condition).
+ //
+ // A case value unrepresentable by the original switch condition
+ // type (before the promotion) doesn't make sense, even when it can
+ // be represented by the promoted type. Therefore we need to find
+ // the pre-promotion type of the switch condition.
+ if (!CondExpr->isTypeDependent()) {
+ // We have already converted the expression to an integral or enumeration
+ // type, when we started the switch statement. If we don't have an
+ // appropriate type now, just return an error.
+ if (!CondType->isIntegralOrEnumerationType())
+ return StmtError();
+
+ if (CondExpr->isKnownToHaveBooleanValue()) {
+ // switch(bool_expr) {...} is often a programmer error, e.g.
+ // switch(n && mask) { ... } // Doh - should be "n & mask".
+ // One can always use an if statement instead of switch(bool_expr).
+ Diag(SwitchLoc, diag::warn_bool_switch_condition)
+ << CondExpr->getSourceRange();
+ }
+ }
+
+ // Get the bitwidth of the switched-on value before promotions. We must
+ // convert the integer case values to this width before comparison.
+ bool HasDependentValue
+ = CondExpr->isTypeDependent() || CondExpr->isValueDependent();
+ unsigned CondWidth
+ = HasDependentValue ? 0 : Context.getIntWidth(CondTypeBeforePromotion);
+ bool CondIsSigned
+ = CondTypeBeforePromotion->isSignedIntegerOrEnumerationType();
+
+ // Accumulate all of the case values in a vector so that we can sort them
+ // and detect duplicates. This vector contains the APInt for the case after
+ // it has been converted to the condition type.
+ typedef SmallVector<std::pair<llvm::APSInt, CaseStmt*>, 64> CaseValsTy;
+ CaseValsTy CaseVals;
+
+ // Keep track of any GNU case ranges we see. The APSInt is the low value.
+ typedef std::vector<std::pair<llvm::APSInt, CaseStmt*> > CaseRangesTy;
+ CaseRangesTy CaseRanges;
+
+ DefaultStmt *TheDefaultStmt = 0;
+
+ bool CaseListIsErroneous = false;
+
+ for (SwitchCase *SC = SS->getSwitchCaseList(); SC && !HasDependentValue;
+ SC = SC->getNextSwitchCase()) {
+
+ if (DefaultStmt *DS = dyn_cast<DefaultStmt>(SC)) {
+ if (TheDefaultStmt) {
+ Diag(DS->getDefaultLoc(), diag::err_multiple_default_labels_defined);
+ Diag(TheDefaultStmt->getDefaultLoc(), diag::note_duplicate_case_prev);
+
+ // FIXME: Remove the default statement from the switch block so that
+ // we'll return a valid AST. This requires recursing down the AST and
+ // finding it, not something we are set up to do right now. For now,
+ // just lop the entire switch stmt out of the AST.
+ CaseListIsErroneous = true;
+ }
+ TheDefaultStmt = DS;
+
+ } else {
+ CaseStmt *CS = cast<CaseStmt>(SC);
+
+ Expr *Lo = CS->getLHS();
+
+ if (Lo->isTypeDependent() || Lo->isValueDependent()) {
+ HasDependentValue = true;
+ break;
+ }
+
+ llvm::APSInt LoVal;
+
+ if (getLangOpts().CPlusPlus0x) {
+ // C++11 [stmt.switch]p2: the constant-expression shall be a converted
+ // constant expression of the promoted type of the switch condition.
+ ExprResult ConvLo =
+ CheckConvertedConstantExpression(Lo, CondType, LoVal, CCEK_CaseValue);
+ if (ConvLo.isInvalid()) {
+ CaseListIsErroneous = true;
+ continue;
+ }
+ Lo = ConvLo.take();
+ } else {
+        // We already verified that the expression has an i-c-e value (C99
+ // 6.8.4.2p3) - get that value now.
+ LoVal = Lo->EvaluateKnownConstInt(Context);
+
+ // If the LHS is not the same type as the condition, insert an implicit
+ // cast.
+ Lo = DefaultLvalueConversion(Lo).take();
+ Lo = ImpCastExprToType(Lo, CondType, CK_IntegralCast).take();
+ }
+
+ // Convert the value to the same width/sign as the condition had prior to
+ // integral promotions.
+ //
+ // FIXME: This causes us to reject valid code:
+ // switch ((char)c) { case 256: case 0: return 0; }
+ // Here we claim there is a duplicated condition value, but there is not.
+ ConvertIntegerToTypeWarnOnOverflow(LoVal, CondWidth, CondIsSigned,
+ Lo->getLocStart(),
+ diag::warn_case_value_overflow);
+
+ CS->setLHS(Lo);
+
+ // If this is a case range, remember it in CaseRanges, otherwise CaseVals.
+ if (CS->getRHS()) {
+ if (CS->getRHS()->isTypeDependent() ||
+ CS->getRHS()->isValueDependent()) {
+ HasDependentValue = true;
+ break;
+ }
+ CaseRanges.push_back(std::make_pair(LoVal, CS));
+ } else
+ CaseVals.push_back(std::make_pair(LoVal, CS));
+ }
+ }
+
+ if (!HasDependentValue) {
+ // If we don't have a default statement, check whether the
+ // condition is constant.
+ llvm::APSInt ConstantCondValue;
+ bool HasConstantCond = false;
+ if (!HasDependentValue && !TheDefaultStmt) {
+ HasConstantCond
+ = CondExprBeforePromotion->EvaluateAsInt(ConstantCondValue, Context,
+ Expr::SE_AllowSideEffects);
+ assert(!HasConstantCond ||
+ (ConstantCondValue.getBitWidth() == CondWidth &&
+ ConstantCondValue.isSigned() == CondIsSigned));
+ }
+ bool ShouldCheckConstantCond = HasConstantCond;
+
+ // Sort all the scalar case values so we can easily detect duplicates.
+ std::stable_sort(CaseVals.begin(), CaseVals.end(), CmpCaseVals);
+
+ if (!CaseVals.empty()) {
+ for (unsigned i = 0, e = CaseVals.size(); i != e; ++i) {
+ if (ShouldCheckConstantCond &&
+ CaseVals[i].first == ConstantCondValue)
+ ShouldCheckConstantCond = false;
+
+ if (i != 0 && CaseVals[i].first == CaseVals[i-1].first) {
+ // If we have a duplicate, report it.
+ Diag(CaseVals[i].second->getLHS()->getLocStart(),
+ diag::err_duplicate_case) << CaseVals[i].first.toString(10);
+ Diag(CaseVals[i-1].second->getLHS()->getLocStart(),
+ diag::note_duplicate_case_prev);
+ // FIXME: We really want to remove the bogus case stmt from the
+ // substmt, but we have no way to do this right now.
+ CaseListIsErroneous = true;
+ }
+ }
+ }
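+
+    // E.g. "switch (n) { case 7: ...; case 7: ...; }" reaches the check above:
+    // the second 'case 7' gets err_duplicate_case and the first gets the note.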
+
+ // Detect duplicate case ranges, which usually don't exist at all in
+ // the first place.
+ if (!CaseRanges.empty()) {
+ // Sort all the case ranges by their low value so we can easily detect
+ // overlaps between ranges.
+ std::stable_sort(CaseRanges.begin(), CaseRanges.end());
+
+ // Scan the ranges, computing the high values and removing empty ranges.
+ std::vector<llvm::APSInt> HiVals;
+ for (unsigned i = 0, e = CaseRanges.size(); i != e; ++i) {
+ llvm::APSInt &LoVal = CaseRanges[i].first;
+ CaseStmt *CR = CaseRanges[i].second;
+ Expr *Hi = CR->getRHS();
+ llvm::APSInt HiVal;
+
+ if (getLangOpts().CPlusPlus0x) {
+ // C++11 [stmt.switch]p2: the constant-expression shall be a converted
+ // constant expression of the promoted type of the switch condition.
+ ExprResult ConvHi =
+ CheckConvertedConstantExpression(Hi, CondType, HiVal,
+ CCEK_CaseValue);
+ if (ConvHi.isInvalid()) {
+ CaseListIsErroneous = true;
+ continue;
+ }
+ Hi = ConvHi.take();
+ } else {
+ HiVal = Hi->EvaluateKnownConstInt(Context);
+
+ // If the RHS is not the same type as the condition, insert an
+ // implicit cast.
+ Hi = DefaultLvalueConversion(Hi).take();
+ Hi = ImpCastExprToType(Hi, CondType, CK_IntegralCast).take();
+ }
+
+ // Convert the value to the same width/sign as the condition.
+ ConvertIntegerToTypeWarnOnOverflow(HiVal, CondWidth, CondIsSigned,
+ Hi->getLocStart(),
+ diag::warn_case_value_overflow);
+
+ CR->setRHS(Hi);
+
+ // If the low value is bigger than the high value, the case is empty.
+ if (LoVal > HiVal) {
+ Diag(CR->getLHS()->getLocStart(), diag::warn_case_empty_range)
+ << SourceRange(CR->getLHS()->getLocStart(),
+ Hi->getLocEnd());
+ CaseRanges.erase(CaseRanges.begin()+i);
+ --i, --e;
+ continue;
+ }
+
+ if (ShouldCheckConstantCond &&
+ LoVal <= ConstantCondValue &&
+ ConstantCondValue <= HiVal)
+ ShouldCheckConstantCond = false;
+
+ HiVals.push_back(HiVal);
+ }
+
+ // Rescan the ranges, looking for overlap with singleton values and other
+ // ranges. Since the range list is sorted, we only need to compare case
+ // ranges with their neighbors.
+ for (unsigned i = 0, e = CaseRanges.size(); i != e; ++i) {
+ llvm::APSInt &CRLo = CaseRanges[i].first;
+ llvm::APSInt &CRHi = HiVals[i];
+ CaseStmt *CR = CaseRanges[i].second;
+
+ // Check to see whether the case range overlaps with any
+ // singleton cases.
+ CaseStmt *OverlapStmt = 0;
+ llvm::APSInt OverlapVal(32);
+
+ // Find the smallest value >= the lower bound. If I is in the
+ // case range, then we have overlap.
+ CaseValsTy::iterator I = std::lower_bound(CaseVals.begin(),
+ CaseVals.end(), CRLo,
+ CaseCompareFunctor());
+ if (I != CaseVals.end() && I->first < CRHi) {
+ OverlapVal = I->first; // Found overlap with scalar.
+ OverlapStmt = I->second;
+ }
+
+ // Find the smallest value bigger than the upper bound.
+ I = std::upper_bound(I, CaseVals.end(), CRHi, CaseCompareFunctor());
+ if (I != CaseVals.begin() && (I-1)->first >= CRLo) {
+ OverlapVal = (I-1)->first; // Found overlap with scalar.
+ OverlapStmt = (I-1)->second;
+ }
+
+        // Check to see if this case range overlaps with the preceding
+        // case range.
+ if (i && CRLo <= HiVals[i-1]) {
+ OverlapVal = HiVals[i-1]; // Found overlap with range.
+ OverlapStmt = CaseRanges[i-1].second;
+ }
+
+ if (OverlapStmt) {
+ // If we have a duplicate, report it.
+ Diag(CR->getLHS()->getLocStart(), diag::err_duplicate_case)
+ << OverlapVal.toString(10);
+ Diag(OverlapStmt->getLHS()->getLocStart(),
+ diag::note_duplicate_case_prev);
+ // FIXME: We really want to remove the bogus case stmt from the
+ // substmt, but we have no way to do this right now.
+ CaseListIsErroneous = true;
+ }
+ }
+ }
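+
+    // E.g. with GNU case ranges, "case 1 ... 5:" followed in the same switch
+    // by "case 3:" or by "case 4 ... 8:" is caught by the overlap checks
+    // above.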
+
+ // Complain if we have a constant condition and we didn't find a match.
+ if (!CaseListIsErroneous && ShouldCheckConstantCond) {
+ // TODO: it would be nice if we printed enums as enums, chars as
+ // chars, etc.
+ Diag(CondExpr->getExprLoc(), diag::warn_missing_case_for_condition)
+ << ConstantCondValue.toString(10)
+ << CondExpr->getSourceRange();
+ }
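+
+    // E.g. "switch (3) { case 1: case 2: break; }" with no 'default:'
+    // triggers the warning above: the constant condition matches no case.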
+
+    // Check to see if the switch is over an enum and handles all of its
+    // values. We only issue a warning if there is no 'default:', but
+    // we still do the analysis to preserve this information in the AST
+    // (which can be used by flow-based analyses).
+ //
+ const EnumType *ET = CondTypeBeforePromotion->getAs<EnumType>();
+
+    // Skip the enum-coverage analysis if the case list is erroneous or the
+    // condition is a known constant.
+ if (!CaseListIsErroneous && !HasConstantCond && ET) {
+ const EnumDecl *ED = ET->getDecl();
+ typedef SmallVector<std::pair<llvm::APSInt, EnumConstantDecl*>, 64>
+ EnumValsTy;
+ EnumValsTy EnumVals;
+
+ // Gather all enum values, set their type and sort them,
+ // allowing easier comparison with CaseVals.
+ for (EnumDecl::enumerator_iterator EDI = ED->enumerator_begin();
+ EDI != ED->enumerator_end(); ++EDI) {
+ llvm::APSInt Val = EDI->getInitVal();
+ AdjustAPSInt(Val, CondWidth, CondIsSigned);
+ EnumVals.push_back(std::make_pair(Val, *EDI));
+ }
+ std::stable_sort(EnumVals.begin(), EnumVals.end(), CmpEnumVals);
+ EnumValsTy::iterator EIend =
+ std::unique(EnumVals.begin(), EnumVals.end(), EqEnumVals);
+
+ // See which case values aren't in enum.
+ EnumValsTy::const_iterator EI = EnumVals.begin();
+ for (CaseValsTy::const_iterator CI = CaseVals.begin();
+ CI != CaseVals.end(); CI++) {
+ while (EI != EIend && EI->first < CI->first)
+ EI++;
+ if (EI == EIend || EI->first > CI->first)
+ Diag(CI->second->getLHS()->getExprLoc(), diag::warn_not_in_enum)
+ << CondTypeBeforePromotion;
+ }
+ // See which of case ranges aren't in enum
+ EI = EnumVals.begin();
+ for (CaseRangesTy::const_iterator RI = CaseRanges.begin();
+ RI != CaseRanges.end() && EI != EIend; RI++) {
+ while (EI != EIend && EI->first < RI->first)
+ EI++;
+
+ if (EI == EIend || EI->first != RI->first) {
+ Diag(RI->second->getLHS()->getExprLoc(), diag::warn_not_in_enum)
+ << CondTypeBeforePromotion;
+ }
+
+ llvm::APSInt Hi =
+ RI->second->getRHS()->EvaluateKnownConstInt(Context);
+ AdjustAPSInt(Hi, CondWidth, CondIsSigned);
+ while (EI != EIend && EI->first < Hi)
+ EI++;
+ if (EI == EIend || EI->first != Hi)
+ Diag(RI->second->getRHS()->getExprLoc(), diag::warn_not_in_enum)
+ << CondTypeBeforePromotion;
+ }
+
+ // Check which enum vals aren't in switch
+ CaseValsTy::const_iterator CI = CaseVals.begin();
+ CaseRangesTy::const_iterator RI = CaseRanges.begin();
+ bool hasCasesNotInSwitch = false;
+
+ SmallVector<DeclarationName,8> UnhandledNames;
+
+ for (EI = EnumVals.begin(); EI != EIend; EI++){
+ // Drop unneeded case values
+ llvm::APSInt CIVal;
+ while (CI != CaseVals.end() && CI->first < EI->first)
+ CI++;
+
+ if (CI != CaseVals.end() && CI->first == EI->first)
+ continue;
+
+ // Drop unneeded case ranges
+ for (; RI != CaseRanges.end(); RI++) {
+ llvm::APSInt Hi =
+ RI->second->getRHS()->EvaluateKnownConstInt(Context);
+ AdjustAPSInt(Hi, CondWidth, CondIsSigned);
+ if (EI->first <= Hi)
+ break;
+ }
+
+ if (RI == CaseRanges.end() || EI->first < RI->first) {
+ hasCasesNotInSwitch = true;
+ UnhandledNames.push_back(EI->second->getDeclName());
+ }
+ }
+
+ if (TheDefaultStmt && UnhandledNames.empty())
+ Diag(TheDefaultStmt->getDefaultLoc(), diag::warn_unreachable_default);
+
+ // Produce a nice diagnostic if multiple values aren't handled.
+ switch (UnhandledNames.size()) {
+ case 0: break;
+ case 1:
+ Diag(CondExpr->getExprLoc(), TheDefaultStmt
+ ? diag::warn_def_missing_case1 : diag::warn_missing_case1)
+ << UnhandledNames[0];
+ break;
+ case 2:
+ Diag(CondExpr->getExprLoc(), TheDefaultStmt
+ ? diag::warn_def_missing_case2 : diag::warn_missing_case2)
+ << UnhandledNames[0] << UnhandledNames[1];
+ break;
+ case 3:
+ Diag(CondExpr->getExprLoc(), TheDefaultStmt
+ ? diag::warn_def_missing_case3 : diag::warn_missing_case3)
+ << UnhandledNames[0] << UnhandledNames[1] << UnhandledNames[2];
+ break;
+ default:
+ Diag(CondExpr->getExprLoc(), TheDefaultStmt
+ ? diag::warn_def_missing_cases : diag::warn_missing_cases)
+ << (unsigned)UnhandledNames.size()
+ << UnhandledNames[0] << UnhandledNames[1] << UnhandledNames[2];
+ break;
+ }
+
+ if (!hasCasesNotInSwitch)
+ SS->setAllEnumCasesCovered();
+ }
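+
+    // E.g. for "enum Color { Red, Green, Blue }", a switch over a Color that
+    // only handles Red warns above that Green and Blue are missing; with a
+    // 'default:' present the warn_def_missing_* forms are used instead, and a
+    // 'default:' left over once every enumerator is handled is flagged as
+    // unreachable.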
+ }
+
+ DiagnoseEmptyStmtBody(CondExpr->getLocEnd(), BodyStmt,
+ diag::warn_empty_switch_body);
+
+  // FIXME: If the case list was broken in some way, we don't have a good system
+ // to patch it up. Instead, just return the whole substmt as broken.
+ if (CaseListIsErroneous)
+ return StmtError();
+
+ return Owned(SS);
+}
+
+StmtResult
+Sema::ActOnWhileStmt(SourceLocation WhileLoc, FullExprArg Cond,
+ Decl *CondVar, Stmt *Body) {
+ ExprResult CondResult(Cond.release());
+
+ VarDecl *ConditionVar = 0;
+ if (CondVar) {
+ ConditionVar = cast<VarDecl>(CondVar);
+ CondResult = CheckConditionVariable(ConditionVar, WhileLoc, true);
+ if (CondResult.isInvalid())
+ return StmtError();
+ }
+ Expr *ConditionExpr = CondResult.take();
+ if (!ConditionExpr)
+ return StmtError();
+
+ DiagnoseUnusedExprResult(Body);
+
+ if (isa<NullStmt>(Body))
+ getCurCompoundScope().setHasEmptyLoopBodies();
+
+ return Owned(new (Context) WhileStmt(Context, ConditionVar, ConditionExpr,
+ Body, WhileLoc));
+}
+
+StmtResult
+Sema::ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
+ SourceLocation WhileLoc, SourceLocation CondLParen,
+ Expr *Cond, SourceLocation CondRParen) {
+ assert(Cond && "ActOnDoStmt(): missing expression");
+
+ ExprResult CondResult = CheckBooleanCondition(Cond, DoLoc);
+  if (CondResult.isInvalid())
+ return StmtError();
+ Cond = CondResult.take();
+
+ CheckImplicitConversions(Cond, DoLoc);
+ CondResult = MaybeCreateExprWithCleanups(Cond);
+ if (CondResult.isInvalid())
+ return StmtError();
+ Cond = CondResult.take();
+
+ DiagnoseUnusedExprResult(Body);
+
+ return Owned(new (Context) DoStmt(Body, Cond, DoLoc, WhileLoc, CondRParen));
+}
+
+StmtResult
+Sema::ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
+ Stmt *First, FullExprArg second, Decl *secondVar,
+ FullExprArg third,
+ SourceLocation RParenLoc, Stmt *Body) {
+ if (!getLangOpts().CPlusPlus) {
+ if (DeclStmt *DS = dyn_cast_or_null<DeclStmt>(First)) {
+ // C99 6.8.5p3: The declaration part of a 'for' statement shall only
+ // declare identifiers for objects having storage class 'auto' or
+ // 'register'.
+ for (DeclStmt::decl_iterator DI=DS->decl_begin(), DE=DS->decl_end();
+ DI!=DE; ++DI) {
+ VarDecl *VD = dyn_cast<VarDecl>(*DI);
+ if (VD && VD->isLocalVarDecl() && !VD->hasLocalStorage())
+ VD = 0;
+ if (VD == 0)
+ Diag((*DI)->getLocation(), diag::err_non_variable_decl_in_for);
+ // FIXME: mark decl erroneous!
+ }
+ }
+ }
+
+ ExprResult SecondResult(second.release());
+ VarDecl *ConditionVar = 0;
+ if (secondVar) {
+ ConditionVar = cast<VarDecl>(secondVar);
+ SecondResult = CheckConditionVariable(ConditionVar, ForLoc, true);
+ if (SecondResult.isInvalid())
+ return StmtError();
+ }
+
+ Expr *Third = third.release().takeAs<Expr>();
+
+ DiagnoseUnusedExprResult(First);
+ DiagnoseUnusedExprResult(Third);
+ DiagnoseUnusedExprResult(Body);
+
+ if (isa<NullStmt>(Body))
+ getCurCompoundScope().setHasEmptyLoopBodies();
+
+ return Owned(new (Context) ForStmt(Context, First,
+ SecondResult.take(), ConditionVar,
+ Third, Body, ForLoc, LParenLoc,
+ RParenLoc));
+}
+
+/// In an Objective C collection iteration statement:
+/// for (x in y)
+/// x can be an arbitrary l-value expression. Bind it up as a
+/// full-expression.
+StmtResult Sema::ActOnForEachLValueExpr(Expr *E) {
+ // Reduce placeholder expressions here. Note that this rejects the
+ // use of pseudo-object l-values in this position.
+ ExprResult result = CheckPlaceholderExpr(E);
+ if (result.isInvalid()) return StmtError();
+ E = result.take();
+
+ CheckImplicitConversions(E);
+
+ result = MaybeCreateExprWithCleanups(E);
+ if (result.isInvalid()) return StmtError();
+
+ return Owned(static_cast<Stmt*>(result.take()));
+}
+
+ExprResult
+Sema::ActOnObjCForCollectionOperand(SourceLocation forLoc, Expr *collection) {
+ assert(collection);
+
+ // Bail out early if we've got a type-dependent expression.
+ if (collection->isTypeDependent()) return Owned(collection);
+
+ // Perform normal l-value conversion.
+ ExprResult result = DefaultFunctionArrayLvalueConversion(collection);
+ if (result.isInvalid())
+ return ExprError();
+ collection = result.take();
+
+ // The operand needs to have object-pointer type.
+ // TODO: should we do a contextual conversion?
+ const ObjCObjectPointerType *pointerType =
+ collection->getType()->getAs<ObjCObjectPointerType>();
+ if (!pointerType)
+ return Diag(forLoc, diag::err_collection_expr_type)
+ << collection->getType() << collection->getSourceRange();
+
+ // Check that the operand provides
+ // - countByEnumeratingWithState:objects:count:
+ const ObjCObjectType *objectType = pointerType->getObjectType();
+ ObjCInterfaceDecl *iface = objectType->getInterface();
+
+ // If we have a forward-declared type, we can't do this check.
+ // Under ARC, it is an error not to have a forward-declared class.
+ if (iface &&
+ RequireCompleteType(forLoc, QualType(objectType, 0),
+ getLangOpts().ObjCAutoRefCount
+ ? PDiag(diag::err_arc_collection_forward)
+ << collection->getSourceRange()
+ : PDiag(0))) {
+ // Otherwise, if we have any useful type information, check that
+ // the type declares the appropriate method.
+ } else if (iface || !objectType->qual_empty()) {
+ IdentifierInfo *selectorIdents[] = {
+ &Context.Idents.get("countByEnumeratingWithState"),
+ &Context.Idents.get("objects"),
+ &Context.Idents.get("count")
+ };
+ Selector selector = Context.Selectors.getSelector(3, &selectorIdents[0]);
+
+ ObjCMethodDecl *method = 0;
+
+ // If there's an interface, look in both the public and private APIs.
+ if (iface) {
+ method = iface->lookupInstanceMethod(selector);
+ if (!method) method = LookupPrivateInstanceMethod(selector, iface);
+ }
+
+ // Also check protocol qualifiers.
+ if (!method)
+ method = LookupMethodInQualifiedType(selector, pointerType,
+ /*instance*/ true);
+
+ // If we didn't find it anywhere, give up.
+ if (!method) {
+ Diag(forLoc, diag::warn_collection_expr_type)
+ << collection->getType() << selector << collection->getSourceRange();
+ }
+
+ // TODO: check for an incompatible signature?
+ }
+
+ // Wrap up any cleanups in the expression.
+ return Owned(MaybeCreateExprWithCleanups(collection));
+}
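+
+// For illustration, in "for (id x in array) { ... }" the collection operand
+// 'array' must have Objective-C object-pointer type; when its class (or its
+// protocol qualifiers) is known but does not declare
+// countByEnumeratingWithState:objects:count:, only a warning is emitted above.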
+
+StmtResult
+Sema::ActOnObjCForCollectionStmt(SourceLocation ForLoc,
+ SourceLocation LParenLoc,
+ Stmt *First, Expr *Second,
+ SourceLocation RParenLoc, Stmt *Body) {
+ if (First) {
+ QualType FirstType;
+ if (DeclStmt *DS = dyn_cast<DeclStmt>(First)) {
+ if (!DS->isSingleDecl())
+ return StmtError(Diag((*DS->decl_begin())->getLocation(),
+ diag::err_toomany_element_decls));
+
+ VarDecl *D = cast<VarDecl>(DS->getSingleDecl());
+ FirstType = D->getType();
+ // C99 6.8.5p3: The declaration part of a 'for' statement shall only
+ // declare identifiers for objects having storage class 'auto' or
+ // 'register'.
+ if (!D->hasLocalStorage())
+ return StmtError(Diag(D->getLocation(),
+ diag::err_non_variable_decl_in_for));
+ } else {
+ Expr *FirstE = cast<Expr>(First);
+ if (!FirstE->isTypeDependent() && !FirstE->isLValue())
+ return StmtError(Diag(First->getLocStart(),
+ diag::err_selector_element_not_lvalue)
+ << First->getSourceRange());
+
+ FirstType = static_cast<Expr*>(First)->getType();
+ }
+ if (!FirstType->isDependentType() &&
+ !FirstType->isObjCObjectPointerType() &&
+ !FirstType->isBlockPointerType())
+ Diag(ForLoc, diag::err_selector_element_type)
+ << FirstType << First->getSourceRange();
+ }
+
+ return Owned(new (Context) ObjCForCollectionStmt(First, Second, Body,
+ ForLoc, RParenLoc));
+}
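+
+// E.g. "for (NSString *s in arr) body;" declares 's' via the DeclStmt path
+// above; declaring more than one variable, using static storage, or using a
+// non-lvalue expression as the element are all rejected.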
+
+namespace {
+
+enum BeginEndFunction {
+ BEF_begin,
+ BEF_end
+};
+
+/// Build a variable declaration for a for-range statement.
+static VarDecl *BuildForRangeVarDecl(Sema &SemaRef, SourceLocation Loc,
+ QualType Type, const char *Name) {
+ DeclContext *DC = SemaRef.CurContext;
+ IdentifierInfo *II = &SemaRef.PP.getIdentifierTable().get(Name);
+ TypeSourceInfo *TInfo = SemaRef.Context.getTrivialTypeSourceInfo(Type, Loc);
+ VarDecl *Decl = VarDecl::Create(SemaRef.Context, DC, Loc, Loc, II, Type,
+ TInfo, SC_Auto, SC_None);
+ Decl->setImplicit();
+ return Decl;
+}
+
+/// Finish building a variable declaration for a for-range statement.
+/// \return true if an error occurs.
+static bool FinishForRangeVarDecl(Sema &SemaRef, VarDecl *Decl, Expr *Init,
+ SourceLocation Loc, int diag) {
+ // Deduce the type for the iterator variable now rather than leaving it to
+ // AddInitializerToDecl, so we can produce a more suitable diagnostic.
+ TypeSourceInfo *InitTSI = 0;
+ if ((!isa<InitListExpr>(Init) && Init->getType()->isVoidType()) ||
+ SemaRef.DeduceAutoType(Decl->getTypeSourceInfo(), Init, InitTSI) ==
+ Sema::DAR_Failed)
+ SemaRef.Diag(Loc, diag) << Init->getType();
+ if (!InitTSI) {
+ Decl->setInvalidDecl();
+ return true;
+ }
+ Decl->setTypeSourceInfo(InitTSI);
+ Decl->setType(InitTSI->getType());
+
+ // In ARC, infer lifetime.
+ // FIXME: ARC may want to turn this into 'const __unsafe_unretained' if
+ // we're doing the equivalent of fast iteration.
+ if (SemaRef.getLangOpts().ObjCAutoRefCount &&
+ SemaRef.inferObjCARCLifetime(Decl))
+ Decl->setInvalidDecl();
+
+ SemaRef.AddInitializerToDecl(Decl, Init, /*DirectInit=*/false,
+ /*TypeMayContainAuto=*/false);
+ SemaRef.FinalizeDeclaration(Decl);
+ SemaRef.CurContext->addHiddenDecl(Decl);
+ return false;
+}
+
+/// Produce a note indicating which begin/end function was implicitly called
+/// by a C++0x for-range statement. This is often not obvious from the code,
+/// nor from the diagnostics produced when analysing the implicit expressions
+/// required in a for-range statement.
+void NoteForRangeBeginEndFunction(Sema &SemaRef, Expr *E,
+ BeginEndFunction BEF) {
+ CallExpr *CE = dyn_cast<CallExpr>(E);
+ if (!CE)
+ return;
+ FunctionDecl *D = dyn_cast<FunctionDecl>(CE->getCalleeDecl());
+ if (!D)
+ return;
+ SourceLocation Loc = D->getLocation();
+
+ std::string Description;
+ bool IsTemplate = false;
+ if (FunctionTemplateDecl *FunTmpl = D->getPrimaryTemplate()) {
+ Description = SemaRef.getTemplateArgumentBindingsText(
+ FunTmpl->getTemplateParameters(), *D->getTemplateSpecializationArgs());
+ IsTemplate = true;
+ }
+
+ SemaRef.Diag(Loc, diag::note_for_range_begin_end)
+ << BEF << IsTemplate << Description << E->getType();
+}
+
+/// Build a call to 'begin' or 'end' for a C++0x for-range statement. If the
+/// given LookupResult is non-empty, it is assumed to describe a member which
+/// will be invoked. Otherwise, the function will be found via argument
+/// dependent lookup.
+static ExprResult BuildForRangeBeginEndCall(Sema &SemaRef, Scope *S,
+ SourceLocation Loc,
+ VarDecl *Decl,
+ BeginEndFunction BEF,
+ const DeclarationNameInfo &NameInfo,
+ LookupResult &MemberLookup,
+ Expr *Range) {
+ ExprResult CallExpr;
+ if (!MemberLookup.empty()) {
+ ExprResult MemberRef =
+ SemaRef.BuildMemberReferenceExpr(Range, Range->getType(), Loc,
+ /*IsPtr=*/false, CXXScopeSpec(),
+ /*TemplateKWLoc=*/SourceLocation(),
+ /*FirstQualifierInScope=*/0,
+ MemberLookup,
+ /*TemplateArgs=*/0);
+ if (MemberRef.isInvalid())
+ return ExprError();
+ CallExpr = SemaRef.ActOnCallExpr(S, MemberRef.get(), Loc, MultiExprArg(),
+ Loc, 0);
+ if (CallExpr.isInvalid())
+ return ExprError();
+ } else {
+ UnresolvedSet<0> FoundNames;
+ // C++0x [stmt.ranged]p1: For the purposes of this name lookup, namespace
+ // std is an associated namespace.
+ UnresolvedLookupExpr *Fn =
+ UnresolvedLookupExpr::Create(SemaRef.Context, /*NamingClass=*/0,
+ NestedNameSpecifierLoc(), NameInfo,
+ /*NeedsADL=*/true, /*Overloaded=*/false,
+ FoundNames.begin(), FoundNames.end(),
+ /*LookInStdNamespace=*/true);
+ CallExpr = SemaRef.BuildOverloadedCallExpr(S, Fn, Fn, Loc, &Range, 1, Loc,
+ 0, /*AllowTypoCorrection=*/false);
+ if (CallExpr.isInvalid()) {
+ SemaRef.Diag(Range->getLocStart(), diag::note_for_range_type)
+ << Range->getType();
+ return ExprError();
+ }
+ }
+ if (FinishForRangeVarDecl(SemaRef, Decl, CallExpr.get(), Loc,
+ diag::err_for_range_iter_deduction_failure)) {
+ NoteForRangeBeginEndFunction(SemaRef, CallExpr.get(), BEF);
+ return ExprError();
+ }
+ return CallExpr;
+}
+
+}
+
+/// ActOnCXXForRangeStmt - Check and build a C++0x for-range statement.
+///
+/// C++0x [stmt.ranged]:
+/// A range-based for statement is equivalent to
+///
+/// {
+/// auto && __range = range-init;
+/// for ( auto __begin = begin-expr,
+/// __end = end-expr;
+/// __begin != __end;
+/// ++__begin ) {
+/// for-range-declaration = *__begin;
+/// statement
+/// }
+/// }
+///
+/// The body of the loop is not available yet, since it cannot be analysed until
+/// we have determined the type of the for-range-declaration.
+StmtResult
+Sema::ActOnCXXForRangeStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
+ Stmt *First, SourceLocation ColonLoc, Expr *Range,
+ SourceLocation RParenLoc) {
+ if (!First || !Range)
+ return StmtError();
+
+ DeclStmt *DS = dyn_cast<DeclStmt>(First);
+ assert(DS && "first part of for range not a decl stmt");
+
+ if (!DS->isSingleDecl()) {
+ Diag(DS->getStartLoc(), diag::err_type_defined_in_for_range);
+ return StmtError();
+ }
+ if (DS->getSingleDecl()->isInvalidDecl())
+ return StmtError();
+
+ if (DiagnoseUnexpandedParameterPack(Range, UPPC_Expression))
+ return StmtError();
+
+ // Build auto && __range = range-init
+ SourceLocation RangeLoc = Range->getLocStart();
+ VarDecl *RangeVar = BuildForRangeVarDecl(*this, RangeLoc,
+ Context.getAutoRRefDeductType(),
+ "__range");
+ if (FinishForRangeVarDecl(*this, RangeVar, Range, RangeLoc,
+ diag::err_for_range_deduction_failure))
+ return StmtError();
+
+ // Claim the type doesn't contain auto: we've already done the checking.
+ DeclGroupPtrTy RangeGroup =
+ BuildDeclaratorGroup((Decl**)&RangeVar, 1, /*TypeMayContainAuto=*/false);
+ StmtResult RangeDecl = ActOnDeclStmt(RangeGroup, RangeLoc, RangeLoc);
+ if (RangeDecl.isInvalid())
+ return StmtError();
+
+ return BuildCXXForRangeStmt(ForLoc, ColonLoc, RangeDecl.get(),
+ /*BeginEndDecl=*/0, /*Cond=*/0, /*Inc=*/0, DS,
+ RParenLoc);
+}
+
+/// BuildCXXForRangeStmt - Build or instantiate a C++0x for-range statement.
+StmtResult
+Sema::BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc,
+ Stmt *RangeDecl, Stmt *BeginEnd, Expr *Cond,
+ Expr *Inc, Stmt *LoopVarDecl,
+ SourceLocation RParenLoc) {
+ Scope *S = getCurScope();
+
+ DeclStmt *RangeDS = cast<DeclStmt>(RangeDecl);
+ VarDecl *RangeVar = cast<VarDecl>(RangeDS->getSingleDecl());
+ QualType RangeVarType = RangeVar->getType();
+
+ DeclStmt *LoopVarDS = cast<DeclStmt>(LoopVarDecl);
+ VarDecl *LoopVar = cast<VarDecl>(LoopVarDS->getSingleDecl());
+
+ StmtResult BeginEndDecl = BeginEnd;
+ ExprResult NotEqExpr = Cond, IncrExpr = Inc;
+
+ if (!BeginEndDecl.get() && !RangeVarType->isDependentType()) {
+ SourceLocation RangeLoc = RangeVar->getLocation();
+
+ const QualType RangeVarNonRefType = RangeVarType.getNonReferenceType();
+
+ ExprResult BeginRangeRef = BuildDeclRefExpr(RangeVar, RangeVarNonRefType,
+ VK_LValue, ColonLoc);
+ if (BeginRangeRef.isInvalid())
+ return StmtError();
+
+ ExprResult EndRangeRef = BuildDeclRefExpr(RangeVar, RangeVarNonRefType,
+ VK_LValue, ColonLoc);
+ if (EndRangeRef.isInvalid())
+ return StmtError();
+
+ QualType AutoType = Context.getAutoDeductType();
+ Expr *Range = RangeVar->getInit();
+ if (!Range)
+ return StmtError();
+ QualType RangeType = Range->getType();
+
+ if (RequireCompleteType(RangeLoc, RangeType,
+ PDiag(diag::err_for_range_incomplete_type)))
+ return StmtError();
+
+ // Build auto __begin = begin-expr, __end = end-expr.
+ VarDecl *BeginVar = BuildForRangeVarDecl(*this, ColonLoc, AutoType,
+ "__begin");
+ VarDecl *EndVar = BuildForRangeVarDecl(*this, ColonLoc, AutoType,
+ "__end");
+
+ // Build begin-expr and end-expr and attach to __begin and __end variables.
+ ExprResult BeginExpr, EndExpr;
+ if (const ArrayType *UnqAT = RangeType->getAsArrayTypeUnsafe()) {
+ // - if _RangeT is an array type, begin-expr and end-expr are __range and
+ // __range + __bound, respectively, where __bound is the array bound. If
+ // _RangeT is an array of unknown size or an array of incomplete type,
+ // the program is ill-formed;
+
+ // begin-expr is __range.
+ BeginExpr = BeginRangeRef;
+ if (FinishForRangeVarDecl(*this, BeginVar, BeginRangeRef.get(), ColonLoc,
+ diag::err_for_range_iter_deduction_failure)) {
+ NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
+ return StmtError();
+ }
+
+ // Find the array bound.
+ ExprResult BoundExpr;
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(UnqAT))
+ BoundExpr = Owned(IntegerLiteral::Create(Context, CAT->getSize(),
+ Context.getPointerDiffType(),
+ RangeLoc));
+ else if (const VariableArrayType *VAT =
+ dyn_cast<VariableArrayType>(UnqAT))
+ BoundExpr = VAT->getSizeExpr();
+ else {
+ // Can't be a DependentSizedArrayType or an IncompleteArrayType since
+ // UnqAT is not incomplete and Range is not type-dependent.
+ llvm_unreachable("Unexpected array type in for-range");
+ }
+
+ // end-expr is __range + __bound.
+ EndExpr = ActOnBinOp(S, ColonLoc, tok::plus, EndRangeRef.get(),
+ BoundExpr.get());
+ if (EndExpr.isInvalid())
+ return StmtError();
+ if (FinishForRangeVarDecl(*this, EndVar, EndExpr.get(), ColonLoc,
+ diag::err_for_range_iter_deduction_failure)) {
+ NoteForRangeBeginEndFunction(*this, EndExpr.get(), BEF_end);
+ return StmtError();
+ }
+ } else {
+ DeclarationNameInfo BeginNameInfo(&PP.getIdentifierTable().get("begin"),
+ ColonLoc);
+ DeclarationNameInfo EndNameInfo(&PP.getIdentifierTable().get("end"),
+ ColonLoc);
+
+ LookupResult BeginMemberLookup(*this, BeginNameInfo, LookupMemberName);
+ LookupResult EndMemberLookup(*this, EndNameInfo, LookupMemberName);
+
+ if (CXXRecordDecl *D = RangeType->getAsCXXRecordDecl()) {
+ // - if _RangeT is a class type, the unqualified-ids begin and end are
+ // looked up in the scope of class _RangeT as if by class member access
+ // lookup (3.4.5), and if either (or both) finds at least one
+ // declaration, begin-expr and end-expr are __range.begin() and
+ // __range.end(), respectively;
+ LookupQualifiedName(BeginMemberLookup, D);
+ LookupQualifiedName(EndMemberLookup, D);
+
+ if (BeginMemberLookup.empty() != EndMemberLookup.empty()) {
+ Diag(ColonLoc, diag::err_for_range_member_begin_end_mismatch)
+ << RangeType << BeginMemberLookup.empty();
+ return StmtError();
+ }
+ } else {
+ // - otherwise, begin-expr and end-expr are begin(__range) and
+ // end(__range), respectively, where begin and end are looked up with
+ // argument-dependent lookup (3.4.2). For the purposes of this name
+ // lookup, namespace std is an associated namespace.
+ }
+
+ BeginExpr = BuildForRangeBeginEndCall(*this, S, ColonLoc, BeginVar,
+ BEF_begin, BeginNameInfo,
+ BeginMemberLookup,
+ BeginRangeRef.get());
+ if (BeginExpr.isInvalid())
+ return StmtError();
+
+ EndExpr = BuildForRangeBeginEndCall(*this, S, ColonLoc, EndVar,
+ BEF_end, EndNameInfo,
+ EndMemberLookup, EndRangeRef.get());
+ if (EndExpr.isInvalid())
+ return StmtError();
+ }
+
+ // C++0x [decl.spec.auto]p6: BeginType and EndType must be the same.
+ QualType BeginType = BeginVar->getType(), EndType = EndVar->getType();
+ if (!Context.hasSameType(BeginType, EndType)) {
+ Diag(RangeLoc, diag::err_for_range_begin_end_types_differ)
+ << BeginType << EndType;
+ NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
+ NoteForRangeBeginEndFunction(*this, EndExpr.get(), BEF_end);
+ }
+
+ Decl *BeginEndDecls[] = { BeginVar, EndVar };
+ // Claim the type doesn't contain auto: we've already done the checking.
+ DeclGroupPtrTy BeginEndGroup =
+ BuildDeclaratorGroup(BeginEndDecls, 2, /*TypeMayContainAuto=*/false);
+ BeginEndDecl = ActOnDeclStmt(BeginEndGroup, ColonLoc, ColonLoc);
+
+ const QualType BeginRefNonRefType = BeginType.getNonReferenceType();
+ ExprResult BeginRef = BuildDeclRefExpr(BeginVar, BeginRefNonRefType,
+ VK_LValue, ColonLoc);
+ if (BeginRef.isInvalid())
+ return StmtError();
+
+ ExprResult EndRef = BuildDeclRefExpr(EndVar, EndType.getNonReferenceType(),
+ VK_LValue, ColonLoc);
+ if (EndRef.isInvalid())
+ return StmtError();
+
+ // Build and check __begin != __end expression.
+ NotEqExpr = ActOnBinOp(S, ColonLoc, tok::exclaimequal,
+ BeginRef.get(), EndRef.get());
+ NotEqExpr = ActOnBooleanCondition(S, ColonLoc, NotEqExpr.get());
+ NotEqExpr = ActOnFinishFullExpr(NotEqExpr.get());
+ if (NotEqExpr.isInvalid()) {
+ NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
+ if (!Context.hasSameType(BeginType, EndType))
+ NoteForRangeBeginEndFunction(*this, EndExpr.get(), BEF_end);
+ return StmtError();
+ }
+
+ // Build and check ++__begin expression.
+ BeginRef = BuildDeclRefExpr(BeginVar, BeginRefNonRefType,
+ VK_LValue, ColonLoc);
+ if (BeginRef.isInvalid())
+ return StmtError();
+
+ IncrExpr = ActOnUnaryOp(S, ColonLoc, tok::plusplus, BeginRef.get());
+ IncrExpr = ActOnFinishFullExpr(IncrExpr.get());
+ if (IncrExpr.isInvalid()) {
+ NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
+ return StmtError();
+ }
+
+ // Build and check *__begin expression.
+ BeginRef = BuildDeclRefExpr(BeginVar, BeginRefNonRefType,
+ VK_LValue, ColonLoc);
+ if (BeginRef.isInvalid())
+ return StmtError();
+
+ ExprResult DerefExpr = ActOnUnaryOp(S, ColonLoc, tok::star, BeginRef.get());
+ if (DerefExpr.isInvalid()) {
+ NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
+ return StmtError();
+ }
+
+ // Attach *__begin as initializer for VD.
+ if (!LoopVar->isInvalidDecl()) {
+ AddInitializerToDecl(LoopVar, DerefExpr.get(), /*DirectInit=*/false,
+ /*TypeMayContainAuto=*/true);
+ if (LoopVar->isInvalidDecl())
+ NoteForRangeBeginEndFunction(*this, BeginExpr.get(), BEF_begin);
+ }
+ } else {
+ // The range is implicitly used as a placeholder when it is dependent.
+ RangeVar->setUsed();
+ }
+
+ return Owned(new (Context) CXXForRangeStmt(RangeDS,
+ cast_or_null<DeclStmt>(BeginEndDecl.get()),
+ NotEqExpr.take(), IncrExpr.take(),
+ LoopVarDS, /*Body=*/0, ForLoc,
+ ColonLoc, RParenLoc));
+}
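+
+// E.g. for "for (auto &x : vec) use(x);" over a class type, __begin and __end
+// come from begin()/end() found by member lookup (or, failing that, ADL)
+// above, while for a built-in array "for (int v : arr)" they become __range
+// and __range + bound.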
+
+/// FinishCXXForRangeStmt - Attach the body to a C++0x for-range statement.
+/// This is a separate step from ActOnCXXForRangeStmt because analysis of the
+/// body cannot be performed until after the type of the range variable is
+/// determined.
+StmtResult Sema::FinishCXXForRangeStmt(Stmt *S, Stmt *B) {
+ if (!S || !B)
+ return StmtError();
+
+ CXXForRangeStmt *ForStmt = cast<CXXForRangeStmt>(S);
+ ForStmt->setBody(B);
+
+ DiagnoseEmptyStmtBody(ForStmt->getRParenLoc(), B,
+ diag::warn_empty_range_based_for_body);
+
+ return S;
+}
+
+StmtResult Sema::ActOnGotoStmt(SourceLocation GotoLoc,
+ SourceLocation LabelLoc,
+ LabelDecl *TheDecl) {
+ getCurFunction()->setHasBranchIntoScope();
+ TheDecl->setUsed();
+ return Owned(new (Context) GotoStmt(TheDecl, GotoLoc, LabelLoc));
+}
+
+StmtResult
+Sema::ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc,
+ Expr *E) {
+ // Convert operand to void*
+ if (!E->isTypeDependent()) {
+ QualType ETy = E->getType();
+ QualType DestTy = Context.getPointerType(Context.VoidTy.withConst());
+ ExprResult ExprRes = Owned(E);
+ AssignConvertType ConvTy =
+ CheckSingleAssignmentConstraints(DestTy, ExprRes);
+ if (ExprRes.isInvalid())
+ return StmtError();
+ E = ExprRes.take();
+ if (DiagnoseAssignmentResult(ConvTy, StarLoc, DestTy, ETy, E, AA_Passing))
+ return StmtError();
+ E = MaybeCreateExprWithCleanups(E);
+ }
+
+ getCurFunction()->setHasIndirectGoto();
+
+ return Owned(new (Context) IndirectGotoStmt(GotoLoc, StarLoc, E));
+}
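+
+// E.g. the GNU labels-as-values extension "void *p = &&out; ... goto *p;"
+// goes through the conversion to 'const void *' above.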
+
+StmtResult
+Sema::ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope) {
+ Scope *S = CurScope->getContinueParent();
+ if (!S) {
+    // C99 6.8.6.2p1: A continue shall appear only in or as a loop body.
+ return StmtError(Diag(ContinueLoc, diag::err_continue_not_in_loop));
+ }
+
+ return Owned(new (Context) ContinueStmt(ContinueLoc));
+}
+
+StmtResult
+Sema::ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope) {
+ Scope *S = CurScope->getBreakParent();
+ if (!S) {
+ // C99 6.8.6.3p1: A break shall appear only in or as a switch/loop body.
+ return StmtError(Diag(BreakLoc, diag::err_break_not_in_loop_or_switch));
+ }
+
+ return Owned(new (Context) BreakStmt(BreakLoc));
+}
+
+/// \brief Determine whether the given expression is a candidate for
+/// copy elision in either a return statement or a throw expression.
+///
+/// \param ReturnType If we're determining the copy elision candidate for
+/// a return statement, this is the return type of the function. If we're
+/// determining the copy elision candidate for a throw expression, this will
+/// be a NULL type.
+///
+/// \param E The expression being returned from the function or block, or
+/// being thrown.
+///
+/// \param AllowFunctionParameter Whether we allow function parameters to
+/// be considered NRVO candidates. C++ prohibits this for NRVO itself, but
+/// we re-use this logic to determine whether we should try to move as part of
+/// a return or throw (which does allow function parameters).
+///
+/// \returns The NRVO candidate variable, if the return statement may use the
+/// NRVO, or NULL if there is no such candidate.
+const VarDecl *Sema::getCopyElisionCandidate(QualType ReturnType,
+ Expr *E,
+ bool AllowFunctionParameter) {
+ QualType ExprType = E->getType();
+ // - in a return statement in a function with ...
+ // ... a class return type ...
+ if (!ReturnType.isNull()) {
+ if (!ReturnType->isRecordType())
+ return 0;
+ // ... the same cv-unqualified type as the function return type ...
+ if (!Context.hasSameUnqualifiedType(ReturnType, ExprType))
+ return 0;
+ }
+
+ // ... the expression is the name of a non-volatile automatic object
+  //  (other than a function or catch-clause parameter) ...
+ const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E->IgnoreParens());
+ if (!DR)
+ return 0;
+ const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
+ if (!VD)
+ return 0;
+
+ // ...object (other than a function or catch-clause parameter)...
+ if (VD->getKind() != Decl::Var &&
+ !(AllowFunctionParameter && VD->getKind() == Decl::ParmVar))
+ return 0;
+ if (VD->isExceptionVariable()) return 0;
+
+ // ...automatic...
+ if (!VD->hasLocalStorage()) return 0;
+
+ // ...non-volatile...
+ if (VD->getType().isVolatileQualified()) return 0;
+ if (VD->getType()->isReferenceType()) return 0;
+
+ // __block variables can't be allocated in a way that permits NRVO.
+ if (VD->hasAttr<BlocksAttr>()) return 0;
+
+ // Variables with higher required alignment than their type's ABI
+ // alignment cannot use NRVO.
+ if (VD->hasAttr<AlignedAttr>() &&
+ Context.getDeclAlign(VD) > Context.getTypeAlignInChars(VD->getType()))
+ return 0;
+
+ return VD;
+}
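+
+// E.g. for a class type T, the 'v' in "T f() { T v; ...; return v; }" is an
+// NRVO candidate, whereas the parameter in "T g(T p) { return p; }" only
+// qualifies when AllowFunctionParameter is true (the move-on-return case).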
+
+/// \brief Perform the initialization of a potentially-movable value, which
+/// is the result of return value.
+///
+/// This routine implements C++0x [class.copy]p33, which attempts to treat
+/// returned lvalues as rvalues in certain cases (to prefer move construction),
+/// then falls back to treating them as lvalues if that failed.
+ExprResult
+Sema::PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
+ const VarDecl *NRVOCandidate,
+ QualType ResultType,
+ Expr *Value,
+ bool AllowNRVO) {
+ // C++0x [class.copy]p33:
+ // When the criteria for elision of a copy operation are met or would
+ // be met save for the fact that the source object is a function
+ // parameter, and the object to be copied is designated by an lvalue,
+ // overload resolution to select the constructor for the copy is first
+ // performed as if the object were designated by an rvalue.
+ ExprResult Res = ExprError();
+ if (AllowNRVO &&
+ (NRVOCandidate || getCopyElisionCandidate(ResultType, Value, true))) {
+ ImplicitCastExpr AsRvalue(ImplicitCastExpr::OnStack,
+ Value->getType(), CK_LValueToRValue,
+ Value, VK_XValue);
+
+ Expr *InitExpr = &AsRvalue;
+ InitializationKind Kind
+ = InitializationKind::CreateCopy(Value->getLocStart(),
+ Value->getLocStart());
+ InitializationSequence Seq(*this, Entity, Kind, &InitExpr, 1);
+
+ // [...] If overload resolution fails, or if the type of the first
+ // parameter of the selected constructor is not an rvalue reference
+ // to the object's type (possibly cv-qualified), overload resolution
+ // is performed again, considering the object as an lvalue.
+ if (Seq) {
+ for (InitializationSequence::step_iterator Step = Seq.step_begin(),
+ StepEnd = Seq.step_end();
+ Step != StepEnd; ++Step) {
+ if (Step->Kind != InitializationSequence::SK_ConstructorInitialization)
+ continue;
+
+ CXXConstructorDecl *Constructor
+ = cast<CXXConstructorDecl>(Step->Function.Function);
+
+ const RValueReferenceType *RRefType
+ = Constructor->getParamDecl(0)->getType()
+ ->getAs<RValueReferenceType>();
+
+ // If we don't meet the criteria, break out now.
+ if (!RRefType ||
+ !Context.hasSameUnqualifiedType(RRefType->getPointeeType(),
+ Context.getTypeDeclType(Constructor->getParent())))
+ break;
+
+ // Promote "AsRvalue" to the heap, since we now need this
+ // expression node to persist.
+ Value = ImplicitCastExpr::Create(Context, Value->getType(),
+ CK_LValueToRValue, Value, 0,
+ VK_XValue);
+
+ // Complete type-checking the initialization of the return type
+ // using the constructor we found.
+ Res = Seq.Perform(*this, Entity, Kind, MultiExprArg(&Value, 1));
+ }
+ }
+ }
+
+ // Either we didn't meet the criteria for treating an lvalue as an rvalue,
+ // above, or overload resolution failed. Either way, we need to try
+ // (again) now with the return value expression as written.
+ if (Res.isInvalid())
+ Res = PerformCopyInitialization(Entity, SourceLocation(), Value);
+
+ return Res;
+}
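+
+// E.g. for "return v;" with a local of class type, overload resolution is
+// first run as if 'v' were an rvalue so that a move constructor can be
+// chosen; if no suitable rvalue-reference constructor is selected, the plain
+// copy initialization is performed instead.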
+
+/// ActOnCapScopeReturnStmt - Utility routine to type-check return statements
+/// for capturing scopes.
+///
+StmtResult
+Sema::ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
+ // If this is the first return we've seen, infer the return type.
+ // [expr.prim.lambda]p4 in C++11; block literals follow a superset of those
+ // rules which allows multiple return statements.
+ CapturingScopeInfo *CurCap = cast<CapturingScopeInfo>(getCurFunction());
+ if (CurCap->HasImplicitReturnType) {
+ QualType ReturnT;
+ if (RetValExp && !isa<InitListExpr>(RetValExp)) {
+ ExprResult Result = DefaultFunctionArrayLvalueConversion(RetValExp);
+ if (Result.isInvalid())
+ return StmtError();
+ RetValExp = Result.take();
+
+ if (!RetValExp->isTypeDependent())
+ ReturnT = RetValExp->getType();
+ else
+ ReturnT = Context.DependentTy;
+ } else {
+ if (RetValExp) {
+ // C++11 [expr.lambda.prim]p4 bans inferring the result from an
+ // initializer list, because it is not an expression (even
+ // though we represent it as one). We still deduce 'void'.
+ Diag(ReturnLoc, diag::err_lambda_return_init_list)
+ << RetValExp->getSourceRange();
+ }
+
+ ReturnT = Context.VoidTy;
+ }
+ // We require the return types to strictly match here.
+ if (!CurCap->ReturnType.isNull() &&
+ !CurCap->ReturnType->isDependentType() &&
+ !ReturnT->isDependentType() &&
+ !Context.hasSameType(ReturnT, CurCap->ReturnType)) {
+ Diag(ReturnLoc, diag::err_typecheck_missing_return_type_incompatible)
+ << ReturnT << CurCap->ReturnType
+ << (getCurLambda() != 0);
+ return StmtError();
+ }
+ CurCap->ReturnType = ReturnT;
+ }
+ QualType FnRetType = CurCap->ReturnType;
+ assert(!FnRetType.isNull());
+
+ if (BlockScopeInfo *CurBlock = dyn_cast<BlockScopeInfo>(CurCap)) {
+ if (CurBlock->FunctionType->getAs<FunctionType>()->getNoReturnAttr()) {
+ Diag(ReturnLoc, diag::err_noreturn_block_has_return_expr);
+ return StmtError();
+ }
+ } else {
+ LambdaScopeInfo *LSI = cast<LambdaScopeInfo>(CurCap);
+ if (LSI->CallOperator->getType()->getAs<FunctionType>()->getNoReturnAttr()){
+ Diag(ReturnLoc, diag::err_noreturn_lambda_has_return_expr);
+ return StmtError();
+ }
+ }
+
+ // Otherwise, verify that this result type matches the previous one. We are
+ // pickier with blocks than for normal functions because we don't have GCC
+ // compatibility to worry about here.
+ const VarDecl *NRVOCandidate = 0;
+ if (FnRetType->isDependentType()) {
+ // Delay processing for now. TODO: there are lots of dependent
+ // types we can conclusively prove aren't void.
+ } else if (FnRetType->isVoidType()) {
+ if (RetValExp && !isa<InitListExpr>(RetValExp) &&
+ !(getLangOpts().CPlusPlus &&
+ (RetValExp->isTypeDependent() ||
+ RetValExp->getType()->isVoidType()))) {
+ if (!getLangOpts().CPlusPlus &&
+ RetValExp->getType()->isVoidType())
+ Diag(ReturnLoc, diag::ext_return_has_void_expr) << "literal" << 2;
+ else {
+ Diag(ReturnLoc, diag::err_return_block_has_expr);
+ RetValExp = 0;
+ }
+ }
+ } else if (!RetValExp) {
+ return StmtError(Diag(ReturnLoc, diag::err_block_return_missing_expr));
+ } else if (!RetValExp->isTypeDependent()) {
+ // we have a non-void block with an expression, continue checking
+
+ // C99 6.8.6.4p3(136): The return statement is not an assignment. The
+ // overlap restriction of subclause 6.5.16.1 does not apply to the case of
+ // function return.
+
+    // In C++ the return statement is handled via a copy initialization,
+    // the C version of which boils down to CheckSingleAssignmentConstraints.
+ NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, false);
+ InitializedEntity Entity = InitializedEntity::InitializeResult(ReturnLoc,
+ FnRetType,
+ NRVOCandidate != 0);
+ ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRVOCandidate,
+ FnRetType, RetValExp);
+ if (Res.isInvalid()) {
+ // FIXME: Cleanup temporaries here, anyway?
+ return StmtError();
+ }
+ RetValExp = Res.take();
+ CheckReturnStackAddr(RetValExp, FnRetType, ReturnLoc);
+ }
+
+ if (RetValExp) {
+ CheckImplicitConversions(RetValExp, ReturnLoc);
+ RetValExp = MaybeCreateExprWithCleanups(RetValExp);
+ }
+ ReturnStmt *Result = new (Context) ReturnStmt(ReturnLoc, RetValExp,
+ NRVOCandidate);
+
+ // If we need to check for the named return value optimization, save the
+ // return statement in our scope for later processing.
+ if (getLangOpts().CPlusPlus && FnRetType->isRecordType() &&
+ !CurContext->isDependentContext())
+ FunctionScopes.back()->Returns.push_back(Result);
+
+ return Owned(Result);
+}
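+
+// E.g. a C++11 lambda "[](bool b) { if (b) return 1; return 2.0; }" fails
+// the strict same-type check above (int vs. double), whereas a block or
+// lambda whose return statements all yield int has int inferred as its
+// return type.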
+
+StmtResult
+Sema::ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp) {
+ // Check for unexpanded parameter packs.
+ if (RetValExp && DiagnoseUnexpandedParameterPack(RetValExp))
+ return StmtError();
+
+ if (isa<CapturingScopeInfo>(getCurFunction()))
+ return ActOnCapScopeReturnStmt(ReturnLoc, RetValExp);
+
+ QualType FnRetType;
+ QualType RelatedRetType;
+ if (const FunctionDecl *FD = getCurFunctionDecl()) {
+ FnRetType = FD->getResultType();
+ if (FD->hasAttr<NoReturnAttr>() ||
+ FD->getType()->getAs<FunctionType>()->getNoReturnAttr())
+ Diag(ReturnLoc, diag::warn_noreturn_function_has_return_expr)
+ << FD->getDeclName();
+ } else if (ObjCMethodDecl *MD = getCurMethodDecl()) {
+ FnRetType = MD->getResultType();
+ if (MD->hasRelatedResultType() && MD->getClassInterface()) {
+ // In the implementation of a method with a related return type, the
+ // type used to type-check the validity of return statements within the
+ // method body is a pointer to the type of the class being implemented.
+ RelatedRetType = Context.getObjCInterfaceType(MD->getClassInterface());
+ RelatedRetType = Context.getObjCObjectPointerType(RelatedRetType);
+ }
+ } else // If we don't have a function/method context, bail.
+ return StmtError();
+
+ ReturnStmt *Result = 0;
+ if (FnRetType->isVoidType()) {
+ if (RetValExp) {
+ if (isa<InitListExpr>(RetValExp)) {
+ // We simply never allow init lists as the return value of void
+ // functions. This is compatible because this was never allowed before,
+ // so there's no legacy code to deal with.
+ NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
+ int FunctionKind = 0;
+ if (isa<ObjCMethodDecl>(CurDecl))
+ FunctionKind = 1;
+ else if (isa<CXXConstructorDecl>(CurDecl))
+ FunctionKind = 2;
+ else if (isa<CXXDestructorDecl>(CurDecl))
+ FunctionKind = 3;
+
+ Diag(ReturnLoc, diag::err_return_init_list)
+ << CurDecl->getDeclName() << FunctionKind
+ << RetValExp->getSourceRange();
+
+ // Drop the expression.
+ RetValExp = 0;
+ } else if (!RetValExp->isTypeDependent()) {
+ // C99 6.8.6.4p1 (ext_ since GCC warns)
+ unsigned D = diag::ext_return_has_expr;
+ if (RetValExp->getType()->isVoidType())
+ D = diag::ext_return_has_void_expr;
+ else {
+ ExprResult Result = Owned(RetValExp);
+ Result = IgnoredValueConversions(Result.take());
+ if (Result.isInvalid())
+ return StmtError();
+ RetValExp = Result.take();
+ RetValExp = ImpCastExprToType(RetValExp,
+ Context.VoidTy, CK_ToVoid).take();
+ }
+
+ // return (some void expression); is legal in C++.
+ if (D != diag::ext_return_has_void_expr ||
+ !getLangOpts().CPlusPlus) {
+ NamedDecl *CurDecl = getCurFunctionOrMethodDecl();
+
+ int FunctionKind = 0;
+ if (isa<ObjCMethodDecl>(CurDecl))
+ FunctionKind = 1;
+ else if (isa<CXXConstructorDecl>(CurDecl))
+ FunctionKind = 2;
+ else if (isa<CXXDestructorDecl>(CurDecl))
+ FunctionKind = 3;
+
+ Diag(ReturnLoc, D)
+ << CurDecl->getDeclName() << FunctionKind
+ << RetValExp->getSourceRange();
+ }
+ }
+
+ if (RetValExp) {
+ CheckImplicitConversions(RetValExp, ReturnLoc);
+ RetValExp = MaybeCreateExprWithCleanups(RetValExp);
+ }
+ }
+
+ Result = new (Context) ReturnStmt(ReturnLoc, RetValExp, 0);
+ } else if (!RetValExp && !FnRetType->isDependentType()) {
+ unsigned DiagID = diag::warn_return_missing_expr; // C90 6.6.6.4p4
+ // C99 6.8.6.4p1 (ext_ since GCC warns)
+ if (getLangOpts().C99) DiagID = diag::ext_return_missing_expr;
+
+ if (FunctionDecl *FD = getCurFunctionDecl())
+ Diag(ReturnLoc, DiagID) << FD->getIdentifier() << 0/*fn*/;
+ else
+ Diag(ReturnLoc, DiagID) << getCurMethodDecl()->getDeclName() << 1/*meth*/;
+ Result = new (Context) ReturnStmt(ReturnLoc);
+ } else {
+ const VarDecl *NRVOCandidate = 0;
+ if (!FnRetType->isDependentType() && !RetValExp->isTypeDependent()) {
+ // we have a non-void function with an expression, continue checking
+
+ if (!RelatedRetType.isNull()) {
+ // If we have a related result type, perform an extra conversion here.
+ // FIXME: The diagnostics here don't really describe what is happening.
+ InitializedEntity Entity =
+ InitializedEntity::InitializeTemporary(RelatedRetType);
+
+ ExprResult Res = PerformCopyInitialization(Entity, SourceLocation(),
+ RetValExp);
+ if (Res.isInvalid()) {
+ // FIXME: Cleanup temporaries here, anyway?
+ return StmtError();
+ }
+ RetValExp = Res.takeAs<Expr>();
+ }
+
+ // C99 6.8.6.4p3(136): The return statement is not an assignment. The
+ // overlap restriction of subclause 6.5.16.1 does not apply to the case of
+ // function return.
+
+ // In C++ the return statement is handled via a copy initialization,
+ // the C version of which boils down to CheckSingleAssignmentConstraints.
+ NRVOCandidate = getCopyElisionCandidate(FnRetType, RetValExp, false);
+ InitializedEntity Entity = InitializedEntity::InitializeResult(ReturnLoc,
+ FnRetType,
+ NRVOCandidate != 0);
+ ExprResult Res = PerformMoveOrCopyInitialization(Entity, NRVOCandidate,
+ FnRetType, RetValExp);
+ if (Res.isInvalid()) {
+ // FIXME: Cleanup temporaries here, anyway?
+ return StmtError();
+ }
+
+ RetValExp = Res.takeAs<Expr>();
+ if (RetValExp)
+ CheckReturnStackAddr(RetValExp, FnRetType, ReturnLoc);
+ }
+
+ if (RetValExp) {
+ CheckImplicitConversions(RetValExp, ReturnLoc);
+ RetValExp = MaybeCreateExprWithCleanups(RetValExp);
+ }
+ Result = new (Context) ReturnStmt(ReturnLoc, RetValExp, NRVOCandidate);
+ }
+
+ // If we need to check for the named return value optimization, save the
+ // return statement in our scope for later processing.
+ if (getLangOpts().CPlusPlus && FnRetType->isRecordType() &&
+ !CurContext->isDependentContext())
+ FunctionScopes.back()->Returns.push_back(Result);
+
+ return Owned(Result);
+}
+
+/// CheckAsmLValue - GNU C has an extremely ugly extension whereby they silently
+/// ignore "noop" casts in places where an lvalue is required by an inline asm.
+/// We emulate this behavior when -fheinous-gnu-extensions is specified, but
+/// provide a strong guidance to not use it.
+///
+/// This method checks to see if the argument is an acceptable l-value and
+/// returns false if it is a case we can handle.
+static bool CheckAsmLValue(const Expr *E, Sema &S) {
+ // Type dependent expressions will be checked during instantiation.
+ if (E->isTypeDependent())
+ return false;
+
+ if (E->isLValue())
+ return false; // Cool, this is an lvalue.
+
+ // Okay, this is not an lvalue, but perhaps it is the result of a cast that we
+ // are supposed to allow.
+ const Expr *E2 = E->IgnoreParenNoopCasts(S.Context);
+ if (E != E2 && E2->isLValue()) {
+ if (!S.getLangOpts().HeinousExtensions)
+ S.Diag(E2->getLocStart(), diag::err_invalid_asm_cast_lvalue)
+ << E->getSourceRange();
+ else
+ S.Diag(E2->getLocStart(), diag::warn_invalid_asm_cast_lvalue)
+ << E->getSourceRange();
+ // Accept, even if we emitted an error diagnostic.
+ return false;
+ }
+
+ // None of the above, just randomly invalid non-lvalue.
+ return true;
+}
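+
+// E.g. an output operand written as "=r" ((unsigned)x), where 'x' is an int
+// lvalue, is only a no-op cast away from an lvalue: the check above diagnoses
+// it (as an error by default, as a warning under -fheinous-gnu-extensions)
+// but still reports it as acceptable.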
+
+/// isOperandMentioned - Return true if the specified operand # is mentioned
+/// anywhere in the decomposed asm string.
+static bool isOperandMentioned(unsigned OpNo,
+ ArrayRef<AsmStmt::AsmStringPiece> AsmStrPieces) {
+ for (unsigned p = 0, e = AsmStrPieces.size(); p != e; ++p) {
+ const AsmStmt::AsmStringPiece &Piece = AsmStrPieces[p];
+ if (!Piece.isOperand()) continue;
+
+    // If this piece refers to the operand we're looking for, the operand is
+    // mentioned in the asm string.
+ if (Piece.getOperandNo() == OpNo)
+ return true;
+ }
+
+ return false;
+}
+
+StmtResult Sema::ActOnAsmStmt(SourceLocation AsmLoc, bool IsSimple,
+ bool IsVolatile, unsigned NumOutputs,
+ unsigned NumInputs, IdentifierInfo **Names,
+ MultiExprArg constraints, MultiExprArg exprs,
+ Expr *asmString, MultiExprArg clobbers,
+ SourceLocation RParenLoc, bool MSAsm) {
+ unsigned NumClobbers = clobbers.size();
+ StringLiteral **Constraints =
+ reinterpret_cast<StringLiteral**>(constraints.get());
+ Expr **Exprs = exprs.get();
+ StringLiteral *AsmString = cast<StringLiteral>(asmString);
+ StringLiteral **Clobbers = reinterpret_cast<StringLiteral**>(clobbers.get());
+
+ SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
+
+ // The parser verifies that there is a string literal here.
+ if (!AsmString->isAscii())
+ return StmtError(Diag(AsmString->getLocStart(),diag::err_asm_wide_character)
+ << AsmString->getSourceRange());
+
+ for (unsigned i = 0; i != NumOutputs; i++) {
+ StringLiteral *Literal = Constraints[i];
+ if (!Literal->isAscii())
+ return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character)
+ << Literal->getSourceRange());
+
+ StringRef OutputName;
+ if (Names[i])
+ OutputName = Names[i]->getName();
+
+ TargetInfo::ConstraintInfo Info(Literal->getString(), OutputName);
+ if (!Context.getTargetInfo().validateOutputConstraint(Info))
+ return StmtError(Diag(Literal->getLocStart(),
+ diag::err_asm_invalid_output_constraint)
+ << Info.getConstraintStr());
+
+ // Check that the output exprs are valid lvalues.
+ Expr *OutputExpr = Exprs[i];
+ if (CheckAsmLValue(OutputExpr, *this)) {
+ return StmtError(Diag(OutputExpr->getLocStart(),
+ diag::err_asm_invalid_lvalue_in_output)
+ << OutputExpr->getSourceRange());
+ }
+
+ OutputConstraintInfos.push_back(Info);
+ }
+
+ SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
+
+ for (unsigned i = NumOutputs, e = NumOutputs + NumInputs; i != e; i++) {
+ StringLiteral *Literal = Constraints[i];
+ if (!Literal->isAscii())
+ return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character)
+ << Literal->getSourceRange());
+
+ StringRef InputName;
+ if (Names[i])
+ InputName = Names[i]->getName();
+
+ TargetInfo::ConstraintInfo Info(Literal->getString(), InputName);
+    if (!Context.getTargetInfo().validateInputConstraint(
+                                       OutputConstraintInfos.data(),
+                                       NumOutputs, Info)) {
+ return StmtError(Diag(Literal->getLocStart(),
+ diag::err_asm_invalid_input_constraint)
+ << Info.getConstraintStr());
+ }
+
+ Expr *InputExpr = Exprs[i];
+
+ // Only allow void types for memory constraints.
+ if (Info.allowsMemory() && !Info.allowsRegister()) {
+ if (CheckAsmLValue(InputExpr, *this))
+ return StmtError(Diag(InputExpr->getLocStart(),
+ diag::err_asm_invalid_lvalue_in_input)
+ << Info.getConstraintStr()
+ << InputExpr->getSourceRange());
+ }
+
+ if (Info.allowsRegister()) {
+ if (InputExpr->getType()->isVoidType()) {
+ return StmtError(Diag(InputExpr->getLocStart(),
+ diag::err_asm_invalid_type_in_input)
+ << InputExpr->getType() << Info.getConstraintStr()
+ << InputExpr->getSourceRange());
+ }
+ }
+
+ ExprResult Result = DefaultFunctionArrayLvalueConversion(Exprs[i]);
+ if (Result.isInvalid())
+ return StmtError();
+
+ Exprs[i] = Result.take();
+ InputConstraintInfos.push_back(Info);
+ }
+
+ // Check that the clobbers are valid.
+ for (unsigned i = 0; i != NumClobbers; i++) {
+ StringLiteral *Literal = Clobbers[i];
+ if (!Literal->isAscii())
+ return StmtError(Diag(Literal->getLocStart(),diag::err_asm_wide_character)
+ << Literal->getSourceRange());
+
+ StringRef Clobber = Literal->getString();
+
+ if (!Context.getTargetInfo().isValidClobber(Clobber))
+ return StmtError(Diag(Literal->getLocStart(),
+ diag::err_asm_unknown_register_name) << Clobber);
+ }
+
+ AsmStmt *NS =
+ new (Context) AsmStmt(Context, AsmLoc, IsSimple, IsVolatile, MSAsm,
+ NumOutputs, NumInputs, Names, Constraints, Exprs,
+ AsmString, NumClobbers, Clobbers, RParenLoc);
+ // Validate the asm string, ensuring it makes sense given the operands we
+ // have.
+ SmallVector<AsmStmt::AsmStringPiece, 8> Pieces;
+ unsigned DiagOffs;
+ if (unsigned DiagID = NS->AnalyzeAsmString(Pieces, Context, DiagOffs)) {
+ Diag(getLocationOfStringLiteralByte(AsmString, DiagOffs), DiagID)
+ << AsmString->getSourceRange();
+ return StmtError();
+ }
+
+ // Validate tied input operands for type mismatches.
+ for (unsigned i = 0, e = InputConstraintInfos.size(); i != e; ++i) {
+ TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
+
+ // If this is a tied constraint, verify that the output and input have
+ // either exactly the same type, or that they are int/ptr operands with the
+ // same size (int/long, int*/long, are ok etc).
+ if (!Info.hasTiedOperand()) continue;
+
+ unsigned TiedTo = Info.getTiedOperand();
+ unsigned InputOpNo = i+NumOutputs;
+ Expr *OutputExpr = Exprs[TiedTo];
+ Expr *InputExpr = Exprs[InputOpNo];
+
+ if (OutputExpr->isTypeDependent() || InputExpr->isTypeDependent())
+ continue;
+
+ QualType InTy = InputExpr->getType();
+ QualType OutTy = OutputExpr->getType();
+ if (Context.hasSameType(InTy, OutTy))
+ continue; // All types can be tied to themselves.
+
+ // Decide if the input and output are in the same domain (integer/ptr or
+    // floating point).
+ enum AsmDomain {
+ AD_Int, AD_FP, AD_Other
+ } InputDomain, OutputDomain;
+
+ if (InTy->isIntegerType() || InTy->isPointerType())
+ InputDomain = AD_Int;
+ else if (InTy->isRealFloatingType())
+ InputDomain = AD_FP;
+ else
+ InputDomain = AD_Other;
+
+ if (OutTy->isIntegerType() || OutTy->isPointerType())
+ OutputDomain = AD_Int;
+ else if (OutTy->isRealFloatingType())
+ OutputDomain = AD_FP;
+ else
+ OutputDomain = AD_Other;
+
+ // They are ok if they are the same size and in the same domain. This
+ // allows tying things like:
+ // void* to int*
+ // void* to int if they are the same size.
+ // double to long double if they are the same size.
+ //
+ uint64_t OutSize = Context.getTypeSize(OutTy);
+ uint64_t InSize = Context.getTypeSize(InTy);
+ if (OutSize == InSize && InputDomain == OutputDomain &&
+ InputDomain != AD_Other)
+ continue;
+
+ // If the smaller input/output operand is not mentioned in the asm string,
+ // then we can promote the smaller one to a larger input and the asm string
+ // won't notice.
+ bool SmallerValueMentioned = false;
+
+ // If this is a reference to the input and if the input was the smaller
+ // one, then we have to reject this asm.
+ if (isOperandMentioned(InputOpNo, Pieces)) {
+ // This is a use in the asm string of the smaller operand. Since we
+ // codegen this by promoting to a wider value, the asm will get printed
+ // "wrong".
+ SmallerValueMentioned |= InSize < OutSize;
+ }
+ if (isOperandMentioned(TiedTo, Pieces)) {
+ // If this is a reference to the output, and if the output is the larger
+ // value, then it's ok because we'll promote the input to the larger type.
+ SmallerValueMentioned |= OutSize < InSize;
+ }
+
+ // If the smaller value wasn't mentioned in the asm string, and if the
+ // output was a register, just extend the shorter one to the size of the
+ // larger one.
+ if (!SmallerValueMentioned && InputDomain != AD_Other &&
+ OutputConstraintInfos[TiedTo].allowsRegister())
+ continue;
+
+ // Either both of the operands were mentioned or the smaller one was
+ // mentioned. One more special case that we'll allow: if the tied input is
+ // integer, unmentioned, and is a constant, then we'll allow truncating it
+ // down to the size of the destination.
+ if (InputDomain == AD_Int && OutputDomain == AD_Int &&
+ !isOperandMentioned(InputOpNo, Pieces) &&
+ InputExpr->isEvaluatable(Context)) {
+ CastKind castKind =
+ (OutTy->isBooleanType() ? CK_IntegralToBoolean : CK_IntegralCast);
+ InputExpr = ImpCastExprToType(InputExpr, OutTy, castKind).take();
+ Exprs[InputOpNo] = InputExpr;
+ NS->setInputExpr(i, InputExpr);
+ continue;
+ }
+
+ Diag(InputExpr->getLocStart(),
+ diag::err_asm_tying_incompatible_types)
+ << InTy << OutTy << OutputExpr->getSourceRange()
+ << InputExpr->getSourceRange();
+ return StmtError();
+ }
+
+ return Owned(NS);
+}
+
+StmtResult
+Sema::ActOnObjCAtCatchStmt(SourceLocation AtLoc,
+ SourceLocation RParen, Decl *Parm,
+ Stmt *Body) {
+ VarDecl *Var = cast_or_null<VarDecl>(Parm);
+ if (Var && Var->isInvalidDecl())
+ return StmtError();
+
+ return Owned(new (Context) ObjCAtCatchStmt(AtLoc, RParen, Var, Body));
+}
+
+StmtResult
+Sema::ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body) {
+ return Owned(new (Context) ObjCAtFinallyStmt(AtLoc, Body));
+}
+
+StmtResult
+Sema::ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
+ MultiStmtArg CatchStmts, Stmt *Finally) {
+ if (!getLangOpts().ObjCExceptions)
+ Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@try";
+
+ getCurFunction()->setHasBranchProtectedScope();
+ unsigned NumCatchStmts = CatchStmts.size();
+ return Owned(ObjCAtTryStmt::Create(Context, AtLoc, Try,
+ CatchStmts.release(),
+ NumCatchStmts,
+ Finally));
+}
+
+StmtResult Sema::BuildObjCAtThrowStmt(SourceLocation AtLoc,
+ Expr *Throw) {
+ if (Throw) {
+ Throw = MaybeCreateExprWithCleanups(Throw);
+ ExprResult Result = DefaultLvalueConversion(Throw);
+ if (Result.isInvalid())
+ return StmtError();
+
+ Throw = Result.take();
+ QualType ThrowType = Throw->getType();
+ // Make sure the expression type is an ObjC pointer or "void *".
+ if (!ThrowType->isDependentType() &&
+ !ThrowType->isObjCObjectPointerType()) {
+ const PointerType *PT = ThrowType->getAs<PointerType>();
+ if (!PT || !PT->getPointeeType()->isVoidType())
+ return StmtError(Diag(AtLoc, diag::error_objc_throw_expects_object)
+ << Throw->getType() << Throw->getSourceRange());
+ }
+ }
+
+ return Owned(new (Context) ObjCAtThrowStmt(AtLoc, Throw));
+}
+
+StmtResult
+Sema::ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
+ Scope *CurScope) {
+ if (!getLangOpts().ObjCExceptions)
+ Diag(AtLoc, diag::err_objc_exceptions_disabled) << "@throw";
+
+ if (!Throw) {
+ // @throw without an expression designates a rethrow (which must occur
+ // in the context of an @catch clause).
+ Scope *AtCatchParent = CurScope;
+ while (AtCatchParent && !AtCatchParent->isAtCatchScope())
+ AtCatchParent = AtCatchParent->getParent();
+ if (!AtCatchParent)
+ return StmtError(Diag(AtLoc, diag::error_rethrow_used_outside_catch));
+ }
+
+ return BuildObjCAtThrowStmt(AtLoc, Throw);
+}
+
+ExprResult
+Sema::ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand) {
+ ExprResult result = DefaultLvalueConversion(operand);
+ if (result.isInvalid())
+ return ExprError();
+ operand = result.take();
+
+ // Make sure the expression type is an ObjC pointer or "void *".
+ QualType type = operand->getType();
+ if (!type->isDependentType() &&
+ !type->isObjCObjectPointerType()) {
+ const PointerType *pointerType = type->getAs<PointerType>();
+ if (!pointerType || !pointerType->getPointeeType()->isVoidType())
+ return Diag(atLoc, diag::error_objc_synchronized_expects_object)
+ << type << operand->getSourceRange();
+ }
+
+ // The operand to @synchronized is a full-expression.
+ return MaybeCreateExprWithCleanups(operand);
+}
+
+StmtResult
+Sema::ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SyncExpr,
+ Stmt *SyncBody) {
+ // We can't jump into or indirect-jump out of a @synchronized block.
+ getCurFunction()->setHasBranchProtectedScope();
+ return Owned(new (Context) ObjCAtSynchronizedStmt(AtLoc, SyncExpr, SyncBody));
+}
+
+/// ActOnCXXCatchBlock - Takes an exception declaration and a handler block
+/// and creates a proper catch handler from them.
+StmtResult
+Sema::ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl,
+ Stmt *HandlerBlock) {
+ // There's nothing to test that ActOnExceptionDecl didn't already test.
+ return Owned(new (Context) CXXCatchStmt(CatchLoc,
+ cast_or_null<VarDecl>(ExDecl),
+ HandlerBlock));
+}
+
+StmtResult
+Sema::ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body) {
+ getCurFunction()->setHasBranchProtectedScope();
+ return Owned(new (Context) ObjCAutoreleasePoolStmt(AtLoc, Body));
+}
+
+namespace {
+
+class TypeWithHandler {
+ QualType t;
+ CXXCatchStmt *stmt;
+public:
+ TypeWithHandler(const QualType &type, CXXCatchStmt *statement)
+ : t(type), stmt(statement) {}
+
+ // An arbitrary order is fine as long as it places identical
+ // types next to each other.
+ bool operator<(const TypeWithHandler &y) const {
+ if (t.getAsOpaquePtr() < y.t.getAsOpaquePtr())
+ return true;
+ if (t.getAsOpaquePtr() > y.t.getAsOpaquePtr())
+ return false;
+ else
+ return getTypeSpecStartLoc() < y.getTypeSpecStartLoc();
+ }
+
+ bool operator==(const TypeWithHandler& other) const {
+ return t == other.t;
+ }
+
+ CXXCatchStmt *getCatchStmt() const { return stmt; }
+ SourceLocation getTypeSpecStartLoc() const {
+ return stmt->getExceptionDecl()->getTypeSpecStartLoc();
+ }
+};
+
+}
+
+/// ActOnCXXTryBlock - Takes a try compound-statement and a number of
+/// handlers and creates a try statement from them.
+StmtResult
+Sema::ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
+ MultiStmtArg RawHandlers) {
+ // Don't report an error if 'try' is used in system headers.
+ if (!getLangOpts().CXXExceptions &&
+ !getSourceManager().isInSystemHeader(TryLoc))
+ Diag(TryLoc, diag::err_exceptions_disabled) << "try";
+
+ unsigned NumHandlers = RawHandlers.size();
+ assert(NumHandlers > 0 &&
+ "The parser shouldn't call this if there are no handlers.");
+ Stmt **Handlers = RawHandlers.get();
+
+ SmallVector<TypeWithHandler, 8> TypesWithHandlers;
+
+ for (unsigned i = 0; i < NumHandlers; ++i) {
+ CXXCatchStmt *Handler = cast<CXXCatchStmt>(Handlers[i]);
+ if (!Handler->getExceptionDecl()) {
+ if (i < NumHandlers - 1)
+ return StmtError(Diag(Handler->getLocStart(),
+ diag::err_early_catch_all));
+
+ continue;
+ }
+
+ const QualType CaughtType = Handler->getCaughtType();
+ const QualType CanonicalCaughtType = Context.getCanonicalType(CaughtType);
+ TypesWithHandlers.push_back(TypeWithHandler(CanonicalCaughtType, Handler));
+ }
+
+ // Detect handlers for the same type as an earlier one.
+ if (NumHandlers > 1) {
+ llvm::array_pod_sort(TypesWithHandlers.begin(), TypesWithHandlers.end());
+
+ TypeWithHandler prev = TypesWithHandlers[0];
+ for (unsigned i = 1; i < TypesWithHandlers.size(); ++i) {
+ TypeWithHandler curr = TypesWithHandlers[i];
+
+ if (curr == prev) {
+ Diag(curr.getTypeSpecStartLoc(),
+ diag::warn_exception_caught_by_earlier_handler)
+ << curr.getCatchStmt()->getCaughtType().getAsString();
+ Diag(prev.getTypeSpecStartLoc(),
+ diag::note_previous_exception_handler)
+ << prev.getCatchStmt()->getCaughtType().getAsString();
+ }
+
+ prev = curr;
+ }
+ }
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ // FIXME: We should detect handlers that cannot catch anything because an
+ // earlier handler catches a superclass. Need to find a method that is not
+ // quadratic for this.
+ // Neither of these is explicitly forbidden, but every compiler detects them
+ // and warns.
+
+ return Owned(CXXTryStmt::Create(Context, TryLoc, TryBlock,
+ Handlers, NumHandlers));
+}
+
+StmtResult
+Sema::ActOnSEHTryBlock(bool IsCXXTry,
+ SourceLocation TryLoc,
+ Stmt *TryBlock,
+ Stmt *Handler) {
+ assert(TryBlock && Handler);
+
+ getCurFunction()->setHasBranchProtectedScope();
+
+ return Owned(SEHTryStmt::Create(Context,IsCXXTry,TryLoc,TryBlock,Handler));
+}
+
+StmtResult
+Sema::ActOnSEHExceptBlock(SourceLocation Loc,
+ Expr *FilterExpr,
+ Stmt *Block) {
+ assert(FilterExpr && Block);
+
+ if (!FilterExpr->getType()->isIntegerType()) {
+ return StmtError(Diag(FilterExpr->getExprLoc(),
+ diag::err_filter_expression_integral)
+ << FilterExpr->getType());
+ }
+
+ return Owned(SEHExceptStmt::Create(Context,Loc,FilterExpr,Block));
+}
+
+StmtResult
+Sema::ActOnSEHFinallyBlock(SourceLocation Loc,
+ Stmt *Block) {
+ assert(Block);
+ return Owned(SEHFinallyStmt::Create(Context,Loc,Block));
+}
+
+StmtResult Sema::BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
+ bool IsIfExists,
+ NestedNameSpecifierLoc QualifierLoc,
+ DeclarationNameInfo NameInfo,
+ Stmt *Nested) {
+ return new (Context) MSDependentExistsStmt(KeywordLoc, IsIfExists,
+ QualifierLoc, NameInfo,
+ cast<CompoundStmt>(Nested));
+}
+
+StmtResult Sema::ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
+ bool IsIfExists,
+ CXXScopeSpec &SS,
+ UnqualifiedId &Name,
+ Stmt *Nested) {
+ return BuildMSDependentExistsStmt(KeywordLoc, IsIfExists,
+ SS.getWithLocInContext(Context),
+ GetNameFromUnqualifiedId(Name),
+ Nested);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
new file mode 100644
index 0000000..ff8c4da
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplate.cpp
@@ -0,0 +1,7188 @@
+//===------- SemaTemplate.cpp - Semantic Analysis for C++ Templates -------===/
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===/
+//
+// This file implements semantic analysis for C++ templates.
+//===----------------------------------------------------------------------===/
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/Template.h"
+#include "clang/Sema/TemplateDeduction.h"
+#include "TreeTransform.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/TypeVisitor.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "llvm/ADT/SmallBitVector.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringExtras.h"
+using namespace clang;
+using namespace sema;
+
+// Exported for use by Parser.
+SourceRange
+clang::getTemplateParamsRange(TemplateParameterList const * const *Ps,
+ unsigned N) {
+ if (!N) return SourceRange();
+ return SourceRange(Ps[0]->getTemplateLoc(), Ps[N-1]->getRAngleLoc());
+}
+
+/// \brief Determine whether the declaration found is acceptable as the name
+/// of a template and, if so, return that template declaration. Otherwise,
+/// returns NULL.
+static NamedDecl *isAcceptableTemplateName(ASTContext &Context,
+ NamedDecl *Orig,
+ bool AllowFunctionTemplates) {
+ NamedDecl *D = Orig->getUnderlyingDecl();
+
+ if (isa<TemplateDecl>(D)) {
+ if (!AllowFunctionTemplates && isa<FunctionTemplateDecl>(D))
+ return 0;
+
+ return Orig;
+ }
+
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
+ // C++ [temp.local]p1:
+ // Like normal (non-template) classes, class templates have an
+ // injected-class-name (Clause 9). The injected-class-name
+ // can be used with or without a template-argument-list. When
+ // it is used without a template-argument-list, it is
+ // equivalent to the injected-class-name followed by the
+ // template-parameters of the class template enclosed in
+ // <>. When it is used with a template-argument-list, it
+ // refers to the specified class template specialization,
+ // which could be the current specialization or another
+ // specialization.
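+ // A hypothetical example for illustration: inside
+ //   template<typename T> struct List { List *Next; };
+ // the injected-class-name 'List' by itself denotes List<T>, while
+ // 'List<int>' denotes another specialization; used as a template-name,
+ // plain 'List' refers to the class template itself.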
+ if (Record->isInjectedClassName()) {
+ Record = cast<CXXRecordDecl>(Record->getDeclContext());
+ if (Record->getDescribedClassTemplate())
+ return Record->getDescribedClassTemplate();
+
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Record))
+ return Spec->getSpecializedTemplate();
+ }
+
+ return 0;
+ }
+
+ return 0;
+}
+
+void Sema::FilterAcceptableTemplateNames(LookupResult &R,
+ bool AllowFunctionTemplates) {
+ // The set of class templates we've already seen.
+ llvm::SmallPtrSet<ClassTemplateDecl *, 8> ClassTemplates;
+ LookupResult::Filter filter = R.makeFilter();
+ while (filter.hasNext()) {
+ NamedDecl *Orig = filter.next();
+ NamedDecl *Repl = isAcceptableTemplateName(Context, Orig,
+ AllowFunctionTemplates);
+ if (!Repl)
+ filter.erase();
+ else if (Repl != Orig) {
+
+ // C++ [temp.local]p3:
+ // A lookup that finds an injected-class-name (10.2) can result in an
+ // ambiguity in certain cases (for example, if it is found in more than
+ // one base class). If all of the injected-class-names that are found
+ // refer to specializations of the same class template, and if the name
+ // is used as a template-name, the reference refers to the class
+ // template itself and not a specialization thereof, and is not
+ // ambiguous.
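+ // For illustration (hypothetical example):
+ //   template<typename T> struct Base { };
+ //   struct Derived : Base<int>, Base<float> {
+ //     Base<double> Member;  // OK: both injected-class-names refer to
+ //   };                      // specializations of the same template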
+ if (ClassTemplateDecl *ClassTmpl = dyn_cast<ClassTemplateDecl>(Repl))
+ if (!ClassTemplates.insert(ClassTmpl)) {
+ filter.erase();
+ continue;
+ }
+
+ // FIXME: we promote access to public here as a workaround to
+ // the fact that LookupResult doesn't let us remember that we
+ // found this template through a particular injected class name,
+ // which means we end up doing nasty things to the invariants.
+ // Pretending that access is public is *much* safer.
+ filter.replace(Repl, AS_public);
+ }
+ }
+ filter.done();
+}
+
+bool Sema::hasAnyAcceptableTemplateNames(LookupResult &R,
+ bool AllowFunctionTemplates) {
+ for (LookupResult::iterator I = R.begin(), IEnd = R.end(); I != IEnd; ++I)
+ if (isAcceptableTemplateName(Context, *I, AllowFunctionTemplates))
+ return true;
+
+ return false;
+}
+
+TemplateNameKind Sema::isTemplateName(Scope *S,
+ CXXScopeSpec &SS,
+ bool hasTemplateKeyword,
+ UnqualifiedId &Name,
+ ParsedType ObjectTypePtr,
+ bool EnteringContext,
+ TemplateTy &TemplateResult,
+ bool &MemberOfUnknownSpecialization) {
+ assert(getLangOpts().CPlusPlus && "No template names in C!");
+
+ DeclarationName TName;
+ MemberOfUnknownSpecialization = false;
+
+ switch (Name.getKind()) {
+ case UnqualifiedId::IK_Identifier:
+ TName = DeclarationName(Name.Identifier);
+ break;
+
+ case UnqualifiedId::IK_OperatorFunctionId:
+ TName = Context.DeclarationNames.getCXXOperatorName(
+ Name.OperatorFunctionId.Operator);
+ break;
+
+ case UnqualifiedId::IK_LiteralOperatorId:
+ TName = Context.DeclarationNames.getCXXLiteralOperatorName(Name.Identifier);
+ break;
+
+ default:
+ return TNK_Non_template;
+ }
+
+ QualType ObjectType = ObjectTypePtr.get();
+
+ LookupResult R(*this, TName, Name.getLocStart(), LookupOrdinaryName);
+ LookupTemplateName(R, S, SS, ObjectType, EnteringContext,
+ MemberOfUnknownSpecialization);
+ if (R.empty()) return TNK_Non_template;
+ if (R.isAmbiguous()) {
+ // Suppress diagnostics; we'll redo this lookup later.
+ R.suppressDiagnostics();
+
+ // FIXME: we might have ambiguous templates, in which case we
+ // should at least parse them properly!
+ return TNK_Non_template;
+ }
+
+ TemplateName Template;
+ TemplateNameKind TemplateKind;
+
+ unsigned ResultCount = R.end() - R.begin();
+ if (ResultCount > 1) {
+ // We assume that we'll preserve the qualifier from a function
+ // template name in other ways.
+ Template = Context.getOverloadedTemplateName(R.begin(), R.end());
+ TemplateKind = TNK_Function_template;
+
+ // We'll do this lookup again later.
+ R.suppressDiagnostics();
+ } else {
+ TemplateDecl *TD = cast<TemplateDecl>((*R.begin())->getUnderlyingDecl());
+
+ if (SS.isSet() && !SS.isInvalid()) {
+ NestedNameSpecifier *Qualifier
+ = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+ Template = Context.getQualifiedTemplateName(Qualifier,
+ hasTemplateKeyword, TD);
+ } else {
+ Template = TemplateName(TD);
+ }
+
+ if (isa<FunctionTemplateDecl>(TD)) {
+ TemplateKind = TNK_Function_template;
+
+ // We'll do this lookup again later.
+ R.suppressDiagnostics();
+ } else {
+ assert(isa<ClassTemplateDecl>(TD) || isa<TemplateTemplateParmDecl>(TD) ||
+ isa<TypeAliasTemplateDecl>(TD));
+ TemplateKind = TNK_Type_template;
+ }
+ }
+
+ TemplateResult = TemplateTy::make(Template);
+ return TemplateKind;
+}
+
+bool Sema::DiagnoseUnknownTemplateName(const IdentifierInfo &II,
+ SourceLocation IILoc,
+ Scope *S,
+ const CXXScopeSpec *SS,
+ TemplateTy &SuggestedTemplate,
+ TemplateNameKind &SuggestedKind) {
+ // We can't recover unless there's a dependent scope specifier preceding the
+ // template name.
+ // FIXME: Typo correction?
+ if (!SS || !SS->isSet() || !isDependentScopeSpecifier(*SS) ||
+ computeDeclContext(*SS))
+ return false;
+
+ // The code is missing a 'template' keyword prior to the dependent template
+ // name.
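+ // Illustrative (hypothetical) example: given a dependent type 'T',
+ //   typename T::Rebind<int>::other X;           // 'template' missing
+ // the fix-it below produces
+ //   typename T::template Rebind<int>::other X;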
+ NestedNameSpecifier *Qualifier = (NestedNameSpecifier*)SS->getScopeRep();
+ Diag(IILoc, diag::err_template_kw_missing)
+ << Qualifier << II.getName()
+ << FixItHint::CreateInsertion(IILoc, "template ");
+ SuggestedTemplate
+ = TemplateTy::make(Context.getDependentTemplateName(Qualifier, &II));
+ SuggestedKind = TNK_Dependent_template_name;
+ return true;
+}
+
+void Sema::LookupTemplateName(LookupResult &Found,
+ Scope *S, CXXScopeSpec &SS,
+ QualType ObjectType,
+ bool EnteringContext,
+ bool &MemberOfUnknownSpecialization) {
+ // Determine where to perform name lookup
+ MemberOfUnknownSpecialization = false;
+ DeclContext *LookupCtx = 0;
+ bool isDependent = false;
+ if (!ObjectType.isNull()) {
+ // This nested-name-specifier occurs in a member access expression, e.g.,
+ // x->B::f, and we are looking into the type of the object.
+ assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist");
+ LookupCtx = computeDeclContext(ObjectType);
+ isDependent = ObjectType->isDependentType();
+ assert((isDependent || !ObjectType->isIncompleteType()) &&
+ "Caller should have completed object type");
+
+ // Template names cannot appear inside an Objective-C class or object type.
+ if (ObjectType->isObjCObjectOrInterfaceType()) {
+ Found.clear();
+ return;
+ }
+ } else if (SS.isSet()) {
+ // This nested-name-specifier occurs after another nested-name-specifier,
+ // so look into the context associated with the prior nested-name-specifier.
+ LookupCtx = computeDeclContext(SS, EnteringContext);
+ isDependent = isDependentScopeSpecifier(SS);
+
+ // The declaration context must be complete.
+ if (LookupCtx && RequireCompleteDeclContext(SS, LookupCtx))
+ return;
+ }
+
+ bool ObjectTypeSearchedInScope = false;
+ bool AllowFunctionTemplatesInLookup = true;
+ if (LookupCtx) {
+ // Perform "qualified" name lookup into the declaration context we
+ // computed, which is either the type of the base of a member access
+ // expression or the declaration context associated with a prior
+ // nested-name-specifier.
+ LookupQualifiedName(Found, LookupCtx);
+ if (!ObjectType.isNull() && Found.empty()) {
+ // C++ [basic.lookup.classref]p1:
+ // In a class member access expression (5.2.5), if the . or -> token is
+ // immediately followed by an identifier followed by a <, the
+ // identifier must be looked up to determine whether the < is the
+ // beginning of a template argument list (14.2) or a less-than operator.
+ // The identifier is first looked up in the class of the object
+ // expression. If the identifier is not found, it is then looked up in
+ // the context of the entire postfix-expression and shall name a class
+ // or function template.
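+ // For illustration (hypothetical example): in 'P->convert<int>()', the
+ // name 'convert' is looked up in the class of '*P' first; only if that
+ // lookup finds nothing is it looked up in the scope enclosing the
+ // postfix-expression.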
+ if (S) LookupName(Found, S);
+ ObjectTypeSearchedInScope = true;
+ AllowFunctionTemplatesInLookup = false;
+ }
+ } else if (isDependent && (!S || ObjectType.isNull())) {
+ // We cannot look into a dependent object type or nested name
+ // specifier.
+ MemberOfUnknownSpecialization = true;
+ return;
+ } else {
+ // Perform unqualified name lookup in the current scope.
+ LookupName(Found, S);
+
+ if (!ObjectType.isNull())
+ AllowFunctionTemplatesInLookup = false;
+ }
+
+ if (Found.empty() && !isDependent) {
+ // If we did not find any names, attempt to correct any typos.
+ DeclarationName Name = Found.getLookupName();
+ Found.clear();
+ // Simple filter callback that, for keywords, only accepts the C++ *_cast
+ // keywords (the named casts).
+ CorrectionCandidateCallback FilterCCC;
+ FilterCCC.WantTypeSpecifiers = false;
+ FilterCCC.WantExpressionKeywords = false;
+ FilterCCC.WantRemainingKeywords = false;
+ FilterCCC.WantCXXNamedCasts = true;
+ if (TypoCorrection Corrected = CorrectTypo(Found.getLookupNameInfo(),
+ Found.getLookupKind(), S, &SS,
+ FilterCCC, LookupCtx)) {
+ Found.setLookupName(Corrected.getCorrection());
+ if (Corrected.getCorrectionDecl())
+ Found.addDecl(Corrected.getCorrectionDecl());
+ FilterAcceptableTemplateNames(Found);
+ if (!Found.empty()) {
+ std::string CorrectedStr(Corrected.getAsString(getLangOpts()));
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOpts()));
+ if (LookupCtx)
+ Diag(Found.getNameLoc(), diag::err_no_member_template_suggest)
+ << Name << LookupCtx << CorrectedQuotedStr << SS.getRange()
+ << FixItHint::CreateReplacement(Found.getNameLoc(), CorrectedStr);
+ else
+ Diag(Found.getNameLoc(), diag::err_no_template_suggest)
+ << Name << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(Found.getNameLoc(), CorrectedStr);
+ if (TemplateDecl *Template = Found.getAsSingle<TemplateDecl>())
+ Diag(Template->getLocation(), diag::note_previous_decl)
+ << CorrectedQuotedStr;
+ }
+ } else {
+ Found.setLookupName(Name);
+ }
+ }
+
+ FilterAcceptableTemplateNames(Found, AllowFunctionTemplatesInLookup);
+ if (Found.empty()) {
+ if (isDependent)
+ MemberOfUnknownSpecialization = true;
+ return;
+ }
+
+ if (S && !ObjectType.isNull() && !ObjectTypeSearchedInScope) {
+ // C++ [basic.lookup.classref]p1:
+ // [...] If the lookup in the class of the object expression finds a
+ // template, the name is also looked up in the context of the entire
+ // postfix-expression and [...]
+ //
+ LookupResult FoundOuter(*this, Found.getLookupName(), Found.getNameLoc(),
+ LookupOrdinaryName);
+ LookupName(FoundOuter, S);
+ FilterAcceptableTemplateNames(FoundOuter, /*AllowFunctionTemplates=*/false);
+
+ if (FoundOuter.empty()) {
+ // - if the name is not found, the name found in the class of the
+ // object expression is used, otherwise
+ } else if (!FoundOuter.getAsSingle<ClassTemplateDecl>() ||
+ FoundOuter.isAmbiguous()) {
+ // - if the name is found in the context of the entire
+ // postfix-expression and does not name a class template, the name
+ // found in the class of the object expression is used, otherwise
+ FoundOuter.clear();
+ } else if (!Found.isSuppressingDiagnostics()) {
+ // - if the name found is a class template, it must refer to the same
+ // entity as the one found in the class of the object expression,
+ // otherwise the program is ill-formed.
+ if (!Found.isSingleResult() ||
+ Found.getFoundDecl()->getCanonicalDecl()
+ != FoundOuter.getFoundDecl()->getCanonicalDecl()) {
+ Diag(Found.getNameLoc(),
+ diag::ext_nested_name_member_ref_lookup_ambiguous)
+ << Found.getLookupName()
+ << ObjectType;
+ Diag(Found.getRepresentativeDecl()->getLocation(),
+ diag::note_ambig_member_ref_object_type)
+ << ObjectType;
+ Diag(FoundOuter.getFoundDecl()->getLocation(),
+ diag::note_ambig_member_ref_scope);
+
+ // Recover by taking the template that we found in the object
+ // expression's type.
+ }
+ }
+ }
+}
+
+/// ActOnDependentIdExpression - Handle a dependent id-expression that
+/// was just parsed. This is only possible with an explicit scope
+/// specifier naming a dependent type.
+ExprResult
+Sema::ActOnDependentIdExpression(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ bool isAddressOfOperand,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ DeclContext *DC = getFunctionLevelDeclContext();
+
+ if (!isAddressOfOperand &&
+ isa<CXXMethodDecl>(DC) &&
+ cast<CXXMethodDecl>(DC)->isInstance()) {
+ QualType ThisType = cast<CXXMethodDecl>(DC)->getThisType(Context);
+
+ // Since the 'this' expression is synthesized, we don't need to
+ // perform the double-lookup check.
+ NamedDecl *FirstQualifierInScope = 0;
+
+ return Owned(CXXDependentScopeMemberExpr::Create(Context,
+ /*This*/ 0, ThisType,
+ /*IsArrow*/ true,
+ /*Op*/ SourceLocation(),
+ SS.getWithLocInContext(Context),
+ TemplateKWLoc,
+ FirstQualifierInScope,
+ NameInfo,
+ TemplateArgs));
+ }
+
+ return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs);
+}
+
+ExprResult
+Sema::BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ return Owned(DependentScopeDeclRefExpr::Create(Context,
+ SS.getWithLocInContext(Context),
+ TemplateKWLoc,
+ NameInfo,
+ TemplateArgs));
+}
+
+/// DiagnoseTemplateParameterShadow - Produce a diagnostic complaining
+/// that the template parameter 'PrevDecl' is being shadowed by a new
+/// declaration at location Loc.
+void Sema::DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl) {
+ assert(PrevDecl->isTemplateParameter() && "Not a template parameter");
+
+ // Microsoft Visual C++ permits template parameters to be shadowed.
+ if (getLangOpts().MicrosoftExt)
+ return;
+
+ // C++ [temp.local]p4:
+ // A template-parameter shall not be redeclared within its
+ // scope (including nested scopes).
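+ // For illustration (hypothetical example):
+ //   template<typename T> void f() { int T; }  // error: 'T' shadows the
+ //                                             // template parameter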
+ Diag(Loc, diag::err_template_param_shadow)
+ << cast<NamedDecl>(PrevDecl)->getDeclName();
+ Diag(PrevDecl->getLocation(), diag::note_template_param_here);
+ return;
+}
+
+/// AdjustDeclIfTemplate - If the given decl happens to be a template, reset
+/// the parameter D to reference the templated declaration and return a pointer
+/// to the template declaration. Otherwise, do nothing to D and return null.
+TemplateDecl *Sema::AdjustDeclIfTemplate(Decl *&D) {
+ if (TemplateDecl *Temp = dyn_cast_or_null<TemplateDecl>(D)) {
+ D = Temp->getTemplatedDecl();
+ return Temp;
+ }
+ return 0;
+}
+
+ParsedTemplateArgument ParsedTemplateArgument::getTemplatePackExpansion(
+ SourceLocation EllipsisLoc) const {
+ assert(Kind == Template &&
+ "Only template template arguments can be pack expansions here");
+ assert(getAsTemplate().get().containsUnexpandedParameterPack() &&
+ "Template template argument pack expansion without packs");
+ ParsedTemplateArgument Result(*this);
+ Result.EllipsisLoc = EllipsisLoc;
+ return Result;
+}
+
+static TemplateArgumentLoc translateTemplateArgument(Sema &SemaRef,
+ const ParsedTemplateArgument &Arg) {
+
+ switch (Arg.getKind()) {
+ case ParsedTemplateArgument::Type: {
+ TypeSourceInfo *DI;
+ QualType T = SemaRef.GetTypeFromParser(Arg.getAsType(), &DI);
+ if (!DI)
+ DI = SemaRef.Context.getTrivialTypeSourceInfo(T, Arg.getLocation());
+ return TemplateArgumentLoc(TemplateArgument(T), DI);
+ }
+
+ case ParsedTemplateArgument::NonType: {
+ Expr *E = static_cast<Expr *>(Arg.getAsExpr());
+ return TemplateArgumentLoc(TemplateArgument(E), E);
+ }
+
+ case ParsedTemplateArgument::Template: {
+ TemplateName Template = Arg.getAsTemplate().get();
+ TemplateArgument TArg;
+ if (Arg.getEllipsisLoc().isValid())
+ TArg = TemplateArgument(Template, llvm::Optional<unsigned int>());
+ else
+ TArg = Template;
+ return TemplateArgumentLoc(TArg,
+ Arg.getScopeSpec().getWithLocInContext(
+ SemaRef.Context),
+ Arg.getLocation(),
+ Arg.getEllipsisLoc());
+ }
+ }
+
+ llvm_unreachable("Unhandled parsed template argument");
+}
+
+/// \brief Translates template arguments as provided by the parser
+/// into template arguments used by semantic analysis.
+void Sema::translateTemplateArguments(const ASTTemplateArgsPtr &TemplateArgsIn,
+ TemplateArgumentListInfo &TemplateArgs) {
+ for (unsigned I = 0, Last = TemplateArgsIn.size(); I != Last; ++I)
+ TemplateArgs.addArgument(translateTemplateArgument(*this,
+ TemplateArgsIn[I]));
+}
+
+/// ActOnTypeParameter - Called when a C++ template type parameter
+/// (e.g., "typename T") has been parsed. Typename specifies whether
+/// the keyword "typename" was used to declare the type parameter
+/// (otherwise, "class" was used), and KeyLoc is the location of the
+/// "class" or "typename" keyword. ParamName is the name of the
+/// parameter (NULL indicates an unnamed template parameter) and
+/// ParamNameLoc is the location of the parameter name (if any).
+/// If the type parameter has a default argument, it will be added
+/// later via ActOnTypeParameterDefault.
+Decl *Sema::ActOnTypeParameter(Scope *S, bool Typename, bool Ellipsis,
+ SourceLocation EllipsisLoc,
+ SourceLocation KeyLoc,
+ IdentifierInfo *ParamName,
+ SourceLocation ParamNameLoc,
+ unsigned Depth, unsigned Position,
+ SourceLocation EqualLoc,
+ ParsedType DefaultArg) {
+ assert(S->isTemplateParamScope() &&
+ "Template type parameter not in template parameter scope!");
+ bool Invalid = false;
+
+ if (ParamName) {
+ NamedDecl *PrevDecl = LookupSingleName(S, ParamName, ParamNameLoc,
+ LookupOrdinaryName,
+ ForRedeclaration);
+ if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ DiagnoseTemplateParameterShadow(ParamNameLoc, PrevDecl);
+ PrevDecl = 0;
+ }
+ }
+
+ SourceLocation Loc = ParamNameLoc;
+ if (!ParamName)
+ Loc = KeyLoc;
+
+ TemplateTypeParmDecl *Param
+ = TemplateTypeParmDecl::Create(Context, Context.getTranslationUnitDecl(),
+ KeyLoc, Loc, Depth, Position, ParamName,
+ Typename, Ellipsis);
+ Param->setAccess(AS_public);
+ if (Invalid)
+ Param->setInvalidDecl();
+
+ if (ParamName) {
+ // Add the template parameter into the current scope.
+ S->AddDecl(Param);
+ IdResolver.AddDecl(Param);
+ }
+
+ // C++0x [temp.param]p9:
+ // A default template-argument may be specified for any kind of
+ // template-parameter that is not a template parameter pack.
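+ // For illustration (hypothetical example):
+ //   template<typename ...Ts = int> struct Tuple;  // error: pack with a
+ //                                                 // default argument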
+ if (DefaultArg && Ellipsis) {
+ Diag(EqualLoc, diag::err_template_param_pack_default_arg);
+ DefaultArg = ParsedType();
+ }
+
+ // Handle the default argument, if provided.
+ if (DefaultArg) {
+ TypeSourceInfo *DefaultTInfo;
+ GetTypeFromParser(DefaultArg, &DefaultTInfo);
+
+ assert(DefaultTInfo && "expected source information for type");
+
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(Loc, DefaultTInfo,
+ UPPC_DefaultArgument))
+ return Param;
+
+ // Check the template argument itself.
+ if (CheckTemplateArgument(Param, DefaultTInfo)) {
+ Param->setInvalidDecl();
+ return Param;
+ }
+
+ Param->setDefaultArgument(DefaultTInfo, false);
+ }
+
+ return Param;
+}
+
+/// \brief Check that the type of a non-type template parameter is
+/// well-formed.
+///
+/// \returns the (possibly-promoted) parameter type if valid;
+/// otherwise, produces a diagnostic and returns a NULL type.
+QualType
+Sema::CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc) {
+ // We don't allow variably-modified types as the type of non-type template
+ // parameters.
+ if (T->isVariablyModifiedType()) {
+ Diag(Loc, diag::err_variably_modified_nontype_template_param)
+ << T;
+ return QualType();
+ }
+
+ // C++ [temp.param]p4:
+ //
+ // A non-type template-parameter shall have one of the following
+ // (optionally cv-qualified) types:
+ //
+ // -- integral or enumeration type,
+ if (T->isIntegralOrEnumerationType() ||
+ // -- pointer to object or pointer to function,
+ T->isPointerType() ||
+ // -- reference to object or reference to function,
+ T->isReferenceType() ||
+ // -- pointer to member,
+ T->isMemberPointerType() ||
+ // -- std::nullptr_t.
+ T->isNullPtrType() ||
+ // If T is a dependent type, we can't do the check now, so we
+ // assume that it is well-formed.
+ T->isDependentType()) {
+ // C++ [temp.param]p5: The top-level cv-qualifiers on the template-parameter
+ // are ignored when determining its type.
+ return T.getUnqualifiedType();
+ }
+
+ // C++ [temp.param]p8:
+ //
+ // A non-type template-parameter of type "array of T" or
+ // "function returning T" is adjusted to be of type "pointer to
+ // T" or "pointer to function returning T", respectively.
+ else if (T->isArrayType())
+ // FIXME: Keep the type prior to promotion?
+ return Context.getArrayDecayedType(T);
+ else if (T->isFunctionType())
+ // FIXME: Keep the type prior to promotion?
+ return Context.getPointerType(T);
+
+ Diag(Loc, diag::err_template_nontype_parm_bad_type)
+ << T;
+
+ return QualType();
+}
+
+Decl *Sema::ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
+ unsigned Depth,
+ unsigned Position,
+ SourceLocation EqualLoc,
+ Expr *Default) {
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType T = TInfo->getType();
+
+ assert(S->isTemplateParamScope() &&
+ "Non-type template parameter not in template parameter scope!");
+ bool Invalid = false;
+
+ IdentifierInfo *ParamName = D.getIdentifier();
+ if (ParamName) {
+ NamedDecl *PrevDecl = LookupSingleName(S, ParamName, D.getIdentifierLoc(),
+ LookupOrdinaryName,
+ ForRedeclaration);
+ if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ DiagnoseTemplateParameterShadow(D.getIdentifierLoc(), PrevDecl);
+ PrevDecl = 0;
+ }
+ }
+
+ T = CheckNonTypeTemplateParameterType(T, D.getIdentifierLoc());
+ if (T.isNull()) {
+ T = Context.IntTy; // Recover with an 'int' type.
+ Invalid = true;
+ }
+
+ bool IsParameterPack = D.hasEllipsis();
+ NonTypeTemplateParmDecl *Param
+ = NonTypeTemplateParmDecl::Create(Context, Context.getTranslationUnitDecl(),
+ D.getLocStart(),
+ D.getIdentifierLoc(),
+ Depth, Position, ParamName, T,
+ IsParameterPack, TInfo);
+ Param->setAccess(AS_public);
+
+ if (Invalid)
+ Param->setInvalidDecl();
+
+ if (D.getIdentifier()) {
+ // Add the template parameter into the current scope.
+ S->AddDecl(Param);
+ IdResolver.AddDecl(Param);
+ }
+
+ // C++0x [temp.param]p9:
+ // A default template-argument may be specified for any kind of
+ // template-parameter that is not a template parameter pack.
+ if (Default && IsParameterPack) {
+ Diag(EqualLoc, diag::err_template_param_pack_default_arg);
+ Default = 0;
+ }
+
+ // Check the well-formedness of the default template argument, if provided.
+ if (Default) {
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(Default, UPPC_DefaultArgument))
+ return Param;
+
+ TemplateArgument Converted;
+ ExprResult DefaultRes = CheckTemplateArgument(Param, Param->getType(),
+ Default, Converted);
+ if (DefaultRes.isInvalid()) {
+ Param->setInvalidDecl();
+ return Param;
+ }
+ Default = DefaultRes.take();
+
+ Param->setDefaultArgument(Default, false);
+ }
+
+ return Param;
+}
+
+/// ActOnTemplateTemplateParameter - Called when a C++ template template
+/// parameter (e.g. T in template <template <typename> class T> class array)
+/// has been parsed. S is the current scope.
+Decl *Sema::ActOnTemplateTemplateParameter(Scope* S,
+ SourceLocation TmpLoc,
+ TemplateParameterList *Params,
+ SourceLocation EllipsisLoc,
+ IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ unsigned Depth,
+ unsigned Position,
+ SourceLocation EqualLoc,
+ ParsedTemplateArgument Default) {
+ assert(S->isTemplateParamScope() &&
+ "Template template parameter not in template parameter scope!");
+
+ // Construct the parameter object.
+ bool IsParameterPack = EllipsisLoc.isValid();
+ TemplateTemplateParmDecl *Param =
+ TemplateTemplateParmDecl::Create(Context, Context.getTranslationUnitDecl(),
+ NameLoc.isInvalid()? TmpLoc : NameLoc,
+ Depth, Position, IsParameterPack,
+ Name, Params);
+ Param->setAccess(AS_public);
+
+ // If the template template parameter has a name, then link the identifier
+ // into the scope and lookup mechanisms.
+ if (Name) {
+ S->AddDecl(Param);
+ IdResolver.AddDecl(Param);
+ }
+
+ if (Params->size() == 0) {
+ Diag(Param->getLocation(), diag::err_template_template_parm_no_parms)
+ << SourceRange(Params->getLAngleLoc(), Params->getRAngleLoc());
+ Param->setInvalidDecl();
+ }
+
+ // C++0x [temp.param]p9:
+ // A default template-argument may be specified for any kind of
+ // template-parameter that is not a template parameter pack.
+ if (IsParameterPack && !Default.isInvalid()) {
+ Diag(EqualLoc, diag::err_template_param_pack_default_arg);
+ Default = ParsedTemplateArgument();
+ }
+
+ if (!Default.isInvalid()) {
+ // Check only that we have a template template argument. We don't want to
+ // try to check well-formedness now, because our template template parameter
+ // might have dependent types in its template parameters, which we wouldn't
+ // be able to match now.
+ //
+ // If none of the template template parameter's template arguments mention
+ // other template parameters, we could actually perform more checking here.
+ // However, it isn't worth doing.
+ TemplateArgumentLoc DefaultArg = translateTemplateArgument(*this, Default);
+ if (DefaultArg.getArgument().getAsTemplate().isNull()) {
+ Diag(DefaultArg.getLocation(), diag::err_template_arg_not_class_template)
+ << DefaultArg.getSourceRange();
+ return Param;
+ }
+
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(DefaultArg.getLocation(),
+ DefaultArg.getArgument().getAsTemplate(),
+ UPPC_DefaultArgument))
+ return Param;
+
+ Param->setDefaultArgument(DefaultArg, false);
+ }
+
+ return Param;
+}
+
+/// ActOnTemplateParameterList - Builds a TemplateParameterList that
+/// contains the template parameters in Params/NumParams.
+TemplateParameterList *
+Sema::ActOnTemplateParameterList(unsigned Depth,
+ SourceLocation ExportLoc,
+ SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ Decl **Params, unsigned NumParams,
+ SourceLocation RAngleLoc) {
+ if (ExportLoc.isValid())
+ Diag(ExportLoc, diag::warn_template_export_unsupported);
+
+ return TemplateParameterList::Create(Context, TemplateLoc, LAngleLoc,
+ (NamedDecl**)Params, NumParams,
+ RAngleLoc);
+}
+
+static void SetNestedNameSpecifier(TagDecl *T, const CXXScopeSpec &SS) {
+ if (SS.isSet())
+ T->setQualifierInfo(SS.getWithLocInContext(T->getASTContext()));
+}
+
+DeclResult
+Sema::CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK,
+ SourceLocation KWLoc, CXXScopeSpec &SS,
+ IdentifierInfo *Name, SourceLocation NameLoc,
+ AttributeList *Attr,
+ TemplateParameterList *TemplateParams,
+ AccessSpecifier AS, SourceLocation ModulePrivateLoc,
+ unsigned NumOuterTemplateParamLists,
+ TemplateParameterList** OuterTemplateParamLists) {
+ assert(TemplateParams && TemplateParams->size() > 0 &&
+ "No template parameters");
+ assert(TUK != TUK_Reference && "Can only declare or define class templates");
+ bool Invalid = false;
+
+ // Check that we can declare a template here.
+ if (CheckTemplateDeclScope(S, TemplateParams))
+ return true;
+
+ TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
+ assert(Kind != TTK_Enum && "can't build template of enumerated type");
+
+ // There is no such thing as an unnamed class template.
+ if (!Name) {
+ Diag(KWLoc, diag::err_template_unnamed_class);
+ return true;
+ }
+
+ // Find any previous declaration with this name.
+ DeclContext *SemanticContext;
+ LookupResult Previous(*this, Name, NameLoc, LookupOrdinaryName,
+ ForRedeclaration);
+ if (SS.isNotEmpty() && !SS.isInvalid()) {
+ SemanticContext = computeDeclContext(SS, true);
+ if (!SemanticContext) {
+ // FIXME: Horrible, horrible hack! We can't currently represent this
+ // in the AST, and historically we have just ignored such friend
+ // class templates, so don't complain here.
+ if (TUK != TUK_Friend)
+ Diag(NameLoc, diag::err_template_qualified_declarator_no_match)
+ << SS.getScopeRep() << SS.getRange();
+ return true;
+ }
+
+ if (RequireCompleteDeclContext(SS, SemanticContext))
+ return true;
+
+ // If we're adding a template to a dependent context, we may need to
+ // rebuild some of the types used within the template parameter list,
+ // now that we know what the current instantiation is.
+ if (SemanticContext->isDependentContext()) {
+ ContextRAII SavedContext(*this, SemanticContext);
+ if (RebuildTemplateParamsInCurrentInstantiation(TemplateParams))
+ Invalid = true;
+ } else if (TUK != TUK_Friend && TUK != TUK_Reference)
+ diagnoseQualifiedDeclaration(SS, SemanticContext, Name, NameLoc);
+
+ LookupQualifiedName(Previous, SemanticContext);
+ } else {
+ SemanticContext = CurContext;
+ LookupName(Previous, S);
+ }
+
+ if (Previous.isAmbiguous())
+ return true;
+
+ NamedDecl *PrevDecl = 0;
+ if (Previous.begin() != Previous.end())
+ PrevDecl = (*Previous.begin())->getUnderlyingDecl();
+
+ // If there is a previous declaration with the same name, check
+ // whether this is a valid redeclaration.
+ ClassTemplateDecl *PrevClassTemplate
+ = dyn_cast_or_null<ClassTemplateDecl>(PrevDecl);
+
+ // We may have found the injected-class-name of a class template,
+ // class template partial specialization, or class template specialization.
+ // In these cases, grab the template that is being defined or specialized.
+ if (!PrevClassTemplate && PrevDecl && isa<CXXRecordDecl>(PrevDecl) &&
+ cast<CXXRecordDecl>(PrevDecl)->isInjectedClassName()) {
+ PrevDecl = cast<CXXRecordDecl>(PrevDecl->getDeclContext());
+ PrevClassTemplate
+ = cast<CXXRecordDecl>(PrevDecl)->getDescribedClassTemplate();
+ if (!PrevClassTemplate && isa<ClassTemplateSpecializationDecl>(PrevDecl)) {
+ PrevClassTemplate
+ = cast<ClassTemplateSpecializationDecl>(PrevDecl)
+ ->getSpecializedTemplate();
+ }
+ }
+
+ if (TUK == TUK_Friend) {
+ // C++ [namespace.memdef]p3:
+ // [...] When looking for a prior declaration of a class or a function
+ // declared as a friend, and when the name of the friend class or
+ // function is neither a qualified name nor a template-id, scopes outside
+ // the innermost enclosing namespace scope are not considered.
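+ // A hypothetical example for illustration:
+ //   template<typename T> class Cache;
+ //   namespace N {
+ //     class Registry {
+ //       template<typename T> friend class Cache;  // befriends N::Cache,
+ //     };                                           // not ::Cache
+ //   }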
+ if (!SS.isSet()) {
+ DeclContext *OutermostContext = CurContext;
+ while (!OutermostContext->isFileContext())
+ OutermostContext = OutermostContext->getLookupParent();
+
+ if (PrevDecl &&
+ (OutermostContext->Equals(PrevDecl->getDeclContext()) ||
+ OutermostContext->Encloses(PrevDecl->getDeclContext()))) {
+ SemanticContext = PrevDecl->getDeclContext();
+ } else {
+ // Declarations in outer scopes don't matter. However, the outermost
+ // context we computed is the semantic context for our new
+ // declaration.
+ PrevDecl = PrevClassTemplate = 0;
+ SemanticContext = OutermostContext;
+ }
+ }
+
+ if (CurContext->isDependentContext()) {
+ // If this is a dependent context, we don't want to link the friend
+ // class template to the template in scope, because that would perform
+ // checking of the template parameter lists that can't be performed
+ // until the outer context is instantiated.
+ PrevDecl = PrevClassTemplate = 0;
+ }
+ } else if (PrevDecl && !isDeclInScope(PrevDecl, SemanticContext, S))
+ PrevDecl = PrevClassTemplate = 0;
+
+ if (PrevClassTemplate) {
+ // Ensure that the template parameter lists are compatible.
+ if (!TemplateParameterListsAreEqual(TemplateParams,
+ PrevClassTemplate->getTemplateParameters(),
+ /*Complain=*/true,
+ TPL_TemplateMatch))
+ return true;
+
+ // C++ [temp.class]p4:
+ // In a redeclaration, partial specialization, explicit
+ // specialization or explicit instantiation of a class template,
+ // the class-key shall agree in kind with the original class
+ // template declaration (7.1.5.3).
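+ // For illustration (hypothetical example):
+ //   template<typename T> struct X;
+ //   template<typename T> union X { };  // error: class-key disagrees with
+ //                                      // the original declaration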
+ RecordDecl *PrevRecordDecl = PrevClassTemplate->getTemplatedDecl();
+ if (!isAcceptableTagRedeclaration(PrevRecordDecl, Kind,
+ TUK == TUK_Definition, KWLoc, *Name)) {
+ Diag(KWLoc, diag::err_use_with_wrong_tag)
+ << Name
+ << FixItHint::CreateReplacement(KWLoc, PrevRecordDecl->getKindName());
+ Diag(PrevRecordDecl->getLocation(), diag::note_previous_use);
+ Kind = PrevRecordDecl->getTagKind();
+ }
+
+ // Check for redefinition of this class template.
+ if (TUK == TUK_Definition) {
+ if (TagDecl *Def = PrevRecordDecl->getDefinition()) {
+ Diag(NameLoc, diag::err_redefinition) << Name;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ // FIXME: Would it make sense to try to "forget" the previous
+ // definition, as part of error recovery?
+ return true;
+ }
+ }
+ } else if (PrevDecl && PrevDecl->isTemplateParameter()) {
+ // Maybe we will complain about the shadowed template parameter.
+ DiagnoseTemplateParameterShadow(NameLoc, PrevDecl);
+ // Just pretend that we didn't see the previous declaration.
+ PrevDecl = 0;
+ } else if (PrevDecl) {
+ // C++ [temp]p5:
+ // A class template shall not have the same name as any other
+ // template, class, function, object, enumeration, enumerator,
+ // namespace, or type in the same scope (3.3), except as specified
+ // in (14.5.4).
+ Diag(NameLoc, diag::err_redefinition_different_kind) << Name;
+ Diag(PrevDecl->getLocation(), diag::note_previous_definition);
+ return true;
+ }
+
+ // Check the template parameter list of this declaration, possibly
+ // merging in the template parameter list from the previous class
+ // template declaration.
+ if (CheckTemplateParameterList(TemplateParams,
+ PrevClassTemplate? PrevClassTemplate->getTemplateParameters() : 0,
+ (SS.isSet() && SemanticContext &&
+ SemanticContext->isRecord() &&
+ SemanticContext->isDependentContext())
+ ? TPC_ClassTemplateMember
+ : TPC_ClassTemplate))
+ Invalid = true;
+
+ if (SS.isSet()) {
+ // If the name of the template was qualified, we must be defining the
+ // template out-of-line.
+ if (!SS.isInvalid() && !Invalid && !PrevClassTemplate &&
+ !(TUK == TUK_Friend && CurContext->isDependentContext())) {
+ Diag(NameLoc, diag::err_member_def_does_not_match)
+ << Name << SemanticContext << SS.getRange();
+ Invalid = true;
+ }
+ }
+
+ CXXRecordDecl *NewClass =
+ CXXRecordDecl::Create(Context, Kind, SemanticContext, KWLoc, NameLoc, Name,
+ PrevClassTemplate?
+ PrevClassTemplate->getTemplatedDecl() : 0,
+ /*DelayTypeCreation=*/true);
+ SetNestedNameSpecifier(NewClass, SS);
+ if (NumOuterTemplateParamLists > 0)
+ NewClass->setTemplateParameterListsInfo(Context,
+ NumOuterTemplateParamLists,
+ OuterTemplateParamLists);
+
+ // Add alignment attributes if necessary; these attributes are checked when
+ // the ASTContext lays out the structure.
+ AddAlignmentAttributesForRecord(NewClass);
+ AddMsStructLayoutForRecord(NewClass);
+
+ ClassTemplateDecl *NewTemplate
+ = ClassTemplateDecl::Create(Context, SemanticContext, NameLoc,
+ DeclarationName(Name), TemplateParams,
+ NewClass, PrevClassTemplate);
+ NewClass->setDescribedClassTemplate(NewTemplate);
+
+ if (ModulePrivateLoc.isValid())
+ NewTemplate->setModulePrivate();
+
+ // Build the type for the class template declaration now.
+ QualType T = NewTemplate->getInjectedClassNameSpecialization();
+ T = Context.getInjectedClassNameType(NewClass, T);
+ assert(T->isDependentType() && "Class template type is not dependent?");
+ (void)T;
+
+ // If we are providing an explicit specialization of a member that is a
+ // class template, make a note of that.
+ if (PrevClassTemplate &&
+ PrevClassTemplate->getInstantiatedFromMemberTemplate())
+ PrevClassTemplate->setMemberSpecialization();
+
+ // Set the access specifier.
+ if (!Invalid && TUK != TUK_Friend && NewTemplate->getDeclContext()->isRecord())
+ SetMemberAccessSpecifier(NewTemplate, PrevClassTemplate, AS);
+
+ // Set the lexical context of these templates
+ NewClass->setLexicalDeclContext(CurContext);
+ NewTemplate->setLexicalDeclContext(CurContext);
+
+ if (TUK == TUK_Definition)
+ NewClass->startDefinition();
+
+ if (Attr)
+ ProcessDeclAttributeList(S, NewClass, Attr);
+
+ if (TUK != TUK_Friend)
+ PushOnScopeChains(NewTemplate, S);
+ else {
+ if (PrevClassTemplate && PrevClassTemplate->getAccess() != AS_none) {
+ NewTemplate->setAccess(PrevClassTemplate->getAccess());
+ NewClass->setAccess(PrevClassTemplate->getAccess());
+ }
+
+ NewTemplate->setObjectOfFriendDecl(/* PreviouslyDeclared = */
+ PrevClassTemplate != NULL);
+
+ // Friend templates are visible in fairly strange ways.
+ if (!CurContext->isDependentContext()) {
+ DeclContext *DC = SemanticContext->getRedeclContext();
+ DC->makeDeclVisibleInContext(NewTemplate);
+ if (Scope *EnclosingScope = getScopeForDeclContext(S, DC))
+ PushOnScopeChains(NewTemplate, EnclosingScope,
+ /* AddToContext = */ false);
+ }
+
+ FriendDecl *Friend = FriendDecl::Create(Context, CurContext,
+ NewClass->getLocation(),
+ NewTemplate,
+ /*FIXME:*/NewClass->getLocation());
+ Friend->setAccess(AS_public);
+ CurContext->addDecl(Friend);
+ }
+
+ if (Invalid) {
+ NewTemplate->setInvalidDecl();
+ NewClass->setInvalidDecl();
+ }
+ return NewTemplate;
+}
+
+/// \brief Diagnose the presence of a default template argument on a
+/// template parameter, which is ill-formed in certain contexts.
+///
+/// \returns true if the default template argument should be dropped.
+static bool DiagnoseDefaultTemplateArgument(Sema &S,
+ Sema::TemplateParamListContext TPC,
+ SourceLocation ParamLoc,
+ SourceRange DefArgRange) {
+ switch (TPC) {
+ case Sema::TPC_ClassTemplate:
+ case Sema::TPC_TypeAliasTemplate:
+ return false;
+
+ case Sema::TPC_FunctionTemplate:
+ case Sema::TPC_FriendFunctionTemplateDefinition:
+ // C++ [temp.param]p9:
+ // A default template-argument shall not be specified in a
+ // function template declaration or a function template
+ // definition [...]
+ // If a friend function template declaration specifies a default
+ // template-argument, that declaration shall be a definition and shall be
+ // the only declaration of the function template in the translation unit.
+ // (C++98/03 doesn't have this wording; see DR226).
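+ // Illustrative (hypothetical) example:
+ //   template<typename T = int> void f(T);  // extension in C++98/03;
+ //                                          // compat warning in C++11 mode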
+ S.Diag(ParamLoc, S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_template_parameter_default_in_function_template
+ : diag::ext_template_parameter_default_in_function_template)
+ << DefArgRange;
+ return false;
+
+ case Sema::TPC_ClassTemplateMember:
+ // C++0x [temp.param]p9:
+ // A default template-argument shall not be specified in the
+ // template-parameter-lists of the definition of a member of a
+ // class template that appears outside of the member's class.
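+ // For illustration (hypothetical example):
+ //   template<typename T> struct A { void f(); };
+ //   template<typename T = int> void A<T>::f() { }  // error: default
+ //                                                  // argument here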
+ S.Diag(ParamLoc, diag::err_template_parameter_default_template_member)
+ << DefArgRange;
+ return true;
+
+ case Sema::TPC_FriendFunctionTemplate:
+ // C++ [temp.param]p9:
+ // A default template-argument shall not be specified in a
+ // friend template declaration.
+ S.Diag(ParamLoc, diag::err_template_parameter_default_friend_template)
+ << DefArgRange;
+ return true;
+
+ // FIXME: C++0x [temp.param]p9 allows default template-arguments
+ // for friend function templates if there is only a single
+ // declaration (and it is a definition). Strange!
+ }
+
+ llvm_unreachable("Invalid TemplateParamListContext!");
+}
+
+/// \brief Check for unexpanded parameter packs within the template parameters
+/// of a template template parameter, recursively.
+static bool DiagnoseUnexpandedParameterPacks(Sema &S,
+ TemplateTemplateParmDecl *TTP) {
+ TemplateParameterList *Params = TTP->getTemplateParameters();
+ for (unsigned I = 0, N = Params->size(); I != N; ++I) {
+ NamedDecl *P = Params->getParam(I);
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(P)) {
+ if (S.DiagnoseUnexpandedParameterPack(NTTP->getLocation(),
+ NTTP->getTypeSourceInfo(),
+ Sema::UPPC_NonTypeTemplateParameterType))
+ return true;
+
+ continue;
+ }
+
+ if (TemplateTemplateParmDecl *InnerTTP
+ = dyn_cast<TemplateTemplateParmDecl>(P))
+ if (DiagnoseUnexpandedParameterPacks(S, InnerTTP))
+ return true;
+ }
+
+ return false;
+}
+
+/// \brief Checks the validity of a template parameter list, possibly
+/// considering the template parameter list from a previous
+/// declaration.
+///
+/// If an "old" template parameter list is provided, it must be
+/// equivalent (per TemplateParameterListsAreEqual) to the "new"
+/// template parameter list.
+///
+/// \param NewParams Template parameter list for a new template
+/// declaration. This template parameter list will be updated with any
+/// default arguments that are carried through from the previous
+/// template parameter list.
+///
+/// \param OldParams If provided, template parameter list from a
+/// previous declaration of the same template. Default template
+/// arguments will be merged from the old template parameter list to
+/// the new template parameter list.
+///
+/// \param TPC Describes the context in which we are checking the given
+/// template parameter list.
+///
+/// \returns true if an error occurred, false otherwise.
+bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
+ TemplateParameterList *OldParams,
+ TemplateParamListContext TPC) {
+ bool Invalid = false;
+
+ // C++ [temp.param]p10:
+ // The set of default template-arguments available for use with a
+ // template declaration or definition is obtained by merging the
+ // default arguments from the definition (if in scope) and all
+ // declarations in scope in the same way default function
+ // arguments are (8.3.6).
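+ // For illustration (hypothetical example):
+ //   template<typename T, typename U = int> struct X;
+ //   template<typename T = char, typename U> struct X;
+ //   X<> Obj;  // OK: T = char and U = int are both available here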
+ bool SawDefaultArgument = false;
+ SourceLocation PreviousDefaultArgLoc;
+
+ // Dummy initialization to avoid warnings.
+ TemplateParameterList::iterator OldParam = NewParams->end();
+ if (OldParams)
+ OldParam = OldParams->begin();
+
+ bool RemoveDefaultArguments = false;
+ for (TemplateParameterList::iterator NewParam = NewParams->begin(),
+ NewParamEnd = NewParams->end();
+ NewParam != NewParamEnd; ++NewParam) {
+ // Variables used to diagnose redundant default arguments
+ bool RedundantDefaultArg = false;
+ SourceLocation OldDefaultLoc;
+ SourceLocation NewDefaultLoc;
+
+ // Variable used to diagnose missing default arguments
+ bool MissingDefaultArg = false;
+
+ // Variable used to diagnose non-final parameter packs
+ bool SawParameterPack = false;
+
+ if (TemplateTypeParmDecl *NewTypeParm
+ = dyn_cast<TemplateTypeParmDecl>(*NewParam)) {
+ // Check the presence of a default argument here.
+ if (NewTypeParm->hasDefaultArgument() &&
+ DiagnoseDefaultTemplateArgument(*this, TPC,
+ NewTypeParm->getLocation(),
+ NewTypeParm->getDefaultArgumentInfo()->getTypeLoc()
+ .getSourceRange()))
+ NewTypeParm->removeDefaultArgument();
+
+ // Merge default arguments for template type parameters.
+ TemplateTypeParmDecl *OldTypeParm
+ = OldParams? cast<TemplateTypeParmDecl>(*OldParam) : 0;
+
+ if (NewTypeParm->isParameterPack()) {
+ assert(!NewTypeParm->hasDefaultArgument() &&
+ "Parameter packs can't have a default argument!");
+ SawParameterPack = true;
+ } else if (OldTypeParm && OldTypeParm->hasDefaultArgument() &&
+ NewTypeParm->hasDefaultArgument()) {
+ OldDefaultLoc = OldTypeParm->getDefaultArgumentLoc();
+ NewDefaultLoc = NewTypeParm->getDefaultArgumentLoc();
+ SawDefaultArgument = true;
+ RedundantDefaultArg = true;
+ PreviousDefaultArgLoc = NewDefaultLoc;
+ } else if (OldTypeParm && OldTypeParm->hasDefaultArgument()) {
+ // Merge the default argument from the old declaration to the
+ // new declaration.
+ SawDefaultArgument = true;
+ NewTypeParm->setDefaultArgument(OldTypeParm->getDefaultArgumentInfo(),
+ true);
+ PreviousDefaultArgLoc = OldTypeParm->getDefaultArgumentLoc();
+ } else if (NewTypeParm->hasDefaultArgument()) {
+ SawDefaultArgument = true;
+ PreviousDefaultArgLoc = NewTypeParm->getDefaultArgumentLoc();
+ } else if (SawDefaultArgument)
+ MissingDefaultArg = true;
+ } else if (NonTypeTemplateParmDecl *NewNonTypeParm
+ = dyn_cast<NonTypeTemplateParmDecl>(*NewParam)) {
+ // Check for unexpanded parameter packs.
+ if (DiagnoseUnexpandedParameterPack(NewNonTypeParm->getLocation(),
+ NewNonTypeParm->getTypeSourceInfo(),
+ UPPC_NonTypeTemplateParameterType)) {
+ Invalid = true;
+ continue;
+ }
+
+ // Check the presence of a default argument here.
+ if (NewNonTypeParm->hasDefaultArgument() &&
+ DiagnoseDefaultTemplateArgument(*this, TPC,
+ NewNonTypeParm->getLocation(),
+ NewNonTypeParm->getDefaultArgument()->getSourceRange())) {
+ NewNonTypeParm->removeDefaultArgument();
+ }
+
+ // Merge default arguments for non-type template parameters
+ NonTypeTemplateParmDecl *OldNonTypeParm
+ = OldParams? cast<NonTypeTemplateParmDecl>(*OldParam) : 0;
+ if (NewNonTypeParm->isParameterPack()) {
+ assert(!NewNonTypeParm->hasDefaultArgument() &&
+ "Parameter packs can't have a default argument!");
+ SawParameterPack = true;
+ } else if (OldNonTypeParm && OldNonTypeParm->hasDefaultArgument() &&
+ NewNonTypeParm->hasDefaultArgument()) {
+ OldDefaultLoc = OldNonTypeParm->getDefaultArgumentLoc();
+ NewDefaultLoc = NewNonTypeParm->getDefaultArgumentLoc();
+ SawDefaultArgument = true;
+ RedundantDefaultArg = true;
+ PreviousDefaultArgLoc = NewDefaultLoc;
+ } else if (OldNonTypeParm && OldNonTypeParm->hasDefaultArgument()) {
+ // Merge the default argument from the old declaration to the
+ // new declaration.
+ SawDefaultArgument = true;
+ // FIXME: We need to create a new kind of "default argument"
+ // expression that points to a previous non-type template
+ // parameter.
+ NewNonTypeParm->setDefaultArgument(
+ OldNonTypeParm->getDefaultArgument(),
+ /*Inherited=*/ true);
+ PreviousDefaultArgLoc = OldNonTypeParm->getDefaultArgumentLoc();
+ } else if (NewNonTypeParm->hasDefaultArgument()) {
+ SawDefaultArgument = true;
+ PreviousDefaultArgLoc = NewNonTypeParm->getDefaultArgumentLoc();
+ } else if (SawDefaultArgument)
+ MissingDefaultArg = true;
+ } else {
+ TemplateTemplateParmDecl *NewTemplateParm
+ = cast<TemplateTemplateParmDecl>(*NewParam);
+
+ // Check for unexpanded parameter packs, recursively.
+ if (::DiagnoseUnexpandedParameterPacks(*this, NewTemplateParm)) {
+ Invalid = true;
+ continue;
+ }
+
+ // Check the presence of a default argument here.
+ if (NewTemplateParm->hasDefaultArgument() &&
+ DiagnoseDefaultTemplateArgument(*this, TPC,
+ NewTemplateParm->getLocation(),
+ NewTemplateParm->getDefaultArgument().getSourceRange()))
+ NewTemplateParm->removeDefaultArgument();
+
+ // Merge default arguments for template template parameters
+ TemplateTemplateParmDecl *OldTemplateParm
+ = OldParams? cast<TemplateTemplateParmDecl>(*OldParam) : 0;
+ if (NewTemplateParm->isParameterPack()) {
+ assert(!NewTemplateParm->hasDefaultArgument() &&
+ "Parameter packs can't have a default argument!");
+ SawParameterPack = true;
+ } else if (OldTemplateParm && OldTemplateParm->hasDefaultArgument() &&
+ NewTemplateParm->hasDefaultArgument()) {
+ OldDefaultLoc = OldTemplateParm->getDefaultArgument().getLocation();
+ NewDefaultLoc = NewTemplateParm->getDefaultArgument().getLocation();
+ SawDefaultArgument = true;
+ RedundantDefaultArg = true;
+ PreviousDefaultArgLoc = NewDefaultLoc;
+ } else if (OldTemplateParm && OldTemplateParm->hasDefaultArgument()) {
+ // Merge the default argument from the old declaration to the
+ // new declaration.
+ SawDefaultArgument = true;
+ // FIXME: We need to create a new kind of "default argument" expression
+ // that points to a previous template template parameter.
+ NewTemplateParm->setDefaultArgument(
+ OldTemplateParm->getDefaultArgument(),
+ /*Inherited=*/ true);
+ PreviousDefaultArgLoc
+ = OldTemplateParm->getDefaultArgument().getLocation();
+ } else if (NewTemplateParm->hasDefaultArgument()) {
+ SawDefaultArgument = true;
+ PreviousDefaultArgLoc
+ = NewTemplateParm->getDefaultArgument().getLocation();
+ } else if (SawDefaultArgument)
+ MissingDefaultArg = true;
+ }
+
+ // C++0x [temp.param]p11:
+ // If a template parameter of a primary class template or alias template
+ // is a template parameter pack, it shall be the last template parameter.
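+    //
+    // Illustrative example (hypothetical, not from the original source):
+    //   template<typename ...Ts, typename U> struct X; // error: pack not last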
+ if (SawParameterPack && (NewParam + 1) != NewParamEnd &&
+ (TPC == TPC_ClassTemplate || TPC == TPC_TypeAliasTemplate)) {
+ Diag((*NewParam)->getLocation(),
+ diag::err_template_param_pack_must_be_last_template_parameter);
+ Invalid = true;
+ }
+
+ if (RedundantDefaultArg) {
+ // C++ [temp.param]p12:
+ // A template-parameter shall not be given default arguments
+ // by two different declarations in the same scope.
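+      //
+      // Illustrative example (hypothetical, not from the original source):
+      //   template<typename T = int> struct A;
+      //   template<typename T = int> struct A {}; // error: default repeated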
+ Diag(NewDefaultLoc, diag::err_template_param_default_arg_redefinition);
+ Diag(OldDefaultLoc, diag::note_template_param_prev_default_arg);
+ Invalid = true;
+ } else if (MissingDefaultArg && TPC != TPC_FunctionTemplate) {
+ // C++ [temp.param]p11:
+ // If a template-parameter of a class template has a default
+ // template-argument, each subsequent template-parameter shall either
+ // have a default template-argument supplied or be a template parameter
+ // pack.
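+      //
+      // Illustrative example (hypothetical, not from the original source),
+      // where U neither has a default nor is a parameter pack:
+      //   template<typename T = int, typename U> struct B; // error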
+ Diag((*NewParam)->getLocation(),
+ diag::err_template_param_default_arg_missing);
+ Diag(PreviousDefaultArgLoc, diag::note_template_param_prev_default_arg);
+ Invalid = true;
+ RemoveDefaultArguments = true;
+ }
+
+ // If we have an old template parameter list that we're merging
+ // in, move on to the next parameter.
+ if (OldParams)
+ ++OldParam;
+ }
+
+ // We were missing some default arguments at the end of the list, so remove
+ // all of the default arguments.
+ if (RemoveDefaultArguments) {
+ for (TemplateParameterList::iterator NewParam = NewParams->begin(),
+ NewParamEnd = NewParams->end();
+ NewParam != NewParamEnd; ++NewParam) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*NewParam))
+ TTP->removeDefaultArgument();
+ else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*NewParam))
+ NTTP->removeDefaultArgument();
+ else
+ cast<TemplateTemplateParmDecl>(*NewParam)->removeDefaultArgument();
+ }
+ }
+
+ return Invalid;
+}
+
+namespace {
+
+/// A visitor that looks for any use of a template parameter at a certain
+/// depth or deeper.
+struct DependencyChecker : RecursiveASTVisitor<DependencyChecker> {
+ typedef RecursiveASTVisitor<DependencyChecker> super;
+
+ unsigned Depth;
+ bool Match;
+
+ DependencyChecker(TemplateParameterList *Params) : Match(false) {
+ NamedDecl *ND = Params->getParam(0);
+ if (TemplateTypeParmDecl *PD = dyn_cast<TemplateTypeParmDecl>(ND)) {
+ Depth = PD->getDepth();
+ } else if (NonTypeTemplateParmDecl *PD =
+ dyn_cast<NonTypeTemplateParmDecl>(ND)) {
+ Depth = PD->getDepth();
+ } else {
+ Depth = cast<TemplateTemplateParmDecl>(ND)->getDepth();
+ }
+ }
+
+ bool Matches(unsigned ParmDepth) {
+ if (ParmDepth >= Depth) {
+ Match = true;
+ return true;
+ }
+ return false;
+ }
+
+ bool VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
+ return !Matches(T->getDepth());
+ }
+
+ bool TraverseTemplateName(TemplateName N) {
+ if (TemplateTemplateParmDecl *PD =
+ dyn_cast_or_null<TemplateTemplateParmDecl>(N.getAsTemplateDecl()))
+ if (Matches(PD->getDepth())) return false;
+ return super::TraverseTemplateName(N);
+ }
+
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ if (NonTypeTemplateParmDecl *PD =
+ dyn_cast<NonTypeTemplateParmDecl>(E->getDecl())) {
+ if (PD->getDepth() == Depth) {
+ Match = true;
+ return false;
+ }
+ }
+ return super::VisitDeclRefExpr(E);
+ }
+
+ bool TraverseInjectedClassNameType(const InjectedClassNameType *T) {
+ return TraverseType(T->getInjectedSpecializationType());
+ }
+};
+}
+
+/// Determines whether a given type depends on the given parameter
+/// list.
+static bool
+DependsOnTemplateParameters(QualType T, TemplateParameterList *Params) {
+ DependencyChecker Checker(Params);
+ Checker.TraverseType(T);
+ return Checker.Match;
+}
+
+// Find the source range corresponding to the named type in the given
+// nested-name-specifier, if any.
+static SourceRange getRangeOfTypeInNestedNameSpecifier(ASTContext &Context,
+ QualType T,
+ const CXXScopeSpec &SS) {
+ NestedNameSpecifierLoc NNSLoc(SS.getScopeRep(), SS.location_data());
+ while (NestedNameSpecifier *NNS = NNSLoc.getNestedNameSpecifier()) {
+ if (const Type *CurType = NNS->getAsType()) {
+ if (Context.hasSameUnqualifiedType(T, QualType(CurType, 0)))
+ return NNSLoc.getTypeLoc().getSourceRange();
+ } else
+ break;
+
+ NNSLoc = NNSLoc.getPrefix();
+ }
+
+ return SourceRange();
+}
+
+/// \brief Match the given template parameter lists to the given scope
+/// specifier, returning the template parameter list that applies to the
+/// name.
+///
+/// \param DeclStartLoc the start of the declaration that has a scope
+/// specifier or a template parameter list.
+///
+/// \param DeclLoc The location of the declaration itself.
+///
+/// \param SS the scope specifier that will be matched to the given template
+/// parameter lists. This scope specifier precedes a qualified name that is
+/// being declared.
+///
+/// \param ParamLists the template parameter lists, from the outermost to the
+/// innermost template parameter lists.
+///
+/// \param NumParamLists the number of template parameter lists in ParamLists.
+///
+/// \param IsFriend Whether to apply the slightly different rules for
+/// matching template parameters to scope specifiers in friend
+/// declarations.
+///
+/// \param IsExplicitSpecialization will be set true if the entity being
+/// declared is an explicit specialization, false otherwise.
+///
+/// \returns the template parameter list, if any, that corresponds to the
+/// name that is preceded by the scope specifier @p SS. This template
+/// parameter list may have template parameters (if we're declaring a
+/// template) or may have no template parameters (if we're declaring a
+/// template specialization), or may be NULL (if what we're declaring isn't
+/// itself a template).
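+///
+/// For illustration only (a hypothetical example, not taken from the sources):
+/// for a member template definition such as
+/// \code
+///   template<typename T> template<typename U> void X<T>::f(U) {}
+/// \endcode
+/// the outer parameter list is matched against the enclosing class template
+/// X<T>, and the inner list, which belongs to f itself, is returned.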
+TemplateParameterList *
+Sema::MatchTemplateParametersToScopeSpecifier(SourceLocation DeclStartLoc,
+ SourceLocation DeclLoc,
+ const CXXScopeSpec &SS,
+ TemplateParameterList **ParamLists,
+ unsigned NumParamLists,
+ bool IsFriend,
+ bool &IsExplicitSpecialization,
+ bool &Invalid) {
+ IsExplicitSpecialization = false;
+ Invalid = false;
+
+ // The sequence of nested types to which we will match up the template
+ // parameter lists. We first build this list by starting with the type named
+ // by the nested-name-specifier and walking out until we run out of types.
+ SmallVector<QualType, 4> NestedTypes;
+ QualType T;
+ if (SS.getScopeRep()) {
+ if (CXXRecordDecl *Record
+ = dyn_cast_or_null<CXXRecordDecl>(computeDeclContext(SS, true)))
+ T = Context.getTypeDeclType(Record);
+ else
+ T = QualType(SS.getScopeRep()->getAsType(), 0);
+ }
+
+ // If we found an explicit specialization that prevents us from needing
+ // 'template<>' headers, this will be set to the location of that
+ // explicit specialization.
+ SourceLocation ExplicitSpecLoc;
+
+ while (!T.isNull()) {
+ NestedTypes.push_back(T);
+
+ // Retrieve the parent of a record type.
+ if (CXXRecordDecl *Record = T->getAsCXXRecordDecl()) {
+ // If this type is an explicit specialization, we're done.
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Record)) {
+ if (!isa<ClassTemplatePartialSpecializationDecl>(Spec) &&
+ Spec->getSpecializationKind() == TSK_ExplicitSpecialization) {
+ ExplicitSpecLoc = Spec->getLocation();
+ break;
+ }
+ } else if (Record->getTemplateSpecializationKind()
+ == TSK_ExplicitSpecialization) {
+ ExplicitSpecLoc = Record->getLocation();
+ break;
+ }
+
+ if (TypeDecl *Parent = dyn_cast<TypeDecl>(Record->getParent()))
+ T = Context.getTypeDeclType(Parent);
+ else
+ T = QualType();
+ continue;
+ }
+
+ if (const TemplateSpecializationType *TST
+ = T->getAs<TemplateSpecializationType>()) {
+ if (TemplateDecl *Template = TST->getTemplateName().getAsTemplateDecl()) {
+ if (TypeDecl *Parent = dyn_cast<TypeDecl>(Template->getDeclContext()))
+ T = Context.getTypeDeclType(Parent);
+ else
+ T = QualType();
+ continue;
+ }
+ }
+
+ // Look one step prior in a dependent template specialization type.
+ if (const DependentTemplateSpecializationType *DependentTST
+ = T->getAs<DependentTemplateSpecializationType>()) {
+ if (NestedNameSpecifier *NNS = DependentTST->getQualifier())
+ T = QualType(NNS->getAsType(), 0);
+ else
+ T = QualType();
+ continue;
+ }
+
+ // Look one step prior in a dependent name type.
+ if (const DependentNameType *DependentName = T->getAs<DependentNameType>()){
+ if (NestedNameSpecifier *NNS = DependentName->getQualifier())
+ T = QualType(NNS->getAsType(), 0);
+ else
+ T = QualType();
+ continue;
+ }
+
+ // Retrieve the parent of an enumeration type.
+ if (const EnumType *EnumT = T->getAs<EnumType>()) {
+ // FIXME: Forward-declared enums require a TSK_ExplicitSpecialization
+ // check here.
+ EnumDecl *Enum = EnumT->getDecl();
+
+ // Get to the parent type.
+ if (TypeDecl *Parent = dyn_cast<TypeDecl>(Enum->getParent()))
+ T = Context.getTypeDeclType(Parent);
+ else
+ T = QualType();
+ continue;
+ }
+
+ T = QualType();
+ }
+ // Reverse the nested types list, since we want to traverse from the outermost
+ // to the innermost while checking template-parameter-lists.
+ std::reverse(NestedTypes.begin(), NestedTypes.end());
+
+ // C++0x [temp.expl.spec]p17:
+ // A member or a member template may be nested within many
+ // enclosing class templates. In an explicit specialization for
+ // such a member, the member declaration shall be preceded by a
+ // template<> for each enclosing class template that is
+ // explicitly specialized.
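+  //
+  // Illustrative example (hypothetical, not from the original source):
+  //   template<typename T> struct Outer {
+  //     template<typename U> struct Inner;
+  //   };
+  //   template<> template<> struct Outer<int>::Inner<float> {};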
+ bool SawNonEmptyTemplateParameterList = false;
+ unsigned ParamIdx = 0;
+ for (unsigned TypeIdx = 0, NumTypes = NestedTypes.size(); TypeIdx != NumTypes;
+ ++TypeIdx) {
+ T = NestedTypes[TypeIdx];
+
+ // Whether we expect a 'template<>' header.
+ bool NeedEmptyTemplateHeader = false;
+
+ // Whether we expect a template header with parameters.
+ bool NeedNonemptyTemplateHeader = false;
+
+ // For a dependent type, the set of template parameters that we
+ // expect to see.
+ TemplateParameterList *ExpectedTemplateParams = 0;
+
+ // C++0x [temp.expl.spec]p15:
+ // A member or a member template may be nested within many enclosing
+ // class templates. In an explicit specialization for such a member, the
+ // member declaration shall be preceded by a template<> for each
+ // enclosing class template that is explicitly specialized.
+ if (CXXRecordDecl *Record = T->getAsCXXRecordDecl()) {
+ if (ClassTemplatePartialSpecializationDecl *Partial
+ = dyn_cast<ClassTemplatePartialSpecializationDecl>(Record)) {
+ ExpectedTemplateParams = Partial->getTemplateParameters();
+ NeedNonemptyTemplateHeader = true;
+ } else if (Record->isDependentType()) {
+ if (Record->getDescribedClassTemplate()) {
+ ExpectedTemplateParams = Record->getDescribedClassTemplate()
+ ->getTemplateParameters();
+ NeedNonemptyTemplateHeader = true;
+ }
+ } else if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Record)) {
+ // C++0x [temp.expl.spec]p4:
+ // Members of an explicitly specialized class template are defined
+ // in the same manner as members of normal classes, and not using
+ // the template<> syntax.
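+        //
+        // Illustrative example (hypothetical, not from the original source):
+        //   template<typename T> struct S { void f(); };
+        //   template<> struct S<int> { void f(); };
+        //   void S<int>::f() {} // no 'template<>' header here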
+ if (Spec->getSpecializationKind() != TSK_ExplicitSpecialization)
+ NeedEmptyTemplateHeader = true;
+ else
+ continue;
+ } else if (Record->getTemplateSpecializationKind()) {
+ if (Record->getTemplateSpecializationKind()
+ != TSK_ExplicitSpecialization &&
+ TypeIdx == NumTypes - 1)
+ IsExplicitSpecialization = true;
+
+ continue;
+ }
+ } else if (const TemplateSpecializationType *TST
+ = T->getAs<TemplateSpecializationType>()) {
+ if (TemplateDecl *Template = TST->getTemplateName().getAsTemplateDecl()) {
+ ExpectedTemplateParams = Template->getTemplateParameters();
+ NeedNonemptyTemplateHeader = true;
+ }
+ } else if (T->getAs<DependentTemplateSpecializationType>()) {
+ // FIXME: We actually could/should check the template arguments here
+ // against the corresponding template parameter list.
+ NeedNonemptyTemplateHeader = false;
+ }
+
+ // C++ [temp.expl.spec]p16:
+ // In an explicit specialization declaration for a member of a class
+  //   template or a member template that appears in namespace scope, the
+ // member template and some of its enclosing class templates may remain
+ // unspecialized, except that the declaration shall not explicitly
+  //   specialize a class member template if its enclosing class templates
+ // are not explicitly specialized as well.
+ if (ParamIdx < NumParamLists) {
+ if (ParamLists[ParamIdx]->size() == 0) {
+ if (SawNonEmptyTemplateParameterList) {
+ Diag(DeclLoc, diag::err_specialize_member_of_template)
+ << ParamLists[ParamIdx]->getSourceRange();
+ Invalid = true;
+ IsExplicitSpecialization = false;
+ return 0;
+ }
+ } else
+ SawNonEmptyTemplateParameterList = true;
+ }
+
+ if (NeedEmptyTemplateHeader) {
+ // If we're on the last of the types, and we need a 'template<>' header
+ // here, then it's an explicit specialization.
+ if (TypeIdx == NumTypes - 1)
+ IsExplicitSpecialization = true;
+
+ if (ParamIdx < NumParamLists) {
+ if (ParamLists[ParamIdx]->size() > 0) {
+ // The header has template parameters when it shouldn't. Complain.
+ Diag(ParamLists[ParamIdx]->getTemplateLoc(),
+ diag::err_template_param_list_matches_nontemplate)
+ << T
+ << SourceRange(ParamLists[ParamIdx]->getLAngleLoc(),
+ ParamLists[ParamIdx]->getRAngleLoc())
+ << getRangeOfTypeInNestedNameSpecifier(Context, T, SS);
+ Invalid = true;
+ return 0;
+ }
+
+ // Consume this template header.
+ ++ParamIdx;
+ continue;
+ }
+
+ if (!IsFriend) {
+ // We don't have a template header, but we should.
+ SourceLocation ExpectedTemplateLoc;
+ if (NumParamLists > 0)
+ ExpectedTemplateLoc = ParamLists[0]->getTemplateLoc();
+ else
+ ExpectedTemplateLoc = DeclStartLoc;
+
+ Diag(DeclLoc, diag::err_template_spec_needs_header)
+ << getRangeOfTypeInNestedNameSpecifier(Context, T, SS)
+ << FixItHint::CreateInsertion(ExpectedTemplateLoc, "template<> ");
+ }
+
+ continue;
+ }
+
+ if (NeedNonemptyTemplateHeader) {
+ // In friend declarations we can have template-ids which don't
+ // depend on the corresponding template parameter lists. But
+ // assume that empty parameter lists are supposed to match this
+ // template-id.
+ if (IsFriend && T->isDependentType()) {
+ if (ParamIdx < NumParamLists &&
+ DependsOnTemplateParameters(T, ParamLists[ParamIdx]))
+ ExpectedTemplateParams = 0;
+ else
+ continue;
+ }
+
+ if (ParamIdx < NumParamLists) {
+ // Check the template parameter list, if we can.
+ if (ExpectedTemplateParams &&
+ !TemplateParameterListsAreEqual(ParamLists[ParamIdx],
+ ExpectedTemplateParams,
+ true, TPL_TemplateMatch))
+ Invalid = true;
+
+ if (!Invalid &&
+ CheckTemplateParameterList(ParamLists[ParamIdx], 0,
+ TPC_ClassTemplateMember))
+ Invalid = true;
+
+ ++ParamIdx;
+ continue;
+ }
+
+ Diag(DeclLoc, diag::err_template_spec_needs_template_parameters)
+ << T
+ << getRangeOfTypeInNestedNameSpecifier(Context, T, SS);
+ Invalid = true;
+ continue;
+ }
+ }
+
+ // If there were at least as many template-ids as there were template
+ // parameter lists, then there are no template parameter lists remaining for
+ // the declaration itself.
+ if (ParamIdx >= NumParamLists)
+ return 0;
+
+ // If there were too many template parameter lists, complain about that now.
+ if (ParamIdx < NumParamLists - 1) {
+ bool HasAnyExplicitSpecHeader = false;
+ bool AllExplicitSpecHeaders = true;
+ for (unsigned I = ParamIdx; I != NumParamLists - 1; ++I) {
+ if (ParamLists[I]->size() == 0)
+ HasAnyExplicitSpecHeader = true;
+ else
+ AllExplicitSpecHeaders = false;
+ }
+
+ Diag(ParamLists[ParamIdx]->getTemplateLoc(),
+ AllExplicitSpecHeaders? diag::warn_template_spec_extra_headers
+ : diag::err_template_spec_extra_headers)
+ << SourceRange(ParamLists[ParamIdx]->getTemplateLoc(),
+ ParamLists[NumParamLists - 2]->getRAngleLoc());
+
+ // If there was a specialization somewhere, such that 'template<>' is
+ // not required, and there were any 'template<>' headers, note where the
+ // specialization occurred.
+ if (ExplicitSpecLoc.isValid() && HasAnyExplicitSpecHeader)
+ Diag(ExplicitSpecLoc,
+ diag::note_explicit_template_spec_does_not_need_header)
+ << NestedTypes.back();
+
+ // We have a template parameter list with no corresponding scope, which
+ // means that the resulting template declaration can't be instantiated
+ // properly (we'll end up with dependent nodes when we shouldn't).
+ if (!AllExplicitSpecHeaders)
+ Invalid = true;
+ }
+
+ // C++ [temp.expl.spec]p16:
+ // In an explicit specialization declaration for a member of a class
+  //   template or a member template that appears in namespace scope, the
+ // member template and some of its enclosing class templates may remain
+ // unspecialized, except that the declaration shall not explicitly
+  //   specialize a class member template if its enclosing class templates
+ // are not explicitly specialized as well.
+ if (ParamLists[NumParamLists - 1]->size() == 0 &&
+ SawNonEmptyTemplateParameterList) {
+ Diag(DeclLoc, diag::err_specialize_member_of_template)
+ << ParamLists[ParamIdx]->getSourceRange();
+ Invalid = true;
+ IsExplicitSpecialization = false;
+ return 0;
+ }
+
+ // Return the last template parameter list, which corresponds to the
+ // entity being declared.
+ return ParamLists[NumParamLists - 1];
+}
+
+void Sema::NoteAllFoundTemplates(TemplateName Name) {
+ if (TemplateDecl *Template = Name.getAsTemplateDecl()) {
+ Diag(Template->getLocation(), diag::note_template_declared_here)
+ << (isa<FunctionTemplateDecl>(Template)? 0
+ : isa<ClassTemplateDecl>(Template)? 1
+ : isa<TypeAliasTemplateDecl>(Template)? 2
+ : 3)
+ << Template->getDeclName();
+ return;
+ }
+
+ if (OverloadedTemplateStorage *OST = Name.getAsOverloadedTemplate()) {
+ for (OverloadedTemplateStorage::iterator I = OST->begin(),
+ IEnd = OST->end();
+ I != IEnd; ++I)
+ Diag((*I)->getLocation(), diag::note_template_declared_here)
+ << 0 << (*I)->getDeclName();
+
+ return;
+ }
+}
+
+QualType Sema::CheckTemplateIdType(TemplateName Name,
+ SourceLocation TemplateLoc,
+ TemplateArgumentListInfo &TemplateArgs) {
+ DependentTemplateName *DTN
+ = Name.getUnderlying().getAsDependentTemplateName();
+ if (DTN && DTN->isIdentifier())
+ // When building a template-id where the template-name is dependent,
+ // assume the template is a type template. Either our assumption is
+ // correct, or the code is ill-formed and will be diagnosed when the
+ // dependent name is substituted.
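+  //
+  // Illustrative example (hypothetical, not from the original source) of such
+  // a dependent template-id:
+  //   typename T::template apply<int>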
+ return Context.getDependentTemplateSpecializationType(ETK_None,
+ DTN->getQualifier(),
+ DTN->getIdentifier(),
+ TemplateArgs);
+
+ TemplateDecl *Template = Name.getAsTemplateDecl();
+ if (!Template || isa<FunctionTemplateDecl>(Template)) {
+ // We might have a substituted template template parameter pack. If so,
+ // build a template specialization type for it.
+ if (Name.getAsSubstTemplateTemplateParmPack())
+ return Context.getTemplateSpecializationType(Name, TemplateArgs);
+
+ Diag(TemplateLoc, diag::err_template_id_not_a_type)
+ << Name;
+ NoteAllFoundTemplates(Name);
+ return QualType();
+ }
+
+ // Check that the template argument list is well-formed for this
+ // template.
+ SmallVector<TemplateArgument, 4> Converted;
+ bool ExpansionIntoFixedList = false;
+ if (CheckTemplateArgumentList(Template, TemplateLoc, TemplateArgs,
+ false, Converted, &ExpansionIntoFixedList))
+ return QualType();
+
+ QualType CanonType;
+
+ bool InstantiationDependent = false;
+ TypeAliasTemplateDecl *AliasTemplate = 0;
+ if (!ExpansionIntoFixedList &&
+ (AliasTemplate = dyn_cast<TypeAliasTemplateDecl>(Template))) {
+ // Find the canonical type for this type alias template specialization.
+ TypeAliasDecl *Pattern = AliasTemplate->getTemplatedDecl();
+ if (Pattern->isInvalidDecl())
+ return QualType();
+
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
+
+ // Only substitute for the innermost template argument list.
+ MultiLevelTemplateArgumentList TemplateArgLists;
+ TemplateArgLists.addOuterTemplateArguments(&TemplateArgs);
+ unsigned Depth = AliasTemplate->getTemplateParameters()->getDepth();
+ for (unsigned I = 0; I < Depth; ++I)
+ TemplateArgLists.addOuterTemplateArguments(0, 0);
+
+ InstantiatingTemplate Inst(*this, TemplateLoc, Template);
+ CanonType = SubstType(Pattern->getUnderlyingType(),
+ TemplateArgLists, AliasTemplate->getLocation(),
+ AliasTemplate->getDeclName());
+ if (CanonType.isNull())
+ return QualType();
+ } else if (Name.isDependent() ||
+ TemplateSpecializationType::anyDependentTemplateArguments(
+ TemplateArgs, InstantiationDependent)) {
+ // This class template specialization is a dependent
+ // type. Therefore, its canonical type is another class template
+ // specialization type that contains all of the converted
+ // arguments in canonical form. This ensures that, e.g., A<T> and
+ // A<T, T> have identical types when A is declared as:
+ //
+ // template<typename T, typename U = T> struct A;
+ TemplateName CanonName = Context.getCanonicalTemplateName(Name);
+ CanonType = Context.getTemplateSpecializationType(CanonName,
+ Converted.data(),
+ Converted.size());
+
+ // FIXME: CanonType is not actually the canonical type, and unfortunately
+ // it is a TemplateSpecializationType that we will never use again.
+ // In the future, we need to teach getTemplateSpecializationType to only
+ // build the canonical type and return that to us.
+ CanonType = Context.getCanonicalType(CanonType);
+
+ // This might work out to be a current instantiation, in which
+ // case the canonical type needs to be the InjectedClassNameType.
+ //
+ // TODO: in theory this could be a simple hashtable lookup; most
+ // changes to CurContext don't change the set of current
+ // instantiations.
+ if (isa<ClassTemplateDecl>(Template)) {
+ for (DeclContext *Ctx = CurContext; Ctx; Ctx = Ctx->getLookupParent()) {
+ // If we get out to a namespace, we're done.
+ if (Ctx->isFileContext()) break;
+
+ // If this isn't a record, keep looking.
+ CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Ctx);
+ if (!Record) continue;
+
+ // Look for one of the two cases with InjectedClassNameTypes
+ // and check whether it's the same template.
+ if (!isa<ClassTemplatePartialSpecializationDecl>(Record) &&
+ !Record->getDescribedClassTemplate())
+ continue;
+
+ // Fetch the injected class name type and check whether its
+ // injected type is equal to the type we just built.
+ QualType ICNT = Context.getTypeDeclType(Record);
+ QualType Injected = cast<InjectedClassNameType>(ICNT)
+ ->getInjectedSpecializationType();
+
+ if (CanonType != Injected->getCanonicalTypeInternal())
+ continue;
+
+ // If so, the canonical type of this TST is the injected
+ // class name type of the record we just found.
+ assert(ICNT.isCanonical());
+ CanonType = ICNT;
+ break;
+ }
+ }
+ } else if (ClassTemplateDecl *ClassTemplate
+ = dyn_cast<ClassTemplateDecl>(Template)) {
+ // Find the class template specialization declaration that
+ // corresponds to these arguments.
+ void *InsertPos = 0;
+ ClassTemplateSpecializationDecl *Decl
+ = ClassTemplate->findSpecialization(Converted.data(), Converted.size(),
+ InsertPos);
+ if (!Decl) {
+ // This is the first time we have referenced this class template
+ // specialization. Create the canonical declaration and add it to
+ // the set of specializations.
+ Decl = ClassTemplateSpecializationDecl::Create(Context,
+ ClassTemplate->getTemplatedDecl()->getTagKind(),
+ ClassTemplate->getDeclContext(),
+ ClassTemplate->getTemplatedDecl()->getLocStart(),
+ ClassTemplate->getLocation(),
+ ClassTemplate,
+ Converted.data(),
+ Converted.size(), 0);
+ ClassTemplate->AddSpecialization(Decl, InsertPos);
+ Decl->setLexicalDeclContext(CurContext);
+ }
+
+ CanonType = Context.getTypeDeclType(Decl);
+ assert(isa<RecordType>(CanonType) &&
+ "type of non-dependent specialization is not a RecordType");
+ }
+
+ // Build the fully-sugared type for this class template
+ // specialization, which refers back to the class template
+ // specialization we created or found.
+ return Context.getTemplateSpecializationType(Name, TemplateArgs, CanonType);
+}
+
+TypeResult
+Sema::ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
+ TemplateTy TemplateD, SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation RAngleLoc,
+ bool IsCtorOrDtorName) {
+ if (SS.isInvalid())
+ return true;
+
+ TemplateName Template = TemplateD.getAsVal<TemplateName>();
+
+  // Translate the parser's template argument list into our AST format.
+ TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
+ translateTemplateArguments(TemplateArgsIn, TemplateArgs);
+
+ if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
+ QualType T
+ = Context.getDependentTemplateSpecializationType(ETK_None,
+ DTN->getQualifier(),
+ DTN->getIdentifier(),
+ TemplateArgs);
+ // Build type-source information.
+ TypeLocBuilder TLB;
+ DependentTemplateSpecializationTypeLoc SpecTL
+ = TLB.push<DependentTemplateSpecializationTypeLoc>(T);
+ SpecTL.setElaboratedKeywordLoc(SourceLocation());
+ SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateLoc);
+ SpecTL.setLAngleLoc(LAngleLoc);
+ SpecTL.setRAngleLoc(RAngleLoc);
+ for (unsigned I = 0, N = SpecTL.getNumArgs(); I != N; ++I)
+ SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
+ return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
+ }
+
+ QualType Result = CheckTemplateIdType(Template, TemplateLoc, TemplateArgs);
+ TemplateArgsIn.release();
+
+ if (Result.isNull())
+ return true;
+
+ // Build type-source information.
+ TypeLocBuilder TLB;
+ TemplateSpecializationTypeLoc SpecTL
+ = TLB.push<TemplateSpecializationTypeLoc>(Result);
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateLoc);
+ SpecTL.setLAngleLoc(LAngleLoc);
+ SpecTL.setRAngleLoc(RAngleLoc);
+ for (unsigned i = 0, e = SpecTL.getNumArgs(); i != e; ++i)
+ SpecTL.setArgLocInfo(i, TemplateArgs[i].getLocInfo());
+
+ // NOTE: avoid constructing an ElaboratedTypeLoc if this is a
+ // constructor or destructor name (in such a case, the scope specifier
+ // will be attached to the enclosing Decl or Expr node).
+ if (SS.isNotEmpty() && !IsCtorOrDtorName) {
+ // Create an elaborated-type-specifier containing the nested-name-specifier.
+ Result = Context.getElaboratedType(ETK_None, SS.getScopeRep(), Result);
+ ElaboratedTypeLoc ElabTL = TLB.push<ElaboratedTypeLoc>(Result);
+ ElabTL.setElaboratedKeywordLoc(SourceLocation());
+ ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ }
+
+ return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result));
+}
+
+TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
+ TypeSpecifierType TagSpec,
+ SourceLocation TagLoc,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ TemplateTy TemplateD,
+ SourceLocation TemplateLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation RAngleLoc) {
+ TemplateName Template = TemplateD.getAsVal<TemplateName>();
+
+  // Translate the parser's template argument list into our AST format.
+ TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
+ translateTemplateArguments(TemplateArgsIn, TemplateArgs);
+
+ // Determine the tag kind
+ TagTypeKind TagKind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
+ ElaboratedTypeKeyword Keyword
+ = TypeWithKeyword::getKeywordForTagTypeKind(TagKind);
+
+ if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
+ QualType T = Context.getDependentTemplateSpecializationType(Keyword,
+ DTN->getQualifier(),
+ DTN->getIdentifier(),
+ TemplateArgs);
+
+ // Build type-source information.
+ TypeLocBuilder TLB;
+ DependentTemplateSpecializationTypeLoc SpecTL
+ = TLB.push<DependentTemplateSpecializationTypeLoc>(T);
+ SpecTL.setElaboratedKeywordLoc(TagLoc);
+ SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateLoc);
+ SpecTL.setLAngleLoc(LAngleLoc);
+ SpecTL.setRAngleLoc(RAngleLoc);
+ for (unsigned I = 0, N = SpecTL.getNumArgs(); I != N; ++I)
+ SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
+ return CreateParsedType(T, TLB.getTypeSourceInfo(Context, T));
+ }
+
+ if (TypeAliasTemplateDecl *TAT =
+ dyn_cast_or_null<TypeAliasTemplateDecl>(Template.getAsTemplateDecl())) {
+ // C++0x [dcl.type.elab]p2:
+ // If the identifier resolves to a typedef-name or the simple-template-id
+ // resolves to an alias template specialization, the
+ // elaborated-type-specifier is ill-formed.
+ Diag(TemplateLoc, diag::err_tag_reference_non_tag) << 4;
+ Diag(TAT->getLocation(), diag::note_declared_at);
+ }
+
+ QualType Result = CheckTemplateIdType(Template, TemplateLoc, TemplateArgs);
+ if (Result.isNull())
+ return TypeResult(true);
+
+ // Check the tag kind
+ if (const RecordType *RT = Result->getAs<RecordType>()) {
+ RecordDecl *D = RT->getDecl();
+
+ IdentifierInfo *Id = D->getIdentifier();
+ assert(Id && "templated class must have an identifier");
+
+ if (!isAcceptableTagRedeclaration(D, TagKind, TUK == TUK_Definition,
+ TagLoc, *Id)) {
+ Diag(TagLoc, diag::err_use_with_wrong_tag)
+ << Result
+ << FixItHint::CreateReplacement(SourceRange(TagLoc), D->getKindName());
+ Diag(D->getLocation(), diag::note_previous_use);
+ }
+ }
+
+ // Provide source-location information for the template specialization.
+ TypeLocBuilder TLB;
+ TemplateSpecializationTypeLoc SpecTL
+ = TLB.push<TemplateSpecializationTypeLoc>(Result);
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateLoc);
+ SpecTL.setLAngleLoc(LAngleLoc);
+ SpecTL.setRAngleLoc(RAngleLoc);
+ for (unsigned i = 0, e = SpecTL.getNumArgs(); i != e; ++i)
+ SpecTL.setArgLocInfo(i, TemplateArgs[i].getLocInfo());
+
+ // Construct an elaborated type containing the nested-name-specifier (if any)
+ // and tag keyword.
+ Result = Context.getElaboratedType(Keyword, SS.getScopeRep(), Result);
+ ElaboratedTypeLoc ElabTL = TLB.push<ElaboratedTypeLoc>(Result);
+ ElabTL.setElaboratedKeywordLoc(TagLoc);
+ ElabTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result));
+}
+
+ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ LookupResult &R,
+ bool RequiresADL,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ // FIXME: Can we do any checking at this point? I guess we could check the
+ // template arguments that we have against the template name, if the template
+ // name refers to a single template. That's not a terribly common case,
+ // though.
+  //   foo<int> could identify a single function unambiguously.
+  //   However, this approach does NOT work, since a call such as f<int>(1)
+  //   gets resolved prior to resorting to overload resolution, e.g. with
+  //     template<class T> void f(double);
+  //     template<class T, class U> void f(U);
+
+ // These should be filtered out by our callers.
+ assert(!R.empty() && "empty lookup results when building templateid");
+ assert(!R.isAmbiguous() && "ambiguous lookup when building templateid");
+
+ // We don't want lookup warnings at this point.
+ R.suppressDiagnostics();
+
+ UnresolvedLookupExpr *ULE
+ = UnresolvedLookupExpr::Create(Context, R.getNamingClass(),
+ SS.getWithLocInContext(Context),
+ TemplateKWLoc,
+ R.getLookupNameInfo(),
+ RequiresADL, TemplateArgs,
+ R.begin(), R.end());
+
+ return Owned(ULE);
+}
+
+// We actually only call this from template instantiation.
+ExprResult
+Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ assert(TemplateArgs || TemplateKWLoc.isValid());
+ DeclContext *DC;
+ if (!(DC = computeDeclContext(SS, false)) ||
+ DC->isDependentContext() ||
+ RequireCompleteDeclContext(SS, DC))
+ return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs);
+
+ bool MemberOfUnknownSpecialization;
+ LookupResult R(*this, NameInfo, LookupOrdinaryName);
+ LookupTemplateName(R, (Scope*) 0, SS, QualType(), /*Entering*/ false,
+ MemberOfUnknownSpecialization);
+
+ if (R.isAmbiguous())
+ return ExprError();
+
+ if (R.empty()) {
+ Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_non_template)
+ << NameInfo.getName() << SS.getRange();
+ return ExprError();
+ }
+
+ if (ClassTemplateDecl *Temp = R.getAsSingle<ClassTemplateDecl>()) {
+ Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_class_template)
+ << (NestedNameSpecifier*) SS.getScopeRep()
+ << NameInfo.getName() << SS.getRange();
+ Diag(Temp->getLocation(), diag::note_referenced_class_template);
+ return ExprError();
+ }
+
+ return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*ADL*/ false, TemplateArgs);
+}
+
+/// \brief Form a dependent template name.
+///
+/// This action forms a dependent template name given the template
+/// name and its (presumably dependent) scope specifier. For
+/// example, given "MetaFun::template apply", the scope specifier \p
+/// SS will be "MetaFun::", \p TemplateKWLoc contains the location
+/// of the "template" keyword, and "apply" is the \p Name.
+TemplateNameKind Sema::ActOnDependentTemplateName(Scope *S,
+ CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ UnqualifiedId &Name,
+ ParsedType ObjectType,
+ bool EnteringContext,
+ TemplateTy &Result) {
+ if (TemplateKWLoc.isValid() && S && !S->getTemplateParamParent())
+ Diag(TemplateKWLoc,
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_template_outside_of_template :
+ diag::ext_template_outside_of_template)
+ << FixItHint::CreateRemoval(TemplateKWLoc);
+
+ DeclContext *LookupCtx = 0;
+ if (SS.isSet())
+ LookupCtx = computeDeclContext(SS, EnteringContext);
+ if (!LookupCtx && ObjectType)
+ LookupCtx = computeDeclContext(ObjectType.get());
+ if (LookupCtx) {
+ // C++0x [temp.names]p5:
+ // If a name prefixed by the keyword template is not the name of
+ // a template, the program is ill-formed. [Note: the keyword
+ // template may not be applied to non-template members of class
+ // templates. -end note ] [ Note: as is the case with the
+ // typename prefix, the template prefix is allowed in cases
+ // where it is not strictly necessary; i.e., when the
+ // nested-name-specifier or the expression on the left of the ->
+ // or . is not dependent on a template-parameter, or the use
+ // does not appear in the scope of a template. -end note]
+ //
+ // Note: C++03 was more strict here, because it banned the use of
+ // the "template" keyword prior to a template-name that was not a
+ // dependent name. C++ DR468 relaxed this requirement (the
+ // "template" keyword is now permitted). We follow the C++0x
+ // rules, even in C++03 mode with a warning, retroactively applying the DR.
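+    //
+    // Hypothetical illustration of the relaxed rule (not from the original
+    // source): 'p->template get<int>()' is permitted even when the type of
+    // 'p' does not depend on a template parameter.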
+ bool MemberOfUnknownSpecialization;
+ TemplateNameKind TNK = isTemplateName(0, SS, TemplateKWLoc.isValid(), Name,
+ ObjectType, EnteringContext, Result,
+ MemberOfUnknownSpecialization);
+ if (TNK == TNK_Non_template && LookupCtx->isDependentContext() &&
+ isa<CXXRecordDecl>(LookupCtx) &&
+ (!cast<CXXRecordDecl>(LookupCtx)->hasDefinition() ||
+ cast<CXXRecordDecl>(LookupCtx)->hasAnyDependentBases())) {
+ // This is a dependent template. Handle it below.
+ } else if (TNK == TNK_Non_template) {
+ Diag(Name.getLocStart(),
+ diag::err_template_kw_refers_to_non_template)
+ << GetNameFromUnqualifiedId(Name).getName()
+ << Name.getSourceRange()
+ << TemplateKWLoc;
+ return TNK_Non_template;
+ } else {
+ // We found something; return it.
+ return TNK;
+ }
+ }
+
+ NestedNameSpecifier *Qualifier
+ = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+
+ switch (Name.getKind()) {
+ case UnqualifiedId::IK_Identifier:
+ Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
+ Name.Identifier));
+ return TNK_Dependent_template_name;
+
+ case UnqualifiedId::IK_OperatorFunctionId:
+ Result = TemplateTy::make(Context.getDependentTemplateName(Qualifier,
+ Name.OperatorFunctionId.Operator));
+ return TNK_Dependent_template_name;
+
+ case UnqualifiedId::IK_LiteralOperatorId:
+ llvm_unreachable(
+ "We don't support these; Parse shouldn't have allowed propagation");
+
+ default:
+ break;
+ }
+
+ Diag(Name.getLocStart(),
+ diag::err_template_kw_refers_to_non_template)
+ << GetNameFromUnqualifiedId(Name).getName()
+ << Name.getSourceRange()
+ << TemplateKWLoc;
+ return TNK_Non_template;
+}
+
+bool Sema::CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
+ const TemplateArgumentLoc &AL,
+ SmallVectorImpl<TemplateArgument> &Converted) {
+ const TemplateArgument &Arg = AL.getArgument();
+
+ // Check template type parameter.
+ switch(Arg.getKind()) {
+ case TemplateArgument::Type:
+ // C++ [temp.arg.type]p1:
+ // A template-argument for a template-parameter which is a
+ // type shall be a type-id.
+ break;
+ case TemplateArgument::Template: {
+ // We have a template type parameter but the template argument
+ // is a template without any arguments.
+ SourceRange SR = AL.getSourceRange();
+ TemplateName Name = Arg.getAsTemplate();
+ Diag(SR.getBegin(), diag::err_template_missing_args)
+ << Name << SR;
+ if (TemplateDecl *Decl = Name.getAsTemplateDecl())
+ Diag(Decl->getLocation(), diag::note_template_decl_here);
+
+ return true;
+ }
+ default: {
+ // We have a template type parameter but the template argument
+ // is not a type.
+ SourceRange SR = AL.getSourceRange();
+ Diag(SR.getBegin(), diag::err_template_arg_must_be_type) << SR;
+ Diag(Param->getLocation(), diag::note_template_param_here);
+
+ return true;
+ }
+ }
+
+ if (CheckTemplateArgument(Param, AL.getTypeSourceInfo()))
+ return true;
+
+ // Add the converted template type argument.
+ QualType ArgType = Context.getCanonicalType(Arg.getAsType());
+
+ // Objective-C ARC:
+ // If an explicitly-specified template argument type is a lifetime type
+ // with no lifetime qualifier, the __strong lifetime qualifier is inferred.
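+  //
+  // Illustrative example (hypothetical, not from the original source): under
+  // ARC, the argument in 'std::vector<id>' is adjusted to '__strong id'.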
+ if (getLangOpts().ObjCAutoRefCount &&
+ ArgType->isObjCLifetimeType() &&
+ !ArgType.getObjCLifetime()) {
+ Qualifiers Qs;
+ Qs.setObjCLifetime(Qualifiers::OCL_Strong);
+ ArgType = Context.getQualifiedType(ArgType, Qs);
+ }
+
+ Converted.push_back(TemplateArgument(ArgType));
+ return false;
+}
+
+/// \brief Substitute template arguments into the default template argument for
+/// the given template type parameter.
+///
+/// \param SemaRef the semantic analysis object for which we are performing
+/// the substitution.
+///
+/// \param Template the template that we are synthesizing template arguments
+/// for.
+///
+/// \param TemplateLoc the location of the template name that started the
+/// template-id we are checking.
+///
+/// \param RAngleLoc the location of the right angle bracket ('>') that
+/// terminates the template-id.
+///
+/// \param Param the template template parameter whose default we are
+/// substituting into.
+///
+/// \param Converted the list of template arguments provided for template
+/// parameters that precede \p Param in the template parameter list.
+///
+/// \returns the substituted template argument, or NULL if an error occurred.
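+///
+/// For illustration only (a hypothetical example, not taken from the sources):
+/// given
+/// \code
+///   template<typename T, typename U = std::vector<T>> struct V;
+///   V<int> v;
+/// \endcode
+/// the default argument for U is substituted using the already-converted
+/// argument T = int, yielding std::vector<int>.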
+static TypeSourceInfo *
+SubstDefaultTemplateArgument(Sema &SemaRef,
+ TemplateDecl *Template,
+ SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc,
+ TemplateTypeParmDecl *Param,
+ SmallVectorImpl<TemplateArgument> &Converted) {
+ TypeSourceInfo *ArgType = Param->getDefaultArgumentInfo();
+
+ // If the argument type is dependent, instantiate it now based
+ // on the previously-computed template arguments.
+ if (ArgType->getType()->isDependentType()) {
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
+
+ MultiLevelTemplateArgumentList AllTemplateArgs
+ = SemaRef.getTemplateInstantiationArgs(Template, &TemplateArgs);
+
+ Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc,
+ Template, Converted.data(),
+ Converted.size(),
+ SourceRange(TemplateLoc, RAngleLoc));
+
+ ArgType = SemaRef.SubstType(ArgType, AllTemplateArgs,
+ Param->getDefaultArgumentLoc(),
+ Param->getDeclName());
+ }
+
+ return ArgType;
+}
+
+/// \brief Substitute template arguments into the default template argument for
+/// the given non-type template parameter.
+///
+/// \param SemaRef the semantic analysis object for which we are performing
+/// the substitution.
+///
+/// \param Template the template that we are synthesizing template arguments
+/// for.
+///
+/// \param TemplateLoc the location of the template name that started the
+/// template-id we are checking.
+///
+/// \param RAngleLoc the location of the right angle bracket ('>') that
+/// terminates the template-id.
+///
+/// \param Param the non-type template parameter whose default we are
+/// substituting into.
+///
+/// \param Converted the list of template arguments provided for template
+/// parameters that precede \p Param in the template parameter list.
+///
+/// \returns the substituted template argument, or NULL if an error occurred.
+static ExprResult
+SubstDefaultTemplateArgument(Sema &SemaRef,
+ TemplateDecl *Template,
+ SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc,
+ NonTypeTemplateParmDecl *Param,
+ SmallVectorImpl<TemplateArgument> &Converted) {
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
+
+ MultiLevelTemplateArgumentList AllTemplateArgs
+ = SemaRef.getTemplateInstantiationArgs(Template, &TemplateArgs);
+
+ Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc,
+ Template, Converted.data(),
+ Converted.size(),
+ SourceRange(TemplateLoc, RAngleLoc));
+
+ return SemaRef.SubstExpr(Param->getDefaultArgument(), AllTemplateArgs);
+}
+
+/// \brief Substitute template arguments into the default template argument for
+/// the given template template parameter.
+///
+/// \param SemaRef the semantic analysis object for which we are performing
+/// the substitution.
+///
+/// \param Template the template that we are synthesizing template arguments
+/// for.
+///
+/// \param TemplateLoc the location of the template name that started the
+/// template-id we are checking.
+///
+/// \param RAngleLoc the location of the right angle bracket ('>') that
+/// terminates the template-id.
+///
+/// \param Param the template template parameter whose default we are
+/// substituting into.
+///
+/// \param Converted the list of template arguments provided for template
+/// parameters that precede \p Param in the template parameter list.
+///
+/// \param QualifierLoc Will be set to the nested-name-specifier (with
+/// source-location information) that precedes the template name.
+///
+/// \returns the substituted template argument, or NULL if an error occurred.
+static TemplateName
+SubstDefaultTemplateArgument(Sema &SemaRef,
+ TemplateDecl *Template,
+ SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc,
+ TemplateTemplateParmDecl *Param,
+ SmallVectorImpl<TemplateArgument> &Converted,
+ NestedNameSpecifierLoc &QualifierLoc) {
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
+
+ MultiLevelTemplateArgumentList AllTemplateArgs
+ = SemaRef.getTemplateInstantiationArgs(Template, &TemplateArgs);
+
+ Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc,
+ Template, Converted.data(),
+ Converted.size(),
+ SourceRange(TemplateLoc, RAngleLoc));
+
+  // Substitute into the nested-name-specifier first, if there is one.
+ QualifierLoc = Param->getDefaultArgument().getTemplateQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc,
+ AllTemplateArgs);
+ if (!QualifierLoc)
+ return TemplateName();
+ }
+
+ return SemaRef.SubstTemplateName(QualifierLoc,
+ Param->getDefaultArgument().getArgument().getAsTemplate(),
+ Param->getDefaultArgument().getTemplateNameLoc(),
+ AllTemplateArgs);
+}
+
+/// \brief If the given template parameter has a default template
+/// argument, substitute into that default template argument and
+/// return the corresponding template argument.
+TemplateArgumentLoc
+Sema::SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
+ SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc,
+ Decl *Param,
+ SmallVectorImpl<TemplateArgument> &Converted) {
+ if (TemplateTypeParmDecl *TypeParm = dyn_cast<TemplateTypeParmDecl>(Param)) {
+ if (!TypeParm->hasDefaultArgument())
+ return TemplateArgumentLoc();
+
+ TypeSourceInfo *DI = SubstDefaultTemplateArgument(*this, Template,
+ TemplateLoc,
+ RAngleLoc,
+ TypeParm,
+ Converted);
+ if (DI)
+ return TemplateArgumentLoc(TemplateArgument(DI->getType()), DI);
+
+ return TemplateArgumentLoc();
+ }
+
+ if (NonTypeTemplateParmDecl *NonTypeParm
+ = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ if (!NonTypeParm->hasDefaultArgument())
+ return TemplateArgumentLoc();
+
+ ExprResult Arg = SubstDefaultTemplateArgument(*this, Template,
+ TemplateLoc,
+ RAngleLoc,
+ NonTypeParm,
+ Converted);
+ if (Arg.isInvalid())
+ return TemplateArgumentLoc();
+
+ Expr *ArgE = Arg.takeAs<Expr>();
+ return TemplateArgumentLoc(TemplateArgument(ArgE), ArgE);
+ }
+
+ TemplateTemplateParmDecl *TempTempParm
+ = cast<TemplateTemplateParmDecl>(Param);
+ if (!TempTempParm->hasDefaultArgument())
+ return TemplateArgumentLoc();
+
+ NestedNameSpecifierLoc QualifierLoc;
+ TemplateName TName = SubstDefaultTemplateArgument(*this, Template,
+ TemplateLoc,
+ RAngleLoc,
+ TempTempParm,
+ Converted,
+ QualifierLoc);
+ if (TName.isNull())
+ return TemplateArgumentLoc();
+
+ return TemplateArgumentLoc(TemplateArgument(TName),
+ TempTempParm->getDefaultArgument().getTemplateQualifierLoc(),
+ TempTempParm->getDefaultArgument().getTemplateNameLoc());
+}
+
+/// \brief Check that the given template argument corresponds to the given
+/// template parameter.
+///
+/// \param Param The template parameter against which the argument will be
+/// checked.
+///
+/// \param Arg The template argument.
+///
+/// \param Template The template in which the template argument resides.
+///
+/// \param TemplateLoc The location of the template name for the template
+/// whose argument list we're matching.
+///
+/// \param RAngleLoc The location of the right angle bracket ('>') that closes
+/// the template argument list.
+///
+/// \param ArgumentPackIndex The index into the argument pack where this
+/// argument will be placed. Only valid if the parameter is a parameter pack.
+///
+/// \param Converted The checked, converted argument will be added to the
+/// end of this small vector.
+///
+/// \param CTAK Describes how we arrived at this particular template argument:
+/// explicitly written, deduced, etc.
+///
+/// \returns true on error, false otherwise.
+bool Sema::CheckTemplateArgument(NamedDecl *Param,
+ const TemplateArgumentLoc &Arg,
+ NamedDecl *Template,
+ SourceLocation TemplateLoc,
+ SourceLocation RAngleLoc,
+ unsigned ArgumentPackIndex,
+ SmallVectorImpl<TemplateArgument> &Converted,
+ CheckTemplateArgumentKind CTAK) {
+ // Check template type parameters.
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Param))
+ return CheckTemplateTypeArgument(TTP, Arg, Converted);
+
+ // Check non-type template parameters.
+ if (NonTypeTemplateParmDecl *NTTP =dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ // Do substitution on the type of the non-type template parameter
+ // with the template arguments we've seen thus far. But if the
+ // template has a dependent context then we cannot substitute yet.
+ QualType NTTPType = NTTP->getType();
+ if (NTTP->isParameterPack() && NTTP->isExpandedParameterPack())
+ NTTPType = NTTP->getExpansionType(ArgumentPackIndex);
+
+ if (NTTPType->isDependentType() &&
+ !isa<TemplateTemplateParmDecl>(Template) &&
+ !Template->getDeclContext()->isDependentContext()) {
+ // Do substitution on the type of the non-type template parameter.
+ InstantiatingTemplate Inst(*this, TemplateLoc, Template,
+ NTTP, Converted.data(), Converted.size(),
+ SourceRange(TemplateLoc, RAngleLoc));
+
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
+ NTTPType = SubstType(NTTPType,
+ MultiLevelTemplateArgumentList(TemplateArgs),
+ NTTP->getLocation(),
+ NTTP->getDeclName());
+ // If that worked, check the non-type template parameter type
+ // for validity.
+ if (!NTTPType.isNull())
+ NTTPType = CheckNonTypeTemplateParameterType(NTTPType,
+ NTTP->getLocation());
+ if (NTTPType.isNull())
+ return true;
+ }
+
+ switch (Arg.getArgument().getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Should never see a NULL template argument here");
+
+ case TemplateArgument::Expression: {
+ TemplateArgument Result;
+ ExprResult Res =
+ CheckTemplateArgument(NTTP, NTTPType, Arg.getArgument().getAsExpr(),
+ Result, CTAK);
+ if (Res.isInvalid())
+ return true;
+
+ Converted.push_back(Result);
+ break;
+ }
+
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Integral:
+ // We've already checked this template argument, so just copy
+ // it to the list of converted arguments.
+ Converted.push_back(Arg.getArgument());
+ break;
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ // We were given a template template argument. It may not be ill-formed;
+ // see below.
+ if (DependentTemplateName *DTN
+ = Arg.getArgument().getAsTemplateOrTemplatePattern()
+ .getAsDependentTemplateName()) {
+ // We have a template argument such as \c T::template X, which we
+ // parsed as a template template argument. However, since we now
+ // know that we need a non-type template argument, convert this
+ // template name into an expression.
+
+ DeclarationNameInfo NameInfo(DTN->getIdentifier(),
+ Arg.getTemplateNameLoc());
+
+ CXXScopeSpec SS;
+ SS.Adopt(Arg.getTemplateQualifierLoc());
+ // FIXME: the template-template arg was a DependentTemplateName,
+ // so it was provided with a template keyword. However, its source
+ // location is not stored in the template argument structure.
+ SourceLocation TemplateKWLoc;
+ ExprResult E = Owned(DependentScopeDeclRefExpr::Create(Context,
+ SS.getWithLocInContext(Context),
+ TemplateKWLoc,
+ NameInfo, 0));
+
+ // If we parsed the template argument as a pack expansion, create a
+ // pack expansion expression.
+ if (Arg.getArgument().getKind() == TemplateArgument::TemplateExpansion){
+ E = ActOnPackExpansion(E.take(), Arg.getTemplateEllipsisLoc());
+ if (E.isInvalid())
+ return true;
+ }
+
+ TemplateArgument Result;
+ E = CheckTemplateArgument(NTTP, NTTPType, E.take(), Result);
+ if (E.isInvalid())
+ return true;
+
+ Converted.push_back(Result);
+ break;
+ }
+
+ // We have a template argument that actually does refer to a class
+ // template, alias template, or template template parameter, and
+ // therefore cannot be a non-type template argument.
+ Diag(Arg.getLocation(), diag::err_template_arg_must_be_expr)
+ << Arg.getSourceRange();
+
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+
+ case TemplateArgument::Type: {
+ // We have a non-type template parameter but the template
+ // argument is a type.
+
+ // C++ [temp.arg]p2:
+ // In a template-argument, an ambiguity between a type-id and
+ // an expression is resolved to a type-id, regardless of the
+ // form of the corresponding template-parameter.
+ //
+ // We warn specifically about this case, since it can be rather
+ // confusing for users.
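+      //
+      // Illustrative example (hypothetical, not from the original source):
+      //   template<int N> struct D;
+      //   D<int()> d; // 'int()' is a type-id (a function type), not a
+      //               // value-initialized int, so it is diagnosed below.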
+ QualType T = Arg.getArgument().getAsType();
+ SourceRange SR = Arg.getSourceRange();
+ if (T->isFunctionType())
+ Diag(SR.getBegin(), diag::err_template_arg_nontype_ambig) << SR << T;
+ else
+ Diag(SR.getBegin(), diag::err_template_arg_must_be_expr) << SR;
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ case TemplateArgument::Pack:
+ llvm_unreachable("Caller must expand template argument packs");
+ }
+
+ return false;
+ }
+
+ // Check template template parameters.
+ TemplateTemplateParmDecl *TempParm = cast<TemplateTemplateParmDecl>(Param);
+
+ // Substitute into the template parameter list of the template
+ // template parameter, since previously-supplied template arguments
+ // may appear within the template template parameter.
+ {
+ // Set up a template instantiation context.
+ LocalInstantiationScope Scope(*this);
+ InstantiatingTemplate Inst(*this, TemplateLoc, Template,
+ TempParm, Converted.data(), Converted.size(),
+ SourceRange(TemplateLoc, RAngleLoc));
+
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Converted.data(), Converted.size());
+ TempParm = cast_or_null<TemplateTemplateParmDecl>(
+ SubstDecl(TempParm, CurContext,
+ MultiLevelTemplateArgumentList(TemplateArgs)));
+ if (!TempParm)
+ return true;
+ }
+
+ switch (Arg.getArgument().getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Should never see a NULL template argument here");
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ if (CheckTemplateArgument(TempParm, Arg))
+ return true;
+
+ Converted.push_back(Arg.getArgument());
+ break;
+
+ case TemplateArgument::Expression:
+ case TemplateArgument::Type:
+ // We have a template template parameter but the template
+ // argument does not refer to a template.
+ Diag(Arg.getLocation(), diag::err_template_arg_must_be_template)
+ << getLangOpts().CPlusPlus0x;
+ return true;
+
+ case TemplateArgument::Declaration:
+ llvm_unreachable("Declaration argument with template template parameter");
+ case TemplateArgument::Integral:
+ llvm_unreachable("Integral argument with template template parameter");
+
+ case TemplateArgument::Pack:
+ llvm_unreachable("Caller must expand template argument packs");
+ }
+
+ return false;
+}
+
+/// \brief Diagnose an arity mismatch between a template's parameter list and
+/// the template argument list provided for it.
+static bool diagnoseArityMismatch(Sema &S, TemplateDecl *Template,
+ SourceLocation TemplateLoc,
+ TemplateArgumentListInfo &TemplateArgs) {
+ TemplateParameterList *Params = Template->getTemplateParameters();
+ unsigned NumParams = Params->size();
+ unsigned NumArgs = TemplateArgs.size();
+
+ SourceRange Range;
+ if (NumArgs > NumParams)
+ Range = SourceRange(TemplateArgs[NumParams].getLocation(),
+ TemplateArgs.getRAngleLoc());
+ S.Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
+ << (NumArgs > NumParams)
+ << (isa<ClassTemplateDecl>(Template)? 0 :
+ isa<FunctionTemplateDecl>(Template)? 1 :
+ isa<TemplateTemplateParmDecl>(Template)? 2 : 3)
+ << Template << Range;
+ S.Diag(Template->getLocation(), diag::note_template_decl_here)
+ << Params->getSourceRange();
+ return true;
+}
+
+/// \brief Check that the given template argument list is well-formed
+/// for specializing the given template.
+bool Sema::CheckTemplateArgumentList(TemplateDecl *Template,
+ SourceLocation TemplateLoc,
+ TemplateArgumentListInfo &TemplateArgs,
+ bool PartialTemplateArgs,
+ SmallVectorImpl<TemplateArgument> &Converted,
+ bool *ExpansionIntoFixedList) {
+ if (ExpansionIntoFixedList)
+ *ExpansionIntoFixedList = false;
+
+ TemplateParameterList *Params = Template->getTemplateParameters();
+ unsigned NumParams = Params->size();
+ unsigned NumArgs = TemplateArgs.size();
+ bool Invalid = false;
+
+ SourceLocation RAngleLoc = TemplateArgs.getRAngleLoc();
+
+ bool HasParameterPack =
+ NumParams > 0 && Params->getParam(NumParams - 1)->isTemplateParameterPack();
+
+ // C++ [temp.arg]p1:
+ // [...] The type and form of each template-argument specified in
+ // a template-id shall match the type and form specified for the
+ // corresponding parameter declared by the template in its
+ // template-parameter-list.
+ bool isTemplateTemplateParameter = isa<TemplateTemplateParmDecl>(Template);
+ SmallVector<TemplateArgument, 2> ArgumentPack;
+ TemplateParameterList::iterator Param = Params->begin(),
+ ParamEnd = Params->end();
+ unsigned ArgIdx = 0;
+ LocalInstantiationScope InstScope(*this, true);
+ bool SawPackExpansion = false;
+ while (Param != ParamEnd) {
+ if (ArgIdx < NumArgs) {
+ // If we have an expanded parameter pack, make sure we don't have too
+ // many arguments.
+ // FIXME: This really should fall out from the normal arity checking.
+ if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
+ if (NTTP->isExpandedParameterPack() &&
+ ArgumentPack.size() >= NTTP->getNumExpansionTypes()) {
+ Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
+ << true
+ << (isa<ClassTemplateDecl>(Template)? 0 :
+ isa<FunctionTemplateDecl>(Template)? 1 :
+ isa<TemplateTemplateParmDecl>(Template)? 2 : 3)
+ << Template;
+ Diag(Template->getLocation(), diag::note_template_decl_here)
+ << Params->getSourceRange();
+ return true;
+ }
+ }
+
+ // Check the template argument we were given.
+ if (CheckTemplateArgument(*Param, TemplateArgs[ArgIdx], Template,
+ TemplateLoc, RAngleLoc,
+ ArgumentPack.size(), Converted))
+ return true;
+
+ if ((*Param)->isTemplateParameterPack()) {
+ // The template parameter was a template parameter pack, so take the
+ // deduced argument and place it on the argument pack. Note that we
+ // stay on the same template parameter so that we can deduce more
+ // arguments.
+ ArgumentPack.push_back(Converted.back());
+ Converted.pop_back();
+ } else {
+ // Move to the next template parameter.
+ ++Param;
+ }
+
+ // If this template argument is a pack expansion, record that fact
+ // and break out; we can't actually check any more.
+ if (TemplateArgs[ArgIdx].getArgument().isPackExpansion()) {
+ SawPackExpansion = true;
+ ++ArgIdx;
+ break;
+ }
+
+ ++ArgIdx;
+ continue;
+ }
+
+ // If we're checking a partial template argument list, we're done.
+ if (PartialTemplateArgs) {
+ if ((*Param)->isTemplateParameterPack() && !ArgumentPack.empty())
+ Converted.push_back(TemplateArgument::CreatePackCopy(Context,
+ ArgumentPack.data(),
+ ArgumentPack.size()));
+
+ return Invalid;
+ }
+
+ // If we have a template parameter pack with no more corresponding
+ // arguments, just break out now and we'll fill in the argument pack below.
+ if ((*Param)->isTemplateParameterPack())
+ break;
+
+ // Check whether we have a default argument.
+ TemplateArgumentLoc Arg;
+
+ // Retrieve the default template argument from the template
+ // parameter. For each kind of template parameter, we substitute the
+ // template arguments provided thus far and any "outer" template arguments
+ // (when the template parameter was part of a nested template) into
+ // the default argument.
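+    // Hedged illustration (not in the original change; 'Pair' is a
+    // hypothetical template):
+    //   template<typename T, typename U = T*> struct Pair { };
+    //   Pair<int> p;
+    // The default for 'U' is substituted using the already-converted argument
+    // for 'T', yielding 'int*'.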
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*Param)) {
+ if (!TTP->hasDefaultArgument())
+ return diagnoseArityMismatch(*this, Template, TemplateLoc,
+ TemplateArgs);
+
+ TypeSourceInfo *ArgType = SubstDefaultTemplateArgument(*this,
+ Template,
+ TemplateLoc,
+ RAngleLoc,
+ TTP,
+ Converted);
+ if (!ArgType)
+ return true;
+
+ Arg = TemplateArgumentLoc(TemplateArgument(ArgType->getType()),
+ ArgType);
+ } else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
+ if (!NTTP->hasDefaultArgument())
+ return diagnoseArityMismatch(*this, Template, TemplateLoc,
+ TemplateArgs);
+
+ ExprResult E = SubstDefaultTemplateArgument(*this, Template,
+ TemplateLoc,
+ RAngleLoc,
+ NTTP,
+ Converted);
+ if (E.isInvalid())
+ return true;
+
+ Expr *Ex = E.takeAs<Expr>();
+ Arg = TemplateArgumentLoc(TemplateArgument(Ex), Ex);
+ } else {
+ TemplateTemplateParmDecl *TempParm
+ = cast<TemplateTemplateParmDecl>(*Param);
+
+ if (!TempParm->hasDefaultArgument())
+ return diagnoseArityMismatch(*this, Template, TemplateLoc,
+ TemplateArgs);
+
+ NestedNameSpecifierLoc QualifierLoc;
+ TemplateName Name = SubstDefaultTemplateArgument(*this, Template,
+ TemplateLoc,
+ RAngleLoc,
+ TempParm,
+ Converted,
+ QualifierLoc);
+ if (Name.isNull())
+ return true;
+
+ Arg = TemplateArgumentLoc(TemplateArgument(Name), QualifierLoc,
+ TempParm->getDefaultArgument().getTemplateNameLoc());
+ }
+
+ // Introduce an instantiation record that describes where we are using
+ // the default template argument.
+ InstantiatingTemplate Instantiating(*this, RAngleLoc, Template, *Param,
+ Converted.data(), Converted.size(),
+ SourceRange(TemplateLoc, RAngleLoc));
+
+ // Check the default template argument.
+ if (CheckTemplateArgument(*Param, Arg, Template, TemplateLoc,
+ RAngleLoc, 0, Converted))
+ return true;
+
+ // Core issue 150 (assumed resolution): if this is a template template
+ // parameter, keep track of the default template arguments from the
+ // template definition.
+ if (isTemplateTemplateParameter)
+ TemplateArgs.addArgument(Arg);
+
+ // Move to the next template parameter and argument.
+ ++Param;
+ ++ArgIdx;
+ }
+
+ // If we saw a pack expansion, then directly convert the remaining arguments,
+ // because we don't know what parameters they'll match up with.
+ if (SawPackExpansion) {
+ bool AddToArgumentPack
+ = Param != ParamEnd && (*Param)->isTemplateParameterPack();
+ while (ArgIdx < NumArgs) {
+ if (AddToArgumentPack)
+ ArgumentPack.push_back(TemplateArgs[ArgIdx].getArgument());
+ else
+ Converted.push_back(TemplateArgs[ArgIdx].getArgument());
+ ++ArgIdx;
+ }
+
+ // Push the argument pack onto the list of converted arguments.
+ if (AddToArgumentPack) {
+ if (ArgumentPack.empty())
+ Converted.push_back(TemplateArgument(0, 0));
+ else {
+ Converted.push_back(
+ TemplateArgument::CreatePackCopy(Context,
+ ArgumentPack.data(),
+ ArgumentPack.size()));
+ ArgumentPack.clear();
+ }
+ } else if (ExpansionIntoFixedList) {
+ // We have expanded a pack into a fixed list.
+ *ExpansionIntoFixedList = true;
+ }
+
+ return Invalid;
+ }
+
+ // If we have any leftover arguments, then there were too many arguments.
+ // Complain and fail.
+ if (ArgIdx < NumArgs)
+ return diagnoseArityMismatch(*this, Template, TemplateLoc, TemplateArgs);
+
+ // If we have an expanded parameter pack, make sure we don't have too
+ // many arguments.
+ // FIXME: This really should fall out from the normal arity checking.
+ if (Param != ParamEnd) {
+ if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
+ if (NTTP->isExpandedParameterPack() &&
+ ArgumentPack.size() < NTTP->getNumExpansionTypes()) {
+ Diag(TemplateLoc, diag::err_template_arg_list_different_arity)
+ << false
+ << (isa<ClassTemplateDecl>(Template)? 0 :
+ isa<FunctionTemplateDecl>(Template)? 1 :
+ isa<TemplateTemplateParmDecl>(Template)? 2 : 3)
+ << Template;
+ Diag(Template->getLocation(), diag::note_template_decl_here)
+ << Params->getSourceRange();
+ return true;
+ }
+ }
+ }
+
+ // Form argument packs for each of the parameter packs remaining.
+ while (Param != ParamEnd) {
+ // If we're checking a partial list of template arguments, don't fill
+ // in arguments for non-template parameter packs.
+ if ((*Param)->isTemplateParameterPack()) {
+ if (!HasParameterPack)
+ return true;
+ if (ArgumentPack.empty())
+ Converted.push_back(TemplateArgument(0, 0));
+ else {
+ Converted.push_back(TemplateArgument::CreatePackCopy(Context,
+ ArgumentPack.data(),
+ ArgumentPack.size()));
+ ArgumentPack.clear();
+ }
+ } else if (!PartialTemplateArgs)
+ return diagnoseArityMismatch(*this, Template, TemplateLoc, TemplateArgs);
+
+ ++Param;
+ }
+
+ return Invalid;
+}
+
+namespace {
+ class UnnamedLocalNoLinkageFinder
+ : public TypeVisitor<UnnamedLocalNoLinkageFinder, bool>
+ {
+ Sema &S;
+ SourceRange SR;
+
+ typedef TypeVisitor<UnnamedLocalNoLinkageFinder, bool> inherited;
+
+ public:
+ UnnamedLocalNoLinkageFinder(Sema &S, SourceRange SR) : S(S), SR(SR) { }
+
+ bool Visit(QualType T) {
+ return inherited::Visit(T.getTypePtr());
+ }
+
+#define TYPE(Class, Parent) \
+ bool Visit##Class##Type(const Class##Type *);
+#define ABSTRACT_TYPE(Class, Parent) \
+ bool Visit##Class##Type(const Class##Type *) { return false; }
+#define NON_CANONICAL_TYPE(Class, Parent) \
+ bool Visit##Class##Type(const Class##Type *) { return false; }
+#include "clang/AST/TypeNodes.def"
+
+ bool VisitTagDecl(const TagDecl *Tag);
+ bool VisitNestedNameSpecifier(NestedNameSpecifier *NNS);
+ };
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitBuiltinType(const BuiltinType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitComplexType(const ComplexType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitPointerType(const PointerType* T) {
+ return Visit(T->getPointeeType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitBlockPointerType(
+ const BlockPointerType* T) {
+ return Visit(T->getPointeeType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitLValueReferenceType(
+ const LValueReferenceType* T) {
+ return Visit(T->getPointeeType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitRValueReferenceType(
+ const RValueReferenceType* T) {
+ return Visit(T->getPointeeType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitMemberPointerType(
+ const MemberPointerType* T) {
+ return Visit(T->getPointeeType()) || Visit(QualType(T->getClass(), 0));
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitConstantArrayType(
+ const ConstantArrayType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitIncompleteArrayType(
+ const IncompleteArrayType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitVariableArrayType(
+ const VariableArrayType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitDependentSizedArrayType(
+ const DependentSizedArrayType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitDependentSizedExtVectorType(
+ const DependentSizedExtVectorType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitVectorType(const VectorType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitExtVectorType(const ExtVectorType* T) {
+ return Visit(T->getElementType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitFunctionProtoType(
+ const FunctionProtoType* T) {
+ for (FunctionProtoType::arg_type_iterator A = T->arg_type_begin(),
+ AEnd = T->arg_type_end();
+ A != AEnd; ++A) {
+ if (Visit(*A))
+ return true;
+ }
+
+ return Visit(T->getResultType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitFunctionNoProtoType(
+ const FunctionNoProtoType* T) {
+ return Visit(T->getResultType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitUnresolvedUsingType(
+ const UnresolvedUsingType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitTypeOfExprType(const TypeOfExprType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitTypeOfType(const TypeOfType* T) {
+ return Visit(T->getUnderlyingType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitDecltypeType(const DecltypeType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitUnaryTransformType(
+ const UnaryTransformType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitAutoType(const AutoType *T) {
+ return Visit(T->getDeducedType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitRecordType(const RecordType* T) {
+ return VisitTagDecl(T->getDecl());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitEnumType(const EnumType* T) {
+ return VisitTagDecl(T->getDecl());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitTemplateTypeParmType(
+ const TemplateTypeParmType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitSubstTemplateTypeParmPackType(
+ const SubstTemplateTypeParmPackType *) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitTemplateSpecializationType(
+ const TemplateSpecializationType*) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitInjectedClassNameType(
+ const InjectedClassNameType* T) {
+ return VisitTagDecl(T->getDecl());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitDependentNameType(
+ const DependentNameType* T) {
+ return VisitNestedNameSpecifier(T->getQualifier());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitDependentTemplateSpecializationType(
+ const DependentTemplateSpecializationType* T) {
+ return VisitNestedNameSpecifier(T->getQualifier());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitPackExpansionType(
+ const PackExpansionType* T) {
+ return Visit(T->getPattern());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitObjCObjectType(const ObjCObjectType *) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitObjCInterfaceType(
+ const ObjCInterfaceType *) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitObjCObjectPointerType(
+ const ObjCObjectPointerType *) {
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitAtomicType(const AtomicType* T) {
+ return Visit(T->getValueType());
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitTagDecl(const TagDecl *Tag) {
+ if (Tag->getDeclContext()->isFunctionOrMethod()) {
+ S.Diag(SR.getBegin(),
+ S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_template_arg_local_type :
+ diag::ext_template_arg_local_type)
+ << S.Context.getTypeDeclType(Tag) << SR;
+ return true;
+ }
+
+ if (!Tag->getDeclName() && !Tag->getTypedefNameForAnonDecl()) {
+ S.Diag(SR.getBegin(),
+ S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_template_arg_unnamed_type :
+ diag::ext_template_arg_unnamed_type) << SR;
+ S.Diag(Tag->getLocation(), diag::note_template_unnamed_type_here);
+ return true;
+ }
+
+ return false;
+}
+
+bool UnnamedLocalNoLinkageFinder::VisitNestedNameSpecifier(
+ NestedNameSpecifier *NNS) {
+ if (NNS->getPrefix() && VisitNestedNameSpecifier(NNS->getPrefix()))
+ return true;
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ case NestedNameSpecifier::Global:
+ return false;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ return Visit(QualType(NNS->getAsType(), 0));
+ }
+ llvm_unreachable("Invalid NestedNameSpecifier::Kind!");
+}
+
+
+/// \brief Check a template argument against its corresponding
+/// template type parameter.
+///
+/// This routine implements the semantics of C++ [temp.arg.type]. It
+/// returns true if an error occurred, and false otherwise.
+bool Sema::CheckTemplateArgument(TemplateTypeParmDecl *Param,
+ TypeSourceInfo *ArgInfo) {
+ assert(ArgInfo && "invalid TypeSourceInfo");
+ QualType Arg = ArgInfo->getType();
+ SourceRange SR = ArgInfo->getTypeLoc().getSourceRange();
+
+ if (Arg->isVariablyModifiedType()) {
+ return Diag(SR.getBegin(), diag::err_variably_modified_template_arg) << Arg;
+ } else if (Context.hasSameUnqualifiedType(Arg, Context.OverloadTy)) {
+ return Diag(SR.getBegin(), diag::err_template_arg_overload_type) << SR;
+ }
+
+ // C++03 [temp.arg.type]p2:
+ // A local type, a type with no linkage, an unnamed type or a type
+ // compounded from any of these types shall not be used as a
+ // template-argument for a template type-parameter.
+ //
+ // C++11 allows these, and even in C++03 we allow them as an extension with
+ // a warning.
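+  // Illustrative example (an assumption, not from the original change;
+  // 'Wrap' is hypothetical):
+  //   template<typename T> struct Wrap { };
+  //   void f() {
+  //     struct Local { };
+  //     Wrap<Local> w;  // extension warning in C++03, accepted in C++11
+  //   }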
+ if (LangOpts.CPlusPlus0x ?
+ Diags.getDiagnosticLevel(diag::warn_cxx98_compat_template_arg_unnamed_type,
+ SR.getBegin()) != DiagnosticsEngine::Ignored ||
+ Diags.getDiagnosticLevel(diag::warn_cxx98_compat_template_arg_local_type,
+ SR.getBegin()) != DiagnosticsEngine::Ignored :
+ Arg->hasUnnamedOrLocalType()) {
+ UnnamedLocalNoLinkageFinder Finder(*this, SR);
+ (void)Finder.Visit(Context.getCanonicalType(Arg));
+ }
+
+ return false;
+}
+
+enum NullPointerValueKind {
+ NPV_NotNullPointer,
+ NPV_NullPointer,
+ NPV_Error
+};
+
+/// \brief Determine whether the given template argument is a null pointer
+/// value of the appropriate type.
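+///
+/// For example (illustrative only): given a hypothetical
+/// 'template<int *P> struct X;', the argument in 'X<nullptr>' (C++11)
+/// evaluates to a null pointer value and is classified as NPV_NullPointer.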
+static NullPointerValueKind
+isNullPointerValueTemplateArgument(Sema &S, NonTypeTemplateParmDecl *Param,
+ QualType ParamType, Expr *Arg) {
+ if (Arg->isValueDependent() || Arg->isTypeDependent())
+ return NPV_NotNullPointer;
+
+ if (!S.getLangOpts().CPlusPlus0x)
+ return NPV_NotNullPointer;
+
+ // Determine whether we have a constant expression.
+ ExprResult ArgRV = S.DefaultFunctionArrayConversion(Arg);
+ if (ArgRV.isInvalid())
+ return NPV_Error;
+ Arg = ArgRV.take();
+
+ Expr::EvalResult EvalResult;
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ EvalResult.Diag = &Notes;
+ if (!Arg->EvaluateAsRValue(EvalResult, S.Context) ||
+ EvalResult.HasSideEffects) {
+ SourceLocation DiagLoc = Arg->getExprLoc();
+
+ // If our only note is the usual "invalid subexpression" note, just point
+ // the caret at its location rather than producing an essentially
+ // redundant note.
+ if (Notes.size() == 1 && Notes[0].second.getDiagID() ==
+ diag::note_invalid_subexpr_in_const_expr) {
+ DiagLoc = Notes[0].first;
+ Notes.clear();
+ }
+
+ S.Diag(DiagLoc, diag::err_template_arg_not_address_constant)
+ << Arg->getType() << Arg->getSourceRange();
+ for (unsigned I = 0, N = Notes.size(); I != N; ++I)
+ S.Diag(Notes[I].first, Notes[I].second);
+
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return NPV_Error;
+ }
+
+ // C++11 [temp.arg.nontype]p1:
+ // - an address constant expression of type std::nullptr_t
+ if (Arg->getType()->isNullPtrType())
+ return NPV_NullPointer;
+
+ // - a constant expression that evaluates to a null pointer value (4.10); or
+ // - a constant expression that evaluates to a null member pointer value
+ // (4.11); or
+ if ((EvalResult.Val.isLValue() && !EvalResult.Val.getLValueBase()) ||
+ (EvalResult.Val.isMemberPointer() &&
+ !EvalResult.Val.getMemberPointerDecl())) {
+ // If our expression has an appropriate type, we've succeeded.
+ bool ObjCLifetimeConversion;
+ if (S.Context.hasSameUnqualifiedType(Arg->getType(), ParamType) ||
+ S.IsQualificationConversion(Arg->getType(), ParamType, false,
+ ObjCLifetimeConversion))
+ return NPV_NullPointer;
+
+ // The types didn't match, but we know we got a null pointer; complain,
+ // then recover as if the types were correct.
+ S.Diag(Arg->getExprLoc(), diag::err_template_arg_wrongtype_null_constant)
+ << Arg->getType() << ParamType << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return NPV_NullPointer;
+ }
+
+ // If we don't have a null pointer value, but we do have a NULL pointer
+ // constant, suggest a cast to the appropriate type.
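+  // Illustration (not part of the original change; 'X' is hypothetical):
+  //   template<int *P> struct X { };
+  //   X<0> x;
+  // The '0' is a null pointer constant but not a null pointer value of type
+  // 'int *', so the fix-it below suggests 'X<static_cast<int *>(0)>'.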
+ if (Arg->isNullPointerConstant(S.Context, Expr::NPC_NeverValueDependent)) {
+ std::string Code = "static_cast<" + ParamType.getAsString() + ">(";
+ S.Diag(Arg->getExprLoc(), diag::err_template_arg_untyped_null_constant)
+ << ParamType
+ << FixItHint::CreateInsertion(Arg->getLocStart(), Code)
+ << FixItHint::CreateInsertion(S.PP.getLocForEndOfToken(Arg->getLocEnd()),
+ ")");
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return NPV_NullPointer;
+ }
+
+ // FIXME: If we ever want to support general, address-constant expressions
+ // as non-type template arguments, we should return the ExprResult here to
+ // be interpreted by the caller.
+ return NPV_NotNullPointer;
+}
+
+/// \brief Checks whether the given template argument is the address
+/// of an object or function according to C++ [temp.arg.nontype]p1.
+static bool
+CheckTemplateArgumentAddressOfObjectOrFunction(Sema &S,
+ NonTypeTemplateParmDecl *Param,
+ QualType ParamType,
+ Expr *ArgIn,
+ TemplateArgument &Converted) {
+ bool Invalid = false;
+ Expr *Arg = ArgIn;
+ QualType ArgType = Arg->getType();
+
+ // If our parameter has pointer type, check for a null template value.
+ if (ParamType->isPointerType() || ParamType->isNullPtrType()) {
+ switch (isNullPointerValueTemplateArgument(S, Param, ParamType, Arg)) {
+ case NPV_NullPointer:
+ Converted = TemplateArgument((Decl *)0);
+ return false;
+
+ case NPV_Error:
+ return true;
+
+ case NPV_NotNullPointer:
+ break;
+ }
+ }
+
+ // See through any implicit casts we added to fix the type.
+ Arg = Arg->IgnoreImpCasts();
+
+ // C++ [temp.arg.nontype]p1:
+ //
+ // A template-argument for a non-type, non-template
+ // template-parameter shall be one of: [...]
+ //
+ // -- the address of an object or function with external
+ // linkage, including function templates and function
+ // template-ids but excluding non-static class members,
+ // expressed as & id-expression where the & is optional if
+ // the name refers to a function or array, or if the
+ // corresponding template-parameter is a reference; or
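+  // Sketch of the typical accepted form (illustrative, not from the original
+  // change; 'n' and 'X' are hypothetical):
+  //   int n;                       // object with external linkage
+  //   template<int *P> struct X { };
+  //   X<&n> x;                     // '&n' is the address of an object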
+
+ // In C++98/03 mode, give an extension warning on any extra parentheses.
+ // See http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#773
+ bool ExtraParens = false;
+ while (ParenExpr *Parens = dyn_cast<ParenExpr>(Arg)) {
+ if (!Invalid && !ExtraParens) {
+ S.Diag(Arg->getLocStart(),
+ S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_template_arg_extra_parens :
+ diag::ext_template_arg_extra_parens)
+ << Arg->getSourceRange();
+ ExtraParens = true;
+ }
+
+ Arg = Parens->getSubExpr();
+ }
+
+ while (SubstNonTypeTemplateParmExpr *subst =
+ dyn_cast<SubstNonTypeTemplateParmExpr>(Arg))
+ Arg = subst->getReplacement()->IgnoreImpCasts();
+
+ bool AddressTaken = false;
+ SourceLocation AddrOpLoc;
+ if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(Arg)) {
+ if (UnOp->getOpcode() == UO_AddrOf) {
+ Arg = UnOp->getSubExpr();
+ AddressTaken = true;
+ AddrOpLoc = UnOp->getOperatorLoc();
+ }
+ }
+
+ if (S.getLangOpts().MicrosoftExt && isa<CXXUuidofExpr>(Arg)) {
+ Converted = TemplateArgument(ArgIn);
+ return false;
+ }
+
+ while (SubstNonTypeTemplateParmExpr *subst =
+ dyn_cast<SubstNonTypeTemplateParmExpr>(Arg))
+ Arg = subst->getReplacement()->IgnoreImpCasts();
+
+ // Stop checking the precise nature of the argument if it is value dependent,
+ // it should be checked when instantiated.
+ if (Arg->isValueDependent()) {
+ Converted = TemplateArgument(ArgIn);
+ return false;
+ }
+
+ DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arg);
+ if (!DRE) {
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_not_decl_ref)
+ << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ if (!isa<ValueDecl>(DRE->getDecl())) {
+ S.Diag(Arg->getLocStart(),
+ diag::err_template_arg_not_object_or_func_form)
+ << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ NamedDecl *Entity = DRE->getDecl();
+
+ // Cannot refer to non-static data members
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(Entity)) {
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_field)
+ << Field << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ // Cannot refer to non-static member functions
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Entity)) {
+ if (!Method->isStatic()) {
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_method)
+ << Method << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+ }
+
+ FunctionDecl *Func = dyn_cast<FunctionDecl>(Entity);
+ VarDecl *Var = dyn_cast<VarDecl>(Entity);
+
+ // A non-type template argument must refer to an object or function.
+ if (!Func && !Var) {
+ // We found something, but we don't know specifically what it is.
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_not_object_or_func)
+ << Arg->getSourceRange();
+ S.Diag(DRE->getDecl()->getLocation(), diag::note_template_arg_refers_here);
+ return true;
+ }
+
+ // Address / reference template args must have external linkage in C++98.
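+  // For illustration (an assumption, not in the original change): given a
+  // hypothetical 'static int s;' and 'template<int *P> struct X;', the
+  // argument in 'X<&s> x;' names an internal-linkage object, which C++98/03
+  // diagnoses as an extension and C++11 accepts with a compatibility warning.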
+ if (Entity->getLinkage() == InternalLinkage) {
+ S.Diag(Arg->getLocStart(), S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_template_arg_object_internal :
+ diag::ext_template_arg_object_internal)
+ << !Func << Entity << Arg->getSourceRange();
+ S.Diag(Entity->getLocation(), diag::note_template_arg_internal_object)
+ << !Func;
+ } else if (Entity->getLinkage() == NoLinkage) {
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_object_no_linkage)
+ << !Func << Entity << Arg->getSourceRange();
+ S.Diag(Entity->getLocation(), diag::note_template_arg_internal_object)
+ << !Func;
+ return true;
+ }
+
+ if (Func) {
+ // If the template parameter has pointer type, the function decays.
+ if (ParamType->isPointerType() && !AddressTaken)
+ ArgType = S.Context.getPointerType(Func->getType());
+ else if (AddressTaken && ParamType->isReferenceType()) {
+ // If we originally had an address-of operator, but the
+ // parameter has reference type, complain and (if things look
+ // like they will work) drop the address-of operator.
+ if (!S.Context.hasSameUnqualifiedType(Func->getType(),
+ ParamType.getNonReferenceType())) {
+ S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
+ << ParamType;
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
+ << ParamType
+ << FixItHint::CreateRemoval(AddrOpLoc);
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+
+ ArgType = Func->getType();
+ }
+ } else {
+ // A value of reference type is not an object.
+ if (Var->getType()->isReferenceType()) {
+ S.Diag(Arg->getLocStart(),
+ diag::err_template_arg_reference_var)
+ << Var->getType() << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ // A template argument must have static storage duration.
+ // FIXME: Ensure this works for thread_local as well as __thread.
+ if (Var->isThreadSpecified()) {
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_thread_local)
+ << Arg->getSourceRange();
+ S.Diag(Var->getLocation(), diag::note_template_arg_refers_here);
+ return true;
+ }
+
+ // If the template parameter has pointer type, we must have taken
+ // the address of this object.
+ if (ParamType->isReferenceType()) {
+ if (AddressTaken) {
+ // If we originally had an address-of operator, but the
+ // parameter has reference type, complain and (if things look
+ // like they will work) drop the address-of operator.
+ if (!S.Context.hasSameUnqualifiedType(Var->getType(),
+ ParamType.getNonReferenceType())) {
+ S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
+ << ParamType;
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ S.Diag(AddrOpLoc, diag::err_template_arg_address_of_non_pointer)
+ << ParamType
+ << FixItHint::CreateRemoval(AddrOpLoc);
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+
+ ArgType = Var->getType();
+ }
+ } else if (!AddressTaken && ParamType->isPointerType()) {
+ if (Var->getType()->isArrayType()) {
+ // Array-to-pointer decay.
+ ArgType = S.Context.getArrayDecayedType(Var->getType());
+ } else {
+ // If the template parameter has pointer type but the address of
+ // this object was not taken, complain and (possibly) recover by
+ // taking the address of the entity.
+ ArgType = S.Context.getPointerType(Var->getType());
+ if (!S.Context.hasSameUnqualifiedType(ArgType, ParamType)) {
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_not_address_of)
+ << ParamType;
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_not_address_of)
+ << ParamType
+ << FixItHint::CreateInsertion(Arg->getLocStart(), "&");
+
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ }
+ }
+ }
+
+ bool ObjCLifetimeConversion;
+ if (ParamType->isPointerType() &&
+ !ParamType->getAs<PointerType>()->getPointeeType()->isFunctionType() &&
+ S.IsQualificationConversion(ArgType, ParamType, false,
+ ObjCLifetimeConversion)) {
+ // For pointer-to-object types, qualification conversions are
+ // permitted.
+ } else {
+ if (const ReferenceType *ParamRef = ParamType->getAs<ReferenceType>()) {
+ if (!ParamRef->getPointeeType()->isFunctionType()) {
+ // C++ [temp.arg.nontype]p5b3:
+ // For a non-type template-parameter of type reference to
+ // object, no conversions apply. The type referred to by the
+ // reference may be more cv-qualified than the (otherwise
+        //   identical) type of the template-argument. The
+ // template-parameter is bound directly to the
+ // template-argument, which shall be an lvalue.
+
+ // FIXME: Other qualifiers?
+ unsigned ParamQuals = ParamRef->getPointeeType().getCVRQualifiers();
+ unsigned ArgQuals = ArgType.getCVRQualifiers();
+
+ if ((ParamQuals | ArgQuals) != ParamQuals) {
+ S.Diag(Arg->getLocStart(),
+ diag::err_template_arg_ref_bind_ignores_quals)
+ << ParamType << Arg->getType()
+ << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+ }
+ }
+
+ // At this point, the template argument refers to an object or
+ // function with external linkage. We now need to check whether the
+ // argument and parameter types are compatible.
+ if (!S.Context.hasSameUnqualifiedType(ArgType,
+ ParamType.getNonReferenceType())) {
+ // We can't perform this conversion or binding.
+ if (ParamType->isReferenceType())
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_no_ref_bind)
+ << ParamType << ArgIn->getType() << Arg->getSourceRange();
+ else
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_not_convertible)
+ << ArgIn->getType() << ParamType << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+ }
+
+ // Create the template argument.
+ Converted = TemplateArgument(Entity->getCanonicalDecl());
+ S.MarkAnyDeclReferenced(Arg->getLocStart(), Entity);
+ return false;
+}
+
+/// \brief Checks whether the given template argument is a pointer to
+/// member constant according to C++ [temp.arg.nontype]p1.
+static bool CheckTemplateArgumentPointerToMember(Sema &S,
+ NonTypeTemplateParmDecl *Param,
+ QualType ParamType,
+ Expr *&ResultArg,
+ TemplateArgument &Converted) {
+ bool Invalid = false;
+
+ // Check for a null pointer value.
+ Expr *Arg = ResultArg;
+ switch (isNullPointerValueTemplateArgument(S, Param, ParamType, Arg)) {
+ case NPV_Error:
+ return true;
+ case NPV_NullPointer:
+ Converted = TemplateArgument((Decl *)0);
+ return false;
+ case NPV_NotNullPointer:
+ break;
+ }
+
+ bool ObjCLifetimeConversion;
+ if (S.IsQualificationConversion(Arg->getType(),
+ ParamType.getNonReferenceType(),
+ false, ObjCLifetimeConversion)) {
+ Arg = S.ImpCastExprToType(Arg, ParamType, CK_NoOp,
+ Arg->getValueKind()).take();
+ ResultArg = Arg;
+ } else if (!S.Context.hasSameUnqualifiedType(Arg->getType(),
+ ParamType.getNonReferenceType())) {
+ // We can't perform this conversion.
+ S.Diag(Arg->getLocStart(), diag::err_template_arg_not_convertible)
+ << Arg->getType() << ParamType << Arg->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+
+ // See through any implicit casts we added to fix the type.
+ while (ImplicitCastExpr *Cast = dyn_cast<ImplicitCastExpr>(Arg))
+ Arg = Cast->getSubExpr();
+
+ // C++ [temp.arg.nontype]p1:
+ //
+ // A template-argument for a non-type, non-template
+ // template-parameter shall be one of: [...]
+ //
+ // -- a pointer to member expressed as described in 5.3.1.
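+  // Illustrative sketch (not part of the original change; 'S' and 'X' are
+  // hypothetical):
+  //   struct S { int m; };
+  //   template<int S::*P> struct X { };
+  //   X<&S::m> x;   // '&S::m' is the pointer-to-member form accepted below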
+ DeclRefExpr *DRE = 0;
+
+ // In C++98/03 mode, give an extension warning on any extra parentheses.
+ // See http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_defects.html#773
+ bool ExtraParens = false;
+ while (ParenExpr *Parens = dyn_cast<ParenExpr>(Arg)) {
+ if (!Invalid && !ExtraParens) {
+ S.Diag(Arg->getLocStart(),
+ S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_template_arg_extra_parens :
+ diag::ext_template_arg_extra_parens)
+ << Arg->getSourceRange();
+ ExtraParens = true;
+ }
+
+ Arg = Parens->getSubExpr();
+ }
+
+ while (SubstNonTypeTemplateParmExpr *subst =
+ dyn_cast<SubstNonTypeTemplateParmExpr>(Arg))
+ Arg = subst->getReplacement()->IgnoreImpCasts();
+
+ // A pointer-to-member constant written &Class::member.
+ if (UnaryOperator *UnOp = dyn_cast<UnaryOperator>(Arg)) {
+ if (UnOp->getOpcode() == UO_AddrOf) {
+ DRE = dyn_cast<DeclRefExpr>(UnOp->getSubExpr());
+ if (DRE && !DRE->getQualifier())
+ DRE = 0;
+ }
+ }
+ // A constant of pointer-to-member type.
+ else if ((DRE = dyn_cast<DeclRefExpr>(Arg))) {
+ if (ValueDecl *VD = dyn_cast<ValueDecl>(DRE->getDecl())) {
+ if (VD->getType()->isMemberPointerType()) {
+ if (isa<NonTypeTemplateParmDecl>(VD) ||
+ (isa<VarDecl>(VD) &&
+ S.Context.getCanonicalType(VD->getType()).isConstQualified())) {
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ Converted = TemplateArgument(Arg);
+ else
+ Converted = TemplateArgument(VD->getCanonicalDecl());
+ return Invalid;
+ }
+ }
+ }
+
+ DRE = 0;
+ }
+
+ if (!DRE)
+ return S.Diag(Arg->getLocStart(),
+ diag::err_template_arg_not_pointer_to_member_form)
+ << Arg->getSourceRange();
+
+ if (isa<FieldDecl>(DRE->getDecl()) || isa<CXXMethodDecl>(DRE->getDecl())) {
+ assert((isa<FieldDecl>(DRE->getDecl()) ||
+ !cast<CXXMethodDecl>(DRE->getDecl())->isStatic()) &&
+ "Only non-static member pointers can make it here");
+
+ // Okay: this is the address of a non-static member, and therefore
+ // a member pointer constant.
+ if (Arg->isTypeDependent() || Arg->isValueDependent())
+ Converted = TemplateArgument(Arg);
+ else
+ Converted = TemplateArgument(DRE->getDecl()->getCanonicalDecl());
+ return Invalid;
+ }
+
+ // We found something else, but we don't know specifically what it is.
+ S.Diag(Arg->getLocStart(),
+ diag::err_template_arg_not_pointer_to_member_form)
+ << Arg->getSourceRange();
+ S.Diag(DRE->getDecl()->getLocation(), diag::note_template_arg_refers_here);
+ return true;
+}
+
+/// \brief Check a template argument against its corresponding
+/// non-type template parameter.
+///
+/// This routine implements the semantics of C++ [temp.arg.nontype].
+/// If an error occurred, it returns ExprError(); otherwise, it
+/// returns the converted template argument. \p
+/// InstantiatedParamType is the type of the non-type template
+/// parameter after it has been instantiated.
+ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
+ QualType InstantiatedParamType, Expr *Arg,
+ TemplateArgument &Converted,
+ CheckTemplateArgumentKind CTAK) {
+ SourceLocation StartLoc = Arg->getLocStart();
+
+ // If either the parameter has a dependent type or the argument is
+ // type-dependent, there's nothing we can check now.
+ if (InstantiatedParamType->isDependentType() || Arg->isTypeDependent()) {
+ // FIXME: Produce a cloned, canonical expression?
+ Converted = TemplateArgument(Arg);
+ return Owned(Arg);
+ }
+
+ // C++ [temp.arg.nontype]p5:
+ // The following conversions are performed on each expression used
+ // as a non-type template-argument. If a non-type
+ // template-argument cannot be converted to the type of the
+ // corresponding template-parameter then the program is
+ // ill-formed.
+ QualType ParamType = InstantiatedParamType;
+ if (ParamType->isIntegralOrEnumerationType()) {
+ // C++11:
+ // -- for a non-type template-parameter of integral or
+ // enumeration type, conversions permitted in a converted
+ // constant expression are applied.
+ //
+ // C++98:
+ // -- for a non-type template-parameter of integral or
+ // enumeration type, integral promotions (4.5) and integral
+ // conversions (4.7) are applied.
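+    // Hedged illustration (not from the original change; 'X' is hypothetical):
+    //   template<long N> struct X { };
+    //   X<'a'> x;
+    // The 'char' argument is converted to 'long' before being recorded as the
+    // converted template argument.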
+
+ if (CTAK == CTAK_Deduced &&
+ !Context.hasSameUnqualifiedType(ParamType, Arg->getType())) {
+ // C++ [temp.deduct.type]p17:
+ // If, in the declaration of a function template with a non-type
+ // template-parameter, the non-type template-parameter is used
+ // in an expression in the function parameter-list and, if the
+ // corresponding template-argument is deduced, the
+ // template-argument type shall match the type of the
+ // template-parameter exactly, except that a template-argument
+ // deduced from an array bound may be of any integral type.
+ Diag(StartLoc, diag::err_deduced_non_type_template_arg_type_mismatch)
+ << Arg->getType().getUnqualifiedType()
+ << ParamType.getUnqualifiedType();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return ExprError();
+ }
+
+ if (getLangOpts().CPlusPlus0x) {
+ // We can't check arbitrary value-dependent arguments.
+ // FIXME: If there's no viable conversion to the template parameter type,
+ // we should be able to diagnose that prior to instantiation.
+ if (Arg->isValueDependent()) {
+ Converted = TemplateArgument(Arg);
+ return Owned(Arg);
+ }
+
+ // C++ [temp.arg.nontype]p1:
+ // A template-argument for a non-type, non-template template-parameter
+ // shall be one of:
+ //
+ // -- for a non-type template-parameter of integral or enumeration
+ // type, a converted constant expression of the type of the
+ // template-parameter; or
+ llvm::APSInt Value;
+ ExprResult ArgResult =
+ CheckConvertedConstantExpression(Arg, ParamType, Value,
+ CCEK_TemplateArg);
+ if (ArgResult.isInvalid())
+ return ExprError();
+
+ // Widen the argument value to sizeof(parameter type). This is almost
+ // always a no-op, except when the parameter type is bool. In
+ // that case, this may extend the argument from 1 bit to 8 bits.
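+      // For instance (illustrative): with a hypothetical
+      // 'template<bool B> struct X;', the evaluated value for 'X<true>' may
+      // occupy a single bit and is widened here to the full width of 'bool'.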
+ QualType IntegerType = ParamType;
+ if (const EnumType *Enum = IntegerType->getAs<EnumType>())
+ IntegerType = Enum->getDecl()->getIntegerType();
+ Value = Value.extOrTrunc(Context.getTypeSize(IntegerType));
+
+ Converted = TemplateArgument(Value, Context.getCanonicalType(ParamType));
+ return ArgResult;
+ }
+
+ ExprResult ArgResult = DefaultLvalueConversion(Arg);
+ if (ArgResult.isInvalid())
+ return ExprError();
+ Arg = ArgResult.take();
+
+ QualType ArgType = Arg->getType();
+
+ // C++ [temp.arg.nontype]p1:
+ // A template-argument for a non-type, non-template
+ // template-parameter shall be one of:
+ //
+ // -- an integral constant-expression of integral or enumeration
+ // type; or
+ // -- the name of a non-type template-parameter; or
+ SourceLocation NonConstantLoc;
+ llvm::APSInt Value;
+ if (!ArgType->isIntegralOrEnumerationType()) {
+ Diag(Arg->getLocStart(),
+ diag::err_template_arg_not_integral_or_enumeral)
+ << ArgType << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return ExprError();
+ } else if (!Arg->isValueDependent()) {
+ Arg = VerifyIntegerConstantExpression(Arg, &Value,
+ PDiag(diag::err_template_arg_not_ice) << ArgType, false).take();
+ if (!Arg)
+ return ExprError();
+ }
+
+ // From here on out, all we care about are the unqualified forms
+ // of the parameter and argument types.
+ ParamType = ParamType.getUnqualifiedType();
+ ArgType = ArgType.getUnqualifiedType();
+
+ // Try to convert the argument to the parameter's type.
+ if (Context.hasSameType(ParamType, ArgType)) {
+ // Okay: no conversion necessary
+ } else if (ParamType->isBooleanType()) {
+ // This is an integral-to-boolean conversion.
+ Arg = ImpCastExprToType(Arg, ParamType, CK_IntegralToBoolean).take();
+ } else if (IsIntegralPromotion(Arg, ArgType, ParamType) ||
+ !ParamType->isEnumeralType()) {
+ // This is an integral promotion or conversion.
+ Arg = ImpCastExprToType(Arg, ParamType, CK_IntegralCast).take();
+ } else {
+ // We can't perform this conversion.
+ Diag(Arg->getLocStart(),
+ diag::err_template_arg_not_convertible)
+ << Arg->getType() << InstantiatedParamType << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return ExprError();
+ }
+
+ // Add the value of this argument to the list of converted
+ // arguments. We use the bitwidth and signedness of the template
+ // parameter.
+ if (Arg->isValueDependent()) {
+ // The argument is value-dependent. Create a new
+ // TemplateArgument with the converted expression.
+ Converted = TemplateArgument(Arg);
+ return Owned(Arg);
+ }
+
+ QualType IntegerType = Context.getCanonicalType(ParamType);
+ if (const EnumType *Enum = IntegerType->getAs<EnumType>())
+ IntegerType = Context.getCanonicalType(Enum->getDecl()->getIntegerType());
+
+ if (ParamType->isBooleanType()) {
+ // Value must be zero or one.
+ Value = Value != 0;
+ unsigned AllowedBits = Context.getTypeSize(IntegerType);
+ if (Value.getBitWidth() != AllowedBits)
+ Value = Value.extOrTrunc(AllowedBits);
+ Value.setIsSigned(IntegerType->isSignedIntegerOrEnumerationType());
+ } else {
+ llvm::APSInt OldValue = Value;
+
+ // Coerce the template argument's value to the value it will have
+ // based on the template parameter's type.
+ unsigned AllowedBits = Context.getTypeSize(IntegerType);
+ if (Value.getBitWidth() != AllowedBits)
+ Value = Value.extOrTrunc(AllowedBits);
+ Value.setIsSigned(IntegerType->isSignedIntegerOrEnumerationType());
+
+ // Complain if an unsigned parameter received a negative value.
+ if (IntegerType->isUnsignedIntegerOrEnumerationType()
+ && (OldValue.isSigned() && OldValue.isNegative())) {
+ Diag(Arg->getLocStart(), diag::warn_template_arg_negative)
+ << OldValue.toString(10) << Value.toString(10) << Param->getType()
+ << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ }
+
+ // Complain if we overflowed the template parameter's type.
+ unsigned RequiredBits;
+ if (IntegerType->isUnsignedIntegerOrEnumerationType())
+ RequiredBits = OldValue.getActiveBits();
+ else if (OldValue.isUnsigned())
+ RequiredBits = OldValue.getActiveBits() + 1;
+ else
+ RequiredBits = OldValue.getMinSignedBits();
+ if (RequiredBits > AllowedBits) {
+ Diag(Arg->getLocStart(),
+ diag::warn_template_arg_too_large)
+ << OldValue.toString(10) << Value.toString(10) << Param->getType()
+ << Arg->getSourceRange();
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ }
+ }
+
+ Converted = TemplateArgument(Value,
+ ParamType->isEnumeralType()
+ ? Context.getCanonicalType(ParamType)
+ : IntegerType);
+ return Owned(Arg);
+ }
+
+ QualType ArgType = Arg->getType();
+ DeclAccessPair FoundResult; // temporary for ResolveOverloadedFunction
+
+ // Handle pointer-to-function, reference-to-function, and
+ // pointer-to-member-function all in (roughly) the same way.
+ if (// -- For a non-type template-parameter of type pointer to
+ // function, only the function-to-pointer conversion (4.3) is
+ // applied. If the template-argument represents a set of
+ // overloaded functions (or a pointer to such), the matching
+ // function is selected from the set (13.4).
+ (ParamType->isPointerType() &&
+ ParamType->getAs<PointerType>()->getPointeeType()->isFunctionType()) ||
+ // -- For a non-type template-parameter of type reference to
+ // function, no conversions apply. If the template-argument
+ // represents a set of overloaded functions, the matching
+ // function is selected from the set (13.4).
+ (ParamType->isReferenceType() &&
+ ParamType->getAs<ReferenceType>()->getPointeeType()->isFunctionType()) ||
+ // -- For a non-type template-parameter of type pointer to
+ // member function, no conversions apply. If the
+ // template-argument represents a set of overloaded member
+ // functions, the matching member function is selected from
+ // the set (13.4).
+ (ParamType->isMemberPointerType() &&
+ ParamType->getAs<MemberPointerType>()->getPointeeType()
+ ->isFunctionType())) {
+
+ if (Arg->getType() == Context.OverloadTy) {
+ if (FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(Arg, ParamType,
+ true,
+ FoundResult)) {
+ if (DiagnoseUseOfDecl(Fn, Arg->getLocStart()))
+ return ExprError();
+
+ Arg = FixOverloadedFunctionReference(Arg, FoundResult, Fn);
+ ArgType = Arg->getType();
+ } else
+ return ExprError();
+ }
+
+ if (!ParamType->isMemberPointerType()) {
+ if (CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
+ ParamType,
+ Arg, Converted))
+ return ExprError();
+ return Owned(Arg);
+ }
+
+ if (CheckTemplateArgumentPointerToMember(*this, Param, ParamType, Arg,
+ Converted))
+ return ExprError();
+ return Owned(Arg);
+ }
+
+ if (ParamType->isPointerType()) {
+ // -- for a non-type template-parameter of type pointer to
+ // object, qualification conversions (4.4) and the
+ // array-to-pointer conversion (4.2) are applied.
+ // C++0x also allows a value of std::nullptr_t.
+ assert(ParamType->getPointeeType()->isIncompleteOrObjectType() &&
+ "Only object pointers allowed here");
+
+ if (CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
+ ParamType,
+ Arg, Converted))
+ return ExprError();
+ return Owned(Arg);
+ }
+
+ if (const ReferenceType *ParamRefType = ParamType->getAs<ReferenceType>()) {
+ // -- For a non-type template-parameter of type reference to
+ // object, no conversions apply. The type referred to by the
+ // reference may be more cv-qualified than the (otherwise
+ // identical) type of the template-argument. The
+ // template-parameter is bound directly to the
+ // template-argument, which must be an lvalue.
+ assert(ParamRefType->getPointeeType()->isIncompleteOrObjectType() &&
+ "Only object references allowed here");
+
+ if (Arg->getType() == Context.OverloadTy) {
+ if (FunctionDecl *Fn = ResolveAddressOfOverloadedFunction(Arg,
+ ParamRefType->getPointeeType(),
+ true,
+ FoundResult)) {
+ if (DiagnoseUseOfDecl(Fn, Arg->getLocStart()))
+ return ExprError();
+
+ Arg = FixOverloadedFunctionReference(Arg, FoundResult, Fn);
+ ArgType = Arg->getType();
+ } else
+ return ExprError();
+ }
+
+ if (CheckTemplateArgumentAddressOfObjectOrFunction(*this, Param,
+ ParamType,
+ Arg, Converted))
+ return ExprError();
+ return Owned(Arg);
+ }
+
+ // Deal with parameters of type std::nullptr_t.
+ if (ParamType->isNullPtrType()) {
+ if (Arg->isTypeDependent() || Arg->isValueDependent()) {
+ Converted = TemplateArgument(Arg);
+ return Owned(Arg);
+ }
+
+ switch (isNullPointerValueTemplateArgument(*this, Param, ParamType, Arg)) {
+ case NPV_NotNullPointer:
+ Diag(Arg->getExprLoc(), diag::err_template_arg_not_convertible)
+ << Arg->getType() << ParamType;
+ Diag(Param->getLocation(), diag::note_template_param_here);
+ return ExprError();
+
+ case NPV_Error:
+ return ExprError();
+
+ case NPV_NullPointer:
+ Converted = TemplateArgument((Decl *)0);
+      return Owned(Arg);
+ }
+ }
+
+ // -- For a non-type template-parameter of type pointer to data
+ // member, qualification conversions (4.4) are applied.
+ assert(ParamType->isMemberPointerType() && "Only pointers to members remain");
+
+ if (CheckTemplateArgumentPointerToMember(*this, Param, ParamType, Arg,
+ Converted))
+ return ExprError();
+ return Owned(Arg);
+}
+
+/// \brief Check a template argument against its corresponding
+/// template template parameter.
+///
+/// This routine implements the semantics of C++ [temp.arg.template].
+/// It returns true if an error occurred, and false otherwise.
+bool Sema::CheckTemplateArgument(TemplateTemplateParmDecl *Param,
+ const TemplateArgumentLoc &Arg) {
+ TemplateName Name = Arg.getArgument().getAsTemplate();
+ TemplateDecl *Template = Name.getAsTemplateDecl();
+ if (!Template) {
+ // Any dependent template name is fine.
+ assert(Name.isDependent() && "Non-dependent template isn't a declaration?");
+ return false;
+ }
+
+ // C++0x [temp.arg.template]p1:
+ // A template-argument for a template template-parameter shall be
+ // the name of a class template or an alias template, expressed as an
+ // id-expression. When the template-argument names a class template, only
+ // primary class templates are considered when matching the
+ // template template argument with the corresponding parameter;
+ // partial specializations are not considered even if their
+ // parameter lists match that of the template template parameter.
+ //
+ // Note that we also allow template template parameters here, which
+ // will happen when we are dealing with, e.g., class template
+ // partial specializations.
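+  // Illustrative example (not part of the original change; 'A' and 'B' are
+  // hypothetical):
+  //   template<typename T> struct A { };
+  //   template<typename T> struct A<T*> { };              // not considered
+  //   template<template<typename> class TT> struct B { };
+  //   B<A> b;   // matched against the primary template 'A' only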
+ if (!isa<ClassTemplateDecl>(Template) &&
+ !isa<TemplateTemplateParmDecl>(Template) &&
+ !isa<TypeAliasTemplateDecl>(Template)) {
+ assert(isa<FunctionTemplateDecl>(Template) &&
+ "Only function templates are possible here");
+ Diag(Arg.getLocation(), diag::err_template_arg_not_class_template);
+ Diag(Template->getLocation(), diag::note_template_arg_refers_here_func)
+ << Template;
+ }
+
+ return !TemplateParameterListsAreEqual(Template->getTemplateParameters(),
+ Param->getTemplateParameters(),
+ true,
+ TPL_TemplateTemplateArgumentMatch,
+ Arg.getLocation());
+}
+
+/// \brief Given a non-type template argument that refers to a
+/// declaration and the type of its corresponding non-type template
+/// parameter, produce an expression that properly refers to that
+/// declaration.
+ExprResult
+Sema::BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
+ QualType ParamType,
+ SourceLocation Loc) {
+ assert(Arg.getKind() == TemplateArgument::Declaration &&
+ "Only declaration template arguments permitted here");
+
+ // For a NULL non-type template argument, return nullptr casted to the
+ // parameter's type.
+ if (!Arg.getAsDecl()) {
+ return ImpCastExprToType(
+ new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc),
+ ParamType,
+ ParamType->getAs<MemberPointerType>()
+ ? CK_NullToMemberPointer
+ : CK_NullToPointer);
+ }
+
+ ValueDecl *VD = cast<ValueDecl>(Arg.getAsDecl());
+
+ if (VD->getDeclContext()->isRecord() &&
+ (isa<CXXMethodDecl>(VD) || isa<FieldDecl>(VD))) {
+ // If the value is a class member, we might have a pointer-to-member.
+    // Determine whether the non-type template parameter is of
+ // pointer-to-member type. If so, we need to build an appropriate
+ // expression for a pointer-to-member, since a "normal" DeclRefExpr
+ // would refer to the member itself.
+ if (ParamType->isMemberPointerType()) {
+ QualType ClassType
+ = Context.getTypeDeclType(cast<RecordDecl>(VD->getDeclContext()));
+ NestedNameSpecifier *Qualifier
+ = NestedNameSpecifier::Create(Context, 0, false,
+ ClassType.getTypePtr());
+ CXXScopeSpec SS;
+ SS.MakeTrivial(Context, Qualifier, Loc);
+
+ // The actual value-ness of this is unimportant, but for
+ // internal consistency's sake, references to instance methods
+ // are r-values.
+ ExprValueKind VK = VK_LValue;
+ if (isa<CXXMethodDecl>(VD) && cast<CXXMethodDecl>(VD)->isInstance())
+ VK = VK_RValue;
+
+ ExprResult RefExpr = BuildDeclRefExpr(VD,
+ VD->getType().getNonReferenceType(),
+ VK,
+ Loc,
+ &SS);
+ if (RefExpr.isInvalid())
+ return ExprError();
+
+ RefExpr = CreateBuiltinUnaryOp(Loc, UO_AddrOf, RefExpr.get());
+
+ // We might need to perform a trailing qualification conversion, since
+ // the element type on the parameter could be more qualified than the
+ // element type in the expression we constructed.
+ bool ObjCLifetimeConversion;
+ if (IsQualificationConversion(((Expr*) RefExpr.get())->getType(),
+ ParamType.getUnqualifiedType(), false,
+ ObjCLifetimeConversion))
+ RefExpr = ImpCastExprToType(RefExpr.take(), ParamType.getUnqualifiedType(), CK_NoOp);
+
+ assert(!RefExpr.isInvalid() &&
+ Context.hasSameType(((Expr*) RefExpr.get())->getType(),
+ ParamType.getUnqualifiedType()));
+ return move(RefExpr);
+ }
+ }
+
+ QualType T = VD->getType().getNonReferenceType();
+ if (ParamType->isPointerType()) {
+ // When the non-type template parameter is a pointer, take the
+ // address of the declaration.
+ ExprResult RefExpr = BuildDeclRefExpr(VD, T, VK_LValue, Loc);
+ if (RefExpr.isInvalid())
+ return ExprError();
+
+ if (T->isFunctionType() || T->isArrayType()) {
+ // Decay functions and arrays.
+ RefExpr = DefaultFunctionArrayConversion(RefExpr.take());
+ if (RefExpr.isInvalid())
+ return ExprError();
+
+ return move(RefExpr);
+ }
+
+ // Take the address of everything else
+ return CreateBuiltinUnaryOp(Loc, UO_AddrOf, RefExpr.get());
+ }
+
+ ExprValueKind VK = VK_RValue;
+
+ // If the non-type template parameter has reference type, qualify the
+ // resulting declaration reference with the extra qualifiers on the
+ // type that the reference refers to.
+ if (const ReferenceType *TargetRef = ParamType->getAs<ReferenceType>()) {
+ VK = VK_LValue;
+ T = Context.getQualifiedType(T,
+ TargetRef->getPointeeType().getQualifiers());
+ }
+
+ return BuildDeclRefExpr(VD, T, VK, Loc);
+}
+
+/// \brief Construct a new expression that refers to the given
+/// integral template argument with the given source-location
+/// information.
+///
+/// This routine takes care of the mapping from an integral template
+/// argument (which may have any integral type) to the appropriate
+/// literal value.
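+///
+/// As an illustration (not from the original change): a value of type 'char'
+/// becomes a CharacterLiteral, a 'bool' becomes a CXXBoolLiteralExpr, and an
+/// enumeration value becomes an IntegerLiteral of the underlying integer type
+/// wrapped in an explicit cast back to the enumeration type.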
+ExprResult
+Sema::BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
+ SourceLocation Loc) {
+ assert(Arg.getKind() == TemplateArgument::Integral &&
+ "Operation is only valid for integral template arguments");
+ QualType T = Arg.getIntegralType();
+ if (T->isAnyCharacterType()) {
+ CharacterLiteral::CharacterKind Kind;
+ if (T->isWideCharType())
+ Kind = CharacterLiteral::Wide;
+ else if (T->isChar16Type())
+ Kind = CharacterLiteral::UTF16;
+ else if (T->isChar32Type())
+ Kind = CharacterLiteral::UTF32;
+ else
+ Kind = CharacterLiteral::Ascii;
+
+ return Owned(new (Context) CharacterLiteral(
+ Arg.getAsIntegral()->getZExtValue(),
+ Kind, T, Loc));
+ }
+
+ if (T->isBooleanType())
+ return Owned(new (Context) CXXBoolLiteralExpr(
+ Arg.getAsIntegral()->getBoolValue(),
+ T, Loc));
+
+ if (T->isNullPtrType())
+ return Owned(new (Context) CXXNullPtrLiteralExpr(Context.NullPtrTy, Loc));
+
+  // If this is an enum type that we're instantiating, we need to use an
+  // integer type of the same size as the enumeration's underlying type. We
+  // don't want to build an IntegerLiteral with enum type.
+ QualType BT;
+ if (const EnumType *ET = T->getAs<EnumType>())
+ BT = ET->getDecl()->getIntegerType();
+ else
+ BT = T;
+
+ Expr *E = IntegerLiteral::Create(Context, *Arg.getAsIntegral(), BT, Loc);
+ if (T->isEnumeralType()) {
+ // FIXME: This is a hack. We need a better way to handle substituted
+ // non-type template parameters.
+ E = CStyleCastExpr::Create(Context, T, VK_RValue, CK_IntegralCast, E, 0,
+ Context.getTrivialTypeSourceInfo(T, Loc),
+ Loc, Loc);
+ }
+
+ return Owned(E);
+}
+
+/// \brief Match two template parameters within template parameter lists.
+static bool MatchTemplateParameterKind(Sema &S, NamedDecl *New, NamedDecl *Old,
+ bool Complain,
+ Sema::TemplateParameterListEqualKind Kind,
+ SourceLocation TemplateArgLoc) {
+ // Check the actual kind (type, non-type, template).
+ if (Old->getKind() != New->getKind()) {
+ if (Complain) {
+ unsigned NextDiag = diag::err_template_param_different_kind;
+ if (TemplateArgLoc.isValid()) {
+ S.Diag(TemplateArgLoc, diag::err_template_arg_template_params_mismatch);
+ NextDiag = diag::note_template_param_different_kind;
+ }
+ S.Diag(New->getLocation(), NextDiag)
+ << (Kind != Sema::TPL_TemplateMatch);
+ S.Diag(Old->getLocation(), diag::note_template_prev_declaration)
+ << (Kind != Sema::TPL_TemplateMatch);
+ }
+
+ return false;
+ }
+
+  // Check that both are parameter packs or neither are parameter packs.
+ // However, if we are matching a template template argument to a
+ // template template parameter, the template template parameter can have
+ // a parameter pack where the template template argument does not.
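+  // Hedged illustration (an assumption, not in the original change; 'X' and
+  // 'Y' are hypothetical):
+  //   template<typename T> struct Y { };
+  //   template<template<typename...> class TT> struct X { };
+  //   X<Y> x;   // OK: the parameter's pack corresponds to Y's non-pack 'T'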
+ if (Old->isTemplateParameterPack() != New->isTemplateParameterPack() &&
+ !(Kind == Sema::TPL_TemplateTemplateArgumentMatch &&
+ Old->isTemplateParameterPack())) {
+ if (Complain) {
+ unsigned NextDiag = diag::err_template_parameter_pack_non_pack;
+ if (TemplateArgLoc.isValid()) {
+ S.Diag(TemplateArgLoc,
+ diag::err_template_arg_template_params_mismatch);
+ NextDiag = diag::note_template_parameter_pack_non_pack;
+ }
+
+ unsigned ParamKind = isa<TemplateTypeParmDecl>(New)? 0
+ : isa<NonTypeTemplateParmDecl>(New)? 1
+ : 2;
+ S.Diag(New->getLocation(), NextDiag)
+ << ParamKind << New->isParameterPack();
+ S.Diag(Old->getLocation(), diag::note_template_parameter_pack_here)
+ << ParamKind << Old->isParameterPack();
+ }
+
+ return false;
+ }
+
+ // For non-type template parameters, check the type of the parameter.
+ if (NonTypeTemplateParmDecl *OldNTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Old)) {
+ NonTypeTemplateParmDecl *NewNTTP = cast<NonTypeTemplateParmDecl>(New);
+
+ // If we are matching a template template argument to a template
+ // template parameter and one of the non-type template parameter types
+ // is dependent, then we must wait until template instantiation time
+ // to actually compare the arguments.
+ if (Kind == Sema::TPL_TemplateTemplateArgumentMatch &&
+ (OldNTTP->getType()->isDependentType() ||
+ NewNTTP->getType()->isDependentType()))
+ return true;
+
+ if (!S.Context.hasSameType(OldNTTP->getType(), NewNTTP->getType())) {
+ if (Complain) {
+ unsigned NextDiag = diag::err_template_nontype_parm_different_type;
+ if (TemplateArgLoc.isValid()) {
+ S.Diag(TemplateArgLoc,
+ diag::err_template_arg_template_params_mismatch);
+ NextDiag = diag::note_template_nontype_parm_different_type;
+ }
+ S.Diag(NewNTTP->getLocation(), NextDiag)
+ << NewNTTP->getType()
+ << (Kind != Sema::TPL_TemplateMatch);
+ S.Diag(OldNTTP->getLocation(),
+ diag::note_template_nontype_parm_prev_declaration)
+ << OldNTTP->getType();
+ }
+
+ return false;
+ }
+
+ return true;
+ }
+
+ // For template template parameters, check the template parameter lists:
+ // the template parameter lists of the template template parameters
+ // must agree.
+ if (TemplateTemplateParmDecl *OldTTP
+ = dyn_cast<TemplateTemplateParmDecl>(Old)) {
+ TemplateTemplateParmDecl *NewTTP = cast<TemplateTemplateParmDecl>(New);
+ return S.TemplateParameterListsAreEqual(NewTTP->getTemplateParameters(),
+ OldTTP->getTemplateParameters(),
+ Complain,
+ (Kind == Sema::TPL_TemplateMatch
+ ? Sema::TPL_TemplateTemplateParmMatch
+ : Kind),
+ TemplateArgLoc);
+ }
+
+ return true;
+}
+
+/// \brief Diagnose a known arity mismatch when comparing template argument
+/// lists.
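+///
+/// A minimal sketch (hypothetical declarations) of an arity mismatch that
+/// ends up here:
+///
+///   template<typename T> struct S;
+///   template<typename T, typename U> struct S { }; // error: too many
+///                                                  // template parameters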
+static
+void DiagnoseTemplateParameterListArityMismatch(Sema &S,
+ TemplateParameterList *New,
+ TemplateParameterList *Old,
+ Sema::TemplateParameterListEqualKind Kind,
+ SourceLocation TemplateArgLoc) {
+ unsigned NextDiag = diag::err_template_param_list_different_arity;
+ if (TemplateArgLoc.isValid()) {
+ S.Diag(TemplateArgLoc, diag::err_template_arg_template_params_mismatch);
+ NextDiag = diag::note_template_param_list_different_arity;
+ }
+ S.Diag(New->getTemplateLoc(), NextDiag)
+ << (New->size() > Old->size())
+ << (Kind != Sema::TPL_TemplateMatch)
+ << SourceRange(New->getTemplateLoc(), New->getRAngleLoc());
+ S.Diag(Old->getTemplateLoc(), diag::note_template_prev_declaration)
+ << (Kind != Sema::TPL_TemplateMatch)
+ << SourceRange(Old->getTemplateLoc(), Old->getRAngleLoc());
+}
+
+/// \brief Determine whether the given template parameter lists are
+/// equivalent.
+///
+/// \param New The new template parameter list, typically written in the
+/// source code as part of a new template declaration.
+///
+/// \param Old The old template parameter list, typically found via
+/// name lookup of the template declared with this template parameter
+/// list.
+///
+/// \param Complain If true, this routine will produce a diagnostic if
+/// the template parameter lists are not equivalent.
+///
+/// \param Kind describes how we are to match the template parameter lists.
+///
+/// \param TemplateArgLoc If this source location is valid, then we
+/// are actually checking the template parameter list of a template
+/// argument (New) against the template parameter list of its
+/// corresponding template template parameter (Old). We produce
+/// slightly different diagnostics in this scenario.
+///
+/// \returns True if the template parameter lists are equal, false
+/// otherwise.
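+///
+/// As an illustrative sketch (hypothetical declarations), a redeclaration
+/// whose parameter lists are not equivalent:
+///
+///   template<int N> struct A;
+///   template<long N> struct A { }; // error: non-type parameter has a
+///                                  // different type in the redeclaration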
+bool
+Sema::TemplateParameterListsAreEqual(TemplateParameterList *New,
+ TemplateParameterList *Old,
+ bool Complain,
+ TemplateParameterListEqualKind Kind,
+ SourceLocation TemplateArgLoc) {
+ if (Old->size() != New->size() && Kind != TPL_TemplateTemplateArgumentMatch) {
+ if (Complain)
+ DiagnoseTemplateParameterListArityMismatch(*this, New, Old, Kind,
+ TemplateArgLoc);
+
+ return false;
+ }
+
+ // C++0x [temp.arg.template]p3:
+ // A template-argument matches a template template-parameter (call it P)
+ // when each of the template parameters in the template-parameter-list of
+ // the template-argument's corresponding class template or alias template
+ // (call it A) matches the corresponding template parameter in the
+ // template-parameter-list of P. [...]
+ TemplateParameterList::iterator NewParm = New->begin();
+ TemplateParameterList::iterator NewParmEnd = New->end();
+ for (TemplateParameterList::iterator OldParm = Old->begin(),
+ OldParmEnd = Old->end();
+ OldParm != OldParmEnd; ++OldParm) {
+ if (Kind != TPL_TemplateTemplateArgumentMatch ||
+ !(*OldParm)->isTemplateParameterPack()) {
+ if (NewParm == NewParmEnd) {
+ if (Complain)
+ DiagnoseTemplateParameterListArityMismatch(*this, New, Old, Kind,
+ TemplateArgLoc);
+
+ return false;
+ }
+
+ if (!MatchTemplateParameterKind(*this, *NewParm, *OldParm, Complain,
+ Kind, TemplateArgLoc))
+ return false;
+
+ ++NewParm;
+ continue;
+ }
+
+ // C++0x [temp.arg.template]p3:
+ // [...] When P's template-parameter-list contains a template parameter
+ // pack (14.5.3), the template parameter pack will match zero or more
+ // template parameters or template parameter packs in the
+ // template-parameter-list of A with the same type and form as the
+ // template parameter pack in P (ignoring whether those template
+ // parameters are template parameter packs).
+ for (; NewParm != NewParmEnd; ++NewParm) {
+ if (!MatchTemplateParameterKind(*this, *NewParm, *OldParm, Complain,
+ Kind, TemplateArgLoc))
+ return false;
+ }
+ }
+
+ // Make sure we exhausted all of the arguments.
+ if (NewParm != NewParmEnd) {
+ if (Complain)
+ DiagnoseTemplateParameterListArityMismatch(*this, New, Old, Kind,
+ TemplateArgLoc);
+
+ return false;
+ }
+
+ return true;
+}
+
+/// \brief Check whether a template can be declared within this scope.
+///
+/// If the template declaration is valid in this scope, returns
+/// false. Otherwise, issues a diagnostic and returns true.
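+///
+/// For example (illustrative only), a declaration rejected by this check:
+///
+///   extern "C" {
+///     template<typename T> struct A; // error: templates must have
+///                                    // C++ linkage
+///   }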
+bool
+Sema::CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams) {
+ if (!S)
+ return false;
+
+ // Find the nearest enclosing declaration scope.
+ while ((S->getFlags() & Scope::DeclScope) == 0 ||
+ (S->getFlags() & Scope::TemplateParamScope) != 0)
+ S = S->getParent();
+
+ // C++ [temp]p2:
+ // A template-declaration can appear only as a namespace scope or
+ // class scope declaration.
+ DeclContext *Ctx = static_cast<DeclContext *>(S->getEntity());
+ if (Ctx && isa<LinkageSpecDecl>(Ctx) &&
+ cast<LinkageSpecDecl>(Ctx)->getLanguage() != LinkageSpecDecl::lang_cxx)
+ return Diag(TemplateParams->getTemplateLoc(), diag::err_template_linkage)
+ << TemplateParams->getSourceRange();
+
+ while (Ctx && isa<LinkageSpecDecl>(Ctx))
+ Ctx = Ctx->getParent();
+
+ if (Ctx && (Ctx->isFileContext() || Ctx->isRecord()))
+ return false;
+
+ return Diag(TemplateParams->getTemplateLoc(),
+ diag::err_template_outside_namespace_or_class_scope)
+ << TemplateParams->getSourceRange();
+}
+
+/// \brief Determine what kind of template specialization the given declaration
+/// is.
+static TemplateSpecializationKind getTemplateSpecializationKind(Decl *D) {
+ if (!D)
+ return TSK_Undeclared;
+
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D))
+ return Record->getTemplateSpecializationKind();
+ if (FunctionDecl *Function = dyn_cast<FunctionDecl>(D))
+ return Function->getTemplateSpecializationKind();
+ if (VarDecl *Var = dyn_cast<VarDecl>(D))
+ return Var->getTemplateSpecializationKind();
+
+ return TSK_Undeclared;
+}
+
+/// \brief Check whether a specialization is well-formed in the current
+/// context.
+///
+/// This routine determines whether a template specialization can be declared
+/// in the current context (C++ [temp.expl.spec]p2).
+///
+/// \param S the semantic analysis object for which this check is being
+/// performed.
+///
+/// \param Specialized the entity being specialized or instantiated, which
+/// may be a kind of template (class template, function template, etc.) or
+/// a member of a class template (member function, static data member,
+/// member class).
+///
+/// \param PrevDecl the previous declaration of this entity, if any.
+///
+/// \param Loc the location of the explicit specialization or instantiation of
+/// this entity.
+///
+/// \param IsPartialSpecialization whether this is a partial specialization of
+/// a class template.
+///
+/// \returns true if there was an error that we cannot recover from, false
+/// otherwise.
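+///
+/// A hypothetical example of the scope rule being enforced:
+///
+///   namespace N { template<typename T> struct X { }; }
+///   template<> struct N::X<int> { }; // C++11: OK, the global namespace
+///                                    // encloses N; C++98: diagnosed as an
+///                                    // extension, since the specialization
+///                                    // is not declared within N itself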
+static bool CheckTemplateSpecializationScope(Sema &S,
+ NamedDecl *Specialized,
+ NamedDecl *PrevDecl,
+ SourceLocation Loc,
+ bool IsPartialSpecialization) {
+ // Keep these "kind" numbers in sync with the %select statements in the
+ // various diagnostics emitted by this routine.
+ int EntityKind = 0;
+ if (isa<ClassTemplateDecl>(Specialized))
+ EntityKind = IsPartialSpecialization? 1 : 0;
+ else if (isa<FunctionTemplateDecl>(Specialized))
+ EntityKind = 2;
+ else if (isa<CXXMethodDecl>(Specialized))
+ EntityKind = 3;
+ else if (isa<VarDecl>(Specialized))
+ EntityKind = 4;
+ else if (isa<RecordDecl>(Specialized))
+ EntityKind = 5;
+ else if (isa<EnumDecl>(Specialized) && S.getLangOpts().CPlusPlus0x)
+ EntityKind = 6;
+ else {
+ S.Diag(Loc, diag::err_template_spec_unknown_kind)
+ << S.getLangOpts().CPlusPlus0x;
+ S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
+ return true;
+ }
+
+ // C++ [temp.expl.spec]p2:
+ // An explicit specialization shall be declared in the namespace
+ // of which the template is a member, or, for member templates, in
+ // the namespace of which the enclosing class or enclosing class
+ // template is a member. An explicit specialization of a member
+ // function, member class or static data member of a class
+ // template shall be declared in the namespace of which the class
+ // template is a member. Such a declaration may also be a
+ // definition. If the declaration is not a definition, the
+ // specialization may be defined later in the namespace in which
+ // the explicit specialization was declared, or in a namespace
+ // that encloses the one in which the explicit specialization was
+ // declared.
+ if (S.CurContext->getRedeclContext()->isFunctionOrMethod()) {
+ S.Diag(Loc, diag::err_template_spec_decl_function_scope)
+ << Specialized;
+ return true;
+ }
+
+ if (S.CurContext->isRecord() && !IsPartialSpecialization) {
+ if (S.getLangOpts().MicrosoftExt) {
+ // Do not warn for class-scope explicit specialization during
+ // instantiation; the warning was already emitted during semantic
+ // analysis of the pattern.
+ if (!S.ActiveTemplateInstantiations.size())
+ S.Diag(Loc, diag::ext_function_specialization_in_class)
+ << Specialized;
+ } else {
+ S.Diag(Loc, diag::err_template_spec_decl_class_scope)
+ << Specialized;
+ return true;
+ }
+ }
+
+ if (S.CurContext->isRecord() &&
+ !S.CurContext->Equals(Specialized->getDeclContext())) {
+ // Make sure that we're specializing in the right record context.
+ // Otherwise, things can go horribly wrong.
+ S.Diag(Loc, diag::err_template_spec_decl_class_scope)
+ << Specialized;
+ return true;
+ }
+
+ // C++ [temp.class.spec]p6:
+ // A class template partial specialization may be declared or redeclared
+ // in any namespace scope in which its definition may be defined (14.5.1
+ // and 14.5.2).
+ bool ComplainedAboutScope = false;
+ DeclContext *SpecializedContext
+ = Specialized->getDeclContext()->getEnclosingNamespaceContext();
+ DeclContext *DC = S.CurContext->getEnclosingNamespaceContext();
+ if ((!PrevDecl ||
+ getTemplateSpecializationKind(PrevDecl) == TSK_Undeclared ||
+ getTemplateSpecializationKind(PrevDecl) == TSK_ImplicitInstantiation)){
+ // C++ [temp.expl.spec]p2:
+ // An explicit specialization shall be declared in the namespace of which
+ // the template is a member, or, for member templates, in the namespace
+ // of which the enclosing class or enclosing class template is a member.
+ // An explicit specialization of a member function, member class or
+ // static data member of a class template shall be declared in the
+ // namespace of which the class template is a member.
+ //
+ // C++0x [temp.expl.spec]p2:
+ // An explicit specialization shall be declared in a namespace enclosing
+ // the specialized template.
+ if (!DC->InEnclosingNamespaceSetOf(SpecializedContext)) {
+ bool IsCPlusPlus0xExtension = DC->Encloses(SpecializedContext);
+ if (isa<TranslationUnitDecl>(SpecializedContext)) {
+ assert(!IsCPlusPlus0xExtension &&
+ "DC encloses TU but isn't in enclosing namespace set");
+ S.Diag(Loc, diag::err_template_spec_decl_out_of_scope_global)
+ << EntityKind << Specialized;
+ } else if (isa<NamespaceDecl>(SpecializedContext)) {
+ int Diag;
+ if (!IsCPlusPlus0xExtension)
+ Diag = diag::err_template_spec_decl_out_of_scope;
+ else if (!S.getLangOpts().CPlusPlus0x)
+ Diag = diag::ext_template_spec_decl_out_of_scope;
+ else
+ Diag = diag::warn_cxx98_compat_template_spec_decl_out_of_scope;
+ S.Diag(Loc, Diag)
+ << EntityKind << Specialized << cast<NamedDecl>(SpecializedContext);
+ }
+
+ S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
+ ComplainedAboutScope =
+ !(IsCPlusPlus0xExtension && S.getLangOpts().CPlusPlus0x);
+ }
+ }
+
+ // Make sure that this redeclaration (or definition) occurs in an enclosing
+ // namespace.
+ // Note that HandleDeclarator() performs this check for explicit
+ // specializations of function templates, static data members, and member
+ // functions, so we skip the check here for those kinds of entities.
+ // FIXME: HandleDeclarator's diagnostics aren't quite as good, though.
+ // Should we refactor that check, so that it occurs later?
+ if (!ComplainedAboutScope && !DC->Encloses(SpecializedContext) &&
+ !(isa<FunctionTemplateDecl>(Specialized) || isa<VarDecl>(Specialized) ||
+ isa<FunctionDecl>(Specialized))) {
+ if (isa<TranslationUnitDecl>(SpecializedContext))
+ S.Diag(Loc, diag::err_template_spec_redecl_global_scope)
+ << EntityKind << Specialized;
+ else if (isa<NamespaceDecl>(SpecializedContext))
+ S.Diag(Loc, diag::err_template_spec_redecl_out_of_scope)
+ << EntityKind << Specialized
+ << cast<NamedDecl>(SpecializedContext);
+
+ S.Diag(Specialized->getLocation(), diag::note_specialized_entity);
+ }
+
+ // FIXME: check for specialization-after-instantiation errors and such.
+
+ return false;
+}
+
+/// \brief Subroutine of Sema::CheckClassTemplatePartialSpecializationArgs
+/// that checks non-type template partial specialization arguments.
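+///
+/// For illustration (this mirrors the example in C++ [temp.class.spec]p9):
+///
+///   template<int I, int J> struct A { };
+///   template<int I> struct A<I + 5, I * 2> { }; // error: the arguments
+///                                               // involve a template
+///                                               // parameter but are not
+///                                               // simple identifiers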
+static bool CheckNonTypeClassTemplatePartialSpecializationArgs(Sema &S,
+ NonTypeTemplateParmDecl *Param,
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ if (Args[I].getKind() == TemplateArgument::Pack) {
+ if (CheckNonTypeClassTemplatePartialSpecializationArgs(S, Param,
+ Args[I].pack_begin(),
+ Args[I].pack_size()))
+ return true;
+
+ continue;
+ }
+
+ Expr *ArgExpr = Args[I].getAsExpr();
+ if (!ArgExpr) {
+ continue;
+ }
+
+ // We can have a pack expansion of any of the bullets below.
+ if (PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(ArgExpr))
+ ArgExpr = Expansion->getPattern();
+
+ // Strip off any implicit casts we added as part of type checking.
+ while (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(ArgExpr))
+ ArgExpr = ICE->getSubExpr();
+
+ // C++ [temp.class.spec]p8:
+ // A non-type argument is non-specialized if it is the name of a
+ // non-type parameter. All other non-type arguments are
+ // specialized.
+ //
+ // Below, we check the two conditions that only apply to
+ // specialized non-type arguments, so skip any non-specialized
+ // arguments.
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ArgExpr))
+ if (isa<NonTypeTemplateParmDecl>(DRE->getDecl()))
+ continue;
+
+ // C++ [temp.class.spec]p9:
+ // Within the argument list of a class template partial
+ // specialization, the following restrictions apply:
+ // -- A partially specialized non-type argument expression
+ // shall not involve a template parameter of the partial
+ // specialization except when the argument expression is a
+ // simple identifier.
+ if (ArgExpr->isTypeDependent() || ArgExpr->isValueDependent()) {
+ S.Diag(ArgExpr->getLocStart(),
+ diag::err_dependent_non_type_arg_in_partial_spec)
+ << ArgExpr->getSourceRange();
+ return true;
+ }
+
+ // -- The type of a template parameter corresponding to a
+ // specialized non-type argument shall not be dependent on a
+ // parameter of the specialization.
+ if (Param->getType()->isDependentType()) {
+ S.Diag(ArgExpr->getLocStart(),
+ diag::err_dependent_typed_non_type_arg_in_partial_spec)
+ << Param->getType()
+ << ArgExpr->getSourceRange();
+ S.Diag(Param->getLocation(), diag::note_template_param_here);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// \brief Check the non-type template arguments of a class template
+/// partial specialization according to C++ [temp.class.spec]p9.
+///
+/// \param TemplateParams the template parameters of the primary class
+/// template.
+///
+/// \param TemplateArgs the template arguments of the class template
+/// partial specialization.
+///
+/// \returns true if there was an error, false otherwise.
+static bool CheckClassTemplatePartialSpecializationArgs(Sema &S,
+ TemplateParameterList *TemplateParams,
+ SmallVectorImpl<TemplateArgument> &TemplateArgs) {
+ const TemplateArgument *ArgList = TemplateArgs.data();
+
+ for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
+ NonTypeTemplateParmDecl *Param
+ = dyn_cast<NonTypeTemplateParmDecl>(TemplateParams->getParam(I));
+ if (!Param)
+ continue;
+
+ if (CheckNonTypeClassTemplatePartialSpecializationArgs(S, Param,
+ &ArgList[I], 1))
+ return true;
+ }
+
+ return false;
+}
+
+DeclResult
+Sema::ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec,
+ TagUseKind TUK,
+ SourceLocation KWLoc,
+ SourceLocation ModulePrivateLoc,
+ CXXScopeSpec &SS,
+ TemplateTy TemplateD,
+ SourceLocation TemplateNameLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation RAngleLoc,
+ AttributeList *Attr,
+ MultiTemplateParamsArg TemplateParameterLists) {
+ assert(TUK != TUK_Reference && "References are not specializations");
+
+ // NOTE: KWLoc is the location of the tag keyword; TemplateKWLoc instead
+ // stores the location of the outermost template keyword in the declaration.
+ SourceLocation TemplateKWLoc = TemplateParameterLists.size() > 0
+ ? TemplateParameterLists.get()[0]->getTemplateLoc() : SourceLocation();
+
+ // Find the class template we're specializing
+ TemplateName Name = TemplateD.getAsVal<TemplateName>();
+ ClassTemplateDecl *ClassTemplate
+ = dyn_cast_or_null<ClassTemplateDecl>(Name.getAsTemplateDecl());
+
+ if (!ClassTemplate) {
+ Diag(TemplateNameLoc, diag::err_not_class_template_specialization)
+ << (Name.getAsTemplateDecl() &&
+ isa<TemplateTemplateParmDecl>(Name.getAsTemplateDecl()));
+ return true;
+ }
+
+ bool isExplicitSpecialization = false;
+ bool isPartialSpecialization = false;
+
+ // Check the validity of the template headers that introduce this
+ // template.
+ // FIXME: We probably shouldn't complain about these headers for
+ // friend declarations.
+ bool Invalid = false;
+ TemplateParameterList *TemplateParams
+ = MatchTemplateParametersToScopeSpecifier(TemplateNameLoc,
+ TemplateNameLoc,
+ SS,
+ (TemplateParameterList**)TemplateParameterLists.get(),
+ TemplateParameterLists.size(),
+ TUK == TUK_Friend,
+ isExplicitSpecialization,
+ Invalid);
+ if (Invalid)
+ return true;
+
+ if (TemplateParams && TemplateParams->size() > 0) {
+ isPartialSpecialization = true;
+
+ if (TUK == TUK_Friend) {
+ Diag(KWLoc, diag::err_partial_specialization_friend)
+ << SourceRange(LAngleLoc, RAngleLoc);
+ return true;
+ }
+
+ // C++ [temp.class.spec]p10:
+ // The template parameter list of a specialization shall not
+ // contain default template argument values.
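+ // For example (illustrative only), given a primary template X, the partial
+ // specialization "template<typename T = int> struct X<T*> { };" is
+ // ill-formed because of the default argument on T.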
+ for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
+ Decl *Param = TemplateParams->getParam(I);
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
+ if (TTP->hasDefaultArgument()) {
+ Diag(TTP->getDefaultArgumentLoc(),
+ diag::err_default_arg_in_partial_spec);
+ TTP->removeDefaultArgument();
+ }
+ } else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ if (Expr *DefArg = NTTP->getDefaultArgument()) {
+ Diag(NTTP->getDefaultArgumentLoc(),
+ diag::err_default_arg_in_partial_spec)
+ << DefArg->getSourceRange();
+ NTTP->removeDefaultArgument();
+ }
+ } else {
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(Param);
+ if (TTP->hasDefaultArgument()) {
+ Diag(TTP->getDefaultArgument().getLocation(),
+ diag::err_default_arg_in_partial_spec)
+ << TTP->getDefaultArgument().getSourceRange();
+ TTP->removeDefaultArgument();
+ }
+ }
+ }
+ } else if (TemplateParams) {
+ if (TUK == TUK_Friend)
+ Diag(KWLoc, diag::err_template_spec_friend)
+ << FixItHint::CreateRemoval(
+ SourceRange(TemplateParams->getTemplateLoc(),
+ TemplateParams->getRAngleLoc()))
+ << SourceRange(LAngleLoc, RAngleLoc);
+ else
+ isExplicitSpecialization = true;
+ } else if (TUK != TUK_Friend) {
+ Diag(KWLoc, diag::err_template_spec_needs_header)
+ << FixItHint::CreateInsertion(KWLoc, "template<> ");
+ isExplicitSpecialization = true;
+ }
+
+ // Check that the specialization uses the same tag kind as the
+ // original template.
+ TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
+ assert(Kind != TTK_Enum && "Invalid enum tag in class template spec!");
+ if (!isAcceptableTagRedeclaration(ClassTemplate->getTemplatedDecl(),
+ Kind, TUK == TUK_Definition, KWLoc,
+ *ClassTemplate->getIdentifier())) {
+ Diag(KWLoc, diag::err_use_with_wrong_tag)
+ << ClassTemplate
+ << FixItHint::CreateReplacement(KWLoc,
+ ClassTemplate->getTemplatedDecl()->getKindName());
+ Diag(ClassTemplate->getTemplatedDecl()->getLocation(),
+ diag::note_previous_use);
+ Kind = ClassTemplate->getTemplatedDecl()->getTagKind();
+ }
+
+ // Translate the parser's template argument list in our AST format.
+ TemplateArgumentListInfo TemplateArgs;
+ TemplateArgs.setLAngleLoc(LAngleLoc);
+ TemplateArgs.setRAngleLoc(RAngleLoc);
+ translateTemplateArguments(TemplateArgsIn, TemplateArgs);
+
+ // Check for unexpanded parameter packs in any of the template arguments.
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ if (DiagnoseUnexpandedParameterPack(TemplateArgs[I],
+ UPPC_PartialSpecialization))
+ return true;
+
+ // Check that the template argument list is well-formed for this
+ // template.
+ SmallVector<TemplateArgument, 4> Converted;
+ if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc,
+ TemplateArgs, false, Converted))
+ return true;
+
+ // Find the class template (partial) specialization declaration that
+ // corresponds to these arguments.
+ if (isPartialSpecialization) {
+ if (CheckClassTemplatePartialSpecializationArgs(*this,
+ ClassTemplate->getTemplateParameters(),
+ Converted))
+ return true;
+
+ bool InstantiationDependent;
+ if (!Name.isDependent() &&
+ !TemplateSpecializationType::anyDependentTemplateArguments(
+ TemplateArgs.getArgumentArray(),
+ TemplateArgs.size(),
+ InstantiationDependent)) {
+ Diag(TemplateNameLoc, diag::err_partial_spec_fully_specialized)
+ << ClassTemplate->getDeclName();
+ isPartialSpecialization = false;
+ }
+ }
+
+ void *InsertPos = 0;
+ ClassTemplateSpecializationDecl *PrevDecl = 0;
+
+ if (isPartialSpecialization)
+ // FIXME: Template parameter list matters, too
+ PrevDecl
+ = ClassTemplate->findPartialSpecialization(Converted.data(),
+ Converted.size(),
+ InsertPos);
+ else
+ PrevDecl
+ = ClassTemplate->findSpecialization(Converted.data(),
+ Converted.size(), InsertPos);
+
+ ClassTemplateSpecializationDecl *Specialization = 0;
+
+ // Check whether we can declare a class template specialization in
+ // the current scope.
+ if (TUK != TUK_Friend &&
+ CheckTemplateSpecializationScope(*this, ClassTemplate, PrevDecl,
+ TemplateNameLoc,
+ isPartialSpecialization))
+ return true;
+
+ // The canonical type
+ QualType CanonType;
+ if (PrevDecl &&
+ (PrevDecl->getSpecializationKind() == TSK_Undeclared ||
+ TUK == TUK_Friend)) {
+ // Since the only prior class template specialization with these
+ // arguments was referenced but not declared, or we're only
+ // referencing this specialization as a friend, reuse that
+ // declaration node as our own, updating its source location and
+ // the list of outer template parameters to reflect our new declaration.
+ Specialization = PrevDecl;
+ Specialization->setLocation(TemplateNameLoc);
+ if (TemplateParameterLists.size() > 0) {
+ Specialization->setTemplateParameterListsInfo(Context,
+ TemplateParameterLists.size(),
+ (TemplateParameterList**) TemplateParameterLists.release());
+ }
+ PrevDecl = 0;
+ CanonType = Context.getTypeDeclType(Specialization);
+ } else if (isPartialSpecialization) {
+ // Build the canonical type that describes the converted template
+ // arguments of the class template partial specialization.
+ TemplateName CanonTemplate = Context.getCanonicalTemplateName(Name);
+ CanonType = Context.getTemplateSpecializationType(CanonTemplate,
+ Converted.data(),
+ Converted.size());
+
+ if (Context.hasSameType(CanonType,
+ ClassTemplate->getInjectedClassNameSpecialization())) {
+ // C++ [temp.class.spec]p9b3:
+ //
+ // -- The argument list of the specialization shall not be identical
+ // to the implicit argument list of the primary template.
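+ // For example (illustrative only), given "template<typename T> struct X;",
+ // the partial specialization "template<typename T> struct X<T> { };" has
+ // an argument list identical to the primary template's and is rejected
+ // here.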
+ Diag(TemplateNameLoc, diag::err_partial_spec_args_match_primary_template)
+ << (TUK == TUK_Definition)
+ << FixItHint::CreateRemoval(SourceRange(LAngleLoc, RAngleLoc));
+ return CheckClassTemplate(S, TagSpec, TUK, KWLoc, SS,
+ ClassTemplate->getIdentifier(),
+ TemplateNameLoc,
+ Attr,
+ TemplateParams,
+ AS_none, /*ModulePrivateLoc=*/SourceLocation(),
+ TemplateParameterLists.size() - 1,
+ (TemplateParameterList**) TemplateParameterLists.release());
+ }
+
+ // Create a new class template partial specialization declaration node.
+ ClassTemplatePartialSpecializationDecl *PrevPartial
+ = cast_or_null<ClassTemplatePartialSpecializationDecl>(PrevDecl);
+ unsigned SequenceNumber = PrevPartial? PrevPartial->getSequenceNumber()
+ : ClassTemplate->getNextPartialSpecSequenceNumber();
+ ClassTemplatePartialSpecializationDecl *Partial
+ = ClassTemplatePartialSpecializationDecl::Create(Context, Kind,
+ ClassTemplate->getDeclContext(),
+ KWLoc, TemplateNameLoc,
+ TemplateParams,
+ ClassTemplate,
+ Converted.data(),
+ Converted.size(),
+ TemplateArgs,
+ CanonType,
+ PrevPartial,
+ SequenceNumber);
+ SetNestedNameSpecifier(Partial, SS);
+ if (TemplateParameterLists.size() > 1 && SS.isSet()) {
+ Partial->setTemplateParameterListsInfo(Context,
+ TemplateParameterLists.size() - 1,
+ (TemplateParameterList**) TemplateParameterLists.release());
+ }
+
+ if (!PrevPartial)
+ ClassTemplate->AddPartialSpecialization(Partial, InsertPos);
+ Specialization = Partial;
+
+ // If we are providing an explicit specialization of a member class
+ // template specialization, make a note of that.
+ if (PrevPartial && PrevPartial->getInstantiatedFromMember())
+ PrevPartial->setMemberSpecialization();
+
+ // Check that all of the template parameters of the class template
+ // partial specialization are deducible from the template
+ // arguments. If not, this class template partial specialization
+ // will never be used.
+ llvm::SmallBitVector DeducibleParams(TemplateParams->size());
+ MarkUsedTemplateParameters(Partial->getTemplateArgs(), true,
+ TemplateParams->getDepth(),
+ DeducibleParams);
+
+ if (!DeducibleParams.all()) {
+ unsigned NumNonDeducible = DeducibleParams.size()-DeducibleParams.count();
+ Diag(TemplateNameLoc, diag::warn_partial_specs_not_deducible)
+ << (NumNonDeducible > 1)
+ << SourceRange(TemplateNameLoc, RAngleLoc);
+ for (unsigned I = 0, N = DeducibleParams.size(); I != N; ++I) {
+ if (!DeducibleParams[I]) {
+ NamedDecl *Param = cast<NamedDecl>(TemplateParams->getParam(I));
+ if (Param->getDeclName())
+ Diag(Param->getLocation(),
+ diag::note_partial_spec_unused_parameter)
+ << Param->getDeclName();
+ else
+ Diag(Param->getLocation(),
+ diag::note_partial_spec_unused_parameter)
+ << "<anonymous>";
+ }
+ }
+ }
+ } else {
+ // Create a new class template specialization declaration node for
+ // this explicit specialization or friend declaration.
+ Specialization
+ = ClassTemplateSpecializationDecl::Create(Context, Kind,
+ ClassTemplate->getDeclContext(),
+ KWLoc, TemplateNameLoc,
+ ClassTemplate,
+ Converted.data(),
+ Converted.size(),
+ PrevDecl);
+ SetNestedNameSpecifier(Specialization, SS);
+ if (TemplateParameterLists.size() > 0) {
+ Specialization->setTemplateParameterListsInfo(Context,
+ TemplateParameterLists.size(),
+ (TemplateParameterList**) TemplateParameterLists.release());
+ }
+
+ if (!PrevDecl)
+ ClassTemplate->AddSpecialization(Specialization, InsertPos);
+
+ CanonType = Context.getTypeDeclType(Specialization);
+ }
+
+ // C++ [temp.expl.spec]p6:
+ // If a template, a member template or the member of a class template is
+ // explicitly specialized then that specialization shall be declared
+ // before the first use of that specialization that would cause an implicit
+ // instantiation to take place, in every translation unit in which such a
+ // use occurs; no diagnostic is required.
+ if (PrevDecl && PrevDecl->getPointOfInstantiation().isValid()) {
+ bool Okay = false;
+ for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) {
+ // Is there any previous explicit specialization declaration?
+ if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization) {
+ Okay = true;
+ break;
+ }
+ }
+
+ if (!Okay) {
+ SourceRange Range(TemplateNameLoc, RAngleLoc);
+ Diag(TemplateNameLoc, diag::err_specialization_after_instantiation)
+ << Context.getTypeDeclType(Specialization) << Range;
+
+ Diag(PrevDecl->getPointOfInstantiation(),
+ diag::note_instantiation_required_here)
+ << (PrevDecl->getTemplateSpecializationKind()
+ != TSK_ImplicitInstantiation);
+ return true;
+ }
+ }
+
+ // If this is not a friend, note that this is an explicit specialization.
+ if (TUK != TUK_Friend)
+ Specialization->setSpecializationKind(TSK_ExplicitSpecialization);
+
+ // Check that this isn't a redefinition of this specialization.
+ if (TUK == TUK_Definition) {
+ if (RecordDecl *Def = Specialization->getDefinition()) {
+ SourceRange Range(TemplateNameLoc, RAngleLoc);
+ Diag(TemplateNameLoc, diag::err_redefinition)
+ << Context.getTypeDeclType(Specialization) << Range;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ Specialization->setInvalidDecl();
+ return true;
+ }
+ }
+
+ if (Attr)
+ ProcessDeclAttributeList(S, Specialization, Attr);
+
+ if (ModulePrivateLoc.isValid())
+ Diag(Specialization->getLocation(), diag::err_module_private_specialization)
+ << (isPartialSpecialization? 1 : 0)
+ << FixItHint::CreateRemoval(ModulePrivateLoc);
+
+ // Build the fully-sugared type for this class template
+ // specialization as the user wrote in the specialization
+ // itself. This means that we'll pretty-print the type retrieved
+ // from the specialization's declaration the way that the user
+ // actually wrote the specialization, rather than formatting the
+ // name based on the "canonical" representation used to store the
+ // template arguments in the specialization.
+ TypeSourceInfo *WrittenTy
+ = Context.getTemplateSpecializationTypeInfo(Name, TemplateNameLoc,
+ TemplateArgs, CanonType);
+ if (TUK != TUK_Friend) {
+ Specialization->setTypeAsWritten(WrittenTy);
+ Specialization->setTemplateKeywordLoc(TemplateKWLoc);
+ }
+ TemplateArgsIn.release();
+
+ // C++ [temp.expl.spec]p9:
+ // A template explicit specialization is in the scope of the
+ // namespace in which the template was defined.
+ //
+ // We actually implement this paragraph where we set the semantic
+ // context (in the creation of the ClassTemplateSpecializationDecl),
+ // but we also maintain the lexical context where the actual
+ // definition occurs.
+ Specialization->setLexicalDeclContext(CurContext);
+
+ // We may be starting the definition of this specialization.
+ if (TUK == TUK_Definition)
+ Specialization->startDefinition();
+
+ if (TUK == TUK_Friend) {
+ FriendDecl *Friend = FriendDecl::Create(Context, CurContext,
+ TemplateNameLoc,
+ WrittenTy,
+ /*FIXME:*/KWLoc);
+ Friend->setAccess(AS_public);
+ CurContext->addDecl(Friend);
+ } else {
+ // Add the specialization into its lexical context, so that it can
+ // be seen when iterating through the list of declarations in that
+ // context. However, specializations are not found by name lookup.
+ CurContext->addDecl(Specialization);
+ }
+ return Specialization;
+}
+
+Decl *Sema::ActOnTemplateDeclarator(Scope *S,
+ MultiTemplateParamsArg TemplateParameterLists,
+ Declarator &D) {
+ return HandleDeclarator(S, D, move(TemplateParameterLists));
+}
+
+Decl *Sema::ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope,
+ MultiTemplateParamsArg TemplateParameterLists,
+ Declarator &D) {
+ assert(getCurFunctionDecl() == 0 && "Function parsing confused");
+ DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
+
+ if (FTI.hasPrototype) {
+ // FIXME: Diagnose arguments without names in C.
+ }
+
+ Scope *ParentScope = FnBodyScope->getParent();
+
+ D.setFunctionDefinitionKind(FDK_Definition);
+ Decl *DP = HandleDeclarator(ParentScope, D,
+ move(TemplateParameterLists));
+ if (FunctionTemplateDecl *FunctionTemplate
+ = dyn_cast_or_null<FunctionTemplateDecl>(DP))
+ return ActOnStartOfFunctionDef(FnBodyScope,
+ FunctionTemplate->getTemplatedDecl());
+ if (FunctionDecl *Function = dyn_cast_or_null<FunctionDecl>(DP))
+ return ActOnStartOfFunctionDef(FnBodyScope, Function);
+ return 0;
+}
+
+/// \brief Strips various properties off an implicit instantiation
+/// that has just been explicitly specialized.
+static void StripImplicitInstantiation(NamedDecl *D) {
+ // FIXME: "make check" is clean if the call to dropAttrs() is commented out.
+ D->dropAttrs();
+
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ FD->setInlineSpecified(false);
+ }
+}
+
+/// \brief Compute the diagnostic location for an explicit instantiation
+/// declaration or definition.
+static SourceLocation DiagLocForExplicitInstantiation(
+ NamedDecl* D, SourceLocation PointOfInstantiation) {
+ // Explicit instantiations following a specialization have no effect and
+ // hence no PointOfInstantiation. In that case, walk decl backwards
+ // until a valid name loc is found.
+ SourceLocation PrevDiagLoc = PointOfInstantiation;
+ for (Decl *Prev = D; Prev && !PrevDiagLoc.isValid();
+ Prev = Prev->getPreviousDecl()) {
+ PrevDiagLoc = Prev->getLocation();
+ }
+ assert(PrevDiagLoc.isValid() &&
+ "Explicit instantiation without point of instantiation?");
+ return PrevDiagLoc;
+}
+
+/// \brief Diagnose cases where we have an explicit template specialization
+/// before/after an explicit template instantiation, producing diagnostics
+/// for those cases where they are required and determining whether the
+/// new specialization/instantiation will have any effect.
+///
+/// \param NewLoc the location of the new explicit specialization or
+/// instantiation.
+///
+/// \param NewTSK the kind of the new explicit specialization or instantiation.
+///
+/// \param PrevDecl the previous declaration of the entity.
+///
+/// \param PrevTSK the kind of the old explicit specialization or instantiation.
+///
+/// \param PrevPointOfInstantiation if valid, indicates where the previous
+/// declaration was instantiated (either implicitly or explicitly).
+///
+/// \param HasNoEffect will be set to true to indicate that the new
+/// specialization or instantiation has no effect and should be ignored.
+///
+/// \returns true if there was an error that should prevent the introduction of
+/// the new declaration into the AST, false otherwise.
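+///
+/// A minimal sketch (hypothetical declarations) of one case diagnosed here:
+///
+///   template<typename T> struct X { };
+///   template struct X<int>;       // explicit instantiation definition
+///   template<> struct X<int> { }; // error: specialization after
+///                                 // instantiation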
+bool
+Sema::CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
+ TemplateSpecializationKind NewTSK,
+ NamedDecl *PrevDecl,
+ TemplateSpecializationKind PrevTSK,
+ SourceLocation PrevPointOfInstantiation,
+ bool &HasNoEffect) {
+ HasNoEffect = false;
+
+ switch (NewTSK) {
+ case TSK_Undeclared:
+ case TSK_ImplicitInstantiation:
+ llvm_unreachable("Don't check implicit instantiations here");
+
+ case TSK_ExplicitSpecialization:
+ switch (PrevTSK) {
+ case TSK_Undeclared:
+ case TSK_ExplicitSpecialization:
+ // Okay, we're just specializing something that is either already
+ // explicitly specialized or has merely been mentioned without any
+ // instantiation.
+ return false;
+
+ case TSK_ImplicitInstantiation:
+ if (PrevPointOfInstantiation.isInvalid()) {
+ // The declaration itself has not actually been instantiated, so it is
+ // still okay to specialize it.
+ StripImplicitInstantiation(PrevDecl);
+ return false;
+ }
+ // Fall through
+
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ExplicitInstantiationDefinition:
+ assert((PrevTSK == TSK_ImplicitInstantiation ||
+ PrevPointOfInstantiation.isValid()) &&
+ "Explicit instantiation without point of instantiation?");
+
+ // C++ [temp.expl.spec]p6:
+ // If a template, a member template or the member of a class template
+ // is explicitly specialized then that specialization shall be declared
+ // before the first use of that specialization that would cause an
+ // implicit instantiation to take place, in every translation unit in
+ // which such a use occurs; no diagnostic is required.
+ for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) {
+ // Is there any previous explicit specialization declaration?
+ if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization)
+ return false;
+ }
+
+ Diag(NewLoc, diag::err_specialization_after_instantiation)
+ << PrevDecl;
+ Diag(PrevPointOfInstantiation, diag::note_instantiation_required_here)
+ << (PrevTSK != TSK_ImplicitInstantiation);
+
+ return true;
+ }
+
+ case TSK_ExplicitInstantiationDeclaration:
+ switch (PrevTSK) {
+ case TSK_ExplicitInstantiationDeclaration:
+ // This explicit instantiation declaration is redundant (that's okay).
+ HasNoEffect = true;
+ return false;
+
+ case TSK_Undeclared:
+ case TSK_ImplicitInstantiation:
+ // We're explicitly instantiating something that may have already been
+ // implicitly instantiated; that's fine.
+ return false;
+
+ case TSK_ExplicitSpecialization:
+ // C++0x [temp.explicit]p4:
+ // For a given set of template parameters, if an explicit instantiation
+ // of a template appears after a declaration of an explicit
+ // specialization for that template, the explicit instantiation has no
+ // effect.
+ HasNoEffect = true;
+ return false;
+
+ case TSK_ExplicitInstantiationDefinition:
+ // C++0x [temp.explicit]p10:
+ // If an entity is the subject of both an explicit instantiation
+ // declaration and an explicit instantiation definition in the same
+ // translation unit, the definition shall follow the declaration.
+ Diag(NewLoc,
+ diag::err_explicit_instantiation_declaration_after_definition);
+
+ // Explicit instantiations following a specialization have no effect and
+ // hence no PrevPointOfInstantiation. In that case, walk decl backwards
+ // until a valid name loc is found.
+ Diag(DiagLocForExplicitInstantiation(PrevDecl, PrevPointOfInstantiation),
+ diag::note_explicit_instantiation_definition_here);
+ HasNoEffect = true;
+ return false;
+ }
+
+ case TSK_ExplicitInstantiationDefinition:
+ switch (PrevTSK) {
+ case TSK_Undeclared:
+ case TSK_ImplicitInstantiation:
+ // We're explicitly instantiating something that may have already been
+ // implicitly instantiated; that's fine.
+ return false;
+
+ case TSK_ExplicitSpecialization:
+ // C++ DR 259, C++0x [temp.explicit]p4:
+ // For a given set of template parameters, if an explicit
+ // instantiation of a template appears after a declaration of
+ // an explicit specialization for that template, the explicit
+ // instantiation has no effect.
+ //
+ // In C++98/03 mode, we only give an extension warning here, because it
+ // is not harmful to try to explicitly instantiate something that
+ // has been explicitly specialized.
+ Diag(NewLoc, getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_explicit_instantiation_after_specialization :
+ diag::ext_explicit_instantiation_after_specialization)
+ << PrevDecl;
+ Diag(PrevDecl->getLocation(),
+ diag::note_previous_template_specialization);
+ HasNoEffect = true;
+ return false;
+
+ case TSK_ExplicitInstantiationDeclaration:
+ // We're explicitly instantiating a definition for something for which we
+ // were previously asked to suppress instantiations. That's fine.
+
+ // C++0x [temp.explicit]p4:
+ // For a given set of template parameters, if an explicit instantiation
+ // of a template appears after a declaration of an explicit
+ // specialization for that template, the explicit instantiation has no
+ // effect.
+ for (Decl *Prev = PrevDecl; Prev; Prev = Prev->getPreviousDecl()) {
+ // Is there any previous explicit specialization declaration?
+ if (getTemplateSpecializationKind(Prev) == TSK_ExplicitSpecialization) {
+ HasNoEffect = true;
+ break;
+ }
+ }
+
+ return false;
+
+ case TSK_ExplicitInstantiationDefinition:
+ // C++0x [temp.spec]p5:
+ // For a given template and a given set of template-arguments,
+ // - an explicit instantiation definition shall appear at most once
+ // in a program,
+ Diag(NewLoc, diag::err_explicit_instantiation_duplicate)
+ << PrevDecl;
+ Diag(DiagLocForExplicitInstantiation(PrevDecl, PrevPointOfInstantiation),
+ diag::note_previous_explicit_instantiation);
+ HasNoEffect = true;
+ return false;
+ }
+ }
+
+ llvm_unreachable("Missing specialization/instantiation case?");
+}
+
+/// \brief Perform semantic analysis for the given dependent function
+/// template specialization. The only possible way to get a dependent
+/// function template specialization is with a friend declaration,
+/// like so:
+///
+/// template <class T> void foo(T);
+/// template <class T> class A {
+/// friend void foo<>(T);
+/// };
+///
+/// There really isn't any useful analysis we can do here, so we
+/// just store the information.
+bool
+Sema::CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
+ const TemplateArgumentListInfo &ExplicitTemplateArgs,
+ LookupResult &Previous) {
+ // Remove anything from Previous that isn't a function template in
+ // the correct context.
+ DeclContext *FDLookupContext = FD->getDeclContext()->getRedeclContext();
+ LookupResult::Filter F = Previous.makeFilter();
+ while (F.hasNext()) {
+ NamedDecl *D = F.next()->getUnderlyingDecl();
+ if (!isa<FunctionTemplateDecl>(D) ||
+ !FDLookupContext->InEnclosingNamespaceSetOf(
+ D->getDeclContext()->getRedeclContext()))
+ F.erase();
+ }
+ F.done();
+
+ // Should this be diagnosed here?
+ if (Previous.empty()) return true;
+
+ FD->setDependentTemplateSpecialization(Context, Previous.asUnresolvedSet(),
+ ExplicitTemplateArgs);
+ return false;
+}
+
+/// \brief Perform semantic analysis for the given function template
+/// specialization.
+///
+/// This routine performs all of the semantic analysis required for an
+/// explicit function template specialization. On successful completion,
+/// the function declaration \p FD will become a function template
+/// specialization.
+///
+/// \param FD the function declaration, which will be updated to become a
+/// function template specialization.
+///
+/// \param ExplicitTemplateArgs the explicitly-provided template arguments,
+/// if any. Note that this may be non-null even when zero arguments are
+/// explicitly provided, as in, e.g., \c void sort<>(char*, char*),
+/// since it still carries the locations of the angle brackets.
+///
+/// \param Previous the set of declarations that may be specialized by
+/// this function specialization.
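+///
+/// For illustration only, a declaration that this routine processes:
+///
+///   template<typename T> void sort(T *first, T *last);
+///   template<> void sort(char *first, char *last); // becomes an explicit
+///                                                  // specialization of
+///                                                  // sort<char>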
+bool
+Sema::CheckFunctionTemplateSpecialization(FunctionDecl *FD,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ LookupResult &Previous) {
+ // The set of function template specializations that could match this
+ // explicit function template specialization.
+ UnresolvedSet<8> Candidates;
+
+ DeclContext *FDLookupContext = FD->getDeclContext()->getRedeclContext();
+ for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
+ I != E; ++I) {
+ NamedDecl *Ovl = (*I)->getUnderlyingDecl();
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(Ovl)) {
+ // Only consider templates found within the same semantic lookup scope as
+ // FD.
+ if (!FDLookupContext->InEnclosingNamespaceSetOf(
+ Ovl->getDeclContext()->getRedeclContext()))
+ continue;
+
+ // C++ [temp.expl.spec]p11:
+ // A trailing template-argument can be left unspecified in the
+ // template-id naming an explicit function template specialization
+ // provided it can be deduced from the function argument type.
+ // Perform template argument deduction to determine whether we may be
+ // specializing this template.
+ // FIXME: It is somewhat wasteful to build
+ TemplateDeductionInfo Info(Context, FD->getLocation());
+ FunctionDecl *Specialization = 0;
+ if (TemplateDeductionResult TDK
+ = DeduceTemplateArguments(FunTmpl, ExplicitTemplateArgs,
+ FD->getType(),
+ Specialization,
+ Info)) {
+ // FIXME: Template argument deduction failed; record why it failed, so
+ // that we can provide nifty diagnostics.
+ (void)TDK;
+ continue;
+ }
+
+ // Record this candidate.
+ Candidates.addDecl(Specialization, I.getAccess());
+ }
+ }
+
+ // Find the most specialized function template.
+ UnresolvedSetIterator Result
+ = getMostSpecialized(Candidates.begin(), Candidates.end(),
+ TPOC_Other, 0, FD->getLocation(),
+ PDiag(diag::err_function_template_spec_no_match)
+ << FD->getDeclName(),
+ PDiag(diag::err_function_template_spec_ambiguous)
+ << FD->getDeclName() << (ExplicitTemplateArgs != 0),
+ PDiag(diag::note_function_template_spec_matched));
+ if (Result == Candidates.end())
+ return true;
+
+ // Ignore access information; it doesn't figure into redeclaration checking.
+ FunctionDecl *Specialization = cast<FunctionDecl>(*Result);
+
+ FunctionTemplateSpecializationInfo *SpecInfo
+ = Specialization->getTemplateSpecializationInfo();
+ assert(SpecInfo && "Function template specialization info missing?");
+
+ // Note: do not overwrite location info if previous template
+ // specialization kind was explicit.
+ TemplateSpecializationKind TSK = SpecInfo->getTemplateSpecializationKind();
+ if (TSK == TSK_Undeclared || TSK == TSK_ImplicitInstantiation) {
+ Specialization->setLocation(FD->getLocation());
+ // C++11 [dcl.constexpr]p1: An explicit specialization of a constexpr
+ // function can differ from the template declaration with respect to
+ // the constexpr specifier.
+ Specialization->setConstexpr(FD->isConstexpr());
+ }
+
+ // FIXME: Check if the prior specialization has a point of instantiation.
+ // If so, we have run afoul of .
+
+ // If this is a friend declaration, then we're not really declaring
+ // an explicit specialization.
+ bool isFriend = (FD->getFriendObjectKind() != Decl::FOK_None);
+
+ // Check the scope of this explicit specialization.
+ if (!isFriend &&
+ CheckTemplateSpecializationScope(*this,
+ Specialization->getPrimaryTemplate(),
+ Specialization, FD->getLocation(),
+ false))
+ return true;
+
+ // C++ [temp.expl.spec]p6:
+ // If a template, a member template or the member of a class template is
+ // explicitly specialized then that specialization shall be declared
+ // before the first use of that specialization that would cause an implicit
+ // instantiation to take place, in every translation unit in which such a
+ // use occurs; no diagnostic is required.
+ bool HasNoEffect = false;
+ if (!isFriend &&
+ CheckSpecializationInstantiationRedecl(FD->getLocation(),
+ TSK_ExplicitSpecialization,
+ Specialization,
+ SpecInfo->getTemplateSpecializationKind(),
+ SpecInfo->getPointOfInstantiation(),
+ HasNoEffect))
+ return true;
+
+ // Mark the prior declaration as an explicit specialization, so that later
+ // clients know that this is an explicit specialization.
+ if (!isFriend) {
+ SpecInfo->setTemplateSpecializationKind(TSK_ExplicitSpecialization);
+ MarkUnusedFileScopedDecl(Specialization);
+ }
+
+ // Turn the given function declaration into a function template
+ // specialization, with the template arguments from the previous
+ // specialization.
+ // Take copies of (semantic and syntactic) template argument lists.
+ const TemplateArgumentList* TemplArgs = new (Context)
+ TemplateArgumentList(Specialization->getTemplateSpecializationArgs());
+ FD->setFunctionTemplateSpecialization(Specialization->getPrimaryTemplate(),
+ TemplArgs, /*InsertPos=*/0,
+ SpecInfo->getTemplateSpecializationKind(),
+ ExplicitTemplateArgs);
+ FD->setStorageClass(Specialization->getStorageClass());
+
+ // The "previous declaration" for this function template specialization is
+ // the prior function template specialization.
+ Previous.clear();
+ Previous.addDecl(Specialization);
+ return false;
+}
+
+/// \brief Perform semantic analysis for the given non-template member
+/// specialization.
+///
+/// This routine performs all of the semantic analysis required for an
+/// explicit member function specialization. On successful completion,
+/// the function declaration \p FD will become a member function
+/// specialization.
+///
+/// \param Member the member declaration, which will be updated to become a
+/// specialization.
+///
+/// \param Previous the set of declarations, one of which may be specialized
+/// by this function specialization; the set will be modified to contain the
+/// redeclared member.
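+///
+/// An illustrative member specialization handled by this routine
+/// (hypothetical declarations):
+///
+///   template<typename T> struct A { void f(); };
+///   template<> void A<int>::f() { } // Member becomes an explicit
+///                                   // specialization of A<int>::f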
+bool
+Sema::CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous) {
+ assert(!isa<TemplateDecl>(Member) && "Only for non-template members");
+
+ // Try to find the member we are instantiating.
+ NamedDecl *Instantiation = 0;
+ NamedDecl *InstantiatedFrom = 0;
+ MemberSpecializationInfo *MSInfo = 0;
+
+ if (Previous.empty()) {
+ // Nowhere to look anyway.
+ } else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Member)) {
+ for (LookupResult::iterator I = Previous.begin(), E = Previous.end();
+ I != E; ++I) {
+ NamedDecl *D = (*I)->getUnderlyingDecl();
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (Context.hasSameType(Function->getType(), Method->getType())) {
+ Instantiation = Method;
+ InstantiatedFrom = Method->getInstantiatedFromMemberFunction();
+ MSInfo = Method->getMemberSpecializationInfo();
+ break;
+ }
+ }
+ }
+ } else if (isa<VarDecl>(Member)) {
+ VarDecl *PrevVar;
+ if (Previous.isSingleResult() &&
+ (PrevVar = dyn_cast<VarDecl>(Previous.getFoundDecl())))
+ if (PrevVar->isStaticDataMember()) {
+ Instantiation = PrevVar;
+ InstantiatedFrom = PrevVar->getInstantiatedFromStaticDataMember();
+ MSInfo = PrevVar->getMemberSpecializationInfo();
+ }
+ } else if (isa<RecordDecl>(Member)) {
+ CXXRecordDecl *PrevRecord;
+ if (Previous.isSingleResult() &&
+ (PrevRecord = dyn_cast<CXXRecordDecl>(Previous.getFoundDecl()))) {
+ Instantiation = PrevRecord;
+ InstantiatedFrom = PrevRecord->getInstantiatedFromMemberClass();
+ MSInfo = PrevRecord->getMemberSpecializationInfo();
+ }
+ } else if (isa<EnumDecl>(Member)) {
+ EnumDecl *PrevEnum;
+ if (Previous.isSingleResult() &&
+ (PrevEnum = dyn_cast<EnumDecl>(Previous.getFoundDecl()))) {
+ Instantiation = PrevEnum;
+ InstantiatedFrom = PrevEnum->getInstantiatedFromMemberEnum();
+ MSInfo = PrevEnum->getMemberSpecializationInfo();
+ }
+ }
+
+ if (!Instantiation) {
+ // There is no previous declaration that matches. Since member
+ // specializations are always out-of-line, the caller will complain about
+ // this mismatch later.
+ return false;
+ }
+
+ // If this is a friend, just bail out here before we start turning
+ // things into explicit specializations.
+ if (Member->getFriendObjectKind() != Decl::FOK_None) {
+ // Preserve instantiation information.
+ if (InstantiatedFrom && isa<CXXMethodDecl>(Member)) {
+ cast<CXXMethodDecl>(Member)->setInstantiationOfMemberFunction(
+ cast<CXXMethodDecl>(InstantiatedFrom),
+ cast<CXXMethodDecl>(Instantiation)->getTemplateSpecializationKind());
+ } else if (InstantiatedFrom && isa<CXXRecordDecl>(Member)) {
+ cast<CXXRecordDecl>(Member)->setInstantiationOfMemberClass(
+ cast<CXXRecordDecl>(InstantiatedFrom),
+ cast<CXXRecordDecl>(Instantiation)->getTemplateSpecializationKind());
+ }
+
+ Previous.clear();
+ Previous.addDecl(Instantiation);
+ return false;
+ }
+
+ // Make sure that this is a specialization of a member.
+ if (!InstantiatedFrom) {
+ Diag(Member->getLocation(), diag::err_spec_member_not_instantiated)
+ << Member;
+ Diag(Instantiation->getLocation(), diag::note_specialized_decl);
+ return true;
+ }
+
+ // C++ [temp.expl.spec]p6:
+ // If a template, a member template or the member of a class template is
+ // explicitly specialized then that specialization shall be declared
+ // before the first use of that specialization that would cause an implicit
+ // instantiation to take place, in every translation unit in which such a
+ // use occurs; no diagnostic is required.
+ assert(MSInfo && "Member specialization info missing?");
+
+ bool HasNoEffect = false;
+ if (CheckSpecializationInstantiationRedecl(Member->getLocation(),
+ TSK_ExplicitSpecialization,
+ Instantiation,
+ MSInfo->getTemplateSpecializationKind(),
+ MSInfo->getPointOfInstantiation(),
+ HasNoEffect))
+ return true;
+
+ // Check the scope of this explicit specialization.
+ if (CheckTemplateSpecializationScope(*this,
+ InstantiatedFrom,
+ Instantiation, Member->getLocation(),
+ false))
+ return true;
+
+ // Note that this is an explicit specialization of a member, and update
+ // the previously instantiated declaration to note that it is an explicit
+ // specialization (if it was previously an implicit instantiation). This
+ // latter step makes bookkeeping easier.
+ if (isa<FunctionDecl>(Member)) {
+ FunctionDecl *InstantiationFunction = cast<FunctionDecl>(Instantiation);
+ if (InstantiationFunction->getTemplateSpecializationKind() ==
+ TSK_ImplicitInstantiation) {
+ InstantiationFunction->setTemplateSpecializationKind(
+ TSK_ExplicitSpecialization);
+ InstantiationFunction->setLocation(Member->getLocation());
+ }
+
+ cast<FunctionDecl>(Member)->setInstantiationOfMemberFunction(
+ cast<CXXMethodDecl>(InstantiatedFrom),
+ TSK_ExplicitSpecialization);
+ MarkUnusedFileScopedDecl(InstantiationFunction);
+ } else if (isa<VarDecl>(Member)) {
+ VarDecl *InstantiationVar = cast<VarDecl>(Instantiation);
+ if (InstantiationVar->getTemplateSpecializationKind() ==
+ TSK_ImplicitInstantiation) {
+ InstantiationVar->setTemplateSpecializationKind(
+ TSK_ExplicitSpecialization);
+ InstantiationVar->setLocation(Member->getLocation());
+ }
+
+ Context.setInstantiatedFromStaticDataMember(cast<VarDecl>(Member),
+ cast<VarDecl>(InstantiatedFrom),
+ TSK_ExplicitSpecialization);
+ MarkUnusedFileScopedDecl(InstantiationVar);
+ } else if (isa<CXXRecordDecl>(Member)) {
+ CXXRecordDecl *InstantiationClass = cast<CXXRecordDecl>(Instantiation);
+ if (InstantiationClass->getTemplateSpecializationKind() ==
+ TSK_ImplicitInstantiation) {
+ InstantiationClass->setTemplateSpecializationKind(
+ TSK_ExplicitSpecialization);
+ InstantiationClass->setLocation(Member->getLocation());
+ }
+
+ cast<CXXRecordDecl>(Member)->setInstantiationOfMemberClass(
+ cast<CXXRecordDecl>(InstantiatedFrom),
+ TSK_ExplicitSpecialization);
+ } else {
+ assert(isa<EnumDecl>(Member) && "Only member enums remain");
+ EnumDecl *InstantiationEnum = cast<EnumDecl>(Instantiation);
+ if (InstantiationEnum->getTemplateSpecializationKind() ==
+ TSK_ImplicitInstantiation) {
+ InstantiationEnum->setTemplateSpecializationKind(
+ TSK_ExplicitSpecialization);
+ InstantiationEnum->setLocation(Member->getLocation());
+ }
+
+ cast<EnumDecl>(Member)->setInstantiationOfMemberEnum(
+ cast<EnumDecl>(InstantiatedFrom), TSK_ExplicitSpecialization);
+ }
+
+ // Save the caller the trouble of having to figure out which declaration
+ // this specialization matches.
+ Previous.clear();
+ Previous.addDecl(Instantiation);
+ return false;
+}
+
+/// \brief Check the scope of an explicit instantiation.
+///
+/// \returns true if a serious error occurs, false otherwise.
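+///
+/// A hypothetical example of the C++11 [temp.explicit]p3 rule being checked:
+///
+///   namespace N { template<typename T> struct X { }; }
+///   namespace M { template struct N::X<int>; } // error: M does not
+///                                              // enclose N
+///   template struct N::X<int>; // OK: the global namespace encloses N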
+static bool CheckExplicitInstantiationScope(Sema &S, NamedDecl *D,
+ SourceLocation InstLoc,
+ bool WasQualifiedName) {
+ DeclContext *OrigContext= D->getDeclContext()->getEnclosingNamespaceContext();
+ DeclContext *CurContext = S.CurContext->getRedeclContext();
+
+ if (CurContext->isRecord()) {
+ S.Diag(InstLoc, diag::err_explicit_instantiation_in_class)
+ << D;
+ return true;
+ }
+
+ // C++11 [temp.explicit]p3:
+ // An explicit instantiation shall appear in an enclosing namespace of its
+ // template. If the name declared in the explicit instantiation is an
+ // unqualified name, the explicit instantiation shall appear in the
+ // namespace where its template is declared or, if that namespace is inline
+ // (7.3.1), any namespace from its enclosing namespace set.
+ //
+ // This is DR275, which we do not retroactively apply to C++98/03.
+ if (WasQualifiedName) {
+ if (CurContext->Encloses(OrigContext))
+ return false;
+ } else {
+ if (CurContext->InEnclosingNamespaceSetOf(OrigContext))
+ return false;
+ }
+
+ if (NamespaceDecl *NS = dyn_cast<NamespaceDecl>(OrigContext)) {
+ if (WasQualifiedName)
+ S.Diag(InstLoc,
+ S.getLangOpts().CPlusPlus0x?
+ diag::err_explicit_instantiation_out_of_scope :
+ diag::warn_explicit_instantiation_out_of_scope_0x)
+ << D << NS;
+ else
+ S.Diag(InstLoc,
+ S.getLangOpts().CPlusPlus0x?
+ diag::err_explicit_instantiation_unqualified_wrong_namespace :
+ diag::warn_explicit_instantiation_unqualified_wrong_namespace_0x)
+ << D << NS;
+ } else
+ S.Diag(InstLoc,
+ S.getLangOpts().CPlusPlus0x?
+ diag::err_explicit_instantiation_must_be_global :
+ diag::warn_explicit_instantiation_must_be_global_0x)
+ << D;
+ S.Diag(D->getLocation(), diag::note_explicit_instantiation_here);
+ return false;
+}
+
+/// \brief Determine whether the given scope specifier has a template-id in it.
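+///
+/// For illustration only (hypothetical user code):
+/// \code
+///   template<typename T> struct X { struct Inner {}; };
+///   template struct X<int>::Inner;  // the scope specifier X<int>:: contains
+///                                   // a simple-template-id, as required
+/// \endcode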
+static bool ScopeSpecifierHasTemplateId(const CXXScopeSpec &SS) {
+ if (!SS.isSet())
+ return false;
+
+ // C++11 [temp.explicit]p3:
+ // If the explicit instantiation is for a member function, a member class
+ // or a static data member of a class template specialization, the name of
+ // the class template specialization in the qualified-id for the member
+ // name shall be a simple-template-id.
+ //
+ // C++98 has the same restriction, just worded differently.
+ for (NestedNameSpecifier *NNS = (NestedNameSpecifier *)SS.getScopeRep();
+ NNS; NNS = NNS->getPrefix())
+ if (const Type *T = NNS->getAsType())
+ if (isa<TemplateSpecializationType>(T))
+ return true;
+
+ return false;
+}
+
+// Explicit instantiation of a class template specialization
+DeclResult
+Sema::ActOnExplicitInstantiation(Scope *S,
+ SourceLocation ExternLoc,
+ SourceLocation TemplateLoc,
+ unsigned TagSpec,
+ SourceLocation KWLoc,
+ const CXXScopeSpec &SS,
+ TemplateTy TemplateD,
+ SourceLocation TemplateNameLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation RAngleLoc,
+ AttributeList *Attr) {
+ // Find the class template we're specializing
+ TemplateName Name = TemplateD.getAsVal<TemplateName>();
+ ClassTemplateDecl *ClassTemplate
+ = cast<ClassTemplateDecl>(Name.getAsTemplateDecl());
+
+ // Check that the specialization uses the same tag kind as the
+ // original template.
+ TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
+ assert(Kind != TTK_Enum &&
+ "Invalid enum tag in class template explicit instantiation!");
+ if (!isAcceptableTagRedeclaration(ClassTemplate->getTemplatedDecl(),
+ Kind, /*isDefinition*/false, KWLoc,
+ *ClassTemplate->getIdentifier())) {
+ Diag(KWLoc, diag::err_use_with_wrong_tag)
+ << ClassTemplate
+ << FixItHint::CreateReplacement(KWLoc,
+ ClassTemplate->getTemplatedDecl()->getKindName());
+ Diag(ClassTemplate->getTemplatedDecl()->getLocation(),
+ diag::note_previous_use);
+ Kind = ClassTemplate->getTemplatedDecl()->getTagKind();
+ }
+
+ // C++0x [temp.explicit]p2:
+ // There are two forms of explicit instantiation: an explicit instantiation
+ // definition and an explicit instantiation declaration. An explicit
+ // instantiation declaration begins with the extern keyword. [...]
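+ //
+ // For illustration only (hypothetical user code):
+ //   template struct X<int>;        // explicit instantiation definition
+ //   extern template struct X<int>; // explicit instantiation declaration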
+ TemplateSpecializationKind TSK
+ = ExternLoc.isInvalid()? TSK_ExplicitInstantiationDefinition
+ : TSK_ExplicitInstantiationDeclaration;
+
+ // Translate the parser's template argument list into our AST format.
+ TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
+ translateTemplateArguments(TemplateArgsIn, TemplateArgs);
+
+ // Check that the template argument list is well-formed for this
+ // template.
+ SmallVector<TemplateArgument, 4> Converted;
+ if (CheckTemplateArgumentList(ClassTemplate, TemplateNameLoc,
+ TemplateArgs, false, Converted))
+ return true;
+
+ // Find the class template specialization declaration that
+ // corresponds to these arguments.
+ void *InsertPos = 0;
+ ClassTemplateSpecializationDecl *PrevDecl
+ = ClassTemplate->findSpecialization(Converted.data(),
+ Converted.size(), InsertPos);
+
+ TemplateSpecializationKind PrevDecl_TSK
+ = PrevDecl ? PrevDecl->getTemplateSpecializationKind() : TSK_Undeclared;
+
+ // C++0x [temp.explicit]p2:
+ // [...] An explicit instantiation shall appear in an enclosing
+ // namespace of its template. [...]
+ //
+ // This is C++ DR 275.
+ if (CheckExplicitInstantiationScope(*this, ClassTemplate, TemplateNameLoc,
+ SS.isSet()))
+ return true;
+
+ ClassTemplateSpecializationDecl *Specialization = 0;
+
+ bool HasNoEffect = false;
+ if (PrevDecl) {
+ if (CheckSpecializationInstantiationRedecl(TemplateNameLoc, TSK,
+ PrevDecl, PrevDecl_TSK,
+ PrevDecl->getPointOfInstantiation(),
+ HasNoEffect))
+ return PrevDecl;
+
+ // Even though HasNoEffect == true means that this explicit instantiation
+ // has no effect on semantics, we go on to put its syntax in the AST.
+
+ if (PrevDecl_TSK == TSK_ImplicitInstantiation ||
+ PrevDecl_TSK == TSK_Undeclared) {
+ // Since the only prior class template specialization with these
+ // arguments was referenced but not declared, reuse that
+ // declaration node as our own, updating the source location
+ // for the template name to reflect our new declaration.
+ // (Other source locations will be updated later.)
+ Specialization = PrevDecl;
+ Specialization->setLocation(TemplateNameLoc);
+ PrevDecl = 0;
+ }
+ }
+
+ if (!Specialization) {
+ // Create a new class template specialization declaration node for
+ // this explicit specialization.
+ Specialization
+ = ClassTemplateSpecializationDecl::Create(Context, Kind,
+ ClassTemplate->getDeclContext(),
+ KWLoc, TemplateNameLoc,
+ ClassTemplate,
+ Converted.data(),
+ Converted.size(),
+ PrevDecl);
+ SetNestedNameSpecifier(Specialization, SS);
+
+ if (!HasNoEffect && !PrevDecl) {
+ // Insert the new specialization.
+ ClassTemplate->AddSpecialization(Specialization, InsertPos);
+ }
+ }
+
+ // Build the fully-sugared type for this explicit instantiation as
+ // the user wrote in the explicit instantiation itself. This means
+ // that we'll pretty-print the type retrieved from the
+ // specialization's declaration the way that the user actually wrote
+ // the explicit instantiation, rather than formatting the name based
+ // on the "canonical" representation used to store the template
+ // arguments in the specialization.
+ TypeSourceInfo *WrittenTy
+ = Context.getTemplateSpecializationTypeInfo(Name, TemplateNameLoc,
+ TemplateArgs,
+ Context.getTypeDeclType(Specialization));
+ Specialization->setTypeAsWritten(WrittenTy);
+ TemplateArgsIn.release();
+
+ // Set source locations for keywords.
+ Specialization->setExternLoc(ExternLoc);
+ Specialization->setTemplateKeywordLoc(TemplateLoc);
+
+ if (Attr)
+ ProcessDeclAttributeList(S, Specialization, Attr);
+
+ // Add the explicit instantiation into its lexical context. However,
+ // since explicit instantiations are never found by name lookup, we
+ // just put it into the declaration context directly.
+ Specialization->setLexicalDeclContext(CurContext);
+ CurContext->addDecl(Specialization);
+
+ // Syntax is now OK, so return if it has no other effect on semantics.
+ if (HasNoEffect) {
+ // Set the template specialization kind.
+ Specialization->setTemplateSpecializationKind(TSK);
+ return Specialization;
+ }
+
+ // C++ [temp.explicit]p3:
+ // A definition of a class template or class member template
+ // shall be in scope at the point of the explicit instantiation of
+ // the class template or class member template.
+ //
+ // This check comes when we actually try to perform the
+ // instantiation.
+ ClassTemplateSpecializationDecl *Def
+ = cast_or_null<ClassTemplateSpecializationDecl>(
+ Specialization->getDefinition());
+ if (!Def)
+ InstantiateClassTemplateSpecialization(TemplateNameLoc, Specialization, TSK);
+ else if (TSK == TSK_ExplicitInstantiationDefinition) {
+ MarkVTableUsed(TemplateNameLoc, Specialization, true);
+ Specialization->setPointOfInstantiation(Def->getPointOfInstantiation());
+ }
+
+ // Instantiate the members of this class template specialization.
+ Def = cast_or_null<ClassTemplateSpecializationDecl>(
+ Specialization->getDefinition());
+ if (Def) {
+ TemplateSpecializationKind Old_TSK = Def->getTemplateSpecializationKind();
+
+ // Fix a TSK_ExplicitInstantiationDeclaration followed by a
+ // TSK_ExplicitInstantiationDefinition
+ if (Old_TSK == TSK_ExplicitInstantiationDeclaration &&
+ TSK == TSK_ExplicitInstantiationDefinition)
+ Def->setTemplateSpecializationKind(TSK);
+
+ InstantiateClassTemplateSpecializationMembers(TemplateNameLoc, Def, TSK);
+ }
+
+ // Set the template specialization kind.
+ Specialization->setTemplateSpecializationKind(TSK);
+ return Specialization;
+}
+
+// Explicit instantiation of a member class of a class template.
+DeclResult
+Sema::ActOnExplicitInstantiation(Scope *S,
+ SourceLocation ExternLoc,
+ SourceLocation TemplateLoc,
+ unsigned TagSpec,
+ SourceLocation KWLoc,
+ CXXScopeSpec &SS,
+ IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ AttributeList *Attr) {
+
+ bool Owned = false;
+ bool IsDependent = false;
+ Decl *TagD = ActOnTag(S, TagSpec, Sema::TUK_Reference,
+ KWLoc, SS, Name, NameLoc, Attr, AS_none,
+ /*ModulePrivateLoc=*/SourceLocation(),
+ MultiTemplateParamsArg(*this, 0, 0),
+ Owned, IsDependent, SourceLocation(), false,
+ TypeResult());
+ assert(!IsDependent && "explicit instantiation of dependent name not yet handled");
+
+ if (!TagD)
+ return true;
+
+ TagDecl *Tag = cast<TagDecl>(TagD);
+ assert(!Tag->isEnum() && "shouldn't see enumerations here");
+
+ if (Tag->isInvalidDecl())
+ return true;
+
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(Tag);
+ CXXRecordDecl *Pattern = Record->getInstantiatedFromMemberClass();
+ if (!Pattern) {
+ Diag(TemplateLoc, diag::err_explicit_instantiation_nontemplate_type)
+ << Context.getTypeDeclType(Record);
+ Diag(Record->getLocation(), diag::note_nontemplate_decl_here);
+ return true;
+ }
+
+ // C++0x [temp.explicit]p2:
+ // If the explicit instantiation is for a class or member class, the
+ // elaborated-type-specifier in the declaration shall include a
+ // simple-template-id.
+ //
+ // C++98 has the same restriction, just worded differently.
+ if (!ScopeSpecifierHasTemplateId(SS))
+ Diag(TemplateLoc, diag::ext_explicit_instantiation_without_qualified_id)
+ << Record << SS.getRange();
+
+ // C++0x [temp.explicit]p2:
+ // There are two forms of explicit instantiation: an explicit instantiation
+ // definition and an explicit instantiation declaration. An explicit
+ // instantiation declaration begins with the extern keyword. [...]
+ TemplateSpecializationKind TSK
+ = ExternLoc.isInvalid()? TSK_ExplicitInstantiationDefinition
+ : TSK_ExplicitInstantiationDeclaration;
+
+ // C++0x [temp.explicit]p2:
+ // [...] An explicit instantiation shall appear in an enclosing
+ // namespace of its template. [...]
+ //
+ // This is C++ DR 275.
+ CheckExplicitInstantiationScope(*this, Record, NameLoc, true);
+
+ // Verify that it is okay to explicitly instantiate here.
+ CXXRecordDecl *PrevDecl
+ = cast_or_null<CXXRecordDecl>(Record->getPreviousDecl());
+ if (!PrevDecl && Record->getDefinition())
+ PrevDecl = Record;
+ if (PrevDecl) {
+ MemberSpecializationInfo *MSInfo = PrevDecl->getMemberSpecializationInfo();
+ bool HasNoEffect = false;
+ assert(MSInfo && "No member specialization information?");
+ if (CheckSpecializationInstantiationRedecl(TemplateLoc, TSK,
+ PrevDecl,
+ MSInfo->getTemplateSpecializationKind(),
+ MSInfo->getPointOfInstantiation(),
+ HasNoEffect))
+ return true;
+ if (HasNoEffect)
+ return TagD;
+ }
+
+ CXXRecordDecl *RecordDef
+ = cast_or_null<CXXRecordDecl>(Record->getDefinition());
+ if (!RecordDef) {
+ // C++ [temp.explicit]p3:
+ // A definition of a member class of a class template shall be in scope
+ // at the point of an explicit instantiation of the member class.
+ CXXRecordDecl *Def
+ = cast_or_null<CXXRecordDecl>(Pattern->getDefinition());
+ if (!Def) {
+ Diag(TemplateLoc, diag::err_explicit_instantiation_undefined_member)
+ << 0 << Record->getDeclName() << Record->getDeclContext();
+ Diag(Pattern->getLocation(), diag::note_forward_declaration)
+ << Pattern;
+ return true;
+ } else {
+ if (InstantiateClass(NameLoc, Record, Def,
+ getTemplateInstantiationArgs(Record),
+ TSK))
+ return true;
+
+ RecordDef = cast_or_null<CXXRecordDecl>(Record->getDefinition());
+ if (!RecordDef)
+ return true;
+ }
+ }
+
+ // Instantiate all of the members of the class.
+ InstantiateClassMembers(NameLoc, RecordDef,
+ getTemplateInstantiationArgs(Record), TSK);
+
+ if (TSK == TSK_ExplicitInstantiationDefinition)
+ MarkVTableUsed(NameLoc, RecordDef, true);
+
+ // FIXME: We don't have any representation for explicit instantiations of
+ // member classes. Such a representation is not needed for compilation, but it
+ // should be available for clients that want to see all of the declarations in
+ // the source code.
+ return TagD;
+}
+
+DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
+ SourceLocation ExternLoc,
+ SourceLocation TemplateLoc,
+ Declarator &D) {
+ // Explicit instantiations always require a name.
+ // TODO: check if/when DNInfo should replace Name.
+ DeclarationNameInfo NameInfo = GetNameForDeclarator(D);
+ DeclarationName Name = NameInfo.getName();
+ if (!Name) {
+ if (!D.isInvalidType())
+ Diag(D.getDeclSpec().getLocStart(),
+ diag::err_explicit_instantiation_requires_name)
+ << D.getDeclSpec().getSourceRange()
+ << D.getSourceRange();
+
+ return true;
+ }
+
+ // The scope passed in may not be a decl scope. Zip up the scope tree until
+ // we find one that is.
+ while ((S->getFlags() & Scope::DeclScope) == 0 ||
+ (S->getFlags() & Scope::TemplateParamScope) != 0)
+ S = S->getParent();
+
+ // Determine the type of the declaration.
+ TypeSourceInfo *T = GetTypeForDeclarator(D, S);
+ QualType R = T->getType();
+ if (R.isNull())
+ return true;
+
+ // C++ [dcl.stc]p1:
+ // A storage-class-specifier shall not be specified in [...] an explicit
+ // instantiation (14.7.2) directive.
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef) {
+ Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_of_typedef)
+ << Name;
+ return true;
+ } else if (D.getDeclSpec().getStorageClassSpec()
+ != DeclSpec::SCS_unspecified) {
+ // Complain about, then remove, the storage class specifier.
+ Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_storage_class)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getStorageClassSpecLoc());
+
+ D.getMutableDeclSpec().ClearStorageClassSpecs();
+ }
+
+ // C++0x [temp.explicit]p1:
+ // [...] An explicit instantiation of a function template shall not use the
+ // inline or constexpr specifiers.
+ // Presumably, this also applies to member functions of class templates.
+ if (D.getDeclSpec().isInlineSpecified())
+ Diag(D.getDeclSpec().getInlineSpecLoc(),
+ getLangOpts().CPlusPlus0x ?
+ diag::err_explicit_instantiation_inline :
+ diag::warn_explicit_instantiation_inline_0x)
+ << FixItHint::CreateRemoval(D.getDeclSpec().getInlineSpecLoc());
+ if (D.getDeclSpec().isConstexprSpecified())
+ // FIXME: Add a fix-it to remove the 'constexpr' and add a 'const' if one is
+ // not already specified.
+ Diag(D.getDeclSpec().getConstexprSpecLoc(),
+ diag::err_explicit_instantiation_constexpr);
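+ // For illustration only, both of these hypothetical directives are rejected
+ // by the checks above:
+ //   template static int A<int>::x;     // storage-class-specifier
+ //   template inline void f<int>(int);  // 'inline' specifier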
+
+ // C++0x [temp.explicit]p2:
+ // There are two forms of explicit instantiation: an explicit instantiation
+ // definition and an explicit instantiation declaration. An explicit
+ // instantiation declaration begins with the extern keyword. [...]
+ TemplateSpecializationKind TSK
+ = ExternLoc.isInvalid()? TSK_ExplicitInstantiationDefinition
+ : TSK_ExplicitInstantiationDeclaration;
+
+ LookupResult Previous(*this, NameInfo, LookupOrdinaryName);
+ LookupParsedName(Previous, S, &D.getCXXScopeSpec());
+
+ if (!R->isFunctionType()) {
+ // C++ [temp.explicit]p1:
+ // A [...] static data member of a class template can be explicitly
+ // instantiated from the member definition associated with its class
+ // template.
+ if (Previous.isAmbiguous())
+ return true;
+
+ VarDecl *Prev = Previous.getAsSingle<VarDecl>();
+ if (!Prev || !Prev->isStaticDataMember()) {
+ // We expect to see a static data member here.
+ Diag(D.getIdentifierLoc(), diag::err_explicit_instantiation_not_known)
+ << Name;
+ for (LookupResult::iterator P = Previous.begin(), PEnd = Previous.end();
+ P != PEnd; ++P)
+ Diag((*P)->getLocation(), diag::note_explicit_instantiation_here);
+ return true;
+ }
+
+ if (!Prev->getInstantiatedFromStaticDataMember()) {
+ // FIXME: Check for explicit specialization?
+ Diag(D.getIdentifierLoc(),
+ diag::err_explicit_instantiation_data_member_not_instantiated)
+ << Prev;
+ Diag(Prev->getLocation(), diag::note_explicit_instantiation_here);
+ // FIXME: Can we provide a note showing where this was declared?
+ return true;
+ }
+
+ // C++0x [temp.explicit]p2:
+ // If the explicit instantiation is for a member function, a member class
+ // or a static data member of a class template specialization, the name of
+ // the class template specialization in the qualified-id for the member
+ // name shall be a simple-template-id.
+ //
+ // C++98 has the same restriction, just worded differently.
+ if (!ScopeSpecifierHasTemplateId(D.getCXXScopeSpec()))
+ Diag(D.getIdentifierLoc(),
+ diag::ext_explicit_instantiation_without_qualified_id)
+ << Prev << D.getCXXScopeSpec().getRange();
+
+ // Check the scope of this explicit instantiation.
+ CheckExplicitInstantiationScope(*this, Prev, D.getIdentifierLoc(), true);
+
+ // Verify that it is okay to explicitly instantiate here.
+ MemberSpecializationInfo *MSInfo = Prev->getMemberSpecializationInfo();
+ assert(MSInfo && "Missing static data member specialization info?");
+ bool HasNoEffect = false;
+ if (CheckSpecializationInstantiationRedecl(D.getIdentifierLoc(), TSK, Prev,
+ MSInfo->getTemplateSpecializationKind(),
+ MSInfo->getPointOfInstantiation(),
+ HasNoEffect))
+ return true;
+ if (HasNoEffect)
+ return (Decl*) 0;
+
+ // Instantiate static data member.
+ Prev->setTemplateSpecializationKind(TSK, D.getIdentifierLoc());
+ if (TSK == TSK_ExplicitInstantiationDefinition)
+ InstantiateStaticDataMemberDefinition(D.getIdentifierLoc(), Prev);
+
+ // FIXME: Create an ExplicitInstantiation node?
+ return (Decl*) 0;
+ }
+
+ // If the declarator is a template-id, translate the parser's template
+ // argument list into our AST format.
+ bool HasExplicitTemplateArgs = false;
+ TemplateArgumentListInfo TemplateArgs;
+ if (D.getName().getKind() == UnqualifiedId::IK_TemplateId) {
+ TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
+ TemplateArgs.setLAngleLoc(TemplateId->LAngleLoc);
+ TemplateArgs.setRAngleLoc(TemplateId->RAngleLoc);
+ ASTTemplateArgsPtr TemplateArgsPtr(*this,
+ TemplateId->getTemplateArgs(),
+ TemplateId->NumArgs);
+ translateTemplateArguments(TemplateArgsPtr, TemplateArgs);
+ HasExplicitTemplateArgs = true;
+ TemplateArgsPtr.release();
+ }
+
+ // C++ [temp.explicit]p1:
+ // A [...] function [...] can be explicitly instantiated from its template.
+ // A member function [...] of a class template can be explicitly
+ // instantiated from the member definition associated with its class
+ // template.
+ UnresolvedSet<8> Matches;
+ for (LookupResult::iterator P = Previous.begin(), PEnd = Previous.end();
+ P != PEnd; ++P) {
+ NamedDecl *Prev = *P;
+ if (!HasExplicitTemplateArgs) {
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Prev)) {
+ if (Context.hasSameUnqualifiedType(Method->getType(), R)) {
+ Matches.clear();
+
+ Matches.addDecl(Method, P.getAccess());
+ if (Method->getTemplateSpecializationKind() == TSK_Undeclared)
+ break;
+ }
+ }
+ }
+
+ FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(Prev);
+ if (!FunTmpl)
+ continue;
+
+ TemplateDeductionInfo Info(Context, D.getIdentifierLoc());
+ FunctionDecl *Specialization = 0;
+ if (TemplateDeductionResult TDK
+ = DeduceTemplateArguments(FunTmpl,
+ (HasExplicitTemplateArgs ? &TemplateArgs : 0),
+ R, Specialization, Info)) {
+ // FIXME: Keep track of almost-matches?
+ (void)TDK;
+ continue;
+ }
+
+ Matches.addDecl(Specialization, P.getAccess());
+ }
+
+ // Find the most specialized function template specialization.
+ UnresolvedSetIterator Result
+ = getMostSpecialized(Matches.begin(), Matches.end(), TPOC_Other, 0,
+ D.getIdentifierLoc(),
+ PDiag(diag::err_explicit_instantiation_not_known) << Name,
+ PDiag(diag::err_explicit_instantiation_ambiguous) << Name,
+ PDiag(diag::note_explicit_instantiation_candidate));
+
+ if (Result == Matches.end())
+ return true;
+
+ // Ignore access control bits, we don't need them for redeclaration checking.
+ FunctionDecl *Specialization = cast<FunctionDecl>(*Result);
+
+ if (Specialization->getTemplateSpecializationKind() == TSK_Undeclared) {
+ Diag(D.getIdentifierLoc(),
+ diag::err_explicit_instantiation_member_function_not_instantiated)
+ << Specialization
+ << (Specialization->getTemplateSpecializationKind() ==
+ TSK_ExplicitSpecialization);
+ Diag(Specialization->getLocation(), diag::note_explicit_instantiation_here);
+ return true;
+ }
+
+ FunctionDecl *PrevDecl = Specialization->getPreviousDecl();
+ if (!PrevDecl && Specialization->isThisDeclarationADefinition())
+ PrevDecl = Specialization;
+
+ if (PrevDecl) {
+ bool HasNoEffect = false;
+ if (CheckSpecializationInstantiationRedecl(D.getIdentifierLoc(), TSK,
+ PrevDecl,
+ PrevDecl->getTemplateSpecializationKind(),
+ PrevDecl->getPointOfInstantiation(),
+ HasNoEffect))
+ return true;
+
+ // FIXME: We may still want to build some representation of this
+ // explicit specialization.
+ if (HasNoEffect)
+ return (Decl*) 0;
+ }
+
+ Specialization->setTemplateSpecializationKind(TSK, D.getIdentifierLoc());
+ AttributeList *Attr = D.getDeclSpec().getAttributes().getList();
+ if (Attr)
+ ProcessDeclAttributeList(S, Specialization, Attr);
+
+ if (TSK == TSK_ExplicitInstantiationDefinition)
+ InstantiateFunctionDefinition(D.getIdentifierLoc(), Specialization);
+
+ // C++0x [temp.explicit]p2:
+ // If the explicit instantiation is for a member function, a member class
+ // or a static data member of a class template specialization, the name of
+ // the class template specialization in the qualified-id for the member
+ // name shall be a simple-template-id.
+ //
+ // C++98 has the same restriction, just worded differently.
+ FunctionTemplateDecl *FunTmpl = Specialization->getPrimaryTemplate();
+ if (D.getName().getKind() != UnqualifiedId::IK_TemplateId && !FunTmpl &&
+ D.getCXXScopeSpec().isSet() &&
+ !ScopeSpecifierHasTemplateId(D.getCXXScopeSpec()))
+ Diag(D.getIdentifierLoc(),
+ diag::ext_explicit_instantiation_without_qualified_id)
+ << Specialization << D.getCXXScopeSpec().getRange();
+
+ CheckExplicitInstantiationScope(*this,
+ FunTmpl? (NamedDecl *)FunTmpl
+ : Specialization->getInstantiatedFromMemberFunction(),
+ D.getIdentifierLoc(),
+ D.getCXXScopeSpec().isSet());
+
+ // FIXME: Create some kind of ExplicitInstantiationDecl here.
+ return (Decl*) 0;
+}
+
+TypeResult
+Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
+ const CXXScopeSpec &SS, IdentifierInfo *Name,
+ SourceLocation TagLoc, SourceLocation NameLoc) {
+ // This has to hold, because SS is expected to be defined.
+ assert(Name && "Expected a name in a dependent tag");
+
+ NestedNameSpecifier *NNS
+ = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+ if (!NNS)
+ return true;
+
+ TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
+
+ if (TUK == TUK_Declaration || TUK == TUK_Definition) {
+ Diag(NameLoc, diag::err_dependent_tag_decl)
+ << (TUK == TUK_Definition) << Kind << SS.getRange();
+ return true;
+ }
+
+ // Create the resulting type.
+ ElaboratedTypeKeyword Kwd = TypeWithKeyword::getKeywordForTagTypeKind(Kind);
+ QualType Result = Context.getDependentNameType(Kwd, NNS, Name);
+
+ // Create type-source location information for this type.
+ TypeLocBuilder TLB;
+ DependentNameTypeLoc TL = TLB.push<DependentNameTypeLoc>(Result);
+ TL.setElaboratedKeywordLoc(TagLoc);
+ TL.setQualifierLoc(SS.getWithLocInContext(Context));
+ TL.setNameLoc(NameLoc);
+ return CreateParsedType(Result, TLB.getTypeSourceInfo(Context, Result));
+}
+
+TypeResult
+Sema::ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS, const IdentifierInfo &II,
+ SourceLocation IdLoc) {
+ if (SS.isInvalid())
+ return true;
+
+ if (TypenameLoc.isValid() && S && !S->getTemplateParamParent())
+ Diag(TypenameLoc,
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_typename_outside_of_template :
+ diag::ext_typename_outside_of_template)
+ << FixItHint::CreateRemoval(TypenameLoc);
+
+ NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
+ QualType T = CheckTypenameType(TypenameLoc.isValid()? ETK_Typename : ETK_None,
+ TypenameLoc, QualifierLoc, II, IdLoc);
+ if (T.isNull())
+ return true;
+
+ TypeSourceInfo *TSI = Context.CreateTypeSourceInfo(T);
+ if (isa<DependentNameType>(T)) {
+ DependentNameTypeLoc TL = cast<DependentNameTypeLoc>(TSI->getTypeLoc());
+ TL.setElaboratedKeywordLoc(TypenameLoc);
+ TL.setQualifierLoc(QualifierLoc);
+ TL.setNameLoc(IdLoc);
+ } else {
+ ElaboratedTypeLoc TL = cast<ElaboratedTypeLoc>(TSI->getTypeLoc());
+ TL.setElaboratedKeywordLoc(TypenameLoc);
+ TL.setQualifierLoc(QualifierLoc);
+ cast<TypeSpecTypeLoc>(TL.getNamedTypeLoc()).setNameLoc(IdLoc);
+ }
+
+ return CreateParsedType(T, TSI);
+}
+
+TypeResult
+Sema::ActOnTypenameType(Scope *S,
+ SourceLocation TypenameLoc,
+ const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ TemplateTy TemplateIn,
+ SourceLocation TemplateNameLoc,
+ SourceLocation LAngleLoc,
+ ASTTemplateArgsPtr TemplateArgsIn,
+ SourceLocation RAngleLoc) {
+ if (TypenameLoc.isValid() && S && !S->getTemplateParamParent())
+ Diag(TypenameLoc,
+ getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_typename_outside_of_template :
+ diag::ext_typename_outside_of_template)
+ << FixItHint::CreateRemoval(TypenameLoc);
+
+ // Translate the parser's template argument list into our AST format.
+ TemplateArgumentListInfo TemplateArgs(LAngleLoc, RAngleLoc);
+ translateTemplateArguments(TemplateArgsIn, TemplateArgs);
+
+ TemplateName Template = TemplateIn.get();
+ if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
+ // Construct a dependent template specialization type.
+ assert(DTN && "dependent template has non-dependent name?");
+ assert(DTN->getQualifier()
+ == static_cast<NestedNameSpecifier*>(SS.getScopeRep()));
+ QualType T = Context.getDependentTemplateSpecializationType(ETK_Typename,
+ DTN->getQualifier(),
+ DTN->getIdentifier(),
+ TemplateArgs);
+
+ // Create source-location information for this type.
+ TypeLocBuilder Builder;
+ DependentTemplateSpecializationTypeLoc SpecTL
+ = Builder.push<DependentTemplateSpecializationTypeLoc>(T);
+ SpecTL.setElaboratedKeywordLoc(TypenameLoc);
+ SpecTL.setQualifierLoc(SS.getWithLocInContext(Context));
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateNameLoc);
+ SpecTL.setLAngleLoc(LAngleLoc);
+ SpecTL.setRAngleLoc(RAngleLoc);
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
+ return CreateParsedType(T, Builder.getTypeSourceInfo(Context, T));
+ }
+
+ QualType T = CheckTemplateIdType(Template, TemplateNameLoc, TemplateArgs);
+ if (T.isNull())
+ return true;
+
+ // Provide source-location information for the template specialization type.
+ TypeLocBuilder Builder;
+ TemplateSpecializationTypeLoc SpecTL
+ = Builder.push<TemplateSpecializationTypeLoc>(T);
+ SpecTL.setTemplateKeywordLoc(TemplateKWLoc);
+ SpecTL.setTemplateNameLoc(TemplateNameLoc);
+ SpecTL.setLAngleLoc(LAngleLoc);
+ SpecTL.setRAngleLoc(RAngleLoc);
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ SpecTL.setArgLocInfo(I, TemplateArgs[I].getLocInfo());
+
+ T = Context.getElaboratedType(ETK_Typename, SS.getScopeRep(), T);
+ ElaboratedTypeLoc TL = Builder.push<ElaboratedTypeLoc>(T);
+ TL.setElaboratedKeywordLoc(TypenameLoc);
+ TL.setQualifierLoc(SS.getWithLocInContext(Context));
+
+ TypeSourceInfo *TSI = Builder.getTypeSourceInfo(Context, T);
+ return CreateParsedType(T, TSI);
+}
+
+
+/// \brief Build the type that describes a C++ typename specifier,
+/// e.g., "typename T::type".
+QualType
+Sema::CheckTypenameType(ElaboratedTypeKeyword Keyword,
+ SourceLocation KeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ const IdentifierInfo &II,
+ SourceLocation IILoc) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ DeclContext *Ctx = computeDeclContext(SS);
+ if (!Ctx) {
+ // If the nested-name-specifier is dependent and couldn't be
+ // resolved to a type, build a typename type.
+ assert(QualifierLoc.getNestedNameSpecifier()->isDependent());
+ return Context.getDependentNameType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
+ &II);
+ }
+
+ // If the nested-name-specifier refers to the current instantiation,
+ // the "typename" keyword itself is superfluous. In C++03, the
+ // program is actually ill-formed. However, DR 382 (in C++0x CD1)
+ // allows such extraneous "typename" keywords, and we retroactively
+ // apply this DR to C++03 code with only a warning. In any case we continue.
+
+ if (RequireCompleteDeclContext(SS, Ctx))
+ return QualType();
+
+ DeclarationName Name(&II);
+ LookupResult Result(*this, Name, IILoc, LookupOrdinaryName);
+ LookupQualifiedName(Result, Ctx);
+ unsigned DiagID = 0;
+ Decl *Referenced = 0;
+ switch (Result.getResultKind()) {
+ case LookupResult::NotFound:
+ DiagID = diag::err_typename_nested_not_found;
+ break;
+
+ case LookupResult::FoundUnresolvedValue: {
+ // We found a using declaration that is a value. Most likely, the using
+ // declaration itself is meant to have the 'typename' keyword.
+ SourceRange FullRange(KeywordLoc.isValid() ? KeywordLoc : SS.getBeginLoc(),
+ IILoc);
+ Diag(IILoc, diag::err_typename_refers_to_using_value_decl)
+ << Name << Ctx << FullRange;
+ if (UnresolvedUsingValueDecl *Using
+ = dyn_cast<UnresolvedUsingValueDecl>(Result.getRepresentativeDecl())){
+ SourceLocation Loc = Using->getQualifierLoc().getBeginLoc();
+ Diag(Loc, diag::note_using_value_decl_missing_typename)
+ << FixItHint::CreateInsertion(Loc, "typename ");
+ }
+ }
+ // Fall through to create a dependent typename type, from which we can recover
+ // better.
+
+ case LookupResult::NotFoundInCurrentInstantiation:
+ // Okay, it's a member of an unknown instantiation.
+ return Context.getDependentNameType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
+ &II);
+
+ case LookupResult::Found:
+ if (TypeDecl *Type = dyn_cast<TypeDecl>(Result.getFoundDecl())) {
+ // We found a type. Build an ElaboratedType, since the
+ // typename-specifier was just sugar.
+ return Context.getElaboratedType(ETK_Typename,
+ QualifierLoc.getNestedNameSpecifier(),
+ Context.getTypeDeclType(Type));
+ }
+
+ DiagID = diag::err_typename_nested_not_type;
+ Referenced = Result.getFoundDecl();
+ break;
+
+ case LookupResult::FoundOverloaded:
+ DiagID = diag::err_typename_nested_not_type;
+ Referenced = *Result.begin();
+ break;
+
+ case LookupResult::Ambiguous:
+ return QualType();
+ }
+
+ // If we get here, it's because name lookup did not find a
+ // type. Emit an appropriate diagnostic and return an error.
+ SourceRange FullRange(KeywordLoc.isValid() ? KeywordLoc : SS.getBeginLoc(),
+ IILoc);
+ Diag(IILoc, DiagID) << FullRange << Name << Ctx;
+ if (Referenced)
+ Diag(Referenced->getLocation(), diag::note_typename_refers_here)
+ << Name;
+ return QualType();
+}
+
+namespace {
+ // See Sema::RebuildTypeInCurrentInstantiation
+ class CurrentInstantiationRebuilder
+ : public TreeTransform<CurrentInstantiationRebuilder> {
+ SourceLocation Loc;
+ DeclarationName Entity;
+
+ public:
+ typedef TreeTransform<CurrentInstantiationRebuilder> inherited;
+
+ CurrentInstantiationRebuilder(Sema &SemaRef,
+ SourceLocation Loc,
+ DeclarationName Entity)
+ : TreeTransform<CurrentInstantiationRebuilder>(SemaRef),
+ Loc(Loc), Entity(Entity) { }
+
+ /// \brief Determine whether the given type \p T has already been
+ /// transformed.
+ ///
+ /// For the purposes of type reconstruction, a type has already been
+ /// transformed if it is NULL or if it is not dependent.
+ bool AlreadyTransformed(QualType T) {
+ return T.isNull() || !T->isDependentType();
+ }
+
+ /// \brief Returns the location of the entity whose type is being
+ /// rebuilt.
+ SourceLocation getBaseLocation() { return Loc; }
+
+ /// \brief Returns the name of the entity whose type is being rebuilt.
+ DeclarationName getBaseEntity() { return Entity; }
+
+ /// \brief Sets the "base" location and entity when that
+ /// information is known based on another transformation.
+ void setBase(SourceLocation Loc, DeclarationName Entity) {
+ this->Loc = Loc;
+ this->Entity = Entity;
+ }
+
+ ExprResult TransformLambdaExpr(LambdaExpr *E) {
+ // Lambdas never need to be transformed.
+ return E;
+ }
+ };
+}
+
+/// \brief Rebuilds a type within the context of the current instantiation.
+///
+/// The type \p T is part of the type of an out-of-line member definition of
+/// a class template (or class template partial specialization) that was parsed
+/// and constructed before we entered the scope of the class template (or
+/// partial specialization thereof). This routine will rebuild that type now
+/// that we have entered the declarator's scope, which may produce different
+/// canonical types, e.g.,
+///
+/// \code
+/// template<typename T>
+/// struct X {
+/// typedef T* pointer;
+/// pointer data();
+/// };
+///
+/// template<typename T>
+/// typename X<T>::pointer X<T>::data() { ... }
+/// \endcode
+///
+/// Here, the type "typename X<T>::pointer" will be created as a DependentNameType,
+/// since we do not know that we can look into X<T> when we parsed the type.
+/// This function will rebuild the type, performing the lookup of "pointer"
+/// in X<T> and returning an ElaboratedType whose canonical type is the same
+/// as the canonical type of T*, allowing the return types of the out-of-line
+/// definition and the declaration to match.
+TypeSourceInfo *Sema::RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
+ SourceLocation Loc,
+ DeclarationName Name) {
+ if (!T || !T->getType()->isDependentType())
+ return T;
+
+ CurrentInstantiationRebuilder Rebuilder(*this, Loc, Name);
+ return Rebuilder.TransformType(T);
+}
+
+ExprResult Sema::RebuildExprInCurrentInstantiation(Expr *E) {
+ CurrentInstantiationRebuilder Rebuilder(*this, E->getExprLoc(),
+ DeclarationName());
+ return Rebuilder.TransformExpr(E);
+}
+
+bool Sema::RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS) {
+ if (SS.isInvalid())
+ return true;
+
+ NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
+ CurrentInstantiationRebuilder Rebuilder(*this, SS.getRange().getBegin(),
+ DeclarationName());
+ NestedNameSpecifierLoc Rebuilt
+ = Rebuilder.TransformNestedNameSpecifierLoc(QualifierLoc);
+ if (!Rebuilt)
+ return true;
+
+ SS.Adopt(Rebuilt);
+ return false;
+}
+
+/// \brief Rebuild the template parameters now that we know we're in a current
+/// instantiation.
+bool Sema::RebuildTemplateParamsInCurrentInstantiation(
+ TemplateParameterList *Params) {
+ for (unsigned I = 0, N = Params->size(); I != N; ++I) {
+ Decl *Param = Params->getParam(I);
+
+ // There is nothing to rebuild in a type parameter.
+ if (isa<TemplateTypeParmDecl>(Param))
+ continue;
+
+ // Rebuild the template parameter list of a template template parameter.
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(Param)) {
+ if (RebuildTemplateParamsInCurrentInstantiation(
+ TTP->getTemplateParameters()))
+ return true;
+
+ continue;
+ }
+
+ // Rebuild the type of a non-type template parameter.
+ NonTypeTemplateParmDecl *NTTP = cast<NonTypeTemplateParmDecl>(Param);
+ TypeSourceInfo *NewTSI
+ = RebuildTypeInCurrentInstantiation(NTTP->getTypeSourceInfo(),
+ NTTP->getLocation(),
+ NTTP->getDeclName());
+ if (!NewTSI)
+ return true;
+
+ if (NewTSI != NTTP->getTypeSourceInfo()) {
+ NTTP->setTypeSourceInfo(NewTSI);
+ NTTP->setType(NewTSI->getType());
+ }
+ }
+
+ return false;
+}
+
+/// \brief Produces a formatted string that describes the binding of
+/// template parameters to template arguments.
+std::string
+Sema::getTemplateArgumentBindingsText(const TemplateParameterList *Params,
+ const TemplateArgumentList &Args) {
+ return getTemplateArgumentBindingsText(Params, Args.data(), Args.size());
+}
+
+std::string
+Sema::getTemplateArgumentBindingsText(const TemplateParameterList *Params,
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
+ SmallString<128> Str;
+ llvm::raw_svector_ostream Out(Str);
+
+ if (!Params || Params->size() == 0 || NumArgs == 0)
+ return std::string();
+
+ for (unsigned I = 0, N = Params->size(); I != N; ++I) {
+ if (I >= NumArgs)
+ break;
+
+ if (I == 0)
+ Out << "[with ";
+ else
+ Out << ", ";
+
+ if (const IdentifierInfo *Id = Params->getParam(I)->getIdentifier()) {
+ Out << Id->getName();
+ } else {
+ Out << '$' << I;
+ }
+
+ Out << " = ";
+ Args[I].print(getPrintingPolicy(), Out);
+ }
+
+ Out << ']';
+ return Out.str();
+}
+
+void Sema::MarkAsLateParsedTemplate(FunctionDecl *FD, bool Flag) {
+ if (!FD)
+ return;
+ FD->setLateTemplateParsed(Flag);
+}
+
+bool Sema::IsInsideALocalClassWithinATemplateFunction() {
+ DeclContext *DC = CurContext;
+
+ while (DC) {
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC)) {
+ const FunctionDecl *FD = RD->isLocalClass();
+ return (FD && FD->getTemplatedKind() != FunctionDecl::TK_NonTemplate);
+ } else if (DC->isTranslationUnit() || DC->isNamespace())
+ return false;
+
+ DC = DC->getParent();
+ }
+ return false;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
new file mode 100644
index 0000000..2ea1e6f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -0,0 +1,4496 @@
+//===------- SemaTemplateDeduction.cpp - Template Argument Deduction ------===/
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===/
+//
+// This file implements C++ template argument deduction.
+//
+//===----------------------------------------------------------------------===/
+
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Template.h"
+#include "clang/Sema/TemplateDeduction.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "llvm/ADT/SmallBitVector.h"
+#include "TreeTransform.h"
+#include <algorithm>
+
+namespace clang {
+ using namespace sema;
+
+ /// \brief Various flags that control template argument deduction.
+ ///
+ /// These flags can be bitwise-OR'd together.
+ enum TemplateDeductionFlags {
+ /// \brief No template argument deduction flags, which indicates the
+ /// strictest results for template argument deduction (as used for, e.g.,
+ /// matching class template partial specializations).
+ TDF_None = 0,
+ /// \brief Within template argument deduction from a function call, we are
+ /// matching with a parameter type for which the original parameter was
+ /// a reference.
+ TDF_ParamWithReferenceType = 0x1,
+ /// \brief Within template argument deduction from a function call, we
+ /// are matching in a case where we ignore cv-qualifiers.
+ TDF_IgnoreQualifiers = 0x02,
+ /// \brief Within template argument deduction from a function call,
+ /// we are matching in a case where we can perform template argument
+ /// deduction from a template-id of a derived class of the argument type.
+ TDF_DerivedClass = 0x04,
+ /// \brief Allow non-dependent types to differ, e.g., when performing
+ /// template argument deduction from a function call where conversions
+ /// may apply.
+ TDF_SkipNonDependent = 0x08,
+ /// \brief Whether we are performing template argument deduction for
+ /// parameters and arguments in a top-level parameter-type-list.
+ TDF_TopLevelParameterTypeList = 0x10
+ };
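+
+ // The flags above combine with bitwise OR; for example, deduction from a
+ // call argument of a derived-class type where cv-qualifiers are ignored
+ // could pass TDF_IgnoreQualifiers | TDF_DerivedClass (illustrative
+ // combination only).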
+}
+
+using namespace clang;
+
+/// \brief Compare two APSInts, extending and switching the sign as
+/// necessary to compare their values regardless of underlying type.
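+///
+/// For example, the value 5 held in a 16-bit unsigned APSInt compares equal
+/// to 5 held in a 64-bit signed APSInt, whereas a negative signed value never
+/// compares equal to any unsigned value.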
+static bool hasSameExtendedValue(llvm::APSInt X, llvm::APSInt Y) {
+ if (Y.getBitWidth() > X.getBitWidth())
+ X = X.extend(Y.getBitWidth());
+ else if (Y.getBitWidth() < X.getBitWidth())
+ Y = Y.extend(X.getBitWidth());
+
+ // If there is a signedness mismatch, correct it.
+ if (X.isSigned() != Y.isSigned()) {
+ // If the signed value is negative, then the values cannot be the same.
+ if ((Y.isSigned() && Y.isNegative()) || (X.isSigned() && X.isNegative()))
+ return false;
+
+ Y.setIsSigned(true);
+ X.setIsSigned(true);
+ }
+
+ return X == Y;
+}
+
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ const TemplateArgument &Param,
+ TemplateArgument Arg,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced);
+
+/// \brief Whether template argument deduction for two reference parameters
+/// resulted in the argument type, parameter type, or neither type being more
+/// qualified than the other.
+enum DeductionQualifierComparison {
+ NeitherMoreQualified = 0,
+ ParamMoreQualified,
+ ArgMoreQualified
+};
+
+/// \brief Stores the result of comparing two reference parameters while
+/// performing template argument deduction for partial ordering of function
+/// templates.
+struct RefParamPartialOrderingComparison {
+ /// \brief Whether the parameter type is an rvalue reference type.
+ bool ParamIsRvalueRef;
+ /// \brief Whether the argument type is an rvalue reference type.
+ bool ArgIsRvalueRef;
+
+ /// \brief Whether the parameter or argument (or neither) is more qualified.
+ DeductionQualifierComparison Qualifiers;
+};
+
+
+
+static Sema::TemplateDeductionResult
+DeduceTemplateArgumentsByTypeMatch(Sema &S,
+ TemplateParameterList *TemplateParams,
+ QualType Param,
+ QualType Arg,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &
+ Deduced,
+ unsigned TDF,
+ bool PartialOrdering = false,
+ SmallVectorImpl<RefParamPartialOrderingComparison> *
+ RefParamComparisons = 0);
+
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ const TemplateArgument *Params, unsigned NumParams,
+ const TemplateArgument *Args, unsigned NumArgs,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ bool NumberOfArgumentsMustMatch = true);
+
+/// \brief If the given expression is of a form that permits the deduction
+/// of a non-type template parameter, return the declaration of that
+/// non-type template parameter.
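+///
+/// For example, in the hypothetical function template
+/// 'template<int N> void f(int (&arr)[N])', the array bound 'N' is a
+/// DeclRefExpr (possibly wrapped in an implicit cast) naming the non-type
+/// template parameter N, which is what this function returns.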
+static NonTypeTemplateParmDecl *getDeducedParameterFromExpr(Expr *E) {
+ if (ImplicitCastExpr *IC = dyn_cast<ImplicitCastExpr>(E))
+ E = IC->getSubExpr();
+
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
+ return dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl());
+
+ return 0;
+}
+
+/// \brief Determine whether two declaration pointers refer to the same
+/// declaration.
+static bool isSameDeclaration(Decl *X, Decl *Y) {
+ if (!X || !Y)
+ return !X && !Y;
+
+ if (NamedDecl *NX = dyn_cast<NamedDecl>(X))
+ X = NX->getUnderlyingDecl();
+ if (NamedDecl *NY = dyn_cast<NamedDecl>(Y))
+ Y = NY->getUnderlyingDecl();
+
+ return X->getCanonicalDecl() == Y->getCanonicalDecl();
+}
+
+/// \brief Verify that the given, deduced template arguments are compatible.
+///
+/// \returns The deduced template argument, or a NULL template argument if
+/// the deduced template arguments were incompatible.
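+///
+/// For example, deducing T = int from one function argument and T = int from
+/// another yields int, whereas deducing T = int and T = long yields a NULL
+/// template argument, which callers report as an inconsistent deduction.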
+static DeducedTemplateArgument
+checkDeducedTemplateArguments(ASTContext &Context,
+ const DeducedTemplateArgument &X,
+ const DeducedTemplateArgument &Y) {
+ // We have no deduction for one or both of the arguments; they're compatible.
+ if (X.isNull())
+ return Y;
+ if (Y.isNull())
+ return X;
+
+ switch (X.getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Non-deduced template arguments handled above");
+
+ case TemplateArgument::Type:
+ // If two template type arguments have the same type, they're compatible.
+ if (Y.getKind() == TemplateArgument::Type &&
+ Context.hasSameType(X.getAsType(), Y.getAsType()))
+ return X;
+
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::Integral:
+ // If we deduced a constant in one case and either a dependent expression or
+ // declaration in another case, keep the integral constant.
+ // If both are integral constants with the same value, keep that value.
+ if (Y.getKind() == TemplateArgument::Expression ||
+ Y.getKind() == TemplateArgument::Declaration ||
+ (Y.getKind() == TemplateArgument::Integral &&
+ hasSameExtendedValue(*X.getAsIntegral(), *Y.getAsIntegral())))
+ return DeducedTemplateArgument(X,
+ X.wasDeducedFromArrayBound() &&
+ Y.wasDeducedFromArrayBound());
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::Template:
+ if (Y.getKind() == TemplateArgument::Template &&
+ Context.hasSameTemplateName(X.getAsTemplate(), Y.getAsTemplate()))
+ return X;
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::TemplateExpansion:
+ if (Y.getKind() == TemplateArgument::TemplateExpansion &&
+ Context.hasSameTemplateName(X.getAsTemplateOrTemplatePattern(),
+ Y.getAsTemplateOrTemplatePattern()))
+ return X;
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::Expression:
+ // If we deduced a dependent expression in one case and either an integral
+ // constant or a declaration in another case, keep the integral constant
+ // or declaration.
+ if (Y.getKind() == TemplateArgument::Integral ||
+ Y.getKind() == TemplateArgument::Declaration)
+ return DeducedTemplateArgument(Y, X.wasDeducedFromArrayBound() &&
+ Y.wasDeducedFromArrayBound());
+
+ if (Y.getKind() == TemplateArgument::Expression) {
+ // Compare the expressions for equality
+ llvm::FoldingSetNodeID ID1, ID2;
+ X.getAsExpr()->Profile(ID1, Context, true);
+ Y.getAsExpr()->Profile(ID2, Context, true);
+ if (ID1 == ID2)
+ return X;
+ }
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::Declaration:
+ // If we deduced a declaration and a dependent expression, keep the
+ // declaration.
+ if (Y.getKind() == TemplateArgument::Expression)
+ return X;
+
+ // If we deduced a declaration and an integral constant, keep the
+ // integral constant.
+ if (Y.getKind() == TemplateArgument::Integral)
+ return Y;
+
+ // If we deduced two declarations, make sure they refer to the
+ // same declaration.
+ if (Y.getKind() == TemplateArgument::Declaration &&
+ isSameDeclaration(X.getAsDecl(), Y.getAsDecl()))
+ return X;
+
+ // All other combinations are incompatible.
+ return DeducedTemplateArgument();
+
+ case TemplateArgument::Pack:
+ if (Y.getKind() != TemplateArgument::Pack ||
+ X.pack_size() != Y.pack_size())
+ return DeducedTemplateArgument();
+
+ for (TemplateArgument::pack_iterator XA = X.pack_begin(),
+ XAEnd = X.pack_end(),
+ YA = Y.pack_begin();
+ XA != XAEnd; ++XA, ++YA) {
+ if (checkDeducedTemplateArguments(Context,
+ DeducedTemplateArgument(*XA, X.wasDeducedFromArrayBound()),
+ DeducedTemplateArgument(*YA, Y.wasDeducedFromArrayBound()))
+ .isNull())
+ return DeducedTemplateArgument();
+ }
+
+ return X;
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+/// \brief Deduce the value of the given non-type template parameter
+/// from the given constant.
+static Sema::TemplateDeductionResult
+DeduceNonTypeTemplateArgument(Sema &S,
+ NonTypeTemplateParmDecl *NTTP,
+ llvm::APSInt Value, QualType ValueType,
+ bool DeducedFromArrayBound,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ assert(NTTP->getDepth() == 0 &&
+ "Cannot deduce non-type template argument with depth > 0");
+
+ DeducedTemplateArgument NewDeduced(Value, ValueType, DeducedFromArrayBound);
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
+ Deduced[NTTP->getIndex()],
+ NewDeduced);
+ if (Result.isNull()) {
+ Info.Param = NTTP;
+ Info.FirstArg = Deduced[NTTP->getIndex()];
+ Info.SecondArg = NewDeduced;
+ return Sema::TDK_Inconsistent;
+ }
+
+ Deduced[NTTP->getIndex()] = Result;
+ return Sema::TDK_Success;
+}
+
+/// \brief Deduce the value of the given non-type template parameter
+/// from the given type- or value-dependent expression.
+///
+/// \returns true if deduction succeeded, false otherwise.
+static Sema::TemplateDeductionResult
+DeduceNonTypeTemplateArgument(Sema &S,
+ NonTypeTemplateParmDecl *NTTP,
+ Expr *Value,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ assert(NTTP->getDepth() == 0 &&
+ "Cannot deduce non-type template argument with depth > 0");
+ assert((Value->isTypeDependent() || Value->isValueDependent()) &&
+ "Expression template argument must be type- or value-dependent.");
+
+ DeducedTemplateArgument NewDeduced(Value);
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
+ Deduced[NTTP->getIndex()],
+ NewDeduced);
+
+ if (Result.isNull()) {
+ Info.Param = NTTP;
+ Info.FirstArg = Deduced[NTTP->getIndex()];
+ Info.SecondArg = NewDeduced;
+ return Sema::TDK_Inconsistent;
+ }
+
+ Deduced[NTTP->getIndex()] = Result;
+ return Sema::TDK_Success;
+}
+
+/// \brief Deduce the value of the given non-type template parameter
+/// from the given declaration.
+///
+/// \returns true if deduction succeeded, false otherwise.
+static Sema::TemplateDeductionResult
+DeduceNonTypeTemplateArgument(Sema &S,
+ NonTypeTemplateParmDecl *NTTP,
+ Decl *D,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ assert(NTTP->getDepth() == 0 &&
+ "Cannot deduce non-type template argument with depth > 0");
+
+ DeducedTemplateArgument NewDeduced(D? D->getCanonicalDecl() : 0);
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
+ Deduced[NTTP->getIndex()],
+ NewDeduced);
+ if (Result.isNull()) {
+ Info.Param = NTTP;
+ Info.FirstArg = Deduced[NTTP->getIndex()];
+ Info.SecondArg = NewDeduced;
+ return Sema::TDK_Inconsistent;
+ }
+
+ Deduced[NTTP->getIndex()] = Result;
+ return Sema::TDK_Success;
+}
+
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ TemplateName Param,
+ TemplateName Arg,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ TemplateDecl *ParamDecl = Param.getAsTemplateDecl();
+ if (!ParamDecl) {
+ // The parameter type is dependent and is not a template template parameter,
+ // so there is nothing that we can deduce.
+ return Sema::TDK_Success;
+ }
+
+ if (TemplateTemplateParmDecl *TempParam
+ = dyn_cast<TemplateTemplateParmDecl>(ParamDecl)) {
+ DeducedTemplateArgument NewDeduced(S.Context.getCanonicalTemplateName(Arg));
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
+ Deduced[TempParam->getIndex()],
+ NewDeduced);
+ if (Result.isNull()) {
+ Info.Param = TempParam;
+ Info.FirstArg = Deduced[TempParam->getIndex()];
+ Info.SecondArg = NewDeduced;
+ return Sema::TDK_Inconsistent;
+ }
+
+ Deduced[TempParam->getIndex()] = Result;
+ return Sema::TDK_Success;
+ }
+
+ // Verify that the two template names are equivalent.
+ if (S.Context.hasSameTemplateName(Param, Arg))
+ return Sema::TDK_Success;
+
+ // Mismatch of non-dependent template parameter to argument.
+ Info.FirstArg = TemplateArgument(Param);
+ Info.SecondArg = TemplateArgument(Arg);
+ return Sema::TDK_NonDeducedMismatch;
+}
+
+/// \brief Deduce the template arguments by comparing the template parameter
+/// type (which is a template-id) with the template argument type.
+///
+/// \param S the Sema
+///
+/// \param TemplateParams the template parameters that we are deducing
+///
+/// \param Param the parameter type
+///
+/// \param Arg the argument type
+///
+/// \param Info information about the template argument deduction itself
+///
+/// \param Deduced the deduced template arguments
+///
+/// \returns the result of template argument deduction so far. Note that a
+/// "success" result means that template argument deduction has not yet failed,
+/// but it may still fail, later, for other reasons.
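+///
+/// For example, matching the parameter type 'Box<T>' (a hypothetical class
+/// template) against the argument type 'Box<int>' deduces T = int by first
+/// matching the template names and then each template argument in turn.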
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ const TemplateSpecializationType *Param,
+ QualType Arg,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ assert(Arg.isCanonical() && "Argument type must be canonical");
+
+ // Check whether the template argument is a dependent template-id.
+ if (const TemplateSpecializationType *SpecArg
+ = dyn_cast<TemplateSpecializationType>(Arg)) {
+ // Perform template argument deduction for the template name.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArguments(S, TemplateParams,
+ Param->getTemplateName(),
+ SpecArg->getTemplateName(),
+ Info, Deduced))
+ return Result;
+
+
+ // Perform template argument deduction on each template
+ // argument. Ignore any missing/extra arguments, since they could be
+ // filled in by default arguments.
+ return DeduceTemplateArguments(S, TemplateParams,
+ Param->getArgs(), Param->getNumArgs(),
+ SpecArg->getArgs(), SpecArg->getNumArgs(),
+ Info, Deduced,
+ /*NumberOfArgumentsMustMatch=*/false);
+ }
+
+ // If the argument type is a class template specialization, we
+ // perform template argument deduction using its template
+ // arguments.
+ const RecordType *RecordArg = dyn_cast<RecordType>(Arg);
+ if (!RecordArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ ClassTemplateSpecializationDecl *SpecArg
+ = dyn_cast<ClassTemplateSpecializationDecl>(RecordArg->getDecl());
+ if (!SpecArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ // Perform template argument deduction for the template name.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArguments(S,
+ TemplateParams,
+ Param->getTemplateName(),
+ TemplateName(SpecArg->getSpecializedTemplate()),
+ Info, Deduced))
+ return Result;
+
+ // Perform template argument deduction for the template arguments.
+ return DeduceTemplateArguments(S, TemplateParams,
+ Param->getArgs(), Param->getNumArgs(),
+ SpecArg->getTemplateArgs().data(),
+ SpecArg->getTemplateArgs().size(),
+ Info, Deduced);
+}
+
+/// \brief Determines whether the given type is an opaque type that
+/// might be more qualified when instantiated.
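+///
+/// For example, 'typename T::type' might instantiate to 'const int', so a
+/// parameter spelled without qualifiers may still become more qualified when
+/// instantiated; array types are looked through to their element type.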
+static bool IsPossiblyOpaquelyQualifiedType(QualType T) {
+ switch (T->getTypeClass()) {
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::DependentName:
+ case Type::Decltype:
+ case Type::UnresolvedUsing:
+ case Type::TemplateTypeParm:
+ return true;
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ case Type::VariableArray:
+ case Type::DependentSizedArray:
+ return IsPossiblyOpaquelyQualifiedType(
+ cast<ArrayType>(T)->getElementType());
+
+ default:
+ return false;
+ }
+}
+
+/// \brief Retrieve the depth and index of a template parameter.
+static std::pair<unsigned, unsigned>
+getDepthAndIndex(NamedDecl *ND) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ND))
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(ND))
+ return std::make_pair(NTTP->getDepth(), NTTP->getIndex());
+
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(ND);
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+}
+
+/// \brief Retrieve the depth and index of an unexpanded parameter pack.
+static std::pair<unsigned, unsigned>
+getDepthAndIndex(UnexpandedParameterPack UPP) {
+ if (const TemplateTypeParmType *TTP
+ = UPP.first.dyn_cast<const TemplateTypeParmType *>())
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+
+ return getDepthAndIndex(UPP.first.get<NamedDecl *>());
+}
+
+/// \brief Helper function to build a TemplateParameter when we don't
+/// know its type statically.
+static TemplateParameter makeTemplateParameter(Decl *D) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(D))
+ return TemplateParameter(TTP);
+ else if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D))
+ return TemplateParameter(NTTP);
+
+ return TemplateParameter(cast<TemplateTemplateParmDecl>(D));
+}
+
+/// \brief Prepare to perform template argument deduction for all of the
+/// arguments in a set of argument packs.
+static void PrepareArgumentPackDeduction(Sema &S,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ ArrayRef<unsigned> PackIndices,
+ SmallVectorImpl<DeducedTemplateArgument> &SavedPacks,
+ SmallVectorImpl<
+ SmallVector<DeducedTemplateArgument, 4> > &NewlyDeducedPacks) {
+ // Save the deduced template arguments for each parameter pack expanded
+ // by this pack expansion, then clear out the deduction.
+ for (unsigned I = 0, N = PackIndices.size(); I != N; ++I) {
+ // Save the previously-deduced argument pack, then clear it out so that we
+ // can deduce a new argument pack.
+ SavedPacks[I] = Deduced[PackIndices[I]];
+ Deduced[PackIndices[I]] = TemplateArgument();
+
+ // If the template argument pack was explicitly specified, add that to
+ // the set of deduced arguments.
+ const TemplateArgument *ExplicitArgs;
+ unsigned NumExplicitArgs;
+ if (NamedDecl *PartiallySubstitutedPack
+ = S.CurrentInstantiationScope->getPartiallySubstitutedPack(
+ &ExplicitArgs,
+ &NumExplicitArgs)) {
+ if (getDepthAndIndex(PartiallySubstitutedPack).second == PackIndices[I])
+ NewlyDeducedPacks[I].append(ExplicitArgs,
+ ExplicitArgs + NumExplicitArgs);
+ }
+ }
+}
+
+/// \brief Finish template argument deduction for a set of argument packs,
+/// producing the argument packs and checking for consistency with prior
+/// deductions.
+static Sema::TemplateDeductionResult
+FinishArgumentPackDeduction(Sema &S,
+ TemplateParameterList *TemplateParams,
+ bool HasAnyArguments,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ ArrayRef<unsigned> PackIndices,
+ SmallVectorImpl<DeducedTemplateArgument> &SavedPacks,
+ SmallVectorImpl<
+ SmallVector<DeducedTemplateArgument, 4> > &NewlyDeducedPacks,
+ TemplateDeductionInfo &Info) {
+ // Build argument packs for each of the parameter packs expanded by this
+ // pack expansion.
+ for (unsigned I = 0, N = PackIndices.size(); I != N; ++I) {
+ if (HasAnyArguments && NewlyDeducedPacks[I].empty()) {
+ // We were not able to deduce anything for this parameter pack,
+ // so just restore the saved argument pack.
+ Deduced[PackIndices[I]] = SavedPacks[I];
+ continue;
+ }
+
+ DeducedTemplateArgument NewPack;
+
+ if (NewlyDeducedPacks[I].empty()) {
+ // If we deduced an empty argument pack, create it now.
+ NewPack = DeducedTemplateArgument(TemplateArgument(0, 0));
+ } else {
+ TemplateArgument *ArgumentPack
+ = new (S.Context) TemplateArgument [NewlyDeducedPacks[I].size()];
+ std::copy(NewlyDeducedPacks[I].begin(), NewlyDeducedPacks[I].end(),
+ ArgumentPack);
+ NewPack
+ = DeducedTemplateArgument(TemplateArgument(ArgumentPack,
+ NewlyDeducedPacks[I].size()),
+ NewlyDeducedPacks[I][0].wasDeducedFromArrayBound());
+ }
+
+ DeducedTemplateArgument Result
+ = checkDeducedTemplateArguments(S.Context, SavedPacks[I], NewPack);
+ if (Result.isNull()) {
+ Info.Param
+ = makeTemplateParameter(TemplateParams->getParam(PackIndices[I]));
+ Info.FirstArg = SavedPacks[I];
+ Info.SecondArg = NewPack;
+ return Sema::TDK_Inconsistent;
+ }
+
+ Deduced[PackIndices[I]] = Result;
+ }
+
+ return Sema::TDK_Success;
+}
+
+/// \brief Deduce the template arguments by comparing the list of parameter
+/// types to the list of argument types, as in the parameter-type-lists of
+/// function types (C++ [temp.deduct.type]p10).
+///
+/// \param S The semantic analysis object within which we are deducing
+///
+/// \param TemplateParams The template parameters that we are deducing
+///
+/// \param Params The list of parameter types
+///
+/// \param NumParams The number of types in \c Params
+///
+/// \param Args The list of argument types
+///
+/// \param NumArgs The number of types in \c Args
+///
+/// \param Info information about the template argument deduction itself
+///
+/// \param Deduced the deduced template arguments
+///
+/// \param TDF bitwise OR of the TemplateDeductionFlags bits that describe
+/// how template argument deduction is performed.
+///
+/// \param PartialOrdering If true, we are performing template argument
+/// deduction during partial ordering for a call
+/// (C++0x [temp.deduct.partial]).
+///
+/// \param RefParamComparisons If we're performing template argument deduction
+/// in the context of partial ordering, the set of qualifier comparisons.
+///
+/// \returns the result of template argument deduction so far. Note that a
+/// "success" result means that template argument deduction has not yet failed,
+/// but it may still fail, later, for other reasons.
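+///
+/// For example (an illustrative sketch; 'f' and 'g' are hypothetical):
+/// \code
+///   template<typename R, typename ...Ts> void f(R (*)(Ts...));
+///   void g(int, double);
+///   f(&g); // P's parameter-type-list (Ts...) is compared with (int, double),
+///          // deducing R = void and Ts = <int, double>
+/// \endcode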
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ const QualType *Params, unsigned NumParams,
+ const QualType *Args, unsigned NumArgs,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ unsigned TDF,
+ bool PartialOrdering = false,
+ SmallVectorImpl<RefParamPartialOrderingComparison> *
+ RefParamComparisons = 0) {
+ // Fast-path check to see if we have too many/too few arguments.
+ if (NumParams != NumArgs &&
+ !(NumParams && isa<PackExpansionType>(Params[NumParams - 1])) &&
+ !(NumArgs && isa<PackExpansionType>(Args[NumArgs - 1])))
+ return Sema::TDK_NonDeducedMismatch;
+
+ // C++0x [temp.deduct.type]p10:
+ // Similarly, if P has a form that contains (T), then each parameter type
+ // Pi of the respective parameter-type-list of P is compared with the
+ // corresponding parameter type Ai of the corresponding parameter-type-list
+ // of A. [...]
+ unsigned ArgIdx = 0, ParamIdx = 0;
+ for (; ParamIdx != NumParams; ++ParamIdx) {
+ // Check argument types.
+ const PackExpansionType *Expansion
+ = dyn_cast<PackExpansionType>(Params[ParamIdx]);
+ if (!Expansion) {
+ // Simple case: compare the parameter and argument types at this point.
+
+ // Make sure we have an argument.
+ if (ArgIdx >= NumArgs)
+ return Sema::TDK_NonDeducedMismatch;
+
+ if (isa<PackExpansionType>(Args[ArgIdx])) {
+ // C++0x [temp.deduct.type]p22:
+ // If the original function parameter associated with A is a function
+ // parameter pack and the function parameter associated with P is not
+ // a function parameter pack, then template argument deduction fails.
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ Params[ParamIdx], Args[ArgIdx],
+ Info, Deduced, TDF,
+ PartialOrdering,
+ RefParamComparisons))
+ return Result;
+
+ ++ArgIdx;
+ continue;
+ }
+
+ // C++0x [temp.deduct.type]p5:
+ // The non-deduced contexts are:
+ // - A function parameter pack that does not occur at the end of the
+ // parameter-declaration-clause.
+ if (ParamIdx + 1 < NumParams)
+ return Sema::TDK_Success;
+
+ // C++0x [temp.deduct.type]p10:
+ // If the parameter-declaration corresponding to Pi is a function
+ // parameter pack, then the type of its declarator-id is compared with
+ // each remaining parameter type in the parameter-type-list of A. Each
+ // comparison deduces template arguments for subsequent positions in the
+ // template parameter packs expanded by the function parameter pack.
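+ //
+ // For example (an illustrative sketch; 'f' and 'g' are hypothetical):
+ //   template<typename ...Ts> void f(void (*)(int, Ts...));
+ //   void g(int, float, double);
+ //   f(&g); // Ts... is compared with the remaining parameter types
+ //          // (float, double), deducing Ts = <float, double>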
+
+ // Compute the set of template parameter indices that correspond to
+ // parameter packs expanded by the pack expansion.
+ SmallVector<unsigned, 2> PackIndices;
+ QualType Pattern = Expansion->getPattern();
+ {
+ llvm::SmallBitVector SawIndices(TemplateParams->size());
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
+ unsigned Depth, Index;
+ llvm::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
+ if (Depth == 0 && !SawIndices[Index]) {
+ SawIndices[Index] = true;
+ PackIndices.push_back(Index);
+ }
+ }
+ }
+ assert(!PackIndices.empty() && "Pack expansion without unexpanded packs?");
+
+ // Keep track of the deduced template arguments for each parameter pack
+ // expanded by this pack expansion (the outer index) and for each
+ // template argument (the inner SmallVectors).
+ SmallVector<SmallVector<DeducedTemplateArgument, 4>, 2>
+ NewlyDeducedPacks(PackIndices.size());
+ SmallVector<DeducedTemplateArgument, 2>
+ SavedPacks(PackIndices.size());
+ PrepareArgumentPackDeduction(S, Deduced, PackIndices, SavedPacks,
+ NewlyDeducedPacks);
+
+ bool HasAnyArguments = false;
+ for (; ArgIdx < NumArgs; ++ArgIdx) {
+ HasAnyArguments = true;
+
+ // Deduce template arguments from the pattern.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, Pattern,
+ Args[ArgIdx], Info, Deduced,
+ TDF, PartialOrdering,
+ RefParamComparisons))
+ return Result;
+
+ // Capture the deduced template arguments for each parameter pack expanded
+ // by this pack expansion, add them to the list of arguments we've deduced
+ // for that pack, then clear out the deduced argument.
+ for (unsigned I = 0, N = PackIndices.size(); I != N; ++I) {
+ DeducedTemplateArgument &DeducedArg = Deduced[PackIndices[I]];
+ if (!DeducedArg.isNull()) {
+ NewlyDeducedPacks[I].push_back(DeducedArg);
+ DeducedArg = DeducedTemplateArgument();
+ }
+ }
+ }
+
+ // Build argument packs for each of the parameter packs expanded by this
+ // pack expansion.
+ if (Sema::TemplateDeductionResult Result
+ = FinishArgumentPackDeduction(S, TemplateParams, HasAnyArguments,
+ Deduced, PackIndices, SavedPacks,
+ NewlyDeducedPacks, Info))
+ return Result;
+ }
+
+ // Make sure we don't have any extra arguments.
+ if (ArgIdx < NumArgs)
+ return Sema::TDK_NonDeducedMismatch;
+
+ return Sema::TDK_Success;
+}
+
+/// \brief Determine whether the parameter has qualifiers that are either
+/// inconsistent with or a superset of the argument's qualifiers.
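+///
+/// For example (an illustrative sketch; 'f' and 'p' are hypothetical):
+/// \code
+///   template<typename T> void f(const volatile T *);
+///   const int *p;
+///   f(p); // the parameter's pointee 'const volatile T' carries a strict
+///         // superset of the qualifiers of the argument's pointee
+///         // 'const int', so deduction fails (TDK_Underqualified)
+/// \endcode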
+static bool hasInconsistentOrSupersetQualifiersOf(QualType ParamType,
+ QualType ArgType) {
+ Qualifiers ParamQs = ParamType.getQualifiers();
+ Qualifiers ArgQs = ArgType.getQualifiers();
+
+ if (ParamQs == ArgQs)
+ return false;
+
+ // Mismatched (but not missing) Objective-C GC attributes.
+ if (ParamQs.getObjCGCAttr() != ArgQs.getObjCGCAttr() &&
+ ParamQs.hasObjCGCAttr())
+ return true;
+
+ // Mismatched (but not missing) address spaces.
+ if (ParamQs.getAddressSpace() != ArgQs.getAddressSpace() &&
+ ParamQs.hasAddressSpace())
+ return true;
+
+ // Mismatched (but not missing) Objective-C lifetime qualifiers.
+ if (ParamQs.getObjCLifetime() != ArgQs.getObjCLifetime() &&
+ ParamQs.hasObjCLifetime())
+ return true;
+
+ // CVR qualifier superset.
+ return (ParamQs.getCVRQualifiers() != ArgQs.getCVRQualifiers()) &&
+ ((ParamQs.getCVRQualifiers() | ArgQs.getCVRQualifiers())
+ == ParamQs.getCVRQualifiers());
+}
+
+/// \brief Deduce the template arguments by comparing the parameter type and
+/// the argument type (C++ [temp.deduct.type]).
+///
+/// \param S the semantic analysis object within which we are deducing
+///
+/// \param TemplateParams the template parameters that we are deducing
+///
+/// \param ParamIn the parameter type
+///
+/// \param ArgIn the argument type
+///
+/// \param Info information about the template argument deduction itself
+///
+/// \param Deduced the deduced template arguments
+///
+/// \param TDF bitwise OR of the TemplateDeductionFlags bits that describe
+/// how template argument deduction is performed.
+///
+/// \param PartialOrdering Whether we're performing template argument deduction
+/// in the context of partial ordering (C++0x [temp.deduct.partial]).
+///
+/// \param RefParamComparisons If we're performing template argument deduction
+/// in the context of partial ordering, the set of qualifier comparisons.
+///
+/// \returns the result of template argument deduction so far. Note that a
+/// "success" result means that template argument deduction has not yet failed,
+/// but it may still fail, later, for other reasons.
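+///
+/// For example (an illustrative sketch; 'f' and 'ip' are hypothetical):
+/// \code
+///   template<typename T> void f(T *);
+///   int *ip;
+///   f(ip); // P = T*, A = int*; matching the pointee types deduces T = int
+/// \endcode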
+static Sema::TemplateDeductionResult
+DeduceTemplateArgumentsByTypeMatch(Sema &S,
+ TemplateParameterList *TemplateParams,
+ QualType ParamIn, QualType ArgIn,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ unsigned TDF,
+ bool PartialOrdering,
+ SmallVectorImpl<RefParamPartialOrderingComparison> *
+ RefParamComparisons) {
+ // We only want to look at the canonical types, since typedefs and
+ // sugar are not part of template argument deduction.
+ QualType Param = S.Context.getCanonicalType(ParamIn);
+ QualType Arg = S.Context.getCanonicalType(ArgIn);
+
+ // If the argument type is a pack expansion, look at its pattern.
+ // This isn't explicitly called out in the standard, but deducing against
+ // the expansion's pattern is the behavior we need here.
+ if (const PackExpansionType *ArgExpansion
+ = dyn_cast<PackExpansionType>(Arg))
+ Arg = ArgExpansion->getPattern();
+
+ if (PartialOrdering) {
+ // C++0x [temp.deduct.partial]p5:
+ // Before the partial ordering is done, certain transformations are
+ // performed on the types used for partial ordering:
+ // - If P is a reference type, P is replaced by the type referred to.
+ const ReferenceType *ParamRef = Param->getAs<ReferenceType>();
+ if (ParamRef)
+ Param = ParamRef->getPointeeType();
+
+ // - If A is a reference type, A is replaced by the type referred to.
+ const ReferenceType *ArgRef = Arg->getAs<ReferenceType>();
+ if (ArgRef)
+ Arg = ArgRef->getPointeeType();
+
+ if (RefParamComparisons && ParamRef && ArgRef) {
+ // C++0x [temp.deduct.partial]p6:
+ // If both P and A were reference types (before being replaced with the
+ // type referred to above), determine which of the two types (if any) is
+ // more cv-qualified than the other; otherwise the types are considered
+ // to be equally cv-qualified for partial ordering purposes. The result
+ // of this determination will be used below.
+ //
+ // We save this information for later, using it only when deduction
+ // succeeds in both directions.
+ RefParamPartialOrderingComparison Comparison;
+ Comparison.ParamIsRvalueRef = ParamRef->getAs<RValueReferenceType>();
+ Comparison.ArgIsRvalueRef = ArgRef->getAs<RValueReferenceType>();
+ Comparison.Qualifiers = NeitherMoreQualified;
+
+ Qualifiers ParamQuals = Param.getQualifiers();
+ Qualifiers ArgQuals = Arg.getQualifiers();
+ if (ParamQuals.isStrictSupersetOf(ArgQuals))
+ Comparison.Qualifiers = ParamMoreQualified;
+ else if (ArgQuals.isStrictSupersetOf(ParamQuals))
+ Comparison.Qualifiers = ArgMoreQualified;
+ RefParamComparisons->push_back(Comparison);
+ }
+
+ // C++0x [temp.deduct.partial]p7:
+ // Remove any top-level cv-qualifiers:
+ // - If P is a cv-qualified type, P is replaced by the cv-unqualified
+ // version of P.
+ Param = Param.getUnqualifiedType();
+ // - If A is a cv-qualified type, A is replaced by the cv-unqualified
+ // version of A.
+ Arg = Arg.getUnqualifiedType();
+ } else {
+ // C++0x [temp.deduct.call]p4 bullet 1:
+ // - If the original P is a reference type, the deduced A (i.e., the type
+ // referred to by the reference) can be more cv-qualified than the
+ // transformed A.
+ if (TDF & TDF_ParamWithReferenceType) {
+ Qualifiers Quals;
+ QualType UnqualParam = S.Context.getUnqualifiedArrayType(Param, Quals);
+ Quals.setCVRQualifiers(Quals.getCVRQualifiers() &
+ Arg.getCVRQualifiers());
+ Param = S.Context.getQualifiedType(UnqualParam, Quals);
+ }
+
+ if ((TDF & TDF_TopLevelParameterTypeList) && !Param->isFunctionType()) {
+ // C++0x [temp.deduct.type]p10:
+ // If P and A are function types that originated from deduction when
+ // taking the address of a function template (14.8.2.2) or when deducing
+ // template arguments from a function declaration (14.8.2.6) and Pi and
+ // Ai are parameters of the top-level parameter-type-list of P and A,
+ // respectively, Pi is adjusted if it is an rvalue reference to a
+ // cv-unqualified template parameter and Ai is an lvalue reference, in
+ // which case the type of Pi is changed to be the template parameter
+ // type (i.e., T&& is changed to simply T). [ Note: As a result, when
+ // Pi is T&& and Ai is X&, the adjusted Pi will be T, causing T to be
+ // deduced as X&. - end note ]
+ TDF &= ~TDF_TopLevelParameterTypeList;
+
+ if (const RValueReferenceType *ParamRef
+ = Param->getAs<RValueReferenceType>()) {
+ if (isa<TemplateTypeParmType>(ParamRef->getPointeeType()) &&
+ !ParamRef->getPointeeType().getQualifiers())
+ if (Arg->isLValueReferenceType())
+ Param = ParamRef->getPointeeType();
+ }
+ }
+ }
+
+ // C++ [temp.deduct.type]p9:
+ // A template type argument T, a template template argument TT or a
+ // template non-type argument i can be deduced if P and A have one of
+ // the following forms:
+ //
+ // T
+ // cv-list T
+ if (const TemplateTypeParmType *TemplateTypeParm
+ = Param->getAs<TemplateTypeParmType>()) {
+ // Just skip any attempts to deduce from a placeholder type.
+ if (Arg->isPlaceholderType())
+ return Sema::TDK_Success;
+
+ unsigned Index = TemplateTypeParm->getIndex();
+ bool RecanonicalizeArg = false;
+
+ // If the argument type is an array type, move the qualifiers up to the
+ // top level, so they can be matched with the qualifiers on the parameter.
+ if (isa<ArrayType>(Arg)) {
+ Qualifiers Quals;
+ Arg = S.Context.getUnqualifiedArrayType(Arg, Quals);
+ if (Quals) {
+ Arg = S.Context.getQualifiedType(Arg, Quals);
+ RecanonicalizeArg = true;
+ }
+ }
+
+ // The argument type cannot be less qualified than the parameter
+ // type.
+ if (!(TDF & TDF_IgnoreQualifiers) &&
+ hasInconsistentOrSupersetQualifiersOf(Param, Arg)) {
+ Info.Param = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index));
+ Info.FirstArg = TemplateArgument(Param);
+ Info.SecondArg = TemplateArgument(Arg);
+ return Sema::TDK_Underqualified;
+ }
+
+ assert(TemplateTypeParm->getDepth() == 0 && "Can't deduce with depth > 0");
+ assert(Arg != S.Context.OverloadTy && "Unresolved overloaded function");
+ QualType DeducedType = Arg;
+
+ // Remove any qualifiers on the parameter from the deduced type.
+ // We checked the qualifiers for consistency above.
+ Qualifiers DeducedQs = DeducedType.getQualifiers();
+ Qualifiers ParamQs = Param.getQualifiers();
+ DeducedQs.removeCVRQualifiers(ParamQs.getCVRQualifiers());
+ if (ParamQs.hasObjCGCAttr())
+ DeducedQs.removeObjCGCAttr();
+ if (ParamQs.hasAddressSpace())
+ DeducedQs.removeAddressSpace();
+ if (ParamQs.hasObjCLifetime())
+ DeducedQs.removeObjCLifetime();
+
+ // Objective-C ARC:
+ // If template deduction would produce a lifetime qualifier on a type
+ // that is not a lifetime type, template argument deduction fails.
+ if (ParamQs.hasObjCLifetime() && !DeducedType->isObjCLifetimeType() &&
+ !DeducedType->isDependentType()) {
+ Info.Param = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index));
+ Info.FirstArg = TemplateArgument(Param);
+ Info.SecondArg = TemplateArgument(Arg);
+ return Sema::TDK_Underqualified;
+ }
+
+ // Objective-C ARC:
+ // If template deduction would produce an argument type with lifetime type
+ // but no lifetime qualifier, the __strong lifetime qualifier is inferred.
+ if (S.getLangOpts().ObjCAutoRefCount &&
+ DeducedType->isObjCLifetimeType() &&
+ !DeducedQs.hasObjCLifetime())
+ DeducedQs.setObjCLifetime(Qualifiers::OCL_Strong);
+
+ DeducedType = S.Context.getQualifiedType(DeducedType.getUnqualifiedType(),
+ DeducedQs);
+
+ if (RecanonicalizeArg)
+ DeducedType = S.Context.getCanonicalType(DeducedType);
+
+ DeducedTemplateArgument NewDeduced(DeducedType);
+ DeducedTemplateArgument Result = checkDeducedTemplateArguments(S.Context,
+ Deduced[Index],
+ NewDeduced);
+ if (Result.isNull()) {
+ Info.Param = cast<TemplateTypeParmDecl>(TemplateParams->getParam(Index));
+ Info.FirstArg = Deduced[Index];
+ Info.SecondArg = NewDeduced;
+ return Sema::TDK_Inconsistent;
+ }
+
+ Deduced[Index] = Result;
+ return Sema::TDK_Success;
+ }
+
+ // Set up the template argument deduction information for a failure.
+ Info.FirstArg = TemplateArgument(ParamIn);
+ Info.SecondArg = TemplateArgument(ArgIn);
+
+ // If the parameter is an already-substituted template parameter
+ // pack, do nothing: we don't know which of its arguments to look
+ // at, so we have to wait until all of the parameter packs in this
+ // expansion have arguments.
+ if (isa<SubstTemplateTypeParmPackType>(Param))
+ return Sema::TDK_Success;
+
+ // Check the cv-qualifiers on the parameter and argument types.
+ if (!(TDF & TDF_IgnoreQualifiers)) {
+ if (TDF & TDF_ParamWithReferenceType) {
+ if (hasInconsistentOrSupersetQualifiersOf(Param, Arg))
+ return Sema::TDK_NonDeducedMismatch;
+ } else if (!IsPossiblyOpaquelyQualifiedType(Param)) {
+ if (Param.getCVRQualifiers() != Arg.getCVRQualifiers())
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ // If the parameter type is not dependent, there is nothing to deduce.
+ if (!Param->isDependentType()) {
+ if (!(TDF & TDF_SkipNonDependent) && Param != Arg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ return Sema::TDK_Success;
+ }
+ } else if (!Param->isDependentType() &&
+ Param.getUnqualifiedType() == Arg.getUnqualifiedType()) {
+ return Sema::TDK_Success;
+ }
+
+ switch (Param->getTypeClass()) {
+ // Non-canonical types cannot appear here.
+#define NON_CANONICAL_TYPE(Class, Base) \
+ case Type::Class: llvm_unreachable("deducing non-canonical type: " #Class);
+#define TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+
+ case Type::TemplateTypeParm:
+ case Type::SubstTemplateTypeParmPack:
+ llvm_unreachable("Type nodes handled above");
+
+ // These types cannot be dependent, so simply check whether the types are
+ // the same.
+ case Type::Builtin:
+ case Type::VariableArray:
+ case Type::Vector:
+ case Type::FunctionNoProto:
+ case Type::Record:
+ case Type::Enum:
+ case Type::ObjCObject:
+ case Type::ObjCInterface:
+ case Type::ObjCObjectPointer: {
+ if (TDF & TDF_SkipNonDependent)
+ return Sema::TDK_Success;
+
+ if (TDF & TDF_IgnoreQualifiers) {
+ Param = Param.getUnqualifiedType();
+ Arg = Arg.getUnqualifiedType();
+ }
+
+ return Param == Arg? Sema::TDK_Success : Sema::TDK_NonDeducedMismatch;
+ }
+
+ // _Complex T [placeholder extension]
+ case Type::Complex:
+ if (const ComplexType *ComplexArg = Arg->getAs<ComplexType>())
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ cast<ComplexType>(Param)->getElementType(),
+ ComplexArg->getElementType(),
+ Info, Deduced, TDF);
+
+ return Sema::TDK_NonDeducedMismatch;
+
+ // _Atomic T [extension]
+ case Type::Atomic:
+ if (const AtomicType *AtomicArg = Arg->getAs<AtomicType>())
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ cast<AtomicType>(Param)->getValueType(),
+ AtomicArg->getValueType(),
+ Info, Deduced, TDF);
+
+ return Sema::TDK_NonDeducedMismatch;
+
+ // T *
+ case Type::Pointer: {
+ QualType PointeeType;
+ if (const PointerType *PointerArg = Arg->getAs<PointerType>()) {
+ PointeeType = PointerArg->getPointeeType();
+ } else if (const ObjCObjectPointerType *PointerArg
+ = Arg->getAs<ObjCObjectPointerType>()) {
+ PointeeType = PointerArg->getPointeeType();
+ } else {
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ unsigned SubTDF = TDF & (TDF_IgnoreQualifiers | TDF_DerivedClass);
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ cast<PointerType>(Param)->getPointeeType(),
+ PointeeType,
+ Info, Deduced, SubTDF);
+ }
+
+ // T &
+ case Type::LValueReference: {
+ const LValueReferenceType *ReferenceArg = Arg->getAs<LValueReferenceType>();
+ if (!ReferenceArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ cast<LValueReferenceType>(Param)->getPointeeType(),
+ ReferenceArg->getPointeeType(), Info, Deduced, 0);
+ }
+
+ // T && [C++0x]
+ case Type::RValueReference: {
+ const RValueReferenceType *ReferenceArg = Arg->getAs<RValueReferenceType>();
+ if (!ReferenceArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ cast<RValueReferenceType>(Param)->getPointeeType(),
+ ReferenceArg->getPointeeType(),
+ Info, Deduced, 0);
+ }
+
+ // T [] (implied, but not stated explicitly)
+ case Type::IncompleteArray: {
+ const IncompleteArrayType *IncompleteArrayArg =
+ S.Context.getAsIncompleteArrayType(Arg);
+ if (!IncompleteArrayArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ unsigned SubTDF = TDF & TDF_IgnoreQualifiers;
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ S.Context.getAsIncompleteArrayType(Param)->getElementType(),
+ IncompleteArrayArg->getElementType(),
+ Info, Deduced, SubTDF);
+ }
+
+ // T [integer-constant]
+ case Type::ConstantArray: {
+ const ConstantArrayType *ConstantArrayArg =
+ S.Context.getAsConstantArrayType(Arg);
+ if (!ConstantArrayArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ const ConstantArrayType *ConstantArrayParm =
+ S.Context.getAsConstantArrayType(Param);
+ if (ConstantArrayArg->getSize() != ConstantArrayParm->getSize())
+ return Sema::TDK_NonDeducedMismatch;
+
+ unsigned SubTDF = TDF & TDF_IgnoreQualifiers;
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ ConstantArrayParm->getElementType(),
+ ConstantArrayArg->getElementType(),
+ Info, Deduced, SubTDF);
+ }
+
+ // type [i]
+ case Type::DependentSizedArray: {
+ const ArrayType *ArrayArg = S.Context.getAsArrayType(Arg);
+ if (!ArrayArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ unsigned SubTDF = TDF & TDF_IgnoreQualifiers;
+
+ // Check the element type of the arrays
+ const DependentSizedArrayType *DependentArrayParm
+ = S.Context.getAsDependentSizedArrayType(Param);
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ DependentArrayParm->getElementType(),
+ ArrayArg->getElementType(),
+ Info, Deduced, SubTDF))
+ return Result;
+
+ // Determine whether the array bound is something we can deduce.
+ NonTypeTemplateParmDecl *NTTP
+ = getDeducedParameterFromExpr(DependentArrayParm->getSizeExpr());
+ if (!NTTP)
+ return Sema::TDK_Success;
+
+ // We can perform template argument deduction for the given non-type
+ // template parameter.
+ assert(NTTP->getDepth() == 0 &&
+ "Cannot deduce non-type template argument at depth > 0");
+ if (const ConstantArrayType *ConstantArrayArg
+ = dyn_cast<ConstantArrayType>(ArrayArg)) {
+ llvm::APSInt Size(ConstantArrayArg->getSize());
+ return DeduceNonTypeTemplateArgument(S, NTTP, Size,
+ S.Context.getSizeType(),
+ /*ArrayBound=*/true,
+ Info, Deduced);
+ }
+ if (const DependentSizedArrayType *DependentArrayArg
+ = dyn_cast<DependentSizedArrayType>(ArrayArg))
+ if (DependentArrayArg->getSizeExpr())
+ return DeduceNonTypeTemplateArgument(S, NTTP,
+ DependentArrayArg->getSizeExpr(),
+ Info, Deduced);
+
+ // Incomplete type does not match a dependently-sized array type
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ // type(*)(T)
+ // T(*)()
+ // T(*)(T)
+ case Type::FunctionProto: {
+ unsigned SubTDF = TDF & TDF_TopLevelParameterTypeList;
+ const FunctionProtoType *FunctionProtoArg =
+ dyn_cast<FunctionProtoType>(Arg);
+ if (!FunctionProtoArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ const FunctionProtoType *FunctionProtoParam =
+ cast<FunctionProtoType>(Param);
+
+ if (FunctionProtoParam->getTypeQuals()
+ != FunctionProtoArg->getTypeQuals() ||
+ FunctionProtoParam->getRefQualifier()
+ != FunctionProtoArg->getRefQualifier() ||
+ FunctionProtoParam->isVariadic() != FunctionProtoArg->isVariadic())
+ return Sema::TDK_NonDeducedMismatch;
+
+ // Check return types.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ FunctionProtoParam->getResultType(),
+ FunctionProtoArg->getResultType(),
+ Info, Deduced, 0))
+ return Result;
+
+ return DeduceTemplateArguments(S, TemplateParams,
+ FunctionProtoParam->arg_type_begin(),
+ FunctionProtoParam->getNumArgs(),
+ FunctionProtoArg->arg_type_begin(),
+ FunctionProtoArg->getNumArgs(),
+ Info, Deduced, SubTDF);
+ }
+
+ case Type::InjectedClassName: {
+ // Treat a template's injected-class-name as if the template
+ // specialization type had been used.
+ Param = cast<InjectedClassNameType>(Param)
+ ->getInjectedSpecializationType();
+ assert(isa<TemplateSpecializationType>(Param) &&
+ "injected class name is not a template specialization type");
+ // fall through
+ }
+
+ // template-name<T> (where template-name refers to a class template)
+ // template-name<i>
+ // TT<T>
+ // TT<i>
+ // TT<>
+ case Type::TemplateSpecialization: {
+ const TemplateSpecializationType *SpecParam
+ = cast<TemplateSpecializationType>(Param);
+
+ // Try to deduce template arguments from the template-id.
+ Sema::TemplateDeductionResult Result
+ = DeduceTemplateArguments(S, TemplateParams, SpecParam, Arg,
+ Info, Deduced);
+
+ if (Result && (TDF & TDF_DerivedClass)) {
+ // C++ [temp.deduct.call]p3b3:
+ // If P is a class, and P has the form template-id, then A can be a
+ // derived class of the deduced A. Likewise, if P is a pointer to a
+ // class of the form template-id, A can be a pointer to a derived
+ // class pointed to by the deduced A.
+ //
+ // More importantly:
+ // These alternatives are considered only if type deduction would
+ // otherwise fail.
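+ //
+ // For example (an illustrative sketch; 'B', 'D', and 'f' are hypothetical):
+ //   template<typename T> struct B { };
+ //   struct D : B<int> { };
+ //   template<typename T> void f(B<T>);
+ //   f(D()); // deduction against D itself fails, but succeeds against the
+ //           // base class B<int>, so T is deduced as int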
+ if (const RecordType *RecordT = Arg->getAs<RecordType>()) {
+ // We cannot inspect base classes as part of deduction when the type
+ // is incomplete, so either instantiate any templates necessary to
+ // complete the type, or skip over it if it cannot be completed.
+ if (S.RequireCompleteType(Info.getLocation(), Arg, 0))
+ return Result;
+
+ // Use data recursion to crawl through the list of base classes.
+ // Visited contains the set of nodes we have already visited, while
+ // ToVisit is our stack of records that we still need to visit.
+ llvm::SmallPtrSet<const RecordType *, 8> Visited;
+ SmallVector<const RecordType *, 8> ToVisit;
+ ToVisit.push_back(RecordT);
+ bool Successful = false;
+ SmallVector<DeducedTemplateArgument, 8> DeducedOrig(Deduced.begin(),
+ Deduced.end());
+ while (!ToVisit.empty()) {
+ // Retrieve the next class in the inheritance hierarchy.
+ const RecordType *NextT = ToVisit.back();
+ ToVisit.pop_back();
+
+ // If we have already seen this type, skip it.
+ if (!Visited.insert(NextT))
+ continue;
+
+ // If this is a base class, try to perform template argument
+ // deduction from it.
+ if (NextT != RecordT) {
+ Sema::TemplateDeductionResult BaseResult
+ = DeduceTemplateArguments(S, TemplateParams, SpecParam,
+ QualType(NextT, 0), Info, Deduced);
+
+ // If template argument deduction for this base was successful,
+ // note that we had some success. Otherwise, ignore any deductions
+ // from this base class.
+ if (BaseResult == Sema::TDK_Success) {
+ Successful = true;
+ DeducedOrig.clear();
+ DeducedOrig.append(Deduced.begin(), Deduced.end());
+ }
+ else
+ Deduced = DeducedOrig;
+ }
+
+ // Visit base classes
+ CXXRecordDecl *Next = cast<CXXRecordDecl>(NextT->getDecl());
+ for (CXXRecordDecl::base_class_iterator Base = Next->bases_begin(),
+ BaseEnd = Next->bases_end();
+ Base != BaseEnd; ++Base) {
+ assert(Base->getType()->isRecordType() &&
+ "Base class that isn't a record?");
+ ToVisit.push_back(Base->getType()->getAs<RecordType>());
+ }
+ }
+
+ if (Successful)
+ return Sema::TDK_Success;
+ }
+
+ }
+
+ return Result;
+ }
+
+ // T type::*
+ // T T::*
+ // T (type::*)()
+ // type (T::*)()
+ // type (type::*)(T)
+ // type (T::*)(T)
+ // T (type::*)(T)
+ // T (T::*)()
+ // T (T::*)(T)
+ case Type::MemberPointer: {
+ const MemberPointerType *MemPtrParam = cast<MemberPointerType>(Param);
+ const MemberPointerType *MemPtrArg = dyn_cast<MemberPointerType>(Arg);
+ if (!MemPtrArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ MemPtrParam->getPointeeType(),
+ MemPtrArg->getPointeeType(),
+ Info, Deduced,
+ TDF & TDF_IgnoreQualifiers))
+ return Result;
+
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ QualType(MemPtrParam->getClass(), 0),
+ QualType(MemPtrArg->getClass(), 0),
+ Info, Deduced,
+ TDF & TDF_IgnoreQualifiers);
+ }
+
+ // (clang extension)
+ //
+ // type(^)(T)
+ // T(^)()
+ // T(^)(T)
+ case Type::BlockPointer: {
+ const BlockPointerType *BlockPtrParam = cast<BlockPointerType>(Param);
+ const BlockPointerType *BlockPtrArg = dyn_cast<BlockPointerType>(Arg);
+
+ if (!BlockPtrArg)
+ return Sema::TDK_NonDeducedMismatch;
+
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ BlockPtrParam->getPointeeType(),
+ BlockPtrArg->getPointeeType(),
+ Info, Deduced, 0);
+ }
+
+ // (clang extension)
+ //
+ // T __attribute__((ext_vector_type(<integral constant>)))
+ case Type::ExtVector: {
+ const ExtVectorType *VectorParam = cast<ExtVectorType>(Param);
+ if (const ExtVectorType *VectorArg = dyn_cast<ExtVectorType>(Arg)) {
+ // Make sure that the vectors have the same number of elements.
+ if (VectorParam->getNumElements() != VectorArg->getNumElements())
+ return Sema::TDK_NonDeducedMismatch;
+
+ // Perform deduction on the element types.
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ VectorParam->getElementType(),
+ VectorArg->getElementType(),
+ Info, Deduced, TDF);
+ }
+
+ if (const DependentSizedExtVectorType *VectorArg
+ = dyn_cast<DependentSizedExtVectorType>(Arg)) {
+ // We can't check the number of elements, since the argument has a
+ // dependent number of elements. This can only occur during partial
+ // ordering.
+
+ // Perform deduction on the element types.
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ VectorParam->getElementType(),
+ VectorArg->getElementType(),
+ Info, Deduced, TDF);
+ }
+
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ // (clang extension)
+ //
+ // T __attribute__((ext_vector_type(N)))
+ case Type::DependentSizedExtVector: {
+ const DependentSizedExtVectorType *VectorParam
+ = cast<DependentSizedExtVectorType>(Param);
+
+ if (const ExtVectorType *VectorArg = dyn_cast<ExtVectorType>(Arg)) {
+ // Perform deduction on the element types.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ VectorParam->getElementType(),
+ VectorArg->getElementType(),
+ Info, Deduced, TDF))
+ return Result;
+
+ // Perform deduction on the vector size, if we can.
+ NonTypeTemplateParmDecl *NTTP
+ = getDeducedParameterFromExpr(VectorParam->getSizeExpr());
+ if (!NTTP)
+ return Sema::TDK_Success;
+
+ llvm::APSInt ArgSize(S.Context.getTypeSize(S.Context.IntTy), false);
+ ArgSize = VectorArg->getNumElements();
+ return DeduceNonTypeTemplateArgument(S, NTTP, ArgSize, S.Context.IntTy,
+ false, Info, Deduced);
+ }
+
+ if (const DependentSizedExtVectorType *VectorArg
+ = dyn_cast<DependentSizedExtVectorType>(Arg)) {
+ // Perform deduction on the element types.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ VectorParam->getElementType(),
+ VectorArg->getElementType(),
+ Info, Deduced, TDF))
+ return Result;
+
+ // Perform deduction on the vector size, if we can.
+ NonTypeTemplateParmDecl *NTTP
+ = getDeducedParameterFromExpr(VectorParam->getSizeExpr());
+ if (!NTTP)
+ return Sema::TDK_Success;
+
+ return DeduceNonTypeTemplateArgument(S, NTTP, VectorArg->getSizeExpr(),
+ Info, Deduced);
+ }
+
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ case Type::TypeOfExpr:
+ case Type::TypeOf:
+ case Type::DependentName:
+ case Type::UnresolvedUsing:
+ case Type::Decltype:
+ case Type::UnaryTransform:
+ case Type::Auto:
+ case Type::DependentTemplateSpecialization:
+ case Type::PackExpansion:
+ // No template argument deduction for these types
+ return Sema::TDK_Success;
+ }
+
+ llvm_unreachable("Invalid Type Class!");
+}
+
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ const TemplateArgument &Param,
+ TemplateArgument Arg,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ // If the template argument is a pack expansion, perform template argument
+ // deduction against the pattern of that expansion. This only occurs during
+ // partial ordering.
+ if (Arg.isPackExpansion())
+ Arg = Arg.getPackExpansionPattern();
+
+ switch (Param.getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Null template argument in parameter list");
+
+ case TemplateArgument::Type:
+ if (Arg.getKind() == TemplateArgument::Type)
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ Param.getAsType(),
+ Arg.getAsType(),
+ Info, Deduced, 0);
+ Info.FirstArg = Param;
+ Info.SecondArg = Arg;
+ return Sema::TDK_NonDeducedMismatch;
+
+ case TemplateArgument::Template:
+ if (Arg.getKind() == TemplateArgument::Template)
+ return DeduceTemplateArguments(S, TemplateParams,
+ Param.getAsTemplate(),
+ Arg.getAsTemplate(), Info, Deduced);
+ Info.FirstArg = Param;
+ Info.SecondArg = Arg;
+ return Sema::TDK_NonDeducedMismatch;
+
+ case TemplateArgument::TemplateExpansion:
+ llvm_unreachable("caller should handle pack expansions");
+
+ case TemplateArgument::Declaration:
+ if (Arg.getKind() == TemplateArgument::Declaration &&
+ isSameDeclaration(Param.getAsDecl(), Arg.getAsDecl()))
+ return Sema::TDK_Success;
+
+ Info.FirstArg = Param;
+ Info.SecondArg = Arg;
+ return Sema::TDK_NonDeducedMismatch;
+
+ case TemplateArgument::Integral:
+ if (Arg.getKind() == TemplateArgument::Integral) {
+ if (hasSameExtendedValue(*Param.getAsIntegral(), *Arg.getAsIntegral()))
+ return Sema::TDK_Success;
+
+ Info.FirstArg = Param;
+ Info.SecondArg = Arg;
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ if (Arg.getKind() == TemplateArgument::Expression) {
+ Info.FirstArg = Param;
+ Info.SecondArg = Arg;
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ Info.FirstArg = Param;
+ Info.SecondArg = Arg;
+ return Sema::TDK_NonDeducedMismatch;
+
+ case TemplateArgument::Expression: {
+ if (NonTypeTemplateParmDecl *NTTP
+ = getDeducedParameterFromExpr(Param.getAsExpr())) {
+ if (Arg.getKind() == TemplateArgument::Integral)
+ return DeduceNonTypeTemplateArgument(S, NTTP,
+ *Arg.getAsIntegral(),
+ Arg.getIntegralType(),
+ /*ArrayBound=*/false,
+ Info, Deduced);
+ if (Arg.getKind() == TemplateArgument::Expression)
+ return DeduceNonTypeTemplateArgument(S, NTTP, Arg.getAsExpr(),
+ Info, Deduced);
+ if (Arg.getKind() == TemplateArgument::Declaration)
+ return DeduceNonTypeTemplateArgument(S, NTTP, Arg.getAsDecl(),
+ Info, Deduced);
+
+ Info.FirstArg = Param;
+ Info.SecondArg = Arg;
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ // Can't deduce anything, but that's okay.
+ return Sema::TDK_Success;
+ }
+ case TemplateArgument::Pack:
+ llvm_unreachable("Argument packs should be expanded by the caller!");
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+/// \brief Determine whether there is a template argument to be used for
+/// deduction.
+///
+/// This routine "expands" argument packs in-place, overriding its input
+/// parameters so that \c Args[ArgIdx] will be the available template argument.
+///
+/// \returns true if there is another template argument (which will be at
+/// \c Args[ArgIdx]), false otherwise.
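+///
+/// For example (an illustrative sketch): given the template argument list
+/// \code
+///   <int, {float, double}>   // where {...} denotes an argument pack
+/// \endcode
+/// once \c ArgIdx reaches the trailing pack, \c Args and \c NumArgs are
+/// rebound to the pack's contents, so the next available argument is 'float'.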
+static bool hasTemplateArgumentForDeduction(const TemplateArgument *&Args,
+ unsigned &ArgIdx,
+ unsigned &NumArgs) {
+ if (ArgIdx == NumArgs)
+ return false;
+
+ const TemplateArgument &Arg = Args[ArgIdx];
+ if (Arg.getKind() != TemplateArgument::Pack)
+ return true;
+
+ assert(ArgIdx == NumArgs - 1 && "Pack not at the end of argument list?");
+ Args = Arg.pack_begin();
+ NumArgs = Arg.pack_size();
+ ArgIdx = 0;
+ return ArgIdx < NumArgs;
+}
+
+/// \brief Determine whether the given set of template arguments has a pack
+/// expansion that is not the last template argument.
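+///
+/// For example (an illustrative sketch): in a template argument list written as
+/// \code
+///   <Ts..., int>
+/// \endcode
+/// the pack expansion Ts... is not the last argument, so per
+/// C++0x [temp.deduct.type]p9 the entire list is a non-deduced context.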
+static bool hasPackExpansionBeforeEnd(const TemplateArgument *Args,
+ unsigned NumArgs) {
+ unsigned ArgIdx = 0;
+ while (ArgIdx < NumArgs) {
+ const TemplateArgument &Arg = Args[ArgIdx];
+
+ // Unwrap argument packs.
+ if (Args[ArgIdx].getKind() == TemplateArgument::Pack) {
+ Args = Arg.pack_begin();
+ NumArgs = Arg.pack_size();
+ ArgIdx = 0;
+ continue;
+ }
+
+ ++ArgIdx;
+ if (ArgIdx == NumArgs)
+ return false;
+
+ if (Arg.isPackExpansion())
+ return true;
+ }
+
+ return false;
+}
+
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ const TemplateArgument *Params, unsigned NumParams,
+ const TemplateArgument *Args, unsigned NumArgs,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ bool NumberOfArgumentsMustMatch) {
+ // C++0x [temp.deduct.type]p9:
+ // If the template argument list of P contains a pack expansion that is not
+ // the last template argument, the entire template argument list is a
+ // non-deduced context.
+ if (hasPackExpansionBeforeEnd(Params, NumParams))
+ return Sema::TDK_Success;
+
+ // C++0x [temp.deduct.type]p9:
+ // If P has a form that contains <T> or <i>, then each argument Pi of the
+ // respective template argument list P is compared with the corresponding
+ // argument Ai of the corresponding template argument list of A.
+ unsigned ArgIdx = 0, ParamIdx = 0;
+ for (; hasTemplateArgumentForDeduction(Params, ParamIdx, NumParams);
+ ++ParamIdx) {
+ if (!Params[ParamIdx].isPackExpansion()) {
+ // The simple case: deduce template arguments by matching Pi and Ai.
+
+ // Check whether we have enough arguments.
+ if (!hasTemplateArgumentForDeduction(Args, ArgIdx, NumArgs))
+ return NumberOfArgumentsMustMatch? Sema::TDK_NonDeducedMismatch
+ : Sema::TDK_Success;
+
+ if (Args[ArgIdx].isPackExpansion()) {
+ // FIXME: We follow the logic of C++0x [temp.deduct.type]p22 here,
+ // but applied to pack expansions that are template arguments.
+ return Sema::TDK_NonDeducedMismatch;
+ }
+
+ // Perform deduction for this Pi/Ai pair.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArguments(S, TemplateParams,
+ Params[ParamIdx], Args[ArgIdx],
+ Info, Deduced))
+ return Result;
+
+ // Move to the next argument.
+ ++ArgIdx;
+ continue;
+ }
+
+ // The parameter is a pack expansion.
+
+ // C++0x [temp.deduct.type]p9:
+ // If Pi is a pack expansion, then the pattern of Pi is compared with
+ // each remaining argument in the template argument list of A. Each
+ // comparison deduces template arguments for subsequent positions in the
+ // template parameter packs expanded by Pi.
+ TemplateArgument Pattern = Params[ParamIdx].getPackExpansionPattern();
+
+ // Compute the set of template parameter indices that correspond to
+ // parameter packs expanded by the pack expansion.
+ SmallVector<unsigned, 2> PackIndices;
+ {
+ llvm::SmallBitVector SawIndices(TemplateParams->size());
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ S.collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
+ unsigned Depth, Index;
+ llvm::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
+ if (Depth == 0 && !SawIndices[Index]) {
+ SawIndices[Index] = true;
+ PackIndices.push_back(Index);
+ }
+ }
+ }
+ assert(!PackIndices.empty() && "Pack expansion without unexpanded packs?");
+
+ // FIXME: If there are no remaining arguments, we can bail out early
+ // and set any deduced parameter packs to an empty argument pack.
+ // The latter part of this is a (minor) correctness issue.
+
+ // Save the deduced template arguments for each parameter pack expanded
+ // by this pack expansion, then clear out the deduction.
+ SmallVector<DeducedTemplateArgument, 2>
+ SavedPacks(PackIndices.size());
+ SmallVector<SmallVector<DeducedTemplateArgument, 4>, 2>
+ NewlyDeducedPacks(PackIndices.size());
+ PrepareArgumentPackDeduction(S, Deduced, PackIndices, SavedPacks,
+ NewlyDeducedPacks);
+
+ // Keep track of the deduced template arguments for each parameter pack
+ // expanded by this pack expansion (the outer index) and for each
+ // template argument (the inner SmallVectors).
+ bool HasAnyArguments = false;
+ while (hasTemplateArgumentForDeduction(Args, ArgIdx, NumArgs)) {
+ HasAnyArguments = true;
+
+ // Deduce template arguments from the pattern.
+ if (Sema::TemplateDeductionResult Result
+ = DeduceTemplateArguments(S, TemplateParams, Pattern, Args[ArgIdx],
+ Info, Deduced))
+ return Result;
+
+ // Capture the deduced template arguments for each parameter pack expanded
+ // by this pack expansion, add them to the list of arguments we've deduced
+ // for that pack, then clear out the deduced argument.
+ for (unsigned I = 0, N = PackIndices.size(); I != N; ++I) {
+ DeducedTemplateArgument &DeducedArg = Deduced[PackIndices[I]];
+ if (!DeducedArg.isNull()) {
+ NewlyDeducedPacks[I].push_back(DeducedArg);
+ DeducedArg = DeducedTemplateArgument();
+ }
+ }
+
+ ++ArgIdx;
+ }
+
+ // Build argument packs for each of the parameter packs expanded by this
+ // pack expansion.
+ if (Sema::TemplateDeductionResult Result
+ = FinishArgumentPackDeduction(S, TemplateParams, HasAnyArguments,
+ Deduced, PackIndices, SavedPacks,
+ NewlyDeducedPacks, Info))
+ return Result;
+ }
+
+ // If there is an argument remaining, then we had too many arguments.
+ if (NumberOfArgumentsMustMatch &&
+ hasTemplateArgumentForDeduction(Args, ArgIdx, NumArgs))
+ return Sema::TDK_NonDeducedMismatch;
+
+ return Sema::TDK_Success;
+}
+
+static Sema::TemplateDeductionResult
+DeduceTemplateArguments(Sema &S,
+ TemplateParameterList *TemplateParams,
+ const TemplateArgumentList &ParamList,
+ const TemplateArgumentList &ArgList,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced) {
+ return DeduceTemplateArguments(S, TemplateParams,
+ ParamList.data(), ParamList.size(),
+ ArgList.data(), ArgList.size(),
+ Info, Deduced);
+}
+
+/// \brief Determine whether two template arguments are the same.
+static bool isSameTemplateArg(ASTContext &Context,
+ const TemplateArgument &X,
+ const TemplateArgument &Y) {
+ if (X.getKind() != Y.getKind())
+ return false;
+
+ switch (X.getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Comparing NULL template argument");
+
+ case TemplateArgument::Type:
+ return Context.getCanonicalType(X.getAsType()) ==
+ Context.getCanonicalType(Y.getAsType());
+
+ case TemplateArgument::Declaration:
+ return isSameDeclaration(X.getAsDecl(), Y.getAsDecl());
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ return Context.getCanonicalTemplateName(
+ X.getAsTemplateOrTemplatePattern()).getAsVoidPointer() ==
+ Context.getCanonicalTemplateName(
+ Y.getAsTemplateOrTemplatePattern()).getAsVoidPointer();
+
+ case TemplateArgument::Integral:
+ return *X.getAsIntegral() == *Y.getAsIntegral();
+
+ case TemplateArgument::Expression: {
+ llvm::FoldingSetNodeID XID, YID;
+ X.getAsExpr()->Profile(XID, Context, true);
+ Y.getAsExpr()->Profile(YID, Context, true);
+ return XID == YID;
+ }
+
+ case TemplateArgument::Pack:
+ if (X.pack_size() != Y.pack_size())
+ return false;
+
+ for (TemplateArgument::pack_iterator XP = X.pack_begin(),
+ XPEnd = X.pack_end(),
+ YP = Y.pack_begin();
+ XP != XPEnd; ++XP, ++YP)
+ if (!isSameTemplateArg(Context, *XP, *YP))
+ return false;
+
+ return true;
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+/// \brief Allocate a TemplateArgumentLoc where all locations have
+/// been initialized to the given location.
+///
+/// \param S The semantic analysis object.
+///
+/// \param Arg The template argument we are producing template argument
+/// location information for.
+///
+/// \param NTTPType For a declaration template argument, the type of
+/// the non-type template parameter that corresponds to this template
+/// argument.
+///
+/// \param Loc The source location to use for the resulting template
+/// argument.
+static TemplateArgumentLoc
+getTrivialTemplateArgumentLoc(Sema &S,
+ const TemplateArgument &Arg,
+ QualType NTTPType,
+ SourceLocation Loc) {
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("Can't get a NULL template argument here");
+
+ case TemplateArgument::Type:
+ return TemplateArgumentLoc(Arg,
+ S.Context.getTrivialTypeSourceInfo(Arg.getAsType(), Loc));
+
+ case TemplateArgument::Declaration: {
+ Expr *E
+ = S.BuildExpressionFromDeclTemplateArgument(Arg, NTTPType, Loc)
+ .takeAs<Expr>();
+ return TemplateArgumentLoc(TemplateArgument(E), E);
+ }
+
+ case TemplateArgument::Integral: {
+ Expr *E
+ = S.BuildExpressionFromIntegralTemplateArgument(Arg, Loc).takeAs<Expr>();
+ return TemplateArgumentLoc(TemplateArgument(E), E);
+ }
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion: {
+ NestedNameSpecifierLocBuilder Builder;
+ TemplateName Template = Arg.getAsTemplate();
+ if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
+ Builder.MakeTrivial(S.Context, DTN->getQualifier(), Loc);
+ else if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+ Builder.MakeTrivial(S.Context, QTN->getQualifier(), Loc);
+
+ if (Arg.getKind() == TemplateArgument::Template)
+ return TemplateArgumentLoc(Arg,
+ Builder.getWithLocInContext(S.Context),
+ Loc);
+
+ return TemplateArgumentLoc(Arg, Builder.getWithLocInContext(S.Context),
+ Loc, Loc);
+ }
+
+ case TemplateArgument::Expression:
+ return TemplateArgumentLoc(Arg, Arg.getAsExpr());
+
+ case TemplateArgument::Pack:
+ return TemplateArgumentLoc(Arg, TemplateArgumentLocInfo());
+ }
+
+ llvm_unreachable("Invalid TemplateArgument Kind!");
+}
+
+
+/// \brief Convert the given deduced template argument and add it to the set of
+/// fully-converted template arguments.
+static bool ConvertDeducedTemplateArgument(Sema &S, NamedDecl *Param,
+ DeducedTemplateArgument Arg,
+ NamedDecl *Template,
+ QualType NTTPType,
+ unsigned ArgumentPackIndex,
+ TemplateDeductionInfo &Info,
+ bool InFunctionTemplate,
+ SmallVectorImpl<TemplateArgument> &Output) {
+ if (Arg.getKind() == TemplateArgument::Pack) {
+ // This is a template argument pack, so check each of its arguments against
+ // the template parameter.
+ SmallVector<TemplateArgument, 2> PackedArgsBuilder;
+ for (TemplateArgument::pack_iterator PA = Arg.pack_begin(),
+ PAEnd = Arg.pack_end();
+ PA != PAEnd; ++PA) {
+ // When converting the deduced template argument, append it to the
+ // general output list. We need to do this so that the template argument
+ // checking logic has all of the prior template arguments available.
+ DeducedTemplateArgument InnerArg(*PA);
+ InnerArg.setDeducedFromArrayBound(Arg.wasDeducedFromArrayBound());
+ if (ConvertDeducedTemplateArgument(S, Param, InnerArg, Template,
+ NTTPType, PackedArgsBuilder.size(),
+ Info, InFunctionTemplate, Output))
+ return true;
+
+ // Move the converted template argument into our argument pack.
+ PackedArgsBuilder.push_back(Output.back());
+ Output.pop_back();
+ }
+
+ // Create the resulting argument pack.
+ Output.push_back(TemplateArgument::CreatePackCopy(S.Context,
+ PackedArgsBuilder.data(),
+ PackedArgsBuilder.size()));
+ return false;
+ }
+
+ // Convert the deduced template argument into a template
+ // argument that we can check, almost as if the user had written
+ // the template argument explicitly.
+ TemplateArgumentLoc ArgLoc = getTrivialTemplateArgumentLoc(S, Arg, NTTPType,
+ Info.getLocation());
+
+ // Check the template argument, converting it as necessary.
+ return S.CheckTemplateArgument(Param, ArgLoc,
+ Template,
+ Template->getLocation(),
+ Template->getSourceRange().getEnd(),
+ ArgumentPackIndex,
+ Output,
+ InFunctionTemplate
+ ? (Arg.wasDeducedFromArrayBound()
+ ? Sema::CTAK_DeducedFromArrayBound
+ : Sema::CTAK_Deduced)
+ : Sema::CTAK_Specified);
+}
+
+/// Complete template argument deduction for a class template partial
+/// specialization.
+static Sema::TemplateDeductionResult
+FinishTemplateArgumentDeduction(Sema &S,
+ ClassTemplatePartialSpecializationDecl *Partial,
+ const TemplateArgumentList &TemplateArgs,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ TemplateDeductionInfo &Info) {
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(S, Sema::Unevaluated);
+ Sema::SFINAETrap Trap(S);
+
+ Sema::ContextRAII SavedContext(S, Partial);
+
+ // C++ [temp.deduct.type]p2:
+ // [...] or if any template argument remains neither deduced nor
+ // explicitly specified, template argument deduction fails.
+ SmallVector<TemplateArgument, 4> Builder;
+ TemplateParameterList *PartialParams = Partial->getTemplateParameters();
+ for (unsigned I = 0, N = PartialParams->size(); I != N; ++I) {
+ NamedDecl *Param = PartialParams->getParam(I);
+ if (Deduced[I].isNull()) {
+ Info.Param = makeTemplateParameter(Param);
+ return Sema::TDK_Incomplete;
+ }
+
+ // We have deduced this argument, so it still needs to be
+ // checked and converted.
+
+ // First, for a non-type template parameter type that is
+ // initialized by a declaration, we need the type of the
+ // corresponding non-type template parameter.
+ QualType NTTPType;
+ if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ NTTPType = NTTP->getType();
+ if (NTTPType->isDependentType()) {
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Builder.data(), Builder.size());
+ NTTPType = S.SubstType(NTTPType,
+ MultiLevelTemplateArgumentList(TemplateArgs),
+ NTTP->getLocation(),
+ NTTP->getDeclName());
+ if (NTTPType.isNull()) {
+ Info.Param = makeTemplateParameter(Param);
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(S.Context,
+ Builder.data(),
+ Builder.size()));
+ return Sema::TDK_SubstitutionFailure;
+ }
+ }
+ }
+
+ if (ConvertDeducedTemplateArgument(S, Param, Deduced[I],
+ Partial, NTTPType, 0, Info, false,
+ Builder)) {
+ Info.Param = makeTemplateParameter(Param);
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(S.Context, Builder.data(),
+ Builder.size()));
+ return Sema::TDK_SubstitutionFailure;
+ }
+ }
+
+ // Form the template argument list from the deduced template arguments.
+ TemplateArgumentList *DeducedArgumentList
+ = TemplateArgumentList::CreateCopy(S.Context, Builder.data(),
+ Builder.size());
+
+ Info.reset(DeducedArgumentList);
+
+ // Substitute the deduced template arguments into the template
+ // arguments of the class template partial specialization, and
+ // verify that the instantiated template arguments are both valid
+ // and are equivalent to the template arguments originally provided
+ // to the class template.
+ LocalInstantiationScope InstScope(S);
+ ClassTemplateDecl *ClassTemplate = Partial->getSpecializedTemplate();
+ const TemplateArgumentLoc *PartialTemplateArgs
+ = Partial->getTemplateArgsAsWritten();
+
+ // Note that we don't provide the langle and rangle locations.
+ TemplateArgumentListInfo InstArgs;
+
+ if (S.Subst(PartialTemplateArgs,
+ Partial->getNumTemplateArgsAsWritten(),
+ InstArgs, MultiLevelTemplateArgumentList(*DeducedArgumentList))) {
+ unsigned ArgIdx = InstArgs.size(), ParamIdx = ArgIdx;
+ if (ParamIdx >= Partial->getTemplateParameters()->size())
+ ParamIdx = Partial->getTemplateParameters()->size() - 1;
+
+ Decl *Param
+ = const_cast<NamedDecl *>(
+ Partial->getTemplateParameters()->getParam(ParamIdx));
+ Info.Param = makeTemplateParameter(Param);
+ Info.FirstArg = PartialTemplateArgs[ArgIdx].getArgument();
+ return Sema::TDK_SubstitutionFailure;
+ }
+
+ SmallVector<TemplateArgument, 4> ConvertedInstArgs;
+ if (S.CheckTemplateArgumentList(ClassTemplate, Partial->getLocation(),
+ InstArgs, false, ConvertedInstArgs))
+ return Sema::TDK_SubstitutionFailure;
+
+ TemplateParameterList *TemplateParams
+ = ClassTemplate->getTemplateParameters();
+ for (unsigned I = 0, E = TemplateParams->size(); I != E; ++I) {
+ TemplateArgument InstArg = ConvertedInstArgs.data()[I];
+ if (!isSameTemplateArg(S.Context, TemplateArgs[I], InstArg)) {
+ Info.Param = makeTemplateParameter(TemplateParams->getParam(I));
+ Info.FirstArg = TemplateArgs[I];
+ Info.SecondArg = InstArg;
+ return Sema::TDK_NonDeducedMismatch;
+ }
+ }
+
+ if (Trap.hasErrorOccurred())
+ return Sema::TDK_SubstitutionFailure;
+
+ return Sema::TDK_Success;
+}
+
+/// \brief Perform template argument deduction to determine whether
+/// the given template arguments match the given class template
+/// partial specialization per C++ [temp.class.spec.match].
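+///
+/// For example (an illustrative sketch; 'A' is hypothetical):
+/// \code
+///   template<typename T> struct A { };
+///   template<typename T> struct A<T *> { };  // partial specialization
+///   A<int *> a; // matching <int *> against <T *> deduces T = int, so the
+///               // partial specialization is chosen
+/// \endcode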
+Sema::TemplateDeductionResult
+Sema::DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
+ const TemplateArgumentList &TemplateArgs,
+ TemplateDeductionInfo &Info) {
+ // C++ [temp.class.spec.match]p2:
+ // A partial specialization matches a given actual template
+ // argument list if the template arguments of the partial
+ // specialization can be deduced from the actual template argument
+ // list (14.8.2).
+
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+ SFINAETrap Trap(*this);
+
+ SmallVector<DeducedTemplateArgument, 4> Deduced;
+ Deduced.resize(Partial->getTemplateParameters()->size());
+ if (TemplateDeductionResult Result
+ = ::DeduceTemplateArguments(*this,
+ Partial->getTemplateParameters(),
+ Partial->getTemplateArgs(),
+ TemplateArgs, Info, Deduced))
+ return Result;
+
+ InstantiatingTemplate Inst(*this, Partial->getLocation(), Partial,
+ Deduced.data(), Deduced.size(), Info);
+ if (Inst)
+ return TDK_InstantiationDepth;
+
+ if (Trap.hasErrorOccurred())
+ return Sema::TDK_SubstitutionFailure;
+
+ return ::FinishTemplateArgumentDeduction(*this, Partial, TemplateArgs,
+ Deduced, Info);
+}
+
+/// \brief Determine whether the given type T is a simple-template-id type.
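+///
+/// For example (an illustrative sketch):
+/// \code
+///   std::vector<int>                 // a simple-template-id type
+///   typename T::template apply<int>  // not: the template-name is dependent
+/// \endcode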
+static bool isSimpleTemplateIdType(QualType T) {
+ if (const TemplateSpecializationType *Spec
+ = T->getAs<TemplateSpecializationType>())
+ return Spec->getTemplateName().getAsTemplateDecl() != 0;
+
+ return false;
+}
+
+/// \brief Substitute the explicitly-provided template arguments into the
+/// given function template according to C++ [temp.arg.explicit].
+///
+/// \param FunctionTemplate the function template into which the explicit
+/// template arguments will be substituted.
+///
+/// \param ExplicitTemplateArguments the explicitly-specified template
+/// arguments.
+///
+/// \param Deduced the deduced template arguments, which will be populated
+/// with the converted and checked explicit template arguments.
+///
+/// \param ParamTypes will be populated with the instantiated function
+/// parameters.
+///
+/// \param FunctionType if non-NULL, the result type of the function template
+/// will also be instantiated and the pointed-to value will be updated with
+/// the instantiated function type.
+///
+/// \param Info if substitution fails for any reason, this object will be
+/// populated with more information about the failure.
+///
+/// \returns TDK_Success if substitution was successful, or some failure
+/// condition.
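+///
+/// An illustrative sketch (hypothetical names): given
+///   template<typename T, typename U> void f(T, U);
+/// the call f<int>(1, 2.5) specifies T explicitly and omits the trailing,
+/// deducible U. This routine checks the explicit arguments and substitutes
+/// T = int into the parameter types, leaving U to be deduced later (here,
+/// as 'double').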
+Sema::TemplateDeductionResult
+Sema::SubstituteExplicitTemplateArguments(
+ FunctionTemplateDecl *FunctionTemplate,
+ TemplateArgumentListInfo &ExplicitTemplateArgs,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ SmallVectorImpl<QualType> &ParamTypes,
+ QualType *FunctionType,
+ TemplateDeductionInfo &Info) {
+ FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
+ TemplateParameterList *TemplateParams
+ = FunctionTemplate->getTemplateParameters();
+
+ if (ExplicitTemplateArgs.size() == 0) {
+ // No arguments to substitute; just copy over the parameter types and
+ // fill in the function type.
+ for (FunctionDecl::param_iterator P = Function->param_begin(),
+ PEnd = Function->param_end();
+ P != PEnd;
+ ++P)
+ ParamTypes.push_back((*P)->getType());
+
+ if (FunctionType)
+ *FunctionType = Function->getType();
+ return TDK_Success;
+ }
+
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+ SFINAETrap Trap(*this);
+
+ // C++ [temp.arg.explicit]p3:
+ // Template arguments that are present shall be specified in the
+ // declaration order of their corresponding template-parameters. The
+ // template argument list shall not specify more template-arguments than
+ // there are corresponding template-parameters.
+ SmallVector<TemplateArgument, 4> Builder;
+
+ // Enter a new template instantiation context where we check the
+ // explicitly-specified template arguments against this function template,
+ // and then substitute them into the function parameter types.
+ InstantiatingTemplate Inst(*this, FunctionTemplate->getLocation(),
+ FunctionTemplate, Deduced.data(), Deduced.size(),
+ ActiveTemplateInstantiation::ExplicitTemplateArgumentSubstitution,
+ Info);
+ if (Inst)
+ return TDK_InstantiationDepth;
+
+ if (CheckTemplateArgumentList(FunctionTemplate,
+ SourceLocation(),
+ ExplicitTemplateArgs,
+ true,
+ Builder) || Trap.hasErrorOccurred()) {
+ unsigned Index = Builder.size();
+ if (Index >= TemplateParams->size())
+ Index = TemplateParams->size() - 1;
+ Info.Param = makeTemplateParameter(TemplateParams->getParam(Index));
+ return TDK_InvalidExplicitArguments;
+ }
+
+ // Form the template argument list from the explicitly-specified
+ // template arguments.
+ TemplateArgumentList *ExplicitArgumentList
+ = TemplateArgumentList::CreateCopy(Context, Builder.data(), Builder.size());
+ Info.reset(ExplicitArgumentList);
+
+ // Template argument deduction and the final substitution should be
+ // done in the context of the templated declaration. Explicit
+ // argument substitution, on the other hand, needs to happen in the
+ // calling context.
+ ContextRAII SavedContext(*this, FunctionTemplate->getTemplatedDecl());
+
+ // If we deduced template arguments for a template parameter pack,
+ // note that the template argument pack is partially substituted and record
+ // the explicit template arguments. They'll be used as part of deduction
+ // for this template parameter pack.
+ for (unsigned I = 0, N = Builder.size(); I != N; ++I) {
+ const TemplateArgument &Arg = Builder[I];
+ if (Arg.getKind() == TemplateArgument::Pack) {
+ CurrentInstantiationScope->SetPartiallySubstitutedPack(
+ TemplateParams->getParam(I),
+ Arg.pack_begin(),
+ Arg.pack_size());
+ break;
+ }
+ }
+
+ const FunctionProtoType *Proto
+ = Function->getType()->getAs<FunctionProtoType>();
+ assert(Proto && "Function template does not have a prototype?");
+
+ // Instantiate the types of each of the function parameters given the
+ // explicitly-specified template arguments. If the function has a trailing
+ // return type, substitute it after the arguments to ensure we substitute
+ // in lexical order.
+ if (Proto->hasTrailingReturn() &&
+ SubstParmTypes(Function->getLocation(),
+ Function->param_begin(), Function->getNumParams(),
+ MultiLevelTemplateArgumentList(*ExplicitArgumentList),
+ ParamTypes))
+ return TDK_SubstitutionFailure;
+
+ // Instantiate the return type.
+ // FIXME: exception-specifications?
+ QualType ResultType
+ = SubstType(Proto->getResultType(),
+ MultiLevelTemplateArgumentList(*ExplicitArgumentList),
+ Function->getTypeSpecStartLoc(),
+ Function->getDeclName());
+ if (ResultType.isNull() || Trap.hasErrorOccurred())
+ return TDK_SubstitutionFailure;
+
+ // Instantiate the types of each of the function parameters given the
+ // explicitly-specified template arguments if we didn't do so earlier.
+ if (!Proto->hasTrailingReturn() &&
+ SubstParmTypes(Function->getLocation(),
+ Function->param_begin(), Function->getNumParams(),
+ MultiLevelTemplateArgumentList(*ExplicitArgumentList),
+ ParamTypes))
+ return TDK_SubstitutionFailure;
+
+ if (FunctionType) {
+ *FunctionType = BuildFunctionType(ResultType,
+ ParamTypes.data(), ParamTypes.size(),
+ Proto->isVariadic(),
+ Proto->hasTrailingReturn(),
+ Proto->getTypeQuals(),
+ Proto->getRefQualifier(),
+ Function->getLocation(),
+ Function->getDeclName(),
+ Proto->getExtInfo());
+ if (FunctionType->isNull() || Trap.hasErrorOccurred())
+ return TDK_SubstitutionFailure;
+ }
+
+ // C++ [temp.arg.explicit]p2:
+ // Trailing template arguments that can be deduced (14.8.2) may be
+ // omitted from the list of explicit template-arguments. If all of the
+ // template arguments can be deduced, they may all be omitted; in this
+ // case, the empty template argument list <> itself may also be omitted.
+ //
+ // Take all of the explicitly-specified arguments and put them into
+ // the set of deduced template arguments. Explicitly-specified
+ // parameter packs, however, will be set to NULL since the deduction
+ // mechanisms handle explicitly-specified argument packs directly.
+ Deduced.reserve(TemplateParams->size());
+ for (unsigned I = 0, N = ExplicitArgumentList->size(); I != N; ++I) {
+ const TemplateArgument &Arg = ExplicitArgumentList->get(I);
+ if (Arg.getKind() == TemplateArgument::Pack)
+ Deduced.push_back(DeducedTemplateArgument());
+ else
+ Deduced.push_back(Arg);
+ }
+
+ return TDK_Success;
+}
+
+/// \brief Check whether the deduced argument type for a call to a function
+/// template matches the actual argument type per C++ [temp.deduct.call]p4.
+static bool
+CheckOriginalCallArgDeduction(Sema &S, Sema::OriginalCallArg OriginalArg,
+ QualType DeducedA) {
+ ASTContext &Context = S.Context;
+
+ QualType A = OriginalArg.OriginalArgType;
+ QualType OriginalParamType = OriginalArg.OriginalParamType;
+
+ // Check for type equality (top-level cv-qualifiers are ignored).
+ if (Context.hasSameUnqualifiedType(A, DeducedA))
+ return false;
+
+ // Strip off references on the argument types; they aren't needed for
+ // the following checks.
+ if (const ReferenceType *DeducedARef = DeducedA->getAs<ReferenceType>())
+ DeducedA = DeducedARef->getPointeeType();
+ if (const ReferenceType *ARef = A->getAs<ReferenceType>())
+ A = ARef->getPointeeType();
+
+ // C++ [temp.deduct.call]p4:
+ // [...] However, there are three cases that allow a difference:
+ // - If the original P is a reference type, the deduced A (i.e., the
+ // type referred to by the reference) can be more cv-qualified than
+ // the transformed A.
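+  //
+  //       An illustrative sketch (hypothetical names): for
+  //         template<typename T> void f(const T&);
+  //       calling f with an 'int' lvalue deduces T = int, so the deduced A
+  //       ('const int') is more cv-qualified than the transformed A
+  //       ('int'), which the check below accepts.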
+ if (const ReferenceType *OriginalParamRef
+ = OriginalParamType->getAs<ReferenceType>()) {
+ // We don't want to keep the reference around any more.
+ OriginalParamType = OriginalParamRef->getPointeeType();
+
+ Qualifiers AQuals = A.getQualifiers();
+ Qualifiers DeducedAQuals = DeducedA.getQualifiers();
+ if (AQuals == DeducedAQuals) {
+ // Qualifiers match; there's nothing to do.
+ } else if (!DeducedAQuals.compatiblyIncludes(AQuals)) {
+ return true;
+ } else {
+ // Qualifiers are compatible, so have the argument type adopt the
+ // deduced argument type's qualifiers as if we had performed the
+ // qualification conversion.
+ A = Context.getQualifiedType(A.getUnqualifiedType(), DeducedAQuals);
+ }
+ }
+
+ // - The transformed A can be another pointer or pointer to member
+ // type that can be converted to the deduced A via a qualification
+ // conversion.
+ //
+ // Also allow conversions which merely strip [[noreturn]] from function types
+ // (recursively) as an extension.
+  // FIXME: Currently, this doesn't play nicely with qualification conversions.
+ bool ObjCLifetimeConversion = false;
+ QualType ResultTy;
+ if ((A->isAnyPointerType() || A->isMemberPointerType()) &&
+ (S.IsQualificationConversion(A, DeducedA, false,
+ ObjCLifetimeConversion) ||
+ S.IsNoReturnConversion(A, DeducedA, ResultTy)))
+ return false;
+
+ // - If P is a class and P has the form simple-template-id, then the
+ // transformed A can be a derived class of the deduced A. [...]
+ // [...] Likewise, if P is a pointer to a class of the form
+ // simple-template-id, the transformed A can be a pointer to a
+ // derived class pointed to by the deduced A.
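+  //
+  //   An illustrative sketch (hypothetical names): for
+  //     template<typename T> void f(Base<T>*);
+  //   passing a 'Derived*' where Derived inherits from Base<int> deduces
+  //   T = int; the transformed A (Derived*) points to a class derived from
+  //   the deduced A's pointee (Base<int>), which the check below accepts.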
+ if (const PointerType *OriginalParamPtr
+ = OriginalParamType->getAs<PointerType>()) {
+ if (const PointerType *DeducedAPtr = DeducedA->getAs<PointerType>()) {
+ if (const PointerType *APtr = A->getAs<PointerType>()) {
+ if (A->getPointeeType()->isRecordType()) {
+ OriginalParamType = OriginalParamPtr->getPointeeType();
+ DeducedA = DeducedAPtr->getPointeeType();
+ A = APtr->getPointeeType();
+ }
+ }
+ }
+ }
+
+ if (Context.hasSameUnqualifiedType(A, DeducedA))
+ return false;
+
+ if (A->isRecordType() && isSimpleTemplateIdType(OriginalParamType) &&
+ S.IsDerivedFrom(A, DeducedA))
+ return false;
+
+ return true;
+}
+
+/// \brief Finish template argument deduction for a function template,
+/// checking the deduced template arguments for completeness and forming
+/// the function template specialization.
+///
+/// \param OriginalCallArgs If non-NULL, the original call arguments against
+/// which the deduced argument types should be compared.
+Sema::TemplateDeductionResult
+Sema::FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ unsigned NumExplicitlySpecified,
+ FunctionDecl *&Specialization,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs) {
+ TemplateParameterList *TemplateParams
+ = FunctionTemplate->getTemplateParameters();
+
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+ SFINAETrap Trap(*this);
+
+ // Enter a new template instantiation context while we instantiate the
+ // actual function declaration.
+ InstantiatingTemplate Inst(*this, FunctionTemplate->getLocation(),
+ FunctionTemplate, Deduced.data(), Deduced.size(),
+ ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution,
+ Info);
+ if (Inst)
+ return TDK_InstantiationDepth;
+
+ ContextRAII SavedContext(*this, FunctionTemplate->getTemplatedDecl());
+
+ // C++ [temp.deduct.type]p2:
+ // [...] or if any template argument remains neither deduced nor
+ // explicitly specified, template argument deduction fails.
+ SmallVector<TemplateArgument, 4> Builder;
+ for (unsigned I = 0, N = TemplateParams->size(); I != N; ++I) {
+ NamedDecl *Param = TemplateParams->getParam(I);
+
+ if (!Deduced[I].isNull()) {
+ if (I < NumExplicitlySpecified) {
+ // We have already fully type-checked and converted this
+ // argument, because it was explicitly-specified. Just record the
+ // presence of this argument.
+ Builder.push_back(Deduced[I]);
+ continue;
+ }
+
+ // We have deduced this argument, so it still needs to be
+ // checked and converted.
+
+ // First, for a non-type template parameter type that is
+ // initialized by a declaration, we need the type of the
+ // corresponding non-type template parameter.
+ QualType NTTPType;
+ if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ NTTPType = NTTP->getType();
+ if (NTTPType->isDependentType()) {
+ TemplateArgumentList TemplateArgs(TemplateArgumentList::OnStack,
+ Builder.data(), Builder.size());
+ NTTPType = SubstType(NTTPType,
+ MultiLevelTemplateArgumentList(TemplateArgs),
+ NTTP->getLocation(),
+ NTTP->getDeclName());
+ if (NTTPType.isNull()) {
+ Info.Param = makeTemplateParameter(Param);
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(Context,
+ Builder.data(),
+ Builder.size()));
+ return TDK_SubstitutionFailure;
+ }
+ }
+ }
+
+ if (ConvertDeducedTemplateArgument(*this, Param, Deduced[I],
+ FunctionTemplate, NTTPType, 0, Info,
+ true, Builder)) {
+ Info.Param = makeTemplateParameter(Param);
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(Context, Builder.data(),
+ Builder.size()));
+ return TDK_SubstitutionFailure;
+ }
+
+ continue;
+ }
+
+ // C++0x [temp.arg.explicit]p3:
+ // A trailing template parameter pack (14.5.3) not otherwise deduced will
+ // be deduced to an empty sequence of template arguments.
+ // FIXME: Where did the word "trailing" come from?
+ if (Param->isTemplateParameterPack()) {
+ // We may have had explicitly-specified template arguments for this
+ // template parameter pack. If so, our empty deduction extends the
+ // explicitly-specified set (C++0x [temp.arg.explicit]p9).
+ const TemplateArgument *ExplicitArgs;
+ unsigned NumExplicitArgs;
+ if (CurrentInstantiationScope->getPartiallySubstitutedPack(&ExplicitArgs,
+ &NumExplicitArgs)
+ == Param)
+ Builder.push_back(TemplateArgument(ExplicitArgs, NumExplicitArgs));
+ else
+ Builder.push_back(TemplateArgument(0, 0));
+
+ continue;
+ }
+
+ // Substitute into the default template argument, if available.
+ TemplateArgumentLoc DefArg
+ = SubstDefaultTemplateArgumentIfAvailable(FunctionTemplate,
+ FunctionTemplate->getLocation(),
+ FunctionTemplate->getSourceRange().getEnd(),
+ Param,
+ Builder);
+
+ // If there was no default argument, deduction is incomplete.
+ if (DefArg.getArgument().isNull()) {
+ Info.Param = makeTemplateParameter(
+ const_cast<NamedDecl *>(TemplateParams->getParam(I)));
+ return TDK_Incomplete;
+ }
+
+ // Check whether we can actually use the default argument.
+ if (CheckTemplateArgument(Param, DefArg,
+ FunctionTemplate,
+ FunctionTemplate->getLocation(),
+ FunctionTemplate->getSourceRange().getEnd(),
+ 0, Builder,
+ CTAK_Specified)) {
+ Info.Param = makeTemplateParameter(
+ const_cast<NamedDecl *>(TemplateParams->getParam(I)));
+ // FIXME: These template arguments are temporary. Free them!
+ Info.reset(TemplateArgumentList::CreateCopy(Context, Builder.data(),
+ Builder.size()));
+ return TDK_SubstitutionFailure;
+ }
+
+ // If we get here, we successfully used the default template argument.
+ }
+
+ // Form the template argument list from the deduced template arguments.
+ TemplateArgumentList *DeducedArgumentList
+ = TemplateArgumentList::CreateCopy(Context, Builder.data(), Builder.size());
+ Info.reset(DeducedArgumentList);
+
+ // Substitute the deduced template arguments into the function template
+ // declaration to produce the function template specialization.
+ DeclContext *Owner = FunctionTemplate->getDeclContext();
+ if (FunctionTemplate->getFriendObjectKind())
+ Owner = FunctionTemplate->getLexicalDeclContext();
+ Specialization = cast_or_null<FunctionDecl>(
+ SubstDecl(FunctionTemplate->getTemplatedDecl(), Owner,
+ MultiLevelTemplateArgumentList(*DeducedArgumentList)));
+ if (!Specialization || Specialization->isInvalidDecl())
+ return TDK_SubstitutionFailure;
+
+ assert(Specialization->getPrimaryTemplate()->getCanonicalDecl() ==
+ FunctionTemplate->getCanonicalDecl());
+
+ // If the template argument list is owned by the function template
+ // specialization, release it.
+ if (Specialization->getTemplateSpecializationArgs() == DeducedArgumentList &&
+ !Trap.hasErrorOccurred())
+ Info.take();
+
+ // There may have been an error that did not prevent us from constructing a
+ // declaration. Mark the declaration invalid and return with a substitution
+ // failure.
+ if (Trap.hasErrorOccurred()) {
+ Specialization->setInvalidDecl(true);
+ return TDK_SubstitutionFailure;
+ }
+
+ if (OriginalCallArgs) {
+ // C++ [temp.deduct.call]p4:
+ // In general, the deduction process attempts to find template argument
+ // values that will make the deduced A identical to A (after the type A
+ // is transformed as described above). [...]
+ for (unsigned I = 0, N = OriginalCallArgs->size(); I != N; ++I) {
+ OriginalCallArg OriginalArg = (*OriginalCallArgs)[I];
+ unsigned ParamIdx = OriginalArg.ArgIdx;
+
+ if (ParamIdx >= Specialization->getNumParams())
+ continue;
+
+ QualType DeducedA = Specialization->getParamDecl(ParamIdx)->getType();
+ if (CheckOriginalCallArgDeduction(*this, OriginalArg, DeducedA))
+ return Sema::TDK_SubstitutionFailure;
+ }
+ }
+
+ // If we suppressed any diagnostics while performing template argument
+ // deduction, and if we haven't already instantiated this declaration,
+ // keep track of these diagnostics. They'll be emitted if this specialization
+ // is actually used.
+ if (Info.diag_begin() != Info.diag_end()) {
+ llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >::iterator
+ Pos = SuppressedDiagnostics.find(Specialization->getCanonicalDecl());
+ if (Pos == SuppressedDiagnostics.end())
+ SuppressedDiagnostics[Specialization->getCanonicalDecl()]
+ .append(Info.diag_begin(), Info.diag_end());
+ }
+
+ return TDK_Success;
+}
+
+/// Gets the type of a function for template-argument-deduction
+/// purposes when it's considered as part of an overload set.
+static QualType GetTypeOfFunction(ASTContext &Context,
+ const OverloadExpr::FindResult &R,
+ FunctionDecl *Fn) {
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Fn))
+ if (Method->isInstance()) {
+ // An instance method that's referenced in a form that doesn't
+ // look like a member pointer is just invalid.
+ if (!R.HasFormOfMemberPointer) return QualType();
+
+ return Context.getMemberPointerType(Fn->getType(),
+ Context.getTypeDeclType(Method->getParent()).getTypePtr());
+ }
+
+ if (!R.IsAddressOfOperand) return Fn->getType();
+ return Context.getPointerType(Fn->getType());
+}
+
+/// Apply the deduction rules for overload sets.
+///
+/// \return the null type if this argument should be treated as an
+/// undeduced context
+static QualType
+ResolveOverloadForDeduction(Sema &S, TemplateParameterList *TemplateParams,
+ Expr *Arg, QualType ParamType,
+ bool ParamWasReference) {
+
+ OverloadExpr::FindResult R = OverloadExpr::find(Arg);
+
+ OverloadExpr *Ovl = R.Expression;
+
+ // C++0x [temp.deduct.call]p4
+ unsigned TDF = 0;
+ if (ParamWasReference)
+ TDF |= TDF_ParamWithReferenceType;
+ if (R.IsAddressOfOperand)
+ TDF |= TDF_IgnoreQualifiers;
+
+ // C++0x [temp.deduct.call]p6:
+ // When P is a function type, pointer to function type, or pointer
+ // to member function type:
+
+ if (!ParamType->isFunctionType() &&
+ !ParamType->isFunctionPointerType() &&
+ !ParamType->isMemberFunctionPointerType()) {
+ if (Ovl->hasExplicitTemplateArgs()) {
+ // But we can still look for an explicit specialization.
+ if (FunctionDecl *ExplicitSpec
+ = S.ResolveSingleFunctionTemplateSpecialization(Ovl))
+ return GetTypeOfFunction(S.Context, R, ExplicitSpec);
+ }
+
+ return QualType();
+ }
+
+ // Gather the explicit template arguments, if any.
+ TemplateArgumentListInfo ExplicitTemplateArgs;
+ if (Ovl->hasExplicitTemplateArgs())
+ Ovl->getExplicitTemplateArgs().copyInto(ExplicitTemplateArgs);
+ QualType Match;
+ for (UnresolvedSetIterator I = Ovl->decls_begin(),
+ E = Ovl->decls_end(); I != E; ++I) {
+ NamedDecl *D = (*I)->getUnderlyingDecl();
+
+ if (FunctionTemplateDecl *FunTmpl = dyn_cast<FunctionTemplateDecl>(D)) {
+ // - If the argument is an overload set containing one or more
+ // function templates, the parameter is treated as a
+ // non-deduced context.
+ if (!Ovl->hasExplicitTemplateArgs())
+ return QualType();
+
+ // Otherwise, see if we can resolve a function type
+ FunctionDecl *Specialization = 0;
+ TemplateDeductionInfo Info(S.Context, Ovl->getNameLoc());
+ if (S.DeduceTemplateArguments(FunTmpl, &ExplicitTemplateArgs,
+ Specialization, Info))
+ continue;
+
+ D = Specialization;
+ }
+
+ FunctionDecl *Fn = cast<FunctionDecl>(D);
+ QualType ArgType = GetTypeOfFunction(S.Context, R, Fn);
+ if (ArgType.isNull()) continue;
+
+ // Function-to-pointer conversion.
+ if (!ParamWasReference && ParamType->isPointerType() &&
+ ArgType->isFunctionType())
+ ArgType = S.Context.getPointerType(ArgType);
+
+ // - If the argument is an overload set (not containing function
+ // templates), trial argument deduction is attempted using each
+ // of the members of the set. If deduction succeeds for only one
+ // of the overload set members, that member is used as the
+ // argument value for the deduction. If deduction succeeds for
+ // more than one member of the overload set the parameter is
+ // treated as a non-deduced context.
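+    //
+    //     An illustrative sketch (hypothetical names): with overloads
+    //       int g(int); and int g(int, int);
+    //     passing 'g' to template<typename T> void f(T (*)(T)) tries each
+    //     member; only the unary overload deduces T (to int), so its type
+    //     is used as the argument type.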
+
+ // We do all of this in a fresh context per C++0x [temp.deduct.type]p2:
+ // Type deduction is done independently for each P/A pair, and
+ // the deduced template argument values are then combined.
+ // So we do not reject deductions which were made elsewhere.
+ SmallVector<DeducedTemplateArgument, 8>
+ Deduced(TemplateParams->size());
+ TemplateDeductionInfo Info(S.Context, Ovl->getNameLoc());
+ Sema::TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, ParamType,
+ ArgType, Info, Deduced, TDF);
+ if (Result) continue;
+ if (!Match.isNull()) return QualType();
+ Match = ArgType;
+ }
+
+ return Match;
+}
+
+/// \brief Perform the adjustments to the parameter and argument types
+/// described in C++ [temp.deduct.call].
+///
+/// \returns true if the caller should not attempt to perform any template
+/// argument deduction based on this P/A pair.
+static bool AdjustFunctionParmAndArgTypesForDeduction(Sema &S,
+ TemplateParameterList *TemplateParams,
+ QualType &ParamType,
+ QualType &ArgType,
+ Expr *Arg,
+ unsigned &TDF) {
+ // C++0x [temp.deduct.call]p3:
+ // If P is a cv-qualified type, the top level cv-qualifiers of P's type
+ // are ignored for type deduction.
+ if (ParamType.hasQualifiers())
+ ParamType = ParamType.getUnqualifiedType();
+ const ReferenceType *ParamRefType = ParamType->getAs<ReferenceType>();
+ if (ParamRefType) {
+ QualType PointeeType = ParamRefType->getPointeeType();
+
+    // If the argument has incomplete array type, try to complete its type.
+ if (ArgType->isIncompleteArrayType() &&
+ !S.RequireCompleteExprType(Arg, S.PDiag(),
+ std::make_pair(SourceLocation(), S.PDiag())))
+ ArgType = Arg->getType();
+
+ // [C++0x] If P is an rvalue reference to a cv-unqualified
+ // template parameter and the argument is an lvalue, the type
+ // "lvalue reference to A" is used in place of A for type
+ // deduction.
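+    //
+    //   An illustrative sketch (hypothetical names): for
+    //     template<typename T> void f(T&&);
+    //   calling f(n) with 'int n;' treats A as 'int&', so T is deduced as
+    //   'int&' and T&& collapses to 'int&'.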
+ if (isa<RValueReferenceType>(ParamType)) {
+ if (!PointeeType.getQualifiers() &&
+ isa<TemplateTypeParmType>(PointeeType) &&
+ Arg->Classify(S.Context).isLValue() &&
+ Arg->getType() != S.Context.OverloadTy &&
+ Arg->getType() != S.Context.BoundMemberTy)
+ ArgType = S.Context.getLValueReferenceType(ArgType);
+ }
+
+ // [...] If P is a reference type, the type referred to by P is used
+ // for type deduction.
+ ParamType = PointeeType;
+ }
+
+ // Overload sets usually make this parameter an undeduced
+ // context, but there are sometimes special circumstances.
+ if (ArgType == S.Context.OverloadTy) {
+ ArgType = ResolveOverloadForDeduction(S, TemplateParams,
+ Arg, ParamType,
+ ParamRefType != 0);
+ if (ArgType.isNull())
+ return true;
+ }
+
+ if (ParamRefType) {
+ // C++0x [temp.deduct.call]p3:
+ // [...] If P is of the form T&&, where T is a template parameter, and
+ // the argument is an lvalue, the type A& is used in place of A for
+ // type deduction.
+ if (ParamRefType->isRValueReferenceType() &&
+ ParamRefType->getAs<TemplateTypeParmType>() &&
+ Arg->isLValue())
+ ArgType = S.Context.getLValueReferenceType(ArgType);
+ } else {
+ // C++ [temp.deduct.call]p2:
+ // If P is not a reference type:
+ // - If A is an array type, the pointer type produced by the
+ // array-to-pointer standard conversion (4.2) is used in place of
+ // A for type deduction; otherwise,
+ if (ArgType->isArrayType())
+ ArgType = S.Context.getArrayDecayedType(ArgType);
+ // - If A is a function type, the pointer type produced by the
+ // function-to-pointer standard conversion (4.3) is used in place
+ // of A for type deduction; otherwise,
+ else if (ArgType->isFunctionType())
+ ArgType = S.Context.getPointerType(ArgType);
+ else {
+ // - If A is a cv-qualified type, the top level cv-qualifiers of A's
+ // type are ignored for type deduction.
+ ArgType = ArgType.getUnqualifiedType();
+ }
+ }
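+
+  // An illustrative sketch of the adjustments above (hypothetical names):
+  // calling template<typename T> void f(T) with 'int a[4]' decays A to
+  // 'int*' (so T is deduced as int*), and passing a function deduces T as
+  // a pointer to that function's type.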
+
+ // C++0x [temp.deduct.call]p4:
+ // In general, the deduction process attempts to find template argument
+ // values that will make the deduced A identical to A (after the type A
+ // is transformed as described above). [...]
+ TDF = TDF_SkipNonDependent;
+
+ // - If the original P is a reference type, the deduced A (i.e., the
+ // type referred to by the reference) can be more cv-qualified than
+ // the transformed A.
+ if (ParamRefType)
+ TDF |= TDF_ParamWithReferenceType;
+ // - The transformed A can be another pointer or pointer to member
+ // type that can be converted to the deduced A via a qualification
+ // conversion (4.4).
+ if (ArgType->isPointerType() || ArgType->isMemberPointerType() ||
+ ArgType->isObjCObjectPointerType())
+ TDF |= TDF_IgnoreQualifiers;
+ // - If P is a class and P has the form simple-template-id, then the
+ // transformed A can be a derived class of the deduced A. Likewise,
+ // if P is a pointer to a class of the form simple-template-id, the
+ // transformed A can be a pointer to a derived class pointed to by
+ // the deduced A.
+ if (isSimpleTemplateIdType(ParamType) ||
+ (isa<PointerType>(ParamType) &&
+ isSimpleTemplateIdType(
+ ParamType->getAs<PointerType>()->getPointeeType())))
+ TDF |= TDF_DerivedClass;
+
+ return false;
+}
+
+static bool hasDeducibleTemplateParameters(Sema &S,
+ FunctionTemplateDecl *FunctionTemplate,
+ QualType T);
+
+/// \brief Perform template argument deduction by matching a parameter type
+/// against a single expression, where the expression is an element of
+/// an initializer list that was originally matched against the argument
+/// type.
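+///
+/// An illustrative sketch (hypothetical names): for
+///   template<typename T> void f(std::initializer_list<T>);
+/// the call f({1, 2, 3}) deduces T from each element, yielding T = int.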
+static Sema::TemplateDeductionResult
+DeduceTemplateArgumentByListElement(Sema &S,
+ TemplateParameterList *TemplateParams,
+ QualType ParamType, Expr *Arg,
+ TemplateDeductionInfo &Info,
+ SmallVectorImpl<DeducedTemplateArgument> &Deduced,
+ unsigned TDF) {
+ // Handle the case where an init list contains another init list as the
+ // element.
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg)) {
+ QualType X;
+ if (!S.isStdInitializerList(ParamType.getNonReferenceType(), &X))
+ return Sema::TDK_Success; // Just ignore this expression.
+
+ // Recurse down into the init list.
+ for (unsigned i = 0, e = ILE->getNumInits(); i < e; ++i) {
+ if (Sema::TemplateDeductionResult Result =
+ DeduceTemplateArgumentByListElement(S, TemplateParams, X,
+ ILE->getInit(i),
+ Info, Deduced, TDF))
+ return Result;
+ }
+ return Sema::TDK_Success;
+ }
+
+ // For all other cases, just match by type.
+ QualType ArgType = Arg->getType();
+ if (AdjustFunctionParmAndArgTypesForDeduction(S, TemplateParams, ParamType,
+ ArgType, Arg, TDF))
+ return Sema::TDK_FailedOverloadResolution;
+ return DeduceTemplateArgumentsByTypeMatch(S, TemplateParams, ParamType,
+ ArgType, Info, Deduced, TDF);
+}
+
+/// \brief Perform template argument deduction from a function call
+/// (C++ [temp.deduct.call]).
+///
+/// \param FunctionTemplate the function template for which we are performing
+/// template argument deduction.
+///
+/// \param ExplicitTemplateArguments the explicit template arguments provided
+/// for this call.
+///
+/// \param Args the function call arguments
+///
+/// \param Name the name of the function being called. This is only significant
+/// when the function template is a conversion function template, in which
+/// case this routine will also perform template argument deduction based on
+/// the function to which
+///
+/// \param Specialization if template argument deduction was successful,
+/// this will be set to the function template specialization produced by
+/// template argument deduction.
+///
+/// \param Info the argument will be updated to provide additional information
+/// about template argument deduction.
+///
+/// \returns the result of template argument deduction.
+Sema::TemplateDeductionResult
+Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ llvm::ArrayRef<Expr *> Args,
+ FunctionDecl *&Specialization,
+ TemplateDeductionInfo &Info) {
+ FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
+
+ // C++ [temp.deduct.call]p1:
+ // Template argument deduction is done by comparing each function template
+ // parameter type (call it P) with the type of the corresponding argument
+ // of the call (call it A) as described below.
+ unsigned CheckArgs = Args.size();
+ if (Args.size() < Function->getMinRequiredArguments())
+ return TDK_TooFewArguments;
+ else if (Args.size() > Function->getNumParams()) {
+ const FunctionProtoType *Proto
+ = Function->getType()->getAs<FunctionProtoType>();
+ if (Proto->isTemplateVariadic())
+ /* Do nothing */;
+ else if (Proto->isVariadic())
+ CheckArgs = Function->getNumParams();
+ else
+ return TDK_TooManyArguments;
+ }
+
+ // The types of the parameters from which we will perform template argument
+ // deduction.
+ LocalInstantiationScope InstScope(*this);
+ TemplateParameterList *TemplateParams
+ = FunctionTemplate->getTemplateParameters();
+ SmallVector<DeducedTemplateArgument, 4> Deduced;
+ SmallVector<QualType, 4> ParamTypes;
+ unsigned NumExplicitlySpecified = 0;
+ if (ExplicitTemplateArgs) {
+ TemplateDeductionResult Result =
+ SubstituteExplicitTemplateArguments(FunctionTemplate,
+ *ExplicitTemplateArgs,
+ Deduced,
+ ParamTypes,
+ 0,
+ Info);
+ if (Result)
+ return Result;
+
+ NumExplicitlySpecified = Deduced.size();
+ } else {
+ // Just fill in the parameter types from the function declaration.
+ for (unsigned I = 0, N = Function->getNumParams(); I != N; ++I)
+ ParamTypes.push_back(Function->getParamDecl(I)->getType());
+ }
+
+ // Deduce template arguments from the function parameters.
+ Deduced.resize(TemplateParams->size());
+ unsigned ArgIdx = 0;
+ SmallVector<OriginalCallArg, 4> OriginalCallArgs;
+ for (unsigned ParamIdx = 0, NumParams = ParamTypes.size();
+ ParamIdx != NumParams; ++ParamIdx) {
+ QualType OrigParamType = ParamTypes[ParamIdx];
+ QualType ParamType = OrigParamType;
+
+ const PackExpansionType *ParamExpansion
+ = dyn_cast<PackExpansionType>(ParamType);
+ if (!ParamExpansion) {
+ // Simple case: matching a function parameter to a function argument.
+ if (ArgIdx >= CheckArgs)
+ break;
+
+ Expr *Arg = Args[ArgIdx++];
+ QualType ArgType = Arg->getType();
+
+ unsigned TDF = 0;
+ if (AdjustFunctionParmAndArgTypesForDeduction(*this, TemplateParams,
+ ParamType, ArgType, Arg,
+ TDF))
+ continue;
+
+ // If we have nothing to deduce, we're done.
+ if (!hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
+ continue;
+
+ // If the argument is an initializer list ...
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg)) {
+ // ... then the parameter is an undeduced context, unless the parameter
+ // type is (reference to cv) std::initializer_list<P'>, in which case
+ // deduction is done for each element of the initializer list, and the
+ // result is the deduced type if it's the same for all elements.
+ QualType X;
+ // Removing references was already done.
+ if (!isStdInitializerList(ParamType, &X))
+ continue;
+
+ for (unsigned i = 0, e = ILE->getNumInits(); i < e; ++i) {
+ if (TemplateDeductionResult Result =
+ DeduceTemplateArgumentByListElement(*this, TemplateParams, X,
+ ILE->getInit(i),
+ Info, Deduced, TDF))
+ return Result;
+ }
+ // Don't track the argument type, since an initializer list has none.
+ continue;
+ }
+
+ // Keep track of the argument type and corresponding parameter index,
+ // so we can check for compatibility between the deduced A and A.
+ OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx-1,
+ ArgType));
+
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
+ ParamType, ArgType,
+ Info, Deduced, TDF))
+ return Result;
+
+ continue;
+ }
+
+ // C++0x [temp.deduct.call]p1:
+ // For a function parameter pack that occurs at the end of the
+ // parameter-declaration-list, the type A of each remaining argument of
+ // the call is compared with the type P of the declarator-id of the
+ // function parameter pack. Each comparison deduces template arguments
+ // for subsequent positions in the template parameter packs expanded by
+ // the function parameter pack. For a function parameter pack that does
+ // not occur at the end of the parameter-declaration-list, the type of
+ // the parameter pack is a non-deduced context.
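+    //
+    //   An illustrative sketch (hypothetical names): for
+    //     template<typename ...Ts> void f(int, Ts...);
+    //   the call f(1, 2.0, 'c') compares each remaining argument against
+    //   the pack pattern, deducing Ts = <double, char>.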
+ if (ParamIdx + 1 < NumParams)
+ break;
+
+ QualType ParamPattern = ParamExpansion->getPattern();
+ SmallVector<unsigned, 2> PackIndices;
+ {
+ llvm::SmallBitVector SawIndices(TemplateParams->size());
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ collectUnexpandedParameterPacks(ParamPattern, Unexpanded);
+ for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
+ unsigned Depth, Index;
+ llvm::tie(Depth, Index) = getDepthAndIndex(Unexpanded[I]);
+ if (Depth == 0 && !SawIndices[Index]) {
+ SawIndices[Index] = true;
+ PackIndices.push_back(Index);
+ }
+ }
+ }
+ assert(!PackIndices.empty() && "Pack expansion without unexpanded packs?");
+
+ // Keep track of the deduced template arguments for each parameter pack
+ // expanded by this pack expansion (the outer index) and for each
+ // template argument (the inner SmallVectors).
+ SmallVector<SmallVector<DeducedTemplateArgument, 4>, 2>
+ NewlyDeducedPacks(PackIndices.size());
+ SmallVector<DeducedTemplateArgument, 2>
+ SavedPacks(PackIndices.size());
+ PrepareArgumentPackDeduction(*this, Deduced, PackIndices, SavedPacks,
+ NewlyDeducedPacks);
+ bool HasAnyArguments = false;
+ for (; ArgIdx < Args.size(); ++ArgIdx) {
+ HasAnyArguments = true;
+
+ QualType OrigParamType = ParamPattern;
+ ParamType = OrigParamType;
+ Expr *Arg = Args[ArgIdx];
+ QualType ArgType = Arg->getType();
+
+ unsigned TDF = 0;
+ if (AdjustFunctionParmAndArgTypesForDeduction(*this, TemplateParams,
+ ParamType, ArgType, Arg,
+ TDF)) {
+ // We can't actually perform any deduction for this argument, so stop
+ // deduction at this point.
+ ++ArgIdx;
+ break;
+ }
+
+ // As above, initializer lists need special handling.
+ if (InitListExpr *ILE = dyn_cast<InitListExpr>(Arg)) {
+ QualType X;
+ if (!isStdInitializerList(ParamType, &X)) {
+ ++ArgIdx;
+ break;
+ }
+
+ for (unsigned i = 0, e = ILE->getNumInits(); i < e; ++i) {
+ if (TemplateDeductionResult Result =
+ DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams, X,
+ ILE->getInit(i)->getType(),
+ Info, Deduced, TDF))
+ return Result;
+ }
+ } else {
+
+ // Keep track of the argument type and corresponding argument index,
+ // so we can check for compatibility between the deduced A and A.
+ if (hasDeducibleTemplateParameters(*this, FunctionTemplate, ParamType))
+ OriginalCallArgs.push_back(OriginalCallArg(OrigParamType, ArgIdx,
+ ArgType));
+
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
+ ParamType, ArgType, Info,
+ Deduced, TDF))
+ return Result;
+ }
+
+ // Capture the deduced template arguments for each parameter pack expanded
+ // by this pack expansion, add them to the list of arguments we've deduced
+ // for that pack, then clear out the deduced argument.
+ for (unsigned I = 0, N = PackIndices.size(); I != N; ++I) {
+ DeducedTemplateArgument &DeducedArg = Deduced[PackIndices[I]];
+ if (!DeducedArg.isNull()) {
+ NewlyDeducedPacks[I].push_back(DeducedArg);
+ DeducedArg = DeducedTemplateArgument();
+ }
+ }
+ }
+
+ // Build argument packs for each of the parameter packs expanded by this
+ // pack expansion.
+ if (Sema::TemplateDeductionResult Result
+ = FinishArgumentPackDeduction(*this, TemplateParams, HasAnyArguments,
+ Deduced, PackIndices, SavedPacks,
+ NewlyDeducedPacks, Info))
+ return Result;
+
+    // After we've matched against a parameter pack, we're done.
+ break;
+ }
+
+ return FinishTemplateArgumentDeduction(FunctionTemplate, Deduced,
+ NumExplicitlySpecified,
+ Specialization, Info, &OriginalCallArgs);
+}
+
+/// \brief Deduce template arguments when taking the address of a function
+/// template (C++ [temp.deduct.funcaddr]) or matching a specialization to
+/// a template.
+///
+/// \param FunctionTemplate the function template for which we are performing
+/// template argument deduction.
+///
+/// \param ExplicitTemplateArguments the explicitly-specified template
+/// arguments.
+///
+/// \param ArgFunctionType the function type that will be used as the
+/// "argument" type (A) when performing template argument deduction from the
+/// function template's function type. This type may be NULL, if there is no
+/// argument type to compare against, in C++0x [temp.arg.explicit]p3.
+///
+/// \param Specialization if template argument deduction was successful,
+/// this will be set to the function template specialization produced by
+/// template argument deduction.
+///
+/// \param Info the argument will be updated to provide additional information
+/// about template argument deduction.
+///
+/// \returns the result of template argument deduction.
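+///
+/// An illustrative sketch (hypothetical names): for
+///   template<typename T> T max_of(T, T);
+/// initializing 'int (*p)(int, int) = &max_of;' uses the pointee type
+/// 'int (int, int)' as the argument type A, from which T is deduced as
+/// 'int'.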
+Sema::TemplateDeductionResult
+Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ QualType ArgFunctionType,
+ FunctionDecl *&Specialization,
+ TemplateDeductionInfo &Info) {
+ FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
+ TemplateParameterList *TemplateParams
+ = FunctionTemplate->getTemplateParameters();
+ QualType FunctionType = Function->getType();
+
+ // Substitute any explicit template arguments.
+ LocalInstantiationScope InstScope(*this);
+ SmallVector<DeducedTemplateArgument, 4> Deduced;
+ unsigned NumExplicitlySpecified = 0;
+ SmallVector<QualType, 4> ParamTypes;
+ if (ExplicitTemplateArgs) {
+ if (TemplateDeductionResult Result
+ = SubstituteExplicitTemplateArguments(FunctionTemplate,
+ *ExplicitTemplateArgs,
+ Deduced, ParamTypes,
+ &FunctionType, Info))
+ return Result;
+
+ NumExplicitlySpecified = Deduced.size();
+ }
+
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+ SFINAETrap Trap(*this);
+
+ Deduced.resize(TemplateParams->size());
+
+ if (!ArgFunctionType.isNull()) {
+ // Deduce template arguments from the function type.
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
+ FunctionType, ArgFunctionType, Info,
+ Deduced, TDF_TopLevelParameterTypeList))
+ return Result;
+ }
+
+ if (TemplateDeductionResult Result
+ = FinishTemplateArgumentDeduction(FunctionTemplate, Deduced,
+ NumExplicitlySpecified,
+ Specialization, Info))
+ return Result;
+
+ // If the requested function type does not match the actual type of the
+ // specialization, template argument deduction fails.
+ if (!ArgFunctionType.isNull() &&
+ !Context.hasSameType(ArgFunctionType, Specialization->getType()))
+ return TDK_NonDeducedMismatch;
+
+ return TDK_Success;
+}
+
+/// \brief Deduce template arguments for a templated conversion
+/// function (C++ [temp.deduct.conv]) and, if successful, produce a
+/// conversion function template specialization.
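+///
+/// An illustrative sketch (hypothetical names): for
+///   struct S { template<typename T> operator T*(); };
+/// converting an S to 'int*' compares the conversion function's return
+/// type (T*) with the required result type, deducing T = int.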
+Sema::TemplateDeductionResult
+Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
+ QualType ToType,
+ CXXConversionDecl *&Specialization,
+ TemplateDeductionInfo &Info) {
+ CXXConversionDecl *Conv
+ = cast<CXXConversionDecl>(FunctionTemplate->getTemplatedDecl());
+ QualType FromType = Conv->getConversionType();
+
+ // Canonicalize the types for deduction.
+ QualType P = Context.getCanonicalType(FromType);
+ QualType A = Context.getCanonicalType(ToType);
+
+ // C++0x [temp.deduct.conv]p2:
+ // If P is a reference type, the type referred to by P is used for
+ // type deduction.
+ if (const ReferenceType *PRef = P->getAs<ReferenceType>())
+ P = PRef->getPointeeType();
+
+ // C++0x [temp.deduct.conv]p4:
+ // [...] If A is a reference type, the type referred to by A is used
+ // for type deduction.
+ if (const ReferenceType *ARef = A->getAs<ReferenceType>())
+ A = ARef->getPointeeType().getUnqualifiedType();
+ // C++ [temp.deduct.conv]p3:
+ //
+ // If A is not a reference type:
+ else {
+ assert(!A->isReferenceType() && "Reference types were handled above");
+
+ // - If P is an array type, the pointer type produced by the
+ // array-to-pointer standard conversion (4.2) is used in place
+ // of P for type deduction; otherwise,
+ if (P->isArrayType())
+ P = Context.getArrayDecayedType(P);
+ // - If P is a function type, the pointer type produced by the
+ // function-to-pointer standard conversion (4.3) is used in
+ // place of P for type deduction; otherwise,
+ else if (P->isFunctionType())
+ P = Context.getPointerType(P);
+ // - If P is a cv-qualified type, the top level cv-qualifiers of
+ // P's type are ignored for type deduction.
+ else
+ P = P.getUnqualifiedType();
+
+ // C++0x [temp.deduct.conv]p4:
+ // If A is a cv-qualified type, the top level cv-qualifiers of A's
+ // type are ignored for type deduction. If A is a reference type, the type
+ // referred to by A is used for type deduction.
+ A = A.getUnqualifiedType();
+ }
+
+ // Unevaluated SFINAE context.
+ EnterExpressionEvaluationContext Unevaluated(*this, Sema::Unevaluated);
+ SFINAETrap Trap(*this);
+
+ // C++ [temp.deduct.conv]p1:
+ // Template argument deduction is done by comparing the return
+ // type of the template conversion function (call it P) with the
+ // type that is required as the result of the conversion (call it
+ // A) as described in 14.8.2.4.
+ TemplateParameterList *TemplateParams
+ = FunctionTemplate->getTemplateParameters();
+ SmallVector<DeducedTemplateArgument, 4> Deduced;
+ Deduced.resize(TemplateParams->size());
+
+ // C++0x [temp.deduct.conv]p4:
+ // In general, the deduction process attempts to find template
+ // argument values that will make the deduced A identical to
+ // A. However, there are two cases that allow a difference:
+ unsigned TDF = 0;
+ // - If the original A is a reference type, A can be more
+ // cv-qualified than the deduced A (i.e., the type referred to
+ // by the reference)
+ if (ToType->isReferenceType())
+ TDF |= TDF_ParamWithReferenceType;
+ // - The deduced A can be another pointer or pointer to member
+ // type that can be converted to A via a qualification
+ // conversion.
+ //
+ // (C++0x [temp.deduct.conv]p6 clarifies that this only happens when
+ // both P and A are pointers or member pointers. In this case, we
+ // just ignore cv-qualifiers completely).
+ if ((P->isPointerType() && A->isPointerType()) ||
+ (P->isMemberPointerType() && A->isMemberPointerType()))
+ TDF |= TDF_IgnoreQualifiers;
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArgumentsByTypeMatch(*this, TemplateParams,
+ P, A, Info, Deduced, TDF))
+ return Result;
+
+ // Finish template argument deduction.
+ LocalInstantiationScope InstScope(*this);
+ FunctionDecl *Spec = 0;
+ TemplateDeductionResult Result
+ = FinishTemplateArgumentDeduction(FunctionTemplate, Deduced, 0, Spec,
+ Info);
+ Specialization = cast_or_null<CXXConversionDecl>(Spec);
+ return Result;
+}
+
+/// \brief Deduce template arguments for a function template when there is
+/// nothing to deduce against (C++0x [temp.arg.explicit]p3).
+///
+/// \param FunctionTemplate the function template for which we are performing
+/// template argument deduction.
+///
+/// \param ExplicitTemplateArguments the explicitly-specified template
+/// arguments.
+///
+/// \param Specialization if template argument deduction was successful,
+/// this will be set to the function template specialization produced by
+/// template argument deduction.
+///
+/// \param Info the argument will be updated to provide additional information
+/// about template argument deduction.
+///
+/// \returns the result of template argument deduction.
+Sema::TemplateDeductionResult
+Sema::DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
+ TemplateArgumentListInfo *ExplicitTemplateArgs,
+ FunctionDecl *&Specialization,
+ TemplateDeductionInfo &Info) {
+ return DeduceTemplateArguments(FunctionTemplate, ExplicitTemplateArgs,
+ QualType(), Specialization, Info);
+}
+
+namespace {
+ /// Substitute the 'auto' type specifier within a type for a given replacement
+ /// type.
+ class SubstituteAutoTransform :
+ public TreeTransform<SubstituteAutoTransform> {
+ QualType Replacement;
+ public:
+ SubstituteAutoTransform(Sema &SemaRef, QualType Replacement) :
+ TreeTransform<SubstituteAutoTransform>(SemaRef), Replacement(Replacement) {
+ }
+ QualType TransformAutoType(TypeLocBuilder &TLB, AutoTypeLoc TL) {
+ // If we're building the type pattern to deduce against, don't wrap the
+ // substituted type in an AutoType. Certain template deduction rules
+ // apply only when a template type parameter appears directly (and not if
+ // the parameter is found through desugaring). For instance:
+ // auto &&lref = lvalue;
+ // must transform into "rvalue reference to T" not "rvalue reference to
+ // auto type deduced as T" in order for [temp.deduct.call]p3 to apply.
+ if (isa<TemplateTypeParmType>(Replacement)) {
+ QualType Result = Replacement;
+ TemplateTypeParmTypeLoc NewTL = TLB.push<TemplateTypeParmTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+ } else {
+ QualType Result = RebuildAutoType(Replacement);
+ AutoTypeLoc NewTL = TLB.push<AutoTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+ }
+ }
+
+ ExprResult TransformLambdaExpr(LambdaExpr *E) {
+ // Lambdas never need to be transformed.
+ return E;
+ }
+ };
+}
+
+/// \brief Deduce the type for an auto type-specifier (C++0x [dcl.spec.auto]p6)
+///
+/// \param Type the type pattern using the auto type-specifier.
+///
+/// \param Init the initializer for the variable whose type is to be deduced.
+///
+/// \param Result if type deduction was successful, this will be set to the
+/// deduced type. This may still contain undeduced autos if the type is
+/// dependent. This will be set to null if deduction succeeded, but auto
+/// substitution failed; the appropriate diagnostic will already have been
+/// produced in that case.
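+///
+/// An illustrative sketch: for 'auto x = 42;' the pattern is 'auto', Init
+/// is the literal 42, and the deduced type is 'int'; for 'auto y = {1, 2};'
+/// the deduced type is 'std::initializer_list<int>'.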
+Sema::DeduceAutoResult
+Sema::DeduceAutoType(TypeSourceInfo *Type, Expr *&Init,
+ TypeSourceInfo *&Result) {
+ if (Init->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(Init);
+ if (result.isInvalid()) return DAR_FailedAlreadyDiagnosed;
+ Init = result.take();
+ }
+
+ if (Init->isTypeDependent()) {
+ Result = Type;
+ return DAR_Succeeded;
+ }
+
+ SourceLocation Loc = Init->getExprLoc();
+
+ LocalInstantiationScope InstScope(*this);
+
+ // Build template<class TemplParam> void Func(FuncParam);
+ TemplateTypeParmDecl *TemplParam =
+ TemplateTypeParmDecl::Create(Context, 0, SourceLocation(), Loc, 0, 0, 0,
+ false, false);
+ QualType TemplArg = QualType(TemplParam->getTypeForDecl(), 0);
+ NamedDecl *TemplParamPtr = TemplParam;
+ FixedSizeTemplateParameterList<1> TemplateParams(Loc, Loc, &TemplParamPtr,
+ Loc);
+
+ TypeSourceInfo *FuncParamInfo =
+ SubstituteAutoTransform(*this, TemplArg).TransformType(Type);
+ assert(FuncParamInfo && "substituting template parameter for 'auto' failed");
+ QualType FuncParam = FuncParamInfo->getType();
+
+ // Deduce type of TemplParam in Func(Init)
+ SmallVector<DeducedTemplateArgument, 1> Deduced;
+ Deduced.resize(1);
+ QualType InitType = Init->getType();
+ unsigned TDF = 0;
+
+ TemplateDeductionInfo Info(Context, Loc);
+
+  InitListExpr *InitList = dyn_cast<InitListExpr>(Init);
+ if (InitList) {
+ for (unsigned i = 0, e = InitList->getNumInits(); i < e; ++i) {
+ if (DeduceTemplateArgumentByListElement(*this, &TemplateParams,
+ TemplArg,
+ InitList->getInit(i),
+ Info, Deduced, TDF))
+ return DAR_Failed;
+ }
+ } else {
+ if (AdjustFunctionParmAndArgTypesForDeduction(*this, &TemplateParams,
+ FuncParam, InitType, Init,
+ TDF))
+ return DAR_Failed;
+
+ if (DeduceTemplateArgumentsByTypeMatch(*this, &TemplateParams, FuncParam,
+ InitType, Info, Deduced, TDF))
+ return DAR_Failed;
+ }
+
+ QualType DeducedType = Deduced[0].getAsType();
+ if (DeducedType.isNull())
+ return DAR_Failed;
+
+ if (InitList) {
+ DeducedType = BuildStdInitializerList(DeducedType, Loc);
+ if (DeducedType.isNull())
+ return DAR_FailedAlreadyDiagnosed;
+ }
+
+ Result = SubstituteAutoTransform(*this, DeducedType).TransformType(Type);
+
+ // Check that the deduced argument type is compatible with the original
+ // argument type per C++ [temp.deduct.call]p4.
+ if (!InitList && Result &&
+ CheckOriginalCallArgDeduction(*this,
+ Sema::OriginalCallArg(FuncParam,0,InitType),
+ Result->getType())) {
+ Result = 0;
+ return DAR_Failed;
+ }
+
+ return DAR_Succeeded;
+}
+
+void Sema::DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init) {
+ if (isa<InitListExpr>(Init))
+ Diag(VDecl->getLocation(),
+ diag::err_auto_var_deduction_failure_from_init_list)
+ << VDecl->getDeclName() << VDecl->getType() << Init->getSourceRange();
+ else
+ Diag(VDecl->getLocation(), diag::err_auto_var_deduction_failure)
+ << VDecl->getDeclName() << VDecl->getType() << Init->getType()
+ << Init->getSourceRange();
+}
+
+static void
+MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
+ bool OnlyDeduced,
+ unsigned Level,
+ llvm::SmallBitVector &Deduced);
+
+/// \brief If this is a non-static member function, add the type of its
+/// implicit object parameter (an lvalue reference to the cv-qualified
+/// class type) to the given list of argument types.
+static void MaybeAddImplicitObjectParameterType(ASTContext &Context,
+ CXXMethodDecl *Method,
+ SmallVectorImpl<QualType> &ArgTypes) {
+ if (Method->isStatic())
+ return;
+
+ // C++ [over.match.funcs]p4:
+ //
+ // For non-static member functions, the type of the implicit
+ // object parameter is
+ // - "lvalue reference to cv X" for functions declared without a
+ // ref-qualifier or with the & ref-qualifier
+ // - "rvalue reference to cv X" for functions declared with the
+ // && ref-qualifier
+ //
+ // FIXME: We don't have ref-qualifiers yet, so we don't do that part.
+ QualType ArgTy = Context.getTypeDeclType(Method->getParent());
+ ArgTy = Context.getQualifiedType(ArgTy,
+ Qualifiers::fromCVRMask(Method->getTypeQualifiers()));
+ ArgTy = Context.getLValueReferenceType(ArgTy);
+ ArgTypes.push_back(ArgTy);
+}
+
+/// \brief Determine whether the function template \p FT1 is at least as
+/// specialized as \p FT2.
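+///
+/// An illustrative sketch (hypothetical names): given
+///   template<typename T> void f(T*);   // FT1
+///   template<typename T> void f(T);    // FT2
+/// FT1 is at least as specialized as FT2 (FT2's T deduces against T*), but
+/// not vice versa, so partial ordering prefers f(T*) when both are viable.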
+static bool isAtLeastAsSpecializedAs(Sema &S,
+ SourceLocation Loc,
+ FunctionTemplateDecl *FT1,
+ FunctionTemplateDecl *FT2,
+ TemplatePartialOrderingContext TPOC,
+ unsigned NumCallArguments,
+ SmallVectorImpl<RefParamPartialOrderingComparison> *RefParamComparisons) {
+ FunctionDecl *FD1 = FT1->getTemplatedDecl();
+ FunctionDecl *FD2 = FT2->getTemplatedDecl();
+ const FunctionProtoType *Proto1 = FD1->getType()->getAs<FunctionProtoType>();
+ const FunctionProtoType *Proto2 = FD2->getType()->getAs<FunctionProtoType>();
+
+ assert(Proto1 && Proto2 && "Function templates must have prototypes");
+ TemplateParameterList *TemplateParams = FT2->getTemplateParameters();
+ SmallVector<DeducedTemplateArgument, 4> Deduced;
+ Deduced.resize(TemplateParams->size());
+
+ // C++0x [temp.deduct.partial]p3:
+ // The types used to determine the ordering depend on the context in which
+ // the partial ordering is done:
+ TemplateDeductionInfo Info(S.Context, Loc);
+ CXXMethodDecl *Method1 = 0;
+ CXXMethodDecl *Method2 = 0;
+ bool IsNonStatic2 = false;
+ bool IsNonStatic1 = false;
+ unsigned Skip2 = 0;
+ switch (TPOC) {
+ case TPOC_Call: {
+ // - In the context of a function call, the function parameter types are
+ // used.
+ Method1 = dyn_cast<CXXMethodDecl>(FD1);
+ Method2 = dyn_cast<CXXMethodDecl>(FD2);
+ IsNonStatic1 = Method1 && !Method1->isStatic();
+ IsNonStatic2 = Method2 && !Method2->isStatic();
+
+ // C++0x [temp.func.order]p3:
+ // [...] If only one of the function templates is a non-static
+ // member, that function template is considered to have a new
+ // first parameter inserted in its function parameter list. The
+ // new parameter is of type "reference to cv A," where cv are
+ // the cv-qualifiers of the function template (if any) and A is
+ // the class of which the function template is a member.
+ //
+ // C++98/03 doesn't have this provision, so instead we drop the
+ // first argument of the free function or static member, which
+ // seems to match existing practice.
+ SmallVector<QualType, 4> Args1;
+ unsigned Skip1 = !S.getLangOpts().CPlusPlus0x &&
+ IsNonStatic2 && !IsNonStatic1;
+ if (S.getLangOpts().CPlusPlus0x && IsNonStatic1 && !IsNonStatic2)
+ MaybeAddImplicitObjectParameterType(S.Context, Method1, Args1);
+ Args1.insert(Args1.end(),
+ Proto1->arg_type_begin() + Skip1, Proto1->arg_type_end());
+
+ SmallVector<QualType, 4> Args2;
+ Skip2 = !S.getLangOpts().CPlusPlus0x &&
+ IsNonStatic1 && !IsNonStatic2;
+ if (S.getLangOpts().CPlusPlus0x && IsNonStatic2 && !IsNonStatic1)
+ MaybeAddImplicitObjectParameterType(S.Context, Method2, Args2);
+ Args2.insert(Args2.end(),
+ Proto2->arg_type_begin() + Skip2, Proto2->arg_type_end());
+
+ // C++ [temp.func.order]p5:
+ // The presence of unused ellipsis and default arguments has no effect on
+ // the partial ordering of function templates.
+ if (Args1.size() > NumCallArguments)
+ Args1.resize(NumCallArguments);
+ if (Args2.size() > NumCallArguments)
+ Args2.resize(NumCallArguments);
+ if (DeduceTemplateArguments(S, TemplateParams, Args2.data(), Args2.size(),
+ Args1.data(), Args1.size(), Info, Deduced,
+ TDF_None, /*PartialOrdering=*/true,
+ RefParamComparisons))
+ return false;
+
+ break;
+ }
+
+ case TPOC_Conversion:
+ // - In the context of a call to a conversion operator, the return types
+ // of the conversion function templates are used.
+ if (DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ Proto2->getResultType(),
+ Proto1->getResultType(),
+ Info, Deduced, TDF_None,
+ /*PartialOrdering=*/true,
+ RefParamComparisons))
+ return false;
+ break;
+
+ case TPOC_Other:
+ // - In other contexts (14.6.6.2) the function template's function type
+ // is used.
+ if (DeduceTemplateArgumentsByTypeMatch(S, TemplateParams,
+ FD2->getType(), FD1->getType(),
+ Info, Deduced, TDF_None,
+ /*PartialOrdering=*/true,
+ RefParamComparisons))
+ return false;
+ break;
+ }
+
+ // C++0x [temp.deduct.partial]p11:
+ // In most cases, all template parameters must have values in order for
+ // deduction to succeed, but for partial ordering purposes a template
+ // parameter may remain without a value provided it is not used in the
+ // types being used for partial ordering. [ Note: a template parameter used
+ // in a non-deduced context is considered used. -end note]
+ unsigned ArgIdx = 0, NumArgs = Deduced.size();
+ for (; ArgIdx != NumArgs; ++ArgIdx)
+ if (Deduced[ArgIdx].isNull())
+ break;
+
+ if (ArgIdx == NumArgs) {
+ // All template arguments were deduced. FT1 is at least as specialized
+ // as FT2.
+ return true;
+ }
+
+ // Figure out which template parameters were used.
+ llvm::SmallBitVector UsedParameters(TemplateParams->size());
+ switch (TPOC) {
+ case TPOC_Call: {
+ unsigned NumParams = std::min(NumCallArguments,
+ std::min(Proto1->getNumArgs(),
+ Proto2->getNumArgs()));
+ if (S.getLangOpts().CPlusPlus0x && IsNonStatic2 && !IsNonStatic1)
+ ::MarkUsedTemplateParameters(S.Context, Method2->getThisType(S.Context),
+ false,
+ TemplateParams->getDepth(), UsedParameters);
+ for (unsigned I = Skip2; I < NumParams; ++I)
+ ::MarkUsedTemplateParameters(S.Context, Proto2->getArgType(I), false,
+ TemplateParams->getDepth(),
+ UsedParameters);
+ break;
+ }
+
+ case TPOC_Conversion:
+ ::MarkUsedTemplateParameters(S.Context, Proto2->getResultType(), false,
+ TemplateParams->getDepth(),
+ UsedParameters);
+ break;
+
+ case TPOC_Other:
+ ::MarkUsedTemplateParameters(S.Context, FD2->getType(), false,
+ TemplateParams->getDepth(),
+ UsedParameters);
+ break;
+ }
+
+ for (; ArgIdx != NumArgs; ++ArgIdx)
+ // If this argument had no value deduced but was used in one of the types
+ // used for partial ordering, then deduction fails.
+ if (Deduced[ArgIdx].isNull() && UsedParameters[ArgIdx])
+ return false;
+
+ return true;
+}
+
+/// \brief Determine whether this is a function template whose parameter-type-list
+/// ends with a function parameter pack.
+static bool isVariadicFunctionTemplate(FunctionTemplateDecl *FunTmpl) {
+ FunctionDecl *Function = FunTmpl->getTemplatedDecl();
+ unsigned NumParams = Function->getNumParams();
+ if (NumParams == 0)
+ return false;
+
+ ParmVarDecl *Last = Function->getParamDecl(NumParams - 1);
+ if (!Last->isParameterPack())
+ return false;
+
+ // Make sure that no previous parameter is a parameter pack.
+ while (--NumParams > 0) {
+ if (Function->getParamDecl(NumParams - 1)->isParameterPack())
+ return false;
+ }
+
+ return true;
+}
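+
+// Illustrative sketch (the templates below are hypothetical, not from this
+// file): isVariadicFunctionTemplate returns true only when the final function
+// parameter is the sole parameter pack.
+//
+//   template<typename T, typename ...Ts> void a(T t, Ts ...ts); // true
+//   template<typename ...Ts>             void b(Ts ...ts);      // true
+//   template<typename T, typename ...Ts> void c(Ts ...ts, T t); // false: the
+//                                                               // pack is not last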
+
+/// \brief Returns the more specialized function template according
+/// to the rules of function template partial ordering (C++ [temp.func.order]).
+///
+/// \param FT1 the first function template
+///
+/// \param FT2 the second function template
+///
+/// \param TPOC the context in which we are performing partial ordering of
+/// function templates.
+///
+/// \param NumCallArguments The number of arguments in a call, used only
+/// when \c TPOC is \c TPOC_Call.
+///
+/// \returns the more specialized function template. If neither
+/// template is more specialized, returns NULL.
+FunctionTemplateDecl *
+Sema::getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
+ FunctionTemplateDecl *FT2,
+ SourceLocation Loc,
+ TemplatePartialOrderingContext TPOC,
+ unsigned NumCallArguments) {
+ SmallVector<RefParamPartialOrderingComparison, 4> RefParamComparisons;
+ bool Better1 = isAtLeastAsSpecializedAs(*this, Loc, FT1, FT2, TPOC,
+ NumCallArguments, 0);
+ bool Better2 = isAtLeastAsSpecializedAs(*this, Loc, FT2, FT1, TPOC,
+ NumCallArguments,
+ &RefParamComparisons);
+
+ if (Better1 != Better2) // We have a clear winner
+ return Better1? FT1 : FT2;
+
+ if (!Better1 && !Better2) // Neither is better than the other
+ return 0;
+
+ // C++0x [temp.deduct.partial]p10:
+ // If for each type being considered a given template is at least as
+ // specialized for all types and more specialized for some set of types and
+ // the other template is not more specialized for any types or is not at
+ // least as specialized for any types, then the given template is more
+ // specialized than the other template. Otherwise, neither template is more
+ // specialized than the other.
+ Better1 = false;
+ Better2 = false;
+ for (unsigned I = 0, N = RefParamComparisons.size(); I != N; ++I) {
+ // C++0x [temp.deduct.partial]p9:
+ // If, for a given type, deduction succeeds in both directions (i.e., the
+ // types are identical after the transformations above) and both P and A
+ // were reference types (before being replaced with the type referred to
+ // above):
+
+ // -- if the type from the argument template was an lvalue reference
+ // and the type from the parameter template was not, the argument
+ // type is considered to be more specialized than the other;
+ // otherwise,
+ if (!RefParamComparisons[I].ArgIsRvalueRef &&
+ RefParamComparisons[I].ParamIsRvalueRef) {
+ Better2 = true;
+ if (Better1)
+ return 0;
+ continue;
+ } else if (!RefParamComparisons[I].ParamIsRvalueRef &&
+ RefParamComparisons[I].ArgIsRvalueRef) {
+ Better1 = true;
+ if (Better2)
+ return 0;
+ continue;
+ }
+
+ // -- if the type from the argument template is more cv-qualified than
+ // the type from the parameter template (as described above), the
+ // argument type is considered to be more specialized than the
+ // other; otherwise,
+ switch (RefParamComparisons[I].Qualifiers) {
+ case NeitherMoreQualified:
+ break;
+
+ case ParamMoreQualified:
+ Better1 = true;
+ if (Better2)
+ return 0;
+ continue;
+
+ case ArgMoreQualified:
+ Better2 = true;
+ if (Better1)
+ return 0;
+ continue;
+ }
+
+ // -- neither type is more specialized than the other.
+ }
+
+ assert(!(Better1 && Better2) && "Should have broken out in the loop above");
+ if (Better1)
+ return FT1;
+ else if (Better2)
+ return FT2;
+
+ // FIXME: This mimics what GCC implements, but doesn't match up with the
+ // proposed resolution for core issue 692. This area needs to be sorted out,
+ // but for now we attempt to maintain compatibility.
+ bool Variadic1 = isVariadicFunctionTemplate(FT1);
+ bool Variadic2 = isVariadicFunctionTemplate(FT2);
+ if (Variadic1 != Variadic2)
+ return Variadic1? FT2 : FT1;
+
+ return 0;
+}
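+
+// Illustrative sketch (hypothetical overloads): partial ordering as performed
+// above prefers the template whose parameter accepts a strict subset of what
+// the other accepts.
+//
+//   template<class T> void f(T);    // #1
+//   template<class T> void f(T*);   // #2
+//   int *p;
+//   f(p);   // #2 is at least as specialized as #1 but not vice versa, so
+//           // getMoreSpecializedTemplate returns #2's FunctionTemplateDecl.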
+
+/// \brief Determine if the two templates are equivalent.
+static bool isSameTemplate(TemplateDecl *T1, TemplateDecl *T2) {
+ if (T1 == T2)
+ return true;
+
+ if (!T1 || !T2)
+ return false;
+
+ return T1->getCanonicalDecl() == T2->getCanonicalDecl();
+}
+
+/// \brief Retrieve the most specialized of the given function template
+/// specializations.
+///
+/// \param SpecBegin the start iterator of the function template
+/// specializations that we will be comparing.
+///
+/// \param SpecEnd the end iterator of the function template
+/// specializations, paired with \p SpecBegin.
+///
+/// \param TPOC the partial ordering context to use to compare the function
+/// template specializations.
+///
+/// \param NumCallArguments The number of arguments in a call, used only
+/// when \c TPOC is \c TPOC_Call.
+///
+/// \param Loc the location where the ambiguity or no-specializations
+/// diagnostic should occur.
+///
+/// \param NoneDiag partial diagnostic used to diagnose cases where there are
+/// no matching candidates.
+///
+/// \param AmbigDiag partial diagnostic used to diagnose an ambiguity, if one
+/// occurs.
+///
+/// \param CandidateDiag partial diagnostic used for each function template
+/// specialization that is a candidate in the ambiguous ordering. One parameter
+/// in this diagnostic should be unbound, which will correspond to the string
+/// describing the template arguments for the function template specialization.
+///
+/// \param Index if non-NULL and the result of this function is non-NULL,
+/// receives the index corresponding to the resulting function template
+/// specialization.
+///
+/// \returns the most specialized function template specialization, if
+/// found. Otherwise, returns SpecEnd.
+///
+/// \todo FIXME: Consider passing in the "also-ran" candidates that failed
+/// template argument deduction.
+UnresolvedSetIterator
+Sema::getMostSpecialized(UnresolvedSetIterator SpecBegin,
+ UnresolvedSetIterator SpecEnd,
+ TemplatePartialOrderingContext TPOC,
+ unsigned NumCallArguments,
+ SourceLocation Loc,
+ const PartialDiagnostic &NoneDiag,
+ const PartialDiagnostic &AmbigDiag,
+ const PartialDiagnostic &CandidateDiag,
+ bool Complain,
+ QualType TargetType) {
+ if (SpecBegin == SpecEnd) {
+ if (Complain)
+ Diag(Loc, NoneDiag);
+ return SpecEnd;
+ }
+
+ if (SpecBegin + 1 == SpecEnd)
+ return SpecBegin;
+
+ // Find the function template that is better than all of the templates it
+ // has been compared to.
+ UnresolvedSetIterator Best = SpecBegin;
+ FunctionTemplateDecl *BestTemplate
+ = cast<FunctionDecl>(*Best)->getPrimaryTemplate();
+ assert(BestTemplate && "Not a function template specialization?");
+ for (UnresolvedSetIterator I = SpecBegin + 1; I != SpecEnd; ++I) {
+ FunctionTemplateDecl *Challenger
+ = cast<FunctionDecl>(*I)->getPrimaryTemplate();
+ assert(Challenger && "Not a function template specialization?");
+ if (isSameTemplate(getMoreSpecializedTemplate(BestTemplate, Challenger,
+ Loc, TPOC, NumCallArguments),
+ Challenger)) {
+ Best = I;
+ BestTemplate = Challenger;
+ }
+ }
+
+ // Make sure that the "best" function template is more specialized than all
+ // of the others.
+ bool Ambiguous = false;
+ for (UnresolvedSetIterator I = SpecBegin; I != SpecEnd; ++I) {
+ FunctionTemplateDecl *Challenger
+ = cast<FunctionDecl>(*I)->getPrimaryTemplate();
+ if (I != Best &&
+ !isSameTemplate(getMoreSpecializedTemplate(BestTemplate, Challenger,
+ Loc, TPOC, NumCallArguments),
+ BestTemplate)) {
+ Ambiguous = true;
+ break;
+ }
+ }
+
+ if (!Ambiguous) {
+ // We found an answer. Return it.
+ return Best;
+ }
+
+ // Diagnose the ambiguity.
+ if (Complain)
+ Diag(Loc, AmbigDiag);
+
+ if (Complain)
+ // FIXME: Can we order the candidates in some sane way?
+ for (UnresolvedSetIterator I = SpecBegin; I != SpecEnd; ++I) {
+ PartialDiagnostic PD = CandidateDiag;
+ PD << getTemplateArgumentBindingsText(
+ cast<FunctionDecl>(*I)->getPrimaryTemplate()->getTemplateParameters(),
+ *cast<FunctionDecl>(*I)->getTemplateSpecializationArgs());
+ if (!TargetType.isNull())
+ HandleFunctionTypeMismatch(PD, cast<FunctionDecl>(*I)->getType(),
+ TargetType);
+ Diag((*I)->getLocation(), PD);
+ }
+
+ return SpecEnd;
+}
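+
+// Illustrative sketch (hypothetical declarations): getMostSpecialized is what
+// lets an explicit specialization pick the right primary template out of an
+// overload set of function templates.
+//
+//   template<class T> void h(T);    // #1
+//   template<class T> void h(T*);   // #2
+//   template<> void h(int*) { }     // specializes #2, the most specialized
+//                                   // template matching int*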
+
+/// \brief Returns the more specialized class template partial specialization
+/// according to the rules of partial ordering of class template partial
+/// specializations (C++ [temp.class.order]).
+///
+/// \param PS1 the first class template partial specialization
+///
+/// \param PS2 the second class template partial specialization
+///
+/// \returns the more specialized class template partial specialization. If
+/// neither partial specialization is more specialized, returns NULL.
+ClassTemplatePartialSpecializationDecl *
+Sema::getMoreSpecializedPartialSpecialization(
+ ClassTemplatePartialSpecializationDecl *PS1,
+ ClassTemplatePartialSpecializationDecl *PS2,
+ SourceLocation Loc) {
+ // C++ [temp.class.order]p1:
+ // For two class template partial specializations, the first is at least as
+ // specialized as the second if, given the following rewrite to two
+ // function templates, the first function template is at least as
+ // specialized as the second according to the ordering rules for function
+ // templates (14.6.6.2):
+ // - the first function template has the same template parameters as the
+ // first partial specialization and has a single function parameter
+ // whose type is a class template specialization with the template
+ // arguments of the first partial specialization, and
+ // - the second function template has the same template parameters as the
+ // second partial specialization and has a single function parameter
+ // whose type is a class template specialization with the template
+ // arguments of the second partial specialization.
+ //
+ // Rather than synthesize function templates, we merely perform the
+ // equivalent partial ordering by performing deduction directly on
+ // the template arguments of the class template partial
+ // specializations. This computation is slightly simpler than the
+ // general problem of function template partial ordering, because
+ // class template partial specializations are more constrained. We
+ // know that every template parameter is deducible from the class
+ // template partial specialization's template arguments, for
+ // example.
+ SmallVector<DeducedTemplateArgument, 4> Deduced;
+ TemplateDeductionInfo Info(Context, Loc);
+
+ QualType PT1 = PS1->getInjectedSpecializationType();
+ QualType PT2 = PS2->getInjectedSpecializationType();
+
+ // Determine whether PS1 is at least as specialized as PS2
+ Deduced.resize(PS2->getTemplateParameters()->size());
+ bool Better1 = !DeduceTemplateArgumentsByTypeMatch(*this,
+ PS2->getTemplateParameters(),
+ PT2, PT1, Info, Deduced, TDF_None,
+ /*PartialOrdering=*/true,
+ /*RefParamComparisons=*/0);
+ if (Better1) {
+ InstantiatingTemplate Inst(*this, PS2->getLocation(), PS2,
+ Deduced.data(), Deduced.size(), Info);
+ Better1 = !::FinishTemplateArgumentDeduction(*this, PS2,
+ PS1->getTemplateArgs(),
+ Deduced, Info);
+ }
+
+ // Determine whether PS2 is at least as specialized as PS1
+ Deduced.clear();
+ Deduced.resize(PS1->getTemplateParameters()->size());
+ bool Better2 = !DeduceTemplateArgumentsByTypeMatch(*this,
+ PS1->getTemplateParameters(),
+ PT1, PT2, Info, Deduced, TDF_None,
+ /*PartialOrdering=*/true,
+ /*RefParamComparisons=*/0);
+ if (Better2) {
+ InstantiatingTemplate Inst(*this, PS1->getLocation(), PS1,
+ Deduced.data(), Deduced.size(), Info);
+ Better2 = !::FinishTemplateArgumentDeduction(*this, PS1,
+ PS2->getTemplateArgs(),
+ Deduced, Info);
+ }
+
+ if (Better1 == Better2)
+ return 0;
+
+ return Better1? PS1 : PS2;
+}
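+
+// Illustrative sketch (hypothetical class template): ordering of partial
+// specializations via the deductions performed above.
+//
+//   template<class T, class U> struct X { };           // primary
+//   template<class T, class U> struct X<T*, U> { };    // #1
+//   template<class T>          struct X<T*, int> { };  // #2
+//   X<char*, int> x;  // deducing #1's <T, U> from #2's <T*, int> succeeds,
+//                     // but not the reverse, so #2 is more specialized and
+//                     // is the one instantiated.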
+
+static void
+MarkUsedTemplateParameters(ASTContext &Ctx,
+ const TemplateArgument &TemplateArg,
+ bool OnlyDeduced,
+ unsigned Depth,
+ llvm::SmallBitVector &Used);
+
+/// \brief Mark the template parameters that are used by the given
+/// expression.
+static void
+MarkUsedTemplateParameters(ASTContext &Ctx,
+ const Expr *E,
+ bool OnlyDeduced,
+ unsigned Depth,
+ llvm::SmallBitVector &Used) {
+ // We can deduce from a pack expansion.
+ if (const PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(E))
+ E = Expansion->getPattern();
+
+ // Skip through any implicit casts we added while type-checking.
+ while (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
+ E = ICE->getSubExpr();
+
+ // FIXME: if !OnlyDeduced, we have to walk the whole subexpression to
+ // find other occurrences of template parameters.
+ const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E);
+ if (!DRE)
+ return;
+
+ const NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(DRE->getDecl());
+ if (!NTTP)
+ return;
+
+ if (NTTP->getDepth() == Depth)
+ Used[NTTP->getIndex()] = true;
+}
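+
+// Illustrative sketch (hypothetical template): the expression walk above is
+// what makes a non-type parameter appearing as an array bound deducible.
+//
+//   template<int N> void f(int (&arr)[N]);
+//   int a[4];
+//   f(a);   // the bound of the DependentSizedArrayType is a DeclRefExpr
+//           // naming N, so N is marked used and deduced as 4.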
+
+/// \brief Mark the template parameters that are used by the given
+/// nested name specifier.
+static void
+MarkUsedTemplateParameters(ASTContext &Ctx,
+ NestedNameSpecifier *NNS,
+ bool OnlyDeduced,
+ unsigned Depth,
+ llvm::SmallBitVector &Used) {
+ if (!NNS)
+ return;
+
+ MarkUsedTemplateParameters(Ctx, NNS->getPrefix(), OnlyDeduced, Depth,
+ Used);
+ MarkUsedTemplateParameters(Ctx, QualType(NNS->getAsType(), 0),
+ OnlyDeduced, Depth, Used);
+}
+
+/// \brief Mark the template parameters that are used by the given
+/// template name.
+static void
+MarkUsedTemplateParameters(ASTContext &Ctx,
+ TemplateName Name,
+ bool OnlyDeduced,
+ unsigned Depth,
+ llvm::SmallBitVector &Used) {
+ if (TemplateDecl *Template = Name.getAsTemplateDecl()) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(Template)) {
+ if (TTP->getDepth() == Depth)
+ Used[TTP->getIndex()] = true;
+ }
+ return;
+ }
+
+ if (QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName())
+ MarkUsedTemplateParameters(Ctx, QTN->getQualifier(), OnlyDeduced,
+ Depth, Used);
+ if (DependentTemplateName *DTN = Name.getAsDependentTemplateName())
+ MarkUsedTemplateParameters(Ctx, DTN->getQualifier(), OnlyDeduced,
+ Depth, Used);
+}
+
+/// \brief Mark the template parameters that are used by the given
+/// type.
+static void
+MarkUsedTemplateParameters(ASTContext &Ctx, QualType T,
+ bool OnlyDeduced,
+ unsigned Depth,
+ llvm::SmallBitVector &Used) {
+ if (T.isNull())
+ return;
+
+ // Non-dependent types have nothing deducible
+ if (!T->isDependentType())
+ return;
+
+ T = Ctx.getCanonicalType(T);
+ switch (T->getTypeClass()) {
+ case Type::Pointer:
+ MarkUsedTemplateParameters(Ctx,
+ cast<PointerType>(T)->getPointeeType(),
+ OnlyDeduced,
+ Depth,
+ Used);
+ break;
+
+ case Type::BlockPointer:
+ MarkUsedTemplateParameters(Ctx,
+ cast<BlockPointerType>(T)->getPointeeType(),
+ OnlyDeduced,
+ Depth,
+ Used);
+ break;
+
+ case Type::LValueReference:
+ case Type::RValueReference:
+ MarkUsedTemplateParameters(Ctx,
+ cast<ReferenceType>(T)->getPointeeType(),
+ OnlyDeduced,
+ Depth,
+ Used);
+ break;
+
+ case Type::MemberPointer: {
+ const MemberPointerType *MemPtr = cast<MemberPointerType>(T.getTypePtr());
+ MarkUsedTemplateParameters(Ctx, MemPtr->getPointeeType(), OnlyDeduced,
+ Depth, Used);
+ MarkUsedTemplateParameters(Ctx, QualType(MemPtr->getClass(), 0),
+ OnlyDeduced, Depth, Used);
+ break;
+ }
+
+ case Type::DependentSizedArray:
+ MarkUsedTemplateParameters(Ctx,
+ cast<DependentSizedArrayType>(T)->getSizeExpr(),
+ OnlyDeduced, Depth, Used);
+ // Fall through to check the element type
+
+ case Type::ConstantArray:
+ case Type::IncompleteArray:
+ MarkUsedTemplateParameters(Ctx,
+ cast<ArrayType>(T)->getElementType(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::Vector:
+ case Type::ExtVector:
+ MarkUsedTemplateParameters(Ctx,
+ cast<VectorType>(T)->getElementType(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::DependentSizedExtVector: {
+ const DependentSizedExtVectorType *VecType
+ = cast<DependentSizedExtVectorType>(T);
+ MarkUsedTemplateParameters(Ctx, VecType->getElementType(), OnlyDeduced,
+ Depth, Used);
+ MarkUsedTemplateParameters(Ctx, VecType->getSizeExpr(), OnlyDeduced,
+ Depth, Used);
+ break;
+ }
+
+ case Type::FunctionProto: {
+ const FunctionProtoType *Proto = cast<FunctionProtoType>(T);
+ MarkUsedTemplateParameters(Ctx, Proto->getResultType(), OnlyDeduced,
+ Depth, Used);
+ for (unsigned I = 0, N = Proto->getNumArgs(); I != N; ++I)
+ MarkUsedTemplateParameters(Ctx, Proto->getArgType(I), OnlyDeduced,
+ Depth, Used);
+ break;
+ }
+
+ case Type::TemplateTypeParm: {
+ const TemplateTypeParmType *TTP = cast<TemplateTypeParmType>(T);
+ if (TTP->getDepth() == Depth)
+ Used[TTP->getIndex()] = true;
+ break;
+ }
+
+ case Type::SubstTemplateTypeParmPack: {
+ const SubstTemplateTypeParmPackType *Subst
+ = cast<SubstTemplateTypeParmPackType>(T);
+ MarkUsedTemplateParameters(Ctx,
+ QualType(Subst->getReplacedParameter(), 0),
+ OnlyDeduced, Depth, Used);
+ MarkUsedTemplateParameters(Ctx, Subst->getArgumentPack(),
+ OnlyDeduced, Depth, Used);
+ break;
+ }
+
+ case Type::InjectedClassName:
+ T = cast<InjectedClassNameType>(T)->getInjectedSpecializationType();
+ // fall through
+
+ case Type::TemplateSpecialization: {
+ const TemplateSpecializationType *Spec
+ = cast<TemplateSpecializationType>(T);
+ MarkUsedTemplateParameters(Ctx, Spec->getTemplateName(), OnlyDeduced,
+ Depth, Used);
+
+ // C++0x [temp.deduct.type]p9:
+ // If the template argument list of P contains a pack expansion that is not
+ // the last template argument, the entire template argument list is a
+ // non-deduced context.
+ if (OnlyDeduced &&
+ hasPackExpansionBeforeEnd(Spec->getArgs(), Spec->getNumArgs()))
+ break;
+
+ for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
+ MarkUsedTemplateParameters(Ctx, Spec->getArg(I), OnlyDeduced, Depth,
+ Used);
+ break;
+ }
+
+ case Type::Complex:
+ if (!OnlyDeduced)
+ MarkUsedTemplateParameters(Ctx,
+ cast<ComplexType>(T)->getElementType(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::Atomic:
+ if (!OnlyDeduced)
+ MarkUsedTemplateParameters(Ctx,
+ cast<AtomicType>(T)->getValueType(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::DependentName:
+ if (!OnlyDeduced)
+ MarkUsedTemplateParameters(Ctx,
+ cast<DependentNameType>(T)->getQualifier(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::DependentTemplateSpecialization: {
+ const DependentTemplateSpecializationType *Spec
+ = cast<DependentTemplateSpecializationType>(T);
+ if (!OnlyDeduced)
+ MarkUsedTemplateParameters(Ctx, Spec->getQualifier(),
+ OnlyDeduced, Depth, Used);
+
+ // C++0x [temp.deduct.type]p9:
+ // If the template argument list of P contains a pack expansion that is not
+ // the last template argument, the entire template argument list is a
+ // non-deduced context.
+ if (OnlyDeduced &&
+ hasPackExpansionBeforeEnd(Spec->getArgs(), Spec->getNumArgs()))
+ break;
+
+ for (unsigned I = 0, N = Spec->getNumArgs(); I != N; ++I)
+ MarkUsedTemplateParameters(Ctx, Spec->getArg(I), OnlyDeduced, Depth,
+ Used);
+ break;
+ }
+
+ case Type::TypeOf:
+ if (!OnlyDeduced)
+ MarkUsedTemplateParameters(Ctx,
+ cast<TypeOfType>(T)->getUnderlyingType(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::TypeOfExpr:
+ if (!OnlyDeduced)
+ MarkUsedTemplateParameters(Ctx,
+ cast<TypeOfExprType>(T)->getUnderlyingExpr(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::Decltype:
+ if (!OnlyDeduced)
+ MarkUsedTemplateParameters(Ctx,
+ cast<DecltypeType>(T)->getUnderlyingExpr(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::UnaryTransform:
+ if (!OnlyDeduced)
+ MarkUsedTemplateParameters(Ctx,
+ cast<UnaryTransformType>(T)->getUnderlyingType(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::PackExpansion:
+ MarkUsedTemplateParameters(Ctx,
+ cast<PackExpansionType>(T)->getPattern(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case Type::Auto:
+ MarkUsedTemplateParameters(Ctx,
+ cast<AutoType>(T)->getDeducedType(),
+ OnlyDeduced, Depth, Used);
+
+ // None of these types have any template parameters in them.
+ case Type::Builtin:
+ case Type::VariableArray:
+ case Type::FunctionNoProto:
+ case Type::Record:
+ case Type::Enum:
+ case Type::ObjCInterface:
+ case Type::ObjCObject:
+ case Type::ObjCObjectPointer:
+ case Type::UnresolvedUsing:
+#define TYPE(Class, Base)
+#define ABSTRACT_TYPE(Class, Base)
+#define DEPENDENT_TYPE(Class, Base)
+#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
+#include "clang/AST/TypeNodes.def"
+ break;
+ }
+}
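+
+// Illustrative sketch (hypothetical templates): when OnlyDeduced is true, the
+// cases above deliberately skip non-deduced contexts such as DependentName.
+//
+//   template<class T> struct A { typedef T type; };
+//   template<class T> void g(typename A<T>::type);
+//   // T appears only inside a DependentNameType, so it is never marked as
+//   // deducible; a call such as g(0) cannot deduce T.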
+
+/// \brief Mark the template parameters that are used by this
+/// template argument.
+static void
+MarkUsedTemplateParameters(ASTContext &Ctx,
+ const TemplateArgument &TemplateArg,
+ bool OnlyDeduced,
+ unsigned Depth,
+ llvm::SmallBitVector &Used) {
+ switch (TemplateArg.getKind()) {
+ case TemplateArgument::Null:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Declaration:
+ break;
+
+ case TemplateArgument::Type:
+ MarkUsedTemplateParameters(Ctx, TemplateArg.getAsType(), OnlyDeduced,
+ Depth, Used);
+ break;
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion:
+ MarkUsedTemplateParameters(Ctx,
+ TemplateArg.getAsTemplateOrTemplatePattern(),
+ OnlyDeduced, Depth, Used);
+ break;
+
+ case TemplateArgument::Expression:
+ MarkUsedTemplateParameters(Ctx, TemplateArg.getAsExpr(), OnlyDeduced,
+ Depth, Used);
+ break;
+
+ case TemplateArgument::Pack:
+ for (TemplateArgument::pack_iterator P = TemplateArg.pack_begin(),
+ PEnd = TemplateArg.pack_end();
+ P != PEnd; ++P)
+ MarkUsedTemplateParameters(Ctx, *P, OnlyDeduced, Depth, Used);
+ break;
+ }
+}
+
+/// \brief Mark the template parameters that can be deduced by the given
+/// template argument list.
+///
+/// \param TemplateArgs the template argument list from which template
+/// parameters will be deduced.
+///
+/// \param Used a bit vector whose elements will be set to \c true
+/// to indicate when the corresponding template parameter is used by
+/// (or, with \p OnlyDeduced, deducible from) the template argument list.
+void
+Sema::MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
+ bool OnlyDeduced, unsigned Depth,
+ llvm::SmallBitVector &Used) {
+ // C++0x [temp.deduct.type]p9:
+ // If the template argument list of P contains a pack expansion that is not
+ // the last template argument, the entire template argument list is a
+ // non-deduced context.
+ if (OnlyDeduced &&
+ hasPackExpansionBeforeEnd(TemplateArgs.data(), TemplateArgs.size()))
+ return;
+
+ for (unsigned I = 0, N = TemplateArgs.size(); I != N; ++I)
+ ::MarkUsedTemplateParameters(Context, TemplateArgs[I], OnlyDeduced,
+ Depth, Used);
+}
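+
+// Illustrative sketch (hypothetical specialization): the early return above
+// implements the C++0x [temp.deduct.type]p9 rule quoted in the comment.
+//
+//   template<class ...Ts> struct Q;
+//   template<class ...Ts> struct Q<Ts..., int> { };
+//   // The pack expansion Ts... is not the last argument, so the whole list
+//   // is a non-deduced context and no parameter of the partial
+//   // specialization gets marked.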
+
+/// \brief Marks all of the template parameters that will be deduced by a
+/// call to the given function template.
+void
+Sema::MarkDeducedTemplateParameters(ASTContext &Ctx,
+ FunctionTemplateDecl *FunctionTemplate,
+ llvm::SmallBitVector &Deduced) {
+ TemplateParameterList *TemplateParams
+ = FunctionTemplate->getTemplateParameters();
+ Deduced.clear();
+ Deduced.resize(TemplateParams->size());
+
+ FunctionDecl *Function = FunctionTemplate->getTemplatedDecl();
+ for (unsigned I = 0, N = Function->getNumParams(); I != N; ++I)
+ ::MarkUsedTemplateParameters(Ctx, Function->getParamDecl(I)->getType(),
+ true, TemplateParams->getDepth(), Deduced);
+}
+
+bool hasDeducibleTemplateParameters(Sema &S,
+ FunctionTemplateDecl *FunctionTemplate,
+ QualType T) {
+ if (!T->isDependentType())
+ return false;
+
+ TemplateParameterList *TemplateParams
+ = FunctionTemplate->getTemplateParameters();
+ llvm::SmallBitVector Deduced(TemplateParams->size());
+ ::MarkUsedTemplateParameters(S.Context, T, true, TemplateParams->getDepth(),
+ Deduced);
+
+ return Deduced.any();
+}
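+
+// Illustrative sketch (hypothetical template): given
+//
+//   template<class T> T convert(int size);
+//
+// hasDeducibleTemplateParameters returns false for the parameter type 'int'
+// (it is not dependent) and would return true for a parameter written as
+// 'T' or 'T*', since those mention the deducible parameter T.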
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
new file mode 100644
index 0000000..4740145
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -0,0 +1,2556 @@
+//===------- SemaTemplateInstantiate.cpp - C++ Template Instantiation ------===/
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===/
+//
+// This file implements C++ template instantiation.
+//
+//===----------------------------------------------------------------------===/
+
+#include "clang/Sema/SemaInternal.h"
+#include "TreeTransform.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/Initialization.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/Template.h"
+#include "clang/Sema/TemplateDeduction.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/LangOptions.h"
+
+using namespace clang;
+using namespace sema;
+
+//===----------------------------------------------------------------------===/
+// Template Instantiation Support
+//===----------------------------------------------------------------------===/
+
+/// \brief Retrieve the template argument list(s) that should be used to
+/// instantiate the definition of the given declaration.
+///
+/// \param D the declaration for which we are computing template instantiation
+/// arguments.
+///
+/// \param Innermost if non-NULL, the innermost template argument list.
+///
+/// \param RelativeToPrimary true if we should get the template
+/// arguments relative to the primary template, even when we're
+/// dealing with a specialization. This is only relevant for function
+/// template specializations.
+///
+/// \param Pattern If non-NULL, indicates the pattern from which we will be
+/// instantiating the definition of the given declaration, \p D. This is
+/// used to determine the proper set of template instantiation arguments for
+/// friend function template specializations.
+MultiLevelTemplateArgumentList
+Sema::getTemplateInstantiationArgs(NamedDecl *D,
+ const TemplateArgumentList *Innermost,
+ bool RelativeToPrimary,
+ const FunctionDecl *Pattern) {
+ // Accumulate the set of template argument lists in this structure.
+ MultiLevelTemplateArgumentList Result;
+
+ if (Innermost)
+ Result.addOuterTemplateArguments(Innermost);
+
+ DeclContext *Ctx = dyn_cast<DeclContext>(D);
+ if (!Ctx) {
+ Ctx = D->getDeclContext();
+
+ // If we have a template template parameter with translation unit context,
+ // then we're performing substitution into a default template argument of
+ // this template template parameter before we've constructed the template
+ // that will own this template template parameter. In this case, we
+ // use empty template parameter lists for all of the outer templates
+ // to avoid performing any substitutions.
+ if (Ctx->isTranslationUnit()) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(D)) {
+ for (unsigned I = 0, N = TTP->getDepth() + 1; I != N; ++I)
+ Result.addOuterTemplateArguments(0, 0);
+ return Result;
+ }
+ }
+ }
+
+ while (!Ctx->isFileContext()) {
+ // Add template arguments from a class template instantiation.
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Ctx)) {
+ // We're done when we hit an explicit specialization.
+ if (Spec->getSpecializationKind() == TSK_ExplicitSpecialization &&
+ !isa<ClassTemplatePartialSpecializationDecl>(Spec))
+ break;
+
+ Result.addOuterTemplateArguments(&Spec->getTemplateInstantiationArgs());
+
+ // If this class template specialization was instantiated from a
+ // specialized member that is a class template, we're done.
+ assert(Spec->getSpecializedTemplate() && "No class template?");
+ if (Spec->getSpecializedTemplate()->isMemberSpecialization())
+ break;
+ }
+ // Add template arguments from a function template specialization.
+ else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Ctx)) {
+ if (!RelativeToPrimary &&
+ (Function->getTemplateSpecializationKind() ==
+ TSK_ExplicitSpecialization &&
+ !Function->getClassScopeSpecializationPattern()))
+ break;
+
+ if (const TemplateArgumentList *TemplateArgs
+ = Function->getTemplateSpecializationArgs()) {
+ // Add the template arguments for this specialization.
+ Result.addOuterTemplateArguments(TemplateArgs);
+
+ // If this function was instantiated from a specialized member that is
+ // a function template, we're done.
+ assert(Function->getPrimaryTemplate() && "No function template?");
+ if (Function->getPrimaryTemplate()->isMemberSpecialization())
+ break;
+ } else if (FunctionTemplateDecl *FunTmpl
+ = Function->getDescribedFunctionTemplate()) {
+ // Add the "injected" template arguments.
+ std::pair<const TemplateArgument *, unsigned>
+ Injected = FunTmpl->getInjectedTemplateArgs();
+ Result.addOuterTemplateArguments(Injected.first, Injected.second);
+ }
+
+ // If this is a friend declaration and it declares an entity at
+ // namespace scope, take arguments from its lexical parent
+ // instead of its semantic parent, unless of course the pattern we're
+ // instantiating actually comes from the file's context!
+ if (Function->getFriendObjectKind() &&
+ Function->getDeclContext()->isFileContext() &&
+ (!Pattern || !Pattern->getLexicalDeclContext()->isFileContext())) {
+ Ctx = Function->getLexicalDeclContext();
+ RelativeToPrimary = false;
+ continue;
+ }
+ } else if (CXXRecordDecl *Rec = dyn_cast<CXXRecordDecl>(Ctx)) {
+ if (ClassTemplateDecl *ClassTemplate = Rec->getDescribedClassTemplate()) {
+ QualType T = ClassTemplate->getInjectedClassNameSpecialization();
+ const TemplateSpecializationType *TST
+ = cast<TemplateSpecializationType>(Context.getCanonicalType(T));
+ Result.addOuterTemplateArguments(TST->getArgs(), TST->getNumArgs());
+ if (ClassTemplate->isMemberSpecialization())
+ break;
+ }
+ }
+
+ Ctx = Ctx->getParent();
+ RelativeToPrimary = false;
+ }
+
+ return Result;
+}
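+
+// Illustrative sketch (hypothetical member template): the walk above collects
+// one argument list per enclosing template.
+//
+//   template<class T> struct Outer {
+//     template<class U> void f(U u);
+//   };
+//   Outer<int>().f('x');  // instantiating Outer<int>::f<char> yields a
+//                         // MultiLevelTemplateArgumentList containing
+//                         // <char> for f and <int> for Outer.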
+
+bool Sema::ActiveTemplateInstantiation::isInstantiationRecord() const {
+ switch (Kind) {
+ case TemplateInstantiation:
+ case DefaultTemplateArgumentInstantiation:
+ case DefaultFunctionArgumentInstantiation:
+ return true;
+
+ case ExplicitTemplateArgumentSubstitution:
+ case DeducedTemplateArgumentSubstitution:
+ case PriorTemplateArgumentSubstitution:
+ case DefaultTemplateArgumentChecking:
+ return false;
+ }
+
+ llvm_unreachable("Invalid InstantiationKind!");
+}
+
+Sema::InstantiatingTemplate::
+InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ Decl *Entity,
+ SourceRange InstantiationRange)
+ : SemaRef(SemaRef),
+ SavedInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext)
+{
+ Invalid = CheckInstantiationDepth(PointOfInstantiation,
+ InstantiationRange);
+ if (!Invalid) {
+ ActiveTemplateInstantiation Inst;
+ Inst.Kind = ActiveTemplateInstantiation::TemplateInstantiation;
+ Inst.PointOfInstantiation = PointOfInstantiation;
+ Inst.Entity = reinterpret_cast<uintptr_t>(Entity);
+ Inst.TemplateArgs = 0;
+ Inst.NumTemplateArgs = 0;
+ Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
+ SemaRef.ActiveTemplateInstantiations.push_back(Inst);
+ }
+}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
+ SourceLocation PointOfInstantiation,
+ TemplateDecl *Template,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ SourceRange InstantiationRange)
+ : SemaRef(SemaRef),
+ SavedInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext)
+{
+ Invalid = CheckInstantiationDepth(PointOfInstantiation,
+ InstantiationRange);
+ if (!Invalid) {
+ ActiveTemplateInstantiation Inst;
+ Inst.Kind
+ = ActiveTemplateInstantiation::DefaultTemplateArgumentInstantiation;
+ Inst.PointOfInstantiation = PointOfInstantiation;
+ Inst.Entity = reinterpret_cast<uintptr_t>(Template);
+ Inst.TemplateArgs = TemplateArgs;
+ Inst.NumTemplateArgs = NumTemplateArgs;
+ Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
+ SemaRef.ActiveTemplateInstantiations.push_back(Inst);
+ }
+}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
+ SourceLocation PointOfInstantiation,
+ FunctionTemplateDecl *FunctionTemplate,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ ActiveTemplateInstantiation::InstantiationKind Kind,
+ sema::TemplateDeductionInfo &DeductionInfo,
+ SourceRange InstantiationRange)
+ : SemaRef(SemaRef),
+ SavedInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext)
+{
+ Invalid = CheckInstantiationDepth(PointOfInstantiation,
+ InstantiationRange);
+ if (!Invalid) {
+ ActiveTemplateInstantiation Inst;
+ Inst.Kind = Kind;
+ Inst.PointOfInstantiation = PointOfInstantiation;
+ Inst.Entity = reinterpret_cast<uintptr_t>(FunctionTemplate);
+ Inst.TemplateArgs = TemplateArgs;
+ Inst.NumTemplateArgs = NumTemplateArgs;
+ Inst.DeductionInfo = &DeductionInfo;
+ Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
+ SemaRef.ActiveTemplateInstantiations.push_back(Inst);
+
+ if (!Inst.isInstantiationRecord())
+ ++SemaRef.NonInstantiationEntries;
+ }
+}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
+ SourceLocation PointOfInstantiation,
+ ClassTemplatePartialSpecializationDecl *PartialSpec,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ sema::TemplateDeductionInfo &DeductionInfo,
+ SourceRange InstantiationRange)
+ : SemaRef(SemaRef),
+ SavedInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext)
+{
+ Invalid = false;
+
+ ActiveTemplateInstantiation Inst;
+ Inst.Kind = ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution;
+ Inst.PointOfInstantiation = PointOfInstantiation;
+ Inst.Entity = reinterpret_cast<uintptr_t>(PartialSpec);
+ Inst.TemplateArgs = TemplateArgs;
+ Inst.NumTemplateArgs = NumTemplateArgs;
+ Inst.DeductionInfo = &DeductionInfo;
+ Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
+ SemaRef.ActiveTemplateInstantiations.push_back(Inst);
+
+ assert(!Inst.isInstantiationRecord());
+ ++SemaRef.NonInstantiationEntries;
+}
+
+Sema::InstantiatingTemplate::InstantiatingTemplate(Sema &SemaRef,
+ SourceLocation PointOfInstantiation,
+ ParmVarDecl *Param,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ SourceRange InstantiationRange)
+ : SemaRef(SemaRef),
+ SavedInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext)
+{
+ Invalid = CheckInstantiationDepth(PointOfInstantiation, InstantiationRange);
+
+ if (!Invalid) {
+ ActiveTemplateInstantiation Inst;
+ Inst.Kind
+ = ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation;
+ Inst.PointOfInstantiation = PointOfInstantiation;
+ Inst.Entity = reinterpret_cast<uintptr_t>(Param);
+ Inst.TemplateArgs = TemplateArgs;
+ Inst.NumTemplateArgs = NumTemplateArgs;
+ Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
+ SemaRef.ActiveTemplateInstantiations.push_back(Inst);
+ }
+}
+
+Sema::InstantiatingTemplate::
+InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ NamedDecl *Template,
+ NonTypeTemplateParmDecl *Param,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ SourceRange InstantiationRange)
+ : SemaRef(SemaRef),
+ SavedInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext)
+{
+ Invalid = false;
+
+ ActiveTemplateInstantiation Inst;
+ Inst.Kind = ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution;
+ Inst.PointOfInstantiation = PointOfInstantiation;
+ Inst.Template = Template;
+ Inst.Entity = reinterpret_cast<uintptr_t>(Param);
+ Inst.TemplateArgs = TemplateArgs;
+ Inst.NumTemplateArgs = NumTemplateArgs;
+ Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
+ SemaRef.ActiveTemplateInstantiations.push_back(Inst);
+
+ assert(!Inst.isInstantiationRecord());
+ ++SemaRef.NonInstantiationEntries;
+}
+
+Sema::InstantiatingTemplate::
+InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ NamedDecl *Template,
+ TemplateTemplateParmDecl *Param,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ SourceRange InstantiationRange)
+ : SemaRef(SemaRef),
+ SavedInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext)
+{
+ Invalid = false;
+ ActiveTemplateInstantiation Inst;
+ Inst.Kind = ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution;
+ Inst.PointOfInstantiation = PointOfInstantiation;
+ Inst.Template = Template;
+ Inst.Entity = reinterpret_cast<uintptr_t>(Param);
+ Inst.TemplateArgs = TemplateArgs;
+ Inst.NumTemplateArgs = NumTemplateArgs;
+ Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
+ SemaRef.ActiveTemplateInstantiations.push_back(Inst);
+
+ assert(!Inst.isInstantiationRecord());
+ ++SemaRef.NonInstantiationEntries;
+}
+
+Sema::InstantiatingTemplate::
+InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
+ TemplateDecl *Template,
+ NamedDecl *Param,
+ const TemplateArgument *TemplateArgs,
+ unsigned NumTemplateArgs,
+ SourceRange InstantiationRange)
+ : SemaRef(SemaRef),
+ SavedInNonInstantiationSFINAEContext(
+ SemaRef.InNonInstantiationSFINAEContext)
+{
+ Invalid = false;
+
+ ActiveTemplateInstantiation Inst;
+ Inst.Kind = ActiveTemplateInstantiation::DefaultTemplateArgumentChecking;
+ Inst.PointOfInstantiation = PointOfInstantiation;
+ Inst.Template = Template;
+ Inst.Entity = reinterpret_cast<uintptr_t>(Param);
+ Inst.TemplateArgs = TemplateArgs;
+ Inst.NumTemplateArgs = NumTemplateArgs;
+ Inst.InstantiationRange = InstantiationRange;
+ SemaRef.InNonInstantiationSFINAEContext = false;
+ SemaRef.ActiveTemplateInstantiations.push_back(Inst);
+
+ assert(!Inst.isInstantiationRecord());
+ ++SemaRef.NonInstantiationEntries;
+}
+
+void Sema::InstantiatingTemplate::Clear() {
+ if (!Invalid) {
+ if (!SemaRef.ActiveTemplateInstantiations.back().isInstantiationRecord()) {
+ assert(SemaRef.NonInstantiationEntries > 0);
+ --SemaRef.NonInstantiationEntries;
+ }
+ SemaRef.InNonInstantiationSFINAEContext
+ = SavedInNonInstantiationSFINAEContext;
+ SemaRef.ActiveTemplateInstantiations.pop_back();
+ Invalid = true;
+ }
+}
+
+bool Sema::InstantiatingTemplate::CheckInstantiationDepth(
+ SourceLocation PointOfInstantiation,
+ SourceRange InstantiationRange) {
+ assert(SemaRef.NonInstantiationEntries <=
+ SemaRef.ActiveTemplateInstantiations.size());
+ if ((SemaRef.ActiveTemplateInstantiations.size() -
+ SemaRef.NonInstantiationEntries)
+ <= SemaRef.getLangOpts().InstantiationDepth)
+ return false;
+
+ SemaRef.Diag(PointOfInstantiation,
+ diag::err_template_recursion_depth_exceeded)
+ << SemaRef.getLangOpts().InstantiationDepth
+ << InstantiationRange;
+ SemaRef.Diag(PointOfInstantiation, diag::note_template_recursion_depth)
+ << SemaRef.getLangOpts().InstantiationDepth;
+ return true;
+}
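+
+// Illustrative sketch (hypothetical template): an unbounded recursive
+// instantiation is what this check catches.
+//
+//   template<unsigned N> struct Count {
+//     static const unsigned value = Count<N + 1>::value;  // no base case
+//   };
+//   // Referencing Count<0>::value keeps pushing TemplateInstantiation
+//   // records until the -ftemplate-depth limit (LangOpts.InstantiationDepth)
+//   // is exceeded and err_template_recursion_depth_exceeded is emitted.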
+
+/// \brief Prints the current instantiation stack through a series of
+/// notes.
+void Sema::PrintInstantiationStack() {
+ // Determine which template instantiations to skip, if any.
+ unsigned SkipStart = ActiveTemplateInstantiations.size(), SkipEnd = SkipStart;
+ unsigned Limit = Diags.getTemplateBacktraceLimit();
+ if (Limit && Limit < ActiveTemplateInstantiations.size()) {
+ SkipStart = Limit / 2 + Limit % 2;
+ SkipEnd = ActiveTemplateInstantiations.size() - Limit / 2;
+ }
+
+ // FIXME: In all of these cases, we need to show the template arguments
+ unsigned InstantiationIdx = 0;
+ for (SmallVector<ActiveTemplateInstantiation, 16>::reverse_iterator
+ Active = ActiveTemplateInstantiations.rbegin(),
+ ActiveEnd = ActiveTemplateInstantiations.rend();
+ Active != ActiveEnd;
+ ++Active, ++InstantiationIdx) {
+ // Skip this instantiation?
+ if (InstantiationIdx >= SkipStart && InstantiationIdx < SkipEnd) {
+ if (InstantiationIdx == SkipStart) {
+ // Note that we're skipping instantiations.
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_instantiation_contexts_suppressed)
+ << unsigned(ActiveTemplateInstantiations.size() - Limit);
+ }
+ continue;
+ }
+
+ switch (Active->Kind) {
+ case ActiveTemplateInstantiation::TemplateInstantiation: {
+ Decl *D = reinterpret_cast<Decl *>(Active->Entity);
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
+ unsigned DiagID = diag::note_template_member_class_here;
+ if (isa<ClassTemplateSpecializationDecl>(Record))
+ DiagID = diag::note_template_class_instantiation_here;
+ Diags.Report(Active->PointOfInstantiation, DiagID)
+ << Context.getTypeDeclType(Record)
+ << Active->InstantiationRange;
+ } else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(D)) {
+ unsigned DiagID;
+ if (Function->getPrimaryTemplate())
+ DiagID = diag::note_function_template_spec_here;
+ else
+ DiagID = diag::note_template_member_function_here;
+ Diags.Report(Active->PointOfInstantiation, DiagID)
+ << Function
+ << Active->InstantiationRange;
+ } else if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_template_static_data_member_def_here)
+ << VD
+ << Active->InstantiationRange;
+ } else if (EnumDecl *ED = dyn_cast<EnumDecl>(D)) {
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_template_enum_def_here)
+ << ED
+ << Active->InstantiationRange;
+ } else {
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_template_type_alias_instantiation_here)
+ << cast<TypeAliasTemplateDecl>(D)
+ << Active->InstantiationRange;
+ }
+ break;
+ }
+
+ case ActiveTemplateInstantiation::DefaultTemplateArgumentInstantiation: {
+ TemplateDecl *Template = cast<TemplateDecl>((Decl *)Active->Entity);
+ std::string TemplateArgsStr
+ = TemplateSpecializationType::PrintTemplateArgumentList(
+ Active->TemplateArgs,
+ Active->NumTemplateArgs,
+ getPrintingPolicy());
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_default_arg_instantiation_here)
+ << (Template->getNameAsString() + TemplateArgsStr)
+ << Active->InstantiationRange;
+ break;
+ }
+
+ case ActiveTemplateInstantiation::ExplicitTemplateArgumentSubstitution: {
+ FunctionTemplateDecl *FnTmpl
+ = cast<FunctionTemplateDecl>((Decl *)Active->Entity);
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_explicit_template_arg_substitution_here)
+ << FnTmpl
+ << getTemplateArgumentBindingsText(FnTmpl->getTemplateParameters(),
+ Active->TemplateArgs,
+ Active->NumTemplateArgs)
+ << Active->InstantiationRange;
+ break;
+ }
+
+ case ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution:
+ if (ClassTemplatePartialSpecializationDecl *PartialSpec
+ = dyn_cast<ClassTemplatePartialSpecializationDecl>(
+ (Decl *)Active->Entity)) {
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_partial_spec_deduct_instantiation_here)
+ << Context.getTypeDeclType(PartialSpec)
+ << getTemplateArgumentBindingsText(
+ PartialSpec->getTemplateParameters(),
+ Active->TemplateArgs,
+ Active->NumTemplateArgs)
+ << Active->InstantiationRange;
+ } else {
+ FunctionTemplateDecl *FnTmpl
+ = cast<FunctionTemplateDecl>((Decl *)Active->Entity);
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_function_template_deduction_instantiation_here)
+ << FnTmpl
+ << getTemplateArgumentBindingsText(FnTmpl->getTemplateParameters(),
+ Active->TemplateArgs,
+ Active->NumTemplateArgs)
+ << Active->InstantiationRange;
+ }
+ break;
+
+ case ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation: {
+ ParmVarDecl *Param = cast<ParmVarDecl>((Decl *)Active->Entity);
+ FunctionDecl *FD = cast<FunctionDecl>(Param->getDeclContext());
+
+ std::string TemplateArgsStr
+ = TemplateSpecializationType::PrintTemplateArgumentList(
+ Active->TemplateArgs,
+ Active->NumTemplateArgs,
+ getPrintingPolicy());
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_default_function_arg_instantiation_here)
+ << (FD->getNameAsString() + TemplateArgsStr)
+ << Active->InstantiationRange;
+ break;
+ }
+
+ case ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution: {
+ NamedDecl *Parm = cast<NamedDecl>((Decl *)Active->Entity);
+ std::string Name;
+ if (!Parm->getName().empty())
+ Name = std::string(" '") + Parm->getName().str() + "'";
+
+ TemplateParameterList *TemplateParams = 0;
+ if (TemplateDecl *Template = dyn_cast<TemplateDecl>(Active->Template))
+ TemplateParams = Template->getTemplateParameters();
+ else
+ TemplateParams =
+ cast<ClassTemplatePartialSpecializationDecl>(Active->Template)
+ ->getTemplateParameters();
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_prior_template_arg_substitution)
+ << isa<TemplateTemplateParmDecl>(Parm)
+ << Name
+ << getTemplateArgumentBindingsText(TemplateParams,
+ Active->TemplateArgs,
+ Active->NumTemplateArgs)
+ << Active->InstantiationRange;
+ break;
+ }
+
+ case ActiveTemplateInstantiation::DefaultTemplateArgumentChecking: {
+ TemplateParameterList *TemplateParams = 0;
+ if (TemplateDecl *Template = dyn_cast<TemplateDecl>(Active->Template))
+ TemplateParams = Template->getTemplateParameters();
+ else
+ TemplateParams =
+ cast<ClassTemplatePartialSpecializationDecl>(Active->Template)
+ ->getTemplateParameters();
+
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_template_default_arg_checking)
+ << getTemplateArgumentBindingsText(TemplateParams,
+ Active->TemplateArgs,
+ Active->NumTemplateArgs)
+ << Active->InstantiationRange;
+ break;
+ }
+ }
+ }
+}
+
+llvm::Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
+ if (InNonInstantiationSFINAEContext)
+ return llvm::Optional<TemplateDeductionInfo *>(0);
+
+ for (SmallVector<ActiveTemplateInstantiation, 16>::const_reverse_iterator
+ Active = ActiveTemplateInstantiations.rbegin(),
+ ActiveEnd = ActiveTemplateInstantiations.rend();
+ Active != ActiveEnd;
+ ++Active)
+ {
+ switch(Active->Kind) {
+ case ActiveTemplateInstantiation::DefaultFunctionArgumentInstantiation:
+ case ActiveTemplateInstantiation::TemplateInstantiation:
+ // This is a template instantiation, so there is no SFINAE.
+ return llvm::Optional<TemplateDeductionInfo *>();
+
+ case ActiveTemplateInstantiation::DefaultTemplateArgumentInstantiation:
+ case ActiveTemplateInstantiation::PriorTemplateArgumentSubstitution:
+ case ActiveTemplateInstantiation::DefaultTemplateArgumentChecking:
+ // A default template argument instantiation and substitution into
+ // template parameters with arguments for prior parameters may or may
+ // not be a SFINAE context; look further up the stack.
+ break;
+
+ case ActiveTemplateInstantiation::ExplicitTemplateArgumentSubstitution:
+ case ActiveTemplateInstantiation::DeducedTemplateArgumentSubstitution:
+      // We're either substituting explicitly-specified template arguments
+ // or deduced template arguments, so SFINAE applies.
+ assert(Active->DeductionInfo && "Missing deduction info pointer");
+ return Active->DeductionInfo;
+ }
+ }
+
+ return llvm::Optional<TemplateDeductionInfo *>();
+}
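+
+// Illustrative sketch (hypothetical overloads): the distinction drawn above
+// is what makes SFINAE work during overload resolution.
+//
+//   template<class T> typename T::type f(T);  // #1
+//   void f(...);                              // #2
+//   f(42);  // substituting T=int into #1 fails (int has no nested ::type),
+//           // but the failure occurs during deduced-argument substitution,
+//           // so it is recorded in the TemplateDeductionInfo rather than
+//           // diagnosed, and overload resolution falls back to #2.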
+
+/// \brief Retrieve the depth and index of a parameter pack.
+static std::pair<unsigned, unsigned>
+getDepthAndIndex(NamedDecl *ND) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ND))
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(ND))
+ return std::make_pair(NTTP->getDepth(), NTTP->getIndex());
+
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(ND);
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+}
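+
+// Illustrative sketch (hypothetical nested templates): depth counts enclosing
+// template parameter lists, index counts the position within a list.
+//
+//   template<class T>            // T: depth 0, index 0
+//   struct S {
+//     template<class U, int N>   // U: depth 1, index 0
+//     void m();                  // N: depth 1, index 1
+//   };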
+
+//===----------------------------------------------------------------------===/
+// Template Instantiation for Types
+//===----------------------------------------------------------------------===/
+namespace {
+ class TemplateInstantiator : public TreeTransform<TemplateInstantiator> {
+ const MultiLevelTemplateArgumentList &TemplateArgs;
+ SourceLocation Loc;
+ DeclarationName Entity;
+
+ public:
+ typedef TreeTransform<TemplateInstantiator> inherited;
+
+ TemplateInstantiator(Sema &SemaRef,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ SourceLocation Loc,
+ DeclarationName Entity)
+ : inherited(SemaRef), TemplateArgs(TemplateArgs), Loc(Loc),
+ Entity(Entity) { }
+
+ /// \brief Determine whether the given type \p T has already been
+ /// transformed.
+ ///
+ /// For the purposes of template instantiation, a type has already been
+ /// transformed if it is NULL or if it is not dependent.
+ bool AlreadyTransformed(QualType T);
+
+ /// \brief Returns the location of the entity being instantiated, if known.
+ SourceLocation getBaseLocation() { return Loc; }
+
+ /// \brief Returns the name of the entity being instantiated, if any.
+ DeclarationName getBaseEntity() { return Entity; }
+
+ /// \brief Sets the "base" location and entity when that
+ /// information is known based on another transformation.
+ void setBase(SourceLocation Loc, DeclarationName Entity) {
+ this->Loc = Loc;
+ this->Entity = Entity;
+ }
+
+ bool TryExpandParameterPacks(SourceLocation EllipsisLoc,
+ SourceRange PatternRange,
+ llvm::ArrayRef<UnexpandedParameterPack> Unexpanded,
+ bool &ShouldExpand,
+ bool &RetainExpansion,
+ llvm::Optional<unsigned> &NumExpansions) {
+ return getSema().CheckParameterPacksForExpansion(EllipsisLoc,
+ PatternRange, Unexpanded,
+ TemplateArgs,
+ ShouldExpand,
+ RetainExpansion,
+ NumExpansions);
+ }
+
+ void ExpandingFunctionParameterPack(ParmVarDecl *Pack) {
+ SemaRef.CurrentInstantiationScope->MakeInstantiatedLocalArgPack(Pack);
+ }
+
+ TemplateArgument ForgetPartiallySubstitutedPack() {
+ TemplateArgument Result;
+ if (NamedDecl *PartialPack
+ = SemaRef.CurrentInstantiationScope->getPartiallySubstitutedPack()){
+ MultiLevelTemplateArgumentList &TemplateArgs
+ = const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs);
+ unsigned Depth, Index;
+ llvm::tie(Depth, Index) = getDepthAndIndex(PartialPack);
+ if (TemplateArgs.hasTemplateArgument(Depth, Index)) {
+ Result = TemplateArgs(Depth, Index);
+ TemplateArgs.setArgument(Depth, Index, TemplateArgument());
+ }
+ }
+
+ return Result;
+ }
+
+ void RememberPartiallySubstitutedPack(TemplateArgument Arg) {
+ if (Arg.isNull())
+ return;
+
+ if (NamedDecl *PartialPack
+ = SemaRef.CurrentInstantiationScope->getPartiallySubstitutedPack()){
+ MultiLevelTemplateArgumentList &TemplateArgs
+ = const_cast<MultiLevelTemplateArgumentList &>(this->TemplateArgs);
+ unsigned Depth, Index;
+ llvm::tie(Depth, Index) = getDepthAndIndex(PartialPack);
+ TemplateArgs.setArgument(Depth, Index, Arg);
+ }
+ }
+
+ /// \brief Transform the given declaration by instantiating a reference to
+ /// this declaration.
+ Decl *TransformDecl(SourceLocation Loc, Decl *D);
+
+ void transformAttrs(Decl *Old, Decl *New) {
+ SemaRef.InstantiateAttrs(TemplateArgs, Old, New);
+ }
+
+ void transformedLocalDecl(Decl *Old, Decl *New) {
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(Old, New);
+ }
+
+ /// \brief Transform the definition of the given declaration by
+ /// instantiating it.
+ Decl *TransformDefinition(SourceLocation Loc, Decl *D);
+
+    /// \brief Transform the first qualifier within a scope by instantiating the
+ /// declaration.
+ NamedDecl *TransformFirstQualifierInScope(NamedDecl *D, SourceLocation Loc);
+
+ /// \brief Rebuild the exception declaration and register the declaration
+ /// as an instantiated local.
+ VarDecl *RebuildExceptionDecl(VarDecl *ExceptionDecl,
+ TypeSourceInfo *Declarator,
+ SourceLocation StartLoc,
+ SourceLocation NameLoc,
+ IdentifierInfo *Name);
+
+ /// \brief Rebuild the Objective-C exception declaration and register the
+ /// declaration as an instantiated local.
+ VarDecl *RebuildObjCExceptionDecl(VarDecl *ExceptionDecl,
+ TypeSourceInfo *TSInfo, QualType T);
+
+ /// \brief Check for tag mismatches when instantiating an
+ /// elaborated type.
+ QualType RebuildElaboratedType(SourceLocation KeywordLoc,
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifierLoc QualifierLoc,
+ QualType T);
+
+ TemplateName TransformTemplateName(CXXScopeSpec &SS,
+ TemplateName Name,
+ SourceLocation NameLoc,
+ QualType ObjectType = QualType(),
+ NamedDecl *FirstQualifierInScope = 0);
+
+ ExprResult TransformPredefinedExpr(PredefinedExpr *E);
+ ExprResult TransformDeclRefExpr(DeclRefExpr *E);
+ ExprResult TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E);
+ ExprResult TransformTemplateParmRefExpr(DeclRefExpr *E,
+ NonTypeTemplateParmDecl *D);
+ ExprResult TransformSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E);
+
+ QualType TransformFunctionProtoType(TypeLocBuilder &TLB,
+ FunctionProtoTypeLoc TL);
+ ParmVarDecl *TransformFunctionTypeParam(ParmVarDecl *OldParm,
+ int indexAdjustment,
+ llvm::Optional<unsigned> NumExpansions,
+ bool ExpectParameterPack);
+
+ /// \brief Transforms a template type parameter type by performing
+ /// substitution of the corresponding template type argument.
+ QualType TransformTemplateTypeParmType(TypeLocBuilder &TLB,
+ TemplateTypeParmTypeLoc TL);
+
+ /// \brief Transforms an already-substituted template type parameter pack
+ /// into either itself (if we aren't substituting into its pack expansion)
+ /// or the appropriate substituted argument.
+ QualType TransformSubstTemplateTypeParmPackType(TypeLocBuilder &TLB,
+ SubstTemplateTypeParmPackTypeLoc TL);
+
+ ExprResult TransformCallExpr(CallExpr *CE) {
+ getSema().CallsUndergoingInstantiation.push_back(CE);
+ ExprResult Result =
+ TreeTransform<TemplateInstantiator>::TransformCallExpr(CE);
+ getSema().CallsUndergoingInstantiation.pop_back();
+ return move(Result);
+ }
+
+ private:
+ ExprResult transformNonTypeTemplateParmRef(NonTypeTemplateParmDecl *parm,
+ SourceLocation loc,
+ const TemplateArgument &arg);
+ };
+}
+
+bool TemplateInstantiator::AlreadyTransformed(QualType T) {
+ if (T.isNull())
+ return true;
+
+ if (T->isInstantiationDependentType() || T->isVariablyModifiedType())
+ return false;
+
+ getSema().MarkDeclarationsReferencedInType(Loc, T);
+ return true;
+}
+
+Decl *TemplateInstantiator::TransformDecl(SourceLocation Loc, Decl *D) {
+ if (!D)
+ return 0;
+
+ if (TemplateTemplateParmDecl *TTP = dyn_cast<TemplateTemplateParmDecl>(D)) {
+ if (TTP->getDepth() < TemplateArgs.getNumLevels()) {
+ // If the corresponding template argument is NULL or non-existent, it's
+ // because we are performing instantiation from explicitly-specified
+ // template arguments in a function template, but there were some
+ // arguments left unspecified.
+ if (!TemplateArgs.hasTemplateArgument(TTP->getDepth(),
+ TTP->getPosition()))
+ return D;
+
+ TemplateArgument Arg = TemplateArgs(TTP->getDepth(), TTP->getPosition());
+
+ if (TTP->isParameterPack()) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+
+ assert(getSema().ArgumentPackSubstitutionIndex >= 0);
+ assert(getSema().ArgumentPackSubstitutionIndex < (int)Arg.pack_size());
+ Arg = Arg.pack_begin()[getSema().ArgumentPackSubstitutionIndex];
+ }
+
+ TemplateName Template = Arg.getAsTemplate();
+ assert(!Template.isNull() && Template.getAsTemplateDecl() &&
+ "Wrong kind of template template argument");
+ return Template.getAsTemplateDecl();
+ }
+
+ // Fall through to find the instantiated declaration for this template
+ // template parameter.
+ }
+
+ return SemaRef.FindInstantiatedDecl(Loc, cast<NamedDecl>(D), TemplateArgs);
+}
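+
+// Illustrative sketch (hypothetical templates): the template template
+// parameter case above replaces the parameter with the corresponding
+// template argument.
+//
+//   template<template<class> class TT> struct Holder { TT<int> field; };
+//   template<class T> struct Box { T value; };
+//   Holder<Box> h;  // while instantiating Holder<Box>, the reference to TT
+//                   // is transformed into Box's TemplateDecl, so 'field'
+//                   // has type Box<int>.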
+
+Decl *TemplateInstantiator::TransformDefinition(SourceLocation Loc, Decl *D) {
+ Decl *Inst = getSema().SubstDecl(D, getSema().CurContext, TemplateArgs);
+ if (!Inst)
+ return 0;
+
+ getSema().CurrentInstantiationScope->InstantiatedLocal(D, Inst);
+ return Inst;
+}
+
+NamedDecl *
+TemplateInstantiator::TransformFirstQualifierInScope(NamedDecl *D,
+ SourceLocation Loc) {
+ // If the first part of the nested-name-specifier was a template type
+ // parameter, instantiate that type parameter down to a tag type.
+ if (TemplateTypeParmDecl *TTPD = dyn_cast_or_null<TemplateTypeParmDecl>(D)) {
+ const TemplateTypeParmType *TTP
+ = cast<TemplateTypeParmType>(getSema().Context.getTypeDeclType(TTPD));
+
+ if (TTP->getDepth() < TemplateArgs.getNumLevels()) {
+ // FIXME: This needs testing w/ member access expressions.
+ TemplateArgument Arg = TemplateArgs(TTP->getDepth(), TTP->getIndex());
+
+ if (TTP->isParameterPack()) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+
+ if (getSema().ArgumentPackSubstitutionIndex == -1)
+ return 0;
+
+ assert(getSema().ArgumentPackSubstitutionIndex < (int)Arg.pack_size());
+ Arg = Arg.pack_begin()[getSema().ArgumentPackSubstitutionIndex];
+ }
+
+ QualType T = Arg.getAsType();
+ if (T.isNull())
+ return cast_or_null<NamedDecl>(TransformDecl(Loc, D));
+
+ if (const TagType *Tag = T->getAs<TagType>())
+ return Tag->getDecl();
+
+ // The resulting type is not a tag; complain.
+ getSema().Diag(Loc, diag::err_nested_name_spec_non_tag) << T;
+ return 0;
+ }
+ }
+
+ return cast_or_null<NamedDecl>(TransformDecl(Loc, D));
+}
+
+VarDecl *
+TemplateInstantiator::RebuildExceptionDecl(VarDecl *ExceptionDecl,
+ TypeSourceInfo *Declarator,
+ SourceLocation StartLoc,
+ SourceLocation NameLoc,
+ IdentifierInfo *Name) {
+ VarDecl *Var = inherited::RebuildExceptionDecl(ExceptionDecl, Declarator,
+ StartLoc, NameLoc, Name);
+ if (Var)
+ getSema().CurrentInstantiationScope->InstantiatedLocal(ExceptionDecl, Var);
+ return Var;
+}
+
+VarDecl *TemplateInstantiator::RebuildObjCExceptionDecl(VarDecl *ExceptionDecl,
+ TypeSourceInfo *TSInfo,
+ QualType T) {
+ VarDecl *Var = inherited::RebuildObjCExceptionDecl(ExceptionDecl, TSInfo, T);
+ if (Var)
+ getSema().CurrentInstantiationScope->InstantiatedLocal(ExceptionDecl, Var);
+ return Var;
+}
+
+QualType
+TemplateInstantiator::RebuildElaboratedType(SourceLocation KeywordLoc,
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifierLoc QualifierLoc,
+ QualType T) {
+ if (const TagType *TT = T->getAs<TagType>()) {
+ TagDecl* TD = TT->getDecl();
+
+ SourceLocation TagLocation = KeywordLoc;
+
+ // FIXME: type might be anonymous.
+ IdentifierInfo *Id = TD->getIdentifier();
+
+ // TODO: should we even warn on struct/class mismatches for this? Seems
+ // like it's likely to produce a lot of spurious errors.
+ if (Keyword != ETK_None && Keyword != ETK_Typename) {
+ TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForKeyword(Keyword);
+ if (!SemaRef.isAcceptableTagRedeclaration(TD, Kind, /*isDefinition*/false,
+ TagLocation, *Id)) {
+ SemaRef.Diag(TagLocation, diag::err_use_with_wrong_tag)
+ << Id
+ << FixItHint::CreateReplacement(SourceRange(TagLocation),
+ TD->getKindName());
+ SemaRef.Diag(TD->getLocation(), diag::note_previous_use);
+ }
+ }
+ }
+
+ return TreeTransform<TemplateInstantiator>::RebuildElaboratedType(KeywordLoc,
+ Keyword,
+ QualifierLoc,
+ T);
+}
+
+TemplateName TemplateInstantiator::TransformTemplateName(CXXScopeSpec &SS,
+ TemplateName Name,
+ SourceLocation NameLoc,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast_or_null<TemplateTemplateParmDecl>(Name.getAsTemplateDecl())) {
+ if (TTP->getDepth() < TemplateArgs.getNumLevels()) {
+ // If the corresponding template argument is NULL or non-existent, it's
+ // because we are performing instantiation from explicitly-specified
+ // template arguments in a function template, but there were some
+ // arguments left unspecified.
+ if (!TemplateArgs.hasTemplateArgument(TTP->getDepth(),
+ TTP->getPosition()))
+ return Name;
+
+ TemplateArgument Arg = TemplateArgs(TTP->getDepth(), TTP->getPosition());
+
+ if (TTP->isParameterPack()) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+
+ if (getSema().ArgumentPackSubstitutionIndex == -1) {
+ // We have the template argument pack to substitute, but we're not
+ // actually expanding the enclosing pack expansion yet. So, just
+ // keep the entire argument pack.
+ return getSema().Context.getSubstTemplateTemplateParmPack(TTP, Arg);
+ }
+
+ assert(getSema().ArgumentPackSubstitutionIndex < (int)Arg.pack_size());
+ Arg = Arg.pack_begin()[getSema().ArgumentPackSubstitutionIndex];
+ }
+
+ TemplateName Template = Arg.getAsTemplate();
+ assert(!Template.isNull() && "Null template template argument");
+
+ // We don't ever want to substitute for a qualified template name, since
+ // the qualifier is handled separately. So, look through the qualified
+ // template name to its underlying declaration.
+ if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+ Template = TemplateName(QTN->getTemplateDecl());
+
+ Template = getSema().Context.getSubstTemplateTemplateParm(TTP, Template);
+ return Template;
+ }
+ }
+
+ if (SubstTemplateTemplateParmPackStorage *SubstPack
+ = Name.getAsSubstTemplateTemplateParmPack()) {
+ if (getSema().ArgumentPackSubstitutionIndex == -1)
+ return Name;
+
+ const TemplateArgument &ArgPack = SubstPack->getArgumentPack();
+ assert(getSema().ArgumentPackSubstitutionIndex < (int)ArgPack.pack_size() &&
+ "Pack substitution index out-of-range");
+ return ArgPack.pack_begin()[getSema().ArgumentPackSubstitutionIndex]
+ .getAsTemplate();
+ }
+
+ return inherited::TransformTemplateName(SS, Name, NameLoc, ObjectType,
+ FirstQualifierInScope);
+}
+
+ExprResult
+TemplateInstantiator::TransformPredefinedExpr(PredefinedExpr *E) {
+ if (!E->isTypeDependent())
+ return SemaRef.Owned(E);
+
+ FunctionDecl *currentDecl = getSema().getCurFunctionDecl();
+ assert(currentDecl && "Must have current function declaration when "
+ "instantiating.");
+
+ PredefinedExpr::IdentType IT = E->getIdentType();
+
+ unsigned Length = PredefinedExpr::ComputeName(IT, currentDecl).length();
+
+ llvm::APInt LengthI(32, Length + 1);
+ QualType ResTy = getSema().Context.CharTy.withConst();
+ ResTy = getSema().Context.getConstantArrayType(ResTy, LengthI,
+ ArrayType::Normal, 0);
+ PredefinedExpr *PE =
+ new (getSema().Context) PredefinedExpr(E->getLocation(), ResTy, IT);
+ return getSema().Owned(PE);
+}
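+
+// For illustration, the kind of code that reaches this path (hypothetical
+// example): inside a function template, __PRETTY_FUNCTION__ has a dependent
+// array type, because its length depends on the instantiated signature.
+// @code
+//   template<typename T> const char *name() { return __PRETTY_FUNCTION__; }
+//   const char *S = name<int>();   // length is computed for name<int>()
+// @endcode
+// During instantiation the routine above recomputes the name length and
+// builds a new PredefinedExpr with a concrete 'const char [N]' array type.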
+
+ExprResult
+TemplateInstantiator::TransformTemplateParmRefExpr(DeclRefExpr *E,
+ NonTypeTemplateParmDecl *NTTP) {
+ // If the corresponding template argument is NULL or non-existent, it's
+ // because we are performing instantiation from explicitly-specified
+ // template arguments in a function template, but there were some
+ // arguments left unspecified.
+ if (!TemplateArgs.hasTemplateArgument(NTTP->getDepth(),
+ NTTP->getPosition()))
+ return SemaRef.Owned(E);
+
+ TemplateArgument Arg = TemplateArgs(NTTP->getDepth(), NTTP->getPosition());
+ if (NTTP->isParameterPack()) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+
+ if (getSema().ArgumentPackSubstitutionIndex == -1) {
+ // We have an argument pack, but we can't select a particular argument
+ // out of it yet. Therefore, we'll build an expression to hold on to that
+ // argument pack.
+ QualType TargetType = SemaRef.SubstType(NTTP->getType(), TemplateArgs,
+ E->getLocation(),
+ NTTP->getDeclName());
+ if (TargetType.isNull())
+ return ExprError();
+
+ return new (SemaRef.Context) SubstNonTypeTemplateParmPackExpr(TargetType,
+ NTTP,
+ E->getLocation(),
+ Arg);
+ }
+
+ assert(getSema().ArgumentPackSubstitutionIndex < (int)Arg.pack_size());
+ Arg = Arg.pack_begin()[getSema().ArgumentPackSubstitutionIndex];
+ }
+
+ return transformNonTypeTemplateParmRef(NTTP, E->getLocation(), Arg);
+}
+
+ExprResult TemplateInstantiator::transformNonTypeTemplateParmRef(
+ NonTypeTemplateParmDecl *parm,
+ SourceLocation loc,
+ const TemplateArgument &arg) {
+ ExprResult result;
+ QualType type;
+
+ // The template argument itself might be an expression, in which
+ // case we just return that expression.
+ if (arg.getKind() == TemplateArgument::Expression) {
+ Expr *argExpr = arg.getAsExpr();
+ result = SemaRef.Owned(argExpr);
+ type = argExpr->getType();
+
+ } else if (arg.getKind() == TemplateArgument::Declaration) {
+ ValueDecl *VD;
+ if (Decl *D = arg.getAsDecl()) {
+ VD = cast<ValueDecl>(D);
+
+ // Find the instantiation of the template argument. This is
+ // required for nested templates.
+ VD = cast_or_null<ValueDecl>(
+ getSema().FindInstantiatedDecl(loc, VD, TemplateArgs));
+ if (!VD)
+ return ExprError();
+ } else {
+ // Propagate NULL template argument.
+ VD = 0;
+ }
+
+ // Derive the type we want the substituted decl to have. This had
+ // better be non-dependent, or these checks will have serious problems.
+ if (parm->isExpandedParameterPack()) {
+ type = parm->getExpansionType(SemaRef.ArgumentPackSubstitutionIndex);
+ } else if (parm->isParameterPack() &&
+ isa<PackExpansionType>(parm->getType())) {
+ type = SemaRef.SubstType(
+ cast<PackExpansionType>(parm->getType())->getPattern(),
+ TemplateArgs, loc, parm->getDeclName());
+ } else {
+ type = SemaRef.SubstType(parm->getType(), TemplateArgs,
+ loc, parm->getDeclName());
+ }
+ assert(!type.isNull() && "type substitution failed for param type");
+ assert(!type->isDependentType() && "param type still dependent");
+ result = SemaRef.BuildExpressionFromDeclTemplateArgument(arg, type, loc);
+
+ if (!result.isInvalid()) type = result.get()->getType();
+ } else {
+ result = SemaRef.BuildExpressionFromIntegralTemplateArgument(arg, loc);
+
+ // Note that this type can be different from the type of 'result',
+ // e.g. if it's an enum type.
+ type = arg.getIntegralType();
+ }
+ if (result.isInvalid()) return ExprError();
+
+ Expr *resultExpr = result.take();
+ return SemaRef.Owned(new (SemaRef.Context)
+ SubstNonTypeTemplateParmExpr(type,
+ resultExpr->getValueKind(),
+ loc, parm, resultExpr));
+}
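+
+// For illustration, a non-type template parameter reference handled here
+// (hypothetical example):
+// @code
+//   template<int N> struct Buffer { int Data[N]; };
+//   Buffer<8> B;
+// @endcode
+// Instantiating Buffer<8> turns the reference to 'N' into an integral
+// expression for 8, wrapped in a SubstNonTypeTemplateParmExpr so the AST
+// still records which parameter was replaced.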
+
+ExprResult
+TemplateInstantiator::TransformSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E) {
+ if (getSema().ArgumentPackSubstitutionIndex == -1) {
+ // We aren't expanding the parameter pack, so just return ourselves.
+ return getSema().Owned(E);
+ }
+
+ const TemplateArgument &ArgPack = E->getArgumentPack();
+ unsigned Index = (unsigned)getSema().ArgumentPackSubstitutionIndex;
+ assert(Index < ArgPack.pack_size() && "Substitution index out-of-range");
+
+ const TemplateArgument &Arg = ArgPack.pack_begin()[Index];
+ return transformNonTypeTemplateParmRef(E->getParameterPack(),
+ E->getParameterPackLocation(),
+ Arg);
+}
+
+ExprResult
+TemplateInstantiator::TransformDeclRefExpr(DeclRefExpr *E) {
+ NamedDecl *D = E->getDecl();
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D)) {
+ if (NTTP->getDepth() < TemplateArgs.getNumLevels())
+ return TransformTemplateParmRefExpr(E, NTTP);
+
+ // We have a non-type template parameter that isn't fully substituted;
+ // FindInstantiatedDecl will find it in the local instantiation scope.
+ }
+
+ return TreeTransform<TemplateInstantiator>::TransformDeclRefExpr(E);
+}
+
+ExprResult TemplateInstantiator::TransformCXXDefaultArgExpr(
+ CXXDefaultArgExpr *E) {
+ assert(!cast<FunctionDecl>(E->getParam()->getDeclContext())->
+ getDescribedFunctionTemplate() &&
+ "Default arg expressions are never formed in dependent cases.");
+ return SemaRef.BuildCXXDefaultArgExpr(E->getUsedLocation(),
+ cast<FunctionDecl>(E->getParam()->getDeclContext()),
+ E->getParam());
+}
+
+QualType TemplateInstantiator::TransformFunctionProtoType(TypeLocBuilder &TLB,
+ FunctionProtoTypeLoc TL) {
+ // We need a local instantiation scope for this function prototype.
+ LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
+ return inherited::TransformFunctionProtoType(TLB, TL);
+}
+
+ParmVarDecl *
+TemplateInstantiator::TransformFunctionTypeParam(ParmVarDecl *OldParm,
+ int indexAdjustment,
+ llvm::Optional<unsigned> NumExpansions,
+ bool ExpectParameterPack) {
+ return SemaRef.SubstParmVarDecl(OldParm, TemplateArgs, indexAdjustment,
+ NumExpansions, ExpectParameterPack);
+}
+
+QualType
+TemplateInstantiator::TransformTemplateTypeParmType(TypeLocBuilder &TLB,
+ TemplateTypeParmTypeLoc TL) {
+ const TemplateTypeParmType *T = TL.getTypePtr();
+ if (T->getDepth() < TemplateArgs.getNumLevels()) {
+ // Replace the template type parameter with its corresponding
+ // template argument.
+
+ // If the corresponding template argument is NULL or doesn't exist, it's
+ // because we are performing instantiation from explicitly-specified
+ // template arguments in a function template, but there were some
+ // arguments left unspecified.

+ if (!TemplateArgs.hasTemplateArgument(T->getDepth(), T->getIndex())) {
+ TemplateTypeParmTypeLoc NewTL
+ = TLB.push<TemplateTypeParmTypeLoc>(TL.getType());
+ NewTL.setNameLoc(TL.getNameLoc());
+ return TL.getType();
+ }
+
+ TemplateArgument Arg = TemplateArgs(T->getDepth(), T->getIndex());
+
+ if (T->isParameterPack()) {
+ assert(Arg.getKind() == TemplateArgument::Pack &&
+ "Missing argument pack");
+
+ if (getSema().ArgumentPackSubstitutionIndex == -1) {
+ // We have the template argument pack, but we're not expanding the
+ // enclosing pack expansion yet. Just save the template argument
+ // pack for later substitution.
+ QualType Result
+ = getSema().Context.getSubstTemplateTypeParmPackType(T, Arg);
+ SubstTemplateTypeParmPackTypeLoc NewTL
+ = TLB.push<SubstTemplateTypeParmPackTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+ }
+
+ assert(getSema().ArgumentPackSubstitutionIndex < (int)Arg.pack_size());
+ Arg = Arg.pack_begin()[getSema().ArgumentPackSubstitutionIndex];
+ }
+
+ assert(Arg.getKind() == TemplateArgument::Type &&
+ "Template argument kind mismatch");
+
+ QualType Replacement = Arg.getAsType();
+
+ // TODO: only do this uniquing once, at the start of instantiation.
+ QualType Result
+ = getSema().Context.getSubstTemplateTypeParmType(T, Replacement);
+ SubstTemplateTypeParmTypeLoc NewTL
+ = TLB.push<SubstTemplateTypeParmTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+ }
+
+ // The template type parameter comes from an inner template (e.g.,
+ // the template parameter list of a member template inside the
+ // template we are instantiating). Create a new template type
+ // parameter with the template "level" reduced by one.
+ TemplateTypeParmDecl *NewTTPDecl = 0;
+ if (TemplateTypeParmDecl *OldTTPDecl = T->getDecl())
+ NewTTPDecl = cast_or_null<TemplateTypeParmDecl>(
+ TransformDecl(TL.getNameLoc(), OldTTPDecl));
+
+ QualType Result
+ = getSema().Context.getTemplateTypeParmType(T->getDepth()
+ - TemplateArgs.getNumLevels(),
+ T->getIndex(),
+ T->isParameterPack(),
+ NewTTPDecl);
+ TemplateTypeParmTypeLoc NewTL = TLB.push<TemplateTypeParmTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+}
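+
+// For illustration, the common case handled above (hypothetical example):
+// @code
+//   template<typename T> struct Wrapper { T Value; };
+//   Wrapper<int> W;
+// @endcode
+// Instantiating Wrapper<int> replaces the TemplateTypeParmType 'T' (depth 0,
+// index 0) with a SubstTemplateTypeParmType whose replacement type is 'int'.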
+
+QualType
+TemplateInstantiator::TransformSubstTemplateTypeParmPackType(
+ TypeLocBuilder &TLB,
+ SubstTemplateTypeParmPackTypeLoc TL) {
+ if (getSema().ArgumentPackSubstitutionIndex == -1) {
+ // We aren't expanding the parameter pack, so just return ourselves.
+ SubstTemplateTypeParmPackTypeLoc NewTL
+ = TLB.push<SubstTemplateTypeParmPackTypeLoc>(TL.getType());
+ NewTL.setNameLoc(TL.getNameLoc());
+ return TL.getType();
+ }
+
+ const TemplateArgument &ArgPack = TL.getTypePtr()->getArgumentPack();
+ unsigned Index = (unsigned)getSema().ArgumentPackSubstitutionIndex;
+ assert(Index < ArgPack.pack_size() && "Substitution index out-of-range");
+
+ QualType Result = ArgPack.pack_begin()[Index].getAsType();
+ Result = getSema().Context.getSubstTemplateTypeParmType(
+ TL.getTypePtr()->getReplacedParameter(),
+ Result);
+ SubstTemplateTypeParmTypeLoc NewTL
+ = TLB.push<SubstTemplateTypeParmTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+}
+
+/// \brief Perform substitution on the type T with a given set of template
+/// arguments.
+///
+/// This routine substitutes the given template arguments into the
+/// type T and produces the instantiated type.
+///
+/// \param T the type into which the template arguments will be
+/// substituted. If this type is not dependent, it will be returned
+/// immediately.
+///
+/// \param TemplateArgs the template arguments that will be
+/// substituted for the top-level template parameters within T.
+///
+/// \param Loc the location in the source code where this substitution
+/// is being performed. It will typically be the location of the
+/// declarator (if we're instantiating the type of some declaration)
+/// or the location of the type in the source code (if, e.g., we're
+/// instantiating the type of a cast expression).
+///
+/// \param Entity the name of the entity associated with a declaration
+/// being instantiated (if any). May be empty to indicate that there
+/// is no such entity (if, e.g., this is a type that occurs as part of
+/// a cast expression) or that the entity has no name (e.g., an
+/// unnamed function parameter).
+///
+/// \returns If the instantiation succeeds, the instantiated
+/// type. Otherwise, produces diagnostics and returns a NULL type.
+TypeSourceInfo *Sema::SubstType(TypeSourceInfo *T,
+ const MultiLevelTemplateArgumentList &Args,
+ SourceLocation Loc,
+ DeclarationName Entity) {
+ assert(!ActiveTemplateInstantiations.empty() &&
+ "Cannot perform an instantiation without some context on the "
+ "instantiation stack");
+
+ if (!T->getType()->isInstantiationDependentType() &&
+ !T->getType()->isVariablyModifiedType())
+ return T;
+
+ TemplateInstantiator Instantiator(*this, Args, Loc, Entity);
+ return Instantiator.TransformType(T);
+}
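+
+// A minimal usage sketch (hypothetical caller; 'S' and the declaration 'D'
+// are stand-ins, not names from this file):
+// @code
+//   TypeSourceInfo *NewTI = S.SubstType(D->getTypeSourceInfo(), TemplateArgs,
+//                                       D->getLocation(), D->getDeclName());
+//   if (!NewTI)
+//     return 0;   // substitution failed; diagnostics were already emitted
+// @endcode
+// As the assertion above notes, callers must already have an active
+// instantiation record on the stack before performing the substitution.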
+
+TypeSourceInfo *Sema::SubstType(TypeLoc TL,
+ const MultiLevelTemplateArgumentList &Args,
+ SourceLocation Loc,
+ DeclarationName Entity) {
+ assert(!ActiveTemplateInstantiations.empty() &&
+ "Cannot perform an instantiation without some context on the "
+ "instantiation stack");
+
+ if (TL.getType().isNull())
+ return 0;
+
+ if (!TL.getType()->isInstantiationDependentType() &&
+ !TL.getType()->isVariablyModifiedType()) {
+ // FIXME: Make a copy of the TypeLoc data here, so that we can
+ // return a new TypeSourceInfo. Inefficient!
+ TypeLocBuilder TLB;
+ TLB.pushFullCopy(TL);
+ return TLB.getTypeSourceInfo(Context, TL.getType());
+ }
+
+ TemplateInstantiator Instantiator(*this, Args, Loc, Entity);
+ TypeLocBuilder TLB;
+ TLB.reserve(TL.getFullDataSize());
+ QualType Result = Instantiator.TransformType(TLB, TL);
+ if (Result.isNull())
+ return 0;
+
+ return TLB.getTypeSourceInfo(Context, Result);
+}
+
+/// Deprecated form of the above.
+QualType Sema::SubstType(QualType T,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ SourceLocation Loc, DeclarationName Entity) {
+ assert(!ActiveTemplateInstantiations.empty() &&
+ "Cannot perform an instantiation without some context on the "
+ "instantiation stack");
+
+ // If T is not a dependent type or a variably-modified type, there
+ // is nothing to do.
+ if (!T->isInstantiationDependentType() && !T->isVariablyModifiedType())
+ return T;
+
+ TemplateInstantiator Instantiator(*this, TemplateArgs, Loc, Entity);
+ return Instantiator.TransformType(T);
+}
+
+static bool NeedsInstantiationAsFunctionType(TypeSourceInfo *T) {
+ if (T->getType()->isInstantiationDependentType() ||
+ T->getType()->isVariablyModifiedType())
+ return true;
+
+ TypeLoc TL = T->getTypeLoc().IgnoreParens();
+ if (!isa<FunctionProtoTypeLoc>(TL))
+ return false;
+
+ FunctionProtoTypeLoc FP = cast<FunctionProtoTypeLoc>(TL);
+ for (unsigned I = 0, E = FP.getNumArgs(); I != E; ++I) {
+ ParmVarDecl *P = FP.getArg(I);
+
+ // The parameter's type as written might be dependent even if the
+ // decayed type was not dependent.
+ if (TypeSourceInfo *TSInfo = P->getTypeSourceInfo())
+ if (TSInfo->getType()->isInstantiationDependentType())
+ return true;
+
+ // TODO: currently we always rebuild expressions. When we
+ // properly get lazier about this, we should use the same
+ // logic to avoid rebuilding prototypes here.
+ if (P->hasDefaultArg())
+ return true;
+ }
+
+ return false;
+}
+
+/// A form of SubstType intended specifically for instantiating the
+/// type of a FunctionDecl. Its purpose is solely to force the
+/// instantiation of default-argument expressions.
+TypeSourceInfo *Sema::SubstFunctionDeclType(TypeSourceInfo *T,
+ const MultiLevelTemplateArgumentList &Args,
+ SourceLocation Loc,
+ DeclarationName Entity) {
+ assert(!ActiveTemplateInstantiations.empty() &&
+ "Cannot perform an instantiation without some context on the "
+ "instantiation stack");
+
+ if (!NeedsInstantiationAsFunctionType(T))
+ return T;
+
+ TemplateInstantiator Instantiator(*this, Args, Loc, Entity);
+
+ TypeLocBuilder TLB;
+
+ TypeLoc TL = T->getTypeLoc();
+ TLB.reserve(TL.getFullDataSize());
+
+ QualType Result = Instantiator.TransformType(TLB, TL);
+ if (Result.isNull())
+ return 0;
+
+ return TLB.getTypeSourceInfo(Context, Result);
+}
+
+ParmVarDecl *Sema::SubstParmVarDecl(ParmVarDecl *OldParm,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ int indexAdjustment,
+ llvm::Optional<unsigned> NumExpansions,
+ bool ExpectParameterPack) {
+ TypeSourceInfo *OldDI = OldParm->getTypeSourceInfo();
+ TypeSourceInfo *NewDI = 0;
+
+ TypeLoc OldTL = OldDI->getTypeLoc();
+ if (isa<PackExpansionTypeLoc>(OldTL)) {
+ PackExpansionTypeLoc ExpansionTL = cast<PackExpansionTypeLoc>(OldTL);
+
+ // We have a function parameter pack. Substitute into the pattern of the
+ // expansion.
+ NewDI = SubstType(ExpansionTL.getPatternLoc(), TemplateArgs,
+ OldParm->getLocation(), OldParm->getDeclName());
+ if (!NewDI)
+ return 0;
+
+ if (NewDI->getType()->containsUnexpandedParameterPack()) {
+ // We still have unexpanded parameter packs, which means that
+ // our function parameter is still a function parameter pack.
+ // Therefore, make its type a pack expansion type.
+ NewDI = CheckPackExpansion(NewDI, ExpansionTL.getEllipsisLoc(),
+ NumExpansions);
+ } else if (ExpectParameterPack) {
+ // We expected to get a parameter pack but didn't (because the type
+ // itself is not a pack expansion type), so complain. This can occur when
+ // the substitution goes through an alias template that "loses" the
+ // pack expansion.
+ Diag(OldParm->getLocation(),
+ diag::err_function_parameter_pack_without_parameter_packs)
+ << NewDI->getType();
+ return 0;
+ }
+ } else {
+ NewDI = SubstType(OldDI, TemplateArgs, OldParm->getLocation(),
+ OldParm->getDeclName());
+ }
+
+ if (!NewDI)
+ return 0;
+
+ if (NewDI->getType()->isVoidType()) {
+ Diag(OldParm->getLocation(), diag::err_param_with_void_type);
+ return 0;
+ }
+
+ ParmVarDecl *NewParm = CheckParameter(Context.getTranslationUnitDecl(),
+ OldParm->getInnerLocStart(),
+ OldParm->getLocation(),
+ OldParm->getIdentifier(),
+ NewDI->getType(), NewDI,
+ OldParm->getStorageClass(),
+ OldParm->getStorageClassAsWritten());
+ if (!NewParm)
+ return 0;
+
+ // Mark the (new) default argument as uninstantiated (if any).
+ if (OldParm->hasUninstantiatedDefaultArg()) {
+ Expr *Arg = OldParm->getUninstantiatedDefaultArg();
+ NewParm->setUninstantiatedDefaultArg(Arg);
+ } else if (OldParm->hasUnparsedDefaultArg()) {
+ NewParm->setUnparsedDefaultArg();
+ UnparsedDefaultArgInstantiations[OldParm].push_back(NewParm);
+ } else if (Expr *Arg = OldParm->getDefaultArg())
+ NewParm->setUninstantiatedDefaultArg(Arg);
+
+ NewParm->setHasInheritedDefaultArg(OldParm->hasInheritedDefaultArg());
+
+ if (OldParm->isParameterPack() && !NewParm->isParameterPack()) {
+ // Add the new parameter to the instantiated parameter pack.
+ CurrentInstantiationScope->InstantiatedLocalPackArg(OldParm, NewParm);
+ } else {
+ // Introduce an Old -> New mapping
+ CurrentInstantiationScope->InstantiatedLocal(OldParm, NewParm);
+ }
+
+ // FIXME: OldParm may come from a FunctionProtoType, in which case CurContext
+ // can be anything; is this right?
+ NewParm->setDeclContext(CurContext);
+
+ NewParm->setScopeInfo(OldParm->getFunctionScopeDepth(),
+ OldParm->getFunctionScopeIndex() + indexAdjustment);
+
+ return NewParm;
+}
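+
+// For illustration, a function parameter pack whose pattern is substituted
+// by the routine above (hypothetical example):
+// @code
+//   template<typename ...Ts> void f(Ts ...args);
+//   f(1, 2.0);   // Ts deduced as {int, double}
+// @endcode
+// Roughly, each expansion of 'args' is substituted with
+// ArgumentPackSubstitutionIndex selecting the corresponding element of the
+// deduced pack, and the new parameters are registered with the current
+// local instantiation scope.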
+
+/// \brief Substitute the given template arguments into the given set of
+/// parameters, producing the set of parameter types that would be generated
+/// from such a substitution.
+bool Sema::SubstParmTypes(SourceLocation Loc,
+ ParmVarDecl **Params, unsigned NumParams,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ SmallVectorImpl<QualType> &ParamTypes,
+ SmallVectorImpl<ParmVarDecl *> *OutParams) {
+ assert(!ActiveTemplateInstantiations.empty() &&
+ "Cannot perform an instantiation without some context on the "
+ "instantiation stack");
+
+ TemplateInstantiator Instantiator(*this, TemplateArgs, Loc,
+ DeclarationName());
+ return Instantiator.TransformFunctionTypeParams(Loc, Params, NumParams, 0,
+ ParamTypes, OutParams);
+}
+
+/// \brief Perform substitution on the base class specifiers of the
+/// given class template specialization.
+///
+/// Produces a diagnostic and returns true on error; returns false and
+/// attaches the instantiated base classes to the class template
+/// specialization on success.
+bool
+Sema::SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
+ CXXRecordDecl *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ bool Invalid = false;
+ SmallVector<CXXBaseSpecifier*, 4> InstantiatedBases;
+ for (ClassTemplateSpecializationDecl::base_class_iterator
+ Base = Pattern->bases_begin(), BaseEnd = Pattern->bases_end();
+ Base != BaseEnd; ++Base) {
+ if (!Base->getType()->isDependentType()) {
+ InstantiatedBases.push_back(new (Context) CXXBaseSpecifier(*Base));
+ continue;
+ }
+
+ SourceLocation EllipsisLoc;
+ TypeSourceInfo *BaseTypeLoc;
+ if (Base->isPackExpansion()) {
+ // This is a pack expansion. See whether we should expand it now, or
+ // wait until later.
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ collectUnexpandedParameterPacks(Base->getTypeSourceInfo()->getTypeLoc(),
+ Unexpanded);
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> NumExpansions;
+ if (CheckParameterPacksForExpansion(Base->getEllipsisLoc(),
+ Base->getSourceRange(),
+ Unexpanded,
+ TemplateArgs, ShouldExpand,
+ RetainExpansion,
+ NumExpansions)) {
+ Invalid = true;
+ continue;
+ }
+
+ // If we should expand this pack expansion now, do so.
+ if (ShouldExpand) {
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, I);
+
+ TypeSourceInfo *BaseTypeLoc = SubstType(Base->getTypeSourceInfo(),
+ TemplateArgs,
+ Base->getSourceRange().getBegin(),
+ DeclarationName());
+ if (!BaseTypeLoc) {
+ Invalid = true;
+ continue;
+ }
+
+ if (CXXBaseSpecifier *InstantiatedBase
+ = CheckBaseSpecifier(Instantiation,
+ Base->getSourceRange(),
+ Base->isVirtual(),
+ Base->getAccessSpecifierAsWritten(),
+ BaseTypeLoc,
+ SourceLocation()))
+ InstantiatedBases.push_back(InstantiatedBase);
+ else
+ Invalid = true;
+ }
+
+ continue;
+ }
+
+ // The resulting base specifier will (still) be a pack expansion.
+ EllipsisLoc = Base->getEllipsisLoc();
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, -1);
+ BaseTypeLoc = SubstType(Base->getTypeSourceInfo(),
+ TemplateArgs,
+ Base->getSourceRange().getBegin(),
+ DeclarationName());
+ } else {
+ BaseTypeLoc = SubstType(Base->getTypeSourceInfo(),
+ TemplateArgs,
+ Base->getSourceRange().getBegin(),
+ DeclarationName());
+ }
+
+ if (!BaseTypeLoc) {
+ Invalid = true;
+ continue;
+ }
+
+ if (CXXBaseSpecifier *InstantiatedBase
+ = CheckBaseSpecifier(Instantiation,
+ Base->getSourceRange(),
+ Base->isVirtual(),
+ Base->getAccessSpecifierAsWritten(),
+ BaseTypeLoc,
+ EllipsisLoc))
+ InstantiatedBases.push_back(InstantiatedBase);
+ else
+ Invalid = true;
+ }
+
+ if (!Invalid &&
+ AttachBaseSpecifiers(Instantiation, InstantiatedBases.data(),
+ InstantiatedBases.size()))
+ Invalid = true;
+
+ return Invalid;
+}
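+
+// For illustration, a base-specifier pack expansion handled above
+// (hypothetical example):
+// @code
+//   template<typename ...Mixins> struct Combined : Mixins... {};
+//   struct A {}; struct B {};
+//   Combined<A, B> C;   // bases A and B are instantiated from 'Mixins...'
+// @endcode
+// When the pack can be expanded, one base specifier is built per element of
+// the pack; otherwise the base is retained as a pack expansion for a later
+// substitution.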
+
+// Defined via #include from SemaTemplateInstantiateDecl.cpp
+namespace clang {
+ namespace sema {
+ Attr *instantiateTemplateAttribute(const Attr *At, ASTContext &C, Sema &S,
+ const MultiLevelTemplateArgumentList &TemplateArgs);
+ }
+}
+
+/// Determine whether we would be unable to instantiate this template (because
+/// it either has no definition, or is in the process of being instantiated).
+static bool DiagnoseUninstantiableTemplate(Sema &S,
+ SourceLocation PointOfInstantiation,
+ TagDecl *Instantiation,
+ bool InstantiatedFromMember,
+ TagDecl *Pattern,
+ TagDecl *PatternDef,
+ TemplateSpecializationKind TSK,
+ bool Complain = true) {
+ if (PatternDef && !PatternDef->isBeingDefined())
+ return false;
+
+ if (!Complain || (PatternDef && PatternDef->isInvalidDecl())) {
+ // Say nothing
+ } else if (PatternDef) {
+ assert(PatternDef->isBeingDefined());
+ S.Diag(PointOfInstantiation,
+ diag::err_template_instantiate_within_definition)
+ << (TSK != TSK_ImplicitInstantiation)
+ << S.Context.getTypeDeclType(Instantiation);
+ // Not much point in noting the template declaration here, since
+ // we're lexically inside it.
+ Instantiation->setInvalidDecl();
+ } else if (InstantiatedFromMember) {
+ S.Diag(PointOfInstantiation,
+ diag::err_implicit_instantiate_member_undefined)
+ << S.Context.getTypeDeclType(Instantiation);
+ S.Diag(Pattern->getLocation(), diag::note_member_of_template_here);
+ } else {
+ S.Diag(PointOfInstantiation, diag::err_template_instantiate_undefined)
+ << (TSK != TSK_ImplicitInstantiation)
+ << S.Context.getTypeDeclType(Instantiation);
+ S.Diag(Pattern->getLocation(), diag::note_template_decl_here);
+ }
+
+ // In general, Instantiation isn't marked invalid, so that we get more than
+ // one error for multiple undefined instantiations. But the code that does
+ // explicit declaration -> explicit definition conversion can't handle
+ // invalid declarations, so mark it invalid in that case.
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
+ Instantiation->setInvalidDecl();
+ return true;
+}
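+
+// For illustration, the situation this diagnoses (hypothetical example):
+// @code
+//   template<typename T> struct Fwd;   // declared, never defined
+//   Fwd<int> F;                        // diagnosed: instantiation of an
+//                                      // undefined template
+// @endcode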
+
+/// \brief Instantiate the definition of a class from a given pattern.
+///
+/// \param PointOfInstantiation The point of instantiation within the
+/// source code.
+///
+/// \param Instantiation is the declaration whose definition is being
+/// instantiated. This will be either a class template specialization
+/// or a member class of a class template specialization.
+///
+/// \param Pattern is the pattern from which the instantiation
+/// occurs. This will be either the declaration of a class template or
+/// the declaration of a member class of a class template.
+///
+/// \param TemplateArgs The template arguments to be substituted into
+/// the pattern.
+///
+/// \param TSK the kind of implicit or explicit instantiation to perform.
+///
+/// \param Complain whether to complain if the class cannot be instantiated due
+/// to the lack of a definition.
+///
+/// \returns true if an error occurred, false otherwise.
+bool
+Sema::InstantiateClass(SourceLocation PointOfInstantiation,
+ CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK,
+ bool Complain) {
+ bool Invalid = false;
+
+ CXXRecordDecl *PatternDef
+ = cast_or_null<CXXRecordDecl>(Pattern->getDefinition());
+ if (DiagnoseUninstantiableTemplate(*this, PointOfInstantiation, Instantiation,
+ Instantiation->getInstantiatedFromMemberClass(),
+ Pattern, PatternDef, TSK, Complain))
+ return true;
+ Pattern = PatternDef;
+
+ // Record the point of instantiation.
+ if (MemberSpecializationInfo *MSInfo
+ = Instantiation->getMemberSpecializationInfo()) {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+ } else if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Instantiation)) {
+ Spec->setTemplateSpecializationKind(TSK);
+ Spec->setPointOfInstantiation(PointOfInstantiation);
+ }
+
+ InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
+ if (Inst)
+ return true;
+
+ // Enter the scope of this instantiation. We don't use
+ // PushDeclContext because we don't have a scope.
+ ContextRAII SavedContext(*this, Instantiation);
+ EnterExpressionEvaluationContext EvalContext(*this,
+ Sema::PotentiallyEvaluated);
+
+ // If this is an instantiation of a local class, merge this local
+ // instantiation scope with the enclosing scope. Otherwise, every
+ // instantiation of a class has its own local instantiation scope.
+ bool MergeWithParentScope = !Instantiation->isDefinedOutsideFunctionOrMethod();
+ LocalInstantiationScope Scope(*this, MergeWithParentScope);
+
+ // Pull attributes from the pattern onto the instantiation.
+ InstantiateAttrs(TemplateArgs, Pattern, Instantiation);
+
+ // Start the definition of this instantiation.
+ Instantiation->startDefinition();
+
+ Instantiation->setTagKind(Pattern->getTagKind());
+
+ // Do substitution on the base class specifiers.
+ if (SubstBaseSpecifiers(Instantiation, Pattern, TemplateArgs))
+ Invalid = true;
+
+ TemplateDeclInstantiator Instantiator(*this, Instantiation, TemplateArgs);
+ SmallVector<Decl*, 4> Fields;
+ SmallVector<std::pair<FieldDecl*, FieldDecl*>, 4>
+ FieldsWithMemberInitializers;
+ // Delay instantiation of late parsed attributes.
+ LateInstantiatedAttrVec LateAttrs;
+ Instantiator.enableLateAttributeInstantiation(&LateAttrs);
+
+ for (RecordDecl::decl_iterator Member = Pattern->decls_begin(),
+ MemberEnd = Pattern->decls_end();
+ Member != MemberEnd; ++Member) {
+ // Don't instantiate members not belonging in this semantic context.
+ // e.g. for:
+ // @code
+ // template <int i> class A {
+ // class B *g;
+ // };
+ // @endcode
+ // 'class B' has the template as lexical context but semantically it is
+ // introduced in namespace scope.
+ if ((*Member)->getDeclContext() != Pattern)
+ continue;
+
+ if ((*Member)->isInvalidDecl()) {
+ Invalid = true;
+ continue;
+ }
+
+ Decl *NewMember = Instantiator.Visit(*Member);
+ if (NewMember) {
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(NewMember)) {
+ Fields.push_back(Field);
+ FieldDecl *OldField = cast<FieldDecl>(*Member);
+ if (OldField->getInClassInitializer())
+ FieldsWithMemberInitializers.push_back(std::make_pair(OldField,
+ Field));
+ } else if (EnumDecl *Enum = dyn_cast<EnumDecl>(NewMember)) {
+ // C++11 [temp.inst]p1: The implicit instantiation of a class template
+ // specialization causes the implicit instantiation of the definitions
+ // of unscoped member enumerations.
+ // Record a point of instantiation for this implicit instantiation.
+ if (TSK == TSK_ImplicitInstantiation && !Enum->isScoped() &&
+ Enum->isCompleteDefinition()) {
+ MemberSpecializationInfo *MSInfo =Enum->getMemberSpecializationInfo();
+ assert(MSInfo && "no spec info for member enum specialization");
+ MSInfo->setTemplateSpecializationKind(TSK_ImplicitInstantiation);
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+ }
+ }
+
+ if (NewMember->isInvalidDecl())
+ Invalid = true;
+ } else {
+ // FIXME: Eventually, a NULL return will mean that one of the
+ // instantiations was a semantic disaster, and we'll want to set Invalid =
+ // true. For now, we expect to skip some members that we can't yet handle.
+ }
+ }
+
+ // Finish checking fields.
+ ActOnFields(0, Instantiation->getLocation(), Instantiation, Fields,
+ SourceLocation(), SourceLocation(), 0);
+ CheckCompletedCXXClass(Instantiation);
+
+ // Attach any in-class member initializers now that the class is complete.
+ for (unsigned I = 0, N = FieldsWithMemberInitializers.size(); I != N; ++I) {
+ FieldDecl *OldField = FieldsWithMemberInitializers[I].first;
+ FieldDecl *NewField = FieldsWithMemberInitializers[I].second;
+ Expr *OldInit = OldField->getInClassInitializer();
+
+ ExprResult NewInit = SubstInitializer(OldInit, TemplateArgs,
+ /*CXXDirectInit=*/false);
+ if (NewInit.isInvalid())
+ NewField->setInvalidDecl();
+ else {
+ Expr *Init = NewInit.take();
+ assert(Init && "no-argument initializer in class");
+ assert(!isa<ParenListExpr>(Init) && "call-style init in class");
+ ActOnCXXInClassMemberInitializer(NewField,
+ Init->getSourceRange().getBegin(), Init);
+ }
+ }
+
+ // Instantiate late parsed attributes, and attach them to their decls.
+ // See Sema::InstantiateAttrs
+ for (LateInstantiatedAttrVec::iterator I = LateAttrs.begin(),
+ E = LateAttrs.end(); I != E; ++I) {
+ assert(CurrentInstantiationScope == Instantiator.getStartingScope());
+ CurrentInstantiationScope = I->Scope;
+ Attr *NewAttr =
+ instantiateTemplateAttribute(I->TmplAttr, Context, *this, TemplateArgs);
+ I->NewDecl->addAttr(NewAttr);
+ LocalInstantiationScope::deleteScopes(I->Scope,
+ Instantiator.getStartingScope());
+ }
+ Instantiator.disableLateAttributeInstantiation();
+ LateAttrs.clear();
+
+ if (!FieldsWithMemberInitializers.empty())
+ ActOnFinishDelayedMemberInitializers(Instantiation);
+
+ if (TSK == TSK_ImplicitInstantiation) {
+ Instantiation->setLocation(Pattern->getLocation());
+ Instantiation->setLocStart(Pattern->getInnerLocStart());
+ Instantiation->setRBraceLoc(Pattern->getRBraceLoc());
+ }
+
+ if (Instantiation->isInvalidDecl())
+ Invalid = true;
+ else {
+ // Instantiate any out-of-line class template partial
+ // specializations now.
+ for (TemplateDeclInstantiator::delayed_partial_spec_iterator
+ P = Instantiator.delayed_partial_spec_begin(),
+ PEnd = Instantiator.delayed_partial_spec_end();
+ P != PEnd; ++P) {
+ if (!Instantiator.InstantiateClassTemplatePartialSpecialization(
+ P->first,
+ P->second)) {
+ Invalid = true;
+ break;
+ }
+ }
+ }
+
+ // Exit the scope of this instantiation.
+ SavedContext.pop();
+
+ if (!Invalid) {
+ Consumer.HandleTagDeclDefinition(Instantiation);
+
+ // Always emit the vtable for an explicit instantiation definition
+ // of a polymorphic class template specialization.
+ if (TSK == TSK_ExplicitInstantiationDefinition)
+ MarkVTableUsed(PointOfInstantiation, Instantiation, true);
+ }
+
+ return Invalid;
+}
+
+/// \brief Instantiate the definition of an enum from a given pattern.
+///
+/// \param PointOfInstantiation The point of instantiation within the
+/// source code.
+/// \param Instantiation is the declaration whose definition is being
+/// instantiated. This will be a member enumeration of a class
+/// temploid specialization, or a local enumeration within a
+/// function temploid specialization.
+/// \param Pattern The templated declaration from which the instantiation
+/// occurs.
+/// \param TemplateArgs The template arguments to be substituted into
+/// the pattern.
+/// \param TSK The kind of implicit or explicit instantiation to perform.
+///
+/// \return \c true if an error occurred, \c false otherwise.
+bool Sema::InstantiateEnum(SourceLocation PointOfInstantiation,
+ EnumDecl *Instantiation, EnumDecl *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK) {
+ EnumDecl *PatternDef = Pattern->getDefinition();
+ if (DiagnoseUninstantiableTemplate(*this, PointOfInstantiation, Instantiation,
+ Instantiation->getInstantiatedFromMemberEnum(),
+ Pattern, PatternDef, TSK,/*Complain*/true))
+ return true;
+ Pattern = PatternDef;
+
+ // Record the point of instantiation.
+ if (MemberSpecializationInfo *MSInfo
+ = Instantiation->getMemberSpecializationInfo()) {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+ }
+
+ InstantiatingTemplate Inst(*this, PointOfInstantiation, Instantiation);
+ if (Inst)
+ return true;
+
+ // Enter the scope of this instantiation. We don't use
+ // PushDeclContext because we don't have a scope.
+ ContextRAII SavedContext(*this, Instantiation);
+ EnterExpressionEvaluationContext EvalContext(*this,
+ Sema::PotentiallyEvaluated);
+
+ LocalInstantiationScope Scope(*this, /*MergeWithParentScope*/true);
+
+ // Pull attributes from the pattern onto the instantiation.
+ InstantiateAttrs(TemplateArgs, Pattern, Instantiation);
+
+ TemplateDeclInstantiator Instantiator(*this, Instantiation, TemplateArgs);
+ Instantiator.InstantiateEnumDefinition(Instantiation, Pattern);
+
+ // Exit the scope of this instantiation.
+ SavedContext.pop();
+
+ return Instantiation->isInvalidDecl();
+}
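+
+// For illustration, a member enumeration whose definition can be instantiated
+// separately from its enclosing class (hypothetical example):
+// @code
+//   template<typename T> struct Traits {
+//     enum class Kind { Value = sizeof(T) };
+//   };
+//   auto K = Traits<double>::Kind::Value;  // requires the enum's definition
+// @endcode
+// The pattern's enumerators are substituted into the instantiation inside its
+// own local instantiation scope, mirroring InstantiateClass above.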
+
+namespace {
+ /// \brief A partial specialization whose template arguments have matched
+ /// a given template-id.
+ struct PartialSpecMatchResult {
+ ClassTemplatePartialSpecializationDecl *Partial;
+ TemplateArgumentList *Args;
+ };
+}
+
+bool
+Sema::InstantiateClassTemplateSpecialization(
+ SourceLocation PointOfInstantiation,
+ ClassTemplateSpecializationDecl *ClassTemplateSpec,
+ TemplateSpecializationKind TSK,
+ bool Complain) {
+ // Perform the actual instantiation on the canonical declaration.
+ ClassTemplateSpec = cast<ClassTemplateSpecializationDecl>(
+ ClassTemplateSpec->getCanonicalDecl());
+
+ // Check whether we have already instantiated or specialized this class
+ // template specialization.
+ if (ClassTemplateSpec->getSpecializationKind() != TSK_Undeclared) {
+ if (ClassTemplateSpec->getSpecializationKind() ==
+ TSK_ExplicitInstantiationDeclaration &&
+ TSK == TSK_ExplicitInstantiationDefinition) {
+ // An explicit instantiation definition follows an explicit instantiation
+ // declaration (C++0x [temp.explicit]p10); go ahead and perform the
+ // explicit instantiation.
+ ClassTemplateSpec->setSpecializationKind(TSK);
+
+ // If this is an explicit instantiation definition, mark the
+ // vtable as used.
+ if (TSK == TSK_ExplicitInstantiationDefinition &&
+ !ClassTemplateSpec->isInvalidDecl())
+ MarkVTableUsed(PointOfInstantiation, ClassTemplateSpec, true);
+
+ return false;
+ }
+
+ // We can only instantiate something that hasn't already been
+ // instantiated or specialized. Fail without any diagnostics: our
+ // caller will provide an error message.
+ return true;
+ }
+
+ if (ClassTemplateSpec->isInvalidDecl())
+ return true;
+
+ ClassTemplateDecl *Template = ClassTemplateSpec->getSpecializedTemplate();
+ CXXRecordDecl *Pattern = 0;
+
+ // C++ [temp.class.spec.match]p1:
+ // When a class template is used in a context that requires an
+ // instantiation of the class, it is necessary to determine
+ // whether the instantiation is to be generated using the primary
+ // template or one of the partial specializations. This is done by
+ // matching the template arguments of the class template
+ // specialization with the template argument lists of the partial
+ // specializations.
+ typedef PartialSpecMatchResult MatchResult;
+ SmallVector<MatchResult, 4> Matched;
+ SmallVector<ClassTemplatePartialSpecializationDecl *, 4> PartialSpecs;
+ Template->getPartialSpecializations(PartialSpecs);
+ for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I) {
+ ClassTemplatePartialSpecializationDecl *Partial = PartialSpecs[I];
+ TemplateDeductionInfo Info(Context, PointOfInstantiation);
+ if (TemplateDeductionResult Result
+ = DeduceTemplateArguments(Partial,
+ ClassTemplateSpec->getTemplateArgs(),
+ Info)) {
+ // FIXME: Store the failed-deduction information for use in
+ // diagnostics, later.
+ (void)Result;
+ } else {
+ Matched.push_back(PartialSpecMatchResult());
+ Matched.back().Partial = Partial;
+ Matched.back().Args = Info.take();
+ }
+ }
+
+ // If we're dealing with a member template where the template parameters
+ // have been instantiated, this provides the original template parameters
+ // from which the member template's parameters were instantiated.
+ SmallVector<const NamedDecl *, 4> InstantiatedTemplateParameters;
+
+ if (Matched.size() >= 1) {
+ SmallVector<MatchResult, 4>::iterator Best = Matched.begin();
+ if (Matched.size() == 1) {
+ // -- If exactly one matching specialization is found, the
+ // instantiation is generated from that specialization.
+ // We don't need to do anything for this.
+ } else {
+ // -- If more than one matching specialization is found, the
+ // partial order rules (14.5.4.2) are used to determine
+ // whether one of the specializations is more specialized
+ // than the others. If none of the specializations is more
+ // specialized than all of the other matching
+ // specializations, then the use of the class template is
+ // ambiguous and the program is ill-formed.
+ for (SmallVector<MatchResult, 4>::iterator P = Best + 1,
+ PEnd = Matched.end();
+ P != PEnd; ++P) {
+ if (getMoreSpecializedPartialSpecialization(P->Partial, Best->Partial,
+ PointOfInstantiation)
+ == P->Partial)
+ Best = P;
+ }
+
+ // Determine if the best partial specialization is more specialized than
+ // the others.
+ bool Ambiguous = false;
+ for (SmallVector<MatchResult, 4>::iterator P = Matched.begin(),
+ PEnd = Matched.end();
+ P != PEnd; ++P) {
+ if (P != Best &&
+ getMoreSpecializedPartialSpecialization(P->Partial, Best->Partial,
+ PointOfInstantiation)
+ != Best->Partial) {
+ Ambiguous = true;
+ break;
+ }
+ }
+
+ if (Ambiguous) {
+ // Partial ordering did not produce a clear winner. Complain.
+ ClassTemplateSpec->setInvalidDecl();
+ Diag(PointOfInstantiation, diag::err_partial_spec_ordering_ambiguous)
+ << ClassTemplateSpec;
+
+ // Print the matching partial specializations.
+ for (SmallVector<MatchResult, 4>::iterator P = Matched.begin(),
+ PEnd = Matched.end();
+ P != PEnd; ++P)
+ Diag(P->Partial->getLocation(), diag::note_partial_spec_match)
+ << getTemplateArgumentBindingsText(
+ P->Partial->getTemplateParameters(),
+ *P->Args);
+
+ return true;
+ }
+ }
+
+ // Instantiate using the best class template partial specialization.
+ ClassTemplatePartialSpecializationDecl *OrigPartialSpec = Best->Partial;
+ while (OrigPartialSpec->getInstantiatedFromMember()) {
+ // If we've found an explicit specialization of this class template,
+ // stop here and use that as the pattern.
+ if (OrigPartialSpec->isMemberSpecialization())
+ break;
+
+ OrigPartialSpec = OrigPartialSpec->getInstantiatedFromMember();
+ }
+
+ Pattern = OrigPartialSpec;
+ ClassTemplateSpec->setInstantiationOf(Best->Partial, Best->Args);
+ } else {
+ // -- If no matches are found, the instantiation is generated
+ // from the primary template.
+ ClassTemplateDecl *OrigTemplate = Template;
+ while (OrigTemplate->getInstantiatedFromMemberTemplate()) {
+ // If we've found an explicit specialization of this class template,
+ // stop here and use that as the pattern.
+ if (OrigTemplate->isMemberSpecialization())
+ break;
+
+ OrigTemplate = OrigTemplate->getInstantiatedFromMemberTemplate();
+ }
+
+ Pattern = OrigTemplate->getTemplatedDecl();
+ }
+
+ bool Result = InstantiateClass(PointOfInstantiation, ClassTemplateSpec,
+ Pattern,
+ getTemplateInstantiationArgs(ClassTemplateSpec),
+ TSK,
+ Complain);
+
+ return Result;
+}
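+
+// For illustration, partial-specialization selection as performed above
+// (hypothetical example):
+// @code
+//   template<typename T> struct Traits { /* primary */ };
+//   template<typename T> struct Traits<T*> { /* partial specialization */ };
+//   Traits<int*> TI;   // deduced against 'Traits<T*>' with T = int
+// @endcode
+// Both candidates are considered; since exactly one partial specialization
+// matches, its definition becomes the instantiation pattern. With several
+// matches, partial ordering picks the most specialized one, or the use is
+// diagnosed as ambiguous.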
+
+/// \brief Instantiates the definitions of all of the members
+/// of the given class, which is an instantiation of a class template
+/// or a member class of a template.
+void
+Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
+ CXXRecordDecl *Instantiation,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ TemplateSpecializationKind TSK) {
+ for (DeclContext::decl_iterator D = Instantiation->decls_begin(),
+ DEnd = Instantiation->decls_end();
+ D != DEnd; ++D) {
+ bool SuppressNew = false;
+ if (FunctionDecl *Function = dyn_cast<FunctionDecl>(*D)) {
+ if (FunctionDecl *Pattern
+ = Function->getInstantiatedFromMemberFunction()) {
+ MemberSpecializationInfo *MSInfo
+ = Function->getMemberSpecializationInfo();
+ assert(MSInfo && "No member specialization information?");
+ if (MSInfo->getTemplateSpecializationKind()
+ == TSK_ExplicitSpecialization)
+ continue;
+
+ if (CheckSpecializationInstantiationRedecl(PointOfInstantiation, TSK,
+ Function,
+ MSInfo->getTemplateSpecializationKind(),
+ MSInfo->getPointOfInstantiation(),
+ SuppressNew) ||
+ SuppressNew)
+ continue;
+
+ if (Function->isDefined())
+ continue;
+
+ if (TSK == TSK_ExplicitInstantiationDefinition) {
+ // C++0x [temp.explicit]p8:
+ // An explicit instantiation definition that names a class template
+ // specialization explicitly instantiates the class template
+ // specialization and is only an explicit instantiation definition
+ // of members whose definition is visible at the point of
+ // instantiation.
+ if (!Pattern->isDefined())
+ continue;
+
+ Function->setTemplateSpecializationKind(TSK, PointOfInstantiation);
+
+ InstantiateFunctionDefinition(PointOfInstantiation, Function);
+ } else {
+ Function->setTemplateSpecializationKind(TSK, PointOfInstantiation);
+ }
+ }
+ } else if (VarDecl *Var = dyn_cast<VarDecl>(*D)) {
+ if (Var->isStaticDataMember()) {
+ MemberSpecializationInfo *MSInfo = Var->getMemberSpecializationInfo();
+ assert(MSInfo && "No member specialization information?");
+ if (MSInfo->getTemplateSpecializationKind()
+ == TSK_ExplicitSpecialization)
+ continue;
+
+ if (CheckSpecializationInstantiationRedecl(PointOfInstantiation, TSK,
+ Var,
+ MSInfo->getTemplateSpecializationKind(),
+ MSInfo->getPointOfInstantiation(),
+ SuppressNew) ||
+ SuppressNew)
+ continue;
+
+ if (TSK == TSK_ExplicitInstantiationDefinition) {
+ // C++0x [temp.explicit]p8:
+ // An explicit instantiation definition that names a class template
+ // specialization explicitly instantiates the class template
+ // specialization and is only an explicit instantiation definition
+ // of members whose definition is visible at the point of
+ // instantiation.
+ if (!Var->getInstantiatedFromStaticDataMember()
+ ->getOutOfLineDefinition())
+ continue;
+
+ Var->setTemplateSpecializationKind(TSK, PointOfInstantiation);
+ InstantiateStaticDataMemberDefinition(PointOfInstantiation, Var);
+ } else {
+ Var->setTemplateSpecializationKind(TSK, PointOfInstantiation);
+ }
+ }
+ } else if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(*D)) {
+ // Always skip the injected-class-name, along with any
+ // redeclarations of nested classes, since both would cause us
+ // to try to instantiate the members of a class twice.
+ if (Record->isInjectedClassName() || Record->getPreviousDecl())
+ continue;
+
+ MemberSpecializationInfo *MSInfo = Record->getMemberSpecializationInfo();
+ assert(MSInfo && "No member specialization information?");
+
+ if (MSInfo->getTemplateSpecializationKind()
+ == TSK_ExplicitSpecialization)
+ continue;
+
+ if (CheckSpecializationInstantiationRedecl(PointOfInstantiation, TSK,
+ Record,
+ MSInfo->getTemplateSpecializationKind(),
+ MSInfo->getPointOfInstantiation(),
+ SuppressNew) ||
+ SuppressNew)
+ continue;
+
+ CXXRecordDecl *Pattern = Record->getInstantiatedFromMemberClass();
+ assert(Pattern && "Missing instantiated-from-template information");
+
+ if (!Record->getDefinition()) {
+ if (!Pattern->getDefinition()) {
+ // C++0x [temp.explicit]p8:
+ // An explicit instantiation definition that names a class template
+ // specialization explicitly instantiates the class template
+ // specialization and is only an explicit instantiation definition
+ // of members whose definition is visible at the point of
+ // instantiation.
+ if (TSK == TSK_ExplicitInstantiationDeclaration) {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+ }
+
+ continue;
+ }
+
+ InstantiateClass(PointOfInstantiation, Record, Pattern,
+ TemplateArgs,
+ TSK);
+ } else {
+ if (TSK == TSK_ExplicitInstantiationDefinition &&
+ Record->getTemplateSpecializationKind() ==
+ TSK_ExplicitInstantiationDeclaration) {
+ Record->setTemplateSpecializationKind(TSK);
+ MarkVTableUsed(PointOfInstantiation, Record, true);
+ }
+ }
+
+ Pattern = cast_or_null<CXXRecordDecl>(Record->getDefinition());
+ if (Pattern)
+ InstantiateClassMembers(PointOfInstantiation, Pattern, TemplateArgs,
+ TSK);
+ } else if (EnumDecl *Enum = dyn_cast<EnumDecl>(*D)) {
+ MemberSpecializationInfo *MSInfo = Enum->getMemberSpecializationInfo();
+ assert(MSInfo && "No member specialization information?");
+
+ if (MSInfo->getTemplateSpecializationKind()
+ == TSK_ExplicitSpecialization)
+ continue;
+
+ if (CheckSpecializationInstantiationRedecl(
+ PointOfInstantiation, TSK, Enum,
+ MSInfo->getTemplateSpecializationKind(),
+ MSInfo->getPointOfInstantiation(), SuppressNew) ||
+ SuppressNew)
+ continue;
+
+ if (Enum->getDefinition())
+ continue;
+
+ EnumDecl *Pattern = Enum->getInstantiatedFromMemberEnum();
+ assert(Pattern && "Missing instantiated-from-template information");
+
+ if (TSK == TSK_ExplicitInstantiationDefinition) {
+ if (!Pattern->getDefinition())
+ continue;
+
+ InstantiateEnum(PointOfInstantiation, Enum, Pattern, TemplateArgs, TSK);
+ } else {
+ MSInfo->setTemplateSpecializationKind(TSK);
+ MSInfo->setPointOfInstantiation(PointOfInstantiation);
+ }
+ }
+ }
+}
+
+/// \brief Instantiate the definitions of all of the members of the
+/// given class template specialization, which was named as part of an
+/// explicit instantiation.
+void
+Sema::InstantiateClassTemplateSpecializationMembers(
+ SourceLocation PointOfInstantiation,
+ ClassTemplateSpecializationDecl *ClassTemplateSpec,
+ TemplateSpecializationKind TSK) {
+ // C++0x [temp.explicit]p7:
+ // An explicit instantiation that names a class template
+ // specialization is an explicit instantiation of the same kind
+ // (declaration or definition) of each of its members (not
+ // including members inherited from base classes) that has not
+ // been previously explicitly specialized in the translation unit
+ // containing the explicit instantiation, except as described
+ // below.
+ InstantiateClassMembers(PointOfInstantiation, ClassTemplateSpec,
+ getTemplateInstantiationArgs(ClassTemplateSpec),
+ TSK);
+}
+
+StmtResult
+Sema::SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs) {
+ if (!S)
+ return Owned(S);
+
+ TemplateInstantiator Instantiator(*this, TemplateArgs,
+ SourceLocation(),
+ DeclarationName());
+ return Instantiator.TransformStmt(S);
+}
+
+ExprResult
+Sema::SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs) {
+ if (!E)
+ return Owned(E);
+
+ TemplateInstantiator Instantiator(*this, TemplateArgs,
+ SourceLocation(),
+ DeclarationName());
+ return Instantiator.TransformExpr(E);
+}
+
+bool Sema::SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ SmallVectorImpl<Expr *> &Outputs) {
+ if (NumExprs == 0)
+ return false;
+
+ TemplateInstantiator Instantiator(*this, TemplateArgs,
+ SourceLocation(),
+ DeclarationName());
+ return Instantiator.TransformExprs(Exprs, NumExprs, IsCall, Outputs);
+}
+
+NestedNameSpecifierLoc
+Sema::SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ if (!NNS)
+ return NestedNameSpecifierLoc();
+
+ TemplateInstantiator Instantiator(*this, TemplateArgs, NNS.getBeginLoc(),
+ DeclarationName());
+ return Instantiator.TransformNestedNameSpecifierLoc(NNS);
+}
+
+/// \brief Do template substitution on declaration name info.
+DeclarationNameInfo
+Sema::SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ TemplateInstantiator Instantiator(*this, TemplateArgs, NameInfo.getLoc(),
+ NameInfo.getName());
+ return Instantiator.TransformDeclarationNameInfo(NameInfo);
+}
+
+TemplateName
+Sema::SubstTemplateName(NestedNameSpecifierLoc QualifierLoc,
+ TemplateName Name, SourceLocation Loc,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ TemplateInstantiator Instantiator(*this, TemplateArgs, Loc,
+ DeclarationName());
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ return Instantiator.TransformTemplateName(SS, Name, Loc);
+}
+
+bool Sema::Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
+ TemplateArgumentListInfo &Result,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ TemplateInstantiator Instantiator(*this, TemplateArgs, SourceLocation(),
+ DeclarationName());
+
+ return Instantiator.TransformTemplateArguments(Args, NumArgs, Result);
+}
+
+llvm::PointerUnion<Decl *, LocalInstantiationScope::DeclArgumentPack *> *
+LocalInstantiationScope::findInstantiationOf(const Decl *D) {
+ for (LocalInstantiationScope *Current = this; Current;
+ Current = Current->Outer) {
+
+ // Check if we found something within this scope.
+ const Decl *CheckD = D;
+ do {
+ LocalDeclsMap::iterator Found = Current->LocalDecls.find(CheckD);
+ if (Found != Current->LocalDecls.end())
+ return &Found->second;
+
+ // If this is a tag declaration, it's possible that we need to look for
+ // a previous declaration.
+ if (const TagDecl *Tag = dyn_cast<TagDecl>(CheckD))
+ CheckD = Tag->getPreviousDecl();
+ else
+ CheckD = 0;
+ } while (CheckD);
+
+ // If we aren't combined with our outer scope, we're done.
+ if (!Current->CombineWithOuterScope)
+ break;
+ }
+
+ // If we didn't find the decl, then we either have a sema bug, or we have a
+ // forward reference to a label declaration. Return null to indicate that
+ // we have an uninstantiated label.
+ assert(isa<LabelDecl>(D) && "declaration not instantiated in this scope");
+ return 0;
+}
+
+void LocalInstantiationScope::InstantiatedLocal(const Decl *D, Decl *Inst) {
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> &Stored = LocalDecls[D];
+ if (Stored.isNull())
+ Stored = Inst;
+ else if (Stored.is<Decl *>()) {
+ assert(Stored.get<Decl *>() == Inst && "Already instantiated this local");
+ Stored = Inst;
+ } else
+ LocalDecls[D].get<DeclArgumentPack *>()->push_back(Inst);
+}
+
+void LocalInstantiationScope::InstantiatedLocalPackArg(const Decl *D,
+ Decl *Inst) {
+ DeclArgumentPack *Pack = LocalDecls[D].get<DeclArgumentPack *>();
+ Pack->push_back(Inst);
+}
+
+void LocalInstantiationScope::MakeInstantiatedLocalArgPack(const Decl *D) {
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> &Stored = LocalDecls[D];
+ assert(Stored.isNull() && "Already instantiated this local");
+ DeclArgumentPack *Pack = new DeclArgumentPack;
+ Stored = Pack;
+ ArgumentPacks.push_back(Pack);
+}
+
+void LocalInstantiationScope::SetPartiallySubstitutedPack(NamedDecl *Pack,
+ const TemplateArgument *ExplicitArgs,
+ unsigned NumExplicitArgs) {
+ assert((!PartiallySubstitutedPack || PartiallySubstitutedPack == Pack) &&
+ "Already have a partially-substituted pack");
+ assert((!PartiallySubstitutedPack
+ || NumArgsInPartiallySubstitutedPack == NumExplicitArgs) &&
+ "Wrong number of arguments in partially-substituted pack");
+ PartiallySubstitutedPack = Pack;
+ ArgsInPartiallySubstitutedPack = ExplicitArgs;
+ NumArgsInPartiallySubstitutedPack = NumExplicitArgs;
+}
+
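+// Retrieve the parameter pack (if any) that has been partially substituted
+// with an explicitly-specified prefix of arguments, along with those
+// arguments, searching any enclosing scopes combined with this one.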
+NamedDecl *LocalInstantiationScope::getPartiallySubstitutedPack(
+ const TemplateArgument **ExplicitArgs,
+ unsigned *NumExplicitArgs) const {
+ if (ExplicitArgs)
+ *ExplicitArgs = 0;
+ if (NumExplicitArgs)
+ *NumExplicitArgs = 0;
+
+ for (const LocalInstantiationScope *Current = this; Current;
+ Current = Current->Outer) {
+ if (Current->PartiallySubstitutedPack) {
+ if (ExplicitArgs)
+ *ExplicitArgs = Current->ArgsInPartiallySubstitutedPack;
+ if (NumExplicitArgs)
+ *NumExplicitArgs = Current->NumArgsInPartiallySubstitutedPack;
+
+ return Current->PartiallySubstitutedPack;
+ }
+
+ if (!Current->CombineWithOuterScope)
+ break;
+ }
+
+ return 0;
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
new file mode 100644
index 0000000..8afe7ac
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -0,0 +1,3411 @@
+//===--- SemaTemplateInstantiateDecl.cpp - C++ Template Decl Instantiation ===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// This file implements C++ template instantiation for declarations.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/PrettyDeclStackTrace.h"
+#include "clang/Sema/Template.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/DependentDiagnostic.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/Lex/Preprocessor.h"
+
+using namespace clang;
+
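+// Substitute the current template arguments into the nested-name-specifier
+// written on OldDecl (if any) and attach the result to NewDecl. Returns true
+// if the substitution failed.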
+bool TemplateDeclInstantiator::SubstQualifier(const DeclaratorDecl *OldDecl,
+ DeclaratorDecl *NewDecl) {
+ if (!OldDecl->getQualifierLoc())
+ return false;
+
+ NestedNameSpecifierLoc NewQualifierLoc
+ = SemaRef.SubstNestedNameSpecifierLoc(OldDecl->getQualifierLoc(),
+ TemplateArgs);
+
+ if (!NewQualifierLoc)
+ return true;
+
+ NewDecl->setQualifierInfo(NewQualifierLoc);
+ return false;
+}
+
+bool TemplateDeclInstantiator::SubstQualifier(const TagDecl *OldDecl,
+ TagDecl *NewDecl) {
+ if (!OldDecl->getQualifierLoc())
+ return false;
+
+ NestedNameSpecifierLoc NewQualifierLoc
+ = SemaRef.SubstNestedNameSpecifierLoc(OldDecl->getQualifierLoc(),
+ TemplateArgs);
+
+ if (!NewQualifierLoc)
+ return true;
+
+ NewDecl->setQualifierInfo(NewQualifierLoc);
+ return false;
+}
+
+// Include attribute instantiation code.
+#include "clang/Sema/AttrTemplateInstantiate.inc"
+
+void Sema::InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
+ const Decl *Tmpl, Decl *New,
+ LateInstantiatedAttrVec *LateAttrs,
+ LocalInstantiationScope *OuterMostScope) {
+ for (AttrVec::const_iterator i = Tmpl->attr_begin(), e = Tmpl->attr_end();
+ i != e; ++i) {
+ const Attr *TmplAttr = *i;
+
+ // FIXME: This should be generalized to more than just the AlignedAttr.
+ if (const AlignedAttr *Aligned = dyn_cast<AlignedAttr>(TmplAttr)) {
+ if (Aligned->isAlignmentDependent()) {
+ if (Aligned->isAlignmentExpr()) {
+ // The alignment expression is a constant expression.
+ EnterExpressionEvaluationContext Unevaluated(*this,
+ Sema::ConstantEvaluated);
+
+ ExprResult Result = SubstExpr(Aligned->getAlignmentExpr(),
+ TemplateArgs);
+ if (!Result.isInvalid())
+ AddAlignedAttr(Aligned->getLocation(), New, Result.takeAs<Expr>());
+ } else {
+ TypeSourceInfo *Result = SubstType(Aligned->getAlignmentType(),
+ TemplateArgs,
+ Aligned->getLocation(),
+ DeclarationName());
+ if (Result)
+ AddAlignedAttr(Aligned->getLocation(), New, Result);
+ }
+ continue;
+ }
+ }
+
+ if (TmplAttr->isLateParsed() && LateAttrs) {
+ // Late parsed attributes must be instantiated and attached after the
+ // enclosing class has been instantiated. See Sema::InstantiateClass.
+ LocalInstantiationScope *Saved = 0;
+ if (CurrentInstantiationScope)
+ Saved = CurrentInstantiationScope->cloneScopes(OuterMostScope);
+ LateAttrs->push_back(LateInstantiatedAttribute(TmplAttr, Saved, New));
+ } else {
+ Attr *NewAttr = sema::instantiateTemplateAttribute(TmplAttr, Context,
+ *this, TemplateArgs);
+ New->addAttr(NewAttr);
+ }
+ }
+}
+
+Decl *
+TemplateDeclInstantiator::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
+ llvm_unreachable("Translation units cannot be instantiated");
+}
+
+Decl *
+TemplateDeclInstantiator::VisitLabelDecl(LabelDecl *D) {
+ LabelDecl *Inst = LabelDecl::Create(SemaRef.Context, Owner, D->getLocation(),
+ D->getIdentifier());
+ Owner->addDecl(Inst);
+ return Inst;
+}
+
+Decl *
+TemplateDeclInstantiator::VisitNamespaceDecl(NamespaceDecl *D) {
+ llvm_unreachable("Namespaces cannot be instantiated");
+}
+
+Decl *
+TemplateDeclInstantiator::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
+ NamespaceAliasDecl *Inst
+ = NamespaceAliasDecl::Create(SemaRef.Context, Owner,
+ D->getNamespaceLoc(),
+ D->getAliasLoc(),
+ D->getIdentifier(),
+ D->getQualifierLoc(),
+ D->getTargetNameLoc(),
+ D->getNamespace());
+ Owner->addDecl(Inst);
+ return Inst;
+}
+
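+// Instantiate a typedef or C++11 alias declaration: substitute into the
+// underlying type when it is instantiation-dependent or variably modified,
+// preserve the "typedef names an anonymous tag" relationship, and chain the
+// result to the instantiation of any previous declaration. For example, for
+//   template<typename T> struct S { typedef T *pointer; };
+// instantiating S<int> produces a typedef whose underlying type is 'int *'.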
+Decl *TemplateDeclInstantiator::InstantiateTypedefNameDecl(TypedefNameDecl *D,
+ bool IsTypeAlias) {
+ bool Invalid = false;
+ TypeSourceInfo *DI = D->getTypeSourceInfo();
+ if (DI->getType()->isInstantiationDependentType() ||
+ DI->getType()->isVariablyModifiedType()) {
+ DI = SemaRef.SubstType(DI, TemplateArgs,
+ D->getLocation(), D->getDeclName());
+ if (!DI) {
+ Invalid = true;
+ DI = SemaRef.Context.getTrivialTypeSourceInfo(SemaRef.Context.IntTy);
+ }
+ } else {
+ SemaRef.MarkDeclarationsReferencedInType(D->getLocation(), DI->getType());
+ }
+
+ // Create the new typedef
+ TypedefNameDecl *Typedef;
+ if (IsTypeAlias)
+ Typedef = TypeAliasDecl::Create(SemaRef.Context, Owner, D->getLocStart(),
+ D->getLocation(), D->getIdentifier(), DI);
+ else
+ Typedef = TypedefDecl::Create(SemaRef.Context, Owner, D->getLocStart(),
+ D->getLocation(), D->getIdentifier(), DI);
+ if (Invalid)
+ Typedef->setInvalidDecl();
+
+ // If the old typedef was the name for linkage purposes of an anonymous
+ // tag decl, re-establish that relationship for the new typedef.
+ if (const TagType *oldTagType = D->getUnderlyingType()->getAs<TagType>()) {
+ TagDecl *oldTag = oldTagType->getDecl();
+ if (oldTag->getTypedefNameForAnonDecl() == D) {
+ TagDecl *newTag = DI->getType()->castAs<TagType>()->getDecl();
+ assert(!newTag->getIdentifier() && !newTag->getTypedefNameForAnonDecl());
+ newTag->setTypedefNameForAnonDecl(Typedef);
+ }
+ }
+
+ if (TypedefNameDecl *Prev = D->getPreviousDecl()) {
+ NamedDecl *InstPrev = SemaRef.FindInstantiatedDecl(D->getLocation(), Prev,
+ TemplateArgs);
+ if (!InstPrev)
+ return 0;
+
+ TypedefNameDecl *InstPrevTypedef = cast<TypedefNameDecl>(InstPrev);
+
+ // If the typedef types are not identical, reject them.
+ SemaRef.isIncompatibleTypedef(InstPrevTypedef, Typedef);
+
+ Typedef->setPreviousDeclaration(InstPrevTypedef);
+ }
+
+ SemaRef.InstantiateAttrs(TemplateArgs, D, Typedef);
+
+ Typedef->setAccess(D->getAccess());
+
+ return Typedef;
+}
+
+Decl *TemplateDeclInstantiator::VisitTypedefDecl(TypedefDecl *D) {
+ Decl *Typedef = InstantiateTypedefNameDecl(D, /*IsTypeAlias=*/false);
+ Owner->addDecl(Typedef);
+ return Typedef;
+}
+
+Decl *TemplateDeclInstantiator::VisitTypeAliasDecl(TypeAliasDecl *D) {
+ Decl *Typedef = InstantiateTypedefNameDecl(D, /*IsTypeAlias=*/true);
+ Owner->addDecl(Typedef);
+ return Typedef;
+}
+
+Decl *
+TemplateDeclInstantiator::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
+ // Create a local instantiation scope for this type alias template, which
+ // will contain the instantiations of the template parameters.
+ LocalInstantiationScope Scope(SemaRef);
+
+ TemplateParameterList *TempParams = D->getTemplateParameters();
+ TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
+ if (!InstParams)
+ return 0;
+
+ TypeAliasDecl *Pattern = D->getTemplatedDecl();
+
+ TypeAliasTemplateDecl *PrevAliasTemplate = 0;
+ if (Pattern->getPreviousDecl()) {
+ DeclContext::lookup_result Found = Owner->lookup(Pattern->getDeclName());
+ if (Found.first != Found.second) {
+ PrevAliasTemplate = dyn_cast<TypeAliasTemplateDecl>(*Found.first);
+ }
+ }
+
+ TypeAliasDecl *AliasInst = cast_or_null<TypeAliasDecl>(
+ InstantiateTypedefNameDecl(Pattern, /*IsTypeAlias=*/true));
+ if (!AliasInst)
+ return 0;
+
+ TypeAliasTemplateDecl *Inst
+ = TypeAliasTemplateDecl::Create(SemaRef.Context, Owner, D->getLocation(),
+ D->getDeclName(), InstParams, AliasInst);
+ if (PrevAliasTemplate)
+ Inst->setPreviousDeclaration(PrevAliasTemplate);
+
+ Inst->setAccess(D->getAccess());
+
+ if (!PrevAliasTemplate)
+ Inst->setInstantiatedFromMemberTemplate(D);
+
+ Owner->addDecl(Inst);
+
+ return Inst;
+}
+
+Decl *TemplateDeclInstantiator::VisitVarDecl(VarDecl *D) {
+ // If this is the variable for an anonymous struct or union,
+ // instantiate the anonymous struct/union type first.
+ if (const RecordType *RecordTy = D->getType()->getAs<RecordType>())
+ if (RecordTy->getDecl()->isAnonymousStructOrUnion())
+ if (!VisitCXXRecordDecl(cast<CXXRecordDecl>(RecordTy->getDecl())))
+ return 0;
+
+ // Do substitution on the type of the declaration
+ TypeSourceInfo *DI = SemaRef.SubstType(D->getTypeSourceInfo(),
+ TemplateArgs,
+ D->getTypeSpecStartLoc(),
+ D->getDeclName());
+ if (!DI)
+ return 0;
+
+ if (DI->getType()->isFunctionType()) {
+ SemaRef.Diag(D->getLocation(), diag::err_variable_instantiates_to_function)
+ << D->isStaticDataMember() << DI->getType();
+ return 0;
+ }
+
+ // Build the instantiated declaration
+ VarDecl *Var = VarDecl::Create(SemaRef.Context, Owner,
+ D->getInnerLocStart(),
+ D->getLocation(), D->getIdentifier(),
+ DI->getType(), DI,
+ D->getStorageClass(),
+ D->getStorageClassAsWritten());
+ Var->setThreadSpecified(D->isThreadSpecified());
+ Var->setInitStyle(D->getInitStyle());
+ Var->setCXXForRangeDecl(D->isCXXForRangeDecl());
+ Var->setConstexpr(D->isConstexpr());
+
+ // Substitute the nested name specifier, if any.
+ if (SubstQualifier(D, Var))
+ return 0;
+
+ // If we are instantiating a static data member defined
+ // out-of-line, the instantiation will have the same lexical
+ // context (which will be a namespace scope) as the template.
+ if (D->isOutOfLine())
+ Var->setLexicalDeclContext(D->getLexicalDeclContext());
+
+ Var->setAccess(D->getAccess());
+
+ if (!D->isStaticDataMember()) {
+ Var->setUsed(D->isUsed(false));
+ Var->setReferenced(D->isReferenced());
+ }
+
+ // FIXME: In theory, we could have a previous declaration for variables that
+ // are not static data members.
+ // FIXME: having to fake up a LookupResult is dumb.
+ LookupResult Previous(SemaRef, Var->getDeclName(), Var->getLocation(),
+ Sema::LookupOrdinaryName, Sema::ForRedeclaration);
+ if (D->isStaticDataMember())
+ SemaRef.LookupQualifiedName(Previous, Owner, false);
+
+ // In ARC, infer 'retaining' for variables of retainable type.
+ if (SemaRef.getLangOpts().ObjCAutoRefCount &&
+ SemaRef.inferObjCARCLifetime(Var))
+ Var->setInvalidDecl();
+
+ SemaRef.CheckVariableDeclaration(Var, Previous);
+
+ if (D->isOutOfLine()) {
+ D->getLexicalDeclContext()->addDecl(Var);
+ Owner->makeDeclVisibleInContext(Var);
+ } else {
+ Owner->addDecl(Var);
+ if (Owner->isFunctionOrMethod())
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Var);
+ }
+ SemaRef.InstantiateAttrs(TemplateArgs, D, Var, LateAttrs, StartingScope);
+
+ // Link instantiations of static data members back to the template from
+ // which they were instantiated.
+ if (Var->isStaticDataMember())
+ SemaRef.Context.setInstantiatedFromStaticDataMember(Var, D,
+ TSK_ImplicitInstantiation);
+
+ if (Var->getAnyInitializer()) {
+ // We already have an initializer in the class.
+ } else if (D->getInit()) {
+ if (Var->isStaticDataMember() && !D->isOutOfLine())
+ SemaRef.PushExpressionEvaluationContext(Sema::ConstantEvaluated);
+ else
+ SemaRef.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
+
+ // Instantiate the initializer.
+ ExprResult Init = SemaRef.SubstInitializer(D->getInit(), TemplateArgs,
+ D->getInitStyle() == VarDecl::CallInit);
+ if (!Init.isInvalid()) {
+ bool TypeMayContainAuto = true;
+ if (Init.get()) {
+ bool DirectInit = D->isDirectInit();
+ SemaRef.AddInitializerToDecl(Var, Init.take(), DirectInit,
+ TypeMayContainAuto);
+ } else
+ SemaRef.ActOnUninitializedDecl(Var, TypeMayContainAuto);
+ } else {
+ // FIXME: Not too happy about invalidating the declaration
+ // because of a bogus initializer.
+ Var->setInvalidDecl();
+ }
+
+ SemaRef.PopExpressionEvaluationContext();
+ } else if ((!Var->isStaticDataMember() || Var->isOutOfLine()) &&
+ !Var->isCXXForRangeDecl())
+ SemaRef.ActOnUninitializedDecl(Var, false);
+
+ // Diagnose unused local variables with dependent types, where the diagnostic
+ // will have been deferred.
+ if (!Var->isInvalidDecl() && Owner->isFunctionOrMethod() && !Var->isUsed() &&
+ D->getType()->isDependentType())
+ SemaRef.DiagnoseUnusedDecl(Var);
+
+ return Var;
+}
+
+Decl *TemplateDeclInstantiator::VisitAccessSpecDecl(AccessSpecDecl *D) {
+ AccessSpecDecl* AD
+ = AccessSpecDecl::Create(SemaRef.Context, D->getAccess(), Owner,
+ D->getAccessSpecifierLoc(), D->getColonLoc());
+ Owner->addHiddenDecl(AD);
+ return AD;
+}
+
+Decl *TemplateDeclInstantiator::VisitFieldDecl(FieldDecl *D) {
+ bool Invalid = false;
+ TypeSourceInfo *DI = D->getTypeSourceInfo();
+ if (DI->getType()->isInstantiationDependentType() ||
+ DI->getType()->isVariablyModifiedType()) {
+ DI = SemaRef.SubstType(DI, TemplateArgs,
+ D->getLocation(), D->getDeclName());
+ if (!DI) {
+ DI = D->getTypeSourceInfo();
+ Invalid = true;
+ } else if (DI->getType()->isFunctionType()) {
+ // C++ [temp.arg.type]p3:
+ // If a declaration acquires a function type through a type
+ // dependent on a template-parameter and this causes a
+ // declaration that does not use the syntactic form of a
+ // function declarator to have function type, the program is
+ // ill-formed.
+ SemaRef.Diag(D->getLocation(), diag::err_field_instantiates_to_function)
+ << DI->getType();
+ Invalid = true;
+ }
+ } else {
+ SemaRef.MarkDeclarationsReferencedInType(D->getLocation(), DI->getType());
+ }
+
+ Expr *BitWidth = D->getBitWidth();
+ if (Invalid)
+ BitWidth = 0;
+ else if (BitWidth) {
+ // The bit-width expression is a constant expression.
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
+
+ ExprResult InstantiatedBitWidth
+ = SemaRef.SubstExpr(BitWidth, TemplateArgs);
+ if (InstantiatedBitWidth.isInvalid()) {
+ Invalid = true;
+ BitWidth = 0;
+ } else
+ BitWidth = InstantiatedBitWidth.takeAs<Expr>();
+ }
+
+ FieldDecl *Field = SemaRef.CheckFieldDecl(D->getDeclName(),
+ DI->getType(), DI,
+ cast<RecordDecl>(Owner),
+ D->getLocation(),
+ D->isMutable(),
+ BitWidth,
+ D->hasInClassInitializer(),
+ D->getTypeSpecStartLoc(),
+ D->getAccess(),
+ 0);
+ if (!Field) {
+ cast<Decl>(Owner)->setInvalidDecl();
+ return 0;
+ }
+
+ SemaRef.InstantiateAttrs(TemplateArgs, D, Field, LateAttrs, StartingScope);
+
+ if (Invalid)
+ Field->setInvalidDecl();
+
+ if (!Field->getDeclName()) {
+ // Keep track of where this decl came from.
+ SemaRef.Context.setInstantiatedFromUnnamedFieldDecl(Field, D);
+ }
+ if (CXXRecordDecl *Parent= dyn_cast<CXXRecordDecl>(Field->getDeclContext())) {
+ if (Parent->isAnonymousStructOrUnion() &&
+ Parent->getRedeclContext()->isFunctionOrMethod())
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Field);
+ }
+
+ Field->setImplicit(D->isImplicit());
+ Field->setAccess(D->getAccess());
+ Owner->addDecl(Field);
+
+ return Field;
+}
+
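+// Instantiate an indirect field (a member of an anonymous struct or union)
+// by remapping each declaration in its chain to its instantiation.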
+Decl *TemplateDeclInstantiator::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
+ NamedDecl **NamedChain =
+ new (SemaRef.Context)NamedDecl*[D->getChainingSize()];
+
+ int i = 0;
+ for (IndirectFieldDecl::chain_iterator PI =
+ D->chain_begin(), PE = D->chain_end();
+ PI != PE; ++PI) {
+ NamedDecl *Next = SemaRef.FindInstantiatedDecl(D->getLocation(), *PI,
+ TemplateArgs);
+ if (!Next)
+ return 0;
+
+ NamedChain[i++] = Next;
+ }
+
+ QualType T = cast<FieldDecl>(NamedChain[i-1])->getType();
+ IndirectFieldDecl* IndirectField
+ = IndirectFieldDecl::Create(SemaRef.Context, Owner, D->getLocation(),
+ D->getIdentifier(), T,
+ NamedChain, D->getChainingSize());
+
+ IndirectField->setImplicit(D->isImplicit());
+ IndirectField->setAccess(D->getAccess());
+ Owner->addDecl(IndirectField);
+ return IndirectField;
+}
+
+Decl *TemplateDeclInstantiator::VisitFriendDecl(FriendDecl *D) {
+ // Handle friend type expressions by simply substituting template
+ // parameters into the pattern type and checking the result.
+ if (TypeSourceInfo *Ty = D->getFriendType()) {
+ TypeSourceInfo *InstTy;
+ // If this is an unsupported friend, don't bother substituting template
+ // arguments into it. The actual type referred to won't be used by any
+ // parts of Clang, and may not be valid for instantiating. Just use the
+ // same info for the instantiated friend.
+ if (D->isUnsupportedFriend()) {
+ InstTy = Ty;
+ } else {
+ InstTy = SemaRef.SubstType(Ty, TemplateArgs,
+ D->getLocation(), DeclarationName());
+ }
+ if (!InstTy)
+ return 0;
+
+ FriendDecl *FD = SemaRef.CheckFriendTypeDecl(D->getLocation(),
+ D->getFriendLoc(), InstTy);
+ if (!FD)
+ return 0;
+
+ FD->setAccess(AS_public);
+ FD->setUnsupportedFriend(D->isUnsupportedFriend());
+ Owner->addDecl(FD);
+ return FD;
+ }
+
+ NamedDecl *ND = D->getFriendDecl();
+ assert(ND && "friend decl must be a decl or a type!");
+
+ // All of the Visit implementations for the various potential friend
+ // declarations have to be carefully written to work for friend
+ // objects, with the most important detail being that the target
+ // decl should almost certainly not be placed in Owner.
+ Decl *NewND = Visit(ND);
+ if (!NewND) return 0;
+
+ FriendDecl *FD =
+ FriendDecl::Create(SemaRef.Context, Owner, D->getLocation(),
+ cast<NamedDecl>(NewND), D->getFriendLoc());
+ FD->setAccess(AS_public);
+ FD->setUnsupportedFriend(D->isUnsupportedFriend());
+ Owner->addDecl(FD);
+ return FD;
+}
+
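+// Instantiate a static_assert by substituting into its condition and
+// re-checking it in the instantiated context. For example, given
+//   template<typename T> struct S { static_assert(sizeof(T) >= 4, "small"); };
+// instantiating S<char> re-evaluates the now non-dependent condition and
+// emits the diagnostic.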
+Decl *TemplateDeclInstantiator::VisitStaticAssertDecl(StaticAssertDecl *D) {
+ Expr *AssertExpr = D->getAssertExpr();
+
+ // The expression in a static assertion is a constant expression.
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
+
+ ExprResult InstantiatedAssertExpr
+ = SemaRef.SubstExpr(AssertExpr, TemplateArgs);
+ if (InstantiatedAssertExpr.isInvalid())
+ return 0;
+
+ ExprResult Message(D->getMessage());
+ return SemaRef.ActOnStaticAssertDeclaration(D->getLocation(),
+ InstantiatedAssertExpr.get(),
+ Message.get(),
+ D->getRParenLoc());
+}
+
+Decl *TemplateDeclInstantiator::VisitEnumDecl(EnumDecl *D) {
+ EnumDecl *PrevDecl = 0;
+ if (D->getPreviousDecl()) {
+ NamedDecl *Prev = SemaRef.FindInstantiatedDecl(D->getLocation(),
+ D->getPreviousDecl(),
+ TemplateArgs);
+ if (!Prev) return 0;
+ PrevDecl = cast<EnumDecl>(Prev);
+ }
+
+ EnumDecl *Enum = EnumDecl::Create(SemaRef.Context, Owner, D->getLocStart(),
+ D->getLocation(), D->getIdentifier(),
+ PrevDecl, D->isScoped(),
+ D->isScopedUsingClassTag(), D->isFixed());
+ if (D->isFixed()) {
+ if (TypeSourceInfo *TI = D->getIntegerTypeSourceInfo()) {
+ // If we have type source information for the underlying type, it means it
+ // has been explicitly set by the user. Perform substitution on it before
+ // moving on.
+ SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc();
+ TypeSourceInfo *NewTI = SemaRef.SubstType(TI, TemplateArgs, UnderlyingLoc,
+ DeclarationName());
+ if (!NewTI || SemaRef.CheckEnumUnderlyingType(NewTI))
+ Enum->setIntegerType(SemaRef.Context.IntTy);
+ else
+ Enum->setIntegerTypeSourceInfo(NewTI);
+ } else {
+ assert(!D->getIntegerType()->isDependentType()
+ && "Dependent type without type source info");
+ Enum->setIntegerType(D->getIntegerType());
+ }
+ }
+
+ SemaRef.InstantiateAttrs(TemplateArgs, D, Enum);
+
+ Enum->setInstantiationOfMemberEnum(D, TSK_ImplicitInstantiation);
+ Enum->setAccess(D->getAccess());
+ if (SubstQualifier(D, Enum)) return 0;
+ Owner->addDecl(Enum);
+
+ EnumDecl *Def = D->getDefinition();
+ if (Def && Def != D) {
+ // If this is an out-of-line definition of an enum member template, check
+ // that the underlying types match in the instantiation of both
+ // declarations.
+ if (TypeSourceInfo *TI = Def->getIntegerTypeSourceInfo()) {
+ SourceLocation UnderlyingLoc = TI->getTypeLoc().getBeginLoc();
+ QualType DefnUnderlying =
+ SemaRef.SubstType(TI->getType(), TemplateArgs,
+ UnderlyingLoc, DeclarationName());
+ SemaRef.CheckEnumRedeclaration(Def->getLocation(), Def->isScoped(),
+ DefnUnderlying, Enum);
+ }
+ }
+
+ if (D->getDeclContext()->isFunctionOrMethod())
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Enum);
+
+ // C++11 [temp.inst]p1: The implicit instantiation of a class template
+ // specialization causes the implicit instantiation of the declarations, but
+ // not the definitions of scoped member enumerations.
+ // FIXME: There appears to be no wording for what happens for an enum defined
+ // within a block scope, but we treat that much like a member template. Only
+ // instantiate the definition when visiting the definition in that case, since
+ // we will visit all redeclarations.
+ if (!Enum->isScoped() && Def &&
+ (!D->getDeclContext()->isFunctionOrMethod() || D->isCompleteDefinition()))
+ InstantiateEnumDefinition(Enum, Def);
+
+ return Enum;
+}
+
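+// Instantiate the body of an enumeration: substitute into each enumerator's
+// value expression (a constant expression) and rebuild the enumerator list
+// in the instantiated enum.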
+void TemplateDeclInstantiator::InstantiateEnumDefinition(
+ EnumDecl *Enum, EnumDecl *Pattern) {
+ Enum->startDefinition();
+
+ // Update the location to refer to the definition.
+ Enum->setLocation(Pattern->getLocation());
+
+ SmallVector<Decl*, 4> Enumerators;
+
+ EnumConstantDecl *LastEnumConst = 0;
+ for (EnumDecl::enumerator_iterator EC = Pattern->enumerator_begin(),
+ ECEnd = Pattern->enumerator_end();
+ EC != ECEnd; ++EC) {
+ // The specified value for the enumerator.
+ ExprResult Value = SemaRef.Owned((Expr *)0);
+ if (Expr *UninstValue = EC->getInitExpr()) {
+ // The enumerator's value expression is a constant expression.
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
+
+ Value = SemaRef.SubstExpr(UninstValue, TemplateArgs);
+ }
+
+ // Drop the initial value and continue.
+ bool isInvalid = false;
+ if (Value.isInvalid()) {
+ Value = SemaRef.Owned((Expr *)0);
+ isInvalid = true;
+ }
+
+ EnumConstantDecl *EnumConst
+ = SemaRef.CheckEnumConstant(Enum, LastEnumConst,
+ EC->getLocation(), EC->getIdentifier(),
+ Value.get());
+
+ if (isInvalid) {
+ if (EnumConst)
+ EnumConst->setInvalidDecl();
+ Enum->setInvalidDecl();
+ }
+
+ if (EnumConst) {
+ SemaRef.InstantiateAttrs(TemplateArgs, *EC, EnumConst);
+
+ EnumConst->setAccess(Enum->getAccess());
+ Enum->addDecl(EnumConst);
+ Enumerators.push_back(EnumConst);
+ LastEnumConst = EnumConst;
+
+ if (Pattern->getDeclContext()->isFunctionOrMethod() &&
+ !Enum->isScoped()) {
+ // If the enumeration is within a function or method, record the enum
+ // constant as a local.
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(*EC, EnumConst);
+ }
+ }
+ }
+
+ // FIXME: Fixup LBraceLoc
+ SemaRef.ActOnEnumBody(Enum->getLocation(), SourceLocation(),
+ Enum->getRBraceLoc(), Enum,
+ Enumerators.data(), Enumerators.size(),
+ 0, 0);
+}
+
+Decl *TemplateDeclInstantiator::VisitEnumConstantDecl(EnumConstantDecl *D) {
+ llvm_unreachable("EnumConstantDecls can only occur within EnumDecls.");
+}
+
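+// Instantiate a member (or friend) class template. For example, given
+//   template<typename T> struct Outer { template<typename U> struct Inner; };
+// instantiating Outer<int> creates the member template Inner with T mapped to
+// int; Inner's definition is only instantiated when a specialization of it is
+// required.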
+Decl *TemplateDeclInstantiator::VisitClassTemplateDecl(ClassTemplateDecl *D) {
+ bool isFriend = (D->getFriendObjectKind() != Decl::FOK_None);
+
+ // Create a local instantiation scope for this class template, which
+ // will contain the instantiations of the template parameters.
+ LocalInstantiationScope Scope(SemaRef);
+ TemplateParameterList *TempParams = D->getTemplateParameters();
+ TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
+ if (!InstParams)
+ return NULL;
+
+ CXXRecordDecl *Pattern = D->getTemplatedDecl();
+
+ // Instantiate the qualifier. We have to do this first in case
+ // we're a friend declaration, because if we are then we need to put
+ // the new declaration in the appropriate context.
+ NestedNameSpecifierLoc QualifierLoc = Pattern->getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc,
+ TemplateArgs);
+ if (!QualifierLoc)
+ return 0;
+ }
+
+ CXXRecordDecl *PrevDecl = 0;
+ ClassTemplateDecl *PrevClassTemplate = 0;
+
+ if (!isFriend && Pattern->getPreviousDecl()) {
+ DeclContext::lookup_result Found = Owner->lookup(Pattern->getDeclName());
+ if (Found.first != Found.second) {
+ PrevClassTemplate = dyn_cast<ClassTemplateDecl>(*Found.first);
+ if (PrevClassTemplate)
+ PrevDecl = PrevClassTemplate->getTemplatedDecl();
+ }
+ }
+
+ // If this isn't a friend, then it's a member template, in which
+ // case we just want to build the instantiation in the
+ // specialization. If it is a friend, we want to build it in
+ // the appropriate context.
+ DeclContext *DC = Owner;
+ if (isFriend) {
+ if (QualifierLoc) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ DC = SemaRef.computeDeclContext(SS);
+ if (!DC) return 0;
+ } else {
+ DC = SemaRef.FindInstantiatedContext(Pattern->getLocation(),
+ Pattern->getDeclContext(),
+ TemplateArgs);
+ }
+
+ // Look for a previous declaration of the template in the owning
+ // context.
+ LookupResult R(SemaRef, Pattern->getDeclName(), Pattern->getLocation(),
+ Sema::LookupOrdinaryName, Sema::ForRedeclaration);
+ SemaRef.LookupQualifiedName(R, DC);
+
+ if (R.isSingleResult()) {
+ PrevClassTemplate = R.getAsSingle<ClassTemplateDecl>();
+ if (PrevClassTemplate)
+ PrevDecl = PrevClassTemplate->getTemplatedDecl();
+ }
+
+ if (!PrevClassTemplate && QualifierLoc) {
+ SemaRef.Diag(Pattern->getLocation(), diag::err_not_tag_in_scope)
+ << D->getTemplatedDecl()->getTagKind() << Pattern->getDeclName() << DC
+ << QualifierLoc.getSourceRange();
+ return 0;
+ }
+
+ bool AdoptedPreviousTemplateParams = false;
+ if (PrevClassTemplate) {
+ bool Complain = true;
+
+ // HACK: libstdc++ 4.2.1 contains an ill-formed friend class
+ // template for struct std::tr1::__detail::_Map_base, where the
+ // template parameters of the friend declaration don't match the
+ // template parameters of the original declaration. In this one
+ // case, we don't complain about the ill-formed friend
+ // declaration.
+ if (isFriend && Pattern->getIdentifier() &&
+ Pattern->getIdentifier()->isStr("_Map_base") &&
+ DC->isNamespace() &&
+ cast<NamespaceDecl>(DC)->getIdentifier() &&
+ cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__detail")) {
+ DeclContext *DCParent = DC->getParent();
+ if (DCParent->isNamespace() &&
+ cast<NamespaceDecl>(DCParent)->getIdentifier() &&
+ cast<NamespaceDecl>(DCParent)->getIdentifier()->isStr("tr1")) {
+ DeclContext *DCParent2 = DCParent->getParent();
+ if (DCParent2->isNamespace() &&
+ cast<NamespaceDecl>(DCParent2)->getIdentifier() &&
+ cast<NamespaceDecl>(DCParent2)->getIdentifier()->isStr("std") &&
+ DCParent2->getParent()->isTranslationUnit())
+ Complain = false;
+ }
+ }
+
+ TemplateParameterList *PrevParams
+ = PrevClassTemplate->getTemplateParameters();
+
+ // Make sure the parameter lists match.
+ if (!SemaRef.TemplateParameterListsAreEqual(InstParams, PrevParams,
+ Complain,
+ Sema::TPL_TemplateMatch)) {
+ if (Complain)
+ return 0;
+
+ AdoptedPreviousTemplateParams = true;
+ InstParams = PrevParams;
+ }
+
+ // Do some additional validation, then merge default arguments
+ // from the existing declarations.
+ if (!AdoptedPreviousTemplateParams &&
+ SemaRef.CheckTemplateParameterList(InstParams, PrevParams,
+ Sema::TPC_ClassTemplate))
+ return 0;
+ }
+ }
+
+ CXXRecordDecl *RecordInst
+ = CXXRecordDecl::Create(SemaRef.Context, Pattern->getTagKind(), DC,
+ Pattern->getLocStart(), Pattern->getLocation(),
+ Pattern->getIdentifier(), PrevDecl,
+ /*DelayTypeCreation=*/true);
+
+ if (QualifierLoc)
+ RecordInst->setQualifierInfo(QualifierLoc);
+
+ ClassTemplateDecl *Inst
+ = ClassTemplateDecl::Create(SemaRef.Context, DC, D->getLocation(),
+ D->getIdentifier(), InstParams, RecordInst,
+ PrevClassTemplate);
+ RecordInst->setDescribedClassTemplate(Inst);
+
+ if (isFriend) {
+ if (PrevClassTemplate)
+ Inst->setAccess(PrevClassTemplate->getAccess());
+ else
+ Inst->setAccess(D->getAccess());
+
+ Inst->setObjectOfFriendDecl(PrevClassTemplate != 0);
+ // TODO: do we want to track the instantiation progeny of this
+ // friend target decl?
+ } else {
+ Inst->setAccess(D->getAccess());
+ if (!PrevClassTemplate)
+ Inst->setInstantiatedFromMemberTemplate(D);
+ }
+
+ // Trigger creation of the type for the instantiation.
+ SemaRef.Context.getInjectedClassNameType(RecordInst,
+ Inst->getInjectedClassNameSpecialization());
+
+ // Finish handling of friends.
+ if (isFriend) {
+ DC->makeDeclVisibleInContext(Inst);
+ Inst->setLexicalDeclContext(Owner);
+ RecordInst->setLexicalDeclContext(Owner);
+ return Inst;
+ }
+
+ if (D->isOutOfLine()) {
+ Inst->setLexicalDeclContext(D->getLexicalDeclContext());
+ RecordInst->setLexicalDeclContext(D->getLexicalDeclContext());
+ }
+
+ Owner->addDecl(Inst);
+
+ if (!PrevClassTemplate) {
+ // Queue up any out-of-line partial specializations of this member
+ // class template; the client will force their instantiation once
+ // the enclosing class has been instantiated.
+ SmallVector<ClassTemplatePartialSpecializationDecl *, 4> PartialSpecs;
+ D->getPartialSpecializations(PartialSpecs);
+ for (unsigned I = 0, N = PartialSpecs.size(); I != N; ++I)
+ if (PartialSpecs[I]->isOutOfLine())
+ OutOfLinePartialSpecs.push_back(std::make_pair(Inst, PartialSpecs[I]));
+ }
+
+ return Inst;
+}
+
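+// A partial specialization of a member class template is handled by looking
+// up the already-instantiated class template in the owner and either
+// returning its existing instantiated partial specialization or creating one
+// now.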
+Decl *
+TemplateDeclInstantiator::VisitClassTemplatePartialSpecializationDecl(
+ ClassTemplatePartialSpecializationDecl *D) {
+ ClassTemplateDecl *ClassTemplate = D->getSpecializedTemplate();
+
+ // Lookup the already-instantiated declaration in the instantiation
+ // of the class template and return that.
+ DeclContext::lookup_result Found
+ = Owner->lookup(ClassTemplate->getDeclName());
+ if (Found.first == Found.second)
+ return 0;
+
+ ClassTemplateDecl *InstClassTemplate
+ = dyn_cast<ClassTemplateDecl>(*Found.first);
+ if (!InstClassTemplate)
+ return 0;
+
+ if (ClassTemplatePartialSpecializationDecl *Result
+ = InstClassTemplate->findPartialSpecInstantiatedFromMember(D))
+ return Result;
+
+ return InstantiateClassTemplatePartialSpecialization(InstClassTemplate, D);
+}
+
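+// Instantiate a member (or friend) function template: substitute into its
+// template parameter list and its templated function, then link the result
+// back to the pattern it was instantiated from.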
+Decl *
+TemplateDeclInstantiator::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
+ // Create a local instantiation scope for this function template, which
+ // will contain the instantiations of the template parameters and then get
+ // merged with the local instantiation scope for the function template
+ // itself.
+ LocalInstantiationScope Scope(SemaRef);
+
+ TemplateParameterList *TempParams = D->getTemplateParameters();
+ TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
+ if (!InstParams)
+ return NULL;
+
+ FunctionDecl *Instantiated = 0;
+ if (CXXMethodDecl *DMethod = dyn_cast<CXXMethodDecl>(D->getTemplatedDecl()))
+ Instantiated = cast_or_null<FunctionDecl>(VisitCXXMethodDecl(DMethod,
+ InstParams));
+ else
+ Instantiated = cast_or_null<FunctionDecl>(VisitFunctionDecl(
+ D->getTemplatedDecl(),
+ InstParams));
+
+ if (!Instantiated)
+ return 0;
+
+ Instantiated->setAccess(D->getAccess());
+
+ // Link the instantiated function template declaration to the function
+ // template from which it was instantiated.
+ FunctionTemplateDecl *InstTemplate
+ = Instantiated->getDescribedFunctionTemplate();
+ assert(InstTemplate &&
+ "VisitFunctionDecl/CXXMethodDecl didn't create a template!");
+ InstTemplate->setAccess(D->getAccess());
+
+ bool isFriend = (InstTemplate->getFriendObjectKind() != Decl::FOK_None);
+
+ // Link the instantiation back to the pattern *unless* this is a
+ // non-definition friend declaration.
+ if (!InstTemplate->getInstantiatedFromMemberTemplate() &&
+ !(isFriend && !D->getTemplatedDecl()->isThisDeclarationADefinition()))
+ InstTemplate->setInstantiatedFromMemberTemplate(D);
+
+ // Make declarations visible in the appropriate context.
+ if (!isFriend)
+ Owner->addDecl(InstTemplate);
+
+ return InstTemplate;
+}
+
+Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
+ CXXRecordDecl *PrevDecl = 0;
+ if (D->isInjectedClassName())
+ PrevDecl = cast<CXXRecordDecl>(Owner);
+ else if (D->getPreviousDecl()) {
+ NamedDecl *Prev = SemaRef.FindInstantiatedDecl(D->getLocation(),
+ D->getPreviousDecl(),
+ TemplateArgs);
+ if (!Prev) return 0;
+ PrevDecl = cast<CXXRecordDecl>(Prev);
+ }
+
+ CXXRecordDecl *Record
+ = CXXRecordDecl::Create(SemaRef.Context, D->getTagKind(), Owner,
+ D->getLocStart(), D->getLocation(),
+ D->getIdentifier(), PrevDecl);
+
+ // Substitute the nested name specifier, if any.
+ if (SubstQualifier(D, Record))
+ return 0;
+
+ Record->setImplicit(D->isImplicit());
+ // FIXME: Check against AS_none is an ugly hack to work around the issue that
+ // the tag decls introduced by friend class declarations don't have an access
+ // specifier. Remove once this area of the code gets sorted out.
+ if (D->getAccess() != AS_none)
+ Record->setAccess(D->getAccess());
+ if (!D->isInjectedClassName())
+ Record->setInstantiationOfMemberClass(D, TSK_ImplicitInstantiation);
+
+ // If the original function was part of a friend declaration,
+ // inherit its namespace state.
+ if (Decl::FriendObjectKind FOK = D->getFriendObjectKind())
+ Record->setObjectOfFriendDecl(FOK == Decl::FOK_Declared);
+
+ // Make sure that anonymous structs and unions are recorded.
+ if (D->isAnonymousStructOrUnion()) {
+ Record->setAnonymousStructOrUnion(true);
+ if (Record->getDeclContext()->getRedeclContext()->isFunctionOrMethod())
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Record);
+ }
+
+ Owner->addDecl(Record);
+ return Record;
+}
+
+/// Normal class members are of more specific types and therefore
+/// don't make it here. This function serves two purposes:
+/// 1) instantiating function templates
+/// 2) substituting friend declarations
+/// FIXME: preserve function definitions in case #2
+Decl *TemplateDeclInstantiator::VisitFunctionDecl(FunctionDecl *D,
+ TemplateParameterList *TemplateParams) {
+ // Check whether there is already a function template specialization for
+ // this declaration.
+ FunctionTemplateDecl *FunctionTemplate = D->getDescribedFunctionTemplate();
+ if (FunctionTemplate && !TemplateParams) {
+ std::pair<const TemplateArgument *, unsigned> Innermost
+ = TemplateArgs.getInnermost();
+
+ void *InsertPos = 0;
+ FunctionDecl *SpecFunc
+ = FunctionTemplate->findSpecialization(Innermost.first, Innermost.second,
+ InsertPos);
+
+ // If we already have a function template specialization, return it.
+ if (SpecFunc)
+ return SpecFunc;
+ }
+
+ bool isFriend;
+ if (FunctionTemplate)
+ isFriend = (FunctionTemplate->getFriendObjectKind() != Decl::FOK_None);
+ else
+ isFriend = (D->getFriendObjectKind() != Decl::FOK_None);
+
+ bool MergeWithParentScope = (TemplateParams != 0) ||
+ Owner->isFunctionOrMethod() ||
+ !(isa<Decl>(Owner) &&
+ cast<Decl>(Owner)->isDefinedOutsideFunctionOrMethod());
+ LocalInstantiationScope Scope(SemaRef, MergeWithParentScope);
+
+ SmallVector<ParmVarDecl *, 4> Params;
+ TypeSourceInfo *TInfo = SubstFunctionType(D, Params);
+ if (!TInfo)
+ return 0;
+ QualType T = TInfo->getType();
+
+ NestedNameSpecifierLoc QualifierLoc = D->getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc,
+ TemplateArgs);
+ if (!QualifierLoc)
+ return 0;
+ }
+
+ // If we're instantiating a local function declaration, put the result
+ // in the owner; otherwise we need to find the instantiated context.
+ DeclContext *DC;
+ if (D->getDeclContext()->isFunctionOrMethod())
+ DC = Owner;
+ else if (isFriend && QualifierLoc) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ DC = SemaRef.computeDeclContext(SS);
+ if (!DC) return 0;
+ } else {
+ DC = SemaRef.FindInstantiatedContext(D->getLocation(), D->getDeclContext(),
+ TemplateArgs);
+ }
+
+ FunctionDecl *Function =
+ FunctionDecl::Create(SemaRef.Context, DC, D->getInnerLocStart(),
+ D->getLocation(), D->getDeclName(), T, TInfo,
+ D->getStorageClass(), D->getStorageClassAsWritten(),
+ D->isInlineSpecified(), D->hasWrittenPrototype(),
+ D->isConstexpr());
+
+ if (QualifierLoc)
+ Function->setQualifierInfo(QualifierLoc);
+
+ DeclContext *LexicalDC = Owner;
+ if (!isFriend && D->isOutOfLine()) {
+ assert(D->getDeclContext()->isFileContext());
+ LexicalDC = D->getDeclContext();
+ }
+
+ Function->setLexicalDeclContext(LexicalDC);
+
+ // Attach the parameters
+ if (isa<FunctionProtoType>(Function->getType().IgnoreParens())) {
+ // Adopt the already-instantiated parameters into our own context.
+ for (unsigned P = 0; P < Params.size(); ++P)
+ if (Params[P])
+ Params[P]->setOwningFunction(Function);
+ } else {
+ // Since we were instantiated via a typedef of a function type, create
+ // new parameters.
+ const FunctionProtoType *Proto
+ = Function->getType()->getAs<FunctionProtoType>();
+ assert(Proto && "No function prototype in template instantiation?");
+ for (FunctionProtoType::arg_type_iterator AI = Proto->arg_type_begin(),
+ AE = Proto->arg_type_end(); AI != AE; ++AI) {
+ ParmVarDecl *Param
+ = SemaRef.BuildParmVarDeclForTypedef(Function, Function->getLocation(),
+ *AI);
+ Param->setScopeInfo(0, Params.size());
+ Params.push_back(Param);
+ }
+ }
+ Function->setParams(Params);
+
+ SourceLocation InstantiateAtPOI;
+ if (TemplateParams) {
+ // Our resulting instantiation is actually a function template, since we
+ // are substituting only the outer template parameters. For example, given
+ //
+ // template<typename T>
+ // struct X {
+ // template<typename U> friend void f(T, U);
+ // };
+ //
+ // X<int> x;
+ //
+ // We are instantiating the friend function template "f" within X<int>,
+ // which means substituting int for T, but leaving "f" as a friend function
+ // template.
+ // Build the function template itself.
+ FunctionTemplate = FunctionTemplateDecl::Create(SemaRef.Context, DC,
+ Function->getLocation(),
+ Function->getDeclName(),
+ TemplateParams, Function);
+ Function->setDescribedFunctionTemplate(FunctionTemplate);
+
+ FunctionTemplate->setLexicalDeclContext(LexicalDC);
+
+ if (isFriend && D->isThisDeclarationADefinition()) {
+ // TODO: should we remember this connection regardless of whether
+ // the friend declaration provided a body?
+ FunctionTemplate->setInstantiatedFromMemberTemplate(
+ D->getDescribedFunctionTemplate());
+ }
+ } else if (FunctionTemplate) {
+ // Record this function template specialization.
+ std::pair<const TemplateArgument *, unsigned> Innermost
+ = TemplateArgs.getInnermost();
+ Function->setFunctionTemplateSpecialization(FunctionTemplate,
+ TemplateArgumentList::CreateCopy(SemaRef.Context,
+ Innermost.first,
+ Innermost.second),
+ /*InsertPos=*/0);
+ } else if (isFriend) {
+ // Note, we need this connection even if the friend doesn't have a body.
+ // Its body may exist but not have been attached yet due to deferred
+ // parsing.
+ // FIXME: It might be cleaner to set this when attaching the body to the
+ // friend function declaration, however that would require finding all the
+ // instantiations and modifying them.
+ Function->setInstantiationOfMemberFunction(D, TSK_ImplicitInstantiation);
+ }
+
+ if (InitFunctionInstantiation(Function, D))
+ Function->setInvalidDecl();
+
+ bool isExplicitSpecialization = false;
+
+ LookupResult Previous(SemaRef, Function->getDeclName(), SourceLocation(),
+ Sema::LookupOrdinaryName, Sema::ForRedeclaration);
+
+ if (DependentFunctionTemplateSpecializationInfo *Info
+ = D->getDependentSpecializationInfo()) {
+ assert(isFriend && "non-friend has dependent specialization info?");
+
+ // This needs to be set now for future sanity.
+ Function->setObjectOfFriendDecl(/*HasPrevious*/ true);
+
+ // Instantiate the explicit template arguments.
+ TemplateArgumentListInfo ExplicitArgs(Info->getLAngleLoc(),
+ Info->getRAngleLoc());
+ if (SemaRef.Subst(Info->getTemplateArgs(), Info->getNumTemplateArgs(),
+ ExplicitArgs, TemplateArgs))
+ return 0;
+
+ // Map the candidate templates to their instantiations.
+ for (unsigned I = 0, E = Info->getNumTemplates(); I != E; ++I) {
+ Decl *Temp = SemaRef.FindInstantiatedDecl(D->getLocation(),
+ Info->getTemplate(I),
+ TemplateArgs);
+ if (!Temp) return 0;
+
+ Previous.addDecl(cast<FunctionTemplateDecl>(Temp));
+ }
+
+ if (SemaRef.CheckFunctionTemplateSpecialization(Function,
+ &ExplicitArgs,
+ Previous))
+ Function->setInvalidDecl();
+
+ isExplicitSpecialization = true;
+
+ } else if (TemplateParams || !FunctionTemplate) {
+ // Look only into the namespace where the friend would be declared to
+ // find a previous declaration. This is the innermost enclosing namespace,
+ // as described in ActOnFriendFunctionDecl.
+ SemaRef.LookupQualifiedName(Previous, DC);
+
+ // In C++, the previous declaration we find might be a tag type
+ // (class or enum). In this case, the new declaration will hide the
+ // tag type. Note that this does not apply if we're declaring a
+ // typedef (C++ [dcl.typedef]p4).
+ if (Previous.isSingleTagDecl())
+ Previous.clear();
+ }
+
+ SemaRef.CheckFunctionDeclaration(/*Scope*/ 0, Function, Previous,
+ isExplicitSpecialization);
+
+ NamedDecl *PrincipalDecl = (TemplateParams
+ ? cast<NamedDecl>(FunctionTemplate)
+ : Function);
+
+ // If the original function was part of a friend declaration,
+ // inherit its namespace state and add it to the owner.
+ if (isFriend) {
+ NamedDecl *PrevDecl;
+ if (TemplateParams)
+ PrevDecl = FunctionTemplate->getPreviousDecl();
+ else
+ PrevDecl = Function->getPreviousDecl();
+
+ PrincipalDecl->setObjectOfFriendDecl(PrevDecl != 0);
+ DC->makeDeclVisibleInContext(PrincipalDecl);
+
+ bool queuedInstantiation = false;
+
+ // C++98 [temp.friend]p5: When a function is defined in a friend function
+ // declaration in a class template, the function is defined at each
+ // instantiation of the class template. The function is defined even if it
+ // is never used.
+ // C++11 [temp.friend]p4: When a function is defined in a friend function
+ // declaration in a class template, the function is instantiated when the
+ // function is odr-used.
+ //
+ // If -Wc++98-compat is enabled, we go through the motions of checking for a
+ // redefinition, but don't instantiate the function.
+ if ((!SemaRef.getLangOpts().CPlusPlus0x ||
+ SemaRef.Diags.getDiagnosticLevel(
+ diag::warn_cxx98_compat_friend_redefinition,
+ Function->getLocation())
+ != DiagnosticsEngine::Ignored) &&
+ D->isThisDeclarationADefinition()) {
+ // Check for a function body.
+ const FunctionDecl *Definition = 0;
+ if (Function->isDefined(Definition) &&
+ Definition->getTemplateSpecializationKind() == TSK_Undeclared) {
+ SemaRef.Diag(Function->getLocation(),
+ SemaRef.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_friend_redefinition :
+ diag::err_redefinition) << Function->getDeclName();
+ SemaRef.Diag(Definition->getLocation(), diag::note_previous_definition);
+ if (!SemaRef.getLangOpts().CPlusPlus0x)
+ Function->setInvalidDecl();
+ }
+ // Check for redefinitions due to other instantiations of this or
+ // a similar friend function.
+ else for (FunctionDecl::redecl_iterator R = Function->redecls_begin(),
+ REnd = Function->redecls_end();
+ R != REnd; ++R) {
+ if (*R == Function)
+ continue;
+ switch (R->getFriendObjectKind()) {
+ case Decl::FOK_None:
+ if (!SemaRef.getLangOpts().CPlusPlus0x &&
+ !queuedInstantiation && R->isUsed(false)) {
+ if (MemberSpecializationInfo *MSInfo
+ = Function->getMemberSpecializationInfo()) {
+ if (MSInfo->getPointOfInstantiation().isInvalid()) {
+ SourceLocation Loc = R->getLocation(); // FIXME
+ MSInfo->setPointOfInstantiation(Loc);
+ SemaRef.PendingLocalImplicitInstantiations.push_back(
+ std::make_pair(Function, Loc));
+ queuedInstantiation = true;
+ }
+ }
+ }
+ break;
+ default:
+ if (const FunctionDecl *RPattern
+ = R->getTemplateInstantiationPattern())
+ if (RPattern->isDefined(RPattern)) {
+ SemaRef.Diag(Function->getLocation(),
+ SemaRef.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_friend_redefinition :
+ diag::err_redefinition)
+ << Function->getDeclName();
+ SemaRef.Diag(R->getLocation(), diag::note_previous_definition);
+ if (!SemaRef.getLangOpts().CPlusPlus0x)
+ Function->setInvalidDecl();
+ break;
+ }
+ }
+ }
+ }
+ }
+
+ if (Function->isOverloadedOperator() && !DC->isRecord() &&
+ PrincipalDecl->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ PrincipalDecl->setNonMemberOperator();
+
+ assert(!D->isDefaulted() && "only methods should be defaulted");
+ return Function;
+}
+
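+// Instantiate a member function declaration (including constructors,
+// destructors, and conversion functions), or a member function template when
+// TemplateParams is provided. The definition, if any, is instantiated
+// separately when the function is needed.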
+Decl *
+TemplateDeclInstantiator::VisitCXXMethodDecl(CXXMethodDecl *D,
+ TemplateParameterList *TemplateParams,
+ bool IsClassScopeSpecialization) {
+ FunctionTemplateDecl *FunctionTemplate = D->getDescribedFunctionTemplate();
+ if (FunctionTemplate && !TemplateParams) {
+ // We are creating a function template specialization from a function
+ // template. Check whether there is already a function template
+ // specialization for this particular set of template arguments.
+ std::pair<const TemplateArgument *, unsigned> Innermost
+ = TemplateArgs.getInnermost();
+
+ void *InsertPos = 0;
+ FunctionDecl *SpecFunc
+ = FunctionTemplate->findSpecialization(Innermost.first, Innermost.second,
+ InsertPos);
+
+ // If we already have a function template specialization, return it.
+ if (SpecFunc)
+ return SpecFunc;
+ }
+
+ bool isFriend;
+ if (FunctionTemplate)
+ isFriend = (FunctionTemplate->getFriendObjectKind() != Decl::FOK_None);
+ else
+ isFriend = (D->getFriendObjectKind() != Decl::FOK_None);
+
+ bool MergeWithParentScope = (TemplateParams != 0) ||
+ !(isa<Decl>(Owner) &&
+ cast<Decl>(Owner)->isDefinedOutsideFunctionOrMethod());
+ LocalInstantiationScope Scope(SemaRef, MergeWithParentScope);
+
+ // Instantiate enclosing template arguments for friends.
+ SmallVector<TemplateParameterList *, 4> TempParamLists;
+ unsigned NumTempParamLists = 0;
+ if (isFriend && (NumTempParamLists = D->getNumTemplateParameterLists())) {
+ TempParamLists.set_size(NumTempParamLists);
+ for (unsigned I = 0; I != NumTempParamLists; ++I) {
+ TemplateParameterList *TempParams = D->getTemplateParameterList(I);
+ TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
+ if (!InstParams)
+ return NULL;
+ TempParamLists[I] = InstParams;
+ }
+ }
+
+ SmallVector<ParmVarDecl *, 4> Params;
+ TypeSourceInfo *TInfo = SubstFunctionType(D, Params);
+ if (!TInfo)
+ return 0;
+ QualType T = TInfo->getType();
+
+ // \brief If the type of this function, after ignoring parentheses,
+ // is not *directly* a function type, then we're instantiating a function
+ // that was declared via a typedef, e.g.,
+ //
+ // typedef int functype(int, int);
+ // functype func;
+ //
+ // In this case, we'll just go instantiate the ParmVarDecls that we
+ // synthesized in the method declaration.
+ if (!isa<FunctionProtoType>(T.IgnoreParens())) {
+ assert(!Params.size() && "Instantiating type could not yield parameters");
+ SmallVector<QualType, 4> ParamTypes;
+ if (SemaRef.SubstParmTypes(D->getLocation(), D->param_begin(),
+ D->getNumParams(), TemplateArgs, ParamTypes,
+ &Params))
+ return 0;
+ }
+
+ NestedNameSpecifierLoc QualifierLoc = D->getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = SemaRef.SubstNestedNameSpecifierLoc(QualifierLoc,
+ TemplateArgs);
+ if (!QualifierLoc)
+ return 0;
+ }
+
+ DeclContext *DC = Owner;
+ if (isFriend) {
+ if (QualifierLoc) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ DC = SemaRef.computeDeclContext(SS);
+
+ if (DC && SemaRef.RequireCompleteDeclContext(SS, DC))
+ return 0;
+ } else {
+ DC = SemaRef.FindInstantiatedContext(D->getLocation(),
+ D->getDeclContext(),
+ TemplateArgs);
+ }
+ if (!DC) return 0;
+ }
+
+ // Build the instantiated method declaration.
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DC);
+ CXXMethodDecl *Method = 0;
+
+ SourceLocation StartLoc = D->getInnerLocStart();
+ DeclarationNameInfo NameInfo
+ = SemaRef.SubstDeclarationNameInfo(D->getNameInfo(), TemplateArgs);
+ if (CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
+ Method = CXXConstructorDecl::Create(SemaRef.Context, Record,
+ StartLoc, NameInfo, T, TInfo,
+ Constructor->isExplicit(),
+ Constructor->isInlineSpecified(),
+ false, Constructor->isConstexpr());
+ } else if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(D)) {
+ Method = CXXDestructorDecl::Create(SemaRef.Context, Record,
+ StartLoc, NameInfo, T, TInfo,
+ Destructor->isInlineSpecified(),
+ false);
+ } else if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(D)) {
+ Method = CXXConversionDecl::Create(SemaRef.Context, Record,
+ StartLoc, NameInfo, T, TInfo,
+ Conversion->isInlineSpecified(),
+ Conversion->isExplicit(),
+ Conversion->isConstexpr(),
+ Conversion->getLocEnd());
+ } else {
+ Method = CXXMethodDecl::Create(SemaRef.Context, Record,
+ StartLoc, NameInfo, T, TInfo,
+ D->isStatic(),
+ D->getStorageClassAsWritten(),
+ D->isInlineSpecified(),
+ D->isConstexpr(), D->getLocEnd());
+ }
+
+ if (QualifierLoc)
+ Method->setQualifierInfo(QualifierLoc);
+
+ if (TemplateParams) {
+ // Our resulting instantiation is actually a function template, since we
+ // are substituting only the outer template parameters. For example, given
+ //
+ // template<typename T>
+ // struct X {
+ // template<typename U> void f(T, U);
+ // };
+ //
+ // X<int> x;
+ //
+ // We are instantiating the member template "f" within X<int>, which means
+ // substituting int for T, but leaving "f" as a member function template.
+ // Build the function template itself.
+ FunctionTemplate = FunctionTemplateDecl::Create(SemaRef.Context, Record,
+ Method->getLocation(),
+ Method->getDeclName(),
+ TemplateParams, Method);
+ if (isFriend) {
+ FunctionTemplate->setLexicalDeclContext(Owner);
+ FunctionTemplate->setObjectOfFriendDecl(true);
+ } else if (D->isOutOfLine())
+ FunctionTemplate->setLexicalDeclContext(D->getLexicalDeclContext());
+ Method->setDescribedFunctionTemplate(FunctionTemplate);
+ } else if (FunctionTemplate) {
+ // Record this function template specialization.
+ std::pair<const TemplateArgument *, unsigned> Innermost
+ = TemplateArgs.getInnermost();
+ Method->setFunctionTemplateSpecialization(FunctionTemplate,
+ TemplateArgumentList::CreateCopy(SemaRef.Context,
+ Innermost.first,
+ Innermost.second),
+ /*InsertPos=*/0);
+ } else if (!isFriend) {
+ // Record that this is an instantiation of a member function.
+ Method->setInstantiationOfMemberFunction(D, TSK_ImplicitInstantiation);
+ }
+
+ // If we are instantiating a member function defined
+ // out-of-line, the instantiation will have the same lexical
+ // context (which will be a namespace scope) as the template.
+ if (isFriend) {
+ if (NumTempParamLists)
+ Method->setTemplateParameterListsInfo(SemaRef.Context,
+ NumTempParamLists,
+ TempParamLists.data());
+
+ Method->setLexicalDeclContext(Owner);
+ Method->setObjectOfFriendDecl(true);
+ } else if (D->isOutOfLine())
+ Method->setLexicalDeclContext(D->getLexicalDeclContext());
+
+ // Attach the parameters
+ for (unsigned P = 0; P < Params.size(); ++P)
+ Params[P]->setOwningFunction(Method);
+ Method->setParams(Params);
+
+ if (InitMethodInstantiation(Method, D))
+ Method->setInvalidDecl();
+
+ LookupResult Previous(SemaRef, NameInfo, Sema::LookupOrdinaryName,
+ Sema::ForRedeclaration);
+
+ if (!FunctionTemplate || TemplateParams || isFriend) {
+ SemaRef.LookupQualifiedName(Previous, Record);
+
+ // In C++, the previous declaration we find might be a tag type
+ // (class or enum). In this case, the new declaration will hide the
+ // tag type. Note that this does not apply if we're declaring a
+ // typedef (C++ [dcl.typedef]p4).
+ if (Previous.isSingleTagDecl())
+ Previous.clear();
+ }
+
+ if (!IsClassScopeSpecialization)
+ SemaRef.CheckFunctionDeclaration(0, Method, Previous, false);
+
+ if (D->isPure())
+ SemaRef.CheckPureMethod(Method, SourceRange());
+
+ Method->setAccess(D->getAccess());
+
+ SemaRef.CheckOverrideControl(Method);
+
+ // If a function is defined as defaulted or deleted, mark it as such now.
+ if (D->isDefaulted())
+ Method->setDefaulted();
+ if (D->isDeletedAsWritten())
+ Method->setDeletedAsWritten();
+
+ if (FunctionTemplate) {
+ // If there's a function template, let our caller handle it.
+ } else if (Method->isInvalidDecl() && !Previous.empty()) {
+ // Don't hide a (potentially) valid declaration with an invalid one.
+ } else {
+ NamedDecl *DeclToAdd = (TemplateParams
+ ? cast<NamedDecl>(FunctionTemplate)
+ : Method);
+ if (isFriend)
+ Record->makeDeclVisibleInContext(DeclToAdd);
+ else if (!IsClassScopeSpecialization)
+ Owner->addDecl(DeclToAdd);
+ }
+
+ if (D->isExplicitlyDefaulted()) {
+ SemaRef.SetDeclDefaulted(Method, Method->getLocation());
+ } else {
+ assert(!D->isDefaulted() &&
+ "should not implicitly default uninstantiated function");
+ }
+
+ return Method;
+}
+
+Decl *TemplateDeclInstantiator::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
+ return VisitCXXMethodDecl(D);
+}
+
+Decl *TemplateDeclInstantiator::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
+ return VisitCXXMethodDecl(D);
+}
+
+Decl *TemplateDeclInstantiator::VisitCXXConversionDecl(CXXConversionDecl *D) {
+ return VisitCXXMethodDecl(D);
+}
+
+ParmVarDecl *TemplateDeclInstantiator::VisitParmVarDecl(ParmVarDecl *D) {
+ return SemaRef.SubstParmVarDecl(D, TemplateArgs, /*indexAdjustment*/ 0,
+ llvm::Optional<unsigned>(),
+ /*ExpectParameterPack=*/false);
+}
+
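+// Instantiate a template type parameter of a member template. Its depth is
+// reduced by the number of template argument levels being substituted, since
+// the enclosing levels have now been bound to concrete arguments.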
+Decl *TemplateDeclInstantiator::VisitTemplateTypeParmDecl(
+ TemplateTypeParmDecl *D) {
+ // TODO: don't always clone when decls are refcounted.
+ assert(D->getTypeForDecl()->isTemplateTypeParmType());
+
+ TemplateTypeParmDecl *Inst =
+ TemplateTypeParmDecl::Create(SemaRef.Context, Owner,
+ D->getLocStart(), D->getLocation(),
+ D->getDepth() - TemplateArgs.getNumLevels(),
+ D->getIndex(), D->getIdentifier(),
+ D->wasDeclaredWithTypename(),
+ D->isParameterPack());
+ Inst->setAccess(AS_public);
+
+ if (D->hasDefaultArgument())
+ Inst->setDefaultArgument(D->getDefaultArgumentInfo(), false);
+
+ // Introduce this template parameter's instantiation into the instantiation
+ // scope.
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Inst);
+
+ return Inst;
+}
+
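+// Instantiate a non-type template parameter. If its type is a parameter pack
+// expansion, e.g.
+//   template<typename ...Ts> struct X { template<Ts ...Vals> struct Y; };
+// then instantiating X<int, char> expands Vals into one int and one char
+// parameter, which is recorded here as an "expanded" parameter pack.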
+Decl *TemplateDeclInstantiator::VisitNonTypeTemplateParmDecl(
+ NonTypeTemplateParmDecl *D) {
+ // Substitute into the type of the non-type template parameter.
+ TypeLoc TL = D->getTypeSourceInfo()->getTypeLoc();
+ SmallVector<TypeSourceInfo *, 4> ExpandedParameterPackTypesAsWritten;
+ SmallVector<QualType, 4> ExpandedParameterPackTypes;
+ bool IsExpandedParameterPack = false;
+ TypeSourceInfo *DI;
+ QualType T;
+ bool Invalid = false;
+
+ if (D->isExpandedParameterPack()) {
+ // The non-type template parameter pack is an already-expanded pack
+ // expansion of types. Substitute into each of the expanded types.
+ ExpandedParameterPackTypes.reserve(D->getNumExpansionTypes());
+ ExpandedParameterPackTypesAsWritten.reserve(D->getNumExpansionTypes());
+ for (unsigned I = 0, N = D->getNumExpansionTypes(); I != N; ++I) {
+ TypeSourceInfo *NewDI =SemaRef.SubstType(D->getExpansionTypeSourceInfo(I),
+ TemplateArgs,
+ D->getLocation(),
+ D->getDeclName());
+ if (!NewDI)
+ return 0;
+
+ ExpandedParameterPackTypesAsWritten.push_back(NewDI);
+ QualType NewT =SemaRef.CheckNonTypeTemplateParameterType(NewDI->getType(),
+ D->getLocation());
+ if (NewT.isNull())
+ return 0;
+ ExpandedParameterPackTypes.push_back(NewT);
+ }
+
+ IsExpandedParameterPack = true;
+ DI = D->getTypeSourceInfo();
+ T = DI->getType();
+ } else if (isa<PackExpansionTypeLoc>(TL)) {
+ // The non-type template parameter pack's type is a pack expansion of types.
+ // Determine whether we need to expand this parameter pack into separate
+ // types.
+ PackExpansionTypeLoc Expansion = cast<PackExpansionTypeLoc>(TL);
+ TypeLoc Pattern = Expansion.getPatternLoc();
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ SemaRef.collectUnexpandedParameterPacks(Pattern, Unexpanded);
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> OrigNumExpansions
+ = Expansion.getTypePtr()->getNumExpansions();
+ llvm::Optional<unsigned> NumExpansions = OrigNumExpansions;
+ if (SemaRef.CheckParameterPacksForExpansion(Expansion.getEllipsisLoc(),
+ Pattern.getSourceRange(),
+ Unexpanded,
+ TemplateArgs,
+ Expand, RetainExpansion,
+ NumExpansions))
+ return 0;
+
+ if (Expand) {
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
+ TypeSourceInfo *NewDI = SemaRef.SubstType(Pattern, TemplateArgs,
+ D->getLocation(),
+ D->getDeclName());
+ if (!NewDI)
+ return 0;
+
+ ExpandedParameterPackTypesAsWritten.push_back(NewDI);
+ QualType NewT = SemaRef.CheckNonTypeTemplateParameterType(
+ NewDI->getType(),
+ D->getLocation());
+ if (NewT.isNull())
+ return 0;
+ ExpandedParameterPackTypes.push_back(NewT);
+ }
+
+ // Note that we have an expanded parameter pack. The "type" of this
+ // expanded parameter pack is the original expansion type, but callers
+ // will end up using the expanded parameter pack types for type-checking.
+ IsExpandedParameterPack = true;
+ DI = D->getTypeSourceInfo();
+ T = DI->getType();
+ } else {
+ // We cannot fully expand the pack expansion now, so substitute into the
+ // pattern and create a new pack expansion type.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, -1);
+ TypeSourceInfo *NewPattern = SemaRef.SubstType(Pattern, TemplateArgs,
+ D->getLocation(),
+ D->getDeclName());
+ if (!NewPattern)
+ return 0;
+
+ DI = SemaRef.CheckPackExpansion(NewPattern, Expansion.getEllipsisLoc(),
+ NumExpansions);
+ if (!DI)
+ return 0;
+
+ T = DI->getType();
+ }
+ } else {
+ // Simple case: substitution into a parameter that is not a parameter pack.
+ DI = SemaRef.SubstType(D->getTypeSourceInfo(), TemplateArgs,
+ D->getLocation(), D->getDeclName());
+ if (!DI)
+ return 0;
+
+ // Check that this type is acceptable for a non-type template parameter.
+ T = SemaRef.CheckNonTypeTemplateParameterType(DI->getType(),
+ D->getLocation());
+ if (T.isNull()) {
+ T = SemaRef.Context.IntTy;
+ Invalid = true;
+ }
+ }
+
+ NonTypeTemplateParmDecl *Param;
+ if (IsExpandedParameterPack)
+ Param = NonTypeTemplateParmDecl::Create(SemaRef.Context, Owner,
+ D->getInnerLocStart(),
+ D->getLocation(),
+ D->getDepth() - TemplateArgs.getNumLevels(),
+ D->getPosition(),
+ D->getIdentifier(), T,
+ DI,
+ ExpandedParameterPackTypes.data(),
+ ExpandedParameterPackTypes.size(),
+ ExpandedParameterPackTypesAsWritten.data());
+ else
+ Param = NonTypeTemplateParmDecl::Create(SemaRef.Context, Owner,
+ D->getInnerLocStart(),
+ D->getLocation(),
+ D->getDepth() - TemplateArgs.getNumLevels(),
+ D->getPosition(),
+ D->getIdentifier(), T,
+ D->isParameterPack(), DI);
+
+ Param->setAccess(AS_public);
+ if (Invalid)
+ Param->setInvalidDecl();
+
+ Param->setDefaultArgument(D->getDefaultArgument(), false);
+
+ // Introduce this template parameter's instantiation into the instantiation
+ // scope.
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Param);
+ return Param;
+}
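+
+// Rough illustration of the expanded-parameter-pack case above (hypothetical
+// user code; the names Outer and Inner are assumptions):
+//
+//   template<typename ...Ts> struct Outer {
+//     template<Ts ...Vals> struct Inner {};
+//   };
+//   Outer<int, long>::Inner<1, 2L> i;
+//
+// Instantiating Outer<int, long> expands the pack type of Vals into the
+// separate types {int, long}, which are collected in
+// ExpandedParameterPackTypes above.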
+
+Decl *
+TemplateDeclInstantiator::VisitTemplateTemplateParmDecl(
+ TemplateTemplateParmDecl *D) {
+ // Instantiate the template parameter list of the template template parameter.
+ TemplateParameterList *TempParams = D->getTemplateParameters();
+ TemplateParameterList *InstParams;
+ {
+ // Perform the actual substitution of template parameters within a new,
+ // local instantiation scope.
+ LocalInstantiationScope Scope(SemaRef);
+ InstParams = SubstTemplateParams(TempParams);
+ if (!InstParams)
+ return NULL;
+ }
+
+ // Build the template template parameter.
+ TemplateTemplateParmDecl *Param
+ = TemplateTemplateParmDecl::Create(SemaRef.Context, Owner, D->getLocation(),
+ D->getDepth() - TemplateArgs.getNumLevels(),
+ D->getPosition(), D->isParameterPack(),
+ D->getIdentifier(), InstParams);
+ Param->setDefaultArgument(D->getDefaultArgument(), false);
+ Param->setAccess(AS_public);
+
+ // Introduce this template parameter's instantiation into the instantiation
+ // scope.
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(D, Param);
+
+ return Param;
+}
+
+Decl *TemplateDeclInstantiator::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
+ // Using directives are never dependent (and never contain any types or
+ // expressions), so they require no explicit instantiation work.
+
+ UsingDirectiveDecl *Inst
+ = UsingDirectiveDecl::Create(SemaRef.Context, Owner, D->getLocation(),
+ D->getNamespaceKeyLocation(),
+ D->getQualifierLoc(),
+ D->getIdentLocation(),
+ D->getNominatedNamespace(),
+ D->getCommonAncestor());
+ Owner->addDecl(Inst);
+ return Inst;
+}
+
+Decl *TemplateDeclInstantiator::VisitUsingDecl(UsingDecl *D) {
+
+ // The nested name specifier may be dependent, for example
+ // template <typename T> struct t {
+ // struct s1 { T f1(); };
+ // struct s2 : s1 { using s1::f1; };
+ // };
+ // template struct t<int>;
+ // Here, in using s1::f1, s1 refers to t<T>::s1;
+ // we need to substitute for t<int>::s1.
+ NestedNameSpecifierLoc QualifierLoc
+ = SemaRef.SubstNestedNameSpecifierLoc(D->getQualifierLoc(),
+ TemplateArgs);
+ if (!QualifierLoc)
+ return 0;
+
+ // The name info is non-dependent, so no transformation
+ // is required.
+ DeclarationNameInfo NameInfo = D->getNameInfo();
+
+ // We only need to do redeclaration lookups if we're in a class
+ // scope (in fact, it's not really even possible in non-class
+ // scopes).
+ bool CheckRedeclaration = Owner->isRecord();
+
+ LookupResult Prev(SemaRef, NameInfo, Sema::LookupUsingDeclName,
+ Sema::ForRedeclaration);
+
+ UsingDecl *NewUD = UsingDecl::Create(SemaRef.Context, Owner,
+ D->getUsingLocation(),
+ QualifierLoc,
+ NameInfo,
+ D->isTypeName());
+
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ if (CheckRedeclaration) {
+ Prev.setHideTags(false);
+ SemaRef.LookupQualifiedName(Prev, Owner);
+
+ // Check for invalid redeclarations.
+ if (SemaRef.CheckUsingDeclRedeclaration(D->getUsingLocation(),
+ D->isTypeName(), SS,
+ D->getLocation(), Prev))
+ NewUD->setInvalidDecl();
+
+ }
+
+ if (!NewUD->isInvalidDecl() &&
+ SemaRef.CheckUsingDeclQualifier(D->getUsingLocation(), SS,
+ D->getLocation()))
+ NewUD->setInvalidDecl();
+
+ SemaRef.Context.setInstantiatedFromUsingDecl(NewUD, D);
+ NewUD->setAccess(D->getAccess());
+ Owner->addDecl(NewUD);
+
+ // Don't process the shadow decls for an invalid decl.
+ if (NewUD->isInvalidDecl())
+ return NewUD;
+
+ if (NameInfo.getName().getNameKind() == DeclarationName::CXXConstructorName) {
+ if (SemaRef.CheckInheritingConstructorUsingDecl(NewUD))
+ NewUD->setInvalidDecl();
+ return NewUD;
+ }
+
+ bool isFunctionScope = Owner->isFunctionOrMethod();
+
+ // Process the shadow decls.
+ for (UsingDecl::shadow_iterator I = D->shadow_begin(), E = D->shadow_end();
+ I != E; ++I) {
+ UsingShadowDecl *Shadow = *I;
+ NamedDecl *InstTarget =
+ cast_or_null<NamedDecl>(SemaRef.FindInstantiatedDecl(
+ Shadow->getLocation(),
+ Shadow->getTargetDecl(),
+ TemplateArgs));
+ if (!InstTarget)
+ return 0;
+
+ if (CheckRedeclaration &&
+ SemaRef.CheckUsingShadowDecl(NewUD, InstTarget, Prev))
+ continue;
+
+ UsingShadowDecl *InstShadow
+ = SemaRef.BuildUsingShadowDecl(/*Scope*/ 0, NewUD, InstTarget);
+ SemaRef.Context.setInstantiatedFromUsingShadowDecl(InstShadow, Shadow);
+
+ if (isFunctionScope)
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(Shadow, InstShadow);
+ }
+
+ return NewUD;
+}
+
+Decl *TemplateDeclInstantiator::VisitUsingShadowDecl(UsingShadowDecl *D) {
+ // Ignore these; we handle them in bulk when processing the UsingDecl.
+ return 0;
+}
+
+Decl * TemplateDeclInstantiator
+ ::VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D) {
+ NestedNameSpecifierLoc QualifierLoc
+ = SemaRef.SubstNestedNameSpecifierLoc(D->getQualifierLoc(),
+ TemplateArgs);
+ if (!QualifierLoc)
+ return 0;
+
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ // Since NameInfo refers to a typename, it cannot be a C++ special name.
+  // Hence, no transformation is required for it.
+ DeclarationNameInfo NameInfo(D->getDeclName(), D->getLocation());
+ NamedDecl *UD =
+ SemaRef.BuildUsingDeclaration(/*Scope*/ 0, D->getAccess(),
+ D->getUsingLoc(), SS, NameInfo, 0,
+ /*instantiation*/ true,
+ /*typename*/ true, D->getTypenameLoc());
+ if (UD)
+ SemaRef.Context.setInstantiatedFromUsingDecl(cast<UsingDecl>(UD), D);
+
+ return UD;
+}
+
+Decl * TemplateDeclInstantiator
+ ::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
+ NestedNameSpecifierLoc QualifierLoc
+ = SemaRef.SubstNestedNameSpecifierLoc(D->getQualifierLoc(), TemplateArgs);
+ if (!QualifierLoc)
+ return 0;
+
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ DeclarationNameInfo NameInfo
+ = SemaRef.SubstDeclarationNameInfo(D->getNameInfo(), TemplateArgs);
+
+ NamedDecl *UD =
+ SemaRef.BuildUsingDeclaration(/*Scope*/ 0, D->getAccess(),
+ D->getUsingLoc(), SS, NameInfo, 0,
+ /*instantiation*/ true,
+ /*typename*/ false, SourceLocation());
+ if (UD)
+ SemaRef.Context.setInstantiatedFromUsingDecl(cast<UsingDecl>(UD), D);
+
+ return UD;
+}
+
+
+Decl *TemplateDeclInstantiator::VisitClassScopeFunctionSpecializationDecl(
+ ClassScopeFunctionSpecializationDecl *Decl) {
+ CXXMethodDecl *OldFD = Decl->getSpecialization();
+ CXXMethodDecl *NewFD = cast<CXXMethodDecl>(VisitCXXMethodDecl(OldFD, 0, true));
+
+ LookupResult Previous(SemaRef, NewFD->getNameInfo(), Sema::LookupOrdinaryName,
+ Sema::ForRedeclaration);
+
+ SemaRef.LookupQualifiedName(Previous, SemaRef.CurContext);
+ if (SemaRef.CheckFunctionTemplateSpecialization(NewFD, 0, Previous)) {
+ NewFD->setInvalidDecl();
+ return NewFD;
+ }
+
+ // Associate the specialization with the pattern.
+ FunctionDecl *Specialization = cast<FunctionDecl>(Previous.getFoundDecl());
+ assert(Specialization && "Class scope Specialization is null");
+ SemaRef.Context.setClassScopeSpecializationPattern(Specialization, OldFD);
+
+ return NewFD;
+}
+
+Decl *Sema::SubstDecl(Decl *D, DeclContext *Owner,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ TemplateDeclInstantiator Instantiator(*this, Owner, TemplateArgs);
+ if (D->isInvalidDecl())
+ return 0;
+
+ return Instantiator.Visit(D);
+}
+
+/// \brief Instantiates a nested template parameter list in the current
+/// instantiation context.
+///
+/// \param L The parameter list to instantiate
+///
+/// \returns NULL if there was an error
+TemplateParameterList *
+TemplateDeclInstantiator::SubstTemplateParams(TemplateParameterList *L) {
+ // Get errors for all the parameters before bailing out.
+ bool Invalid = false;
+
+ unsigned N = L->size();
+ typedef SmallVector<NamedDecl *, 8> ParamVector;
+ ParamVector Params;
+ Params.reserve(N);
+ for (TemplateParameterList::iterator PI = L->begin(), PE = L->end();
+ PI != PE; ++PI) {
+ NamedDecl *D = cast_or_null<NamedDecl>(Visit(*PI));
+ Params.push_back(D);
+ Invalid = Invalid || !D || D->isInvalidDecl();
+ }
+
+ // Clean up if we had an error.
+ if (Invalid)
+ return NULL;
+
+ TemplateParameterList *InstL
+ = TemplateParameterList::Create(SemaRef.Context, L->getTemplateLoc(),
+ L->getLAngleLoc(), &Params.front(), N,
+ L->getRAngleLoc());
+ return InstL;
+}
+
+/// \brief Instantiate the declaration of a class template partial
+/// specialization.
+///
+/// \param ClassTemplate the (instantiated) class template that is partially
+/// specialized by the instantiation of \p PartialSpec.
+///
+/// \param PartialSpec the (uninstantiated) class template partial
+/// specialization that we are instantiating.
+///
+/// \returns The instantiated partial specialization, if successful; otherwise,
+/// NULL to indicate an error.
+ClassTemplatePartialSpecializationDecl *
+TemplateDeclInstantiator::InstantiateClassTemplatePartialSpecialization(
+ ClassTemplateDecl *ClassTemplate,
+ ClassTemplatePartialSpecializationDecl *PartialSpec) {
+ // Create a local instantiation scope for this class template partial
+ // specialization, which will contain the instantiations of the template
+ // parameters.
+ LocalInstantiationScope Scope(SemaRef);
+
+ // Substitute into the template parameters of the class template partial
+ // specialization.
+ TemplateParameterList *TempParams = PartialSpec->getTemplateParameters();
+ TemplateParameterList *InstParams = SubstTemplateParams(TempParams);
+ if (!InstParams)
+ return 0;
+
+ // Substitute into the template arguments of the class template partial
+ // specialization.
+ TemplateArgumentListInfo InstTemplateArgs; // no angle locations
+ if (SemaRef.Subst(PartialSpec->getTemplateArgsAsWritten(),
+ PartialSpec->getNumTemplateArgsAsWritten(),
+ InstTemplateArgs, TemplateArgs))
+ return 0;
+
+ // Check that the template argument list is well-formed for this
+ // class template.
+ SmallVector<TemplateArgument, 4> Converted;
+ if (SemaRef.CheckTemplateArgumentList(ClassTemplate,
+ PartialSpec->getLocation(),
+ InstTemplateArgs,
+ false,
+ Converted))
+ return 0;
+
+ // Figure out where to insert this class template partial specialization
+ // in the member template's set of class template partial specializations.
+ void *InsertPos = 0;
+ ClassTemplateSpecializationDecl *PrevDecl
+ = ClassTemplate->findPartialSpecialization(Converted.data(),
+ Converted.size(), InsertPos);
+
+ // Build the canonical type that describes the converted template
+ // arguments of the class template partial specialization.
+ QualType CanonType
+ = SemaRef.Context.getTemplateSpecializationType(TemplateName(ClassTemplate),
+ Converted.data(),
+ Converted.size());
+
+ // Build the fully-sugared type for this class template
+ // specialization as the user wrote in the specialization
+ // itself. This means that we'll pretty-print the type retrieved
+ // from the specialization's declaration the way that the user
+ // actually wrote the specialization, rather than formatting the
+ // name based on the "canonical" representation used to store the
+ // template arguments in the specialization.
+ TypeSourceInfo *WrittenTy
+ = SemaRef.Context.getTemplateSpecializationTypeInfo(
+ TemplateName(ClassTemplate),
+ PartialSpec->getLocation(),
+ InstTemplateArgs,
+ CanonType);
+
+ if (PrevDecl) {
+ // We've already seen a partial specialization with the same template
+ // parameters and template arguments. This can happen, for example, when
+ // substituting the outer template arguments ends up causing two
+ // class template partial specializations of a member class template
+ // to have identical forms, e.g.,
+ //
+ // template<typename T, typename U>
+ // struct Outer {
+ // template<typename X, typename Y> struct Inner;
+ // template<typename Y> struct Inner<T, Y>;
+ // template<typename Y> struct Inner<U, Y>;
+ // };
+ //
+ // Outer<int, int> outer; // error: the partial specializations of Inner
+ // // have the same signature.
+ SemaRef.Diag(PartialSpec->getLocation(), diag::err_partial_spec_redeclared)
+ << WrittenTy->getType();
+ SemaRef.Diag(PrevDecl->getLocation(), diag::note_prev_partial_spec_here)
+ << SemaRef.Context.getTypeDeclType(PrevDecl);
+ return 0;
+ }
+
+
+ // Create the class template partial specialization declaration.
+ ClassTemplatePartialSpecializationDecl *InstPartialSpec
+ = ClassTemplatePartialSpecializationDecl::Create(SemaRef.Context,
+ PartialSpec->getTagKind(),
+ Owner,
+ PartialSpec->getLocStart(),
+ PartialSpec->getLocation(),
+ InstParams,
+ ClassTemplate,
+ Converted.data(),
+ Converted.size(),
+ InstTemplateArgs,
+ CanonType,
+ 0,
+ ClassTemplate->getNextPartialSpecSequenceNumber());
+ // Substitute the nested name specifier, if any.
+ if (SubstQualifier(PartialSpec, InstPartialSpec))
+ return 0;
+
+ InstPartialSpec->setInstantiatedFromMember(PartialSpec);
+ InstPartialSpec->setTypeAsWritten(WrittenTy);
+
+ // Add this partial specialization to the set of class template partial
+ // specializations.
+ ClassTemplate->AddPartialSpecialization(InstPartialSpec, /*InsertPos=*/0);
+ return InstPartialSpec;
+}
+
+TypeSourceInfo*
+TemplateDeclInstantiator::SubstFunctionType(FunctionDecl *D,
+ SmallVectorImpl<ParmVarDecl *> &Params) {
+ TypeSourceInfo *OldTInfo = D->getTypeSourceInfo();
+ assert(OldTInfo && "substituting function without type source info");
+ assert(Params.empty() && "parameter vector is non-empty at start");
+ TypeSourceInfo *NewTInfo
+ = SemaRef.SubstFunctionDeclType(OldTInfo, TemplateArgs,
+ D->getTypeSpecStartLoc(),
+ D->getDeclName());
+ if (!NewTInfo)
+ return 0;
+
+ if (NewTInfo != OldTInfo) {
+ // Get parameters from the new type info.
+ TypeLoc OldTL = OldTInfo->getTypeLoc().IgnoreParens();
+ if (FunctionProtoTypeLoc *OldProtoLoc
+ = dyn_cast<FunctionProtoTypeLoc>(&OldTL)) {
+ TypeLoc NewTL = NewTInfo->getTypeLoc().IgnoreParens();
+ FunctionProtoTypeLoc *NewProtoLoc = cast<FunctionProtoTypeLoc>(&NewTL);
+ assert(NewProtoLoc && "Missing prototype?");
+ unsigned NewIdx = 0, NumNewParams = NewProtoLoc->getNumArgs();
+ for (unsigned OldIdx = 0, NumOldParams = OldProtoLoc->getNumArgs();
+ OldIdx != NumOldParams; ++OldIdx) {
+ ParmVarDecl *OldParam = OldProtoLoc->getArg(OldIdx);
+ if (!OldParam->isParameterPack() ||
+ // FIXME: Is this right? OldParam could expand to an empty parameter
+ // pack and the next parameter could be an unexpanded parameter pack
+ (NewIdx < NumNewParams &&
+ NewProtoLoc->getArg(NewIdx)->isParameterPack())) {
+ // Simple case: normal parameter, or a parameter pack that's
+ // instantiated to a (still-dependent) parameter pack.
+ ParmVarDecl *NewParam = NewProtoLoc->getArg(NewIdx++);
+ Params.push_back(NewParam);
+ SemaRef.CurrentInstantiationScope->InstantiatedLocal(OldParam,
+ NewParam);
+ continue;
+ }
+
+ // Parameter pack: make the instantiation an argument pack.
+ SemaRef.CurrentInstantiationScope->MakeInstantiatedLocalArgPack(
+ OldParam);
+ unsigned NumArgumentsInExpansion
+ = SemaRef.getNumArgumentsInExpansion(OldParam->getType(),
+ TemplateArgs);
+ while (NumArgumentsInExpansion--) {
+ ParmVarDecl *NewParam = NewProtoLoc->getArg(NewIdx++);
+ Params.push_back(NewParam);
+ SemaRef.CurrentInstantiationScope->InstantiatedLocalPackArg(OldParam,
+ NewParam);
+ }
+ }
+ }
+ } else {
+ // The function type itself was not dependent and therefore no
+ // substitution occurred. However, we still need to instantiate
+ // the function parameters themselves.
+ TypeLoc OldTL = OldTInfo->getTypeLoc().IgnoreParens();
+ if (FunctionProtoTypeLoc *OldProtoLoc
+ = dyn_cast<FunctionProtoTypeLoc>(&OldTL)) {
+ for (unsigned i = 0, i_end = OldProtoLoc->getNumArgs(); i != i_end; ++i) {
+ ParmVarDecl *Parm = VisitParmVarDecl(OldProtoLoc->getArg(i));
+ if (!Parm)
+ return 0;
+ Params.push_back(Parm);
+ }
+ }
+ }
+ return NewTInfo;
+}
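+
+// Rough illustration of the argument-pack handling above (hypothetical user
+// code; the names X and f are assumptions):
+//
+//   template<typename ...Ts> struct X {
+//     void f(Ts ...ts);
+//   };
+//   template struct X<int, double>;
+//
+// In X<int, double>, the single pattern parameter "Ts ...ts" corresponds to
+// two instantiated parameters, so the pattern parameter is registered as an
+// argument pack and each new ParmVarDecl is attached to it.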
+
+/// \brief Initializes the common fields of an instantiated function
+/// declaration (New) from the corresponding fields of its template (Tmpl).
+///
+/// \returns true if there was an error
+bool
+TemplateDeclInstantiator::InitFunctionInstantiation(FunctionDecl *New,
+ FunctionDecl *Tmpl) {
+ if (Tmpl->isDeletedAsWritten())
+ New->setDeletedAsWritten();
+
+  // If we are substituting explicitly-specified or deduced template arguments
+  // into a function template and we reach this point, we are now past the
+  // point where SFINAE applies and have committed
+ // to keeping the new function template specialization. We therefore
+ // convert the active template instantiation for the function template
+ // into a template instantiation for this specific function template
+ // specialization, which is not a SFINAE context, so that we diagnose any
+ // further errors in the declaration itself.
+ typedef Sema::ActiveTemplateInstantiation ActiveInstType;
+ ActiveInstType &ActiveInst = SemaRef.ActiveTemplateInstantiations.back();
+ if (ActiveInst.Kind == ActiveInstType::ExplicitTemplateArgumentSubstitution ||
+ ActiveInst.Kind == ActiveInstType::DeducedTemplateArgumentSubstitution) {
+ if (FunctionTemplateDecl *FunTmpl
+ = dyn_cast<FunctionTemplateDecl>((Decl *)ActiveInst.Entity)) {
+ assert(FunTmpl->getTemplatedDecl() == Tmpl &&
+ "Deduction from the wrong function template?");
+ (void) FunTmpl;
+ ActiveInst.Kind = ActiveInstType::TemplateInstantiation;
+ ActiveInst.Entity = reinterpret_cast<uintptr_t>(New);
+ --SemaRef.NonInstantiationEntries;
+ }
+ }
+
+ const FunctionProtoType *Proto = Tmpl->getType()->getAs<FunctionProtoType>();
+ assert(Proto && "Function template without prototype?");
+
+ if (Proto->hasExceptionSpec() || Proto->getNoReturnAttr()) {
+ // The function has an exception specification or a "noreturn"
+ // attribute. Substitute into each of the exception types.
+ SmallVector<QualType, 4> Exceptions;
+ for (unsigned I = 0, N = Proto->getNumExceptions(); I != N; ++I) {
+ // FIXME: Poor location information!
+ if (const PackExpansionType *PackExpansion
+ = Proto->getExceptionType(I)->getAs<PackExpansionType>()) {
+ // We have a pack expansion. Instantiate it.
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ SemaRef.collectUnexpandedParameterPacks(PackExpansion->getPattern(),
+ Unexpanded);
+ assert(!Unexpanded.empty() &&
+ "Pack expansion without parameter packs?");
+
+ bool Expand = false;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> NumExpansions
+ = PackExpansion->getNumExpansions();
+ if (SemaRef.CheckParameterPacksForExpansion(New->getLocation(),
+ SourceRange(),
+ Unexpanded,
+ TemplateArgs,
+ Expand,
+ RetainExpansion,
+ NumExpansions))
+ break;
+
+ if (!Expand) {
+ // We can't expand this pack expansion into separate arguments yet;
+ // just substitute into the pattern and create a new pack expansion
+ // type.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, -1);
+ QualType T = SemaRef.SubstType(PackExpansion->getPattern(),
+ TemplateArgs,
+ New->getLocation(), New->getDeclName());
+ if (T.isNull())
+ break;
+
+ T = SemaRef.Context.getPackExpansionType(T, NumExpansions);
+ Exceptions.push_back(T);
+ continue;
+ }
+
+        // Substitute into the pack expansion pattern once for each expanded
+        // element.
+ bool Invalid = false;
+ for (unsigned ArgIdx = 0; ArgIdx != *NumExpansions; ++ArgIdx) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, ArgIdx);
+
+ QualType T = SemaRef.SubstType(PackExpansion->getPattern(),
+ TemplateArgs,
+ New->getLocation(), New->getDeclName());
+ if (T.isNull()) {
+ Invalid = true;
+ break;
+ }
+
+ Exceptions.push_back(T);
+ }
+
+ if (Invalid)
+ break;
+
+ continue;
+ }
+
+ QualType T
+ = SemaRef.SubstType(Proto->getExceptionType(I), TemplateArgs,
+ New->getLocation(), New->getDeclName());
+ if (T.isNull() ||
+ SemaRef.CheckSpecifiedExceptionType(T, New->getLocation()))
+ continue;
+
+ Exceptions.push_back(T);
+ }
+ Expr *NoexceptExpr = 0;
+ if (Expr *OldNoexceptExpr = Proto->getNoexceptExpr()) {
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
+ ExprResult E = SemaRef.SubstExpr(OldNoexceptExpr, TemplateArgs);
+ if (E.isUsable())
+ E = SemaRef.CheckBooleanCondition(E.get(), E.get()->getLocStart());
+
+ if (E.isUsable()) {
+ NoexceptExpr = E.take();
+ if (!NoexceptExpr->isTypeDependent() &&
+ !NoexceptExpr->isValueDependent())
+ NoexceptExpr = SemaRef.VerifyIntegerConstantExpression(NoexceptExpr,
+ 0, SemaRef.PDiag(diag::err_noexcept_needs_constant_expression),
+ /*AllowFold*/ false).take();
+ }
+ }
+
+ // Rebuild the function type
+
+ FunctionProtoType::ExtProtoInfo EPI = Proto->getExtProtoInfo();
+ EPI.ExceptionSpecType = Proto->getExceptionSpecType();
+ EPI.NumExceptions = Exceptions.size();
+ EPI.Exceptions = Exceptions.data();
+ EPI.NoexceptExpr = NoexceptExpr;
+ EPI.ExtInfo = Proto->getExtInfo();
+
+ const FunctionProtoType *NewProto
+ = New->getType()->getAs<FunctionProtoType>();
+ assert(NewProto && "Template instantiation without function prototype?");
+ New->setType(SemaRef.Context.getFunctionType(NewProto->getResultType(),
+ NewProto->arg_type_begin(),
+ NewProto->getNumArgs(),
+ EPI));
+ }
+
+ const FunctionDecl* Definition = Tmpl;
+
+ // Get the definition. Leaves the variable unchanged if undefined.
+ Tmpl->isDefined(Definition);
+
+ SemaRef.InstantiateAttrs(TemplateArgs, Definition, New,
+ LateAttrs, StartingScope);
+
+ return false;
+}
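+
+// Rough illustration of the exception-specification substitution above
+// (hypothetical user code; the names Y and g are assumptions):
+//
+//   template<typename ...Ts> struct Y {
+//     void g() throw(Ts...);
+//   };
+//   template struct Y<int, float>;
+//
+// The pack expansion "Ts..." in the dynamic exception specification expands
+// to {int, float}, and the instantiated function type of g() is rebuilt with
+// that exception list.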
+
+/// \brief Initializes common fields of an instantiated method
+/// declaration (New) from the corresponding fields of its template
+/// (Tmpl).
+///
+/// \returns true if there was an error
+bool
+TemplateDeclInstantiator::InitMethodInstantiation(CXXMethodDecl *New,
+ CXXMethodDecl *Tmpl) {
+ if (InitFunctionInstantiation(New, Tmpl))
+ return true;
+
+ New->setAccess(Tmpl->getAccess());
+ if (Tmpl->isVirtualAsWritten())
+ New->setVirtualAsWritten(true);
+
+ // FIXME: attributes
+ // FIXME: New needs a pointer to Tmpl
+ return false;
+}
+
+/// \brief Instantiate the definition of the given function from its
+/// template.
+///
+/// \param PointOfInstantiation the point at which the instantiation was
+/// required. Note that this is not precisely a "point of instantiation"
+/// for the function, but it's close.
+///
+/// \param Function the already-instantiated declaration of a
+/// function template specialization or member function of a class template
+/// specialization.
+///
+/// \param Recursive if true, recursively instantiates any functions that
+/// are required by this instantiation.
+///
+/// \param DefinitionRequired if true, then we are performing an explicit
+/// instantiation where the body of the function is required. Complain if
+/// there is no such body.
+void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
+ FunctionDecl *Function,
+ bool Recursive,
+ bool DefinitionRequired) {
+ if (Function->isInvalidDecl() || Function->isDefined())
+ return;
+
+ // Never instantiate an explicit specialization except if it is a class scope
+ // explicit specialization.
+ if (Function->getTemplateSpecializationKind() == TSK_ExplicitSpecialization &&
+ !Function->getClassScopeSpecializationPattern())
+ return;
+
+ // Find the function body that we'll be substituting.
+ const FunctionDecl *PatternDecl = Function->getTemplateInstantiationPattern();
+ assert(PatternDecl && "instantiating a non-template");
+
+ Stmt *Pattern = PatternDecl->getBody(PatternDecl);
+ assert(PatternDecl && "template definition is not a template");
+ if (!Pattern) {
+ // Try to find a defaulted definition
+ PatternDecl->isDefined(PatternDecl);
+ }
+ assert(PatternDecl && "template definition is not a template");
+
+ // Postpone late parsed template instantiations.
+ if (PatternDecl->isLateTemplateParsed() &&
+ !LateTemplateParser) {
+ PendingInstantiations.push_back(
+ std::make_pair(Function, PointOfInstantiation));
+ return;
+ }
+
+  // Call the LateTemplateParser callback if there is a need to late parse
+ // a templated function definition.
+ if (!Pattern && PatternDecl->isLateTemplateParsed() &&
+ LateTemplateParser) {
+ LateTemplateParser(OpaqueParser, PatternDecl);
+ Pattern = PatternDecl->getBody(PatternDecl);
+ }
+
+ if (!Pattern && !PatternDecl->isDefaulted()) {
+ if (DefinitionRequired) {
+ if (Function->getPrimaryTemplate())
+ Diag(PointOfInstantiation,
+ diag::err_explicit_instantiation_undefined_func_template)
+ << Function->getPrimaryTemplate();
+ else
+ Diag(PointOfInstantiation,
+ diag::err_explicit_instantiation_undefined_member)
+ << 1 << Function->getDeclName() << Function->getDeclContext();
+
+ if (PatternDecl)
+ Diag(PatternDecl->getLocation(),
+ diag::note_explicit_instantiation_here);
+ Function->setInvalidDecl();
+ } else if (Function->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDefinition) {
+ PendingInstantiations.push_back(
+ std::make_pair(Function, PointOfInstantiation));
+ }
+
+ return;
+ }
+
+ // C++0x [temp.explicit]p9:
+ // Except for inline functions, other explicit instantiation declarations
+ // have the effect of suppressing the implicit instantiation of the entity
+ // to which they refer.
+ if (Function->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDeclaration &&
+ !PatternDecl->isInlined())
+ return;
+
+ InstantiatingTemplate Inst(*this, PointOfInstantiation, Function);
+ if (Inst)
+ return;
+
+ // Copy the inner loc start from the pattern.
+ Function->setInnerLocStart(PatternDecl->getInnerLocStart());
+
+ // If we're performing recursive template instantiation, create our own
+ // queue of pending implicit instantiations that we will instantiate later,
+ // while we're still within our own instantiation context.
+ SmallVector<VTableUse, 16> SavedVTableUses;
+ std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
+ if (Recursive) {
+ VTableUses.swap(SavedVTableUses);
+ PendingInstantiations.swap(SavedPendingInstantiations);
+ }
+
+ EnterExpressionEvaluationContext EvalContext(*this,
+ Sema::PotentiallyEvaluated);
+ ActOnStartOfFunctionDef(0, Function);
+
+ // Introduce a new scope where local variable instantiations will be
+ // recorded, unless we're actually a member function within a local
+ // class, in which case we need to merge our results with the parent
+ // scope (of the enclosing function).
+ bool MergeWithParentScope = false;
+ if (CXXRecordDecl *Rec = dyn_cast<CXXRecordDecl>(Function->getDeclContext()))
+ MergeWithParentScope = Rec->isLocalClass();
+
+ LocalInstantiationScope Scope(*this, MergeWithParentScope);
+
+ // Enter the scope of this instantiation. We don't use
+ // PushDeclContext because we don't have a scope.
+ Sema::ContextRAII savedContext(*this, Function);
+
+ MultiLevelTemplateArgumentList TemplateArgs =
+ getTemplateInstantiationArgs(Function, 0, false, PatternDecl);
+
+ // Introduce the instantiated function parameters into the local
+ // instantiation scope, and set the parameter names to those used
+ // in the template.
+ unsigned FParamIdx = 0;
+ for (unsigned I = 0, N = PatternDecl->getNumParams(); I != N; ++I) {
+ const ParmVarDecl *PatternParam = PatternDecl->getParamDecl(I);
+ if (!PatternParam->isParameterPack()) {
+ // Simple case: not a parameter pack.
+ assert(FParamIdx < Function->getNumParams());
+ ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx);
+ FunctionParam->setDeclName(PatternParam->getDeclName());
+ Scope.InstantiatedLocal(PatternParam, FunctionParam);
+ ++FParamIdx;
+ continue;
+ }
+
+ // Expand the parameter pack.
+ Scope.MakeInstantiatedLocalArgPack(PatternParam);
+ unsigned NumArgumentsInExpansion
+ = getNumArgumentsInExpansion(PatternParam->getType(), TemplateArgs);
+ for (unsigned Arg = 0; Arg < NumArgumentsInExpansion; ++Arg) {
+ ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx);
+ FunctionParam->setDeclName(PatternParam->getDeclName());
+ Scope.InstantiatedLocalPackArg(PatternParam, FunctionParam);
+ ++FParamIdx;
+ }
+ }
+
+ if (PatternDecl->isDefaulted()) {
+ ActOnFinishFunctionBody(Function, 0, /*IsInstantiation=*/true);
+
+ SetDeclDefaulted(Function, PatternDecl->getLocation());
+ } else {
+ // If this is a constructor, instantiate the member initializers.
+ if (const CXXConstructorDecl *Ctor =
+ dyn_cast<CXXConstructorDecl>(PatternDecl)) {
+ InstantiateMemInitializers(cast<CXXConstructorDecl>(Function), Ctor,
+ TemplateArgs);
+ }
+
+ // Instantiate the function body.
+ StmtResult Body = SubstStmt(Pattern, TemplateArgs);
+
+ if (Body.isInvalid())
+ Function->setInvalidDecl();
+
+ ActOnFinishFunctionBody(Function, Body.get(),
+ /*IsInstantiation=*/true);
+ }
+
+ PerformDependentDiagnostics(PatternDecl, TemplateArgs);
+
+ savedContext.pop();
+
+ DeclGroupRef DG(Function);
+ Consumer.HandleTopLevelDecl(DG);
+
+ // This class may have local implicit instantiations that need to be
+  // instantiated within this scope.
+ PerformPendingInstantiations(/*LocalOnly=*/true);
+ Scope.Exit();
+
+ if (Recursive) {
+ // Define any pending vtables.
+ DefineUsedVTables();
+
+ // Instantiate any pending implicit instantiations found during the
+ // instantiation of this template.
+ PerformPendingInstantiations();
+
+ // Restore the set of pending vtables.
+ assert(VTableUses.empty() &&
+ "VTableUses should be empty before it is discarded.");
+ VTableUses.swap(SavedVTableUses);
+
+ // Restore the set of pending implicit instantiations.
+ assert(PendingInstantiations.empty() &&
+ "PendingInstantiations should be empty before it is discarded.");
+ PendingInstantiations.swap(SavedPendingInstantiations);
+ }
+}
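+
+// Rough illustration of when this entry point runs (hypothetical user code;
+// the name smallest is an assumption):
+//
+//   template<typename T> T smallest(T a, T b) { return a < b ? a : b; }
+//   int z = smallest(1, 2);   // requires the definition of smallest<int>
+//
+// The call queues an implicit instantiation; the pattern's body is
+// substituted here, and anything it requires in turn is handled through
+// PendingInstantiations when Recursive is true.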
+
+/// \brief Instantiate the definition of the given variable from its
+/// template.
+///
+/// \param PointOfInstantiation the point at which the instantiation was
+/// required. Note that this is not precisely a "point of instantiation"
+/// for the variable, but it's close.
+///
+/// \param Var the already-instantiated declaration of a static member
+/// variable of a class template specialization.
+///
+/// \param Recursive if true, recursively instantiates any functions that
+/// are required by this instantiation.
+///
+/// \param DefinitionRequired if true, then we are performing an explicit
+/// instantiation where an out-of-line definition of the member variable
+/// is required. Complain if there is no such definition.
+void Sema::InstantiateStaticDataMemberDefinition(
+ SourceLocation PointOfInstantiation,
+ VarDecl *Var,
+ bool Recursive,
+ bool DefinitionRequired) {
+ if (Var->isInvalidDecl())
+ return;
+
+ // Find the out-of-line definition of this static data member.
+ VarDecl *Def = Var->getInstantiatedFromStaticDataMember();
+ assert(Def && "This data member was not instantiated from a template?");
+ assert(Def->isStaticDataMember() && "Not a static data member?");
+ Def = Def->getOutOfLineDefinition();
+
+ if (!Def) {
+ // We did not find an out-of-line definition of this static data member,
+ // so we won't perform any instantiation. Rather, we rely on the user to
+ // instantiate this definition (or provide a specialization for it) in
+ // another translation unit.
+ if (DefinitionRequired) {
+ Def = Var->getInstantiatedFromStaticDataMember();
+ Diag(PointOfInstantiation,
+ diag::err_explicit_instantiation_undefined_member)
+ << 2 << Var->getDeclName() << Var->getDeclContext();
+ Diag(Def->getLocation(), diag::note_explicit_instantiation_here);
+ } else if (Var->getTemplateSpecializationKind()
+ == TSK_ExplicitInstantiationDefinition) {
+ PendingInstantiations.push_back(
+ std::make_pair(Var, PointOfInstantiation));
+ }
+
+ return;
+ }
+
+ TemplateSpecializationKind TSK = Var->getTemplateSpecializationKind();
+
+ // Never instantiate an explicit specialization.
+ if (TSK == TSK_ExplicitSpecialization)
+ return;
+
+ // C++0x [temp.explicit]p9:
+ // Except for inline functions, other explicit instantiation declarations
+ // have the effect of suppressing the implicit instantiation of the entity
+ // to which they refer.
+ if (TSK == TSK_ExplicitInstantiationDeclaration)
+ return;
+
+ Consumer.HandleCXXStaticMemberVarInstantiation(Var);
+
+ // If we already have a definition, we're done.
+ if (VarDecl *Def = Var->getDefinition()) {
+ // We may be explicitly instantiating something we've already implicitly
+ // instantiated.
+ Def->setTemplateSpecializationKind(Var->getTemplateSpecializationKind(),
+ PointOfInstantiation);
+ return;
+ }
+
+ InstantiatingTemplate Inst(*this, PointOfInstantiation, Var);
+ if (Inst)
+ return;
+
+ // If we're performing recursive template instantiation, create our own
+ // queue of pending implicit instantiations that we will instantiate later,
+ // while we're still within our own instantiation context.
+ SmallVector<VTableUse, 16> SavedVTableUses;
+ std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
+ if (Recursive) {
+ VTableUses.swap(SavedVTableUses);
+ PendingInstantiations.swap(SavedPendingInstantiations);
+ }
+
+ // Enter the scope of this instantiation. We don't use
+ // PushDeclContext because we don't have a scope.
+ ContextRAII previousContext(*this, Var->getDeclContext());
+ LocalInstantiationScope Local(*this);
+
+ VarDecl *OldVar = Var;
+ Var = cast_or_null<VarDecl>(SubstDecl(Def, Var->getDeclContext(),
+ getTemplateInstantiationArgs(Var)));
+
+ previousContext.pop();
+
+ if (Var) {
+ MemberSpecializationInfo *MSInfo = OldVar->getMemberSpecializationInfo();
+ assert(MSInfo && "Missing member specialization information?");
+ Var->setTemplateSpecializationKind(MSInfo->getTemplateSpecializationKind(),
+ MSInfo->getPointOfInstantiation());
+ DeclGroupRef DG(Var);
+ Consumer.HandleTopLevelDecl(DG);
+ }
+ Local.Exit();
+
+ if (Recursive) {
+ // Define any newly required vtables.
+ DefineUsedVTables();
+
+ // Instantiate any pending implicit instantiations found during the
+ // instantiation of this template.
+ PerformPendingInstantiations();
+
+ // Restore the set of pending vtables.
+ assert(VTableUses.empty() &&
+ "VTableUses should be empty before it is discarded, "
+ "while instantiating static data member.");
+ VTableUses.swap(SavedVTableUses);
+
+ // Restore the set of pending implicit instantiations.
+ assert(PendingInstantiations.empty() &&
+ "PendingInstantiations should be empty before it is discarded, "
+ "while instantiating static data member.");
+ PendingInstantiations.swap(SavedPendingInstantiations);
+ }
+}
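+
+// Rough illustration of when this entry point runs (hypothetical user code;
+// the name Counter is an assumption):
+//
+//   template<typename T> struct Counter { static int count; };
+//   template<typename T> int Counter<T>::count = 0;  // out-of-line definition
+//   int n = Counter<char>::count;                     // odr-use
+//
+// The use of Counter<char>::count requires instantiating the out-of-line
+// definition; without such a definition, instantiation is left to another
+// translation unit as described above.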
+
+void
+Sema::InstantiateMemInitializers(CXXConstructorDecl *New,
+ const CXXConstructorDecl *Tmpl,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+
+ SmallVector<CXXCtorInitializer*, 4> NewInits;
+ bool AnyErrors = false;
+
+ // Instantiate all the initializers.
+ for (CXXConstructorDecl::init_const_iterator Inits = Tmpl->init_begin(),
+ InitsEnd = Tmpl->init_end();
+ Inits != InitsEnd; ++Inits) {
+ CXXCtorInitializer *Init = *Inits;
+
+    // Only instantiate written initializers; let Sema re-construct implicit
+ // ones.
+ if (!Init->isWritten())
+ continue;
+
+ SourceLocation EllipsisLoc;
+
+ if (Init->isPackExpansion()) {
+ // This is a pack expansion. We should expand it now.
+ TypeLoc BaseTL = Init->getTypeSourceInfo()->getTypeLoc();
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ collectUnexpandedParameterPacks(BaseTL, Unexpanded);
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> NumExpansions;
+ if (CheckParameterPacksForExpansion(Init->getEllipsisLoc(),
+ BaseTL.getSourceRange(),
+ Unexpanded,
+ TemplateArgs, ShouldExpand,
+ RetainExpansion,
+ NumExpansions)) {
+ AnyErrors = true;
+ New->setInvalidDecl();
+ continue;
+ }
+ assert(ShouldExpand && "Partial instantiation of base initializer?");
+
+      // Loop over all of the arguments in the argument pack(s).
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, I);
+
+ // Instantiate the initializer.
+ ExprResult TempInit = SubstInitializer(Init->getInit(), TemplateArgs,
+ /*CXXDirectInit=*/true);
+ if (TempInit.isInvalid()) {
+ AnyErrors = true;
+ break;
+ }
+
+ // Instantiate the base type.
+ TypeSourceInfo *BaseTInfo = SubstType(Init->getTypeSourceInfo(),
+ TemplateArgs,
+ Init->getSourceLocation(),
+ New->getDeclName());
+ if (!BaseTInfo) {
+ AnyErrors = true;
+ break;
+ }
+
+ // Build the initializer.
+ MemInitResult NewInit = BuildBaseInitializer(BaseTInfo->getType(),
+ BaseTInfo, TempInit.take(),
+ New->getParent(),
+ SourceLocation());
+ if (NewInit.isInvalid()) {
+ AnyErrors = true;
+ break;
+ }
+
+ NewInits.push_back(NewInit.get());
+ }
+
+ continue;
+ }
+
+ // Instantiate the initializer.
+ ExprResult TempInit = SubstInitializer(Init->getInit(), TemplateArgs,
+ /*CXXDirectInit=*/true);
+ if (TempInit.isInvalid()) {
+ AnyErrors = true;
+ continue;
+ }
+
+ MemInitResult NewInit;
+ if (Init->isDelegatingInitializer() || Init->isBaseInitializer()) {
+ TypeSourceInfo *TInfo = SubstType(Init->getTypeSourceInfo(),
+ TemplateArgs,
+ Init->getSourceLocation(),
+ New->getDeclName());
+ if (!TInfo) {
+ AnyErrors = true;
+ New->setInvalidDecl();
+ continue;
+ }
+
+ if (Init->isBaseInitializer())
+ NewInit = BuildBaseInitializer(TInfo->getType(), TInfo, TempInit.take(),
+ New->getParent(), EllipsisLoc);
+ else
+ NewInit = BuildDelegatingInitializer(TInfo, TempInit.take(),
+ cast<CXXRecordDecl>(CurContext->getParent()));
+ } else if (Init->isMemberInitializer()) {
+ FieldDecl *Member = cast_or_null<FieldDecl>(FindInstantiatedDecl(
+ Init->getMemberLocation(),
+ Init->getMember(),
+ TemplateArgs));
+ if (!Member) {
+ AnyErrors = true;
+ New->setInvalidDecl();
+ continue;
+ }
+
+ NewInit = BuildMemberInitializer(Member, TempInit.take(),
+ Init->getSourceLocation());
+ } else if (Init->isIndirectMemberInitializer()) {
+ IndirectFieldDecl *IndirectMember =
+ cast_or_null<IndirectFieldDecl>(FindInstantiatedDecl(
+ Init->getMemberLocation(),
+ Init->getIndirectMember(), TemplateArgs));
+
+ if (!IndirectMember) {
+ AnyErrors = true;
+ New->setInvalidDecl();
+ continue;
+ }
+
+ NewInit = BuildMemberInitializer(IndirectMember, TempInit.take(),
+ Init->getSourceLocation());
+ }
+
+ if (NewInit.isInvalid()) {
+ AnyErrors = true;
+ New->setInvalidDecl();
+ } else {
+ NewInits.push_back(NewInit.get());
+ }
+ }
+
+ // Assign all the initializers to the new constructor.
+ ActOnMemInitializers(New,
+ /*FIXME: ColonLoc */
+ SourceLocation(),
+ NewInits.data(), NewInits.size(),
+ AnyErrors);
+}
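+
+// Rough illustration of the pack-expansion initializer case above
+// (hypothetical user code; the names Mixin, A, and B are assumptions):
+//
+//   template<typename ...Bases> struct Mixin : Bases... {
+//     Mixin() : Bases()... {}
+//   };
+//
+// For Mixin<A, B>, the single written initializer "Bases()..." expands into
+// one base initializer per element of the pack, each rebuilt with
+// BuildBaseInitializer on the substituted base type.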
+
+ExprResult Sema::SubstInitializer(Expr *Init,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool CXXDirectInit) {
+ // Initializers are instantiated like expressions, except that various outer
+ // layers are stripped.
+ if (!Init)
+ return Owned(Init);
+
+ if (ExprWithCleanups *ExprTemp = dyn_cast<ExprWithCleanups>(Init))
+ Init = ExprTemp->getSubExpr();
+
+ while (CXXBindTemporaryExpr *Binder = dyn_cast<CXXBindTemporaryExpr>(Init))
+ Init = Binder->getSubExpr();
+
+ if (ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Init))
+ Init = ICE->getSubExprAsWritten();
+
+ // If this is a direct-initializer, we take apart CXXConstructExprs.
+ // Everything else is passed through.
+ CXXConstructExpr *Construct;
+ if (!CXXDirectInit || !(Construct = dyn_cast<CXXConstructExpr>(Init)) ||
+ isa<CXXTemporaryObjectExpr>(Construct))
+ return SubstExpr(Init, TemplateArgs);
+
+ ASTOwningVector<Expr*> NewArgs(*this);
+ if (SubstExprs(Construct->getArgs(), Construct->getNumArgs(), true,
+ TemplateArgs, NewArgs))
+ return ExprError();
+
+ // Treat an empty initializer like none.
+ if (NewArgs.empty())
+ return Owned((Expr*)0);
+
+ // Build a ParenListExpr to represent anything else.
+ // FIXME: Fake locations!
+ SourceLocation Loc = PP.getLocForEndOfToken(Init->getLocStart());
+ return ActOnParenListExpr(Loc, Loc, move_arg(NewArgs));
+}
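+
+// Rough illustration of the direct-initialization case above (hypothetical
+// user code; the names S and W are assumptions):
+//
+//   struct S { S(int, int); };
+//   template<typename T> struct W {
+//     S s;
+//     W() : s(1, 2) {}   // stored as a CXXConstructExpr in the pattern
+//   };
+//
+// On instantiation the CXXConstructExpr is taken apart and its substituted
+// arguments are repackaged as a ParenListExpr, so member initialization is
+// performed afresh for the instantiated constructor.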
+
+// TODO: this could be templated if the various decl types used the
+// same method name.
+static bool isInstantiationOf(ClassTemplateDecl *Pattern,
+ ClassTemplateDecl *Instance) {
+ Pattern = Pattern->getCanonicalDecl();
+
+ do {
+ Instance = Instance->getCanonicalDecl();
+ if (Pattern == Instance) return true;
+ Instance = Instance->getInstantiatedFromMemberTemplate();
+ } while (Instance);
+
+ return false;
+}
+
+static bool isInstantiationOf(FunctionTemplateDecl *Pattern,
+ FunctionTemplateDecl *Instance) {
+ Pattern = Pattern->getCanonicalDecl();
+
+ do {
+ Instance = Instance->getCanonicalDecl();
+ if (Pattern == Instance) return true;
+ Instance = Instance->getInstantiatedFromMemberTemplate();
+ } while (Instance);
+
+ return false;
+}
+
+static bool
+isInstantiationOf(ClassTemplatePartialSpecializationDecl *Pattern,
+ ClassTemplatePartialSpecializationDecl *Instance) {
+ Pattern
+ = cast<ClassTemplatePartialSpecializationDecl>(Pattern->getCanonicalDecl());
+ do {
+ Instance = cast<ClassTemplatePartialSpecializationDecl>(
+ Instance->getCanonicalDecl());
+ if (Pattern == Instance)
+ return true;
+ Instance = Instance->getInstantiatedFromMember();
+ } while (Instance);
+
+ return false;
+}
+
+static bool isInstantiationOf(CXXRecordDecl *Pattern,
+ CXXRecordDecl *Instance) {
+ Pattern = Pattern->getCanonicalDecl();
+
+ do {
+ Instance = Instance->getCanonicalDecl();
+ if (Pattern == Instance) return true;
+ Instance = Instance->getInstantiatedFromMemberClass();
+ } while (Instance);
+
+ return false;
+}
+
+static bool isInstantiationOf(FunctionDecl *Pattern,
+ FunctionDecl *Instance) {
+ Pattern = Pattern->getCanonicalDecl();
+
+ do {
+ Instance = Instance->getCanonicalDecl();
+ if (Pattern == Instance) return true;
+ Instance = Instance->getInstantiatedFromMemberFunction();
+ } while (Instance);
+
+ return false;
+}
+
+static bool isInstantiationOf(EnumDecl *Pattern,
+ EnumDecl *Instance) {
+ Pattern = Pattern->getCanonicalDecl();
+
+ do {
+ Instance = Instance->getCanonicalDecl();
+ if (Pattern == Instance) return true;
+ Instance = Instance->getInstantiatedFromMemberEnum();
+ } while (Instance);
+
+ return false;
+}
+
+static bool isInstantiationOf(UsingShadowDecl *Pattern,
+ UsingShadowDecl *Instance,
+ ASTContext &C) {
+ return C.getInstantiatedFromUsingShadowDecl(Instance) == Pattern;
+}
+
+static bool isInstantiationOf(UsingDecl *Pattern,
+ UsingDecl *Instance,
+ ASTContext &C) {
+ return C.getInstantiatedFromUsingDecl(Instance) == Pattern;
+}
+
+static bool isInstantiationOf(UnresolvedUsingValueDecl *Pattern,
+ UsingDecl *Instance,
+ ASTContext &C) {
+ return C.getInstantiatedFromUsingDecl(Instance) == Pattern;
+}
+
+static bool isInstantiationOf(UnresolvedUsingTypenameDecl *Pattern,
+ UsingDecl *Instance,
+ ASTContext &C) {
+ return C.getInstantiatedFromUsingDecl(Instance) == Pattern;
+}
+
+static bool isInstantiationOfStaticDataMember(VarDecl *Pattern,
+ VarDecl *Instance) {
+ assert(Instance->isStaticDataMember());
+
+ Pattern = Pattern->getCanonicalDecl();
+
+ do {
+ Instance = Instance->getCanonicalDecl();
+ if (Pattern == Instance) return true;
+ Instance = Instance->getInstantiatedFromStaticDataMember();
+ } while (Instance);
+
+ return false;
+}
+
+// Other is the prospective instantiation
+// D is the prospective pattern
+static bool isInstantiationOf(ASTContext &Ctx, NamedDecl *D, Decl *Other) {
+ if (D->getKind() != Other->getKind()) {
+ if (UnresolvedUsingTypenameDecl *UUD
+ = dyn_cast<UnresolvedUsingTypenameDecl>(D)) {
+ if (UsingDecl *UD = dyn_cast<UsingDecl>(Other)) {
+ return isInstantiationOf(UUD, UD, Ctx);
+ }
+ }
+
+ if (UnresolvedUsingValueDecl *UUD
+ = dyn_cast<UnresolvedUsingValueDecl>(D)) {
+ if (UsingDecl *UD = dyn_cast<UsingDecl>(Other)) {
+ return isInstantiationOf(UUD, UD, Ctx);
+ }
+ }
+
+ return false;
+ }
+
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(Other))
+ return isInstantiationOf(cast<CXXRecordDecl>(D), Record);
+
+ if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Other))
+ return isInstantiationOf(cast<FunctionDecl>(D), Function);
+
+ if (EnumDecl *Enum = dyn_cast<EnumDecl>(Other))
+ return isInstantiationOf(cast<EnumDecl>(D), Enum);
+
+ if (VarDecl *Var = dyn_cast<VarDecl>(Other))
+ if (Var->isStaticDataMember())
+ return isInstantiationOfStaticDataMember(cast<VarDecl>(D), Var);
+
+ if (ClassTemplateDecl *Temp = dyn_cast<ClassTemplateDecl>(Other))
+ return isInstantiationOf(cast<ClassTemplateDecl>(D), Temp);
+
+ if (FunctionTemplateDecl *Temp = dyn_cast<FunctionTemplateDecl>(Other))
+ return isInstantiationOf(cast<FunctionTemplateDecl>(D), Temp);
+
+ if (ClassTemplatePartialSpecializationDecl *PartialSpec
+ = dyn_cast<ClassTemplatePartialSpecializationDecl>(Other))
+ return isInstantiationOf(cast<ClassTemplatePartialSpecializationDecl>(D),
+ PartialSpec);
+
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(Other)) {
+ if (!Field->getDeclName()) {
+ // This is an unnamed field.
+ return Ctx.getInstantiatedFromUnnamedFieldDecl(Field) ==
+ cast<FieldDecl>(D);
+ }
+ }
+
+ if (UsingDecl *Using = dyn_cast<UsingDecl>(Other))
+ return isInstantiationOf(cast<UsingDecl>(D), Using, Ctx);
+
+ if (UsingShadowDecl *Shadow = dyn_cast<UsingShadowDecl>(Other))
+ return isInstantiationOf(cast<UsingShadowDecl>(D), Shadow, Ctx);
+
+ return D->getDeclName() && isa<NamedDecl>(Other) &&
+ D->getDeclName() == cast<NamedDecl>(Other)->getDeclName();
+}
+
+template<typename ForwardIterator>
+static NamedDecl *findInstantiationOf(ASTContext &Ctx,
+ NamedDecl *D,
+ ForwardIterator first,
+ ForwardIterator last) {
+ for (; first != last; ++first)
+ if (isInstantiationOf(Ctx, D, *first))
+ return cast<NamedDecl>(*first);
+
+ return 0;
+}
+
+/// \brief Finds the instantiation of the given declaration context
+/// within the current instantiation.
+///
+/// \returns NULL if there was an error
+DeclContext *Sema::FindInstantiatedContext(SourceLocation Loc, DeclContext* DC,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ if (NamedDecl *D = dyn_cast<NamedDecl>(DC)) {
+ Decl* ID = FindInstantiatedDecl(Loc, D, TemplateArgs);
+ return cast_or_null<DeclContext>(ID);
+ } else return DC;
+}
+
+/// \brief Find the instantiation of the given declaration within the
+/// current instantiation.
+///
+/// This routine is intended to be used when \p D is a declaration
+/// referenced from within a template, that needs to be mapped into the
+/// corresponding declaration within an instantiation. For example,
+/// given:
+///
+/// \code
+/// template<typename T>
+/// struct X {
+/// enum Kind {
+/// KnownValue = sizeof(T)
+/// };
+///
+/// bool getKind() const { return KnownValue; }
+/// };
+///
+/// template struct X<int>;
+/// \endcode
+///
+/// In the instantiation of X<int>::getKind(), we need to map the
+/// EnumConstantDecl for KnownValue (which refers to
+/// X<T>::<Kind>::KnownValue) to its instantiation
+/// (X<int>::<Kind>::KnownValue). FindInstantiatedDecl() performs
+/// this mapping from within the instantiation of X<int>.
+NamedDecl *Sema::FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ DeclContext *ParentDC = D->getDeclContext();
+ if (isa<ParmVarDecl>(D) || isa<NonTypeTemplateParmDecl>(D) ||
+ isa<TemplateTypeParmDecl>(D) || isa<TemplateTemplateParmDecl>(D) ||
+ (ParentDC->isFunctionOrMethod() && ParentDC->isDependentContext()) ||
+ (isa<CXXRecordDecl>(D) && cast<CXXRecordDecl>(D)->isLambda())) {
+ // D is a local of some kind. Look into the map of local
+ // declarations to their instantiations.
+ typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> *Found
+ = CurrentInstantiationScope->findInstantiationOf(D);
+
+ if (Found) {
+ if (Decl *FD = Found->dyn_cast<Decl *>())
+ return cast<NamedDecl>(FD);
+
+ unsigned PackIdx = ArgumentPackSubstitutionIndex;
+ return cast<NamedDecl>((*Found->get<DeclArgumentPack *>())[PackIdx]);
+ }
+
+ // If we didn't find the decl, then we must have a label decl that hasn't
+ // been found yet. Lazily instantiate it and return it now.
+ assert(isa<LabelDecl>(D));
+
+ Decl *Inst = SubstDecl(D, CurContext, TemplateArgs);
+ assert(Inst && "Failed to instantiate label??");
+
+ CurrentInstantiationScope->InstantiatedLocal(D, Inst);
+ return cast<LabelDecl>(Inst);
+ }
+
+ if (CXXRecordDecl *Record = dyn_cast<CXXRecordDecl>(D)) {
+ if (!Record->isDependentContext())
+ return D;
+
+ // Determine whether this record is the "templated" declaration describing
+ // a class template or class template partial specialization.
+ ClassTemplateDecl *ClassTemplate = Record->getDescribedClassTemplate();
+ if (ClassTemplate)
+ ClassTemplate = ClassTemplate->getCanonicalDecl();
+ else if (ClassTemplatePartialSpecializationDecl *PartialSpec
+ = dyn_cast<ClassTemplatePartialSpecializationDecl>(Record))
+ ClassTemplate = PartialSpec->getSpecializedTemplate()->getCanonicalDecl();
+
+ // Walk the current context to find either the record or an instantiation of
+ // it.
+ DeclContext *DC = CurContext;
+ while (!DC->isFileContext()) {
+ // If we're performing substitution while we're inside the template
+ // definition, we'll find our own context. We're done.
+ if (DC->Equals(Record))
+ return Record;
+
+ if (CXXRecordDecl *InstRecord = dyn_cast<CXXRecordDecl>(DC)) {
+ // Check whether we're in the process of instantiating a class template
+ // specialization of the template we're mapping.
+ if (ClassTemplateSpecializationDecl *InstSpec
+ = dyn_cast<ClassTemplateSpecializationDecl>(InstRecord)){
+ ClassTemplateDecl *SpecTemplate = InstSpec->getSpecializedTemplate();
+ if (ClassTemplate && isInstantiationOf(ClassTemplate, SpecTemplate))
+ return InstRecord;
+ }
+
+ // Check whether we're in the process of instantiating a member class.
+ if (isInstantiationOf(Record, InstRecord))
+ return InstRecord;
+ }
+
+
+ // Move to the outer template scope.
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(DC)) {
+ if (FD->getFriendObjectKind() && FD->getDeclContext()->isFileContext()){
+ DC = FD->getLexicalDeclContext();
+ continue;
+ }
+ }
+
+ DC = DC->getParent();
+ }
+
+ // Fall through to deal with other dependent record types (e.g.,
+ // anonymous unions in class templates).
+ }
+
+ if (!ParentDC->isDependentContext())
+ return D;
+
+ ParentDC = FindInstantiatedContext(Loc, ParentDC, TemplateArgs);
+ if (!ParentDC)
+ return 0;
+
+ if (ParentDC != D->getDeclContext()) {
+ // We performed some kind of instantiation in the parent context,
+ // so now we need to look into the instantiated parent context to
+ // find the instantiation of the declaration D.
+
+ // If our context used to be dependent, we may need to instantiate
+ // it before performing lookup into that context.
+ bool IsBeingInstantiated = false;
+ if (CXXRecordDecl *Spec = dyn_cast<CXXRecordDecl>(ParentDC)) {
+ if (!Spec->isDependentContext()) {
+ QualType T = Context.getTypeDeclType(Spec);
+ const RecordType *Tag = T->getAs<RecordType>();
+ assert(Tag && "type of non-dependent record is not a RecordType");
+ if (Tag->isBeingDefined())
+ IsBeingInstantiated = true;
+ if (!Tag->isBeingDefined() &&
+ RequireCompleteType(Loc, T, diag::err_incomplete_type))
+ return 0;
+
+ ParentDC = Tag->getDecl();
+ }
+ }
+
+ NamedDecl *Result = 0;
+ if (D->getDeclName()) {
+ DeclContext::lookup_result Found = ParentDC->lookup(D->getDeclName());
+ Result = findInstantiationOf(Context, D, Found.first, Found.second);
+ } else {
+ // Since we don't have a name for the entity we're looking for,
+ // our only option is to walk through all of the declarations to
+ // find that name. This will occur in a few cases:
+ //
+ // - anonymous struct/union within a template
+ // - unnamed class/struct/union/enum within a template
+ //
+ // FIXME: Find a better way to find these instantiations!
+ Result = findInstantiationOf(Context, D,
+ ParentDC->decls_begin(),
+ ParentDC->decls_end());
+ }
+
+ if (!Result) {
+ if (isa<UsingShadowDecl>(D)) {
+ // UsingShadowDecls can instantiate to nothing because of using hiding.
+ } else if (Diags.hasErrorOccurred()) {
+ // We've already complained about something, so most likely this
+ // declaration failed to instantiate. There's no point in complaining
+ // further, since this is normal in invalid code.
+ } else if (IsBeingInstantiated) {
+ // The class in which this member exists is currently being
+ // instantiated, and we haven't gotten around to instantiating this
+ // member yet. This can happen when the code uses forward declarations
+ // of member classes, and introduces ordering dependencies via
+ // template instantiation.
+ Diag(Loc, diag::err_member_not_yet_instantiated)
+ << D->getDeclName()
+ << Context.getTypeDeclType(cast<CXXRecordDecl>(ParentDC));
+ Diag(D->getLocation(), diag::note_non_instantiated_member_here);
+ } else if (EnumConstantDecl *ED = dyn_cast<EnumConstantDecl>(D)) {
+ // This enumeration constant was found when the template was defined,
+ // but can't be found in the instantiation. This can happen if an
+ // unscoped enumeration member is explicitly specialized.
+ EnumDecl *Enum = cast<EnumDecl>(ED->getLexicalDeclContext());
+ EnumDecl *Spec = cast<EnumDecl>(FindInstantiatedDecl(Loc, Enum,
+ TemplateArgs));
+ assert(Spec->getTemplateSpecializationKind() ==
+ TSK_ExplicitSpecialization);
+ Diag(Loc, diag::err_enumerator_does_not_exist)
+ << D->getDeclName()
+ << Context.getTypeDeclType(cast<TypeDecl>(Spec->getDeclContext()));
+ Diag(Spec->getLocation(), diag::note_enum_specialized_here)
+ << Context.getTypeDeclType(Spec);
+ } else {
+ // We should have found something, but didn't.
+ llvm_unreachable("Unable to find instantiation of declaration!");
+ }
+ }
+
+ D = Result;
+ }
+
+ return D;
+}
+
+/// \brief Performs template instantiation for all implicit template
+/// instantiations we have seen until this point.
+void Sema::PerformPendingInstantiations(bool LocalOnly) {
+ // Load pending instantiations from the external source.
+ if (!LocalOnly && ExternalSource) {
+ SmallVector<std::pair<ValueDecl *, SourceLocation>, 4> Pending;
+ ExternalSource->ReadPendingInstantiations(Pending);
+ PendingInstantiations.insert(PendingInstantiations.begin(),
+ Pending.begin(), Pending.end());
+ }
+
+ while (!PendingLocalImplicitInstantiations.empty() ||
+ (!LocalOnly && !PendingInstantiations.empty())) {
+ PendingImplicitInstantiation Inst;
+
+ if (PendingLocalImplicitInstantiations.empty()) {
+ Inst = PendingInstantiations.front();
+ PendingInstantiations.pop_front();
+ } else {
+ Inst = PendingLocalImplicitInstantiations.front();
+ PendingLocalImplicitInstantiations.pop_front();
+ }
+
+ // Instantiate function definitions
+ if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Inst.first)) {
+ PrettyDeclStackTraceEntry CrashInfo(*this, Function, SourceLocation(),
+ "instantiating function definition");
+ bool DefinitionRequired = Function->getTemplateSpecializationKind() ==
+ TSK_ExplicitInstantiationDefinition;
+ InstantiateFunctionDefinition(/*FIXME:*/Inst.second, Function, true,
+ DefinitionRequired);
+ continue;
+ }
+
+ // Instantiate static data member definitions.
+ VarDecl *Var = cast<VarDecl>(Inst.first);
+ assert(Var->isStaticDataMember() && "Not a static data member?");
+
+ // Don't try to instantiate declarations if the most recent redeclaration
+ // is invalid.
+ if (Var->getMostRecentDecl()->isInvalidDecl())
+ continue;
+
+ // Check if the most recent declaration has changed the specialization kind
+ // and removed the need for implicit instantiation.
+ switch (Var->getMostRecentDecl()->getTemplateSpecializationKind()) {
+ case TSK_Undeclared:
+ llvm_unreachable("Cannot instantitiate an undeclared specialization.");
+ case TSK_ExplicitInstantiationDeclaration:
+ case TSK_ExplicitSpecialization:
+ continue; // No longer need to instantiate this type.
+ case TSK_ExplicitInstantiationDefinition:
+ // We only need an instantiation if the pending instantiation *is* the
+ // explicit instantiation.
+ if (Var != Var->getMostRecentDecl()) continue;
+ case TSK_ImplicitInstantiation:
+ break;
+ }
+
+ PrettyDeclStackTraceEntry CrashInfo(*this, Var, Var->getLocation(),
+ "instantiating static data member "
+ "definition");
+
+ bool DefinitionRequired = Var->getTemplateSpecializationKind() ==
+ TSK_ExplicitInstantiationDefinition;
+ InstantiateStaticDataMemberDefinition(/*FIXME:*/Inst.second, Var, true,
+ DefinitionRequired);
+ }
+}
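+
+// For illustration only -- hypothetical user code, not from this file; the
+// names 'twice' and 'Holder' are made up. It sketches what typically ends up
+// in the pending-instantiation queue drained above:
+//
+//   template<typename T> T twice(T x) { return x + x; }
+//   template<typename T> struct Holder { static T value; };
+//   template<typename T> T Holder<T>::value;
+//
+//   int a = twice(21);           // queues the definition of twice<int>
+//   int b = Holder<int>::value;  // queues the static data member definition
+//
+// Both definitions are instantiated when this routine runs, typically at the
+// end of the translation unit.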
+
+void Sema::PerformDependentDiagnostics(const DeclContext *Pattern,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ for (DeclContext::ddiag_iterator I = Pattern->ddiag_begin(),
+ E = Pattern->ddiag_end(); I != E; ++I) {
+ DependentDiagnostic *DD = *I;
+
+ switch (DD->getKind()) {
+ case DependentDiagnostic::Access:
+ HandleDependentAccessCheck(*DD, TemplateArgs);
+ break;
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp
new file mode 100644
index 0000000..a40100c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -0,0 +1,794 @@
+//===------- SemaTemplateVariadic.cpp - C++ Variadic Templates ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// This file implements semantic analysis for C++0x variadic templates.
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Template.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/TypeLoc.h"
+
+using namespace clang;
+
+//----------------------------------------------------------------------------
+// Visitor that collects unexpanded parameter packs
+//----------------------------------------------------------------------------
+
+namespace {
+ /// \brief A class that collects unexpanded parameter packs.
+ class CollectUnexpandedParameterPacksVisitor :
+ public RecursiveASTVisitor<CollectUnexpandedParameterPacksVisitor>
+ {
+ typedef RecursiveASTVisitor<CollectUnexpandedParameterPacksVisitor>
+ inherited;
+
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded;
+
+ public:
+ explicit CollectUnexpandedParameterPacksVisitor(
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded)
+ : Unexpanded(Unexpanded) { }
+
+ bool shouldWalkTypesOfTypeLocs() const { return false; }
+
+ //------------------------------------------------------------------------
+ // Recording occurrences of (unexpanded) parameter packs.
+ //------------------------------------------------------------------------
+
+ /// \brief Record occurrences of template type parameter packs.
+ bool VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
+ if (TL.getTypePtr()->isParameterPack())
+ Unexpanded.push_back(std::make_pair(TL.getTypePtr(), TL.getNameLoc()));
+ return true;
+ }
+
+ /// \brief Record occurrences of template type parameter packs
+ /// when we don't have proper source-location information for
+ /// them.
+ ///
+ /// Ideally, this routine would never be used.
+ bool VisitTemplateTypeParmType(TemplateTypeParmType *T) {
+ if (T->isParameterPack())
+ Unexpanded.push_back(std::make_pair(T, SourceLocation()));
+
+ return true;
+ }
+
+ /// \brief Record occurrences of function and non-type template
+ /// parameter packs in an expression.
+ bool VisitDeclRefExpr(DeclRefExpr *E) {
+ if (E->getDecl()->isParameterPack())
+ Unexpanded.push_back(std::make_pair(E->getDecl(), E->getLocation()));
+
+ return true;
+ }
+
+ /// \brief Record occurrences of template template parameter packs.
+ bool TraverseTemplateName(TemplateName Template) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast_or_null<TemplateTemplateParmDecl>(
+ Template.getAsTemplateDecl()))
+ if (TTP->isParameterPack())
+ Unexpanded.push_back(std::make_pair(TTP, SourceLocation()));
+
+ return inherited::TraverseTemplateName(Template);
+ }
+
+ /// \brief Suppress traversal into Objective-C container literal
+ /// elements that are pack expansions.
+ bool TraverseObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ if (!E->containsUnexpandedParameterPack())
+ return true;
+
+ for (unsigned I = 0, N = E->getNumElements(); I != N; ++I) {
+ ObjCDictionaryElement Element = E->getKeyValueElement(I);
+ if (Element.isPackExpansion())
+ continue;
+
+ TraverseStmt(Element.Key);
+ TraverseStmt(Element.Value);
+ }
+ return true;
+ }
+ //------------------------------------------------------------------------
+ // Pruning the search for unexpanded parameter packs.
+ //------------------------------------------------------------------------
+
+ /// \brief Suppress traversal into statements and expressions that
+ /// do not contain unexpanded parameter packs.
+ bool TraverseStmt(Stmt *S) {
+ if (Expr *E = dyn_cast_or_null<Expr>(S))
+ if (E->containsUnexpandedParameterPack())
+ return inherited::TraverseStmt(E);
+
+ return true;
+ }
+
+ /// \brief Suppress traversal into types that do not contain
+ /// unexpanded parameter packs.
+ bool TraverseType(QualType T) {
+ if (!T.isNull() && T->containsUnexpandedParameterPack())
+ return inherited::TraverseType(T);
+
+ return true;
+ }
+
+ /// \brief Suppress traversal into types with location information
+ /// that do not contain unexpanded parameter packs.
+ bool TraverseTypeLoc(TypeLoc TL) {
+ if (!TL.getType().isNull() &&
+ TL.getType()->containsUnexpandedParameterPack())
+ return inherited::TraverseTypeLoc(TL);
+
+ return true;
+ }
+
+ /// \brief Suppress traversal of non-parameter declarations, since
+ /// they cannot contain unexpanded parameter packs.
+ bool TraverseDecl(Decl *D) {
+ if (D && isa<ParmVarDecl>(D))
+ return inherited::TraverseDecl(D);
+
+ return true;
+ }
+
+ /// \brief Suppress traversal of template argument pack expansions.
+ bool TraverseTemplateArgument(const TemplateArgument &Arg) {
+ if (Arg.isPackExpansion())
+ return true;
+
+ return inherited::TraverseTemplateArgument(Arg);
+ }
+
+ /// \brief Suppress traversal of template argument pack expansions.
+ bool TraverseTemplateArgumentLoc(const TemplateArgumentLoc &ArgLoc) {
+ if (ArgLoc.getArgument().isPackExpansion())
+ return true;
+
+ return inherited::TraverseTemplateArgumentLoc(ArgLoc);
+ }
+ };
+}
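+
+// For illustration only -- hypothetical user code, not from this file; 'Box'
+// is a made-up name. It shows what the visitor above records:
+//
+//   template<typename ...Ts> struct Box {
+//     Ts unexpanded;        // 'Ts' appears without '...': recorded as an
+//                           // unexpanded pack (and later diagnosed)
+//     void ok(Ts ...args);  // 'Ts' is expanded by '...': nothing recorded
+//   };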
+
+/// \brief Diagnose all of the unexpanded parameter packs in the given
+/// vector.
+void
+Sema::DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
+ UnexpandedParameterPackContext UPPC,
+ ArrayRef<UnexpandedParameterPack> Unexpanded) {
+ if (Unexpanded.empty())
+ return;
+
+ SmallVector<SourceLocation, 4> Locations;
+ SmallVector<IdentifierInfo *, 4> Names;
+ llvm::SmallPtrSet<IdentifierInfo *, 4> NamesKnown;
+
+ for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
+ IdentifierInfo *Name = 0;
+ if (const TemplateTypeParmType *TTP
+ = Unexpanded[I].first.dyn_cast<const TemplateTypeParmType *>())
+ Name = TTP->getIdentifier();
+ else
+ Name = Unexpanded[I].first.get<NamedDecl *>()->getIdentifier();
+
+ if (Name && NamesKnown.insert(Name))
+ Names.push_back(Name);
+
+ if (Unexpanded[I].second.isValid())
+ Locations.push_back(Unexpanded[I].second);
+ }
+
+ DiagnosticBuilder DB
+ = Names.size() == 0? Diag(Loc, diag::err_unexpanded_parameter_pack_0)
+ << (int)UPPC
+ : Names.size() == 1? Diag(Loc, diag::err_unexpanded_parameter_pack_1)
+ << (int)UPPC << Names[0]
+ : Names.size() == 2? Diag(Loc, diag::err_unexpanded_parameter_pack_2)
+ << (int)UPPC << Names[0] << Names[1]
+ : Diag(Loc, diag::err_unexpanded_parameter_pack_3_or_more)
+ << (int)UPPC << Names[0] << Names[1];
+
+ for (unsigned I = 0, N = Locations.size(); I != N; ++I)
+ DB << SourceRange(Locations[I]);
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(SourceLocation Loc,
+ TypeSourceInfo *T,
+ UnexpandedParameterPackContext UPPC) {
+ // C++0x [temp.variadic]p5:
+ // An appearance of a name of a parameter pack that is not expanded is
+ // ill-formed.
+ if (!T->getType()->containsUnexpandedParameterPack())
+ return false;
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseTypeLoc(
+ T->getTypeLoc());
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ DiagnoseUnexpandedParameterPacks(Loc, UPPC, Unexpanded);
+ return true;
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(Expr *E,
+ UnexpandedParameterPackContext UPPC) {
+ // C++0x [temp.variadic]p5:
+ // An appearance of a name of a parameter pack that is not expanded is
+ // ill-formed.
+ if (!E->containsUnexpandedParameterPack())
+ return false;
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseStmt(E);
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ DiagnoseUnexpandedParameterPacks(E->getLocStart(), UPPC, Unexpanded);
+ return true;
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
+ UnexpandedParameterPackContext UPPC) {
+ // C++0x [temp.variadic]p5:
+ // An appearance of a name of a parameter pack that is not expanded is
+ // ill-formed.
+ if (!SS.getScopeRep() ||
+ !SS.getScopeRep()->containsUnexpandedParameterPack())
+ return false;
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseNestedNameSpecifier(SS.getScopeRep());
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ DiagnoseUnexpandedParameterPacks(SS.getRange().getBegin(),
+ UPPC, Unexpanded);
+ return true;
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
+ UnexpandedParameterPackContext UPPC) {
+ // C++0x [temp.variadic]p5:
+ // An appearance of a name of a parameter pack that is not expanded is
+ // ill-formed.
+ switch (NameInfo.getName().getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ return false;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ // FIXME: We shouldn't need this null check!
+ if (TypeSourceInfo *TSInfo = NameInfo.getNamedTypeInfo())
+ return DiagnoseUnexpandedParameterPack(NameInfo.getLoc(), TSInfo, UPPC);
+
+ if (!NameInfo.getName().getCXXNameType()->containsUnexpandedParameterPack())
+ return false;
+
+ break;
+ }
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseType(NameInfo.getName().getCXXNameType());
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ DiagnoseUnexpandedParameterPacks(NameInfo.getLoc(), UPPC, Unexpanded);
+ return true;
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(SourceLocation Loc,
+ TemplateName Template,
+ UnexpandedParameterPackContext UPPC) {
+
+ if (Template.isNull() || !Template.containsUnexpandedParameterPack())
+ return false;
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseTemplateName(Template);
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ DiagnoseUnexpandedParameterPacks(Loc, UPPC, Unexpanded);
+ return true;
+}
+
+bool Sema::DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
+ UnexpandedParameterPackContext UPPC) {
+ if (Arg.getArgument().isNull() ||
+ !Arg.getArgument().containsUnexpandedParameterPack())
+ return false;
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseTemplateArgumentLoc(Arg);
+ assert(!Unexpanded.empty() && "Unable to find unexpanded parameter packs");
+ DiagnoseUnexpandedParameterPacks(Arg.getLocation(), UPPC, Unexpanded);
+ return true;
+}
+
+void Sema::collectUnexpandedParameterPacks(TemplateArgument Arg,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseTemplateArgument(Arg);
+}
+
+void Sema::collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseTemplateArgumentLoc(Arg);
+}
+
+void Sema::collectUnexpandedParameterPacks(QualType T,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseType(T);
+}
+
+void Sema::collectUnexpandedParameterPacks(TypeLoc TL,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseTypeLoc(TL);
+}
+
+void Sema::collectUnexpandedParameterPacks(CXXScopeSpec &SS,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ NestedNameSpecifier *Qualifier = SS.getScopeRep();
+ if (!Qualifier)
+ return;
+
+ NestedNameSpecifierLoc QualifierLoc(Qualifier, SS.location_data());
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseNestedNameSpecifierLoc(QualifierLoc);
+}
+
+void Sema::collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
+ SmallVectorImpl<UnexpandedParameterPack> &Unexpanded) {
+ CollectUnexpandedParameterPacksVisitor(Unexpanded)
+ .TraverseDeclarationNameInfo(NameInfo);
+}
+
+
+ParsedTemplateArgument
+Sema::ActOnPackExpansion(const ParsedTemplateArgument &Arg,
+ SourceLocation EllipsisLoc) {
+ if (Arg.isInvalid())
+ return Arg;
+
+ switch (Arg.getKind()) {
+ case ParsedTemplateArgument::Type: {
+ TypeResult Result = ActOnPackExpansion(Arg.getAsType(), EllipsisLoc);
+ if (Result.isInvalid())
+ return ParsedTemplateArgument();
+
+ return ParsedTemplateArgument(Arg.getKind(), Result.get().getAsOpaquePtr(),
+ Arg.getLocation());
+ }
+
+ case ParsedTemplateArgument::NonType: {
+ ExprResult Result = ActOnPackExpansion(Arg.getAsExpr(), EllipsisLoc);
+ if (Result.isInvalid())
+ return ParsedTemplateArgument();
+
+ return ParsedTemplateArgument(Arg.getKind(), Result.get(),
+ Arg.getLocation());
+ }
+
+ case ParsedTemplateArgument::Template:
+ if (!Arg.getAsTemplate().get().containsUnexpandedParameterPack()) {
+ SourceRange R(Arg.getLocation());
+ if (Arg.getScopeSpec().isValid())
+ R.setBegin(Arg.getScopeSpec().getBeginLoc());
+ Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << R;
+ return ParsedTemplateArgument();
+ }
+
+ return Arg.getTemplatePackExpansion(EllipsisLoc);
+ }
+ llvm_unreachable("Unhandled template argument kind?");
+}
+
+TypeResult Sema::ActOnPackExpansion(ParsedType Type,
+ SourceLocation EllipsisLoc) {
+ TypeSourceInfo *TSInfo;
+ GetTypeFromParser(Type, &TSInfo);
+ if (!TSInfo)
+ return true;
+
+ TypeSourceInfo *TSResult = CheckPackExpansion(TSInfo, EllipsisLoc,
+ llvm::Optional<unsigned>());
+ if (!TSResult)
+ return true;
+
+ return CreateParsedType(TSResult->getType(), TSResult);
+}
+
+TypeSourceInfo *Sema::CheckPackExpansion(TypeSourceInfo *Pattern,
+ SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions) {
+ // Create the pack expansion type and source-location information.
+ QualType Result = CheckPackExpansion(Pattern->getType(),
+ Pattern->getTypeLoc().getSourceRange(),
+ EllipsisLoc, NumExpansions);
+ if (Result.isNull())
+ return 0;
+
+ TypeSourceInfo *TSResult = Context.CreateTypeSourceInfo(Result);
+ PackExpansionTypeLoc TL = cast<PackExpansionTypeLoc>(TSResult->getTypeLoc());
+ TL.setEllipsisLoc(EllipsisLoc);
+
+ // Copy over the source-location information from the type.
+ memcpy(TL.getNextTypeLoc().getOpaqueData(),
+ Pattern->getTypeLoc().getOpaqueData(),
+ Pattern->getTypeLoc().getFullDataSize());
+ return TSResult;
+}
+
+QualType Sema::CheckPackExpansion(QualType Pattern,
+ SourceRange PatternRange,
+ SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions) {
+ // C++0x [temp.variadic]p5:
+ // The pattern of a pack expansion shall name one or more
+ // parameter packs that are not expanded by a nested pack
+ // expansion.
+ if (!Pattern->containsUnexpandedParameterPack()) {
+ Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << PatternRange;
+ return QualType();
+ }
+
+ return Context.getPackExpansionType(Pattern, NumExpansions);
+}
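+
+// For illustration only -- hypothetical user code, not from this file; 'A',
+// 'B', and 'C' are made-up names. The check above rejects an ellipsis whose
+// pattern names no parameter packs:
+//
+//   template<typename ...Ts> struct A {};
+//   template<typename ...Ts> using B = A<Ts...>; // OK: the pattern uses 'Ts'
+//   template<typename T>     using C = A<T...>;  // error: 'T' is not a pack,
+//                                                // so there is nothing to
+//                                                // expand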
+
+ExprResult Sema::ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc) {
+ return CheckPackExpansion(Pattern, EllipsisLoc, llvm::Optional<unsigned>());
+}
+
+ExprResult Sema::CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions) {
+ if (!Pattern)
+ return ExprError();
+
+ // C++0x [temp.variadic]p5:
+ // The pattern of a pack expansion shall name one or more
+ // parameter packs that are not expanded by a nested pack
+ // expansion.
+ if (!Pattern->containsUnexpandedParameterPack()) {
+ Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
+ << Pattern->getSourceRange();
+ return ExprError();
+ }
+
+ // Create the pack expansion expression and source-location information.
+ return Owned(new (Context) PackExpansionExpr(Context.DependentTy, Pattern,
+ EllipsisLoc, NumExpansions));
+}
+
+/// \brief Retrieve the depth and index of a parameter pack.
+static std::pair<unsigned, unsigned>
+getDepthAndIndex(NamedDecl *ND) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(ND))
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(ND))
+ return std::make_pair(NTTP->getDepth(), NTTP->getIndex());
+
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(ND);
+ return std::make_pair(TTP->getDepth(), TTP->getIndex());
+}
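+
+// For illustration only -- hypothetical user code, not from this file;
+// 'Outer' is a made-up name. Depth counts enclosing template parameter lists
+// and index counts the position within one list:
+//
+//   template<typename T>           // T: depth 0, index 0
+//   struct Outer {
+//     template<typename U, int N>  // U: depth 1, index 0
+//     void f();                    // N: depth 1, index 1
+//   };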
+
+bool Sema::CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
+ SourceRange PatternRange,
+ ArrayRef<UnexpandedParameterPack> Unexpanded,
+ const MultiLevelTemplateArgumentList &TemplateArgs,
+ bool &ShouldExpand,
+ bool &RetainExpansion,
+ llvm::Optional<unsigned> &NumExpansions) {
+ ShouldExpand = true;
+ RetainExpansion = false;
+ std::pair<IdentifierInfo *, SourceLocation> FirstPack;
+ bool HaveFirstPack = false;
+
+ for (ArrayRef<UnexpandedParameterPack>::iterator i = Unexpanded.begin(),
+ end = Unexpanded.end();
+ i != end; ++i) {
+ // Compute the depth and index for this parameter pack.
+ unsigned Depth = 0, Index = 0;
+ IdentifierInfo *Name;
+ bool IsFunctionParameterPack = false;
+
+ if (const TemplateTypeParmType *TTP
+ = i->first.dyn_cast<const TemplateTypeParmType *>()) {
+ Depth = TTP->getDepth();
+ Index = TTP->getIndex();
+ Name = TTP->getIdentifier();
+ } else {
+ NamedDecl *ND = i->first.get<NamedDecl *>();
+ if (isa<ParmVarDecl>(ND))
+ IsFunctionParameterPack = true;
+ else
+ llvm::tie(Depth, Index) = getDepthAndIndex(ND);
+
+ Name = ND->getIdentifier();
+ }
+
+ // Determine the size of this argument pack.
+ unsigned NewPackSize;
+ if (IsFunctionParameterPack) {
+ // Figure out whether we're instantiating to an argument pack or not.
+ typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
+
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation
+ = CurrentInstantiationScope->findInstantiationOf(
+ i->first.get<NamedDecl *>());
+ if (Instantiation->is<DeclArgumentPack *>()) {
+ // We could expand this function parameter pack.
+ NewPackSize = Instantiation->get<DeclArgumentPack *>()->size();
+ } else {
+ // We can't expand this function parameter pack, so we can't expand
+ // the pack expansion.
+ ShouldExpand = false;
+ continue;
+ }
+ } else {
+ // If we don't have a template argument at this depth/index, then we
+ // cannot expand the pack expansion. Make a note of this, but we still
+ // want to check any parameter packs we *do* have arguments for.
+ if (Depth >= TemplateArgs.getNumLevels() ||
+ !TemplateArgs.hasTemplateArgument(Depth, Index)) {
+ ShouldExpand = false;
+ continue;
+ }
+
+ // Determine the size of the argument pack.
+ NewPackSize = TemplateArgs(Depth, Index).pack_size();
+ }
+
+ // C++0x [temp.arg.explicit]p9:
+ // Template argument deduction can extend the sequence of template
+ // arguments corresponding to a template parameter pack, even when the
+ // sequence contains explicitly specified template arguments.
+ if (!IsFunctionParameterPack) {
+ if (NamedDecl *PartialPack
+ = CurrentInstantiationScope->getPartiallySubstitutedPack()){
+ unsigned PartialDepth, PartialIndex;
+ llvm::tie(PartialDepth, PartialIndex) = getDepthAndIndex(PartialPack);
+ if (PartialDepth == Depth && PartialIndex == Index)
+ RetainExpansion = true;
+ }
+ }
+
+ if (!NumExpansions) {
+ // This is the first pack we've seen for which we have an argument.
+ // Record it.
+ NumExpansions = NewPackSize;
+ FirstPack.first = Name;
+ FirstPack.second = i->second;
+ HaveFirstPack = true;
+ continue;
+ }
+
+ if (NewPackSize != *NumExpansions) {
+ // C++0x [temp.variadic]p5:
+ // All of the parameter packs expanded by a pack expansion shall have
+ // the same number of arguments specified.
+ if (HaveFirstPack)
+ Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict)
+ << FirstPack.first << Name << *NumExpansions << NewPackSize
+ << SourceRange(FirstPack.second) << SourceRange(i->second);
+ else
+ Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict_multilevel)
+ << Name << *NumExpansions << NewPackSize
+ << SourceRange(i->second);
+ return true;
+ }
+ }
+
+ return false;
+}
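+
+// For illustration only -- hypothetical user code, not from this file; 'Tup'
+// and 'Wrap' are made-up names. The length check above fires when two packs
+// expanded by the same ellipsis end up with different sizes:
+//
+//   template<typename ...Ts> struct Tup {};
+//
+//   template<typename ...As> struct Wrap {
+//     template<typename ...Bs>
+//     using Zipped = Tup<Tup<As, Bs>...>;  // 'As' and 'Bs' expand together
+//   };
+//
+//   // Wrap<int, char>::Zipped<float> is ill-formed: 'As' expands to two
+//   // arguments while 'Bs' expands to one.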
+
+unsigned Sema::getNumArgumentsInExpansion(QualType T,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
+ QualType Pattern = cast<PackExpansionType>(T)->getPattern();
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ CollectUnexpandedParameterPacksVisitor(Unexpanded).TraverseType(Pattern);
+
+ for (unsigned I = 0, N = Unexpanded.size(); I != N; ++I) {
+ // Compute the depth and index for this parameter pack.
+ unsigned Depth;
+ unsigned Index;
+
+ if (const TemplateTypeParmType *TTP
+ = Unexpanded[I].first.dyn_cast<const TemplateTypeParmType *>()) {
+ Depth = TTP->getDepth();
+ Index = TTP->getIndex();
+ } else {
+ NamedDecl *ND = Unexpanded[I].first.get<NamedDecl *>();
+ if (isa<ParmVarDecl>(ND)) {
+ // Function parameter pack.
+ typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
+
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation
+ = CurrentInstantiationScope->findInstantiationOf(
+ Unexpanded[I].first.get<NamedDecl *>());
+ if (Instantiation->is<DeclArgumentPack *>())
+ return Instantiation->get<DeclArgumentPack *>()->size();
+
+ continue;
+ }
+
+ llvm::tie(Depth, Index) = getDepthAndIndex(ND);
+ }
+ if (Depth >= TemplateArgs.getNumLevels() ||
+ !TemplateArgs.hasTemplateArgument(Depth, Index))
+ continue;
+
+ // Determine the size of the argument pack.
+ return TemplateArgs(Depth, Index).pack_size();
+ }
+
+ llvm_unreachable("No unexpanded parameter packs in type expansion.");
+}
+
+bool Sema::containsUnexpandedParameterPacks(Declarator &D) {
+ const DeclSpec &DS = D.getDeclSpec();
+ switch (DS.getTypeSpecType()) {
+ case TST_typename:
+ case TST_typeofType:
+ case TST_underlyingType:
+ case TST_atomic: {
+ QualType T = DS.getRepAsType().get();
+ if (!T.isNull() && T->containsUnexpandedParameterPack())
+ return true;
+ break;
+ }
+
+ case TST_typeofExpr:
+ case TST_decltype:
+ if (DS.getRepAsExpr() &&
+ DS.getRepAsExpr()->containsUnexpandedParameterPack())
+ return true;
+ break;
+
+ case TST_unspecified:
+ case TST_void:
+ case TST_char:
+ case TST_wchar:
+ case TST_char16:
+ case TST_char32:
+ case TST_int:
+ case TST_int128:
+ case TST_half:
+ case TST_float:
+ case TST_double:
+ case TST_bool:
+ case TST_decimal32:
+ case TST_decimal64:
+ case TST_decimal128:
+ case TST_enum:
+ case TST_union:
+ case TST_struct:
+ case TST_class:
+ case TST_auto:
+ case TST_unknown_anytype:
+ case TST_error:
+ break;
+ }
+
+ for (unsigned I = 0, N = D.getNumTypeObjects(); I != N; ++I) {
+ const DeclaratorChunk &Chunk = D.getTypeObject(I);
+ switch (Chunk.Kind) {
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::Paren:
+ // These declarator chunks cannot contain any parameter packs.
+ break;
+
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::BlockPointer:
+ // Syntactically, these kinds of declarator chunks all come after the
+ // declarator-id (conceptually), so the parser should not invoke this
+ // routine at this time.
+ llvm_unreachable("Could not have seen this kind of declarator chunk");
+
+ case DeclaratorChunk::MemberPointer:
+ if (Chunk.Mem.Scope().getScopeRep() &&
+ Chunk.Mem.Scope().getScopeRep()->containsUnexpandedParameterPack())
+ return true;
+ break;
+ }
+ }
+
+ return false;
+}
+
+namespace {
+
+// Callback to only accept typo corrections that refer to parameter packs.
+class ParameterPackValidatorCCC : public CorrectionCandidateCallback {
+ public:
+ virtual bool ValidateCandidate(const TypoCorrection &candidate) {
+ NamedDecl *ND = candidate.getCorrectionDecl();
+ return ND && ND->isParameterPack();
+ }
+};
+
+}
+
+/// \brief Called when an expression computing the size of a parameter pack
+/// is parsed.
+///
+/// \code
+/// template<typename ...Types> struct count {
+/// static const unsigned value = sizeof...(Types);
+/// };
+/// \endcode
+///
+/// \param OpLoc The location of the "sizeof" keyword.
+/// \param Name The name of the parameter pack whose size will be determined.
+/// \param NameLoc The source location of the name of the parameter pack.
+/// \param RParenLoc The location of the closing parentheses.
+ExprResult Sema::ActOnSizeofParameterPackExpr(Scope *S,
+ SourceLocation OpLoc,
+ IdentifierInfo &Name,
+ SourceLocation NameLoc,
+ SourceLocation RParenLoc) {
+ // C++0x [expr.sizeof]p5:
+ // The identifier in a sizeof... expression shall name a parameter pack.
+ LookupResult R(*this, &Name, NameLoc, LookupOrdinaryName);
+ LookupName(R, S);
+
+ NamedDecl *ParameterPack = 0;
+ ParameterPackValidatorCCC Validator;
+ switch (R.getResultKind()) {
+ case LookupResult::Found:
+ ParameterPack = R.getFoundDecl();
+ break;
+
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ if (TypoCorrection Corrected = CorrectTypo(R.getLookupNameInfo(),
+ R.getLookupKind(), S, 0,
+ Validator)) {
+ std::string CorrectedQuotedStr(Corrected.getQuoted(getLangOpts()));
+ ParameterPack = Corrected.getCorrectionDecl();
+ Diag(NameLoc, diag::err_sizeof_pack_no_pack_name_suggest)
+ << &Name << CorrectedQuotedStr
+ << FixItHint::CreateReplacement(
+ NameLoc, Corrected.getAsString(getLangOpts()));
+ Diag(ParameterPack->getLocation(), diag::note_parameter_pack_here)
+ << CorrectedQuotedStr;
+ }
+
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue:
+ break;
+
+ case LookupResult::Ambiguous:
+ DiagnoseAmbiguousLookup(R);
+ return ExprError();
+ }
+
+ if (!ParameterPack || !ParameterPack->isParameterPack()) {
+ Diag(NameLoc, diag::err_sizeof_pack_no_pack_name)
+ << &Name;
+ return ExprError();
+ }
+
+ MarkAnyDeclReferenced(OpLoc, ParameterPack);
+
+ return new (Context) SizeOfPackExpr(Context.getSizeType(), OpLoc,
+ ParameterPack, NameLoc, RParenLoc);
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
new file mode 100644
index 0000000..c41df82
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/SemaType.cpp
@@ -0,0 +1,4514 @@
+//===--- SemaType.cpp - Semantic Analysis for Types -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements type-related semantic analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Template.h"
+#include "clang/Basic/OpenCL.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/TypeLocVisitor.h"
+#include "clang/AST/Expr.h"
+#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Sema/DeclSpec.h"
+#include "clang/Sema/DelayedDiagnostic.h"
+#include "clang/Sema/Lookup.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+
+/// isOmittedBlockReturnType - Return true if this declarator is missing a
+/// return type because this is an omitted return type on a block literal.
+static bool isOmittedBlockReturnType(const Declarator &D) {
+ if (D.getContext() != Declarator::BlockLiteralContext ||
+ D.getDeclSpec().hasTypeSpecifier())
+ return false;
+
+ if (D.getNumTypeObjects() == 0)
+ return true; // ^{ ... }
+
+ if (D.getNumTypeObjects() == 1 &&
+ D.getTypeObject(0).Kind == DeclaratorChunk::Function)
+ return true; // ^(int X, float Y) { ... }
+
+ return false;
+}
+
+/// diagnoseBadTypeAttribute - Diagnoses a type attribute which
+/// doesn't apply to the given type.
+static void diagnoseBadTypeAttribute(Sema &S, const AttributeList &attr,
+ QualType type) {
+ bool useExpansionLoc = false;
+
+ unsigned diagID = 0;
+ switch (attr.getKind()) {
+ case AttributeList::AT_objc_gc:
+ diagID = diag::warn_pointer_attribute_wrong_type;
+ useExpansionLoc = true;
+ break;
+
+ case AttributeList::AT_objc_ownership:
+ diagID = diag::warn_objc_object_attribute_wrong_type;
+ useExpansionLoc = true;
+ break;
+
+ default:
+ // Assume everything else was a function attribute.
+ diagID = diag::warn_function_attribute_wrong_type;
+ break;
+ }
+
+ SourceLocation loc = attr.getLoc();
+ StringRef name = attr.getName()->getName();
+
+ // The GC attributes are usually written with macros; special-case them.
+ if (useExpansionLoc && loc.isMacroID() && attr.getParameterName()) {
+ if (attr.getParameterName()->isStr("strong")) {
+ if (S.findMacroSpelling(loc, "__strong")) name = "__strong";
+ } else if (attr.getParameterName()->isStr("weak")) {
+ if (S.findMacroSpelling(loc, "__weak")) name = "__weak";
+ }
+ }
+
+ S.Diag(loc, diagID) << name << type;
+}
+
+// objc_gc applies to Objective-C pointers or, otherwise, to the
+// smallest available pointer type (i.e. 'void*' in 'void**').
+#define OBJC_POINTER_TYPE_ATTRS_CASELIST \
+ case AttributeList::AT_objc_gc: \
+ case AttributeList::AT_objc_ownership
+
+// Function type attributes.
+#define FUNCTION_TYPE_ATTRS_CASELIST \
+ case AttributeList::AT_noreturn: \
+ case AttributeList::AT_cdecl: \
+ case AttributeList::AT_fastcall: \
+ case AttributeList::AT_stdcall: \
+ case AttributeList::AT_thiscall: \
+ case AttributeList::AT_pascal: \
+ case AttributeList::AT_regparm: \
+ case AttributeList::AT_pcs
+
+namespace {
+ /// An object which stores processing state for the entire
+ /// GetTypeForDeclarator process.
+ class TypeProcessingState {
+ Sema &sema;
+
+ /// The declarator being processed.
+ Declarator &declarator;
+
+ /// The index of the declarator chunk we're currently processing.
+ /// May be the total number of valid chunks, indicating the
+ /// DeclSpec.
+ unsigned chunkIndex;
+
+ /// Whether there are non-trivial modifications to the decl spec.
+ bool trivial;
+
+ /// Whether we saved the attributes in the decl spec.
+ bool hasSavedAttrs;
+
+ /// The original set of attributes on the DeclSpec.
+ SmallVector<AttributeList*, 2> savedAttrs;
+
+ /// A list of attributes to diagnose the uselessness of when the
+ /// processing is complete.
+ SmallVector<AttributeList*, 2> ignoredTypeAttrs;
+
+ public:
+ TypeProcessingState(Sema &sema, Declarator &declarator)
+ : sema(sema), declarator(declarator),
+ chunkIndex(declarator.getNumTypeObjects()),
+ trivial(true), hasSavedAttrs(false) {}
+
+ Sema &getSema() const {
+ return sema;
+ }
+
+ Declarator &getDeclarator() const {
+ return declarator;
+ }
+
+ unsigned getCurrentChunkIndex() const {
+ return chunkIndex;
+ }
+
+ void setCurrentChunkIndex(unsigned idx) {
+ assert(idx <= declarator.getNumTypeObjects());
+ chunkIndex = idx;
+ }
+
+ AttributeList *&getCurrentAttrListRef() const {
+ assert(chunkIndex <= declarator.getNumTypeObjects());
+ if (chunkIndex == declarator.getNumTypeObjects())
+ return getMutableDeclSpec().getAttributes().getListRef();
+ return declarator.getTypeObject(chunkIndex).getAttrListRef();
+ }
+
+ /// Save the current set of attributes on the DeclSpec.
+ void saveDeclSpecAttrs() {
+ // Don't try to save them multiple times.
+ if (hasSavedAttrs) return;
+
+ DeclSpec &spec = getMutableDeclSpec();
+ for (AttributeList *attr = spec.getAttributes().getList(); attr;
+ attr = attr->getNext())
+ savedAttrs.push_back(attr);
+ trivial &= savedAttrs.empty();
+ hasSavedAttrs = true;
+ }
+
+ /// Record that we had nowhere to put the given type attribute.
+ /// We will diagnose such attributes later.
+ void addIgnoredTypeAttr(AttributeList &attr) {
+ ignoredTypeAttrs.push_back(&attr);
+ }
+
+ /// Diagnose all the ignored type attributes, given that the
+ /// declarator worked out to the given type.
+ void diagnoseIgnoredTypeAttrs(QualType type) const {
+ for (SmallVectorImpl<AttributeList*>::const_iterator
+ i = ignoredTypeAttrs.begin(), e = ignoredTypeAttrs.end();
+ i != e; ++i)
+ diagnoseBadTypeAttribute(getSema(), **i, type);
+ }
+
+ ~TypeProcessingState() {
+ if (trivial) return;
+
+ restoreDeclSpecAttrs();
+ }
+
+ private:
+ DeclSpec &getMutableDeclSpec() const {
+ return const_cast<DeclSpec&>(declarator.getDeclSpec());
+ }
+
+ void restoreDeclSpecAttrs() {
+ assert(hasSavedAttrs);
+
+ if (savedAttrs.empty()) {
+ getMutableDeclSpec().getAttributes().set(0);
+ return;
+ }
+
+ getMutableDeclSpec().getAttributes().set(savedAttrs[0]);
+ for (unsigned i = 0, e = savedAttrs.size() - 1; i != e; ++i)
+ savedAttrs[i]->setNext(savedAttrs[i+1]);
+ savedAttrs.back()->setNext(0);
+ }
+ };
+
+ /// Basically std::pair except that we really want to avoid an
+ /// implicit operator= for safety concerns. It's also a minor
+ /// link-time optimization for this to be a private type.
+ struct AttrAndList {
+ /// The attribute.
+ AttributeList &first;
+
+ /// The head of the list the attribute is currently in.
+ AttributeList *&second;
+
+ AttrAndList(AttributeList &attr, AttributeList *&head)
+ : first(attr), second(head) {}
+ };
+}
+
+namespace llvm {
+ template <> struct isPodLike<AttrAndList> {
+ static const bool value = true;
+ };
+}
+
+static void spliceAttrIntoList(AttributeList &attr, AttributeList *&head) {
+ attr.setNext(head);
+ head = &attr;
+}
+
+static void spliceAttrOutOfList(AttributeList &attr, AttributeList *&head) {
+ if (head == &attr) {
+ head = attr.getNext();
+ return;
+ }
+
+ AttributeList *cur = head;
+ while (true) {
+ assert(cur && cur->getNext() && "ran out of attrs?");
+ if (cur->getNext() == &attr) {
+ cur->setNext(attr.getNext());
+ return;
+ }
+ cur = cur->getNext();
+ }
+}
+
+static void moveAttrFromListToList(AttributeList &attr,
+ AttributeList *&fromList,
+ AttributeList *&toList) {
+ spliceAttrOutOfList(attr, fromList);
+ spliceAttrIntoList(attr, toList);
+}
+
+static void processTypeAttrs(TypeProcessingState &state,
+ QualType &type, bool isDeclSpec,
+ AttributeList *attrs);
+
+static bool handleFunctionTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &type);
+
+static bool handleObjCGCTypeAttr(TypeProcessingState &state,
+ AttributeList &attr, QualType &type);
+
+static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
+ AttributeList &attr, QualType &type);
+
+static bool handleObjCPointerTypeAttr(TypeProcessingState &state,
+ AttributeList &attr, QualType &type) {
+ if (attr.getKind() == AttributeList::AT_objc_gc)
+ return handleObjCGCTypeAttr(state, attr, type);
+ assert(attr.getKind() == AttributeList::AT_objc_ownership);
+ return handleObjCOwnershipTypeAttr(state, attr, type);
+}
+
+/// Given that an objc_gc attribute was written somewhere on a
+/// declaration *other* than on the declarator itself (for which, use
+/// distributeObjCPointerTypeAttrFromDeclarator), and given that it
+/// didn't apply in whatever position it was written in, try to move
+/// it to a more appropriate position.
+static void distributeObjCPointerTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType type) {
+ Declarator &declarator = state.getDeclarator();
+ for (unsigned i = state.getCurrentChunkIndex(); i != 0; --i) {
+ DeclaratorChunk &chunk = declarator.getTypeObject(i-1);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::BlockPointer:
+ moveAttrFromListToList(attr, state.getCurrentAttrListRef(),
+ chunk.getAttrListRef());
+ return;
+
+ case DeclaratorChunk::Paren:
+ case DeclaratorChunk::Array:
+ continue;
+
+ // Don't walk through these.
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::MemberPointer:
+ goto error;
+ }
+ }
+ error:
+
+ diagnoseBadTypeAttribute(state.getSema(), attr, type);
+}
+
+/// Distribute an objc_gc type attribute that was written on the
+/// declarator.
+static void
+distributeObjCPointerTypeAttrFromDeclarator(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &declSpecType) {
+ Declarator &declarator = state.getDeclarator();
+
+ // objc_gc goes on the innermost pointer to something that's not a
+ // pointer.
+ unsigned innermost = -1U;
+ bool considerDeclSpec = true;
+ for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) {
+ DeclaratorChunk &chunk = declarator.getTypeObject(i);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::BlockPointer:
+ innermost = i;
+ continue;
+
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::MemberPointer:
+ case DeclaratorChunk::Paren:
+ case DeclaratorChunk::Array:
+ continue;
+
+ case DeclaratorChunk::Function:
+ considerDeclSpec = false;
+ goto done;
+ }
+ }
+ done:
+
+ // That might actually be the decl spec if we weren't blocked by
+ // anything in the declarator.
+ if (considerDeclSpec) {
+ if (handleObjCPointerTypeAttr(state, attr, declSpecType)) {
+ // Splice the attribute into the decl spec. Prevents the
+ // attribute from being applied multiple times and gives
+ // the source-location-filler something to work with.
+ state.saveDeclSpecAttrs();
+ moveAttrFromListToList(attr, declarator.getAttrListRef(),
+ declarator.getMutableDeclSpec().getAttributes().getListRef());
+ return;
+ }
+ }
+
+ // Otherwise, if we found an appropriate chunk, splice the attribute
+ // into it.
+ if (innermost != -1U) {
+ moveAttrFromListToList(attr, declarator.getAttrListRef(),
+ declarator.getTypeObject(innermost).getAttrListRef());
+ return;
+ }
+
+ // Otherwise, diagnose when we're done building the type.
+ spliceAttrOutOfList(attr, declarator.getAttrListRef());
+ state.addIgnoredTypeAttr(attr);
+}
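+
+// For illustration only, restating the placement rule already documented with
+// the caselists above rather than adding anything new; 'gcptr' is a made-up
+// name and the declaration assumes GC mode:
+//
+//   void **gcptr __attribute__((objc_gc(weak)));
+//
+// The attribute written on the declarator is moved to the innermost pointer
+// chunk, so it ends up qualifying the inner 'void *' rather than the outer
+// pointer.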
+
+/// A function type attribute was written somewhere in a declaration
+/// *other* than on the declarator itself or in the decl spec. Given
+/// that it didn't apply in whatever position it was written in, try
+/// to move it to a more appropriate position.
+static void distributeFunctionTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType type) {
+ Declarator &declarator = state.getDeclarator();
+
+ // Try to push the attribute from the return type of a function to
+ // the function itself.
+ for (unsigned i = state.getCurrentChunkIndex(); i != 0; --i) {
+ DeclaratorChunk &chunk = declarator.getTypeObject(i-1);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Function:
+ moveAttrFromListToList(attr, state.getCurrentAttrListRef(),
+ chunk.getAttrListRef());
+ return;
+
+ case DeclaratorChunk::Paren:
+ case DeclaratorChunk::Pointer:
+ case DeclaratorChunk::BlockPointer:
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::MemberPointer:
+ continue;
+ }
+ }
+
+ diagnoseBadTypeAttribute(state.getSema(), attr, type);
+}
+
+/// Try to distribute a function type attribute to the innermost
+/// function chunk or type. Returns true if the attribute was
+/// distributed, false if no location was found.
+static bool
+distributeFunctionTypeAttrToInnermost(TypeProcessingState &state,
+ AttributeList &attr,
+ AttributeList *&attrList,
+ QualType &declSpecType) {
+ Declarator &declarator = state.getDeclarator();
+
+ // Put it on the innermost function chunk, if there is one.
+ for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) {
+ DeclaratorChunk &chunk = declarator.getTypeObject(i);
+ if (chunk.Kind != DeclaratorChunk::Function) continue;
+
+ moveAttrFromListToList(attr, attrList, chunk.getAttrListRef());
+ return true;
+ }
+
+ if (handleFunctionTypeAttr(state, attr, declSpecType)) {
+ spliceAttrOutOfList(attr, attrList);
+ return true;
+ }
+
+ return false;
+}
+
+/// A function type attribute was written in the decl spec. Try to
+/// apply it somewhere.
+static void
+distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &declSpecType) {
+ state.saveDeclSpecAttrs();
+
+ // Try to distribute to the innermost.
+ if (distributeFunctionTypeAttrToInnermost(state, attr,
+ state.getCurrentAttrListRef(),
+ declSpecType))
+ return;
+
+ // If that failed, diagnose the bad attribute when the declarator is
+ // fully built.
+ state.addIgnoredTypeAttr(attr);
+}
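+
+// For illustration only -- hypothetical user code, not from this file; 'fp'
+// is a made-up name. A function type attribute written in the declaration
+// specifiers is pushed onto the innermost function chunk:
+//
+//   __attribute__((noreturn)) void (*fp)(int);
+//
+// The attribute does not stick to the pointer or to 'void'; after the
+// distribution above, 'fp' is a pointer to a noreturn function taking int.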
+
+/// A function type attribute was written on the declarator. Try to
+/// apply it somewhere.
+static void
+distributeFunctionTypeAttrFromDeclarator(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &declSpecType) {
+ Declarator &declarator = state.getDeclarator();
+
+ // Try to distribute to the innermost.
+ if (distributeFunctionTypeAttrToInnermost(state, attr,
+ declarator.getAttrListRef(),
+ declSpecType))
+ return;
+
+ // If that failed, diagnose the bad attribute when the declarator is
+ // fully built.
+ spliceAttrOutOfList(attr, declarator.getAttrListRef());
+ state.addIgnoredTypeAttr(attr);
+}
+
+/// \brief Given that there are attributes written on the declarator
+/// itself, try to distribute any type attributes to the appropriate
+/// declarator chunk.
+///
+/// These are attributes like the following:
+/// int f ATTR;
+/// int (f ATTR)();
+/// but not necessarily this:
+/// int f() ATTR;
+static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state,
+ QualType &declSpecType) {
+ // Collect all the type attributes from the declarator itself.
+ assert(state.getDeclarator().getAttributes() && "declarator has no attrs!");
+ AttributeList *attr = state.getDeclarator().getAttributes();
+ AttributeList *next;
+ do {
+ next = attr->getNext();
+
+ switch (attr->getKind()) {
+ OBJC_POINTER_TYPE_ATTRS_CASELIST:
+ distributeObjCPointerTypeAttrFromDeclarator(state, *attr, declSpecType);
+ break;
+
+ case AttributeList::AT_ns_returns_retained:
+ if (!state.getSema().getLangOpts().ObjCAutoRefCount)
+ break;
+ // fallthrough
+
+ FUNCTION_TYPE_ATTRS_CASELIST:
+ distributeFunctionTypeAttrFromDeclarator(state, *attr, declSpecType);
+ break;
+
+ default:
+ break;
+ }
+ } while ((attr = next));
+}
+
+/// Add a synthetic '()' to a block-literal declarator if it is
+/// required, given the return type.
+static void maybeSynthesizeBlockSignature(TypeProcessingState &state,
+ QualType declSpecType) {
+ Declarator &declarator = state.getDeclarator();
+
+ // First, check whether the declarator would produce a function,
+ // i.e. whether the innermost semantic chunk is a function.
+ if (declarator.isFunctionDeclarator()) {
+ // If so, make that declarator a prototyped declarator.
+ declarator.getFunctionTypeInfo().hasPrototype = true;
+ return;
+ }
+
+ // If there are any type objects, the type as written won't name a
+ // function, regardless of the decl spec type. This is because a
+ // block signature declarator is always an abstract-declarator, and
+ // abstract-declarators can't just be parentheses chunks. Therefore
+ // we need to build a function chunk unless there are no type
+ // objects and the decl spec type is a function.
+ if (!declarator.getNumTypeObjects() && declSpecType->isFunctionType())
+ return;
+
+ // Note that there *are* cases with invalid declarators where
+ // declarators consist solely of parentheses. In general, these
+ // occur only in failed efforts to make function declarators, so
+ // faking up the function chunk is still the right thing to do.
+
+ // Otherwise, we need to fake up a function declarator.
+ SourceLocation loc = declarator.getLocStart();
+
+ // ...and *prepend* it to the declarator.
+ declarator.AddInnermostTypeInfo(DeclaratorChunk::getFunction(
+ /*proto*/ true,
+ /*variadic*/ false, SourceLocation(),
+ /*args*/ 0, 0,
+ /*type quals*/ 0,
+ /*ref-qualifier*/true, SourceLocation(),
+ /*const qualifier*/SourceLocation(),
+ /*volatile qualifier*/SourceLocation(),
+ /*mutable qualifier*/SourceLocation(),
+ /*EH*/ EST_None, SourceLocation(), 0, 0, 0, 0,
+ /*parens*/ loc, loc,
+ declarator));
+
+ // For consistency, make sure the state still has us as processing
+ // the decl spec.
+ assert(state.getCurrentChunkIndex() == declarator.getNumTypeObjects() - 1);
+ state.setCurrentChunkIndex(declarator.getNumTypeObjects());
+}
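+
+// For illustration only -- hypothetical user code, not from this file; 'blk'
+// is a made-up name. A block literal written without a parameter list gets a
+// synthesized prototyped function chunk from the routine above:
+//
+//   int (^blk)(void) = ^{ return 42; };  // treated as if it were written
+//                                        // ^(void){ return 42; }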
+
+/// \brief Convert the specified declspec to the appropriate type
+/// object.
+/// \param D the declarator containing the declaration specifier.
+/// \returns The type described by the declaration specifiers. This function
+/// never returns null.
+static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
+ // FIXME: Should move the logic from DeclSpec::Finish to here for validity
+ // checking.
+
+ Sema &S = state.getSema();
+ Declarator &declarator = state.getDeclarator();
+ const DeclSpec &DS = declarator.getDeclSpec();
+ SourceLocation DeclLoc = declarator.getIdentifierLoc();
+ if (DeclLoc.isInvalid())
+ DeclLoc = DS.getLocStart();
+
+ ASTContext &Context = S.Context;
+
+ QualType Result;
+ switch (DS.getTypeSpecType()) {
+ case DeclSpec::TST_void:
+ Result = Context.VoidTy;
+ break;
+ case DeclSpec::TST_char:
+ if (DS.getTypeSpecSign() == DeclSpec::TSS_unspecified)
+ Result = Context.CharTy;
+ else if (DS.getTypeSpecSign() == DeclSpec::TSS_signed)
+ Result = Context.SignedCharTy;
+ else {
+ assert(DS.getTypeSpecSign() == DeclSpec::TSS_unsigned &&
+ "Unknown TSS value");
+ Result = Context.UnsignedCharTy;
+ }
+ break;
+ case DeclSpec::TST_wchar:
+ if (DS.getTypeSpecSign() == DeclSpec::TSS_unspecified)
+ Result = Context.WCharTy;
+ else if (DS.getTypeSpecSign() == DeclSpec::TSS_signed) {
+ S.Diag(DS.getTypeSpecSignLoc(), diag::ext_invalid_sign_spec)
+ << DS.getSpecifierName(DS.getTypeSpecType());
+ Result = Context.getSignedWCharType();
+ } else {
+ assert(DS.getTypeSpecSign() == DeclSpec::TSS_unsigned &&
+ "Unknown TSS value");
+ S.Diag(DS.getTypeSpecSignLoc(), diag::ext_invalid_sign_spec)
+ << DS.getSpecifierName(DS.getTypeSpecType());
+ Result = Context.getUnsignedWCharType();
+ }
+ break;
+ case DeclSpec::TST_char16:
+ assert(DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
+ "Unknown TSS value");
+ Result = Context.Char16Ty;
+ break;
+ case DeclSpec::TST_char32:
+ assert(DS.getTypeSpecSign() == DeclSpec::TSS_unspecified &&
+ "Unknown TSS value");
+ Result = Context.Char32Ty;
+ break;
+ case DeclSpec::TST_unspecified:
+ // "<proto1,proto2>" is an objc qualified ID with a missing id.
+ if (DeclSpec::ProtocolQualifierListTy PQ = DS.getProtocolQualifiers()) {
+ Result = Context.getObjCObjectType(Context.ObjCBuiltinIdTy,
+ (ObjCProtocolDecl**)PQ,
+ DS.getNumProtocolQualifiers());
+ Result = Context.getObjCObjectPointerType(Result);
+ break;
+ }
+
+ // If this is a missing declspec in a block literal return context, then it
+ // is inferred from the return statements inside the block.
+ // The declspec is always missing in a lambda expr context; it is either
+ // specified with a trailing return type or inferred.
+ if (declarator.getContext() == Declarator::LambdaExprContext ||
+ isOmittedBlockReturnType(declarator)) {
+ Result = Context.DependentTy;
+ break;
+ }
+
+ // Unspecified typespec defaults to int in C90. However, the C90 grammar
+ // [C90 6.5] only allows a decl-spec if there was *some* type-specifier,
+ // type-qualifier, or storage-class-specifier. If not, emit an extwarn.
+ // Note that the one exception to this is function definitions, which are
+ // allowed to be completely missing a declspec. This is handled in the
+ // parser already though by it pretending to have seen an 'int' in this
+ // case.
+ if (S.getLangOpts().ImplicitInt) {
+ // In C89 mode, we only warn if there is a completely missing declspec
+ // when one is not allowed.
+ if (DS.isEmpty()) {
+ S.Diag(DeclLoc, diag::ext_missing_declspec)
+ << DS.getSourceRange()
+ << FixItHint::CreateInsertion(DS.getLocStart(), "int");
+ }
+ } else if (!DS.hasTypeSpecifier()) {
+ // C99 and C++ require a type specifier. For example, C99 6.7.2p2 says:
+ // "At least one type specifier shall be given in the declaration
+ // specifiers in each declaration, and in the specifier-qualifier list in
+ // each struct declaration and type name."
+ // FIXME: Does Microsoft really have the implicit int extension in C++?
+ if (S.getLangOpts().CPlusPlus &&
+ !S.getLangOpts().MicrosoftExt) {
+ S.Diag(DeclLoc, diag::err_missing_type_specifier)
+ << DS.getSourceRange();
+
+ // When this occurs in C++ code, often something is very broken with the
+ // value being declared, poison it as invalid so we don't get chains of
+ // errors.
+ declarator.setInvalidType(true);
+ } else {
+ S.Diag(DeclLoc, diag::ext_missing_type_specifier)
+ << DS.getSourceRange();
+ }
+ }
+
+ // FALL THROUGH.
+ case DeclSpec::TST_int: {
+ if (DS.getTypeSpecSign() != DeclSpec::TSS_unsigned) {
+ switch (DS.getTypeSpecWidth()) {
+ case DeclSpec::TSW_unspecified: Result = Context.IntTy; break;
+ case DeclSpec::TSW_short: Result = Context.ShortTy; break;
+ case DeclSpec::TSW_long: Result = Context.LongTy; break;
+ case DeclSpec::TSW_longlong:
+ Result = Context.LongLongTy;
+
+ // long long is a C99 feature.
+ if (!S.getLangOpts().C99)
+ S.Diag(DS.getTypeSpecWidthLoc(),
+ S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_longlong : diag::ext_longlong);
+ break;
+ }
+ } else {
+ switch (DS.getTypeSpecWidth()) {
+ case DeclSpec::TSW_unspecified: Result = Context.UnsignedIntTy; break;
+ case DeclSpec::TSW_short: Result = Context.UnsignedShortTy; break;
+ case DeclSpec::TSW_long: Result = Context.UnsignedLongTy; break;
+ case DeclSpec::TSW_longlong:
+ Result = Context.UnsignedLongLongTy;
+
+ // long long is a C99 feature.
+ if (!S.getLangOpts().C99)
+ S.Diag(DS.getTypeSpecWidthLoc(),
+ S.getLangOpts().CPlusPlus0x ?
+ diag::warn_cxx98_compat_longlong : diag::ext_longlong);
+ break;
+ }
+ }
+ break;
+ }
+ case DeclSpec::TST_int128:
+ if (DS.getTypeSpecSign() == DeclSpec::TSS_unsigned)
+ Result = Context.UnsignedInt128Ty;
+ else
+ Result = Context.Int128Ty;
+ break;
+ case DeclSpec::TST_half: Result = Context.HalfTy; break;
+ case DeclSpec::TST_float: Result = Context.FloatTy; break;
+ case DeclSpec::TST_double:
+ if (DS.getTypeSpecWidth() == DeclSpec::TSW_long)
+ Result = Context.LongDoubleTy;
+ else
+ Result = Context.DoubleTy;
+
+ if (S.getLangOpts().OpenCL && !S.getOpenCLOptions().cl_khr_fp64) {
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_double_requires_fp64);
+ declarator.setInvalidType(true);
+ }
+ break;
+ case DeclSpec::TST_bool: Result = Context.BoolTy; break; // _Bool or bool
+ case DeclSpec::TST_decimal32: // _Decimal32
+ case DeclSpec::TST_decimal64: // _Decimal64
+ case DeclSpec::TST_decimal128: // _Decimal128
+ S.Diag(DS.getTypeSpecTypeLoc(), diag::err_decimal_unsupported);
+ Result = Context.IntTy;
+ declarator.setInvalidType(true);
+ break;
+ case DeclSpec::TST_class:
+ case DeclSpec::TST_enum:
+ case DeclSpec::TST_union:
+ case DeclSpec::TST_struct: {
+ TypeDecl *D = dyn_cast_or_null<TypeDecl>(DS.getRepAsDecl());
+ if (!D) {
+ // This can happen in C++ with ambiguous lookups.
+ Result = Context.IntTy;
+ declarator.setInvalidType(true);
+ break;
+ }
+
+ // If the type is deprecated or unavailable, diagnose it.
+ S.DiagnoseUseOfDecl(D, DS.getTypeSpecTypeNameLoc());
+
+ assert(DS.getTypeSpecWidth() == 0 && DS.getTypeSpecComplex() == 0 &&
+ DS.getTypeSpecSign() == 0 && "No qualifiers on tag names!");
+
+ // TypeQuals handled by caller.
+ Result = Context.getTypeDeclType(D);
+
+ // In both C and C++, make an ElaboratedType.
+ ElaboratedTypeKeyword Keyword
+ = ElaboratedType::getKeywordForTypeSpec(DS.getTypeSpecType());
+ Result = S.getElaboratedType(Keyword, DS.getTypeSpecScope(), Result);
+ break;
+ }
+ case DeclSpec::TST_typename: {
+ assert(DS.getTypeSpecWidth() == 0 && DS.getTypeSpecComplex() == 0 &&
+ DS.getTypeSpecSign() == 0 &&
+ "Can't handle qualifiers on typedef names yet!");
+ Result = S.GetTypeFromParser(DS.getRepAsType());
+ if (Result.isNull())
+ declarator.setInvalidType(true);
+ else if (DeclSpec::ProtocolQualifierListTy PQ
+ = DS.getProtocolQualifiers()) {
+ if (const ObjCObjectType *ObjT = Result->getAs<ObjCObjectType>()) {
+ // Silently drop any existing protocol qualifiers.
+ // TODO: determine whether that's the right thing to do.
+ if (ObjT->getNumProtocols())
+ Result = ObjT->getBaseType();
+
+ if (DS.getNumProtocolQualifiers())
+ Result = Context.getObjCObjectType(Result,
+ (ObjCProtocolDecl**) PQ,
+ DS.getNumProtocolQualifiers());
+ } else if (Result->isObjCIdType()) {
+ // id<protocol-list>
+ Result = Context.getObjCObjectType(Context.ObjCBuiltinIdTy,
+ (ObjCProtocolDecl**) PQ,
+ DS.getNumProtocolQualifiers());
+ Result = Context.getObjCObjectPointerType(Result);
+ } else if (Result->isObjCClassType()) {
+ // Class<protocol-list>
+ Result = Context.getObjCObjectType(Context.ObjCBuiltinClassTy,
+ (ObjCProtocolDecl**) PQ,
+ DS.getNumProtocolQualifiers());
+ Result = Context.getObjCObjectPointerType(Result);
+ } else {
+ S.Diag(DeclLoc, diag::err_invalid_protocol_qualifiers)
+ << DS.getSourceRange();
+ declarator.setInvalidType(true);
+ }
+ }
+
+ // TypeQuals handled by caller.
+ break;
+ }
+ case DeclSpec::TST_typeofType:
+ // FIXME: Preserve type source info.
+ Result = S.GetTypeFromParser(DS.getRepAsType());
+ assert(!Result.isNull() && "Didn't get a type for typeof?");
+ if (!Result->isDependentType())
+ if (const TagType *TT = Result->getAs<TagType>())
+ S.DiagnoseUseOfDecl(TT->getDecl(), DS.getTypeSpecTypeLoc());
+ // TypeQuals handled by caller.
+ Result = Context.getTypeOfType(Result);
+ break;
+ case DeclSpec::TST_typeofExpr: {
+ Expr *E = DS.getRepAsExpr();
+ assert(E && "Didn't get an expression for typeof?");
+ // TypeQuals handled by caller.
+ Result = S.BuildTypeofExprType(E, DS.getTypeSpecTypeLoc());
+ if (Result.isNull()) {
+ Result = Context.IntTy;
+ declarator.setInvalidType(true);
+ }
+ break;
+ }
+ case DeclSpec::TST_decltype: {
+ Expr *E = DS.getRepAsExpr();
+ assert(E && "Didn't get an expression for decltype?");
+ // TypeQuals handled by caller.
+ Result = S.BuildDecltypeType(E, DS.getTypeSpecTypeLoc());
+ if (Result.isNull()) {
+ Result = Context.IntTy;
+ declarator.setInvalidType(true);
+ }
+ break;
+ }
+ case DeclSpec::TST_underlyingType:
+ Result = S.GetTypeFromParser(DS.getRepAsType());
+ assert(!Result.isNull() && "Didn't get a type for __underlying_type?");
+ Result = S.BuildUnaryTransformType(Result,
+ UnaryTransformType::EnumUnderlyingType,
+ DS.getTypeSpecTypeLoc());
+ if (Result.isNull()) {
+ Result = Context.IntTy;
+ declarator.setInvalidType(true);
+ }
+ break;
+
+ case DeclSpec::TST_auto: {
+ // TypeQuals handled by caller.
+ Result = Context.getAutoType(QualType());
+ break;
+ }
+
+ case DeclSpec::TST_unknown_anytype:
+ Result = Context.UnknownAnyTy;
+ break;
+
+ case DeclSpec::TST_atomic:
+ Result = S.GetTypeFromParser(DS.getRepAsType());
+ assert(!Result.isNull() && "Didn't get a type for _Atomic?");
+ Result = S.BuildAtomicType(Result, DS.getTypeSpecTypeLoc());
+ if (Result.isNull()) {
+ Result = Context.IntTy;
+ declarator.setInvalidType(true);
+ }
+ break;
+
+ case DeclSpec::TST_error:
+ Result = Context.IntTy;
+ declarator.setInvalidType(true);
+ break;
+ }
+
+ // Handle complex types.
+ if (DS.getTypeSpecComplex() == DeclSpec::TSC_complex) {
+ if (S.getLangOpts().Freestanding)
+ S.Diag(DS.getTypeSpecComplexLoc(), diag::ext_freestanding_complex);
+ Result = Context.getComplexType(Result);
+ } else if (DS.isTypeAltiVecVector()) {
+ unsigned typeSize = static_cast<unsigned>(Context.getTypeSize(Result));
+ assert(typeSize > 0 && "type size for vector must be greater than 0 bits");
+ VectorType::VectorKind VecKind = VectorType::AltiVecVector;
+ if (DS.isTypeAltiVecPixel())
+ VecKind = VectorType::AltiVecPixel;
+ else if (DS.isTypeAltiVecBool())
+ VecKind = VectorType::AltiVecBool;
+ Result = Context.getVectorType(Result, 128/typeSize, VecKind);
+ }
+
+ // FIXME: Imaginary.
+ if (DS.getTypeSpecComplex() == DeclSpec::TSC_imaginary)
+ S.Diag(DS.getTypeSpecComplexLoc(), diag::err_imaginary_not_supported);
+
+ // Before we process any type attributes, synthesize a block literal
+ // function declarator if necessary.
+ if (declarator.getContext() == Declarator::BlockLiteralContext)
+ maybeSynthesizeBlockSignature(state, Result);
+
+ // Apply any type attributes from the decl spec. This may cause the
+ // list of type attributes to be temporarily saved while the type
+ // attributes are pushed around.
+ if (AttributeList *attrs = DS.getAttributes().getList())
+ processTypeAttrs(state, Result, true, attrs);
+
+ // Apply const/volatile/restrict qualifiers to T.
+ if (unsigned TypeQuals = DS.getTypeQualifiers()) {
+
+ // Enforce C99 6.7.3p2: "Types other than pointer types derived from object
+ // or incomplete types shall not be restrict-qualified." C++ also allows
+ // restrict-qualified references.
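+ // For example (illustrative):
+ //   int *restrict p;            // OK: pointee is an object type
+ //   void (*restrict fp)(void);  // rejected: pointee is a function type
+ //   int restrict i;             // rejected: not a pointer or reference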
+ if (TypeQuals & DeclSpec::TQ_restrict) {
+ if (Result->isAnyPointerType() || Result->isReferenceType()) {
+ QualType EltTy;
+ if (Result->isObjCObjectPointerType())
+ EltTy = Result;
+ else
+ EltTy = Result->isPointerType() ?
+ Result->getAs<PointerType>()->getPointeeType() :
+ Result->getAs<ReferenceType>()->getPointeeType();
+
+ // If we have a pointer or reference, the pointee must have an object
+ // or incomplete type.
+ if (!EltTy->isIncompleteOrObjectType()) {
+ S.Diag(DS.getRestrictSpecLoc(),
+ diag::err_typecheck_invalid_restrict_invalid_pointee)
+ << EltTy << DS.getSourceRange();
+ TypeQuals &= ~DeclSpec::TQ_restrict; // Remove the restrict qualifier.
+ }
+ } else {
+ S.Diag(DS.getRestrictSpecLoc(),
+ diag::err_typecheck_invalid_restrict_not_pointer)
+ << Result << DS.getSourceRange();
+ TypeQuals &= ~DeclSpec::TQ_restrict; // Remove the restrict qualifier.
+ }
+ }
+
+ // Warn about CV qualifiers on functions: C99 6.7.3p8: "If the specification
+ // of a function type includes any type qualifiers, the behavior is
+ // undefined."
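+ // For example (illustrative):
+ //   typedef int fn_t(void);
+ //   const fn_t f;   // the qualifier on the function type is diagnosed here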
+ if (Result->isFunctionType() && TypeQuals) {
+ // Get some location to point at, either the C or V location.
+ SourceLocation Loc;
+ if (TypeQuals & DeclSpec::TQ_const)
+ Loc = DS.getConstSpecLoc();
+ else if (TypeQuals & DeclSpec::TQ_volatile)
+ Loc = DS.getVolatileSpecLoc();
+ else {
+ assert((TypeQuals & DeclSpec::TQ_restrict) &&
+ "Has CVR quals but not C, V, or R?");
+ Loc = DS.getRestrictSpecLoc();
+ }
+ S.Diag(Loc, diag::warn_typecheck_function_qualifiers)
+ << Result << DS.getSourceRange();
+ }
+
+ // C++ [dcl.ref]p1:
+ // Cv-qualified references are ill-formed except when the
+ // cv-qualifiers are introduced through the use of a typedef
+ // (7.1.3) or of a template type argument (14.3), in which
+ // case the cv-qualifiers are ignored.
+ // FIXME: Shouldn't we be checking SCS_typedef here?
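+ // For example (illustrative):
+ //   typedef int &IntRef;
+ //   const IntRef r = n;   // the 'const' is ignored rather than diagnosed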
+ if (DS.getTypeSpecType() == DeclSpec::TST_typename &&
+ TypeQuals && Result->isReferenceType()) {
+ TypeQuals &= ~DeclSpec::TQ_const;
+ TypeQuals &= ~DeclSpec::TQ_volatile;
+ }
+
+ // C90 6.5.3 constraints: "The same type qualifier shall not appear more
+ // than once in the same specifier-list or qualifier-list, either directly
+ // or via one or more typedefs."
+ if (!S.getLangOpts().C99 && !S.getLangOpts().CPlusPlus
+ && TypeQuals & Result.getCVRQualifiers()) {
+ if (TypeQuals & DeclSpec::TQ_const && Result.isConstQualified()) {
+ S.Diag(DS.getConstSpecLoc(), diag::ext_duplicate_declspec)
+ << "const";
+ }
+
+ if (TypeQuals & DeclSpec::TQ_volatile && Result.isVolatileQualified()) {
+ S.Diag(DS.getVolatileSpecLoc(), diag::ext_duplicate_declspec)
+ << "volatile";
+ }
+
+ // C90 doesn't have restrict, so it doesn't force us to produce a warning
+ // in this case.
+ }
+
+ Qualifiers Quals = Qualifiers::fromCVRMask(TypeQuals);
+ Result = Context.getQualifiedType(Result, Quals);
+ }
+
+ return Result;
+}
+
+static std::string getPrintableNameForEntity(DeclarationName Entity) {
+ if (Entity)
+ return Entity.getAsString();
+
+ return "type name";
+}
+
+QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
+ Qualifiers Qs) {
+ // Enforce C99 6.7.3p2: "Types other than pointer types derived from
+ // object or incomplete types shall not be restrict-qualified."
+ if (Qs.hasRestrict()) {
+ unsigned DiagID = 0;
+ QualType ProblemTy;
+
+ const Type *Ty = T->getCanonicalTypeInternal().getTypePtr();
+ if (const ReferenceType *RTy = dyn_cast<ReferenceType>(Ty)) {
+ if (!RTy->getPointeeType()->isIncompleteOrObjectType()) {
+ DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee;
+ ProblemTy = T->getAs<ReferenceType>()->getPointeeType();
+ }
+ } else if (const PointerType *PTy = dyn_cast<PointerType>(Ty)) {
+ if (!PTy->getPointeeType()->isIncompleteOrObjectType()) {
+ DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee;
+ ProblemTy = T->getAs<PointerType>()->getPointeeType();
+ }
+ } else if (const MemberPointerType *PTy = dyn_cast<MemberPointerType>(Ty)) {
+ if (!PTy->getPointeeType()->isIncompleteOrObjectType()) {
+ DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee;
+ ProblemTy = T->getAs<PointerType>()->getPointeeType();
+ }
+ } else if (!Ty->isDependentType()) {
+ // FIXME: this deserves a proper diagnostic
+ DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee;
+ ProblemTy = T;
+ }
+
+ if (DiagID) {
+ Diag(Loc, DiagID) << ProblemTy;
+ Qs.removeRestrict();
+ }
+ }
+
+ return Context.getQualifiedType(T, Qs);
+}
+
+/// \brief Build a paren type including \p T.
+QualType Sema::BuildParenType(QualType T) {
+ return Context.getParenType(T);
+}
+
+/// Given that we're building a pointer or reference to the given type,
+/// infer an ObjC ownership qualifier for the pointee if ARC requires one
+/// and none has been written.
+static QualType inferARCLifetimeForPointee(Sema &S, QualType type,
+ SourceLocation loc,
+ bool isReference) {
+ // Bail out if retention is unrequired or already specified.
+ if (!type->isObjCLifetimeType() ||
+ type.getObjCLifetime() != Qualifiers::OCL_None)
+ return type;
+
+ Qualifiers::ObjCLifetime implicitLifetime = Qualifiers::OCL_None;
+
+ // If the object type is const-qualified, we can safely use
+ // __unsafe_unretained. This is safe (because there are no read
+ // barriers), and it'll be safe to coerce anything but __weak* to
+ // the resulting type.
+ if (type.isConstQualified()) {
+ implicitLifetime = Qualifiers::OCL_ExplicitNone;
+
+ // Otherwise, check whether the static type does not require
+ // retaining. This currently only triggers for Class (possibly
+ // protocol-qualified, and arrays thereof).
+ } else if (type->isObjCARCImplicitlyUnretainedType()) {
+ implicitLifetime = Qualifiers::OCL_ExplicitNone;
+
+ // If we are in an unevaluated context, like sizeof, skip adding a
+ // qualification.
+ } else if (S.ExprEvalContexts.back().Context == Sema::Unevaluated) {
+ return type;
+
+ // If that failed, give an error and recover using __strong. __strong
+ // is the option most likely to prevent spurious second-order diagnostics,
+ // like when binding a reference to a field.
+ } else {
+ // These types can show up in private ivars in system headers, so
+ // we need this to not be an error in those cases. Instead we
+ // want to delay.
+ if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
+ S.DelayedDiagnostics.add(
+ sema::DelayedDiagnostic::makeForbiddenType(loc,
+ diag::err_arc_indirect_no_ownership, type, isReference));
+ } else {
+ S.Diag(loc, diag::err_arc_indirect_no_ownership) << type << isReference;
+ }
+ implicitLifetime = Qualifiers::OCL_Strong;
+ }
+ assert(implicitLifetime && "didn't infer any lifetime!");
+
+ Qualifiers qs;
+ qs.addObjCLifetime(implicitLifetime);
+ return S.Context.getQualifiedType(type, qs);
+}
+
+/// \brief Build a pointer type.
+///
+/// \param T The type to which we'll be building a pointer.
+///
+/// \param Loc The location of the entity whose type involves this
+/// pointer type or, if there is no such entity, the location of the
+/// type that will have pointer type.
+///
+/// \param Entity The name of the entity that involves the pointer
+/// type, if known.
+///
+/// \returns A suitable pointer type, if there are no
+/// errors. Otherwise, returns a NULL type.
+QualType Sema::BuildPointerType(QualType T,
+ SourceLocation Loc, DeclarationName Entity) {
+ if (T->isReferenceType()) {
+ // C++ 8.3.2p4: There shall be no ... pointers to references ...
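+ // For example (illustrative):
+ //   typedef int &IntRef;
+ //   IntRef *p;   // error: pointer to a reference type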
+ Diag(Loc, diag::err_illegal_decl_pointer_to_reference)
+ << getPrintableNameForEntity(Entity) << T;
+ return QualType();
+ }
+
+ assert(!T->isObjCObjectType() && "Should build ObjCObjectPointerType");
+
+ // Under ARC, infer an ownership qualifier for the pointee if it is a
+ // retainable type that lacks one.
+ if (getLangOpts().ObjCAutoRefCount)
+ T = inferARCLifetimeForPointee(*this, T, Loc, /*reference*/ false);
+
+ // Build the pointer type.
+ return Context.getPointerType(T);
+}
+
+/// \brief Build a reference type.
+///
+/// \param T The type to which we'll be building a reference.
+///
+/// \param Loc The location of the entity whose type involves this
+/// reference type or, if there is no such entity, the location of the
+/// type that will have reference type.
+///
+/// \param Entity The name of the entity that involves the reference
+/// type, if known.
+///
+/// \returns A suitable reference type, if there are no
+/// errors. Otherwise, returns a NULL type.
+QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
+ SourceLocation Loc,
+ DeclarationName Entity) {
+ assert(Context.getCanonicalType(T) != Context.OverloadTy &&
+ "Unresolved overloaded function type");
+
+ // C++0x [dcl.ref]p6:
+ // If a typedef (7.1.3), a type template-parameter (14.3.1), or a
+ // decltype-specifier (7.1.6.2) denotes a type TR that is a reference to a
+ // type T, an attempt to create the type "lvalue reference to cv TR" creates
+ // the type "lvalue reference to T", while an attempt to create the type
+ // "rvalue reference to cv TR" creates the type TR.
+ bool LValueRef = SpelledAsLValue || T->getAs<LValueReferenceType>();
+
+ // C++ [dcl.ref]p4: There shall be no references to references.
+ //
+ // According to C++ DR 106, references to references are only
+ // diagnosed when they are written directly (e.g., "int & &"),
+ // but not when they happen via a typedef:
+ //
+ // typedef int& intref;
+ // typedef intref& intref2;
+ //
+ // Parser::ParseDeclaratorInternal diagnoses the case where
+ // references are written directly; here, we handle the
+ // collapsing of references-to-references as described in C++0x.
+ // DR 106 and 540 introduce reference-collapsing into C++98/03.
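+ // For example (illustrative):
+ //   typedef int &lref;
+ //   lref &r1 = n;    // collapses to 'int &'
+ //   lref &&r2 = n;   // also collapses to 'int &'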
+
+ // C++ [dcl.ref]p1:
+ // A declarator that specifies the type "reference to cv void"
+ // is ill-formed.
+ if (T->isVoidType()) {
+ Diag(Loc, diag::err_reference_to_void);
+ return QualType();
+ }
+
+ // Under ARC, likewise infer an ownership qualifier for the referent if
+ // it is a retainable type that lacks one.
+ if (getLangOpts().ObjCAutoRefCount)
+ T = inferARCLifetimeForPointee(*this, T, Loc, /*reference*/ true);
+
+ // Build the appropriate lvalue or rvalue reference type.
+ if (LValueRef)
+ return Context.getLValueReferenceType(T, SpelledAsLValue);
+ return Context.getRValueReferenceType(T);
+}
+
+/// Check whether the specified array size makes the array type a VLA. If so,
+/// return true; if not, return false and store the size of the array in SizeVal.
+static bool isArraySizeVLA(Sema &S, Expr *ArraySize, llvm::APSInt &SizeVal) {
+ // If the size is an ICE, it certainly isn't a VLA. If we're in a GNU mode
+ // (like gnu99, but not c99) accept any evaluatable value as an extension.
+ return S.VerifyIntegerConstantExpression(
+ ArraySize, &SizeVal, S.PDiag(), S.LangOpts.GNUMode,
+ S.PDiag(diag::ext_vla_folded_to_constant)).isInvalid();
+}
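+// For example (illustrative), in gnu99 mode:
+//   const int n = 8;
+//   int a[n];   // not a C99 ICE, but foldable, so it may be accepted as a
+//               // constant-size array with an extension warning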
+
+
+/// \brief Build an array type.
+///
+/// \param T The type of each element in the array.
+///
+/// \param ASM C99 array size modifier (e.g., '*', 'static').
+///
+/// \param ArraySize Expression describing the size of the array.
+///
+/// \param Loc The location of the entity whose type involves this
+/// array type or, if there is no such entity, the location of the
+/// type that will have array type.
+///
+/// \param Entity The name of the entity that involves the array
+/// type, if known.
+///
+/// \returns A suitable array type, if there are no errors. Otherwise,
+/// returns a NULL type.
+QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
+ Expr *ArraySize, unsigned Quals,
+ SourceRange Brackets, DeclarationName Entity) {
+
+ SourceLocation Loc = Brackets.getBegin();
+ if (getLangOpts().CPlusPlus) {
+ // C++ [dcl.array]p1:
+ // T is called the array element type; this type shall not be a reference
+ // type, the (possibly cv-qualified) type void, a function type or an
+ // abstract class type.
+ //
+ // Note: function types are handled in the common path with C.
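+ // For example (illustrative):
+ //   int &a[4];   // error: array of references
+ //   void v[4];   // error: array of void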
+ if (T->isReferenceType()) {
+ Diag(Loc, diag::err_illegal_decl_array_of_references)
+ << getPrintableNameForEntity(Entity) << T;
+ return QualType();
+ }
+
+ if (T->isVoidType()) {
+ Diag(Loc, diag::err_illegal_decl_array_incomplete_type) << T;
+ return QualType();
+ }
+
+ if (RequireNonAbstractType(Brackets.getBegin(), T,
+ diag::err_array_of_abstract_type))
+ return QualType();
+
+ } else {
+ // C99 6.7.5.2p1: If the element type is an incomplete or function type,
+ // reject it (e.g. void ary[7], struct foo ary[7], void ary[7]())
+ if (RequireCompleteType(Loc, T,
+ diag::err_illegal_decl_array_incomplete_type))
+ return QualType();
+ }
+
+ if (T->isFunctionType()) {
+ Diag(Loc, diag::err_illegal_decl_array_of_functions)
+ << getPrintableNameForEntity(Entity) << T;
+ return QualType();
+ }
+
+ if (T->getContainedAutoType()) {
+ Diag(Loc, diag::err_illegal_decl_array_of_auto)
+ << getPrintableNameForEntity(Entity) << T;
+ return QualType();
+ }
+
+ if (const RecordType *EltTy = T->getAs<RecordType>()) {
+ // If the element type is a struct or union that contains a flexible array
+ // member, accept it as a GNU extension: C99 6.7.2.1p2.
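+ // For example (illustrative):
+ //   struct S { int n; int data[]; };
+ //   struct S table[4];   // accepted with a warning as a GNU extension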
+ if (EltTy->getDecl()->hasFlexibleArrayMember())
+ Diag(Loc, diag::ext_flexible_array_in_array) << T;
+ } else if (T->isObjCObjectType()) {
+ Diag(Loc, diag::err_objc_array_of_interfaces) << T;
+ return QualType();
+ }
+
+ // Do placeholder conversions on the array size expression.
+ if (ArraySize && ArraySize->hasPlaceholderType()) {
+ ExprResult Result = CheckPlaceholderExpr(ArraySize);
+ if (Result.isInvalid()) return QualType();
+ ArraySize = Result.take();
+ }
+
+ // Do lvalue-to-rvalue conversions on the array size expression.
+ if (ArraySize && !ArraySize->isRValue()) {
+ ExprResult Result = DefaultLvalueConversion(ArraySize);
+ if (Result.isInvalid())
+ return QualType();
+
+ ArraySize = Result.take();
+ }
+
+ // C99 6.7.5.2p1: The size expression shall have integer type.
+ // C++11 allows contextual conversions to such types.
+ if (!getLangOpts().CPlusPlus0x &&
+ ArraySize && !ArraySize->isTypeDependent() &&
+ !ArraySize->getType()->isIntegralOrUnscopedEnumerationType()) {
+ Diag(ArraySize->getLocStart(), diag::err_array_size_non_int)
+ << ArraySize->getType() << ArraySize->getSourceRange();
+ return QualType();
+ }
+
+ llvm::APSInt ConstVal(Context.getTypeSize(Context.getSizeType()));
+ if (!ArraySize) {
+ if (ASM == ArrayType::Star)
+ T = Context.getVariableArrayType(T, 0, ASM, Quals, Brackets);
+ else
+ T = Context.getIncompleteArrayType(T, ASM, Quals);
+ } else if (ArraySize->isTypeDependent() || ArraySize->isValueDependent()) {
+ T = Context.getDependentSizedArrayType(T, ArraySize, ASM, Quals, Brackets);
+ } else if ((!T->isDependentType() && !T->isIncompleteType() &&
+ !T->isConstantSizeType()) ||
+ isArraySizeVLA(*this, ArraySize, ConstVal)) {
+ // Even in C++11, don't allow contextual conversions in the array bound
+ // of a VLA.
+ if (getLangOpts().CPlusPlus0x &&
+ !ArraySize->getType()->isIntegralOrUnscopedEnumerationType()) {
+ Diag(ArraySize->getLocStart(), diag::err_array_size_non_int)
+ << ArraySize->getType() << ArraySize->getSourceRange();
+ return QualType();
+ }
+
+ // C99: an array with an element type that has a non-constant size is a VLA.
+ // C99: an array with a non-ICE size is a VLA. We accept any expression
+ // that we can fold to a non-zero positive value as an extension.
+ T = Context.getVariableArrayType(T, ArraySize, ASM, Quals, Brackets);
+ } else {
+ // C99 6.7.5.2p1: If the expression is a constant expression, it shall
+ // have a value greater than zero.
+ if (ConstVal.isSigned() && ConstVal.isNegative()) {
+ if (Entity)
+ Diag(ArraySize->getLocStart(), diag::err_decl_negative_array_size)
+ << getPrintableNameForEntity(Entity) << ArraySize->getSourceRange();
+ else
+ Diag(ArraySize->getLocStart(), diag::err_typecheck_negative_array_size)
+ << ArraySize->getSourceRange();
+ return QualType();
+ }
+ if (ConstVal == 0) {
+ // GCC accepts zero-sized static arrays. We allow them when
+ // we're not in a SFINAE context.
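+ // For example (illustrative):
+ //   int a[0];                  // extension warning (hard error in SFINAE)
+ //   void f(int a[static 0]);   // additionally warns and drops 'static'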
+ Diag(ArraySize->getLocStart(),
+ isSFINAEContext()? diag::err_typecheck_zero_array_size
+ : diag::ext_typecheck_zero_array_size)
+ << ArraySize->getSourceRange();
+
+ if (ASM == ArrayType::Static) {
+ Diag(ArraySize->getLocStart(),
+ diag::warn_typecheck_zero_static_array_size)
+ << ArraySize->getSourceRange();
+ ASM = ArrayType::Normal;
+ }
+ } else if (!T->isDependentType() && !T->isVariablyModifiedType() &&
+ !T->isIncompleteType()) {
+ // Is the array too large?
+ unsigned ActiveSizeBits
+ = ConstantArrayType::getNumAddressingBits(Context, T, ConstVal);
+ if (ActiveSizeBits > ConstantArrayType::getMaxSizeBits(Context))
+ Diag(ArraySize->getLocStart(), diag::err_array_too_large)
+ << ConstVal.toString(10)
+ << ArraySize->getSourceRange();
+ }
+
+ T = Context.getConstantArrayType(T, ConstVal, ASM, Quals);
+ }
+ // If this is not C99, extwarn about VLAs and C99 array size modifiers.
+ if (!getLangOpts().C99) {
+ if (T->isVariableArrayType()) {
+ // Prohibit the use of non-POD types in VLAs.
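+ // For example (illustrative):
+ //   struct T { ~T(); };
+ //   void f(int n) { T vla[n]; }   // rejected: non-POD element type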
+ QualType BaseT = Context.getBaseElementType(T);
+ if (!T->isDependentType() &&
+ !BaseT.isPODType(Context) &&
+ !BaseT->isObjCLifetimeType()) {
+ Diag(Loc, diag::err_vla_non_pod)
+ << BaseT;
+ return QualType();
+ }
+ // Prohibit the use of VLAs during template argument deduction.
+ else if (isSFINAEContext()) {
+ Diag(Loc, diag::err_vla_in_sfinae);
+ return QualType();
+ }
+ // Just extwarn about VLAs.
+ else
+ Diag(Loc, diag::ext_vla);
+ } else if (ASM != ArrayType::Normal || Quals != 0)
+ Diag(Loc,
+ getLangOpts().CPlusPlus? diag::err_c99_array_usage_cxx
+ : diag::ext_c99_array_usage) << ASM;
+ }
+
+ return T;
+}
+
+/// \brief Build an ext-vector type.
+///
+/// Run the required checks for the extended vector type.
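+///
+/// For example (illustrative):
+///   typedef float float4 __attribute__((ext_vector_type(4)));   // OK
+///   typedef float *bad4 __attribute__((ext_vector_type(4)));    // rejected element type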
+QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
+ SourceLocation AttrLoc) {
+ // Unlike GCC's vector_size attribute, we do not allow vectors to be defined
+ // in conjunction with complex types (pointers, arrays, functions, etc.).
+ if (!T->isDependentType() &&
+ !T->isIntegerType() && !T->isRealFloatingType()) {
+ Diag(AttrLoc, diag::err_attribute_invalid_vector_type) << T;
+ return QualType();
+ }
+
+ if (!ArraySize->isTypeDependent() && !ArraySize->isValueDependent()) {
+ llvm::APSInt vecSize(32);
+ if (!ArraySize->isIntegerConstantExpr(vecSize, Context)) {
+ Diag(AttrLoc, diag::err_attribute_argument_not_int)
+ << "ext_vector_type" << ArraySize->getSourceRange();
+ return QualType();
+ }
+
+ // Unlike GCC's vector_size attribute, the size is specified as the
+ // number of elements, not the number of bytes.
+ unsigned vectorSize = static_cast<unsigned>(vecSize.getZExtValue());
+
+ if (vectorSize == 0) {
+ Diag(AttrLoc, diag::err_attribute_zero_size)
+ << ArraySize->getSourceRange();
+ return QualType();
+ }
+
+ return Context.getExtVectorType(T, vectorSize);
+ }
+
+ return Context.getDependentSizedExtVectorType(T, ArraySize, AttrLoc);
+}
+
+/// \brief Build a function type.
+///
+/// This routine checks the function type according to C++ rules and
+/// under the assumption that the result type and parameter types have
+/// just been instantiated from a template. It therefore duplicates
+/// some of the behavior of GetTypeForDeclarator, but in a much
+/// simpler form that is only suitable for this narrow use case.
+///
+/// \param T The return type of the function.
+///
+/// \param ParamTypes The parameter types of the function. This array
+/// will be modified to account for adjustments to the types of the
+/// function parameters.
+///
+/// \param NumParamTypes The number of parameter types in ParamTypes.
+///
+/// \param Variadic Whether this is a variadic function type.
+///
+/// \param HasTrailingReturn Whether this function has a trailing return type.
+///
+/// \param Quals The cvr-qualifiers to be applied to the function type.
+///
+/// \param Loc The location of the entity whose type involves this
+/// function type or, if there is no such entity, the location of the
+/// type that will have function type.
+///
+/// \param Entity The name of the entity that involves the function
+/// type, if known.
+///
+/// \returns A suitable function type, if there are no
+/// errors. Otherwise, returns a NULL type.
+QualType Sema::BuildFunctionType(QualType T,
+ QualType *ParamTypes,
+ unsigned NumParamTypes,
+ bool Variadic, bool HasTrailingReturn,
+ unsigned Quals,
+ RefQualifierKind RefQualifier,
+ SourceLocation Loc, DeclarationName Entity,
+ FunctionType::ExtInfo Info) {
+ if (T->isArrayType() || T->isFunctionType()) {
+ Diag(Loc, diag::err_func_returning_array_function)
+ << T->isFunctionType() << T;
+ return QualType();
+ }
+
+ // Functions cannot return half FP.
+ if (T->isHalfType()) {
+ Diag(Loc, diag::err_parameters_retval_cannot_have_fp16_type) << 1 <<
+ FixItHint::CreateInsertion(Loc, "*");
+ return QualType();
+ }
+
+ bool Invalid = false;
+ for (unsigned Idx = 0; Idx < NumParamTypes; ++Idx) {
+ // FIXME: Loc is too imprecise here; we should use proper locations for the args.
+ QualType ParamType = Context.getAdjustedParameterType(ParamTypes[Idx]);
+ if (ParamType->isVoidType()) {
+ Diag(Loc, diag::err_param_with_void_type);
+ Invalid = true;
+ } else if (ParamType->isHalfType()) {
+ // Disallow half FP arguments.
+ Diag(Loc, diag::err_parameters_retval_cannot_have_fp16_type) << 0 <<
+ FixItHint::CreateInsertion(Loc, "*");
+ Invalid = true;
+ }
+
+ ParamTypes[Idx] = ParamType;
+ }
+
+ if (Invalid)
+ return QualType();
+
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = Variadic;
+ EPI.HasTrailingReturn = HasTrailingReturn;
+ EPI.TypeQuals = Quals;
+ EPI.RefQualifier = RefQualifier;
+ EPI.ExtInfo = Info;
+
+ return Context.getFunctionType(T, ParamTypes, NumParamTypes, EPI);
+}
+
+/// \brief Build a member pointer type \c T Class::*.
+///
+/// \param T the type to which the member pointer refers.
+/// \param Class the class type into which the member pointer points.
+/// \param Loc the location where this type begins
+/// \param Entity the name of the entity that will have this member pointer type
+///
+/// \returns a member pointer type, if successful, or a NULL type if there was
+/// an error.
+QualType Sema::BuildMemberPointerType(QualType T, QualType Class,
+ SourceLocation Loc,
+ DeclarationName Entity) {
+ // Verify that we're not building a pointer to pointer to function with
+ // exception specification.
+ if (CheckDistantExceptionSpec(T)) {
+ Diag(Loc, diag::err_distant_exception_spec);
+
+ // FIXME: If we're doing this as part of template instantiation,
+ // we should return immediately.
+
+ // Build the type anyway, but use the canonical type so that the
+ // exception specifiers are stripped off.
+ T = Context.getCanonicalType(T);
+ }
+
+ // C++ 8.3.3p3: A pointer to member shall not point to ... a member
+ // with reference type, or "cv void."
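+ // For example (illustrative):
+ //   int &C::*pr;   // error: member pointer to a reference
+ //   void C::*pv;   // error: member pointer to void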
+ if (T->isReferenceType()) {
+ Diag(Loc, diag::err_illegal_decl_mempointer_to_reference)
+ << (Entity? Entity.getAsString() : "type name") << T;
+ return QualType();
+ }
+
+ if (T->isVoidType()) {
+ Diag(Loc, diag::err_illegal_decl_mempointer_to_void)
+ << (Entity? Entity.getAsString() : "type name");
+ return QualType();
+ }
+
+ if (!Class->isDependentType() && !Class->isRecordType()) {
+ Diag(Loc, diag::err_mempointer_in_nonclass_type) << Class;
+ return QualType();
+ }
+
+ // In the Microsoft ABI, the class is allowed to be an incomplete
+ // type. In such cases, the compiler makes a worst-case assumption.
+ // We make no such assumption right now, so emit an error if the
+ // class isn't a complete type.
+ if (Context.getTargetInfo().getCXXABI() == CXXABI_Microsoft &&
+ RequireCompleteType(Loc, Class, diag::err_incomplete_type))
+ return QualType();
+
+ return Context.getMemberPointerType(T, Class.getTypePtr());
+}
+
+/// \brief Build a block pointer type.
+///
+/// \param T The type to which we'll be building a block pointer.
+///
+/// \param CVR The cvr-qualifiers to be applied to the block pointer type.
+///
+/// \param Loc The location of the entity whose type involves this
+/// block pointer type or, if there is no such entity, the location of the
+/// type that will have block pointer type.
+///
+/// \param Entity The name of the entity that involves the block pointer
+/// type, if known.
+///
+/// \returns A suitable block pointer type, if there are no
+/// errors. Otherwise, returns a NULL type.
+QualType Sema::BuildBlockPointerType(QualType T,
+ SourceLocation Loc,
+ DeclarationName Entity) {
+ if (!T->isFunctionType()) {
+ Diag(Loc, diag::err_nonfunction_block_type);
+ return QualType();
+ }
+
+ return Context.getBlockPointerType(T);
+}
+
+QualType Sema::GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo) {
+ QualType QT = Ty.get();
+ if (QT.isNull()) {
+ if (TInfo) *TInfo = 0;
+ return QualType();
+ }
+
+ TypeSourceInfo *DI = 0;
+ if (const LocInfoType *LIT = dyn_cast<LocInfoType>(QT)) {
+ QT = LIT->getType();
+ DI = LIT->getTypeSourceInfo();
+ }
+
+ if (TInfo) *TInfo = DI;
+ return QT;
+}
+
+static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
+ Qualifiers::ObjCLifetime ownership,
+ unsigned chunkIndex);
+
+/// Given that this is the declaration of a parameter under ARC,
+/// attempt to infer attributes and such for pointer-to-whatever
+/// types.
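+///
+/// For example (illustrative), a parameter written as 'NSError **error' is
+/// treated as 'NSError *__autoreleasing *error' under ARC.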
+static void inferARCWriteback(TypeProcessingState &state,
+ QualType &declSpecType) {
+ Sema &S = state.getSema();
+ Declarator &declarator = state.getDeclarator();
+
+ // TODO: should we care about decl qualifiers?
+
+ // Check whether the declarator has the expected form. We walk
+ // from the inside out in order to make the block logic work.
+ unsigned outermostPointerIndex = 0;
+ bool isBlockPointer = false;
+ unsigned numPointers = 0;
+ for (unsigned i = 0, e = declarator.getNumTypeObjects(); i != e; ++i) {
+ unsigned chunkIndex = i;
+ DeclaratorChunk &chunk = declarator.getTypeObject(chunkIndex);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Paren:
+ // Ignore parens.
+ break;
+
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::Pointer:
+ // Count the number of pointers. Treat references
+ // interchangeably as pointers; if they're mis-ordered, normal
+ // type building will discover that.
+ outermostPointerIndex = chunkIndex;
+ numPointers++;
+ break;
+
+ case DeclaratorChunk::BlockPointer:
+ // If we have a pointer to block pointer, that's an acceptable
+ // indirect reference; anything else is not an application of
+ // the rules.
+ if (numPointers != 1) return;
+ numPointers++;
+ outermostPointerIndex = chunkIndex;
+ isBlockPointer = true;
+
+ // We don't care about pointer structure in return values here.
+ goto done;
+
+ case DeclaratorChunk::Array: // suppress if written (id[])?
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::MemberPointer:
+ return;
+ }
+ }
+ done:
+
+ // If we have *one* pointer, then we want to throw the qualifier on
+ // the declaration-specifiers, which means that it needs to be a
+ // retainable object type.
+ if (numPointers == 1) {
+ // If it's not a retainable object type, the rule doesn't apply.
+ if (!declSpecType->isObjCRetainableType()) return;
+
+ // If it already has lifetime, don't do anything.
+ if (declSpecType.getObjCLifetime()) return;
+
+ // Otherwise, modify the type in-place.
+ Qualifiers qs;
+
+ if (declSpecType->isObjCARCImplicitlyUnretainedType())
+ qs.addObjCLifetime(Qualifiers::OCL_ExplicitNone);
+ else
+ qs.addObjCLifetime(Qualifiers::OCL_Autoreleasing);
+ declSpecType = S.Context.getQualifiedType(declSpecType, qs);
+
+ // If we have *two* pointers, then we want to throw the qualifier on
+ // the outermost pointer.
+ } else if (numPointers == 2) {
+ // If we don't have a block pointer, we need to check whether the
+ // declaration-specifiers gave us something that will turn into a
+ // retainable object pointer after we slap the first pointer on it.
+ if (!isBlockPointer && !declSpecType->isObjCObjectType())
+ return;
+
+ // Look for an explicit lifetime attribute there.
+ DeclaratorChunk &chunk = declarator.getTypeObject(outermostPointerIndex);
+ if (chunk.Kind != DeclaratorChunk::Pointer &&
+ chunk.Kind != DeclaratorChunk::BlockPointer)
+ return;
+ for (const AttributeList *attr = chunk.getAttrs(); attr;
+ attr = attr->getNext())
+ if (attr->getKind() == AttributeList::AT_objc_ownership)
+ return;
+
+ transferARCOwnershipToDeclaratorChunk(state, Qualifiers::OCL_Autoreleasing,
+ outermostPointerIndex);
+
+ // Any other number of pointers/references does not trigger the rule.
+ } else return;
+
+ // TODO: mark whether we did this inference?
+}
+
+static void DiagnoseIgnoredQualifiers(unsigned Quals,
+ SourceLocation ConstQualLoc,
+ SourceLocation VolatileQualLoc,
+ SourceLocation RestrictQualLoc,
+ Sema& S) {
+ std::string QualStr;
+ unsigned NumQuals = 0;
+ SourceLocation Loc;
+
+ FixItHint ConstFixIt;
+ FixItHint VolatileFixIt;
+ FixItHint RestrictFixIt;
+
+ const SourceManager &SM = S.getSourceManager();
+
+ // FIXME: The locations here are set kind of arbitrarily. It'd be nicer to
+ // find a range and grow it to encompass all the qualifiers, regardless of
+ // the order in which they textually appear.
+ if (Quals & Qualifiers::Const) {
+ ConstFixIt = FixItHint::CreateRemoval(ConstQualLoc);
+ QualStr = "const";
+ ++NumQuals;
+ if (!Loc.isValid() || SM.isBeforeInTranslationUnit(ConstQualLoc, Loc))
+ Loc = ConstQualLoc;
+ }
+ if (Quals & Qualifiers::Volatile) {
+ VolatileFixIt = FixItHint::CreateRemoval(VolatileQualLoc);
+ QualStr += (NumQuals == 0 ? "volatile" : " volatile");
+ ++NumQuals;
+ if (!Loc.isValid() || SM.isBeforeInTranslationUnit(VolatileQualLoc, Loc))
+ Loc = VolatileQualLoc;
+ }
+ if (Quals & Qualifiers::Restrict) {
+ RestrictFixIt = FixItHint::CreateRemoval(RestrictQualLoc);
+ QualStr += (NumQuals == 0 ? "restrict" : " restrict");
+ ++NumQuals;
+ if (!Loc.isValid() || SM.isBeforeInTranslationUnit(RestrictQualLoc, Loc))
+ Loc = RestrictQualLoc;
+ }
+
+ assert(NumQuals > 0 && "No known qualifiers?");
+
+ S.Diag(Loc, diag::warn_qual_return_type)
+ << QualStr << NumQuals << ConstFixIt << VolatileFixIt << RestrictFixIt;
+}
+
+static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
+ TypeSourceInfo *&ReturnTypeInfo) {
+ Sema &SemaRef = state.getSema();
+ Declarator &D = state.getDeclarator();
+ QualType T;
+ ReturnTypeInfo = 0;
+
+ // The TagDecl owned by the DeclSpec.
+ TagDecl *OwnedTagDecl = 0;
+
+ switch (D.getName().getKind()) {
+ case UnqualifiedId::IK_ImplicitSelfParam:
+ case UnqualifiedId::IK_OperatorFunctionId:
+ case UnqualifiedId::IK_Identifier:
+ case UnqualifiedId::IK_LiteralOperatorId:
+ case UnqualifiedId::IK_TemplateId:
+ T = ConvertDeclSpecToType(state);
+
+ if (!D.isInvalidType() && D.getDeclSpec().isTypeSpecOwned()) {
+ OwnedTagDecl = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
+ // Owned declaration is embedded in declarator.
+ OwnedTagDecl->setEmbeddedInDeclarator(true);
+ }
+ break;
+
+ case UnqualifiedId::IK_ConstructorName:
+ case UnqualifiedId::IK_ConstructorTemplateId:
+ case UnqualifiedId::IK_DestructorName:
+ // Constructors and destructors don't have return types. Use
+ // "void" instead.
+ T = SemaRef.Context.VoidTy;
+ break;
+
+ case UnqualifiedId::IK_ConversionFunctionId:
+ // The result type of a conversion function is the type that it
+ // converts to.
+ T = SemaRef.GetTypeFromParser(D.getName().ConversionFunctionId,
+ &ReturnTypeInfo);
+ break;
+ }
+
+ if (D.getAttributes())
+ distributeTypeAttrsFromDeclarator(state, T);
+
+ // C++11 [dcl.spec.auto]p5: reject 'auto' if it is not in an allowed context.
+ // In C++11, a function declarator using 'auto' must have a trailing return
+ // type (this is checked later) and we can skip this. In other languages
+ // using auto, we need to check regardless.
+ if (D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto &&
+ (!SemaRef.getLangOpts().CPlusPlus0x || !D.isFunctionDeclarator())) {
+ int Error = -1;
+
+ switch (D.getContext()) {
+ case Declarator::KNRTypeListContext:
+ llvm_unreachable("K&R type lists aren't allowed in C++");
+ case Declarator::LambdaExprContext:
+ llvm_unreachable("Can't specify a type specifier in lambda grammar");
+ case Declarator::ObjCParameterContext:
+ case Declarator::ObjCResultContext:
+ case Declarator::PrototypeContext:
+ Error = 0; // Function prototype
+ break;
+ case Declarator::MemberContext:
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_static)
+ break;
+ switch (cast<TagDecl>(SemaRef.CurContext)->getTagKind()) {
+ case TTK_Enum: llvm_unreachable("unhandled tag kind");
+ case TTK_Struct: Error = 1; /* Struct member */ break;
+ case TTK_Union: Error = 2; /* Union member */ break;
+ case TTK_Class: Error = 3; /* Class member */ break;
+ }
+ break;
+ case Declarator::CXXCatchContext:
+ case Declarator::ObjCCatchContext:
+ Error = 4; // Exception declaration
+ break;
+ case Declarator::TemplateParamContext:
+ Error = 5; // Template parameter
+ break;
+ case Declarator::BlockLiteralContext:
+ Error = 6; // Block literal
+ break;
+ case Declarator::TemplateTypeArgContext:
+ Error = 7; // Template type argument
+ break;
+ case Declarator::AliasDeclContext:
+ case Declarator::AliasTemplateContext:
+ Error = 9; // Type alias
+ break;
+ case Declarator::TrailingReturnContext:
+ Error = 10; // Function return type
+ break;
+ case Declarator::TypeNameContext:
+ Error = 11; // Generic
+ break;
+ case Declarator::FileContext:
+ case Declarator::BlockContext:
+ case Declarator::ForContext:
+ case Declarator::ConditionContext:
+ case Declarator::CXXNewContext:
+ break;
+ }
+
+ if (D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef)
+ Error = 8;
+
+ // In Objective-C it is an error to use 'auto' on a function declarator.
+ if (D.isFunctionDeclarator())
+ Error = 10;
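+ // For example (illustrative), in C++11:
+ //   void f(auto x);          // rejected: 'auto' in a function prototype
+ //   struct S { auto m; };    // rejected: 'auto' non-static data member
+ //   typedef auto T;          // rejected: 'auto' in a typedef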
+
+ // C++11 [dcl.spec.auto]p2: 'auto' is always fine if the declarator
+ // contains a trailing return type. That is only legal at the outermost
+ // level. Check all declarator chunks (outermost first) anyway, to give
+ // better diagnostics.
+ if (SemaRef.getLangOpts().CPlusPlus0x && Error != -1) {
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
+ unsigned chunkIndex = e - i - 1;
+ state.setCurrentChunkIndex(chunkIndex);
+ DeclaratorChunk &DeclType = D.getTypeObject(chunkIndex);
+ if (DeclType.Kind == DeclaratorChunk::Function) {
+ const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
+ if (FTI.TrailingReturnType) {
+ Error = -1;
+ break;
+ }
+ }
+ }
+ }
+
+ if (Error != -1) {
+ SemaRef.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
+ diag::err_auto_not_allowed)
+ << Error;
+ T = SemaRef.Context.IntTy;
+ D.setInvalidType(true);
+ } else
+ SemaRef.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
+ diag::warn_cxx98_compat_auto_type_specifier);
+ }
+
+ if (SemaRef.getLangOpts().CPlusPlus &&
+ OwnedTagDecl && OwnedTagDecl->isCompleteDefinition()) {
+ // Check the contexts where C++ forbids the declaration of a new class
+ // or enumeration in a type-specifier-seq.
+ switch (D.getContext()) {
+ case Declarator::TrailingReturnContext:
+ // Class and enumeration definitions are syntactically not allowed in
+ // trailing return types.
+ llvm_unreachable("parser should not have allowed this");
+ break;
+ case Declarator::FileContext:
+ case Declarator::MemberContext:
+ case Declarator::BlockContext:
+ case Declarator::ForContext:
+ case Declarator::BlockLiteralContext:
+ case Declarator::LambdaExprContext:
+ // C++11 [dcl.type]p3:
+ // A type-specifier-seq shall not define a class or enumeration unless
+ // it appears in the type-id of an alias-declaration (7.1.3) that is not
+ // the declaration of a template-declaration.
+ case Declarator::AliasDeclContext:
+ break;
+ case Declarator::AliasTemplateContext:
+ SemaRef.Diag(OwnedTagDecl->getLocation(),
+ diag::err_type_defined_in_alias_template)
+ << SemaRef.Context.getTypeDeclType(OwnedTagDecl);
+ break;
+ case Declarator::TypeNameContext:
+ case Declarator::TemplateParamContext:
+ case Declarator::CXXNewContext:
+ case Declarator::CXXCatchContext:
+ case Declarator::ObjCCatchContext:
+ case Declarator::TemplateTypeArgContext:
+ SemaRef.Diag(OwnedTagDecl->getLocation(),
+ diag::err_type_defined_in_type_specifier)
+ << SemaRef.Context.getTypeDeclType(OwnedTagDecl);
+ break;
+ case Declarator::PrototypeContext:
+ case Declarator::ObjCParameterContext:
+ case Declarator::ObjCResultContext:
+ case Declarator::KNRTypeListContext:
+ // C++ [dcl.fct]p6:
+ // Types shall not be defined in return or parameter types.
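+ // For example (illustrative):
+ //   void f(struct S { int x; } s);   // error in C++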
+ SemaRef.Diag(OwnedTagDecl->getLocation(),
+ diag::err_type_defined_in_param_type)
+ << SemaRef.Context.getTypeDeclType(OwnedTagDecl);
+ break;
+ case Declarator::ConditionContext:
+ // C++ 6.4p2:
+ // The type-specifier-seq shall not contain typedef and shall not declare
+ // a new class or enumeration.
+ SemaRef.Diag(OwnedTagDecl->getLocation(),
+ diag::err_type_defined_in_condition);
+ break;
+ }
+ }
+
+ return T;
+}
+
+static std::string getFunctionQualifiersAsString(const FunctionProtoType *FnTy){
+ std::string Quals =
+ Qualifiers::fromCVRMask(FnTy->getTypeQuals()).getAsString();
+
+ switch (FnTy->getRefQualifier()) {
+ case RQ_None:
+ break;
+
+ case RQ_LValue:
+ if (!Quals.empty())
+ Quals += ' ';
+ Quals += '&';
+ break;
+
+ case RQ_RValue:
+ if (!Quals.empty())
+ Quals += ' ';
+ Quals += "&&";
+ break;
+ }
+
+ return Quals;
+}
+
+/// Check that the function type T, which has a cv-qualifier or a ref-qualifier,
+/// can be contained within the declarator chunk DeclType, and produce an
+/// appropriate diagnostic if not.
+static void checkQualifiedFunction(Sema &S, QualType T,
+ DeclaratorChunk &DeclType) {
+ // C++98 [dcl.fct]p4 / C++11 [dcl.fct]p6: a function type with a
+ // cv-qualifier or a ref-qualifier can only appear at the topmost level
+ // of a type.
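+ // For example (illustrative):
+ //   typedef void F() const;
+ //   F *p;       // error: pointer to a const-qualified function type
+ //   F C::*pm;   // OK: pointer to member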
+ int DiagKind = -1;
+ switch (DeclType.Kind) {
+ case DeclaratorChunk::Paren:
+ case DeclaratorChunk::MemberPointer:
+ // These cases are permitted.
+ return;
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Function:
+ // These cases don't allow function types at all; no need to diagnose the
+ // qualifiers separately.
+ return;
+ case DeclaratorChunk::BlockPointer:
+ DiagKind = 0;
+ break;
+ case DeclaratorChunk::Pointer:
+ DiagKind = 1;
+ break;
+ case DeclaratorChunk::Reference:
+ DiagKind = 2;
+ break;
+ }
+
+ assert(DiagKind != -1);
+ S.Diag(DeclType.Loc, diag::err_compound_qualified_function_type)
+ << DiagKind << isa<FunctionType>(T.IgnoreParens()) << T
+ << getFunctionQualifiersAsString(T->castAs<FunctionProtoType>());
+}
+
+static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
+ QualType declSpecType,
+ TypeSourceInfo *TInfo) {
+
+ QualType T = declSpecType;
+ Declarator &D = state.getDeclarator();
+ Sema &S = state.getSema();
+ ASTContext &Context = S.Context;
+ const LangOptions &LangOpts = S.getLangOpts();
+
+ bool ImplicitlyNoexcept = false;
+ if (D.getName().getKind() == UnqualifiedId::IK_OperatorFunctionId &&
+ LangOpts.CPlusPlus0x) {
+ OverloadedOperatorKind OO = D.getName().OperatorFunctionId.Operator;
+ // In C++0x, deallocation functions (normal and array operator delete)
+ // are implicitly noexcept.
+ if (OO == OO_Delete || OO == OO_Array_Delete)
+ ImplicitlyNoexcept = true;
+ }
+
+ // The name we're declaring, if any.
+ DeclarationName Name;
+ if (D.getIdentifier())
+ Name = D.getIdentifier();
+
+ // Does this declaration declare a typedef-name?
+ bool IsTypedefName =
+ D.getDeclSpec().getStorageClassSpec() == DeclSpec::SCS_typedef ||
+ D.getContext() == Declarator::AliasDeclContext ||
+ D.getContext() == Declarator::AliasTemplateContext;
+
+ // Does T refer to a function type with a cv-qualifier or a ref-qualifier?
+ bool IsQualifiedFunction = T->isFunctionProtoType() &&
+ (T->castAs<FunctionProtoType>()->getTypeQuals() != 0 ||
+ T->castAs<FunctionProtoType>()->getRefQualifier() != RQ_None);
+
+ // Walk the DeclTypeInfo, building the recursive type as we go.
+ // DeclTypeInfos are ordered from the identifier out, which is
+ // opposite of what we want :).
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
+ unsigned chunkIndex = e - i - 1;
+ state.setCurrentChunkIndex(chunkIndex);
+ DeclaratorChunk &DeclType = D.getTypeObject(chunkIndex);
+ if (IsQualifiedFunction) {
+ checkQualifiedFunction(S, T, DeclType);
+ IsQualifiedFunction = DeclType.Kind == DeclaratorChunk::Paren;
+ }
+ switch (DeclType.Kind) {
+ case DeclaratorChunk::Paren:
+ T = S.BuildParenType(T);
+ break;
+ case DeclaratorChunk::BlockPointer:
+ // If blocks are disabled, emit an error.
+ if (!LangOpts.Blocks)
+ S.Diag(DeclType.Loc, diag::err_blocks_disable);
+
+ T = S.BuildBlockPointerType(T, D.getIdentifierLoc(), Name);
+ if (DeclType.Cls.TypeQuals)
+ T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Cls.TypeQuals);
+ break;
+ case DeclaratorChunk::Pointer:
+ // Verify that we're not building a pointer to pointer to function with
+ // exception specification.
+ if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) {
+ S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
+ D.setInvalidType(true);
+ // Build the type anyway.
+ }
+ if (LangOpts.ObjC1 && T->getAs<ObjCObjectType>()) {
+ T = Context.getObjCObjectPointerType(T);
+ if (DeclType.Ptr.TypeQuals)
+ T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals);
+ break;
+ }
+ T = S.BuildPointerType(T, DeclType.Loc, Name);
+ if (DeclType.Ptr.TypeQuals)
+ T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Ptr.TypeQuals);
+
+ break;
+ case DeclaratorChunk::Reference: {
+ // Verify that we're not building a reference to pointer to function with
+ // exception specification.
+ if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) {
+ S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
+ D.setInvalidType(true);
+ // Build the type anyway.
+ }
+ T = S.BuildReferenceType(T, DeclType.Ref.LValueRef, DeclType.Loc, Name);
+
+ Qualifiers Quals;
+ if (DeclType.Ref.HasRestrict)
+ T = S.BuildQualifiedType(T, DeclType.Loc, Qualifiers::Restrict);
+ break;
+ }
+ case DeclaratorChunk::Array: {
+ // Verify that we're not building an array of pointers to function with
+ // exception specification.
+ if (LangOpts.CPlusPlus && S.CheckDistantExceptionSpec(T)) {
+ S.Diag(D.getIdentifierLoc(), diag::err_distant_exception_spec);
+ D.setInvalidType(true);
+ // Build the type anyway.
+ }
+ DeclaratorChunk::ArrayTypeInfo &ATI = DeclType.Arr;
+ Expr *ArraySize = static_cast<Expr*>(ATI.NumElts);
+ ArrayType::ArraySizeModifier ASM;
+ if (ATI.isStar)
+ ASM = ArrayType::Star;
+ else if (ATI.hasStatic)
+ ASM = ArrayType::Static;
+ else
+ ASM = ArrayType::Normal;
+ if (ASM == ArrayType::Star && !D.isPrototypeContext()) {
+ // FIXME: This check isn't quite right: it allows star in prototypes
+ // for function definitions, and disallows some edge cases detailed
+ // in http://gcc.gnu.org/ml/gcc-patches/2009-02/msg00133.html
+ S.Diag(DeclType.Loc, diag::err_array_star_outside_prototype);
+ ASM = ArrayType::Normal;
+ D.setInvalidType(true);
+ }
+ T = S.BuildArrayType(T, ASM, ArraySize, ATI.TypeQuals,
+ SourceRange(DeclType.Loc, DeclType.EndLoc), Name);
+ break;
+ }
+ case DeclaratorChunk::Function: {
+ // If the function declarator has a prototype (i.e. it is not () and
+ // does not have a K&R-style identifier list), then the arguments are part
+ // of the type, otherwise the argument list is ().
+ const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
+ IsQualifiedFunction = FTI.TypeQuals || FTI.hasRefQualifier();
+
+ // Check for auto functions and trailing return type and adjust the
+ // return type accordingly.
+ if (!D.isInvalidType()) {
+ // trailing-return-type is only required if we're declaring a function,
+ // and not, for instance, a pointer to a function.
+ if (D.getDeclSpec().getTypeSpecType() == DeclSpec::TST_auto &&
+ !FTI.TrailingReturnType && chunkIndex == 0) {
+ S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
+ diag::err_auto_missing_trailing_return);
+ T = Context.IntTy;
+ D.setInvalidType(true);
+ } else if (FTI.TrailingReturnType) {
+ // T must be exactly 'auto' at this point. See CWG issue 681.
+ if (isa<ParenType>(T)) {
+ S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
+ diag::err_trailing_return_in_parens)
+ << T << D.getDeclSpec().getSourceRange();
+ D.setInvalidType(true);
+ } else if (D.getContext() != Declarator::LambdaExprContext &&
+ (T.hasQualifiers() || !isa<AutoType>(T))) {
+ S.Diag(D.getDeclSpec().getTypeSpecTypeLoc(),
+ diag::err_trailing_return_without_auto)
+ << T << D.getDeclSpec().getSourceRange();
+ D.setInvalidType(true);
+ }
+
+ T = S.GetTypeFromParser(
+ ParsedType::getFromOpaquePtr(FTI.TrailingReturnType),
+ &TInfo);
+ }
+ }
+
+ // C99 6.7.5.3p1: The return type may not be a function or array type.
+ // For conversion functions, we'll diagnose this particular error later.
+ if ((T->isArrayType() || T->isFunctionType()) &&
+ (D.getName().getKind() != UnqualifiedId::IK_ConversionFunctionId)) {
+ unsigned diagID = diag::err_func_returning_array_function;
+ // The last chunk to be processed (chunkIndex == 0) in a block-literal
+ // context is the function chunk that represents the block itself.
+ if (chunkIndex == 0 &&
+ D.getContext() == Declarator::BlockLiteralContext)
+ diagID = diag::err_block_returning_array_function;
+ S.Diag(DeclType.Loc, diagID) << T->isFunctionType() << T;
+ T = Context.IntTy;
+ D.setInvalidType(true);
+ }
+
+ // Do not allow returning half FP value.
+ // FIXME: This really should be in BuildFunctionType.
+ if (T->isHalfType()) {
+ S.Diag(D.getIdentifierLoc(),
+ diag::err_parameters_retval_cannot_have_fp16_type) << 1
+ << FixItHint::CreateInsertion(D.getIdentifierLoc(), "*");
+ D.setInvalidType(true);
+ }
+
+ // cv-qualifiers on return types are pointless except when the type is a
+ // class type in C++.
+ if (isa<PointerType>(T) && T.getLocalCVRQualifiers() &&
+ (D.getName().getKind() != UnqualifiedId::IK_ConversionFunctionId) &&
+ (!LangOpts.CPlusPlus || !T->isDependentType())) {
+ assert(chunkIndex + 1 < e && "No DeclaratorChunk for the return type?");
+ DeclaratorChunk ReturnTypeChunk = D.getTypeObject(chunkIndex + 1);
+ assert(ReturnTypeChunk.Kind == DeclaratorChunk::Pointer);
+
+ DeclaratorChunk::PointerTypeInfo &PTI = ReturnTypeChunk.Ptr;
+
+ DiagnoseIgnoredQualifiers(PTI.TypeQuals,
+ SourceLocation::getFromRawEncoding(PTI.ConstQualLoc),
+ SourceLocation::getFromRawEncoding(PTI.VolatileQualLoc),
+ SourceLocation::getFromRawEncoding(PTI.RestrictQualLoc),
+ S);
+
+ } else if (T.getCVRQualifiers() && D.getDeclSpec().getTypeQualifiers() &&
+ (!LangOpts.CPlusPlus ||
+ (!T->isDependentType() && !T->isRecordType()))) {
+
+ DiagnoseIgnoredQualifiers(D.getDeclSpec().getTypeQualifiers(),
+ D.getDeclSpec().getConstSpecLoc(),
+ D.getDeclSpec().getVolatileSpecLoc(),
+ D.getDeclSpec().getRestrictSpecLoc(),
+ S);
+ }
+
+ if (LangOpts.CPlusPlus && D.getDeclSpec().isTypeSpecOwned()) {
+ // C++ [dcl.fct]p6:
+ // Types shall not be defined in return or parameter types.
+ TagDecl *Tag = cast<TagDecl>(D.getDeclSpec().getRepAsDecl());
+ if (Tag->isCompleteDefinition())
+ S.Diag(Tag->getLocation(), diag::err_type_defined_in_result_type)
+ << Context.getTypeDeclType(Tag);
+ }
+
+ // Exception specs are not allowed in typedefs. Complain, but add it
+ // anyway.
+ if (IsTypedefName && FTI.getExceptionSpecType())
+ S.Diag(FTI.getExceptionSpecLoc(), diag::err_exception_spec_in_typedef)
+ << (D.getContext() == Declarator::AliasDeclContext ||
+ D.getContext() == Declarator::AliasTemplateContext);
+
+ if (!FTI.NumArgs && !FTI.isVariadic && !LangOpts.CPlusPlus) {
+ // Simple void foo(), where the incoming T is the result type.
+ T = Context.getFunctionNoProtoType(T);
+ } else {
+ // We allow a zero-parameter variadic function in C if the
+ // function is marked with the "overloadable" attribute. Scan
+ // for this attribute now.
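+ // For example (illustrative), in C:
+ //   void f(...);                                 // error: ellipsis needs a named parameter
+ //   void g(...) __attribute__((overloadable));   // accepted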
+ if (!FTI.NumArgs && FTI.isVariadic && !LangOpts.CPlusPlus) {
+ bool Overloadable = false;
+ for (const AttributeList *Attrs = D.getAttributes();
+ Attrs; Attrs = Attrs->getNext()) {
+ if (Attrs->getKind() == AttributeList::AT_overloadable) {
+ Overloadable = true;
+ break;
+ }
+ }
+
+ if (!Overloadable)
+ S.Diag(FTI.getEllipsisLoc(), diag::err_ellipsis_first_arg);
+ }
+
+ if (FTI.NumArgs && FTI.ArgInfo[0].Param == 0) {
+ // C99 6.7.5.3p3: Reject int(x,y,z) when it's not a function
+ // definition.
+ S.Diag(FTI.ArgInfo[0].IdentLoc, diag::err_ident_list_in_fn_declaration);
+ D.setInvalidType(true);
+ break;
+ }
+
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.Variadic = FTI.isVariadic;
+ EPI.HasTrailingReturn = FTI.TrailingReturnType;
+ EPI.TypeQuals = FTI.TypeQuals;
+ EPI.RefQualifier = !FTI.hasRefQualifier()? RQ_None
+ : FTI.RefQualifierIsLValueRef? RQ_LValue
+ : RQ_RValue;
+
+ // Otherwise, we have a function with an argument list that is
+ // potentially variadic.
+ SmallVector<QualType, 16> ArgTys;
+ ArgTys.reserve(FTI.NumArgs);
+
+ SmallVector<bool, 16> ConsumedArguments;
+ ConsumedArguments.reserve(FTI.NumArgs);
+ bool HasAnyConsumedArguments = false;
+
+ for (unsigned i = 0, e = FTI.NumArgs; i != e; ++i) {
+ ParmVarDecl *Param = cast<ParmVarDecl>(FTI.ArgInfo[i].Param);
+ QualType ArgTy = Param->getType();
+ assert(!ArgTy.isNull() && "Couldn't parse type?");
+
+ // Adjust the parameter type.
+ assert((ArgTy == Context.getAdjustedParameterType(ArgTy)) &&
+ "Unadjusted type?");
+
+ // Look for 'void'. void is allowed only as a single argument to a
+ // function with no other parameters (C99 6.7.5.3p10). We record
+ // int(void) as a FunctionProtoType with an empty argument list.
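+ // For example (illustrative):
+ //   int f(void);         // OK: empty parameter list
+ //   int g(int, void);    // rejected
+ //   int h(void x);       // rejected: named 'void' parameter
+ //   int k(const void);   // rejected: qualified 'void'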
+ if (ArgTy->isVoidType()) {
+ // If this is something like 'float(int, void)', reject it. 'void'
+ // is an incomplete type (C99 6.2.5p19) and function decls cannot
+ // have arguments of incomplete type.
+ if (FTI.NumArgs != 1 || FTI.isVariadic) {
+ S.Diag(DeclType.Loc, diag::err_void_only_param);
+ ArgTy = Context.IntTy;
+ Param->setType(ArgTy);
+ } else if (FTI.ArgInfo[i].Ident) {
+ // Reject, but continue to parse 'int(void abc)'.
+ S.Diag(FTI.ArgInfo[i].IdentLoc,
+ diag::err_param_with_void_type);
+ ArgTy = Context.IntTy;
+ Param->setType(ArgTy);
+ } else {
+ // Reject, but continue to parse 'float(const void)'.
+ if (ArgTy.hasQualifiers())
+ S.Diag(DeclType.Loc, diag::err_void_param_qualified);
+
+ // Do not add 'void' to the ArgTys list.
+ break;
+ }
+ } else if (ArgTy->isHalfType()) {
+ // Disallow half FP arguments.
+ // FIXME: This really should be in BuildFunctionType.
+ S.Diag(Param->getLocation(),
+ diag::err_parameters_retval_cannot_have_fp16_type) << 0
+ << FixItHint::CreateInsertion(Param->getLocation(), "*");
+ D.setInvalidType();
+ } else if (!FTI.hasPrototype) {
+ if (ArgTy->isPromotableIntegerType()) {
+ ArgTy = Context.getPromotedIntegerType(ArgTy);
+ Param->setKNRPromoted(true);
+ } else if (const BuiltinType* BTy = ArgTy->getAs<BuiltinType>()) {
+ if (BTy->getKind() == BuiltinType::Float) {
+ ArgTy = Context.DoubleTy;
+ Param->setKNRPromoted(true);
+ }
+ }
+ }
+
+ if (LangOpts.ObjCAutoRefCount) {
+ bool Consumed = Param->hasAttr<NSConsumedAttr>();
+ ConsumedArguments.push_back(Consumed);
+ HasAnyConsumedArguments |= Consumed;
+ }
+
+ ArgTys.push_back(ArgTy);
+ }
+
+ if (HasAnyConsumedArguments)
+ EPI.ConsumedArguments = ConsumedArguments.data();
+
+ SmallVector<QualType, 4> Exceptions;
+ EPI.ExceptionSpecType = FTI.getExceptionSpecType();
+ if (FTI.getExceptionSpecType() == EST_Dynamic) {
+ Exceptions.reserve(FTI.NumExceptions);
+ for (unsigned ei = 0, ee = FTI.NumExceptions; ei != ee; ++ei) {
+ // FIXME: Preserve type source info.
+ QualType ET = S.GetTypeFromParser(FTI.Exceptions[ei].Ty);
+ // Check that the type is valid for an exception spec, and
+ // drop it if not.
+ if (!S.CheckSpecifiedExceptionType(ET, FTI.Exceptions[ei].Range))
+ Exceptions.push_back(ET);
+ }
+ EPI.NumExceptions = Exceptions.size();
+ EPI.Exceptions = Exceptions.data();
+ } else if (FTI.getExceptionSpecType() == EST_ComputedNoexcept) {
+ // If an error occurred, there's no expression here.
+ if (Expr *NoexceptExpr = FTI.NoexceptExpr) {
+ assert((NoexceptExpr->isTypeDependent() ||
+ NoexceptExpr->getType()->getCanonicalTypeUnqualified() ==
+ Context.BoolTy) &&
+ "Parser should have made sure that the expression is boolean");
+ if (!NoexceptExpr->isValueDependent())
+ NoexceptExpr = S.VerifyIntegerConstantExpression(NoexceptExpr, 0,
+ S.PDiag(diag::err_noexcept_needs_constant_expression),
+ /*AllowFold*/ false).take();
+ EPI.NoexceptExpr = NoexceptExpr;
+ }
+ } else if (FTI.getExceptionSpecType() == EST_None &&
+ ImplicitlyNoexcept && chunkIndex == 0) {
+ // Only the outermost chunk is marked noexcept, of course.
+ EPI.ExceptionSpecType = EST_BasicNoexcept;
+ }
+
+ T = Context.getFunctionType(T, ArgTys.data(), ArgTys.size(), EPI);
+ }
+
+ break;
+ }
+ case DeclaratorChunk::MemberPointer:
+ // The scope spec must refer to a class, or be dependent.
+ CXXScopeSpec &SS = DeclType.Mem.Scope();
+ QualType ClsType;
+ if (SS.isInvalid()) {
+ // Avoid emitting extra errors if we already errored on the scope.
+ D.setInvalidType(true);
+ } else if (S.isDependentScopeSpecifier(SS) ||
+ dyn_cast_or_null<CXXRecordDecl>(S.computeDeclContext(SS))) {
+ NestedNameSpecifier *NNS
+ = static_cast<NestedNameSpecifier*>(SS.getScopeRep());
+ NestedNameSpecifier *NNSPrefix = NNS->getPrefix();
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ ClsType = Context.getDependentNameType(ETK_None, NNSPrefix,
+ NNS->getAsIdentifier());
+ break;
+
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ case NestedNameSpecifier::Global:
+ llvm_unreachable("Nested-name-specifier must name a type");
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ ClsType = QualType(NNS->getAsType(), 0);
+ // Note: if the NNS has a prefix and ClsType is a nondependent
+ // TemplateSpecializationType, then the NNS prefix is NOT included
+ // in ClsType; hence we wrap ClsType into an ElaboratedType.
+ // NOTE: in particular, no wrap occurs if ClsType already is an
+ // Elaborated, DependentName, or DependentTemplateSpecialization.
+ if (NNSPrefix && isa<TemplateSpecializationType>(NNS->getAsType()))
+ ClsType = Context.getElaboratedType(ETK_None, NNSPrefix, ClsType);
+ break;
+ }
+ } else {
+ S.Diag(DeclType.Mem.Scope().getBeginLoc(),
+ diag::err_illegal_decl_mempointer_in_nonclass)
+ << (D.getIdentifier() ? D.getIdentifier()->getName() : "type name")
+ << DeclType.Mem.Scope().getRange();
+ D.setInvalidType(true);
+ }
+
+ if (!ClsType.isNull())
+ T = S.BuildMemberPointerType(T, ClsType, DeclType.Loc, D.getIdentifier());
+ if (T.isNull()) {
+ T = Context.IntTy;
+ D.setInvalidType(true);
+ } else if (DeclType.Mem.TypeQuals) {
+ T = S.BuildQualifiedType(T, DeclType.Loc, DeclType.Mem.TypeQuals);
+ }
+ break;
+ }
+
+ if (T.isNull()) {
+ D.setInvalidType(true);
+ T = Context.IntTy;
+ }
+
+ // See if there are any attributes on this declarator chunk.
+ if (AttributeList *attrs = const_cast<AttributeList*>(DeclType.getAttrs()))
+ processTypeAttrs(state, T, false, attrs);
+ }
+
+ if (LangOpts.CPlusPlus && T->isFunctionType()) {
+ const FunctionProtoType *FnTy = T->getAs<FunctionProtoType>();
+ assert(FnTy && "Why oh why is there not a FunctionProtoType here?");
+
+ // C++ 8.3.5p4:
+ // A cv-qualifier-seq shall only be part of the function type
+ // for a nonstatic member function, the function type to which a pointer
+ // to member refers, or the top-level function type of a function typedef
+ // declaration.
+ //
+ // Core issue 547 also allows cv-qualifiers on function types that are
+ // top-level template type arguments.
+ bool FreeFunction;
+ if (!D.getCXXScopeSpec().isSet()) {
+ FreeFunction = ((D.getContext() != Declarator::MemberContext &&
+ D.getContext() != Declarator::LambdaExprContext) ||
+ D.getDeclSpec().isFriendSpecified());
+ } else {
+ DeclContext *DC = S.computeDeclContext(D.getCXXScopeSpec());
+ FreeFunction = (DC && !DC->isRecord());
+ }
+
+ // C++0x [dcl.constexpr]p8: A constexpr specifier for a non-static member
+ // function that is not a constructor declares that function to be const.
+ if (D.getDeclSpec().isConstexprSpecified() && !FreeFunction &&
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static &&
+ D.getName().getKind() != UnqualifiedId::IK_ConstructorName &&
+ D.getName().getKind() != UnqualifiedId::IK_ConstructorTemplateId &&
+ !(FnTy->getTypeQuals() & DeclSpec::TQ_const)) {
+ // Rebuild function type adding a 'const' qualifier.
+ FunctionProtoType::ExtProtoInfo EPI = FnTy->getExtProtoInfo();
+ EPI.TypeQuals |= DeclSpec::TQ_const;
+ T = Context.getFunctionType(FnTy->getResultType(),
+ FnTy->arg_type_begin(),
+ FnTy->getNumArgs(), EPI);
+ }
+
+ // C++11 [dcl.fct]p6 (w/DR1417):
+ // An attempt to specify a function type with a cv-qualifier-seq or a
+ // ref-qualifier (including by typedef-name) is ill-formed unless it is:
+ // - the function type for a non-static member function,
+ // - the function type to which a pointer to member refers,
+ // - the top-level function type of a function typedef declaration or
+ // alias-declaration,
+ // - the type-id in the default argument of a type-parameter, or
+ // - the type-id of a template-argument for a type-parameter
+ if (IsQualifiedFunction &&
+ !(!FreeFunction &&
+ D.getDeclSpec().getStorageClassSpec() != DeclSpec::SCS_static) &&
+ !IsTypedefName &&
+ D.getContext() != Declarator::TemplateTypeArgContext) {
+ SourceLocation Loc = D.getLocStart();
+ SourceRange RemovalRange;
+ unsigned I;
+ if (D.isFunctionDeclarator(I)) {
+ SmallVector<SourceLocation, 4> RemovalLocs;
+ const DeclaratorChunk &Chunk = D.getTypeObject(I);
+ assert(Chunk.Kind == DeclaratorChunk::Function);
+ if (Chunk.Fun.hasRefQualifier())
+ RemovalLocs.push_back(Chunk.Fun.getRefQualifierLoc());
+ if (Chunk.Fun.TypeQuals & Qualifiers::Const)
+ RemovalLocs.push_back(Chunk.Fun.getConstQualifierLoc());
+ if (Chunk.Fun.TypeQuals & Qualifiers::Volatile)
+ RemovalLocs.push_back(Chunk.Fun.getVolatileQualifierLoc());
+ // FIXME: We do not track the location of the __restrict qualifier.
+ //if (Chunk.Fun.TypeQuals & Qualifiers::Restrict)
+ // RemovalLocs.push_back(Chunk.Fun.getRestrictQualifierLoc());
+ if (!RemovalLocs.empty()) {
+ std::sort(RemovalLocs.begin(), RemovalLocs.end(),
+ SourceManager::LocBeforeThanCompare(S.getSourceManager()));
+ RemovalRange = SourceRange(RemovalLocs.front(), RemovalLocs.back());
+ Loc = RemovalLocs.front();
+ }
+ }
+
+ S.Diag(Loc, diag::err_invalid_qualified_function_type)
+ << FreeFunction << D.isFunctionDeclarator() << T
+ << getFunctionQualifiersAsString(FnTy)
+ << FixItHint::CreateRemoval(RemovalRange);
+
+ // Strip the cv-qualifiers and ref-qualifiers from the type.
+ FunctionProtoType::ExtProtoInfo EPI = FnTy->getExtProtoInfo();
+ EPI.TypeQuals = 0;
+ EPI.RefQualifier = RQ_None;
+
+ T = Context.getFunctionType(FnTy->getResultType(),
+ FnTy->arg_type_begin(),
+ FnTy->getNumArgs(), EPI);
+ }
+ }
+
+ // Apply any undistributed attributes from the declarator.
+ if (!T.isNull())
+ if (AttributeList *attrs = D.getAttributes())
+ processTypeAttrs(state, T, false, attrs);
+
+ // Diagnose any ignored type attributes.
+ if (!T.isNull()) state.diagnoseIgnoredTypeAttrs(T);
+
+ // C++0x [dcl.constexpr]p9:
+ // A constexpr specifier used in an object declaration declares the object
+ // as const.
+ if (D.getDeclSpec().isConstexprSpecified() && T->isObjectType()) {
+ T.addConst();
+ }
+
+ // If there was an ellipsis in the declarator, the declaration declares a
+ // parameter pack whose type may be a pack expansion type.
+ if (D.hasEllipsis() && !T.isNull()) {
+ // C++0x [dcl.fct]p13:
+ // A declarator-id or abstract-declarator containing an ellipsis shall
+ // only be used in a parameter-declaration. Such a parameter-declaration
+ // is a parameter pack (14.5.3). [...]
+ switch (D.getContext()) {
+ case Declarator::PrototypeContext:
+ // C++0x [dcl.fct]p13:
+ // [...] When it is part of a parameter-declaration-clause, the
+ // parameter pack is a function parameter pack (14.5.3). The type T
+ // of the declarator-id of the function parameter pack shall contain
+ // a template parameter pack; each template parameter pack in T is
+ // expanded by the function parameter pack.
+ //
+ // We represent function parameter packs as function parameters whose
+ // type is a pack expansion.
+ if (!T->containsUnexpandedParameterPack()) {
+ S.Diag(D.getEllipsisLoc(),
+ diag::err_function_parameter_pack_without_parameter_packs)
+ << T << D.getSourceRange();
+ D.setEllipsisLoc(SourceLocation());
+ } else {
+ T = Context.getPackExpansionType(T, llvm::Optional<unsigned>());
+ }
+ break;
+
+ case Declarator::TemplateParamContext:
+ // C++0x [temp.param]p15:
+ // If a template-parameter is a [...] is a parameter-declaration that
+ // declares a parameter pack (8.3.5), then the template-parameter is a
+ // template parameter pack (14.5.3).
+ //
+ // Note: core issue 778 clarifies that, if there are any unexpanded
+ // parameter packs in the type of the non-type template parameter, then
+ // it expands those parameter packs.
+ if (T->containsUnexpandedParameterPack())
+ T = Context.getPackExpansionType(T, llvm::Optional<unsigned>());
+ else
+ S.Diag(D.getEllipsisLoc(),
+ LangOpts.CPlusPlus0x
+ ? diag::warn_cxx98_compat_variadic_templates
+ : diag::ext_variadic_templates);
+ break;
+
+ case Declarator::FileContext:
+ case Declarator::KNRTypeListContext:
+ case Declarator::ObjCParameterContext: // FIXME: special diagnostic here?
+ case Declarator::ObjCResultContext: // FIXME: special diagnostic here?
+ case Declarator::TypeNameContext:
+ case Declarator::CXXNewContext:
+ case Declarator::AliasDeclContext:
+ case Declarator::AliasTemplateContext:
+ case Declarator::MemberContext:
+ case Declarator::BlockContext:
+ case Declarator::ForContext:
+ case Declarator::ConditionContext:
+ case Declarator::CXXCatchContext:
+ case Declarator::ObjCCatchContext:
+ case Declarator::BlockLiteralContext:
+ case Declarator::LambdaExprContext:
+ case Declarator::TrailingReturnContext:
+ case Declarator::TemplateTypeArgContext:
+ // FIXME: We may want to allow parameter packs in block-literal contexts
+ // in the future.
+ S.Diag(D.getEllipsisLoc(), diag::err_ellipsis_in_declarator_not_parameter);
+ D.setEllipsisLoc(SourceLocation());
+ break;
+ }
+ }
+
+ if (T.isNull())
+ return Context.getNullTypeSourceInfo();
+ else if (D.isInvalidType())
+ return Context.getTrivialTypeSourceInfo(T);
+
+ return S.GetTypeSourceInfoForDeclarator(D, T, TInfo);
+}
+
+/// GetTypeForDeclarator - Convert the type for the specified
+/// declarator to Type instances.
+///
+/// The result of this call will never be null, but the associated
+/// type may be a null type if there's an unrecoverable error.
+TypeSourceInfo *Sema::GetTypeForDeclarator(Declarator &D, Scope *S) {
+ // Determine the type of the declarator. Not all forms of declarator
+ // have a type.
+
+ TypeProcessingState state(*this, D);
+
+ TypeSourceInfo *ReturnTypeInfo = 0;
+ QualType T = GetDeclSpecTypeForDeclarator(state, ReturnTypeInfo);
+ if (T.isNull())
+ return Context.getNullTypeSourceInfo();
+
+ if (D.isPrototypeContext() && getLangOpts().ObjCAutoRefCount)
+ inferARCWriteback(state, T);
+
+ return GetFullTypeForDeclarator(state, T, ReturnTypeInfo);
+}
+
+static void transferARCOwnershipToDeclSpec(Sema &S,
+ QualType &declSpecTy,
+ Qualifiers::ObjCLifetime ownership) {
+ if (declSpecTy->isObjCRetainableType() &&
+ declSpecTy.getObjCLifetime() == Qualifiers::OCL_None) {
+ Qualifiers qs;
+ qs.addObjCLifetime(ownership);
+ declSpecTy = S.Context.getQualifiedType(declSpecTy, qs);
+ }
+}
+
+static void transferARCOwnershipToDeclaratorChunk(TypeProcessingState &state,
+ Qualifiers::ObjCLifetime ownership,
+ unsigned chunkIndex) {
+ Sema &S = state.getSema();
+ Declarator &D = state.getDeclarator();
+
+ // Look for an explicit lifetime attribute.
+ DeclaratorChunk &chunk = D.getTypeObject(chunkIndex);
+ for (const AttributeList *attr = chunk.getAttrs(); attr;
+ attr = attr->getNext())
+ if (attr->getKind() == AttributeList::AT_objc_ownership)
+ return;
+
+ const char *attrStr = 0;
+ switch (ownership) {
+ case Qualifiers::OCL_None: llvm_unreachable("no ownership!");
+ case Qualifiers::OCL_ExplicitNone: attrStr = "none"; break;
+ case Qualifiers::OCL_Strong: attrStr = "strong"; break;
+ case Qualifiers::OCL_Weak: attrStr = "weak"; break;
+ case Qualifiers::OCL_Autoreleasing: attrStr = "autoreleasing"; break;
+ }
+
+ // If there wasn't one, add one (with an invalid source location
+ // so that we don't make an AttributedType for it).
+ AttributeList *attr = D.getAttributePool()
+ .create(&S.Context.Idents.get("objc_ownership"), SourceLocation(),
+ /*scope*/ 0, SourceLocation(),
+ &S.Context.Idents.get(attrStr), SourceLocation(),
+ /*args*/ 0, 0,
+ /*declspec*/ false, /*C++0x*/ false);
+ spliceAttrIntoList(*attr, chunk.getAttrListRef());
+
+ // TODO: mark whether we did this inference?
+}
+
+/// \brief Used for transferring ownership in casts resulting in l-values.
+static void transferARCOwnership(TypeProcessingState &state,
+ QualType &declSpecTy,
+ Qualifiers::ObjCLifetime ownership) {
+ Sema &S = state.getSema();
+ Declarator &D = state.getDeclarator();
+
+ int inner = -1;
+ bool hasIndirection = false;
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
+ DeclaratorChunk &chunk = D.getTypeObject(i);
+ switch (chunk.Kind) {
+ case DeclaratorChunk::Paren:
+ // Ignore parens.
+ break;
+
+ case DeclaratorChunk::Array:
+ case DeclaratorChunk::Reference:
+ case DeclaratorChunk::Pointer:
+ if (inner != -1)
+ hasIndirection = true;
+ inner = i;
+ break;
+
+ case DeclaratorChunk::BlockPointer:
+ if (inner != -1)
+ transferARCOwnershipToDeclaratorChunk(state, ownership, i);
+ return;
+
+ case DeclaratorChunk::Function:
+ case DeclaratorChunk::MemberPointer:
+ return;
+ }
+ }
+
+ if (inner == -1)
+ return;
+
+ DeclaratorChunk &chunk = D.getTypeObject(inner);
+ if (chunk.Kind == DeclaratorChunk::Pointer) {
+ if (declSpecTy->isObjCRetainableType())
+ return transferARCOwnershipToDeclSpec(S, declSpecTy, ownership);
+ if (declSpecTy->isObjCObjectType() && hasIndirection)
+ return transferARCOwnershipToDeclaratorChunk(state, ownership, inner);
+ } else {
+ assert(chunk.Kind == DeclaratorChunk::Array ||
+ chunk.Kind == DeclaratorChunk::Reference);
+ return transferARCOwnershipToDeclSpec(S, declSpecTy, ownership);
+ }
+}
+
+TypeSourceInfo *Sema::GetTypeForDeclaratorCast(Declarator &D, QualType FromTy) {
+ TypeProcessingState state(*this, D);
+
+ TypeSourceInfo *ReturnTypeInfo = 0;
+ QualType declSpecTy = GetDeclSpecTypeForDeclarator(state, ReturnTypeInfo);
+ if (declSpecTy.isNull())
+ return Context.getNullTypeSourceInfo();
+
+ if (getLangOpts().ObjCAutoRefCount) {
+ Qualifiers::ObjCLifetime ownership = Context.getInnerObjCOwnership(FromTy);
+ if (ownership != Qualifiers::OCL_None)
+ transferARCOwnership(state, declSpecTy, ownership);
+ }
+
+ return GetFullTypeForDeclarator(state, declSpecTy, ReturnTypeInfo);
+}
+
+/// Map an AttributedType::Kind to an AttributeList::Kind.
+static AttributeList::Kind getAttrListKind(AttributedType::Kind kind) {
+ switch (kind) {
+ case AttributedType::attr_address_space:
+ return AttributeList::AT_address_space;
+ case AttributedType::attr_regparm:
+ return AttributeList::AT_regparm;
+ case AttributedType::attr_vector_size:
+ return AttributeList::AT_vector_size;
+ case AttributedType::attr_neon_vector_type:
+ return AttributeList::AT_neon_vector_type;
+ case AttributedType::attr_neon_polyvector_type:
+ return AttributeList::AT_neon_polyvector_type;
+ case AttributedType::attr_objc_gc:
+ return AttributeList::AT_objc_gc;
+ case AttributedType::attr_objc_ownership:
+ return AttributeList::AT_objc_ownership;
+ case AttributedType::attr_noreturn:
+ return AttributeList::AT_noreturn;
+ case AttributedType::attr_cdecl:
+ return AttributeList::AT_cdecl;
+ case AttributedType::attr_fastcall:
+ return AttributeList::AT_fastcall;
+ case AttributedType::attr_stdcall:
+ return AttributeList::AT_stdcall;
+ case AttributedType::attr_thiscall:
+ return AttributeList::AT_thiscall;
+ case AttributedType::attr_pascal:
+ return AttributeList::AT_pascal;
+ case AttributedType::attr_pcs:
+ return AttributeList::AT_pcs;
+ }
+ llvm_unreachable("unexpected attribute kind!");
+}
+
+static void fillAttributedTypeLoc(AttributedTypeLoc TL,
+ const AttributeList *attrs) {
+ AttributedType::Kind kind = TL.getAttrKind();
+
+ assert(attrs && "no type attributes in the expected location!");
+ AttributeList::Kind parsedKind = getAttrListKind(kind);
+ while (attrs->getKind() != parsedKind) {
+ attrs = attrs->getNext();
+ assert(attrs && "no matching attribute in expected location!");
+ }
+
+ TL.setAttrNameLoc(attrs->getLoc());
+ if (TL.hasAttrExprOperand())
+ TL.setAttrExprOperand(attrs->getArg(0));
+ else if (TL.hasAttrEnumOperand())
+ TL.setAttrEnumOperandLoc(attrs->getParameterLoc());
+
+  // FIXME: preserve this information so that it is available here.
+ if (TL.hasAttrOperand())
+ TL.setAttrOperandParensRange(SourceRange());
+}
+
+namespace {
+ class TypeSpecLocFiller : public TypeLocVisitor<TypeSpecLocFiller> {
+ ASTContext &Context;
+ const DeclSpec &DS;
+
+ public:
+ TypeSpecLocFiller(ASTContext &Context, const DeclSpec &DS)
+ : Context(Context), DS(DS) {}
+
+ void VisitAttributedTypeLoc(AttributedTypeLoc TL) {
+ fillAttributedTypeLoc(TL, DS.getAttributes().getList());
+ Visit(TL.getModifiedLoc());
+ }
+ void VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
+ Visit(TL.getUnqualifiedLoc());
+ }
+ void VisitTypedefTypeLoc(TypedefTypeLoc TL) {
+ TL.setNameLoc(DS.getTypeSpecTypeLoc());
+ }
+ void VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
+ TL.setNameLoc(DS.getTypeSpecTypeLoc());
+ }
+ void VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) {
+ // Handle the base type, which might not have been written explicitly.
+ if (DS.getTypeSpecType() == DeclSpec::TST_unspecified) {
+ TL.setHasBaseTypeAsWritten(false);
+ TL.getBaseLoc().initialize(Context, SourceLocation());
+ } else {
+ TL.setHasBaseTypeAsWritten(true);
+ Visit(TL.getBaseLoc());
+ }
+
+ // Protocol qualifiers.
+ if (DS.getProtocolQualifiers()) {
+ assert(TL.getNumProtocols() > 0);
+ assert(TL.getNumProtocols() == DS.getNumProtocolQualifiers());
+ TL.setLAngleLoc(DS.getProtocolLAngleLoc());
+ TL.setRAngleLoc(DS.getSourceRange().getEnd());
+ for (unsigned i = 0, e = DS.getNumProtocolQualifiers(); i != e; ++i)
+ TL.setProtocolLoc(i, DS.getProtocolLocs()[i]);
+ } else {
+ assert(TL.getNumProtocols() == 0);
+ TL.setLAngleLoc(SourceLocation());
+ TL.setRAngleLoc(SourceLocation());
+ }
+ }
+ void VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) {
+ TL.setStarLoc(SourceLocation());
+ Visit(TL.getPointeeLoc());
+ }
+ void VisitTemplateSpecializationTypeLoc(TemplateSpecializationTypeLoc TL) {
+ TypeSourceInfo *TInfo = 0;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+
+ // If we got no declarator info from previous Sema routines,
+ // just fill with the typespec loc.
+ if (!TInfo) {
+ TL.initialize(Context, DS.getTypeSpecTypeNameLoc());
+ return;
+ }
+
+ TypeLoc OldTL = TInfo->getTypeLoc();
+ if (TInfo->getType()->getAs<ElaboratedType>()) {
+ ElaboratedTypeLoc ElabTL = cast<ElaboratedTypeLoc>(OldTL);
+ TemplateSpecializationTypeLoc NamedTL =
+ cast<TemplateSpecializationTypeLoc>(ElabTL.getNamedTypeLoc());
+ TL.copy(NamedTL);
+ }
+ else
+ TL.copy(cast<TemplateSpecializationTypeLoc>(OldTL));
+ }
+ void VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
+ assert(DS.getTypeSpecType() == DeclSpec::TST_typeofExpr);
+ TL.setTypeofLoc(DS.getTypeSpecTypeLoc());
+ TL.setParensRange(DS.getTypeofParensRange());
+ }
+ void VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
+ assert(DS.getTypeSpecType() == DeclSpec::TST_typeofType);
+ TL.setTypeofLoc(DS.getTypeSpecTypeLoc());
+ TL.setParensRange(DS.getTypeofParensRange());
+ assert(DS.getRepAsType());
+ TypeSourceInfo *TInfo = 0;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ TL.setUnderlyingTInfo(TInfo);
+ }
+ void VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
+ // FIXME: This holds only because we only have one unary transform.
+ assert(DS.getTypeSpecType() == DeclSpec::TST_underlyingType);
+ TL.setKWLoc(DS.getTypeSpecTypeLoc());
+ TL.setParensRange(DS.getTypeofParensRange());
+ assert(DS.getRepAsType());
+ TypeSourceInfo *TInfo = 0;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ TL.setUnderlyingTInfo(TInfo);
+ }
+ void VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
+ // By default, use the source location of the type specifier.
+ TL.setBuiltinLoc(DS.getTypeSpecTypeLoc());
+ if (TL.needsExtraLocalData()) {
+ // Set info for the written builtin specifiers.
+ TL.getWrittenBuiltinSpecs() = DS.getWrittenBuiltinSpecs();
+ // Try to have a meaningful source location.
+ if (TL.getWrittenSignSpec() != TSS_unspecified)
+ // Sign spec loc overrides the others (e.g., 'unsigned long').
+ TL.setBuiltinLoc(DS.getTypeSpecSignLoc());
+ else if (TL.getWrittenWidthSpec() != TSW_unspecified)
+ // Width spec loc overrides type spec loc (e.g., 'short int').
+ TL.setBuiltinLoc(DS.getTypeSpecWidthLoc());
+ }
+ }
+ void VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
+ ElaboratedTypeKeyword Keyword
+ = TypeWithKeyword::getKeywordForTypeSpec(DS.getTypeSpecType());
+ if (DS.getTypeSpecType() == TST_typename) {
+ TypeSourceInfo *TInfo = 0;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ if (TInfo) {
+ TL.copy(cast<ElaboratedTypeLoc>(TInfo->getTypeLoc()));
+ return;
+ }
+ }
+ TL.setElaboratedKeywordLoc(Keyword != ETK_None
+ ? DS.getTypeSpecTypeLoc()
+ : SourceLocation());
+ const CXXScopeSpec& SS = DS.getTypeSpecScope();
+ TL.setQualifierLoc(SS.getWithLocInContext(Context));
+ Visit(TL.getNextTypeLoc().getUnqualifiedLoc());
+ }
+ void VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
+ assert(DS.getTypeSpecType() == TST_typename);
+ TypeSourceInfo *TInfo = 0;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ assert(TInfo);
+ TL.copy(cast<DependentNameTypeLoc>(TInfo->getTypeLoc()));
+ }
+ void VisitDependentTemplateSpecializationTypeLoc(
+ DependentTemplateSpecializationTypeLoc TL) {
+ assert(DS.getTypeSpecType() == TST_typename);
+ TypeSourceInfo *TInfo = 0;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ assert(TInfo);
+ TL.copy(cast<DependentTemplateSpecializationTypeLoc>(
+ TInfo->getTypeLoc()));
+ }
+ void VisitTagTypeLoc(TagTypeLoc TL) {
+ TL.setNameLoc(DS.getTypeSpecTypeNameLoc());
+ }
+ void VisitAtomicTypeLoc(AtomicTypeLoc TL) {
+ TL.setKWLoc(DS.getTypeSpecTypeLoc());
+ TL.setParensRange(DS.getTypeofParensRange());
+
+ TypeSourceInfo *TInfo = 0;
+ Sema::GetTypeFromParser(DS.getRepAsType(), &TInfo);
+ TL.getValueLoc().initializeFullCopy(TInfo->getTypeLoc());
+ }
+
+ void VisitTypeLoc(TypeLoc TL) {
+ // FIXME: add other typespec types and change this to an assert.
+ TL.initialize(Context, DS.getTypeSpecTypeLoc());
+ }
+ };
+
+ class DeclaratorLocFiller : public TypeLocVisitor<DeclaratorLocFiller> {
+ ASTContext &Context;
+ const DeclaratorChunk &Chunk;
+
+ public:
+ DeclaratorLocFiller(ASTContext &Context, const DeclaratorChunk &Chunk)
+ : Context(Context), Chunk(Chunk) {}
+
+ void VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
+ llvm_unreachable("qualified type locs not expected here!");
+ }
+
+ void VisitAttributedTypeLoc(AttributedTypeLoc TL) {
+ fillAttributedTypeLoc(TL, Chunk.getAttrs());
+ }
+ void VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::BlockPointer);
+ TL.setCaretLoc(Chunk.Loc);
+ }
+ void VisitPointerTypeLoc(PointerTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::Pointer);
+ TL.setStarLoc(Chunk.Loc);
+ }
+ void VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::Pointer);
+ TL.setStarLoc(Chunk.Loc);
+ }
+ void VisitMemberPointerTypeLoc(MemberPointerTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::MemberPointer);
+ const CXXScopeSpec& SS = Chunk.Mem.Scope();
+ NestedNameSpecifierLoc NNSLoc = SS.getWithLocInContext(Context);
+
+ const Type* ClsTy = TL.getClass();
+ QualType ClsQT = QualType(ClsTy, 0);
+ TypeSourceInfo *ClsTInfo = Context.CreateTypeSourceInfo(ClsQT, 0);
+ // Now copy source location info into the type loc component.
+ TypeLoc ClsTL = ClsTInfo->getTypeLoc();
+ switch (NNSLoc.getNestedNameSpecifier()->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ assert(isa<DependentNameType>(ClsTy) && "Unexpected TypeLoc");
+ {
+ DependentNameTypeLoc DNTLoc = cast<DependentNameTypeLoc>(ClsTL);
+ DNTLoc.setElaboratedKeywordLoc(SourceLocation());
+ DNTLoc.setQualifierLoc(NNSLoc.getPrefix());
+ DNTLoc.setNameLoc(NNSLoc.getLocalBeginLoc());
+ }
+ break;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ if (isa<ElaboratedType>(ClsTy)) {
+ ElaboratedTypeLoc ETLoc = *cast<ElaboratedTypeLoc>(&ClsTL);
+ ETLoc.setElaboratedKeywordLoc(SourceLocation());
+ ETLoc.setQualifierLoc(NNSLoc.getPrefix());
+ TypeLoc NamedTL = ETLoc.getNamedTypeLoc();
+ NamedTL.initializeFullCopy(NNSLoc.getTypeLoc());
+ } else {
+ ClsTL.initializeFullCopy(NNSLoc.getTypeLoc());
+ }
+ break;
+
+ case NestedNameSpecifier::Namespace:
+ case NestedNameSpecifier::NamespaceAlias:
+ case NestedNameSpecifier::Global:
+ llvm_unreachable("Nested-name-specifier must name a type");
+ }
+
+ // Finally fill in MemberPointerLocInfo fields.
+ TL.setStarLoc(Chunk.Loc);
+ TL.setClassTInfo(ClsTInfo);
+ }
+ void VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::Reference);
+      // 'Amp' is misleading: this might have been originally
+      // spelled with AmpAmp.
+ TL.setAmpLoc(Chunk.Loc);
+ }
+ void VisitRValueReferenceTypeLoc(RValueReferenceTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::Reference);
+ assert(!Chunk.Ref.LValueRef);
+ TL.setAmpAmpLoc(Chunk.Loc);
+ }
+ void VisitArrayTypeLoc(ArrayTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::Array);
+ TL.setLBracketLoc(Chunk.Loc);
+ TL.setRBracketLoc(Chunk.EndLoc);
+ TL.setSizeExpr(static_cast<Expr*>(Chunk.Arr.NumElts));
+ }
+ void VisitFunctionTypeLoc(FunctionTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::Function);
+ TL.setLocalRangeBegin(Chunk.Loc);
+ TL.setLocalRangeEnd(Chunk.EndLoc);
+ TL.setTrailingReturn(!!Chunk.Fun.TrailingReturnType);
+
+ const DeclaratorChunk::FunctionTypeInfo &FTI = Chunk.Fun;
+ for (unsigned i = 0, e = TL.getNumArgs(), tpi = 0; i != e; ++i) {
+ ParmVarDecl *Param = cast<ParmVarDecl>(FTI.ArgInfo[i].Param);
+ TL.setArg(tpi++, Param);
+ }
+ // FIXME: exception specs
+ }
+ void VisitParenTypeLoc(ParenTypeLoc TL) {
+ assert(Chunk.Kind == DeclaratorChunk::Paren);
+ TL.setLParenLoc(Chunk.Loc);
+ TL.setRParenLoc(Chunk.EndLoc);
+ }
+
+ void VisitTypeLoc(TypeLoc TL) {
+ llvm_unreachable("unsupported TypeLoc kind in declarator!");
+ }
+ };
+}
+
+/// \brief Create and instantiate a TypeSourceInfo with type source information.
+///
+/// \param T QualType referring to the type as written in source code.
+///
+/// \param ReturnTypeInfo For declarators whose return type does not show
+/// up in the normal place in the declaration specifiers (such as a C++
+/// conversion function), this pointer will refer to a type source information
+/// for that return type.
+TypeSourceInfo *
+Sema::GetTypeSourceInfoForDeclarator(Declarator &D, QualType T,
+ TypeSourceInfo *ReturnTypeInfo) {
+ TypeSourceInfo *TInfo = Context.CreateTypeSourceInfo(T);
+ UnqualTypeLoc CurrTL = TInfo->getTypeLoc().getUnqualifiedLoc();
+
+ // Handle parameter packs whose type is a pack expansion.
+ if (isa<PackExpansionType>(T)) {
+ cast<PackExpansionTypeLoc>(CurrTL).setEllipsisLoc(D.getEllipsisLoc());
+ CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc();
+ }
+
+ for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i) {
+ while (isa<AttributedTypeLoc>(CurrTL)) {
+ AttributedTypeLoc TL = cast<AttributedTypeLoc>(CurrTL);
+ fillAttributedTypeLoc(TL, D.getTypeObject(i).getAttrs());
+ CurrTL = TL.getNextTypeLoc().getUnqualifiedLoc();
+ }
+
+ DeclaratorLocFiller(Context, D.getTypeObject(i)).Visit(CurrTL);
+ CurrTL = CurrTL.getNextTypeLoc().getUnqualifiedLoc();
+ }
+
+ // If we have different source information for the return type, use
+ // that. This really only applies to C++ conversion functions.
+ if (ReturnTypeInfo) {
+ TypeLoc TL = ReturnTypeInfo->getTypeLoc();
+ assert(TL.getFullDataSize() == CurrTL.getFullDataSize());
+ memcpy(CurrTL.getOpaqueData(), TL.getOpaqueData(), TL.getFullDataSize());
+ } else {
+ TypeSpecLocFiller(Context, D.getDeclSpec()).Visit(CurrTL);
+ }
+
+ return TInfo;
+}
+
+/// \brief Create a LocInfoType to hold the given QualType and TypeSourceInfo.
+ParsedType Sema::CreateParsedType(QualType T, TypeSourceInfo *TInfo) {
+ // FIXME: LocInfoTypes are "transient", only needed for passing to/from Parser
+ // and Sema during declaration parsing. Try deallocating/caching them when
+ // it's appropriate, instead of allocating them and keeping them around.
+ LocInfoType *LocT = (LocInfoType*)BumpAlloc.Allocate(sizeof(LocInfoType),
+ TypeAlignment);
+ new (LocT) LocInfoType(T, TInfo);
+ assert(LocT->getTypeClass() != T->getTypeClass() &&
+ "LocInfoType's TypeClass conflicts with an existing Type class");
+ return ParsedType::make(QualType(LocT, 0));
+}
+
+void LocInfoType::getAsStringInternal(std::string &Str,
+ const PrintingPolicy &Policy) const {
+ llvm_unreachable("LocInfoType leaked into the type system; an opaque TypeTy*"
+ " was used directly instead of getting the QualType through"
+ " GetTypeFromParser");
+}
+
+TypeResult Sema::ActOnTypeName(Scope *S, Declarator &D) {
+ // C99 6.7.6: Type names have no identifier. This is already validated by
+ // the parser.
+ assert(D.getIdentifier() == 0 && "Type name should have no identifier!");
+
+ TypeSourceInfo *TInfo = GetTypeForDeclarator(D, S);
+ QualType T = TInfo->getType();
+ if (D.isInvalidType())
+ return true;
+
+ // Make sure there are no unused decl attributes on the declarator.
+ // We don't want to do this for ObjC parameters because we're going
+ // to apply them to the actual parameter declaration.
+ if (D.getContext() != Declarator::ObjCParameterContext)
+ checkUnusedDeclAttributes(D);
+
+ if (getLangOpts().CPlusPlus) {
+ // Check that there are no default arguments (C++ only).
+ CheckExtraCXXDefaultArguments(D);
+ }
+
+ return CreateParsedType(T, TInfo);
+}
+
+ParsedType Sema::ActOnObjCInstanceType(SourceLocation Loc) {
+ QualType T = Context.getObjCInstanceType();
+ TypeSourceInfo *TInfo = Context.getTrivialTypeSourceInfo(T, Loc);
+ return CreateParsedType(T, TInfo);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Type Attribute Processing
+//===----------------------------------------------------------------------===//
+
+/// HandleAddressSpaceTypeAttribute - Process an address_space attribute on the
+/// specified type. The attribute contains 1 argument, the id of the address
+/// space for the type.
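+///
+/// For illustration, a declaration using this attribute might look like
+/// (the variable name here is invented for the example):
+///   int __attribute__((address_space(1))) *ptr;  // pointer to int in AS 1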
+static void HandleAddressSpaceTypeAttribute(QualType &Type,
+ const AttributeList &Attr, Sema &S){
+
+ // If this type is already address space qualified, reject it.
+ // ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "No type shall be qualified by
+ // qualifiers for two or more different address spaces."
+ if (Type.getAddressSpace()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_address_multiple_qualifiers);
+ Attr.setInvalid();
+ return;
+ }
+
+ // ISO/IEC TR 18037 S5.3 (amending C99 6.7.3): "A function type shall not be
+ // qualified by an address-space qualifier."
+ if (Type->isFunctionType()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_address_function_type);
+ Attr.setInvalid();
+ return;
+ }
+
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ Attr.setInvalid();
+ return;
+ }
+ Expr *ASArgExpr = static_cast<Expr *>(Attr.getArg(0));
+ llvm::APSInt addrSpace(32);
+ if (ASArgExpr->isTypeDependent() || ASArgExpr->isValueDependent() ||
+ !ASArgExpr->isIntegerConstantExpr(addrSpace, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_address_space_not_int)
+ << ASArgExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+
+ // Bounds checking.
+ if (addrSpace.isSigned()) {
+ if (addrSpace.isNegative()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_address_space_negative)
+ << ASArgExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+ addrSpace.setIsSigned(false);
+ }
+ llvm::APSInt max(addrSpace.getBitWidth());
+ max = Qualifiers::MaxAddressSpace;
+ if (addrSpace > max) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_address_space_too_high)
+ << Qualifiers::MaxAddressSpace << ASArgExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+
+ unsigned ASIdx = static_cast<unsigned>(addrSpace.getZExtValue());
+ Type = S.Context.getAddrSpaceQualType(Type, ASIdx);
+}
+
+/// Does this type have a "direct" ownership qualifier? That is,
+/// is it written like "__strong id", as opposed to something like
+/// "typeof(foo)", where that happens to be strong?
+static bool hasDirectOwnershipQualifier(QualType type) {
+ // Fast path: no qualifier at all.
+ assert(type.getQualifiers().hasObjCLifetime());
+
+ while (true) {
+ // __strong id
+ if (const AttributedType *attr = dyn_cast<AttributedType>(type)) {
+ if (attr->getAttrKind() == AttributedType::attr_objc_ownership)
+ return true;
+
+ type = attr->getModifiedType();
+
+ // X *__strong (...)
+ } else if (const ParenType *paren = dyn_cast<ParenType>(type)) {
+ type = paren->getInnerType();
+
+ // That's it for things we want to complain about. In particular,
+ // we do not want to look through typedefs, typeof(expr),
+ // typeof(type), or any other way that the type is somehow
+ // abstracted.
+    } else {
+      return false;
+ }
+ }
+}
+
+/// handleObjCOwnershipTypeAttr - Process an objc_ownership
+/// attribute on the specified type.
+///
+/// Returns 'true' if the attribute was handled.
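+///
+/// Under ARC the ownership spellings are sugar for this attribute, so the
+/// declarations that reach here look roughly like (names invented):
+///   __strong id obj;                // objc_ownership(strong)
+///   __weak id weakRef;              // objc_ownership(weak)
+///   NSError * __autoreleasing err;  // objc_ownership(autoreleasing)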
+static bool handleObjCOwnershipTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &type) {
+ bool NonObjCPointer = false;
+
+ if (!type->isDependentType()) {
+ if (const PointerType *ptr = type->getAs<PointerType>()) {
+ QualType pointee = ptr->getPointeeType();
+ if (pointee->isObjCRetainableType() || pointee->isPointerType())
+ return false;
+ // It is important not to lose the source info that there was an attribute
+      // applied to a non-ObjC pointer. We will create an attributed type, but
+ // its type will be the same as the original type.
+ NonObjCPointer = true;
+ } else if (!type->isObjCRetainableType()) {
+ return false;
+ }
+ }
+
+ Sema &S = state.getSema();
+ SourceLocation AttrLoc = attr.getLoc();
+ if (AttrLoc.isMacroID())
+ AttrLoc = S.getSourceManager().getImmediateExpansionRange(AttrLoc).first;
+
+ if (!attr.getParameterName()) {
+ S.Diag(AttrLoc, diag::err_attribute_argument_n_not_string)
+ << "objc_ownership" << 1;
+ attr.setInvalid();
+ return true;
+ }
+
+ // Consume lifetime attributes without further comment outside of
+ // ARC mode.
+ if (!S.getLangOpts().ObjCAutoRefCount)
+ return true;
+
+ Qualifiers::ObjCLifetime lifetime;
+ if (attr.getParameterName()->isStr("none"))
+ lifetime = Qualifiers::OCL_ExplicitNone;
+ else if (attr.getParameterName()->isStr("strong"))
+ lifetime = Qualifiers::OCL_Strong;
+ else if (attr.getParameterName()->isStr("weak"))
+ lifetime = Qualifiers::OCL_Weak;
+ else if (attr.getParameterName()->isStr("autoreleasing"))
+ lifetime = Qualifiers::OCL_Autoreleasing;
+ else {
+ S.Diag(AttrLoc, diag::warn_attribute_type_not_supported)
+ << "objc_ownership" << attr.getParameterName();
+ attr.setInvalid();
+ return true;
+ }
+
+ SplitQualType underlyingType = type.split();
+
+ // Check for redundant/conflicting ownership qualifiers.
+ if (Qualifiers::ObjCLifetime previousLifetime
+ = type.getQualifiers().getObjCLifetime()) {
+ // If it's written directly, that's an error.
+ if (hasDirectOwnershipQualifier(type)) {
+ S.Diag(AttrLoc, diag::err_attr_objc_ownership_redundant)
+ << type;
+ return true;
+ }
+
+ // Otherwise, if the qualifiers actually conflict, pull sugar off
+ // until we reach a type that is directly qualified.
+ if (previousLifetime != lifetime) {
+ // This should always terminate: the canonical type is
+ // qualified, so some bit of sugar must be hiding it.
+ while (!underlyingType.Quals.hasObjCLifetime()) {
+ underlyingType = underlyingType.getSingleStepDesugaredType();
+ }
+ underlyingType.Quals.removeObjCLifetime();
+ }
+ }
+
+ underlyingType.Quals.addObjCLifetime(lifetime);
+
+ if (NonObjCPointer) {
+ StringRef name = attr.getName()->getName();
+ switch (lifetime) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ break;
+ case Qualifiers::OCL_Strong: name = "__strong"; break;
+ case Qualifiers::OCL_Weak: name = "__weak"; break;
+ case Qualifiers::OCL_Autoreleasing: name = "__autoreleasing"; break;
+ }
+ S.Diag(AttrLoc, diag::warn_objc_object_attribute_wrong_type)
+ << name << type;
+ }
+
+ QualType origType = type;
+ if (!NonObjCPointer)
+ type = S.Context.getQualifiedType(underlyingType);
+
+ // If we have a valid source location for the attribute, use an
+ // AttributedType instead.
+ if (AttrLoc.isValid())
+ type = S.Context.getAttributedType(AttributedType::attr_objc_ownership,
+ origType, type);
+
+ // Forbid __weak if the runtime doesn't support it.
+ if (lifetime == Qualifiers::OCL_Weak &&
+ !S.getLangOpts().ObjCRuntimeHasWeak && !NonObjCPointer) {
+
+ // Actually, delay this until we know what we're parsing.
+ if (S.DelayedDiagnostics.shouldDelayDiagnostics()) {
+ S.DelayedDiagnostics.add(
+ sema::DelayedDiagnostic::makeForbiddenType(
+ S.getSourceManager().getExpansionLoc(AttrLoc),
+ diag::err_arc_weak_no_runtime, type, /*ignored*/ 0));
+ } else {
+ S.Diag(AttrLoc, diag::err_arc_weak_no_runtime);
+ }
+
+ attr.setInvalid();
+ return true;
+ }
+
+ // Forbid __weak for class objects marked as
+ // objc_arc_weak_reference_unavailable
+ if (lifetime == Qualifiers::OCL_Weak) {
+ QualType T = type;
+ while (const PointerType *ptr = T->getAs<PointerType>())
+ T = ptr->getPointeeType();
+ if (const ObjCObjectPointerType *ObjT = T->getAs<ObjCObjectPointerType>()) {
+ ObjCInterfaceDecl *Class = ObjT->getInterfaceDecl();
+ if (Class->isArcWeakrefUnavailable()) {
+ S.Diag(AttrLoc, diag::err_arc_unsupported_weak_class);
+ S.Diag(ObjT->getInterfaceDecl()->getLocation(),
+ diag::note_class_declared);
+ }
+ }
+ }
+
+ return true;
+}
+
+/// handleObjCGCTypeAttr - Process the __attribute__((objc_gc)) type
+/// attribute on the specified type. Returns true to indicate that
+/// the attribute was handled, false to indicate that the type does
+/// not permit the attribute.
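+///
+/// Under -fobjc-gc the __weak/__strong spellings map to this attribute, e.g.
+/// (the variable name is invented for the example):
+///   __weak id delegate;  // i.e. __attribute__((objc_gc(weak))) id delegate;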
+static bool handleObjCGCTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &type) {
+ Sema &S = state.getSema();
+
+ // Delay if this isn't some kind of pointer.
+ if (!type->isPointerType() &&
+ !type->isObjCObjectPointerType() &&
+ !type->isBlockPointerType())
+ return false;
+
+ if (type.getObjCGCAttr() != Qualifiers::GCNone) {
+ S.Diag(attr.getLoc(), diag::err_attribute_multiple_objc_gc);
+ attr.setInvalid();
+ return true;
+ }
+
+ // Check the attribute arguments.
+ if (!attr.getParameterName()) {
+ S.Diag(attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "objc_gc" << 1;
+ attr.setInvalid();
+ return true;
+ }
+ Qualifiers::GC GCAttr;
+ if (attr.getNumArgs() != 0) {
+ S.Diag(attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ attr.setInvalid();
+ return true;
+ }
+ if (attr.getParameterName()->isStr("weak"))
+ GCAttr = Qualifiers::Weak;
+ else if (attr.getParameterName()->isStr("strong"))
+ GCAttr = Qualifiers::Strong;
+ else {
+ S.Diag(attr.getLoc(), diag::warn_attribute_type_not_supported)
+ << "objc_gc" << attr.getParameterName();
+ attr.setInvalid();
+ return true;
+ }
+
+ QualType origType = type;
+ type = S.Context.getObjCGCQualType(origType, GCAttr);
+
+ // Make an attributed type to preserve the source information.
+ if (attr.getLoc().isValid())
+ type = S.Context.getAttributedType(AttributedType::attr_objc_gc,
+ origType, type);
+
+ return true;
+}
+
+namespace {
+ /// A helper class to unwrap a type down to a function for the
+ /// purposes of applying attributes there.
+ ///
+ /// Use:
+ /// FunctionTypeUnwrapper unwrapped(SemaRef, T);
+ /// if (unwrapped.isFunctionType()) {
+ /// const FunctionType *fn = unwrapped.get();
+ /// // change fn somehow
+ /// T = unwrapped.wrap(fn);
+ /// }
+ struct FunctionTypeUnwrapper {
+ enum WrapKind {
+ Desugar,
+ Parens,
+ Pointer,
+ BlockPointer,
+ Reference,
+ MemberPointer
+ };
+
+ QualType Original;
+ const FunctionType *Fn;
+ SmallVector<unsigned char /*WrapKind*/, 8> Stack;
+
+ FunctionTypeUnwrapper(Sema &S, QualType T) : Original(T) {
+ while (true) {
+ const Type *Ty = T.getTypePtr();
+ if (isa<FunctionType>(Ty)) {
+ Fn = cast<FunctionType>(Ty);
+ return;
+ } else if (isa<ParenType>(Ty)) {
+ T = cast<ParenType>(Ty)->getInnerType();
+ Stack.push_back(Parens);
+ } else if (isa<PointerType>(Ty)) {
+ T = cast<PointerType>(Ty)->getPointeeType();
+ Stack.push_back(Pointer);
+ } else if (isa<BlockPointerType>(Ty)) {
+ T = cast<BlockPointerType>(Ty)->getPointeeType();
+ Stack.push_back(BlockPointer);
+ } else if (isa<MemberPointerType>(Ty)) {
+ T = cast<MemberPointerType>(Ty)->getPointeeType();
+ Stack.push_back(MemberPointer);
+ } else if (isa<ReferenceType>(Ty)) {
+ T = cast<ReferenceType>(Ty)->getPointeeType();
+ Stack.push_back(Reference);
+ } else {
+ const Type *DTy = Ty->getUnqualifiedDesugaredType();
+ if (Ty == DTy) {
+ Fn = 0;
+ return;
+ }
+
+ T = QualType(DTy, 0);
+ Stack.push_back(Desugar);
+ }
+ }
+ }
+
+ bool isFunctionType() const { return (Fn != 0); }
+ const FunctionType *get() const { return Fn; }
+
+ QualType wrap(Sema &S, const FunctionType *New) {
+ // If T wasn't modified from the unwrapped type, do nothing.
+ if (New == get()) return Original;
+
+ Fn = New;
+ return wrap(S.Context, Original, 0);
+ }
+
+ private:
+ QualType wrap(ASTContext &C, QualType Old, unsigned I) {
+ if (I == Stack.size())
+ return C.getQualifiedType(Fn, Old.getQualifiers());
+
+ // Build up the inner type, applying the qualifiers from the old
+ // type to the new type.
+ SplitQualType SplitOld = Old.split();
+
+ // As a special case, tail-recurse if there are no qualifiers.
+ if (SplitOld.Quals.empty())
+ return wrap(C, SplitOld.Ty, I);
+ return C.getQualifiedType(wrap(C, SplitOld.Ty, I), SplitOld.Quals);
+ }
+
+ QualType wrap(ASTContext &C, const Type *Old, unsigned I) {
+ if (I == Stack.size()) return QualType(Fn, 0);
+
+ switch (static_cast<WrapKind>(Stack[I++])) {
+ case Desugar:
+ // This is the point at which we potentially lose source
+ // information.
+ return wrap(C, Old->getUnqualifiedDesugaredType(), I);
+
+ case Parens: {
+ QualType New = wrap(C, cast<ParenType>(Old)->getInnerType(), I);
+ return C.getParenType(New);
+ }
+
+ case Pointer: {
+ QualType New = wrap(C, cast<PointerType>(Old)->getPointeeType(), I);
+ return C.getPointerType(New);
+ }
+
+ case BlockPointer: {
+ QualType New = wrap(C, cast<BlockPointerType>(Old)->getPointeeType(),I);
+ return C.getBlockPointerType(New);
+ }
+
+ case MemberPointer: {
+ const MemberPointerType *OldMPT = cast<MemberPointerType>(Old);
+ QualType New = wrap(C, OldMPT->getPointeeType(), I);
+ return C.getMemberPointerType(New, OldMPT->getClass());
+ }
+
+ case Reference: {
+ const ReferenceType *OldRef = cast<ReferenceType>(Old);
+ QualType New = wrap(C, OldRef->getPointeeType(), I);
+ if (isa<LValueReferenceType>(OldRef))
+ return C.getLValueReferenceType(New, OldRef->isSpelledAsLValue());
+ else
+ return C.getRValueReferenceType(New);
+ }
+ }
+
+ llvm_unreachable("unknown wrapping kind");
+ }
+ };
+}
+
+/// Process an individual function attribute. Returns true to
+/// indicate that the attribute was handled, false if it wasn't.
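+///
+/// Typical declarations routed through here look like (names invented):
+///   void fatal(const char *msg) __attribute__((noreturn));
+///   void callee(int a, int b) __attribute__((regparm(2)));
+///   void handler(void) __attribute__((fastcall));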
+static bool handleFunctionTypeAttr(TypeProcessingState &state,
+ AttributeList &attr,
+ QualType &type) {
+ Sema &S = state.getSema();
+
+ FunctionTypeUnwrapper unwrapped(S, type);
+
+ if (attr.getKind() == AttributeList::AT_noreturn) {
+ if (S.CheckNoReturnAttr(attr))
+ return true;
+
+ // Delay if this is not a function type.
+ if (!unwrapped.isFunctionType())
+ return false;
+
+ // Otherwise we can process right away.
+ FunctionType::ExtInfo EI = unwrapped.get()->getExtInfo().withNoReturn(true);
+ type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
+ return true;
+ }
+
+ // ns_returns_retained is not always a type attribute, but if we got
+ // here, we're treating it as one right now.
+ if (attr.getKind() == AttributeList::AT_ns_returns_retained) {
+ assert(S.getLangOpts().ObjCAutoRefCount &&
+ "ns_returns_retained treated as type attribute in non-ARC");
+ if (attr.getNumArgs()) return true;
+
+ // Delay if this is not a function type.
+ if (!unwrapped.isFunctionType())
+ return false;
+
+ FunctionType::ExtInfo EI
+ = unwrapped.get()->getExtInfo().withProducesResult(true);
+ type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
+ return true;
+ }
+
+ if (attr.getKind() == AttributeList::AT_regparm) {
+ unsigned value;
+ if (S.CheckRegparmAttr(attr, value))
+ return true;
+
+ // Delay if this is not a function type.
+ if (!unwrapped.isFunctionType())
+ return false;
+
+ // Diagnose regparm with fastcall.
+ const FunctionType *fn = unwrapped.get();
+ CallingConv CC = fn->getCallConv();
+ if (CC == CC_X86FastCall) {
+ S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
+ << FunctionType::getNameForCallConv(CC)
+ << "regparm";
+ attr.setInvalid();
+ return true;
+ }
+
+ FunctionType::ExtInfo EI =
+ unwrapped.get()->getExtInfo().withRegParm(value);
+ type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
+ return true;
+ }
+
+ // Otherwise, a calling convention.
+ CallingConv CC;
+ if (S.CheckCallingConvAttr(attr, CC))
+ return true;
+
+ // Delay if the type didn't work out to a function.
+ if (!unwrapped.isFunctionType()) return false;
+
+ const FunctionType *fn = unwrapped.get();
+ CallingConv CCOld = fn->getCallConv();
+ if (S.Context.getCanonicalCallConv(CC) ==
+ S.Context.getCanonicalCallConv(CCOld)) {
+ FunctionType::ExtInfo EI= unwrapped.get()->getExtInfo().withCallingConv(CC);
+ type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
+ return true;
+ }
+
+ if (CCOld != (S.LangOpts.MRTD ? CC_X86StdCall : CC_Default)) {
+ // Should we diagnose reapplications of the same convention?
+ S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
+ << FunctionType::getNameForCallConv(CC)
+ << FunctionType::getNameForCallConv(CCOld);
+ attr.setInvalid();
+ return true;
+ }
+
+ // Diagnose the use of X86 fastcall on varargs or unprototyped functions.
+ if (CC == CC_X86FastCall) {
+ if (isa<FunctionNoProtoType>(fn)) {
+ S.Diag(attr.getLoc(), diag::err_cconv_knr)
+ << FunctionType::getNameForCallConv(CC);
+ attr.setInvalid();
+ return true;
+ }
+
+ const FunctionProtoType *FnP = cast<FunctionProtoType>(fn);
+ if (FnP->isVariadic()) {
+ S.Diag(attr.getLoc(), diag::err_cconv_varargs)
+ << FunctionType::getNameForCallConv(CC);
+ attr.setInvalid();
+ return true;
+ }
+
+ // Also diagnose fastcall with regparm.
+ if (fn->getHasRegParm()) {
+ S.Diag(attr.getLoc(), diag::err_attributes_are_not_compatible)
+ << "regparm"
+ << FunctionType::getNameForCallConv(CC);
+ attr.setInvalid();
+ return true;
+ }
+ }
+
+ FunctionType::ExtInfo EI = unwrapped.get()->getExtInfo().withCallingConv(CC);
+ type = unwrapped.wrap(S, S.Context.adjustFunctionType(unwrapped.get(), EI));
+ return true;
+}
+
+/// Handle OpenCL image access qualifiers: read_only, write_only, read_write
+static void HandleOpenCLImageAccessAttribute(QualType& CurType,
+ const AttributeList &Attr,
+ Sema &S) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ Attr.setInvalid();
+ return;
+ }
+ Expr *sizeExpr = static_cast<Expr *>(Attr.getArg(0));
+ llvm::APSInt arg(32);
+ if (sizeExpr->isTypeDependent() || sizeExpr->isValueDependent() ||
+ !sizeExpr->isIntegerConstantExpr(arg, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ << "opencl_image_access" << sizeExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+ unsigned iarg = static_cast<unsigned>(arg.getZExtValue());
+ switch (iarg) {
+ case CLIA_read_only:
+ case CLIA_write_only:
+ case CLIA_read_write:
+ // Implemented in a separate patch
+ break;
+ default:
+ // Implemented in a separate patch
+ S.Diag(Attr.getLoc(), diag::err_attribute_invalid_size)
+ << sizeExpr->getSourceRange();
+ Attr.setInvalid();
+ break;
+ }
+}
+
+/// HandleVectorSizeAttr - this attribute is only applicable to integral
+/// and float scalars, although arrays, pointers, and function return values are
+/// allowed in conjunction with this construct. Aggregates with this attribute
+/// are invalid, even if they are of the same size as a corresponding scalar.
+/// The raw attribute should contain precisely 1 argument, the vector size for
+/// the variable, measured in bytes. If CurType and Attr are well formed,
+/// this routine will produce a new vector type.
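+///
+/// For example (the typedef name is invented):
+///   typedef int v4si __attribute__((vector_size(16)));  // 4 x int (32-bit int)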
+static void HandleVectorSizeAttr(QualType& CurType, const AttributeList &Attr,
+ Sema &S) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ Attr.setInvalid();
+ return;
+ }
+ Expr *sizeExpr = static_cast<Expr *>(Attr.getArg(0));
+ llvm::APSInt vecSize(32);
+ if (sizeExpr->isTypeDependent() || sizeExpr->isValueDependent() ||
+ !sizeExpr->isIntegerConstantExpr(vecSize, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ << "vector_size" << sizeExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+ // the base type must be integer or float, and can't already be a vector.
+ if (!CurType->isIntegerType() && !CurType->isRealFloatingType()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_invalid_vector_type) << CurType;
+ Attr.setInvalid();
+ return;
+ }
+ unsigned typeSize = static_cast<unsigned>(S.Context.getTypeSize(CurType));
+ // vecSize is specified in bytes - convert to bits.
+ unsigned vectorSize = static_cast<unsigned>(vecSize.getZExtValue() * 8);
+
+ // the vector size needs to be an integral multiple of the type size.
+ if (vectorSize % typeSize) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_invalid_size)
+ << sizeExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+ if (vectorSize == 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_zero_size)
+ << sizeExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+
+  // Success! Instantiate the vector type; the number of elements is > 0 and,
+  // unlike GCC, is not required to be a power of 2.
+ CurType = S.Context.getVectorType(CurType, vectorSize/typeSize,
+ VectorType::GenericVector);
+}
+
+/// \brief Process the OpenCL-like ext_vector_type attribute when it occurs on
+/// a type.
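+///
+/// For example (the typedef name is invented):
+///   typedef float float4 __attribute__((ext_vector_type(4)));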
+static void HandleExtVectorTypeAttr(QualType &CurType,
+ const AttributeList &Attr,
+ Sema &S) {
+ Expr *sizeExpr;
+
+ // Special case where the argument is a template id.
+ if (Attr.getParameterName()) {
+ CXXScopeSpec SS;
+ SourceLocation TemplateKWLoc;
+ UnqualifiedId id;
+ id.setIdentifier(Attr.getParameterName(), Attr.getLoc());
+
+ ExprResult Size = S.ActOnIdExpression(S.getCurScope(), SS, TemplateKWLoc,
+ id, false, false);
+ if (Size.isInvalid())
+ return;
+
+ sizeExpr = Size.get();
+ } else {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+ sizeExpr = Attr.getArg(0);
+ }
+
+ // Create the vector type.
+ QualType T = S.BuildExtVectorType(CurType, sizeExpr, Attr.getLoc());
+ if (!T.isNull())
+ CurType = T;
+}
+
+/// HandleNeonVectorTypeAttr - The "neon_vector_type" and
+/// "neon_polyvector_type" attributes are used to create vector types that
+/// are mangled according to ARM's ABI. Otherwise, these types are identical
+/// to those created with the "vector_size" attribute. Unlike "vector_size",
+/// the argument to these Neon attributes is the number of vector elements,
+/// not the vector size in bytes. The vector width and element type must
+/// match one of the standard Neon vector types.
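+///
+/// For example, arm_neon.h builds its vector typedefs roughly like this:
+///   typedef __attribute__((neon_vector_type(2))) int int32x2_t;  // 64 bits
+///   typedef __attribute__((neon_polyvector_type(8))) signed char poly8x8_t;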
+static void HandleNeonVectorTypeAttr(QualType& CurType,
+ const AttributeList &Attr, Sema &S,
+ VectorType::VectorKind VecKind,
+ const char *AttrName) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ Attr.setInvalid();
+ return;
+ }
+ // The number of elements must be an ICE.
+ Expr *numEltsExpr = static_cast<Expr *>(Attr.getArg(0));
+ llvm::APSInt numEltsInt(32);
+ if (numEltsExpr->isTypeDependent() || numEltsExpr->isValueDependent() ||
+ !numEltsExpr->isIntegerConstantExpr(numEltsInt, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ << AttrName << numEltsExpr->getSourceRange();
+ Attr.setInvalid();
+ return;
+ }
+ // Only certain element types are supported for Neon vectors.
+ const BuiltinType* BTy = CurType->getAs<BuiltinType>();
+ if (!BTy ||
+ (VecKind == VectorType::NeonPolyVector &&
+ BTy->getKind() != BuiltinType::SChar &&
+ BTy->getKind() != BuiltinType::Short) ||
+ (BTy->getKind() != BuiltinType::SChar &&
+ BTy->getKind() != BuiltinType::UChar &&
+ BTy->getKind() != BuiltinType::Short &&
+ BTy->getKind() != BuiltinType::UShort &&
+ BTy->getKind() != BuiltinType::Int &&
+ BTy->getKind() != BuiltinType::UInt &&
+ BTy->getKind() != BuiltinType::LongLong &&
+ BTy->getKind() != BuiltinType::ULongLong &&
+ BTy->getKind() != BuiltinType::Float)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_invalid_vector_type) <<CurType;
+ Attr.setInvalid();
+ return;
+ }
+ // The total size of the vector must be 64 or 128 bits.
+ unsigned typeSize = static_cast<unsigned>(S.Context.getTypeSize(CurType));
+ unsigned numElts = static_cast<unsigned>(numEltsInt.getZExtValue());
+ unsigned vecSize = typeSize * numElts;
+ if (vecSize != 64 && vecSize != 128) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_bad_neon_vector_size) << CurType;
+ Attr.setInvalid();
+ return;
+ }
+
+ CurType = S.Context.getVectorType(CurType, numElts, VecKind);
+}
+
+static void processTypeAttrs(TypeProcessingState &state, QualType &type,
+ bool isDeclSpec, AttributeList *attrs) {
+ // Scan through and apply attributes to this type where it makes sense. Some
+ // attributes (such as __address_space__, __vector_size__, etc) apply to the
+ // type, but others can be present in the type specifiers even though they
+ // apply to the decl. Here we apply type attributes and ignore the rest.
+
+ AttributeList *next;
+ do {
+ AttributeList &attr = *attrs;
+ next = attr.getNext();
+
+ // Skip attributes that were marked to be invalid.
+ if (attr.isInvalid())
+ continue;
+
+    // If this is an attribute we can handle, do so now; otherwise, leave it
+    // to be processed later as a declaration attribute.
+ switch (attr.getKind()) {
+ default: break;
+
+ case AttributeList::AT_may_alias:
+ // FIXME: This attribute needs to actually be handled, but if we ignore
+      // it, it breaks large amounts of Linux software.
+ attr.setUsedAsTypeAttr();
+ break;
+ case AttributeList::AT_address_space:
+ HandleAddressSpaceTypeAttribute(type, attr, state.getSema());
+ attr.setUsedAsTypeAttr();
+ break;
+ OBJC_POINTER_TYPE_ATTRS_CASELIST:
+ if (!handleObjCPointerTypeAttr(state, attr, type))
+ distributeObjCPointerTypeAttr(state, attr, type);
+ attr.setUsedAsTypeAttr();
+ break;
+ case AttributeList::AT_vector_size:
+ HandleVectorSizeAttr(type, attr, state.getSema());
+ attr.setUsedAsTypeAttr();
+ break;
+ case AttributeList::AT_ext_vector_type:
+ if (state.getDeclarator().getDeclSpec().getStorageClassSpec()
+ != DeclSpec::SCS_typedef)
+ HandleExtVectorTypeAttr(type, attr, state.getSema());
+ attr.setUsedAsTypeAttr();
+ break;
+ case AttributeList::AT_neon_vector_type:
+ HandleNeonVectorTypeAttr(type, attr, state.getSema(),
+ VectorType::NeonVector, "neon_vector_type");
+ attr.setUsedAsTypeAttr();
+ break;
+ case AttributeList::AT_neon_polyvector_type:
+ HandleNeonVectorTypeAttr(type, attr, state.getSema(),
+ VectorType::NeonPolyVector,
+ "neon_polyvector_type");
+ attr.setUsedAsTypeAttr();
+ break;
+ case AttributeList::AT_opencl_image_access:
+ HandleOpenCLImageAccessAttribute(type, attr, state.getSema());
+ attr.setUsedAsTypeAttr();
+ break;
+
+ case AttributeList::AT_ns_returns_retained:
+ if (!state.getSema().getLangOpts().ObjCAutoRefCount)
+ break;
+ // fallthrough into the function attrs
+
+ FUNCTION_TYPE_ATTRS_CASELIST:
+ attr.setUsedAsTypeAttr();
+
+ // Never process function type attributes as part of the
+ // declaration-specifiers.
+ if (isDeclSpec)
+ distributeFunctionTypeAttrFromDeclSpec(state, attr, type);
+
+ // Otherwise, handle the possible delays.
+ else if (!handleFunctionTypeAttr(state, attr, type))
+ distributeFunctionTypeAttr(state, attr, type);
+ break;
+ }
+ } while ((attrs = next));
+}
+
+/// \brief Ensure that the type of the given expression is complete.
+///
+/// This routine checks whether the expression \p E has a complete type. If the
+/// expression refers to an instantiable construct, that instantiation is
+/// performed as needed to complete its type. Furthermore,
+/// Sema::RequireCompleteType is called for the expression's type (or in the
+/// case of a reference type, the referred-to type).
+///
+/// \param E The expression whose type is required to be complete.
+/// \param PD The partial diagnostic that will be printed out if the type cannot
+/// be completed.
+///
+/// \returns \c true if the type of \p E is incomplete and diagnosed, \c false
+/// otherwise.
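+///
+/// A sketch of one case this handles (names invented for the example): a
+/// static data member of a class template whose incomplete array type is
+/// only completed once its definition is instantiated:
+///   template<typename T> struct X { static const int arr[]; };
+///   template<typename T> const int X<T>::arr[] = { 1, 2, 3 };
+///   int n = sizeof(X<int>::arr);  // instantiation completes arr to int[3]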
+bool Sema::RequireCompleteExprType(Expr *E, const PartialDiagnostic &PD,
+ std::pair<SourceLocation,
+ PartialDiagnostic> Note) {
+ QualType T = E->getType();
+
+ // Fast path the case where the type is already complete.
+ if (!T->isIncompleteType())
+ return false;
+
+ // Incomplete array types may be completed by the initializer attached to
+ // their definitions. For static data members of class templates we need to
+ // instantiate the definition to get this initializer and complete the type.
+ if (T->isIncompleteArrayType()) {
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParens())) {
+ if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
+ if (Var->isStaticDataMember() &&
+ Var->getInstantiatedFromStaticDataMember()) {
+
+ MemberSpecializationInfo *MSInfo = Var->getMemberSpecializationInfo();
+ assert(MSInfo && "Missing member specialization information?");
+ if (MSInfo->getTemplateSpecializationKind()
+ != TSK_ExplicitSpecialization) {
+ // If we don't already have a point of instantiation, this is it.
+ if (MSInfo->getPointOfInstantiation().isInvalid()) {
+ MSInfo->setPointOfInstantiation(E->getLocStart());
+
+ // This is a modification of an existing AST node. Notify
+ // listeners.
+ if (ASTMutationListener *L = getASTMutationListener())
+ L->StaticDataMemberInstantiated(Var);
+ }
+
+ InstantiateStaticDataMemberDefinition(E->getExprLoc(), Var);
+
+ // Update the type to the newly instantiated definition's type both
+ // here and within the expression.
+ if (VarDecl *Def = Var->getDefinition()) {
+ DRE->setDecl(Def);
+ T = Def->getType();
+ DRE->setType(T);
+ E->setType(T);
+ }
+ }
+
+ // We still go on to try to complete the type independently, as it
+ // may also require instantiations or diagnostics if it remains
+ // incomplete.
+ }
+ }
+ }
+ }
+
+ // FIXME: Are there other cases which require instantiating something other
+ // than the type to complete the type of an expression?
+
+ // Look through reference types and complete the referred type.
+ if (const ReferenceType *Ref = T->getAs<ReferenceType>())
+ T = Ref->getPointeeType();
+
+ return RequireCompleteType(E->getExprLoc(), T, PD, Note);
+}
+
+/// @brief Ensure that the type T is a complete type.
+///
+/// This routine checks whether the type @p T is complete in any
+/// context where a complete type is required. If @p T is a complete
+/// type, returns false. If @p T is a class template specialization,
+/// this routine then attempts to perform class template
+/// instantiation. If instantiation fails, or if @p T is incomplete
+/// and cannot be completed, issues the diagnostic @p diag (giving it
+/// the type @p T) and returns true.
+///
+/// @param Loc The location in the source that the incomplete type
+/// diagnostic should refer to.
+///
+/// @param T The type that this routine is examining for completeness.
+///
+/// @param PD The partial diagnostic that will be printed out if T is not a
+/// complete type.
+///
+/// @returns @c true if @p T is incomplete and a diagnostic was emitted,
+/// @c false otherwise.
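+///
+/// A hedged usage sketch; \c SomeDiagID and \c FieldType are illustrative
+/// placeholders (see also the convenience overloads defined below):
+/// \code
+///   if (RequireCompleteType(Loc, FieldType, SomeDiagID))
+///     return true;
+/// \endcode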
+bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
+ const PartialDiagnostic &PD,
+ std::pair<SourceLocation,
+ PartialDiagnostic> Note) {
+ unsigned diag = PD.getDiagID();
+
+ // FIXME: Add this assertion to make sure we always get instantiation points.
+ // assert(!Loc.isInvalid() && "Invalid location in RequireCompleteType");
+ // FIXME: Add this assertion to help us flush out problems with
+ // checking for dependent types and type-dependent expressions.
+ //
+ // assert(!T->isDependentType() &&
+ // "Can't ask whether a dependent type is complete");
+
+ // If we have a complete type, we're done.
+ NamedDecl *Def = 0;
+ if (!T->isIncompleteType(&Def)) {
+ // If we know about the definition but it is not visible, complain.
+ if (diag != 0 && Def && !LookupResult::isVisible(Def)) {
+ // Suppress this error outside of a SFINAE context if we've already
+ // emitted the error once for this type. There's no usefulness in
+ // repeating the diagnostic.
+ // FIXME: Add a Fix-It that imports the corresponding module or includes
+ // the header.
+ if (isSFINAEContext() || HiddenDefinitions.insert(Def)) {
+ Diag(Loc, diag::err_module_private_definition) << T;
+ Diag(Def->getLocation(), diag::note_previous_definition);
+ }
+ }
+
+ return false;
+ }
+
+ const TagType *Tag = T->getAs<TagType>();
+ const ObjCInterfaceType *IFace = 0;
+
+ if (Tag) {
+ // Avoid diagnosing invalid decls as incomplete.
+ if (Tag->getDecl()->isInvalidDecl())
+ return true;
+
+ // Give the external AST source a chance to complete the type.
+ if (Tag->getDecl()->hasExternalLexicalStorage()) {
+ Context.getExternalSource()->CompleteType(Tag->getDecl());
+ if (!Tag->isIncompleteType())
+ return false;
+ }
+ }
+ else if ((IFace = T->getAs<ObjCInterfaceType>())) {
+ // Avoid diagnosing invalid decls as incomplete.
+ if (IFace->getDecl()->isInvalidDecl())
+ return true;
+
+ // Give the external AST source a chance to complete the type.
+ if (IFace->getDecl()->hasExternalLexicalStorage()) {
+ Context.getExternalSource()->CompleteType(IFace->getDecl());
+ if (!IFace->isIncompleteType())
+ return false;
+ }
+ }
+
+ // If we have a class template specialization or a class member of a
+ // class template specialization, or an array with known size of such,
+ // try to instantiate it.
+ QualType MaybeTemplate = T;
+ if (const ConstantArrayType *Array = Context.getAsConstantArrayType(T))
+ MaybeTemplate = Array->getElementType();
+ if (const RecordType *Record = MaybeTemplate->getAs<RecordType>()) {
+ if (ClassTemplateSpecializationDecl *ClassTemplateSpec
+ = dyn_cast<ClassTemplateSpecializationDecl>(Record->getDecl())) {
+ if (ClassTemplateSpec->getSpecializationKind() == TSK_Undeclared)
+ return InstantiateClassTemplateSpecialization(Loc, ClassTemplateSpec,
+ TSK_ImplicitInstantiation,
+ /*Complain=*/diag != 0);
+ } else if (CXXRecordDecl *Rec
+ = dyn_cast<CXXRecordDecl>(Record->getDecl())) {
+ CXXRecordDecl *Pattern = Rec->getInstantiatedFromMemberClass();
+ if (!Rec->isBeingDefined() && Pattern) {
+ MemberSpecializationInfo *MSI = Rec->getMemberSpecializationInfo();
+ assert(MSI && "Missing member specialization information?");
+ // This record was instantiated from a class within a template.
+ if (MSI->getTemplateSpecializationKind() != TSK_ExplicitSpecialization)
+ return InstantiateClass(Loc, Rec, Pattern,
+ getTemplateInstantiationArgs(Rec),
+ TSK_ImplicitInstantiation,
+ /*Complain=*/diag != 0);
+ }
+ }
+ }
+
+ if (diag == 0)
+ return true;
+
+ // We have an incomplete type. Produce a diagnostic.
+ Diag(Loc, PD) << T;
+
+ // If we have a note, produce it.
+ if (!Note.first.isInvalid())
+ Diag(Note.first, Note.second);
+
+ // If the type was a forward declaration of a class/struct/union
+ // type, produce a note.
+ if (Tag && !Tag->getDecl()->isInvalidDecl())
+ Diag(Tag->getDecl()->getLocation(),
+ Tag->isBeingDefined() ? diag::note_type_being_defined
+ : diag::note_forward_declaration)
+ << QualType(Tag, 0);
+
+ // If the Objective-C class was a forward declaration, produce a note.
+ if (IFace && !IFace->getDecl()->isInvalidDecl())
+ Diag(IFace->getDecl()->getLocation(), diag::note_forward_class);
+
+ return true;
+}
+
+bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
+ const PartialDiagnostic &PD) {
+ return RequireCompleteType(Loc, T, PD,
+ std::make_pair(SourceLocation(), PDiag(0)));
+}
+
+bool Sema::RequireCompleteType(SourceLocation Loc, QualType T,
+ unsigned DiagID) {
+ return RequireCompleteType(Loc, T, PDiag(DiagID),
+ std::make_pair(SourceLocation(), PDiag(0)));
+}
+
+/// @brief Ensure that the type T is a literal type.
+///
+/// This routine checks whether the type @p T is a literal type. If @p T is an
+/// incomplete type, an attempt is made to complete it. If @p T is a literal
+/// type, returns false. Otherwise, this routine issues the diagnostic @p PD
+/// (giving it the type @p T), along with notes explaining why the type is not
+/// a literal type, and returns true.
+///
+/// @param Loc The location in the source that the non-literal type
+/// diagnostic should refer to.
+///
+/// @param T The type that this routine is examining for literalness.
+///
+/// @param PD The partial diagnostic that will be printed out if T is not a
+/// literal type.
+///
+/// @returns @c true if @p T is not a literal type and a diagnostic was emitted,
+/// @c false otherwise.
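+///
+/// For illustration only (these declarations are not from this file):
+/// \code
+///   struct HasDtor { ~HasDtor(); };                  // non-literal: user dtor
+///   struct Lit { constexpr Lit() : n(0) {} int n; }; // literal type
+/// \endcode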
+bool Sema::RequireLiteralType(SourceLocation Loc, QualType T,
+ const PartialDiagnostic &PD) {
+ assert(!T->isDependentType() && "type should not be dependent");
+
+ QualType ElemType = Context.getBaseElementType(T);
+ RequireCompleteType(Loc, ElemType, 0);
+
+ if (T->isLiteralType())
+ return false;
+
+ if (PD.getDiagID() == 0)
+ return true;
+
+ Diag(Loc, PD) << T;
+
+ if (T->isVariableArrayType())
+ return true;
+
+ const RecordType *RT = ElemType->getAs<RecordType>();
+ if (!RT)
+ return true;
+
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ // FIXME: Better diagnostic for incomplete class?
+ if (!RD->isCompleteDefinition())
+ return true;
+
+ // If the class has virtual base classes, then it's not an aggregate, and
+ // cannot have any constexpr constructors or a trivial default constructor,
+ // so is non-literal. This is better to diagnose than the resulting absence
+ // of constexpr constructors.
+ if (RD->getNumVBases()) {
+ Diag(RD->getLocation(), diag::note_non_literal_virtual_base)
+ << RD->isStruct() << RD->getNumVBases();
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I)
+ Diag(I->getLocStart(),
+ diag::note_constexpr_virtual_base_here) << I->getSourceRange();
+ } else if (!RD->isAggregate() && !RD->hasConstexprNonCopyMoveConstructor() &&
+ !RD->hasTrivialDefaultConstructor()) {
+ Diag(RD->getLocation(), diag::note_non_literal_no_constexpr_ctors) << RD;
+ } else if (RD->hasNonLiteralTypeFieldsOrBases()) {
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (!I->getType()->isLiteralType()) {
+ Diag(I->getLocStart(),
+ diag::note_non_literal_base_class)
+ << RD << I->getType() << I->getSourceRange();
+ return true;
+ }
+ }
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I) {
+ if (!(*I)->getType()->isLiteralType() ||
+ (*I)->getType().isVolatileQualified()) {
+ Diag((*I)->getLocation(), diag::note_non_literal_field)
+ << RD << (*I) << (*I)->getType()
+ << (*I)->getType().isVolatileQualified();
+ return true;
+ }
+ }
+ } else if (!RD->hasTrivialDestructor()) {
+ // All fields and bases are of literal types, so have trivial destructors.
+ // If this class's destructor is non-trivial it must be user-declared.
+ CXXDestructorDecl *Dtor = RD->getDestructor();
+ assert(Dtor && "class has literal fields and bases but no dtor?");
+ if (!Dtor)
+ return true;
+
+ Diag(Dtor->getLocation(), Dtor->isUserProvided() ?
+ diag::note_non_literal_user_provided_dtor :
+ diag::note_non_literal_nontrivial_dtor) << RD;
+ }
+
+ return true;
+}
+
+/// \brief Retrieve a version of the type 'T' that is elaborated by Keyword
+/// and qualified by the nested-name-specifier contained in SS.
+QualType Sema::getElaboratedType(ElaboratedTypeKeyword Keyword,
+ const CXXScopeSpec &SS, QualType T) {
+ if (T.isNull())
+ return T;
+ NestedNameSpecifier *NNS;
+ if (SS.isValid())
+ NNS = static_cast<NestedNameSpecifier *>(SS.getScopeRep());
+ else {
+ if (Keyword == ETK_None)
+ return T;
+ NNS = 0;
+ }
+ return Context.getElaboratedType(Keyword, NNS, T);
+}
+
+QualType Sema::BuildTypeofExprType(Expr *E, SourceLocation Loc) {
+ ExprResult ER = CheckPlaceholderExpr(E);
+ if (ER.isInvalid()) return QualType();
+ E = ER.take();
+
+ if (!E->isTypeDependent()) {
+ QualType T = E->getType();
+ if (const TagType *TT = T->getAs<TagType>())
+ DiagnoseUseOfDecl(TT->getDecl(), E->getExprLoc());
+ }
+ return Context.getTypeOfExprType(E);
+}
+
+/// getDecltypeForExpr - Given an expr, will return the decltype for
+/// that expression, according to the rules in C++11
+/// [dcl.type.simple]p4 and C++11 [expr.lambda.prim]p18.
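+///
+/// Illustrative consequences of these rules (examples, not from this file):
+/// \code
+///   int x;
+///   decltype(x) a = x;                       // int   (unparenthesized id)
+///   decltype((x)) b = x;                     // int&  (parenthesized lvalue)
+///   decltype(static_cast<int&&>(x)) c = 1;   // int&& (xvalue)
+/// \endcode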
+static QualType getDecltypeForExpr(Sema &S, Expr *E) {
+ if (E->isTypeDependent())
+ return S.Context.DependentTy;
+
+ // C++11 [dcl.type.simple]p4:
+ // The type denoted by decltype(e) is defined as follows:
+ //
+ // - if e is an unparenthesized id-expression or an unparenthesized class
+ // member access (5.2.5), decltype(e) is the type of the entity named
+ // by e. If there is no such entity, or if e names a set of overloaded
+ // functions, the program is ill-formed;
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (const ValueDecl *VD = dyn_cast<ValueDecl>(DRE->getDecl()))
+ return VD->getType();
+ }
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl()))
+ return FD->getType();
+ }
+
+ // C++11 [expr.lambda.prim]p18:
+ // Every occurrence of decltype((x)) where x is a possibly
+ // parenthesized id-expression that names an entity of automatic
+ // storage duration is treated as if x were transformed into an
+ // access to a corresponding data member of the closure type that
+ // would have been declared if x were an odr-use of the denoted
+ // entity.
+ using namespace sema;
+ if (S.getCurLambda()) {
+ if (isa<ParenExpr>(E)) {
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParens())) {
+ if (VarDecl *Var = dyn_cast<VarDecl>(DRE->getDecl())) {
+ QualType T = S.getCapturedDeclRefType(Var, DRE->getLocation());
+ if (!T.isNull())
+ return S.Context.getLValueReferenceType(T);
+ }
+ }
+ }
+ }
+
+ // C++11 [dcl.type.simple]p4:
+ // [...]
+ QualType T = E->getType();
+ switch (E->getValueKind()) {
+ // - otherwise, if e is an xvalue, decltype(e) is T&&, where T is the
+ // type of e;
+ case VK_XValue: T = S.Context.getRValueReferenceType(T); break;
+ // - otherwise, if e is an lvalue, decltype(e) is T&, where T is the
+ // type of e;
+ case VK_LValue: T = S.Context.getLValueReferenceType(T); break;
+ // - otherwise, decltype(e) is the type of e.
+ case VK_RValue: break;
+ }
+
+ return T;
+}
+
+QualType Sema::BuildDecltypeType(Expr *E, SourceLocation Loc) {
+ ExprResult ER = CheckPlaceholderExpr(E);
+ if (ER.isInvalid()) return QualType();
+ E = ER.take();
+
+ return Context.getDecltypeType(E, getDecltypeForExpr(*this, E));
+}
+
+QualType Sema::BuildUnaryTransformType(QualType BaseType,
+ UnaryTransformType::UTTKind UKind,
+ SourceLocation Loc) {
+ switch (UKind) {
+ case UnaryTransformType::EnumUnderlyingType:
+ if (!BaseType->isDependentType() && !BaseType->isEnumeralType()) {
+ Diag(Loc, diag::err_only_enums_have_underlying_types);
+ return QualType();
+ } else {
+ QualType Underlying = BaseType;
+ if (!BaseType->isDependentType()) {
+ EnumDecl *ED = BaseType->getAs<EnumType>()->getDecl();
+ assert(ED && "EnumType has no EnumDecl");
+ DiagnoseUseOfDecl(ED, Loc);
+ Underlying = ED->getIntegerType();
+ }
+ assert(!Underlying.isNull());
+ return Context.getUnaryTransformType(BaseType, Underlying,
+ UnaryTransformType::EnumUnderlyingType);
+ }
+ }
+ llvm_unreachable("unknown unary transform type");
+}
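+
+// For reference, the EnumUnderlyingType transform handled above corresponds to
+// the __underlying_type specifier; the enum here is illustrative:
+//   enum E : short { A };
+//   __underlying_type(E) v = 0;   // v has type short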
+
+QualType Sema::BuildAtomicType(QualType T, SourceLocation Loc) {
+ if (!T->isDependentType()) {
+ // FIXME: It isn't entirely clear whether incomplete atomic types
+ // are allowed or not; for simplicity, ban them for the moment.
+ if (RequireCompleteType(Loc, T,
+ PDiag(diag::err_atomic_specifier_bad_type) << 0))
+ return QualType();
+
+ int DisallowedKind = -1;
+ if (T->isArrayType())
+ DisallowedKind = 1;
+ else if (T->isFunctionType())
+ DisallowedKind = 2;
+ else if (T->isReferenceType())
+ DisallowedKind = 3;
+ else if (T->isAtomicType())
+ DisallowedKind = 4;
+ else if (T.hasQualifiers())
+ DisallowedKind = 5;
+ else if (!T.isTriviallyCopyableType(Context))
+ // Some other non-trivially-copyable type (probably a C++ class)
+ DisallowedKind = 6;
+
+ if (DisallowedKind != -1) {
+ Diag(Loc, diag::err_atomic_specifier_bad_type) << DisallowedKind << T;
+ return QualType();
+ }
+
+ // FIXME: Do we need any handling for ARC here?
+ }
+
+ // Build the pointer type.
+ return Context.getAtomicType(T);
+}
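+
+// For reference, the checks above reject source such as (illustrative):
+//   _Atomic(int[4]) a;      // array type
+//   _Atomic(int &) r;       // reference type
+//   _Atomic(const int) c;   // qualified type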
diff --git a/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp b/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp
new file mode 100644
index 0000000..8b19be7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.cpp
@@ -0,0 +1,278 @@
+//===-- TargetAttributesSema.cpp - Encapsulate target attributes -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains semantic analysis implementation for target-specific
+// attributes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "TargetAttributesSema.h"
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/AST/DeclCXX.h"
+#include "llvm/ADT/Triple.h"
+
+using namespace clang;
+
+TargetAttributesSema::~TargetAttributesSema() {}
+bool TargetAttributesSema::ProcessDeclAttribute(Scope *scope, Decl *D,
+ const AttributeList &Attr, Sema &S) const {
+ return false;
+}
+
+static void HandleMSP430InterruptAttr(Decl *d,
+ const AttributeList &Attr, Sema &S) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+
+ // FIXME: Check for decl - it should be void ()(void).
+
+ Expr *NumParamsExpr = static_cast<Expr *>(Attr.getArg(0));
+ llvm::APSInt NumParams(32);
+ if (!NumParamsExpr->isIntegerConstantExpr(NumParams, S.Context)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_not_int)
+ << "interrupt" << NumParamsExpr->getSourceRange();
+ return;
+ }
+
+ unsigned Num = NumParams.getLimitedValue(255);
+ if ((Num & 1) || Num > 30) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_out_of_bounds)
+ << "interrupt" << (int)NumParams.getSExtValue()
+ << NumParamsExpr->getSourceRange();
+ return;
+ }
+
+ d->addAttr(::new (S.Context) MSP430InterruptAttr(Attr.getLoc(), S.Context, Num));
+ d->addAttr(::new (S.Context) UsedAttr(Attr.getLoc(), S.Context));
+}
+
+namespace {
+ class MSP430AttributesSema : public TargetAttributesSema {
+ public:
+ MSP430AttributesSema() { }
+ bool ProcessDeclAttribute(Scope *scope, Decl *D,
+ const AttributeList &Attr, Sema &S) const {
+ if (Attr.getName()->getName() == "interrupt") {
+ HandleMSP430InterruptAttr(D, Attr, S);
+ return true;
+ }
+ return false;
+ }
+ };
+}
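+
+// Source-level form handled above (the vector number is illustrative; it must
+// be even and no greater than 30):
+//   void __attribute__((interrupt(2))) isr(void);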
+
+static void HandleMBlazeInterruptHandlerAttr(Decl *d, const AttributeList &Attr,
+ Sema &S) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ // FIXME: Check for decl - it should be void ()(void).
+
+ d->addAttr(::new (S.Context) MBlazeInterruptHandlerAttr(Attr.getLoc(),
+ S.Context));
+ d->addAttr(::new (S.Context) UsedAttr(Attr.getLoc(), S.Context));
+}
+
+static void HandleMBlazeSaveVolatilesAttr(Decl *d, const AttributeList &Attr,
+ Sema &S) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ // FIXME: Check for decl - it should be void ()(void).
+
+ d->addAttr(::new (S.Context) MBlazeSaveVolatilesAttr(Attr.getLoc(),
+ S.Context));
+ d->addAttr(::new (S.Context) UsedAttr(Attr.getLoc(), S.Context));
+}
+
+
+namespace {
+ class MBlazeAttributesSema : public TargetAttributesSema {
+ public:
+ MBlazeAttributesSema() { }
+ bool ProcessDeclAttribute(Scope *scope, Decl *D, const AttributeList &Attr,
+ Sema &S) const {
+ if (Attr.getName()->getName() == "interrupt_handler") {
+ HandleMBlazeInterruptHandlerAttr(D, Attr, S);
+ return true;
+ } else if (Attr.getName()->getName() == "save_volatiles") {
+ HandleMBlazeSaveVolatilesAttr(D, Attr, S);
+ return true;
+ }
+ return false;
+ }
+ };
+}
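+
+// Source-level spellings dispatched above (illustrative):
+//   void __attribute__((interrupt_handler)) handler(void);
+//   void __attribute__((save_volatiles)) helper(void);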
+
+static void HandleX86ForceAlignArgPointerAttr(Decl *D,
+ const AttributeList& Attr,
+ Sema &S) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ // If we try to apply it to a function pointer, don't warn, but don't
+ // do anything, either. It doesn't matter anyway, because there's nothing
+ // special about calling a force_align_arg_pointer function.
+ ValueDecl *VD = dyn_cast<ValueDecl>(D);
+ if (VD && VD->getType()->isFunctionPointerType())
+ return;
+ // Also don't warn on function pointer typedefs.
+ TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D);
+ if (TD && (TD->getUnderlyingType()->isFunctionPointerType() ||
+ TD->getUnderlyingType()->isFunctionType()))
+ return;
+ // Attribute can only be applied to function types.
+ if (!isa<FunctionDecl>(D)) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << /* function */0;
+ return;
+ }
+
+ D->addAttr(::new (S.Context) X86ForceAlignArgPointerAttr(Attr.getRange(),
+ S.Context));
+}
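+
+// Source-level form handled above (illustrative); the dispatcher below only
+// routes it here for non-x86_64 targets:
+//   void __attribute__((force_align_arg_pointer)) callback(void);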
+
+static void HandleDLLImportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ // Attribute can be applied only to functions or variables.
+ if (isa<VarDecl>(D)) {
+ D->addAttr(::new (S.Context) DLLImportAttr(Attr.getLoc(), S.Context));
+ return;
+ }
+
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) {
+ // Apparently Visual C++ thinks it is okay to not emit a warning
+ // in this case, so only emit a warning when -fms-extensions is not
+ // specified.
+ if (!S.getLangOpts().MicrosoftExt)
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 2 /*variable and function*/;
+ return;
+ }
+
+ // Currently, the dllimport attribute is ignored for inlined functions;
+ // a warning is emitted.
+ if (FD->isInlineSpecified()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllimport";
+ return;
+ }
+
+ // The attribute is also overridden by a subsequent declaration as dllexport.
+ // Warning is emitted.
+ for (AttributeList *nextAttr = Attr.getNext(); nextAttr;
+ nextAttr = nextAttr->getNext()) {
+ if (nextAttr->getKind() == AttributeList::AT_dllexport) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllimport";
+ return;
+ }
+ }
+
+ if (D->getAttr<DLLExportAttr>()) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllimport";
+ return;
+ }
+
+ D->addAttr(::new (S.Context) DLLImportAttr(Attr.getLoc(), S.Context));
+}
+
+static void HandleDLLExportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() != 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ // Attribute can be applied only to functions or variables.
+ if (isa<VarDecl>(D)) {
+ D->addAttr(::new (S.Context) DLLExportAttr(Attr.getLoc(), S.Context));
+ return;
+ }
+
+ FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ if (!FD) {
+ S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
+ << Attr.getName() << 2 /*variable and function*/;
+ return;
+ }
+
+ // Currently, the dllexport attribute is ignored for inlined functions;
+ // a warning is emitted.
+ if (FD->isInlineSpecified()) {
+ // FIXME: ... unless the -fkeep-inline-functions flag has been used.
+ S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllexport";
+ return;
+ }
+
+ D->addAttr(::new (S.Context) DLLExportAttr(Attr.getLoc(), S.Context));
+}
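+
+// Typical source-level forms dispatched to the handlers above (illustrative;
+// with -fms-extensions the __declspec spellings are also accepted):
+//   __attribute__((dllimport)) extern int imported_datum;
+//   __attribute__((dllexport)) void exported_fn(void);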
+
+namespace {
+ class X86AttributesSema : public TargetAttributesSema {
+ public:
+ X86AttributesSema() { }
+ bool ProcessDeclAttribute(Scope *scope, Decl *D,
+ const AttributeList &Attr, Sema &S) const {
+ const llvm::Triple &Triple(S.Context.getTargetInfo().getTriple());
+ if (Triple.getOS() == llvm::Triple::Win32 ||
+ Triple.getOS() == llvm::Triple::MinGW32) {
+ switch (Attr.getKind()) {
+ case AttributeList::AT_dllimport: HandleDLLImportAttr(D, Attr, S);
+ return true;
+ case AttributeList::AT_dllexport: HandleDLLExportAttr(D, Attr, S);
+ return true;
+ default: break;
+ }
+ }
+ if (Triple.getArch() != llvm::Triple::x86_64 &&
+ (Attr.getName()->getName() == "force_align_arg_pointer" ||
+ Attr.getName()->getName() == "__force_align_arg_pointer__")) {
+ HandleX86ForceAlignArgPointerAttr(D, Attr, S);
+ return true;
+ }
+ return false;
+ }
+ };
+}
+
+const TargetAttributesSema &Sema::getTargetAttributesSema() const {
+ if (TheTargetAttributesSema)
+ return *TheTargetAttributesSema;
+
+ const llvm::Triple &Triple(Context.getTargetInfo().getTriple());
+ switch (Triple.getArch()) {
+ case llvm::Triple::msp430:
+ return *(TheTargetAttributesSema = new MSP430AttributesSema);
+ case llvm::Triple::mblaze:
+ return *(TheTargetAttributesSema = new MBlazeAttributesSema);
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return *(TheTargetAttributesSema = new X86AttributesSema);
+ default:
+ return *(TheTargetAttributesSema = new TargetAttributesSema);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.h b/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.h
new file mode 100644
index 0000000..410c900
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/TargetAttributesSema.h
@@ -0,0 +1,27 @@
+//===--- TargetAttributesSema.h - Semantic Analysis For Target Attributes -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_SEMA_TARGETSEMA_H
+#define CLANG_SEMA_TARGETSEMA_H
+
+namespace clang {
+ class Scope;
+ class Decl;
+ class AttributeList;
+ class Sema;
+
+ class TargetAttributesSema {
+ public:
+ virtual ~TargetAttributesSema();
+ virtual bool ProcessDeclAttribute(Scope *scope, Decl *D,
+ const AttributeList &Attr, Sema &S) const;
+ };
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
new file mode 100644
index 0000000..fdb861e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/TreeTransform.h
@@ -0,0 +1,9220 @@
+//===------- TreeTransform.h - Semantic Tree Transformation -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//===----------------------------------------------------------------------===//
+//
+// This file implements a semantic tree transformation that takes a given
+// AST and rebuilds it, possibly transforming some nodes in the process.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_TREETRANSFORM_H
+#define LLVM_CLANG_SEMA_TREETRANSFORM_H
+
+#include "clang/Sema/SemaInternal.h"
+#include "clang/Sema/Lookup.h"
+#include "clang/Sema/ParsedTemplate.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/Sema/ScopeInfo.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Sema/Ownership.h"
+#include "clang/Sema/Designator.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "TypeLocBuilder.h"
+#include <algorithm>
+
+namespace clang {
+using namespace sema;
+
+/// \brief A semantic tree transformation that allows one to transform one
+/// abstract syntax tree into another.
+///
+/// A new tree transformation is defined by creating a new subclass \c X of
+/// \c TreeTransform<X> and then overriding certain operations to provide
+/// behavior specific to that transformation. For example, template
+/// instantiation is implemented as a tree transformation where the
+/// transformation of TemplateTypeParmType nodes involves substituting the
+/// template arguments for their corresponding template parameters; a similar
+/// transformation is performed for non-type template parameters and
+/// template template parameters.
+///
+/// This tree-transformation template uses static polymorphism to allow
+/// subclasses to customize any of its operations. Thus, a subclass can
+/// override any of the transformation or rebuild operators by providing an
+/// operation with the same signature as the default implementation. The
+/// overriding function should not be virtual.
+///
+/// Semantic tree transformations are split into two stages, either of which
+/// can be replaced by a subclass. The "transform" step transforms an AST node
+/// or the parts of an AST node using the various transformation functions,
+/// then passes the pieces on to the "rebuild" step, which constructs a new AST
+/// node of the appropriate kind from the pieces. The default transformation
+/// routines recursively transform the operands to composite AST nodes (e.g.,
+/// the pointee type of a PointerType node) and, if any of those operand nodes
+/// were changed by the transformation, invokes the rebuild operation to create
+/// a new AST node.
+///
+/// Subclasses can customize the transformation at various levels. The
+/// most coarse-grained transformations involve replacing TransformType(),
+/// TransformExpr(), TransformDecl(), TransformNestedNameSpecifierLoc(),
+/// TransformTemplateName(), or TransformTemplateArgument() with entirely
+/// new implementations.
+///
+/// For more fine-grained transformations, subclasses can replace any of the
+/// \c TransformXXX functions (where XXX is the name of an AST node, e.g.,
+/// PointerType, StmtExpr) to alter the transformation. As mentioned previously,
+/// replacing TransformTemplateTypeParmType() allows template instantiation
+/// to substitute template arguments for their corresponding template
+/// parameters. Additionally, subclasses can override the \c RebuildXXX
+/// functions to control how AST nodes are rebuilt when their operands change.
+/// By default, \c TreeTransform will invoke semantic analysis to rebuild
+/// AST nodes. However, certain other tree transformations (e.g., cloning) may
+/// be able to use more efficient rebuild steps.
+///
+/// There are a handful of other functions that can be overridden, allowing one
+/// to avoid traversing nodes that don't need any transformation
+/// (\c AlreadyTransformed()), force rebuilding AST nodes even when their
+/// operands have not changed (\c AlwaysRebuild()), and customize the
+/// default locations and entity names used for type-checking
+/// (\c getBaseLocation(), \c getBaseEntity()).
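+///
+/// A minimal illustrative sketch of a derived transform (the class name and
+/// behavior are hypothetical, not part of Clang):
+/// \code
+///   class RebuildEverything : public TreeTransform<RebuildEverything> {
+///   public:
+///     RebuildEverything(Sema &S) : TreeTransform<RebuildEverything>(S) { }
+///
+///     // Force every node to be rebuilt, even when no operand changed.
+///     bool AlwaysRebuild() { return true; }
+///   };
+/// \endcode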
+template<typename Derived>
+class TreeTransform {
+ /// \brief Private RAII object that helps us forget and then re-remember
+ /// the template argument corresponding to a partially-substituted parameter
+ /// pack.
+ class ForgetPartiallySubstitutedPackRAII {
+ Derived &Self;
+ TemplateArgument Old;
+
+ public:
+ ForgetPartiallySubstitutedPackRAII(Derived &Self) : Self(Self) {
+ Old = Self.ForgetPartiallySubstitutedPack();
+ }
+
+ ~ForgetPartiallySubstitutedPackRAII() {
+ Self.RememberPartiallySubstitutedPack(Old);
+ }
+ };
+
+protected:
+ Sema &SemaRef;
+
+ /// \brief The set of local declarations that have been transformed, for
+ /// cases where we are forced to build new declarations within the transformer
+ /// rather than in the subclass (e.g., lambda closure types).
+ llvm::DenseMap<Decl *, Decl *> TransformedLocalDecls;
+
+public:
+ /// \brief Initializes a new tree transformer.
+ TreeTransform(Sema &SemaRef) : SemaRef(SemaRef) { }
+
+ /// \brief Retrieves a reference to the derived class.
+ Derived &getDerived() { return static_cast<Derived&>(*this); }
+
+ /// \brief Retrieves a reference to the derived class.
+ const Derived &getDerived() const {
+ return static_cast<const Derived&>(*this);
+ }
+
+ static inline ExprResult Owned(Expr *E) { return E; }
+ static inline StmtResult Owned(Stmt *S) { return S; }
+
+ /// \brief Retrieves a reference to the semantic analysis object used for
+ /// this tree transform.
+ Sema &getSema() const { return SemaRef; }
+
+ /// \brief Whether the transformation should always rebuild AST nodes, even
+ /// if none of the children have changed.
+ ///
+ /// Subclasses may override this function to specify when the transformation
+ /// should rebuild all AST nodes.
+ bool AlwaysRebuild() { return false; }
+
+ /// \brief Returns the location of the entity being transformed, if that
+ /// information was not available elsewhere in the AST.
+ ///
+ /// By default, returns no source-location information. Subclasses can
+ /// provide an alternative implementation that provides better location
+ /// information.
+ SourceLocation getBaseLocation() { return SourceLocation(); }
+
+ /// \brief Returns the name of the entity being transformed, if that
+ /// information was not available elsewhere in the AST.
+ ///
+ /// By default, returns an empty name. Subclasses can provide an alternative
+ /// implementation with a more precise name.
+ DeclarationName getBaseEntity() { return DeclarationName(); }
+
+ /// \brief Sets the "base" location and entity when that
+ /// information is known based on another transformation.
+ ///
+ /// By default, the source location and entity are ignored. Subclasses can
+ /// override this function to provide a customized implementation.
+ void setBase(SourceLocation Loc, DeclarationName Entity) { }
+
+ /// \brief RAII object that temporarily sets the base location and entity
+ /// used for reporting diagnostics in types.
+ class TemporaryBase {
+ TreeTransform &Self;
+ SourceLocation OldLocation;
+ DeclarationName OldEntity;
+
+ public:
+ TemporaryBase(TreeTransform &Self, SourceLocation Location,
+ DeclarationName Entity) : Self(Self) {
+ OldLocation = Self.getDerived().getBaseLocation();
+ OldEntity = Self.getDerived().getBaseEntity();
+
+ if (Location.isValid())
+ Self.getDerived().setBase(Location, Entity);
+ }
+
+ ~TemporaryBase() {
+ Self.getDerived().setBase(OldLocation, OldEntity);
+ }
+ };
+
+ /// \brief Determine whether the given type \p T has already been
+ /// transformed.
+ ///
+ /// Subclasses can provide an alternative implementation of this routine
+ /// to short-circuit evaluation when it is known that a given type will
+ /// not change. For example, template instantiation need not traverse
+ /// non-dependent types.
+ bool AlreadyTransformed(QualType T) {
+ return T.isNull();
+ }
+
+ /// \brief Determine whether the given call argument should be dropped, e.g.,
+ /// because it is a default argument.
+ ///
+ /// Subclasses can provide an alternative implementation of this routine to
+ /// determine which kinds of call arguments get dropped. By default,
+ /// CXXDefaultArgument nodes are dropped (prior to transformation).
+ bool DropCallArgument(Expr *E) {
+ return E->isDefaultArgument();
+ }
+
+ /// \brief Determine whether we should expand a pack expansion with the
+ /// given set of parameter packs into separate arguments by repeatedly
+ /// transforming the pattern.
+ ///
+ /// By default, the transformer never tries to expand pack expansions.
+ /// Subclasses can override this routine to provide different behavior.
+ ///
+ /// \param EllipsisLoc The location of the ellipsis that identifies the
+ /// pack expansion.
+ ///
+ /// \param PatternRange The source range that covers the entire pattern of
+ /// the pack expansion.
+ ///
+ /// \param Unexpanded The set of unexpanded parameter packs within the
+ /// pattern.
+ ///
+ /// \param NumUnexpanded The number of unexpanded parameter packs in
+ /// \p Unexpanded.
+ ///
+ /// \param ShouldExpand Will be set to \c true if the transformer should
+ /// expand the corresponding pack expansions into separate arguments. When
+ /// set, \c NumExpansions must also be set.
+ ///
+ /// \param RetainExpansion Whether the caller should add an unexpanded
+ /// pack expansion after all of the expanded arguments. This is used
+ /// when extending explicitly-specified template argument packs per
+ /// C++0x [temp.arg.explicit]p9.
+ ///
+ /// \param NumExpansions The number of separate arguments that will be in
+ /// the expanded form of the corresponding pack expansion. This is both an
+ /// input and an output parameter, which can be set by the caller if the
+ /// number of expansions is known a priori (e.g., due to a prior substitution)
+ /// and will be set by the callee when the number of expansions is known.
+ /// The callee must set this value when \c ShouldExpand is \c true; it may
+ /// set this value in other cases.
+ ///
+ /// \returns true if an error occurred (e.g., because the parameter packs
+ /// are to be instantiated with arguments of different lengths), false
+ /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
+ /// must be set.
+ bool TryExpandParameterPacks(SourceLocation EllipsisLoc,
+ SourceRange PatternRange,
+ llvm::ArrayRef<UnexpandedParameterPack> Unexpanded,
+ bool &ShouldExpand,
+ bool &RetainExpansion,
+ llvm::Optional<unsigned> &NumExpansions) {
+ ShouldExpand = false;
+ return false;
+ }
+
+ /// \brief "Forget" about the partially-substituted pack template argument,
+ /// when performing an instantiation that must preserve the parameter pack
+ /// use.
+ ///
+ /// This routine is meant to be overridden by the template instantiator.
+ TemplateArgument ForgetPartiallySubstitutedPack() {
+ return TemplateArgument();
+ }
+
+ /// \brief "Remember" the partially-substituted pack template argument
+ /// after performing an instantiation that must preserve the parameter pack
+ /// use.
+ ///
+ /// This routine is meant to be overridden by the template instantiator.
+ void RememberPartiallySubstitutedPack(TemplateArgument Arg) { }
+
+ /// \brief Note to the derived class when a function parameter pack is
+ /// being expanded.
+ void ExpandingFunctionParameterPack(ParmVarDecl *Pack) { }
+
+ /// \brief Transforms the given type into another type.
+ ///
+ /// By default, this routine transforms a type by creating a
+ /// TypeSourceInfo for it and delegating to the appropriate
+ /// function. This is expensive, but we don't mind, because
+ /// this method is deprecated anyway; all users should be
+ /// switched to storing TypeSourceInfos.
+ ///
+ /// \returns the transformed type.
+ QualType TransformType(QualType T);
+
+ /// \brief Transforms the given type-with-location into a new
+ /// type-with-location.
+ ///
+ /// By default, this routine transforms a type by delegating to the
+ /// appropriate TransformXXXType to build a new type. Subclasses
+ /// may override this function (to take over all type
+ /// transformations) or some set of the TransformXXXType functions
+ /// to alter the transformation.
+ TypeSourceInfo *TransformType(TypeSourceInfo *DI);
+
+ /// \brief Transform the given type-with-location into a new
+ /// type, collecting location information in the given builder
+ /// as necessary.
+ ///
+ QualType TransformType(TypeLocBuilder &TLB, TypeLoc TL);
+
+ /// \brief Transform the given statement.
+ ///
+ /// By default, this routine transforms a statement by delegating to the
+ /// appropriate TransformXXXStmt function to transform a specific kind of
+ /// statement or the TransformExpr() function to transform an expression.
+ /// Subclasses may override this function to transform statements using some
+ /// other mechanism.
+ ///
+ /// \returns the transformed statement.
+ StmtResult TransformStmt(Stmt *S);
+
+ /// \brief Transform the given expression.
+ ///
+ /// By default, this routine transforms an expression by delegating to the
+ /// appropriate TransformXXXExpr function to build a new expression.
+ /// Subclasses may override this function to transform expressions using some
+ /// other mechanism.
+ ///
+ /// \returns the transformed expression.
+ ExprResult TransformExpr(Expr *E);
+
+ /// \brief Transform the given list of expressions.
+ ///
+ /// This routine transforms a list of expressions by invoking
+ /// \c TransformExpr() for each subexpression. However, it also provides
+ /// support for variadic templates by expanding any pack expansions (if the
+ /// derived class permits such expansion) along the way. When pack expansions
+ /// are present, the number of outputs may not equal the number of inputs.
+ ///
+ /// \param Inputs The set of expressions to be transformed.
+ ///
+ /// \param NumInputs The number of expressions in \c Inputs.
+ ///
+ /// \param IsCall If \c true, then this transform is being performed on
+ /// function-call arguments, and any arguments that should be dropped will
+ /// be.
+ ///
+ /// \param Outputs The transformed input expressions will be added to this
+ /// vector.
+ ///
+ /// \param ArgChanged If non-NULL, will be set to \c true if any argument
+ /// changed due to transformation.
+ ///
+ /// \returns true if an error occurred, false otherwise.
+ bool TransformExprs(Expr **Inputs, unsigned NumInputs, bool IsCall,
+ SmallVectorImpl<Expr *> &Outputs,
+ bool *ArgChanged = 0);
+
+ /// \brief Transform the given declaration, which is referenced from a type
+ /// or expression.
+ ///
+ /// By default, acts as the identity function on declarations, unless the
+ /// transformer has had to transform the declaration itself. Subclasses
+ /// may override this function to provide alternate behavior.
+ Decl *TransformDecl(SourceLocation Loc, Decl *D) {
+ llvm::DenseMap<Decl *, Decl *>::iterator Known
+ = TransformedLocalDecls.find(D);
+ if (Known != TransformedLocalDecls.end())
+ return Known->second;
+
+ return D;
+ }
+
+ /// \brief Transform the attributes associated with the given declaration and
+ /// place them on the new declaration.
+ ///
+ /// By default, this operation does nothing. Subclasses may override this
+ /// behavior to transform attributes.
+ void transformAttrs(Decl *Old, Decl *New) { }
+
+ /// \brief Note that a local declaration has been transformed by this
+ /// transformer.
+ ///
+ /// Local declarations are typically transformed via a call to
+ /// TransformDefinition. However, in some cases (e.g., lambda expressions),
+ /// the transformer itself has to transform the declarations. This routine
+ /// can be overridden by a subclass that keeps track of such mappings.
+ void transformedLocalDecl(Decl *Old, Decl *New) {
+ TransformedLocalDecls[Old] = New;
+ }
+
+ /// \brief Transform the definition of the given declaration.
+ ///
+ /// By default, invokes TransformDecl() to transform the declaration.
+ /// Subclasses may override this function to provide alternate behavior.
+ Decl *TransformDefinition(SourceLocation Loc, Decl *D) {
+ return getDerived().TransformDecl(Loc, D);
+ }
+
+ /// \brief Transform the given declaration, which was the first part of a
+ /// nested-name-specifier in a member access expression.
+ ///
+ /// This specific declaration transformation only applies to the first
+ /// identifier in a nested-name-specifier of a member access expression, e.g.,
+ /// the \c T in \c x->T::member
+ ///
+ /// By default, invokes TransformDecl() to transform the declaration.
+ /// Subclasses may override this function to provide alternate behavior.
+ NamedDecl *TransformFirstQualifierInScope(NamedDecl *D, SourceLocation Loc) {
+ return cast_or_null<NamedDecl>(getDerived().TransformDecl(Loc, D));
+ }
+
+ /// \brief Transform the given nested-name-specifier with source-location
+ /// information.
+ ///
+ /// By default, transforms all of the types and declarations within the
+ /// nested-name-specifier. Subclasses may override this function to provide
+ /// alternate behavior.
+ NestedNameSpecifierLoc TransformNestedNameSpecifierLoc(
+ NestedNameSpecifierLoc NNS,
+ QualType ObjectType = QualType(),
+ NamedDecl *FirstQualifierInScope = 0);
+
+ /// \brief Transform the given declaration name.
+ ///
+ /// By default, transforms the types of conversion function, constructor,
+ /// and destructor names and then (if needed) rebuilds the declaration name.
+ /// Identifiers and selectors are returned unmodified. Subclasses may
+ /// override this function to provide alternate behavior.
+ DeclarationNameInfo
+ TransformDeclarationNameInfo(const DeclarationNameInfo &NameInfo);
+
+ /// \brief Transform the given template name.
+ ///
+ /// \param SS The nested-name-specifier that qualifies the template
+ /// name. This nested-name-specifier must already have been transformed.
+ ///
+ /// \param Name The template name to transform.
+ ///
+ /// \param NameLoc The source location of the template name.
+ ///
+ /// \param ObjectType If we're translating a template name within a member
+ /// access expression, this is the type of the object whose member template
+ /// is being referenced.
+ ///
+ /// \param FirstQualifierInScope If the first part of a nested-name-specifier
+ /// also refers to a name within the current (lexical) scope, this is the
+ /// declaration it refers to.
+ ///
+ /// By default, transforms the template name by transforming the declarations
+ /// and nested-name-specifiers that occur within the template name.
+ /// Subclasses may override this function to provide alternate behavior.
+ TemplateName TransformTemplateName(CXXScopeSpec &SS,
+ TemplateName Name,
+ SourceLocation NameLoc,
+ QualType ObjectType = QualType(),
+ NamedDecl *FirstQualifierInScope = 0);
+
+ /// \brief Transform the given template argument.
+ ///
+ /// By default, this operation transforms the type, expression, or
+ /// declaration stored within the template argument and constructs a
+ /// new template argument from the transformed result. Subclasses may
+ /// override this function to provide alternate behavior.
+ ///
+ /// Returns true if there was an error.
+ bool TransformTemplateArgument(const TemplateArgumentLoc &Input,
+ TemplateArgumentLoc &Output);
+
+ /// \brief Transform the given set of template arguments.
+ ///
+ /// By default, this operation transforms all of the template arguments
+ /// in the input set using \c TransformTemplateArgument(), and appends
+ /// the transformed arguments to the output list.
+ ///
+ /// Note that this overload of \c TransformTemplateArguments() is merely
+ /// a convenience function. Subclasses that wish to override this behavior
+ /// should override the iterator-based member template version.
+ ///
+ /// \param Inputs The set of template arguments to be transformed.
+ ///
+ /// \param NumInputs The number of template arguments in \p Inputs.
+ ///
+ /// \param Outputs The set of transformed template arguments output by this
+ /// routine.
+ ///
+ /// Returns true if an error occurred.
+ bool TransformTemplateArguments(const TemplateArgumentLoc *Inputs,
+ unsigned NumInputs,
+ TemplateArgumentListInfo &Outputs) {
+ return TransformTemplateArguments(Inputs, Inputs + NumInputs, Outputs);
+ }
+
+ /// \brief Transform the given set of template arguments.
+ ///
+ /// By default, this operation transforms all of the template arguments
+ /// in the input set using \c TransformTemplateArgument(), and appends
+ /// the transformed arguments to the output list.
+ ///
+ /// \param First An iterator to the first template argument.
+ ///
+ /// \param Last An iterator one step past the last template argument.
+ ///
+ /// \param Outputs The set of transformed template arguments output by this
+ /// routine.
+ ///
+ /// Returns true if an error occurred.
+ template<typename InputIterator>
+ bool TransformTemplateArguments(InputIterator First,
+ InputIterator Last,
+ TemplateArgumentListInfo &Outputs);
+
+ /// \brief Fakes up a TemplateArgumentLoc for a given TemplateArgument.
+ void InventTemplateArgumentLoc(const TemplateArgument &Arg,
+ TemplateArgumentLoc &ArgLoc);
+
+ /// \brief Fakes up a TypeSourceInfo for a type.
+ TypeSourceInfo *InventTypeSourceInfo(QualType T) {
+ return SemaRef.Context.getTrivialTypeSourceInfo(T,
+ getDerived().getBaseLocation());
+ }
+
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ QualType Transform##CLASS##Type(TypeLocBuilder &TLB, CLASS##TypeLoc T);
+#include "clang/AST/TypeLocNodes.def"
+
+ StmtResult
+ TransformSEHHandler(Stmt *Handler);
+
+ QualType
+ TransformTemplateSpecializationType(TypeLocBuilder &TLB,
+ TemplateSpecializationTypeLoc TL,
+ TemplateName Template);
+
+ QualType
+ TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
+ DependentTemplateSpecializationTypeLoc TL,
+ TemplateName Template,
+ CXXScopeSpec &SS);
+
+ QualType
+ TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
+ DependentTemplateSpecializationTypeLoc TL,
+ NestedNameSpecifierLoc QualifierLoc);
+
+ /// \brief Transforms the parameters of a function type into the
+ /// given vectors.
+ ///
+ /// The result vectors should be kept in sync; null entries in the
+ /// variables vector are acceptable.
+ ///
+ /// Return true on error.
+ bool TransformFunctionTypeParams(SourceLocation Loc,
+ ParmVarDecl **Params, unsigned NumParams,
+ const QualType *ParamTypes,
+ SmallVectorImpl<QualType> &PTypes,
+ SmallVectorImpl<ParmVarDecl*> *PVars);
+
+ /// \brief Transforms a single function-type parameter. Return null
+ /// on error.
+ ///
+ /// \param indexAdjustment - A number to add to the parameter's
+ /// scope index; can be negative
+ ParmVarDecl *TransformFunctionTypeParam(ParmVarDecl *OldParm,
+ int indexAdjustment,
+ llvm::Optional<unsigned> NumExpansions,
+ bool ExpectParameterPack);
+
+ QualType TransformReferenceType(TypeLocBuilder &TLB, ReferenceTypeLoc TL);
+
+ StmtResult TransformCompoundStmt(CompoundStmt *S, bool IsStmtExpr);
+ ExprResult TransformCXXNamedCastExpr(CXXNamedCastExpr *E);
+
+#define STMT(Node, Parent) \
+ StmtResult Transform##Node(Node *S);
+#define EXPR(Node, Parent) \
+ ExprResult Transform##Node(Node *E);
+#define ABSTRACT_STMT(Stmt)
+#include "clang/AST/StmtNodes.inc"
+
+ /// \brief Build a new pointer type given its pointee type.
+ ///
+ /// By default, performs semantic analysis when building the pointer type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildPointerType(QualType PointeeType, SourceLocation Sigil);
+
+ /// \brief Build a new block pointer type given its pointee type.
+ ///
+ /// By default, performs semantic analysis when building the block pointer
+ /// type. Subclasses may override this routine to provide different behavior.
+ QualType RebuildBlockPointerType(QualType PointeeType, SourceLocation Sigil);
+
+ /// \brief Build a new reference type given the type it references.
+ ///
+ /// By default, performs semantic analysis when building the
+ /// reference type. Subclasses may override this routine to provide
+ /// different behavior.
+ ///
+ /// \param LValue whether the type was written with an lvalue sigil
+ /// or an rvalue sigil.
+ QualType RebuildReferenceType(QualType ReferentType,
+ bool LValue,
+ SourceLocation Sigil);
+
+ /// \brief Build a new member pointer type given the pointee type and the
+ /// class type it refers into.
+ ///
+ /// By default, performs semantic analysis when building the member pointer
+ /// type. Subclasses may override this routine to provide different behavior.
+ QualType RebuildMemberPointerType(QualType PointeeType, QualType ClassType,
+ SourceLocation Sigil);
+
+ /// \brief Build a new array type given the element type, size
+ /// modifier, size of the array (if known), size expression, and index type
+ /// qualifiers.
+ ///
+ /// By default, performs semantic analysis when building the array type.
+ /// Subclasses may override this routine to provide different behavior.
+ /// Also by default, all of the other Rebuild*Array routines delegate to this
+ /// routine to perform the actual work.
+ QualType RebuildArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ const llvm::APInt *Size,
+ Expr *SizeExpr,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange);
+
+ /// \brief Build a new constant array type given the element type, size
+ /// modifier, (known) size of the array, and index type qualifiers.
+ ///
+ /// By default, performs semantic analysis when building the array type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildConstantArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ const llvm::APInt &Size,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange);
+
+ /// \brief Build a new incomplete array type given the element type, size
+ /// modifier, and index type qualifiers.
+ ///
+ /// By default, performs semantic analysis when building the array type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildIncompleteArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange);
+
+ /// \brief Build a new variable-length array type given the element type,
+ /// size modifier, size expression, and index type qualifiers.
+ ///
+ /// By default, performs semantic analysis when building the array type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildVariableArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ Expr *SizeExpr,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange);
+
+ /// \brief Build a new dependent-sized array type given the element type,
+ /// size modifier, size expression, and index type qualifiers.
+ ///
+ /// By default, performs semantic analysis when building the array type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildDependentSizedArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ Expr *SizeExpr,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange);
+
+ /// \brief Build a new vector type given the element type and
+ /// number of elements.
+ ///
+ /// By default, performs semantic analysis when building the vector type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildVectorType(QualType ElementType, unsigned NumElements,
+ VectorType::VectorKind VecKind);
+
+ /// \brief Build a new extended vector type given the element type and
+ /// number of elements.
+ ///
+ /// By default, performs semantic analysis when building the vector type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildExtVectorType(QualType ElementType, unsigned NumElements,
+ SourceLocation AttributeLoc);
+
+ /// \brief Build a new potentially dependently-sized extended vector type
+ /// given the element type and number of elements.
+ ///
+ /// By default, performs semantic analysis when building the vector type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildDependentSizedExtVectorType(QualType ElementType,
+ Expr *SizeExpr,
+ SourceLocation AttributeLoc);
+
+ /// \brief Build a new function type.
+ ///
+ /// By default, performs semantic analysis when building the function type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildFunctionProtoType(QualType T,
+ QualType *ParamTypes,
+ unsigned NumParamTypes,
+ bool Variadic, bool HasTrailingReturn,
+ unsigned Quals,
+ RefQualifierKind RefQualifier,
+ const FunctionType::ExtInfo &Info);
+
+ /// \brief Build a new unprototyped function type.
+ QualType RebuildFunctionNoProtoType(QualType ResultType);
+
+ /// \brief Rebuild an unresolved typename type, given the decl that
+ /// the UnresolvedUsingTypenameDecl was transformed to.
+ QualType RebuildUnresolvedUsingType(Decl *D);
+
+ /// \brief Build a new typedef type.
+ QualType RebuildTypedefType(TypedefNameDecl *Typedef) {
+ return SemaRef.Context.getTypeDeclType(Typedef);
+ }
+
+ /// \brief Build a new class/struct/union type.
+ QualType RebuildRecordType(RecordDecl *Record) {
+ return SemaRef.Context.getTypeDeclType(Record);
+ }
+
+ /// \brief Build a new Enum type.
+ QualType RebuildEnumType(EnumDecl *Enum) {
+ return SemaRef.Context.getTypeDeclType(Enum);
+ }
+
+ /// \brief Build a new typeof(expr) type.
+ ///
+ /// By default, performs semantic analysis when building the typeof type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildTypeOfExprType(Expr *Underlying, SourceLocation Loc);
+
+ /// \brief Build a new typeof(type) type.
+ ///
+ /// By default, builds a new TypeOfType with the given underlying type.
+ QualType RebuildTypeOfType(QualType Underlying);
+
+ /// \brief Build a new unary transform type.
+ QualType RebuildUnaryTransformType(QualType BaseType,
+ UnaryTransformType::UTTKind UKind,
+ SourceLocation Loc);
+
+ /// \brief Build a new C++0x decltype type.
+ ///
+ /// By default, performs semantic analysis when building the decltype type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildDecltypeType(Expr *Underlying, SourceLocation Loc);
+
+ /// \brief Build a new C++0x auto type.
+ ///
+ /// By default, builds a new AutoType with the given deduced type.
+ QualType RebuildAutoType(QualType Deduced) {
+ return SemaRef.Context.getAutoType(Deduced);
+ }
+
+ /// \brief Build a new template specialization type.
+ ///
+ /// By default, performs semantic analysis when building the template
+ /// specialization type. Subclasses may override this routine to provide
+ /// different behavior.
+ QualType RebuildTemplateSpecializationType(TemplateName Template,
+ SourceLocation TemplateLoc,
+ TemplateArgumentListInfo &Args);
+
+ /// \brief Build a new parenthesized type.
+ ///
+ /// By default, builds a new ParenType type from the inner type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildParenType(QualType InnerType) {
+ return SemaRef.Context.getParenType(InnerType);
+ }
+
+ /// \brief Build a new qualified name type.
+ ///
+ /// By default, builds a new ElaboratedType type from the keyword,
+ /// the nested-name-specifier and the named type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildElaboratedType(SourceLocation KeywordLoc,
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifierLoc QualifierLoc,
+ QualType Named) {
+ return SemaRef.Context.getElaboratedType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
+ Named);
+ }
+
+ /// \brief Build a new typename type that refers to a template-id.
+ ///
+ /// By default, builds a new DependentNameType type from the
+ /// nested-name-specifier and the given type. Subclasses may override
+ /// this routine to provide different behavior.
+ QualType RebuildDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifierLoc QualifierLoc,
+ const IdentifierInfo *Name,
+ SourceLocation NameLoc,
+ TemplateArgumentListInfo &Args) {
+ // Rebuild the template name.
+ // TODO: avoid TemplateName abstraction
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ TemplateName InstName
+ = getDerived().RebuildTemplateName(SS, *Name, NameLoc, QualType(), 0);
+
+ if (InstName.isNull())
+ return QualType();
+
+ // If it's still dependent, make a dependent specialization.
+ if (InstName.getAsDependentTemplateName())
+ return SemaRef.Context.getDependentTemplateSpecializationType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
+ Name,
+ Args);
+
+ // Otherwise, make an elaborated type wrapping a non-dependent
+ // specialization.
+ QualType T =
+ getDerived().RebuildTemplateSpecializationType(InstName, NameLoc, Args);
+ if (T.isNull()) return QualType();
+
+ if (Keyword == ETK_None && QualifierLoc.getNestedNameSpecifier() == 0)
+ return T;
+
+ return SemaRef.Context.getElaboratedType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
+ T);
+ }
+
+ /// \brief Build a new typename type that refers to an identifier.
+ ///
+ /// By default, performs semantic analysis when building the typename type
+ /// (or elaborated type). Subclasses may override this routine to provide
+ /// different behavior.
+ QualType RebuildDependentNameType(ElaboratedTypeKeyword Keyword,
+ SourceLocation KeywordLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ const IdentifierInfo *Id,
+ SourceLocation IdLoc) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ if (QualifierLoc.getNestedNameSpecifier()->isDependent()) {
+ // If the name is still dependent, just build a new dependent name type.
+ if (!SemaRef.computeDeclContext(SS))
+ return SemaRef.Context.getDependentNameType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
+ Id);
+ }
+
+ if (Keyword == ETK_None || Keyword == ETK_Typename)
+ return SemaRef.CheckTypenameType(Keyword, KeywordLoc, QualifierLoc,
+ *Id, IdLoc);
+
+ TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForKeyword(Keyword);
+
+ // We had a dependent elaborated-type-specifier that has been transformed
+ // into a non-dependent elaborated-type-specifier. Find the tag we're
+ // referring to.
+ LookupResult Result(SemaRef, Id, IdLoc, Sema::LookupTagName);
+ DeclContext *DC = SemaRef.computeDeclContext(SS, false);
+ if (!DC)
+ return QualType();
+
+ if (SemaRef.RequireCompleteDeclContext(SS, DC))
+ return QualType();
+
+ TagDecl *Tag = 0;
+ SemaRef.LookupQualifiedName(Result, DC);
+ switch (Result.getResultKind()) {
+ case LookupResult::NotFound:
+ case LookupResult::NotFoundInCurrentInstantiation:
+ break;
+
+ case LookupResult::Found:
+ Tag = Result.getAsSingle<TagDecl>();
+ break;
+
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue:
+ llvm_unreachable("Tag lookup cannot find non-tags");
+
+ case LookupResult::Ambiguous:
+ // Let the LookupResult structure handle ambiguities.
+ return QualType();
+ }
+
+ if (!Tag) {
+ // Check whether the name exists but isn't a tag type, and use that to emit
+ // better diagnostics.
+ LookupResult Result(SemaRef, Id, IdLoc, Sema::LookupTagName);
+ SemaRef.LookupQualifiedName(Result, DC);
+ switch (Result.getResultKind()) {
+ case LookupResult::Found:
+ case LookupResult::FoundOverloaded:
+ case LookupResult::FoundUnresolvedValue: {
+ NamedDecl *SomeDecl = Result.getRepresentativeDecl();
+ unsigned Kind = 0;
+ if (isa<TypedefDecl>(SomeDecl)) Kind = 1;
+ else if (isa<TypeAliasDecl>(SomeDecl)) Kind = 2;
+ else if (isa<ClassTemplateDecl>(SomeDecl)) Kind = 3;
+ SemaRef.Diag(IdLoc, diag::err_tag_reference_non_tag) << Kind;
+ SemaRef.Diag(SomeDecl->getLocation(), diag::note_declared_at);
+ break;
+ }
+ default:
+ // FIXME: Would be nice to highlight just the source range.
+ SemaRef.Diag(IdLoc, diag::err_not_tag_in_scope)
+ << Kind << Id << DC;
+ break;
+ }
+ return QualType();
+ }
+
+ if (!SemaRef.isAcceptableTagRedeclaration(Tag, Kind, /*isDefinition*/false,
+ IdLoc, *Id)) {
+ SemaRef.Diag(KeywordLoc, diag::err_use_with_wrong_tag) << Id;
+ SemaRef.Diag(Tag->getLocation(), diag::note_previous_use);
+ return QualType();
+ }
+
+ // Build the elaborated-type-specifier type.
+ QualType T = SemaRef.Context.getTypeDeclType(Tag);
+ return SemaRef.Context.getElaboratedType(Keyword,
+ QualifierLoc.getNestedNameSpecifier(),
+ T);
+ }
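+
+ // Illustrative example (hypothetical code, not upstream): the kind of source
+ // that reaches RebuildDependentNameType during template instantiation.
+ //
+ //   template<typename T> struct Use {
+ //     typedef typename T::type inner;   // DependentNameType while T is open
+ //   };
+ //   struct HasType { typedef int type; };
+ //   Use<HasType>::inner x;  // instantiation rebuilds 'typename T::type' here,
+ //                           // yielding an elaborated typename type for 'int'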
+
+ /// \brief Build a new pack expansion type.
+ ///
+ /// By default, builds a new PackExpansionType type from the given pattern.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildPackExpansionType(QualType Pattern,
+ SourceRange PatternRange,
+ SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions) {
+ return getSema().CheckPackExpansion(Pattern, PatternRange, EllipsisLoc,
+ NumExpansions);
+ }
+
+ /// \brief Build a new atomic type given its value type.
+ ///
+ /// By default, performs semantic analysis when building the atomic type.
+ /// Subclasses may override this routine to provide different behavior.
+ QualType RebuildAtomicType(QualType ValueType, SourceLocation KWLoc);
+
+ /// \brief Build a new template name given a nested name specifier, a flag
+ /// indicating whether the "template" keyword was provided, and the template
+ /// that the template name refers to.
+ ///
+ /// By default, builds the new template name directly. Subclasses may override
+ /// this routine to provide different behavior.
+ TemplateName RebuildTemplateName(CXXScopeSpec &SS,
+ bool TemplateKW,
+ TemplateDecl *Template);
+
+ /// \brief Build a new template name given a nested name specifier and the
+ /// name that is referred to as a template.
+ ///
+ /// By default, performs semantic analysis to determine whether the name can
+ /// be resolved to a specific template, then builds the appropriate kind of
+ /// template name. Subclasses may override this routine to provide different
+ /// behavior.
+ TemplateName RebuildTemplateName(CXXScopeSpec &SS,
+ const IdentifierInfo &Name,
+ SourceLocation NameLoc,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope);
+
+ /// \brief Build a new template name given a nested name specifier and the
+ /// overloaded operator name that is referred to as a template.
+ ///
+ /// By default, performs semantic analysis to determine whether the name can
+ /// be resolved to a specific template, then builds the appropriate kind of
+ /// template name. Subclasses may override this routine to provide different
+ /// behavior.
+ TemplateName RebuildTemplateName(CXXScopeSpec &SS,
+ OverloadedOperatorKind Operator,
+ SourceLocation NameLoc,
+ QualType ObjectType);
+
+ /// \brief Build a new template name given a template template parameter pack
+ /// and the argument pack that it is being substituted with.
+ ///
+ /// By default, builds the substituted template name directly. Subclasses may
+ /// override this routine to provide different behavior.
+ TemplateName RebuildTemplateName(TemplateTemplateParmDecl *Param,
+ const TemplateArgument &ArgPack) {
+ return getSema().Context.getSubstTemplateTemplateParmPack(Param, ArgPack);
+ }
+
+ /// \brief Build a new compound statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildCompoundStmt(SourceLocation LBraceLoc,
+ MultiStmtArg Statements,
+ SourceLocation RBraceLoc,
+ bool IsStmtExpr) {
+ return getSema().ActOnCompoundStmt(LBraceLoc, RBraceLoc, Statements,
+ IsStmtExpr);
+ }
+
+ /// \brief Build a new case statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildCaseStmt(SourceLocation CaseLoc,
+ Expr *LHS,
+ SourceLocation EllipsisLoc,
+ Expr *RHS,
+ SourceLocation ColonLoc) {
+ return getSema().ActOnCaseStmt(CaseLoc, LHS, EllipsisLoc, RHS,
+ ColonLoc);
+ }
+
+ /// \brief Attach the body to a new case statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildCaseStmtBody(Stmt *S, Stmt *Body) {
+ getSema().ActOnCaseStmtBody(S, Body);
+ return S;
+ }
+
+ /// \brief Build a new default statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildDefaultStmt(SourceLocation DefaultLoc,
+ SourceLocation ColonLoc,
+ Stmt *SubStmt) {
+ return getSema().ActOnDefaultStmt(DefaultLoc, ColonLoc, SubStmt,
+ /*CurScope=*/0);
+ }
+
+ /// \brief Build a new label statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildLabelStmt(SourceLocation IdentLoc, LabelDecl *L,
+ SourceLocation ColonLoc, Stmt *SubStmt) {
+ return SemaRef.ActOnLabelStmt(IdentLoc, L, ColonLoc, SubStmt);
+ }
+
+ /// \brief Build a new "if" statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildIfStmt(SourceLocation IfLoc, Sema::FullExprArg Cond,
+ VarDecl *CondVar, Stmt *Then,
+ SourceLocation ElseLoc, Stmt *Else) {
+ return getSema().ActOnIfStmt(IfLoc, Cond, CondVar, Then, ElseLoc, Else);
+ }
+
+ /// \brief Start building a new switch statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildSwitchStmtStart(SourceLocation SwitchLoc,
+ Expr *Cond, VarDecl *CondVar) {
+ return getSema().ActOnStartOfSwitchStmt(SwitchLoc, Cond,
+ CondVar);
+ }
+
+ /// \brief Attach the body to the switch statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildSwitchStmtBody(SourceLocation SwitchLoc,
+ Stmt *Switch, Stmt *Body) {
+ return getSema().ActOnFinishSwitchStmt(SwitchLoc, Switch, Body);
+ }
+
+ /// \brief Build a new while statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildWhileStmt(SourceLocation WhileLoc, Sema::FullExprArg Cond,
+ VarDecl *CondVar, Stmt *Body) {
+ return getSema().ActOnWhileStmt(WhileLoc, Cond, CondVar, Body);
+ }
+
+ /// \brief Build a new do-while statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildDoStmt(SourceLocation DoLoc, Stmt *Body,
+ SourceLocation WhileLoc, SourceLocation LParenLoc,
+ Expr *Cond, SourceLocation RParenLoc) {
+ return getSema().ActOnDoStmt(DoLoc, Body, WhileLoc, LParenLoc,
+ Cond, RParenLoc);
+ }
+
+ /// \brief Build a new for statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildForStmt(SourceLocation ForLoc, SourceLocation LParenLoc,
+ Stmt *Init, Sema::FullExprArg Cond,
+ VarDecl *CondVar, Sema::FullExprArg Inc,
+ SourceLocation RParenLoc, Stmt *Body) {
+ return getSema().ActOnForStmt(ForLoc, LParenLoc, Init, Cond,
+ CondVar, Inc, RParenLoc, Body);
+ }
+
+ /// \brief Build a new goto statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc,
+ LabelDecl *Label) {
+ return getSema().ActOnGotoStmt(GotoLoc, LabelLoc, Label);
+ }
+
+ /// \brief Build a new indirect goto statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildIndirectGotoStmt(SourceLocation GotoLoc,
+ SourceLocation StarLoc,
+ Expr *Target) {
+ return getSema().ActOnIndirectGotoStmt(GotoLoc, StarLoc, Target);
+ }
+
+ /// \brief Build a new return statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildReturnStmt(SourceLocation ReturnLoc, Expr *Result) {
+ return getSema().ActOnReturnStmt(ReturnLoc, Result);
+ }
+
+ /// \brief Build a new declaration statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildDeclStmt(Decl **Decls, unsigned NumDecls,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ Sema::DeclGroupPtrTy DG = getSema().BuildDeclaratorGroup(Decls, NumDecls);
+ return getSema().ActOnDeclStmt(DG, StartLoc, EndLoc);
+ }
+
+ /// \brief Build a new inline asm statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildAsmStmt(SourceLocation AsmLoc,
+ bool IsSimple,
+ bool IsVolatile,
+ unsigned NumOutputs,
+ unsigned NumInputs,
+ IdentifierInfo **Names,
+ MultiExprArg Constraints,
+ MultiExprArg Exprs,
+ Expr *AsmString,
+ MultiExprArg Clobbers,
+ SourceLocation RParenLoc,
+ bool MSAsm) {
+ return getSema().ActOnAsmStmt(AsmLoc, IsSimple, IsVolatile, NumOutputs,
+ NumInputs, Names, move(Constraints),
+ Exprs, AsmString, Clobbers,
+ RParenLoc, MSAsm);
+ }
+
+ /// \brief Build a new Objective-C @try statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildObjCAtTryStmt(SourceLocation AtLoc,
+ Stmt *TryBody,
+ MultiStmtArg CatchStmts,
+ Stmt *Finally) {
+ return getSema().ActOnObjCAtTryStmt(AtLoc, TryBody, move(CatchStmts),
+ Finally);
+ }
+
+ /// \brief Rebuild an Objective-C exception declaration.
+ ///
+ /// By default, performs semantic analysis to build the new declaration.
+ /// Subclasses may override this routine to provide different behavior.
+ VarDecl *RebuildObjCExceptionDecl(VarDecl *ExceptionDecl,
+ TypeSourceInfo *TInfo, QualType T) {
+ return getSema().BuildObjCExceptionDecl(TInfo, T,
+ ExceptionDecl->getInnerLocStart(),
+ ExceptionDecl->getLocation(),
+ ExceptionDecl->getIdentifier());
+ }
+
+ /// \brief Build a new Objective-C @catch statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildObjCAtCatchStmt(SourceLocation AtLoc,
+ SourceLocation RParenLoc,
+ VarDecl *Var,
+ Stmt *Body) {
+ return getSema().ActOnObjCAtCatchStmt(AtLoc, RParenLoc,
+ Var, Body);
+ }
+
+ /// \brief Build a new Objective-C @finally statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildObjCAtFinallyStmt(SourceLocation AtLoc,
+ Stmt *Body) {
+ return getSema().ActOnObjCAtFinallyStmt(AtLoc, Body);
+ }
+
+ /// \brief Build a new Objective-C @throw statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildObjCAtThrowStmt(SourceLocation AtLoc,
+ Expr *Operand) {
+ return getSema().BuildObjCAtThrowStmt(AtLoc, Operand);
+ }
+
+ /// \brief Rebuild the operand to an Objective-C @synchronized statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCAtSynchronizedOperand(SourceLocation atLoc,
+ Expr *object) {
+ return getSema().ActOnObjCAtSynchronizedOperand(atLoc, object);
+ }
+
+ /// \brief Build a new Objective-C @synchronized statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildObjCAtSynchronizedStmt(SourceLocation AtLoc,
+ Expr *Object, Stmt *Body) {
+ return getSema().ActOnObjCAtSynchronizedStmt(AtLoc, Object, Body);
+ }
+
+ /// \brief Build a new Objective-C @autoreleasepool statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildObjCAutoreleasePoolStmt(SourceLocation AtLoc,
+ Stmt *Body) {
+ return getSema().ActOnObjCAutoreleasePoolStmt(AtLoc, Body);
+ }
+
+ /// \brief Build the collection operand to a new Objective-C fast
+ /// enumeration statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCForCollectionOperand(SourceLocation forLoc,
+ Expr *collection) {
+ return getSema().ActOnObjCForCollectionOperand(forLoc, collection);
+ }
+
+ /// \brief Build a new Objective-C fast enumeration statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildObjCForCollectionStmt(SourceLocation ForLoc,
+ SourceLocation LParenLoc,
+ Stmt *Element,
+ Expr *Collection,
+ SourceLocation RParenLoc,
+ Stmt *Body) {
+ return getSema().ActOnObjCForCollectionStmt(ForLoc, LParenLoc,
+ Element,
+ Collection,
+ RParenLoc,
+ Body);
+ }
+
+ /// \brief Build a new C++ exception declaration.
+ ///
+ /// By default, performs semantic analysis to build the new declaration.
+ /// Subclasses may override this routine to provide different behavior.
+ VarDecl *RebuildExceptionDecl(VarDecl *ExceptionDecl,
+ TypeSourceInfo *Declarator,
+ SourceLocation StartLoc,
+ SourceLocation IdLoc,
+ IdentifierInfo *Id) {
+ VarDecl *Var = getSema().BuildExceptionDeclaration(0, Declarator,
+ StartLoc, IdLoc, Id);
+ if (Var)
+ getSema().CurContext->addDecl(Var);
+ return Var;
+ }
+
+ /// \brief Build a new C++ catch statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildCXXCatchStmt(SourceLocation CatchLoc,
+ VarDecl *ExceptionDecl,
+ Stmt *Handler) {
+ return Owned(new (getSema().Context) CXXCatchStmt(CatchLoc, ExceptionDecl,
+ Handler));
+ }
+
+ /// \brief Build a new C++ try statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildCXXTryStmt(SourceLocation TryLoc,
+ Stmt *TryBlock,
+ MultiStmtArg Handlers) {
+ return getSema().ActOnCXXTryBlock(TryLoc, TryBlock, move(Handlers));
+ }
+
+ /// \brief Build a new C++0x range-based for statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildCXXForRangeStmt(SourceLocation ForLoc,
+ SourceLocation ColonLoc,
+ Stmt *Range, Stmt *BeginEnd,
+ Expr *Cond, Expr *Inc,
+ Stmt *LoopVar,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXForRangeStmt(ForLoc, ColonLoc, Range, BeginEnd,
+ Cond, Inc, LoopVar, RParenLoc);
+ }
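+
+ // Illustrative example (hypothetical, not upstream): a range-based for inside
+ // a template, rebuilt through RebuildCXXForRangeStmt at instantiation time.
+ //
+ //   template<typename C> void drain(C &c) {
+ //     for (auto &x : c)   // dependent range; re-analyzed per instantiation
+ //       (void)x;
+ //   }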
+
+ /// \brief Build a new Microsoft __if_exists or __if_not_exists statement.
+ ///
+ /// By default, performs semantic analysis to build the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult RebuildMSDependentExistsStmt(SourceLocation KeywordLoc,
+ bool IsIfExists,
+ NestedNameSpecifierLoc QualifierLoc,
+ DeclarationNameInfo NameInfo,
+ Stmt *Nested) {
+ return getSema().BuildMSDependentExistsStmt(KeywordLoc, IsIfExists,
+ QualifierLoc, NameInfo, Nested);
+ }
+
+ /// \brief Attach body to a C++0x range-based for statement.
+ ///
+ /// By default, performs semantic analysis to finish the new statement.
+ /// Subclasses may override this routine to provide different behavior.
+ StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body) {
+ return getSema().FinishCXXForRangeStmt(ForRange, Body);
+ }
+
+ StmtResult RebuildSEHTryStmt(bool IsCXXTry,
+ SourceLocation TryLoc,
+ Stmt *TryBlock,
+ Stmt *Handler) {
+ return getSema().ActOnSEHTryBlock(IsCXXTry,TryLoc,TryBlock,Handler);
+ }
+
+ StmtResult RebuildSEHExceptStmt(SourceLocation Loc,
+ Expr *FilterExpr,
+ Stmt *Block) {
+ return getSema().ActOnSEHExceptBlock(Loc,FilterExpr,Block);
+ }
+
+ StmtResult RebuildSEHFinallyStmt(SourceLocation Loc,
+ Stmt *Block) {
+ return getSema().ActOnSEHFinallyBlock(Loc,Block);
+ }
+
+ /// \brief Build a new expression that references a declaration.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildDeclarationNameExpr(const CXXScopeSpec &SS,
+ LookupResult &R,
+ bool RequiresADL) {
+ return getSema().BuildDeclarationNameExpr(SS, R, RequiresADL);
+ }
+
+
+ /// \brief Build a new expression that references a declaration.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildDeclRefExpr(NestedNameSpecifierLoc QualifierLoc,
+ ValueDecl *VD,
+ const DeclarationNameInfo &NameInfo,
+ TemplateArgumentListInfo *TemplateArgs) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ // FIXME: loses template args.
+
+ return getSema().BuildDeclarationNameExpr(SS, NameInfo, VD);
+ }
+
+ /// \brief Build a new expression in parentheses.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildParenExpr(Expr *SubExpr, SourceLocation LParen,
+ SourceLocation RParen) {
+ return getSema().ActOnParenExpr(LParen, RParen, SubExpr);
+ }
+
+ /// \brief Build a new pseudo-destructor expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXPseudoDestructorExpr(Expr *Base,
+ SourceLocation OperatorLoc,
+ bool isArrow,
+ CXXScopeSpec &SS,
+ TypeSourceInfo *ScopeType,
+ SourceLocation CCLoc,
+ SourceLocation TildeLoc,
+ PseudoDestructorTypeStorage Destroyed);
+
+ /// \brief Build a new unary operator expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildUnaryOperator(SourceLocation OpLoc,
+ UnaryOperatorKind Opc,
+ Expr *SubExpr) {
+ return getSema().BuildUnaryOp(/*Scope=*/0, OpLoc, Opc, SubExpr);
+ }
+
+ /// \brief Build a new builtin offsetof expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildOffsetOfExpr(SourceLocation OperatorLoc,
+ TypeSourceInfo *Type,
+ Sema::OffsetOfComponent *Components,
+ unsigned NumComponents,
+ SourceLocation RParenLoc) {
+ return getSema().BuildBuiltinOffsetOf(OperatorLoc, Type, Components,
+ NumComponents, RParenLoc);
+ }
+
+ /// \brief Build a new sizeof, alignof or vec_step expression with a
+ /// type argument.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildUnaryExprOrTypeTrait(TypeSourceInfo *TInfo,
+ SourceLocation OpLoc,
+ UnaryExprOrTypeTrait ExprKind,
+ SourceRange R) {
+ return getSema().CreateUnaryExprOrTypeTraitExpr(TInfo, OpLoc, ExprKind, R);
+ }
+
+ /// \brief Build a new sizeof, alignof or vec_step expression with an
+ /// expression argument.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildUnaryExprOrTypeTrait(Expr *SubExpr, SourceLocation OpLoc,
+ UnaryExprOrTypeTrait ExprKind,
+ SourceRange R) {
+ ExprResult Result
+ = getSema().CreateUnaryExprOrTypeTraitExpr(SubExpr, OpLoc, ExprKind);
+ if (Result.isInvalid())
+ return ExprError();
+
+ return move(Result);
+ }
+
+ /// \brief Build a new array subscript expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildArraySubscriptExpr(Expr *LHS,
+ SourceLocation LBracketLoc,
+ Expr *RHS,
+ SourceLocation RBracketLoc) {
+ return getSema().ActOnArraySubscriptExpr(/*Scope=*/0, LHS,
+ LBracketLoc, RHS,
+ RBracketLoc);
+ }
+
+ /// \brief Build a new call expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCallExpr(Expr *Callee, SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation RParenLoc,
+ Expr *ExecConfig = 0) {
+ return getSema().ActOnCallExpr(/*Scope=*/0, Callee, LParenLoc,
+ move(Args), RParenLoc, ExecConfig);
+ }
+
+ /// \brief Build a new member access expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildMemberExpr(Expr *Base, SourceLocation OpLoc,
+ bool isArrow,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &MemberNameInfo,
+ ValueDecl *Member,
+ NamedDecl *FoundDecl,
+ const TemplateArgumentListInfo *ExplicitTemplateArgs,
+ NamedDecl *FirstQualifierInScope) {
+ ExprResult BaseResult = getSema().PerformMemberExprBaseConversion(Base,
+ isArrow);
+ if (!Member->getDeclName()) {
+ // We have a reference to an unnamed field. This is always the
+ // base of an anonymous struct/union member access, i.e. the
+ // field is always of record type.
+ assert(!QualifierLoc && "Can't have an unnamed field with a qualifier!");
+ assert(Member->getType()->isRecordType() &&
+ "unnamed member not of record type?");
+
+ BaseResult =
+ getSema().PerformObjectMemberConversion(BaseResult.take(),
+ QualifierLoc.getNestedNameSpecifier(),
+ FoundDecl, Member);
+ if (BaseResult.isInvalid())
+ return ExprError();
+ Base = BaseResult.take();
+ ExprValueKind VK = isArrow ? VK_LValue : Base->getValueKind();
+ MemberExpr *ME =
+ new (getSema().Context) MemberExpr(Base, isArrow,
+ Member, MemberNameInfo,
+ cast<FieldDecl>(Member)->getType(),
+ VK, OK_Ordinary);
+ return getSema().Owned(ME);
+ }
+
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ Base = BaseResult.take();
+ QualType BaseType = Base->getType();
+
+ // FIXME: this involves duplicating earlier analysis in a lot of
+ // cases; we should avoid this when possible.
+ LookupResult R(getSema(), MemberNameInfo, Sema::LookupMemberName);
+ R.addDecl(FoundDecl);
+ R.resolveKind();
+
+ return getSema().BuildMemberReferenceExpr(Base, BaseType, OpLoc, isArrow,
+ SS, TemplateKWLoc,
+ FirstQualifierInScope,
+ R, ExplicitTemplateArgs);
+ }
+
+ /// \brief Build a new binary operator expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildBinaryOperator(SourceLocation OpLoc,
+ BinaryOperatorKind Opc,
+ Expr *LHS, Expr *RHS) {
+ return getSema().BuildBinOp(/*Scope=*/0, OpLoc, Opc, LHS, RHS);
+ }
+
+ /// \brief Build a new conditional operator expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildConditionalOperator(Expr *Cond,
+ SourceLocation QuestionLoc,
+ Expr *LHS,
+ SourceLocation ColonLoc,
+ Expr *RHS) {
+ return getSema().ActOnConditionalOp(QuestionLoc, ColonLoc, Cond,
+ LHS, RHS);
+ }
+
+ /// \brief Build a new C-style cast expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCStyleCastExpr(SourceLocation LParenLoc,
+ TypeSourceInfo *TInfo,
+ SourceLocation RParenLoc,
+ Expr *SubExpr) {
+ return getSema().BuildCStyleCastExpr(LParenLoc, TInfo, RParenLoc,
+ SubExpr);
+ }
+
+ /// \brief Build a new compound literal expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCompoundLiteralExpr(SourceLocation LParenLoc,
+ TypeSourceInfo *TInfo,
+ SourceLocation RParenLoc,
+ Expr *Init) {
+ return getSema().BuildCompoundLiteralExpr(LParenLoc, TInfo, RParenLoc,
+ Init);
+ }
+
+ /// \brief Build a new extended vector element access expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildExtVectorElementExpr(Expr *Base,
+ SourceLocation OpLoc,
+ SourceLocation AccessorLoc,
+ IdentifierInfo &Accessor) {
+
+ CXXScopeSpec SS;
+ DeclarationNameInfo NameInfo(&Accessor, AccessorLoc);
+ return getSema().BuildMemberReferenceExpr(Base, Base->getType(),
+ OpLoc, /*IsArrow*/ false,
+ SS, SourceLocation(),
+ /*FirstQualifierInScope*/ 0,
+ NameInfo,
+ /* TemplateArgs */ 0);
+ }
+
+ /// \brief Build a new initializer list expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildInitList(SourceLocation LBraceLoc,
+ MultiExprArg Inits,
+ SourceLocation RBraceLoc,
+ QualType ResultTy) {
+ ExprResult Result
+ = SemaRef.ActOnInitList(LBraceLoc, move(Inits), RBraceLoc);
+ if (Result.isInvalid() || ResultTy->isDependentType())
+ return move(Result);
+
+ // Patch in the result type we were given, which may have been computed
+ // when the initial InitListExpr was built.
+ InitListExpr *ILE = cast<InitListExpr>((Expr *)Result.get());
+ ILE->setType(ResultTy);
+ return move(Result);
+ }
+
+ /// \brief Build a new designated initializer expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildDesignatedInitExpr(Designation &Desig,
+ MultiExprArg ArrayExprs,
+ SourceLocation EqualOrColonLoc,
+ bool GNUSyntax,
+ Expr *Init) {
+ ExprResult Result
+ = SemaRef.ActOnDesignatedInitializer(Desig, EqualOrColonLoc, GNUSyntax,
+ Init);
+ if (Result.isInvalid())
+ return ExprError();
+
+ ArrayExprs.release();
+ return move(Result);
+ }
+
+ /// \brief Build a new value-initialized expression.
+ ///
+ /// By default, builds the implicit value initialization without performing
+ /// any semantic analysis. Subclasses may override this routine to provide
+ /// different behavior.
+ ExprResult RebuildImplicitValueInitExpr(QualType T) {
+ return SemaRef.Owned(new (SemaRef.Context) ImplicitValueInitExpr(T));
+ }
+
+ /// \brief Build a new \c va_arg expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildVAArgExpr(SourceLocation BuiltinLoc,
+ Expr *SubExpr, TypeSourceInfo *TInfo,
+ SourceLocation RParenLoc) {
+ return getSema().BuildVAArgExpr(BuiltinLoc,
+ SubExpr, TInfo,
+ RParenLoc);
+ }
+
+ /// \brief Build a new expression list in parentheses.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildParenListExpr(SourceLocation LParenLoc,
+ MultiExprArg SubExprs,
+ SourceLocation RParenLoc) {
+ return getSema().ActOnParenListExpr(LParenLoc, RParenLoc, move(SubExprs));
+ }
+
+ /// \brief Build a new address-of-label expression.
+ ///
+ /// By default, performs semantic analysis, using the name of the label
+ /// rather than attempting to map the label statement itself.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildAddrLabelExpr(SourceLocation AmpAmpLoc,
+ SourceLocation LabelLoc, LabelDecl *Label) {
+ return getSema().ActOnAddrLabel(AmpAmpLoc, LabelLoc, Label);
+ }
+
+ /// \brief Build a new GNU statement expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildStmtExpr(SourceLocation LParenLoc,
+ Stmt *SubStmt,
+ SourceLocation RParenLoc) {
+ return getSema().ActOnStmtExpr(LParenLoc, SubStmt, RParenLoc);
+ }
+
+ /// \brief Build a new __builtin_choose_expr expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildChooseExpr(SourceLocation BuiltinLoc,
+ Expr *Cond, Expr *LHS, Expr *RHS,
+ SourceLocation RParenLoc) {
+ return SemaRef.ActOnChooseExpr(BuiltinLoc,
+ Cond, LHS, RHS,
+ RParenLoc);
+ }
+
+ /// \brief Build a new generic selection expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildGenericSelectionExpr(SourceLocation KeyLoc,
+ SourceLocation DefaultLoc,
+ SourceLocation RParenLoc,
+ Expr *ControllingExpr,
+ TypeSourceInfo **Types,
+ Expr **Exprs,
+ unsigned NumAssocs) {
+ return getSema().CreateGenericSelectionExpr(KeyLoc, DefaultLoc, RParenLoc,
+ ControllingExpr, Types, Exprs,
+ NumAssocs);
+ }
+
+ /// \brief Build a new overloaded operator call expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// The semantic analysis provides the usual template-instantiation behavior:
+ /// it may turn what looks like an overloaded operator call into a use of a
+ /// builtin operator, perform argument-dependent lookup, and so on.
+ /// Subclasses may override this routine to
+ /// provide different behavior.
+ ExprResult RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
+ SourceLocation OpLoc,
+ Expr *Callee,
+ Expr *First,
+ Expr *Second);
+
+ /// \brief Build a new C++ "named" cast expression, such as static_cast or
+ /// reinterpret_cast.
+ ///
+ /// By default, this routine dispatches to one of the more-specific routines
+ /// for a particular named case, e.g., RebuildCXXStaticCastExpr().
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXNamedCastExpr(SourceLocation OpLoc,
+ Stmt::StmtClass Class,
+ SourceLocation LAngleLoc,
+ TypeSourceInfo *TInfo,
+ SourceLocation RAngleLoc,
+ SourceLocation LParenLoc,
+ Expr *SubExpr,
+ SourceLocation RParenLoc) {
+ switch (Class) {
+ case Stmt::CXXStaticCastExprClass:
+ return getDerived().RebuildCXXStaticCastExpr(OpLoc, LAngleLoc, TInfo,
+ RAngleLoc, LParenLoc,
+ SubExpr, RParenLoc);
+
+ case Stmt::CXXDynamicCastExprClass:
+ return getDerived().RebuildCXXDynamicCastExpr(OpLoc, LAngleLoc, TInfo,
+ RAngleLoc, LParenLoc,
+ SubExpr, RParenLoc);
+
+ case Stmt::CXXReinterpretCastExprClass:
+ return getDerived().RebuildCXXReinterpretCastExpr(OpLoc, LAngleLoc, TInfo,
+ RAngleLoc, LParenLoc,
+ SubExpr,
+ RParenLoc);
+
+ case Stmt::CXXConstCastExprClass:
+ return getDerived().RebuildCXXConstCastExpr(OpLoc, LAngleLoc, TInfo,
+ RAngleLoc, LParenLoc,
+ SubExpr, RParenLoc);
+
+ default:
+ llvm_unreachable("Invalid C++ named cast");
+ }
+ }
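+
+ // Illustrative example (hypothetical, not upstream): a dependent named cast
+ // that is re-dispatched through RebuildCXXNamedCastExpr when the enclosing
+ // template is instantiated.
+ //
+ //   template<typename T> int *as_int_ptr(T *p) {
+ //     return static_cast<int *>(p);   // CXXStaticCastExprClass case above
+ //   }
+ //   int *use(void *p) { return as_int_ptr(p); }  // instantiates as_int_ptr<void>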
+
+ /// \brief Build a new C++ static_cast expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXStaticCastExpr(SourceLocation OpLoc,
+ SourceLocation LAngleLoc,
+ TypeSourceInfo *TInfo,
+ SourceLocation RAngleLoc,
+ SourceLocation LParenLoc,
+ Expr *SubExpr,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXNamedCast(OpLoc, tok::kw_static_cast,
+ TInfo, SubExpr,
+ SourceRange(LAngleLoc, RAngleLoc),
+ SourceRange(LParenLoc, RParenLoc));
+ }
+
+ /// \brief Build a new C++ dynamic_cast expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXDynamicCastExpr(SourceLocation OpLoc,
+ SourceLocation LAngleLoc,
+ TypeSourceInfo *TInfo,
+ SourceLocation RAngleLoc,
+ SourceLocation LParenLoc,
+ Expr *SubExpr,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXNamedCast(OpLoc, tok::kw_dynamic_cast,
+ TInfo, SubExpr,
+ SourceRange(LAngleLoc, RAngleLoc),
+ SourceRange(LParenLoc, RParenLoc));
+ }
+
+ /// \brief Build a new C++ reinterpret_cast expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXReinterpretCastExpr(SourceLocation OpLoc,
+ SourceLocation LAngleLoc,
+ TypeSourceInfo *TInfo,
+ SourceLocation RAngleLoc,
+ SourceLocation LParenLoc,
+ Expr *SubExpr,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXNamedCast(OpLoc, tok::kw_reinterpret_cast,
+ TInfo, SubExpr,
+ SourceRange(LAngleLoc, RAngleLoc),
+ SourceRange(LParenLoc, RParenLoc));
+ }
+
+ /// \brief Build a new C++ const_cast expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXConstCastExpr(SourceLocation OpLoc,
+ SourceLocation LAngleLoc,
+ TypeSourceInfo *TInfo,
+ SourceLocation RAngleLoc,
+ SourceLocation LParenLoc,
+ Expr *SubExpr,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXNamedCast(OpLoc, tok::kw_const_cast,
+ TInfo, SubExpr,
+ SourceRange(LAngleLoc, RAngleLoc),
+ SourceRange(LParenLoc, RParenLoc));
+ }
+
+ /// \brief Build a new C++ functional-style cast expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo,
+ SourceLocation LParenLoc,
+ Expr *Sub,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXTypeConstructExpr(TInfo, LParenLoc,
+ MultiExprArg(&Sub, 1),
+ RParenLoc);
+ }
+
+ /// \brief Build a new C++ typeid(type) expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXTypeidExpr(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ TypeSourceInfo *Operand,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXTypeId(TypeInfoType, TypeidLoc, Operand,
+ RParenLoc);
+ }
+
+
+ /// \brief Build a new C++ typeid(expr) expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXTypeidExpr(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ Expr *Operand,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXTypeId(TypeInfoType, TypeidLoc, Operand,
+ RParenLoc);
+ }
+
+ /// \brief Build a new C++ __uuidof(type) expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXUuidofExpr(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ TypeSourceInfo *Operand,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXUuidof(TypeInfoType, TypeidLoc, Operand,
+ RParenLoc);
+ }
+
+ /// \brief Build a new C++ __uuidof(expr) expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXUuidofExpr(QualType TypeInfoType,
+ SourceLocation TypeidLoc,
+ Expr *Operand,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXUuidof(TypeInfoType, TypeidLoc, Operand,
+ RParenLoc);
+ }
+
+ /// \brief Build a new C++ "this" expression.
+ ///
+ /// By default, builds a new "this" expression without performing any
+ /// semantic analysis. Subclasses may override this routine to provide
+ /// different behavior.
+ ExprResult RebuildCXXThisExpr(SourceLocation ThisLoc,
+ QualType ThisType,
+ bool isImplicit) {
+ getSema().CheckCXXThisCapture(ThisLoc);
+ return getSema().Owned(
+ new (getSema().Context) CXXThisExpr(ThisLoc, ThisType,
+ isImplicit));
+ }
+
+ /// \brief Build a new C++ throw expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXThrowExpr(SourceLocation ThrowLoc, Expr *Sub,
+ bool IsThrownVariableInScope) {
+ return getSema().BuildCXXThrow(ThrowLoc, Sub, IsThrownVariableInScope);
+ }
+
+ /// \brief Build a new C++ default-argument expression.
+ ///
+ /// By default, builds a new default-argument expression, which does not
+ /// require any semantic analysis. Subclasses may override this routine to
+ /// provide different behavior.
+ ExprResult RebuildCXXDefaultArgExpr(SourceLocation Loc,
+ ParmVarDecl *Param) {
+ return getSema().Owned(CXXDefaultArgExpr::Create(getSema().Context, Loc,
+ Param));
+ }
+
+ /// \brief Build a new C++ zero-initialization expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXScalarValueInitExpr(TypeSourceInfo *TSInfo,
+ SourceLocation LParenLoc,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXTypeConstructExpr(TSInfo, LParenLoc,
+ MultiExprArg(getSema(), 0, 0),
+ RParenLoc);
+ }
+
+ /// \brief Build a new C++ "new" expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXNewExpr(SourceLocation StartLoc,
+ bool UseGlobal,
+ SourceLocation PlacementLParen,
+ MultiExprArg PlacementArgs,
+ SourceLocation PlacementRParen,
+ SourceRange TypeIdParens,
+ QualType AllocatedType,
+ TypeSourceInfo *AllocatedTypeInfo,
+ Expr *ArraySize,
+ SourceRange DirectInitRange,
+ Expr *Initializer) {
+ return getSema().BuildCXXNew(StartLoc, UseGlobal,
+ PlacementLParen,
+ move(PlacementArgs),
+ PlacementRParen,
+ TypeIdParens,
+ AllocatedType,
+ AllocatedTypeInfo,
+ ArraySize,
+ DirectInitRange,
+ Initializer);
+ }
+
+ /// \brief Build a new C++ "delete" expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXDeleteExpr(SourceLocation StartLoc,
+ bool IsGlobalDelete,
+ bool IsArrayForm,
+ Expr *Operand) {
+ return getSema().ActOnCXXDelete(StartLoc, IsGlobalDelete, IsArrayForm,
+ Operand);
+ }
+
+ /// \brief Build a new unary type trait expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildUnaryTypeTrait(UnaryTypeTrait Trait,
+ SourceLocation StartLoc,
+ TypeSourceInfo *T,
+ SourceLocation RParenLoc) {
+ return getSema().BuildUnaryTypeTrait(Trait, StartLoc, T, RParenLoc);
+ }
+
+ /// \brief Build a new binary type trait expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildBinaryTypeTrait(BinaryTypeTrait Trait,
+ SourceLocation StartLoc,
+ TypeSourceInfo *LhsT,
+ TypeSourceInfo *RhsT,
+ SourceLocation RParenLoc) {
+ return getSema().BuildBinaryTypeTrait(Trait, StartLoc, LhsT, RhsT, RParenLoc);
+ }
+
+ /// \brief Build a new type trait expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildTypeTrait(TypeTrait Trait,
+ SourceLocation StartLoc,
+ ArrayRef<TypeSourceInfo *> Args,
+ SourceLocation RParenLoc) {
+ return getSema().BuildTypeTrait(Trait, StartLoc, Args, RParenLoc);
+ }
+
+ /// \brief Build a new array type trait expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildArrayTypeTrait(ArrayTypeTrait Trait,
+ SourceLocation StartLoc,
+ TypeSourceInfo *TSInfo,
+ Expr *DimExpr,
+ SourceLocation RParenLoc) {
+ return getSema().BuildArrayTypeTrait(Trait, StartLoc, TSInfo, DimExpr, RParenLoc);
+ }
+
+ /// \brief Build a new expression trait expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildExpressionTrait(ExpressionTrait Trait,
+ SourceLocation StartLoc,
+ Expr *Queried,
+ SourceLocation RParenLoc) {
+ return getSema().BuildExpressionTrait(Trait, StartLoc, Queried, RParenLoc);
+ }
+
+ /// \brief Build a new (previously unresolved) declaration reference
+ /// expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildDependentScopeDeclRefExpr(
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ if (TemplateArgs || TemplateKWLoc.isValid())
+ return getSema().BuildQualifiedTemplateIdExpr(SS, TemplateKWLoc,
+ NameInfo, TemplateArgs);
+
+ return getSema().BuildQualifiedDeclarationNameExpr(SS, NameInfo);
+ }
+
+ /// \brief Build a new template-id expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildTemplateIdExpr(const CXXScopeSpec &SS,
+ SourceLocation TemplateKWLoc,
+ LookupResult &R,
+ bool RequiresADL,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ return getSema().BuildTemplateIdExpr(SS, TemplateKWLoc, R, RequiresADL,
+ TemplateArgs);
+ }
+
+ /// \brief Build a new object-construction expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXConstructExpr(QualType T,
+ SourceLocation Loc,
+ CXXConstructorDecl *Constructor,
+ bool IsElidable,
+ MultiExprArg Args,
+ bool HadMultipleCandidates,
+ bool RequiresZeroInit,
+ CXXConstructExpr::ConstructionKind ConstructKind,
+ SourceRange ParenRange) {
+ ASTOwningVector<Expr*> ConvertedArgs(SemaRef);
+ if (getSema().CompleteConstructorCall(Constructor, move(Args), Loc,
+ ConvertedArgs))
+ return ExprError();
+
+ return getSema().BuildCXXConstructExpr(Loc, T, Constructor, IsElidable,
+ move_arg(ConvertedArgs),
+ HadMultipleCandidates,
+ RequiresZeroInit, ConstructKind,
+ ParenRange);
+ }
+
+ /// \brief Build a new object-construction expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXTemporaryObjectExpr(TypeSourceInfo *TSInfo,
+ SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXTypeConstructExpr(TSInfo,
+ LParenLoc,
+ move(Args),
+ RParenLoc);
+ }
+
+ /// \brief Build a new object-construction expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXUnresolvedConstructExpr(TypeSourceInfo *TSInfo,
+ SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation RParenLoc) {
+ return getSema().BuildCXXTypeConstructExpr(TSInfo,
+ LParenLoc,
+ move(Args),
+ RParenLoc);
+ }
+
+ /// \brief Build a new member reference expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXDependentScopeMemberExpr(Expr *BaseE,
+ QualType BaseType,
+ bool IsArrow,
+ SourceLocation OperatorLoc,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierInScope,
+ const DeclarationNameInfo &MemberNameInfo,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ return SemaRef.BuildMemberReferenceExpr(BaseE, BaseType,
+ OperatorLoc, IsArrow,
+ SS, TemplateKWLoc,
+ FirstQualifierInScope,
+ MemberNameInfo,
+ TemplateArgs);
+ }
+
+ /// \brief Build a new member reference expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildUnresolvedMemberExpr(Expr *BaseE, QualType BaseType,
+ SourceLocation OperatorLoc,
+ bool IsArrow,
+ NestedNameSpecifierLoc QualifierLoc,
+ SourceLocation TemplateKWLoc,
+ NamedDecl *FirstQualifierInScope,
+ LookupResult &R,
+ const TemplateArgumentListInfo *TemplateArgs) {
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ return SemaRef.BuildMemberReferenceExpr(BaseE, BaseType,
+ OperatorLoc, IsArrow,
+ SS, TemplateKWLoc,
+ FirstQualifierInScope,
+ R, TemplateArgs);
+ }
+
+ /// \brief Build a new noexcept expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildCXXNoexceptExpr(SourceRange Range, Expr *Arg) {
+ return SemaRef.BuildCXXNoexceptExpr(Range.getBegin(), Arg, Range.getEnd());
+ }
+
+ /// \brief Build a new expression to compute the length of a parameter pack.
+ ExprResult RebuildSizeOfPackExpr(SourceLocation OperatorLoc, NamedDecl *Pack,
+ SourceLocation PackLoc,
+ SourceLocation RParenLoc,
+ llvm::Optional<unsigned> Length) {
+ if (Length)
+ return new (SemaRef.Context) SizeOfPackExpr(SemaRef.Context.getSizeType(),
+ OperatorLoc, Pack, PackLoc,
+ RParenLoc, *Length);
+
+ return new (SemaRef.Context) SizeOfPackExpr(SemaRef.Context.getSizeType(),
+ OperatorLoc, Pack, PackLoc,
+ RParenLoc);
+ }
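+
+ // Illustrative example (hypothetical, not upstream): sizeof...() over a
+ // parameter pack, rebuilt via RebuildSizeOfPackExpr once the pack's length
+ // is known.
+ //
+ //   template<typename ...Ts> struct Count {
+ //     static const unsigned value = sizeof...(Ts);
+ //   };
+ //   // Count<int, char, double>::value == 3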
+
+ /// \brief Build a new Objective-C array literal.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCArrayLiteral(SourceRange Range,
+ Expr **Elements, unsigned NumElements) {
+ return getSema().BuildObjCArrayLiteral(Range,
+ MultiExprArg(Elements, NumElements));
+ }
+
+ ExprResult RebuildObjCSubscriptRefExpr(SourceLocation RB,
+ Expr *Base, Expr *Key,
+ ObjCMethodDecl *getterMethod,
+ ObjCMethodDecl *setterMethod) {
+ return getSema().BuildObjCSubscriptExpression(RB, Base, Key,
+ getterMethod, setterMethod);
+ }
+
+ /// \brief Build a new Objective-C dictionary literal.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCDictionaryLiteral(SourceRange Range,
+ ObjCDictionaryElement *Elements,
+ unsigned NumElements) {
+ return getSema().BuildObjCDictionaryLiteral(Range, Elements, NumElements);
+ }
+
+ /// \brief Build a new Objective-C @encode expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCEncodeExpr(SourceLocation AtLoc,
+ TypeSourceInfo *EncodeTypeInfo,
+ SourceLocation RParenLoc) {
+ return SemaRef.Owned(SemaRef.BuildObjCEncodeExpression(AtLoc, EncodeTypeInfo,
+ RParenLoc));
+ }
+
+ /// \brief Build a new Objective-C class message.
+ ExprResult RebuildObjCMessageExpr(TypeSourceInfo *ReceiverTypeInfo,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelectorLocs,
+ ObjCMethodDecl *Method,
+ SourceLocation LBracLoc,
+ MultiExprArg Args,
+ SourceLocation RBracLoc) {
+ return SemaRef.BuildClassMessage(ReceiverTypeInfo,
+ ReceiverTypeInfo->getType(),
+ /*SuperLoc=*/SourceLocation(),
+ Sel, Method, LBracLoc, SelectorLocs,
+ RBracLoc, move(Args));
+ }
+
+ /// \brief Build a new Objective-C instance message.
+ ExprResult RebuildObjCMessageExpr(Expr *Receiver,
+ Selector Sel,
+ ArrayRef<SourceLocation> SelectorLocs,
+ ObjCMethodDecl *Method,
+ SourceLocation LBracLoc,
+ MultiExprArg Args,
+ SourceLocation RBracLoc) {
+ return SemaRef.BuildInstanceMessage(Receiver,
+ Receiver->getType(),
+ /*SuperLoc=*/SourceLocation(),
+ Sel, Method, LBracLoc, SelectorLocs,
+ RBracLoc, move(Args));
+ }
+
+ /// \brief Build a new Objective-C ivar reference expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCIvarRefExpr(Expr *BaseArg, ObjCIvarDecl *Ivar,
+ SourceLocation IvarLoc,
+ bool IsArrow, bool IsFreeIvar) {
+ // FIXME: We lose track of the IsFreeIvar bit.
+ CXXScopeSpec SS;
+ ExprResult Base = getSema().Owned(BaseArg);
+ LookupResult R(getSema(), Ivar->getDeclName(), IvarLoc,
+ Sema::LookupMemberName);
+ ExprResult Result = getSema().LookupMemberExpr(R, Base, IsArrow,
+ /*FIXME:*/IvarLoc,
+ SS, 0,
+ false);
+ if (Result.isInvalid() || Base.isInvalid())
+ return ExprError();
+
+ if (Result.get())
+ return move(Result);
+
+ return getSema().BuildMemberReferenceExpr(Base.get(), Base.get()->getType(),
+ /*FIXME:*/IvarLoc, IsArrow,
+ SS, SourceLocation(),
+ /*FirstQualifierInScope=*/0,
+ R,
+ /*TemplateArgs=*/0);
+ }
+
+ /// \brief Build a new Objective-C property reference expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCPropertyRefExpr(Expr *BaseArg,
+ ObjCPropertyDecl *Property,
+ SourceLocation PropertyLoc) {
+ CXXScopeSpec SS;
+ ExprResult Base = getSema().Owned(BaseArg);
+ LookupResult R(getSema(), Property->getDeclName(), PropertyLoc,
+ Sema::LookupMemberName);
+ bool IsArrow = false;
+ ExprResult Result = getSema().LookupMemberExpr(R, Base, IsArrow,
+ /*FIXME:*/PropertyLoc,
+ SS, 0, false);
+ if (Result.isInvalid() || Base.isInvalid())
+ return ExprError();
+
+ if (Result.get())
+ return move(Result);
+
+ return getSema().BuildMemberReferenceExpr(Base.get(), Base.get()->getType(),
+ /*FIXME:*/PropertyLoc, IsArrow,
+ SS, SourceLocation(),
+ /*FirstQualifierInScope=*/0,
+ R,
+ /*TemplateArgs=*/0);
+ }
+
+ /// \brief Build a new Objective-C property reference expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCPropertyRefExpr(Expr *Base, QualType T,
+ ObjCMethodDecl *Getter,
+ ObjCMethodDecl *Setter,
+ SourceLocation PropertyLoc) {
+ // Since these expressions can only be value-dependent, we do not
+ // need to perform semantic analysis again.
+ return Owned(
+ new (getSema().Context) ObjCPropertyRefExpr(Getter, Setter, T,
+ VK_LValue, OK_ObjCProperty,
+ PropertyLoc, Base));
+ }
+
+ /// \brief Build a new Objective-C "isa" expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildObjCIsaExpr(Expr *BaseArg, SourceLocation IsaLoc,
+ bool IsArrow) {
+ CXXScopeSpec SS;
+ ExprResult Base = getSema().Owned(BaseArg);
+ LookupResult R(getSema(), &getSema().Context.Idents.get("isa"), IsaLoc,
+ Sema::LookupMemberName);
+ ExprResult Result = getSema().LookupMemberExpr(R, Base, IsArrow,
+                                                   /*FIXME:*/IsaLoc,
+ SS, 0, false);
+ if (Result.isInvalid() || Base.isInvalid())
+ return ExprError();
+
+ if (Result.get())
+ return move(Result);
+
+ return getSema().BuildMemberReferenceExpr(Base.get(), Base.get()->getType(),
+ /*FIXME:*/IsaLoc, IsArrow,
+ SS, SourceLocation(),
+ /*FirstQualifierInScope=*/0,
+ R,
+ /*TemplateArgs=*/0);
+ }
+
+ /// \brief Build a new shuffle vector expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildShuffleVectorExpr(SourceLocation BuiltinLoc,
+ MultiExprArg SubExprs,
+ SourceLocation RParenLoc) {
+ // Find the declaration for __builtin_shufflevector
+ const IdentifierInfo &Name
+ = SemaRef.Context.Idents.get("__builtin_shufflevector");
+ TranslationUnitDecl *TUDecl = SemaRef.Context.getTranslationUnitDecl();
+ DeclContext::lookup_result Lookup = TUDecl->lookup(DeclarationName(&Name));
+ assert(Lookup.first != Lookup.second && "No __builtin_shufflevector?");
+
+ // Build a reference to the __builtin_shufflevector builtin
+ FunctionDecl *Builtin = cast<FunctionDecl>(*Lookup.first);
+ ExprResult Callee
+ = SemaRef.Owned(new (SemaRef.Context) DeclRefExpr(Builtin, false,
+ Builtin->getType(),
+ VK_LValue, BuiltinLoc));
+ Callee = SemaRef.UsualUnaryConversions(Callee.take());
+ if (Callee.isInvalid())
+ return ExprError();
+
+ // Build the CallExpr
+ unsigned NumSubExprs = SubExprs.size();
+ Expr **Subs = (Expr **)SubExprs.release();
+ ExprResult TheCall = SemaRef.Owned(
+ new (SemaRef.Context) CallExpr(SemaRef.Context, Callee.take(),
+ Subs, NumSubExprs,
+ Builtin->getCallResultType(),
+ Expr::getValueKindForType(Builtin->getResultType()),
+ RParenLoc));
+
+ // Type-check the __builtin_shufflevector expression.
+ return SemaRef.SemaBuiltinShuffleVector(cast<CallExpr>(TheCall.take()));
+ }
+
+ /// \brief Build a new template argument pack expansion.
+ ///
+ /// By default, performs semantic analysis to build a new pack expansion
+ /// for a template argument. Subclasses may override this routine to provide
+ /// different behavior.
+ TemplateArgumentLoc RebuildPackExpansion(TemplateArgumentLoc Pattern,
+ SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions) {
+ switch (Pattern.getArgument().getKind()) {
+ case TemplateArgument::Expression: {
+ ExprResult Result
+ = getSema().CheckPackExpansion(Pattern.getSourceExpression(),
+ EllipsisLoc, NumExpansions);
+ if (Result.isInvalid())
+ return TemplateArgumentLoc();
+
+ return TemplateArgumentLoc(Result.get(), Result.get());
+ }
+
+ case TemplateArgument::Template:
+ return TemplateArgumentLoc(TemplateArgument(
+ Pattern.getArgument().getAsTemplate(),
+ NumExpansions),
+ Pattern.getTemplateQualifierLoc(),
+ Pattern.getTemplateNameLoc(),
+ EllipsisLoc);
+
+ case TemplateArgument::Null:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Pack:
+ case TemplateArgument::TemplateExpansion:
+ llvm_unreachable("Pack expansion pattern has no parameter packs");
+
+ case TemplateArgument::Type:
+ if (TypeSourceInfo *Expansion
+ = getSema().CheckPackExpansion(Pattern.getTypeSourceInfo(),
+ EllipsisLoc,
+ NumExpansions))
+ return TemplateArgumentLoc(TemplateArgument(Expansion->getType()),
+ Expansion);
+ break;
+ }
+
+ return TemplateArgumentLoc();
+ }
+
+ /// \brief Build a new expression pack expansion.
+ ///
+ /// By default, performs semantic analysis to build a new pack expansion
+ /// for an expression. Subclasses may override this routine to provide
+ /// different behavior.
+ ExprResult RebuildPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
+ llvm::Optional<unsigned> NumExpansions) {
+ return getSema().CheckPackExpansion(Pattern, EllipsisLoc, NumExpansions);
+ }
+
+ /// \brief Build a new atomic operation expression.
+ ///
+ /// By default, performs semantic analysis to build the new expression.
+ /// Subclasses may override this routine to provide different behavior.
+ ExprResult RebuildAtomicExpr(SourceLocation BuiltinLoc,
+ MultiExprArg SubExprs,
+ QualType RetTy,
+ AtomicExpr::AtomicOp Op,
+ SourceLocation RParenLoc) {
+    // Just create the expression; there is no interesting semantic
+    // analysis to do here, because we can't actually build an AtomicExpr
+    // until we are sure it is semantically sound.
+ unsigned NumSubExprs = SubExprs.size();
+ Expr **Subs = (Expr **)SubExprs.release();
+ return new (SemaRef.Context) AtomicExpr(BuiltinLoc, Subs,
+ NumSubExprs, RetTy, Op,
+ RParenLoc);
+ }
+
+private:
+ TypeLoc TransformTypeInObjectScope(TypeLoc TL,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope,
+ CXXScopeSpec &SS);
+
+ TypeSourceInfo *TransformTypeInObjectScope(TypeSourceInfo *TSInfo,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope,
+ CXXScopeSpec &SS);
+};
+
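+// A minimal sketch of the CRTP extension point described by the doc comments
+// above: clients derive from TreeTransform and override only the hooks they
+// care about.  The class name below is a hypothetical illustration;
+// AlwaysRebuild() and TransformType() are hooks that appear in this file.
+//
+//   class RebuildAllTransform : public TreeTransform<RebuildAllTransform> {
+//   public:
+//     RebuildAllTransform(Sema &S) : TreeTransform<RebuildAllTransform>(S) {}
+//
+//     // Force every node to be rebuilt rather than reused unchanged.
+//     bool AlwaysRebuild() { return true; }
+//   };
+//
+//   // QualType NewT = RebuildAllTransform(SemaRef).TransformType(T);
+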
+template<typename Derived>
+StmtResult TreeTransform<Derived>::TransformStmt(Stmt *S) {
+ if (!S)
+ return SemaRef.Owned(S);
+
+ switch (S->getStmtClass()) {
+ case Stmt::NoStmtClass: break;
+
+ // Transform individual statement nodes
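+  // (Each STMT() entry in StmtNodes.inc expands to a case that forwards to
+  // the derived class's Transform##Node hook; the second include below
+  // funnels all expression nodes through TransformExpr instead.)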
+#define STMT(Node, Parent) \
+ case Stmt::Node##Class: return getDerived().Transform##Node(cast<Node>(S));
+#define ABSTRACT_STMT(Node)
+#define EXPR(Node, Parent)
+#include "clang/AST/StmtNodes.inc"
+
+ // Transform expressions by calling TransformExpr.
+#define STMT(Node, Parent)
+#define ABSTRACT_STMT(Stmt)
+#define EXPR(Node, Parent) case Stmt::Node##Class:
+#include "clang/AST/StmtNodes.inc"
+ {
+ ExprResult E = getDerived().TransformExpr(cast<Expr>(S));
+ if (E.isInvalid())
+ return StmtError();
+
+ return getSema().ActOnExprStmt(getSema().MakeFullExpr(E.take()));
+ }
+ }
+
+ return SemaRef.Owned(S);
+}
+
+
+template<typename Derived>
+ExprResult TreeTransform<Derived>::TransformExpr(Expr *E) {
+ if (!E)
+ return SemaRef.Owned(E);
+
+ switch (E->getStmtClass()) {
+ case Stmt::NoStmtClass: break;
+#define STMT(Node, Parent) case Stmt::Node##Class: break;
+#define ABSTRACT_STMT(Stmt)
+#define EXPR(Node, Parent) \
+ case Stmt::Node##Class: return getDerived().Transform##Node(cast<Node>(E));
+#include "clang/AST/StmtNodes.inc"
+ }
+
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+bool TreeTransform<Derived>::TransformExprs(Expr **Inputs,
+ unsigned NumInputs,
+ bool IsCall,
+ SmallVectorImpl<Expr *> &Outputs,
+ bool *ArgChanged) {
+ for (unsigned I = 0; I != NumInputs; ++I) {
+ // If requested, drop call arguments that need to be dropped.
+ if (IsCall && getDerived().DropCallArgument(Inputs[I])) {
+ if (ArgChanged)
+ *ArgChanged = true;
+
+ break;
+ }
+
+ if (PackExpansionExpr *Expansion = dyn_cast<PackExpansionExpr>(Inputs[I])) {
+ Expr *Pattern = Expansion->getPattern();
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> OrigNumExpansions
+ = Expansion->getNumExpansions();
+ llvm::Optional<unsigned> NumExpansions = OrigNumExpansions;
+ if (getDerived().TryExpandParameterPacks(Expansion->getEllipsisLoc(),
+ Pattern->getSourceRange(),
+ Unexpanded,
+ Expand, RetainExpansion,
+ NumExpansions))
+ return true;
+
+ if (!Expand) {
+ // The transform has determined that we should perform a simple
+ // transformation on the pack expansion, producing another pack
+ // expansion.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ ExprResult OutPattern = getDerived().TransformExpr(Pattern);
+ if (OutPattern.isInvalid())
+ return true;
+
+ ExprResult Out = getDerived().RebuildPackExpansion(OutPattern.get(),
+ Expansion->getEllipsisLoc(),
+ NumExpansions);
+ if (Out.isInvalid())
+ return true;
+
+ if (ArgChanged)
+ *ArgChanged = true;
+ Outputs.push_back(Out.get());
+ continue;
+ }
+
+ // Record right away that the argument was changed. This needs
+ // to happen even if the array expands to nothing.
+ if (ArgChanged) *ArgChanged = true;
+
+ // The transform has determined that we should perform an elementwise
+ // expansion of the pattern. Do so.
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ ExprResult Out = getDerived().TransformExpr(Pattern);
+ if (Out.isInvalid())
+ return true;
+
+ if (Out.get()->containsUnexpandedParameterPack()) {
+ Out = RebuildPackExpansion(Out.get(), Expansion->getEllipsisLoc(),
+ OrigNumExpansions);
+ if (Out.isInvalid())
+ return true;
+ }
+
+ Outputs.push_back(Out.get());
+ }
+
+ continue;
+ }
+
+ ExprResult Result = getDerived().TransformExpr(Inputs[I]);
+ if (Result.isInvalid())
+ return true;
+
+ if (Result.get() != Inputs[I] && ArgChanged)
+ *ArgChanged = true;
+
+ Outputs.push_back(Result.get());
+ }
+
+ return false;
+}
+
+template<typename Derived>
+NestedNameSpecifierLoc
+TreeTransform<Derived>::TransformNestedNameSpecifierLoc(
+ NestedNameSpecifierLoc NNS,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope) {
+ SmallVector<NestedNameSpecifierLoc, 4> Qualifiers;
+ for (NestedNameSpecifierLoc Qualifier = NNS; Qualifier;
+ Qualifier = Qualifier.getPrefix())
+ Qualifiers.push_back(Qualifier);
+
+ CXXScopeSpec SS;
+ while (!Qualifiers.empty()) {
+ NestedNameSpecifierLoc Q = Qualifiers.pop_back_val();
+ NestedNameSpecifier *QNNS = Q.getNestedNameSpecifier();
+
+ switch (QNNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ if (SemaRef.BuildCXXNestedNameSpecifier(/*Scope=*/0,
+ *QNNS->getAsIdentifier(),
+ Q.getLocalBeginLoc(),
+ Q.getLocalEndLoc(),
+ ObjectType, false, SS,
+ FirstQualifierInScope, false))
+ return NestedNameSpecifierLoc();
+
+ break;
+
+ case NestedNameSpecifier::Namespace: {
+ NamespaceDecl *NS
+ = cast_or_null<NamespaceDecl>(
+ getDerived().TransformDecl(
+ Q.getLocalBeginLoc(),
+ QNNS->getAsNamespace()));
+ SS.Extend(SemaRef.Context, NS, Q.getLocalBeginLoc(), Q.getLocalEndLoc());
+ break;
+ }
+
+ case NestedNameSpecifier::NamespaceAlias: {
+ NamespaceAliasDecl *Alias
+ = cast_or_null<NamespaceAliasDecl>(
+ getDerived().TransformDecl(Q.getLocalBeginLoc(),
+ QNNS->getAsNamespaceAlias()));
+ SS.Extend(SemaRef.Context, Alias, Q.getLocalBeginLoc(),
+ Q.getLocalEndLoc());
+ break;
+ }
+
+ case NestedNameSpecifier::Global:
+ // There is no meaningful transformation that one could perform on the
+ // global scope.
+ SS.MakeGlobal(SemaRef.Context, Q.getBeginLoc());
+ break;
+
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ case NestedNameSpecifier::TypeSpec: {
+ TypeLoc TL = TransformTypeInObjectScope(Q.getTypeLoc(), ObjectType,
+ FirstQualifierInScope, SS);
+
+ if (!TL)
+ return NestedNameSpecifierLoc();
+
+ if (TL.getType()->isDependentType() || TL.getType()->isRecordType() ||
+ (SemaRef.getLangOpts().CPlusPlus0x &&
+ TL.getType()->isEnumeralType())) {
+ assert(!TL.getType().hasLocalQualifiers() &&
+ "Can't get cv-qualifiers here");
+ if (TL.getType()->isEnumeralType())
+ SemaRef.Diag(TL.getBeginLoc(),
+ diag::warn_cxx98_compat_enum_nested_name_spec);
+ SS.Extend(SemaRef.Context, /*FIXME:*/SourceLocation(), TL,
+ Q.getLocalEndLoc());
+ break;
+ }
+      // If the nested-name-specifier is an invalid typedef, don't emit an
+      // error because a previous error should have already been emitted.
+ TypedefTypeLoc* TTL = dyn_cast<TypedefTypeLoc>(&TL);
+ if (!TTL || !TTL->getTypedefNameDecl()->isInvalidDecl()) {
+ SemaRef.Diag(TL.getBeginLoc(), diag::err_nested_name_spec_non_tag)
+ << TL.getType() << SS.getRange();
+ }
+ return NestedNameSpecifierLoc();
+ }
+ }
+
+ // The qualifier-in-scope and object type only apply to the leftmost entity.
+ FirstQualifierInScope = 0;
+ ObjectType = QualType();
+ }
+
+ // Don't rebuild the nested-name-specifier if we don't have to.
+ if (SS.getScopeRep() == NNS.getNestedNameSpecifier() &&
+ !getDerived().AlwaysRebuild())
+ return NNS;
+
+ // If we can re-use the source-location data from the original
+ // nested-name-specifier, do so.
+ if (SS.location_size() == NNS.getDataLength() &&
+ memcmp(SS.location_data(), NNS.getOpaqueData(), SS.location_size()) == 0)
+ return NestedNameSpecifierLoc(SS.getScopeRep(), NNS.getOpaqueData());
+
+ // Allocate new nested-name-specifier location information.
+ return SS.getWithLocInContext(SemaRef.Context);
+}
+
+template<typename Derived>
+DeclarationNameInfo
+TreeTransform<Derived>
+::TransformDeclarationNameInfo(const DeclarationNameInfo &NameInfo) {
+ DeclarationName Name = NameInfo.getName();
+ if (!Name)
+ return DeclarationNameInfo();
+
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXOperatorName:
+ case DeclarationName::CXXLiteralOperatorName:
+ case DeclarationName::CXXUsingDirective:
+ return NameInfo;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName: {
+ TypeSourceInfo *NewTInfo;
+ CanQualType NewCanTy;
+ if (TypeSourceInfo *OldTInfo = NameInfo.getNamedTypeInfo()) {
+ NewTInfo = getDerived().TransformType(OldTInfo);
+ if (!NewTInfo)
+ return DeclarationNameInfo();
+ NewCanTy = SemaRef.Context.getCanonicalType(NewTInfo->getType());
+ }
+ else {
+ NewTInfo = 0;
+ TemporaryBase Rebase(*this, NameInfo.getLoc(), Name);
+ QualType NewT = getDerived().TransformType(Name.getCXXNameType());
+ if (NewT.isNull())
+ return DeclarationNameInfo();
+ NewCanTy = SemaRef.Context.getCanonicalType(NewT);
+ }
+
+ DeclarationName NewName
+ = SemaRef.Context.DeclarationNames.getCXXSpecialName(Name.getNameKind(),
+ NewCanTy);
+ DeclarationNameInfo NewNameInfo(NameInfo);
+ NewNameInfo.setName(NewName);
+ NewNameInfo.setNamedTypeInfo(NewTInfo);
+ return NewNameInfo;
+ }
+ }
+
+ llvm_unreachable("Unknown name kind.");
+}
+
+template<typename Derived>
+TemplateName
+TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS,
+ TemplateName Name,
+ SourceLocation NameLoc,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope) {
+ if (QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName()) {
+ TemplateDecl *Template = QTN->getTemplateDecl();
+ assert(Template && "qualified template name must refer to a template");
+
+ TemplateDecl *TransTemplate
+ = cast_or_null<TemplateDecl>(getDerived().TransformDecl(NameLoc,
+ Template));
+ if (!TransTemplate)
+ return TemplateName();
+
+ if (!getDerived().AlwaysRebuild() &&
+ SS.getScopeRep() == QTN->getQualifier() &&
+ TransTemplate == Template)
+ return Name;
+
+ return getDerived().RebuildTemplateName(SS, QTN->hasTemplateKeyword(),
+ TransTemplate);
+ }
+
+ if (DependentTemplateName *DTN = Name.getAsDependentTemplateName()) {
+ if (SS.getScopeRep()) {
+ // These apply to the scope specifier, not the template.
+ ObjectType = QualType();
+ FirstQualifierInScope = 0;
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ SS.getScopeRep() == DTN->getQualifier() &&
+ ObjectType.isNull())
+ return Name;
+
+ if (DTN->isIdentifier()) {
+ return getDerived().RebuildTemplateName(SS,
+ *DTN->getIdentifier(),
+ NameLoc,
+ ObjectType,
+ FirstQualifierInScope);
+ }
+
+ return getDerived().RebuildTemplateName(SS, DTN->getOperator(), NameLoc,
+ ObjectType);
+ }
+
+ if (TemplateDecl *Template = Name.getAsTemplateDecl()) {
+ TemplateDecl *TransTemplate
+ = cast_or_null<TemplateDecl>(getDerived().TransformDecl(NameLoc,
+ Template));
+ if (!TransTemplate)
+ return TemplateName();
+
+ if (!getDerived().AlwaysRebuild() &&
+ TransTemplate == Template)
+ return Name;
+
+ return TemplateName(TransTemplate);
+ }
+
+ if (SubstTemplateTemplateParmPackStorage *SubstPack
+ = Name.getAsSubstTemplateTemplateParmPack()) {
+ TemplateTemplateParmDecl *TransParam
+ = cast_or_null<TemplateTemplateParmDecl>(
+ getDerived().TransformDecl(NameLoc, SubstPack->getParameterPack()));
+ if (!TransParam)
+ return TemplateName();
+
+ if (!getDerived().AlwaysRebuild() &&
+ TransParam == SubstPack->getParameterPack())
+ return Name;
+
+ return getDerived().RebuildTemplateName(TransParam,
+ SubstPack->getArgumentPack());
+ }
+
+ // These should be getting filtered out before they reach the AST.
+ llvm_unreachable("overloaded function decl survived to here");
+}
+
+template<typename Derived>
+void TreeTransform<Derived>::InventTemplateArgumentLoc(
+ const TemplateArgument &Arg,
+ TemplateArgumentLoc &Output) {
+ SourceLocation Loc = getDerived().getBaseLocation();
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ llvm_unreachable("null template argument in TreeTransform");
+ break;
+
+ case TemplateArgument::Type:
+ Output = TemplateArgumentLoc(Arg,
+ SemaRef.Context.getTrivialTypeSourceInfo(Arg.getAsType(), Loc));
+
+ break;
+
+ case TemplateArgument::Template:
+ case TemplateArgument::TemplateExpansion: {
+ NestedNameSpecifierLocBuilder Builder;
+ TemplateName Template = Arg.getAsTemplate();
+ if (DependentTemplateName *DTN = Template.getAsDependentTemplateName())
+ Builder.MakeTrivial(SemaRef.Context, DTN->getQualifier(), Loc);
+ else if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
+ Builder.MakeTrivial(SemaRef.Context, QTN->getQualifier(), Loc);
+
+ if (Arg.getKind() == TemplateArgument::Template)
+ Output = TemplateArgumentLoc(Arg,
+ Builder.getWithLocInContext(SemaRef.Context),
+ Loc);
+ else
+ Output = TemplateArgumentLoc(Arg,
+ Builder.getWithLocInContext(SemaRef.Context),
+ Loc, Loc);
+
+ break;
+ }
+
+ case TemplateArgument::Expression:
+ Output = TemplateArgumentLoc(Arg, Arg.getAsExpr());
+ break;
+
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Pack:
+ Output = TemplateArgumentLoc(Arg, TemplateArgumentLocInfo());
+ break;
+ }
+}
+
+template<typename Derived>
+bool TreeTransform<Derived>::TransformTemplateArgument(
+ const TemplateArgumentLoc &Input,
+ TemplateArgumentLoc &Output) {
+ const TemplateArgument &Arg = Input.getArgument();
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ case TemplateArgument::Integral:
+ Output = Input;
+ return false;
+
+ case TemplateArgument::Type: {
+ TypeSourceInfo *DI = Input.getTypeSourceInfo();
+ if (DI == NULL)
+ DI = InventTypeSourceInfo(Input.getArgument().getAsType());
+
+ DI = getDerived().TransformType(DI);
+ if (!DI) return true;
+
+ Output = TemplateArgumentLoc(TemplateArgument(DI->getType()), DI);
+ return false;
+ }
+
+ case TemplateArgument::Declaration: {
+ // FIXME: we should never have to transform one of these.
+ DeclarationName Name;
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(Arg.getAsDecl()))
+ Name = ND->getDeclName();
+ TemporaryBase Rebase(*this, Input.getLocation(), Name);
+ Decl *D = getDerived().TransformDecl(Input.getLocation(), Arg.getAsDecl());
+ if (!D) return true;
+
+ Expr *SourceExpr = Input.getSourceDeclExpression();
+ if (SourceExpr) {
+ EnterExpressionEvaluationContext Unevaluated(getSema(),
+ Sema::ConstantEvaluated);
+ ExprResult E = getDerived().TransformExpr(SourceExpr);
+ E = SemaRef.ActOnConstantExpression(E);
+ SourceExpr = (E.isInvalid() ? 0 : E.take());
+ }
+
+ Output = TemplateArgumentLoc(TemplateArgument(D), SourceExpr);
+ return false;
+ }
+
+ case TemplateArgument::Template: {
+ NestedNameSpecifierLoc QualifierLoc = Input.getTemplateQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc);
+ if (!QualifierLoc)
+ return true;
+ }
+
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ TemplateName Template
+ = getDerived().TransformTemplateName(SS, Arg.getAsTemplate(),
+ Input.getTemplateNameLoc());
+ if (Template.isNull())
+ return true;
+
+ Output = TemplateArgumentLoc(TemplateArgument(Template), QualifierLoc,
+ Input.getTemplateNameLoc());
+ return false;
+ }
+
+ case TemplateArgument::TemplateExpansion:
+ llvm_unreachable("Caller should expand pack expansions");
+
+ case TemplateArgument::Expression: {
+ // Template argument expressions are constant expressions.
+ EnterExpressionEvaluationContext Unevaluated(getSema(),
+ Sema::ConstantEvaluated);
+
+ Expr *InputExpr = Input.getSourceExpression();
+ if (!InputExpr) InputExpr = Input.getArgument().getAsExpr();
+
+ ExprResult E = getDerived().TransformExpr(InputExpr);
+ E = SemaRef.ActOnConstantExpression(E);
+ if (E.isInvalid()) return true;
+ Output = TemplateArgumentLoc(TemplateArgument(E.take()), E.take());
+ return false;
+ }
+
+ case TemplateArgument::Pack: {
+ SmallVector<TemplateArgument, 4> TransformedArgs;
+ TransformedArgs.reserve(Arg.pack_size());
+ for (TemplateArgument::pack_iterator A = Arg.pack_begin(),
+ AEnd = Arg.pack_end();
+ A != AEnd; ++A) {
+
+ // FIXME: preserve source information here when we start
+ // caring about parameter packs.
+
+ TemplateArgumentLoc InputArg;
+ TemplateArgumentLoc OutputArg;
+ getDerived().InventTemplateArgumentLoc(*A, InputArg);
+ if (getDerived().TransformTemplateArgument(InputArg, OutputArg))
+ return true;
+
+ TransformedArgs.push_back(OutputArg.getArgument());
+ }
+
+ TemplateArgument *TransformedArgsPtr
+ = new (getSema().Context) TemplateArgument[TransformedArgs.size()];
+ std::copy(TransformedArgs.begin(), TransformedArgs.end(),
+ TransformedArgsPtr);
+ Output = TemplateArgumentLoc(TemplateArgument(TransformedArgsPtr,
+ TransformedArgs.size()),
+ Input.getLocInfo());
+ return false;
+ }
+ }
+
+ // Work around bogus GCC warning
+ return true;
+}
+
+/// \brief Iterator adaptor that invents template argument location information
+/// for each of the template arguments in its underlying iterator.
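+/// (Used below by TransformTemplateArguments to walk the elements of a
+/// TemplateArgument pack as if each element had its own TemplateArgumentLoc.)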
+template<typename Derived, typename InputIterator>
+class TemplateArgumentLocInventIterator {
+ TreeTransform<Derived> &Self;
+ InputIterator Iter;
+
+public:
+ typedef TemplateArgumentLoc value_type;
+ typedef TemplateArgumentLoc reference;
+ typedef typename std::iterator_traits<InputIterator>::difference_type
+ difference_type;
+ typedef std::input_iterator_tag iterator_category;
+
+ class pointer {
+ TemplateArgumentLoc Arg;
+
+ public:
+ explicit pointer(TemplateArgumentLoc Arg) : Arg(Arg) { }
+
+ const TemplateArgumentLoc *operator->() const { return &Arg; }
+ };
+
+ TemplateArgumentLocInventIterator() { }
+
+ explicit TemplateArgumentLocInventIterator(TreeTransform<Derived> &Self,
+ InputIterator Iter)
+ : Self(Self), Iter(Iter) { }
+
+ TemplateArgumentLocInventIterator &operator++() {
+ ++Iter;
+ return *this;
+ }
+
+ TemplateArgumentLocInventIterator operator++(int) {
+ TemplateArgumentLocInventIterator Old(*this);
+ ++(*this);
+ return Old;
+ }
+
+ reference operator*() const {
+ TemplateArgumentLoc Result;
+ Self.InventTemplateArgumentLoc(*Iter, Result);
+ return Result;
+ }
+
+ pointer operator->() const { return pointer(**this); }
+
+ friend bool operator==(const TemplateArgumentLocInventIterator &X,
+ const TemplateArgumentLocInventIterator &Y) {
+ return X.Iter == Y.Iter;
+ }
+
+ friend bool operator!=(const TemplateArgumentLocInventIterator &X,
+ const TemplateArgumentLocInventIterator &Y) {
+ return X.Iter != Y.Iter;
+ }
+};
+
+template<typename Derived>
+template<typename InputIterator>
+bool TreeTransform<Derived>::TransformTemplateArguments(InputIterator First,
+ InputIterator Last,
+ TemplateArgumentListInfo &Outputs) {
+ for (; First != Last; ++First) {
+ TemplateArgumentLoc Out;
+ TemplateArgumentLoc In = *First;
+
+ if (In.getArgument().getKind() == TemplateArgument::Pack) {
+      // Unpack argument packs, translating them into separate arguments.
+ // FIXME: We could do much better if we could guarantee that the
+ // TemplateArgumentLocInfo for the pack expansion would be usable for
+ // all of the template arguments in the argument pack.
+ typedef TemplateArgumentLocInventIterator<Derived,
+ TemplateArgument::pack_iterator>
+ PackLocIterator;
+ if (TransformTemplateArguments(PackLocIterator(*this,
+ In.getArgument().pack_begin()),
+ PackLocIterator(*this,
+ In.getArgument().pack_end()),
+ Outputs))
+ return true;
+
+ continue;
+ }
+
+ if (In.getArgument().isPackExpansion()) {
+ // We have a pack expansion, for which we will be substituting into
+ // the pattern.
+ SourceLocation Ellipsis;
+ llvm::Optional<unsigned> OrigNumExpansions;
+ TemplateArgumentLoc Pattern
+ = In.getPackExpansionPattern(Ellipsis, OrigNumExpansions,
+ getSema().Context);
+
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> NumExpansions = OrigNumExpansions;
+ if (getDerived().TryExpandParameterPacks(Ellipsis,
+ Pattern.getSourceRange(),
+ Unexpanded,
+ Expand,
+ RetainExpansion,
+ NumExpansions))
+ return true;
+
+ if (!Expand) {
+ // The transform has determined that we should perform a simple
+ // transformation on the pack expansion, producing another pack
+ // expansion.
+ TemplateArgumentLoc OutPattern;
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ if (getDerived().TransformTemplateArgument(Pattern, OutPattern))
+ return true;
+
+ Out = getDerived().RebuildPackExpansion(OutPattern, Ellipsis,
+ NumExpansions);
+ if (Out.getArgument().isNull())
+ return true;
+
+ Outputs.addArgument(Out);
+ continue;
+ }
+
+ // The transform has determined that we should perform an elementwise
+ // expansion of the pattern. Do so.
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+
+ if (getDerived().TransformTemplateArgument(Pattern, Out))
+ return true;
+
+ if (Out.getArgument().containsUnexpandedParameterPack()) {
+ Out = getDerived().RebuildPackExpansion(Out, Ellipsis,
+ OrigNumExpansions);
+ if (Out.getArgument().isNull())
+ return true;
+ }
+
+ Outputs.addArgument(Out);
+ }
+
+ // If we're supposed to retain a pack expansion, do so by temporarily
+ // forgetting the partially-substituted parameter pack.
+ if (RetainExpansion) {
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+
+ if (getDerived().TransformTemplateArgument(Pattern, Out))
+ return true;
+
+ Out = getDerived().RebuildPackExpansion(Out, Ellipsis,
+ OrigNumExpansions);
+ if (Out.getArgument().isNull())
+ return true;
+
+ Outputs.addArgument(Out);
+ }
+
+ continue;
+ }
+
+ // The simple case:
+ if (getDerived().TransformTemplateArgument(In, Out))
+ return true;
+
+ Outputs.addArgument(Out);
+ }
+
+  return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Type transformation
+//===----------------------------------------------------------------------===//
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformType(QualType T) {
+ if (getDerived().AlreadyTransformed(T))
+ return T;
+
+ // Temporary workaround. All of these transformations should
+ // eventually turn into transformations on TypeLocs.
+ TypeSourceInfo *DI = getSema().Context.getTrivialTypeSourceInfo(T,
+ getDerived().getBaseLocation());
+
+ TypeSourceInfo *NewDI = getDerived().TransformType(DI);
+
+ if (!NewDI)
+ return QualType();
+
+ return NewDI->getType();
+}
+
+template<typename Derived>
+TypeSourceInfo *TreeTransform<Derived>::TransformType(TypeSourceInfo *DI) {
+ // Refine the base location to the type's location.
+ TemporaryBase Rebase(*this, DI->getTypeLoc().getBeginLoc(),
+ getDerived().getBaseEntity());
+ if (getDerived().AlreadyTransformed(DI->getType()))
+ return DI;
+
+ TypeLocBuilder TLB;
+
+ TypeLoc TL = DI->getTypeLoc();
+ TLB.reserve(TL.getFullDataSize());
+
+ QualType Result = getDerived().TransformType(TLB, TL);
+ if (Result.isNull())
+ return 0;
+
+ return TLB.getTypeSourceInfo(SemaRef.Context, Result);
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformType(TypeLocBuilder &TLB, TypeLoc T) {
+ switch (T.getTypeLocClass()) {
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ case TypeLoc::CLASS: \
+ return getDerived().Transform##CLASS##Type(TLB, cast<CLASS##TypeLoc>(T));
+#include "clang/AST/TypeLocNodes.def"
+ }
+
+ llvm_unreachable("unhandled type loc!");
+}
+
+/// FIXME: By default, this routine adds type qualifiers only to types
+/// that can have qualifiers, and silently suppresses those qualifiers
+/// that are not permitted (e.g., qualifiers on reference or function
+/// types). This is the right thing for template instantiation, but
+/// probably not for other clients.
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformQualifiedType(TypeLocBuilder &TLB,
+ QualifiedTypeLoc T) {
+ Qualifiers Quals = T.getType().getLocalQualifiers();
+
+ QualType Result = getDerived().TransformType(TLB, T.getUnqualifiedLoc());
+ if (Result.isNull())
+ return QualType();
+
+ // Silently suppress qualifiers if the result type can't be qualified.
+ // FIXME: this is the right thing for template instantiation, but
+ // probably not for other clients.
+ if (Result->isFunctionType() || Result->isReferenceType())
+ return Result;
+
+ // Suppress Objective-C lifetime qualifiers if they don't make sense for the
+ // resulting type.
+ if (Quals.hasObjCLifetime()) {
+ if (!Result->isObjCLifetimeType() && !Result->isDependentType())
+ Quals.removeObjCLifetime();
+ else if (Result.getObjCLifetime()) {
+ // Objective-C ARC:
+ // A lifetime qualifier applied to a substituted template parameter
+ // overrides the lifetime qualifier from the template argument.
+ if (const SubstTemplateTypeParmType *SubstTypeParam
+ = dyn_cast<SubstTemplateTypeParmType>(Result)) {
+ QualType Replacement = SubstTypeParam->getReplacementType();
+ Qualifiers Qs = Replacement.getQualifiers();
+ Qs.removeObjCLifetime();
+ Replacement
+ = SemaRef.Context.getQualifiedType(Replacement.getUnqualifiedType(),
+ Qs);
+ Result = SemaRef.Context.getSubstTemplateTypeParmType(
+ SubstTypeParam->getReplacedParameter(),
+ Replacement);
+ TLB.TypeWasModifiedSafely(Result);
+ } else {
+ // Otherwise, complain about the addition of a qualifier to an
+ // already-qualified type.
+ SourceRange R = TLB.getTemporaryTypeLoc(Result).getSourceRange();
+ SemaRef.Diag(R.getBegin(), diag::err_attr_objc_ownership_redundant)
+ << Result << R;
+
+ Quals.removeObjCLifetime();
+ }
+ }
+ }
+ if (!Quals.empty()) {
+ Result = SemaRef.BuildQualifiedType(Result, T.getBeginLoc(), Quals);
+ TLB.push<QualifiedTypeLoc>(Result);
+ // No location information to preserve.
+ }
+
+ return Result;
+}
+
+template<typename Derived>
+TypeLoc
+TreeTransform<Derived>::TransformTypeInObjectScope(TypeLoc TL,
+ QualType ObjectType,
+ NamedDecl *UnqualLookup,
+ CXXScopeSpec &SS) {
+ QualType T = TL.getType();
+ if (getDerived().AlreadyTransformed(T))
+ return TL;
+
+ TypeLocBuilder TLB;
+ QualType Result;
+
+ if (isa<TemplateSpecializationType>(T)) {
+ TemplateSpecializationTypeLoc SpecTL
+ = cast<TemplateSpecializationTypeLoc>(TL);
+
+ TemplateName Template =
+ getDerived().TransformTemplateName(SS,
+ SpecTL.getTypePtr()->getTemplateName(),
+ SpecTL.getTemplateNameLoc(),
+ ObjectType, UnqualLookup);
+ if (Template.isNull())
+ return TypeLoc();
+
+ Result = getDerived().TransformTemplateSpecializationType(TLB, SpecTL,
+ Template);
+ } else if (isa<DependentTemplateSpecializationType>(T)) {
+ DependentTemplateSpecializationTypeLoc SpecTL
+ = cast<DependentTemplateSpecializationTypeLoc>(TL);
+
+ TemplateName Template
+ = getDerived().RebuildTemplateName(SS,
+ *SpecTL.getTypePtr()->getIdentifier(),
+ SpecTL.getTemplateNameLoc(),
+ ObjectType, UnqualLookup);
+ if (Template.isNull())
+ return TypeLoc();
+
+ Result = getDerived().TransformDependentTemplateSpecializationType(TLB,
+ SpecTL,
+ Template,
+ SS);
+ } else {
+ // Nothing special needs to be done for these.
+ Result = getDerived().TransformType(TLB, TL);
+ }
+
+ if (Result.isNull())
+ return TypeLoc();
+
+ return TLB.getTypeSourceInfo(SemaRef.Context, Result)->getTypeLoc();
+}
+
+template<typename Derived>
+TypeSourceInfo *
+TreeTransform<Derived>::TransformTypeInObjectScope(TypeSourceInfo *TSInfo,
+ QualType ObjectType,
+ NamedDecl *UnqualLookup,
+ CXXScopeSpec &SS) {
+ // FIXME: Painfully copy-paste from the above!
+
+ QualType T = TSInfo->getType();
+ if (getDerived().AlreadyTransformed(T))
+ return TSInfo;
+
+ TypeLocBuilder TLB;
+ QualType Result;
+
+ TypeLoc TL = TSInfo->getTypeLoc();
+ if (isa<TemplateSpecializationType>(T)) {
+ TemplateSpecializationTypeLoc SpecTL
+ = cast<TemplateSpecializationTypeLoc>(TL);
+
+ TemplateName Template
+ = getDerived().TransformTemplateName(SS,
+ SpecTL.getTypePtr()->getTemplateName(),
+ SpecTL.getTemplateNameLoc(),
+ ObjectType, UnqualLookup);
+ if (Template.isNull())
+ return 0;
+
+ Result = getDerived().TransformTemplateSpecializationType(TLB, SpecTL,
+ Template);
+ } else if (isa<DependentTemplateSpecializationType>(T)) {
+ DependentTemplateSpecializationTypeLoc SpecTL
+ = cast<DependentTemplateSpecializationTypeLoc>(TL);
+
+ TemplateName Template
+ = getDerived().RebuildTemplateName(SS,
+ *SpecTL.getTypePtr()->getIdentifier(),
+ SpecTL.getTemplateNameLoc(),
+ ObjectType, UnqualLookup);
+ if (Template.isNull())
+ return 0;
+
+ Result = getDerived().TransformDependentTemplateSpecializationType(TLB,
+ SpecTL,
+ Template,
+ SS);
+ } else {
+ // Nothing special needs to be done for these.
+ Result = getDerived().TransformType(TLB, TL);
+ }
+
+ if (Result.isNull())
+ return 0;
+
+ return TLB.getTypeSourceInfo(SemaRef.Context, Result);
+}
+
+template <class TyLoc> static inline
+QualType TransformTypeSpecType(TypeLocBuilder &TLB, TyLoc T) {
+ TyLoc NewT = TLB.push<TyLoc>(T.getType());
+ NewT.setNameLoc(T.getNameLoc());
+ return T.getType();
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformBuiltinType(TypeLocBuilder &TLB,
+ BuiltinTypeLoc T) {
+ BuiltinTypeLoc NewT = TLB.push<BuiltinTypeLoc>(T.getType());
+ NewT.setBuiltinLoc(T.getBuiltinLoc());
+ if (T.needsExtraLocalData())
+ NewT.getWrittenBuiltinSpecs() = T.getWrittenBuiltinSpecs();
+ return T.getType();
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformComplexType(TypeLocBuilder &TLB,
+ ComplexTypeLoc T) {
+ // FIXME: recurse?
+ return TransformTypeSpecType(TLB, T);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformPointerType(TypeLocBuilder &TLB,
+ PointerTypeLoc TL) {
+ QualType PointeeType
+ = getDerived().TransformType(TLB, TL.getPointeeLoc());
+ if (PointeeType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (PointeeType->getAs<ObjCObjectType>()) {
+    // A dependent pointer type 'T *' is being transformed such that an
+    // Objective-C class type has been substituted for 'T'.  The resulting
+    // pointer type is an ObjCObjectPointerType, not a PointerType.
+ Result = SemaRef.Context.getObjCObjectPointerType(PointeeType);
+
+ ObjCObjectPointerTypeLoc NewT = TLB.push<ObjCObjectPointerTypeLoc>(Result);
+ NewT.setStarLoc(TL.getStarLoc());
+ return Result;
+ }
+
+ if (getDerived().AlwaysRebuild() ||
+ PointeeType != TL.getPointeeLoc().getType()) {
+ Result = getDerived().RebuildPointerType(PointeeType, TL.getSigilLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ // Objective-C ARC can add lifetime qualifiers to the type that we're
+ // pointing to.
+ TLB.TypeWasModifiedSafely(Result->getPointeeType());
+
+ PointerTypeLoc NewT = TLB.push<PointerTypeLoc>(Result);
+ NewT.setSigilLoc(TL.getSigilLoc());
+ return Result;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformBlockPointerType(TypeLocBuilder &TLB,
+ BlockPointerTypeLoc TL) {
+ QualType PointeeType
+ = getDerived().TransformType(TLB, TL.getPointeeLoc());
+ if (PointeeType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ PointeeType != TL.getPointeeLoc().getType()) {
+ Result = getDerived().RebuildBlockPointerType(PointeeType,
+ TL.getSigilLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ BlockPointerTypeLoc NewT = TLB.push<BlockPointerTypeLoc>(Result);
+ NewT.setSigilLoc(TL.getSigilLoc());
+ return Result;
+}
+
+/// Transforms a reference type. Note that somewhat paradoxically we
+/// don't care whether the type itself is an l-value type or an r-value
+/// type; we only care if the type was *written* as an l-value type
+/// or an r-value type.
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformReferenceType(TypeLocBuilder &TLB,
+ ReferenceTypeLoc TL) {
+ const ReferenceType *T = TL.getTypePtr();
+
+ // Note that this works with the pointee-as-written.
+ QualType PointeeType = getDerived().TransformType(TLB, TL.getPointeeLoc());
+ if (PointeeType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ PointeeType != T->getPointeeTypeAsWritten()) {
+ Result = getDerived().RebuildReferenceType(PointeeType,
+ T->isSpelledAsLValue(),
+ TL.getSigilLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ // Objective-C ARC can add lifetime qualifiers to the type that we're
+ // referring to.
+ TLB.TypeWasModifiedSafely(
+ Result->getAs<ReferenceType>()->getPointeeTypeAsWritten());
+
+ // r-value references can be rebuilt as l-value references.
+ ReferenceTypeLoc NewTL;
+ if (isa<LValueReferenceType>(Result))
+ NewTL = TLB.push<LValueReferenceTypeLoc>(Result);
+ else
+ NewTL = TLB.push<RValueReferenceTypeLoc>(Result);
+ NewTL.setSigilLoc(TL.getSigilLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformLValueReferenceType(TypeLocBuilder &TLB,
+ LValueReferenceTypeLoc TL) {
+ return TransformReferenceType(TLB, TL);
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformRValueReferenceType(TypeLocBuilder &TLB,
+ RValueReferenceTypeLoc TL) {
+ return TransformReferenceType(TLB, TL);
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformMemberPointerType(TypeLocBuilder &TLB,
+ MemberPointerTypeLoc TL) {
+ QualType PointeeType = getDerived().TransformType(TLB, TL.getPointeeLoc());
+ if (PointeeType.isNull())
+ return QualType();
+
+ TypeSourceInfo* OldClsTInfo = TL.getClassTInfo();
+ TypeSourceInfo* NewClsTInfo = 0;
+ if (OldClsTInfo) {
+ NewClsTInfo = getDerived().TransformType(OldClsTInfo);
+ if (!NewClsTInfo)
+ return QualType();
+ }
+
+ const MemberPointerType *T = TL.getTypePtr();
+ QualType OldClsType = QualType(T->getClass(), 0);
+ QualType NewClsType;
+ if (NewClsTInfo)
+ NewClsType = NewClsTInfo->getType();
+ else {
+ NewClsType = getDerived().TransformType(OldClsType);
+ if (NewClsType.isNull())
+ return QualType();
+ }
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ PointeeType != T->getPointeeType() ||
+ NewClsType != OldClsType) {
+ Result = getDerived().RebuildMemberPointerType(PointeeType, NewClsType,
+ TL.getStarLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ MemberPointerTypeLoc NewTL = TLB.push<MemberPointerTypeLoc>(Result);
+ NewTL.setSigilLoc(TL.getSigilLoc());
+ NewTL.setClassTInfo(NewClsTInfo);
+
+ return Result;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformConstantArrayType(TypeLocBuilder &TLB,
+ ConstantArrayTypeLoc TL) {
+ const ConstantArrayType *T = TL.getTypePtr();
+ QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc());
+ if (ElementType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ElementType != T->getElementType()) {
+ Result = getDerived().RebuildConstantArrayType(ElementType,
+ T->getSizeModifier(),
+ T->getSize(),
+ T->getIndexTypeCVRQualifiers(),
+ TL.getBracketsRange());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ // We might have either a ConstantArrayType or a VariableArrayType now:
+ // a ConstantArrayType is allowed to have an element type which is a
+ // VariableArrayType if the type is dependent. Fortunately, all array
+ // types have the same location layout.
+ ArrayTypeLoc NewTL = TLB.push<ArrayTypeLoc>(Result);
+ NewTL.setLBracketLoc(TL.getLBracketLoc());
+ NewTL.setRBracketLoc(TL.getRBracketLoc());
+
+ Expr *Size = TL.getSizeExpr();
+ if (Size) {
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
+ Size = getDerived().TransformExpr(Size).template takeAs<Expr>();
+ Size = SemaRef.ActOnConstantExpression(Size).take();
+ }
+ NewTL.setSizeExpr(Size);
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformIncompleteArrayType(
+ TypeLocBuilder &TLB,
+ IncompleteArrayTypeLoc TL) {
+ const IncompleteArrayType *T = TL.getTypePtr();
+ QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc());
+ if (ElementType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ElementType != T->getElementType()) {
+ Result = getDerived().RebuildIncompleteArrayType(ElementType,
+ T->getSizeModifier(),
+ T->getIndexTypeCVRQualifiers(),
+ TL.getBracketsRange());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ IncompleteArrayTypeLoc NewTL = TLB.push<IncompleteArrayTypeLoc>(Result);
+ NewTL.setLBracketLoc(TL.getLBracketLoc());
+ NewTL.setRBracketLoc(TL.getRBracketLoc());
+ NewTL.setSizeExpr(0);
+
+ return Result;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformVariableArrayType(TypeLocBuilder &TLB,
+ VariableArrayTypeLoc TL) {
+ const VariableArrayType *T = TL.getTypePtr();
+ QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc());
+ if (ElementType.isNull())
+ return QualType();
+
+ ExprResult SizeResult
+ = getDerived().TransformExpr(T->getSizeExpr());
+ if (SizeResult.isInvalid())
+ return QualType();
+
+ Expr *Size = SizeResult.take();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ElementType != T->getElementType() ||
+ Size != T->getSizeExpr()) {
+ Result = getDerived().RebuildVariableArrayType(ElementType,
+ T->getSizeModifier(),
+ Size,
+ T->getIndexTypeCVRQualifiers(),
+ TL.getBracketsRange());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ VariableArrayTypeLoc NewTL = TLB.push<VariableArrayTypeLoc>(Result);
+ NewTL.setLBracketLoc(TL.getLBracketLoc());
+ NewTL.setRBracketLoc(TL.getRBracketLoc());
+ NewTL.setSizeExpr(Size);
+
+ return Result;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformDependentSizedArrayType(TypeLocBuilder &TLB,
+ DependentSizedArrayTypeLoc TL) {
+ const DependentSizedArrayType *T = TL.getTypePtr();
+ QualType ElementType = getDerived().TransformType(TLB, TL.getElementLoc());
+ if (ElementType.isNull())
+ return QualType();
+
+ // Array bounds are constant expressions.
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
+
+ // Prefer the expression from the TypeLoc; the other may have been uniqued.
+ Expr *origSize = TL.getSizeExpr();
+ if (!origSize) origSize = T->getSizeExpr();
+
+ ExprResult sizeResult
+ = getDerived().TransformExpr(origSize);
+ sizeResult = SemaRef.ActOnConstantExpression(sizeResult);
+ if (sizeResult.isInvalid())
+ return QualType();
+
+ Expr *size = sizeResult.get();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ElementType != T->getElementType() ||
+ size != origSize) {
+ Result = getDerived().RebuildDependentSizedArrayType(ElementType,
+ T->getSizeModifier(),
+ size,
+ T->getIndexTypeCVRQualifiers(),
+ TL.getBracketsRange());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ // We might have any sort of array type now, but fortunately they
+ // all have the same location layout.
+ ArrayTypeLoc NewTL = TLB.push<ArrayTypeLoc>(Result);
+ NewTL.setLBracketLoc(TL.getLBracketLoc());
+ NewTL.setRBracketLoc(TL.getRBracketLoc());
+ NewTL.setSizeExpr(size);
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformDependentSizedExtVectorType(
+ TypeLocBuilder &TLB,
+ DependentSizedExtVectorTypeLoc TL) {
+ const DependentSizedExtVectorType *T = TL.getTypePtr();
+
+ // FIXME: ext vector locs should be nested
+ QualType ElementType = getDerived().TransformType(T->getElementType());
+ if (ElementType.isNull())
+ return QualType();
+
+ // Vector sizes are constant expressions.
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
+
+ ExprResult Size = getDerived().TransformExpr(T->getSizeExpr());
+ Size = SemaRef.ActOnConstantExpression(Size);
+ if (Size.isInvalid())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ElementType != T->getElementType() ||
+ Size.get() != T->getSizeExpr()) {
+ Result = getDerived().RebuildDependentSizedExtVectorType(ElementType,
+ Size.take(),
+ T->getAttributeLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ // Result might be dependent or not.
+ if (isa<DependentSizedExtVectorType>(Result)) {
+ DependentSizedExtVectorTypeLoc NewTL
+ = TLB.push<DependentSizedExtVectorTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ } else {
+ ExtVectorTypeLoc NewTL = TLB.push<ExtVectorTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ }
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformVectorType(TypeLocBuilder &TLB,
+ VectorTypeLoc TL) {
+ const VectorType *T = TL.getTypePtr();
+ QualType ElementType = getDerived().TransformType(T->getElementType());
+ if (ElementType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ElementType != T->getElementType()) {
+ Result = getDerived().RebuildVectorType(ElementType, T->getNumElements(),
+ T->getVectorKind());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ VectorTypeLoc NewTL = TLB.push<VectorTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformExtVectorType(TypeLocBuilder &TLB,
+ ExtVectorTypeLoc TL) {
+ const VectorType *T = TL.getTypePtr();
+ QualType ElementType = getDerived().TransformType(T->getElementType());
+ if (ElementType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ElementType != T->getElementType()) {
+ Result = getDerived().RebuildExtVectorType(ElementType,
+ T->getNumElements(),
+ /*FIXME*/ SourceLocation());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ ExtVectorTypeLoc NewTL = TLB.push<ExtVectorTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+ParmVarDecl *
+TreeTransform<Derived>::TransformFunctionTypeParam(ParmVarDecl *OldParm,
+ int indexAdjustment,
+ llvm::Optional<unsigned> NumExpansions,
+ bool ExpectParameterPack) {
+ TypeSourceInfo *OldDI = OldParm->getTypeSourceInfo();
+ TypeSourceInfo *NewDI = 0;
+
+ if (NumExpansions && isa<PackExpansionType>(OldDI->getType())) {
+ // If we're substituting into a pack expansion type and we know the
+ // length we want to expand to, just substitute for the pattern.
+ TypeLoc OldTL = OldDI->getTypeLoc();
+ PackExpansionTypeLoc OldExpansionTL = cast<PackExpansionTypeLoc>(OldTL);
+
+ TypeLocBuilder TLB;
+ TypeLoc NewTL = OldDI->getTypeLoc();
+ TLB.reserve(NewTL.getFullDataSize());
+
+ QualType Result = getDerived().TransformType(TLB,
+ OldExpansionTL.getPatternLoc());
+ if (Result.isNull())
+ return 0;
+
+ Result = RebuildPackExpansionType(Result,
+ OldExpansionTL.getPatternLoc().getSourceRange(),
+ OldExpansionTL.getEllipsisLoc(),
+ NumExpansions);
+ if (Result.isNull())
+ return 0;
+
+ PackExpansionTypeLoc NewExpansionTL
+ = TLB.push<PackExpansionTypeLoc>(Result);
+ NewExpansionTL.setEllipsisLoc(OldExpansionTL.getEllipsisLoc());
+ NewDI = TLB.getTypeSourceInfo(SemaRef.Context, Result);
+ } else
+ NewDI = getDerived().TransformType(OldDI);
+ if (!NewDI)
+ return 0;
+
+ if (NewDI == OldDI && indexAdjustment == 0)
+ return OldParm;
+
+ ParmVarDecl *newParm = ParmVarDecl::Create(SemaRef.Context,
+ OldParm->getDeclContext(),
+ OldParm->getInnerLocStart(),
+ OldParm->getLocation(),
+ OldParm->getIdentifier(),
+ NewDI->getType(),
+ NewDI,
+ OldParm->getStorageClass(),
+ OldParm->getStorageClassAsWritten(),
+ /* DefArg */ NULL);
+ newParm->setScopeInfo(OldParm->getFunctionScopeDepth(),
+ OldParm->getFunctionScopeIndex() + indexAdjustment);
+ return newParm;
+}
+
+template<typename Derived>
+bool TreeTransform<Derived>::
+ TransformFunctionTypeParams(SourceLocation Loc,
+ ParmVarDecl **Params, unsigned NumParams,
+ const QualType *ParamTypes,
+ SmallVectorImpl<QualType> &OutParamTypes,
+ SmallVectorImpl<ParmVarDecl*> *PVars) {
+ int indexAdjustment = 0;
+
+ for (unsigned i = 0; i != NumParams; ++i) {
+ if (ParmVarDecl *OldParm = Params[i]) {
+ assert(OldParm->getFunctionScopeIndex() == i);
+
+ llvm::Optional<unsigned> NumExpansions;
+ ParmVarDecl *NewParm = 0;
+ if (OldParm->isParameterPack()) {
+ // We have a function parameter pack that may need to be expanded.
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+
+ // Find the parameter packs that could be expanded.
+ TypeLoc TL = OldParm->getTypeSourceInfo()->getTypeLoc();
+ PackExpansionTypeLoc ExpansionTL = cast<PackExpansionTypeLoc>(TL);
+ TypeLoc Pattern = ExpansionTL.getPatternLoc();
+ SemaRef.collectUnexpandedParameterPacks(Pattern, Unexpanded);
+ assert(Unexpanded.size() > 0 && "Could not find parameter packs!");
+
+ // Determine whether we should expand the parameter packs.
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> OrigNumExpansions
+ = ExpansionTL.getTypePtr()->getNumExpansions();
+ NumExpansions = OrigNumExpansions;
+ if (getDerived().TryExpandParameterPacks(ExpansionTL.getEllipsisLoc(),
+ Pattern.getSourceRange(),
+ Unexpanded,
+ ShouldExpand,
+ RetainExpansion,
+ NumExpansions)) {
+ return true;
+ }
+
+ if (ShouldExpand) {
+ // Expand the function parameter pack into multiple, separate
+ // parameters.
+ getDerived().ExpandingFunctionParameterPack(OldParm);
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ ParmVarDecl *NewParm
+ = getDerived().TransformFunctionTypeParam(OldParm,
+ indexAdjustment++,
+ OrigNumExpansions,
+ /*ExpectParameterPack=*/false);
+ if (!NewParm)
+ return true;
+
+ OutParamTypes.push_back(NewParm->getType());
+ if (PVars)
+ PVars->push_back(NewParm);
+ }
+
+ // If we're supposed to retain a pack expansion, do so by temporarily
+ // forgetting the partially-substituted parameter pack.
+ if (RetainExpansion) {
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+ ParmVarDecl *NewParm
+ = getDerived().TransformFunctionTypeParam(OldParm,
+ indexAdjustment++,
+ OrigNumExpansions,
+ /*ExpectParameterPack=*/false);
+ if (!NewParm)
+ return true;
+
+ OutParamTypes.push_back(NewParm->getType());
+ if (PVars)
+ PVars->push_back(NewParm);
+ }
+
+ // The next parameter should have the same adjustment as the
+ // last thing we pushed, but we post-incremented indexAdjustment
+ // on every push. Also, if we push nothing, the adjustment should
+ // go down by one.
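+          // (For example, a pack that expands to three parameters leaves
+          // indexAdjustment three higher after the loop above; the decrement
+          // below yields a net adjustment of +2, matching the two extra
+          // parameter slots the expansion introduced.)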
+ indexAdjustment--;
+
+ // We're done with the pack expansion.
+ continue;
+ }
+
+ // We'll substitute the parameter now without expanding the pack
+ // expansion.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ NewParm = getDerived().TransformFunctionTypeParam(OldParm,
+ indexAdjustment,
+ NumExpansions,
+ /*ExpectParameterPack=*/true);
+ } else {
+ NewParm = getDerived().TransformFunctionTypeParam(OldParm,
+ indexAdjustment,
+ llvm::Optional<unsigned>(),
+ /*ExpectParameterPack=*/false);
+ }
+
+ if (!NewParm)
+ return true;
+
+ OutParamTypes.push_back(NewParm->getType());
+ if (PVars)
+ PVars->push_back(NewParm);
+ continue;
+ }
+
+ // Deal with the possibility that we don't have a parameter
+ // declaration for this parameter.
+ QualType OldType = ParamTypes[i];
+ bool IsPackExpansion = false;
+ llvm::Optional<unsigned> NumExpansions;
+ QualType NewType;
+ if (const PackExpansionType *Expansion
+ = dyn_cast<PackExpansionType>(OldType)) {
+ // We have a function parameter pack that may need to be expanded.
+ QualType Pattern = Expansion->getPattern();
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ getSema().collectUnexpandedParameterPacks(Pattern, Unexpanded);
+
+ // Determine whether we should expand the parameter packs.
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ if (getDerived().TryExpandParameterPacks(Loc, SourceRange(),
+ Unexpanded,
+ ShouldExpand,
+ RetainExpansion,
+ NumExpansions)) {
+ return true;
+ }
+
+ if (ShouldExpand) {
+ // Expand the function parameter pack into multiple, separate
+ // parameters.
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ QualType NewType = getDerived().TransformType(Pattern);
+ if (NewType.isNull())
+ return true;
+
+ OutParamTypes.push_back(NewType);
+ if (PVars)
+ PVars->push_back(0);
+ }
+
+ // We're done with the pack expansion.
+ continue;
+ }
+
+ // If we're supposed to retain a pack expansion, do so by temporarily
+ // forgetting the partially-substituted parameter pack.
+ if (RetainExpansion) {
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+ QualType NewType = getDerived().TransformType(Pattern);
+ if (NewType.isNull())
+ return true;
+
+ OutParamTypes.push_back(NewType);
+ if (PVars)
+ PVars->push_back(0);
+ }
+
+ // We'll substitute the parameter now without expanding the pack
+ // expansion.
+ OldType = Expansion->getPattern();
+ IsPackExpansion = true;
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ NewType = getDerived().TransformType(OldType);
+ } else {
+ NewType = getDerived().TransformType(OldType);
+ }
+
+ if (NewType.isNull())
+ return true;
+
+ if (IsPackExpansion)
+ NewType = getSema().Context.getPackExpansionType(NewType,
+ NumExpansions);
+
+ OutParamTypes.push_back(NewType);
+ if (PVars)
+ PVars->push_back(0);
+ }
+
+#ifndef NDEBUG
+ if (PVars) {
+ for (unsigned i = 0, e = PVars->size(); i != e; ++i)
+ if (ParmVarDecl *parm = (*PVars)[i])
+ assert(parm->getFunctionScopeIndex() == i);
+ }
+#endif
+
+ return false;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformFunctionProtoType(TypeLocBuilder &TLB,
+ FunctionProtoTypeLoc TL) {
+ // Transform the parameters and return type.
+ //
+ // We instantiate in source order, with the return type first followed by
+ // the parameters, because users tend to expect this (even if they shouldn't
+ // rely on it!).
+ //
+ // When the function has a trailing return type, we instantiate the
+ // parameters before the return type, since the return type can then refer
+ // to the parameters themselves (via decltype, sizeof, etc.).
+ //
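+  // (For example, a trailing return type such as
+  //   auto f(T t) -> decltype(t.begin());
+  // can only be transformed after the parameter 't' itself has been
+  // transformed.)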
+ SmallVector<QualType, 4> ParamTypes;
+ SmallVector<ParmVarDecl*, 4> ParamDecls;
+ const FunctionProtoType *T = TL.getTypePtr();
+
+ QualType ResultType;
+
+ if (TL.getTrailingReturn()) {
+ if (getDerived().TransformFunctionTypeParams(TL.getBeginLoc(),
+ TL.getParmArray(),
+ TL.getNumArgs(),
+ TL.getTypePtr()->arg_type_begin(),
+ ParamTypes, &ParamDecls))
+ return QualType();
+
+ ResultType = getDerived().TransformType(TLB, TL.getResultLoc());
+ if (ResultType.isNull())
+ return QualType();
+ }
+ else {
+ ResultType = getDerived().TransformType(TLB, TL.getResultLoc());
+ if (ResultType.isNull())
+ return QualType();
+
+ if (getDerived().TransformFunctionTypeParams(TL.getBeginLoc(),
+ TL.getParmArray(),
+ TL.getNumArgs(),
+ TL.getTypePtr()->arg_type_begin(),
+ ParamTypes, &ParamDecls))
+ return QualType();
+ }
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ResultType != T->getResultType() ||
+ T->getNumArgs() != ParamTypes.size() ||
+ !std::equal(T->arg_type_begin(), T->arg_type_end(), ParamTypes.begin())) {
+ Result = getDerived().RebuildFunctionProtoType(ResultType,
+ ParamTypes.data(),
+ ParamTypes.size(),
+ T->isVariadic(),
+ T->hasTrailingReturn(),
+ T->getTypeQuals(),
+ T->getRefQualifier(),
+ T->getExtInfo());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ FunctionProtoTypeLoc NewTL = TLB.push<FunctionProtoTypeLoc>(Result);
+ NewTL.setLocalRangeBegin(TL.getLocalRangeBegin());
+ NewTL.setLocalRangeEnd(TL.getLocalRangeEnd());
+ NewTL.setTrailingReturn(TL.getTrailingReturn());
+ for (unsigned i = 0, e = NewTL.getNumArgs(); i != e; ++i)
+ NewTL.setArg(i, ParamDecls[i]);
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformFunctionNoProtoType(
+ TypeLocBuilder &TLB,
+ FunctionNoProtoTypeLoc TL) {
+ const FunctionNoProtoType *T = TL.getTypePtr();
+ QualType ResultType = getDerived().TransformType(TLB, TL.getResultLoc());
+ if (ResultType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ResultType != T->getResultType())
+ Result = getDerived().RebuildFunctionNoProtoType(ResultType);
+
+ FunctionNoProtoTypeLoc NewTL = TLB.push<FunctionNoProtoTypeLoc>(Result);
+ NewTL.setLocalRangeBegin(TL.getLocalRangeBegin());
+ NewTL.setLocalRangeEnd(TL.getLocalRangeEnd());
+ NewTL.setTrailingReturn(false);
+
+ return Result;
+}
+
+template<typename Derived> QualType
+TreeTransform<Derived>::TransformUnresolvedUsingType(TypeLocBuilder &TLB,
+ UnresolvedUsingTypeLoc TL) {
+ const UnresolvedUsingType *T = TL.getTypePtr();
+ Decl *D = getDerived().TransformDecl(TL.getNameLoc(), T->getDecl());
+ if (!D)
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || D != T->getDecl()) {
+ Result = getDerived().RebuildUnresolvedUsingType(D);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ // We might get an arbitrary type spec type back. We should at
+ // least always get a type spec type, though.
+ TypeSpecTypeLoc NewTL = TLB.pushTypeSpec(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformTypedefType(TypeLocBuilder &TLB,
+ TypedefTypeLoc TL) {
+ const TypedefType *T = TL.getTypePtr();
+ TypedefNameDecl *Typedef
+ = cast_or_null<TypedefNameDecl>(getDerived().TransformDecl(TL.getNameLoc(),
+ T->getDecl()));
+ if (!Typedef)
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ Typedef != T->getDecl()) {
+ Result = getDerived().RebuildTypedefType(Typedef);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ TypedefTypeLoc NewTL = TLB.push<TypedefTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformTypeOfExprType(TypeLocBuilder &TLB,
+ TypeOfExprTypeLoc TL) {
+ // typeof expressions are not potentially evaluated contexts
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+
+ ExprResult E = getDerived().TransformExpr(TL.getUnderlyingExpr());
+ if (E.isInvalid())
+ return QualType();
+
+ E = SemaRef.HandleExprEvaluationContextForTypeof(E.get());
+ if (E.isInvalid())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ E.get() != TL.getUnderlyingExpr()) {
+ Result = getDerived().RebuildTypeOfExprType(E.get(), TL.getTypeofLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+ else E.take();
+
+ TypeOfExprTypeLoc NewTL = TLB.push<TypeOfExprTypeLoc>(Result);
+ NewTL.setTypeofLoc(TL.getTypeofLoc());
+ NewTL.setLParenLoc(TL.getLParenLoc());
+ NewTL.setRParenLoc(TL.getRParenLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformTypeOfType(TypeLocBuilder &TLB,
+ TypeOfTypeLoc TL) {
+ TypeSourceInfo* Old_Under_TI = TL.getUnderlyingTInfo();
+ TypeSourceInfo* New_Under_TI = getDerived().TransformType(Old_Under_TI);
+ if (!New_Under_TI)
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || New_Under_TI != Old_Under_TI) {
+ Result = getDerived().RebuildTypeOfType(New_Under_TI->getType());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ TypeOfTypeLoc NewTL = TLB.push<TypeOfTypeLoc>(Result);
+ NewTL.setTypeofLoc(TL.getTypeofLoc());
+ NewTL.setLParenLoc(TL.getLParenLoc());
+ NewTL.setRParenLoc(TL.getRParenLoc());
+ NewTL.setUnderlyingTInfo(New_Under_TI);
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformDecltypeType(TypeLocBuilder &TLB,
+ DecltypeTypeLoc TL) {
+ const DecltypeType *T = TL.getTypePtr();
+
+ // decltype expressions are not potentially evaluated contexts
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated, 0,
+ /*IsDecltype=*/ true);
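+  // (Illustrative note: in 'decltype(f(x))' the call is never executed; the
+  // operand is only used to compute a type, and ActOnDecltypeExpression below
+  // performs the decltype-specific handling of the transformed operand.)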
+
+ ExprResult E = getDerived().TransformExpr(T->getUnderlyingExpr());
+ if (E.isInvalid())
+ return QualType();
+
+ E = getSema().ActOnDecltypeExpression(E.take());
+ if (E.isInvalid())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ E.get() != T->getUnderlyingExpr()) {
+ Result = getDerived().RebuildDecltypeType(E.get(), TL.getNameLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+ else E.take();
+
+ DecltypeTypeLoc NewTL = TLB.push<DecltypeTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformUnaryTransformType(
+ TypeLocBuilder &TLB,
+ UnaryTransformTypeLoc TL) {
+ QualType Result = TL.getType();
+ if (Result->isDependentType()) {
+ const UnaryTransformType *T = TL.getTypePtr();
+ QualType NewBase =
+ getDerived().TransformType(TL.getUnderlyingTInfo())->getType();
+ Result = getDerived().RebuildUnaryTransformType(NewBase,
+ T->getUTTKind(),
+ TL.getKWLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ UnaryTransformTypeLoc NewTL = TLB.push<UnaryTransformTypeLoc>(Result);
+ NewTL.setKWLoc(TL.getKWLoc());
+ NewTL.setParensRange(TL.getParensRange());
+ NewTL.setUnderlyingTInfo(TL.getUnderlyingTInfo());
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformAutoType(TypeLocBuilder &TLB,
+ AutoTypeLoc TL) {
+ const AutoType *T = TL.getTypePtr();
+ QualType OldDeduced = T->getDeducedType();
+ QualType NewDeduced;
+ if (!OldDeduced.isNull()) {
+ NewDeduced = getDerived().TransformType(OldDeduced);
+ if (NewDeduced.isNull())
+ return QualType();
+ }
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() || NewDeduced != OldDeduced) {
+ Result = getDerived().RebuildAutoType(NewDeduced);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ AutoTypeLoc NewTL = TLB.push<AutoTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformRecordType(TypeLocBuilder &TLB,
+ RecordTypeLoc TL) {
+ const RecordType *T = TL.getTypePtr();
+ RecordDecl *Record
+ = cast_or_null<RecordDecl>(getDerived().TransformDecl(TL.getNameLoc(),
+ T->getDecl()));
+ if (!Record)
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ Record != T->getDecl()) {
+ Result = getDerived().RebuildRecordType(Record);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ RecordTypeLoc NewTL = TLB.push<RecordTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformEnumType(TypeLocBuilder &TLB,
+ EnumTypeLoc TL) {
+ const EnumType *T = TL.getTypePtr();
+ EnumDecl *Enum
+ = cast_or_null<EnumDecl>(getDerived().TransformDecl(TL.getNameLoc(),
+ T->getDecl()));
+ if (!Enum)
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ Enum != T->getDecl()) {
+ Result = getDerived().RebuildEnumType(Enum);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ EnumTypeLoc NewTL = TLB.push<EnumTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformInjectedClassNameType(
+ TypeLocBuilder &TLB,
+ InjectedClassNameTypeLoc TL) {
+ Decl *D = getDerived().TransformDecl(TL.getNameLoc(),
+ TL.getTypePtr()->getDecl());
+ if (!D) return QualType();
+
+ QualType T = SemaRef.Context.getTypeDeclType(cast<TypeDecl>(D));
+ TLB.pushTypeSpec(T).setNameLoc(TL.getNameLoc());
+ return T;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformTemplateTypeParmType(
+ TypeLocBuilder &TLB,
+ TemplateTypeParmTypeLoc TL) {
+ return TransformTypeSpecType(TLB, TL);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmType(
+ TypeLocBuilder &TLB,
+ SubstTemplateTypeParmTypeLoc TL) {
+ const SubstTemplateTypeParmType *T = TL.getTypePtr();
+
+ // Substitute into the replacement type, which itself might involve something
+ // that needs to be transformed. This only tends to occur with default
+ // template arguments of template template parameters.
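+  // (Illustrative note: the replacement type here can still mention template
+  // parameters of an enclosing template, e.g. when it came from a default
+  // argument such as 'class U = T*' on a template template parameter, which
+  // is why it is transformed again.)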
+ TemporaryBase Rebase(*this, TL.getNameLoc(), DeclarationName());
+ QualType Replacement = getDerived().TransformType(T->getReplacementType());
+ if (Replacement.isNull())
+ return QualType();
+
+ // Always canonicalize the replacement type.
+ Replacement = SemaRef.Context.getCanonicalType(Replacement);
+ QualType Result
+ = SemaRef.Context.getSubstTemplateTypeParmType(T->getReplacedParameter(),
+ Replacement);
+
+ // Propagate type-source information.
+ SubstTemplateTypeParmTypeLoc NewTL
+ = TLB.push<SubstTemplateTypeParmTypeLoc>(Result);
+ NewTL.setNameLoc(TL.getNameLoc());
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformSubstTemplateTypeParmPackType(
+ TypeLocBuilder &TLB,
+ SubstTemplateTypeParmPackTypeLoc TL) {
+ return TransformTypeSpecType(TLB, TL);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
+ TypeLocBuilder &TLB,
+ TemplateSpecializationTypeLoc TL) {
+ const TemplateSpecializationType *T = TL.getTypePtr();
+
+ // The nested-name-specifier never matters in a TemplateSpecializationType,
+ // because we can't have a dependent nested-name-specifier anyway.
+ CXXScopeSpec SS;
+ TemplateName Template
+ = getDerived().TransformTemplateName(SS, T->getTemplateName(),
+ TL.getTemplateNameLoc());
+ if (Template.isNull())
+ return QualType();
+
+ return getDerived().TransformTemplateSpecializationType(TLB, TL, Template);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformAtomicType(TypeLocBuilder &TLB,
+ AtomicTypeLoc TL) {
+ QualType ValueType = getDerived().TransformType(TLB, TL.getValueLoc());
+ if (ValueType.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ ValueType != TL.getValueLoc().getType()) {
+ Result = getDerived().RebuildAtomicType(ValueType, TL.getKWLoc());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ AtomicTypeLoc NewTL = TLB.push<AtomicTypeLoc>(Result);
+ NewTL.setKWLoc(TL.getKWLoc());
+ NewTL.setLParenLoc(TL.getLParenLoc());
+ NewTL.setRParenLoc(TL.getRParenLoc());
+
+ return Result;
+}
+
+namespace {
+ /// \brief Simple iterator that traverses the template arguments in a
+ /// container that provides a \c getArgLoc() member function.
+ ///
+ /// This iterator is intended to be used with the iterator form of
+ /// \c TreeTransform<Derived>::TransformTemplateArguments().
+ template<typename ArgLocContainer>
+ class TemplateArgumentLocContainerIterator {
+ ArgLocContainer *Container;
+ unsigned Index;
+
+ public:
+ typedef TemplateArgumentLoc value_type;
+ typedef TemplateArgumentLoc reference;
+ typedef int difference_type;
+ typedef std::input_iterator_tag iterator_category;
+
+ class pointer {
+ TemplateArgumentLoc Arg;
+
+ public:
+ explicit pointer(TemplateArgumentLoc Arg) : Arg(Arg) { }
+
+ const TemplateArgumentLoc *operator->() const {
+ return &Arg;
+ }
+ };
+
+
+ TemplateArgumentLocContainerIterator() {}
+
+ TemplateArgumentLocContainerIterator(ArgLocContainer &Container,
+ unsigned Index)
+ : Container(&Container), Index(Index) { }
+
+ TemplateArgumentLocContainerIterator &operator++() {
+ ++Index;
+ return *this;
+ }
+
+ TemplateArgumentLocContainerIterator operator++(int) {
+ TemplateArgumentLocContainerIterator Old(*this);
+ ++(*this);
+ return Old;
+ }
+
+ TemplateArgumentLoc operator*() const {
+ return Container->getArgLoc(Index);
+ }
+
+ pointer operator->() const {
+ return pointer(Container->getArgLoc(Index));
+ }
+
+ friend bool operator==(const TemplateArgumentLocContainerIterator &X,
+ const TemplateArgumentLocContainerIterator &Y) {
+ return X.Container == Y.Container && X.Index == Y.Index;
+ }
+
+ friend bool operator!=(const TemplateArgumentLocContainerIterator &X,
+ const TemplateArgumentLocContainerIterator &Y) {
+ return !(X == Y);
+ }
+ };
+}
+
+
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformTemplateSpecializationType(
+ TypeLocBuilder &TLB,
+ TemplateSpecializationTypeLoc TL,
+ TemplateName Template) {
+ TemplateArgumentListInfo NewTemplateArgs;
+ NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
+ NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
+ typedef TemplateArgumentLocContainerIterator<TemplateSpecializationTypeLoc>
+ ArgIterator;
+ if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0),
+ ArgIterator(TL, TL.getNumArgs()),
+ NewTemplateArgs))
+ return QualType();
+
+ // FIXME: maybe don't rebuild if all the template arguments are the same.
+
+ QualType Result =
+ getDerived().RebuildTemplateSpecializationType(Template,
+ TL.getTemplateNameLoc(),
+ NewTemplateArgs);
+
+ if (!Result.isNull()) {
+ // Specializations of template template parameters are represented as
+ // TemplateSpecializationTypes, and substitution of type alias templates
+ // within a dependent context can transform them into
+ // DependentTemplateSpecializationTypes.
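+    // (Illustrative: an alias such as
+    //   template<typename T> using A = typename T::template Y<int>;
+    // can, while T is still dependent, produce a dependent template
+    // specialization here.)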
+ if (isa<DependentTemplateSpecializationType>(Result)) {
+ DependentTemplateSpecializationTypeLoc NewTL
+ = TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(SourceLocation());
+ NewTL.setQualifierLoc(NestedNameSpecifierLoc());
+ NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
+ NewTL.setLAngleLoc(TL.getLAngleLoc());
+ NewTL.setRAngleLoc(TL.getRAngleLoc());
+ for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
+ NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
+ return Result;
+ }
+
+ TemplateSpecializationTypeLoc NewTL
+ = TLB.push<TemplateSpecializationTypeLoc>(Result);
+ NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
+ NewTL.setLAngleLoc(TL.getLAngleLoc());
+ NewTL.setRAngleLoc(TL.getRAngleLoc());
+ for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
+ NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
+ }
+
+ return Result;
+}
+
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformDependentTemplateSpecializationType(
+ TypeLocBuilder &TLB,
+ DependentTemplateSpecializationTypeLoc TL,
+ TemplateName Template,
+ CXXScopeSpec &SS) {
+ TemplateArgumentListInfo NewTemplateArgs;
+ NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
+ NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
+ typedef TemplateArgumentLocContainerIterator<
+ DependentTemplateSpecializationTypeLoc> ArgIterator;
+ if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0),
+ ArgIterator(TL, TL.getNumArgs()),
+ NewTemplateArgs))
+ return QualType();
+
+ // FIXME: maybe don't rebuild if all the template arguments are the same.
+
+ if (DependentTemplateName *DTN = Template.getAsDependentTemplateName()) {
+ QualType Result
+ = getSema().Context.getDependentTemplateSpecializationType(
+ TL.getTypePtr()->getKeyword(),
+ DTN->getQualifier(),
+ DTN->getIdentifier(),
+ NewTemplateArgs);
+
+ DependentTemplateSpecializationTypeLoc NewTL
+ = TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ NewTL.setQualifierLoc(SS.getWithLocInContext(SemaRef.Context));
+ NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
+ NewTL.setLAngleLoc(TL.getLAngleLoc());
+ NewTL.setRAngleLoc(TL.getRAngleLoc());
+ for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
+ NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
+ return Result;
+ }
+
+ QualType Result
+ = getDerived().RebuildTemplateSpecializationType(Template,
+ TL.getTemplateNameLoc(),
+ NewTemplateArgs);
+
+ if (!Result.isNull()) {
+ /// FIXME: Wrap this in an elaborated-type-specifier?
+ TemplateSpecializationTypeLoc NewTL
+ = TLB.push<TemplateSpecializationTypeLoc>(Result);
+ NewTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ NewTL.setTemplateNameLoc(TL.getTemplateNameLoc());
+ NewTL.setLAngleLoc(TL.getLAngleLoc());
+ NewTL.setRAngleLoc(TL.getRAngleLoc());
+ for (unsigned i = 0, e = NewTemplateArgs.size(); i != e; ++i)
+ NewTL.setArgLocInfo(i, NewTemplateArgs[i].getLocInfo());
+ }
+
+ return Result;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformElaboratedType(TypeLocBuilder &TLB,
+ ElaboratedTypeLoc TL) {
+ const ElaboratedType *T = TL.getTypePtr();
+
+ NestedNameSpecifierLoc QualifierLoc;
+ // NOTE: the qualifier in an ElaboratedType is optional.
+ if (TL.getQualifierLoc()) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(TL.getQualifierLoc());
+ if (!QualifierLoc)
+ return QualType();
+ }
+
+ QualType NamedT = getDerived().TransformType(TLB, TL.getNamedTypeLoc());
+ if (NamedT.isNull())
+ return QualType();
+
+ // C++0x [dcl.type.elab]p2:
+ // If the identifier resolves to a typedef-name or the simple-template-id
+ // resolves to an alias template specialization, the
+ // elaborated-type-specifier is ill-formed.
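+  //
+  // For example (illustrative):
+  //   template<typename T> using Alias = SomeClass<T>;
+  //   struct Alias<int> a;  // ill-formed: a tag keyword cannot name an
+  //                         // alias template specialization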
+ if (T->getKeyword() != ETK_None && T->getKeyword() != ETK_Typename) {
+ if (const TemplateSpecializationType *TST =
+ NamedT->getAs<TemplateSpecializationType>()) {
+ TemplateName Template = TST->getTemplateName();
+ if (TypeAliasTemplateDecl *TAT =
+ dyn_cast_or_null<TypeAliasTemplateDecl>(Template.getAsTemplateDecl())) {
+ SemaRef.Diag(TL.getNamedTypeLoc().getBeginLoc(),
+ diag::err_tag_reference_non_tag) << 4;
+ SemaRef.Diag(TAT->getLocation(), diag::note_declared_at);
+ }
+ }
+ }
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ QualifierLoc != TL.getQualifierLoc() ||
+ NamedT != T->getNamedType()) {
+ Result = getDerived().RebuildElaboratedType(TL.getElaboratedKeywordLoc(),
+ T->getKeyword(),
+ QualifierLoc, NamedT);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ NewTL.setQualifierLoc(QualifierLoc);
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformAttributedType(
+ TypeLocBuilder &TLB,
+ AttributedTypeLoc TL) {
+ const AttributedType *oldType = TL.getTypePtr();
+ QualType modifiedType = getDerived().TransformType(TLB, TL.getModifiedLoc());
+ if (modifiedType.isNull())
+ return QualType();
+
+ QualType result = TL.getType();
+
+ // FIXME: dependent operand expressions?
+ if (getDerived().AlwaysRebuild() ||
+ modifiedType != oldType->getModifiedType()) {
+ // TODO: this is really lame; we should really be rebuilding the
+ // equivalent type from first principles.
+ QualType equivalentType
+ = getDerived().TransformType(oldType->getEquivalentType());
+ if (equivalentType.isNull())
+ return QualType();
+ result = SemaRef.Context.getAttributedType(oldType->getAttrKind(),
+ modifiedType,
+ equivalentType);
+ }
+
+ AttributedTypeLoc newTL = TLB.push<AttributedTypeLoc>(result);
+ newTL.setAttrNameLoc(TL.getAttrNameLoc());
+ if (TL.hasAttrOperand())
+ newTL.setAttrOperandParensRange(TL.getAttrOperandParensRange());
+ if (TL.hasAttrExprOperand())
+ newTL.setAttrExprOperand(TL.getAttrExprOperand());
+ else if (TL.hasAttrEnumOperand())
+ newTL.setAttrEnumOperandLoc(TL.getAttrEnumOperandLoc());
+
+ return result;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformParenType(TypeLocBuilder &TLB,
+ ParenTypeLoc TL) {
+ QualType Inner = getDerived().TransformType(TLB, TL.getInnerLoc());
+ if (Inner.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ Inner != TL.getInnerLoc().getType()) {
+ Result = getDerived().RebuildParenType(Inner);
+ if (Result.isNull())
+ return QualType();
+ }
+
+ ParenTypeLoc NewTL = TLB.push<ParenTypeLoc>(Result);
+ NewTL.setLParenLoc(TL.getLParenLoc());
+ NewTL.setRParenLoc(TL.getRParenLoc());
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformDependentNameType(TypeLocBuilder &TLB,
+ DependentNameTypeLoc TL) {
+ const DependentNameType *T = TL.getTypePtr();
+
+ NestedNameSpecifierLoc QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(TL.getQualifierLoc());
+ if (!QualifierLoc)
+ return QualType();
+
+ QualType Result
+ = getDerived().RebuildDependentNameType(T->getKeyword(),
+ TL.getElaboratedKeywordLoc(),
+ QualifierLoc,
+ T->getIdentifier(),
+ TL.getNameLoc());
+ if (Result.isNull())
+ return QualType();
+
+ if (const ElaboratedType* ElabT = Result->getAs<ElaboratedType>()) {
+ QualType NamedT = ElabT->getNamedType();
+ TLB.pushTypeSpec(NamedT).setNameLoc(TL.getNameLoc());
+
+ ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ NewTL.setQualifierLoc(QualifierLoc);
+ } else {
+ DependentNameTypeLoc NewTL = TLB.push<DependentNameTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ NewTL.setQualifierLoc(QualifierLoc);
+ NewTL.setNameLoc(TL.getNameLoc());
+ }
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::
+ TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
+ DependentTemplateSpecializationTypeLoc TL) {
+ NestedNameSpecifierLoc QualifierLoc;
+ if (TL.getQualifierLoc()) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(TL.getQualifierLoc());
+ if (!QualifierLoc)
+ return QualType();
+ }
+
+ return getDerived()
+ .TransformDependentTemplateSpecializationType(TLB, TL, QualifierLoc);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::
+TransformDependentTemplateSpecializationType(TypeLocBuilder &TLB,
+ DependentTemplateSpecializationTypeLoc TL,
+ NestedNameSpecifierLoc QualifierLoc) {
+ const DependentTemplateSpecializationType *T = TL.getTypePtr();
+
+ TemplateArgumentListInfo NewTemplateArgs;
+ NewTemplateArgs.setLAngleLoc(TL.getLAngleLoc());
+ NewTemplateArgs.setRAngleLoc(TL.getRAngleLoc());
+
+ typedef TemplateArgumentLocContainerIterator<
+ DependentTemplateSpecializationTypeLoc> ArgIterator;
+ if (getDerived().TransformTemplateArguments(ArgIterator(TL, 0),
+ ArgIterator(TL, TL.getNumArgs()),
+ NewTemplateArgs))
+ return QualType();
+
+ QualType Result
+ = getDerived().RebuildDependentTemplateSpecializationType(T->getKeyword(),
+ QualifierLoc,
+ T->getIdentifier(),
+ TL.getTemplateNameLoc(),
+ NewTemplateArgs);
+ if (Result.isNull())
+ return QualType();
+
+ if (const ElaboratedType *ElabT = dyn_cast<ElaboratedType>(Result)) {
+ QualType NamedT = ElabT->getNamedType();
+
+ // Copy information relevant to the template specialization.
+ TemplateSpecializationTypeLoc NamedTL
+ = TLB.push<TemplateSpecializationTypeLoc>(NamedT);
+ NamedTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ NamedTL.setTemplateNameLoc(TL.getTemplateNameLoc());
+ NamedTL.setLAngleLoc(TL.getLAngleLoc());
+ NamedTL.setRAngleLoc(TL.getRAngleLoc());
+ for (unsigned I = 0, E = NewTemplateArgs.size(); I != E; ++I)
+ NamedTL.setArgLocInfo(I, NewTemplateArgs[I].getLocInfo());
+
+ // Copy information relevant to the elaborated type.
+ ElaboratedTypeLoc NewTL = TLB.push<ElaboratedTypeLoc>(Result);
+ NewTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ NewTL.setQualifierLoc(QualifierLoc);
+ } else if (isa<DependentTemplateSpecializationType>(Result)) {
+ DependentTemplateSpecializationTypeLoc SpecTL
+ = TLB.push<DependentTemplateSpecializationTypeLoc>(Result);
+ SpecTL.setElaboratedKeywordLoc(TL.getElaboratedKeywordLoc());
+ SpecTL.setQualifierLoc(QualifierLoc);
+ SpecTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ SpecTL.setTemplateNameLoc(TL.getTemplateNameLoc());
+ SpecTL.setLAngleLoc(TL.getLAngleLoc());
+ SpecTL.setRAngleLoc(TL.getRAngleLoc());
+ for (unsigned I = 0, E = NewTemplateArgs.size(); I != E; ++I)
+ SpecTL.setArgLocInfo(I, NewTemplateArgs[I].getLocInfo());
+ } else {
+ TemplateSpecializationTypeLoc SpecTL
+ = TLB.push<TemplateSpecializationTypeLoc>(Result);
+ SpecTL.setTemplateKeywordLoc(TL.getTemplateKeywordLoc());
+ SpecTL.setTemplateNameLoc(TL.getTemplateNameLoc());
+ SpecTL.setLAngleLoc(TL.getLAngleLoc());
+ SpecTL.setRAngleLoc(TL.getRAngleLoc());
+ for (unsigned I = 0, E = NewTemplateArgs.size(); I != E; ++I)
+ SpecTL.setArgLocInfo(I, NewTemplateArgs[I].getLocInfo());
+ }
+ return Result;
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::TransformPackExpansionType(TypeLocBuilder &TLB,
+ PackExpansionTypeLoc TL) {
+ QualType Pattern
+ = getDerived().TransformType(TLB, TL.getPatternLoc());
+ if (Pattern.isNull())
+ return QualType();
+
+ QualType Result = TL.getType();
+ if (getDerived().AlwaysRebuild() ||
+ Pattern != TL.getPatternLoc().getType()) {
+ Result = getDerived().RebuildPackExpansionType(Pattern,
+ TL.getPatternLoc().getSourceRange(),
+ TL.getEllipsisLoc(),
+ TL.getTypePtr()->getNumExpansions());
+ if (Result.isNull())
+ return QualType();
+ }
+
+ PackExpansionTypeLoc NewT = TLB.push<PackExpansionTypeLoc>(Result);
+ NewT.setEllipsisLoc(TL.getEllipsisLoc());
+ return Result;
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformObjCInterfaceType(TypeLocBuilder &TLB,
+ ObjCInterfaceTypeLoc TL) {
+ // ObjCInterfaceType is never dependent.
+ TLB.pushFullCopy(TL);
+ return TL.getType();
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformObjCObjectType(TypeLocBuilder &TLB,
+ ObjCObjectTypeLoc TL) {
+ // ObjCObjectType is never dependent.
+ TLB.pushFullCopy(TL);
+ return TL.getType();
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::TransformObjCObjectPointerType(TypeLocBuilder &TLB,
+ ObjCObjectPointerTypeLoc TL) {
+ // ObjCObjectPointerType is never dependent.
+ TLB.pushFullCopy(TL);
+ return TL.getType();
+}
+
+//===----------------------------------------------------------------------===//
+// Statement transformation
+//===----------------------------------------------------------------------===//
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformNullStmt(NullStmt *S) {
+ return SemaRef.Owned(S);
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformCompoundStmt(CompoundStmt *S) {
+ return getDerived().TransformCompoundStmt(S, false);
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformCompoundStmt(CompoundStmt *S,
+ bool IsStmtExpr) {
+ Sema::CompoundScopeRAII CompoundScope(getSema());
+
+ bool SubStmtInvalid = false;
+ bool SubStmtChanged = false;
+ ASTOwningVector<Stmt*> Statements(getSema());
+ for (CompoundStmt::body_iterator B = S->body_begin(), BEnd = S->body_end();
+ B != BEnd; ++B) {
+ StmtResult Result = getDerived().TransformStmt(*B);
+ if (Result.isInvalid()) {
+ // Immediately fail if this was a DeclStmt, since it's very
+ // likely that this will cause problems for future statements.
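+      // (Illustrative: if 'int n = <bad-init>;' fails here, any later use of
+      // 'n' in this compound statement would only produce cascading errors.)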
+ if (isa<DeclStmt>(*B))
+ return StmtError();
+
+ // Otherwise, just keep processing substatements and fail later.
+ SubStmtInvalid = true;
+ continue;
+ }
+
+ SubStmtChanged = SubStmtChanged || Result.get() != *B;
+ Statements.push_back(Result.takeAs<Stmt>());
+ }
+
+ if (SubStmtInvalid)
+ return StmtError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ !SubStmtChanged)
+ return SemaRef.Owned(S);
+
+ return getDerived().RebuildCompoundStmt(S->getLBracLoc(),
+ move_arg(Statements),
+ S->getRBracLoc(),
+ IsStmtExpr);
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformCaseStmt(CaseStmt *S) {
+ ExprResult LHS, RHS;
+ {
+ EnterExpressionEvaluationContext Unevaluated(SemaRef,
+ Sema::ConstantEvaluated);
+
+ // Transform the left-hand case value.
+ LHS = getDerived().TransformExpr(S->getLHS());
+ LHS = SemaRef.ActOnConstantExpression(LHS);
+ if (LHS.isInvalid())
+ return StmtError();
+
+ // Transform the right-hand case value (for the GNU case-range extension).
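+    // (Illustrative: in the GNU range form 'case 1 ... 5:' both LHS and RHS
+    // are present; for a standard 'case 1:' the RHS is simply null.)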
+ RHS = getDerived().TransformExpr(S->getRHS());
+ RHS = SemaRef.ActOnConstantExpression(RHS);
+ if (RHS.isInvalid())
+ return StmtError();
+ }
+
+ // Build the case statement.
+  // Case statements are always rebuilt so that they will be attached to their
+  // transformed switch statement.
+ StmtResult Case = getDerived().RebuildCaseStmt(S->getCaseLoc(),
+ LHS.get(),
+ S->getEllipsisLoc(),
+ RHS.get(),
+ S->getColonLoc());
+ if (Case.isInvalid())
+ return StmtError();
+
+ // Transform the statement following the case
+ StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt());
+ if (SubStmt.isInvalid())
+ return StmtError();
+
+ // Attach the body to the case statement
+ return getDerived().RebuildCaseStmtBody(Case.get(), SubStmt.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformDefaultStmt(DefaultStmt *S) {
+ // Transform the statement following the default case
+ StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt());
+ if (SubStmt.isInvalid())
+ return StmtError();
+
+ // Default statements are always rebuilt
+ return getDerived().RebuildDefaultStmt(S->getDefaultLoc(), S->getColonLoc(),
+ SubStmt.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformLabelStmt(LabelStmt *S) {
+ StmtResult SubStmt = getDerived().TransformStmt(S->getSubStmt());
+ if (SubStmt.isInvalid())
+ return StmtError();
+
+ Decl *LD = getDerived().TransformDecl(S->getDecl()->getLocation(),
+ S->getDecl());
+ if (!LD)
+ return StmtError();
+
+
+ // FIXME: Pass the real colon location in.
+ return getDerived().RebuildLabelStmt(S->getIdentLoc(),
+ cast<LabelDecl>(LD), SourceLocation(),
+ SubStmt.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformIfStmt(IfStmt *S) {
+ // Transform the condition
+ ExprResult Cond;
+ VarDecl *ConditionVar = 0;
+ if (S->getConditionVariable()) {
+ ConditionVar
+ = cast_or_null<VarDecl>(
+ getDerived().TransformDefinition(
+ S->getConditionVariable()->getLocation(),
+ S->getConditionVariable()));
+ if (!ConditionVar)
+ return StmtError();
+ } else {
+ Cond = getDerived().TransformExpr(S->getCond());
+
+ if (Cond.isInvalid())
+ return StmtError();
+
+ // Convert the condition to a boolean value.
+ if (S->getCond()) {
+ ExprResult CondE = getSema().ActOnBooleanCondition(0, S->getIfLoc(),
+ Cond.get());
+ if (CondE.isInvalid())
+ return StmtError();
+
+ Cond = CondE.get();
+ }
+ }
+
+ Sema::FullExprArg FullCond(getSema().MakeFullExpr(Cond.take()));
+ if (!S->getConditionVariable() && S->getCond() && !FullCond.get())
+ return StmtError();
+
+ // Transform the "then" branch.
+ StmtResult Then = getDerived().TransformStmt(S->getThen());
+ if (Then.isInvalid())
+ return StmtError();
+
+ // Transform the "else" branch.
+ StmtResult Else = getDerived().TransformStmt(S->getElse());
+ if (Else.isInvalid())
+ return StmtError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ FullCond.get() == S->getCond() &&
+ ConditionVar == S->getConditionVariable() &&
+ Then.get() == S->getThen() &&
+ Else.get() == S->getElse())
+ return SemaRef.Owned(S);
+
+ return getDerived().RebuildIfStmt(S->getIfLoc(), FullCond, ConditionVar,
+ Then.get(),
+ S->getElseLoc(), Else.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformSwitchStmt(SwitchStmt *S) {
+ // Transform the condition.
+ ExprResult Cond;
+ VarDecl *ConditionVar = 0;
+ if (S->getConditionVariable()) {
+ ConditionVar
+ = cast_or_null<VarDecl>(
+ getDerived().TransformDefinition(
+ S->getConditionVariable()->getLocation(),
+ S->getConditionVariable()));
+ if (!ConditionVar)
+ return StmtError();
+ } else {
+ Cond = getDerived().TransformExpr(S->getCond());
+
+ if (Cond.isInvalid())
+ return StmtError();
+ }
+
+ // Rebuild the switch statement.
+ StmtResult Switch
+ = getDerived().RebuildSwitchStmtStart(S->getSwitchLoc(), Cond.get(),
+ ConditionVar);
+ if (Switch.isInvalid())
+ return StmtError();
+
+ // Transform the body of the switch statement.
+ StmtResult Body = getDerived().TransformStmt(S->getBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+ // Complete the switch statement.
+ return getDerived().RebuildSwitchStmtBody(S->getSwitchLoc(), Switch.get(),
+ Body.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformWhileStmt(WhileStmt *S) {
+ // Transform the condition
+ ExprResult Cond;
+ VarDecl *ConditionVar = 0;
+ if (S->getConditionVariable()) {
+ ConditionVar
+ = cast_or_null<VarDecl>(
+ getDerived().TransformDefinition(
+ S->getConditionVariable()->getLocation(),
+ S->getConditionVariable()));
+ if (!ConditionVar)
+ return StmtError();
+ } else {
+ Cond = getDerived().TransformExpr(S->getCond());
+
+ if (Cond.isInvalid())
+ return StmtError();
+
+ if (S->getCond()) {
+ // Convert the condition to a boolean value.
+ ExprResult CondE = getSema().ActOnBooleanCondition(0, S->getWhileLoc(),
+ Cond.get());
+ if (CondE.isInvalid())
+ return StmtError();
+ Cond = CondE;
+ }
+ }
+
+ Sema::FullExprArg FullCond(getSema().MakeFullExpr(Cond.take()));
+ if (!S->getConditionVariable() && S->getCond() && !FullCond.get())
+ return StmtError();
+
+ // Transform the body
+ StmtResult Body = getDerived().TransformStmt(S->getBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ FullCond.get() == S->getCond() &&
+ ConditionVar == S->getConditionVariable() &&
+ Body.get() == S->getBody())
+    return SemaRef.Owned(S);
+
+ return getDerived().RebuildWhileStmt(S->getWhileLoc(), FullCond,
+ ConditionVar, Body.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformDoStmt(DoStmt *S) {
+ // Transform the body
+ StmtResult Body = getDerived().TransformStmt(S->getBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+ // Transform the condition
+ ExprResult Cond = getDerived().TransformExpr(S->getCond());
+ if (Cond.isInvalid())
+ return StmtError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Cond.get() == S->getCond() &&
+ Body.get() == S->getBody())
+ return SemaRef.Owned(S);
+
+ return getDerived().RebuildDoStmt(S->getDoLoc(), Body.get(), S->getWhileLoc(),
+ /*FIXME:*/S->getWhileLoc(), Cond.get(),
+ S->getRParenLoc());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformForStmt(ForStmt *S) {
+ // Transform the initialization statement
+ StmtResult Init = getDerived().TransformStmt(S->getInit());
+ if (Init.isInvalid())
+ return StmtError();
+
+ // Transform the condition
+ ExprResult Cond;
+ VarDecl *ConditionVar = 0;
+ if (S->getConditionVariable()) {
+ ConditionVar
+ = cast_or_null<VarDecl>(
+ getDerived().TransformDefinition(
+ S->getConditionVariable()->getLocation(),
+ S->getConditionVariable()));
+ if (!ConditionVar)
+ return StmtError();
+ } else {
+ Cond = getDerived().TransformExpr(S->getCond());
+
+ if (Cond.isInvalid())
+ return StmtError();
+
+ if (S->getCond()) {
+ // Convert the condition to a boolean value.
+ ExprResult CondE = getSema().ActOnBooleanCondition(0, S->getForLoc(),
+ Cond.get());
+ if (CondE.isInvalid())
+ return StmtError();
+
+ Cond = CondE.get();
+ }
+ }
+
+ Sema::FullExprArg FullCond(getSema().MakeFullExpr(Cond.take()));
+ if (!S->getConditionVariable() && S->getCond() && !FullCond.get())
+ return StmtError();
+
+ // Transform the increment
+ ExprResult Inc = getDerived().TransformExpr(S->getInc());
+ if (Inc.isInvalid())
+ return StmtError();
+
+ Sema::FullExprArg FullInc(getSema().MakeFullExpr(Inc.get()));
+ if (S->getInc() && !FullInc.get())
+ return StmtError();
+
+ // Transform the body
+ StmtResult Body = getDerived().TransformStmt(S->getBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Init.get() == S->getInit() &&
+ FullCond.get() == S->getCond() &&
+ Inc.get() == S->getInc() &&
+ Body.get() == S->getBody())
+ return SemaRef.Owned(S);
+
+ return getDerived().RebuildForStmt(S->getForLoc(), S->getLParenLoc(),
+ Init.get(), FullCond, ConditionVar,
+ FullInc, S->getRParenLoc(), Body.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformGotoStmt(GotoStmt *S) {
+ Decl *LD = getDerived().TransformDecl(S->getLabel()->getLocation(),
+ S->getLabel());
+ if (!LD)
+ return StmtError();
+
+ // Goto statements must always be rebuilt, to resolve the label.
+ return getDerived().RebuildGotoStmt(S->getGotoLoc(), S->getLabelLoc(),
+ cast<LabelDecl>(LD));
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformIndirectGotoStmt(IndirectGotoStmt *S) {
+ ExprResult Target = getDerived().TransformExpr(S->getTarget());
+ if (Target.isInvalid())
+ return StmtError();
+ Target = SemaRef.MaybeCreateExprWithCleanups(Target.take());
+
+ if (!getDerived().AlwaysRebuild() &&
+ Target.get() == S->getTarget())
+ return SemaRef.Owned(S);
+
+ return getDerived().RebuildIndirectGotoStmt(S->getGotoLoc(), S->getStarLoc(),
+ Target.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformContinueStmt(ContinueStmt *S) {
+ return SemaRef.Owned(S);
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformBreakStmt(BreakStmt *S) {
+ return SemaRef.Owned(S);
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformReturnStmt(ReturnStmt *S) {
+ ExprResult Result = getDerived().TransformExpr(S->getRetValue());
+ if (Result.isInvalid())
+ return StmtError();
+
+ // FIXME: We always rebuild the return statement because there is no way
+ // to tell whether the return type of the function has changed.
+ return getDerived().RebuildReturnStmt(S->getReturnLoc(), Result.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformDeclStmt(DeclStmt *S) {
+ bool DeclChanged = false;
+ SmallVector<Decl *, 4> Decls;
+ for (DeclStmt::decl_iterator D = S->decl_begin(), DEnd = S->decl_end();
+ D != DEnd; ++D) {
+ Decl *Transformed = getDerived().TransformDefinition((*D)->getLocation(),
+ *D);
+ if (!Transformed)
+ return StmtError();
+
+ if (Transformed != *D)
+ DeclChanged = true;
+
+ Decls.push_back(Transformed);
+ }
+
+ if (!getDerived().AlwaysRebuild() && !DeclChanged)
+ return SemaRef.Owned(S);
+
+ return getDerived().RebuildDeclStmt(Decls.data(), Decls.size(),
+ S->getStartLoc(), S->getEndLoc());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformAsmStmt(AsmStmt *S) {
+
+ ASTOwningVector<Expr*> Constraints(getSema());
+ ASTOwningVector<Expr*> Exprs(getSema());
+ SmallVector<IdentifierInfo *, 4> Names;
+
+ ExprResult AsmString;
+ ASTOwningVector<Expr*> Clobbers(getSema());
+
+ bool ExprsChanged = false;
+
+ // Go through the outputs.
+ for (unsigned I = 0, E = S->getNumOutputs(); I != E; ++I) {
+ Names.push_back(S->getOutputIdentifier(I));
+
+ // No need to transform the constraint literal.
+ Constraints.push_back(S->getOutputConstraintLiteral(I));
+
+ // Transform the output expr.
+ Expr *OutputExpr = S->getOutputExpr(I);
+ ExprResult Result = getDerived().TransformExpr(OutputExpr);
+ if (Result.isInvalid())
+ return StmtError();
+
+ ExprsChanged |= Result.get() != OutputExpr;
+
+ Exprs.push_back(Result.get());
+ }
+
+ // Go through the inputs.
+ for (unsigned I = 0, E = S->getNumInputs(); I != E; ++I) {
+ Names.push_back(S->getInputIdentifier(I));
+
+ // No need to transform the constraint literal.
+ Constraints.push_back(S->getInputConstraintLiteral(I));
+
+ // Transform the input expr.
+ Expr *InputExpr = S->getInputExpr(I);
+ ExprResult Result = getDerived().TransformExpr(InputExpr);
+ if (Result.isInvalid())
+ return StmtError();
+
+ ExprsChanged |= Result.get() != InputExpr;
+
+ Exprs.push_back(Result.get());
+ }
+
+ if (!getDerived().AlwaysRebuild() && !ExprsChanged)
+ return SemaRef.Owned(S);
+
+ // Go through the clobbers.
+ for (unsigned I = 0, E = S->getNumClobbers(); I != E; ++I)
+ Clobbers.push_back(S->getClobber(I));
+
+ // No need to transform the asm string literal.
+ AsmString = SemaRef.Owned(S->getAsmString());
+
+ return getDerived().RebuildAsmStmt(S->getAsmLoc(),
+ S->isSimple(),
+ S->isVolatile(),
+ S->getNumOutputs(),
+ S->getNumInputs(),
+ Names.data(),
+ move_arg(Constraints),
+ move_arg(Exprs),
+ AsmString.get(),
+ move_arg(Clobbers),
+ S->getRParenLoc(),
+ S->isMSAsm());
+}
+
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformObjCAtTryStmt(ObjCAtTryStmt *S) {
+ // Transform the body of the @try.
+ StmtResult TryBody = getDerived().TransformStmt(S->getTryBody());
+ if (TryBody.isInvalid())
+ return StmtError();
+
+ // Transform the @catch statements (if present).
+ bool AnyCatchChanged = false;
+ ASTOwningVector<Stmt*> CatchStmts(SemaRef);
+ for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I) {
+ StmtResult Catch = getDerived().TransformStmt(S->getCatchStmt(I));
+ if (Catch.isInvalid())
+ return StmtError();
+ if (Catch.get() != S->getCatchStmt(I))
+ AnyCatchChanged = true;
+ CatchStmts.push_back(Catch.release());
+ }
+
+ // Transform the @finally statement (if present).
+ StmtResult Finally;
+ if (S->getFinallyStmt()) {
+ Finally = getDerived().TransformStmt(S->getFinallyStmt());
+ if (Finally.isInvalid())
+ return StmtError();
+ }
+
+ // If nothing changed, just retain this statement.
+ if (!getDerived().AlwaysRebuild() &&
+ TryBody.get() == S->getTryBody() &&
+ !AnyCatchChanged &&
+ Finally.get() == S->getFinallyStmt())
+ return SemaRef.Owned(S);
+
+ // Build a new statement.
+ return getDerived().RebuildObjCAtTryStmt(S->getAtTryLoc(), TryBody.get(),
+ move_arg(CatchStmts), Finally.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+ // Transform the @catch parameter, if there is one.
+ VarDecl *Var = 0;
+ if (VarDecl *FromVar = S->getCatchParamDecl()) {
+ TypeSourceInfo *TSInfo = 0;
+ if (FromVar->getTypeSourceInfo()) {
+ TSInfo = getDerived().TransformType(FromVar->getTypeSourceInfo());
+ if (!TSInfo)
+ return StmtError();
+ }
+
+ QualType T;
+ if (TSInfo)
+ T = TSInfo->getType();
+ else {
+ T = getDerived().TransformType(FromVar->getType());
+ if (T.isNull())
+ return StmtError();
+ }
+
+ Var = getDerived().RebuildObjCExceptionDecl(FromVar, TSInfo, T);
+ if (!Var)
+ return StmtError();
+ }
+
+ StmtResult Body = getDerived().TransformStmt(S->getCatchBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+ return getDerived().RebuildObjCAtCatchStmt(S->getAtCatchLoc(),
+ S->getRParenLoc(),
+ Var, Body.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+ // Transform the body.
+ StmtResult Body = getDerived().TransformStmt(S->getFinallyBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+ // If nothing changed, just retain this statement.
+ if (!getDerived().AlwaysRebuild() &&
+ Body.get() == S->getFinallyBody())
+ return SemaRef.Owned(S);
+
+ // Build a new statement.
+ return getDerived().RebuildObjCAtFinallyStmt(S->getAtFinallyLoc(),
+ Body.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+ ExprResult Operand;
+ if (S->getThrowExpr()) {
+ Operand = getDerived().TransformExpr(S->getThrowExpr());
+ if (Operand.isInvalid())
+ return StmtError();
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ Operand.get() == S->getThrowExpr())
+ return getSema().Owned(S);
+
+ return getDerived().RebuildObjCAtThrowStmt(S->getThrowLoc(), Operand.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformObjCAtSynchronizedStmt(
+ ObjCAtSynchronizedStmt *S) {
+ // Transform the object we are locking.
+ ExprResult Object = getDerived().TransformExpr(S->getSynchExpr());
+ if (Object.isInvalid())
+ return StmtError();
+ Object =
+ getDerived().RebuildObjCAtSynchronizedOperand(S->getAtSynchronizedLoc(),
+ Object.get());
+ if (Object.isInvalid())
+ return StmtError();
+
+ // Transform the body.
+ StmtResult Body = getDerived().TransformStmt(S->getSynchBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+  // If nothing changed, just retain the current statement.
+ if (!getDerived().AlwaysRebuild() &&
+ Object.get() == S->getSynchExpr() &&
+ Body.get() == S->getSynchBody())
+ return SemaRef.Owned(S);
+
+ // Build a new statement.
+ return getDerived().RebuildObjCAtSynchronizedStmt(S->getAtSynchronizedLoc(),
+ Object.get(), Body.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformObjCAutoreleasePoolStmt(
+ ObjCAutoreleasePoolStmt *S) {
+ // Transform the body.
+ StmtResult Body = getDerived().TransformStmt(S->getSubStmt());
+ if (Body.isInvalid())
+ return StmtError();
+
+ // If nothing changed, just retain this statement.
+ if (!getDerived().AlwaysRebuild() &&
+ Body.get() == S->getSubStmt())
+ return SemaRef.Owned(S);
+
+ // Build a new statement.
+ return getDerived().RebuildObjCAutoreleasePoolStmt(
+ S->getAtLoc(), Body.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformObjCForCollectionStmt(
+ ObjCForCollectionStmt *S) {
+ // Transform the element statement.
+ StmtResult Element = getDerived().TransformStmt(S->getElement());
+ if (Element.isInvalid())
+ return StmtError();
+
+ // Transform the collection expression.
+ ExprResult Collection = getDerived().TransformExpr(S->getCollection());
+ if (Collection.isInvalid())
+ return StmtError();
+ Collection = getDerived().RebuildObjCForCollectionOperand(S->getForLoc(),
+ Collection.take());
+ if (Collection.isInvalid())
+ return StmtError();
+
+ // Transform the body.
+ StmtResult Body = getDerived().TransformStmt(S->getBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+ // If nothing changed, just retain this statement.
+ if (!getDerived().AlwaysRebuild() &&
+ Element.get() == S->getElement() &&
+ Collection.get() == S->getCollection() &&
+ Body.get() == S->getBody())
+ return SemaRef.Owned(S);
+
+ // Build a new statement.
+ return getDerived().RebuildObjCForCollectionStmt(S->getForLoc(),
+ /*FIXME:*/S->getForLoc(),
+ Element.get(),
+ Collection.get(),
+ S->getRParenLoc(),
+ Body.get());
+}
+
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformCXXCatchStmt(CXXCatchStmt *S) {
+ // Transform the exception declaration, if any.
+ VarDecl *Var = 0;
+ if (S->getExceptionDecl()) {
+ VarDecl *ExceptionDecl = S->getExceptionDecl();
+ TypeSourceInfo *T = getDerived().TransformType(
+ ExceptionDecl->getTypeSourceInfo());
+ if (!T)
+ return StmtError();
+
+ Var = getDerived().RebuildExceptionDecl(ExceptionDecl, T,
+ ExceptionDecl->getInnerLocStart(),
+ ExceptionDecl->getLocation(),
+ ExceptionDecl->getIdentifier());
+ if (!Var || Var->isInvalidDecl())
+ return StmtError();
+ }
+
+ // Transform the actual exception handler.
+ StmtResult Handler = getDerived().TransformStmt(S->getHandlerBlock());
+ if (Handler.isInvalid())
+ return StmtError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ !Var &&
+ Handler.get() == S->getHandlerBlock())
+ return SemaRef.Owned(S);
+
+ return getDerived().RebuildCXXCatchStmt(S->getCatchLoc(),
+ Var,
+ Handler.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformCXXTryStmt(CXXTryStmt *S) {
+ // Transform the try block itself.
+ StmtResult TryBlock
+ = getDerived().TransformCompoundStmt(S->getTryBlock());
+ if (TryBlock.isInvalid())
+ return StmtError();
+
+ // Transform the handlers.
+ bool HandlerChanged = false;
+ ASTOwningVector<Stmt*> Handlers(SemaRef);
+ for (unsigned I = 0, N = S->getNumHandlers(); I != N; ++I) {
+ StmtResult Handler
+ = getDerived().TransformCXXCatchStmt(S->getHandler(I));
+ if (Handler.isInvalid())
+ return StmtError();
+
+ HandlerChanged = HandlerChanged || Handler.get() != S->getHandler(I);
+ Handlers.push_back(Handler.takeAs<Stmt>());
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ TryBlock.get() == S->getTryBlock() &&
+ !HandlerChanged)
+ return SemaRef.Owned(S);
+
+ return getDerived().RebuildCXXTryStmt(S->getTryLoc(), TryBlock.get(),
+ move_arg(Handlers));
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformCXXForRangeStmt(CXXForRangeStmt *S) {
+ StmtResult Range = getDerived().TransformStmt(S->getRangeStmt());
+ if (Range.isInvalid())
+ return StmtError();
+
+ StmtResult BeginEnd = getDerived().TransformStmt(S->getBeginEndStmt());
+ if (BeginEnd.isInvalid())
+ return StmtError();
+
+ ExprResult Cond = getDerived().TransformExpr(S->getCond());
+ if (Cond.isInvalid())
+ return StmtError();
+ if (Cond.get())
+ Cond = SemaRef.CheckBooleanCondition(Cond.take(), S->getColonLoc());
+ if (Cond.isInvalid())
+ return StmtError();
+ if (Cond.get())
+ Cond = SemaRef.MaybeCreateExprWithCleanups(Cond.take());
+
+ ExprResult Inc = getDerived().TransformExpr(S->getInc());
+ if (Inc.isInvalid())
+ return StmtError();
+ if (Inc.get())
+ Inc = SemaRef.MaybeCreateExprWithCleanups(Inc.take());
+
+ StmtResult LoopVar = getDerived().TransformStmt(S->getLoopVarStmt());
+ if (LoopVar.isInvalid())
+ return StmtError();
+
+ StmtResult NewStmt = S;
+ if (getDerived().AlwaysRebuild() ||
+ Range.get() != S->getRangeStmt() ||
+ BeginEnd.get() != S->getBeginEndStmt() ||
+ Cond.get() != S->getCond() ||
+ Inc.get() != S->getInc() ||
+ LoopVar.get() != S->getLoopVarStmt())
+ NewStmt = getDerived().RebuildCXXForRangeStmt(S->getForLoc(),
+ S->getColonLoc(), Range.get(),
+ BeginEnd.get(), Cond.get(),
+ Inc.get(), LoopVar.get(),
+ S->getRParenLoc());
+
+ StmtResult Body = getDerived().TransformStmt(S->getBody());
+ if (Body.isInvalid())
+ return StmtError();
+
+ // Body has changed but we didn't rebuild the for-range statement. Rebuild
+ // it now so we have a new statement to attach the body to.
+ if (Body.get() != S->getBody() && NewStmt.get() == S)
+ NewStmt = getDerived().RebuildCXXForRangeStmt(S->getForLoc(),
+ S->getColonLoc(), Range.get(),
+ BeginEnd.get(), Cond.get(),
+ Inc.get(), LoopVar.get(),
+ S->getRParenLoc());
+
+ if (NewStmt.get() == S)
+ return SemaRef.Owned(S);
+
+ return FinishCXXForRangeStmt(NewStmt.get(), Body.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformMSDependentExistsStmt(
+ MSDependentExistsStmt *S) {
+ // Transform the nested-name-specifier, if any.
+ NestedNameSpecifierLoc QualifierLoc;
+ if (S->getQualifierLoc()) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(S->getQualifierLoc());
+ if (!QualifierLoc)
+ return StmtError();
+ }
+
+ // Transform the declaration name.
+ DeclarationNameInfo NameInfo = S->getNameInfo();
+ if (NameInfo.getName()) {
+ NameInfo = getDerived().TransformDeclarationNameInfo(NameInfo);
+ if (!NameInfo.getName())
+ return StmtError();
+ }
+
+ // Check whether anything changed.
+ if (!getDerived().AlwaysRebuild() &&
+ QualifierLoc == S->getQualifierLoc() &&
+ NameInfo.getName() == S->getNameInfo().getName())
+ return S;
+
+ // Determine whether this name exists, if we can.
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+ bool Dependent = false;
+ switch (getSema().CheckMicrosoftIfExistsSymbol(/*S=*/0, SS, NameInfo)) {
+ case Sema::IER_Exists:
+ if (S->isIfExists())
+ break;
+
+ return new (getSema().Context) NullStmt(S->getKeywordLoc());
+
+ case Sema::IER_DoesNotExist:
+ if (S->isIfNotExists())
+ break;
+
+ return new (getSema().Context) NullStmt(S->getKeywordLoc());
+
+ case Sema::IER_Dependent:
+ Dependent = true;
+ break;
+
+ case Sema::IER_Error:
+ return StmtError();
+ }
+
+ // We need to continue with the instantiation, so do so now.
+ StmtResult SubStmt = getDerived().TransformCompoundStmt(S->getSubStmt());
+ if (SubStmt.isInvalid())
+ return StmtError();
+
+ // If we have resolved the name, just transform to the substatement.
+ if (!Dependent)
+ return SubStmt;
+
+ // The name is still dependent, so build a dependent expression again.
+ return getDerived().RebuildMSDependentExistsStmt(S->getKeywordLoc(),
+ S->isIfExists(),
+ QualifierLoc,
+ NameInfo,
+ SubStmt.get());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformSEHTryStmt(SEHTryStmt *S) {
+ StmtResult TryBlock; // = getDerived().TransformCompoundStmt(S->getTryBlock());
+ if(TryBlock.isInvalid()) return StmtError();
+
+ StmtResult Handler = getDerived().TransformSEHHandler(S->getHandler());
+ if(!getDerived().AlwaysRebuild() &&
+ TryBlock.get() == S->getTryBlock() &&
+ Handler.get() == S->getHandler())
+ return SemaRef.Owned(S);
+
+ return getDerived().RebuildSEHTryStmt(S->getIsCXXTry(),
+ S->getTryLoc(),
+ TryBlock.take(),
+ Handler.take());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformSEHFinallyStmt(SEHFinallyStmt *S) {
+  StmtResult Block; // = getDerived().TransformCompoundStmt(S->getBlock());
+ if(Block.isInvalid()) return StmtError();
+
+ return getDerived().RebuildSEHFinallyStmt(S->getFinallyLoc(),
+ Block.take());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformSEHExceptStmt(SEHExceptStmt *S) {
+ ExprResult FilterExpr = getDerived().TransformExpr(S->getFilterExpr());
+ if(FilterExpr.isInvalid()) return StmtError();
+
+  StmtResult Block; // = getDerived().TransformCompoundStmt(S->getBlock());
+ if(Block.isInvalid()) return StmtError();
+
+ return getDerived().RebuildSEHExceptStmt(S->getExceptLoc(),
+ FilterExpr.take(),
+ Block.take());
+}
+
+template<typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformSEHHandler(Stmt *Handler) {
+ if(isa<SEHFinallyStmt>(Handler))
+ return getDerived().TransformSEHFinallyStmt(cast<SEHFinallyStmt>(Handler));
+ else
+ return getDerived().TransformSEHExceptStmt(cast<SEHExceptStmt>(Handler));
+}
+
+//===----------------------------------------------------------------------===//
+// Expression transformation
+//===----------------------------------------------------------------------===//
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformPredefinedExpr(PredefinedExpr *E) {
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformDeclRefExpr(DeclRefExpr *E) {
+ NestedNameSpecifierLoc QualifierLoc;
+ if (E->getQualifierLoc()) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc());
+ if (!QualifierLoc)
+ return ExprError();
+ }
+
+ ValueDecl *ND
+ = cast_or_null<ValueDecl>(getDerived().TransformDecl(E->getLocation(),
+ E->getDecl()));
+ if (!ND)
+ return ExprError();
+
+ DeclarationNameInfo NameInfo = E->getNameInfo();
+ if (NameInfo.getName()) {
+ NameInfo = getDerived().TransformDeclarationNameInfo(NameInfo);
+ if (!NameInfo.getName())
+ return ExprError();
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ QualifierLoc == E->getQualifierLoc() &&
+ ND == E->getDecl() &&
+ NameInfo.getName() == E->getDecl()->getDeclName() &&
+ !E->hasExplicitTemplateArgs()) {
+
+ // Mark it referenced in the new context regardless.
+ // FIXME: this is a bit instantiation-specific.
+ SemaRef.MarkDeclRefReferenced(E);
+
+ return SemaRef.Owned(E);
+ }
+
+ TemplateArgumentListInfo TransArgs, *TemplateArgs = 0;
+ if (E->hasExplicitTemplateArgs()) {
+ TemplateArgs = &TransArgs;
+ TransArgs.setLAngleLoc(E->getLAngleLoc());
+ TransArgs.setRAngleLoc(E->getRAngleLoc());
+ if (getDerived().TransformTemplateArguments(E->getTemplateArgs(),
+ E->getNumTemplateArgs(),
+ TransArgs))
+ return ExprError();
+ }
+
+ return getDerived().RebuildDeclRefExpr(QualifierLoc, ND, NameInfo,
+ TemplateArgs);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformIntegerLiteral(IntegerLiteral *E) {
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformFloatingLiteral(FloatingLiteral *E) {
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformImaginaryLiteral(ImaginaryLiteral *E) {
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformStringLiteral(StringLiteral *E) {
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCharacterLiteral(CharacterLiteral *E) {
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformUserDefinedLiteral(UserDefinedLiteral *E) {
+ return SemaRef.MaybeBindToTemporary(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformGenericSelectionExpr(GenericSelectionExpr *E) {
+ ExprResult ControllingExpr =
+ getDerived().TransformExpr(E->getControllingExpr());
+ if (ControllingExpr.isInvalid())
+ return ExprError();
+
+ SmallVector<Expr *, 4> AssocExprs;
+ SmallVector<TypeSourceInfo *, 4> AssocTypes;
+ for (unsigned i = 0; i != E->getNumAssocs(); ++i) {
+ TypeSourceInfo *TS = E->getAssocTypeSourceInfo(i);
+ if (TS) {
+ TypeSourceInfo *AssocType = getDerived().TransformType(TS);
+ if (!AssocType)
+ return ExprError();
+ AssocTypes.push_back(AssocType);
+ } else {
+ AssocTypes.push_back(0);
+ }
+
+ ExprResult AssocExpr = getDerived().TransformExpr(E->getAssocExpr(i));
+ if (AssocExpr.isInvalid())
+ return ExprError();
+ AssocExprs.push_back(AssocExpr.release());
+ }
+
+ return getDerived().RebuildGenericSelectionExpr(E->getGenericLoc(),
+ E->getDefaultLoc(),
+ E->getRParenLoc(),
+ ControllingExpr.release(),
+ AssocTypes.data(),
+ AssocExprs.data(),
+ E->getNumAssocs());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformParenExpr(ParenExpr *E) {
+ ExprResult SubExpr = getDerived().TransformExpr(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getSubExpr())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildParenExpr(SubExpr.get(), E->getLParen(),
+ E->getRParen());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformUnaryOperator(UnaryOperator *E) {
+ ExprResult SubExpr = getDerived().TransformExpr(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getSubExpr())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildUnaryOperator(E->getOperatorLoc(),
+ E->getOpcode(),
+ SubExpr.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformOffsetOfExpr(OffsetOfExpr *E) {
+ // Transform the type.
+ TypeSourceInfo *Type = getDerived().TransformType(E->getTypeSourceInfo());
+ if (!Type)
+ return ExprError();
+
+ // Transform all of the components into components similar to what the
+ // parser uses.
+ // FIXME: It would be slightly more efficient in the non-dependent case to
+ // just map FieldDecls, rather than requiring the rebuilder to look for
+ // the fields again. However, __builtin_offsetof is rare enough in
+ // template code that we don't care.
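+  // For example, in __builtin_offsetof(S, a[i].b) the components are the
+  // field 'a', the array index 'i' (whose index expression is transformed
+  // below), and the field 'b'.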
+ bool ExprChanged = false;
+ typedef Sema::OffsetOfComponent Component;
+ typedef OffsetOfExpr::OffsetOfNode Node;
+ SmallVector<Component, 4> Components;
+ for (unsigned I = 0, N = E->getNumComponents(); I != N; ++I) {
+ const Node &ON = E->getComponent(I);
+ Component Comp;
+ Comp.isBrackets = true;
+ Comp.LocStart = ON.getSourceRange().getBegin();
+ Comp.LocEnd = ON.getSourceRange().getEnd();
+ switch (ON.getKind()) {
+ case Node::Array: {
+ Expr *FromIndex = E->getIndexExpr(ON.getArrayExprIndex());
+ ExprResult Index = getDerived().TransformExpr(FromIndex);
+ if (Index.isInvalid())
+ return ExprError();
+
+ ExprChanged = ExprChanged || Index.get() != FromIndex;
+ Comp.isBrackets = true;
+ Comp.U.E = Index.get();
+ break;
+ }
+
+ case Node::Field:
+ case Node::Identifier:
+ Comp.isBrackets = false;
+ Comp.U.IdentInfo = ON.getFieldName();
+ if (!Comp.U.IdentInfo)
+ continue;
+
+ break;
+
+ case Node::Base:
+ // Will be recomputed during the rebuild.
+ continue;
+ }
+
+ Components.push_back(Comp);
+ }
+
+ // If nothing changed, retain the existing expression.
+ if (!getDerived().AlwaysRebuild() &&
+ Type == E->getTypeSourceInfo() &&
+ !ExprChanged)
+ return SemaRef.Owned(E);
+
+ // Build a new offsetof expression.
+ return getDerived().RebuildOffsetOfExpr(E->getOperatorLoc(), Type,
+ Components.data(), Components.size(),
+ E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformOpaqueValueExpr(OpaqueValueExpr *E) {
+ assert(getDerived().AlreadyTransformed(E->getType()) &&
+ "opaque value expression requires transformation");
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformPseudoObjectExpr(PseudoObjectExpr *E) {
+ // Rebuild the syntactic form. The original syntactic form has
+ // opaque-value expressions in it, so strip those away and rebuild
+ // the result. This is a really awful way of doing this, but the
+ // better solution (rebuilding the semantic expressions and
+ // rebinding OVEs as necessary) doesn't work; we'd need
+ // TreeTransform to not strip away implicit conversions.
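+  // (For instance, an Objective-C property access such as 'obj.prop += 1' is
+  // a pseudo-object expression: the syntactic form is what was written, while
+  // the semantic form binds opaque values around the getter/setter calls.)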
+ Expr *newSyntacticForm = SemaRef.recreateSyntacticForm(E);
+ ExprResult result = getDerived().TransformExpr(newSyntacticForm);
+ if (result.isInvalid()) return ExprError();
+
+ // If that gives us a pseudo-object result back, the pseudo-object
+ // expression must have been an lvalue-to-rvalue conversion which we
+ // should reapply.
+ if (result.get()->hasPlaceholderType(BuiltinType::PseudoObject))
+ result = SemaRef.checkPseudoObjectRValue(result.take());
+
+ return result;
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformUnaryExprOrTypeTraitExpr(
+ UnaryExprOrTypeTraitExpr *E) {
+ if (E->isArgumentType()) {
+ TypeSourceInfo *OldT = E->getArgumentTypeInfo();
+
+ TypeSourceInfo *NewT = getDerived().TransformType(OldT);
+ if (!NewT)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && OldT == NewT)
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildUnaryExprOrTypeTrait(NewT, E->getOperatorLoc(),
+ E->getKind(),
+ E->getSourceRange());
+ }
+
+ // C++0x [expr.sizeof]p1:
+ // The operand is either an expression, which is an unevaluated operand
+ // [...]
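+  // For example, in 'sizeof(f())' the call to 'f' is never evaluated.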
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+
+ ExprResult SubExpr = getDerived().TransformExpr(E->getArgumentExpr());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getArgumentExpr())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildUnaryExprOrTypeTrait(SubExpr.get(),
+ E->getOperatorLoc(),
+ E->getKind(),
+ E->getSourceRange());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformArraySubscriptExpr(ArraySubscriptExpr *E) {
+ ExprResult LHS = getDerived().TransformExpr(E->getLHS());
+ if (LHS.isInvalid())
+ return ExprError();
+
+ ExprResult RHS = getDerived().TransformExpr(E->getRHS());
+ if (RHS.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ LHS.get() == E->getLHS() &&
+ RHS.get() == E->getRHS())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildArraySubscriptExpr(LHS.get(),
+ /*FIXME:*/E->getLHS()->getLocStart(),
+ RHS.get(),
+ E->getRBracketLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCallExpr(CallExpr *E) {
+ // Transform the callee.
+ ExprResult Callee = getDerived().TransformExpr(E->getCallee());
+ if (Callee.isInvalid())
+ return ExprError();
+
+ // Transform arguments.
+ bool ArgChanged = false;
+ ASTOwningVector<Expr*> Args(SemaRef);
+ if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ &ArgChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Callee.get() == E->getCallee() &&
+ !ArgChanged)
+    return SemaRef.MaybeBindToTemporary(E);
+
+ // FIXME: Wrong source location information for the '('.
+ SourceLocation FakeLParenLoc
+ = ((Expr *)Callee.get())->getSourceRange().getBegin();
+ return getDerived().RebuildCallExpr(Callee.get(), FakeLParenLoc,
+ move_arg(Args),
+ E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformMemberExpr(MemberExpr *E) {
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ NestedNameSpecifierLoc QualifierLoc;
+ if (E->hasQualifier()) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc());
+
+ if (!QualifierLoc)
+ return ExprError();
+ }
+ SourceLocation TemplateKWLoc = E->getTemplateKeywordLoc();
+
+ ValueDecl *Member
+ = cast_or_null<ValueDecl>(getDerived().TransformDecl(E->getMemberLoc(),
+ E->getMemberDecl()));
+ if (!Member)
+ return ExprError();
+
+ NamedDecl *FoundDecl = E->getFoundDecl();
+ if (FoundDecl == E->getMemberDecl()) {
+ FoundDecl = Member;
+ } else {
+ FoundDecl = cast_or_null<NamedDecl>(
+ getDerived().TransformDecl(E->getMemberLoc(), FoundDecl));
+ if (!FoundDecl)
+ return ExprError();
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ Base.get() == E->getBase() &&
+ QualifierLoc == E->getQualifierLoc() &&
+ Member == E->getMemberDecl() &&
+ FoundDecl == E->getFoundDecl() &&
+ !E->hasExplicitTemplateArgs()) {
+
+ // Mark it referenced in the new context regardless.
+ // FIXME: this is a bit instantiation-specific.
+ SemaRef.MarkMemberReferenced(E);
+
+ return SemaRef.Owned(E);
+ }
+
+ TemplateArgumentListInfo TransArgs;
+ if (E->hasExplicitTemplateArgs()) {
+ TransArgs.setLAngleLoc(E->getLAngleLoc());
+ TransArgs.setRAngleLoc(E->getRAngleLoc());
+ if (getDerived().TransformTemplateArguments(E->getTemplateArgs(),
+ E->getNumTemplateArgs(),
+ TransArgs))
+ return ExprError();
+ }
+
+ // FIXME: Bogus source location for the operator
+ SourceLocation FakeOperatorLoc
+ = SemaRef.PP.getLocForEndOfToken(E->getBase()->getSourceRange().getEnd());
+
+ // FIXME: to do this check properly, we will need to preserve the
+ // first-qualifier-in-scope here, just in case we had a dependent
+ // base (and therefore couldn't do the check) and a
+ // nested-name-qualifier (and therefore could do the lookup).
+ NamedDecl *FirstQualifierInScope = 0;
+
+ return getDerived().RebuildMemberExpr(Base.get(), FakeOperatorLoc,
+ E->isArrow(),
+ QualifierLoc,
+ TemplateKWLoc,
+ E->getMemberNameInfo(),
+ Member,
+ FoundDecl,
+ (E->hasExplicitTemplateArgs()
+ ? &TransArgs : 0),
+ FirstQualifierInScope);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformBinaryOperator(BinaryOperator *E) {
+ ExprResult LHS = getDerived().TransformExpr(E->getLHS());
+ if (LHS.isInvalid())
+ return ExprError();
+
+ ExprResult RHS = getDerived().TransformExpr(E->getRHS());
+ if (RHS.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ LHS.get() == E->getLHS() &&
+ RHS.get() == E->getRHS())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildBinaryOperator(E->getOperatorLoc(), E->getOpcode(),
+ LHS.get(), RHS.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCompoundAssignOperator(
+ CompoundAssignOperator *E) {
+ return getDerived().TransformBinaryOperator(E);
+}
+
+template<typename Derived>
+ExprResult TreeTransform<Derived>::
+TransformBinaryConditionalOperator(BinaryConditionalOperator *e) {
+ // Just rebuild the common and RHS expressions and see whether we
+ // get any changes.
+
+ ExprResult commonExpr = getDerived().TransformExpr(e->getCommon());
+ if (commonExpr.isInvalid())
+ return ExprError();
+
+ ExprResult rhs = getDerived().TransformExpr(e->getFalseExpr());
+ if (rhs.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ commonExpr.get() == e->getCommon() &&
+ rhs.get() == e->getFalseExpr())
+ return SemaRef.Owned(e);
+
+ return getDerived().RebuildConditionalOperator(commonExpr.take(),
+ e->getQuestionLoc(),
+ 0,
+ e->getColonLoc(),
+ rhs.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformConditionalOperator(ConditionalOperator *E) {
+ ExprResult Cond = getDerived().TransformExpr(E->getCond());
+ if (Cond.isInvalid())
+ return ExprError();
+
+ ExprResult LHS = getDerived().TransformExpr(E->getLHS());
+ if (LHS.isInvalid())
+ return ExprError();
+
+ ExprResult RHS = getDerived().TransformExpr(E->getRHS());
+ if (RHS.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Cond.get() == E->getCond() &&
+ LHS.get() == E->getLHS() &&
+ RHS.get() == E->getRHS())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildConditionalOperator(Cond.get(),
+ E->getQuestionLoc(),
+ LHS.get(),
+ E->getColonLoc(),
+ RHS.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformImplicitCastExpr(ImplicitCastExpr *E) {
+ // Implicit casts are eliminated during transformation, since they
+ // will be recomputed by semantic analysis after transformation.
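+  // For example, an implicit integral conversion wrapping 'i' in 'long x = i;'
+  // is dropped here and re-derived by Sema from the transformed operand.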
+ return getDerived().TransformExpr(E->getSubExprAsWritten());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCStyleCastExpr(CStyleCastExpr *E) {
+ TypeSourceInfo *Type = getDerived().TransformType(E->getTypeInfoAsWritten());
+ if (!Type)
+ return ExprError();
+
+ ExprResult SubExpr
+ = getDerived().TransformExpr(E->getSubExprAsWritten());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Type == E->getTypeInfoAsWritten() &&
+ SubExpr.get() == E->getSubExpr())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildCStyleCastExpr(E->getLParenLoc(),
+ Type,
+ E->getRParenLoc(),
+ SubExpr.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ TypeSourceInfo *OldT = E->getTypeSourceInfo();
+ TypeSourceInfo *NewT = getDerived().TransformType(OldT);
+ if (!NewT)
+ return ExprError();
+
+ ExprResult Init = getDerived().TransformExpr(E->getInitializer());
+ if (Init.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ OldT == NewT &&
+ Init.get() == E->getInitializer())
+ return SemaRef.MaybeBindToTemporary(E);
+
+ // Note: the expression type doesn't necessarily match the
+ // type-as-written, but that's okay, because it should always be
+ // derivable from the initializer.
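+  // (For instance, '(int[]){1, 2, 3}' is written with an incomplete array
+  // type but has type 'int[3]', deduced from its initializer.)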
+
+ return getDerived().RebuildCompoundLiteralExpr(E->getLParenLoc(), NewT,
+ /*FIXME:*/E->getInitializer()->getLocEnd(),
+ Init.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformExtVectorElementExpr(ExtVectorElementExpr *E) {
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Base.get() == E->getBase())
+ return SemaRef.Owned(E);
+
+ // FIXME: Bad source location
+ SourceLocation FakeOperatorLoc
+ = SemaRef.PP.getLocForEndOfToken(E->getBase()->getLocEnd());
+ return getDerived().RebuildExtVectorElementExpr(Base.get(), FakeOperatorLoc,
+ E->getAccessorLoc(),
+ E->getAccessor());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformInitListExpr(InitListExpr *E) {
+ bool InitChanged = false;
+
+ ASTOwningVector<Expr*, 4> Inits(SemaRef);
+ if (getDerived().TransformExprs(E->getInits(), E->getNumInits(), false,
+ Inits, &InitChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && !InitChanged)
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildInitList(E->getLBraceLoc(), move_arg(Inits),
+ E->getRBraceLoc(), E->getType());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformDesignatedInitExpr(DesignatedInitExpr *E) {
+ Designation Desig;
+
+  // Transform the initializer value.
+ ExprResult Init = getDerived().TransformExpr(E->getInit());
+ if (Init.isInvalid())
+ return ExprError();
+
+  // Transform the designators.
+ ASTOwningVector<Expr*, 4> ArrayExprs(SemaRef);
+ bool ExprChanged = false;
+ for (DesignatedInitExpr::designators_iterator D = E->designators_begin(),
+ DEnd = E->designators_end();
+ D != DEnd; ++D) {
+ if (D->isFieldDesignator()) {
+ Desig.AddDesignator(Designator::getField(D->getFieldName(),
+ D->getDotLoc(),
+ D->getFieldLoc()));
+ continue;
+ }
+
+ if (D->isArrayDesignator()) {
+ ExprResult Index = getDerived().TransformExpr(E->getArrayIndex(*D));
+ if (Index.isInvalid())
+ return ExprError();
+
+ Desig.AddDesignator(Designator::getArray(Index.get(),
+ D->getLBracketLoc()));
+
+ ExprChanged = ExprChanged || Init.get() != E->getArrayIndex(*D);
+ ArrayExprs.push_back(Index.release());
+ continue;
+ }
+
+ assert(D->isArrayRangeDesignator() && "New kind of designator?");
+ ExprResult Start
+ = getDerived().TransformExpr(E->getArrayRangeStart(*D));
+ if (Start.isInvalid())
+ return ExprError();
+
+ ExprResult End = getDerived().TransformExpr(E->getArrayRangeEnd(*D));
+ if (End.isInvalid())
+ return ExprError();
+
+ Desig.AddDesignator(Designator::getArrayRange(Start.get(),
+ End.get(),
+ D->getLBracketLoc(),
+ D->getEllipsisLoc()));
+
+ ExprChanged = ExprChanged || Start.get() != E->getArrayRangeStart(*D) ||
+ End.get() != E->getArrayRangeEnd(*D);
+
+ ArrayExprs.push_back(Start.release());
+ ArrayExprs.push_back(End.release());
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ Init.get() == E->getInit() &&
+ !ExprChanged)
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildDesignatedInitExpr(Desig, move_arg(ArrayExprs),
+ E->getEqualOrColonLoc(),
+ E->usesGNUSyntax(), Init.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformImplicitValueInitExpr(
+ ImplicitValueInitExpr *E) {
+ TemporaryBase Rebase(*this, E->getLocStart(), DeclarationName());
+
+ // FIXME: Will we ever have proper type location here? Will we actually
+ // need to transform the type?
+ QualType T = getDerived().TransformType(E->getType());
+ if (T.isNull())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ T == E->getType())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildImplicitValueInitExpr(T);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformVAArgExpr(VAArgExpr *E) {
+ TypeSourceInfo *TInfo = getDerived().TransformType(E->getWrittenTypeInfo());
+ if (!TInfo)
+ return ExprError();
+
+ ExprResult SubExpr = getDerived().TransformExpr(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ TInfo == E->getWrittenTypeInfo() &&
+ SubExpr.get() == E->getSubExpr())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildVAArgExpr(E->getBuiltinLoc(), SubExpr.get(),
+ TInfo, E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformParenListExpr(ParenListExpr *E) {
+ bool ArgumentChanged = false;
+ ASTOwningVector<Expr*, 4> Inits(SemaRef);
+ if (TransformExprs(E->getExprs(), E->getNumExprs(), true, Inits,
+ &ArgumentChanged))
+ return ExprError();
+
+ return getDerived().RebuildParenListExpr(E->getLParenLoc(),
+ move_arg(Inits),
+ E->getRParenLoc());
+}
+
+/// \brief Transform an address-of-label expression.
+///
+/// By default, the transformation of an address-of-label expression always
+/// rebuilds the expression, so that the label identifier can be resolved to
+/// the corresponding label statement by semantic analysis.
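+/// (Address-of-label is the GNU '&&label' extension, typically paired with a
+/// computed goto such as 'goto *ptr;'.)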
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformAddrLabelExpr(AddrLabelExpr *E) {
+ Decl *LD = getDerived().TransformDecl(E->getLabel()->getLocation(),
+ E->getLabel());
+ if (!LD)
+ return ExprError();
+
+ return getDerived().RebuildAddrLabelExpr(E->getAmpAmpLoc(), E->getLabelLoc(),
+ cast<LabelDecl>(LD));
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformStmtExpr(StmtExpr *E) {
+ SemaRef.ActOnStartStmtExpr();
+ StmtResult SubStmt
+ = getDerived().TransformCompoundStmt(E->getSubStmt(), true);
+ if (SubStmt.isInvalid()) {
+ SemaRef.ActOnStmtExprError();
+ return ExprError();
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ SubStmt.get() == E->getSubStmt()) {
+ // Calling this an 'error' is unintuitive, but it does the right thing.
+ SemaRef.ActOnStmtExprError();
+ return SemaRef.MaybeBindToTemporary(E);
+ }
+
+ return getDerived().RebuildStmtExpr(E->getLParenLoc(),
+ SubStmt.get(),
+ E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformChooseExpr(ChooseExpr *E) {
+ ExprResult Cond = getDerived().TransformExpr(E->getCond());
+ if (Cond.isInvalid())
+ return ExprError();
+
+ ExprResult LHS = getDerived().TransformExpr(E->getLHS());
+ if (LHS.isInvalid())
+ return ExprError();
+
+ ExprResult RHS = getDerived().TransformExpr(E->getRHS());
+ if (RHS.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Cond.get() == E->getCond() &&
+ LHS.get() == E->getLHS() &&
+ RHS.get() == E->getRHS())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildChooseExpr(E->getBuiltinLoc(),
+ Cond.get(), LHS.get(), RHS.get(),
+ E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformGNUNullExpr(GNUNullExpr *E) {
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
+ switch (E->getOperator()) {
+ case OO_New:
+ case OO_Delete:
+ case OO_Array_New:
+ case OO_Array_Delete:
+ llvm_unreachable("new and delete operators cannot use CXXOperatorCallExpr");
+
+ case OO_Call: {
+ // This is a call to an object's operator().
+ assert(E->getNumArgs() >= 1 && "Object call is missing arguments");
+
+ // Transform the object itself.
+ ExprResult Object = getDerived().TransformExpr(E->getArg(0));
+ if (Object.isInvalid())
+ return ExprError();
+
+ // FIXME: Poor location information
+ SourceLocation FakeLParenLoc
+ = SemaRef.PP.getLocForEndOfToken(
+ static_cast<Expr *>(Object.get())->getLocEnd());
+
+ // Transform the call arguments.
+ ASTOwningVector<Expr*> Args(SemaRef);
+ if (getDerived().TransformExprs(E->getArgs() + 1, E->getNumArgs() - 1, true,
+ Args))
+ return ExprError();
+
+ return getDerived().RebuildCallExpr(Object.get(), FakeLParenLoc,
+ move_arg(Args),
+ E->getLocEnd());
+ }
+
+#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
+ case OO_##Name:
+#define OVERLOADED_OPERATOR_MULTI(Name,Spelling,Unary,Binary,MemberOnly)
+#include "clang/Basic/OperatorKinds.def"
+ case OO_Subscript:
+ // Handled below.
+ break;
+
+ case OO_Conditional:
+ llvm_unreachable("conditional operator is not actually overloadable");
+
+ case OO_None:
+ case NUM_OVERLOADED_OPERATORS:
+ llvm_unreachable("not an overloaded operator?");
+ }
+
+ ExprResult Callee = getDerived().TransformExpr(E->getCallee());
+ if (Callee.isInvalid())
+ return ExprError();
+
+ ExprResult First = getDerived().TransformExpr(E->getArg(0));
+ if (First.isInvalid())
+ return ExprError();
+
+ ExprResult Second;
+ if (E->getNumArgs() == 2) {
+ Second = getDerived().TransformExpr(E->getArg(1));
+ if (Second.isInvalid())
+ return ExprError();
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ Callee.get() == E->getCallee() &&
+ First.get() == E->getArg(0) &&
+ (E->getNumArgs() != 2 || Second.get() == E->getArg(1)))
+ return SemaRef.MaybeBindToTemporary(E);
+
+ return getDerived().RebuildCXXOperatorCallExpr(E->getOperator(),
+ E->getOperatorLoc(),
+ Callee.get(),
+ First.get(),
+ Second.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXMemberCallExpr(CXXMemberCallExpr *E) {
+ return getDerived().TransformCallExpr(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
+ // Transform the callee.
+ ExprResult Callee = getDerived().TransformExpr(E->getCallee());
+ if (Callee.isInvalid())
+ return ExprError();
+
+ // Transform exec config.
+ ExprResult EC = getDerived().TransformCallExpr(E->getConfig());
+ if (EC.isInvalid())
+ return ExprError();
+
+ // Transform arguments.
+ bool ArgChanged = false;
+ ASTOwningVector<Expr*> Args(SemaRef);
+ if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ &ArgChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Callee.get() == E->getCallee() &&
+ !ArgChanged)
+ return SemaRef.MaybeBindToTemporary(E);
+
+ // FIXME: Wrong source location information for the '('.
+ SourceLocation FakeLParenLoc
+ = ((Expr *)Callee.get())->getSourceRange().getBegin();
+ return getDerived().RebuildCallExpr(Callee.get(), FakeLParenLoc,
+ move_arg(Args),
+ E->getRParenLoc(), EC.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXNamedCastExpr(CXXNamedCastExpr *E) {
+ TypeSourceInfo *Type = getDerived().TransformType(E->getTypeInfoAsWritten());
+ if (!Type)
+ return ExprError();
+
+ ExprResult SubExpr
+ = getDerived().TransformExpr(E->getSubExprAsWritten());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Type == E->getTypeInfoAsWritten() &&
+ SubExpr.get() == E->getSubExpr())
+ return SemaRef.Owned(E);
+
+ // FIXME: Poor source location information here.
+ SourceLocation FakeLAngleLoc
+ = SemaRef.PP.getLocForEndOfToken(E->getOperatorLoc());
+ SourceLocation FakeRAngleLoc = E->getSubExpr()->getSourceRange().getBegin();
+ SourceLocation FakeRParenLoc
+ = SemaRef.PP.getLocForEndOfToken(
+ E->getSubExpr()->getSourceRange().getEnd());
+ return getDerived().RebuildCXXNamedCastExpr(E->getOperatorLoc(),
+ E->getStmtClass(),
+ FakeLAngleLoc,
+ Type,
+ FakeRAngleLoc,
+ FakeRAngleLoc,
+ SubExpr.get(),
+ FakeRParenLoc);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXStaticCastExpr(CXXStaticCastExpr *E) {
+ return getDerived().TransformCXXNamedCastExpr(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXDynamicCastExpr(CXXDynamicCastExpr *E) {
+ return getDerived().TransformCXXNamedCastExpr(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXReinterpretCastExpr(
+ CXXReinterpretCastExpr *E) {
+ return getDerived().TransformCXXNamedCastExpr(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXConstCastExpr(CXXConstCastExpr *E) {
+ return getDerived().TransformCXXNamedCastExpr(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXFunctionalCastExpr(
+ CXXFunctionalCastExpr *E) {
+ TypeSourceInfo *Type = getDerived().TransformType(E->getTypeInfoAsWritten());
+ if (!Type)
+ return ExprError();
+
+ ExprResult SubExpr
+ = getDerived().TransformExpr(E->getSubExprAsWritten());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Type == E->getTypeInfoAsWritten() &&
+ SubExpr.get() == E->getSubExpr())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildCXXFunctionalCastExpr(Type,
+ /*FIXME:*/E->getSubExpr()->getLocStart(),
+ SubExpr.get(),
+ E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXTypeidExpr(CXXTypeidExpr *E) {
+ if (E->isTypeOperand()) {
+ TypeSourceInfo *TInfo
+ = getDerived().TransformType(E->getTypeOperandSourceInfo());
+ if (!TInfo)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ TInfo == E->getTypeOperandSourceInfo())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildCXXTypeidExpr(E->getType(),
+ E->getLocStart(),
+ TInfo,
+ E->getLocEnd());
+ }
+
+ // We don't know whether the subexpression is potentially evaluated until
+ // after we perform semantic analysis. We speculatively assume it is
+ // unevaluated; it will get fixed later if the subexpression is in fact
+ // potentially evaluated.
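+  // (Per C++ [expr.typeid], the operand is potentially evaluated only when it
+  // is a glvalue of polymorphic class type, e.g. 'typeid(*p)' where 'p' points
+  // to a class with virtual functions.)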
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+
+ ExprResult SubExpr = getDerived().TransformExpr(E->getExprOperand());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ SubExpr.get() == E->getExprOperand())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildCXXTypeidExpr(E->getType(),
+ E->getLocStart(),
+ SubExpr.get(),
+ E->getLocEnd());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXUuidofExpr(CXXUuidofExpr *E) {
+ if (E->isTypeOperand()) {
+ TypeSourceInfo *TInfo
+ = getDerived().TransformType(E->getTypeOperandSourceInfo());
+ if (!TInfo)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ TInfo == E->getTypeOperandSourceInfo())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildCXXUuidofExpr(E->getType(),
+ E->getLocStart(),
+ TInfo,
+ E->getLocEnd());
+ }
+
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+
+ ExprResult SubExpr = getDerived().TransformExpr(E->getExprOperand());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ SubExpr.get() == E->getExprOperand())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildCXXUuidofExpr(E->getType(),
+ E->getLocStart(),
+ SubExpr.get(),
+ E->getLocEnd());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXNullPtrLiteralExpr(
+ CXXNullPtrLiteralExpr *E) {
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXThisExpr(CXXThisExpr *E) {
+ DeclContext *DC = getSema().getFunctionLevelDeclContext();
+ QualType T;
+ if (CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(DC))
+ T = MD->getThisType(getSema().Context);
+ else
+ T = getSema().Context.getPointerType(
+ getSema().Context.getRecordType(cast<CXXRecordDecl>(DC)));
+
+ if (!getDerived().AlwaysRebuild() && T == E->getType()) {
+ // Make sure that we capture 'this'.
+ getSema().CheckCXXThisCapture(E->getLocStart());
+ return SemaRef.Owned(E);
+ }
+
+ return getDerived().RebuildCXXThisExpr(E->getLocStart(), T, E->isImplicit());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXThrowExpr(CXXThrowExpr *E) {
+ ExprResult SubExpr = getDerived().TransformExpr(E->getSubExpr());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ SubExpr.get() == E->getSubExpr())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildCXXThrowExpr(E->getThrowLoc(), SubExpr.get(),
+ E->isThrownVariableInScope());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
+ ParmVarDecl *Param
+ = cast_or_null<ParmVarDecl>(getDerived().TransformDecl(E->getLocStart(),
+ E->getParam()));
+ if (!Param)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ Param == E->getParam())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildCXXDefaultArgExpr(E->getUsedLocation(), Param);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXScalarValueInitExpr(
+ CXXScalarValueInitExpr *E) {
+ TypeSourceInfo *T = getDerived().TransformType(E->getTypeSourceInfo());
+ if (!T)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ T == E->getTypeSourceInfo())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildCXXScalarValueInitExpr(T,
+ /*FIXME:*/T->getTypeLoc().getEndLoc(),
+ E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
+ // Transform the type that we're allocating
+ TypeSourceInfo *AllocTypeInfo
+ = getDerived().TransformType(E->getAllocatedTypeSourceInfo());
+ if (!AllocTypeInfo)
+ return ExprError();
+
+ // Transform the size of the array we're allocating (if any).
+ ExprResult ArraySize = getDerived().TransformExpr(E->getArraySize());
+ if (ArraySize.isInvalid())
+ return ExprError();
+
+ // Transform the placement arguments (if any).
+ bool ArgumentChanged = false;
+ ASTOwningVector<Expr*> PlacementArgs(SemaRef);
+ if (getDerived().TransformExprs(E->getPlacementArgs(),
+ E->getNumPlacementArgs(), true,
+ PlacementArgs, &ArgumentChanged))
+ return ExprError();
+
+ // Transform the initializer (if any).
+ Expr *OldInit = E->getInitializer();
+ ExprResult NewInit;
+ if (OldInit)
+ NewInit = getDerived().TransformExpr(OldInit);
+ if (NewInit.isInvalid())
+ return ExprError();
+
+ // Transform new operator and delete operator.
+ FunctionDecl *OperatorNew = 0;
+ if (E->getOperatorNew()) {
+ OperatorNew = cast_or_null<FunctionDecl>(
+ getDerived().TransformDecl(E->getLocStart(),
+ E->getOperatorNew()));
+ if (!OperatorNew)
+ return ExprError();
+ }
+
+ FunctionDecl *OperatorDelete = 0;
+ if (E->getOperatorDelete()) {
+ OperatorDelete = cast_or_null<FunctionDecl>(
+ getDerived().TransformDecl(E->getLocStart(),
+ E->getOperatorDelete()));
+ if (!OperatorDelete)
+ return ExprError();
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ AllocTypeInfo == E->getAllocatedTypeSourceInfo() &&
+ ArraySize.get() == E->getArraySize() &&
+ NewInit.get() == OldInit &&
+ OperatorNew == E->getOperatorNew() &&
+ OperatorDelete == E->getOperatorDelete() &&
+ !ArgumentChanged) {
+ // Mark any declarations we need as referenced.
+ // FIXME: instantiation-specific.
+ if (OperatorNew)
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorNew);
+ if (OperatorDelete)
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorDelete);
+
+ if (E->isArray() && !E->getAllocatedType()->isDependentType()) {
+ QualType ElementType
+ = SemaRef.Context.getBaseElementType(E->getAllocatedType());
+ if (const RecordType *RecordT = ElementType->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordT->getDecl());
+ if (CXXDestructorDecl *Destructor = SemaRef.LookupDestructor(Record)) {
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), Destructor);
+ }
+ }
+ }
+
+ return SemaRef.Owned(E);
+ }
+
+ QualType AllocType = AllocTypeInfo->getType();
+ if (!ArraySize.get()) {
+ // If no array size was specified, but the new expression was
+ // instantiated with an array type (e.g., "new T" where T is
+ // instantiated with "int[4]"), extract the outer bound from the
+ // array type as our array size. We do this with constant and
+ // dependently-sized array types.
+ const ArrayType *ArrayT = SemaRef.Context.getAsArrayType(AllocType);
+ if (!ArrayT) {
+ // Do nothing
+ } else if (const ConstantArrayType *ConsArrayT
+ = dyn_cast<ConstantArrayType>(ArrayT)) {
+ ArraySize
+ = SemaRef.Owned(IntegerLiteral::Create(SemaRef.Context,
+ ConsArrayT->getSize(),
+ SemaRef.Context.getSizeType(),
+ /*FIXME:*/E->getLocStart()));
+ AllocType = ConsArrayT->getElementType();
+ } else if (const DependentSizedArrayType *DepArrayT
+ = dyn_cast<DependentSizedArrayType>(ArrayT)) {
+ if (DepArrayT->getSizeExpr()) {
+ ArraySize = SemaRef.Owned(DepArrayT->getSizeExpr());
+ AllocType = DepArrayT->getElementType();
+ }
+ }
+ }
+
+ return getDerived().RebuildCXXNewExpr(E->getLocStart(),
+ E->isGlobalNew(),
+ /*FIXME:*/E->getLocStart(),
+ move_arg(PlacementArgs),
+ /*FIXME:*/E->getLocStart(),
+ E->getTypeIdParens(),
+ AllocType,
+ AllocTypeInfo,
+ ArraySize.get(),
+ E->getDirectInitRange(),
+ NewInit.take());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXDeleteExpr(CXXDeleteExpr *E) {
+ ExprResult Operand = getDerived().TransformExpr(E->getArgument());
+ if (Operand.isInvalid())
+ return ExprError();
+
+ // Transform the delete operator, if known.
+ FunctionDecl *OperatorDelete = 0;
+ if (E->getOperatorDelete()) {
+ OperatorDelete = cast_or_null<FunctionDecl>(
+ getDerived().TransformDecl(E->getLocStart(),
+ E->getOperatorDelete()));
+ if (!OperatorDelete)
+ return ExprError();
+ }
+
+ if (!getDerived().AlwaysRebuild() &&
+ Operand.get() == E->getArgument() &&
+ OperatorDelete == E->getOperatorDelete()) {
+ // Mark any declarations we need as referenced.
+ // FIXME: instantiation-specific.
+ if (OperatorDelete)
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), OperatorDelete);
+
+ if (!E->getArgument()->isTypeDependent()) {
+ QualType Destroyed = SemaRef.Context.getBaseElementType(
+ E->getDestroyedType());
+ if (const RecordType *DestroyedRec = Destroyed->getAs<RecordType>()) {
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(DestroyedRec->getDecl());
+ SemaRef.MarkFunctionReferenced(E->getLocStart(),
+ SemaRef.LookupDestructor(Record));
+ }
+ }
+
+ return SemaRef.Owned(E);
+ }
+
+ return getDerived().RebuildCXXDeleteExpr(E->getLocStart(),
+ E->isGlobalDelete(),
+ E->isArrayForm(),
+ Operand.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXPseudoDestructorExpr(
+ CXXPseudoDestructorExpr *E) {
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ ParsedType ObjectTypePtr;
+ bool MayBePseudoDestructor = false;
+ Base = SemaRef.ActOnStartCXXMemberReference(0, Base.get(),
+ E->getOperatorLoc(),
+ E->isArrow()? tok::arrow : tok::period,
+ ObjectTypePtr,
+ MayBePseudoDestructor);
+ if (Base.isInvalid())
+ return ExprError();
+
+ QualType ObjectType = ObjectTypePtr.get();
+ NestedNameSpecifierLoc QualifierLoc = E->getQualifierLoc();
+ if (QualifierLoc) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(QualifierLoc, ObjectType);
+ if (!QualifierLoc)
+ return ExprError();
+ }
+ CXXScopeSpec SS;
+ SS.Adopt(QualifierLoc);
+
+ PseudoDestructorTypeStorage Destroyed;
+ if (E->getDestroyedTypeInfo()) {
+ TypeSourceInfo *DestroyedTypeInfo
+ = getDerived().TransformTypeInObjectScope(E->getDestroyedTypeInfo(),
+ ObjectType, 0, SS);
+ if (!DestroyedTypeInfo)
+ return ExprError();
+ Destroyed = DestroyedTypeInfo;
+ } else if (!ObjectType.isNull() && ObjectType->isDependentType()) {
+ // We aren't likely to be able to resolve the identifier down to a type
+ // now anyway, so just retain the identifier.
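+    // (A pseudo-destructor call has the form 'p->~T()' where 'T' names a
+    // non-class type; the call only evaluates its postfix-expression.)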
+ Destroyed = PseudoDestructorTypeStorage(E->getDestroyedTypeIdentifier(),
+ E->getDestroyedTypeLoc());
+ } else {
+ // Look for a destructor known with the given name.
+ ParsedType T = SemaRef.getDestructorName(E->getTildeLoc(),
+ *E->getDestroyedTypeIdentifier(),
+ E->getDestroyedTypeLoc(),
+ /*Scope=*/0,
+ SS, ObjectTypePtr,
+ false);
+ if (!T)
+ return ExprError();
+
+ Destroyed
+ = SemaRef.Context.getTrivialTypeSourceInfo(SemaRef.GetTypeFromParser(T),
+ E->getDestroyedTypeLoc());
+ }
+
+ TypeSourceInfo *ScopeTypeInfo = 0;
+ if (E->getScopeTypeInfo()) {
+ ScopeTypeInfo = getDerived().TransformType(E->getScopeTypeInfo());
+ if (!ScopeTypeInfo)
+ return ExprError();
+ }
+
+ return getDerived().RebuildCXXPseudoDestructorExpr(Base.get(),
+ E->getOperatorLoc(),
+ E->isArrow(),
+ SS,
+ ScopeTypeInfo,
+ E->getColonColonLoc(),
+ E->getTildeLoc(),
+ Destroyed);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformUnresolvedLookupExpr(
+ UnresolvedLookupExpr *Old) {
+ LookupResult R(SemaRef, Old->getName(), Old->getNameLoc(),
+ Sema::LookupOrdinaryName);
+
+ // Transform all the decls.
+ for (UnresolvedLookupExpr::decls_iterator I = Old->decls_begin(),
+ E = Old->decls_end(); I != E; ++I) {
+ NamedDecl *InstD = static_cast<NamedDecl*>(
+ getDerived().TransformDecl(Old->getNameLoc(),
+ *I));
+ if (!InstD) {
+ // Silently ignore these if a UsingShadowDecl instantiated to nothing.
+ // This can happen because of dependent hiding.
+ if (isa<UsingShadowDecl>(*I))
+ continue;
+ else
+ return ExprError();
+ }
+
+ // Expand using declarations.
+ if (isa<UsingDecl>(InstD)) {
+ UsingDecl *UD = cast<UsingDecl>(InstD);
+ for (UsingDecl::shadow_iterator I = UD->shadow_begin(),
+ E = UD->shadow_end(); I != E; ++I)
+ R.addDecl(*I);
+ continue;
+ }
+
+ R.addDecl(InstD);
+ }
+
+ // Resolve a kind, but don't do any further analysis. If it's
+ // ambiguous, the callee needs to deal with it.
+ R.resolveKind();
+
+ // Rebuild the nested-name qualifier, if present.
+ CXXScopeSpec SS;
+ if (Old->getQualifierLoc()) {
+ NestedNameSpecifierLoc QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(Old->getQualifierLoc());
+ if (!QualifierLoc)
+ return ExprError();
+
+ SS.Adopt(QualifierLoc);
+ }
+
+ if (Old->getNamingClass()) {
+ CXXRecordDecl *NamingClass
+ = cast_or_null<CXXRecordDecl>(getDerived().TransformDecl(
+ Old->getNameLoc(),
+ Old->getNamingClass()));
+ if (!NamingClass)
+ return ExprError();
+
+ R.setNamingClass(NamingClass);
+ }
+
+ SourceLocation TemplateKWLoc = Old->getTemplateKeywordLoc();
+
+ // If we have neither explicit template arguments, nor the template keyword,
+ // it's a normal declaration name.
+ if (!Old->hasExplicitTemplateArgs() && !TemplateKWLoc.isValid())
+ return getDerived().RebuildDeclarationNameExpr(SS, R, Old->requiresADL());
+
+ // If we have template arguments, rebuild them, then rebuild the
+ // templateid expression.
+ TemplateArgumentListInfo TransArgs(Old->getLAngleLoc(), Old->getRAngleLoc());
+ if (getDerived().TransformTemplateArguments(Old->getTemplateArgs(),
+ Old->getNumTemplateArgs(),
+ TransArgs))
+ return ExprError();
+
+ return getDerived().RebuildTemplateIdExpr(SS, TemplateKWLoc, R,
+ Old->requiresADL(), &TransArgs);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
+ TypeSourceInfo *T = getDerived().TransformType(E->getQueriedTypeSourceInfo());
+ if (!T)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ T == E->getQueriedTypeSourceInfo())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildUnaryTypeTrait(E->getTrait(),
+ E->getLocStart(),
+ T,
+ E->getLocEnd());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
+ TypeSourceInfo *LhsT = getDerived().TransformType(E->getLhsTypeSourceInfo());
+ if (!LhsT)
+ return ExprError();
+
+ TypeSourceInfo *RhsT = getDerived().TransformType(E->getRhsTypeSourceInfo());
+ if (!RhsT)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ LhsT == E->getLhsTypeSourceInfo() && RhsT == E->getRhsTypeSourceInfo())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildBinaryTypeTrait(E->getTrait(),
+ E->getLocStart(),
+ LhsT, RhsT,
+ E->getLocEnd());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformTypeTraitExpr(TypeTraitExpr *E) {
+ bool ArgChanged = false;
+ llvm::SmallVector<TypeSourceInfo *, 4> Args;
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I) {
+ TypeSourceInfo *From = E->getArg(I);
+ TypeLoc FromTL = From->getTypeLoc();
+ if (!isa<PackExpansionTypeLoc>(FromTL)) {
+ TypeLocBuilder TLB;
+ TLB.reserve(FromTL.getFullDataSize());
+ QualType To = getDerived().TransformType(TLB, FromTL);
+ if (To.isNull())
+ return ExprError();
+
+ if (To == From->getType())
+ Args.push_back(From);
+ else {
+ Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To));
+ ArgChanged = true;
+ }
+ continue;
+ }
+
+ ArgChanged = true;
+
+ // We have a pack expansion. Instantiate it.
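+    // (For example, in '__is_constructible(T, Args...)' the 'Args...'
+    // argument is a pack expansion that may expand to one type per element
+    // of the pack.)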
+ PackExpansionTypeLoc ExpansionTL = cast<PackExpansionTypeLoc>(FromTL);
+ TypeLoc PatternTL = ExpansionTL.getPatternLoc();
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ SemaRef.collectUnexpandedParameterPacks(PatternTL, Unexpanded);
+
+ // Determine whether the set of unexpanded parameter packs can and should
+ // be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> OrigNumExpansions
+ = ExpansionTL.getTypePtr()->getNumExpansions();
+ llvm::Optional<unsigned> NumExpansions = OrigNumExpansions;
+ if (getDerived().TryExpandParameterPacks(ExpansionTL.getEllipsisLoc(),
+ PatternTL.getSourceRange(),
+ Unexpanded,
+ Expand, RetainExpansion,
+ NumExpansions))
+ return ExprError();
+
+ if (!Expand) {
+ // The transform has determined that we should perform a simple
+ // transformation on the pack expansion, producing another pack
+ // expansion.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+
+ TypeLocBuilder TLB;
+ TLB.reserve(From->getTypeLoc().getFullDataSize());
+
+ QualType To = getDerived().TransformType(TLB, PatternTL);
+ if (To.isNull())
+ return ExprError();
+
+ To = getDerived().RebuildPackExpansionType(To,
+ PatternTL.getSourceRange(),
+ ExpansionTL.getEllipsisLoc(),
+ NumExpansions);
+ if (To.isNull())
+ return ExprError();
+
+ PackExpansionTypeLoc ToExpansionTL
+ = TLB.push<PackExpansionTypeLoc>(To);
+ ToExpansionTL.setEllipsisLoc(ExpansionTL.getEllipsisLoc());
+ Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To));
+ continue;
+ }
+
+ // Expand the pack expansion by substituting for each argument in the
+ // pack(s).
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
+ TypeLocBuilder TLB;
+ TLB.reserve(PatternTL.getFullDataSize());
+ QualType To = getDerived().TransformType(TLB, PatternTL);
+ if (To.isNull())
+ return ExprError();
+
+ Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To));
+ }
+
+ if (!RetainExpansion)
+ continue;
+
+ // If we're supposed to retain a pack expansion, do so by temporarily
+ // forgetting the partially-substituted parameter pack.
+ ForgetPartiallySubstitutedPackRAII Forget(getDerived());
+
+ TypeLocBuilder TLB;
+ TLB.reserve(From->getTypeLoc().getFullDataSize());
+
+ QualType To = getDerived().TransformType(TLB, PatternTL);
+ if (To.isNull())
+ return ExprError();
+
+ To = getDerived().RebuildPackExpansionType(To,
+ PatternTL.getSourceRange(),
+ ExpansionTL.getEllipsisLoc(),
+ NumExpansions);
+ if (To.isNull())
+ return ExprError();
+
+ PackExpansionTypeLoc ToExpansionTL
+ = TLB.push<PackExpansionTypeLoc>(To);
+ ToExpansionTL.setEllipsisLoc(ExpansionTL.getEllipsisLoc());
+ Args.push_back(TLB.getTypeSourceInfo(SemaRef.Context, To));
+ }
+
+ if (!getDerived().AlwaysRebuild() && !ArgChanged)
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildTypeTrait(E->getTrait(),
+ E->getLocStart(),
+ Args,
+ E->getLocEnd());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
+ TypeSourceInfo *T = getDerived().TransformType(E->getQueriedTypeSourceInfo());
+ if (!T)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ T == E->getQueriedTypeSourceInfo())
+ return SemaRef.Owned(E);
+
+ ExprResult SubExpr;
+ {
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ SubExpr = getDerived().TransformExpr(E->getDimensionExpression());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+    if (!getDerived().AlwaysRebuild() &&
+        SubExpr.get() == E->getDimensionExpression())
+ return SemaRef.Owned(E);
+ }
+
+ return getDerived().RebuildArrayTypeTrait(E->getTrait(),
+ E->getLocStart(),
+ T,
+ SubExpr.get(),
+ E->getLocEnd());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformExpressionTraitExpr(ExpressionTraitExpr *E) {
+ ExprResult SubExpr;
+ {
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ SubExpr = getDerived().TransformExpr(E->getQueriedExpression());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+    if (!getDerived().AlwaysRebuild() &&
+        SubExpr.get() == E->getQueriedExpression())
+ return SemaRef.Owned(E);
+ }
+
+ return getDerived().RebuildExpressionTrait(
+ E->getTrait(), E->getLocStart(), SubExpr.get(), E->getLocEnd());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformDependentScopeDeclRefExpr(
+ DependentScopeDeclRefExpr *E) {
+ NestedNameSpecifierLoc QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc());
+ if (!QualifierLoc)
+ return ExprError();
+ SourceLocation TemplateKWLoc = E->getTemplateKeywordLoc();
+
+ // TODO: If this is a conversion-function-id, verify that the
+ // destination type name (if present) resolves the same way after
+ // instantiation as it did in the local scope.
+
+ DeclarationNameInfo NameInfo
+ = getDerived().TransformDeclarationNameInfo(E->getNameInfo());
+ if (!NameInfo.getName())
+ return ExprError();
+
+ if (!E->hasExplicitTemplateArgs()) {
+ if (!getDerived().AlwaysRebuild() &&
+ QualifierLoc == E->getQualifierLoc() &&
+ // Note: it is sufficient to compare the Name component of NameInfo:
+ // if name has not changed, DNLoc has not changed either.
+ NameInfo.getName() == E->getDeclName())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildDependentScopeDeclRefExpr(QualifierLoc,
+ TemplateKWLoc,
+ NameInfo,
+ /*TemplateArgs*/ 0);
+ }
+
+ TemplateArgumentListInfo TransArgs(E->getLAngleLoc(), E->getRAngleLoc());
+ if (getDerived().TransformTemplateArguments(E->getTemplateArgs(),
+ E->getNumTemplateArgs(),
+ TransArgs))
+ return ExprError();
+
+ return getDerived().RebuildDependentScopeDeclRefExpr(QualifierLoc,
+ TemplateKWLoc,
+ NameInfo,
+ &TransArgs);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXConstructExpr(CXXConstructExpr *E) {
+ // CXXConstructExprs are always implicit, so when we have a
+ // 1-argument construction we just transform that argument.
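+  // (A one-argument CXXConstructExpr is typically an implicit copy or move
+  // construction wrapped around its argument.)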
+ if (E->getNumArgs() == 1 ||
+ (E->getNumArgs() > 1 && getDerived().DropCallArgument(E->getArg(1))))
+ return getDerived().TransformExpr(E->getArg(0));
+
+ TemporaryBase Rebase(*this, /*FIXME*/E->getLocStart(), DeclarationName());
+
+ QualType T = getDerived().TransformType(E->getType());
+ if (T.isNull())
+ return ExprError();
+
+ CXXConstructorDecl *Constructor
+ = cast_or_null<CXXConstructorDecl>(
+ getDerived().TransformDecl(E->getLocStart(),
+ E->getConstructor()));
+ if (!Constructor)
+ return ExprError();
+
+ bool ArgumentChanged = false;
+ ASTOwningVector<Expr*> Args(SemaRef);
+ if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ &ArgumentChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ T == E->getType() &&
+ Constructor == E->getConstructor() &&
+ !ArgumentChanged) {
+ // Mark the constructor as referenced.
+ // FIXME: Instantiation-specific
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), Constructor);
+ return SemaRef.Owned(E);
+ }
+
+ return getDerived().RebuildCXXConstructExpr(T, /*FIXME:*/E->getLocStart(),
+ Constructor, E->isElidable(),
+ move_arg(Args),
+ E->hadMultipleCandidates(),
+ E->requiresZeroInitialization(),
+ E->getConstructionKind(),
+ E->getParenRange());
+}
+
+/// \brief Transform a C++ temporary-binding expression.
+///
+/// Since CXXBindTemporaryExpr nodes are implicitly generated, we just
+/// transform the subexpression and return that.
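+/// (The node only records that a temporary with a non-trivial destructor must
+/// be destroyed at the end of the full-expression; Sema re-introduces it as
+/// needed when the transformed subexpression is re-analyzed.)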
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ return getDerived().TransformExpr(E->getSubExpr());
+}
+
+/// \brief Transform a C++ expression that contains cleanups that should
+/// be run after the expression is evaluated.
+///
+/// Since ExprWithCleanups nodes are implicitly generated, we
+/// just transform the subexpression and return that.
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformExprWithCleanups(ExprWithCleanups *E) {
+ return getDerived().TransformExpr(E->getSubExpr());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXTemporaryObjectExpr(
+ CXXTemporaryObjectExpr *E) {
+ TypeSourceInfo *T = getDerived().TransformType(E->getTypeSourceInfo());
+ if (!T)
+ return ExprError();
+
+ CXXConstructorDecl *Constructor
+ = cast_or_null<CXXConstructorDecl>(
+ getDerived().TransformDecl(E->getLocStart(),
+ E->getConstructor()));
+ if (!Constructor)
+ return ExprError();
+
+ bool ArgumentChanged = false;
+ ASTOwningVector<Expr*> Args(SemaRef);
+ Args.reserve(E->getNumArgs());
+ if (TransformExprs(E->getArgs(), E->getNumArgs(), true, Args,
+ &ArgumentChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ T == E->getTypeSourceInfo() &&
+ Constructor == E->getConstructor() &&
+ !ArgumentChanged) {
+ // FIXME: Instantiation-specific
+ SemaRef.MarkFunctionReferenced(E->getLocStart(), Constructor);
+ return SemaRef.MaybeBindToTemporary(E);
+ }
+
+ return getDerived().RebuildCXXTemporaryObjectExpr(T,
+ /*FIXME:*/T->getTypeLoc().getEndLoc(),
+ move_arg(Args),
+ E->getLocEnd());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
+ // Create the local class that will describe the lambda.
+ CXXRecordDecl *Class
+ = getSema().createLambdaClosureType(E->getIntroducerRange(),
+ /*KnownDependent=*/false);
+ getDerived().transformedLocalDecl(E->getLambdaClass(), Class);
+
+ // Transform the type of the lambda parameters and start the definition of
+ // the lambda itself.
+ TypeSourceInfo *MethodTy
+ = TransformType(E->getCallOperator()->getTypeSourceInfo());
+ if (!MethodTy)
+ return ExprError();
+
+ // Transform lambda parameters.
+ bool Invalid = false;
+ llvm::SmallVector<QualType, 4> ParamTypes;
+ llvm::SmallVector<ParmVarDecl *, 4> Params;
+ if (getDerived().TransformFunctionTypeParams(E->getLocStart(),
+ E->getCallOperator()->param_begin(),
+ E->getCallOperator()->param_size(),
+ 0, ParamTypes, &Params))
+ Invalid = true;
+
+ // Build the call operator.
+ // Note: Once a lambda mangling number and context declaration have been
+ // assigned, they never change.
+ unsigned ManglingNumber = E->getLambdaClass()->getLambdaManglingNumber();
+ Decl *ContextDecl = E->getLambdaClass()->getLambdaContextDecl();
+ CXXMethodDecl *CallOperator
+ = getSema().startLambdaDefinition(Class, E->getIntroducerRange(),
+ MethodTy,
+ E->getCallOperator()->getLocEnd(),
+ Params, ManglingNumber, ContextDecl);
+ getDerived().transformAttrs(E->getCallOperator(), CallOperator);
+
+ // FIXME: Instantiation-specific.
+ CallOperator->setInstantiationOfMemberFunction(E->getCallOperator(),
+ TSK_ImplicitInstantiation);
+
+ // Introduce the context of the call operator.
+ Sema::ContextRAII SavedContext(getSema(), CallOperator);
+
+ // Enter the scope of the lambda.
+ sema::LambdaScopeInfo *LSI
+ = getSema().enterLambdaScope(CallOperator, E->getIntroducerRange(),
+ E->getCaptureDefault(),
+ E->hasExplicitParameters(),
+ E->hasExplicitResultType(),
+ E->isMutable());
+
+ // Transform captures.
+ bool FinishedExplicitCaptures = false;
+ for (LambdaExpr::capture_iterator C = E->capture_begin(),
+ CEnd = E->capture_end();
+ C != CEnd; ++C) {
+ // When we hit the first implicit capture, tell Sema that we've finished
+ // the list of explicit captures.
+ if (!FinishedExplicitCaptures && C->isImplicit()) {
+ getSema().finishLambdaExplicitCaptures(LSI);
+ FinishedExplicitCaptures = true;
+ }
+
+ // Capturing 'this' is trivial.
+ if (C->capturesThis()) {
+ getSema().CheckCXXThisCapture(C->getLocation(), C->isExplicit());
+ continue;
+ }
+
+ // Determine the capture kind for Sema.
+ Sema::TryCaptureKind Kind
+ = C->isImplicit()? Sema::TryCapture_Implicit
+ : C->getCaptureKind() == LCK_ByCopy
+ ? Sema::TryCapture_ExplicitByVal
+ : Sema::TryCapture_ExplicitByRef;
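+    // (Implicit captures come from a capture-default such as '[=]' or '[&]';
+    // explicit captures are spelled out in the introducer, e.g. '[x]', '[&x]'.)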
+ SourceLocation EllipsisLoc;
+ if (C->isPackExpansion()) {
+ UnexpandedParameterPack Unexpanded(C->getCapturedVar(), C->getLocation());
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> NumExpansions;
+ if (getDerived().TryExpandParameterPacks(C->getEllipsisLoc(),
+ C->getLocation(),
+ Unexpanded,
+ ShouldExpand, RetainExpansion,
+ NumExpansions))
+ return ExprError();
+
+ if (ShouldExpand) {
+        // The transform has determined that we should expand the pack;
+        // transform and capture each of the arguments in the expansion.
+ VarDecl *Pack = C->getCapturedVar();
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ VarDecl *CapturedVar
+ = cast_or_null<VarDecl>(getDerived().TransformDecl(C->getLocation(),
+ Pack));
+ if (!CapturedVar) {
+ Invalid = true;
+ continue;
+ }
+
+ // Capture the transformed variable.
+ getSema().tryCaptureVariable(CapturedVar, C->getLocation(), Kind);
+ }
+ continue;
+ }
+
+ EllipsisLoc = C->getEllipsisLoc();
+ }
+
+ // Transform the captured variable.
+ VarDecl *CapturedVar
+ = cast_or_null<VarDecl>(getDerived().TransformDecl(C->getLocation(),
+ C->getCapturedVar()));
+ if (!CapturedVar) {
+ Invalid = true;
+ continue;
+ }
+
+ // Capture the transformed variable.
+ getSema().tryCaptureVariable(CapturedVar, C->getLocation(), Kind);
+ }
+ if (!FinishedExplicitCaptures)
+ getSema().finishLambdaExplicitCaptures(LSI);
+
+ // Enter a new evaluation context to insulate the lambda from any
+ // cleanups from the enclosing full-expression.
+ getSema().PushExpressionEvaluationContext(Sema::PotentiallyEvaluated);
+
+ if (Invalid) {
+ getSema().ActOnLambdaError(E->getLocStart(), /*CurScope=*/0,
+ /*IsInstantiation=*/true);
+ return ExprError();
+ }
+
+ // Instantiate the body of the lambda expression.
+ StmtResult Body = getDerived().TransformStmt(E->getBody());
+ if (Body.isInvalid()) {
+ getSema().ActOnLambdaError(E->getLocStart(), /*CurScope=*/0,
+ /*IsInstantiation=*/true);
+ return ExprError();
+ }
+
+ return getSema().ActOnLambdaExpr(E->getLocStart(), Body.take(),
+ /*CurScope=*/0, /*IsInstantiation=*/true);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXUnresolvedConstructExpr(
+ CXXUnresolvedConstructExpr *E) {
+ TypeSourceInfo *T = getDerived().TransformType(E->getTypeSourceInfo());
+ if (!T)
+ return ExprError();
+
+ bool ArgumentChanged = false;
+ ASTOwningVector<Expr*> Args(SemaRef);
+ Args.reserve(E->arg_size());
+ if (getDerived().TransformExprs(E->arg_begin(), E->arg_size(), true, Args,
+ &ArgumentChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ T == E->getTypeSourceInfo() &&
+ !ArgumentChanged)
+ return SemaRef.Owned(E);
+
+ // FIXME: we're faking the locations of the commas
+ return getDerived().RebuildCXXUnresolvedConstructExpr(T,
+ E->getLParenLoc(),
+ move_arg(Args),
+ E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXDependentScopeMemberExpr(
+ CXXDependentScopeMemberExpr *E) {
+ // Transform the base of the expression.
+ ExprResult Base((Expr*) 0);
+ Expr *OldBase;
+ QualType BaseType;
+ QualType ObjectType;
+ if (!E->isImplicitAccess()) {
+ OldBase = E->getBase();
+ Base = getDerived().TransformExpr(OldBase);
+ if (Base.isInvalid())
+ return ExprError();
+
+ // Start the member reference and compute the object's type.
+ ParsedType ObjectTy;
+ bool MayBePseudoDestructor = false;
+ Base = SemaRef.ActOnStartCXXMemberReference(0, Base.get(),
+ E->getOperatorLoc(),
+ E->isArrow()? tok::arrow : tok::period,
+ ObjectTy,
+ MayBePseudoDestructor);
+ if (Base.isInvalid())
+ return ExprError();
+
+ ObjectType = ObjectTy.get();
+ BaseType = ((Expr*) Base.get())->getType();
+ } else {
+ OldBase = 0;
+ BaseType = getDerived().TransformType(E->getBaseType());
+ ObjectType = BaseType->getAs<PointerType>()->getPointeeType();
+ }
+
+ // Transform the first part of the nested-name-specifier that qualifies
+ // the member name.
+ NamedDecl *FirstQualifierInScope
+ = getDerived().TransformFirstQualifierInScope(
+ E->getFirstQualifierFoundInScope(),
+ E->getQualifierLoc().getBeginLoc());
+
+ NestedNameSpecifierLoc QualifierLoc;
+ if (E->getQualifier()) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(E->getQualifierLoc(),
+ ObjectType,
+ FirstQualifierInScope);
+ if (!QualifierLoc)
+ return ExprError();
+ }
+
+ SourceLocation TemplateKWLoc = E->getTemplateKeywordLoc();
+
+ // TODO: If this is a conversion-function-id, verify that the
+ // destination type name (if present) resolves the same way after
+ // instantiation as it did in the local scope.
+
+ DeclarationNameInfo NameInfo
+ = getDerived().TransformDeclarationNameInfo(E->getMemberNameInfo());
+ if (!NameInfo.getName())
+ return ExprError();
+
+ if (!E->hasExplicitTemplateArgs()) {
+ // This is a reference to a member without an explicitly-specified
+ // template argument list. Optimize for this common case.
+ if (!getDerived().AlwaysRebuild() &&
+ Base.get() == OldBase &&
+ BaseType == E->getBaseType() &&
+ QualifierLoc == E->getQualifierLoc() &&
+ NameInfo.getName() == E->getMember() &&
+ FirstQualifierInScope == E->getFirstQualifierFoundInScope())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildCXXDependentScopeMemberExpr(Base.get(),
+ BaseType,
+ E->isArrow(),
+ E->getOperatorLoc(),
+ QualifierLoc,
+ TemplateKWLoc,
+ FirstQualifierInScope,
+ NameInfo,
+ /*TemplateArgs*/ 0);
+ }
+
+ TemplateArgumentListInfo TransArgs(E->getLAngleLoc(), E->getRAngleLoc());
+ if (getDerived().TransformTemplateArguments(E->getTemplateArgs(),
+ E->getNumTemplateArgs(),
+ TransArgs))
+ return ExprError();
+
+ return getDerived().RebuildCXXDependentScopeMemberExpr(Base.get(),
+ BaseType,
+ E->isArrow(),
+ E->getOperatorLoc(),
+ QualifierLoc,
+ TemplateKWLoc,
+ FirstQualifierInScope,
+ NameInfo,
+ &TransArgs);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformUnresolvedMemberExpr(UnresolvedMemberExpr *Old) {
+ // Transform the base of the expression.
+ ExprResult Base((Expr*) 0);
+ QualType BaseType;
+ if (!Old->isImplicitAccess()) {
+ Base = getDerived().TransformExpr(Old->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+ Base = getSema().PerformMemberExprBaseConversion(Base.take(),
+ Old->isArrow());
+ if (Base.isInvalid())
+ return ExprError();
+ BaseType = Base.get()->getType();
+ } else {
+ BaseType = getDerived().TransformType(Old->getBaseType());
+ }
+
+ NestedNameSpecifierLoc QualifierLoc;
+ if (Old->getQualifierLoc()) {
+ QualifierLoc
+ = getDerived().TransformNestedNameSpecifierLoc(Old->getQualifierLoc());
+ if (!QualifierLoc)
+ return ExprError();
+ }
+
+ SourceLocation TemplateKWLoc = Old->getTemplateKeywordLoc();
+
+ LookupResult R(SemaRef, Old->getMemberNameInfo(),
+ Sema::LookupOrdinaryName);
+
+ // Transform all the decls.
+ for (UnresolvedMemberExpr::decls_iterator I = Old->decls_begin(),
+ E = Old->decls_end(); I != E; ++I) {
+ NamedDecl *InstD = static_cast<NamedDecl*>(
+ getDerived().TransformDecl(Old->getMemberLoc(),
+ *I));
+ if (!InstD) {
+ // Silently ignore these if a UsingShadowDecl instantiated to nothing.
+ // This can happen because of dependent hiding.
+ if (isa<UsingShadowDecl>(*I))
+ continue;
+ else {
+ R.clear();
+ return ExprError();
+ }
+ }
+
+ // Expand using declarations.
+ if (isa<UsingDecl>(InstD)) {
+ UsingDecl *UD = cast<UsingDecl>(InstD);
+ for (UsingDecl::shadow_iterator I = UD->shadow_begin(),
+ E = UD->shadow_end(); I != E; ++I)
+ R.addDecl(*I);
+ continue;
+ }
+
+ R.addDecl(InstD);
+ }
+
+ R.resolveKind();
+
+ // Determine the naming class.
+ if (Old->getNamingClass()) {
+ CXXRecordDecl *NamingClass
+ = cast_or_null<CXXRecordDecl>(getDerived().TransformDecl(
+ Old->getMemberLoc(),
+ Old->getNamingClass()));
+ if (!NamingClass)
+ return ExprError();
+
+ R.setNamingClass(NamingClass);
+ }
+
+ TemplateArgumentListInfo TransArgs;
+ if (Old->hasExplicitTemplateArgs()) {
+ TransArgs.setLAngleLoc(Old->getLAngleLoc());
+ TransArgs.setRAngleLoc(Old->getRAngleLoc());
+ if (getDerived().TransformTemplateArguments(Old->getTemplateArgs(),
+ Old->getNumTemplateArgs(),
+ TransArgs))
+ return ExprError();
+ }
+
+ // FIXME: to do this check properly, we will need to preserve the
+ // first-qualifier-in-scope here, just in case we had a dependent
+ // base (and therefore couldn't do the check) and a
+ // nested-name-qualifier (and therefore could do the lookup).
+ NamedDecl *FirstQualifierInScope = 0;
+
+ return getDerived().RebuildUnresolvedMemberExpr(Base.get(),
+ BaseType,
+ Old->getOperatorLoc(),
+ Old->isArrow(),
+ QualifierLoc,
+ TemplateKWLoc,
+ FirstQualifierInScope,
+ R,
+ (Old->hasExplicitTemplateArgs()
+ ? &TransArgs : 0));
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformCXXNoexceptExpr(CXXNoexceptExpr *E) {
+ EnterExpressionEvaluationContext Unevaluated(SemaRef, Sema::Unevaluated);
+ ExprResult SubExpr = getDerived().TransformExpr(E->getOperand());
+ if (SubExpr.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && SubExpr.get() == E->getOperand())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildCXXNoexceptExpr(E->getSourceRange(),SubExpr.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformPackExpansionExpr(PackExpansionExpr *E) {
+ ExprResult Pattern = getDerived().TransformExpr(E->getPattern());
+ if (Pattern.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && Pattern.get() == E->getPattern())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildPackExpansion(Pattern.get(), E->getEllipsisLoc(),
+ E->getNumExpansions());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformSizeOfPackExpr(SizeOfPackExpr *E) {
+ // If E is not value-dependent, then nothing will change when we transform it.
+ // Note: This is an instantiation-centric view.
+ if (!E->isValueDependent())
+ return SemaRef.Owned(E);
+
+  // Note: None of the implementations of TryExpandParameterPacks can ever
+  // produce a diagnostic when given only a single unexpanded parameter pack,
+  // so it is safe to call it here without triggering spurious diagnostics.
+ UnexpandedParameterPack Unexpanded(E->getPack(), E->getPackLoc());
+ bool ShouldExpand = false;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> NumExpansions;
+ if (getDerived().TryExpandParameterPacks(E->getOperatorLoc(), E->getPackLoc(),
+ Unexpanded,
+ ShouldExpand, RetainExpansion,
+ NumExpansions))
+ return ExprError();
+
+ if (RetainExpansion)
+ return SemaRef.Owned(E);
+
+ NamedDecl *Pack = E->getPack();
+ if (!ShouldExpand) {
+ Pack = cast_or_null<NamedDecl>(getDerived().TransformDecl(E->getPackLoc(),
+ Pack));
+ if (!Pack)
+ return ExprError();
+ }
+
+ // We now know the length of the parameter pack, so build a new expression
+ // that stores that length.
+ return getDerived().RebuildSizeOfPackExpr(E->getOperatorLoc(), Pack,
+ E->getPackLoc(), E->getRParenLoc(),
+ NumExpansions);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E) {
+ // Default behavior is to do nothing with this transformation.
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformSubstNonTypeTemplateParmExpr(
+ SubstNonTypeTemplateParmExpr *E) {
+ // Default behavior is to do nothing with this transformation.
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformMaterializeTemporaryExpr(
+ MaterializeTemporaryExpr *E) {
+ return getDerived().TransformExpr(E->GetTemporaryExpr());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCStringLiteral(ObjCStringLiteral *E) {
+ return SemaRef.MaybeBindToTemporary(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCBoolLiteralExpr(ObjCBoolLiteralExpr *E) {
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCNumericLiteral(ObjCNumericLiteral *E) {
+ return SemaRef.MaybeBindToTemporary(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCArrayLiteral(ObjCArrayLiteral *E) {
+ // Transform each of the elements.
+ llvm::SmallVector<Expr *, 8> Elements;
+ bool ArgChanged = false;
+ if (getDerived().TransformExprs(E->getElements(), E->getNumElements(),
+ /*IsCall=*/false, Elements, &ArgChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() && !ArgChanged)
+ return SemaRef.MaybeBindToTemporary(E);
+
+ return getDerived().RebuildObjCArrayLiteral(E->getSourceRange(),
+ Elements.data(),
+ Elements.size());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCDictionaryLiteral(
+ ObjCDictionaryLiteral *E) {
+ // Transform each of the elements.
+ llvm::SmallVector<ObjCDictionaryElement, 8> Elements;
+ bool ArgChanged = false;
+ for (unsigned I = 0, N = E->getNumElements(); I != N; ++I) {
+ ObjCDictionaryElement OrigElement = E->getKeyValueElement(I);
+
+ if (OrigElement.isPackExpansion()) {
+ // This key/value element is a pack expansion.
+ SmallVector<UnexpandedParameterPack, 2> Unexpanded;
+ getSema().collectUnexpandedParameterPacks(OrigElement.Key, Unexpanded);
+ getSema().collectUnexpandedParameterPacks(OrigElement.Value, Unexpanded);
+ assert(!Unexpanded.empty() && "Pack expansion without parameter packs?");
+
+ // Determine whether the set of unexpanded parameter packs can
+ // and should be expanded.
+ bool Expand = true;
+ bool RetainExpansion = false;
+ llvm::Optional<unsigned> OrigNumExpansions = OrigElement.NumExpansions;
+ llvm::Optional<unsigned> NumExpansions = OrigNumExpansions;
+ SourceRange PatternRange(OrigElement.Key->getLocStart(),
+ OrigElement.Value->getLocEnd());
+ if (getDerived().TryExpandParameterPacks(OrigElement.EllipsisLoc,
+ PatternRange,
+ Unexpanded,
+ Expand, RetainExpansion,
+ NumExpansions))
+ return ExprError();
+
+ if (!Expand) {
+ // The transform has determined that we should perform a simple
+ // transformation on the pack expansion, producing another pack
+ // expansion.
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), -1);
+ ExprResult Key = getDerived().TransformExpr(OrigElement.Key);
+ if (Key.isInvalid())
+ return ExprError();
+
+ if (Key.get() != OrigElement.Key)
+ ArgChanged = true;
+
+ ExprResult Value = getDerived().TransformExpr(OrigElement.Value);
+ if (Value.isInvalid())
+ return ExprError();
+
+ if (Value.get() != OrigElement.Value)
+ ArgChanged = true;
+
+ ObjCDictionaryElement Expansion = {
+ Key.get(), Value.get(), OrigElement.EllipsisLoc, NumExpansions
+ };
+ Elements.push_back(Expansion);
+ continue;
+ }
+
+ // Record right away that the argument was changed. This needs
+ // to happen even if the array expands to nothing.
+ ArgChanged = true;
+
+ // The transform has determined that we should perform an elementwise
+ // expansion of the pattern. Do so.
+ for (unsigned I = 0; I != *NumExpansions; ++I) {
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(getSema(), I);
+ ExprResult Key = getDerived().TransformExpr(OrigElement.Key);
+ if (Key.isInvalid())
+ return ExprError();
+
+ ExprResult Value = getDerived().TransformExpr(OrigElement.Value);
+ if (Value.isInvalid())
+ return ExprError();
+
+ ObjCDictionaryElement Element = {
+ Key.get(), Value.get(), SourceLocation(), NumExpansions
+ };
+
+ // If any unexpanded parameter packs remain, we still have a
+ // pack expansion.
+ if (Key.get()->containsUnexpandedParameterPack() ||
+ Value.get()->containsUnexpandedParameterPack())
+ Element.EllipsisLoc = OrigElement.EllipsisLoc;
+
+ Elements.push_back(Element);
+ }
+
+ // We've finished with this pack expansion.
+ continue;
+ }
+
+ // Transform and check key.
+ ExprResult Key = getDerived().TransformExpr(OrigElement.Key);
+ if (Key.isInvalid())
+ return ExprError();
+
+ if (Key.get() != OrigElement.Key)
+ ArgChanged = true;
+
+ // Transform and check value.
+ ExprResult Value
+ = getDerived().TransformExpr(OrigElement.Value);
+ if (Value.isInvalid())
+ return ExprError();
+
+ if (Value.get() != OrigElement.Value)
+ ArgChanged = true;
+
+ ObjCDictionaryElement Element = {
+ Key.get(), Value.get(), SourceLocation(), llvm::Optional<unsigned>()
+ };
+ Elements.push_back(Element);
+ }
+
+ if (!getDerived().AlwaysRebuild() && !ArgChanged)
+ return SemaRef.MaybeBindToTemporary(E);
+
+ return getDerived().RebuildObjCDictionaryLiteral(E->getSourceRange(),
+ Elements.data(),
+ Elements.size());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCEncodeExpr(ObjCEncodeExpr *E) {
+ TypeSourceInfo *EncodedTypeInfo
+ = getDerived().TransformType(E->getEncodedTypeSourceInfo());
+ if (!EncodedTypeInfo)
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ EncodedTypeInfo == E->getEncodedTypeSourceInfo())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildObjCEncodeExpr(E->getAtLoc(),
+ EncodedTypeInfo,
+ E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult TreeTransform<Derived>::
+TransformObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) {
+ ExprResult result = getDerived().TransformExpr(E->getSubExpr());
+ if (result.isInvalid()) return ExprError();
+ Expr *subExpr = result.take();
+
+ if (!getDerived().AlwaysRebuild() &&
+ subExpr == E->getSubExpr())
+ return SemaRef.Owned(E);
+
+ return SemaRef.Owned(new(SemaRef.Context)
+ ObjCIndirectCopyRestoreExpr(subExpr, E->getType(), E->shouldCopy()));
+}
+
+template<typename Derived>
+ExprResult TreeTransform<Derived>::
+TransformObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
+ TypeSourceInfo *TSInfo
+ = getDerived().TransformType(E->getTypeInfoAsWritten());
+ if (!TSInfo)
+ return ExprError();
+
+ ExprResult Result = getDerived().TransformExpr(E->getSubExpr());
+ if (Result.isInvalid())
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ TSInfo == E->getTypeInfoAsWritten() &&
+ Result.get() == E->getSubExpr())
+ return SemaRef.Owned(E);
+
+ return SemaRef.BuildObjCBridgedCast(E->getLParenLoc(), E->getBridgeKind(),
+ E->getBridgeKeywordLoc(), TSInfo,
+ Result.get());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCMessageExpr(ObjCMessageExpr *E) {
+ // Transform arguments.
+ bool ArgChanged = false;
+ ASTOwningVector<Expr*> Args(SemaRef);
+ Args.reserve(E->getNumArgs());
+ if (getDerived().TransformExprs(E->getArgs(), E->getNumArgs(), false, Args,
+ &ArgChanged))
+ return ExprError();
+
+ if (E->getReceiverKind() == ObjCMessageExpr::Class) {
+ // Class message: transform the receiver type.
+ TypeSourceInfo *ReceiverTypeInfo
+ = getDerived().TransformType(E->getClassReceiverTypeInfo());
+ if (!ReceiverTypeInfo)
+ return ExprError();
+
+ // If nothing changed, just retain the existing message send.
+ if (!getDerived().AlwaysRebuild() &&
+ ReceiverTypeInfo == E->getClassReceiverTypeInfo() && !ArgChanged)
+ return SemaRef.MaybeBindToTemporary(E);
+
+ // Build a new class message send.
+ SmallVector<SourceLocation, 16> SelLocs;
+ E->getSelectorLocs(SelLocs);
+ return getDerived().RebuildObjCMessageExpr(ReceiverTypeInfo,
+ E->getSelector(),
+ SelLocs,
+ E->getMethodDecl(),
+ E->getLeftLoc(),
+ move_arg(Args),
+ E->getRightLoc());
+ }
+
+ // Instance message: transform the receiver
+ assert(E->getReceiverKind() == ObjCMessageExpr::Instance &&
+ "Only class and instance messages may be instantiated");
+ ExprResult Receiver
+ = getDerived().TransformExpr(E->getInstanceReceiver());
+ if (Receiver.isInvalid())
+ return ExprError();
+
+ // If nothing changed, just retain the existing message send.
+ if (!getDerived().AlwaysRebuild() &&
+ Receiver.get() == E->getInstanceReceiver() && !ArgChanged)
+ return SemaRef.MaybeBindToTemporary(E);
+
+ // Build a new instance message send.
+ SmallVector<SourceLocation, 16> SelLocs;
+ E->getSelectorLocs(SelLocs);
+ return getDerived().RebuildObjCMessageExpr(Receiver.get(),
+ E->getSelector(),
+ SelLocs,
+ E->getMethodDecl(),
+ E->getLeftLoc(),
+ move_arg(Args),
+ E->getRightLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCSelectorExpr(ObjCSelectorExpr *E) {
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCProtocolExpr(ObjCProtocolExpr *E) {
+ return SemaRef.Owned(E);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ // Transform the base expression.
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ // We don't need to transform the ivar; it will never change.
+
+ // If nothing changed, just retain the existing expression.
+ if (!getDerived().AlwaysRebuild() &&
+ Base.get() == E->getBase())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildObjCIvarRefExpr(Base.get(), E->getDecl(),
+ E->getLocation(),
+ E->isArrow(), E->isFreeIvar());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ // 'super' and types never change. Property never changes. Just
+ // retain the existing expression.
+ if (!E->isObjectReceiver())
+ return SemaRef.Owned(E);
+
+ // Transform the base expression.
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ // We don't need to transform the property; it will never change.
+
+ // If nothing changed, just retain the existing expression.
+ if (!getDerived().AlwaysRebuild() &&
+ Base.get() == E->getBase())
+ return SemaRef.Owned(E);
+
+ if (E->isExplicitProperty())
+ return getDerived().RebuildObjCPropertyRefExpr(Base.get(),
+ E->getExplicitProperty(),
+ E->getLocation());
+
+ return getDerived().RebuildObjCPropertyRefExpr(Base.get(),
+ SemaRef.Context.PseudoObjectTy,
+ E->getImplicitPropertyGetter(),
+ E->getImplicitPropertySetter(),
+ E->getLocation());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCSubscriptRefExpr(ObjCSubscriptRefExpr *E) {
+ // Transform the base expression.
+ ExprResult Base = getDerived().TransformExpr(E->getBaseExpr());
+ if (Base.isInvalid())
+ return ExprError();
+
+ // Transform the key expression.
+ ExprResult Key = getDerived().TransformExpr(E->getKeyExpr());
+ if (Key.isInvalid())
+ return ExprError();
+
+ // If nothing changed, just retain the existing expression.
+ if (!getDerived().AlwaysRebuild() &&
+ Key.get() == E->getKeyExpr() && Base.get() == E->getBaseExpr())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildObjCSubscriptRefExpr(E->getRBracket(),
+ Base.get(), Key.get(),
+ E->getAtIndexMethodDecl(),
+ E->setAtIndexMethodDecl());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformObjCIsaExpr(ObjCIsaExpr *E) {
+ // Transform the base expression.
+ ExprResult Base = getDerived().TransformExpr(E->getBase());
+ if (Base.isInvalid())
+ return ExprError();
+
+ // If nothing changed, just retain the existing expression.
+ if (!getDerived().AlwaysRebuild() &&
+ Base.get() == E->getBase())
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildObjCIsaExpr(Base.get(), E->getIsaMemberLoc(),
+ E->isArrow());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformShuffleVectorExpr(ShuffleVectorExpr *E) {
+ bool ArgumentChanged = false;
+ ASTOwningVector<Expr*> SubExprs(SemaRef);
+ SubExprs.reserve(E->getNumSubExprs());
+ if (getDerived().TransformExprs(E->getSubExprs(), E->getNumSubExprs(), false,
+ SubExprs, &ArgumentChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ !ArgumentChanged)
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildShuffleVectorExpr(E->getBuiltinLoc(),
+ move_arg(SubExprs),
+ E->getRParenLoc());
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformBlockExpr(BlockExpr *E) {
+ BlockDecl *oldBlock = E->getBlockDecl();
+
+ SemaRef.ActOnBlockStart(E->getCaretLocation(), /*Scope=*/0);
+ BlockScopeInfo *blockScope = SemaRef.getCurBlock();
+
+ blockScope->TheDecl->setIsVariadic(oldBlock->isVariadic());
+ blockScope->TheDecl->setBlockMissingReturnType(
+ oldBlock->blockMissingReturnType());
+
+ SmallVector<ParmVarDecl*, 4> params;
+ SmallVector<QualType, 4> paramTypes;
+
+ // Parameter substitution.
+ if (getDerived().TransformFunctionTypeParams(E->getCaretLocation(),
+ oldBlock->param_begin(),
+ oldBlock->param_size(),
+ 0, paramTypes, &params)) {
+ getSema().ActOnBlockError(E->getCaretLocation(), /*Scope=*/0);
+ return ExprError();
+ }
+
+ const FunctionType *exprFunctionType = E->getFunctionType();
+ QualType exprResultType =
+ getDerived().TransformType(exprFunctionType->getResultType());
+
+  // Don't allow returning an Objective-C interface by value.
+ if (exprResultType->isObjCObjectType()) {
+ getSema().Diag(E->getCaretLocation(),
+ diag::err_object_cannot_be_passed_returned_by_value)
+ << 0 << exprResultType;
+ getSema().ActOnBlockError(E->getCaretLocation(), /*Scope=*/0);
+ return ExprError();
+ }
+
+ QualType functionType = getDerived().RebuildFunctionProtoType(
+ exprResultType,
+ paramTypes.data(),
+ paramTypes.size(),
+ oldBlock->isVariadic(),
+ false, 0, RQ_None,
+ exprFunctionType->getExtInfo());
+ blockScope->FunctionType = functionType;
+
+ // Set the parameters on the block decl.
+ if (!params.empty())
+ blockScope->TheDecl->setParams(params);
+
+ if (!oldBlock->blockMissingReturnType()) {
+ blockScope->HasImplicitReturnType = false;
+ blockScope->ReturnType = exprResultType;
+ }
+
+ // Transform the body
+ StmtResult body = getDerived().TransformStmt(E->getBody());
+ if (body.isInvalid()) {
+ getSema().ActOnBlockError(E->getCaretLocation(), /*Scope=*/0);
+ return ExprError();
+ }
+
+#ifndef NDEBUG
+ // In builds with assertions, make sure that we captured everything we
+ // captured before.
+ if (!SemaRef.getDiagnostics().hasErrorOccurred()) {
+ for (BlockDecl::capture_iterator i = oldBlock->capture_begin(),
+ e = oldBlock->capture_end(); i != e; ++i) {
+ VarDecl *oldCapture = i->getVariable();
+
+ // Ignore parameter packs.
+ if (isa<ParmVarDecl>(oldCapture) &&
+ cast<ParmVarDecl>(oldCapture)->isParameterPack())
+ continue;
+
+ VarDecl *newCapture =
+ cast<VarDecl>(getDerived().TransformDecl(E->getCaretLocation(),
+ oldCapture));
+ assert(blockScope->CaptureMap.count(newCapture));
+ }
+ assert(oldBlock->capturesCXXThis() == blockScope->isCXXThisCaptured());
+ }
+#endif
+
+ return SemaRef.ActOnBlockStmtExpr(E->getCaretLocation(), body.get(),
+ /*Scope=*/0);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformAsTypeExpr(AsTypeExpr *E) {
+ llvm_unreachable("Cannot transform asType expressions yet");
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::TransformAtomicExpr(AtomicExpr *E) {
+ QualType RetTy = getDerived().TransformType(E->getType());
+ bool ArgumentChanged = false;
+ ASTOwningVector<Expr*> SubExprs(SemaRef);
+ SubExprs.reserve(E->getNumSubExprs());
+ if (getDerived().TransformExprs(E->getSubExprs(), E->getNumSubExprs(), false,
+ SubExprs, &ArgumentChanged))
+ return ExprError();
+
+ if (!getDerived().AlwaysRebuild() &&
+ !ArgumentChanged)
+ return SemaRef.Owned(E);
+
+ return getDerived().RebuildAtomicExpr(E->getBuiltinLoc(), move_arg(SubExprs),
+ RetTy, E->getOp(), E->getRParenLoc());
+}
+
+//===----------------------------------------------------------------------===//
+// Type reconstruction
+//===----------------------------------------------------------------------===//
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildPointerType(QualType PointeeType,
+ SourceLocation Star) {
+ return SemaRef.BuildPointerType(PointeeType, Star,
+ getDerived().getBaseEntity());
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildBlockPointerType(QualType PointeeType,
+ SourceLocation Star) {
+ return SemaRef.BuildBlockPointerType(PointeeType, Star,
+ getDerived().getBaseEntity());
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::RebuildReferenceType(QualType ReferentType,
+ bool WrittenAsLValue,
+ SourceLocation Sigil) {
+ return SemaRef.BuildReferenceType(ReferentType, WrittenAsLValue,
+ Sigil, getDerived().getBaseEntity());
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::RebuildMemberPointerType(QualType PointeeType,
+ QualType ClassType,
+ SourceLocation Sigil) {
+ return SemaRef.BuildMemberPointerType(PointeeType, ClassType,
+ Sigil, getDerived().getBaseEntity());
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::RebuildArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ const llvm::APInt *Size,
+ Expr *SizeExpr,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange) {
+ if (SizeExpr || !Size)
+ return SemaRef.BuildArrayType(ElementType, SizeMod, SizeExpr,
+ IndexTypeQuals, BracketsRange,
+ getDerived().getBaseEntity());
+
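+  // The array bound is a known constant, but BuildArrayType wants an
+  // expression; pick the unsigned integer type whose width matches the
+  // APInt so we can wrap the value in a well-typed IntegerLiteral below.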
+ QualType Types[] = {
+ SemaRef.Context.UnsignedCharTy, SemaRef.Context.UnsignedShortTy,
+ SemaRef.Context.UnsignedIntTy, SemaRef.Context.UnsignedLongTy,
+ SemaRef.Context.UnsignedLongLongTy, SemaRef.Context.UnsignedInt128Ty
+ };
+ const unsigned NumTypes = sizeof(Types) / sizeof(QualType);
+ QualType SizeType;
+ for (unsigned I = 0; I != NumTypes; ++I)
+ if (Size->getBitWidth() == SemaRef.Context.getIntWidth(Types[I])) {
+ SizeType = Types[I];
+ break;
+ }
+
+ // Note that we can return a VariableArrayType here in the case where
+ // the element type was a dependent VariableArrayType.
+ IntegerLiteral *ArraySize
+ = IntegerLiteral::Create(SemaRef.Context, *Size, SizeType,
+ /*FIXME*/BracketsRange.getBegin());
+ return SemaRef.BuildArrayType(ElementType, SizeMod, ArraySize,
+ IndexTypeQuals, BracketsRange,
+ getDerived().getBaseEntity());
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::RebuildConstantArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ const llvm::APInt &Size,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange) {
+ return getDerived().RebuildArrayType(ElementType, SizeMod, &Size, 0,
+ IndexTypeQuals, BracketsRange);
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::RebuildIncompleteArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange) {
+ return getDerived().RebuildArrayType(ElementType, SizeMod, 0, 0,
+ IndexTypeQuals, BracketsRange);
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::RebuildVariableArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ Expr *SizeExpr,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange) {
+ return getDerived().RebuildArrayType(ElementType, SizeMod, 0,
+ SizeExpr,
+ IndexTypeQuals, BracketsRange);
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::RebuildDependentSizedArrayType(QualType ElementType,
+ ArrayType::ArraySizeModifier SizeMod,
+ Expr *SizeExpr,
+ unsigned IndexTypeQuals,
+ SourceRange BracketsRange) {
+ return getDerived().RebuildArrayType(ElementType, SizeMod, 0,
+ SizeExpr,
+ IndexTypeQuals, BracketsRange);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildVectorType(QualType ElementType,
+ unsigned NumElements,
+ VectorType::VectorKind VecKind) {
+ // FIXME: semantic checking!
+ return SemaRef.Context.getVectorType(ElementType, NumElements, VecKind);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildExtVectorType(QualType ElementType,
+ unsigned NumElements,
+ SourceLocation AttributeLoc) {
+ llvm::APInt numElements(SemaRef.Context.getIntWidth(SemaRef.Context.IntTy),
+ NumElements, true);
+ IntegerLiteral *VectorSize
+ = IntegerLiteral::Create(SemaRef.Context, numElements, SemaRef.Context.IntTy,
+ AttributeLoc);
+ return SemaRef.BuildExtVectorType(ElementType, VectorSize, AttributeLoc);
+}
+
+template<typename Derived>
+QualType
+TreeTransform<Derived>::RebuildDependentSizedExtVectorType(QualType ElementType,
+ Expr *SizeExpr,
+ SourceLocation AttributeLoc) {
+ return SemaRef.BuildExtVectorType(ElementType, SizeExpr, AttributeLoc);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildFunctionProtoType(QualType T,
+ QualType *ParamTypes,
+ unsigned NumParamTypes,
+ bool Variadic,
+ bool HasTrailingReturn,
+ unsigned Quals,
+ RefQualifierKind RefQualifier,
+ const FunctionType::ExtInfo &Info) {
+ return SemaRef.BuildFunctionType(T, ParamTypes, NumParamTypes, Variadic,
+ HasTrailingReturn, Quals, RefQualifier,
+ getDerived().getBaseLocation(),
+ getDerived().getBaseEntity(),
+ Info);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildFunctionNoProtoType(QualType T) {
+ return SemaRef.Context.getFunctionNoProtoType(T);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildUnresolvedUsingType(Decl *D) {
+ assert(D && "no decl found");
+ if (D->isInvalidDecl()) return QualType();
+
+ // FIXME: Doesn't account for ObjCInterfaceDecl!
+ TypeDecl *Ty;
+ if (isa<UsingDecl>(D)) {
+ UsingDecl *Using = cast<UsingDecl>(D);
+ assert(Using->isTypeName() &&
+ "UnresolvedUsingTypenameDecl transformed to non-typename using");
+
+ // A valid resolved using typename decl points to exactly one type decl.
+ assert(++Using->shadow_begin() == Using->shadow_end());
+ Ty = cast<TypeDecl>((*Using->shadow_begin())->getTargetDecl());
+
+ } else {
+ assert(isa<UnresolvedUsingTypenameDecl>(D) &&
+ "UnresolvedUsingTypenameDecl transformed to non-using decl");
+ Ty = cast<UnresolvedUsingTypenameDecl>(D);
+ }
+
+ return SemaRef.Context.getTypeDeclType(Ty);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildTypeOfExprType(Expr *E,
+ SourceLocation Loc) {
+ return SemaRef.BuildTypeofExprType(E, Loc);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildTypeOfType(QualType Underlying) {
+ return SemaRef.Context.getTypeOfType(Underlying);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildDecltypeType(Expr *E,
+ SourceLocation Loc) {
+ return SemaRef.BuildDecltypeType(E, Loc);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildUnaryTransformType(QualType BaseType,
+ UnaryTransformType::UTTKind UKind,
+ SourceLocation Loc) {
+ return SemaRef.BuildUnaryTransformType(BaseType, UKind, Loc);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildTemplateSpecializationType(
+ TemplateName Template,
+ SourceLocation TemplateNameLoc,
+ TemplateArgumentListInfo &TemplateArgs) {
+ return SemaRef.CheckTemplateIdType(Template, TemplateNameLoc, TemplateArgs);
+}
+
+template<typename Derived>
+QualType TreeTransform<Derived>::RebuildAtomicType(QualType ValueType,
+ SourceLocation KWLoc) {
+ return SemaRef.BuildAtomicType(ValueType, KWLoc);
+}
+
+template<typename Derived>
+TemplateName
+TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
+ bool TemplateKW,
+ TemplateDecl *Template) {
+ return SemaRef.Context.getQualifiedTemplateName(SS.getScopeRep(), TemplateKW,
+ Template);
+}
+
+template<typename Derived>
+TemplateName
+TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
+ const IdentifierInfo &Name,
+ SourceLocation NameLoc,
+ QualType ObjectType,
+ NamedDecl *FirstQualifierInScope) {
+ UnqualifiedId TemplateName;
+ TemplateName.setIdentifier(&Name, NameLoc);
+ Sema::TemplateTy Template;
+ SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller.
+ getSema().ActOnDependentTemplateName(/*Scope=*/0,
+ SS, TemplateKWLoc, TemplateName,
+ ParsedType::make(ObjectType),
+ /*EnteringContext=*/false,
+ Template);
+ return Template.get();
+}
+
+template<typename Derived>
+TemplateName
+TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
+ OverloadedOperatorKind Operator,
+ SourceLocation NameLoc,
+ QualType ObjectType) {
+ UnqualifiedId Name;
+ // FIXME: Bogus location information.
+ SourceLocation SymbolLocations[3] = { NameLoc, NameLoc, NameLoc };
+ Name.setOperatorFunctionId(NameLoc, Operator, SymbolLocations);
+ SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller.
+ Sema::TemplateTy Template;
+ getSema().ActOnDependentTemplateName(/*Scope=*/0,
+ SS, TemplateKWLoc, Name,
+ ParsedType::make(ObjectType),
+ /*EnteringContext=*/false,
+ Template);
+ return Template.template getAsVal<TemplateName>();
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
+ SourceLocation OpLoc,
+ Expr *OrigCallee,
+ Expr *First,
+ Expr *Second) {
+ Expr *Callee = OrigCallee->IgnoreParenCasts();
+ bool isPostIncDec = Second && (Op == OO_PlusPlus || Op == OO_MinusMinus);
+
+ // Determine whether this should be a builtin operation.
+ if (Op == OO_Subscript) {
+ if (!First->getType()->isOverloadableType() &&
+ !Second->getType()->isOverloadableType())
+ return getSema().CreateBuiltinArraySubscriptExpr(First,
+ Callee->getLocStart(),
+ Second, OpLoc);
+ } else if (Op == OO_Arrow) {
+ // -> is never a builtin operation.
+ return SemaRef.BuildOverloadedArrowExpr(0, First, OpLoc);
+ } else if (Second == 0 || isPostIncDec) {
+ if (!First->getType()->isOverloadableType()) {
+ // The argument is not of overloadable type, so try to create a
+ // built-in unary operation.
+ UnaryOperatorKind Opc
+ = UnaryOperator::getOverloadedOpcode(Op, isPostIncDec);
+
+ return getSema().CreateBuiltinUnaryOp(OpLoc, Opc, First);
+ }
+ } else {
+ if (!First->getType()->isOverloadableType() &&
+ !Second->getType()->isOverloadableType()) {
+ // Neither of the arguments is an overloadable type, so try to
+ // create a built-in binary operation.
+ BinaryOperatorKind Opc = BinaryOperator::getOverloadedOpcode(Op);
+ ExprResult Result
+ = SemaRef.CreateBuiltinBinOp(OpLoc, Opc, First, Second);
+ if (Result.isInvalid())
+ return ExprError();
+
+ return move(Result);
+ }
+ }
+
+ // Compute the transformed set of functions (and function templates) to be
+ // used during overload resolution.
+ UnresolvedSet<16> Functions;
+
+ if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(Callee)) {
+ assert(ULE->requiresADL());
+
+ // FIXME: Do we have to check
+ // IsAcceptableNonMemberOperatorCandidate for each of these?
+ Functions.append(ULE->decls_begin(), ULE->decls_end());
+ } else {
+ Functions.addDecl(cast<DeclRefExpr>(Callee)->getDecl());
+ }
+
+ // Add any functions found via argument-dependent lookup.
+ Expr *Args[2] = { First, Second };
+ unsigned NumArgs = 1 + (Second != 0);
+
+ // Create the overloaded operator invocation for unary operators.
+ if (NumArgs == 1 || isPostIncDec) {
+ UnaryOperatorKind Opc
+ = UnaryOperator::getOverloadedOpcode(Op, isPostIncDec);
+ return SemaRef.CreateOverloadedUnaryOp(OpLoc, Opc, Functions, First);
+ }
+
+ if (Op == OO_Subscript) {
+ SourceLocation LBrace;
+ SourceLocation RBrace;
+
+ if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Callee)) {
+ DeclarationNameLoc &NameLoc = DRE->getNameInfo().getInfo();
+ LBrace = SourceLocation::getFromRawEncoding(
+ NameLoc.CXXOperatorName.BeginOpNameLoc);
+ RBrace = SourceLocation::getFromRawEncoding(
+ NameLoc.CXXOperatorName.EndOpNameLoc);
+ } else {
+ LBrace = Callee->getLocStart();
+ RBrace = OpLoc;
+ }
+
+ return SemaRef.CreateOverloadedArraySubscriptExpr(LBrace, RBrace,
+ First, Second);
+ }
+
+ // Create the overloaded operator invocation for binary operators.
+ BinaryOperatorKind Opc = BinaryOperator::getOverloadedOpcode(Op);
+ ExprResult Result
+ = SemaRef.CreateOverloadedBinOp(OpLoc, Opc, Functions, Args[0], Args[1]);
+ if (Result.isInvalid())
+ return ExprError();
+
+ return move(Result);
+}
+
+template<typename Derived>
+ExprResult
+TreeTransform<Derived>::RebuildCXXPseudoDestructorExpr(Expr *Base,
+ SourceLocation OperatorLoc,
+ bool isArrow,
+ CXXScopeSpec &SS,
+ TypeSourceInfo *ScopeType,
+ SourceLocation CCLoc,
+ SourceLocation TildeLoc,
+ PseudoDestructorTypeStorage Destroyed) {
+ QualType BaseType = Base->getType();
+ if (Base->isTypeDependent() || Destroyed.getIdentifier() ||
+ (!isArrow && !BaseType->getAs<RecordType>()) ||
+ (isArrow && BaseType->getAs<PointerType>() &&
+ !BaseType->getAs<PointerType>()->getPointeeType()
+ ->template getAs<RecordType>())){
+ // This pseudo-destructor expression is still a pseudo-destructor.
+ return SemaRef.BuildPseudoDestructorExpr(Base, OperatorLoc,
+ isArrow? tok::arrow : tok::period,
+ SS, ScopeType, CCLoc, TildeLoc,
+ Destroyed,
+ /*FIXME?*/true);
+ }
+
+ TypeSourceInfo *DestroyedType = Destroyed.getTypeSourceInfo();
+ DeclarationName Name(SemaRef.Context.DeclarationNames.getCXXDestructorName(
+ SemaRef.Context.getCanonicalType(DestroyedType->getType())));
+ DeclarationNameInfo NameInfo(Name, Destroyed.getLocation());
+ NameInfo.setNamedTypeInfo(DestroyedType);
+
+ // FIXME: the ScopeType should be tacked onto SS.
+
+ SourceLocation TemplateKWLoc; // FIXME: retrieve it from caller.
+ return getSema().BuildMemberReferenceExpr(Base, BaseType,
+ OperatorLoc, isArrow,
+ SS, TemplateKWLoc,
+ /*FIXME: FirstQualifier*/ 0,
+ NameInfo,
+ /*TemplateArgs*/ 0);
+}
+
+} // end namespace clang
+
+#endif // LLVM_CLANG_SEMA_TREETRANSFORM_H
diff --git a/contrib/llvm/tools/clang/lib/Sema/TypeLocBuilder.h b/contrib/llvm/tools/clang/lib/Sema/TypeLocBuilder.h
new file mode 100644
index 0000000..7a5e43e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Sema/TypeLocBuilder.h
@@ -0,0 +1,201 @@
+//===--- TypeLocBuilder.h - Type Source Info collector ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines TypeLocBuilder, a class for building TypeLocs
+// bottom-up.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_TYPELOCBUILDER_H
+#define LLVM_CLANG_SEMA_TYPELOCBUILDER_H
+
+#include "clang/AST/TypeLoc.h"
+#include "clang/AST/ASTContext.h"
+
+namespace clang {
+
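+/// A TypeLocBuilder is filled from the innermost type outwards and then
+/// converted into a TypeSourceInfo or TypeLoc. A rough usage sketch for
+/// building the location data of "T *" (illustrative only; PointeeTSI,
+/// PtrTy, StarLoc, and Context stand in for caller-provided values, with
+/// PtrTy == Context.getPointerType(PointeeTSI->getType())):
+///
+/// \code
+///   TypeLocBuilder TLB;
+///   TLB.pushFullCopy(PointeeTSI->getTypeLoc());          // locations for T
+///   TLB.push<PointerTypeLoc>(PtrTy).setStarLoc(StarLoc); // the '*'
+///   TypeSourceInfo *TSI = TLB.getTypeSourceInfo(Context, PtrTy);
+/// \endcode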
+class TypeLocBuilder {
+ enum { InlineCapacity = 8 * sizeof(SourceLocation) };
+
+ /// The underlying location-data buffer. Data grows from the end
+ /// of the buffer backwards.
+ char *Buffer;
+
+ /// The capacity of the current buffer.
+ size_t Capacity;
+
+ /// The index of the first occupied byte in the buffer.
+ size_t Index;
+
+#ifndef NDEBUG
+ /// The last type pushed on this builder.
+ QualType LastTy;
+#endif
+
+ /// The inline buffer.
+ char InlineBuffer[InlineCapacity];
+
+ public:
+ TypeLocBuilder()
+ : Buffer(InlineBuffer), Capacity(InlineCapacity), Index(InlineCapacity) {}
+
+ ~TypeLocBuilder() {
+ if (Buffer != InlineBuffer)
+ delete[] Buffer;
+ }
+
+ /// Ensures that this buffer has at least as much capacity as described.
+ void reserve(size_t Requested) {
+ if (Requested > Capacity)
+ // For now, match the request exactly.
+ grow(Requested);
+ }
+
+ /// Pushes a copy of the given TypeLoc onto this builder. The builder
+ /// must be empty for this to work.
+ void pushFullCopy(TypeLoc L) {
+ size_t Size = L.getFullDataSize();
+ TypeLoc Copy = pushFullUninitializedImpl(L.getType(), Size);
+ memcpy(Copy.getOpaqueData(), L.getOpaqueData(), Size);
+ }
+
+ /// Pushes uninitialized space for the given type. The builder must
+ /// be empty.
+ TypeLoc pushFullUninitialized(QualType T) {
+ return pushFullUninitializedImpl(T, TypeLoc::getFullDataSizeForType(T));
+ }
+
+ /// Pushes space for a typespec TypeLoc. Invalidates any TypeLocs
+ /// previously retrieved from this builder.
+ TypeSpecTypeLoc pushTypeSpec(QualType T) {
+ size_t LocalSize = TypeSpecTypeLoc::LocalDataSize;
+ return cast<TypeSpecTypeLoc>(pushImpl(T, LocalSize));
+ }
+
+ /// Resets this builder to the newly-initialized state.
+ void clear() {
+#ifndef NDEBUG
+ LastTy = QualType();
+#endif
+ Index = Capacity;
+ }
+
+ /// \brief Tell the TypeLocBuilder that the type it is storing has been
+ /// modified in some safe way that doesn't affect type-location information.
+ void TypeWasModifiedSafely(QualType T) {
+#ifndef NDEBUG
+ LastTy = T;
+#endif
+ }
+
+ /// Pushes space for a new TypeLoc of the given type. Invalidates
+ /// any TypeLocs previously retrieved from this builder.
+ template <class TyLocType> TyLocType push(QualType T) {
+ size_t LocalSize = cast<TyLocType>(TypeLoc(T, 0)).getLocalDataSize();
+ return cast<TyLocType>(pushImpl(T, LocalSize));
+ }
+
+ /// Creates a TypeSourceInfo for the given type.
+ TypeSourceInfo *getTypeSourceInfo(ASTContext& Context, QualType T) {
+#ifndef NDEBUG
+ assert(T == LastTy && "type doesn't match last type pushed!");
+#endif
+
+ size_t FullDataSize = Capacity - Index;
+ TypeSourceInfo *DI = Context.CreateTypeSourceInfo(T, FullDataSize);
+ memcpy(DI->getTypeLoc().getOpaqueData(), &Buffer[Index], FullDataSize);
+ return DI;
+ }
+
+ /// \brief Copies the type-location information to the given AST context and
+ /// returns a \c TypeLoc referring into the AST context.
+ TypeLoc getTypeLocInContext(ASTContext &Context, QualType T) {
+#ifndef NDEBUG
+ assert(T == LastTy && "type doesn't match last type pushed!");
+#endif
+
+ size_t FullDataSize = Capacity - Index;
+ void *Mem = Context.Allocate(FullDataSize);
+ memcpy(Mem, &Buffer[Index], FullDataSize);
+ return TypeLoc(T, Mem);
+ }
+
+private:
+ TypeLoc pushImpl(QualType T, size_t LocalSize) {
+#ifndef NDEBUG
+ QualType TLast = TypeLoc(T, 0).getNextTypeLoc().getType();
+ assert(TLast == LastTy &&
+ "mismatch between last type and new type's inner type");
+ LastTy = T;
+#endif
+
+ // If we need to grow, grow by a factor of 2.
+ if (LocalSize > Index) {
+ size_t RequiredCapacity = Capacity + (LocalSize - Index);
+ size_t NewCapacity = Capacity * 2;
+ while (RequiredCapacity > NewCapacity)
+ NewCapacity *= 2;
+ grow(NewCapacity);
+ }
+
+ Index -= LocalSize;
+
+ return getTemporaryTypeLoc(T);
+ }
+
+ /// Grow to the given capacity.
+ void grow(size_t NewCapacity) {
+ assert(NewCapacity > Capacity);
+
+ // Allocate the new buffer and copy the old data into it.
+ char *NewBuffer = new char[NewCapacity];
+ unsigned NewIndex = Index + NewCapacity - Capacity;
+ memcpy(&NewBuffer[NewIndex],
+ &Buffer[Index],
+ Capacity - Index);
+
+ if (Buffer != InlineBuffer)
+ delete[] Buffer;
+
+ Buffer = NewBuffer;
+ Capacity = NewCapacity;
+ Index = NewIndex;
+ }
+
+ TypeLoc pushFullUninitializedImpl(QualType T, size_t Size) {
+#ifndef NDEBUG
+ assert(LastTy.isNull() && "pushing full on non-empty TypeLocBuilder");
+ LastTy = T;
+#endif
+ assert(Index == Capacity && "pushing full on non-empty TypeLocBuilder");
+
+ reserve(Size);
+ Index -= Size;
+
+ return getTemporaryTypeLoc(T);
+ }
+
+public:
+ /// \brief Retrieve a temporary TypeLoc that refers into this \c TypeLocBuilder
+ /// object.
+ ///
+ /// The resulting \c TypeLoc should only be used so long as the
+ /// \c TypeLocBuilder is active and has not had more type information
+ /// pushed into it.
+ TypeLoc getTemporaryTypeLoc(QualType T) {
+#ifndef NDEBUG
+ assert(LastTy == T && "type doesn't match last type pushed!");
+#endif
+ return TypeLoc(T, &Buffer[Index]);
+ }
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp
new file mode 100644
index 0000000..67f74f7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.cpp
@@ -0,0 +1,77 @@
+//===--- ASTCommon.cpp - Common stuff for ASTReader/ASTWriter----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines common functions that both ASTReader and ASTWriter use.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ASTCommon.h"
+#include "clang/Serialization/ASTDeserializationListener.h"
+#include "clang/Basic/IdentifierTable.h"
+#include "llvm/ADT/StringExtras.h"
+
+using namespace clang;
+
+// Give ASTDeserializationListener's VTable a home.
+ASTDeserializationListener::~ASTDeserializationListener() { }
+
+serialization::TypeIdx
+serialization::TypeIdxFromBuiltin(const BuiltinType *BT) {
+ unsigned ID = 0;
+ switch (BT->getKind()) {
+ case BuiltinType::Void: ID = PREDEF_TYPE_VOID_ID; break;
+ case BuiltinType::Bool: ID = PREDEF_TYPE_BOOL_ID; break;
+ case BuiltinType::Char_U: ID = PREDEF_TYPE_CHAR_U_ID; break;
+ case BuiltinType::UChar: ID = PREDEF_TYPE_UCHAR_ID; break;
+ case BuiltinType::UShort: ID = PREDEF_TYPE_USHORT_ID; break;
+ case BuiltinType::UInt: ID = PREDEF_TYPE_UINT_ID; break;
+ case BuiltinType::ULong: ID = PREDEF_TYPE_ULONG_ID; break;
+ case BuiltinType::ULongLong: ID = PREDEF_TYPE_ULONGLONG_ID; break;
+ case BuiltinType::UInt128: ID = PREDEF_TYPE_UINT128_ID; break;
+ case BuiltinType::Char_S: ID = PREDEF_TYPE_CHAR_S_ID; break;
+ case BuiltinType::SChar: ID = PREDEF_TYPE_SCHAR_ID; break;
+ case BuiltinType::WChar_S:
+ case BuiltinType::WChar_U: ID = PREDEF_TYPE_WCHAR_ID; break;
+ case BuiltinType::Short: ID = PREDEF_TYPE_SHORT_ID; break;
+ case BuiltinType::Int: ID = PREDEF_TYPE_INT_ID; break;
+ case BuiltinType::Long: ID = PREDEF_TYPE_LONG_ID; break;
+ case BuiltinType::LongLong: ID = PREDEF_TYPE_LONGLONG_ID; break;
+ case BuiltinType::Int128: ID = PREDEF_TYPE_INT128_ID; break;
+ case BuiltinType::Half: ID = PREDEF_TYPE_HALF_ID; break;
+ case BuiltinType::Float: ID = PREDEF_TYPE_FLOAT_ID; break;
+ case BuiltinType::Double: ID = PREDEF_TYPE_DOUBLE_ID; break;
+ case BuiltinType::LongDouble: ID = PREDEF_TYPE_LONGDOUBLE_ID; break;
+ case BuiltinType::NullPtr: ID = PREDEF_TYPE_NULLPTR_ID; break;
+ case BuiltinType::Char16: ID = PREDEF_TYPE_CHAR16_ID; break;
+ case BuiltinType::Char32: ID = PREDEF_TYPE_CHAR32_ID; break;
+ case BuiltinType::Overload: ID = PREDEF_TYPE_OVERLOAD_ID; break;
+  case BuiltinType::BoundMember: ID = PREDEF_TYPE_BOUND_MEMBER; break;
+  case BuiltinType::PseudoObject: ID = PREDEF_TYPE_PSEUDO_OBJECT; break;
+ case BuiltinType::Dependent: ID = PREDEF_TYPE_DEPENDENT_ID; break;
+ case BuiltinType::UnknownAny: ID = PREDEF_TYPE_UNKNOWN_ANY; break;
+ case BuiltinType::ARCUnbridgedCast:
+ ID = PREDEF_TYPE_ARC_UNBRIDGED_CAST; break;
+ case BuiltinType::ObjCId: ID = PREDEF_TYPE_OBJC_ID; break;
+ case BuiltinType::ObjCClass: ID = PREDEF_TYPE_OBJC_CLASS; break;
+ case BuiltinType::ObjCSel: ID = PREDEF_TYPE_OBJC_SEL; break;
+ }
+
+ return TypeIdx(ID);
+}
+
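+/// Compute the hash of a selector: a zero-argument selector still has one
+/// name slot, and each slot's identifier (when present) is folded in with
+/// llvm::HashString starting from the seed 5381.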
+unsigned serialization::ComputeHash(Selector Sel) {
+ unsigned N = Sel.getNumArgs();
+ if (N == 0)
+ ++N;
+ unsigned R = 5381;
+ for (unsigned I = 0; I != N; ++I)
+ if (IdentifierInfo *II = Sel.getIdentifierInfoForSlot(I))
+ R = llvm::HashString(II->getName(), R);
+ return R;
+}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h
new file mode 100644
index 0000000..16db8e3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTCommon.h
@@ -0,0 +1,63 @@
+//===- ASTCommon.h - Common stuff for ASTReader/ASTWriter -*- C++ -*-=========//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines common functions that both ASTReader and ASTWriter use.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SERIALIZATION_LIB_AST_COMMON_H
+#define LLVM_CLANG_SERIALIZATION_LIB_AST_COMMON_H
+
+#include "clang/Serialization/ASTBitCodes.h"
+#include "clang/AST/ASTContext.h"
+
+namespace clang {
+
+namespace serialization {
+
+enum DeclUpdateKind {
+ UPD_CXX_ADDED_IMPLICIT_MEMBER,
+ UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION,
+ UPD_CXX_ADDED_ANONYMOUS_NAMESPACE,
+ UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER
+};
+
+TypeIdx TypeIdxFromBuiltin(const BuiltinType *BT);
+
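+/// Form the TypeID for a (possibly qualified) type: any local fast
+/// qualifiers are split off and re-encoded into the ID, builtin and other
+/// predefined types map to fixed IDs, and all remaining types are numbered
+/// through the caller-supplied IdxForType functor.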
+template <typename IdxForTypeTy>
+TypeID MakeTypeID(ASTContext &Context, QualType T, IdxForTypeTy IdxForType) {
+ if (T.isNull())
+ return PREDEF_TYPE_NULL_ID;
+
+ unsigned FastQuals = T.getLocalFastQualifiers();
+ T.removeLocalFastQualifiers();
+
+ if (T.hasLocalNonFastQualifiers())
+ return IdxForType(T).asTypeID(FastQuals);
+
+ assert(!T.hasLocalQualifiers());
+
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(T.getTypePtr()))
+ return TypeIdxFromBuiltin(BT).asTypeID(FastQuals);
+
+ if (T == Context.AutoDeductTy)
+ return TypeIdx(PREDEF_TYPE_AUTO_DEDUCT).asTypeID(FastQuals);
+ if (T == Context.AutoRRefDeductTy)
+ return TypeIdx(PREDEF_TYPE_AUTO_RREF_DEDUCT).asTypeID(FastQuals);
+
+ return IdxForType(T).asTypeID(FastQuals);
+}
+
+unsigned ComputeHash(Selector Sel);
+
+} // namespace serialization
+
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp
new file mode 100644
index 0000000..f91b66c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReader.cpp
@@ -0,0 +1,6369 @@
+//===--- ASTReader.cpp - AST File Reader ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTReader class, which reads AST files.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Serialization/ASTReader.h"
+#include "clang/Serialization/ASTDeserializationListener.h"
+#include "clang/Serialization/ModuleManager.h"
+#include "clang/Serialization/SerializationDiagnostic.h"
+#include "ASTCommon.h"
+#include "ASTReaderInternals.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/Scope.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLocVisitor.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/PreprocessingRecord.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Basic/OnDiskHashTable.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/SourceManagerInternals.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/Version.h"
+#include "clang/Basic/VersionTuple.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/system_error.h"
+#include <algorithm>
+#include <iterator>
+#include <cstdio>
+#include <sys/stat.h>
+
+using namespace clang;
+using namespace clang::serialization;
+using namespace clang::serialization::reader;
+
+//===----------------------------------------------------------------------===//
+// PCH validator implementation
+//===----------------------------------------------------------------------===//
+
+ASTReaderListener::~ASTReaderListener() {}
+
+bool
+PCHValidator::ReadLanguageOptions(const LangOptions &LangOpts) {
+ const LangOptions &PPLangOpts = PP.getLangOpts();
+
+#define LANGOPT(Name, Bits, Default, Description) \
+ if (PPLangOpts.Name != LangOpts.Name) { \
+ Reader.Diag(diag::err_pch_langopt_mismatch) \
+ << Description << LangOpts.Name << PPLangOpts.Name; \
+ return true; \
+ }
+
+#define VALUE_LANGOPT(Name, Bits, Default, Description) \
+ if (PPLangOpts.Name != LangOpts.Name) { \
+ Reader.Diag(diag::err_pch_langopt_value_mismatch) \
+ << Description; \
+ return true; \
+  }
+
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ if (PPLangOpts.get##Name() != LangOpts.get##Name()) { \
+ Reader.Diag(diag::err_pch_langopt_value_mismatch) \
+ << Description; \
+ return true; \
+ }
+
+#define BENIGN_LANGOPT(Name, Bits, Default, Description)
+#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description)
+#include "clang/Basic/LangOptions.def"
+
+ return false;
+}
+
+bool PCHValidator::ReadTargetTriple(StringRef Triple) {
+ if (Triple == PP.getTargetInfo().getTriple().str())
+ return false;
+
+ Reader.Diag(diag::warn_pch_target_triple)
+ << Triple << PP.getTargetInfo().getTriple().str();
+ return true;
+}
+
+namespace {
+ struct EmptyStringRef {
+ bool operator ()(StringRef r) const { return r.empty(); }
+ };
+ struct EmptyBlock {
+ bool operator ()(const PCHPredefinesBlock &r) const {return r.Data.empty();}
+ };
+}
+
+static bool EqualConcatenations(SmallVector<StringRef, 2> L,
+ PCHPredefinesBlocks R) {
+ // First, sum up the lengths.
+ unsigned LL = 0, RL = 0;
+ for (unsigned I = 0, N = L.size(); I != N; ++I) {
+ LL += L[I].size();
+ }
+ for (unsigned I = 0, N = R.size(); I != N; ++I) {
+ RL += R[I].Data.size();
+ }
+ if (LL != RL)
+ return false;
+ if (LL == 0 && RL == 0)
+ return true;
+
+ // Kick out empty parts, they confuse the algorithm below.
+ L.erase(std::remove_if(L.begin(), L.end(), EmptyStringRef()), L.end());
+ R.erase(std::remove_if(R.begin(), R.end(), EmptyBlock()), R.end());
+
+ // Do it the hard way. At this point, both vectors must be non-empty.
+ StringRef LR = L[0], RR = R[0].Data;
+ unsigned LI = 0, RI = 0, LN = L.size(), RN = R.size();
+ (void) RN;
+ for (;;) {
+ // Compare the current pieces.
+ if (LR.size() == RR.size()) {
+ // If they're the same length, it's pretty easy.
+ if (LR != RR)
+ return false;
+ // Both pieces are done, advance.
+ ++LI;
+ ++RI;
+ // If either string is done, they're both done, since they're the same
+ // length.
+ if (LI == LN) {
+ assert(RI == RN && "Strings not the same length after all?");
+ return true;
+ }
+ LR = L[LI];
+ RR = R[RI].Data;
+ } else if (LR.size() < RR.size()) {
+ // Right piece is longer.
+ if (!RR.startswith(LR))
+ return false;
+ ++LI;
+ assert(LI != LN && "Strings not the same length after all?");
+ RR = RR.substr(LR.size());
+ LR = L[LI];
+ } else {
+ // Left piece is longer.
+ if (!LR.startswith(RR))
+ return false;
+ ++RI;
+ assert(RI != RN && "Strings not the same length after all?");
+ LR = LR.substr(RR.size());
+ RR = R[RI].Data;
+ }
+ }
+}
+
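+/// Find MacroDef within the PCH predefines buffers, returning the FileID of
+/// the buffer that contains it and the offset of the definition within that
+/// buffer's data.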
+static std::pair<FileID, StringRef::size_type>
+FindMacro(const PCHPredefinesBlocks &Buffers, StringRef MacroDef) {
+ std::pair<FileID, StringRef::size_type> Res;
+ for (unsigned I = 0, N = Buffers.size(); I != N; ++I) {
+ Res.second = Buffers[I].Data.find(MacroDef);
+ if (Res.second != StringRef::npos) {
+ Res.first = Buffers[I].BufferID;
+ break;
+ }
+ }
+ return Res;
+}
+
+bool PCHValidator::ReadPredefinesBuffer(const PCHPredefinesBlocks &Buffers,
+ StringRef OriginalFileName,
+ std::string &SuggestedPredefines,
+ FileManager &FileMgr) {
+ // We are in the context of an implicit include, so the predefines buffer will
+ // have a #include entry for the PCH file itself (as normalized by the
+ // preprocessor initialization). Find it and skip over it in the checking
+ // below.
+ SmallString<256> PCHInclude;
+ PCHInclude += "#include \"";
+ PCHInclude += HeaderSearch::NormalizeDashIncludePath(OriginalFileName,
+ FileMgr);
+ PCHInclude += "\"\n";
+ std::pair<StringRef,StringRef> Split =
+ StringRef(PP.getPredefines()).split(PCHInclude.str());
+ StringRef Left = Split.first, Right = Split.second;
+ if (Left == PP.getPredefines()) {
+ Error("Missing PCH include entry!");
+ return true;
+ }
+
+ // If the concatenation of all the PCH buffers is equal to the adjusted
+ // command line, we're done.
+ SmallVector<StringRef, 2> CommandLine;
+ CommandLine.push_back(Left);
+ CommandLine.push_back(Right);
+ if (EqualConcatenations(CommandLine, Buffers))
+ return false;
+
+ SourceManager &SourceMgr = PP.getSourceManager();
+
+ // The predefines buffers are different. Determine what the differences are,
+ // and whether they require us to reject the PCH file.
+ SmallVector<StringRef, 8> PCHLines;
+ for (unsigned I = 0, N = Buffers.size(); I != N; ++I)
+ Buffers[I].Data.split(PCHLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
+
+ SmallVector<StringRef, 8> CmdLineLines;
+ Left.split(CmdLineLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
+
+ // Pick out implicit #includes after the PCH and don't consider them for
+ // validation; we will insert them into SuggestedPredefines so that the
+ // preprocessor includes them.
+ std::string IncludesAfterPCH;
+ SmallVector<StringRef, 8> AfterPCHLines;
+ Right.split(AfterPCHLines, "\n", /*MaxSplit=*/-1, /*KeepEmpty=*/false);
+ for (unsigned i = 0, e = AfterPCHLines.size(); i != e; ++i) {
+ if (AfterPCHLines[i].startswith("#include ")) {
+ IncludesAfterPCH += AfterPCHLines[i];
+ IncludesAfterPCH += '\n';
+ } else {
+ CmdLineLines.push_back(AfterPCHLines[i]);
+ }
+ }
+
+ // Make sure we add the includes last into SuggestedPredefines before we
+ // exit this function.
+ struct AddIncludesRAII {
+ std::string &SuggestedPredefines;
+ std::string &IncludesAfterPCH;
+
+ AddIncludesRAII(std::string &SuggestedPredefines,
+ std::string &IncludesAfterPCH)
+ : SuggestedPredefines(SuggestedPredefines),
+ IncludesAfterPCH(IncludesAfterPCH) { }
+ ~AddIncludesRAII() {
+ SuggestedPredefines += IncludesAfterPCH;
+ }
+ } AddIncludes(SuggestedPredefines, IncludesAfterPCH);
+
+ // Sort both sets of predefined buffer lines, since we allow some extra
+ // definitions and they may appear at any point in the output.
+ std::sort(CmdLineLines.begin(), CmdLineLines.end());
+ std::sort(PCHLines.begin(), PCHLines.end());
+
+ // Determine which of the predefines used to build the PCH file are missing
+ // from the command line.
+ std::vector<StringRef> MissingPredefines;
+ std::set_difference(PCHLines.begin(), PCHLines.end(),
+ CmdLineLines.begin(), CmdLineLines.end(),
+ std::back_inserter(MissingPredefines));
+
+ bool MissingDefines = false;
+ bool ConflictingDefines = false;
+ for (unsigned I = 0, N = MissingPredefines.size(); I != N; ++I) {
+ StringRef Missing = MissingPredefines[I];
+ if (Missing.startswith("#include ")) {
+ // An -include was specified when generating the PCH; it is included in
+ // the PCH, so just ignore it.
+ continue;
+ }
+ if (!Missing.startswith("#define ")) {
+ Reader.Diag(diag::warn_pch_compiler_options_mismatch);
+ return true;
+ }
+
+ // This is a macro definition. Determine the name of the macro we're
+ // defining.
+ std::string::size_type StartOfMacroName = strlen("#define ");
+ std::string::size_type EndOfMacroName
+ = Missing.find_first_of("( \n\r", StartOfMacroName);
+ assert(EndOfMacroName != std::string::npos &&
+ "Couldn't find the end of the macro name");
+ StringRef MacroName = Missing.slice(StartOfMacroName, EndOfMacroName);
+
+ // Determine whether this macro was given a different definition on the
+ // command line.
+ std::string MacroDefStart = "#define " + MacroName.str();
+ std::string::size_type MacroDefLen = MacroDefStart.size();
+ SmallVector<StringRef, 8>::iterator ConflictPos
+ = std::lower_bound(CmdLineLines.begin(), CmdLineLines.end(),
+ MacroDefStart);
+ for (; ConflictPos != CmdLineLines.end(); ++ConflictPos) {
+ if (!ConflictPos->startswith(MacroDefStart)) {
+ // Different macro; we're done.
+ ConflictPos = CmdLineLines.end();
+ break;
+ }
+
+ assert(ConflictPos->size() > MacroDefLen &&
+ "Invalid #define in predefines buffer?");
+ if ((*ConflictPos)[MacroDefLen] != ' ' &&
+ (*ConflictPos)[MacroDefLen] != '(')
+ continue; // Longer macro name; keep trying.
+
+ // We found a conflicting macro definition.
+ break;
+ }
+
+ if (ConflictPos != CmdLineLines.end()) {
+ Reader.Diag(diag::warn_cmdline_conflicting_macro_def)
+ << MacroName;
+
+ // Show the definition of this macro within the PCH file.
+ std::pair<FileID, StringRef::size_type> MacroLoc =
+ FindMacro(Buffers, Missing);
+ assert(MacroLoc.second!=StringRef::npos && "Unable to find macro!");
+ SourceLocation PCHMissingLoc =
+ SourceMgr.getLocForStartOfFile(MacroLoc.first)
+ .getLocWithOffset(MacroLoc.second);
+ Reader.Diag(PCHMissingLoc, diag::note_pch_macro_defined_as) << MacroName;
+
+ ConflictingDefines = true;
+ continue;
+ }
+
+ // If the macro doesn't conflict, then we'll just pick up the macro
+ // definition from the PCH file. Warn the user that they made a mistake.
+ if (ConflictingDefines)
+ continue; // Don't complain if there are already conflicting defs
+
+ if (!MissingDefines) {
+ Reader.Diag(diag::warn_cmdline_missing_macro_defs);
+ MissingDefines = true;
+ }
+
+ // Show the definition of this macro within the PCH file.
+ std::pair<FileID, StringRef::size_type> MacroLoc =
+ FindMacro(Buffers, Missing);
+ assert(MacroLoc.second!=StringRef::npos && "Unable to find macro!");
+ SourceLocation PCHMissingLoc =
+ SourceMgr.getLocForStartOfFile(MacroLoc.first)
+ .getLocWithOffset(MacroLoc.second);
+ Reader.Diag(PCHMissingLoc, diag::note_using_macro_def_from_pch);
+ }
+
+ if (ConflictingDefines)
+ return true;
+
+ // Determine what predefines were introduced based on command-line
+ // parameters that were not present when building the PCH
+ // file. Extra #defines are okay, so long as the identifiers being
+ // defined were not used within the precompiled header.
+ std::vector<StringRef> ExtraPredefines;
+ std::set_difference(CmdLineLines.begin(), CmdLineLines.end(),
+ PCHLines.begin(), PCHLines.end(),
+ std::back_inserter(ExtraPredefines));
+ for (unsigned I = 0, N = ExtraPredefines.size(); I != N; ++I) {
+ StringRef &Extra = ExtraPredefines[I];
+ if (!Extra.startswith("#define ")) {
+ Reader.Diag(diag::warn_pch_compiler_options_mismatch);
+ return true;
+ }
+
+ // This is an extra macro definition. Determine the name of the
+ // macro we're defining.
+ std::string::size_type StartOfMacroName = strlen("#define ");
+ std::string::size_type EndOfMacroName
+ = Extra.find_first_of("( \n\r", StartOfMacroName);
+ assert(EndOfMacroName != std::string::npos &&
+ "Couldn't find the end of the macro name");
+ StringRef MacroName = Extra.slice(StartOfMacroName, EndOfMacroName);
+
+ // Check whether this name was used somewhere in the PCH file. If
+ // so, defining it as a macro could change behavior, so we reject
+ // the PCH file.
+ if (IdentifierInfo *II = Reader.get(MacroName)) {
+ Reader.Diag(diag::warn_macro_name_used_in_pch) << II;
+ return true;
+ }
+
+ // Add this definition to the suggested predefines buffer.
+ SuggestedPredefines += Extra;
+ SuggestedPredefines += '\n';
+ }
+
+ // If we get here, it's because the predefines buffer had compatible
+ // contents. Accept the PCH file.
+ return false;
+}
+
+void PCHValidator::ReadHeaderFileInfo(const HeaderFileInfo &HFI,
+ unsigned ID) {
+ PP.getHeaderSearchInfo().setHeaderFileInfoForUID(HFI, ID);
+ ++NumHeaderInfos;
+}
+
+void PCHValidator::ReadCounter(unsigned Value) {
+ PP.setCounterValue(Value);
+}
+
+//===----------------------------------------------------------------------===//
+// AST reader implementation
+//===----------------------------------------------------------------------===//
+
+void
+ASTReader::setDeserializationListener(ASTDeserializationListener *Listener) {
+ DeserializationListener = Listener;
+}
+
+unsigned ASTSelectorLookupTrait::ComputeHash(Selector Sel) {
+ return serialization::ComputeHash(Sel);
+}
+
+std::pair<unsigned, unsigned>
+ASTSelectorLookupTrait::ReadKeyDataLength(const unsigned char*& d) {
+ using namespace clang::io;
+ unsigned KeyLen = ReadUnalignedLE16(d);
+ unsigned DataLen = ReadUnalignedLE16(d);
+ return std::make_pair(KeyLen, DataLen);
+}
+
+ASTSelectorLookupTrait::internal_key_type
+ASTSelectorLookupTrait::ReadKey(const unsigned char* d, unsigned) {
+ using namespace clang::io;
+ SelectorTable &SelTable = Reader.getContext().Selectors;
+ unsigned N = ReadUnalignedLE16(d);
+ IdentifierInfo *FirstII
+ = Reader.getLocalIdentifier(F, ReadUnalignedLE32(d));
+ if (N == 0)
+ return SelTable.getNullarySelector(FirstII);
+ else if (N == 1)
+ return SelTable.getUnarySelector(FirstII);
+
+ SmallVector<IdentifierInfo *, 16> Args;
+ Args.push_back(FirstII);
+ for (unsigned I = 1; I != N; ++I)
+ Args.push_back(Reader.getLocalIdentifier(F, ReadUnalignedLE32(d)));
+
+ return SelTable.getSelector(N, Args.data());
+}
+
+ASTSelectorLookupTrait::data_type
+ASTSelectorLookupTrait::ReadData(Selector, const unsigned char* d,
+ unsigned DataLen) {
+ using namespace clang::io;
+
+ data_type Result;
+
+ Result.ID = Reader.getGlobalSelectorID(F, ReadUnalignedLE32(d));
+ unsigned NumInstanceMethods = ReadUnalignedLE16(d);
+ unsigned NumFactoryMethods = ReadUnalignedLE16(d);
+
+ // Load instance methods
+ for (unsigned I = 0; I != NumInstanceMethods; ++I) {
+ if (ObjCMethodDecl *Method
+ = Reader.GetLocalDeclAs<ObjCMethodDecl>(F, ReadUnalignedLE32(d)))
+ Result.Instance.push_back(Method);
+ }
+
+ // Load factory methods
+ for (unsigned I = 0; I != NumFactoryMethods; ++I) {
+ if (ObjCMethodDecl *Method
+ = Reader.GetLocalDeclAs<ObjCMethodDecl>(F, ReadUnalignedLE32(d)))
+ Result.Factory.push_back(Method);
+ }
+
+ return Result;
+}
+
+unsigned ASTIdentifierLookupTrait::ComputeHash(const internal_key_type& a) {
+ return llvm::HashString(StringRef(a.first, a.second));
+}
+
+std::pair<unsigned, unsigned>
+ASTIdentifierLookupTrait::ReadKeyDataLength(const unsigned char*& d) {
+ using namespace clang::io;
+ unsigned DataLen = ReadUnalignedLE16(d);
+ unsigned KeyLen = ReadUnalignedLE16(d);
+ return std::make_pair(KeyLen, DataLen);
+}
+
+std::pair<const char*, unsigned>
+ASTIdentifierLookupTrait::ReadKey(const unsigned char* d, unsigned n) {
+ assert(n >= 2 && d[n-1] == '\0');
+ return std::make_pair((const char*) d, n-1);
+}
+
+IdentifierInfo *ASTIdentifierLookupTrait::ReadData(const internal_key_type& k,
+ const unsigned char* d,
+ unsigned DataLen) {
+ using namespace clang::io;
+ unsigned RawID = ReadUnalignedLE32(d);
+ bool IsInteresting = RawID & 0x01;
+
+ // Wipe out the "is interesting" bit.
+ RawID = RawID >> 1;
+
+ IdentID ID = Reader.getGlobalIdentifierID(F, RawID);
+ if (!IsInteresting) {
+ // For uninteresting identifiers, just build the IdentifierInfo
+ // and associate it with the persistent ID.
+ IdentifierInfo *II = KnownII;
+ if (!II) {
+ II = &Reader.getIdentifierTable().getOwn(StringRef(k.first, k.second));
+ KnownII = II;
+ }
+ Reader.SetIdentifierInfo(ID, II);
+ II->setIsFromAST();
+ Reader.markIdentifierUpToDate(II);
+ return II;
+ }
+
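+ // Decode the flag word: from the low bit upward it packs the C++
+ // operator-keyword, reverted-token-ID, poisoned, extension-token and
+ // has-macro-definition flags, followed by an 11-bit ObjC/builtin ID.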
+ unsigned Bits = ReadUnalignedLE16(d);
+ bool CPlusPlusOperatorKeyword = Bits & 0x01;
+ Bits >>= 1;
+ bool HasRevertedTokenIDToIdentifier = Bits & 0x01;
+ Bits >>= 1;
+ bool Poisoned = Bits & 0x01;
+ Bits >>= 1;
+ bool ExtensionToken = Bits & 0x01;
+ Bits >>= 1;
+ bool hasMacroDefinition = Bits & 0x01;
+ Bits >>= 1;
+ unsigned ObjCOrBuiltinID = Bits & 0x7FF;
+ Bits >>= 11;
+
+ assert(Bits == 0 && "Extra bits in the identifier?");
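+ // Account for the 32-bit ID and the 16-bit flag word consumed above.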
+ DataLen -= 6;
+
+ // Build the IdentifierInfo itself and link the identifier ID with
+ // the new IdentifierInfo.
+ IdentifierInfo *II = KnownII;
+ if (!II) {
+ II = &Reader.getIdentifierTable().getOwn(StringRef(k.first, k.second));
+ KnownII = II;
+ }
+ Reader.markIdentifierUpToDate(II);
+ II->setIsFromAST();
+
+ // Set or check the various bits in the IdentifierInfo structure.
+ // Token IDs are read-only.
+ if (HasRevertedTokenIDToIdentifier)
+ II->RevertTokenIDToIdentifier();
+ II->setObjCOrBuiltinID(ObjCOrBuiltinID);
+ assert(II->isExtensionToken() == ExtensionToken &&
+ "Incorrect extension token flag");
+ (void)ExtensionToken;
+ if (Poisoned)
+ II->setIsPoisoned(true);
+ assert(II->isCPlusPlusOperatorKeyword() == CPlusPlusOperatorKeyword &&
+ "Incorrect C++ operator keyword flag");
+ (void)CPlusPlusOperatorKeyword;
+
+ // If this identifier is a macro, deserialize the macro
+ // definition.
+ if (hasMacroDefinition) {
+ // FIXME: Check for conflicts?
+ uint32_t Offset = ReadUnalignedLE32(d);
+ unsigned LocalSubmoduleID = ReadUnalignedLE32(d);
+
+ // Determine whether this macro definition should be visible now, or
+ // whether it is in a hidden submodule.
+ bool Visible = true;
+ if (SubmoduleID GlobalSubmoduleID
+ = Reader.getGlobalSubmoduleID(F, LocalSubmoduleID)) {
+ if (Module *Owner = Reader.getSubmodule(GlobalSubmoduleID)) {
+ if (Owner->NameVisibility == Module::Hidden) {
+ // The owning module is not visible, and this macro definition should
+ // not be, either.
+ Visible = false;
+
+ // Note that this macro definition was hidden because its owning
+ // module is not yet visible.
+ Reader.HiddenNamesMap[Owner].push_back(II);
+ }
+ }
+ }
+
+ Reader.setIdentifierIsMacro(II, F, Offset, Visible);
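+ // Account for the 32-bit macro offset and 32-bit submodule ID consumed above.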
+ DataLen -= 8;
+ }
+
+ Reader.SetIdentifierInfo(ID, II);
+
+ // Read all of the declarations visible at global scope with this
+ // name.
+ if (DataLen > 0) {
+ SmallVector<uint32_t, 4> DeclIDs;
+ for (; DataLen > 0; DataLen -= 4)
+ DeclIDs.push_back(Reader.getGlobalDeclID(F, ReadUnalignedLE32(d)));
+ Reader.SetGloballyVisibleDecls(II, DeclIDs);
+ }
+
+ return II;
+}
+
+unsigned
+ASTDeclContextNameLookupTrait::ComputeHash(const DeclNameKey &Key) const {
+ llvm::FoldingSetNodeID ID;
+ ID.AddInteger(Key.Kind);
+
+ switch (Key.Kind) {
+ case DeclarationName::Identifier:
+ case DeclarationName::CXXLiteralOperatorName:
+ ID.AddString(((IdentifierInfo*)Key.Data)->getName());
+ break;
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ ID.AddInteger(serialization::ComputeHash(Selector(Key.Data)));
+ break;
+ case DeclarationName::CXXOperatorName:
+ ID.AddInteger((OverloadedOperatorKind)Key.Data);
+ break;
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ case DeclarationName::CXXUsingDirective:
+ break;
+ }
+
+ return ID.ComputeHash();
+}
+
+ASTDeclContextNameLookupTrait::internal_key_type
+ASTDeclContextNameLookupTrait::GetInternalKey(
+ const external_key_type& Name) const {
+ DeclNameKey Key;
+ Key.Kind = Name.getNameKind();
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ Key.Data = (uint64_t)Name.getAsIdentifierInfo();
+ break;
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ Key.Data = (uint64_t)Name.getObjCSelector().getAsOpaquePtr();
+ break;
+ case DeclarationName::CXXOperatorName:
+ Key.Data = Name.getCXXOverloadedOperator();
+ break;
+ case DeclarationName::CXXLiteralOperatorName:
+ Key.Data = (uint64_t)Name.getCXXLiteralIdentifier();
+ break;
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ case DeclarationName::CXXUsingDirective:
+ Key.Data = 0;
+ break;
+ }
+
+ return Key;
+}
+
+ASTDeclContextNameLookupTrait::external_key_type
+ASTDeclContextNameLookupTrait::GetExternalKey(
+ const internal_key_type& Key) const {
+ ASTContext &Context = Reader.getContext();
+ switch (Key.Kind) {
+ case DeclarationName::Identifier:
+ return DeclarationName((IdentifierInfo*)Key.Data);
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ return DeclarationName(Selector(Key.Data));
+
+ case DeclarationName::CXXConstructorName:
+ return Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(Reader.getLocalType(F, Key.Data)));
+
+ case DeclarationName::CXXDestructorName:
+ return Context.DeclarationNames.getCXXDestructorName(
+ Context.getCanonicalType(Reader.getLocalType(F, Key.Data)));
+
+ case DeclarationName::CXXConversionFunctionName:
+ return Context.DeclarationNames.getCXXConversionFunctionName(
+ Context.getCanonicalType(Reader.getLocalType(F, Key.Data)));
+
+ case DeclarationName::CXXOperatorName:
+ return Context.DeclarationNames.getCXXOperatorName(
+ (OverloadedOperatorKind)Key.Data);
+
+ case DeclarationName::CXXLiteralOperatorName:
+ return Context.DeclarationNames.getCXXLiteralOperatorName(
+ (IdentifierInfo*)Key.Data);
+
+ case DeclarationName::CXXUsingDirective:
+ return DeclarationName::getUsingDirectiveName();
+ }
+
+ llvm_unreachable("Invalid Name Kind ?");
+}
+
+std::pair<unsigned, unsigned>
+ASTDeclContextNameLookupTrait::ReadKeyDataLength(const unsigned char*& d) {
+ using namespace clang::io;
+ unsigned KeyLen = ReadUnalignedLE16(d);
+ unsigned DataLen = ReadUnalignedLE16(d);
+ return std::make_pair(KeyLen, DataLen);
+}
+
+ASTDeclContextNameLookupTrait::internal_key_type
+ASTDeclContextNameLookupTrait::ReadKey(const unsigned char* d, unsigned) {
+ using namespace clang::io;
+
+ DeclNameKey Key;
+ Key.Kind = (DeclarationName::NameKind)*d++;
+ switch (Key.Kind) {
+ case DeclarationName::Identifier:
+ Key.Data = (uint64_t)Reader.getLocalIdentifier(F, ReadUnalignedLE32(d));
+ break;
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ Key.Data =
+ (uint64_t)Reader.getLocalSelector(F, ReadUnalignedLE32(d))
+ .getAsOpaquePtr();
+ break;
+ case DeclarationName::CXXOperatorName:
+ Key.Data = *d++; // OverloadedOperatorKind
+ break;
+ case DeclarationName::CXXLiteralOperatorName:
+ Key.Data = (uint64_t)Reader.getLocalIdentifier(F, ReadUnalignedLE32(d));
+ break;
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ case DeclarationName::CXXUsingDirective:
+ Key.Data = 0;
+ break;
+ }
+
+ return Key;
+}
+
+ASTDeclContextNameLookupTrait::data_type
+ASTDeclContextNameLookupTrait::ReadData(internal_key_type,
+ const unsigned char* d,
+ unsigned DataLen) {
+ using namespace clang::io;
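+ // The data is a 16-bit declaration count followed by that many little-endian
+ // 32-bit DeclIDs stored directly in the table blob; return iterators over
+ // them in place.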
+ unsigned NumDecls = ReadUnalignedLE16(d);
+ LE32DeclID *Start = (LE32DeclID *)d;
+ return std::make_pair(Start, Start + NumDecls);
+}
+
+bool ASTReader::ReadDeclContextStorage(ModuleFile &M,
+ llvm::BitstreamCursor &Cursor,
+ const std::pair<uint64_t, uint64_t> &Offsets,
+ DeclContextInfo &Info) {
+ SavedStreamPosition SavedPosition(Cursor);
+ // First the lexical decls.
+ if (Offsets.first != 0) {
+ Cursor.JumpToBit(Offsets.first);
+
+ RecordData Record;
+ const char *Blob;
+ unsigned BlobLen;
+ unsigned Code = Cursor.ReadCode();
+ unsigned RecCode = Cursor.ReadRecord(Code, Record, &Blob, &BlobLen);
+ if (RecCode != DECL_CONTEXT_LEXICAL) {
+ Error("Expected lexical block");
+ return true;
+ }
+
+ Info.LexicalDecls = reinterpret_cast<const KindDeclIDPair*>(Blob);
+ Info.NumLexicalDecls = BlobLen / sizeof(KindDeclIDPair);
+ }
+
+ // Now the lookup table.
+ if (Offsets.second != 0) {
+ Cursor.JumpToBit(Offsets.second);
+
+ RecordData Record;
+ const char *Blob;
+ unsigned BlobLen;
+ unsigned Code = Cursor.ReadCode();
+ unsigned RecCode = Cursor.ReadRecord(Code, Record, &Blob, &BlobLen);
+ if (RecCode != DECL_CONTEXT_VISIBLE) {
+ Error("Expected visible lookup table block");
+ return true;
+ }
+ Info.NameLookupTableData
+ = ASTDeclContextNameLookupTable::Create(
+ (const unsigned char *)Blob + Record[0],
+ (const unsigned char *)Blob,
+ ASTDeclContextNameLookupTrait(*this, M));
+ }
+
+ return false;
+}
+
+void ASTReader::Error(StringRef Msg) {
+ Error(diag::err_fe_pch_malformed, Msg);
+}
+
+void ASTReader::Error(unsigned DiagID,
+ StringRef Arg1, StringRef Arg2) {
+ if (Diags.isDiagnosticInFlight())
+ Diags.SetDelayedDiagnostic(DiagID, Arg1, Arg2);
+ else
+ Diag(DiagID) << Arg1 << Arg2;
+}
+
+/// \brief Tell the AST listener about the predefines buffers in the chain.
+bool ASTReader::CheckPredefinesBuffers() {
+ if (Listener)
+ return Listener->ReadPredefinesBuffer(PCHPredefinesBuffers,
+ ActualOriginalFileName,
+ SuggestedPredefines,
+ FileMgr);
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Source Manager Deserialization
+//===----------------------------------------------------------------------===//
+
+/// \brief Read the line table in the source manager block.
+/// \returns true if there was an error.
+bool ASTReader::ParseLineTable(ModuleFile &F,
+ SmallVectorImpl<uint64_t> &Record) {
+ unsigned Idx = 0;
+ LineTableInfo &LineTable = SourceMgr.getLineTable();
+
+ // Parse the file names
+ std::map<int, int> FileIDs;
+ for (int I = 0, N = Record[Idx++]; I != N; ++I) {
+ // Extract the file name
+ unsigned FilenameLen = Record[Idx++];
+ std::string Filename(&Record[Idx], &Record[Idx] + FilenameLen);
+ Idx += FilenameLen;
+ MaybeAddSystemRootToFilename(Filename);
+ FileIDs[I] = LineTable.getLineTableFilenameID(Filename);
+ }
+
+ // Parse the line entries
+ std::vector<LineEntry> Entries;
+ while (Idx < Record.size()) {
+ int FID = Record[Idx++];
+ assert(FID >= 0 && "Serialized line entries for non-local file.");
+ // Remap FileID from 1-based old view.
+ FID += F.SLocEntryBaseID - 1;
+
+ // Extract the line entries
+ unsigned NumEntries = Record[Idx++];
+ assert(NumEntries && "Numentries is 00000");
+ Entries.clear();
+ Entries.reserve(NumEntries);
+ for (unsigned I = 0; I != NumEntries; ++I) {
+ unsigned FileOffset = Record[Idx++];
+ unsigned LineNo = Record[Idx++];
+ int FilenameID = FileIDs[Record[Idx++]];
+ SrcMgr::CharacteristicKind FileKind
+ = (SrcMgr::CharacteristicKind)Record[Idx++];
+ unsigned IncludeOffset = Record[Idx++];
+ Entries.push_back(LineEntry::get(FileOffset, LineNo, FilenameID,
+ FileKind, IncludeOffset));
+ }
+ LineTable.AddEntry(FID, Entries);
+ }
+
+ return false;
+}
+
+namespace {
+
+class ASTStatData {
+public:
+ const ino_t ino;
+ const dev_t dev;
+ const mode_t mode;
+ const time_t mtime;
+ const off_t size;
+
+ ASTStatData(ino_t i, dev_t d, mode_t mo, time_t m, off_t s)
+ : ino(i), dev(d), mode(mo), mtime(m), size(s) {}
+};
+
+class ASTStatLookupTrait {
+ public:
+ typedef const char *external_key_type;
+ typedef const char *internal_key_type;
+
+ typedef ASTStatData data_type;
+
+ static unsigned ComputeHash(const char *path) {
+ return llvm::HashString(path);
+ }
+
+ static internal_key_type GetInternalKey(const char *path) { return path; }
+
+ static bool EqualKey(internal_key_type a, internal_key_type b) {
+ return strcmp(a, b) == 0;
+ }
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d) {
+ unsigned KeyLen = (unsigned) clang::io::ReadUnalignedLE16(d);
+ unsigned DataLen = (unsigned) *d++;
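+ // The key is a null-terminated path; the stored length excludes the
+ // terminator, so add one to skip past it.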
+ return std::make_pair(KeyLen + 1, DataLen);
+ }
+
+ static internal_key_type ReadKey(const unsigned char *d, unsigned) {
+ return (const char *)d;
+ }
+
+ static data_type ReadData(const internal_key_type, const unsigned char *d,
+ unsigned /*DataLen*/) {
+ using namespace clang::io;
+
+ ino_t ino = (ino_t) ReadUnalignedLE32(d);
+ dev_t dev = (dev_t) ReadUnalignedLE32(d);
+ mode_t mode = (mode_t) ReadUnalignedLE16(d);
+ time_t mtime = (time_t) ReadUnalignedLE64(d);
+ off_t size = (off_t) ReadUnalignedLE64(d);
+ return data_type(ino, dev, mode, mtime, size);
+ }
+};
+
+/// \brief stat() cache for precompiled headers.
+///
+/// This cache is very similar to the stat cache used by pretokenized
+/// headers.
+class ASTStatCache : public FileSystemStatCache {
+ typedef OnDiskChainedHashTable<ASTStatLookupTrait> CacheTy;
+ CacheTy *Cache;
+
+ unsigned &NumStatHits, &NumStatMisses;
+public:
+ ASTStatCache(const unsigned char *Buckets, const unsigned char *Base,
+ unsigned &NumStatHits, unsigned &NumStatMisses)
+ : Cache(0), NumStatHits(NumStatHits), NumStatMisses(NumStatMisses) {
+ Cache = CacheTy::Create(Buckets, Base);
+ }
+
+ ~ASTStatCache() { delete Cache; }
+
+ LookupResult getStat(const char *Path, struct stat &StatBuf,
+ int *FileDescriptor) {
+ // Do the lookup for the file's data in the AST file.
+ CacheTy::iterator I = Cache->find(Path);
+
+ // If we don't get a hit in the AST file just forward to 'stat'.
+ if (I == Cache->end()) {
+ ++NumStatMisses;
+ return statChained(Path, StatBuf, FileDescriptor);
+ }
+
+ ++NumStatHits;
+ ASTStatData Data = *I;
+
+ StatBuf.st_ino = Data.ino;
+ StatBuf.st_dev = Data.dev;
+ StatBuf.st_mtime = Data.mtime;
+ StatBuf.st_mode = Data.mode;
+ StatBuf.st_size = Data.size;
+ return CacheExists;
+ }
+};
+} // end anonymous namespace
+
+/// \brief Read a source manager block
+ASTReader::ASTReadResult ASTReader::ReadSourceManagerBlock(ModuleFile &F) {
+ using namespace SrcMgr;
+
+ llvm::BitstreamCursor &SLocEntryCursor = F.SLocEntryCursor;
+
+ // Set the source-location entry cursor to the current position in
+ // the stream. This cursor will be used to read the contents of the
+ // source manager block initially, and then lazily read
+ // source-location entries as needed.
+ SLocEntryCursor = F.Stream;
+
+ // The stream itself is going to skip over the source manager block.
+ if (F.Stream.SkipBlock()) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+
+ // Enter the source manager block.
+ if (SLocEntryCursor.EnterSubBlock(SOURCE_MANAGER_BLOCK_ID)) {
+ Error("malformed source manager block record in AST file");
+ return Failure;
+ }
+
+ RecordData Record;
+ while (true) {
+ unsigned Code = SLocEntryCursor.ReadCode();
+ if (Code == llvm::bitc::END_BLOCK) {
+ if (SLocEntryCursor.ReadBlockEnd()) {
+ Error("error at end of Source Manager block in AST file");
+ return Failure;
+ }
+ return Success;
+ }
+
+ if (Code == llvm::bitc::ENTER_SUBBLOCK) {
+ // No known subblocks, always skip them.
+ SLocEntryCursor.ReadSubBlockID();
+ if (SLocEntryCursor.SkipBlock()) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ continue;
+ }
+
+ if (Code == llvm::bitc::DEFINE_ABBREV) {
+ SLocEntryCursor.ReadAbbrevRecord();
+ continue;
+ }
+
+ // Read a record.
+ const char *BlobStart;
+ unsigned BlobLen;
+ Record.clear();
+ switch (SLocEntryCursor.ReadRecord(Code, Record, &BlobStart, &BlobLen)) {
+ default: // Default behavior: ignore.
+ break;
+
+ case SM_SLOC_FILE_ENTRY:
+ case SM_SLOC_BUFFER_ENTRY:
+ case SM_SLOC_EXPANSION_ENTRY:
+ // Once we hit one of the source location entries, we're done.
+ return Success;
+ }
+ }
+}
+
+/// \brief If a header file is not found at the path where we expect it, and
+/// the PCH file was moved from its original location, try to resolve the
+/// file by assuming that the header and the PCH were moved together and that
+/// the header is in the same place relative to the PCH.
+static std::string
+resolveFileRelativeToOriginalDir(const std::string &Filename,
+ const std::string &OriginalDir,
+ const std::string &CurrDir) {
+ assert(OriginalDir != CurrDir &&
+ "No point trying to resolve the file if the PCH dir didn't change");
+ using namespace llvm::sys;
+ SmallString<128> filePath(Filename);
+ fs::make_absolute(filePath);
+ assert(path::is_absolute(OriginalDir));
+ SmallString<128> currPCHPath(CurrDir);
+
+ path::const_iterator fileDirI = path::begin(path::parent_path(filePath)),
+ fileDirE = path::end(path::parent_path(filePath));
+ path::const_iterator origDirI = path::begin(OriginalDir),
+ origDirE = path::end(OriginalDir);
+ // Skip the common path components from filePath and OriginalDir.
+ while (fileDirI != fileDirE && origDirI != origDirE &&
+ *fileDirI == *origDirI) {
+ ++fileDirI;
+ ++origDirI;
+ }
+ for (; origDirI != origDirE; ++origDirI)
+ path::append(currPCHPath, "..");
+ path::append(currPCHPath, fileDirI, fileDirE);
+ path::append(currPCHPath, path::filename(Filename));
+ return currPCHPath.str();
+}
+
+/// \brief Read in the source location entry with the given ID.
+ASTReader::ASTReadResult ASTReader::ReadSLocEntryRecord(int ID) {
+ if (ID == 0)
+ return Success;
+
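+ // IDs of loaded source-location entries are negative; make sure this one is
+ // within the range this reader knows about.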
+ if (unsigned(-ID) - 2 >= getTotalNumSLocs() || ID > 0) {
+ Error("source location entry ID out-of-range for AST file");
+ return Failure;
+ }
+
+ ModuleFile *F = GlobalSLocEntryMap.find(-ID)->second;
+ F->SLocEntryCursor.JumpToBit(F->SLocEntryOffsets[ID - F->SLocEntryBaseID]);
+ llvm::BitstreamCursor &SLocEntryCursor = F->SLocEntryCursor;
+ unsigned BaseOffset = F->SLocEntryBaseOffset;
+
+ ++NumSLocEntriesRead;
+ unsigned Code = SLocEntryCursor.ReadCode();
+ if (Code == llvm::bitc::END_BLOCK ||
+ Code == llvm::bitc::ENTER_SUBBLOCK ||
+ Code == llvm::bitc::DEFINE_ABBREV) {
+ Error("incorrectly-formatted source location entry in AST file");
+ return Failure;
+ }
+
+ RecordData Record;
+ const char *BlobStart;
+ unsigned BlobLen;
+ switch (SLocEntryCursor.ReadRecord(Code, Record, &BlobStart, &BlobLen)) {
+ default:
+ Error("incorrectly-formatted source location entry in AST file");
+ return Failure;
+
+ case SM_SLOC_FILE_ENTRY: {
+ if (Record.size() < 7) {
+ Error("source location entry is incorrect");
+ return Failure;
+ }
+
+ // We will detect whether a file changed and return 'Failure' for it, but
+ // we will also try to fail gracefully by setting up the SLocEntry.
+ ASTReader::ASTReadResult Result = Success;
+
+ bool OverriddenBuffer = Record[6];
+
+ std::string OrigFilename(BlobStart, BlobStart + BlobLen);
+ std::string Filename = OrigFilename;
+ MaybeAddSystemRootToFilename(Filename);
+ const FileEntry *File =
+ OverriddenBuffer? FileMgr.getVirtualFile(Filename, (off_t)Record[4],
+ (time_t)Record[5])
+ : FileMgr.getFile(Filename, /*OpenFile=*/false);
+ if (File == 0 && !OriginalDir.empty() && !CurrentDir.empty() &&
+ OriginalDir != CurrentDir) {
+ std::string resolved = resolveFileRelativeToOriginalDir(Filename,
+ OriginalDir,
+ CurrentDir);
+ if (!resolved.empty())
+ File = FileMgr.getFile(resolved);
+ }
+ if (File == 0)
+ File = FileMgr.getVirtualFile(Filename, (off_t)Record[4],
+ (time_t)Record[5]);
+ if (File == 0) {
+ std::string ErrorStr = "could not find file '";
+ ErrorStr += Filename;
+ ErrorStr += "' referenced by AST file";
+ Error(ErrorStr.c_str());
+ return Failure;
+ }
+
+ if (!DisableValidation &&
+ ((off_t)Record[4] != File->getSize()
+#if !defined(LLVM_ON_WIN32)
+ // In our regression testing, the Windows file system seems to
+ // have inconsistent modification times that sometimes
+ // erroneously trigger this error-handling path.
+ || (time_t)Record[5] != File->getModificationTime()
+#endif
+ )) {
+ Error(diag::err_fe_pch_file_modified, Filename);
+ Result = Failure;
+ }
+
+ SourceLocation IncludeLoc = ReadSourceLocation(*F, Record[1]);
+ if (IncludeLoc.isInvalid() && F->Kind != MK_MainFile) {
+ // This is the module's main file.
+ IncludeLoc = getImportLocation(F);
+ }
+ FileID FID = SourceMgr.createFileID(File, IncludeLoc,
+ (SrcMgr::CharacteristicKind)Record[2],
+ ID, BaseOffset + Record[0]);
+ SrcMgr::FileInfo &FileInfo =
+ const_cast<SrcMgr::FileInfo&>(SourceMgr.getSLocEntry(FID).getFile());
+ FileInfo.NumCreatedFIDs = Record[7];
+ if (Record[3])
+ FileInfo.setHasLineDirectives();
+
+ const DeclID *FirstDecl = F->FileSortedDecls + Record[8];
+ unsigned NumFileDecls = Record[9];
+ if (NumFileDecls) {
+ assert(F->FileSortedDecls && "FILE_SORTED_DECLS not encountered yet ?");
+ FileDeclIDs[FID] = FileDeclsInfo(F, llvm::makeArrayRef(FirstDecl,
+ NumFileDecls));
+ }
+
+ const SrcMgr::ContentCache *ContentCache
+ = SourceMgr.getOrCreateContentCache(File);
+ if (OverriddenBuffer && !ContentCache->BufferOverridden &&
+ ContentCache->ContentsEntry == ContentCache->OrigEntry) {
+ unsigned Code = SLocEntryCursor.ReadCode();
+ Record.clear();
+ unsigned RecCode
+ = SLocEntryCursor.ReadRecord(Code, Record, &BlobStart, &BlobLen);
+
+ if (RecCode != SM_SLOC_BUFFER_BLOB) {
+ Error("AST record has invalid code");
+ return Failure;
+ }
+
+ llvm::MemoryBuffer *Buffer
+ = llvm::MemoryBuffer::getMemBuffer(StringRef(BlobStart, BlobLen - 1),
+ Filename);
+ SourceMgr.overrideFileContents(File, Buffer);
+ }
+
+ if (Result == Failure)
+ return Failure;
+ break;
+ }
+
+ case SM_SLOC_BUFFER_ENTRY: {
+ const char *Name = BlobStart;
+ unsigned Offset = Record[0];
+ unsigned Code = SLocEntryCursor.ReadCode();
+ Record.clear();
+ unsigned RecCode
+ = SLocEntryCursor.ReadRecord(Code, Record, &BlobStart, &BlobLen);
+
+ if (RecCode != SM_SLOC_BUFFER_BLOB) {
+ Error("AST record has invalid code");
+ return Failure;
+ }
+
+ llvm::MemoryBuffer *Buffer
+ = llvm::MemoryBuffer::getMemBuffer(StringRef(BlobStart, BlobLen - 1),
+ Name);
+ FileID BufferID = SourceMgr.createFileIDForMemBuffer(Buffer, ID,
+ BaseOffset + Offset);
+
+ if (strcmp(Name, "<built-in>") == 0 && F->Kind == MK_PCH) {
+ PCHPredefinesBlock Block = {
+ BufferID,
+ StringRef(BlobStart, BlobLen - 1)
+ };
+ PCHPredefinesBuffers.push_back(Block);
+ }
+
+ break;
+ }
+
+ case SM_SLOC_EXPANSION_ENTRY: {
+ SourceLocation SpellingLoc = ReadSourceLocation(*F, Record[1]);
+ SourceMgr.createExpansionLoc(SpellingLoc,
+ ReadSourceLocation(*F, Record[2]),
+ ReadSourceLocation(*F, Record[3]),
+ Record[4],
+ ID,
+ BaseOffset + Record[0]);
+ break;
+ }
+ }
+
+ return Success;
+}
+
+/// \brief Find the location where the module F is imported.
+SourceLocation ASTReader::getImportLocation(ModuleFile *F) {
+ if (F->ImportLoc.isValid())
+ return F->ImportLoc;
+
+ // Otherwise we have a PCH. It's considered to be "imported" at the first
+ // location of its includer.
+ if (F->ImportedBy.empty() || !F->ImportedBy[0]) {
+ // Main file is the importer. We assume that it is the first entry in the
+ // entry table. We can't ask the manager, because at the time of PCH loading
+ // the main file entry doesn't exist yet.
+ // The very first entry is the invalid instantiation loc, which takes up
+ // offsets 0 and 1.
+ return SourceLocation::getFromRawEncoding(2U);
+ }
+ //return F->Loaders[0]->FirstLoc;
+ return F->ImportedBy[0]->FirstLoc;
+}
+
+/// ReadBlockAbbrevs - Enter a subblock of the specified BlockID with the
+/// specified cursor. Read the abbreviations that are at the top of the block
+/// and then leave the cursor pointing into the block.
+bool ASTReader::ReadBlockAbbrevs(llvm::BitstreamCursor &Cursor,
+ unsigned BlockID) {
+ if (Cursor.EnterSubBlock(BlockID)) {
+ Error("malformed block record in AST file");
+ return true;
+ }
+
+ while (true) {
+ uint64_t Offset = Cursor.GetCurrentBitNo();
+ unsigned Code = Cursor.ReadCode();
+
+ // We expect all abbrevs to be at the start of the block.
+ if (Code != llvm::bitc::DEFINE_ABBREV) {
+ Cursor.JumpToBit(Offset);
+ return false;
+ }
+ Cursor.ReadAbbrevRecord();
+ }
+}
+
+void ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
+ llvm::BitstreamCursor &Stream = F.MacroCursor;
+
+ // Keep track of where we are in the stream, then jump back there
+ // after reading this macro.
+ SavedStreamPosition SavedPosition(Stream);
+
+ Stream.JumpToBit(Offset);
+ RecordData Record;
+ SmallVector<IdentifierInfo*, 16> MacroArgs;
+ MacroInfo *Macro = 0;
+
+ while (true) {
+ unsigned Code = Stream.ReadCode();
+ switch (Code) {
+ case llvm::bitc::END_BLOCK:
+ return;
+
+ case llvm::bitc::ENTER_SUBBLOCK:
+ // No known subblocks, always skip them.
+ Stream.ReadSubBlockID();
+ if (Stream.SkipBlock()) {
+ Error("malformed block record in AST file");
+ return;
+ }
+ continue;
+
+ case llvm::bitc::DEFINE_ABBREV:
+ Stream.ReadAbbrevRecord();
+ continue;
+ default: break;
+ }
+
+ // Read a record.
+ const char *BlobStart = 0;
+ unsigned BlobLen = 0;
+ Record.clear();
+ PreprocessorRecordTypes RecType =
+ (PreprocessorRecordTypes)Stream.ReadRecord(Code, Record, BlobStart,
+ BlobLen);
+ switch (RecType) {
+ case PP_MACRO_OBJECT_LIKE:
+ case PP_MACRO_FUNCTION_LIKE: {
+ // If we already have a macro, that means that we've hit the end
+ // of the definition of the macro we were looking for. We're
+ // done.
+ if (Macro)
+ return;
+
+ IdentifierInfo *II = getLocalIdentifier(F, Record[0]);
+ if (II == 0) {
+ Error("macro must have a name in AST file");
+ return;
+ }
+
+ SourceLocation Loc = ReadSourceLocation(F, Record[1]);
+ bool isUsed = Record[2];
+
+ MacroInfo *MI = PP.AllocateMacroInfo(Loc);
+ MI->setIsUsed(isUsed);
+ MI->setIsFromAST();
+
+ bool IsPublic = Record[3];
+ unsigned NextIndex = 4;
+ MI->setVisibility(IsPublic, ReadSourceLocation(F, Record, NextIndex));
+
+ if (RecType == PP_MACRO_FUNCTION_LIKE) {
+ // Decode function-like macro info.
+ bool isC99VarArgs = Record[NextIndex++];
+ bool isGNUVarArgs = Record[NextIndex++];
+ MacroArgs.clear();
+ unsigned NumArgs = Record[NextIndex++];
+ for (unsigned i = 0; i != NumArgs; ++i)
+ MacroArgs.push_back(getLocalIdentifier(F, Record[NextIndex++]));
+
+ // Install function-like macro info.
+ MI->setIsFunctionLike();
+ if (isC99VarArgs) MI->setIsC99Varargs();
+ if (isGNUVarArgs) MI->setIsGNUVarargs();
+ MI->setArgumentList(MacroArgs.data(), MacroArgs.size(),
+ PP.getPreprocessorAllocator());
+ }
+
+ // Finally, install the macro.
+ PP.setMacroInfo(II, MI, /*LoadedFromAST=*/true);
+
+ // Remember that we saw this macro last so that we add the tokens that
+ // form its body to it.
+ Macro = MI;
+
+ if (NextIndex + 1 == Record.size() && PP.getPreprocessingRecord() &&
+ Record[NextIndex]) {
+ // We have a macro definition. Register the association
+ PreprocessedEntityID
+ GlobalID = getGlobalPreprocessedEntityID(F, Record[NextIndex]);
+ PreprocessingRecord &PPRec = *PP.getPreprocessingRecord();
+ PPRec.RegisterMacroDefinition(Macro,
+ PPRec.getPPEntityID(GlobalID-1, /*isLoaded=*/true));
+ }
+
+ ++NumMacrosRead;
+ break;
+ }
+
+ case PP_TOKEN: {
+ // If we see a TOKEN before a PP_MACRO_*, then the file is
+ // erroneous; just pretend we didn't see it.
+ if (Macro == 0) break;
+
+ Token Tok;
+ Tok.startToken();
+ Tok.setLocation(ReadSourceLocation(F, Record[0]));
+ Tok.setLength(Record[1]);
+ if (IdentifierInfo *II = getLocalIdentifier(F, Record[2]))
+ Tok.setIdentifierInfo(II);
+ Tok.setKind((tok::TokenKind)Record[3]);
+ Tok.setFlag((Token::TokenFlags)Record[4]);
+ Macro->AddTokenToBody(Tok);
+ break;
+ }
+ }
+ }
+}
+
+PreprocessedEntityID
+ASTReader::getGlobalPreprocessedEntityID(ModuleFile &M, unsigned LocalID) const {
+ ContinuousRangeMap<uint32_t, int, 2>::const_iterator
+ I = M.PreprocessedEntityRemap.find(LocalID - NUM_PREDEF_PP_ENTITY_IDS);
+ assert(I != M.PreprocessedEntityRemap.end()
+ && "Invalid index into preprocessed entity index remap");
+
+ return LocalID + I->second;
+}
+
+unsigned HeaderFileInfoTrait::ComputeHash(const char *path) {
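+ // Hash only the filename so that headers reached through different
+ // directories still collide; EqualKey then checks whether the files are
+ // really equivalent.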
+ return llvm::HashString(llvm::sys::path::filename(path));
+}
+
+HeaderFileInfoTrait::internal_key_type
+HeaderFileInfoTrait::GetInternalKey(const char *path) { return path; }
+
+bool HeaderFileInfoTrait::EqualKey(internal_key_type a, internal_key_type b) {
+ if (strcmp(a, b) == 0)
+ return true;
+
+ if (llvm::sys::path::filename(a) != llvm::sys::path::filename(b))
+ return false;
+
+ // Determine whether the actual files are equivalent.
+ bool Result = false;
+ if (llvm::sys::fs::equivalent(a, b, Result))
+ return false;
+
+ return Result;
+}
+
+std::pair<unsigned, unsigned>
+HeaderFileInfoTrait::ReadKeyDataLength(const unsigned char*& d) {
+ unsigned KeyLen = (unsigned) clang::io::ReadUnalignedLE16(d);
+ unsigned DataLen = (unsigned) *d++;
+ return std::make_pair(KeyLen + 1, DataLen);
+}
+
+HeaderFileInfoTrait::data_type
+HeaderFileInfoTrait::ReadData(const internal_key_type, const unsigned char *d,
+ unsigned DataLen) {
+ const unsigned char *End = d + DataLen;
+ using namespace clang::io;
+ HeaderFileInfo HFI;
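+ // The flag byte packs, from the low bit upward: IndexHeaderMapHeader,
+ // Resolved, a two-bit DirInfo, isPragmaOnce and isImport.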
+ unsigned Flags = *d++;
+ HFI.isImport = (Flags >> 5) & 0x01;
+ HFI.isPragmaOnce = (Flags >> 4) & 0x01;
+ HFI.DirInfo = (Flags >> 2) & 0x03;
+ HFI.Resolved = (Flags >> 1) & 0x01;
+ HFI.IndexHeaderMapHeader = Flags & 0x01;
+ HFI.NumIncludes = ReadUnalignedLE16(d);
+ HFI.ControllingMacroID = Reader.getGlobalIdentifierID(M,
+ ReadUnalignedLE32(d));
+ if (unsigned FrameworkOffset = ReadUnalignedLE32(d)) {
+ // The framework offset is 1 greater than the actual offset,
+ // since 0 is used as an indicator for "no framework name".
+ StringRef FrameworkName(FrameworkStrings + FrameworkOffset - 1);
+ HFI.Framework = HS->getUniqueFrameworkName(FrameworkName);
+ }
+
+ assert(End == d && "Wrong data length in HeaderFileInfo deserialization");
+ (void)End;
+
+ // This HeaderFileInfo was externally loaded.
+ HFI.External = true;
+ return HFI;
+}
+
+void ASTReader::setIdentifierIsMacro(IdentifierInfo *II, ModuleFile &F,
+ uint64_t LocalOffset, bool Visible) {
+ if (Visible) {
+ // Note that this identifier has a macro definition.
+ II->setHasMacroDefinition(true);
+ }
+
+ // Adjust the offset to a global offset.
+ UnreadMacroRecordOffsets[II] = F.GlobalBitOffset + LocalOffset;
+}
+
+void ASTReader::ReadDefinedMacros() {
+ for (ModuleReverseIterator I = ModuleMgr.rbegin(),
+ E = ModuleMgr.rend(); I != E; ++I) {
+ llvm::BitstreamCursor &MacroCursor = (*I)->MacroCursor;
+
+ // If there was no preprocessor block, skip this file.
+ if (!MacroCursor.getBitStreamReader())
+ continue;
+
+ llvm::BitstreamCursor Cursor = MacroCursor;
+ Cursor.JumpToBit((*I)->MacroStartOffset);
+
+ RecordData Record;
+ while (true) {
+ unsigned Code = Cursor.ReadCode();
+ if (Code == llvm::bitc::END_BLOCK)
+ break;
+
+ if (Code == llvm::bitc::ENTER_SUBBLOCK) {
+ // No known subblocks, always skip them.
+ Cursor.ReadSubBlockID();
+ if (Cursor.SkipBlock()) {
+ Error("malformed block record in AST file");
+ return;
+ }
+ continue;
+ }
+
+ if (Code == llvm::bitc::DEFINE_ABBREV) {
+ Cursor.ReadAbbrevRecord();
+ continue;
+ }
+
+ // Read a record.
+ const char *BlobStart;
+ unsigned BlobLen;
+ Record.clear();
+ switch (Cursor.ReadRecord(Code, Record, &BlobStart, &BlobLen)) {
+ default: // Default behavior: ignore.
+ break;
+
+ case PP_MACRO_OBJECT_LIKE:
+ case PP_MACRO_FUNCTION_LIKE:
+ getLocalIdentifier(**I, Record[0]);
+ break;
+
+ case PP_TOKEN:
+ // Ignore tokens.
+ break;
+ }
+ }
+ }
+
+ // Drain the unread macro-record offsets map.
+ while (!UnreadMacroRecordOffsets.empty())
+ LoadMacroDefinition(UnreadMacroRecordOffsets.begin());
+}
+
+void ASTReader::LoadMacroDefinition(
+ llvm::DenseMap<IdentifierInfo *, uint64_t>::iterator Pos) {
+ assert(Pos != UnreadMacroRecordOffsets.end() && "Unknown macro definition");
+ uint64_t Offset = Pos->second;
+ UnreadMacroRecordOffsets.erase(Pos);
+
+ RecordLocation Loc = getLocalBitOffset(Offset);
+ ReadMacroRecord(*Loc.F, Loc.Offset);
+}
+
+void ASTReader::LoadMacroDefinition(IdentifierInfo *II) {
+ llvm::DenseMap<IdentifierInfo *, uint64_t>::iterator Pos
+ = UnreadMacroRecordOffsets.find(II);
+ LoadMacroDefinition(Pos);
+}
+
+namespace {
+ /// \brief Visitor class used to look up identifiers in an AST file.
+ class IdentifierLookupVisitor {
+ StringRef Name;
+ unsigned PriorGeneration;
+ IdentifierInfo *Found;
+ public:
+ IdentifierLookupVisitor(StringRef Name, unsigned PriorGeneration)
+ : Name(Name), PriorGeneration(PriorGeneration), Found() { }
+
+ static bool visit(ModuleFile &M, void *UserData) {
+ IdentifierLookupVisitor *This
+ = static_cast<IdentifierLookupVisitor *>(UserData);
+
+ // If we've already searched this module file, skip it now.
+ if (M.Generation <= This->PriorGeneration)
+ return true;
+
+ ASTIdentifierLookupTable *IdTable
+ = (ASTIdentifierLookupTable *)M.IdentifierLookupTable;
+ if (!IdTable)
+ return false;
+
+ ASTIdentifierLookupTrait Trait(IdTable->getInfoObj().getReader(),
+ M, This->Found);
+
+ std::pair<const char*, unsigned> Key(This->Name.begin(),
+ This->Name.size());
+ ASTIdentifierLookupTable::iterator Pos = IdTable->find(Key, &Trait);
+ if (Pos == IdTable->end())
+ return false;
+
+ // Dereferencing the iterator has the effect of building the
+ // IdentifierInfo node and populating it with the various
+ // declarations it needs.
+ This->Found = *Pos;
+ return true;
+ }
+
+ /// \brief Retrieve the identifier info found within the module
+ /// files.
+ IdentifierInfo *getIdentifierInfo() const { return Found; }
+ };
+}
+
+void ASTReader::updateOutOfDateIdentifier(IdentifierInfo &II) {
+ unsigned PriorGeneration = 0;
+ if (getContext().getLangOpts().Modules)
+ PriorGeneration = IdentifierGeneration[&II];
+
+ IdentifierLookupVisitor Visitor(II.getName(), PriorGeneration);
+ ModuleMgr.visit(IdentifierLookupVisitor::visit, &Visitor);
+ markIdentifierUpToDate(&II);
+}
+
+void ASTReader::markIdentifierUpToDate(IdentifierInfo *II) {
+ if (!II)
+ return;
+
+ II->setOutOfDate(false);
+
+ // Update the generation for this identifier.
+ if (getContext().getLangOpts().Modules)
+ IdentifierGeneration[II] = CurrentGeneration;
+}
+
+const FileEntry *ASTReader::getFileEntry(StringRef filenameStrRef) {
+ std::string Filename = filenameStrRef;
+ MaybeAddSystemRootToFilename(Filename);
+ const FileEntry *File = FileMgr.getFile(Filename);
+ if (File == 0 && !OriginalDir.empty() && !CurrentDir.empty() &&
+ OriginalDir != CurrentDir) {
+ std::string resolved = resolveFileRelativeToOriginalDir(Filename,
+ OriginalDir,
+ CurrentDir);
+ if (!resolved.empty())
+ File = FileMgr.getFile(resolved);
+ }
+
+ return File;
+}
+
+/// \brief If we are loading a relocatable PCH file, and the filename is
+/// not an absolute path, add the system root to the beginning of the file
+/// name.
+void ASTReader::MaybeAddSystemRootToFilename(std::string &Filename) {
+ // If this is not a relocatable PCH file, there's nothing to do.
+ if (!RelocatablePCH)
+ return;
+
+ if (Filename.empty() || llvm::sys::path::is_absolute(Filename))
+ return;
+
+ if (isysroot.empty()) {
+ // If no system root was given, default to '/'
+ Filename.insert(Filename.begin(), '/');
+ return;
+ }
+
+ unsigned Length = isysroot.size();
+ if (isysroot[Length - 1] != '/')
+ Filename.insert(Filename.begin(), '/');
+
+ Filename.insert(Filename.begin(), isysroot.begin(), isysroot.end());
+}
+
+ASTReader::ASTReadResult
+ASTReader::ReadASTBlock(ModuleFile &F) {
+ llvm::BitstreamCursor &Stream = F.Stream;
+
+ if (Stream.EnterSubBlock(AST_BLOCK_ID)) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+
+ // Read all of the records and blocks for the AST file.
+ RecordData Record;
+ while (!Stream.AtEndOfStream()) {
+ unsigned Code = Stream.ReadCode();
+ if (Code == llvm::bitc::END_BLOCK) {
+ if (Stream.ReadBlockEnd()) {
+ Error("error at end of module block in AST file");
+ return Failure;
+ }
+
+ return Success;
+ }
+
+ if (Code == llvm::bitc::ENTER_SUBBLOCK) {
+ switch (Stream.ReadSubBlockID()) {
+ case DECLTYPES_BLOCK_ID:
+ // We lazily load the decls block, but we want to set up the
+ // DeclsCursor cursor to point into it. Clone our current bitcode
+ // cursor to it, enter the block and read the abbrevs in that block.
+ // With the main cursor, we just skip over it.
+ F.DeclsCursor = Stream;
+ if (Stream.SkipBlock() || // Skip with the main cursor.
+ // Read the abbrevs.
+ ReadBlockAbbrevs(F.DeclsCursor, DECLTYPES_BLOCK_ID)) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ break;
+
+ case DECL_UPDATES_BLOCK_ID:
+ if (Stream.SkipBlock()) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ break;
+
+ case PREPROCESSOR_BLOCK_ID:
+ F.MacroCursor = Stream;
+ if (!PP.getExternalSource())
+ PP.setExternalSource(this);
+
+ if (Stream.SkipBlock() ||
+ ReadBlockAbbrevs(F.MacroCursor, PREPROCESSOR_BLOCK_ID)) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ F.MacroStartOffset = F.MacroCursor.GetCurrentBitNo();
+ break;
+
+ case PREPROCESSOR_DETAIL_BLOCK_ID:
+ F.PreprocessorDetailCursor = Stream;
+ if (Stream.SkipBlock() ||
+ ReadBlockAbbrevs(F.PreprocessorDetailCursor,
+ PREPROCESSOR_DETAIL_BLOCK_ID)) {
+ Error("malformed preprocessor detail record in AST file");
+ return Failure;
+ }
+ F.PreprocessorDetailStartOffset
+ = F.PreprocessorDetailCursor.GetCurrentBitNo();
+
+ if (!PP.getPreprocessingRecord())
+ PP.createPreprocessingRecord(/*RecordConditionalDirectives=*/false);
+ if (!PP.getPreprocessingRecord()->getExternalSource())
+ PP.getPreprocessingRecord()->SetExternalSource(*this);
+ break;
+
+ case SOURCE_MANAGER_BLOCK_ID:
+ switch (ReadSourceManagerBlock(F)) {
+ case Success:
+ break;
+
+ case Failure:
+ Error("malformed source manager block in AST file");
+ return Failure;
+
+ case IgnorePCH:
+ return IgnorePCH;
+ }
+ break;
+
+ case SUBMODULE_BLOCK_ID:
+ switch (ReadSubmoduleBlock(F)) {
+ case Success:
+ break;
+
+ case Failure:
+ Error("malformed submodule block in AST file");
+ return Failure;
+
+ case IgnorePCH:
+ return IgnorePCH;
+ }
+ break;
+
+ default:
+ if (!Stream.SkipBlock())
+ break;
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ continue;
+ }
+
+ if (Code == llvm::bitc::DEFINE_ABBREV) {
+ Stream.ReadAbbrevRecord();
+ continue;
+ }
+
+ // Read and process a record.
+ Record.clear();
+ const char *BlobStart = 0;
+ unsigned BlobLen = 0;
+ switch ((ASTRecordTypes)Stream.ReadRecord(Code, Record,
+ &BlobStart, &BlobLen)) {
+ default: // Default behavior: ignore.
+ break;
+
+ case METADATA: {
+ if (Record[0] != VERSION_MAJOR && !DisableValidation) {
+ Diag(Record[0] < VERSION_MAJOR? diag::warn_pch_version_too_old
+ : diag::warn_pch_version_too_new);
+ return IgnorePCH;
+ }
+
+ bool hasErrors = Record[5];
+ if (hasErrors && !DisableValidation && !AllowASTWithCompilerErrors) {
+ Diag(diag::err_pch_with_compiler_errors);
+ return IgnorePCH;
+ }
+
+ RelocatablePCH = Record[4];
+ if (Listener) {
+ std::string TargetTriple(BlobStart, BlobLen);
+ if (Listener->ReadTargetTriple(TargetTriple))
+ return IgnorePCH;
+ }
+ break;
+ }
+
+ case IMPORTS: {
+ // Load each of the imported PCH files.
+ unsigned Idx = 0, N = Record.size();
+ while (Idx < N) {
+ // Read information about the AST file.
+ ModuleKind ImportedKind = (ModuleKind)Record[Idx++];
+ unsigned Length = Record[Idx++];
+ SmallString<128> ImportedFile(Record.begin() + Idx,
+ Record.begin() + Idx + Length);
+ Idx += Length;
+
+ // Load the AST file.
+ switch(ReadASTCore(ImportedFile, ImportedKind, &F)) {
+ case Failure: return Failure;
+ // If we have to ignore the dependency, we'll have to ignore this too.
+ case IgnorePCH: return IgnorePCH;
+ case Success: break;
+ }
+ }
+ break;
+ }
+
+ case TYPE_OFFSET: {
+ if (F.LocalNumTypes != 0) {
+ Error("duplicate TYPE_OFFSET record in AST file");
+ return Failure;
+ }
+ F.TypeOffsets = (const uint32_t *)BlobStart;
+ F.LocalNumTypes = Record[0];
+ unsigned LocalBaseTypeIndex = Record[1];
+ F.BaseTypeIndex = getTotalNumTypes();
+
+ if (F.LocalNumTypes > 0) {
+ // Introduce the global -> local mapping for types within this module.
+ GlobalTypeMap.insert(std::make_pair(getTotalNumTypes(), &F));
+
+ // Introduce the local -> global mapping for types within this module.
+ F.TypeRemap.insertOrReplace(
+ std::make_pair(LocalBaseTypeIndex,
+ F.BaseTypeIndex - LocalBaseTypeIndex));
+
+ TypesLoaded.resize(TypesLoaded.size() + F.LocalNumTypes);
+ }
+ break;
+ }
+
+ case DECL_OFFSET: {
+ if (F.LocalNumDecls != 0) {
+ Error("duplicate DECL_OFFSET record in AST file");
+ return Failure;
+ }
+ F.DeclOffsets = (const DeclOffset *)BlobStart;
+ F.LocalNumDecls = Record[0];
+ unsigned LocalBaseDeclID = Record[1];
+ F.BaseDeclID = getTotalNumDecls();
+
+ if (F.LocalNumDecls > 0) {
+ // Introduce the global -> local mapping for declarations within this
+ // module.
+ GlobalDeclMap.insert(
+ std::make_pair(getTotalNumDecls() + NUM_PREDEF_DECL_IDS, &F));
+
+ // Introduce the local -> global mapping for declarations within this
+ // module.
+ F.DeclRemap.insertOrReplace(
+ std::make_pair(LocalBaseDeclID, F.BaseDeclID - LocalBaseDeclID));
+
+ // Introduce the global -> local mapping for declarations within this
+ // module.
+ F.GlobalToLocalDeclIDs[&F] = LocalBaseDeclID;
+
+ DeclsLoaded.resize(DeclsLoaded.size() + F.LocalNumDecls);
+ }
+ break;
+ }
+
+ case TU_UPDATE_LEXICAL: {
+ DeclContext *TU = Context.getTranslationUnitDecl();
+ DeclContextInfo &Info = F.DeclContextInfos[TU];
+ Info.LexicalDecls = reinterpret_cast<const KindDeclIDPair *>(BlobStart);
+ Info.NumLexicalDecls
+ = static_cast<unsigned int>(BlobLen / sizeof(KindDeclIDPair));
+ TU->setHasExternalLexicalStorage(true);
+ break;
+ }
+
+ case UPDATE_VISIBLE: {
+ unsigned Idx = 0;
+ serialization::DeclID ID = ReadDeclID(F, Record, Idx);
+ void *Table = ASTDeclContextNameLookupTable::Create(
+ (const unsigned char *)BlobStart + Record[Idx++],
+ (const unsigned char *)BlobStart,
+ ASTDeclContextNameLookupTrait(*this, F));
+ if (ID == PREDEF_DECL_TRANSLATION_UNIT_ID) { // Is it the TU?
+ DeclContext *TU = Context.getTranslationUnitDecl();
+ F.DeclContextInfos[TU].NameLookupTableData = Table;
+ TU->setHasExternalVisibleStorage(true);
+ } else
+ PendingVisibleUpdates[ID].push_back(std::make_pair(Table, &F));
+ break;
+ }
+
+ case LANGUAGE_OPTIONS:
+ if (ParseLanguageOptions(Record) && !DisableValidation)
+ return IgnorePCH;
+ break;
+
+ case IDENTIFIER_TABLE:
+ F.IdentifierTableData = BlobStart;
+ if (Record[0]) {
+ F.IdentifierLookupTable
+ = ASTIdentifierLookupTable::Create(
+ (const unsigned char *)F.IdentifierTableData + Record[0],
+ (const unsigned char *)F.IdentifierTableData,
+ ASTIdentifierLookupTrait(*this, F));
+
+ PP.getIdentifierTable().setExternalIdentifierLookup(this);
+ }
+ break;
+
+ case IDENTIFIER_OFFSET: {
+ if (F.LocalNumIdentifiers != 0) {
+ Error("duplicate IDENTIFIER_OFFSET record in AST file");
+ return Failure;
+ }
+ F.IdentifierOffsets = (const uint32_t *)BlobStart;
+ F.LocalNumIdentifiers = Record[0];
+ unsigned LocalBaseIdentifierID = Record[1];
+ F.BaseIdentifierID = getTotalNumIdentifiers();
+
+ if (F.LocalNumIdentifiers > 0) {
+ // Introduce the global -> local mapping for identifiers within this
+ // module.
+ GlobalIdentifierMap.insert(std::make_pair(getTotalNumIdentifiers() + 1,
+ &F));
+
+ // Introduce the local -> global mapping for identifiers within this
+ // module.
+ F.IdentifierRemap.insertOrReplace(
+ std::make_pair(LocalBaseIdentifierID,
+ F.BaseIdentifierID - LocalBaseIdentifierID));
+
+ IdentifiersLoaded.resize(IdentifiersLoaded.size()
+ + F.LocalNumIdentifiers);
+ }
+ break;
+ }
+
+ case EXTERNAL_DEFINITIONS:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ ExternalDefinitions.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case SPECIAL_TYPES:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ SpecialTypes.push_back(getGlobalTypeID(F, Record[I]));
+ break;
+
+ case STATISTICS:
+ TotalNumStatements += Record[0];
+ TotalNumMacros += Record[1];
+ TotalLexicalDeclContexts += Record[2];
+ TotalVisibleDeclContexts += Record[3];
+ break;
+
+ case UNUSED_FILESCOPED_DECLS:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ UnusedFileScopedDecls.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case DELEGATING_CTORS:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ DelegatingCtorDecls.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case WEAK_UNDECLARED_IDENTIFIERS:
+ if (Record.size() % 4 != 0) {
+ Error("invalid weak identifiers record");
+ return Failure;
+ }
+
+ // FIXME: Ignore weak undeclared identifiers from non-original PCH
+ // files. This isn't the way to do it :)
+ WeakUndeclaredIdentifiers.clear();
+
+ // Translate the weak, undeclared identifiers into global IDs.
+ for (unsigned I = 0, N = Record.size(); I < N; /* in loop */) {
+ WeakUndeclaredIdentifiers.push_back(
+ getGlobalIdentifierID(F, Record[I++]));
+ WeakUndeclaredIdentifiers.push_back(
+ getGlobalIdentifierID(F, Record[I++]));
+ WeakUndeclaredIdentifiers.push_back(
+ ReadSourceLocation(F, Record, I).getRawEncoding());
+ WeakUndeclaredIdentifiers.push_back(Record[I++]);
+ }
+ break;
+
+ case LOCALLY_SCOPED_EXTERNAL_DECLS:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ LocallyScopedExternalDecls.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case SELECTOR_OFFSETS: {
+ F.SelectorOffsets = (const uint32_t *)BlobStart;
+ F.LocalNumSelectors = Record[0];
+ unsigned LocalBaseSelectorID = Record[1];
+ F.BaseSelectorID = getTotalNumSelectors();
+
+ if (F.LocalNumSelectors > 0) {
+ // Introduce the global -> local mapping for selectors within this
+ // module.
+ GlobalSelectorMap.insert(std::make_pair(getTotalNumSelectors()+1, &F));
+
+ // Introduce the local -> global mapping for selectors within this
+ // module.
+ F.SelectorRemap.insertOrReplace(
+ std::make_pair(LocalBaseSelectorID,
+ F.BaseSelectorID - LocalBaseSelectorID));
+
+ SelectorsLoaded.resize(SelectorsLoaded.size() + F.LocalNumSelectors);
+ }
+ break;
+ }
+
+ case METHOD_POOL:
+ F.SelectorLookupTableData = (const unsigned char *)BlobStart;
+ if (Record[0])
+ F.SelectorLookupTable
+ = ASTSelectorLookupTable::Create(
+ F.SelectorLookupTableData + Record[0],
+ F.SelectorLookupTableData,
+ ASTSelectorLookupTrait(*this, F));
+ TotalNumMethodPoolEntries += Record[1];
+ break;
+
+ case REFERENCED_SELECTOR_POOL:
+ if (!Record.empty()) {
+ for (unsigned Idx = 0, N = Record.size() - 1; Idx < N; /* in loop */) {
+ ReferencedSelectorsData.push_back(getGlobalSelectorID(F,
+ Record[Idx++]));
+ ReferencedSelectorsData.push_back(ReadSourceLocation(F, Record, Idx).
+ getRawEncoding());
+ }
+ }
+ break;
+
+ case PP_COUNTER_VALUE:
+ if (!Record.empty() && Listener)
+ Listener->ReadCounter(Record[0]);
+ break;
+
+ case FILE_SORTED_DECLS:
+ F.FileSortedDecls = (const DeclID *)BlobStart;
+ break;
+
+ case SOURCE_LOCATION_OFFSETS: {
+ F.SLocEntryOffsets = (const uint32_t *)BlobStart;
+ F.LocalNumSLocEntries = Record[0];
+ unsigned SLocSpaceSize = Record[1];
+ llvm::tie(F.SLocEntryBaseID, F.SLocEntryBaseOffset) =
+ SourceMgr.AllocateLoadedSLocEntries(F.LocalNumSLocEntries,
+ SLocSpaceSize);
+ // Make our entry in the range map. BaseID is negative and growing, so
+ // we invert it. Because we invert it, though, we need the other end of
+ // the range.
+ unsigned RangeStart =
+ unsigned(-F.SLocEntryBaseID) - F.LocalNumSLocEntries + 1;
+ GlobalSLocEntryMap.insert(std::make_pair(RangeStart, &F));
+ F.FirstLoc = SourceLocation::getFromRawEncoding(F.SLocEntryBaseOffset);
+
+ // SLocEntryBaseOffset is lower than MaxLoadedOffset and decreasing.
+ assert((F.SLocEntryBaseOffset & (1U << 31U)) == 0);
+ GlobalSLocOffsetMap.insert(
+ std::make_pair(SourceManager::MaxLoadedOffset - F.SLocEntryBaseOffset
+ - SLocSpaceSize,&F));
+
+ // Initialize the remapping table.
+ // Invalid stays invalid.
+ F.SLocRemap.insert(std::make_pair(0U, 0));
+ // This module. Base was 2 when being compiled.
+ F.SLocRemap.insert(std::make_pair(2U,
+ static_cast<int>(F.SLocEntryBaseOffset - 2)));
+
+ TotalNumSLocEntries += F.LocalNumSLocEntries;
+ break;
+ }
+
+ case MODULE_OFFSET_MAP: {
+ // Additional remapping information.
+ const unsigned char *Data = (const unsigned char*)BlobStart;
+ const unsigned char *DataEnd = Data + BlobLen;
+
+ // Continuous range maps we may be updating in our module.
+ ContinuousRangeMap<uint32_t, int, 2>::Builder SLocRemap(F.SLocRemap);
+ ContinuousRangeMap<uint32_t, int, 2>::Builder
+ IdentifierRemap(F.IdentifierRemap);
+ ContinuousRangeMap<uint32_t, int, 2>::Builder
+ PreprocessedEntityRemap(F.PreprocessedEntityRemap);
+ ContinuousRangeMap<uint32_t, int, 2>::Builder
+ SubmoduleRemap(F.SubmoduleRemap);
+ ContinuousRangeMap<uint32_t, int, 2>::Builder
+ SelectorRemap(F.SelectorRemap);
+ ContinuousRangeMap<uint32_t, int, 2>::Builder DeclRemap(F.DeclRemap);
+ ContinuousRangeMap<uint32_t, int, 2>::Builder TypeRemap(F.TypeRemap);
+
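+      // Each entry names a previously-loaded module file and gives, for every
+      // ID space (source locations, identifiers, preprocessed entities,
+      // submodules, selectors, decls, types), the local offset at which that
+      // module's entities begin in this file, so references can be remapped
+      // to the corresponding global IDs.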
+ while(Data < DataEnd) {
+ uint16_t Len = io::ReadUnalignedLE16(Data);
+ StringRef Name = StringRef((const char*)Data, Len);
+ Data += Len;
+ ModuleFile *OM = ModuleMgr.lookup(Name);
+ if (!OM) {
+ Error("SourceLocation remap refers to unknown module");
+ return Failure;
+ }
+
+ uint32_t SLocOffset = io::ReadUnalignedLE32(Data);
+ uint32_t IdentifierIDOffset = io::ReadUnalignedLE32(Data);
+ uint32_t PreprocessedEntityIDOffset = io::ReadUnalignedLE32(Data);
+ uint32_t SubmoduleIDOffset = io::ReadUnalignedLE32(Data);
+ uint32_t SelectorIDOffset = io::ReadUnalignedLE32(Data);
+ uint32_t DeclIDOffset = io::ReadUnalignedLE32(Data);
+ uint32_t TypeIndexOffset = io::ReadUnalignedLE32(Data);
+
+ // Source location offset is mapped to OM->SLocEntryBaseOffset.
+ SLocRemap.insert(std::make_pair(SLocOffset,
+ static_cast<int>(OM->SLocEntryBaseOffset - SLocOffset)));
+ IdentifierRemap.insert(
+ std::make_pair(IdentifierIDOffset,
+ OM->BaseIdentifierID - IdentifierIDOffset));
+ PreprocessedEntityRemap.insert(
+ std::make_pair(PreprocessedEntityIDOffset,
+ OM->BasePreprocessedEntityID - PreprocessedEntityIDOffset));
+ SubmoduleRemap.insert(std::make_pair(SubmoduleIDOffset,
+ OM->BaseSubmoduleID - SubmoduleIDOffset));
+ SelectorRemap.insert(std::make_pair(SelectorIDOffset,
+ OM->BaseSelectorID - SelectorIDOffset));
+ DeclRemap.insert(std::make_pair(DeclIDOffset,
+ OM->BaseDeclID - DeclIDOffset));
+
+ TypeRemap.insert(std::make_pair(TypeIndexOffset,
+ OM->BaseTypeIndex - TypeIndexOffset));
+
+ // Global -> local mappings.
+ F.GlobalToLocalDeclIDs[OM] = DeclIDOffset;
+ }
+ break;
+ }
+
+ case SOURCE_MANAGER_LINE_TABLE:
+ if (ParseLineTable(F, Record))
+ return Failure;
+ break;
+
+ case FILE_SOURCE_LOCATION_OFFSETS:
+ F.SLocFileOffsets = (const uint32_t *)BlobStart;
+ F.LocalNumSLocFileEntries = Record[0];
+ break;
+
+ case SOURCE_LOCATION_PRELOADS: {
+ // Need to transform from the local view (1-based IDs) to the global view,
+ // which is based off F.SLocEntryBaseID.
+ if (!F.PreloadSLocEntries.empty()) {
+ Error("Multiple SOURCE_LOCATION_PRELOADS records in AST file");
+ return Failure;
+ }
+
+ F.PreloadSLocEntries.swap(Record);
+ break;
+ }
+
+ case STAT_CACHE: {
+ if (!DisableStatCache) {
+ ASTStatCache *MyStatCache =
+ new ASTStatCache((const unsigned char *)BlobStart + Record[0],
+ (const unsigned char *)BlobStart,
+ NumStatHits, NumStatMisses);
+ FileMgr.addStatCache(MyStatCache);
+ F.StatCache = MyStatCache;
+ }
+ break;
+ }
+
+ case EXT_VECTOR_DECLS:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ ExtVectorDecls.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case VTABLE_USES:
+ if (Record.size() % 3 != 0) {
+ Error("Invalid VTABLE_USES record");
+ return Failure;
+ }
+
+ // Later tables overwrite earlier ones.
+ // FIXME: Modules will have some trouble with this. This is clearly not
+ // the right way to do this.
+ VTableUses.clear();
+
+ for (unsigned Idx = 0, N = Record.size(); Idx != N; /* In loop */) {
+ VTableUses.push_back(getGlobalDeclID(F, Record[Idx++]));
+ VTableUses.push_back(
+ ReadSourceLocation(F, Record, Idx).getRawEncoding());
+ VTableUses.push_back(Record[Idx++]);
+ }
+ break;
+
+ case DYNAMIC_CLASSES:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ DynamicClasses.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case PENDING_IMPLICIT_INSTANTIATIONS:
+      if (Record.size() % 2 != 0) {
+ Error("Invalid PENDING_IMPLICIT_INSTANTIATIONS block");
+ return Failure;
+ }
+
+ // Later lists of pending instantiations overwrite earlier ones.
+ // FIXME: This is most certainly wrong for modules.
+ PendingInstantiations.clear();
+ for (unsigned I = 0, N = Record.size(); I != N; /* in loop */) {
+ PendingInstantiations.push_back(getGlobalDeclID(F, Record[I++]));
+ PendingInstantiations.push_back(
+ ReadSourceLocation(F, Record, I).getRawEncoding());
+ }
+ break;
+
+ case SEMA_DECL_REFS:
+ // Later tables overwrite earlier ones.
+ // FIXME: Modules will have some trouble with this.
+ SemaDeclRefs.clear();
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ SemaDeclRefs.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case ORIGINAL_FILE_NAME:
+ // The primary AST will be the last to get here, so it will be the one
+ // that's used.
+ ActualOriginalFileName.assign(BlobStart, BlobLen);
+ OriginalFileName = ActualOriginalFileName;
+ MaybeAddSystemRootToFilename(OriginalFileName);
+ break;
+
+ case ORIGINAL_FILE_ID:
+ OriginalFileID = FileID::get(Record[0]);
+ break;
+
+ case ORIGINAL_PCH_DIR:
+ // The primary AST will be the last to get here, so it will be the one
+ // that's used.
+ OriginalDir.assign(BlobStart, BlobLen);
+ break;
+
+ case VERSION_CONTROL_BRANCH_REVISION: {
+ const std::string &CurBranch = getClangFullRepositoryVersion();
+ StringRef ASTBranch(BlobStart, BlobLen);
+ if (StringRef(CurBranch) != ASTBranch && !DisableValidation) {
+ Diag(diag::warn_pch_different_branch) << ASTBranch << CurBranch;
+ return IgnorePCH;
+ }
+ break;
+ }
+
+ case PPD_ENTITIES_OFFSETS: {
+ F.PreprocessedEntityOffsets = (const PPEntityOffset *)BlobStart;
+ assert(BlobLen % sizeof(PPEntityOffset) == 0);
+ F.NumPreprocessedEntities = BlobLen / sizeof(PPEntityOffset);
+
+ unsigned LocalBasePreprocessedEntityID = Record[0];
+
+ unsigned StartingID;
+ if (!PP.getPreprocessingRecord())
+ PP.createPreprocessingRecord(/*RecordConditionalDirectives=*/false);
+ if (!PP.getPreprocessingRecord()->getExternalSource())
+ PP.getPreprocessingRecord()->SetExternalSource(*this);
+ StartingID
+ = PP.getPreprocessingRecord()
+ ->allocateLoadedEntities(F.NumPreprocessedEntities);
+ F.BasePreprocessedEntityID = StartingID;
+
+ if (F.NumPreprocessedEntities > 0) {
+ // Introduce the global -> local mapping for preprocessed entities in
+ // this module.
+ GlobalPreprocessedEntityMap.insert(std::make_pair(StartingID, &F));
+
+ // Introduce the local -> global mapping for preprocessed entities in
+ // this module.
+ F.PreprocessedEntityRemap.insertOrReplace(
+ std::make_pair(LocalBasePreprocessedEntityID,
+ F.BasePreprocessedEntityID - LocalBasePreprocessedEntityID));
+ }
+
+ break;
+ }
+
+ case DECL_UPDATE_OFFSETS: {
+ if (Record.size() % 2 != 0) {
+ Error("invalid DECL_UPDATE_OFFSETS block in AST file");
+ return Failure;
+ }
+ for (unsigned I = 0, N = Record.size(); I != N; I += 2)
+ DeclUpdateOffsets[getGlobalDeclID(F, Record[I])]
+ .push_back(std::make_pair(&F, Record[I+1]));
+ break;
+ }
+
+ case DECL_REPLACEMENTS: {
+ if (Record.size() % 3 != 0) {
+ Error("invalid DECL_REPLACEMENTS block in AST file");
+ return Failure;
+ }
+ for (unsigned I = 0, N = Record.size(); I != N; I += 3)
+ ReplacedDecls[getGlobalDeclID(F, Record[I])]
+ = ReplacedDeclInfo(&F, Record[I+1], Record[I+2]);
+ break;
+ }
+
+ case OBJC_CATEGORIES_MAP: {
+ if (F.LocalNumObjCCategoriesInMap != 0) {
+ Error("duplicate OBJC_CATEGORIES_MAP record in AST file");
+ return Failure;
+ }
+
+ F.LocalNumObjCCategoriesInMap = Record[0];
+ F.ObjCCategoriesMap = (const ObjCCategoriesInfo *)BlobStart;
+ break;
+ }
+
+ case OBJC_CATEGORIES:
+ F.ObjCCategories.swap(Record);
+ break;
+
+ case CXX_BASE_SPECIFIER_OFFSETS: {
+ if (F.LocalNumCXXBaseSpecifiers != 0) {
+ Error("duplicate CXX_BASE_SPECIFIER_OFFSETS record in AST file");
+ return Failure;
+ }
+
+ F.LocalNumCXXBaseSpecifiers = Record[0];
+ F.CXXBaseSpecifiersOffsets = (const uint32_t *)BlobStart;
+ NumCXXBaseSpecifiersLoaded += F.LocalNumCXXBaseSpecifiers;
+ break;
+ }
+
+ case DIAG_PRAGMA_MAPPINGS:
+ if (Record.size() % 2 != 0) {
+        Error("invalid DIAG_PRAGMA_MAPPINGS block in AST file");
+ return Failure;
+ }
+
+ if (F.PragmaDiagMappings.empty())
+ F.PragmaDiagMappings.swap(Record);
+ else
+ F.PragmaDiagMappings.insert(F.PragmaDiagMappings.end(),
+ Record.begin(), Record.end());
+ break;
+
+ case CUDA_SPECIAL_DECL_REFS:
+ // Later tables overwrite earlier ones.
+ // FIXME: Modules will have trouble with this.
+ CUDASpecialDeclRefs.clear();
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ CUDASpecialDeclRefs.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case HEADER_SEARCH_TABLE: {
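+      // Record[0] is the offset of the on-disk header-info hash table within
+      // the blob, Record[1] the number of header file infos, and Record[2]
+      // the offset of the framework name strings.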
+ F.HeaderFileInfoTableData = BlobStart;
+ F.LocalNumHeaderFileInfos = Record[1];
+ F.HeaderFileFrameworkStrings = BlobStart + Record[2];
+ if (Record[0]) {
+ F.HeaderFileInfoTable
+ = HeaderFileInfoLookupTable::Create(
+ (const unsigned char *)F.HeaderFileInfoTableData + Record[0],
+ (const unsigned char *)F.HeaderFileInfoTableData,
+ HeaderFileInfoTrait(*this, F,
+ &PP.getHeaderSearchInfo(),
+ BlobStart + Record[2]));
+
+ PP.getHeaderSearchInfo().SetExternalSource(this);
+ if (!PP.getHeaderSearchInfo().getExternalLookup())
+ PP.getHeaderSearchInfo().SetExternalLookup(this);
+ }
+ break;
+ }
+
+ case FP_PRAGMA_OPTIONS:
+ // Later tables overwrite earlier ones.
+ FPPragmaOptions.swap(Record);
+ break;
+
+ case OPENCL_EXTENSIONS:
+ // Later tables overwrite earlier ones.
+ OpenCLExtensions.swap(Record);
+ break;
+
+ case TENTATIVE_DEFINITIONS:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ TentativeDefinitions.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case KNOWN_NAMESPACES:
+ for (unsigned I = 0, N = Record.size(); I != N; ++I)
+ KnownNamespaces.push_back(getGlobalDeclID(F, Record[I]));
+ break;
+
+ case IMPORTED_MODULES: {
+ if (F.Kind != MK_Module) {
+ // If we aren't loading a module (which has its own exports), make
+ // all of the imported modules visible.
+ // FIXME: Deal with macros-only imports.
+ for (unsigned I = 0, N = Record.size(); I != N; ++I) {
+ if (unsigned GlobalID = getGlobalSubmoduleID(F, Record[I]))
+ ImportedModules.push_back(GlobalID);
+ }
+ }
+ break;
+ }
+
+ case LOCAL_REDECLARATIONS: {
+ F.RedeclarationChains.swap(Record);
+ break;
+ }
+
+ case LOCAL_REDECLARATIONS_MAP: {
+ if (F.LocalNumRedeclarationsInMap != 0) {
+ Error("duplicate LOCAL_REDECLARATIONS_MAP record in AST file");
+ return Failure;
+ }
+
+ F.LocalNumRedeclarationsInMap = Record[0];
+ F.RedeclarationsMap = (const LocalRedeclarationsInfo *)BlobStart;
+ break;
+ }
+
+ case MERGED_DECLARATIONS: {
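+      // Each entry is a canonical declaration ID followed by a count and then
+      // that many IDs of declarations recorded as merged with it.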
+ for (unsigned Idx = 0; Idx < Record.size(); /* increment in loop */) {
+ GlobalDeclID CanonID = getGlobalDeclID(F, Record[Idx++]);
+ SmallVectorImpl<GlobalDeclID> &Decls = StoredMergedDecls[CanonID];
+ for (unsigned N = Record[Idx++]; N > 0; --N)
+ Decls.push_back(getGlobalDeclID(F, Record[Idx++]));
+ }
+ break;
+ }
+ }
+ }
+ Error("premature end of bitstream in AST file");
+ return Failure;
+}
+
+ASTReader::ASTReadResult ASTReader::validateFileEntries(ModuleFile &M) {
+ llvm::BitstreamCursor &SLocEntryCursor = M.SLocEntryCursor;
+
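+  // Re-check each file-backed source location entry against the file system,
+  // so that an AST file built from files that have since changed or
+  // disappeared is rejected.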
+ for (unsigned i = 0, e = M.LocalNumSLocFileEntries; i != e; ++i) {
+ SLocEntryCursor.JumpToBit(M.SLocFileOffsets[i]);
+ unsigned Code = SLocEntryCursor.ReadCode();
+ if (Code == llvm::bitc::END_BLOCK ||
+ Code == llvm::bitc::ENTER_SUBBLOCK ||
+ Code == llvm::bitc::DEFINE_ABBREV) {
+ Error("incorrectly-formatted source location entry in AST file");
+ return Failure;
+ }
+
+ RecordData Record;
+ const char *BlobStart;
+ unsigned BlobLen;
+ switch (SLocEntryCursor.ReadRecord(Code, Record, &BlobStart, &BlobLen)) {
+ default:
+ Error("incorrectly-formatted source location entry in AST file");
+ return Failure;
+
+ case SM_SLOC_FILE_ENTRY: {
+ // If the buffer was overridden, the file need not exist.
+ if (Record[6])
+ break;
+
+ StringRef Filename(BlobStart, BlobLen);
+ const FileEntry *File = getFileEntry(Filename);
+
+ if (File == 0) {
+ std::string ErrorStr = "could not find file '";
+ ErrorStr += Filename;
+ ErrorStr += "' referenced by AST file";
+ Error(ErrorStr.c_str());
+ return IgnorePCH;
+ }
+
+ if (Record.size() < 7) {
+ Error("source location entry is incorrect");
+ return Failure;
+ }
+
+ // The stat info from the FileEntry came from the cached stat
+ // info of the PCH, so we cannot trust it.
+ struct stat StatBuf;
+ if (::stat(File->getName(), &StatBuf) != 0) {
+ StatBuf.st_size = File->getSize();
+ StatBuf.st_mtime = File->getModificationTime();
+ }
+
+ if (((off_t)Record[4] != StatBuf.st_size
+#if !defined(LLVM_ON_WIN32)
+ // In our regression testing, the Windows file system seems to
+ // have inconsistent modification times that sometimes
+ // erroneously trigger this error-handling path.
+ || (time_t)Record[5] != StatBuf.st_mtime
+#endif
+ )) {
+ Error(diag::err_fe_pch_file_modified, Filename);
+ return IgnorePCH;
+ }
+
+ break;
+ }
+ }
+ }
+
+ return Success;
+}
+
+void ASTReader::makeNamesVisible(const HiddenNames &Names) {
+ for (unsigned I = 0, N = Names.size(); I != N; ++I) {
+ if (Decl *D = Names[I].dyn_cast<Decl *>())
+ D->Hidden = false;
+ else {
+ IdentifierInfo *II = Names[I].get<IdentifierInfo *>();
+ if (!II->hasMacroDefinition()) {
+ II->setHasMacroDefinition(true);
+ if (DeserializationListener)
+ DeserializationListener->MacroVisible(II);
+ }
+ }
+ }
+}
+
+void ASTReader::makeModuleVisible(Module *Mod,
+ Module::NameVisibilityKind NameVisibility) {
+ llvm::SmallPtrSet<Module *, 4> Visited;
+ llvm::SmallVector<Module *, 4> Stack;
+ Stack.push_back(Mod);
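+  // Walk the module graph with an explicit worklist, raising the visibility
+  // of each reachable module and following non-explicit submodules and
+  // (possibly restricted) wildcard exports.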
+ while (!Stack.empty()) {
+ Mod = Stack.back();
+ Stack.pop_back();
+
+ if (NameVisibility <= Mod->NameVisibility) {
+ // This module already has this level of visibility (or greater), so
+ // there is nothing more to do.
+ continue;
+ }
+
+ if (!Mod->isAvailable()) {
+ // Modules that aren't available cannot be made visible.
+ continue;
+ }
+
+ // Update the module's name visibility.
+ Mod->NameVisibility = NameVisibility;
+
+ // If we've already deserialized any names from this module,
+ // mark them as visible.
+ HiddenNamesMapType::iterator Hidden = HiddenNamesMap.find(Mod);
+ if (Hidden != HiddenNamesMap.end()) {
+ makeNamesVisible(Hidden->second);
+ HiddenNamesMap.erase(Hidden);
+ }
+
+ // Push any non-explicit submodules onto the stack to be marked as
+ // visible.
+ for (Module::submodule_iterator Sub = Mod->submodule_begin(),
+ SubEnd = Mod->submodule_end();
+ Sub != SubEnd; ++Sub) {
+ if (!(*Sub)->IsExplicit && Visited.insert(*Sub))
+ Stack.push_back(*Sub);
+ }
+
+ // Push any exported modules onto the stack to be marked as visible.
+ bool AnyWildcard = false;
+ bool UnrestrictedWildcard = false;
+ llvm::SmallVector<Module *, 4> WildcardRestrictions;
+ for (unsigned I = 0, N = Mod->Exports.size(); I != N; ++I) {
+ Module *Exported = Mod->Exports[I].getPointer();
+ if (!Mod->Exports[I].getInt()) {
+ // Export a named module directly; no wildcards involved.
+ if (Visited.insert(Exported))
+ Stack.push_back(Exported);
+
+ continue;
+ }
+
+ // Wildcard export: export all of the imported modules that match
+ // the given pattern.
+ AnyWildcard = true;
+ if (UnrestrictedWildcard)
+ continue;
+
+ if (Module *Restriction = Mod->Exports[I].getPointer())
+ WildcardRestrictions.push_back(Restriction);
+ else {
+ WildcardRestrictions.clear();
+ UnrestrictedWildcard = true;
+ }
+ }
+
+ // If there were any wildcards, push any imported modules that were
+ // re-exported by the wildcard restriction.
+ if (!AnyWildcard)
+ continue;
+
+ for (unsigned I = 0, N = Mod->Imports.size(); I != N; ++I) {
+ Module *Imported = Mod->Imports[I];
+ if (Visited.count(Imported))
+ continue;
+
+ bool Acceptable = UnrestrictedWildcard;
+ if (!Acceptable) {
+ // Check whether this module meets one of the restrictions.
+ for (unsigned R = 0, NR = WildcardRestrictions.size(); R != NR; ++R) {
+ Module *Restriction = WildcardRestrictions[R];
+ if (Imported == Restriction || Imported->isSubModuleOf(Restriction)) {
+ Acceptable = true;
+ break;
+ }
+ }
+ }
+
+ if (!Acceptable)
+ continue;
+
+ Visited.insert(Imported);
+ Stack.push_back(Imported);
+ }
+ }
+}
+
+ASTReader::ASTReadResult ASTReader::ReadAST(const std::string &FileName,
+ ModuleKind Type) {
+ // Bump the generation number.
+ unsigned PreviousGeneration = CurrentGeneration++;
+
+ switch(ReadASTCore(FileName, Type, /*ImportedBy=*/0)) {
+ case Failure: return Failure;
+ case IgnorePCH: return IgnorePCH;
+ case Success: break;
+ }
+
+ // Here comes stuff that we only do once the entire chain is loaded.
+
+ // Check the predefines buffers.
+ if (!DisableValidation && Type == MK_PCH &&
+ // FIXME: CheckPredefinesBuffers also sets the SuggestedPredefines;
+ // if DisableValidation is true, defines that were set on command-line
+ // but not in the PCH file will not be added to SuggestedPredefines.
+ CheckPredefinesBuffers())
+ return IgnorePCH;
+
+ // Mark all of the identifiers in the identifier table as being out of date,
+ // so that various accessors know to check the loaded modules when the
+ // identifier is used.
+ for (IdentifierTable::iterator Id = PP.getIdentifierTable().begin(),
+ IdEnd = PP.getIdentifierTable().end();
+ Id != IdEnd; ++Id)
+ Id->second->setOutOfDate(true);
+
+ // Resolve any unresolved module exports.
+ for (unsigned I = 0, N = UnresolvedModuleImportExports.size(); I != N; ++I) {
+ UnresolvedModuleImportExport &Unresolved = UnresolvedModuleImportExports[I];
+ SubmoduleID GlobalID = getGlobalSubmoduleID(*Unresolved.File,Unresolved.ID);
+ Module *ResolvedMod = getSubmodule(GlobalID);
+
+ if (Unresolved.IsImport) {
+ if (ResolvedMod)
+ Unresolved.Mod->Imports.push_back(ResolvedMod);
+ continue;
+ }
+
+ if (ResolvedMod || Unresolved.IsWildcard)
+ Unresolved.Mod->Exports.push_back(
+ Module::ExportDecl(ResolvedMod, Unresolved.IsWildcard));
+ }
+ UnresolvedModuleImportExports.clear();
+
+ InitializeContext();
+
+ if (DeserializationListener)
+ DeserializationListener->ReaderInitialized(this);
+
+ if (!OriginalFileID.isInvalid()) {
+ OriginalFileID = FileID::get(ModuleMgr.getPrimaryModule().SLocEntryBaseID
+ + OriginalFileID.getOpaqueValue() - 1);
+
+    // If this AST file is a precompiled preamble, then set the preamble file
+    // ID of the source manager to the source file from which the preamble was
+    // built.
+ if (Type == MK_Preamble) {
+ SourceMgr.setPreambleFileID(OriginalFileID);
+ } else if (Type == MK_MainFile) {
+ SourceMgr.setMainFileID(OriginalFileID);
+ }
+ }
+
+ // For any Objective-C class definitions we have already loaded, make sure
+ // that we load any additional categories.
+ for (unsigned I = 0, N = ObjCClassesLoaded.size(); I != N; ++I) {
+ loadObjCCategories(ObjCClassesLoaded[I]->getGlobalID(),
+ ObjCClassesLoaded[I],
+ PreviousGeneration);
+ }
+
+ return Success;
+}
+
+ASTReader::ASTReadResult ASTReader::ReadASTCore(StringRef FileName,
+ ModuleKind Type,
+ ModuleFile *ImportedBy) {
+ ModuleFile *M;
+ bool NewModule;
+ std::string ErrorStr;
+ llvm::tie(M, NewModule) = ModuleMgr.addModule(FileName, Type, ImportedBy,
+ CurrentGeneration, ErrorStr);
+
+ if (!M) {
+ // We couldn't load the module.
+ std::string Msg = "Unable to load module \"" + FileName.str() + "\": "
+ + ErrorStr;
+ Error(Msg);
+ return Failure;
+ }
+
+ if (!NewModule) {
+ // We've already loaded this module.
+ return Success;
+ }
+
+ // FIXME: This seems rather a hack. Should CurrentDir be part of the
+ // module?
+ if (FileName != "-") {
+ CurrentDir = llvm::sys::path::parent_path(FileName);
+ if (CurrentDir.empty()) CurrentDir = ".";
+ }
+
+ ModuleFile &F = *M;
+ llvm::BitstreamCursor &Stream = F.Stream;
+ Stream.init(F.StreamFile);
+ F.SizeInBits = F.Buffer->getBufferSize() * 8;
+
+ // Sniff for the signature.
+ if (Stream.Read(8) != 'C' ||
+ Stream.Read(8) != 'P' ||
+ Stream.Read(8) != 'C' ||
+ Stream.Read(8) != 'H') {
+ Diag(diag::err_not_a_pch_file) << FileName;
+ return Failure;
+ }
+
+ while (!Stream.AtEndOfStream()) {
+ unsigned Code = Stream.ReadCode();
+
+ if (Code != llvm::bitc::ENTER_SUBBLOCK) {
+ Error("invalid record at top-level of AST file");
+ return Failure;
+ }
+
+ unsigned BlockID = Stream.ReadSubBlockID();
+
+ // We only know the AST subblock ID.
+ switch (BlockID) {
+ case llvm::bitc::BLOCKINFO_BLOCK_ID:
+ if (Stream.ReadBlockInfoBlock()) {
+ Error("malformed BlockInfoBlock in AST file");
+ return Failure;
+ }
+ break;
+ case AST_BLOCK_ID:
+ switch (ReadASTBlock(F)) {
+ case Success:
+ break;
+
+ case Failure:
+ return Failure;
+
+ case IgnorePCH:
+ // FIXME: We could consider reading through to the end of this
+ // AST block, skipping subblocks, to see if there are other
+ // AST blocks elsewhere.
+
+ // FIXME: We can't clear loaded slocentries anymore.
+ //SourceMgr.ClearPreallocatedSLocEntries();
+
+ // Remove the stat cache.
+ if (F.StatCache)
+ FileMgr.removeStatCache((ASTStatCache*)F.StatCache);
+
+ return IgnorePCH;
+ }
+ break;
+ default:
+ if (Stream.SkipBlock()) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ break;
+ }
+ }
+
+ // Once read, set the ModuleFile bit base offset and update the size in
+ // bits of all files we've seen.
+ F.GlobalBitOffset = TotalModulesSizeInBits;
+ TotalModulesSizeInBits += F.SizeInBits;
+ GlobalBitOffsetsMap.insert(std::make_pair(F.GlobalBitOffset, &F));
+
+ // Make sure that the files this module was built against are still available.
+ if (!DisableValidation) {
+ switch(validateFileEntries(*M)) {
+ case Failure: return Failure;
+ case IgnorePCH: return IgnorePCH;
+ case Success: break;
+ }
+ }
+
+ // Preload SLocEntries.
+ for (unsigned I = 0, N = M->PreloadSLocEntries.size(); I != N; ++I) {
+ int Index = int(M->PreloadSLocEntries[I] - 1) + F.SLocEntryBaseID;
+ // Load it through the SourceManager and don't call ReadSLocEntryRecord()
+ // directly because the entry may have already been loaded in which case
+ // calling ReadSLocEntryRecord() directly would trigger an assertion in
+ // SourceManager.
+ SourceMgr.getLoadedSLocEntryByID(Index);
+ }
+
+ return Success;
+}
+
+void ASTReader::InitializeContext() {
+ // If there's a listener, notify them that we "read" the translation unit.
+ if (DeserializationListener)
+ DeserializationListener->DeclRead(PREDEF_DECL_TRANSLATION_UNIT_ID,
+ Context.getTranslationUnitDecl());
+
+ // Make sure we load the declaration update records for the translation unit,
+ // if there are any.
+ loadDeclUpdateRecords(PREDEF_DECL_TRANSLATION_UNIT_ID,
+ Context.getTranslationUnitDecl());
+
+ // FIXME: Find a better way to deal with collisions between these
+ // built-in types. Right now, we just ignore the problem.
+
+ // Load the special types.
+ if (SpecialTypes.size() >= NumSpecialTypeIDs) {
+ if (Context.getBuiltinVaListType().isNull()) {
+ Context.setBuiltinVaListType(
+ GetType(SpecialTypes[SPECIAL_TYPE_BUILTIN_VA_LIST]));
+ }
+
+ if (unsigned String = SpecialTypes[SPECIAL_TYPE_CF_CONSTANT_STRING]) {
+ if (!Context.CFConstantStringTypeDecl)
+ Context.setCFConstantStringType(GetType(String));
+ }
+
+ if (unsigned File = SpecialTypes[SPECIAL_TYPE_FILE]) {
+ QualType FileType = GetType(File);
+ if (FileType.isNull()) {
+ Error("FILE type is NULL");
+ return;
+ }
+
+ if (!Context.FILEDecl) {
+ if (const TypedefType *Typedef = FileType->getAs<TypedefType>())
+ Context.setFILEDecl(Typedef->getDecl());
+ else {
+ const TagType *Tag = FileType->getAs<TagType>();
+ if (!Tag) {
+ Error("Invalid FILE type in AST file");
+ return;
+ }
+ Context.setFILEDecl(Tag->getDecl());
+ }
+ }
+ }
+
+ if (unsigned Jmp_buf = SpecialTypes[SPECIAL_TYPE_JMP_BUF]) {
+ QualType Jmp_bufType = GetType(Jmp_buf);
+ if (Jmp_bufType.isNull()) {
+ Error("jmp_buf type is NULL");
+ return;
+ }
+
+ if (!Context.jmp_bufDecl) {
+ if (const TypedefType *Typedef = Jmp_bufType->getAs<TypedefType>())
+ Context.setjmp_bufDecl(Typedef->getDecl());
+ else {
+ const TagType *Tag = Jmp_bufType->getAs<TagType>();
+ if (!Tag) {
+ Error("Invalid jmp_buf type in AST file");
+ return;
+ }
+ Context.setjmp_bufDecl(Tag->getDecl());
+ }
+ }
+ }
+
+ if (unsigned Sigjmp_buf = SpecialTypes[SPECIAL_TYPE_SIGJMP_BUF]) {
+ QualType Sigjmp_bufType = GetType(Sigjmp_buf);
+ if (Sigjmp_bufType.isNull()) {
+ Error("sigjmp_buf type is NULL");
+ return;
+ }
+
+ if (!Context.sigjmp_bufDecl) {
+ if (const TypedefType *Typedef = Sigjmp_bufType->getAs<TypedefType>())
+ Context.setsigjmp_bufDecl(Typedef->getDecl());
+ else {
+ const TagType *Tag = Sigjmp_bufType->getAs<TagType>();
+ assert(Tag && "Invalid sigjmp_buf type in AST file");
+ Context.setsigjmp_bufDecl(Tag->getDecl());
+ }
+ }
+ }
+
+ if (unsigned ObjCIdRedef
+ = SpecialTypes[SPECIAL_TYPE_OBJC_ID_REDEFINITION]) {
+ if (Context.ObjCIdRedefinitionType.isNull())
+ Context.ObjCIdRedefinitionType = GetType(ObjCIdRedef);
+ }
+
+ if (unsigned ObjCClassRedef
+ = SpecialTypes[SPECIAL_TYPE_OBJC_CLASS_REDEFINITION]) {
+ if (Context.ObjCClassRedefinitionType.isNull())
+ Context.ObjCClassRedefinitionType = GetType(ObjCClassRedef);
+ }
+
+ if (unsigned ObjCSelRedef
+ = SpecialTypes[SPECIAL_TYPE_OBJC_SEL_REDEFINITION]) {
+ if (Context.ObjCSelRedefinitionType.isNull())
+ Context.ObjCSelRedefinitionType = GetType(ObjCSelRedef);
+ }
+
+ if (unsigned Ucontext_t = SpecialTypes[SPECIAL_TYPE_UCONTEXT_T]) {
+ QualType Ucontext_tType = GetType(Ucontext_t);
+ if (Ucontext_tType.isNull()) {
+ Error("ucontext_t type is NULL");
+ return;
+ }
+
+ if (!Context.ucontext_tDecl) {
+ if (const TypedefType *Typedef = Ucontext_tType->getAs<TypedefType>())
+ Context.setucontext_tDecl(Typedef->getDecl());
+ else {
+ const TagType *Tag = Ucontext_tType->getAs<TagType>();
+ assert(Tag && "Invalid ucontext_t type in AST file");
+ Context.setucontext_tDecl(Tag->getDecl());
+ }
+ }
+ }
+ }
+
+ ReadPragmaDiagnosticMappings(Context.getDiagnostics());
+
+ // If there were any CUDA special declarations, deserialize them.
+ if (!CUDASpecialDeclRefs.empty()) {
+ assert(CUDASpecialDeclRefs.size() == 1 && "More decl refs than expected!");
+ Context.setcudaConfigureCallDecl(
+ cast<FunctionDecl>(GetDecl(CUDASpecialDeclRefs[0])));
+ }
+
+ // Re-export any modules that were imported by a non-module AST file.
+ for (unsigned I = 0, N = ImportedModules.size(); I != N; ++I) {
+ if (Module *Imported = getSubmodule(ImportedModules[I]))
+ makeModuleVisible(Imported, Module::AllVisible);
+ }
+ ImportedModules.clear();
+}
+
+void ASTReader::finalizeForWriting() {
+ for (HiddenNamesMapType::iterator Hidden = HiddenNamesMap.begin(),
+ HiddenEnd = HiddenNamesMap.end();
+ Hidden != HiddenEnd; ++Hidden) {
+ makeNamesVisible(Hidden->second);
+ }
+ HiddenNamesMap.clear();
+}
+
+/// \brief Retrieve the name of the original source file
+/// directly from the AST file, without actually loading the AST
+/// file.
+std::string ASTReader::getOriginalSourceFile(const std::string &ASTFileName,
+ FileManager &FileMgr,
+ DiagnosticsEngine &Diags) {
+ // Open the AST file.
+ std::string ErrStr;
+ OwningPtr<llvm::MemoryBuffer> Buffer;
+ Buffer.reset(FileMgr.getBufferForFile(ASTFileName, &ErrStr));
+ if (!Buffer) {
+ Diags.Report(diag::err_fe_unable_to_read_pch_file) << ErrStr;
+ return std::string();
+ }
+
+ // Initialize the stream
+ llvm::BitstreamReader StreamFile;
+ llvm::BitstreamCursor Stream;
+ StreamFile.init((const unsigned char *)Buffer->getBufferStart(),
+ (const unsigned char *)Buffer->getBufferEnd());
+ Stream.init(StreamFile);
+
+ // Sniff for the signature.
+ if (Stream.Read(8) != 'C' ||
+ Stream.Read(8) != 'P' ||
+ Stream.Read(8) != 'C' ||
+ Stream.Read(8) != 'H') {
+ Diags.Report(diag::err_fe_not_a_pch_file) << ASTFileName;
+ return std::string();
+ }
+
+ RecordData Record;
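+  // Walk the stream, entering the AST block and skipping other blocks, until
+  // an ORIGINAL_FILE_NAME record is found; its blob holds the file name.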
+ while (!Stream.AtEndOfStream()) {
+ unsigned Code = Stream.ReadCode();
+
+ if (Code == llvm::bitc::ENTER_SUBBLOCK) {
+ unsigned BlockID = Stream.ReadSubBlockID();
+
+ // We only know the AST subblock ID.
+ switch (BlockID) {
+ case AST_BLOCK_ID:
+ if (Stream.EnterSubBlock(AST_BLOCK_ID)) {
+ Diags.Report(diag::err_fe_pch_malformed_block) << ASTFileName;
+ return std::string();
+ }
+ break;
+
+ default:
+ if (Stream.SkipBlock()) {
+ Diags.Report(diag::err_fe_pch_malformed_block) << ASTFileName;
+ return std::string();
+ }
+ break;
+ }
+ continue;
+ }
+
+ if (Code == llvm::bitc::END_BLOCK) {
+ if (Stream.ReadBlockEnd()) {
+ Diags.Report(diag::err_fe_pch_error_at_end_block) << ASTFileName;
+ return std::string();
+ }
+ continue;
+ }
+
+ if (Code == llvm::bitc::DEFINE_ABBREV) {
+ Stream.ReadAbbrevRecord();
+ continue;
+ }
+
+ Record.clear();
+ const char *BlobStart = 0;
+ unsigned BlobLen = 0;
+ if (Stream.ReadRecord(Code, Record, &BlobStart, &BlobLen)
+ == ORIGINAL_FILE_NAME)
+ return std::string(BlobStart, BlobLen);
+ }
+
+ return std::string();
+}
+
+ASTReader::ASTReadResult ASTReader::ReadSubmoduleBlock(ModuleFile &F) {
+ // Enter the submodule block.
+ if (F.Stream.EnterSubBlock(SUBMODULE_BLOCK_ID)) {
+ Error("malformed submodule block record in AST file");
+ return Failure;
+ }
+
+ ModuleMap &ModMap = PP.getHeaderSearchInfo().getModuleMap();
+ bool First = true;
+ Module *CurrentModule = 0;
+ RecordData Record;
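+  // Walk the records of the submodule block. SUBMODULE_METADATA must come
+  // first; SUBMODULE_DEFINITION establishes the module that subsequent
+  // records (headers, umbrellas, imports, exports, requirements) refine.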
+ while (true) {
+ unsigned Code = F.Stream.ReadCode();
+ if (Code == llvm::bitc::END_BLOCK) {
+ if (F.Stream.ReadBlockEnd()) {
+ Error("error at end of submodule block in AST file");
+ return Failure;
+ }
+ return Success;
+ }
+
+ if (Code == llvm::bitc::ENTER_SUBBLOCK) {
+ // No known subblocks, always skip them.
+ F.Stream.ReadSubBlockID();
+ if (F.Stream.SkipBlock()) {
+ Error("malformed block record in AST file");
+ return Failure;
+ }
+ continue;
+ }
+
+ if (Code == llvm::bitc::DEFINE_ABBREV) {
+ F.Stream.ReadAbbrevRecord();
+ continue;
+ }
+
+ // Read a record.
+ const char *BlobStart;
+ unsigned BlobLen;
+ Record.clear();
+ switch (F.Stream.ReadRecord(Code, Record, &BlobStart, &BlobLen)) {
+ default: // Default behavior: ignore.
+ break;
+
+ case SUBMODULE_DEFINITION: {
+ if (First) {
+ Error("missing submodule metadata record at beginning of block");
+ return Failure;
+ }
+
+      if (Record.size() < 8) {
+ Error("malformed module definition");
+ return Failure;
+ }
+
+ StringRef Name(BlobStart, BlobLen);
+ SubmoduleID GlobalID = getGlobalSubmoduleID(F, Record[0]);
+ SubmoduleID Parent = getGlobalSubmoduleID(F, Record[1]);
+ bool IsFramework = Record[2];
+ bool IsExplicit = Record[3];
+ bool IsSystem = Record[4];
+ bool InferSubmodules = Record[5];
+ bool InferExplicitSubmodules = Record[6];
+ bool InferExportWildcard = Record[7];
+
+ Module *ParentModule = 0;
+ if (Parent)
+ ParentModule = getSubmodule(Parent);
+
+ // Retrieve this (sub)module from the module map, creating it if
+ // necessary.
+ CurrentModule = ModMap.findOrCreateModule(Name, ParentModule,
+ IsFramework,
+ IsExplicit).first;
+ SubmoduleID GlobalIndex = GlobalID - NUM_PREDEF_SUBMODULE_IDS;
+ if (GlobalIndex >= SubmodulesLoaded.size() ||
+ SubmodulesLoaded[GlobalIndex]) {
+ Error("too many submodules");
+ return Failure;
+ }
+
+ CurrentModule->IsFromModuleFile = true;
+ CurrentModule->IsSystem = IsSystem || CurrentModule->IsSystem;
+ CurrentModule->InferSubmodules = InferSubmodules;
+ CurrentModule->InferExplicitSubmodules = InferExplicitSubmodules;
+ CurrentModule->InferExportWildcard = InferExportWildcard;
+ if (DeserializationListener)
+ DeserializationListener->ModuleRead(GlobalID, CurrentModule);
+
+ SubmodulesLoaded[GlobalIndex] = CurrentModule;
+ break;
+ }
+
+ case SUBMODULE_UMBRELLA_HEADER: {
+ if (First) {
+ Error("missing submodule metadata record at beginning of block");
+ return Failure;
+ }
+
+ if (!CurrentModule)
+ break;
+
+ StringRef FileName(BlobStart, BlobLen);
+ if (const FileEntry *Umbrella = PP.getFileManager().getFile(FileName)) {
+ if (!CurrentModule->getUmbrellaHeader())
+ ModMap.setUmbrellaHeader(CurrentModule, Umbrella);
+ else if (CurrentModule->getUmbrellaHeader() != Umbrella) {
+ Error("mismatched umbrella headers in submodule");
+ return Failure;
+ }
+ }
+ break;
+ }
+
+ case SUBMODULE_HEADER: {
+ if (First) {
+ Error("missing submodule metadata record at beginning of block");
+ return Failure;
+ }
+
+ if (!CurrentModule)
+ break;
+
+ // FIXME: Be more lazy about this!
+ StringRef FileName(BlobStart, BlobLen);
+ if (const FileEntry *File = PP.getFileManager().getFile(FileName)) {
+ if (std::find(CurrentModule->Headers.begin(),
+ CurrentModule->Headers.end(),
+ File) == CurrentModule->Headers.end())
+ ModMap.addHeader(CurrentModule, File);
+ }
+ break;
+ }
+
+ case SUBMODULE_UMBRELLA_DIR: {
+ if (First) {
+ Error("missing submodule metadata record at beginning of block");
+ return Failure;
+ }
+
+ if (!CurrentModule)
+ break;
+
+ StringRef DirName(BlobStart, BlobLen);
+ if (const DirectoryEntry *Umbrella
+ = PP.getFileManager().getDirectory(DirName)) {
+ if (!CurrentModule->getUmbrellaDir())
+ ModMap.setUmbrellaDir(CurrentModule, Umbrella);
+ else if (CurrentModule->getUmbrellaDir() != Umbrella) {
+ Error("mismatched umbrella directories in submodule");
+ return Failure;
+ }
+ }
+ break;
+ }
+
+ case SUBMODULE_METADATA: {
+ if (!First) {
+ Error("submodule metadata record not at beginning of block");
+ return Failure;
+ }
+ First = false;
+
+ F.BaseSubmoduleID = getTotalNumSubmodules();
+ F.LocalNumSubmodules = Record[0];
+ unsigned LocalBaseSubmoduleID = Record[1];
+ if (F.LocalNumSubmodules > 0) {
+ // Introduce the global -> local mapping for submodules within this
+ // module.
+ GlobalSubmoduleMap.insert(std::make_pair(getTotalNumSubmodules()+1,&F));
+
+ // Introduce the local -> global mapping for submodules within this
+ // module.
+ F.SubmoduleRemap.insertOrReplace(
+ std::make_pair(LocalBaseSubmoduleID,
+ F.BaseSubmoduleID - LocalBaseSubmoduleID));
+
+ SubmodulesLoaded.resize(SubmodulesLoaded.size() + F.LocalNumSubmodules);
+ }
+ break;
+ }
+
+ case SUBMODULE_IMPORTS: {
+ if (First) {
+ Error("missing submodule metadata record at beginning of block");
+ return Failure;
+ }
+
+ if (!CurrentModule)
+ break;
+
+ for (unsigned Idx = 0; Idx != Record.size(); ++Idx) {
+ UnresolvedModuleImportExport Unresolved;
+ Unresolved.File = &F;
+ Unresolved.Mod = CurrentModule;
+ Unresolved.ID = Record[Idx];
+ Unresolved.IsImport = true;
+ Unresolved.IsWildcard = false;
+ UnresolvedModuleImportExports.push_back(Unresolved);
+ }
+ break;
+ }
+
+ case SUBMODULE_EXPORTS: {
+ if (First) {
+ Error("missing submodule metadata record at beginning of block");
+ return Failure;
+ }
+
+ if (!CurrentModule)
+ break;
+
+ for (unsigned Idx = 0; Idx + 1 < Record.size(); Idx += 2) {
+ UnresolvedModuleImportExport Unresolved;
+ Unresolved.File = &F;
+ Unresolved.Mod = CurrentModule;
+ Unresolved.ID = Record[Idx];
+ Unresolved.IsImport = false;
+ Unresolved.IsWildcard = Record[Idx + 1];
+ UnresolvedModuleImportExports.push_back(Unresolved);
+ }
+
+ // Once we've loaded the set of exports, there's no reason to keep
+ // the parsed, unresolved exports around.
+ CurrentModule->UnresolvedExports.clear();
+ break;
+ }
+ case SUBMODULE_REQUIRES: {
+ if (First) {
+ Error("missing submodule metadata record at beginning of block");
+ return Failure;
+ }
+
+ if (!CurrentModule)
+ break;
+
+ CurrentModule->addRequirement(StringRef(BlobStart, BlobLen),
+ Context.getLangOpts(),
+ Context.getTargetInfo());
+ break;
+ }
+ }
+ }
+}
+
+/// \brief Parse the record that corresponds to a LangOptions data
+/// structure.
+///
+/// This routine parses the language options from the AST file and then gives
+/// them to the AST listener if one is set.
+///
+/// \returns true if the listener deems the file unacceptable, false otherwise.
+bool ASTReader::ParseLanguageOptions(
+ const SmallVectorImpl<uint64_t> &Record) {
+ if (Listener) {
+ LangOptions LangOpts;
+ unsigned Idx = 0;
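+    // The options are serialized in the order generated by LangOptions.def,
+    // followed by the length and characters of the current module's name.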
+#define LANGOPT(Name, Bits, Default, Description) \
+ LangOpts.Name = Record[Idx++];
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ LangOpts.set##Name(static_cast<LangOptions::Type>(Record[Idx++]));
+#include "clang/Basic/LangOptions.def"
+
+ unsigned Length = Record[Idx++];
+ LangOpts.CurrentModule.assign(Record.begin() + Idx,
+ Record.begin() + Idx + Length);
+ return Listener->ReadLanguageOptions(LangOpts);
+ }
+
+ return false;
+}
+
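+/// \brief Resolve a global preprocessed entity index to the module file that
+/// owns it and the corresponding module-local index.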
+std::pair<ModuleFile *, unsigned>
+ASTReader::getModulePreprocessedEntity(unsigned GlobalIndex) {
+ GlobalPreprocessedEntityMapType::iterator
+ I = GlobalPreprocessedEntityMap.find(GlobalIndex);
+ assert(I != GlobalPreprocessedEntityMap.end() &&
+ "Corrupted global preprocessed entity map");
+ ModuleFile *M = I->second;
+ unsigned LocalIndex = GlobalIndex - M->BasePreprocessedEntityID;
+ return std::make_pair(M, LocalIndex);
+}
+
+PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
+ PreprocessedEntityID PPID = Index+1;
+ std::pair<ModuleFile *, unsigned> PPInfo = getModulePreprocessedEntity(Index);
+ ModuleFile &M = *PPInfo.first;
+ unsigned LocalIndex = PPInfo.second;
+ const PPEntityOffset &PPOffs = M.PreprocessedEntityOffsets[LocalIndex];
+
+ SavedStreamPosition SavedPosition(M.PreprocessorDetailCursor);
+ M.PreprocessorDetailCursor.JumpToBit(PPOffs.BitOffset);
+
+ unsigned Code = M.PreprocessorDetailCursor.ReadCode();
+ switch (Code) {
+ case llvm::bitc::END_BLOCK:
+ return 0;
+
+ case llvm::bitc::ENTER_SUBBLOCK:
+ Error("unexpected subblock record in preprocessor detail block");
+ return 0;
+
+ case llvm::bitc::DEFINE_ABBREV:
+    Error("unexpected abbreviation record in preprocessor detail block");
+ return 0;
+
+ default:
+ break;
+ }
+
+ if (!PP.getPreprocessingRecord()) {
+ Error("no preprocessing record");
+ return 0;
+ }
+
+ // Read the record.
+ SourceRange Range(ReadSourceLocation(M, PPOffs.Begin),
+ ReadSourceLocation(M, PPOffs.End));
+ PreprocessingRecord &PPRec = *PP.getPreprocessingRecord();
+ const char *BlobStart = 0;
+ unsigned BlobLen = 0;
+ RecordData Record;
+ PreprocessorDetailRecordTypes RecType =
+ (PreprocessorDetailRecordTypes)M.PreprocessorDetailCursor.ReadRecord(
+ Code, Record, BlobStart, BlobLen);
+ switch (RecType) {
+ case PPD_MACRO_EXPANSION: {
+ bool isBuiltin = Record[0];
+ IdentifierInfo *Name = 0;
+ MacroDefinition *Def = 0;
+ if (isBuiltin)
+ Name = getLocalIdentifier(M, Record[1]);
+ else {
+ PreprocessedEntityID
+ GlobalID = getGlobalPreprocessedEntityID(M, Record[1]);
+ Def =cast<MacroDefinition>(PPRec.getLoadedPreprocessedEntity(GlobalID-1));
+ }
+
+ MacroExpansion *ME;
+ if (isBuiltin)
+ ME = new (PPRec) MacroExpansion(Name, Range);
+ else
+ ME = new (PPRec) MacroExpansion(Def, Range);
+
+ return ME;
+ }
+
+ case PPD_MACRO_DEFINITION: {
+    // Decode the identifier info for this macro definition and create the
+    // corresponding entry in the preprocessing record.
+ IdentifierInfo *II = getLocalIdentifier(M, Record[0]);
+ MacroDefinition *MD
+ = new (PPRec) MacroDefinition(II, Range);
+
+ if (DeserializationListener)
+ DeserializationListener->MacroDefinitionRead(PPID, MD);
+
+ return MD;
+ }
+
+ case PPD_INCLUSION_DIRECTIVE: {
+ const char *FullFileNameStart = BlobStart + Record[0];
+ StringRef FullFileName(FullFileNameStart, BlobLen - Record[0]);
+ const FileEntry *File = 0;
+ if (!FullFileName.empty())
+ File = PP.getFileManager().getFile(FullFileName);
+
+ // FIXME: Stable encoding
+ InclusionDirective::InclusionKind Kind
+ = static_cast<InclusionDirective::InclusionKind>(Record[2]);
+ InclusionDirective *ID
+ = new (PPRec) InclusionDirective(PPRec, Kind,
+ StringRef(BlobStart, Record[0]),
+ Record[1],
+ File,
+ Range);
+ return ID;
+ }
+ }
+
+ llvm_unreachable("Invalid PreprocessorDetailRecordTypes");
+}
+
+/// \brief \arg SLocMapI points at a chunk of a module that contains no
+/// preprocessed entities or the entities it contains are not the ones we are
+/// looking for. Find the next module that contains entities and return the ID
+/// of the first entry.
+PreprocessedEntityID ASTReader::findNextPreprocessedEntity(
+ GlobalSLocOffsetMapType::const_iterator SLocMapI) const {
+ ++SLocMapI;
+ for (GlobalSLocOffsetMapType::const_iterator
+ EndI = GlobalSLocOffsetMap.end(); SLocMapI != EndI; ++SLocMapI) {
+ ModuleFile &M = *SLocMapI->second;
+ if (M.NumPreprocessedEntities)
+ return getGlobalPreprocessedEntityID(M, M.BasePreprocessedEntityID);
+ }
+
+ return getTotalNumPreprocessedEntities();
+}
+
+namespace {
+
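+/// \brief Comparator that orders preprocessed-entity offsets by the source
+/// location selected by the PPLoc member pointer, using the translation-unit
+/// ordering provided by the SourceManager.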
+template <unsigned PPEntityOffset::*PPLoc>
+struct PPEntityComp {
+ const ASTReader &Reader;
+ ModuleFile &M;
+
+ PPEntityComp(const ASTReader &Reader, ModuleFile &M) : Reader(Reader), M(M) { }
+
+ bool operator()(const PPEntityOffset &L, const PPEntityOffset &R) const {
+ SourceLocation LHS = getLoc(L);
+ SourceLocation RHS = getLoc(R);
+ return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ bool operator()(const PPEntityOffset &L, SourceLocation RHS) const {
+ SourceLocation LHS = getLoc(L);
+ return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ bool operator()(SourceLocation LHS, const PPEntityOffset &R) const {
+ SourceLocation RHS = getLoc(R);
+ return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ SourceLocation getLoc(const PPEntityOffset &PPE) const {
+ return Reader.ReadSourceLocation(M, PPE.*PPLoc);
+ }
+};
+
+}
+
+/// \brief Returns the first preprocessed entity ID that ends after \arg BLoc.
+PreprocessedEntityID
+ASTReader::findBeginPreprocessedEntity(SourceLocation BLoc) const {
+ if (SourceMgr.isLocalSourceLocation(BLoc))
+ return getTotalNumPreprocessedEntities();
+
+ GlobalSLocOffsetMapType::const_iterator
+ SLocMapI = GlobalSLocOffsetMap.find(SourceManager::MaxLoadedOffset -
+ BLoc.getOffset());
+ assert(SLocMapI != GlobalSLocOffsetMap.end() &&
+ "Corrupted global sloc offset map");
+
+ if (SLocMapI->second->NumPreprocessedEntities == 0)
+ return findNextPreprocessedEntity(SLocMapI);
+
+ ModuleFile &M = *SLocMapI->second;
+ typedef const PPEntityOffset *pp_iterator;
+ pp_iterator pp_begin = M.PreprocessedEntityOffsets;
+ pp_iterator pp_end = pp_begin + M.NumPreprocessedEntities;
+
+ size_t Count = M.NumPreprocessedEntities;
+ size_t Half;
+ pp_iterator First = pp_begin;
+ pp_iterator PPI;
+
+ // Do a binary search manually instead of using std::lower_bound because
+  // the end locations of entities may be unordered (when a macro expansion
+ // is inside another macro argument), but for this case it is not important
+ // whether we get the first macro expansion or its containing macro.
+ while (Count > 0) {
+ Half = Count/2;
+ PPI = First;
+ std::advance(PPI, Half);
+ if (SourceMgr.isBeforeInTranslationUnit(ReadSourceLocation(M, PPI->End),
+ BLoc)){
+ First = PPI;
+ ++First;
+ Count = Count - Half - 1;
+ } else
+ Count = Half;
+ }
+
+ if (PPI == pp_end)
+ return findNextPreprocessedEntity(SLocMapI);
+
+ return getGlobalPreprocessedEntityID(M,
+ M.BasePreprocessedEntityID + (PPI - pp_begin));
+}
+
+/// \brief Returns the first preprocessed entity ID that begins after \arg ELoc.
+PreprocessedEntityID
+ASTReader::findEndPreprocessedEntity(SourceLocation ELoc) const {
+ if (SourceMgr.isLocalSourceLocation(ELoc))
+ return getTotalNumPreprocessedEntities();
+
+ GlobalSLocOffsetMapType::const_iterator
+ SLocMapI = GlobalSLocOffsetMap.find(SourceManager::MaxLoadedOffset -
+ ELoc.getOffset());
+ assert(SLocMapI != GlobalSLocOffsetMap.end() &&
+ "Corrupted global sloc offset map");
+
+ if (SLocMapI->second->NumPreprocessedEntities == 0)
+ return findNextPreprocessedEntity(SLocMapI);
+
+ ModuleFile &M = *SLocMapI->second;
+ typedef const PPEntityOffset *pp_iterator;
+ pp_iterator pp_begin = M.PreprocessedEntityOffsets;
+ pp_iterator pp_end = pp_begin + M.NumPreprocessedEntities;
+ pp_iterator PPI =
+ std::upper_bound(pp_begin, pp_end, ELoc,
+ PPEntityComp<&PPEntityOffset::Begin>(*this, M));
+
+ if (PPI == pp_end)
+ return findNextPreprocessedEntity(SLocMapI);
+
+ return getGlobalPreprocessedEntityID(M,
+ M.BasePreprocessedEntityID + (PPI - pp_begin));
+}
+
+/// \brief Returns a pair of [Begin, End) indices of preallocated
+/// preprocessed entities that \arg Range encompasses.
+std::pair<unsigned, unsigned>
+ ASTReader::findPreprocessedEntitiesInRange(SourceRange Range) {
+ if (Range.isInvalid())
+ return std::make_pair(0,0);
+ assert(!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(),Range.getBegin()));
+
+ PreprocessedEntityID BeginID = findBeginPreprocessedEntity(Range.getBegin());
+ PreprocessedEntityID EndID = findEndPreprocessedEntity(Range.getEnd());
+ return std::make_pair(BeginID, EndID);
+}
+
+/// \brief Optionally returns true or false if the preallocated preprocessed
+/// entity with index \arg Index came from file \arg FID.
+llvm::Optional<bool> ASTReader::isPreprocessedEntityInFileID(unsigned Index,
+ FileID FID) {
+ if (FID.isInvalid())
+ return false;
+
+ std::pair<ModuleFile *, unsigned> PPInfo = getModulePreprocessedEntity(Index);
+ ModuleFile &M = *PPInfo.first;
+ unsigned LocalIndex = PPInfo.second;
+ const PPEntityOffset &PPOffs = M.PreprocessedEntityOffsets[LocalIndex];
+
+ SourceLocation Loc = ReadSourceLocation(M, PPOffs.Begin);
+ if (Loc.isInvalid())
+ return false;
+
+ if (SourceMgr.isInFileID(SourceMgr.getFileLoc(Loc), FID))
+ return true;
+ else
+ return false;
+}
+
+namespace {
+ /// \brief Visitor used to search for information about a header file.
+ class HeaderFileInfoVisitor {
+ ASTReader &Reader;
+ const FileEntry *FE;
+
+ llvm::Optional<HeaderFileInfo> HFI;
+
+ public:
+ HeaderFileInfoVisitor(ASTReader &Reader, const FileEntry *FE)
+ : Reader(Reader), FE(FE) { }
+
+ static bool visit(ModuleFile &M, void *UserData) {
+ HeaderFileInfoVisitor *This
+ = static_cast<HeaderFileInfoVisitor *>(UserData);
+
+ HeaderFileInfoTrait Trait(This->Reader, M,
+ &This->Reader.getPreprocessor().getHeaderSearchInfo(),
+ M.HeaderFileFrameworkStrings,
+ This->FE->getName());
+
+ HeaderFileInfoLookupTable *Table
+ = static_cast<HeaderFileInfoLookupTable *>(M.HeaderFileInfoTable);
+ if (!Table)
+ return false;
+
+ // Look in the on-disk hash table for an entry for this file name.
+ HeaderFileInfoLookupTable::iterator Pos = Table->find(This->FE->getName(),
+ &Trait);
+ if (Pos == Table->end())
+ return false;
+
+ This->HFI = *Pos;
+ return true;
+ }
+
+ llvm::Optional<HeaderFileInfo> getHeaderFileInfo() const { return HFI; }
+ };
+}
+
+HeaderFileInfo ASTReader::GetHeaderFileInfo(const FileEntry *FE) {
+ HeaderFileInfoVisitor Visitor(*this, FE);
+ ModuleMgr.visit(&HeaderFileInfoVisitor::visit, &Visitor);
+ if (llvm::Optional<HeaderFileInfo> HFI = Visitor.getHeaderFileInfo()) {
+ if (Listener)
+ Listener->ReadHeaderFileInfo(*HFI, FE->getUID());
+ return *HFI;
+ }
+
+ return HeaderFileInfo();
+}
+
+void ASTReader::ReadPragmaDiagnosticMappings(DiagnosticsEngine &Diag) {
+ for (ModuleIterator I = ModuleMgr.begin(), E = ModuleMgr.end(); I != E; ++I) {
+ ModuleFile &F = *(*I);
+ unsigned Idx = 0;
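+    // The mapping data is a flat sequence: a source location followed by
+    // (diagnostic ID, mapping) pairs, terminated by a diagnostic ID of -1;
+    // the sequence may repeat for multiple locations.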
+ while (Idx < F.PragmaDiagMappings.size()) {
+ SourceLocation Loc = ReadSourceLocation(F, F.PragmaDiagMappings[Idx++]);
+ Diag.DiagStates.push_back(*Diag.GetCurDiagState());
+ Diag.DiagStatePoints.push_back(
+ DiagnosticsEngine::DiagStatePoint(&Diag.DiagStates.back(),
+ FullSourceLoc(Loc, SourceMgr)));
+ while (1) {
+ assert(Idx < F.PragmaDiagMappings.size() &&
+ "Invalid data, didn't find '-1' marking end of diag/map pairs");
+ if (Idx >= F.PragmaDiagMappings.size()) {
+ break; // Something is messed up but at least avoid infinite loop in
+ // release build.
+ }
+ unsigned DiagID = F.PragmaDiagMappings[Idx++];
+ if (DiagID == (unsigned)-1) {
+ break; // no more diag/map pairs for this location.
+ }
+ diag::Mapping Map = (diag::Mapping)F.PragmaDiagMappings[Idx++];
+ DiagnosticMappingInfo MappingInfo = Diag.makeMappingInfo(Map, Loc);
+ Diag.GetCurDiagState()->setMappingInfo(DiagID, MappingInfo);
+ }
+ }
+ }
+}
+
+/// \brief Get the correct cursor and offset for loading a type.
+ASTReader::RecordLocation ASTReader::TypeCursorForIndex(unsigned Index) {
+ GlobalTypeMapType::iterator I = GlobalTypeMap.find(Index);
+ assert(I != GlobalTypeMap.end() && "Corrupted global type map");
+ ModuleFile *M = I->second;
+ return RecordLocation(M, M->TypeOffsets[Index - M->BaseTypeIndex]);
+}
+
+/// \brief Read and return the type with the given index.
+///
+/// The index is the type ID with its qualifier bits shifted away and the
+/// number of predefined type IDs subtracted. This
+/// routine actually reads the record corresponding to the type at the given
+/// location. It is a helper routine for GetType, which deals with reading type
+/// IDs.
+QualType ASTReader::readTypeRecord(unsigned Index) {
+ RecordLocation Loc = TypeCursorForIndex(Index);
+ llvm::BitstreamCursor &DeclsCursor = Loc.F->DeclsCursor;
+
+ // Keep track of where we are in the stream, then jump back there
+ // after reading this type.
+ SavedStreamPosition SavedPosition(DeclsCursor);
+
+ ReadingKindTracker ReadingKind(Read_Type, *this);
+
+ // Note that we are loading a type record.
+ Deserializing AType(this);
+
+ unsigned Idx = 0;
+ DeclsCursor.JumpToBit(Loc.Offset);
+ RecordData Record;
+ unsigned Code = DeclsCursor.ReadCode();
+ switch ((TypeCode)DeclsCursor.ReadRecord(Code, Record)) {
+ case TYPE_EXT_QUAL: {
+ if (Record.size() != 2) {
+ Error("Incorrect encoding of extended qualifier type");
+ return QualType();
+ }
+ QualType Base = readType(*Loc.F, Record, Idx);
+ Qualifiers Quals = Qualifiers::fromOpaqueValue(Record[Idx++]);
+ return Context.getQualifiedType(Base, Quals);
+ }
+
+ case TYPE_COMPLEX: {
+ if (Record.size() != 1) {
+ Error("Incorrect encoding of complex type");
+ return QualType();
+ }
+ QualType ElemType = readType(*Loc.F, Record, Idx);
+ return Context.getComplexType(ElemType);
+ }
+
+ case TYPE_POINTER: {
+ if (Record.size() != 1) {
+ Error("Incorrect encoding of pointer type");
+ return QualType();
+ }
+ QualType PointeeType = readType(*Loc.F, Record, Idx);
+ return Context.getPointerType(PointeeType);
+ }
+
+ case TYPE_BLOCK_POINTER: {
+ if (Record.size() != 1) {
+ Error("Incorrect encoding of block pointer type");
+ return QualType();
+ }
+ QualType PointeeType = readType(*Loc.F, Record, Idx);
+ return Context.getBlockPointerType(PointeeType);
+ }
+
+ case TYPE_LVALUE_REFERENCE: {
+ if (Record.size() != 2) {
+ Error("Incorrect encoding of lvalue reference type");
+ return QualType();
+ }
+ QualType PointeeType = readType(*Loc.F, Record, Idx);
+ return Context.getLValueReferenceType(PointeeType, Record[1]);
+ }
+
+ case TYPE_RVALUE_REFERENCE: {
+ if (Record.size() != 1) {
+ Error("Incorrect encoding of rvalue reference type");
+ return QualType();
+ }
+ QualType PointeeType = readType(*Loc.F, Record, Idx);
+ return Context.getRValueReferenceType(PointeeType);
+ }
+
+ case TYPE_MEMBER_POINTER: {
+ if (Record.size() != 2) {
+ Error("Incorrect encoding of member pointer type");
+ return QualType();
+ }
+ QualType PointeeType = readType(*Loc.F, Record, Idx);
+ QualType ClassType = readType(*Loc.F, Record, Idx);
+ if (PointeeType.isNull() || ClassType.isNull())
+ return QualType();
+
+ return Context.getMemberPointerType(PointeeType, ClassType.getTypePtr());
+ }
+
+ case TYPE_CONSTANT_ARRAY: {
+ QualType ElementType = readType(*Loc.F, Record, Idx);
+ ArrayType::ArraySizeModifier ASM = (ArrayType::ArraySizeModifier)Record[1];
+ unsigned IndexTypeQuals = Record[2];
+ unsigned Idx = 3;
+ llvm::APInt Size = ReadAPInt(Record, Idx);
+ return Context.getConstantArrayType(ElementType, Size,
+ ASM, IndexTypeQuals);
+ }
+
+ case TYPE_INCOMPLETE_ARRAY: {
+ QualType ElementType = readType(*Loc.F, Record, Idx);
+ ArrayType::ArraySizeModifier ASM = (ArrayType::ArraySizeModifier)Record[1];
+ unsigned IndexTypeQuals = Record[2];
+ return Context.getIncompleteArrayType(ElementType, ASM, IndexTypeQuals);
+ }
+
+ case TYPE_VARIABLE_ARRAY: {
+ QualType ElementType = readType(*Loc.F, Record, Idx);
+ ArrayType::ArraySizeModifier ASM = (ArrayType::ArraySizeModifier)Record[1];
+ unsigned IndexTypeQuals = Record[2];
+ SourceLocation LBLoc = ReadSourceLocation(*Loc.F, Record[3]);
+ SourceLocation RBLoc = ReadSourceLocation(*Loc.F, Record[4]);
+ return Context.getVariableArrayType(ElementType, ReadExpr(*Loc.F),
+ ASM, IndexTypeQuals,
+ SourceRange(LBLoc, RBLoc));
+ }
+
+ case TYPE_VECTOR: {
+ if (Record.size() != 3) {
+ Error("incorrect encoding of vector type in AST file");
+ return QualType();
+ }
+
+ QualType ElementType = readType(*Loc.F, Record, Idx);
+ unsigned NumElements = Record[1];
+ unsigned VecKind = Record[2];
+ return Context.getVectorType(ElementType, NumElements,
+ (VectorType::VectorKind)VecKind);
+ }
+
+ case TYPE_EXT_VECTOR: {
+ if (Record.size() != 3) {
+ Error("incorrect encoding of extended vector type in AST file");
+ return QualType();
+ }
+
+ QualType ElementType = readType(*Loc.F, Record, Idx);
+ unsigned NumElements = Record[1];
+ return Context.getExtVectorType(ElementType, NumElements);
+ }
+
+ case TYPE_FUNCTION_NO_PROTO: {
+ if (Record.size() != 6) {
+ Error("incorrect encoding of no-proto function type");
+ return QualType();
+ }
+ QualType ResultType = readType(*Loc.F, Record, Idx);
+ FunctionType::ExtInfo Info(Record[1], Record[2], Record[3],
+ (CallingConv)Record[4], Record[5]);
+ return Context.getFunctionNoProtoType(ResultType, Info);
+ }
+
+ case TYPE_FUNCTION_PROTO: {
+ QualType ResultType = readType(*Loc.F, Record, Idx);
+
+ FunctionProtoType::ExtProtoInfo EPI;
+ EPI.ExtInfo = FunctionType::ExtInfo(/*noreturn*/ Record[1],
+ /*hasregparm*/ Record[2],
+ /*regparm*/ Record[3],
+ static_cast<CallingConv>(Record[4]),
+ /*produces*/ Record[5]);
+
+ unsigned Idx = 6;
+ unsigned NumParams = Record[Idx++];
+ SmallVector<QualType, 16> ParamTypes;
+ for (unsigned I = 0; I != NumParams; ++I)
+ ParamTypes.push_back(readType(*Loc.F, Record, Idx));
+
+ EPI.Variadic = Record[Idx++];
+ EPI.HasTrailingReturn = Record[Idx++];
+ EPI.TypeQuals = Record[Idx++];
+ EPI.RefQualifier = static_cast<RefQualifierKind>(Record[Idx++]);
+ ExceptionSpecificationType EST =
+ static_cast<ExceptionSpecificationType>(Record[Idx++]);
+ EPI.ExceptionSpecType = EST;
+ SmallVector<QualType, 2> Exceptions;
+ if (EST == EST_Dynamic) {
+ EPI.NumExceptions = Record[Idx++];
+ for (unsigned I = 0; I != EPI.NumExceptions; ++I)
+ Exceptions.push_back(readType(*Loc.F, Record, Idx));
+ EPI.Exceptions = Exceptions.data();
+ } else if (EST == EST_ComputedNoexcept) {
+ EPI.NoexceptExpr = ReadExpr(*Loc.F);
+ }
+ return Context.getFunctionType(ResultType, ParamTypes.data(), NumParams,
+ EPI);
+ }
+
+ case TYPE_UNRESOLVED_USING: {
+ unsigned Idx = 0;
+ return Context.getTypeDeclType(
+ ReadDeclAs<UnresolvedUsingTypenameDecl>(*Loc.F, Record, Idx));
+ }
+
+ case TYPE_TYPEDEF: {
+ if (Record.size() != 2) {
+ Error("incorrect encoding of typedef type");
+ return QualType();
+ }
+ unsigned Idx = 0;
+ TypedefNameDecl *Decl = ReadDeclAs<TypedefNameDecl>(*Loc.F, Record, Idx);
+ QualType Canonical = readType(*Loc.F, Record, Idx);
+ if (!Canonical.isNull())
+ Canonical = Context.getCanonicalType(Canonical);
+ return Context.getTypedefType(Decl, Canonical);
+ }
+
+ case TYPE_TYPEOF_EXPR:
+ return Context.getTypeOfExprType(ReadExpr(*Loc.F));
+
+ case TYPE_TYPEOF: {
+ if (Record.size() != 1) {
+ Error("incorrect encoding of typeof(type) in AST file");
+ return QualType();
+ }
+ QualType UnderlyingType = readType(*Loc.F, Record, Idx);
+ return Context.getTypeOfType(UnderlyingType);
+ }
+
+ case TYPE_DECLTYPE: {
+ QualType UnderlyingType = readType(*Loc.F, Record, Idx);
+ return Context.getDecltypeType(ReadExpr(*Loc.F), UnderlyingType);
+ }
+
+ case TYPE_UNARY_TRANSFORM: {
+ QualType BaseType = readType(*Loc.F, Record, Idx);
+ QualType UnderlyingType = readType(*Loc.F, Record, Idx);
+ UnaryTransformType::UTTKind UKind = (UnaryTransformType::UTTKind)Record[2];
+ return Context.getUnaryTransformType(BaseType, UnderlyingType, UKind);
+ }
+
+ case TYPE_AUTO:
+ return Context.getAutoType(readType(*Loc.F, Record, Idx));
+
+ case TYPE_RECORD: {
+ if (Record.size() != 2) {
+ Error("incorrect encoding of record type");
+ return QualType();
+ }
+ unsigned Idx = 0;
+ bool IsDependent = Record[Idx++];
+ RecordDecl *RD = ReadDeclAs<RecordDecl>(*Loc.F, Record, Idx);
+ RD = cast_or_null<RecordDecl>(RD->getCanonicalDecl());
+ QualType T = Context.getRecordType(RD);
+ const_cast<Type*>(T.getTypePtr())->setDependent(IsDependent);
+ return T;
+ }
+
+ case TYPE_ENUM: {
+ if (Record.size() != 2) {
+ Error("incorrect encoding of enum type");
+ return QualType();
+ }
+ unsigned Idx = 0;
+ bool IsDependent = Record[Idx++];
+ QualType T
+ = Context.getEnumType(ReadDeclAs<EnumDecl>(*Loc.F, Record, Idx));
+ const_cast<Type*>(T.getTypePtr())->setDependent(IsDependent);
+ return T;
+ }
+
+ case TYPE_ATTRIBUTED: {
+ if (Record.size() != 3) {
+ Error("incorrect encoding of attributed type");
+ return QualType();
+ }
+ QualType modifiedType = readType(*Loc.F, Record, Idx);
+ QualType equivalentType = readType(*Loc.F, Record, Idx);
+ AttributedType::Kind kind = static_cast<AttributedType::Kind>(Record[2]);
+ return Context.getAttributedType(kind, modifiedType, equivalentType);
+ }
+
+ case TYPE_PAREN: {
+ if (Record.size() != 1) {
+ Error("incorrect encoding of paren type");
+ return QualType();
+ }
+ QualType InnerType = readType(*Loc.F, Record, Idx);
+ return Context.getParenType(InnerType);
+ }
+
+ case TYPE_PACK_EXPANSION: {
+ if (Record.size() != 2) {
+ Error("incorrect encoding of pack expansion type");
+ return QualType();
+ }
+ QualType Pattern = readType(*Loc.F, Record, Idx);
+ if (Pattern.isNull())
+ return QualType();
+ llvm::Optional<unsigned> NumExpansions;
+ if (Record[1])
+ NumExpansions = Record[1] - 1;
+ return Context.getPackExpansionType(Pattern, NumExpansions);
+ }
+
+ case TYPE_ELABORATED: {
+ unsigned Idx = 0;
+ ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
+ NestedNameSpecifier *NNS = ReadNestedNameSpecifier(*Loc.F, Record, Idx);
+ QualType NamedType = readType(*Loc.F, Record, Idx);
+ return Context.getElaboratedType(Keyword, NNS, NamedType);
+ }
+
+ case TYPE_OBJC_INTERFACE: {
+ unsigned Idx = 0;
+ ObjCInterfaceDecl *ItfD
+ = ReadDeclAs<ObjCInterfaceDecl>(*Loc.F, Record, Idx);
+ return Context.getObjCInterfaceType(ItfD->getCanonicalDecl());
+ }
+
+ case TYPE_OBJC_OBJECT: {
+ unsigned Idx = 0;
+ QualType Base = readType(*Loc.F, Record, Idx);
+ unsigned NumProtos = Record[Idx++];
+ SmallVector<ObjCProtocolDecl*, 4> Protos;
+ for (unsigned I = 0; I != NumProtos; ++I)
+ Protos.push_back(ReadDeclAs<ObjCProtocolDecl>(*Loc.F, Record, Idx));
+ return Context.getObjCObjectType(Base, Protos.data(), NumProtos);
+ }
+
+ case TYPE_OBJC_OBJECT_POINTER: {
+ unsigned Idx = 0;
+ QualType Pointee = readType(*Loc.F, Record, Idx);
+ return Context.getObjCObjectPointerType(Pointee);
+ }
+
+ case TYPE_SUBST_TEMPLATE_TYPE_PARM: {
+ unsigned Idx = 0;
+ QualType Parm = readType(*Loc.F, Record, Idx);
+ QualType Replacement = readType(*Loc.F, Record, Idx);
+ return
+ Context.getSubstTemplateTypeParmType(cast<TemplateTypeParmType>(Parm),
+ Replacement);
+ }
+
+ case TYPE_SUBST_TEMPLATE_TYPE_PARM_PACK: {
+ unsigned Idx = 0;
+ QualType Parm = readType(*Loc.F, Record, Idx);
+ TemplateArgument ArgPack = ReadTemplateArgument(*Loc.F, Record, Idx);
+ return Context.getSubstTemplateTypeParmPackType(
+ cast<TemplateTypeParmType>(Parm),
+ ArgPack);
+ }
+
+ case TYPE_INJECTED_CLASS_NAME: {
+ CXXRecordDecl *D = ReadDeclAs<CXXRecordDecl>(*Loc.F, Record, Idx);
+ QualType TST = readType(*Loc.F, Record, Idx); // probably derivable
+    // FIXME: ASTContext::getInjectedClassNameType is not currently suitable
+    // for AST reading; there are too many interdependencies.
+ return
+ QualType(new (Context, TypeAlignment) InjectedClassNameType(D, TST), 0);
+ }
+
+ case TYPE_TEMPLATE_TYPE_PARM: {
+ unsigned Idx = 0;
+ unsigned Depth = Record[Idx++];
+ unsigned Index = Record[Idx++];
+ bool Pack = Record[Idx++];
+ TemplateTypeParmDecl *D
+ = ReadDeclAs<TemplateTypeParmDecl>(*Loc.F, Record, Idx);
+ return Context.getTemplateTypeParmType(Depth, Index, Pack, D);
+ }
+
+ case TYPE_DEPENDENT_NAME: {
+ unsigned Idx = 0;
+ ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
+ NestedNameSpecifier *NNS = ReadNestedNameSpecifier(*Loc.F, Record, Idx);
+ const IdentifierInfo *Name = this->GetIdentifierInfo(*Loc.F, Record, Idx);
+ QualType Canon = readType(*Loc.F, Record, Idx);
+ if (!Canon.isNull())
+ Canon = Context.getCanonicalType(Canon);
+ return Context.getDependentNameType(Keyword, NNS, Name, Canon);
+ }
+
+ case TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION: {
+ unsigned Idx = 0;
+ ElaboratedTypeKeyword Keyword = (ElaboratedTypeKeyword)Record[Idx++];
+ NestedNameSpecifier *NNS = ReadNestedNameSpecifier(*Loc.F, Record, Idx);
+ const IdentifierInfo *Name = this->GetIdentifierInfo(*Loc.F, Record, Idx);
+ unsigned NumArgs = Record[Idx++];
+ SmallVector<TemplateArgument, 8> Args;
+ Args.reserve(NumArgs);
+ while (NumArgs--)
+ Args.push_back(ReadTemplateArgument(*Loc.F, Record, Idx));
+ return Context.getDependentTemplateSpecializationType(Keyword, NNS, Name,
+ Args.size(), Args.data());
+ }
+
+ case TYPE_DEPENDENT_SIZED_ARRAY: {
+ unsigned Idx = 0;
+
+ // ArrayType
+ QualType ElementType = readType(*Loc.F, Record, Idx);
+ ArrayType::ArraySizeModifier ASM
+ = (ArrayType::ArraySizeModifier)Record[Idx++];
+ unsigned IndexTypeQuals = Record[Idx++];
+
+ // DependentSizedArrayType
+ Expr *NumElts = ReadExpr(*Loc.F);
+ SourceRange Brackets = ReadSourceRange(*Loc.F, Record, Idx);
+
+ return Context.getDependentSizedArrayType(ElementType, NumElts, ASM,
+ IndexTypeQuals, Brackets);
+ }
+
+ case TYPE_TEMPLATE_SPECIALIZATION: {
+ unsigned Idx = 0;
+ bool IsDependent = Record[Idx++];
+ TemplateName Name = ReadTemplateName(*Loc.F, Record, Idx);
+ SmallVector<TemplateArgument, 8> Args;
+ ReadTemplateArgumentList(Args, *Loc.F, Record, Idx);
+ QualType Underlying = readType(*Loc.F, Record, Idx);
+ QualType T;
+ if (Underlying.isNull())
+ T = Context.getCanonicalTemplateSpecializationType(Name, Args.data(),
+ Args.size());
+ else
+ T = Context.getTemplateSpecializationType(Name, Args.data(),
+ Args.size(), Underlying);
+ const_cast<Type*>(T.getTypePtr())->setDependent(IsDependent);
+ return T;
+ }
+
+ case TYPE_ATOMIC: {
+ if (Record.size() != 1) {
+ Error("Incorrect encoding of atomic type");
+ return QualType();
+ }
+ QualType ValueType = readType(*Loc.F, Record, Idx);
+ return Context.getAtomicType(ValueType);
+ }
+ }
+ llvm_unreachable("Invalid TypeCode!");
+}
+
+class clang::TypeLocReader : public TypeLocVisitor<TypeLocReader> {
+ ASTReader &Reader;
+ ModuleFile &F;
+ llvm::BitstreamCursor &DeclsCursor;
+ const ASTReader::RecordData &Record;
+ unsigned &Idx;
+
+ SourceLocation ReadSourceLocation(const ASTReader::RecordData &R,
+ unsigned &I) {
+ return Reader.ReadSourceLocation(F, R, I);
+ }
+
+ template<typename T>
+ T *ReadDeclAs(const ASTReader::RecordData &Record, unsigned &Idx) {
+ return Reader.ReadDeclAs<T>(F, Record, Idx);
+ }
+
+public:
+ TypeLocReader(ASTReader &Reader, ModuleFile &F,
+ const ASTReader::RecordData &Record, unsigned &Idx)
+ : Reader(Reader), F(F), DeclsCursor(F.DeclsCursor), Record(Record), Idx(Idx)
+ { }
+
+ // We want compile-time assurance that we've enumerated all of
+ // these, so unfortunately we have to declare them first, then
+ // define them out-of-line.
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ void Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc);
+#include "clang/AST/TypeLocNodes.def"
+
+ void VisitFunctionTypeLoc(FunctionTypeLoc);
+ void VisitArrayTypeLoc(ArrayTypeLoc);
+};
+
+void TypeLocReader::VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
+ // nothing to do
+}
+void TypeLocReader::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
+ TL.setBuiltinLoc(ReadSourceLocation(Record, Idx));
+ if (TL.needsExtraLocalData()) {
+ TL.setWrittenTypeSpec(static_cast<DeclSpec::TST>(Record[Idx++]));
+ TL.setWrittenSignSpec(static_cast<DeclSpec::TSS>(Record[Idx++]));
+ TL.setWrittenWidthSpec(static_cast<DeclSpec::TSW>(Record[Idx++]));
+ TL.setModeAttr(Record[Idx++]);
+ }
+}
+void TypeLocReader::VisitComplexTypeLoc(ComplexTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitPointerTypeLoc(PointerTypeLoc TL) {
+ TL.setStarLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
+ TL.setCaretLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) {
+ TL.setAmpLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitRValueReferenceTypeLoc(RValueReferenceTypeLoc TL) {
+ TL.setAmpAmpLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitMemberPointerTypeLoc(MemberPointerTypeLoc TL) {
+ TL.setStarLoc(ReadSourceLocation(Record, Idx));
+ TL.setClassTInfo(Reader.GetTypeSourceInfo(F, Record, Idx));
+}
+void TypeLocReader::VisitArrayTypeLoc(ArrayTypeLoc TL) {
+ TL.setLBracketLoc(ReadSourceLocation(Record, Idx));
+ TL.setRBracketLoc(ReadSourceLocation(Record, Idx));
+ if (Record[Idx++])
+ TL.setSizeExpr(Reader.ReadExpr(F));
+ else
+ TL.setSizeExpr(0);
+}
+void TypeLocReader::VisitConstantArrayTypeLoc(ConstantArrayTypeLoc TL) {
+ VisitArrayTypeLoc(TL);
+}
+void TypeLocReader::VisitIncompleteArrayTypeLoc(IncompleteArrayTypeLoc TL) {
+ VisitArrayTypeLoc(TL);
+}
+void TypeLocReader::VisitVariableArrayTypeLoc(VariableArrayTypeLoc TL) {
+ VisitArrayTypeLoc(TL);
+}
+void TypeLocReader::VisitDependentSizedArrayTypeLoc(
+ DependentSizedArrayTypeLoc TL) {
+ VisitArrayTypeLoc(TL);
+}
+void TypeLocReader::VisitDependentSizedExtVectorTypeLoc(
+ DependentSizedExtVectorTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitVectorTypeLoc(VectorTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
+ TL.setLocalRangeBegin(ReadSourceLocation(Record, Idx));
+ TL.setLocalRangeEnd(ReadSourceLocation(Record, Idx));
+ TL.setTrailingReturn(Record[Idx++]);
+ for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i) {
+ TL.setArg(i, ReadDeclAs<ParmVarDecl>(Record, Idx));
+ }
+}
+void TypeLocReader::VisitFunctionProtoTypeLoc(FunctionProtoTypeLoc TL) {
+ VisitFunctionTypeLoc(TL);
+}
+void TypeLocReader::VisitFunctionNoProtoTypeLoc(FunctionNoProtoTypeLoc TL) {
+ VisitFunctionTypeLoc(TL);
+}
+void TypeLocReader::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
+ TL.setTypeofLoc(ReadSourceLocation(Record, Idx));
+ TL.setLParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
+ TL.setTypeofLoc(ReadSourceLocation(Record, Idx));
+ TL.setLParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setRParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setUnderlyingTInfo(Reader.GetTypeSourceInfo(F, Record, Idx));
+}
+void TypeLocReader::VisitDecltypeTypeLoc(DecltypeTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
+ TL.setKWLoc(ReadSourceLocation(Record, Idx));
+ TL.setLParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setRParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setUnderlyingTInfo(Reader.GetTypeSourceInfo(F, Record, Idx));
+}
+void TypeLocReader::VisitAutoTypeLoc(AutoTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitRecordTypeLoc(RecordTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitEnumTypeLoc(EnumTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
+ TL.setAttrNameLoc(ReadSourceLocation(Record, Idx));
+ if (TL.hasAttrOperand()) {
+ SourceRange range;
+ range.setBegin(ReadSourceLocation(Record, Idx));
+ range.setEnd(ReadSourceLocation(Record, Idx));
+ TL.setAttrOperandParensRange(range);
+ }
+ if (TL.hasAttrExprOperand()) {
+ if (Record[Idx++])
+ TL.setAttrExprOperand(Reader.ReadExpr(F));
+ else
+ TL.setAttrExprOperand(0);
+ } else if (TL.hasAttrEnumOperand())
+ TL.setAttrEnumOperandLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitSubstTemplateTypeParmTypeLoc(
+ SubstTemplateTypeParmTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitSubstTemplateTypeParmPackTypeLoc(
+ SubstTemplateTypeParmPackTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitTemplateSpecializationTypeLoc(
+ TemplateSpecializationTypeLoc TL) {
+ TL.setTemplateKeywordLoc(ReadSourceLocation(Record, Idx));
+ TL.setTemplateNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setLAngleLoc(ReadSourceLocation(Record, Idx));
+ TL.setRAngleLoc(ReadSourceLocation(Record, Idx));
+ for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
+ TL.setArgLocInfo(i,
+ Reader.GetTemplateArgumentLocInfo(F,
+ TL.getTypePtr()->getArg(i).getKind(),
+ Record, Idx));
+}
+void TypeLocReader::VisitParenTypeLoc(ParenTypeLoc TL) {
+ TL.setLParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
+ TL.setElaboratedKeywordLoc(ReadSourceLocation(Record, Idx));
+ TL.setQualifierLoc(Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
+}
+void TypeLocReader::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
+ TL.setElaboratedKeywordLoc(ReadSourceLocation(Record, Idx));
+ TL.setQualifierLoc(Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitDependentTemplateSpecializationTypeLoc(
+ DependentTemplateSpecializationTypeLoc TL) {
+ TL.setElaboratedKeywordLoc(ReadSourceLocation(Record, Idx));
+ TL.setQualifierLoc(Reader.ReadNestedNameSpecifierLoc(F, Record, Idx));
+ TL.setTemplateKeywordLoc(ReadSourceLocation(Record, Idx));
+ TL.setTemplateNameLoc(ReadSourceLocation(Record, Idx));
+ TL.setLAngleLoc(ReadSourceLocation(Record, Idx));
+ TL.setRAngleLoc(ReadSourceLocation(Record, Idx));
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
+ TL.setArgLocInfo(I,
+ Reader.GetTemplateArgumentLocInfo(F,
+ TL.getTypePtr()->getArg(I).getKind(),
+ Record, Idx));
+}
+void TypeLocReader::VisitPackExpansionTypeLoc(PackExpansionTypeLoc TL) {
+ TL.setEllipsisLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
+ TL.setNameLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) {
+ TL.setHasBaseTypeAsWritten(Record[Idx++]);
+ TL.setLAngleLoc(ReadSourceLocation(Record, Idx));
+ TL.setRAngleLoc(ReadSourceLocation(Record, Idx));
+ for (unsigned i = 0, e = TL.getNumProtocols(); i != e; ++i)
+ TL.setProtocolLoc(i, ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) {
+ TL.setStarLoc(ReadSourceLocation(Record, Idx));
+}
+void TypeLocReader::VisitAtomicTypeLoc(AtomicTypeLoc TL) {
+ TL.setKWLoc(ReadSourceLocation(Record, Idx));
+ TL.setLParenLoc(ReadSourceLocation(Record, Idx));
+ TL.setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+TypeSourceInfo *ASTReader::GetTypeSourceInfo(ModuleFile &F,
+ const RecordData &Record,
+ unsigned &Idx) {
+ QualType InfoTy = readType(F, Record, Idx);
+ if (InfoTy.isNull())
+ return 0;
+
+ TypeSourceInfo *TInfo = getContext().CreateTypeSourceInfo(InfoTy);
+ TypeLocReader TLR(*this, F, Record, Idx);
+ for (TypeLoc TL = TInfo->getTypeLoc(); !TL.isNull(); TL = TL.getNextTypeLoc())
+ TLR.Visit(TL);
+ return TInfo;
+}
+
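+/// \brief Resolve a type ID into a QualType. The low bits of the ID carry the
+/// fast qualifiers; the remaining index either names a predefined type or
+/// selects a type record that is deserialized lazily on first use.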
+QualType ASTReader::GetType(TypeID ID) {
+ unsigned FastQuals = ID & Qualifiers::FastMask;
+ unsigned Index = ID >> Qualifiers::FastWidth;
+
+ if (Index < NUM_PREDEF_TYPE_IDS) {
+ QualType T;
+ switch ((PredefinedTypeIDs)Index) {
+ case PREDEF_TYPE_NULL_ID: return QualType();
+ case PREDEF_TYPE_VOID_ID: T = Context.VoidTy; break;
+ case PREDEF_TYPE_BOOL_ID: T = Context.BoolTy; break;
+
+ case PREDEF_TYPE_CHAR_U_ID:
+ case PREDEF_TYPE_CHAR_S_ID:
+ // FIXME: Check that the signedness of CharTy is correct!
+ T = Context.CharTy;
+ break;
+
+ case PREDEF_TYPE_UCHAR_ID: T = Context.UnsignedCharTy; break;
+ case PREDEF_TYPE_USHORT_ID: T = Context.UnsignedShortTy; break;
+ case PREDEF_TYPE_UINT_ID: T = Context.UnsignedIntTy; break;
+ case PREDEF_TYPE_ULONG_ID: T = Context.UnsignedLongTy; break;
+ case PREDEF_TYPE_ULONGLONG_ID: T = Context.UnsignedLongLongTy; break;
+ case PREDEF_TYPE_UINT128_ID: T = Context.UnsignedInt128Ty; break;
+ case PREDEF_TYPE_SCHAR_ID: T = Context.SignedCharTy; break;
+ case PREDEF_TYPE_WCHAR_ID: T = Context.WCharTy; break;
+ case PREDEF_TYPE_SHORT_ID: T = Context.ShortTy; break;
+ case PREDEF_TYPE_INT_ID: T = Context.IntTy; break;
+ case PREDEF_TYPE_LONG_ID: T = Context.LongTy; break;
+ case PREDEF_TYPE_LONGLONG_ID: T = Context.LongLongTy; break;
+ case PREDEF_TYPE_INT128_ID: T = Context.Int128Ty; break;
+ case PREDEF_TYPE_HALF_ID: T = Context.HalfTy; break;
+ case PREDEF_TYPE_FLOAT_ID: T = Context.FloatTy; break;
+ case PREDEF_TYPE_DOUBLE_ID: T = Context.DoubleTy; break;
+ case PREDEF_TYPE_LONGDOUBLE_ID: T = Context.LongDoubleTy; break;
+ case PREDEF_TYPE_OVERLOAD_ID: T = Context.OverloadTy; break;
+ case PREDEF_TYPE_BOUND_MEMBER: T = Context.BoundMemberTy; break;
+ case PREDEF_TYPE_PSEUDO_OBJECT: T = Context.PseudoObjectTy; break;
+ case PREDEF_TYPE_DEPENDENT_ID: T = Context.DependentTy; break;
+ case PREDEF_TYPE_UNKNOWN_ANY: T = Context.UnknownAnyTy; break;
+ case PREDEF_TYPE_NULLPTR_ID: T = Context.NullPtrTy; break;
+ case PREDEF_TYPE_CHAR16_ID: T = Context.Char16Ty; break;
+ case PREDEF_TYPE_CHAR32_ID: T = Context.Char32Ty; break;
+ case PREDEF_TYPE_OBJC_ID: T = Context.ObjCBuiltinIdTy; break;
+ case PREDEF_TYPE_OBJC_CLASS: T = Context.ObjCBuiltinClassTy; break;
+ case PREDEF_TYPE_OBJC_SEL: T = Context.ObjCBuiltinSelTy; break;
+ case PREDEF_TYPE_AUTO_DEDUCT: T = Context.getAutoDeductType(); break;
+
+ case PREDEF_TYPE_AUTO_RREF_DEDUCT:
+ T = Context.getAutoRRefDeductType();
+ break;
+
+ case PREDEF_TYPE_ARC_UNBRIDGED_CAST:
+ T = Context.ARCUnbridgedCastTy;
+ break;
+
+ }
+
+ assert(!T.isNull() && "Unknown predefined type");
+ return T.withFastQualifiers(FastQuals);
+ }
+
+ Index -= NUM_PREDEF_TYPE_IDS;
+ assert(Index < TypesLoaded.size() && "Type index out-of-range");
+ if (TypesLoaded[Index].isNull()) {
+ TypesLoaded[Index] = readTypeRecord(Index);
+ if (TypesLoaded[Index].isNull())
+ return QualType();
+
+ TypesLoaded[Index]->setFromAST();
+ if (DeserializationListener)
+ DeserializationListener->TypeRead(TypeIdx::fromTypeID(ID),
+ TypesLoaded[Index]);
+ }
+
+ return TypesLoaded[Index].withFastQualifiers(FastQuals);
+}
+
+QualType ASTReader::getLocalType(ModuleFile &F, unsigned LocalID) {
+ return GetType(getGlobalTypeID(F, LocalID));
+}
+
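+/// \brief Map a type ID local to the given module file onto the corresponding
+/// global type ID, preserving the fast-qualifier bits packed into the ID.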
+serialization::TypeID
+ASTReader::getGlobalTypeID(ModuleFile &F, unsigned LocalID) const {
+ unsigned FastQuals = LocalID & Qualifiers::FastMask;
+ unsigned LocalIndex = LocalID >> Qualifiers::FastWidth;
+
+ if (LocalIndex < NUM_PREDEF_TYPE_IDS)
+ return LocalID;
+
+ ContinuousRangeMap<uint32_t, int, 2>::iterator I
+ = F.TypeRemap.find(LocalIndex - NUM_PREDEF_TYPE_IDS);
+ assert(I != F.TypeRemap.end() && "Invalid index into type index remap");
+
+ unsigned GlobalIndex = LocalIndex + I->second;
+ return (GlobalIndex << Qualifiers::FastWidth) | FastQuals;
+}
+
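+/// \brief Read the source-location information for a template argument of the
+/// given kind. Kinds that carry no location information yield an empty
+/// TemplateArgumentLocInfo.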
+TemplateArgumentLocInfo
+ASTReader::GetTemplateArgumentLocInfo(ModuleFile &F,
+ TemplateArgument::ArgKind Kind,
+ const RecordData &Record,
+ unsigned &Index) {
+ switch (Kind) {
+ case TemplateArgument::Expression:
+ return ReadExpr(F);
+ case TemplateArgument::Type:
+ return GetTypeSourceInfo(F, Record, Index);
+ case TemplateArgument::Template: {
+ NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc(F, Record,
+ Index);
+ SourceLocation TemplateNameLoc = ReadSourceLocation(F, Record, Index);
+ return TemplateArgumentLocInfo(QualifierLoc, TemplateNameLoc,
+ SourceLocation());
+ }
+ case TemplateArgument::TemplateExpansion: {
+ NestedNameSpecifierLoc QualifierLoc = ReadNestedNameSpecifierLoc(F, Record,
+ Index);
+ SourceLocation TemplateNameLoc = ReadSourceLocation(F, Record, Index);
+ SourceLocation EllipsisLoc = ReadSourceLocation(F, Record, Index);
+ return TemplateArgumentLocInfo(QualifierLoc, TemplateNameLoc,
+ EllipsisLoc);
+ }
+ case TemplateArgument::Null:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Pack:
+ return TemplateArgumentLocInfo();
+ }
+ llvm_unreachable("unexpected template argument loc");
+}
+
+TemplateArgumentLoc
+ASTReader::ReadTemplateArgumentLoc(ModuleFile &F,
+ const RecordData &Record, unsigned &Index) {
+ TemplateArgument Arg = ReadTemplateArgument(F, Record, Index);
+
+ if (Arg.getKind() == TemplateArgument::Expression) {
+ if (Record[Index++]) // bool InfoHasSameExpr.
+ return TemplateArgumentLoc(Arg, TemplateArgumentLocInfo(Arg.getAsExpr()));
+ }
+ return TemplateArgumentLoc(Arg, GetTemplateArgumentLocInfo(F, Arg.getKind(),
+ Record, Index));
+}
+
+Decl *ASTReader::GetExternalDecl(uint32_t ID) {
+ return GetDecl(ID);
+}
+
+uint64_t ASTReader::readCXXBaseSpecifiers(ModuleFile &M, const RecordData &Record,
+                                          unsigned &Idx) {
+ if (Idx >= Record.size())
+ return 0;
+
+ unsigned LocalID = Record[Idx++];
+ return getGlobalBitOffset(M, M.CXXBaseSpecifiersOffsets[LocalID - 1]);
+}
+
+CXXBaseSpecifier *ASTReader::GetExternalCXXBaseSpecifiers(uint64_t Offset) {
+ RecordLocation Loc = getLocalBitOffset(Offset);
+ llvm::BitstreamCursor &Cursor = Loc.F->DeclsCursor;
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(Loc.Offset);
+ ReadingKindTracker ReadingKind(Read_Decl, *this);
+ RecordData Record;
+ unsigned Code = Cursor.ReadCode();
+ unsigned RecCode = Cursor.ReadRecord(Code, Record);
+ if (RecCode != DECL_CXX_BASE_SPECIFIERS) {
+ Error("Malformed AST file: missing C++ base specifiers");
+ return 0;
+ }
+
+ unsigned Idx = 0;
+ unsigned NumBases = Record[Idx++];
+ void *Mem = Context.Allocate(sizeof(CXXBaseSpecifier) * NumBases);
+ CXXBaseSpecifier *Bases = new (Mem) CXXBaseSpecifier [NumBases];
+ for (unsigned I = 0; I != NumBases; ++I)
+ Bases[I] = ReadCXXBaseSpecifier(*Loc.F, Record, Idx);
+ return Bases;
+}
+
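+/// \brief Map a declaration ID local to the given module file onto the
+/// corresponding global declaration ID.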
+serialization::DeclID
+ASTReader::getGlobalDeclID(ModuleFile &F, unsigned LocalID) const {
+ if (LocalID < NUM_PREDEF_DECL_IDS)
+ return LocalID;
+
+ ContinuousRangeMap<uint32_t, int, 2>::iterator I
+ = F.DeclRemap.find(LocalID - NUM_PREDEF_DECL_IDS);
+ assert(I != F.DeclRemap.end() && "Invalid index into decl index remap");
+
+ return LocalID + I->second;
+}
+
+bool ASTReader::isDeclIDFromModule(serialization::GlobalDeclID ID,
+ ModuleFile &M) const {
+ GlobalDeclMapType::const_iterator I = GlobalDeclMap.find(ID);
+ assert(I != GlobalDeclMap.end() && "Corrupted global declaration map");
+ return &M == I->second;
+}
+
+ModuleFile *ASTReader::getOwningModuleFile(Decl *D) {
+ if (!D->isFromASTFile())
+ return 0;
+ GlobalDeclMapType::const_iterator I = GlobalDeclMap.find(D->getGlobalID());
+ assert(I != GlobalDeclMap.end() && "Corrupted global declaration map");
+ return I->second;
+}
+
+SourceLocation ASTReader::getSourceLocationForDeclID(GlobalDeclID ID) {
+ if (ID < NUM_PREDEF_DECL_IDS)
+ return SourceLocation();
+
+ unsigned Index = ID - NUM_PREDEF_DECL_IDS;
+
+  if (Index >= DeclsLoaded.size()) {
+ Error("declaration ID out-of-range for AST file");
+ return SourceLocation();
+ }
+
+ if (Decl *D = DeclsLoaded[Index])
+ return D->getLocation();
+
+ unsigned RawLocation = 0;
+ RecordLocation Rec = DeclCursorForID(ID, RawLocation);
+ return ReadSourceLocation(*Rec.F, RawLocation);
+}
+
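+/// \brief Resolve a declaration ID into a declaration. IDs below
+/// NUM_PREDEF_DECL_IDS name declarations provided by the ASTContext; all
+/// others are deserialized from their owning AST file on first use.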
+Decl *ASTReader::GetDecl(DeclID ID) {
+ if (ID < NUM_PREDEF_DECL_IDS) {
+ switch ((PredefinedDeclIDs)ID) {
+ case PREDEF_DECL_NULL_ID:
+ return 0;
+
+ case PREDEF_DECL_TRANSLATION_UNIT_ID:
+ return Context.getTranslationUnitDecl();
+
+ case PREDEF_DECL_OBJC_ID_ID:
+ return Context.getObjCIdDecl();
+
+ case PREDEF_DECL_OBJC_SEL_ID:
+ return Context.getObjCSelDecl();
+
+ case PREDEF_DECL_OBJC_CLASS_ID:
+ return Context.getObjCClassDecl();
+
+ case PREDEF_DECL_OBJC_PROTOCOL_ID:
+ return Context.getObjCProtocolDecl();
+
+ case PREDEF_DECL_INT_128_ID:
+ return Context.getInt128Decl();
+
+ case PREDEF_DECL_UNSIGNED_INT_128_ID:
+ return Context.getUInt128Decl();
+
+ case PREDEF_DECL_OBJC_INSTANCETYPE_ID:
+ return Context.getObjCInstanceTypeDecl();
+ }
+ }
+
+ unsigned Index = ID - NUM_PREDEF_DECL_IDS;
+
+ if (Index >= DeclsLoaded.size()) {
+ Error("declaration ID out-of-range for AST file");
+    return 0;
+  }
+
+ if (!DeclsLoaded[Index]) {
+ ReadDeclRecord(ID);
+ if (DeserializationListener)
+ DeserializationListener->DeclRead(ID, DeclsLoaded[Index]);
+ }
+
+ return DeclsLoaded[Index];
+}
+
+DeclID ASTReader::mapGlobalIDToModuleFileGlobalID(ModuleFile &M,
+ DeclID GlobalID) {
+ if (GlobalID < NUM_PREDEF_DECL_IDS)
+ return GlobalID;
+
+ GlobalDeclMapType::const_iterator I = GlobalDeclMap.find(GlobalID);
+ assert(I != GlobalDeclMap.end() && "Corrupted global declaration map");
+ ModuleFile *Owner = I->second;
+
+ llvm::DenseMap<ModuleFile *, serialization::DeclID>::iterator Pos
+ = M.GlobalToLocalDeclIDs.find(Owner);
+ if (Pos == M.GlobalToLocalDeclIDs.end())
+ return 0;
+
+ return GlobalID - Owner->BaseDeclID + Pos->second;
+}
+
+serialization::DeclID ASTReader::ReadDeclID(ModuleFile &F,
+ const RecordData &Record,
+ unsigned &Idx) {
+ if (Idx >= Record.size()) {
+ Error("Corrupted AST file");
+ return 0;
+ }
+
+ return getGlobalDeclID(F, Record[Idx++]);
+}
+
+/// \brief Resolve the offset of a statement into a statement.
+///
+/// This operation will read a new statement from the external
+/// source each time it is called, and is meant to be used via a
+/// LazyOffsetPtr (which is used by Decls for the body of functions, etc).
+Stmt *ASTReader::GetExternalDeclStmt(uint64_t Offset) {
+ // Switch case IDs are per Decl.
+ ClearSwitchCaseIDs();
+
+ // Offset here is a global offset across the entire chain.
+ RecordLocation Loc = getLocalBitOffset(Offset);
+ Loc.F->DeclsCursor.JumpToBit(Loc.Offset);
+ return ReadStmtFromStream(*Loc.F);
+}
+
+namespace {
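+  /// \brief ModuleFile visitor used to collect the lexically stored
+  /// declarations of a declaration context from each loaded module file.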
+ class FindExternalLexicalDeclsVisitor {
+ ASTReader &Reader;
+ const DeclContext *DC;
+ bool (*isKindWeWant)(Decl::Kind);
+
+ SmallVectorImpl<Decl*> &Decls;
+ bool PredefsVisited[NUM_PREDEF_DECL_IDS];
+
+ public:
+ FindExternalLexicalDeclsVisitor(ASTReader &Reader, const DeclContext *DC,
+ bool (*isKindWeWant)(Decl::Kind),
+ SmallVectorImpl<Decl*> &Decls)
+ : Reader(Reader), DC(DC), isKindWeWant(isKindWeWant), Decls(Decls)
+ {
+ for (unsigned I = 0; I != NUM_PREDEF_DECL_IDS; ++I)
+ PredefsVisited[I] = false;
+ }
+
+ static bool visit(ModuleFile &M, bool Preorder, void *UserData) {
+ if (Preorder)
+ return false;
+
+ FindExternalLexicalDeclsVisitor *This
+ = static_cast<FindExternalLexicalDeclsVisitor *>(UserData);
+
+ ModuleFile::DeclContextInfosMap::iterator Info
+ = M.DeclContextInfos.find(This->DC);
+ if (Info == M.DeclContextInfos.end() || !Info->second.LexicalDecls)
+ return false;
+
+ // Load all of the declaration IDs
+ for (const KindDeclIDPair *ID = Info->second.LexicalDecls,
+ *IDE = ID + Info->second.NumLexicalDecls;
+ ID != IDE; ++ID) {
+ if (This->isKindWeWant && !This->isKindWeWant((Decl::Kind)ID->first))
+ continue;
+
+ // Don't add predefined declarations to the lexical context more
+ // than once.
+ if (ID->second < NUM_PREDEF_DECL_IDS) {
+ if (This->PredefsVisited[ID->second])
+ continue;
+
+ This->PredefsVisited[ID->second] = true;
+ }
+
+ if (Decl *D = This->Reader.GetLocalDecl(M, ID->second)) {
+ if (!This->DC->isDeclInLexicalTraversal(D))
+ This->Decls.push_back(D);
+ }
+ }
+
+ return false;
+ }
+ };
+}
+
+ExternalLoadResult ASTReader::FindExternalLexicalDecls(const DeclContext *DC,
+ bool (*isKindWeWant)(Decl::Kind),
+ SmallVectorImpl<Decl*> &Decls) {
+ // There might be lexical decls in multiple modules, for the TU at
+ // least. Walk all of the modules in the order they were loaded.
+ FindExternalLexicalDeclsVisitor Visitor(*this, DC, isKindWeWant, Decls);
+ ModuleMgr.visitDepthFirst(&FindExternalLexicalDeclsVisitor::visit, &Visitor);
+ ++NumLexicalDeclContextsRead;
+ return ELR_Success;
+}
+
+namespace {
+
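+/// \brief Comparator that orders local declaration IDs by their source
+/// location, allowing binary searches over a file's declarations in
+/// FindFileRegionDecls.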
+class DeclIDComp {
+ ASTReader &Reader;
+ ModuleFile &Mod;
+
+public:
+ DeclIDComp(ASTReader &Reader, ModuleFile &M) : Reader(Reader), Mod(M) {}
+
+ bool operator()(LocalDeclID L, LocalDeclID R) const {
+ SourceLocation LHS = getLocation(L);
+ SourceLocation RHS = getLocation(R);
+ return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ bool operator()(SourceLocation LHS, LocalDeclID R) const {
+ SourceLocation RHS = getLocation(R);
+ return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ bool operator()(LocalDeclID L, SourceLocation RHS) const {
+ SourceLocation LHS = getLocation(L);
+ return Reader.getSourceManager().isBeforeInTranslationUnit(LHS, RHS);
+ }
+
+ SourceLocation getLocation(LocalDeclID ID) const {
+ return Reader.getSourceManager().getFileLoc(
+ Reader.getSourceLocationForDeclID(Reader.getGlobalDeclID(Mod, ID)));
+ }
+};
+
+}
+
+void ASTReader::FindFileRegionDecls(FileID File,
+ unsigned Offset, unsigned Length,
+ SmallVectorImpl<Decl *> &Decls) {
+ SourceManager &SM = getSourceManager();
+
+ llvm::DenseMap<FileID, FileDeclsInfo>::iterator I = FileDeclIDs.find(File);
+ if (I == FileDeclIDs.end())
+ return;
+
+ FileDeclsInfo &DInfo = I->second;
+ if (DInfo.Decls.empty())
+ return;
+
+ SourceLocation
+ BeginLoc = SM.getLocForStartOfFile(File).getLocWithOffset(Offset);
+ SourceLocation EndLoc = BeginLoc.getLocWithOffset(Length);
+
+ DeclIDComp DIDComp(*this, *DInfo.Mod);
+ ArrayRef<serialization::LocalDeclID>::iterator
+ BeginIt = std::lower_bound(DInfo.Decls.begin(), DInfo.Decls.end(),
+ BeginLoc, DIDComp);
+ if (BeginIt != DInfo.Decls.begin())
+ --BeginIt;
+
+ // If we are pointing at a top-level decl inside an objc container, we need
+  // to backtrack until we find it; otherwise we will fail to report that the
+ // region overlaps with an objc container.
+ while (BeginIt != DInfo.Decls.begin() &&
+ GetDecl(getGlobalDeclID(*DInfo.Mod, *BeginIt))
+ ->isTopLevelDeclInObjCContainer())
+ --BeginIt;
+
+ ArrayRef<serialization::LocalDeclID>::iterator
+ EndIt = std::upper_bound(DInfo.Decls.begin(), DInfo.Decls.end(),
+ EndLoc, DIDComp);
+ if (EndIt != DInfo.Decls.end())
+ ++EndIt;
+
+ for (ArrayRef<serialization::LocalDeclID>::iterator
+ DIt = BeginIt; DIt != EndIt; ++DIt)
+ Decls.push_back(GetDecl(getGlobalDeclID(*DInfo.Mod, *DIt)));
+}
+
+namespace {
+ /// \brief ModuleFile visitor used to perform name lookup into a
+ /// declaration context.
+ class DeclContextNameLookupVisitor {
+ ASTReader &Reader;
+ llvm::SmallVectorImpl<const DeclContext *> &Contexts;
+ const DeclContext *DC;
+ DeclarationName Name;
+ SmallVectorImpl<NamedDecl *> &Decls;
+
+ public:
+ DeclContextNameLookupVisitor(ASTReader &Reader,
+ SmallVectorImpl<const DeclContext *> &Contexts,
+ DeclarationName Name,
+ SmallVectorImpl<NamedDecl *> &Decls)
+ : Reader(Reader), Contexts(Contexts), Name(Name), Decls(Decls) { }
+
+ static bool visit(ModuleFile &M, void *UserData) {
+ DeclContextNameLookupVisitor *This
+ = static_cast<DeclContextNameLookupVisitor *>(UserData);
+
+ // Check whether we have any visible declaration information for
+ // this context in this module.
+ ModuleFile::DeclContextInfosMap::iterator Info;
+ bool FoundInfo = false;
+ for (unsigned I = 0, N = This->Contexts.size(); I != N; ++I) {
+ Info = M.DeclContextInfos.find(This->Contexts[I]);
+ if (Info != M.DeclContextInfos.end() &&
+ Info->second.NameLookupTableData) {
+ FoundInfo = true;
+ break;
+ }
+ }
+
+ if (!FoundInfo)
+ return false;
+
+ // Look for this name within this module.
+ ASTDeclContextNameLookupTable *LookupTable =
+ (ASTDeclContextNameLookupTable*)Info->second.NameLookupTableData;
+ ASTDeclContextNameLookupTable::iterator Pos
+ = LookupTable->find(This->Name);
+ if (Pos == LookupTable->end())
+ return false;
+
+ bool FoundAnything = false;
+ ASTDeclContextNameLookupTrait::data_type Data = *Pos;
+ for (; Data.first != Data.second; ++Data.first) {
+ NamedDecl *ND = This->Reader.GetLocalDeclAs<NamedDecl>(M, *Data.first);
+ if (!ND)
+ continue;
+
+ if (ND->getDeclName() != This->Name) {
+ assert(!This->Name.getCXXNameType().isNull() &&
+ "Name mismatch without a type");
+ continue;
+ }
+
+ // Record this declaration.
+ FoundAnything = true;
+ This->Decls.push_back(ND);
+ }
+
+ return FoundAnything;
+ }
+ };
+}
+
+DeclContext::lookup_result
+ASTReader::FindExternalVisibleDeclsByName(const DeclContext *DC,
+ DeclarationName Name) {
+ assert(DC->hasExternalVisibleStorage() &&
+ "DeclContext has no visible decls in storage");
+ if (!Name)
+ return DeclContext::lookup_result(DeclContext::lookup_iterator(0),
+ DeclContext::lookup_iterator(0));
+
+ SmallVector<NamedDecl *, 64> Decls;
+
+ // Compute the declaration contexts we need to look into. Multiple such
+ // declaration contexts occur when two declaration contexts from disjoint
+ // modules get merged, e.g., when two namespaces with the same name are
+ // independently defined in separate modules.
+ SmallVector<const DeclContext *, 2> Contexts;
+ Contexts.push_back(DC);
+
+ if (DC->isNamespace()) {
+ MergedDeclsMap::iterator Merged
+ = MergedDecls.find(const_cast<Decl *>(cast<Decl>(DC)));
+ if (Merged != MergedDecls.end()) {
+ for (unsigned I = 0, N = Merged->second.size(); I != N; ++I)
+ Contexts.push_back(cast<DeclContext>(GetDecl(Merged->second[I])));
+ }
+ }
+
+ DeclContextNameLookupVisitor Visitor(*this, Contexts, Name, Decls);
+ ModuleMgr.visit(&DeclContextNameLookupVisitor::visit, &Visitor);
+ ++NumVisibleDeclContextsRead;
+ SetExternalVisibleDeclsForName(DC, Name, Decls);
+ return const_cast<DeclContext*>(DC)->lookup(Name);
+}
+
+namespace {
+ /// \brief ModuleFile visitor used to complete the visible decls map of a
+ /// declaration context.
+ class DeclContextVisibleDeclMapVisitor {
+ ASTReader &Reader;
+ DeclContext *DC;
+
+ public:
+ DeclContextVisibleDeclMapVisitor(ASTReader &Reader, DeclContext *DC)
+ : Reader(Reader), DC(DC) { }
+
+ static bool visit(ModuleFile &M, void *UserData) {
+ return static_cast<DeclContextVisibleDeclMapVisitor*>(UserData)->visit(M);
+ }
+
+ bool visit(ModuleFile &M) {
+ // Check whether we have any visible declaration information for
+ // this context in this module.
+ ModuleFile::DeclContextInfosMap::iterator
+ Info = M.DeclContextInfos.find(DC);
+ if (Info == M.DeclContextInfos.end() ||
+ !Info->second.NameLookupTableData)
+ return false;
+
+ // Look for this name within this module.
+ ASTDeclContextNameLookupTable *LookupTable =
+ (ASTDeclContextNameLookupTable*)Info->second.NameLookupTableData;
+ for (ASTDeclContextNameLookupTable::key_iterator
+ I = LookupTable->key_begin(),
+ E = LookupTable->key_end(); I != E; ++I) {
+ DC->lookup(*I); // Force loading of the visible decls for the decl name.
+ }
+
+ return false;
+ }
+ };
+}
+
+void ASTReader::completeVisibleDeclsMap(DeclContext *DC) {
+ if (!DC->hasExternalVisibleStorage())
+ return;
+ DeclContextVisibleDeclMapVisitor Visitor(*this, DC);
+ ModuleMgr.visit(&DeclContextVisibleDeclMapVisitor::visit, &Visitor);
+}
+
+/// \brief Under non-PCH compilation the consumer receives the ObjC methods
+/// before receiving the implementation, and codegen depends on this ordering.
+/// We simulate this by deserializing the methods of the implementation and
+/// passing them to the consumer before the deserialized implementation decl.
+static void PassObjCImplDeclToConsumer(ObjCImplDecl *ImplD,
+ ASTConsumer *Consumer) {
+ assert(ImplD && Consumer);
+
+ for (ObjCImplDecl::method_iterator
+ I = ImplD->meth_begin(), E = ImplD->meth_end(); I != E; ++I)
+ Consumer->HandleInterestingDecl(DeclGroupRef(*I));
+
+ Consumer->HandleInterestingDecl(DeclGroupRef(ImplD));
+}
+
+void ASTReader::PassInterestingDeclsToConsumer() {
+ assert(Consumer);
+ while (!InterestingDecls.empty()) {
+ Decl *D = InterestingDecls.front();
+ InterestingDecls.pop_front();
+
+ PassInterestingDeclToConsumer(D);
+ }
+}
+
+void ASTReader::PassInterestingDeclToConsumer(Decl *D) {
+ if (ObjCImplDecl *ImplD = dyn_cast<ObjCImplDecl>(D))
+ PassObjCImplDeclToConsumer(ImplD, Consumer);
+ else
+ Consumer->HandleInterestingDecl(DeclGroupRef(D));
+}
+
+void ASTReader::StartTranslationUnit(ASTConsumer *Consumer) {
+ this->Consumer = Consumer;
+
+ if (!Consumer)
+ return;
+
+ for (unsigned I = 0, N = ExternalDefinitions.size(); I != N; ++I) {
+ // Force deserialization of this decl, which will cause it to be queued for
+ // passing to the consumer.
+ GetDecl(ExternalDefinitions[I]);
+ }
+ ExternalDefinitions.clear();
+
+ PassInterestingDeclsToConsumer();
+}
+
+void ASTReader::PrintStats() {
+ std::fprintf(stderr, "*** AST File Statistics:\n");
+
+ unsigned NumTypesLoaded
+ = TypesLoaded.size() - std::count(TypesLoaded.begin(), TypesLoaded.end(),
+ QualType());
+ unsigned NumDeclsLoaded
+ = DeclsLoaded.size() - std::count(DeclsLoaded.begin(), DeclsLoaded.end(),
+ (Decl *)0);
+ unsigned NumIdentifiersLoaded
+ = IdentifiersLoaded.size() - std::count(IdentifiersLoaded.begin(),
+ IdentifiersLoaded.end(),
+ (IdentifierInfo *)0);
+ unsigned NumSelectorsLoaded
+ = SelectorsLoaded.size() - std::count(SelectorsLoaded.begin(),
+ SelectorsLoaded.end(),
+ Selector());
+
+ std::fprintf(stderr, " %u stat cache hits\n", NumStatHits);
+ std::fprintf(stderr, " %u stat cache misses\n", NumStatMisses);
+ if (unsigned TotalNumSLocEntries = getTotalNumSLocs())
+ std::fprintf(stderr, " %u/%u source location entries read (%f%%)\n",
+ NumSLocEntriesRead, TotalNumSLocEntries,
+ ((float)NumSLocEntriesRead/TotalNumSLocEntries * 100));
+ if (!TypesLoaded.empty())
+ std::fprintf(stderr, " %u/%u types read (%f%%)\n",
+ NumTypesLoaded, (unsigned)TypesLoaded.size(),
+ ((float)NumTypesLoaded/TypesLoaded.size() * 100));
+ if (!DeclsLoaded.empty())
+ std::fprintf(stderr, " %u/%u declarations read (%f%%)\n",
+ NumDeclsLoaded, (unsigned)DeclsLoaded.size(),
+ ((float)NumDeclsLoaded/DeclsLoaded.size() * 100));
+ if (!IdentifiersLoaded.empty())
+ std::fprintf(stderr, " %u/%u identifiers read (%f%%)\n",
+ NumIdentifiersLoaded, (unsigned)IdentifiersLoaded.size(),
+ ((float)NumIdentifiersLoaded/IdentifiersLoaded.size() * 100));
+ if (!SelectorsLoaded.empty())
+ std::fprintf(stderr, " %u/%u selectors read (%f%%)\n",
+ NumSelectorsLoaded, (unsigned)SelectorsLoaded.size(),
+ ((float)NumSelectorsLoaded/SelectorsLoaded.size() * 100));
+ if (TotalNumStatements)
+ std::fprintf(stderr, " %u/%u statements read (%f%%)\n",
+ NumStatementsRead, TotalNumStatements,
+ ((float)NumStatementsRead/TotalNumStatements * 100));
+ if (TotalNumMacros)
+ std::fprintf(stderr, " %u/%u macros read (%f%%)\n",
+ NumMacrosRead, TotalNumMacros,
+ ((float)NumMacrosRead/TotalNumMacros * 100));
+ if (TotalLexicalDeclContexts)
+ std::fprintf(stderr, " %u/%u lexical declcontexts read (%f%%)\n",
+ NumLexicalDeclContextsRead, TotalLexicalDeclContexts,
+ ((float)NumLexicalDeclContextsRead/TotalLexicalDeclContexts
+ * 100));
+ if (TotalVisibleDeclContexts)
+ std::fprintf(stderr, " %u/%u visible declcontexts read (%f%%)\n",
+ NumVisibleDeclContextsRead, TotalVisibleDeclContexts,
+ ((float)NumVisibleDeclContextsRead/TotalVisibleDeclContexts
+ * 100));
+ if (TotalNumMethodPoolEntries) {
+ std::fprintf(stderr, " %u/%u method pool entries read (%f%%)\n",
+ NumMethodPoolEntriesRead, TotalNumMethodPoolEntries,
+ ((float)NumMethodPoolEntriesRead/TotalNumMethodPoolEntries
+ * 100));
+ std::fprintf(stderr, " %u method pool misses\n", NumMethodPoolMisses);
+ }
+ std::fprintf(stderr, "\n");
+ dump();
+ std::fprintf(stderr, "\n");
+}
+
+template<typename Key, typename ModuleFile, unsigned InitialCapacity>
+static void
+dumpModuleIDMap(StringRef Name,
+ const ContinuousRangeMap<Key, ModuleFile *,
+ InitialCapacity> &Map) {
+ if (Map.begin() == Map.end())
+ return;
+
+ typedef ContinuousRangeMap<Key, ModuleFile *, InitialCapacity> MapType;
+ llvm::errs() << Name << ":\n";
+ for (typename MapType::const_iterator I = Map.begin(), IEnd = Map.end();
+ I != IEnd; ++I) {
+ llvm::errs() << " " << I->first << " -> " << I->second->FileName
+ << "\n";
+ }
+}
+
+void ASTReader::dump() {
+ llvm::errs() << "*** PCH/ModuleFile Remappings:\n";
+ dumpModuleIDMap("Global bit offset map", GlobalBitOffsetsMap);
+ dumpModuleIDMap("Global source location entry map", GlobalSLocEntryMap);
+ dumpModuleIDMap("Global type map", GlobalTypeMap);
+ dumpModuleIDMap("Global declaration map", GlobalDeclMap);
+ dumpModuleIDMap("Global identifier map", GlobalIdentifierMap);
+ dumpModuleIDMap("Global submodule map", GlobalSubmoduleMap);
+ dumpModuleIDMap("Global selector map", GlobalSelectorMap);
+ dumpModuleIDMap("Global preprocessed entity map",
+ GlobalPreprocessedEntityMap);
+
+ llvm::errs() << "\n*** PCH/Modules Loaded:";
+ for (ModuleManager::ModuleConstIterator M = ModuleMgr.begin(),
+ MEnd = ModuleMgr.end();
+ M != MEnd; ++M)
+ (*M)->dump();
+}
+
+/// Return the amount of memory used by memory buffers, breaking down
+/// by heap-backed versus mmap'ed memory.
+void ASTReader::getMemoryBufferSizes(MemoryBufferSizes &sizes) const {
+ for (ModuleConstIterator I = ModuleMgr.begin(),
+ E = ModuleMgr.end(); I != E; ++I) {
+ if (llvm::MemoryBuffer *buf = (*I)->Buffer.get()) {
+ size_t bytes = buf->getBufferSize();
+ switch (buf->getBufferKind()) {
+ case llvm::MemoryBuffer::MemoryBuffer_Malloc:
+ sizes.malloc_bytes += bytes;
+ break;
+ case llvm::MemoryBuffer::MemoryBuffer_MMap:
+ sizes.mmap_bytes += bytes;
+ break;
+ }
+ }
+ }
+}
+
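+/// \brief Attach this AST reader to the given Sema instance as its external
+/// source, pushing any preloaded declarations into scope and seeding Sema
+/// with the serialized references to std and std::bad_alloc, the FP pragma
+/// options, and the OpenCL extension state.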
+void ASTReader::InitializeSema(Sema &S) {
+ SemaObj = &S;
+ S.ExternalSource = this;
+
+  // Make sure any declarations that were deserialized "too early"
+ // still get added to the identifier's declaration chains.
+ for (unsigned I = 0, N = PreloadedDecls.size(); I != N; ++I) {
+ SemaObj->pushExternalDeclIntoScope(PreloadedDecls[I],
+ PreloadedDecls[I]->getDeclName());
+ }
+ PreloadedDecls.clear();
+
+ // Load the offsets of the declarations that Sema references.
+ // They will be lazily deserialized when needed.
+ if (!SemaDeclRefs.empty()) {
+ assert(SemaDeclRefs.size() == 2 && "More decl refs than expected!");
+ if (!SemaObj->StdNamespace)
+ SemaObj->StdNamespace = SemaDeclRefs[0];
+ if (!SemaObj->StdBadAlloc)
+ SemaObj->StdBadAlloc = SemaDeclRefs[1];
+ }
+
+ if (!FPPragmaOptions.empty()) {
+ assert(FPPragmaOptions.size() == 1 && "Wrong number of FP_PRAGMA_OPTIONS");
+ SemaObj->FPFeatures.fp_contract = FPPragmaOptions[0];
+ }
+
+ if (!OpenCLExtensions.empty()) {
+ unsigned I = 0;
+#define OPENCLEXT(nm) SemaObj->OpenCLFeatures.nm = OpenCLExtensions[I++];
+#include "clang/Basic/OpenCLExtensions.def"
+
+ assert(OpenCLExtensions.size() == I && "Wrong number of OPENCL_EXTENSIONS");
+ }
+}
+
+IdentifierInfo* ASTReader::get(const char *NameStart, const char *NameEnd) {
+ IdentifierLookupVisitor Visitor(StringRef(NameStart, NameEnd - NameStart),
+ /*PriorGeneration=*/0);
+ ModuleMgr.visit(IdentifierLookupVisitor::visit, &Visitor);
+ IdentifierInfo *II = Visitor.getIdentifierInfo();
+ markIdentifierUpToDate(II);
+ return II;
+}
+
+namespace clang {
+ /// \brief An identifier-lookup iterator that enumerates all of the
+ /// identifiers stored within a set of AST files.
+ class ASTIdentifierIterator : public IdentifierIterator {
+ /// \brief The AST reader whose identifiers are being enumerated.
+ const ASTReader &Reader;
+
+ /// \brief The current index into the chain of AST files stored in
+ /// the AST reader.
+ unsigned Index;
+
+ /// \brief The current position within the identifier lookup table
+ /// of the current AST file.
+ ASTIdentifierLookupTable::key_iterator Current;
+
+ /// \brief The end position within the identifier lookup table of
+ /// the current AST file.
+ ASTIdentifierLookupTable::key_iterator End;
+
+ public:
+ explicit ASTIdentifierIterator(const ASTReader &Reader);
+
+ virtual StringRef Next();
+ };
+}
+
+ASTIdentifierIterator::ASTIdentifierIterator(const ASTReader &Reader)
+ : Reader(Reader), Index(Reader.ModuleMgr.size() - 1) {
+ ASTIdentifierLookupTable *IdTable
+ = (ASTIdentifierLookupTable *)Reader.ModuleMgr[Index].IdentifierLookupTable;
+ Current = IdTable->key_begin();
+ End = IdTable->key_end();
+}
+
+StringRef ASTIdentifierIterator::Next() {
+ while (Current == End) {
+ // If we have exhausted all of our AST files, we're done.
+ if (Index == 0)
+ return StringRef();
+
+ --Index;
+ ASTIdentifierLookupTable *IdTable
+ = (ASTIdentifierLookupTable *)Reader.ModuleMgr[Index].
+ IdentifierLookupTable;
+ Current = IdTable->key_begin();
+ End = IdTable->key_end();
+ }
+
+  // We still have identifiers remaining in the current AST file; return
+ // the next one.
+ std::pair<const char*, unsigned> Key = *Current;
+ ++Current;
+ return StringRef(Key.first, Key.second);
+}
+
+IdentifierIterator *ASTReader::getIdentifiers() const {
+ return new ASTIdentifierIterator(*this);
+}
+
+namespace clang { namespace serialization {
+ class ReadMethodPoolVisitor {
+ ASTReader &Reader;
+ Selector Sel;
+ unsigned PriorGeneration;
+ llvm::SmallVector<ObjCMethodDecl *, 4> InstanceMethods;
+ llvm::SmallVector<ObjCMethodDecl *, 4> FactoryMethods;
+
+ public:
+ ReadMethodPoolVisitor(ASTReader &Reader, Selector Sel,
+ unsigned PriorGeneration)
+ : Reader(Reader), Sel(Sel), PriorGeneration(PriorGeneration) { }
+
+ static bool visit(ModuleFile &M, void *UserData) {
+ ReadMethodPoolVisitor *This
+ = static_cast<ReadMethodPoolVisitor *>(UserData);
+
+ if (!M.SelectorLookupTable)
+ return false;
+
+ // If we've already searched this module file, skip it now.
+ if (M.Generation <= This->PriorGeneration)
+ return true;
+
+ ASTSelectorLookupTable *PoolTable
+ = (ASTSelectorLookupTable*)M.SelectorLookupTable;
+ ASTSelectorLookupTable::iterator Pos = PoolTable->find(This->Sel);
+ if (Pos == PoolTable->end())
+ return false;
+
+ ++This->Reader.NumSelectorsRead;
+ // FIXME: Not quite happy with the statistics here. We probably should
+ // disable this tracking when called via LoadSelector.
+ // Also, should entries without methods count as misses?
+ ++This->Reader.NumMethodPoolEntriesRead;
+ ASTSelectorLookupTrait::data_type Data = *Pos;
+ if (This->Reader.DeserializationListener)
+ This->Reader.DeserializationListener->SelectorRead(Data.ID,
+ This->Sel);
+
+ This->InstanceMethods.append(Data.Instance.begin(), Data.Instance.end());
+ This->FactoryMethods.append(Data.Factory.begin(), Data.Factory.end());
+ return true;
+ }
+
+ /// \brief Retrieve the instance methods found by this visitor.
+ ArrayRef<ObjCMethodDecl *> getInstanceMethods() const {
+ return InstanceMethods;
+ }
+
+    /// \brief Retrieve the factory methods found by this visitor.
+ ArrayRef<ObjCMethodDecl *> getFactoryMethods() const {
+ return FactoryMethods;
+ }
+ };
+} } // end namespace clang::serialization
+
+/// \brief Add the given set of methods to the method list.
+static void addMethodsToPool(Sema &S, ArrayRef<ObjCMethodDecl *> Methods,
+ ObjCMethodList &List) {
+ for (unsigned I = 0, N = Methods.size(); I != N; ++I) {
+ S.addMethodToGlobalList(&List, Methods[I]);
+ }
+}
+
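+/// \brief Search every module file loaded since the previous query for
+/// methods with the given selector, and add them to Sema's global method
+/// pool.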
+void ASTReader::ReadMethodPool(Selector Sel) {
+ // Get the selector generation and update it to the current generation.
+ unsigned &Generation = SelectorGeneration[Sel];
+ unsigned PriorGeneration = Generation;
+ Generation = CurrentGeneration;
+
+ // Search for methods defined with this selector.
+ ReadMethodPoolVisitor Visitor(*this, Sel, PriorGeneration);
+ ModuleMgr.visit(&ReadMethodPoolVisitor::visit, &Visitor);
+
+ if (Visitor.getInstanceMethods().empty() &&
+ Visitor.getFactoryMethods().empty()) {
+ ++NumMethodPoolMisses;
+ return;
+ }
+
+ if (!getSema())
+ return;
+
+ Sema &S = *getSema();
+ Sema::GlobalMethodPool::iterator Pos
+ = S.MethodPool.insert(std::make_pair(Sel, Sema::GlobalMethods())).first;
+
+ addMethodsToPool(S, Visitor.getInstanceMethods(), Pos->second.first);
+ addMethodsToPool(S, Visitor.getFactoryMethods(), Pos->second.second);
+}
+
+void ASTReader::ReadKnownNamespaces(
+ SmallVectorImpl<NamespaceDecl *> &Namespaces) {
+ Namespaces.clear();
+
+ for (unsigned I = 0, N = KnownNamespaces.size(); I != N; ++I) {
+ if (NamespaceDecl *Namespace
+ = dyn_cast_or_null<NamespaceDecl>(GetDecl(KnownNamespaces[I])))
+ Namespaces.push_back(Namespace);
+ }
+}
+
+void ASTReader::ReadTentativeDefinitions(
+ SmallVectorImpl<VarDecl *> &TentativeDefs) {
+ for (unsigned I = 0, N = TentativeDefinitions.size(); I != N; ++I) {
+ VarDecl *Var = dyn_cast_or_null<VarDecl>(GetDecl(TentativeDefinitions[I]));
+ if (Var)
+ TentativeDefs.push_back(Var);
+ }
+ TentativeDefinitions.clear();
+}
+
+void ASTReader::ReadUnusedFileScopedDecls(
+ SmallVectorImpl<const DeclaratorDecl *> &Decls) {
+ for (unsigned I = 0, N = UnusedFileScopedDecls.size(); I != N; ++I) {
+ DeclaratorDecl *D
+ = dyn_cast_or_null<DeclaratorDecl>(GetDecl(UnusedFileScopedDecls[I]));
+ if (D)
+ Decls.push_back(D);
+ }
+ UnusedFileScopedDecls.clear();
+}
+
+void ASTReader::ReadDelegatingConstructors(
+ SmallVectorImpl<CXXConstructorDecl *> &Decls) {
+ for (unsigned I = 0, N = DelegatingCtorDecls.size(); I != N; ++I) {
+ CXXConstructorDecl *D
+ = dyn_cast_or_null<CXXConstructorDecl>(GetDecl(DelegatingCtorDecls[I]));
+ if (D)
+ Decls.push_back(D);
+ }
+ DelegatingCtorDecls.clear();
+}
+
+void ASTReader::ReadExtVectorDecls(SmallVectorImpl<TypedefNameDecl *> &Decls) {
+ for (unsigned I = 0, N = ExtVectorDecls.size(); I != N; ++I) {
+ TypedefNameDecl *D
+ = dyn_cast_or_null<TypedefNameDecl>(GetDecl(ExtVectorDecls[I]));
+ if (D)
+ Decls.push_back(D);
+ }
+ ExtVectorDecls.clear();
+}
+
+void ASTReader::ReadDynamicClasses(SmallVectorImpl<CXXRecordDecl *> &Decls) {
+ for (unsigned I = 0, N = DynamicClasses.size(); I != N; ++I) {
+ CXXRecordDecl *D
+ = dyn_cast_or_null<CXXRecordDecl>(GetDecl(DynamicClasses[I]));
+ if (D)
+ Decls.push_back(D);
+ }
+ DynamicClasses.clear();
+}
+
+void
+ASTReader::ReadLocallyScopedExternalDecls(SmallVectorImpl<NamedDecl *> &Decls) {
+ for (unsigned I = 0, N = LocallyScopedExternalDecls.size(); I != N; ++I) {
+ NamedDecl *D
+ = dyn_cast_or_null<NamedDecl>(GetDecl(LocallyScopedExternalDecls[I]));
+ if (D)
+ Decls.push_back(D);
+ }
+ LocallyScopedExternalDecls.clear();
+}
+
+void ASTReader::ReadReferencedSelectors(
+ SmallVectorImpl<std::pair<Selector, SourceLocation> > &Sels) {
+ if (ReferencedSelectorsData.empty())
+ return;
+
+  // If there are @selector references, add them to the given pool. This is
+  // used in the implementation of -Wselector.
+ unsigned int DataSize = ReferencedSelectorsData.size()-1;
+ unsigned I = 0;
+ while (I < DataSize) {
+ Selector Sel = DecodeSelector(ReferencedSelectorsData[I++]);
+ SourceLocation SelLoc
+ = SourceLocation::getFromRawEncoding(ReferencedSelectorsData[I++]);
+ Sels.push_back(std::make_pair(Sel, SelLoc));
+ }
+ ReferencedSelectorsData.clear();
+}
+
+void ASTReader::ReadWeakUndeclaredIdentifiers(
+ SmallVectorImpl<std::pair<IdentifierInfo *, WeakInfo> > &WeakIDs) {
+ if (WeakUndeclaredIdentifiers.empty())
+ return;
+
+ for (unsigned I = 0, N = WeakUndeclaredIdentifiers.size(); I < N; /*none*/) {
+ IdentifierInfo *WeakId
+ = DecodeIdentifierInfo(WeakUndeclaredIdentifiers[I++]);
+ IdentifierInfo *AliasId
+ = DecodeIdentifierInfo(WeakUndeclaredIdentifiers[I++]);
+ SourceLocation Loc
+ = SourceLocation::getFromRawEncoding(WeakUndeclaredIdentifiers[I++]);
+ bool Used = WeakUndeclaredIdentifiers[I++];
+ WeakInfo WI(AliasId, Loc);
+ WI.setUsed(Used);
+ WeakIDs.push_back(std::make_pair(WeakId, WI));
+ }
+ WeakUndeclaredIdentifiers.clear();
+}
+
+void ASTReader::ReadUsedVTables(SmallVectorImpl<ExternalVTableUse> &VTables) {
+ for (unsigned Idx = 0, N = VTableUses.size(); Idx < N; /* In loop */) {
+ ExternalVTableUse VT;
+ VT.Record = dyn_cast_or_null<CXXRecordDecl>(GetDecl(VTableUses[Idx++]));
+ VT.Location = SourceLocation::getFromRawEncoding(VTableUses[Idx++]);
+ VT.DefinitionRequired = VTableUses[Idx++];
+ VTables.push_back(VT);
+ }
+
+ VTableUses.clear();
+}
+
+void ASTReader::ReadPendingInstantiations(
+ SmallVectorImpl<std::pair<ValueDecl *, SourceLocation> > &Pending) {
+ for (unsigned Idx = 0, N = PendingInstantiations.size(); Idx < N;) {
+ ValueDecl *D = cast<ValueDecl>(GetDecl(PendingInstantiations[Idx++]));
+ SourceLocation Loc
+ = SourceLocation::getFromRawEncoding(PendingInstantiations[Idx++]);
+ Pending.push_back(std::make_pair(D, Loc));
+ }
+ PendingInstantiations.clear();
+}
+
+void ASTReader::LoadSelector(Selector Sel) {
+ // It would be complicated to avoid reading the methods anyway. So don't.
+ ReadMethodPool(Sel);
+}
+
+void ASTReader::SetIdentifierInfo(IdentifierID ID, IdentifierInfo *II) {
+ assert(ID && "Non-zero identifier ID required");
+ assert(ID <= IdentifiersLoaded.size() && "identifier ID out of range");
+ IdentifiersLoaded[ID - 1] = II;
+ if (DeserializationListener)
+ DeserializationListener->IdentifierRead(ID, II);
+}
+
+/// \brief Set the globally-visible declarations associated with the given
+/// identifier.
+///
+/// If the AST reader is currently in a state where the given declaration IDs
+/// cannot safely be resolved, they are queued until it is safe to resolve
+/// them.
+///
+/// \param II an IdentifierInfo that refers to one or more globally-visible
+/// declarations.
+///
+/// \param DeclIDs the set of declaration IDs with the name @p II that are
+/// visible at global scope.
+///
+/// \param Nonrecursive should be true to indicate that the caller knows that
+/// this call is non-recursive, and therefore the globally-visible declarations
+/// will not be placed onto the pending queue.
+void
+ASTReader::SetGloballyVisibleDecls(IdentifierInfo *II,
+ const SmallVectorImpl<uint32_t> &DeclIDs,
+ bool Nonrecursive) {
+ if (NumCurrentElementsDeserializing && !Nonrecursive) {
+ PendingIdentifierInfos.push_back(PendingIdentifierInfo());
+ PendingIdentifierInfo &PII = PendingIdentifierInfos.back();
+ PII.II = II;
+ PII.DeclIDs.append(DeclIDs.begin(), DeclIDs.end());
+ return;
+ }
+
+ for (unsigned I = 0, N = DeclIDs.size(); I != N; ++I) {
+ NamedDecl *D = cast<NamedDecl>(GetDecl(DeclIDs[I]));
+ if (SemaObj) {
+ // Introduce this declaration into the translation-unit scope
+ // and add it to the declaration chain for this identifier, so
+ // that (unqualified) name lookup will find it.
+ SemaObj->pushExternalDeclIntoScope(D, II);
+ } else {
+ // Queue this declaration so that it will be added to the
+ // translation unit scope and identifier's declaration chain
+ // once a Sema object is known.
+ PreloadedDecls.push_back(D);
+ }
+ }
+}
+
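+/// \brief Resolve an identifier ID into an IdentifierInfo, lazily reading the
+/// length-prefixed string from the owning module file and interning it in the
+/// preprocessor's identifier table.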
+IdentifierInfo *ASTReader::DecodeIdentifierInfo(IdentifierID ID) {
+ if (ID == 0)
+ return 0;
+
+ if (IdentifiersLoaded.empty()) {
+ Error("no identifier table in AST file");
+ return 0;
+ }
+
+ ID -= 1;
+ if (!IdentifiersLoaded[ID]) {
+ GlobalIdentifierMapType::iterator I = GlobalIdentifierMap.find(ID + 1);
+ assert(I != GlobalIdentifierMap.end() && "Corrupted global identifier map");
+ ModuleFile *M = I->second;
+ unsigned Index = ID - M->BaseIdentifierID;
+ const char *Str = M->IdentifierTableData + M->IdentifierOffsets[Index];
+
+ // All of the strings in the AST file are preceded by a 16-bit length.
+ // Extract that 16-bit length to avoid having to execute strlen().
+ // NOTE: 'StrLenPtr' is an 'unsigned char*' so that we load bytes as
+ // unsigned integers. This is important to avoid integer overflow when
+ // we cast them to 'unsigned'.
+ const unsigned char *StrLenPtr = (const unsigned char*) Str - 2;
+ unsigned StrLen = (((unsigned) StrLenPtr[0])
+ | (((unsigned) StrLenPtr[1]) << 8)) - 1;
+ IdentifiersLoaded[ID]
+ = &PP.getIdentifierTable().get(StringRef(Str, StrLen));
+ if (DeserializationListener)
+ DeserializationListener->IdentifierRead(ID + 1, IdentifiersLoaded[ID]);
+ }
+
+ return IdentifiersLoaded[ID];
+}
+
+IdentifierInfo *ASTReader::getLocalIdentifier(ModuleFile &M, unsigned LocalID) {
+ return DecodeIdentifierInfo(getGlobalIdentifierID(M, LocalID));
+}
+
+IdentifierID ASTReader::getGlobalIdentifierID(ModuleFile &M, unsigned LocalID) {
+ if (LocalID < NUM_PREDEF_IDENT_IDS)
+ return LocalID;
+
+ ContinuousRangeMap<uint32_t, int, 2>::iterator I
+ = M.IdentifierRemap.find(LocalID - NUM_PREDEF_IDENT_IDS);
+ assert(I != M.IdentifierRemap.end()
+ && "Invalid index into identifier index remap");
+
+ return LocalID + I->second;
+}
+
+bool ASTReader::ReadSLocEntry(int ID) {
+ return ReadSLocEntryRecord(ID) != Success;
+}
+
+serialization::SubmoduleID
+ASTReader::getGlobalSubmoduleID(ModuleFile &M, unsigned LocalID) {
+ if (LocalID < NUM_PREDEF_SUBMODULE_IDS)
+ return LocalID;
+
+ ContinuousRangeMap<uint32_t, int, 2>::iterator I
+ = M.SubmoduleRemap.find(LocalID - NUM_PREDEF_SUBMODULE_IDS);
+ assert(I != M.SubmoduleRemap.end()
+ && "Invalid index into submodule index remap");
+
+ return LocalID + I->second;
+}
+
+Module *ASTReader::getSubmodule(SubmoduleID GlobalID) {
+ if (GlobalID < NUM_PREDEF_SUBMODULE_IDS) {
+ assert(GlobalID == 0 && "Unhandled global submodule ID");
+ return 0;
+ }
+
+ if (GlobalID > SubmodulesLoaded.size()) {
+ Error("submodule ID out of range in AST file");
+ return 0;
+ }
+
+ return SubmodulesLoaded[GlobalID - NUM_PREDEF_SUBMODULE_IDS];
+}
+
+Selector ASTReader::getLocalSelector(ModuleFile &M, unsigned LocalID) {
+ return DecodeSelector(getGlobalSelectorID(M, LocalID));
+}
+
+Selector ASTReader::DecodeSelector(serialization::SelectorID ID) {
+ if (ID == 0)
+ return Selector();
+
+ if (ID > SelectorsLoaded.size()) {
+ Error("selector ID out of range in AST file");
+ return Selector();
+ }
+
+ if (SelectorsLoaded[ID - 1].getAsOpaquePtr() == 0) {
+ // Load this selector from the selector table.
+ GlobalSelectorMapType::iterator I = GlobalSelectorMap.find(ID);
+ assert(I != GlobalSelectorMap.end() && "Corrupted global selector map");
+ ModuleFile &M = *I->second;
+ ASTSelectorLookupTrait Trait(*this, M);
+ unsigned Idx = ID - M.BaseSelectorID - NUM_PREDEF_SELECTOR_IDS;
+ SelectorsLoaded[ID - 1] =
+ Trait.ReadKey(M.SelectorLookupTableData + M.SelectorOffsets[Idx], 0);
+ if (DeserializationListener)
+ DeserializationListener->SelectorRead(ID, SelectorsLoaded[ID - 1]);
+ }
+
+ return SelectorsLoaded[ID - 1];
+}
+
+Selector ASTReader::GetExternalSelector(serialization::SelectorID ID) {
+ return DecodeSelector(ID);
+}
+
+uint32_t ASTReader::GetNumExternalSelectors() {
+ // ID 0 (the null selector) is considered an external selector.
+ return getTotalNumSelectors() + 1;
+}
+
+serialization::SelectorID
+ASTReader::getGlobalSelectorID(ModuleFile &M, unsigned LocalID) const {
+ if (LocalID < NUM_PREDEF_SELECTOR_IDS)
+ return LocalID;
+
+ ContinuousRangeMap<uint32_t, int, 2>::iterator I
+ = M.SelectorRemap.find(LocalID - NUM_PREDEF_SELECTOR_IDS);
+ assert(I != M.SelectorRemap.end()
+ && "Invalid index into selector index remap");
+
+ return LocalID + I->second;
+}
+
+DeclarationName
+ASTReader::ReadDeclarationName(ModuleFile &F,
+ const RecordData &Record, unsigned &Idx) {
+ DeclarationName::NameKind Kind = (DeclarationName::NameKind)Record[Idx++];
+ switch (Kind) {
+ case DeclarationName::Identifier:
+ return DeclarationName(GetIdentifierInfo(F, Record, Idx));
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ return DeclarationName(ReadSelector(F, Record, Idx));
+
+ case DeclarationName::CXXConstructorName:
+ return Context.DeclarationNames.getCXXConstructorName(
+ Context.getCanonicalType(readType(F, Record, Idx)));
+
+ case DeclarationName::CXXDestructorName:
+ return Context.DeclarationNames.getCXXDestructorName(
+ Context.getCanonicalType(readType(F, Record, Idx)));
+
+ case DeclarationName::CXXConversionFunctionName:
+ return Context.DeclarationNames.getCXXConversionFunctionName(
+ Context.getCanonicalType(readType(F, Record, Idx)));
+
+ case DeclarationName::CXXOperatorName:
+ return Context.DeclarationNames.getCXXOperatorName(
+ (OverloadedOperatorKind)Record[Idx++]);
+
+ case DeclarationName::CXXLiteralOperatorName:
+ return Context.DeclarationNames.getCXXLiteralOperatorName(
+ GetIdentifierInfo(F, Record, Idx));
+
+ case DeclarationName::CXXUsingDirective:
+ return DeclarationName::getUsingDirectiveName();
+ }
+
+ llvm_unreachable("Invalid NameKind!");
+}
+
+void ASTReader::ReadDeclarationNameLoc(ModuleFile &F,
+ DeclarationNameLoc &DNLoc,
+ DeclarationName Name,
+ const RecordData &Record, unsigned &Idx) {
+ switch (Name.getNameKind()) {
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ DNLoc.NamedType.TInfo = GetTypeSourceInfo(F, Record, Idx);
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ DNLoc.CXXOperatorName.BeginOpNameLoc
+ = ReadSourceLocation(F, Record, Idx).getRawEncoding();
+ DNLoc.CXXOperatorName.EndOpNameLoc
+ = ReadSourceLocation(F, Record, Idx).getRawEncoding();
+ break;
+
+ case DeclarationName::CXXLiteralOperatorName:
+ DNLoc.CXXLiteralOperatorName.OpNameLoc
+ = ReadSourceLocation(F, Record, Idx).getRawEncoding();
+ break;
+
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXUsingDirective:
+ break;
+ }
+}
+
+void ASTReader::ReadDeclarationNameInfo(ModuleFile &F,
+ DeclarationNameInfo &NameInfo,
+ const RecordData &Record, unsigned &Idx) {
+ NameInfo.setName(ReadDeclarationName(F, Record, Idx));
+ NameInfo.setLoc(ReadSourceLocation(F, Record, Idx));
+ DeclarationNameLoc DNLoc;
+ ReadDeclarationNameLoc(F, DNLoc, NameInfo.getName(), Record, Idx);
+ NameInfo.setInfo(DNLoc);
+}
+
+void ASTReader::ReadQualifierInfo(ModuleFile &F, QualifierInfo &Info,
+ const RecordData &Record, unsigned &Idx) {
+ Info.QualifierLoc = ReadNestedNameSpecifierLoc(F, Record, Idx);
+ unsigned NumTPLists = Record[Idx++];
+ Info.NumTemplParamLists = NumTPLists;
+ if (NumTPLists) {
+ Info.TemplParamLists = new (Context) TemplateParameterList*[NumTPLists];
+ for (unsigned i=0; i != NumTPLists; ++i)
+ Info.TemplParamLists[i] = ReadTemplateParameterList(F, Record, Idx);
+ }
+}
+
+TemplateName
+ASTReader::ReadTemplateName(ModuleFile &F, const RecordData &Record,
+ unsigned &Idx) {
+ TemplateName::NameKind Kind = (TemplateName::NameKind)Record[Idx++];
+ switch (Kind) {
+ case TemplateName::Template:
+ return TemplateName(ReadDeclAs<TemplateDecl>(F, Record, Idx));
+
+ case TemplateName::OverloadedTemplate: {
+ unsigned size = Record[Idx++];
+ UnresolvedSet<8> Decls;
+ while (size--)
+ Decls.addDecl(ReadDeclAs<NamedDecl>(F, Record, Idx));
+
+ return Context.getOverloadedTemplateName(Decls.begin(), Decls.end());
+ }
+
+ case TemplateName::QualifiedTemplate: {
+ NestedNameSpecifier *NNS = ReadNestedNameSpecifier(F, Record, Idx);
+ bool hasTemplKeyword = Record[Idx++];
+ TemplateDecl *Template = ReadDeclAs<TemplateDecl>(F, Record, Idx);
+ return Context.getQualifiedTemplateName(NNS, hasTemplKeyword, Template);
+ }
+
+ case TemplateName::DependentTemplate: {
+ NestedNameSpecifier *NNS = ReadNestedNameSpecifier(F, Record, Idx);
+ if (Record[Idx++]) // isIdentifier
+ return Context.getDependentTemplateName(NNS,
+ GetIdentifierInfo(F, Record,
+ Idx));
+ return Context.getDependentTemplateName(NNS,
+ (OverloadedOperatorKind)Record[Idx++]);
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ TemplateTemplateParmDecl *param
+ = ReadDeclAs<TemplateTemplateParmDecl>(F, Record, Idx);
+ if (!param) return TemplateName();
+ TemplateName replacement = ReadTemplateName(F, Record, Idx);
+ return Context.getSubstTemplateTemplateParm(param, replacement);
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ TemplateTemplateParmDecl *Param
+ = ReadDeclAs<TemplateTemplateParmDecl>(F, Record, Idx);
+ if (!Param)
+ return TemplateName();
+
+ TemplateArgument ArgPack = ReadTemplateArgument(F, Record, Idx);
+ if (ArgPack.getKind() != TemplateArgument::Pack)
+ return TemplateName();
+
+ return Context.getSubstTemplateTemplateParmPack(Param, ArgPack);
+ }
+ }
+
+ llvm_unreachable("Unhandled template name kind!");
+}
+
+TemplateArgument
+ASTReader::ReadTemplateArgument(ModuleFile &F,
+ const RecordData &Record, unsigned &Idx) {
+ TemplateArgument::ArgKind Kind = (TemplateArgument::ArgKind)Record[Idx++];
+ switch (Kind) {
+ case TemplateArgument::Null:
+ return TemplateArgument();
+ case TemplateArgument::Type:
+ return TemplateArgument(readType(F, Record, Idx));
+ case TemplateArgument::Declaration:
+ return TemplateArgument(ReadDecl(F, Record, Idx));
+ case TemplateArgument::Integral: {
+ llvm::APSInt Value = ReadAPSInt(Record, Idx);
+ QualType T = readType(F, Record, Idx);
+ return TemplateArgument(Value, T);
+ }
+ case TemplateArgument::Template:
+ return TemplateArgument(ReadTemplateName(F, Record, Idx));
+ case TemplateArgument::TemplateExpansion: {
+ TemplateName Name = ReadTemplateName(F, Record, Idx);
+ llvm::Optional<unsigned> NumTemplateExpansions;
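+ // The record stores the expansion count incremented by one, with zero
+ // meaning that no fixed expansion count is known.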
+ if (unsigned NumExpansions = Record[Idx++])
+ NumTemplateExpansions = NumExpansions - 1;
+ return TemplateArgument(Name, NumTemplateExpansions);
+ }
+ case TemplateArgument::Expression:
+ return TemplateArgument(ReadExpr(F));
+ case TemplateArgument::Pack: {
+ unsigned NumArgs = Record[Idx++];
+ TemplateArgument *Args = new (Context) TemplateArgument[NumArgs];
+ for (unsigned I = 0; I != NumArgs; ++I)
+ Args[I] = ReadTemplateArgument(F, Record, Idx);
+ return TemplateArgument(Args, NumArgs);
+ }
+ }
+
+ llvm_unreachable("Unhandled template argument kind!");
+}
+
+TemplateParameterList *
+ASTReader::ReadTemplateParameterList(ModuleFile &F,
+ const RecordData &Record, unsigned &Idx) {
+ SourceLocation TemplateLoc = ReadSourceLocation(F, Record, Idx);
+ SourceLocation LAngleLoc = ReadSourceLocation(F, Record, Idx);
+ SourceLocation RAngleLoc = ReadSourceLocation(F, Record, Idx);
+
+ unsigned NumParams = Record[Idx++];
+ SmallVector<NamedDecl *, 16> Params;
+ Params.reserve(NumParams);
+ while (NumParams--)
+ Params.push_back(ReadDeclAs<NamedDecl>(F, Record, Idx));
+
+ TemplateParameterList* TemplateParams =
+ TemplateParameterList::Create(Context, TemplateLoc, LAngleLoc,
+ Params.data(), Params.size(), RAngleLoc);
+ return TemplateParams;
+}
+
+void
+ASTReader::
+ReadTemplateArgumentList(SmallVector<TemplateArgument, 8> &TemplArgs,
+ ModuleFile &F, const RecordData &Record,
+ unsigned &Idx) {
+ unsigned NumTemplateArgs = Record[Idx++];
+ TemplArgs.reserve(NumTemplateArgs);
+ while (NumTemplateArgs--)
+ TemplArgs.push_back(ReadTemplateArgument(F, Record, Idx));
+}
+
+/// \brief Read a UnresolvedSet structure.
+void ASTReader::ReadUnresolvedSet(ModuleFile &F, UnresolvedSetImpl &Set,
+ const RecordData &Record, unsigned &Idx) {
+ unsigned NumDecls = Record[Idx++];
+ while (NumDecls--) {
+ NamedDecl *D = ReadDeclAs<NamedDecl>(F, Record, Idx);
+ AccessSpecifier AS = (AccessSpecifier)Record[Idx++];
+ Set.addDecl(D, AS);
+ }
+}
+
+CXXBaseSpecifier
+ASTReader::ReadCXXBaseSpecifier(ModuleFile &F,
+ const RecordData &Record, unsigned &Idx) {
+ bool isVirtual = static_cast<bool>(Record[Idx++]);
+ bool isBaseOfClass = static_cast<bool>(Record[Idx++]);
+ AccessSpecifier AS = static_cast<AccessSpecifier>(Record[Idx++]);
+ bool inheritConstructors = static_cast<bool>(Record[Idx++]);
+ TypeSourceInfo *TInfo = GetTypeSourceInfo(F, Record, Idx);
+ SourceRange Range = ReadSourceRange(F, Record, Idx);
+ SourceLocation EllipsisLoc = ReadSourceLocation(F, Record, Idx);
+ CXXBaseSpecifier Result(Range, isVirtual, isBaseOfClass, AS, TInfo,
+ EllipsisLoc);
+ Result.setInheritConstructors(inheritConstructors);
+ return Result;
+}
+
+std::pair<CXXCtorInitializer **, unsigned>
+ASTReader::ReadCXXCtorInitializers(ModuleFile &F, const RecordData &Record,
+ unsigned &Idx) {
+ CXXCtorInitializer **CtorInitializers = 0;
+ unsigned NumInitializers = Record[Idx++];
+ if (NumInitializers) {
+ CtorInitializers
+ = new (Context) CXXCtorInitializer*[NumInitializers];
+ for (unsigned i=0; i != NumInitializers; ++i) {
+ TypeSourceInfo *TInfo = 0;
+ bool IsBaseVirtual = false;
+ FieldDecl *Member = 0;
+ IndirectFieldDecl *IndirectMember = 0;
+
+ CtorInitializerType Type = (CtorInitializerType)Record[Idx++];
+ switch (Type) {
+ case CTOR_INITIALIZER_BASE:
+ TInfo = GetTypeSourceInfo(F, Record, Idx);
+ IsBaseVirtual = Record[Idx++];
+ break;
+
+ case CTOR_INITIALIZER_DELEGATING:
+ TInfo = GetTypeSourceInfo(F, Record, Idx);
+ break;
+
+ case CTOR_INITIALIZER_MEMBER:
+ Member = ReadDeclAs<FieldDecl>(F, Record, Idx);
+ break;
+
+ case CTOR_INITIALIZER_INDIRECT_MEMBER:
+ IndirectMember = ReadDeclAs<IndirectFieldDecl>(F, Record, Idx);
+ break;
+ }
+
+ SourceLocation MemberOrEllipsisLoc = ReadSourceLocation(F, Record, Idx);
+ Expr *Init = ReadExpr(F);
+ SourceLocation LParenLoc = ReadSourceLocation(F, Record, Idx);
+ SourceLocation RParenLoc = ReadSourceLocation(F, Record, Idx);
+ bool IsWritten = Record[Idx++];
+ unsigned SourceOrderOrNumArrayIndices;
+ SmallVector<VarDecl *, 8> Indices;
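+ // For an initializer that was explicitly written, the next value is its
+ // source order; otherwise it is the number of array index variables that
+ // follow it.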
+ if (IsWritten) {
+ SourceOrderOrNumArrayIndices = Record[Idx++];
+ } else {
+ SourceOrderOrNumArrayIndices = Record[Idx++];
+ Indices.reserve(SourceOrderOrNumArrayIndices);
+ for (unsigned i=0; i != SourceOrderOrNumArrayIndices; ++i)
+ Indices.push_back(ReadDeclAs<VarDecl>(F, Record, Idx));
+ }
+
+ CXXCtorInitializer *BOMInit;
+ if (Type == CTOR_INITIALIZER_BASE) {
+ BOMInit = new (Context) CXXCtorInitializer(Context, TInfo, IsBaseVirtual,
+ LParenLoc, Init, RParenLoc,
+ MemberOrEllipsisLoc);
+ } else if (Type == CTOR_INITIALIZER_DELEGATING) {
+ BOMInit = new (Context) CXXCtorInitializer(Context, TInfo, LParenLoc,
+ Init, RParenLoc);
+ } else if (IsWritten) {
+ if (Member)
+ BOMInit = new (Context) CXXCtorInitializer(Context, Member, MemberOrEllipsisLoc,
+ LParenLoc, Init, RParenLoc);
+ else
+ BOMInit = new (Context) CXXCtorInitializer(Context, IndirectMember,
+ MemberOrEllipsisLoc, LParenLoc,
+ Init, RParenLoc);
+ } else {
+ BOMInit = CXXCtorInitializer::Create(Context, Member, MemberOrEllipsisLoc,
+ LParenLoc, Init, RParenLoc,
+ Indices.data(), Indices.size());
+ }
+
+ if (IsWritten)
+ BOMInit->setSourceOrder(SourceOrderOrNumArrayIndices);
+ CtorInitializers[i] = BOMInit;
+ }
+ }
+
+ return std::make_pair(CtorInitializers, NumInitializers);
+}
+
+NestedNameSpecifier *
+ASTReader::ReadNestedNameSpecifier(ModuleFile &F,
+ const RecordData &Record, unsigned &Idx) {
+ unsigned N = Record[Idx++];
+ NestedNameSpecifier *NNS = 0, *Prev = 0;
+ for (unsigned I = 0; I != N; ++I) {
+ NestedNameSpecifier::SpecifierKind Kind
+ = (NestedNameSpecifier::SpecifierKind)Record[Idx++];
+ switch (Kind) {
+ case NestedNameSpecifier::Identifier: {
+ IdentifierInfo *II = GetIdentifierInfo(F, Record, Idx);
+ NNS = NestedNameSpecifier::Create(Context, Prev, II);
+ break;
+ }
+
+ case NestedNameSpecifier::Namespace: {
+ NamespaceDecl *NS = ReadDeclAs<NamespaceDecl>(F, Record, Idx);
+ NNS = NestedNameSpecifier::Create(Context, Prev, NS);
+ break;
+ }
+
+ case NestedNameSpecifier::NamespaceAlias: {
+ NamespaceAliasDecl *Alias =ReadDeclAs<NamespaceAliasDecl>(F, Record, Idx);
+ NNS = NestedNameSpecifier::Create(Context, Prev, Alias);
+ break;
+ }
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ const Type *T = readType(F, Record, Idx).getTypePtrOrNull();
+ if (!T)
+ return 0;
+
+ bool Template = Record[Idx++];
+ NNS = NestedNameSpecifier::Create(Context, Prev, Template, T);
+ break;
+ }
+
+ case NestedNameSpecifier::Global: {
+ NNS = NestedNameSpecifier::GlobalSpecifier(Context);
+ // No associated value, and there can't be a prefix.
+ break;
+ }
+ }
+ Prev = NNS;
+ }
+ return NNS;
+}
+
+NestedNameSpecifierLoc
+ASTReader::ReadNestedNameSpecifierLoc(ModuleFile &F, const RecordData &Record,
+ unsigned &Idx) {
+ unsigned N = Record[Idx++];
+ NestedNameSpecifierLocBuilder Builder;
+ for (unsigned I = 0; I != N; ++I) {
+ NestedNameSpecifier::SpecifierKind Kind
+ = (NestedNameSpecifier::SpecifierKind)Record[Idx++];
+ switch (Kind) {
+ case NestedNameSpecifier::Identifier: {
+ IdentifierInfo *II = GetIdentifierInfo(F, Record, Idx);
+ SourceRange Range = ReadSourceRange(F, Record, Idx);
+ Builder.Extend(Context, II, Range.getBegin(), Range.getEnd());
+ break;
+ }
+
+ case NestedNameSpecifier::Namespace: {
+ NamespaceDecl *NS = ReadDeclAs<NamespaceDecl>(F, Record, Idx);
+ SourceRange Range = ReadSourceRange(F, Record, Idx);
+ Builder.Extend(Context, NS, Range.getBegin(), Range.getEnd());
+ break;
+ }
+
+ case NestedNameSpecifier::NamespaceAlias: {
+ NamespaceAliasDecl *Alias =ReadDeclAs<NamespaceAliasDecl>(F, Record, Idx);
+ SourceRange Range = ReadSourceRange(F, Record, Idx);
+ Builder.Extend(Context, Alias, Range.getBegin(), Range.getEnd());
+ break;
+ }
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ bool Template = Record[Idx++];
+ TypeSourceInfo *T = GetTypeSourceInfo(F, Record, Idx);
+ if (!T)
+ return NestedNameSpecifierLoc();
+ SourceLocation ColonColonLoc = ReadSourceLocation(F, Record, Idx);
+
+ // FIXME: 'template' keyword location not saved anywhere, so we fake it.
+ Builder.Extend(Context,
+ Template? T->getTypeLoc().getBeginLoc() : SourceLocation(),
+ T->getTypeLoc(), ColonColonLoc);
+ break;
+ }
+
+ case NestedNameSpecifier::Global: {
+ SourceLocation ColonColonLoc = ReadSourceLocation(F, Record, Idx);
+ Builder.MakeGlobal(Context, ColonColonLoc);
+ break;
+ }
+ }
+ }
+
+ return Builder.getWithLocInContext(Context);
+}
+
+SourceRange
+ASTReader::ReadSourceRange(ModuleFile &F, const RecordData &Record,
+ unsigned &Idx) {
+ SourceLocation beg = ReadSourceLocation(F, Record, Idx);
+ SourceLocation end = ReadSourceLocation(F, Record, Idx);
+ return SourceRange(beg, end);
+}
+
+/// \brief Read an integral value
+llvm::APInt ASTReader::ReadAPInt(const RecordData &Record, unsigned &Idx) {
+ unsigned BitWidth = Record[Idx++];
+ unsigned NumWords = llvm::APInt::getNumWords(BitWidth);
+ llvm::APInt Result(BitWidth, NumWords, &Record[Idx]);
+ Idx += NumWords;
+ return Result;
+}
+
+/// \brief Read a signed integral value
+llvm::APSInt ASTReader::ReadAPSInt(const RecordData &Record, unsigned &Idx) {
+ bool isUnsigned = Record[Idx++];
+ return llvm::APSInt(ReadAPInt(Record, Idx), isUnsigned);
+}
+
+/// \brief Read a floating-point value
+llvm::APFloat ASTReader::ReadAPFloat(const RecordData &Record, unsigned &Idx) {
+ return llvm::APFloat(ReadAPInt(Record, Idx));
+}
+
+/// \brief Read a string
+std::string ASTReader::ReadString(const RecordData &Record, unsigned &Idx) {
+ unsigned Len = Record[Idx++];
+ std::string Result(Record.data() + Idx, Record.data() + Idx + Len);
+ Idx += Len;
+ return Result;
+}
+
+VersionTuple ASTReader::ReadVersionTuple(const RecordData &Record,
+ unsigned &Idx) {
+ unsigned Major = Record[Idx++];
+ unsigned Minor = Record[Idx++];
+ unsigned Subminor = Record[Idx++];
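+ // Minor and subminor are stored incremented by one so that zero can mean
+ // "not present".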
+ if (Minor == 0)
+ return VersionTuple(Major);
+ if (Subminor == 0)
+ return VersionTuple(Major, Minor - 1);
+ return VersionTuple(Major, Minor - 1, Subminor - 1);
+}
+
+CXXTemporary *ASTReader::ReadCXXTemporary(ModuleFile &F,
+ const RecordData &Record,
+ unsigned &Idx) {
+ CXXDestructorDecl *Decl = ReadDeclAs<CXXDestructorDecl>(F, Record, Idx);
+ return CXXTemporary::Create(Context, Decl);
+}
+
+DiagnosticBuilder ASTReader::Diag(unsigned DiagID) {
+ return Diag(SourceLocation(), DiagID);
+}
+
+DiagnosticBuilder ASTReader::Diag(SourceLocation Loc, unsigned DiagID) {
+ return Diags.Report(Loc, DiagID);
+}
+
+/// \brief Retrieve the identifier table associated with the
+/// preprocessor.
+IdentifierTable &ASTReader::getIdentifierTable() {
+ return PP.getIdentifierTable();
+}
+
+/// \brief Record that the given ID maps to the given switch-case
+/// statement.
+void ASTReader::RecordSwitchCaseID(SwitchCase *SC, unsigned ID) {
+ assert(SwitchCaseStmts[ID] == 0 && "Already have a SwitchCase with this ID");
+ SwitchCaseStmts[ID] = SC;
+}
+
+/// \brief Retrieve the switch-case statement with the given ID.
+SwitchCase *ASTReader::getSwitchCaseWithID(unsigned ID) {
+ assert(SwitchCaseStmts[ID] != 0 && "No SwitchCase with this ID");
+ return SwitchCaseStmts[ID];
+}
+
+void ASTReader::ClearSwitchCaseIDs() {
+ SwitchCaseStmts.clear();
+}
+
+void ASTReader::finishPendingActions() {
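+ // Resolving pending identifiers can enqueue new declaration chains and
+ // vice versa, so loop until both queues are empty.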
+ while (!PendingIdentifierInfos.empty() || !PendingDeclChains.empty()) {
+ // If any identifiers with corresponding top-level declarations have
+ // been loaded, load those declarations now.
+ while (!PendingIdentifierInfos.empty()) {
+ SetGloballyVisibleDecls(PendingIdentifierInfos.front().II,
+ PendingIdentifierInfos.front().DeclIDs, true);
+ PendingIdentifierInfos.pop_front();
+ }
+
+ // Load pending declaration chains.
+ for (unsigned I = 0; I != PendingDeclChains.size(); ++I) {
+ loadPendingDeclChain(PendingDeclChains[I]);
+ PendingDeclChainsKnown.erase(PendingDeclChains[I]);
+ }
+ PendingDeclChains.clear();
+ }
+
+ // If we deserialized any C++ or Objective-C class definitions, any
+ // Objective-C protocol definitions, or any redeclarable templates, make sure
+ // that all redeclarations point to the definitions. Note that this can only
+ // happen now, after the redeclaration chains have been fully wired.
+ for (llvm::SmallPtrSet<Decl *, 4>::iterator D = PendingDefinitions.begin(),
+ DEnd = PendingDefinitions.end();
+ D != DEnd; ++D) {
+ if (TagDecl *TD = dyn_cast<TagDecl>(*D)) {
+ if (const TagType *TagT = dyn_cast<TagType>(TD->TypeForDecl)) {
+ // Make sure that the TagType points at the definition.
+ const_cast<TagType*>(TagT)->decl = TD;
+ }
+
+ if (CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(*D)) {
+ for (CXXRecordDecl::redecl_iterator R = RD->redecls_begin(),
+ REnd = RD->redecls_end();
+ R != REnd; ++R)
+ cast<CXXRecordDecl>(*R)->DefinitionData = RD->DefinitionData;
+
+ }
+
+ continue;
+ }
+
+ if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(*D)) {
+ // Make sure that the ObjCInterfaceType points at the definition.
+ const_cast<ObjCInterfaceType *>(cast<ObjCInterfaceType>(ID->TypeForDecl))
+ ->Decl = ID;
+
+ for (ObjCInterfaceDecl::redecl_iterator R = ID->redecls_begin(),
+ REnd = ID->redecls_end();
+ R != REnd; ++R)
+ R->Data = ID->Data;
+
+ continue;
+ }
+
+ if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(*D)) {
+ for (ObjCProtocolDecl::redecl_iterator R = PD->redecls_begin(),
+ REnd = PD->redecls_end();
+ R != REnd; ++R)
+ R->Data = PD->Data;
+
+ continue;
+ }
+
+ RedeclarableTemplateDecl *RTD
+ = cast<RedeclarableTemplateDecl>(*D)->getCanonicalDecl();
+ for (RedeclarableTemplateDecl::redecl_iterator R = RTD->redecls_begin(),
+ REnd = RTD->redecls_end();
+ R != REnd; ++R)
+ R->Common = RTD->Common;
+ }
+ PendingDefinitions.clear();
+}
+
+void ASTReader::FinishedDeserializing() {
+ assert(NumCurrentElementsDeserializing &&
+ "FinishedDeserializing not paired with StartedDeserializing");
+ if (NumCurrentElementsDeserializing == 1) {
+ // We decrease NumCurrentElementsDeserializing only after pending actions
+ // are finished, to avoid recursively re-calling finishPendingActions().
+ finishPendingActions();
+ }
+ --NumCurrentElementsDeserializing;
+
+ if (NumCurrentElementsDeserializing == 0 &&
+ Consumer && !PassingDeclsToConsumer) {
+ // Guard variable to avoid recursively redoing the process of passing
+ // decls to consumer.
+ SaveAndRestore<bool> GuardPassingDeclsToConsumer(PassingDeclsToConsumer,
+ true);
+
+ while (!InterestingDecls.empty()) {
+ // We are not in recursive loading, so it's safe to pass the "interesting"
+ // decls to the consumer.
+ Decl *D = InterestingDecls.front();
+ InterestingDecls.pop_front();
+ PassInterestingDeclToConsumer(D);
+ }
+ }
+}
+
+ASTReader::ASTReader(Preprocessor &PP, ASTContext &Context,
+ StringRef isysroot, bool DisableValidation,
+ bool DisableStatCache, bool AllowASTWithCompilerErrors)
+ : Listener(new PCHValidator(PP, *this)), DeserializationListener(0),
+ SourceMgr(PP.getSourceManager()), FileMgr(PP.getFileManager()),
+ Diags(PP.getDiagnostics()), SemaObj(0), PP(PP), Context(Context),
+ Consumer(0), ModuleMgr(FileMgr.getFileSystemOptions()),
+ RelocatablePCH(false), isysroot(isysroot),
+ DisableValidation(DisableValidation),
+ DisableStatCache(DisableStatCache),
+ AllowASTWithCompilerErrors(AllowASTWithCompilerErrors),
+ CurrentGeneration(0), NumStatHits(0), NumStatMisses(0),
+ NumSLocEntriesRead(0), TotalNumSLocEntries(0),
+ NumStatementsRead(0), TotalNumStatements(0), NumMacrosRead(0),
+ TotalNumMacros(0), NumSelectorsRead(0), NumMethodPoolEntriesRead(0),
+ NumMethodPoolMisses(0), TotalNumMethodPoolEntries(0),
+ NumLexicalDeclContextsRead(0), TotalLexicalDeclContexts(0),
+ NumVisibleDeclContextsRead(0), TotalVisibleDeclContexts(0),
+ TotalModulesSizeInBits(0), NumCurrentElementsDeserializing(0),
+ PassingDeclsToConsumer(false),
+ NumCXXBaseSpecifiersLoaded(0)
+{
+ SourceMgr.setExternalSLocEntrySource(this);
+}
+
+ASTReader::~ASTReader() {
+ for (DeclContextVisibleUpdatesPending::iterator
+ I = PendingVisibleUpdates.begin(),
+ E = PendingVisibleUpdates.end();
+ I != E; ++I) {
+ for (DeclContextVisibleUpdates::iterator J = I->second.begin(),
+ F = I->second.end();
+ J != F; ++J)
+ delete static_cast<ASTDeclContextNameLookupTable*>(J->first);
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp
new file mode 100644
index 0000000..5db5f92
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -0,0 +1,2474 @@
+//===--- ASTReaderDecl.cpp - Decl Deserialization ---------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ASTReader::ReadDeclRecord method, which is the
+// entry point for loading a decl.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ASTCommon.h"
+#include "clang/Serialization/ASTReader.h"
+#include "clang/Sema/IdentifierResolver.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/SemaDiagnostic.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+using namespace clang;
+using namespace clang::serialization;
+
+//===----------------------------------------------------------------------===//
+// Declaration deserialization
+//===----------------------------------------------------------------------===//
+
+namespace clang {
+ class ASTDeclReader : public DeclVisitor<ASTDeclReader, void> {
+ ASTReader &Reader;
+ ModuleFile &F;
+ llvm::BitstreamCursor &Cursor;
+ const DeclID ThisDeclID;
+ const unsigned RawLocation;
+ typedef ASTReader::RecordData RecordData;
+ const RecordData &Record;
+ unsigned &Idx;
+ TypeID TypeIDForTypeDecl;
+
+ DeclID DeclContextIDForTemplateParmDecl;
+ DeclID LexicalDeclContextIDForTemplateParmDecl;
+
+ uint64_t GetCurrentCursorOffset();
+
+ SourceLocation ReadSourceLocation(const RecordData &R, unsigned &I) {
+ return Reader.ReadSourceLocation(F, R, I);
+ }
+
+ SourceRange ReadSourceRange(const RecordData &R, unsigned &I) {
+ return Reader.ReadSourceRange(F, R, I);
+ }
+
+ TypeSourceInfo *GetTypeSourceInfo(const RecordData &R, unsigned &I) {
+ return Reader.GetTypeSourceInfo(F, R, I);
+ }
+
+ serialization::DeclID ReadDeclID(const RecordData &R, unsigned &I) {
+ return Reader.ReadDeclID(F, R, I);
+ }
+
+ Decl *ReadDecl(const RecordData &R, unsigned &I) {
+ return Reader.ReadDecl(F, R, I);
+ }
+
+ template<typename T>
+ T *ReadDeclAs(const RecordData &R, unsigned &I) {
+ return Reader.ReadDeclAs<T>(F, R, I);
+ }
+
+ void ReadQualifierInfo(QualifierInfo &Info,
+ const RecordData &R, unsigned &I) {
+ Reader.ReadQualifierInfo(F, Info, R, I);
+ }
+
+ void ReadDeclarationNameLoc(DeclarationNameLoc &DNLoc, DeclarationName Name,
+ const RecordData &R, unsigned &I) {
+ Reader.ReadDeclarationNameLoc(F, DNLoc, Name, R, I);
+ }
+
+ void ReadDeclarationNameInfo(DeclarationNameInfo &NameInfo,
+ const RecordData &R, unsigned &I) {
+ Reader.ReadDeclarationNameInfo(F, NameInfo, R, I);
+ }
+
+ serialization::SubmoduleID readSubmoduleID(const RecordData &R,
+ unsigned &I) {
+ if (I >= R.size())
+ return 0;
+
+ return Reader.getGlobalSubmoduleID(F, R[I++]);
+ }
+
+ Module *readModule(const RecordData &R, unsigned &I) {
+ return Reader.getSubmodule(readSubmoduleID(R, I));
+ }
+
+ void ReadCXXDefinitionData(struct CXXRecordDecl::DefinitionData &Data,
+ const RecordData &R, unsigned &I);
+
+ /// \brief RAII class used to capture the first ID within a redeclaration
+ /// chain and to introduce it into the list of pending redeclaration chains
+ /// on destruction.
+ ///
+ /// The caller can choose not to introduce this ID into the redeclaration
+ /// chain by calling \c suppress().
+ class RedeclarableResult {
+ ASTReader &Reader;
+ GlobalDeclID FirstID;
+ mutable bool Owning;
+
+ RedeclarableResult &operator=(RedeclarableResult&); // DO NOT IMPLEMENT
+
+ public:
+ RedeclarableResult(ASTReader &Reader, GlobalDeclID FirstID)
+ : Reader(Reader), FirstID(FirstID), Owning(true) { }
+
+ RedeclarableResult(const RedeclarableResult &Other)
+ : Reader(Other.Reader), FirstID(Other.FirstID), Owning(Other.Owning)
+ {
+ Other.Owning = false;
+ }
+
+ ~RedeclarableResult() {
+ // FIXME: We want to suppress this when the declaration is local to
+ // a function, since there's no reason to search other AST files
+ // for redeclarations (they can't exist). However, this is hard to
+ // do locally because the declaration hasn't necessarily loaded its
+ // declaration context yet. Also, local externs still have the function
+ // as their (semantic) declaration context, which is wrong and would
+ // break this optimization.
+
+ if (FirstID && Owning && Reader.PendingDeclChainsKnown.insert(FirstID))
+ Reader.PendingDeclChains.push_back(FirstID);
+ }
+
+ /// \brief Retrieve the first ID.
+ GlobalDeclID getFirstID() const { return FirstID; }
+
+ /// \brief Do not introduce this declaration ID into the set of pending
+ /// declaration chains.
+ void suppress() {
+ Owning = false;
+ }
+ };
+
+ /// \brief Class used to capture the result of searching for an existing
+ /// declaration of a specific kind and name, along with the ability
+ /// to update the place where this result was found (the declaration
+ /// chain hanging off an identifier or the DeclContext we searched in)
+ /// if requested.
+ class FindExistingResult {
+ ASTReader &Reader;
+ NamedDecl *New;
+ NamedDecl *Existing;
+ mutable bool AddResult;
+
+ FindExistingResult &operator=(FindExistingResult&); // DO NOT IMPLEMENT
+
+ public:
+ FindExistingResult(ASTReader &Reader)
+ : Reader(Reader), New(0), Existing(0), AddResult(false) { }
+
+ FindExistingResult(ASTReader &Reader, NamedDecl *New, NamedDecl *Existing)
+ : Reader(Reader), New(New), Existing(Existing), AddResult(true) { }
+
+ FindExistingResult(const FindExistingResult &Other)
+ : Reader(Other.Reader), New(Other.New), Existing(Other.Existing),
+ AddResult(Other.AddResult)
+ {
+ Other.AddResult = false;
+ }
+
+ ~FindExistingResult();
+
+ /// \brief Suppress the addition of this result into the known set of
+ /// names.
+ void suppress() { AddResult = false; }
+
+ operator NamedDecl*() const { return Existing; }
+
+ template<typename T>
+ operator T*() const { return dyn_cast_or_null<T>(Existing); }
+ };
+
+ FindExistingResult findExisting(NamedDecl *D);
+
+ public:
+ ASTDeclReader(ASTReader &Reader, ModuleFile &F,
+ llvm::BitstreamCursor &Cursor, DeclID thisDeclID,
+ unsigned RawLocation,
+ const RecordData &Record, unsigned &Idx)
+ : Reader(Reader), F(F), Cursor(Cursor), ThisDeclID(thisDeclID),
+ RawLocation(RawLocation), Record(Record), Idx(Idx),
+ TypeIDForTypeDecl(0) { }
+
+ static void attachPreviousDecl(Decl *D, Decl *previous);
+ static void attachLatestDecl(Decl *D, Decl *latest);
+
+ void Visit(Decl *D);
+
+ void UpdateDecl(Decl *D, ModuleFile &ModuleFile,
+ const RecordData &Record);
+
+ static void setNextObjCCategory(ObjCCategoryDecl *Cat,
+ ObjCCategoryDecl *Next) {
+ Cat->NextClassCategory = Next;
+ }
+
+ void VisitDecl(Decl *D);
+ void VisitTranslationUnitDecl(TranslationUnitDecl *TU);
+ void VisitNamedDecl(NamedDecl *ND);
+ void VisitLabelDecl(LabelDecl *LD);
+ void VisitNamespaceDecl(NamespaceDecl *D);
+ void VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
+ void VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
+ void VisitTypeDecl(TypeDecl *TD);
+ void VisitTypedefNameDecl(TypedefNameDecl *TD);
+ void VisitTypedefDecl(TypedefDecl *TD);
+ void VisitTypeAliasDecl(TypeAliasDecl *TD);
+ void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
+ void VisitTagDecl(TagDecl *TD);
+ void VisitEnumDecl(EnumDecl *ED);
+ void VisitRecordDecl(RecordDecl *RD);
+ void VisitCXXRecordDecl(CXXRecordDecl *D);
+ void VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D);
+ void VisitClassTemplatePartialSpecializationDecl(
+ ClassTemplatePartialSpecializationDecl *D);
+ void VisitClassScopeFunctionSpecializationDecl(
+ ClassScopeFunctionSpecializationDecl *D);
+ void VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
+ void VisitValueDecl(ValueDecl *VD);
+ void VisitEnumConstantDecl(EnumConstantDecl *ECD);
+ void VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
+ void VisitDeclaratorDecl(DeclaratorDecl *DD);
+ void VisitFunctionDecl(FunctionDecl *FD);
+ void VisitCXXMethodDecl(CXXMethodDecl *D);
+ void VisitCXXConstructorDecl(CXXConstructorDecl *D);
+ void VisitCXXDestructorDecl(CXXDestructorDecl *D);
+ void VisitCXXConversionDecl(CXXConversionDecl *D);
+ void VisitFieldDecl(FieldDecl *FD);
+ void VisitIndirectFieldDecl(IndirectFieldDecl *FD);
+ void VisitVarDecl(VarDecl *VD);
+ void VisitImplicitParamDecl(ImplicitParamDecl *PD);
+ void VisitParmVarDecl(ParmVarDecl *PD);
+ void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
+ void VisitTemplateDecl(TemplateDecl *D);
+ RedeclarableResult VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D);
+ void VisitClassTemplateDecl(ClassTemplateDecl *D);
+ void VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
+ void VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
+ void VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D);
+ void VisitUsingDecl(UsingDecl *D);
+ void VisitUsingShadowDecl(UsingShadowDecl *D);
+ void VisitLinkageSpecDecl(LinkageSpecDecl *D);
+ void VisitFileScopeAsmDecl(FileScopeAsmDecl *AD);
+ void VisitImportDecl(ImportDecl *D);
+ void VisitAccessSpecDecl(AccessSpecDecl *D);
+ void VisitFriendDecl(FriendDecl *D);
+ void VisitFriendTemplateDecl(FriendTemplateDecl *D);
+ void VisitStaticAssertDecl(StaticAssertDecl *D);
+ void VisitBlockDecl(BlockDecl *BD);
+
+ std::pair<uint64_t, uint64_t> VisitDeclContext(DeclContext *DC);
+
+ template<typename T>
+ RedeclarableResult VisitRedeclarable(Redeclarable<T> *D);
+
+ template<typename T>
+ void mergeRedeclarable(Redeclarable<T> *D, RedeclarableResult &Redecl);
+
+ // FIXME: Reorder according to DeclNodes.td?
+ void VisitObjCMethodDecl(ObjCMethodDecl *D);
+ void VisitObjCContainerDecl(ObjCContainerDecl *D);
+ void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
+ void VisitObjCIvarDecl(ObjCIvarDecl *D);
+ void VisitObjCProtocolDecl(ObjCProtocolDecl *D);
+ void VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *D);
+ void VisitObjCCategoryDecl(ObjCCategoryDecl *D);
+ void VisitObjCImplDecl(ObjCImplDecl *D);
+ void VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
+ void VisitObjCImplementationDecl(ObjCImplementationDecl *D);
+ void VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D);
+ void VisitObjCPropertyDecl(ObjCPropertyDecl *D);
+ void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
+ };
+}
+
+uint64_t ASTDeclReader::GetCurrentCursorOffset() {
+ return F.DeclsCursor.GetCurrentBitNo() + F.GlobalBitOffset;
+}
+
+void ASTDeclReader::Visit(Decl *D) {
+ DeclVisitor<ASTDeclReader, void>::Visit(D);
+
+ if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) {
+ if (DD->DeclInfo) {
+ DeclaratorDecl::ExtInfo *Info =
+ DD->DeclInfo.get<DeclaratorDecl::ExtInfo *>();
+ Info->TInfo =
+ GetTypeSourceInfo(Record, Idx);
+ }
+ else {
+ DD->DeclInfo = GetTypeSourceInfo(Record, Idx);
+ }
+ }
+
+ if (TypeDecl *TD = dyn_cast<TypeDecl>(D)) {
+ // If we have a fully initialized TypeDecl, we can safely read its type now.
+ TD->setTypeForDecl(Reader.GetType(TypeIDForTypeDecl).getTypePtrOrNull());
+ } else if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+ // If the ObjCInterfaceDecl is fully initialized, we can safely read its type now.
+ ID->TypeForDecl = Reader.GetType(TypeIDForTypeDecl).getTypePtrOrNull();
+ } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // FunctionDecl's body was written last after all other Stmts/Exprs.
+ if (Record[Idx++])
+ FD->setLazyBody(GetCurrentCursorOffset());
+ } else if (D->isTemplateParameter()) {
+ // If we have a fully initialized template parameter, we can now
+ // set its DeclContext.
+ DeclContext *SemaDC = cast<DeclContext>(
+ Reader.GetDecl(DeclContextIDForTemplateParmDecl));
+ DeclContext *LexicalDC = cast<DeclContext>(
+ Reader.GetDecl(LexicalDeclContextIDForTemplateParmDecl));
+ D->setDeclContextsImpl(SemaDC, LexicalDC, Reader.getContext());
+ }
+}
+
+void ASTDeclReader::VisitDecl(Decl *D) {
+ if (D->isTemplateParameter()) {
+ // We don't want to deserialize the DeclContext of a template
+ // parameter immediately, because the template parameter might be
+ // used in the formulation of its DeclContext. Use the translation
+ // unit DeclContext as a placeholder.
+ DeclContextIDForTemplateParmDecl = ReadDeclID(Record, Idx);
+ LexicalDeclContextIDForTemplateParmDecl = ReadDeclID(Record, Idx);
+ D->setDeclContext(Reader.getContext().getTranslationUnitDecl());
+ } else {
+ DeclContext *SemaDC = ReadDeclAs<DeclContext>(Record, Idx);
+ DeclContext *LexicalDC = ReadDeclAs<DeclContext>(Record, Idx);
+ // Avoid calling setLexicalDeclContext() directly because it uses
+ // Decl::getASTContext() internally, which is unsafe during deserialization.
+ D->setDeclContextsImpl(SemaDC, LexicalDC, Reader.getContext());
+ }
+ D->setLocation(Reader.ReadSourceLocation(F, RawLocation));
+ D->setInvalidDecl(Record[Idx++]);
+ if (Record[Idx++]) { // hasAttrs
+ AttrVec Attrs;
+ Reader.ReadAttributes(F, Attrs, Record, Idx);
+ // Avoid calling setAttrs() directly because it uses Decl::getASTContext()
+ // internally, which is unsafe during deserialization.
+ D->setAttrsImpl(Attrs, Reader.getContext());
+ }
+ D->setImplicit(Record[Idx++]);
+ D->setUsed(Record[Idx++]);
+ D->setReferenced(Record[Idx++]);
+ D->setTopLevelDeclInObjCContainer(Record[Idx++]);
+ D->setAccess((AccessSpecifier)Record[Idx++]);
+ D->FromASTFile = true;
+ D->setModulePrivate(Record[Idx++]);
+ D->Hidden = D->isModulePrivate();
+
+ // Determine whether this declaration is part of a (sub)module. If so, it
+ // may not yet be visible.
+ if (unsigned SubmoduleID = readSubmoduleID(Record, Idx)) {
+ // Store the owning submodule ID in the declaration.
+ D->setOwningModuleID(SubmoduleID);
+
+ // Module-private declarations are never visible, so there is no work to do.
+ if (!D->isModulePrivate()) {
+ if (Module *Owner = Reader.getSubmodule(SubmoduleID)) {
+ if (Owner->NameVisibility != Module::AllVisible) {
+ // The owning module is not visible. Mark this declaration as hidden.
+ D->Hidden = true;
+
+ // Note that this declaration was hidden because its owning module is
+ // not yet visible.
+ Reader.HiddenNamesMap[Owner].push_back(D);
+ }
+ }
+ }
+ }
+}
+
+void ASTDeclReader::VisitTranslationUnitDecl(TranslationUnitDecl *TU) {
+ llvm_unreachable("Translation units are not serialized");
+}
+
+void ASTDeclReader::VisitNamedDecl(NamedDecl *ND) {
+ VisitDecl(ND);
+ ND->setDeclName(Reader.ReadDeclarationName(F, Record, Idx));
+}
+
+void ASTDeclReader::VisitTypeDecl(TypeDecl *TD) {
+ VisitNamedDecl(TD);
+ TD->setLocStart(ReadSourceLocation(Record, Idx));
+ // Delay type reading until after we have fully initialized the decl.
+ TypeIDForTypeDecl = Reader.getGlobalTypeID(F, Record[Idx++]);
+}
+
+void ASTDeclReader::VisitTypedefNameDecl(TypedefNameDecl *TD) {
+ RedeclarableResult Redecl = VisitRedeclarable(TD);
+ VisitTypeDecl(TD);
+
+ TD->setTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
+ mergeRedeclarable(TD, Redecl);
+}
+
+void ASTDeclReader::VisitTypedefDecl(TypedefDecl *TD) {
+ VisitTypedefNameDecl(TD);
+}
+
+void ASTDeclReader::VisitTypeAliasDecl(TypeAliasDecl *TD) {
+ VisitTypedefNameDecl(TD);
+}
+
+void ASTDeclReader::VisitTagDecl(TagDecl *TD) {
+ RedeclarableResult Redecl = VisitRedeclarable(TD);
+ VisitTypeDecl(TD);
+
+ TD->IdentifierNamespace = Record[Idx++];
+ TD->setTagKind((TagDecl::TagKind)Record[Idx++]);
+ TD->setCompleteDefinition(Record[Idx++]);
+ TD->setEmbeddedInDeclarator(Record[Idx++]);
+ TD->setFreeStanding(Record[Idx++]);
+ TD->setRBraceLoc(ReadSourceLocation(Record, Idx));
+
+ if (Record[Idx++]) { // hasExtInfo
+ TagDecl::ExtInfo *Info = new (Reader.getContext()) TagDecl::ExtInfo();
+ ReadQualifierInfo(*Info, Record, Idx);
+ TD->TypedefNameDeclOrQualifier = Info;
+ } else
+ TD->setTypedefNameForAnonDecl(ReadDeclAs<TypedefNameDecl>(Record, Idx));
+
+ mergeRedeclarable(TD, Redecl);
+}
+
+void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
+ VisitTagDecl(ED);
+ if (TypeSourceInfo *TI = Reader.GetTypeSourceInfo(F, Record, Idx))
+ ED->setIntegerTypeSourceInfo(TI);
+ else
+ ED->setIntegerType(Reader.readType(F, Record, Idx));
+ ED->setPromotionType(Reader.readType(F, Record, Idx));
+ ED->setNumPositiveBits(Record[Idx++]);
+ ED->setNumNegativeBits(Record[Idx++]);
+ ED->IsScoped = Record[Idx++];
+ ED->IsScopedUsingClassTag = Record[Idx++];
+ ED->IsFixed = Record[Idx++];
+
+ if (EnumDecl *InstED = ReadDeclAs<EnumDecl>(Record, Idx)) {
+ TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
+ SourceLocation POI = ReadSourceLocation(Record, Idx);
+ ED->setInstantiationOfMemberEnum(Reader.getContext(), InstED, TSK);
+ ED->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
+ }
+}
+
+void ASTDeclReader::VisitRecordDecl(RecordDecl *RD) {
+ VisitTagDecl(RD);
+ RD->setHasFlexibleArrayMember(Record[Idx++]);
+ RD->setAnonymousStructOrUnion(Record[Idx++]);
+ RD->setHasObjectMember(Record[Idx++]);
+}
+
+void ASTDeclReader::VisitValueDecl(ValueDecl *VD) {
+ VisitNamedDecl(VD);
+ VD->setType(Reader.readType(F, Record, Idx));
+}
+
+void ASTDeclReader::VisitEnumConstantDecl(EnumConstantDecl *ECD) {
+ VisitValueDecl(ECD);
+ if (Record[Idx++])
+ ECD->setInitExpr(Reader.ReadExpr(F));
+ ECD->setInitVal(Reader.ReadAPSInt(Record, Idx));
+}
+
+void ASTDeclReader::VisitDeclaratorDecl(DeclaratorDecl *DD) {
+ VisitValueDecl(DD);
+ DD->setInnerLocStart(ReadSourceLocation(Record, Idx));
+ if (Record[Idx++]) { // hasExtInfo
+ DeclaratorDecl::ExtInfo *Info
+ = new (Reader.getContext()) DeclaratorDecl::ExtInfo();
+ ReadQualifierInfo(*Info, Record, Idx);
+ DD->DeclInfo = Info;
+ }
+}
+
+void ASTDeclReader::VisitFunctionDecl(FunctionDecl *FD) {
+ RedeclarableResult Redecl = VisitRedeclarable(FD);
+ VisitDeclaratorDecl(FD);
+
+ ReadDeclarationNameLoc(FD->DNLoc, FD->getDeclName(), Record, Idx);
+ FD->IdentifierNamespace = Record[Idx++];
+
+ // FunctionDecl's body is handled last at ASTDeclReader::Visit,
+ // after everything else is read.
+
+ FD->SClass = (StorageClass)Record[Idx++];
+ FD->SClassAsWritten = (StorageClass)Record[Idx++];
+ FD->IsInline = Record[Idx++];
+ FD->IsInlineSpecified = Record[Idx++];
+ FD->IsVirtualAsWritten = Record[Idx++];
+ FD->IsPure = Record[Idx++];
+ FD->HasInheritedPrototype = Record[Idx++];
+ FD->HasWrittenPrototype = Record[Idx++];
+ FD->IsDeleted = Record[Idx++];
+ FD->IsTrivial = Record[Idx++];
+ FD->IsDefaulted = Record[Idx++];
+ FD->IsExplicitlyDefaulted = Record[Idx++];
+ FD->HasImplicitReturnZero = Record[Idx++];
+ FD->IsConstexpr = Record[Idx++];
+ FD->EndRangeLoc = ReadSourceLocation(Record, Idx);
+
+ switch ((FunctionDecl::TemplatedKind)Record[Idx++]) {
+ case FunctionDecl::TK_NonTemplate:
+ mergeRedeclarable(FD, Redecl);
+ break;
+ case FunctionDecl::TK_FunctionTemplate:
+ FD->setDescribedFunctionTemplate(ReadDeclAs<FunctionTemplateDecl>(Record,
+ Idx));
+ break;
+ case FunctionDecl::TK_MemberSpecialization: {
+ FunctionDecl *InstFD = ReadDeclAs<FunctionDecl>(Record, Idx);
+ TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
+ SourceLocation POI = ReadSourceLocation(Record, Idx);
+ FD->setInstantiationOfMemberFunction(Reader.getContext(), InstFD, TSK);
+ FD->getMemberSpecializationInfo()->setPointOfInstantiation(POI);
+ break;
+ }
+ case FunctionDecl::TK_FunctionTemplateSpecialization: {
+ FunctionTemplateDecl *Template = ReadDeclAs<FunctionTemplateDecl>(Record,
+ Idx);
+ TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
+
+ // Template arguments.
+ SmallVector<TemplateArgument, 8> TemplArgs;
+ Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx);
+
+ // Template args as written.
+ SmallVector<TemplateArgumentLoc, 8> TemplArgLocs;
+ SourceLocation LAngleLoc, RAngleLoc;
+ bool HasTemplateArgumentsAsWritten = Record[Idx++];
+ if (HasTemplateArgumentsAsWritten) {
+ unsigned NumTemplateArgLocs = Record[Idx++];
+ TemplArgLocs.reserve(NumTemplateArgLocs);
+ for (unsigned i=0; i != NumTemplateArgLocs; ++i)
+ TemplArgLocs.push_back(
+ Reader.ReadTemplateArgumentLoc(F, Record, Idx));
+
+ LAngleLoc = ReadSourceLocation(Record, Idx);
+ RAngleLoc = ReadSourceLocation(Record, Idx);
+ }
+
+ SourceLocation POI = ReadSourceLocation(Record, Idx);
+
+ ASTContext &C = Reader.getContext();
+ TemplateArgumentList *TemplArgList
+ = TemplateArgumentList::CreateCopy(C, TemplArgs.data(), TemplArgs.size());
+ TemplateArgumentListInfo TemplArgsInfo(LAngleLoc, RAngleLoc);
+ for (unsigned i=0, e = TemplArgLocs.size(); i != e; ++i)
+ TemplArgsInfo.addArgument(TemplArgLocs[i]);
+ FunctionTemplateSpecializationInfo *FTInfo
+ = FunctionTemplateSpecializationInfo::Create(C, FD, Template, TSK,
+ TemplArgList,
+ HasTemplateArgumentsAsWritten ? &TemplArgsInfo : 0,
+ POI);
+ FD->TemplateOrSpecialization = FTInfo;
+
+ if (FD->isCanonicalDecl()) { // if canonical add to template's set.
+ // The template that contains the specializations set. It's not safe to
+ // use getCanonicalDecl on Template since it may still be initializing.
+ FunctionTemplateDecl *CanonTemplate
+ = ReadDeclAs<FunctionTemplateDecl>(Record, Idx);
+ // Get the InsertPos by FindNodeOrInsertPos() instead of calling
+ // InsertNode(FTInfo) directly to avoid the getASTContext() call in
+ // FunctionTemplateSpecializationInfo's Profile().
+ // We avoid getASTContext because a decl in the parent hierarchy may
+ // be initializing.
+ llvm::FoldingSetNodeID ID;
+ FunctionTemplateSpecializationInfo::Profile(ID, TemplArgs.data(),
+ TemplArgs.size(), C);
+ void *InsertPos = 0;
+ CanonTemplate->getSpecializations().FindNodeOrInsertPos(ID, InsertPos);
+ assert(InsertPos && "Another specialization already inserted!");
+ CanonTemplate->getSpecializations().InsertNode(FTInfo, InsertPos);
+ }
+ break;
+ }
+ case FunctionDecl::TK_DependentFunctionTemplateSpecialization: {
+ // Templates.
+ UnresolvedSet<8> TemplDecls;
+ unsigned NumTemplates = Record[Idx++];
+ while (NumTemplates--)
+ TemplDecls.addDecl(ReadDeclAs<NamedDecl>(Record, Idx));
+
+ // Templates args.
+ TemplateArgumentListInfo TemplArgs;
+ unsigned NumArgs = Record[Idx++];
+ while (NumArgs--)
+ TemplArgs.addArgument(Reader.ReadTemplateArgumentLoc(F, Record, Idx));
+ TemplArgs.setLAngleLoc(ReadSourceLocation(Record, Idx));
+ TemplArgs.setRAngleLoc(ReadSourceLocation(Record, Idx));
+
+ FD->setDependentTemplateSpecialization(Reader.getContext(),
+ TemplDecls, TemplArgs);
+ break;
+ }
+ }
+
+ // Read in the parameters.
+ unsigned NumParams = Record[Idx++];
+ SmallVector<ParmVarDecl *, 16> Params;
+ Params.reserve(NumParams);
+ for (unsigned I = 0; I != NumParams; ++I)
+ Params.push_back(ReadDeclAs<ParmVarDecl>(Record, Idx));
+ FD->setParams(Reader.getContext(), Params);
+}
+
+void ASTDeclReader::VisitObjCMethodDecl(ObjCMethodDecl *MD) {
+ VisitNamedDecl(MD);
+ if (Record[Idx++]) {
+ // In practice, this won't be executed (since method definitions
+ // don't occur in header files).
+ MD->setBody(Reader.ReadStmt(F));
+ MD->setSelfDecl(ReadDeclAs<ImplicitParamDecl>(Record, Idx));
+ MD->setCmdDecl(ReadDeclAs<ImplicitParamDecl>(Record, Idx));
+ }
+ MD->setInstanceMethod(Record[Idx++]);
+ MD->setVariadic(Record[Idx++]);
+ MD->setSynthesized(Record[Idx++]);
+ MD->setDefined(Record[Idx++]);
+
+ MD->IsRedeclaration = Record[Idx++];
+ MD->HasRedeclaration = Record[Idx++];
+ if (MD->HasRedeclaration)
+ Reader.getContext().setObjCMethodRedeclaration(MD,
+ ReadDeclAs<ObjCMethodDecl>(Record, Idx));
+
+ MD->setDeclImplementation((ObjCMethodDecl::ImplementationControl)Record[Idx++]);
+ MD->setObjCDeclQualifier((Decl::ObjCDeclQualifier)Record[Idx++]);
+ MD->SetRelatedResultType(Record[Idx++]);
+ MD->setResultType(Reader.readType(F, Record, Idx));
+ MD->setResultTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
+ MD->setEndLoc(ReadSourceLocation(Record, Idx));
+ unsigned NumParams = Record[Idx++];
+ SmallVector<ParmVarDecl *, 16> Params;
+ Params.reserve(NumParams);
+ for (unsigned I = 0; I != NumParams; ++I)
+ Params.push_back(ReadDeclAs<ParmVarDecl>(Record, Idx));
+
+ MD->SelLocsKind = Record[Idx++];
+ unsigned NumStoredSelLocs = Record[Idx++];
+ SmallVector<SourceLocation, 16> SelLocs;
+ SelLocs.reserve(NumStoredSelLocs);
+ for (unsigned i = 0; i != NumStoredSelLocs; ++i)
+ SelLocs.push_back(ReadSourceLocation(Record, Idx));
+
+ MD->setParamsAndSelLocs(Reader.getContext(), Params, SelLocs);
+}
+
+void ASTDeclReader::VisitObjCContainerDecl(ObjCContainerDecl *CD) {
+ VisitNamedDecl(CD);
+ CD->setAtStartLoc(ReadSourceLocation(Record, Idx));
+ CD->setAtEndRange(ReadSourceRange(Record, Idx));
+}
+
+void ASTDeclReader::VisitObjCInterfaceDecl(ObjCInterfaceDecl *ID) {
+ RedeclarableResult Redecl = VisitRedeclarable(ID);
+ VisitObjCContainerDecl(ID);
+ TypeIDForTypeDecl = Reader.getGlobalTypeID(F, Record[Idx++]);
+ mergeRedeclarable(ID, Redecl);
+
+ if (Record[Idx++]) {
+ // Read the definition.
+ ID->allocateDefinitionData();
+
+ // Set the definition data of the canonical declaration, so other
+ // redeclarations will see it.
+ ID->getCanonicalDecl()->Data = ID->Data;
+
+ ObjCInterfaceDecl::DefinitionData &Data = ID->data();
+
+ // Read the superclass.
+ Data.SuperClass = ReadDeclAs<ObjCInterfaceDecl>(Record, Idx);
+ Data.SuperClassLoc = ReadSourceLocation(Record, Idx);
+
+ Data.EndLoc = ReadSourceLocation(Record, Idx);
+
+ // Read the directly referenced protocols and their SourceLocations.
+ unsigned NumProtocols = Record[Idx++];
+ SmallVector<ObjCProtocolDecl *, 16> Protocols;
+ Protocols.reserve(NumProtocols);
+ for (unsigned I = 0; I != NumProtocols; ++I)
+ Protocols.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
+ SmallVector<SourceLocation, 16> ProtoLocs;
+ ProtoLocs.reserve(NumProtocols);
+ for (unsigned I = 0; I != NumProtocols; ++I)
+ ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
+ ID->setProtocolList(Protocols.data(), NumProtocols, ProtoLocs.data(),
+ Reader.getContext());
+
+ // Read the transitive closure of protocols referenced by this class.
+ NumProtocols = Record[Idx++];
+ Protocols.clear();
+ Protocols.reserve(NumProtocols);
+ for (unsigned I = 0; I != NumProtocols; ++I)
+ Protocols.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
+ ID->data().AllReferencedProtocols.set(Protocols.data(), NumProtocols,
+ Reader.getContext());
+
+ // We will rebuild this list lazily.
+ ID->setIvarList(0);
+
+ // Note that we have deserialized a definition.
+ Reader.PendingDefinitions.insert(ID);
+
+ // Note that we've loaded this Objective-C class.
+ Reader.ObjCClassesLoaded.push_back(ID);
+ } else {
+ ID->Data = ID->getCanonicalDecl()->Data;
+ }
+}
+
+void ASTDeclReader::VisitObjCIvarDecl(ObjCIvarDecl *IVD) {
+ VisitFieldDecl(IVD);
+ IVD->setAccessControl((ObjCIvarDecl::AccessControl)Record[Idx++]);
+ // This field will be built lazily.
+ IVD->setNextIvar(0);
+ bool synth = Record[Idx++];
+ IVD->setSynthesize(synth);
+}
+
+void ASTDeclReader::VisitObjCProtocolDecl(ObjCProtocolDecl *PD) {
+ RedeclarableResult Redecl = VisitRedeclarable(PD);
+ VisitObjCContainerDecl(PD);
+ mergeRedeclarable(PD, Redecl);
+
+ if (Record[Idx++]) {
+ // Read the definition.
+ PD->allocateDefinitionData();
+
+ // Set the definition data of the canonical declaration, so other
+ // redeclarations will see it.
+ PD->getCanonicalDecl()->Data = PD->Data;
+
+ unsigned NumProtoRefs = Record[Idx++];
+ SmallVector<ObjCProtocolDecl *, 16> ProtoRefs;
+ ProtoRefs.reserve(NumProtoRefs);
+ for (unsigned I = 0; I != NumProtoRefs; ++I)
+ ProtoRefs.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
+ SmallVector<SourceLocation, 16> ProtoLocs;
+ ProtoLocs.reserve(NumProtoRefs);
+ for (unsigned I = 0; I != NumProtoRefs; ++I)
+ ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
+ PD->setProtocolList(ProtoRefs.data(), NumProtoRefs, ProtoLocs.data(),
+ Reader.getContext());
+
+ // Note that we have deserialized a definition.
+ Reader.PendingDefinitions.insert(PD);
+ } else {
+ PD->Data = PD->getCanonicalDecl()->Data;
+ }
+}
+
+void ASTDeclReader::VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *FD) {
+ VisitFieldDecl(FD);
+}
+
+void ASTDeclReader::VisitObjCCategoryDecl(ObjCCategoryDecl *CD) {
+ VisitObjCContainerDecl(CD);
+ CD->setCategoryNameLoc(ReadSourceLocation(Record, Idx));
+ CD->setIvarLBraceLoc(ReadSourceLocation(Record, Idx));
+ CD->setIvarRBraceLoc(ReadSourceLocation(Record, Idx));
+
+ // Note that this category has been deserialized. We do this before
+ // deserializing the interface declaration, so that it will consider this
+ // category.
+ Reader.CategoriesDeserialized.insert(CD);
+
+ CD->ClassInterface = ReadDeclAs<ObjCInterfaceDecl>(Record, Idx);
+ unsigned NumProtoRefs = Record[Idx++];
+ SmallVector<ObjCProtocolDecl *, 16> ProtoRefs;
+ ProtoRefs.reserve(NumProtoRefs);
+ for (unsigned I = 0; I != NumProtoRefs; ++I)
+ ProtoRefs.push_back(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
+ SmallVector<SourceLocation, 16> ProtoLocs;
+ ProtoLocs.reserve(NumProtoRefs);
+ for (unsigned I = 0; I != NumProtoRefs; ++I)
+ ProtoLocs.push_back(ReadSourceLocation(Record, Idx));
+ CD->setProtocolList(ProtoRefs.data(), NumProtoRefs, ProtoLocs.data(),
+ Reader.getContext());
+ CD->setHasSynthBitfield(Record[Idx++]);
+}
+
+void ASTDeclReader::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *CAD) {
+ VisitNamedDecl(CAD);
+ CAD->setClassInterface(ReadDeclAs<ObjCInterfaceDecl>(Record, Idx));
+}
+
+void ASTDeclReader::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
+ VisitNamedDecl(D);
+ D->setAtLoc(ReadSourceLocation(Record, Idx));
+ D->setLParenLoc(ReadSourceLocation(Record, Idx));
+ D->setType(GetTypeSourceInfo(Record, Idx));
+ // FIXME: stable encoding
+ D->setPropertyAttributes(
+ (ObjCPropertyDecl::PropertyAttributeKind)Record[Idx++]);
+ D->setPropertyAttributesAsWritten(
+ (ObjCPropertyDecl::PropertyAttributeKind)Record[Idx++]);
+ // FIXME: stable encoding
+ D->setPropertyImplementation(
+ (ObjCPropertyDecl::PropertyControl)Record[Idx++]);
+ D->setGetterName(Reader.ReadDeclarationName(F,Record, Idx).getObjCSelector());
+ D->setSetterName(Reader.ReadDeclarationName(F,Record, Idx).getObjCSelector());
+ D->setGetterMethodDecl(ReadDeclAs<ObjCMethodDecl>(Record, Idx));
+ D->setSetterMethodDecl(ReadDeclAs<ObjCMethodDecl>(Record, Idx));
+ D->setPropertyIvarDecl(ReadDeclAs<ObjCIvarDecl>(Record, Idx));
+}
+
+void ASTDeclReader::VisitObjCImplDecl(ObjCImplDecl *D) {
+ VisitObjCContainerDecl(D);
+ D->setClassInterface(ReadDeclAs<ObjCInterfaceDecl>(Record, Idx));
+}
+
+void ASTDeclReader::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
+ VisitObjCImplDecl(D);
+ D->setIdentifier(Reader.GetIdentifierInfo(F, Record, Idx));
+ D->CategoryNameLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTDeclReader::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
+ VisitObjCImplDecl(D);
+ D->setSuperClass(ReadDeclAs<ObjCInterfaceDecl>(Record, Idx));
+ D->setIvarLBraceLoc(ReadSourceLocation(Record, Idx));
+ D->setIvarRBraceLoc(ReadSourceLocation(Record, Idx));
+ llvm::tie(D->IvarInitializers, D->NumIvarInitializers)
+ = Reader.ReadCXXCtorInitializers(F, Record, Idx);
+ D->setHasSynthBitfield(Record[Idx++]);
+}
+
+
+void ASTDeclReader::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
+ VisitDecl(D);
+ D->setAtLoc(ReadSourceLocation(Record, Idx));
+ D->setPropertyDecl(ReadDeclAs<ObjCPropertyDecl>(Record, Idx));
+ D->PropertyIvarDecl = ReadDeclAs<ObjCIvarDecl>(Record, Idx);
+ D->IvarLoc = ReadSourceLocation(Record, Idx);
+ D->setGetterCXXConstructor(Reader.ReadExpr(F));
+ D->setSetterCXXAssignment(Reader.ReadExpr(F));
+}
+
+void ASTDeclReader::VisitFieldDecl(FieldDecl *FD) {
+ VisitDeclaratorDecl(FD);
+ FD->setMutable(Record[Idx++]);
+ int BitWidthOrInitializer = Record[Idx++];
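+ // 1 means a bit-width expression follows, 2 means an in-class initializer
+ // follows, and 0 means the field has neither.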
+ if (BitWidthOrInitializer == 1)
+ FD->setBitWidth(Reader.ReadExpr(F));
+ else if (BitWidthOrInitializer == 2)
+ FD->setInClassInitializer(Reader.ReadExpr(F));
+ if (!FD->getDeclName()) {
+ if (FieldDecl *Tmpl = ReadDeclAs<FieldDecl>(Record, Idx))
+ Reader.getContext().setInstantiatedFromUnnamedFieldDecl(FD, Tmpl);
+ }
+}
+
+void ASTDeclReader::VisitIndirectFieldDecl(IndirectFieldDecl *FD) {
+ VisitValueDecl(FD);
+
+ FD->ChainingSize = Record[Idx++];
+ assert(FD->ChainingSize >= 2 && "Anonymous chaining must be >= 2");
+  FD->Chaining = new (Reader.getContext()) NamedDecl*[FD->ChainingSize];
+
+ for (unsigned I = 0; I != FD->ChainingSize; ++I)
+ FD->Chaining[I] = ReadDeclAs<NamedDecl>(Record, Idx);
+}
+
+void ASTDeclReader::VisitVarDecl(VarDecl *VD) {
+ RedeclarableResult Redecl = VisitRedeclarable(VD);
+ VisitDeclaratorDecl(VD);
+
+ VD->VarDeclBits.SClass = (StorageClass)Record[Idx++];
+ VD->VarDeclBits.SClassAsWritten = (StorageClass)Record[Idx++];
+ VD->VarDeclBits.ThreadSpecified = Record[Idx++];
+ VD->VarDeclBits.InitStyle = Record[Idx++];
+ VD->VarDeclBits.ExceptionVar = Record[Idx++];
+ VD->VarDeclBits.NRVOVariable = Record[Idx++];
+ VD->VarDeclBits.CXXForRangeDecl = Record[Idx++];
+ VD->VarDeclBits.ARCPseudoStrong = Record[Idx++];
+
+ // Only true variables (not parameters or implicit parameters) can be merged.
+ if (VD->getKind() == Decl::Var)
+ mergeRedeclarable(VD, Redecl);
+
+ if (uint64_t Val = Record[Idx++]) {
+ VD->setInit(Reader.ReadExpr(F));
+ if (Val > 1) {
+ EvaluatedStmt *Eval = VD->ensureEvaluatedStmt();
+ Eval->CheckedICE = true;
+ Eval->IsICE = Val == 3;
+ }
+ }
+
+ if (Record[Idx++]) { // HasMemberSpecializationInfo.
+ VarDecl *Tmpl = ReadDeclAs<VarDecl>(Record, Idx);
+ TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
+ SourceLocation POI = ReadSourceLocation(Record, Idx);
+ Reader.getContext().setInstantiatedFromStaticDataMember(VD, Tmpl, TSK,POI);
+ }
+}
+
+void ASTDeclReader::VisitImplicitParamDecl(ImplicitParamDecl *PD) {
+ VisitVarDecl(PD);
+}
+
+void ASTDeclReader::VisitParmVarDecl(ParmVarDecl *PD) {
+ VisitVarDecl(PD);
+ unsigned isObjCMethodParam = Record[Idx++];
+ unsigned scopeDepth = Record[Idx++];
+ unsigned scopeIndex = Record[Idx++];
+ unsigned declQualifier = Record[Idx++];
+ if (isObjCMethodParam) {
+ assert(scopeDepth == 0);
+ PD->setObjCMethodScopeInfo(scopeIndex);
+ PD->ParmVarDeclBits.ScopeDepthOrObjCQuals = declQualifier;
+ } else {
+ PD->setScopeInfo(scopeDepth, scopeIndex);
+ }
+ PD->ParmVarDeclBits.IsKNRPromoted = Record[Idx++];
+ PD->ParmVarDeclBits.HasInheritedDefaultArg = Record[Idx++];
+ if (Record[Idx++]) // hasUninstantiatedDefaultArg.
+ PD->setUninstantiatedDefaultArg(Reader.ReadExpr(F));
+}
+
+void ASTDeclReader::VisitFileScopeAsmDecl(FileScopeAsmDecl *AD) {
+ VisitDecl(AD);
+ AD->setAsmString(cast<StringLiteral>(Reader.ReadExpr(F)));
+ AD->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTDeclReader::VisitBlockDecl(BlockDecl *BD) {
+ VisitDecl(BD);
+ BD->setBody(cast_or_null<CompoundStmt>(Reader.ReadStmt(F)));
+ BD->setSignatureAsWritten(GetTypeSourceInfo(Record, Idx));
+ unsigned NumParams = Record[Idx++];
+ SmallVector<ParmVarDecl *, 16> Params;
+ Params.reserve(NumParams);
+ for (unsigned I = 0; I != NumParams; ++I)
+ Params.push_back(ReadDeclAs<ParmVarDecl>(Record, Idx));
+ BD->setParams(Params);
+
+ bool capturesCXXThis = Record[Idx++];
+ unsigned numCaptures = Record[Idx++];
+ SmallVector<BlockDecl::Capture, 16> captures;
+ captures.reserve(numCaptures);
+ for (unsigned i = 0; i != numCaptures; ++i) {
+ VarDecl *decl = ReadDeclAs<VarDecl>(Record, Idx);
+ unsigned flags = Record[Idx++];
+ bool byRef = (flags & 1);
+ bool nested = (flags & 2);
+ Expr *copyExpr = ((flags & 4) ? Reader.ReadExpr(F) : 0);
+
+ captures.push_back(BlockDecl::Capture(decl, byRef, nested, copyExpr));
+ }
+ BD->setCaptures(Reader.getContext(), captures.begin(),
+ captures.end(), capturesCXXThis);
+}
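
VisitBlockDecl above decodes three capture properties from a single record word: bit 0 marks a by-reference capture, bit 1 a nested capture, and bit 2 signals that a copy expression follows in the stream. A minimal standalone sketch of that flag layout, using illustrative helpers rather than Clang's API:

#include <cassert>
#include <cstdint>

// Illustrative encoder mirroring the layout read in VisitBlockDecl:
// bit 0 = byref, bit 1 = nested, bit 2 = a copy expression is serialized next.
static uint64_t encodeCaptureFlags(bool ByRef, bool Nested, bool HasCopyExpr) {
  return (ByRef ? 1u : 0u) | (Nested ? 2u : 0u) | (HasCopyExpr ? 4u : 0u);
}

int main() {
  uint64_t Flags = encodeCaptureFlags(true, false, true);
  bool ByRef = (Flags & 1);        // decoded exactly as in the reader above
  bool Nested = (Flags & 2);
  bool HasCopyExpr = (Flags & 4);
  assert(ByRef && !Nested && HasCopyExpr);
  return 0;
}
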
+
+void ASTDeclReader::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
+ VisitDecl(D);
+ D->setLanguage((LinkageSpecDecl::LanguageIDs)Record[Idx++]);
+ D->setExternLoc(ReadSourceLocation(Record, Idx));
+ D->setRBraceLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTDeclReader::VisitLabelDecl(LabelDecl *D) {
+ VisitNamedDecl(D);
+ D->setLocStart(ReadSourceLocation(Record, Idx));
+}
+
+
+void ASTDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
+ RedeclarableResult Redecl = VisitRedeclarable(D);
+ VisitNamedDecl(D);
+ D->setInline(Record[Idx++]);
+ D->LocStart = ReadSourceLocation(Record, Idx);
+ D->RBraceLoc = ReadSourceLocation(Record, Idx);
+ mergeRedeclarable(D, Redecl);
+
+ if (Redecl.getFirstID() == ThisDeclID) {
+ // Each module has its own anonymous namespace, which is disjoint from
+ // any other module's anonymous namespaces, so don't attach the anonymous
+ // namespace at all.
+ NamespaceDecl *Anon = ReadDeclAs<NamespaceDecl>(Record, Idx);
+ if (F.Kind != MK_Module)
+ D->setAnonymousNamespace(Anon);
+ } else {
+ // Link this namespace back to the first declaration, which has already
+ // been deserialized.
+ D->AnonOrFirstNamespaceAndInline.setPointer(D->getFirstDeclaration());
+ }
+}
+
+void ASTDeclReader::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
+ VisitNamedDecl(D);
+ D->NamespaceLoc = ReadSourceLocation(Record, Idx);
+ D->IdentLoc = ReadSourceLocation(Record, Idx);
+ D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ D->Namespace = ReadDeclAs<NamedDecl>(Record, Idx);
+}
+
+void ASTDeclReader::VisitUsingDecl(UsingDecl *D) {
+ VisitNamedDecl(D);
+ D->setUsingLocation(ReadSourceLocation(Record, Idx));
+ D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ ReadDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record, Idx);
+ D->FirstUsingShadow.setPointer(ReadDeclAs<UsingShadowDecl>(Record, Idx));
+ D->setTypeName(Record[Idx++]);
+ if (NamedDecl *Pattern = ReadDeclAs<NamedDecl>(Record, Idx))
+ Reader.getContext().setInstantiatedFromUsingDecl(D, Pattern);
+}
+
+void ASTDeclReader::VisitUsingShadowDecl(UsingShadowDecl *D) {
+ VisitNamedDecl(D);
+ D->setTargetDecl(ReadDeclAs<NamedDecl>(Record, Idx));
+ D->UsingOrNextShadow = ReadDeclAs<NamedDecl>(Record, Idx);
+ UsingShadowDecl *Pattern = ReadDeclAs<UsingShadowDecl>(Record, Idx);
+ if (Pattern)
+ Reader.getContext().setInstantiatedFromUsingShadowDecl(D, Pattern);
+}
+
+void ASTDeclReader::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
+ VisitNamedDecl(D);
+ D->UsingLoc = ReadSourceLocation(Record, Idx);
+ D->NamespaceLoc = ReadSourceLocation(Record, Idx);
+ D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ D->NominatedNamespace = ReadDeclAs<NamedDecl>(Record, Idx);
+ D->CommonAncestor = ReadDeclAs<DeclContext>(Record, Idx);
+}
+
+void ASTDeclReader::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
+ VisitValueDecl(D);
+ D->setUsingLoc(ReadSourceLocation(Record, Idx));
+ D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ ReadDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record, Idx);
+}
+
+void ASTDeclReader::VisitUnresolvedUsingTypenameDecl(
+ UnresolvedUsingTypenameDecl *D) {
+ VisitTypeDecl(D);
+ D->TypenameLocation = ReadSourceLocation(Record, Idx);
+ D->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+}
+
+void ASTDeclReader::ReadCXXDefinitionData(
+ struct CXXRecordDecl::DefinitionData &Data,
+ const RecordData &Record, unsigned &Idx) {
+ // Note: the caller has deserialized the IsLambda bit already.
+ Data.UserDeclaredConstructor = Record[Idx++];
+ Data.UserDeclaredCopyConstructor = Record[Idx++];
+ Data.UserDeclaredMoveConstructor = Record[Idx++];
+ Data.UserDeclaredCopyAssignment = Record[Idx++];
+ Data.UserDeclaredMoveAssignment = Record[Idx++];
+ Data.UserDeclaredDestructor = Record[Idx++];
+ Data.Aggregate = Record[Idx++];
+ Data.PlainOldData = Record[Idx++];
+ Data.Empty = Record[Idx++];
+ Data.Polymorphic = Record[Idx++];
+ Data.Abstract = Record[Idx++];
+ Data.IsStandardLayout = Record[Idx++];
+ Data.HasNoNonEmptyBases = Record[Idx++];
+ Data.HasPrivateFields = Record[Idx++];
+ Data.HasProtectedFields = Record[Idx++];
+ Data.HasPublicFields = Record[Idx++];
+ Data.HasMutableFields = Record[Idx++];
+ Data.HasOnlyCMembers = Record[Idx++];
+ Data.HasTrivialDefaultConstructor = Record[Idx++];
+ Data.HasConstexprNonCopyMoveConstructor = Record[Idx++];
+ Data.DefaultedDefaultConstructorIsConstexpr = Record[Idx++];
+ Data.DefaultedCopyConstructorIsConstexpr = Record[Idx++];
+ Data.DefaultedMoveConstructorIsConstexpr = Record[Idx++];
+ Data.HasConstexprDefaultConstructor = Record[Idx++];
+ Data.HasConstexprCopyConstructor = Record[Idx++];
+ Data.HasConstexprMoveConstructor = Record[Idx++];
+ Data.HasTrivialCopyConstructor = Record[Idx++];
+ Data.HasTrivialMoveConstructor = Record[Idx++];
+ Data.HasTrivialCopyAssignment = Record[Idx++];
+ Data.HasTrivialMoveAssignment = Record[Idx++];
+ Data.HasTrivialDestructor = Record[Idx++];
+ Data.HasIrrelevantDestructor = Record[Idx++];
+ Data.HasNonLiteralTypeFieldsOrBases = Record[Idx++];
+ Data.ComputedVisibleConversions = Record[Idx++];
+ Data.UserProvidedDefaultConstructor = Record[Idx++];
+ Data.DeclaredDefaultConstructor = Record[Idx++];
+ Data.DeclaredCopyConstructor = Record[Idx++];
+ Data.DeclaredMoveConstructor = Record[Idx++];
+ Data.DeclaredCopyAssignment = Record[Idx++];
+ Data.DeclaredMoveAssignment = Record[Idx++];
+ Data.DeclaredDestructor = Record[Idx++];
+ Data.FailedImplicitMoveConstructor = Record[Idx++];
+ Data.FailedImplicitMoveAssignment = Record[Idx++];
+
+ Data.NumBases = Record[Idx++];
+ if (Data.NumBases)
+ Data.Bases = Reader.readCXXBaseSpecifiers(F, Record, Idx);
+ Data.NumVBases = Record[Idx++];
+ if (Data.NumVBases)
+ Data.VBases = Reader.readCXXBaseSpecifiers(F, Record, Idx);
+
+ Reader.ReadUnresolvedSet(F, Data.Conversions, Record, Idx);
+ Reader.ReadUnresolvedSet(F, Data.VisibleConversions, Record, Idx);
+ assert(Data.Definition && "Data.Definition should be already set!");
+ Data.FirstFriend = ReadDeclAs<FriendDecl>(Record, Idx);
+
+ if (Data.IsLambda) {
+ typedef LambdaExpr::Capture Capture;
+ CXXRecordDecl::LambdaDefinitionData &Lambda
+ = static_cast<CXXRecordDecl::LambdaDefinitionData &>(Data);
+ Lambda.Dependent = Record[Idx++];
+ Lambda.NumCaptures = Record[Idx++];
+ Lambda.NumExplicitCaptures = Record[Idx++];
+ Lambda.ManglingNumber = Record[Idx++];
+ Lambda.ContextDecl = ReadDecl(Record, Idx);
+ Lambda.Captures
+ = (Capture*)Reader.Context.Allocate(sizeof(Capture)*Lambda.NumCaptures);
+ Capture *ToCapture = Lambda.Captures;
+ for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) {
+ SourceLocation Loc = ReadSourceLocation(Record, Idx);
+ bool IsImplicit = Record[Idx++];
+ LambdaCaptureKind Kind = static_cast<LambdaCaptureKind>(Record[Idx++]);
+ VarDecl *Var = ReadDeclAs<VarDecl>(Record, Idx);
+ SourceLocation EllipsisLoc = ReadSourceLocation(Record, Idx);
+ *ToCapture++ = Capture(Loc, IsImplicit, Kind, Var, EllipsisLoc);
+ }
+ }
+}
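
ReadCXXDefinitionData is the clearest instance of the deserialization idiom used throughout this file: a flat RecordData array is consumed left to right through a shared Idx cursor, one integer per field, in exactly the order the writer emitted them. A self-contained toy version of that pattern (FlagsBlob and readFlags are made-up stand-ins, not Clang types):

#include <cassert>
#include <cstdint>
#include <vector>

typedef std::vector<uint64_t> RecordData;   // stand-in for the serialized record

struct FlagsBlob {                          // stand-in for DefinitionData's bits
  bool Aggregate, PlainOldData, Empty;
  unsigned NumBases;
};

// The reader advances Idx past every field it consumes, so the caller can keep
// reading the same record where this helper left off.
static void readFlags(FlagsBlob &Out, const RecordData &Record, unsigned &Idx) {
  Out.Aggregate    = Record[Idx++];
  Out.PlainOldData = Record[Idx++];
  Out.Empty        = Record[Idx++];
  Out.NumBases     = Record[Idx++];
}

int main() {
  RecordData Record;
  Record.push_back(1); Record.push_back(0);
  Record.push_back(1); Record.push_back(2);

  FlagsBlob F;
  unsigned Idx = 0;
  readFlags(F, Record, Idx);
  assert(Idx == Record.size() && F.Aggregate && !F.PlainOldData && F.NumBases == 2);
  return 0;
}
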
+
+void ASTDeclReader::VisitCXXRecordDecl(CXXRecordDecl *D) {
+ VisitRecordDecl(D);
+
+ ASTContext &C = Reader.getContext();
+ if (Record[Idx++]) {
+ // Determine whether this is a lambda closure type, so that we can
+ // allocate the appropriate DefinitionData structure.
+ bool IsLambda = Record[Idx++];
+ if (IsLambda)
+ D->DefinitionData = new (C) CXXRecordDecl::LambdaDefinitionData(D, false);
+ else
+ D->DefinitionData = new (C) struct CXXRecordDecl::DefinitionData(D);
+
+ // Propagate the DefinitionData pointer to the canonical declaration, so
+ // that all other deserialized declarations will see it.
+ // FIXME: Complain if there already is a DefinitionData!
+ D->getCanonicalDecl()->DefinitionData = D->DefinitionData;
+
+ ReadCXXDefinitionData(*D->DefinitionData, Record, Idx);
+
+ // Note that we have deserialized a definition. Any declarations
+    // deserialized before this one will be given the DefinitionData pointer
+ // at the end.
+ Reader.PendingDefinitions.insert(D);
+ } else {
+ // Propagate DefinitionData pointer from the canonical declaration.
+ D->DefinitionData = D->getCanonicalDecl()->DefinitionData;
+ }
+
+ enum CXXRecKind {
+ CXXRecNotTemplate = 0, CXXRecTemplate, CXXRecMemberSpecialization
+ };
+ switch ((CXXRecKind)Record[Idx++]) {
+ case CXXRecNotTemplate:
+ break;
+ case CXXRecTemplate:
+ D->TemplateOrInstantiation = ReadDeclAs<ClassTemplateDecl>(Record, Idx);
+ break;
+ case CXXRecMemberSpecialization: {
+ CXXRecordDecl *RD = ReadDeclAs<CXXRecordDecl>(Record, Idx);
+ TemplateSpecializationKind TSK = (TemplateSpecializationKind)Record[Idx++];
+ SourceLocation POI = ReadSourceLocation(Record, Idx);
+ MemberSpecializationInfo *MSI = new (C) MemberSpecializationInfo(RD, TSK);
+ MSI->setPointOfInstantiation(POI);
+ D->TemplateOrInstantiation = MSI;
+ break;
+ }
+ }
+
+  // Load the key function so that we can compute it without having to
+  // deserialize every method of this class.
+ if (D->IsCompleteDefinition) {
+ if (CXXMethodDecl *Key = ReadDeclAs<CXXMethodDecl>(Record, Idx))
+ C.KeyFunctions[D] = Key;
+ }
+}
+
+void ASTDeclReader::VisitCXXMethodDecl(CXXMethodDecl *D) {
+ VisitFunctionDecl(D);
+  unsigned NumOverriddenMethods = Record[Idx++];
+  while (NumOverriddenMethods--) {
+    // Avoid the invariant checking in CXXMethodDecl::addOverriddenMethod,
+    // since MD may still be initializing.
+ if (CXXMethodDecl *MD = ReadDeclAs<CXXMethodDecl>(Record, Idx))
+ Reader.getContext().addOverriddenMethod(D, MD);
+ }
+}
+
+void ASTDeclReader::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
+ VisitCXXMethodDecl(D);
+
+ D->IsExplicitSpecified = Record[Idx++];
+ D->ImplicitlyDefined = Record[Idx++];
+ llvm::tie(D->CtorInitializers, D->NumCtorInitializers)
+ = Reader.ReadCXXCtorInitializers(F, Record, Idx);
+}
+
+void ASTDeclReader::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
+ VisitCXXMethodDecl(D);
+
+ D->ImplicitlyDefined = Record[Idx++];
+ D->OperatorDelete = ReadDeclAs<FunctionDecl>(Record, Idx);
+}
+
+void ASTDeclReader::VisitCXXConversionDecl(CXXConversionDecl *D) {
+ VisitCXXMethodDecl(D);
+ D->IsExplicitSpecified = Record[Idx++];
+}
+
+void ASTDeclReader::VisitImportDecl(ImportDecl *D) {
+ VisitDecl(D);
+ D->ImportedAndComplete.setPointer(readModule(Record, Idx));
+ D->ImportedAndComplete.setInt(Record[Idx++]);
+ SourceLocation *StoredLocs = reinterpret_cast<SourceLocation *>(D + 1);
+ for (unsigned I = 0, N = Record.back(); I != N; ++I)
+ StoredLocs[I] = ReadSourceLocation(Record, Idx);
+ ++Idx;
+}
+
+void ASTDeclReader::VisitAccessSpecDecl(AccessSpecDecl *D) {
+ VisitDecl(D);
+ D->setColonLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTDeclReader::VisitFriendDecl(FriendDecl *D) {
+ VisitDecl(D);
+ if (Record[Idx++])
+ D->Friend = GetTypeSourceInfo(Record, Idx);
+ else
+ D->Friend = ReadDeclAs<NamedDecl>(Record, Idx);
+ D->NextFriend = Record[Idx++];
+ D->UnsupportedFriend = (Record[Idx++] != 0);
+ D->FriendLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTDeclReader::VisitFriendTemplateDecl(FriendTemplateDecl *D) {
+ VisitDecl(D);
+ unsigned NumParams = Record[Idx++];
+ D->NumParams = NumParams;
+ D->Params = new TemplateParameterList*[NumParams];
+ for (unsigned i = 0; i != NumParams; ++i)
+ D->Params[i] = Reader.ReadTemplateParameterList(F, Record, Idx);
+ if (Record[Idx++]) // HasFriendDecl
+ D->Friend = ReadDeclAs<NamedDecl>(Record, Idx);
+ else
+ D->Friend = GetTypeSourceInfo(Record, Idx);
+ D->FriendLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTDeclReader::VisitTemplateDecl(TemplateDecl *D) {
+ VisitNamedDecl(D);
+
+ NamedDecl *TemplatedDecl = ReadDeclAs<NamedDecl>(Record, Idx);
+ TemplateParameterList* TemplateParams
+ = Reader.ReadTemplateParameterList(F, Record, Idx);
+ D->init(TemplatedDecl, TemplateParams);
+}
+
+ASTDeclReader::RedeclarableResult
+ASTDeclReader::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
+ RedeclarableResult Redecl = VisitRedeclarable(D);
+
+ // Make sure we've allocated the Common pointer first. We do this before
+ // VisitTemplateDecl so that getCommonPtr() can be used during initialization.
+ RedeclarableTemplateDecl *CanonD = D->getCanonicalDecl();
+ if (!CanonD->Common) {
+ CanonD->Common = CanonD->newCommon(Reader.getContext());
+ Reader.PendingDefinitions.insert(CanonD);
+ }
+ D->Common = CanonD->Common;
+
+ // If this is the first declaration of the template, fill in the information
+ // for the 'common' pointer.
+ if (ThisDeclID == Redecl.getFirstID()) {
+ if (RedeclarableTemplateDecl *RTD
+ = ReadDeclAs<RedeclarableTemplateDecl>(Record, Idx)) {
+ assert(RTD->getKind() == D->getKind() &&
+ "InstantiatedFromMemberTemplate kind mismatch");
+ D->setInstantiatedFromMemberTemplate(RTD);
+ if (Record[Idx++])
+ D->setMemberSpecialization();
+ }
+ }
+
+ VisitTemplateDecl(D);
+ D->IdentifierNamespace = Record[Idx++];
+
+ return Redecl;
+}
+
+void ASTDeclReader::VisitClassTemplateDecl(ClassTemplateDecl *D) {
+ RedeclarableResult Redecl = VisitRedeclarableTemplateDecl(D);
+
+ if (ThisDeclID == Redecl.getFirstID()) {
+ // This ClassTemplateDecl owns a CommonPtr; read it to keep track of all of
+ // the specializations.
+ SmallVector<serialization::DeclID, 2> SpecIDs;
+ SpecIDs.push_back(0);
+
+ // Specializations.
+ unsigned Size = Record[Idx++];
+ SpecIDs[0] += Size;
+ for (unsigned I = 0; I != Size; ++I)
+ SpecIDs.push_back(ReadDeclID(Record, Idx));
+
+ // Partial specializations.
+ Size = Record[Idx++];
+ SpecIDs[0] += Size;
+ for (unsigned I = 0; I != Size; ++I)
+ SpecIDs.push_back(ReadDeclID(Record, Idx));
+
+ if (SpecIDs[0]) {
+ typedef serialization::DeclID DeclID;
+
+ ClassTemplateDecl::Common *CommonPtr = D->getCommonPtr();
+ // FIXME: Append specializations!
+ CommonPtr->LazySpecializations
+ = new (Reader.getContext()) DeclID [SpecIDs.size()];
+ memcpy(CommonPtr->LazySpecializations, SpecIDs.data(),
+ SpecIDs.size() * sizeof(DeclID));
+ }
+
+ // InjectedClassNameType is computed.
+ }
+}
+
+void ASTDeclReader::VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D) {
+ VisitCXXRecordDecl(D);
+
+ ASTContext &C = Reader.getContext();
+ if (Decl *InstD = ReadDecl(Record, Idx)) {
+ if (ClassTemplateDecl *CTD = dyn_cast<ClassTemplateDecl>(InstD)) {
+ D->SpecializedTemplate = CTD;
+ } else {
+ SmallVector<TemplateArgument, 8> TemplArgs;
+ Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx);
+ TemplateArgumentList *ArgList
+ = TemplateArgumentList::CreateCopy(C, TemplArgs.data(),
+ TemplArgs.size());
+ ClassTemplateSpecializationDecl::SpecializedPartialSpecialization *PS
+ = new (C) ClassTemplateSpecializationDecl::
+ SpecializedPartialSpecialization();
+ PS->PartialSpecialization
+ = cast<ClassTemplatePartialSpecializationDecl>(InstD);
+ PS->TemplateArgs = ArgList;
+ D->SpecializedTemplate = PS;
+ }
+ }
+
+ // Explicit info.
+ if (TypeSourceInfo *TyInfo = GetTypeSourceInfo(Record, Idx)) {
+ ClassTemplateSpecializationDecl::ExplicitSpecializationInfo *ExplicitInfo
+ = new (C) ClassTemplateSpecializationDecl::ExplicitSpecializationInfo;
+ ExplicitInfo->TypeAsWritten = TyInfo;
+ ExplicitInfo->ExternLoc = ReadSourceLocation(Record, Idx);
+ ExplicitInfo->TemplateKeywordLoc = ReadSourceLocation(Record, Idx);
+ D->ExplicitInfo = ExplicitInfo;
+ }
+
+ SmallVector<TemplateArgument, 8> TemplArgs;
+ Reader.ReadTemplateArgumentList(TemplArgs, F, Record, Idx);
+ D->TemplateArgs = TemplateArgumentList::CreateCopy(C, TemplArgs.data(),
+ TemplArgs.size());
+ D->PointOfInstantiation = ReadSourceLocation(Record, Idx);
+ D->SpecializationKind = (TemplateSpecializationKind)Record[Idx++];
+
+ if (D->isCanonicalDecl()) { // It's kept in the folding set.
+ ClassTemplateDecl *CanonPattern = ReadDeclAs<ClassTemplateDecl>(Record,Idx);
+ if (ClassTemplatePartialSpecializationDecl *Partial
+ = dyn_cast<ClassTemplatePartialSpecializationDecl>(D)) {
+ CanonPattern->getCommonPtr()->PartialSpecializations.InsertNode(Partial);
+ } else {
+ CanonPattern->getCommonPtr()->Specializations.InsertNode(D);
+ }
+ }
+}
+
+void ASTDeclReader::VisitClassTemplatePartialSpecializationDecl(
+ ClassTemplatePartialSpecializationDecl *D) {
+ VisitClassTemplateSpecializationDecl(D);
+
+ ASTContext &C = Reader.getContext();
+ D->TemplateParams = Reader.ReadTemplateParameterList(F, Record, Idx);
+
+ unsigned NumArgs = Record[Idx++];
+ if (NumArgs) {
+ D->NumArgsAsWritten = NumArgs;
+ D->ArgsAsWritten = new (C) TemplateArgumentLoc[NumArgs];
+ for (unsigned i=0; i != NumArgs; ++i)
+ D->ArgsAsWritten[i] = Reader.ReadTemplateArgumentLoc(F, Record, Idx);
+ }
+
+ D->SequenceNumber = Record[Idx++];
+
+ // These are read/set from/to the first declaration.
+ if (D->getPreviousDecl() == 0) {
+ D->InstantiatedFromMember.setPointer(
+ ReadDeclAs<ClassTemplatePartialSpecializationDecl>(Record, Idx));
+ D->InstantiatedFromMember.setInt(Record[Idx++]);
+ }
+}
+
+void ASTDeclReader::VisitClassScopeFunctionSpecializationDecl(
+ ClassScopeFunctionSpecializationDecl *D) {
+ VisitDecl(D);
+ D->Specialization = ReadDeclAs<CXXMethodDecl>(Record, Idx);
+}
+
+void ASTDeclReader::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
+ RedeclarableResult Redecl = VisitRedeclarableTemplateDecl(D);
+
+ if (ThisDeclID == Redecl.getFirstID()) {
+ // This FunctionTemplateDecl owns a CommonPtr; read it.
+
+ // Read the function specialization declarations.
+ // FunctionTemplateDecl's FunctionTemplateSpecializationInfos are filled
+ // when reading the specialized FunctionDecl.
+ unsigned NumSpecs = Record[Idx++];
+ while (NumSpecs--)
+ (void)ReadDecl(Record, Idx);
+ }
+}
+
+void ASTDeclReader::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
+ VisitTypeDecl(D);
+
+ D->setDeclaredWithTypename(Record[Idx++]);
+
+ bool Inherited = Record[Idx++];
+ TypeSourceInfo *DefArg = GetTypeSourceInfo(Record, Idx);
+ D->setDefaultArgument(DefArg, Inherited);
+}
+
+void ASTDeclReader::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
+ VisitDeclaratorDecl(D);
+ // TemplateParmPosition.
+ D->setDepth(Record[Idx++]);
+ D->setPosition(Record[Idx++]);
+ if (D->isExpandedParameterPack()) {
+ void **Data = reinterpret_cast<void **>(D + 1);
+ for (unsigned I = 0, N = D->getNumExpansionTypes(); I != N; ++I) {
+ Data[2*I] = Reader.readType(F, Record, Idx).getAsOpaquePtr();
+ Data[2*I + 1] = GetTypeSourceInfo(Record, Idx);
+ }
+ } else {
+ // Rest of NonTypeTemplateParmDecl.
+ D->ParameterPack = Record[Idx++];
+ if (Record[Idx++]) {
+ Expr *DefArg = Reader.ReadExpr(F);
+ bool Inherited = Record[Idx++];
+ D->setDefaultArgument(DefArg, Inherited);
+ }
+ }
+}
+
+void ASTDeclReader::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
+ VisitTemplateDecl(D);
+ // TemplateParmPosition.
+ D->setDepth(Record[Idx++]);
+ D->setPosition(Record[Idx++]);
+ // Rest of TemplateTemplateParmDecl.
+ TemplateArgumentLoc Arg = Reader.ReadTemplateArgumentLoc(F, Record, Idx);
+ bool IsInherited = Record[Idx++];
+ D->setDefaultArgument(Arg, IsInherited);
+ D->ParameterPack = Record[Idx++];
+}
+
+void ASTDeclReader::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
+ VisitRedeclarableTemplateDecl(D);
+}
+
+void ASTDeclReader::VisitStaticAssertDecl(StaticAssertDecl *D) {
+ VisitDecl(D);
+ D->AssertExpr = Reader.ReadExpr(F);
+ D->Message = cast<StringLiteral>(Reader.ReadExpr(F));
+ D->RParenLoc = ReadSourceLocation(Record, Idx);
+}
+
+std::pair<uint64_t, uint64_t>
+ASTDeclReader::VisitDeclContext(DeclContext *DC) {
+ uint64_t LexicalOffset = Record[Idx++];
+ uint64_t VisibleOffset = Record[Idx++];
+ return std::make_pair(LexicalOffset, VisibleOffset);
+}
+
+template <typename T>
+ASTDeclReader::RedeclarableResult
+ASTDeclReader::VisitRedeclarable(Redeclarable<T> *D) {
+ DeclID FirstDeclID = ReadDeclID(Record, Idx);
+
+ // 0 indicates that this declaration was the only declaration of its entity,
+ // and is used for space optimization.
+ if (FirstDeclID == 0)
+ FirstDeclID = ThisDeclID;
+
+ T *FirstDecl = cast_or_null<T>(Reader.GetDecl(FirstDeclID));
+ if (FirstDecl != D) {
+ // We delay loading of the redeclaration chain to avoid deeply nested calls.
+    // We temporarily set the first (canonical) declaration as the previous one,
+    // which is the one that matters, and mark the real previous DeclID to be
+    // loaded & attached later on.
+ D->RedeclLink = typename Redeclarable<T>::PreviousDeclLink(FirstDecl);
+ }
+
+ // Note that this declaration has been deserialized.
+ Reader.RedeclsDeserialized.insert(static_cast<T *>(D));
+
+ // The result structure takes care to note that we need to load the
+ // other declaration chains for this ID.
+ return RedeclarableResult(Reader, FirstDeclID);
+}
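
VisitRedeclarable stores 0 as the first-declaration ID for the common case in which a declaration is its own entire redeclaration chain, saving a cross-reference in the file; any other value names the canonical declaration to link to. A small sketch of just that decode step, with DeclID as a bare integer rather than the real serialization type:

#include <cassert>

typedef unsigned DeclID;   // stand-in for serialization::DeclID

// Mirrors the convention above: a stored first-decl ID of 0 means "this
// declaration is the only, and therefore first, declaration of its entity".
static DeclID resolveFirstDeclID(DeclID Stored, DeclID ThisDeclID) {
  return Stored == 0 ? ThisDeclID : Stored;
}

int main() {
  assert(resolveFirstDeclID(0, 42) == 42);   // sole declaration of its entity
  assert(resolveFirstDeclID(17, 42) == 17);  // chain is rooted at decl 17
  return 0;
}
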
+
+/// \brief Attempts to merge the given declaration (D) with another declaration
+/// of the same entity.
+template<typename T>
+void ASTDeclReader::mergeRedeclarable(Redeclarable<T> *D,
+ RedeclarableResult &Redecl) {
+ // If modules are not available, there is no reason to perform this merge.
+ if (!Reader.getContext().getLangOpts().Modules)
+ return;
+
+ if (FindExistingResult ExistingRes = findExisting(static_cast<T*>(D))) {
+ if (T *Existing = ExistingRes) {
+ T *ExistingCanon = Existing->getCanonicalDecl();
+ T *DCanon = static_cast<T*>(D)->getCanonicalDecl();
+ if (ExistingCanon != DCanon) {
+ // Have our redeclaration link point back at the canonical declaration
+ // of the existing declaration, so that this declaration has the
+ // appropriate canonical declaration.
+ D->RedeclLink
+ = typename Redeclarable<T>::PreviousDeclLink(ExistingCanon);
+
+ // When we merge a namespace, update its pointer to the first namespace.
+ if (NamespaceDecl *Namespace
+ = dyn_cast<NamespaceDecl>(static_cast<T*>(D))) {
+ Namespace->AnonOrFirstNamespaceAndInline.setPointer(
+ static_cast<NamespaceDecl *>(static_cast<void*>(ExistingCanon)));
+ }
+
+ // Don't introduce DCanon into the set of pending declaration chains.
+ Redecl.suppress();
+
+ // Introduce ExistingCanon into the set of pending declaration chains,
+ // if in fact it came from a module file.
+ if (ExistingCanon->isFromASTFile()) {
+ GlobalDeclID ExistingCanonID = ExistingCanon->getGlobalID();
+ assert(ExistingCanonID && "Unrecorded canonical declaration ID?");
+ if (Reader.PendingDeclChainsKnown.insert(ExistingCanonID))
+ Reader.PendingDeclChains.push_back(ExistingCanonID);
+ }
+
+ // If this declaration was the canonical declaration, make a note of
+ // that. We accept the linear algorithm here because the number of
+ // unique canonical declarations of an entity should always be tiny.
+ if (DCanon == static_cast<T*>(D)) {
+ SmallVectorImpl<DeclID> &Merged = Reader.MergedDecls[ExistingCanon];
+ if (std::find(Merged.begin(), Merged.end(), Redecl.getFirstID())
+ == Merged.end())
+ Merged.push_back(Redecl.getFirstID());
+
+ // If ExistingCanon did not come from a module file, introduce the
+ // first declaration that *does* come from a module file to the
+ // set of pending declaration chains, so that we merge this
+ // declaration.
+ if (!ExistingCanon->isFromASTFile() &&
+ Reader.PendingDeclChainsKnown.insert(Redecl.getFirstID()))
+ Reader.PendingDeclChains.push_back(Merged[0]);
+ }
+ }
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Attribute Reading
+//===----------------------------------------------------------------------===//
+
+/// \brief Reads attributes from the current stream position.
+void ASTReader::ReadAttributes(ModuleFile &F, AttrVec &Attrs,
+ const RecordData &Record, unsigned &Idx) {
+ for (unsigned i = 0, e = Record[Idx++]; i != e; ++i) {
+ Attr *New = 0;
+ attr::Kind Kind = (attr::Kind)Record[Idx++];
+ SourceRange Range = ReadSourceRange(F, Record, Idx);
+
+#include "clang/Serialization/AttrPCHRead.inc"
+
+ assert(New && "Unable to decode attribute?");
+ Attrs.push_back(New);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// ASTReader Implementation
+//===----------------------------------------------------------------------===//
+
+/// \brief Note that we have loaded the declaration with the given
+/// Index.
+///
+/// This routine notes that this declaration has already been loaded,
+/// so that future GetDecl calls will return this declaration rather
+/// than trying to load a new declaration.
+inline void ASTReader::LoadedDecl(unsigned Index, Decl *D) {
+ assert(!DeclsLoaded[Index] && "Decl loaded twice?");
+ DeclsLoaded[Index] = D;
+}
+
+
+/// \brief Determine whether the consumer will be interested in seeing
+/// this declaration (via HandleTopLevelDecl).
+///
+/// This routine should return true for anything that might affect
+/// code generation, e.g., inline function definitions, Objective-C
+/// declarations with metadata, etc.
+static bool isConsumerInterestedIn(Decl *D) {
+  // An ObjCMethodDecl is never considered "interesting" because its
+ // implementation container always is.
+
+ if (isa<FileScopeAsmDecl>(D) ||
+ isa<ObjCProtocolDecl>(D) ||
+ isa<ObjCImplDecl>(D))
+ return true;
+ if (VarDecl *Var = dyn_cast<VarDecl>(D))
+ return Var->isFileVarDecl() &&
+ Var->isThisDeclarationADefinition() == VarDecl::Definition;
+ if (FunctionDecl *Func = dyn_cast<FunctionDecl>(D))
+ return Func->doesThisDeclarationHaveABody();
+
+ return false;
+}
+
+/// \brief Get the correct cursor and offset for loading a declaration.
+ASTReader::RecordLocation
+ASTReader::DeclCursorForID(DeclID ID, unsigned &RawLocation) {
+ // See if there's an override.
+ DeclReplacementMap::iterator It = ReplacedDecls.find(ID);
+ if (It != ReplacedDecls.end()) {
+ RawLocation = It->second.RawLoc;
+ return RecordLocation(It->second.Mod, It->second.Offset);
+ }
+
+ GlobalDeclMapType::iterator I = GlobalDeclMap.find(ID);
+ assert(I != GlobalDeclMap.end() && "Corrupted global declaration map");
+ ModuleFile *M = I->second;
+ const DeclOffset &
+ DOffs = M->DeclOffsets[ID - M->BaseDeclID - NUM_PREDEF_DECL_IDS];
+ RawLocation = DOffs.Loc;
+ return RecordLocation(M, DOffs.BitOffset);
+}
+
+ASTReader::RecordLocation ASTReader::getLocalBitOffset(uint64_t GlobalOffset) {
+ ContinuousRangeMap<uint64_t, ModuleFile*, 4>::iterator I
+ = GlobalBitOffsetsMap.find(GlobalOffset);
+
+ assert(I != GlobalBitOffsetsMap.end() && "Corrupted global bit offsets map");
+ return RecordLocation(I->second, GlobalOffset - I->second->GlobalBitOffset);
+}
+
+uint64_t ASTReader::getGlobalBitOffset(ModuleFile &M, uint32_t LocalOffset) {
+ return LocalOffset + M.GlobalBitOffset;
+}
+
+/// \brief Determine whether the two declarations refer to the same entity.
+static bool isSameEntity(NamedDecl *X, NamedDecl *Y) {
+ assert(X->getDeclName() == Y->getDeclName() && "Declaration name mismatch!");
+
+ if (X == Y)
+ return true;
+
+ // Must be in the same context.
+ if (!X->getDeclContext()->getRedeclContext()->Equals(
+ Y->getDeclContext()->getRedeclContext()))
+ return false;
+
+ // Two typedefs refer to the same entity if they have the same underlying
+ // type.
+ if (TypedefNameDecl *TypedefX = dyn_cast<TypedefNameDecl>(X))
+ if (TypedefNameDecl *TypedefY = dyn_cast<TypedefNameDecl>(Y))
+ return X->getASTContext().hasSameType(TypedefX->getUnderlyingType(),
+ TypedefY->getUnderlyingType());
+
+ // Must have the same kind.
+ if (X->getKind() != Y->getKind())
+ return false;
+
+ // Objective-C classes and protocols with the same name always match.
+ if (isa<ObjCInterfaceDecl>(X) || isa<ObjCProtocolDecl>(X))
+ return true;
+
+ // Compatible tags match.
+ if (TagDecl *TagX = dyn_cast<TagDecl>(X)) {
+ TagDecl *TagY = cast<TagDecl>(Y);
+ return (TagX->getTagKind() == TagY->getTagKind()) ||
+ ((TagX->getTagKind() == TTK_Struct || TagX->getTagKind() == TTK_Class) &&
+ (TagY->getTagKind() == TTK_Struct || TagY->getTagKind() == TTK_Class));
+ }
+
+ // Functions with the same type and linkage match.
+ // FIXME: This needs to cope with function templates, merging of
+  // prototyped/non-prototyped functions, etc.
+ if (FunctionDecl *FuncX = dyn_cast<FunctionDecl>(X)) {
+ FunctionDecl *FuncY = cast<FunctionDecl>(Y);
+ return (FuncX->getLinkage() == FuncY->getLinkage()) &&
+ FuncX->getASTContext().hasSameType(FuncX->getType(), FuncY->getType());
+ }
+
+ // Variables with the same type and linkage match.
+ if (VarDecl *VarX = dyn_cast<VarDecl>(X)) {
+ VarDecl *VarY = cast<VarDecl>(Y);
+ return (VarX->getLinkage() == VarY->getLinkage()) &&
+ VarX->getASTContext().hasSameType(VarX->getType(), VarY->getType());
+ }
+
+ // Namespaces with the same name and inlinedness match.
+ if (NamespaceDecl *NamespaceX = dyn_cast<NamespaceDecl>(X)) {
+ NamespaceDecl *NamespaceY = cast<NamespaceDecl>(Y);
+ return NamespaceX->isInline() == NamespaceY->isInline();
+ }
+
+ // FIXME: Many other cases to implement.
+ return false;
+}
+
+ASTDeclReader::FindExistingResult::~FindExistingResult() {
+ if (!AddResult || Existing)
+ return;
+
+ DeclContext *DC = New->getDeclContext()->getRedeclContext();
+ if (DC->isTranslationUnit() && Reader.SemaObj) {
+ Reader.SemaObj->IdResolver.tryAddTopLevelDecl(New, New->getDeclName());
+ } else if (DC->isNamespace()) {
+ DC->addDecl(New);
+ }
+}
+
+ASTDeclReader::FindExistingResult ASTDeclReader::findExisting(NamedDecl *D) {
+ DeclarationName Name = D->getDeclName();
+ if (!Name) {
+ // Don't bother trying to find unnamed declarations.
+ FindExistingResult Result(Reader, D, /*Existing=*/0);
+ Result.suppress();
+ return Result;
+ }
+
+ DeclContext *DC = D->getDeclContext()->getRedeclContext();
+ if (!DC->isFileContext())
+ return FindExistingResult(Reader);
+
+ if (DC->isTranslationUnit() && Reader.SemaObj) {
+ IdentifierResolver &IdResolver = Reader.SemaObj->IdResolver;
+ for (IdentifierResolver::iterator I = IdResolver.begin(Name),
+ IEnd = IdResolver.end();
+ I != IEnd; ++I) {
+ if (isSameEntity(*I, D))
+ return FindExistingResult(Reader, D, *I);
+ }
+ }
+
+ if (DC->isNamespace()) {
+ for (DeclContext::lookup_result R = DC->lookup(Name);
+ R.first != R.second; ++R.first) {
+ if (isSameEntity(*R.first, D))
+ return FindExistingResult(Reader, D, *R.first);
+ }
+ }
+
+ return FindExistingResult(Reader, D, /*Existing=*/0);
+}
+
+void ASTDeclReader::attachPreviousDecl(Decl *D, Decl *previous) {
+ assert(D && previous);
+ if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ TD->RedeclLink.setPointer(cast<TagDecl>(previous));
+ } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ FD->RedeclLink.setPointer(cast<FunctionDecl>(previous));
+ } else if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ VD->RedeclLink.setPointer(cast<VarDecl>(previous));
+ } else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ TD->RedeclLink.setPointer(cast<TypedefNameDecl>(previous));
+ } else if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+ ID->RedeclLink.setPointer(cast<ObjCInterfaceDecl>(previous));
+ } else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
+ PD->RedeclLink.setPointer(cast<ObjCProtocolDecl>(previous));
+ } else if (NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D)) {
+ ND->RedeclLink.setPointer(cast<NamespaceDecl>(previous));
+ } else {
+ RedeclarableTemplateDecl *TD = cast<RedeclarableTemplateDecl>(D);
+ TD->RedeclLink.setPointer(cast<RedeclarableTemplateDecl>(previous));
+ }
+}
+
+void ASTDeclReader::attachLatestDecl(Decl *D, Decl *Latest) {
+ assert(D && Latest);
+ if (TagDecl *TD = dyn_cast<TagDecl>(D)) {
+ TD->RedeclLink
+ = Redeclarable<TagDecl>::LatestDeclLink(cast<TagDecl>(Latest));
+ } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ FD->RedeclLink
+ = Redeclarable<FunctionDecl>::LatestDeclLink(cast<FunctionDecl>(Latest));
+ } else if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ VD->RedeclLink
+ = Redeclarable<VarDecl>::LatestDeclLink(cast<VarDecl>(Latest));
+ } else if (TypedefNameDecl *TD = dyn_cast<TypedefNameDecl>(D)) {
+ TD->RedeclLink
+ = Redeclarable<TypedefNameDecl>::LatestDeclLink(
+ cast<TypedefNameDecl>(Latest));
+ } else if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(D)) {
+ ID->RedeclLink
+ = Redeclarable<ObjCInterfaceDecl>::LatestDeclLink(
+ cast<ObjCInterfaceDecl>(Latest));
+ } else if (ObjCProtocolDecl *PD = dyn_cast<ObjCProtocolDecl>(D)) {
+ PD->RedeclLink
+ = Redeclarable<ObjCProtocolDecl>::LatestDeclLink(
+ cast<ObjCProtocolDecl>(Latest));
+ } else if (NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D)) {
+ ND->RedeclLink
+ = Redeclarable<NamespaceDecl>::LatestDeclLink(
+ cast<NamespaceDecl>(Latest));
+ } else {
+ RedeclarableTemplateDecl *TD = cast<RedeclarableTemplateDecl>(D);
+ TD->RedeclLink
+ = Redeclarable<RedeclarableTemplateDecl>::LatestDeclLink(
+ cast<RedeclarableTemplateDecl>(Latest));
+ }
+}
+
+ASTReader::MergedDeclsMap::iterator
+ASTReader::combineStoredMergedDecls(Decl *Canon, GlobalDeclID CanonID) {
+ // If we don't have any stored merged declarations, just look in the
+ // merged declarations set.
+ StoredMergedDeclsMap::iterator StoredPos = StoredMergedDecls.find(CanonID);
+ if (StoredPos == StoredMergedDecls.end())
+ return MergedDecls.find(Canon);
+
+ // Append the stored merged declarations to the merged declarations set.
+ MergedDeclsMap::iterator Pos = MergedDecls.find(Canon);
+ if (Pos == MergedDecls.end())
+ Pos = MergedDecls.insert(std::make_pair(Canon,
+ SmallVector<DeclID, 2>())).first;
+ Pos->second.append(StoredPos->second.begin(), StoredPos->second.end());
+ StoredMergedDecls.erase(StoredPos);
+
+ // Sort and uniquify the set of merged declarations.
+ llvm::array_pod_sort(Pos->second.begin(), Pos->second.end());
+ Pos->second.erase(std::unique(Pos->second.begin(), Pos->second.end()),
+ Pos->second.end());
+ return Pos;
+}
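
combineStoredMergedDecls deduplicates the combined ID list with the usual sort-then-unique-then-erase idiom; llvm::array_pod_sort is simply a qsort-based sort for trivially comparable elements. The same shape on a plain std::vector:

#include <algorithm>
#include <cassert>
#include <vector>

int main() {
  // Stand-in for the SmallVector<DeclID, 2> of merged declaration IDs, which
  // may contain repeats once several module files have contributed entries.
  std::vector<unsigned> Merged;
  unsigned Raw[] = {7, 3, 7, 12, 3};
  Merged.assign(Raw, Raw + 5);

  std::sort(Merged.begin(), Merged.end());                 // array_pod_sort
  Merged.erase(std::unique(Merged.begin(), Merged.end()),  // drop adjacent dups
               Merged.end());

  assert(Merged.size() == 3 && Merged[0] == 3 && Merged[1] == 7 && Merged[2] == 12);
  return 0;
}
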
+
+void ASTReader::loadAndAttachPreviousDecl(Decl *D, serialization::DeclID ID) {
+ Decl *previous = GetDecl(ID);
+ ASTDeclReader::attachPreviousDecl(D, previous);
+}
+
+/// \brief Read the declaration at the given offset from the AST file.
+Decl *ASTReader::ReadDeclRecord(DeclID ID) {
+ unsigned Index = ID - NUM_PREDEF_DECL_IDS;
+ unsigned RawLocation = 0;
+ RecordLocation Loc = DeclCursorForID(ID, RawLocation);
+ llvm::BitstreamCursor &DeclsCursor = Loc.F->DeclsCursor;
+ // Keep track of where we are in the stream, then jump back there
+ // after reading this declaration.
+ SavedStreamPosition SavedPosition(DeclsCursor);
+
+ ReadingKindTracker ReadingKind(Read_Decl, *this);
+
+ // Note that we are loading a declaration record.
+ Deserializing ADecl(this);
+
+ DeclsCursor.JumpToBit(Loc.Offset);
+ RecordData Record;
+ unsigned Code = DeclsCursor.ReadCode();
+ unsigned Idx = 0;
+ ASTDeclReader Reader(*this, *Loc.F, DeclsCursor, ID, RawLocation, Record,Idx);
+
+ Decl *D = 0;
+ switch ((DeclCode)DeclsCursor.ReadRecord(Code, Record)) {
+ case DECL_CONTEXT_LEXICAL:
+ case DECL_CONTEXT_VISIBLE:
+ llvm_unreachable("Record cannot be de-serialized with ReadDeclRecord");
+ case DECL_TYPEDEF:
+ D = TypedefDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_TYPEALIAS:
+ D = TypeAliasDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_ENUM:
+ D = EnumDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_RECORD:
+ D = RecordDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_ENUM_CONSTANT:
+ D = EnumConstantDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_FUNCTION:
+ D = FunctionDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_LINKAGE_SPEC:
+ D = LinkageSpecDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_LABEL:
+ D = LabelDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_NAMESPACE:
+ D = NamespaceDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_NAMESPACE_ALIAS:
+ D = NamespaceAliasDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_USING:
+ D = UsingDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_USING_SHADOW:
+ D = UsingShadowDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_USING_DIRECTIVE:
+ D = UsingDirectiveDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_UNRESOLVED_USING_VALUE:
+ D = UnresolvedUsingValueDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_UNRESOLVED_USING_TYPENAME:
+ D = UnresolvedUsingTypenameDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CXX_RECORD:
+ D = CXXRecordDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CXX_METHOD:
+ D = CXXMethodDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CXX_CONSTRUCTOR:
+ D = CXXConstructorDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CXX_DESTRUCTOR:
+ D = CXXDestructorDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CXX_CONVERSION:
+ D = CXXConversionDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_ACCESS_SPEC:
+ D = AccessSpecDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_FRIEND:
+ D = FriendDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_FRIEND_TEMPLATE:
+ D = FriendTemplateDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CLASS_TEMPLATE:
+ D = ClassTemplateDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CLASS_TEMPLATE_SPECIALIZATION:
+ D = ClassTemplateSpecializationDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CLASS_TEMPLATE_PARTIAL_SPECIALIZATION:
+ D = ClassTemplatePartialSpecializationDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CLASS_SCOPE_FUNCTION_SPECIALIZATION:
+ D = ClassScopeFunctionSpecializationDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_FUNCTION_TEMPLATE:
+ D = FunctionTemplateDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_TEMPLATE_TYPE_PARM:
+ D = TemplateTypeParmDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_NON_TYPE_TEMPLATE_PARM:
+ D = NonTypeTemplateParmDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK:
+ D = NonTypeTemplateParmDecl::CreateDeserialized(Context, ID, Record[Idx++]);
+ break;
+ case DECL_TEMPLATE_TEMPLATE_PARM:
+ D = TemplateTemplateParmDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_TYPE_ALIAS_TEMPLATE:
+ D = TypeAliasTemplateDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_STATIC_ASSERT:
+ D = StaticAssertDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_METHOD:
+ D = ObjCMethodDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_INTERFACE:
+ D = ObjCInterfaceDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_IVAR:
+ D = ObjCIvarDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_PROTOCOL:
+ D = ObjCProtocolDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_AT_DEFS_FIELD:
+ D = ObjCAtDefsFieldDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_CATEGORY:
+ D = ObjCCategoryDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_CATEGORY_IMPL:
+ D = ObjCCategoryImplDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_IMPLEMENTATION:
+ D = ObjCImplementationDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_COMPATIBLE_ALIAS:
+ D = ObjCCompatibleAliasDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_PROPERTY:
+ D = ObjCPropertyDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_OBJC_PROPERTY_IMPL:
+ D = ObjCPropertyImplDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_FIELD:
+ D = FieldDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_INDIRECTFIELD:
+ D = IndirectFieldDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_VAR:
+ D = VarDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_IMPLICIT_PARAM:
+ D = ImplicitParamDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_PARM_VAR:
+ D = ParmVarDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_FILE_SCOPE_ASM:
+ D = FileScopeAsmDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_BLOCK:
+ D = BlockDecl::CreateDeserialized(Context, ID);
+ break;
+ case DECL_CXX_BASE_SPECIFIERS:
+ Error("attempt to read a C++ base-specifier record as a declaration");
+ return 0;
+ case DECL_IMPORT:
+ // Note: last entry of the ImportDecl record is the number of stored source
+ // locations.
+ D = ImportDecl::CreateDeserialized(Context, ID, Record.back());
+ break;
+ }
+
+ assert(D && "Unknown declaration reading AST file");
+ LoadedDecl(Index, D);
+ // Set the DeclContext before doing any deserialization, to make sure internal
+ // calls to Decl::getASTContext() by Decl's methods will find the
+ // TranslationUnitDecl without crashing.
+ D->setDeclContext(Context.getTranslationUnitDecl());
+ Reader.Visit(D);
+
+ // If this declaration is also a declaration context, get the
+ // offsets for its tables of lexical and visible declarations.
+ if (DeclContext *DC = dyn_cast<DeclContext>(D)) {
+ std::pair<uint64_t, uint64_t> Offsets = Reader.VisitDeclContext(DC);
+ if (Offsets.first || Offsets.second) {
+ if (Offsets.first != 0)
+ DC->setHasExternalLexicalStorage(true);
+ if (Offsets.second != 0)
+ DC->setHasExternalVisibleStorage(true);
+ if (ReadDeclContextStorage(*Loc.F, DeclsCursor, Offsets,
+ Loc.F->DeclContextInfos[DC]))
+ return 0;
+ }
+
+ // Now add the pending visible updates for this decl context, if it has any.
+ DeclContextVisibleUpdatesPending::iterator I =
+ PendingVisibleUpdates.find(ID);
+ if (I != PendingVisibleUpdates.end()) {
+ // There are updates. This means the context has external visible
+ // storage, even if the original stored version didn't.
+ DC->setHasExternalVisibleStorage(true);
+ DeclContextVisibleUpdates &U = I->second;
+ for (DeclContextVisibleUpdates::iterator UI = U.begin(), UE = U.end();
+ UI != UE; ++UI) {
+ UI->second->DeclContextInfos[DC].NameLookupTableData = UI->first;
+ }
+ PendingVisibleUpdates.erase(I);
+ }
+ }
+ assert(Idx == Record.size());
+
+ // Load any relevant update records.
+ loadDeclUpdateRecords(ID, D);
+
+ // Load the categories after recursive loading is finished.
+ if (ObjCInterfaceDecl *Class = dyn_cast<ObjCInterfaceDecl>(D))
+ if (Class->isThisDeclarationADefinition())
+ loadObjCCategories(ID, Class);
+
+ // If we have deserialized a declaration that has a definition the
+ // AST consumer might need to know about, queue it.
+ // We don't pass it to the consumer immediately because we may be in recursive
+ // loading, and some declarations may still be initializing.
+ if (isConsumerInterestedIn(D))
+ InterestingDecls.push_back(D);
+
+ return D;
+}
+
+void ASTReader::loadDeclUpdateRecords(serialization::DeclID ID, Decl *D) {
+ // The declaration may have been modified by files later in the chain.
+ // If this is the case, read the record containing the updates from each file
+ // and pass it to ASTDeclReader to make the modifications.
+ DeclUpdateOffsetsMap::iterator UpdI = DeclUpdateOffsets.find(ID);
+ if (UpdI != DeclUpdateOffsets.end()) {
+ FileOffsetsTy &UpdateOffsets = UpdI->second;
+ for (FileOffsetsTy::iterator
+ I = UpdateOffsets.begin(), E = UpdateOffsets.end(); I != E; ++I) {
+ ModuleFile *F = I->first;
+ uint64_t Offset = I->second;
+ llvm::BitstreamCursor &Cursor = F->DeclsCursor;
+ SavedStreamPosition SavedPosition(Cursor);
+ Cursor.JumpToBit(Offset);
+ RecordData Record;
+ unsigned Code = Cursor.ReadCode();
+ unsigned RecCode = Cursor.ReadRecord(Code, Record);
+ (void)RecCode;
+ assert(RecCode == DECL_UPDATES && "Expected DECL_UPDATES record!");
+
+ unsigned Idx = 0;
+ ASTDeclReader Reader(*this, *F, Cursor, ID, 0, Record, Idx);
+ Reader.UpdateDecl(D, *F, Record);
+ }
+ }
+}
+
+namespace {
+ struct CompareLocalRedeclarationsInfoToID {
+ bool operator()(const LocalRedeclarationsInfo &X, DeclID Y) {
+ return X.FirstID < Y;
+ }
+
+ bool operator()(DeclID X, const LocalRedeclarationsInfo &Y) {
+ return X < Y.FirstID;
+ }
+
+ bool operator()(const LocalRedeclarationsInfo &X,
+ const LocalRedeclarationsInfo &Y) {
+ return X.FirstID < Y.FirstID;
+ }
+ bool operator()(DeclID X, DeclID Y) {
+ return X < Y;
+ }
+ };
+
+  /// \brief Module visitor class that finds all of the redeclarations of a
+  /// declaration within the loaded module files.
+ class RedeclChainVisitor {
+ ASTReader &Reader;
+ SmallVectorImpl<DeclID> &SearchDecls;
+ llvm::SmallPtrSet<Decl *, 16> &Deserialized;
+ GlobalDeclID CanonID;
+ llvm::SmallVector<Decl *, 4> Chain;
+
+ public:
+ RedeclChainVisitor(ASTReader &Reader, SmallVectorImpl<DeclID> &SearchDecls,
+ llvm::SmallPtrSet<Decl *, 16> &Deserialized,
+ GlobalDeclID CanonID)
+ : Reader(Reader), SearchDecls(SearchDecls), Deserialized(Deserialized),
+ CanonID(CanonID) {
+ for (unsigned I = 0, N = SearchDecls.size(); I != N; ++I)
+ addToChain(Reader.GetDecl(SearchDecls[I]));
+ }
+
+ static bool visit(ModuleFile &M, bool Preorder, void *UserData) {
+ if (Preorder)
+ return false;
+
+ return static_cast<RedeclChainVisitor *>(UserData)->visit(M);
+ }
+
+ void addToChain(Decl *D) {
+ if (!D)
+ return;
+
+ if (Deserialized.count(D)) {
+ Deserialized.erase(D);
+ Chain.push_back(D);
+ }
+ }
+
+ void searchForID(ModuleFile &M, GlobalDeclID GlobalID) {
+ // Map global ID of the first declaration down to the local ID
+ // used in this module file.
+ DeclID ID = Reader.mapGlobalIDToModuleFileGlobalID(M, GlobalID);
+ if (!ID)
+ return;
+
+ // Perform a binary search to find the local redeclarations for this
+ // declaration (if any).
+ const LocalRedeclarationsInfo *Result
+ = std::lower_bound(M.RedeclarationsMap,
+ M.RedeclarationsMap + M.LocalNumRedeclarationsInMap,
+ ID, CompareLocalRedeclarationsInfoToID());
+ if (Result == M.RedeclarationsMap + M.LocalNumRedeclarationsInMap ||
+ Result->FirstID != ID) {
+ // If we have a previously-canonical singleton declaration that was
+ // merged into another redeclaration chain, create a trivial chain
+ // for this single declaration so that it will get wired into the
+ // complete redeclaration chain.
+ if (GlobalID != CanonID &&
+ GlobalID - NUM_PREDEF_DECL_IDS >= M.BaseDeclID &&
+ GlobalID - NUM_PREDEF_DECL_IDS < M.BaseDeclID + M.LocalNumDecls) {
+ addToChain(Reader.GetDecl(GlobalID));
+ }
+
+ return;
+ }
+
+ // Dig out all of the redeclarations.
+ unsigned Offset = Result->Offset;
+ unsigned N = M.RedeclarationChains[Offset];
+ M.RedeclarationChains[Offset++] = 0; // Don't try to deserialize again
+ for (unsigned I = 0; I != N; ++I)
+ addToChain(Reader.GetLocalDecl(M, M.RedeclarationChains[Offset++]));
+ }
+
+ bool visit(ModuleFile &M) {
+ // Visit each of the declarations.
+ for (unsigned I = 0, N = SearchDecls.size(); I != N; ++I)
+ searchForID(M, SearchDecls[I]);
+ return false;
+ }
+
+ ArrayRef<Decl *> getChain() const {
+ return Chain;
+ }
+ };
+}
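
RedeclChainVisitor::searchForID (and ObjCCategoriesVisitor::visit further below) both binary-search a sorted on-disk table with std::lower_bound and a comparator that accepts the table entry on either side of the key. A self-contained version of that search; RedeclInfo here is a stand-in for the real LocalRedeclarationsInfo:

#include <algorithm>
#include <cassert>

typedef unsigned DeclID;

// Entries are kept sorted by FirstID so the chain for a given declaration can
// be located with a binary search.
struct RedeclInfo {
  DeclID FirstID;
  unsigned Offset;
};

struct CompareRedeclInfoToID {
  bool operator()(const RedeclInfo &X, DeclID Y) const { return X.FirstID < Y; }
  bool operator()(DeclID X, const RedeclInfo &Y) const { return X < Y.FirstID; }
};

int main() {
  const RedeclInfo Map[] = { {2, 0}, {5, 3}, {9, 7} };
  const RedeclInfo *End = Map + 3;

  const RedeclInfo *Hit =
      std::lower_bound(Map, End, DeclID(5), CompareRedeclInfoToID());
  assert(Hit != End && Hit->FirstID == 5 && Hit->Offset == 3);

  // The "not found" test matches the one above: either we ran off the end or
  // the entry we landed on belongs to a different first declaration.
  const RedeclInfo *Miss =
      std::lower_bound(Map, End, DeclID(6), CompareRedeclInfoToID());
  assert(Miss == End || Miss->FirstID != 6);
  return 0;
}
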
+
+void ASTReader::loadPendingDeclChain(serialization::GlobalDeclID ID) {
+ Decl *D = GetDecl(ID);
+ Decl *CanonDecl = D->getCanonicalDecl();
+
+ // Determine the set of declaration IDs we'll be searching for.
+ llvm::SmallVector<DeclID, 1> SearchDecls;
+ GlobalDeclID CanonID = 0;
+ if (D == CanonDecl) {
+ SearchDecls.push_back(ID); // Always first.
+ CanonID = ID;
+ }
+ MergedDeclsMap::iterator MergedPos = combineStoredMergedDecls(CanonDecl, ID);
+ if (MergedPos != MergedDecls.end())
+ SearchDecls.append(MergedPos->second.begin(), MergedPos->second.end());
+
+ // Build up the list of redeclarations.
+ RedeclChainVisitor Visitor(*this, SearchDecls, RedeclsDeserialized, CanonID);
+ ModuleMgr.visitDepthFirst(&RedeclChainVisitor::visit, &Visitor);
+
+ // Retrieve the chains.
+ ArrayRef<Decl *> Chain = Visitor.getChain();
+ if (Chain.empty())
+ return;
+
+ // Hook up the chains.
+ Decl *MostRecent = CanonDecl->getMostRecentDecl();
+ for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
+ if (Chain[I] == CanonDecl)
+ continue;
+
+ ASTDeclReader::attachPreviousDecl(Chain[I], MostRecent);
+ MostRecent = Chain[I];
+ }
+
+ ASTDeclReader::attachLatestDecl(CanonDecl, MostRecent);
+}
+
+namespace {
+ struct CompareObjCCategoriesInfo {
+ bool operator()(const ObjCCategoriesInfo &X, DeclID Y) {
+ return X.DefinitionID < Y;
+ }
+
+ bool operator()(DeclID X, const ObjCCategoriesInfo &Y) {
+ return X < Y.DefinitionID;
+ }
+
+ bool operator()(const ObjCCategoriesInfo &X,
+ const ObjCCategoriesInfo &Y) {
+ return X.DefinitionID < Y.DefinitionID;
+ }
+ bool operator()(DeclID X, DeclID Y) {
+ return X < Y;
+ }
+ };
+
+ /// \brief Given an ObjC interface, goes through the modules and links to the
+ /// interface all the categories for it.
+ class ObjCCategoriesVisitor {
+ ASTReader &Reader;
+ serialization::GlobalDeclID InterfaceID;
+ ObjCInterfaceDecl *Interface;
+ llvm::SmallPtrSet<ObjCCategoryDecl *, 16> &Deserialized;
+ unsigned PreviousGeneration;
+ ObjCCategoryDecl *Tail;
+ llvm::DenseMap<DeclarationName, ObjCCategoryDecl *> NameCategoryMap;
+
+ void add(ObjCCategoryDecl *Cat) {
+ // Only process each category once.
+ if (!Deserialized.count(Cat))
+ return;
+ Deserialized.erase(Cat);
+
+ // Check for duplicate categories.
+ if (Cat->getDeclName()) {
+ ObjCCategoryDecl *&Existing = NameCategoryMap[Cat->getDeclName()];
+ if (Existing &&
+ Reader.getOwningModuleFile(Existing)
+ != Reader.getOwningModuleFile(Cat)) {
+ // FIXME: We should not warn for duplicates in diamond:
+ //
+ // MT //
+ // / \ //
+ // ML MR //
+ // \ / //
+ // MB //
+ //
+          // If there are duplicates in ML/MR, there will be a warning when
+ // creating MB *and* when importing MB. We should not warn when
+ // importing.
+ Reader.Diag(Cat->getLocation(), diag::warn_dup_category_def)
+ << Interface->getDeclName() << Cat->getDeclName();
+ Reader.Diag(Existing->getLocation(), diag::note_previous_definition);
+ } else if (!Existing) {
+ // Record this category.
+ Existing = Cat;
+ }
+ }
+
+ // Add this category to the end of the chain.
+ if (Tail)
+ ASTDeclReader::setNextObjCCategory(Tail, Cat);
+ else
+ Interface->setCategoryList(Cat);
+ Tail = Cat;
+ }
+
+ public:
+ ObjCCategoriesVisitor(ASTReader &Reader,
+ serialization::GlobalDeclID InterfaceID,
+ ObjCInterfaceDecl *Interface,
+ llvm::SmallPtrSet<ObjCCategoryDecl *, 16> &Deserialized,
+ unsigned PreviousGeneration)
+ : Reader(Reader), InterfaceID(InterfaceID), Interface(Interface),
+ Deserialized(Deserialized), PreviousGeneration(PreviousGeneration),
+ Tail(0)
+ {
+ // Populate the name -> category map with the set of known categories.
+ for (ObjCCategoryDecl *Cat = Interface->getCategoryList(); Cat;
+ Cat = Cat->getNextClassCategory()) {
+ if (Cat->getDeclName())
+ NameCategoryMap[Cat->getDeclName()] = Cat;
+
+ // Keep track of the tail of the category list.
+ Tail = Cat;
+ }
+ }
+
+ static bool visit(ModuleFile &M, void *UserData) {
+ return static_cast<ObjCCategoriesVisitor *>(UserData)->visit(M);
+ }
+
+ bool visit(ModuleFile &M) {
+ // If we've loaded all of the category information we care about from
+ // this module file, we're done.
+ if (M.Generation <= PreviousGeneration)
+ return true;
+
+ // Map global ID of the definition down to the local ID used in this
+ // module file. If there is no such mapping, we'll find nothing here
+ // (or in any module it imports).
+ DeclID LocalID = Reader.mapGlobalIDToModuleFileGlobalID(M, InterfaceID);
+ if (!LocalID)
+ return true;
+
+ // Perform a binary search to find the local redeclarations for this
+ // declaration (if any).
+ const ObjCCategoriesInfo *Result
+ = std::lower_bound(M.ObjCCategoriesMap,
+ M.ObjCCategoriesMap + M.LocalNumObjCCategoriesInMap,
+ LocalID, CompareObjCCategoriesInfo());
+ if (Result == M.ObjCCategoriesMap + M.LocalNumObjCCategoriesInMap ||
+ Result->DefinitionID != LocalID) {
+ // We didn't find anything. If the class definition is in this module
+ // file, then the module files it depends on cannot have any categories,
+ // so suppress further lookup.
+ return Reader.isDeclIDFromModule(InterfaceID, M);
+ }
+
+ // We found something. Dig out all of the categories.
+ unsigned Offset = Result->Offset;
+ unsigned N = M.ObjCCategories[Offset];
+ M.ObjCCategories[Offset++] = 0; // Don't try to deserialize again
+ for (unsigned I = 0; I != N; ++I)
+ add(cast_or_null<ObjCCategoryDecl>(
+ Reader.GetLocalDecl(M, M.ObjCCategories[Offset++])));
+ return true;
+ }
+ };
+}
+
+void ASTReader::loadObjCCategories(serialization::GlobalDeclID ID,
+ ObjCInterfaceDecl *D,
+ unsigned PreviousGeneration) {
+ ObjCCategoriesVisitor Visitor(*this, ID, D, CategoriesDeserialized,
+ PreviousGeneration);
+ ModuleMgr.visit(ObjCCategoriesVisitor::visit, &Visitor);
+}
+
+void ASTDeclReader::UpdateDecl(Decl *D, ModuleFile &ModuleFile,
+ const RecordData &Record) {
+ unsigned Idx = 0;
+ while (Idx < Record.size()) {
+ switch ((DeclUpdateKind)Record[Idx++]) {
+ case UPD_CXX_ADDED_IMPLICIT_MEMBER:
+ cast<CXXRecordDecl>(D)->addedMember(Reader.ReadDecl(ModuleFile, Record, Idx));
+ break;
+
+ case UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION:
+ // It will be added to the template's specializations set when loaded.
+ (void)Reader.ReadDecl(ModuleFile, Record, Idx);
+ break;
+
+ case UPD_CXX_ADDED_ANONYMOUS_NAMESPACE: {
+ NamespaceDecl *Anon
+ = Reader.ReadDeclAs<NamespaceDecl>(ModuleFile, Record, Idx);
+
+ // Each module has its own anonymous namespace, which is disjoint from
+ // any other module's anonymous namespaces, so don't attach the anonymous
+ // namespace at all.
+ if (ModuleFile.Kind != MK_Module) {
+ if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(D))
+ TU->setAnonymousNamespace(Anon);
+ else
+ cast<NamespaceDecl>(D)->setAnonymousNamespace(Anon);
+ }
+ break;
+ }
+
+ case UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER:
+ cast<VarDecl>(D)->getMemberSpecializationInfo()->setPointOfInstantiation(
+ Reader.ReadSourceLocation(ModuleFile, Record, Idx));
+ break;
+ }
+ }
+}
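
UpdateDecl treats the update record as a variable-length sequence of (kind, payload) entries and loops until the cursor reaches the end of the record. The framing in miniature, with hypothetical update kinds standing in for serialization::DeclUpdateKind:

#include <cassert>
#include <vector>

// Hypothetical kinds; the real enumeration is serialization::DeclUpdateKind.
enum UpdateKind { UPD_ADDED_MEMBER = 0, UPD_SET_LOCATION = 1 };

int main() {
  // Record layout: kind, payload, kind, payload, ...
  std::vector<unsigned> Record;
  Record.push_back(UPD_ADDED_MEMBER); Record.push_back(7);
  Record.push_back(UPD_SET_LOCATION); Record.push_back(42);

  unsigned Idx = 0, MembersAdded = 0, Location = 0;
  while (Idx < Record.size()) {
    switch ((UpdateKind)Record[Idx++]) {
    case UPD_ADDED_MEMBER:
      ++MembersAdded;
      ++Idx;                      // skip the payload (a DeclID in the real code)
      break;
    case UPD_SET_LOCATION:
      Location = Record[Idx++];   // payload: a raw source location
      break;
    }
  }
  assert(MembersAdded == 1 && Location == 42);
  return 0;
}
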
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderInternals.h b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderInternals.h
new file mode 100644
index 0000000..3a1dfcf
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderInternals.h
@@ -0,0 +1,248 @@
+//===--- ASTReaderInternals.h - AST Reader Internals ------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides internal definitions used in the AST reader.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LLVM_CLANG_SERIALIZATION_ASTREADER_INTERNALS_H
+#define LLVM_CLANG_SERIALIZATION_ASTREADER_INTERNALS_H
+
+#include "clang/Basic/OnDiskHashTable.h"
+#include "clang/AST/DeclarationName.h"
+#include "llvm/Support/Endian.h"
+#include <utility>
+#include <sys/stat.h>
+
+namespace clang {
+
+class ASTReader;
+class HeaderSearch;
+struct HeaderFileInfo;
+
+namespace serialization {
+
+class ModuleFile;
+
+namespace reader {
+
+/// \brief Class that performs name lookup into a DeclContext stored
+/// in an AST file.
+class ASTDeclContextNameLookupTrait {
+ ASTReader &Reader;
+ ModuleFile &F;
+
+public:
+ /// \brief Pair of begin/end iterators for DeclIDs.
+ ///
+ /// Note that these declaration IDs are local to the module that contains this
+ /// particular lookup table.
+ typedef llvm::support::ulittle32_t LE32DeclID;
+ typedef std::pair<LE32DeclID *, LE32DeclID *> data_type;
+
+ /// \brief Special internal key for declaration names.
+ /// The hash table creates keys for comparison; we do not create
+ /// a DeclarationName for the internal key to avoid deserializing types.
+ struct DeclNameKey {
+ DeclarationName::NameKind Kind;
+ uint64_t Data;
+ DeclNameKey() : Kind((DeclarationName::NameKind)0), Data(0) { }
+ };
+
+ typedef DeclarationName external_key_type;
+ typedef DeclNameKey internal_key_type;
+
+ explicit ASTDeclContextNameLookupTrait(ASTReader &Reader,
+ ModuleFile &F)
+ : Reader(Reader), F(F) { }
+
+ static bool EqualKey(const internal_key_type& a,
+ const internal_key_type& b) {
+ return a.Kind == b.Kind && a.Data == b.Data;
+ }
+
+ unsigned ComputeHash(const DeclNameKey &Key) const;
+ internal_key_type GetInternalKey(const external_key_type& Name) const;
+ external_key_type GetExternalKey(const internal_key_type& Key) const;
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d);
+
+ internal_key_type ReadKey(const unsigned char* d, unsigned);
+
+ data_type ReadData(internal_key_type, const unsigned char* d,
+ unsigned DataLen);
+};
+
+/// \brief The on-disk hash table used for the DeclContext's Name lookup table.
+typedef OnDiskChainedHashTable<ASTDeclContextNameLookupTrait>
+ ASTDeclContextNameLookupTable;
+
+/// \brief Class that performs lookup for an identifier stored in an AST file.
+class ASTIdentifierLookupTrait {
+ ASTReader &Reader;
+ ModuleFile &F;
+
+ // If we know the IdentifierInfo in advance, it is here and we will
+ // not build a new one. Used when deserializing information about an
+ // identifier that was constructed before the AST file was read.
+ IdentifierInfo *KnownII;
+
+public:
+ typedef IdentifierInfo * data_type;
+
+ typedef const std::pair<const char*, unsigned> external_key_type;
+
+ typedef external_key_type internal_key_type;
+
+ ASTIdentifierLookupTrait(ASTReader &Reader, ModuleFile &F,
+ IdentifierInfo *II = 0)
+ : Reader(Reader), F(F), KnownII(II) { }
+
+ static bool EqualKey(const internal_key_type& a,
+ const internal_key_type& b) {
+ return (a.second == b.second) ? memcmp(a.first, b.first, a.second) == 0
+ : false;
+ }
+
+ static unsigned ComputeHash(const internal_key_type& a);
+
+ // This hopefully will just get inlined and removed by the optimizer.
+ static const internal_key_type&
+ GetInternalKey(const external_key_type& x) { return x; }
+
+ // This hopefully will just get inlined and removed by the optimizer.
+ static const external_key_type&
+ GetExternalKey(const internal_key_type& x) { return x; }
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d);
+
+ static std::pair<const char*, unsigned>
+ ReadKey(const unsigned char* d, unsigned n);
+
+ IdentifierInfo *ReadData(const internal_key_type& k,
+ const unsigned char* d,
+ unsigned DataLen);
+
+ ASTReader &getReader() const { return Reader; }
+
+};
+
+/// \brief The on-disk hash table used to contain information about
+/// all of the identifiers in the program.
+typedef OnDiskChainedHashTable<ASTIdentifierLookupTrait>
+ ASTIdentifierLookupTable;
+
+/// \brief Class that performs lookup for a selector's entries in the global
+/// method pool stored in an AST file.
+class ASTSelectorLookupTrait {
+ ASTReader &Reader;
+ ModuleFile &F;
+
+public:
+ struct data_type {
+ SelectorID ID;
+ llvm::SmallVector<ObjCMethodDecl *, 2> Instance;
+ llvm::SmallVector<ObjCMethodDecl *, 2> Factory;
+ };
+
+ typedef Selector external_key_type;
+ typedef external_key_type internal_key_type;
+
+ ASTSelectorLookupTrait(ASTReader &Reader, ModuleFile &F)
+ : Reader(Reader), F(F) { }
+
+ static bool EqualKey(const internal_key_type& a,
+ const internal_key_type& b) {
+ return a == b;
+ }
+
+ static unsigned ComputeHash(Selector Sel);
+
+ static const internal_key_type&
+ GetInternalKey(const external_key_type& x) { return x; }
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d);
+
+ internal_key_type ReadKey(const unsigned char* d, unsigned);
+ data_type ReadData(Selector, const unsigned char* d, unsigned DataLen);
+};
+
+/// \brief The on-disk hash table used for the global method pool.
+typedef OnDiskChainedHashTable<ASTSelectorLookupTrait>
+ ASTSelectorLookupTable;
+
+/// \brief Trait class used to search the on-disk hash table containing all of
+/// the header search information.
+///
+/// The on-disk hash table contains a mapping from each header path to
+/// information about that header (how many times it has been included, its
+/// controlling macro, etc.). Note that we actually hash based on the
+/// filename, and support "deep" comparisons of file names based on current
+/// inode numbers, so that the search can cope with non-normalized path names
+/// and symlinks.
+class HeaderFileInfoTrait {
+ ASTReader &Reader;
+ ModuleFile &M;
+ HeaderSearch *HS;
+ const char *FrameworkStrings;
+ const char *SearchPath;
+ struct stat SearchPathStatBuf;
+ llvm::Optional<int> SearchPathStatResult;
+
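+ // Stat the given path, caching the result when the path is the search path
+ // itself so that repeated queries against the same directory avoid extra
+ // filesystem calls.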
+ int StatSimpleCache(const char *Path, struct stat *StatBuf) {
+ if (Path == SearchPath) {
+ if (!SearchPathStatResult)
+ SearchPathStatResult = stat(Path, &SearchPathStatBuf);
+
+ *StatBuf = SearchPathStatBuf;
+ return *SearchPathStatResult;
+ }
+
+ return stat(Path, StatBuf);
+ }
+
+public:
+ typedef const char *external_key_type;
+ typedef const char *internal_key_type;
+
+ typedef HeaderFileInfo data_type;
+
+ HeaderFileInfoTrait(ASTReader &Reader, ModuleFile &M, HeaderSearch *HS,
+ const char *FrameworkStrings,
+ const char *SearchPath = 0)
+ : Reader(Reader), M(M), HS(HS), FrameworkStrings(FrameworkStrings),
+ SearchPath(SearchPath) { }
+
+ static unsigned ComputeHash(const char *path);
+ static internal_key_type GetInternalKey(const char *path);
+ bool EqualKey(internal_key_type a, internal_key_type b);
+
+ static std::pair<unsigned, unsigned>
+ ReadKeyDataLength(const unsigned char*& d);
+
+ static internal_key_type ReadKey(const unsigned char *d, unsigned) {
+ return (const char *)d;
+ }
+
+ data_type ReadData(const internal_key_type, const unsigned char *d,
+ unsigned DataLen);
+};
+
+/// \brief The on-disk hash table used for known header files.
+typedef OnDiskChainedHashTable<HeaderFileInfoTrait>
+ HeaderFileInfoLookupTable;
+
+} // end namespace clang::serialization::reader
+} // end namespace clang::serialization
+} // end namespace clang
+
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
new file mode 100644
index 0000000..2eeb090
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -0,0 +1,2214 @@
+//===--- ASTReaderStmt.cpp - Stmt/Expr Deserialization ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Statement/expression deserialization. This implements the
+// ASTReader::ReadStmt method.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Serialization/ASTReader.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+using namespace clang::serialization;
+
+namespace clang {
+
+ class ASTStmtReader : public StmtVisitor<ASTStmtReader> {
+ typedef ASTReader::RecordData RecordData;
+
+ ASTReader &Reader;
+ ModuleFile &F;
+ llvm::BitstreamCursor &DeclsCursor;
+ const ASTReader::RecordData &Record;
+ unsigned &Idx;
+
+ SourceLocation ReadSourceLocation(const RecordData &R, unsigned &I) {
+ return Reader.ReadSourceLocation(F, R, I);
+ }
+
+ SourceRange ReadSourceRange(const RecordData &R, unsigned &I) {
+ return Reader.ReadSourceRange(F, R, I);
+ }
+
+ TypeSourceInfo *GetTypeSourceInfo(const RecordData &R, unsigned &I) {
+ return Reader.GetTypeSourceInfo(F, R, I);
+ }
+
+ serialization::DeclID ReadDeclID(const RecordData &R, unsigned &I) {
+ return Reader.ReadDeclID(F, R, I);
+ }
+
+ Decl *ReadDecl(const RecordData &R, unsigned &I) {
+ return Reader.ReadDecl(F, R, I);
+ }
+
+ template<typename T>
+ T *ReadDeclAs(const RecordData &R, unsigned &I) {
+ return Reader.ReadDeclAs<T>(F, R, I);
+ }
+
+ void ReadDeclarationNameLoc(DeclarationNameLoc &DNLoc, DeclarationName Name,
+ const ASTReader::RecordData &R, unsigned &I) {
+ Reader.ReadDeclarationNameLoc(F, DNLoc, Name, R, I);
+ }
+
+ void ReadDeclarationNameInfo(DeclarationNameInfo &NameInfo,
+ const ASTReader::RecordData &R, unsigned &I) {
+ Reader.ReadDeclarationNameInfo(F, NameInfo, R, I);
+ }
+
+ public:
+ ASTStmtReader(ASTReader &Reader, ModuleFile &F,
+ llvm::BitstreamCursor &Cursor,
+ const ASTReader::RecordData &Record, unsigned &Idx)
+ : Reader(Reader), F(F), DeclsCursor(Cursor), Record(Record), Idx(Idx) { }
+
+ /// \brief The number of record fields required for the Stmt class
+ /// itself.
+ static const unsigned NumStmtFields = 0;
+
+ /// \brief The number of record fields required for the Expr class
+ /// itself.
+ static const unsigned NumExprFields = NumStmtFields + 7;
+
+ /// \brief Read and initialize an ASTTemplateKWAndArgsInfo structure.
+ void ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
+ unsigned NumTemplateArgs);
+ /// \brief Read and initialize an ASTTemplateArgumentListInfo structure.
+ void ReadExplicitTemplateArgumentList(ASTTemplateArgumentListInfo &ArgList,
+ unsigned NumTemplateArgs);
+
+ void VisitStmt(Stmt *S);
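+ // Declare a Visit method for each statement and expression node class;
+ // StmtNodes.inc expands the STMT macro below once per node.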
+#define STMT(Type, Base) \
+ void Visit##Type(Type *);
+#include "clang/AST/StmtNodes.inc"
+ };
+}
+
+void ASTStmtReader::
+ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
+ unsigned NumTemplateArgs) {
+ SourceLocation TemplateKWLoc = ReadSourceLocation(Record, Idx);
+ TemplateArgumentListInfo ArgInfo;
+ ArgInfo.setLAngleLoc(ReadSourceLocation(Record, Idx));
+ ArgInfo.setRAngleLoc(ReadSourceLocation(Record, Idx));
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ ArgInfo.addArgument(
+ Reader.ReadTemplateArgumentLoc(F, Record, Idx));
+ Args.initializeFrom(TemplateKWLoc, ArgInfo);
+}
+
+void ASTStmtReader::VisitStmt(Stmt *S) {
+ assert(Idx == NumStmtFields && "Incorrect statement field count");
+}
+
+void ASTStmtReader::VisitNullStmt(NullStmt *S) {
+ VisitStmt(S);
+ S->setSemiLoc(ReadSourceLocation(Record, Idx));
+ S->HasLeadingEmptyMacro = Record[Idx++];
+}
+
+void ASTStmtReader::VisitCompoundStmt(CompoundStmt *S) {
+ VisitStmt(S);
+ SmallVector<Stmt *, 16> Stmts;
+ unsigned NumStmts = Record[Idx++];
+ while (NumStmts--)
+ Stmts.push_back(Reader.ReadSubStmt());
+ S->setStmts(Reader.getContext(), Stmts.data(), Stmts.size());
+ S->setLBracLoc(ReadSourceLocation(Record, Idx));
+ S->setRBracLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitSwitchCase(SwitchCase *S) {
+ VisitStmt(S);
+ Reader.RecordSwitchCaseID(S, Record[Idx++]);
+}
+
+void ASTStmtReader::VisitCaseStmt(CaseStmt *S) {
+ VisitSwitchCase(S);
+ S->setLHS(Reader.ReadSubExpr());
+ S->setRHS(Reader.ReadSubExpr());
+ S->setSubStmt(Reader.ReadSubStmt());
+ S->setCaseLoc(ReadSourceLocation(Record, Idx));
+ S->setEllipsisLoc(ReadSourceLocation(Record, Idx));
+ S->setColonLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitDefaultStmt(DefaultStmt *S) {
+ VisitSwitchCase(S);
+ S->setSubStmt(Reader.ReadSubStmt());
+ S->setDefaultLoc(ReadSourceLocation(Record, Idx));
+ S->setColonLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitLabelStmt(LabelStmt *S) {
+ VisitStmt(S);
+ LabelDecl *LD = ReadDeclAs<LabelDecl>(Record, Idx);
+ LD->setStmt(S);
+ S->setDecl(LD);
+ S->setSubStmt(Reader.ReadSubStmt());
+ S->setIdentLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitIfStmt(IfStmt *S) {
+ VisitStmt(S);
+ S->setConditionVariable(Reader.getContext(),
+ ReadDeclAs<VarDecl>(Record, Idx));
+ S->setCond(Reader.ReadSubExpr());
+ S->setThen(Reader.ReadSubStmt());
+ S->setElse(Reader.ReadSubStmt());
+ S->setIfLoc(ReadSourceLocation(Record, Idx));
+ S->setElseLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitSwitchStmt(SwitchStmt *S) {
+ VisitStmt(S);
+ S->setConditionVariable(Reader.getContext(),
+ ReadDeclAs<VarDecl>(Record, Idx));
+ S->setCond(Reader.ReadSubExpr());
+ S->setBody(Reader.ReadSubStmt());
+ S->setSwitchLoc(ReadSourceLocation(Record, Idx));
+ if (Record[Idx++])
+ S->setAllEnumCasesCovered();
+
+ SwitchCase *PrevSC = 0;
+ for (unsigned N = Record.size(); Idx != N; ++Idx) {
+ SwitchCase *SC = Reader.getSwitchCaseWithID(Record[Idx]);
+ if (PrevSC)
+ PrevSC->setNextSwitchCase(SC);
+ else
+ S->setSwitchCaseList(SC);
+
+ PrevSC = SC;
+ }
+}
+
+void ASTStmtReader::VisitWhileStmt(WhileStmt *S) {
+ VisitStmt(S);
+ S->setConditionVariable(Reader.getContext(),
+ ReadDeclAs<VarDecl>(Record, Idx));
+
+ S->setCond(Reader.ReadSubExpr());
+ S->setBody(Reader.ReadSubStmt());
+ S->setWhileLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitDoStmt(DoStmt *S) {
+ VisitStmt(S);
+ S->setCond(Reader.ReadSubExpr());
+ S->setBody(Reader.ReadSubStmt());
+ S->setDoLoc(ReadSourceLocation(Record, Idx));
+ S->setWhileLoc(ReadSourceLocation(Record, Idx));
+ S->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitForStmt(ForStmt *S) {
+ VisitStmt(S);
+ S->setInit(Reader.ReadSubStmt());
+ S->setCond(Reader.ReadSubExpr());
+ S->setConditionVariable(Reader.getContext(),
+ ReadDeclAs<VarDecl>(Record, Idx));
+ S->setInc(Reader.ReadSubExpr());
+ S->setBody(Reader.ReadSubStmt());
+ S->setForLoc(ReadSourceLocation(Record, Idx));
+ S->setLParenLoc(ReadSourceLocation(Record, Idx));
+ S->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitGotoStmt(GotoStmt *S) {
+ VisitStmt(S);
+ S->setLabel(ReadDeclAs<LabelDecl>(Record, Idx));
+ S->setGotoLoc(ReadSourceLocation(Record, Idx));
+ S->setLabelLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
+ VisitStmt(S);
+ S->setGotoLoc(ReadSourceLocation(Record, Idx));
+ S->setStarLoc(ReadSourceLocation(Record, Idx));
+ S->setTarget(Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitContinueStmt(ContinueStmt *S) {
+ VisitStmt(S);
+ S->setContinueLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitBreakStmt(BreakStmt *S) {
+ VisitStmt(S);
+ S->setBreakLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitReturnStmt(ReturnStmt *S) {
+ VisitStmt(S);
+ S->setRetValue(Reader.ReadSubExpr());
+ S->setReturnLoc(ReadSourceLocation(Record, Idx));
+ S->setNRVOCandidate(ReadDeclAs<VarDecl>(Record, Idx));
+}
+
+void ASTStmtReader::VisitDeclStmt(DeclStmt *S) {
+ VisitStmt(S);
+ S->setStartLoc(ReadSourceLocation(Record, Idx));
+ S->setEndLoc(ReadSourceLocation(Record, Idx));
+
+ if (Idx + 1 == Record.size()) {
+ // Single declaration
+ S->setDeclGroup(DeclGroupRef(ReadDecl(Record, Idx)));
+ } else {
+ SmallVector<Decl *, 16> Decls;
+ Decls.reserve(Record.size() - Idx);
+ for (unsigned N = Record.size(); Idx != N; )
+ Decls.push_back(ReadDecl(Record, Idx));
+ S->setDeclGroup(DeclGroupRef(DeclGroup::Create(Reader.getContext(),
+ Decls.data(),
+ Decls.size())));
+ }
+}
+
+void ASTStmtReader::VisitAsmStmt(AsmStmt *S) {
+ VisitStmt(S);
+ unsigned NumOutputs = Record[Idx++];
+ unsigned NumInputs = Record[Idx++];
+ unsigned NumClobbers = Record[Idx++];
+ S->setAsmLoc(ReadSourceLocation(Record, Idx));
+ S->setRParenLoc(ReadSourceLocation(Record, Idx));
+ S->setVolatile(Record[Idx++]);
+ S->setSimple(Record[Idx++]);
+ S->setMSAsm(Record[Idx++]);
+
+ S->setAsmString(cast_or_null<StringLiteral>(Reader.ReadSubStmt()));
+
+ // Outputs and inputs
+ SmallVector<IdentifierInfo *, 16> Names;
+ SmallVector<StringLiteral*, 16> Constraints;
+ SmallVector<Stmt*, 16> Exprs;
+ for (unsigned I = 0, N = NumOutputs + NumInputs; I != N; ++I) {
+ Names.push_back(Reader.GetIdentifierInfo(F, Record, Idx));
+ Constraints.push_back(cast_or_null<StringLiteral>(Reader.ReadSubStmt()));
+ Exprs.push_back(Reader.ReadSubStmt());
+ }
+
+ // Constraints
+ SmallVector<StringLiteral*, 16> Clobbers;
+ for (unsigned I = 0; I != NumClobbers; ++I)
+ Clobbers.push_back(cast_or_null<StringLiteral>(Reader.ReadSubStmt()));
+
+ S->setOutputsAndInputsAndClobbers(Reader.getContext(),
+ Names.data(), Constraints.data(),
+ Exprs.data(), NumOutputs, NumInputs,
+ Clobbers.data(), NumClobbers);
+}
+
+void ASTStmtReader::VisitExpr(Expr *E) {
+ VisitStmt(E);
+ E->setType(Reader.readType(F, Record, Idx));
+ E->setTypeDependent(Record[Idx++]);
+ E->setValueDependent(Record[Idx++]);
+ E->setInstantiationDependent(Record[Idx++]);
+ E->ExprBits.ContainsUnexpandedParameterPack = Record[Idx++];
+ E->setValueKind(static_cast<ExprValueKind>(Record[Idx++]));
+ E->setObjectKind(static_cast<ExprObjectKind>(Record[Idx++]));
+ assert(Idx == NumExprFields && "Incorrect expression field count");
+}
+
+void ASTStmtReader::VisitPredefinedExpr(PredefinedExpr *E) {
+ VisitExpr(E);
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setIdentType((PredefinedExpr::IdentType)Record[Idx++]);
+}
+
+void ASTStmtReader::VisitDeclRefExpr(DeclRefExpr *E) {
+ VisitExpr(E);
+
+ E->DeclRefExprBits.HasQualifier = Record[Idx++];
+ E->DeclRefExprBits.HasFoundDecl = Record[Idx++];
+ E->DeclRefExprBits.HasTemplateKWAndArgsInfo = Record[Idx++];
+ E->DeclRefExprBits.HadMultipleCandidates = Record[Idx++];
+ E->DeclRefExprBits.RefersToEnclosingLocal = Record[Idx++];
+ unsigned NumTemplateArgs = 0;
+ if (E->hasTemplateKWAndArgsInfo())
+ NumTemplateArgs = Record[Idx++];
+
+ if (E->hasQualifier())
+ E->getInternalQualifierLoc()
+ = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+
+ if (E->hasFoundDecl())
+ E->getInternalFoundDecl() = ReadDeclAs<NamedDecl>(Record, Idx);
+
+ if (E->hasTemplateKWAndArgsInfo())
+ ReadTemplateKWAndArgsInfo(*E->getTemplateKWAndArgsInfo(),
+ NumTemplateArgs);
+
+ E->setDecl(ReadDeclAs<ValueDecl>(Record, Idx));
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ ReadDeclarationNameLoc(E->DNLoc, E->getDecl()->getDeclName(), Record, Idx);
+}
+
+void ASTStmtReader::VisitIntegerLiteral(IntegerLiteral *E) {
+ VisitExpr(E);
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setValue(Reader.getContext(), Reader.ReadAPInt(Record, Idx));
+}
+
+void ASTStmtReader::VisitFloatingLiteral(FloatingLiteral *E) {
+ VisitExpr(E);
+ E->setValue(Reader.getContext(), Reader.ReadAPFloat(Record, Idx));
+ E->setExact(Record[Idx++]);
+ E->setLocation(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitImaginaryLiteral(ImaginaryLiteral *E) {
+ VisitExpr(E);
+ E->setSubExpr(Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitStringLiteral(StringLiteral *E) {
+ VisitExpr(E);
+ unsigned Len = Record[Idx++];
+ assert(Record[Idx] == E->getNumConcatenated() &&
+ "Wrong number of concatenated tokens!");
+ ++Idx;
+ StringLiteral::StringKind kind =
+ static_cast<StringLiteral::StringKind>(Record[Idx++]);
+ bool isPascal = Record[Idx++];
+
+ // Read string data
+ SmallString<16> Str(&Record[Idx], &Record[Idx] + Len);
+ E->setString(Reader.getContext(), Str.str(), kind, isPascal);
+ Idx += Len;
+
+ // Read source locations
+ for (unsigned I = 0, N = E->getNumConcatenated(); I != N; ++I)
+ E->setStrTokenLoc(I, ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitCharacterLiteral(CharacterLiteral *E) {
+ VisitExpr(E);
+ E->setValue(Record[Idx++]);
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setKind(static_cast<CharacterLiteral::CharacterKind>(Record[Idx++]));
+}
+
+void ASTStmtReader::VisitParenExpr(ParenExpr *E) {
+ VisitExpr(E);
+ E->setLParen(ReadSourceLocation(Record, Idx));
+ E->setRParen(ReadSourceLocation(Record, Idx));
+ E->setSubExpr(Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitParenListExpr(ParenListExpr *E) {
+ VisitExpr(E);
+ unsigned NumExprs = Record[Idx++];
+ E->Exprs = new (Reader.getContext()) Stmt*[NumExprs];
+ for (unsigned i = 0; i != NumExprs; ++i)
+ E->Exprs[i] = Reader.ReadSubStmt();
+ E->NumExprs = NumExprs;
+ E->LParenLoc = ReadSourceLocation(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitUnaryOperator(UnaryOperator *E) {
+ VisitExpr(E);
+ E->setSubExpr(Reader.ReadSubExpr());
+ E->setOpcode((UnaryOperator::Opcode)Record[Idx++]);
+ E->setOperatorLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitOffsetOfExpr(OffsetOfExpr *E) {
+ typedef OffsetOfExpr::OffsetOfNode Node;
+ VisitExpr(E);
+ assert(E->getNumComponents() == Record[Idx]);
+ ++Idx;
+ assert(E->getNumExpressions() == Record[Idx]);
+ ++Idx;
+ E->setOperatorLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ E->setTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
+ for (unsigned I = 0, N = E->getNumComponents(); I != N; ++I) {
+ Node::Kind Kind = static_cast<Node::Kind>(Record[Idx++]);
+ SourceLocation Start = ReadSourceLocation(Record, Idx);
+ SourceLocation End = ReadSourceLocation(Record, Idx);
+ switch (Kind) {
+ case Node::Array:
+ E->setComponent(I, Node(Start, Record[Idx++], End));
+ break;
+
+ case Node::Field:
+ E->setComponent(I, Node(Start, ReadDeclAs<FieldDecl>(Record, Idx), End));
+ break;
+
+ case Node::Identifier:
+ E->setComponent(I,
+ Node(Start,
+ Reader.GetIdentifierInfo(F, Record, Idx),
+ End));
+ break;
+
+ case Node::Base: {
+ CXXBaseSpecifier *Base = new (Reader.getContext()) CXXBaseSpecifier();
+ *Base = Reader.ReadCXXBaseSpecifier(F, Record, Idx);
+ E->setComponent(I, Node(Base));
+ break;
+ }
+ }
+ }
+
+ for (unsigned I = 0, N = E->getNumExpressions(); I != N; ++I)
+ E->setIndexExpr(I, Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
+ VisitExpr(E);
+ E->setKind(static_cast<UnaryExprOrTypeTrait>(Record[Idx++]));
+ if (Record[Idx] == 0) {
+ E->setArgument(Reader.ReadSubExpr());
+ ++Idx;
+ } else {
+ E->setArgument(GetTypeSourceInfo(Record, Idx));
+ }
+ E->setOperatorLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ VisitExpr(E);
+ E->setLHS(Reader.ReadSubExpr());
+ E->setRHS(Reader.ReadSubExpr());
+ E->setRBracketLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitCallExpr(CallExpr *E) {
+ VisitExpr(E);
+ E->setNumArgs(Reader.getContext(), Record[Idx++]);
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ E->setCallee(Reader.ReadSubExpr());
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ E->setArg(I, Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
+ VisitCallExpr(E);
+}
+
+void ASTStmtReader::VisitMemberExpr(MemberExpr *E) {
+ // Don't call VisitExpr, this is fully initialized at creation.
+ assert(E->getStmtClass() == Stmt::MemberExprClass &&
+ "It's a subclass, we must advance Idx!");
+}
+
+void ASTStmtReader::VisitObjCIsaExpr(ObjCIsaExpr *E) {
+ VisitExpr(E);
+ E->setBase(Reader.ReadSubExpr());
+ E->setIsaMemberLoc(ReadSourceLocation(Record, Idx));
+ E->setArrow(Record[Idx++]);
+}
+
+void ASTStmtReader::
+VisitObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) {
+ VisitExpr(E);
+ E->Operand = Reader.ReadSubExpr();
+ E->setShouldCopy(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ E->LParenLoc = ReadSourceLocation(Record, Idx);
+ E->BridgeKeywordLoc = ReadSourceLocation(Record, Idx);
+ E->Kind = Record[Idx++];
+}
+
+void ASTStmtReader::VisitCastExpr(CastExpr *E) {
+ VisitExpr(E);
+ unsigned NumBaseSpecs = Record[Idx++];
+ assert(NumBaseSpecs == E->path_size());
+ E->setSubExpr(Reader.ReadSubExpr());
+ E->setCastKind((CastExpr::CastKind)Record[Idx++]);
+ CastExpr::path_iterator BaseI = E->path_begin();
+ while (NumBaseSpecs--) {
+ CXXBaseSpecifier *BaseSpec = new (Reader.getContext()) CXXBaseSpecifier;
+ *BaseSpec = Reader.ReadCXXBaseSpecifier(F, Record, Idx);
+ *BaseI++ = BaseSpec;
+ }
+}
+
+void ASTStmtReader::VisitBinaryOperator(BinaryOperator *E) {
+ VisitExpr(E);
+ E->setLHS(Reader.ReadSubExpr());
+ E->setRHS(Reader.ReadSubExpr());
+ E->setOpcode((BinaryOperator::Opcode)Record[Idx++]);
+ E->setOperatorLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
+ VisitBinaryOperator(E);
+ E->setComputationLHSType(Reader.readType(F, Record, Idx));
+ E->setComputationResultType(Reader.readType(F, Record, Idx));
+}
+
+void ASTStmtReader::VisitConditionalOperator(ConditionalOperator *E) {
+ VisitExpr(E);
+ E->SubExprs[ConditionalOperator::COND] = Reader.ReadSubExpr();
+ E->SubExprs[ConditionalOperator::LHS] = Reader.ReadSubExpr();
+ E->SubExprs[ConditionalOperator::RHS] = Reader.ReadSubExpr();
+ E->QuestionLoc = ReadSourceLocation(Record, Idx);
+ E->ColonLoc = ReadSourceLocation(Record, Idx);
+}
+
+void
+ASTStmtReader::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
+ VisitExpr(E);
+ E->OpaqueValue = cast<OpaqueValueExpr>(Reader.ReadSubExpr());
+ E->SubExprs[BinaryConditionalOperator::COMMON] = Reader.ReadSubExpr();
+ E->SubExprs[BinaryConditionalOperator::COND] = Reader.ReadSubExpr();
+ E->SubExprs[BinaryConditionalOperator::LHS] = Reader.ReadSubExpr();
+ E->SubExprs[BinaryConditionalOperator::RHS] = Reader.ReadSubExpr();
+ E->QuestionLoc = ReadSourceLocation(Record, Idx);
+ E->ColonLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ VisitCastExpr(E);
+}
+
+void ASTStmtReader::VisitExplicitCastExpr(ExplicitCastExpr *E) {
+ VisitCastExpr(E);
+ E->setTypeInfoAsWritten(GetTypeSourceInfo(Record, Idx));
+}
+
+void ASTStmtReader::VisitCStyleCastExpr(CStyleCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ E->setLParenLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ VisitExpr(E);
+ E->setLParenLoc(ReadSourceLocation(Record, Idx));
+ E->setTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
+ E->setInitializer(Reader.ReadSubExpr());
+ E->setFileScope(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitExtVectorElementExpr(ExtVectorElementExpr *E) {
+ VisitExpr(E);
+ E->setBase(Reader.ReadSubExpr());
+ E->setAccessor(Reader.GetIdentifierInfo(F, Record, Idx));
+ E->setAccessorLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitInitListExpr(InitListExpr *E) {
+ VisitExpr(E);
+ E->setSyntacticForm(cast_or_null<InitListExpr>(Reader.ReadSubStmt()));
+ E->setLBraceLoc(ReadSourceLocation(Record, Idx));
+ E->setRBraceLoc(ReadSourceLocation(Record, Idx));
+ bool isArrayFiller = Record[Idx++];
+ Expr *filler = 0;
+ if (isArrayFiller) {
+ filler = Reader.ReadSubExpr();
+ E->ArrayFillerOrUnionFieldInit = filler;
+ } else
+ E->ArrayFillerOrUnionFieldInit = ReadDeclAs<FieldDecl>(Record, Idx);
+ E->sawArrayRangeDesignator(Record[Idx++]);
+ E->setInitializesStdInitializerList(Record[Idx++]);
+ unsigned NumInits = Record[Idx++];
+ E->reserveInits(Reader.getContext(), NumInits);
+ if (isArrayFiller) {
+ for (unsigned I = 0; I != NumInits; ++I) {
+ Expr *init = Reader.ReadSubExpr();
+ E->updateInit(Reader.getContext(), I, init ? init : filler);
+ }
+ } else {
+ for (unsigned I = 0; I != NumInits; ++I)
+ E->updateInit(Reader.getContext(), I, Reader.ReadSubExpr());
+ }
+}
+
+void ASTStmtReader::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
+ typedef DesignatedInitExpr::Designator Designator;
+
+ VisitExpr(E);
+ unsigned NumSubExprs = Record[Idx++];
+ assert(NumSubExprs == E->getNumSubExprs() && "Wrong number of subexprs");
+ for (unsigned I = 0; I != NumSubExprs; ++I)
+ E->setSubExpr(I, Reader.ReadSubExpr());
+ E->setEqualOrColonLoc(ReadSourceLocation(Record, Idx));
+ E->setGNUSyntax(Record[Idx++]);
+
+ SmallVector<Designator, 4> Designators;
+ while (Idx < Record.size()) {
+ switch ((DesignatorTypes)Record[Idx++]) {
+ case DESIG_FIELD_DECL: {
+ FieldDecl *Field = ReadDeclAs<FieldDecl>(Record, Idx);
+ SourceLocation DotLoc
+ = ReadSourceLocation(Record, Idx);
+ SourceLocation FieldLoc
+ = ReadSourceLocation(Record, Idx);
+ Designators.push_back(Designator(Field->getIdentifier(), DotLoc,
+ FieldLoc));
+ Designators.back().setField(Field);
+ break;
+ }
+
+ case DESIG_FIELD_NAME: {
+ const IdentifierInfo *Name = Reader.GetIdentifierInfo(F, Record, Idx);
+ SourceLocation DotLoc
+ = ReadSourceLocation(Record, Idx);
+ SourceLocation FieldLoc
+ = ReadSourceLocation(Record, Idx);
+ Designators.push_back(Designator(Name, DotLoc, FieldLoc));
+ break;
+ }
+
+ case DESIG_ARRAY: {
+ unsigned Index = Record[Idx++];
+ SourceLocation LBracketLoc
+ = ReadSourceLocation(Record, Idx);
+ SourceLocation RBracketLoc
+ = ReadSourceLocation(Record, Idx);
+ Designators.push_back(Designator(Index, LBracketLoc, RBracketLoc));
+ break;
+ }
+
+ case DESIG_ARRAY_RANGE: {
+ unsigned Index = Record[Idx++];
+ SourceLocation LBracketLoc
+ = ReadSourceLocation(Record, Idx);
+ SourceLocation EllipsisLoc
+ = ReadSourceLocation(Record, Idx);
+ SourceLocation RBracketLoc
+ = ReadSourceLocation(Record, Idx);
+ Designators.push_back(Designator(Index, LBracketLoc, EllipsisLoc,
+ RBracketLoc));
+ break;
+ }
+ }
+ }
+ E->setDesignators(Reader.getContext(),
+ Designators.data(), Designators.size());
+}
+
+void ASTStmtReader::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ VisitExpr(E);
+}
+
+void ASTStmtReader::VisitVAArgExpr(VAArgExpr *E) {
+ VisitExpr(E);
+ E->setSubExpr(Reader.ReadSubExpr());
+ E->setWrittenTypeInfo(GetTypeSourceInfo(Record, Idx));
+ E->setBuiltinLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitAddrLabelExpr(AddrLabelExpr *E) {
+ VisitExpr(E);
+ E->setAmpAmpLoc(ReadSourceLocation(Record, Idx));
+ E->setLabelLoc(ReadSourceLocation(Record, Idx));
+ E->setLabel(ReadDeclAs<LabelDecl>(Record, Idx));
+}
+
+void ASTStmtReader::VisitStmtExpr(StmtExpr *E) {
+ VisitExpr(E);
+ E->setLParenLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+ E->setSubStmt(cast_or_null<CompoundStmt>(Reader.ReadSubStmt()));
+}
+
+void ASTStmtReader::VisitChooseExpr(ChooseExpr *E) {
+ VisitExpr(E);
+ E->setCond(Reader.ReadSubExpr());
+ E->setLHS(Reader.ReadSubExpr());
+ E->setRHS(Reader.ReadSubExpr());
+ E->setBuiltinLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitGNUNullExpr(GNUNullExpr *E) {
+ VisitExpr(E);
+ E->setTokenLocation(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
+ VisitExpr(E);
+ SmallVector<Expr *, 16> Exprs;
+ unsigned NumExprs = Record[Idx++];
+ while (NumExprs--)
+ Exprs.push_back(Reader.ReadSubExpr());
+ E->setExprs(Reader.getContext(), Exprs.data(), Exprs.size());
+ E->setBuiltinLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitBlockExpr(BlockExpr *E) {
+ VisitExpr(E);
+ E->setBlockDecl(ReadDeclAs<BlockDecl>(Record, Idx));
+}
+
+void ASTStmtReader::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
+ VisitExpr(E);
+ E->NumAssocs = Record[Idx++];
+ E->AssocTypes = new (Reader.getContext()) TypeSourceInfo*[E->NumAssocs];
+ E->SubExprs =
+ new(Reader.getContext()) Stmt*[GenericSelectionExpr::END_EXPR+E->NumAssocs];
+
+ E->SubExprs[GenericSelectionExpr::CONTROLLING] = Reader.ReadSubExpr();
+ for (unsigned I = 0, N = E->getNumAssocs(); I != N; ++I) {
+ E->AssocTypes[I] = GetTypeSourceInfo(Record, Idx);
+ E->SubExprs[GenericSelectionExpr::END_EXPR+I] = Reader.ReadSubExpr();
+ }
+ E->ResultIndex = Record[Idx++];
+
+ E->GenericLoc = ReadSourceLocation(Record, Idx);
+ E->DefaultLoc = ReadSourceLocation(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ VisitExpr(E);
+ unsigned numSemanticExprs = Record[Idx++];
+ assert(numSemanticExprs + 1 == E->PseudoObjectExprBits.NumSubExprs);
+ E->PseudoObjectExprBits.ResultIndex = Record[Idx++];
+
+ // Read the syntactic expression.
+ E->getSubExprsBuffer()[0] = Reader.ReadSubExpr();
+
+ // Read all the semantic expressions.
+ for (unsigned i = 0; i != numSemanticExprs; ++i) {
+ Expr *subExpr = Reader.ReadSubExpr();
+ E->getSubExprsBuffer()[i+1] = subExpr;
+ }
+}
+
+void ASTStmtReader::VisitAtomicExpr(AtomicExpr *E) {
+ VisitExpr(E);
+ E->Op = AtomicExpr::AtomicOp(Record[Idx++]);
+ E->NumSubExprs = AtomicExpr::getNumSubExprs(E->Op);
+ for (unsigned I = 0; I != E->NumSubExprs; ++I)
+ E->SubExprs[I] = Reader.ReadSubExpr();
+ E->BuiltinLoc = ReadSourceLocation(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
+}
+
+//===----------------------------------------------------------------------===//
+// Objective-C Expressions and Statements
+//===----------------------------------------------------------------------===//
+
+void ASTStmtReader::VisitObjCStringLiteral(ObjCStringLiteral *E) {
+ VisitExpr(E);
+ E->setString(cast<StringLiteral>(Reader.ReadSubStmt()));
+ E->setAtLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCNumericLiteral(ObjCNumericLiteral *E) {
+ VisitExpr(E);
+ // The number could be any of several literal kinds: IntegerLiteral,
+ // FloatingLiteral, etc.
+ E->Number = Reader.ReadSubStmt();
+ E->ObjCNumericLiteralMethod = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ E->AtLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
+ VisitExpr(E);
+ unsigned NumElements = Record[Idx++];
+ assert(NumElements == E->getNumElements() && "Wrong number of elements");
+ Expr **Elements = E->getElements();
+ for (unsigned I = 0, N = NumElements; I != N; ++I)
+ Elements[I] = Reader.ReadSubExpr();
+ E->ArrayWithObjectsMethod = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ E->Range = ReadSourceRange(Record, Idx);
+}
+
+void ASTStmtReader::VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ VisitExpr(E);
+ unsigned NumElements = Record[Idx++];
+ assert(NumElements == E->getNumElements() && "Wrong number of elements");
+ bool HasPackExpansions = Record[Idx++];
+ assert(HasPackExpansions == E->HasPackExpansions &&"Pack expansion mismatch");
+ ObjCDictionaryLiteral::KeyValuePair *KeyValues = E->getKeyValues();
+ ObjCDictionaryLiteral::ExpansionData *Expansions = E->getExpansionData();
+ for (unsigned I = 0; I != NumElements; ++I) {
+ KeyValues[I].Key = Reader.ReadSubExpr();
+ KeyValues[I].Value = Reader.ReadSubExpr();
+ if (HasPackExpansions) {
+ Expansions[I].EllipsisLoc = ReadSourceLocation(Record, Idx);
+ Expansions[I].NumExpansionsPlusOne = Record[Idx++];
+ }
+ }
+ E->DictWithObjectsMethod = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ E->Range = ReadSourceRange(Record, Idx);
+}
+
+void ASTStmtReader::VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
+ VisitExpr(E);
+ E->setEncodedTypeSourceInfo(GetTypeSourceInfo(Record, Idx));
+ E->setAtLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
+ VisitExpr(E);
+ E->setSelector(Reader.ReadSelector(F, Record, Idx));
+ E->setAtLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
+ VisitExpr(E);
+ E->setProtocol(ReadDeclAs<ObjCProtocolDecl>(Record, Idx));
+ E->setAtLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ VisitExpr(E);
+ E->setDecl(ReadDeclAs<ObjCIvarDecl>(Record, Idx));
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setBase(Reader.ReadSubExpr());
+ E->setIsArrow(Record[Idx++]);
+ E->setIsFreeIvar(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ VisitExpr(E);
+ unsigned MethodRefFlags = Record[Idx++];
+ bool Implicit = Record[Idx++] != 0;
+ if (Implicit) {
+ ObjCMethodDecl *Getter = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ ObjCMethodDecl *Setter = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ E->setImplicitProperty(Getter, Setter, MethodRefFlags);
+ } else {
+ E->setExplicitProperty(ReadDeclAs<ObjCPropertyDecl>(Record, Idx),
+ MethodRefFlags);
+ }
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setReceiverLocation(ReadSourceLocation(Record, Idx));
+ switch (Record[Idx++]) {
+ case 0:
+ E->setBase(Reader.ReadSubExpr());
+ break;
+ case 1:
+ E->setSuperReceiver(Reader.readType(F, Record, Idx));
+ break;
+ case 2:
+ E->setClassReceiver(ReadDeclAs<ObjCInterfaceDecl>(Record, Idx));
+ break;
+ }
+}
+
+void ASTStmtReader::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *E) {
+ VisitExpr(E);
+ E->setRBracket(ReadSourceLocation(Record, Idx));
+ E->setBaseExpr(Reader.ReadSubExpr());
+ E->setKeyExpr(Reader.ReadSubExpr());
+ E->GetAtIndexMethodDecl = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+ E->SetAtIndexMethodDecl = ReadDeclAs<ObjCMethodDecl>(Record, Idx);
+}
+
+void ASTStmtReader::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ VisitExpr(E);
+ assert(Record[Idx] == E->getNumArgs());
+ ++Idx;
+ unsigned NumStoredSelLocs = Record[Idx++];
+ E->SelLocsKind = Record[Idx++];
+ E->setDelegateInitCall(Record[Idx++]);
+ E->IsImplicit = Record[Idx++];
+ ObjCMessageExpr::ReceiverKind Kind
+ = static_cast<ObjCMessageExpr::ReceiverKind>(Record[Idx++]);
+ switch (Kind) {
+ case ObjCMessageExpr::Instance:
+ E->setInstanceReceiver(Reader.ReadSubExpr());
+ break;
+
+ case ObjCMessageExpr::Class:
+ E->setClassReceiver(GetTypeSourceInfo(Record, Idx));
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ case ObjCMessageExpr::SuperInstance: {
+ QualType T = Reader.readType(F, Record, Idx);
+ SourceLocation SuperLoc = ReadSourceLocation(Record, Idx);
+ E->setSuper(SuperLoc, T, Kind == ObjCMessageExpr::SuperInstance);
+ break;
+ }
+ }
+
+ assert(Kind == E->getReceiverKind());
+
+ if (Record[Idx++])
+ E->setMethodDecl(ReadDeclAs<ObjCMethodDecl>(Record, Idx));
+ else
+ E->setSelector(Reader.ReadSelector(F, Record, Idx));
+
+ E->LBracLoc = ReadSourceLocation(Record, Idx);
+ E->RBracLoc = ReadSourceLocation(Record, Idx);
+
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ E->setArg(I, Reader.ReadSubExpr());
+
+ SourceLocation *Locs = E->getStoredSelLocs();
+ for (unsigned I = 0; I != NumStoredSelLocs; ++I)
+ Locs[I] = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+ VisitStmt(S);
+ S->setElement(Reader.ReadSubStmt());
+ S->setCollection(Reader.ReadSubExpr());
+ S->setBody(Reader.ReadSubStmt());
+ S->setForLoc(ReadSourceLocation(Record, Idx));
+ S->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+ VisitStmt(S);
+ S->setCatchBody(Reader.ReadSubStmt());
+ S->setCatchParamDecl(ReadDeclAs<VarDecl>(Record, Idx));
+ S->setAtCatchLoc(ReadSourceLocation(Record, Idx));
+ S->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+ VisitStmt(S);
+ S->setFinallyBody(Reader.ReadSubStmt());
+ S->setAtFinallyLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
+ VisitStmt(S);
+ S->setSubStmt(Reader.ReadSubStmt());
+ S->setAtLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
+ VisitStmt(S);
+ assert(Record[Idx] == S->getNumCatchStmts());
+ ++Idx;
+ bool HasFinally = Record[Idx++];
+ S->setTryBody(Reader.ReadSubStmt());
+ for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I)
+ S->setCatchStmt(I, cast_or_null<ObjCAtCatchStmt>(Reader.ReadSubStmt()));
+
+ if (HasFinally)
+ S->setFinallyStmt(Reader.ReadSubStmt());
+ S->setAtTryLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+ VisitStmt(S);
+ S->setSynchExpr(Reader.ReadSubStmt());
+ S->setSynchBody(Reader.ReadSubStmt());
+ S->setAtSynchronizedLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+ VisitStmt(S);
+ S->setThrowExpr(Reader.ReadSubStmt());
+ S->setThrowLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *E) {
+ VisitExpr(E);
+ E->setValue(Record[Idx++]);
+ E->setLocation(ReadSourceLocation(Record, Idx));
+}
+
+//===----------------------------------------------------------------------===//
+// C++ Expressions and Statements
+//===----------------------------------------------------------------------===//
+
+void ASTStmtReader::VisitCXXCatchStmt(CXXCatchStmt *S) {
+ VisitStmt(S);
+ S->CatchLoc = ReadSourceLocation(Record, Idx);
+ S->ExceptionDecl = ReadDeclAs<VarDecl>(Record, Idx);
+ S->HandlerBlock = Reader.ReadSubStmt();
+}
+
+void ASTStmtReader::VisitCXXTryStmt(CXXTryStmt *S) {
+ VisitStmt(S);
+ assert(Record[Idx] == S->getNumHandlers() && "NumStmtFields is wrong?");
+ ++Idx;
+ S->TryLoc = ReadSourceLocation(Record, Idx);
+ S->getStmts()[0] = Reader.ReadSubStmt();
+ for (unsigned i = 0, e = S->getNumHandlers(); i != e; ++i)
+ S->getStmts()[i + 1] = Reader.ReadSubStmt();
+}
+
+void ASTStmtReader::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
+ VisitStmt(S);
+ S->setForLoc(ReadSourceLocation(Record, Idx));
+ S->setColonLoc(ReadSourceLocation(Record, Idx));
+ S->setRParenLoc(ReadSourceLocation(Record, Idx));
+ S->setRangeStmt(Reader.ReadSubStmt());
+ S->setBeginEndStmt(Reader.ReadSubStmt());
+ S->setCond(Reader.ReadSubExpr());
+ S->setInc(Reader.ReadSubExpr());
+ S->setLoopVarStmt(Reader.ReadSubStmt());
+ S->setBody(Reader.ReadSubStmt());
+}
+
+void ASTStmtReader::VisitMSDependentExistsStmt(MSDependentExistsStmt *S) {
+ VisitStmt(S);
+ S->KeywordLoc = ReadSourceLocation(Record, Idx);
+ S->IsIfExists = Record[Idx++];
+ S->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ ReadDeclarationNameInfo(S->NameInfo, Record, Idx);
+ S->SubStmt = Reader.ReadSubStmt();
+}
+
+void ASTStmtReader::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
+ VisitCallExpr(E);
+ E->setOperator((OverloadedOperatorKind)Record[Idx++]);
+}
+
+void ASTStmtReader::VisitCXXConstructExpr(CXXConstructExpr *E) {
+ VisitExpr(E);
+ E->NumArgs = Record[Idx++];
+ if (E->NumArgs)
+ E->Args = new (Reader.getContext()) Stmt*[E->NumArgs];
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ E->setArg(I, Reader.ReadSubExpr());
+ E->setConstructor(ReadDeclAs<CXXConstructorDecl>(Record, Idx));
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setElidable(Record[Idx++]);
+ E->setHadMultipleCandidates(Record[Idx++]);
+ E->setRequiresZeroInitialization(Record[Idx++]);
+ E->setConstructionKind((CXXConstructExpr::ConstructionKind)Record[Idx++]);
+ E->ParenRange = ReadSourceRange(Record, Idx);
+}
+
+void ASTStmtReader::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
+ VisitCXXConstructExpr(E);
+ E->Type = GetTypeSourceInfo(Record, Idx);
+}
+
+void ASTStmtReader::VisitLambdaExpr(LambdaExpr *E) {
+ VisitExpr(E);
+ unsigned NumCaptures = Record[Idx++];
+ assert(NumCaptures == E->NumCaptures); (void)NumCaptures;
+ unsigned NumArrayIndexVars = Record[Idx++];
+ E->IntroducerRange = ReadSourceRange(Record, Idx);
+ E->CaptureDefault = static_cast<LambdaCaptureDefault>(Record[Idx++]);
+ E->ExplicitParams = Record[Idx++];
+ E->ExplicitResultType = Record[Idx++];
+ E->ClosingBrace = ReadSourceLocation(Record, Idx);
+
+ // Read capture initializers.
+ for (LambdaExpr::capture_init_iterator C = E->capture_init_begin(),
+ CEnd = E->capture_init_end();
+ C != CEnd; ++C)
+ *C = Reader.ReadSubExpr();
+
+ // Read array capture index variables.
+ if (NumArrayIndexVars > 0) {
+ unsigned *ArrayIndexStarts = E->getArrayIndexStarts();
+ for (unsigned I = 0; I != NumCaptures + 1; ++I)
+ ArrayIndexStarts[I] = Record[Idx++];
+
+ VarDecl **ArrayIndexVars = E->getArrayIndexVars();
+ for (unsigned I = 0; I != NumArrayIndexVars; ++I)
+ ArrayIndexVars[I] = ReadDeclAs<VarDecl>(Record, Idx);
+ }
+}
+
+void ASTStmtReader::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ SourceRange R = ReadSourceRange(Record, Idx);
+ E->Loc = R.getBegin();
+ E->RParenLoc = R.getEnd();
+}
+
+void ASTStmtReader::VisitCXXStaticCastExpr(CXXStaticCastExpr *E) {
+ return VisitCXXNamedCastExpr(E);
+}
+
+void ASTStmtReader::VisitCXXDynamicCastExpr(CXXDynamicCastExpr *E) {
+ return VisitCXXNamedCastExpr(E);
+}
+
+void ASTStmtReader::VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *E) {
+ return VisitCXXNamedCastExpr(E);
+}
+
+void ASTStmtReader::VisitCXXConstCastExpr(CXXConstCastExpr *E) {
+ return VisitCXXNamedCastExpr(E);
+}
+
+void ASTStmtReader::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ E->setTypeBeginLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitUserDefinedLiteral(UserDefinedLiteral *E) {
+ VisitCallExpr(E);
+ E->UDSuffixLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
+ VisitExpr(E);
+ E->setValue(Record[Idx++]);
+ E->setLocation(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) {
+ VisitExpr(E);
+ E->setLocation(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
+ VisitExpr(E);
+ E->setSourceRange(ReadSourceRange(Record, Idx));
+ if (E->isTypeOperand()) { // typeid(int)
+ E->setTypeOperandSourceInfo(
+ GetTypeSourceInfo(Record, Idx));
+ return;
+ }
+
+ // typeid(42+2)
+ E->setExprOperand(Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitCXXThisExpr(CXXThisExpr *E) {
+ VisitExpr(E);
+ E->setLocation(ReadSourceLocation(Record, Idx));
+ E->setImplicit(Record[Idx++]);
+}
+
+void ASTStmtReader::VisitCXXThrowExpr(CXXThrowExpr *E) {
+ VisitExpr(E);
+ E->ThrowLoc = ReadSourceLocation(Record, Idx);
+ E->Op = Reader.ReadSubExpr();
+ E->IsThrownVariableInScope = Record[Idx++];
+}
+
+void ASTStmtReader::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
+ VisitExpr(E);
+
+ assert((bool)Record[Idx] == E->Param.getInt() && "We messed up at creation?");
+ ++Idx; // HasOtherExprStored and SubExpr were handled during creation.
+ E->Param.setPointer(ReadDeclAs<ParmVarDecl>(Record, Idx));
+ E->Loc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ VisitExpr(E);
+ E->setTemporary(Reader.ReadCXXTemporary(F, Record, Idx));
+ E->setSubExpr(Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
+ VisitExpr(E);
+ E->TypeInfo = GetTypeSourceInfo(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitCXXNewExpr(CXXNewExpr *E) {
+ VisitExpr(E);
+ E->GlobalNew = Record[Idx++];
+ bool isArray = Record[Idx++];
+ E->UsualArrayDeleteWantsSize = Record[Idx++];
+ unsigned NumPlacementArgs = Record[Idx++];
+ E->StoredInitializationStyle = Record[Idx++];
+ E->setOperatorNew(ReadDeclAs<FunctionDecl>(Record, Idx));
+ E->setOperatorDelete(ReadDeclAs<FunctionDecl>(Record, Idx));
+ E->AllocatedTypeInfo = GetTypeSourceInfo(Record, Idx);
+ E->TypeIdParens = ReadSourceRange(Record, Idx);
+ E->StartLoc = ReadSourceLocation(Record, Idx);
+ E->DirectInitRange = ReadSourceRange(Record, Idx);
+
+ E->AllocateArgsArray(Reader.getContext(), isArray, NumPlacementArgs,
+ E->StoredInitializationStyle != 0);
+
+ // Install all the subexpressions.
+ for (CXXNewExpr::raw_arg_iterator I = E->raw_arg_begin(),e = E->raw_arg_end();
+ I != e; ++I)
+ *I = Reader.ReadSubStmt();
+}
+
+void ASTStmtReader::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ VisitExpr(E);
+ E->GlobalDelete = Record[Idx++];
+ E->ArrayForm = Record[Idx++];
+ E->ArrayFormAsWritten = Record[Idx++];
+ E->UsualArrayDeleteWantsSize = Record[Idx++];
+ E->OperatorDelete = ReadDeclAs<FunctionDecl>(Record, Idx);
+ E->Argument = Reader.ReadSubExpr();
+ E->Loc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
+ VisitExpr(E);
+
+ E->Base = Reader.ReadSubExpr();
+ E->IsArrow = Record[Idx++];
+ E->OperatorLoc = ReadSourceLocation(Record, Idx);
+ E->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ E->ScopeType = GetTypeSourceInfo(Record, Idx);
+ E->ColonColonLoc = ReadSourceLocation(Record, Idx);
+ E->TildeLoc = ReadSourceLocation(Record, Idx);
+
+ IdentifierInfo *II = Reader.GetIdentifierInfo(F, Record, Idx);
+ if (II)
+ E->setDestroyedType(II, ReadSourceLocation(Record, Idx));
+ else
+ E->setDestroyedType(GetTypeSourceInfo(Record, Idx));
+}
+
+void ASTStmtReader::VisitExprWithCleanups(ExprWithCleanups *E) {
+ VisitExpr(E);
+
+ unsigned NumObjects = Record[Idx++];
+ assert(NumObjects == E->getNumObjects());
+ for (unsigned i = 0; i != NumObjects; ++i)
+ E->getObjectsBuffer()[i] = ReadDeclAs<BlockDecl>(Record, Idx);
+
+ E->SubExpr = Reader.ReadSubExpr();
+}
+
+void
+ASTStmtReader::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){
+ VisitExpr(E);
+
+ if (Record[Idx++]) // HasTemplateKWAndArgsInfo
+ ReadTemplateKWAndArgsInfo(*E->getTemplateKWAndArgsInfo(),
+ /*NumTemplateArgs=*/Record[Idx++]);
+
+ E->Base = Reader.ReadSubExpr();
+ E->BaseType = Reader.readType(F, Record, Idx);
+ E->IsArrow = Record[Idx++];
+ E->OperatorLoc = ReadSourceLocation(Record, Idx);
+ E->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ E->FirstQualifierFoundInScope = ReadDeclAs<NamedDecl>(Record, Idx);
+ ReadDeclarationNameInfo(E->MemberNameInfo, Record, Idx);
+}
+
+void
+ASTStmtReader::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
+ VisitExpr(E);
+
+ if (Record[Idx++]) // HasTemplateKWAndArgsInfo
+ ReadTemplateKWAndArgsInfo(*E->getTemplateKWAndArgsInfo(),
+ /*NumTemplateArgs=*/Record[Idx++]);
+
+ E->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+ ReadDeclarationNameInfo(E->NameInfo, Record, Idx);
+}
+
+void
+ASTStmtReader::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
+ VisitExpr(E);
+ assert(Record[Idx] == E->arg_size() && "Read wrong record during creation?");
+ ++Idx; // NumArgs;
+ for (unsigned I = 0, N = E->arg_size(); I != N; ++I)
+ E->setArg(I, Reader.ReadSubExpr());
+ E->Type = GetTypeSourceInfo(Record, Idx);
+ E->setLParenLoc(ReadSourceLocation(Record, Idx));
+ E->setRParenLoc(ReadSourceLocation(Record, Idx));
+}
+
+void ASTStmtReader::VisitOverloadExpr(OverloadExpr *E) {
+ VisitExpr(E);
+
+ if (Record[Idx++]) // HasTemplateKWAndArgsInfo
+ ReadTemplateKWAndArgsInfo(*E->getTemplateKWAndArgsInfo(),
+ /*NumTemplateArgs=*/Record[Idx++]);
+
+ unsigned NumDecls = Record[Idx++];
+ UnresolvedSet<8> Decls;
+ for (unsigned i = 0; i != NumDecls; ++i) {
+ NamedDecl *D = ReadDeclAs<NamedDecl>(Record, Idx);
+ AccessSpecifier AS = (AccessSpecifier)Record[Idx++];
+ Decls.addDecl(D, AS);
+ }
+ E->initializeResults(Reader.getContext(), Decls.begin(), Decls.end());
+
+ ReadDeclarationNameInfo(E->NameInfo, Record, Idx);
+ E->QualifierLoc = Reader.ReadNestedNameSpecifierLoc(F, Record, Idx);
+}
+
+void ASTStmtReader::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
+ VisitOverloadExpr(E);
+ E->IsArrow = Record[Idx++];
+ E->HasUnresolvedUsing = Record[Idx++];
+ E->Base = Reader.ReadSubExpr();
+ E->BaseType = Reader.readType(F, Record, Idx);
+ E->OperatorLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
+ VisitOverloadExpr(E);
+ E->RequiresADL = Record[Idx++];
+ if (E->RequiresADL)
+ E->StdIsAssociatedNamespace = Record[Idx++];
+ E->Overloaded = Record[Idx++];
+ E->NamingClass = ReadDeclAs<CXXRecordDecl>(Record, Idx);
+}
+
+void ASTStmtReader::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
+ VisitExpr(E);
+ E->UTT = (UnaryTypeTrait)Record[Idx++];
+ E->Value = (bool)Record[Idx++];
+ SourceRange Range = ReadSourceRange(Record, Idx);
+ E->Loc = Range.getBegin();
+ E->RParen = Range.getEnd();
+ E->QueriedType = GetTypeSourceInfo(Record, Idx);
+}
+
+void ASTStmtReader::VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
+ VisitExpr(E);
+ E->BTT = (BinaryTypeTrait)Record[Idx++];
+ E->Value = (bool)Record[Idx++];
+ SourceRange Range = ReadSourceRange(Record, Idx);
+ E->Loc = Range.getBegin();
+ E->RParen = Range.getEnd();
+ E->LhsType = GetTypeSourceInfo(Record, Idx);
+ E->RhsType = GetTypeSourceInfo(Record, Idx);
+}
+
+void ASTStmtReader::VisitTypeTraitExpr(TypeTraitExpr *E) {
+ VisitExpr(E);
+ E->TypeTraitExprBits.NumArgs = Record[Idx++];
+ E->TypeTraitExprBits.Kind = Record[Idx++];
+ E->TypeTraitExprBits.Value = Record[Idx++];
+
+ TypeSourceInfo **Args = E->getTypeSourceInfos();
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ Args[I] = GetTypeSourceInfo(Record, Idx);
+}
+
+void ASTStmtReader::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
+ VisitExpr(E);
+ E->ATT = (ArrayTypeTrait)Record[Idx++];
+ E->Value = (unsigned int)Record[Idx++];
+ SourceRange Range = ReadSourceRange(Record, Idx);
+ E->Loc = Range.getBegin();
+ E->RParen = Range.getEnd();
+ E->QueriedType = GetTypeSourceInfo(Record, Idx);
+}
+
+void ASTStmtReader::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
+ VisitExpr(E);
+ E->ET = (ExpressionTrait)Record[Idx++];
+ E->Value = (bool)Record[Idx++];
+ SourceRange Range = ReadSourceRange(Record, Idx);
+ E->QueriedExpression = Reader.ReadSubExpr();
+ E->Loc = Range.getBegin();
+ E->RParen = Range.getEnd();
+}
+
+void ASTStmtReader::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
+ VisitExpr(E);
+ E->Value = (bool)Record[Idx++];
+ E->Range = ReadSourceRange(Record, Idx);
+ E->Operand = Reader.ReadSubExpr();
+}
+
+void ASTStmtReader::VisitPackExpansionExpr(PackExpansionExpr *E) {
+ VisitExpr(E);
+ E->EllipsisLoc = ReadSourceLocation(Record, Idx);
+ E->NumExpansions = Record[Idx++];
+ E->Pattern = Reader.ReadSubExpr();
+}
+
+void ASTStmtReader::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ VisitExpr(E);
+ E->OperatorLoc = ReadSourceLocation(Record, Idx);
+ E->PackLoc = ReadSourceLocation(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
+ E->Length = Record[Idx++];
+ E->Pack = ReadDeclAs<NamedDecl>(Record, Idx);
+}
+
+void ASTStmtReader::VisitSubstNonTypeTemplateParmExpr(
+ SubstNonTypeTemplateParmExpr *E) {
+ VisitExpr(E);
+ E->Param = ReadDeclAs<NonTypeTemplateParmDecl>(Record, Idx);
+ E->NameLoc = ReadSourceLocation(Record, Idx);
+ E->Replacement = Reader.ReadSubExpr();
+}
+
+void ASTStmtReader::VisitSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E) {
+ VisitExpr(E);
+ E->Param = ReadDeclAs<NonTypeTemplateParmDecl>(Record, Idx);
+ TemplateArgument ArgPack = Reader.ReadTemplateArgument(F, Record, Idx);
+ if (ArgPack.getKind() != TemplateArgument::Pack)
+ return;
+
+ E->Arguments = ArgPack.pack_begin();
+ E->NumArguments = ArgPack.pack_size();
+ E->NameLoc = ReadSourceLocation(Record, Idx);
+}
+
+void ASTStmtReader::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
+ VisitExpr(E);
+ E->Temporary = Reader.ReadSubExpr();
+}
+
+void ASTStmtReader::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ VisitExpr(E);
+ E->SourceExpr = Reader.ReadSubExpr();
+ E->Loc = ReadSourceLocation(Record, Idx);
+}
+
+//===----------------------------------------------------------------------===//
+// Microsoft Expressions and Statements
+//===----------------------------------------------------------------------===//
+void ASTStmtReader::VisitCXXUuidofExpr(CXXUuidofExpr *E) {
+ VisitExpr(E);
+ E->setSourceRange(ReadSourceRange(Record, Idx));
+ if (E->isTypeOperand()) { // __uuidof(ComType)
+ E->setTypeOperandSourceInfo(
+ GetTypeSourceInfo(Record, Idx));
+ return;
+ }
+
+ // __uuidof(expr)
+ E->setExprOperand(Reader.ReadSubExpr());
+}
+
+void ASTStmtReader::VisitSEHExceptStmt(SEHExceptStmt *S) {
+ VisitStmt(S);
+ S->Loc = ReadSourceLocation(Record, Idx);
+ S->Children[SEHExceptStmt::FILTER_EXPR] = Reader.ReadSubStmt();
+ S->Children[SEHExceptStmt::BLOCK] = Reader.ReadSubStmt();
+}
+
+void ASTStmtReader::VisitSEHFinallyStmt(SEHFinallyStmt *S) {
+ VisitStmt(S);
+ S->Loc = ReadSourceLocation(Record, Idx);
+ S->Block = Reader.ReadSubStmt();
+}
+
+void ASTStmtReader::VisitSEHTryStmt(SEHTryStmt *S) {
+ VisitStmt(S);
+ S->IsCXXTry = Record[Idx++];
+ S->TryLoc = ReadSourceLocation(Record, Idx);
+ S->Children[SEHTryStmt::TRY] = Reader.ReadSubStmt();
+ S->Children[SEHTryStmt::HANDLER] = Reader.ReadSubStmt();
+}
+
+//===----------------------------------------------------------------------===//
+// CUDA Expressions and Statements
+//===----------------------------------------------------------------------===//
+
+void ASTStmtReader::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
+ VisitCallExpr(E);
+ E->setConfig(cast<CallExpr>(Reader.ReadSubExpr()));
+}
+
+//===----------------------------------------------------------------------===//
+// OpenCL Expressions and Statements.
+//===----------------------------------------------------------------------===//
+void ASTStmtReader::VisitAsTypeExpr(AsTypeExpr *E) {
+ VisitExpr(E);
+ E->BuiltinLoc = ReadSourceLocation(Record, Idx);
+ E->RParenLoc = ReadSourceLocation(Record, Idx);
+ E->SrcExpr = Reader.ReadSubExpr();
+}
+
+//===----------------------------------------------------------------------===//
+// ASTReader Implementation
+//===----------------------------------------------------------------------===//
+
+Stmt *ASTReader::ReadStmt(ModuleFile &F) {
+ switch (ReadingKind) {
+ case Read_Decl:
+ case Read_Type:
+ return ReadStmtFromStream(F);
+ case Read_Stmt:
+ return ReadSubStmt();
+ }
+
+ llvm_unreachable("ReadingKind not set ?");
+}
+
+Expr *ASTReader::ReadExpr(ModuleFile &F) {
+ return cast_or_null<Expr>(ReadStmt(F));
+}
+
+Expr *ASTReader::ReadSubExpr() {
+ return cast_or_null<Expr>(ReadSubStmt());
+}
+
+// Within the bitstream, expressions are stored in Reverse Polish
+// Notation, with each subexpression preceding the expression that contains
+// it. Subexpressions are stored from last to first.
+// To evaluate expressions, we continue reading expressions and placing them on
+// the stack, with expressions having operands removing those operands from the
+// stack. Evaluation terminates when we see a STMT_STOP record, and
+// the single remaining expression on the stack is our result.
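+//
+// For example, "a + b" is stored as the record for "b", then the record for
+// "a", then the BinaryOperator record, then STMT_STOP: the reader pushes "b"
+// and "a", and visiting the BinaryOperator pops "a" (the LHS) and then "b"
+// (the RHS) back off the stack.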
+Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
+
+ ReadingKindTracker ReadingKind(Read_Stmt, *this);
+ llvm::BitstreamCursor &Cursor = F.DeclsCursor;
+
+ // Map of offset to previously deserialized stmt. The offset points
+  // just after the stmt record.
+ llvm::DenseMap<uint64_t, Stmt *> StmtEntries;
+
+#ifndef NDEBUG
+ unsigned PrevNumStmts = StmtStack.size();
+#endif
+
+ RecordData Record;
+ unsigned Idx;
+ ASTStmtReader Reader(*this, F, Cursor, Record, Idx);
+ Stmt::EmptyShell Empty;
+
+ while (true) {
+ unsigned Code = Cursor.ReadCode();
+ if (Code == llvm::bitc::END_BLOCK) {
+ if (Cursor.ReadBlockEnd()) {
+ Error("error at end of block in AST file");
+ return 0;
+ }
+ break;
+ }
+
+ if (Code == llvm::bitc::ENTER_SUBBLOCK) {
+ // No known subblocks, always skip them.
+ Cursor.ReadSubBlockID();
+ if (Cursor.SkipBlock()) {
+ Error("malformed block record in AST file");
+ return 0;
+ }
+ continue;
+ }
+
+ if (Code == llvm::bitc::DEFINE_ABBREV) {
+ Cursor.ReadAbbrevRecord();
+ continue;
+ }
+
+ Stmt *S = 0;
+ Idx = 0;
+ Record.clear();
+ bool Finished = false;
+ bool IsStmtReference = false;
+ switch ((StmtCode)Cursor.ReadRecord(Code, Record)) {
+ case STMT_STOP:
+ Finished = true;
+ break;
+
+ case STMT_REF_PTR:
+ IsStmtReference = true;
+ assert(StmtEntries.find(Record[0]) != StmtEntries.end() &&
+ "No stmt was recorded for this offset reference!");
+ S = StmtEntries[Record[Idx++]];
+ break;
+
+ case STMT_NULL_PTR:
+ S = 0;
+ break;
+
+ case STMT_NULL:
+ S = new (Context) NullStmt(Empty);
+ break;
+
+ case STMT_COMPOUND:
+ S = new (Context) CompoundStmt(Empty);
+ break;
+
+ case STMT_CASE:
+ S = new (Context) CaseStmt(Empty);
+ break;
+
+ case STMT_DEFAULT:
+ S = new (Context) DefaultStmt(Empty);
+ break;
+
+ case STMT_LABEL:
+ S = new (Context) LabelStmt(Empty);
+ break;
+
+ case STMT_IF:
+ S = new (Context) IfStmt(Empty);
+ break;
+
+ case STMT_SWITCH:
+ S = new (Context) SwitchStmt(Empty);
+ break;
+
+ case STMT_WHILE:
+ S = new (Context) WhileStmt(Empty);
+ break;
+
+ case STMT_DO:
+ S = new (Context) DoStmt(Empty);
+ break;
+
+ case STMT_FOR:
+ S = new (Context) ForStmt(Empty);
+ break;
+
+ case STMT_GOTO:
+ S = new (Context) GotoStmt(Empty);
+ break;
+
+ case STMT_INDIRECT_GOTO:
+ S = new (Context) IndirectGotoStmt(Empty);
+ break;
+
+ case STMT_CONTINUE:
+ S = new (Context) ContinueStmt(Empty);
+ break;
+
+ case STMT_BREAK:
+ S = new (Context) BreakStmt(Empty);
+ break;
+
+ case STMT_RETURN:
+ S = new (Context) ReturnStmt(Empty);
+ break;
+
+ case STMT_DECL:
+ S = new (Context) DeclStmt(Empty);
+ break;
+
+ case STMT_ASM:
+ S = new (Context) AsmStmt(Empty);
+ break;
+
+ case EXPR_PREDEFINED:
+ S = new (Context) PredefinedExpr(Empty);
+ break;
+
+ case EXPR_DECL_REF:
+ S = DeclRefExpr::CreateEmpty(
+ Context,
+ /*HasQualifier=*/Record[ASTStmtReader::NumExprFields],
+ /*HasFoundDecl=*/Record[ASTStmtReader::NumExprFields + 1],
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields + 2],
+ /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields + 2] ?
+ Record[ASTStmtReader::NumExprFields + 5] : 0);
+ break;
+
+ case EXPR_INTEGER_LITERAL:
+ S = IntegerLiteral::Create(Context, Empty);
+ break;
+
+ case EXPR_FLOATING_LITERAL:
+ S = FloatingLiteral::Create(Context, Empty);
+ break;
+
+ case EXPR_IMAGINARY_LITERAL:
+ S = new (Context) ImaginaryLiteral(Empty);
+ break;
+
+ case EXPR_STRING_LITERAL:
+ S = StringLiteral::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields + 1]);
+ break;
+
+ case EXPR_CHARACTER_LITERAL:
+ S = new (Context) CharacterLiteral(Empty);
+ break;
+
+ case EXPR_PAREN:
+ S = new (Context) ParenExpr(Empty);
+ break;
+
+ case EXPR_PAREN_LIST:
+ S = new (Context) ParenListExpr(Empty);
+ break;
+
+ case EXPR_UNARY_OPERATOR:
+ S = new (Context) UnaryOperator(Empty);
+ break;
+
+ case EXPR_OFFSETOF:
+ S = OffsetOfExpr::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields],
+ Record[ASTStmtReader::NumExprFields + 1]);
+ break;
+
+ case EXPR_SIZEOF_ALIGN_OF:
+ S = new (Context) UnaryExprOrTypeTraitExpr(Empty);
+ break;
+
+ case EXPR_ARRAY_SUBSCRIPT:
+ S = new (Context) ArraySubscriptExpr(Empty);
+ break;
+
+ case EXPR_CALL:
+ S = new (Context) CallExpr(Context, Stmt::CallExprClass, Empty);
+ break;
+
+ case EXPR_MEMBER: {
+ // We load everything here and fully initialize it at creation.
+ // That way we can use MemberExpr::Create and don't have to duplicate its
+ // logic with a MemberExpr::CreateEmpty.
+
+ assert(Idx == 0);
+ NestedNameSpecifierLoc QualifierLoc;
+ if (Record[Idx++]) { // HasQualifier.
+ QualifierLoc = ReadNestedNameSpecifierLoc(F, Record, Idx);
+ }
+
+ SourceLocation TemplateKWLoc;
+ TemplateArgumentListInfo ArgInfo;
+ bool HasTemplateKWAndArgsInfo = Record[Idx++];
+ if (HasTemplateKWAndArgsInfo) {
+ TemplateKWLoc = ReadSourceLocation(F, Record, Idx);
+ unsigned NumTemplateArgs = Record[Idx++];
+ ArgInfo.setLAngleLoc(ReadSourceLocation(F, Record, Idx));
+ ArgInfo.setRAngleLoc(ReadSourceLocation(F, Record, Idx));
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ ArgInfo.addArgument(ReadTemplateArgumentLoc(F, Record, Idx));
+ }
+
+ bool HadMultipleCandidates = Record[Idx++];
+
+ NamedDecl *FoundD = ReadDeclAs<NamedDecl>(F, Record, Idx);
+ AccessSpecifier AS = (AccessSpecifier)Record[Idx++];
+ DeclAccessPair FoundDecl = DeclAccessPair::make(FoundD, AS);
+
+ QualType T = readType(F, Record, Idx);
+ ExprValueKind VK = static_cast<ExprValueKind>(Record[Idx++]);
+ ExprObjectKind OK = static_cast<ExprObjectKind>(Record[Idx++]);
+ Expr *Base = ReadSubExpr();
+ ValueDecl *MemberD = ReadDeclAs<ValueDecl>(F, Record, Idx);
+ SourceLocation MemberLoc = ReadSourceLocation(F, Record, Idx);
+ DeclarationNameInfo MemberNameInfo(MemberD->getDeclName(), MemberLoc);
+ bool IsArrow = Record[Idx++];
+
+ S = MemberExpr::Create(Context, Base, IsArrow, QualifierLoc,
+ TemplateKWLoc, MemberD, FoundDecl, MemberNameInfo,
+ HasTemplateKWAndArgsInfo ? &ArgInfo : 0,
+ T, VK, OK);
+ ReadDeclarationNameLoc(F, cast<MemberExpr>(S)->MemberDNLoc,
+ MemberD->getDeclName(), Record, Idx);
+ if (HadMultipleCandidates)
+ cast<MemberExpr>(S)->setHadMultipleCandidates(true);
+ break;
+ }
+
+ case EXPR_BINARY_OPERATOR:
+ S = new (Context) BinaryOperator(Empty);
+ break;
+
+ case EXPR_COMPOUND_ASSIGN_OPERATOR:
+ S = new (Context) CompoundAssignOperator(Empty);
+ break;
+
+ case EXPR_CONDITIONAL_OPERATOR:
+ S = new (Context) ConditionalOperator(Empty);
+ break;
+
+ case EXPR_BINARY_CONDITIONAL_OPERATOR:
+ S = new (Context) BinaryConditionalOperator(Empty);
+ break;
+
+ case EXPR_IMPLICIT_CAST:
+ S = ImplicitCastExpr::CreateEmpty(Context,
+ /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_CSTYLE_CAST:
+ S = CStyleCastExpr::CreateEmpty(Context,
+ /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_COMPOUND_LITERAL:
+ S = new (Context) CompoundLiteralExpr(Empty);
+ break;
+
+ case EXPR_EXT_VECTOR_ELEMENT:
+ S = new (Context) ExtVectorElementExpr(Empty);
+ break;
+
+ case EXPR_INIT_LIST:
+ S = new (Context) InitListExpr(getContext(), Empty);
+ break;
+
+ case EXPR_DESIGNATED_INIT:
+ S = DesignatedInitExpr::CreateEmpty(Context,
+                                     Record[ASTStmtReader::NumExprFields] - 1);
+      break;
+
+ case EXPR_IMPLICIT_VALUE_INIT:
+ S = new (Context) ImplicitValueInitExpr(Empty);
+ break;
+
+ case EXPR_VA_ARG:
+ S = new (Context) VAArgExpr(Empty);
+ break;
+
+ case EXPR_ADDR_LABEL:
+ S = new (Context) AddrLabelExpr(Empty);
+ break;
+
+ case EXPR_STMT:
+ S = new (Context) StmtExpr(Empty);
+ break;
+
+ case EXPR_CHOOSE:
+ S = new (Context) ChooseExpr(Empty);
+ break;
+
+ case EXPR_GNU_NULL:
+ S = new (Context) GNUNullExpr(Empty);
+ break;
+
+ case EXPR_SHUFFLE_VECTOR:
+ S = new (Context) ShuffleVectorExpr(Empty);
+ break;
+
+ case EXPR_BLOCK:
+ S = new (Context) BlockExpr(Empty);
+ break;
+
+ case EXPR_GENERIC_SELECTION:
+ S = new (Context) GenericSelectionExpr(Empty);
+ break;
+
+ case EXPR_OBJC_STRING_LITERAL:
+ S = new (Context) ObjCStringLiteral(Empty);
+ break;
+ case EXPR_OBJC_NUMERIC_LITERAL:
+ S = new (Context) ObjCNumericLiteral(Empty);
+ break;
+ case EXPR_OBJC_ARRAY_LITERAL:
+ S = ObjCArrayLiteral::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields]);
+ break;
+ case EXPR_OBJC_DICTIONARY_LITERAL:
+ S = ObjCDictionaryLiteral::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields],
+ Record[ASTStmtReader::NumExprFields + 1]);
+ break;
+ case EXPR_OBJC_ENCODE:
+ S = new (Context) ObjCEncodeExpr(Empty);
+ break;
+ case EXPR_OBJC_SELECTOR_EXPR:
+ S = new (Context) ObjCSelectorExpr(Empty);
+ break;
+ case EXPR_OBJC_PROTOCOL_EXPR:
+ S = new (Context) ObjCProtocolExpr(Empty);
+ break;
+ case EXPR_OBJC_IVAR_REF_EXPR:
+ S = new (Context) ObjCIvarRefExpr(Empty);
+ break;
+ case EXPR_OBJC_PROPERTY_REF_EXPR:
+ S = new (Context) ObjCPropertyRefExpr(Empty);
+ break;
+ case EXPR_OBJC_SUBSCRIPT_REF_EXPR:
+ S = new (Context) ObjCSubscriptRefExpr(Empty);
+ break;
+ case EXPR_OBJC_KVC_REF_EXPR:
+ llvm_unreachable("mismatching AST file");
+ case EXPR_OBJC_MESSAGE_EXPR:
+ S = ObjCMessageExpr::CreateEmpty(Context,
+ Record[ASTStmtReader::NumExprFields],
+ Record[ASTStmtReader::NumExprFields + 1]);
+ break;
+ case EXPR_OBJC_ISA:
+ S = new (Context) ObjCIsaExpr(Empty);
+ break;
+ case EXPR_OBJC_INDIRECT_COPY_RESTORE:
+ S = new (Context) ObjCIndirectCopyRestoreExpr(Empty);
+ break;
+ case EXPR_OBJC_BRIDGED_CAST:
+ S = new (Context) ObjCBridgedCastExpr(Empty);
+ break;
+ case STMT_OBJC_FOR_COLLECTION:
+ S = new (Context) ObjCForCollectionStmt(Empty);
+ break;
+ case STMT_OBJC_CATCH:
+ S = new (Context) ObjCAtCatchStmt(Empty);
+ break;
+ case STMT_OBJC_FINALLY:
+ S = new (Context) ObjCAtFinallyStmt(Empty);
+ break;
+ case STMT_OBJC_AT_TRY:
+ S = ObjCAtTryStmt::CreateEmpty(Context,
+ Record[ASTStmtReader::NumStmtFields],
+ Record[ASTStmtReader::NumStmtFields + 1]);
+ break;
+ case STMT_OBJC_AT_SYNCHRONIZED:
+ S = new (Context) ObjCAtSynchronizedStmt(Empty);
+ break;
+ case STMT_OBJC_AT_THROW:
+ S = new (Context) ObjCAtThrowStmt(Empty);
+ break;
+ case STMT_OBJC_AUTORELEASE_POOL:
+ S = new (Context) ObjCAutoreleasePoolStmt(Empty);
+ break;
+ case EXPR_OBJC_BOOL_LITERAL:
+ S = new (Context) ObjCBoolLiteralExpr(Empty);
+ break;
+ case STMT_SEH_EXCEPT:
+ S = new (Context) SEHExceptStmt(Empty);
+ break;
+ case STMT_SEH_FINALLY:
+ S = new (Context) SEHFinallyStmt(Empty);
+ break;
+ case STMT_SEH_TRY:
+ S = new (Context) SEHTryStmt(Empty);
+ break;
+ case STMT_CXX_CATCH:
+ S = new (Context) CXXCatchStmt(Empty);
+ break;
+
+ case STMT_CXX_TRY:
+ S = CXXTryStmt::Create(Context, Empty,
+ /*NumHandlers=*/Record[ASTStmtReader::NumStmtFields]);
+ break;
+
+ case STMT_CXX_FOR_RANGE:
+ S = new (Context) CXXForRangeStmt(Empty);
+ break;
+
+ case STMT_MS_DEPENDENT_EXISTS:
+ S = new (Context) MSDependentExistsStmt(SourceLocation(), true,
+ NestedNameSpecifierLoc(),
+ DeclarationNameInfo(),
+ 0);
+ break;
+
+ case EXPR_CXX_OPERATOR_CALL:
+ S = new (Context) CXXOperatorCallExpr(Context, Empty);
+ break;
+
+ case EXPR_CXX_MEMBER_CALL:
+ S = new (Context) CXXMemberCallExpr(Context, Empty);
+ break;
+
+ case EXPR_CXX_CONSTRUCT:
+ S = new (Context) CXXConstructExpr(Empty);
+ break;
+
+ case EXPR_CXX_TEMPORARY_OBJECT:
+ S = new (Context) CXXTemporaryObjectExpr(Empty);
+ break;
+
+ case EXPR_CXX_STATIC_CAST:
+ S = CXXStaticCastExpr::CreateEmpty(Context,
+ /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_CXX_DYNAMIC_CAST:
+ S = CXXDynamicCastExpr::CreateEmpty(Context,
+ /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_CXX_REINTERPRET_CAST:
+ S = CXXReinterpretCastExpr::CreateEmpty(Context,
+ /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_CXX_CONST_CAST:
+ S = CXXConstCastExpr::CreateEmpty(Context);
+ break;
+
+ case EXPR_CXX_FUNCTIONAL_CAST:
+ S = CXXFunctionalCastExpr::CreateEmpty(Context,
+ /*PathSize*/ Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_USER_DEFINED_LITERAL:
+ S = new (Context) UserDefinedLiteral(Context, Empty);
+ break;
+
+ case EXPR_CXX_BOOL_LITERAL:
+ S = new (Context) CXXBoolLiteralExpr(Empty);
+ break;
+
+ case EXPR_CXX_NULL_PTR_LITERAL:
+ S = new (Context) CXXNullPtrLiteralExpr(Empty);
+ break;
+ case EXPR_CXX_TYPEID_EXPR:
+ S = new (Context) CXXTypeidExpr(Empty, true);
+ break;
+ case EXPR_CXX_TYPEID_TYPE:
+ S = new (Context) CXXTypeidExpr(Empty, false);
+ break;
+ case EXPR_CXX_UUIDOF_EXPR:
+ S = new (Context) CXXUuidofExpr(Empty, true);
+ break;
+ case EXPR_CXX_UUIDOF_TYPE:
+ S = new (Context) CXXUuidofExpr(Empty, false);
+ break;
+ case EXPR_CXX_THIS:
+ S = new (Context) CXXThisExpr(Empty);
+ break;
+ case EXPR_CXX_THROW:
+ S = new (Context) CXXThrowExpr(Empty);
+ break;
+ case EXPR_CXX_DEFAULT_ARG: {
+ bool HasOtherExprStored = Record[ASTStmtReader::NumExprFields];
+ if (HasOtherExprStored) {
+ Expr *SubExpr = ReadSubExpr();
+ S = CXXDefaultArgExpr::Create(Context, SourceLocation(), 0, SubExpr);
+ } else
+ S = new (Context) CXXDefaultArgExpr(Empty);
+ break;
+ }
+ case EXPR_CXX_BIND_TEMPORARY:
+ S = new (Context) CXXBindTemporaryExpr(Empty);
+ break;
+
+ case EXPR_CXX_SCALAR_VALUE_INIT:
+ S = new (Context) CXXScalarValueInitExpr(Empty);
+ break;
+ case EXPR_CXX_NEW:
+ S = new (Context) CXXNewExpr(Empty);
+ break;
+ case EXPR_CXX_DELETE:
+ S = new (Context) CXXDeleteExpr(Empty);
+ break;
+ case EXPR_CXX_PSEUDO_DESTRUCTOR:
+ S = new (Context) CXXPseudoDestructorExpr(Empty);
+ break;
+
+ case EXPR_EXPR_WITH_CLEANUPS:
+ S = ExprWithCleanups::Create(Context, Empty,
+ Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_CXX_DEPENDENT_SCOPE_MEMBER:
+ S = CXXDependentScopeMemberExpr::CreateEmpty(Context,
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
+ /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
+ ? Record[ASTStmtReader::NumExprFields + 1]
+ : 0);
+ break;
+
+ case EXPR_CXX_DEPENDENT_SCOPE_DECL_REF:
+ S = DependentScopeDeclRefExpr::CreateEmpty(Context,
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
+ /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
+ ? Record[ASTStmtReader::NumExprFields + 1]
+ : 0);
+ break;
+
+ case EXPR_CXX_UNRESOLVED_CONSTRUCT:
+ S = CXXUnresolvedConstructExpr::CreateEmpty(Context,
+ /*NumArgs=*/Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_CXX_UNRESOLVED_MEMBER:
+ S = UnresolvedMemberExpr::CreateEmpty(Context,
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
+ /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
+ ? Record[ASTStmtReader::NumExprFields + 1]
+ : 0);
+ break;
+
+ case EXPR_CXX_UNRESOLVED_LOOKUP:
+ S = UnresolvedLookupExpr::CreateEmpty(Context,
+ /*HasTemplateKWAndArgsInfo=*/Record[ASTStmtReader::NumExprFields],
+ /*NumTemplateArgs=*/Record[ASTStmtReader::NumExprFields]
+ ? Record[ASTStmtReader::NumExprFields + 1]
+ : 0);
+ break;
+
+ case EXPR_CXX_UNARY_TYPE_TRAIT:
+ S = new (Context) UnaryTypeTraitExpr(Empty);
+ break;
+
+ case EXPR_BINARY_TYPE_TRAIT:
+ S = new (Context) BinaryTypeTraitExpr(Empty);
+ break;
+
+ case EXPR_TYPE_TRAIT:
+ S = TypeTraitExpr::CreateDeserialized(Context,
+ Record[ASTStmtReader::NumExprFields]);
+ break;
+
+ case EXPR_ARRAY_TYPE_TRAIT:
+ S = new (Context) ArrayTypeTraitExpr(Empty);
+ break;
+
+ case EXPR_CXX_EXPRESSION_TRAIT:
+ S = new (Context) ExpressionTraitExpr(Empty);
+ break;
+
+ case EXPR_CXX_NOEXCEPT:
+ S = new (Context) CXXNoexceptExpr(Empty);
+ break;
+
+ case EXPR_PACK_EXPANSION:
+ S = new (Context) PackExpansionExpr(Empty);
+ break;
+
+ case EXPR_SIZEOF_PACK:
+ S = new (Context) SizeOfPackExpr(Empty);
+ break;
+
+ case EXPR_SUBST_NON_TYPE_TEMPLATE_PARM:
+ S = new (Context) SubstNonTypeTemplateParmExpr(Empty);
+ break;
+
+ case EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK:
+ S = new (Context) SubstNonTypeTemplateParmPackExpr(Empty);
+ break;
+
+ case EXPR_MATERIALIZE_TEMPORARY:
+ S = new (Context) MaterializeTemporaryExpr(Empty);
+ break;
+
+ case EXPR_OPAQUE_VALUE:
+ S = new (Context) OpaqueValueExpr(Empty);
+ break;
+
+ case EXPR_CUDA_KERNEL_CALL:
+ S = new (Context) CUDAKernelCallExpr(Context, Empty);
+ break;
+
+ case EXPR_ASTYPE:
+ S = new (Context) AsTypeExpr(Empty);
+ break;
+
+ case EXPR_PSEUDO_OBJECT: {
+ unsigned numSemanticExprs = Record[ASTStmtReader::NumExprFields];
+ S = PseudoObjectExpr::Create(Context, Empty, numSemanticExprs);
+ break;
+ }
+
+ case EXPR_ATOMIC:
+ S = new (Context) AtomicExpr(Empty);
+ break;
+
+ case EXPR_LAMBDA: {
+ unsigned NumCaptures = Record[ASTStmtReader::NumExprFields];
+ unsigned NumArrayIndexVars = Record[ASTStmtReader::NumExprFields + 1];
+ S = LambdaExpr::CreateDeserialized(Context, NumCaptures,
+ NumArrayIndexVars);
+ break;
+ }
+ }
+
+ // We hit a STMT_STOP, so we're done with this expression.
+ if (Finished)
+ break;
+
+ ++NumStatementsRead;
+
+ if (S && !IsStmtReference) {
+ Reader.Visit(S);
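+      // Remember the bit offset just past this record so that later
+      // STMT_REF_PTR records can refer back to this statement.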
+ StmtEntries[Cursor.GetCurrentBitNo()] = S;
+ }
+
+ assert(Idx == Record.size() && "Invalid deserialization of statement");
+ StmtStack.push_back(S);
+ }
+
+#ifndef NDEBUG
+ assert(StmtStack.size() > PrevNumStmts && "Read too many sub stmts!");
+ assert(StmtStack.size() == PrevNumStmts + 1 && "Extra expressions on stack!");
+#endif
+
+ return StmtStack.pop_back_val();
+}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp
new file mode 100644
index 0000000..a4301b5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriter.cpp
@@ -0,0 +1,4548 @@
+//===--- ASTWriter.cpp - AST File Writer ----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ASTWriter class, which writes AST files.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Serialization/ASTWriter.h"
+#include "ASTCommon.h"
+#include "clang/Sema/Sema.h"
+#include "clang/Sema/IdentifierResolver.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclContextInternals.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DeclFriend.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/TypeLocVisitor.h"
+#include "clang/Serialization/ASTReader.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/PreprocessingRecord.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/HeaderSearch.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
+#include "clang/Basic/OnDiskHashTable.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/SourceManagerInternals.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/Version.h"
+#include "clang/Basic/VersionTuple.h"
+#include "llvm/ADT/APFloat.h"
+#include "llvm/ADT/APInt.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include <algorithm>
+#include <cstdio>
+#include <string.h>
+#include <utility>
+using namespace clang;
+using namespace clang::serialization;
+
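+// Reinterpret a vector's contiguous element storage as raw bytes so the
+// contents can be emitted as a bitstream blob.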
+template <typename T, typename Allocator>
+static StringRef data(const std::vector<T, Allocator> &v) {
+ if (v.empty()) return StringRef();
+ return StringRef(reinterpret_cast<const char*>(&v[0]),
+ sizeof(T) * v.size());
+}
+
+template <typename T>
+static StringRef data(const SmallVectorImpl<T> &v) {
+ return StringRef(reinterpret_cast<const char*>(v.data()),
+ sizeof(T) * v.size());
+}
+
+//===----------------------------------------------------------------------===//
+// Type serialization
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class ASTTypeWriter {
+ ASTWriter &Writer;
+ ASTWriter::RecordDataImpl &Record;
+
+ public:
+ /// \brief Type code that corresponds to the record generated.
+ TypeCode Code;
+
+ ASTTypeWriter(ASTWriter &Writer, ASTWriter::RecordDataImpl &Record)
+ : Writer(Writer), Record(Record), Code(TYPE_EXT_QUAL) { }
+
+ void VisitArrayType(const ArrayType *T);
+ void VisitFunctionType(const FunctionType *T);
+ void VisitTagType(const TagType *T);
+
+#define TYPE(Class, Base) void Visit##Class##Type(const Class##Type *T);
+#define ABSTRACT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+ };
+}
+
+void ASTTypeWriter::VisitBuiltinType(const BuiltinType *T) {
+ llvm_unreachable("Built-in types are never serialized");
+}
+
+void ASTTypeWriter::VisitComplexType(const ComplexType *T) {
+ Writer.AddTypeRef(T->getElementType(), Record);
+ Code = TYPE_COMPLEX;
+}
+
+void ASTTypeWriter::VisitPointerType(const PointerType *T) {
+ Writer.AddTypeRef(T->getPointeeType(), Record);
+ Code = TYPE_POINTER;
+}
+
+void ASTTypeWriter::VisitBlockPointerType(const BlockPointerType *T) {
+ Writer.AddTypeRef(T->getPointeeType(), Record);
+ Code = TYPE_BLOCK_POINTER;
+}
+
+void ASTTypeWriter::VisitLValueReferenceType(const LValueReferenceType *T) {
+ Writer.AddTypeRef(T->getPointeeTypeAsWritten(), Record);
+ Record.push_back(T->isSpelledAsLValue());
+ Code = TYPE_LVALUE_REFERENCE;
+}
+
+void ASTTypeWriter::VisitRValueReferenceType(const RValueReferenceType *T) {
+ Writer.AddTypeRef(T->getPointeeTypeAsWritten(), Record);
+ Code = TYPE_RVALUE_REFERENCE;
+}
+
+void ASTTypeWriter::VisitMemberPointerType(const MemberPointerType *T) {
+ Writer.AddTypeRef(T->getPointeeType(), Record);
+ Writer.AddTypeRef(QualType(T->getClass(), 0), Record);
+ Code = TYPE_MEMBER_POINTER;
+}
+
+void ASTTypeWriter::VisitArrayType(const ArrayType *T) {
+ Writer.AddTypeRef(T->getElementType(), Record);
+ Record.push_back(T->getSizeModifier()); // FIXME: stable values
+ Record.push_back(T->getIndexTypeCVRQualifiers()); // FIXME: stable values
+}
+
+void ASTTypeWriter::VisitConstantArrayType(const ConstantArrayType *T) {
+ VisitArrayType(T);
+ Writer.AddAPInt(T->getSize(), Record);
+ Code = TYPE_CONSTANT_ARRAY;
+}
+
+void ASTTypeWriter::VisitIncompleteArrayType(const IncompleteArrayType *T) {
+ VisitArrayType(T);
+ Code = TYPE_INCOMPLETE_ARRAY;
+}
+
+void ASTTypeWriter::VisitVariableArrayType(const VariableArrayType *T) {
+ VisitArrayType(T);
+ Writer.AddSourceLocation(T->getLBracketLoc(), Record);
+ Writer.AddSourceLocation(T->getRBracketLoc(), Record);
+ Writer.AddStmt(T->getSizeExpr());
+ Code = TYPE_VARIABLE_ARRAY;
+}
+
+void ASTTypeWriter::VisitVectorType(const VectorType *T) {
+ Writer.AddTypeRef(T->getElementType(), Record);
+ Record.push_back(T->getNumElements());
+ Record.push_back(T->getVectorKind());
+ Code = TYPE_VECTOR;
+}
+
+void ASTTypeWriter::VisitExtVectorType(const ExtVectorType *T) {
+ VisitVectorType(T);
+ Code = TYPE_EXT_VECTOR;
+}
+
+void ASTTypeWriter::VisitFunctionType(const FunctionType *T) {
+ Writer.AddTypeRef(T->getResultType(), Record);
+ FunctionType::ExtInfo C = T->getExtInfo();
+ Record.push_back(C.getNoReturn());
+ Record.push_back(C.getHasRegParm());
+ Record.push_back(C.getRegParm());
+ // FIXME: need to stabilize encoding of calling convention...
+ Record.push_back(C.getCC());
+ Record.push_back(C.getProducesResult());
+}
+
+void ASTTypeWriter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
+ VisitFunctionType(T);
+ Code = TYPE_FUNCTION_NO_PROTO;
+}
+
+void ASTTypeWriter::VisitFunctionProtoType(const FunctionProtoType *T) {
+ VisitFunctionType(T);
+ Record.push_back(T->getNumArgs());
+ for (unsigned I = 0, N = T->getNumArgs(); I != N; ++I)
+ Writer.AddTypeRef(T->getArgType(I), Record);
+ Record.push_back(T->isVariadic());
+ Record.push_back(T->hasTrailingReturn());
+ Record.push_back(T->getTypeQuals());
+ Record.push_back(static_cast<unsigned>(T->getRefQualifier()));
+ Record.push_back(T->getExceptionSpecType());
+ if (T->getExceptionSpecType() == EST_Dynamic) {
+ Record.push_back(T->getNumExceptions());
+ for (unsigned I = 0, N = T->getNumExceptions(); I != N; ++I)
+ Writer.AddTypeRef(T->getExceptionType(I), Record);
+ } else if (T->getExceptionSpecType() == EST_ComputedNoexcept) {
+ Writer.AddStmt(T->getNoexceptExpr());
+ }
+ Code = TYPE_FUNCTION_PROTO;
+}
+
+void ASTTypeWriter::VisitUnresolvedUsingType(const UnresolvedUsingType *T) {
+ Writer.AddDeclRef(T->getDecl(), Record);
+ Code = TYPE_UNRESOLVED_USING;
+}
+
+void ASTTypeWriter::VisitTypedefType(const TypedefType *T) {
+ Writer.AddDeclRef(T->getDecl(), Record);
+  assert(!T->isCanonicalUnqualified() && "Invalid typedef?");
+ Writer.AddTypeRef(T->getCanonicalTypeInternal(), Record);
+ Code = TYPE_TYPEDEF;
+}
+
+void ASTTypeWriter::VisitTypeOfExprType(const TypeOfExprType *T) {
+ Writer.AddStmt(T->getUnderlyingExpr());
+ Code = TYPE_TYPEOF_EXPR;
+}
+
+void ASTTypeWriter::VisitTypeOfType(const TypeOfType *T) {
+ Writer.AddTypeRef(T->getUnderlyingType(), Record);
+ Code = TYPE_TYPEOF;
+}
+
+void ASTTypeWriter::VisitDecltypeType(const DecltypeType *T) {
+ Writer.AddTypeRef(T->getUnderlyingType(), Record);
+ Writer.AddStmt(T->getUnderlyingExpr());
+ Code = TYPE_DECLTYPE;
+}
+
+void ASTTypeWriter::VisitUnaryTransformType(const UnaryTransformType *T) {
+ Writer.AddTypeRef(T->getBaseType(), Record);
+ Writer.AddTypeRef(T->getUnderlyingType(), Record);
+ Record.push_back(T->getUTTKind());
+ Code = TYPE_UNARY_TRANSFORM;
+}
+
+void ASTTypeWriter::VisitAutoType(const AutoType *T) {
+ Writer.AddTypeRef(T->getDeducedType(), Record);
+ Code = TYPE_AUTO;
+}
+
+void ASTTypeWriter::VisitTagType(const TagType *T) {
+ Record.push_back(T->isDependentType());
+ Writer.AddDeclRef(T->getDecl()->getCanonicalDecl(), Record);
+ assert(!T->isBeingDefined() &&
+ "Cannot serialize in the middle of a type definition");
+}
+
+void ASTTypeWriter::VisitRecordType(const RecordType *T) {
+ VisitTagType(T);
+ Code = TYPE_RECORD;
+}
+
+void ASTTypeWriter::VisitEnumType(const EnumType *T) {
+ VisitTagType(T);
+ Code = TYPE_ENUM;
+}
+
+void ASTTypeWriter::VisitAttributedType(const AttributedType *T) {
+ Writer.AddTypeRef(T->getModifiedType(), Record);
+ Writer.AddTypeRef(T->getEquivalentType(), Record);
+ Record.push_back(T->getAttrKind());
+ Code = TYPE_ATTRIBUTED;
+}
+
+void
+ASTTypeWriter::VisitSubstTemplateTypeParmType(
+ const SubstTemplateTypeParmType *T) {
+ Writer.AddTypeRef(QualType(T->getReplacedParameter(), 0), Record);
+ Writer.AddTypeRef(T->getReplacementType(), Record);
+ Code = TYPE_SUBST_TEMPLATE_TYPE_PARM;
+}
+
+void
+ASTTypeWriter::VisitSubstTemplateTypeParmPackType(
+ const SubstTemplateTypeParmPackType *T) {
+ Writer.AddTypeRef(QualType(T->getReplacedParameter(), 0), Record);
+ Writer.AddTemplateArgument(T->getArgumentPack(), Record);
+ Code = TYPE_SUBST_TEMPLATE_TYPE_PARM_PACK;
+}
+
+void
+ASTTypeWriter::VisitTemplateSpecializationType(
+ const TemplateSpecializationType *T) {
+ Record.push_back(T->isDependentType());
+ Writer.AddTemplateName(T->getTemplateName(), Record);
+ Record.push_back(T->getNumArgs());
+ for (TemplateSpecializationType::iterator ArgI = T->begin(), ArgE = T->end();
+ ArgI != ArgE; ++ArgI)
+ Writer.AddTemplateArgument(*ArgI, Record);
+ Writer.AddTypeRef(T->isTypeAlias() ? T->getAliasedType() :
+ T->isCanonicalUnqualified() ? QualType()
+ : T->getCanonicalTypeInternal(),
+ Record);
+ Code = TYPE_TEMPLATE_SPECIALIZATION;
+}
+
+void
+ASTTypeWriter::VisitDependentSizedArrayType(const DependentSizedArrayType *T) {
+ VisitArrayType(T);
+ Writer.AddStmt(T->getSizeExpr());
+ Writer.AddSourceRange(T->getBracketsRange(), Record);
+ Code = TYPE_DEPENDENT_SIZED_ARRAY;
+}
+
+void
+ASTTypeWriter::VisitDependentSizedExtVectorType(
+ const DependentSizedExtVectorType *T) {
+ // FIXME: Serialize this type (C++ only)
+ llvm_unreachable("Cannot serialize dependent sized extended vector types");
+}
+
+void
+ASTTypeWriter::VisitTemplateTypeParmType(const TemplateTypeParmType *T) {
+ Record.push_back(T->getDepth());
+ Record.push_back(T->getIndex());
+ Record.push_back(T->isParameterPack());
+ Writer.AddDeclRef(T->getDecl(), Record);
+ Code = TYPE_TEMPLATE_TYPE_PARM;
+}
+
+void
+ASTTypeWriter::VisitDependentNameType(const DependentNameType *T) {
+ Record.push_back(T->getKeyword());
+ Writer.AddNestedNameSpecifier(T->getQualifier(), Record);
+ Writer.AddIdentifierRef(T->getIdentifier(), Record);
+ Writer.AddTypeRef(T->isCanonicalUnqualified() ? QualType()
+ : T->getCanonicalTypeInternal(),
+ Record);
+ Code = TYPE_DEPENDENT_NAME;
+}
+
+void
+ASTTypeWriter::VisitDependentTemplateSpecializationType(
+ const DependentTemplateSpecializationType *T) {
+ Record.push_back(T->getKeyword());
+ Writer.AddNestedNameSpecifier(T->getQualifier(), Record);
+ Writer.AddIdentifierRef(T->getIdentifier(), Record);
+ Record.push_back(T->getNumArgs());
+ for (DependentTemplateSpecializationType::iterator
+ I = T->begin(), E = T->end(); I != E; ++I)
+ Writer.AddTemplateArgument(*I, Record);
+ Code = TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION;
+}
+
+void ASTTypeWriter::VisitPackExpansionType(const PackExpansionType *T) {
+ Writer.AddTypeRef(T->getPattern(), Record);
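+  // Bias the expansion count by one so that 0 can encode "unknown".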
+ if (llvm::Optional<unsigned> NumExpansions = T->getNumExpansions())
+ Record.push_back(*NumExpansions + 1);
+ else
+ Record.push_back(0);
+ Code = TYPE_PACK_EXPANSION;
+}
+
+void ASTTypeWriter::VisitParenType(const ParenType *T) {
+ Writer.AddTypeRef(T->getInnerType(), Record);
+ Code = TYPE_PAREN;
+}
+
+void ASTTypeWriter::VisitElaboratedType(const ElaboratedType *T) {
+ Record.push_back(T->getKeyword());
+ Writer.AddNestedNameSpecifier(T->getQualifier(), Record);
+ Writer.AddTypeRef(T->getNamedType(), Record);
+ Code = TYPE_ELABORATED;
+}
+
+void ASTTypeWriter::VisitInjectedClassNameType(const InjectedClassNameType *T) {
+ Writer.AddDeclRef(T->getDecl()->getCanonicalDecl(), Record);
+ Writer.AddTypeRef(T->getInjectedSpecializationType(), Record);
+ Code = TYPE_INJECTED_CLASS_NAME;
+}
+
+void ASTTypeWriter::VisitObjCInterfaceType(const ObjCInterfaceType *T) {
+ Writer.AddDeclRef(T->getDecl()->getCanonicalDecl(), Record);
+ Code = TYPE_OBJC_INTERFACE;
+}
+
+void ASTTypeWriter::VisitObjCObjectType(const ObjCObjectType *T) {
+ Writer.AddTypeRef(T->getBaseType(), Record);
+ Record.push_back(T->getNumProtocols());
+ for (ObjCObjectType::qual_iterator I = T->qual_begin(),
+ E = T->qual_end(); I != E; ++I)
+ Writer.AddDeclRef(*I, Record);
+ Code = TYPE_OBJC_OBJECT;
+}
+
+void
+ASTTypeWriter::VisitObjCObjectPointerType(const ObjCObjectPointerType *T) {
+ Writer.AddTypeRef(T->getPointeeType(), Record);
+ Code = TYPE_OBJC_OBJECT_POINTER;
+}
+
+void
+ASTTypeWriter::VisitAtomicType(const AtomicType *T) {
+ Writer.AddTypeRef(T->getValueType(), Record);
+ Code = TYPE_ATOMIC;
+}
+
+namespace {
+
+class TypeLocWriter : public TypeLocVisitor<TypeLocWriter> {
+ ASTWriter &Writer;
+ ASTWriter::RecordDataImpl &Record;
+
+public:
+ TypeLocWriter(ASTWriter &Writer, ASTWriter::RecordDataImpl &Record)
+ : Writer(Writer), Record(Record) { }
+
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ void Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc);
+#include "clang/AST/TypeLocNodes.def"
+
+ void VisitArrayTypeLoc(ArrayTypeLoc TyLoc);
+ void VisitFunctionTypeLoc(FunctionTypeLoc TyLoc);
+};
+
+}
+
+void TypeLocWriter::VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
+ // nothing to do
+}
+void TypeLocWriter::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getBuiltinLoc(), Record);
+ if (TL.needsExtraLocalData()) {
+ Record.push_back(TL.getWrittenTypeSpec());
+ Record.push_back(TL.getWrittenSignSpec());
+ Record.push_back(TL.getWrittenWidthSpec());
+ Record.push_back(TL.hasModeAttr());
+ }
+}
+void TypeLocWriter::VisitComplexTypeLoc(ComplexTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitPointerTypeLoc(PointerTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getStarLoc(), Record);
+}
+void TypeLocWriter::VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getCaretLoc(), Record);
+}
+void TypeLocWriter::VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getAmpLoc(), Record);
+}
+void TypeLocWriter::VisitRValueReferenceTypeLoc(RValueReferenceTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getAmpAmpLoc(), Record);
+}
+void TypeLocWriter::VisitMemberPointerTypeLoc(MemberPointerTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getStarLoc(), Record);
+ Writer.AddTypeSourceInfo(TL.getClassTInfo(), Record);
+}
+void TypeLocWriter::VisitArrayTypeLoc(ArrayTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getLBracketLoc(), Record);
+ Writer.AddSourceLocation(TL.getRBracketLoc(), Record);
+ Record.push_back(TL.getSizeExpr() ? 1 : 0);
+ if (TL.getSizeExpr())
+ Writer.AddStmt(TL.getSizeExpr());
+}
+void TypeLocWriter::VisitConstantArrayTypeLoc(ConstantArrayTypeLoc TL) {
+ VisitArrayTypeLoc(TL);
+}
+void TypeLocWriter::VisitIncompleteArrayTypeLoc(IncompleteArrayTypeLoc TL) {
+ VisitArrayTypeLoc(TL);
+}
+void TypeLocWriter::VisitVariableArrayTypeLoc(VariableArrayTypeLoc TL) {
+ VisitArrayTypeLoc(TL);
+}
+void TypeLocWriter::VisitDependentSizedArrayTypeLoc(
+ DependentSizedArrayTypeLoc TL) {
+ VisitArrayTypeLoc(TL);
+}
+void TypeLocWriter::VisitDependentSizedExtVectorTypeLoc(
+ DependentSizedExtVectorTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitVectorTypeLoc(VectorTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getLocalRangeBegin(), Record);
+ Writer.AddSourceLocation(TL.getLocalRangeEnd(), Record);
+ Record.push_back(TL.getTrailingReturn());
+ for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
+ Writer.AddDeclRef(TL.getArg(i), Record);
+}
+void TypeLocWriter::VisitFunctionProtoTypeLoc(FunctionProtoTypeLoc TL) {
+ VisitFunctionTypeLoc(TL);
+}
+void TypeLocWriter::VisitFunctionNoProtoTypeLoc(FunctionNoProtoTypeLoc TL) {
+ VisitFunctionTypeLoc(TL);
+}
+void TypeLocWriter::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getTypeofLoc(), Record);
+ Writer.AddSourceLocation(TL.getLParenLoc(), Record);
+ Writer.AddSourceLocation(TL.getRParenLoc(), Record);
+}
+void TypeLocWriter::VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getTypeofLoc(), Record);
+ Writer.AddSourceLocation(TL.getLParenLoc(), Record);
+ Writer.AddSourceLocation(TL.getRParenLoc(), Record);
+ Writer.AddTypeSourceInfo(TL.getUnderlyingTInfo(), Record);
+}
+void TypeLocWriter::VisitDecltypeTypeLoc(DecltypeTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getKWLoc(), Record);
+ Writer.AddSourceLocation(TL.getLParenLoc(), Record);
+ Writer.AddSourceLocation(TL.getRParenLoc(), Record);
+ Writer.AddTypeSourceInfo(TL.getUnderlyingTInfo(), Record);
+}
+void TypeLocWriter::VisitAutoTypeLoc(AutoTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitRecordTypeLoc(RecordTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitEnumTypeLoc(EnumTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getAttrNameLoc(), Record);
+ if (TL.hasAttrOperand()) {
+ SourceRange range = TL.getAttrOperandParensRange();
+ Writer.AddSourceLocation(range.getBegin(), Record);
+ Writer.AddSourceLocation(range.getEnd(), Record);
+ }
+ if (TL.hasAttrExprOperand()) {
+ Expr *operand = TL.getAttrExprOperand();
+ Record.push_back(operand ? 1 : 0);
+ if (operand) Writer.AddStmt(operand);
+ } else if (TL.hasAttrEnumOperand()) {
+ Writer.AddSourceLocation(TL.getAttrEnumOperandLoc(), Record);
+ }
+}
+void TypeLocWriter::VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitSubstTemplateTypeParmTypeLoc(
+ SubstTemplateTypeParmTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitSubstTemplateTypeParmPackTypeLoc(
+ SubstTemplateTypeParmPackTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitTemplateSpecializationTypeLoc(
+ TemplateSpecializationTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getTemplateKeywordLoc(), Record);
+ Writer.AddSourceLocation(TL.getTemplateNameLoc(), Record);
+ Writer.AddSourceLocation(TL.getLAngleLoc(), Record);
+ Writer.AddSourceLocation(TL.getRAngleLoc(), Record);
+ for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
+ Writer.AddTemplateArgumentLocInfo(TL.getArgLoc(i).getArgument().getKind(),
+ TL.getArgLoc(i).getLocInfo(), Record);
+}
+void TypeLocWriter::VisitParenTypeLoc(ParenTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getLParenLoc(), Record);
+ Writer.AddSourceLocation(TL.getRParenLoc(), Record);
+}
+void TypeLocWriter::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getElaboratedKeywordLoc(), Record);
+ Writer.AddNestedNameSpecifierLoc(TL.getQualifierLoc(), Record);
+}
+void TypeLocWriter::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getElaboratedKeywordLoc(), Record);
+ Writer.AddNestedNameSpecifierLoc(TL.getQualifierLoc(), Record);
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitDependentTemplateSpecializationTypeLoc(
+ DependentTemplateSpecializationTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getElaboratedKeywordLoc(), Record);
+ Writer.AddNestedNameSpecifierLoc(TL.getQualifierLoc(), Record);
+ Writer.AddSourceLocation(TL.getTemplateKeywordLoc(), Record);
+ Writer.AddSourceLocation(TL.getTemplateNameLoc(), Record);
+ Writer.AddSourceLocation(TL.getLAngleLoc(), Record);
+ Writer.AddSourceLocation(TL.getRAngleLoc(), Record);
+ for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
+ Writer.AddTemplateArgumentLocInfo(TL.getArgLoc(I).getArgument().getKind(),
+ TL.getArgLoc(I).getLocInfo(), Record);
+}
+void TypeLocWriter::VisitPackExpansionTypeLoc(PackExpansionTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getEllipsisLoc(), Record);
+}
+void TypeLocWriter::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getNameLoc(), Record);
+}
+void TypeLocWriter::VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) {
+ Record.push_back(TL.hasBaseTypeAsWritten());
+ Writer.AddSourceLocation(TL.getLAngleLoc(), Record);
+ Writer.AddSourceLocation(TL.getRAngleLoc(), Record);
+ for (unsigned i = 0, e = TL.getNumProtocols(); i != e; ++i)
+ Writer.AddSourceLocation(TL.getProtocolLoc(i), Record);
+}
+void TypeLocWriter::VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getStarLoc(), Record);
+}
+void TypeLocWriter::VisitAtomicTypeLoc(AtomicTypeLoc TL) {
+ Writer.AddSourceLocation(TL.getKWLoc(), Record);
+ Writer.AddSourceLocation(TL.getLParenLoc(), Record);
+ Writer.AddSourceLocation(TL.getRParenLoc(), Record);
+}
+
+//===----------------------------------------------------------------------===//
+// ASTWriter Implementation
+//===----------------------------------------------------------------------===//
+
+static void EmitBlockID(unsigned ID, const char *Name,
+ llvm::BitstreamWriter &Stream,
+ ASTWriter::RecordDataImpl &Record) {
+ Record.clear();
+ Record.push_back(ID);
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_SETBID, Record);
+
+ // Emit the block name if present.
+ if (Name == 0 || Name[0] == 0) return;
+ Record.clear();
+ while (*Name)
+ Record.push_back(*Name++);
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_BLOCKNAME, Record);
+}
+
+static void EmitRecordID(unsigned ID, const char *Name,
+ llvm::BitstreamWriter &Stream,
+ ASTWriter::RecordDataImpl &Record) {
+ Record.clear();
+ Record.push_back(ID);
+ while (*Name)
+ Record.push_back(*Name++);
+ Stream.EmitRecord(llvm::bitc::BLOCKINFO_CODE_SETRECORDNAME, Record);
+}
+
+static void AddStmtsExprs(llvm::BitstreamWriter &Stream,
+ ASTWriter::RecordDataImpl &Record) {
+#define RECORD(X) EmitRecordID(X, #X, Stream, Record)
+ RECORD(STMT_STOP);
+ RECORD(STMT_NULL_PTR);
+ RECORD(STMT_NULL);
+ RECORD(STMT_COMPOUND);
+ RECORD(STMT_CASE);
+ RECORD(STMT_DEFAULT);
+ RECORD(STMT_LABEL);
+ RECORD(STMT_IF);
+ RECORD(STMT_SWITCH);
+ RECORD(STMT_WHILE);
+ RECORD(STMT_DO);
+ RECORD(STMT_FOR);
+ RECORD(STMT_GOTO);
+ RECORD(STMT_INDIRECT_GOTO);
+ RECORD(STMT_CONTINUE);
+ RECORD(STMT_BREAK);
+ RECORD(STMT_RETURN);
+ RECORD(STMT_DECL);
+ RECORD(STMT_ASM);
+ RECORD(EXPR_PREDEFINED);
+ RECORD(EXPR_DECL_REF);
+ RECORD(EXPR_INTEGER_LITERAL);
+ RECORD(EXPR_FLOATING_LITERAL);
+ RECORD(EXPR_IMAGINARY_LITERAL);
+ RECORD(EXPR_STRING_LITERAL);
+ RECORD(EXPR_CHARACTER_LITERAL);
+ RECORD(EXPR_PAREN);
+ RECORD(EXPR_UNARY_OPERATOR);
+ RECORD(EXPR_SIZEOF_ALIGN_OF);
+ RECORD(EXPR_ARRAY_SUBSCRIPT);
+ RECORD(EXPR_CALL);
+ RECORD(EXPR_MEMBER);
+ RECORD(EXPR_BINARY_OPERATOR);
+ RECORD(EXPR_COMPOUND_ASSIGN_OPERATOR);
+ RECORD(EXPR_CONDITIONAL_OPERATOR);
+ RECORD(EXPR_IMPLICIT_CAST);
+ RECORD(EXPR_CSTYLE_CAST);
+ RECORD(EXPR_COMPOUND_LITERAL);
+ RECORD(EXPR_EXT_VECTOR_ELEMENT);
+ RECORD(EXPR_INIT_LIST);
+ RECORD(EXPR_DESIGNATED_INIT);
+ RECORD(EXPR_IMPLICIT_VALUE_INIT);
+ RECORD(EXPR_VA_ARG);
+ RECORD(EXPR_ADDR_LABEL);
+ RECORD(EXPR_STMT);
+ RECORD(EXPR_CHOOSE);
+ RECORD(EXPR_GNU_NULL);
+ RECORD(EXPR_SHUFFLE_VECTOR);
+ RECORD(EXPR_BLOCK);
+ RECORD(EXPR_GENERIC_SELECTION);
+ RECORD(EXPR_OBJC_STRING_LITERAL);
+ RECORD(EXPR_OBJC_NUMERIC_LITERAL);
+ RECORD(EXPR_OBJC_ARRAY_LITERAL);
+ RECORD(EXPR_OBJC_DICTIONARY_LITERAL);
+ RECORD(EXPR_OBJC_ENCODE);
+ RECORD(EXPR_OBJC_SELECTOR_EXPR);
+ RECORD(EXPR_OBJC_PROTOCOL_EXPR);
+ RECORD(EXPR_OBJC_IVAR_REF_EXPR);
+ RECORD(EXPR_OBJC_PROPERTY_REF_EXPR);
+ RECORD(EXPR_OBJC_KVC_REF_EXPR);
+ RECORD(EXPR_OBJC_MESSAGE_EXPR);
+ RECORD(STMT_OBJC_FOR_COLLECTION);
+ RECORD(STMT_OBJC_CATCH);
+ RECORD(STMT_OBJC_FINALLY);
+ RECORD(STMT_OBJC_AT_TRY);
+ RECORD(STMT_OBJC_AT_SYNCHRONIZED);
+ RECORD(STMT_OBJC_AT_THROW);
+ RECORD(EXPR_OBJC_BOOL_LITERAL);
+ RECORD(EXPR_CXX_OPERATOR_CALL);
+ RECORD(EXPR_CXX_CONSTRUCT);
+ RECORD(EXPR_CXX_STATIC_CAST);
+ RECORD(EXPR_CXX_DYNAMIC_CAST);
+ RECORD(EXPR_CXX_REINTERPRET_CAST);
+ RECORD(EXPR_CXX_CONST_CAST);
+ RECORD(EXPR_CXX_FUNCTIONAL_CAST);
+ RECORD(EXPR_USER_DEFINED_LITERAL);
+ RECORD(EXPR_CXX_BOOL_LITERAL);
+ RECORD(EXPR_CXX_NULL_PTR_LITERAL);
+ RECORD(EXPR_CXX_TYPEID_EXPR);
+ RECORD(EXPR_CXX_TYPEID_TYPE);
+ RECORD(EXPR_CXX_UUIDOF_EXPR);
+ RECORD(EXPR_CXX_UUIDOF_TYPE);
+ RECORD(EXPR_CXX_THIS);
+ RECORD(EXPR_CXX_THROW);
+ RECORD(EXPR_CXX_DEFAULT_ARG);
+ RECORD(EXPR_CXX_BIND_TEMPORARY);
+ RECORD(EXPR_CXX_SCALAR_VALUE_INIT);
+ RECORD(EXPR_CXX_NEW);
+ RECORD(EXPR_CXX_DELETE);
+ RECORD(EXPR_CXX_PSEUDO_DESTRUCTOR);
+ RECORD(EXPR_EXPR_WITH_CLEANUPS);
+ RECORD(EXPR_CXX_DEPENDENT_SCOPE_MEMBER);
+ RECORD(EXPR_CXX_DEPENDENT_SCOPE_DECL_REF);
+ RECORD(EXPR_CXX_UNRESOLVED_CONSTRUCT);
+ RECORD(EXPR_CXX_UNRESOLVED_MEMBER);
+ RECORD(EXPR_CXX_UNRESOLVED_LOOKUP);
+ RECORD(EXPR_CXX_UNARY_TYPE_TRAIT);
+ RECORD(EXPR_CXX_NOEXCEPT);
+ RECORD(EXPR_OPAQUE_VALUE);
+ RECORD(EXPR_BINARY_TYPE_TRAIT);
+ RECORD(EXPR_PACK_EXPANSION);
+ RECORD(EXPR_SIZEOF_PACK);
+ RECORD(EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK);
+ RECORD(EXPR_CUDA_KERNEL_CALL);
+#undef RECORD
+}
+
+void ASTWriter::WriteBlockInfoBlock() {
+ RecordData Record;
+ Stream.EnterSubblock(llvm::bitc::BLOCKINFO_BLOCK_ID, 3);
+
+#define BLOCK(X) EmitBlockID(X ## _ID, #X, Stream, Record)
+#define RECORD(X) EmitRecordID(X, #X, Stream, Record)
+
+ // AST Top-Level Block.
+ BLOCK(AST_BLOCK);
+ RECORD(ORIGINAL_FILE_NAME);
+ RECORD(ORIGINAL_FILE_ID);
+ RECORD(TYPE_OFFSET);
+ RECORD(DECL_OFFSET);
+ RECORD(LANGUAGE_OPTIONS);
+ RECORD(METADATA);
+ RECORD(IDENTIFIER_OFFSET);
+ RECORD(IDENTIFIER_TABLE);
+ RECORD(EXTERNAL_DEFINITIONS);
+ RECORD(SPECIAL_TYPES);
+ RECORD(STATISTICS);
+ RECORD(TENTATIVE_DEFINITIONS);
+ RECORD(UNUSED_FILESCOPED_DECLS);
+ RECORD(LOCALLY_SCOPED_EXTERNAL_DECLS);
+ RECORD(SELECTOR_OFFSETS);
+ RECORD(METHOD_POOL);
+ RECORD(PP_COUNTER_VALUE);
+ RECORD(SOURCE_LOCATION_OFFSETS);
+ RECORD(SOURCE_LOCATION_PRELOADS);
+ RECORD(STAT_CACHE);
+ RECORD(EXT_VECTOR_DECLS);
+ RECORD(VERSION_CONTROL_BRANCH_REVISION);
+ RECORD(PPD_ENTITIES_OFFSETS);
+ RECORD(IMPORTS);
+ RECORD(REFERENCED_SELECTOR_POOL);
+ RECORD(TU_UPDATE_LEXICAL);
+ RECORD(LOCAL_REDECLARATIONS_MAP);
+ RECORD(SEMA_DECL_REFS);
+ RECORD(WEAK_UNDECLARED_IDENTIFIERS);
+ RECORD(PENDING_IMPLICIT_INSTANTIATIONS);
+ RECORD(DECL_REPLACEMENTS);
+ RECORD(UPDATE_VISIBLE);
+ RECORD(DECL_UPDATE_OFFSETS);
+ RECORD(DECL_UPDATES);
+ RECORD(CXX_BASE_SPECIFIER_OFFSETS);
+ RECORD(DIAG_PRAGMA_MAPPINGS);
+ RECORD(CUDA_SPECIAL_DECL_REFS);
+ RECORD(HEADER_SEARCH_TABLE);
+ RECORD(ORIGINAL_PCH_DIR);
+ RECORD(FP_PRAGMA_OPTIONS);
+ RECORD(OPENCL_EXTENSIONS);
+ RECORD(DELEGATING_CTORS);
+ RECORD(FILE_SOURCE_LOCATION_OFFSETS);
+ RECORD(KNOWN_NAMESPACES);
+ RECORD(MODULE_OFFSET_MAP);
+ RECORD(SOURCE_MANAGER_LINE_TABLE);
+ RECORD(OBJC_CATEGORIES_MAP);
+ RECORD(FILE_SORTED_DECLS);
+ RECORD(IMPORTED_MODULES);
+ RECORD(MERGED_DECLARATIONS);
+ RECORD(LOCAL_REDECLARATIONS);
+ RECORD(OBJC_CATEGORIES);
+
+ // SourceManager Block.
+ BLOCK(SOURCE_MANAGER_BLOCK);
+ RECORD(SM_SLOC_FILE_ENTRY);
+ RECORD(SM_SLOC_BUFFER_ENTRY);
+ RECORD(SM_SLOC_BUFFER_BLOB);
+ RECORD(SM_SLOC_EXPANSION_ENTRY);
+
+ // Preprocessor Block.
+ BLOCK(PREPROCESSOR_BLOCK);
+ RECORD(PP_MACRO_OBJECT_LIKE);
+ RECORD(PP_MACRO_FUNCTION_LIKE);
+ RECORD(PP_TOKEN);
+
+ // Decls and Types block.
+ BLOCK(DECLTYPES_BLOCK);
+ RECORD(TYPE_EXT_QUAL);
+ RECORD(TYPE_COMPLEX);
+ RECORD(TYPE_POINTER);
+ RECORD(TYPE_BLOCK_POINTER);
+ RECORD(TYPE_LVALUE_REFERENCE);
+ RECORD(TYPE_RVALUE_REFERENCE);
+ RECORD(TYPE_MEMBER_POINTER);
+ RECORD(TYPE_CONSTANT_ARRAY);
+ RECORD(TYPE_INCOMPLETE_ARRAY);
+ RECORD(TYPE_VARIABLE_ARRAY);
+ RECORD(TYPE_VECTOR);
+ RECORD(TYPE_EXT_VECTOR);
+ RECORD(TYPE_FUNCTION_PROTO);
+ RECORD(TYPE_FUNCTION_NO_PROTO);
+ RECORD(TYPE_TYPEDEF);
+ RECORD(TYPE_TYPEOF_EXPR);
+ RECORD(TYPE_TYPEOF);
+ RECORD(TYPE_RECORD);
+ RECORD(TYPE_ENUM);
+ RECORD(TYPE_OBJC_INTERFACE);
+ RECORD(TYPE_OBJC_OBJECT);
+ RECORD(TYPE_OBJC_OBJECT_POINTER);
+ RECORD(TYPE_DECLTYPE);
+ RECORD(TYPE_ELABORATED);
+ RECORD(TYPE_SUBST_TEMPLATE_TYPE_PARM);
+ RECORD(TYPE_UNRESOLVED_USING);
+ RECORD(TYPE_INJECTED_CLASS_NAME);
+ RECORD(TYPE_OBJC_OBJECT);
+ RECORD(TYPE_TEMPLATE_TYPE_PARM);
+ RECORD(TYPE_TEMPLATE_SPECIALIZATION);
+ RECORD(TYPE_DEPENDENT_NAME);
+ RECORD(TYPE_DEPENDENT_TEMPLATE_SPECIALIZATION);
+ RECORD(TYPE_DEPENDENT_SIZED_ARRAY);
+ RECORD(TYPE_PAREN);
+ RECORD(TYPE_PACK_EXPANSION);
+ RECORD(TYPE_ATTRIBUTED);
+ RECORD(TYPE_SUBST_TEMPLATE_TYPE_PARM_PACK);
+ RECORD(TYPE_ATOMIC);
+ RECORD(DECL_TYPEDEF);
+ RECORD(DECL_ENUM);
+ RECORD(DECL_RECORD);
+ RECORD(DECL_ENUM_CONSTANT);
+ RECORD(DECL_FUNCTION);
+ RECORD(DECL_OBJC_METHOD);
+ RECORD(DECL_OBJC_INTERFACE);
+ RECORD(DECL_OBJC_PROTOCOL);
+ RECORD(DECL_OBJC_IVAR);
+ RECORD(DECL_OBJC_AT_DEFS_FIELD);
+ RECORD(DECL_OBJC_CATEGORY);
+ RECORD(DECL_OBJC_CATEGORY_IMPL);
+ RECORD(DECL_OBJC_IMPLEMENTATION);
+ RECORD(DECL_OBJC_COMPATIBLE_ALIAS);
+ RECORD(DECL_OBJC_PROPERTY);
+ RECORD(DECL_OBJC_PROPERTY_IMPL);
+ RECORD(DECL_FIELD);
+ RECORD(DECL_VAR);
+ RECORD(DECL_IMPLICIT_PARAM);
+ RECORD(DECL_PARM_VAR);
+ RECORD(DECL_FILE_SCOPE_ASM);
+ RECORD(DECL_BLOCK);
+ RECORD(DECL_CONTEXT_LEXICAL);
+ RECORD(DECL_CONTEXT_VISIBLE);
+ RECORD(DECL_NAMESPACE);
+ RECORD(DECL_NAMESPACE_ALIAS);
+ RECORD(DECL_USING);
+ RECORD(DECL_USING_SHADOW);
+ RECORD(DECL_USING_DIRECTIVE);
+ RECORD(DECL_UNRESOLVED_USING_VALUE);
+ RECORD(DECL_UNRESOLVED_USING_TYPENAME);
+ RECORD(DECL_LINKAGE_SPEC);
+ RECORD(DECL_CXX_RECORD);
+ RECORD(DECL_CXX_METHOD);
+ RECORD(DECL_CXX_CONSTRUCTOR);
+ RECORD(DECL_CXX_DESTRUCTOR);
+ RECORD(DECL_CXX_CONVERSION);
+ RECORD(DECL_ACCESS_SPEC);
+ RECORD(DECL_FRIEND);
+ RECORD(DECL_FRIEND_TEMPLATE);
+ RECORD(DECL_CLASS_TEMPLATE);
+ RECORD(DECL_CLASS_TEMPLATE_SPECIALIZATION);
+ RECORD(DECL_CLASS_TEMPLATE_PARTIAL_SPECIALIZATION);
+ RECORD(DECL_FUNCTION_TEMPLATE);
+ RECORD(DECL_TEMPLATE_TYPE_PARM);
+ RECORD(DECL_NON_TYPE_TEMPLATE_PARM);
+ RECORD(DECL_TEMPLATE_TEMPLATE_PARM);
+ RECORD(DECL_STATIC_ASSERT);
+ RECORD(DECL_CXX_BASE_SPECIFIERS);
+ RECORD(DECL_INDIRECTFIELD);
+ RECORD(DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK);
+
+ // Statements and Exprs can occur in the Decls and Types block.
+ AddStmtsExprs(Stream, Record);
+
+ BLOCK(PREPROCESSOR_DETAIL_BLOCK);
+ RECORD(PPD_MACRO_EXPANSION);
+ RECORD(PPD_MACRO_DEFINITION);
+ RECORD(PPD_INCLUSION_DIRECTIVE);
+
+#undef RECORD
+#undef BLOCK
+ Stream.ExitBlock();
+}
+
+/// \brief Adjusts the given filename to only write out the portion of the
+/// filename that is not part of the system root directory.
+///
+/// \param Filename the file name to adjust.
+///
+/// \param isysroot When non-empty, the PCH file is a relocatable PCH file and
+/// the returned filename will be adjusted relative to this system root.
+///
+/// \returns either the original filename (if it needs no adjustment) or the
+/// adjusted filename (which points into the @p Filename parameter).
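+///
+/// For example, given isysroot "/sysroot" and the filename
+/// "/sysroot/usr/include/stdio.h", the adjusted filename is
+/// "usr/include/stdio.h".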
+static const char *
+adjustFilenameForRelocatablePCH(const char *Filename, StringRef isysroot) {
+ assert(Filename && "No file name to adjust?");
+
+ if (isysroot.empty())
+ return Filename;
+
+ // Verify that the filename and the system root have the same prefix.
+ unsigned Pos = 0;
+ for (; Filename[Pos] && Pos < isysroot.size(); ++Pos)
+ if (Filename[Pos] != isysroot[Pos])
+ return Filename; // Prefixes don't match.
+
+ // We hit the end of the filename before we hit the end of the system root.
+ if (!Filename[Pos])
+ return Filename;
+
+ // If the file name has a '/' at the current position, skip over the '/'.
+ // We distinguish sysroot-based includes from absolute includes by the
+ // absence of '/' at the beginning of sysroot-based includes.
+ if (Filename[Pos] == '/')
+ ++Pos;
+
+ return Filename + Pos;
+}
+
+/// \brief Write the AST metadata (e.g., i686-apple-darwin9).
+void ASTWriter::WriteMetadata(ASTContext &Context, StringRef isysroot,
+ const std::string &OutputFile) {
+ using namespace llvm;
+
+ // Metadata
+ const TargetInfo &Target = Context.getTargetInfo();
+ BitCodeAbbrev *MetaAbbrev = new BitCodeAbbrev();
+ MetaAbbrev->Add(BitCodeAbbrevOp(METADATA));
+ MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // AST major
+ MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // AST minor
+ MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang major
+ MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 16)); // Clang minor
+ MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Relocatable
+ MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Has errors
+ MetaAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Target triple
+ unsigned MetaAbbrevCode = Stream.EmitAbbrev(MetaAbbrev);
+
+ RecordData Record;
+ Record.push_back(METADATA);
+ Record.push_back(VERSION_MAJOR);
+ Record.push_back(VERSION_MINOR);
+ Record.push_back(CLANG_VERSION_MAJOR);
+ Record.push_back(CLANG_VERSION_MINOR);
+ Record.push_back(!isysroot.empty());
+ Record.push_back(ASTHasCompilerErrors);
+ const std::string &Triple = Target.getTriple().getTriple();
+ Stream.EmitRecordWithBlob(MetaAbbrevCode, Record, Triple);
+
+ if (Chain) {
+ serialization::ModuleManager &Mgr = Chain->getModuleManager();
+ llvm::SmallVector<char, 128> ModulePaths;
+ Record.clear();
+
+ for (ModuleManager::ModuleIterator M = Mgr.begin(), MEnd = Mgr.end();
+ M != MEnd; ++M) {
+ // Skip modules that weren't directly imported.
+ if (!(*M)->isDirectlyImported())
+ continue;
+
+ Record.push_back((unsigned)(*M)->Kind); // FIXME: Stable encoding
+ // FIXME: Write import location, once it matters.
+ // FIXME: This writes the absolute path for AST files we depend on.
+ const std::string &FileName = (*M)->FileName;
+ Record.push_back(FileName.size());
+ Record.append(FileName.begin(), FileName.end());
+ }
+ Stream.EmitRecord(IMPORTS, Record);
+ }
+
+ // Original file name and file ID
+ SourceManager &SM = Context.getSourceManager();
+ if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
+ BitCodeAbbrev *FileAbbrev = new BitCodeAbbrev();
+ FileAbbrev->Add(BitCodeAbbrevOp(ORIGINAL_FILE_NAME));
+ FileAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
+ unsigned FileAbbrevCode = Stream.EmitAbbrev(FileAbbrev);
+
+ SmallString<128> MainFilePath(MainFile->getName());
+
+ llvm::sys::fs::make_absolute(MainFilePath);
+
+ const char *MainFileNameStr = MainFilePath.c_str();
+ MainFileNameStr = adjustFilenameForRelocatablePCH(MainFileNameStr,
+ isysroot);
+ RecordData Record;
+ Record.push_back(ORIGINAL_FILE_NAME);
+ Stream.EmitRecordWithBlob(FileAbbrevCode, Record, MainFileNameStr);
+
+ Record.clear();
+ Record.push_back(SM.getMainFileID().getOpaqueValue());
+ Stream.EmitRecord(ORIGINAL_FILE_ID, Record);
+ }
+
+ // Original PCH directory
+ if (!OutputFile.empty() && OutputFile != "-") {
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(ORIGINAL_PCH_DIR));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
+ unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev);
+
+ SmallString<128> OutputPath(OutputFile);
+
+ llvm::sys::fs::make_absolute(OutputPath);
+ StringRef origDir = llvm::sys::path::parent_path(OutputPath);
+
+ RecordData Record;
+ Record.push_back(ORIGINAL_PCH_DIR);
+ Stream.EmitRecordWithBlob(AbbrevCode, Record, origDir);
+ }
+
+ // Repository branch/version information.
+ BitCodeAbbrev *RepoAbbrev = new BitCodeAbbrev();
+ RepoAbbrev->Add(BitCodeAbbrevOp(VERSION_CONTROL_BRANCH_REVISION));
+ RepoAbbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // SVN branch/tag
+ unsigned RepoAbbrevCode = Stream.EmitAbbrev(RepoAbbrev);
+ Record.clear();
+ Record.push_back(VERSION_CONTROL_BRANCH_REVISION);
+ Stream.EmitRecordWithBlob(RepoAbbrevCode, Record,
+ getClangFullRepositoryVersion());
+}
+
+/// \brief Write the LangOptions structure.
+void ASTWriter::WriteLanguageOptions(const LangOptions &LangOpts) {
+ RecordData Record;
+#define LANGOPT(Name, Bits, Default, Description) \
+ Record.push_back(LangOpts.Name);
+#define ENUM_LANGOPT(Name, Type, Bits, Default, Description) \
+ Record.push_back(static_cast<unsigned>(LangOpts.get##Name()));
+#include "clang/Basic/LangOptions.def"
+
+ Record.push_back(LangOpts.CurrentModule.size());
+ Record.append(LangOpts.CurrentModule.begin(), LangOpts.CurrentModule.end());
+ Stream.EmitRecord(LANGUAGE_OPTIONS, Record);
+}
+
+//===----------------------------------------------------------------------===//
+// Stat Cache Serialization
+//===----------------------------------------------------------------------===//
+
+namespace {
+// Trait used for the on-disk hash table of stat cache results.
+class ASTStatCacheTrait {
+public:
+ typedef const char * key_type;
+ typedef key_type key_type_ref;
+
+ typedef struct stat data_type;
+ typedef const data_type &data_type_ref;
+
+ static unsigned ComputeHash(const char *path) {
+ return llvm::HashString(path);
+ }
+
+ std::pair<unsigned,unsigned>
+ EmitKeyDataLength(raw_ostream& Out, const char *path,
+ data_type_ref Data) {
+ unsigned StrLen = strlen(path);
+ clang::io::Emit16(Out, StrLen);
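+ // Fixed-size payload: st_ino (4) + st_dev (4) + st_mode (2) + st_mtime (8)
+ // + st_size (8) bytes, matching the fields written in EmitData() below.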
+ unsigned DataLen = 4 + 4 + 2 + 8 + 8;
+ clang::io::Emit8(Out, DataLen);
+ return std::make_pair(StrLen + 1, DataLen);
+ }
+
+ void EmitKey(raw_ostream& Out, const char *path, unsigned KeyLen) {
+ Out.write(path, KeyLen);
+ }
+
+ void EmitData(raw_ostream &Out, key_type_ref,
+ data_type_ref Data, unsigned DataLen) {
+ using namespace clang::io;
+ uint64_t Start = Out.tell(); (void)Start;
+
+ Emit32(Out, (uint32_t) Data.st_ino);
+ Emit32(Out, (uint32_t) Data.st_dev);
+ Emit16(Out, (uint16_t) Data.st_mode);
+ Emit64(Out, (uint64_t) Data.st_mtime);
+ Emit64(Out, (uint64_t) Data.st_size);
+
+ assert(Out.tell() - Start == DataLen && "Wrong data length");
+ }
+};
+} // end anonymous namespace
+
+/// \brief Write the stat() system call cache to the AST file.
+void ASTWriter::WriteStatCache(MemorizeStatCalls &StatCalls) {
+ // Build the on-disk hash table containing information about every
+ // stat() call.
+ OnDiskChainedHashTableGenerator<ASTStatCacheTrait> Generator;
+ unsigned NumStatEntries = 0;
+ for (MemorizeStatCalls::iterator Stat = StatCalls.begin(),
+ StatEnd = StatCalls.end();
+ Stat != StatEnd; ++Stat, ++NumStatEntries) {
+ StringRef Filename = Stat->first();
+ Generator.insert(Filename.data(), Stat->second);
+ }
+
+ // Create the on-disk hash table in a buffer.
+ SmallString<4096> StatCacheData;
+ uint32_t BucketOffset;
+ {
+ llvm::raw_svector_ostream Out(StatCacheData);
+ // Make sure that no bucket is at offset 0
+ clang::io::Emit32(Out, 0);
+ BucketOffset = Generator.Emit(Out);
+ }
+
+ // Create a blob abbreviation
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(STAT_CACHE));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned StatCacheAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the stat cache
+ RecordData Record;
+ Record.push_back(STAT_CACHE);
+ Record.push_back(BucketOffset);
+ Record.push_back(NumStatEntries);
+ Stream.EmitRecordWithBlob(StatCacheAbbrev, Record, StatCacheData.str());
+}
+
+//===----------------------------------------------------------------------===//
+// Source Manager Serialization
+//===----------------------------------------------------------------------===//
+
+/// \brief Create an abbreviation for the SLocEntry that refers to a
+/// file.
+static unsigned CreateSLocFileAbbrev(llvm::BitstreamWriter &Stream) {
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SM_SLOC_FILE_ENTRY));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Offset
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Include location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // Characteristic
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Line directives
+ // FileEntry fields.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 12)); // Size
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 32)); // Modification time
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // BufferOverridden
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // NumCreatedFIDs
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 24)); // FirstDeclIndex
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // NumDecls
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // File name
+ return Stream.EmitAbbrev(Abbrev);
+}
+
+/// \brief Create an abbreviation for the SLocEntry that refers to a
+/// buffer.
+static unsigned CreateSLocBufferAbbrev(llvm::BitstreamWriter &Stream) {
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SM_SLOC_BUFFER_ENTRY));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Offset
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Include location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // Characteristic
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Line directives
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Buffer name blob
+ return Stream.EmitAbbrev(Abbrev);
+}
+
+/// \brief Create an abbreviation for the SLocEntry that refers to a
+/// buffer's blob.
+static unsigned CreateSLocBufferBlobAbbrev(llvm::BitstreamWriter &Stream) {
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SM_SLOC_BUFFER_BLOB));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Blob
+ return Stream.EmitAbbrev(Abbrev);
+}
+
+/// \brief Create an abbreviation for the SLocEntry that refers to a macro
+/// expansion.
+static unsigned CreateSLocExpansionAbbrev(llvm::BitstreamWriter &Stream) {
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SM_SLOC_EXPANSION_ENTRY));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Offset
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Spelling location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Start location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // End location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Token length
+ return Stream.EmitAbbrev(Abbrev);
+}
+
+namespace {
+ // Trait used for the on-disk hash table of header search information.
+ class HeaderFileInfoTrait {
+ ASTWriter &Writer;
+ const HeaderSearch &HS;
+
+ // Keep track of the framework names we've used during serialization.
+ SmallVector<char, 128> FrameworkStringData;
+ llvm::StringMap<unsigned> FrameworkNameOffset;
+
+ public:
+ HeaderFileInfoTrait(ASTWriter &Writer, const HeaderSearch &HS)
+ : Writer(Writer), HS(HS) { }
+
+ typedef const char *key_type;
+ typedef key_type key_type_ref;
+
+ typedef HeaderFileInfo data_type;
+ typedef const data_type &data_type_ref;
+
+ static unsigned ComputeHash(const char *path) {
+ // The hash is based only on the filename portion of the key, so that the
+ // reader can match based on filenames when symlinking or excess path
+ // elements ("foo/../", "../") change the form of the name. However,
+ // the complete path is still the key.
+ return llvm::HashString(llvm::sys::path::filename(path));
+ }
+
+ std::pair<unsigned,unsigned>
+ EmitKeyDataLength(raw_ostream& Out, const char *path,
+ data_type_ref Data) {
+ unsigned StrLen = strlen(path);
+ clang::io::Emit16(Out, StrLen);
+ unsigned DataLen = 1 + 2 + 4 + 4;
+ clang::io::Emit8(Out, DataLen);
+ return std::make_pair(StrLen + 1, DataLen);
+ }
+
+ void EmitKey(raw_ostream& Out, const char *path, unsigned KeyLen) {
+ Out.write(path, KeyLen);
+ }
+
+ void EmitData(raw_ostream &Out, key_type_ref,
+ data_type_ref Data, unsigned DataLen) {
+ using namespace clang::io;
+ uint64_t Start = Out.tell(); (void)Start;
+
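+ // Pack the header flags into one byte: bit 5 = isImport, bit 4 =
+ // isPragmaOnce, bits 2-3 = DirInfo, bit 1 = Resolved, bit 0 =
+ // IndexHeaderMapHeader.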
+ unsigned char Flags = (Data.isImport << 5)
+ | (Data.isPragmaOnce << 4)
+ | (Data.DirInfo << 2)
+ | (Data.Resolved << 1)
+ | Data.IndexHeaderMapHeader;
+ Emit8(Out, (uint8_t)Flags);
+ Emit16(Out, (uint16_t) Data.NumIncludes);
+
+ if (!Data.ControllingMacro)
+ Emit32(Out, (uint32_t)Data.ControllingMacroID);
+ else
+ Emit32(Out, (uint32_t)Writer.getIdentifierRef(Data.ControllingMacro));
+
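+ // Framework names are pooled in a side buffer of NUL-terminated strings;
+ // the value stored here is the offset into that buffer plus one, with 0
+ // meaning "no framework".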
+ unsigned Offset = 0;
+ if (!Data.Framework.empty()) {
+ // If this header refers into a framework, save the framework name.
+ llvm::StringMap<unsigned>::iterator Pos
+ = FrameworkNameOffset.find(Data.Framework);
+ if (Pos == FrameworkNameOffset.end()) {
+ Offset = FrameworkStringData.size() + 1;
+ FrameworkStringData.append(Data.Framework.begin(),
+ Data.Framework.end());
+ FrameworkStringData.push_back(0);
+
+ FrameworkNameOffset[Data.Framework] = Offset;
+ } else
+ Offset = Pos->second;
+ }
+ Emit32(Out, Offset);
+
+ assert(Out.tell() - Start == DataLen && "Wrong data length");
+ }
+
+ const char *strings_begin() const { return FrameworkStringData.begin(); }
+ const char *strings_end() const { return FrameworkStringData.end(); }
+ };
+} // end anonymous namespace
+
+/// \brief Write the header search block, which maps the known header files to
+/// their header search information.
+///
+/// \param HS The header search structure to save.
+///
+/// \param isysroot The system root, used to adjust header file names when
+/// building a relocatable PCH.
+void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS, StringRef isysroot) {
+ SmallVector<const FileEntry *, 16> FilesByUID;
+ HS.getFileMgr().GetUniqueIDMapping(FilesByUID);
+
+ if (FilesByUID.size() > HS.header_file_size())
+ FilesByUID.resize(HS.header_file_size());
+
+ HeaderFileInfoTrait GeneratorTrait(*this, HS);
+ OnDiskChainedHashTableGenerator<HeaderFileInfoTrait> Generator;
+ SmallVector<const char *, 4> SavedStrings;
+ unsigned NumHeaderSearchEntries = 0;
+ for (unsigned UID = 0, LastUID = FilesByUID.size(); UID != LastUID; ++UID) {
+ const FileEntry *File = FilesByUID[UID];
+ if (!File)
+ continue;
+
+ // Use HeaderSearch's getFileInfo to make sure we get the HeaderFileInfo
+ // from the external source if it was not provided already.
+ const HeaderFileInfo &HFI = HS.getFileInfo(File);
+ if (HFI.External && Chain)
+ continue;
+
+ // Turn the file name into an absolute path, if it isn't already.
+ const char *Filename = File->getName();
+ Filename = adjustFilenameForRelocatablePCH(Filename, isysroot);
+
+ // If we performed any translation on the file name at all, we need to
+ // save this string, since the generator will refer to it later.
+ if (Filename != File->getName()) {
+ Filename = strdup(Filename);
+ SavedStrings.push_back(Filename);
+ }
+
+ Generator.insert(Filename, HFI, GeneratorTrait);
+ ++NumHeaderSearchEntries;
+ }
+
+ // Create the on-disk hash table in a buffer.
+ SmallString<4096> TableData;
+ uint32_t BucketOffset;
+ {
+ llvm::raw_svector_ostream Out(TableData);
+ // Make sure that no bucket is at offset 0
+ clang::io::Emit32(Out, 0);
+ BucketOffset = Generator.Emit(Out, GeneratorTrait);
+ }
+
+ // Create a blob abbreviation
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(HEADER_SEARCH_TABLE));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned TableAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the header search table
+ RecordData Record;
+ Record.push_back(HEADER_SEARCH_TABLE);
+ Record.push_back(BucketOffset);
+ Record.push_back(NumHeaderSearchEntries);
+ Record.push_back(TableData.size());
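+ // TableData.size() marks where the hash table ends and the pooled
+ // framework-name strings begin; append those strings to complete the blob.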
+ TableData.append(GeneratorTrait.strings_begin(),GeneratorTrait.strings_end());
+ Stream.EmitRecordWithBlob(TableAbbrev, Record, TableData.str());
+
+ // Free all of the strings we had to duplicate.
+ for (unsigned I = 0, N = SavedStrings.size(); I != N; ++I)
+ free((void*)SavedStrings[I]);
+}
+
+/// \brief Writes the block containing the serialized form of the
+/// source manager.
+///
+/// TODO: We should probably use an on-disk hash table (stored in a
+/// blob), indexed based on the file name, so that we only create
+/// entries for files that we actually need. In the common case (no
+/// errors), we probably won't have to create file entries for any of
+/// the files in the AST.
+void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
+ const Preprocessor &PP,
+ StringRef isysroot) {
+ RecordData Record;
+
+ // Enter the source manager block.
+ Stream.EnterSubblock(SOURCE_MANAGER_BLOCK_ID, 3);
+
+ // Abbreviations for the various kinds of source-location entries.
+ unsigned SLocFileAbbrv = CreateSLocFileAbbrev(Stream);
+ unsigned SLocBufferAbbrv = CreateSLocBufferAbbrev(Stream);
+ unsigned SLocBufferBlobAbbrv = CreateSLocBufferBlobAbbrev(Stream);
+ unsigned SLocExpansionAbbrv = CreateSLocExpansionAbbrev(Stream);
+
+ // Write out the source location entry table. We skip the first
+ // entry, which is always the same dummy entry.
+ std::vector<uint32_t> SLocEntryOffsets;
+ // Write out the offsets of only source location file entries.
+ // We will go through them in ASTReader::validateFileEntries().
+ std::vector<uint32_t> SLocFileEntryOffsets;
+ RecordData PreloadSLocs;
+ SLocEntryOffsets.reserve(SourceMgr.local_sloc_entry_size() - 1);
+ for (unsigned I = 1, N = SourceMgr.local_sloc_entry_size();
+ I != N; ++I) {
+ // Get this source location entry.
+ const SrcMgr::SLocEntry *SLoc = &SourceMgr.getLocalSLocEntry(I);
+
+ // Record the offset of this source-location entry.
+ SLocEntryOffsets.push_back(Stream.GetCurrentBitNo());
+
+ // Figure out which record code to use.
+ unsigned Code;
+ if (SLoc->isFile()) {
+ const SrcMgr::ContentCache *Cache = SLoc->getFile().getContentCache();
+ if (Cache->OrigEntry) {
+ Code = SM_SLOC_FILE_ENTRY;
+ SLocFileEntryOffsets.push_back(Stream.GetCurrentBitNo());
+ } else
+ Code = SM_SLOC_BUFFER_ENTRY;
+ } else
+ Code = SM_SLOC_EXPANSION_ENTRY;
+ Record.clear();
+ Record.push_back(Code);
+
+ // Starting offset of this entry within this module, so skip the dummy.
+ Record.push_back(SLoc->getOffset() - 2);
+ if (SLoc->isFile()) {
+ const SrcMgr::FileInfo &File = SLoc->getFile();
+ Record.push_back(File.getIncludeLoc().getRawEncoding());
+ Record.push_back(File.getFileCharacteristic()); // FIXME: stable encoding
+ Record.push_back(File.hasLineDirectives());
+
+ const SrcMgr::ContentCache *Content = File.getContentCache();
+ if (Content->OrigEntry) {
+ assert(Content->OrigEntry == Content->ContentsEntry &&
+ "Writing to AST an overridden file is not supported");
+
+ // The source location entry is a file. The blob associated
+ // with this entry is the file name.
+
+ // Emit size/modification time for this file.
+ Record.push_back(Content->OrigEntry->getSize());
+ Record.push_back(Content->OrigEntry->getModificationTime());
+ Record.push_back(Content->BufferOverridden);
+ Record.push_back(File.NumCreatedFIDs);
+
+ FileDeclIDsTy::iterator FDI = FileDeclIDs.find(SLoc);
+ if (FDI != FileDeclIDs.end()) {
+ Record.push_back(FDI->second->FirstDeclIndex);
+ Record.push_back(FDI->second->DeclIDs.size());
+ } else {
+ Record.push_back(0);
+ Record.push_back(0);
+ }
+
+ // Turn the file name into an absolute path, if it isn't already.
+ const char *Filename = Content->OrigEntry->getName();
+ SmallString<128> FilePath(Filename);
+
+ // Ask the file manager to fixup the relative path for us. This will
+ // honor the working directory.
+ SourceMgr.getFileManager().FixupRelativePath(FilePath);
+
+ // FIXME: This call to make_absolute shouldn't be necessary, the
+ // call to FixupRelativePath should always return an absolute path.
+ llvm::sys::fs::make_absolute(FilePath);
+ Filename = FilePath.c_str();
+
+ Filename = adjustFilenameForRelocatablePCH(Filename, isysroot);
+ Stream.EmitRecordWithBlob(SLocFileAbbrv, Record, Filename);
+
+ if (Content->BufferOverridden) {
+ Record.clear();
+ Record.push_back(SM_SLOC_BUFFER_BLOB);
+ const llvm::MemoryBuffer *Buffer
+ = Content->getBuffer(PP.getDiagnostics(), PP.getSourceManager());
+ Stream.EmitRecordWithBlob(SLocBufferBlobAbbrv, Record,
+ StringRef(Buffer->getBufferStart(),
+ Buffer->getBufferSize() + 1));
+ }
+ } else {
+ // The source location entry is a buffer. The blob associated
+ // with this entry contains the contents of the buffer.
+
+ // We add one to the size so that we capture the trailing NULL
+ // that is required by llvm::MemoryBuffer::getMemBuffer (on
+ // the reader side).
+ const llvm::MemoryBuffer *Buffer
+ = Content->getBuffer(PP.getDiagnostics(), PP.getSourceManager());
+ const char *Name = Buffer->getBufferIdentifier();
+ Stream.EmitRecordWithBlob(SLocBufferAbbrv, Record,
+ StringRef(Name, strlen(Name) + 1));
+ Record.clear();
+ Record.push_back(SM_SLOC_BUFFER_BLOB);
+ Stream.EmitRecordWithBlob(SLocBufferBlobAbbrv, Record,
+ StringRef(Buffer->getBufferStart(),
+ Buffer->getBufferSize() + 1));
+
+ if (strcmp(Name, "<built-in>") == 0) {
+ PreloadSLocs.push_back(SLocEntryOffsets.size());
+ }
+ }
+ } else {
+ // The source location entry is a macro expansion.
+ const SrcMgr::ExpansionInfo &Expansion = SLoc->getExpansion();
+ Record.push_back(Expansion.getSpellingLoc().getRawEncoding());
+ Record.push_back(Expansion.getExpansionLocStart().getRawEncoding());
+ Record.push_back(Expansion.isMacroArgExpansion() ? 0
+ : Expansion.getExpansionLocEnd().getRawEncoding());
+
+ // Compute the token length for this macro expansion.
+ unsigned NextOffset = SourceMgr.getNextLocalOffset();
+ if (I + 1 != N)
+ NextOffset = SourceMgr.getLocalSLocEntry(I + 1).getOffset();
+ Record.push_back(NextOffset - SLoc->getOffset() - 1);
+ Stream.EmitRecordWithAbbrev(SLocExpansionAbbrv, Record);
+ }
+ }
+
+ Stream.ExitBlock();
+
+ if (SLocEntryOffsets.empty())
+ return;
+
+ // Write the source-location offsets table into the AST block. This
+ // table is used for lazily loading source-location information.
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SOURCE_LOCATION_OFFSETS));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // # of slocs
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // total size
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // offsets
+ unsigned SLocOffsetsAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Record.clear();
+ Record.push_back(SOURCE_LOCATION_OFFSETS);
+ Record.push_back(SLocEntryOffsets.size());
+ Record.push_back(SourceMgr.getNextLocalOffset() - 1); // skip dummy
+ Stream.EmitRecordWithBlob(SLocOffsetsAbbrev, Record, data(SLocEntryOffsets));
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(FILE_SOURCE_LOCATION_OFFSETS));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 16)); // # of slocs
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // offsets
+ unsigned SLocFileOffsetsAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Record.clear();
+ Record.push_back(FILE_SOURCE_LOCATION_OFFSETS);
+ Record.push_back(SLocFileEntryOffsets.size());
+ Stream.EmitRecordWithBlob(SLocFileOffsetsAbbrev, Record,
+ data(SLocFileEntryOffsets));
+
+ // Write the source location entry preloads array, telling the AST
+ // reader which source locations entries it should load eagerly.
+ Stream.EmitRecord(SOURCE_LOCATION_PRELOADS, PreloadSLocs);
+
+ // Write the line table. It depends on remapping working, so it must come
+ // after the source location offsets.
+ if (SourceMgr.hasLineTable()) {
+ LineTableInfo &LineTable = SourceMgr.getLineTable();
+
+ Record.clear();
+ // Emit the file names
+ Record.push_back(LineTable.getNumFilenames());
+ for (unsigned I = 0, N = LineTable.getNumFilenames(); I != N; ++I) {
+ // Emit the file name
+ const char *Filename = LineTable.getFilename(I);
+ Filename = adjustFilenameForRelocatablePCH(Filename, isysroot);
+ unsigned FilenameLen = Filename? strlen(Filename) : 0;
+ Record.push_back(FilenameLen);
+ if (FilenameLen)
+ Record.insert(Record.end(), Filename, Filename + FilenameLen);
+ }
+
+ // Emit the line entries
+ for (LineTableInfo::iterator L = LineTable.begin(), LEnd = LineTable.end();
+ L != LEnd; ++L) {
+ // Only emit entries for local files.
+ if (L->first < 0)
+ continue;
+
+ // Emit the file ID
+ Record.push_back(L->first);
+
+ // Emit the line entries
+ Record.push_back(L->second.size());
+ for (std::vector<LineEntry>::iterator LE = L->second.begin(),
+ LEEnd = L->second.end();
+ LE != LEEnd; ++LE) {
+ Record.push_back(LE->FileOffset);
+ Record.push_back(LE->LineNo);
+ Record.push_back(LE->FilenameID);
+ Record.push_back((unsigned)LE->FileKind);
+ Record.push_back(LE->IncludeOffset);
+ }
+ }
+ Stream.EmitRecord(SOURCE_MANAGER_LINE_TABLE, Record);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Preprocessor Serialization
+//===----------------------------------------------------------------------===//
+
+static int compareMacroDefinitions(const void *XPtr, const void *YPtr) {
+ const std::pair<const IdentifierInfo *, MacroInfo *> &X =
+ *(const std::pair<const IdentifierInfo *, MacroInfo *>*)XPtr;
+ const std::pair<const IdentifierInfo *, MacroInfo *> &Y =
+ *(const std::pair<const IdentifierInfo *, MacroInfo *>*)YPtr;
+ return X.first->getName().compare(Y.first->getName());
+}
+
+/// \brief Writes the block containing the serialized form of the
+/// preprocessor.
+///
+void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
+ PreprocessingRecord *PPRec = PP.getPreprocessingRecord();
+ if (PPRec)
+ WritePreprocessorDetail(*PPRec);
+
+ RecordData Record;
+
+ // If the preprocessor __COUNTER__ value has been bumped, remember it.
+ if (PP.getCounterValue() != 0) {
+ Record.push_back(PP.getCounterValue());
+ Stream.EmitRecord(PP_COUNTER_VALUE, Record);
+ Record.clear();
+ }
+
+ // Enter the preprocessor block.
+ Stream.EnterSubblock(PREPROCESSOR_BLOCK_ID, 3);
+
+ // If the AST file contains __DATE__ or __TIME__ emit a warning about this.
+ // FIXME: use diagnostics subsystem for localization etc.
+ if (PP.SawDateOrTime())
+ fprintf(stderr, "warning: precompiled header used __DATE__ or __TIME__.\n");
+
+
+ // Loop over all the macro definitions that are live at the end of the file,
+ // emitting each to the PP section.
+
+ // Construct the list of macro definitions that need to be serialized.
+ SmallVector<std::pair<const IdentifierInfo *, MacroInfo *>, 2>
+ MacrosToEmit;
+ llvm::SmallPtrSet<const IdentifierInfo*, 4> MacroDefinitionsSeen;
+ for (Preprocessor::macro_iterator I = PP.macro_begin(Chain == 0),
+ E = PP.macro_end(Chain == 0);
+ I != E; ++I) {
+ const IdentifierInfo *Name = I->first;
+ if (!IsModule || I->second->isPublic()) {
+ MacroDefinitionsSeen.insert(Name);
+ MacrosToEmit.push_back(std::make_pair(I->first, I->second));
+ }
+ }
+
+ // Sort the set of macro definitions that need to be serialized by the
+ // name of the macro, to provide a stable ordering.
+ llvm::array_pod_sort(MacrosToEmit.begin(), MacrosToEmit.end(),
+ &compareMacroDefinitions);
+
+ // Resolve any identifiers that defined macros at the time they were
+ // deserialized, adding them to the list of macros to emit (if appropriate).
+ for (unsigned I = 0, N = DeserializedMacroNames.size(); I != N; ++I) {
+ IdentifierInfo *Name
+ = const_cast<IdentifierInfo *>(DeserializedMacroNames[I]);
+ if (Name->hasMacroDefinition() && MacroDefinitionsSeen.insert(Name))
+ MacrosToEmit.push_back(std::make_pair(Name, PP.getMacroInfo(Name)));
+ }
+
+ for (unsigned I = 0, N = MacrosToEmit.size(); I != N; ++I) {
+ const IdentifierInfo *Name = MacrosToEmit[I].first;
+ MacroInfo *MI = MacrosToEmit[I].second;
+ if (!MI)
+ continue;
+
+ // Don't emit builtin macros like __LINE__ to the AST file unless they have
+ // been redefined by the header (in which case they are not isBuiltinMacro).
+ // Also skip macros from an AST file if we're chaining.
+
+ // FIXME: There is a (probably minor) optimization we could do here, if
+ // the macro comes from the original PCH but the identifier comes from a
+ // chained PCH, by storing the offset into the original PCH rather than
+ // writing the macro definition a second time.
+ if (MI->isBuiltinMacro() ||
+ (Chain &&
+ Name->isFromAST() && !Name->hasChangedSinceDeserialization() &&
+ MI->isFromAST() && !MI->hasChangedAfterLoad()))
+ continue;
+
+ AddIdentifierRef(Name, Record);
+ MacroOffsets[Name] = Stream.GetCurrentBitNo();
+ Record.push_back(MI->getDefinitionLoc().getRawEncoding());
+ Record.push_back(MI->isUsed());
+ Record.push_back(MI->isPublic());
+ AddSourceLocation(MI->getVisibilityLocation(), Record);
+ unsigned Code;
+ if (MI->isObjectLike()) {
+ Code = PP_MACRO_OBJECT_LIKE;
+ } else {
+ Code = PP_MACRO_FUNCTION_LIKE;
+
+ Record.push_back(MI->isC99Varargs());
+ Record.push_back(MI->isGNUVarargs());
+ Record.push_back(MI->getNumArgs());
+ for (MacroInfo::arg_iterator I = MI->arg_begin(), E = MI->arg_end();
+ I != E; ++I)
+ AddIdentifierRef(*I, Record);
+ }
+
+ // If we have a detailed preprocessing record, record the macro definition
+ // ID that corresponds to this macro.
+ if (PPRec)
+ Record.push_back(MacroDefinitions[PPRec->findMacroDefinition(MI)]);
+
+ Stream.EmitRecord(Code, Record);
+ Record.clear();
+
+ // Emit the tokens array.
+ for (unsigned TokNo = 0, e = MI->getNumTokens(); TokNo != e; ++TokNo) {
+ // Note that we know that the preprocessor does not have any annotation
+ // tokens in it because they are created by the parser, and thus can't be
+ // in a macro definition.
+ const Token &Tok = MI->getReplacementToken(TokNo);
+
+ Record.push_back(Tok.getLocation().getRawEncoding());
+ Record.push_back(Tok.getLength());
+
+ // FIXME: When reading literal tokens, reconstruct the literal pointer if
+ // it is needed.
+ AddIdentifierRef(Tok.getIdentifierInfo(), Record);
+ // FIXME: Should translate token kind to a stable encoding.
+ Record.push_back(Tok.getKind());
+ // FIXME: Should translate token flags to a stable encoding.
+ Record.push_back(Tok.getFlags());
+
+ Stream.EmitRecord(PP_TOKEN, Record);
+ Record.clear();
+ }
+ ++NumMacros;
+ }
+ Stream.ExitBlock();
+}
+
+void ASTWriter::WritePreprocessorDetail(PreprocessingRecord &PPRec) {
+ if (PPRec.local_begin() == PPRec.local_end())
+ return;
+
+ SmallVector<PPEntityOffset, 64> PreprocessedEntityOffsets;
+
+ // Enter the preprocessor block.
+ Stream.EnterSubblock(PREPROCESSOR_DETAIL_BLOCK_ID, 3);
+
+ // If the preprocessor has a preprocessing record, emit it.
+ unsigned NumPreprocessingRecords = 0;
+ using namespace llvm;
+
+ // Set up the abbreviation for the inclusion-directive record.
+ unsigned InclusionAbbrev = 0;
+ {
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(PPD_INCLUSION_DIRECTIVE));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // filename length
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // in quotes
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // kind
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ InclusionAbbrev = Stream.EmitAbbrev(Abbrev);
+ }
+
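+ // Locally-written preprocessed entities are numbered after the predefined
+ // entity IDs and, when chaining, after the entities loaded from other
+ // AST files.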
+ unsigned FirstPreprocessorEntityID
+ = (Chain ? PPRec.getNumLoadedPreprocessedEntities() : 0)
+ + NUM_PREDEF_PP_ENTITY_IDS;
+ unsigned NextPreprocessorEntityID = FirstPreprocessorEntityID;
+ RecordData Record;
+ for (PreprocessingRecord::iterator E = PPRec.local_begin(),
+ EEnd = PPRec.local_end();
+ E != EEnd;
+ (void)++E, ++NumPreprocessingRecords, ++NextPreprocessorEntityID) {
+ Record.clear();
+
+ PreprocessedEntityOffsets.push_back(PPEntityOffset((*E)->getSourceRange(),
+ Stream.GetCurrentBitNo()));
+
+ if (MacroDefinition *MD = dyn_cast<MacroDefinition>(*E)) {
+ // Record this macro definition's ID.
+ MacroDefinitions[MD] = NextPreprocessorEntityID;
+
+ AddIdentifierRef(MD->getName(), Record);
+ Stream.EmitRecord(PPD_MACRO_DEFINITION, Record);
+ continue;
+ }
+
+ if (MacroExpansion *ME = dyn_cast<MacroExpansion>(*E)) {
+ Record.push_back(ME->isBuiltinMacro());
+ if (ME->isBuiltinMacro())
+ AddIdentifierRef(ME->getName(), Record);
+ else
+ Record.push_back(MacroDefinitions[ME->getDefinition()]);
+ Stream.EmitRecord(PPD_MACRO_EXPANSION, Record);
+ continue;
+ }
+
+ if (InclusionDirective *ID = dyn_cast<InclusionDirective>(*E)) {
+ Record.push_back(PPD_INCLUSION_DIRECTIVE);
+ Record.push_back(ID->getFileName().size());
+ Record.push_back(ID->wasInQuotes());
+ Record.push_back(static_cast<unsigned>(ID->getKind()));
+ SmallString<64> Buffer;
+ Buffer += ID->getFileName();
+ // The FileEntry may be null if the include was not resolved; we still
+ // create a PCH even in the presence of compiler errors.
+ if (ID->getFile())
+ Buffer += ID->getFile()->getName();
+ Stream.EmitRecordWithBlob(InclusionAbbrev, Record, Buffer);
+ continue;
+ }
+
+ llvm_unreachable("Unhandled PreprocessedEntity in ASTWriter");
+ }
+ Stream.ExitBlock();
+
+ // Write the offsets table for the preprocessing record.
+ if (NumPreprocessingRecords > 0) {
+ assert(PreprocessedEntityOffsets.size() == NumPreprocessingRecords);
+
+ // Create an abbreviation for the preprocessed-entity offsets table.
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(PPD_ENTITIES_OFFSETS));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // first pp entity
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned PPEOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Record.clear();
+ Record.push_back(PPD_ENTITIES_OFFSETS);
+ Record.push_back(FirstPreprocessorEntityID - NUM_PREDEF_PP_ENTITY_IDS);
+ Stream.EmitRecordWithBlob(PPEOffsetAbbrev, Record,
+ data(PreprocessedEntityOffsets));
+ }
+}
+
+unsigned ASTWriter::getSubmoduleID(Module *Mod) {
+ llvm::DenseMap<Module *, unsigned>::iterator Known = SubmoduleIDs.find(Mod);
+ if (Known != SubmoduleIDs.end())
+ return Known->second;
+
+ return SubmoduleIDs[Mod] = NextSubmoduleID++;
+}
+
+/// \brief Compute the number of modules within the given tree (including the
+/// given module).
+static unsigned getNumberOfModules(Module *Mod) {
+ unsigned ChildModules = 0;
+ for (Module::submodule_iterator Sub = Mod->submodule_begin(),
+ SubEnd = Mod->submodule_end();
+ Sub != SubEnd; ++Sub)
+ ChildModules += getNumberOfModules(*Sub);
+
+ return ChildModules + 1;
+}
+
+void ASTWriter::WriteSubmodules(Module *WritingModule) {
+ // Determine the dependencies of our module and each of its submodules.
+ // FIXME: This feels like it belongs somewhere else, but there are no
+ // other consumers of this information.
+ SourceManager &SrcMgr = PP->getSourceManager();
+ ModuleMap &ModMap = PP->getHeaderSearchInfo().getModuleMap();
+ for (ASTContext::import_iterator I = Context->local_import_begin(),
+ IEnd = Context->local_import_end();
+ I != IEnd; ++I) {
+ if (Module *ImportedFrom
+ = ModMap.inferModuleFromLocation(FullSourceLoc(I->getLocation(),
+ SrcMgr))) {
+ ImportedFrom->Imports.push_back(I->getImportedModule());
+ }
+ }
+
+ // Enter the submodule description block.
+ Stream.EnterSubblock(SUBMODULE_BLOCK_ID, NUM_ALLOWED_ABBREVS_SIZE);
+
+ // Write the abbreviations needed for the submodules block.
+ using namespace llvm;
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_DEFINITION));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ID
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Parent
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsFramework
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsExplicit
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsSystem
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InferSubmodules...
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InferExplicit...
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // InferExportWild...
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned DefinitionAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_UMBRELLA_HEADER));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned UmbrellaAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_HEADER));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned HeaderAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_UMBRELLA_DIR));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Name
+ unsigned UmbrellaDirAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_REQUIRES));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // Feature
+ unsigned RequiresAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the submodule metadata block.
+ RecordData Record;
+ Record.push_back(getNumberOfModules(WritingModule));
+ Record.push_back(FirstSubmoduleID - NUM_PREDEF_SUBMODULE_IDS);
+ Stream.EmitRecord(SUBMODULE_METADATA, Record);
+
+ // Write all of the submodules.
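+ // Walk the module tree breadth-first so that each parent's ID is assigned
+ // (and its definition emitted) before any of its submodules refer to it.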
+ std::queue<Module *> Q;
+ Q.push(WritingModule);
+ while (!Q.empty()) {
+ Module *Mod = Q.front();
+ Q.pop();
+ unsigned ID = getSubmoduleID(Mod);
+
+ // Emit the definition of the block.
+ Record.clear();
+ Record.push_back(SUBMODULE_DEFINITION);
+ Record.push_back(ID);
+ if (Mod->Parent) {
+ assert(SubmoduleIDs[Mod->Parent] && "Submodule parent not written?");
+ Record.push_back(SubmoduleIDs[Mod->Parent]);
+ } else {
+ Record.push_back(0);
+ }
+ Record.push_back(Mod->IsFramework);
+ Record.push_back(Mod->IsExplicit);
+ Record.push_back(Mod->IsSystem);
+ Record.push_back(Mod->InferSubmodules);
+ Record.push_back(Mod->InferExplicitSubmodules);
+ Record.push_back(Mod->InferExportWildcard);
+ Stream.EmitRecordWithBlob(DefinitionAbbrev, Record, Mod->Name);
+
+ // Emit the requirements.
+ for (unsigned I = 0, N = Mod->Requires.size(); I != N; ++I) {
+ Record.clear();
+ Record.push_back(SUBMODULE_REQUIRES);
+ Stream.EmitRecordWithBlob(RequiresAbbrev, Record,
+ Mod->Requires[I].data(),
+ Mod->Requires[I].size());
+ }
+
+ // Emit the umbrella header, if there is one.
+ if (const FileEntry *UmbrellaHeader = Mod->getUmbrellaHeader()) {
+ Record.clear();
+ Record.push_back(SUBMODULE_UMBRELLA_HEADER);
+ Stream.EmitRecordWithBlob(UmbrellaAbbrev, Record,
+ UmbrellaHeader->getName());
+ } else if (const DirectoryEntry *UmbrellaDir = Mod->getUmbrellaDir()) {
+ Record.clear();
+ Record.push_back(SUBMODULE_UMBRELLA_DIR);
+ Stream.EmitRecordWithBlob(UmbrellaDirAbbrev, Record,
+ UmbrellaDir->getName());
+ }
+
+ // Emit the headers.
+ for (unsigned I = 0, N = Mod->Headers.size(); I != N; ++I) {
+ Record.clear();
+ Record.push_back(SUBMODULE_HEADER);
+ Stream.EmitRecordWithBlob(HeaderAbbrev, Record,
+ Mod->Headers[I]->getName());
+ }
+
+ // Emit the imports.
+ if (!Mod->Imports.empty()) {
+ Record.clear();
+ for (unsigned I = 0, N = Mod->Imports.size(); I != N; ++I) {
+ unsigned ImportedID = getSubmoduleID(Mod->Imports[I]);
+ assert(ImportedID && "Unknown submodule!");
+ Record.push_back(ImportedID);
+ }
+ Stream.EmitRecord(SUBMODULE_IMPORTS, Record);
+ }
+
+ // Emit the exports.
+ if (!Mod->Exports.empty()) {
+ Record.clear();
+ for (unsigned I = 0, N = Mod->Exports.size(); I != N; ++I) {
+ if (Module *Exported = Mod->Exports[I].getPointer()) {
+ unsigned ExportedID = SubmoduleIDs[Exported];
+ assert(ExportedID > 0 && "Unknown submodule ID?");
+ Record.push_back(ExportedID);
+ } else {
+ Record.push_back(0);
+ }
+
+ Record.push_back(Mod->Exports[I].getInt());
+ }
+ Stream.EmitRecord(SUBMODULE_EXPORTS, Record);
+ }
+
+ // Queue up the submodules of this module.
+ for (Module::submodule_iterator Sub = Mod->submodule_begin(),
+ SubEnd = Mod->submodule_end();
+ Sub != SubEnd; ++Sub)
+ Q.push(*Sub);
+ }
+
+ Stream.ExitBlock();
+
+ assert((NextSubmoduleID - FirstSubmoduleID
+ == getNumberOfModules(WritingModule)) && "Wrong # of submodules");
+}
+
+serialization::SubmoduleID
+ASTWriter::inferSubmoduleIDFromLocation(SourceLocation Loc) {
+ if (Loc.isInvalid() || !WritingModule)
+ return 0; // No submodule
+
+ // Find the module that owns this location.
+ ModuleMap &ModMap = PP->getHeaderSearchInfo().getModuleMap();
+ Module *OwningMod
+ = ModMap.inferModuleFromLocation(FullSourceLoc(Loc,PP->getSourceManager()));
+ if (!OwningMod)
+ return 0;
+
+ // Check whether this submodule is part of our own module.
+ if (WritingModule != OwningMod && !OwningMod->isSubModuleOf(WritingModule))
+ return 0;
+
+ return getSubmoduleID(OwningMod);
+}
+
+void ASTWriter::WritePragmaDiagnosticMappings(const DiagnosticsEngine &Diag) {
+ RecordData Record;
+ for (DiagnosticsEngine::DiagStatePointsTy::const_iterator
+ I = Diag.DiagStatePoints.begin(), E = Diag.DiagStatePoints.end();
+ I != E; ++I) {
+ const DiagnosticsEngine::DiagStatePoint &point = *I;
+ if (point.Loc.isInvalid())
+ continue;
+
+ Record.push_back(point.Loc.getRawEncoding());
+ for (DiagnosticsEngine::DiagState::const_iterator
+ I = point.State->begin(), E = point.State->end(); I != E; ++I) {
+ if (I->second.isPragma()) {
+ Record.push_back(I->first);
+ Record.push_back(I->second.getMapping());
+ }
+ }
+ Record.push_back(-1); // mark the end of the diag/map pairs for this
+ // location.
+ }
+
+ if (!Record.empty())
+ Stream.EmitRecord(DIAG_PRAGMA_MAPPINGS, Record);
+}
+
+void ASTWriter::WriteCXXBaseSpecifiersOffsets() {
+ if (CXXBaseSpecifiersOffsets.empty())
+ return;
+
+ RecordData Record;
+
+ // Create a blob abbreviation for the C++ base specifiers offsets.
+ using namespace llvm;
+
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(CXX_BASE_SPECIFIER_OFFSETS));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // size
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned BaseSpecifierOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the base specifier offsets table.
+ Record.clear();
+ Record.push_back(CXX_BASE_SPECIFIER_OFFSETS);
+ Record.push_back(CXXBaseSpecifiersOffsets.size());
+ Stream.EmitRecordWithBlob(BaseSpecifierOffsetAbbrev, Record,
+ data(CXXBaseSpecifiersOffsets));
+}
+
+//===----------------------------------------------------------------------===//
+// Type Serialization
+//===----------------------------------------------------------------------===//
+
+/// \brief Write the representation of a type to the AST stream.
+void ASTWriter::WriteType(QualType T) {
+ TypeIdx &Idx = TypeIdxs[T];
+ if (Idx.getIndex() == 0) // we haven't seen this type before.
+ Idx = TypeIdx(NextTypeID++);
+
+ assert(Idx.getIndex() >= FirstTypeID && "Re-writing a type from a prior AST");
+
+ // Record the offset for this type.
+ unsigned Index = Idx.getIndex() - FirstTypeID;
+ if (TypeOffsets.size() == Index)
+ TypeOffsets.push_back(Stream.GetCurrentBitNo());
+ else if (TypeOffsets.size() < Index) {
+ TypeOffsets.resize(Index + 1);
+ TypeOffsets[Index] = Stream.GetCurrentBitNo();
+ }
+
+ RecordData Record;
+
+ // Emit the type's representation.
+ ASTTypeWriter W(*this, Record);
+
+ if (T.hasLocalNonFastQualifiers()) {
+ Qualifiers Qs = T.getLocalQualifiers();
+ AddTypeRef(T.getLocalUnqualifiedType(), Record);
+ Record.push_back(Qs.getAsOpaqueValue());
+ W.Code = TYPE_EXT_QUAL;
+ } else {
+ switch (T->getTypeClass()) {
+ // For all of the concrete, non-dependent types, call the
+ // appropriate visitor function.
+#define TYPE(Class, Base) \
+ case Type::Class: W.Visit##Class##Type(cast<Class##Type>(T)); break;
+#define ABSTRACT_TYPE(Class, Base)
+#include "clang/AST/TypeNodes.def"
+ }
+ }
+
+ // Emit the serialized record.
+ Stream.EmitRecord(W.Code, Record);
+
+ // Flush any expressions that were written as part of this type.
+ FlushStmts();
+}
+
+//===----------------------------------------------------------------------===//
+// Declaration Serialization
+//===----------------------------------------------------------------------===//
+
+/// \brief Write the block containing all of the declaration IDs
+/// lexically declared within the given DeclContext.
+///
+/// \returns the offset of the DECL_CONTEXT_LEXICAL block within the
+/// bitstream, or 0 if no block was written.
+uint64_t ASTWriter::WriteDeclContextLexicalBlock(ASTContext &Context,
+ DeclContext *DC) {
+ if (DC->decls_empty())
+ return 0;
+
+ uint64_t Offset = Stream.GetCurrentBitNo();
+ RecordData Record;
+ Record.push_back(DECL_CONTEXT_LEXICAL);
+ SmallVector<KindDeclIDPair, 64> Decls;
+ for (DeclContext::decl_iterator D = DC->decls_begin(), DEnd = DC->decls_end();
+ D != DEnd; ++D)
+ Decls.push_back(std::make_pair((*D)->getKind(), GetDeclRef(*D)));
+
+ ++NumLexicalDeclContexts;
+ Stream.EmitRecordWithBlob(DeclContextLexicalAbbrev, Record, data(Decls));
+ return Offset;
+}
+
+void ASTWriter::WriteTypeDeclOffsets() {
+ using namespace llvm;
+ RecordData Record;
+
+ // Write the type offsets array
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(TYPE_OFFSET));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of types
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // base type index
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // types block
+ unsigned TypeOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+ Record.clear();
+ Record.push_back(TYPE_OFFSET);
+ Record.push_back(TypeOffsets.size());
+ Record.push_back(FirstTypeID - NUM_PREDEF_TYPE_IDS);
+ Stream.EmitRecordWithBlob(TypeOffsetAbbrev, Record, data(TypeOffsets));
+
+ // Write the declaration offsets array
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(DECL_OFFSET));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of declarations
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // base decl ID
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob)); // declarations block
+ unsigned DeclOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+ Record.clear();
+ Record.push_back(DECL_OFFSET);
+ Record.push_back(DeclOffsets.size());
+ Record.push_back(FirstDeclID - NUM_PREDEF_DECL_IDS);
+ Stream.EmitRecordWithBlob(DeclOffsetAbbrev, Record, data(DeclOffsets));
+}
+
+void ASTWriter::WriteFileDeclIDsMap() {
+ using namespace llvm;
+ RecordData Record;
+
+ // Join the vectors of DeclIDs from all files.
+ SmallVector<DeclID, 256> FileSortedIDs;
+ for (FileDeclIDsTy::iterator
+ FI = FileDeclIDs.begin(), FE = FileDeclIDs.end(); FI != FE; ++FI) {
+ DeclIDInFileInfo &Info = *FI->second;
+ Info.FirstDeclIndex = FileSortedIDs.size();
+ for (LocDeclIDsTy::iterator
+ DI = Info.DeclIDs.begin(), DE = Info.DeclIDs.end(); DI != DE; ++DI)
+ FileSortedIDs.push_back(DI->second);
+ }
+
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(FILE_SORTED_DECLS));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned AbbrevCode = Stream.EmitAbbrev(Abbrev);
+ Record.push_back(FILE_SORTED_DECLS);
+ Stream.EmitRecordWithBlob(AbbrevCode, Record, data(FileSortedIDs));
+}
+
+//===----------------------------------------------------------------------===//
+// Global Method Pool and Selector Serialization
+//===----------------------------------------------------------------------===//
+
+namespace {
+// Trait used for the on-disk hash table used in the method pool.
+class ASTMethodPoolTrait {
+ ASTWriter &Writer;
+
+public:
+ typedef Selector key_type;
+ typedef key_type key_type_ref;
+
+ struct data_type {
+ SelectorID ID;
+ ObjCMethodList Instance, Factory;
+ };
+ typedef const data_type& data_type_ref;
+
+ explicit ASTMethodPoolTrait(ASTWriter &Writer) : Writer(Writer) { }
+
+ static unsigned ComputeHash(Selector Sel) {
+ return serialization::ComputeHash(Sel);
+ }
+
+ std::pair<unsigned,unsigned>
+ EmitKeyDataLength(raw_ostream& Out, Selector Sel,
+ data_type_ref Methods) {
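+ // Key layout: a 16-bit argument count followed by one 32-bit identifier ID
+ // per slot; a zero-argument selector still stores a single slot.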
+ unsigned KeyLen = 2 + (Sel.getNumArgs()? Sel.getNumArgs() * 4 : 4);
+ clang::io::Emit16(Out, KeyLen);
+ unsigned DataLen = 4 + 2 + 2; // 2 bytes for each of the method counts
+ for (const ObjCMethodList *Method = &Methods.Instance; Method;
+ Method = Method->Next)
+ if (Method->Method)
+ DataLen += 4;
+ for (const ObjCMethodList *Method = &Methods.Factory; Method;
+ Method = Method->Next)
+ if (Method->Method)
+ DataLen += 4;
+ clang::io::Emit16(Out, DataLen);
+ return std::make_pair(KeyLen, DataLen);
+ }
+
+ void EmitKey(raw_ostream& Out, Selector Sel, unsigned) {
+ uint64_t Start = Out.tell();
+ assert((Start >> 32) == 0 && "Selector key offset too large");
+ Writer.SetSelectorOffset(Sel, Start);
+ unsigned N = Sel.getNumArgs();
+ clang::io::Emit16(Out, N);
+ if (N == 0)
+ N = 1;
+ for (unsigned I = 0; I != N; ++I)
+ clang::io::Emit32(Out,
+ Writer.getIdentifierRef(Sel.getIdentifierInfoForSlot(I)));
+ }
+
+ void EmitData(raw_ostream& Out, key_type_ref,
+ data_type_ref Methods, unsigned DataLen) {
+ uint64_t Start = Out.tell(); (void)Start;
+ clang::io::Emit32(Out, Methods.ID);
+ unsigned NumInstanceMethods = 0;
+ for (const ObjCMethodList *Method = &Methods.Instance; Method;
+ Method = Method->Next)
+ if (Method->Method)
+ ++NumInstanceMethods;
+
+ unsigned NumFactoryMethods = 0;
+ for (const ObjCMethodList *Method = &Methods.Factory; Method;
+ Method = Method->Next)
+ if (Method->Method)
+ ++NumFactoryMethods;
+
+ clang::io::Emit16(Out, NumInstanceMethods);
+ clang::io::Emit16(Out, NumFactoryMethods);
+ for (const ObjCMethodList *Method = &Methods.Instance; Method;
+ Method = Method->Next)
+ if (Method->Method)
+ clang::io::Emit32(Out, Writer.getDeclID(Method->Method));
+ for (const ObjCMethodList *Method = &Methods.Factory; Method;
+ Method = Method->Next)
+ if (Method->Method)
+ clang::io::Emit32(Out, Writer.getDeclID(Method->Method));
+
+ assert(Out.tell() - Start == DataLen && "Data length is wrong");
+ }
+};
+} // end anonymous namespace
+
+/// \brief Write ObjC data: selectors and the method pool.
+///
+/// The method pool contains both instance and factory methods, stored
+/// in an on-disk hash table indexed by the selector. The hash table also
+/// contains an empty entry for every other selector known to Sema.
+void ASTWriter::WriteSelectors(Sema &SemaRef) {
+ using namespace llvm;
+
+ // Do we have to do anything at all?
+ if (SemaRef.MethodPool.empty() && SelectorIDs.empty())
+ return;
+ unsigned NumTableEntries = 0;
+ // Create and write out the blob that contains selectors and the method pool.
+ {
+ OnDiskChainedHashTableGenerator<ASTMethodPoolTrait> Generator;
+ ASTMethodPoolTrait Trait(*this);
+
+ // Create the on-disk hash table representation. We walk through every
+ // selector we've seen and look it up in the method pool.
+ SelectorOffsets.resize(NextSelectorID - FirstSelectorID);
+ for (llvm::DenseMap<Selector, SelectorID>::iterator
+ I = SelectorIDs.begin(), E = SelectorIDs.end();
+ I != E; ++I) {
+ Selector S = I->first;
+ Sema::GlobalMethodPool::iterator F = SemaRef.MethodPool.find(S);
+ ASTMethodPoolTrait::data_type Data = {
+ I->second,
+ ObjCMethodList(),
+ ObjCMethodList()
+ };
+ if (F != SemaRef.MethodPool.end()) {
+ Data.Instance = F->second.first;
+ Data.Factory = F->second.second;
+ }
+ // Only write this selector if it's not in an existing AST or something
+ // changed.
+ if (Chain && I->second < FirstSelectorID) {
+ // Selector already exists. Did it change?
+ bool changed = false;
+ for (ObjCMethodList *M = &Data.Instance; !changed && M && M->Method;
+ M = M->Next) {
+ if (!M->Method->isFromASTFile())
+ changed = true;
+ }
+ for (ObjCMethodList *M = &Data.Factory; !changed && M && M->Method;
+ M = M->Next) {
+ if (!M->Method->isFromASTFile())
+ changed = true;
+ }
+ if (!changed)
+ continue;
+ } else if (Data.Instance.Method || Data.Factory.Method) {
+ // A new method pool entry.
+ ++NumTableEntries;
+ }
+ Generator.insert(S, Data, Trait);
+ }
+
+ // Create the on-disk hash table in a buffer.
+ SmallString<4096> MethodPool;
+ uint32_t BucketOffset;
+ {
+ ASTMethodPoolTrait Trait(*this);
+ llvm::raw_svector_ostream Out(MethodPool);
+ // Make sure that no bucket is at offset 0
+ clang::io::Emit32(Out, 0);
+ BucketOffset = Generator.Emit(Out, Trait);
+ }
+
+ // Create a blob abbreviation
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(METHOD_POOL));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned MethodPoolAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the method pool
+ RecordData Record;
+ Record.push_back(METHOD_POOL);
+ Record.push_back(BucketOffset);
+ Record.push_back(NumTableEntries);
+ Stream.EmitRecordWithBlob(MethodPoolAbbrev, Record, MethodPool.str());
+
+ // Create a blob abbreviation for the selector table offsets.
+ Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(SELECTOR_OFFSETS));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // size
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // first ID
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned SelectorOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the selector offsets table.
+ Record.clear();
+ Record.push_back(SELECTOR_OFFSETS);
+ Record.push_back(SelectorOffsets.size());
+ Record.push_back(FirstSelectorID - NUM_PREDEF_SELECTOR_IDS);
+ Stream.EmitRecordWithBlob(SelectorOffsetAbbrev, Record,
+ data(SelectorOffsets));
+ }
+}
+
+/// \brief Write the selectors referenced in @selector expression into AST file.
+void ASTWriter::WriteReferencedSelectorsPool(Sema &SemaRef) {
+ using namespace llvm;
+ if (SemaRef.ReferencedSelectors.empty())
+ return;
+
+ RecordData Record;
+
+ // Note: this writes out all references even for a dependent AST. But it is
+ // very tricky to fix, and given that @selector shouldn't really appear in
+ // headers, probably not worth it. It's not a correctness issue.
+ for (DenseMap<Selector, SourceLocation>::iterator S =
+ SemaRef.ReferencedSelectors.begin(),
+ E = SemaRef.ReferencedSelectors.end(); S != E; ++S) {
+ Selector Sel = (*S).first;
+ SourceLocation Loc = (*S).second;
+ AddSelectorRef(Sel, Record);
+ AddSourceLocation(Loc, Record);
+ }
+ Stream.EmitRecord(REFERENCED_SELECTOR_POOL, Record);
+}
+
+//===----------------------------------------------------------------------===//
+// Identifier Table Serialization
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ASTIdentifierTableTrait {
+ ASTWriter &Writer;
+ Preprocessor &PP;
+ IdentifierResolver &IdResolver;
+ bool IsModule;
+
+ /// \brief Determines whether this is an "interesting" identifier
+ /// that needs a full IdentifierInfo structure written into the hash
+ /// table.
+ bool isInterestingIdentifier(IdentifierInfo *II, MacroInfo *&Macro) {
+ if (II->isPoisoned() ||
+ II->isExtensionToken() ||
+ II->getObjCOrBuiltinID() ||
+ II->hasRevertedTokenIDToIdentifier() ||
+ II->getFETokenInfo<void>())
+ return true;
+
+ return hasMacroDefinition(II, Macro);
+ }
+
+ bool hasMacroDefinition(IdentifierInfo *II, MacroInfo *&Macro) {
+ if (!II->hasMacroDefinition())
+ return false;
+
+ if (Macro || (Macro = PP.getMacroInfo(II)))
+ return !Macro->isBuiltinMacro() && (!IsModule || Macro->isPublic());
+
+ return false;
+ }
+
+public:
+ typedef IdentifierInfo* key_type;
+ typedef key_type key_type_ref;
+
+ typedef IdentID data_type;
+ typedef data_type data_type_ref;
+
+ ASTIdentifierTableTrait(ASTWriter &Writer, Preprocessor &PP,
+ IdentifierResolver &IdResolver, bool IsModule)
+ : Writer(Writer), PP(PP), IdResolver(IdResolver), IsModule(IsModule) { }
+
+ static unsigned ComputeHash(const IdentifierInfo* II) {
+ return llvm::HashString(II->getName());
+ }
+
+ std::pair<unsigned,unsigned>
+ EmitKeyDataLength(raw_ostream& Out, IdentifierInfo* II, IdentID ID) {
+ unsigned KeyLen = II->getLength() + 1;
+ unsigned DataLen = 4; // 4 bytes for the persistent ID << 1
+ MacroInfo *Macro = 0;
+ if (isInterestingIdentifier(II, Macro)) {
+ DataLen += 2; // 2 bytes for builtin ID, flags
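+ // A macro definition adds 8 more bytes: the macro's offset in the
+ // bitstream and the ID of the submodule that defines it (see EmitData).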
+ if (hasMacroDefinition(II, Macro))
+ DataLen += 8;
+
+ for (IdentifierResolver::iterator D = IdResolver.begin(II),
+ DEnd = IdResolver.end();
+ D != DEnd; ++D)
+ DataLen += sizeof(DeclID);
+ }
+ clang::io::Emit16(Out, DataLen);
+ // We emit the key length after the data length so that every
+ // string is preceded by a 16-bit length. This matches the PTH
+ // format for storing identifiers.
+ clang::io::Emit16(Out, KeyLen);
+ return std::make_pair(KeyLen, DataLen);
+ }
+
+ void EmitKey(raw_ostream& Out, const IdentifierInfo* II,
+ unsigned KeyLen) {
+ // Record the location of the key data. This is used when generating
+ // the mapping from persistent IDs to strings.
+ Writer.SetIdentifierOffset(II, Out.tell());
+ Out.write(II->getNameStart(), KeyLen);
+ }
+
+ void EmitData(raw_ostream& Out, IdentifierInfo* II,
+ IdentID ID, unsigned) {
+ MacroInfo *Macro = 0;
+ if (!isInterestingIdentifier(II, Macro)) {
+ clang::io::Emit32(Out, ID << 1);
+ return;
+ }
+
+ clang::io::Emit32(Out, (ID << 1) | 0x01);
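+ // Pack the remaining information into 16 bits: the ObjC/builtin ID in the
+ // high bits, then one bit each for has-macro-definition, extension token,
+ // poisoned, reverted-token-ID, and C++ operator keyword.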
+ uint32_t Bits = 0;
+ bool HasMacroDefinition = hasMacroDefinition(II, Macro);
+ Bits = (uint32_t)II->getObjCOrBuiltinID();
+ assert((Bits & 0x7ff) == Bits && "ObjCOrBuiltinID too big for ASTReader.");
+ Bits = (Bits << 1) | unsigned(HasMacroDefinition);
+ Bits = (Bits << 1) | unsigned(II->isExtensionToken());
+ Bits = (Bits << 1) | unsigned(II->isPoisoned());
+ Bits = (Bits << 1) | unsigned(II->hasRevertedTokenIDToIdentifier());
+ Bits = (Bits << 1) | unsigned(II->isCPlusPlusOperatorKeyword());
+ clang::io::Emit16(Out, Bits);
+
+ if (HasMacroDefinition) {
+ clang::io::Emit32(Out, Writer.getMacroOffset(II));
+ clang::io::Emit32(Out,
+ Writer.inferSubmoduleIDFromLocation(Macro->getDefinitionLoc()));
+ }
+
+ // Emit the declaration IDs in reverse order, because the
+ // IdentifierResolver provides the declarations as they would be
+ // visible (e.g., the function "stat" would come before the struct
+ // "stat"), but the ASTReader adds declarations to the end of the list
+ // (so we need to see the struct "stat" before the function "stat").
+ // Only emit declarations that aren't from a chained PCH, though.
+ SmallVector<Decl *, 16> Decls(IdResolver.begin(II),
+ IdResolver.end());
+ for (SmallVector<Decl *, 16>::reverse_iterator D = Decls.rbegin(),
+ DEnd = Decls.rend();
+ D != DEnd; ++D)
+ clang::io::Emit32(Out, Writer.getDeclID(*D));
+ }
+};
+} // end anonymous namespace
+
+/// \brief Write the identifier table into the AST file.
+///
+/// The identifier table consists of a blob containing string data
+/// (the actual identifiers themselves) and a separate "offsets" index
+/// that maps identifier IDs to locations within the blob.
+void ASTWriter::WriteIdentifierTable(Preprocessor &PP,
+ IdentifierResolver &IdResolver,
+ bool IsModule) {
+ using namespace llvm;
+
+ // Create and write out the blob that contains the identifier
+ // strings.
+ {
+ OnDiskChainedHashTableGenerator<ASTIdentifierTableTrait> Generator;
+ ASTIdentifierTableTrait Trait(*this, PP, IdResolver, IsModule);
+
+ // Look for any identifiers that were named while processing the
+ // headers, but are otherwise not needed. We add these to the hash
+ // table to enable checking of the predefines buffer in the case
+ // where the user adds new macro definitions when building the AST
+ // file.
+ for (IdentifierTable::iterator ID = PP.getIdentifierTable().begin(),
+ IDEnd = PP.getIdentifierTable().end();
+ ID != IDEnd; ++ID)
+ getIdentifierRef(ID->second);
+
+ // Create the on-disk hash table representation. We only store offsets
+ // for identifiers that appear here for the first time.
+ IdentifierOffsets.resize(NextIdentID - FirstIdentID);
+ for (llvm::DenseMap<const IdentifierInfo *, IdentID>::iterator
+ ID = IdentifierIDs.begin(), IDEnd = IdentifierIDs.end();
+ ID != IDEnd; ++ID) {
+ assert(ID->first && "NULL identifier in identifier table");
+ if (!Chain || !ID->first->isFromAST() ||
+ ID->first->hasChangedSinceDeserialization())
+ Generator.insert(const_cast<IdentifierInfo *>(ID->first), ID->second,
+ Trait);
+ }
+
+ // Create the on-disk hash table in a buffer.
+ SmallString<4096> IdentifierTable;
+ uint32_t BucketOffset;
+ {
+ ASTIdentifierTableTrait Trait(*this, PP, IdResolver, IsModule);
+ llvm::raw_svector_ostream Out(IdentifierTable);
+ // Make sure that no bucket is at offset 0
+ clang::io::Emit32(Out, 0);
+ BucketOffset = Generator.Emit(Out, Trait);
+ }
+
+ // Create a blob abbreviation
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(IDENTIFIER_TABLE));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned IDTableAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ // Write the identifier table
+ RecordData Record;
+ Record.push_back(IDENTIFIER_TABLE);
+ Record.push_back(BucketOffset);
+ Stream.EmitRecordWithBlob(IDTableAbbrev, Record, IdentifierTable.str());
+ }
+
+ // Write the offsets table for identifier IDs.
+ BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(IDENTIFIER_OFFSET));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // # of identifiers
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32)); // first ID
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned IdentifierOffsetAbbrev = Stream.EmitAbbrev(Abbrev);
+
+ RecordData Record;
+ Record.push_back(IDENTIFIER_OFFSET);
+ Record.push_back(IdentifierOffsets.size());
+ Record.push_back(FirstIdentID - NUM_PREDEF_IDENT_IDS);
+ Stream.EmitRecordWithBlob(IdentifierOffsetAbbrev, Record,
+ data(IdentifierOffsets));
+}
+
+//===----------------------------------------------------------------------===//
+// DeclContext's Name Lookup Table Serialization
+//===----------------------------------------------------------------------===//
+
+namespace {
+// Trait used for the on-disk hash table used in the method pool.
+class ASTDeclContextNameLookupTrait {
+ ASTWriter &Writer;
+
+public:
+ typedef DeclarationName key_type;
+ typedef key_type key_type_ref;
+
+ typedef DeclContext::lookup_result data_type;
+ typedef const data_type& data_type_ref;
+
+ explicit ASTDeclContextNameLookupTrait(ASTWriter &Writer) : Writer(Writer) { }
+
+ unsigned ComputeHash(DeclarationName Name) {
+ llvm::FoldingSetNodeID ID;
+ ID.AddInteger(Name.getNameKind());
+
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ ID.AddString(Name.getAsIdentifierInfo()->getName());
+ break;
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ ID.AddInteger(serialization::ComputeHash(Name.getObjCSelector()));
+ break;
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ break;
+ case DeclarationName::CXXOperatorName:
+ ID.AddInteger(Name.getCXXOverloadedOperator());
+ break;
+ case DeclarationName::CXXLiteralOperatorName:
+ ID.AddString(Name.getCXXLiteralIdentifier()->getName());
+ case DeclarationName::CXXUsingDirective:
+ break;
+ }
+
+ return ID.ComputeHash();
+ }
+
+ std::pair<unsigned,unsigned>
+ EmitKeyDataLength(raw_ostream& Out, DeclarationName Name,
+ data_type_ref Lookup) {
+ unsigned KeyLen = 1;
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXLiteralOperatorName:
+ KeyLen += 4;
+ break;
+ case DeclarationName::CXXOperatorName:
+ KeyLen += 1;
+ break;
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ case DeclarationName::CXXUsingDirective:
+ break;
+ }
+ clang::io::Emit16(Out, KeyLen);
+
+ // 2 bytes for num of decls and 4 for each DeclID.
+ unsigned DataLen = 2 + 4 * (Lookup.second - Lookup.first);
+ clang::io::Emit16(Out, DataLen);
+
+ return std::make_pair(KeyLen, DataLen);
+ }
+
+ void EmitKey(raw_ostream& Out, DeclarationName Name, unsigned) {
+ using namespace clang::io;
+
+ assert(Name.getNameKind() < 0x100 && "Invalid name kind ?");
+ Emit8(Out, Name.getNameKind());
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ Emit32(Out, Writer.getIdentifierRef(Name.getAsIdentifierInfo()));
+ break;
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ Emit32(Out, Writer.getSelectorRef(Name.getObjCSelector()));
+ break;
+ case DeclarationName::CXXOperatorName:
+ assert(Name.getCXXOverloadedOperator() < 0x100 && "Invalid operator ?");
+ Emit8(Out, Name.getCXXOverloadedOperator());
+ break;
+ case DeclarationName::CXXLiteralOperatorName:
+ Emit32(Out, Writer.getIdentifierRef(Name.getCXXLiteralIdentifier()));
+ break;
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ case DeclarationName::CXXUsingDirective:
+ break;
+ }
+ }
+
+ void EmitData(raw_ostream& Out, key_type_ref,
+ data_type Lookup, unsigned DataLen) {
+ uint64_t Start = Out.tell(); (void)Start;
+ clang::io::Emit16(Out, Lookup.second - Lookup.first);
+ for (; Lookup.first != Lookup.second; ++Lookup.first)
+ clang::io::Emit32(Out, Writer.GetDeclRef(*Lookup.first));
+
+ assert(Out.tell() - Start == DataLen && "Data length is wrong");
+ }
+};
+} // end anonymous namespace
+
+/// \brief Write the block containing all of the declaration IDs
+/// visible from the given DeclContext.
+///
+/// \returns the offset of the DECL_CONTEXT_VISIBLE block within the
+/// bitstream, or 0 if no block was written.
+uint64_t ASTWriter::WriteDeclContextVisibleBlock(ASTContext &Context,
+ DeclContext *DC) {
+ if (DC->getPrimaryContext() != DC)
+ return 0;
+
+ // Since there is no name lookup into functions or methods, don't bother to
+ // build a visible-declarations table for these entities.
+ if (DC->isFunctionOrMethod())
+ return 0;
+
+ // If not in C++, we perform name lookup for the translation unit via the
+ // IdentifierInfo chains, so don't bother to build a visible-declarations table.
+ // FIXME: In C++ we need the visible declarations in order to "see" the
+ // friend declarations; is there a way to do this without writing the table?
+ if (DC->isTranslationUnit() && !Context.getLangOpts().CPlusPlus)
+ return 0;
+
+ // Serialize the contents of the mapping used for lookup. Note that,
+ // although we have two very different code paths, the serialized
+ // representation is the same for both cases: a declaration name,
+ // followed by a size, followed by references to the visible
+ // declarations that have that name.
+ uint64_t Offset = Stream.GetCurrentBitNo();
+ StoredDeclsMap *Map = DC->buildLookup();
+ if (!Map || Map->empty())
+ return 0;
+
+ OnDiskChainedHashTableGenerator<ASTDeclContextNameLookupTrait> Generator;
+ ASTDeclContextNameLookupTrait Trait(*this);
+
+ // Create the on-disk hash table representation.
+ DeclarationName ConversionName;
+ llvm::SmallVector<NamedDecl *, 4> ConversionDecls;
+ for (StoredDeclsMap::iterator D = Map->begin(), DEnd = Map->end();
+ D != DEnd; ++D) {
+ DeclarationName Name = D->first;
+ DeclContext::lookup_result Result = D->second.getLookupResult();
+ if (Result.first != Result.second) {
+ if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName) {
+ // Hash all conversion function names to the same name. The actual
+ // type information in the conversion function name is not used in the
+ // key (since such type information is not stable across different
+ // modules), so the intended effect is to coalesce all of the conversion
+ // functions under a single key.
+ if (!ConversionName)
+ ConversionName = Name;
+ ConversionDecls.append(Result.first, Result.second);
+ continue;
+ }
+
+ Generator.insert(Name, Result, Trait);
+ }
+ }
+
+ // Add the conversion functions
+ if (!ConversionDecls.empty()) {
+ Generator.insert(ConversionName,
+ DeclContext::lookup_result(ConversionDecls.begin(),
+ ConversionDecls.end()),
+ Trait);
+ }
+
+ // Create the on-disk hash table in a buffer.
+ SmallString<4096> LookupTable;
+ uint32_t BucketOffset;
+ {
+ llvm::raw_svector_ostream Out(LookupTable);
+ // Make sure that no bucket is at offset 0
+ clang::io::Emit32(Out, 0);
+ BucketOffset = Generator.Emit(Out, Trait);
+ }
+
+ // Write the lookup table
+ RecordData Record;
+ Record.push_back(DECL_CONTEXT_VISIBLE);
+ Record.push_back(BucketOffset);
+ Stream.EmitRecordWithBlob(DeclContextVisibleLookupAbbrev, Record,
+ LookupTable.str());
+
+ Stream.EmitRecord(DECL_CONTEXT_VISIBLE, Record);
+ ++NumVisibleDeclContexts;
+ return Offset;
+}
+
+/// \brief Write an UPDATE_VISIBLE block for the given context.
+///
+/// UPDATE_VISIBLE blocks contain the declarations that are added to an existing
+/// DeclContext in a dependent AST file. As such, they only exist for the TU
+/// (in C++), for namespaces, and for classes with forward-declared unscoped
+/// enumeration members (in C++11).
+void ASTWriter::WriteDeclContextVisibleUpdate(const DeclContext *DC) {
+ StoredDeclsMap *Map = static_cast<StoredDeclsMap*>(DC->getLookupPtr());
+ if (!Map || Map->empty())
+ return;
+
+ OnDiskChainedHashTableGenerator<ASTDeclContextNameLookupTrait> Generator;
+ ASTDeclContextNameLookupTrait Trait(*this);
+
+ // Create the hash table.
+ for (StoredDeclsMap::iterator D = Map->begin(), DEnd = Map->end();
+ D != DEnd; ++D) {
+ DeclarationName Name = D->first;
+ DeclContext::lookup_result Result = D->second.getLookupResult();
+ // For any name that appears in this table, the results are complete, i.e.
+ // they overwrite results from previous PCHs. Merging is always a mess.
+ if (Result.first != Result.second)
+ Generator.insert(Name, Result, Trait);
+ }
+
+ // Create the on-disk hash table in a buffer.
+ SmallString<4096> LookupTable;
+ uint32_t BucketOffset;
+ {
+ llvm::raw_svector_ostream Out(LookupTable);
+ // Make sure that no bucket is at offset 0
+ clang::io::Emit32(Out, 0);
+ BucketOffset = Generator.Emit(Out, Trait);
+ }
+
+ // Write the lookup table
+ RecordData Record;
+ Record.push_back(UPDATE_VISIBLE);
+ Record.push_back(getDeclID(cast<Decl>(DC)));
+ Record.push_back(BucketOffset);
+ Stream.EmitRecordWithBlob(UpdateVisibleAbbrev, Record, LookupTable.str());
+}
+
+/// \brief Write an FP_PRAGMA_OPTIONS block for the given FPOptions.
+void ASTWriter::WriteFPPragmaOptions(const FPOptions &Opts) {
+ RecordData Record;
+ Record.push_back(Opts.fp_contract);
+ Stream.EmitRecord(FP_PRAGMA_OPTIONS, Record);
+}
+
+/// \brief Write an OPENCL_EXTENSIONS block for the given OpenCLOptions.
+void ASTWriter::WriteOpenCLExtensions(Sema &SemaRef) {
+ if (!SemaRef.Context.getLangOpts().OpenCL)
+ return;
+
+ const OpenCLOptions &Opts = SemaRef.getOpenCLOptions();
+ RecordData Record;
+#define OPENCLEXT(nm) Record.push_back(Opts.nm);
+#include "clang/Basic/OpenCLExtensions.def"
+ Stream.EmitRecord(OPENCL_EXTENSIONS, Record);
+}
+
+void ASTWriter::WriteRedeclarations() {
+ RecordData LocalRedeclChains;
+ SmallVector<serialization::LocalRedeclarationsInfo, 2> LocalRedeclsMap;
+
+ for (unsigned I = 0, N = Redeclarations.size(); I != N; ++I) {
+ Decl *First = Redeclarations[I];
+ assert(First->getPreviousDecl() == 0 && "Not the first declaration?");
+
+ Decl *MostRecent = First->getMostRecentDecl();
+
+ // If we only have a single declaration, there is no point in storing
+ // a redeclaration chain.
+ if (First == MostRecent)
+ continue;
+
+ unsigned Offset = LocalRedeclChains.size();
+ unsigned Size = 0;
+ LocalRedeclChains.push_back(0); // Placeholder for the size.
+
+ // Collect the set of local redeclarations of this declaration.
+ for (Decl *Prev = MostRecent; Prev != First;
+ Prev = Prev->getPreviousDecl()) {
+ if (!Prev->isFromASTFile()) {
+ AddDeclRef(Prev, LocalRedeclChains);
+ ++Size;
+ }
+ }
+ LocalRedeclChains[Offset] = Size;
+
+ // Reverse the set of local redeclarations, so that we store them in
+ // order (since we found them in reverse order).
+ std::reverse(LocalRedeclChains.end() - Size, LocalRedeclChains.end());
+
+ // Add the mapping from the first ID to the set of local declarations.
+ LocalRedeclarationsInfo Info = { getDeclID(First), Offset };
+ LocalRedeclsMap.push_back(Info);
+
+ assert(N == Redeclarations.size() &&
+ "Deserialized a declaration we shouldn't have");
+ }
+
+ if (LocalRedeclChains.empty())
+ return;
+
+ // Sort the local redeclarations map by the first declaration ID,
+ // since the reader will be performing binary searches on this information.
+ llvm::array_pod_sort(LocalRedeclsMap.begin(), LocalRedeclsMap.end());
+
+ // Emit the local redeclarations map.
+ using namespace llvm;
+ llvm::BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(LOCAL_REDECLARATIONS_MAP));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # of entries
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned AbbrevID = Stream.EmitAbbrev(Abbrev);
+
+ RecordData Record;
+ Record.push_back(LOCAL_REDECLARATIONS_MAP);
+ Record.push_back(LocalRedeclsMap.size());
+ Stream.EmitRecordWithBlob(AbbrevID, Record,
+ reinterpret_cast<char*>(LocalRedeclsMap.data()),
+ LocalRedeclsMap.size() * sizeof(LocalRedeclarationsInfo));
+
+ // Emit the redeclaration chains.
+ Stream.EmitRecord(LOCAL_REDECLARATIONS, LocalRedeclChains);
+}
+
+void ASTWriter::WriteObjCCategories() {
+ llvm::SmallVector<ObjCCategoriesInfo, 2> CategoriesMap;
+ RecordData Categories;
+
+ for (unsigned I = 0, N = ObjCClassesWithCategories.size(); I != N; ++I) {
+ unsigned Size = 0;
+ unsigned StartIndex = Categories.size();
+
+ ObjCInterfaceDecl *Class = ObjCClassesWithCategories[I];
+
+ // Allocate space for the size.
+ Categories.push_back(0);
+
+ // Add the categories.
+ for (ObjCCategoryDecl *Cat = Class->getCategoryList();
+ Cat; Cat = Cat->getNextClassCategory(), ++Size) {
+ assert(getDeclID(Cat) != 0 && "Bogus category");
+ AddDeclRef(Cat, Categories);
+ }
+
+ // Update the size.
+ Categories[StartIndex] = Size;
+
+ // Record this interface -> category map.
+ ObjCCategoriesInfo CatInfo = { getDeclID(Class), StartIndex };
+ CategoriesMap.push_back(CatInfo);
+ }
+
+ // Sort the categories map by the definition ID, since the reader will be
+ // performing binary searches on this information.
+ llvm::array_pod_sort(CategoriesMap.begin(), CategoriesMap.end());
+
+ // Emit the categories map.
+ using namespace llvm;
+ llvm::BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(OBJC_CATEGORIES_MAP));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // # of entries
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned AbbrevID = Stream.EmitAbbrev(Abbrev);
+
+ RecordData Record;
+ Record.push_back(OBJC_CATEGORIES_MAP);
+ Record.push_back(CategoriesMap.size());
+ Stream.EmitRecordWithBlob(AbbrevID, Record,
+ reinterpret_cast<char*>(CategoriesMap.data()),
+ CategoriesMap.size() * sizeof(ObjCCategoriesInfo));
+
+ // Emit the category lists.
+ Stream.EmitRecord(OBJC_CATEGORIES, Categories);
+}
+
+void ASTWriter::WriteMergedDecls() {
+ if (!Chain || Chain->MergedDecls.empty())
+ return;
+
+ RecordData Record;
+ for (ASTReader::MergedDeclsMap::iterator I = Chain->MergedDecls.begin(),
+ IEnd = Chain->MergedDecls.end();
+ I != IEnd; ++I) {
+ DeclID CanonID = I->first->isFromASTFile()? I->first->getGlobalID()
+ : getDeclID(I->first);
+ assert(CanonID && "Merged declaration not known?");
+
+ Record.push_back(CanonID);
+ Record.push_back(I->second.size());
+ Record.append(I->second.begin(), I->second.end());
+ }
+ Stream.EmitRecord(MERGED_DECLARATIONS, Record);
+}
+
+//===----------------------------------------------------------------------===//
+// General Serialization Routines
+//===----------------------------------------------------------------------===//
+
+/// \brief Write a record containing the given attributes.
+void ASTWriter::WriteAttributes(const AttrVec &Attrs, RecordDataImpl &Record) {
+ Record.push_back(Attrs.size());
+ for (AttrVec::const_iterator i = Attrs.begin(), e = Attrs.end(); i != e; ++i){
+ const Attr * A = *i;
+ Record.push_back(A->getKind()); // FIXME: stable encoding, target attrs
+ AddSourceRange(A->getRange(), Record);
+
+#include "clang/Serialization/AttrPCHWrite.inc"
+
+ }
+}
+
+void ASTWriter::AddString(StringRef Str, RecordDataImpl &Record) {
+ Record.push_back(Str.size());
+ Record.insert(Record.end(), Str.begin(), Str.end());
+}
+
+void ASTWriter::AddVersionTuple(const VersionTuple &Version,
+ RecordDataImpl &Record) {
+ Record.push_back(Version.getMajor());
+ if (llvm::Optional<unsigned> Minor = Version.getMinor())
+ Record.push_back(*Minor + 1);
+ else
+ Record.push_back(0);
+ if (llvm::Optional<unsigned> Subminor = Version.getSubminor())
+ Record.push_back(*Subminor + 1);
+ else
+ Record.push_back(0);
+}
+
+/// \brief Note that the identifier II occurs at the given offset
+/// within the identifier table.
+void ASTWriter::SetIdentifierOffset(const IdentifierInfo *II, uint32_t Offset) {
+ IdentID ID = IdentifierIDs[II];
+ // Only store offsets new to this AST file. Other identifier names are looked
+ // up earlier in the chain and thus don't need an offset.
+ if (ID >= FirstIdentID)
+ IdentifierOffsets[ID - FirstIdentID] = Offset;
+}
+
+/// \brief Note that the selector Sel occurs at the given offset
+/// within the method pool/selector table.
+void ASTWriter::SetSelectorOffset(Selector Sel, uint32_t Offset) {
+ unsigned ID = SelectorIDs[Sel];
+ assert(ID && "Unknown selector");
+ // Don't record offsets for selectors that are also available in a different
+ // file.
+ if (ID < FirstSelectorID)
+ return;
+ SelectorOffsets[ID - FirstSelectorID] = Offset;
+}
+
+ASTWriter::ASTWriter(llvm::BitstreamWriter &Stream)
+ : Stream(Stream), Context(0), PP(0), Chain(0), WritingModule(0),
+ WritingAST(false), ASTHasCompilerErrors(false),
+ FirstDeclID(NUM_PREDEF_DECL_IDS), NextDeclID(FirstDeclID),
+ FirstTypeID(NUM_PREDEF_TYPE_IDS), NextTypeID(FirstTypeID),
+ FirstIdentID(NUM_PREDEF_IDENT_IDS), NextIdentID(FirstIdentID),
+ FirstSubmoduleID(NUM_PREDEF_SUBMODULE_IDS),
+ NextSubmoduleID(FirstSubmoduleID),
+ FirstSelectorID(NUM_PREDEF_SELECTOR_IDS), NextSelectorID(FirstSelectorID),
+ CollectedStmts(&StmtsToEmit),
+ NumStatements(0), NumMacros(0), NumLexicalDeclContexts(0),
+ NumVisibleDeclContexts(0),
+ NextCXXBaseSpecifiersID(1),
+ DeclParmVarAbbrev(0), DeclContextLexicalAbbrev(0),
+ DeclContextVisibleLookupAbbrev(0), UpdateVisibleAbbrev(0),
+ DeclRefExprAbbrev(0), CharacterLiteralAbbrev(0),
+ DeclRecordAbbrev(0), IntegerLiteralAbbrev(0),
+ DeclTypedefAbbrev(0),
+ DeclVarAbbrev(0), DeclFieldAbbrev(0),
+ DeclEnumAbbrev(0), DeclObjCIvarAbbrev(0)
+{
+}
+
+ASTWriter::~ASTWriter() {
+ for (FileDeclIDsTy::iterator
+ I = FileDeclIDs.begin(), E = FileDeclIDs.end(); I != E; ++I)
+ delete I->second;
+}
+
+void ASTWriter::WriteAST(Sema &SemaRef, MemorizeStatCalls *StatCalls,
+ const std::string &OutputFile,
+ Module *WritingModule, StringRef isysroot,
+ bool hasErrors) {
+ WritingAST = true;
+
+ ASTHasCompilerErrors = hasErrors;
+
+ // Emit the file header.
+ Stream.Emit((unsigned)'C', 8);
+ Stream.Emit((unsigned)'P', 8);
+ Stream.Emit((unsigned)'C', 8);
+ Stream.Emit((unsigned)'H', 8);
+
+ WriteBlockInfoBlock();
+
+ Context = &SemaRef.Context;
+ PP = &SemaRef.PP;
+ this->WritingModule = WritingModule;
+ WriteASTCore(SemaRef, StatCalls, isysroot, OutputFile, WritingModule);
+ Context = 0;
+ PP = 0;
+ this->WritingModule = 0;
+
+ WritingAST = false;
+}
+
+template<typename Vector>
+static void AddLazyVectorDecls(ASTWriter &Writer, Vector &Vec,
+ ASTWriter::RecordData &Record) {
+ for (typename Vector::iterator I = Vec.begin(0, true), E = Vec.end();
+ I != E; ++I) {
+ Writer.AddDeclRef(*I, Record);
+ }
+}
+
+void ASTWriter::WriteASTCore(Sema &SemaRef, MemorizeStatCalls *StatCalls,
+ StringRef isysroot,
+ const std::string &OutputFile,
+ Module *WritingModule) {
+ using namespace llvm;
+
+ // Make sure that the AST reader knows to finalize itself.
+ if (Chain)
+ Chain->finalizeForWriting();
+
+ ASTContext &Context = SemaRef.Context;
+ Preprocessor &PP = SemaRef.PP;
+
+ // Set up predefined declaration IDs.
+ DeclIDs[Context.getTranslationUnitDecl()] = PREDEF_DECL_TRANSLATION_UNIT_ID;
+ if (Context.ObjCIdDecl)
+ DeclIDs[Context.ObjCIdDecl] = PREDEF_DECL_OBJC_ID_ID;
+ if (Context.ObjCSelDecl)
+ DeclIDs[Context.ObjCSelDecl] = PREDEF_DECL_OBJC_SEL_ID;
+ if (Context.ObjCClassDecl)
+ DeclIDs[Context.ObjCClassDecl] = PREDEF_DECL_OBJC_CLASS_ID;
+ if (Context.ObjCProtocolClassDecl)
+ DeclIDs[Context.ObjCProtocolClassDecl] = PREDEF_DECL_OBJC_PROTOCOL_ID;
+ if (Context.Int128Decl)
+ DeclIDs[Context.Int128Decl] = PREDEF_DECL_INT_128_ID;
+ if (Context.UInt128Decl)
+ DeclIDs[Context.UInt128Decl] = PREDEF_DECL_UNSIGNED_INT_128_ID;
+ if (Context.ObjCInstanceTypeDecl)
+ DeclIDs[Context.ObjCInstanceTypeDecl] = PREDEF_DECL_OBJC_INSTANCETYPE_ID;
+
+ if (!Chain) {
+ // Make sure that we emit IdentifierInfos (and any attached
+ // declarations) for builtins. We don't need to do this when we're
+ // emitting chained PCH files, because all of the builtins will be
+ // in the original PCH file.
+ // FIXME: Modules won't like this at all.
+ IdentifierTable &Table = PP.getIdentifierTable();
+ SmallVector<const char *, 32> BuiltinNames;
+ Context.BuiltinInfo.GetBuiltinNames(BuiltinNames,
+ Context.getLangOpts().NoBuiltin);
+ for (unsigned I = 0, N = BuiltinNames.size(); I != N; ++I)
+ getIdentifierRef(&Table.get(BuiltinNames[I]));
+ }
+
+ // If there are any out-of-date identifiers, bring them up to date.
+ if (ExternalPreprocessorSource *ExtSource = PP.getExternalSource()) {
+ for (IdentifierTable::iterator ID = PP.getIdentifierTable().begin(),
+ IDEnd = PP.getIdentifierTable().end();
+ ID != IDEnd; ++ID)
+ if (ID->second->isOutOfDate())
+ ExtSource->updateOutOfDateIdentifier(*ID->second);
+ }
+
+ // Build a record containing all of the tentative definitions in this file, in
+ // TentativeDefinitions order. Generally, this record will be empty for
+ // headers.
+ RecordData TentativeDefinitions;
+ AddLazyVectorDecls(*this, SemaRef.TentativeDefinitions, TentativeDefinitions);
+
+ // Build a record containing all of the file scoped decls in this file.
+ RecordData UnusedFileScopedDecls;
+ AddLazyVectorDecls(*this, SemaRef.UnusedFileScopedDecls,
+ UnusedFileScopedDecls);
+
+ // Build a record containing all of the delegating constructors we still need
+ // to resolve.
+ RecordData DelegatingCtorDecls;
+ AddLazyVectorDecls(*this, SemaRef.DelegatingCtorDecls, DelegatingCtorDecls);
+
+ // Write the set of weak, undeclared identifiers. We always write the
+ // entire table, since later PCH files in a PCH chain are only interested in
+ // the results at the end of the chain.
+ RecordData WeakUndeclaredIdentifiers;
+ if (!SemaRef.WeakUndeclaredIdentifiers.empty()) {
+ for (llvm::DenseMap<IdentifierInfo*,WeakInfo>::iterator
+ I = SemaRef.WeakUndeclaredIdentifiers.begin(),
+ E = SemaRef.WeakUndeclaredIdentifiers.end(); I != E; ++I) {
+ AddIdentifierRef(I->first, WeakUndeclaredIdentifiers);
+ AddIdentifierRef(I->second.getAlias(), WeakUndeclaredIdentifiers);
+ AddSourceLocation(I->second.getLocation(), WeakUndeclaredIdentifiers);
+ WeakUndeclaredIdentifiers.push_back(I->second.getUsed());
+ }
+ }
+
+ // Build a record containing all of the locally-scoped external
+ // declarations in this header file. Generally, this record will be
+ // empty.
+ RecordData LocallyScopedExternalDecls;
+ // FIXME: This is filling in the AST file in DenseMap order, which is
+ // nondeterministic!
+ for (llvm::DenseMap<DeclarationName, NamedDecl *>::iterator
+ TD = SemaRef.LocallyScopedExternalDecls.begin(),
+ TDEnd = SemaRef.LocallyScopedExternalDecls.end();
+ TD != TDEnd; ++TD) {
+ if (!TD->second->isFromASTFile())
+ AddDeclRef(TD->second, LocallyScopedExternalDecls);
+ }
+
+ // Build a record containing all of the ext_vector declarations.
+ RecordData ExtVectorDecls;
+ AddLazyVectorDecls(*this, SemaRef.ExtVectorDecls, ExtVectorDecls);
+
+ // Build a record containing all of the VTable uses information.
+ RecordData VTableUses;
+ if (!SemaRef.VTableUses.empty()) {
+ for (unsigned I = 0, N = SemaRef.VTableUses.size(); I != N; ++I) {
+ AddDeclRef(SemaRef.VTableUses[I].first, VTableUses);
+ AddSourceLocation(SemaRef.VTableUses[I].second, VTableUses);
+ VTableUses.push_back(SemaRef.VTablesUsed[SemaRef.VTableUses[I].first]);
+ }
+ }
+
+ // Build a record containing all of the dynamic class declarations.
+ RecordData DynamicClasses;
+ AddLazyVectorDecls(*this, SemaRef.DynamicClasses, DynamicClasses);
+
+ // Build a record containing all of the pending implicit instantiations.
+ RecordData PendingInstantiations;
+ for (std::deque<Sema::PendingImplicitInstantiation>::iterator
+ I = SemaRef.PendingInstantiations.begin(),
+ N = SemaRef.PendingInstantiations.end(); I != N; ++I) {
+ AddDeclRef(I->first, PendingInstantiations);
+ AddSourceLocation(I->second, PendingInstantiations);
+ }
+ assert(SemaRef.PendingLocalImplicitInstantiations.empty() &&
+ "There are local ones at end of translation unit!");
+
+ // Build a record containing some declaration references.
+ RecordData SemaDeclRefs;
+ if (SemaRef.StdNamespace || SemaRef.StdBadAlloc) {
+ AddDeclRef(SemaRef.getStdNamespace(), SemaDeclRefs);
+ AddDeclRef(SemaRef.getStdBadAlloc(), SemaDeclRefs);
+ }
+
+ RecordData CUDASpecialDeclRefs;
+ if (Context.getcudaConfigureCallDecl()) {
+ AddDeclRef(Context.getcudaConfigureCallDecl(), CUDASpecialDeclRefs);
+ }
+
+ // Build a record containing all of the known namespaces.
+ RecordData KnownNamespaces;
+ for (llvm::DenseMap<NamespaceDecl*, bool>::iterator
+ I = SemaRef.KnownNamespaces.begin(),
+ IEnd = SemaRef.KnownNamespaces.end();
+ I != IEnd; ++I) {
+ if (!I->second)
+ AddDeclRef(I->first, KnownNamespaces);
+ }
+
+ // Write the remaining AST contents.
+ RecordData Record;
+ Stream.EnterSubblock(AST_BLOCK_ID, 5);
+ WriteMetadata(Context, isysroot, OutputFile);
+ WriteLanguageOptions(Context.getLangOpts());
+ if (StatCalls && isysroot.empty())
+ WriteStatCache(*StatCalls);
+
+ // Create a lexical update block containing all of the declarations in the
+ // translation unit that do not come from other AST files.
+ const TranslationUnitDecl *TU = Context.getTranslationUnitDecl();
+ SmallVector<KindDeclIDPair, 64> NewGlobalDecls;
+ for (DeclContext::decl_iterator I = TU->noload_decls_begin(),
+ E = TU->noload_decls_end();
+ I != E; ++I) {
+ if (!(*I)->isFromASTFile())
+ NewGlobalDecls.push_back(std::make_pair((*I)->getKind(), GetDeclRef(*I)));
+ }
+
+ llvm::BitCodeAbbrev *Abv = new llvm::BitCodeAbbrev();
+ Abv->Add(llvm::BitCodeAbbrevOp(TU_UPDATE_LEXICAL));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob));
+ unsigned TuUpdateLexicalAbbrev = Stream.EmitAbbrev(Abv);
+ Record.clear();
+ Record.push_back(TU_UPDATE_LEXICAL);
+ Stream.EmitRecordWithBlob(TuUpdateLexicalAbbrev, Record,
+ data(NewGlobalDecls));
+
+ // And a visible updates block for the translation unit.
+ Abv = new llvm::BitCodeAbbrev();
+ Abv->Add(llvm::BitCodeAbbrevOp(UPDATE_VISIBLE));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Fixed, 32));
+ Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Blob));
+ UpdateVisibleAbbrev = Stream.EmitAbbrev(Abv);
+ WriteDeclContextVisibleUpdate(TU);
+
+ // If the translation unit has an anonymous namespace, and we don't already
+ // have an update block for it, write it as an update block.
+ if (NamespaceDecl *NS = TU->getAnonymousNamespace()) {
+ ASTWriter::UpdateRecord &Record = DeclUpdates[TU];
+ if (Record.empty()) {
+ Record.push_back(UPD_CXX_ADDED_ANONYMOUS_NAMESPACE);
+ Record.push_back(reinterpret_cast<uint64_t>(NS));
+ }
+ }
+
+ // Resolve any declaration pointers within the declaration updates block.
+ ResolveDeclUpdatesBlocks();
+
+ // Form the record of special types.
+ RecordData SpecialTypes;
+ AddTypeRef(Context.getBuiltinVaListType(), SpecialTypes);
+ AddTypeRef(Context.getRawCFConstantStringType(), SpecialTypes);
+ AddTypeRef(Context.getFILEType(), SpecialTypes);
+ AddTypeRef(Context.getjmp_bufType(), SpecialTypes);
+ AddTypeRef(Context.getsigjmp_bufType(), SpecialTypes);
+ AddTypeRef(Context.ObjCIdRedefinitionType, SpecialTypes);
+ AddTypeRef(Context.ObjCClassRedefinitionType, SpecialTypes);
+ AddTypeRef(Context.ObjCSelRedefinitionType, SpecialTypes);
+ AddTypeRef(Context.getucontext_tType(), SpecialTypes);
+
+ // Keep writing types and declarations until all types and
+ // declarations have been written.
+ Stream.EnterSubblock(DECLTYPES_BLOCK_ID, NUM_ALLOWED_ABBREVS_SIZE);
+ WriteDeclsBlockAbbrevs();
+ for (DeclsToRewriteTy::iterator I = DeclsToRewrite.begin(),
+ E = DeclsToRewrite.end();
+ I != E; ++I)
+ DeclTypesToEmit.push(const_cast<Decl*>(*I));
+ while (!DeclTypesToEmit.empty()) {
+ DeclOrType DOT = DeclTypesToEmit.front();
+ DeclTypesToEmit.pop();
+ if (DOT.isType())
+ WriteType(DOT.getType());
+ else
+ WriteDecl(Context, DOT.getDecl());
+ }
+ Stream.ExitBlock();
+
+ WriteFileDeclIDsMap();
+ WriteSourceManagerBlock(Context.getSourceManager(), PP, isysroot);
+
+ if (Chain) {
+ // Write the mapping information describing our module dependencies and how
+ // each of those modules was mapped into our own offset/ID space, so that
+ // the reader can build the appropriate mapping to its own offset/ID space.
+ // The map consists solely of a blob with the following format:
+ // *(module-name-len:i16 module-name:len*i8
+ // source-location-offset:i32
+ // identifier-id:i32
+ // preprocessed-entity-id:i32
+ // macro-definition-id:i32
+ // submodule-id:i32
+ // selector-id:i32
+ // declaration-id:i32
+ // c++-base-specifiers-id:i32
+ // type-id:i32)
+ //
+ llvm::BitCodeAbbrev *Abbrev = new BitCodeAbbrev();
+ Abbrev->Add(BitCodeAbbrevOp(MODULE_OFFSET_MAP));
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ unsigned ModuleOffsetMapAbbrev = Stream.EmitAbbrev(Abbrev);
+ SmallString<2048> Buffer;
+ {
+ llvm::raw_svector_ostream Out(Buffer);
+ for (ModuleManager::ModuleConstIterator M = Chain->ModuleMgr.begin(),
+ MEnd = Chain->ModuleMgr.end();
+ M != MEnd; ++M) {
+ StringRef FileName = (*M)->FileName;
+ io::Emit16(Out, FileName.size());
+ Out.write(FileName.data(), FileName.size());
+ io::Emit32(Out, (*M)->SLocEntryBaseOffset);
+ io::Emit32(Out, (*M)->BaseIdentifierID);
+ io::Emit32(Out, (*M)->BasePreprocessedEntityID);
+ io::Emit32(Out, (*M)->BaseSubmoduleID);
+ io::Emit32(Out, (*M)->BaseSelectorID);
+ io::Emit32(Out, (*M)->BaseDeclID);
+ io::Emit32(Out, (*M)->BaseTypeIndex);
+ }
+ }
+ Record.clear();
+ Record.push_back(MODULE_OFFSET_MAP);
+ Stream.EmitRecordWithBlob(ModuleOffsetMapAbbrev, Record,
+ Buffer.data(), Buffer.size());
+ }
+ WritePreprocessor(PP, WritingModule != 0);
+ WriteHeaderSearch(PP.getHeaderSearchInfo(), isysroot);
+ WriteSelectors(SemaRef);
+ WriteReferencedSelectorsPool(SemaRef);
+ WriteIdentifierTable(PP, SemaRef.IdResolver, WritingModule != 0);
+ WriteFPPragmaOptions(SemaRef.getFPOptions());
+ WriteOpenCLExtensions(SemaRef);
+
+ WriteTypeDeclOffsets();
+ WritePragmaDiagnosticMappings(Context.getDiagnostics());
+
+ WriteCXXBaseSpecifiersOffsets();
+
+ // If we're emitting a module, write out the submodule information.
+ if (WritingModule)
+ WriteSubmodules(WritingModule);
+
+ Stream.EmitRecord(SPECIAL_TYPES, SpecialTypes);
+
+ // Write the record containing external, unnamed definitions.
+ if (!ExternalDefinitions.empty())
+ Stream.EmitRecord(EXTERNAL_DEFINITIONS, ExternalDefinitions);
+
+ // Write the record containing tentative definitions.
+ if (!TentativeDefinitions.empty())
+ Stream.EmitRecord(TENTATIVE_DEFINITIONS, TentativeDefinitions);
+
+ // Write the record containing unused file scoped decls.
+ if (!UnusedFileScopedDecls.empty())
+ Stream.EmitRecord(UNUSED_FILESCOPED_DECLS, UnusedFileScopedDecls);
+
+ // Write the record containing weak undeclared identifiers.
+ if (!WeakUndeclaredIdentifiers.empty())
+ Stream.EmitRecord(WEAK_UNDECLARED_IDENTIFIERS,
+ WeakUndeclaredIdentifiers);
+
+ // Write the record containing locally-scoped external definitions.
+ if (!LocallyScopedExternalDecls.empty())
+ Stream.EmitRecord(LOCALLY_SCOPED_EXTERNAL_DECLS,
+ LocallyScopedExternalDecls);
+
+ // Write the record containing ext_vector type names.
+ if (!ExtVectorDecls.empty())
+ Stream.EmitRecord(EXT_VECTOR_DECLS, ExtVectorDecls);
+
+ // Write the record containing VTable uses information.
+ if (!VTableUses.empty())
+ Stream.EmitRecord(VTABLE_USES, VTableUses);
+
+ // Write the record containing dynamic classes declarations.
+ if (!DynamicClasses.empty())
+ Stream.EmitRecord(DYNAMIC_CLASSES, DynamicClasses);
+
+ // Write the record containing pending implicit instantiations.
+ if (!PendingInstantiations.empty())
+ Stream.EmitRecord(PENDING_IMPLICIT_INSTANTIATIONS, PendingInstantiations);
+
+ // Write the record containing declaration references of Sema.
+ if (!SemaDeclRefs.empty())
+ Stream.EmitRecord(SEMA_DECL_REFS, SemaDeclRefs);
+
+ // Write the record containing CUDA-specific declaration references.
+ if (!CUDASpecialDeclRefs.empty())
+ Stream.EmitRecord(CUDA_SPECIAL_DECL_REFS, CUDASpecialDeclRefs);
+
+ // Write the delegating constructors.
+ if (!DelegatingCtorDecls.empty())
+ Stream.EmitRecord(DELEGATING_CTORS, DelegatingCtorDecls);
+
+ // Write the known namespaces.
+ if (!KnownNamespaces.empty())
+ Stream.EmitRecord(KNOWN_NAMESPACES, KnownNamespaces);
+
+ // Write the visible updates to DeclContexts.
+ for (llvm::SmallPtrSet<const DeclContext *, 16>::iterator
+ I = UpdatedDeclContexts.begin(),
+ E = UpdatedDeclContexts.end();
+ I != E; ++I)
+ WriteDeclContextVisibleUpdate(*I);
+
+ if (!WritingModule) {
+ // Write the submodules that were imported, if any.
+ RecordData ImportedModules;
+ for (ASTContext::import_iterator I = Context.local_import_begin(),
+ IEnd = Context.local_import_end();
+ I != IEnd; ++I) {
+ assert(SubmoduleIDs.find(I->getImportedModule()) != SubmoduleIDs.end());
+ ImportedModules.push_back(SubmoduleIDs[I->getImportedModule()]);
+ }
+ if (!ImportedModules.empty()) {
+ // Sort module IDs.
+ llvm::array_pod_sort(ImportedModules.begin(), ImportedModules.end());
+
+ // Unique module IDs.
+ ImportedModules.erase(std::unique(ImportedModules.begin(),
+ ImportedModules.end()),
+ ImportedModules.end());
+
+ Stream.EmitRecord(IMPORTED_MODULES, ImportedModules);
+ }
+ }
+
+ WriteDeclUpdatesBlocks();
+ WriteDeclReplacementsBlock();
+ WriteMergedDecls();
+ WriteRedeclarations();
+ WriteObjCCategories();
+
+ // Some simple statistics
+ Record.clear();
+ Record.push_back(NumStatements);
+ Record.push_back(NumMacros);
+ Record.push_back(NumLexicalDeclContexts);
+ Record.push_back(NumVisibleDeclContexts);
+ Stream.EmitRecord(STATISTICS, Record);
+ Stream.ExitBlock();
+}
+
+/// \brief Go through the declaration update blocks and resolve declaration
+/// pointers into declaration IDs.
+void ASTWriter::ResolveDeclUpdatesBlocks() {
+ for (DeclUpdateMap::iterator
+ I = DeclUpdates.begin(), E = DeclUpdates.end(); I != E; ++I) {
+ const Decl *D = I->first;
+ UpdateRecord &URec = I->second;
+
+ if (isRewritten(D))
+ continue; // The decl will be written completely
+
+ unsigned Idx = 0, N = URec.size();
+ while (Idx < N) {
+ switch ((DeclUpdateKind)URec[Idx++]) {
+ case UPD_CXX_ADDED_IMPLICIT_MEMBER:
+ case UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION:
+ case UPD_CXX_ADDED_ANONYMOUS_NAMESPACE:
+ URec[Idx] = GetDeclRef(reinterpret_cast<Decl *>(URec[Idx]));
+ ++Idx;
+ break;
+
+ case UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER:
+ ++Idx;
+ break;
+ }
+ }
+ }
+}
+
+void ASTWriter::WriteDeclUpdatesBlocks() {
+ if (DeclUpdates.empty())
+ return;
+
+ RecordData OffsetsRecord;
+ Stream.EnterSubblock(DECL_UPDATES_BLOCK_ID, NUM_ALLOWED_ABBREVS_SIZE);
+ for (DeclUpdateMap::iterator
+ I = DeclUpdates.begin(), E = DeclUpdates.end(); I != E; ++I) {
+ const Decl *D = I->first;
+ UpdateRecord &URec = I->second;
+
+ if (isRewritten(D))
+ continue; // The decl will be written completely; no need to store updates.
+
+ uint64_t Offset = Stream.GetCurrentBitNo();
+ Stream.EmitRecord(DECL_UPDATES, URec);
+
+ OffsetsRecord.push_back(GetDeclRef(D));
+ OffsetsRecord.push_back(Offset);
+ }
+ Stream.ExitBlock();
+ Stream.EmitRecord(DECL_UPDATE_OFFSETS, OffsetsRecord);
+}
+
+void ASTWriter::WriteDeclReplacementsBlock() {
+ if (ReplacedDecls.empty())
+ return;
+
+ RecordData Record;
+ for (SmallVector<ReplacedDeclInfo, 16>::iterator
+ I = ReplacedDecls.begin(), E = ReplacedDecls.end(); I != E; ++I) {
+ Record.push_back(I->ID);
+ Record.push_back(I->Offset);
+ Record.push_back(I->Loc);
+ }
+ Stream.EmitRecord(DECL_REPLACEMENTS, Record);
+}
+
+void ASTWriter::AddSourceLocation(SourceLocation Loc, RecordDataImpl &Record) {
+ Record.push_back(Loc.getRawEncoding());
+}
+
+void ASTWriter::AddSourceRange(SourceRange Range, RecordDataImpl &Record) {
+ AddSourceLocation(Range.getBegin(), Record);
+ AddSourceLocation(Range.getEnd(), Record);
+}
+
+void ASTWriter::AddAPInt(const llvm::APInt &Value, RecordDataImpl &Record) {
+ Record.push_back(Value.getBitWidth());
+ const uint64_t *Words = Value.getRawData();
+ Record.append(Words, Words + Value.getNumWords());
+}
+
+void ASTWriter::AddAPSInt(const llvm::APSInt &Value, RecordDataImpl &Record) {
+ Record.push_back(Value.isUnsigned());
+ AddAPInt(Value, Record);
+}
+
+void ASTWriter::AddAPFloat(const llvm::APFloat &Value, RecordDataImpl &Record) {
+ AddAPInt(Value.bitcastToAPInt(), Record);
+}
+
+void ASTWriter::AddIdentifierRef(const IdentifierInfo *II, RecordDataImpl &Record) {
+ Record.push_back(getIdentifierRef(II));
+}
+
+IdentID ASTWriter::getIdentifierRef(const IdentifierInfo *II) {
+ if (II == 0)
+ return 0;
+
+ IdentID &ID = IdentifierIDs[II];
+ if (ID == 0)
+ ID = NextIdentID++;
+ return ID;
+}
+
+void ASTWriter::AddSelectorRef(const Selector SelRef, RecordDataImpl &Record) {
+ Record.push_back(getSelectorRef(SelRef));
+}
+
+SelectorID ASTWriter::getSelectorRef(Selector Sel) {
+ if (Sel.getAsOpaquePtr() == 0) {
+ return 0;
+ }
+
+ SelectorID &SID = SelectorIDs[Sel];
+ if (SID == 0 && Chain) {
+ // This might trigger a ReadSelector callback, which will set the ID for
+ // this selector.
+ Chain->LoadSelector(Sel);
+ }
+ if (SID == 0) {
+ SID = NextSelectorID++;
+ }
+ return SID;
+}
+
+void ASTWriter::AddCXXTemporary(const CXXTemporary *Temp, RecordDataImpl &Record) {
+ AddDeclRef(Temp->getDestructor(), Record);
+}
+
+void ASTWriter::AddCXXBaseSpecifiersRef(CXXBaseSpecifier const *Bases,
+ CXXBaseSpecifier const *BasesEnd,
+ RecordDataImpl &Record) {
+ assert(Bases != BasesEnd && "Empty base-specifier sets are not recorded");
+ CXXBaseSpecifiersToWrite.push_back(
+ QueuedCXXBaseSpecifiers(NextCXXBaseSpecifiersID,
+ Bases, BasesEnd));
+ Record.push_back(NextCXXBaseSpecifiersID++);
+}
+
+void ASTWriter::AddTemplateArgumentLocInfo(TemplateArgument::ArgKind Kind,
+ const TemplateArgumentLocInfo &Arg,
+ RecordDataImpl &Record) {
+ switch (Kind) {
+ case TemplateArgument::Expression:
+ AddStmt(Arg.getAsExpr());
+ break;
+ case TemplateArgument::Type:
+ AddTypeSourceInfo(Arg.getAsTypeSourceInfo(), Record);
+ break;
+ case TemplateArgument::Template:
+ AddNestedNameSpecifierLoc(Arg.getTemplateQualifierLoc(), Record);
+ AddSourceLocation(Arg.getTemplateNameLoc(), Record);
+ break;
+ case TemplateArgument::TemplateExpansion:
+ AddNestedNameSpecifierLoc(Arg.getTemplateQualifierLoc(), Record);
+ AddSourceLocation(Arg.getTemplateNameLoc(), Record);
+ AddSourceLocation(Arg.getTemplateEllipsisLoc(), Record);
+ break;
+ case TemplateArgument::Null:
+ case TemplateArgument::Integral:
+ case TemplateArgument::Declaration:
+ case TemplateArgument::Pack:
+ break;
+ }
+}
+
+void ASTWriter::AddTemplateArgumentLoc(const TemplateArgumentLoc &Arg,
+ RecordDataImpl &Record) {
+ AddTemplateArgument(Arg.getArgument(), Record);
+
+ if (Arg.getArgument().getKind() == TemplateArgument::Expression) {
+ bool InfoHasSameExpr
+ = Arg.getArgument().getAsExpr() == Arg.getLocInfo().getAsExpr();
+ Record.push_back(InfoHasSameExpr);
+ if (InfoHasSameExpr)
+ return; // Avoid storing the same expr twice.
+ }
+ AddTemplateArgumentLocInfo(Arg.getArgument().getKind(), Arg.getLocInfo(),
+ Record);
+}
+
+void ASTWriter::AddTypeSourceInfo(TypeSourceInfo *TInfo,
+ RecordDataImpl &Record) {
+ if (TInfo == 0) {
+ AddTypeRef(QualType(), Record);
+ return;
+ }
+
+ AddTypeLoc(TInfo->getTypeLoc(), Record);
+}
+
+void ASTWriter::AddTypeLoc(TypeLoc TL, RecordDataImpl &Record) {
+ AddTypeRef(TL.getType(), Record);
+
+ TypeLocWriter TLW(*this, Record);
+ for (; !TL.isNull(); TL = TL.getNextTypeLoc())
+ TLW.Visit(TL);
+}
+
+void ASTWriter::AddTypeRef(QualType T, RecordDataImpl &Record) {
+ Record.push_back(GetOrCreateTypeID(T));
+}
+
+TypeID ASTWriter::GetOrCreateTypeID( QualType T) {
+ return MakeTypeID(*Context, T,
+ std::bind1st(std::mem_fun(&ASTWriter::GetOrCreateTypeIdx), this));
+}
+
+TypeID ASTWriter::getTypeID(QualType T) const {
+ return MakeTypeID(*Context, T,
+ std::bind1st(std::mem_fun(&ASTWriter::getTypeIdx), this));
+}
+
+TypeIdx ASTWriter::GetOrCreateTypeIdx(QualType T) {
+ if (T.isNull())
+ return TypeIdx();
+ assert(!T.getLocalFastQualifiers());
+
+ TypeIdx &Idx = TypeIdxs[T];
+ if (Idx.getIndex() == 0) {
+ // We haven't seen this type before. Assign it a new ID and put it
+ // into the queue of types to emit.
+ Idx = TypeIdx(NextTypeID++);
+ DeclTypesToEmit.push(T);
+ }
+ return Idx;
+}
+
+TypeIdx ASTWriter::getTypeIdx(QualType T) const {
+ if (T.isNull())
+ return TypeIdx();
+ assert(!T.getLocalFastQualifiers());
+
+ TypeIdxMap::const_iterator I = TypeIdxs.find(T);
+ assert(I != TypeIdxs.end() && "Type not emitted!");
+ return I->second;
+}
+
+void ASTWriter::AddDeclRef(const Decl *D, RecordDataImpl &Record) {
+ Record.push_back(GetDeclRef(D));
+}
+
+DeclID ASTWriter::GetDeclRef(const Decl *D) {
+ assert(WritingAST && "Cannot request a declaration ID before AST writing");
+
+ if (D == 0) {
+ return 0;
+ }
+
+ // If D comes from an AST file, its declaration ID is already known and
+ // fixed.
+ if (D->isFromASTFile())
+ return D->getGlobalID();
+
+ assert(!(reinterpret_cast<uintptr_t>(D) & 0x01) && "Invalid decl pointer");
+ DeclID &ID = DeclIDs[D];
+ if (ID == 0) {
+ // We haven't seen this declaration before. Give it a new ID and
+ // enqueue it in the list of declarations to emit.
+ ID = NextDeclID++;
+ DeclTypesToEmit.push(const_cast<Decl *>(D));
+ }
+
+ return ID;
+}
+
+DeclID ASTWriter::getDeclID(const Decl *D) {
+ if (D == 0)
+ return 0;
+
+ // If D comes from an AST file, its declaration ID is already known and
+ // fixed.
+ if (D->isFromASTFile())
+ return D->getGlobalID();
+
+ assert(DeclIDs.find(D) != DeclIDs.end() && "Declaration not emitted!");
+ return DeclIDs[D];
+}
+
+static inline bool compLocDecl(std::pair<unsigned, serialization::DeclID> L,
+ std::pair<unsigned, serialization::DeclID> R) {
+ return L.first < R.first;
+}
+
+void ASTWriter::associateDeclWithFile(const Decl *D, DeclID ID) {
+ assert(ID);
+ assert(D);
+
+ SourceLocation Loc = D->getLocation();
+ if (Loc.isInvalid())
+ return;
+
+ // We only keep track of the file-level declarations of each file.
+ if (!D->getLexicalDeclContext()->isFileContext())
+ return;
+ // FIXME: ParmVarDecls that are part of a function type of a parameter of
+ // a function/objc method should not have the TU as their lexical context.
+ if (isa<ParmVarDecl>(D))
+ return;
+
+ SourceManager &SM = Context->getSourceManager();
+ SourceLocation FileLoc = SM.getFileLoc(Loc);
+ assert(SM.isLocalSourceLocation(FileLoc));
+ FileID FID;
+ unsigned Offset;
+ llvm::tie(FID, Offset) = SM.getDecomposedLoc(FileLoc);
+ if (FID.isInvalid())
+ return;
+ const SrcMgr::SLocEntry *Entry = &SM.getSLocEntry(FID);
+ assert(Entry->isFile());
+
+ DeclIDInFileInfo *&Info = FileDeclIDs[Entry];
+ if (!Info)
+ Info = new DeclIDInFileInfo();
+
+ std::pair<unsigned, serialization::DeclID> LocDecl(Offset, ID);
+ LocDeclIDsTy &Decls = Info->DeclIDs;
+
+ if (Decls.empty() || Decls.back().first <= Offset) {
+ Decls.push_back(LocDecl);
+ return;
+ }
+
+ LocDeclIDsTy::iterator
+ I = std::upper_bound(Decls.begin(), Decls.end(), LocDecl, compLocDecl);
+
+ Decls.insert(I, LocDecl);
+}
+
+void ASTWriter::AddDeclarationName(DeclarationName Name, RecordDataImpl &Record) {
+ // FIXME: Emit a stable enum for NameKind. 0 = Identifier etc.
+ Record.push_back(Name.getNameKind());
+ switch (Name.getNameKind()) {
+ case DeclarationName::Identifier:
+ AddIdentifierRef(Name.getAsIdentifierInfo(), Record);
+ break;
+
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ AddSelectorRef(Name.getObjCSelector(), Record);
+ break;
+
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ AddTypeRef(Name.getCXXNameType(), Record);
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ Record.push_back(Name.getCXXOverloadedOperator());
+ break;
+
+ case DeclarationName::CXXLiteralOperatorName:
+ AddIdentifierRef(Name.getCXXLiteralIdentifier(), Record);
+ break;
+
+ case DeclarationName::CXXUsingDirective:
+ // No extra data to emit
+ break;
+ }
+}
+
+void ASTWriter::AddDeclarationNameLoc(const DeclarationNameLoc &DNLoc,
+ DeclarationName Name, RecordDataImpl &Record) {
+ switch (Name.getNameKind()) {
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXConversionFunctionName:
+ AddTypeSourceInfo(DNLoc.NamedType.TInfo, Record);
+ break;
+
+ case DeclarationName::CXXOperatorName:
+ AddSourceLocation(
+ SourceLocation::getFromRawEncoding(DNLoc.CXXOperatorName.BeginOpNameLoc),
+ Record);
+ AddSourceLocation(
+ SourceLocation::getFromRawEncoding(DNLoc.CXXOperatorName.EndOpNameLoc),
+ Record);
+ break;
+
+ case DeclarationName::CXXLiteralOperatorName:
+ AddSourceLocation(
+ SourceLocation::getFromRawEncoding(DNLoc.CXXLiteralOperatorName.OpNameLoc),
+ Record);
+ break;
+
+ case DeclarationName::Identifier:
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector:
+ case DeclarationName::CXXUsingDirective:
+ break;
+ }
+}
+
+void ASTWriter::AddDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
+ RecordDataImpl &Record) {
+ AddDeclarationName(NameInfo.getName(), Record);
+ AddSourceLocation(NameInfo.getLoc(), Record);
+ AddDeclarationNameLoc(NameInfo.getInfo(), NameInfo.getName(), Record);
+}
+
+void ASTWriter::AddQualifierInfo(const QualifierInfo &Info,
+ RecordDataImpl &Record) {
+ AddNestedNameSpecifierLoc(Info.QualifierLoc, Record);
+ Record.push_back(Info.NumTemplParamLists);
+ for (unsigned i=0, e=Info.NumTemplParamLists; i != e; ++i)
+ AddTemplateParameterList(Info.TemplParamLists[i], Record);
+}
+
+void ASTWriter::AddNestedNameSpecifier(NestedNameSpecifier *NNS,
+ RecordDataImpl &Record) {
+ // Nested name specifiers usually aren't too long; a stack depth of 8 should
+ // typically accommodate the vast majority of cases.
+ SmallVector<NestedNameSpecifier *, 8> NestedNames;
+
+ // Push each of the NNS's onto a stack for serialization in reverse order.
+ while (NNS) {
+ NestedNames.push_back(NNS);
+ NNS = NNS->getPrefix();
+ }
+
+ Record.push_back(NestedNames.size());
+ while(!NestedNames.empty()) {
+ NNS = NestedNames.pop_back_val();
+ NestedNameSpecifier::SpecifierKind Kind = NNS->getKind();
+ Record.push_back(Kind);
+ switch (Kind) {
+ case NestedNameSpecifier::Identifier:
+ AddIdentifierRef(NNS->getAsIdentifier(), Record);
+ break;
+
+ case NestedNameSpecifier::Namespace:
+ AddDeclRef(NNS->getAsNamespace(), Record);
+ break;
+
+ case NestedNameSpecifier::NamespaceAlias:
+ AddDeclRef(NNS->getAsNamespaceAlias(), Record);
+ break;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ AddTypeRef(QualType(NNS->getAsType(), 0), Record);
+ Record.push_back(Kind == NestedNameSpecifier::TypeSpecWithTemplate);
+ break;
+
+ case NestedNameSpecifier::Global:
+ // Don't need to write an associated value.
+ break;
+ }
+ }
+}
+
+void ASTWriter::AddNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
+ RecordDataImpl &Record) {
+ // Nested name specifiers usually aren't too long; a stack depth of 8 should
+ // typically accommodate the vast majority of cases.
+ SmallVector<NestedNameSpecifierLoc , 8> NestedNames;
+
+ // Push each of the nested-name-specifiers onto a stack for
+ // serialization in reverse order.
+ while (NNS) {
+ NestedNames.push_back(NNS);
+ NNS = NNS.getPrefix();
+ }
+
+ Record.push_back(NestedNames.size());
+ while(!NestedNames.empty()) {
+ NNS = NestedNames.pop_back_val();
+ NestedNameSpecifier::SpecifierKind Kind
+ = NNS.getNestedNameSpecifier()->getKind();
+ Record.push_back(Kind);
+ switch (Kind) {
+ case NestedNameSpecifier::Identifier:
+ AddIdentifierRef(NNS.getNestedNameSpecifier()->getAsIdentifier(), Record);
+ AddSourceRange(NNS.getLocalSourceRange(), Record);
+ break;
+
+ case NestedNameSpecifier::Namespace:
+ AddDeclRef(NNS.getNestedNameSpecifier()->getAsNamespace(), Record);
+ AddSourceRange(NNS.getLocalSourceRange(), Record);
+ break;
+
+ case NestedNameSpecifier::NamespaceAlias:
+ AddDeclRef(NNS.getNestedNameSpecifier()->getAsNamespaceAlias(), Record);
+ AddSourceRange(NNS.getLocalSourceRange(), Record);
+ break;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ Record.push_back(Kind == NestedNameSpecifier::TypeSpecWithTemplate);
+ AddTypeLoc(NNS.getTypeLoc(), Record);
+ AddSourceLocation(NNS.getLocalSourceRange().getEnd(), Record);
+ break;
+
+ case NestedNameSpecifier::Global:
+ AddSourceLocation(NNS.getLocalSourceRange().getEnd(), Record);
+ break;
+ }
+ }
+}
+
+void ASTWriter::AddTemplateName(TemplateName Name, RecordDataImpl &Record) {
+ TemplateName::NameKind Kind = Name.getKind();
+ Record.push_back(Kind);
+ switch (Kind) {
+ case TemplateName::Template:
+ AddDeclRef(Name.getAsTemplateDecl(), Record);
+ break;
+
+ case TemplateName::OverloadedTemplate: {
+ OverloadedTemplateStorage *OvT = Name.getAsOverloadedTemplate();
+ Record.push_back(OvT->size());
+ for (OverloadedTemplateStorage::iterator I = OvT->begin(), E = OvT->end();
+ I != E; ++I)
+ AddDeclRef(*I, Record);
+ break;
+ }
+
+ case TemplateName::QualifiedTemplate: {
+ QualifiedTemplateName *QualT = Name.getAsQualifiedTemplateName();
+ AddNestedNameSpecifier(QualT->getQualifier(), Record);
+ Record.push_back(QualT->hasTemplateKeyword());
+ AddDeclRef(QualT->getTemplateDecl(), Record);
+ break;
+ }
+
+ case TemplateName::DependentTemplate: {
+ DependentTemplateName *DepT = Name.getAsDependentTemplateName();
+ AddNestedNameSpecifier(DepT->getQualifier(), Record);
+ Record.push_back(DepT->isIdentifier());
+ if (DepT->isIdentifier())
+ AddIdentifierRef(DepT->getIdentifier(), Record);
+ else
+ Record.push_back(DepT->getOperator());
+ break;
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *subst
+ = Name.getAsSubstTemplateTemplateParm();
+ AddDeclRef(subst->getParameter(), Record);
+ AddTemplateName(subst->getReplacement(), Record);
+ break;
+ }
+
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ SubstTemplateTemplateParmPackStorage *SubstPack
+ = Name.getAsSubstTemplateTemplateParmPack();
+ AddDeclRef(SubstPack->getParameterPack(), Record);
+ AddTemplateArgument(SubstPack->getArgumentPack(), Record);
+ break;
+ }
+ }
+}
+
+void ASTWriter::AddTemplateArgument(const TemplateArgument &Arg,
+ RecordDataImpl &Record) {
+ Record.push_back(Arg.getKind());
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ break;
+ case TemplateArgument::Type:
+ AddTypeRef(Arg.getAsType(), Record);
+ break;
+ case TemplateArgument::Declaration:
+ AddDeclRef(Arg.getAsDecl(), Record);
+ break;
+ case TemplateArgument::Integral:
+ AddAPSInt(*Arg.getAsIntegral(), Record);
+ AddTypeRef(Arg.getIntegralType(), Record);
+ break;
+ case TemplateArgument::Template:
+ AddTemplateName(Arg.getAsTemplateOrTemplatePattern(), Record);
+ break;
+ case TemplateArgument::TemplateExpansion:
+ AddTemplateName(Arg.getAsTemplateOrTemplatePattern(), Record);
+ if (llvm::Optional<unsigned> NumExpansions = Arg.getNumTemplateExpansions())
+ Record.push_back(*NumExpansions + 1);
+ else
+ Record.push_back(0);
+ break;
+ case TemplateArgument::Expression:
+ AddStmt(Arg.getAsExpr());
+ break;
+ case TemplateArgument::Pack:
+ Record.push_back(Arg.pack_size());
+ for (TemplateArgument::pack_iterator I=Arg.pack_begin(), E=Arg.pack_end();
+ I != E; ++I)
+ AddTemplateArgument(*I, Record);
+ break;
+ }
+}
+
+void
+ASTWriter::AddTemplateParameterList(const TemplateParameterList *TemplateParams,
+ RecordDataImpl &Record) {
+ assert(TemplateParams && "No TemplateParams!");
+ AddSourceLocation(TemplateParams->getTemplateLoc(), Record);
+ AddSourceLocation(TemplateParams->getLAngleLoc(), Record);
+ AddSourceLocation(TemplateParams->getRAngleLoc(), Record);
+ Record.push_back(TemplateParams->size());
+ for (TemplateParameterList::const_iterator
+ P = TemplateParams->begin(), PEnd = TemplateParams->end();
+ P != PEnd; ++P)
+ AddDeclRef(*P, Record);
+}
+
+/// \brief Emit a template argument list.
+void
+ASTWriter::AddTemplateArgumentList(const TemplateArgumentList *TemplateArgs,
+ RecordDataImpl &Record) {
+ assert(TemplateArgs && "No TemplateArgs!");
+ Record.push_back(TemplateArgs->size());
+ for (int i=0, e = TemplateArgs->size(); i != e; ++i)
+ AddTemplateArgument(TemplateArgs->get(i), Record);
+}
+
+
+void
+ASTWriter::AddUnresolvedSet(const UnresolvedSetImpl &Set, RecordDataImpl &Record) {
+ Record.push_back(Set.size());
+ for (UnresolvedSetImpl::const_iterator
+ I = Set.begin(), E = Set.end(); I != E; ++I) {
+ AddDeclRef(I.getDecl(), Record);
+ Record.push_back(I.getAccess());
+ }
+}
+
+void ASTWriter::AddCXXBaseSpecifier(const CXXBaseSpecifier &Base,
+ RecordDataImpl &Record) {
+ Record.push_back(Base.isVirtual());
+ Record.push_back(Base.isBaseOfClass());
+ Record.push_back(Base.getAccessSpecifierAsWritten());
+ Record.push_back(Base.getInheritConstructors());
+ AddTypeSourceInfo(Base.getTypeSourceInfo(), Record);
+ AddSourceRange(Base.getSourceRange(), Record);
+ AddSourceLocation(Base.isPackExpansion()? Base.getEllipsisLoc()
+ : SourceLocation(),
+ Record);
+}
+
+void ASTWriter::FlushCXXBaseSpecifiers() {
+ RecordData Record;
+ for (unsigned I = 0, N = CXXBaseSpecifiersToWrite.size(); I != N; ++I) {
+ Record.clear();
+
+ // Record the offset of this base-specifier set.
+ unsigned Index = CXXBaseSpecifiersToWrite[I].ID - 1;
+ if (Index == CXXBaseSpecifiersOffsets.size())
+ CXXBaseSpecifiersOffsets.push_back(Stream.GetCurrentBitNo());
+ else {
+ if (Index > CXXBaseSpecifiersOffsets.size())
+ CXXBaseSpecifiersOffsets.resize(Index + 1);
+ CXXBaseSpecifiersOffsets[Index] = Stream.GetCurrentBitNo();
+ }
+
+ const CXXBaseSpecifier *B = CXXBaseSpecifiersToWrite[I].Bases,
+ *BEnd = CXXBaseSpecifiersToWrite[I].BasesEnd;
+ Record.push_back(BEnd - B);
+ for (; B != BEnd; ++B)
+ AddCXXBaseSpecifier(*B, Record);
+ Stream.EmitRecord(serialization::DECL_CXX_BASE_SPECIFIERS, Record);
+
+ // Flush any expressions that were written as part of the base specifiers.
+ FlushStmts();
+ }
+
+ CXXBaseSpecifiersToWrite.clear();
+}
+
+void ASTWriter::AddCXXCtorInitializers(
+ const CXXCtorInitializer * const *CtorInitializers,
+ unsigned NumCtorInitializers,
+ RecordDataImpl &Record) {
+ Record.push_back(NumCtorInitializers);
+ for (unsigned i=0; i != NumCtorInitializers; ++i) {
+ const CXXCtorInitializer *Init = CtorInitializers[i];
+
+ if (Init->isBaseInitializer()) {
+ Record.push_back(CTOR_INITIALIZER_BASE);
+ AddTypeSourceInfo(Init->getTypeSourceInfo(), Record);
+ Record.push_back(Init->isBaseVirtual());
+ } else if (Init->isDelegatingInitializer()) {
+ Record.push_back(CTOR_INITIALIZER_DELEGATING);
+ AddTypeSourceInfo(Init->getTypeSourceInfo(), Record);
+ } else if (Init->isMemberInitializer()){
+ Record.push_back(CTOR_INITIALIZER_MEMBER);
+ AddDeclRef(Init->getMember(), Record);
+ } else {
+ Record.push_back(CTOR_INITIALIZER_INDIRECT_MEMBER);
+ AddDeclRef(Init->getIndirectMember(), Record);
+ }
+
+ AddSourceLocation(Init->getMemberLocation(), Record);
+ AddStmt(Init->getInit());
+ AddSourceLocation(Init->getLParenLoc(), Record);
+ AddSourceLocation(Init->getRParenLoc(), Record);
+ Record.push_back(Init->isWritten());
+ if (Init->isWritten()) {
+ Record.push_back(Init->getSourceOrder());
+ } else {
+ Record.push_back(Init->getNumArrayIndices());
+ for (unsigned i=0, e=Init->getNumArrayIndices(); i != e; ++i)
+ AddDeclRef(Init->getArrayIndex(i), Record);
+ }
+ }
+}
+
+void ASTWriter::AddCXXDefinitionData(const CXXRecordDecl *D, RecordDataImpl &Record) {
+ assert(D->DefinitionData);
+ struct CXXRecordDecl::DefinitionData &Data = *D->DefinitionData;
+ Record.push_back(Data.IsLambda);
+ Record.push_back(Data.UserDeclaredConstructor);
+ Record.push_back(Data.UserDeclaredCopyConstructor);
+ Record.push_back(Data.UserDeclaredMoveConstructor);
+ Record.push_back(Data.UserDeclaredCopyAssignment);
+ Record.push_back(Data.UserDeclaredMoveAssignment);
+ Record.push_back(Data.UserDeclaredDestructor);
+ Record.push_back(Data.Aggregate);
+ Record.push_back(Data.PlainOldData);
+ Record.push_back(Data.Empty);
+ Record.push_back(Data.Polymorphic);
+ Record.push_back(Data.Abstract);
+ Record.push_back(Data.IsStandardLayout);
+ Record.push_back(Data.HasNoNonEmptyBases);
+ Record.push_back(Data.HasPrivateFields);
+ Record.push_back(Data.HasProtectedFields);
+ Record.push_back(Data.HasPublicFields);
+ Record.push_back(Data.HasMutableFields);
+ Record.push_back(Data.HasOnlyCMembers);
+ Record.push_back(Data.HasTrivialDefaultConstructor);
+ Record.push_back(Data.HasConstexprNonCopyMoveConstructor);
+ Record.push_back(Data.DefaultedDefaultConstructorIsConstexpr);
+ Record.push_back(Data.DefaultedCopyConstructorIsConstexpr);
+ Record.push_back(Data.DefaultedMoveConstructorIsConstexpr);
+ Record.push_back(Data.HasConstexprDefaultConstructor);
+ Record.push_back(Data.HasConstexprCopyConstructor);
+ Record.push_back(Data.HasConstexprMoveConstructor);
+ Record.push_back(Data.HasTrivialCopyConstructor);
+ Record.push_back(Data.HasTrivialMoveConstructor);
+ Record.push_back(Data.HasTrivialCopyAssignment);
+ Record.push_back(Data.HasTrivialMoveAssignment);
+ Record.push_back(Data.HasTrivialDestructor);
+ Record.push_back(Data.HasIrrelevantDestructor);
+ Record.push_back(Data.HasNonLiteralTypeFieldsOrBases);
+ Record.push_back(Data.ComputedVisibleConversions);
+ Record.push_back(Data.UserProvidedDefaultConstructor);
+ Record.push_back(Data.DeclaredDefaultConstructor);
+ Record.push_back(Data.DeclaredCopyConstructor);
+ Record.push_back(Data.DeclaredMoveConstructor);
+ Record.push_back(Data.DeclaredCopyAssignment);
+ Record.push_back(Data.DeclaredMoveAssignment);
+ Record.push_back(Data.DeclaredDestructor);
+ Record.push_back(Data.FailedImplicitMoveConstructor);
+ Record.push_back(Data.FailedImplicitMoveAssignment);
+ // IsLambda bit is already saved.
+
+ Record.push_back(Data.NumBases);
+ if (Data.NumBases > 0)
+ AddCXXBaseSpecifiersRef(Data.getBases(), Data.getBases() + Data.NumBases,
+ Record);
+
+ // FIXME: Make VBases lazily computed when needed to avoid storing them.
+ Record.push_back(Data.NumVBases);
+ if (Data.NumVBases > 0)
+ AddCXXBaseSpecifiersRef(Data.getVBases(), Data.getVBases() + Data.NumVBases,
+ Record);
+
+ AddUnresolvedSet(Data.Conversions, Record);
+ AddUnresolvedSet(Data.VisibleConversions, Record);
+ // Data.Definition is the owning decl, no need to write it.
+ AddDeclRef(Data.FirstFriend, Record);
+
+ // Add lambda-specific data.
+ if (Data.IsLambda) {
+ CXXRecordDecl::LambdaDefinitionData &Lambda = D->getLambdaData();
+ Record.push_back(Lambda.Dependent);
+ Record.push_back(Lambda.NumCaptures);
+ Record.push_back(Lambda.NumExplicitCaptures);
+ Record.push_back(Lambda.ManglingNumber);
+ AddDeclRef(Lambda.ContextDecl, Record);
+ for (unsigned I = 0, N = Lambda.NumCaptures; I != N; ++I) {
+ LambdaExpr::Capture &Capture = Lambda.Captures[I];
+ AddSourceLocation(Capture.getLocation(), Record);
+ Record.push_back(Capture.isImplicit());
+ Record.push_back(Capture.getCaptureKind()); // FIXME: stable!
+ VarDecl *Var = Capture.capturesVariable()? Capture.getCapturedVar() : 0;
+ AddDeclRef(Var, Record);
+ AddSourceLocation(Capture.isPackExpansion()? Capture.getEllipsisLoc()
+ : SourceLocation(),
+ Record);
+ }
+ }
+}
+
+void ASTWriter::ReaderInitialized(ASTReader *Reader) {
+ assert(Reader && "Cannot remove chain");
+ assert((!Chain || Chain == Reader) && "Cannot replace chain");
+ assert(FirstDeclID == NextDeclID &&
+ FirstTypeID == NextTypeID &&
+ FirstIdentID == NextIdentID &&
+ FirstSubmoduleID == NextSubmoduleID &&
+ FirstSelectorID == NextSelectorID &&
+ "Setting chain after writing has started.");
+
+ Chain = Reader;
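+  // Start numbering local IDs after the IDs already used by the chained
+  // reader so the two ID spaces never overlap.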
+
+ FirstDeclID = NUM_PREDEF_DECL_IDS + Chain->getTotalNumDecls();
+ FirstTypeID = NUM_PREDEF_TYPE_IDS + Chain->getTotalNumTypes();
+ FirstIdentID = NUM_PREDEF_IDENT_IDS + Chain->getTotalNumIdentifiers();
+ FirstSubmoduleID = NUM_PREDEF_SUBMODULE_IDS + Chain->getTotalNumSubmodules();
+ FirstSelectorID = NUM_PREDEF_SELECTOR_IDS + Chain->getTotalNumSelectors();
+ NextDeclID = FirstDeclID;
+ NextTypeID = FirstTypeID;
+ NextIdentID = FirstIdentID;
+ NextSelectorID = FirstSelectorID;
+ NextSubmoduleID = FirstSubmoduleID;
+}
+
+void ASTWriter::IdentifierRead(IdentID ID, IdentifierInfo *II) {
+ IdentifierIDs[II] = ID;
+ if (II->hasMacroDefinition())
+ DeserializedMacroNames.push_back(II);
+}
+
+void ASTWriter::TypeRead(TypeIdx Idx, QualType T) {
+ // Always take the highest-numbered type index. This copes with an interesting
+ // case for chained AST writing where we schedule writing the type and then,
+ // later, deserialize the type from another AST. In this case, we want to
+ // keep the higher-numbered entry so that we can properly write it out to
+ // the AST file.
+ TypeIdx &StoredIdx = TypeIdxs[T];
+ if (Idx.getIndex() >= StoredIdx.getIndex())
+ StoredIdx = Idx;
+}
+
+void ASTWriter::SelectorRead(SelectorID ID, Selector S) {
+ SelectorIDs[S] = ID;
+}
+
+void ASTWriter::MacroDefinitionRead(serialization::PreprocessedEntityID ID,
+ MacroDefinition *MD) {
+ assert(MacroDefinitions.find(MD) == MacroDefinitions.end());
+ MacroDefinitions[MD] = ID;
+}
+
+void ASTWriter::MacroVisible(IdentifierInfo *II) {
+ DeserializedMacroNames.push_back(II);
+}
+
+void ASTWriter::ModuleRead(serialization::SubmoduleID ID, Module *Mod) {
+ assert(SubmoduleIDs.find(Mod) == SubmoduleIDs.end());
+ SubmoduleIDs[Mod] = ID;
+}
+
+void ASTWriter::CompletedTagDefinition(const TagDecl *D) {
+ assert(D->isCompleteDefinition());
+ assert(!WritingAST && "Already writing the AST!");
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ // We are interested when a PCH decl is modified.
+ if (RD->isFromASTFile()) {
+ // A forward reference was mutated into a definition. Rewrite it.
+      // FIXME: This happens during template instantiation; should we
+      // have created a new definition decl instead?
+ RewriteDecl(RD);
+ }
+ }
+}
+void ASTWriter::AddedVisibleDecl(const DeclContext *DC, const Decl *D) {
+ assert(!WritingAST && "Already writing the AST!");
+
+ // TU and namespaces are handled elsewhere.
+ if (isa<TranslationUnitDecl>(DC) || isa<NamespaceDecl>(DC))
+ return;
+
+ if (!(!D->isFromASTFile() && cast<Decl>(DC)->isFromASTFile()))
+ return; // Not a source decl added to a DeclContext from PCH.
+
+ AddUpdatedDeclContext(DC);
+}
+
+void ASTWriter::AddedCXXImplicitMember(const CXXRecordDecl *RD, const Decl *D) {
+ assert(!WritingAST && "Already writing the AST!");
+ assert(D->isImplicit());
+ if (!(!D->isFromASTFile() && RD->isFromASTFile()))
+ return; // Not a source member added to a class from PCH.
+ if (!isa<CXXMethodDecl>(D))
+ return; // We are interested in lazily declared implicit methods.
+
+ // A decl coming from PCH was modified.
+ assert(RD->isCompleteDefinition());
+ UpdateRecord &Record = DeclUpdates[RD];
+ Record.push_back(UPD_CXX_ADDED_IMPLICIT_MEMBER);
+ Record.push_back(reinterpret_cast<uint64_t>(D));
+}
+
+void ASTWriter::AddedCXXTemplateSpecialization(const ClassTemplateDecl *TD,
+ const ClassTemplateSpecializationDecl *D) {
+ // The specializations set is kept in the canonical template.
+ assert(!WritingAST && "Already writing the AST!");
+ TD = TD->getCanonicalDecl();
+ if (!(!D->isFromASTFile() && TD->isFromASTFile()))
+ return; // Not a source specialization added to a template from PCH.
+
+ UpdateRecord &Record = DeclUpdates[TD];
+ Record.push_back(UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION);
+ Record.push_back(reinterpret_cast<uint64_t>(D));
+}
+
+void ASTWriter::AddedCXXTemplateSpecialization(const FunctionTemplateDecl *TD,
+ const FunctionDecl *D) {
+ // The specializations set is kept in the canonical template.
+ assert(!WritingAST && "Already writing the AST!");
+ TD = TD->getCanonicalDecl();
+ if (!(!D->isFromASTFile() && TD->isFromASTFile()))
+ return; // Not a source specialization added to a template from PCH.
+
+ UpdateRecord &Record = DeclUpdates[TD];
+ Record.push_back(UPD_CXX_ADDED_TEMPLATE_SPECIALIZATION);
+ Record.push_back(reinterpret_cast<uint64_t>(D));
+}
+
+void ASTWriter::CompletedImplicitDefinition(const FunctionDecl *D) {
+ assert(!WritingAST && "Already writing the AST!");
+ if (!D->isFromASTFile())
+ return; // Declaration not imported from PCH.
+
+ // Implicit decl from a PCH was defined.
+ // FIXME: Should implicit definition be a separate FunctionDecl?
+ RewriteDecl(D);
+}
+
+void ASTWriter::StaticDataMemberInstantiated(const VarDecl *D) {
+ assert(!WritingAST && "Already writing the AST!");
+ if (!D->isFromASTFile())
+ return;
+
+ // Since the actual instantiation is delayed, this really means that we need
+ // to update the instantiation location.
+ UpdateRecord &Record = DeclUpdates[D];
+ Record.push_back(UPD_CXX_INSTANTIATED_STATIC_DATA_MEMBER);
+ AddSourceLocation(
+ D->getMemberSpecializationInfo()->getPointOfInstantiation(), Record);
+}
+
+void ASTWriter::AddedObjCCategoryToInterface(const ObjCCategoryDecl *CatD,
+ const ObjCInterfaceDecl *IFD) {
+ assert(!WritingAST && "Already writing the AST!");
+ if (!IFD->isFromASTFile())
+ return; // Declaration not imported from PCH.
+
+ assert(IFD->getDefinition() && "Category on a class without a definition?");
+ ObjCClassesWithCategories.insert(
+ const_cast<ObjCInterfaceDecl *>(IFD->getDefinition()));
+}
+
+
+void ASTWriter::AddedObjCPropertyInClassExtension(const ObjCPropertyDecl *Prop,
+ const ObjCPropertyDecl *OrigProp,
+ const ObjCCategoryDecl *ClassExt) {
+ const ObjCInterfaceDecl *D = ClassExt->getClassInterface();
+ if (!D)
+ return;
+
+ assert(!WritingAST && "Already writing the AST!");
+ if (!D->isFromASTFile())
+ return; // Declaration not imported from PCH.
+
+ RewriteDecl(D);
+}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp
new file mode 100644
index 0000000..7a4ef63
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -0,0 +1,1725 @@
+//===--- ASTWriterDecl.cpp - Declaration Serialization --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements serialization for Declarations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Serialization/ASTWriter.h"
+#include "clang/Serialization/ASTReader.h"
+#include "ASTCommon.h"
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/DeclContextInternals.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Support/ErrorHandling.h"
+using namespace clang;
+using namespace serialization;
+
+//===----------------------------------------------------------------------===//
+// Declaration serialization
+//===----------------------------------------------------------------------===//
+
+namespace clang {
+ class ASTDeclWriter : public DeclVisitor<ASTDeclWriter, void> {
+
+ ASTWriter &Writer;
+ ASTContext &Context;
+ typedef ASTWriter::RecordData RecordData;
+ RecordData &Record;
+
+ public:
+ serialization::DeclCode Code;
+ unsigned AbbrevToUse;
+
+ ASTDeclWriter(ASTWriter &Writer, ASTContext &Context, RecordData &Record)
+ : Writer(Writer), Context(Context), Record(Record) {
+ }
+
+ void Visit(Decl *D);
+
+ void VisitDecl(Decl *D);
+ void VisitTranslationUnitDecl(TranslationUnitDecl *D);
+ void VisitNamedDecl(NamedDecl *D);
+ void VisitLabelDecl(LabelDecl *LD);
+ void VisitNamespaceDecl(NamespaceDecl *D);
+ void VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
+ void VisitNamespaceAliasDecl(NamespaceAliasDecl *D);
+ void VisitTypeDecl(TypeDecl *D);
+ void VisitTypedefNameDecl(TypedefNameDecl *D);
+ void VisitTypedefDecl(TypedefDecl *D);
+ void VisitTypeAliasDecl(TypeAliasDecl *D);
+ void VisitUnresolvedUsingTypenameDecl(UnresolvedUsingTypenameDecl *D);
+ void VisitTagDecl(TagDecl *D);
+ void VisitEnumDecl(EnumDecl *D);
+ void VisitRecordDecl(RecordDecl *D);
+ void VisitCXXRecordDecl(CXXRecordDecl *D);
+ void VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D);
+ void VisitClassTemplatePartialSpecializationDecl(
+ ClassTemplatePartialSpecializationDecl *D);
+ void VisitClassScopeFunctionSpecializationDecl(
+ ClassScopeFunctionSpecializationDecl *D);
+ void VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D);
+ void VisitValueDecl(ValueDecl *D);
+ void VisitEnumConstantDecl(EnumConstantDecl *D);
+ void VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
+ void VisitDeclaratorDecl(DeclaratorDecl *D);
+ void VisitFunctionDecl(FunctionDecl *D);
+ void VisitCXXMethodDecl(CXXMethodDecl *D);
+ void VisitCXXConstructorDecl(CXXConstructorDecl *D);
+ void VisitCXXDestructorDecl(CXXDestructorDecl *D);
+ void VisitCXXConversionDecl(CXXConversionDecl *D);
+ void VisitFieldDecl(FieldDecl *D);
+ void VisitIndirectFieldDecl(IndirectFieldDecl *D);
+ void VisitVarDecl(VarDecl *D);
+ void VisitImplicitParamDecl(ImplicitParamDecl *D);
+ void VisitParmVarDecl(ParmVarDecl *D);
+ void VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D);
+ void VisitTemplateDecl(TemplateDecl *D);
+ void VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D);
+ void VisitClassTemplateDecl(ClassTemplateDecl *D);
+ void VisitFunctionTemplateDecl(FunctionTemplateDecl *D);
+ void VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D);
+ void VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D);
+ void VisitUsingDecl(UsingDecl *D);
+ void VisitUsingShadowDecl(UsingShadowDecl *D);
+ void VisitLinkageSpecDecl(LinkageSpecDecl *D);
+ void VisitFileScopeAsmDecl(FileScopeAsmDecl *D);
+ void VisitImportDecl(ImportDecl *D);
+ void VisitAccessSpecDecl(AccessSpecDecl *D);
+ void VisitFriendDecl(FriendDecl *D);
+ void VisitFriendTemplateDecl(FriendTemplateDecl *D);
+ void VisitStaticAssertDecl(StaticAssertDecl *D);
+ void VisitBlockDecl(BlockDecl *D);
+
+ void VisitDeclContext(DeclContext *DC, uint64_t LexicalOffset,
+ uint64_t VisibleOffset);
+ template <typename T> void VisitRedeclarable(Redeclarable<T> *D);
+
+
+    // FIXME: Put in the same order as DeclNodes.td?
+ void VisitObjCMethodDecl(ObjCMethodDecl *D);
+ void VisitObjCContainerDecl(ObjCContainerDecl *D);
+ void VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
+ void VisitObjCIvarDecl(ObjCIvarDecl *D);
+ void VisitObjCProtocolDecl(ObjCProtocolDecl *D);
+ void VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *D);
+ void VisitObjCCategoryDecl(ObjCCategoryDecl *D);
+ void VisitObjCImplDecl(ObjCImplDecl *D);
+ void VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D);
+ void VisitObjCImplementationDecl(ObjCImplementationDecl *D);
+ void VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D);
+ void VisitObjCPropertyDecl(ObjCPropertyDecl *D);
+ void VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D);
+ };
+}
+
+void ASTDeclWriter::Visit(Decl *D) {
+ DeclVisitor<ASTDeclWriter>::Visit(D);
+
+ // Source locations require array (variable-length) abbreviations. The
+ // abbreviation infrastructure requires that arrays are encoded last, so
+  // we handle it here in the case of those classes derived from DeclaratorDecl.
+  if (DeclaratorDecl *DD = dyn_cast<DeclaratorDecl>(D)) {
+ Writer.AddTypeSourceInfo(DD->getTypeSourceInfo(), Record);
+ }
+
+ // Handle FunctionDecl's body here and write it after all other Stmts/Exprs
+ // have been written. We want it last because we will not read it back when
+  // retrieving it from the AST; we'll just lazily set the offset.
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ Record.push_back(FD->doesThisDeclarationHaveABody());
+ if (FD->doesThisDeclarationHaveABody())
+ Writer.AddStmt(FD->getBody());
+ }
+}
+
+void ASTDeclWriter::VisitDecl(Decl *D) {
+ Writer.AddDeclRef(cast_or_null<Decl>(D->getDeclContext()), Record);
+ Writer.AddDeclRef(cast_or_null<Decl>(D->getLexicalDeclContext()), Record);
+ Record.push_back(D->isInvalidDecl());
+ Record.push_back(D->hasAttrs());
+ if (D->hasAttrs())
+ Writer.WriteAttributes(D->getAttrs(), Record);
+ Record.push_back(D->isImplicit());
+ Record.push_back(D->isUsed(false));
+ Record.push_back(D->isReferenced());
+ Record.push_back(D->isTopLevelDeclInObjCContainer());
+ Record.push_back(D->getAccess());
+ Record.push_back(D->isModulePrivate());
+ Record.push_back(Writer.inferSubmoduleIDFromLocation(D->getLocation()));
+}
+
+void ASTDeclWriter::VisitTranslationUnitDecl(TranslationUnitDecl *D) {
+ llvm_unreachable("Translation units aren't directly serialized");
+}
+
+void ASTDeclWriter::VisitNamedDecl(NamedDecl *D) {
+ VisitDecl(D);
+ Writer.AddDeclarationName(D->getDeclName(), Record);
+}
+
+void ASTDeclWriter::VisitTypeDecl(TypeDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddSourceLocation(D->getLocStart(), Record);
+ Writer.AddTypeRef(QualType(D->getTypeForDecl(), 0), Record);
+}
+
+void ASTDeclWriter::VisitTypedefNameDecl(TypedefNameDecl *D) {
+ VisitRedeclarable(D);
+ VisitTypeDecl(D);
+ Writer.AddTypeSourceInfo(D->getTypeSourceInfo(), Record);
+}
+
+void ASTDeclWriter::VisitTypedefDecl(TypedefDecl *D) {
+ VisitTypedefNameDecl(D);
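+  // Use the compact typedef abbreviation only when every optional field holds
+  // its common default value; otherwise fall back to the generic encoding.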
+ if (!D->hasAttrs() &&
+ !D->isImplicit() &&
+ !D->isUsed(false) &&
+ D->getFirstDeclaration() == D->getMostRecentDecl() &&
+ !D->isInvalidDecl() &&
+ !D->isReferenced() &&
+ !D->isTopLevelDeclInObjCContainer() &&
+ D->getAccess() == AS_none &&
+ !D->isModulePrivate() &&
+ D->getDeclName().getNameKind() == DeclarationName::Identifier)
+ AbbrevToUse = Writer.getDeclTypedefAbbrev();
+
+ Code = serialization::DECL_TYPEDEF;
+}
+
+void ASTDeclWriter::VisitTypeAliasDecl(TypeAliasDecl *D) {
+ VisitTypedefNameDecl(D);
+ Code = serialization::DECL_TYPEALIAS;
+}
+
+void ASTDeclWriter::VisitTagDecl(TagDecl *D) {
+ VisitRedeclarable(D);
+ VisitTypeDecl(D);
+ Record.push_back(D->getIdentifierNamespace());
+ Record.push_back((unsigned)D->getTagKind()); // FIXME: stable encoding
+ Record.push_back(D->isCompleteDefinition());
+ Record.push_back(D->isEmbeddedInDeclarator());
+ Record.push_back(D->isFreeStanding());
+ Writer.AddSourceLocation(D->getRBraceLoc(), Record);
+ Record.push_back(D->hasExtInfo());
+ if (D->hasExtInfo())
+ Writer.AddQualifierInfo(*D->getExtInfo(), Record);
+ else
+ Writer.AddDeclRef(D->getTypedefNameForAnonDecl(), Record);
+}
+
+void ASTDeclWriter::VisitEnumDecl(EnumDecl *D) {
+ VisitTagDecl(D);
+ Writer.AddTypeSourceInfo(D->getIntegerTypeSourceInfo(), Record);
+ if (!D->getIntegerTypeSourceInfo())
+ Writer.AddTypeRef(D->getIntegerType(), Record);
+ Writer.AddTypeRef(D->getPromotionType(), Record);
+ Record.push_back(D->getNumPositiveBits());
+ Record.push_back(D->getNumNegativeBits());
+ Record.push_back(D->isScoped());
+ Record.push_back(D->isScopedUsingClassTag());
+ Record.push_back(D->isFixed());
+ if (MemberSpecializationInfo *MemberInfo = D->getMemberSpecializationInfo()) {
+ Writer.AddDeclRef(MemberInfo->getInstantiatedFrom(), Record);
+ Record.push_back(MemberInfo->getTemplateSpecializationKind());
+ Writer.AddSourceLocation(MemberInfo->getPointOfInstantiation(), Record);
+ } else {
+ Writer.AddDeclRef(0, Record);
+ }
+
+ if (!D->hasAttrs() &&
+ !D->isImplicit() &&
+ !D->isUsed(false) &&
+ !D->hasExtInfo() &&
+ D->getFirstDeclaration() == D->getMostRecentDecl() &&
+ !D->isInvalidDecl() &&
+ !D->isReferenced() &&
+ !D->isTopLevelDeclInObjCContainer() &&
+ D->getAccess() == AS_none &&
+ !D->isModulePrivate() &&
+ !CXXRecordDecl::classofKind(D->getKind()) &&
+ !D->getIntegerTypeSourceInfo() &&
+ D->getDeclName().getNameKind() == DeclarationName::Identifier)
+ AbbrevToUse = Writer.getDeclEnumAbbrev();
+
+ Code = serialization::DECL_ENUM;
+}
+
+void ASTDeclWriter::VisitRecordDecl(RecordDecl *D) {
+ VisitTagDecl(D);
+ Record.push_back(D->hasFlexibleArrayMember());
+ Record.push_back(D->isAnonymousStructOrUnion());
+ Record.push_back(D->hasObjectMember());
+
+ if (!D->hasAttrs() &&
+ !D->isImplicit() &&
+ !D->isUsed(false) &&
+ !D->hasExtInfo() &&
+ D->getFirstDeclaration() == D->getMostRecentDecl() &&
+ !D->isInvalidDecl() &&
+ !D->isReferenced() &&
+ !D->isTopLevelDeclInObjCContainer() &&
+ D->getAccess() == AS_none &&
+ !D->isModulePrivate() &&
+ !CXXRecordDecl::classofKind(D->getKind()) &&
+ D->getDeclName().getNameKind() == DeclarationName::Identifier)
+ AbbrevToUse = Writer.getDeclRecordAbbrev();
+
+ Code = serialization::DECL_RECORD;
+}
+
+void ASTDeclWriter::VisitValueDecl(ValueDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddTypeRef(D->getType(), Record);
+}
+
+void ASTDeclWriter::VisitEnumConstantDecl(EnumConstantDecl *D) {
+ VisitValueDecl(D);
+ Record.push_back(D->getInitExpr()? 1 : 0);
+ if (D->getInitExpr())
+ Writer.AddStmt(D->getInitExpr());
+ Writer.AddAPSInt(D->getInitVal(), Record);
+
+ Code = serialization::DECL_ENUM_CONSTANT;
+}
+
+void ASTDeclWriter::VisitDeclaratorDecl(DeclaratorDecl *D) {
+ VisitValueDecl(D);
+ Writer.AddSourceLocation(D->getInnerLocStart(), Record);
+ Record.push_back(D->hasExtInfo());
+ if (D->hasExtInfo())
+ Writer.AddQualifierInfo(*D->getExtInfo(), Record);
+}
+
+void ASTDeclWriter::VisitFunctionDecl(FunctionDecl *D) {
+ VisitRedeclarable(D);
+ VisitDeclaratorDecl(D);
+
+ Writer.AddDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record);
+ Record.push_back(D->getIdentifierNamespace());
+
+ // FunctionDecl's body is handled last at ASTWriterDecl::Visit,
+ // after everything else is written.
+
+ Record.push_back(D->getStorageClass()); // FIXME: stable encoding
+ Record.push_back(D->getStorageClassAsWritten());
+ Record.push_back(D->IsInline);
+ Record.push_back(D->isInlineSpecified());
+ Record.push_back(D->isVirtualAsWritten());
+ Record.push_back(D->isPure());
+ Record.push_back(D->hasInheritedPrototype());
+ Record.push_back(D->hasWrittenPrototype());
+ Record.push_back(D->isDeletedAsWritten());
+ Record.push_back(D->isTrivial());
+ Record.push_back(D->isDefaulted());
+ Record.push_back(D->isExplicitlyDefaulted());
+ Record.push_back(D->hasImplicitReturnZero());
+ Record.push_back(D->isConstexpr());
+ Writer.AddSourceLocation(D->getLocEnd(), Record);
+
+ Record.push_back(D->getTemplatedKind());
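+  // The payload that follows depends on the templated kind written above; the
+  // reader switches on the same value.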
+ switch (D->getTemplatedKind()) {
+ case FunctionDecl::TK_NonTemplate:
+ break;
+ case FunctionDecl::TK_FunctionTemplate:
+ Writer.AddDeclRef(D->getDescribedFunctionTemplate(), Record);
+ break;
+ case FunctionDecl::TK_MemberSpecialization: {
+ MemberSpecializationInfo *MemberInfo = D->getMemberSpecializationInfo();
+ Writer.AddDeclRef(MemberInfo->getInstantiatedFrom(), Record);
+ Record.push_back(MemberInfo->getTemplateSpecializationKind());
+ Writer.AddSourceLocation(MemberInfo->getPointOfInstantiation(), Record);
+ break;
+ }
+ case FunctionDecl::TK_FunctionTemplateSpecialization: {
+ FunctionTemplateSpecializationInfo *
+ FTSInfo = D->getTemplateSpecializationInfo();
+ Writer.AddDeclRef(FTSInfo->getTemplate(), Record);
+ Record.push_back(FTSInfo->getTemplateSpecializationKind());
+
+ // Template arguments.
+ Writer.AddTemplateArgumentList(FTSInfo->TemplateArguments, Record);
+
+ // Template args as written.
+ Record.push_back(FTSInfo->TemplateArgumentsAsWritten != 0);
+ if (FTSInfo->TemplateArgumentsAsWritten) {
+ Record.push_back(FTSInfo->TemplateArgumentsAsWritten->NumTemplateArgs);
+ for (int i=0, e = FTSInfo->TemplateArgumentsAsWritten->NumTemplateArgs;
+ i!=e; ++i)
+ Writer.AddTemplateArgumentLoc((*FTSInfo->TemplateArgumentsAsWritten)[i],
+ Record);
+ Writer.AddSourceLocation(FTSInfo->TemplateArgumentsAsWritten->LAngleLoc,
+ Record);
+ Writer.AddSourceLocation(FTSInfo->TemplateArgumentsAsWritten->RAngleLoc,
+ Record);
+ }
+
+ Writer.AddSourceLocation(FTSInfo->getPointOfInstantiation(), Record);
+
+ if (D->isCanonicalDecl()) {
+ // Write the template that contains the specializations set. We will
+ // add a FunctionTemplateSpecializationInfo to it when reading.
+ Writer.AddDeclRef(FTSInfo->getTemplate()->getCanonicalDecl(), Record);
+ }
+ break;
+ }
+ case FunctionDecl::TK_DependentFunctionTemplateSpecialization: {
+ DependentFunctionTemplateSpecializationInfo *
+ DFTSInfo = D->getDependentSpecializationInfo();
+
+ // Templates.
+ Record.push_back(DFTSInfo->getNumTemplates());
+ for (int i=0, e = DFTSInfo->getNumTemplates(); i != e; ++i)
+ Writer.AddDeclRef(DFTSInfo->getTemplate(i), Record);
+
+    // Template args.
+ Record.push_back(DFTSInfo->getNumTemplateArgs());
+ for (int i=0, e = DFTSInfo->getNumTemplateArgs(); i != e; ++i)
+ Writer.AddTemplateArgumentLoc(DFTSInfo->getTemplateArg(i), Record);
+ Writer.AddSourceLocation(DFTSInfo->getLAngleLoc(), Record);
+ Writer.AddSourceLocation(DFTSInfo->getRAngleLoc(), Record);
+ break;
+ }
+ }
+
+ Record.push_back(D->param_size());
+ for (FunctionDecl::param_iterator P = D->param_begin(), PEnd = D->param_end();
+ P != PEnd; ++P)
+ Writer.AddDeclRef(*P, Record);
+ Code = serialization::DECL_FUNCTION;
+}
+
+void ASTDeclWriter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
+ VisitNamedDecl(D);
+ // FIXME: convert to LazyStmtPtr?
+ // Unlike C/C++, method bodies will never be in header files.
+ bool HasBodyStuff = D->getBody() != 0 ||
+ D->getSelfDecl() != 0 || D->getCmdDecl() != 0;
+ Record.push_back(HasBodyStuff);
+ if (HasBodyStuff) {
+ Writer.AddStmt(D->getBody());
+ Writer.AddDeclRef(D->getSelfDecl(), Record);
+ Writer.AddDeclRef(D->getCmdDecl(), Record);
+ }
+ Record.push_back(D->isInstanceMethod());
+ Record.push_back(D->isVariadic());
+ Record.push_back(D->isSynthesized());
+ Record.push_back(D->isDefined());
+
+ Record.push_back(D->IsRedeclaration);
+ Record.push_back(D->HasRedeclaration);
+ if (D->HasRedeclaration) {
+ assert(Context.getObjCMethodRedeclaration(D));
+ Writer.AddDeclRef(Context.getObjCMethodRedeclaration(D), Record);
+ }
+
+ // FIXME: stable encoding for @required/@optional
+ Record.push_back(D->getImplementationControl());
+ // FIXME: stable encoding for in/out/inout/bycopy/byref/oneway
+ Record.push_back(D->getObjCDeclQualifier());
+ Record.push_back(D->hasRelatedResultType());
+ Writer.AddTypeRef(D->getResultType(), Record);
+ Writer.AddTypeSourceInfo(D->getResultTypeSourceInfo(), Record);
+ Writer.AddSourceLocation(D->getLocEnd(), Record);
+ Record.push_back(D->param_size());
+ for (ObjCMethodDecl::param_iterator P = D->param_begin(),
+ PEnd = D->param_end(); P != PEnd; ++P)
+ Writer.AddDeclRef(*P, Record);
+
+ Record.push_back(D->SelLocsKind);
+ unsigned NumStoredSelLocs = D->getNumStoredSelLocs();
+ SourceLocation *SelLocs = D->getStoredSelLocs();
+ Record.push_back(NumStoredSelLocs);
+ for (unsigned i = 0; i != NumStoredSelLocs; ++i)
+ Writer.AddSourceLocation(SelLocs[i], Record);
+
+ Code = serialization::DECL_OBJC_METHOD;
+}
+
+void ASTDeclWriter::VisitObjCContainerDecl(ObjCContainerDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddSourceLocation(D->getAtStartLoc(), Record);
+ Writer.AddSourceRange(D->getAtEndRange(), Record);
+ // Abstract class (no need to define a stable serialization::DECL code).
+}
+
+void ASTDeclWriter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
+ VisitRedeclarable(D);
+ VisitObjCContainerDecl(D);
+ Writer.AddTypeRef(QualType(D->getTypeForDecl(), 0), Record);
+
+ Record.push_back(D->isThisDeclarationADefinition());
+ if (D->isThisDeclarationADefinition()) {
+ // Write the DefinitionData
+ ObjCInterfaceDecl::DefinitionData &Data = D->data();
+
+ Writer.AddDeclRef(D->getSuperClass(), Record);
+ Writer.AddSourceLocation(D->getSuperClassLoc(), Record);
+ Writer.AddSourceLocation(D->getEndOfDefinitionLoc(), Record);
+
+ // Write out the protocols that are directly referenced by the @interface.
+ Record.push_back(Data.ReferencedProtocols.size());
+ for (ObjCInterfaceDecl::protocol_iterator P = D->protocol_begin(),
+ PEnd = D->protocol_end();
+ P != PEnd; ++P)
+ Writer.AddDeclRef(*P, Record);
+ for (ObjCInterfaceDecl::protocol_loc_iterator PL = D->protocol_loc_begin(),
+ PLEnd = D->protocol_loc_end();
+ PL != PLEnd; ++PL)
+ Writer.AddSourceLocation(*PL, Record);
+
+ // Write out the protocols that are transitively referenced.
+ Record.push_back(Data.AllReferencedProtocols.size());
+ for (ObjCList<ObjCProtocolDecl>::iterator
+ P = Data.AllReferencedProtocols.begin(),
+ PEnd = Data.AllReferencedProtocols.end();
+ P != PEnd; ++P)
+ Writer.AddDeclRef(*P, Record);
+
+ if (ObjCCategoryDecl *Cat = D->getCategoryList()) {
+ // Ensure that we write out the set of categories for this class.
+ Writer.ObjCClassesWithCategories.insert(D);
+
+ // Make sure that the categories get serialized.
+ for (; Cat; Cat = Cat->getNextClassCategory())
+ (void)Writer.GetDeclRef(Cat);
+ }
+ }
+
+ Code = serialization::DECL_OBJC_INTERFACE;
+}
+
+void ASTDeclWriter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
+ VisitFieldDecl(D);
+ // FIXME: stable encoding for @public/@private/@protected/@package
+ Record.push_back(D->getAccessControl());
+ Record.push_back(D->getSynthesize());
+
+ if (!D->hasAttrs() &&
+ !D->isImplicit() &&
+ !D->isUsed(false) &&
+ !D->isInvalidDecl() &&
+ !D->isReferenced() &&
+ !D->isModulePrivate() &&
+ !D->getBitWidth() &&
+ !D->hasExtInfo() &&
+ D->getDeclName())
+ AbbrevToUse = Writer.getDeclObjCIvarAbbrev();
+
+ Code = serialization::DECL_OBJC_IVAR;
+}
+
+void ASTDeclWriter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
+ VisitRedeclarable(D);
+ VisitObjCContainerDecl(D);
+
+ Record.push_back(D->isThisDeclarationADefinition());
+ if (D->isThisDeclarationADefinition()) {
+ Record.push_back(D->protocol_size());
+ for (ObjCProtocolDecl::protocol_iterator
+ I = D->protocol_begin(), IEnd = D->protocol_end(); I != IEnd; ++I)
+ Writer.AddDeclRef(*I, Record);
+ for (ObjCProtocolDecl::protocol_loc_iterator PL = D->protocol_loc_begin(),
+ PLEnd = D->protocol_loc_end();
+ PL != PLEnd; ++PL)
+ Writer.AddSourceLocation(*PL, Record);
+ }
+
+ Code = serialization::DECL_OBJC_PROTOCOL;
+}
+
+void ASTDeclWriter::VisitObjCAtDefsFieldDecl(ObjCAtDefsFieldDecl *D) {
+ VisitFieldDecl(D);
+ Code = serialization::DECL_OBJC_AT_DEFS_FIELD;
+}
+
+void ASTDeclWriter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
+ VisitObjCContainerDecl(D);
+ Writer.AddSourceLocation(D->getCategoryNameLoc(), Record);
+ Writer.AddSourceLocation(D->getIvarLBraceLoc(), Record);
+ Writer.AddSourceLocation(D->getIvarRBraceLoc(), Record);
+ Writer.AddDeclRef(D->getClassInterface(), Record);
+ Record.push_back(D->protocol_size());
+ for (ObjCCategoryDecl::protocol_iterator
+ I = D->protocol_begin(), IEnd = D->protocol_end(); I != IEnd; ++I)
+ Writer.AddDeclRef(*I, Record);
+ for (ObjCCategoryDecl::protocol_loc_iterator
+ PL = D->protocol_loc_begin(), PLEnd = D->protocol_loc_end();
+ PL != PLEnd; ++PL)
+ Writer.AddSourceLocation(*PL, Record);
+ Record.push_back(D->hasSynthBitfield());
+ Code = serialization::DECL_OBJC_CATEGORY;
+}
+
+void ASTDeclWriter::VisitObjCCompatibleAliasDecl(ObjCCompatibleAliasDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddDeclRef(D->getClassInterface(), Record);
+ Code = serialization::DECL_OBJC_COMPATIBLE_ALIAS;
+}
+
+void ASTDeclWriter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddSourceLocation(D->getAtLoc(), Record);
+ Writer.AddSourceLocation(D->getLParenLoc(), Record);
+ Writer.AddTypeSourceInfo(D->getTypeSourceInfo(), Record);
+ // FIXME: stable encoding
+ Record.push_back((unsigned)D->getPropertyAttributes());
+ Record.push_back((unsigned)D->getPropertyAttributesAsWritten());
+ // FIXME: stable encoding
+ Record.push_back((unsigned)D->getPropertyImplementation());
+ Writer.AddDeclarationName(D->getGetterName(), Record);
+ Writer.AddDeclarationName(D->getSetterName(), Record);
+ Writer.AddDeclRef(D->getGetterMethodDecl(), Record);
+ Writer.AddDeclRef(D->getSetterMethodDecl(), Record);
+ Writer.AddDeclRef(D->getPropertyIvarDecl(), Record);
+ Code = serialization::DECL_OBJC_PROPERTY;
+}
+
+void ASTDeclWriter::VisitObjCImplDecl(ObjCImplDecl *D) {
+ VisitObjCContainerDecl(D);
+ Writer.AddDeclRef(D->getClassInterface(), Record);
+ // Abstract class (no need to define a stable serialization::DECL code).
+}
+
+void ASTDeclWriter::VisitObjCCategoryImplDecl(ObjCCategoryImplDecl *D) {
+ VisitObjCImplDecl(D);
+ Writer.AddIdentifierRef(D->getIdentifier(), Record);
+ Writer.AddSourceLocation(D->getCategoryNameLoc(), Record);
+ Code = serialization::DECL_OBJC_CATEGORY_IMPL;
+}
+
+void ASTDeclWriter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
+ VisitObjCImplDecl(D);
+ Writer.AddDeclRef(D->getSuperClass(), Record);
+ Writer.AddSourceLocation(D->getIvarLBraceLoc(), Record);
+ Writer.AddSourceLocation(D->getIvarRBraceLoc(), Record);
+ Writer.AddCXXCtorInitializers(D->IvarInitializers, D->NumIvarInitializers,
+ Record);
+ Record.push_back(D->hasSynthBitfield());
+ Code = serialization::DECL_OBJC_IMPLEMENTATION;
+}
+
+void ASTDeclWriter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
+ VisitDecl(D);
+ Writer.AddSourceLocation(D->getLocStart(), Record);
+ Writer.AddDeclRef(D->getPropertyDecl(), Record);
+ Writer.AddDeclRef(D->getPropertyIvarDecl(), Record);
+ Writer.AddSourceLocation(D->getPropertyIvarDeclLoc(), Record);
+ Writer.AddStmt(D->getGetterCXXConstructor());
+ Writer.AddStmt(D->getSetterCXXAssignment());
+ Code = serialization::DECL_OBJC_PROPERTY_IMPL;
+}
+
+void ASTDeclWriter::VisitFieldDecl(FieldDecl *D) {
+ VisitDeclaratorDecl(D);
+ Record.push_back(D->isMutable());
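+  // 0: no bit-width or initializer, 1: a bit-width expression follows,
+  // 2: an in-class initializer follows.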
+ Record.push_back(D->getBitWidth()? 1 : D->hasInClassInitializer() ? 2 : 0);
+ if (D->getBitWidth())
+ Writer.AddStmt(D->getBitWidth());
+ else if (D->hasInClassInitializer())
+ Writer.AddStmt(D->getInClassInitializer());
+ if (!D->getDeclName())
+ Writer.AddDeclRef(Context.getInstantiatedFromUnnamedFieldDecl(D), Record);
+
+ if (!D->hasAttrs() &&
+ !D->isImplicit() &&
+ !D->isUsed(false) &&
+ !D->isInvalidDecl() &&
+ !D->isReferenced() &&
+ !D->isTopLevelDeclInObjCContainer() &&
+ !D->isModulePrivate() &&
+ !D->getBitWidth() &&
+ !D->hasInClassInitializer() &&
+ !D->hasExtInfo() &&
+ !ObjCIvarDecl::classofKind(D->getKind()) &&
+ !ObjCAtDefsFieldDecl::classofKind(D->getKind()) &&
+ D->getDeclName())
+ AbbrevToUse = Writer.getDeclFieldAbbrev();
+
+ Code = serialization::DECL_FIELD;
+}
+
+void ASTDeclWriter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
+ VisitValueDecl(D);
+ Record.push_back(D->getChainingSize());
+
+ for (IndirectFieldDecl::chain_iterator
+ P = D->chain_begin(),
+ PEnd = D->chain_end(); P != PEnd; ++P)
+ Writer.AddDeclRef(*P, Record);
+ Code = serialization::DECL_INDIRECTFIELD;
+}
+
+void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
+ VisitRedeclarable(D);
+ VisitDeclaratorDecl(D);
+ Record.push_back(D->getStorageClass()); // FIXME: stable encoding
+ Record.push_back(D->getStorageClassAsWritten());
+ Record.push_back(D->isThreadSpecified());
+ Record.push_back(D->getInitStyle());
+ Record.push_back(D->isExceptionVariable());
+ Record.push_back(D->isNRVOVariable());
+ Record.push_back(D->isCXXForRangeDecl());
+ Record.push_back(D->isARCPseudoStrong());
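+  // Initializer encoding: 0 = no initializer, 1 = ICE status not yet known,
+  // 2 = not an ICE, 3 = an integral constant expression.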
+ if (D->getInit()) {
+ Record.push_back(!D->isInitKnownICE() ? 1 : (D->isInitICE() ? 3 : 2));
+ Writer.AddStmt(D->getInit());
+ } else {
+ Record.push_back(0);
+ }
+
+ MemberSpecializationInfo *SpecInfo
+ = D->isStaticDataMember() ? D->getMemberSpecializationInfo() : 0;
+ Record.push_back(SpecInfo != 0);
+ if (SpecInfo) {
+ Writer.AddDeclRef(SpecInfo->getInstantiatedFrom(), Record);
+ Record.push_back(SpecInfo->getTemplateSpecializationKind());
+ Writer.AddSourceLocation(SpecInfo->getPointOfInstantiation(), Record);
+ }
+
+ if (!D->hasAttrs() &&
+ !D->isImplicit() &&
+ !D->isUsed(false) &&
+ !D->isInvalidDecl() &&
+ !D->isReferenced() &&
+ !D->isTopLevelDeclInObjCContainer() &&
+ D->getAccess() == AS_none &&
+ !D->isModulePrivate() &&
+ D->getDeclName().getNameKind() == DeclarationName::Identifier &&
+ !D->hasExtInfo() &&
+ D->getFirstDeclaration() == D->getMostRecentDecl() &&
+ D->getInitStyle() == VarDecl::CInit &&
+ D->getInit() == 0 &&
+ !isa<ParmVarDecl>(D) &&
+ !SpecInfo)
+ AbbrevToUse = Writer.getDeclVarAbbrev();
+
+ Code = serialization::DECL_VAR;
+}
+
+void ASTDeclWriter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
+ VisitVarDecl(D);
+ Code = serialization::DECL_IMPLICIT_PARAM;
+}
+
+void ASTDeclWriter::VisitParmVarDecl(ParmVarDecl *D) {
+ VisitVarDecl(D);
+ Record.push_back(D->isObjCMethodParameter());
+ Record.push_back(D->getFunctionScopeDepth());
+ Record.push_back(D->getFunctionScopeIndex());
+ Record.push_back(D->getObjCDeclQualifier()); // FIXME: stable encoding
+ Record.push_back(D->isKNRPromoted());
+ Record.push_back(D->hasInheritedDefaultArg());
+ Record.push_back(D->hasUninstantiatedDefaultArg());
+ if (D->hasUninstantiatedDefaultArg())
+ Writer.AddStmt(D->getUninstantiatedDefaultArg());
+ Code = serialization::DECL_PARM_VAR;
+
+ assert(!D->isARCPseudoStrong()); // can be true of ImplicitParamDecl
+
+ // If the assumptions about the DECL_PARM_VAR abbrev are true, use it. Here
+ // we dynamically check for the properties that we optimize for, but don't
+ // know are true of all PARM_VAR_DECLs.
+ if (!D->hasAttrs() &&
+ !D->hasExtInfo() &&
+ !D->isImplicit() &&
+ !D->isUsed(false) &&
+ !D->isInvalidDecl() &&
+ !D->isReferenced() &&
+ D->getAccess() == AS_none &&
+ !D->isModulePrivate() &&
+ D->getStorageClass() == 0 &&
+ D->getInitStyle() == VarDecl::CInit && // Can params have anything else?
+ D->getFunctionScopeDepth() == 0 &&
+ D->getObjCDeclQualifier() == 0 &&
+ !D->isKNRPromoted() &&
+ !D->hasInheritedDefaultArg() &&
+ D->getInit() == 0 &&
+ !D->hasUninstantiatedDefaultArg()) // No default expr.
+ AbbrevToUse = Writer.getDeclParmVarAbbrev();
+
+ // Check things we know are true of *every* PARM_VAR_DECL, which is more than
+ // just us assuming it.
+ assert(!D->isThreadSpecified() && "PARM_VAR_DECL can't be __thread");
+ assert(D->getAccess() == AS_none && "PARM_VAR_DECL can't be public/private");
+ assert(!D->isExceptionVariable() && "PARM_VAR_DECL can't be exception var");
+ assert(D->getPreviousDecl() == 0 && "PARM_VAR_DECL can't be redecl");
+ assert(!D->isStaticDataMember() &&
+ "PARM_VAR_DECL can't be static data member");
+}
+
+void ASTDeclWriter::VisitFileScopeAsmDecl(FileScopeAsmDecl *D) {
+ VisitDecl(D);
+ Writer.AddStmt(D->getAsmString());
+ Writer.AddSourceLocation(D->getRParenLoc(), Record);
+ Code = serialization::DECL_FILE_SCOPE_ASM;
+}
+
+void ASTDeclWriter::VisitBlockDecl(BlockDecl *D) {
+ VisitDecl(D);
+ Writer.AddStmt(D->getBody());
+ Writer.AddTypeSourceInfo(D->getSignatureAsWritten(), Record);
+ Record.push_back(D->param_size());
+ for (FunctionDecl::param_iterator P = D->param_begin(), PEnd = D->param_end();
+ P != PEnd; ++P)
+ Writer.AddDeclRef(*P, Record);
+ Record.push_back(D->capturesCXXThis());
+ Record.push_back(D->getNumCaptures());
+ for (BlockDecl::capture_iterator
+ i = D->capture_begin(), e = D->capture_end(); i != e; ++i) {
+ const BlockDecl::Capture &capture = *i;
+ Writer.AddDeclRef(capture.getVariable(), Record);
+
+ unsigned flags = 0;
+ if (capture.isByRef()) flags |= 1;
+ if (capture.isNested()) flags |= 2;
+ if (capture.hasCopyExpr()) flags |= 4;
+ Record.push_back(flags);
+
+ if (capture.hasCopyExpr()) Writer.AddStmt(capture.getCopyExpr());
+ }
+
+ Code = serialization::DECL_BLOCK;
+}
+
+void ASTDeclWriter::VisitLinkageSpecDecl(LinkageSpecDecl *D) {
+ VisitDecl(D);
+ Record.push_back(D->getLanguage());
+ Writer.AddSourceLocation(D->getExternLoc(), Record);
+ Writer.AddSourceLocation(D->getRBraceLoc(), Record);
+ Code = serialization::DECL_LINKAGE_SPEC;
+}
+
+void ASTDeclWriter::VisitLabelDecl(LabelDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddSourceLocation(D->getLocStart(), Record);
+ Code = serialization::DECL_LABEL;
+}
+
+
+void ASTDeclWriter::VisitNamespaceDecl(NamespaceDecl *D) {
+ VisitRedeclarable(D);
+ VisitNamedDecl(D);
+ Record.push_back(D->isInline());
+ Writer.AddSourceLocation(D->getLocStart(), Record);
+ Writer.AddSourceLocation(D->getRBraceLoc(), Record);
+
+ if (D->isOriginalNamespace())
+ Writer.AddDeclRef(D->getAnonymousNamespace(), Record);
+ Code = serialization::DECL_NAMESPACE;
+
+ if (Writer.hasChain() && !D->isOriginalNamespace() &&
+ D->getOriginalNamespace()->isFromASTFile()) {
+ NamespaceDecl *NS = D->getOriginalNamespace();
+ Writer.AddUpdatedDeclContext(NS);
+
+ // Make sure all visible decls are written. They will be recorded later.
+ if (StoredDeclsMap *Map = NS->buildLookup()) {
+ for (StoredDeclsMap::iterator D = Map->begin(), DEnd = Map->end();
+ D != DEnd; ++D) {
+ DeclContext::lookup_result Result = D->second.getLookupResult();
+ while (Result.first != Result.second) {
+ Writer.GetDeclRef(*Result.first);
+ ++Result.first;
+ }
+ }
+ }
+ }
+
+ if (Writer.hasChain() && D->isAnonymousNamespace() &&
+ D == D->getMostRecentDecl()) {
+    // This is the most recent reopening of the anonymous namespace. If its parent
+ // is in a previous PCH (or is the TU), mark that parent for update, because
+ // the original namespace always points to the latest re-opening of its
+ // anonymous namespace.
+ Decl *Parent = cast<Decl>(
+ D->getParent()->getRedeclContext()->getPrimaryContext());
+ if (Parent->isFromASTFile() || isa<TranslationUnitDecl>(Parent)) {
+ ASTWriter::UpdateRecord &Record = Writer.DeclUpdates[Parent];
+ Record.push_back(UPD_CXX_ADDED_ANONYMOUS_NAMESPACE);
+ Writer.AddDeclRef(D, Record);
+ }
+ }
+}
+
+void ASTDeclWriter::VisitNamespaceAliasDecl(NamespaceAliasDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddSourceLocation(D->getNamespaceLoc(), Record);
+ Writer.AddSourceLocation(D->getTargetNameLoc(), Record);
+ Writer.AddNestedNameSpecifierLoc(D->getQualifierLoc(), Record);
+ Writer.AddDeclRef(D->getNamespace(), Record);
+ Code = serialization::DECL_NAMESPACE_ALIAS;
+}
+
+void ASTDeclWriter::VisitUsingDecl(UsingDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddSourceLocation(D->getUsingLocation(), Record);
+ Writer.AddNestedNameSpecifierLoc(D->getQualifierLoc(), Record);
+ Writer.AddDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record);
+ Writer.AddDeclRef(D->FirstUsingShadow.getPointer(), Record);
+ Record.push_back(D->isTypeName());
+ Writer.AddDeclRef(Context.getInstantiatedFromUsingDecl(D), Record);
+ Code = serialization::DECL_USING;
+}
+
+void ASTDeclWriter::VisitUsingShadowDecl(UsingShadowDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddDeclRef(D->getTargetDecl(), Record);
+ Writer.AddDeclRef(D->UsingOrNextShadow, Record);
+ Writer.AddDeclRef(Context.getInstantiatedFromUsingShadowDecl(D), Record);
+ Code = serialization::DECL_USING_SHADOW;
+}
+
+void ASTDeclWriter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddSourceLocation(D->getUsingLoc(), Record);
+ Writer.AddSourceLocation(D->getNamespaceKeyLocation(), Record);
+ Writer.AddNestedNameSpecifierLoc(D->getQualifierLoc(), Record);
+ Writer.AddDeclRef(D->getNominatedNamespace(), Record);
+ Writer.AddDeclRef(dyn_cast<Decl>(D->getCommonAncestor()), Record);
+ Code = serialization::DECL_USING_DIRECTIVE;
+}
+
+void ASTDeclWriter::VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D) {
+ VisitValueDecl(D);
+ Writer.AddSourceLocation(D->getUsingLoc(), Record);
+ Writer.AddNestedNameSpecifierLoc(D->getQualifierLoc(), Record);
+ Writer.AddDeclarationNameLoc(D->DNLoc, D->getDeclName(), Record);
+ Code = serialization::DECL_UNRESOLVED_USING_VALUE;
+}
+
+void ASTDeclWriter::VisitUnresolvedUsingTypenameDecl(
+ UnresolvedUsingTypenameDecl *D) {
+ VisitTypeDecl(D);
+ Writer.AddSourceLocation(D->getTypenameLoc(), Record);
+ Writer.AddNestedNameSpecifierLoc(D->getQualifierLoc(), Record);
+ Code = serialization::DECL_UNRESOLVED_USING_TYPENAME;
+}
+
+void ASTDeclWriter::VisitCXXRecordDecl(CXXRecordDecl *D) {
+ VisitRecordDecl(D);
+ Record.push_back(D->isThisDeclarationADefinition());
+ if (D->isThisDeclarationADefinition())
+ Writer.AddCXXDefinitionData(D, Record);
+
+ enum {
+ CXXRecNotTemplate = 0, CXXRecTemplate, CXXRecMemberSpecialization
+ };
+ if (ClassTemplateDecl *TemplD = D->getDescribedClassTemplate()) {
+ Record.push_back(CXXRecTemplate);
+ Writer.AddDeclRef(TemplD, Record);
+ } else if (MemberSpecializationInfo *MSInfo
+ = D->getMemberSpecializationInfo()) {
+ Record.push_back(CXXRecMemberSpecialization);
+ Writer.AddDeclRef(MSInfo->getInstantiatedFrom(), Record);
+ Record.push_back(MSInfo->getTemplateSpecializationKind());
+ Writer.AddSourceLocation(MSInfo->getPointOfInstantiation(), Record);
+ } else {
+ Record.push_back(CXXRecNotTemplate);
+ }
+
+ // Store the key function to avoid deserializing every method so we can
+ // compute it.
+ if (D->IsCompleteDefinition)
+ Writer.AddDeclRef(Context.getKeyFunction(D), Record);
+
+ Code = serialization::DECL_CXX_RECORD;
+}
+
+void ASTDeclWriter::VisitCXXMethodDecl(CXXMethodDecl *D) {
+ VisitFunctionDecl(D);
+ Record.push_back(D->size_overridden_methods());
+ for (CXXMethodDecl::method_iterator
+ I = D->begin_overridden_methods(), E = D->end_overridden_methods();
+ I != E; ++I)
+ Writer.AddDeclRef(*I, Record);
+ Code = serialization::DECL_CXX_METHOD;
+}
+
+void ASTDeclWriter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
+ VisitCXXMethodDecl(D);
+
+ Record.push_back(D->IsExplicitSpecified);
+ Record.push_back(D->ImplicitlyDefined);
+ Writer.AddCXXCtorInitializers(D->CtorInitializers, D->NumCtorInitializers,
+ Record);
+
+ Code = serialization::DECL_CXX_CONSTRUCTOR;
+}
+
+void ASTDeclWriter::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
+ VisitCXXMethodDecl(D);
+
+ Record.push_back(D->ImplicitlyDefined);
+ Writer.AddDeclRef(D->OperatorDelete, Record);
+
+ Code = serialization::DECL_CXX_DESTRUCTOR;
+}
+
+void ASTDeclWriter::VisitCXXConversionDecl(CXXConversionDecl *D) {
+ VisitCXXMethodDecl(D);
+ Record.push_back(D->IsExplicitSpecified);
+ Code = serialization::DECL_CXX_CONVERSION;
+}
+
+void ASTDeclWriter::VisitImportDecl(ImportDecl *D) {
+ VisitDecl(D);
+ ArrayRef<SourceLocation> IdentifierLocs = D->getIdentifierLocs();
+ Record.push_back(!IdentifierLocs.empty());
+ if (IdentifierLocs.empty()) {
+ Writer.AddSourceLocation(D->getLocEnd(), Record);
+ Record.push_back(1);
+ } else {
+ for (unsigned I = 0, N = IdentifierLocs.size(); I != N; ++I)
+ Writer.AddSourceLocation(IdentifierLocs[I], Record);
+ Record.push_back(IdentifierLocs.size());
+ }
+ // Note: the number of source locations must always be the last element in
+ // the record.
+ Code = serialization::DECL_IMPORT;
+}
+
+void ASTDeclWriter::VisitAccessSpecDecl(AccessSpecDecl *D) {
+ VisitDecl(D);
+ Writer.AddSourceLocation(D->getColonLoc(), Record);
+ Code = serialization::DECL_ACCESS_SPEC;
+}
+
+void ASTDeclWriter::VisitFriendDecl(FriendDecl *D) {
+ VisitDecl(D);
+ Record.push_back(D->Friend.is<TypeSourceInfo*>());
+ if (D->Friend.is<TypeSourceInfo*>())
+ Writer.AddTypeSourceInfo(D->Friend.get<TypeSourceInfo*>(), Record);
+ else
+ Writer.AddDeclRef(D->Friend.get<NamedDecl*>(), Record);
+ Writer.AddDeclRef(D->getNextFriend(), Record);
+ Record.push_back(D->UnsupportedFriend);
+ Writer.AddSourceLocation(D->FriendLoc, Record);
+ Code = serialization::DECL_FRIEND;
+}
+
+void ASTDeclWriter::VisitFriendTemplateDecl(FriendTemplateDecl *D) {
+ VisitDecl(D);
+ Record.push_back(D->getNumTemplateParameters());
+ for (unsigned i = 0, e = D->getNumTemplateParameters(); i != e; ++i)
+ Writer.AddTemplateParameterList(D->getTemplateParameterList(i), Record);
+ Record.push_back(D->getFriendDecl() != 0);
+ if (D->getFriendDecl())
+ Writer.AddDeclRef(D->getFriendDecl(), Record);
+ else
+ Writer.AddTypeSourceInfo(D->getFriendType(), Record);
+ Writer.AddSourceLocation(D->getFriendLoc(), Record);
+ Code = serialization::DECL_FRIEND_TEMPLATE;
+}
+
+void ASTDeclWriter::VisitTemplateDecl(TemplateDecl *D) {
+ VisitNamedDecl(D);
+
+ Writer.AddDeclRef(D->getTemplatedDecl(), Record);
+ Writer.AddTemplateParameterList(D->getTemplateParameters(), Record);
+}
+
+void ASTDeclWriter::VisitRedeclarableTemplateDecl(RedeclarableTemplateDecl *D) {
+ VisitRedeclarable(D);
+
+ // Emit data to initialize CommonOrPrev before VisitTemplateDecl so that
+ // getCommonPtr() can be used while this is still initializing.
+ if (D->isFirstDeclaration()) {
+ // This declaration owns the 'common' pointer, so serialize that data now.
+ Writer.AddDeclRef(D->getInstantiatedFromMemberTemplate(), Record);
+ if (D->getInstantiatedFromMemberTemplate())
+ Record.push_back(D->isMemberSpecialization());
+ }
+
+ VisitTemplateDecl(D);
+ Record.push_back(D->getIdentifierNamespace());
+}
+
+void ASTDeclWriter::VisitClassTemplateDecl(ClassTemplateDecl *D) {
+ VisitRedeclarableTemplateDecl(D);
+
+ if (D->isFirstDeclaration()) {
+ typedef llvm::FoldingSet<ClassTemplateSpecializationDecl> CTSDSetTy;
+ CTSDSetTy &CTSDSet = D->getSpecializations();
+ Record.push_back(CTSDSet.size());
+ for (CTSDSetTy::iterator I=CTSDSet.begin(), E = CTSDSet.end(); I!=E; ++I) {
+ assert(I->isCanonicalDecl() && "Expected only canonical decls in set");
+ Writer.AddDeclRef(&*I, Record);
+ }
+
+ typedef llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> CTPSDSetTy;
+ CTPSDSetTy &CTPSDSet = D->getPartialSpecializations();
+ Record.push_back(CTPSDSet.size());
+ for (CTPSDSetTy::iterator I=CTPSDSet.begin(), E=CTPSDSet.end(); I!=E; ++I) {
+ assert(I->isCanonicalDecl() && "Expected only canonical decls in set");
+ Writer.AddDeclRef(&*I, Record);
+ }
+
+ // InjectedClassNameType is computed, no need to write it.
+ }
+ Code = serialization::DECL_CLASS_TEMPLATE;
+}
+
+void ASTDeclWriter::VisitClassTemplateSpecializationDecl(
+ ClassTemplateSpecializationDecl *D) {
+ VisitCXXRecordDecl(D);
+
+ llvm::PointerUnion<ClassTemplateDecl *,
+ ClassTemplatePartialSpecializationDecl *> InstFrom
+ = D->getSpecializedTemplateOrPartial();
+ if (Decl *InstFromD = InstFrom.dyn_cast<ClassTemplateDecl *>()) {
+ Writer.AddDeclRef(InstFromD, Record);
+ } else {
+ Writer.AddDeclRef(InstFrom.get<ClassTemplatePartialSpecializationDecl *>(),
+ Record);
+ Writer.AddTemplateArgumentList(&D->getTemplateInstantiationArgs(), Record);
+ }
+
+ // Explicit info.
+ Writer.AddTypeSourceInfo(D->getTypeAsWritten(), Record);
+ if (D->getTypeAsWritten()) {
+ Writer.AddSourceLocation(D->getExternLoc(), Record);
+ Writer.AddSourceLocation(D->getTemplateKeywordLoc(), Record);
+ }
+
+ Writer.AddTemplateArgumentList(&D->getTemplateArgs(), Record);
+ Writer.AddSourceLocation(D->getPointOfInstantiation(), Record);
+ Record.push_back(D->getSpecializationKind());
+
+ if (D->isCanonicalDecl()) {
+ // When reading, we'll add it to the folding set of the following template.
+ Writer.AddDeclRef(D->getSpecializedTemplate()->getCanonicalDecl(), Record);
+ }
+
+ Code = serialization::DECL_CLASS_TEMPLATE_SPECIALIZATION;
+}
+
+void ASTDeclWriter::VisitClassTemplatePartialSpecializationDecl(
+ ClassTemplatePartialSpecializationDecl *D) {
+ VisitClassTemplateSpecializationDecl(D);
+
+ Writer.AddTemplateParameterList(D->getTemplateParameters(), Record);
+
+ Record.push_back(D->getNumTemplateArgsAsWritten());
+ for (int i = 0, e = D->getNumTemplateArgsAsWritten(); i != e; ++i)
+ Writer.AddTemplateArgumentLoc(D->getTemplateArgsAsWritten()[i], Record);
+
+ Record.push_back(D->getSequenceNumber());
+
+ // These are read/set from/to the first declaration.
+ if (D->getPreviousDecl() == 0) {
+ Writer.AddDeclRef(D->getInstantiatedFromMember(), Record);
+ Record.push_back(D->isMemberSpecialization());
+ }
+
+ Code = serialization::DECL_CLASS_TEMPLATE_PARTIAL_SPECIALIZATION;
+}
+
+void ASTDeclWriter::VisitClassScopeFunctionSpecializationDecl(
+ ClassScopeFunctionSpecializationDecl *D) {
+ VisitDecl(D);
+ Writer.AddDeclRef(D->getSpecialization(), Record);
+ Code = serialization::DECL_CLASS_SCOPE_FUNCTION_SPECIALIZATION;
+}
+
+
+void ASTDeclWriter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
+ VisitRedeclarableTemplateDecl(D);
+
+ if (D->isFirstDeclaration()) {
+ // This FunctionTemplateDecl owns the CommonPtr; write it.
+
+ // Write the function specialization declarations.
+ Record.push_back(D->getSpecializations().size());
+ for (llvm::FoldingSet<FunctionTemplateSpecializationInfo>::iterator
+ I = D->getSpecializations().begin(),
+ E = D->getSpecializations().end() ; I != E; ++I) {
+ assert(I->Function->isCanonicalDecl() &&
+ "Expected only canonical decls in set");
+ Writer.AddDeclRef(I->Function, Record);
+ }
+ }
+ Code = serialization::DECL_FUNCTION_TEMPLATE;
+}
+
+void ASTDeclWriter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
+ VisitTypeDecl(D);
+
+ Record.push_back(D->wasDeclaredWithTypename());
+ Record.push_back(D->defaultArgumentWasInherited());
+ Writer.AddTypeSourceInfo(D->getDefaultArgumentInfo(), Record);
+
+ Code = serialization::DECL_TEMPLATE_TYPE_PARM;
+}
+
+void ASTDeclWriter::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
+ // For an expanded parameter pack, record the number of expansion types here
+ // so that it's easier for
+ if (D->isExpandedParameterPack())
+ Record.push_back(D->getNumExpansionTypes());
+
+ VisitDeclaratorDecl(D);
+ // TemplateParmPosition.
+ Record.push_back(D->getDepth());
+ Record.push_back(D->getPosition());
+
+ if (D->isExpandedParameterPack()) {
+ for (unsigned I = 0, N = D->getNumExpansionTypes(); I != N; ++I) {
+ Writer.AddTypeRef(D->getExpansionType(I), Record);
+ Writer.AddTypeSourceInfo(D->getExpansionTypeSourceInfo(I), Record);
+ }
+
+ Code = serialization::DECL_EXPANDED_NON_TYPE_TEMPLATE_PARM_PACK;
+ } else {
+ // Rest of NonTypeTemplateParmDecl.
+ Record.push_back(D->isParameterPack());
+ Record.push_back(D->getDefaultArgument() != 0);
+ if (D->getDefaultArgument()) {
+ Writer.AddStmt(D->getDefaultArgument());
+ Record.push_back(D->defaultArgumentWasInherited());
+ }
+ Code = serialization::DECL_NON_TYPE_TEMPLATE_PARM;
+ }
+}
+
+void ASTDeclWriter::VisitTemplateTemplateParmDecl(TemplateTemplateParmDecl *D) {
+ VisitTemplateDecl(D);
+ // TemplateParmPosition.
+ Record.push_back(D->getDepth());
+ Record.push_back(D->getPosition());
+ // Rest of TemplateTemplateParmDecl.
+ Writer.AddTemplateArgumentLoc(D->getDefaultArgument(), Record);
+ Record.push_back(D->defaultArgumentWasInherited());
+ Record.push_back(D->isParameterPack());
+ Code = serialization::DECL_TEMPLATE_TEMPLATE_PARM;
+}
+
+void ASTDeclWriter::VisitTypeAliasTemplateDecl(TypeAliasTemplateDecl *D) {
+ VisitRedeclarableTemplateDecl(D);
+ Code = serialization::DECL_TYPE_ALIAS_TEMPLATE;
+}
+
+void ASTDeclWriter::VisitStaticAssertDecl(StaticAssertDecl *D) {
+ VisitDecl(D);
+ Writer.AddStmt(D->getAssertExpr());
+ Writer.AddStmt(D->getMessage());
+ Writer.AddSourceLocation(D->getRParenLoc(), Record);
+ Code = serialization::DECL_STATIC_ASSERT;
+}
+
+/// \brief Emit the DeclContext part of a declaration context decl.
+///
+/// \param LexicalOffset the offset at which the DECL_CONTEXT_LEXICAL
+/// block for this declaration context is stored. May be 0 to indicate
+/// that there are no declarations stored within this context.
+///
+/// \param VisibleOffset the offset at which the DECL_CONTEXT_VISIBLE
+/// block for this declaration context is stored. May be 0 to indicate
+/// that there are no declarations visible from this context. Note
+/// that this value will not be emitted for non-primary declaration
+/// contexts.
+void ASTDeclWriter::VisitDeclContext(DeclContext *DC, uint64_t LexicalOffset,
+ uint64_t VisibleOffset) {
+ Record.push_back(LexicalOffset);
+ Record.push_back(VisibleOffset);
+}
+
+template <typename T>
+void ASTDeclWriter::VisitRedeclarable(Redeclarable<T> *D) {
+ T *First = D->getFirstDeclaration();
+ if (First->getMostRecentDecl() != First) {
+ // There is more than one declaration of this entity, so we will need to
+ // write a redeclaration chain.
+ Writer.AddDeclRef(First, Record);
+ Writer.Redeclarations.insert(First);
+
+ // Make sure that we serialize both the previous and the most-recent
+ // declarations, which (transitively) ensures that all declarations in the
+ // chain get serialized.
+ (void)Writer.GetDeclRef(D->getPreviousDecl());
+ (void)Writer.GetDeclRef(First->getMostRecentDecl());
+ } else {
+    // We use the sentinel value 0 to indicate that this is the only declaration.
+ Record.push_back(0);
+ }
+
+}
+
+//===----------------------------------------------------------------------===//
+// ASTWriter Implementation
+//===----------------------------------------------------------------------===//
+
+void ASTWriter::WriteDeclsBlockAbbrevs() {
+ using namespace llvm;
+
+ BitCodeAbbrev *Abv;
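+  // Each abbreviation below fixes the operand layout of one frequently
+  // emitted declaration record so the bitstream writer can encode it
+  // compactly; the operand order must mirror the corresponding Visit method.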
+
+ // Abbreviation for DECL_FIELD
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_FIELD));
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
+ Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
+ Abv->Add(BitCodeAbbrevOp(0)); // isUsed
+ Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // AccessSpecifier
+ Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
+ // ValueDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ // DeclaratorDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerStartLoc
+ Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
+ // FieldDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isMutable
+  Abv->Add(BitCodeAbbrevOp(0));                       // getBitWidth
+ // Type Source Info
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
+ DeclFieldAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for DECL_OBJC_IVAR
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_OBJC_IVAR));
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
+ Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
+ Abv->Add(BitCodeAbbrevOp(0)); // isUsed
+ Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // AccessSpecifier
+ Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
+ // ValueDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ // DeclaratorDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerStartLoc
+ Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
+ // FieldDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isMutable
+ Abv->Add(BitCodeAbbrevOp(0)); //getBitWidth
+ // ObjC Ivar
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getAccessControl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getSynthesize
+ // Type Source Info
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
+ DeclObjCIvarAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for DECL_ENUM
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_ENUM));
+ // Redeclarable
+ Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
+ Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
+ Abv->Add(BitCodeAbbrevOp(0)); // isUsed
+ Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
+ Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
+ Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
+ // TypeDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref
+ // TagDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // IdentifierNamespace
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getTagKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isCompleteDefinition
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // EmbeddedInDeclarator
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsFreeStanding
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SourceLocation
+ Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypedefNameAnonDecl
+ // EnumDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // AddTypeRef
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // IntegerType
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getPromotionType
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getNumPositiveBits
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getNumNegativeBits
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isScoped
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isScopedUsingClassTag
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isFixed
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InstantiatedMembEnum
+ // DC
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalOffset
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // VisibleOffset
+ DeclEnumAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for DECL_RECORD
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_RECORD));
+ // Redeclarable
+ Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
+ Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
+ Abv->Add(BitCodeAbbrevOp(0)); // isUsed
+ Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
+ Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
+ Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
+ // TypeDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref
+ // TagDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // IdentifierNamespace
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getTagKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isCompleteDefinition
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // EmbeddedInDeclarator
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsFreeStanding
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SourceLocation
+ Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypedefNameAnonDecl
+ // RecordDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // FlexibleArrayMember
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // AnonymousStructUnion
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // hasObjectMember
+ // DC
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalOffset
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // VisibleOffset
+ DeclRecordAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for DECL_PARM_VAR
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_PARM_VAR));
+ // Redeclarable
+ Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
+ Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
+ Abv->Add(BitCodeAbbrevOp(0)); // isUsed
+ Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
+ Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
+ Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
+ // ValueDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ // DeclaratorDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerStartLoc
+ Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
+ // VarDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // StorageClass
+ Abv->Add(BitCodeAbbrevOp(0)); // StorageClassAsWritten
+ Abv->Add(BitCodeAbbrevOp(0)); // isThreadSpecified
+ Abv->Add(BitCodeAbbrevOp(0)); // hasCXXDirectInitializer
+ Abv->Add(BitCodeAbbrevOp(0)); // isExceptionVariable
+ Abv->Add(BitCodeAbbrevOp(0)); // isNRVOVariable
+ Abv->Add(BitCodeAbbrevOp(0)); // isCXXForRangeDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // isARCPseudoStrong
+ Abv->Add(BitCodeAbbrevOp(0)); // HasInit
+ Abv->Add(BitCodeAbbrevOp(0)); // HasMemberSpecializationInfo
+ // ParmVarDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsObjCMethodParameter
+ Abv->Add(BitCodeAbbrevOp(0)); // ScopeDepth
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ScopeIndex
+ Abv->Add(BitCodeAbbrevOp(0)); // ObjCDeclQualifier
+ Abv->Add(BitCodeAbbrevOp(0)); // KNRPromoted
+ Abv->Add(BitCodeAbbrevOp(0)); // HasInheritedDefaultArg
+ Abv->Add(BitCodeAbbrevOp(0)); // HasUninstantiatedDefaultArg
+ // Type Source Info
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
+ DeclParmVarAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for DECL_TYPEDEF
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_TYPEDEF));
+ // Redeclarable
+ Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
+ Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
+ Abv->Add(BitCodeAbbrevOp(0)); // isUsed
+ Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
+ Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
+ Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
+ // TypeDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Source Location
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type Ref
+ // TypedefDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
+ DeclTypedefAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for DECL_VAR
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_VAR));
+ // Redeclarable
+ Abv->Add(BitCodeAbbrevOp(0)); // No redeclaration
+ // Decl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclContext
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // LexicalDeclContext
+ Abv->Add(BitCodeAbbrevOp(0)); // isInvalidDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // HasAttrs
+ Abv->Add(BitCodeAbbrevOp(0)); // isImplicit
+ Abv->Add(BitCodeAbbrevOp(0)); // isUsed
+ Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
+ Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
+ Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
+ Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
+ // NamedDecl
+ Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Name
+ // ValueDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ // DeclaratorDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // InnerStartLoc
+ Abv->Add(BitCodeAbbrevOp(0)); // hasExtInfo
+ // VarDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // StorageClass
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // StorageClassAsWritten
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isThreadSpecified
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // CXXDirectInitializer
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isExceptionVariable
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isNRVOVariable
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isCXXForRangeDecl
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isARCPseudoStrong
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // HasInit
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // HasMemberSpecInfo
+ // Type Source Info
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // TypeLoc
+ DeclVarAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for EXPR_DECL_REF
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::EXPR_DECL_REF));
+ //Stmt
+ //Expr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
+ //DeclRefExpr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //HasQualifier
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //GetDeclFound
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ExplicitTemplateArgs
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //HadMultipleCandidates
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //RefersToEnclosingLocal
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // DeclRef
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
+ DeclRefExprAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for EXPR_INTEGER_LITERAL
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::EXPR_INTEGER_LITERAL));
+ //Stmt
+ //Expr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
+ //Integer Literal
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
+ Abv->Add(BitCodeAbbrevOp(32)); // Bit Width
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Value
+ IntegerLiteralAbbrev = Stream.EmitAbbrev(Abv);
+
+ // Abbreviation for EXPR_CHARACTER_LITERAL
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::EXPR_CHARACTER_LITERAL));
+ //Stmt
+ //Expr
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
+ //Character Literal
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // getValue
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Location
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //IsWide
+ CharacterLiteralAbbrev = Stream.EmitAbbrev(Abv);
+
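+ // Abbreviation for DECL_CONTEXT_LEXICAL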
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_CONTEXT_LEXICAL));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ DeclContextLexicalAbbrev = Stream.EmitAbbrev(Abv);
+
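+ // Abbreviation for DECL_CONTEXT_VISIBLE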
+ Abv = new BitCodeAbbrev();
+ Abv->Add(BitCodeAbbrevOp(serialization::DECL_CONTEXT_VISIBLE));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 32));
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Blob));
+ DeclContextVisibleLookupAbbrev = Stream.EmitAbbrev(Abv);
+}
+
+/// isRequiredDecl - Check if this is a "required" Decl, which must be seen by
+/// consumers of the AST.
+///
+/// Such decls will always be deserialized from the AST file, so we would like
+/// this to be as restrictive as possible. Currently the predicate is driven by
+/// code generation requirements; if other clients have a different notion of
+/// what is "required" then we may have to consider an alternate scheme where
+/// clients can iterate over the top-level decls and get information on them,
+/// without necessarily deserializing them. We could explicitly require such
+/// clients to use a separate API call to "realize" the decl. This should be
+/// relatively painless since they would presumably only do it for top-level
+/// decls.
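+///
+/// For example, file-scope asm declarations and Objective-C implementations
+/// are always required, while a typedef typically is not.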
+static bool isRequiredDecl(const Decl *D, ASTContext &Context) {
+ // An ObjCMethodDecl is never considered "required" because its
+ // implementation container always is.
+
+ // File scoped assembly or obj-c implementation must be seen.
+ if (isa<FileScopeAsmDecl>(D) || isa<ObjCImplDecl>(D))
+ return true;
+
+ return Context.DeclMustBeEmitted(D);
+}
+
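+/// Write the given declaration to the bitstream. If the declaration is also
+/// a DeclContext, its lexical and visible blocks are emitted first so that
+/// their offsets can be recorded in the declaration's own record.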
+void ASTWriter::WriteDecl(ASTContext &Context, Decl *D) {
+ // Switch case IDs are per Decl.
+ ClearSwitchCaseIDs();
+
+ RecordData Record;
+ ASTDeclWriter W(*this, Context, Record);
+
+ // Determine the ID for this declaration.
+ serialization::DeclID ID;
+ if (D->isFromASTFile())
+ ID = getDeclID(D);
+ else {
+ serialization::DeclID &IDR = DeclIDs[D];
+ if (IDR == 0)
+ IDR = NextDeclID++;
+
+ ID = IDR;
+ }
+
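+ // Declaration IDs below FirstDeclID refer to declarations that were loaded
+ // from a previous (chained) AST file; such a declaration is being replaced.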
+ bool isReplacingADecl = ID < FirstDeclID;
+
+ // If this declaration is also a DeclContext, write blocks for the
+ // declarations that are lexically stored inside its context and those
+ // declarations that are visible from its context. These blocks
+ // are written before the declaration itself so that we can put
+ // their offsets into the record for the declaration.
+ uint64_t LexicalOffset = 0;
+ uint64_t VisibleOffset = 0;
+ DeclContext *DC = dyn_cast<DeclContext>(D);
+ if (DC) {
+ if (isReplacingADecl) {
+ // It is replacing a decl from a chained PCH; make sure that the
+ // DeclContext is fully loaded.
+ if (DC->hasExternalLexicalStorage())
+ DC->LoadLexicalDeclsFromExternalStorage();
+ if (DC->hasExternalVisibleStorage())
+ Chain->completeVisibleDeclsMap(DC);
+ }
+ LexicalOffset = WriteDeclContextLexicalBlock(Context, DC);
+ VisibleOffset = WriteDeclContextVisibleBlock(Context, DC);
+ }
+
+ if (isReplacingADecl) {
+ // We're replacing a decl in a previous file.
+ ReplacedDecls.push_back(ReplacedDeclInfo(ID, Stream.GetCurrentBitNo(),
+ D->getLocation()));
+ } else {
+ unsigned Index = ID - FirstDeclID;
+
+ // Record the offset for this declaration
+ SourceLocation Loc = D->getLocation();
+ if (DeclOffsets.size() == Index)
+ DeclOffsets.push_back(DeclOffset(Loc, Stream.GetCurrentBitNo()));
+ else if (DeclOffsets.size() < Index) {
+ DeclOffsets.resize(Index+1);
+ DeclOffsets[Index].setLocation(Loc);
+ DeclOffsets[Index].BitOffset = Stream.GetCurrentBitNo();
+ }
+
+ SourceManager &SM = Context.getSourceManager();
+ if (Loc.isValid() && SM.isLocalSourceLocation(Loc))
+ associateDeclWithFile(D, ID);
+ }
+
+ // Build and emit a record for this declaration
+ Record.clear();
+ W.Code = (serialization::DeclCode)0;
+ W.AbbrevToUse = 0;
+ W.Visit(D);
+ if (DC) W.VisitDeclContext(DC, LexicalOffset, VisibleOffset);
+
+ if (!W.Code)
+ llvm::report_fatal_error(StringRef("unexpected declaration kind '") +
+ D->getDeclKindName() + "'");
+ Stream.EmitRecord(W.Code, Record, W.AbbrevToUse);
+
+ // Flush any expressions that were written as part of this declaration.
+ FlushStmts();
+
+ // Flush C++ base specifiers, if there are any.
+ FlushCXXBaseSpecifiers();
+
+ // Note "external" declarations so that we can add them to a record in the
+ // AST file later.
+ //
+ // FIXME: This should be renamed; the predicate is much more complicated.
+ if (isRequiredDecl(D, Context))
+ ExternalDefinitions.push_back(ID);
+}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
new file mode 100644
index 0000000..827caa0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -0,0 +1,1659 @@
+//===--- ASTWriterStmt.cpp - Statement and Expression Serialization -------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements serialization for Statements and Expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Serialization/ASTWriter.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Statement/expression serialization
+//===----------------------------------------------------------------------===//
+
+namespace clang {
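+ /// Visitor that serializes a single statement or expression into an
+ /// ASTWriter record, setting \c Code to the record kind to emit and,
+ /// when one applies, \c AbbrevToUse to a previously defined abbreviation.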
+ class ASTStmtWriter : public StmtVisitor<ASTStmtWriter, void> {
+ ASTWriter &Writer;
+ ASTWriter::RecordData &Record;
+
+ public:
+ serialization::StmtCode Code;
+ unsigned AbbrevToUse;
+
+ ASTStmtWriter(ASTWriter &Writer, ASTWriter::RecordData &Record)
+ : Writer(Writer), Record(Record) { }
+
+ void AddTemplateKWAndArgsInfo(const ASTTemplateKWAndArgsInfo &Args);
+
+ void VisitStmt(Stmt *S);
+#define STMT(Type, Base) \
+ void Visit##Type(Type *);
+#include "clang/AST/StmtNodes.inc"
+ };
+}
+
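+/// Emit the source locations and explicit template arguments carried by an
+/// ASTTemplateKWAndArgsInfo structure.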
+void ASTStmtWriter::
+AddTemplateKWAndArgsInfo(const ASTTemplateKWAndArgsInfo &Args) {
+ Writer.AddSourceLocation(Args.getTemplateKeywordLoc(), Record);
+ Writer.AddSourceLocation(Args.LAngleLoc, Record);
+ Writer.AddSourceLocation(Args.RAngleLoc, Record);
+ for (unsigned i=0; i != Args.NumTemplateArgs; ++i)
+ Writer.AddTemplateArgumentLoc(Args.getTemplateArgs()[i], Record);
+}
+
+void ASTStmtWriter::VisitStmt(Stmt *S) {
+}
+
+void ASTStmtWriter::VisitNullStmt(NullStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getSemiLoc(), Record);
+ Record.push_back(S->HasLeadingEmptyMacro);
+ Code = serialization::STMT_NULL;
+}
+
+void ASTStmtWriter::VisitCompoundStmt(CompoundStmt *S) {
+ VisitStmt(S);
+ Record.push_back(S->size());
+ for (CompoundStmt::body_iterator CS = S->body_begin(), CSEnd = S->body_end();
+ CS != CSEnd; ++CS)
+ Writer.AddStmt(*CS);
+ Writer.AddSourceLocation(S->getLBracLoc(), Record);
+ Writer.AddSourceLocation(S->getRBracLoc(), Record);
+ Code = serialization::STMT_COMPOUND;
+}
+
+void ASTStmtWriter::VisitSwitchCase(SwitchCase *S) {
+ VisitStmt(S);
+ Record.push_back(Writer.getSwitchCaseID(S));
+}
+
+void ASTStmtWriter::VisitCaseStmt(CaseStmt *S) {
+ VisitSwitchCase(S);
+ Writer.AddStmt(S->getLHS());
+ Writer.AddStmt(S->getRHS());
+ Writer.AddStmt(S->getSubStmt());
+ Writer.AddSourceLocation(S->getCaseLoc(), Record);
+ Writer.AddSourceLocation(S->getEllipsisLoc(), Record);
+ Writer.AddSourceLocation(S->getColonLoc(), Record);
+ Code = serialization::STMT_CASE;
+}
+
+void ASTStmtWriter::VisitDefaultStmt(DefaultStmt *S) {
+ VisitSwitchCase(S);
+ Writer.AddStmt(S->getSubStmt());
+ Writer.AddSourceLocation(S->getDefaultLoc(), Record);
+ Writer.AddSourceLocation(S->getColonLoc(), Record);
+ Code = serialization::STMT_DEFAULT;
+}
+
+void ASTStmtWriter::VisitLabelStmt(LabelStmt *S) {
+ VisitStmt(S);
+ Writer.AddDeclRef(S->getDecl(), Record);
+ Writer.AddStmt(S->getSubStmt());
+ Writer.AddSourceLocation(S->getIdentLoc(), Record);
+ Code = serialization::STMT_LABEL;
+}
+
+void ASTStmtWriter::VisitIfStmt(IfStmt *S) {
+ VisitStmt(S);
+ Writer.AddDeclRef(S->getConditionVariable(), Record);
+ Writer.AddStmt(S->getCond());
+ Writer.AddStmt(S->getThen());
+ Writer.AddStmt(S->getElse());
+ Writer.AddSourceLocation(S->getIfLoc(), Record);
+ Writer.AddSourceLocation(S->getElseLoc(), Record);
+ Code = serialization::STMT_IF;
+}
+
+void ASTStmtWriter::VisitSwitchStmt(SwitchStmt *S) {
+ VisitStmt(S);
+ Writer.AddDeclRef(S->getConditionVariable(), Record);
+ Writer.AddStmt(S->getCond());
+ Writer.AddStmt(S->getBody());
+ Writer.AddSourceLocation(S->getSwitchLoc(), Record);
+ Record.push_back(S->isAllEnumCasesCovered());
+ for (SwitchCase *SC = S->getSwitchCaseList(); SC;
+ SC = SC->getNextSwitchCase())
+ Record.push_back(Writer.RecordSwitchCaseID(SC));
+ Code = serialization::STMT_SWITCH;
+}
+
+void ASTStmtWriter::VisitWhileStmt(WhileStmt *S) {
+ VisitStmt(S);
+ Writer.AddDeclRef(S->getConditionVariable(), Record);
+ Writer.AddStmt(S->getCond());
+ Writer.AddStmt(S->getBody());
+ Writer.AddSourceLocation(S->getWhileLoc(), Record);
+ Code = serialization::STMT_WHILE;
+}
+
+void ASTStmtWriter::VisitDoStmt(DoStmt *S) {
+ VisitStmt(S);
+ Writer.AddStmt(S->getCond());
+ Writer.AddStmt(S->getBody());
+ Writer.AddSourceLocation(S->getDoLoc(), Record);
+ Writer.AddSourceLocation(S->getWhileLoc(), Record);
+ Writer.AddSourceLocation(S->getRParenLoc(), Record);
+ Code = serialization::STMT_DO;
+}
+
+void ASTStmtWriter::VisitForStmt(ForStmt *S) {
+ VisitStmt(S);
+ Writer.AddStmt(S->getInit());
+ Writer.AddStmt(S->getCond());
+ Writer.AddDeclRef(S->getConditionVariable(), Record);
+ Writer.AddStmt(S->getInc());
+ Writer.AddStmt(S->getBody());
+ Writer.AddSourceLocation(S->getForLoc(), Record);
+ Writer.AddSourceLocation(S->getLParenLoc(), Record);
+ Writer.AddSourceLocation(S->getRParenLoc(), Record);
+ Code = serialization::STMT_FOR;
+}
+
+void ASTStmtWriter::VisitGotoStmt(GotoStmt *S) {
+ VisitStmt(S);
+ Writer.AddDeclRef(S->getLabel(), Record);
+ Writer.AddSourceLocation(S->getGotoLoc(), Record);
+ Writer.AddSourceLocation(S->getLabelLoc(), Record);
+ Code = serialization::STMT_GOTO;
+}
+
+void ASTStmtWriter::VisitIndirectGotoStmt(IndirectGotoStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getGotoLoc(), Record);
+ Writer.AddSourceLocation(S->getStarLoc(), Record);
+ Writer.AddStmt(S->getTarget());
+ Code = serialization::STMT_INDIRECT_GOTO;
+}
+
+void ASTStmtWriter::VisitContinueStmt(ContinueStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getContinueLoc(), Record);
+ Code = serialization::STMT_CONTINUE;
+}
+
+void ASTStmtWriter::VisitBreakStmt(BreakStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getBreakLoc(), Record);
+ Code = serialization::STMT_BREAK;
+}
+
+void ASTStmtWriter::VisitReturnStmt(ReturnStmt *S) {
+ VisitStmt(S);
+ Writer.AddStmt(S->getRetValue());
+ Writer.AddSourceLocation(S->getReturnLoc(), Record);
+ Writer.AddDeclRef(S->getNRVOCandidate(), Record);
+ Code = serialization::STMT_RETURN;
+}
+
+void ASTStmtWriter::VisitDeclStmt(DeclStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getStartLoc(), Record);
+ Writer.AddSourceLocation(S->getEndLoc(), Record);
+ DeclGroupRef DG = S->getDeclGroup();
+ for (DeclGroupRef::iterator D = DG.begin(), DEnd = DG.end(); D != DEnd; ++D)
+ Writer.AddDeclRef(*D, Record);
+ Code = serialization::STMT_DECL;
+}
+
+void ASTStmtWriter::VisitAsmStmt(AsmStmt *S) {
+ VisitStmt(S);
+ Record.push_back(S->getNumOutputs());
+ Record.push_back(S->getNumInputs());
+ Record.push_back(S->getNumClobbers());
+ Writer.AddSourceLocation(S->getAsmLoc(), Record);
+ Writer.AddSourceLocation(S->getRParenLoc(), Record);
+ Record.push_back(S->isVolatile());
+ Record.push_back(S->isSimple());
+ Record.push_back(S->isMSAsm());
+ Writer.AddStmt(S->getAsmString());
+
+ // Outputs
+ for (unsigned I = 0, N = S->getNumOutputs(); I != N; ++I) {
+ Writer.AddIdentifierRef(S->getOutputIdentifier(I), Record);
+ Writer.AddStmt(S->getOutputConstraintLiteral(I));
+ Writer.AddStmt(S->getOutputExpr(I));
+ }
+
+ // Inputs
+ for (unsigned I = 0, N = S->getNumInputs(); I != N; ++I) {
+ Writer.AddIdentifierRef(S->getInputIdentifier(I), Record);
+ Writer.AddStmt(S->getInputConstraintLiteral(I));
+ Writer.AddStmt(S->getInputExpr(I));
+ }
+
+ // Clobbers
+ for (unsigned I = 0, N = S->getNumClobbers(); I != N; ++I)
+ Writer.AddStmt(S->getClobber(I));
+
+ Code = serialization::STMT_ASM;
+}
+
+void ASTStmtWriter::VisitExpr(Expr *E) {
+ VisitStmt(E);
+ Writer.AddTypeRef(E->getType(), Record);
+ Record.push_back(E->isTypeDependent());
+ Record.push_back(E->isValueDependent());
+ Record.push_back(E->isInstantiationDependent());
+ Record.push_back(E->containsUnexpandedParameterPack());
+ Record.push_back(E->getValueKind());
+ Record.push_back(E->getObjectKind());
+}
+
+void ASTStmtWriter::VisitPredefinedExpr(PredefinedExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Record.push_back(E->getIdentType()); // FIXME: stable encoding
+ Code = serialization::EXPR_PREDEFINED;
+}
+
+void ASTStmtWriter::VisitDeclRefExpr(DeclRefExpr *E) {
+ VisitExpr(E);
+
+ Record.push_back(E->hasQualifier());
+ Record.push_back(E->getDecl() != E->getFoundDecl());
+ Record.push_back(E->hasTemplateKWAndArgsInfo());
+ Record.push_back(E->hadMultipleCandidates());
+ Record.push_back(E->refersToEnclosingLocal());
+
+ if (E->hasTemplateKWAndArgsInfo()) {
+ unsigned NumTemplateArgs = E->getNumTemplateArgs();
+ Record.push_back(NumTemplateArgs);
+ }
+
+ DeclarationName::NameKind nk = (E->getDecl()->getDeclName().getNameKind());
+
+ if ((!E->hasTemplateKWAndArgsInfo()) && (!E->hasQualifier()) &&
+ (E->getDecl() == E->getFoundDecl()) &&
+ nk == DeclarationName::Identifier) {
+ AbbrevToUse = Writer.getDeclRefExprAbbrev();
+ }
+
+ if (E->hasQualifier())
+ Writer.AddNestedNameSpecifierLoc(E->getQualifierLoc(), Record);
+
+ if (E->getDecl() != E->getFoundDecl())
+ Writer.AddDeclRef(E->getFoundDecl(), Record);
+
+ if (E->hasTemplateKWAndArgsInfo())
+ AddTemplateKWAndArgsInfo(*E->getTemplateKWAndArgsInfo());
+
+ Writer.AddDeclRef(E->getDecl(), Record);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Writer.AddDeclarationNameLoc(E->DNLoc, E->getDecl()->getDeclName(), Record);
+ Code = serialization::EXPR_DECL_REF;
+}
+
+void ASTStmtWriter::VisitIntegerLiteral(IntegerLiteral *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Writer.AddAPInt(E->getValue(), Record);
+
+ if (E->getValue().getBitWidth() == 32) {
+ AbbrevToUse = Writer.getIntegerLiteralAbbrev();
+ }
+
+ Code = serialization::EXPR_INTEGER_LITERAL;
+}
+
+void ASTStmtWriter::VisitFloatingLiteral(FloatingLiteral *E) {
+ VisitExpr(E);
+ Writer.AddAPFloat(E->getValue(), Record);
+ Record.push_back(E->isExact());
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Code = serialization::EXPR_FLOATING_LITERAL;
+}
+
+void ASTStmtWriter::VisitImaginaryLiteral(ImaginaryLiteral *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getSubExpr());
+ Code = serialization::EXPR_IMAGINARY_LITERAL;
+}
+
+void ASTStmtWriter::VisitStringLiteral(StringLiteral *E) {
+ VisitExpr(E);
+ Record.push_back(E->getByteLength());
+ Record.push_back(E->getNumConcatenated());
+ Record.push_back(E->getKind());
+ Record.push_back(E->isPascal());
+ // FIXME: String data should be stored as a blob at the end of the
+ // StringLiteral. However, we can't do so now because we have no
+ // provision for coping with abbreviations when we're jumping around
+ // the AST file during deserialization.
+ Record.append(E->getBytes().begin(), E->getBytes().end());
+ for (unsigned I = 0, N = E->getNumConcatenated(); I != N; ++I)
+ Writer.AddSourceLocation(E->getStrTokenLoc(I), Record);
+ Code = serialization::EXPR_STRING_LITERAL;
+}
+
+void ASTStmtWriter::VisitCharacterLiteral(CharacterLiteral *E) {
+ VisitExpr(E);
+ Record.push_back(E->getValue());
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Record.push_back(E->getKind());
+
+ AbbrevToUse = Writer.getCharacterLiteralAbbrev();
+
+ Code = serialization::EXPR_CHARACTER_LITERAL;
+}
+
+void ASTStmtWriter::VisitParenExpr(ParenExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getLParen(), Record);
+ Writer.AddSourceLocation(E->getRParen(), Record);
+ Writer.AddStmt(E->getSubExpr());
+ Code = serialization::EXPR_PAREN;
+}
+
+void ASTStmtWriter::VisitParenListExpr(ParenListExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->NumExprs);
+ for (unsigned i=0; i != E->NumExprs; ++i)
+ Writer.AddStmt(E->Exprs[i]);
+ Writer.AddSourceLocation(E->LParenLoc, Record);
+ Writer.AddSourceLocation(E->RParenLoc, Record);
+ Code = serialization::EXPR_PAREN_LIST;
+}
+
+void ASTStmtWriter::VisitUnaryOperator(UnaryOperator *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getSubExpr());
+ Record.push_back(E->getOpcode()); // FIXME: stable encoding
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Code = serialization::EXPR_UNARY_OPERATOR;
+}
+
+void ASTStmtWriter::VisitOffsetOfExpr(OffsetOfExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumComponents());
+ Record.push_back(E->getNumExpressions());
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Writer.AddTypeSourceInfo(E->getTypeSourceInfo(), Record);
+ for (unsigned I = 0, N = E->getNumComponents(); I != N; ++I) {
+ const OffsetOfExpr::OffsetOfNode &ON = E->getComponent(I);
+ Record.push_back(ON.getKind()); // FIXME: Stable encoding
+ Writer.AddSourceLocation(ON.getSourceRange().getBegin(), Record);
+ Writer.AddSourceLocation(ON.getSourceRange().getEnd(), Record);
+ switch (ON.getKind()) {
+ case OffsetOfExpr::OffsetOfNode::Array:
+ Record.push_back(ON.getArrayExprIndex());
+ break;
+
+ case OffsetOfExpr::OffsetOfNode::Field:
+ Writer.AddDeclRef(ON.getField(), Record);
+ break;
+
+ case OffsetOfExpr::OffsetOfNode::Identifier:
+ Writer.AddIdentifierRef(ON.getFieldName(), Record);
+ break;
+
+ case OffsetOfExpr::OffsetOfNode::Base:
+ Writer.AddCXXBaseSpecifier(*ON.getBase(), Record);
+ break;
+ }
+ }
+ for (unsigned I = 0, N = E->getNumExpressions(); I != N; ++I)
+ Writer.AddStmt(E->getIndexExpr(I));
+ Code = serialization::EXPR_OFFSETOF;
+}
+
+void ASTStmtWriter::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getKind());
+ if (E->isArgumentType())
+ Writer.AddTypeSourceInfo(E->getArgumentTypeInfo(), Record);
+ else {
+ Record.push_back(0);
+ Writer.AddStmt(E->getArgumentExpr());
+ }
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_SIZEOF_ALIGN_OF;
+}
+
+void ASTStmtWriter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getLHS());
+ Writer.AddStmt(E->getRHS());
+ Writer.AddSourceLocation(E->getRBracketLoc(), Record);
+ Code = serialization::EXPR_ARRAY_SUBSCRIPT;
+}
+
+void ASTStmtWriter::VisitCallExpr(CallExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumArgs());
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Writer.AddStmt(E->getCallee());
+ for (CallExpr::arg_iterator Arg = E->arg_begin(), ArgEnd = E->arg_end();
+ Arg != ArgEnd; ++Arg)
+ Writer.AddStmt(*Arg);
+ Code = serialization::EXPR_CALL;
+}
+
+void ASTStmtWriter::VisitMemberExpr(MemberExpr *E) {
+ // Don't call VisitExpr; we'll write everything here.
+
+ Record.push_back(E->hasQualifier());
+ if (E->hasQualifier())
+ Writer.AddNestedNameSpecifierLoc(E->getQualifierLoc(), Record);
+
+ Record.push_back(E->HasTemplateKWAndArgsInfo);
+ if (E->HasTemplateKWAndArgsInfo) {
+ Writer.AddSourceLocation(E->getTemplateKeywordLoc(), Record);
+ unsigned NumTemplateArgs = E->getNumTemplateArgs();
+ Record.push_back(NumTemplateArgs);
+ Writer.AddSourceLocation(E->getLAngleLoc(), Record);
+ Writer.AddSourceLocation(E->getRAngleLoc(), Record);
+ for (unsigned i=0; i != NumTemplateArgs; ++i)
+ Writer.AddTemplateArgumentLoc(E->getTemplateArgs()[i], Record);
+ }
+
+ Record.push_back(E->hadMultipleCandidates());
+
+ DeclAccessPair FoundDecl = E->getFoundDecl();
+ Writer.AddDeclRef(FoundDecl.getDecl(), Record);
+ Record.push_back(FoundDecl.getAccess());
+
+ Writer.AddTypeRef(E->getType(), Record);
+ Record.push_back(E->getValueKind());
+ Record.push_back(E->getObjectKind());
+ Writer.AddStmt(E->getBase());
+ Writer.AddDeclRef(E->getMemberDecl(), Record);
+ Writer.AddSourceLocation(E->getMemberLoc(), Record);
+ Record.push_back(E->isArrow());
+ Writer.AddDeclarationNameLoc(E->MemberDNLoc,
+ E->getMemberDecl()->getDeclName(), Record);
+ Code = serialization::EXPR_MEMBER;
+}
+
+void ASTStmtWriter::VisitObjCIsaExpr(ObjCIsaExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getBase());
+ Writer.AddSourceLocation(E->getIsaMemberLoc(), Record);
+ Record.push_back(E->isArrow());
+ Code = serialization::EXPR_OBJC_ISA;
+}
+
+void ASTStmtWriter::
+VisitObjCIndirectCopyRestoreExpr(ObjCIndirectCopyRestoreExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getSubExpr());
+ Record.push_back(E->shouldCopy());
+ Code = serialization::EXPR_OBJC_INDIRECT_COPY_RESTORE;
+}
+
+void ASTStmtWriter::VisitObjCBridgedCastExpr(ObjCBridgedCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ Writer.AddSourceLocation(E->getLParenLoc(), Record);
+ Writer.AddSourceLocation(E->getBridgeKeywordLoc(), Record);
+ Record.push_back(E->getBridgeKind()); // FIXME: Stable encoding
+ Code = serialization::EXPR_OBJC_BRIDGED_CAST;
+}
+
+void ASTStmtWriter::VisitCastExpr(CastExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->path_size());
+ Writer.AddStmt(E->getSubExpr());
+ Record.push_back(E->getCastKind()); // FIXME: stable encoding
+
+ for (CastExpr::path_iterator
+ PI = E->path_begin(), PE = E->path_end(); PI != PE; ++PI)
+ Writer.AddCXXBaseSpecifier(**PI, Record);
+}
+
+void ASTStmtWriter::VisitBinaryOperator(BinaryOperator *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getLHS());
+ Writer.AddStmt(E->getRHS());
+ Record.push_back(E->getOpcode()); // FIXME: stable encoding
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Code = serialization::EXPR_BINARY_OPERATOR;
+}
+
+void ASTStmtWriter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
+ VisitBinaryOperator(E);
+ Writer.AddTypeRef(E->getComputationLHSType(), Record);
+ Writer.AddTypeRef(E->getComputationResultType(), Record);
+ Code = serialization::EXPR_COMPOUND_ASSIGN_OPERATOR;
+}
+
+void ASTStmtWriter::VisitConditionalOperator(ConditionalOperator *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getCond());
+ Writer.AddStmt(E->getLHS());
+ Writer.AddStmt(E->getRHS());
+ Writer.AddSourceLocation(E->getQuestionLoc(), Record);
+ Writer.AddSourceLocation(E->getColonLoc(), Record);
+ Code = serialization::EXPR_CONDITIONAL_OPERATOR;
+}
+
+void
+ASTStmtWriter::VisitBinaryConditionalOperator(BinaryConditionalOperator *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getOpaqueValue());
+ Writer.AddStmt(E->getCommon());
+ Writer.AddStmt(E->getCond());
+ Writer.AddStmt(E->getTrueExpr());
+ Writer.AddStmt(E->getFalseExpr());
+ Writer.AddSourceLocation(E->getQuestionLoc(), Record);
+ Writer.AddSourceLocation(E->getColonLoc(), Record);
+ Code = serialization::EXPR_BINARY_CONDITIONAL_OPERATOR;
+}
+
+void ASTStmtWriter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
+ VisitCastExpr(E);
+ Code = serialization::EXPR_IMPLICIT_CAST;
+}
+
+void ASTStmtWriter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
+ VisitCastExpr(E);
+ Writer.AddTypeSourceInfo(E->getTypeInfoAsWritten(), Record);
+}
+
+void ASTStmtWriter::VisitCStyleCastExpr(CStyleCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ Writer.AddSourceLocation(E->getLParenLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_CSTYLE_CAST;
+}
+
+void ASTStmtWriter::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getLParenLoc(), Record);
+ Writer.AddTypeSourceInfo(E->getTypeSourceInfo(), Record);
+ Writer.AddStmt(E->getInitializer());
+ Record.push_back(E->isFileScope());
+ Code = serialization::EXPR_COMPOUND_LITERAL;
+}
+
+void ASTStmtWriter::VisitExtVectorElementExpr(ExtVectorElementExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getBase());
+ Writer.AddIdentifierRef(&E->getAccessor(), Record);
+ Writer.AddSourceLocation(E->getAccessorLoc(), Record);
+ Code = serialization::EXPR_EXT_VECTOR_ELEMENT;
+}
+
+void ASTStmtWriter::VisitInitListExpr(InitListExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getSyntacticForm());
+ Writer.AddSourceLocation(E->getLBraceLoc(), Record);
+ Writer.AddSourceLocation(E->getRBraceLoc(), Record);
+ bool isArrayFiller = E->ArrayFillerOrUnionFieldInit.is<Expr*>();
+ Record.push_back(isArrayFiller);
+ if (isArrayFiller)
+ Writer.AddStmt(E->getArrayFiller());
+ else
+ Writer.AddDeclRef(E->getInitializedFieldInUnion(), Record);
+ Record.push_back(E->hadArrayRangeDesignator());
+ Record.push_back(E->initializesStdInitializerList());
+ Record.push_back(E->getNumInits());
+ if (isArrayFiller) {
+ // ArrayFiller may have filled "holes" due to designated initializers.
+ // Replace them with 0 to indicate that the filler goes in that place.
+ Expr *filler = E->getArrayFiller();
+ for (unsigned I = 0, N = E->getNumInits(); I != N; ++I)
+ Writer.AddStmt(E->getInit(I) != filler ? E->getInit(I) : 0);
+ } else {
+ for (unsigned I = 0, N = E->getNumInits(); I != N; ++I)
+ Writer.AddStmt(E->getInit(I));
+ }
+ Code = serialization::EXPR_INIT_LIST;
+}
+
+void ASTStmtWriter::VisitDesignatedInitExpr(DesignatedInitExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumSubExprs());
+ for (unsigned I = 0, N = E->getNumSubExprs(); I != N; ++I)
+ Writer.AddStmt(E->getSubExpr(I));
+ Writer.AddSourceLocation(E->getEqualOrColonLoc(), Record);
+ Record.push_back(E->usesGNUSyntax());
+ for (DesignatedInitExpr::designators_iterator D = E->designators_begin(),
+ DEnd = E->designators_end();
+ D != DEnd; ++D) {
+ if (D->isFieldDesignator()) {
+ if (FieldDecl *Field = D->getField()) {
+ Record.push_back(serialization::DESIG_FIELD_DECL);
+ Writer.AddDeclRef(Field, Record);
+ } else {
+ Record.push_back(serialization::DESIG_FIELD_NAME);
+ Writer.AddIdentifierRef(D->getFieldName(), Record);
+ }
+ Writer.AddSourceLocation(D->getDotLoc(), Record);
+ Writer.AddSourceLocation(D->getFieldLoc(), Record);
+ } else if (D->isArrayDesignator()) {
+ Record.push_back(serialization::DESIG_ARRAY);
+ Record.push_back(D->getFirstExprIndex());
+ Writer.AddSourceLocation(D->getLBracketLoc(), Record);
+ Writer.AddSourceLocation(D->getRBracketLoc(), Record);
+ } else {
+ assert(D->isArrayRangeDesignator() && "Unknown designator");
+ Record.push_back(serialization::DESIG_ARRAY_RANGE);
+ Record.push_back(D->getFirstExprIndex());
+ Writer.AddSourceLocation(D->getLBracketLoc(), Record);
+ Writer.AddSourceLocation(D->getEllipsisLoc(), Record);
+ Writer.AddSourceLocation(D->getRBracketLoc(), Record);
+ }
+ }
+ Code = serialization::EXPR_DESIGNATED_INIT;
+}
+
+void ASTStmtWriter::VisitImplicitValueInitExpr(ImplicitValueInitExpr *E) {
+ VisitExpr(E);
+ Code = serialization::EXPR_IMPLICIT_VALUE_INIT;
+}
+
+void ASTStmtWriter::VisitVAArgExpr(VAArgExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getSubExpr());
+ Writer.AddTypeSourceInfo(E->getWrittenTypeInfo(), Record);
+ Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_VA_ARG;
+}
+
+void ASTStmtWriter::VisitAddrLabelExpr(AddrLabelExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getAmpAmpLoc(), Record);
+ Writer.AddSourceLocation(E->getLabelLoc(), Record);
+ Writer.AddDeclRef(E->getLabel(), Record);
+ Code = serialization::EXPR_ADDR_LABEL;
+}
+
+void ASTStmtWriter::VisitStmtExpr(StmtExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getSubStmt());
+ Writer.AddSourceLocation(E->getLParenLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_STMT;
+}
+
+void ASTStmtWriter::VisitChooseExpr(ChooseExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getCond());
+ Writer.AddStmt(E->getLHS());
+ Writer.AddStmt(E->getRHS());
+ Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_CHOOSE;
+}
+
+void ASTStmtWriter::VisitGNUNullExpr(GNUNullExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getTokenLocation(), Record);
+ Code = serialization::EXPR_GNU_NULL;
+}
+
+void ASTStmtWriter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumSubExprs());
+ for (unsigned I = 0, N = E->getNumSubExprs(); I != N; ++I)
+ Writer.AddStmt(E->getExpr(I));
+ Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_SHUFFLE_VECTOR;
+}
+
+void ASTStmtWriter::VisitBlockExpr(BlockExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getBlockDecl(), Record);
+ Code = serialization::EXPR_BLOCK;
+}
+
+void ASTStmtWriter::VisitGenericSelectionExpr(GenericSelectionExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumAssocs());
+
+ Writer.AddStmt(E->getControllingExpr());
+ for (unsigned I = 0, N = E->getNumAssocs(); I != N; ++I) {
+ Writer.AddTypeSourceInfo(E->getAssocTypeSourceInfo(I), Record);
+ Writer.AddStmt(E->getAssocExpr(I));
+ }
+ Record.push_back(E->isResultDependent() ? -1U : E->getResultIndex());
+
+ Writer.AddSourceLocation(E->getGenericLoc(), Record);
+ Writer.AddSourceLocation(E->getDefaultLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_GENERIC_SELECTION;
+}
+
+void ASTStmtWriter::VisitPseudoObjectExpr(PseudoObjectExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumSemanticExprs());
+
+ // Push the result index. Currently, this needs to exactly match
+ // the encoding used internally for ResultIndex.
+ unsigned result = E->getResultExprIndex();
+ result = (result == PseudoObjectExpr::NoResult ? 0 : result + 1);
+ Record.push_back(result);
+
+ Writer.AddStmt(E->getSyntacticForm());
+ for (PseudoObjectExpr::semantics_iterator
+ i = E->semantics_begin(), e = E->semantics_end(); i != e; ++i) {
+ Writer.AddStmt(*i);
+ }
+ Code = serialization::EXPR_PSEUDO_OBJECT;
+}
+
+void ASTStmtWriter::VisitAtomicExpr(AtomicExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getOp());
+ for (unsigned I = 0, N = E->getNumSubExprs(); I != N; ++I)
+ Writer.AddStmt(E->getSubExprs()[I]);
+ Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_ATOMIC;
+}
+
+//===----------------------------------------------------------------------===//
+// Objective-C Expressions and Statements.
+//===----------------------------------------------------------------------===//
+
+void ASTStmtWriter::VisitObjCStringLiteral(ObjCStringLiteral *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getString());
+ Writer.AddSourceLocation(E->getAtLoc(), Record);
+ Code = serialization::EXPR_OBJC_STRING_LITERAL;
+}
+
+void ASTStmtWriter::VisitObjCNumericLiteral(ObjCNumericLiteral *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getNumber());
+ Writer.AddDeclRef(E->getObjCNumericLiteralMethod(), Record);
+ Writer.AddSourceLocation(E->getAtLoc(), Record);
+ Code = serialization::EXPR_OBJC_NUMERIC_LITERAL;
+}
+
+void ASTStmtWriter::VisitObjCArrayLiteral(ObjCArrayLiteral *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumElements());
+ for (unsigned i = 0; i < E->getNumElements(); i++)
+ Writer.AddStmt(E->getElement(i));
+ Writer.AddDeclRef(E->getArrayWithObjectsMethod(), Record);
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Code = serialization::EXPR_OBJC_ARRAY_LITERAL;
+}
+
+void ASTStmtWriter::VisitObjCDictionaryLiteral(ObjCDictionaryLiteral *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumElements());
+ Record.push_back(E->HasPackExpansions);
+ for (unsigned i = 0; i < E->getNumElements(); i++) {
+ ObjCDictionaryElement Element = E->getKeyValueElement(i);
+ Writer.AddStmt(Element.Key);
+ Writer.AddStmt(Element.Value);
+ if (E->HasPackExpansions) {
+ Writer.AddSourceLocation(Element.EllipsisLoc, Record);
+ unsigned NumExpansions = 0;
+ if (Element.NumExpansions)
+ NumExpansions = *Element.NumExpansions + 1;
+ Record.push_back(NumExpansions);
+ }
+ }
+
+ Writer.AddDeclRef(E->getDictWithObjectsMethod(), Record);
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Code = serialization::EXPR_OBJC_DICTIONARY_LITERAL;
+}
+
+void ASTStmtWriter::VisitObjCEncodeExpr(ObjCEncodeExpr *E) {
+ VisitExpr(E);
+ Writer.AddTypeSourceInfo(E->getEncodedTypeSourceInfo(), Record);
+ Writer.AddSourceLocation(E->getAtLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_OBJC_ENCODE;
+}
+
+void ASTStmtWriter::VisitObjCSelectorExpr(ObjCSelectorExpr *E) {
+ VisitExpr(E);
+ Writer.AddSelectorRef(E->getSelector(), Record);
+ Writer.AddSourceLocation(E->getAtLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_OBJC_SELECTOR_EXPR;
+}
+
+void ASTStmtWriter::VisitObjCProtocolExpr(ObjCProtocolExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getProtocol(), Record);
+ Writer.AddSourceLocation(E->getAtLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_OBJC_PROTOCOL_EXPR;
+}
+
+void ASTStmtWriter::VisitObjCIvarRefExpr(ObjCIvarRefExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getDecl(), Record);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Writer.AddStmt(E->getBase());
+ Record.push_back(E->isArrow());
+ Record.push_back(E->isFreeIvar());
+ Code = serialization::EXPR_OBJC_IVAR_REF_EXPR;
+}
+
+void ASTStmtWriter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->SetterAndMethodRefFlags.getInt());
+ Record.push_back(E->isImplicitProperty());
+ if (E->isImplicitProperty()) {
+ Writer.AddDeclRef(E->getImplicitPropertyGetter(), Record);
+ Writer.AddDeclRef(E->getImplicitPropertySetter(), Record);
+ } else {
+ Writer.AddDeclRef(E->getExplicitProperty(), Record);
+ }
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Writer.AddSourceLocation(E->getReceiverLocation(), Record);
+ if (E->isObjectReceiver()) {
+ Record.push_back(0);
+ Writer.AddStmt(E->getBase());
+ } else if (E->isSuperReceiver()) {
+ Record.push_back(1);
+ Writer.AddTypeRef(E->getSuperReceiverType(), Record);
+ } else {
+ Record.push_back(2);
+ Writer.AddDeclRef(E->getClassReceiver(), Record);
+ }
+
+ Code = serialization::EXPR_OBJC_PROPERTY_REF_EXPR;
+}
+
+void ASTStmtWriter::VisitObjCSubscriptRefExpr(ObjCSubscriptRefExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getRBracket(), Record);
+ Writer.AddStmt(E->getBaseExpr());
+ Writer.AddStmt(E->getKeyExpr());
+ Writer.AddDeclRef(E->getAtIndexMethodDecl(), Record);
+ Writer.AddDeclRef(E->setAtIndexMethodDecl(), Record);
+
+ Code = serialization::EXPR_OBJC_SUBSCRIPT_REF_EXPR;
+}
+
+void ASTStmtWriter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumArgs());
+ Record.push_back(E->getNumStoredSelLocs());
+ Record.push_back(E->SelLocsKind);
+ Record.push_back(E->isDelegateInitCall());
+ Record.push_back(E->IsImplicit);
+ Record.push_back((unsigned)E->getReceiverKind()); // FIXME: stable encoding
+ switch (E->getReceiverKind()) {
+ case ObjCMessageExpr::Instance:
+ Writer.AddStmt(E->getInstanceReceiver());
+ break;
+
+ case ObjCMessageExpr::Class:
+ Writer.AddTypeSourceInfo(E->getClassReceiverTypeInfo(), Record);
+ break;
+
+ case ObjCMessageExpr::SuperClass:
+ case ObjCMessageExpr::SuperInstance:
+ Writer.AddTypeRef(E->getSuperType(), Record);
+ Writer.AddSourceLocation(E->getSuperLoc(), Record);
+ break;
+ }
+
+ if (E->getMethodDecl()) {
+ Record.push_back(1);
+ Writer.AddDeclRef(E->getMethodDecl(), Record);
+ } else {
+ Record.push_back(0);
+ Writer.AddSelectorRef(E->getSelector(), Record);
+ }
+
+ Writer.AddSourceLocation(E->getLeftLoc(), Record);
+ Writer.AddSourceLocation(E->getRightLoc(), Record);
+
+ for (CallExpr::arg_iterator Arg = E->arg_begin(), ArgEnd = E->arg_end();
+ Arg != ArgEnd; ++Arg)
+ Writer.AddStmt(*Arg);
+
+ SourceLocation *Locs = E->getStoredSelLocs();
+ for (unsigned i = 0, e = E->getNumStoredSelLocs(); i != e; ++i)
+ Writer.AddSourceLocation(Locs[i], Record);
+
+ Code = serialization::EXPR_OBJC_MESSAGE_EXPR;
+}
+
+void ASTStmtWriter::VisitObjCForCollectionStmt(ObjCForCollectionStmt *S) {
+ VisitStmt(S);
+ Writer.AddStmt(S->getElement());
+ Writer.AddStmt(S->getCollection());
+ Writer.AddStmt(S->getBody());
+ Writer.AddSourceLocation(S->getForLoc(), Record);
+ Writer.AddSourceLocation(S->getRParenLoc(), Record);
+ Code = serialization::STMT_OBJC_FOR_COLLECTION;
+}
+
+void ASTStmtWriter::VisitObjCAtCatchStmt(ObjCAtCatchStmt *S) {
+ Writer.AddStmt(S->getCatchBody());
+ Writer.AddDeclRef(S->getCatchParamDecl(), Record);
+ Writer.AddSourceLocation(S->getAtCatchLoc(), Record);
+ Writer.AddSourceLocation(S->getRParenLoc(), Record);
+ Code = serialization::STMT_OBJC_CATCH;
+}
+
+void ASTStmtWriter::VisitObjCAtFinallyStmt(ObjCAtFinallyStmt *S) {
+ Writer.AddStmt(S->getFinallyBody());
+ Writer.AddSourceLocation(S->getAtFinallyLoc(), Record);
+ Code = serialization::STMT_OBJC_FINALLY;
+}
+
+void ASTStmtWriter::VisitObjCAutoreleasePoolStmt(ObjCAutoreleasePoolStmt *S) {
+ Writer.AddStmt(S->getSubStmt());
+ Writer.AddSourceLocation(S->getAtLoc(), Record);
+ Code = serialization::STMT_OBJC_AUTORELEASE_POOL;
+}
+
+void ASTStmtWriter::VisitObjCAtTryStmt(ObjCAtTryStmt *S) {
+ Record.push_back(S->getNumCatchStmts());
+ Record.push_back(S->getFinallyStmt() != 0);
+ Writer.AddStmt(S->getTryBody());
+ for (unsigned I = 0, N = S->getNumCatchStmts(); I != N; ++I)
+ Writer.AddStmt(S->getCatchStmt(I));
+ if (S->getFinallyStmt())
+ Writer.AddStmt(S->getFinallyStmt());
+ Writer.AddSourceLocation(S->getAtTryLoc(), Record);
+ Code = serialization::STMT_OBJC_AT_TRY;
+}
+
+void ASTStmtWriter::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S) {
+ Writer.AddStmt(S->getSynchExpr());
+ Writer.AddStmt(S->getSynchBody());
+ Writer.AddSourceLocation(S->getAtSynchronizedLoc(), Record);
+ Code = serialization::STMT_OBJC_AT_SYNCHRONIZED;
+}
+
+void ASTStmtWriter::VisitObjCAtThrowStmt(ObjCAtThrowStmt *S) {
+ Writer.AddStmt(S->getThrowExpr());
+ Writer.AddSourceLocation(S->getThrowLoc(), Record);
+ Code = serialization::STMT_OBJC_AT_THROW;
+}
+
+void ASTStmtWriter::VisitObjCBoolLiteralExpr(ObjCBoolLiteralExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getValue());
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Code = serialization::EXPR_OBJC_BOOL_LITERAL;
+}
+
+//===----------------------------------------------------------------------===//
+// C++ Expressions and Statements.
+//===----------------------------------------------------------------------===//
+
+void ASTStmtWriter::VisitCXXCatchStmt(CXXCatchStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getCatchLoc(), Record);
+ Writer.AddDeclRef(S->getExceptionDecl(), Record);
+ Writer.AddStmt(S->getHandlerBlock());
+ Code = serialization::STMT_CXX_CATCH;
+}
+
+void ASTStmtWriter::VisitCXXTryStmt(CXXTryStmt *S) {
+ VisitStmt(S);
+ Record.push_back(S->getNumHandlers());
+ Writer.AddSourceLocation(S->getTryLoc(), Record);
+ Writer.AddStmt(S->getTryBlock());
+ for (unsigned i = 0, e = S->getNumHandlers(); i != e; ++i)
+ Writer.AddStmt(S->getHandler(i));
+ Code = serialization::STMT_CXX_TRY;
+}
+
+void ASTStmtWriter::VisitCXXForRangeStmt(CXXForRangeStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getForLoc(), Record);
+ Writer.AddSourceLocation(S->getColonLoc(), Record);
+ Writer.AddSourceLocation(S->getRParenLoc(), Record);
+ Writer.AddStmt(S->getRangeStmt());
+ Writer.AddStmt(S->getBeginEndStmt());
+ Writer.AddStmt(S->getCond());
+ Writer.AddStmt(S->getInc());
+ Writer.AddStmt(S->getLoopVarStmt());
+ Writer.AddStmt(S->getBody());
+ Code = serialization::STMT_CXX_FOR_RANGE;
+}
+
+void ASTStmtWriter::VisitMSDependentExistsStmt(MSDependentExistsStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getKeywordLoc(), Record);
+ Record.push_back(S->isIfExists());
+ Writer.AddNestedNameSpecifierLoc(S->getQualifierLoc(), Record);
+ Writer.AddDeclarationNameInfo(S->getNameInfo(), Record);
+ Writer.AddStmt(S->getSubStmt());
+ Code = serialization::STMT_MS_DEPENDENT_EXISTS;
+}
+
+void ASTStmtWriter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
+ VisitCallExpr(E);
+ Record.push_back(E->getOperator());
+ Code = serialization::EXPR_CXX_OPERATOR_CALL;
+}
+
+void ASTStmtWriter::VisitCXXMemberCallExpr(CXXMemberCallExpr *E) {
+ VisitCallExpr(E);
+ Code = serialization::EXPR_CXX_MEMBER_CALL;
+}
+
+void ASTStmtWriter::VisitCXXConstructExpr(CXXConstructExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumArgs());
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ Writer.AddStmt(E->getArg(I));
+ Writer.AddDeclRef(E->getConstructor(), Record);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Record.push_back(E->isElidable());
+ Record.push_back(E->hadMultipleCandidates());
+ Record.push_back(E->requiresZeroInitialization());
+ Record.push_back(E->getConstructionKind()); // FIXME: stable encoding
+ Writer.AddSourceRange(E->getParenRange(), Record);
+ Code = serialization::EXPR_CXX_CONSTRUCT;
+}
+
+void ASTStmtWriter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *E) {
+ VisitCXXConstructExpr(E);
+ Writer.AddTypeSourceInfo(E->getTypeSourceInfo(), Record);
+ Code = serialization::EXPR_CXX_TEMPORARY_OBJECT;
+}
+
+void ASTStmtWriter::VisitLambdaExpr(LambdaExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->NumCaptures);
+ unsigned NumArrayIndexVars = 0;
+ if (E->HasArrayIndexVars)
+ NumArrayIndexVars = E->getArrayIndexStarts()[E->NumCaptures];
+ Record.push_back(NumArrayIndexVars);
+ Writer.AddSourceRange(E->IntroducerRange, Record);
+ Record.push_back(E->CaptureDefault); // FIXME: stable encoding
+ Record.push_back(E->ExplicitParams);
+ Record.push_back(E->ExplicitResultType);
+ Writer.AddSourceLocation(E->ClosingBrace, Record);
+
+ // Add capture initializers.
+ for (LambdaExpr::capture_init_iterator C = E->capture_init_begin(),
+ CEnd = E->capture_init_end();
+ C != CEnd; ++C) {
+ Writer.AddStmt(*C);
+ }
+
+ // Add array index variables, if any.
+ if (NumArrayIndexVars) {
+ Record.append(E->getArrayIndexStarts(),
+ E->getArrayIndexStarts() + E->NumCaptures + 1);
+ VarDecl **ArrayIndexVars = E->getArrayIndexVars();
+ for (unsigned I = 0; I != NumArrayIndexVars; ++I)
+ Writer.AddDeclRef(ArrayIndexVars[I], Record);
+ }
+
+ Code = serialization::EXPR_LAMBDA;
+}
+
+void ASTStmtWriter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ Writer.AddSourceRange(SourceRange(E->getOperatorLoc(), E->getRParenLoc()),
+ Record);
+}
+
+void ASTStmtWriter::VisitCXXStaticCastExpr(CXXStaticCastExpr *E) {
+ VisitCXXNamedCastExpr(E);
+ Code = serialization::EXPR_CXX_STATIC_CAST;
+}
+
+void ASTStmtWriter::VisitCXXDynamicCastExpr(CXXDynamicCastExpr *E) {
+ VisitCXXNamedCastExpr(E);
+ Code = serialization::EXPR_CXX_DYNAMIC_CAST;
+}
+
+void ASTStmtWriter::VisitCXXReinterpretCastExpr(CXXReinterpretCastExpr *E) {
+ VisitCXXNamedCastExpr(E);
+ Code = serialization::EXPR_CXX_REINTERPRET_CAST;
+}
+
+void ASTStmtWriter::VisitCXXConstCastExpr(CXXConstCastExpr *E) {
+ VisitCXXNamedCastExpr(E);
+ Code = serialization::EXPR_CXX_CONST_CAST;
+}
+
+void ASTStmtWriter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E) {
+ VisitExplicitCastExpr(E);
+ Writer.AddSourceLocation(E->getTypeBeginLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_CXX_FUNCTIONAL_CAST;
+}
+
+void ASTStmtWriter::VisitUserDefinedLiteral(UserDefinedLiteral *E) {
+ VisitCallExpr(E);
+ Writer.AddSourceLocation(E->UDSuffixLoc, Record);
+ Code = serialization::EXPR_USER_DEFINED_LITERAL;
+}
+
+void ASTStmtWriter::VisitCXXBoolLiteralExpr(CXXBoolLiteralExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getValue());
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Code = serialization::EXPR_CXX_BOOL_LITERAL;
+}
+
+void ASTStmtWriter::VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Code = serialization::EXPR_CXX_NULL_PTR_LITERAL;
+}
+
+void ASTStmtWriter::VisitCXXTypeidExpr(CXXTypeidExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ if (E->isTypeOperand()) {
+ Writer.AddTypeSourceInfo(E->getTypeOperandSourceInfo(), Record);
+ Code = serialization::EXPR_CXX_TYPEID_TYPE;
+ } else {
+ Writer.AddStmt(E->getExprOperand());
+ Code = serialization::EXPR_CXX_TYPEID_EXPR;
+ }
+}
+
+void ASTStmtWriter::VisitCXXThisExpr(CXXThisExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Record.push_back(E->isImplicit());
+ Code = serialization::EXPR_CXX_THIS;
+}
+
+void ASTStmtWriter::VisitCXXThrowExpr(CXXThrowExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getThrowLoc(), Record);
+ Writer.AddStmt(E->getSubExpr());
+ Record.push_back(E->isThrownVariableInScope());
+ Code = serialization::EXPR_CXX_THROW;
+}
+
+void ASTStmtWriter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *E) {
+ VisitExpr(E);
+
+ bool HasOtherExprStored = E->Param.getInt();
+ // Store these first, the reader reads them before creation.
+ Record.push_back(HasOtherExprStored);
+ if (HasOtherExprStored)
+ Writer.AddStmt(E->getExpr());
+ Writer.AddDeclRef(E->getParam(), Record);
+ Writer.AddSourceLocation(E->getUsedLocation(), Record);
+
+ Code = serialization::EXPR_CXX_DEFAULT_ARG;
+}
+
+void ASTStmtWriter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ VisitExpr(E);
+ Writer.AddCXXTemporary(E->getTemporary(), Record);
+ Writer.AddStmt(E->getSubExpr());
+ Code = serialization::EXPR_CXX_BIND_TEMPORARY;
+}
+
+void ASTStmtWriter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
+ VisitExpr(E);
+ Writer.AddTypeSourceInfo(E->getTypeSourceInfo(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_CXX_SCALAR_VALUE_INIT;
+}
+
+void ASTStmtWriter::VisitCXXNewExpr(CXXNewExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->isGlobalNew());
+ Record.push_back(E->isArray());
+ Record.push_back(E->doesUsualArrayDeleteWantSize());
+ Record.push_back(E->getNumPlacementArgs());
+ Record.push_back(E->StoredInitializationStyle);
+ Writer.AddDeclRef(E->getOperatorNew(), Record);
+ Writer.AddDeclRef(E->getOperatorDelete(), Record);
+ Writer.AddTypeSourceInfo(E->getAllocatedTypeSourceInfo(), Record);
+ Writer.AddSourceRange(E->getTypeIdParens(), Record);
+ Writer.AddSourceLocation(E->getStartLoc(), Record);
+ Writer.AddSourceRange(E->getDirectInitRange(), Record);
+ for (CXXNewExpr::arg_iterator I = E->raw_arg_begin(), e = E->raw_arg_end();
+ I != e; ++I)
+ Writer.AddStmt(*I);
+
+ Code = serialization::EXPR_CXX_NEW;
+}
+
+void ASTStmtWriter::VisitCXXDeleteExpr(CXXDeleteExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->isGlobalDelete());
+ Record.push_back(E->isArrayForm());
+ Record.push_back(E->isArrayFormAsWritten());
+ Record.push_back(E->doesUsualArrayDeleteWantSize());
+ Writer.AddDeclRef(E->getOperatorDelete(), Record);
+ Writer.AddStmt(E->getArgument());
+ Writer.AddSourceLocation(E->getSourceRange().getBegin(), Record);
+
+ Code = serialization::EXPR_CXX_DELETE;
+}
+
+void ASTStmtWriter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
+ VisitExpr(E);
+
+ Writer.AddStmt(E->getBase());
+ Record.push_back(E->isArrow());
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Writer.AddNestedNameSpecifierLoc(E->getQualifierLoc(), Record);
+ Writer.AddTypeSourceInfo(E->getScopeTypeInfo(), Record);
+ Writer.AddSourceLocation(E->getColonColonLoc(), Record);
+ Writer.AddSourceLocation(E->getTildeLoc(), Record);
+
+ // PseudoDestructorTypeStorage.
+ Writer.AddIdentifierRef(E->getDestroyedTypeIdentifier(), Record);
+ if (E->getDestroyedTypeIdentifier())
+ Writer.AddSourceLocation(E->getDestroyedTypeLoc(), Record);
+ else
+ Writer.AddTypeSourceInfo(E->getDestroyedTypeInfo(), Record);
+
+ Code = serialization::EXPR_CXX_PSEUDO_DESTRUCTOR;
+}
+
+void ASTStmtWriter::VisitExprWithCleanups(ExprWithCleanups *E) {
+ VisitExpr(E);
+ Record.push_back(E->getNumObjects());
+ for (unsigned i = 0, e = E->getNumObjects(); i != e; ++i)
+ Writer.AddDeclRef(E->getObject(i), Record);
+
+ Writer.AddStmt(E->getSubExpr());
+ Code = serialization::EXPR_EXPR_WITH_CLEANUPS;
+}
+
+void
+ASTStmtWriter::VisitCXXDependentScopeMemberExpr(CXXDependentScopeMemberExpr *E){
+ VisitExpr(E);
+
+ // Don't emit anything here, HasTemplateKWAndArgsInfo must be
+ // emitted first.
+
+ Record.push_back(E->HasTemplateKWAndArgsInfo);
+ if (E->HasTemplateKWAndArgsInfo) {
+ const ASTTemplateKWAndArgsInfo &Args = *E->getTemplateKWAndArgsInfo();
+ Record.push_back(Args.NumTemplateArgs);
+ AddTemplateKWAndArgsInfo(Args);
+ }
+
+ if (!E->isImplicitAccess())
+ Writer.AddStmt(E->getBase());
+ else
+ Writer.AddStmt(0);
+ Writer.AddTypeRef(E->getBaseType(), Record);
+ Record.push_back(E->isArrow());
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Writer.AddNestedNameSpecifierLoc(E->getQualifierLoc(), Record);
+ Writer.AddDeclRef(E->getFirstQualifierFoundInScope(), Record);
+ Writer.AddDeclarationNameInfo(E->MemberNameInfo, Record);
+ Code = serialization::EXPR_CXX_DEPENDENT_SCOPE_MEMBER;
+}
+
+void
+ASTStmtWriter::VisitDependentScopeDeclRefExpr(DependentScopeDeclRefExpr *E) {
+ VisitExpr(E);
+
+ // Don't emit anything here, HasTemplateKWAndArgsInfo must be
+ // emitted first.
+
+ Record.push_back(E->HasTemplateKWAndArgsInfo);
+ if (E->HasTemplateKWAndArgsInfo) {
+ const ASTTemplateKWAndArgsInfo &Args = *E->getTemplateKWAndArgsInfo();
+ Record.push_back(Args.NumTemplateArgs);
+ AddTemplateKWAndArgsInfo(Args);
+ }
+
+ Writer.AddNestedNameSpecifierLoc(E->getQualifierLoc(), Record);
+ Writer.AddDeclarationNameInfo(E->NameInfo, Record);
+ Code = serialization::EXPR_CXX_DEPENDENT_SCOPE_DECL_REF;
+}
+
+void
+ASTStmtWriter::VisitCXXUnresolvedConstructExpr(CXXUnresolvedConstructExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->arg_size());
+ for (CXXUnresolvedConstructExpr::arg_iterator
+ ArgI = E->arg_begin(), ArgE = E->arg_end(); ArgI != ArgE; ++ArgI)
+ Writer.AddStmt(*ArgI);
+ Writer.AddTypeSourceInfo(E->getTypeSourceInfo(), Record);
+ Writer.AddSourceLocation(E->getLParenLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Code = serialization::EXPR_CXX_UNRESOLVED_CONSTRUCT;
+}
+
+void ASTStmtWriter::VisitOverloadExpr(OverloadExpr *E) {
+ VisitExpr(E);
+
+ // Don't emit anything here, HasTemplateKWAndArgsInfo must be
+ // emitted first.
+
+ Record.push_back(E->HasTemplateKWAndArgsInfo);
+ if (E->HasTemplateKWAndArgsInfo) {
+ const ASTTemplateKWAndArgsInfo &Args = *E->getTemplateKWAndArgsInfo();
+ Record.push_back(Args.NumTemplateArgs);
+ AddTemplateKWAndArgsInfo(Args);
+ }
+
+ Record.push_back(E->getNumDecls());
+ for (OverloadExpr::decls_iterator
+ OvI = E->decls_begin(), OvE = E->decls_end(); OvI != OvE; ++OvI) {
+ Writer.AddDeclRef(OvI.getDecl(), Record);
+ Record.push_back(OvI.getAccess());
+ }
+
+ Writer.AddDeclarationNameInfo(E->NameInfo, Record);
+ Writer.AddNestedNameSpecifierLoc(E->getQualifierLoc(), Record);
+}
+
+void ASTStmtWriter::VisitUnresolvedMemberExpr(UnresolvedMemberExpr *E) {
+ VisitOverloadExpr(E);
+ Record.push_back(E->isArrow());
+ Record.push_back(E->hasUnresolvedUsing());
+ Writer.AddStmt(!E->isImplicitAccess() ? E->getBase() : 0);
+ Writer.AddTypeRef(E->getBaseType(), Record);
+ Writer.AddSourceLocation(E->getOperatorLoc(), Record);
+ Code = serialization::EXPR_CXX_UNRESOLVED_MEMBER;
+}
+
+void ASTStmtWriter::VisitUnresolvedLookupExpr(UnresolvedLookupExpr *E) {
+ VisitOverloadExpr(E);
+ Record.push_back(E->requiresADL());
+ if (E->requiresADL())
+ Record.push_back(E->isStdAssociatedNamespace());
+ Record.push_back(E->isOverloaded());
+ Writer.AddDeclRef(E->getNamingClass(), Record);
+ Code = serialization::EXPR_CXX_UNRESOLVED_LOOKUP;
+}
+
+void ASTStmtWriter::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getTrait());
+ Record.push_back(E->getValue());
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Writer.AddTypeSourceInfo(E->getQueriedTypeSourceInfo(), Record);
+ Code = serialization::EXPR_CXX_UNARY_TYPE_TRAIT;
+}
+
+void ASTStmtWriter::VisitBinaryTypeTraitExpr(BinaryTypeTraitExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getTrait());
+ Record.push_back(E->getValue());
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Writer.AddTypeSourceInfo(E->getLhsTypeSourceInfo(), Record);
+ Writer.AddTypeSourceInfo(E->getRhsTypeSourceInfo(), Record);
+ Code = serialization::EXPR_BINARY_TYPE_TRAIT;
+}
+
+void ASTStmtWriter::VisitTypeTraitExpr(TypeTraitExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->TypeTraitExprBits.NumArgs);
+ Record.push_back(E->TypeTraitExprBits.Kind); // FIXME: Stable encoding
+ Record.push_back(E->TypeTraitExprBits.Value);
+ for (unsigned I = 0, N = E->getNumArgs(); I != N; ++I)
+ Writer.AddTypeSourceInfo(E->getArg(I), Record);
+ Code = serialization::EXPR_TYPE_TRAIT;
+}
+
+void ASTStmtWriter::VisitArrayTypeTraitExpr(ArrayTypeTraitExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getTrait());
+ Record.push_back(E->getValue());
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Writer.AddTypeSourceInfo(E->getQueriedTypeSourceInfo(), Record);
+ Code = serialization::EXPR_ARRAY_TYPE_TRAIT;
+}
+
+void ASTStmtWriter::VisitExpressionTraitExpr(ExpressionTraitExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getTrait());
+ Record.push_back(E->getValue());
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Writer.AddStmt(E->getQueriedExpression());
+ Code = serialization::EXPR_CXX_EXPRESSION_TRAIT;
+}
+
+void ASTStmtWriter::VisitCXXNoexceptExpr(CXXNoexceptExpr *E) {
+ VisitExpr(E);
+ Record.push_back(E->getValue());
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ Writer.AddStmt(E->getOperand());
+ Code = serialization::EXPR_CXX_NOEXCEPT;
+}
+
+void ASTStmtWriter::VisitPackExpansionExpr(PackExpansionExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getEllipsisLoc(), Record);
+ Record.push_back(E->NumExpansions);
+ Writer.AddStmt(E->getPattern());
+ Code = serialization::EXPR_PACK_EXPANSION;
+}
+
+void ASTStmtWriter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->OperatorLoc, Record);
+ Writer.AddSourceLocation(E->PackLoc, Record);
+ Writer.AddSourceLocation(E->RParenLoc, Record);
+ Record.push_back(E->Length);
+ Writer.AddDeclRef(E->Pack, Record);
+ Code = serialization::EXPR_SIZEOF_PACK;
+}
+
+void ASTStmtWriter::VisitSubstNonTypeTemplateParmExpr(
+ SubstNonTypeTemplateParmExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getParameter(), Record);
+ Writer.AddSourceLocation(E->getNameLoc(), Record);
+ Writer.AddStmt(E->getReplacement());
+ Code = serialization::EXPR_SUBST_NON_TYPE_TEMPLATE_PARM;
+}
+
+void ASTStmtWriter::VisitSubstNonTypeTemplateParmPackExpr(
+ SubstNonTypeTemplateParmPackExpr *E) {
+ VisitExpr(E);
+ Writer.AddDeclRef(E->getParameterPack(), Record);
+ Writer.AddTemplateArgument(E->getArgumentPack(), Record);
+ Writer.AddSourceLocation(E->getParameterPackLocation(), Record);
+ Code = serialization::EXPR_SUBST_NON_TYPE_TEMPLATE_PARM_PACK;
+}
+
+void ASTStmtWriter::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->Temporary);
+ Code = serialization::EXPR_MATERIALIZE_TEMPORARY;
+}
+
+void ASTStmtWriter::VisitOpaqueValueExpr(OpaqueValueExpr *E) {
+ VisitExpr(E);
+ Writer.AddStmt(E->getSourceExpr());
+ Writer.AddSourceLocation(E->getLocation(), Record);
+ Code = serialization::EXPR_OPAQUE_VALUE;
+}
+
+//===----------------------------------------------------------------------===//
+// CUDA Expressions and Statements.
+//===----------------------------------------------------------------------===//
+
+void ASTStmtWriter::VisitCUDAKernelCallExpr(CUDAKernelCallExpr *E) {
+ VisitCallExpr(E);
+ Writer.AddStmt(E->getConfig());
+ Code = serialization::EXPR_CUDA_KERNEL_CALL;
+}
+
+//===----------------------------------------------------------------------===//
+// OpenCL Expressions and Statements.
+//===----------------------------------------------------------------------===//
+void ASTStmtWriter::VisitAsTypeExpr(AsTypeExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceLocation(E->getBuiltinLoc(), Record);
+ Writer.AddSourceLocation(E->getRParenLoc(), Record);
+ Writer.AddStmt(E->getSrcExpr());
+ Code = serialization::EXPR_ASTYPE;
+}
+
+//===----------------------------------------------------------------------===//
+// Microsoft Expressions and Statements.
+//===----------------------------------------------------------------------===//
+void ASTStmtWriter::VisitCXXUuidofExpr(CXXUuidofExpr *E) {
+ VisitExpr(E);
+ Writer.AddSourceRange(E->getSourceRange(), Record);
+ if (E->isTypeOperand()) {
+ Writer.AddTypeSourceInfo(E->getTypeOperandSourceInfo(), Record);
+ Code = serialization::EXPR_CXX_UUIDOF_TYPE;
+ } else {
+ Writer.AddStmt(E->getExprOperand());
+ Code = serialization::EXPR_CXX_UUIDOF_EXPR;
+ }
+}
+
+void ASTStmtWriter::VisitSEHExceptStmt(SEHExceptStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getExceptLoc(), Record);
+ Writer.AddStmt(S->getFilterExpr());
+ Writer.AddStmt(S->getBlock());
+ Code = serialization::STMT_SEH_EXCEPT;
+}
+
+void ASTStmtWriter::VisitSEHFinallyStmt(SEHFinallyStmt *S) {
+ VisitStmt(S);
+ Writer.AddSourceLocation(S->getFinallyLoc(), Record);
+ Writer.AddStmt(S->getBlock());
+ Code = serialization::STMT_SEH_FINALLY;
+}
+
+void ASTStmtWriter::VisitSEHTryStmt(SEHTryStmt *S) {
+ VisitStmt(S);
+ Record.push_back(S->getIsCXXTry());
+ Writer.AddSourceLocation(S->getTryLoc(), Record);
+ Writer.AddStmt(S->getTryBlock());
+ Writer.AddStmt(S->getHandler());
+ Code = serialization::STMT_SEH_TRY;
+}
+
+//===----------------------------------------------------------------------===//
+// ASTWriter Implementation
+//===----------------------------------------------------------------------===//
+
+unsigned ASTWriter::RecordSwitchCaseID(SwitchCase *S) {
+ assert(SwitchCaseIDs.find(S) == SwitchCaseIDs.end() &&
+ "SwitchCase recorded twice");
+ unsigned NextID = SwitchCaseIDs.size();
+ SwitchCaseIDs[S] = NextID;
+ return NextID;
+}
+
+unsigned ASTWriter::getSwitchCaseID(SwitchCase *S) {
+ assert(SwitchCaseIDs.find(S) != SwitchCaseIDs.end() &&
+ "SwitchCase hasn't been seen yet");
+ return SwitchCaseIDs[S];
+}
+
+void ASTWriter::ClearSwitchCaseIDs() {
+ SwitchCaseIDs.clear();
+}
+
+/// \brief Write the given substatement or subexpression to the
+/// bitstream.
+void ASTWriter::WriteSubStmt(Stmt *S,
+ llvm::DenseMap<Stmt *, uint64_t> &SubStmtEntries,
+ llvm::DenseSet<Stmt *> &ParentStmts) {
+ RecordData Record;
+ ASTStmtWriter Writer(*this, Record);
+ ++NumStatements;
+
+ if (!S) {
+ Stream.EmitRecord(serialization::STMT_NULL_PTR, Record);
+ return;
+ }
+
+ llvm::DenseMap<Stmt *, uint64_t>::iterator I = SubStmtEntries.find(S);
+ if (I != SubStmtEntries.end()) {
+ Record.push_back(I->second);
+ Stream.EmitRecord(serialization::STMT_REF_PTR, Record);
+ return;
+ }
+
+#ifndef NDEBUG
+ assert(!ParentStmts.count(S) && "There is a Stmt cycle!");
+
+ struct ParentStmtInserterRAII {
+ Stmt *S;
+ llvm::DenseSet<Stmt *> &ParentStmts;
+
+ ParentStmtInserterRAII(Stmt *S, llvm::DenseSet<Stmt *> &ParentStmts)
+ : S(S), ParentStmts(ParentStmts) {
+ ParentStmts.insert(S);
+ }
+ ~ParentStmtInserterRAII() {
+ ParentStmts.erase(S);
+ }
+ };
+
+ ParentStmtInserterRAII ParentStmtInserter(S, ParentStmts);
+#endif
+
+ // Redirect ASTWriter::AddStmt to collect sub stmts.
+ SmallVector<Stmt *, 16> SubStmts;
+ CollectedStmts = &SubStmts;
+
+ Writer.Code = serialization::STMT_NULL_PTR;
+ Writer.AbbrevToUse = 0;
+ Writer.Visit(S);
+
+#ifndef NDEBUG
+ if (Writer.Code == serialization::STMT_NULL_PTR) {
+ SourceManager &SrcMgr
+ = DeclIDs.begin()->first->getASTContext().getSourceManager();
+ S->dump(SrcMgr);
+ llvm_unreachable("Unhandled sub statement writing AST file");
+ }
+#endif
+
+ // Revert ASTWriter::AddStmt.
+ CollectedStmts = &StmtsToEmit;
+
+ // Write the sub-stmts in reverse order, last to first. When reading them
+ // back we will recover the correct order by popping them from the Stmts
+ // stack. This simplifies reading and allows us to store a variable number of
+ // sub-stmts without knowing the count in advance.
+ while (!SubStmts.empty())
+ WriteSubStmt(SubStmts.pop_back_val(), SubStmtEntries, ParentStmts);
+
+ Stream.EmitRecord(Writer.Code, Record, Writer.AbbrevToUse);
+
+ SubStmtEntries[S] = Stream.GetCurrentBitNo();
+}
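+
+// Illustration of the ordering above (a sketch, not tied to any particular
+// record layout): for 'a + 1' the writer collects the two children of the
+// BinaryOperator in source order and then emits them back-to-front, so the
+// bitstream holds
+//
+//   [IntegerLiteral 1] [DeclRefExpr a] [BinaryOperator]
+//
+// The reader pushes each record onto its statement stack and pops two
+// operands when it materializes the BinaryOperator, recovering the original
+// order without the writer having to record how many children follow.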
+
+/// \brief Flush all of the statements that have been added to the
+/// queue via AddStmt().
+void ASTWriter::FlushStmts() {
+ RecordData Record;
+
+ // We expect to be the only consumer of the two temporary statement maps;
+ // assert that they are empty.
+ assert(SubStmtEntries.empty() && "unexpected entries in sub stmt map");
+ assert(ParentStmts.empty() && "unexpected entries in parent stmt map");
+
+ for (unsigned I = 0, N = StmtsToEmit.size(); I != N; ++I) {
+ WriteSubStmt(StmtsToEmit[I], SubStmtEntries, ParentStmts);
+
+ assert(N == StmtsToEmit.size() &&
+ "Substatement written via AddStmt rather than WriteSubStmt!");
+
+ // Note that we are at the end of a full expression. Any
+ // expression records that follow this one are part of a different
+ // expression.
+ Stream.EmitRecord(serialization::STMT_STOP, Record);
+
+ SubStmtEntries.clear();
+ ParentStmts.clear();
+ }
+
+ StmtsToEmit.clear();
+}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp b/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp
new file mode 100644
index 0000000..02aed10
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/GeneratePCH.cpp
@@ -0,0 +1,69 @@
+//===--- GeneratePCH.cpp - Sema Consumer for PCH Generation -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PCHGenerator, which is a SemaConsumer that generates
+// a PCH file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Serialization/ASTWriter.h"
+#include "clang/Sema/SemaConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/FileSystemStatCache.h"
+#include "llvm/Bitcode/BitstreamWriter.h"
+#include "llvm/Support/raw_ostream.h"
+#include <string>
+
+using namespace clang;
+
+PCHGenerator::PCHGenerator(const Preprocessor &PP,
+ StringRef OutputFile,
+ clang::Module *Module,
+ StringRef isysroot,
+ raw_ostream *OS)
+ : PP(PP), OutputFile(OutputFile), Module(Module),
+ isysroot(isysroot.str()), Out(OS),
+ SemaPtr(0), StatCalls(0), Stream(Buffer), Writer(Stream) {
+ // Install a stat() listener to keep track of all of the stat()
+ // calls.
+ StatCalls = new MemorizeStatCalls();
+ PP.getFileManager().addStatCache(StatCalls, /*AtBeginning=*/false);
+}
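+
+// A rough sketch of how a PCHGenerator is typically wired up (illustrative
+// only; the real plumbing lives in the frontend's GeneratePCHAction, and the
+// local names below are placeholders):
+//
+//   raw_ostream *OS = /* open the .pch output file */ 0;
+//   PCHGenerator *Gen = new PCHGenerator(CI.getPreprocessor(), OutputFile,
+//                                        /*Module=*/0, /*isysroot=*/"", OS);
+//   // Sema attaches itself via the SemaConsumer interface; once the whole
+//   // translation unit is parsed, HandleTranslationUnit() serializes the AST
+//   // and flushes the bitstream to OS.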
+
+PCHGenerator::~PCHGenerator() {
+}
+
+void PCHGenerator::HandleTranslationUnit(ASTContext &Ctx) {
+ if (PP.getDiagnostics().hasErrorOccurred())
+ return;
+
+ // Emit the PCH file
+ assert(SemaPtr && "No Sema?");
+ Writer.WriteAST(*SemaPtr, StatCalls, OutputFile, Module, isysroot);
+
+ // Write the generated bitstream to "Out".
+ Out->write((char *)&Buffer.front(), Buffer.size());
+
+ // Make sure it hits disk now.
+ Out->flush();
+
+ // Free up some memory, in case the process is kept alive.
+ Buffer.clear();
+}
+
+ASTMutationListener *PCHGenerator::GetASTMutationListener() {
+ return &Writer;
+}
+
+ASTDeserializationListener *PCHGenerator::GetASTDeserializationListener() {
+ return &Writer;
+}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/Module.cpp b/contrib/llvm/tools/clang/lib/Serialization/Module.cpp
new file mode 100644
index 0000000..16b95e2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/Module.cpp
@@ -0,0 +1,115 @@
+//===--- Module.cpp - Module description ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ModuleFile class, which describes a module that
+// has been loaded from an AST file.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Serialization/Module.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "ASTReaderInternals.h"
+
+using namespace clang;
+using namespace serialization;
+using namespace reader;
+
+ModuleFile::ModuleFile(ModuleKind Kind, unsigned Generation)
+ : Kind(Kind), DirectlyImported(false), Generation(Generation), SizeInBits(0),
+ LocalNumSLocEntries(0), SLocEntryBaseID(0),
+ SLocEntryBaseOffset(0), SLocEntryOffsets(0),
+ SLocFileOffsets(0), LocalNumIdentifiers(0),
+ IdentifierOffsets(0), BaseIdentifierID(0), IdentifierTableData(0),
+ IdentifierLookupTable(0), BasePreprocessedEntityID(0),
+ PreprocessedEntityOffsets(0), NumPreprocessedEntities(0),
+ LocalNumHeaderFileInfos(0),
+ HeaderFileInfoTableData(0), HeaderFileInfoTable(0),
+ HeaderFileFrameworkStrings(0), LocalNumSubmodules(0), BaseSubmoduleID(0),
+ LocalNumSelectors(0), SelectorOffsets(0), BaseSelectorID(0),
+ SelectorLookupTableData(0), SelectorLookupTable(0), LocalNumDecls(0),
+ DeclOffsets(0), BaseDeclID(0),
+ LocalNumCXXBaseSpecifiers(0), CXXBaseSpecifiersOffsets(0),
+ FileSortedDecls(0), RedeclarationsMap(0), LocalNumRedeclarationsInMap(0),
+ ObjCCategoriesMap(0), LocalNumObjCCategoriesInMap(0),
+ LocalNumTypes(0), TypeOffsets(0), BaseTypeIndex(0), StatCache(0)
+{}
+
+ModuleFile::~ModuleFile() {
+ for (DeclContextInfosMap::iterator I = DeclContextInfos.begin(),
+ E = DeclContextInfos.end();
+ I != E; ++I) {
+ if (I->second.NameLookupTableData)
+ delete static_cast<ASTDeclContextNameLookupTable*>(
+ I->second.NameLookupTableData);
+ }
+
+ delete static_cast<ASTIdentifierLookupTable *>(IdentifierLookupTable);
+ delete static_cast<HeaderFileInfoLookupTable *>(HeaderFileInfoTable);
+ delete static_cast<ASTSelectorLookupTable *>(SelectorLookupTable);
+}
+
+template<typename Key, typename Offset, unsigned InitialCapacity>
+static void
+dumpLocalRemap(StringRef Name,
+ const ContinuousRangeMap<Key, Offset, InitialCapacity> &Map) {
+ if (Map.begin() == Map.end())
+ return;
+
+ typedef ContinuousRangeMap<Key, Offset, InitialCapacity> MapType;
+ llvm::errs() << " " << Name << ":\n";
+ for (typename MapType::const_iterator I = Map.begin(), IEnd = Map.end();
+ I != IEnd; ++I) {
+ llvm::errs() << " " << I->first << " -> " << I->second << "\n";
+ }
+}
+
+void ModuleFile::dump() {
+ llvm::errs() << "\nModule: " << FileName << "\n";
+ if (!Imports.empty()) {
+ llvm::errs() << " Imports: ";
+ for (unsigned I = 0, N = Imports.size(); I != N; ++I) {
+ if (I)
+ llvm::errs() << ", ";
+ llvm::errs() << Imports[I]->FileName;
+ }
+ llvm::errs() << "\n";
+ }
+
+ // Remapping tables.
+ llvm::errs() << " Base source location offset: " << SLocEntryBaseOffset
+ << '\n';
+ dumpLocalRemap("Source location offset local -> global map", SLocRemap);
+
+ llvm::errs() << " Base identifier ID: " << BaseIdentifierID << '\n'
+ << " Number of identifiers: " << LocalNumIdentifiers << '\n';
+ dumpLocalRemap("Identifier ID local -> global map", IdentifierRemap);
+
+ llvm::errs() << " Base submodule ID: " << BaseSubmoduleID << '\n'
+ << " Number of submodules: " << LocalNumSubmodules << '\n';
+ dumpLocalRemap("Submodule ID local -> global map", SubmoduleRemap);
+
+ llvm::errs() << " Base selector ID: " << BaseSelectorID << '\n'
+ << " Number of selectors: " << LocalNumSelectors << '\n';
+ dumpLocalRemap("Selector ID local -> global map", SelectorRemap);
+
+ llvm::errs() << " Base preprocessed entity ID: " << BasePreprocessedEntityID
+ << '\n'
+ << " Number of preprocessed entities: "
+ << NumPreprocessedEntities << '\n';
+ dumpLocalRemap("Preprocessed entity ID local -> global map",
+ PreprocessedEntityRemap);
+
+ llvm::errs() << " Base type index: " << BaseTypeIndex << '\n'
+ << " Number of types: " << LocalNumTypes << '\n';
+ dumpLocalRemap("Type index local -> global map", TypeRemap);
+
+ llvm::errs() << " Base decl ID: " << BaseDeclID << '\n'
+ << " Number of decls: " << LocalNumDecls << '\n';
+ dumpLocalRemap("Decl ID local -> global map", DeclRemap);
+}
diff --git a/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp b/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp
new file mode 100644
index 0000000..ab364b7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Serialization/ModuleManager.cpp
@@ -0,0 +1,254 @@
+//===--- ModuleManager.cpp - Module Manager ---------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ModuleManager class, which manages a set of loaded
+// modules for the ASTReader.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Serialization/ModuleManager.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
+
+#ifndef NDEBUG
+#include "llvm/Support/GraphWriter.h"
+#endif
+
+using namespace clang;
+using namespace serialization;
+
+ModuleFile *ModuleManager::lookup(StringRef Name) {
+ const FileEntry *Entry = FileMgr.getFile(Name);
+ return Modules[Entry];
+}
+
+llvm::MemoryBuffer *ModuleManager::lookupBuffer(StringRef Name) {
+ const FileEntry *Entry = FileMgr.getFile(Name);
+ return InMemoryBuffers[Entry];
+}
+
+std::pair<ModuleFile *, bool>
+ModuleManager::addModule(StringRef FileName, ModuleKind Type,
+ ModuleFile *ImportedBy, unsigned Generation,
+ std::string &ErrorStr) {
+ const FileEntry *Entry = FileMgr.getFile(FileName);
+ if (!Entry && FileName != "-") {
+ ErrorStr = "file not found";
+ return std::make_pair(static_cast<ModuleFile*>(0), false);
+ }
+
+ // Check whether we already loaded this module before creating a new one.
+ ModuleFile *&ModuleEntry = Modules[Entry];
+ bool NewModule = false;
+ if (!ModuleEntry) {
+ // Allocate a new module.
+ ModuleFile *New = new ModuleFile(Type, Generation);
+ New->FileName = FileName.str();
+ Chain.push_back(New);
+ NewModule = true;
+ ModuleEntry = New;
+
+ // Load the contents of the module
+ if (llvm::MemoryBuffer *Buffer = lookupBuffer(FileName)) {
+ // The buffer was already provided for us.
+ assert(Buffer && "Passed null buffer");
+ New->Buffer.reset(Buffer);
+ } else {
+ // Open the AST file.
+ llvm::error_code ec;
+ if (FileName == "-") {
+ ec = llvm::MemoryBuffer::getSTDIN(New->Buffer);
+ if (ec)
+ ErrorStr = ec.message();
+ } else
+ New->Buffer.reset(FileMgr.getBufferForFile(FileName, &ErrorStr));
+
+ if (!New->Buffer)
+ return std::make_pair(static_cast<ModuleFile*>(0), false);
+ }
+
+ // Initialize the stream
+ New->StreamFile.init((const unsigned char *)New->Buffer->getBufferStart(),
+ (const unsigned char *)New->Buffer->getBufferEnd());
+ }
+
+ if (ImportedBy) {
+ ModuleEntry->ImportedBy.insert(ImportedBy);
+ ImportedBy->Imports.insert(ModuleEntry);
+ } else {
+ ModuleEntry->DirectlyImported = true;
+ }
+
+ return std::make_pair(ModuleEntry, NewModule);
+}
+
+void ModuleManager::addInMemoryBuffer(StringRef FileName,
+ llvm::MemoryBuffer *Buffer) {
+
+ const FileEntry *Entry = FileMgr.getVirtualFile(FileName,
+ Buffer->getBufferSize(), 0);
+ InMemoryBuffers[Entry] = Buffer;
+}
+
+ModuleManager::ModuleManager(const FileSystemOptions &FSO) : FileMgr(FSO) { }
+
+ModuleManager::~ModuleManager() {
+ for (unsigned i = 0, e = Chain.size(); i != e; ++i)
+ delete Chain[e - i - 1];
+}
+
+void ModuleManager::visit(bool (*Visitor)(ModuleFile &M, void *UserData),
+ void *UserData) {
+ unsigned N = size();
+
+ // Record the number of incoming edges for each module. When we
+ // encounter a module with no incoming edges, push it onto the queue
+ // to seed the traversal.
+ SmallVector<ModuleFile *, 4> Queue;
+ Queue.reserve(N);
+ llvm::DenseMap<ModuleFile *, unsigned> UnusedIncomingEdges;
+ for (ModuleIterator M = begin(), MEnd = end(); M != MEnd; ++M) {
+ if (unsigned Size = (*M)->ImportedBy.size())
+ UnusedIncomingEdges[*M] = Size;
+ else
+ Queue.push_back(*M);
+ }
+
+ llvm::SmallPtrSet<ModuleFile *, 4> Skipped;
+ unsigned QueueStart = 0;
+ while (QueueStart < Queue.size()) {
+ ModuleFile *CurrentModule = Queue[QueueStart++];
+
+ // Check whether this module should be skipped.
+ if (Skipped.count(CurrentModule))
+ continue;
+
+ if (Visitor(*CurrentModule, UserData)) {
+ // The visitor has requested that we cut off visitation of any
+ // module that the current module depends on. To do this, walk the
+ // current module's import closure and mark every reachable module
+ // as skipped, so it is never visited.
+ SmallVector<ModuleFile *, 4> Stack;
+ Stack.push_back(CurrentModule);
+ Skipped.insert(CurrentModule);
+ while (!Stack.empty()) {
+ ModuleFile *NextModule = Stack.back();
+ Stack.pop_back();
+
+ // For any module that this module depends on, push it on the
+ // stack (if it hasn't already been marked as visited).
+ for (llvm::SetVector<ModuleFile *>::iterator
+ M = NextModule->Imports.begin(),
+ MEnd = NextModule->Imports.end();
+ M != MEnd; ++M) {
+ if (Skipped.insert(*M))
+ Stack.push_back(*M);
+ }
+ }
+ continue;
+ }
+
+ // For any module that this module depends on, push it on the
+ // stack (if it hasn't already been marked as visited).
+ for (llvm::SetVector<ModuleFile *>::iterator M = CurrentModule->Imports.begin(),
+ MEnd = CurrentModule->Imports.end();
+ M != MEnd; ++M) {
+
+ // Remove our current module as an impediment to visiting the
+ // module we depend on. If we were the last unvisited module
+ // that depends on this particular module, push it into the
+ // queue to be visited.
+ unsigned &NumUnusedEdges = UnusedIncomingEdges[*M];
+ if (NumUnusedEdges && (--NumUnusedEdges == 0))
+ Queue.push_back(*M);
+ }
+ }
+}
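+
+// Illustration of the traversal above (a sketch): suppose module A imports B
+// and C, and B also imports C.  The incoming-edge counts start as {B: 1,
+// C: 2}, and the queue is seeded with A, which nothing imports.  Visiting A
+// releases B (and drops C's count to 1); visiting B releases C, giving the
+// order A, B, C -- importers strictly before the modules they import.  If
+// the visitor returns true for A, its entire import closure (B and C) is
+// marked skipped and never visited.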
+
+/// \brief Perform a depth-first visit of the current module.
+static bool visitDepthFirst(ModuleFile &M,
+ bool (*Visitor)(ModuleFile &M, bool Preorder,
+ void *UserData),
+ void *UserData,
+ llvm::SmallPtrSet<ModuleFile *, 4> &Visited) {
+ // Preorder visitation
+ if (Visitor(M, /*Preorder=*/true, UserData))
+ return true;
+
+ // Visit children
+ for (llvm::SetVector<ModuleFile *>::iterator IM = M.Imports.begin(),
+ IMEnd = M.Imports.end();
+ IM != IMEnd; ++IM) {
+ if (!Visited.insert(*IM))
+ continue;
+
+ if (visitDepthFirst(**IM, Visitor, UserData, Visited))
+ return true;
+ }
+
+ // Postorder visitation
+ return Visitor(M, /*Preorder=*/false, UserData);
+}
+
+void ModuleManager::visitDepthFirst(bool (*Visitor)(ModuleFile &M, bool Preorder,
+ void *UserData),
+ void *UserData) {
+ llvm::SmallPtrSet<ModuleFile *, 4> Visited;
+ for (unsigned I = 0, N = Chain.size(); I != N; ++I) {
+ if (!Visited.insert(Chain[I]))
+ continue;
+
+ if (::visitDepthFirst(*Chain[I], Visitor, UserData, Visited))
+ return;
+ }
+}
+
+#ifndef NDEBUG
+namespace llvm {
+ template<>
+ struct GraphTraits<ModuleManager> {
+ typedef ModuleFile NodeType;
+ typedef llvm::SetVector<ModuleFile *>::const_iterator ChildIteratorType;
+ typedef ModuleManager::ModuleConstIterator nodes_iterator;
+
+ static ChildIteratorType child_begin(NodeType *Node) {
+ return Node->Imports.begin();
+ }
+
+ static ChildIteratorType child_end(NodeType *Node) {
+ return Node->Imports.end();
+ }
+
+ static nodes_iterator nodes_begin(const ModuleManager &Manager) {
+ return Manager.begin();
+ }
+
+ static nodes_iterator nodes_end(const ModuleManager &Manager) {
+ return Manager.end();
+ }
+ };
+
+ template<>
+ struct DOTGraphTraits<ModuleManager> : public DefaultDOTGraphTraits {
+ explicit DOTGraphTraits(bool IsSimple = false)
+ : DefaultDOTGraphTraits(IsSimple) { }
+
+ static bool renderGraphFromBottomUp() {
+ return true;
+ }
+
+ std::string getNodeLabel(ModuleFile *M, const ModuleManager&) {
+ return llvm::sys::path::stem(M->FileName);
+ }
+ };
+}
+
+void ModuleManager::viewGraph() {
+ llvm::ViewGraph(*this, "Modules");
+}
+#endif
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp
new file mode 100644
index 0000000..84ea8c7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AdjustedReturnValueChecker.cpp
@@ -0,0 +1,92 @@
+//== AdjustedReturnValueChecker.cpp -----------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines AdjustedReturnValueChecker, a simple check to see if the
+// return value of a function call is different from the one the caller
+// expects it to be.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class AdjustedReturnValueChecker :
+ public Checker< check::PostStmt<CallExpr> > {
+public:
+ void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+};
+}
+
+void AdjustedReturnValueChecker::checkPostStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+
+ // Get the result type of the call.
+ QualType expectedResultTy = CE->getType();
+
+ // Fetch the signature of the called function.
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+
+ SVal V = state->getSVal(CE, LCtx);
+
+ if (V.isUnknown())
+ return;
+
+ // Casting to void? Discard the value.
+ if (expectedResultTy->isVoidType()) {
+ C.addTransition(state->BindExpr(CE, LCtx, UnknownVal()));
+ return;
+ }
+
+ const MemRegion *callee = state->getSVal(CE->getCallee(), LCtx).getAsRegion();
+ if (!callee)
+ return;
+
+ QualType actualResultTy;
+
+ if (const FunctionTextRegion *FT = dyn_cast<FunctionTextRegion>(callee)) {
+ const FunctionDecl *FD = FT->getDecl();
+ actualResultTy = FD->getResultType();
+ }
+ else if (const BlockDataRegion *BD = dyn_cast<BlockDataRegion>(callee)) {
+ const BlockTextRegion *BR = BD->getCodeRegion();
+ const BlockPointerType *BT=BR->getLocationType()->getAs<BlockPointerType>();
+ const FunctionType *FT = BT->getPointeeType()->getAs<FunctionType>();
+ actualResultTy = FT->getResultType();
+ }
+
+ // Can this happen?
+ if (actualResultTy.isNull())
+ return;
+
+ // For now, ignore references.
+ if (actualResultTy->getAs<ReferenceType>())
+ return;
+
+
+ // Are they the same?
+ if (expectedResultTy != actualResultTy) {
+ // FIXME: Do more checking and actually emit an error. At least performing
+ // the cast avoids some assertion failures elsewhere.
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ V = svalBuilder.evalCast(V, expectedResultTy, actualResultTy);
+ C.addTransition(state->BindExpr(CE, LCtx, V));
+ }
+}
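+
+// Example of the mismatch this checker looks for (illustrative C code):
+//
+//   double f(void);
+//   int r = ((int (*)(void))f)();   // call through a cast function pointer
+//
+// The call expression has type 'int' (expectedResultTy) while the callee's
+// declared result type is 'double' (actualResultTy), so the value is run
+// through evalCast() to keep later reasoning consistent.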
+
+void ento::registerAdjustedReturnValueChecker(CheckerManager &mgr) {
+ mgr.registerChecker<AdjustedReturnValueChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
new file mode 100644
index 0000000..aa6f97b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AnalyzerStatsChecker.cpp
@@ -0,0 +1,140 @@
+//==--AnalyzerStatsChecker.cpp - Analyzer visitation statistics --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file reports various statistics about analyzer visitation.
+//===----------------------------------------------------------------------===//
+#define DEBUG_TYPE "StatsChecker"
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/Statistic.h"
+
+using namespace clang;
+using namespace ento;
+
+STATISTIC(NumBlocks,
+ "The # of blocks in top level functions");
+STATISTIC(NumBlocksUnreachable,
+ "The # of unreachable blocks in analyzing top level functions");
+
+namespace {
+class AnalyzerStatsChecker : public Checker<check::EndAnalysis> {
+public:
+ void checkEndAnalysis(ExplodedGraph &G, BugReporter &B,ExprEngine &Eng) const;
+};
+}
+
+void AnalyzerStatsChecker::checkEndAnalysis(ExplodedGraph &G,
+ BugReporter &B,
+ ExprEngine &Eng) const {
+ const CFG *C = 0;
+ const SourceManager &SM = B.getSourceManager();
+ llvm::SmallPtrSet<const CFGBlock*, 256> reachable;
+
+ // Root node should have the location context of the top most function.
+ const ExplodedNode *GraphRoot = *G.roots_begin();
+ const LocationContext *LC = GraphRoot->getLocation().getLocationContext();
+
+ const Decl *D = LC->getDecl();
+
+ // Iterate over the exploded graph.
+ for (ExplodedGraph::node_iterator I = G.nodes_begin();
+ I != G.nodes_end(); ++I) {
+ const ProgramPoint &P = I->getLocation();
+
+ // Only check the coverage in the top level function (optimization).
+ if (D != P.getLocationContext()->getDecl())
+ continue;
+
+ if (const BlockEntrance *BE = dyn_cast<BlockEntrance>(&P)) {
+ const CFGBlock *CB = BE->getBlock();
+ reachable.insert(CB);
+ }
+ }
+
+ // Get the CFG and the Decl of this block.
+ C = LC->getCFG();
+
+ unsigned total = 0, unreachable = 0;
+
+ // Find CFGBlocks that were not covered by any node
+ for (CFG::const_iterator I = C->begin(); I != C->end(); ++I) {
+ const CFGBlock *CB = *I;
+ ++total;
+ // Check if the block is unreachable
+ if (!reachable.count(CB)) {
+ ++unreachable;
+ }
+ }
+
+ // We never 'reach' the entry block, so correct the unreachable count
+ unreachable--;
+ // There is also no BlockEntrance corresponding to the exit block, so
+ // assume it is reached as well.
+ unreachable--;
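+ // For example (illustrative): a fully analyzed function whose CFG has an
+ // entry block, an exit block, and two visited body blocks gives total == 4
+ // and an initial unreachable count of 2, since entry and exit never appear
+ // as a BlockEntrance; after the two corrections, unreachable == 0.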
+
+ // Generate the warning string
+ SmallString<128> buf;
+ llvm::raw_svector_ostream output(buf);
+ PresumedLoc Loc = SM.getPresumedLoc(D->getLocation());
+ if (!Loc.isValid())
+ return;
+
+ if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
+ const NamedDecl *ND = cast<NamedDecl>(D);
+ output << *ND;
+ }
+ else if (isa<BlockDecl>(D)) {
+ output << "block(line:" << Loc.getLine() << ":col:" << Loc.getColumn();
+ }
+
+ NumBlocksUnreachable += unreachable;
+ NumBlocks += total;
+ std::string NameOfRootFunction = output.str();
+
+ output << " -> Total CFGBlocks: " << total << " | Unreachable CFGBlocks: "
+ << unreachable << " | Exhausted Block: "
+ << (Eng.wasBlocksExhausted() ? "yes" : "no")
+ << " | Empty WorkList: "
+ << (Eng.hasEmptyWorkList() ? "yes" : "no");
+
+ B.EmitBasicReport(D, "Analyzer Statistics", "Internal Statistics",
+ output.str(), PathDiagnosticLocation(D, SM));
+
+ // Emit warning for each block we bailed out on.
+ typedef CoreEngine::BlocksExhausted::const_iterator ExhaustedIterator;
+ const CoreEngine &CE = Eng.getCoreEngine();
+ for (ExhaustedIterator I = CE.blocks_exhausted_begin(),
+ E = CE.blocks_exhausted_end(); I != E; ++I) {
+ const BlockEdge &BE = I->first;
+ const CFGBlock *Exit = BE.getDst();
+ const CFGElement &CE = Exit->front();
+ if (const CFGStmt *CS = dyn_cast<CFGStmt>(&CE)) {
+ SmallString<128> bufI;
+ llvm::raw_svector_ostream outputI(bufI);
+ outputI << "(" << NameOfRootFunction << ")" <<
+ ": The analyzer generated a sink at this point";
+ B.EmitBasicReport(D, "Sink Point", "Internal Statistics", outputI.str(),
+ PathDiagnosticLocation::createBegin(CS->getStmt(),
+ SM, LC));
+ }
+ }
+}
+
+void ento::registerAnalyzerStatsChecker(CheckerManager &mgr) {
+ mgr.registerChecker<AnalyzerStatsChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
new file mode 100644
index 0000000..b2ad184
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
@@ -0,0 +1,92 @@
+//== ArrayBoundChecker.cpp ------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ArrayBoundChecker, a path-sensitive check that looks
+// for out-of-bound array element accesses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ArrayBoundChecker :
+ public Checker<check::Location> {
+ mutable OwningPtr<BuiltinBug> BT;
+public:
+ void checkLocation(SVal l, bool isLoad, const Stmt* S,
+ CheckerContext &C) const;
+};
+}
+
+void ArrayBoundChecker::checkLocation(SVal l, bool isLoad, const Stmt* LoadS,
+ CheckerContext &C) const {
+ // Check for out of bound array element access.
+ const MemRegion *R = l.getAsRegion();
+ if (!R)
+ return;
+
+ const ElementRegion *ER = dyn_cast<ElementRegion>(R);
+ if (!ER)
+ return;
+
+ // Get the index of the accessed element.
+ DefinedOrUnknownSVal Idx = cast<DefinedOrUnknownSVal>(ER->getIndex());
+
+ // A zero index is always in bounds; this also passes ElementRegions created
+ // for pointer casts.
+ if (Idx.isZeroConstant())
+ return;
+
+ ProgramStateRef state = C.getState();
+
+ // Get the size of the array.
+ DefinedOrUnknownSVal NumElements
+ = C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
+ ER->getValueType());
+
+ ProgramStateRef StInBound = state->assumeInBound(Idx, NumElements, true);
+ ProgramStateRef StOutBound = state->assumeInBound(Idx, NumElements, false);
+ if (StOutBound && !StInBound) {
+ ExplodedNode *N = C.generateSink(StOutBound);
+ if (!N)
+ return;
+
+ if (!BT)
+ BT.reset(new BuiltinBug("Out-of-bound array access",
+ "Access out-of-bound array element (buffer overflow)"));
+
+ // FIXME: It would be nice to eventually make this diagnostic more clear,
+ // e.g., by referencing the original declaration or by saying *why* this
+ // reference is outside the range.
+
+ // Generate a report for this bug.
+ BugReport *report =
+ new BugReport(*BT, BT->getDescription(), N);
+
+ report->addRange(LoadS->getSourceRange());
+ C.EmitReport(report);
+ return;
+ }
+
+ // Array bound check succeeded. From this point forward the array bound
+ // check should always succeed.
+ C.addTransition(StInBound);
+}
+
+void ento::registerArrayBoundChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ArrayBoundChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
new file mode 100644
index 0000000..c6efe94
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -0,0 +1,318 @@
+//== ArrayBoundCheckerV2.cpp ------------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ArrayBoundCheckerV2, a path-sensitive check that looks
+// for out-of-bound array element accesses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/AST/CharUnits.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ArrayBoundCheckerV2 :
+ public Checker<check::Location> {
+ mutable OwningPtr<BuiltinBug> BT;
+
+ enum OOB_Kind { OOB_Precedes, OOB_Excedes, OOB_Tainted };
+
+ void reportOOB(CheckerContext &C, ProgramStateRef errorState,
+ OOB_Kind kind) const;
+
+public:
+ void checkLocation(SVal l, bool isLoad, const Stmt*S,
+ CheckerContext &C) const;
+};
+
+// FIXME: Eventually replace RegionRawOffset with this class.
+class RegionRawOffsetV2 {
+private:
+ const SubRegion *baseRegion;
+ SVal byteOffset;
+
+ RegionRawOffsetV2()
+ : baseRegion(0), byteOffset(UnknownVal()) {}
+
+public:
+ RegionRawOffsetV2(const SubRegion* base, SVal offset)
+ : baseRegion(base), byteOffset(offset) {}
+
+ NonLoc getByteOffset() const { return cast<NonLoc>(byteOffset); }
+ const SubRegion *getRegion() const { return baseRegion; }
+
+ static RegionRawOffsetV2 computeOffset(ProgramStateRef state,
+ SValBuilder &svalBuilder,
+ SVal location);
+
+ void dump() const;
+ void dumpToStream(raw_ostream &os) const;
+};
+}
+
+static SVal computeExtentBegin(SValBuilder &svalBuilder,
+ const MemRegion *region) {
+ while (true)
+ switch (region->getKind()) {
+ default:
+ return svalBuilder.makeZeroArrayIndex();
+ case MemRegion::SymbolicRegionKind:
+ // FIXME: improve this later by tracking symbolic lower bounds
+ // for symbolic regions.
+ return UnknownVal();
+ case MemRegion::ElementRegionKind:
+ region = cast<SubRegion>(region)->getSuperRegion();
+ continue;
+ }
+}
+
+void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
+ const Stmt* LoadS,
+ CheckerContext &checkerContext) const {
+
+ // NOTE: Instead of using ProgramState::assumeInBound(), we are prototyping
+ // some new logic here that reasons directly about memory region extents.
+ // Once that logic is more mature, we can bring it back to assumeInBound()
+ // for all clients to use.
+ //
+ // The algorithm we are using here for bounds checking is to see if the
+ // memory access is within the extent of the base region. Since we
+ // have some flexibility in defining the base region, we can achieve
+ // various levels of conservatism in our buffer overflow checking.
+ ProgramStateRef state = checkerContext.getState();
+ ProgramStateRef originalState = state;
+
+ SValBuilder &svalBuilder = checkerContext.getSValBuilder();
+ const RegionRawOffsetV2 &rawOffset =
+ RegionRawOffsetV2::computeOffset(state, svalBuilder, location);
+
+ if (!rawOffset.getRegion())
+ return;
+
+ // CHECK LOWER BOUND: Is byteOffset < extent begin?
+ // If so, we are doing a load/store
+ // before the first valid offset in the memory region.
+
+ SVal extentBegin = computeExtentBegin(svalBuilder, rawOffset.getRegion());
+
+ if (isa<NonLoc>(extentBegin)) {
+ SVal lowerBound
+ = svalBuilder.evalBinOpNN(state, BO_LT, rawOffset.getByteOffset(),
+ cast<NonLoc>(extentBegin),
+ svalBuilder.getConditionType());
+
+ NonLoc *lowerBoundToCheck = dyn_cast<NonLoc>(&lowerBound);
+ if (!lowerBoundToCheck)
+ return;
+
+ ProgramStateRef state_precedesLowerBound, state_withinLowerBound;
+ llvm::tie(state_precedesLowerBound, state_withinLowerBound) =
+ state->assume(*lowerBoundToCheck);
+
+ // Are we constrained enough to definitely precede the lower bound?
+ if (state_precedesLowerBound && !state_withinLowerBound) {
+ reportOOB(checkerContext, state_precedesLowerBound, OOB_Precedes);
+ return;
+ }
+
+ // Otherwise, assume the constraint of the lower bound.
+ assert(state_withinLowerBound);
+ state = state_withinLowerBound;
+ }
+
+ do {
+ // CHECK UPPER BOUND: Is byteOffset >= extent(baseRegion)? If so,
+ // we are doing a load/store after the last valid offset.
+ DefinedOrUnknownSVal extentVal =
+ rawOffset.getRegion()->getExtent(svalBuilder);
+ if (!isa<NonLoc>(extentVal))
+ break;
+
+ SVal upperbound
+ = svalBuilder.evalBinOpNN(state, BO_GE, rawOffset.getByteOffset(),
+ cast<NonLoc>(extentVal),
+ svalBuilder.getConditionType());
+
+ NonLoc *upperboundToCheck = dyn_cast<NonLoc>(&upperbound);
+ if (!upperboundToCheck)
+ break;
+
+ ProgramStateRef state_exceedsUpperBound, state_withinUpperBound;
+ llvm::tie(state_exceedsUpperBound, state_withinUpperBound) =
+ state->assume(*upperboundToCheck);
+
+ // If we are under-constrained and the index variables are tainted, report.
+ if (state_exceedsUpperBound && state_withinUpperBound) {
+ if (state->isTainted(rawOffset.getByteOffset()))
+ reportOOB(checkerContext, state_exceedsUpperBound, OOB_Tainted);
+ return;
+ }
+
+ // If we are constrained enough to definitely exceed the upper bound, report.
+ if (state_exceedsUpperBound) {
+ assert(!state_withinUpperBound);
+ reportOOB(checkerContext, state_exceedsUpperBound, OOB_Excedes);
+ return;
+ }
+
+ assert(state_withinUpperBound);
+ state = state_withinUpperBound;
+ }
+ while (false);
+
+ if (state != originalState)
+ checkerContext.addTransition(state);
+}
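+
+// Worked example of the two checks above (illustrative only, assuming a
+// 4-byte int):
+//
+//   int buf[4];
+//   buf[5] = 0;
+//
+// computeOffset() yields a byte offset of 5 * 4 == 20 against the region for
+// 'buf'.  The lower-bound check asks whether 20 < 0, which is infeasible, so
+// we stay within the lower bound; the upper-bound check asks whether
+// 20 >= extent(buf) == 16, which is provably true, so reportOOB() fires with
+// OOB_Excedes.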
+
+void ArrayBoundCheckerV2::reportOOB(CheckerContext &checkerContext,
+ ProgramStateRef errorState,
+ OOB_Kind kind) const {
+
+ ExplodedNode *errorNode = checkerContext.generateSink(errorState);
+ if (!errorNode)
+ return;
+
+ if (!BT)
+ BT.reset(new BuiltinBug("Out-of-bound access"));
+
+ // FIXME: These diagnostics are preliminary. We should get far better
+ // diagnostics for explaining buffer overruns.
+
+ SmallString<256> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << "Out of bound memory access ";
+ switch (kind) {
+ case OOB_Precedes:
+ os << "(accessed memory precedes memory block)";
+ break;
+ case OOB_Excedes:
+ os << "(access exceeds upper limit of memory block)";
+ break;
+ case OOB_Tainted:
+ os << "(index is tainted)";
+ break;
+ }
+
+ checkerContext.EmitReport(new BugReport(*BT, os.str(), errorNode));
+}
+
+void RegionRawOffsetV2::dump() const {
+ dumpToStream(llvm::errs());
+}
+
+void RegionRawOffsetV2::dumpToStream(raw_ostream &os) const {
+ os << "raw_offset_v2{" << getRegion() << ',' << getByteOffset() << '}';
+}
+
+// FIXME: Merge with the implementation of the same method in Store.cpp
+static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *D = RT->getDecl();
+ if (!D->getDefinition())
+ return false;
+ }
+
+ return true;
+}
+
+
+// Lazily computes a value to be used by 'computeOffset'. If 'val'
+// is unknown or undefined, we lazily substitute '0'. Otherwise,
+// return 'val'.
+static inline SVal getValue(SVal val, SValBuilder &svalBuilder) {
+ return isa<UndefinedVal>(val) ? svalBuilder.makeArrayIndex(0) : val;
+}
+
+// Scale a base value by a scaling factor, and return the scaled
+// value as an SVal. Used by 'computeOffset'.
+static inline SVal scaleValue(ProgramStateRef state,
+ NonLoc baseVal, CharUnits scaling,
+ SValBuilder &sb) {
+ return sb.evalBinOpNN(state, BO_Mul, baseVal,
+ sb.makeArrayIndex(scaling.getQuantity()),
+ sb.getArrayIndexType());
+}
+
+// Add an SVal to another, treating unknown and undefined values as
+// summing to UnknownVal. Used by 'computeOffset'.
+static SVal addValue(ProgramStateRef state, SVal x, SVal y,
+ SValBuilder &svalBuilder) {
+ // We treat UnknownVals and UndefinedVals the same here because we
+ // only care about computing offsets.
+ if (x.isUnknownOrUndef() || y.isUnknownOrUndef())
+ return UnknownVal();
+
+ return svalBuilder.evalBinOpNN(state, BO_Add,
+ cast<NonLoc>(x), cast<NonLoc>(y),
+ svalBuilder.getArrayIndexType());
+}
+
+/// Compute a raw byte offset from a base region. Used for array bounds
+/// checking.
+RegionRawOffsetV2 RegionRawOffsetV2::computeOffset(ProgramStateRef state,
+ SValBuilder &svalBuilder,
+ SVal location)
+{
+ const MemRegion *region = location.getAsRegion();
+ SVal offset = UndefinedVal();
+
+ while (region) {
+ switch (region->getKind()) {
+ default: {
+ if (const SubRegion *subReg = dyn_cast<SubRegion>(region)) {
+ offset = getValue(offset, svalBuilder);
+ if (!offset.isUnknownOrUndef())
+ return RegionRawOffsetV2(subReg, offset);
+ }
+ return RegionRawOffsetV2();
+ }
+ case MemRegion::ElementRegionKind: {
+ const ElementRegion *elemReg = cast<ElementRegion>(region);
+ SVal index = elemReg->getIndex();
+ if (!isa<NonLoc>(index))
+ return RegionRawOffsetV2();
+ QualType elemType = elemReg->getElementType();
+ // If the element is an incomplete type, go no further.
+ ASTContext &astContext = svalBuilder.getContext();
+ if (!IsCompleteType(astContext, elemType))
+ return RegionRawOffsetV2();
+
+ // Update the offset.
+ offset = addValue(state,
+ getValue(offset, svalBuilder),
+ scaleValue(state,
+ cast<NonLoc>(index),
+ astContext.getTypeSizeInChars(elemType),
+ svalBuilder),
+ svalBuilder);
+
+ if (offset.isUnknownOrUndef())
+ return RegionRawOffsetV2();
+
+ region = elemReg->getSuperRegion();
+ continue;
+ }
+ }
+ }
+ return RegionRawOffsetV2();
+}
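+
+// Illustrative trace of computeOffset() (a sketch, assuming a 4-byte int):
+// for the access p[2][3] with 'int (*p)[5]', the location is an ElementRegion
+// (index 3, int) nested inside an ElementRegion (index 2, int[5]) on top of
+// the region 'p' points to.  The loop accumulates
+//
+//   offset = 3 * 4 + 2 * (5 * 4) = 52
+//
+// and returns that byte offset paired with the pointee region as the base.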
+
+
+void ento::registerArrayBoundCheckerV2(CheckerManager &mgr) {
+ mgr.registerChecker<ArrayBoundCheckerV2>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp
new file mode 100644
index 0000000..ab66e98
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/AttrNonNullChecker.cpp
@@ -0,0 +1,134 @@
+//===--- AttrNonNullChecker.cpp - Nonnull arguments checker ----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines AttrNonNullChecker, a builtin check in ExprEngine that
+// performs checks for arguments declared to have the 'nonnull' attribute.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class AttrNonNullChecker
+ : public Checker< check::PreStmt<CallExpr> > {
+ mutable OwningPtr<BugType> BT;
+public:
+
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+void AttrNonNullChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+
+ // Check if the callee has a 'nonnull' attribute.
+ SVal X = state->getSVal(CE->getCallee(), LCtx);
+
+ const FunctionDecl *FD = X.getAsFunctionDecl();
+ if (!FD)
+ return;
+
+ const NonNullAttr* Att = FD->getAttr<NonNullAttr>();
+ if (!Att)
+ return;
+
+ // Iterate through the arguments of CE and check them for null.
+ unsigned idx = 0;
+
+ for (CallExpr::const_arg_iterator I=CE->arg_begin(), E=CE->arg_end(); I!=E;
+ ++I, ++idx) {
+
+ if (!Att->isNonNull(idx))
+ continue;
+
+ SVal V = state->getSVal(*I, LCtx);
+ DefinedSVal *DV = dyn_cast<DefinedSVal>(&V);
+
+ // If the value is unknown or undefined, we can't perform this check.
+ if (!DV)
+ continue;
+
+ if (!isa<Loc>(*DV)) {
+ // If the argument is a union type, we want to handle a potential
+      // transparent_union GCC extension.
+ QualType T = (*I)->getType();
+ const RecordType *UT = T->getAsUnionType();
+ if (!UT || !UT->getDecl()->hasAttr<TransparentUnionAttr>())
+ continue;
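+      // A transparent union is passed as its single wrapped field; unwrap
+      // the compound value and check that field's value instead.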
+ if (nonloc::CompoundVal *CSV = dyn_cast<nonloc::CompoundVal>(DV)) {
+ nonloc::CompoundVal::iterator CSV_I = CSV->begin();
+ assert(CSV_I != CSV->end());
+ V = *CSV_I;
+ DV = dyn_cast<DefinedSVal>(&V);
+ assert(++CSV_I == CSV->end());
+ if (!DV)
+ continue;
+ }
+ else {
+ // FIXME: Handle LazyCompoundVals?
+ continue;
+ }
+ }
+
+ ConstraintManager &CM = C.getConstraintManager();
+ ProgramStateRef stateNotNull, stateNull;
+ llvm::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
+
+ if (stateNull && !stateNotNull) {
+ // Generate an error node. Check for a null node in case
+ // we cache out.
+ if (ExplodedNode *errorNode = C.generateSink(stateNull)) {
+
+ // Lazily allocate the BugType object if it hasn't already been
+ // created. Ownership is transferred to the BugReporter object once
+        // the BugReport is passed to 'EmitReport'.
+ if (!BT)
+ BT.reset(new BugType("Argument with 'nonnull' attribute passed null",
+ "API"));
+
+ BugReport *R =
+ new BugReport(*BT, "Null pointer passed as an argument to a "
+ "'nonnull' parameter", errorNode);
+
+ // Highlight the range of the argument that was null.
+ const Expr *arg = *I;
+ R->addRange(arg->getSourceRange());
+ R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(errorNode,
+ arg, R));
+ // Emit the bug report.
+ C.EmitReport(R);
+ }
+
+ // Always return. Either we cached out or we just emitted an error.
+ return;
+ }
+
+    // If a pointer value passed the check, we should assume that it is
+ // indeed not null from this point forward.
+ assert(stateNotNull);
+ state = stateNotNull;
+ }
+
+  // If we reach here, all of the arguments passed the nonnull check.
+  // If 'state' has been updated, generate a new node.
+ C.addTransition(state);
+}
+
+void ento::registerAttrNonNullChecker(CheckerManager &mgr) {
+ mgr.registerChecker<AttrNonNullChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
new file mode 100644
index 0000000..6dd0a8c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -0,0 +1,672 @@
+//== BasicObjCFoundationChecks.cpp - Simple Apple-Foundation checks -*- C++ -*--
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BasicObjCFoundationChecks, a class that encapsulates
+// a set of simple checks to run on Objective-C code using Apple's Foundation
+// classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ASTContext.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class APIMisuse : public BugType {
+public:
+ APIMisuse(const char* name) : BugType(name, "API Misuse (Apple)") {}
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+static const char* GetReceiverNameType(const ObjCMessage &msg) {
+ if (const ObjCInterfaceDecl *ID = msg.getReceiverInterface())
+ return ID->getIdentifier()->getNameStart();
+ return 0;
+}
+
+static bool isReceiverClassOrSuperclass(const ObjCInterfaceDecl *ID,
+ StringRef ClassName) {
+ if (ID->getIdentifier()->getName() == ClassName)
+ return true;
+
+ if (const ObjCInterfaceDecl *Super = ID->getSuperClass())
+ return isReceiverClassOrSuperclass(Super, ClassName);
+
+ return false;
+}
+
+static inline bool isNil(SVal X) {
+ return isa<loc::ConcreteInt>(X);
+}
+
+//===----------------------------------------------------------------------===//
+// NilArgChecker - Check for prohibited nil arguments to ObjC method calls.
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class NilArgChecker : public Checker<check::PreObjCMessage> {
+ mutable OwningPtr<APIMisuse> BT;
+
+ void WarnNilArg(CheckerContext &C,
+ const ObjCMessage &msg, unsigned Arg) const;
+
+ public:
+ void checkPreObjCMessage(ObjCMessage msg, CheckerContext &C) const;
+ };
+}
+
+void NilArgChecker::WarnNilArg(CheckerContext &C,
+ const ObjCMessage &msg,
+ unsigned int Arg) const
+{
+ if (!BT)
+ BT.reset(new APIMisuse("nil argument"));
+
+ if (ExplodedNode *N = C.generateSink()) {
+ SmallString<128> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+ os << "Argument to '" << GetReceiverNameType(msg) << "' method '"
+ << msg.getSelector().getAsString() << "' cannot be nil";
+
+ BugReport *R = new BugReport(*BT, os.str(), N);
+ R->addRange(msg.getArgSourceRange(Arg));
+ C.EmitReport(R);
+ }
+}
+
+void NilArgChecker::checkPreObjCMessage(ObjCMessage msg,
+ CheckerContext &C) const {
+ const ObjCInterfaceDecl *ID = msg.getReceiverInterface();
+ if (!ID)
+ return;
+
+ if (isReceiverClassOrSuperclass(ID, "NSString")) {
+ Selector S = msg.getSelector();
+
+ if (S.isUnarySelector())
+ return;
+
+    // FIXME: Doing these checks with lexical comparisons is going to be
+    // really slow.
+
+ std::string NameStr = S.getAsString();
+ StringRef Name(NameStr);
+ assert(!Name.empty());
+
+ // FIXME: Checking for initWithFormat: will not work in most cases
+ // yet because [NSString alloc] returns id, not NSString*. We will
+ // need support for tracking expected-type information in the analyzer
+ // to find these errors.
+ if (Name == "caseInsensitiveCompare:" ||
+ Name == "compare:" ||
+ Name == "compare:options:" ||
+ Name == "compare:options:range:" ||
+ Name == "compare:options:range:locale:" ||
+ Name == "componentsSeparatedByCharactersInSet:" ||
+ Name == "initWithFormat:") {
+ if (isNil(msg.getArgSVal(0, C.getLocationContext(), C.getState())))
+ WarnNilArg(C, msg, 0);
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Error reporting.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFNumberCreateChecker : public Checker< check::PreStmt<CallExpr> > {
+ mutable OwningPtr<APIMisuse> BT;
+ mutable IdentifierInfo* II;
+public:
+ CFNumberCreateChecker() : II(0) {}
+
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+private:
+ void EmitError(const TypedRegion* R, const Expr *Ex,
+ uint64_t SourceSize, uint64_t TargetSize, uint64_t NumberKind);
+};
+} // end anonymous namespace
+
+enum CFNumberType {
+ kCFNumberSInt8Type = 1,
+ kCFNumberSInt16Type = 2,
+ kCFNumberSInt32Type = 3,
+ kCFNumberSInt64Type = 4,
+ kCFNumberFloat32Type = 5,
+ kCFNumberFloat64Type = 6,
+ kCFNumberCharType = 7,
+ kCFNumberShortType = 8,
+ kCFNumberIntType = 9,
+ kCFNumberLongType = 10,
+ kCFNumberLongLongType = 11,
+ kCFNumberFloatType = 12,
+ kCFNumberDoubleType = 13,
+ kCFNumberCFIndexType = 14,
+ kCFNumberNSIntegerType = 15,
+ kCFNumberCGFloatType = 16
+};
+
+namespace {
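+  // A minimal local optional-value helper: it either holds a known value or
+  // is empty, and asserts if an unknown value is accessed.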
+ template<typename T>
+ class Optional {
+ bool IsKnown;
+ T Val;
+ public:
+ Optional() : IsKnown(false), Val(0) {}
+ Optional(const T& val) : IsKnown(true), Val(val) {}
+
+ bool isKnown() const { return IsKnown; }
+
+ const T& getValue() const {
+ assert (isKnown());
+ return Val;
+ }
+
+ operator const T&() const {
+ return getValue();
+ }
+ };
+}
+
+static Optional<uint64_t> GetCFNumberSize(ASTContext &Ctx, uint64_t i) {
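+  // Bit widths of the fixed-size kinds (SInt8 through Float64); the remaining
+  // kinds map to C types and are sized via the ASTContext below.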
+ static const unsigned char FixedSize[] = { 8, 16, 32, 64, 32, 64 };
+
+ if (i < kCFNumberCharType)
+ return FixedSize[i-1];
+
+ QualType T;
+
+ switch (i) {
+ case kCFNumberCharType: T = Ctx.CharTy; break;
+ case kCFNumberShortType: T = Ctx.ShortTy; break;
+ case kCFNumberIntType: T = Ctx.IntTy; break;
+ case kCFNumberLongType: T = Ctx.LongTy; break;
+ case kCFNumberLongLongType: T = Ctx.LongLongTy; break;
+ case kCFNumberFloatType: T = Ctx.FloatTy; break;
+ case kCFNumberDoubleType: T = Ctx.DoubleTy; break;
+ case kCFNumberCFIndexType:
+ case kCFNumberNSIntegerType:
+ case kCFNumberCGFloatType:
+ // FIXME: We need a way to map from names to Type*.
+ default:
+ return Optional<uint64_t>();
+ }
+
+ return Ctx.getTypeSize(T);
+}
+
+#if 0
+static const char* GetCFNumberTypeStr(uint64_t i) {
+ static const char* Names[] = {
+ "kCFNumberSInt8Type",
+ "kCFNumberSInt16Type",
+ "kCFNumberSInt32Type",
+ "kCFNumberSInt64Type",
+ "kCFNumberFloat32Type",
+ "kCFNumberFloat64Type",
+ "kCFNumberCharType",
+ "kCFNumberShortType",
+ "kCFNumberIntType",
+ "kCFNumberLongType",
+ "kCFNumberLongLongType",
+ "kCFNumberFloatType",
+ "kCFNumberDoubleType",
+ "kCFNumberCFIndexType",
+ "kCFNumberNSIntegerType",
+ "kCFNumberCGFloatType"
+ };
+
+ return i <= kCFNumberCGFloatType ? Names[i-1] : "Invalid CFNumberType";
+}
+#endif
+
+void CFNumberCreateChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD)
+ return;
+
+ ASTContext &Ctx = C.getASTContext();
+ if (!II)
+ II = &Ctx.Idents.get("CFNumberCreate");
+
+ if (FD->getIdentifier() != II || CE->getNumArgs() != 3)
+ return;
+
+ // Get the value of the "theType" argument.
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal TheTypeVal = state->getSVal(CE->getArg(1), LCtx);
+
+ // FIXME: We really should allow ranges of valid theType values, and
+ // bifurcate the state appropriately.
+ nonloc::ConcreteInt* V = dyn_cast<nonloc::ConcreteInt>(&TheTypeVal);
+ if (!V)
+ return;
+
+ uint64_t NumberKind = V->getValue().getLimitedValue();
+ Optional<uint64_t> TargetSize = GetCFNumberSize(Ctx, NumberKind);
+
+ // FIXME: In some cases we can emit an error.
+ if (!TargetSize.isKnown())
+ return;
+
+ // Look at the value of the integer being passed by reference. Essentially
+ // we want to catch cases where the value passed in is not equal to the
+ // size of the type being created.
+ SVal TheValueExpr = state->getSVal(CE->getArg(2), LCtx);
+
+ // FIXME: Eventually we should handle arbitrary locations. We can do this
+ // by having an enhanced memory model that does low-level typing.
+ loc::MemRegionVal* LV = dyn_cast<loc::MemRegionVal>(&TheValueExpr);
+ if (!LV)
+ return;
+
+ const TypedValueRegion* R = dyn_cast<TypedValueRegion>(LV->stripCasts());
+ if (!R)
+ return;
+
+ QualType T = Ctx.getCanonicalType(R->getValueType());
+
+ // FIXME: If the pointee isn't an integer type, should we flag a warning?
+ // People can do weird stuff with pointers.
+
+ if (!T->isIntegerType())
+ return;
+
+ uint64_t SourceSize = Ctx.getTypeSize(T);
+
+ // CHECK: is SourceSize == TargetSize
+ if (SourceSize == TargetSize)
+ return;
+
+ // Generate an error. Only generate a sink if 'SourceSize < TargetSize';
+ // otherwise generate a regular node.
+ //
+ // FIXME: We can actually create an abstract "CFNumber" object that has
+ // the bits initialized to the provided values.
+ //
+ if (ExplodedNode *N = SourceSize < TargetSize ? C.generateSink()
+ : C.addTransition()) {
+ SmallString<128> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+
+ os << (SourceSize == 8 ? "An " : "A ")
+ << SourceSize << " bit integer is used to initialize a CFNumber "
+ "object that represents "
+ << (TargetSize == 8 ? "an " : "a ")
+ << TargetSize << " bit integer. ";
+
+ if (SourceSize < TargetSize)
+ os << (TargetSize - SourceSize)
+ << " bits of the CFNumber value will be garbage." ;
+ else
+ os << (SourceSize - TargetSize)
+ << " bits of the input integer will be lost.";
+
+ if (!BT)
+ BT.reset(new APIMisuse("Bad use of CFNumberCreate"));
+
+ BugReport *report = new BugReport(*BT, os.str(), N);
+ report->addRange(CE->getArg(2)->getSourceRange());
+ C.EmitReport(report);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// CFRetain/CFRelease checking for null arguments.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFRetainReleaseChecker : public Checker< check::PreStmt<CallExpr> > {
+ mutable OwningPtr<APIMisuse> BT;
+ mutable IdentifierInfo *Retain, *Release;
+public:
+ CFRetainReleaseChecker(): Retain(0), Release(0) {}
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+
+void CFRetainReleaseChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ // If the CallExpr doesn't have exactly 1 argument just give up checking.
+ if (CE->getNumArgs() != 1)
+ return;
+
+ ProgramStateRef state = C.getState();
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD)
+ return;
+
+ if (!BT) {
+ ASTContext &Ctx = C.getASTContext();
+ Retain = &Ctx.Idents.get("CFRetain");
+ Release = &Ctx.Idents.get("CFRelease");
+ BT.reset(new APIMisuse("null passed to CFRetain/CFRelease"));
+ }
+
+ // Check if we called CFRetain/CFRelease.
+ const IdentifierInfo *FuncII = FD->getIdentifier();
+ if (!(FuncII == Retain || FuncII == Release))
+ return;
+
+ // FIXME: The rest of this just checks that the argument is non-null.
+ // It should probably be refactored and combined with AttrNonNullChecker.
+
+ // Get the argument's value.
+ const Expr *Arg = CE->getArg(0);
+ SVal ArgVal = state->getSVal(Arg, C.getLocationContext());
+ DefinedSVal *DefArgVal = dyn_cast<DefinedSVal>(&ArgVal);
+ if (!DefArgVal)
+ return;
+
+ // Get a NULL value.
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ DefinedSVal zero = cast<DefinedSVal>(svalBuilder.makeZeroVal(Arg->getType()));
+
+ // Make an expression asserting that they're equal.
+ DefinedOrUnknownSVal ArgIsNull = svalBuilder.evalEQ(state, zero, *DefArgVal);
+
+ // Are they equal?
+ ProgramStateRef stateTrue, stateFalse;
+ llvm::tie(stateTrue, stateFalse) = state->assume(ArgIsNull);
+
+ if (stateTrue && !stateFalse) {
+ ExplodedNode *N = C.generateSink(stateTrue);
+ if (!N)
+ return;
+
+ const char *description = (FuncII == Retain)
+ ? "Null pointer argument in call to CFRetain"
+ : "Null pointer argument in call to CFRelease";
+
+ BugReport *report = new BugReport(*BT, description, N);
+ report->addRange(Arg->getSourceRange());
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Arg,
+ report));
+ C.EmitReport(report);
+ return;
+ }
+
+ // From here on, we know the argument is non-null.
+ C.addTransition(stateFalse);
+}
+
+//===----------------------------------------------------------------------===//
+// Check for sending 'retain', 'release', or 'autorelease' directly to a Class.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ClassReleaseChecker : public Checker<check::PreObjCMessage> {
+ mutable Selector releaseS;
+ mutable Selector retainS;
+ mutable Selector autoreleaseS;
+ mutable Selector drainS;
+ mutable OwningPtr<BugType> BT;
+
+public:
+ void checkPreObjCMessage(ObjCMessage msg, CheckerContext &C) const;
+};
+}
+
+void ClassReleaseChecker::checkPreObjCMessage(ObjCMessage msg,
+ CheckerContext &C) const {
+
+ if (!BT) {
+ BT.reset(new APIMisuse("message incorrectly sent to class instead of class "
+ "instance"));
+
+ ASTContext &Ctx = C.getASTContext();
+ releaseS = GetNullarySelector("release", Ctx);
+ retainS = GetNullarySelector("retain", Ctx);
+ autoreleaseS = GetNullarySelector("autorelease", Ctx);
+ drainS = GetNullarySelector("drain", Ctx);
+ }
+
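+  // Only messages sent to the class itself (not to an instance) are of
+  // interest here.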
+ if (msg.isInstanceMessage())
+ return;
+ const ObjCInterfaceDecl *Class = msg.getReceiverInterface();
+ assert(Class);
+
+ Selector S = msg.getSelector();
+ if (!(S == releaseS || S == retainS || S == autoreleaseS || S == drainS))
+ return;
+
+ if (ExplodedNode *N = C.addTransition()) {
+ SmallString<200> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << "The '" << S.getAsString() << "' message should be sent to instances "
+ "of class '" << Class->getName()
+ << "' and not the class directly";
+
+ BugReport *report = new BugReport(*BT, os.str(), N);
+ report->addRange(msg.getSourceRange());
+ C.EmitReport(report);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Check for passing non-Objective-C types to variadic methods that expect
+// only Objective-C types.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VariadicMethodTypeChecker : public Checker<check::PreObjCMessage> {
+ mutable Selector arrayWithObjectsS;
+ mutable Selector dictionaryWithObjectsAndKeysS;
+ mutable Selector setWithObjectsS;
+ mutable Selector orderedSetWithObjectsS;
+ mutable Selector initWithObjectsS;
+ mutable Selector initWithObjectsAndKeysS;
+ mutable OwningPtr<BugType> BT;
+
+ bool isVariadicMessage(const ObjCMessage &msg) const;
+
+public:
+ void checkPreObjCMessage(ObjCMessage msg, CheckerContext &C) const;
+};
+}
+
+/// isVariadicMessage - Returns whether the given message is a variadic message,
+/// where all arguments must be Objective-C types.
+bool
+VariadicMethodTypeChecker::isVariadicMessage(const ObjCMessage &msg) const {
+ const ObjCMethodDecl *MD = msg.getMethodDecl();
+
+ if (!MD || !MD->isVariadic() || isa<ObjCProtocolDecl>(MD->getDeclContext()))
+ return false;
+
+ Selector S = msg.getSelector();
+
+ if (msg.isInstanceMessage()) {
+ // FIXME: Ideally we'd look at the receiver interface here, but that's not
+ // useful for init, because alloc returns 'id'. In theory, this could lead
+ // to false positives, for example if there existed a class that had an
+ // initWithObjects: implementation that does accept non-Objective-C pointer
+ // types, but the chance of that happening is pretty small compared to the
+ // gains that this analysis gives.
+ const ObjCInterfaceDecl *Class = MD->getClassInterface();
+
+ // -[NSArray initWithObjects:]
+ if (isReceiverClassOrSuperclass(Class, "NSArray") &&
+ S == initWithObjectsS)
+ return true;
+
+ // -[NSDictionary initWithObjectsAndKeys:]
+ if (isReceiverClassOrSuperclass(Class, "NSDictionary") &&
+ S == initWithObjectsAndKeysS)
+ return true;
+
+ // -[NSSet initWithObjects:]
+ if (isReceiverClassOrSuperclass(Class, "NSSet") &&
+ S == initWithObjectsS)
+ return true;
+
+ // -[NSOrderedSet initWithObjects:]
+ if (isReceiverClassOrSuperclass(Class, "NSOrderedSet") &&
+ S == initWithObjectsS)
+ return true;
+ } else {
+ const ObjCInterfaceDecl *Class = msg.getReceiverInterface();
+
+ // -[NSArray arrayWithObjects:]
+ if (isReceiverClassOrSuperclass(Class, "NSArray") &&
+ S == arrayWithObjectsS)
+ return true;
+
+ // -[NSDictionary dictionaryWithObjectsAndKeys:]
+ if (isReceiverClassOrSuperclass(Class, "NSDictionary") &&
+ S == dictionaryWithObjectsAndKeysS)
+ return true;
+
+ // -[NSSet setWithObjects:]
+ if (isReceiverClassOrSuperclass(Class, "NSSet") &&
+ S == setWithObjectsS)
+ return true;
+
+ // -[NSOrderedSet orderedSetWithObjects:]
+ if (isReceiverClassOrSuperclass(Class, "NSOrderedSet") &&
+ S == orderedSetWithObjectsS)
+ return true;
+ }
+
+ return false;
+}
+
+void VariadicMethodTypeChecker::checkPreObjCMessage(ObjCMessage msg,
+ CheckerContext &C) const {
+ if (!BT) {
+ BT.reset(new APIMisuse("Arguments passed to variadic method aren't all "
+ "Objective-C pointer types"));
+
+ ASTContext &Ctx = C.getASTContext();
+ arrayWithObjectsS = GetUnarySelector("arrayWithObjects", Ctx);
+ dictionaryWithObjectsAndKeysS =
+ GetUnarySelector("dictionaryWithObjectsAndKeys", Ctx);
+ setWithObjectsS = GetUnarySelector("setWithObjects", Ctx);
+ orderedSetWithObjectsS = GetUnarySelector("orderedSetWithObjects", Ctx);
+
+ initWithObjectsS = GetUnarySelector("initWithObjects", Ctx);
+ initWithObjectsAndKeysS = GetUnarySelector("initWithObjectsAndKeys", Ctx);
+ }
+
+ if (!isVariadicMessage(msg))
+ return;
+
+ // We are not interested in the selector arguments since they have
+ // well-defined types, so the compiler will issue a warning for them.
+ unsigned variadicArgsBegin = msg.getSelector().getNumArgs();
+
+ // We're not interested in the last argument since it has to be nil or the
+ // compiler would have issued a warning for it elsewhere.
+ unsigned variadicArgsEnd = msg.getNumArgs() - 1;
+
+ if (variadicArgsEnd <= variadicArgsBegin)
+ return;
+
+ // Verify that all arguments have Objective-C types.
+ llvm::Optional<ExplodedNode*> errorNode;
+ ProgramStateRef state = C.getState();
+
+ for (unsigned I = variadicArgsBegin; I != variadicArgsEnd; ++I) {
+ QualType ArgTy = msg.getArgType(I);
+ if (ArgTy->isObjCObjectPointerType())
+ continue;
+
+    // Block pointers are treated as Objective-C pointers.
+ if (ArgTy->isBlockPointerType())
+ continue;
+
+ // Ignore pointer constants.
+ if (isa<loc::ConcreteInt>(msg.getArgSVal(I, C.getLocationContext(),
+ state)))
+ continue;
+
+ // Ignore pointer types annotated with 'NSObject' attribute.
+ if (C.getASTContext().isObjCNSObjectType(ArgTy))
+ continue;
+
+ // Ignore CF references, which can be toll-free bridged.
+ if (coreFoundation::isCFObjectRef(ArgTy))
+ continue;
+
+ // Generate only one error node to use for all bug reports.
+ if (!errorNode.hasValue()) {
+ errorNode = C.addTransition();
+ }
+
+ if (!errorNode.getValue())
+ continue;
+
+ SmallString<128> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+
+ if (const char *TypeName = GetReceiverNameType(msg))
+ os << "Argument to '" << TypeName << "' method '";
+ else
+ os << "Argument to method '";
+
+ os << msg.getSelector().getAsString()
+ << "' should be an Objective-C pointer type, not '"
+ << ArgTy.getAsString() << "'";
+
+ BugReport *R = new BugReport(*BT, os.str(),
+ errorNode.getValue());
+ R->addRange(msg.getArgSourceRange(I));
+ C.EmitReport(R);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Check registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerNilArgChecker(CheckerManager &mgr) {
+ mgr.registerChecker<NilArgChecker>();
+}
+
+void ento::registerCFNumberCreateChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CFNumberCreateChecker>();
+}
+
+void ento::registerCFRetainReleaseChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CFRetainReleaseChecker>();
+}
+
+void ento::registerClassReleaseChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ClassReleaseChecker>();
+}
+
+void ento::registerVariadicMethodTypeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<VariadicMethodTypeChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
new file mode 100644
index 0000000..a4fc396
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
@@ -0,0 +1,157 @@
+//== BoolAssignmentChecker.cpp - Boolean assignment checker -----*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines BoolAssignmentChecker, a builtin check in ExprEngine that
+// performs checks for assignment of non-Boolean values to Boolean variables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+ class BoolAssignmentChecker : public Checker< check::Bind > {
+ mutable llvm::OwningPtr<BuiltinBug> BT;
+ void emitReport(ProgramStateRef state, CheckerContext &C) const;
+ public:
+ void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
+ };
+} // end anonymous namespace
+
+void BoolAssignmentChecker::emitReport(ProgramStateRef state,
+ CheckerContext &C) const {
+ if (ExplodedNode *N = C.addTransition(state)) {
+ if (!BT)
+ BT.reset(new BuiltinBug("Assignment of a non-Boolean value"));
+ C.EmitReport(new BugReport(*BT, BT->getDescription(), N));
+ }
+}
+
+static bool isBooleanType(QualType Ty) {
+ if (Ty->isBooleanType()) // C++ or C99
+ return true;
+
+ if (const TypedefType *TT = Ty->getAs<TypedefType>())
+ return TT->getDecl()->getName() == "BOOL" || // Objective-C
+ TT->getDecl()->getName() == "_Bool" || // stdbool.h < C99
+ TT->getDecl()->getName() == "Boolean"; // MacTypes.h
+
+ return false;
+}
+
+void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
+ CheckerContext &C) const {
+
+ // We are only interested in stores into Booleans.
+ const TypedValueRegion *TR =
+ dyn_cast_or_null<TypedValueRegion>(loc.getAsRegion());
+
+ if (!TR)
+ return;
+
+ QualType valTy = TR->getValueType();
+
+ if (!isBooleanType(valTy))
+ return;
+
+ // Get the value of the right-hand side. We only care about values
+ // that are defined (UnknownVals and UndefinedVals are handled by other
+ // checkers).
+ const DefinedSVal *DV = dyn_cast<DefinedSVal>(&val);
+ if (!DV)
+ return;
+
+ // Check if the assigned value meets our criteria for correctness. It must
+ // be a value that is either 0 or 1. One way to check this is to see if
+ // the value is possibly < 0 (for a negative value) or greater than 1.
+ ProgramStateRef state = C.getState();
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ ConstraintManager &CM = C.getConstraintManager();
+
+ // First, ensure that the value is >= 0.
+ DefinedSVal zeroVal = svalBuilder.makeIntVal(0, valTy);
+ SVal greaterThanOrEqualToZeroVal =
+ svalBuilder.evalBinOp(state, BO_GE, *DV, zeroVal,
+ svalBuilder.getConditionType());
+
+ DefinedSVal *greaterThanEqualToZero =
+ dyn_cast<DefinedSVal>(&greaterThanOrEqualToZeroVal);
+
+ if (!greaterThanEqualToZero) {
+ // The SValBuilder cannot construct a valid SVal for this condition.
+ // This means we cannot properly reason about it.
+ return;
+ }
+
+ ProgramStateRef stateLT, stateGE;
+ llvm::tie(stateGE, stateLT) = CM.assumeDual(state, *greaterThanEqualToZero);
+
+ // Is it possible for the value to be less than zero?
+ if (stateLT) {
+ // It is possible for the value to be less than zero. We only
+ // want to emit a warning, however, if that value is fully constrained.
+    // If it is possible for the value to be >= 0, then essentially the
+ // value is underconstrained and there is nothing left to be done.
+ if (!stateGE)
+ emitReport(stateLT, C);
+
+ // In either case, we are done.
+ return;
+ }
+
+ // If we reach here, it must be the case that the value is constrained
+ // to only be >= 0.
+ assert(stateGE == state);
+
+ // At this point we know that the value is >= 0.
+ // Now check to ensure that the value is <= 1.
+ DefinedSVal OneVal = svalBuilder.makeIntVal(1, valTy);
+ SVal lessThanEqToOneVal =
+ svalBuilder.evalBinOp(state, BO_LE, *DV, OneVal,
+ svalBuilder.getConditionType());
+
+ DefinedSVal *lessThanEqToOne =
+ dyn_cast<DefinedSVal>(&lessThanEqToOneVal);
+
+ if (!lessThanEqToOne) {
+ // The SValBuilder cannot construct a valid SVal for this condition.
+ // This means we cannot properly reason about it.
+ return;
+ }
+
+ ProgramStateRef stateGT, stateLE;
+ llvm::tie(stateLE, stateGT) = CM.assumeDual(state, *lessThanEqToOne);
+
+ // Is it possible for the value to be greater than one?
+ if (stateGT) {
+ // It is possible for the value to be greater than one. We only
+ // want to emit a warning, however, if that value is fully constrained.
+ // If it is possible for the value to be <= 1, then essentially the
+ // value is underconstrained and there is nothing left to be done.
+ if (!stateLE)
+ emitReport(stateGT, C);
+
+ // In either case, we are done.
+ return;
+ }
+
+ // If we reach here, it must be the case that the value is constrained
+ // to only be <= 1.
+ assert(stateLE == state);
+}
+
+void ento::registerBoolAssignmentChecker(CheckerManager &mgr) {
+ mgr.registerChecker<BoolAssignmentChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
new file mode 100644
index 0000000..509bc79
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/BuiltinFunctionChecker.cpp
@@ -0,0 +1,82 @@
+//=== BuiltinFunctionChecker.cpp --------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker evaluates clang builtin functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/Basic/Builtins.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class BuiltinFunctionChecker : public Checker<eval::Call> {
+public:
+ bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+};
+
+}
+
+bool BuiltinFunctionChecker::evalCall(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ const LocationContext *LCtx = C.getLocationContext();
+ if (!FD)
+ return false;
+
+ unsigned id = FD->getBuiltinID();
+
+ if (!id)
+ return false;
+
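+  // Only the builtins below are evaluated here; returning false leaves any
+  // other call to be handled elsewhere.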
+ switch (id) {
+ case Builtin::BI__builtin_expect: {
+ // For __builtin_expect, just return the value of the subexpression.
+ assert (CE->arg_begin() != CE->arg_end());
+ SVal X = state->getSVal(*(CE->arg_begin()), LCtx);
+ C.addTransition(state->BindExpr(CE, LCtx, X));
+ return true;
+ }
+
+ case Builtin::BI__builtin_alloca: {
+ // FIXME: Refactor into StoreManager itself?
+ MemRegionManager& RM = C.getStoreManager().getRegionManager();
+ const AllocaRegion* R =
+ RM.getAllocaRegion(CE, C.getCurrentBlockCount(), C.getLocationContext());
+
+ // Set the extent of the region in bytes. This enables us to use the
+ // SVal of the argument directly. If we save the extent in bits, we
+ // cannot represent values like symbol*8.
+ DefinedOrUnknownSVal Size =
+ cast<DefinedOrUnknownSVal>(state->getSVal(*(CE->arg_begin()), LCtx));
+
+ SValBuilder& svalBuilder = C.getSValBuilder();
+ DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
+ DefinedOrUnknownSVal extentMatchesSizeArg =
+ svalBuilder.evalEQ(state, Extent, Size);
+ state = state->assume(extentMatchesSizeArg, true);
+
+ C.addTransition(state->BindExpr(CE, LCtx, loc::MemRegionVal(R)));
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void ento::registerBuiltinFunctionChecker(CheckerManager &mgr) {
+ mgr.registerChecker<BuiltinFunctionChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
new file mode 100644
index 0000000..9eb7edf
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -0,0 +1,1981 @@
+//= CStringChecker.cpp - Checks calls to C string functions --------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines CStringChecker, which is an assortment of checks on calls
+// to functions in <string.h>.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "InterCheckerAPI.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class CStringChecker : public Checker< eval::Call,
+ check::PreStmt<DeclStmt>,
+ check::LiveSymbols,
+ check::DeadSymbols,
+ check::RegionChanges
+ > {
+ mutable OwningPtr<BugType> BT_Null,
+ BT_Bounds,
+ BT_Overlap,
+ BT_NotCString,
+ BT_AdditionOverflow;
+
+ mutable const char *CurrentFunctionDescription;
+
+public:
+  /// The filter suppresses diagnostics that are not enabled by the user.
+ struct CStringChecksFilter {
+ DefaultBool CheckCStringNullArg;
+ DefaultBool CheckCStringOutOfBounds;
+ DefaultBool CheckCStringBufferOverlap;
+ DefaultBool CheckCStringNotNullTerm;
+ };
+
+ CStringChecksFilter Filter;
+
+ static void *getTag() { static int tag; return &tag; }
+
+ bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+ void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const;
+ void checkLiveSymbols(ProgramStateRef state, SymbolReaper &SR) const;
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+ bool wantsRegionChangeUpdate(ProgramStateRef state) const;
+
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef state,
+ const StoreManager::InvalidatedSymbols *,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) const;
+
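+  // Pointer-to-member type matching the signature of the evalXXX handlers
+  // declared below.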
+ typedef void (CStringChecker::*FnCheck)(CheckerContext &,
+ const CallExpr *) const;
+
+ void evalMemcpy(CheckerContext &C, const CallExpr *CE) const;
+ void evalMempcpy(CheckerContext &C, const CallExpr *CE) const;
+ void evalMemmove(CheckerContext &C, const CallExpr *CE) const;
+ void evalBcopy(CheckerContext &C, const CallExpr *CE) const;
+ void evalCopyCommon(CheckerContext &C, const CallExpr *CE,
+ ProgramStateRef state,
+ const Expr *Size,
+ const Expr *Source,
+ const Expr *Dest,
+ bool Restricted = false,
+ bool IsMempcpy = false) const;
+
+ void evalMemcmp(CheckerContext &C, const CallExpr *CE) const;
+
+ void evalstrLength(CheckerContext &C, const CallExpr *CE) const;
+ void evalstrnLength(CheckerContext &C, const CallExpr *CE) const;
+ void evalstrLengthCommon(CheckerContext &C,
+ const CallExpr *CE,
+ bool IsStrnlen = false) const;
+
+ void evalStrcpy(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrncpy(CheckerContext &C, const CallExpr *CE) const;
+ void evalStpcpy(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrcpyCommon(CheckerContext &C,
+ const CallExpr *CE,
+ bool returnEnd,
+ bool isBounded,
+ bool isAppending) const;
+
+ void evalStrcat(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrncat(CheckerContext &C, const CallExpr *CE) const;
+
+ void evalStrcmp(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrncmp(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrcasecmp(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrncasecmp(CheckerContext &C, const CallExpr *CE) const;
+ void evalStrcmpCommon(CheckerContext &C,
+ const CallExpr *CE,
+ bool isBounded = false,
+ bool ignoreCase = false) const;
+
+ // Utility methods
+  static std::pair<ProgramStateRef , ProgramStateRef >
+  assumeZero(CheckerContext &C, ProgramStateRef state, SVal V, QualType Ty);
+
+ static ProgramStateRef setCStringLength(ProgramStateRef state,
+ const MemRegion *MR,
+ SVal strLength);
+ static SVal getCStringLengthForRegion(CheckerContext &C,
+ ProgramStateRef &state,
+ const Expr *Ex,
+ const MemRegion *MR,
+ bool hypothetical);
+ SVal getCStringLength(CheckerContext &C,
+ ProgramStateRef &state,
+ const Expr *Ex,
+ SVal Buf,
+ bool hypothetical = false) const;
+
+ const StringLiteral *getCStringLiteral(CheckerContext &C,
+ ProgramStateRef &state,
+ const Expr *expr,
+ SVal val) const;
+
+ static ProgramStateRef InvalidateBuffer(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *Ex, SVal V);
+
+ static bool SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
+ const MemRegion *MR);
+
+ // Re-usable checks
+ ProgramStateRef checkNonNull(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *S,
+ SVal l) const;
+ ProgramStateRef CheckLocation(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *S,
+ SVal l,
+ const char *message = NULL) const;
+ ProgramStateRef CheckBufferAccess(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *Size,
+ const Expr *FirstBuf,
+ const Expr *SecondBuf,
+ const char *firstMessage = NULL,
+ const char *secondMessage = NULL,
+ bool WarnAboutSize = false) const;
+
+ ProgramStateRef CheckBufferAccess(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *Size,
+ const Expr *Buf,
+ const char *message = NULL,
+ bool WarnAboutSize = false) const {
+ // This is a convenience override.
+ return CheckBufferAccess(C, state, Size, Buf, NULL, message, NULL,
+ WarnAboutSize);
+ }
+ ProgramStateRef CheckOverlap(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *Size,
+ const Expr *First,
+ const Expr *Second) const;
+ void emitOverlapBug(CheckerContext &C,
+ ProgramStateRef state,
+ const Stmt *First,
+ const Stmt *Second) const;
+
+ ProgramStateRef checkAdditionOverflow(CheckerContext &C,
+ ProgramStateRef state,
+ NonLoc left,
+ NonLoc right) const;
+};
+
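+// The map of tracked C string lengths, keyed by memory region. The trait
+// specialization below registers it in the program state under this
+// checker's tag.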
+class CStringLength {
+public:
+ typedef llvm::ImmutableMap<const MemRegion *, SVal> EntryMap;
+};
+} //end anonymous namespace
+
+namespace clang {
+namespace ento {
+ template <>
+ struct ProgramStateTrait<CStringLength>
+ : public ProgramStatePartialTrait<CStringLength::EntryMap> {
+ static void *GDMIndex() { return CStringChecker::getTag(); }
+ };
+}
+}
+
+//===----------------------------------------------------------------------===//
+// Individual checks and utility methods.
+//===----------------------------------------------------------------------===//
+
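+// Split 'state' on whether 'V' is zero: the first state of the returned pair
+// assumes V == 0, the second assumes V != 0. A value that is not a
+// DefinedSVal leaves both states equal to the original.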
+std::pair<ProgramStateRef , ProgramStateRef >
+CStringChecker::assumeZero(CheckerContext &C, ProgramStateRef state, SVal V,
+ QualType Ty) {
+ DefinedSVal *val = dyn_cast<DefinedSVal>(&V);
+ if (!val)
+ return std::pair<ProgramStateRef , ProgramStateRef >(state, state);
+
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ DefinedOrUnknownSVal zero = svalBuilder.makeZeroVal(Ty);
+ return state->assume(svalBuilder.evalEQ(state, *val, zero));
+}
+
+ProgramStateRef CStringChecker::checkNonNull(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *S, SVal l) const {
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return NULL;
+
+ ProgramStateRef stateNull, stateNonNull;
+ llvm::tie(stateNull, stateNonNull) = assumeZero(C, state, l, S->getType());
+
+ if (stateNull && !stateNonNull) {
+ if (!Filter.CheckCStringNullArg)
+ return NULL;
+
+ ExplodedNode *N = C.generateSink(stateNull);
+ if (!N)
+ return NULL;
+
+ if (!BT_Null)
+ BT_Null.reset(new BuiltinBug("Unix API",
+ "Null pointer argument in call to byte string function"));
+
+ SmallString<80> buf;
+ llvm::raw_svector_ostream os(buf);
+ assert(CurrentFunctionDescription);
+ os << "Null pointer argument in call to " << CurrentFunctionDescription;
+
+ // Generate a report for this bug.
+ BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Null.get());
+ BugReport *report = new BugReport(*BT, os.str(), N);
+
+ report->addRange(S->getSourceRange());
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, S,
+ report));
+ C.EmitReport(report);
+ return NULL;
+ }
+
+ // From here on, assume that the value is non-null.
+ assert(stateNonNull);
+ return stateNonNull;
+}
+
+// FIXME: This was originally copied from ArrayBoundChecker.cpp. Refactor?
+ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *S, SVal l,
+ const char *warningMsg) const {
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return NULL;
+
+ // Check for out of bound array element access.
+ const MemRegion *R = l.getAsRegion();
+ if (!R)
+ return state;
+
+ const ElementRegion *ER = dyn_cast<ElementRegion>(R);
+ if (!ER)
+ return state;
+
+ assert(ER->getValueType() == C.getASTContext().CharTy &&
+ "CheckLocation should only be called with char* ElementRegions");
+
+ // Get the size of the array.
+ const SubRegion *superReg = cast<SubRegion>(ER->getSuperRegion());
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ SVal Extent =
+ svalBuilder.convertToArrayIndex(superReg->getExtent(svalBuilder));
+ DefinedOrUnknownSVal Size = cast<DefinedOrUnknownSVal>(Extent);
+
+ // Get the index of the accessed element.
+ DefinedOrUnknownSVal Idx = cast<DefinedOrUnknownSVal>(ER->getIndex());
+
+ ProgramStateRef StInBound = state->assumeInBound(Idx, Size, true);
+ ProgramStateRef StOutBound = state->assumeInBound(Idx, Size, false);
+ if (StOutBound && !StInBound) {
+ ExplodedNode *N = C.generateSink(StOutBound);
+ if (!N)
+ return NULL;
+
+ if (!BT_Bounds) {
+ BT_Bounds.reset(new BuiltinBug("Out-of-bound array access",
+ "Byte string function accesses out-of-bound array element"));
+ }
+ BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Bounds.get());
+
+ // Generate a report for this bug.
+ BugReport *report;
+ if (warningMsg) {
+ report = new BugReport(*BT, warningMsg, N);
+ } else {
+ assert(CurrentFunctionDescription);
+ assert(CurrentFunctionDescription[0] != '\0');
+
+ SmallString<80> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << (char)toupper(CurrentFunctionDescription[0])
+ << &CurrentFunctionDescription[1]
+ << " accesses out-of-bound array element";
+ report = new BugReport(*BT, os.str(), N);
+ }
+
+ // FIXME: It would be nice to eventually make this diagnostic more clear,
+ // e.g., by referencing the original declaration or by saying *why* this
+ // reference is outside the range.
+
+ report->addRange(S->getSourceRange());
+ C.EmitReport(report);
+ return NULL;
+ }
+
+ // Array bound check succeeded. From this point forward the array bound
+ // should always succeed.
+ return StInBound;
+}
+
+ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *Size,
+ const Expr *FirstBuf,
+ const Expr *SecondBuf,
+ const char *firstMessage,
+ const char *secondMessage,
+ bool WarnAboutSize) const {
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return NULL;
+
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ ASTContext &Ctx = svalBuilder.getContext();
+ const LocationContext *LCtx = C.getLocationContext();
+
+ QualType sizeTy = Size->getType();
+ QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
+
+ // Check that the first buffer is non-null.
+ SVal BufVal = state->getSVal(FirstBuf, LCtx);
+ state = checkNonNull(C, state, FirstBuf, BufVal);
+ if (!state)
+ return NULL;
+
+ // If out-of-bounds checking is turned off, skip the rest.
+ if (!Filter.CheckCStringOutOfBounds)
+ return state;
+
+ // Get the access length and make sure it is known.
+  // FIXME: This assumes the caller has already checked that the access length
+  // is positive and unsigned.
+ SVal LengthVal = state->getSVal(Size, LCtx);
+ NonLoc *Length = dyn_cast<NonLoc>(&LengthVal);
+ if (!Length)
+ return state;
+
+ // Compute the offset of the last element to be accessed: size-1.
+ NonLoc One = cast<NonLoc>(svalBuilder.makeIntVal(1, sizeTy));
+ NonLoc LastOffset = cast<NonLoc>(svalBuilder.evalBinOpNN(state, BO_Sub,
+ *Length, One, sizeTy));
+
+ // Check that the first buffer is sufficiently long.
+ SVal BufStart = svalBuilder.evalCast(BufVal, PtrTy, FirstBuf->getType());
+ if (Loc *BufLoc = dyn_cast<Loc>(&BufStart)) {
+ const Expr *warningExpr = (WarnAboutSize ? Size : FirstBuf);
+
+ SVal BufEnd = svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc,
+ LastOffset, PtrTy);
+ state = CheckLocation(C, state, warningExpr, BufEnd, firstMessage);
+
+ // If the buffer isn't large enough, abort.
+ if (!state)
+ return NULL;
+ }
+
+ // If there's a second buffer, check it as well.
+ if (SecondBuf) {
+ BufVal = state->getSVal(SecondBuf, LCtx);
+ state = checkNonNull(C, state, SecondBuf, BufVal);
+ if (!state)
+ return NULL;
+
+ BufStart = svalBuilder.evalCast(BufVal, PtrTy, SecondBuf->getType());
+ if (Loc *BufLoc = dyn_cast<Loc>(&BufStart)) {
+ const Expr *warningExpr = (WarnAboutSize ? Size : SecondBuf);
+
+ SVal BufEnd = svalBuilder.evalBinOpLN(state, BO_Add, *BufLoc,
+ LastOffset, PtrTy);
+ state = CheckLocation(C, state, warningExpr, BufEnd, secondMessage);
+ }
+ }
+
+ // Large enough or not, return this state!
+ return state;
+}
+
+ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *Size,
+ const Expr *First,
+ const Expr *Second) const {
+ if (!Filter.CheckCStringBufferOverlap)
+ return state;
+
+ // Do a simple check for overlap: if the two arguments are from the same
+ // buffer, see if the end of the first is greater than the start of the second
+ // or vice versa.
+
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return NULL;
+
+ ProgramStateRef stateTrue, stateFalse;
+
+ // Get the buffer values and make sure they're known locations.
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal firstVal = state->getSVal(First, LCtx);
+ SVal secondVal = state->getSVal(Second, LCtx);
+
+ Loc *firstLoc = dyn_cast<Loc>(&firstVal);
+ if (!firstLoc)
+ return state;
+
+ Loc *secondLoc = dyn_cast<Loc>(&secondVal);
+ if (!secondLoc)
+ return state;
+
+ // Are the two values the same?
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ llvm::tie(stateTrue, stateFalse) =
+ state->assume(svalBuilder.evalEQ(state, *firstLoc, *secondLoc));
+
+ if (stateTrue && !stateFalse) {
+ // If the values are known to be equal, that's automatically an overlap.
+ emitOverlapBug(C, stateTrue, First, Second);
+ return NULL;
+ }
+
+ // assume the two expressions are not equal.
+ assert(stateFalse);
+ state = stateFalse;
+
+ // Which value comes first?
+ QualType cmpTy = svalBuilder.getConditionType();
+ SVal reverse = svalBuilder.evalBinOpLL(state, BO_GT,
+ *firstLoc, *secondLoc, cmpTy);
+ DefinedOrUnknownSVal *reverseTest = dyn_cast<DefinedOrUnknownSVal>(&reverse);
+ if (!reverseTest)
+ return state;
+
+ llvm::tie(stateTrue, stateFalse) = state->assume(*reverseTest);
+ if (stateTrue) {
+ if (stateFalse) {
+ // If we don't know which one comes first, we can't perform this test.
+ return state;
+ } else {
+ // Switch the values so that firstVal is before secondVal.
+ Loc *tmpLoc = firstLoc;
+ firstLoc = secondLoc;
+ secondLoc = tmpLoc;
+
+ // Switch the Exprs as well, so that they still correspond.
+ const Expr *tmpExpr = First;
+ First = Second;
+ Second = tmpExpr;
+ }
+ }
+
+ // Get the length, and make sure it too is known.
+ SVal LengthVal = state->getSVal(Size, LCtx);
+ NonLoc *Length = dyn_cast<NonLoc>(&LengthVal);
+ if (!Length)
+ return state;
+
+ // Convert the first buffer's start address to char*.
+ // Bail out if the cast fails.
+ ASTContext &Ctx = svalBuilder.getContext();
+ QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
+ SVal FirstStart = svalBuilder.evalCast(*firstLoc, CharPtrTy,
+ First->getType());
+ Loc *FirstStartLoc = dyn_cast<Loc>(&FirstStart);
+ if (!FirstStartLoc)
+ return state;
+
+ // Compute the end of the first buffer. Bail out if THAT fails.
+ SVal FirstEnd = svalBuilder.evalBinOpLN(state, BO_Add,
+ *FirstStartLoc, *Length, CharPtrTy);
+ Loc *FirstEndLoc = dyn_cast<Loc>(&FirstEnd);
+ if (!FirstEndLoc)
+ return state;
+
+ // Is the end of the first buffer past the start of the second buffer?
+ SVal Overlap = svalBuilder.evalBinOpLL(state, BO_GT,
+ *FirstEndLoc, *secondLoc, cmpTy);
+ DefinedOrUnknownSVal *OverlapTest = dyn_cast<DefinedOrUnknownSVal>(&Overlap);
+ if (!OverlapTest)
+ return state;
+
+ llvm::tie(stateTrue, stateFalse) = state->assume(*OverlapTest);
+
+ if (stateTrue && !stateFalse) {
+ // Overlap!
+ emitOverlapBug(C, stateTrue, First, Second);
+ return NULL;
+ }
+
+ // assume the two expressions don't overlap.
+ assert(stateFalse);
+ return stateFalse;
+}
+
+void CStringChecker::emitOverlapBug(CheckerContext &C, ProgramStateRef state,
+ const Stmt *First, const Stmt *Second) const {
+ ExplodedNode *N = C.generateSink(state);
+ if (!N)
+ return;
+
+ if (!BT_Overlap)
+ BT_Overlap.reset(new BugType("Unix API", "Improper arguments"));
+
+ // Generate a report for this bug.
+ BugReport *report =
+ new BugReport(*BT_Overlap,
+ "Arguments must not be overlapping buffers", N);
+ report->addRange(First->getSourceRange());
+ report->addRange(Second->getSourceRange());
+
+ C.EmitReport(report);
+}
+
+ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C,
+ ProgramStateRef state,
+ NonLoc left,
+ NonLoc right) const {
+ // If out-of-bounds checking is turned off, skip the rest.
+ if (!Filter.CheckCStringOutOfBounds)
+ return state;
+
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return NULL;
+
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
+
+ QualType sizeTy = svalBuilder.getContext().getSizeType();
+ const llvm::APSInt &maxValInt = BVF.getMaxValue(sizeTy);
+ NonLoc maxVal = svalBuilder.makeIntVal(maxValInt);
+
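+  // The overflow condition checked below is 'left > maxVal - right', with the
+  // operands swapped first if 'right' is not a concrete integer.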
+ SVal maxMinusRight;
+ if (isa<nonloc::ConcreteInt>(right)) {
+ maxMinusRight = svalBuilder.evalBinOpNN(state, BO_Sub, maxVal, right,
+ sizeTy);
+ } else {
+ // Try switching the operands. (The order of these two assignments is
+ // important!)
+ maxMinusRight = svalBuilder.evalBinOpNN(state, BO_Sub, maxVal, left,
+ sizeTy);
+ left = right;
+ }
+
+ if (NonLoc *maxMinusRightNL = dyn_cast<NonLoc>(&maxMinusRight)) {
+ QualType cmpTy = svalBuilder.getConditionType();
+ // If left > max - right, we have an overflow.
+ SVal willOverflow = svalBuilder.evalBinOpNN(state, BO_GT, left,
+ *maxMinusRightNL, cmpTy);
+
+ ProgramStateRef stateOverflow, stateOkay;
+ llvm::tie(stateOverflow, stateOkay) =
+ state->assume(cast<DefinedOrUnknownSVal>(willOverflow));
+
+ if (stateOverflow && !stateOkay) {
+ // We have an overflow. Emit a bug report.
+ ExplodedNode *N = C.generateSink(stateOverflow);
+ if (!N)
+ return NULL;
+
+ if (!BT_AdditionOverflow)
+ BT_AdditionOverflow.reset(new BuiltinBug("API",
+ "Sum of expressions causes overflow"));
+
+ // This isn't a great error message, but this should never occur in real
+ // code anyway -- you'd have to create a buffer longer than a size_t can
+ // represent, which is sort of a contradiction.
+ const char *warning =
+ "This expression will create a string whose length is too big to "
+ "be represented as a size_t";
+
+ // Generate a report for this bug.
+ BugReport *report = new BugReport(*BT_AdditionOverflow, warning, N);
+ C.EmitReport(report);
+
+ return NULL;
+ }
+
+ // From now on, assume an overflow didn't occur.
+ assert(stateOkay);
+ state = stateOkay;
+ }
+
+ return state;
+}
+
+ProgramStateRef CStringChecker::setCStringLength(ProgramStateRef state,
+ const MemRegion *MR,
+ SVal strLength) {
+ assert(!strLength.isUndef() && "Attempt to set an undefined string length");
+
+ MR = MR->StripCasts();
+
+ switch (MR->getKind()) {
+ case MemRegion::StringRegionKind:
+ // FIXME: This can happen if we strcpy() into a string region. This is
+ // undefined [C99 6.4.5p6], but we should still warn about it.
+ return state;
+
+ case MemRegion::SymbolicRegionKind:
+ case MemRegion::AllocaRegionKind:
+ case MemRegion::VarRegionKind:
+ case MemRegion::FieldRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
+ // These are the types we can currently track string lengths for.
+ break;
+
+ case MemRegion::ElementRegionKind:
+ // FIXME: Handle element regions by upper-bounding the parent region's
+ // string length.
+ return state;
+
+ default:
+ // Other regions (mostly non-data) can't have a reliable C string length.
+ // For now, just ignore the change.
+ // FIXME: These are rare but not impossible. We should output some kind of
+ // warning for things like strcpy((char[]){'a', 0}, "b");
+ return state;
+ }
+
+ if (strLength.isUnknown())
+ return state->remove<CStringLength>(MR);
+
+ return state->set<CStringLength>(MR, strLength);
+}
+
+SVal CStringChecker::getCStringLengthForRegion(CheckerContext &C,
+ ProgramStateRef &state,
+ const Expr *Ex,
+ const MemRegion *MR,
+ bool hypothetical) {
+ if (!hypothetical) {
+ // If there's a recorded length, go ahead and return it.
+ const SVal *Recorded = state->get<CStringLength>(MR);
+ if (Recorded)
+ return *Recorded;
+ }
+
+ // Otherwise, get a new symbol and update the state.
+ unsigned Count = C.getCurrentBlockCount();
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ QualType sizeTy = svalBuilder.getContext().getSizeType();
+ SVal strLength = svalBuilder.getMetadataSymbolVal(CStringChecker::getTag(),
+ MR, Ex, sizeTy, Count);
+
+ if (!hypothetical)
+ state = state->set<CStringLength>(MR, strLength);
+
+ return strLength;
+}
+
+SVal CStringChecker::getCStringLength(CheckerContext &C, ProgramStateRef &state,
+ const Expr *Ex, SVal Buf,
+ bool hypothetical) const {
+ const MemRegion *MR = Buf.getAsRegion();
+ if (!MR) {
+ // If we can't get a region, see if it's something we /know/ isn't a
+ // C string. In the context of locations, the only time we can issue such
+ // a warning is for labels.
+ if (loc::GotoLabel *Label = dyn_cast<loc::GotoLabel>(&Buf)) {
+ if (!Filter.CheckCStringNotNullTerm)
+ return UndefinedVal();
+
+ if (ExplodedNode *N = C.addTransition(state)) {
+ if (!BT_NotCString)
+ BT_NotCString.reset(new BuiltinBug("Unix API",
+ "Argument is not a null-terminated string."));
+
+ SmallString<120> buf;
+ llvm::raw_svector_ostream os(buf);
+ assert(CurrentFunctionDescription);
+ os << "Argument to " << CurrentFunctionDescription
+ << " is the address of the label '" << Label->getLabel()->getName()
+ << "', which is not a null-terminated string";
+
+ // Generate a report for this bug.
+ BugReport *report = new BugReport(*BT_NotCString,
+ os.str(), N);
+
+ report->addRange(Ex->getSourceRange());
+ C.EmitReport(report);
+ }
+ return UndefinedVal();
+
+ }
+
+ // If it's not a region and not a label, give up.
+ return UnknownVal();
+ }
+
+ // If we have a region, strip casts from it and see if we can figure out
+ // its length. For anything we can't figure out, just return UnknownVal.
+ MR = MR->StripCasts();
+
+ switch (MR->getKind()) {
+ case MemRegion::StringRegionKind: {
+ // Modifying the contents of string regions is undefined [C99 6.4.5p6],
+ // so we can assume that the byte length is the correct C string length.
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ QualType sizeTy = svalBuilder.getContext().getSizeType();
+ const StringLiteral *strLit = cast<StringRegion>(MR)->getStringLiteral();
+ return svalBuilder.makeIntVal(strLit->getByteLength(), sizeTy);
+ }
+ case MemRegion::SymbolicRegionKind:
+ case MemRegion::AllocaRegionKind:
+ case MemRegion::VarRegionKind:
+ case MemRegion::FieldRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
+ return getCStringLengthForRegion(C, state, Ex, MR, hypothetical);
+ case MemRegion::CompoundLiteralRegionKind:
+ // FIXME: Can we track this? Is it necessary?
+ return UnknownVal();
+ case MemRegion::ElementRegionKind:
+ // FIXME: How can we handle this? It's not good enough to subtract the
+ // offset from the base string length; consider "123\x00567" and &a[5].
+ return UnknownVal();
+ default:
+ // Other regions (mostly non-data) can't have a reliable C string length.
+ // In this case, an error is emitted and UndefinedVal is returned.
+ // The caller should always be prepared to handle this case.
+ if (!Filter.CheckCStringNotNullTerm)
+ return UndefinedVal();
+
+ if (ExplodedNode *N = C.addTransition(state)) {
+ if (!BT_NotCString)
+ BT_NotCString.reset(new BuiltinBug("Unix API",
+ "Argument is not a null-terminated string."));
+
+ SmallString<120> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ assert(CurrentFunctionDescription);
+ os << "Argument to " << CurrentFunctionDescription << " is ";
+
+ if (SummarizeRegion(os, C.getASTContext(), MR))
+ os << ", which is not a null-terminated string";
+ else
+ os << "not a null-terminated string";
+
+ // Generate a report for this bug.
+ BugReport *report = new BugReport(*BT_NotCString,
+ os.str(), N);
+
+ report->addRange(Ex->getSourceRange());
+ C.EmitReport(report);
+ }
+
+ return UndefinedVal();
+ }
+}
+
+const StringLiteral *CStringChecker::getCStringLiteral(CheckerContext &C,
+ ProgramStateRef &state, const Expr *expr, SVal val) const {
+
+ // Get the memory region pointed to by the val.
+ const MemRegion *bufRegion = val.getAsRegion();
+ if (!bufRegion)
+ return NULL;
+
+ // Strip casts off the memory region.
+ bufRegion = bufRegion->StripCasts();
+
+ // Cast the memory region to a string region.
+ const StringRegion *strRegion = dyn_cast<StringRegion>(bufRegion);
+ if (!strRegion)
+ return NULL;
+
+ // Return the actual string in the string region.
+ return strRegion->getStringLiteral();
+}
+
+ProgramStateRef CStringChecker::InvalidateBuffer(CheckerContext &C,
+ ProgramStateRef state,
+ const Expr *E, SVal V) {
+ Loc *L = dyn_cast<Loc>(&V);
+ if (!L)
+ return state;
+
+ // FIXME: This is a simplified version of what's in CFRefCount.cpp -- it makes
+ // some assumptions about the value that CFRefCount can't. Even so, it should
+ // probably be refactored.
+ if (loc::MemRegionVal* MR = dyn_cast<loc::MemRegionVal>(L)) {
+ const MemRegion *R = MR->getRegion()->StripCasts();
+
+ // Are we dealing with an ElementRegion? If so, we should be invalidating
+ // the super-region.
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ R = ER->getSuperRegion();
+ // FIXME: What about layers of ElementRegions?
+ }
+
+ // Invalidate this region.
+ unsigned Count = C.getCurrentBlockCount();
+ const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
+ return state->invalidateRegions(R, E, Count, LCtx);
+ }
+
+ // If we have a non-region value by chance, just remove the binding.
+ // FIXME: is this necessary or correct? This handles the non-Region
+ // cases. Is it ever valid to store to these?
+ return state->unbindLoc(*L);
+}
+
+bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
+ const MemRegion *MR) {
+ const TypedValueRegion *TVR = dyn_cast<TypedValueRegion>(MR);
+
+ switch (MR->getKind()) {
+ case MemRegion::FunctionTextRegionKind: {
+ const FunctionDecl *FD = cast<FunctionTextRegion>(MR)->getDecl();
+ if (FD)
+ os << "the address of the function '" << *FD << '\'';
+ else
+ os << "the address of a function";
+ return true;
+ }
+ case MemRegion::BlockTextRegionKind:
+ os << "block text";
+ return true;
+ case MemRegion::BlockDataRegionKind:
+ os << "a block";
+ return true;
+ case MemRegion::CXXThisRegionKind:
+ case MemRegion::CXXTempObjectRegionKind:
+ os << "a C++ temp object of type " << TVR->getValueType().getAsString();
+ return true;
+ case MemRegion::VarRegionKind:
+ os << "a variable of type" << TVR->getValueType().getAsString();
+ return true;
+ case MemRegion::FieldRegionKind:
+ os << "a field of type " << TVR->getValueType().getAsString();
+ return true;
+ case MemRegion::ObjCIvarRegionKind:
+ os << "an instance variable of type " << TVR->getValueType().getAsString();
+ return true;
+ default:
+ return false;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Evaluation of individual function calls.
+//===----------------------------------------------------------------------===//
+
+void CStringChecker::evalCopyCommon(CheckerContext &C,
+ const CallExpr *CE,
+ ProgramStateRef state,
+ const Expr *Size, const Expr *Dest,
+ const Expr *Source, bool Restricted,
+ bool IsMempcpy) const {
+ CurrentFunctionDescription = "memory copy function";
+
+ // See if the size argument is zero.
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal sizeVal = state->getSVal(Size, LCtx);
+ QualType sizeTy = Size->getType();
+
+ ProgramStateRef stateZeroSize, stateNonZeroSize;
+ llvm::tie(stateZeroSize, stateNonZeroSize) =
+ assumeZero(C, state, sizeVal, sizeTy);
+
+ // Get the value of the Dest.
+ SVal destVal = state->getSVal(Dest, LCtx);
+
+ // If the size is zero, there won't be any actual memory access, so
+ // just bind the return value to the destination buffer and return.
+ if (stateZeroSize) {
+ stateZeroSize = stateZeroSize->BindExpr(CE, LCtx, destVal);
+ C.addTransition(stateZeroSize);
+ }
+
+ // If the size can be nonzero, we have to check the other arguments.
+ if (stateNonZeroSize) {
+ state = stateNonZeroSize;
+
+ // Ensure the destination is not null. If it is NULL there will be a
+ // NULL pointer dereference.
+ state = checkNonNull(C, state, Dest, destVal);
+ if (!state)
+ return;
+
+ // Get the value of the Src.
+ SVal srcVal = state->getSVal(Source, LCtx);
+
+ // Ensure the source is not null. If it is NULL there will be a
+ // NULL pointer dereference.
+ state = checkNonNull(C, state, Source, srcVal);
+ if (!state)
+ return;
+
+ // Ensure the accesses are valid and that the buffers do not overlap.
+ const char * const writeWarning =
+ "Memory copy function overflows destination buffer";
+ state = CheckBufferAccess(C, state, Size, Dest, Source,
+ writeWarning, /* sourceWarning = */ NULL);
+ if (Restricted)
+ state = CheckOverlap(C, state, Size, Dest, Source);
+
+ if (!state)
+ return;
+
+ // If this is mempcpy, get the byte after the last byte copied and
+ // bind the expr.
+ if (IsMempcpy) {
+ loc::MemRegionVal *destRegVal = dyn_cast<loc::MemRegionVal>(&destVal);
+ assert(destRegVal && "Destination should be a known MemRegionVal here");
+
+ // Get the length to copy.
+ NonLoc *lenValNonLoc = dyn_cast<NonLoc>(&sizeVal);
+
+ if (lenValNonLoc) {
+ // Get the byte after the last byte copied.
+ SVal lastElement = C.getSValBuilder().evalBinOpLN(state, BO_Add,
+ *destRegVal,
+ *lenValNonLoc,
+ Dest->getType());
+
+ // The byte after the last byte copied is the return value.
+ state = state->BindExpr(CE, LCtx, lastElement);
+ } else {
+ // If we don't know how much we copied, we can at least
+ // conjure a return value for later.
+ unsigned Count = C.getCurrentBlockCount();
+ SVal result =
+ C.getSValBuilder().getConjuredSymbolVal(NULL, CE, LCtx, Count);
+ state = state->BindExpr(CE, LCtx, result);
+ }
+
+ } else {
+ // All other copies return the destination buffer.
+ // (Well, bcopy() has a void return type, but this won't hurt.)
+ state = state->BindExpr(CE, LCtx, destVal);
+ }
+
+ // Invalidate the destination.
+ // FIXME: Even if we can't perfectly model the copy, we should see if we
+ // can use LazyCompoundVals to copy the source values into the destination.
+ // This would probably remove any existing bindings past the end of the
+ // copied region, but that's still an improvement over blank invalidation.
+ state = InvalidateBuffer(C, state, Dest,
+ state->getSVal(Dest, C.getLocationContext()));
+ C.addTransition(state);
+ }
+}
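+
+// Illustrative note (editorial addition, not from the original source): with a
+// known size, the mempcpy path above binds the call expression to the byte
+// past the last one written, e.g.
+//   mempcpy(dst, src, 10);   // bound to dst + 10
+// while memcpy/memmove (and bcopy, despite its void return) are bound to the
+// destination pointer itself.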
+
+
+void CStringChecker::evalMemcpy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ // void *memcpy(void *restrict dst, const void *restrict src, size_t n);
+ // The return value is the address of the destination buffer.
+ const Expr *Dest = CE->getArg(0);
+ ProgramStateRef state = C.getState();
+
+ evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1), true);
+}
+
+void CStringChecker::evalMempcpy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ // void *mempcpy(void *restrict dst, const void *restrict src, size_t n);
+ // The return value is a pointer to the byte following the last written byte.
+ const Expr *Dest = CE->getArg(0);
+ ProgramStateRef state = C.getState();
+
+ evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1), true, true);
+}
+
+void CStringChecker::evalMemmove(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ // void *memmove(void *dst, const void *src, size_t n);
+ // The return value is the address of the destination buffer.
+ const Expr *Dest = CE->getArg(0);
+ ProgramStateRef state = C.getState();
+
+ evalCopyCommon(C, CE, state, CE->getArg(2), Dest, CE->getArg(1));
+}
+
+void CStringChecker::evalBcopy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ // void bcopy(const void *src, void *dst, size_t n);
+ evalCopyCommon(C, CE, C.getState(),
+ CE->getArg(2), CE->getArg(1), CE->getArg(0));
+}
+
+void CStringChecker::evalMemcmp(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ // int memcmp(const void *s1, const void *s2, size_t n);
+ CurrentFunctionDescription = "memory comparison function";
+
+ const Expr *Left = CE->getArg(0);
+ const Expr *Right = CE->getArg(1);
+ const Expr *Size = CE->getArg(2);
+
+ ProgramStateRef state = C.getState();
+ SValBuilder &svalBuilder = C.getSValBuilder();
+
+ // See if the size argument is zero.
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal sizeVal = state->getSVal(Size, LCtx);
+ QualType sizeTy = Size->getType();
+
+ ProgramStateRef stateZeroSize, stateNonZeroSize;
+ llvm::tie(stateZeroSize, stateNonZeroSize) =
+ assumeZero(C, state, sizeVal, sizeTy);
+
+ // If the size can be zero, the result will be 0 in that case, and we don't
+ // have to check either of the buffers.
+ if (stateZeroSize) {
+ state = stateZeroSize;
+ state = state->BindExpr(CE, LCtx,
+ svalBuilder.makeZeroVal(CE->getType()));
+ C.addTransition(state);
+ }
+
+ // If the size can be nonzero, we have to check the other arguments.
+ if (stateNonZeroSize) {
+ state = stateNonZeroSize;
+ // If we know the two buffers are the same, we know the result is 0.
+ // First, get the two buffers' addresses. Another checker will have already
+ // made sure they're not undefined.
+ DefinedOrUnknownSVal LV =
+ cast<DefinedOrUnknownSVal>(state->getSVal(Left, LCtx));
+ DefinedOrUnknownSVal RV =
+ cast<DefinedOrUnknownSVal>(state->getSVal(Right, LCtx));
+
+ // See if they are the same.
+ DefinedOrUnknownSVal SameBuf = svalBuilder.evalEQ(state, LV, RV);
+ ProgramStateRef StSameBuf, StNotSameBuf;
+ llvm::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
+
+ // If the two arguments might be the same buffer, we know the result is 0,
+ // and we only need to check one size.
+ if (StSameBuf) {
+ state = StSameBuf;
+ state = CheckBufferAccess(C, state, Size, Left);
+ if (state) {
+ state = StSameBuf->BindExpr(CE, LCtx,
+ svalBuilder.makeZeroVal(CE->getType()));
+ C.addTransition(state);
+ }
+ }
+
+ // If the two arguments might be different buffers, we have to check the
+ // size of both of them.
+ if (StNotSameBuf) {
+ state = StNotSameBuf;
+ state = CheckBufferAccess(C, state, Size, Left, Right);
+ if (state) {
+ // The return value is the comparison result, which we don't know.
+ unsigned Count = C.getCurrentBlockCount();
+ SVal CmpV = svalBuilder.getConjuredSymbolVal(NULL, CE, LCtx, Count);
+ state = state->BindExpr(CE, LCtx, CmpV);
+ C.addTransition(state);
+ }
+ }
+ }
+}
+
+void CStringChecker::evalstrLength(CheckerContext &C,
+ const CallExpr *CE) const {
+ if (CE->getNumArgs() < 1)
+ return;
+
+ // size_t strlen(const char *s);
+ evalstrLengthCommon(C, CE, /* IsStrnlen = */ false);
+}
+
+void CStringChecker::evalstrnLength(CheckerContext &C,
+ const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
+ // size_t strnlen(const char *s, size_t maxlen);
+ evalstrLengthCommon(C, CE, /* IsStrnlen = */ true);
+}
+
+void CStringChecker::evalstrLengthCommon(CheckerContext &C, const CallExpr *CE,
+ bool IsStrnlen) const {
+ CurrentFunctionDescription = "string length function";
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+
+ if (IsStrnlen) {
+ const Expr *maxlenExpr = CE->getArg(1);
+ SVal maxlenVal = state->getSVal(maxlenExpr, LCtx);
+
+ ProgramStateRef stateZeroSize, stateNonZeroSize;
+ llvm::tie(stateZeroSize, stateNonZeroSize) =
+ assumeZero(C, state, maxlenVal, maxlenExpr->getType());
+
+ // If the size can be zero, the result will be 0 in that case, and we don't
+ // have to check the string itself.
+ if (stateZeroSize) {
+ SVal zero = C.getSValBuilder().makeZeroVal(CE->getType());
+ stateZeroSize = stateZeroSize->BindExpr(CE, LCtx, zero);
+ C.addTransition(stateZeroSize);
+ }
+
+ // If the size is GUARANTEED to be zero, we're done!
+ if (!stateNonZeroSize)
+ return;
+
+ // Otherwise, record the assumption that the size is nonzero.
+ state = stateNonZeroSize;
+ }
+
+ // Check that the string argument is non-null.
+ const Expr *Arg = CE->getArg(0);
+ SVal ArgVal = state->getSVal(Arg, LCtx);
+
+ state = checkNonNull(C, state, Arg, ArgVal);
+
+ if (!state)
+ return;
+
+ SVal strLength = getCStringLength(C, state, Arg, ArgVal);
+
+ // If the argument isn't a valid C string, there's no valid state to
+ // transition to.
+ if (strLength.isUndef())
+ return;
+
+ DefinedOrUnknownSVal result = UnknownVal();
+
+ // If the check is for strnlen() then bind the return value to no more than
+ // the maxlen value.
+ if (IsStrnlen) {
+ QualType cmpTy = C.getSValBuilder().getConditionType();
+
+ // It's a little unfortunate to be getting this again,
+ // but it's not that expensive...
+ const Expr *maxlenExpr = CE->getArg(1);
+ SVal maxlenVal = state->getSVal(maxlenExpr, LCtx);
+
+ NonLoc *strLengthNL = dyn_cast<NonLoc>(&strLength);
+ NonLoc *maxlenValNL = dyn_cast<NonLoc>(&maxlenVal);
+
+ if (strLengthNL && maxlenValNL) {
+ ProgramStateRef stateStringTooLong, stateStringNotTooLong;
+
+ // Check if the strLength is greater than the maxlen.
+ llvm::tie(stateStringTooLong, stateStringNotTooLong) =
+ state->assume(cast<DefinedOrUnknownSVal>
+ (C.getSValBuilder().evalBinOpNN(state, BO_GT,
+ *strLengthNL,
+ *maxlenValNL,
+ cmpTy)));
+
+ if (stateStringTooLong && !stateStringNotTooLong) {
+ // If the string is longer than maxlen, return maxlen.
+ result = *maxlenValNL;
+ } else if (stateStringNotTooLong && !stateStringTooLong) {
+ // If the string is shorter than maxlen, return its length.
+ result = *strLengthNL;
+ }
+ }
+
+ if (result.isUnknown()) {
+ // If we don't have enough information for a comparison, there's
+ // no guarantee the full string length will actually be returned.
+ // All we know is the return value is the min of the string length
+ // and the limit. This is better than nothing.
+ unsigned Count = C.getCurrentBlockCount();
+ result = C.getSValBuilder().getConjuredSymbolVal(NULL, CE, LCtx, Count);
+ NonLoc *resultNL = cast<NonLoc>(&result);
+
+ if (strLengthNL) {
+ state = state->assume(cast<DefinedOrUnknownSVal>
+ (C.getSValBuilder().evalBinOpNN(state, BO_LE,
+ *resultNL,
+ *strLengthNL,
+ cmpTy)), true);
+ }
+
+ if (maxlenValNL) {
+ state = state->assume(cast<DefinedOrUnknownSVal>
+ (C.getSValBuilder().evalBinOpNN(state, BO_LE,
+ *resultNL,
+ *maxlenValNL,
+ cmpTy)), true);
+ }
+ }
+
+ } else {
+ // This is a plain strlen(), not strnlen().
+ result = cast<DefinedOrUnknownSVal>(strLength);
+
+ // If we don't know the length of the string, conjure a return
+ // value, so it can be used in constraints, at least.
+ if (result.isUnknown()) {
+ unsigned Count = C.getCurrentBlockCount();
+ result = C.getSValBuilder().getConjuredSymbolVal(NULL, CE, LCtx, Count);
+ }
+ }
+
+ // Bind the return value.
+ assert(!result.isUnknown() && "Should have conjured a value by now");
+ state = state->BindExpr(CE, LCtx, result);
+ C.addTransition(state);
+}
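+
+// Illustrative note (editorial addition): with concrete values the logic above
+// folds to an exact result, e.g. strnlen("hello", 3) is bound to 3 and
+// strnlen("hi", 8) to 2; plain strlen("hi") returns the literal's byte length
+// (2) directly. Otherwise, for strnlen, a conjured symbol constrained to be no
+// more than either the string length or the bound is used.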
+
+void CStringChecker::evalStrcpy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
+ // char *strcpy(char *restrict dst, const char *restrict src);
+ evalStrcpyCommon(C, CE,
+ /* returnEnd = */ false,
+ /* isBounded = */ false,
+ /* isAppending = */ false);
+}
+
+void CStringChecker::evalStrncpy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ // char *strncpy(char *restrict dst, const char *restrict src, size_t n);
+ evalStrcpyCommon(C, CE,
+ /* returnEnd = */ false,
+ /* isBounded = */ true,
+ /* isAppending = */ false);
+}
+
+void CStringChecker::evalStpcpy(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
+ // char *stpcpy(char *restrict dst, const char *restrict src);
+ evalStrcpyCommon(C, CE,
+ /* returnEnd = */ true,
+ /* isBounded = */ false,
+ /* isAppending = */ false);
+}
+
+void CStringChecker::evalStrcat(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
+ // char *strcat(char *restrict s1, const char *restrict s2);
+ evalStrcpyCommon(C, CE,
+ /* returnEnd = */ false,
+ /* isBounded = */ false,
+ /* isAppending = */ true);
+}
+
+void CStringChecker::evalStrncat(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ // char *strncat(char *restrict s1, const char *restrict s2, size_t n);
+ evalStrcpyCommon(C, CE,
+ /* returnEnd = */ false,
+ /* isBounded = */ true,
+ /* isAppending = */ true);
+}
+
+void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
+ bool returnEnd, bool isBounded,
+ bool isAppending) const {
+ CurrentFunctionDescription = "string copy function";
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+
+ // Check that the destination is non-null.
+ const Expr *Dst = CE->getArg(0);
+ SVal DstVal = state->getSVal(Dst, LCtx);
+
+ state = checkNonNull(C, state, Dst, DstVal);
+ if (!state)
+ return;
+
+ // Check that the source is non-null.
+ const Expr *srcExpr = CE->getArg(1);
+ SVal srcVal = state->getSVal(srcExpr, LCtx);
+ state = checkNonNull(C, state, srcExpr, srcVal);
+ if (!state)
+ return;
+
+ // Get the string length of the source.
+ SVal strLength = getCStringLength(C, state, srcExpr, srcVal);
+
+ // If the source isn't a valid C string, give up.
+ if (strLength.isUndef())
+ return;
+
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ QualType cmpTy = svalBuilder.getConditionType();
+ QualType sizeTy = svalBuilder.getContext().getSizeType();
+
+ // These two values allow checking two kinds of errors:
+ // - actual overflows caused by a source that doesn't fit in the destination
+ // - potential overflows caused by a bound that could exceed the destination
+ SVal amountCopied = UnknownVal();
+ SVal maxLastElementIndex = UnknownVal();
+ const char *boundWarning = NULL;
+
+ // If the function is strncpy, strncat, etc... it is bounded.
+ if (isBounded) {
+ // Get the max number of characters to copy.
+ const Expr *lenExpr = CE->getArg(2);
+ SVal lenVal = state->getSVal(lenExpr, LCtx);
+
+ // Protect against misdeclared strncpy().
+ lenVal = svalBuilder.evalCast(lenVal, sizeTy, lenExpr->getType());
+
+ NonLoc *strLengthNL = dyn_cast<NonLoc>(&strLength);
+ NonLoc *lenValNL = dyn_cast<NonLoc>(&lenVal);
+
+ // If we know both values, we might be able to figure out how much
+ // we're copying.
+ if (strLengthNL && lenValNL) {
+ ProgramStateRef stateSourceTooLong, stateSourceNotTooLong;
+
+ // Check if the max number to copy is less than the length of the src.
+ // If the bound is equal to the source length, strncpy won't null-
+ // terminate the result!
+ llvm::tie(stateSourceTooLong, stateSourceNotTooLong) =
+ state->assume(cast<DefinedOrUnknownSVal>
+ (svalBuilder.evalBinOpNN(state, BO_GE, *strLengthNL,
+ *lenValNL, cmpTy)));
+
+ if (stateSourceTooLong && !stateSourceNotTooLong) {
+ // Max number to copy is less than the length of the src, so the actual
+ // strLength copied is the max number arg.
+ state = stateSourceTooLong;
+ amountCopied = lenVal;
+
+ } else if (!stateSourceTooLong && stateSourceNotTooLong) {
+ // The source buffer entirely fits in the bound.
+ state = stateSourceNotTooLong;
+ amountCopied = strLength;
+ }
+ }
+
+ // We still want to know if the bound is known to be too large.
+ if (lenValNL) {
+ if (isAppending) {
+ // For strncat, the check is strlen(dst) + lenVal < sizeof(dst)
+
+ // Get the string length of the destination. If the destination is
+ // memory that can't have a string length, we shouldn't be copying
+ // into it anyway.
+ SVal dstStrLength = getCStringLength(C, state, Dst, DstVal);
+ if (dstStrLength.isUndef())
+ return;
+
+ if (NonLoc *dstStrLengthNL = dyn_cast<NonLoc>(&dstStrLength)) {
+ maxLastElementIndex = svalBuilder.evalBinOpNN(state, BO_Add,
+ *lenValNL,
+ *dstStrLengthNL,
+ sizeTy);
+ boundWarning = "Size argument is greater than the free space in the "
+ "destination buffer";
+ }
+
+ } else {
+ // For strncpy, this is just checking that lenVal <= sizeof(dst)
+ // (Yes, strncpy and strncat differ in how they treat termination.
+ // strncat ALWAYS terminates, but strncpy doesn't.)
+ NonLoc one = cast<NonLoc>(svalBuilder.makeIntVal(1, sizeTy));
+ maxLastElementIndex = svalBuilder.evalBinOpNN(state, BO_Sub, *lenValNL,
+ one, sizeTy);
+ boundWarning = "Size argument is greater than the length of the "
+ "destination buffer";
+ }
+ }
+
+ // If we couldn't pin down the copy length, at least bound it.
+ // FIXME: We should actually run this code path for append as well, but
+ // right now it creates problems with constraints (since we can end up
+ // trying to pass constraints from symbol to symbol).
+ if (amountCopied.isUnknown() && !isAppending) {
+ // Try to get a "hypothetical" string length symbol, which we can later
+ // set as a real value if that turns out to be the case.
+ amountCopied = getCStringLength(C, state, lenExpr, srcVal, true);
+ assert(!amountCopied.isUndef());
+
+ if (NonLoc *amountCopiedNL = dyn_cast<NonLoc>(&amountCopied)) {
+ if (lenValNL) {
+ // amountCopied <= lenVal
+ SVal copiedLessThanBound = svalBuilder.evalBinOpNN(state, BO_LE,
+ *amountCopiedNL,
+ *lenValNL,
+ cmpTy);
+ state = state->assume(cast<DefinedOrUnknownSVal>(copiedLessThanBound),
+ true);
+ if (!state)
+ return;
+ }
+
+ if (strLengthNL) {
+ // amountCopied <= strlen(source)
+ SVal copiedLessThanSrc = svalBuilder.evalBinOpNN(state, BO_LE,
+ *amountCopiedNL,
+ *strLengthNL,
+ cmpTy);
+ state = state->assume(cast<DefinedOrUnknownSVal>(copiedLessThanSrc),
+ true);
+ if (!state)
+ return;
+ }
+ }
+ }
+
+ } else {
+ // The function isn't bounded. The amount copied should match the length
+ // of the source buffer.
+ amountCopied = strLength;
+ }
+
+ assert(state);
+
+ // This represents the number of characters copied into the destination
+ // buffer. (It may not actually be the strlen if the destination buffer
+ // is not terminated.)
+ SVal finalStrLength = UnknownVal();
+
+ // If this is an appending function (strcat, strncat...) then set the
+ // string length to strlen(src) + strlen(dst) since the buffer will
+ // ultimately contain both.
+ if (isAppending) {
+ // Get the string length of the destination. If the destination is memory
+ // that can't have a string length, we shouldn't be copying into it anyway.
+ SVal dstStrLength = getCStringLength(C, state, Dst, DstVal);
+ if (dstStrLength.isUndef())
+ return;
+
+ NonLoc *srcStrLengthNL = dyn_cast<NonLoc>(&amountCopied);
+ NonLoc *dstStrLengthNL = dyn_cast<NonLoc>(&dstStrLength);
+
+ // If we know both string lengths, we might know the final string length.
+ if (srcStrLengthNL && dstStrLengthNL) {
+ // Make sure the two lengths together don't overflow a size_t.
+ state = checkAdditionOverflow(C, state, *srcStrLengthNL, *dstStrLengthNL);
+ if (!state)
+ return;
+
+ finalStrLength = svalBuilder.evalBinOpNN(state, BO_Add, *srcStrLengthNL,
+ *dstStrLengthNL, sizeTy);
+ }
+
+ // If we couldn't get a single value for the final string length,
+ // we can at least bound it by the individual lengths.
+ if (finalStrLength.isUnknown()) {
+ // Try to get a "hypothetical" string length symbol, which we can later
+ // set as a real value if that turns out to be the case.
+ finalStrLength = getCStringLength(C, state, CE, DstVal, true);
+ assert(!finalStrLength.isUndef());
+
+ if (NonLoc *finalStrLengthNL = dyn_cast<NonLoc>(&finalStrLength)) {
+ if (srcStrLengthNL) {
+ // finalStrLength >= srcStrLength
+ SVal sourceInResult = svalBuilder.evalBinOpNN(state, BO_GE,
+ *finalStrLengthNL,
+ *srcStrLengthNL,
+ cmpTy);
+ state = state->assume(cast<DefinedOrUnknownSVal>(sourceInResult),
+ true);
+ if (!state)
+ return;
+ }
+
+ if (dstStrLengthNL) {
+ // finalStrLength >= dstStrLength
+ SVal destInResult = svalBuilder.evalBinOpNN(state, BO_GE,
+ *finalStrLengthNL,
+ *dstStrLengthNL,
+ cmpTy);
+ state = state->assume(cast<DefinedOrUnknownSVal>(destInResult),
+ true);
+ if (!state)
+ return;
+ }
+ }
+ }
+
+ } else {
+ // Otherwise, this is a copy-over function (strcpy, strncpy, ...), and
+ // the final string length will match the input string length.
+ finalStrLength = amountCopied;
+ }
+
+ // The final result of the function will either be a pointer past the last
+ // copied element, or a pointer to the start of the destination buffer.
+ SVal Result = (returnEnd ? UnknownVal() : DstVal);
+
+ assert(state);
+
+ // If the destination is a MemRegion, try to check for a buffer overflow and
+ // record the new string length.
+ if (loc::MemRegionVal *dstRegVal = dyn_cast<loc::MemRegionVal>(&DstVal)) {
+ QualType ptrTy = Dst->getType();
+
+ // If we have an exact value on a bounded copy, use that to check for
+ // overflows, rather than our estimate about how much is actually copied.
+ if (boundWarning) {
+ if (NonLoc *maxLastNL = dyn_cast<NonLoc>(&maxLastElementIndex)) {
+ SVal maxLastElement = svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal,
+ *maxLastNL, ptrTy);
+ state = CheckLocation(C, state, CE->getArg(2), maxLastElement,
+ boundWarning);
+ if (!state)
+ return;
+ }
+ }
+
+ // Then, if the final length is known...
+ if (NonLoc *knownStrLength = dyn_cast<NonLoc>(&finalStrLength)) {
+ SVal lastElement = svalBuilder.evalBinOpLN(state, BO_Add, *dstRegVal,
+ *knownStrLength, ptrTy);
+
+ // ...and we haven't checked the bound, we'll check the actual copy.
+ if (!boundWarning) {
+ const char * const warningMsg =
+ "String copy function overflows destination buffer";
+ state = CheckLocation(C, state, Dst, lastElement, warningMsg);
+ if (!state)
+ return;
+ }
+
+ // If this is a stpcpy-style copy, the last element is the return value.
+ if (returnEnd)
+ Result = lastElement;
+ }
+
+ // Invalidate the destination. This must happen before we set the C string
+ // length because invalidation will clear the length.
+ // FIXME: Even if we can't perfectly model the copy, we should see if we
+ // can use LazyCompoundVals to copy the source values into the destination.
+ // This would probably remove any existing bindings past the end of the
+ // string, but that's still an improvement over blank invalidation.
+ state = InvalidateBuffer(C, state, Dst, *dstRegVal);
+
+ // Set the C string length of the destination, if we know it.
+ if (isBounded && !isAppending) {
+ // strncpy is annoying in that it doesn't guarantee to null-terminate
+ // the result string. If the original string didn't fit entirely inside
+ // the bound (including the null-terminator), we don't know how long the
+ // result is.
+ if (amountCopied != strLength)
+ finalStrLength = UnknownVal();
+ }
+ state = setCStringLength(state, dstRegVal->getRegion(), finalStrLength);
+ }
+
+ assert(state);
+
+ // If this is a stpcpy-style copy, but we were unable to check for a buffer
+ // overflow, we still need a result. Conjure a return value.
+ if (returnEnd && Result.isUnknown()) {
+ unsigned Count = C.getCurrentBlockCount();
+ Result = svalBuilder.getConjuredSymbolVal(NULL, CE, LCtx, Count);
+ }
+
+ // Set the return value.
+ state = state->BindExpr(CE, LCtx, Result);
+ C.addTransition(state);
+}
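+
+// Illustrative note (editorial addition): for exact inputs the code above also
+// updates the destination's recorded C string length, e.g.
+//   strcpy(dst, "abc");   // length of dst set to 3, call bound to dst
+//   stpcpy(dst, "abc");   // additionally bound to dst + 3 (the new null byte)
+// For strncpy, if the amount copied may differ from the source length, the
+// resulting length is left unknown, since the copy may not be null-terminated.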
+
+void CStringChecker::evalStrcmp(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
+ // int strcmp(const char *s1, const char *s2);
+ evalStrcmpCommon(C, CE, /* isBounded = */ false, /* ignoreCase = */ false);
+}
+
+void CStringChecker::evalStrncmp(CheckerContext &C, const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ // int strncmp(const char *s1, const char *s2, size_t n);
+ evalStrcmpCommon(C, CE, /* isBounded = */ true, /* ignoreCase = */ false);
+}
+
+void CStringChecker::evalStrcasecmp(CheckerContext &C,
+ const CallExpr *CE) const {
+ if (CE->getNumArgs() < 2)
+ return;
+
+ // int strcasecmp(const char *s1, const char *s2);
+ evalStrcmpCommon(C, CE, /* isBounded = */ false, /* ignoreCase = */ true);
+}
+
+void CStringChecker::evalStrncasecmp(CheckerContext &C,
+ const CallExpr *CE) const {
+ if (CE->getNumArgs() < 3)
+ return;
+
+ // int strncasecmp(const char *s1, const char *s2, size_t n);
+ evalStrcmpCommon(C, CE, /* isBounded = */ true, /* ignoreCase = */ true);
+}
+
+void CStringChecker::evalStrcmpCommon(CheckerContext &C, const CallExpr *CE,
+ bool isBounded, bool ignoreCase) const {
+ CurrentFunctionDescription = "string comparison function";
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+
+ // Check that the first string is non-null
+ const Expr *s1 = CE->getArg(0);
+ SVal s1Val = state->getSVal(s1, LCtx);
+ state = checkNonNull(C, state, s1, s1Val);
+ if (!state)
+ return;
+
+ // Check that the second string is non-null.
+ const Expr *s2 = CE->getArg(1);
+ SVal s2Val = state->getSVal(s2, LCtx);
+ state = checkNonNull(C, state, s2, s2Val);
+ if (!state)
+ return;
+
+ // Get the string length of the first string or give up.
+ SVal s1Length = getCStringLength(C, state, s1, s1Val);
+ if (s1Length.isUndef())
+ return;
+
+ // Get the string length of the second string or give up.
+ SVal s2Length = getCStringLength(C, state, s2, s2Val);
+ if (s2Length.isUndef())
+ return;
+
+ // If we know the two buffers are the same, we know the result is 0.
+ // First, get the two buffers' addresses. Another checker will have already
+ // made sure they're not undefined.
+ DefinedOrUnknownSVal LV = cast<DefinedOrUnknownSVal>(s1Val);
+ DefinedOrUnknownSVal RV = cast<DefinedOrUnknownSVal>(s2Val);
+
+ // See if they are the same.
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ DefinedOrUnknownSVal SameBuf = svalBuilder.evalEQ(state, LV, RV);
+ ProgramStateRef StSameBuf, StNotSameBuf;
+ llvm::tie(StSameBuf, StNotSameBuf) = state->assume(SameBuf);
+
+ // If the two arguments might be the same buffer, we know the result is 0,
+ // and we only need to check one size.
+ if (StSameBuf) {
+ StSameBuf = StSameBuf->BindExpr(CE, LCtx,
+ svalBuilder.makeZeroVal(CE->getType()));
+ C.addTransition(StSameBuf);
+
+ // If the two arguments are GUARANTEED to be the same, we're done!
+ if (!StNotSameBuf)
+ return;
+ }
+
+ assert(StNotSameBuf);
+ state = StNotSameBuf;
+
+ // At this point we can go about comparing the two buffers.
+ // For now, we only do this if they're both known string literals.
+
+ // Attempt to extract string literals from both expressions.
+ const StringLiteral *s1StrLiteral = getCStringLiteral(C, state, s1, s1Val);
+ const StringLiteral *s2StrLiteral = getCStringLiteral(C, state, s2, s2Val);
+ bool canComputeResult = false;
+
+ if (s1StrLiteral && s2StrLiteral) {
+ StringRef s1StrRef = s1StrLiteral->getString();
+ StringRef s2StrRef = s2StrLiteral->getString();
+
+ if (isBounded) {
+ // Get the max number of characters to compare.
+ const Expr *lenExpr = CE->getArg(2);
+ SVal lenVal = state->getSVal(lenExpr, LCtx);
+
+ // If the length is known, we can get the right substrings.
+ if (const llvm::APSInt *len = svalBuilder.getKnownValue(state, lenVal)) {
+ // Create substrings of each to compare the prefix.
+ s1StrRef = s1StrRef.substr(0, (size_t)len->getZExtValue());
+ s2StrRef = s2StrRef.substr(0, (size_t)len->getZExtValue());
+ canComputeResult = true;
+ }
+ } else {
+ // This is a normal, unbounded strcmp.
+ canComputeResult = true;
+ }
+
+ if (canComputeResult) {
+ // Real strcmp stops at null characters.
+ size_t s1Term = s1StrRef.find('\0');
+ if (s1Term != StringRef::npos)
+ s1StrRef = s1StrRef.substr(0, s1Term);
+
+ size_t s2Term = s2StrRef.find('\0');
+ if (s2Term != StringRef::npos)
+ s2StrRef = s2StrRef.substr(0, s2Term);
+
+ // Use StringRef's comparison methods to compute the actual result.
+ int result;
+
+ if (ignoreCase) {
+ // Compare string 1 to string 2 the same way strcasecmp() does.
+ result = s1StrRef.compare_lower(s2StrRef);
+ } else {
+ // Compare string 1 to string 2 the same way strcmp() does.
+ result = s1StrRef.compare(s2StrRef);
+ }
+
+ // Build the SVal of the comparison and bind the return value.
+ SVal resultVal = svalBuilder.makeIntVal(result, CE->getType());
+ state = state->BindExpr(CE, LCtx, resultVal);
+ }
+ }
+
+ if (!canComputeResult) {
+ // Conjure a symbolic value. It's the best we can do.
+ unsigned Count = C.getCurrentBlockCount();
+ SVal resultVal = svalBuilder.getConjuredSymbolVal(NULL, CE, LCtx, Count);
+ state = state->BindExpr(CE, LCtx, resultVal);
+ }
+
+ // Record this as a possible path.
+ C.addTransition(state);
+}
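+
+// Illustrative example (editorial addition): when both arguments are known
+// string literals, the comparison above is folded to a concrete value, e.g.
+//   strcmp("abc", "abd");       // bound to a negative constant
+//   strncmp("abc", "abd", 2);   // bound to 0 (the two-character prefixes match)
+//   strcasecmp("ABC", "abc");   // bound to 0
+// Anything else falls back to a conjured symbolic return value.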
+
+//===----------------------------------------------------------------------===//
+// The driver method, and other Checker callbacks.
+//===----------------------------------------------------------------------===//
+
+bool CStringChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
+ const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+
+ if (!FDecl)
+ return false;
+
+ FnCheck evalFunction = 0;
+ if (C.isCLibraryFunction(FDecl, "memcpy"))
+ evalFunction = &CStringChecker::evalMemcpy;
+ else if (C.isCLibraryFunction(FDecl, "mempcpy"))
+ evalFunction = &CStringChecker::evalMempcpy;
+ else if (C.isCLibraryFunction(FDecl, "memcmp"))
+ evalFunction = &CStringChecker::evalMemcmp;
+ else if (C.isCLibraryFunction(FDecl, "memmove"))
+ evalFunction = &CStringChecker::evalMemmove;
+ else if (C.isCLibraryFunction(FDecl, "strcpy"))
+ evalFunction = &CStringChecker::evalStrcpy;
+ else if (C.isCLibraryFunction(FDecl, "strncpy"))
+ evalFunction = &CStringChecker::evalStrncpy;
+ else if (C.isCLibraryFunction(FDecl, "stpcpy"))
+ evalFunction = &CStringChecker::evalStpcpy;
+ else if (C.isCLibraryFunction(FDecl, "strcat"))
+ evalFunction = &CStringChecker::evalStrcat;
+ else if (C.isCLibraryFunction(FDecl, "strncat"))
+ evalFunction = &CStringChecker::evalStrncat;
+ else if (C.isCLibraryFunction(FDecl, "strlen"))
+ evalFunction = &CStringChecker::evalstrLength;
+ else if (C.isCLibraryFunction(FDecl, "strnlen"))
+ evalFunction = &CStringChecker::evalstrnLength;
+ else if (C.isCLibraryFunction(FDecl, "strcmp"))
+ evalFunction = &CStringChecker::evalStrcmp;
+ else if (C.isCLibraryFunction(FDecl, "strncmp"))
+ evalFunction = &CStringChecker::evalStrncmp;
+ else if (C.isCLibraryFunction(FDecl, "strcasecmp"))
+ evalFunction = &CStringChecker::evalStrcasecmp;
+ else if (C.isCLibraryFunction(FDecl, "strncasecmp"))
+ evalFunction = &CStringChecker::evalStrncasecmp;
+ else if (C.isCLibraryFunction(FDecl, "bcopy"))
+ evalFunction = &CStringChecker::evalBcopy;
+ else if (C.isCLibraryFunction(FDecl, "bcmp"))
+ evalFunction = &CStringChecker::evalMemcmp;
+
+ // If the callee isn't a string function, let another checker handle it.
+ if (!evalFunction)
+ return false;
+
+ // Make sure each function sets its own description.
+ // (But don't bother in a release build.)
+ assert(!(CurrentFunctionDescription = NULL));
+
+ // Check and evaluate the call.
+ (this->*evalFunction)(C, CE);
+
+ // If evaluating the call resulted in no change, chain to the next eval call
+ // handler.
+ // Note: the custom CString evaluation calls assume that basic safety
+ // properties hold. However, if the user chooses to turn off some of these
+ // checks, we ignore the issues and leave the call evaluation to a generic
+ // handler.
+ if (!C.isDifferent())
+ return false;
+
+ return true;
+}
+
+void CStringChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
+ // Record string length for char a[] = "abc";
+ ProgramStateRef state = C.getState();
+
+ for (DeclStmt::const_decl_iterator I = DS->decl_begin(), E = DS->decl_end();
+ I != E; ++I) {
+ const VarDecl *D = dyn_cast<VarDecl>(*I);
+ if (!D)
+ continue;
+
+ // FIXME: Handle array fields of structs.
+ if (!D->getType()->isArrayType())
+ continue;
+
+ const Expr *Init = D->getInit();
+ if (!Init)
+ continue;
+ if (!isa<StringLiteral>(Init))
+ continue;
+
+ Loc VarLoc = state->getLValue(D, C.getLocationContext());
+ const MemRegion *MR = VarLoc.getAsRegion();
+ if (!MR)
+ continue;
+
+ SVal StrVal = state->getSVal(Init, C.getLocationContext());
+ assert(StrVal.isValid() && "Initializer string is unknown or undefined");
+ DefinedOrUnknownSVal strLength
+ = cast<DefinedOrUnknownSVal>(getCStringLength(C, state, Init, StrVal));
+
+ state = state->set<CStringLength>(MR, strLength);
+ }
+
+ C.addTransition(state);
+}
+
+bool CStringChecker::wantsRegionChangeUpdate(ProgramStateRef state) const {
+ CStringLength::EntryMap Entries = state->get<CStringLength>();
+ return !Entries.isEmpty();
+}
+
+ProgramStateRef
+CStringChecker::checkRegionChanges(ProgramStateRef state,
+ const StoreManager::InvalidatedSymbols *,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) const {
+ CStringLength::EntryMap Entries = state->get<CStringLength>();
+ if (Entries.isEmpty())
+ return state;
+
+ llvm::SmallPtrSet<const MemRegion *, 8> Invalidated;
+ llvm::SmallPtrSet<const MemRegion *, 32> SuperRegions;
+
+ // First build sets for the changed regions and their super-regions.
+ for (ArrayRef<const MemRegion *>::iterator
+ I = Regions.begin(), E = Regions.end(); I != E; ++I) {
+ const MemRegion *MR = *I;
+ Invalidated.insert(MR);
+
+ SuperRegions.insert(MR);
+ while (const SubRegion *SR = dyn_cast<SubRegion>(MR)) {
+ MR = SR->getSuperRegion();
+ SuperRegions.insert(MR);
+ }
+ }
+
+ CStringLength::EntryMap::Factory &F = state->get_context<CStringLength>();
+
+ // Then loop over the entries in the current state.
+ for (CStringLength::EntryMap::iterator I = Entries.begin(),
+ E = Entries.end(); I != E; ++I) {
+ const MemRegion *MR = I.getKey();
+
+ // Is this entry for a super-region of a changed region?
+ if (SuperRegions.count(MR)) {
+ Entries = F.remove(Entries, MR);
+ continue;
+ }
+
+ // Is this entry for a sub-region of a changed region?
+ const MemRegion *Super = MR;
+ while (const SubRegion *SR = dyn_cast<SubRegion>(Super)) {
+ Super = SR->getSuperRegion();
+ if (Invalidated.count(Super)) {
+ Entries = F.remove(Entries, MR);
+ break;
+ }
+ }
+ }
+
+ return state->set<CStringLength>(Entries);
+}
+
+void CStringChecker::checkLiveSymbols(ProgramStateRef state,
+ SymbolReaper &SR) const {
+ // Mark all symbols in our string length map as valid.
+ CStringLength::EntryMap Entries = state->get<CStringLength>();
+
+ for (CStringLength::EntryMap::iterator I = Entries.begin(), E = Entries.end();
+ I != E; ++I) {
+ SVal Len = I.getData();
+
+ for (SymExpr::symbol_iterator si = Len.symbol_begin(),
+ se = Len.symbol_end(); si != se; ++si)
+ SR.markInUse(*si);
+ }
+}
+
+void CStringChecker::checkDeadSymbols(SymbolReaper &SR,
+ CheckerContext &C) const {
+ if (!SR.hasDeadSymbols())
+ return;
+
+ ProgramStateRef state = C.getState();
+ CStringLength::EntryMap Entries = state->get<CStringLength>();
+ if (Entries.isEmpty())
+ return;
+
+ CStringLength::EntryMap::Factory &F = state->get_context<CStringLength>();
+ for (CStringLength::EntryMap::iterator I = Entries.begin(), E = Entries.end();
+ I != E; ++I) {
+ SVal Len = I.getData();
+ if (SymbolRef Sym = Len.getAsSymbol()) {
+ if (SR.isDead(Sym))
+ Entries = F.remove(Entries, I.getKey());
+ }
+ }
+
+ state = state->set<CStringLength>(Entries);
+ C.addTransition(state);
+}
+
+#define REGISTER_CHECKER(name) \
+void ento::register##name(CheckerManager &mgr) {\
+ static CStringChecker *TheChecker = 0; \
+ if (TheChecker == 0) \
+ TheChecker = mgr.registerChecker<CStringChecker>(); \
+ TheChecker->Filter.Check##name = true; \
+}
+
+REGISTER_CHECKER(CStringNullArg)
+REGISTER_CHECKER(CStringOutOfBounds)
+REGISTER_CHECKER(CStringBufferOverlap)
+REGISTER_CHECKER(CStringNotNullTerm)
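+
+// For reference (expansion sketch, not additional code): each invocation above
+// defines a registration hook that enables one Filter flag, e.g.
+// REGISTER_CHECKER(CStringNullArg) expands to roughly:
+//
+//   void ento::registerCStringNullArg(CheckerManager &mgr) {
+//     static CStringChecker *TheChecker = 0;
+//     if (TheChecker == 0)
+//       TheChecker = mgr.registerChecker<CStringChecker>();
+//     TheChecker->Filter.CheckCStringNullArg = true;
+//   }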
+
+void ento::registerCStringCheckerBasic(CheckerManager &Mgr) {
+ registerCStringNullArg(Mgr);
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
new file mode 100644
index 0000000..befc935
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp
@@ -0,0 +1,191 @@
+//== CStringSyntaxChecker.cpp - C string syntax checks ---------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An AST checker that looks for common pitfalls when using C string APIs.
+// - Identifies erroneous patterns in the last argument to strncat - the number
+// of bytes to copy.
+//
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TypeTraits.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class WalkAST: public StmtVisitor<WalkAST> {
+ BugReporter &BR;
+ AnalysisDeclContext* AC;
+ ASTContext &ASTC;
+
+ /// Check if two expressions refer to the same declaration.
+ inline bool sameDecl(const Expr *A1, const Expr *A2) {
+ if (const DeclRefExpr *D1 = dyn_cast<DeclRefExpr>(A1->IgnoreParenCasts()))
+ if (const DeclRefExpr *D2 = dyn_cast<DeclRefExpr>(A2->IgnoreParenCasts()))
+ return D1->getDecl() == D2->getDecl();
+ return false;
+ }
+
+ /// Check if the expression E is a sizeof(WithArg).
+ inline bool isSizeof(const Expr *E, const Expr *WithArg) {
+ if (const UnaryExprOrTypeTraitExpr *UE =
+ dyn_cast<UnaryExprOrTypeTraitExpr>(E))
+ if (UE->getKind() == UETT_SizeOf)
+ return sameDecl(UE->getArgumentExpr(), WithArg);
+ return false;
+ }
+
+ /// Check if the expression E is a strlen(WithArg).
+ inline bool isStrlen(const Expr *E, const Expr *WithArg) {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(E)) {
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD)
+ return false;
+ return (CheckerContext::isCLibraryFunction(FD, "strlen", ASTC)
+ && sameDecl(CE->getArg(0), WithArg));
+ }
+ return false;
+ }
+
+ /// Check if the expression is an integer literal with value 1.
+ inline bool isOne(const Expr *E) {
+ if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(E))
+ return (IL->getValue() == 1);
+ return false;
+ }
+
+ inline StringRef getPrintableName(const Expr *E) {
+ if (const DeclRefExpr *D = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
+ return D->getDecl()->getName();
+ return StringRef();
+ }
+
+ /// Identify erroneous patterns in the last argument to strncat - the number
+ /// of bytes to copy.
+ bool containsBadStrncatPattern(const CallExpr *CE);
+
+public:
+ WalkAST(BugReporter &br, AnalysisDeclContext* ac) :
+ BR(br), AC(ac), ASTC(AC->getASTContext()) {
+ }
+
+ // Statement visitor methods.
+ void VisitChildren(Stmt *S);
+ void VisitStmt(Stmt *S) {
+ VisitChildren(S);
+ }
+ void VisitCallExpr(CallExpr *CE);
+};
+} // end anonymous namespace
+
+// The correct size argument should look like the following:
+// strncat(dst, src, sizeof(dst) - strlen(dst) - 1);
+// We look for the following anti-patterns:
+// - strncat(dst, src, sizeof(dst) - strlen(dst));
+// - strncat(dst, src, sizeof(dst) - 1);
+// - strncat(dst, src, sizeof(dst));
+bool WalkAST::containsBadStrncatPattern(const CallExpr *CE) {
+ const Expr *DstArg = CE->getArg(0);
+ const Expr *SrcArg = CE->getArg(1);
+ const Expr *LenArg = CE->getArg(2);
+
+ // Identify wrong size expressions, which are commonly used instead.
+ if (const BinaryOperator *BE =
+ dyn_cast<BinaryOperator>(LenArg->IgnoreParenCasts())) {
+ // - sizeof(dst) - strlen(dst)
+ if (BE->getOpcode() == BO_Sub) {
+ const Expr *L = BE->getLHS();
+ const Expr *R = BE->getRHS();
+ if (isSizeof(L, DstArg) && isStrlen(R, DstArg))
+ return true;
+
+ // - sizeof(dst) - 1
+ if (isSizeof(L, DstArg) && isOne(R->IgnoreParenCasts()))
+ return true;
+ }
+ }
+ // - sizeof(dst)
+ if (isSizeof(LenArg, DstArg))
+ return true;
+
+ // - sizeof(src)
+ if (isSizeof(LenArg, SrcArg))
+ return true;
+ return false;
+}
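+
+// Illustrative example (editorial addition), assuming a local array 'dst' and
+// some source 'src': calls the matcher above would flag versus accept:
+//
+//   char dst[64];
+//   strncat(dst, src, sizeof(dst) - strlen(dst));      // flagged: no room for '\0'
+//   strncat(dst, src, sizeof(dst) - 1);                // flagged: ignores strlen(dst)
+//   strncat(dst, src, sizeof(dst));                    // flagged
+//   strncat(dst, src, sizeof(src));                    // flagged
+//   strncat(dst, src, sizeof(dst) - strlen(dst) - 1);  // accepted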
+
+void WalkAST::VisitCallExpr(CallExpr *CE) {
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD)
+ return;
+
+ if (CheckerContext::isCLibraryFunction(FD, "strncat", ASTC)) {
+ if (containsBadStrncatPattern(CE)) {
+ const Expr *DstArg = CE->getArg(0);
+ const Expr *LenArg = CE->getArg(2);
+ SourceRange R = LenArg->getSourceRange();
+ PathDiagnosticLocation Loc =
+ PathDiagnosticLocation::createBegin(LenArg, BR.getSourceManager(), AC);
+
+ StringRef DstName = getPrintableName(DstArg);
+
+ SmallString<256> S;
+ llvm::raw_svector_ostream os(S);
+ os << "Potential buffer overflow. ";
+ if (!DstName.empty()) {
+ os << "Replace with 'sizeof(" << DstName << ") "
+ "- strlen(" << DstName <<") - 1'";
+ os << " or u";
+ } else
+ os << "U";
+ os << "se a safer 'strlcat' API";
+
+ BR.EmitBasicReport(FD, "Anti-pattern in the argument", "C String API",
+ os.str(), Loc, &R, 1);
+ }
+ }
+
+ // Recurse and check children.
+ VisitChildren(CE);
+}
+
+void WalkAST::VisitChildren(Stmt *S) {
+ for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I != E;
+ ++I)
+ if (Stmt *child = *I)
+ Visit(child);
+}
+
+namespace {
+class CStringSyntaxChecker: public Checker<check::ASTCodeBody> {
+public:
+
+ void checkASTCodeBody(const Decl *D, AnalysisManager& Mgr,
+ BugReporter &BR) const {
+ WalkAST walker(BR, Mgr.getAnalysisDeclContext(D));
+ walker.Visit(D->getBody());
+ }
+};
+}
+
+void ento::registerCStringSyntaxChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CStringSyntaxChecker>();
+}
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
new file mode 100644
index 0000000..f601431
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -0,0 +1,385 @@
+//===--- CallAndMessageChecker.cpp ------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines CallAndMessageChecker, a builtin checker that checks for various
+// errors of call and objc message expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class CallAndMessageChecker
+ : public Checker< check::PreStmt<CallExpr>, check::PreObjCMessage > {
+ mutable OwningPtr<BugType> BT_call_null;
+ mutable OwningPtr<BugType> BT_call_undef;
+ mutable OwningPtr<BugType> BT_call_arg;
+ mutable OwningPtr<BugType> BT_msg_undef;
+ mutable OwningPtr<BugType> BT_objc_prop_undef;
+ mutable OwningPtr<BugType> BT_msg_arg;
+ mutable OwningPtr<BugType> BT_msg_ret;
+public:
+
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPreObjCMessage(ObjCMessage msg, CheckerContext &C) const;
+
+private:
+ static void PreVisitProcessArgs(CheckerContext &C,CallOrObjCMessage callOrMsg,
+ const char *BT_desc, OwningPtr<BugType> &BT);
+ static bool PreVisitProcessArg(CheckerContext &C, SVal V,SourceRange argRange,
+ const Expr *argEx,
+ const bool checkUninitFields,
+ const char *BT_desc,
+ OwningPtr<BugType> &BT);
+
+ static void EmitBadCall(BugType *BT, CheckerContext &C, const CallExpr *CE);
+ void emitNilReceiverBug(CheckerContext &C, const ObjCMessage &msg,
+ ExplodedNode *N) const;
+
+ void HandleNilReceiver(CheckerContext &C,
+ ProgramStateRef state,
+ ObjCMessage msg) const;
+
+ static void LazyInit_BT(const char *desc, OwningPtr<BugType> &BT) {
+ if (!BT)
+ BT.reset(new BuiltinBug(desc));
+ }
+};
+} // end anonymous namespace
+
+void CallAndMessageChecker::EmitBadCall(BugType *BT, CheckerContext &C,
+ const CallExpr *CE) {
+ ExplodedNode *N = C.generateSink();
+ if (!N)
+ return;
+
+ BugReport *R = new BugReport(*BT, BT->getName(), N);
+ R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
+ bugreporter::GetCalleeExpr(N), R));
+ C.EmitReport(R);
+}
+
+void CallAndMessageChecker::PreVisitProcessArgs(CheckerContext &C,
+ CallOrObjCMessage callOrMsg,
+ const char *BT_desc,
+ OwningPtr<BugType> &BT) {
+ // Don't check for uninitialized field values in arguments if the
+ // callee has a body that is available and we have the chance to inline it.
+ // This is a hack, but it is a reasonable compromise between sometimes warning
+ // and sometimes not, depending on whether we decide to inline a function.
+ const Decl *D = callOrMsg.getDecl();
+ const bool checkUninitFields =
+ !(C.getAnalysisManager().shouldInlineCall() &&
+ (D && D->getBody()));
+
+ for (unsigned i = 0, e = callOrMsg.getNumArgs(); i != e; ++i)
+ if (PreVisitProcessArg(C, callOrMsg.getArgSVal(i),
+ callOrMsg.getArgSourceRange(i), callOrMsg.getArg(i),
+ checkUninitFields,
+ BT_desc, BT))
+ return;
+}
+
+bool CallAndMessageChecker::PreVisitProcessArg(CheckerContext &C,
+ SVal V, SourceRange argRange,
+ const Expr *argEx,
+ const bool checkUninitFields,
+ const char *BT_desc,
+ OwningPtr<BugType> &BT) {
+ if (V.isUndef()) {
+ if (ExplodedNode *N = C.generateSink()) {
+ LazyInit_BT(BT_desc, BT);
+
+ // Generate a report for this bug.
+ BugReport *R = new BugReport(*BT, BT->getName(), N);
+ R->addRange(argRange);
+ if (argEx)
+ R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, argEx,
+ R));
+ C.EmitReport(R);
+ }
+ return true;
+ }
+
+ if (!checkUninitFields)
+ return false;
+
+ if (const nonloc::LazyCompoundVal *LV =
+ dyn_cast<nonloc::LazyCompoundVal>(&V)) {
+
+ class FindUninitializedField {
+ public:
+ SmallVector<const FieldDecl *, 10> FieldChain;
+ private:
+ ASTContext &C;
+ StoreManager &StoreMgr;
+ MemRegionManager &MrMgr;
+ Store store;
+ public:
+ FindUninitializedField(ASTContext &c, StoreManager &storeMgr,
+ MemRegionManager &mrMgr, Store s)
+ : C(c), StoreMgr(storeMgr), MrMgr(mrMgr), store(s) {}
+
+ bool Find(const TypedValueRegion *R) {
+ QualType T = R->getValueType();
+ if (const RecordType *RT = T->getAsStructureType()) {
+ const RecordDecl *RD = RT->getDecl()->getDefinition();
+ assert(RD && "Referred record has no definition");
+ for (RecordDecl::field_iterator I =
+ RD->field_begin(), E = RD->field_end(); I!=E; ++I) {
+ const FieldRegion *FR = MrMgr.getFieldRegion(*I, R);
+ FieldChain.push_back(*I);
+ T = (*I)->getType();
+ if (T->getAsStructureType()) {
+ if (Find(FR))
+ return true;
+ }
+ else {
+ const SVal &V = StoreMgr.getBinding(store, loc::MemRegionVal(FR));
+ if (V.isUndef())
+ return true;
+ }
+ FieldChain.pop_back();
+ }
+ }
+
+ return false;
+ }
+ };
+
+ const LazyCompoundValData *D = LV->getCVData();
+ FindUninitializedField F(C.getASTContext(),
+ C.getState()->getStateManager().getStoreManager(),
+ C.getSValBuilder().getRegionManager(),
+ D->getStore());
+
+ if (F.Find(D->getRegion())) {
+ if (ExplodedNode *N = C.generateSink()) {
+ LazyInit_BT(BT_desc, BT);
+ SmallString<512> Str;
+ llvm::raw_svector_ostream os(Str);
+ os << "Passed-by-value struct argument contains uninitialized data";
+
+ if (F.FieldChain.size() == 1)
+ os << " (e.g., field: '" << *F.FieldChain[0] << "')";
+ else {
+ os << " (e.g., via the field chain: '";
+ bool first = true;
+ for (SmallVectorImpl<const FieldDecl *>::iterator
+ DI = F.FieldChain.begin(), DE = F.FieldChain.end(); DI!=DE;++DI){
+ if (first)
+ first = false;
+ else
+ os << '.';
+ os << **DI;
+ }
+ os << "')";
+ }
+
+ // Generate a report for this bug.
+ BugReport *R = new BugReport(*BT, os.str(), N);
+ R->addRange(argRange);
+
+ // FIXME: enhance track back for uninitialized value for arbitrary
+ // memregions
+ C.EmitReport(R);
+ }
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void CallAndMessageChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const{
+
+ const Expr *Callee = CE->getCallee()->IgnoreParens();
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal L = C.getState()->getSVal(Callee, LCtx);
+
+ if (L.isUndef()) {
+ if (!BT_call_undef)
+ BT_call_undef.reset(new BuiltinBug("Called function pointer is an "
+ "uninitalized pointer value"));
+ EmitBadCall(BT_call_undef.get(), C, CE);
+ return;
+ }
+
+ if (isa<loc::ConcreteInt>(L)) {
+ if (!BT_call_null)
+ BT_call_null.reset(
+ new BuiltinBug("Called function pointer is null (null dereference)"));
+ EmitBadCall(BT_call_null.get(), C, CE);
+ }
+
+ PreVisitProcessArgs(C, CallOrObjCMessage(CE, C.getState(), LCtx),
+ "Function call argument is an uninitialized value",
+ BT_call_arg);
+}
+
+void CallAndMessageChecker::checkPreObjCMessage(ObjCMessage msg,
+ CheckerContext &C) const {
+
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+
+ // FIXME: Handle 'super'?
+ if (const Expr *receiver = msg.getInstanceReceiver()) {
+ SVal recVal = state->getSVal(receiver, LCtx);
+ if (recVal.isUndef()) {
+ if (ExplodedNode *N = C.generateSink()) {
+ BugType *BT = 0;
+ if (msg.isPureMessageExpr()) {
+ if (!BT_msg_undef)
+ BT_msg_undef.reset(new BuiltinBug("Receiver in message expression "
+ "is an uninitialized value"));
+ BT = BT_msg_undef.get();
+ }
+ else {
+ if (!BT_objc_prop_undef)
+ BT_objc_prop_undef.reset(new BuiltinBug("Property access on an "
+ "uninitialized object pointer"));
+ BT = BT_objc_prop_undef.get();
+ }
+ BugReport *R =
+ new BugReport(*BT, BT->getName(), N);
+ R->addRange(receiver->getSourceRange());
+ R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
+ receiver,
+ R));
+ C.EmitReport(R);
+ }
+ return;
+ } else {
+ // Bifurcate the state into nil and non-nil ones.
+ DefinedOrUnknownSVal receiverVal = cast<DefinedOrUnknownSVal>(recVal);
+
+ ProgramStateRef notNilState, nilState;
+ llvm::tie(notNilState, nilState) = state->assume(receiverVal);
+
+ // Handle the case where the receiver must be nil.
+ if (nilState && !notNilState) {
+ HandleNilReceiver(C, state, msg);
+ return;
+ }
+ }
+ }
+
+ const char *bugDesc = msg.isPropertySetter() ?
+ "Argument for property setter is an uninitialized value"
+ : "Argument in message expression is an uninitialized value";
+ // Check for any arguments that are uninitialized/undefined.
+ PreVisitProcessArgs(C, CallOrObjCMessage(msg, state, LCtx),
+ bugDesc, BT_msg_arg);
+}
+
+void CallAndMessageChecker::emitNilReceiverBug(CheckerContext &C,
+ const ObjCMessage &msg,
+ ExplodedNode *N) const {
+
+ if (!BT_msg_ret)
+ BT_msg_ret.reset(
+ new BuiltinBug("Receiver in message expression is "
+ "'nil' and returns a garbage value"));
+
+ SmallString<200> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << "The receiver of message '" << msg.getSelector().getAsString()
+ << "' is nil and returns a value of type '"
+ << msg.getType(C.getASTContext()).getAsString() << "' that will be garbage";
+
+ BugReport *report = new BugReport(*BT_msg_ret, os.str(), N);
+ if (const Expr *receiver = msg.getInstanceReceiver()) {
+ report->addRange(receiver->getSourceRange());
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
+ receiver,
+ report));
+ }
+ C.EmitReport(report);
+}
+
+static bool supportsNilWithFloatRet(const llvm::Triple &triple) {
+ return (triple.getVendor() == llvm::Triple::Apple &&
+ (triple.getOS() == llvm::Triple::IOS ||
+ !triple.isMacOSXVersionLT(10,5)));
+}
+
+void CallAndMessageChecker::HandleNilReceiver(CheckerContext &C,
+ ProgramStateRef state,
+ ObjCMessage msg) const {
+ ASTContext &Ctx = C.getASTContext();
+
+ // Check the return type of the message expression. A message to nil will
+ // return different values depending on the return type and the architecture.
+ QualType RetTy = msg.getType(Ctx);
+ CanQualType CanRetTy = Ctx.getCanonicalType(RetTy);
+ const LocationContext *LCtx = C.getLocationContext();
+
+ if (CanRetTy->isStructureOrClassType()) {
+ // Structure returns are safe since the compiler zeroes them out.
+ SVal V = C.getSValBuilder().makeZeroVal(msg.getType(Ctx));
+ C.addTransition(state->BindExpr(msg.getMessageExpr(), LCtx, V));
+ return;
+ }
+
+ // Other cases: check if sizeof(return type) > sizeof(void*)
+ if (CanRetTy != Ctx.VoidTy && C.getLocationContext()->getParentMap()
+ .isConsumedExpr(msg.getMessageExpr())) {
+ // Compute: sizeof(void *) and sizeof(return type)
+ const uint64_t voidPtrSize = Ctx.getTypeSize(Ctx.VoidPtrTy);
+ const uint64_t returnTypeSize = Ctx.getTypeSize(CanRetTy);
+
+ if (voidPtrSize < returnTypeSize &&
+ !(supportsNilWithFloatRet(Ctx.getTargetInfo().getTriple()) &&
+ (Ctx.FloatTy == CanRetTy ||
+ Ctx.DoubleTy == CanRetTy ||
+ Ctx.LongDoubleTy == CanRetTy ||
+ Ctx.LongLongTy == CanRetTy ||
+ Ctx.UnsignedLongLongTy == CanRetTy))) {
+ if (ExplodedNode *N = C.generateSink(state))
+ emitNilReceiverBug(C, msg, N);
+ return;
+ }
+
+ // Handle the safe cases where the return value is 0 if the
+ // receiver is nil.
+ //
+ // FIXME: For now take the conservative approach that we only
+ // return null values if we *know* that the receiver is nil.
+ // This is because we can have surprises like:
+ //
+ // ... = [[NSScreens screens] objectAtIndex:0];
+ //
+ // What can happen is that [... screens] could return nil, but
+ // it most likely isn't nil. We should assume the semantics
+ // of this case unless we have *a lot* more knowledge.
+ //
+ SVal V = C.getSValBuilder().makeZeroVal(msg.getType(Ctx));
+ C.addTransition(state->BindExpr(msg.getMessageExpr(), LCtx, V));
+ return;
+ }
+
+ C.addTransition(state);
+}
+
+void ento::registerCallAndMessageChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CallAndMessageChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
new file mode 100644
index 0000000..2e184fb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastSizeChecker.cpp
@@ -0,0 +1,86 @@
+//=== CastSizeChecker.cpp ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// CastSizeChecker checks whether, when a malloc'ed symbolic region is cast to
+// type T, the size of the symbolic region is a multiple of the size of T.
+//
+//===----------------------------------------------------------------------===//
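+//
+// A minimal sketch of the kind of code this check is intended to flag
+// (illustrative only; the function and type names are hypothetical):
+//
+//   #include <stdlib.h>
+//   struct S { int a; int b; };          // typically 8 bytes
+//   struct S *make(void) {
+//     return (struct S *)malloc(10);     // 10 is not a multiple of sizeof(struct S)
+//   }
+//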
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/AST/CharUnits.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class CastSizeChecker : public Checker< check::PreStmt<CastExpr> > {
+ mutable OwningPtr<BuiltinBug> BT;
+public:
+ void checkPreStmt(const CastExpr *CE, CheckerContext &C) const;
+};
+}
+
+void CastSizeChecker::checkPreStmt(const CastExpr *CE,CheckerContext &C) const {
+ const Expr *E = CE->getSubExpr();
+ ASTContext &Ctx = C.getASTContext();
+ QualType ToTy = Ctx.getCanonicalType(CE->getType());
+ const PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());
+
+ if (!ToPTy)
+ return;
+
+ QualType ToPointeeTy = ToPTy->getPointeeType();
+
+ // Only perform the check if 'ToPointeeTy' is a complete type.
+ if (ToPointeeTy->isIncompleteType())
+ return;
+
+ ProgramStateRef state = C.getState();
+ const MemRegion *R = state->getSVal(E, C.getLocationContext()).getAsRegion();
+ if (R == 0)
+ return;
+
+ const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R);
+ if (SR == 0)
+ return;
+
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ SVal extent = SR->getExtent(svalBuilder);
+ const llvm::APSInt *extentInt = svalBuilder.getKnownValue(state, extent);
+ if (!extentInt)
+ return;
+
+ CharUnits regionSize = CharUnits::fromQuantity(extentInt->getSExtValue());
+ CharUnits typeSize = C.getASTContext().getTypeSizeInChars(ToPointeeTy);
+
+ // Ignore void, and a few other un-sizeable types.
+ if (typeSize.isZero())
+ return;
+
+ if (regionSize % typeSize != 0) {
+ if (ExplodedNode *errorNode = C.generateSink()) {
+ if (!BT)
+ BT.reset(new BuiltinBug("Cast region with wrong size.",
+ "Cast a region whose size is not a multiple of the"
+ " destination type size."));
+ BugReport *R = new BugReport(*BT, BT->getDescription(),
+ errorNode);
+ R->addRange(CE->getSourceRange());
+ C.EmitReport(R);
+ }
+ }
+}
+
+
+void ento::registerCastSizeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CastSizeChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
new file mode 100644
index 0000000..1407638
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp
@@ -0,0 +1,74 @@
+//=== CastToStructChecker.cpp - Fixed address usage checker ----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CastToStructChecker, a builtin checker that checks for
+// casts from non-struct pointers to struct pointers.
+// This check corresponds to CWE-588.
+//
+//===----------------------------------------------------------------------===//
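+//
+// A minimal sketch of the kind of cast this check is intended to flag
+// (illustrative only; the names are hypothetical):
+//
+//   struct Point { int x; int y; };
+//   int first_field(char *buf) {
+//     struct Point *p = (struct Point *)buf;  // non-struct pointer cast to
+//     return p->x;                            // struct pointer (CWE-588)
+//   }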
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class CastToStructChecker : public Checker< check::PreStmt<CastExpr> > {
+ mutable OwningPtr<BuiltinBug> BT;
+
+public:
+ void checkPreStmt(const CastExpr *CE, CheckerContext &C) const;
+};
+}
+
+void CastToStructChecker::checkPreStmt(const CastExpr *CE,
+ CheckerContext &C) const {
+ const Expr *E = CE->getSubExpr();
+ ASTContext &Ctx = C.getASTContext();
+ QualType OrigTy = Ctx.getCanonicalType(E->getType());
+ QualType ToTy = Ctx.getCanonicalType(CE->getType());
+
+ const PointerType *OrigPTy = dyn_cast<PointerType>(OrigTy.getTypePtr());
+ const PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());
+
+ if (!ToPTy || !OrigPTy)
+ return;
+
+ QualType OrigPointeeTy = OrigPTy->getPointeeType();
+ QualType ToPointeeTy = ToPTy->getPointeeType();
+
+ if (!ToPointeeTy->isStructureOrClassType())
+ return;
+
+ // We allow cast from void*.
+ if (OrigPointeeTy->isVoidType())
+ return;
+
+ // Now the cast-to-type is struct pointer, the original type is not void*.
+ if (!OrigPointeeTy->isRecordType()) {
+ if (ExplodedNode *N = C.addTransition()) {
+ if (!BT)
+ BT.reset(new BuiltinBug("Cast from non-struct type to struct type",
+ "Casting a non-structure type to a structure type "
+ "and accessing a field can lead to memory access "
+ "errors or data corruption."));
+ BugReport *R = new BugReport(*BT,BT->getDescription(), N);
+ R->addRange(CE->getSourceRange());
+ C.EmitReport(R);
+ }
+ }
+}
+
+void ento::registerCastToStructChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CastToStructChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
new file mode 100644
index 0000000..133204a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -0,0 +1,291 @@
+//==- CheckObjCDealloc.cpp - Check ObjC -dealloc implementation --*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckObjCDealloc, a checker that
+// analyzes an Objective-C class's implementation to determine if it
+// correctly implements -dealloc.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/LangOptions.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool scan_dealloc(Stmt *S, Selector Dealloc) {
+
+ if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S))
+ if (ME->getSelector() == Dealloc) {
+ switch (ME->getReceiverKind()) {
+ case ObjCMessageExpr::Instance: return false;
+ case ObjCMessageExpr::SuperInstance: return true;
+ case ObjCMessageExpr::Class: break;
+ case ObjCMessageExpr::SuperClass: break;
+ }
+ }
+
+ // Recurse to children.
+
+ for (Stmt::child_iterator I = S->child_begin(), E= S->child_end(); I!=E; ++I)
+ if (*I && scan_dealloc(*I, Dealloc))
+ return true;
+
+ return false;
+}
+
+static bool scan_ivar_release(Stmt *S, ObjCIvarDecl *ID,
+ const ObjCPropertyDecl *PD,
+ Selector Release,
+ IdentifierInfo* SelfII,
+ ASTContext &Ctx) {
+
+ // [mMyIvar release]
+ if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S))
+ if (ME->getSelector() == Release)
+ if (ME->getInstanceReceiver())
+ if (Expr *Receiver = ME->getInstanceReceiver()->IgnoreParenCasts())
+ if (ObjCIvarRefExpr *E = dyn_cast<ObjCIvarRefExpr>(Receiver))
+ if (E->getDecl() == ID)
+ return true;
+
+ // [self setMyIvar:nil];
+ if (ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S))
+ if (ME->getInstanceReceiver())
+ if (Expr *Receiver = ME->getInstanceReceiver()->IgnoreParenCasts())
+ if (DeclRefExpr *E = dyn_cast<DeclRefExpr>(Receiver))
+ if (E->getDecl()->getIdentifier() == SelfII)
+ if (ME->getMethodDecl() == PD->getSetterMethodDecl() &&
+ ME->getNumArgs() == 1 &&
+ ME->getArg(0)->isNullPointerConstant(Ctx,
+ Expr::NPC_ValueDependentIsNull))
+ return true;
+
+ // self.myIvar = nil;
+ if (BinaryOperator* BO = dyn_cast<BinaryOperator>(S))
+ if (BO->isAssignmentOp())
+ if (ObjCPropertyRefExpr *PRE =
+ dyn_cast<ObjCPropertyRefExpr>(BO->getLHS()->IgnoreParenCasts()))
+ if (PRE->isExplicitProperty() && PRE->getExplicitProperty() == PD)
+ if (BO->getRHS()->isNullPointerConstant(Ctx,
+ Expr::NPC_ValueDependentIsNull)) {
+ // This is only a 'release' if the property kind is not
+ // 'assign'.
+ return PD->getSetterKind() != ObjCPropertyDecl::Assign;
+ }
+
+ // Recurse to children.
+ for (Stmt::child_iterator I = S->child_begin(), E= S->child_end(); I!=E; ++I)
+ if (*I && scan_ivar_release(*I, ID, PD, Release, SelfII, Ctx))
+ return true;
+
+ return false;
+}
+
+static void checkObjCDealloc(const ObjCImplementationDecl *D,
+ const LangOptions& LOpts, BugReporter& BR) {
+
+ assert (LOpts.getGC() != LangOptions::GCOnly);
+
+ ASTContext &Ctx = BR.getContext();
+ const ObjCInterfaceDecl *ID = D->getClassInterface();
+
+ // Does the class contain any ivars that are pointers (or id<...>)?
+ // If not, skip the check entirely.
+ // NOTE: This is motivated by PR 2517:
+ // http://llvm.org/bugs/show_bug.cgi?id=2517
+
+ bool containsPointerIvar = false;
+
+ for (ObjCInterfaceDecl::ivar_iterator I=ID->ivar_begin(), E=ID->ivar_end();
+ I!=E; ++I) {
+
+ ObjCIvarDecl *ID = *I;
+ QualType T = ID->getType();
+
+ if (!T->isObjCObjectPointerType() ||
+ ID->getAttr<IBOutletAttr>() || // Skip IBOutlets.
+ ID->getAttr<IBOutletCollectionAttr>()) // Skip IBOutletCollections.
+ continue;
+
+ containsPointerIvar = true;
+ break;
+ }
+
+ if (!containsPointerIvar)
+ return;
+
+ // Determine if the class subclasses NSObject.
+ IdentifierInfo* NSObjectII = &Ctx.Idents.get("NSObject");
+ IdentifierInfo* SenTestCaseII = &Ctx.Idents.get("SenTestCase");
+
+
+ for ( ; ID ; ID = ID->getSuperClass()) {
+ IdentifierInfo *II = ID->getIdentifier();
+
+ if (II == NSObjectII)
+ break;
+
+ // FIXME: For now, ignore classes that subclass SenTestCase, as these don't
+ // need to implement -dealloc. They implement tear down in another way,
+ // which we should try and catch later.
+ // http://llvm.org/bugs/show_bug.cgi?id=3187
+ if (II == SenTestCaseII)
+ return;
+ }
+
+ if (!ID)
+ return;
+
+ // Get the "dealloc" selector.
+ IdentifierInfo* II = &Ctx.Idents.get("dealloc");
+ Selector S = Ctx.Selectors.getSelector(0, &II);
+ ObjCMethodDecl *MD = 0;
+
+ // Scan the instance methods for "dealloc".
+ for (ObjCImplementationDecl::instmeth_iterator I = D->instmeth_begin(),
+ E = D->instmeth_end(); I!=E; ++I) {
+
+ if ((*I)->getSelector() == S) {
+ MD = *I;
+ break;
+ }
+ }
+
+ PathDiagnosticLocation DLoc =
+ PathDiagnosticLocation::createBegin(D, BR.getSourceManager());
+
+ if (!MD) { // No dealloc found.
+
+ const char* name = LOpts.getGC() == LangOptions::NonGC
+ ? "missing -dealloc"
+ : "missing -dealloc (Hybrid MM, non-GC)";
+
+ std::string buf;
+ llvm::raw_string_ostream os(buf);
+ os << "Objective-C class '" << *D << "' lacks a 'dealloc' instance method";
+
+ BR.EmitBasicReport(D, name, categories::CoreFoundationObjectiveC,
+ os.str(), DLoc);
+ return;
+ }
+
+ // dealloc found. Scan for missing [super dealloc].
+ if (MD->getBody() && !scan_dealloc(MD->getBody(), S)) {
+
+ const char* name = LOpts.getGC() == LangOptions::NonGC
+ ? "missing [super dealloc]"
+ : "missing [super dealloc] (Hybrid MM, non-GC)";
+
+ std::string buf;
+ llvm::raw_string_ostream os(buf);
+ os << "The 'dealloc' instance method in Objective-C class '" << *D
+ << "' does not send a 'dealloc' message to its super class"
+ " (missing [super dealloc])";
+
+ BR.EmitBasicReport(MD, name, categories::CoreFoundationObjectiveC,
+ os.str(), DLoc);
+ return;
+ }
+
+ // Get the "release" selector.
+ IdentifierInfo* RII = &Ctx.Idents.get("release");
+ Selector RS = Ctx.Selectors.getSelector(0, &RII);
+
+ // Get the "self" identifier
+ IdentifierInfo* SelfII = &Ctx.Idents.get("self");
+
+ // Scan for missing and extra releases of ivars used by implementations
+ // of synthesized properties
+ for (ObjCImplementationDecl::propimpl_iterator I = D->propimpl_begin(),
+ E = D->propimpl_end(); I!=E; ++I) {
+
+ // We can only check the synthesized properties
+ if ((*I)->getPropertyImplementation() != ObjCPropertyImplDecl::Synthesize)
+ continue;
+
+ ObjCIvarDecl *ID = (*I)->getPropertyIvarDecl();
+ if (!ID)
+ continue;
+
+ QualType T = ID->getType();
+ if (!T->isObjCObjectPointerType()) // Skip non-pointer ivars
+ continue;
+
+ const ObjCPropertyDecl *PD = (*I)->getPropertyDecl();
+ if (!PD)
+ continue;
+
+ // ivars cannot be set via read-only properties, so we'll skip them
+ if (PD->isReadOnly())
+ continue;
+
+ // ivar must be released if and only if the kind of setter was not 'assign'
+ bool requiresRelease = PD->getSetterKind() != ObjCPropertyDecl::Assign;
+ if (scan_ivar_release(MD->getBody(), ID, PD, RS, SelfII, Ctx)
+ != requiresRelease) {
+ const char *name = 0;
+ std::string buf;
+ llvm::raw_string_ostream os(buf);
+
+ if (requiresRelease) {
+ name = LOpts.getGC() == LangOptions::NonGC
+ ? "missing ivar release (leak)"
+ : "missing ivar release (Hybrid MM, non-GC)";
+
+ os << "The '" << *ID
+ << "' instance variable was retained by a synthesized property but "
+ "wasn't released in 'dealloc'";
+ } else {
+ name = LOpts.getGC() == LangOptions::NonGC
+ ? "extra ivar release (use-after-release)"
+ : "extra ivar release (Hybrid MM, non-GC)";
+
+ os << "The '" << *ID
+ << "' instance variable was not retained by a synthesized property "
+ "but was released in 'dealloc'";
+ }
+
+ PathDiagnosticLocation SDLoc =
+ PathDiagnosticLocation::createBegin((*I), BR.getSourceManager());
+
+ BR.EmitBasicReport(MD, name, categories::CoreFoundationObjectiveC,
+ os.str(), SDLoc);
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCDeallocChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCDeallocChecker : public Checker<
+ check::ASTDecl<ObjCImplementationDecl> > {
+public:
+ void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (mgr.getLangOpts().getGC() == LangOptions::GCOnly)
+ return;
+ checkObjCDealloc(cast<ObjCImplementationDecl>(D), mgr.getLangOpts(), BR);
+ }
+};
+}
+
+void ento::registerObjCDeallocChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCDeallocChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
new file mode 100644
index 0000000..6df47b1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
@@ -0,0 +1,146 @@
+//=- CheckObjCInstMethodRetTy.cpp - Check ObjC method signatures -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckObjCInstMethSignature, a flow-insensitive check
+// that determines if an Objective-C class interface incorrectly redefines
+// the method signature in a subclass.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/ASTContext.h"
+
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool AreTypesCompatible(QualType Derived, QualType Ancestor,
+ ASTContext &C) {
+
+ // Right now don't compare the compatibility of pointers. That involves
+ // looking at subtyping relationships. FIXME: Future patch.
+ if (Derived->isAnyPointerType() && Ancestor->isAnyPointerType())
+ return true;
+
+ return C.typesAreCompatible(Derived, Ancestor);
+}
+
+static void CompareReturnTypes(const ObjCMethodDecl *MethDerived,
+ const ObjCMethodDecl *MethAncestor,
+ BugReporter &BR, ASTContext &Ctx,
+ const ObjCImplementationDecl *ID) {
+
+ QualType ResDerived = MethDerived->getResultType();
+ QualType ResAncestor = MethAncestor->getResultType();
+
+ if (!AreTypesCompatible(ResDerived, ResAncestor, Ctx)) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "The Objective-C class '"
+ << *MethDerived->getClassInterface()
+ << "', which is derived from class '"
+ << *MethAncestor->getClassInterface()
+ << "', defines the instance method '"
+ << MethDerived->getSelector().getAsString()
+ << "' whose return type is '"
+ << ResDerived.getAsString()
+ << "'. A method with the same name (same selector) is also defined in "
+ "class '"
+ << *MethAncestor->getClassInterface()
+ << "' and has a return type of '"
+ << ResAncestor.getAsString()
+ << "'. These two types are incompatible, and may result in undefined "
+ "behavior for clients of these classes.";
+
+ PathDiagnosticLocation MethDLoc =
+ PathDiagnosticLocation::createBegin(MethDerived,
+ BR.getSourceManager());
+
+ BR.EmitBasicReport(MethDerived,
+ "Incompatible instance method return type",
+ categories::CoreFoundationObjectiveC,
+ os.str(), MethDLoc);
+ }
+}
+
+static void CheckObjCInstMethSignature(const ObjCImplementationDecl *ID,
+ BugReporter& BR) {
+
+ const ObjCInterfaceDecl *D = ID->getClassInterface();
+ const ObjCInterfaceDecl *C = D->getSuperClass();
+
+ if (!C)
+ return;
+
+ ASTContext &Ctx = BR.getContext();
+
+ // Build a DenseMap of the methods for quick querying.
+ typedef llvm::DenseMap<Selector,ObjCMethodDecl*> MapTy;
+ MapTy IMeths;
+ unsigned NumMethods = 0;
+
+ for (ObjCImplementationDecl::instmeth_iterator I=ID->instmeth_begin(),
+ E=ID->instmeth_end(); I!=E; ++I) {
+
+ ObjCMethodDecl *M = *I;
+ IMeths[M->getSelector()] = M;
+ ++NumMethods;
+ }
+
+ // Now recurse the class hierarchy chain looking for methods with the
+ // same signatures.
+ while (C && NumMethods) {
+ for (ObjCInterfaceDecl::instmeth_iterator I=C->instmeth_begin(),
+ E=C->instmeth_end(); I!=E; ++I) {
+
+ ObjCMethodDecl *M = *I;
+ Selector S = M->getSelector();
+
+ MapTy::iterator MI = IMeths.find(S);
+
+ if (MI == IMeths.end() || MI->second == 0)
+ continue;
+
+ --NumMethods;
+ ObjCMethodDecl *MethDerived = MI->second;
+ MI->second = 0;
+
+ CompareReturnTypes(MethDerived, M, BR, Ctx, ID);
+ }
+
+ C = C->getSuperClass();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCMethSigsChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCMethSigsChecker : public Checker<
+ check::ASTDecl<ObjCImplementationDecl> > {
+public:
+ void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ CheckObjCInstMethSignature(D, BR);
+ }
+};
+}
+
+void ento::registerObjCMethSigsChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCMethSigsChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
new file mode 100644
index 0000000..dde9071
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -0,0 +1,786 @@
+//==- CheckSecuritySyntaxOnly.cpp - Basic security checks --------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of flow-insensitive security checks.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool isArc4RandomAvailable(const ASTContext &Ctx) {
+ const llvm::Triple &T = Ctx.getTargetInfo().getTriple();
+ return T.getVendor() == llvm::Triple::Apple ||
+ T.getOS() == llvm::Triple::FreeBSD ||
+ T.getOS() == llvm::Triple::NetBSD ||
+ T.getOS() == llvm::Triple::OpenBSD ||
+ T.getOS() == llvm::Triple::DragonFly;
+}
+
+namespace {
+struct DefaultBool {
+ bool val;
+ DefaultBool() : val(false) {}
+ operator bool() const { return val; }
+ DefaultBool &operator=(bool b) { val = b; return *this; }
+};
+
+struct ChecksFilter {
+ DefaultBool check_gets;
+ DefaultBool check_getpw;
+ DefaultBool check_mktemp;
+ DefaultBool check_mkstemp;
+ DefaultBool check_strcpy;
+ DefaultBool check_rand;
+ DefaultBool check_vfork;
+ DefaultBool check_FloatLoopCounter;
+ DefaultBool check_UncheckedReturn;
+};
+
+class WalkAST : public StmtVisitor<WalkAST> {
+ BugReporter &BR;
+ AnalysisDeclContext* AC;
+ enum { num_setids = 6 };
+ IdentifierInfo *II_setid[num_setids];
+
+ const bool CheckRand;
+ const ChecksFilter &filter;
+
+public:
+ WalkAST(BugReporter &br, AnalysisDeclContext* ac,
+ const ChecksFilter &f)
+ : BR(br), AC(ac), II_setid(),
+ CheckRand(isArc4RandomAvailable(BR.getContext())),
+ filter(f) {}
+
+ // Statement visitor methods.
+ void VisitCallExpr(CallExpr *CE);
+ void VisitForStmt(ForStmt *S);
+ void VisitCompoundStmt (CompoundStmt *S);
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+
+ void VisitChildren(Stmt *S);
+
+ // Helpers.
+ bool checkCall_strCommon(const CallExpr *CE, const FunctionDecl *FD);
+
+ typedef void (WalkAST::*FnCheck)(const CallExpr *,
+ const FunctionDecl *);
+
+ // Checker-specific methods.
+ void checkLoopConditionForFloat(const ForStmt *FS);
+ void checkCall_gets(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_getpw(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_mktemp(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_mkstemp(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_strcpy(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_strcat(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_rand(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_random(const CallExpr *CE, const FunctionDecl *FD);
+ void checkCall_vfork(const CallExpr *CE, const FunctionDecl *FD);
+ void checkUncheckedReturnValue(CallExpr *CE);
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// AST walking.
+//===----------------------------------------------------------------------===//
+
+void WalkAST::VisitChildren(Stmt *S) {
+ for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+ if (Stmt *child = *I)
+ Visit(child);
+}
+
+void WalkAST::VisitCallExpr(CallExpr *CE) {
+ // Get the callee.
+ const FunctionDecl *FD = CE->getDirectCallee();
+
+ if (!FD)
+ return;
+
+ // Get the name of the callee. If it's a builtin, strip off the prefix.
+ IdentifierInfo *II = FD->getIdentifier();
+ if (!II) // if no identifier, not a simple C function
+ return;
+ StringRef Name = II->getName();
+ if (Name.startswith("__builtin_"))
+ Name = Name.substr(10);
+
+ // Set the evaluation function by switching on the callee name.
+ FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
+ .Case("gets", &WalkAST::checkCall_gets)
+ .Case("getpw", &WalkAST::checkCall_getpw)
+ .Case("mktemp", &WalkAST::checkCall_mktemp)
+ .Case("mkstemp", &WalkAST::checkCall_mkstemp)
+ .Case("mkdtemp", &WalkAST::checkCall_mkstemp)
+ .Case("mkstemps", &WalkAST::checkCall_mkstemp)
+ .Cases("strcpy", "__strcpy_chk", &WalkAST::checkCall_strcpy)
+ .Cases("strcat", "__strcat_chk", &WalkAST::checkCall_strcat)
+ .Case("drand48", &WalkAST::checkCall_rand)
+ .Case("erand48", &WalkAST::checkCall_rand)
+ .Case("jrand48", &WalkAST::checkCall_rand)
+ .Case("lrand48", &WalkAST::checkCall_rand)
+ .Case("mrand48", &WalkAST::checkCall_rand)
+ .Case("nrand48", &WalkAST::checkCall_rand)
+ .Case("lcong48", &WalkAST::checkCall_rand)
+ .Case("rand", &WalkAST::checkCall_rand)
+ .Case("rand_r", &WalkAST::checkCall_rand)
+ .Case("random", &WalkAST::checkCall_random)
+ .Case("vfork", &WalkAST::checkCall_vfork)
+ .Default(NULL);
+
+ // If the callee isn't defined, it is not of security concern.
+ // Check and evaluate the call.
+ if (evalFunction)
+ (this->*evalFunction)(CE, FD);
+
+ // Recurse and check children.
+ VisitChildren(CE);
+}
+
+void WalkAST::VisitCompoundStmt(CompoundStmt *S) {
+ for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+ if (Stmt *child = *I) {
+ if (CallExpr *CE = dyn_cast<CallExpr>(child))
+ checkUncheckedReturnValue(CE);
+ Visit(child);
+ }
+}
+
+void WalkAST::VisitForStmt(ForStmt *FS) {
+ checkLoopConditionForFloat(FS);
+
+ // Recurse and check children.
+ VisitChildren(FS);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: floating point variable used as loop counter.
+// Originally: <rdar://problem/6336718>
+// Implements: CERT security coding advisory FLP-30.
+//===----------------------------------------------------------------------===//
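+//
+// A minimal sketch of a loop this check is intended to flag (illustrative
+// only):
+//
+//   void count(void) {
+//     for (float f = 0.0f; f != 1.0f; f += 0.1f) {  // 'f' is a floating point
+//       /* ... */                                   // loop counter (FLP30-C)
+//     }
+//   }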
+
+static const DeclRefExpr*
+getIncrementedVar(const Expr *expr, const VarDecl *x, const VarDecl *y) {
+ expr = expr->IgnoreParenCasts();
+
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(expr)) {
+ if (!(B->isAssignmentOp() || B->isCompoundAssignmentOp() ||
+ B->getOpcode() == BO_Comma))
+ return NULL;
+
+ if (const DeclRefExpr *lhs = getIncrementedVar(B->getLHS(), x, y))
+ return lhs;
+
+ if (const DeclRefExpr *rhs = getIncrementedVar(B->getRHS(), x, y))
+ return rhs;
+
+ return NULL;
+ }
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(expr)) {
+ const NamedDecl *ND = DR->getDecl();
+ return ND == x || ND == y ? DR : NULL;
+ }
+
+ if (const UnaryOperator *U = dyn_cast<UnaryOperator>(expr))
+ return U->isIncrementDecrementOp()
+ ? getIncrementedVar(U->getSubExpr(), x, y) : NULL;
+
+ return NULL;
+}
+
+/// CheckLoopConditionForFloat - This check looks for 'for' statements that
+/// use a floating point variable as a loop counter.
+/// CERT: FLP30-C, FLP30-CPP.
+///
+void WalkAST::checkLoopConditionForFloat(const ForStmt *FS) {
+ if (!filter.check_FloatLoopCounter)
+ return;
+
+ // Does the loop have a condition?
+ const Expr *condition = FS->getCond();
+
+ if (!condition)
+ return;
+
+ // Does the loop have an increment?
+ const Expr *increment = FS->getInc();
+
+ if (!increment)
+ return;
+
+ // Strip away '()' and casts.
+ condition = condition->IgnoreParenCasts();
+ increment = increment->IgnoreParenCasts();
+
+ // Is the loop condition a comparison?
+ const BinaryOperator *B = dyn_cast<BinaryOperator>(condition);
+
+ if (!B)
+ return;
+
+ // Is this a comparison?
+ if (!(B->isRelationalOp() || B->isEqualityOp()))
+ return;
+
+ // Are we comparing variables?
+ const DeclRefExpr *drLHS =
+ dyn_cast<DeclRefExpr>(B->getLHS()->IgnoreParenLValueCasts());
+ const DeclRefExpr *drRHS =
+ dyn_cast<DeclRefExpr>(B->getRHS()->IgnoreParenLValueCasts());
+
+ // Does at least one of the variables have a floating point type?
+ drLHS = drLHS && drLHS->getType()->isRealFloatingType() ? drLHS : NULL;
+ drRHS = drRHS && drRHS->getType()->isRealFloatingType() ? drRHS : NULL;
+
+ if (!drLHS && !drRHS)
+ return;
+
+ const VarDecl *vdLHS = drLHS ? dyn_cast<VarDecl>(drLHS->getDecl()) : NULL;
+ const VarDecl *vdRHS = drRHS ? dyn_cast<VarDecl>(drRHS->getDecl()) : NULL;
+
+ if (!vdLHS && !vdRHS)
+ return;
+
+ // Does either variable appear in increment?
+ const DeclRefExpr *drInc = getIncrementedVar(increment, vdLHS, vdRHS);
+
+ if (!drInc)
+ return;
+
+ // Emit the error. First figure out which DeclRefExpr in the condition
+ // referenced the compared variable.
+ const DeclRefExpr *drCond = vdLHS == drInc->getDecl() ? drLHS : drRHS;
+
+ SmallVector<SourceRange, 2> ranges;
+ SmallString<256> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+
+ os << "Variable '" << drCond->getDecl()->getName()
+ << "' with floating point type '" << drCond->getType().getAsString()
+ << "' should not be used as a loop counter";
+
+ ranges.push_back(drCond->getSourceRange());
+ ranges.push_back(drInc->getSourceRange());
+
+ const char *bugType = "Floating point variable used as loop counter";
+
+ PathDiagnosticLocation FSLoc =
+ PathDiagnosticLocation::createBegin(FS, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(),
+ bugType, "Security", os.str(),
+ FSLoc, ranges.data(), ranges.size());
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'gets' is insecure.
+// Originally: <rdar://problem/6335715>
+// Implements (part of): 300-BSI (buildsecurityin.us-cert.gov)
+// CWE-242: Use of Inherently Dangerous Function
+//===----------------------------------------------------------------------===//
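+//
+// A minimal sketch of the call this check is intended to flag (illustrative
+// only):
+//
+//   #include <stdio.h>
+//   void read_line(void) {
+//     char buf[64];
+//     gets(buf);        // no bound on the input length: buffer overflow risk
+//   }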
+
+void WalkAST::checkCall_gets(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_gets)
+ return;
+
+ const FunctionProtoType *FPT
+ = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
+ if (!FPT)
+ return;
+
+ // Verify that the function takes a single argument.
+ if (FPT->getNumArgs() != 1)
+ return;
+
+ // Is the argument a 'char*'?
+ const PointerType *PT = dyn_cast<PointerType>(FPT->getArgType(0));
+ if (!PT)
+ return;
+
+ if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
+ return;
+
+ // Issue a warning.
+ SourceRange R = CE->getCallee()->getSourceRange();
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(),
+ "Potential buffer overflow in call to 'gets'",
+ "Security",
+ "Call to function 'gets' is extremely insecure as it can "
+ "always result in a buffer overflow",
+ CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'getpwd' is insecure.
+// CWE-477: Use of Obsolete Functions
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_getpw(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_getpw)
+ return;
+
+ const FunctionProtoType *FPT
+ = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
+ if (!FPT)
+ return;
+
+ // Verify that the function takes two arguments.
+ if (FPT->getNumArgs() != 2)
+ return;
+
+ // Verify the first argument type is integer.
+ if (!FPT->getArgType(0)->isIntegerType())
+ return;
+
+ // Verify the second argument type is char*.
+ const PointerType *PT = dyn_cast<PointerType>(FPT->getArgType(1));
+ if (!PT)
+ return;
+
+ if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
+ return;
+
+ // Issue a warning.
+ SourceRange R = CE->getCallee()->getSourceRange();
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(),
+ "Potential buffer overflow in call to 'getpw'",
+ "Security",
+ "The getpw() function is dangerous as it may overflow the "
+ "provided buffer. It is obsoleted by getpwuid().",
+ CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'mktemp' is insecure. It is obsoleted by mkstemp().
+// CWE-377: Insecure Temporary File
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_mktemp(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_mktemp) {
+ // Fall back to the security check of looking for enough 'X's in the
+ // format string, since that is a less severe warning.
+ checkCall_mkstemp(CE, FD);
+ return;
+ }
+
+ const FunctionProtoType *FPT
+ = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
+ if(!FPT)
+ return;
+
+ // Verify that the function takes a single argument.
+ if (FPT->getNumArgs() != 1)
+ return;
+
+ // Verify that the argument is a pointer type.
+ const PointerType *PT = dyn_cast<PointerType>(FPT->getArgType(0));
+ if (!PT)
+ return;
+
+ // Verify that the argument is a 'char*'.
+ if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
+ return;
+
+ // Issue a warning.
+ SourceRange R = CE->getCallee()->getSourceRange();
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(),
+ "Potential insecure temporary file in call 'mktemp'",
+ "Security",
+ "Call to function 'mktemp' is insecure as it always "
+ "creates or uses insecure temporary file. Use 'mkstemp' "
+ "instead",
+ CELoc, &R, 1);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Check: Use of 'mkstemp', 'mktemp', 'mkdtemp' should contain at least 6 X's.
+//===----------------------------------------------------------------------===//
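+//
+// A minimal sketch of a call this check is intended to flag (illustrative
+// only; the check only inspects string-literal template arguments):
+//
+//   #include <stdlib.h>
+//   void make_temp(void) {
+//     mkstemp("/tmp/fooXXX");   // only 3 'X's seen: flagged (at least 6 needed)
+//   }
+//   // (real code would pass a writable char array as the template)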
+
+void WalkAST::checkCall_mkstemp(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_mkstemp)
+ return;
+
+ StringRef Name = FD->getIdentifier()->getName();
+ std::pair<signed, signed> ArgSuffix =
+ llvm::StringSwitch<std::pair<signed, signed> >(Name)
+ .Case("mktemp", std::make_pair(0,-1))
+ .Case("mkstemp", std::make_pair(0,-1))
+ .Case("mkdtemp", std::make_pair(0,-1))
+ .Case("mkstemps", std::make_pair(0,1))
+ .Default(std::make_pair(-1, -1));
+
+ assert(ArgSuffix.first >= 0 && "Unsupported function");
+
+ // Check if the number of arguments is consistent with our expectations.
+ unsigned numArgs = CE->getNumArgs();
+ if ((signed) numArgs <= ArgSuffix.first)
+ return;
+
+ const StringLiteral *strArg =
+ dyn_cast<StringLiteral>(CE->getArg((unsigned)ArgSuffix.first)
+ ->IgnoreParenImpCasts());
+
+ // Currently we only handle string literals. It is possible to do better,
+ // either by looking at references to const variables, or by doing real
+ // flow analysis.
+ if (!strArg || strArg->getCharByteWidth() != 1)
+ return;
+
+ // Count the number of X's, taking into account a possible cutoff suffix.
+ StringRef str = strArg->getString();
+ unsigned numX = 0;
+ unsigned n = str.size();
+
+ // Take into account the suffix.
+ unsigned suffix = 0;
+ if (ArgSuffix.second >= 0) {
+ const Expr *suffixEx = CE->getArg((unsigned)ArgSuffix.second);
+ llvm::APSInt Result;
+ if (!suffixEx->EvaluateAsInt(Result, BR.getContext()))
+ return;
+ // FIXME: Issue a warning.
+ if (Result.isNegative())
+ return;
+ suffix = (unsigned) Result.getZExtValue();
+ n = (n > suffix) ? n - suffix : 0;
+ }
+
+ for (unsigned i = 0; i < n; ++i)
+ if (str[i] == 'X') ++numX;
+
+ if (numX >= 6)
+ return;
+
+ // Issue a warning.
+ SourceRange R = strArg->getSourceRange();
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ SmallString<512> buf;
+ llvm::raw_svector_ostream out(buf);
+ out << "Call to '" << Name << "' should have at least 6 'X's in the"
+ " format string to be secure (" << numX << " 'X'";
+ if (numX != 1)
+ out << 's';
+ out << " seen";
+ if (suffix) {
+ out << ", " << suffix << " character";
+ if (suffix > 1)
+ out << 's';
+ out << " used as a suffix";
+ }
+ out << ')';
+ BR.EmitBasicReport(AC->getDecl(),
+ "Insecure temporary file creation", "Security",
+ out.str(), CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'strcpy' is insecure.
+//
+// CWE-119: Improper Restriction of Operations within
+// the Bounds of a Memory Buffer
+//===----------------------------------------------------------------------===//
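+//
+// A minimal sketch of the call this check is intended to flag (illustrative
+// only):
+//
+//   #include <string.h>
+//   void copy_name(char *dst, const char *src) {
+//     strcpy(dst, src);     // unbounded copy; prefer a bounded analogue such
+//   }                       // as strlcpy(dst, src, dstsize)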
+void WalkAST::checkCall_strcpy(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_strcpy)
+ return;
+
+ if (!checkCall_strCommon(CE, FD))
+ return;
+
+ // Issue a warning.
+ SourceRange R = CE->getCallee()->getSourceRange();
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(),
+ "Potential insecure memory buffer bounds restriction in "
+ "call 'strcpy'",
+ "Security",
+ "Call to function 'strcpy' is insecure as it does not "
+ "provide bounding of the memory buffer. Replace "
+ "unbounded copy functions with analogous functions that "
+ "support length arguments such as 'strlcpy'. CWE-119.",
+ CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Any use of 'strcat' is insecure.
+//
+// CWE-119: Improper Restriction of Operations within
+// the Bounds of a Memory Buffer
+//===----------------------------------------------------------------------===//
+void WalkAST::checkCall_strcat(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_strcpy)
+ return;
+
+ if (!checkCall_strCommon(CE, FD))
+ return;
+
+ // Issue a warning.
+ SourceRange R = CE->getCallee()->getSourceRange();
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(),
+ "Potential insecure memory buffer bounds restriction in "
+ "call 'strcat'",
+ "Security",
+ "Call to function 'strcat' is insecure as it does not "
+ "provide bounding of the memory buffer. Replace "
+ "unbounded copy functions with analogous functions that "
+ "support length arguments such as 'strlcat'. CWE-119.",
+ CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Common check for str* functions with no bounds parameters.
+//===----------------------------------------------------------------------===//
+bool WalkAST::checkCall_strCommon(const CallExpr *CE, const FunctionDecl *FD) {
+ const FunctionProtoType *FPT
+ = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
+ if (!FPT)
+ return false;
+
+ // Verify the function takes two arguments, three in the _chk version.
+ int numArgs = FPT->getNumArgs();
+ if (numArgs != 2 && numArgs != 3)
+ return false;
+
+ // Verify the type for both arguments.
+ for (int i = 0; i < 2; i++) {
+ // Verify that the arguments are pointers.
+ const PointerType *PT = dyn_cast<PointerType>(FPT->getArgType(i));
+ if (!PT)
+ return false;
+
+ // Verify that the argument is a 'char*'.
+ if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
+ return false;
+ }
+
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Linear congruential random number generators should not be used.
+// Originally: <rdar://problem/63371000>
+// CWE-338: Use of cryptographically weak PRNG
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_rand(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_rand || !CheckRand)
+ return;
+
+ const FunctionProtoType *FTP
+ = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
+ if (!FTP)
+ return;
+
+ if (FTP->getNumArgs() == 1) {
+ // Is the argument an 'unsigned short *'?
+ // (Actually any integer type is allowed.)
+ const PointerType *PT = dyn_cast<PointerType>(FTP->getArgType(0));
+ if (!PT)
+ return;
+
+ if (! PT->getPointeeType()->isIntegerType())
+ return;
+ }
+ else if (FTP->getNumArgs() != 0)
+ return;
+
+ // Issue a warning.
+ SmallString<256> buf1;
+ llvm::raw_svector_ostream os1(buf1);
+ os1 << '\'' << *FD << "' is a poor random number generator";
+
+ SmallString<256> buf2;
+ llvm::raw_svector_ostream os2(buf2);
+ os2 << "Function '" << *FD
+ << "' is obsolete because it implements a poor random number generator."
+ << " Use 'arc4random' instead";
+
+ SourceRange R = CE->getCallee()->getSourceRange();
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), os1.str(), "Security", os2.str(),
+ CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: 'random' should not be used
+// Originally: <rdar://problem/63371000>
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_random(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!CheckRand || !filter.check_rand)
+ return;
+
+ const FunctionProtoType *FTP
+ = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
+ if (!FTP)
+ return;
+
+ // Verify that the function takes no argument.
+ if (FTP->getNumArgs() != 0)
+ return;
+
+ // Issue a warning.
+ SourceRange R = CE->getCallee()->getSourceRange();
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(),
+ "'random' is not a secure random number generator",
+ "Security",
+ "The 'random' function produces a sequence of values that "
+ "an adversary may be able to predict. Use 'arc4random' "
+ "instead", CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: 'vfork' should not be used.
+// POS33-C: Do not use vfork().
+//===----------------------------------------------------------------------===//
+
+void WalkAST::checkCall_vfork(const CallExpr *CE, const FunctionDecl *FD) {
+ if (!filter.check_vfork)
+ return;
+
+ // All calls to vfork() are insecure, issue a warning.
+ SourceRange R = CE->getCallee()->getSourceRange();
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(),
+ "Potential insecure implementation-specific behavior in "
+ "call 'vfork'",
+ "Security",
+ "Call to function 'vfork' is insecure as it can lead to "
+ "denial of service situations in the parent process. "
+ "Replace calls to vfork with calls to the safer "
+ "'posix_spawn' function",
+ CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// Check: Should check whether privileges are dropped successfully.
+// Originally: <rdar://problem/6337132>
+//===----------------------------------------------------------------------===//
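+//
+// A minimal sketch of the pattern this check is intended to flag (illustrative
+// only):
+//
+//   #include <unistd.h>
+//   void drop_privileges(void) {
+//     setuid(getuid());                       // return value ignored: flagged
+//     // if (setuid(getuid()) != 0) { ... }   // checked: not flagged
+//   }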
+
+void WalkAST::checkUncheckedReturnValue(CallExpr *CE) {
+ if (!filter.check_UncheckedReturn)
+ return;
+
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD)
+ return;
+
+ if (II_setid[0] == NULL) {
+ static const char * const identifiers[num_setids] = {
+ "setuid", "setgid", "seteuid", "setegid",
+ "setreuid", "setregid"
+ };
+
+ for (size_t i = 0; i < num_setids; i++)
+ II_setid[i] = &BR.getContext().Idents.get(identifiers[i]);
+ }
+
+ const IdentifierInfo *id = FD->getIdentifier();
+ size_t identifierid;
+
+ for (identifierid = 0; identifierid < num_setids; identifierid++)
+ if (id == II_setid[identifierid])
+ break;
+
+ if (identifierid >= num_setids)
+ return;
+
+ const FunctionProtoType *FTP
+ = dyn_cast<FunctionProtoType>(FD->getType().IgnoreParens());
+ if (!FTP)
+ return;
+
+ // Verify that the function takes one or two arguments (depending on
+ // the function).
+ if (FTP->getNumArgs() != (identifierid < 4 ? 1 : 2))
+ return;
+
+ // The arguments must be integers.
+ for (unsigned i = 0; i < FTP->getNumArgs(); i++)
+ if (! FTP->getArgType(i)->isIntegerType())
+ return;
+
+ // Issue a warning.
+ SmallString<256> buf1;
+ llvm::raw_svector_ostream os1(buf1);
+ os1 << "Return value is not checked in call to '" << *FD << '\'';
+
+ SmallString<256> buf2;
+ llvm::raw_svector_ostream os2(buf2);
+ os2 << "The return value from the call to '" << *FD
+ << "' is not checked. If an error occurs in '" << *FD
+ << "', the following code may execute with unexpected privileges";
+
+ SourceRange R = CE->getCallee()->getSourceRange();
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(), os1.str(), "Security", os2.str(),
+ CELoc, &R, 1);
+}
+
+//===----------------------------------------------------------------------===//
+// SecuritySyntaxChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class SecuritySyntaxChecker : public Checker<check::ASTCodeBody> {
+public:
+ ChecksFilter filter;
+
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ WalkAST walker(BR, mgr.getAnalysisDeclContext(D), filter);
+ walker.Visit(D->getBody());
+ }
+};
+}
+
+#define REGISTER_CHECKER(name) \
+void ento::register##name(CheckerManager &mgr) {\
+ mgr.registerChecker<SecuritySyntaxChecker>()->filter.check_##name = true;\
+}
+
+REGISTER_CHECKER(gets)
+REGISTER_CHECKER(getpw)
+REGISTER_CHECKER(mkstemp)
+REGISTER_CHECKER(mktemp)
+REGISTER_CHECKER(strcpy)
+REGISTER_CHECKER(rand)
+REGISTER_CHECKER(vfork)
+REGISTER_CHECKER(FloatLoopCounter)
+REGISTER_CHECKER(UncheckedReturn)
+
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
new file mode 100644
index 0000000..cc7fd37
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckSizeofPointer.cpp
@@ -0,0 +1,92 @@
+//==- CheckSizeofPointer.cpp - Check for sizeof on pointers ------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a check for unintended use of sizeof() on pointer
+// expressions.
+//
+//===----------------------------------------------------------------------===//
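+//
+// A minimal sketch of the pattern this check is intended to flag (illustrative
+// only; the names are hypothetical):
+//
+//   #include <stddef.h>
+//   struct Item { int id; char name[32]; };
+//   size_t item_size(struct Item *p) {
+//     return sizeof(p);     // flagged: size of the pointer itself, not of the
+//   }                       // pointed-to struct (sizeof(*p) was likely intended)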
+
+#include "ClangSACheckers.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class WalkAST : public StmtVisitor<WalkAST> {
+ BugReporter &BR;
+ AnalysisDeclContext* AC;
+
+public:
+ WalkAST(BugReporter &br, AnalysisDeclContext* ac) : BR(br), AC(ac) {}
+ void VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E);
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+ void VisitChildren(Stmt *S);
+};
+}
+
+void WalkAST::VisitChildren(Stmt *S) {
+ for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+ if (Stmt *child = *I)
+ Visit(child);
+}
+
+// CWE-467: Use of sizeof() on a Pointer Type
+void WalkAST::VisitUnaryExprOrTypeTraitExpr(UnaryExprOrTypeTraitExpr *E) {
+ if (E->getKind() != UETT_SizeOf)
+ return;
+
+ // If an explicit type is used in the code, usually the coder knows what he is
+ // doing.
+ if (E->isArgumentType())
+ return;
+
+ QualType T = E->getTypeOfArgument();
+ if (T->isPointerType()) {
+
+ // Many false positives have the form 'sizeof *p'. This is reasonable
+ // because people know what they are doing when they intentionally
+ // dereference the pointer.
+ Expr *ArgEx = E->getArgumentExpr();
+ if (!isa<DeclRefExpr>(ArgEx->IgnoreParens()))
+ return;
+
+ SourceRange R = ArgEx->getSourceRange();
+ PathDiagnosticLocation ELoc =
+ PathDiagnosticLocation::createBegin(E, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(),
+ "Potential unintended use of sizeof() on pointer type",
+ "Logic",
+ "The code calls sizeof() on a pointer type. "
+ "This can produce an unexpected result.",
+ ELoc, &R, 1);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// SizeofPointerChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class SizeofPointerChecker : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ WalkAST walker(BR, mgr.getAnalysisDeclContext(D));
+ walker.Visit(D->getBody());
+ }
+};
+}
+
+void ento::registerSizeofPointerChecker(CheckerManager &mgr) {
+ mgr.registerChecker<SizeofPointerChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
new file mode 100644
index 0000000..843502f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp
@@ -0,0 +1,233 @@
+//= CheckerDocumentation.cpp - Documentation checker ---------------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker lists all the checker callbacks and provides documentation for
+// checker writers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+
+using namespace clang;
+using namespace ento;
+
+// All checkers should be placed into an anonymous namespace.
+// We place CheckerDocumentation inside the ento namespace to make it
+// visible in doxygen.
+namespace ento {
+
+/// This checker documents the callback functions checkers can use to implement
+/// the custom handling of the specific events during path exploration as well
+/// as reporting bugs. Most of the callbacks are targeted at path-sensitive
+/// checking.
+///
+/// \sa CheckerContext
+class CheckerDocumentation : public Checker< check::PreStmt<DeclStmt>,
+ check::PostStmt<CallExpr>,
+ check::PreObjCMessage,
+ check::PostObjCMessage,
+ check::BranchCondition,
+ check::Location,
+ check::Bind,
+ check::DeadSymbols,
+ check::EndPath,
+ check::EndAnalysis,
+ check::EndOfTranslationUnit,
+ eval::Call,
+ eval::Assume,
+ check::LiveSymbols,
+ check::RegionChanges,
+ check::Event<ImplicitNullDerefEvent>,
+ check::ASTDecl<FunctionDecl> > {
+public:
+
+ /// \brief Pre-visit the Statement.
+ ///
+ /// The method will be called before the analyzer core processes the
+ /// statement. The notification is performed for every explored CFGElement,
+ /// which does not include the control flow statements such as IfStmt. The
+ /// callback can be specialized to be called with any subclass of Stmt.
+ ///
+ /// See checkBranchCondition() callback for performing custom processing of
+ /// the branching statements.
+ ///
+ /// check::PreStmt<DeclStmt>
+ void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {}
+
+ /// \brief Post-visit the Statement.
+ ///
+ /// The method will be called after the analyzer core processes the
+ /// statement. The notification is performed for every explored CFGElement,
+ /// which does not include the control flow statements such as IfStmt. The
+ /// callback can be specialized to be called with any subclass of Stmt.
+ ///
+ /// check::PostStmt<CallExpr>
+ void checkPostStmt(const CallExpr *DS, CheckerContext &C) const;
+
+ /// \brief Pre-visit the Objective C messages.
+ void checkPreObjCMessage(const ObjCMessage &Msg, CheckerContext &C) const {}
+
+ /// \brief Post-visit the Objective C messages.
+ void checkPostObjCMessage(const ObjCMessage &Msg, CheckerContext &C) const {}
+
+ /// \brief Pre-visit of the condition statement of a branch (such as IfStmt).
+ void checkBranchCondition(const Stmt *Condition, CheckerContext &Ctx) const {}
+
+ /// \brief Called on a load from and a store to a location.
+ ///
+ /// The method will be called each time a location (pointer) value is
+ /// accessed.
+ /// \param Loc The value of the location (pointer).
+ /// \param IsLoad The flag specifying if the location is a store or a load.
+ /// \param S The load is performed while processing the statement.
+ ///
+ /// check::Location
+ void checkLocation(SVal Loc, bool IsLoad, const Stmt *S,
+ CheckerContext &C) const {}
+
+ /// \brief Called on binding of a value to a location.
+ ///
+ /// \param Loc The value of the location (pointer).
+ /// \param Val The value which will be stored at the location Loc.
+  /// \param S The statement being processed when the binding occurs.
+ ///
+ /// check::Bind
+ void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const {}
+
+
+ /// \brief Called whenever a symbol becomes dead.
+ ///
+  /// This callback should be used by the checkers to aggressively clean
+  /// up/reduce the checker state, which is important for reducing the overall
+  /// memory usage. Specifically, if a checker keeps symbol-specific information
+  /// in the state, it can and should be dropped after the symbol becomes dead.
+  /// In addition, reporting a bug as soon as the tracked symbol becomes dead
+  /// leads to more precise diagnostics. (For example, one should report that a
+  /// malloc'ed variable is not freed right after it goes out of scope.)
+ ///
+ /// \param SR The SymbolReaper object can be queried to determine which
+ /// symbols are dead.
+ ///
+ /// check::DeadSymbols
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const {}
+
+ /// \brief Called when an end of path is reached in the ExplodedGraph.
+ ///
+ /// This callback should be used to check if the allocated resources are freed.
+ ///
+ /// check::EndPath
+ void checkEndPath(CheckerContext &Ctx) const {}
+
+ /// \brief Called after all the paths in the ExplodedGraph reach end of path
+ /// - the symbolic execution graph is fully explored.
+ ///
+ /// This callback should be used in cases when a checker needs to have a
+ /// global view of the information generated on all paths. For example, to
+  /// compare the execution summary/result across several paths.
+ /// See IdempotentOperationChecker for a usage example.
+ ///
+ /// check::EndAnalysis
+ void checkEndAnalysis(ExplodedGraph &G,
+ BugReporter &BR,
+ ExprEngine &Eng) const {}
+
+ /// \brief Called after analysis of a TranslationUnit is complete.
+ ///
+ /// check::EndOfTranslationUnit
+ void checkEndOfTranslationUnit(const TranslationUnitDecl *TU,
+ AnalysisManager &Mgr,
+ BugReporter &BR) const {}
+
+
+  /// \brief Evaluates a function call.
+  ///
+  /// The analysis core treats all function calls in the same way. However, some
+  /// functions have special meaning, which should be reflected in the program
+  /// state. This callback allows a checker to provide domain-specific knowledge
+  /// about the particular functions it knows about.
+  ///
+  /// \returns true if the call has been successfully evaluated
+  /// and false otherwise. Note that only one checker can evaluate a call; if
+  /// more than one checker claims it can evaluate the same call, the
+  /// first one wins.
+ ///
+ /// eval::Call
+ bool evalCall(const CallExpr *CE, CheckerContext &C) const { return true; }
+
+ /// \brief Handles assumptions on symbolic values.
+ ///
+  /// This method is called when a symbolic expression is assumed to be true or
+  /// false. For example, assumptions are made when evaluating a condition at a
+  /// branch. The callback allows checkers to track the assumptions made on the
+  /// symbols of interest and change the state accordingly.
+ ///
+ /// eval::Assume
+ ProgramStateRef evalAssume(ProgramStateRef State,
+ SVal Cond,
+ bool Assumption) const { return State; }
+
+  /// Allows modifying the SymbolReaper object. For example, checkers can explicitly
+ /// register symbols of interest as live. These symbols will not be marked
+ /// dead and removed.
+ ///
+ /// check::LiveSymbols
+ void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const {}
+
+
+ bool wantsRegionChangeUpdate(ProgramStateRef St) const { return true; }
+
+  /// check::RegionChanges
+  /// Allows tracking regions which get invalidated.
+  /// \param State The current program state.
+  /// \param invalidated A set of all symbols potentially touched by the change.
+  /// \param ExplicitRegions The regions explicitly requested for invalidation.
+  /// For example, in the case of a function call, these would be the arguments.
+  /// \param Regions The transitive closure of accessible regions,
+  /// i.e. all regions that may have been touched by this change.
+  /// \param Call The call expression wrapper if the regions are invalidated by
+  /// a call, 0 otherwise.
+  /// Note, in order to be notified, the checker should also implement the
+  /// wantsRegionChangeUpdate() callback.
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef State,
+ const StoreManager::InvalidatedSymbols *,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) const {
+ return State;
+ }
+
+ /// check::Event<ImplicitNullDerefEvent>
+ void checkEvent(ImplicitNullDerefEvent Event) const {}
+
+ /// \brief Check every declaration in the AST.
+ ///
+ /// An AST traversal callback, which should only be used when the checker is
+ /// not path sensitive. It will be called for every Declaration in the AST and
+ /// can be specialized to only be called on subclasses of Decl, for example,
+ /// FunctionDecl.
+ ///
+ /// check::ASTDecl<FunctionDecl>
+ void checkASTDecl(const FunctionDecl *D,
+ AnalysisManager &Mgr,
+ BugReporter &BR) const {}
+
+};
+
+void CheckerDocumentation::checkPostStmt(const CallExpr *DS,
+ CheckerContext &C) const {
+ return;
+}
+
+} // end namespace
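
To make the callback list above concrete, here is a minimal sketch (not part of
this patch) of a checker that subscribes to two of the documented callbacks.
The class name, comments, and registration function are hypothetical; the
callback signatures, includes, and registration pattern follow the code in this
patch.

    #include "clang/StaticAnalyzer/Core/Checker.h"
    #include "clang/StaticAnalyzer/Core/CheckerManager.h"
    #include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"

    using namespace clang;
    using namespace ento;

    namespace {
    // Hypothetical checker: reacts to DeclStmt pre-visits and dead symbols.
    class ExampleDeclChecker : public Checker< check::PreStmt<DeclStmt>,
                                               check::DeadSymbols > {
    public:
      // Called before the analyzer core processes each DeclStmt.
      void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
        // Inspect the declaration; add a transition or emit a report here.
      }

      // Called when symbols become dead; used to prune per-symbol state.
      void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const {
        // Drop state for any symbol that SR reports as dead.
      }
    };
    } // end anonymous namespace

    // Registration follows the pattern used by the checkers in this patch;
    // the function name is hypothetical.
    void registerExampleDeclChecker(CheckerManager &mgr) {
      mgr.registerChecker<ExampleDeclChecker>();
    }
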
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td
new file mode 100644
index 0000000..96a8d26
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/Checkers.td
@@ -0,0 +1,487 @@
+//===--- Checkers.td - Static Analyzer Checkers ---------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+include "clang/StaticAnalyzer/Checkers/CheckerBase.td"
+
+//===----------------------------------------------------------------------===//
+// Packages.
+//===----------------------------------------------------------------------===//
+
+def Experimental : Package<"experimental">;
+
+def Core : Package<"core">;
+def CoreBuiltin : Package<"builtin">, InPackage<Core>;
+def CoreUninitialized : Package<"uninitialized">, InPackage<Core>;
+def CoreExperimental : Package<"core">, InPackage<Experimental>, Hidden;
+
+def Cplusplus : Package<"cplusplus">;
+def CplusplusExperimental : Package<"cplusplus">, InPackage<Experimental>, Hidden;
+
+def DeadCode : Package<"deadcode">;
+def DeadCodeExperimental : Package<"deadcode">, InPackage<Experimental>, Hidden;
+
+def Security : Package <"security">;
+def InsecureAPI : Package<"insecureAPI">, InPackage<Security>;
+def SecurityExperimental : Package<"security">, InPackage<Experimental>, Hidden;
+def Taint : Package<"taint">, InPackage<SecurityExperimental>, Hidden;
+
+def Unix : Package<"unix">;
+def UnixExperimental : Package<"unix">, InPackage<Experimental>, Hidden;
+def CString : Package<"cstring">, InPackage<Unix>, Hidden;
+def CStringExperimental : Package<"cstring">, InPackage<UnixExperimental>, Hidden;
+
+def OSX : Package<"osx">;
+def OSXExperimental : Package<"osx">, InPackage<Experimental>, Hidden;
+def Cocoa : Package<"cocoa">, InPackage<OSX>;
+def CocoaExperimental : Package<"cocoa">, InPackage<OSXExperimental>, Hidden;
+def CoreFoundation : Package<"coreFoundation">, InPackage<OSX>;
+def Containers : Package<"containers">, InPackage<CoreFoundation>;
+
+def LLVM : Package<"llvm">;
+def Debug : Package<"debug">;
+
+//===----------------------------------------------------------------------===//
+// Core Checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Core in {
+
+def DereferenceChecker : Checker<"NullDereference">,
+ HelpText<"Check for dereferences of null pointers">,
+ DescFile<"DereferenceChecker.cpp">;
+
+def CallAndMessageChecker : Checker<"CallAndMessage">,
+ HelpText<"Check for logical errors for function calls and Objective-C message expressions (e.g., uninitialized arguments, null function pointers)">,
+ DescFile<"CallAndMessageChecker.cpp">;
+
+def AdjustedReturnValueChecker : Checker<"AdjustedReturnValue">,
+ HelpText<"Check to see if the return value of a function call is different than the caller expects (e.g., from calls through function pointers)">,
+ DescFile<"AdjustedReturnValueChecker.cpp">;
+
+def AttrNonNullChecker : Checker<"AttributeNonNull">,
+ HelpText<"Check for null pointers passed as arguments to a function whose arguments are marked with the 'nonnull' attribute">,
+ DescFile<"AttrNonNullChecker.cpp">;
+
+def VLASizeChecker : Checker<"VLASize">,
+ HelpText<"Check for declarations of VLA of undefined or zero size">,
+ DescFile<"VLASizeChecker.cpp">;
+
+def DivZeroChecker : Checker<"DivideZero">,
+ HelpText<"Check for division by zero">,
+ DescFile<"DivZeroChecker.cpp">;
+
+def UndefResultChecker : Checker<"UndefinedBinaryOperatorResult">,
+ HelpText<"Check for undefined results of binary operators">,
+ DescFile<"UndefResultChecker.cpp">;
+
+def StackAddrEscapeChecker : Checker<"StackAddressEscape">,
+ HelpText<"Check that addresses to stack memory do not escape the function">,
+ DescFile<"StackAddrEscapeChecker.cpp">;
+
+} // end "core"
+
+let ParentPackage = CoreExperimental in {
+
+def BoolAssignmentChecker : Checker<"BoolAssignment">,
+ HelpText<"Warn about assigning non-{0,1} values to Boolean variables">,
+ DescFile<"BoolAssignmentChecker.cpp">;
+
+def CastSizeChecker : Checker<"CastSize">,
+ HelpText<"Check when casting a malloc'ed type T, whether the size is a multiple of the size of T">,
+ DescFile<"CastSizeChecker.cpp">;
+
+def CastToStructChecker : Checker<"CastToStruct">,
+ HelpText<"Check for cast from non-struct pointer to struct pointer">,
+ DescFile<"CastToStructChecker.cpp">;
+
+def FixedAddressChecker : Checker<"FixedAddr">,
+ HelpText<"Check for assignment of a fixed address to a pointer">,
+ DescFile<"FixedAddressChecker.cpp">;
+
+def PointerArithChecker : Checker<"PointerArithm">,
+ HelpText<"Check for pointer arithmetic on locations other than array elements">,
+ DescFile<"PointerArithChecker">;
+
+def PointerSubChecker : Checker<"PointerSub">,
+ HelpText<"Check for pointer subtractions on two pointers pointing to different memory chunks">,
+ DescFile<"PointerSubChecker">;
+
+def SizeofPointerChecker : Checker<"SizeofPtr">,
+ HelpText<"Warn about unintended use of sizeof() on pointer expressions">,
+ DescFile<"CheckSizeofPointer.cpp">;
+
+} // end "core.experimental"
+
+//===----------------------------------------------------------------------===//
+// Evaluate "builtin" functions.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = CoreBuiltin in {
+
+def NoReturnFunctionChecker : Checker<"NoReturnFunctions">,
+ HelpText<"Evaluate \"panic\" functions that are known to not return to the caller">,
+ DescFile<"NoReturnFunctionChecker.cpp">;
+
+def BuiltinFunctionChecker : Checker<"BuiltinFunctions">,
+ HelpText<"Evaluate compiler builtin functions (e.g., alloca())">,
+ DescFile<"BuiltinFunctionChecker.cpp">;
+
+} // end "core.builtin"
+
+//===----------------------------------------------------------------------===//
+// Uninitialized values checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = CoreUninitialized in {
+
+def UndefinedArraySubscriptChecker : Checker<"ArraySubscript">,
+ HelpText<"Check for uninitialized values used as array subscripts">,
+ DescFile<"UndefinedArraySubscriptChecker.cpp">;
+
+def UndefinedAssignmentChecker : Checker<"Assign">,
+ HelpText<"Check for assigning uninitialized values">,
+ DescFile<"UndefinedAssignmentChecker.cpp">;
+
+def UndefBranchChecker : Checker<"Branch">,
+ HelpText<"Check for uninitialized values used as branch conditions">,
+ DescFile<"UndefBranchChecker.cpp">;
+
+def UndefCapturedBlockVarChecker : Checker<"CapturedBlockVariable">,
+ HelpText<"Check for blocks that capture uninitialized values">,
+ DescFile<"UndefCapturedBlockVarChecker.cpp">;
+
+def ReturnUndefChecker : Checker<"UndefReturn">,
+ HelpText<"Check for uninitialized values being returned to the caller">,
+ DescFile<"ReturnUndefChecker.cpp">;
+
+} // end "core.uninitialized"
+
+//===----------------------------------------------------------------------===//
+// C++ checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = CplusplusExperimental in {
+
+def IteratorsChecker : Checker<"Iterators">,
+ HelpText<"Check improper uses of STL vector iterators">,
+ DescFile<"IteratorsChecker.cpp">;
+
+def VirtualCallChecker : Checker<"VirtualCall">,
+ HelpText<"Check virtual function calls during construction or destruction">,
+ DescFile<"VirtualCallChecker.cpp">;
+
+} // end: "cplusplus.experimental"
+
+//===----------------------------------------------------------------------===//
+// Deadcode checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = DeadCode in {
+
+def DeadStoresChecker : Checker<"DeadStores">,
+ HelpText<"Check for values stored to variables that are never read afterwards">,
+ DescFile<"DeadStoresChecker.cpp">;
+} // end DeadCode
+
+let ParentPackage = DeadCodeExperimental in {
+
+def IdempotentOperationChecker : Checker<"IdempotentOperations">,
+ HelpText<"Warn about idempotent operations">,
+ DescFile<"IdempotentOperationChecker.cpp">;
+
+def UnreachableCodeChecker : Checker<"UnreachableCode">,
+ HelpText<"Check unreachable code">,
+ DescFile<"UnreachableCodeChecker.cpp">;
+
+} // end "deadcode.experimental"
+
+//===----------------------------------------------------------------------===//
+// Security checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = InsecureAPI in {
+ def gets : Checker<"gets">,
+ HelpText<"Warn on uses of the 'gets' function">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def getpw : Checker<"getpw">,
+ HelpText<"Warn on uses of the 'getpw' function">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def mktemp : Checker<"mktemp">,
+ HelpText<"Warn on uses of the 'mktemp' function">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def mkstemp : Checker<"mkstemp">,
+ HelpText<"Warn when 'mkstemp' is passed fewer than 6 X's in the format string">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def rand : Checker<"rand">,
+ HelpText<"Warn on uses of the 'rand', 'random', and related functions">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def strcpy : Checker<"strcpy">,
+ HelpText<"Warn on uses of the 'strcpy' and 'strcat' functions">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def vfork : Checker<"vfork">,
+ HelpText<"Warn on uses of the 'vfork' function">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+ def UncheckedReturn : Checker<"UncheckedReturn">,
+ HelpText<"Warn on uses of functions whose return values must be always checked">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+}
+let ParentPackage = Security in {
+ def FloatLoopCounter : Checker<"FloatLoopCounter">,
+ HelpText<"Warn on using a floating point value as a loop counter (CERT: FLP30-C, FLP30-CPP)">,
+ DescFile<"CheckSecuritySyntaxOnly.cpp">;
+}
+
+let ParentPackage = SecurityExperimental in {
+
+def ArrayBoundChecker : Checker<"ArrayBound">,
+ HelpText<"Warn about buffer overflows (older checker)">,
+ DescFile<"ArrayBoundChecker.cpp">;
+
+def ArrayBoundCheckerV2 : Checker<"ArrayBoundV2">,
+ HelpText<"Warn about buffer overflows (newer checker)">,
+ DescFile<"ArrayBoundCheckerV2.cpp">;
+
+def ReturnPointerRangeChecker : Checker<"ReturnPtrRange">,
+ HelpText<"Check for an out-of-bound pointer being returned to callers">,
+ DescFile<"ReturnPointerRangeChecker.cpp">;
+
+def MallocOverflowSecurityChecker : Checker<"MallocOverflow">,
+ HelpText<"Check for overflows in the arguments to malloc()">,
+ DescFile<"MallocOverflowSecurityChecker.cpp">;
+
+} // end "security.experimental"
+
+//===----------------------------------------------------------------------===//
+// Taint checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Taint in {
+
+def GenericTaintChecker : Checker<"TaintPropagation">,
+ HelpText<"Generate taint information used by other checkers">,
+ DescFile<"GenericTaintChecker.cpp">;
+
+} // end "experimental.security.taint"
+
+//===----------------------------------------------------------------------===//
+// Unix API checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Unix in {
+
+def UnixAPIChecker : Checker<"API">,
+ HelpText<"Check calls to various UNIX/Posix functions">,
+ DescFile<"UnixAPIChecker.cpp">;
+
+def MallocPessimistic : Checker<"Malloc">,
+ HelpText<"Check for memory leaks, double free, and use-after-free problems.">,
+ DescFile<"MallocChecker.cpp">;
+
+} // end "unix"
+
+let ParentPackage = UnixExperimental in {
+
+def ChrootChecker : Checker<"Chroot">,
+ HelpText<"Check improper use of chroot">,
+ DescFile<"ChrootChecker.cpp">;
+
+def MallocOptimistic : Checker<"MallocWithAnnotations">,
+ HelpText<"Check for memory leaks, double free, and use-after-free problems. Assumes that all user-defined functions which might free a pointer are annotated.">,
+ DescFile<"MallocChecker.cpp">;
+
+def MallocSizeofChecker : Checker<"MallocSizeof">,
+ HelpText<"Check for dubious malloc arguments involving sizeof">,
+ DescFile<"MallocSizeofChecker.cpp">;
+
+def PthreadLockChecker : Checker<"PthreadLock">,
+ HelpText<"Simple lock -> unlock checker">,
+ DescFile<"PthreadLockChecker.cpp">;
+
+def StreamChecker : Checker<"Stream">,
+ HelpText<"Check stream handling functions">,
+ DescFile<"StreamChecker.cpp">;
+
+} // end "unix.experimental"
+
+let ParentPackage = CString in {
+
+def CStringNullArg : Checker<"NullArg">,
+ HelpText<"Check for null pointers being passed as arguments to C string functions">,
+ DescFile<"CStringChecker.cpp">;
+
+def CStringSyntaxChecker : Checker<"BadSizeArg">,
+ HelpText<"Check the size argument passed into C string functions for common erroneous patterns">,
+ DescFile<"CStringSyntaxChecker.cpp">;
+}
+
+let ParentPackage = CStringExperimental in {
+
+def CStringOutOfBounds : Checker<"OutOfBounds">,
+ HelpText<"Check for out-of-bounds access in string functions">,
+ DescFile<"CStringChecker.cpp">;
+
+def CStringBufferOverlap : Checker<"BufferOverlap">,
+ HelpText<"Checks for overlap in two buffer arguments">,
+ DescFile<"CStringChecker.cpp">;
+
+def CStringNotNullTerm : Checker<"NotNullTerminated">,
+ HelpText<"Check for arguments which are not null-terminating strings">,
+ DescFile<"CStringChecker.cpp">;
+}
+
+//===----------------------------------------------------------------------===//
+// Mac OS X, Cocoa, and Core Foundation checkers.
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = OSX in {
+
+def MacOSXAPIChecker : Checker<"API">,
+ InPackage<OSX>,
+ HelpText<"Check for proper uses of various Mac OS X APIs">,
+ DescFile<"MacOSXAPIChecker.cpp">;
+
+def OSAtomicChecker : Checker<"AtomicCAS">,
+ InPackage<OSX>,
+ HelpText<"Evaluate calls to OSAtomic functions">,
+ DescFile<"OSAtomicChecker.cpp">;
+
+def MacOSKeychainAPIChecker : Checker<"SecKeychainAPI">,
+ InPackage<OSX>,
+ HelpText<"Check for proper uses of Secure Keychain APIs">,
+ DescFile<"MacOSKeychainAPIChecker.cpp">;
+
+} // end "macosx"
+
+let ParentPackage = Cocoa in {
+
+def ObjCAtSyncChecker : Checker<"AtSync">,
+ HelpText<"Check for null pointers used as mutexes for @synchronized">,
+ DescFile<"ObjCAtSyncChecker.cpp">;
+
+def NilArgChecker : Checker<"NilArg">,
+ HelpText<"Check for prohibited nil arguments to ObjC method calls">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def ClassReleaseChecker : Checker<"ClassRelease">,
+ HelpText<"Check for sending 'retain', 'release', or 'autorelease' directly to a Class">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def VariadicMethodTypeChecker : Checker<"VariadicMethodTypes">,
+ HelpText<"Check for passing non-Objective-C types to variadic methods that expect "
+ "only Objective-C types">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def NSAutoreleasePoolChecker : Checker<"NSAutoreleasePool">,
+ HelpText<"Warn for suboptimal uses of NSAutoreleasePool in Objective-C GC mode">,
+ DescFile<"NSAutoreleasePoolChecker.cpp">;
+
+def ObjCMethSigsChecker : Checker<"IncompatibleMethodTypes">,
+ HelpText<"Warn about Objective-C method signatures with type incompatibilities">,
+ DescFile<"CheckObjCInstMethSignature.cpp">;
+
+def ObjCUnusedIvarsChecker : Checker<"UnusedIvars">,
+ HelpText<"Warn about private ivars that are never used">,
+ DescFile<"ObjCUnusedIVarsChecker.cpp">;
+
+def ObjCSelfInitChecker : Checker<"SelfInit">,
+ HelpText<"Check that 'self' is properly initialized inside an initializer method">,
+ DescFile<"ObjCSelfInitChecker.cpp">;
+
+def NSErrorChecker : Checker<"NSError">,
+ HelpText<"Check usage of NSError** parameters">,
+ DescFile<"NSErrorChecker.cpp">;
+
+def RetainCountChecker : Checker<"RetainCount">,
+ HelpText<"Check for leaks and improper reference count management">,
+ DescFile<"RetainCountChecker.cpp">;
+
+} // end "cocoa"
+
+let ParentPackage = CocoaExperimental in {
+
+def ObjCDeallocChecker : Checker<"Dealloc">,
+ HelpText<"Warn about Objective-C classes that lack a correct implementation of -dealloc">,
+ DescFile<"CheckObjCDealloc.cpp">;
+
+} // end "cocoa.experimental"
+
+let ParentPackage = CoreFoundation in {
+
+def CFNumberCreateChecker : Checker<"CFNumber">,
+ HelpText<"Check for proper uses of CFNumberCreate">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def CFRetainReleaseChecker : Checker<"CFRetainRelease">,
+ HelpText<"Check for null arguments to CFRetain/CFRelease">,
+ DescFile<"BasicObjCFoundationChecks.cpp">;
+
+def CFErrorChecker : Checker<"CFError">,
+ HelpText<"Check usage of CFErrorRef* parameters">,
+ DescFile<"NSErrorChecker.cpp">;
+}
+
+let ParentPackage = Containers in {
+def ObjCContainersASTChecker : Checker<"PointerSizedValues">,
+ HelpText<"Warns if 'CFArray', 'CFDictionary', 'CFSet' are created with non-pointer-size values">,
+ DescFile<"ObjCContainersASTChecker.cpp">;
+
+def ObjCContainersChecker : Checker<"OutOfBounds">,
+ HelpText<"Checks for index out-of-bounds when using 'CFArray' API">,
+ DescFile<"ObjCContainersChecker.cpp">;
+
+}
+//===----------------------------------------------------------------------===//
+// Checkers for LLVM development.
+//===----------------------------------------------------------------------===//
+
+def LLVMConventionsChecker : Checker<"Conventions">,
+ InPackage<LLVM>,
+ HelpText<"Check code for LLVM codebase conventions">,
+ DescFile<"LLVMConventionsChecker.cpp">;
+
+//===----------------------------------------------------------------------===//
+// Debugging checkers (for analyzer development).
+//===----------------------------------------------------------------------===//
+
+let ParentPackage = Debug in {
+
+def DominatorsTreeDumper : Checker<"DumpDominators">,
+ HelpText<"Print the dominance tree for a given CFG">,
+ DescFile<"DebugCheckers.cpp">;
+
+def LiveVariablesDumper : Checker<"DumpLiveVars">,
+ HelpText<"Print results of live variable analysis">,
+ DescFile<"DebugCheckers.cpp">;
+
+def CFGViewer : Checker<"ViewCFG">,
+ HelpText<"View Control-Flow Graphs using GraphViz">,
+ DescFile<"DebugCheckers.cpp">;
+
+def CFGDumper : Checker<"DumpCFG">,
+ HelpText<"Display Control-Flow Graphs">,
+ DescFile<"DebugCheckers.cpp">;
+
+def CallGraphViewer : Checker<"ViewCallGraph">,
+ HelpText<"View Call Graph using GraphViz">,
+ DescFile<"DebugCheckers.cpp">;
+
+def CallGraphDumper : Checker<"DumpCallGraph">,
+ HelpText<"Display Call Graph">,
+ DescFile<"DebugCheckers.cpp">;
+
+def AnalyzerStatsChecker : Checker<"Stats">,
+ HelpText<"Emit warnings with analyzer statistics">,
+ DescFile<"AnalyzerStatsChecker.cpp">;
+
+def TaintTesterChecker : Checker<"TaintTest">,
+ HelpText<"Mark tainted symbols as such.">,
+ DescFile<"TaintTesterChecker.cpp">;
+
+} // end "debug"
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
new file mode 100644
index 0000000..30d0609
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ChrootChecker.cpp
@@ -0,0 +1,158 @@
+//===- ChrootChecker.cpp -------- Basic security checks ---------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines chroot checker, which checks improper use of chroot.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/ImmutableMap.h"
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+// Enum values that represent the jail state.
+enum Kind { NO_CHROOT, ROOT_CHANGED, JAIL_ENTERED };
+
+bool isRootChanged(intptr_t k) { return k == ROOT_CHANGED; }
+//bool isJailEntered(intptr_t k) { return k == JAIL_ENTERED; }
+
+// This checker checks improper use of chroot.
+// The state transition:
+// NO_CHROOT ---chroot(path)--> ROOT_CHANGED ---chdir(/) --> JAIL_ENTERED
+// | |
+// ROOT_CHANGED<--chdir(..)-- JAIL_ENTERED<--chdir(..)--
+// | |
+// bug<--foo()-- JAIL_ENTERED<--foo()--
+class ChrootChecker : public Checker<eval::Call, check::PreStmt<CallExpr> > {
+ mutable IdentifierInfo *II_chroot, *II_chdir;
+  // This bug refers to possibly breaking out of a chroot() jail.
+ mutable OwningPtr<BuiltinBug> BT_BreakJail;
+
+public:
+ ChrootChecker() : II_chroot(0), II_chdir(0) {}
+
+ static void *getTag() {
+ static int x;
+ return &x;
+ }
+
+ bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+private:
+ void Chroot(CheckerContext &C, const CallExpr *CE) const;
+ void Chdir(CheckerContext &C, const CallExpr *CE) const;
+};
+
+} // end anonymous namespace
+
+bool ChrootChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD)
+ return false;
+
+ ASTContext &Ctx = C.getASTContext();
+ if (!II_chroot)
+ II_chroot = &Ctx.Idents.get("chroot");
+ if (!II_chdir)
+ II_chdir = &Ctx.Idents.get("chdir");
+
+ if (FD->getIdentifier() == II_chroot) {
+ Chroot(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_chdir) {
+ Chdir(C, CE);
+ return true;
+ }
+
+ return false;
+}
+
+void ChrootChecker::Chroot(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ ProgramStateManager &Mgr = state->getStateManager();
+
+  // Once we encounter a chroot() call, set the enum value ROOT_CHANGED
+  // directly in the GDM.
+ state = Mgr.addGDM(state, ChrootChecker::getTag(), (void*) ROOT_CHANGED);
+ C.addTransition(state);
+}
+
+void ChrootChecker::Chdir(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ ProgramStateManager &Mgr = state->getStateManager();
+
+  // If there is no jail state in the GDM, just return.
+ const void *k = state->FindGDM(ChrootChecker::getTag());
+ if (!k)
+ return;
+
+ // After chdir("/"), enter the jail, set the enum value JAIL_ENTERED.
+ const Expr *ArgExpr = CE->getArg(0);
+ SVal ArgVal = state->getSVal(ArgExpr, C.getLocationContext());
+
+ if (const MemRegion *R = ArgVal.getAsRegion()) {
+ R = R->StripCasts();
+ if (const StringRegion* StrRegion= dyn_cast<StringRegion>(R)) {
+ const StringLiteral* Str = StrRegion->getStringLiteral();
+ if (Str->getString() == "/")
+ state = Mgr.addGDM(state, ChrootChecker::getTag(),
+ (void*) JAIL_ENTERED);
+ }
+ }
+
+ C.addTransition(state);
+}
+
+// Check the jail state before any function call except chroot and chdir().
+void ChrootChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD)
+ return;
+
+ ASTContext &Ctx = C.getASTContext();
+ if (!II_chroot)
+ II_chroot = &Ctx.Idents.get("chroot");
+ if (!II_chdir)
+ II_chdir = &Ctx.Idents.get("chdir");
+
+  // Ignore chroot() and chdir().
+ if (FD->getIdentifier() == II_chroot || FD->getIdentifier() == II_chdir)
+ return;
+
+ // If jail state is ROOT_CHANGED, generate BugReport.
+ void *const* k = C.getState()->FindGDM(ChrootChecker::getTag());
+ if (k)
+ if (isRootChanged((intptr_t) *k))
+ if (ExplodedNode *N = C.addTransition()) {
+ if (!BT_BreakJail)
+ BT_BreakJail.reset(new BuiltinBug("Break out of jail",
+ "No call of chdir(\"/\") immediately "
+ "after chroot"));
+ BugReport *R = new BugReport(*BT_BreakJail,
+ BT_BreakJail->getDescription(), N);
+ C.EmitReport(R);
+ }
+
+ return;
+}
+
+void ento::registerChrootChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ChrootChecker>();
+}
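
For illustration, a small hypothetical C example of the pattern this checker
diagnoses, following the state machine in the comment above (chroot() moves the
state to ROOT_CHANGED and only chdir("/") moves it on to JAIL_ENTERED); the
function names are invented:

    #include <stdio.h>
    #include <unistd.h>

    void bad_jail(const char *path) {
      chroot(path);      /* state becomes ROOT_CHANGED */
      puts("working");   /* warning: no call of chdir("/") immediately after chroot */
    }

    void good_jail(const char *path) {
      chroot(path);      /* state becomes ROOT_CHANGED */
      chdir("/");        /* state becomes JAIL_ENTERED */
      puts("working");   /* no warning */
    }

The report fires at the first call that is neither chroot() nor chdir() while
the state is still ROOT_CHANGED, as implemented in checkPreStmt() above.
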
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp
new file mode 100644
index 0000000..77a5a72
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangCheckers.cpp
@@ -0,0 +1,32 @@
+//===--- ClangCheckers.cpp - Provides builtin checkers ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/ClangCheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
+
+// FIXME: This is only necessary as long as there are checker registration
+// functions that do additional work besides mgr.registerChecker<CLASS>().
+// The only checkers that currently do this are:
+// - NSAutoreleasePoolChecker
+// - NSErrorChecker
+// - ObjCAtSyncChecker
+// It's probably worth including this information in Checkers.td to minimize
+// boilerplate code.
+#include "ClangSACheckers.h"
+
+using namespace clang;
+using namespace ento;
+
+void ento::registerBuiltinCheckers(CheckerRegistry &registry) {
+#define GET_CHECKERS
+#define CHECKER(FULLNAME,CLASS,DESCFILE,HELPTEXT,GROUPINDEX,HIDDEN) \
+ registry.addChecker(register##CLASS, FULLNAME, HELPTEXT);
+#include "Checkers.inc"
+#undef GET_CHECKERS
+}
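
Checkers.inc is generated from Checkers.td by TableGen and is not part of this
patch, so the entry below is only an illustrative assumption about its shape.
Given the macro parameters named above (FULLNAME, CLASS, DESCFILE, HELPTEXT,
GROUPINDEX, HIDDEN), a record such as core.DivideZero would expand inside
registerBuiltinCheckers() roughly like this:

    // Hypothetical generated entry:
    //   CHECKER("core.DivideZero", DivZeroChecker, "DivZeroChecker.cpp",
    //           "Check for division by zero", 0, false)
    // which the macro above turns into:
    registry.addChecker(registerDivZeroChecker, "core.DivideZero",
                        "Check for division by zero");

Here registerDivZeroChecker is the registration function defined in
DivZeroChecker.cpp later in this patch.
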
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
new file mode 100644
index 0000000..230baa7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ClangSACheckers.h
@@ -0,0 +1,37 @@
+//===--- ClangSACheckers.h - Registration functions for Checkers *- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Declares the registration functions for the checkers defined in
+// libclangStaticAnalyzerCheckers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Checkers/CommonBugCategories.h"
+
+#ifndef LLVM_CLANG_SA_LIB_CHECKERS_CLANGSACHECKERS_H
+#define LLVM_CLANG_SA_LIB_CHECKERS_CLANGSACHECKERS_H
+
+namespace clang {
+
+namespace ento {
+class CheckerManager;
+class CheckerRegistry;
+
+#define GET_CHECKERS
+#define CHECKER(FULLNAME,CLASS,CXXFILE,HELPTEXT,GROUPINDEX,HIDDEN) \
+ void register##CLASS(CheckerManager &mgr);
+#include "Checkers.inc"
+#undef CHECKER
+#undef GET_CHECKERS
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
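
Correspondingly, the CHECKER macro in this header expands to one forward
declaration per generated entry. Using the same hypothetical core.DivideZero
entry as above:

    namespace clang {
    namespace ento {
    // Hypothetical expansion; the real list comes from Checkers.inc.
    void registerDivZeroChecker(CheckerManager &mgr);
    } // end ento namespace
    } // end clang namespace
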
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CommonBugCategories.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CommonBugCategories.cpp
new file mode 100644
index 0000000..e2a8ea6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/CommonBugCategories.cpp
@@ -0,0 +1,18 @@
+//=--- CommonBugCategories.cpp - Provides common issue categories -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+// Common strings used for the "category" of many static analyzer issues.
+namespace clang { namespace ento { namespace categories {
+
+const char *CoreFoundationObjectiveC = "Core Foundation/Objective-C";
+const char *MemoryCoreFoundationObjectiveC =
+ "Memory (Core Foundation/Objective-C)";
+const char *UnixAPI = "Unix API";
+}}}
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
new file mode 100644
index 0000000..510e8cd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -0,0 +1,386 @@
+//==- DeadStoresChecker.cpp - Check for stores to dead variables -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines DeadStores, a flow-sensitive checker that looks for
+// stores to variables that are no longer live.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/Visitors/CFGRecStmtVisitor.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/Analysis/Visitors/CFGRecStmtDeclVisitor.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ParentMap.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+// FIXME: Eventually migrate into its own file, and have it managed by
+// AnalysisManager.
+class ReachableCode {
+ const CFG &cfg;
+ llvm::BitVector reachable;
+public:
+ ReachableCode(const CFG &cfg)
+ : cfg(cfg), reachable(cfg.getNumBlockIDs(), false) {}
+
+ void computeReachableBlocks();
+
+ bool isReachable(const CFGBlock *block) const {
+ return reachable[block->getBlockID()];
+ }
+};
+}
+
+void ReachableCode::computeReachableBlocks() {
+ if (!cfg.getNumBlockIDs())
+ return;
+
+ SmallVector<const CFGBlock*, 10> worklist;
+ worklist.push_back(&cfg.getEntry());
+
+ while (!worklist.empty()) {
+ const CFGBlock *block = worklist.back();
+ worklist.pop_back();
+ llvm::BitVector::reference isReachable = reachable[block->getBlockID()];
+ if (isReachable)
+ continue;
+ isReachable = true;
+ for (CFGBlock::const_succ_iterator i = block->succ_begin(),
+ e = block->succ_end(); i != e; ++i)
+ if (const CFGBlock *succ = *i)
+ worklist.push_back(succ);
+ }
+}
+
+static const Expr *LookThroughTransitiveAssignments(const Expr *Ex) {
+ while (Ex) {
+ const BinaryOperator *BO =
+ dyn_cast<BinaryOperator>(Ex->IgnoreParenCasts());
+ if (!BO)
+ break;
+ if (BO->getOpcode() == BO_Assign) {
+ Ex = BO->getRHS();
+ continue;
+ }
+ break;
+ }
+ return Ex;
+}
+
+namespace {
+class DeadStoreObs : public LiveVariables::Observer {
+ const CFG &cfg;
+ ASTContext &Ctx;
+ BugReporter& BR;
+ AnalysisDeclContext* AC;
+ ParentMap& Parents;
+ llvm::SmallPtrSet<const VarDecl*, 20> Escaped;
+ OwningPtr<ReachableCode> reachableCode;
+ const CFGBlock *currentBlock;
+
+ enum DeadStoreKind { Standard, Enclosing, DeadIncrement, DeadInit };
+
+public:
+ DeadStoreObs(const CFG &cfg, ASTContext &ctx,
+ BugReporter& br, AnalysisDeclContext* ac, ParentMap& parents,
+ llvm::SmallPtrSet<const VarDecl*, 20> &escaped)
+ : cfg(cfg), Ctx(ctx), BR(br), AC(ac), Parents(parents),
+ Escaped(escaped), currentBlock(0) {}
+
+ virtual ~DeadStoreObs() {}
+
+ void Report(const VarDecl *V, DeadStoreKind dsk,
+ PathDiagnosticLocation L, SourceRange R) {
+ if (Escaped.count(V))
+ return;
+
+ // Compute reachable blocks within the CFG for trivial cases
+    // where a bogus dead store can be reported because the store itself is
+    // unreachable.
+ if (!reachableCode.get()) {
+ reachableCode.reset(new ReachableCode(cfg));
+ reachableCode->computeReachableBlocks();
+ }
+
+ if (!reachableCode->isReachable(currentBlock))
+ return;
+
+ SmallString<64> buf;
+ llvm::raw_svector_ostream os(buf);
+ const char *BugType = 0;
+
+ switch (dsk) {
+ case DeadInit:
+ BugType = "Dead initialization";
+ os << "Value stored to '" << *V
+ << "' during its initialization is never read";
+ break;
+
+ case DeadIncrement:
+ BugType = "Dead increment";
+ case Standard:
+ if (!BugType) BugType = "Dead assignment";
+ os << "Value stored to '" << *V << "' is never read";
+ break;
+
+ case Enclosing:
+ // Don't report issues in this case, e.g.: "if (x = foo())",
+ // where 'x' is unused later. We have yet to see a case where
+ // this is a real bug.
+ return;
+ }
+
+ BR.EmitBasicReport(AC->getDecl(), BugType, "Dead store", os.str(), L, R);
+ }
+
+ void CheckVarDecl(const VarDecl *VD, const Expr *Ex, const Expr *Val,
+ DeadStoreKind dsk,
+ const LiveVariables::LivenessValues &Live) {
+
+ if (!VD->hasLocalStorage())
+ return;
+ // Reference types confuse the dead stores checker. Skip them
+ // for now.
+ if (VD->getType()->getAs<ReferenceType>())
+ return;
+
+ if (!Live.isLive(VD) &&
+ !(VD->getAttr<UnusedAttr>() || VD->getAttr<BlocksAttr>())) {
+
+ PathDiagnosticLocation ExLoc =
+ PathDiagnosticLocation::createBegin(Ex, BR.getSourceManager(), AC);
+ Report(VD, dsk, ExLoc, Val->getSourceRange());
+ }
+ }
+
+ void CheckDeclRef(const DeclRefExpr *DR, const Expr *Val, DeadStoreKind dsk,
+ const LiveVariables::LivenessValues& Live) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl()))
+ CheckVarDecl(VD, DR, Val, dsk, Live);
+ }
+
+ bool isIncrement(VarDecl *VD, const BinaryOperator* B) {
+ if (B->isCompoundAssignmentOp())
+ return true;
+
+ const Expr *RHS = B->getRHS()->IgnoreParenCasts();
+ const BinaryOperator* BRHS = dyn_cast<BinaryOperator>(RHS);
+
+ if (!BRHS)
+ return false;
+
+ const DeclRefExpr *DR;
+
+ if ((DR = dyn_cast<DeclRefExpr>(BRHS->getLHS()->IgnoreParenCasts())))
+ if (DR->getDecl() == VD)
+ return true;
+
+ if ((DR = dyn_cast<DeclRefExpr>(BRHS->getRHS()->IgnoreParenCasts())))
+ if (DR->getDecl() == VD)
+ return true;
+
+ return false;
+ }
+
+ virtual void observeStmt(const Stmt *S, const CFGBlock *block,
+ const LiveVariables::LivenessValues &Live) {
+
+ currentBlock = block;
+
+ // Skip statements in macros.
+ if (S->getLocStart().isMacroID())
+ return;
+
+ // Only cover dead stores from regular assignments. ++/-- dead stores
+ // have never flagged a real bug.
+ if (const BinaryOperator* B = dyn_cast<BinaryOperator>(S)) {
+ if (!B->isAssignmentOp()) return; // Skip non-assignments.
+
+ if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(B->getLHS()))
+ if (VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ // Special case: check for assigning null to a pointer.
+ // This is a common form of defensive programming.
+ const Expr *RHS = LookThroughTransitiveAssignments(B->getRHS());
+
+ QualType T = VD->getType();
+ if (T->isPointerType() || T->isObjCObjectPointerType()) {
+ if (RHS->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNull))
+ return;
+ }
+
+ RHS = RHS->IgnoreParenCasts();
+ // Special case: self-assignments. These are often used to shut up
+ // "unused variable" compiler warnings.
+ if (const DeclRefExpr *RhsDR = dyn_cast<DeclRefExpr>(RHS))
+ if (VD == dyn_cast<VarDecl>(RhsDR->getDecl()))
+ return;
+
+ // Otherwise, issue a warning.
+ DeadStoreKind dsk = Parents.isConsumedExpr(B)
+ ? Enclosing
+ : (isIncrement(VD,B) ? DeadIncrement : Standard);
+
+ CheckVarDecl(VD, DR, B->getRHS(), dsk, Live);
+ }
+ }
+ else if (const UnaryOperator* U = dyn_cast<UnaryOperator>(S)) {
+ if (!U->isIncrementOp() || U->isPrefix())
+ return;
+
+ const Stmt *parent = Parents.getParentIgnoreParenCasts(U);
+ if (!parent || !isa<ReturnStmt>(parent))
+ return;
+
+ const Expr *Ex = U->getSubExpr()->IgnoreParenCasts();
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Ex))
+ CheckDeclRef(DR, U, DeadIncrement, Live);
+ }
+ else if (const DeclStmt *DS = dyn_cast<DeclStmt>(S))
+ // Iterate through the decls. Warn if any initializers are complex
+ // expressions that are not live (never used).
+ for (DeclStmt::const_decl_iterator DI=DS->decl_begin(), DE=DS->decl_end();
+ DI != DE; ++DI) {
+
+ VarDecl *V = dyn_cast<VarDecl>(*DI);
+
+ if (!V)
+ continue;
+
+ if (V->hasLocalStorage()) {
+ // Reference types confuse the dead stores checker. Skip them
+ // for now.
+ if (V->getType()->getAs<ReferenceType>())
+ return;
+
+ if (const Expr *E = V->getInit()) {
+ while (const ExprWithCleanups *exprClean =
+ dyn_cast<ExprWithCleanups>(E))
+ E = exprClean->getSubExpr();
+
+ // Look through transitive assignments, e.g.:
+ // int x = y = 0;
+ E = LookThroughTransitiveAssignments(E);
+
+ // Don't warn on C++ objects (yet) until we can show that their
+ // constructors/destructors don't have side effects.
+ if (isa<CXXConstructExpr>(E))
+ return;
+
+ // A dead initialization is a variable that is dead after it
+ // is initialized. We don't flag warnings for those variables
+ // marked 'unused'.
+ if (!Live.isLive(V) && V->getAttr<UnusedAttr>() == 0) {
+ // Special case: check for initializations with constants.
+ //
+ // e.g. : int x = 0;
+ //
+ // If x is EVER assigned a new value later, don't issue
+ // a warning. This is because such initialization can be
+ // due to defensive programming.
+ if (E->isEvaluatable(Ctx))
+ return;
+
+ if (const DeclRefExpr *DRE =
+ dyn_cast<DeclRefExpr>(E->IgnoreParenCasts()))
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
+ // Special case: check for initialization from constant
+ // variables.
+ //
+ // e.g. extern const int MyConstant;
+ // int x = MyConstant;
+ //
+ if (VD->hasGlobalStorage() &&
+ VD->getType().isConstQualified())
+ return;
+ // Special case: check for initialization from scalar
+ // parameters. This is often a form of defensive
+                // programming. Non-scalars are still reported because they
+                // more likely represent an actual algorithmic bug.
+ if (isa<ParmVarDecl>(VD) && VD->getType()->isScalarType())
+ return;
+ }
+
+ PathDiagnosticLocation Loc =
+ PathDiagnosticLocation::create(V, BR.getSourceManager());
+ Report(V, DeadInit, Loc, E->getSourceRange());
+ }
+ }
+ }
+ }
+ }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Driver function to invoke the Dead-Stores checker on a CFG.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class FindEscaped : public CFGRecStmtDeclVisitor<FindEscaped>{
+ CFG *cfg;
+public:
+ FindEscaped(CFG *c) : cfg(c) {}
+
+ CFG& getCFG() { return *cfg; }
+
+ llvm::SmallPtrSet<const VarDecl*, 20> Escaped;
+
+ void VisitUnaryOperator(UnaryOperator* U) {
+    // Check for '&'. Any VarDecl whose address is taken is treated as
+    // escaped.
+ Expr *E = U->getSubExpr()->IgnoreParenCasts();
+ if (U->getOpcode() == UO_AddrOf)
+ if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E))
+ if (VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ Escaped.insert(VD);
+ return;
+ }
+ Visit(E);
+ }
+};
+} // end anonymous namespace
+
+
+//===----------------------------------------------------------------------===//
+// DeadStoresChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class DeadStoresChecker : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (LiveVariables *L = mgr.getAnalysis<LiveVariables>(D)) {
+ CFG &cfg = *mgr.getCFG(D);
+ AnalysisDeclContext *AC = mgr.getAnalysisDeclContext(D);
+ ParentMap &pmap = mgr.getParentMap(D);
+ FindEscaped FS(&cfg);
+ FS.getCFG().VisitBlockStmts(FS);
+ DeadStoreObs A(cfg, BR.getContext(), BR, AC, pmap, FS.Escaped);
+ L->runOnAllBlocks(A);
+ }
+ }
+};
+}
+
+void ento::registerDeadStoresChecker(CheckerManager &mgr) {
+ mgr.registerChecker<DeadStoresChecker>();
+}
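
For illustration, a hypothetical C snippet showing the diagnostics DeadStoreObs
produces; whether a store is flagged depends on the liveness results, so the
comments describe the expected outcome for this exact code:

    static int compute(void) { return 42; }

    int dead_store_examples(int param) {
      int x = compute();  /* Dead initialization: the value stored to 'x'
                             during its initialization is never read */
      x = param;          /* Dead assignment: 'x' is never read after this */

      int y = 0;          /* not flagged: a constant initializer that is later
                             overwritten is treated as defensive programming */
      y = param + 1;
      return y;
    }
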
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
new file mode 100644
index 0000000..34053cd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DebugCheckers.cpp
@@ -0,0 +1,146 @@
+//==- DebugCheckers.cpp - Debugging Checkers ---------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines checkers that display debugging information.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/Analyses/Dominators.h"
+#include "clang/Analysis/CallGraph.h"
+#include "llvm/Support/Process.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// DominatorsTreeDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class DominatorsTreeDumper : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (AnalysisDeclContext *AC = mgr.getAnalysisDeclContext(D)) {
+ DominatorTree dom;
+ dom.buildDominatorTree(*AC);
+ dom.dump();
+ }
+ }
+};
+}
+
+void ento::registerDominatorsTreeDumper(CheckerManager &mgr) {
+ mgr.registerChecker<DominatorsTreeDumper>();
+}
+
+//===----------------------------------------------------------------------===//
+// LiveVariablesDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class LiveVariablesDumper : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (LiveVariables* L = mgr.getAnalysis<LiveVariables>(D)) {
+ L->dumpBlockLiveness(mgr.getSourceManager());
+ }
+ }
+};
+}
+
+void ento::registerLiveVariablesDumper(CheckerManager &mgr) {
+ mgr.registerChecker<LiveVariablesDumper>();
+}
+
+//===----------------------------------------------------------------------===//
+// CFGViewer
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFGViewer : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (CFG *cfg = mgr.getCFG(D)) {
+ cfg->viewCFG(mgr.getLangOpts());
+ }
+ }
+};
+}
+
+void ento::registerCFGViewer(CheckerManager &mgr) {
+ mgr.registerChecker<CFGViewer>();
+}
+
+//===----------------------------------------------------------------------===//
+// CFGDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFGDumper : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (CFG *cfg = mgr.getCFG(D)) {
+ cfg->dump(mgr.getLangOpts(),
+ llvm::sys::Process::StandardErrHasColors());
+ }
+ }
+};
+}
+
+void ento::registerCFGDumper(CheckerManager &mgr) {
+ mgr.registerChecker<CFGDumper>();
+}
+
+//===----------------------------------------------------------------------===//
+// CallGraphViewer
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CallGraphViewer : public Checker< check::ASTDecl<TranslationUnitDecl> > {
+public:
+ void checkASTDecl(const TranslationUnitDecl *TU, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ CallGraph CG;
+ CG.addToCallGraph(const_cast<TranslationUnitDecl*>(TU));
+ CG.viewGraph();
+ }
+};
+}
+
+void ento::registerCallGraphViewer(CheckerManager &mgr) {
+ mgr.registerChecker<CallGraphViewer>();
+}
+
+//===----------------------------------------------------------------------===//
+// CallGraphDumper
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CallGraphDumper : public Checker< check::ASTDecl<TranslationUnitDecl> > {
+public:
+ void checkASTDecl(const TranslationUnitDecl *TU, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ CallGraph CG;
+ CG.addToCallGraph(const_cast<TranslationUnitDecl*>(TU));
+ CG.dump();
+ }
+};
+}
+
+void ento::registerCallGraphDumper(CheckerManager &mgr) {
+ mgr.registerChecker<CallGraphDumper>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
new file mode 100644
index 0000000..81a2745
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -0,0 +1,216 @@
+//== NullDerefChecker.cpp - Null dereference checker ------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines NullDerefChecker, a builtin check in ExprEngine that performs
+// checks for null pointers at loads and stores.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class DereferenceChecker
+ : public Checker< check::Location,
+ EventDispatcher<ImplicitNullDerefEvent> > {
+ mutable OwningPtr<BuiltinBug> BT_null;
+ mutable OwningPtr<BuiltinBug> BT_undef;
+
+public:
+ void checkLocation(SVal location, bool isLoad, const Stmt* S,
+ CheckerContext &C) const;
+
+ static const MemRegion *AddDerefSource(raw_ostream &os,
+ SmallVectorImpl<SourceRange> &Ranges,
+ const Expr *Ex, const ProgramState *state,
+ const LocationContext *LCtx,
+ bool loadedFrom = false);
+};
+} // end anonymous namespace
+
+const MemRegion *
+DereferenceChecker::AddDerefSource(raw_ostream &os,
+ SmallVectorImpl<SourceRange> &Ranges,
+ const Expr *Ex,
+ const ProgramState *state,
+ const LocationContext *LCtx,
+ bool loadedFrom) {
+ Ex = Ex->IgnoreParenLValueCasts();
+ const MemRegion *sourceR = 0;
+ switch (Ex->getStmtClass()) {
+ default:
+ break;
+ case Stmt::DeclRefExprClass: {
+ const DeclRefExpr *DR = cast<DeclRefExpr>(Ex);
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ os << " (" << (loadedFrom ? "loaded from" : "from")
+ << " variable '" << VD->getName() << "')";
+ Ranges.push_back(DR->getSourceRange());
+ sourceR = state->getLValue(VD, LCtx).getAsRegion();
+ }
+ break;
+ }
+ case Stmt::MemberExprClass: {
+ const MemberExpr *ME = cast<MemberExpr>(Ex);
+ os << " (" << (loadedFrom ? "loaded from" : "via")
+ << " field '" << ME->getMemberNameInfo() << "')";
+ SourceLocation L = ME->getMemberLoc();
+ Ranges.push_back(SourceRange(L, L));
+ break;
+ }
+ }
+ return sourceR;
+}
+
+void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
+ CheckerContext &C) const {
+ // Check for dereference of an undefined value.
+ if (l.isUndef()) {
+ if (ExplodedNode *N = C.generateSink()) {
+ if (!BT_undef)
+ BT_undef.reset(new BuiltinBug("Dereference of undefined pointer value"));
+
+ BugReport *report =
+ new BugReport(*BT_undef, BT_undef->getDescription(), N);
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
+ bugreporter::GetDerefExpr(N), report));
+ C.EmitReport(report);
+ }
+ return;
+ }
+
+ DefinedOrUnknownSVal location = cast<DefinedOrUnknownSVal>(l);
+
+ // Check for null dereferences.
+ if (!isa<Loc>(location))
+ return;
+
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+ ProgramStateRef notNullState, nullState;
+ llvm::tie(notNullState, nullState) = state->assume(location);
+
+ // The explicit NULL case.
+ if (nullState) {
+ if (!notNullState) {
+ // Generate an error node.
+ ExplodedNode *N = C.generateSink(nullState);
+ if (!N)
+ return;
+
+ // We know that 'location' cannot be non-null. This is what
+ // we call an "explicit" null dereference.
+ if (!BT_null)
+ BT_null.reset(new BuiltinBug("Dereference of null pointer"));
+
+ SmallString<100> buf;
+ SmallVector<SourceRange, 2> Ranges;
+
+ // Walk through lvalue casts to get the original expression
+ // that syntactically caused the load.
+ if (const Expr *expr = dyn_cast<Expr>(S))
+ S = expr->IgnoreParenLValueCasts();
+
+ const MemRegion *sourceR = 0;
+
+ switch (S->getStmtClass()) {
+ case Stmt::ArraySubscriptExprClass: {
+ llvm::raw_svector_ostream os(buf);
+ os << "Array access";
+ const ArraySubscriptExpr *AE = cast<ArraySubscriptExpr>(S);
+ sourceR =
+ AddDerefSource(os, Ranges, AE->getBase()->IgnoreParenCasts(),
+ state.getPtr(), LCtx);
+ os << " results in a null pointer dereference";
+ break;
+ }
+ case Stmt::UnaryOperatorClass: {
+ llvm::raw_svector_ostream os(buf);
+ os << "Dereference of null pointer";
+ const UnaryOperator *U = cast<UnaryOperator>(S);
+ sourceR =
+ AddDerefSource(os, Ranges, U->getSubExpr()->IgnoreParens(),
+ state.getPtr(), LCtx, true);
+ break;
+ }
+ case Stmt::MemberExprClass: {
+ const MemberExpr *M = cast<MemberExpr>(S);
+ if (M->isArrow()) {
+ llvm::raw_svector_ostream os(buf);
+ os << "Access to field '" << M->getMemberNameInfo()
+ << "' results in a dereference of a null pointer";
+ sourceR =
+ AddDerefSource(os, Ranges, M->getBase()->IgnoreParenCasts(),
+ state.getPtr(), LCtx, true);
+ }
+ break;
+ }
+ case Stmt::ObjCIvarRefExprClass: {
+ const ObjCIvarRefExpr *IV = cast<ObjCIvarRefExpr>(S);
+ if (const DeclRefExpr *DR =
+ dyn_cast<DeclRefExpr>(IV->getBase()->IgnoreParenCasts())) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ llvm::raw_svector_ostream os(buf);
+ os << "Instance variable access (via '" << VD->getName()
+ << "') results in a null pointer dereference";
+ }
+ }
+ Ranges.push_back(IV->getSourceRange());
+ break;
+ }
+ default:
+ break;
+ }
+
+ BugReport *report =
+ new BugReport(*BT_null,
+ buf.empty() ? BT_null->getDescription():buf.str(),
+ N);
+
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
+ bugreporter::GetDerefExpr(N), report));
+
+ for (SmallVectorImpl<SourceRange>::iterator
+ I = Ranges.begin(), E = Ranges.end(); I!=E; ++I)
+ report->addRange(*I);
+
+ if (sourceR) {
+ report->markInteresting(sourceR);
+ report->markInteresting(state->getRawSVal(loc::MemRegionVal(sourceR)));
+ }
+
+ C.EmitReport(report);
+ return;
+ }
+ else {
+ // Otherwise, we have the case where the location could either be
+ // null or not-null. Record the error node as an "implicit" null
+ // dereference.
+ if (ExplodedNode *N = C.generateSink(nullState)) {
+ ImplicitNullDerefEvent event = { l, isLoad, N, &C.getBugReporter() };
+ dispatchEvent(event);
+ }
+ }
+ }
+
+ // From this point forward, we know that the location is not null.
+ C.addTransition(notNullState);
+}
+
+void ento::registerDereferenceChecker(CheckerManager &mgr) {
+ mgr.registerChecker<DereferenceChecker>();
+}
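
For illustration, a hypothetical C example of the "explicit" null dereference
case handled above, where the analyzer can prove the location is null on the
taken branch:

    int deref_example(int *p) {
      if (p == 0)
        return *p;  /* warning: Dereference of null pointer
                       (loaded from variable 'p') */
      return *p;    /* no warning: 'p' is known non-null on this path */
    }
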
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
new file mode 100644
index 0000000..2627f0c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -0,0 +1,96 @@
+//== DivZeroChecker.cpp - Division by zero checker --------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines DivZeroChecker, a builtin check in ExprEngine that performs
+// checks for division by zero.
+//
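+// A minimal, hypothetical example of code this checker flags:
+//
+//   int f(int n) {
+//     int d = 0;
+//     return n / d;   // warn: Division by zero
+//   }
+//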
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class DivZeroChecker : public Checker< check::PreStmt<BinaryOperator> > {
+ mutable OwningPtr<BuiltinBug> BT;
+ void reportBug(const char *Msg,
+ ProgramStateRef StateZero,
+ CheckerContext &C) const;
+public:
+ void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+void DivZeroChecker::reportBug(const char *Msg,
+ ProgramStateRef StateZero,
+ CheckerContext &C) const {
+ if (ExplodedNode *N = C.generateSink(StateZero)) {
+ if (!BT)
+ BT.reset(new BuiltinBug("Division by zero"));
+
+ BugReport *R =
+ new BugReport(*BT, Msg, N);
+
+ R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
+ bugreporter::GetDenomExpr(N), R));
+ C.EmitReport(R);
+ }
+}
+
+void DivZeroChecker::checkPreStmt(const BinaryOperator *B,
+ CheckerContext &C) const {
+ BinaryOperator::Opcode Op = B->getOpcode();
+ if (Op != BO_Div &&
+ Op != BO_Rem &&
+ Op != BO_DivAssign &&
+ Op != BO_RemAssign)
+ return;
+
+ if (!B->getRHS()->getType()->isIntegerType() ||
+ !B->getRHS()->getType()->isScalarType())
+ return;
+
+ SVal Denom = C.getState()->getSVal(B->getRHS(), C.getLocationContext());
+ const DefinedSVal *DV = dyn_cast<DefinedSVal>(&Denom);
+
+ // Divide-by-undefined handled in the generic checking for uses of
+ // undefined values.
+ if (!DV)
+ return;
+
+ // Check for divide by zero.
+ ConstraintManager &CM = C.getConstraintManager();
+ ProgramStateRef stateNotZero, stateZero;
+ llvm::tie(stateNotZero, stateZero) = CM.assumeDual(C.getState(), *DV);
+
+ if (!stateNotZero) {
+ assert(stateZero);
+ reportBug("Division by zero", stateZero, C);
+ return;
+ }
+
+ bool TaintedD = C.getState()->isTainted(*DV);
+ if (stateNotZero && stateZero && TaintedD) {
+ reportBug("Division by a tainted value, possibly zero", stateZero, C);
+ return;
+ }
+
+ // If we get here, then the denom should not be zero. We abandon the implicit
+ // zero denom case for now.
+ C.addTransition(stateNotZero);
+}
+
+void ento::registerDivZeroChecker(CheckerManager &mgr) {
+ mgr.registerChecker<DivZeroChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
new file mode 100644
index 0000000..a1f2f3b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/FixedAddressChecker.cpp
@@ -0,0 +1,67 @@
+//=== FixedAddressChecker.cpp - Fixed address usage checker ----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines FixedAddressChecker, a builtin checker that checks for
+// assignment of a fixed address to a pointer.
+// This check corresponds to CWE-587.
+//
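+// A minimal, hypothetical example of code this checker flags:
+//
+//   int *p;
+//   p = (int *)0x10000;   // warn: Use fixed address
+//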
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class FixedAddressChecker
+ : public Checker< check::PreStmt<BinaryOperator> > {
+ mutable OwningPtr<BuiltinBug> BT;
+
+public:
+ void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
+};
+}
+
+void FixedAddressChecker::checkPreStmt(const BinaryOperator *B,
+ CheckerContext &C) const {
+ // Using a fixed address is not portable because that address will probably
+ // not be valid in all environments or platforms.
+
+ if (B->getOpcode() != BO_Assign)
+ return;
+
+ QualType T = B->getType();
+ if (!T->isPointerType())
+ return;
+
+ ProgramStateRef state = C.getState();
+ SVal RV = state->getSVal(B->getRHS(), C.getLocationContext());
+
+ if (!RV.isConstant() || RV.isZeroConstant())
+ return;
+
+ if (ExplodedNode *N = C.addTransition()) {
+ if (!BT)
+ BT.reset(new BuiltinBug("Use fixed address",
+ "Using a fixed address is not portable because that "
+ "address will probably not be valid in all "
+ "environments or platforms."));
+ BugReport *R = new BugReport(*BT, BT->getDescription(), N);
+ R->addRange(B->getRHS()->getSourceRange());
+ C.EmitReport(R);
+ }
+}
+
+void ento::registerFixedAddressChecker(CheckerManager &mgr) {
+ mgr.registerChecker<FixedAddressChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
new file mode 100644
index 0000000..135b81d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -0,0 +1,740 @@
+//== GenericTaintChecker.cpp -----------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker defines the attack surface for generic taint propagation.
+//
+// The taint information produced by it might be useful to other checkers. For
+// example, checkers should report errors which involve tainted data more
+// aggressively, even if the involved symbols are under-constrained.
+//
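+// A minimal, hypothetical example of a flow this checker reports:
+//
+//   char cmd[64];
+//   scanf("%63s", cmd);   // 'cmd' becomes tainted (attack surface)
+//   system(cmd);          // warn: untrusted data is passed to a system call
+//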
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/Basic/Builtins.h"
+#include <climits>
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class GenericTaintChecker : public Checker< check::PostStmt<CallExpr>,
+ check::PreStmt<CallExpr> > {
+public:
+ static void *getTag() { static int Tag; return &Tag; }
+
+ void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPostStmt(const DeclRefExpr *DRE, CheckerContext &C) const;
+
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+private:
+ static const unsigned InvalidArgIndex = UINT_MAX;
+ /// Denotes the return value.
+ static const unsigned ReturnValueIndex = UINT_MAX - 1;
+
+ mutable OwningPtr<BugType> BT;
+ inline void initBugType() const {
+ if (!BT)
+ BT.reset(new BugType("Use of Untrusted Data", "Untrusted Data"));
+ }
+
+ /// \brief Catch taint related bugs. Check if tainted data is passed to a
+ /// system call etc.
+ bool checkPre(const CallExpr *CE, CheckerContext &C) const;
+
+ /// \brief Add taint sources on a pre-visit.
+ void addSourcesPre(const CallExpr *CE, CheckerContext &C) const;
+
+ /// \brief Propagate taint generated at pre-visit.
+ bool propagateFromPre(const CallExpr *CE, CheckerContext &C) const;
+
+ /// \brief Add taint sources on a post visit.
+ void addSourcesPost(const CallExpr *CE, CheckerContext &C) const;
+
+ /// Check if the region the expression evaluates to is the standard input,
+ /// and thus, is tainted.
+ static bool isStdin(const Expr *E, CheckerContext &C);
+
+ /// \brief Given a pointer argument, get the symbol of the value it contains
+ /// (points to).
+ static SymbolRef getPointedToSymbol(CheckerContext &C, const Expr *Arg);
+
+ /// Functions defining the attack surface.
+ typedef ProgramStateRef (GenericTaintChecker::*FnCheck)(const CallExpr *,
+ CheckerContext &C) const;
+ ProgramStateRef postScanf(const CallExpr *CE, CheckerContext &C) const;
+ ProgramStateRef postSocket(const CallExpr *CE, CheckerContext &C) const;
+ ProgramStateRef postRetTaint(const CallExpr *CE, CheckerContext &C) const;
+
+ /// Taint the scanned input if the file is tainted.
+ ProgramStateRef preFscanf(const CallExpr *CE, CheckerContext &C) const;
+
+ /// Check for CWE-134: Uncontrolled Format String.
+ static const char MsgUncontrolledFormatString[];
+ bool checkUncontrolledFormatString(const CallExpr *CE,
+ CheckerContext &C) const;
+
+ /// Check for:
+ /// CERT/STR02-C. "Sanitize data passed to complex subsystems"
+ /// CWE-78, "Failure to Sanitize Data into an OS Command"
+ static const char MsgSanitizeSystemArgs[];
+ bool checkSystemCall(const CallExpr *CE, StringRef Name,
+ CheckerContext &C) const;
+
+ /// Check if tainted data is used as a buffer size in strn.. functions,
+ /// and allocators.
+ static const char MsgTaintedBufferSize[];
+ bool checkTaintedBufferSize(const CallExpr *CE, const FunctionDecl *FDecl,
+ CheckerContext &C) const;
+
+ /// Generate a report if the expression is tainted or points to tainted data.
+ bool generateReportIfTainted(const Expr *E, const char Msg[],
+ CheckerContext &C) const;
+
+
+ typedef llvm::SmallVector<unsigned, 2> ArgVector;
+
+ /// \brief A struct used to specify taint propagation rules for a function.
+ ///
+ /// If any of the possible taint source arguments is tainted, all of the
+ /// destination arguments should also be tainted. Use InvalidArgIndex in the
+ /// src list to specify that all of the arguments can introduce taint. Use
+ /// InvalidArgIndex in the dst arguments to signify that all the non-const
+ /// pointer and reference arguments might be tainted on return. If
+ /// ReturnValueIndex is added to the dst list, the return value will be
+ /// tainted.
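+ ///
+ /// For example, the rule registered below for 'read(fd, buf, count)' is
+ /// TaintPropagationRule(0, 2, 1, true): if the descriptor (arg 0) or the
+ /// size (arg 2) is tainted, the buffer (arg 1) and the return value are
+ /// marked as tainted after the call.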
+ struct TaintPropagationRule {
+ /// List of arguments which can be taint sources and should be checked.
+ ArgVector SrcArgs;
+ /// List of arguments which should be tainted on function return.
+ ArgVector DstArgs;
+ // TODO: Check if using other data structures would be more optimal.
+
+ TaintPropagationRule() {}
+
+ TaintPropagationRule(unsigned SArg,
+ unsigned DArg, bool TaintRet = false) {
+ SrcArgs.push_back(SArg);
+ DstArgs.push_back(DArg);
+ if (TaintRet)
+ DstArgs.push_back(ReturnValueIndex);
+ }
+
+ TaintPropagationRule(unsigned SArg1, unsigned SArg2,
+ unsigned DArg, bool TaintRet = false) {
+ SrcArgs.push_back(SArg1);
+ SrcArgs.push_back(SArg2);
+ DstArgs.push_back(DArg);
+ if (TaintRet)
+ DstArgs.push_back(ReturnValueIndex);
+ }
+
+ /// Get the propagation rule for a given function.
+ static TaintPropagationRule
+ getTaintPropagationRule(const FunctionDecl *FDecl,
+ StringRef Name,
+ CheckerContext &C);
+
+ inline void addSrcArg(unsigned A) { SrcArgs.push_back(A); }
+ inline void addDstArg(unsigned A) { DstArgs.push_back(A); }
+
+ inline bool isNull() const { return SrcArgs.empty(); }
+
+ inline bool isDestinationArgument(unsigned ArgNum) const {
+ return (std::find(DstArgs.begin(),
+ DstArgs.end(), ArgNum) != DstArgs.end());
+ }
+
+ static inline bool isTaintedOrPointsToTainted(const Expr *E,
+ ProgramStateRef State,
+ CheckerContext &C) {
+ return (State->isTainted(E, C.getLocationContext()) || isStdin(E, C) ||
+ (E->getType().getTypePtr()->isPointerType() &&
+ State->isTainted(getPointedToSymbol(C, E))));
+ }
+
+ /// \brief Pre-process a function which propagates taint according to the
+ /// taint rule.
+ ProgramStateRef process(const CallExpr *CE, CheckerContext &C) const;
+
+ };
+};
+
+const unsigned GenericTaintChecker::ReturnValueIndex;
+const unsigned GenericTaintChecker::InvalidArgIndex;
+
+const char GenericTaintChecker::MsgUncontrolledFormatString[] =
+ "Untrusted data is used as a format string "
+ "(CWE-134: Uncontrolled Format String)";
+
+const char GenericTaintChecker::MsgSanitizeSystemArgs[] =
+ "Untrusted data is passed to a system call "
+ "(CERT/STR02-C. Sanitize data passed to complex subsystems)";
+
+const char GenericTaintChecker::MsgTaintedBufferSize[] =
+ "Untrusted data is used to specify the buffer size "
+ "(CERT/STR31-C. Guarantee that storage for strings has sufficient space for "
+ "character data and the null terminator)";
+
+} // end of anonymous namespace
+
+/// A set used to pass information from the call pre-visit to the call
+/// post-visit. The values are unsigned integers: either ReturnValueIndex or
+/// the index of a pointer/reference argument whose pointee should be tainted
+/// on return.
+namespace { struct TaintArgsOnPostVisit{}; }
+namespace clang { namespace ento {
+template<> struct ProgramStateTrait<TaintArgsOnPostVisit>
+ : public ProgramStatePartialTrait<llvm::ImmutableSet<unsigned> > {
+ static void *GDMIndex() { return GenericTaintChecker::getTag(); }
+};
+}}
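+
+// The trait is written during the call pre-visit (see
+// TaintPropagationRule::process and preFscanf, which call
+// State->add<TaintArgsOnPostVisit>(ArgNum)) and is read and cleared during
+// the post-visit in propagateFromPre.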
+
+GenericTaintChecker::TaintPropagationRule
+GenericTaintChecker::TaintPropagationRule::getTaintPropagationRule(
+ const FunctionDecl *FDecl,
+ StringRef Name,
+ CheckerContext &C) {
+ // TODO: Currently, we might lose precision here: we always mark a return
+ // value as tainted even if it's just a pointer pointing to tainted data.
+
+ // Check for exact name match for functions without builtin substitutes.
+ TaintPropagationRule Rule = llvm::StringSwitch<TaintPropagationRule>(Name)
+ .Case("atoi", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("atol", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("atoll", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("getc", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("fgetc", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("getc_unlocked", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("getw", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("toupper", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("tolower", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("strchr", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("strrchr", TaintPropagationRule(0, ReturnValueIndex))
+ .Case("read", TaintPropagationRule(0, 2, 1, true))
+ .Case("pread", TaintPropagationRule(InvalidArgIndex, 1, true))
+ .Case("gets", TaintPropagationRule(InvalidArgIndex, 0, true))
+ .Case("fgets", TaintPropagationRule(2, 0, true))
+ .Case("getline", TaintPropagationRule(2, 0))
+ .Case("getdelim", TaintPropagationRule(3, 0))
+ .Case("fgetln", TaintPropagationRule(0, ReturnValueIndex))
+ .Default(TaintPropagationRule());
+
+ if (!Rule.isNull())
+ return Rule;
+
+ // Check if it's one of the memory setting/copying functions.
+ // This check is specialized but faster than calling isCLibraryFunction.
+ unsigned BId = 0;
+ if ( (BId = FDecl->getMemoryFunctionKind()) )
+ switch(BId) {
+ case Builtin::BImemcpy:
+ case Builtin::BImemmove:
+ case Builtin::BIstrncpy:
+ case Builtin::BIstrncat:
+ return TaintPropagationRule(1, 2, 0, true);
+ case Builtin::BIstrlcpy:
+ case Builtin::BIstrlcat:
+ return TaintPropagationRule(1, 2, 0, false);
+ case Builtin::BIstrndup:
+ return TaintPropagationRule(0, 1, ReturnValueIndex);
+
+ default:
+ break;
+ };
+
+ // Process all other functions which could be defined as builtins.
+ if (Rule.isNull()) {
+ if (C.isCLibraryFunction(FDecl, "snprintf") ||
+ C.isCLibraryFunction(FDecl, "sprintf"))
+ return TaintPropagationRule(InvalidArgIndex, 0, true);
+ else if (C.isCLibraryFunction(FDecl, "strcpy") ||
+ C.isCLibraryFunction(FDecl, "stpcpy") ||
+ C.isCLibraryFunction(FDecl, "strcat"))
+ return TaintPropagationRule(1, 0, true);
+ else if (C.isCLibraryFunction(FDecl, "bcopy"))
+ return TaintPropagationRule(0, 2, 1, false);
+ else if (C.isCLibraryFunction(FDecl, "strdup") ||
+ C.isCLibraryFunction(FDecl, "strdupa"))
+ return TaintPropagationRule(0, ReturnValueIndex);
+ else if (C.isCLibraryFunction(FDecl, "wcsdup"))
+ return TaintPropagationRule(0, ReturnValueIndex);
+ }
+
+ // Skipping the following functions, since they might be used for cleansing
+ // or smart memory copy:
+ // - memccpy - copying until hitting a special character.
+
+ return TaintPropagationRule();
+}
+
+void GenericTaintChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ // Check for errors first.
+ if (checkPre(CE, C))
+ return;
+
+ // Add taint second.
+ addSourcesPre(CE, C);
+}
+
+void GenericTaintChecker::checkPostStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ if (propagateFromPre(CE, C))
+ return;
+ addSourcesPost(CE, C);
+}
+
+void GenericTaintChecker::addSourcesPre(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef State = 0;
+ const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+ StringRef Name = C.getCalleeName(FDecl);
+ if (Name.empty())
+ return;
+
+ // First, try generating a propagation rule for this function.
+ TaintPropagationRule Rule =
+ TaintPropagationRule::getTaintPropagationRule(FDecl, Name, C);
+ if (!Rule.isNull()) {
+ State = Rule.process(CE, C);
+ if (!State)
+ return;
+ C.addTransition(State);
+ return;
+ }
+
+ // Otherwise, check if we have custom pre-processing implemented.
+ FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
+ .Case("fscanf", &GenericTaintChecker::preFscanf)
+ .Default(0);
+ // Check and evaluate the call.
+ if (evalFunction)
+ State = (this->*evalFunction)(CE, C);
+ if (!State)
+ return;
+ C.addTransition(State);
+
+}
+
+bool GenericTaintChecker::propagateFromPre(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ // Depending on what was tainted at pre-visit, we determined a set of
+ // arguments which should be tainted after the function returns. These are
+ // stored in the state as TaintArgsOnPostVisit set.
+ llvm::ImmutableSet<unsigned> TaintArgs = State->get<TaintArgsOnPostVisit>();
+ if (TaintArgs.isEmpty())
+ return false;
+
+ for (llvm::ImmutableSet<unsigned>::iterator
+ I = TaintArgs.begin(), E = TaintArgs.end(); I != E; ++I) {
+ unsigned ArgNum = *I;
+
+ // Special handling for the tainted return value.
+ if (ArgNum == ReturnValueIndex) {
+ State = State->addTaint(CE, C.getLocationContext());
+ continue;
+ }
+
+ // The arguments are pointer arguments. The data they are pointing at is
+ // tainted after the call.
+ if (CE->getNumArgs() < (ArgNum + 1))
+ return false;
+ const Expr* Arg = CE->getArg(ArgNum);
+ SymbolRef Sym = getPointedToSymbol(C, Arg);
+ if (Sym)
+ State = State->addTaint(Sym);
+ }
+
+ // Clear up the taint info from the state.
+ State = State->remove<TaintArgsOnPostVisit>();
+
+ if (State != C.getState()) {
+ C.addTransition(State);
+ return true;
+ }
+ return false;
+}
+
+void GenericTaintChecker::addSourcesPost(const CallExpr *CE,
+ CheckerContext &C) const {
+ // Define the attack surface.
+ // Set the evaluation function by switching on the callee name.
+ StringRef Name = C.getCalleeName(CE);
+ if (Name.empty())
+ return;
+ FnCheck evalFunction = llvm::StringSwitch<FnCheck>(Name)
+ .Case("scanf", &GenericTaintChecker::postScanf)
+ // TODO: Add support for vfscanf & family.
+ .Case("getchar", &GenericTaintChecker::postRetTaint)
+ .Case("getchar_unlocked", &GenericTaintChecker::postRetTaint)
+ .Case("getenv", &GenericTaintChecker::postRetTaint)
+ .Case("fopen", &GenericTaintChecker::postRetTaint)
+ .Case("fdopen", &GenericTaintChecker::postRetTaint)
+ .Case("freopen", &GenericTaintChecker::postRetTaint)
+ .Case("getch", &GenericTaintChecker::postRetTaint)
+ .Case("wgetch", &GenericTaintChecker::postRetTaint)
+ .Case("socket", &GenericTaintChecker::postSocket)
+ .Default(0);
+
+ // If the callee isn't defined, it is not of security concern.
+ // Check and evaluate the call.
+ ProgramStateRef State = 0;
+ if (evalFunction)
+ State = (this->*evalFunction)(CE, C);
+ if (!State)
+ return;
+
+ C.addTransition(State);
+}
+
+bool GenericTaintChecker::checkPre(const CallExpr *CE, CheckerContext &C) const{
+
+ if (checkUncontrolledFormatString(CE, C))
+ return true;
+
+ const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+ StringRef Name = C.getCalleeName(FDecl);
+ if (Name.empty())
+ return false;
+
+ if (checkSystemCall(CE, Name, C))
+ return true;
+
+ if (checkTaintedBufferSize(CE, FDecl, C))
+ return true;
+
+ return false;
+}
+
+SymbolRef GenericTaintChecker::getPointedToSymbol(CheckerContext &C,
+ const Expr* Arg) {
+ ProgramStateRef State = C.getState();
+ SVal AddrVal = State->getSVal(Arg->IgnoreParens(), C.getLocationContext());
+ if (AddrVal.isUnknownOrUndef())
+ return 0;
+
+ Loc *AddrLoc = dyn_cast<Loc>(&AddrVal);
+ if (!AddrLoc)
+ return 0;
+
+ const PointerType *ArgTy =
+ dyn_cast<PointerType>(Arg->getType().getCanonicalType().getTypePtr());
+ SVal Val = State->getSVal(*AddrLoc,
+ ArgTy ? ArgTy->getPointeeType(): QualType());
+ return Val.getAsSymbol();
+}
+
+ProgramStateRef
+GenericTaintChecker::TaintPropagationRule::process(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ // Check for taint in arguments.
+ bool IsTainted = false;
+ for (ArgVector::const_iterator I = SrcArgs.begin(),
+ E = SrcArgs.end(); I != E; ++I) {
+ unsigned ArgNum = *I;
+
+ if (ArgNum == InvalidArgIndex) {
+ // Check if any of the arguments is tainted, but skip the
+ // destination arguments.
+ for (unsigned int i = 0; i < CE->getNumArgs(); ++i) {
+ if (isDestinationArgument(i))
+ continue;
+ if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(i), State, C)))
+ break;
+ }
+ break;
+ }
+
+ if (CE->getNumArgs() < (ArgNum + 1))
+ return State;
+ if ((IsTainted = isTaintedOrPointsToTainted(CE->getArg(ArgNum), State, C)))
+ break;
+ }
+ if (!IsTainted)
+ return State;
+
+ // Mark the arguments which should be tainted after the function returns.
+ for (ArgVector::const_iterator I = DstArgs.begin(),
+ E = DstArgs.end(); I != E; ++I) {
+ unsigned ArgNum = *I;
+
+ // Should we mark all arguments as tainted?
+ if (ArgNum == InvalidArgIndex) {
+ // For all pointer and references that were passed in:
+ // If they are not pointing to const data, mark data as tainted.
+ // TODO: So far we are just going one level down; ideally we'd need to
+ // recurse here.
+ for (unsigned int i = 0; i < CE->getNumArgs(); ++i) {
+ const Expr *Arg = CE->getArg(i);
+ // Process pointer argument.
+ const Type *ArgTy = Arg->getType().getTypePtr();
+ QualType PType = ArgTy->getPointeeType();
+ if ((!PType.isNull() && !PType.isConstQualified())
+ || (ArgTy->isReferenceType() && !Arg->getType().isConstQualified()))
+ State = State->add<TaintArgsOnPostVisit>(i);
+ }
+ continue;
+ }
+
+ // Should mark the return value?
+ if (ArgNum == ReturnValueIndex) {
+ State = State->add<TaintArgsOnPostVisit>(ReturnValueIndex);
+ continue;
+ }
+
+ // Mark the given argument.
+ assert(ArgNum < CE->getNumArgs());
+ State = State->add<TaintArgsOnPostVisit>(ArgNum);
+ }
+
+ return State;
+}
+
+
+// If argument 0 (file descriptor) is tainted, all arguments except for arg 0
+// and arg 1 should get taint.
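+// For example (illustrative): after 'fscanf(f, "%d", &n)' with a tainted
+// FILE* 'f', the data written through '&n' is marked as tainted once the
+// call returns.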
+ProgramStateRef GenericTaintChecker::preFscanf(const CallExpr *CE,
+ CheckerContext &C) const {
+ assert(CE->getNumArgs() >= 2);
+ ProgramStateRef State = C.getState();
+
+ // Check if the file descriptor is tainted.
+ if (State->isTainted(CE->getArg(0), C.getLocationContext()) ||
+ isStdin(CE->getArg(0), C)) {
+ // All arguments except for the first two should get taint.
+ for (unsigned int i = 2; i < CE->getNumArgs(); ++i)
+ State = State->add<TaintArgsOnPostVisit>(i);
+ return State;
+ }
+
+ return 0;
+}
+
+
+// If argument 0 (the protocol domain) is a network domain, the return value
+// should get taint.
+ProgramStateRef GenericTaintChecker::postSocket(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (CE->getNumArgs() < 3)
+ return State;
+
+ SourceLocation DomLoc = CE->getArg(0)->getExprLoc();
+ StringRef DomName = C.getMacroNameOrSpelling(DomLoc);
+ // White list the internal communication protocols.
+ if (DomName.equals("AF_SYSTEM") || DomName.equals("AF_LOCAL") ||
+ DomName.equals("AF_UNIX") || DomName.equals("AF_RESERVED_36"))
+ return State;
+ State = State->addTaint(CE, C.getLocationContext());
+ return State;
+}
+
+ProgramStateRef GenericTaintChecker::postScanf(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (CE->getNumArgs() < 2)
+ return State;
+
+ SVal x = State->getSVal(CE->getArg(1), C.getLocationContext());
+ // All arguments except for the very first one should get taint.
+ for (unsigned int i = 1; i < CE->getNumArgs(); ++i) {
+ // The arguments are pointer arguments. The data they are pointing at is
+ // tainted after the call.
+ const Expr* Arg = CE->getArg(i);
+ SymbolRef Sym = getPointedToSymbol(C, Arg);
+ if (Sym)
+ State = State->addTaint(Sym);
+ }
+ return State;
+}
+
+ProgramStateRef GenericTaintChecker::postRetTaint(const CallExpr *CE,
+ CheckerContext &C) const {
+ return C.getState()->addTaint(CE, C.getLocationContext());
+}
+
+bool GenericTaintChecker::isStdin(const Expr *E, CheckerContext &C) {
+ ProgramStateRef State = C.getState();
+ SVal Val = State->getSVal(E, C.getLocationContext());
+
+ // stdin is a pointer, so it would be a region.
+ const MemRegion *MemReg = Val.getAsRegion();
+
+ // The region should be symbolic; we do not know its value.
+ const SymbolicRegion *SymReg = dyn_cast_or_null<SymbolicRegion>(MemReg);
+ if (!SymReg)
+ return false;
+
+ // Get its symbol and find the declaration region it points to.
+ const SymbolRegionValue *Sm =dyn_cast<SymbolRegionValue>(SymReg->getSymbol());
+ if (!Sm)
+ return false;
+ const DeclRegion *DeclReg = dyn_cast_or_null<DeclRegion>(Sm->getRegion());
+ if (!DeclReg)
+ return false;
+
+ // This region corresponds to a declaration, find out if it's a global/extern
+ // variable named stdin with the proper type.
+ if (const VarDecl *D = dyn_cast_or_null<VarDecl>(DeclReg->getDecl())) {
+ D = D->getCanonicalDecl();
+ if ((D->getName().find("stdin") != StringRef::npos) && D->isExternC())
+ if (const PointerType * PtrTy =
+ dyn_cast<PointerType>(D->getType().getTypePtr()))
+ if (PtrTy->getPointeeType() == C.getASTContext().getFILEType())
+ return true;
+ }
+ return false;
+}
+
+static bool getPrintfFormatArgumentNum(const CallExpr *CE,
+ const CheckerContext &C,
+ unsigned int &ArgNum) {
+ // Find if the function contains a format string argument.
+ // Handles: fprintf, printf, sprintf, snprintf, vfprintf, vprintf, vsprintf,
+ // vsnprintf, syslog, custom annotated functions.
+ const FunctionDecl *FDecl = C.getCalleeDecl(CE);
+ if (!FDecl)
+ return false;
+ for (specific_attr_iterator<FormatAttr>
+ i = FDecl->specific_attr_begin<FormatAttr>(),
+ e = FDecl->specific_attr_end<FormatAttr>(); i != e ; ++i) {
+
+ const FormatAttr *Format = *i;
+ ArgNum = Format->getFormatIdx() - 1;
+ if ((Format->getType() == "printf") && CE->getNumArgs() > ArgNum)
+ return true;
+ }
+
+ // Or if a function is named setproctitle (this is a heuristic).
+ if (C.getCalleeName(CE).find("setproctitle") != StringRef::npos) {
+ ArgNum = 0;
+ return true;
+ }
+
+ return false;
+}
+
+bool GenericTaintChecker::generateReportIfTainted(const Expr *E,
+ const char Msg[],
+ CheckerContext &C) const {
+ assert(E);
+
+ // Check for taint.
+ ProgramStateRef State = C.getState();
+ if (!State->isTainted(getPointedToSymbol(C, E)) &&
+ !State->isTainted(E, C.getLocationContext()))
+ return false;
+
+ // Generate diagnostic.
+ if (ExplodedNode *N = C.addTransition()) {
+ initBugType();
+ BugReport *report = new BugReport(*BT, Msg, N);
+ report->addRange(E->getSourceRange());
+ C.EmitReport(report);
+ return true;
+ }
+ return false;
+}
+
+bool GenericTaintChecker::checkUncontrolledFormatString(const CallExpr *CE,
+ CheckerContext &C) const{
+ // Check if the function contains a format string argument.
+ unsigned int ArgNum = 0;
+ if (!getPrintfFormatArgumentNum(CE, C, ArgNum))
+ return false;
+
+ // If either the format string content or the pointer itself is tainted, warn.
+ if (generateReportIfTainted(CE->getArg(ArgNum),
+ MsgUncontrolledFormatString, C))
+ return true;
+ return false;
+}
+
+bool GenericTaintChecker::checkSystemCall(const CallExpr *CE,
+ StringRef Name,
+ CheckerContext &C) const {
+ // TODO: It might make sense to run this check on demand. In some cases,
+ // we should check if the environment has been cleansed here. We also might
+ // need to know if the user was reset before these calls (seteuid).
+ unsigned ArgNum = llvm::StringSwitch<unsigned>(Name)
+ .Case("system", 0)
+ .Case("popen", 0)
+ .Case("execl", 0)
+ .Case("execle", 0)
+ .Case("execlp", 0)
+ .Case("execv", 0)
+ .Case("execvp", 0)
+ .Case("execvP", 0)
+ .Case("execve", 0)
+ .Case("dlopen", 0)
+ .Default(UINT_MAX);
+
+ if (ArgNum == UINT_MAX || CE->getNumArgs() < (ArgNum + 1))
+ return false;
+
+ if (generateReportIfTainted(CE->getArg(ArgNum),
+ MsgSanitizeSystemArgs, C))
+ return true;
+
+ return false;
+}
+
+// TODO: Should this check be a part of the CString checker?
+// If yes, should taint be a global setting?
+bool GenericTaintChecker::checkTaintedBufferSize(const CallExpr *CE,
+ const FunctionDecl *FDecl,
+ CheckerContext &C) const {
+ // If the function has a buffer size argument, set ArgNum.
+ unsigned ArgNum = InvalidArgIndex;
+ unsigned BId = 0;
+ if ( (BId = FDecl->getMemoryFunctionKind()) )
+ switch(BId) {
+ case Builtin::BImemcpy:
+ case Builtin::BImemmove:
+ case Builtin::BIstrncpy:
+ ArgNum = 2;
+ break;
+ case Builtin::BIstrndup:
+ ArgNum = 1;
+ break;
+ default:
+ break;
+ };
+
+ if (ArgNum == InvalidArgIndex) {
+ if (C.isCLibraryFunction(FDecl, "malloc") ||
+ C.isCLibraryFunction(FDecl, "calloc") ||
+ C.isCLibraryFunction(FDecl, "alloca"))
+ ArgNum = 0;
+ else if (C.isCLibraryFunction(FDecl, "memccpy"))
+ ArgNum = 3;
+ else if (C.isCLibraryFunction(FDecl, "realloc"))
+ ArgNum = 1;
+ else if (C.isCLibraryFunction(FDecl, "bcopy"))
+ ArgNum = 2;
+ }
+
+ if (ArgNum != InvalidArgIndex && CE->getNumArgs() > ArgNum &&
+ generateReportIfTainted(CE->getArg(ArgNum), MsgTaintedBufferSize, C))
+ return true;
+
+ return false;
+}
+
+void ento::registerGenericTaintChecker(CheckerManager &mgr) {
+ mgr.registerChecker<GenericTaintChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
new file mode 100644
index 0000000..c08f163
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IdempotentOperationChecker.cpp
@@ -0,0 +1,747 @@
+//==- IdempotentOperationChecker.cpp - Idempotent Operations ----*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of path-sensitive checks for idempotent and/or
+// tautological operations. Each potential operation is checked along all paths
+// to see if every path results in a pointless operation.
+// +-------------------------------------------+
+// |Table of idempotent/tautological operations|
+// +-------------------------------------------+
+//+--------------------------------------------------------------------------+
+//|Operator | x op x | x op 1 | 1 op x | x op 0 | 0 op x | x op ~0 | ~0 op x |
+//+--------------------------------------------------------------------------+
+// +, += | | | | x | x | |
+// -, -= | | | | x | -x | |
+// *, *= | | x | x | 0 | 0 | |
+// /, /= | 1 | x | | N/A | 0 | |
+// &, &= | x | | | 0 | 0 | x | x
+// |, |= | x | | | x | x | ~0 | ~0
+// ^, ^= | 0 | | | x | x | |
+// <<, <<= | | | | x | 0 | |
+// >>, >>= | | | | x | 0 | |
+// || | 1 | 1 | 1 | x | x | 1 | 1
+// && | 1 | x | x | 0 | 0 | x | x
+// = | x | | | | | |
+// == | 1 | | | | | |
+// >= | 1 | | | | | |
+// <= | 1 | | | | | |
+// > | 0 | | | | | |
+// < | 0 | | | | | |
+// != | 0 | | | | | |
+//===----------------------------------------------------------------------===//
+//
+// Things TODO:
+// - Improved error messages
+// - Handle mixed assumptions (which assumptions can belong together?)
+// - Finer grained false positive control (levels)
+// - Handling ~0 values
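+//
+// For example, a tautological comparison such as 'if (x == x)', where both
+// operands hold the same value on every analyzed path, is reported as an
+// idempotent operation.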
+
+#include "ClangSACheckers.h"
+#include "clang/Analysis/CFGStmtMap.h"
+#include "clang/Analysis/Analyses/PseudoConstantAnalysis.h"
+#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/AST/Stmt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class IdempotentOperationChecker
+ : public Checker<check::PreStmt<BinaryOperator>,
+ check::PostStmt<BinaryOperator>,
+ check::EndAnalysis> {
+public:
+ void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
+ void checkPostStmt(const BinaryOperator *B, CheckerContext &C) const;
+ void checkEndAnalysis(ExplodedGraph &G, BugReporter &B,ExprEngine &Eng) const;
+
+private:
+ // Our assumption about a particular operation.
+ enum Assumption { Possible = 0, Impossible, Equal, LHSis1, RHSis1, LHSis0,
+ RHSis0 };
+
+ static void UpdateAssumption(Assumption &A, const Assumption &New);
+
+ // False positive reduction methods
+ static bool isSelfAssign(const Expr *LHS, const Expr *RHS);
+ static bool isUnused(const Expr *E, AnalysisDeclContext *AC);
+ static bool isTruncationExtensionAssignment(const Expr *LHS,
+ const Expr *RHS);
+ static bool pathWasCompletelyAnalyzed(AnalysisDeclContext *AC,
+ const CFGBlock *CB,
+ const CoreEngine &CE);
+ static bool CanVary(const Expr *Ex,
+ AnalysisDeclContext *AC);
+ static bool isConstantOrPseudoConstant(const DeclRefExpr *DR,
+ AnalysisDeclContext *AC);
+ static bool containsNonLocalVarDecl(const Stmt *S);
+
+ // Hash table and related data structures
+ struct BinaryOperatorData {
+ BinaryOperatorData() : assumption(Possible) {}
+
+ Assumption assumption;
+ ExplodedNodeSet explodedNodes; // Set of ExplodedNodes that refer to a
+ // BinaryOperator
+ };
+ typedef llvm::DenseMap<const BinaryOperator *, BinaryOperatorData>
+ AssumptionMap;
+ mutable AssumptionMap hash;
+};
+}
+
+void IdempotentOperationChecker::checkPreStmt(const BinaryOperator *B,
+ CheckerContext &C) const {
+ // Find or create an entry in the hash for this BinaryOperator instance.
+ // If we haven't done a lookup before, it will get default initialized to
+ // 'Possible'. At this stage we do not store the ExplodedNode, as it has not
+ // been created yet.
+ BinaryOperatorData &Data = hash[B];
+ Assumption &A = Data.assumption;
+ AnalysisDeclContext *AC = C.getCurrentAnalysisDeclContext();
+
+ // If we already have visited this node on a path that does not contain an
+ // idempotent operation, return immediately.
+ if (A == Impossible)
+ return;
+
+ // Retrieve both sides of the operator and determine if they can vary (which
+ // may mean this is a false positive).
+ const Expr *LHS = B->getLHS();
+ const Expr *RHS = B->getRHS();
+
+ // At this stage we can calculate whether each side contains a false positive
+ // that applies to all operators. We only need to calculate this the first
+ // time.
+ bool LHSContainsFalsePositive = false, RHSContainsFalsePositive = false;
+ if (A == Possible) {
+ // An expression contains a false positive if it can't vary, or if it
+ // contains a known false positive VarDecl.
+ LHSContainsFalsePositive = !CanVary(LHS, AC)
+ || containsNonLocalVarDecl(LHS);
+ RHSContainsFalsePositive = !CanVary(RHS, AC)
+ || containsNonLocalVarDecl(RHS);
+ }
+
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal LHSVal = state->getSVal(LHS, LCtx);
+ SVal RHSVal = state->getSVal(RHS, LCtx);
+
+ // If either value is unknown, we can't be 100% sure of all paths.
+ if (LHSVal.isUnknownOrUndef() || RHSVal.isUnknownOrUndef()) {
+ A = Impossible;
+ return;
+ }
+ BinaryOperator::Opcode Op = B->getOpcode();
+
+ // Dereference the LHS SVal if this is an assign operation
+ switch (Op) {
+ default:
+ break;
+
+ // Fall through intentional
+ case BO_AddAssign:
+ case BO_SubAssign:
+ case BO_MulAssign:
+ case BO_DivAssign:
+ case BO_AndAssign:
+ case BO_OrAssign:
+ case BO_XorAssign:
+ case BO_ShlAssign:
+ case BO_ShrAssign:
+ case BO_Assign:
+ // Assign statements have one extra level of indirection
+ if (!isa<Loc>(LHSVal)) {
+ A = Impossible;
+ return;
+ }
+ LHSVal = state->getSVal(cast<Loc>(LHSVal), LHS->getType());
+ }
+
+
+ // We now check for various cases which result in an idempotent operation.
+
+ // x op x
+ switch (Op) {
+ default:
+ break; // We don't care about any other operators.
+
+ // Fall through intentional
+ case BO_Assign:
+ // x Assign x can be used to silence unused variable warnings intentionally.
+ // If this is a self assignment and the variable is referenced elsewhere,
+ // and the assignment is not a truncation or extension, then it is a false
+ // positive.
+ if (isSelfAssign(LHS, RHS)) {
+ if (!isUnused(LHS, AC) && !isTruncationExtensionAssignment(LHS, RHS)) {
+ UpdateAssumption(A, Equal);
+ return;
+ }
+ else {
+ A = Impossible;
+ return;
+ }
+ }
+
+ case BO_SubAssign:
+ case BO_DivAssign:
+ case BO_AndAssign:
+ case BO_OrAssign:
+ case BO_XorAssign:
+ case BO_Sub:
+ case BO_Div:
+ case BO_And:
+ case BO_Or:
+ case BO_Xor:
+ case BO_LOr:
+ case BO_LAnd:
+ case BO_EQ:
+ case BO_NE:
+ if (LHSVal != RHSVal || LHSContainsFalsePositive
+ || RHSContainsFalsePositive)
+ break;
+ UpdateAssumption(A, Equal);
+ return;
+ }
+
+ // x op 1
+ switch (Op) {
+ default:
+ break; // We don't care about any other operators.
+
+ // Fall through intentional
+ case BO_MulAssign:
+ case BO_DivAssign:
+ case BO_Mul:
+ case BO_Div:
+ case BO_LOr:
+ case BO_LAnd:
+ if (!RHSVal.isConstant(1) || RHSContainsFalsePositive)
+ break;
+ UpdateAssumption(A, RHSis1);
+ return;
+ }
+
+ // 1 op x
+ switch (Op) {
+ default:
+ break; // We don't care about any other operators.
+
+ // Fall through intentional
+ case BO_MulAssign:
+ case BO_Mul:
+ case BO_LOr:
+ case BO_LAnd:
+ if (!LHSVal.isConstant(1) || LHSContainsFalsePositive)
+ break;
+ UpdateAssumption(A, LHSis1);
+ return;
+ }
+
+ // x op 0
+ switch (Op) {
+ default:
+ break; // We don't care about any other operators.
+
+ // Fall through intentional
+ case BO_AddAssign:
+ case BO_SubAssign:
+ case BO_MulAssign:
+ case BO_AndAssign:
+ case BO_OrAssign:
+ case BO_XorAssign:
+ case BO_Add:
+ case BO_Sub:
+ case BO_Mul:
+ case BO_And:
+ case BO_Or:
+ case BO_Xor:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_LOr:
+ case BO_LAnd:
+ if (!RHSVal.isConstant(0) || RHSContainsFalsePositive)
+ break;
+ UpdateAssumption(A, RHSis0);
+ return;
+ }
+
+ // 0 op x
+ switch (Op) {
+ default:
+ break; // We don't care about any other operators.
+
+ // Fall through intentional
+ //case BO_AddAssign: // Common false positive
+ case BO_SubAssign: // Check only if unsigned
+ case BO_MulAssign:
+ case BO_DivAssign:
+ case BO_AndAssign:
+ //case BO_OrAssign: // Common false positive
+ //case BO_XorAssign: // Common false positive
+ case BO_ShlAssign:
+ case BO_ShrAssign:
+ case BO_Add:
+ case BO_Sub:
+ case BO_Mul:
+ case BO_Div:
+ case BO_And:
+ case BO_Or:
+ case BO_Xor:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_LOr:
+ case BO_LAnd:
+ if (!LHSVal.isConstant(0) || LHSContainsFalsePositive)
+ break;
+ UpdateAssumption(A, LHSis0);
+ return;
+ }
+
+ // If we get to this point, there has been a valid use of this operation.
+ A = Impossible;
+}
+
+// At the post visit stage, the predecessor ExplodedNode will be the
+// BinaryOperator that was just created. We use this hook to collect the
+// ExplodedNode.
+void IdempotentOperationChecker::checkPostStmt(const BinaryOperator *B,
+ CheckerContext &C) const {
+ // Add the ExplodedNode we just visited
+ BinaryOperatorData &Data = hash[B];
+
+ const Stmt *predStmt
+ = cast<StmtPoint>(C.getPredecessor()->getLocation()).getStmt();
+
+ // Ignore implicit calls to setters.
+ if (!isa<BinaryOperator>(predStmt))
+ return;
+
+ Data.explodedNodes.Add(C.getPredecessor());
+}
+
+void IdempotentOperationChecker::checkEndAnalysis(ExplodedGraph &G,
+ BugReporter &BR,
+ ExprEngine &Eng) const {
+ BugType *BT = new BugType("Idempotent operation", "Dead code");
+ // Iterate over the hash to see if we have any paths with definite
+ // idempotent operations.
+ for (AssumptionMap::const_iterator i = hash.begin(); i != hash.end(); ++i) {
+ // Unpack the hash contents
+ const BinaryOperatorData &Data = i->second;
+ const Assumption &A = Data.assumption;
+ const ExplodedNodeSet &ES = Data.explodedNodes;
+
+ // If there are no nodes associated with the expression, nothing to report.
+ // FIXME: This is possible because the checker does part of processing in
+ // checkPreStmt and part in checkPostStmt.
+ if (ES.begin() == ES.end())
+ continue;
+
+ const BinaryOperator *B = i->first;
+
+ if (A == Impossible)
+ continue;
+
+ // If the analyzer did not finish, check to see if we can still emit this
+ // warning
+ if (Eng.hasWorkRemaining()) {
+ // If we can trace back
+ AnalysisDeclContext *AC = (*ES.begin())->getLocationContext()
+ ->getAnalysisDeclContext();
+ if (!pathWasCompletelyAnalyzed(AC,
+ AC->getCFGStmtMap()->getBlock(B),
+ Eng.getCoreEngine()))
+ continue;
+ }
+
+ // Select the error message and SourceRanges to report.
+ SmallString<128> buf;
+ llvm::raw_svector_ostream os(buf);
+ bool LHSRelevant = false, RHSRelevant = false;
+ switch (A) {
+ case Equal:
+ LHSRelevant = true;
+ RHSRelevant = true;
+ if (B->getOpcode() == BO_Assign)
+ os << "Assigned value is always the same as the existing value";
+ else
+ os << "Both operands to '" << B->getOpcodeStr()
+ << "' always have the same value";
+ break;
+ case LHSis1:
+ LHSRelevant = true;
+ os << "The left operand to '" << B->getOpcodeStr() << "' is always 1";
+ break;
+ case RHSis1:
+ RHSRelevant = true;
+ os << "The right operand to '" << B->getOpcodeStr() << "' is always 1";
+ break;
+ case LHSis0:
+ LHSRelevant = true;
+ os << "The left operand to '" << B->getOpcodeStr() << "' is always 0";
+ break;
+ case RHSis0:
+ RHSRelevant = true;
+ os << "The right operand to '" << B->getOpcodeStr() << "' is always 0";
+ break;
+ case Possible:
+ llvm_unreachable("Operation was never marked with an assumption");
+ case Impossible:
+ llvm_unreachable(0);
+ }
+
+ // Add a report for each ExplodedNode
+ for (ExplodedNodeSet::iterator I = ES.begin(), E = ES.end(); I != E; ++I) {
+ BugReport *report = new BugReport(*BT, os.str(), *I);
+
+ // Add source ranges and visitor hooks
+ if (LHSRelevant) {
+ const Expr *LHS = i->first->getLHS();
+ report->addRange(LHS->getSourceRange());
+ FindLastStoreBRVisitor::registerStatementVarDecls(*report, LHS);
+ }
+ if (RHSRelevant) {
+ const Expr *RHS = i->first->getRHS();
+ report->addRange(i->first->getRHS()->getSourceRange());
+ FindLastStoreBRVisitor::registerStatementVarDecls(*report, RHS);
+ }
+
+ BR.EmitReport(report);
+ }
+ }
+
+ hash.clear();
+}
+
+// Updates the current assumption given the new assumption
+inline void IdempotentOperationChecker::UpdateAssumption(Assumption &A,
+ const Assumption &New) {
+ // If the assumption is the same, there is nothing to do.
+ if (A == New)
+ return;
+
+ switch (A) {
+ // If we don't currently have an assumption, set it
+ case Possible:
+ A = New;
+ return;
+
+ // If we have determined that a valid state happened, ignore the new
+ // assumption.
+ case Impossible:
+ return;
+
+ // Any other case means that we had a different assumption last time. We don't
+ // currently support mixing assumptions for diagnostic reasons, so we set
+ // our assumption to be impossible.
+ default:
+ A = Impossible;
+ return;
+ }
+}
+
+// Check for a statement where a variable is self assigned to possibly avoid an
+// unused variable warning.
+bool IdempotentOperationChecker::isSelfAssign(const Expr *LHS, const Expr *RHS) {
+ LHS = LHS->IgnoreParenCasts();
+ RHS = RHS->IgnoreParenCasts();
+
+ const DeclRefExpr *LHS_DR = dyn_cast<DeclRefExpr>(LHS);
+ if (!LHS_DR)
+ return false;
+
+ const VarDecl *VD = dyn_cast<VarDecl>(LHS_DR->getDecl());
+ if (!VD)
+ return false;
+
+ const DeclRefExpr *RHS_DR = dyn_cast<DeclRefExpr>(RHS);
+ if (!RHS_DR)
+ return false;
+
+ if (VD != RHS_DR->getDecl())
+ return false;
+
+ return true;
+}
+
+// Returns true if the Expr points to a VarDecl that is not read anywhere
+// outside of self-assignments.
+bool IdempotentOperationChecker::isUnused(const Expr *E,
+ AnalysisDeclContext *AC) {
+ if (!E)
+ return false;
+
+ const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E->IgnoreParenCasts());
+ if (!DR)
+ return false;
+
+ const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
+ if (!VD)
+ return false;
+
+ if (AC->getPseudoConstantAnalysis()->wasReferenced(VD))
+ return false;
+
+ return true;
+}
+
+// Check for self casts truncating/extending a variable
+bool IdempotentOperationChecker::isTruncationExtensionAssignment(
+ const Expr *LHS,
+ const Expr *RHS) {
+
+ const DeclRefExpr *LHS_DR = dyn_cast<DeclRefExpr>(LHS->IgnoreParenCasts());
+ if (!LHS_DR)
+ return false;
+
+ const VarDecl *VD = dyn_cast<VarDecl>(LHS_DR->getDecl());
+ if (!VD)
+ return false;
+
+ const DeclRefExpr *RHS_DR = dyn_cast<DeclRefExpr>(RHS->IgnoreParenCasts());
+ if (!RHS_DR)
+ return false;
+
+ if (VD != RHS_DR->getDecl())
+ return false;
+
+ return dyn_cast<DeclRefExpr>(RHS->IgnoreParenLValueCasts()) == NULL;
+}
+
+// Returns false if a path to this block was not completely analyzed, or true
+// otherwise.
+bool
+IdempotentOperationChecker::pathWasCompletelyAnalyzed(AnalysisDeclContext *AC,
+ const CFGBlock *CB,
+ const CoreEngine &CE) {
+
+ CFGReverseBlockReachabilityAnalysis *CRA = AC->getCFGReachablityAnalysis();
+
+ // Test for reachability from any aborted blocks to this block
+ typedef CoreEngine::BlocksExhausted::const_iterator ExhaustedIterator;
+ for (ExhaustedIterator I = CE.blocks_exhausted_begin(),
+ E = CE.blocks_exhausted_end(); I != E; ++I) {
+ const BlockEdge &BE = I->first;
+
+ // The destination block on the BlockEdge is the first block that was not
+ // analyzed. If we can reach this block from the aborted block, then this
+ // block was not completely analyzed.
+ //
+ // Also explicitly check if the current block is the destination block.
+ // While technically reachable, it means we aborted the analysis on
+ // a path that included that block.
+ const CFGBlock *destBlock = BE.getDst();
+ if (destBlock == CB || CRA->isReachable(destBlock, CB))
+ return false;
+ }
+
+ // Test for reachability from blocks we just gave up on.
+ typedef CoreEngine::BlocksAborted::const_iterator AbortedIterator;
+ for (AbortedIterator I = CE.blocks_aborted_begin(),
+ E = CE.blocks_aborted_end(); I != E; ++I) {
+ const CFGBlock *destBlock = I->first;
+ if (destBlock == CB || CRA->isReachable(destBlock, CB))
+ return false;
+ }
+
+ // For the items still on the worklist, see if they are in blocks that
+ // can eventually reach 'CB'.
+ class VisitWL : public WorkList::Visitor {
+ const CFGStmtMap *CBM;
+ const CFGBlock *TargetBlock;
+ CFGReverseBlockReachabilityAnalysis &CRA;
+ public:
+ VisitWL(const CFGStmtMap *cbm, const CFGBlock *targetBlock,
+ CFGReverseBlockReachabilityAnalysis &cra)
+ : CBM(cbm), TargetBlock(targetBlock), CRA(cra) {}
+ virtual bool visit(const WorkListUnit &U) {
+ ProgramPoint P = U.getNode()->getLocation();
+ const CFGBlock *B = 0;
+ if (StmtPoint *SP = dyn_cast<StmtPoint>(&P)) {
+ B = CBM->getBlock(SP->getStmt());
+ }
+ else if (BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ B = BE->getDst();
+ }
+ else if (BlockEntrance *BEnt = dyn_cast<BlockEntrance>(&P)) {
+ B = BEnt->getBlock();
+ }
+ else if (BlockExit *BExit = dyn_cast<BlockExit>(&P)) {
+ B = BExit->getBlock();
+ }
+ if (!B)
+ return true;
+
+ return B == TargetBlock || CRA.isReachable(B, TargetBlock);
+ }
+ };
+ VisitWL visitWL(AC->getCFGStmtMap(), CB, *CRA);
+ // Were there any items in the worklist that could potentially reach
+ // this block?
+ if (CE.getWorkList()->visitItemsInWorkList(visitWL))
+ return false;
+
+ // Verify that this block is reachable from the entry block
+ if (!CRA->isReachable(&AC->getCFG()->getEntry(), CB))
+ return false;
+
+ // If we get to this point, there is no connection to the entry block or an
+ // aborted block. This path is unreachable and we can report the error.
+ return true;
+}
+
+// Recursive function that determines whether an expression can vary. An
+// expression may be fixed at analysis time because of a compile-time constant
+// such as sizeof, or because it only involves variables that behave like
+// constants. The function returns true if the expression can vary, and false
+// otherwise.
+bool IdempotentOperationChecker::CanVary(const Expr *Ex,
+ AnalysisDeclContext *AC) {
+ // Parentheses and casts are irrelevant here
+ Ex = Ex->IgnoreParenCasts();
+
+ if (Ex->getLocStart().isMacroID())
+ return false;
+
+ switch (Ex->getStmtClass()) {
+ // Trivially true cases
+ case Stmt::ArraySubscriptExprClass:
+ case Stmt::MemberExprClass:
+ case Stmt::StmtExprClass:
+ case Stmt::CallExprClass:
+ case Stmt::VAArgExprClass:
+ case Stmt::ShuffleVectorExprClass:
+ return true;
+ default:
+ return true;
+
+ // Trivially false cases
+ case Stmt::IntegerLiteralClass:
+ case Stmt::CharacterLiteralClass:
+ case Stmt::FloatingLiteralClass:
+ case Stmt::PredefinedExprClass:
+ case Stmt::ImaginaryLiteralClass:
+ case Stmt::StringLiteralClass:
+ case Stmt::OffsetOfExprClass:
+ case Stmt::CompoundLiteralExprClass:
+ case Stmt::AddrLabelExprClass:
+ case Stmt::BinaryTypeTraitExprClass:
+ case Stmt::GNUNullExprClass:
+ case Stmt::InitListExprClass:
+ case Stmt::DesignatedInitExprClass:
+ case Stmt::BlockExprClass:
+ return false;
+
+ // Cases requiring custom logic
+ case Stmt::UnaryExprOrTypeTraitExprClass: {
+ const UnaryExprOrTypeTraitExpr *SE =
+ cast<const UnaryExprOrTypeTraitExpr>(Ex);
+ if (SE->getKind() != UETT_SizeOf)
+ return false;
+ return SE->getTypeOfArgument()->isVariableArrayType();
+ }
+ case Stmt::DeclRefExprClass:
+ // Check for constants/pseudoconstants
+ return !isConstantOrPseudoConstant(cast<DeclRefExpr>(Ex), AC);
+
+ // The next cases require recursion for subexpressions
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator *B = cast<const BinaryOperator>(Ex);
+
+ // Exclude cases involving pointer arithmetic. These are usually
+ // false positives.
+ if (B->getOpcode() == BO_Sub || B->getOpcode() == BO_Add)
+ if (B->getLHS()->getType()->getAs<PointerType>())
+ return false;
+
+ return CanVary(B->getRHS(), AC)
+ || CanVary(B->getLHS(), AC);
+ }
+ case Stmt::UnaryOperatorClass: {
+ const UnaryOperator *U = cast<const UnaryOperator>(Ex);
+ // Handle trivial case first
+ switch (U->getOpcode()) {
+ case UO_Extension:
+ return false;
+ default:
+ return CanVary(U->getSubExpr(), AC);
+ }
+ }
+ case Stmt::ChooseExprClass:
+ return CanVary(cast<const ChooseExpr>(Ex)->getChosenSubExpr(
+ AC->getASTContext()), AC);
+ case Stmt::ConditionalOperatorClass:
+ case Stmt::BinaryConditionalOperatorClass:
+ return CanVary(cast<AbstractConditionalOperator>(Ex)->getCond(), AC);
+ }
+}
+
+// Returns true if a DeclRefExpr is or behaves like a constant.
+bool IdempotentOperationChecker::isConstantOrPseudoConstant(
+ const DeclRefExpr *DR,
+ AnalysisDeclContext *AC) {
+ // Check if the type of the Decl is const-qualified
+ if (DR->getType().isConstQualified())
+ return true;
+
+ // Check for an enum
+ if (isa<EnumConstantDecl>(DR->getDecl()))
+ return true;
+
+ const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
+ if (!VD)
+ return true;
+
+ // Check if the Decl behaves like a constant. This check also takes care of
+ // static variables, which can only change between function calls if they are
+ // modified in the AST.
+ PseudoConstantAnalysis *PCA = AC->getPseudoConstantAnalysis();
+ if (PCA->isPseudoConstant(VD))
+ return true;
+
+ return false;
+}
+
+// Recursively find any substatements containing VarDecls with storage other
+// than local.
+bool IdempotentOperationChecker::containsNonLocalVarDecl(const Stmt *S) {
+ const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S);
+
+ if (DR)
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl()))
+ if (!VD->hasLocalStorage())
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsNonLocalVarDecl(child))
+ return true;
+
+ return false;
+}
+
+
+void ento::registerIdempotentOperationChecker(CheckerManager &mgr) {
+ mgr.registerChecker<IdempotentOperationChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
new file mode 100644
index 0000000..e35557f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/InterCheckerAPI.h
@@ -0,0 +1,22 @@
+//==--- InterCheckerAPI.h ---------------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file allows introduction of checker dependencies. It contains APIs for
+// inter-checker communications.
+//===----------------------------------------------------------------------===//
+
+#ifndef INTERCHECKERAPI_H_
+#define INTERCHECKERAPI_H_
+namespace clang {
+namespace ento {
+
+/// Register the checker which evaluates CString API calls.
+void registerCStringCheckerBasic(CheckerManager &Mgr);
+
+}}
+#endif /* INTERCHECKERAPI_H_ */
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp
new file mode 100644
index 0000000..b0bac33
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/IteratorsChecker.cpp
@@ -0,0 +1,603 @@
+//=== IteratorsChecker.cpp - Check for Invalidated Iterators -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines IteratorsChecker, which implements a number of small checks
+// for conditions leading to invalid iterators being used.
+// FIXME: Currently only supports 'vector' and 'deque'
+//
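+// A minimal, hypothetical example of the kind of code this checker is meant
+// to flag:
+//
+//   std::vector<int> v;
+//   std::vector<int>::iterator i = v.begin();
+//   v.push_back(1);   // may invalidate 'i'
+//   *i;               // use of a possibly invalidated iterator
+//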
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/DeclTemplate.h"
+#include "clang/Basic/SourceManager.h"
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Type.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/StringSwitch.h"
+
+
+using namespace clang;
+using namespace ento;
+
+// This is the state associated with each iterator, which includes both the
+// kind of state and the instance used to initialize it.
+// FIXME: add location where invalidated for better error reporting.
+namespace {
+class RefState {
+ enum Kind { BeginValid, EndValid, Invalid, Undefined, Unknown } K;
+ const void *VR;
+
+public:
+ RefState(Kind k, const void *vr) : K(k), VR(vr) {}
+
+ bool isValid() const { return K == BeginValid || K == EndValid; }
+ bool isInvalid() const { return K == Invalid; }
+ bool isUndefined() const { return K == Undefined; }
+ bool isUnknown() const { return K == Unknown; }
+ const MemRegion *getMemRegion() const {
+ if (K == BeginValid || K == EndValid)
+      return (const MemRegion *)VR;
+ return 0;
+ }
+ const MemberExpr *getMemberExpr() const {
+ if (K == Invalid)
+      return (const MemberExpr *)VR;
+ return 0;
+ }
+
+ bool operator==(const RefState &X) const {
+ return K == X.K && VR == X.VR;
+ }
+
+ static RefState getBeginValid(const MemRegion *vr) {
+ assert(vr);
+ return RefState(BeginValid, vr);
+ }
+ static RefState getEndValid(const MemRegion *vr) {
+ assert(vr);
+ return RefState(EndValid, vr);
+ }
+  static RefState getInvalid(const MemberExpr *ME) {
+    return RefState(Invalid, ME);
+  }
+  static RefState getUndefined() {
+    return RefState(Undefined, 0);
+  }
+  static RefState getUnknown() {
+    return RefState(Unknown, 0);
+  }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(K);
+ ID.AddPointer(VR);
+ }
+};
+
+enum RefKind { NoKind, VectorKind, VectorIteratorKind };
+
+class IteratorsChecker :
+ public Checker<check::PreStmt<CXXOperatorCallExpr>,
+ check::PreStmt<DeclStmt>,
+ check::PreStmt<CXXMemberCallExpr>,
+ check::PreStmt<CallExpr> >
+ {
+  // Bug types used when checking iterators into vectors and deques.
+ BuiltinBug *BT_Invalid, *BT_Undefined, *BT_Incompatible;
+
+public:
+ IteratorsChecker() :
+ BT_Invalid(0), BT_Undefined(0), BT_Incompatible(0)
+ {}
+ static void *getTag() { static int tag; return &tag; }
+
+ // Checker entry points.
+ void checkPreStmt(const CXXOperatorCallExpr *OCE,
+ CheckerContext &C) const;
+
+ void checkPreStmt(const DeclStmt *DS,
+ CheckerContext &C) const;
+
+ void checkPreStmt(const CXXMemberCallExpr *MCE,
+ CheckerContext &C) const;
+
+ void checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const;
+
+private:
+ ProgramStateRef handleAssign(ProgramStateRef state,
+ const Expr *lexp,
+ const Expr *rexp,
+ const LocationContext *LC) const;
+
+ ProgramStateRef handleAssign(ProgramStateRef state,
+ const MemRegion *MR,
+ const Expr *rexp,
+ const LocationContext *LC) const;
+
+ ProgramStateRef invalidateIterators(ProgramStateRef state,
+ const MemRegion *MR,
+ const MemberExpr *ME) const;
+
+ void checkExpr(CheckerContext &C, const Expr *E) const;
+
+ void checkArgs(CheckerContext &C, const CallExpr *CE) const;
+
+ const MemRegion *getRegion(ProgramStateRef state,
+ const Expr *E,
+ const LocationContext *LC) const;
+
+ const DeclRefExpr *getDeclRefExpr(const Expr *E) const;
+};
+
+class IteratorState {
+public:
+ typedef llvm::ImmutableMap<const MemRegion *, RefState> EntryMap;
+};
+} //end anonymous namespace
+
+namespace clang {
+ namespace ento {
+ template <>
+ struct ProgramStateTrait<IteratorState>
+ : public ProgramStatePartialTrait<IteratorState::EntryMap> {
+ static void *GDMIndex() { return IteratorsChecker::getTag(); }
+ };
+ }
+}
+
+void ento::registerIteratorsChecker(CheckerManager &mgr) {
+ mgr.registerChecker<IteratorsChecker>();
+}
+
+// ===============================================
+// Utility functions used by visitor functions
+// ===============================================
+
+// Check whether a templated type is std::vector or std::deque.
+static RefKind getTemplateKind(const NamedDecl *td) {
+ const DeclContext *dc = td->getDeclContext();
+ const NamespaceDecl *nameSpace = dyn_cast<NamespaceDecl>(dc);
+ if (!nameSpace || !isa<TranslationUnitDecl>(nameSpace->getDeclContext())
+ || nameSpace->getName() != "std")
+ return NoKind;
+
+ StringRef name = td->getName();
+ return llvm::StringSwitch<RefKind>(name)
+ .Cases("vector", "deque", VectorKind)
+ .Default(NoKind);
+}
+
+static RefKind getTemplateKind(const DeclContext *dc) {
+ if (const ClassTemplateSpecializationDecl *td =
+ dyn_cast<ClassTemplateSpecializationDecl>(dc))
+ return getTemplateKind(cast<NamedDecl>(td));
+ return NoKind;
+}
+
+static RefKind getTemplateKind(const TypedefType *tdt) {
+ const TypedefNameDecl *td = tdt->getDecl();
+ RefKind parentKind = getTemplateKind(td->getDeclContext());
+ if (parentKind == VectorKind) {
+ return llvm::StringSwitch<RefKind>(td->getName())
+ .Cases("iterator",
+ "const_iterator",
+ "reverse_iterator", VectorIteratorKind)
+ .Default(NoKind);
+ }
+ return NoKind;
+}
+
+static RefKind getTemplateKind(const TemplateSpecializationType *tsp) {
+ const TemplateName &tname = tsp->getTemplateName();
+ TemplateDecl *td = tname.getAsTemplateDecl();
+ if (!td)
+ return NoKind;
+ return getTemplateKind(td);
+}
+
+static RefKind getTemplateKind(QualType T) {
+ if (const TemplateSpecializationType *tsp =
+ T->getAs<TemplateSpecializationType>()) {
+ return getTemplateKind(tsp);
+ }
+ if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(T)) {
+ QualType namedType = ET->getNamedType();
+ if (const TypedefType *tdt = namedType->getAs<TypedefType>())
+ return getTemplateKind(tdt);
+ if (const TemplateSpecializationType *tsp =
+ namedType->getAs<TemplateSpecializationType>()) {
+ return getTemplateKind(tsp);
+ }
+ }
+ return NoKind;
+}
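+
+// Illustrative mapping produced by the getTemplateKind overloads (assuming the
+// usual standard-library spellings):
+//   std::vector<int>            -> VectorKind
+//   std::deque<int>             -> VectorKind
+//   std::vector<int>::iterator  -> VectorIteratorKind
+//   std::list<int>              -> NoKind (not handled by this checker yet)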
+
+// Iterate through our map and invalidate any iterators that were
+// initialized from the specified instance MemRegion.
+ProgramStateRef IteratorsChecker::invalidateIterators(ProgramStateRef state,
+ const MemRegion *MR, const MemberExpr *ME) const {
+ IteratorState::EntryMap Map = state->get<IteratorState>();
+ if (Map.isEmpty())
+ return state;
+
+ // Loop over the entries in the current state.
+ // The key doesn't change, so the map iterators won't change.
+ for (IteratorState::EntryMap::iterator I = Map.begin(), E = Map.end();
+ I != E; ++I) {
+ RefState RS = I.getData();
+ if (RS.getMemRegion() == MR)
+ state = state->set<IteratorState>(I.getKey(), RefState::getInvalid(ME));
+ }
+
+ return state;
+}
+
+// Handle assigning to an iterator where we don't have the LValue MemRegion.
+ProgramStateRef IteratorsChecker::handleAssign(ProgramStateRef state,
+ const Expr *lexp, const Expr *rexp, const LocationContext *LC) const {
+ // Skip the cast if present.
+ if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(lexp))
+ lexp = M->GetTemporaryExpr();
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(lexp))
+ lexp = ICE->getSubExpr();
+ SVal sv = state->getSVal(lexp, LC);
+ const MemRegion *MR = sv.getAsRegion();
+ if (!MR)
+ return state;
+ RefKind kind = getTemplateKind(lexp->getType());
+
+ // If assigning to a vector, invalidate any iterators currently associated.
+ if (kind == VectorKind)
+ return invalidateIterators(state, MR, 0);
+
+ // Make sure that we are assigning to an iterator.
+ if (getTemplateKind(lexp->getType()) != VectorIteratorKind)
+ return state;
+ return handleAssign(state, MR, rexp, LC);
+}
+
+// Handle assigning to an iterator whose LValue MemRegion is already known.
+ProgramStateRef IteratorsChecker::handleAssign(ProgramStateRef state,
+ const MemRegion *MR, const Expr *rexp, const LocationContext *LC) const {
+ // Assume unknown until we find something definite.
+ state = state->set<IteratorState>(MR, RefState::getUnknown());
+ if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(rexp))
+ rexp = M->GetTemporaryExpr();
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(rexp))
+ rexp = ICE->getSubExpr();
+ // Need to handle three cases: MemberCall, copy, copy with addition.
+ if (const CallExpr *CE = dyn_cast<CallExpr>(rexp)) {
+ // Handle MemberCall.
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(CE->getCallee())) {
+ const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ME->getBase());
+ if (!DRE)
+ return state;
+ // Verify that the type is std::vector<T>.
+ if (getTemplateKind(DRE->getType()) != VectorKind)
+ return state;
+ // Now get the MemRegion associated with the instance.
+ const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ if (!VD)
+ return state;
+ const MemRegion *IMR = state->getRegion(VD, LC);
+ if (!IMR)
+ return state;
+ // Finally, see if it is one of the calls that will create
+ // a valid iterator and mark it if so, else mark as Unknown.
+ StringRef mName = ME->getMemberDecl()->getName();
+
+ if (llvm::StringSwitch<bool>(mName)
+ .Cases("begin", "insert", "erase", true).Default(false)) {
+ return state->set<IteratorState>(MR, RefState::getBeginValid(IMR));
+ }
+ if (mName == "end")
+ return state->set<IteratorState>(MR, RefState::getEndValid(IMR));
+
+ return state->set<IteratorState>(MR, RefState::getUnknown());
+ }
+ }
+ // Handle straight copy from another iterator.
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(rexp)) {
+ if (getTemplateKind(DRE->getType()) != VectorIteratorKind)
+ return state;
+ // Now get the MemRegion associated with the instance.
+ const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ if (!VD)
+ return state;
+ const MemRegion *IMR = state->getRegion(VD, LC);
+ if (!IMR)
+ return state;
+ // Get the RefState of the iterator being copied.
+ const RefState *RS = state->get<IteratorState>(IMR);
+ if (!RS)
+ return state;
+ // Use it to set the state of the LValue.
+ return state->set<IteratorState>(MR, *RS);
+ }
+ // If we have operator+ or operator- ...
+ if (const CXXOperatorCallExpr *OCE = dyn_cast<CXXOperatorCallExpr>(rexp)) {
+ OverloadedOperatorKind Kind = OCE->getOperator();
+ if (Kind == OO_Plus || Kind == OO_Minus) {
+ // Check left side of tree for a valid value.
+      state = handleAssign(state, MR, OCE->getArg(0), LC);
+ const RefState *RS = state->get<IteratorState>(MR);
+ // If found, return it.
+ if (!RS->isUnknown())
+ return state;
+ // Otherwise return what we find in the right side.
+ return handleAssign(state, MR, OCE->getArg(1), LC);
+ }
+ }
+ // Fall through if nothing matched.
+ return state;
+}
+
+// Iterate through the arguments looking for an Invalid or Undefined iterator.
+void IteratorsChecker::checkArgs(CheckerContext &C, const CallExpr *CE) const {
+ for (CallExpr::const_arg_iterator I = CE->arg_begin(), E = CE->arg_end();
+ I != E; ++I) {
+ checkExpr(C, *I);
+ }
+}
+
+// Get the DeclRefExpr associated with the expression.
+const DeclRefExpr *IteratorsChecker::getDeclRefExpr(const Expr *E) const {
+ // If it is a CXXConstructExpr, need to get the subexpression.
+ if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(E)) {
+    if (CE->getNumArgs() == 1) {
+ CXXConstructorDecl *CD = CE->getConstructor();
+ if (CD->isTrivial())
+ E = CE->getArg(0);
+ }
+ }
+ if (const MaterializeTemporaryExpr *M = dyn_cast<MaterializeTemporaryExpr>(E))
+ E = M->GetTemporaryExpr();
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
+ E = ICE->getSubExpr();
+ // If it isn't one of our types, don't do anything.
+ if (getTemplateKind(E->getType()) != VectorIteratorKind)
+ return NULL;
+ return dyn_cast<DeclRefExpr>(E);
+}
+
+// Get the MemRegion associated with the expression.
+const MemRegion *IteratorsChecker::getRegion(ProgramStateRef state,
+ const Expr *E, const LocationContext *LC) const {
+ const DeclRefExpr *DRE = getDeclRefExpr(E);
+ if (!DRE)
+ return NULL;
+ const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ if (!VD)
+ return NULL;
+  // Return the MemRegion associated with the iterator.
+ return state->getRegion(VD, LC);
+}
+
+// Check the expression and if it is an iterator, generate a diagnostic
+// if the iterator is not valid.
+// FIXME: this method can generate new nodes, and subsequent logic should
+// use those nodes. We also cannot create multiple nodes at one ProgramPoint
+// with the same tag.
+void IteratorsChecker::checkExpr(CheckerContext &C, const Expr *E) const {
+ ProgramStateRef state = C.getState();
+ const MemRegion *MR = getRegion(state, E, C.getLocationContext());
+ if (!MR)
+ return;
+
+ // Get the state associated with the iterator.
+ const RefState *RS = state->get<IteratorState>(MR);
+ if (!RS)
+ return;
+ if (RS->isInvalid()) {
+ if (ExplodedNode *N = C.addTransition()) {
+ if (!BT_Invalid)
+        // FIXME: We are casting away constness here.
+ const_cast<IteratorsChecker*>(this)->BT_Invalid = new BuiltinBug("");
+
+ std::string msg;
+ const MemberExpr *ME = RS->getMemberExpr();
+ if (ME) {
+ std::string name = ME->getMemberNameInfo().getAsString();
+ msg = "Attempt to use an iterator made invalid by call to '" +
+ name + "'";
+ }
+ else {
+ msg = "Attempt to use an iterator made invalid by copying another "
+ "container to its container";
+ }
+
+ BugReport *R = new BugReport(*BT_Invalid, msg, N);
+ R->addRange(getDeclRefExpr(E)->getSourceRange());
+ C.EmitReport(R);
+ }
+ }
+ else if (RS->isUndefined()) {
+ if (ExplodedNode *N = C.addTransition()) {
+ if (!BT_Undefined)
+        // FIXME: We are casting away constness here.
+ const_cast<IteratorsChecker*>(this)->BT_Undefined =
+ new BuiltinBug("Use of iterator that is not defined");
+
+ BugReport *R = new BugReport(*BT_Undefined,
+ BT_Undefined->getDescription(), N);
+ R->addRange(getDeclRefExpr(E)->getSourceRange());
+ C.EmitReport(R);
+ }
+ }
+}
+
+// ===============================================
+// Path analysis visitor functions
+// ===============================================
+
+// For a generic Call, just check the args for bad iterators.
+void IteratorsChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const{
+
+  // FIXME: These checks currently work around a bug in CheckerManager.
+ if (isa<CXXOperatorCallExpr>(CE))
+ return;
+ if (isa<CXXMemberCallExpr>(CE))
+ return;
+
+ checkArgs(C, CE);
+}
+
+// Handle operator calls. First, if it is operator=, check the argument,
+// handle the assignment, and set the target state appropriately. Otherwise,
+// for other operators, check the args for bad iterators and handle comparisons.
+void IteratorsChecker::checkPreStmt(const CXXOperatorCallExpr *OCE,
+ CheckerContext &C) const
+{
+ const LocationContext *LC = C.getLocationContext();
+ ProgramStateRef state = C.getState();
+ OverloadedOperatorKind Kind = OCE->getOperator();
+ if (Kind == OO_Equal) {
+ checkExpr(C, OCE->getArg(1));
+ state = handleAssign(state, OCE->getArg(0), OCE->getArg(1), LC);
+ C.addTransition(state);
+ return;
+ }
+ else {
+ checkArgs(C, OCE);
+ // If it is a compare and both are iterators, ensure that they are for
+ // the same container.
+ if (Kind == OO_EqualEqual || Kind == OO_ExclaimEqual ||
+ Kind == OO_Less || Kind == OO_LessEqual ||
+ Kind == OO_Greater || Kind == OO_GreaterEqual) {
+ const MemRegion *MR0, *MR1;
+ MR0 = getRegion(state, OCE->getArg(0), LC);
+ if (!MR0)
+ return;
+ MR1 = getRegion(state, OCE->getArg(1), LC);
+ if (!MR1)
+ return;
+ const RefState *RS0, *RS1;
+ RS0 = state->get<IteratorState>(MR0);
+ if (!RS0)
+ return;
+ RS1 = state->get<IteratorState>(MR1);
+ if (!RS1)
+ return;
+ if (RS0->getMemRegion() != RS1->getMemRegion()) {
+ if (ExplodedNode *N = C.addTransition()) {
+ if (!BT_Incompatible)
+ const_cast<IteratorsChecker*>(this)->BT_Incompatible =
+ new BuiltinBug(
+ "Cannot compare iterators from different containers");
+
+ BugReport *R = new BugReport(*BT_Incompatible,
+ BT_Incompatible->getDescription(), N);
+ R->addRange(OCE->getSourceRange());
+ C.EmitReport(R);
+ }
+ }
+ }
+ }
+}
+
+// Need to handle DeclStmts to pick up iterator initializations and to mark
+// uninitialized iterators as Undefined.
+void IteratorsChecker::checkPreStmt(const DeclStmt *DS,
+ CheckerContext &C) const {
+ const Decl *D = *DS->decl_begin();
+ const VarDecl *VD = dyn_cast<VarDecl>(D);
+  // Only care about variable declarations of iterator type.
+  if (!VD || getTemplateKind(VD->getType()) != VectorIteratorKind)
+    return;
+
+ // Get the MemRegion associated with the iterator and mark it as Undefined.
+ ProgramStateRef state = C.getState();
+ Loc VarLoc = state->getLValue(VD, C.getLocationContext());
+ const MemRegion *MR = VarLoc.getAsRegion();
+ if (!MR)
+ return;
+ state = state->set<IteratorState>(MR, RefState::getUndefined());
+
+  // If there is an initializer, mark the iterator Valid when the initializer
+  // is one we know how to handle.
+ const Expr *InitEx = VD->getInit();
+ if (InitEx) {
+ // FIXME: This is too syntactic. Since 'InitEx' will be analyzed first
+ // it should resolve to an SVal that we can check for validity
+ // *semantically* instead of walking through the AST.
+ if (const CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(InitEx)) {
+ if (CE->getNumArgs() == 1) {
+ const Expr *E = CE->getArg(0);
+ if (const MaterializeTemporaryExpr *M
+ = dyn_cast<MaterializeTemporaryExpr>(E))
+ E = M->GetTemporaryExpr();
+ if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(E))
+ InitEx = ICE->getSubExpr();
+ state = handleAssign(state, MR, InitEx, C.getLocationContext());
+ }
+ }
+ }
+ C.addTransition(state);
+}
+
+
+namespace { struct CalledReserved {}; }
+namespace clang { namespace ento {
+template<> struct ProgramStateTrait<CalledReserved>
+ : public ProgramStatePartialTrait<llvm::ImmutableSet<const MemRegion*> > {
+ static void *GDMIndex() { static int index = 0; return &index; }
+};
+}}
+
+// On a member call, first check the args for any bad iterators. Then check
+// whether it is a call to a function that will invalidate the iterators.
+void IteratorsChecker::checkPreStmt(const CXXMemberCallExpr *MCE,
+ CheckerContext &C) const {
+ // Check the arguments.
+ checkArgs(C, MCE);
+ const MemberExpr *ME = dyn_cast<MemberExpr>(MCE->getCallee());
+ if (!ME)
+ return;
+ // Make sure we have the right kind of container.
+ const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(ME->getBase());
+ if (!DRE || getTemplateKind(DRE->getType()) != VectorKind)
+ return;
+ SVal tsv = C.getState()->getSVal(DRE, C.getLocationContext());
+ // Get the MemRegion associated with the container instance.
+ const MemRegion *MR = tsv.getAsRegion();
+ if (!MR)
+ return;
+ // If we are calling a function that invalidates iterators, mark them
+ // appropriately by finding matching instances.
+ ProgramStateRef state = C.getState();
+ StringRef mName = ME->getMemberDecl()->getName();
+ if (llvm::StringSwitch<bool>(mName)
+ .Cases("insert", "reserve", "push_back", true)
+ .Cases("erase", "pop_back", "clear", "resize", true)
+ .Default(false)) {
+ // If there was a 'reserve' call, assume iterators are good.
+ if (!state->contains<CalledReserved>(MR))
+ state = invalidateIterators(state, MR, ME);
+ }
+  // Keep track of instances that have called 'reserve'.
+  // Note: do this after handling any iterator invalidation caused by the
+  // 'reserve' call itself.
+ if (mName == "reserve")
+ state = state->add<CalledReserved>(MR);
+
+ if (state != C.getState())
+ C.addTransition(state);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
new file mode 100644
index 0000000..757a4ce
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
@@ -0,0 +1,314 @@
+//=== LLVMConventionsChecker.cpp - Check LLVM codebase conventions ---*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines LLVMConventionsChecker, a collection of small checks for
+// specific coding conventions in the LLVM/Clang codebase.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Generic type checking routines.
+//===----------------------------------------------------------------------===//
+
+static bool IsLLVMStringRef(QualType T) {
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ return StringRef(QualType(RT, 0).getAsString()) ==
+ "class StringRef";
+}
+
+/// Check whether the declaration is semantically inside the top-level
+/// namespace named by NS.
+static bool InNamespace(const Decl *D, StringRef NS) {
+ const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D->getDeclContext());
+ if (!ND)
+ return false;
+ const IdentifierInfo *II = ND->getIdentifier();
+ if (!II || !II->getName().equals(NS))
+ return false;
+ return isa<TranslationUnitDecl>(ND->getDeclContext());
+}
+
+static bool IsStdString(QualType T) {
+ if (const ElaboratedType *QT = T->getAs<ElaboratedType>())
+ T = QT->getNamedType();
+
+ const TypedefType *TT = T->getAs<TypedefType>();
+ if (!TT)
+ return false;
+
+ const TypedefNameDecl *TD = TT->getDecl();
+
+ if (!InNamespace(TD, "std"))
+ return false;
+
+ return TD->getName() == "string";
+}
+
+static bool IsClangType(const RecordDecl *RD) {
+ return RD->getName() == "Type" && InNamespace(RD, "clang");
+}
+
+static bool IsClangDecl(const RecordDecl *RD) {
+ return RD->getName() == "Decl" && InNamespace(RD, "clang");
+}
+
+static bool IsClangStmt(const RecordDecl *RD) {
+ return RD->getName() == "Stmt" && InNamespace(RD, "clang");
+}
+
+static bool IsClangAttr(const RecordDecl *RD) {
+ return RD->getName() == "Attr" && InNamespace(RD, "clang");
+}
+
+static bool IsStdVector(QualType T) {
+ const TemplateSpecializationType *TS = T->getAs<TemplateSpecializationType>();
+ if (!TS)
+ return false;
+
+ TemplateName TM = TS->getTemplateName();
+ TemplateDecl *TD = TM.getAsTemplateDecl();
+
+ if (!TD || !InNamespace(TD, "std"))
+ return false;
+
+ return TD->getName() == "vector";
+}
+
+static bool IsSmallVector(QualType T) {
+ const TemplateSpecializationType *TS = T->getAs<TemplateSpecializationType>();
+ if (!TS)
+ return false;
+
+ TemplateName TM = TS->getTemplateName();
+ TemplateDecl *TD = TM.getAsTemplateDecl();
+
+ if (!TD || !InNamespace(TD, "llvm"))
+ return false;
+
+ return TD->getName() == "SmallVector";
+}
+
+//===----------------------------------------------------------------------===//
+// CHECK: a StringRef should not be bound to a temporary std::string whose
+// lifetime is shorter than the StringRef's.
+//===----------------------------------------------------------------------===//
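+
+// Illustrative sketch of the pattern being matched (hypothetical code, not
+// taken from the checker's tests):
+//
+//   std::string getName();               // returns a temporary std::string
+//   StringRef Name = getName();          // 'Name' outlives the temporary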
+
+namespace {
+class StringRefCheckerVisitor : public StmtVisitor<StringRefCheckerVisitor> {
+ BugReporter &BR;
+ const Decl *DeclWithIssue;
+public:
+ StringRefCheckerVisitor(const Decl *declWithIssue, BugReporter &br)
+ : BR(br), DeclWithIssue(declWithIssue) {}
+ void VisitChildren(Stmt *S) {
+ for (Stmt::child_iterator I = S->child_begin(), E = S->child_end() ;
+ I != E; ++I)
+ if (Stmt *child = *I)
+ Visit(child);
+ }
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+ void VisitDeclStmt(DeclStmt *DS);
+private:
+ void VisitVarDecl(VarDecl *VD);
+};
+} // end anonymous namespace
+
+static void CheckStringRefAssignedTemporary(const Decl *D, BugReporter &BR) {
+ StringRefCheckerVisitor walker(D, BR);
+ walker.Visit(D->getBody());
+}
+
+void StringRefCheckerVisitor::VisitDeclStmt(DeclStmt *S) {
+ VisitChildren(S);
+
+ for (DeclStmt::decl_iterator I = S->decl_begin(), E = S->decl_end();I!=E; ++I)
+ if (VarDecl *VD = dyn_cast<VarDecl>(*I))
+ VisitVarDecl(VD);
+}
+
+void StringRefCheckerVisitor::VisitVarDecl(VarDecl *VD) {
+ Expr *Init = VD->getInit();
+ if (!Init)
+ return;
+
+ // Pattern match for:
+ // StringRef x = call() (where call returns std::string)
+ if (!IsLLVMStringRef(VD->getType()))
+ return;
+ ExprWithCleanups *Ex1 = dyn_cast<ExprWithCleanups>(Init);
+ if (!Ex1)
+ return;
+ CXXConstructExpr *Ex2 = dyn_cast<CXXConstructExpr>(Ex1->getSubExpr());
+ if (!Ex2 || Ex2->getNumArgs() != 1)
+ return;
+ ImplicitCastExpr *Ex3 = dyn_cast<ImplicitCastExpr>(Ex2->getArg(0));
+ if (!Ex3)
+ return;
+ CXXConstructExpr *Ex4 = dyn_cast<CXXConstructExpr>(Ex3->getSubExpr());
+ if (!Ex4 || Ex4->getNumArgs() != 1)
+ return;
+ ImplicitCastExpr *Ex5 = dyn_cast<ImplicitCastExpr>(Ex4->getArg(0));
+ if (!Ex5)
+ return;
+ CXXBindTemporaryExpr *Ex6 = dyn_cast<CXXBindTemporaryExpr>(Ex5->getSubExpr());
+ if (!Ex6 || !IsStdString(Ex6->getType()))
+ return;
+
+ // Okay, badness! Report an error.
+ const char *desc = "StringRef should not be bound to temporary "
+ "std::string that it outlives";
+ PathDiagnosticLocation VDLoc =
+ PathDiagnosticLocation::createBegin(VD, BR.getSourceManager());
+ BR.EmitBasicReport(DeclWithIssue, desc, "LLVM Conventions", desc,
+ VDLoc, Init->getSourceRange());
+}
+
+//===----------------------------------------------------------------------===//
+// CHECK: Clang AST nodes should not have fields that can allocate
+// memory.
+//===----------------------------------------------------------------------===//
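+
+// Illustrative sketch of a field this check would flag (hypothetical class):
+//
+//   class SomeASTNode : public Stmt {
+//     std::vector<Stmt *> Children;      // field type allocates heap memory
+//   };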
+
+static bool AllocatesMemory(QualType T) {
+ return IsStdVector(T) || IsStdString(T) || IsSmallVector(T);
+}
+
+// This type checking could be sped up via dynamic programming.
+static bool IsPartOfAST(const CXXRecordDecl *R) {
+ if (IsClangStmt(R) || IsClangType(R) || IsClangDecl(R) || IsClangAttr(R))
+ return true;
+
+ for (CXXRecordDecl::base_class_const_iterator I = R->bases_begin(),
+ E = R->bases_end(); I!=E; ++I) {
+ CXXBaseSpecifier BS = *I;
+ QualType T = BS.getType();
+ if (const RecordType *baseT = T->getAs<RecordType>()) {
+ CXXRecordDecl *baseD = cast<CXXRecordDecl>(baseT->getDecl());
+ if (IsPartOfAST(baseD))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+namespace {
+class ASTFieldVisitor {
+ SmallVector<FieldDecl*, 10> FieldChain;
+ const CXXRecordDecl *Root;
+ BugReporter &BR;
+public:
+ ASTFieldVisitor(const CXXRecordDecl *root, BugReporter &br)
+ : Root(root), BR(br) {}
+
+ void Visit(FieldDecl *D);
+ void ReportError(QualType T);
+};
+} // end anonymous namespace
+
+static void CheckASTMemory(const CXXRecordDecl *R, BugReporter &BR) {
+ if (!IsPartOfAST(R))
+ return;
+
+ for (RecordDecl::field_iterator I = R->field_begin(), E = R->field_end();
+ I != E; ++I) {
+ ASTFieldVisitor walker(R, BR);
+ walker.Visit(*I);
+ }
+}
+
+void ASTFieldVisitor::Visit(FieldDecl *D) {
+ FieldChain.push_back(D);
+
+ QualType T = D->getType();
+
+ if (AllocatesMemory(T))
+ ReportError(T);
+
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const RecordDecl *RD = RT->getDecl()->getDefinition();
+ for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I)
+ Visit(*I);
+ }
+
+ FieldChain.pop_back();
+}
+
+void ASTFieldVisitor::ReportError(QualType T) {
+ SmallString<1024> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << "AST class '" << Root->getName() << "' has a field '"
+ << FieldChain.front()->getName() << "' that allocates heap memory";
+ if (FieldChain.size() > 1) {
+ os << " via the following chain: ";
+ bool isFirst = true;
+ for (SmallVectorImpl<FieldDecl*>::iterator I=FieldChain.begin(),
+ E=FieldChain.end(); I!=E; ++I) {
+ if (!isFirst)
+ os << '.';
+ else
+ isFirst = false;
+ os << (*I)->getName();
+ }
+ }
+ os << " (type " << FieldChain.back()->getType().getAsString() << ")";
+ os.flush();
+
+ // Note that this will fire for every translation unit that uses this
+ // class. This is suboptimal, but at least scan-build will merge
+ // duplicate HTML reports. In the future we need a unified way of merging
+ // duplicate reports across translation units. For C++ classes we cannot
+ // just report warnings when we see an out-of-line method definition for a
+ // class, as that heuristic doesn't always work (the complete definition of
+ // the class may be in the header file, for example).
+ PathDiagnosticLocation L = PathDiagnosticLocation::createBegin(
+ FieldChain.front(), BR.getSourceManager());
+ BR.EmitBasicReport(Root, "AST node allocates heap memory", "LLVM Conventions",
+ os.str(), L);
+}
+
+//===----------------------------------------------------------------------===//
+// LLVMConventionsChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class LLVMConventionsChecker : public Checker<
+ check::ASTDecl<CXXRecordDecl>,
+ check::ASTCodeBody > {
+public:
+ void checkASTDecl(const CXXRecordDecl *R, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ if (R->isCompleteDefinition())
+ CheckASTMemory(R, BR);
+ }
+
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ CheckStringRefAssignedTemporary(D, BR);
+ }
+};
+}
+
+void ento::registerLLVMConventionsChecker(CheckerManager &mgr) {
+ mgr.registerChecker<LLVMConventionsChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
new file mode 100644
index 0000000..cb976e0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp
@@ -0,0 +1,681 @@
+//==--- MacOSKeychainAPIChecker.cpp ------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This checker flags misuses of the Keychain API. In particular, the password
+// data allocated/returned by the SecKeychainItemCopyContent,
+// SecKeychainFindGenericPassword, and SecKeychainFindInternetPassword
+// functions has to be freed using a call to SecKeychainItemFreeContent.
+//===----------------------------------------------------------------------===//
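+
+// Illustrative sketch of the leak pattern being flagged (hypothetical code,
+// not taken from the checker's tests):
+//
+//   void *outData; UInt32 length;
+//   OSStatus st = SecKeychainItemCopyContent(item, NULL, NULL,
+//                                            &length, &outData);
+//   if (st == noErr)
+//     use(outData);
+//   // missing: SecKeychainItemFreeContent(NULL, outData);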
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class MacOSKeychainAPIChecker : public Checker<check::PreStmt<CallExpr>,
+ check::PreStmt<ReturnStmt>,
+ check::PostStmt<CallExpr>,
+ check::EndPath,
+ check::DeadSymbols> {
+ mutable OwningPtr<BugType> BT;
+
+public:
+  /// AllocationState is part of the checker-specific state; it is kept
+  /// together with the MemRegion corresponding to the allocated data.
+ struct AllocationState {
+ /// The index of the allocator function.
+ unsigned int AllocatorIdx;
+ SymbolRef Region;
+
+ AllocationState(const Expr *E, unsigned int Idx, SymbolRef R) :
+ AllocatorIdx(Idx),
+ Region(R) {}
+
+ bool operator==(const AllocationState &X) const {
+ return (AllocatorIdx == X.AllocatorIdx &&
+ Region == X.Region);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(AllocatorIdx);
+ ID.AddPointer(Region);
+ }
+ };
+
+ void checkPreStmt(const CallExpr *S, CheckerContext &C) const;
+ void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
+ void checkPostStmt(const CallExpr *S, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
+ void checkEndPath(CheckerContext &C) const;
+
+private:
+ typedef std::pair<SymbolRef, const AllocationState*> AllocationPair;
+ typedef llvm::SmallVector<AllocationPair, 2> AllocationPairVec;
+
+ enum APIKind {
+ /// Denotes functions tracked by this checker.
+ ValidAPI = 0,
+ /// The functions commonly/mistakenly used in place of the given API.
+ ErrorAPI = 1,
+ /// The functions which may allocate the data. These are tracked to reduce
+ /// the false alarm rate.
+ PossibleAPI = 2
+ };
+ /// Stores the information about the allocator and deallocator functions -
+ /// these are the functions the checker is tracking.
+ struct ADFunctionInfo {
+ const char* Name;
+ unsigned int Param;
+ unsigned int DeallocatorIdx;
+ APIKind Kind;
+ };
+ static const unsigned InvalidIdx = 100000;
+ static const unsigned FunctionsToTrackSize = 8;
+ static const ADFunctionInfo FunctionsToTrack[FunctionsToTrackSize];
+  /// The value that represents a no-error return from the allocator
+  /// functions.
+ static const unsigned NoErr = 0;
+
+ /// Given the function name, returns the index of the allocator/deallocator
+ /// function.
+ static unsigned getTrackedFunctionIndex(StringRef Name, bool IsAllocator);
+
+ inline void initBugType() const {
+ if (!BT)
+ BT.reset(new BugType("Improper use of SecKeychain API", "Mac OS API"));
+ }
+
+ void generateDeallocatorMismatchReport(const AllocationPair &AP,
+ const Expr *ArgExpr,
+ CheckerContext &C) const;
+
+ /// Find the allocation site for Sym on the path leading to the node N.
+ const Stmt *getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
+ CheckerContext &C) const;
+
+ BugReport *generateAllocatedDataNotReleasedReport(const AllocationPair &AP,
+ ExplodedNode *N,
+ CheckerContext &C) const;
+
+ /// Check if RetSym evaluates to an error value in the current state.
+ bool definitelyReturnedError(SymbolRef RetSym,
+ ProgramStateRef State,
+ SValBuilder &Builder,
+ bool noError = false) const;
+
+ /// Check if RetSym evaluates to a NoErr value in the current state.
+ bool definitelyDidnotReturnError(SymbolRef RetSym,
+ ProgramStateRef State,
+ SValBuilder &Builder) const {
+ return definitelyReturnedError(RetSym, State, Builder, true);
+ }
+
+ /// Mark an AllocationPair interesting for diagnostic reporting.
+ void markInteresting(BugReport *R, const AllocationPair &AP) const {
+ R->markInteresting(AP.first);
+ R->markInteresting(AP.second->Region);
+ }
+
+ /// The bug visitor which allows us to print extra diagnostics along the
+ /// BugReport path. For example, showing the allocation site of the leaked
+ /// region.
+ class SecKeychainBugVisitor
+ : public BugReporterVisitorImpl<SecKeychainBugVisitor> {
+ protected:
+ // The allocated region symbol tracked by the main analysis.
+ SymbolRef Sym;
+
+ public:
+ SecKeychainBugVisitor(SymbolRef S) : Sym(S) {}
+ virtual ~SecKeychainBugVisitor() {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ static int X = 0;
+ ID.AddPointer(&X);
+ ID.AddPointer(Sym);
+ }
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR);
+ };
+};
+}
+
+/// ProgramState traits to store the currently allocated (and not yet freed)
+/// symbols. This is a map from the allocated content symbol to the
+/// corresponding AllocationState.
+typedef llvm::ImmutableMap<SymbolRef,
+ MacOSKeychainAPIChecker::AllocationState> AllocatedSetTy;
+
+namespace { struct AllocatedData {}; }
+namespace clang { namespace ento {
+template<> struct ProgramStateTrait<AllocatedData>
+ : public ProgramStatePartialTrait<AllocatedSetTy > {
+ static void *GDMIndex() { static int index = 0; return &index; }
+};
+}}
+
+static bool isEnclosingFunctionParam(const Expr *E) {
+ E = E->IgnoreParenCasts();
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ const ValueDecl *VD = DRE->getDecl();
+ if (isa<ImplicitParamDecl>(VD) || isa<ParmVarDecl>(VD))
+ return true;
+ }
+ return false;
+}
+
+const MacOSKeychainAPIChecker::ADFunctionInfo
+ MacOSKeychainAPIChecker::FunctionsToTrack[FunctionsToTrackSize] = {
+ {"SecKeychainItemCopyContent", 4, 3, ValidAPI}, // 0
+ {"SecKeychainFindGenericPassword", 6, 3, ValidAPI}, // 1
+ {"SecKeychainFindInternetPassword", 13, 3, ValidAPI}, // 2
+ {"SecKeychainItemFreeContent", 1, InvalidIdx, ValidAPI}, // 3
+ {"SecKeychainItemCopyAttributesAndData", 5, 5, ValidAPI}, // 4
+ {"SecKeychainItemFreeAttributesAndData", 1, InvalidIdx, ValidAPI}, // 5
+ {"free", 0, InvalidIdx, ErrorAPI}, // 6
+ {"CFStringCreateWithBytesNoCopy", 1, InvalidIdx, PossibleAPI}, // 7
+};
+
+unsigned MacOSKeychainAPIChecker::getTrackedFunctionIndex(StringRef Name,
+ bool IsAllocator) {
+ for (unsigned I = 0; I < FunctionsToTrackSize; ++I) {
+ ADFunctionInfo FI = FunctionsToTrack[I];
+ if (FI.Name != Name)
+ continue;
+ // Make sure the function is of the right type (allocator vs deallocator).
+ if (IsAllocator && (FI.DeallocatorIdx == InvalidIdx))
+ return InvalidIdx;
+ if (!IsAllocator && (FI.DeallocatorIdx != InvalidIdx))
+ return InvalidIdx;
+
+ return I;
+ }
+ // The function is not tracked.
+ return InvalidIdx;
+}
+
+static bool isBadDeallocationArgument(const MemRegion *Arg) {
+ if (!Arg)
+ return false;
+ if (isa<AllocaRegion>(Arg) ||
+ isa<BlockDataRegion>(Arg) ||
+ isa<TypedRegion>(Arg)) {
+ return true;
+ }
+ return false;
+}
+
+/// Given the address expression, retrieve the value it's pointing to. Assume
+/// that value is itself an address, and return the corresponding symbol.
+static SymbolRef getAsPointeeSymbol(const Expr *Expr,
+ CheckerContext &C) {
+ ProgramStateRef State = C.getState();
+ SVal ArgV = State->getSVal(Expr, C.getLocationContext());
+
+ if (const loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(&ArgV)) {
+ StoreManager& SM = C.getStoreManager();
+ SymbolRef sym = SM.getBinding(State->getStore(), *X).getAsLocSymbol();
+ if (sym)
+ return sym;
+ }
+ return 0;
+}
+
+// When checking for error code, we need to consider the following cases:
+// 1) noErr / [0]
+// 2) someErr / [1, inf]
+// 3) unknown
+// If noError, returns true iff (1).
+// If !noError, returns true iff (2).
+bool MacOSKeychainAPIChecker::definitelyReturnedError(SymbolRef RetSym,
+ ProgramStateRef State,
+ SValBuilder &Builder,
+ bool noError) const {
+ DefinedOrUnknownSVal NoErrVal = Builder.makeIntVal(NoErr,
+ Builder.getSymbolManager().getType(RetSym));
+ DefinedOrUnknownSVal NoErr = Builder.evalEQ(State, NoErrVal,
+ nonloc::SymbolVal(RetSym));
+ ProgramStateRef ErrState = State->assume(NoErr, noError);
+ if (ErrState == State) {
+ return true;
+ }
+
+ return false;
+}
+
+// Report deallocator mismatch. Remove the region from tracking - reporting a
+// missing free error after this one is redundant.
+void MacOSKeychainAPIChecker::
+ generateDeallocatorMismatchReport(const AllocationPair &AP,
+ const Expr *ArgExpr,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ State = State->remove<AllocatedData>(AP.first);
+ ExplodedNode *N = C.addTransition(State);
+
+ if (!N)
+ return;
+ initBugType();
+ SmallString<80> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+ unsigned int PDeallocIdx =
+ FunctionsToTrack[AP.second->AllocatorIdx].DeallocatorIdx;
+
+ os << "Deallocator doesn't match the allocator: '"
+ << FunctionsToTrack[PDeallocIdx].Name << "' should be used.";
+ BugReport *Report = new BugReport(*BT, os.str(), N);
+ Report->addVisitor(new SecKeychainBugVisitor(AP.first));
+ Report->addRange(ArgExpr->getSourceRange());
+ markInteresting(Report, AP);
+ C.EmitReport(Report);
+}
+
+void MacOSKeychainAPIChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ unsigned idx = InvalidIdx;
+ ProgramStateRef State = C.getState();
+
+ StringRef funName = C.getCalleeName(CE);
+ if (funName.empty())
+ return;
+
+ // If it is a call to an allocator function, it could be a double allocation.
+ idx = getTrackedFunctionIndex(funName, true);
+ if (idx != InvalidIdx) {
+ const Expr *ArgExpr = CE->getArg(FunctionsToTrack[idx].Param);
+ if (SymbolRef V = getAsPointeeSymbol(ArgExpr, C))
+ if (const AllocationState *AS = State->get<AllocatedData>(V)) {
+ if (!definitelyReturnedError(AS->Region, State, C.getSValBuilder())) {
+ // Remove the value from the state. The new symbol will be added for
+ // tracking when the second allocator is processed in checkPostStmt().
+ State = State->remove<AllocatedData>(V);
+ ExplodedNode *N = C.addTransition(State);
+ if (!N)
+ return;
+ initBugType();
+ SmallString<128> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+ unsigned int DIdx = FunctionsToTrack[AS->AllocatorIdx].DeallocatorIdx;
+ os << "Allocated data should be released before another call to "
+ << "the allocator: missing a call to '"
+ << FunctionsToTrack[DIdx].Name
+ << "'.";
+ BugReport *Report = new BugReport(*BT, os.str(), N);
+ Report->addVisitor(new SecKeychainBugVisitor(V));
+ Report->addRange(ArgExpr->getSourceRange());
+ Report->markInteresting(AS->Region);
+ C.EmitReport(Report);
+ }
+ }
+ return;
+ }
+
+ // Is it a call to one of deallocator functions?
+ idx = getTrackedFunctionIndex(funName, false);
+ if (idx == InvalidIdx)
+ return;
+
+ // Check the argument to the deallocator.
+ const Expr *ArgExpr = CE->getArg(FunctionsToTrack[idx].Param);
+ SVal ArgSVal = State->getSVal(ArgExpr, C.getLocationContext());
+
+ // Undef is reported by another checker.
+ if (ArgSVal.isUndef())
+ return;
+
+ SymbolRef ArgSM = ArgSVal.getAsLocSymbol();
+
+ // If the argument is coming from the heap, globals, or unknown, do not
+ // report it.
+ bool RegionArgIsBad = false;
+ if (!ArgSM) {
+ if (!isBadDeallocationArgument(ArgSVal.getAsRegion()))
+ return;
+ RegionArgIsBad = true;
+ }
+
+ // Is the argument to the call being tracked?
+ const AllocationState *AS = State->get<AllocatedData>(ArgSM);
+ if (!AS && FunctionsToTrack[idx].Kind != ValidAPI) {
+ return;
+ }
+ // If trying to free data which has not been allocated yet, report as a bug.
+ // TODO: We might want a more precise diagnostic for double free
+ // (that would involve tracking all the freed symbols in the checker state).
+ if (!AS || RegionArgIsBad) {
+ // It is possible that this is a false positive - the argument might
+ // have entered as an enclosing function parameter.
+ if (isEnclosingFunctionParam(ArgExpr))
+ return;
+
+ ExplodedNode *N = C.addTransition(State);
+ if (!N)
+ return;
+ initBugType();
+ BugReport *Report = new BugReport(*BT,
+ "Trying to free data which has not been allocated.", N);
+ Report->addRange(ArgExpr->getSourceRange());
+ if (AS)
+ Report->markInteresting(AS->Region);
+ C.EmitReport(Report);
+ return;
+ }
+
+ // Process functions which might deallocate.
+ if (FunctionsToTrack[idx].Kind == PossibleAPI) {
+
+ if (funName == "CFStringCreateWithBytesNoCopy") {
+ const Expr *DeallocatorExpr = CE->getArg(5)->IgnoreParenCasts();
+ // NULL ~ default deallocator, so warn.
+ if (DeallocatorExpr->isNullPointerConstant(C.getASTContext(),
+ Expr::NPC_ValueDependentIsNotNull)) {
+ const AllocationPair AP = std::make_pair(ArgSM, AS);
+ generateDeallocatorMismatchReport(AP, ArgExpr, C);
+ return;
+ }
+ // One of the default allocators, so warn.
+ if (const DeclRefExpr *DE = dyn_cast<DeclRefExpr>(DeallocatorExpr)) {
+ StringRef DeallocatorName = DE->getFoundDecl()->getName();
+ if (DeallocatorName == "kCFAllocatorDefault" ||
+ DeallocatorName == "kCFAllocatorSystemDefault" ||
+ DeallocatorName == "kCFAllocatorMalloc") {
+ const AllocationPair AP = std::make_pair(ArgSM, AS);
+ generateDeallocatorMismatchReport(AP, ArgExpr, C);
+ return;
+ }
+      // If the deallocator is kCFAllocatorNull, which does not deallocate,
+      // we still have to find the real deallocator, so keep tracking.
+      // Otherwise, assume that the user has written a custom deallocator
+      // which does the right thing.
+ if (DE->getFoundDecl()->getName() != "kCFAllocatorNull") {
+ State = State->remove<AllocatedData>(ArgSM);
+ C.addTransition(State);
+ return;
+ }
+ }
+ }
+ return;
+ }
+
+ // The call is deallocating a value we previously allocated, so remove it
+ // from the next state.
+ State = State->remove<AllocatedData>(ArgSM);
+
+ // Check if the proper deallocator is used.
+ unsigned int PDeallocIdx = FunctionsToTrack[AS->AllocatorIdx].DeallocatorIdx;
+ if (PDeallocIdx != idx || (FunctionsToTrack[idx].Kind == ErrorAPI)) {
+ const AllocationPair AP = std::make_pair(ArgSM, AS);
+ generateDeallocatorMismatchReport(AP, ArgExpr, C);
+ return;
+ }
+
+ // If the buffer can be null and the return status can be an error,
+ // report a bad call to free.
+ if (State->assume(cast<DefinedSVal>(ArgSVal), false) &&
+ !definitelyDidnotReturnError(AS->Region, State, C.getSValBuilder())) {
+ ExplodedNode *N = C.addTransition(State);
+ if (!N)
+ return;
+ initBugType();
+ BugReport *Report = new BugReport(*BT,
+ "Only call free if a valid (non-NULL) buffer was returned.", N);
+ Report->addVisitor(new SecKeychainBugVisitor(ArgSM));
+ Report->addRange(ArgExpr->getSourceRange());
+ Report->markInteresting(AS->Region);
+ C.EmitReport(Report);
+ return;
+ }
+
+ C.addTransition(State);
+}
+
+void MacOSKeychainAPIChecker::checkPostStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ StringRef funName = C.getCalleeName(CE);
+
+ // If a value has been allocated, add it to the set for tracking.
+ unsigned idx = getTrackedFunctionIndex(funName, true);
+ if (idx == InvalidIdx)
+ return;
+
+ const Expr *ArgExpr = CE->getArg(FunctionsToTrack[idx].Param);
+ // If the argument entered as an enclosing function parameter, skip it to
+ // avoid false positives.
+ if (isEnclosingFunctionParam(ArgExpr) &&
+ C.getLocationContext()->getParent() == 0)
+ return;
+
+ if (SymbolRef V = getAsPointeeSymbol(ArgExpr, C)) {
+ // If the argument points to something that's not a symbolic region, it
+ // can be:
+ // - unknown (cannot reason about it)
+ // - undefined (already reported by other checker)
+ // - constant (null - should not be tracked,
+ // other constant will generate a compiler warning)
+ // - goto (should be reported by other checker)
+
+    // The call return value symbol should stay alive for as long as the
+    // allocated value symbol, since our diagnostics depend on the value
+    // returned by the call. For example, data should only be freed if noErr
+    // was returned during allocation.
+ SymbolRef RetStatusSymbol =
+ State->getSVal(CE, C.getLocationContext()).getAsSymbol();
+ C.getSymbolManager().addSymbolDependency(V, RetStatusSymbol);
+
+ // Track the allocated value in the checker state.
+ State = State->set<AllocatedData>(V, AllocationState(ArgExpr, idx,
+ RetStatusSymbol));
+ assert(State);
+ C.addTransition(State);
+ }
+}
+
+void MacOSKeychainAPIChecker::checkPreStmt(const ReturnStmt *S,
+ CheckerContext &C) const {
+ const Expr *retExpr = S->getRetValue();
+ if (!retExpr)
+ return;
+
+ // If inside inlined call, skip it.
+ const LocationContext *LC = C.getLocationContext();
+ if (LC->getParent() != 0)
+ return;
+
+ // Check if the value is escaping through the return.
+ ProgramStateRef state = C.getState();
+ SymbolRef sym = state->getSVal(retExpr, LC).getAsLocSymbol();
+ if (!sym)
+ return;
+ state = state->remove<AllocatedData>(sym);
+
+ // Proceed from the new state.
+ C.addTransition(state);
+}
+
+// TODO: This logic is the same as in Malloc checker.
+const Stmt *
+MacOSKeychainAPIChecker::getAllocationSite(const ExplodedNode *N,
+ SymbolRef Sym,
+ CheckerContext &C) const {
+ const LocationContext *LeakContext = N->getLocationContext();
+ // Walk the ExplodedGraph backwards and find the first node that referred to
+ // the tracked symbol.
+ const ExplodedNode *AllocNode = N;
+
+ while (N) {
+ if (!N->getState()->get<AllocatedData>(Sym))
+ break;
+    // The allocation node is the last node in the current context in which
+    // the symbol was tracked.
+ if (N->getLocationContext() == LeakContext)
+ AllocNode = N;
+ N = N->pred_empty() ? NULL : *(N->pred_begin());
+ }
+
+ ProgramPoint P = AllocNode->getLocation();
+ if (!isa<StmtPoint>(P))
+ return 0;
+ return cast<clang::PostStmt>(P).getStmt();
+}
+
+BugReport *MacOSKeychainAPIChecker::
+ generateAllocatedDataNotReleasedReport(const AllocationPair &AP,
+ ExplodedNode *N,
+ CheckerContext &C) const {
+ const ADFunctionInfo &FI = FunctionsToTrack[AP.second->AllocatorIdx];
+ initBugType();
+ SmallString<70> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+ os << "Allocated data is not released: missing a call to '"
+ << FunctionsToTrack[FI.DeallocatorIdx].Name << "'.";
+
+ // Most bug reports are cached at the location where they occurred.
+ // With leaks, we want to unique them by the location where they were
+ // allocated, and only report a single path.
+ PathDiagnosticLocation LocUsedForUniqueing;
+ if (const Stmt *AllocStmt = getAllocationSite(N, AP.first, C))
+ LocUsedForUniqueing = PathDiagnosticLocation::createBegin(AllocStmt,
+ C.getSourceManager(), N->getLocationContext());
+
+ BugReport *Report = new BugReport(*BT, os.str(), N, LocUsedForUniqueing);
+ Report->addVisitor(new SecKeychainBugVisitor(AP.first));
+ markInteresting(Report, AP);
+ return Report;
+}
+
+void MacOSKeychainAPIChecker::checkDeadSymbols(SymbolReaper &SR,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ AllocatedSetTy ASet = State->get<AllocatedData>();
+ if (ASet.isEmpty())
+ return;
+
+ bool Changed = false;
+ AllocationPairVec Errors;
+ for (AllocatedSetTy::iterator I = ASet.begin(), E = ASet.end(); I != E; ++I) {
+ if (SR.isLive(I->first))
+ continue;
+
+ Changed = true;
+ State = State->remove<AllocatedData>(I->first);
+ // If the allocated symbol is null or if the allocation call might have
+ // returned an error, do not report.
+ if (State->getSymVal(I->first) ||
+ definitelyReturnedError(I->second.Region, State, C.getSValBuilder()))
+ continue;
+ Errors.push_back(std::make_pair(I->first, &I->second));
+ }
+ if (!Changed) {
+ // Generate the new, cleaned up state.
+ C.addTransition(State);
+ return;
+ }
+
+ static SimpleProgramPointTag Tag("MacOSKeychainAPIChecker : DeadSymbolsLeak");
+ ExplodedNode *N = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
+
+ // Generate the error reports.
+ for (AllocationPairVec::iterator I = Errors.begin(), E = Errors.end();
+ I != E; ++I) {
+ C.EmitReport(generateAllocatedDataNotReleasedReport(*I, N, C));
+ }
+
+ // Generate the new, cleaned up state.
+ C.addTransition(State, N);
+}
+
+// TODO: Remove this after we ensure that checkDeadSymbols is always called.
+void MacOSKeychainAPIChecker::checkEndPath(CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+
+ // If inside inlined call, skip it.
+ if (C.getLocationContext()->getParent() != 0)
+ return;
+
+ AllocatedSetTy AS = state->get<AllocatedData>();
+ if (AS.isEmpty())
+ return;
+
+ // Anything which has been allocated but not freed (nor escaped) will be
+ // found here, so report it.
+ bool Changed = false;
+ AllocationPairVec Errors;
+ for (AllocatedSetTy::iterator I = AS.begin(), E = AS.end(); I != E; ++I ) {
+ Changed = true;
+ state = state->remove<AllocatedData>(I->first);
+    // If the allocated symbol is null or if an error code was returned at
+    // allocation, do not report.
+ if (state->getSymVal(I.getKey()) ||
+ definitelyReturnedError(I->second.Region, state,
+ C.getSValBuilder())) {
+ continue;
+ }
+ Errors.push_back(std::make_pair(I->first, &I->second));
+ }
+
+ // If no change, do not generate a new state.
+ if (!Changed) {
+ C.addTransition(state);
+ return;
+ }
+
+ static SimpleProgramPointTag Tag("MacOSKeychainAPIChecker : EndPathLeak");
+ ExplodedNode *N = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
+
+ // Generate the error reports.
+ for (AllocationPairVec::iterator I = Errors.begin(), E = Errors.end();
+ I != E; ++I) {
+ C.EmitReport(generateAllocatedDataNotReleasedReport(*I, N, C));
+ }
+
+ C.addTransition(state, N);
+}
+
+
+PathDiagnosticPiece *MacOSKeychainAPIChecker::SecKeychainBugVisitor::VisitNode(
+ const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ const AllocationState *AS = N->getState()->get<AllocatedData>(Sym);
+ if (!AS)
+ return 0;
+ const AllocationState *ASPrev = PrevN->getState()->get<AllocatedData>(Sym);
+ if (ASPrev)
+ return 0;
+
+  // (!ASPrev && AS) ~ We started tracking the symbol in node N, so it must be
+  // the allocation site.
+ const CallExpr *CE = cast<CallExpr>(cast<StmtPoint>(N->getLocation())
+ .getStmt());
+ const FunctionDecl *funDecl = CE->getDirectCallee();
+ assert(funDecl && "We do not support indirect function calls as of now.");
+ StringRef funName = funDecl->getName();
+
+ // Get the expression of the corresponding argument.
+ unsigned Idx = getTrackedFunctionIndex(funName, true);
+ assert(Idx != InvalidIdx && "This should be a call to an allocator.");
+ const Expr *ArgExpr = CE->getArg(FunctionsToTrack[Idx].Param);
+ PathDiagnosticLocation Pos(ArgExpr, BRC.getSourceManager(),
+ N->getLocationContext());
+ return new PathDiagnosticEventPiece(Pos, "Data is allocated here.");
+}
+
+void ento::registerMacOSKeychainAPIChecker(CheckerManager &mgr) {
+ mgr.registerChecker<MacOSKeychainAPIChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
new file mode 100644
index 0000000..cfdb55d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MacOSXAPIChecker.cpp
@@ -0,0 +1,116 @@
+// MacOSXAPIChecker.cpp - Checks proper use of various MacOS X APIs -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines MacOSXAPIChecker, an assortment of checks on calls to various
+// widely used Mac OS X functions.
+//
+// FIXME: What's currently in BasicObjCFoundationChecks.cpp should be migrated
+// to here, using the new Checker interface.
+//
+//===----------------------------------------------------------------------===//
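+
+// Illustrative sketch of the dispatch_once misuse checked for below
+// (hypothetical code): the predicate must not have automatic storage.
+//
+//   void f(void) {
+//     dispatch_once_t pred = 0;           // stack-allocated predicate
+//     dispatch_once(&pred, ^{ init(); }); // warning: use a static/global one
+//   }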
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class MacOSXAPIChecker : public Checker< check::PreStmt<CallExpr> > {
+ mutable OwningPtr<BugType> BT_dispatchOnce;
+
+public:
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+ void CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
+ StringRef FName) const;
+
+ typedef void (MacOSXAPIChecker::*SubChecker)(CheckerContext &,
+ const CallExpr *,
+ StringRef FName) const;
+};
+} //end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// dispatch_once and dispatch_once_f
+//===----------------------------------------------------------------------===//
+
+void MacOSXAPIChecker::CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
+ StringRef FName) const {
+ if (CE->getNumArgs() < 1)
+ return;
+
+ // Check if the first argument is stack allocated. If so, issue a warning
+ // because that's likely to be bad news.
+ ProgramStateRef state = C.getState();
+ const MemRegion *R =
+ state->getSVal(CE->getArg(0), C.getLocationContext()).getAsRegion();
+ if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
+ return;
+
+ ExplodedNode *N = C.generateSink(state);
+ if (!N)
+ return;
+
+ if (!BT_dispatchOnce)
+ BT_dispatchOnce.reset(new BugType("Improper use of 'dispatch_once'",
+ "Mac OS X API"));
+
+ SmallString<256> S;
+ llvm::raw_svector_ostream os(S);
+ os << "Call to '" << FName << "' uses";
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R))
+ os << " the local variable '" << VR->getDecl()->getName() << '\'';
+ else
+ os << " stack allocated memory";
+ os << " for the predicate value. Using such transient memory for "
+ "the predicate is potentially dangerous.";
+ if (isa<VarRegion>(R) && isa<StackLocalsSpaceRegion>(R->getMemorySpace()))
+ os << " Perhaps you intended to declare the variable as 'static'?";
+
+ BugReport *report = new BugReport(*BT_dispatchOnce, os.str(), N);
+ report->addRange(CE->getArg(0)->getSourceRange());
+ C.EmitReport(report);
+}
+
+//===----------------------------------------------------------------------===//
+// Central dispatch function.
+//===----------------------------------------------------------------------===//
+
+void MacOSXAPIChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ StringRef Name = C.getCalleeName(CE);
+ if (Name.empty())
+ return;
+
+ SubChecker SC =
+ llvm::StringSwitch<SubChecker>(Name)
+ .Cases("dispatch_once", "dispatch_once_f",
+ &MacOSXAPIChecker::CheckDispatchOnce)
+ .Default(NULL);
+
+ if (SC)
+ (this->*SC)(C, CE, Name);
+}
+
+//===----------------------------------------------------------------------===//
+// Registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerMacOSXAPIChecker(CheckerManager &mgr) {
+ mgr.registerChecker<MacOSXAPIChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
new file mode 100644
index 0000000..8bce88a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -0,0 +1,1463 @@
+//=== MallocChecker.cpp - A malloc/free checker -------------------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the malloc/free checker, which checks for potential memory
+// leaks, double frees, and use-after-free problems.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "InterCheckerAPI.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
+#include <climits>
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class RefState {
+ enum Kind { AllocateUnchecked, AllocateFailed, Released, Escaped,
+ Relinquished } K;
+ const Stmt *S;
+
+public:
+ RefState(Kind k, const Stmt *s) : K(k), S(s) {}
+
+ bool isAllocated() const { return K == AllocateUnchecked; }
+ bool isReleased() const { return K == Released; }
+
+ const Stmt *getStmt() const { return S; }
+
+ bool operator==(const RefState &X) const {
+ return K == X.K && S == X.S;
+ }
+
+ static RefState getAllocateUnchecked(const Stmt *s) {
+ return RefState(AllocateUnchecked, s);
+ }
+ static RefState getAllocateFailed() {
+ return RefState(AllocateFailed, 0);
+ }
+ static RefState getReleased(const Stmt *s) { return RefState(Released, s); }
+ static RefState getEscaped(const Stmt *s) { return RefState(Escaped, s); }
+ static RefState getRelinquished(const Stmt *s) {
+ return RefState(Relinquished, s);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(K);
+ ID.AddPointer(S);
+ }
+};
+
+struct ReallocPair {
+ SymbolRef ReallocatedSym;
+ bool IsFreeOnFailure;
+ ReallocPair(SymbolRef S, bool F) : ReallocatedSym(S), IsFreeOnFailure(F) {}
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(IsFreeOnFailure);
+ ID.AddPointer(ReallocatedSym);
+ }
+ bool operator==(const ReallocPair &X) const {
+ return ReallocatedSym == X.ReallocatedSym &&
+ IsFreeOnFailure == X.IsFreeOnFailure;
+ }
+};
+
+typedef std::pair<const Stmt*, const MemRegion*> LeakInfo;
+
+class MallocChecker : public Checker<check::DeadSymbols,
+ check::EndPath,
+ check::PreStmt<ReturnStmt>,
+ check::PreStmt<CallExpr>,
+ check::PostStmt<CallExpr>,
+ check::PostStmt<BlockExpr>,
+ check::Location,
+ check::Bind,
+ eval::Assume,
+ check::RegionChanges>
+{
+ mutable OwningPtr<BugType> BT_DoubleFree;
+ mutable OwningPtr<BugType> BT_Leak;
+ mutable OwningPtr<BugType> BT_UseFree;
+ mutable OwningPtr<BugType> BT_BadFree;
+ mutable IdentifierInfo *II_malloc, *II_free, *II_realloc, *II_calloc,
+ *II_valloc, *II_reallocf, *II_strndup, *II_strdup;
+
+public:
+ MallocChecker() : II_malloc(0), II_free(0), II_realloc(0), II_calloc(0),
+ II_valloc(0), II_reallocf(0), II_strndup(0), II_strdup(0) {}
+
+ /// In pessimistic mode, the checker assumes that it does not know which
+ /// functions might free the memory.
+ struct ChecksFilter {
+ DefaultBool CMallocPessimistic;
+ DefaultBool CMallocOptimistic;
+ };
+
+ ChecksFilter Filter;
+
+ void checkPreStmt(const CallExpr *S, CheckerContext &C) const;
+ void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+ void checkEndPath(CheckerContext &C) const;
+ void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
+ ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
+ bool Assumption) const;
+ void checkLocation(SVal l, bool isLoad, const Stmt *S,
+ CheckerContext &C) const;
+ void checkBind(SVal location, SVal val, const Stmt*S,
+ CheckerContext &C) const;
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef state,
+ const StoreManager::InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) const;
+ bool wantsRegionChangeUpdate(ProgramStateRef state) const {
+ return true;
+ }
+
+private:
+ void initIdentifierInfo(ASTContext &C) const;
+
+ /// Check if this is one of the functions which can allocate/reallocate memory
+ /// pointed to by one of its arguments.
+ bool isMemFunction(const FunctionDecl *FD, ASTContext &C) const;
+
+ static ProgramStateRef MallocMemReturnsAttr(CheckerContext &C,
+ const CallExpr *CE,
+ const OwnershipAttr* Att);
+ static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE,
+ const Expr *SizeEx, SVal Init,
+ ProgramStateRef state) {
+ return MallocMemAux(C, CE,
+ state->getSVal(SizeEx, C.getLocationContext()),
+ Init, state);
+ }
+
+ static ProgramStateRef MallocMemAux(CheckerContext &C, const CallExpr *CE,
+ SVal SizeEx, SVal Init,
+ ProgramStateRef state);
+
+ /// Update the RefState to reflect the new memory allocation.
+ static ProgramStateRef MallocUpdateRefState(CheckerContext &C,
+ const CallExpr *CE,
+ ProgramStateRef state);
+
+ ProgramStateRef FreeMemAttr(CheckerContext &C, const CallExpr *CE,
+ const OwnershipAttr* Att) const;
+ ProgramStateRef FreeMemAux(CheckerContext &C, const CallExpr *CE,
+ ProgramStateRef state, unsigned Num,
+ bool Hold) const;
+
+ ProgramStateRef ReallocMem(CheckerContext &C, const CallExpr *CE,
+ bool FreesMemOnFailure) const;
+ static ProgramStateRef CallocMem(CheckerContext &C, const CallExpr *CE);
+
+ bool checkEscape(SymbolRef Sym, const Stmt *S, CheckerContext &C) const;
+ bool checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
+ const Stmt *S = 0) const;
+
+ /// Check if the function is not known to us; if so, we conservatively
+ /// assume it can free/reallocate its pointer arguments.
+ bool doesNotFreeMemory(const CallOrObjCMessage *Call,
+ ProgramStateRef State) const;
+
+ static bool SummarizeValue(raw_ostream &os, SVal V);
+ static bool SummarizeRegion(raw_ostream &os, const MemRegion *MR);
+ void ReportBadFree(CheckerContext &C, SVal ArgVal, SourceRange range) const;
+
+ /// Find the location of the allocation for Sym on the path leading to the
+ /// exploded node N.
+ LeakInfo getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
+ CheckerContext &C) const;
+
+ void reportLeak(SymbolRef Sym, ExplodedNode *N, CheckerContext &C) const;
+
+ /// The bug visitor which allows us to print extra diagnostics along the
+ /// BugReport path. For example, showing the allocation site of the leaked
+ /// region.
+ class MallocBugVisitor : public BugReporterVisitorImpl<MallocBugVisitor> {
+ protected:
+ enum NotificationMode {
+ Normal,
+ ReallocationFailed
+ };
+
+ // The allocated region symbol tracked by the main analysis.
+ SymbolRef Sym;
+
+ // The mode we are in, i.e. what kind of diagnostics will be emitted.
+ NotificationMode Mode;
+
+ // A symbol from the point where the primary region should have been
+ // reallocated.
+ SymbolRef FailedReallocSymbol;
+
+ public:
+ MallocBugVisitor(SymbolRef S)
+ : Sym(S), Mode(Normal), FailedReallocSymbol(0) {}
+
+ virtual ~MallocBugVisitor() {}
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ static int X = 0;
+ ID.AddPointer(&X);
+ ID.AddPointer(Sym);
+ }
+
+ inline bool isAllocated(const RefState *S, const RefState *SPrev,
+ const Stmt *Stmt) {
+ // Did not track -> allocated. Other state (released) -> allocated.
+ return (Stmt && isa<CallExpr>(Stmt) &&
+ (S && S->isAllocated()) && (!SPrev || !SPrev->isAllocated()));
+ }
+
+ inline bool isReleased(const RefState *S, const RefState *SPrev,
+ const Stmt *Stmt) {
+ // Did not track -> released. Other state (allocated) -> released.
+ return (Stmt && isa<CallExpr>(Stmt) &&
+ (S && S->isReleased()) && (!SPrev || !SPrev->isReleased()));
+ }
+
+ inline bool isReallocFailedCheck(const RefState *S, const RefState *SPrev,
+ const Stmt *Stmt) {
+ // If the expression is not a call, and the state change is
+ // released -> allocated, it must be the realloc return value
+ // check. If we have to handle more cases here, it might be cleaner just
+ // to track this extra bit in the state itself.
+ return ((!Stmt || !isa<CallExpr>(Stmt)) &&
+ (S && S->isAllocated()) && (SPrev && !SPrev->isAllocated()));
+ }
+
+ PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR);
+ private:
+ class StackHintGeneratorForReallocationFailed
+ : public StackHintGeneratorForSymbol {
+ public:
+ StackHintGeneratorForReallocationFailed(SymbolRef S, StringRef M)
+ : StackHintGeneratorForSymbol(S, M) {}
+
+ virtual std::string getMessageForArg(const Expr *ArgE, unsigned ArgIndex) {
+ SmallString<200> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << "Reallocation of ";
+ // Printed parameters start at 1, not 0.
+ printOrdinal(++ArgIndex, os);
+ os << " parameter failed";
+
+ return os.str();
+ }
+
+ virtual std::string getMessageForReturn(const CallExpr *CallExpr) {
+ return "Reallocation of returned value failed";
+ }
+ };
+ };
+};
+} // end anonymous namespace
+
+typedef llvm::ImmutableMap<SymbolRef, RefState> RegionStateTy;
+typedef llvm::ImmutableMap<SymbolRef, ReallocPair > ReallocMap;
+class RegionState {};
+class ReallocPairs {};
+namespace clang {
+namespace ento {
+ template <>
+ struct ProgramStateTrait<RegionState>
+ : public ProgramStatePartialTrait<RegionStateTy> {
+ static void *GDMIndex() { static int x; return &x; }
+ };
+
+ template <>
+ struct ProgramStateTrait<ReallocPairs>
+ : public ProgramStatePartialTrait<ReallocMap> {
+ static void *GDMIndex() { static int x; return &x; }
+ };
+}
+}
+
+namespace {
+class StopTrackingCallback : public SymbolVisitor {
+ ProgramStateRef state;
+public:
+ StopTrackingCallback(ProgramStateRef st) : state(st) {}
+ ProgramStateRef getState() const { return state; }
+
+ bool VisitSymbol(SymbolRef sym) {
+ state = state->remove<RegionState>(sym);
+ return true;
+ }
+};
+} // end anonymous namespace
+
+void MallocChecker::initIdentifierInfo(ASTContext &Ctx) const {
+ if (!II_malloc)
+ II_malloc = &Ctx.Idents.get("malloc");
+ if (!II_free)
+ II_free = &Ctx.Idents.get("free");
+ if (!II_realloc)
+ II_realloc = &Ctx.Idents.get("realloc");
+ if (!II_reallocf)
+ II_reallocf = &Ctx.Idents.get("reallocf");
+ if (!II_calloc)
+ II_calloc = &Ctx.Idents.get("calloc");
+ if (!II_valloc)
+ II_valloc = &Ctx.Idents.get("valloc");
+ if (!II_strdup)
+ II_strdup = &Ctx.Idents.get("strdup");
+ if (!II_strndup)
+ II_strndup = &Ctx.Idents.get("strndup");
+}
+
+bool MallocChecker::isMemFunction(const FunctionDecl *FD, ASTContext &C) const {
+ if (!FD)
+ return false;
+ IdentifierInfo *FunI = FD->getIdentifier();
+ if (!FunI)
+ return false;
+
+ initIdentifierInfo(C);
+
+ if (FunI == II_malloc || FunI == II_free || FunI == II_realloc ||
+ FunI == II_reallocf || FunI == II_calloc || FunI == II_valloc ||
+ FunI == II_strdup || FunI == II_strndup)
+ return true;
+
+ if (Filter.CMallocOptimistic && FD->hasAttrs() &&
+ FD->specific_attr_begin<OwnershipAttr>() !=
+ FD->specific_attr_end<OwnershipAttr>())
+ return true;
+
+ return false;
+}
+
+void MallocChecker::checkPostStmt(const CallExpr *CE, CheckerContext &C) const {
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD)
+ return;
+
+ initIdentifierInfo(C.getASTContext());
+ IdentifierInfo *FunI = FD->getIdentifier();
+ if (!FunI)
+ return;
+
+ ProgramStateRef State = C.getState();
+ if (FunI == II_malloc || FunI == II_valloc) {
+ if (CE->getNumArgs() < 1)
+ return;
+ State = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(), State);
+ } else if (FunI == II_realloc) {
+ State = ReallocMem(C, CE, false);
+ } else if (FunI == II_reallocf) {
+ State = ReallocMem(C, CE, true);
+ } else if (FunI == II_calloc) {
+ State = CallocMem(C, CE);
+ } else if (FunI == II_free) {
+ State = FreeMemAux(C, CE, C.getState(), 0, false);
+ } else if (FunI == II_strdup) {
+ State = MallocUpdateRefState(C, CE, State);
+ } else if (FunI == II_strndup) {
+ State = MallocUpdateRefState(C, CE, State);
+ } else if (Filter.CMallocOptimistic) {
+ // Check all the attributes, if there are any.
+ // There can be multiple of these attributes.
+ if (FD->hasAttrs())
+ for (specific_attr_iterator<OwnershipAttr>
+ i = FD->specific_attr_begin<OwnershipAttr>(),
+ e = FD->specific_attr_end<OwnershipAttr>();
+ i != e; ++i) {
+ switch ((*i)->getOwnKind()) {
+ case OwnershipAttr::Returns:
+ State = MallocMemReturnsAttr(C, CE, *i);
+ break;
+ case OwnershipAttr::Takes:
+ case OwnershipAttr::Holds:
+ State = FreeMemAttr(C, CE, *i);
+ break;
+ }
+ }
+ }
+ C.addTransition(State);
+}
+
+ProgramStateRef MallocChecker::MallocMemReturnsAttr(CheckerContext &C,
+ const CallExpr *CE,
+ const OwnershipAttr* Att) {
+ if (Att->getModule() != "malloc")
+ return 0;
+
+ OwnershipAttr::args_iterator I = Att->args_begin(), E = Att->args_end();
+ if (I != E) {
+ return MallocMemAux(C, CE, CE->getArg(*I), UndefinedVal(), C.getState());
+ }
+ return MallocMemAux(C, CE, UnknownVal(), UndefinedVal(), C.getState());
+}
+
+ProgramStateRef MallocChecker::MallocMemAux(CheckerContext &C,
+ const CallExpr *CE,
+ SVal Size, SVal Init,
+ ProgramStateRef state) {
+ // Get the return value.
+ SVal retVal = state->getSVal(CE, C.getLocationContext());
+
+ // We expect the malloc functions to return a pointer.
+ if (!isa<Loc>(retVal))
+ return 0;
+
+ // Fill the region with the initialization value.
+ state = state->bindDefault(retVal, Init);
+
+ // Set the region's extent equal to the Size parameter.
+ const SymbolicRegion *R =
+ dyn_cast_or_null<SymbolicRegion>(retVal.getAsRegion());
+ if (!R)
+ return 0;
+ if (isa<DefinedOrUnknownSVal>(Size)) {
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ DefinedOrUnknownSVal Extent = R->getExtent(svalBuilder);
+ DefinedOrUnknownSVal DefinedSize = cast<DefinedOrUnknownSVal>(Size);
+ DefinedOrUnknownSVal extentMatchesSize =
+ svalBuilder.evalEQ(state, Extent, DefinedSize);
+
+ state = state->assume(extentMatchesSize, true);
+ assert(state);
+ }
+
+ return MallocUpdateRefState(C, CE, state);
+}
+
+ProgramStateRef MallocChecker::MallocUpdateRefState(CheckerContext &C,
+ const CallExpr *CE,
+ ProgramStateRef state) {
+ // Get the return value.
+ SVal retVal = state->getSVal(CE, C.getLocationContext());
+
+ // We expect the malloc functions to return a pointer.
+ if (!isa<Loc>(retVal))
+ return 0;
+
+ SymbolRef Sym = retVal.getAsLocSymbol();
+ assert(Sym);
+
+ // Set the symbol's state to Allocated.
+ return state->set<RegionState>(Sym, RefState::getAllocateUnchecked(CE));
+}
+
+ProgramStateRef MallocChecker::FreeMemAttr(CheckerContext &C,
+ const CallExpr *CE,
+ const OwnershipAttr* Att) const {
+ if (Att->getModule() != "malloc")
+ return 0;
+
+ ProgramStateRef State = C.getState();
+
+ for (OwnershipAttr::args_iterator I = Att->args_begin(), E = Att->args_end();
+ I != E; ++I) {
+ ProgramStateRef StateI = FreeMemAux(C, CE, State, *I,
+ Att->getOwnKind() == OwnershipAttr::Holds);
+ if (StateI)
+ State = StateI;
+ }
+ return State;
+}
+
+ProgramStateRef MallocChecker::FreeMemAux(CheckerContext &C,
+ const CallExpr *CE,
+ ProgramStateRef state,
+ unsigned Num,
+ bool Hold) const {
+ if (CE->getNumArgs() < (Num + 1))
+ return 0;
+
+ const Expr *ArgExpr = CE->getArg(Num);
+ SVal ArgVal = state->getSVal(ArgExpr, C.getLocationContext());
+ if (!isa<DefinedOrUnknownSVal>(ArgVal))
+ return 0;
+ DefinedOrUnknownSVal location = cast<DefinedOrUnknownSVal>(ArgVal);
+
+ // Check for null dereferences.
+ if (!isa<Loc>(location))
+ return 0;
+
+ // The explicit NULL case, no operation is performed.
+ ProgramStateRef notNullState, nullState;
+ llvm::tie(notNullState, nullState) = state->assume(location);
+ if (nullState && !notNullState)
+ return 0;
+
+ // Unknown values could easily be okay
+ // Undefined values are handled elsewhere
+ if (ArgVal.isUnknownOrUndef())
+ return 0;
+
+ const MemRegion *R = ArgVal.getAsRegion();
+
+ // Nonlocs can't be freed, of course.
+ // Non-region locations (labels and fixed addresses) also shouldn't be freed.
+ if (!R) {
+ ReportBadFree(C, ArgVal, ArgExpr->getSourceRange());
+ return 0;
+ }
+
+ R = R->StripCasts();
+
+ // Blocks might show up as heap data, but should not be free()d
+ if (isa<BlockDataRegion>(R)) {
+ ReportBadFree(C, ArgVal, ArgExpr->getSourceRange());
+ return 0;
+ }
+
+ const MemSpaceRegion *MS = R->getMemorySpace();
+
+ // Parameters, locals, statics, and globals shouldn't be freed.
+ if (!(isa<UnknownSpaceRegion>(MS) || isa<HeapSpaceRegion>(MS))) {
+ // FIXME: at the time this code was written, malloc() regions were
+ // represented by conjured symbols, which are all in UnknownSpaceRegion.
+ // This means that there isn't actually anything from HeapSpaceRegion
+ // that should be freed, even though we allow it here.
+ // Of course, free() can work on memory allocated outside the current
+ // function, so UnknownSpaceRegion is always a possibility.
+ // False negatives are better than false positives.
+
+ ReportBadFree(C, ArgVal, ArgExpr->getSourceRange());
+ return 0;
+ }
+
+ const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R);
+ // Various cases could lead to non-symbol values here.
+ // For now, ignore them.
+ if (!SR)
+ return 0;
+
+ SymbolRef Sym = SR->getSymbol();
+ const RefState *RS = state->get<RegionState>(Sym);
+
+ // If the symbol has not been tracked, return. This is possible when free() is
+ // called on a pointer that does not get its pointee directly from malloc().
+ // Full support of this requires inter-procedural analysis.
+ if (!RS)
+ return 0;
+
+ // Check double free.
+ if (RS->isReleased()) {
+ if (ExplodedNode *N = C.generateSink()) {
+ if (!BT_DoubleFree)
+ BT_DoubleFree.reset(
+ new BugType("Double free", "Memory Error"));
+ BugReport *R = new BugReport(*BT_DoubleFree,
+ "Attempt to free released memory", N);
+ R->addRange(ArgExpr->getSourceRange());
+ R->markInteresting(Sym);
+ R->addVisitor(new MallocBugVisitor(Sym));
+ C.EmitReport(R);
+ }
+ return 0;
+ }
+
+ // Normal free.
+ if (Hold)
+ return state->set<RegionState>(Sym, RefState::getRelinquished(CE));
+ return state->set<RegionState>(Sym, RefState::getReleased(CE));
+}
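+
+// For illustration only (not part of the checker): minimal examples of the
+// diagnostics produced above, assuming the usual <stdlib.h> declarations; the
+// function names are made up.
+//
+//   void bad_free(void) {
+//     int x;
+//     free(&x);                 // "Bad free": argument is not heap memory
+//   }
+//
+//   void double_free(void) {
+//     char *p = (char *)malloc(10);
+//     free(p);
+//     free(p);                  // "Double free": p is already Released
+//   }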
+
+bool MallocChecker::SummarizeValue(raw_ostream &os, SVal V) {
+ if (nonloc::ConcreteInt *IntVal = dyn_cast<nonloc::ConcreteInt>(&V))
+ os << "an integer (" << IntVal->getValue() << ")";
+ else if (loc::ConcreteInt *ConstAddr = dyn_cast<loc::ConcreteInt>(&V))
+ os << "a constant address (" << ConstAddr->getValue() << ")";
+ else if (loc::GotoLabel *Label = dyn_cast<loc::GotoLabel>(&V))
+ os << "the address of the label '" << Label->getLabel()->getName() << "'";
+ else
+ return false;
+
+ return true;
+}
+
+bool MallocChecker::SummarizeRegion(raw_ostream &os,
+ const MemRegion *MR) {
+ switch (MR->getKind()) {
+ case MemRegion::FunctionTextRegionKind: {
+ const FunctionDecl *FD = cast<FunctionTextRegion>(MR)->getDecl();
+ if (FD)
+ os << "the address of the function '" << *FD << '\'';
+ else
+ os << "the address of a function";
+ return true;
+ }
+ case MemRegion::BlockTextRegionKind:
+ os << "block text";
+ return true;
+ case MemRegion::BlockDataRegionKind:
+ // FIXME: where the block came from?
+ os << "a block";
+ return true;
+ default: {
+ const MemSpaceRegion *MS = MR->getMemorySpace();
+
+ if (isa<StackLocalsSpaceRegion>(MS)) {
+ const VarRegion *VR = dyn_cast<VarRegion>(MR);
+ const VarDecl *VD;
+ if (VR)
+ VD = VR->getDecl();
+ else
+ VD = NULL;
+
+ if (VD)
+ os << "the address of the local variable '" << VD->getName() << "'";
+ else
+ os << "the address of a local stack variable";
+ return true;
+ }
+
+ if (isa<StackArgumentsSpaceRegion>(MS)) {
+ const VarRegion *VR = dyn_cast<VarRegion>(MR);
+ const VarDecl *VD;
+ if (VR)
+ VD = VR->getDecl();
+ else
+ VD = NULL;
+
+ if (VD)
+ os << "the address of the parameter '" << VD->getName() << "'";
+ else
+ os << "the address of a parameter";
+ return true;
+ }
+
+ if (isa<GlobalsSpaceRegion>(MS)) {
+ const VarRegion *VR = dyn_cast<VarRegion>(MR);
+ const VarDecl *VD;
+ if (VR)
+ VD = VR->getDecl();
+ else
+ VD = NULL;
+
+ if (VD) {
+ if (VD->isStaticLocal())
+ os << "the address of the static variable '" << VD->getName() << "'";
+ else
+ os << "the address of the global variable '" << VD->getName() << "'";
+ } else
+ os << "the address of a global variable";
+ return true;
+ }
+
+ return false;
+ }
+ }
+}
+
+void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
+ SourceRange range) const {
+ if (ExplodedNode *N = C.generateSink()) {
+ if (!BT_BadFree)
+ BT_BadFree.reset(new BugType("Bad free", "Memory Error"));
+
+ SmallString<100> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ const MemRegion *MR = ArgVal.getAsRegion();
+ if (MR) {
+ while (const ElementRegion *ER = dyn_cast<ElementRegion>(MR))
+ MR = ER->getSuperRegion();
+
+ // Special case for alloca()
+ if (isa<AllocaRegion>(MR))
+ os << "Argument to free() was allocated by alloca(), not malloc()";
+ else {
+ os << "Argument to free() is ";
+ if (SummarizeRegion(os, MR))
+ os << ", which is not memory allocated by malloc()";
+ else
+ os << "not memory allocated by malloc()";
+ }
+ } else {
+ os << "Argument to free() is ";
+ if (SummarizeValue(os, ArgVal))
+ os << ", which is not memory allocated by malloc()";
+ else
+ os << "not memory allocated by malloc()";
+ }
+
+ BugReport *R = new BugReport(*BT_BadFree, os.str(), N);
+ R->markInteresting(MR);
+ R->addRange(range);
+ C.EmitReport(R);
+ }
+}
+
+ProgramStateRef MallocChecker::ReallocMem(CheckerContext &C,
+ const CallExpr *CE,
+ bool FreesOnFail) const {
+ if (CE->getNumArgs() < 2)
+ return 0;
+
+ ProgramStateRef state = C.getState();
+ const Expr *arg0Expr = CE->getArg(0);
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal Arg0Val = state->getSVal(arg0Expr, LCtx);
+ if (!isa<DefinedOrUnknownSVal>(Arg0Val))
+ return 0;
+ DefinedOrUnknownSVal arg0Val = cast<DefinedOrUnknownSVal>(Arg0Val);
+
+ SValBuilder &svalBuilder = C.getSValBuilder();
+
+ DefinedOrUnknownSVal PtrEQ =
+ svalBuilder.evalEQ(state, arg0Val, svalBuilder.makeNull());
+
+ // Get the size argument. If there is no size arg then give up.
+ const Expr *Arg1 = CE->getArg(1);
+ if (!Arg1)
+ return 0;
+
+ // Get the value of the size argument.
+ SVal Arg1ValG = state->getSVal(Arg1, LCtx);
+ if (!isa<DefinedOrUnknownSVal>(Arg1ValG))
+ return 0;
+ DefinedOrUnknownSVal Arg1Val = cast<DefinedOrUnknownSVal>(Arg1ValG);
+
+ // Compare the size argument to 0.
+ DefinedOrUnknownSVal SizeZero =
+ svalBuilder.evalEQ(state, Arg1Val,
+ svalBuilder.makeIntValWithPtrWidth(0, false));
+
+ ProgramStateRef StatePtrIsNull, StatePtrNotNull;
+ llvm::tie(StatePtrIsNull, StatePtrNotNull) = state->assume(PtrEQ);
+ ProgramStateRef StateSizeIsZero, StateSizeNotZero;
+ llvm::tie(StateSizeIsZero, StateSizeNotZero) = state->assume(SizeZero);
+ // We only assume exceptional states if they are definitely true; if the
+ // state is under-constrained, assume regular realloc behavior.
+ bool PtrIsNull = StatePtrIsNull && !StatePtrNotNull;
+ bool SizeIsZero = StateSizeIsZero && !StateSizeNotZero;
+
+ // If the ptr is NULL and the size is not 0, the call is equivalent to
+ // malloc(size).
+ if (PtrIsNull && !SizeIsZero) {
+ ProgramStateRef stateMalloc = MallocMemAux(C, CE, CE->getArg(1),
+ UndefinedVal(), StatePtrIsNull);
+ return stateMalloc;
+ }
+
+ if (PtrIsNull && SizeIsZero)
+ return 0;
+
+ // Get the from and to pointer symbols as in toPtr = realloc(fromPtr, size).
+ assert(!PtrIsNull);
+ SymbolRef FromPtr = arg0Val.getAsSymbol();
+ SVal RetVal = state->getSVal(CE, LCtx);
+ SymbolRef ToPtr = RetVal.getAsSymbol();
+ if (!FromPtr || !ToPtr)
+ return 0;
+
+ // If the size is 0, free the memory.
+ if (SizeIsZero)
+ if (ProgramStateRef stateFree = FreeMemAux(C, CE, StateSizeIsZero, 0, false)) {
+ // The semantics of the return value are:
+ // If size was equal to 0, either NULL or a pointer suitable to be passed
+ // to free() is returned.
+ stateFree = stateFree->set<ReallocPairs>(ToPtr,
+ ReallocPair(FromPtr, FreesOnFail));
+ C.getSymbolManager().addSymbolDependency(ToPtr, FromPtr);
+ return stateFree;
+ }
+
+ // Default behavior.
+ if (ProgramStateRef stateFree = FreeMemAux(C, CE, state, 0, false)) {
+ // FIXME: We should copy the content of the original buffer.
+ ProgramStateRef stateRealloc = MallocMemAux(C, CE, CE->getArg(1),
+ UnknownVal(), stateFree);
+ if (!stateRealloc)
+ return 0;
+ stateRealloc = stateRealloc->set<ReallocPairs>(ToPtr,
+ ReallocPair(FromPtr, FreesOnFail));
+ C.getSymbolManager().addSymbolDependency(ToPtr, FromPtr);
+ return stateRealloc;
+ }
+ return 0;
+}
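+
+// For illustration only (not part of the checker): how the special cases above
+// map onto source patterns (each line considered as an independent case).
+//
+//   p = realloc(NULL, 16);  // ptr is NULL, size != 0: modeled as malloc(16)
+//   q = realloc(p, 0);      // size == 0: p is freed, and (q, p) is recorded
+//                           // as a ReallocPair so q can be tied back to p
+//   r = realloc(p, 32);     // default case: p freed, r allocated, and the
+//                           // (r, p) ReallocPair supports failure recovery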
+
+ProgramStateRef MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE){
+ if (CE->getNumArgs() < 2)
+ return 0;
+
+ ProgramStateRef state = C.getState();
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal count = state->getSVal(CE->getArg(0), LCtx);
+ SVal elementSize = state->getSVal(CE->getArg(1), LCtx);
+ SVal TotalSize = svalBuilder.evalBinOp(state, BO_Mul, count, elementSize,
+ svalBuilder.getContext().getSizeType());
+ SVal zeroVal = svalBuilder.makeZeroVal(svalBuilder.getContext().CharTy);
+
+ return MallocMemAux(C, CE, TotalSize, zeroVal, state);
+}
+
+LeakInfo
+MallocChecker::getAllocationSite(const ExplodedNode *N, SymbolRef Sym,
+ CheckerContext &C) const {
+ const LocationContext *LeakContext = N->getLocationContext();
+ // Walk the ExplodedGraph backwards and find the first node that referred to
+ // the tracked symbol.
+ const ExplodedNode *AllocNode = N;
+ const MemRegion *ReferenceRegion = 0;
+
+ while (N) {
+ ProgramStateRef State = N->getState();
+ if (!State->get<RegionState>(Sym))
+ break;
+
+ // Find the most recent expression bound to the symbol in the current
+ // context.
+ if (!ReferenceRegion) {
+ if (const MemRegion *MR = C.getLocationRegionIfPostStore(N)) {
+ SVal Val = State->getSVal(MR);
+ if (Val.getAsLocSymbol() == Sym)
+ ReferenceRegion = MR;
+ }
+ }
+
+ // The allocation node is the last node in the current context in which the
+ // symbol was tracked.
+ if (N->getLocationContext() == LeakContext)
+ AllocNode = N;
+ N = N->pred_empty() ? NULL : *(N->pred_begin());
+ }
+
+ ProgramPoint P = AllocNode->getLocation();
+ const Stmt *AllocationStmt = 0;
+ if (isa<StmtPoint>(P))
+ AllocationStmt = cast<StmtPoint>(P).getStmt();
+
+ return LeakInfo(AllocationStmt, ReferenceRegion);
+}
+
+void MallocChecker::reportLeak(SymbolRef Sym, ExplodedNode *N,
+ CheckerContext &C) const {
+ assert(N);
+ if (!BT_Leak) {
+ BT_Leak.reset(new BugType("Memory leak", "Memory Error"));
+ // Leaks should not be reported if they are post-dominated by a sink:
+ // (1) Sinks are higher importance bugs.
+ // (2) NoReturnFunctionChecker uses sink nodes to represent paths ending
+ // with __noreturn functions such as assert() or exit(). We choose not
+ // to report leaks on such paths.
+ BT_Leak->setSuppressOnSink(true);
+ }
+
+ // Most bug reports are cached at the location where they occurred.
+ // With leaks, we want to unique them by the location where they were
+ // allocated, and only report a single path.
+ PathDiagnosticLocation LocUsedForUniqueing;
+ const Stmt *AllocStmt = 0;
+ const MemRegion *Region = 0;
+ llvm::tie(AllocStmt, Region) = getAllocationSite(N, Sym, C);
+ if (AllocStmt)
+ LocUsedForUniqueing = PathDiagnosticLocation::createBegin(AllocStmt,
+ C.getSourceManager(), N->getLocationContext());
+
+ SmallString<200> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << "Memory is never released; potential leak";
+ if (Region) {
+ os << " of memory pointed to by '";
+ Region->dumpPretty(os);
+ os << '\'';
+ }
+
+ BugReport *R = new BugReport(*BT_Leak, os.str(), N, LocUsedForUniqueing);
+ R->markInteresting(Sym);
+ R->addVisitor(new MallocBugVisitor(Sym));
+ C.EmitReport(R);
+}
+
+void MallocChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const
+{
+ if (!SymReaper.hasDeadSymbols())
+ return;
+
+ ProgramStateRef state = C.getState();
+ RegionStateTy RS = state->get<RegionState>();
+ RegionStateTy::Factory &F = state->get_context<RegionState>();
+
+ bool generateReport = false;
+ llvm::SmallVector<SymbolRef, 2> Errors;
+ for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
+ if (SymReaper.isDead(I->first)) {
+ if (I->second.isAllocated()) {
+ generateReport = true;
+ Errors.push_back(I->first);
+ }
+ // Remove the dead symbol from the map.
+ RS = F.remove(RS, I->first);
+
+ }
+ }
+
+ // Cleanup the Realloc Pairs Map.
+ ReallocMap RP = state->get<ReallocPairs>();
+ for (ReallocMap::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
+ if (SymReaper.isDead(I->first) ||
+ SymReaper.isDead(I->second.ReallocatedSym)) {
+ state = state->remove<ReallocPairs>(I->first);
+ }
+ }
+
+ // Generate leak node.
+ static SimpleProgramPointTag Tag("MallocChecker : DeadSymbolsLeak");
+ ExplodedNode *N = C.addTransition(C.getState(), C.getPredecessor(), &Tag);
+
+ if (generateReport) {
+ for (llvm::SmallVector<SymbolRef, 2>::iterator
+ I = Errors.begin(), E = Errors.end(); I != E; ++I) {
+ reportLeak(*I, N, C);
+ }
+ }
+ C.addTransition(state->set<RegionState>(RS), N);
+}
+
+void MallocChecker::checkEndPath(CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+ RegionStateTy M = state->get<RegionState>();
+
+ // If inside inlined call, skip it.
+ if (C.getLocationContext()->getParent() != 0)
+ return;
+
+ for (RegionStateTy::iterator I = M.begin(), E = M.end(); I != E; ++I) {
+ RefState RS = I->second;
+ if (RS.isAllocated()) {
+ ExplodedNode *N = C.addTransition(state);
+ if (N)
+ reportLeak(I->first, N, C);
+ }
+ }
+}
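+
+// For illustration only (not part of the checker): the classic leak caught by
+// the dead-symbol and end-of-path checks above; 'leak' is a made-up name.
+//
+//   void leak(void) {
+//     char *p = (char *)malloc(16);
+//   }                           // p is still Allocated when it dies or the
+//                               // path ends: "Memory is never released;
+//                               // potential leak"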
+
+bool MallocChecker::checkEscape(SymbolRef Sym, const Stmt *S,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+ const RefState *RS = state->get<RegionState>(Sym);
+ if (!RS)
+ return false;
+
+ if (RS->isAllocated()) {
+ state = state->set<RegionState>(Sym, RefState::getEscaped(S));
+ C.addTransition(state);
+ return true;
+ }
+ return false;
+}
+
+void MallocChecker::checkPreStmt(const CallExpr *CE, CheckerContext &C) const {
+ if (isMemFunction(C.getCalleeDecl(CE), C.getASTContext()))
+ return;
+
+ // Check use after free, when a freed pointer is passed to a call.
+ ProgramStateRef State = C.getState();
+ for (CallExpr::const_arg_iterator I = CE->arg_begin(),
+ E = CE->arg_end(); I != E; ++I) {
+ const Expr *A = *I;
+ if (A->getType().getTypePtr()->isAnyPointerType()) {
+ SymbolRef Sym = State->getSVal(A, C.getLocationContext()).getAsSymbol();
+ if (!Sym)
+ continue;
+ if (checkUseAfterFree(Sym, C, A))
+ return;
+ }
+ }
+}
+
+void MallocChecker::checkPreStmt(const ReturnStmt *S, CheckerContext &C) const {
+ const Expr *E = S->getRetValue();
+ if (!E)
+ return;
+
+ // Check if we are returning a symbol.
+ SVal RetVal = C.getState()->getSVal(E, C.getLocationContext());
+ SymbolRef Sym = RetVal.getAsSymbol();
+ if (!Sym)
+ // If we are returning a field of the allocated struct or an array element,
+ // the caller could still free the memory.
+ // TODO: This logic should be a part of generic symbol escape callback.
+ if (const MemRegion *MR = RetVal.getAsRegion())
+ if (isa<FieldRegion>(MR) || isa<ElementRegion>(MR))
+ if (const SymbolicRegion *BMR =
+ dyn_cast<SymbolicRegion>(MR->getBaseRegion()))
+ Sym = BMR->getSymbol();
+ if (!Sym)
+ return;
+
+ // Check if we are returning freed memory.
+ if (checkUseAfterFree(Sym, C, E))
+ return;
+
+ // If this function body is not inlined, check if the symbol is escaping.
+ if (C.getLocationContext()->getParent() == 0)
+ checkEscape(Sym, E, C);
+}
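+
+// For illustration only (not part of the checker): the two situations the
+// return-statement logic above distinguishes; the function names are made up.
+//
+//   char *make(void) {
+//     char *p = (char *)malloc(8);
+//     return p;                 // top-level return: p is marked Escaped and
+//   }                           // ownership passes to the caller, no leak
+//
+//   char *broken(void) {
+//     char *p = (char *)malloc(8);
+//     free(p);
+//     return p;                 // "Use of memory after it is freed"
+//   }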
+
+// TODO: Blocks should be either inlined or should call invalidate regions
+// upon invocation. After that's in place, special casing here will not be
+// needed.
+void MallocChecker::checkPostStmt(const BlockExpr *BE,
+ CheckerContext &C) const {
+
+ // Scan the BlockDeclRefExprs for any object this checker may be tracking.
+ if (!BE->getBlockDecl()->hasCaptures())
+ return;
+
+ ProgramStateRef state = C.getState();
+ const BlockDataRegion *R =
+ cast<BlockDataRegion>(state->getSVal(BE,
+ C.getLocationContext()).getAsRegion());
+
+ BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
+ E = R->referenced_vars_end();
+
+ if (I == E)
+ return;
+
+ SmallVector<const MemRegion*, 10> Regions;
+ const LocationContext *LC = C.getLocationContext();
+ MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager();
+
+ for ( ; I != E; ++I) {
+ const VarRegion *VR = *I;
+ if (VR->getSuperRegion() == R) {
+ VR = MemMgr.getVarRegion(VR->getDecl(), LC);
+ }
+ Regions.push_back(VR);
+ }
+
+ state =
+ state->scanReachableSymbols<StopTrackingCallback>(Regions.data(),
+ Regions.data() + Regions.size()).getState();
+ C.addTransition(state);
+}
+
+bool MallocChecker::checkUseAfterFree(SymbolRef Sym, CheckerContext &C,
+ const Stmt *S) const {
+ assert(Sym);
+ const RefState *RS = C.getState()->get<RegionState>(Sym);
+ if (RS && RS->isReleased()) {
+ if (ExplodedNode *N = C.generateSink()) {
+ if (!BT_UseFree)
+ BT_UseFree.reset(new BugType("Use-after-free", "Memory Error"));
+
+ BugReport *R = new BugReport(*BT_UseFree,
+ "Use of memory after it is freed",N);
+ if (S)
+ R->addRange(S->getSourceRange());
+ R->markInteresting(Sym);
+ R->addVisitor(new MallocBugVisitor(Sym));
+ C.EmitReport(R);
+ return true;
+ }
+ }
+ return false;
+}
+
+// Check if the location is a freed symbolic region.
+void MallocChecker::checkLocation(SVal l, bool isLoad, const Stmt *S,
+ CheckerContext &C) const {
+ SymbolRef Sym = l.getLocSymbolInBase();
+ if (Sym)
+ checkUseAfterFree(Sym, C);
+}
+
+//===----------------------------------------------------------------------===//
+// Check various ways a symbol can be invalidated.
+// TODO: This logic (the next 3 functions) is copied/similar to the
+// RetainRelease checker. We might want to factor this out.
+//===----------------------------------------------------------------------===//
+
+// Stop tracking symbols when a value escapes as a result of checkBind.
+// A value escapes in four possible cases:
+// (1) we are binding to something that is not a memory region.
+// (2) we are binding to a memregion that does not have stack storage.
+// (3) we are binding to a memregion with stack storage that the store
+// does not understand.
+// (4) we are binding to a stack memregion that is not a plain local variable
+// (e.g. a struct field), which we do not currently model.
+void MallocChecker::checkBind(SVal loc, SVal val, const Stmt *S,
+ CheckerContext &C) const {
+ // Are we storing to something that causes the value to "escape"?
+ bool escapes = true;
+ ProgramStateRef state = C.getState();
+
+ if (loc::MemRegionVal *regionLoc = dyn_cast<loc::MemRegionVal>(&loc)) {
+ escapes = !regionLoc->getRegion()->hasStackStorage();
+
+ if (!escapes) {
+ // To test (3), generate a new state with the binding added. If it is
+ // the same state, then it escapes (since the store cannot represent
+ // the binding).
+ escapes = (state == (state->bindLoc(*regionLoc, val)));
+ }
+ if (!escapes) {
+ // Case 4: We do not currently model what happens when a symbol is
+ // assigned to a struct field, so be conservative here and let the symbol
+ // go. TODO: This could definitely be improved upon.
+ escapes = !isa<VarRegion>(regionLoc->getRegion());
+ }
+ }
+
+ // If the store can represent the binding and we are storing to something
+ // with local (stack) storage, just return and let the simulated state
+ // continue as is.
+ if (!escapes)
+ return;
+
+ // Otherwise, find all symbols referenced by 'val' that we are tracking
+ // and stop tracking them.
+ state = state->scanReachableSymbols<StopTrackingCallback>(val).getState();
+ C.addTransition(state);
+}
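+
+// For illustration only (not part of the checker): an escape through a bind,
+// as handled above; 'g' and 'cache' are made-up names.
+//
+//   static char *g;
+//   void cache(void) {
+//     char *p = (char *)malloc(16);
+//     g = p;                    // binding to non-stack storage: the symbol
+//   }                           // escapes and is no longer tracked, so no
+//                               // leak is reported on this path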
+
+// If a symbolic region is assumed to be NULL (or another constant), stop
+// tracking it - assuming that allocation failed on this path.
+ProgramStateRef MallocChecker::evalAssume(ProgramStateRef state,
+ SVal Cond,
+ bool Assumption) const {
+ RegionStateTy RS = state->get<RegionState>();
+ for (RegionStateTy::iterator I = RS.begin(), E = RS.end(); I != E; ++I) {
+ // If the symbol is assumed to be NULL or another constant, this will
+ // return an APSInt*.
+ if (state->getSymVal(I.getKey()))
+ state = state->remove<RegionState>(I.getKey());
+ }
+
+ // Realloc returns 0 when reallocation fails, which means that we should
+ // restore the state of the pointer being reallocated.
+ ReallocMap RP = state->get<ReallocPairs>();
+ for (ReallocMap::iterator I = RP.begin(), E = RP.end(); I != E; ++I) {
+ // If the symbol is assumed to be NULL or another constant, this will
+ // return an APSInt*.
+ if (state->getSymVal(I.getKey())) {
+ SymbolRef ReallocSym = I.getData().ReallocatedSym;
+ const RefState *RS = state->get<RegionState>(ReallocSym);
+ if (RS) {
+ if (RS->isReleased() && ! I.getData().IsFreeOnFailure)
+ state = state->set<RegionState>(ReallocSym,
+ RefState::getAllocateUnchecked(RS->getStmt()));
+ }
+ state = state->remove<ReallocPairs>(I.getKey());
+ }
+ }
+
+ return state;
+}
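+
+// For illustration only (not part of the checker): the realloc-failure
+// recovery implemented above.
+//
+//   size_t n = 100;
+//   char *buf = (char *)malloc(n);
+//   char *tmp = (char *)realloc(buf, 2 * n);
+//   if (!tmp) {                 // assuming tmp == NULL restores buf from
+//     free(buf);                // Released back to Allocated, so this free()
+//     return;                   // is not reported as a double free
+//   }
+//   buf = tmp;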
+
+// Check whether the called function is known to us; if it is not, we
+// conservatively assume it can free/reallocate its pointer arguments.
+// (We assume that the pointers cannot escape through calls to system
+// functions not handled by this checker.)
+bool MallocChecker::doesNotFreeMemory(const CallOrObjCMessage *Call,
+ ProgramStateRef State) const {
+ if (!Call)
+ return false;
+
+ // For now, assume that any C++ call can free memory.
+ // TODO: If we want to be more optimistic here, we'll need to make sure that
+ // regions escape to C++ containers. They seem to do that even now, but for
+ // mysterious reasons.
+ if (Call->isCXXCall())
+ return false;
+
+ const Decl *D = Call->getDecl();
+ if (!D)
+ return false;
+
+ ASTContext &ASTC = State->getStateManager().getContext();
+
+ // If it's one of the allocation functions we can reason about, we model
+ // its behavior explicitly.
+ if (isa<FunctionDecl>(D) && isMemFunction(cast<FunctionDecl>(D), ASTC)) {
+ return true;
+ }
+
+ // If it's not a system call, assume it frees memory.
+ SourceManager &SM = ASTC.getSourceManager();
+ if (!SM.isInSystemHeader(D->getLocation()))
+ return false;
+
+ // Process C/ObjC functions.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // White list the system functions whose arguments escape.
+ const IdentifierInfo *II = FD->getIdentifier();
+ if (!II)
+ return true;
+ StringRef FName = II->getName();
+
+ // White list thread local storage.
+ if (FName.equals("pthread_setspecific"))
+ return false;
+
+ // White list the 'XXXNoCopy' ObjC functions.
+ if (FName.endswith("NoCopy")) {
+ // Look for the deallocator argument. We know that the memory ownership
+ // is not transferred only if the deallocator argument is
+ // 'kCFAllocatorNull'.
+ for (unsigned i = 1; i < Call->getNumArgs(); ++i) {
+ const Expr *ArgE = Call->getArg(i)->IgnoreParenCasts();
+ if (const DeclRefExpr *DE = dyn_cast<DeclRefExpr>(ArgE)) {
+ StringRef DeallocatorName = DE->getFoundDecl()->getName();
+ if (DeallocatorName == "kCFAllocatorNull")
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // PR12101
+ // Many CoreFoundation and CoreGraphics functions might allow a tracked object
+ // to escape.
+ if (Call->isCFCGAllowingEscape(FName))
+ return false;
+
+ // Associating streams with malloced buffers. The pointer can escape if
+ // 'closefn' is specified (and if that function does free memory).
+ // Currently, we do not inspect the 'closefn' function (PR12101).
+ if (FName == "funopen")
+ if (Call->getNumArgs() >= 5 && !Call->getArgSVal(4).isConstant(0))
+ return false;
+
+ // Do not warn on pointers passed to 'setbuf' when used with std streams;
+ // these leaks might be intentional when setting the buffer for stdio.
+ // http://stackoverflow.com/questions/2671151/who-frees-setvbuf-buffer
+ if (FName == "setbuf" || FName == "setbuffer" ||
+ FName == "setlinebuf" || FName == "setvbuf") {
+ if (Call->getNumArgs() >= 1)
+ if (const DeclRefExpr *Arg =
+ dyn_cast<DeclRefExpr>(Call->getArg(0)->IgnoreParenCasts()))
+ if (const VarDecl *D = dyn_cast<VarDecl>(Arg->getDecl()))
+ if (D->getCanonicalDecl()->getName().find("std")
+ != StringRef::npos)
+ return false;
+ }
+
+ // A number of other functions take ownership of a pointer (see the retain/
+ // release checker). Not all of the parameters here are invalidated, but the
+ // malloc checker cannot differentiate between them. The right way to do
+ // this would be to implement a pointer-escapes callback.
+ if (FName == "CVPixelBufferCreateWithBytes" ||
+ FName == "CGBitmapContextCreateWithData" ||
+ FName == "CVPixelBufferCreateWithPlanarBytes" ||
+ FName == "OSAtomicEnqueue") {
+ return false;
+ }
+
+ // Whitelist NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
+ // be deallocated by NSMapRemove.
+ if (FName.startswith("NS") && (FName.find("Insert") != StringRef::npos))
+ return false;
+
+ // Otherwise, assume that the function does not free memory.
+ // Most system calls do not free the memory.
+ return true;
+
+ // Process ObjC functions.
+ } else if (const ObjCMethodDecl * ObjCD = dyn_cast<ObjCMethodDecl>(D)) {
+ Selector S = ObjCD->getSelector();
+
+ // White list the ObjC functions which do free memory.
+ // - Anything containing 'freeWhenDone' param set to 1.
+ // Ex: dataWithBytesNoCopy:length:freeWhenDone.
+ for (unsigned i = 1; i < S.getNumArgs(); ++i) {
+ if (S.getNameForSlot(i).equals("freeWhenDone")) {
+ if (Call->getArgSVal(i).isConstant(1))
+ return false;
+ else
+ return true;
+ }
+ }
+
+ // If the first selector ends with NoCopy, assume that the ownership is
+ // transferred as well.
+ // Ex: [NSData dataWithBytesNoCopy:bytes length:10];
+ if (S.getNameForSlot(0).endswith("NoCopy")) {
+ return false;
+ }
+
+ // Otherwise, assume that the function does not free memory.
+ // Most system calls do not free the memory.
+ return true;
+ }
+
+ // Otherwise, assume that the function can free memory.
+ return false;
+
+}
+
+// If the symbol we are tracking is invalidated, but not explicitly (ex: the &p
+// escapes, when we are tracking p), do not track the symbol as we cannot reason
+// about it anymore.
+ProgramStateRef
+MallocChecker::checkRegionChanges(ProgramStateRef State,
+ const StoreManager::InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) const {
+ if (!invalidated || invalidated->empty())
+ return State;
+ llvm::SmallPtrSet<SymbolRef, 8> WhitelistedSymbols;
+
+ // If it's a call which might free or reallocate memory, we assume that all
+ // regions (explicit and implicit) escaped.
+
+ // Otherwise, whitelist explicit pointers; we still can track them.
+ if (!Call || doesNotFreeMemory(Call, State)) {
+ for (ArrayRef<const MemRegion *>::iterator I = ExplicitRegions.begin(),
+ E = ExplicitRegions.end(); I != E; ++I) {
+ if (const SymbolicRegion *R = (*I)->StripCasts()->getAs<SymbolicRegion>())
+ WhitelistedSymbols.insert(R->getSymbol());
+ }
+ }
+
+ for (StoreManager::InvalidatedSymbols::const_iterator I=invalidated->begin(),
+ E = invalidated->end(); I!=E; ++I) {
+ SymbolRef sym = *I;
+ if (WhitelistedSymbols.count(sym))
+ continue;
+ // The symbol escaped.
+ if (const RefState *RS = State->get<RegionState>(sym))
+ State = State->set<RegionState>(sym, RefState::getEscaped(RS->getStmt()));
+ }
+ return State;
+}
+
+static SymbolRef findFailedReallocSymbol(ProgramStateRef currState,
+ ProgramStateRef prevState) {
+ ReallocMap currMap = currState->get<ReallocPairs>();
+ ReallocMap prevMap = prevState->get<ReallocPairs>();
+
+ for (ReallocMap::iterator I = prevMap.begin(), E = prevMap.end();
+ I != E; ++I) {
+ SymbolRef sym = I.getKey();
+ if (!currMap.lookup(sym))
+ return sym;
+ }
+
+ return NULL;
+}
+
+PathDiagnosticPiece *
+MallocChecker::MallocBugVisitor::VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ ProgramStateRef state = N->getState();
+ ProgramStateRef statePrev = PrevN->getState();
+
+ const RefState *RS = state->get<RegionState>(Sym);
+ const RefState *RSPrev = statePrev->get<RegionState>(Sym);
+ if (!RS && !RSPrev)
+ return 0;
+
+ const Stmt *S = 0;
+ const char *Msg = 0;
+ StackHintGeneratorForSymbol *StackHint = 0;
+
+ // Retrieve the associated statement.
+ ProgramPoint ProgLoc = N->getLocation();
+ if (isa<StmtPoint>(ProgLoc))
+ S = cast<StmtPoint>(ProgLoc).getStmt();
+ // If an assumption was made on a branch, it should be caught
+ // here by looking at the state transition.
+ if (isa<BlockEdge>(ProgLoc)) {
+ const CFGBlock *srcBlk = cast<BlockEdge>(ProgLoc).getSrc();
+ S = srcBlk->getTerminator();
+ }
+ if (!S)
+ return 0;
+
+ // Find out if this is an interesting point and what is the kind.
+ if (Mode == Normal) {
+ if (isAllocated(RS, RSPrev, S)) {
+ Msg = "Memory is allocated";
+ StackHint = new StackHintGeneratorForSymbol(Sym,
+ "Returned allocated memory");
+ } else if (isReleased(RS, RSPrev, S)) {
+ Msg = "Memory is released";
+ StackHint = new StackHintGeneratorForSymbol(Sym,
+ "Returned released memory");
+ } else if (isReallocFailedCheck(RS, RSPrev, S)) {
+ Mode = ReallocationFailed;
+ Msg = "Reallocation failed";
+ StackHint = new StackHintGeneratorForReallocationFailed(Sym,
+ "Reallocation failed");
+
+ if (SymbolRef sym = findFailedReallocSymbol(state, statePrev)) {
+ // Is it possible to fail two reallocs WITHOUT testing in between?
+ assert((!FailedReallocSymbol || FailedReallocSymbol == sym) &&
+ "We only support one failed realloc at a time.");
+ BR.markInteresting(sym);
+ FailedReallocSymbol = sym;
+ }
+ }
+
+ // We are in a special mode if a reallocation failed later in the path.
+ } else if (Mode == ReallocationFailed) {
+ assert(FailedReallocSymbol && "No symbol to look for.");
+
+ // Is this the first appearance of the reallocated symbol?
+ if (!statePrev->get<RegionState>(FailedReallocSymbol)) {
+ // If we ever hit this assert, that means BugReporter has decided to skip
+ // node pairs or visit them out of order.
+ assert(state->get<RegionState>(FailedReallocSymbol) &&
+ "Missed the reallocation point");
+
+ // We're at the reallocation point.
+ Msg = "Attempt to reallocate memory";
+ StackHint = new StackHintGeneratorForSymbol(Sym,
+ "Returned reallocated memory");
+ FailedReallocSymbol = NULL;
+ Mode = Normal;
+ }
+ }
+
+ if (!Msg)
+ return 0;
+ assert(StackHint);
+
+ // Generate the extra diagnostic.
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ return new PathDiagnosticEventPiece(Pos, Msg, true, StackHint);
+}
+
+
+#define REGISTER_CHECKER(name) \
+void ento::register##name(CheckerManager &mgr) {\
+ registerCStringCheckerBasic(mgr); \
+ mgr.registerChecker<MallocChecker>()->Filter.C##name = true;\
+}
+
+REGISTER_CHECKER(MallocPessimistic)
+REGISTER_CHECKER(MallocOptimistic)
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
new file mode 100644
index 0000000..daec418
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocOverflowSecurityChecker.cpp
@@ -0,0 +1,267 @@
+// MallocOverflowSecurityChecker.cpp - Check for malloc overflows -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker detects a common memory allocation security flaw.
+// Suppose 'unsigned int n' comes from an untrusted source. If the
+// code looks like 'malloc (n * 4)', and an attacker can make 'n' be,
+// say, MAX_UINT/4+2, then instead of allocating the correct 'n' 4-byte
+// elements, this will actually allocate only two because of overflow.
+// Then when the rest of the program attempts to store values past the
+// second element, these values will actually overwrite other items in
+// the heap, probably allowing the attacker to execute arbitrary code.
+//
+//===----------------------------------------------------------------------===//
+
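+// For illustration only (not part of the checker): the flaw described above,
+// with 'read_untrusted' standing in for any attacker-controlled source.
+//
+//   unsigned n = read_untrusted();
+//   int *v = (int *)malloc(n * sizeof(int));  // the multiplication may wrap
+//
+// A later comparison involving 'n' (other than against zero) is treated as a
+// guard and suppresses the warning; see CheckOverflowOps below.
+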
+#include "ClangSACheckers.h"
+#include "clang/AST/EvaluatedExprVisitor.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "llvm/ADT/SmallVector.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+struct MallocOverflowCheck {
+ const BinaryOperator *mulop;
+ const Expr *variable;
+
+ MallocOverflowCheck (const BinaryOperator *m, const Expr *v)
+ : mulop(m), variable (v)
+ {}
+};
+
+class MallocOverflowSecurityChecker : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager &mgr,
+ BugReporter &BR) const;
+
+ void CheckMallocArgument(
+ llvm::SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
+ const Expr *TheArgument, ASTContext &Context) const;
+
+ void OutputPossibleOverflows(
+ llvm::SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
+ const Decl *D, BugReporter &BR, AnalysisManager &mgr) const;
+
+};
+} // end anonymous namespace
+
+void MallocOverflowSecurityChecker::CheckMallocArgument(
+ llvm::SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
+ const Expr *TheArgument,
+ ASTContext &Context) const {
+
+ /* Look for a linear combination with a single variable, and at least
+ one multiplication.
+ Reject anything that applies to the variable: an explicit cast,
+ conditional expression, an operation that could reduce the range
+ of the result, or anything too complicated :-). */
+ const Expr * e = TheArgument;
+ const BinaryOperator * mulop = NULL;
+
+ for (;;) {
+ e = e->IgnoreParenImpCasts();
+ if (isa<BinaryOperator>(e)) {
+ const BinaryOperator * binop = dyn_cast<BinaryOperator>(e);
+ BinaryOperatorKind opc = binop->getOpcode();
+ // TODO: ignore multiplications by 1, reject if multiplied by 0.
+ if (mulop == NULL && opc == BO_Mul)
+ mulop = binop;
+ if (opc != BO_Mul && opc != BO_Add && opc != BO_Sub && opc != BO_Shl)
+ return;
+
+ const Expr *lhs = binop->getLHS();
+ const Expr *rhs = binop->getRHS();
+ if (rhs->isEvaluatable(Context))
+ e = lhs;
+ else if ((opc == BO_Add || opc == BO_Mul)
+ && lhs->isEvaluatable(Context))
+ e = rhs;
+ else
+ return;
+ }
+ else if (isa<DeclRefExpr>(e) || isa<MemberExpr>(e))
+ break;
+ else
+ return;
+ }
+
+ if (mulop == NULL)
+ return;
+
+ // We've found the right structure of malloc argument; now save
+ // the data so that, when the body of the function is completely available,
+ // we can check for comparisons.
+
+ // TODO: Could push this into the innermost scope where 'e' is
+ // defined, rather than the whole function.
+ PossibleMallocOverflows.push_back(MallocOverflowCheck(mulop, e));
+}
+
+namespace {
+// A worker class for OutputPossibleOverflows.
+class CheckOverflowOps :
+ public EvaluatedExprVisitor<CheckOverflowOps> {
+public:
+ typedef llvm::SmallVectorImpl<MallocOverflowCheck> theVecType;
+
+private:
+ theVecType &toScanFor;
+ ASTContext &Context;
+
+ bool isIntZeroExpr(const Expr *E) const {
+ if (!E->getType()->isIntegralOrEnumerationType())
+ return false;
+ llvm::APSInt Result;
+ if (E->EvaluateAsInt(Result, Context))
+ return Result == 0;
+ return false;
+ }
+
+ void CheckExpr(const Expr *E_p) {
+ const Expr *E = E_p->IgnoreParenImpCasts();
+
+ theVecType::iterator i = toScanFor.end();
+ theVecType::iterator e = toScanFor.begin();
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(E)) {
+ const Decl * EdreD = DR->getDecl();
+ while (i != e) {
+ --i;
+ if (const DeclRefExpr *DR_i = dyn_cast<DeclRefExpr>(i->variable)) {
+ if (DR_i->getDecl() == EdreD)
+ i = toScanFor.erase(i);
+ }
+ }
+ }
+ else if (isa<MemberExpr>(E)) {
+ // No points-to analysis, just look at the member
+ const Decl * EmeMD = dyn_cast<MemberExpr>(E)->getMemberDecl();
+ while (i != e) {
+ --i;
+ if (isa<MemberExpr>(i->variable)) {
+ if (dyn_cast<MemberExpr>(i->variable)->getMemberDecl() == EmeMD)
+ i = toScanFor.erase (i);
+ }
+ }
+ }
+ }
+
+ public:
+ void VisitBinaryOperator(BinaryOperator *E) {
+ if (E->isComparisonOp()) {
+ const Expr * lhs = E->getLHS();
+ const Expr * rhs = E->getRHS();
+ // Ignore comparisons against zero, since they generally don't
+ // protect against an overflow.
+ if (!isIntZeroExpr(lhs) && ! isIntZeroExpr(rhs)) {
+ CheckExpr(lhs);
+ CheckExpr(rhs);
+ }
+ }
+ EvaluatedExprVisitor<CheckOverflowOps>::VisitBinaryOperator(E);
+ }
+
+ /* We specifically ignore loop conditions, because they're typically
+ not error checks. */
+ void VisitWhileStmt(WhileStmt *S) {
+ return this->Visit(S->getBody());
+ }
+ void VisitForStmt(ForStmt *S) {
+ return this->Visit(S->getBody());
+ }
+ void VisitDoStmt(DoStmt *S) {
+ return this->Visit(S->getBody());
+ }
+
+ CheckOverflowOps(theVecType &v, ASTContext &ctx)
+ : EvaluatedExprVisitor<CheckOverflowOps>(ctx),
+ toScanFor(v), Context(ctx)
+ { }
+ };
+}
+
+// OutputPossibleOverflows - We've found a possible overflow earlier;
+// now check whether Body might contain a comparison which might be
+// preventing the overflow.
+// This doesn't do flow analysis, range analysis, or points-to analysis; it's
+// just a dumb "is there a comparison" scan. The aim here is to
+// detect the most blatant cases of overflow and educate the
+// programmer.
+void MallocOverflowSecurityChecker::OutputPossibleOverflows(
+ llvm::SmallVectorImpl<MallocOverflowCheck> &PossibleMallocOverflows,
+ const Decl *D, BugReporter &BR, AnalysisManager &mgr) const {
+ // By far the most common case: nothing to check.
+ if (PossibleMallocOverflows.empty())
+ return;
+
+ // Delete any possible overflows which have a comparison.
+ CheckOverflowOps c(PossibleMallocOverflows, BR.getContext());
+ c.Visit(mgr.getAnalysisDeclContext(D)->getBody());
+
+ // Output warnings for all overflows that are left.
+ for (CheckOverflowOps::theVecType::iterator
+ i = PossibleMallocOverflows.begin(),
+ e = PossibleMallocOverflows.end();
+ i != e;
+ ++i) {
+ SourceRange R = i->mulop->getSourceRange();
+ BR.EmitBasicReport(D, "malloc() size overflow", categories::UnixAPI,
+ "the computation of the size of the memory allocation may overflow",
+ PathDiagnosticLocation::createOperatorLoc(i->mulop,
+ BR.getSourceManager()), &R, 1);
+ }
+}
+
+void MallocOverflowSecurityChecker::checkASTCodeBody(const Decl *D,
+ AnalysisManager &mgr,
+ BugReporter &BR) const {
+
+ CFG *cfg = mgr.getCFG(D);
+ if (!cfg)
+ return;
+
+ // A list of variables referenced in possibly overflowing malloc operands.
+ llvm::SmallVector<MallocOverflowCheck, 2> PossibleMallocOverflows;
+
+ for (CFG::iterator it = cfg->begin(), ei = cfg->end(); it != ei; ++it) {
+ CFGBlock *block = *it;
+ for (CFGBlock::iterator bi = block->begin(), be = block->end();
+ bi != be; ++bi) {
+ if (const CFGStmt *CS = bi->getAs<CFGStmt>()) {
+ if (const CallExpr *TheCall = dyn_cast<CallExpr>(CS->getStmt())) {
+ // Get the callee.
+ const FunctionDecl *FD = TheCall->getDirectCallee();
+
+ if (!FD)
+ return;
+
+ // Get the name of the callee. If it's a builtin, strip off the prefix.
+ IdentifierInfo *FnInfo = FD->getIdentifier();
+ if (!FnInfo)
+ return;
+
+ if (FnInfo->isStr ("malloc") || FnInfo->isStr ("_MALLOC")) {
+ if (TheCall->getNumArgs() == 1)
+ CheckMallocArgument(PossibleMallocOverflows, TheCall->getArg(0),
+ mgr.getASTContext());
+ }
+ }
+ }
+ }
+ }
+
+ OutputPossibleOverflows(PossibleMallocOverflows, D, BR, mgr);
+}
+
+void ento::registerMallocOverflowSecurityChecker(CheckerManager &mgr) {
+ mgr.registerChecker<MallocOverflowSecurityChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
new file mode 100644
index 0000000..08a9da1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -0,0 +1,211 @@
+// MallocSizeofChecker.cpp - Check for dubious malloc arguments ---*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Reports inconsistencies between the casted type of the return value of a
+// malloc/calloc/realloc call and the operand of any sizeof expressions
+// contained within its argument(s).
+//
+//===----------------------------------------------------------------------===//
+
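+// For illustration only (not part of the checker): the kind of inconsistency
+// reported, assuming a 'struct S' larger than an int.
+//
+//   struct S *p = (struct S *)malloc(sizeof(int));  // sizeof operand 'int'
+//                                                   // does not match the
+//                                                   // casted type 'struct S *'
+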
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeLoc.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+typedef std::pair<const TypeSourceInfo *, const CallExpr *> TypeCallPair;
+typedef llvm::PointerUnion<const Stmt *, const VarDecl *> ExprParent;
+
+class CastedAllocFinder
+ : public ConstStmtVisitor<CastedAllocFinder, TypeCallPair> {
+ IdentifierInfo *II_malloc, *II_calloc, *II_realloc;
+
+public:
+ struct CallRecord {
+ ExprParent CastedExprParent;
+ const Expr *CastedExpr;
+ const TypeSourceInfo *ExplicitCastType;
+ const CallExpr *AllocCall;
+
+ CallRecord(ExprParent CastedExprParent, const Expr *CastedExpr,
+ const TypeSourceInfo *ExplicitCastType,
+ const CallExpr *AllocCall)
+ : CastedExprParent(CastedExprParent), CastedExpr(CastedExpr),
+ ExplicitCastType(ExplicitCastType), AllocCall(AllocCall) {}
+ };
+
+ typedef std::vector<CallRecord> CallVec;
+ CallVec Calls;
+
+ CastedAllocFinder(ASTContext *Ctx) :
+ II_malloc(&Ctx->Idents.get("malloc")),
+ II_calloc(&Ctx->Idents.get("calloc")),
+ II_realloc(&Ctx->Idents.get("realloc")) {}
+
+ void VisitChild(ExprParent Parent, const Stmt *S) {
+ TypeCallPair AllocCall = Visit(S);
+ if (AllocCall.second && AllocCall.second != S)
+ Calls.push_back(CallRecord(Parent, cast<Expr>(S), AllocCall.first,
+ AllocCall.second));
+ }
+
+ void VisitChildren(const Stmt *S) {
+ for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+ I!=E; ++I)
+ if (const Stmt *child = *I)
+ VisitChild(S, child);
+ }
+
+ TypeCallPair VisitCastExpr(const CastExpr *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ TypeCallPair VisitExplicitCastExpr(const ExplicitCastExpr *E) {
+ return TypeCallPair(E->getTypeInfoAsWritten(),
+ Visit(E->getSubExpr()).second);
+ }
+
+ TypeCallPair VisitParenExpr(const ParenExpr *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ TypeCallPair VisitStmt(const Stmt *S) {
+ VisitChildren(S);
+ return TypeCallPair();
+ }
+
+ TypeCallPair VisitCallExpr(const CallExpr *E) {
+ VisitChildren(E);
+ const FunctionDecl *FD = E->getDirectCallee();
+ if (FD) {
+ IdentifierInfo *II = FD->getIdentifier();
+ if (II == II_malloc || II == II_calloc || II == II_realloc)
+ return TypeCallPair((const TypeSourceInfo *)0, E);
+ }
+ return TypeCallPair();
+ }
+
+ TypeCallPair VisitDeclStmt(const DeclStmt *S) {
+ for (DeclStmt::const_decl_iterator I = S->decl_begin(), E = S->decl_end();
+ I!=E; ++I)
+ if (const VarDecl *VD = dyn_cast<VarDecl>(*I))
+ if (const Expr *Init = VD->getInit())
+ VisitChild(VD, Init);
+ return TypeCallPair();
+ }
+};
+
+class SizeofFinder : public ConstStmtVisitor<SizeofFinder> {
+public:
+ std::vector<const UnaryExprOrTypeTraitExpr *> Sizeofs;
+
+ void VisitBinMul(const BinaryOperator *E) {
+ Visit(E->getLHS());
+ Visit(E->getRHS());
+ }
+
+ void VisitBinAdd(const BinaryOperator *E) {
+ Visit(E->getLHS());
+ Visit(E->getRHS());
+ }
+
+ void VisitImplicitCastExpr(const ImplicitCastExpr *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ void VisitParenExpr(const ParenExpr *E) {
+ return Visit(E->getSubExpr());
+ }
+
+ void VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *E) {
+ if (E->getKind() != UETT_SizeOf)
+ return;
+
+ Sizeofs.push_back(E);
+ }
+};
+
+class MallocSizeofChecker : public Checker<check::ASTCodeBody> {
+public:
+ void checkASTCodeBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ AnalysisDeclContext *ADC = mgr.getAnalysisDeclContext(D);
+ CastedAllocFinder Finder(&BR.getContext());
+ Finder.Visit(D->getBody());
+ for (CastedAllocFinder::CallVec::iterator i = Finder.Calls.begin(),
+ e = Finder.Calls.end(); i != e; ++i) {
+ QualType CastedType = i->CastedExpr->getType();
+ if (!CastedType->isPointerType())
+ continue;
+ QualType PointeeType = CastedType->getAs<PointerType>()->getPointeeType();
+ if (PointeeType->isVoidType())
+ continue;
+
+ for (CallExpr::const_arg_iterator ai = i->AllocCall->arg_begin(),
+ ae = i->AllocCall->arg_end(); ai != ae; ++ai) {
+ if (!(*ai)->getType()->isIntegerType())
+ continue;
+
+ SizeofFinder SFinder;
+ SFinder.Visit(*ai);
+ if (SFinder.Sizeofs.size() != 1)
+ continue;
+
+ QualType SizeofType = SFinder.Sizeofs[0]->getTypeOfArgument();
+ if (!BR.getContext().hasSameUnqualifiedType(PointeeType, SizeofType)) {
+ const TypeSourceInfo *TSI = 0;
+ if (i->CastedExprParent.is<const VarDecl *>()) {
+ TSI =
+ i->CastedExprParent.get<const VarDecl *>()->getTypeSourceInfo();
+ } else {
+ TSI = i->ExplicitCastType;
+ }
+
+ SmallString<64> buf;
+ llvm::raw_svector_ostream OS(buf);
+
+ OS << "Result of '"
+ << i->AllocCall->getDirectCallee()->getIdentifier()->getName()
+ << "' is converted to type '"
+ << CastedType.getAsString() << "', whose pointee type '"
+ << PointeeType.getAsString() << "' is incompatible with "
+ << "sizeof operand type '" << SizeofType.getAsString() << "'";
+ llvm::SmallVector<SourceRange, 4> Ranges;
+ Ranges.push_back(i->AllocCall->getCallee()->getSourceRange());
+ Ranges.push_back(SFinder.Sizeofs[0]->getSourceRange());
+ if (TSI)
+ Ranges.push_back(TSI->getTypeLoc().getSourceRange());
+
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::createBegin(i->AllocCall->getCallee(),
+ BR.getSourceManager(), ADC);
+
+ BR.EmitBasicReport(D, "allocator sizeof operand mismatch",
+ categories::UnixAPI,
+ OS.str(),
+ L, Ranges.data(), Ranges.size());
+ }
+ }
+ }
+ }
+};
+
+}
+
+void ento::registerMallocSizeofChecker(CheckerManager &mgr) {
+ mgr.registerChecker<MallocSizeofChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
new file mode 100644
index 0000000..4989ba8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSAutoreleasePoolChecker.cpp
@@ -0,0 +1,89 @@
+//=- NSAutoreleasePoolChecker.cpp --------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a NSAutoreleasePoolChecker, a small checker that warns
+// about subpar uses of NSAutoreleasePool. Note that while the check itself
+// (in its current form) could be written as a flow-insensitive check, it
+// could potentially be enhanced in the future with flow-sensitive information.
+// It is also a good example of the CheckerVisitor interface.
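+//
+// For example, under garbage collection the checker suggests -drain over
+// -release (illustrative snippet):
+// @code
+//   NSAutoreleasePool *pool = [[NSAutoreleasePool alloc] init];
+//   // ... work that autoreleases objects ...
+//   [pool release]; // warning: use -drain instead of -release
+// @endcode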
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Decl.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class NSAutoreleasePoolChecker
+ : public Checker<check::PreObjCMessage> {
+ mutable OwningPtr<BugType> BT;
+ mutable Selector releaseS;
+
+public:
+ void checkPreObjCMessage(ObjCMessage msg, CheckerContext &C) const;
+};
+
+} // end anonymous namespace
+
+void NSAutoreleasePoolChecker::checkPreObjCMessage(ObjCMessage msg,
+ CheckerContext &C) const {
+
+ const Expr *receiver = msg.getInstanceReceiver();
+ if (!receiver)
+ return;
+
+ // FIXME: Enhance with value-tracking information instead of consulting
+ // the type of the expression.
+ const ObjCObjectPointerType* PT =
+ receiver->getType()->getAs<ObjCObjectPointerType>();
+
+ if (!PT)
+ return;
+ const ObjCInterfaceDecl *OD = PT->getInterfaceDecl();
+ if (!OD)
+ return;
+ if (!OD->getIdentifier()->getName().equals("NSAutoreleasePool"))
+ return;
+
+ if (releaseS.isNull())
+ releaseS = GetNullarySelector("release", C.getASTContext());
+ // Sending 'release' message?
+ if (msg.getSelector() != releaseS)
+ return;
+
+ if (!BT)
+ BT.reset(new BugType("Use -drain instead of -release",
+ "API Upgrade (Apple)"));
+
+ ExplodedNode *N = C.addTransition();
+ if (!N) {
+ assert(0);
+ return;
+ }
+
+ BugReport *Report = new BugReport(*BT, "Use -drain instead of -release when "
+ "using NSAutoreleasePool and garbage collection", N);
+ Report->addRange(msg.getSourceRange());
+ C.EmitReport(Report);
+}
+
+void ento::registerNSAutoreleasePoolChecker(CheckerManager &mgr) {
+ if (mgr.getLangOpts().getGC() != LangOptions::NonGC)
+ mgr.registerChecker<NSAutoreleasePoolChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
new file mode 100644
index 0000000..f826573
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
@@ -0,0 +1,334 @@
+//=- NSErrorChecker.cpp - Coding conventions for uses of NSError -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a CheckNSError, a flow-insensitive check that determines
+// whether Objective-C methods taking an NSError** parameter (and C functions
+// taking CFErrorRef*) have a non-void return type to indicate failure.
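+//
+// For example, a method defined like the following would be flagged
+// (illustrative, hypothetical name), because its void return type gives
+// callers no way to tell whether the error out parameter was set:
+// @code
+//   - (void)loadDataWithError:(NSError **)error {
+//     // ...
+//   }
+// @endcode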
+//
+// File under feature request PR 2600.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Decl.h"
+#include "llvm/ADT/SmallVector.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool IsNSError(QualType T, IdentifierInfo *II);
+static bool IsCFError(QualType T, IdentifierInfo *II);
+
+//===----------------------------------------------------------------------===//
+// NSErrorMethodChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class NSErrorMethodChecker
+ : public Checker< check::ASTDecl<ObjCMethodDecl> > {
+ mutable IdentifierInfo *II;
+
+public:
+ NSErrorMethodChecker() : II(0) { }
+
+ void checkASTDecl(const ObjCMethodDecl *D,
+ AnalysisManager &mgr, BugReporter &BR) const;
+};
+}
+
+void NSErrorMethodChecker::checkASTDecl(const ObjCMethodDecl *D,
+ AnalysisManager &mgr,
+ BugReporter &BR) const {
+ if (!D->isThisDeclarationADefinition())
+ return;
+ if (!D->getResultType()->isVoidType())
+ return;
+
+ if (!II)
+ II = &D->getASTContext().Idents.get("NSError");
+
+ bool hasNSError = false;
+ for (ObjCMethodDecl::param_const_iterator
+ I = D->param_begin(), E = D->param_end(); I != E; ++I) {
+ if (IsNSError((*I)->getType(), II)) {
+ hasNSError = true;
+ break;
+ }
+ }
+
+ if (hasNSError) {
+ const char *err = "Method accepting NSError** "
+ "should have a non-void return value to indicate whether or not an "
+ "error occurred";
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::create(D, BR.getSourceManager());
+ BR.EmitBasicReport(D, "Bad return type when passing NSError**",
+ "Coding conventions (Apple)", err, L);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// CFErrorFunctionChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class CFErrorFunctionChecker
+ : public Checker< check::ASTDecl<FunctionDecl> > {
+ mutable IdentifierInfo *II;
+
+public:
+ CFErrorFunctionChecker() : II(0) { }
+
+ void checkASTDecl(const FunctionDecl *D,
+ AnalysisManager &mgr, BugReporter &BR) const;
+};
+}
+
+void CFErrorFunctionChecker::checkASTDecl(const FunctionDecl *D,
+ AnalysisManager &mgr,
+ BugReporter &BR) const {
+ if (!D->doesThisDeclarationHaveABody())
+ return;
+ if (!D->getResultType()->isVoidType())
+ return;
+
+ if (!II)
+ II = &D->getASTContext().Idents.get("CFErrorRef");
+
+ bool hasCFError = false;
+ for (FunctionDecl::param_const_iterator
+ I = D->param_begin(), E = D->param_end(); I != E; ++I) {
+ if (IsCFError((*I)->getType(), II)) {
+ hasCFError = true;
+ break;
+ }
+ }
+
+ if (hasCFError) {
+ const char *err = "Function accepting CFErrorRef* "
+ "should have a non-void return value to indicate whether or not an "
+ "error occurred";
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::create(D, BR.getSourceManager());
+ BR.EmitBasicReport(D, "Bad return type when passing CFErrorRef*",
+ "Coding conventions (Apple)", err, L);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// NSOrCFErrorDerefChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class NSErrorDerefBug : public BugType {
+public:
+ NSErrorDerefBug() : BugType("NSError** null dereference",
+ "Coding conventions (Apple)") {}
+};
+
+class CFErrorDerefBug : public BugType {
+public:
+ CFErrorDerefBug() : BugType("CFErrorRef* null dereference",
+ "Coding conventions (Apple)") {}
+};
+
+}
+
+namespace {
+class NSOrCFErrorDerefChecker
+ : public Checker< check::Location,
+ check::Event<ImplicitNullDerefEvent> > {
+ mutable IdentifierInfo *NSErrorII, *CFErrorII;
+public:
+ bool ShouldCheckNSError, ShouldCheckCFError;
+ NSOrCFErrorDerefChecker() : NSErrorII(0), CFErrorII(0),
+ ShouldCheckNSError(0), ShouldCheckCFError(0) { }
+
+ void checkLocation(SVal loc, bool isLoad, const Stmt *S,
+ CheckerContext &C) const;
+ void checkEvent(ImplicitNullDerefEvent event) const;
+};
+}
+
+namespace { struct NSErrorOut {}; }
+namespace { struct CFErrorOut {}; }
+
+typedef llvm::ImmutableMap<SymbolRef, unsigned> ErrorOutFlag;
+
+namespace clang {
+namespace ento {
+ template <>
+ struct ProgramStateTrait<NSErrorOut> : public ProgramStatePartialTrait<ErrorOutFlag> {
+ static void *GDMIndex() { static int index = 0; return &index; }
+ };
+ template <>
+ struct ProgramStateTrait<CFErrorOut> : public ProgramStatePartialTrait<ErrorOutFlag> {
+ static void *GDMIndex() { static int index = 0; return &index; }
+ };
+}
+}
+
+template <typename T>
+static bool hasFlag(SVal val, ProgramStateRef state) {
+ if (SymbolRef sym = val.getAsSymbol())
+ if (const unsigned *attachedFlags = state->get<T>(sym))
+ return *attachedFlags;
+ return false;
+}
+
+template <typename T>
+static void setFlag(ProgramStateRef state, SVal val, CheckerContext &C) {
+ // We tag the symbol that the SVal wraps.
+ if (SymbolRef sym = val.getAsSymbol())
+ C.addTransition(state->set<T>(sym, true));
+}
+
+static QualType parameterTypeFromSVal(SVal val, CheckerContext &C) {
+ const StackFrameContext *
+ SFC = C.getLocationContext()->getCurrentStackFrame();
+ if (const loc::MemRegionVal* X = dyn_cast<loc::MemRegionVal>(&val)) {
+ const MemRegion* R = X->getRegion();
+ if (const VarRegion *VR = R->getAs<VarRegion>())
+ if (const StackArgumentsSpaceRegion *
+ stackReg = dyn_cast<StackArgumentsSpaceRegion>(VR->getMemorySpace()))
+ if (stackReg->getStackFrame() == SFC)
+ return VR->getValueType();
+ }
+
+ return QualType();
+}
+
+void NSOrCFErrorDerefChecker::checkLocation(SVal loc, bool isLoad,
+ const Stmt *S,
+ CheckerContext &C) const {
+ if (!isLoad)
+ return;
+ if (loc.isUndef() || !isa<Loc>(loc))
+ return;
+
+ ASTContext &Ctx = C.getASTContext();
+ ProgramStateRef state = C.getState();
+
+ // If we are loading from NSError**/CFErrorRef* parameter, mark the resulting
+ // SVal so that we can later check it when handling the
+ // ImplicitNullDerefEvent event.
+ // FIXME: Cumbersome! Maybe add a hook at the construction of SVals at the
+ // start of the function?
+
+ QualType parmT = parameterTypeFromSVal(loc, C);
+ if (parmT.isNull())
+ return;
+
+ if (!NSErrorII)
+ NSErrorII = &Ctx.Idents.get("NSError");
+ if (!CFErrorII)
+ CFErrorII = &Ctx.Idents.get("CFErrorRef");
+
+ if (ShouldCheckNSError && IsNSError(parmT, NSErrorII)) {
+ setFlag<NSErrorOut>(state, state->getSVal(cast<Loc>(loc)), C);
+ return;
+ }
+
+ if (ShouldCheckCFError && IsCFError(parmT, CFErrorII)) {
+ setFlag<CFErrorOut>(state, state->getSVal(cast<Loc>(loc)), C);
+ return;
+ }
+}
+
+void NSOrCFErrorDerefChecker::checkEvent(ImplicitNullDerefEvent event) const {
+ if (event.IsLoad)
+ return;
+
+ SVal loc = event.Location;
+ ProgramStateRef state = event.SinkNode->getState();
+ BugReporter &BR = *event.BR;
+
+ bool isNSError = hasFlag<NSErrorOut>(loc, state);
+ bool isCFError = false;
+ if (!isNSError)
+ isCFError = hasFlag<CFErrorOut>(loc, state);
+
+ if (!(isNSError || isCFError))
+ return;
+
+ // Storing to possible null NSError/CFErrorRef out parameter.
+
+ // Emit an error.
+ std::string err;
+ llvm::raw_string_ostream os(err);
+ os << "Potential null dereference. According to coding standards ";
+
+ if (isNSError)
+ os << "in 'Creating and Returning NSError Objects' the parameter ";
+ else
+ os << "documented in CoreFoundation/CFError.h the parameter ";
+
+ os << "may be null.";
+
+ BugType *bug = 0;
+ if (isNSError)
+ bug = new NSErrorDerefBug();
+ else
+ bug = new CFErrorDerefBug();
+ BugReport *report = new BugReport(*bug, os.str(),
+ event.SinkNode);
+ BR.EmitReport(report);
+}
+
+static bool IsNSError(QualType T, IdentifierInfo *II) {
+
+ const PointerType* PPT = T->getAs<PointerType>();
+ if (!PPT)
+ return false;
+
+ const ObjCObjectPointerType* PT =
+ PPT->getPointeeType()->getAs<ObjCObjectPointerType>();
+
+ if (!PT)
+ return false;
+
+ const ObjCInterfaceDecl *ID = PT->getInterfaceDecl();
+
+ // FIXME: Can ID ever be NULL?
+ if (ID)
+ return II == ID->getIdentifier();
+
+ return false;
+}
+
+static bool IsCFError(QualType T, IdentifierInfo *II) {
+ const PointerType* PPT = T->getAs<PointerType>();
+ if (!PPT) return false;
+
+ const TypedefType* TT = PPT->getPointeeType()->getAs<TypedefType>();
+ if (!TT) return false;
+
+ return TT->getDecl()->getIdentifier() == II;
+}
+
+void ento::registerNSErrorChecker(CheckerManager &mgr) {
+ mgr.registerChecker<NSErrorMethodChecker>();
+ NSOrCFErrorDerefChecker *
+ checker = mgr.registerChecker<NSOrCFErrorDerefChecker>();
+ checker->ShouldCheckNSError = true;
+}
+
+void ento::registerCFErrorChecker(CheckerManager &mgr) {
+ mgr.registerChecker<CFErrorFunctionChecker>();
+ NSOrCFErrorDerefChecker *
+ checker = mgr.registerChecker<NSOrCFErrorDerefChecker>();
+ checker->ShouldCheckCFError = true;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
new file mode 100644
index 0000000..c2d7c09
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/NoReturnFunctionChecker.cpp
@@ -0,0 +1,146 @@
+//=== NoReturnFunctionChecker.cpp -------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines NoReturnFunctionChecker, which evaluates functions that do not
+// return to the caller.
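+//
+// For example (illustrative sketch), in the code below the analyzer stops
+// exploring the path after the call to 'panic', one of the hardwired
+// non-returning functions recognized below:
+// @code
+//   if (!p)
+//     panic("unexpected null pointer");
+//   *p = 0; // analyzed with the knowledge that p is non-null here
+// @endcode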
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "llvm/ADT/StringSwitch.h"
+#include <cstdarg>
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class NoReturnFunctionChecker : public Checker< check::PostStmt<CallExpr>,
+ check::PostObjCMessage > {
+public:
+ void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPostObjCMessage(const ObjCMessage &msg, CheckerContext &C) const;
+};
+
+}
+
+void NoReturnFunctionChecker::checkPostStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+ const Expr *Callee = CE->getCallee();
+
+ bool BuildSinks = getFunctionExtInfo(Callee->getType()).getNoReturn();
+
+ if (!BuildSinks) {
+ SVal L = state->getSVal(Callee, C.getLocationContext());
+ const FunctionDecl *FD = L.getAsFunctionDecl();
+ if (!FD)
+ return;
+
+ if (FD->getAttr<AnalyzerNoReturnAttr>())
+ BuildSinks = true;
+ else if (const IdentifierInfo *II = FD->getIdentifier()) {
+ // HACK: Some functions are not marked noreturn, and don't return.
+ // Here are a few hardwired ones. If this takes too long, we can
+ // potentially cache these results.
+ BuildSinks
+ = llvm::StringSwitch<bool>(StringRef(II->getName()))
+ .Case("exit", true)
+ .Case("panic", true)
+ .Case("error", true)
+ .Case("Assert", true)
+ // FIXME: This is just a wrapper around throwing an exception.
+ // Eventually inter-procedural analysis should handle this easily.
+ .Case("ziperr", true)
+ .Case("assfail", true)
+ .Case("db_error", true)
+ .Case("__assert", true)
+ .Case("__assert_rtn", true)
+ .Case("__assert_fail", true)
+ .Case("dtrace_assfail", true)
+ .Case("yy_fatal_error", true)
+ .Case("_XCAssertionFailureHandler", true)
+ .Case("_DTAssertionFailureHandler", true)
+ .Case("_TSAssertionFailureHandler", true)
+ .Default(false);
+ }
+ }
+
+ if (BuildSinks)
+ C.generateSink();
+}
+
+static bool END_WITH_NULL isMultiArgSelector(const Selector *Sel, ...) {
+ va_list argp;
+ va_start(argp, Sel);
+
+ unsigned Slot = 0;
+ const char *Arg;
+ while ((Arg = va_arg(argp, const char *))) {
+ if (!Sel->getNameForSlot(Slot).equals(Arg))
+ break; // still need to va_end!
+ ++Slot;
+ }
+
+ va_end(argp);
+
+ // We only succeeded if we made it to the end of the argument list.
+ return (Arg == NULL);
+}
+
+void NoReturnFunctionChecker::checkPostObjCMessage(const ObjCMessage &Msg,
+ CheckerContext &C) const {
+ // HACK: This entire check is to handle two messages in the Cocoa frameworks:
+ // -[NSAssertionHandler
+ // handleFailureInMethod:object:file:lineNumber:description:]
+ // -[NSAssertionHandler
+ // handleFailureInFunction:file:lineNumber:description:]
+ // Eventually these should be annotated with __attribute__((noreturn)).
+ // Because ObjC messages use dynamic dispatch, it is not generally safe to
+ // assume certain methods can't return. In cases where it is definitely valid,
+ // see if you can mark the methods noreturn or analyzer_noreturn instead of
+ // adding more explicit checks to this method.
+
+ if (!Msg.isInstanceMessage())
+ return;
+
+ const ObjCInterfaceDecl *Receiver = Msg.getReceiverInterface();
+ if (!Receiver)
+ return;
+ if (!Receiver->getIdentifier()->isStr("NSAssertionHandler"))
+ return;
+
+ Selector Sel = Msg.getSelector();
+ switch (Sel.getNumArgs()) {
+ default:
+ return;
+ case 4:
+ if (!isMultiArgSelector(&Sel, "handleFailureInFunction", "file",
+ "lineNumber", "description", NULL))
+ return;
+ break;
+ case 5:
+ if (!isMultiArgSelector(&Sel, "handleFailureInMethod", "object", "file",
+ "lineNumber", "description", NULL))
+ return;
+ break;
+ }
+
+ // If we got here, it's one of the messages we care about.
+ C.generateSink();
+}
+
+
+void ento::registerNoReturnFunctionChecker(CheckerManager &mgr) {
+ mgr.registerChecker<NoReturnFunctionChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp
new file mode 100644
index 0000000..7b724d2
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/OSAtomicChecker.cpp
@@ -0,0 +1,218 @@
+//=== OSAtomicChecker.cpp - OSAtomic functions evaluator --------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker evaluates OSAtomic functions.
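+//
+// For example, a call such as the following (prototype from
+// libkern/OSAtomic.h; illustrative use):
+// @code
+//   bool swapped = OSAtomicCompareAndSwap32(oldValue, newValue, &theValue);
+// @endcode
+// is modeled roughly as: if '*(&theValue) == oldValue', store 'newValue'
+// through the pointer and return true; otherwise return false.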
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/Basic/Builtins.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class OSAtomicChecker : public Checker<eval::InlineCall> {
+public:
+ bool inlineCall(const CallExpr *CE, ExprEngine &Eng,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) const;
+
+private:
+ bool evalOSAtomicCompareAndSwap(const CallExpr *CE,
+ ExprEngine &Eng,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) const;
+};
+}
+
+static StringRef getCalleeName(ProgramStateRef State,
+ const CallExpr *CE,
+ const LocationContext *LCtx) {
+ const Expr *Callee = CE->getCallee();
+ SVal L = State->getSVal(Callee, LCtx);
+ const FunctionDecl *funDecl = L.getAsFunctionDecl();
+ if (!funDecl)
+ return StringRef();
+ IdentifierInfo *funI = funDecl->getIdentifier();
+ if (!funI)
+ return StringRef();
+ return funI->getName();
+}
+
+bool OSAtomicChecker::inlineCall(const CallExpr *CE,
+ ExprEngine &Eng,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) const {
+ StringRef FName = getCalleeName(Pred->getState(),
+ CE, Pred->getLocationContext());
+ if (FName.empty())
+ return false;
+
+ // Check for compare and swap.
+ if (FName.startswith("OSAtomicCompareAndSwap") ||
+ FName.startswith("objc_atomicCompareAndSwap"))
+ return evalOSAtomicCompareAndSwap(CE, Eng, Pred, Dst);
+
+ // FIXME: Other atomics.
+ return false;
+}
+
+bool OSAtomicChecker::evalOSAtomicCompareAndSwap(const CallExpr *CE,
+ ExprEngine &Eng,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) const {
+ // Not enough arguments to match OSAtomicCompareAndSwap?
+ if (CE->getNumArgs() != 3)
+ return false;
+
+ ASTContext &Ctx = Eng.getContext();
+ const Expr *oldValueExpr = CE->getArg(0);
+ QualType oldValueType = Ctx.getCanonicalType(oldValueExpr->getType());
+
+ const Expr *newValueExpr = CE->getArg(1);
+ QualType newValueType = Ctx.getCanonicalType(newValueExpr->getType());
+
+ // Do the types of 'oldValue' and 'newValue' match?
+ if (oldValueType != newValueType)
+ return false;
+
+ const Expr *theValueExpr = CE->getArg(2);
+ const PointerType *theValueType=theValueExpr->getType()->getAs<PointerType>();
+
+ // theValueType not a pointer?
+ if (!theValueType)
+ return false;
+
+ QualType theValueTypePointee =
+ Ctx.getCanonicalType(theValueType->getPointeeType()).getUnqualifiedType();
+
+ // The pointee must match newValueType and oldValueType.
+ if (theValueTypePointee != newValueType)
+ return false;
+
+ static SimpleProgramPointTag OSAtomicLoadTag("OSAtomicChecker : Load");
+ static SimpleProgramPointTag OSAtomicStoreTag("OSAtomicChecker : Store");
+
+ // Load 'theValue'.
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ExplodedNodeSet Tmp;
+ SVal location = state->getSVal(theValueExpr, LCtx);
+ // Here we should use the value type of the region as the load type, because
+ // we are simulating the semantics of the function, not the semantics of
+ // passing an argument. So the type of the 'theValue' expression is not what
+ // we are loading. But the type of the VarRegion is usually not the type we
+ // want either, and we still need to do a CastRetrievedVal in the store
+ // manager. So specifying LoadTy could actually be omitted; we keep it here
+ // to emphasize the intended semantics.
+ QualType LoadTy;
+ if (const TypedValueRegion *TR =
+ dyn_cast_or_null<TypedValueRegion>(location.getAsRegion())) {
+ LoadTy = TR->getValueType();
+ }
+ Eng.evalLoad(Tmp, CE, theValueExpr, Pred,
+ state, location, &OSAtomicLoadTag, LoadTy);
+
+ if (Tmp.empty()) {
+ // If no nodes were generated, other checkers must have generated sinks.
+ // We return an empty Dst.
+ return true;
+ }
+
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end();
+ I != E; ++I) {
+
+ ExplodedNode *N = *I;
+ ProgramStateRef stateLoad = N->getState();
+
+ // Use direct bindings from the environment since we are forcing a load
+ // from a location that the Environment would typically not be used
+ // to bind a value.
+ SVal theValueVal_untested = stateLoad->getSVal(theValueExpr, LCtx, true);
+
+ SVal oldValueVal_untested = stateLoad->getSVal(oldValueExpr, LCtx);
+
+ // FIXME: Issue an error.
+ if (theValueVal_untested.isUndef() || oldValueVal_untested.isUndef()) {
+ return false;
+ }
+
+ DefinedOrUnknownSVal theValueVal =
+ cast<DefinedOrUnknownSVal>(theValueVal_untested);
+ DefinedOrUnknownSVal oldValueVal =
+ cast<DefinedOrUnknownSVal>(oldValueVal_untested);
+
+ SValBuilder &svalBuilder = Eng.getSValBuilder();
+
+ // Perform the comparison.
+ DefinedOrUnknownSVal Cmp =
+ svalBuilder.evalEQ(stateLoad,theValueVal,oldValueVal);
+
+ ProgramStateRef stateEqual = stateLoad->assume(Cmp, true);
+
+ // Were they equal?
+ if (stateEqual) {
+ // Perform the store.
+ ExplodedNodeSet TmpStore;
+ SVal val = stateEqual->getSVal(newValueExpr, LCtx);
+
+ // Handle implicit value casts.
+ if (const TypedValueRegion *R =
+ dyn_cast_or_null<TypedValueRegion>(location.getAsRegion())) {
+ val = svalBuilder.evalCast(val,R->getValueType(), newValueExpr->getType());
+ }
+
+ Eng.evalStore(TmpStore, CE, theValueExpr, N,
+ stateEqual, location, val, &OSAtomicStoreTag);
+
+ if (TmpStore.empty()) {
+ // If no nodes were generated, other checkers must have generated sinks.
+ // We return an empty Dst.
+ return true;
+ }
+
+ StmtNodeBuilder B(TmpStore, Dst, Eng.getBuilderContext());
+ // Now bind the result of the comparison.
+ for (ExplodedNodeSet::iterator I2 = TmpStore.begin(),
+ E2 = TmpStore.end(); I2 != E2; ++I2) {
+ ExplodedNode *predNew = *I2;
+ ProgramStateRef stateNew = predNew->getState();
+ // Check for 'void' return type if we have a bogus function prototype.
+ SVal Res = UnknownVal();
+ QualType T = CE->getType();
+ if (!T->isVoidType())
+ Res = Eng.getSValBuilder().makeTruthVal(true, T);
+ B.generateNode(CE, predNew, stateNew->BindExpr(CE, LCtx, Res),
+ false, this);
+ }
+ }
+
+ // Were they not equal?
+ if (ProgramStateRef stateNotEqual = stateLoad->assume(Cmp, false)) {
+ // Check for 'void' return type if we have a bogus function prototype.
+ SVal Res = UnknownVal();
+ QualType T = CE->getType();
+ if (!T->isVoidType())
+ Res = Eng.getSValBuilder().makeTruthVal(false, CE->getType());
+ StmtNodeBuilder B(N, Dst, Eng.getBuilderContext());
+ B.generateNode(CE, N, stateNotEqual->BindExpr(CE, LCtx, Res),
+ false, this);
+ }
+ }
+
+ return true;
+}
+
+void ento::registerOSAtomicChecker(CheckerManager &mgr) {
+ mgr.registerChecker<OSAtomicChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
new file mode 100644
index 0000000..777e9ea
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
@@ -0,0 +1,96 @@
+//== ObjCAtSyncChecker.cpp - nil mutex checker for @synchronized -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines ObjCAtSyncChecker, a builtin check that checks for null pointers
+// used as mutexes for @synchronized.
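+//
+// For example (illustrative), the following is reported because the mutex is
+// known to be nil, so no synchronization will occur:
+// @code
+//   NSObject *lock = nil;
+//   @synchronized(lock) {
+//     // ...
+//   }
+// @endcode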
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Checkers/DereferenceChecker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ObjCAtSyncChecker
+ : public Checker< check::PreStmt<ObjCAtSynchronizedStmt> > {
+ mutable OwningPtr<BuiltinBug> BT_null;
+ mutable OwningPtr<BuiltinBug> BT_undef;
+
+public:
+ void checkPreStmt(const ObjCAtSynchronizedStmt *S, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
+ CheckerContext &C) const {
+
+ const Expr *Ex = S->getSynchExpr();
+ ProgramStateRef state = C.getState();
+ SVal V = state->getSVal(Ex, C.getLocationContext());
+
+ // Uninitialized value used for the mutex?
+ if (isa<UndefinedVal>(V)) {
+ if (ExplodedNode *N = C.generateSink()) {
+ if (!BT_undef)
+ BT_undef.reset(new BuiltinBug("Uninitialized value used as mutex "
+ "for @synchronized"));
+ BugReport *report =
+ new BugReport(*BT_undef, BT_undef->getDescription(), N);
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Ex,
+ report));
+ C.EmitReport(report);
+ }
+ return;
+ }
+
+ if (V.isUnknown())
+ return;
+
+ // Check for null mutexes.
+ ProgramStateRef notNullState, nullState;
+ llvm::tie(notNullState, nullState) = state->assume(cast<DefinedSVal>(V));
+
+ if (nullState) {
+ if (!notNullState) {
+ // Generate an error node. This isn't a sink since
+ // a null mutex just means no synchronization occurs.
+ if (ExplodedNode *N = C.addTransition(nullState)) {
+ if (!BT_null)
+ BT_null.reset(new BuiltinBug("Nil value used as mutex for @synchronized() "
+ "(no synchronization will occur)"));
+ BugReport *report =
+ new BugReport(*BT_null, BT_null->getDescription(), N);
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Ex,
+ report));
+
+ C.EmitReport(report);
+ return;
+ }
+ }
+ // Don't add a transition for 'nullState'. If the value is
+ // under-constrained to be null or non-null, assume it is non-null
+ // afterwards.
+ }
+
+ if (notNullState)
+ C.addTransition(notNullState);
+}
+
+void ento::registerObjCAtSyncChecker(CheckerManager &mgr) {
+ if (mgr.getLangOpts().ObjC2)
+ mgr.registerChecker<ObjCAtSyncChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
new file mode 100644
index 0000000..f2929c0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
@@ -0,0 +1,174 @@
+//== ObjCContainersASTChecker.cpp - CoreFoundation containers API *- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// An AST checker that looks for common pitfalls when using 'CFArray',
+// 'CFDictionary', 'CFSet' APIs.
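+//
+// For example (illustrative), passing a C array of plain ints to
+// CFArrayCreate is reported, because the values are not pointer-sized
+// (assuming 'int' is narrower than a pointer on the target):
+// @code
+//   int x[] = { 1, 2, 3 };
+//   CFArrayRef a = CFArrayCreate(NULL, (const void **)x, 3, NULL);
+// @endcode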
+//
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class WalkAST : public StmtVisitor<WalkAST> {
+ BugReporter &BR;
+ AnalysisDeclContext* AC;
+ ASTContext &ASTC;
+ uint64_t PtrWidth;
+
+ static const unsigned InvalidArgIndex = UINT_MAX;
+
+ /// Check if the type has pointer size (very conservative).
+ inline bool isPointerSize(const Type *T) {
+ if (!T)
+ return true;
+ if (T->isIncompleteType())
+ return true;
+ return (ASTC.getTypeSize(T) == PtrWidth);
+ }
+
+ /// Check if the type is a pointer/array to pointer sized values.
+ inline bool hasPointerToPointerSizedType(const Expr *E) {
+ QualType T = E->getType();
+
+ // The type could be either a pointer or array.
+ const Type *TP = T.getTypePtr();
+ QualType PointeeT = TP->getPointeeType();
+ if (!PointeeT.isNull()) {
+ // If the type is a pointer to an array, check the size of the array
+ // elements, to avoid false positives coming from the assumption that
+ // the values x and &x are equal when x is an array.
+ if (const Type *TElem = PointeeT->getArrayElementTypeNoTypeQual())
+ if (isPointerSize(TElem))
+ return true;
+
+ // Else, check the pointee size.
+ return isPointerSize(PointeeT.getTypePtr());
+ }
+
+ if (const Type *TElem = TP->getArrayElementTypeNoTypeQual())
+ return isPointerSize(TElem);
+
+ // At this point the type is neither an array nor a pointer type.
+
+ // However, it could still be a null constant, which is allowed.
+ if (E->isNullPointerConstant(ASTC, Expr::NPC_ValueDependentIsNull))
+ return true;
+ return false;
+ }
+
+public:
+ WalkAST(BugReporter &br, AnalysisDeclContext* ac)
+ : BR(br), AC(ac), ASTC(AC->getASTContext()),
+ PtrWidth(ASTC.getTargetInfo().getPointerWidth(0)) {}
+
+ // Statement visitor methods.
+ void VisitChildren(Stmt *S);
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+ void VisitCallExpr(CallExpr *CE);
+};
+} // end anonymous namespace
+
+static StringRef getCalleeName(CallExpr *CE) {
+ const FunctionDecl *FD = CE->getDirectCallee();
+ if (!FD)
+ return StringRef();
+
+ IdentifierInfo *II = FD->getIdentifier();
+ if (!II) // if no identifier, not a simple C function
+ return StringRef();
+
+ return II->getName();
+}
+
+void WalkAST::VisitCallExpr(CallExpr *CE) {
+ StringRef Name = getCalleeName(CE);
+ if (Name.empty())
+ return;
+
+ const Expr *Arg = 0;
+ unsigned ArgNum = InvalidArgIndex;
+
+ if (Name.equals("CFArrayCreate") || Name.equals("CFSetCreate")) {
+ ArgNum = 1;
+ Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
+ if (hasPointerToPointerSizedType(Arg))
+ return;
+ }
+
+ if (Arg == 0 && Name.equals("CFDictionaryCreate")) {
+ // Check first argument.
+ ArgNum = 1;
+ Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
+ if (hasPointerToPointerSizedType(Arg)) {
+ // Check second argument.
+ ArgNum = 2;
+ Arg = CE->getArg(ArgNum)->IgnoreParenCasts();
+ if (hasPointerToPointerSizedType(Arg))
+ // Both are good, return.
+ return;
+ }
+ }
+
+ if (ArgNum != InvalidArgIndex) {
+ assert(ArgNum == 1 || ArgNum == 2);
+
+ SmallString<256> BufName;
+ llvm::raw_svector_ostream OsName(BufName);
+ OsName << "Invalid use of '" << Name << "'";
+
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream Os(Buf);
+ Os << " The "<< ((ArgNum == 1) ? "first" : "second") << " argument to '"
+ << Name << "' must be a C array of pointer-sized values, not '"
+ << Arg->getType().getAsString() << "'";
+
+ SourceRange R = Arg->getSourceRange();
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ BR.EmitBasicReport(AC->getDecl(),
+ OsName.str(), categories::CoreFoundationObjectiveC,
+ Os.str(), CELoc, &R, 1);
+ }
+
+ // Recurse and check children.
+ VisitChildren(CE);
+}
+
+void WalkAST::VisitChildren(Stmt *S) {
+ for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+ if (Stmt *child = *I)
+ Visit(child);
+}
+
+namespace {
+class ObjCContainersASTChecker : public Checker<check::ASTCodeBody> {
+public:
+
+ void checkASTCodeBody(const Decl *D, AnalysisManager& Mgr,
+ BugReporter &BR) const {
+ WalkAST walker(BR, Mgr.getAnalysisDeclContext(D));
+ walker.Visit(D->getBody());
+ }
+};
+}
+
+void ento::registerObjCContainersASTChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCContainersASTChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
new file mode 100644
index 0000000..f4655b6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
@@ -0,0 +1,159 @@
+//== ObjCContainersChecker.cpp - Path sensitive checker for CFArray *- C++ -*=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Performs path sensitive checks of Core Foundation static containers like
+// CFArray.
+// 1) Check for buffer overflows:
+// In CFArrayGetValueAtIndex(myArray, index), if the index is outside the
+// index space of myArray (0 to N-1 inclusive, where N is the count of
+// myArray), the behavior is undefined.
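+//
+// For example (illustrative, with a hypothetical 'values' array), the access
+// below is reported because the analyzer tracks that the array was created
+// with only two elements:
+// @code
+//   CFArrayRef array = CFArrayCreate(NULL, values, 2, NULL);
+//   const void *v = CFArrayGetValueAtIndex(array, 2); // index out of bounds
+// @endcode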
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/AST/ParentMap.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ObjCContainersChecker : public Checker< check::PreStmt<CallExpr>,
+ check::PostStmt<CallExpr> > {
+ mutable OwningPtr<BugType> BT;
+ inline void initBugType() const {
+ if (!BT)
+ BT.reset(new BugType("CFArray API",
+ categories::CoreFoundationObjectiveC));
+ }
+
+ inline SymbolRef getArraySym(const Expr *E, CheckerContext &C) const {
+ SVal ArrayRef = C.getState()->getSVal(E, C.getLocationContext());
+ SymbolRef ArraySym = ArrayRef.getAsSymbol();
+ return ArraySym;
+ }
+
+ void addSizeInfo(const Expr *Array, const Expr *Size,
+ CheckerContext &C) const;
+
+public:
+ /// A tag to id this checker.
+ static void *getTag() { static int Tag; return &Tag; }
+
+ void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+// ProgramState trait - a map from an array symbol to its size.
+typedef llvm::ImmutableMap<SymbolRef, DefinedSVal> ArraySizeM;
+
+namespace { struct ArraySizeMap {}; }
+namespace clang { namespace ento {
+template<> struct ProgramStateTrait<ArraySizeMap>
+ : public ProgramStatePartialTrait<ArraySizeM > {
+ static void *GDMIndex() { return ObjCContainersChecker::getTag(); }
+};
+}}
+
+void ObjCContainersChecker::addSizeInfo(const Expr *Array, const Expr *Size,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ SVal SizeV = State->getSVal(Size, C.getLocationContext());
+ // Undefined is reported by another checker.
+ if (SizeV.isUnknownOrUndef())
+ return;
+
+ // Get the ArrayRef symbol.
+ SVal ArrayRef = State->getSVal(Array, C.getLocationContext());
+ SymbolRef ArraySym = ArrayRef.getAsSymbol();
+ if (!ArraySym)
+ return;
+
+ C.addTransition(State->set<ArraySizeMap>(ArraySym, cast<DefinedSVal>(SizeV)));
+ return;
+}
+
+void ObjCContainersChecker::checkPostStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ StringRef Name = C.getCalleeName(CE);
+ if (Name.empty() || CE->getNumArgs() < 1)
+ return;
+
+ // Add array size information to the state.
+ if (Name.equals("CFArrayCreate")) {
+ if (CE->getNumArgs() < 3)
+ return;
+ // Note, we can visit the Create method in the post-visit because
+ // the CFIndex parameter is passed in by value and will not be invalidated
+ // by the call.
+ addSizeInfo(CE, CE->getArg(2), C);
+ return;
+ }
+
+ if (Name.equals("CFArrayGetCount")) {
+ addSizeInfo(CE->getArg(0), CE, C);
+ return;
+ }
+}
+
+void ObjCContainersChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ StringRef Name = C.getCalleeName(CE);
+ if (Name.empty() || CE->getNumArgs() < 2)
+ return;
+
+ // Check the array access.
+ if (Name.equals("CFArrayGetValueAtIndex")) {
+ ProgramStateRef State = C.getState();
+ // Retrieve the size.
+ // Find out if we saw this array symbol before and have information about it.
+ const Expr *ArrayExpr = CE->getArg(0);
+ SymbolRef ArraySym = getArraySym(ArrayExpr, C);
+ if (!ArraySym)
+ return;
+
+ const DefinedSVal *Size = State->get<ArraySizeMap>(ArraySym);
+
+ if (!Size)
+ return;
+
+ // Get the index.
+ const Expr *IdxExpr = CE->getArg(1);
+ SVal IdxVal = State->getSVal(IdxExpr, C.getLocationContext());
+ if (IdxVal.isUnknownOrUndef())
+ return;
+ DefinedSVal Idx = cast<DefinedSVal>(IdxVal);
+
+ // Now, check if 'Idx in [0, Size-1]'.
+ const QualType T = IdxExpr->getType();
+ ProgramStateRef StInBound = State->assumeInBound(Idx, *Size, true, T);
+ ProgramStateRef StOutBound = State->assumeInBound(Idx, *Size, false, T);
+ if (StOutBound && !StInBound) {
+ ExplodedNode *N = C.generateSink(StOutBound);
+ if (!N)
+ return;
+ initBugType();
+ BugReport *R = new BugReport(*BT, "Index is out of bounds", N);
+ R->addRange(IdxExpr->getSourceRange());
+ C.EmitReport(R);
+ return;
+ }
+ }
+}
+
+/// Register checker.
+void ento::registerObjCContainersChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCContainersChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
new file mode 100644
index 0000000..d15c8ba
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -0,0 +1,381 @@
+//== ObjCSelfInitChecker.cpp - Checker for 'self' initialization -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines ObjCSelfInitChecker, a builtin check that checks for uses of
+// 'self' before proper initialization.
+//
+//===----------------------------------------------------------------------===//
+
+// This checks initialization methods to verify that they assign 'self' to the
+// result of an initialization call (e.g. [super init], or [self initWith..])
+// before using 'self' or any instance variable.
+//
+// To perform the required checking, values are tagged with flags that indicate
+// 1) if the object is the one pointed to by 'self', and 2) if the object
+// is the result of an initializer (e.g. [super init]).
+//
+// Uses of an object that is true for 1) but not 2) trigger a diagnostic.
+// The uses that are currently checked are:
+// - Using instance variables.
+// - Returning the object.
+//
+// Note that we don't check for an invalid 'self' that is the receiver of an
+// obj-c message expression, to cut down on false positives where logging
+// functions get information from self (like its class) or perform
+// "invalidation" on self when the initialization fails.
+//
+// Because the object that 'self' points to gets invalidated when a call
+// receives a reference to 'self', the checker keeps track and passes the flags
+// for 1) and 2) to the new object that 'self' points to after the call.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/AST/ParentMap.h"
+
+using namespace clang;
+using namespace ento;
+
+static bool shouldRunOnFunctionOrMethod(const NamedDecl *ND);
+static bool isInitializationMethod(const ObjCMethodDecl *MD);
+static bool isInitMessage(const ObjCMessage &msg);
+static bool isSelfVar(SVal location, CheckerContext &C);
+
+namespace {
+class ObjCSelfInitChecker : public Checker< check::PreObjCMessage,
+ check::PostObjCMessage,
+ check::PostStmt<ObjCIvarRefExpr>,
+ check::PreStmt<ReturnStmt>,
+ check::PreStmt<CallExpr>,
+ check::PostStmt<CallExpr>,
+ check::Location > {
+public:
+ void checkPreObjCMessage(ObjCMessage msg, CheckerContext &C) const;
+ void checkPostObjCMessage(ObjCMessage msg, CheckerContext &C) const;
+ void checkPostStmt(const ObjCIvarRefExpr *E, CheckerContext &C) const;
+ void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkLocation(SVal location, bool isLoad, const Stmt *S,
+ CheckerContext &C) const;
+
+ void checkPreStmt(const CallOrObjCMessage &CE, CheckerContext &C) const;
+ void checkPostStmt(const CallOrObjCMessage &CE, CheckerContext &C) const;
+
+};
+} // end anonymous namespace
+
+namespace {
+
+class InitSelfBug : public BugType {
+ const std::string desc;
+public:
+ InitSelfBug() : BugType("Missing \"self = [(super or self) init...]\"",
+ categories::CoreFoundationObjectiveC) {}
+};
+
+} // end anonymous namespace
+
+namespace {
+enum SelfFlagEnum {
+ /// \brief No flag set.
+ SelfFlag_None = 0x0,
+ /// \brief Value came from 'self'.
+ SelfFlag_Self = 0x1,
+ /// \brief Value came from the result of an initializer (e.g. [super init]).
+ SelfFlag_InitRes = 0x2
+};
+}
+
+typedef llvm::ImmutableMap<SymbolRef, unsigned> SelfFlag;
+namespace { struct CalledInit {}; }
+namespace { struct PreCallSelfFlags {}; }
+
+namespace clang {
+namespace ento {
+ template<>
+ struct ProgramStateTrait<SelfFlag> : public ProgramStatePartialTrait<SelfFlag> {
+ static void *GDMIndex() { static int index = 0; return &index; }
+ };
+ template <>
+ struct ProgramStateTrait<CalledInit> : public ProgramStatePartialTrait<bool> {
+ static void *GDMIndex() { static int index = 0; return &index; }
+ };
+
+ /// \brief A call receiving a reference to 'self' invalidates the object that
+ /// 'self' contains. This keeps the "self flags" assigned to the 'self'
+ /// object before the call so we can assign them to the new object that 'self'
+ /// points to after the call.
+ template <>
+ struct ProgramStateTrait<PreCallSelfFlags> : public ProgramStatePartialTrait<unsigned> {
+ static void *GDMIndex() { static int index = 0; return &index; }
+ };
+}
+}
+
+static SelfFlagEnum getSelfFlags(SVal val, ProgramStateRef state) {
+ if (SymbolRef sym = val.getAsSymbol())
+ if (const unsigned *attachedFlags = state->get<SelfFlag>(sym))
+ return (SelfFlagEnum)*attachedFlags;
+ return SelfFlag_None;
+}
+
+static SelfFlagEnum getSelfFlags(SVal val, CheckerContext &C) {
+ return getSelfFlags(val, C.getState());
+}
+
+static void addSelfFlag(ProgramStateRef state, SVal val,
+ SelfFlagEnum flag, CheckerContext &C) {
+ // We tag the symbol that the SVal wraps.
+ if (SymbolRef sym = val.getAsSymbol())
+ C.addTransition(state->set<SelfFlag>(sym, getSelfFlags(val, C) | flag));
+}
+
+static bool hasSelfFlag(SVal val, SelfFlagEnum flag, CheckerContext &C) {
+ return getSelfFlags(val, C) & flag;
+}
+
+/// \brief Returns true if the value of the expression is the object that 'self'
+/// points to and is an object that did not come from the result of calling
+/// an initializer.
+static bool isInvalidSelf(const Expr *E, CheckerContext &C) {
+ SVal exprVal = C.getState()->getSVal(E, C.getLocationContext());
+ if (!hasSelfFlag(exprVal, SelfFlag_Self, C))
+ return false; // value did not come from 'self'.
+ if (hasSelfFlag(exprVal, SelfFlag_InitRes, C))
+ return false; // 'self' is properly initialized.
+
+ return true;
+}
+
+static void checkForInvalidSelf(const Expr *E, CheckerContext &C,
+ const char *errorStr) {
+ if (!E)
+ return;
+
+ if (!C.getState()->get<CalledInit>())
+ return;
+
+ if (!isInvalidSelf(E, C))
+ return;
+
+ // Generate an error node.
+ ExplodedNode *N = C.generateSink();
+ if (!N)
+ return;
+
+ BugReport *report =
+ new BugReport(*new InitSelfBug(), errorStr, N);
+ C.EmitReport(report);
+}
+
+void ObjCSelfInitChecker::checkPostObjCMessage(ObjCMessage msg,
+ CheckerContext &C) const {
+ CallOrObjCMessage MsgWrapper(msg, C.getState(), C.getLocationContext());
+ checkPostStmt(MsgWrapper, C);
+
+ // When encountering a message that does initialization (init rule),
+ // tag the return value so that we know later on that if self has this value
+ // then it is properly initialized.
+
+ // FIXME: A callback should disable checkers at the start of functions.
+ if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+ C.getCurrentAnalysisDeclContext()->getDecl())))
+ return;
+
+ if (isInitMessage(msg)) {
+ // Tag the return value as the result of an initializer.
+ ProgramStateRef state = C.getState();
+
+ // FIXME this really should be context sensitive, where we record
+ // the current stack frame (for IPA). Also, we need to clean this
+ // value out when we return from this method.
+ state = state->set<CalledInit>(true);
+
+ SVal V = state->getSVal(msg.getMessageExpr(), C.getLocationContext());
+ addSelfFlag(state, V, SelfFlag_InitRes, C);
+ return;
+ }
+
+ // We don't check for an invalid 'self' in an obj-c message expression, to
+ // cut down on false positives where logging functions get information from
+ // self (like its class) or perform "invalidation" on self when the
+ // initialization fails.
+}
+
+void ObjCSelfInitChecker::checkPostStmt(const ObjCIvarRefExpr *E,
+ CheckerContext &C) const {
+ // FIXME: A callback should disable checkers at the start of functions.
+ if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+ C.getCurrentAnalysisDeclContext()->getDecl())))
+ return;
+
+ checkForInvalidSelf(E->getBase(), C,
+ "Instance variable used while 'self' is not set to the result of "
+ "'[(super or self) init...]'");
+}
+
+void ObjCSelfInitChecker::checkPreStmt(const ReturnStmt *S,
+ CheckerContext &C) const {
+ // FIXME: A callback should disable checkers at the start of functions.
+ if (!shouldRunOnFunctionOrMethod(dyn_cast<NamedDecl>(
+ C.getCurrentAnalysisDeclContext()->getDecl())))
+ return;
+
+ checkForInvalidSelf(S->getRetValue(), C,
+ "Returning 'self' while it is not set to the result of "
+ "'[(super or self) init...]'");
+}
+
+// When a call receives a reference to 'self', [Pre/Post]VisitGenericCall pass
+// the SelfFlags from the object 'self' point to before the call, to the new
+// object after the call. This is to avoid invalidation of 'self' by logging
+// functions.
+// Another common pattern in classes with multiple initializers is to put the
+// subclass's common initialization bits into a static function that receives
+// the value of 'self', e.g:
+// @code
+// if (!(self = [super init]))
+// return nil;
+// if (!(self = _commonInit(self)))
+// return nil;
+// @endcode
+// Until we can use inter-procedural analysis, in such a call, transfer the
+// SelfFlags to the result of the call.
+
+void ObjCSelfInitChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ CallOrObjCMessage CEWrapper(CE, C.getState(), C.getLocationContext());
+ checkPreStmt(CEWrapper, C);
+}
+
+void ObjCSelfInitChecker::checkPostStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ CallOrObjCMessage CEWrapper(CE, C.getState(), C.getLocationContext());
+ checkPostStmt(CEWrapper, C);
+}
+
+void ObjCSelfInitChecker::checkPreObjCMessage(ObjCMessage Msg,
+ CheckerContext &C) const {
+ CallOrObjCMessage MsgWrapper(Msg, C.getState(), C.getLocationContext());
+ checkPreStmt(MsgWrapper, C);
+}
+
+void ObjCSelfInitChecker::checkPreStmt(const CallOrObjCMessage &CE,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+ unsigned NumArgs = CE.getNumArgs();
+ for (unsigned i = 0; i < NumArgs; ++i) {
+ SVal argV = CE.getArgSVal(i);
+ if (isSelfVar(argV, C)) {
+ unsigned selfFlags = getSelfFlags(state->getSVal(cast<Loc>(argV)), C);
+ C.addTransition(state->set<PreCallSelfFlags>(selfFlags));
+ return;
+ } else if (hasSelfFlag(argV, SelfFlag_Self, C)) {
+ unsigned selfFlags = getSelfFlags(argV, C);
+ C.addTransition(state->set<PreCallSelfFlags>(selfFlags));
+ return;
+ }
+ }
+}
+
+void ObjCSelfInitChecker::checkPostStmt(const CallOrObjCMessage &CE,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+ unsigned NumArgs = CE.getNumArgs();
+ for (unsigned i = 0; i < NumArgs; ++i) {
+ SVal argV = CE.getArgSVal(i);
+ if (isSelfVar(argV, C)) {
+ SelfFlagEnum prevFlags = (SelfFlagEnum)state->get<PreCallSelfFlags>();
+ state = state->remove<PreCallSelfFlags>();
+ addSelfFlag(state, state->getSVal(cast<Loc>(argV)), prevFlags, C);
+ return;
+ } else if (hasSelfFlag(argV, SelfFlag_Self, C)) {
+ SelfFlagEnum prevFlags = (SelfFlagEnum)state->get<PreCallSelfFlags>();
+ state = state->remove<PreCallSelfFlags>();
+ addSelfFlag(state, state->getSVal(cast<Loc>(argV)), prevFlags, C);
+ return;
+ }
+ }
+}
+
+void ObjCSelfInitChecker::checkLocation(SVal location, bool isLoad,
+ const Stmt *S,
+ CheckerContext &C) const {
+ // Tag the result of a load from 'self' so that we can easily know that the
+ // value is the object that 'self' points to.
+ ProgramStateRef state = C.getState();
+ if (isSelfVar(location, C))
+ addSelfFlag(state, state->getSVal(cast<Loc>(location)), SelfFlag_Self, C);
+}
+
+// FIXME: A callback should disable checkers at the start of functions.
+static bool shouldRunOnFunctionOrMethod(const NamedDecl *ND) {
+ if (!ND)
+ return false;
+
+ const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(ND);
+ if (!MD)
+ return false;
+ if (!isInitializationMethod(MD))
+ return false;
+
+ // self = [super init] applies only to NSObject subclasses.
+ // For instance, NSProxy doesn't implement -init.
+ ASTContext &Ctx = MD->getASTContext();
+ IdentifierInfo* NSObjectII = &Ctx.Idents.get("NSObject");
+ ObjCInterfaceDecl *ID = MD->getClassInterface()->getSuperClass();
+ for ( ; ID ; ID = ID->getSuperClass()) {
+ IdentifierInfo *II = ID->getIdentifier();
+
+ if (II == NSObjectII)
+ break;
+ }
+ if (!ID)
+ return false;
+
+ return true;
+}
+
+/// \brief Returns true if the location is 'self'.
+static bool isSelfVar(SVal location, CheckerContext &C) {
+ AnalysisDeclContext *analCtx = C.getCurrentAnalysisDeclContext();
+ if (!analCtx->getSelfDecl())
+ return false;
+ if (!isa<loc::MemRegionVal>(location))
+ return false;
+
+ loc::MemRegionVal MRV = cast<loc::MemRegionVal>(location);
+ if (const DeclRegion *DR = dyn_cast<DeclRegion>(MRV.getRegion()))
+ return (DR->getDecl() == analCtx->getSelfDecl());
+
+ return false;
+}
+
+static bool isInitializationMethod(const ObjCMethodDecl *MD) {
+ return MD->getMethodFamily() == OMF_init;
+}
+
+static bool isInitMessage(const ObjCMessage &msg) {
+ return msg.getMethodFamily() == OMF_init;
+}
+
+//===----------------------------------------------------------------------===//
+// Registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerObjCSelfInitChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCSelfInitChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
new file mode 100644
index 0000000..4718dc7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ObjCUnusedIVarsChecker.cpp
@@ -0,0 +1,186 @@
+//==- ObjCUnusedIVarsChecker.cpp - Check for unused ivars --------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckObjCUnusedIvars, a checker that
+// analyzes an Objective-C class's interface/implementation to determine if it
+// has any ivars that are never accessed.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+
+using namespace clang;
+using namespace ento;
+
+enum IVarState { Unused, Used };
+typedef llvm::DenseMap<const ObjCIvarDecl*,IVarState> IvarUsageMap;
+
+static void Scan(IvarUsageMap& M, const Stmt *S) {
+ if (!S)
+ return;
+
+ if (const ObjCIvarRefExpr *Ex = dyn_cast<ObjCIvarRefExpr>(S)) {
+ const ObjCIvarDecl *D = Ex->getDecl();
+ IvarUsageMap::iterator I = M.find(D);
+ if (I != M.end())
+ I->second = Used;
+ return;
+ }
+
+ // Blocks can reference an instance variable of a class.
+ if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
+ Scan(M, BE->getBody());
+ return;
+ }
+
+ for (Stmt::const_child_iterator I=S->child_begin(),E=S->child_end(); I!=E;++I)
+ Scan(M, *I);
+}
+
+static void Scan(IvarUsageMap& M, const ObjCPropertyImplDecl *D) {
+ if (!D)
+ return;
+
+ const ObjCIvarDecl *ID = D->getPropertyIvarDecl();
+
+ if (!ID)
+ return;
+
+ IvarUsageMap::iterator I = M.find(ID);
+ if (I != M.end())
+ I->second = Used;
+}
+
+static void Scan(IvarUsageMap& M, const ObjCContainerDecl *D) {
+ // Scan the methods for accesses.
+ for (ObjCContainerDecl::instmeth_iterator I = D->instmeth_begin(),
+ E = D->instmeth_end(); I!=E; ++I)
+ Scan(M, (*I)->getBody());
+
+ if (const ObjCImplementationDecl *ID = dyn_cast<ObjCImplementationDecl>(D)) {
+ // Scan for @synthesized property methods that act as setters/getters
+ // to an ivar.
+ for (ObjCImplementationDecl::propimpl_iterator I = ID->propimpl_begin(),
+ E = ID->propimpl_end(); I!=E; ++I)
+ Scan(M, *I);
+
+ // Scan the associated categories as well.
+ for (const ObjCCategoryDecl *CD =
+ ID->getClassInterface()->getCategoryList(); CD ;
+ CD = CD->getNextClassCategory()) {
+ if (const ObjCCategoryImplDecl *CID = CD->getImplementation())
+ Scan(M, CID);
+ }
+ }
+}
+
+static void Scan(IvarUsageMap &M, const DeclContext *C, const FileID FID,
+ SourceManager &SM) {
+ for (DeclContext::decl_iterator I=C->decls_begin(), E=C->decls_end();
+ I!=E; ++I)
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
+ SourceLocation L = FD->getLocStart();
+ if (SM.getFileID(L) == FID)
+ Scan(M, FD->getBody());
+ }
+}
+
+static void checkObjCUnusedIvar(const ObjCImplementationDecl *D,
+ BugReporter &BR) {
+
+ const ObjCInterfaceDecl *ID = D->getClassInterface();
+ IvarUsageMap M;
+
+ // Iterate over the ivars.
+ for (ObjCInterfaceDecl::ivar_iterator I=ID->ivar_begin(),
+ E=ID->ivar_end(); I!=E; ++I) {
+
+ const ObjCIvarDecl *ID = *I;
+
+ // Ignore ivars that...
+ // (a) aren't private
+ // (b) are explicitly marked unused
+ // (c) are IBOutlets
+ // (d) are unnamed bitfields
+ if (ID->getAccessControl() != ObjCIvarDecl::Private ||
+ ID->getAttr<UnusedAttr>() || ID->getAttr<IBOutletAttr>() ||
+ ID->getAttr<IBOutletCollectionAttr>() ||
+ ID->isUnnamedBitfield())
+ continue;
+
+ M[ID] = Unused;
+ }
+
+ if (M.empty())
+ return;
+
+ // Now scan the implementation declaration.
+ Scan(M, D);
+
+ // Any potentially unused ivars?
+ bool hasUnused = false;
+ for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
+ if (I->second == Unused) {
+ hasUnused = true;
+ break;
+ }
+
+ if (!hasUnused)
+ return;
+
+ // We found some potentially unused ivars. Scan the entire translation unit
+ // for functions inside the @implementation that reference these ivars.
+ // FIXME: In the future hopefully we can just use the lexical DeclContext
+ // to go from the ObjCImplementationDecl to the lexically "nested"
+ // C functions.
+ SourceManager &SM = BR.getSourceManager();
+ Scan(M, D->getDeclContext(), SM.getFileID(D->getLocation()), SM);
+
+ // Find ivars that are unused.
+ for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
+ if (I->second == Unused) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "Instance variable '" << *I->first << "' in class '" << *ID
+ << "' is never used by the methods in its @implementation "
+ "(although it may be used by category methods).";
+
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::create(I->first, BR.getSourceManager());
+ BR.EmitBasicReport(D, "Unused instance variable", "Optimization",
+ os.str(), L);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// ObjCUnusedIvarsChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCUnusedIvarsChecker : public Checker<
+ check::ASTDecl<ObjCImplementationDecl> > {
+public:
+ void checkASTDecl(const ObjCImplementationDecl *D, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ checkObjCUnusedIvar(D, BR);
+ }
+};
+}
+
+void ento::registerObjCUnusedIvarsChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ObjCUnusedIvarsChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
new file mode 100644
index 0000000..fe4845b
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerArithChecker.cpp
@@ -0,0 +1,69 @@
+//=== PointerArithChecker.cpp - Pointer arithmetic checker -----*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines PointerArithChecker, a builtin checker that checks for
+// pointer arithmetic on locations other than array elements.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class PointerArithChecker
+ : public Checker< check::PreStmt<BinaryOperator> > {
+ mutable OwningPtr<BuiltinBug> BT;
+
+public:
+ void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
+};
+}
+
+void PointerArithChecker::checkPreStmt(const BinaryOperator *B,
+ CheckerContext &C) const {
+ if (B->getOpcode() != BO_Sub && B->getOpcode() != BO_Add)
+ return;
+
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal LV = state->getSVal(B->getLHS(), LCtx);
+ SVal RV = state->getSVal(B->getRHS(), LCtx);
+
+ const MemRegion *LR = LV.getAsRegion();
+
+ if (!LR || !RV.isConstant())
+ return;
+
+ // If pointer arithmetic is done on variables of non-array type, the code
+ // often relies on memory layout, which is dangerous.
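+ // Hypothetical example: given 'int x; int *p = &x + 1;', the left operand's
+ // region is the VarRegion for 'x', so the addition is reported below.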
+ if (isa<VarRegion>(LR) || isa<CodeTextRegion>(LR) ||
+ isa<CompoundLiteralRegion>(LR)) {
+
+ if (ExplodedNode *N = C.addTransition()) {
+ if (!BT)
+ BT.reset(new BuiltinBug("Dangerous pointer arithmetic",
+ "Pointer arithmetic done on non-array variables "
+ "means reliance on memory layout, which is "
+ "dangerous."));
+ BugReport *R = new BugReport(*BT, BT->getDescription(), N);
+ R->addRange(B->getSourceRange());
+ C.EmitReport(R);
+ }
+ }
+}
+
+void ento::registerPointerArithChecker(CheckerManager &mgr) {
+ mgr.registerChecker<PointerArithChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
new file mode 100644
index 0000000..fa5c6a3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PointerSubChecker.cpp
@@ -0,0 +1,76 @@
+//=== PointerSubChecker.cpp - Pointer subtraction checker ------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines PointerSubChecker, a builtin checker that checks for
+// pointer subtractions on two pointers pointing to different memory chunks.
+// This check corresponds to CWE-469.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class PointerSubChecker
+ : public Checker< check::PreStmt<BinaryOperator> > {
+ mutable OwningPtr<BuiltinBug> BT;
+
+public:
+ void checkPreStmt(const BinaryOperator *B, CheckerContext &C) const;
+};
+}
+
+void PointerSubChecker::checkPreStmt(const BinaryOperator *B,
+ CheckerContext &C) const {
+ // When doing pointer subtraction, if the two pointers do not point to the
+ // same memory chunk, emit a warning.
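+ // Hypothetical example: with 'int a[4], b[4];', the expression '&a[1] - &b[0]'
+ // subtracts pointers whose base regions differ, so it is reported below.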
+ if (B->getOpcode() != BO_Sub)
+ return;
+
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal LV = state->getSVal(B->getLHS(), LCtx);
+ SVal RV = state->getSVal(B->getRHS(), LCtx);
+
+ const MemRegion *LR = LV.getAsRegion();
+ const MemRegion *RR = RV.getAsRegion();
+
+ if (!(LR && RR))
+ return;
+
+ const MemRegion *BaseLR = LR->getBaseRegion();
+ const MemRegion *BaseRR = RR->getBaseRegion();
+
+ if (BaseLR == BaseRR)
+ return;
+
+ // Allow arithmetic on different symbolic regions.
+ if (isa<SymbolicRegion>(BaseLR) || isa<SymbolicRegion>(BaseRR))
+ return;
+
+ if (ExplodedNode *N = C.addTransition()) {
+ if (!BT)
+ BT.reset(new BuiltinBug("Pointer subtraction",
+ "Subtraction of two pointers that do not point to "
+ "the same memory chunk may cause incorrect result."));
+ BugReport *R = new BugReport(*BT, BT->getDescription(), N);
+ R->addRange(B->getSourceRange());
+ C.EmitReport(R);
+ }
+}
+
+void ento::registerPointerSubChecker(CheckerManager &mgr) {
+ mgr.registerChecker<PointerSubChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
new file mode 100644
index 0000000..2d018ef
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -0,0 +1,198 @@
+//===--- PthreadLockChecker.cpp - Check for locking problems ---*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines PthreadLockChecker, a simple lock -> unlock checker.
+// Also handles XNU locks, which behave similarly enough to share code.
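+//
+// A hypothetical double-lock this checker reports:
+//   pthread_mutex_lock(&m);
+//   pthread_mutex_lock(&m);   // warning: this lock has already been acquired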
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/ADT/ImmutableList.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class PthreadLockChecker : public Checker< check::PostStmt<CallExpr> > {
+ mutable OwningPtr<BugType> BT_doublelock;
+ mutable OwningPtr<BugType> BT_lor;
+ enum LockingSemantics {
+ NotApplicable = 0,
+ PthreadSemantics,
+ XNUSemantics
+ };
+public:
+ void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+
+ void AcquireLock(CheckerContext &C, const CallExpr *CE, SVal lock,
+ bool isTryLock, enum LockingSemantics semantics) const;
+
+ void ReleaseLock(CheckerContext &C, const CallExpr *CE, SVal lock) const;
+};
+} // end anonymous namespace
+
+// GDM Entry for tracking lock state.
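+// The set is an immutable list used as a stack: the head is the most recently
+// acquired lock that is still held.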
+namespace { class LockSet {}; }
+namespace clang {
+namespace ento {
+template <> struct ProgramStateTrait<LockSet> :
+ public ProgramStatePartialTrait<llvm::ImmutableList<const MemRegion*> > {
+ static void *GDMIndex() { static int x = 0; return &x; }
+};
+} // end of ento (ProgramState) namespace
+} // end clang namespace
+
+
+void PthreadLockChecker::checkPostStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+ StringRef FName = C.getCalleeName(CE);
+ if (FName.empty())
+ return;
+
+ if (CE->getNumArgs() != 1)
+ return;
+
+ if (FName == "pthread_mutex_lock" ||
+ FName == "pthread_rwlock_rdlock" ||
+ FName == "pthread_rwlock_wrlock")
+ AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
+ false, PthreadSemantics);
+ else if (FName == "lck_mtx_lock" ||
+ FName == "lck_rw_lock_exclusive" ||
+ FName == "lck_rw_lock_shared")
+ AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
+ false, XNUSemantics);
+ else if (FName == "pthread_mutex_trylock" ||
+ FName == "pthread_rwlock_tryrdlock" ||
+ FName == "pthread_rwlock_tryrwlock")
+ AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
+ true, PthreadSemantics);
+ else if (FName == "lck_mtx_try_lock" ||
+ FName == "lck_rw_try_lock_exclusive" ||
+ FName == "lck_rw_try_lock_shared")
+ AcquireLock(C, CE, state->getSVal(CE->getArg(0), LCtx),
+ true, XNUSemantics);
+ else if (FName == "pthread_mutex_unlock" ||
+ FName == "pthread_rwlock_unlock" ||
+ FName == "lck_mtx_unlock" ||
+ FName == "lck_rw_done")
+ ReleaseLock(C, CE, state->getSVal(CE->getArg(0), LCtx));
+}
+
+void PthreadLockChecker::AcquireLock(CheckerContext &C, const CallExpr *CE,
+ SVal lock, bool isTryLock,
+ enum LockingSemantics semantics) const {
+
+ const MemRegion *lockR = lock.getAsRegion();
+ if (!lockR)
+ return;
+
+ ProgramStateRef state = C.getState();
+
+ SVal X = state->getSVal(CE, C.getLocationContext());
+ if (X.isUnknownOrUndef())
+ return;
+
+ DefinedSVal retVal = cast<DefinedSVal>(X);
+
+ if (state->contains<LockSet>(lockR)) {
+ if (!BT_doublelock)
+ BT_doublelock.reset(new BugType("Double locking", "Lock checker"));
+ ExplodedNode *N = C.generateSink();
+ if (!N)
+ return;
+ BugReport *report = new BugReport(*BT_doublelock,
+ "This lock has already "
+ "been acquired", N);
+ report->addRange(CE->getArg(0)->getSourceRange());
+ C.EmitReport(report);
+ return;
+ }
+
+ ProgramStateRef lockSucc = state;
+ if (isTryLock) {
+ // Bifurcate the state, and allow a mode where the lock acquisition fails.
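+ // Note: pthread try-lock calls return 0 on success, so the 'false' branch of
+ // the assumption is the successful acquisition; the XNU try-lock calls
+ // return nonzero on success, so the branches are swapped.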
+ ProgramStateRef lockFail;
+ switch (semantics) {
+ case PthreadSemantics:
+ llvm::tie(lockFail, lockSucc) = state->assume(retVal);
+ break;
+ case XNUSemantics:
+ llvm::tie(lockSucc, lockFail) = state->assume(retVal);
+ break;
+ default:
+ llvm_unreachable("Unknown tryLock locking semantics");
+ }
+ assert(lockFail && lockSucc);
+ C.addTransition(lockFail);
+
+ } else if (semantics == PthreadSemantics) {
+ // Assume that the return value was 0.
+ lockSucc = state->assume(retVal, false);
+ assert(lockSucc);
+
+ } else {
+ // XNU locking semantics return void on non-try locks
+ assert((semantics == XNUSemantics) && "Unknown locking semantics");
+ lockSucc = state;
+ }
+
+ // Record that the lock was acquired.
+ lockSucc = lockSucc->add<LockSet>(lockR);
+ C.addTransition(lockSucc);
+}
+
+void PthreadLockChecker::ReleaseLock(CheckerContext &C, const CallExpr *CE,
+ SVal lock) const {
+
+ const MemRegion *lockR = lock.getAsRegion();
+ if (!lockR)
+ return;
+
+ ProgramStateRef state = C.getState();
+ llvm::ImmutableList<const MemRegion*> LS = state->get<LockSet>();
+
+ // FIXME: Better analysis requires IPA for wrappers.
+ // FIXME: check for double unlocks
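+ // Hypothetical reversal this catches: after 'lock(A); lock(B);', calling
+ // 'unlock(A)' releases a lock that is not at the top of the lock stack.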
+ if (LS.isEmpty())
+ return;
+
+ const MemRegion *firstLockR = LS.getHead();
+ if (firstLockR != lockR) {
+ if (!BT_lor)
+ BT_lor.reset(new BugType("Lock order reversal", "Lock checker"));
+ ExplodedNode *N = C.generateSink();
+ if (!N)
+ return;
+ BugReport *report = new BugReport(*BT_lor,
+ "This was not the most "
+ "recently acquired lock. "
+ "Possible lock order "
+ "reversal", N);
+ report->addRange(CE->getArg(0)->getSourceRange());
+ C.EmitReport(report);
+ return;
+ }
+
+ // Record that the lock was released.
+ state = state->set<LockSet>(LS.getTail());
+ C.addTransition(state);
+}
+
+
+void ento::registerPthreadLockChecker(CheckerManager &mgr) {
+ mgr.registerChecker<PthreadLockChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
new file mode 100644
index 0000000..b569e41
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp
@@ -0,0 +1,3702 @@
+//==-- RetainCountChecker.cpp - Checks for leaks and other issues -*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the methods for RetainCountChecker, which implements
+// a reference count checker for Core Foundation and Cocoa (on Mac OS X).
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Analysis/DomainSpecific/CocoaConventions.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableList.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
+#include <cstdarg>
+
+using namespace clang;
+using namespace ento;
+using llvm::StrInStrNoCase;
+
+namespace {
+/// Wrapper around different kinds of node builder, so that helper functions
+/// can have a common interface.
+class GenericNodeBuilderRefCount {
+ CheckerContext *C;
+ const ProgramPointTag *tag;
+public:
+ GenericNodeBuilderRefCount(CheckerContext &c,
+ const ProgramPointTag *t = 0)
+ : C(&c), tag(t){}
+
+ ExplodedNode *MakeNode(ProgramStateRef state, ExplodedNode *Pred,
+ bool MarkAsSink = false) {
+ return C->addTransition(state, Pred, tag, MarkAsSink);
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Primitives used for constructing summaries for function/method calls.
+//===----------------------------------------------------------------------===//
+
+/// ArgEffect is used to summarize a function/method call's effect on a
+/// particular argument.
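+/// For example, a call like CFRelease(x) is summarized as DecRef on argument
+/// 0, while CFRetain(x) is summarized as IncRef on argument 0.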
+enum ArgEffect { DoNothing, Autorelease, Dealloc, DecRef, DecRefMsg,
+ DecRefBridgedTransfered,
+ IncRefMsg, IncRef, MakeCollectable, MayEscape,
+ NewAutoreleasePool, SelfOwn, StopTracking };
+
+namespace llvm {
+template <> struct FoldingSetTrait<ArgEffect> {
+static inline void Profile(const ArgEffect X, FoldingSetNodeID& ID) {
+ ID.AddInteger((unsigned) X);
+}
+};
+} // end llvm namespace
+
+/// ArgEffects summarizes the effects of a function/method call on all of
+/// its arguments.
+typedef llvm::ImmutableMap<unsigned,ArgEffect> ArgEffects;
+
+namespace {
+
+/// RetEffect is used to summarize a function/method call's behavior with
+/// respect to its return value.
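+/// For example, under the Core Foundation naming conventions a "Create" or
+/// "Copy" function returns an owned symbol, while a "Get" function returns a
+/// not-owned one.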
+class RetEffect {
+public:
+ enum Kind { NoRet, OwnedSymbol, OwnedAllocatedSymbol,
+ NotOwnedSymbol, GCNotOwnedSymbol, ARCNotOwnedSymbol,
+ OwnedWhenTrackedReceiver };
+
+ enum ObjKind { CF, ObjC, AnyObj };
+
+private:
+ Kind K;
+ ObjKind O;
+
+ RetEffect(Kind k, ObjKind o = AnyObj) : K(k), O(o) {}
+
+public:
+ Kind getKind() const { return K; }
+
+ ObjKind getObjKind() const { return O; }
+
+ bool isOwned() const {
+ return K == OwnedSymbol || K == OwnedAllocatedSymbol ||
+ K == OwnedWhenTrackedReceiver;
+ }
+
+ bool operator==(const RetEffect &Other) const {
+ return K == Other.K && O == Other.O;
+ }
+
+ static RetEffect MakeOwnedWhenTrackedReceiver() {
+ return RetEffect(OwnedWhenTrackedReceiver, ObjC);
+ }
+
+ static RetEffect MakeOwned(ObjKind o, bool isAllocated = false) {
+ return RetEffect(isAllocated ? OwnedAllocatedSymbol : OwnedSymbol, o);
+ }
+ static RetEffect MakeNotOwned(ObjKind o) {
+ return RetEffect(NotOwnedSymbol, o);
+ }
+ static RetEffect MakeGCNotOwned() {
+ return RetEffect(GCNotOwnedSymbol, ObjC);
+ }
+ static RetEffect MakeARCNotOwned() {
+ return RetEffect(ARCNotOwnedSymbol, ObjC);
+ }
+ static RetEffect MakeNoRet() {
+ return RetEffect(NoRet);
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.AddInteger((unsigned) K);
+ ID.AddInteger((unsigned) O);
+ }
+};
+
+//===----------------------------------------------------------------------===//
+// Reference-counting logic (typestate + counts).
+//===----------------------------------------------------------------------===//
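+// For example, a symbol returned by a CF "Create" function starts in the
+// Owned state with a +1 count; releasing it moves it toward Released, and any
+// use after that point is flagged as ErrorUseAfterRelease.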
+
+class RefVal {
+public:
+ enum Kind {
+ Owned = 0, // Owning reference.
+ NotOwned, // Reference is not owned but still valid (not freed).
+ Released, // Object has been released.
+ ReturnedOwned, // Returned object passes ownership to caller.
+ ReturnedNotOwned, // Returned object does not pass ownership to caller.
+ ERROR_START,
+ ErrorDeallocNotOwned, // -dealloc called on non-owned object.
+ ErrorDeallocGC, // Calling -dealloc with GC enabled.
+ ErrorUseAfterRelease, // Object used after released.
+ ErrorReleaseNotOwned, // Release of an object that was not owned.
+ ERROR_LEAK_START,
+ ErrorLeak, // A memory leak due to excessive reference counts.
+ ErrorLeakReturned, // A memory leak due to the returning method not having
+ // the correct naming conventions.
+ ErrorGCLeakReturned,
+ ErrorOverAutorelease,
+ ErrorReturnedNotOwned
+ };
+
+private:
+ Kind kind;
+ RetEffect::ObjKind okind;
+ unsigned Cnt;
+ unsigned ACnt;
+ QualType T;
+
+ RefVal(Kind k, RetEffect::ObjKind o, unsigned cnt, unsigned acnt, QualType t)
+ : kind(k), okind(o), Cnt(cnt), ACnt(acnt), T(t) {}
+
+public:
+ Kind getKind() const { return kind; }
+
+ RetEffect::ObjKind getObjKind() const { return okind; }
+
+ unsigned getCount() const { return Cnt; }
+ unsigned getAutoreleaseCount() const { return ACnt; }
+ unsigned getCombinedCounts() const { return Cnt + ACnt; }
+ void clearCounts() { Cnt = 0; ACnt = 0; }
+ void setCount(unsigned i) { Cnt = i; }
+ void setAutoreleaseCount(unsigned i) { ACnt = i; }
+
+ QualType getType() const { return T; }
+
+ bool isOwned() const {
+ return getKind() == Owned;
+ }
+
+ bool isNotOwned() const {
+ return getKind() == NotOwned;
+ }
+
+ bool isReturnedOwned() const {
+ return getKind() == ReturnedOwned;
+ }
+
+ bool isReturnedNotOwned() const {
+ return getKind() == ReturnedNotOwned;
+ }
+
+ static RefVal makeOwned(RetEffect::ObjKind o, QualType t,
+ unsigned Count = 1) {
+ return RefVal(Owned, o, Count, 0, t);
+ }
+
+ static RefVal makeNotOwned(RetEffect::ObjKind o, QualType t,
+ unsigned Count = 0) {
+ return RefVal(NotOwned, o, Count, 0, t);
+ }
+
+ // Comparison, profiling, and pretty-printing.
+
+ bool operator==(const RefVal& X) const {
+ return kind == X.kind && Cnt == X.Cnt && T == X.T && ACnt == X.ACnt;
+ }
+
+ RefVal operator-(size_t i) const {
+ return RefVal(getKind(), getObjKind(), getCount() - i,
+ getAutoreleaseCount(), getType());
+ }
+
+ RefVal operator+(size_t i) const {
+ return RefVal(getKind(), getObjKind(), getCount() + i,
+ getAutoreleaseCount(), getType());
+ }
+
+ RefVal operator^(Kind k) const {
+ return RefVal(k, getObjKind(), getCount(), getAutoreleaseCount(),
+ getType());
+ }
+
+ RefVal autorelease() const {
+ return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount()+1,
+ getType());
+ }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.AddInteger((unsigned) kind);
+ ID.AddInteger(Cnt);
+ ID.AddInteger(ACnt);
+ ID.Add(T);
+ }
+
+ void print(raw_ostream &Out) const;
+};
+
+void RefVal::print(raw_ostream &Out) const {
+ if (!T.isNull())
+ Out << "Tracked " << T.getAsString() << '/';
+
+ switch (getKind()) {
+ default: llvm_unreachable("Invalid RefVal kind");
+ case Owned: {
+ Out << "Owned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case NotOwned: {
+ Out << "NotOwned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case ReturnedOwned: {
+ Out << "ReturnedOwned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case ReturnedNotOwned: {
+ Out << "ReturnedNotOwned";
+ unsigned cnt = getCount();
+ if (cnt) Out << " (+ " << cnt << ")";
+ break;
+ }
+
+ case Released:
+ Out << "Released";
+ break;
+
+ case ErrorDeallocGC:
+ Out << "-dealloc (GC)";
+ break;
+
+ case ErrorDeallocNotOwned:
+ Out << "-dealloc (not-owned)";
+ break;
+
+ case ErrorLeak:
+ Out << "Leaked";
+ break;
+
+ case ErrorLeakReturned:
+ Out << "Leaked (Bad naming)";
+ break;
+
+ case ErrorGCLeakReturned:
+ Out << "Leaked (GC-ed at return)";
+ break;
+
+ case ErrorUseAfterRelease:
+ Out << "Use-After-Release [ERROR]";
+ break;
+
+ case ErrorReleaseNotOwned:
+ Out << "Release of Not-Owned [ERROR]";
+ break;
+
+ case RefVal::ErrorOverAutorelease:
+ Out << "Over autoreleased";
+ break;
+
+ case RefVal::ErrorReturnedNotOwned:
+ Out << "Non-owned object returned instead of owned";
+ break;
+ }
+
+ if (ACnt) {
+ Out << " [ARC +" << ACnt << ']';
+ }
+}
+} //end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// RefBindings - State used to track object reference counts.
+//===----------------------------------------------------------------------===//
+
+typedef llvm::ImmutableMap<SymbolRef, RefVal> RefBindings;
+
+namespace clang {
+namespace ento {
+template<>
+struct ProgramStateTrait<RefBindings>
+ : public ProgramStatePartialTrait<RefBindings> {
+ static void *GDMIndex() {
+ static int RefBIndex = 0;
+ return &RefBIndex;
+ }
+};
+}
+}
+
+//===----------------------------------------------------------------------===//
+// Function/Method behavior summaries.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RetainSummary {
+ /// Args - a map of (index, ArgEffect) pairs, where index
+ /// specifies the argument (starting from 0). This can be sparsely
+ /// populated; arguments with no entry in Args use 'DefaultArgEffect'.
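+ /// For instance, the summary for CFRetain maps argument 0 to IncRef and
+ /// leaves every other index at the default effect.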
+ ArgEffects Args;
+
+ /// DefaultArgEffect - The default ArgEffect to apply to arguments that
+ /// do not have an entry in Args.
+ ArgEffect DefaultArgEffect;
+
+ /// Receiver - If this summary applies to an Objective-C message expression,
+ /// this is the effect applied to the state of the receiver.
+ ArgEffect Receiver;
+
+ /// Ret - The effect on the return value. Used to indicate if the
+ /// function/method call returns a new tracked symbol.
+ RetEffect Ret;
+
+public:
+ RetainSummary(ArgEffects A, RetEffect R, ArgEffect defaultEff,
+ ArgEffect ReceiverEff)
+ : Args(A), DefaultArgEffect(defaultEff), Receiver(ReceiverEff), Ret(R) {}
+
+ /// getArg - Return the argument effect on the argument specified by
+ /// idx (starting from 0).
+ ArgEffect getArg(unsigned idx) const {
+ if (const ArgEffect *AE = Args.lookup(idx))
+ return *AE;
+
+ return DefaultArgEffect;
+ }
+
+ void addArg(ArgEffects::Factory &af, unsigned idx, ArgEffect e) {
+ Args = af.add(Args, idx, e);
+ }
+
+ /// setDefaultArgEffect - Set the default argument effect.
+ void setDefaultArgEffect(ArgEffect E) {
+ DefaultArgEffect = E;
+ }
+
+ /// getRetEffect - Returns the effect on the return value of the call.
+ RetEffect getRetEffect() const { return Ret; }
+
+ /// setRetEffect - Set the effect of the return value of the call.
+ void setRetEffect(RetEffect E) { Ret = E; }
+
+
+ /// Sets the effect on the receiver of the message.
+ void setReceiverEffect(ArgEffect e) { Receiver = e; }
+
+ /// getReceiverEffect - Returns the effect on the receiver of the call.
+ /// This is only meaningful if the summary applies to an ObjCMessageExpr*.
+ ArgEffect getReceiverEffect() const { return Receiver; }
+
+ /// Test if two retain summaries are identical. Note that merely equivalent
+ /// summaries are not necessarily identical (for example, if an explicit
+ /// argument effect matches the default effect).
+ bool operator==(const RetainSummary &Other) const {
+ return Args == Other.Args && DefaultArgEffect == Other.DefaultArgEffect &&
+ Receiver == Other.Receiver && Ret == Other.Ret;
+ }
+
+ /// Profile this summary for inclusion in a FoldingSet.
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.Add(Args);
+ ID.Add(DefaultArgEffect);
+ ID.Add(Receiver);
+ ID.Add(Ret);
+ }
+
+ /// A retain summary is simple if it has no ArgEffects other than the default.
+ bool isSimple() const {
+ return Args.isEmpty();
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Data structures for constructing summaries.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class ObjCSummaryKey {
+ IdentifierInfo* II;
+ Selector S;
+public:
+ ObjCSummaryKey(IdentifierInfo* ii, Selector s)
+ : II(ii), S(s) {}
+
+ ObjCSummaryKey(const ObjCInterfaceDecl *d, Selector s)
+ : II(d ? d->getIdentifier() : 0), S(s) {}
+
+ ObjCSummaryKey(const ObjCInterfaceDecl *d, IdentifierInfo *ii, Selector s)
+ : II(d ? d->getIdentifier() : ii), S(s) {}
+
+ ObjCSummaryKey(Selector s)
+ : II(0), S(s) {}
+
+ IdentifierInfo *getIdentifier() const { return II; }
+ Selector getSelector() const { return S; }
+};
+}
+
+namespace llvm {
+template <> struct DenseMapInfo<ObjCSummaryKey> {
+ static inline ObjCSummaryKey getEmptyKey() {
+ return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getEmptyKey(),
+ DenseMapInfo<Selector>::getEmptyKey());
+ }
+
+ static inline ObjCSummaryKey getTombstoneKey() {
+ return ObjCSummaryKey(DenseMapInfo<IdentifierInfo*>::getTombstoneKey(),
+ DenseMapInfo<Selector>::getTombstoneKey());
+ }
+
+ static unsigned getHashValue(const ObjCSummaryKey &V) {
+ return (DenseMapInfo<IdentifierInfo*>::getHashValue(V.getIdentifier())
+ & 0x88888888)
+ | (DenseMapInfo<Selector>::getHashValue(V.getSelector())
+ & 0x55555555);
+ }
+
+ static bool isEqual(const ObjCSummaryKey& LHS, const ObjCSummaryKey& RHS) {
+ return DenseMapInfo<IdentifierInfo*>::isEqual(LHS.getIdentifier(),
+ RHS.getIdentifier()) &&
+ DenseMapInfo<Selector>::isEqual(LHS.getSelector(),
+ RHS.getSelector());
+ }
+
+};
+template <>
+struct isPodLike<ObjCSummaryKey> { static const bool value = true; };
+} // end llvm namespace
+
+namespace {
+class ObjCSummaryCache {
+ typedef llvm::DenseMap<ObjCSummaryKey, const RetainSummary *> MapTy;
+ MapTy M;
+public:
+ ObjCSummaryCache() {}
+
+ const RetainSummary * find(const ObjCInterfaceDecl *D, IdentifierInfo *ClsName,
+ Selector S) {
+ // Lookup the method using the decl for the class @interface. If we
+ // have no decl, lookup using the class name.
+ return D ? find(D, S) : find(ClsName, S);
+ }
+
+ const RetainSummary * find(const ObjCInterfaceDecl *D, Selector S) {
+ // Do a lookup with the (D,S) pair. If we find a match return
+ // the iterator.
+ ObjCSummaryKey K(D, S);
+ MapTy::iterator I = M.find(K);
+
+ if (I != M.end() || !D)
+ return I->second;
+
+ // Walk the super chain. If we find a hit with a parent, we'll end
+ // up returning that summary. We actually allow that key (null,S), as
+ // we cache summaries for the null ObjCInterfaceDecl* to allow us to
+ // generate initial summaries without having to worry about NSObject
+ // being declared.
+ // FIXME: We may change this at some point.
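+ // For example, if a summary was cached for a superclass with the same
+ // selector, the walk below finds it and re-caches it under the original
+ // (subclass) key so that the next lookup is a direct hit.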
+ for (ObjCInterfaceDecl *C=D->getSuperClass() ;; C=C->getSuperClass()) {
+ if ((I = M.find(ObjCSummaryKey(C, S))) != M.end())
+ break;
+
+ if (!C)
+ return NULL;
+ }
+
+ // Cache the summary with original key to make the next lookup faster
+ // and return the iterator.
+ const RetainSummary *Summ = I->second;
+ M[K] = Summ;
+ return Summ;
+ }
+
+ const RetainSummary *find(IdentifierInfo* II, Selector S) {
+ // FIXME: Class method lookup. Right now we don't have a good way
+ // of going between IdentifierInfo* and the class hierarchy.
+ MapTy::iterator I = M.find(ObjCSummaryKey(II, S));
+
+ if (I == M.end())
+ I = M.find(ObjCSummaryKey(S));
+
+ return I == M.end() ? NULL : I->second;
+ }
+
+ const RetainSummary *& operator[](ObjCSummaryKey K) {
+ return M[K];
+ }
+
+ const RetainSummary *& operator[](Selector S) {
+ return M[ ObjCSummaryKey(S) ];
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Data structures for managing collections of summaries.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RetainSummaryManager {
+
+ //==-----------------------------------------------------------------==//
+ // Typedefs.
+ //==-----------------------------------------------------------------==//
+
+ typedef llvm::DenseMap<const FunctionDecl*, const RetainSummary *>
+ FuncSummariesTy;
+
+ typedef ObjCSummaryCache ObjCMethodSummariesTy;
+
+ typedef llvm::FoldingSetNodeWrapper<RetainSummary> CachedSummaryNode;
+
+ //==-----------------------------------------------------------------==//
+ // Data.
+ //==-----------------------------------------------------------------==//
+
+ /// Ctx - The ASTContext object for the analyzed ASTs.
+ ASTContext &Ctx;
+
+ /// GCEnabled - Records whether or not the analyzed code runs in GC mode.
+ const bool GCEnabled;
+
+ /// Records whether or not the analyzed code runs in ARC mode.
+ const bool ARCEnabled;
+
+ /// FuncSummaries - A map from FunctionDecls to summaries.
+ FuncSummariesTy FuncSummaries;
+
+ /// ObjCClassMethodSummaries - A map from selectors (for class methods)
+ /// to summaries.
+ ObjCMethodSummariesTy ObjCClassMethodSummaries;
+
+ /// ObjCMethodSummaries - A map from selectors (for instance methods) to
+ /// summaries.
+ ObjCMethodSummariesTy ObjCMethodSummaries;
+
+ /// BPAlloc - A BumpPtrAllocator used for allocating summaries, ArgEffects,
+ /// and all other data used by the checker.
+ llvm::BumpPtrAllocator BPAlloc;
+
+ /// AF - A factory for ArgEffects objects.
+ ArgEffects::Factory AF;
+
+ /// ScratchArgs - A holding buffer for constructing ArgEffects.
+ ArgEffects ScratchArgs;
+
+ /// ObjCAllocRetE - Default return effect for methods returning Objective-C
+ /// objects.
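+ /// Under manual retain/release this is an owned ObjC object; under GC or
+ /// ARC the returned object is treated as not owned (see the constructor).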
+ RetEffect ObjCAllocRetE;
+
+ /// ObjCInitRetE - Default return effect for init methods returning
+ /// Objective-C objects.
+ RetEffect ObjCInitRetE;
+
+ /// SimpleSummaries - Used for uniquing summaries that don't have special
+ /// effects.
+ llvm::FoldingSet<CachedSummaryNode> SimpleSummaries;
+
+ //==-----------------------------------------------------------------==//
+ // Methods.
+ //==-----------------------------------------------------------------==//
+
+ /// getArgEffects - Returns a persistent ArgEffects object based on the
+ /// data in ScratchArgs.
+ ArgEffects getArgEffects();
+
+ enum UnaryFuncKind { cfretain, cfrelease, cfmakecollectable };
+
+public:
+ RetEffect getObjAllocRetEffect() const { return ObjCAllocRetE; }
+
+ const RetainSummary *getUnarySummary(const FunctionType* FT,
+ UnaryFuncKind func);
+
+ const RetainSummary *getCFSummaryCreateRule(const FunctionDecl *FD);
+ const RetainSummary *getCFSummaryGetRule(const FunctionDecl *FD);
+ const RetainSummary *getCFCreateGetRuleSummary(const FunctionDecl *FD);
+
+ const RetainSummary *getPersistentSummary(const RetainSummary &OldSumm);
+
+ const RetainSummary *getPersistentSummary(RetEffect RetEff,
+ ArgEffect ReceiverEff = DoNothing,
+ ArgEffect DefaultEff = MayEscape) {
+ RetainSummary Summ(getArgEffects(), RetEff, DefaultEff, ReceiverEff);
+ return getPersistentSummary(Summ);
+ }
+
+ const RetainSummary *getDefaultSummary() {
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ DoNothing, MayEscape);
+ }
+
+ const RetainSummary *getPersistentStopSummary() {
+ return getPersistentSummary(RetEffect::MakeNoRet(),
+ StopTracking, StopTracking);
+ }
+
+ void InitializeClassMethodSummaries();
+ void InitializeMethodSummaries();
+private:
+ void addNSObjectClsMethSummary(Selector S, const RetainSummary *Summ) {
+ ObjCClassMethodSummaries[S] = Summ;
+ }
+
+ void addNSObjectMethSummary(Selector S, const RetainSummary *Summ) {
+ ObjCMethodSummaries[S] = Summ;
+ }
+
+ void addClassMethSummary(const char* Cls, const char* name,
+ const RetainSummary *Summ, bool isNullary = true) {
+ IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
+ Selector S = isNullary ? GetNullarySelector(name, Ctx)
+ : GetUnarySelector(name, Ctx);
+ ObjCClassMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
+ }
+
+ void addInstMethSummary(const char* Cls, const char* nullaryName,
+ const RetainSummary *Summ) {
+ IdentifierInfo* ClsII = &Ctx.Idents.get(Cls);
+ Selector S = GetNullarySelector(nullaryName, Ctx);
+ ObjCMethodSummaries[ObjCSummaryKey(ClsII, S)] = Summ;
+ }
+
+ Selector generateSelector(va_list argp) {
+ SmallVector<IdentifierInfo*, 10> II;
+
+ while (const char* s = va_arg(argp, const char*))
+ II.push_back(&Ctx.Idents.get(s));
+
+ return Ctx.Selectors.getSelector(II.size(), &II[0]);
+ }
+
+ void addMethodSummary(IdentifierInfo *ClsII, ObjCMethodSummariesTy& Summaries,
+ const RetainSummary * Summ, va_list argp) {
+ Selector S = generateSelector(argp);
+ Summaries[ObjCSummaryKey(ClsII, S)] = Summ;
+ }
+
+ void addInstMethSummary(const char* Cls, const RetainSummary * Summ, ...) {
+ va_list argp;
+ va_start(argp, Summ);
+ addMethodSummary(&Ctx.Idents.get(Cls), ObjCMethodSummaries, Summ, argp);
+ va_end(argp);
+ }
+
+ void addClsMethSummary(const char* Cls, const RetainSummary * Summ, ...) {
+ va_list argp;
+ va_start(argp, Summ);
+ addMethodSummary(&Ctx.Idents.get(Cls),ObjCClassMethodSummaries, Summ, argp);
+ va_end(argp);
+ }
+
+ void addClsMethSummary(IdentifierInfo *II, const RetainSummary * Summ, ...) {
+ va_list argp;
+ va_start(argp, Summ);
+ addMethodSummary(II, ObjCClassMethodSummaries, Summ, argp);
+ va_end(argp);
+ }
+
+public:
+
+ RetainSummaryManager(ASTContext &ctx, bool gcenabled, bool usesARC)
+ : Ctx(ctx),
+ GCEnabled(gcenabled),
+ ARCEnabled(usesARC),
+ AF(BPAlloc), ScratchArgs(AF.getEmptyMap()),
+ ObjCAllocRetE(gcenabled
+ ? RetEffect::MakeGCNotOwned()
+ : (usesARC ? RetEffect::MakeARCNotOwned()
+ : RetEffect::MakeOwned(RetEffect::ObjC, true))),
+ ObjCInitRetE(gcenabled
+ ? RetEffect::MakeGCNotOwned()
+ : (usesARC ? RetEffect::MakeARCNotOwned()
+ : RetEffect::MakeOwnedWhenTrackedReceiver())) {
+ InitializeClassMethodSummaries();
+ InitializeMethodSummaries();
+ }
+
+ const RetainSummary *getSummary(const FunctionDecl *FD);
+
+ const RetainSummary *getMethodSummary(Selector S, IdentifierInfo *ClsName,
+ const ObjCInterfaceDecl *ID,
+ const ObjCMethodDecl *MD,
+ QualType RetTy,
+ ObjCMethodSummariesTy &CachedSummaries);
+
+ const RetainSummary *getInstanceMethodSummary(const ObjCMessage &msg,
+ ProgramStateRef state,
+ const LocationContext *LC);
+
+ const RetainSummary *getInstanceMethodSummary(const ObjCMessage &msg,
+ const ObjCInterfaceDecl *ID) {
+ return getMethodSummary(msg.getSelector(), 0, ID, msg.getMethodDecl(),
+ msg.getType(Ctx), ObjCMethodSummaries);
+ }
+
+ const RetainSummary *getClassMethodSummary(const ObjCMessage &msg) {
+ const ObjCInterfaceDecl *Class = 0;
+ if (!msg.isInstanceMessage())
+ Class = msg.getReceiverInterface();
+
+ return getMethodSummary(msg.getSelector(), Class->getIdentifier(),
+ Class, msg.getMethodDecl(), msg.getType(Ctx),
+ ObjCClassMethodSummaries);
+ }
+
+ /// getMethodSummary - This version of getMethodSummary is used to query
+ /// the summary for the current method being analyzed.
+ const RetainSummary *getMethodSummary(const ObjCMethodDecl *MD) {
+ // FIXME: Eventually this should be unneeded.
+ const ObjCInterfaceDecl *ID = MD->getClassInterface();
+ Selector S = MD->getSelector();
+ IdentifierInfo *ClsName = ID->getIdentifier();
+ QualType ResultTy = MD->getResultType();
+
+ ObjCMethodSummariesTy *CachedSummaries;
+ if (MD->isInstanceMethod())
+ CachedSummaries = &ObjCMethodSummaries;
+ else
+ CachedSummaries = &ObjCClassMethodSummaries;
+
+ return getMethodSummary(S, ClsName, ID, MD, ResultTy, *CachedSummaries);
+ }
+
+ const RetainSummary *getStandardMethodSummary(const ObjCMethodDecl *MD,
+ Selector S, QualType RetTy);
+
+ void updateSummaryFromAnnotations(const RetainSummary *&Summ,
+ const ObjCMethodDecl *MD);
+
+ void updateSummaryFromAnnotations(const RetainSummary *&Summ,
+ const FunctionDecl *FD);
+
+ bool isGCEnabled() const { return GCEnabled; }
+
+ bool isARCEnabled() const { return ARCEnabled; }
+
+ bool isARCorGCEnabled() const { return GCEnabled || ARCEnabled; }
+};
+
+// Used to avoid allocating long-term (BPAlloc'd) memory for default retain
+// summaries. If a function or method looks like it has a default summary, but
+// it has annotations, the annotations are added to the stack-based template
+// and then copied into managed memory.
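+// Typical use: wrap the cached summary, call addArg()/setRetEffect() only when
+// an annotation is found, and let the destructor persist the modified copy.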
+class RetainSummaryTemplate {
+ RetainSummaryManager &Manager;
+ const RetainSummary *&RealSummary;
+ RetainSummary ScratchSummary;
+ bool Accessed;
+public:
+ RetainSummaryTemplate(const RetainSummary *&real, const RetainSummary &base,
+ RetainSummaryManager &mgr)
+ : Manager(mgr), RealSummary(real), ScratchSummary(real ? *real : base),
+ Accessed(false) {}
+
+ ~RetainSummaryTemplate() {
+ if (Accessed)
+ RealSummary = Manager.getPersistentSummary(ScratchSummary);
+ }
+
+ RetainSummary &operator*() {
+ Accessed = true;
+ return ScratchSummary;
+ }
+
+ RetainSummary *operator->() {
+ Accessed = true;
+ return &ScratchSummary;
+ }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Implementation of checker data structures.
+//===----------------------------------------------------------------------===//
+
+ArgEffects RetainSummaryManager::getArgEffects() {
+ ArgEffects AE = ScratchArgs;
+ ScratchArgs = AF.getEmptyMap();
+ return AE;
+}
+
+const RetainSummary *
+RetainSummaryManager::getPersistentSummary(const RetainSummary &OldSumm) {
+ // Unique "simple" summaries -- those without ArgEffects.
+ if (OldSumm.isSimple()) {
+ llvm::FoldingSetNodeID ID;
+ OldSumm.Profile(ID);
+
+ void *Pos;
+ CachedSummaryNode *N = SimpleSummaries.FindNodeOrInsertPos(ID, Pos);
+
+ if (!N) {
+ N = (CachedSummaryNode *) BPAlloc.Allocate<CachedSummaryNode>();
+ new (N) CachedSummaryNode(OldSumm);
+ SimpleSummaries.InsertNode(N, Pos);
+ }
+
+ return &N->getValue();
+ }
+
+ RetainSummary *Summ = (RetainSummary *) BPAlloc.Allocate<RetainSummary>();
+ new (Summ) RetainSummary(OldSumm);
+ return Summ;
+}
+
+//===----------------------------------------------------------------------===//
+// Summary creation for functions (largely uses of Core Foundation).
+//===----------------------------------------------------------------------===//
+
+static bool isRetain(const FunctionDecl *FD, StringRef FName) {
+ return FName.endswith("Retain");
+}
+
+static bool isRelease(const FunctionDecl *FD, StringRef FName) {
+ return FName.endswith("Release");
+}
+
+static bool isMakeCollectable(const FunctionDecl *FD, StringRef FName) {
+ // FIXME: Remove FunctionDecl parameter.
+ // FIXME: Is it really okay if MakeCollectable isn't a suffix?
+ return FName.find("MakeCollectable") != StringRef::npos;
+}
+
+const RetainSummary * RetainSummaryManager::getSummary(const FunctionDecl *FD) {
+ // Look up a summary in our cache of FunctionDecls -> Summaries.
+ FuncSummariesTy::iterator I = FuncSummaries.find(FD);
+ if (I != FuncSummaries.end())
+ return I->second;
+
+ // No summary? Generate one.
+ const RetainSummary *S = 0;
+
+ do {
+ // We generate "stop" summaries for implicitly defined functions.
+ if (FD->isImplicit()) {
+ S = getPersistentStopSummary();
+ break;
+ }
+ // For C++ methods, generate an implicit "stop" summary as well. We
+ // can relax this once we have a clear policy for C++ methods and
+ // ownership attributes.
+ if (isa<CXXMethodDecl>(FD)) {
+ S = getPersistentStopSummary();
+ break;
+ }
+
+ // [PR 3337] Use 'getAs<FunctionType>' to strip away any typedefs on the
+ // function's type.
+ const FunctionType* FT = FD->getType()->getAs<FunctionType>();
+ const IdentifierInfo *II = FD->getIdentifier();
+ if (!II)
+ break;
+
+ StringRef FName = II->getName();
+
+ // Strip away preceding '_'. Doing this here will affect all the checks
+ // down below.
+ FName = FName.substr(FName.find_first_not_of('_'));
+
+ // Inspect the result type.
+ QualType RetTy = FT->getResultType();
+
+ // FIXME: This should all be refactored into a chain of "summary lookup"
+ // filters.
+ assert(ScratchArgs.isEmpty());
+
+ if (FName == "pthread_create") {
+ // Part of: <rdar://problem/7299394>. This will be addressed
+ // better with IPA.
+ S = getPersistentStopSummary();
+ } else if (FName == "NSMakeCollectable") {
+ // Handle: id NSMakeCollectable(CFTypeRef)
+ S = (RetTy->isObjCIdType())
+ ? getUnarySummary(FT, cfmakecollectable)
+ : getPersistentStopSummary();
+ } else if (FName == "IOBSDNameMatching" ||
+ FName == "IOServiceMatching" ||
+ FName == "IOServiceNameMatching" ||
+ FName == "IORegistryEntryIDMatching" ||
+ FName == "IOOpenFirmwarePathMatching") {
+ // Part of <rdar://problem/6961230>. (IOKit)
+ // This should be addressed using an API table.
+ S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true),
+ DoNothing, DoNothing);
+ } else if (FName == "IOServiceGetMatchingService" ||
+ FName == "IOServiceGetMatchingServices") {
+ // FIXES: <rdar://problem/6326900>
+ // This should be addressed using an API table. This string comparison is
+ // also a little gross, but there is no need to super optimize here.
+ ScratchArgs = AF.add(ScratchArgs, 1, DecRef);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ } else if (FName == "IOServiceAddNotification" ||
+ FName == "IOServiceAddMatchingNotification") {
+ // Part of <rdar://problem/6961230>. (IOKit)
+ // This should be addressed using an API table.
+ ScratchArgs = AF.add(ScratchArgs, 2, DecRef);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ } else if (FName == "CVPixelBufferCreateWithBytes") {
+ // FIXES: <rdar://problem/7283567>
+ // Eventually this can be improved by recognizing that the pixel
+ // buffer passed to CVPixelBufferCreateWithBytes is released via
+ // a callback and doing full IPA to make sure this is done correctly.
+ // FIXME: This function has an out parameter that returns an
+ // allocated object.
+ ScratchArgs = AF.add(ScratchArgs, 7, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ } else if (FName == "CGBitmapContextCreateWithData") {
+ // FIXES: <rdar://problem/7358899>
+ // Eventually this can be improved by recognizing that 'releaseInfo'
+ // passed to CGBitmapContextCreateWithData is released via
+ // a callback and doing full IPA to make sure this is done correctly.
+ ScratchArgs = AF.add(ScratchArgs, 8, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true),
+ DoNothing, DoNothing);
+ } else if (FName == "CVPixelBufferCreateWithPlanarBytes") {
+ // FIXES: <rdar://problem/7283567>
+ // Eventually this can be improved by recognizing that the pixel
+ // buffer passed to CVPixelBufferCreateWithPlanarBytes is released
+ // via a callback and doing full IPA to make sure this is done
+ // correctly.
+ ScratchArgs = AF.add(ScratchArgs, 12, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ } else if (FName == "dispatch_set_context") {
+ // <rdar://problem/11059275> - The analyzer currently doesn't have
+ // a good way to reason about the finalizer function for libdispatch.
+ // If we pass a context object that is memory managed, stop tracking it.
+ // FIXME: this hack should possibly go away once we can handle
+ // libdispatch finalizers.
+ ScratchArgs = AF.add(ScratchArgs, 1, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ } else if (FName.startswith("NS") &&
+ (FName.find("Insert") != StringRef::npos)) {
+ // Whitelist NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
+ // be deallocated by NSMapRemove. (radar://11152419)
+ ScratchArgs = AF.add(ScratchArgs, 1, StopTracking);
+ ScratchArgs = AF.add(ScratchArgs, 2, StopTracking);
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+ }
+
+ // Did we get a summary?
+ if (S)
+ break;
+
+ // Enable this code once the semantics of NSDeallocateObject are resolved
+ // for GC. <rdar://problem/6619988>
+#if 0
+ // Handle: NSDeallocateObject(id anObject);
+ // This method does allow 'nil' (although we don't check it now).
+ if (strcmp(FName, "NSDeallocateObject") == 0) {
+ return RetTy == Ctx.VoidTy
+ ? getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, Dealloc)
+ : getPersistentStopSummary();
+ }
+#endif
+
+ if (RetTy->isPointerType()) {
+ // For CoreFoundation ('CF') types.
+ if (cocoa::isRefType(RetTy, "CF", FName)) {
+ if (isRetain(FD, FName))
+ S = getUnarySummary(FT, cfretain);
+ else if (isMakeCollectable(FD, FName))
+ S = getUnarySummary(FT, cfmakecollectable);
+ else
+ S = getCFCreateGetRuleSummary(FD);
+
+ break;
+ }
+
+ // For CoreGraphics ('CG') types.
+ if (cocoa::isRefType(RetTy, "CG", FName)) {
+ if (isRetain(FD, FName))
+ S = getUnarySummary(FT, cfretain);
+ else
+ S = getCFCreateGetRuleSummary(FD);
+
+ break;
+ }
+
+ // For the Disk Arbitration API (DiskArbitration/DADisk.h)
+ if (cocoa::isRefType(RetTy, "DADisk") ||
+ cocoa::isRefType(RetTy, "DADissenter") ||
+ cocoa::isRefType(RetTy, "DASessionRef")) {
+ S = getCFCreateGetRuleSummary(FD);
+ break;
+ }
+
+ break;
+ }
+
+ // Check for release functions, the only kind of functions that we care
+ // about that don't return a pointer type.
+ if (FName[0] == 'C' && (FName[1] == 'F' || FName[1] == 'G')) {
+ // Test for 'CGCF'.
+ FName = FName.substr(FName.startswith("CGCF") ? 4 : 2);
+
+ if (isRelease(FD, FName))
+ S = getUnarySummary(FT, cfrelease);
+ else {
+ assert (ScratchArgs.isEmpty());
+ // Remaining CoreFoundation and CoreGraphics functions.
+ // We used to assume that they all strictly followed the ownership idiom
+ // and that ownership cannot be transferred. While this is technically
+ // correct, many methods allow a tracked object to escape. For example:
+ //
+ // CFMutableDictionaryRef x = CFDictionaryCreateMutable(...);
+ // CFDictionaryAddValue(y, key, x);
+ // CFRelease(x);
+ // ... it is okay to use 'x' since 'y' has a reference to it
+ //
+ // We handle this and similar cases with the following heuristic. If the
+ // function name contains "InsertValue", "SetValue", "AddValue",
+ // "AppendValue", or "SetAttribute", then we assume that arguments may
+ // "escape." This means that something else holds on to the object,
+ // allowing it to be used even after its local retain count drops to 0.
+ ArgEffect E = (StrInStrNoCase(FName, "InsertValue") != StringRef::npos||
+ StrInStrNoCase(FName, "AddValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "SetValue") != StringRef::npos ||
+ StrInStrNoCase(FName, "AppendValue") != StringRef::npos||
+ StrInStrNoCase(FName, "SetAttribute") != StringRef::npos)
+ ? MayEscape : DoNothing;
+
+ S = getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, E);
+ }
+ }
+ }
+ while (0);
+
+ // Annotations override defaults.
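+ // e.g. (hypothetical declaration):
+ //   CFStringRef MyGetName(void) __attribute__((cf_returns_retained));
+ // ends up with an owned-CF return effect despite its "Get"-style name.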
+ updateSummaryFromAnnotations(S, FD);
+
+ FuncSummaries[FD] = S;
+ return S;
+}
+
+const RetainSummary *
+RetainSummaryManager::getCFCreateGetRuleSummary(const FunctionDecl *FD) {
+ if (coreFoundation::followsCreateRule(FD))
+ return getCFSummaryCreateRule(FD);
+
+ return getCFSummaryGetRule(FD);
+}
+
+const RetainSummary *
+RetainSummaryManager::getUnarySummary(const FunctionType* FT,
+ UnaryFuncKind func) {
+
+ // Sanity check that this is *really* a unary function. A mismatch can
+ // happen if people do weird things.
+ const FunctionProtoType* FTP = dyn_cast<FunctionProtoType>(FT);
+ if (!FTP || FTP->getNumArgs() != 1)
+ return getPersistentStopSummary();
+
+ assert (ScratchArgs.isEmpty());
+
+ ArgEffect Effect;
+ switch (func) {
+ case cfretain: Effect = IncRef; break;
+ case cfrelease: Effect = DecRef; break;
+ case cfmakecollectable: Effect = MakeCollectable; break;
+ }
+
+ ScratchArgs = AF.add(ScratchArgs, 0, Effect);
+ return getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, DoNothing);
+}
+
+const RetainSummary *
+RetainSummaryManager::getCFSummaryCreateRule(const FunctionDecl *FD) {
+ assert (ScratchArgs.isEmpty());
+
+ return getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
+}
+
+const RetainSummary *
+RetainSummaryManager::getCFSummaryGetRule(const FunctionDecl *FD) {
+ assert (ScratchArgs.isEmpty());
+ return getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::CF),
+ DoNothing, DoNothing);
+}
+
+//===----------------------------------------------------------------------===//
+// Summary creation for Selectors.
+//===----------------------------------------------------------------------===//
+
+void
+RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
+ const FunctionDecl *FD) {
+ if (!FD)
+ return;
+
+ RetainSummaryTemplate Template(Summ, *getDefaultSummary(), *this);
+
+ // Effects on the parameters.
+ unsigned parm_idx = 0;
+ for (FunctionDecl::param_const_iterator pi = FD->param_begin(),
+ pe = FD->param_end(); pi != pe; ++pi, ++parm_idx) {
+ const ParmVarDecl *pd = *pi;
+ if (pd->getAttr<NSConsumedAttr>()) {
+ if (!GCEnabled) {
+ Template->addArg(AF, parm_idx, DecRef);
+ }
+ } else if (pd->getAttr<CFConsumedAttr>()) {
+ Template->addArg(AF, parm_idx, DecRef);
+ }
+ }
+
+ QualType RetTy = FD->getResultType();
+
+ // Determine if there is a special return effect for this method.
+ if (cocoa::isCocoaObjectRef(RetTy)) {
+ if (FD->getAttr<NSReturnsRetainedAttr>()) {
+ Template->setRetEffect(ObjCAllocRetE);
+ }
+ else if (FD->getAttr<CFReturnsRetainedAttr>()) {
+ Template->setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
+ }
+ else if (FD->getAttr<NSReturnsNotRetainedAttr>()) {
+ Template->setRetEffect(RetEffect::MakeNotOwned(RetEffect::ObjC));
+ }
+ else if (FD->getAttr<CFReturnsNotRetainedAttr>()) {
+ Template->setRetEffect(RetEffect::MakeNotOwned(RetEffect::CF));
+ }
+ } else if (RetTy->getAs<PointerType>()) {
+ if (FD->getAttr<CFReturnsRetainedAttr>()) {
+ Template->setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
+ }
+ else if (FD->getAttr<CFReturnsNotRetainedAttr>()) {
+ Template->setRetEffect(RetEffect::MakeNotOwned(RetEffect::CF));
+ }
+ }
+}
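+
+// Illustrative declarations showing the annotations honored above. The
+// functions are hypothetical; only the attribute spellings are real:
+//
+//   CFStringRef MyCreateString(void) __attribute__((cf_returns_retained));
+//   void MyConsume(CFTypeRef obj) __attribute__((cf_consumed));
+//   id MyLookup(void) __attribute__((ns_returns_not_retained));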
+
+void
+RetainSummaryManager::updateSummaryFromAnnotations(const RetainSummary *&Summ,
+ const ObjCMethodDecl *MD) {
+ if (!MD)
+ return;
+
+ RetainSummaryTemplate Template(Summ, *getDefaultSummary(), *this);
+ bool isTrackedLoc = false;
+
+ // Effects on the receiver.
+ if (MD->getAttr<NSConsumesSelfAttr>()) {
+ if (!GCEnabled)
+ Template->setReceiverEffect(DecRefMsg);
+ }
+
+ // Effects on the parameters.
+ unsigned parm_idx = 0;
+ for (ObjCMethodDecl::param_const_iterator
+ pi=MD->param_begin(), pe=MD->param_end();
+ pi != pe; ++pi, ++parm_idx) {
+ const ParmVarDecl *pd = *pi;
+ if (pd->getAttr<NSConsumedAttr>()) {
+ if (!GCEnabled)
+ Template->addArg(AF, parm_idx, DecRef);
+ }
+ else if(pd->getAttr<CFConsumedAttr>()) {
+ Template->addArg(AF, parm_idx, DecRef);
+ }
+ }
+
+ // Determine if there is a special return effect for this method.
+ if (cocoa::isCocoaObjectRef(MD->getResultType())) {
+ if (MD->getAttr<NSReturnsRetainedAttr>()) {
+ Template->setRetEffect(ObjCAllocRetE);
+ return;
+ }
+ if (MD->getAttr<NSReturnsNotRetainedAttr>()) {
+ Template->setRetEffect(RetEffect::MakeNotOwned(RetEffect::ObjC));
+ return;
+ }
+
+ isTrackedLoc = true;
+ } else {
+ isTrackedLoc = MD->getResultType()->getAs<PointerType>() != NULL;
+ }
+
+ if (isTrackedLoc) {
+ if (MD->getAttr<CFReturnsRetainedAttr>())
+ Template->setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
+ else if (MD->getAttr<CFReturnsNotRetainedAttr>())
+ Template->setRetEffect(RetEffect::MakeNotOwned(RetEffect::CF));
+ }
+}
+
+const RetainSummary *
+RetainSummaryManager::getStandardMethodSummary(const ObjCMethodDecl *MD,
+ Selector S, QualType RetTy) {
+
+ if (MD) {
+ // Scan the method decl for 'void*' arguments. These should be treated
+ // as 'StopTracking' because they are often used with delegates.
+ // Delegates are a frequent form of false positives with the retain
+ // count checker.
+ unsigned i = 0;
+ for (ObjCMethodDecl::param_const_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I, ++i)
+ if (const ParmVarDecl *PD = *I) {
+ QualType Ty = Ctx.getCanonicalType(PD->getType());
+ if (Ty.getLocalUnqualifiedType() == Ctx.VoidPtrTy)
+ ScratchArgs = AF.add(ScratchArgs, i, StopTracking);
+ }
+ }
+
+ // Any special effects?
+ ArgEffect ReceiverEff = DoNothing;
+ RetEffect ResultEff = RetEffect::MakeNoRet();
+
+ // Check the method family, and apply any default annotations.
+ switch (MD ? MD->getMethodFamily() : S.getMethodFamily()) {
+ case OMF_None:
+ case OMF_performSelector:
+ // Assume all Objective-C methods follow Cocoa Memory Management rules.
+ // FIXME: Does the non-threaded performSelector family really belong here?
+ // The selector could be, say, @selector(copy).
+ if (cocoa::isCocoaObjectRef(RetTy))
+ ResultEff = RetEffect::MakeNotOwned(RetEffect::ObjC);
+ else if (coreFoundation::isCFObjectRef(RetTy)) {
+ // ObjCMethodDecl currently doesn't consider CF objects as valid return
+ // values for alloc, new, copy, or mutableCopy, so we have to
+ // double-check with the selector. This is ugly, but there aren't that
+ // many Objective-C methods that return CF objects, right?
+ if (MD) {
+ switch (S.getMethodFamily()) {
+ case OMF_alloc:
+ case OMF_new:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ ResultEff = RetEffect::MakeOwned(RetEffect::CF, true);
+ break;
+ default:
+ ResultEff = RetEffect::MakeNotOwned(RetEffect::CF);
+ break;
+ }
+ } else {
+ ResultEff = RetEffect::MakeNotOwned(RetEffect::CF);
+ }
+ }
+ break;
+ case OMF_init:
+ ResultEff = ObjCInitRetE;
+ ReceiverEff = DecRefMsg;
+ break;
+ case OMF_alloc:
+ case OMF_new:
+ case OMF_copy:
+ case OMF_mutableCopy:
+ if (cocoa::isCocoaObjectRef(RetTy))
+ ResultEff = ObjCAllocRetE;
+ else if (coreFoundation::isCFObjectRef(RetTy))
+ ResultEff = RetEffect::MakeOwned(RetEffect::CF, true);
+ break;
+ case OMF_autorelease:
+ ReceiverEff = Autorelease;
+ break;
+ case OMF_retain:
+ ReceiverEff = IncRefMsg;
+ break;
+ case OMF_release:
+ ReceiverEff = DecRefMsg;
+ break;
+ case OMF_dealloc:
+ ReceiverEff = Dealloc;
+ break;
+ case OMF_self:
+ // -self is handled specially by the ExprEngine to propagate the receiver.
+ break;
+ case OMF_retainCount:
+ case OMF_finalize:
+ // These methods don't return objects.
+ break;
+ }
+
+ // If one of the arguments in the selector has the keyword 'delegate' we
+ // should stop tracking the reference count for the receiver. This is
+ // because the reference count is quite possibly handled by a delegate
+ // method.
+ if (S.isKeywordSelector()) {
+ const std::string &str = S.getAsString();
+ assert(!str.empty());
+ if (StrInStrNoCase(str, "delegate:") != StringRef::npos)
+ ReceiverEff = StopTracking;
+ }
+
+ if (ScratchArgs.isEmpty() && ReceiverEff == DoNothing &&
+ ResultEff.getKind() == RetEffect::NoRet)
+ return getDefaultSummary();
+
+ return getPersistentSummary(ResultEff, ReceiverEff, MayEscape);
+}
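+
+// Illustrative examples of the method-family defaults computed above
+// (comments only, placeholder receivers):
+//
+//   [NSString alloc]          // OMF_alloc: +1 (owned) Cocoa object
+//   [string mutableCopy]      // OMF_mutableCopy: +1 (owned)
+//   [dict objectForKey:key]   // OMF_None: +0 (not owned)
+//
+// A keyword selector containing 'delegate:' (e.g. -initWithRequest:delegate:)
+// additionally stops tracking the receiver.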
+
+const RetainSummary *
+RetainSummaryManager::getInstanceMethodSummary(const ObjCMessage &msg,
+ ProgramStateRef state,
+ const LocationContext *LC) {
+
+ // We need the type information of the tracked receiver object.
+ // Retrieve it from the state.
+ const Expr *Receiver = msg.getInstanceReceiver();
+ const ObjCInterfaceDecl *ID = 0;
+
+ // FIXME: Is this really working as expected? There are cases where
+ // we just use the 'ID' from the message expression.
+ SVal receiverV;
+
+ if (Receiver) {
+ receiverV = state->getSValAsScalarOrLoc(Receiver, LC);
+
+ // FIXME: Eventually replace the use of state->get<RefBindings> with
+ // a generic API for reasoning about the Objective-C types of symbolic
+ // objects.
+ if (SymbolRef Sym = receiverV.getAsLocSymbol())
+ if (const RefVal *T = state->get<RefBindings>(Sym))
+ if (const ObjCObjectPointerType* PT =
+ T->getType()->getAs<ObjCObjectPointerType>())
+ ID = PT->getInterfaceDecl();
+
+ // FIXME: this is a hack. This may or may not be the actual method
+ // that is called.
+ if (!ID) {
+ if (const ObjCObjectPointerType *PT =
+ Receiver->getType()->getAs<ObjCObjectPointerType>())
+ ID = PT->getInterfaceDecl();
+ }
+ } else {
+ // FIXME: Hack for 'super'.
+ ID = msg.getReceiverInterface();
+ }
+
+ // FIXME: The receiver could be a reference to a class, meaning that
+ // we should use the class method.
+ return getInstanceMethodSummary(msg, ID);
+}
+
+const RetainSummary *
+RetainSummaryManager::getMethodSummary(Selector S, IdentifierInfo *ClsName,
+ const ObjCInterfaceDecl *ID,
+ const ObjCMethodDecl *MD, QualType RetTy,
+ ObjCMethodSummariesTy &CachedSummaries) {
+
+ // Look up a summary in our summary cache.
+ const RetainSummary *Summ = CachedSummaries.find(ID, ClsName, S);
+
+ if (!Summ) {
+ Summ = getStandardMethodSummary(MD, S, RetTy);
+
+ // Annotations override defaults.
+ updateSummaryFromAnnotations(Summ, MD);
+
+ // Memoize the summary.
+ CachedSummaries[ObjCSummaryKey(ID, ClsName, S)] = Summ;
+ }
+
+ return Summ;
+}
+
+void RetainSummaryManager::InitializeClassMethodSummaries() {
+ assert(ScratchArgs.isEmpty());
+ // Create the [NSAssertionHandler currentHandler] summary.
+ addClassMethSummary("NSAssertionHandler", "currentHandler",
+ getPersistentSummary(RetEffect::MakeNotOwned(RetEffect::ObjC)));
+
+ // Create the [NSAutoreleasePool addObject:] summary.
+ ScratchArgs = AF.add(ScratchArgs, 0, Autorelease);
+ addClassMethSummary("NSAutoreleasePool", "addObject",
+ getPersistentSummary(RetEffect::MakeNoRet(),
+ DoNothing, Autorelease));
+
+ // Create the summaries for [NSObject performSelector...]. We treat
+ // these as 'stop tracking' for the arguments because they are often
+ // used for delegates that can release the object. When we have better
+ // inter-procedural analysis we can potentially do something better. This
+ // workaround exists to remove false positives.
+ const RetainSummary *Summ =
+ getPersistentSummary(RetEffect::MakeNoRet(), DoNothing, StopTracking);
+ IdentifierInfo *NSObjectII = &Ctx.Idents.get("NSObject");
+ addClsMethSummary(NSObjectII, Summ, "performSelector", "withObject",
+ "afterDelay", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelector", "withObject",
+ "afterDelay", "inModes", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelectorOnMainThread",
+ "withObject", "waitUntilDone", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelectorOnMainThread",
+ "withObject", "waitUntilDone", "modes", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelector", "onThread",
+ "withObject", "waitUntilDone", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelector", "onThread",
+ "withObject", "waitUntilDone", "modes", NULL);
+ addClsMethSummary(NSObjectII, Summ, "performSelectorInBackground",
+ "withObject", NULL);
+}
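+
+// The intent of the 'performSelector' summaries above, illustrated with a
+// hypothetical call (comments only): arguments passed this way stop being
+// tracked because the selected method may release them later.
+//
+//   [target performSelector:@selector(cleanup:) withObject:obj afterDelay:1.0];
+//   // 'obj' is no longer tracked by the checker.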
+
+void RetainSummaryManager::InitializeMethodSummaries() {
+
+ assert (ScratchArgs.isEmpty());
+
+ // Create the "init" selector. It just acts as a pass-through for the
+ // receiver.
+ const RetainSummary *InitSumm = getPersistentSummary(ObjCInitRetE, DecRefMsg);
+ addNSObjectMethSummary(GetNullarySelector("init", Ctx), InitSumm);
+
+ // awakeAfterUsingCoder: behaves basically like an 'init' method. It
+ // claims the receiver and returns a retained object.
+ addNSObjectMethSummary(GetUnarySelector("awakeAfterUsingCoder", Ctx),
+ InitSumm);
+
+ // The next methods are allocators.
+ const RetainSummary *AllocSumm = getPersistentSummary(ObjCAllocRetE);
+ const RetainSummary *CFAllocSumm =
+ getPersistentSummary(RetEffect::MakeOwned(RetEffect::CF, true));
+
+ // Create the "retain" selector.
+ RetEffect NoRet = RetEffect::MakeNoRet();
+ const RetainSummary *Summ = getPersistentSummary(NoRet, IncRefMsg);
+ addNSObjectMethSummary(GetNullarySelector("retain", Ctx), Summ);
+
+ // Create the "release" selector.
+ Summ = getPersistentSummary(NoRet, DecRefMsg);
+ addNSObjectMethSummary(GetNullarySelector("release", Ctx), Summ);
+
+ // Create the "drain" selector.
+ Summ = getPersistentSummary(NoRet, isGCEnabled() ? DoNothing : DecRef);
+ addNSObjectMethSummary(GetNullarySelector("drain", Ctx), Summ);
+
+ // Create the -dealloc summary.
+ Summ = getPersistentSummary(NoRet, Dealloc);
+ addNSObjectMethSummary(GetNullarySelector("dealloc", Ctx), Summ);
+
+ // Create the "autorelease" selector.
+ Summ = getPersistentSummary(NoRet, Autorelease);
+ addNSObjectMethSummary(GetNullarySelector("autorelease", Ctx), Summ);
+
+ // Specially handle NSAutoreleasePool.
+ addInstMethSummary("NSAutoreleasePool", "init",
+ getPersistentSummary(NoRet, NewAutoreleasePool));
+
+ // For NSWindow, allocated objects are (initially) self-owned.
+ // FIXME: For now we opt for false negatives with NSWindow, as these objects
+ // self-own themselves. However, they only do this once they are displayed.
+ // Thus, we need to track an NSWindow's display status.
+ // This is tracked in <rdar://problem/6062711>.
+ // See also http://llvm.org/bugs/show_bug.cgi?id=3714.
+ const RetainSummary *NoTrackYet = getPersistentSummary(RetEffect::MakeNoRet(),
+ StopTracking,
+ StopTracking);
+
+ addClassMethSummary("NSWindow", "alloc", NoTrackYet);
+
+#if 0
+ addInstMethSummary("NSWindow", NoTrackYet, "initWithContentRect",
+ "styleMask", "backing", "defer", NULL);
+
+ addInstMethSummary("NSWindow", NoTrackYet, "initWithContentRect",
+ "styleMask", "backing", "defer", "screen", NULL);
+#endif
+
+ // For NSPanel (which subclasses NSWindow), allocated objects are not
+ // self-owned.
+ // FIXME: For now we don't track NSPanel objects for the same reason
+ // as for NSWindow objects.
+ addClassMethSummary("NSPanel", "alloc", NoTrackYet);
+
+#if 0
+ addInstMethSummary("NSPanel", NoTrackYet, "initWithContentRect",
+ "styleMask", "backing", "defer", NULL);
+
+ addInstMethSummary("NSPanel", NoTrackYet, "initWithContentRect",
+ "styleMask", "backing", "defer", "screen", NULL);
+#endif
+
+ // Don't track allocated autorelease pools yet, as it is okay to prematurely
+ // exit a method.
+ addClassMethSummary("NSAutoreleasePool", "alloc", NoTrackYet);
+ addClassMethSummary("NSAutoreleasePool", "allocWithZone", NoTrackYet, false);
+
+ // Create summaries for QCRenderer/QCView -createSnapshotImageOfType:
+ addInstMethSummary("QCRenderer", AllocSumm,
+ "createSnapshotImageOfType", NULL);
+ addInstMethSummary("QCView", AllocSumm,
+ "createSnapshotImageOfType", NULL);
+
+ // Create summaries for CIContext, 'createCGImage' and
+ // 'createCGLayerWithSize'. These objects are CF objects, and are not
+ // automatically garbage collected.
+ addInstMethSummary("CIContext", CFAllocSumm,
+ "createCGImage", "fromRect", NULL);
+ addInstMethSummary("CIContext", CFAllocSumm,
+ "createCGImage", "fromRect", "format", "colorSpace", NULL);
+ addInstMethSummary("CIContext", CFAllocSumm, "createCGLayerWithSize",
+ "info", NULL);
+}
+
+//===----------------------------------------------------------------------===//
+// AutoreleaseBindings - State used to track objects in autorelease pools.
+//===----------------------------------------------------------------------===//
+
+typedef llvm::ImmutableMap<SymbolRef, unsigned> ARCounts;
+typedef llvm::ImmutableMap<SymbolRef, ARCounts> ARPoolContents;
+typedef llvm::ImmutableList<SymbolRef> ARStack;
+
+static int AutoRCIndex = 0;
+static int AutoRBIndex = 0;
+
+namespace { class AutoreleasePoolContents {}; }
+namespace { class AutoreleaseStack {}; }
+
+namespace clang {
+namespace ento {
+template<> struct ProgramStateTrait<AutoreleaseStack>
+ : public ProgramStatePartialTrait<ARStack> {
+ static inline void *GDMIndex() { return &AutoRBIndex; }
+};
+
+template<> struct ProgramStateTrait<AutoreleasePoolContents>
+ : public ProgramStatePartialTrait<ARPoolContents> {
+ static inline void *GDMIndex() { return &AutoRCIndex; }
+};
+} // end ento namespace
+} // end clang namespace
+
+static SymbolRef GetCurrentAutoreleasePool(ProgramStateRef state) {
+ ARStack stack = state->get<AutoreleaseStack>();
+ return stack.isEmpty() ? SymbolRef() : stack.getHead();
+}
+
+static ProgramStateRef
+SendAutorelease(ProgramStateRef state,
+ ARCounts::Factory &F,
+ SymbolRef sym) {
+ SymbolRef pool = GetCurrentAutoreleasePool(state);
+ const ARCounts *cnts = state->get<AutoreleasePoolContents>(pool);
+ ARCounts newCnts(0);
+
+ if (cnts) {
+ const unsigned *cnt = (*cnts).lookup(sym);
+ newCnts = F.add(*cnts, sym, cnt ? *cnt + 1 : 1);
+ }
+ else
+ newCnts = F.add(F.getEmptyMap(), sym, 1);
+
+ return state->set<AutoreleasePoolContents>(pool, newCnts);
+}
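+
+// Illustrative walk-through of the bookkeeping above (comments only): two
+// -autorelease sends on the same object within one pool leave that pool's
+// ARCounts map holding a count of 2 for the object's symbol.
+//
+//   id x = [[NSObject alloc] init];
+//   [x autorelease];   // count for x's symbol becomes 1
+//   [x autorelease];   // count for x's symbol becomes 2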
+
+//===----------------------------------------------------------------------===//
+// Error reporting.
+//===----------------------------------------------------------------------===//
+namespace {
+ typedef llvm::DenseMap<const ExplodedNode *, const RetainSummary *>
+ SummaryLogTy;
+
+ //===-------------===//
+ // Bug Descriptions. //
+ //===-------------===//
+
+ class CFRefBug : public BugType {
+ protected:
+ CFRefBug(StringRef name)
+ : BugType(name, categories::MemoryCoreFoundationObjectiveC) {}
+ public:
+
+ // FIXME: Eventually remove.
+ virtual const char *getDescription() const = 0;
+
+ virtual bool isLeak() const { return false; }
+ };
+
+ class UseAfterRelease : public CFRefBug {
+ public:
+ UseAfterRelease() : CFRefBug("Use-after-release") {}
+
+ const char *getDescription() const {
+ return "Reference-counted object is used after it is released";
+ }
+ };
+
+ class BadRelease : public CFRefBug {
+ public:
+ BadRelease() : CFRefBug("Bad release") {}
+
+ const char *getDescription() const {
+ return "Incorrect decrement of the reference count of an object that is "
+ "not owned at this point by the caller";
+ }
+ };
+
+ class DeallocGC : public CFRefBug {
+ public:
+ DeallocGC()
+ : CFRefBug("-dealloc called while using garbage collection") {}
+
+ const char *getDescription() const {
+ return "-dealloc called while using garbage collection";
+ }
+ };
+
+ class DeallocNotOwned : public CFRefBug {
+ public:
+ DeallocNotOwned()
+ : CFRefBug("-dealloc sent to non-exclusively owned object") {}
+
+ const char *getDescription() const {
+ return "-dealloc sent to object that may be referenced elsewhere";
+ }
+ };
+
+ class OverAutorelease : public CFRefBug {
+ public:
+ OverAutorelease()
+ : CFRefBug("Object sent -autorelease too many times") {}
+
+ const char *getDescription() const {
+ return "Object sent -autorelease too many times";
+ }
+ };
+
+ class ReturnedNotOwnedForOwned : public CFRefBug {
+ public:
+ ReturnedNotOwnedForOwned()
+ : CFRefBug("Method should return an owned object") {}
+
+ const char *getDescription() const {
+ return "Object with a +0 retain count returned to caller where a +1 "
+ "(owning) retain count is expected";
+ }
+ };
+
+ class Leak : public CFRefBug {
+ const bool isReturn;
+ protected:
+ Leak(StringRef name, bool isRet)
+ : CFRefBug(name), isReturn(isRet) {
+ // Leaks should not be reported if they are post-dominated by a sink.
+ setSuppressOnSink(true);
+ }
+ public:
+
+ const char *getDescription() const { return ""; }
+
+ bool isLeak() const { return true; }
+ };
+
+ class LeakAtReturn : public Leak {
+ public:
+ LeakAtReturn(StringRef name)
+ : Leak(name, true) {}
+ };
+
+ class LeakWithinFunction : public Leak {
+ public:
+ LeakWithinFunction(StringRef name)
+ : Leak(name, false) {}
+ };
+
+ //===---------===//
+ // Bug Reports. //
+ //===---------===//
+
+ class CFRefReportVisitor : public BugReporterVisitorImpl<CFRefReportVisitor> {
+ protected:
+ SymbolRef Sym;
+ const SummaryLogTy &SummaryLog;
+ bool GCEnabled;
+
+ public:
+ CFRefReportVisitor(SymbolRef sym, bool gcEnabled, const SummaryLogTy &log)
+ : Sym(sym), SummaryLog(log), GCEnabled(gcEnabled) {}
+
+ virtual void Profile(llvm::FoldingSetNodeID &ID) const {
+ static int x = 0;
+ ID.AddPointer(&x);
+ ID.AddPointer(Sym);
+ }
+
+ virtual PathDiagnosticPiece *VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR);
+
+ virtual PathDiagnosticPiece *getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *N,
+ BugReport &BR);
+ };
+
+ class CFRefLeakReportVisitor : public CFRefReportVisitor {
+ public:
+ CFRefLeakReportVisitor(SymbolRef sym, bool GCEnabled,
+ const SummaryLogTy &log)
+ : CFRefReportVisitor(sym, GCEnabled, log) {}
+
+ PathDiagnosticPiece *getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *N,
+ BugReport &BR);
+
+ virtual BugReporterVisitor *clone() const {
+ // The curiously-recurring template pattern only works for one level of
+ // subclassing. Rather than make a new template base for
+ // CFRefReportVisitor, we simply override clone() to do the right thing.
+ // This could be trouble someday if BugReporterVisitorImpl is ever
+ // used for something else besides a convenient implementation of clone().
+ return new CFRefLeakReportVisitor(*this);
+ }
+ };
+
+ class CFRefReport : public BugReport {
+ void addGCModeDescription(const LangOptions &LOpts, bool GCEnabled);
+
+ public:
+ CFRefReport(CFRefBug &D, const LangOptions &LOpts, bool GCEnabled,
+ const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
+ bool registerVisitor = true)
+ : BugReport(D, D.getDescription(), n) {
+ if (registerVisitor)
+ addVisitor(new CFRefReportVisitor(sym, GCEnabled, Log));
+ addGCModeDescription(LOpts, GCEnabled);
+ }
+
+ CFRefReport(CFRefBug &D, const LangOptions &LOpts, bool GCEnabled,
+ const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
+ StringRef endText)
+ : BugReport(D, D.getDescription(), endText, n) {
+ addVisitor(new CFRefReportVisitor(sym, GCEnabled, Log));
+ addGCModeDescription(LOpts, GCEnabled);
+ }
+
+ virtual std::pair<ranges_iterator, ranges_iterator> getRanges() {
+ const CFRefBug& BugTy = static_cast<CFRefBug&>(getBugType());
+ if (!BugTy.isLeak())
+ return BugReport::getRanges();
+ else
+ return std::make_pair(ranges_iterator(), ranges_iterator());
+ }
+ };
+
+ class CFRefLeakReport : public CFRefReport {
+ const MemRegion* AllocBinding;
+
+ public:
+ CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts, bool GCEnabled,
+ const SummaryLogTy &Log, ExplodedNode *n, SymbolRef sym,
+ CheckerContext &Ctx);
+
+ PathDiagnosticLocation getLocation(const SourceManager &SM) const {
+ assert(Location.isValid());
+ return Location;
+ }
+ };
+} // end anonymous namespace
+
+void CFRefReport::addGCModeDescription(const LangOptions &LOpts,
+ bool GCEnabled) {
+ const char *GCModeDescription = 0;
+
+ switch (LOpts.getGC()) {
+ case LangOptions::GCOnly:
+ assert(GCEnabled);
+ GCModeDescription = "Code is compiled to only use garbage collection";
+ break;
+
+ case LangOptions::NonGC:
+ assert(!GCEnabled);
+ GCModeDescription = "Code is compiled to use reference counts";
+ break;
+
+ case LangOptions::HybridGC:
+ if (GCEnabled) {
+ GCModeDescription = "Code is compiled to use either garbage collection "
+ "(GC) or reference counts (non-GC). The bug occurs "
+ "with GC enabled";
+ break;
+ } else {
+ GCModeDescription = "Code is compiled to use either garbage collection "
+ "(GC) or reference counts (non-GC). The bug occurs "
+ "in non-GC mode";
+ break;
+ }
+ }
+
+ assert(GCModeDescription && "invalid/unknown GC mode");
+ addExtraText(GCModeDescription);
+}
+
+// FIXME: This should be a method on SmallVector.
+static inline bool contains(const SmallVectorImpl<ArgEffect>& V,
+ ArgEffect X) {
+ for (SmallVectorImpl<ArgEffect>::const_iterator I=V.begin(), E=V.end();
+ I!=E; ++I)
+ if (*I == X) return true;
+
+ return false;
+}
+
+static bool isPropertyAccess(const Stmt *S, ParentMap &PM) {
+ unsigned maxDepth = 4;
+ while (S && maxDepth) {
+ if (const PseudoObjectExpr *PO = dyn_cast<PseudoObjectExpr>(S)) {
+ if (!isa<ObjCMessageExpr>(PO->getSyntacticForm()))
+ return true;
+ return false;
+ }
+ S = PM.getParent(S);
+ --maxDepth;
+ }
+ return false;
+}
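+
+// Illustrative examples for the check above (comments only): a dot-syntax
+// access such as 'obj.name' is a PseudoObjectExpr whose syntactic form is a
+// property reference, so it is reported as a "Property", while an explicit
+// message such as '[obj name]' is reported as a "Method".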
+
+PathDiagnosticPiece *CFRefReportVisitor::VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+
+ if (!isa<StmtPoint>(N->getLocation()))
+ return NULL;
+
+ // Check if the type state has changed.
+ ProgramStateRef PrevSt = PrevN->getState();
+ ProgramStateRef CurrSt = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
+
+ const RefVal* CurrT = CurrSt->get<RefBindings>(Sym);
+ if (!CurrT) return NULL;
+
+ const RefVal &CurrV = *CurrT;
+ const RefVal *PrevT = PrevSt->get<RefBindings>(Sym);
+
+ // Create a string buffer to contain all the useful things we want
+ // to tell the user.
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ // This is the allocation site since the previous node had no bindings
+ // for this symbol.
+ if (!PrevT) {
+ const Stmt *S = cast<StmtPoint>(N->getLocation()).getStmt();
+
+ if (isa<ObjCArrayLiteral>(S)) {
+ os << "NSArray literal is an object with a +0 retain count";
+ }
+ else if (isa<ObjCDictionaryLiteral>(S)) {
+ os << "NSDictionary literal is an object with a +0 retain count";
+ }
+ else {
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ // Get the name of the callee (if it is available).
+ SVal X = CurrSt->getSValAsScalarOrLoc(CE->getCallee(), LCtx);
+ if (const FunctionDecl *FD = X.getAsFunctionDecl())
+ os << "Call to function '" << *FD << '\'';
+ else
+ os << "function call";
+ }
+ else {
+ assert(isa<ObjCMessageExpr>(S));
+ // The message expression may have been written directly or as
+ // a property access. Lazily determine which case we are looking at.
+ os << (isPropertyAccess(S, N->getParentMap()) ? "Property" : "Method");
+ }
+
+ if (CurrV.getObjKind() == RetEffect::CF) {
+ os << " returns a Core Foundation object with a ";
+ }
+ else {
+ assert (CurrV.getObjKind() == RetEffect::ObjC);
+ os << " returns an Objective-C object with a ";
+ }
+
+ if (CurrV.isOwned()) {
+ os << "+1 retain count";
+
+ if (GCEnabled) {
+ assert(CurrV.getObjKind() == RetEffect::CF);
+ os << ". "
+ "Core Foundation objects are not automatically garbage collected.";
+ }
+ }
+ else {
+ assert (CurrV.isNotOwned());
+ os << "+0 retain count";
+ }
+ }
+
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ return new PathDiagnosticEventPiece(Pos, os.str());
+ }
+
+ // Gather up the effects that were performed on the object at this
+ // program point.
+ SmallVector<ArgEffect, 2> AEffects;
+
+ const ExplodedNode *OrigNode = BRC.getNodeResolver().getOriginalNode(N);
+ if (const RetainSummary *Summ = SummaryLog.lookup(OrigNode)) {
+ // We only have summaries attached to nodes after evaluating CallExpr and
+ // ObjCMessageExprs.
+ const Stmt *S = cast<StmtPoint>(N->getLocation()).getStmt();
+
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S)) {
+ // Iterate through the parameter expressions and see if the symbol
+ // was ever passed as an argument.
+ unsigned i = 0;
+
+ for (CallExpr::const_arg_iterator AI=CE->arg_begin(), AE=CE->arg_end();
+ AI!=AE; ++AI, ++i) {
+
+ // Retrieve the value of the argument. Is it the symbol
+ // we are interested in?
+ if (CurrSt->getSValAsScalarOrLoc(*AI, LCtx).getAsLocSymbol() != Sym)
+ continue;
+
+ // We have an argument. Get the effect!
+ AEffects.push_back(Summ->getArg(i));
+ }
+ }
+ else if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(S)) {
+ if (const Expr *receiver = ME->getInstanceReceiver())
+ if (CurrSt->getSValAsScalarOrLoc(receiver, LCtx)
+ .getAsLocSymbol() == Sym) {
+ // The symbol we are tracking is the receiver.
+ AEffects.push_back(Summ->getReceiverEffect());
+ }
+ }
+ }
+
+ do {
+ // Get the previous type state.
+ RefVal PrevV = *PrevT;
+
+ // Specially handle -dealloc.
+ if (!GCEnabled && contains(AEffects, Dealloc)) {
+ // Determine if the object's reference count was pushed to zero.
+ assert(!(PrevV == CurrV) && "The typestate *must* have changed.");
+ // We may not have transitioned to 'release' if we hit an error.
+ // This case is handled elsewhere.
+ if (CurrV.getKind() == RefVal::Released) {
+ assert(CurrV.getCombinedCounts() == 0);
+ os << "Object released by directly sending the '-dealloc' message";
+ break;
+ }
+ }
+
+ // Specially handle CFMakeCollectable and friends.
+ if (contains(AEffects, MakeCollectable)) {
+ // Get the name of the function.
+ const Stmt *S = cast<StmtPoint>(N->getLocation()).getStmt();
+ SVal X =
+ CurrSt->getSValAsScalarOrLoc(cast<CallExpr>(S)->getCallee(), LCtx);
+ const FunctionDecl *FD = X.getAsFunctionDecl();
+
+ if (GCEnabled) {
+ // Determine if the object's reference count was pushed to zero.
+ assert(!(PrevV == CurrV) && "The typestate *must* have changed.");
+
+ os << "In GC mode a call to '" << *FD
+ << "' decrements an object's retain count and registers the "
+ "object with the garbage collector. ";
+
+ if (CurrV.getKind() == RefVal::Released) {
+ assert(CurrV.getCount() == 0);
+ os << "Since it now has a 0 retain count the object can be "
+ "automatically collected by the garbage collector.";
+ }
+ else
+ os << "An object must have a 0 retain count to be garbage collected. "
+ "After this call its retain count is +" << CurrV.getCount()
+ << '.';
+ }
+ else
+ os << "When GC is not enabled a call to '" << *FD
+ << "' has no effect on its argument.";
+
+ // Nothing more to say.
+ break;
+ }
+
+ // Determine if the typestate has changed.
+ if (!(PrevV == CurrV))
+ switch (CurrV.getKind()) {
+ case RefVal::Owned:
+ case RefVal::NotOwned:
+
+ if (PrevV.getCount() == CurrV.getCount()) {
+ // Did an autorelease message get sent?
+ if (PrevV.getAutoreleaseCount() == CurrV.getAutoreleaseCount())
+ return 0;
+
+ assert(PrevV.getAutoreleaseCount() < CurrV.getAutoreleaseCount());
+ os << "Object sent -autorelease message";
+ break;
+ }
+
+ if (PrevV.getCount() > CurrV.getCount())
+ os << "Reference count decremented.";
+ else
+ os << "Reference count incremented.";
+
+ if (unsigned Count = CurrV.getCount())
+ os << " The object now has a +" << Count << " retain count.";
+
+ if (PrevV.getKind() == RefVal::Released) {
+ assert(GCEnabled && CurrV.getCount() > 0);
+ os << " The object is not eligible for garbage collection until "
+ "the retain count reaches 0 again.";
+ }
+
+ break;
+
+ case RefVal::Released:
+ os << "Object released.";
+ break;
+
+ case RefVal::ReturnedOwned:
+ // Autoreleases can be applied after marking a node ReturnedOwned.
+ if (CurrV.getAutoreleaseCount())
+ return NULL;
+
+ os << "Object returned to caller as an owning reference (single "
+ "retain count transferred to caller)";
+ break;
+
+ case RefVal::ReturnedNotOwned:
+ os << "Object returned to caller with a +0 retain count";
+ break;
+
+ default:
+ return NULL;
+ }
+
+ // Emit any remaining diagnostics for the argument effects (if any).
+ for (SmallVectorImpl<ArgEffect>::iterator I=AEffects.begin(),
+ E=AEffects.end(); I != E; ++I) {
+
+ // A bunch of things have alternate behavior under GC.
+ if (GCEnabled)
+ switch (*I) {
+ default: break;
+ case Autorelease:
+ os << "In GC mode an 'autorelease' has no effect.";
+ continue;
+ case IncRefMsg:
+ os << "In GC mode the 'retain' message has no effect.";
+ continue;
+ case DecRefMsg:
+ os << "In GC mode the 'release' message has no effect.";
+ continue;
+ }
+ }
+ } while (0);
+
+ if (os.str().empty())
+ return 0; // We have nothing to say!
+
+ const Stmt *S = cast<StmtPoint>(N->getLocation()).getStmt();
+ PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
+ N->getLocationContext());
+ PathDiagnosticPiece *P = new PathDiagnosticEventPiece(Pos, os.str());
+
+ // Add the range by scanning the children of the statement for any bindings
+ // to Sym.
+ for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+ I!=E; ++I)
+ if (const Expr *Exp = dyn_cast_or_null<Expr>(*I))
+ if (CurrSt->getSValAsScalarOrLoc(Exp, LCtx).getAsLocSymbol() == Sym) {
+ P->addRange(Exp->getSourceRange());
+ break;
+ }
+
+ return P;
+}
+
+// Find the first node in the current function context that referred to the
+// tracked symbol and the memory location the value was stored to. Note, the
+// value is only reported if the allocation occurred in the same function as
+// the leak.
+static std::pair<const ExplodedNode*,const MemRegion*>
+GetAllocationSite(ProgramStateManager& StateMgr, const ExplodedNode *N,
+ SymbolRef Sym) {
+ const ExplodedNode *Last = N;
+ const MemRegion* FirstBinding = 0;
+ const LocationContext *LeakContext = N->getLocationContext();
+
+ while (N) {
+ ProgramStateRef St = N->getState();
+ RefBindings B = St->get<RefBindings>();
+
+ if (!B.lookup(Sym))
+ break;
+
+ StoreManager::FindUniqueBinding FB(Sym);
+ StateMgr.iterBindings(St, FB);
+ if (FB) FirstBinding = FB.getRegion();
+
+ // The allocation node is the last node in the current context in which the
+ // symbol was tracked.
+ if (N->getLocationContext() == LeakContext)
+ Last = N;
+
+ N = N->pred_empty() ? NULL : *(N->pred_begin());
+ }
+
+ // If allocation happened in a function different from the leak node context,
+ // do not report the binding. ('N' may be null here if the walk ran past the
+ // root of the graph without the binding ever disappearing.)
+ if (!N || N->getLocationContext() != LeakContext) {
+ FirstBinding = 0;
+ }
+
+ return std::make_pair(Last, FirstBinding);
+}
+
+PathDiagnosticPiece*
+CFRefReportVisitor::getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *EndN,
+ BugReport &BR) {
+ BR.markInteresting(Sym);
+ return BugReporterVisitor::getDefaultEndPath(BRC, EndN, BR);
+}
+
+PathDiagnosticPiece*
+CFRefLeakReportVisitor::getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *EndN,
+ BugReport &BR) {
+
+ // Tell the BugReporterContext to report cases when the tracked symbol is
+ // assigned to different variables, etc.
+ BR.markInteresting(Sym);
+
+ // We are reporting a leak. Walk up the graph to get to the first node where
+ // the symbol appeared, and also get the first VarDecl that the tracked object
+ // is stored to.
+ const ExplodedNode *AllocNode = 0;
+ const MemRegion* FirstBinding = 0;
+
+ llvm::tie(AllocNode, FirstBinding) =
+ GetAllocationSite(BRC.getStateManager(), EndN, Sym);
+
+ SourceManager& SM = BRC.getSourceManager();
+
+ // Compute an actual location for the leak. Sometimes a leak doesn't
+ // occur at an actual statement (e.g., transition between blocks; end
+ // of function) so we need to walk the graph and compute a real location.
+ const ExplodedNode *LeakN = EndN;
+ PathDiagnosticLocation L = PathDiagnosticLocation::createEndOfPath(LeakN, SM);
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "Object leaked: ";
+
+ if (FirstBinding) {
+ os << "object allocated and stored into '"
+ << FirstBinding->getString() << '\'';
+ }
+ else
+ os << "allocated object";
+
+ // Get the retain count.
+ const RefVal* RV = EndN->getState()->get<RefBindings>(Sym);
+
+ if (RV->getKind() == RefVal::ErrorLeakReturned) {
+ // FIXME: Per comments in rdar://6320065, "create" only applies to CF
+ // objects. Only "copy", "alloc", "retain" and "new" transfer ownership
+ // to the caller for NS objects.
+ const Decl *D = &EndN->getCodeDecl();
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ os << " is returned from a method whose name ('"
+ << MD->getSelector().getAsString()
+ << "') does not start with 'copy', 'mutableCopy', 'alloc' or 'new'."
+ " This violates the naming convention rules"
+ " given in the Memory Management Guide for Cocoa";
+ }
+ else {
+ const FunctionDecl *FD = cast<FunctionDecl>(D);
+ os << " is returned from a function whose name ('"
+ << *FD
+ << "') does not contain 'Copy' or 'Create'. This violates the naming"
+ " convention rules given in the Memory Management Guide for Core"
+ " Foundation";
+ }
+ }
+ else if (RV->getKind() == RefVal::ErrorGCLeakReturned) {
+ ObjCMethodDecl &MD = cast<ObjCMethodDecl>(EndN->getCodeDecl());
+ os << " and returned from method '" << MD.getSelector().getAsString()
+ << "' is potentially leaked when using garbage collection. Callers "
+ "of this method do not expect a returned object with a +1 retain "
+ "count since they expect the object to be managed by the garbage "
+ "collector";
+ }
+ else
+ os << " is not referenced later in this execution path and has a retain "
+ "count of +" << RV->getCount();
+
+ return new PathDiagnosticEventPiece(L, os.str());
+}
+
+CFRefLeakReport::CFRefLeakReport(CFRefBug &D, const LangOptions &LOpts,
+ bool GCEnabled, const SummaryLogTy &Log,
+ ExplodedNode *n, SymbolRef sym,
+ CheckerContext &Ctx)
+: CFRefReport(D, LOpts, GCEnabled, Log, n, sym, false) {
+
+ // Most bug reports are cached at the location where they occurred.
+ // With leaks, we want to unique them by the location where they were
+ // allocated, and only report a single path. To do this, we need to find
+ // the allocation site of a piece of tracked memory, which we do via a
+ // call to GetAllocationSite. This will walk the ExplodedGraph backwards.
+ // Note that this is *not* the trimmed graph; we are guaranteed, however,
+ // that all ancestor nodes that represent the allocation site have the
+ // same SourceLocation.
+ const ExplodedNode *AllocNode = 0;
+
+ const SourceManager& SMgr = Ctx.getSourceManager();
+
+ llvm::tie(AllocNode, AllocBinding) = // Set AllocBinding.
+ GetAllocationSite(Ctx.getStateManager(), getErrorNode(), sym);
+
+ // Get the SourceLocation for the allocation site.
+ ProgramPoint P = AllocNode->getLocation();
+ const Stmt *AllocStmt = cast<PostStmt>(P).getStmt();
+ Location = PathDiagnosticLocation::createBegin(AllocStmt, SMgr,
+ n->getLocationContext());
+ // Fill in the description of the bug.
+ Description.clear();
+ llvm::raw_string_ostream os(Description);
+ os << "Potential leak ";
+ if (GCEnabled)
+ os << "(when using garbage collection) ";
+ os << "of an object";
+
+ // FIXME: AllocBinding doesn't get populated for RegionStore yet.
+ if (AllocBinding)
+ os << " stored into '" << AllocBinding->getString() << '\'';
+
+ addVisitor(new CFRefLeakReportVisitor(sym, GCEnabled, Log));
+}
+
+//===----------------------------------------------------------------------===//
+// Main checker logic.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class RetainCountChecker
+ : public Checker< check::Bind,
+ check::DeadSymbols,
+ check::EndAnalysis,
+ check::EndPath,
+ check::PostStmt<BlockExpr>,
+ check::PostStmt<CastExpr>,
+ check::PostStmt<CallExpr>,
+ check::PostStmt<CXXConstructExpr>,
+ check::PostStmt<ObjCArrayLiteral>,
+ check::PostStmt<ObjCDictionaryLiteral>,
+ check::PostObjCMessage,
+ check::PreStmt<ReturnStmt>,
+ check::RegionChanges,
+ eval::Assume,
+ eval::Call > {
+ mutable OwningPtr<CFRefBug> useAfterRelease, releaseNotOwned;
+ mutable OwningPtr<CFRefBug> deallocGC, deallocNotOwned;
+ mutable OwningPtr<CFRefBug> overAutorelease, returnNotOwnedForOwned;
+ mutable OwningPtr<CFRefBug> leakWithinFunction, leakAtReturn;
+ mutable OwningPtr<CFRefBug> leakWithinFunctionGC, leakAtReturnGC;
+
+ typedef llvm::DenseMap<SymbolRef, const SimpleProgramPointTag *> SymbolTagMap;
+
+ // This map is only used to ensure proper deletion of any allocated tags.
+ mutable SymbolTagMap DeadSymbolTags;
+
+ mutable OwningPtr<RetainSummaryManager> Summaries;
+ mutable OwningPtr<RetainSummaryManager> SummariesGC;
+
+ mutable ARCounts::Factory ARCountFactory;
+
+ mutable SummaryLogTy SummaryLog;
+ mutable bool ShouldResetSummaryLog;
+
+public:
+ RetainCountChecker() : ShouldResetSummaryLog(false) {}
+
+ virtual ~RetainCountChecker() {
+ DeleteContainerSeconds(DeadSymbolTags);
+ }
+
+ void checkEndAnalysis(ExplodedGraph &G, BugReporter &BR,
+ ExprEngine &Eng) const {
+ // FIXME: This is a hack to make sure the summary log gets cleared between
+ // analyses of different code bodies.
+ //
+ // Why is this necessary? Because a checker's lifetime is tied to a
+ // translation unit, but an ExplodedGraph's lifetime is just a code body.
+ // Once in a blue moon, a new ExplodedNode will have the same address as an
+ // old one with an associated summary, and the bug report visitor gets very
+ // confused. (To make things worse, the summary lifetime is currently also
+ // tied to a code body, so we get a crash instead of incorrect results.)
+ //
+ // Why is this a bad solution? Because if the lifetime of the ExplodedGraph
+ // changes, things will start going wrong again. Really the lifetime of this
+ // log needs to be tied to either the specific nodes in it or the entire
+ // ExplodedGraph, not to a specific part of the code being analyzed.
+ //
+ // (Also, having stateful local data means that the same checker can't be
+ // used from multiple threads, but a lot of checkers have incorrect
+ // assumptions about that anyway. So that wasn't a priority at the time of
+ // this fix.)
+ //
+ // This happens at the end of analysis, but bug reports are emitted /after/
+ // this point. So we can't just clear the summary log now. Instead, we mark
+ // that the next time we access the summary log, it should be cleared.
+
+ // If we never reset the summary log during /this/ code body analysis,
+ // there were no new summaries. There might still have been summaries from
+ // the /last/ analysis, so clear them out to make sure the bug report
+ // visitors don't get confused.
+ if (ShouldResetSummaryLog)
+ SummaryLog.clear();
+
+ ShouldResetSummaryLog = !SummaryLog.empty();
+ }
+
+ CFRefBug *getLeakWithinFunctionBug(const LangOptions &LOpts,
+ bool GCEnabled) const {
+ if (GCEnabled) {
+ if (!leakWithinFunctionGC)
+ leakWithinFunctionGC.reset(new LeakWithinFunction("Leak of object when "
+ "using garbage "
+ "collection"));
+ return leakWithinFunctionGC.get();
+ } else {
+ if (!leakWithinFunction) {
+ if (LOpts.getGC() == LangOptions::HybridGC) {
+ leakWithinFunction.reset(new LeakWithinFunction("Leak of object when "
+ "not using garbage "
+ "collection (GC) in "
+ "dual GC/non-GC "
+ "code"));
+ } else {
+ leakWithinFunction.reset(new LeakWithinFunction("Leak"));
+ }
+ }
+ return leakWithinFunction.get();
+ }
+ }
+
+ CFRefBug *getLeakAtReturnBug(const LangOptions &LOpts, bool GCEnabled) const {
+ if (GCEnabled) {
+ if (!leakAtReturnGC)
+ leakAtReturnGC.reset(new LeakAtReturn("Leak of returned object when "
+ "using garbage collection"));
+ return leakAtReturnGC.get();
+ } else {
+ if (!leakAtReturn) {
+ if (LOpts.getGC() == LangOptions::HybridGC) {
+ leakAtReturn.reset(new LeakAtReturn("Leak of returned object when "
+ "not using garbage collection "
+ "(GC) in dual GC/non-GC code"));
+ } else {
+ leakAtReturn.reset(new LeakAtReturn("Leak of returned object"));
+ }
+ }
+ return leakAtReturn.get();
+ }
+ }
+
+ RetainSummaryManager &getSummaryManager(ASTContext &Ctx,
+ bool GCEnabled) const {
+ // FIXME: We don't support ARC being turned on and off during one analysis.
+ // (nor, for that matter, do we support changing ASTContexts)
+ bool ARCEnabled = (bool)Ctx.getLangOpts().ObjCAutoRefCount;
+ if (GCEnabled) {
+ if (!SummariesGC)
+ SummariesGC.reset(new RetainSummaryManager(Ctx, true, ARCEnabled));
+ else
+ assert(SummariesGC->isARCEnabled() == ARCEnabled);
+ return *SummariesGC;
+ } else {
+ if (!Summaries)
+ Summaries.reset(new RetainSummaryManager(Ctx, false, ARCEnabled));
+ else
+ assert(Summaries->isARCEnabled() == ARCEnabled);
+ return *Summaries;
+ }
+ }
+
+ RetainSummaryManager &getSummaryManager(CheckerContext &C) const {
+ return getSummaryManager(C.getASTContext(), C.isObjCGCEnabled());
+ }
+
+ void printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const;
+
+ void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
+ void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
+ void checkPostStmt(const CastExpr *CE, CheckerContext &C) const;
+
+ void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
+ void checkPostStmt(const CXXConstructExpr *CE, CheckerContext &C) const;
+ void checkPostStmt(const ObjCArrayLiteral *AL, CheckerContext &C) const;
+ void checkPostStmt(const ObjCDictionaryLiteral *DL, CheckerContext &C) const;
+ void checkPostObjCMessage(const ObjCMessage &Msg, CheckerContext &C) const;
+
+ void checkSummary(const RetainSummary &Summ, const CallOrObjCMessage &Call,
+ CheckerContext &C) const;
+
+ bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+
+ ProgramStateRef evalAssume(ProgramStateRef state, SVal Cond,
+ bool Assumption) const;
+
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef state,
+ const StoreManager::InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) const;
+
+ bool wantsRegionChangeUpdate(ProgramStateRef state) const {
+ return true;
+ }
+
+ void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
+ void checkReturnWithRetEffect(const ReturnStmt *S, CheckerContext &C,
+ ExplodedNode *Pred, RetEffect RE, RefVal X,
+ SymbolRef Sym, ProgramStateRef state) const;
+
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+ void checkEndPath(CheckerContext &C) const;
+
+ ProgramStateRef updateSymbol(ProgramStateRef state, SymbolRef sym,
+ RefVal V, ArgEffect E, RefVal::Kind &hasErr,
+ CheckerContext &C) const;
+
+ void processNonLeakError(ProgramStateRef St, SourceRange ErrorRange,
+ RefVal::Kind ErrorKind, SymbolRef Sym,
+ CheckerContext &C) const;
+
+ void processObjCLiterals(CheckerContext &C, const Expr *Ex) const;
+
+ const ProgramPointTag *getDeadSymbolTag(SymbolRef sym) const;
+
+ ProgramStateRef handleSymbolDeath(ProgramStateRef state,
+ SymbolRef sid, RefVal V,
+ SmallVectorImpl<SymbolRef> &Leaked) const;
+
+ std::pair<ExplodedNode *, ProgramStateRef >
+ handleAutoreleaseCounts(ProgramStateRef state,
+ GenericNodeBuilderRefCount Bd, ExplodedNode *Pred,
+ CheckerContext &Ctx, SymbolRef Sym, RefVal V) const;
+
+ ExplodedNode *processLeaks(ProgramStateRef state,
+ SmallVectorImpl<SymbolRef> &Leaked,
+ GenericNodeBuilderRefCount &Builder,
+ CheckerContext &Ctx,
+ ExplodedNode *Pred = 0) const;
+};
+} // end anonymous namespace
+
+namespace {
+class StopTrackingCallback : public SymbolVisitor {
+ ProgramStateRef state;
+public:
+ StopTrackingCallback(ProgramStateRef st) : state(st) {}
+ ProgramStateRef getState() const { return state; }
+
+ bool VisitSymbol(SymbolRef sym) {
+ state = state->remove<RefBindings>(sym);
+ return true;
+ }
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Handle statements that may have an effect on refcounts.
+//===----------------------------------------------------------------------===//
+
+void RetainCountChecker::checkPostStmt(const BlockExpr *BE,
+ CheckerContext &C) const {
+
+ // Scan the BlockDeclRefExprs for any object the retain count checker
+ // may be tracking.
+ if (!BE->getBlockDecl()->hasCaptures())
+ return;
+
+ ProgramStateRef state = C.getState();
+ const BlockDataRegion *R =
+ cast<BlockDataRegion>(state->getSVal(BE,
+ C.getLocationContext()).getAsRegion());
+
+ BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
+ E = R->referenced_vars_end();
+
+ if (I == E)
+ return;
+
+ // FIXME: For now we invalidate the tracking of all symbols passed to blocks
+ // via captured variables, even though captured variables result in a copy
+ // and in implicit increment/decrement of a retain count.
+ SmallVector<const MemRegion*, 10> Regions;
+ const LocationContext *LC = C.getLocationContext();
+ MemRegionManager &MemMgr = C.getSValBuilder().getRegionManager();
+
+ for ( ; I != E; ++I) {
+ const VarRegion *VR = *I;
+ if (VR->getSuperRegion() == R) {
+ VR = MemMgr.getVarRegion(VR->getDecl(), LC);
+ }
+ Regions.push_back(VR);
+ }
+
+ state =
+ state->scanReachableSymbols<StopTrackingCallback>(Regions.data(),
+ Regions.data() + Regions.size()).getState();
+ C.addTransition(state);
+}
+
+void RetainCountChecker::checkPostStmt(const CastExpr *CE,
+ CheckerContext &C) const {
+ const ObjCBridgedCastExpr *BE = dyn_cast<ObjCBridgedCastExpr>(CE);
+ if (!BE)
+ return;
+
+ ArgEffect AE = IncRef;
+
+ switch (BE->getBridgeKind()) {
+ case clang::OBC_Bridge:
+ // Do nothing.
+ return;
+ case clang::OBC_BridgeRetained:
+ AE = IncRef;
+ break;
+ case clang::OBC_BridgeTransfer:
+ AE = DecRefBridgedTransfered;
+ break;
+ }
+
+ ProgramStateRef state = C.getState();
+ SymbolRef Sym = state->getSVal(CE, C.getLocationContext()).getAsLocSymbol();
+ if (!Sym)
+ return;
+ const RefVal* T = state->get<RefBindings>(Sym);
+ if (!T)
+ return;
+
+ RefVal::Kind hasErr = (RefVal::Kind) 0;
+ state = updateSymbol(state, Sym, *T, AE, hasErr, C);
+
+ if (hasErr) {
+ // FIXME: If we get an error during a bridge cast, should we report it?
+ // Should we assert that there is no error?
+ return;
+ }
+
+ C.addTransition(state);
+}
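+
+// Illustrative bridge casts corresponding to the kinds handled above
+// (comments only, placeholder variables):
+//
+//   CFStringRef c1 = (__bridge CFStringRef)ns;          // OBC_Bridge: no effect
+//   CFStringRef c2 = (__bridge_retained CFStringRef)ns; // OBC_BridgeRetained: IncRef
+//   NSString *n2 = (__bridge_transfer NSString *)cf;    // OBC_BridgeTransfer:
+//                                                       //   DecRefBridgedTransfered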
+
+void RetainCountChecker::checkPostStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ if (C.wasInlined)
+ return;
+
+ // Get the callee.
+ ProgramStateRef state = C.getState();
+ const Expr *Callee = CE->getCallee();
+ SVal L = state->getSVal(Callee, C.getLocationContext());
+
+ RetainSummaryManager &Summaries = getSummaryManager(C);
+ const RetainSummary *Summ = 0;
+
+ // FIXME: Better support for blocks. For now we stop tracking anything
+ // that is passed to blocks.
+ // FIXME: Need to handle variables that are "captured" by the block.
+ if (dyn_cast_or_null<BlockDataRegion>(L.getAsRegion())) {
+ Summ = Summaries.getPersistentStopSummary();
+ } else if (const FunctionDecl *FD = L.getAsFunctionDecl()) {
+ Summ = Summaries.getSummary(FD);
+ } else if (const CXXMemberCallExpr *me = dyn_cast<CXXMemberCallExpr>(CE)) {
+ if (const CXXMethodDecl *MD = me->getMethodDecl())
+ Summ = Summaries.getSummary(MD);
+ }
+
+ if (!Summ)
+ Summ = Summaries.getDefaultSummary();
+
+ checkSummary(*Summ, CallOrObjCMessage(CE, state, C.getLocationContext()), C);
+}
+
+void RetainCountChecker::checkPostStmt(const CXXConstructExpr *CE,
+ CheckerContext &C) const {
+ const CXXConstructorDecl *Ctor = CE->getConstructor();
+ if (!Ctor)
+ return;
+
+ RetainSummaryManager &Summaries = getSummaryManager(C);
+ const RetainSummary *Summ = Summaries.getSummary(Ctor);
+
+ // If we didn't get a summary, this constructor doesn't affect retain counts.
+ if (!Summ)
+ return;
+
+ ProgramStateRef state = C.getState();
+ checkSummary(*Summ, CallOrObjCMessage(CE, state, C.getLocationContext()), C);
+}
+
+void RetainCountChecker::processObjCLiterals(CheckerContext &C,
+ const Expr *Ex) const {
+ ProgramStateRef state = C.getState();
+ const ExplodedNode *pred = C.getPredecessor();
+ for (Stmt::const_child_iterator it = Ex->child_begin(), et = Ex->child_end() ;
+ it != et ; ++it) {
+ const Stmt *child = *it;
+ SVal V = state->getSVal(child, pred->getLocationContext());
+ if (SymbolRef sym = V.getAsSymbol())
+ if (const RefVal* T = state->get<RefBindings>(sym)) {
+ RefVal::Kind hasErr = (RefVal::Kind) 0;
+ state = updateSymbol(state, sym, *T, MayEscape, hasErr, C);
+ if (hasErr) {
+ processNonLeakError(state, child->getSourceRange(), hasErr, sym, C);
+ return;
+ }
+ }
+ }
+
+ // Return the object as autoreleased.
+ // RetEffect RE = RetEffect::MakeNotOwned(RetEffect::ObjC);
+ if (SymbolRef sym =
+ state->getSVal(Ex, pred->getLocationContext()).getAsSymbol()) {
+ QualType ResultTy = Ex->getType();
+ state = state->set<RefBindings>(sym, RefVal::makeNotOwned(RetEffect::ObjC,
+ ResultTy));
+ }
+
+ C.addTransition(state);
+}
+
+void RetainCountChecker::checkPostStmt(const ObjCArrayLiteral *AL,
+ CheckerContext &C) const {
+ // Apply the 'MayEscape' effect to all values.
+ processObjCLiterals(C, AL);
+}
+
+void RetainCountChecker::checkPostStmt(const ObjCDictionaryLiteral *DL,
+ CheckerContext &C) const {
+ // Apply the 'MayEscape' effect to all keys and values.
+ processObjCLiterals(C, DL);
+}
+
+void RetainCountChecker::checkPostObjCMessage(const ObjCMessage &Msg,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+
+ RetainSummaryManager &Summaries = getSummaryManager(C);
+
+ const RetainSummary *Summ;
+ if (Msg.isInstanceMessage()) {
+ const LocationContext *LC = C.getLocationContext();
+ Summ = Summaries.getInstanceMethodSummary(Msg, state, LC);
+ } else {
+ Summ = Summaries.getClassMethodSummary(Msg);
+ }
+
+ // If we didn't get a summary, this message doesn't affect retain counts.
+ if (!Summ)
+ return;
+
+ checkSummary(*Summ, CallOrObjCMessage(Msg, state, C.getLocationContext()), C);
+}
+
+/// GetReturnType - Used to get the return type of a message expression or
+/// function call with the intention of affixing that type to a tracked symbol.
+/// While the return type can be queried directly from RetE, when
+/// invoking class methods we augment the return type to be that of
+/// a pointer to the class (as opposed to it just being id).
+// FIXME: We may be able to do this with related result types instead.
+// This function is probably overestimating.
+static QualType GetReturnType(const Expr *RetE, ASTContext &Ctx) {
+ QualType RetTy = RetE->getType();
+ // If RetE is not a message expression just return its type.
+ // If RetE is a message expression, return its type if it is something
+ // more specific than id.
+ if (const ObjCMessageExpr *ME = dyn_cast<ObjCMessageExpr>(RetE))
+ if (const ObjCObjectPointerType *PT = RetTy->getAs<ObjCObjectPointerType>())
+ if (PT->isObjCQualifiedIdType() || PT->isObjCIdType() ||
+ PT->isObjCClassType()) {
+ // At this point we know the return type of the message expression is
+ // id, id<...>, or Class. If we have an ObjCInterfaceDecl, we know this
+ // is a call to a class method whose type we can resolve. In such
+ // cases, promote the return type to XXX* (where XXX is the class).
+ const ObjCInterfaceDecl *D = ME->getReceiverInterface();
+ return !D ? RetTy :
+ Ctx.getObjCObjectPointerType(Ctx.getObjCInterfaceType(D));
+ }
+
+ return RetTy;
+}
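+
+// For example (illustrative): '[NSString alloc]' is declared to return 'id',
+// but because the receiver interface is known to be NSString, the type
+// affixed to the tracked symbol becomes 'NSString *'.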
+
+void RetainCountChecker::checkSummary(const RetainSummary &Summ,
+ const CallOrObjCMessage &CallOrMsg,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+
+ // Evaluate the effect of the arguments.
+ RefVal::Kind hasErr = (RefVal::Kind) 0;
+ SourceRange ErrorRange;
+ SymbolRef ErrorSym = 0;
+
+ for (unsigned idx = 0, e = CallOrMsg.getNumArgs(); idx != e; ++idx) {
+ SVal V = CallOrMsg.getArgSVal(idx);
+
+ if (SymbolRef Sym = V.getAsLocSymbol()) {
+ if (RefBindings::data_type *T = state->get<RefBindings>(Sym)) {
+ state = updateSymbol(state, Sym, *T, Summ.getArg(idx), hasErr, C);
+ if (hasErr) {
+ ErrorRange = CallOrMsg.getArgSourceRange(idx);
+ ErrorSym = Sym;
+ break;
+ }
+ }
+ }
+ }
+
+ // Evaluate the effect on the message receiver.
+ bool ReceiverIsTracked = false;
+ if (!hasErr && CallOrMsg.isObjCMessage()) {
+ const LocationContext *LC = C.getLocationContext();
+ SVal Receiver = CallOrMsg.getInstanceMessageReceiver(LC);
+ if (SymbolRef Sym = Receiver.getAsLocSymbol()) {
+ if (const RefVal *T = state->get<RefBindings>(Sym)) {
+ ReceiverIsTracked = true;
+ state = updateSymbol(state, Sym, *T, Summ.getReceiverEffect(),
+ hasErr, C);
+ if (hasErr) {
+ ErrorRange = CallOrMsg.getReceiverSourceRange();
+ ErrorSym = Sym;
+ }
+ }
+ }
+ }
+
+ // Process any errors.
+ if (hasErr) {
+ processNonLeakError(state, ErrorRange, hasErr, ErrorSym, C);
+ return;
+ }
+
+ // Consult the summary for the return value.
+ RetEffect RE = Summ.getRetEffect();
+
+ if (RE.getKind() == RetEffect::OwnedWhenTrackedReceiver) {
+ if (ReceiverIsTracked)
+ RE = getSummaryManager(C).getObjAllocRetEffect();
+ else
+ RE = RetEffect::MakeNoRet();
+ }
+
+ switch (RE.getKind()) {
+ default:
+ llvm_unreachable("Unhandled RetEffect.");
+
+ case RetEffect::NoRet:
+ // No work necessary.
+ break;
+
+ case RetEffect::OwnedAllocatedSymbol:
+ case RetEffect::OwnedSymbol: {
+ SymbolRef Sym = state->getSVal(CallOrMsg.getOriginExpr(),
+ C.getLocationContext()).getAsSymbol();
+ if (!Sym)
+ break;
+
+ // Use the result type from callOrMsg as it automatically adjusts
+ // for methods/functions that return references.
+ QualType ResultTy = CallOrMsg.getResultType(C.getASTContext());
+ state = state->set<RefBindings>(Sym, RefVal::makeOwned(RE.getObjKind(),
+ ResultTy));
+
+ // FIXME: Add a flag to the checker where allocations are assumed to
+ // *not* fail. (The code below is out-of-date, though.)
+#if 0
+ if (RE.getKind() == RetEffect::OwnedAllocatedSymbol) {
+ bool isFeasible;
+ state = state.assume(loc::SymbolVal(Sym), true, isFeasible);
+ assert(isFeasible && "Cannot assume fresh symbol is non-null.");
+ }
+#endif
+
+ break;
+ }
+
+ case RetEffect::GCNotOwnedSymbol:
+ case RetEffect::ARCNotOwnedSymbol:
+ case RetEffect::NotOwnedSymbol: {
+ const Expr *Ex = CallOrMsg.getOriginExpr();
+ SymbolRef Sym = state->getSVal(Ex, C.getLocationContext()).getAsSymbol();
+ if (!Sym)
+ break;
+
+ // Use GetReturnType in order to give [NSFoo alloc] the type NSFoo *.
+ QualType ResultTy = GetReturnType(Ex, C.getASTContext());
+ state = state->set<RefBindings>(Sym, RefVal::makeNotOwned(RE.getObjKind(),
+ ResultTy));
+ break;
+ }
+ }
+
+ // This check is actually necessary; otherwise the statement builder thinks
+ // we've hit a previously-found path.
+ // Normally addTransition takes care of this, but we want the node pointer.
+ ExplodedNode *NewNode;
+ if (state == C.getState()) {
+ NewNode = C.getPredecessor();
+ } else {
+ NewNode = C.addTransition(state);
+ }
+
+ // Annotate the node with the summary we used.
+ if (NewNode) {
+ // FIXME: This is ugly. See checkEndAnalysis for why it's necessary.
+ if (ShouldResetSummaryLog) {
+ SummaryLog.clear();
+ ShouldResetSummaryLog = false;
+ }
+ SummaryLog[NewNode] = &Summ;
+ }
+}
+
+
+ProgramStateRef
+RetainCountChecker::updateSymbol(ProgramStateRef state, SymbolRef sym,
+ RefVal V, ArgEffect E, RefVal::Kind &hasErr,
+ CheckerContext &C) const {
+ // In GC mode [... release] and [... retain] do nothing.
+ // In ARC mode they shouldn't exist at all, but we just ignore them.
+ bool IgnoreRetainMsg = C.isObjCGCEnabled();
+ if (!IgnoreRetainMsg)
+ IgnoreRetainMsg = (bool)C.getASTContext().getLangOpts().ObjCAutoRefCount;
+
+ switch (E) {
+ default: break;
+ case IncRefMsg: E = IgnoreRetainMsg ? DoNothing : IncRef; break;
+ case DecRefMsg: E = IgnoreRetainMsg ? DoNothing : DecRef; break;
+ case MakeCollectable: E = C.isObjCGCEnabled() ? DecRef : DoNothing; break;
+ case NewAutoreleasePool: E = C.isObjCGCEnabled() ? DoNothing :
+ NewAutoreleasePool; break;
+ }
+
+ // Handle all use-after-releases.
+ if (!C.isObjCGCEnabled() && V.getKind() == RefVal::Released) {
+ V = V ^ RefVal::ErrorUseAfterRelease;
+ hasErr = V.getKind();
+ return state->set<RefBindings>(sym, V);
+ }
+
+ switch (E) {
+ case DecRefMsg:
+ case IncRefMsg:
+ case MakeCollectable:
+ llvm_unreachable("DecRefMsg/IncRefMsg/MakeCollectable already converted");
+
+ case Dealloc:
+ // Any use of -dealloc in GC is *bad*.
+ if (C.isObjCGCEnabled()) {
+ V = V ^ RefVal::ErrorDeallocGC;
+ hasErr = V.getKind();
+ break;
+ }
+
+ switch (V.getKind()) {
+ default:
+ llvm_unreachable("Invalid RefVal state for an explicit dealloc.");
+ case RefVal::Owned:
+ // The object immediately transitions to the released state.
+ V = V ^ RefVal::Released;
+ V.clearCounts();
+ return state->set<RefBindings>(sym, V);
+ case RefVal::NotOwned:
+ V = V ^ RefVal::ErrorDeallocNotOwned;
+ hasErr = V.getKind();
+ break;
+ }
+ break;
+
+ case NewAutoreleasePool:
+ assert(!C.isObjCGCEnabled());
+ return state->add<AutoreleaseStack>(sym);
+
+ case MayEscape:
+ if (V.getKind() == RefVal::Owned) {
+ V = V ^ RefVal::NotOwned;
+ break;
+ }
+
+ // Fall-through.
+
+ case DoNothing:
+ return state;
+
+ case Autorelease:
+ if (C.isObjCGCEnabled())
+ return state;
+
+ // Update the autorelease counts.
+ state = SendAutorelease(state, ARCountFactory, sym);
+ V = V.autorelease();
+ break;
+
+ case StopTracking:
+ return state->remove<RefBindings>(sym);
+
+ case IncRef:
+ switch (V.getKind()) {
+ default:
+ llvm_unreachable("Invalid RefVal state for a retain.");
+ case RefVal::Owned:
+ case RefVal::NotOwned:
+ V = V + 1;
+ break;
+ case RefVal::Released:
+ // Non-GC cases are handled above.
+ assert(C.isObjCGCEnabled());
+ V = (V ^ RefVal::Owned) + 1;
+ break;
+ }
+ break;
+
+ case SelfOwn:
+ V = V ^ RefVal::NotOwned;
+ // Fall-through.
+ case DecRef:
+ case DecRefBridgedTransfered:
+ switch (V.getKind()) {
+ default:
+ // case 'RefVal::Released' handled above.
+ llvm_unreachable("Invalid RefVal state for a release.");
+
+ case RefVal::Owned:
+ assert(V.getCount() > 0);
+ if (V.getCount() == 1)
+ V = V ^ (E == DecRefBridgedTransfered ?
+ RefVal::NotOwned : RefVal::Released);
+ V = V - 1;
+ break;
+
+ case RefVal::NotOwned:
+ if (V.getCount() > 0)
+ V = V - 1;
+ else {
+ V = V ^ RefVal::ErrorReleaseNotOwned;
+ hasErr = V.getKind();
+ }
+ break;
+
+ case RefVal::Released:
+ // Non-GC cases are handled above.
+ assert(C.isObjCGCEnabled());
+ V = V ^ RefVal::ErrorUseAfterRelease;
+ hasErr = V.getKind();
+ break;
+ }
+ break;
+ }
+ return state->set<RefBindings>(sym, V);
+}
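+
+// A minimal sketch of the transitions updateSymbol() models for non-GC,
+// non-ARC code (hypothetical example; CFCreateSomething stands in for any
+// CF "Create"/"Copy" function):
+//
+//   CFTypeRef X = CFCreateSomething(); // Owned, +1
+//   CFRetain(X);                       // IncRef -> +2
+//   CFRelease(X);                      // DecRef -> +1
+//   CFRelease(X);                      // DecRef -> +0, state becomes Released
+//   CFRelease(X);                      // ErrorUseAfterRelease is reported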
+
+void RetainCountChecker::processNonLeakError(ProgramStateRef St,
+ SourceRange ErrorRange,
+ RefVal::Kind ErrorKind,
+ SymbolRef Sym,
+ CheckerContext &C) const {
+ ExplodedNode *N = C.generateSink(St);
+ if (!N)
+ return;
+
+ CFRefBug *BT;
+ switch (ErrorKind) {
+ default:
+ llvm_unreachable("Unhandled error.");
+ case RefVal::ErrorUseAfterRelease:
+ if (!useAfterRelease)
+ useAfterRelease.reset(new UseAfterRelease());
+ BT = &*useAfterRelease;
+ break;
+ case RefVal::ErrorReleaseNotOwned:
+ if (!releaseNotOwned)
+ releaseNotOwned.reset(new BadRelease());
+ BT = &*releaseNotOwned;
+ break;
+ case RefVal::ErrorDeallocGC:
+ if (!deallocGC)
+ deallocGC.reset(new DeallocGC());
+ BT = &*deallocGC;
+ break;
+ case RefVal::ErrorDeallocNotOwned:
+ if (!deallocNotOwned)
+ deallocNotOwned.reset(new DeallocNotOwned());
+ BT = &*deallocNotOwned;
+ break;
+ }
+
+ assert(BT);
+ CFRefReport *report = new CFRefReport(*BT, C.getASTContext().getLangOpts(),
+ C.isObjCGCEnabled(), SummaryLog,
+ N, Sym);
+ report->addRange(ErrorRange);
+ C.EmitReport(report);
+}
+
+//===----------------------------------------------------------------------===//
+// Handle the return values of retain-count-related functions.
+//===----------------------------------------------------------------------===//
+
+bool RetainCountChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
+ // Get the callee. We're only interested in simple C functions.
+ ProgramStateRef state = C.getState();
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD)
+ return false;
+
+ IdentifierInfo *II = FD->getIdentifier();
+ if (!II)
+ return false;
+
+ // For now, we're only handling the functions that return aliases of their
+ // arguments: CFRetain and CFMakeCollectable (and their families).
+ // Eventually we should add other functions we can model entirely,
+ // such as CFRelease, which don't invalidate their arguments or globals.
+ if (CE->getNumArgs() != 1)
+ return false;
+
+ // Get the name of the function.
+ StringRef FName = II->getName();
+ FName = FName.substr(FName.find_first_not_of('_'));
+
+ // See if it's one of the specific functions we know how to eval.
+ bool canEval = false;
+
+ QualType ResultTy = CE->getCallReturnType();
+ if (ResultTy->isObjCIdType()) {
+ // Handle: id NSMakeCollectable(CFTypeRef)
+ canEval = II->isStr("NSMakeCollectable");
+ } else if (ResultTy->isPointerType()) {
+ // Handle: (CF|CG)Retain
+ // CFMakeCollectable
+ // It's okay to be a little sloppy here (CGMakeCollectable doesn't exist).
+ if (cocoa::isRefType(ResultTy, "CF", FName) ||
+ cocoa::isRefType(ResultTy, "CG", FName)) {
+ canEval = isRetain(FD, FName) || isMakeCollectable(FD, FName);
+ }
+ }
+
+ if (!canEval)
+ return false;
+
+ // Bind the return value.
+ const LocationContext *LCtx = C.getLocationContext();
+ SVal RetVal = state->getSVal(CE->getArg(0), LCtx);
+ if (RetVal.isUnknown()) {
+    // If the argument value is unknown, conjure a return value.
+    SValBuilder &SVB = C.getSValBuilder();
+    unsigned Count = C.getCurrentBlockCount();
+    RetVal = SVB.getConjuredSymbolVal(0, CE, LCtx, ResultTy, Count);
+ }
+ state = state->BindExpr(CE, LCtx, RetVal, false);
+
+ // FIXME: This should not be necessary, but otherwise the argument seems to be
+ // considered alive during the next statement.
+ if (const MemRegion *ArgRegion = RetVal.getAsRegion()) {
+ // Save the refcount status of the argument.
+ SymbolRef Sym = RetVal.getAsLocSymbol();
+ RefBindings::data_type *Binding = 0;
+ if (Sym)
+ Binding = state->get<RefBindings>(Sym);
+
+ // Invalidate the argument region.
+ unsigned Count = C.getCurrentBlockCount();
+ state = state->invalidateRegions(ArgRegion, CE, Count, LCtx);
+
+ // Restore the refcount status of the argument.
+ if (Binding)
+ state = state->set<RefBindings>(Sym, *Binding);
+ }
+
+ C.addTransition(state);
+ return true;
+}
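+
+// Usage note (illustrative, hypothetical caller code): because evalCall()
+// binds the call's return value to its argument, a call such as
+//
+//   CFTypeRef Y = CFRetain(X);
+//
+// leaves X and Y referring to the same symbol, so the retain and any later
+// release are tracked against a single reference-count binding.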
+
+//===----------------------------------------------------------------------===//
+// Handle return statements.
+//===----------------------------------------------------------------------===//
+
+// Return true if the current LocationContext has no caller context.
+static bool inTopFrame(CheckerContext &C) {
+ const LocationContext *LC = C.getLocationContext();
+ return LC->getParent() == 0;
+}
+
+void RetainCountChecker::checkPreStmt(const ReturnStmt *S,
+ CheckerContext &C) const {
+
+ // Only adjust the reference count if this is the top-level call frame,
+ // and not the result of inlining. In the future, we should do
+ // better checking even for inlined calls, and see if they match
+ // with their expected semantics (e.g., the method should return a retained
+ // object, etc.).
+ if (!inTopFrame(C))
+ return;
+
+ const Expr *RetE = S->getRetValue();
+ if (!RetE)
+ return;
+
+ ProgramStateRef state = C.getState();
+ SymbolRef Sym =
+ state->getSValAsScalarOrLoc(RetE, C.getLocationContext()).getAsLocSymbol();
+ if (!Sym)
+ return;
+
+ // Get the reference count binding (if any).
+ const RefVal *T = state->get<RefBindings>(Sym);
+ if (!T)
+ return;
+
+ // Change the reference count.
+ RefVal X = *T;
+
+ switch (X.getKind()) {
+ case RefVal::Owned: {
+ unsigned cnt = X.getCount();
+ assert(cnt > 0);
+ X.setCount(cnt - 1);
+ X = X ^ RefVal::ReturnedOwned;
+ break;
+ }
+
+ case RefVal::NotOwned: {
+ unsigned cnt = X.getCount();
+ if (cnt) {
+ X.setCount(cnt - 1);
+ X = X ^ RefVal::ReturnedOwned;
+ }
+ else {
+ X = X ^ RefVal::ReturnedNotOwned;
+ }
+ break;
+ }
+
+ default:
+ return;
+ }
+
+ // Update the binding.
+ state = state->set<RefBindings>(Sym, X);
+ ExplodedNode *Pred = C.addTransition(state);
+
+ // At this point we have updated the state properly.
+ // Everything after this is merely checking to see if the return value has
+ // been over- or under-retained.
+
+ // Did we cache out?
+ if (!Pred)
+ return;
+
+ // Update the autorelease counts.
+ static SimpleProgramPointTag
+ AutoreleaseTag("RetainCountChecker : Autorelease");
+ GenericNodeBuilderRefCount Bd(C, &AutoreleaseTag);
+ llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Bd, Pred, C, Sym, X);
+
+ // Did we cache out?
+ if (!Pred)
+ return;
+
+ // Get the updated binding.
+ T = state->get<RefBindings>(Sym);
+ assert(T);
+ X = *T;
+
+ // Consult the summary of the enclosing method.
+ RetainSummaryManager &Summaries = getSummaryManager(C);
+ const Decl *CD = &Pred->getCodeDecl();
+
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(CD)) {
+ // Unlike regular functions, /all/ ObjC methods are assumed to always
+ // follow Cocoa retain-count conventions, not just those with special
+ // names or attributes.
+ const RetainSummary *Summ = Summaries.getMethodSummary(MD);
+ RetEffect RE = Summ ? Summ->getRetEffect() : RetEffect::MakeNoRet();
+ checkReturnWithRetEffect(S, C, Pred, RE, X, Sym, state);
+ }
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CD)) {
+ if (!isa<CXXMethodDecl>(FD))
+ if (const RetainSummary *Summ = Summaries.getSummary(FD))
+ checkReturnWithRetEffect(S, C, Pred, Summ->getRetEffect(), X,
+ Sym, state);
+ }
+}
+
+void RetainCountChecker::checkReturnWithRetEffect(const ReturnStmt *S,
+ CheckerContext &C,
+ ExplodedNode *Pred,
+ RetEffect RE, RefVal X,
+ SymbolRef Sym,
+ ProgramStateRef state) const {
+ // Any leaks or other errors?
+ if (X.isReturnedOwned() && X.getCount() == 0) {
+ if (RE.getKind() != RetEffect::NoRet) {
+ bool hasError = false;
+ if (C.isObjCGCEnabled() && RE.getObjKind() == RetEffect::ObjC) {
+ // Things are more complicated with garbage collection. If the
+        // returned object is supposed to be an Objective-C object, we have
+ // a leak (as the caller expects a GC'ed object) because no
+ // method should return ownership unless it returns a CF object.
+ hasError = true;
+ X = X ^ RefVal::ErrorGCLeakReturned;
+ }
+ else if (!RE.isOwned()) {
+ // Either we are using GC and the returned object is a CF type
+        // or we aren't using GC. In either case, the enclosing method is
+        // expected to return ownership.
+ hasError = true;
+ X = X ^ RefVal::ErrorLeakReturned;
+ }
+
+ if (hasError) {
+ // Generate an error node.
+ state = state->set<RefBindings>(Sym, X);
+
+ static SimpleProgramPointTag
+ ReturnOwnLeakTag("RetainCountChecker : ReturnsOwnLeak");
+ ExplodedNode *N = C.addTransition(state, Pred, &ReturnOwnLeakTag);
+ if (N) {
+ const LangOptions &LOpts = C.getASTContext().getLangOpts();
+ bool GCEnabled = C.isObjCGCEnabled();
+ CFRefReport *report =
+ new CFRefLeakReport(*getLeakAtReturnBug(LOpts, GCEnabled),
+ LOpts, GCEnabled, SummaryLog,
+ N, Sym, C);
+ C.EmitReport(report);
+ }
+ }
+ }
+ } else if (X.isReturnedNotOwned()) {
+ if (RE.isOwned()) {
+ // Trying to return a not owned object to a caller expecting an
+ // owned object.
+ state = state->set<RefBindings>(Sym, X ^ RefVal::ErrorReturnedNotOwned);
+
+ static SimpleProgramPointTag
+ ReturnNotOwnedTag("RetainCountChecker : ReturnNotOwnedForOwned");
+ ExplodedNode *N = C.addTransition(state, Pred, &ReturnNotOwnedTag);
+ if (N) {
+ if (!returnNotOwnedForOwned)
+ returnNotOwnedForOwned.reset(new ReturnedNotOwnedForOwned());
+
+ CFRefReport *report =
+ new CFRefReport(*returnNotOwnedForOwned,
+ C.getASTContext().getLangOpts(),
+ C.isObjCGCEnabled(), SummaryLog, N, Sym);
+ C.EmitReport(report);
+ }
+ }
+ }
+}
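+
+// Illustrative sketch (hypothetical code, assuming the usual Core Foundation
+// naming-convention summaries): a function whose name does not imply
+// ownership transfer but which returns a +1 object is flagged here as a leak
+// of the returned value, e.g.
+//
+//   CFStringRef GetName(void) {
+//     return CFStringCreateWithCString(NULL, "name", kCFStringEncodingUTF8);
+//   }   // ErrorLeakReturned: a "Get" function is not expected to return
+//       // an owned (+1) object.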
+
+//===----------------------------------------------------------------------===//
+// Check various ways a symbol can be invalidated.
+//===----------------------------------------------------------------------===//
+
+void RetainCountChecker::checkBind(SVal loc, SVal val, const Stmt *S,
+ CheckerContext &C) const {
+ // Are we storing to something that causes the value to "escape"?
+ bool escapes = true;
+
+  // A value escapes in four possible cases (this may change):
+  //
+  // (1) we are binding to something that is not a memory region.
+  // (2) we are binding to a memregion that does not have stack storage.
+  // (3) we are binding to a memregion with stack storage that the store
+  //     does not understand.
+  // (4) we are binding to a stack memregion that is not a simple variable
+  //     (e.g. a struct field), which we do not model precisely (see below).
+ ProgramStateRef state = C.getState();
+
+ if (loc::MemRegionVal *regionLoc = dyn_cast<loc::MemRegionVal>(&loc)) {
+ escapes = !regionLoc->getRegion()->hasStackStorage();
+
+ if (!escapes) {
+ // To test (3), generate a new state with the binding added. If it is
+ // the same state, then it escapes (since the store cannot represent
+ // the binding).
+ escapes = (state == (state->bindLoc(*regionLoc, val)));
+ }
+ if (!escapes) {
+ // Case 4: We do not currently model what happens when a symbol is
+ // assigned to a struct field, so be conservative here and let the symbol
+ // go. TODO: This could definitely be improved upon.
+ escapes = !isa<VarRegion>(regionLoc->getRegion());
+ }
+ }
+
+ // If our store can represent the binding and we aren't storing to something
+ // that doesn't have local storage then just return and have the simulation
+ // state continue as is.
+ if (!escapes)
+ return;
+
+ // Otherwise, find all symbols referenced by 'val' that we are tracking
+ // and stop tracking them.
+ state = state->scanReachableSymbols<StopTrackingCallback>(val).getState();
+ C.addTransition(state);
+}
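+
+// Illustrative sketch (hypothetical code): binding a tracked value to
+// non-stack storage makes it escape, so its reference-count binding is
+// dropped rather than reported later.
+//
+//   static CFStringRef GlobalName;
+//   void StoreName(CFStringRef S) {
+//     GlobalName = S;   // S escapes via a global; we stop tracking it.
+//   }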
+
+ProgramStateRef RetainCountChecker::evalAssume(ProgramStateRef state,
+ SVal Cond,
+ bool Assumption) const {
+
+ // FIXME: We may add to the interface of evalAssume the list of symbols
+ // whose assumptions have changed. For now we just iterate through the
+ // bindings and check if any of the tracked symbols are NULL. This isn't
+ // too bad since the number of symbols we will track in practice are
+ // probably small and evalAssume is only called at branches and a few
+ // other places.
+ RefBindings B = state->get<RefBindings>();
+
+ if (B.isEmpty())
+ return state;
+
+ bool changed = false;
+ RefBindings::Factory &RefBFactory = state->get_context<RefBindings>();
+
+ for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ // Check if the symbol is null (or equal to any constant).
+ // If this is the case, stop tracking the symbol.
+ if (state->getSymVal(I.getKey())) {
+ changed = true;
+ B = RefBFactory.remove(B, I.getKey());
+ }
+ }
+
+ if (changed)
+ state = state->set<RefBindings>(B);
+
+ return state;
+}
+
+ProgramStateRef
+RetainCountChecker::checkRegionChanges(ProgramStateRef state,
+ const StoreManager::InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) const {
+ if (!invalidated)
+ return state;
+
+ llvm::SmallPtrSet<SymbolRef, 8> WhitelistedSymbols;
+ for (ArrayRef<const MemRegion *>::iterator I = ExplicitRegions.begin(),
+ E = ExplicitRegions.end(); I != E; ++I) {
+ if (const SymbolicRegion *SR = (*I)->StripCasts()->getAs<SymbolicRegion>())
+ WhitelistedSymbols.insert(SR->getSymbol());
+ }
+
+ for (StoreManager::InvalidatedSymbols::const_iterator I=invalidated->begin(),
+ E = invalidated->end(); I!=E; ++I) {
+ SymbolRef sym = *I;
+ if (WhitelistedSymbols.count(sym))
+ continue;
+ // Remove any existing reference-count binding.
+ state = state->remove<RefBindings>(sym);
+ }
+ return state;
+}
+
+//===----------------------------------------------------------------------===//
+// Handle dead symbols and end-of-path.
+//===----------------------------------------------------------------------===//
+
+std::pair<ExplodedNode *, ProgramStateRef >
+RetainCountChecker::handleAutoreleaseCounts(ProgramStateRef state,
+ GenericNodeBuilderRefCount Bd,
+ ExplodedNode *Pred,
+ CheckerContext &Ctx,
+ SymbolRef Sym, RefVal V) const {
+ unsigned ACnt = V.getAutoreleaseCount();
+
+ // No autorelease counts? Nothing to be done.
+ if (!ACnt)
+ return std::make_pair(Pred, state);
+
+ assert(!Ctx.isObjCGCEnabled() && "Autorelease counts in GC mode?");
+ unsigned Cnt = V.getCount();
+
+ // FIXME: Handle sending 'autorelease' to already released object.
+
+ if (V.getKind() == RefVal::ReturnedOwned)
+ ++Cnt;
+
+ if (ACnt <= Cnt) {
+ if (ACnt == Cnt) {
+ V.clearCounts();
+ if (V.getKind() == RefVal::ReturnedOwned)
+ V = V ^ RefVal::ReturnedNotOwned;
+ else
+ V = V ^ RefVal::NotOwned;
+ } else {
+ V.setCount(Cnt - ACnt);
+ V.setAutoreleaseCount(0);
+ }
+ state = state->set<RefBindings>(Sym, V);
+ ExplodedNode *N = Bd.MakeNode(state, Pred);
+ if (N == 0)
+ state = 0;
+ return std::make_pair(N, state);
+ }
+
+  // Whoa! More autorelease counts than retain counts left.
+ // Emit hard error.
+ V = V ^ RefVal::ErrorOverAutorelease;
+ state = state->set<RefBindings>(Sym, V);
+
+ if (ExplodedNode *N = Bd.MakeNode(state, Pred, true)) {
+ SmallString<128> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+ os << "Object over-autoreleased: object was sent -autorelease ";
+ if (V.getAutoreleaseCount() > 1)
+ os << V.getAutoreleaseCount() << " times ";
+ os << "but the object has a +" << V.getCount() << " retain count";
+
+ if (!overAutorelease)
+ overAutorelease.reset(new OverAutorelease());
+
+ const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
+ CFRefReport *report =
+ new CFRefReport(*overAutorelease, LOpts, /* GCEnabled = */ false,
+ SummaryLog, N, Sym, os.str());
+ Ctx.EmitReport(report);
+ }
+
+ return std::make_pair((ExplodedNode *)0, (ProgramStateRef )0);
+}
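+
+// Worked example (hypothetical, non-GC code): an object holding a +1 retain
+// count that has been sent -autorelease twice has ACnt(2) > Cnt(1), so the
+// code above reports "Object over-autoreleased: object was sent -autorelease
+// 2 times but the object has a +1 retain count".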
+
+ProgramStateRef
+RetainCountChecker::handleSymbolDeath(ProgramStateRef state,
+ SymbolRef sid, RefVal V,
+ SmallVectorImpl<SymbolRef> &Leaked) const {
+ bool hasLeak = false;
+ if (V.isOwned())
+ hasLeak = true;
+ else if (V.isNotOwned() || V.isReturnedOwned())
+ hasLeak = (V.getCount() > 0);
+
+ if (!hasLeak)
+ return state->remove<RefBindings>(sid);
+
+ Leaked.push_back(sid);
+ return state->set<RefBindings>(sid, V ^ RefVal::ErrorLeak);
+}
+
+ExplodedNode *
+RetainCountChecker::processLeaks(ProgramStateRef state,
+ SmallVectorImpl<SymbolRef> &Leaked,
+ GenericNodeBuilderRefCount &Builder,
+ CheckerContext &Ctx,
+ ExplodedNode *Pred) const {
+ if (Leaked.empty())
+ return Pred;
+
+ // Generate an intermediate node representing the leak point.
+ ExplodedNode *N = Builder.MakeNode(state, Pred);
+
+ if (N) {
+ for (SmallVectorImpl<SymbolRef>::iterator
+ I = Leaked.begin(), E = Leaked.end(); I != E; ++I) {
+
+ const LangOptions &LOpts = Ctx.getASTContext().getLangOpts();
+ bool GCEnabled = Ctx.isObjCGCEnabled();
+ CFRefBug *BT = Pred ? getLeakWithinFunctionBug(LOpts, GCEnabled)
+ : getLeakAtReturnBug(LOpts, GCEnabled);
+ assert(BT && "BugType not initialized.");
+
+ CFRefLeakReport *report = new CFRefLeakReport(*BT, LOpts, GCEnabled,
+ SummaryLog, N, *I, Ctx);
+ Ctx.EmitReport(report);
+ }
+ }
+
+ return N;
+}
+
+void RetainCountChecker::checkEndPath(CheckerContext &Ctx) const {
+ ProgramStateRef state = Ctx.getState();
+ GenericNodeBuilderRefCount Bd(Ctx);
+ RefBindings B = state->get<RefBindings>();
+ ExplodedNode *Pred = Ctx.getPredecessor();
+
+ for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Bd, Pred, Ctx,
+ I->first, I->second);
+ if (!state)
+ return;
+ }
+
+ // If the current LocationContext has a parent, don't check for leaks.
+ // We will do that later.
+  // FIXME: we should instead check for imbalances of the retains/releases,
+ // and suggest annotations.
+ if (Ctx.getLocationContext()->getParent())
+ return;
+
+ B = state->get<RefBindings>();
+ SmallVector<SymbolRef, 10> Leaked;
+
+ for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I)
+ state = handleSymbolDeath(state, I->first, I->second, Leaked);
+
+ processLeaks(state, Leaked, Bd, Ctx, Pred);
+}
+
+const ProgramPointTag *
+RetainCountChecker::getDeadSymbolTag(SymbolRef sym) const {
+ const SimpleProgramPointTag *&tag = DeadSymbolTags[sym];
+ if (!tag) {
+ SmallString<64> buf;
+ llvm::raw_svector_ostream out(buf);
+ out << "RetainCountChecker : Dead Symbol : ";
+ sym->dumpToStream(out);
+ tag = new SimpleProgramPointTag(out.str());
+ }
+ return tag;
+}
+
+void RetainCountChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const {
+ ExplodedNode *Pred = C.getPredecessor();
+
+ ProgramStateRef state = C.getState();
+ RefBindings B = state->get<RefBindings>();
+
+ // Update counts from autorelease pools
+ for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
+ E = SymReaper.dead_end(); I != E; ++I) {
+ SymbolRef Sym = *I;
+ if (const RefVal *T = B.lookup(Sym)){
+ // Use the symbol as the tag.
+ // FIXME: This might not be as unique as we would like.
+ GenericNodeBuilderRefCount Bd(C, getDeadSymbolTag(Sym));
+ llvm::tie(Pred, state) = handleAutoreleaseCounts(state, Bd, Pred, C,
+ Sym, *T);
+ if (!state)
+ return;
+ }
+ }
+
+ B = state->get<RefBindings>();
+ SmallVector<SymbolRef, 10> Leaked;
+
+ for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
+ E = SymReaper.dead_end(); I != E; ++I) {
+ if (const RefVal *T = B.lookup(*I))
+ state = handleSymbolDeath(state, *I, *T, Leaked);
+ }
+
+ {
+ GenericNodeBuilderRefCount Bd(C, this);
+ Pred = processLeaks(state, Leaked, Bd, C, Pred);
+ }
+
+ // Did we cache out?
+ if (!Pred)
+ return;
+
+ // Now generate a new node that nukes the old bindings.
+ RefBindings::Factory &F = state->get_context<RefBindings>();
+
+ for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
+ E = SymReaper.dead_end(); I != E; ++I)
+ B = F.remove(B, *I);
+
+ state = state->set<RefBindings>(B);
+ C.addTransition(state, Pred);
+}
+
+//===----------------------------------------------------------------------===//
+// Debug printing of refcount bindings and autorelease pools.
+//===----------------------------------------------------------------------===//
+
+static void PrintPool(raw_ostream &Out, SymbolRef Sym,
+ ProgramStateRef State) {
+ Out << ' ';
+ if (Sym)
+ Sym->dumpToStream(Out);
+ else
+ Out << "<pool>";
+ Out << ":{";
+
+ // Get the contents of the pool.
+ if (const ARCounts *Cnts = State->get<AutoreleasePoolContents>(Sym))
+ for (ARCounts::iterator I = Cnts->begin(), E = Cnts->end(); I != E; ++I)
+ Out << '(' << I.getKey() << ',' << I.getData() << ')';
+
+ Out << '}';
+}
+
+static bool UsesAutorelease(ProgramStateRef state) {
+ // A state uses autorelease if it allocated an autorelease pool or if it has
+ // objects in the caller's autorelease pool.
+ return !state->get<AutoreleaseStack>().isEmpty() ||
+ state->get<AutoreleasePoolContents>(SymbolRef());
+}
+
+void RetainCountChecker::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) const {
+
+ RefBindings B = State->get<RefBindings>();
+
+ if (!B.isEmpty())
+ Out << Sep << NL;
+
+ for (RefBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ Out << I->first << " : ";
+ I->second.print(Out);
+ Out << NL;
+ }
+
+ // Print the autorelease stack.
+ if (UsesAutorelease(State)) {
+ Out << Sep << NL << "AR pool stack:";
+ ARStack Stack = State->get<AutoreleaseStack>();
+
+ PrintPool(Out, SymbolRef(), State); // Print the caller's pool.
+ for (ARStack::iterator I = Stack.begin(), E = Stack.end(); I != E; ++I)
+ PrintPool(Out, *I, State);
+
+ Out << NL;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Checker registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerRetainCountChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<RetainCountChecker>();
+}
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
new file mode 100644
index 0000000..6e56593
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -0,0 +1,91 @@
+//== ReturnPointerRangeChecker.cpp ------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ReturnPointerRangeChecker, a path-sensitive check that
+// looks for an out-of-bounds pointer being returned to callers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ReturnPointerRangeChecker :
+ public Checker< check::PreStmt<ReturnStmt> > {
+ mutable OwningPtr<BuiltinBug> BT;
+public:
+ void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
+};
+}
+
+void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+
+ const Expr *RetE = RS->getRetValue();
+ if (!RetE)
+ return;
+
+ SVal V = state->getSVal(RetE, C.getLocationContext());
+ const MemRegion *R = V.getAsRegion();
+
+ const ElementRegion *ER = dyn_cast_or_null<ElementRegion>(R);
+ if (!ER)
+ return;
+
+ DefinedOrUnknownSVal Idx = cast<DefinedOrUnknownSVal>(ER->getIndex());
+  // Zero index is always in bound; this also passes ElementRegions created
+  // for pointer casts.
+ if (Idx.isZeroConstant())
+ return;
+ // FIXME: All of this out-of-bounds checking should eventually be refactored
+ // into a common place.
+
+ DefinedOrUnknownSVal NumElements
+ = C.getStoreManager().getSizeInElements(state, ER->getSuperRegion(),
+ ER->getValueType());
+
+ ProgramStateRef StInBound = state->assumeInBound(Idx, NumElements, true);
+ ProgramStateRef StOutBound = state->assumeInBound(Idx, NumElements, false);
+ if (StOutBound && !StInBound) {
+ ExplodedNode *N = C.generateSink(StOutBound);
+
+ if (!N)
+ return;
+
+    // FIXME: This bug corresponds to CWE-466. Eventually we should have bug
+ // types explicitly reference such exploit categories (when applicable).
+ if (!BT)
+ BT.reset(new BuiltinBug("Return of pointer value outside of expected range",
+ "Returned pointer value points outside the original object "
+ "(potential buffer overflow)"));
+
+ // FIXME: It would be nice to eventually make this diagnostic more clear,
+ // e.g., by referencing the original declaration or by saying *why* this
+ // reference is outside the range.
+
+ // Generate a report for this bug.
+ BugReport *report =
+ new BugReport(*BT, BT->getDescription(), N);
+
+ report->addRange(RetE->getSourceRange());
+ C.EmitReport(report);
+ }
+}
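+
+// Illustrative sketch (hypothetical code) of the pattern flagged above:
+//
+//   static int buf[4];
+//   int *getElement(void) {
+//     return &buf[10];   // index 10 is outside buf[4]; reported at the return
+//   }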
+
+void ento::registerReturnPointerRangeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ReturnPointerRangeChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
new file mode 100644
index 0000000..7b1f0b1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/ReturnUndefChecker.cpp
@@ -0,0 +1,65 @@
+//== ReturnUndefChecker.cpp -------------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ReturnUndefChecker, a path-sensitive check that looks for
+// undefined or garbage values being returned to the caller.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class ReturnUndefChecker :
+ public Checker< check::PreStmt<ReturnStmt> > {
+ mutable OwningPtr<BuiltinBug> BT;
+public:
+ void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
+};
+}
+
+void ReturnUndefChecker::checkPreStmt(const ReturnStmt *RS,
+ CheckerContext &C) const {
+
+ const Expr *RetE = RS->getRetValue();
+ if (!RetE)
+ return;
+
+ if (!C.getState()->getSVal(RetE, C.getLocationContext()).isUndef())
+ return;
+
+ ExplodedNode *N = C.generateSink();
+
+ if (!N)
+ return;
+
+ if (!BT)
+ BT.reset(new BuiltinBug("Garbage return value",
+ "Undefined or garbage value returned to caller"));
+
+ BugReport *report =
+ new BugReport(*BT, BT->getDescription(), N);
+
+ report->addRange(RetE->getSourceRange());
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, RetE,
+ report));
+
+ C.EmitReport(report);
+}
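+
+// Illustrative sketch (hypothetical code) of what this check reports:
+//
+//   int getValue(void) {
+//     int x;
+//     return x;   // 'x' is uninitialized: a garbage value reaches the caller
+//   }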
+
+void ento::registerReturnUndefChecker(CheckerManager &mgr) {
+ mgr.registerChecker<ReturnUndefChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
new file mode 100644
index 0000000..54cf569
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -0,0 +1,230 @@
+//=== StackAddrEscapeChecker.cpp ----------------------------------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the stack address leak checker, which checks whether an
+// invalid stack address is stored into a global or heap location.
+// See CERT DCL30-C.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+using namespace ento;
+
+namespace {
+class StackAddrEscapeChecker : public Checker< check::PreStmt<ReturnStmt>,
+ check::EndPath > {
+ mutable OwningPtr<BuiltinBug> BT_stackleak;
+ mutable OwningPtr<BuiltinBug> BT_returnstack;
+
+public:
+ void checkPreStmt(const ReturnStmt *RS, CheckerContext &C) const;
+ void checkEndPath(CheckerContext &Ctx) const;
+private:
+ void EmitStackError(CheckerContext &C, const MemRegion *R,
+ const Expr *RetE) const;
+ static SourceRange GenName(raw_ostream &os, const MemRegion *R,
+ SourceManager &SM);
+};
+}
+
+SourceRange StackAddrEscapeChecker::GenName(raw_ostream &os,
+ const MemRegion *R,
+ SourceManager &SM) {
+ // Get the base region, stripping away fields and elements.
+ R = R->getBaseRegion();
+ SourceRange range;
+ os << "Address of ";
+
+ // Check if the region is a compound literal.
+ if (const CompoundLiteralRegion* CR = dyn_cast<CompoundLiteralRegion>(R)) {
+ const CompoundLiteralExpr *CL = CR->getLiteralExpr();
+ os << "stack memory associated with a compound literal "
+ "declared on line "
+ << SM.getExpansionLineNumber(CL->getLocStart())
+ << " returned to caller";
+ range = CL->getSourceRange();
+ }
+ else if (const AllocaRegion* AR = dyn_cast<AllocaRegion>(R)) {
+ const Expr *ARE = AR->getExpr();
+ SourceLocation L = ARE->getLocStart();
+ range = ARE->getSourceRange();
+ os << "stack memory allocated by call to alloca() on line "
+ << SM.getExpansionLineNumber(L);
+ }
+ else if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) {
+ const BlockDecl *BD = BR->getCodeRegion()->getDecl();
+ SourceLocation L = BD->getLocStart();
+ range = BD->getSourceRange();
+ os << "stack-allocated block declared on line "
+ << SM.getExpansionLineNumber(L);
+ }
+ else if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ os << "stack memory associated with local variable '"
+ << VR->getString() << '\'';
+ range = VR->getDecl()->getSourceRange();
+ }
+ else if (const CXXTempObjectRegion *TOR = dyn_cast<CXXTempObjectRegion>(R)) {
+ os << "stack memory associated with temporary object of type '"
+ << TOR->getValueType().getAsString() << '\'';
+ range = TOR->getExpr()->getSourceRange();
+ }
+ else {
+ llvm_unreachable("Invalid region in ReturnStackAddressChecker.");
+ }
+
+ return range;
+}
+
+void StackAddrEscapeChecker::EmitStackError(CheckerContext &C, const MemRegion *R,
+ const Expr *RetE) const {
+ ExplodedNode *N = C.generateSink();
+
+ if (!N)
+ return;
+
+ if (!BT_returnstack)
+ BT_returnstack.reset(
+ new BuiltinBug("Return of address to stack-allocated memory"));
+
+ // Generate a report for this bug.
+ SmallString<512> buf;
+ llvm::raw_svector_ostream os(buf);
+ SourceRange range = GenName(os, R, C.getSourceManager());
+ os << " returned to caller";
+ BugReport *report = new BugReport(*BT_returnstack, os.str(), N);
+ report->addRange(RetE->getSourceRange());
+ if (range.isValid())
+ report->addRange(range);
+
+ C.EmitReport(report);
+}
+
+void StackAddrEscapeChecker::checkPreStmt(const ReturnStmt *RS,
+ CheckerContext &C) const {
+
+ const Expr *RetE = RS->getRetValue();
+ if (!RetE)
+ return;
+
+ SVal V = C.getState()->getSVal(RetE, C.getLocationContext());
+ const MemRegion *R = V.getAsRegion();
+
+ if (!R)
+ return;
+
+ const StackSpaceRegion *SS =
+ dyn_cast_or_null<StackSpaceRegion>(R->getMemorySpace());
+
+ if (!SS)
+ return;
+
+  // Returning stack memory that belongs to an ancestor stack frame is fine.
+ const StackFrameContext *SFC = SS->getStackFrame();
+ if (SFC != C.getLocationContext()->getCurrentStackFrame())
+ return;
+
+ // Automatic reference counting automatically copies blocks.
+ if (C.getASTContext().getLangOpts().ObjCAutoRefCount &&
+ isa<BlockDataRegion>(R))
+ return;
+
+ EmitStackError(C, R, RetE);
+}
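+
+// Illustrative sketch (hypothetical code) of the return-of-stack-address case:
+//
+//   int *getLocal(void) {
+//     int x = 0;
+//     return &x;   // address of stack memory associated with 'x' is returned
+//   }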
+
+void StackAddrEscapeChecker::checkEndPath(CheckerContext &Ctx) const {
+ ProgramStateRef state = Ctx.getState();
+
+  // Iterate over all bindings to global variables and see if any of them
+  // refer to a memory region in the stack space.
+ class CallBack : public StoreManager::BindingsHandler {
+ private:
+ CheckerContext &Ctx;
+ const StackFrameContext *CurSFC;
+ public:
+ SmallVector<std::pair<const MemRegion*, const MemRegion*>, 10> V;
+
+ CallBack(CheckerContext &CC) :
+ Ctx(CC),
+ CurSFC(CC.getLocationContext()->getCurrentStackFrame())
+ {}
+
+ bool HandleBinding(StoreManager &SMgr, Store store,
+ const MemRegion *region, SVal val) {
+
+ if (!isa<GlobalsSpaceRegion>(region->getMemorySpace()))
+ return true;
+
+ const MemRegion *vR = val.getAsRegion();
+ if (!vR)
+ return true;
+
+      // Under automatic reference counting (ARC), it is okay to assign a
+      // block directly to a global variable.
+ if (Ctx.getASTContext().getLangOpts().ObjCAutoRefCount &&
+ isa<BlockDataRegion>(vR))
+ return true;
+
+ if (const StackSpaceRegion *SSR =
+ dyn_cast<StackSpaceRegion>(vR->getMemorySpace())) {
+ // If the global variable holds a location in the current stack frame,
+ // record the binding to emit a warning.
+ if (SSR->getStackFrame() == CurSFC)
+ V.push_back(std::make_pair(region, vR));
+ }
+
+ return true;
+ }
+ };
+
+ CallBack cb(Ctx);
+ state->getStateManager().getStoreManager().iterBindings(state->getStore(),cb);
+
+ if (cb.V.empty())
+ return;
+
+ // Generate an error node.
+ ExplodedNode *N = Ctx.addTransition(state);
+ if (!N)
+ return;
+
+ if (!BT_stackleak)
+ BT_stackleak.reset(
+ new BuiltinBug("Stack address stored into global variable",
+ "Stack address was saved into a global variable. "
+ "This is dangerous because the address will become "
+ "invalid after returning from the function"));
+
+ for (unsigned i = 0, e = cb.V.size(); i != e; ++i) {
+ // Generate a report for this bug.
+ SmallString<512> buf;
+ llvm::raw_svector_ostream os(buf);
+ SourceRange range = GenName(os, cb.V[i].second,
+ Ctx.getSourceManager());
+ os << " is still referred to by the global variable '";
+ const VarRegion *VR = cast<VarRegion>(cb.V[i].first->getBaseRegion());
+ os << *VR->getDecl()
+ << "' upon returning to the caller. This will be a dangling reference";
+ BugReport *report = new BugReport(*BT_stackleak, os.str(), N);
+ if (range.isValid())
+ report->addRange(range);
+
+ Ctx.EmitReport(report);
+ }
+}
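+
+// Illustrative sketch (hypothetical code) of the global-binding case checked
+// at the end of a path:
+//
+//   int *GlobalPtr;
+//   void saveLocal(void) {
+//     int x = 0;
+//     GlobalPtr = &x;   // stack address saved in a global; dangles on return
+//   }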
+
+void ento::registerStackAddrEscapeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<StackAddrEscapeChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
new file mode 100644
index 0000000..3745d4a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -0,0 +1,475 @@
+//===-- StreamChecker.cpp -----------------------------------------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines checkers that model and check stream handling functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/ImmutableMap.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+struct StreamState {
+ enum Kind { Opened, Closed, OpenFailed, Escaped } K;
+ const Stmt *S;
+
+ StreamState(Kind k, const Stmt *s) : K(k), S(s) {}
+
+ bool isOpened() const { return K == Opened; }
+ bool isClosed() const { return K == Closed; }
+ //bool isOpenFailed() const { return K == OpenFailed; }
+ //bool isEscaped() const { return K == Escaped; }
+
+ bool operator==(const StreamState &X) const {
+ return K == X.K && S == X.S;
+ }
+
+ static StreamState getOpened(const Stmt *s) { return StreamState(Opened, s); }
+ static StreamState getClosed(const Stmt *s) { return StreamState(Closed, s); }
+ static StreamState getOpenFailed(const Stmt *s) {
+ return StreamState(OpenFailed, s);
+ }
+ static StreamState getEscaped(const Stmt *s) {
+ return StreamState(Escaped, s);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(K);
+ ID.AddPointer(S);
+ }
+};
+
+class StreamChecker : public Checker<eval::Call,
+ check::DeadSymbols,
+ check::EndPath,
+ check::PreStmt<ReturnStmt> > {
+ mutable IdentifierInfo *II_fopen, *II_tmpfile, *II_fclose, *II_fread,
+ *II_fwrite,
+ *II_fseek, *II_ftell, *II_rewind, *II_fgetpos, *II_fsetpos,
+ *II_clearerr, *II_feof, *II_ferror, *II_fileno;
+ mutable OwningPtr<BuiltinBug> BT_nullfp, BT_illegalwhence,
+ BT_doubleclose, BT_ResourceLeak;
+
+public:
+ StreamChecker()
+ : II_fopen(0), II_tmpfile(0) ,II_fclose(0), II_fread(0), II_fwrite(0),
+ II_fseek(0), II_ftell(0), II_rewind(0), II_fgetpos(0), II_fsetpos(0),
+ II_clearerr(0), II_feof(0), II_ferror(0), II_fileno(0) {}
+
+ bool evalCall(const CallExpr *CE, CheckerContext &C) const;
+ void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
+ void checkEndPath(CheckerContext &Ctx) const;
+ void checkPreStmt(const ReturnStmt *S, CheckerContext &C) const;
+
+private:
+ void Fopen(CheckerContext &C, const CallExpr *CE) const;
+ void Tmpfile(CheckerContext &C, const CallExpr *CE) const;
+ void Fclose(CheckerContext &C, const CallExpr *CE) const;
+ void Fread(CheckerContext &C, const CallExpr *CE) const;
+ void Fwrite(CheckerContext &C, const CallExpr *CE) const;
+ void Fseek(CheckerContext &C, const CallExpr *CE) const;
+ void Ftell(CheckerContext &C, const CallExpr *CE) const;
+ void Rewind(CheckerContext &C, const CallExpr *CE) const;
+ void Fgetpos(CheckerContext &C, const CallExpr *CE) const;
+ void Fsetpos(CheckerContext &C, const CallExpr *CE) const;
+ void Clearerr(CheckerContext &C, const CallExpr *CE) const;
+ void Feof(CheckerContext &C, const CallExpr *CE) const;
+ void Ferror(CheckerContext &C, const CallExpr *CE) const;
+ void Fileno(CheckerContext &C, const CallExpr *CE) const;
+
+ void OpenFileAux(CheckerContext &C, const CallExpr *CE) const;
+
+ ProgramStateRef CheckNullStream(SVal SV, ProgramStateRef state,
+ CheckerContext &C) const;
+ ProgramStateRef CheckDoubleClose(const CallExpr *CE, ProgramStateRef state,
+ CheckerContext &C) const;
+};
+
+} // end anonymous namespace
+
+namespace clang {
+namespace ento {
+ template <>
+ struct ProgramStateTrait<StreamState>
+ : public ProgramStatePartialTrait<llvm::ImmutableMap<SymbolRef, StreamState> > {
+ static void *GDMIndex() { static int x; return &x; }
+ };
+}
+}
+
+bool StreamChecker::evalCall(const CallExpr *CE, CheckerContext &C) const {
+ const FunctionDecl *FD = C.getCalleeDecl(CE);
+ if (!FD)
+ return false;
+
+ ASTContext &Ctx = C.getASTContext();
+ if (!II_fopen)
+ II_fopen = &Ctx.Idents.get("fopen");
+ if (!II_tmpfile)
+ II_tmpfile = &Ctx.Idents.get("tmpfile");
+ if (!II_fclose)
+ II_fclose = &Ctx.Idents.get("fclose");
+ if (!II_fread)
+ II_fread = &Ctx.Idents.get("fread");
+ if (!II_fwrite)
+ II_fwrite = &Ctx.Idents.get("fwrite");
+ if (!II_fseek)
+ II_fseek = &Ctx.Idents.get("fseek");
+ if (!II_ftell)
+ II_ftell = &Ctx.Idents.get("ftell");
+ if (!II_rewind)
+ II_rewind = &Ctx.Idents.get("rewind");
+ if (!II_fgetpos)
+ II_fgetpos = &Ctx.Idents.get("fgetpos");
+ if (!II_fsetpos)
+ II_fsetpos = &Ctx.Idents.get("fsetpos");
+ if (!II_clearerr)
+ II_clearerr = &Ctx.Idents.get("clearerr");
+ if (!II_feof)
+ II_feof = &Ctx.Idents.get("feof");
+ if (!II_ferror)
+ II_ferror = &Ctx.Idents.get("ferror");
+ if (!II_fileno)
+ II_fileno = &Ctx.Idents.get("fileno");
+
+ if (FD->getIdentifier() == II_fopen) {
+ Fopen(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_tmpfile) {
+ Tmpfile(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fclose) {
+ Fclose(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fread) {
+ Fread(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fwrite) {
+ Fwrite(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fseek) {
+ Fseek(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_ftell) {
+ Ftell(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_rewind) {
+ Rewind(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fgetpos) {
+ Fgetpos(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fsetpos) {
+ Fsetpos(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_clearerr) {
+ Clearerr(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_feof) {
+ Feof(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_ferror) {
+ Ferror(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fileno) {
+ Fileno(C, CE);
+ return true;
+ }
+
+ return false;
+}
+
+void StreamChecker::Fopen(CheckerContext &C, const CallExpr *CE) const {
+ OpenFileAux(C, CE);
+}
+
+void StreamChecker::Tmpfile(CheckerContext &C, const CallExpr *CE) const {
+ OpenFileAux(C, CE);
+}
+
+void StreamChecker::OpenFileAux(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ unsigned Count = C.getCurrentBlockCount();
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ const LocationContext *LCtx = C.getPredecessor()->getLocationContext();
+ DefinedSVal RetVal =
+ cast<DefinedSVal>(svalBuilder.getConjuredSymbolVal(0, CE, LCtx, Count));
+ state = state->BindExpr(CE, C.getLocationContext(), RetVal);
+
+ ConstraintManager &CM = C.getConstraintManager();
+ // Bifurcate the state into two: one with a valid FILE* pointer, the other
+ // with a NULL.
+ ProgramStateRef stateNotNull, stateNull;
+ llvm::tie(stateNotNull, stateNull) = CM.assumeDual(state, RetVal);
+
+ if (SymbolRef Sym = RetVal.getAsSymbol()) {
+    // If RetVal is not NULL, set the symbol's state to Opened; on the null
+    // branch, mark the open as having failed.
+ stateNotNull =
+ stateNotNull->set<StreamState>(Sym,StreamState::getOpened(CE));
+ stateNull =
+ stateNull->set<StreamState>(Sym, StreamState::getOpenFailed(CE));
+
+ C.addTransition(stateNotNull);
+ C.addTransition(stateNull);
+ }
+}
+
+void StreamChecker::Fclose(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = CheckDoubleClose(CE, C.getState(), C);
+ if (state)
+ C.addTransition(state);
+}
+
+void StreamChecker::Fread(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(3), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Fwrite(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(3), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Fseek(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!(state = CheckNullStream(state->getSVal(CE->getArg(0),
+ C.getLocationContext()), state, C)))
+ return;
+ // Check the legality of the 'whence' argument of 'fseek'.
+ SVal Whence = state->getSVal(CE->getArg(2), C.getLocationContext());
+ const nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&Whence);
+
+ if (!CI)
+ return;
+
+ int64_t x = CI->getValue().getSExtValue();
+ if (x >= 0 && x <= 2)
+ return;
+
+ if (ExplodedNode *N = C.addTransition(state)) {
+ if (!BT_illegalwhence)
+ BT_illegalwhence.reset(new BuiltinBug("Illegal whence argument",
+ "The whence argument to fseek() should be "
+ "SEEK_SET, SEEK_END, or SEEK_CUR."));
+ BugReport *R = new BugReport(*BT_illegalwhence,
+ BT_illegalwhence->getDescription(), N);
+ C.EmitReport(R);
+ }
+}
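+
+// Illustrative sketch (hypothetical code) of the whence check above:
+//
+//   FILE *F = fopen("input.txt", "r");
+//   if (F)
+//     fseek(F, 0L, 5);   // 5 is not SEEK_SET, SEEK_CUR, or SEEK_END; reported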
+
+void StreamChecker::Ftell(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Rewind(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Fgetpos(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Fsetpos(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Clearerr(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Feof(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Ferror(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+void StreamChecker::Fileno(CheckerContext &C, const CallExpr *CE) const {
+ ProgramStateRef state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0), C.getLocationContext()),
+ state, C))
+ return;
+}
+
+ProgramStateRef StreamChecker::CheckNullStream(SVal SV, ProgramStateRef state,
+ CheckerContext &C) const {
+ const DefinedSVal *DV = dyn_cast<DefinedSVal>(&SV);
+ if (!DV)
+ return 0;
+
+ ConstraintManager &CM = C.getConstraintManager();
+ ProgramStateRef stateNotNull, stateNull;
+ llvm::tie(stateNotNull, stateNull) = CM.assumeDual(state, *DV);
+
+ if (!stateNotNull && stateNull) {
+ if (ExplodedNode *N = C.generateSink(stateNull)) {
+ if (!BT_nullfp)
+ BT_nullfp.reset(new BuiltinBug("NULL stream pointer",
+ "Stream pointer might be NULL."));
+ BugReport *R =new BugReport(*BT_nullfp, BT_nullfp->getDescription(), N);
+ C.EmitReport(R);
+ }
+ return 0;
+ }
+ return stateNotNull;
+}
+
+ProgramStateRef StreamChecker::CheckDoubleClose(const CallExpr *CE,
+ ProgramStateRef state,
+ CheckerContext &C) const {
+ SymbolRef Sym =
+ state->getSVal(CE->getArg(0), C.getLocationContext()).getAsSymbol();
+ if (!Sym)
+ return state;
+
+ const StreamState *SS = state->get<StreamState>(Sym);
+
+ // If the file stream is not tracked, return.
+ if (!SS)
+ return state;
+
+  // Check: closing an already closed file stream causes undefined behavior,
+  // per the fclose man page.
+ if (SS->isClosed()) {
+ ExplodedNode *N = C.generateSink();
+ if (N) {
+ if (!BT_doubleclose)
+        BT_doubleclose.reset(new BuiltinBug("Double fclose",
+                                            "Closing a file stream that is "
+                                            "already closed causes undefined "
+                                            "behavior."));
+ BugReport *R = new BugReport(*BT_doubleclose,
+ BT_doubleclose->getDescription(), N);
+ C.EmitReport(R);
+ }
+ return NULL;
+ }
+
+ // Close the File Descriptor.
+ return state->set<StreamState>(Sym, StreamState::getClosed(CE));
+}
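+
+// Illustrative sketch (hypothetical code) of the double-close case:
+//
+//   FILE *F = fopen("input.txt", "r");
+//   if (F) {
+//     fclose(F);
+//     fclose(F);   // second fclose on an already closed stream; reported
+//   }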
+
+void StreamChecker::checkDeadSymbols(SymbolReaper &SymReaper,
+ CheckerContext &C) const {
+ for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
+ E = SymReaper.dead_end(); I != E; ++I) {
+ SymbolRef Sym = *I;
+ ProgramStateRef state = C.getState();
+ const StreamState *SS = state->get<StreamState>(Sym);
+ if (!SS)
+ return;
+
+ if (SS->isOpened()) {
+ ExplodedNode *N = C.generateSink();
+ if (N) {
+ if (!BT_ResourceLeak)
+          BT_ResourceLeak.reset(new BuiltinBug("Resource Leak",
+                         "Opened file is never closed; potential resource leak."));
+ BugReport *R = new BugReport(*BT_ResourceLeak,
+ BT_ResourceLeak->getDescription(), N);
+ C.EmitReport(R);
+ }
+ }
+ }
+}
+
+void StreamChecker::checkEndPath(CheckerContext &Ctx) const {
+ ProgramStateRef state = Ctx.getState();
+ typedef llvm::ImmutableMap<SymbolRef, StreamState> SymMap;
+ SymMap M = state->get<StreamState>();
+
+ for (SymMap::iterator I = M.begin(), E = M.end(); I != E; ++I) {
+ StreamState SS = I->second;
+ if (SS.isOpened()) {
+ ExplodedNode *N = Ctx.addTransition(state);
+ if (N) {
+ if (!BT_ResourceLeak)
+          BT_ResourceLeak.reset(new BuiltinBug("Resource Leak",
+                         "Opened file is never closed; potential resource leak."));
+ BugReport *R = new BugReport(*BT_ResourceLeak,
+ BT_ResourceLeak->getDescription(), N);
+ Ctx.EmitReport(R);
+ }
+ }
+ }
+}
+
+void StreamChecker::checkPreStmt(const ReturnStmt *S, CheckerContext &C) const {
+ const Expr *RetE = S->getRetValue();
+ if (!RetE)
+ return;
+
+ ProgramStateRef state = C.getState();
+ SymbolRef Sym = state->getSVal(RetE, C.getLocationContext()).getAsSymbol();
+
+ if (!Sym)
+ return;
+
+ const StreamState *SS = state->get<StreamState>(Sym);
+  if (!SS)
+ return;
+
+ if (SS->isOpened())
+ state = state->set<StreamState>(Sym, StreamState::getEscaped(S));
+
+ C.addTransition(state);
+}
+
+void ento::registerStreamChecker(CheckerManager &mgr) {
+ mgr.registerChecker<StreamChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
new file mode 100644
index 0000000..1133682
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
@@ -0,0 +1,62 @@
+//== TaintTesterChecker.cpp ----------------------------------- -*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker can be used for testing how tainted data is propagated.
+//
+//===----------------------------------------------------------------------===//
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class TaintTesterChecker : public Checker< check::PostStmt<Expr> > {
+
+ mutable OwningPtr<BugType> BT;
+ void initBugType() const;
+
+ /// Given a pointer argument, get the symbol of the value it contains
+ /// (points to).
+ SymbolRef getPointedToSymbol(CheckerContext &C,
+ const Expr* Arg,
+ bool IssueWarning = true) const;
+
+public:
+ void checkPostStmt(const Expr *E, CheckerContext &C) const;
+};
+}
+
+inline void TaintTesterChecker::initBugType() const {
+ if (!BT)
+ BT.reset(new BugType("Tainted data", "General"));
+}
+
+void TaintTesterChecker::checkPostStmt(const Expr *E,
+ CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+ if (!State)
+ return;
+
+ if (State->isTainted(E, C.getLocationContext())) {
+ if (ExplodedNode *N = C.addTransition()) {
+ initBugType();
+ BugReport *report = new BugReport(*BT, "tainted",N);
+ report->addRange(E->getSourceRange());
+ C.EmitReport(report);
+ }
+ }
+}
+
+void ento::registerTaintTesterChecker(CheckerManager &mgr) {
+ mgr.registerChecker<TaintTesterChecker>();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
new file mode 100644
index 0000000..a30f6d5
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefBranchChecker.cpp
@@ -0,0 +1,112 @@
+//=== UndefBranchChecker.cpp -----------------------------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines UndefBranchChecker, which checks for undefined branch
+// conditions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class UndefBranchChecker : public Checker<check::BranchCondition> {
+ mutable OwningPtr<BuiltinBug> BT;
+
+ struct FindUndefExpr {
+ ProgramStateRef St;
+ const LocationContext *LCtx;
+
+ FindUndefExpr(ProgramStateRef S, const LocationContext *L)
+ : St(S), LCtx(L) {}
+
+ const Expr *FindExpr(const Expr *Ex) {
+ if (!MatchesCriteria(Ex))
+ return 0;
+
+ for (Stmt::const_child_iterator I = Ex->child_begin(),
+ E = Ex->child_end();I!=E;++I)
+ if (const Expr *ExI = dyn_cast_or_null<Expr>(*I)) {
+ const Expr *E2 = FindExpr(ExI);
+ if (E2) return E2;
+ }
+
+ return Ex;
+ }
+
+ bool MatchesCriteria(const Expr *Ex) {
+ return St->getSVal(Ex, LCtx).isUndef();
+ }
+ };
+
+public:
+ void checkBranchCondition(const Stmt *Condition, CheckerContext &Ctx) const;
+};
+
+}
+
+void UndefBranchChecker::checkBranchCondition(const Stmt *Condition,
+ CheckerContext &Ctx) const {
+ SVal X = Ctx.getState()->getSVal(Condition, Ctx.getLocationContext());
+ if (X.isUndef()) {
+ // Generate a sink node, which implicitly marks both outgoing branches as
+ // infeasible.
+ ExplodedNode *N = Ctx.generateSink();
+ if (N) {
+ if (!BT)
+ BT.reset(
+ new BuiltinBug("Branch condition evaluates to a garbage value"));
+
+ // What's going on here: we want to highlight the subexpression of the
+ // condition that is the most likely source of the "uninitialized
+ // branch condition." We do a recursive walk of the condition's
+ // subexpressions and roughly look for the most nested subexpression
+ // that binds to Undefined. We then highlight that expression's range.
+
+      // Get the predecessor node and check if it is a PostStmt with the Stmt
+      // being the terminator condition. We want to inspect the state
+      // of that node instead because it will contain the main information
+      // about the subexpressions.
+
+      // Note: any predecessor will do. They should have identical state,
+      // since the only role of the BlockEdge was to act as an error sink
+      // (the value was already undefined at that point).
+ assert (!N->pred_empty());
+ const Expr *Ex = cast<Expr>(Condition);
+ ExplodedNode *PrevN = *N->pred_begin();
+ ProgramPoint P = PrevN->getLocation();
+ ProgramStateRef St = N->getState();
+
+ if (PostStmt *PS = dyn_cast<PostStmt>(&P))
+ if (PS->getStmt() == Ex)
+ St = PrevN->getState();
+
+ FindUndefExpr FindIt(St, Ctx.getLocationContext());
+ Ex = FindIt.FindExpr(Ex);
+
+ // Emit the bug report.
+ BugReport *R = new BugReport(*BT, BT->getDescription(), N);
+ R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Ex, R));
+ R->addRange(Ex->getSourceRange());
+
+ Ctx.EmitReport(R);
+ }
+ }
+}
+
+void ento::registerUndefBranchChecker(CheckerManager &mgr) {
+ mgr.registerChecker<UndefBranchChecker>();
+}
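A hypothetical example of the pattern described above, where the branch condition is a garbage value:

    int undef_branch_example(void) {
      int x;        // deliberately left uninitialized
      if (x)        // branch condition evaluates to a garbage value
        return 1;
      return 0;
    }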
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
new file mode 100644
index 0000000..d57767e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefCapturedBlockVarChecker.cpp
@@ -0,0 +1,105 @@
+// UndefCapturedBlockVarChecker.cpp - Uninitialized captured vars -*- C++ -*-=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker detects blocks that capture uninitialized values.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UndefCapturedBlockVarChecker
+ : public Checker< check::PostStmt<BlockExpr> > {
+ mutable OwningPtr<BugType> BT;
+
+public:
+ void checkPostStmt(const BlockExpr *BE, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+static const DeclRefExpr *FindBlockDeclRefExpr(const Stmt *S,
+ const VarDecl *VD) {
+ if (const DeclRefExpr *BR = dyn_cast<DeclRefExpr>(S))
+ if (BR->getDecl() == VD)
+ return BR;
+
+ for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
+ I!=E; ++I)
+ if (const Stmt *child = *I) {
+ const DeclRefExpr *BR = FindBlockDeclRefExpr(child, VD);
+ if (BR)
+ return BR;
+ }
+
+ return NULL;
+}
+
+void
+UndefCapturedBlockVarChecker::checkPostStmt(const BlockExpr *BE,
+ CheckerContext &C) const {
+ if (!BE->getBlockDecl()->hasCaptures())
+ return;
+
+ ProgramStateRef state = C.getState();
+ const BlockDataRegion *R =
+ cast<BlockDataRegion>(state->getSVal(BE,
+ C.getLocationContext()).getAsRegion());
+
+ BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
+ E = R->referenced_vars_end();
+
+ for (; I != E; ++I) {
+ // This VarRegion is the region associated with the block; we need
+ // the one associated with the encompassing context.
+ const VarRegion *VR = *I;
+ const VarDecl *VD = VR->getDecl();
+
+ if (VD->getAttr<BlocksAttr>() || !VD->hasLocalStorage())
+ continue;
+
+ // Get the VarRegion associated with VD in the local stack frame.
+ const LocationContext *LC = C.getLocationContext();
+ VR = C.getSValBuilder().getRegionManager().getVarRegion(VD, LC);
+ SVal VRVal = state->getSVal(VR);
+
+ if (VRVal.isUndef())
+ if (ExplodedNode *N = C.generateSink()) {
+ if (!BT)
+ BT.reset(new BuiltinBug("uninitialized variable captured by block"));
+
+ // Generate a bug report.
+ SmallString<128> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << "Variable '" << VD->getName()
+ << "' is uninitialized when captured by block";
+
+ BugReport *R = new BugReport(*BT, os.str(), N);
+ if (const Expr *Ex = FindBlockDeclRefExpr(BE->getBody(), VD))
+ R->addRange(Ex->getSourceRange());
+ R->addVisitor(new FindLastStoreBRVisitor(VRVal, VR));
+ // need location of block
+ C.EmitReport(R);
+ }
+ }
+}
+
+void ento::registerUndefCapturedBlockVarChecker(CheckerManager &mgr) {
+ mgr.registerChecker<UndefCapturedBlockVarChecker>();
+}
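A hypothetical example of a block capturing an uninitialized variable; it assumes Clang's blocks extension (-fblocks) is enabled:

    void undef_captured_block_example(void) {
      int x;                              // deliberately left uninitialized
      int (^get)(void) = ^{ return x; };  // 'x' is uninitialized when captured
      (void)get;                          // by the block
    }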
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
new file mode 100644
index 0000000..c3c9ed7
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -0,0 +1,91 @@
+//=== UndefResultChecker.cpp ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UndefResultChecker, a builtin check in ExprEngine that
+// performs checks for undefined results of non-assignment binary operators.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UndefResultChecker
+ : public Checker< check::PostStmt<BinaryOperator> > {
+
+ mutable OwningPtr<BugType> BT;
+
+public:
+ void checkPostStmt(const BinaryOperator *B, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
+ CheckerContext &C) const {
+ ProgramStateRef state = C.getState();
+ const LocationContext *LCtx = C.getLocationContext();
+ if (state->getSVal(B, LCtx).isUndef()) {
+ // Generate an error node.
+ ExplodedNode *N = C.generateSink();
+ if (!N)
+ return;
+
+ if (!BT)
+ BT.reset(new BuiltinBug("Result of operation is garbage or undefined"));
+
+ SmallString<256> sbuf;
+ llvm::raw_svector_ostream OS(sbuf);
+ const Expr *Ex = NULL;
+ bool isLeft = true;
+
+ if (state->getSVal(B->getLHS(), LCtx).isUndef()) {
+ Ex = B->getLHS()->IgnoreParenCasts();
+ isLeft = true;
+ }
+ else if (state->getSVal(B->getRHS(), LCtx).isUndef()) {
+ Ex = B->getRHS()->IgnoreParenCasts();
+ isLeft = false;
+ }
+
+ if (Ex) {
+ OS << "The " << (isLeft ? "left" : "right")
+ << " operand of '"
+ << BinaryOperator::getOpcodeStr(B->getOpcode())
+ << "' is a garbage value";
+ }
+ else {
+ // Neither operand was undefined, but the result is undefined.
+ OS << "The result of the '"
+ << BinaryOperator::getOpcodeStr(B->getOpcode())
+ << "' expression is undefined";
+ }
+ BugReport *report = new BugReport(*BT, OS.str(), N);
+ if (Ex) {
+ report->addRange(Ex->getSourceRange());
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Ex,
+ report));
+ }
+ else
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, B,
+ report));
+ C.EmitReport(report);
+ }
+}
+
+void ento::registerUndefResultChecker(CheckerManager &mgr) {
+ mgr.registerChecker<UndefResultChecker>();
+}
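A hypothetical example whose binary operator has a garbage operand, the case this checker reports:

    int undef_result_example(void) {
      int x;          // deliberately left uninitialized
      return x + 1;   // the left operand of '+' is a garbage value
    }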
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
new file mode 100644
index 0000000..0297c4e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedArraySubscriptChecker.cpp
@@ -0,0 +1,55 @@
+//===--- UndefinedArraySubscriptChecker.cpp --------------------*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UndefinedArraySubscriptChecker, a builtin check in ExprEngine
+// that performs checks for undefined array subscripts.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UndefinedArraySubscriptChecker
+ : public Checker< check::PreStmt<ArraySubscriptExpr> > {
+ mutable OwningPtr<BugType> BT;
+
+public:
+ void checkPreStmt(const ArraySubscriptExpr *A, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+void
+UndefinedArraySubscriptChecker::checkPreStmt(const ArraySubscriptExpr *A,
+ CheckerContext &C) const {
+ if (C.getState()->getSVal(A->getIdx(), C.getLocationContext()).isUndef()) {
+ if (ExplodedNode *N = C.generateSink()) {
+ if (!BT)
+ BT.reset(new BuiltinBug("Array subscript is undefined"));
+
+ // Generate a report for this bug.
+ BugReport *R = new BugReport(*BT, BT->getName(), N);
+ R->addRange(A->getIdx()->getSourceRange());
+ R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N,
+ A->getIdx(),
+ R));
+ C.EmitReport(R);
+ }
+ }
+}
+
+void ento::registerUndefinedArraySubscriptChecker(CheckerManager &mgr) {
+ mgr.registerChecker<UndefinedArraySubscriptChecker>();
+}
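A hypothetical example with an undefined array subscript:

    void undef_subscript_example(int *buf) {
      int i;        // deliberately left uninitialized
      buf[i] = 0;   // the array subscript is undefined
    }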
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
new file mode 100644
index 0000000..78f7fa6
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UndefinedAssignmentChecker.cpp
@@ -0,0 +1,88 @@
+//===--- UndefinedAssignmentChecker.cpp -------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UndefinedAssignmentChecker, a builtin check in ExprEngine that
+// checks for assigning undefined values.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UndefinedAssignmentChecker
+ : public Checker<check::Bind> {
+ mutable OwningPtr<BugType> BT;
+
+public:
+ void checkBind(SVal location, SVal val, const Stmt *S,
+ CheckerContext &C) const;
+};
+}
+
+void UndefinedAssignmentChecker::checkBind(SVal location, SVal val,
+ const Stmt *StoreE,
+ CheckerContext &C) const {
+ if (!val.isUndef())
+ return;
+
+ ExplodedNode *N = C.generateSink();
+
+ if (!N)
+ return;
+
+ const char *str = "Assigned value is garbage or undefined";
+
+ if (!BT)
+ BT.reset(new BuiltinBug(str));
+
+ // Generate a report for this bug.
+ const Expr *ex = 0;
+
+ while (StoreE) {
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(StoreE)) {
+ if (B->isCompoundAssignmentOp()) {
+ ProgramStateRef state = C.getState();
+ if (state->getSVal(B->getLHS(), C.getLocationContext()).isUndef()) {
+ str = "The left expression of the compound assignment is an "
+ "uninitialized value. The computed value will also be garbage";
+ ex = B->getLHS();
+ break;
+ }
+ }
+
+ ex = B->getRHS();
+ break;
+ }
+
+ if (const DeclStmt *DS = dyn_cast<DeclStmt>(StoreE)) {
+ const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
+ ex = VD->getInit();
+ }
+
+ break;
+ }
+
+ BugReport *R = new BugReport(*BT, str, N);
+ if (ex) {
+ R->addRange(ex->getSourceRange());
+ R->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, ex, R));
+ }
+ C.EmitReport(R);
+}
+
+void ento::registerUndefinedAssignmentChecker(CheckerManager &mgr) {
+ mgr.registerChecker<UndefinedAssignmentChecker>();
+}
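A hypothetical example that assigns an uninitialized value:

    void undef_assignment_example(void) {
      int x;    // deliberately left uninitialized
      int y;
      y = x;    // the assigned value is garbage or undefined
    }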
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
new file mode 100644
index 0000000..60e665fe
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -0,0 +1,353 @@
+//= UnixAPIChecker.cpp - Checks preconditions for various Unix APIs -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UnixAPIChecker, which is an assortment of checks on calls
+// to various widely used UNIX/POSIX functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringSwitch.h"
+#include <fcntl.h>
+
+using namespace clang;
+using namespace ento;
+using llvm::Optional;
+
+namespace {
+class UnixAPIChecker : public Checker< check::PreStmt<CallExpr> > {
+ mutable OwningPtr<BugType> BT_open, BT_pthreadOnce, BT_mallocZero;
+ mutable Optional<uint64_t> Val_O_CREAT;
+
+public:
+ void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
+
+ void CheckOpen(CheckerContext &C, const CallExpr *CE) const;
+ void CheckPthreadOnce(CheckerContext &C, const CallExpr *CE) const;
+ void CheckCallocZero(CheckerContext &C, const CallExpr *CE) const;
+ void CheckMallocZero(CheckerContext &C, const CallExpr *CE) const;
+ void CheckReallocZero(CheckerContext &C, const CallExpr *CE) const;
+ void CheckAllocaZero(CheckerContext &C, const CallExpr *CE) const;
+ void CheckVallocZero(CheckerContext &C, const CallExpr *CE) const;
+
+ typedef void (UnixAPIChecker::*SubChecker)(CheckerContext &,
+ const CallExpr *) const;
+private:
+ bool ReportZeroByteAllocation(CheckerContext &C,
+ ProgramStateRef falseState,
+ const Expr *arg,
+ const char *fn_name) const;
+ void BasicAllocationCheck(CheckerContext &C,
+ const CallExpr *CE,
+ const unsigned numArgs,
+ const unsigned sizeArg,
+ const char *fn) const;
+};
+} //end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+static inline void LazyInitialize(OwningPtr<BugType> &BT,
+ const char *name) {
+ if (BT)
+ return;
+ BT.reset(new BugType(name, categories::UnixAPI));
+}
+
+//===----------------------------------------------------------------------===//
+// "open" (man 2 open)
+//===----------------------------------------------------------------------===//
+
+void UnixAPIChecker::CheckOpen(CheckerContext &C, const CallExpr *CE) const {
+ // The definition of O_CREAT is platform specific. We need a better way
+ // of querying this information from the checking environment.
+ if (!Val_O_CREAT.hasValue()) {
+ if (C.getASTContext().getTargetInfo().getTriple().getVendor()
+ == llvm::Triple::Apple)
+ Val_O_CREAT = 0x0200;
+ else {
+ // FIXME: We need a more general way of getting the O_CREAT value.
+ // We could possibly grovel through the preprocessor state, but
+ // that would require passing the Preprocessor object to the ExprEngine.
+ return;
+ }
+ }
+
+ // Look at the 'oflags' argument for the O_CREAT flag.
+ ProgramStateRef state = C.getState();
+
+ if (CE->getNumArgs() < 2) {
+ // The frontend should issue a warning for this case, so this is a sanity
+ // check.
+ return;
+ }
+
+ // Now check if oflags has O_CREAT set.
+ const Expr *oflagsEx = CE->getArg(1);
+ const SVal V = state->getSVal(oflagsEx, C.getLocationContext());
+ if (!isa<NonLoc>(V)) {
+ // The case where 'V' can be a location can only be due to a bad header,
+ // so in this case bail out.
+ return;
+ }
+ NonLoc oflags = cast<NonLoc>(V);
+ NonLoc ocreateFlag =
+ cast<NonLoc>(C.getSValBuilder().makeIntVal(Val_O_CREAT.getValue(),
+ oflagsEx->getType()));
+ SVal maskedFlagsUC = C.getSValBuilder().evalBinOpNN(state, BO_And,
+ oflags, ocreateFlag,
+ oflagsEx->getType());
+ if (maskedFlagsUC.isUnknownOrUndef())
+ return;
+ DefinedSVal maskedFlags = cast<DefinedSVal>(maskedFlagsUC);
+
+ // Check if maskedFlags is non-zero.
+ ProgramStateRef trueState, falseState;
+ llvm::tie(trueState, falseState) = state->assume(maskedFlags);
+
+ // Only emit an error if the value of 'maskedFlags' is properly
+  // constrained.
+ if (!(trueState && !falseState))
+ return;
+
+ if (CE->getNumArgs() < 3) {
+ ExplodedNode *N = C.generateSink(trueState);
+ if (!N)
+ return;
+
+ LazyInitialize(BT_open, "Improper use of 'open'");
+
+ BugReport *report =
+ new BugReport(*BT_open,
+ "Call to 'open' requires a third argument when "
+ "the 'O_CREAT' flag is set", N);
+ report->addRange(oflagsEx->getSourceRange());
+ C.EmitReport(report);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// pthread_once
+//===----------------------------------------------------------------------===//
+
+void UnixAPIChecker::CheckPthreadOnce(CheckerContext &C,
+ const CallExpr *CE) const {
+
+ // This is similar to 'CheckDispatchOnce' in the MacOSXAPIChecker.
+ // They can possibly be refactored.
+
+ if (CE->getNumArgs() < 1)
+ return;
+
+ // Check if the first argument is stack allocated. If so, issue a warning
+ // because that's likely to be bad news.
+ ProgramStateRef state = C.getState();
+ const MemRegion *R =
+ state->getSVal(CE->getArg(0), C.getLocationContext()).getAsRegion();
+ if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
+ return;
+
+ ExplodedNode *N = C.generateSink(state);
+ if (!N)
+ return;
+
+ SmallString<256> S;
+ llvm::raw_svector_ostream os(S);
+ os << "Call to 'pthread_once' uses";
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R))
+ os << " the local variable '" << VR->getDecl()->getName() << '\'';
+ else
+ os << " stack allocated memory";
+ os << " for the \"control\" value. Using such transient memory for "
+ "the control value is potentially dangerous.";
+ if (isa<VarRegion>(R) && isa<StackLocalsSpaceRegion>(R->getMemorySpace()))
+ os << " Perhaps you intended to declare the variable as 'static'?";
+
+ LazyInitialize(BT_pthreadOnce, "Improper use of 'pthread_once'");
+
+ BugReport *report = new BugReport(*BT_pthreadOnce, os.str(), N);
+ report->addRange(CE->getArg(0)->getSourceRange());
+ C.EmitReport(report);
+}
+
+//===----------------------------------------------------------------------===//
+// "calloc", "malloc", "realloc", "alloca" and "valloc" with allocation size 0
+//===----------------------------------------------------------------------===//
+// FIXME: Eventually these should be rolled into the MallocChecker, but right now
+// they're more basic and valuable for widespread use.
+
+// Returns true if we try to do a zero byte allocation, false otherwise.
+// Fills in trueState and falseState.
+static bool IsZeroByteAllocation(ProgramStateRef state,
+ const SVal argVal,
+ ProgramStateRef *trueState,
+ ProgramStateRef *falseState) {
+ llvm::tie(*trueState, *falseState) =
+ state->assume(cast<DefinedSVal>(argVal));
+
+ return (*falseState && !*trueState);
+}
+
+// Generates an error report, indicating that the function whose name is given
+// will perform a zero byte allocation.
+// Returns false if an error occurred, true otherwise.
+bool UnixAPIChecker::ReportZeroByteAllocation(CheckerContext &C,
+ ProgramStateRef falseState,
+ const Expr *arg,
+ const char *fn_name) const {
+ ExplodedNode *N = C.generateSink(falseState);
+ if (!N)
+ return false;
+
+ LazyInitialize(BT_mallocZero,
+ "Undefined allocation of 0 bytes (CERT MEM04-C; CWE-131)");
+
+ SmallString<256> S;
+ llvm::raw_svector_ostream os(S);
+ os << "Call to '" << fn_name << "' has an allocation size of 0 bytes";
+ BugReport *report = new BugReport(*BT_mallocZero, os.str(), N);
+
+ report->addRange(arg->getSourceRange());
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, arg,
+ report));
+ C.EmitReport(report);
+
+ return true;
+}
+
+// Does a basic check for 0-sized allocations suitable for most of the below
+// functions (modulo "calloc").
+void UnixAPIChecker::BasicAllocationCheck(CheckerContext &C,
+ const CallExpr *CE,
+ const unsigned numArgs,
+ const unsigned sizeArg,
+ const char *fn) const {
+ // Sanity check for the correct number of arguments
+ if (CE->getNumArgs() != numArgs)
+ return;
+
+ // Check if the allocation size is 0.
+ ProgramStateRef state = C.getState();
+ ProgramStateRef trueState = NULL, falseState = NULL;
+ const Expr *arg = CE->getArg(sizeArg);
+ SVal argVal = state->getSVal(arg, C.getLocationContext());
+
+ if (argVal.isUnknownOrUndef())
+ return;
+
+ // Is the value perfectly constrained to zero?
+ if (IsZeroByteAllocation(state, argVal, &trueState, &falseState)) {
+ (void) ReportZeroByteAllocation(C, falseState, arg, fn);
+ return;
+ }
+  // Assume the value is non-zero going forward.
+ assert(trueState);
+ if (trueState != state)
+ C.addTransition(trueState);
+}
+
+void UnixAPIChecker::CheckCallocZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ unsigned int nArgs = CE->getNumArgs();
+ if (nArgs != 2)
+ return;
+
+ ProgramStateRef state = C.getState();
+ ProgramStateRef trueState = NULL, falseState = NULL;
+
+ unsigned int i;
+ for (i = 0; i < nArgs; i++) {
+ const Expr *arg = CE->getArg(i);
+ SVal argVal = state->getSVal(arg, C.getLocationContext());
+ if (argVal.isUnknownOrUndef()) {
+ if (i == 0)
+ continue;
+ else
+ return;
+ }
+
+ if (IsZeroByteAllocation(state, argVal, &trueState, &falseState)) {
+ if (ReportZeroByteAllocation(C, falseState, arg, "calloc"))
+ return;
+ else if (i == 0)
+ continue;
+ else
+ return;
+ }
+ }
+
+  // Assume the value is non-zero going forward.
+ assert(trueState);
+ if (trueState != state)
+ C.addTransition(trueState);
+}
+
+void UnixAPIChecker::CheckMallocZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ BasicAllocationCheck(C, CE, 1, 0, "malloc");
+}
+
+void UnixAPIChecker::CheckReallocZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ BasicAllocationCheck(C, CE, 2, 1, "realloc");
+}
+
+void UnixAPIChecker::CheckAllocaZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ BasicAllocationCheck(C, CE, 1, 0, "alloca");
+}
+
+void UnixAPIChecker::CheckVallocZero(CheckerContext &C,
+ const CallExpr *CE) const {
+ BasicAllocationCheck(C, CE, 1, 0, "valloc");
+}
+
+
+//===----------------------------------------------------------------------===//
+// Central dispatch function.
+//===----------------------------------------------------------------------===//
+
+void UnixAPIChecker::checkPreStmt(const CallExpr *CE,
+ CheckerContext &C) const {
+ StringRef FName = C.getCalleeName(CE);
+ if (FName.empty())
+ return;
+
+ SubChecker SC =
+ llvm::StringSwitch<SubChecker>(FName)
+ .Case("open", &UnixAPIChecker::CheckOpen)
+ .Case("pthread_once", &UnixAPIChecker::CheckPthreadOnce)
+ .Case("calloc", &UnixAPIChecker::CheckCallocZero)
+ .Case("malloc", &UnixAPIChecker::CheckMallocZero)
+ .Case("realloc", &UnixAPIChecker::CheckReallocZero)
+ .Cases("alloca", "__builtin_alloca", &UnixAPIChecker::CheckAllocaZero)
+ .Case("valloc", &UnixAPIChecker::CheckVallocZero)
+ .Default(NULL);
+
+ if (SC)
+ (this->*SC)(C, CE);
+}
+
+//===----------------------------------------------------------------------===//
+// Registration.
+//===----------------------------------------------------------------------===//
+
+void ento::registerUnixAPIChecker(CheckerManager &mgr) {
+ mgr.registerChecker<UnixAPIChecker>();
+}
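A hypothetical example combining two of the checks above; note that, per the code above, the 'open' check only fires when the checker knows the target's O_CREAT value (currently Apple targets):

    #include <fcntl.h>
    #include <stdlib.h>

    void unix_api_example(const char *path) {
      int fd = open(path, O_CREAT);   // third (mode) argument missing while
      (void)fd;                       // the O_CREAT flag is set

      void *p = malloc(0);            // zero-byte allocation
      free(p);
    }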
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
new file mode 100644
index 0000000..5a13ed0
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/UnreachableCodeChecker.cpp
@@ -0,0 +1,247 @@
+//==- UnreachableCodeChecker.cpp - Generalized dead code checker -*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+// This file implements a generalized unreachable code checker using a
+// path-sensitive analysis. We mark any path visited, and then walk the CFG as a
+// post-analysis to determine what was never visited.
+//
+// A similar flow-sensitive only check exists in Analysis/ReachableCode.cpp
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallSet.h"
+
+// The number of CFGBlock pointers we want to reserve memory for. This is used
+// once for each function we analyze.
+#define DEFAULT_CFGBLOCKS 256
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class UnreachableCodeChecker : public Checker<check::EndAnalysis> {
+public:
+ void checkEndAnalysis(ExplodedGraph &G, BugReporter &B,
+ ExprEngine &Eng) const;
+private:
+ typedef llvm::SmallSet<unsigned, DEFAULT_CFGBLOCKS> CFGBlocksSet;
+
+ static inline const Stmt *getUnreachableStmt(const CFGBlock *CB);
+ static void FindUnreachableEntryPoints(const CFGBlock *CB,
+ CFGBlocksSet &reachable,
+ CFGBlocksSet &visited);
+ static bool isInvalidPath(const CFGBlock *CB, const ParentMap &PM);
+ static inline bool isEmptyCFGBlock(const CFGBlock *CB);
+};
+}
+
+void UnreachableCodeChecker::checkEndAnalysis(ExplodedGraph &G,
+ BugReporter &B,
+ ExprEngine &Eng) const {
+ CFGBlocksSet reachable, visited;
+
+ if (Eng.hasWorkRemaining())
+ return;
+
+ const Decl *D = 0;
+ CFG *C = 0;
+ ParentMap *PM = 0;
+ const LocationContext *LC = 0;
+ // Iterate over ExplodedGraph
+ for (ExplodedGraph::node_iterator I = G.nodes_begin(), E = G.nodes_end();
+ I != E; ++I) {
+ const ProgramPoint &P = I->getLocation();
+ LC = P.getLocationContext();
+
+ if (!D)
+ D = LC->getAnalysisDeclContext()->getDecl();
+ // Save the CFG if we don't have it already
+ if (!C)
+ C = LC->getAnalysisDeclContext()->getUnoptimizedCFG();
+ if (!PM)
+ PM = &LC->getParentMap();
+
+ if (const BlockEntrance *BE = dyn_cast<BlockEntrance>(&P)) {
+ const CFGBlock *CB = BE->getBlock();
+ reachable.insert(CB->getBlockID());
+ }
+ }
+
+ // Bail out if we didn't get the CFG or the ParentMap.
+ if (!D || !C || !PM)
+ return;
+
+ // Don't do anything for template instantiations. Proving that code
+ // in a template instantiation is unreachable means proving that it is
+ // unreachable in all instantiations.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isTemplateInstantiation())
+ return;
+
+ // Find CFGBlocks that were not covered by any node
+ for (CFG::const_iterator I = C->begin(), E = C->end(); I != E; ++I) {
+ const CFGBlock *CB = *I;
+ // Check if the block is unreachable
+ if (reachable.count(CB->getBlockID()))
+ continue;
+
+ // Check if the block is empty (an artificial block)
+ if (isEmptyCFGBlock(CB))
+ continue;
+
+ // Find the entry points for this block
+ if (!visited.count(CB->getBlockID()))
+ FindUnreachableEntryPoints(CB, reachable, visited);
+
+ // This block may have been pruned; check if we still want to report it
+ if (reachable.count(CB->getBlockID()))
+ continue;
+
+ // Check for false positives
+ if (CB->size() > 0 && isInvalidPath(CB, *PM))
+ continue;
+
+ // It is good practice to always have a "default" label in a "switch", even
+ // if we should never get there. It can be used to detect errors, for
+ // instance. Unreachable code directly under a "default" label is therefore
+ // likely to be a false positive.
+ if (const Stmt *label = CB->getLabel())
+ if (label->getStmtClass() == Stmt::DefaultStmtClass)
+ continue;
+
+ // Special case for __builtin_unreachable.
+ // FIXME: This should be extended to include other unreachable markers,
+ // such as llvm_unreachable.
+ if (!CB->empty()) {
+ bool foundUnreachable = false;
+ for (CFGBlock::const_iterator ci = CB->begin(), ce = CB->end();
+ ci != ce; ++ci) {
+ if (const CFGStmt *S = (*ci).getAs<CFGStmt>())
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S->getStmt())) {
+ if (CE->isBuiltinCall() == Builtin::BI__builtin_unreachable) {
+ foundUnreachable = true;
+ break;
+ }
+ }
+ }
+ if (foundUnreachable)
+ continue;
+ }
+
+ // We found a block that wasn't covered - find the statement to report
+ SourceRange SR;
+ PathDiagnosticLocation DL;
+ SourceLocation SL;
+ if (const Stmt *S = getUnreachableStmt(CB)) {
+ SR = S->getSourceRange();
+ DL = PathDiagnosticLocation::createBegin(S, B.getSourceManager(), LC);
+ SL = DL.asLocation();
+ if (SR.isInvalid() || !SL.isValid())
+ continue;
+ }
+ else
+ continue;
+
+ // Check if the SourceLocation is in a system header
+ const SourceManager &SM = B.getSourceManager();
+ if (SM.isInSystemHeader(SL) || SM.isInExternCSystemHeader(SL))
+ continue;
+
+ B.EmitBasicReport(D, "Unreachable code", "Dead code",
+ "This statement is never executed", DL, SR);
+ }
+}
+
+// Recursively finds the entry point(s) for this dead CFGBlock.
+void UnreachableCodeChecker::FindUnreachableEntryPoints(const CFGBlock *CB,
+ CFGBlocksSet &reachable,
+ CFGBlocksSet &visited) {
+ visited.insert(CB->getBlockID());
+
+ for (CFGBlock::const_pred_iterator I = CB->pred_begin(), E = CB->pred_end();
+ I != E; ++I) {
+ if (!reachable.count((*I)->getBlockID())) {
+ // If we find an unreachable predecessor, mark this block as reachable so
+ // we don't report this block
+ reachable.insert(CB->getBlockID());
+ if (!visited.count((*I)->getBlockID()))
+ // If we haven't previously visited the unreachable predecessor, recurse
+ FindUnreachableEntryPoints(*I, reachable, visited);
+ }
+ }
+}
+
+// Find the Stmt* in a CFGBlock for reporting a warning
+const Stmt *UnreachableCodeChecker::getUnreachableStmt(const CFGBlock *CB) {
+ for (CFGBlock::const_iterator I = CB->begin(), E = CB->end(); I != E; ++I) {
+ if (const CFGStmt *S = I->getAs<CFGStmt>())
+ return S->getStmt();
+ }
+ if (const Stmt *S = CB->getTerminator())
+ return S;
+ else
+ return 0;
+}
+
+// Determines if the path to this CFGBlock contained an element that implies
+// this block is a false positive. We assume that FindUnreachableEntryPoints
+// has already marked only the entry points to any dead code, so we need only
+// to find the condition that led to this block (the predecessor of this
+// block).
+// There will never be more than one predecessor.
+bool UnreachableCodeChecker::isInvalidPath(const CFGBlock *CB,
+ const ParentMap &PM) {
+ // We only expect a predecessor size of 0 or 1. If it is >1, then an external
+ // condition has broken our assumption (for example, a sink being placed by
+ // another check). In these cases, we choose not to report.
+ if (CB->pred_size() > 1)
+ return true;
+
+ // If there are no predecessors, then this block is trivially unreachable
+ if (CB->pred_size() == 0)
+ return false;
+
+ const CFGBlock *pred = *CB->pred_begin();
+
+  // Get the predecessor block's terminator condition
+ const Stmt *cond = pred->getTerminatorCondition();
+
+ //assert(cond && "CFGBlock's predecessor has a terminator condition");
+  // The previous assertion is invalid in some cases (e.g. do/while). Reporting
+  // of these situations is left on for the moment to help triage these cases.
+ if (!cond)
+ return false;
+
+ // Run each of the checks on the conditions
+ if (containsMacro(cond) || containsEnum(cond)
+ || containsStaticLocal(cond) || containsBuiltinOffsetOf(cond)
+ || containsStmt<UnaryExprOrTypeTraitExpr>(cond))
+ return true;
+
+ return false;
+}
+
+// Returns true if the given CFGBlock is empty
+bool UnreachableCodeChecker::isEmptyCFGBlock(const CFGBlock *CB) {
+ return CB->getLabel() == 0 // No labels
+ && CB->size() == 0 // No statements
+ && CB->getTerminator() == 0; // No terminator
+}
+
+void ento::registerUnreachableCodeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<UnreachableCodeChecker>();
+}
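A hypothetical example where the path-sensitive analysis can prove a statement is never executed:

    int unreachable_code_example(void) {
      int flag = 1;
      if (flag)
        return 1;
      return 0;   // the analyzer proves this statement is never executed
    }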
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
new file mode 100644
index 0000000..38c9cc1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -0,0 +1,162 @@
+//=== VLASizeChecker.cpp - Variable-length array size checker --*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines VLASizeChecker, a builtin check in ExprEngine that
+// performs checks for declaration of VLA of undefined or zero size.
+// In addition, VLASizeChecker is responsible for defining the extent
+// of the MemRegion that represents a VLA.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/AST/CharUnits.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class VLASizeChecker : public Checker< check::PreStmt<DeclStmt> > {
+ mutable OwningPtr<BugType> BT;
+ enum VLASize_Kind { VLA_Garbage, VLA_Zero, VLA_Tainted };
+
+ void reportBug(VLASize_Kind Kind,
+ const Expr *SizeE,
+ ProgramStateRef State,
+ CheckerContext &C) const;
+public:
+ void checkPreStmt(const DeclStmt *DS, CheckerContext &C) const;
+};
+} // end anonymous namespace
+
+void VLASizeChecker::reportBug(VLASize_Kind Kind,
+ const Expr *SizeE,
+ ProgramStateRef State,
+ CheckerContext &C) const {
+ // Generate an error node.
+ ExplodedNode *N = C.generateSink(State);
+ if (!N)
+ return;
+
+ if (!BT)
+ BT.reset(new BuiltinBug("Dangerous variable-length array (VLA) declaration"));
+
+ SmallString<256> buf;
+ llvm::raw_svector_ostream os(buf);
+ os << "Declared variable-length array (VLA) ";
+ switch (Kind) {
+ case VLA_Garbage:
+ os << "uses a garbage value as its size";
+ break;
+ case VLA_Zero:
+ os << "has zero size";
+ break;
+ case VLA_Tainted:
+ os << "has tainted size";
+ break;
+ }
+
+ BugReport *report = new BugReport(*BT, os.str(), N);
+ report->addRange(SizeE->getSourceRange());
+ report->addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, SizeE,
+ report));
+ C.EmitReport(report);
+ return;
+}
+
+void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
+ if (!DS->isSingleDecl())
+ return;
+
+ const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
+ if (!VD)
+ return;
+
+ ASTContext &Ctx = C.getASTContext();
+ const VariableArrayType *VLA = Ctx.getAsVariableArrayType(VD->getType());
+ if (!VLA)
+ return;
+
+ // FIXME: Handle multi-dimensional VLAs.
+ const Expr *SE = VLA->getSizeExpr();
+ ProgramStateRef state = C.getState();
+ SVal sizeV = state->getSVal(SE, C.getLocationContext());
+
+ if (sizeV.isUndef()) {
+ reportBug(VLA_Garbage, SE, state, C);
+ return;
+ }
+
+ // See if the size value is known. It can't be undefined because we would have
+ // warned about that already.
+ if (sizeV.isUnknown())
+ return;
+
+ // Check if the size is tainted.
+ if (state->isTainted(sizeV)) {
+ reportBug(VLA_Tainted, SE, 0, C);
+ return;
+ }
+
+ // Check if the size is zero.
+ DefinedSVal sizeD = cast<DefinedSVal>(sizeV);
+
+ ProgramStateRef stateNotZero, stateZero;
+ llvm::tie(stateNotZero, stateZero) = state->assume(sizeD);
+
+ if (stateZero && !stateNotZero) {
+ reportBug(VLA_Zero, SE, stateZero, C);
+ return;
+ }
+
+ // From this point on, assume that the size is not zero.
+ state = stateNotZero;
+
+ // VLASizeChecker is responsible for defining the extent of the array being
+ // declared. We do this by multiplying the array length by the element size,
+ // then matching that with the array region's extent symbol.
+
+ // Convert the array length to size_t.
+ SValBuilder &svalBuilder = C.getSValBuilder();
+ QualType SizeTy = Ctx.getSizeType();
+ NonLoc ArrayLength = cast<NonLoc>(svalBuilder.evalCast(sizeD, SizeTy,
+ SE->getType()));
+
+ // Get the element size.
+ CharUnits EleSize = Ctx.getTypeSizeInChars(VLA->getElementType());
+ SVal EleSizeVal = svalBuilder.makeIntVal(EleSize.getQuantity(), SizeTy);
+
+ // Multiply the array length by the element size.
+ SVal ArraySizeVal = svalBuilder.evalBinOpNN(state, BO_Mul, ArrayLength,
+ cast<NonLoc>(EleSizeVal), SizeTy);
+
+ // Finally, assume that the array's extent matches the given size.
+ const LocationContext *LC = C.getLocationContext();
+ DefinedOrUnknownSVal Extent =
+ state->getRegion(VD, LC)->getExtent(svalBuilder);
+ DefinedOrUnknownSVal ArraySize = cast<DefinedOrUnknownSVal>(ArraySizeVal);
+ DefinedOrUnknownSVal sizeIsKnown =
+ svalBuilder.evalEQ(state, Extent, ArraySize);
+ state = state->assume(sizeIsKnown, true);
+
+ // Assume should not fail at this point.
+ assert(state);
+
+ // Remember our assumptions!
+ C.addTransition(state);
+}
+
+void ento::registerVLASizeChecker(CheckerManager &mgr) {
+ mgr.registerChecker<VLASizeChecker>();
+}
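A hypothetical example declaring a VLA whose size is a garbage value:

    void vla_size_example(void) {
      int n;         // deliberately left uninitialized
      int buf[n];    // the VLA uses a garbage value as its size
      (void)buf;
    }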
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
new file mode 100644
index 0000000..f7c7c0c
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Checkers/VirtualCallChecker.cpp
@@ -0,0 +1,241 @@
+//=======- VirtualCallChecker.cpp --------------------------------*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a checker that checks virtual function calls during
+// construction or destruction of C++ objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckers.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtVisitor.h"
+#include "llvm/Support/SaveAndRestore.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class WalkAST : public StmtVisitor<WalkAST> {
+ BugReporter &BR;
+ AnalysisDeclContext *AC;
+
+ typedef const CallExpr * WorkListUnit;
+ typedef SmallVector<WorkListUnit, 20> DFSWorkList;
+
+  /// A vector representing the worklist, which holds a chain of CallExprs.
+ DFSWorkList WList;
+
+ // PreVisited : A CallExpr to this FunctionDecl is in the worklist, but the
+ // body has not been visited yet.
+ // PostVisited : A CallExpr to this FunctionDecl is in the worklist, and the
+ // body has been visited.
+ enum Kind { NotVisited,
+ PreVisited, /**< A CallExpr to this FunctionDecl is in the
+ worklist, but the body has not yet been
+ visited. */
+ PostVisited /**< A CallExpr to this FunctionDecl is in the
+ worklist, and the body has been visited. */
+ } K;
+
+ /// A DenseMap that records visited states of FunctionDecls.
+ llvm::DenseMap<const FunctionDecl *, Kind> VisitedFunctions;
+
+ /// The CallExpr whose body is currently being visited. This is used for
+ /// generating bug reports. This is null while visiting the body of a
+ /// constructor or destructor.
+ const CallExpr *visitingCallExpr;
+
+public:
+ WalkAST(BugReporter &br, AnalysisDeclContext *ac)
+ : BR(br),
+ AC(ac),
+ visitingCallExpr(0) {}
+
+ bool hasWork() const { return !WList.empty(); }
+
+ /// This method adds a CallExpr to the worklist and marks the callee as
+ /// being PreVisited.
+ void Enqueue(WorkListUnit WLUnit) {
+ const FunctionDecl *FD = WLUnit->getDirectCallee();
+ if (!FD || !FD->getBody())
+ return;
+ Kind &K = VisitedFunctions[FD];
+ if (K != NotVisited)
+ return;
+ K = PreVisited;
+ WList.push_back(WLUnit);
+ }
+
+ /// This method returns an item from the worklist without removing it.
+ WorkListUnit Dequeue() {
+ assert(!WList.empty());
+ return WList.back();
+ }
+
+ void Execute() {
+ while (hasWork()) {
+ WorkListUnit WLUnit = Dequeue();
+ const FunctionDecl *FD = WLUnit->getDirectCallee();
+ assert(FD && FD->getBody());
+
+ if (VisitedFunctions[FD] == PreVisited) {
+ // If the callee is PreVisited, walk its body.
+ // Visit the body.
+ SaveAndRestore<const CallExpr *> SaveCall(visitingCallExpr, WLUnit);
+ Visit(FD->getBody());
+
+ // Mark the function as being PostVisited to indicate we have
+ // scanned the body.
+ VisitedFunctions[FD] = PostVisited;
+ continue;
+ }
+
+ // Otherwise, the callee is PostVisited.
+ // Remove it from the worklist.
+ assert(VisitedFunctions[FD] == PostVisited);
+ WList.pop_back();
+ }
+ }
+
+ // Stmt visitor methods.
+ void VisitCallExpr(CallExpr *CE);
+ void VisitCXXMemberCallExpr(CallExpr *CE);
+ void VisitStmt(Stmt *S) { VisitChildren(S); }
+ void VisitChildren(Stmt *S);
+
+ void ReportVirtualCall(const CallExpr *CE, bool isPure);
+
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// AST walking.
+//===----------------------------------------------------------------------===//
+
+void WalkAST::VisitChildren(Stmt *S) {
+ for (Stmt::child_iterator I = S->child_begin(), E = S->child_end(); I!=E; ++I)
+ if (Stmt *child = *I)
+ Visit(child);
+}
+
+void WalkAST::VisitCallExpr(CallExpr *CE) {
+ VisitChildren(CE);
+ Enqueue(CE);
+}
+
+void WalkAST::VisitCXXMemberCallExpr(CallExpr *CE) {
+ VisitChildren(CE);
+ bool callIsNonVirtual = false;
+
+  // There are several situations in which we skip this check.
+ if (MemberExpr *CME = dyn_cast<MemberExpr>(CE->getCallee())) {
+ // If the member access is fully qualified (i.e., X::F), then treat
+ // this as a non-virtual call and do not warn.
+ if (CME->getQualifier())
+ callIsNonVirtual = true;
+
+ // Elide analyzing the call entirely if the base pointer is not 'this'.
+ if (Expr *base = CME->getBase()->IgnoreImpCasts())
+ if (!isa<CXXThisExpr>(base))
+ return;
+ }
+
+ // Get the callee.
+ const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(CE->getDirectCallee());
+ if (MD && MD->isVirtual() && !callIsNonVirtual)
+ ReportVirtualCall(CE, MD->isPure());
+
+ Enqueue(CE);
+}
+
+void WalkAST::ReportVirtualCall(const CallExpr *CE, bool isPure) {
+ SmallString<100> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << "Call Path : ";
+ // Name of current visiting CallExpr.
+ os << *CE->getDirectCallee();
+
+  // Name of the CallExpr whose body is currently being walked.
+ if (visitingCallExpr)
+ os << " <-- " << *visitingCallExpr->getDirectCallee();
+ // Names of FunctionDecls in worklist with state PostVisited.
+ for (SmallVectorImpl<const CallExpr *>::iterator I = WList.end(),
+ E = WList.begin(); I != E; --I) {
+ const FunctionDecl *FD = (*(I-1))->getDirectCallee();
+ assert(FD);
+ if (VisitedFunctions[FD] == PostVisited)
+ os << " <-- " << *FD;
+ }
+
+ PathDiagnosticLocation CELoc =
+ PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
+ SourceRange R = CE->getCallee()->getSourceRange();
+
+ if (isPure) {
+    os << "\n" << "Calling a pure virtual function during construction or "
+       << "destruction may lead to undefined behaviour";
+    BR.EmitBasicReport(AC->getDecl(),
+                       "Call to pure virtual function during construction or "
+                       "destruction",
+ "Cplusplus",
+ os.str(), CELoc, &R, 1);
+ return;
+ }
+ else {
+    os << "\n" << "Calls to virtual functions during construction or "
+       << "destruction will never go to a more derived class";
+    BR.EmitBasicReport(AC->getDecl(),
+                       "Call to virtual function during construction or "
+                       "destruction",
+ "Cplusplus",
+ os.str(), CELoc, &R, 1);
+ return;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// VirtualCallChecker
+//===----------------------------------------------------------------------===//
+
+namespace {
+class VirtualCallChecker : public Checker<check::ASTDecl<CXXRecordDecl> > {
+public:
+ void checkASTDecl(const CXXRecordDecl *RD, AnalysisManager& mgr,
+ BugReporter &BR) const {
+ WalkAST walker(BR, mgr.getAnalysisDeclContext(RD));
+
+ // Check the constructors.
+ for (CXXRecordDecl::ctor_iterator I = RD->ctor_begin(), E = RD->ctor_end();
+ I != E; ++I) {
+ if (!I->isCopyOrMoveConstructor())
+ if (Stmt *Body = I->getBody()) {
+ walker.Visit(Body);
+ walker.Execute();
+ }
+ }
+
+ // Check the destructor.
+ if (CXXDestructorDecl *DD = RD->getDestructor())
+ if (Stmt *Body = DD->getBody()) {
+ walker.Visit(Body);
+ walker.Execute();
+ }
+ }
+};
+}
+
+void ento::registerVirtualCallChecker(CheckerManager &mgr) {
+ mgr.registerChecker<VirtualCallChecker>();
+}
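A hypothetical example of a virtual call made during construction, the pattern this checker warns about:

    struct Widget {
      Widget() { init(); }     // virtual call during construction: this always
                               // runs Widget::init, never a derived override
      virtual ~Widget() {}
      virtual void init() {}
    };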
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
new file mode 100644
index 0000000..82ac8bd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/AnalysisManager.cpp
@@ -0,0 +1,100 @@
+//===-- AnalysisManager.cpp -------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/Index/Entity.h"
+#include "clang/Index/Indexer.h"
+
+using namespace clang;
+using namespace ento;
+
+void AnalysisManager::anchor() { }
+
+AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
+ const LangOptions &lang,
+ PathDiagnosticConsumer *pd,
+ StoreManagerCreator storemgr,
+ ConstraintManagerCreator constraintmgr,
+ CheckerManager *checkerMgr,
+ idx::Indexer *idxer,
+ unsigned maxnodes, unsigned maxvisit,
+ bool vizdot, bool vizubi,
+ AnalysisPurgeMode purge,
+ bool eager, bool trim,
+ bool useUnoptimizedCFG,
+ bool addImplicitDtors, bool addInitializers,
+ bool eagerlyTrimEGraph,
+ AnalysisIPAMode ipa,
+ unsigned inlineMaxStack,
+ unsigned inlineMaxFunctionSize,
+ AnalysisInliningMode IMode,
+ bool NoRetry)
+ : AnaCtxMgr(useUnoptimizedCFG, addImplicitDtors, addInitializers),
+ Ctx(ctx), Diags(diags), LangOpts(lang), PD(pd),
+ CreateStoreMgr(storemgr), CreateConstraintMgr(constraintmgr),
+ CheckerMgr(checkerMgr), Idxer(idxer),
+ AScope(ScopeDecl), MaxNodes(maxnodes), MaxVisit(maxvisit),
+ VisualizeEGDot(vizdot), VisualizeEGUbi(vizubi), PurgeDead(purge),
+ EagerlyAssume(eager), TrimGraph(trim),
+ EagerlyTrimEGraph(eagerlyTrimEGraph),
+ IPAMode(ipa),
+ InlineMaxStackDepth(inlineMaxStack),
+ InlineMaxFunctionSize(inlineMaxFunctionSize),
+ InliningMode(IMode),
+ NoRetryExhausted(NoRetry)
+{
+ AnaCtxMgr.getCFGBuildOptions().setAllAlwaysAdd();
+}
+
+AnalysisManager::AnalysisManager(ASTContext &ctx, DiagnosticsEngine &diags,
+ AnalysisManager &ParentAM)
+ : AnaCtxMgr(ParentAM.AnaCtxMgr.getUseUnoptimizedCFG(),
+ ParentAM.AnaCtxMgr.getCFGBuildOptions().AddImplicitDtors,
+ ParentAM.AnaCtxMgr.getCFGBuildOptions().AddInitializers),
+ Ctx(ctx), Diags(diags),
+ LangOpts(ParentAM.LangOpts), PD(ParentAM.getPathDiagnosticConsumer()),
+ CreateStoreMgr(ParentAM.CreateStoreMgr),
+ CreateConstraintMgr(ParentAM.CreateConstraintMgr),
+ CheckerMgr(ParentAM.CheckerMgr),
+ Idxer(ParentAM.Idxer),
+ AScope(ScopeDecl),
+ MaxNodes(ParentAM.MaxNodes),
+ MaxVisit(ParentAM.MaxVisit),
+ VisualizeEGDot(ParentAM.VisualizeEGDot),
+ VisualizeEGUbi(ParentAM.VisualizeEGUbi),
+ PurgeDead(ParentAM.PurgeDead),
+ EagerlyAssume(ParentAM.EagerlyAssume),
+ TrimGraph(ParentAM.TrimGraph),
+ EagerlyTrimEGraph(ParentAM.EagerlyTrimEGraph),
+ IPAMode(ParentAM.IPAMode),
+ InlineMaxStackDepth(ParentAM.InlineMaxStackDepth),
+ InlineMaxFunctionSize(ParentAM.InlineMaxFunctionSize),
+ InliningMode(ParentAM.InliningMode),
+ NoRetryExhausted(ParentAM.NoRetryExhausted)
+{
+ AnaCtxMgr.getCFGBuildOptions().setAllAlwaysAdd();
+}
+
+
+AnalysisDeclContext *
+AnalysisManager::getAnalysisDeclContextInAnotherTU(const Decl *D) {
+ idx::Entity Ent = idx::Entity::get(const_cast<Decl *>(D),
+ Idxer->getProgram());
+ FunctionDecl *FuncDef;
+ idx::TranslationUnit *TU;
+ llvm::tie(FuncDef, TU) = Idxer->getDefinitionFor(Ent);
+
+ if (FuncDef == 0)
+ return 0;
+
+  // This AnalysisDeclContext wraps a function definition in another
+  // translation unit, but it is still owned by the AnalysisManager associated
+  // with the current translation unit.
+ return AnaCtxMgr.getContext(FuncDef, TU);
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp
new file mode 100644
index 0000000..2d9addd
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicConstraintManager.cpp
@@ -0,0 +1,367 @@
+//== BasicConstraintManager.cpp - Manage basic constraints.------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BasicConstraintManager, a class that tracks simple
+// equality and inequality constraints on symbolic values of ProgramState.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SimpleConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+
+namespace { class ConstNotEq {}; }
+namespace { class ConstEq {}; }
+
+typedef llvm::ImmutableMap<SymbolRef,ProgramState::IntSetTy> ConstNotEqTy;
+typedef llvm::ImmutableMap<SymbolRef,const llvm::APSInt*> ConstEqTy;
+
+static int ConstEqIndex = 0;
+static int ConstNotEqIndex = 0;
+
+namespace clang {
+namespace ento {
+template<>
+struct ProgramStateTrait<ConstNotEq> :
+ public ProgramStatePartialTrait<ConstNotEqTy> {
+ static inline void *GDMIndex() { return &ConstNotEqIndex; }
+};
+
+template<>
+struct ProgramStateTrait<ConstEq> : public ProgramStatePartialTrait<ConstEqTy> {
+ static inline void *GDMIndex() { return &ConstEqIndex; }
+};
+}
+}
+
+namespace {
+// BasicConstraintManager only tracks equality and inequality constraints of
+// constants and integer variables.
+class BasicConstraintManager
+ : public SimpleConstraintManager {
+ ProgramState::IntSetTy::Factory ISetFactory;
+public:
+ BasicConstraintManager(ProgramStateManager &statemgr, SubEngine &subengine)
+ : SimpleConstraintManager(subengine),
+ ISetFactory(statemgr.getAllocator()) {}
+
+ ProgramStateRef assumeSymNE(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
+
+ ProgramStateRef assumeSymEQ(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
+
+ ProgramStateRef assumeSymLT(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
+
+ ProgramStateRef assumeSymGT(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
+
+ ProgramStateRef assumeSymGE(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
+
+ ProgramStateRef assumeSymLE(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
+
+ ProgramStateRef AddEQ(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt& V);
+
+ ProgramStateRef AddNE(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt& V);
+
+ const llvm::APSInt* getSymVal(ProgramStateRef state,
+ SymbolRef sym) const;
+
+ bool isNotEqual(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt& V) const;
+
+ bool isEqual(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt& V) const;
+
+ ProgramStateRef removeDeadBindings(ProgramStateRef state,
+ SymbolReaper& SymReaper);
+
+ void print(ProgramStateRef state,
+ raw_ostream &Out,
+ const char* nl,
+ const char *sep);
+};
+
+} // end anonymous namespace
+
+ConstraintManager*
+ento::CreateBasicConstraintManager(ProgramStateManager& statemgr,
+ SubEngine &subengine) {
+ return new BasicConstraintManager(statemgr, subengine);
+}
+
+ProgramStateRef
+BasicConstraintManager::assumeSymNE(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
+ // First, determine if sym == X, where X+Adjustment != V.
+ llvm::APSInt Adjusted = V-Adjustment;
+ if (const llvm::APSInt* X = getSymVal(state, sym)) {
+ bool isFeasible = (*X != Adjusted);
+ return isFeasible ? state : NULL;
+ }
+
+ // Second, determine if sym+Adjustment != V.
+ if (isNotEqual(state, sym, Adjusted))
+ return state;
+
+ // If we reach here, sym is not a constant and we don't know if it is != V.
+ // Make that assumption.
+ return AddNE(state, sym, Adjusted);
+}
+
+ProgramStateRef
+BasicConstraintManager::assumeSymEQ(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
+  // First, determine if sym == X, where X+Adjustment == V.
+ llvm::APSInt Adjusted = V-Adjustment;
+ if (const llvm::APSInt* X = getSymVal(state, sym)) {
+ bool isFeasible = (*X == Adjusted);
+ return isFeasible ? state : NULL;
+ }
+
+ // Second, determine if sym+Adjustment != V.
+ if (isNotEqual(state, sym, Adjusted))
+ return NULL;
+
+ // If we reach here, sym is not a constant and we don't know if it is == V.
+ // Make that assumption.
+ return AddEQ(state, sym, Adjusted);
+}
+
+// The logic for these will be handled in another ConstraintManager.
+ProgramStateRef
+BasicConstraintManager::assumeSymLT(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
+ // Is 'V' the smallest possible value?
+ if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) {
+ // sym cannot be any value less than 'V'. This path is infeasible.
+ return NULL;
+ }
+
+  // FIXME: For now, treat assuming x < y the same as assuming sym != V.
+ return assumeSymNE(state, sym, V, Adjustment);
+}
+
+ProgramStateRef
+BasicConstraintManager::assumeSymGT(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
+ // Is 'V' the largest possible value?
+ if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) {
+ // sym cannot be any value greater than 'V'. This path is infeasible.
+ return NULL;
+ }
+
+ // FIXME: For now, treat assuming sym > V the same as assuming sym != V.
+ return assumeSymNE(state, sym, V, Adjustment);
+}
+
+ProgramStateRef
+BasicConstraintManager::assumeSymGE(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
+ // Reject a path if the value of sym is a constant X and !(X+Adj >= V).
+ if (const llvm::APSInt *X = getSymVal(state, sym)) {
+ bool isFeasible = (*X >= V-Adjustment);
+ return isFeasible ? state : NULL;
+ }
+
+ // Sym is not a constant, but it is worth looking to see if V is the
+ // maximum integer value.
+ if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) {
+ llvm::APSInt Adjusted = V-Adjustment;
+
+ // If we know that sym != V (after adjustment), then this condition
+ // is infeasible since there is no other value greater than V.
+ bool isFeasible = !isNotEqual(state, sym, Adjusted);
+
+ // If the path is still feasible then as a consequence we know that
+ // 'sym+Adjustment == V' because there are no larger values.
+ // Add this constraint.
+ return isFeasible ? AddEQ(state, sym, Adjusted) : NULL;
+ }
+
+ return state;
+}
+
+ProgramStateRef
+BasicConstraintManager::assumeSymLE(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
+ // Reject a path if the value of sym is a constant X and !(X+Adj <= V).
+ if (const llvm::APSInt* X = getSymVal(state, sym)) {
+ bool isFeasible = (*X <= V-Adjustment);
+ return isFeasible ? state : NULL;
+ }
+
+ // Sym is not a constant, but it is worth looking to see if V is the
+ // minimum integer value.
+ if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) {
+ llvm::APSInt Adjusted = V-Adjustment;
+
+ // If we know that sym != V (after adjustment), then this condition
+ // is infeasible since there is no other value less than V.
+ bool isFeasible = !isNotEqual(state, sym, Adjusted);
+
+ // If the path is still feasible then as a consequence we know that
+ // 'sym+Adjustment == V' because there are no smaller values.
+ // Add this constraint.
+ return isFeasible ? AddEQ(state, sym, Adjusted) : NULL;
+ }
+
+ return state;
+}
+
+ProgramStateRef BasicConstraintManager::AddEQ(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt& V) {
+ // Create a new state with the old binding replaced.
+ return state->set<ConstEq>(sym, &state->getBasicVals().getValue(V));
+}
+
+ProgramStateRef BasicConstraintManager::AddNE(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt& V) {
+
+ // First, retrieve the NE-set associated with the given symbol.
+ ConstNotEqTy::data_type* T = state->get<ConstNotEq>(sym);
+ ProgramState::IntSetTy S = T ? *T : ISetFactory.getEmptySet();
+
+ // Now add V to the NE set.
+ S = ISetFactory.add(S, &state->getBasicVals().getValue(V));
+
+ // Create a new state with the old binding replaced.
+ return state->set<ConstNotEq>(sym, S);
+}
+
+const llvm::APSInt* BasicConstraintManager::getSymVal(ProgramStateRef state,
+ SymbolRef sym) const {
+ const ConstEqTy::data_type* T = state->get<ConstEq>(sym);
+ return T ? *T : NULL;
+}
+
+bool BasicConstraintManager::isNotEqual(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt& V) const {
+
+ // Retrieve the NE-set associated with the given symbol.
+ const ConstNotEqTy::data_type* T = state->get<ConstNotEq>(sym);
+
+ // See if V is present in the NE-set.
+ return T ? T->contains(&state->getBasicVals().getValue(V)) : false;
+}
+
+bool BasicConstraintManager::isEqual(ProgramStateRef state,
+ SymbolRef sym,
+ const llvm::APSInt& V) const {
+ // Retrieve the EQ-set associated with the given symbol.
+ const ConstEqTy::data_type* T = state->get<ConstEq>(sym);
+ // See if V is present in the EQ-set.
+ return T ? **T == V : false;
+}
+
+/// Scan all symbols referenced by the constraints. If a symbol is no longer
+/// alive (as determined by the SymbolReaper), remove its constraints.
+ProgramStateRef
+BasicConstraintManager::removeDeadBindings(ProgramStateRef state,
+ SymbolReaper& SymReaper) {
+
+ ConstEqTy CE = state->get<ConstEq>();
+ ConstEqTy::Factory& CEFactory = state->get_context<ConstEq>();
+
+ for (ConstEqTy::iterator I = CE.begin(), E = CE.end(); I!=E; ++I) {
+ SymbolRef sym = I.getKey();
+ if (SymReaper.maybeDead(sym))
+ CE = CEFactory.remove(CE, sym);
+ }
+ state = state->set<ConstEq>(CE);
+
+ ConstNotEqTy CNE = state->get<ConstNotEq>();
+ ConstNotEqTy::Factory& CNEFactory = state->get_context<ConstNotEq>();
+
+ for (ConstNotEqTy::iterator I = CNE.begin(), E = CNE.end(); I != E; ++I) {
+ SymbolRef sym = I.getKey();
+ if (SymReaper.maybeDead(sym))
+ CNE = CNEFactory.remove(CNE, sym);
+ }
+
+ return state->set<ConstNotEq>(CNE);
+}
+
+void BasicConstraintManager::print(ProgramStateRef state,
+ raw_ostream &Out,
+ const char* nl, const char *sep) {
+ // Print equality constraints.
+
+ ConstEqTy CE = state->get<ConstEq>();
+
+ if (!CE.isEmpty()) {
+ Out << nl << sep << "'==' constraints:";
+ for (ConstEqTy::iterator I = CE.begin(), E = CE.end(); I!=E; ++I)
+ Out << nl << " $" << I.getKey() << " : " << *I.getData();
+ }
+
+ // Print != constraints.
+
+ ConstNotEqTy CNE = state->get<ConstNotEq>();
+
+ if (!CNE.isEmpty()) {
+ Out << nl << sep << "'!=' constraints:";
+
+ for (ConstNotEqTy::iterator I = CNE.begin(), EI = CNE.end(); I!=EI; ++I) {
+ Out << nl << " $" << I.getKey() << " : ";
+ bool isFirst = true;
+
+ ProgramState::IntSetTy::iterator J = I.getData().begin(),
+ EJ = I.getData().end();
+
+ for ( ; J != EJ; ++J) {
+ if (isFirst) isFirst = false;
+ else Out << ", ";
+
+ Out << (*J)->getSExtValue(); // Hack: should print to raw_ostream.
+ }
+ }
+ }
+}
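
The manager above boils down to two pieces of bookkeeping: known equalities (ConstEq) and sets of excluded values (ConstNotEq). A rough standalone sketch of that idea, using std::map/std::set with value-semantics copies instead of Clang's immutable ProgramState; the names State, assumeNE, and assumeEQ are invented for illustration, and the relational cases are omitted:

#include <cassert>
#include <map>
#include <optional>
#include <set>

using Sym = int;

// A "program state" with value semantics: callers keep both the old and the
// refined copy, loosely mirroring the immutable ProgramState updates above.
struct State {
  std::map<Sym, long> constEq;              // sym -> known constant value
  std::map<Sym, std::set<long>> constNotEq; // sym -> values it cannot take
};

// Assume sym != v; return the refined state, or nullopt if infeasible.
std::optional<State> assumeNE(State s, Sym sym, long v) {
  if (auto it = s.constEq.find(sym); it != s.constEq.end())
    return it->second != v ? std::optional<State>(std::move(s)) : std::nullopt;
  s.constNotEq[sym].insert(v); // analogous to AddNE
  return s;
}

// Assume sym == v; return the refined state, or nullopt if infeasible.
std::optional<State> assumeEQ(State s, Sym sym, long v) {
  if (auto it = s.constEq.find(sym); it != s.constEq.end())
    return it->second == v ? std::optional<State>(std::move(s)) : std::nullopt;
  if (auto it = s.constNotEq.find(sym);
      it != s.constNotEq.end() && it->second.count(v))
    return std::nullopt;       // we already know sym != v
  s.constEq[sym] = v;          // analogous to AddEQ
  return s;
}

int main() {
  State s0;
  auto s1 = assumeNE(s0, /*sym=*/1, 5);   // learn: sym1 != 5
  assert(s1);
  assert(!assumeEQ(*s1, 1, 5));           // now sym1 == 5 is infeasible
  assert(assumeEQ(*s1, 1, 7));            // but sym1 == 7 is still possible
}
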
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
new file mode 100644
index 0000000..fe96700
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BasicValueFactory.cpp
@@ -0,0 +1,291 @@
+//=== BasicValueFactory.cpp - Basic values for Path Sens analysis --*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BasicValueFactory, a class that manages the lifetime
+// of APSInt objects and symbolic constraints used by ExprEngine
+// and related classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+
+using namespace clang;
+using namespace ento;
+
+void CompoundValData::Profile(llvm::FoldingSetNodeID& ID, QualType T,
+ llvm::ImmutableList<SVal> L) {
+ T.Profile(ID);
+ ID.AddPointer(L.getInternalPointer());
+}
+
+void LazyCompoundValData::Profile(llvm::FoldingSetNodeID& ID,
+ const StoreRef &store,
+ const TypedValueRegion *region) {
+ ID.AddPointer(store.getStore());
+ ID.AddPointer(region);
+}
+
+typedef std::pair<SVal, uintptr_t> SValData;
+typedef std::pair<SVal, SVal> SValPair;
+
+namespace llvm {
+template<> struct FoldingSetTrait<SValData> {
+ static inline void Profile(const SValData& X, llvm::FoldingSetNodeID& ID) {
+ X.first.Profile(ID);
+ ID.AddPointer( (void*) X.second);
+ }
+};
+
+template<> struct FoldingSetTrait<SValPair> {
+ static inline void Profile(const SValPair& X, llvm::FoldingSetNodeID& ID) {
+ X.first.Profile(ID);
+ X.second.Profile(ID);
+ }
+};
+}
+
+typedef llvm::FoldingSet<llvm::FoldingSetNodeWrapper<SValData> >
+ PersistentSValsTy;
+
+typedef llvm::FoldingSet<llvm::FoldingSetNodeWrapper<SValPair> >
+ PersistentSValPairsTy;
+
+BasicValueFactory::~BasicValueFactory() {
+ // Note that the destructor for the contents of APSIntSet will never be
+ // called, so we iterate over the set and invoke the destructor for each
+ // APSInt. This frees any auxiliary memory allocated to represent very
+ // large constants.
+ for (APSIntSetTy::iterator I=APSIntSet.begin(), E=APSIntSet.end(); I!=E; ++I)
+ I->getValue().~APSInt();
+
+ delete (PersistentSValsTy*) PersistentSVals;
+ delete (PersistentSValPairsTy*) PersistentSValPairs;
+}
+
+const llvm::APSInt& BasicValueFactory::getValue(const llvm::APSInt& X) {
+ llvm::FoldingSetNodeID ID;
+ void *InsertPos;
+ typedef llvm::FoldingSetNodeWrapper<llvm::APSInt> FoldNodeTy;
+
+ X.Profile(ID);
+ FoldNodeTy* P = APSIntSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!P) {
+ P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
+ new (P) FoldNodeTy(X);
+ APSIntSet.InsertNode(P, InsertPos);
+ }
+
+ return *P;
+}
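
getValue() above canonicalizes APSInts through a FoldingSet so each distinct constant is allocated once and can then be referred to by pointer. A hypothetical standard-library-only analogue of that uniquing idea; the Interner class and its method name are made up, and it interns strings rather than APSInts:

#include <cassert>
#include <set>
#include <string>

// Hypothetical interner: returns a reference to a canonical, long-lived copy
// of the value, so equal values share one object (and one address).
class Interner {
  std::set<std::string> pool_; // node-based container: element addresses are stable
public:
  const std::string &get(const std::string &v) {
    return *pool_.insert(v).first; // insert if new, otherwise reuse the old node
  }
};

int main() {
  Interner in;
  const std::string &a = in.get("42");
  const std::string &b = in.get("42");
  assert(&a == &b);             // same canonical object
  assert(&a != &in.get("43"));  // different values get different objects
}
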
+
+const llvm::APSInt& BasicValueFactory::getValue(const llvm::APInt& X,
+ bool isUnsigned) {
+ llvm::APSInt V(X, isUnsigned);
+ return getValue(V);
+}
+
+const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, unsigned BitWidth,
+ bool isUnsigned) {
+ llvm::APSInt V(BitWidth, isUnsigned);
+ V = X;
+ return getValue(V);
+}
+
+const llvm::APSInt& BasicValueFactory::getValue(uint64_t X, QualType T) {
+
+ unsigned bits = Ctx.getTypeSize(T);
+ llvm::APSInt V(bits,
+ T->isUnsignedIntegerOrEnumerationType() || Loc::isLocType(T));
+ V = X;
+ return getValue(V);
+}
+
+const CompoundValData*
+BasicValueFactory::getCompoundValData(QualType T,
+ llvm::ImmutableList<SVal> Vals) {
+
+ llvm::FoldingSetNodeID ID;
+ CompoundValData::Profile(ID, T, Vals);
+ void *InsertPos;
+
+ CompoundValData* D = CompoundValDataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!D) {
+ D = (CompoundValData*) BPAlloc.Allocate<CompoundValData>();
+ new (D) CompoundValData(T, Vals);
+ CompoundValDataSet.InsertNode(D, InsertPos);
+ }
+
+ return D;
+}
+
+const LazyCompoundValData*
+BasicValueFactory::getLazyCompoundValData(const StoreRef &store,
+ const TypedValueRegion *region) {
+ llvm::FoldingSetNodeID ID;
+ LazyCompoundValData::Profile(ID, store, region);
+ void *InsertPos;
+
+ LazyCompoundValData *D =
+ LazyCompoundValDataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!D) {
+ D = (LazyCompoundValData*) BPAlloc.Allocate<LazyCompoundValData>();
+ new (D) LazyCompoundValData(store, region);
+ LazyCompoundValDataSet.InsertNode(D, InsertPos);
+ }
+
+ return D;
+}
+
+const llvm::APSInt*
+BasicValueFactory::evalAPSInt(BinaryOperator::Opcode Op,
+ const llvm::APSInt& V1, const llvm::APSInt& V2) {
+
+ switch (Op) {
+ default:
+ assert (false && "Invalid Opcode.");
+
+ case BO_Mul:
+ return &getValue( V1 * V2 );
+
+ case BO_Div:
+ return &getValue( V1 / V2 );
+
+ case BO_Rem:
+ return &getValue( V1 % V2 );
+
+ case BO_Add:
+ return &getValue( V1 + V2 );
+
+ case BO_Sub:
+ return &getValue( V1 - V2 );
+
+ case BO_Shl: {
+
+ // FIXME: This logic should probably go higher up, where we can
+ // test these conditions symbolically.
+
+ // FIXME: Expand these checks to include all undefined behavior.
+
+ if (V2.isSigned() && V2.isNegative())
+ return NULL;
+
+ uint64_t Amt = V2.getZExtValue();
+
+ if (Amt > V1.getBitWidth())
+ return NULL;
+
+ return &getValue( V1.operator<<( (unsigned) Amt ));
+ }
+
+ case BO_Shr: {
+
+ // FIXME: This logic should probably go higher up, where we can
+ // test these conditions symbolically.
+
+ // FIXME: Expand these checks to include all undefined behavior.
+
+ if (V2.isSigned() && V2.isNegative())
+ return NULL;
+
+ uint64_t Amt = V2.getZExtValue();
+
+ if (Amt > V1.getBitWidth())
+ return NULL;
+
+ return &getValue( V1.operator>>( (unsigned) Amt ));
+ }
+
+ case BO_LT:
+ return &getTruthValue( V1 < V2 );
+
+ case BO_GT:
+ return &getTruthValue( V1 > V2 );
+
+ case BO_LE:
+ return &getTruthValue( V1 <= V2 );
+
+ case BO_GE:
+ return &getTruthValue( V1 >= V2 );
+
+ case BO_EQ:
+ return &getTruthValue( V1 == V2 );
+
+ case BO_NE:
+ return &getTruthValue( V1 != V2 );
+
+ // Note: LAnd, LOr, Comma are handled specially by higher-level logic.
+
+ case BO_And:
+ return &getValue( V1 & V2 );
+
+ case BO_Or:
+ return &getValue( V1 | V2 );
+
+ case BO_Xor:
+ return &getValue( V1 ^ V2 );
+ }
+}
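
The BO_Shl/BO_Shr cases guard against shift amounts that are negative or too large before evaluating. A small standalone sketch of the same kind of guard on plain 64-bit integers; it rejects shifts of 64 or more bits because, unlike APInt, built-in integer shifts by the full width are undefined. The helper name safeShl is invented for illustration:

#include <cassert>
#include <cstdint>
#include <optional>

// Reject dubious shift amounts instead of invoking undefined behaviour.
std::optional<uint64_t> safeShl(uint64_t v, int64_t amt) {
  if (amt < 0)
    return std::nullopt;                          // negative shift: undefined
  if (static_cast<uint64_t>(amt) >= 64)
    return std::nullopt;                          // too wide for uint64_t
  return v << amt;
}

int main() {
  assert(safeShl(1, 3) == 8u);
  assert(!safeShl(1, -1));   // rejected rather than UB
  assert(!safeShl(1, 64));   // rejected rather than UB
}
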
+
+
+const std::pair<SVal, uintptr_t>&
+BasicValueFactory::getPersistentSValWithData(const SVal& V, uintptr_t Data) {
+
+ // Lazily create the folding set.
+ if (!PersistentSVals) PersistentSVals = new PersistentSValsTy();
+
+ llvm::FoldingSetNodeID ID;
+ void *InsertPos;
+ V.Profile(ID);
+ ID.AddPointer((void*) Data);
+
+ PersistentSValsTy& Map = *((PersistentSValsTy*) PersistentSVals);
+
+ typedef llvm::FoldingSetNodeWrapper<SValData> FoldNodeTy;
+ FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!P) {
+ P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
+ new (P) FoldNodeTy(std::make_pair(V, Data));
+ Map.InsertNode(P, InsertPos);
+ }
+
+ return P->getValue();
+}
+
+const std::pair<SVal, SVal>&
+BasicValueFactory::getPersistentSValPair(const SVal& V1, const SVal& V2) {
+
+ // Lazily create the folding set.
+ if (!PersistentSValPairs) PersistentSValPairs = new PersistentSValPairsTy();
+
+ llvm::FoldingSetNodeID ID;
+ void *InsertPos;
+ V1.Profile(ID);
+ V2.Profile(ID);
+
+ PersistentSValPairsTy& Map = *((PersistentSValPairsTy*) PersistentSValPairs);
+
+ typedef llvm::FoldingSetNodeWrapper<SValPair> FoldNodeTy;
+ FoldNodeTy* P = Map.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!P) {
+ P = (FoldNodeTy*) BPAlloc.Allocate<FoldNodeTy>();
+ new (P) FoldNodeTy(std::make_pair(V1, V2));
+ Map.InsertNode(P, InsertPos);
+ }
+
+ return P->getValue();
+}
+
+const SVal* BasicValueFactory::getPersistentSVal(SVal X) {
+ return &getPersistentSValWithData(X, 0).first;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BlockCounter.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BlockCounter.cpp
new file mode 100644
index 0000000..74d761e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BlockCounter.cpp
@@ -0,0 +1,86 @@
+//==- BlockCounter.cpp - ADT for counting block visits -----------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BlockCounter, an abstract data type used to count
+// the number of times a given block has been visited along a path
+// analyzed by CoreEngine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/BlockCounter.h"
+#include "llvm/ADT/ImmutableMap.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class CountKey {
+ const StackFrameContext *CallSite;
+ unsigned BlockID;
+
+public:
+ CountKey(const StackFrameContext *CS, unsigned ID)
+ : CallSite(CS), BlockID(ID) {}
+
+ bool operator==(const CountKey &RHS) const {
+ return (CallSite == RHS.CallSite) && (BlockID == RHS.BlockID);
+ }
+
+ bool operator<(const CountKey &RHS) const {
+ return (CallSite == RHS.CallSite) ? (BlockID < RHS.BlockID)
+ : (CallSite < RHS.CallSite);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddPointer(CallSite);
+ ID.AddInteger(BlockID);
+ }
+};
+
+}
+
+typedef llvm::ImmutableMap<CountKey, unsigned> CountMap;
+
+static inline CountMap GetMap(void *D) {
+ return CountMap(static_cast<CountMap::TreeTy*>(D));
+}
+
+static inline CountMap::Factory& GetFactory(void *F) {
+ return *static_cast<CountMap::Factory*>(F);
+}
+
+unsigned BlockCounter::getNumVisited(const StackFrameContext *CallSite,
+ unsigned BlockID) const {
+ CountMap M = GetMap(Data);
+ CountMap::data_type* T = M.lookup(CountKey(CallSite, BlockID));
+ return T ? *T : 0;
+}
+
+BlockCounter::Factory::Factory(llvm::BumpPtrAllocator& Alloc) {
+ F = new CountMap::Factory(Alloc);
+}
+
+BlockCounter::Factory::~Factory() {
+ delete static_cast<CountMap::Factory*>(F);
+}
+
+BlockCounter
+BlockCounter::Factory::IncrementCount(BlockCounter BC,
+ const StackFrameContext *CallSite,
+ unsigned BlockID) {
+ return BlockCounter(GetFactory(F).add(GetMap(BC.Data),
+ CountKey(CallSite, BlockID),
+ BC.getNumVisited(CallSite, BlockID)+1).getRoot());
+}
+
+BlockCounter
+BlockCounter::Factory::GetEmptyCounter() {
+ return BlockCounter(GetFactory(F).getEmptyMap().getRoot());
+}
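
BlockCounter above is a persistent map from (call site, block ID) to a visit count. A simplified stand-in using std::map with copy-on-update instead of llvm::ImmutableMap; CallSiteId, numVisited, and incrementCount are invented names for illustration:

#include <cassert>
#include <map>
#include <utility>

using CallSiteId = const void *;
using CountMap = std::map<std::pair<CallSiteId, unsigned>, unsigned>;

// How many times has this block been visited at this call site?
unsigned numVisited(const CountMap &m, CallSiteId cs, unsigned blockID) {
  auto it = m.find({cs, blockID});
  return it == m.end() ? 0 : it->second;
}

// Copy-and-update, in the spirit of IncrementCount above: the original map is untouched.
CountMap incrementCount(CountMap m, CallSiteId cs, unsigned blockID) {
  ++m[{cs, blockID}];
  return m;
}

int main() {
  int dummyFrame = 0; // stands in for a StackFrameContext pointer
  CountMap empty;
  CountMap once = incrementCount(empty, &dummyFrame, /*blockID=*/3);
  assert(numVisited(empty, &dummyFrame, 3) == 0); // original map unchanged
  assert(numVisited(once, &dummyFrame, 3) == 1);
}
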
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
new file mode 100644
index 0000000..a264212
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -0,0 +1,2056 @@
+// BugReporter.cpp - Generate PathDiagnostics for Bugs ------------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines BugReporter, a utility class for generating
+// PathDiagnostics.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/IntrusiveRefCntPtr.h"
+#include <queue>
+
+using namespace clang;
+using namespace ento;
+
+BugReporterVisitor::~BugReporterVisitor() {}
+
+void BugReporterContext::anchor() {}
+
+//===----------------------------------------------------------------------===//
+// Helper routines for walking the ExplodedGraph and fetching statements.
+//===----------------------------------------------------------------------===//
+
+static inline const Stmt *GetStmt(const ProgramPoint &P) {
+ if (const StmtPoint* SP = dyn_cast<StmtPoint>(&P))
+ return SP->getStmt();
+ else if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P))
+ return BE->getSrc()->getTerminator();
+
+ return 0;
+}
+
+static inline const ExplodedNode*
+GetPredecessorNode(const ExplodedNode *N) {
+ return N->pred_empty() ? NULL : *(N->pred_begin());
+}
+
+static inline const ExplodedNode*
+GetSuccessorNode(const ExplodedNode *N) {
+ return N->succ_empty() ? NULL : *(N->succ_begin());
+}
+
+static const Stmt *GetPreviousStmt(const ExplodedNode *N) {
+ for (N = GetPredecessorNode(N); N; N = GetPredecessorNode(N))
+ if (const Stmt *S = GetStmt(N->getLocation()))
+ return S;
+
+ return 0;
+}
+
+static const Stmt *GetNextStmt(const ExplodedNode *N) {
+ for (N = GetSuccessorNode(N); N; N = GetSuccessorNode(N))
+ if (const Stmt *S = GetStmt(N->getLocation())) {
+ // Check if the statement is '?' or '&&'/'||'. These are "merges",
+ // not actual statement points.
+ switch (S->getStmtClass()) {
+ case Stmt::ChooseExprClass:
+ case Stmt::BinaryConditionalOperatorClass: continue;
+ case Stmt::ConditionalOperatorClass: continue;
+ case Stmt::BinaryOperatorClass: {
+ BinaryOperatorKind Op = cast<BinaryOperator>(S)->getOpcode();
+ if (Op == BO_LAnd || Op == BO_LOr)
+ continue;
+ break;
+ }
+ default:
+ break;
+ }
+ return S;
+ }
+
+ return 0;
+}
+
+static inline const Stmt*
+GetCurrentOrPreviousStmt(const ExplodedNode *N) {
+ if (const Stmt *S = GetStmt(N->getLocation()))
+ return S;
+
+ return GetPreviousStmt(N);
+}
+
+static inline const Stmt*
+GetCurrentOrNextStmt(const ExplodedNode *N) {
+ if (const Stmt *S = GetStmt(N->getLocation()))
+ return S;
+
+ return GetNextStmt(N);
+}
+
+//===----------------------------------------------------------------------===//
+// Diagnostic cleanup.
+//===----------------------------------------------------------------------===//
+
+/// Recursively scan through a path and prune out call and macro pieces that
+/// aren't needed. Return true if afterwards the path contains "interesting
+/// stuff", which means it should not be pruned from the parent path.
+static bool RemoveUneededCalls(PathPieces &pieces) {
+ bool containsSomethingInteresting = false;
+ const unsigned N = pieces.size();
+
+ for (unsigned i = 0 ; i < N ; ++i) {
+ // Remove the front piece from the path. If it is still something we
+ // want to keep once we are done, we will push it back on the end.
+ IntrusiveRefCntPtr<PathDiagnosticPiece> piece(pieces.front());
+ pieces.pop_front();
+
+ switch (piece->getKind()) {
+ case PathDiagnosticPiece::Call: {
+ PathDiagnosticCallPiece *call = cast<PathDiagnosticCallPiece>(piece);
+ // Recursively clean out the subclass. Keep this call around if
+ // it contains any informative diagnostics.
+ if (!RemoveUneededCalls(call->path))
+ continue;
+ containsSomethingInteresting = true;
+ break;
+ }
+ case PathDiagnosticPiece::Macro: {
+ PathDiagnosticMacroPiece *macro = cast<PathDiagnosticMacroPiece>(piece);
+ if (!RemoveUneededCalls(macro->subPieces))
+ continue;
+ containsSomethingInteresting = true;
+ break;
+ }
+ case PathDiagnosticPiece::Event: {
+ PathDiagnosticEventPiece *event = cast<PathDiagnosticEventPiece>(piece);
+ // We never throw away an event, but we do throw it away wholesale
+ // as part of a path if we throw the entire path away.
+ if (event->isPrunable())
+ continue;
+ containsSomethingInteresting = true;
+ break;
+ }
+ case PathDiagnosticPiece::ControlFlow:
+ break;
+ }
+
+ pieces.push_back(piece);
+ }
+
+ return containsSomethingInteresting;
+}
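
A condensed sketch of the pruning recursion above, modelled loosely on its control flow: call pieces are kept only if their sub-path still contains something interesting, and the function reports whether anything interesting survived. The Piece/prune names and boolean fields are invented; the real code works on PathDiagnosticPiece subclasses:

#include <cassert>
#include <memory>
#include <vector>

// A diagnostic "piece" is either an event (possibly prunable) or a call
// carrying a sub-path.
struct Piece {
  bool isCall = false;
  bool prunableEvent = false;                   // meaningful when !isCall
  std::vector<std::unique_ptr<Piece>> subPath;  // meaningful when isCall
};

// Prune 'path' in place; return true if it still contains interesting pieces.
bool prune(std::vector<std::unique_ptr<Piece>> &path) {
  std::vector<std::unique_ptr<Piece>> kept;
  bool interesting = false;
  for (auto &p : path) {
    if (p->isCall) {
      if (!prune(p->subPath))
        continue;               // drop calls whose sub-path has nothing interesting
    } else if (p->prunableEvent) {
      continue;                 // drop prunable events
    }
    interesting = true;
    kept.push_back(std::move(p));
  }
  path = std::move(kept);
  return interesting;
}

int main() {
  std::vector<std::unique_ptr<Piece>> path;
  auto call = std::make_unique<Piece>();
  call->isCall = true;
  call->subPath.push_back(std::make_unique<Piece>()); // one prunable event inside
  call->subPath.back()->prunableEvent = true;
  path.push_back(std::move(call));
  assert(!prune(path) && path.empty()); // the whole call gets pruned away
}
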
+
+//===----------------------------------------------------------------------===//
+// PathDiagnosticBuilder and its associated routines and helper objects.
+//===----------------------------------------------------------------------===//
+
+typedef llvm::DenseMap<const ExplodedNode*,
+                       const ExplodedNode*> NodeBackMap;
+
+namespace {
+class NodeMapClosure : public BugReport::NodeResolver {
+ NodeBackMap& M;
+public:
+ NodeMapClosure(NodeBackMap *m) : M(*m) {}
+ ~NodeMapClosure() {}
+
+ const ExplodedNode *getOriginalNode(const ExplodedNode *N) {
+ NodeBackMap::iterator I = M.find(N);
+ return I == M.end() ? 0 : I->second;
+ }
+};
+
+class PathDiagnosticBuilder : public BugReporterContext {
+ BugReport *R;
+ PathDiagnosticConsumer *PDC;
+ OwningPtr<ParentMap> PM;
+ NodeMapClosure NMC;
+public:
+ const LocationContext *LC;
+
+ PathDiagnosticBuilder(GRBugReporter &br,
+ BugReport *r, NodeBackMap *Backmap,
+ PathDiagnosticConsumer *pdc)
+ : BugReporterContext(br),
+ R(r), PDC(pdc), NMC(Backmap), LC(r->getErrorNode()->getLocationContext())
+ {}
+
+ PathDiagnosticLocation ExecutionContinues(const ExplodedNode *N);
+
+ PathDiagnosticLocation ExecutionContinues(llvm::raw_string_ostream &os,
+ const ExplodedNode *N);
+
+ BugReport *getBugReport() { return R; }
+
+ Decl const &getCodeDecl() { return R->getErrorNode()->getCodeDecl(); }
+
+ ParentMap& getParentMap() { return LC->getParentMap(); }
+
+ const Stmt *getParent(const Stmt *S) {
+ return getParentMap().getParent(S);
+ }
+
+ virtual NodeMapClosure& getNodeResolver() { return NMC; }
+
+ PathDiagnosticLocation getEnclosingStmtLocation(const Stmt *S);
+
+ PathDiagnosticConsumer::PathGenerationScheme getGenerationScheme() const {
+ return PDC ? PDC->getGenerationScheme() : PathDiagnosticConsumer::Extensive;
+ }
+
+ bool supportsLogicalOpControlFlow() const {
+ return PDC ? PDC->supportsLogicalOpControlFlow() : true;
+ }
+};
+} // end anonymous namespace
+
+PathDiagnosticLocation
+PathDiagnosticBuilder::ExecutionContinues(const ExplodedNode *N) {
+ if (const Stmt *S = GetNextStmt(N))
+ return PathDiagnosticLocation(S, getSourceManager(), LC);
+
+ return PathDiagnosticLocation::createDeclEnd(N->getLocationContext(),
+ getSourceManager());
+}
+
+PathDiagnosticLocation
+PathDiagnosticBuilder::ExecutionContinues(llvm::raw_string_ostream &os,
+ const ExplodedNode *N) {
+
+ // Slow, but probably doesn't matter.
+ if (os.str().empty())
+ os << ' ';
+
+ const PathDiagnosticLocation &Loc = ExecutionContinues(N);
+
+ if (Loc.asStmt())
+ os << "Execution continues on line "
+ << getSourceManager().getExpansionLineNumber(Loc.asLocation())
+ << '.';
+ else {
+ os << "Execution jumps to the end of the ";
+ const Decl *D = N->getLocationContext()->getDecl();
+ if (isa<ObjCMethodDecl>(D))
+ os << "method";
+ else if (isa<FunctionDecl>(D))
+ os << "function";
+ else {
+ assert(isa<BlockDecl>(D));
+ os << "anonymous block";
+ }
+ os << '.';
+ }
+
+ return Loc;
+}
+
+static bool IsNested(const Stmt *S, ParentMap &PM) {
+ if (isa<Expr>(S) && PM.isConsumedExpr(cast<Expr>(S)))
+ return true;
+
+ const Stmt *Parent = PM.getParentIgnoreParens(S);
+
+ if (Parent)
+ switch (Parent->getStmtClass()) {
+ case Stmt::ForStmtClass:
+ case Stmt::DoStmtClass:
+ case Stmt::WhileStmtClass:
+ return true;
+ default:
+ break;
+ }
+
+ return false;
+}
+
+PathDiagnosticLocation
+PathDiagnosticBuilder::getEnclosingStmtLocation(const Stmt *S) {
+ assert(S && "Null Stmt *passed to getEnclosingStmtLocation");
+ ParentMap &P = getParentMap();
+ SourceManager &SMgr = getSourceManager();
+
+ while (IsNested(S, P)) {
+ const Stmt *Parent = P.getParentIgnoreParens(S);
+
+ if (!Parent)
+ break;
+
+ switch (Parent->getStmtClass()) {
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator *B = cast<BinaryOperator>(Parent);
+ if (B->isLogicalOp())
+ return PathDiagnosticLocation(S, SMgr, LC);
+ break;
+ }
+ case Stmt::CompoundStmtClass:
+ case Stmt::StmtExprClass:
+ return PathDiagnosticLocation(S, SMgr, LC);
+ case Stmt::ChooseExprClass:
+ // Similar to '?': if we are referring to the condition, just have the
+ // edge point to the entire choose expression.
+ if (cast<ChooseExpr>(Parent)->getCond() == S)
+ return PathDiagnosticLocation(Parent, SMgr, LC);
+ else
+ return PathDiagnosticLocation(S, SMgr, LC);
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass:
+ // For '?', if we are referring to condition, just have the edge point
+ // to the entire '?' expression.
+ if (cast<AbstractConditionalOperator>(Parent)->getCond() == S)
+ return PathDiagnosticLocation(Parent, SMgr, LC);
+ else
+ return PathDiagnosticLocation(S, SMgr, LC);
+ case Stmt::DoStmtClass:
+ return PathDiagnosticLocation(S, SMgr, LC);
+ case Stmt::ForStmtClass:
+ if (cast<ForStmt>(Parent)->getBody() == S)
+ return PathDiagnosticLocation(S, SMgr, LC);
+ break;
+ case Stmt::IfStmtClass:
+ if (cast<IfStmt>(Parent)->getCond() != S)
+ return PathDiagnosticLocation(S, SMgr, LC);
+ break;
+ case Stmt::ObjCForCollectionStmtClass:
+ if (cast<ObjCForCollectionStmt>(Parent)->getBody() == S)
+ return PathDiagnosticLocation(S, SMgr, LC);
+ break;
+ case Stmt::WhileStmtClass:
+ if (cast<WhileStmt>(Parent)->getCond() != S)
+ return PathDiagnosticLocation(S, SMgr, LC);
+ break;
+ default:
+ break;
+ }
+
+ S = Parent;
+ }
+
+ assert(S && "Cannot have null Stmt for PathDiagnosticLocation");
+
+ // Special case: DeclStmts can appear in for statement declarations, in which
+ // case the ForStmt is the context.
+ if (isa<DeclStmt>(S)) {
+ if (const Stmt *Parent = P.getParent(S)) {
+ switch (Parent->getStmtClass()) {
+ case Stmt::ForStmtClass:
+ case Stmt::ObjCForCollectionStmtClass:
+ return PathDiagnosticLocation(Parent, SMgr, LC);
+ default:
+ break;
+ }
+ }
+ }
+ else if (isa<BinaryOperator>(S)) {
+ // Special case: the binary operator represents the initialization
+ // code in a for statement (this can happen when the variable being
+ // initialized is a previously declared variable).
+ if (const ForStmt *FS =
+ dyn_cast_or_null<ForStmt>(P.getParentIgnoreParens(S))) {
+ if (FS->getInit() == S)
+ return PathDiagnosticLocation(FS, SMgr, LC);
+ }
+ }
+
+ return PathDiagnosticLocation(S, SMgr, LC);
+}
+
+//===----------------------------------------------------------------------===//
+// "Minimal" path diagnostic generation algorithm.
+//===----------------------------------------------------------------------===//
+typedef std::pair<PathDiagnosticCallPiece*, const ExplodedNode*> StackDiagPair;
+typedef SmallVector<StackDiagPair, 6> StackDiagVector;
+
+static void updateStackPiecesWithMessage(PathDiagnosticPiece *P,
+ StackDiagVector &CallStack) {
+ // If the piece contains a special message, add it to all the call
+ // pieces on the active stack.
+ if (PathDiagnosticEventPiece *ep =
+ dyn_cast<PathDiagnosticEventPiece>(P)) {
+
+ if (ep->hasCallStackHint())
+ for (StackDiagVector::iterator I = CallStack.begin(),
+ E = CallStack.end(); I != E; ++I) {
+ PathDiagnosticCallPiece *CP = I->first;
+ const ExplodedNode *N = I->second;
+ std::string stackMsg = ep->getCallStackMessage(N);
+
+ // The last message on the path to the final bug is the most important
+ // one. Since we traverse the path backwards, do not add the message
+ // if one has been previously added.
+ if (!CP->hasCallStackMessage())
+ CP->setCallStackMessage(stackMsg);
+ }
+ }
+}
+
+static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM);
+
+static void GenerateMinimalPathDiagnostic(PathDiagnostic& PD,
+ PathDiagnosticBuilder &PDB,
+ const ExplodedNode *N,
+ ArrayRef<BugReporterVisitor *> visitors) {
+
+ SourceManager& SMgr = PDB.getSourceManager();
+ const LocationContext *LC = PDB.LC;
+ const ExplodedNode *NextNode = N->pred_empty()
+ ? NULL : *(N->pred_begin());
+
+ StackDiagVector CallStack;
+
+ while (NextNode) {
+ N = NextNode;
+ PDB.LC = N->getLocationContext();
+ NextNode = GetPredecessorNode(N);
+
+ ProgramPoint P = N->getLocation();
+
+ if (const CallExit *CE = dyn_cast<CallExit>(&P)) {
+ PathDiagnosticCallPiece *C =
+ PathDiagnosticCallPiece::construct(N, *CE, SMgr);
+ PD.getActivePath().push_front(C);
+ PD.pushActivePath(&C->path);
+ CallStack.push_back(StackDiagPair(C, N));
+ continue;
+ }
+
+ if (const CallEnter *CE = dyn_cast<CallEnter>(&P)) {
+ PD.popActivePath();
+ // The current active path should never be empty. Either we
+ // just added a bunch of stuff to the top-level path, or
+ // we have a previous CallExit. If the front of the active
+ // path is not a PathDiagnosticCallPiece, it means that the
+ // path terminated within a function call. We must then take the
+ // current contents of the active path and place it within
+ // a new PathDiagnosticCallPiece.
+ assert(!PD.getActivePath().empty());
+ PathDiagnosticCallPiece *C =
+ dyn_cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
+ if (!C) {
+ const Decl *Caller = CE->getLocationContext()->getDecl();
+ C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
+ }
+ C->setCallee(*CE, SMgr);
+ if (!CallStack.empty()) {
+ assert(CallStack.back().first == C);
+ CallStack.pop_back();
+ }
+ continue;
+ }
+
+ if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ const CFGBlock *Src = BE->getSrc();
+ const CFGBlock *Dst = BE->getDst();
+ const Stmt *T = Src->getTerminator();
+
+ if (!T)
+ continue;
+
+ PathDiagnosticLocation Start =
+ PathDiagnosticLocation::createBegin(T, SMgr,
+ N->getLocationContext());
+
+ switch (T->getStmtClass()) {
+ default:
+ break;
+
+ case Stmt::GotoStmtClass:
+ case Stmt::IndirectGotoStmtClass: {
+ const Stmt *S = GetNextStmt(N);
+
+ if (!S)
+ continue;
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ const PathDiagnosticLocation &End = PDB.getEnclosingStmtLocation(S);
+
+ os << "Control jumps to line "
+ << End.asLocation().getExpansionLineNumber();
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ break;
+ }
+
+ case Stmt::SwitchStmtClass: {
+ // Figure out what case arm we took.
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ if (const Stmt *S = Dst->getLabel()) {
+ PathDiagnosticLocation End(S, SMgr, LC);
+
+ switch (S->getStmtClass()) {
+ default:
+ os << "No cases match in the switch statement. "
+ "Control jumps to line "
+ << End.asLocation().getExpansionLineNumber();
+ break;
+ case Stmt::DefaultStmtClass:
+ os << "Control jumps to the 'default' case at line "
+ << End.asLocation().getExpansionLineNumber();
+ break;
+
+ case Stmt::CaseStmtClass: {
+ os << "Control jumps to 'case ";
+ const CaseStmt *Case = cast<CaseStmt>(S);
+ const Expr *LHS = Case->getLHS()->IgnoreParenCasts();
+
+ // Determine if it is an enum.
+ bool GetRawInt = true;
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(LHS)) {
+ // FIXME: Maybe this should be an assertion. Are there cases
+ // where it is not an EnumConstantDecl?
+ const EnumConstantDecl *D =
+ dyn_cast<EnumConstantDecl>(DR->getDecl());
+
+ if (D) {
+ GetRawInt = false;
+ os << *D;
+ }
+ }
+
+ if (GetRawInt)
+ os << LHS->EvaluateKnownConstInt(PDB.getASTContext());
+
+ os << ":' at line "
+ << End.asLocation().getExpansionLineNumber();
+ break;
+ }
+ }
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ else {
+ os << "'Default' branch taken. ";
+ const PathDiagnosticLocation &End = PDB.ExecutionContinues(os, N);
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+
+ break;
+ }
+
+ case Stmt::BreakStmtClass:
+ case Stmt::ContinueStmtClass: {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ break;
+ }
+
+ // Determine control-flow for ternary '?'.
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass: {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "'?' condition is ";
+
+ if (*(Src->succ_begin()+1) == Dst)
+ os << "false";
+ else
+ os << "true";
+
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ break;
+ }
+
+ // Determine control-flow for short-circuited '&&' and '||'.
+ case Stmt::BinaryOperatorClass: {
+ if (!PDB.supportsLogicalOpControlFlow())
+ break;
+
+ const BinaryOperator *B = cast<BinaryOperator>(T);
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+ os << "Left side of '";
+
+ if (B->getOpcode() == BO_LAnd) {
+ os << "&&" << "' is ";
+
+ if (*(Src->succ_begin()+1) == Dst) {
+ os << "false";
+ PathDiagnosticLocation End(B->getLHS(), SMgr, LC);
+ PathDiagnosticLocation Start =
+ PathDiagnosticLocation::createOperatorLoc(B, SMgr);
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ else {
+ os << "true";
+ PathDiagnosticLocation Start(B->getLHS(), SMgr, LC);
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ }
+ else {
+ assert(B->getOpcode() == BO_LOr);
+ os << "||" << "' is ";
+
+ if (*(Src->succ_begin()+1) == Dst) {
+ os << "false";
+ PathDiagnosticLocation Start(B->getLHS(), SMgr, LC);
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ else {
+ os << "true";
+ PathDiagnosticLocation End(B->getLHS(), SMgr, LC);
+ PathDiagnosticLocation Start =
+ PathDiagnosticLocation::createOperatorLoc(B, SMgr);
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ }
+
+ break;
+ }
+
+ case Stmt::DoStmtClass: {
+ if (*(Src->succ_begin()) == Dst) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "Loop condition is true. ";
+ PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
+
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ else {
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ "Loop condition is false. Exiting loop"));
+ }
+
+ break;
+ }
+
+ case Stmt::WhileStmtClass:
+ case Stmt::ForStmtClass: {
+ if (*(Src->succ_begin()+1) == Dst) {
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "Loop condition is false. ";
+ PathDiagnosticLocation End = PDB.ExecutionContinues(os, N);
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ os.str()));
+ }
+ else {
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ "Loop condition is true. Entering loop body"));
+ }
+
+ break;
+ }
+
+ case Stmt::IfStmtClass: {
+ PathDiagnosticLocation End = PDB.ExecutionContinues(N);
+
+ if (const Stmt *S = End.asStmt())
+ End = PDB.getEnclosingStmtLocation(S);
+
+ if (*(Src->succ_begin()+1) == Dst)
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ "Taking false branch"));
+ else
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(Start, End,
+ "Taking true branch"));
+
+ break;
+ }
+ }
+ }
+
+ if (NextNode) {
+ // Add diagnostic pieces from custom visitors.
+ BugReport *R = PDB.getBugReport();
+ for (ArrayRef<BugReporterVisitor *>::iterator I = visitors.begin(),
+ E = visitors.end();
+ I != E; ++I) {
+ if (PathDiagnosticPiece *p = (*I)->VisitNode(N, NextNode, PDB, *R)) {
+ PD.getActivePath().push_front(p);
+ updateStackPiecesWithMessage(p, CallStack);
+ }
+ }
+ }
+ }
+
+ // After constructing the full PathDiagnostic, do a pass over it to compact
+ // PathDiagnosticPieces that occur within a macro.
+ CompactPathDiagnostic(PD.getMutablePieces(), PDB.getSourceManager());
+}
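
Stripped of the diagnostics themselves, the loop above has a simple shape: walk from the error node back through single predecessors toward the root, prepending a piece per step so the finished path reads in program order. A toy illustration of that traversal shape; Node and buildPath are made-up names, not the Clang API:

#include <cassert>
#include <deque>
#include <string>

struct Node {
  std::string label;
  const Node *pred = nullptr; // single predecessor, as in the walk above
};

std::deque<std::string> buildPath(const Node *errorNode) {
  std::deque<std::string> path;
  for (const Node *n = errorNode; n; n = n->pred)
    path.push_front(n->label); // push_front, mirroring getActivePath().push_front(...)
  return path;
}

int main() {
  Node root{"entry"}, mid{"branch", &root}, err{"error", &mid};
  std::deque<std::string> path = buildPath(&err);
  assert(path.size() == 3);
  assert(path.front() == "entry" && path.back() == "error");
}
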
+
+//===----------------------------------------------------------------------===//
+// "Extensive" PathDiagnostic generation.
+//===----------------------------------------------------------------------===//
+
+static bool IsControlFlowExpr(const Stmt *S) {
+ const Expr *E = dyn_cast<Expr>(S);
+
+ if (!E)
+ return false;
+
+ E = E->IgnoreParenCasts();
+
+ if (isa<AbstractConditionalOperator>(E))
+ return true;
+
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(E))
+ if (B->isLogicalOp())
+ return true;
+
+ return false;
+}
+
+namespace {
+class ContextLocation : public PathDiagnosticLocation {
+ bool IsDead;
+public:
+ ContextLocation(const PathDiagnosticLocation &L, bool isdead = false)
+ : PathDiagnosticLocation(L), IsDead(isdead) {}
+
+ void markDead() { IsDead = true; }
+ bool isDead() const { return IsDead; }
+};
+
+class EdgeBuilder {
+ std::vector<ContextLocation> CLocs;
+ typedef std::vector<ContextLocation>::iterator iterator;
+ PathDiagnostic &PD;
+ PathDiagnosticBuilder &PDB;
+ PathDiagnosticLocation PrevLoc;
+
+ bool IsConsumedExpr(const PathDiagnosticLocation &L);
+
+ bool containsLocation(const PathDiagnosticLocation &Container,
+ const PathDiagnosticLocation &Containee);
+
+ PathDiagnosticLocation getContextLocation(const PathDiagnosticLocation &L);
+
+ PathDiagnosticLocation cleanUpLocation(PathDiagnosticLocation L,
+ bool firstCharOnly = false) {
+ if (const Stmt *S = L.asStmt()) {
+ const Stmt *Original = S;
+ while (1) {
+ // Adjust the location for some expressions that are best referenced
+ // by one of their subexpressions.
+ switch (S->getStmtClass()) {
+ default:
+ break;
+ case Stmt::ParenExprClass:
+ case Stmt::GenericSelectionExprClass:
+ S = cast<Expr>(S)->IgnoreParens();
+ firstCharOnly = true;
+ continue;
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass:
+ S = cast<AbstractConditionalOperator>(S)->getCond();
+ firstCharOnly = true;
+ continue;
+ case Stmt::ChooseExprClass:
+ S = cast<ChooseExpr>(S)->getCond();
+ firstCharOnly = true;
+ continue;
+ case Stmt::BinaryOperatorClass:
+ S = cast<BinaryOperator>(S)->getLHS();
+ firstCharOnly = true;
+ continue;
+ }
+
+ break;
+ }
+
+ if (S != Original)
+ L = PathDiagnosticLocation(S, L.getManager(), PDB.LC);
+ }
+
+ if (firstCharOnly)
+ L = PathDiagnosticLocation::createSingleLocation(L);
+
+ return L;
+ }
+
+ void popLocation() {
+ if (!CLocs.back().isDead() && CLocs.back().asLocation().isFileID()) {
+ // For contexts, we only want the first character as the range.
+ rawAddEdge(cleanUpLocation(CLocs.back(), true));
+ }
+ CLocs.pop_back();
+ }
+
+public:
+ EdgeBuilder(PathDiagnostic &pd, PathDiagnosticBuilder &pdb)
+ : PD(pd), PDB(pdb) {
+
+ // If the PathDiagnostic already has pieces, add the enclosing statement
+ // of the first piece as a context as well.
+ if (!PD.path.empty()) {
+ PrevLoc = (*PD.path.begin())->getLocation();
+
+ if (const Stmt *S = PrevLoc.asStmt())
+ addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
+ }
+ }
+
+ ~EdgeBuilder() {
+ while (!CLocs.empty()) popLocation();
+
+ // Finally, add an initial edge from the start location of the first
+ // statement (if it doesn't already exist).
+ PathDiagnosticLocation L = PathDiagnosticLocation::createDeclBegin(
+ PDB.LC,
+ PDB.getSourceManager());
+ if (L.isValid())
+ rawAddEdge(L);
+ }
+
+ void flushLocations() {
+ while (!CLocs.empty())
+ popLocation();
+ PrevLoc = PathDiagnosticLocation();
+ }
+
+ void addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd = false);
+
+ void rawAddEdge(PathDiagnosticLocation NewLoc);
+
+ void addContext(const Stmt *S);
+ void addExtendedContext(const Stmt *S);
+};
+} // end anonymous namespace
+
+
+PathDiagnosticLocation
+EdgeBuilder::getContextLocation(const PathDiagnosticLocation &L) {
+ if (const Stmt *S = L.asStmt()) {
+ if (IsControlFlowExpr(S))
+ return L;
+
+ return PDB.getEnclosingStmtLocation(S);
+ }
+
+ return L;
+}
+
+bool EdgeBuilder::containsLocation(const PathDiagnosticLocation &Container,
+ const PathDiagnosticLocation &Containee) {
+
+ if (Container == Containee)
+ return true;
+
+ if (Container.asDecl())
+ return true;
+
+ if (const Stmt *S = Containee.asStmt())
+ if (const Stmt *ContainerS = Container.asStmt()) {
+ while (S) {
+ if (S == ContainerS)
+ return true;
+ S = PDB.getParent(S);
+ }
+ return false;
+ }
+
+ // Less accurate: compare using source ranges.
+ SourceRange ContainerR = Container.asRange();
+ SourceRange ContaineeR = Containee.asRange();
+
+ SourceManager &SM = PDB.getSourceManager();
+ SourceLocation ContainerRBeg = SM.getExpansionLoc(ContainerR.getBegin());
+ SourceLocation ContainerREnd = SM.getExpansionLoc(ContainerR.getEnd());
+ SourceLocation ContaineeRBeg = SM.getExpansionLoc(ContaineeR.getBegin());
+ SourceLocation ContaineeREnd = SM.getExpansionLoc(ContaineeR.getEnd());
+
+ unsigned ContainerBegLine = SM.getExpansionLineNumber(ContainerRBeg);
+ unsigned ContainerEndLine = SM.getExpansionLineNumber(ContainerREnd);
+ unsigned ContaineeBegLine = SM.getExpansionLineNumber(ContaineeRBeg);
+ unsigned ContaineeEndLine = SM.getExpansionLineNumber(ContaineeREnd);
+
+ assert(ContainerBegLine <= ContainerEndLine);
+ assert(ContaineeBegLine <= ContaineeEndLine);
+
+ return (ContainerBegLine <= ContaineeBegLine &&
+ ContainerEndLine >= ContaineeEndLine &&
+ (ContainerBegLine != ContaineeBegLine ||
+ SM.getExpansionColumnNumber(ContainerRBeg) <=
+ SM.getExpansionColumnNumber(ContaineeRBeg)) &&
+ (ContainerEndLine != ContaineeEndLine ||
+ SM.getExpansionColumnNumber(ContainerREnd) >=
+ SM.getExpansionColumnNumber(ContaineeREnd)));
+}
+
+void EdgeBuilder::rawAddEdge(PathDiagnosticLocation NewLoc) {
+ if (!PrevLoc.isValid()) {
+ PrevLoc = NewLoc;
+ return;
+ }
+
+ const PathDiagnosticLocation &NewLocClean = cleanUpLocation(NewLoc);
+ const PathDiagnosticLocation &PrevLocClean = cleanUpLocation(PrevLoc);
+
+ if (NewLocClean.asLocation() == PrevLocClean.asLocation())
+ return;
+
+ // FIXME: Ignore intra-macro edges for now.
+ if (NewLocClean.asLocation().getExpansionLoc() ==
+ PrevLocClean.asLocation().getExpansionLoc())
+ return;
+
+ PD.getActivePath().push_front(new PathDiagnosticControlFlowPiece(NewLocClean, PrevLocClean));
+ PrevLoc = NewLoc;
+}
+
+void EdgeBuilder::addEdge(PathDiagnosticLocation NewLoc, bool alwaysAdd) {
+
+ if (!alwaysAdd && NewLoc.asLocation().isMacroID())
+ return;
+
+ const PathDiagnosticLocation &CLoc = getContextLocation(NewLoc);
+
+ while (!CLocs.empty()) {
+ ContextLocation &TopContextLoc = CLocs.back();
+
+ // Is the top location context the same as the one for the new location?
+ if (TopContextLoc == CLoc) {
+ if (alwaysAdd) {
+ if (IsConsumedExpr(TopContextLoc) &&
+ !IsControlFlowExpr(TopContextLoc.asStmt()))
+ TopContextLoc.markDead();
+
+ rawAddEdge(NewLoc);
+ }
+
+ return;
+ }
+
+ if (containsLocation(TopContextLoc, CLoc)) {
+ if (alwaysAdd) {
+ rawAddEdge(NewLoc);
+
+ if (IsConsumedExpr(CLoc) && !IsControlFlowExpr(CLoc.asStmt())) {
+ CLocs.push_back(ContextLocation(CLoc, true));
+ return;
+ }
+ }
+
+ CLocs.push_back(CLoc);
+ return;
+ }
+
+ // Context does not contain the location. Flush it.
+ popLocation();
+ }
+
+ // If we reach here, there is no enclosing context. Just add the edge.
+ rawAddEdge(NewLoc);
+}
+
+bool EdgeBuilder::IsConsumedExpr(const PathDiagnosticLocation &L) {
+ if (const Expr *X = dyn_cast_or_null<Expr>(L.asStmt()))
+ return PDB.getParentMap().isConsumedExpr(X) && !IsControlFlowExpr(X);
+
+ return false;
+}
+
+void EdgeBuilder::addExtendedContext(const Stmt *S) {
+ if (!S)
+ return;
+
+ const Stmt *Parent = PDB.getParent(S);
+ while (Parent) {
+ if (isa<CompoundStmt>(Parent))
+ Parent = PDB.getParent(Parent);
+ else
+ break;
+ }
+
+ if (Parent) {
+ switch (Parent->getStmtClass()) {
+ case Stmt::DoStmtClass:
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ addContext(Parent);
+ default:
+ break;
+ }
+ }
+
+ addContext(S);
+}
+
+void EdgeBuilder::addContext(const Stmt *S) {
+ if (!S)
+ return;
+
+ PathDiagnosticLocation L(S, PDB.getSourceManager(), PDB.LC);
+
+ while (!CLocs.empty()) {
+ const PathDiagnosticLocation &TopContextLoc = CLocs.back();
+
+ // Is the top location context the same as the one for the new location?
+ if (TopContextLoc == L)
+ return;
+
+ if (containsLocation(TopContextLoc, L)) {
+ CLocs.push_back(L);
+ return;
+ }
+
+ // Context does not contain the location. Flush it.
+ popLocation();
+ }
+
+ CLocs.push_back(L);
+}
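
addEdge/addContext maintain a stack of enclosing source contexts: contexts that no longer contain the new location are flushed, and the new location is pushed when it nests inside the current top. A simplified sketch using [begin, end] line intervals; it omits the edge emission that popLocation performs, and the Interval/addContext names here are illustrative only:

#include <cassert>
#include <vector>

struct Interval {
  int begin, end;
  bool contains(const Interval &other) const {
    return begin <= other.begin && end >= other.end;
  }
  bool operator==(const Interval &o) const {
    return begin == o.begin && end == o.end;
  }
};

// Keep only contexts that still enclose 'loc', then push 'loc' as the new
// innermost context (unless it already is the innermost one).
void addContext(std::vector<Interval> &stack, Interval loc) {
  while (!stack.empty()) {
    if (stack.back() == loc)
      return;                   // already the innermost context
    if (stack.back().contains(loc)) {
      stack.push_back(loc);     // nest inside the current context
      return;
    }
    stack.pop_back();           // context no longer applies: flush it
  }
  stack.push_back(loc);
}

int main() {
  std::vector<Interval> stack;
  addContext(stack, {1, 100});  // e.g. a function body
  addContext(stack, {10, 20});  // a loop inside it
  addContext(stack, {50, 60});  // a later statement: pops the loop first
  assert(stack.size() == 2);
  assert(stack.front().begin == 1 && stack.back().begin == 50);
}
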
+
+static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
+ PathDiagnosticBuilder &PDB,
+ const ExplodedNode *N,
+ ArrayRef<BugReporterVisitor *> visitors) {
+ EdgeBuilder EB(PD, PDB);
+ const SourceManager& SM = PDB.getSourceManager();
+ StackDiagVector CallStack;
+
+ const ExplodedNode *NextNode = N->pred_empty() ? NULL : *(N->pred_begin());
+ while (NextNode) {
+ N = NextNode;
+ NextNode = GetPredecessorNode(N);
+ ProgramPoint P = N->getLocation();
+
+ do {
+ if (const CallExit *CE = dyn_cast<CallExit>(&P)) {
+ const StackFrameContext *LCtx =
+ CE->getLocationContext()->getCurrentStackFrame();
+ PathDiagnosticLocation Loc(LCtx->getCallSite(),
+ PDB.getSourceManager(),
+ LCtx);
+ EB.addEdge(Loc, true);
+ EB.flushLocations();
+ PathDiagnosticCallPiece *C =
+ PathDiagnosticCallPiece::construct(N, *CE, SM);
+ PD.getActivePath().push_front(C);
+ PD.pushActivePath(&C->path);
+ CallStack.push_back(StackDiagPair(C, N));
+ break;
+ }
+
+ // Pop the call hierarchy if we are done walking the contents
+ // of a function call.
+ if (const CallEnter *CE = dyn_cast<CallEnter>(&P)) {
+ // Add an edge to the start of the function.
+ const Decl *D = CE->getCalleeContext()->getDecl();
+ PathDiagnosticLocation pos =
+ PathDiagnosticLocation::createBegin(D, SM);
+ EB.addEdge(pos);
+
+ // Flush all locations, and pop the active path.
+ EB.flushLocations();
+ PD.popActivePath();
+ assert(!PD.getActivePath().empty());
+ PDB.LC = N->getLocationContext();
+
+ // The current active path should never be empty. Either we
+ // just added a bunch of stuff to the top-level path, or
+ // we have a previous CallExit. If the front of the active
+ // path is not a PathDiagnosticCallPiece, it means that the
+ // path terminated within a function call. We must then take the
+ // current contents of the active path and place it within
+ // a new PathDiagnosticCallPiece.
+ PathDiagnosticCallPiece *C =
+ dyn_cast<PathDiagnosticCallPiece>(PD.getActivePath().front());
+ if (!C) {
+ const Decl * Caller = CE->getLocationContext()->getDecl();
+ C = PathDiagnosticCallPiece::construct(PD.getActivePath(), Caller);
+ }
+ C->setCallee(*CE, SM);
+ EB.addContext(CE->getCallExpr());
+
+ if (!CallStack.empty()) {
+ assert(CallStack.back().first == C);
+ CallStack.pop_back();
+ }
+ break;
+ }
+
+ // Note that it is important that we update the LocationContext
+ // after looking at CallExits. CallExit basically adds an
+ // edge in the *caller*, so we don't want to update the LocationContext
+ // too soon.
+ PDB.LC = N->getLocationContext();
+
+ // Block edges.
+ if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ const CFGBlock &Blk = *BE->getSrc();
+ const Stmt *Term = Blk.getTerminator();
+
+ // Are we jumping to the head of a loop? Add a special diagnostic.
+ if (const Stmt *Loop = BE->getDst()->getLoopTarget()) {
+ PathDiagnosticLocation L(Loop, SM, PDB.LC);
+ const CompoundStmt *CS = NULL;
+
+ if (!Term) {
+ if (const ForStmt *FS = dyn_cast<ForStmt>(Loop))
+ CS = dyn_cast<CompoundStmt>(FS->getBody());
+ else if (const WhileStmt *WS = dyn_cast<WhileStmt>(Loop))
+ CS = dyn_cast<CompoundStmt>(WS->getBody());
+ }
+
+ PathDiagnosticEventPiece *p =
+ new PathDiagnosticEventPiece(L,
+ "Looping back to the head of the loop");
+ p->setPrunable(true);
+
+ EB.addEdge(p->getLocation(), true);
+ PD.getActivePath().push_front(p);
+
+ if (CS) {
+ PathDiagnosticLocation BL =
+ PathDiagnosticLocation::createEndBrace(CS, SM);
+ EB.addEdge(BL);
+ }
+ }
+
+ if (Term)
+ EB.addContext(Term);
+
+ break;
+ }
+
+ if (const BlockEntrance *BE = dyn_cast<BlockEntrance>(&P)) {
+ if (const CFGStmt *S = BE->getFirstElement().getAs<CFGStmt>()) {
+ const Stmt *stmt = S->getStmt();
+ if (IsControlFlowExpr(stmt)) {
+ // Add the proper context for '&&', '||', and '?'.
+ EB.addContext(stmt);
+ }
+ else
+ EB.addExtendedContext(PDB.getEnclosingStmtLocation(stmt).asStmt());
+ }
+
+ break;
+ }
+
+
+ } while (0);
+
+ if (!NextNode)
+ continue;
+
+ // Add pieces from custom visitors.
+ BugReport *R = PDB.getBugReport();
+ for (ArrayRef<BugReporterVisitor *>::iterator I = visitors.begin(),
+ E = visitors.end();
+ I != E; ++I) {
+ if (PathDiagnosticPiece *p = (*I)->VisitNode(N, NextNode, PDB, *R)) {
+ const PathDiagnosticLocation &Loc = p->getLocation();
+ EB.addEdge(Loc, true);
+ PD.getActivePath().push_front(p);
+ updateStackPiecesWithMessage(p, CallStack);
+
+ if (const Stmt *S = Loc.asStmt())
+ EB.addExtendedContext(PDB.getEnclosingStmtLocation(S).asStmt());
+ }
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for BugType and subclasses.
+//===----------------------------------------------------------------------===//
+BugType::~BugType() { }
+
+void BugType::FlushReports(BugReporter &BR) {}
+
+void BuiltinBug::anchor() {}
+
+//===----------------------------------------------------------------------===//
+// Methods for BugReport and subclasses.
+//===----------------------------------------------------------------------===//
+
+void BugReport::NodeResolver::anchor() {}
+
+void BugReport::addVisitor(BugReporterVisitor* visitor) {
+ if (!visitor)
+ return;
+
+ llvm::FoldingSetNodeID ID;
+ visitor->Profile(ID);
+ void *InsertPos;
+
+ if (CallbacksSet.FindNodeOrInsertPos(ID, InsertPos)) {
+ delete visitor;
+ return;
+ }
+
+ CallbacksSet.InsertNode(visitor, InsertPos);
+ Callbacks.push_back(visitor);
+ ++ConfigurationChangeToken;
+}
+
+BugReport::~BugReport() {
+ for (visitor_iterator I = visitor_begin(), E = visitor_end(); I != E; ++I) {
+ delete *I;
+ }
+}
+
+const Decl *BugReport::getDeclWithIssue() const {
+ if (DeclWithIssue)
+ return DeclWithIssue;
+
+ const ExplodedNode *N = getErrorNode();
+ if (!N)
+ return 0;
+
+ const LocationContext *LC = N->getLocationContext();
+ return LC->getCurrentStackFrame()->getDecl();
+}
+
+void BugReport::Profile(llvm::FoldingSetNodeID& hash) const {
+ hash.AddPointer(&BT);
+ hash.AddString(Description);
+ if (UniqueingLocation.isValid()) {
+ UniqueingLocation.Profile(hash);
+ } else if (Location.isValid()) {
+ Location.Profile(hash);
+ } else {
+ assert(ErrorNode);
+ hash.AddPointer(GetCurrentOrPreviousStmt(ErrorNode));
+ }
+
+ for (SmallVectorImpl<SourceRange>::const_iterator I =
+ Ranges.begin(), E = Ranges.end(); I != E; ++I) {
+ const SourceRange range = *I;
+ if (!range.isValid())
+ continue;
+ hash.AddInteger(range.getBegin().getRawEncoding());
+ hash.AddInteger(range.getEnd().getRawEncoding());
+ }
+}
+
+void BugReport::markInteresting(SymbolRef sym) {
+ if (!sym)
+ return;
+
+ // If the symbol wasn't already in our set, note a configuration change.
+ if (interestingSymbols.insert(sym).second)
+ ++ConfigurationChangeToken;
+
+ if (const SymbolMetadata *meta = dyn_cast<SymbolMetadata>(sym))
+ interestingRegions.insert(meta->getRegion());
+}
+
+void BugReport::markInteresting(const MemRegion *R) {
+ if (!R)
+ return;
+
+ // If the base region wasn't already in our set, note a configuration change.
+ R = R->getBaseRegion();
+ if (interestingRegions.insert(R).second)
+ ++ConfigurationChangeToken;
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+ interestingSymbols.insert(SR->getSymbol());
+}
+
+void BugReport::markInteresting(SVal V) {
+ markInteresting(V.getAsRegion());
+ markInteresting(V.getAsSymbol());
+}
+
+bool BugReport::isInteresting(SVal V) const {
+ return isInteresting(V.getAsRegion()) || isInteresting(V.getAsSymbol());
+}
+
+bool BugReport::isInteresting(SymbolRef sym) const {
+ if (!sym)
+ return false;
+ // We don't currently consider metadata symbols to be interesting
+ // even if we know their region is interesting. Is that correct behavior?
+ return interestingSymbols.count(sym);
+}
+
+bool BugReport::isInteresting(const MemRegion *R) const {
+ if (!R)
+ return false;
+ R = R->getBaseRegion();
+ bool b = interestingRegions.count(R);
+ if (b)
+ return true;
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+ return interestingSymbols.count(SR->getSymbol());
+ return false;
+}
+
+
+const Stmt *BugReport::getStmt() const {
+ if (!ErrorNode)
+ return 0;
+
+ ProgramPoint ProgP = ErrorNode->getLocation();
+ const Stmt *S = NULL;
+
+ if (BlockEntrance *BE = dyn_cast<BlockEntrance>(&ProgP)) {
+ CFGBlock &Exit = ProgP.getLocationContext()->getCFG()->getExit();
+ if (BE->getBlock() == &Exit)
+ S = GetPreviousStmt(ErrorNode);
+ }
+ if (!S)
+ S = GetStmt(ProgP);
+
+ return S;
+}
+
+std::pair<BugReport::ranges_iterator, BugReport::ranges_iterator>
+BugReport::getRanges() {
+ // If no custom ranges, add the range of the statement corresponding to
+ // the error node.
+ if (Ranges.empty()) {
+ if (const Expr *E = dyn_cast_or_null<Expr>(getStmt()))
+ addRange(E->getSourceRange());
+ else
+ return std::make_pair(ranges_iterator(), ranges_iterator());
+ }
+
+ // User-specified absence of range info.
+ if (Ranges.size() == 1 && !Ranges.begin()->isValid())
+ return std::make_pair(ranges_iterator(), ranges_iterator());
+
+ return std::make_pair(Ranges.begin(), Ranges.end());
+}
+
+PathDiagnosticLocation BugReport::getLocation(const SourceManager &SM) const {
+ if (ErrorNode) {
+ assert(!Location.isValid() &&
+ "Either Location or ErrorNode should be specified but not both.");
+
+ if (const Stmt *S = GetCurrentOrPreviousStmt(ErrorNode)) {
+ const LocationContext *LC = ErrorNode->getLocationContext();
+
+ // For member expressions, return the location of the '.' or '->'.
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(S))
+ return PathDiagnosticLocation::createMemberLoc(ME, SM);
+ // For binary operators, return the location of the operator.
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(S))
+ return PathDiagnosticLocation::createOperatorLoc(B, SM);
+
+ return PathDiagnosticLocation::createBegin(S, SM, LC);
+ }
+ } else {
+ assert(Location.isValid());
+ return Location;
+ }
+
+ return PathDiagnosticLocation();
+}
+
+//===----------------------------------------------------------------------===//
+// Methods for BugReporter and subclasses.
+//===----------------------------------------------------------------------===//
+
+BugReportEquivClass::~BugReportEquivClass() { }
+GRBugReporter::~GRBugReporter() { }
+BugReporterData::~BugReporterData() {}
+
+ExplodedGraph &GRBugReporter::getGraph() { return Eng.getGraph(); }
+
+ProgramStateManager&
+GRBugReporter::getStateManager() { return Eng.getStateManager(); }
+
+BugReporter::~BugReporter() {
+ FlushReports();
+
+ // Free the bug reports we are tracking.
+ typedef std::vector<BugReportEquivClass *> ContTy;
+ for (ContTy::iterator I = EQClassesVector.begin(), E = EQClassesVector.end();
+ I != E; ++I) {
+ delete *I;
+ }
+}
+
+void BugReporter::FlushReports() {
+ if (BugTypes.isEmpty())
+ return;
+
+ // First flush the warnings for each BugType. This may end up creating new
+ // warnings and new BugTypes.
+ // FIXME: Only NSErrorChecker needs BugType's FlushReports.
+ // Turn NSErrorChecker into a proper checker and remove this.
+ SmallVector<const BugType*, 16> bugTypes;
+ for (BugTypesTy::iterator I=BugTypes.begin(), E=BugTypes.end(); I!=E; ++I)
+ bugTypes.push_back(*I);
+ for (SmallVector<const BugType*, 16>::iterator
+ I = bugTypes.begin(), E = bugTypes.end(); I != E; ++I)
+ const_cast<BugType*>(*I)->FlushReports(*this);
+
+ typedef llvm::FoldingSet<BugReportEquivClass> SetTy;
+ for (SetTy::iterator EI=EQClasses.begin(), EE=EQClasses.end(); EI!=EE;++EI){
+ BugReportEquivClass& EQ = *EI;
+ FlushReport(EQ);
+ }
+
+ // BugReporter owns and deletes only BugTypes created implicitly through
+ // EmitBasicReport.
+ // FIXME: There are leaks from checkers that assume that the BugTypes they
+ // create will be destroyed by the BugReporter.
+ for (llvm::StringMap<BugType*>::iterator
+ I = StrBugTypes.begin(), E = StrBugTypes.end(); I != E; ++I)
+ delete I->second;
+
+ // Remove all references to the BugType objects.
+ BugTypes = F.getEmptySet();
+}
+
+//===----------------------------------------------------------------------===//
+// PathDiagnostics generation.
+//===----------------------------------------------------------------------===//
+
+static std::pair<std::pair<ExplodedGraph*, NodeBackMap*>,
+ std::pair<ExplodedNode*, unsigned> >
+MakeReportGraph(const ExplodedGraph* G,
+ SmallVectorImpl<const ExplodedNode*> &nodes) {
+
+ // Create the trimmed graph. It will contain the shortest paths from the
+ // error nodes to the root. In the new graph we should only have one
+ // error node unless there are two or more error nodes with the same minimum
+ // path length.
+ ExplodedGraph* GTrim;
+ InterExplodedGraphMap* NMap;
+
+ llvm::DenseMap<const void*, const void*> InverseMap;
+ llvm::tie(GTrim, NMap) = G->Trim(nodes.data(), nodes.data() + nodes.size(),
+ &InverseMap);
+
+ // Create owning pointers for GTrim and NMap just to ensure that they are
+ // released when this function exits.
+ OwningPtr<ExplodedGraph> AutoReleaseGTrim(GTrim);
+ OwningPtr<InterExplodedGraphMap> AutoReleaseNMap(NMap);
+
+ // Find the (first) error node in the trimmed graph. We just need to consult
+ // the node map (NMap) which maps from nodes in the original graph to nodes
+ // in the new graph.
+
+ std::queue<const ExplodedNode*> WS;
+ typedef llvm::DenseMap<const ExplodedNode*, unsigned> IndexMapTy;
+ IndexMapTy IndexMap;
+
+ for (unsigned nodeIndex = 0 ; nodeIndex < nodes.size(); ++nodeIndex) {
+ const ExplodedNode *originalNode = nodes[nodeIndex];
+ if (const ExplodedNode *N = NMap->getMappedNode(originalNode)) {
+ WS.push(N);
+ IndexMap[originalNode] = nodeIndex;
+ }
+ }
+
+ assert(!WS.empty() && "No error node found in the trimmed graph.");
+
+ // Create a new (third!) graph with a single path. This is the graph
+ // that will be returned to the caller.
+ ExplodedGraph *GNew = new ExplodedGraph();
+
+ // Sometimes the trimmed graph can contain a cycle. Perform a reverse BFS
+ // to the root node, and then construct a new graph that contains only
+ // a single path.
+ llvm::DenseMap<const void*,unsigned> Visited;
+
+ unsigned cnt = 0;
+ const ExplodedNode *Root = 0;
+
+ while (!WS.empty()) {
+ const ExplodedNode *Node = WS.front();
+ WS.pop();
+
+ if (Visited.find(Node) != Visited.end())
+ continue;
+
+ Visited[Node] = cnt++;
+
+ if (Node->pred_empty()) {
+ Root = Node;
+ break;
+ }
+
+ for (ExplodedNode::const_pred_iterator I=Node->pred_begin(),
+ E=Node->pred_end(); I!=E; ++I)
+ WS.push(*I);
+ }
+
+ assert(Root);
+
+ // Now walk from the root down the BFS path, always taking the successor
+ // with the lowest number.
+ ExplodedNode *Last = 0, *First = 0;
+ NodeBackMap *BM = new NodeBackMap();
+ unsigned NodeIndex = 0;
+
+ for ( const ExplodedNode *N = Root ;;) {
+ // Lookup the number associated with the current node.
+ llvm::DenseMap<const void*,unsigned>::iterator I = Visited.find(N);
+ assert(I != Visited.end());
+
+ // Create the equivalent node in the new graph with the same state
+ // and location.
+ ExplodedNode *NewN = GNew->getNode(N->getLocation(), N->getState());
+
+ // Store the mapping to the original node.
+ llvm::DenseMap<const void*, const void*>::iterator IMitr=InverseMap.find(N);
+ assert(IMitr != InverseMap.end() && "No mapping to original node.");
+ (*BM)[NewN] = (const ExplodedNode*) IMitr->second;
+
+ // Link up the new node with the previous node.
+ if (Last)
+ NewN->addPredecessor(Last, *GNew);
+
+ Last = NewN;
+
+ // Are we at the final node?
+ IndexMapTy::iterator IMI =
+ IndexMap.find((const ExplodedNode*)(IMitr->second));
+ if (IMI != IndexMap.end()) {
+ First = NewN;
+ NodeIndex = IMI->second;
+ break;
+ }
+
+ // Find the next successor node. We choose the node that is marked
+ // with the lowest number assigned by the reverse BFS above.
+ ExplodedNode::const_succ_iterator SI = N->succ_begin();
+ ExplodedNode::const_succ_iterator SE = N->succ_end();
+ N = 0;
+
+ for (unsigned MinVal = 0; SI != SE; ++SI) {
+
+ I = Visited.find(*SI);
+
+ if (I == Visited.end())
+ continue;
+
+ if (!N || I->second < MinVal) {
+ N = *SI;
+ MinVal = I->second;
+ }
+ }
+
+ assert(N);
+ }
+
+ assert(First);
+
+ return std::make_pair(std::make_pair(GNew, BM),
+ std::make_pair(First, NodeIndex));
+}
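+
+// MakeReportGraph's return value packs two pairs: (the new single-path graph,
+// a back-map from its nodes to the untrimmed original nodes) and (the error
+// node inside that new graph, the index into 'nodes' of the report it
+// corresponds to).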
+
+/// CompactPathDiagnostic - This function postprocesses a PathDiagnostic object
+/// and collapses PathDiagnosticPieces that are expanded by macros.
+static void CompactPathDiagnostic(PathPieces &path, const SourceManager& SM) {
+ typedef std::vector<std::pair<IntrusiveRefCntPtr<PathDiagnosticMacroPiece>,
+ SourceLocation> > MacroStackTy;
+
+ typedef std::vector<IntrusiveRefCntPtr<PathDiagnosticPiece> >
+ PiecesTy;
+
+ MacroStackTy MacroStack;
+ PiecesTy Pieces;
+
+ for (PathPieces::const_iterator I = path.begin(), E = path.end();
+ I!=E; ++I) {
+
+ PathDiagnosticPiece *piece = I->getPtr();
+
+ // Recursively compact calls.
+ if (PathDiagnosticCallPiece *call=dyn_cast<PathDiagnosticCallPiece>(piece)){
+ CompactPathDiagnostic(call->path, SM);
+ }
+
+ // Get the location of the PathDiagnosticPiece.
+ const FullSourceLoc Loc = piece->getLocation().asLocation();
+
+ // Determine the instantiation location, which is the location by which we
+ // group related PathDiagnosticPieces.
+ SourceLocation InstantiationLoc = Loc.isMacroID() ?
+ SM.getExpansionLoc(Loc) :
+ SourceLocation();
+
+ if (Loc.isFileID()) {
+ MacroStack.clear();
+ Pieces.push_back(piece);
+ continue;
+ }
+
+ assert(Loc.isMacroID());
+
+ // Is the PathDiagnosticPiece within the same macro group?
+ if (!MacroStack.empty() && InstantiationLoc == MacroStack.back().second) {
+ MacroStack.back().first->subPieces.push_back(piece);
+ continue;
+ }
+
+ // We aren't in the same group. Are we descending into a new macro
+ // or are we part of an old one?
+ IntrusiveRefCntPtr<PathDiagnosticMacroPiece> MacroGroup;
+
+ SourceLocation ParentInstantiationLoc = InstantiationLoc.isMacroID() ?
+ SM.getExpansionLoc(Loc) :
+ SourceLocation();
+
+ // Walk the entire macro stack.
+ while (!MacroStack.empty()) {
+ if (InstantiationLoc == MacroStack.back().second) {
+ MacroGroup = MacroStack.back().first;
+ break;
+ }
+
+ if (ParentInstantiationLoc == MacroStack.back().second) {
+ MacroGroup = MacroStack.back().first;
+ break;
+ }
+
+ MacroStack.pop_back();
+ }
+
+ if (!MacroGroup || ParentInstantiationLoc == MacroStack.back().second) {
+ // Create a new macro group and add it to the stack.
+ PathDiagnosticMacroPiece *NewGroup =
+ new PathDiagnosticMacroPiece(
+ PathDiagnosticLocation::createSingleLocation(piece->getLocation()));
+
+ if (MacroGroup)
+ MacroGroup->subPieces.push_back(NewGroup);
+ else {
+ assert(InstantiationLoc.isFileID());
+ Pieces.push_back(NewGroup);
+ }
+
+ MacroGroup = NewGroup;
+ MacroStack.push_back(std::make_pair(MacroGroup, InstantiationLoc));
+ }
+
+ // Finally, add the PathDiagnosticPiece to the group.
+ MacroGroup->subPieces.push_back(piece);
+ }
+
+ // Now take the pieces and construct a new PathDiagnostic.
+ path.clear();
+
+ for (PiecesTy::iterator I=Pieces.begin(), E=Pieces.end(); I!=E; ++I)
+ path.push_back(*I);
+}
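+
+// Net effect: pieces whose locations expand from the same macro invocation
+// (an assert()-style wrapper, for instance) end up nested under a single
+// PathDiagnosticMacroPiece instead of appearing as separate top-level events.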
+
+void GRBugReporter::GeneratePathDiagnostic(PathDiagnostic& PD,
+ SmallVectorImpl<BugReport *> &bugReports) {
+
+ assert(!bugReports.empty());
+ SmallVector<const ExplodedNode *, 10> errorNodes;
+ for (SmallVectorImpl<BugReport*>::iterator I = bugReports.begin(),
+ E = bugReports.end(); I != E; ++I) {
+ errorNodes.push_back((*I)->getErrorNode());
+ }
+
+ // Construct a new graph that contains only a single path from the error
+ // node to a root.
+ const std::pair<std::pair<ExplodedGraph*, NodeBackMap*>,
+ std::pair<ExplodedNode*, unsigned> >&
+ GPair = MakeReportGraph(&getGraph(), errorNodes);
+
+ // Find the BugReport with the original location.
+ assert(GPair.second.second < bugReports.size());
+ BugReport *R = bugReports[GPair.second.second];
+ assert(R && "No original report found for sliced graph.");
+
+ OwningPtr<ExplodedGraph> ReportGraph(GPair.first.first);
+ OwningPtr<NodeBackMap> BackMap(GPair.first.second);
+ const ExplodedNode *N = GPair.second.first;
+
+ // Start building the path diagnostic...
+ PathDiagnosticBuilder PDB(*this, R, BackMap.get(),
+ getPathDiagnosticConsumer());
+
+ // Register additional node visitors.
+ R->addVisitor(new NilReceiverBRVisitor());
+ R->addVisitor(new ConditionBRVisitor());
+
+ BugReport::VisitorList visitors;
+ unsigned originalReportConfigToken, finalReportConfigToken;
+
+ // While generating diagnostics, it's possible the visitors will decide
+ // new symbols and regions are interesting, or add other visitors based on
+ // the information they find. If they do, we need to regenerate the path
+ // based on our new report configuration.
+ do {
+ // Get a clean copy of all the visitors.
+ for (BugReport::visitor_iterator I = R->visitor_begin(),
+ E = R->visitor_end(); I != E; ++I)
+ visitors.push_back((*I)->clone());
+
+ // Clear out the active path from any previous work.
+ PD.getActivePath().clear();
+ originalReportConfigToken = R->getConfigurationChangeToken();
+
+ // Generate the very last diagnostic piece - the piece is visible before
+ // the trace is expanded.
+ PathDiagnosticPiece *LastPiece = 0;
+ for (BugReport::visitor_iterator I = visitors.begin(), E = visitors.end();
+ I != E; ++I) {
+ if (PathDiagnosticPiece *Piece = (*I)->getEndPath(PDB, N, *R)) {
+ assert (!LastPiece &&
+ "There can only be one final piece in a diagnostic.");
+ LastPiece = Piece;
+ }
+ }
+ if (!LastPiece)
+ LastPiece = BugReporterVisitor::getDefaultEndPath(PDB, N, *R);
+ if (LastPiece)
+ PD.getActivePath().push_back(LastPiece);
+ else
+ return;
+
+ switch (PDB.getGenerationScheme()) {
+ case PathDiagnosticConsumer::Extensive:
+ GenerateExtensivePathDiagnostic(PD, PDB, N, visitors);
+ break;
+ case PathDiagnosticConsumer::Minimal:
+ GenerateMinimalPathDiagnostic(PD, PDB, N, visitors);
+ break;
+ }
+
+ // Clean up the visitors we used.
+ llvm::DeleteContainerPointers(visitors);
+
+ // Did anything change while generating this path?
+ finalReportConfigToken = R->getConfigurationChangeToken();
+ } while(finalReportConfigToken != originalReportConfigToken);
+
+ // Finally, prune the diagnostic path of uninteresting stuff.
+ bool hasSomethingInteresting = RemoveUneededCalls(PD.getMutablePieces());
+ assert(hasSomethingInteresting);
+ (void) hasSomethingInteresting;
+}
+
+void BugReporter::Register(BugType *BT) {
+ BugTypes = F.add(BugTypes, BT);
+}
+
+void BugReporter::EmitReport(BugReport* R) {
+ // Compute the bug report's hash to determine its equivalence class.
+ llvm::FoldingSetNodeID ID;
+ R->Profile(ID);
+
+ // Look up the equivalence class. If there isn't one, create it.
+ BugType& BT = R->getBugType();
+ Register(&BT);
+ void *InsertPos;
+ BugReportEquivClass* EQ = EQClasses.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!EQ) {
+ EQ = new BugReportEquivClass(R);
+ EQClasses.InsertNode(EQ, InsertPos);
+ EQClassesVector.push_back(EQ);
+ }
+ else
+ EQ->AddReport(R);
+}
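+
+// Reports with identical Profile() signatures share one BugReportEquivClass;
+// a single representative per class is later chosen by FlushReport() via
+// FindReportInEquivalenceClass().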
+
+
+//===----------------------------------------------------------------------===//
+// Emitting reports in equivalence classes.
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct FRIEC_WLItem {
+ const ExplodedNode *N;
+ ExplodedNode::const_succ_iterator I, E;
+
+ FRIEC_WLItem(const ExplodedNode *n)
+ : N(n), I(N->succ_begin()), E(N->succ_end()) {}
+};
+}
+
+static BugReport *
+FindReportInEquivalenceClass(BugReportEquivClass& EQ,
+ SmallVectorImpl<BugReport*> &bugReports) {
+
+ BugReportEquivClass::iterator I = EQ.begin(), E = EQ.end();
+ assert(I != E);
+ BugType& BT = I->getBugType();
+
+ // If we don't need to suppress any of the nodes because they are
+ // post-dominated by a sink, simply add all the reports in the equivalence
+ // class to 'bugReports'. Any of them will serve as a "representative" report.
+ if (!BT.isSuppressOnSink()) {
+ BugReport *R = I;
+ for (BugReportEquivClass::iterator I=EQ.begin(), E=EQ.end(); I!=E; ++I) {
+ const ExplodedNode *N = I->getErrorNode();
+ if (N) {
+ R = I;
+ bugReports.push_back(R);
+ }
+ }
+ return R;
+ }
+
+ // For bug reports that should be suppressed when all paths are post-dominated
+ // by a sink node, iterate through the reports in the equivalence class
+ // until we find one that isn't post-dominated (if one exists). We use a
+ // DFS traversal of the ExplodedGraph to find a non-sink node. We could write
+ // this as a recursive function, but we don't want to risk blowing out the
+ // stack for very long paths.
+ BugReport *exampleReport = 0;
+
+ for (; I != E; ++I) {
+ const ExplodedNode *errorNode = I->getErrorNode();
+
+ if (!errorNode)
+ continue;
+ if (errorNode->isSink()) {
+ llvm_unreachable(
+ "BugType::isSuppressSink() should not be 'true' for sink end nodes");
+ }
+ // No successors? By definition this node isn't post-dominated by a sink.
+ if (errorNode->succ_empty()) {
+ bugReports.push_back(I);
+ if (!exampleReport)
+ exampleReport = I;
+ continue;
+ }
+
+ // At this point we know that 'N' is not a sink and it has at least one
+ // successor. Use a DFS worklist to find a non-sink end-of-path node.
+ typedef FRIEC_WLItem WLItem;
+ typedef SmallVector<WLItem, 10> DFSWorkList;
+ llvm::DenseMap<const ExplodedNode *, unsigned> Visited;
+
+ DFSWorkList WL;
+ WL.push_back(errorNode);
+ Visited[errorNode] = 1;
+
+ while (!WL.empty()) {
+ WLItem &WI = WL.back();
+ assert(!WI.N->succ_empty());
+
+ for (; WI.I != WI.E; ++WI.I) {
+ const ExplodedNode *Succ = *WI.I;
+ // End-of-path node?
+ if (Succ->succ_empty()) {
+ // If we found an end-of-path node that is not a sink.
+ if (!Succ->isSink()) {
+ bugReports.push_back(I);
+ if (!exampleReport)
+ exampleReport = I;
+ WL.clear();
+ break;
+ }
+ // Found a sink? Continue on to the next successor.
+ continue;
+ }
+ // Mark the successor as visited. If it hasn't been explored,
+ // enqueue it to the DFS worklist.
+ unsigned &mark = Visited[Succ];
+ if (!mark) {
+ mark = 1;
+ WL.push_back(Succ);
+ break;
+ }
+ }
+
+ // The worklist may have been cleared at this point. First
+ // check if it is empty before checking the last item.
+ if (!WL.empty() && &WL.back() == &WI)
+ WL.pop_back();
+ }
+ }
+
+ // exampleReport will be NULL if all the nodes in the equivalence class
+ // were post-dominated by sinks.
+ return exampleReport;
+}
+
+//===----------------------------------------------------------------------===//
+// DiagnosticCache. This is a hack to cache analyzer diagnostics. It
+// uses global state, which eventually should go elsewhere.
+//===----------------------------------------------------------------------===//
+namespace {
+class DiagCacheItem : public llvm::FoldingSetNode {
+ llvm::FoldingSetNodeID ID;
+public:
+ DiagCacheItem(BugReport *R, PathDiagnostic *PD) {
+ R->Profile(ID);
+ PD->Profile(ID);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &id) {
+ id = ID;
+ }
+
+ llvm::FoldingSetNodeID &getID() { return ID; }
+};
+}
+
+static bool IsCachedDiagnostic(BugReport *R, PathDiagnostic *PD) {
+ // FIXME: Eventually this diagnostic cache should reside in something
+ // like AnalysisManager instead of being a static variable. This is
+ // really unsafe in the long term.
+ typedef llvm::FoldingSet<DiagCacheItem> DiagnosticCache;
+ static DiagnosticCache DC;
+
+ void *InsertPos;
+ DiagCacheItem *Item = new DiagCacheItem(R, PD);
+
+ if (DC.FindNodeOrInsertPos(Item->getID(), InsertPos)) {
+ delete Item;
+ return true;
+ }
+
+ DC.InsertNode(Item, InsertPos);
+ return false;
+}
+
+void BugReporter::FlushReport(BugReportEquivClass& EQ) {
+ SmallVector<BugReport*, 10> bugReports;
+ BugReport *exampleReport = FindReportInEquivalenceClass(EQ, bugReports);
+ if (!exampleReport)
+ return;
+
+ PathDiagnosticConsumer* PD = getPathDiagnosticConsumer();
+
+ // FIXME: Make sure we use the 'R' for the path that was actually used.
+ // Probably doesn't make a difference in practice.
+ BugType& BT = exampleReport->getBugType();
+
+ OwningPtr<PathDiagnostic>
+ D(new PathDiagnostic(exampleReport->getDeclWithIssue(),
+ exampleReport->getBugType().getName(),
+ !PD || PD->useVerboseDescription()
+ ? exampleReport->getDescription()
+ : exampleReport->getShortDescription(),
+ BT.getCategory()));
+
+ if (!bugReports.empty())
+ GeneratePathDiagnostic(*D.get(), bugReports);
+
+ // Get the meta data.
+ const BugReport::ExtraTextList &Meta =
+ exampleReport->getExtraText();
+ for (BugReport::ExtraTextList::const_iterator i = Meta.begin(),
+ e = Meta.end(); i != e; ++i) {
+ D->addMeta(*i);
+ }
+
+ // Emit a summary diagnostic to the regular Diagnostics engine.
+ BugReport::ranges_iterator Beg, End;
+ llvm::tie(Beg, End) = exampleReport->getRanges();
+ DiagnosticsEngine &Diag = getDiagnostic();
+
+ if (!IsCachedDiagnostic(exampleReport, D.get())) {
+ // Search the description for '%', as that will be interpreted as a
+ // format character by FormatDiagnostics.
+ StringRef desc = exampleReport->getShortDescription();
+
+ SmallString<512> TmpStr;
+ llvm::raw_svector_ostream Out(TmpStr);
+ for (StringRef::iterator I=desc.begin(), E=desc.end(); I!=E; ++I) {
+ if (*I == '%')
+ Out << "%%";
+ else
+ Out << *I;
+ }
+
+ Out.flush();
+ unsigned ErrorDiag = Diag.getCustomDiagID(DiagnosticsEngine::Warning, TmpStr);
+
+ DiagnosticBuilder diagBuilder = Diag.Report(
+ exampleReport->getLocation(getSourceManager()).asLocation(), ErrorDiag);
+ for (BugReport::ranges_iterator I = Beg; I != End; ++I)
+ diagBuilder << *I;
+ }
+
+ // Emit a full diagnostic for the path if we have a PathDiagnosticConsumer.
+ if (!PD)
+ return;
+
+ if (D->path.empty()) {
+ PathDiagnosticPiece *piece = new PathDiagnosticEventPiece(
+ exampleReport->getLocation(getSourceManager()),
+ exampleReport->getDescription());
+ for ( ; Beg != End; ++Beg)
+ piece->addRange(*Beg);
+
+ D->getActivePath().push_back(piece);
+ }
+
+ PD->HandlePathDiagnostic(D.take());
+}
+
+void BugReporter::EmitBasicReport(const Decl *DeclWithIssue,
+ StringRef name,
+ StringRef category,
+ StringRef str, PathDiagnosticLocation Loc,
+ SourceRange* RBeg, unsigned NumRanges) {
+
+ // 'BT' is owned by BugReporter.
+ BugType *BT = getBugTypeForName(name, category);
+ BugReport *R = new BugReport(*BT, str, Loc);
+ R->setDeclWithIssue(DeclWithIssue);
+ for ( ; NumRanges > 0 ; --NumRanges, ++RBeg) R->addRange(*RBeg);
+ EmitReport(R);
+}
+
+BugType *BugReporter::getBugTypeForName(StringRef name,
+ StringRef category) {
+ SmallString<136> fullDesc;
+ llvm::raw_svector_ostream(fullDesc) << name << ":" << category;
+ llvm::StringMapEntry<BugType *> &
+ entry = StrBugTypes.GetOrCreateValue(fullDesc);
+ BugType *BT = entry.getValue();
+ if (!BT) {
+ BT = new BugType(name, category);
+ entry.setValue(BT);
+ }
+ return BT;
+}
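+
+// Hypothetical use of EmitBasicReport from an AST-level check (the identifiers
+// D, S and LCtx are illustrative, not part of this file):
+//
+//   PathDiagnosticLocation L =
+//     PathDiagnosticLocation::createBegin(S, BR.getSourceManager(), LCtx);
+//   BR.EmitBasicReport(D, "Suspicious call", "API Misuse",
+//                      "Call may behave unexpectedly", L, 0, 0);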
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
new file mode 100644
index 0000000..6532486
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -0,0 +1,784 @@
+// BugReporterVisitors.cpp - Helpers for reporting bugs -----------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of BugReporter "visitors" which can be used to
+// enhance the diagnostics reported for a bug.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitor.h"
+
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+const Stmt *bugreporter::GetDerefExpr(const ExplodedNode *N) {
+ // Pattern match for a few useful cases (do something smarter later):
+ // a[0], p->f, *p
+ const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
+
+ if (const UnaryOperator *U = dyn_cast<UnaryOperator>(S)) {
+ if (U->getOpcode() == UO_Deref)
+ return U->getSubExpr()->IgnoreParenCasts();
+ }
+ else if (const MemberExpr *ME = dyn_cast<MemberExpr>(S)) {
+ return ME->getBase()->IgnoreParenCasts();
+ }
+ else if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(S)) {
+ return AE->getBase();
+ }
+
+ return NULL;
+}
+
+const Stmt *bugreporter::GetDenomExpr(const ExplodedNode *N) {
+ const Stmt *S = N->getLocationAs<PreStmt>()->getStmt();
+ if (const BinaryOperator *BE = dyn_cast<BinaryOperator>(S))
+ return BE->getRHS();
+ return NULL;
+}
+
+const Stmt *bugreporter::GetCalleeExpr(const ExplodedNode *N) {
+ // Callee is checked as a PreVisit to the CallExpr.
+ const Stmt *S = N->getLocationAs<PreStmt>()->getStmt();
+ if (const CallExpr *CE = dyn_cast<CallExpr>(S))
+ return CE->getCallee();
+ return NULL;
+}
+
+const Stmt *bugreporter::GetRetValExpr(const ExplodedNode *N) {
+ const Stmt *S = N->getLocationAs<PostStmt>()->getStmt();
+ if (const ReturnStmt *RS = dyn_cast<ReturnStmt>(S))
+ return RS->getRetValue();
+ return NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// Definitions for bug reporter visitors.
+//===----------------------------------------------------------------------===//
+
+PathDiagnosticPiece*
+BugReporterVisitor::getEndPath(BugReporterContext &BRC,
+ const ExplodedNode *EndPathNode,
+ BugReport &BR) {
+ return 0;
+}
+
+PathDiagnosticPiece*
+BugReporterVisitor::getDefaultEndPath(BugReporterContext &BRC,
+ const ExplodedNode *EndPathNode,
+ BugReport &BR) {
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::createEndOfPath(EndPathNode,BRC.getSourceManager());
+
+ BugReport::ranges_iterator Beg, End;
+ llvm::tie(Beg, End) = BR.getRanges();
+
+ // Only add the statement itself as a range if we didn't specify any
+ // special ranges for this report.
+ PathDiagnosticPiece *P = new PathDiagnosticEventPiece(L,
+ BR.getDescription(),
+ Beg == End);
+ for (; Beg != End; ++Beg)
+ P->addRange(*Beg);
+
+ return P;
+}
+
+
+void FindLastStoreBRVisitor::Profile(llvm::FoldingSetNodeID &ID) const {
+ static int tag = 0;
+ ID.AddPointer(&tag);
+ ID.AddPointer(R);
+ ID.Add(V);
+}
+
+PathDiagnosticPiece *FindLastStoreBRVisitor::VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+
+ if (satisfied)
+ return NULL;
+
+ if (!StoreSite) {
+ const ExplodedNode *Node = N, *Last = NULL;
+
+ for ( ; Node ; Node = Node->getFirstPred()) {
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ if (const PostStmt *P = Node->getLocationAs<PostStmt>())
+ if (const DeclStmt *DS = P->getStmtAs<DeclStmt>())
+ if (DS->getSingleDecl() == VR->getDecl()) {
+ // Record the last seen initialization point.
+ Last = Node;
+ break;
+ }
+ }
+
+ // Does the region still bind to value V? If not, we are done
+ // looking for store sites.
+ if (Node->getState()->getSVal(R) != V)
+ break;
+ }
+
+ if (!Node || !Last) {
+ satisfied = true;
+ return NULL;
+ }
+
+ StoreSite = Last;
+ }
+
+ if (StoreSite != N)
+ return NULL;
+
+ satisfied = true;
+ SmallString<256> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
+
+ if (const PostStmt *PS = N->getLocationAs<PostStmt>()) {
+ if (const DeclStmt *DS = PS->getStmtAs<DeclStmt>()) {
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ os << "Variable '" << *VR->getDecl() << "' ";
+ }
+ else
+ return NULL;
+
+ if (isa<loc::ConcreteInt>(V)) {
+ bool b = false;
+ if (R->isBoundable()) {
+ if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
+ if (TR->getValueType()->isObjCObjectPointerType()) {
+ os << "initialized to nil";
+ b = true;
+ }
+ }
+ }
+
+ if (!b)
+ os << "initialized to a null pointer value";
+ }
+ else if (isa<nonloc::ConcreteInt>(V)) {
+ os << "initialized to " << cast<nonloc::ConcreteInt>(V).getValue();
+ }
+ else if (V.isUndef()) {
+ if (isa<VarRegion>(R)) {
+ const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
+ if (VD->getInit())
+ os << "initialized to a garbage value";
+ else
+ os << "declared without an initial value";
+ }
+ }
+ }
+ }
+
+ if (os.str().empty()) {
+ if (isa<loc::ConcreteInt>(V)) {
+ bool b = false;
+ if (R->isBoundable()) {
+ if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
+ if (TR->getValueType()->isObjCObjectPointerType()) {
+ os << "nil object reference stored to ";
+ b = true;
+ }
+ }
+ }
+
+ if (!b)
+ os << "Null pointer value stored to ";
+ }
+ else if (V.isUndef()) {
+ os << "Uninitialized value stored to ";
+ }
+ else if (isa<nonloc::ConcreteInt>(V)) {
+ os << "The value " << cast<nonloc::ConcreteInt>(V).getValue()
+ << " is assigned to ";
+ }
+ else
+ return NULL;
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ os << '\'' << *VR->getDecl() << '\'';
+ }
+ else
+ return NULL;
+ }
+
+ // Construct a new PathDiagnosticPiece.
+ ProgramPoint P = N->getLocation();
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::create(P, BRC.getSourceManager());
+ if (!L.isValid())
+ return NULL;
+ return new PathDiagnosticEventPiece(L, os.str());
+}
+
+void TrackConstraintBRVisitor::Profile(llvm::FoldingSetNodeID &ID) const {
+ static int tag = 0;
+ ID.AddPointer(&tag);
+ ID.AddBoolean(Assumption);
+ ID.Add(Constraint);
+}
+
+PathDiagnosticPiece *
+TrackConstraintBRVisitor::VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ if (isSatisfied)
+ return NULL;
+
+ // Check if in the previous state it was feasible for this constraint
+ // to *not* be true.
+ if (PrevN->getState()->assume(Constraint, !Assumption)) {
+
+ isSatisfied = true;
+
+ // As a sanity check, make sure that the negation of the constraint
+ // was infeasible in the current state. If it is feasible, we somehow
+ // missed the transition point.
+ if (N->getState()->assume(Constraint, !Assumption))
+ return NULL;
+
+ // We found the transition point for the constraint. We now need to
+ // pretty-print the constraint. (work-in-progress)
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ if (isa<Loc>(Constraint)) {
+ os << "Assuming pointer value is ";
+ os << (Assumption ? "non-null" : "null");
+ }
+
+ if (os.str().empty())
+ return NULL;
+
+ // Construct a new PathDiagnosticPiece.
+ ProgramPoint P = N->getLocation();
+ PathDiagnosticLocation L =
+ PathDiagnosticLocation::create(P, BRC.getSourceManager());
+ if (!L.isValid())
+ return NULL;
+ return new PathDiagnosticEventPiece(L, os.str());
+ }
+
+ return NULL;
+}
+
+BugReporterVisitor *
+bugreporter::getTrackNullOrUndefValueVisitor(const ExplodedNode *N,
+ const Stmt *S,
+ BugReport *report) {
+ if (!S || !N)
+ return 0;
+
+ ProgramStateManager &StateMgr = N->getState()->getStateManager();
+
+ // Walk through nodes until we get one that matches the statement
+ // exactly.
+ while (N) {
+ const ProgramPoint &pp = N->getLocation();
+ if (const PostStmt *ps = dyn_cast<PostStmt>(&pp)) {
+ if (ps->getStmt() == S)
+ break;
+ }
+ N = N->getFirstPred();
+ }
+
+ if (!N)
+ return 0;
+
+ ProgramStateRef state = N->getState();
+
+ // Walk through lvalue-to-rvalue conversions.
+ const Expr *Ex = dyn_cast<Expr>(S);
+ if (Ex) {
+ Ex = Ex->IgnoreParenLValueCasts();
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Ex)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ const VarRegion *R =
+ StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext());
+
+ // What did we load?
+ SVal V = state->getSVal(loc::MemRegionVal(R));
+ report->markInteresting(R);
+ report->markInteresting(V);
+ return new FindLastStoreBRVisitor(V, R);
+ }
+ }
+ }
+
+ SVal V = state->getSValAsScalarOrLoc(S, N->getLocationContext());
+
+ // Uncomment this to find cases where we aren't properly getting the
+ // base value that was dereferenced.
+ // assert(!V.isUnknownOrUndef());
+
+ // Is it a symbolic value?
+ if (loc::MemRegionVal *L = dyn_cast<loc::MemRegionVal>(&V)) {
+ const SubRegion *R = cast<SubRegion>(L->getRegion());
+ while (R && !isa<SymbolicRegion>(R)) {
+ R = dyn_cast<SubRegion>(R->getSuperRegion());
+ }
+
+ if (R) {
+ report->markInteresting(R);
+ return new TrackConstraintBRVisitor(loc::MemRegionVal(R), false);
+ }
+ }
+
+ return 0;
+}
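+
+// Typical usage mirrors NilReceiverBRVisitor below: once a report exists for a
+// null or undefined value, attach this visitor so the emitted path explains
+// where that value originated:
+//
+//   BR.addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Ex, &BR));
+//
+// where BR is the BugReport being populated and Ex the expression of interest.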
+
+BugReporterVisitor *
+FindLastStoreBRVisitor::createVisitorObject(const ExplodedNode *N,
+ const MemRegion *R) {
+ assert(R && "The memory region is null.");
+
+ ProgramStateRef state = N->getState();
+ SVal V = state->getSVal(R);
+ if (V.isUnknown())
+ return 0;
+
+ return new FindLastStoreBRVisitor(V, R);
+}
+
+
+PathDiagnosticPiece *NilReceiverBRVisitor::VisitNode(const ExplodedNode *N,
+ const ExplodedNode *PrevN,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ const PostStmt *P = N->getLocationAs<PostStmt>();
+ if (!P)
+ return 0;
+ const ObjCMessageExpr *ME = P->getStmtAs<ObjCMessageExpr>();
+ if (!ME)
+ return 0;
+ const Expr *Receiver = ME->getInstanceReceiver();
+ if (!Receiver)
+ return 0;
+ ProgramStateRef state = N->getState();
+ const SVal &V = state->getSVal(Receiver, N->getLocationContext());
+ const DefinedOrUnknownSVal *DV = dyn_cast<DefinedOrUnknownSVal>(&V);
+ if (!DV)
+ return 0;
+ state = state->assume(*DV, true);
+ if (state)
+ return 0;
+
+ // The receiver was nil, and hence the method was skipped.
+ // Register a BugReporterVisitor to issue a message telling us how
+ // the receiver became nil.
+ BR.addVisitor(bugreporter::getTrackNullOrUndefValueVisitor(N, Receiver, &BR));
+ // Issue a message saying that the method was skipped.
+ PathDiagnosticLocation L(Receiver, BRC.getSourceManager(),
+ N->getLocationContext());
+ return new PathDiagnosticEventPiece(L, "No method is called "
+ "because the receiver is nil");
+}
+
+// Registers every VarDecl inside a Stmt with a last store visitor.
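+// The deque below gives a breadth-first walk over every sub-statement of 'S';
+// each DeclRefExpr whose current value is a concrete integer gets its own
+// FindLastStoreBRVisitor so the diagnostic can explain where that value was
+// last stored.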
+void FindLastStoreBRVisitor::registerStatementVarDecls(BugReport &BR,
+ const Stmt *S) {
+ const ExplodedNode *N = BR.getErrorNode();
+ std::deque<const Stmt *> WorkList;
+ WorkList.push_back(S);
+
+ while (!WorkList.empty()) {
+ const Stmt *Head = WorkList.front();
+ WorkList.pop_front();
+
+ ProgramStateRef state = N->getState();
+ ProgramStateManager &StateMgr = state->getStateManager();
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Head)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ const VarRegion *R =
+ StateMgr.getRegionManager().getVarRegion(VD, N->getLocationContext());
+
+ // What did we load?
+ SVal V = state->getSVal(S, N->getLocationContext());
+
+ if (isa<loc::ConcreteInt>(V) || isa<nonloc::ConcreteInt>(V)) {
+ // Register a new visitor with the BugReport.
+ BR.addVisitor(new FindLastStoreBRVisitor(V, R));
+ }
+ }
+ }
+
+ for (Stmt::const_child_iterator I = Head->child_begin();
+ I != Head->child_end(); ++I)
+ WorkList.push_back(*I);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Visitor that tries to report interesting diagnostics from conditions.
+//===----------------------------------------------------------------------===//
+PathDiagnosticPiece *ConditionBRVisitor::VisitNode(const ExplodedNode *N,
+ const ExplodedNode *Prev,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+ PathDiagnosticPiece *piece = VisitNodeImpl(N, Prev, BRC, BR);
+ if (PathDiagnosticEventPiece *ev =
+ dyn_cast_or_null<PathDiagnosticEventPiece>(piece))
+ ev->setPrunable(true, /* override */ false);
+ return piece;
+}
+
+PathDiagnosticPiece *ConditionBRVisitor::VisitNodeImpl(const ExplodedNode *N,
+ const ExplodedNode *Prev,
+ BugReporterContext &BRC,
+ BugReport &BR) {
+
+ const ProgramPoint &progPoint = N->getLocation();
+
+ ProgramStateRef CurrentState = N->getState();
+ ProgramStateRef PrevState = Prev->getState();
+
+ // Compare the GDMs of the states, because that is where constraints
+ // are managed. Note that this ensures we only look at nodes that
+ // were generated by the analyzer engine proper, not by checkers.
+ if (CurrentState->getGDM().getRoot() ==
+ PrevState->getGDM().getRoot())
+ return 0;
+
+ // If an assumption was made on a branch, it should be caught
+ // here by looking at the state transition.
+ if (const BlockEdge *BE = dyn_cast<BlockEdge>(&progPoint)) {
+ const CFGBlock *srcBlk = BE->getSrc();
+ if (const Stmt *term = srcBlk->getTerminator())
+ return VisitTerminator(term, N, srcBlk, BE->getDst(), BR, BRC);
+ return 0;
+ }
+
+ if (const PostStmt *PS = dyn_cast<PostStmt>(&progPoint)) {
+ // FIXME: Assuming that BugReporter is a GRBugReporter is a layering
+ // violation.
+ const std::pair<const ProgramPointTag *, const ProgramPointTag *> &tags =
+ cast<GRBugReporter>(BRC.getBugReporter()).
+ getEngine().getEagerlyAssumeTags();
+
+ const ProgramPointTag *tag = PS->getTag();
+ if (tag == tags.first)
+ return VisitTrueTest(cast<Expr>(PS->getStmt()), true,
+ BRC, BR, N);
+ if (tag == tags.second)
+ return VisitTrueTest(cast<Expr>(PS->getStmt()), false,
+ BRC, BR, N);
+
+ return 0;
+ }
+
+ return 0;
+}
+
+PathDiagnosticPiece *
+ConditionBRVisitor::VisitTerminator(const Stmt *Term,
+ const ExplodedNode *N,
+ const CFGBlock *srcBlk,
+ const CFGBlock *dstBlk,
+ BugReport &R,
+ BugReporterContext &BRC) {
+ const Expr *Cond = 0;
+
+ switch (Term->getStmtClass()) {
+ default:
+ return 0;
+ case Stmt::IfStmtClass:
+ Cond = cast<IfStmt>(Term)->getCond();
+ break;
+ case Stmt::ConditionalOperatorClass:
+ Cond = cast<ConditionalOperator>(Term)->getCond();
+ break;
+ }
+
+ assert(Cond);
+ assert(srcBlk->succ_size() == 2);
+ const bool tookTrue = *(srcBlk->succ_begin()) == dstBlk;
+ return VisitTrueTest(Cond->IgnoreParenNoopCasts(BRC.getASTContext()),
+ tookTrue, BRC, R, N);
+}
+
+PathDiagnosticPiece *
+ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
+ bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &R,
+ const ExplodedNode *N) {
+
+ const Expr *Ex = Cond;
+
+ while (true) {
+ Ex = Ex->IgnoreParens();
+ switch (Ex->getStmtClass()) {
+ default:
+ return 0;
+ case Stmt::BinaryOperatorClass:
+ return VisitTrueTest(Cond, cast<BinaryOperator>(Ex), tookTrue, BRC,
+ R, N);
+ case Stmt::DeclRefExprClass:
+ return VisitTrueTest(Cond, cast<DeclRefExpr>(Ex), tookTrue, BRC,
+ R, N);
+ case Stmt::UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(Ex);
+ if (UO->getOpcode() == UO_LNot) {
+ tookTrue = !tookTrue;
+ Ex = UO->getSubExpr()->IgnoreParenNoopCasts(BRC.getASTContext());
+ continue;
+ }
+ return 0;
+ }
+ }
+ }
+}
+
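+// Render 'Ex' into 'Out' for use in an "Assuming ..." message. Returns true
+// when the expression was printed as a quoted variable name; integer and
+// null/nil literals are printed unquoted and yield false.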
+bool ConditionBRVisitor::patternMatch(const Expr *Ex, llvm::raw_ostream &Out,
+ BugReporterContext &BRC,
+ BugReport &report,
+ const ExplodedNode *N,
+ llvm::Optional<bool> &prunable) {
+ const Expr *OriginalExpr = Ex;
+ Ex = Ex->IgnoreParenCasts();
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Ex)) {
+ const bool quotes = isa<VarDecl>(DR->getDecl());
+ if (quotes) {
+ Out << '\'';
+ const LocationContext *LCtx = N->getLocationContext();
+ const ProgramState *state = N->getState().getPtr();
+ if (const MemRegion *R = state->getLValue(cast<VarDecl>(DR->getDecl()),
+ LCtx).getAsRegion()) {
+ if (report.isInteresting(R))
+ prunable = false;
+ else {
+ const ProgramState *state = N->getState().getPtr();
+ SVal V = state->getSVal(R);
+ if (report.isInteresting(V))
+ prunable = false;
+ }
+ }
+ }
+ Out << DR->getDecl()->getDeclName().getAsString();
+ if (quotes)
+ Out << '\'';
+ return quotes;
+ }
+
+ if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(Ex)) {
+ QualType OriginalTy = OriginalExpr->getType();
+ if (OriginalTy->isPointerType()) {
+ if (IL->getValue() == 0) {
+ Out << "null";
+ return false;
+ }
+ }
+ else if (OriginalTy->isObjCObjectPointerType()) {
+ if (IL->getValue() == 0) {
+ Out << "nil";
+ return false;
+ }
+ }
+
+ Out << IL->getValue();
+ return false;
+ }
+
+ return false;
+}
+
+PathDiagnosticPiece *
+ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
+ const BinaryOperator *BExpr,
+ const bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &R,
+ const ExplodedNode *N) {
+
+ bool shouldInvert = false;
+ llvm::Optional<bool> shouldPrune;
+
+ SmallString<128> LhsString, RhsString;
+ {
+ llvm::raw_svector_ostream OutLHS(LhsString), OutRHS(RhsString);
+ const bool isVarLHS = patternMatch(BExpr->getLHS(), OutLHS, BRC, R, N,
+ shouldPrune);
+ const bool isVarRHS = patternMatch(BExpr->getRHS(), OutRHS, BRC, R, N,
+ shouldPrune);
+
+ shouldInvert = !isVarLHS && isVarRHS;
+ }
+
+ BinaryOperator::Opcode Op = BExpr->getOpcode();
+
+ if (BinaryOperator::isAssignmentOp(Op)) {
+ // For assignment operators, all that we care about is that the LHS
+ // evaluates to "true" or "false".
+ return VisitConditionVariable(LhsString, BExpr->getLHS(), tookTrue,
+ BRC, R, N);
+ }
+
+ // For non-assignment operations, we require that we can understand
+ // both the LHS and RHS.
+ if (LhsString.empty() || RhsString.empty())
+ return 0;
+
+ // Should we invert the strings if the LHS is not a variable name?
+ SmallString<256> buf;
+ llvm::raw_svector_ostream Out(buf);
+ Out << "Assuming " << (shouldInvert ? RhsString : LhsString) << " is ";
+
+ // Do we need to invert the opcode?
+ if (shouldInvert)
+ switch (Op) {
+ default: break;
+ case BO_LT: Op = BO_GT; break;
+ case BO_GT: Op = BO_LT; break;
+ case BO_LE: Op = BO_GE; break;
+ case BO_GE: Op = BO_LE; break;
+ }
+
+ if (!tookTrue)
+ switch (Op) {
+ case BO_EQ: Op = BO_NE; break;
+ case BO_NE: Op = BO_EQ; break;
+ case BO_LT: Op = BO_GE; break;
+ case BO_GT: Op = BO_LE; break;
+ case BO_LE: Op = BO_GT; break;
+ case BO_GE: Op = BO_LT; break;
+ default:
+ return 0;
+ }
+
+ switch (Op) {
+ case BO_EQ:
+ Out << "equal to ";
+ break;
+ case BO_NE:
+ Out << "not equal to ";
+ break;
+ default:
+ Out << BinaryOperator::getOpcodeStr(Op) << ' ';
+ break;
+ }
+
+ Out << (shouldInvert ? LhsString : RhsString);
+ const LocationContext *LCtx = N->getLocationContext();
+ PathDiagnosticLocation Loc(Cond, BRC.getSourceManager(), LCtx);
+ PathDiagnosticEventPiece *event =
+ new PathDiagnosticEventPiece(Loc, Out.str());
+ if (shouldPrune.hasValue())
+ event->setPrunable(shouldPrune.getValue());
+ return event;
+}
+
+PathDiagnosticPiece *
+ConditionBRVisitor::VisitConditionVariable(StringRef LhsString,
+ const Expr *CondVarExpr,
+ const bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &report,
+ const ExplodedNode *N) {
+ SmallString<256> buf;
+ llvm::raw_svector_ostream Out(buf);
+ Out << "Assuming " << LhsString << " is ";
+
+ QualType Ty = CondVarExpr->getType();
+
+ if (Ty->isPointerType())
+ Out << (tookTrue ? "not null" : "null");
+ else if (Ty->isObjCObjectPointerType())
+ Out << (tookTrue ? "not nil" : "nil");
+ else if (Ty->isBooleanType())
+ Out << (tookTrue ? "true" : "false");
+ else if (Ty->isIntegerType())
+ Out << (tookTrue ? "non-zero" : "zero");
+ else
+ return 0;
+
+ const LocationContext *LCtx = N->getLocationContext();
+ PathDiagnosticLocation Loc(CondVarExpr, BRC.getSourceManager(), LCtx);
+ PathDiagnosticEventPiece *event =
+ new PathDiagnosticEventPiece(Loc, Out.str());
+
+ if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(CondVarExpr)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ const ProgramState *state = N->getState().getPtr();
+ if (const MemRegion *R = state->getLValue(VD, LCtx).getAsRegion()) {
+ if (report.isInteresting(R))
+ event->setPrunable(false);
+ }
+ }
+ }
+
+ return event;
+}
+
+PathDiagnosticPiece *
+ConditionBRVisitor::VisitTrueTest(const Expr *Cond,
+ const DeclRefExpr *DR,
+ const bool tookTrue,
+ BugReporterContext &BRC,
+ BugReport &report,
+ const ExplodedNode *N) {
+
+ const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl());
+ if (!VD)
+ return 0;
+
+ SmallString<256> Buf;
+ llvm::raw_svector_ostream Out(Buf);
+
+ Out << "Assuming '";
+ VD->getDeclName().printName(Out);
+ Out << "' is ";
+
+ QualType VDTy = VD->getType();
+
+ if (VDTy->isPointerType())
+ Out << (tookTrue ? "non-null" : "null");
+ else if (VDTy->isObjCObjectPointerType())
+ Out << (tookTrue ? "non-nil" : "nil");
+ else if (VDTy->isScalarType())
+ Out << (tookTrue ? "not equal to 0" : "0");
+ else
+ return 0;
+
+ const LocationContext *LCtx = N->getLocationContext();
+ PathDiagnosticLocation Loc(Cond, BRC.getSourceManager(), LCtx);
+ PathDiagnosticEventPiece *event =
+ new PathDiagnosticEventPiece(Loc, Out.str());
+
+ const ProgramState *state = N->getState().getPtr();
+ if (const MemRegion *R = state->getLValue(VD, LCtx).getAsRegion()) {
+ if (report.isInteresting(R))
+ event->setPrunable(false);
+ else {
+ SVal V = state->getSVal(R);
+ if (report.isInteresting(V))
+ event->setPrunable(false);
+ }
+ }
+ return event;
+}
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp
new file mode 100644
index 0000000..07e0aac
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Checker.cpp
@@ -0,0 +1,31 @@
+//== Checker.cpp - Registration mechanism for checkers -----------*- C++ -*--=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines Checker, used to create and register checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+
+using namespace clang;
+using namespace ento;
+
+StringRef CheckerBase::getTagDescription() const {
+ // FIXME: We want to return the package + name of the checker here.
+ return "A Checker";
+}
+
+void Checker<check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+ check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+ check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+ check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+ check::_VoidCheck, check::_VoidCheck, check::_VoidCheck,
+ check::_VoidCheck, check::_VoidCheck, check::_VoidCheck
+ >::anchor() { }
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
new file mode 100644
index 0000000..0a047d9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
@@ -0,0 +1,83 @@
+//== CheckerContext.cpp - Context info for path-sensitive checkers-----------=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines CheckerContext that provides contextual info for
+// path-sensitive checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Lex/Lexer.h"
+
+using namespace clang;
+using namespace ento;
+
+const FunctionDecl *CheckerContext::getCalleeDecl(const CallExpr *CE) const {
+ ProgramStateRef State = getState();
+ const Expr *Callee = CE->getCallee();
+ SVal L = State->getSVal(Callee, Pred->getLocationContext());
+ return L.getAsFunctionDecl();
+}
+
+StringRef CheckerContext::getCalleeName(const FunctionDecl *FunDecl) const {
+ if (!FunDecl)
+ return StringRef();
+ IdentifierInfo *funI = FunDecl->getIdentifier();
+ if (!funI)
+ return StringRef();
+ return funI->getName();
+}
+
+
+bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD,
+ StringRef Name) {
+ return isCLibraryFunction(FD, Name, getASTContext());
+}
+
+bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD,
+ StringRef Name, ASTContext &Context) {
+ // To avoid false positives (e.g., finding user-defined functions with
+ // similar names), only perform fuzzy name matching when the function is a
+ // builtin. String comparison is slow; we might want to switch on BuiltinID here.
+ unsigned BId = FD->getBuiltinID();
+ if (BId != 0) {
+ StringRef BName = Context.BuiltinInfo.GetName(BId);
+ if (BName.find(Name) != StringRef::npos)
+ return true;
+ }
+
+ const IdentifierInfo *II = FD->getIdentifier();
+ // If this is a special C++ name without IdentifierInfo, it can't be a
+ // C library function.
+ if (!II)
+ return false;
+
+ StringRef FName = II->getName();
+ if (FName.equals(Name))
+ return true;
+
+ if (FName.startswith("__inline") && (FName.find(Name) != StringRef::npos))
+ return true;
+
+ if (FName.startswith("__") && FName.endswith("_chk") &&
+ FName.find(Name) != StringRef::npos)
+ return true;
+
+ return false;
+}
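+
+// Example: isCLibraryFunction(FD, "memcpy") accepts "memcpy" itself as well as
+// fortified or inlined spellings such as "__memcpy_chk" or "__inline_memcpy"
+// (illustrative names; anything matching the patterns above qualifies).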
+
+StringRef CheckerContext::getMacroNameOrSpelling(SourceLocation &Loc) {
+ if (Loc.isMacroID())
+ return Lexer::getImmediateMacroName(Loc, getSourceManager(),
+ getLangOpts());
+ SmallVector<char, 16> buf;
+ return Lexer::getSpelling(Loc, buf, getSourceManager(), getLangOpts());
+}
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
new file mode 100644
index 0000000..28df695
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerHelpers.cpp
@@ -0,0 +1,80 @@
+//===---- CheckerHelpers.cpp - Helper functions for checkers ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines several static functions for use in checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerHelpers.h"
+#include "clang/AST/Expr.h"
+
+// Recursively find any substatements containing macros
+bool clang::ento::containsMacro(const Stmt *S) {
+ if (S->getLocStart().isMacroID())
+ return true;
+
+ if (S->getLocEnd().isMacroID())
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsMacro(child))
+ return true;
+
+ return false;
+}
+
+// Recursively find any substatements containing enum constants
+bool clang::ento::containsEnum(const Stmt *S) {
+ const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S);
+
+ if (DR && isa<EnumConstantDecl>(DR->getDecl()))
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsEnum(child))
+ return true;
+
+ return false;
+}
+
+// Recursively find any substatements containing static vars
+bool clang::ento::containsStaticLocal(const Stmt *S) {
+ const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S);
+
+ if (DR)
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl()))
+ if (VD->isStaticLocal())
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsStaticLocal(child))
+ return true;
+
+ return false;
+}
+
+// Recursively find any substatements containing __builtin_offsetof
+bool clang::ento::containsBuiltinOffsetOf(const Stmt *S) {
+ if (isa<OffsetOfExpr>(S))
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsBuiltinOffsetOf(child))
+ return true;
+
+ return false;
+}
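+
+// All four helpers share the same shape: test the current statement, then
+// recurse over its children via the child iterators, returning true at the
+// first match.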
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
new file mode 100644
index 0000000..0bcc343
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerManager.cpp
@@ -0,0 +1,678 @@
+//===--- CheckerManager.cpp - Static Analyzer Checker Manager -------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the Static Analyzer Checker Manager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/Analysis/ProgramPoint.h"
+#include "clang/AST/DeclBase.h"
+
+using namespace clang;
+using namespace ento;
+
+bool CheckerManager::hasPathSensitiveCheckers() const {
+ return !StmtCheckers.empty() ||
+ !PreObjCMessageCheckers.empty() ||
+ !PostObjCMessageCheckers.empty() ||
+ !LocationCheckers.empty() ||
+ !BindCheckers.empty() ||
+ !EndAnalysisCheckers.empty() ||
+ !EndPathCheckers.empty() ||
+ !BranchConditionCheckers.empty() ||
+ !LiveSymbolsCheckers.empty() ||
+ !DeadSymbolsCheckers.empty() ||
+ !RegionChangesCheckers.empty() ||
+ !EvalAssumeCheckers.empty() ||
+ !EvalCallCheckers.empty() ||
+ !InlineCallCheckers.empty();
+}
+
+void CheckerManager::finishedCheckerRegistration() {
+#ifndef NDEBUG
+ // Make sure that for every event that has listeners, there is at least
+ // one dispatcher registered for it.
+ for (llvm::DenseMap<EventTag, EventInfo>::iterator
+ I = Events.begin(), E = Events.end(); I != E; ++I)
+ assert(I->second.HasDispatcher && "No dispatcher registered for an event");
+#endif
+}
+
+//===----------------------------------------------------------------------===//
+// Functions for running checkers for AST traversal.
+//===----------------------------------------------------------------------===//
+
+void CheckerManager::runCheckersOnASTDecl(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) {
+ assert(D);
+
+ unsigned DeclKind = D->getKind();
+ CachedDeclCheckers *checkers = 0;
+ CachedDeclCheckersMapTy::iterator CCI = CachedDeclCheckersMap.find(DeclKind);
+ if (CCI != CachedDeclCheckersMap.end()) {
+ checkers = &(CCI->second);
+ } else {
+ // Find the checkers that should run for this Decl and cache them.
+ checkers = &CachedDeclCheckersMap[DeclKind];
+ for (unsigned i = 0, e = DeclCheckers.size(); i != e; ++i) {
+ DeclCheckerInfo &info = DeclCheckers[i];
+ if (info.IsForDeclFn(D))
+ checkers->push_back(info.CheckFn);
+ }
+ }
+
+ assert(checkers);
+ for (CachedDeclCheckers::iterator
+ I = checkers->begin(), E = checkers->end(); I != E; ++I)
+ (*I)(D, mgr, BR);
+}
+
+void CheckerManager::runCheckersOnASTBody(const Decl *D, AnalysisManager& mgr,
+ BugReporter &BR) {
+ assert(D && D->hasBody());
+
+ for (unsigned i = 0, e = BodyCheckers.size(); i != e; ++i)
+ BodyCheckers[i](D, mgr, BR);
+}
+
+//===----------------------------------------------------------------------===//
+// Functions for running checkers for path-sensitive checking.
+//===----------------------------------------------------------------------===//
+
+template <typename CHECK_CTX>
+static void expandGraphWithCheckers(CHECK_CTX checkCtx,
+ ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src) {
+ const NodeBuilderContext &BldrCtx = checkCtx.Eng.getBuilderContext();
+ if (Src.empty())
+ return;
+
+ typename CHECK_CTX::CheckersTy::const_iterator
+ I = checkCtx.checkers_begin(), E = checkCtx.checkers_end();
+ if (I == E) {
+ Dst.insert(Src);
+ return;
+ }
+
+ ExplodedNodeSet Tmp1, Tmp2;
+ const ExplodedNodeSet *PrevSet = &Src;
+
+ for (; I != E; ++I) {
+ ExplodedNodeSet *CurrSet = 0;
+ if (I+1 == E)
+ CurrSet = &Dst;
+ else {
+ CurrSet = (PrevSet == &Tmp1) ? &Tmp2 : &Tmp1;
+ CurrSet->clear();
+ }
+
+ NodeBuilder B(*PrevSet, *CurrSet, BldrCtx);
+ for (ExplodedNodeSet::iterator NI = PrevSet->begin(), NE = PrevSet->end();
+ NI != NE; ++NI) {
+ checkCtx.runChecker(*I, B, *NI);
+ }
+
+ // If all the produced transitions are sinks, stop.
+ if (CurrSet->empty())
+ return;
+
+ // Update which NodeSet is the current one.
+ PrevSet = CurrSet;
+ }
+}
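+
+// Each checker consumes the node set produced by the previous one: Tmp1 and
+// Tmp2 are ping-ponged as intermediate sets, and the final checker writes
+// directly into Dst. If an intermediate set comes back empty (all produced
+// transitions were sinks), the expansion stops early.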
+
+namespace {
+ struct CheckStmtContext {
+ typedef SmallVectorImpl<CheckerManager::CheckStmtFunc> CheckersTy;
+ bool IsPreVisit;
+ const CheckersTy &Checkers;
+ const Stmt *S;
+ ExprEngine &Eng;
+ bool wasInlined;
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+ CheckStmtContext(bool isPreVisit, const CheckersTy &checkers,
+ const Stmt *s, ExprEngine &eng, bool wasInlined = false)
+ : IsPreVisit(isPreVisit), Checkers(checkers), S(s), Eng(eng),
+ wasInlined(wasInlined) {}
+
+ void runChecker(CheckerManager::CheckStmtFunc checkFn,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ // FIXME: Remove respondsToCallback from CheckerContext;
+ ProgramPoint::Kind K = IsPreVisit ? ProgramPoint::PreStmtKind :
+ ProgramPoint::PostStmtKind;
+ const ProgramPoint &L = ProgramPoint::getProgramPoint(S, K,
+ Pred->getLocationContext(), checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L, wasInlined);
+ checkFn(S, C);
+ }
+ };
+}
+
+/// \brief Run checkers for visiting Stmts.
+void CheckerManager::runCheckersForStmt(bool isPreVisit,
+ ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ const Stmt *S,
+ ExprEngine &Eng,
+ bool wasInlined) {
+ CheckStmtContext C(isPreVisit, *getCachedStmtCheckersFor(S, isPreVisit),
+ S, Eng, wasInlined);
+ expandGraphWithCheckers(C, Dst, Src);
+}
+
+namespace {
+ struct CheckObjCMessageContext {
+ typedef std::vector<CheckerManager::CheckObjCMessageFunc> CheckersTy;
+ bool IsPreVisit;
+ const CheckersTy &Checkers;
+ const ObjCMessage &Msg;
+ ExprEngine &Eng;
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+ CheckObjCMessageContext(bool isPreVisit, const CheckersTy &checkers,
+ const ObjCMessage &msg, ExprEngine &eng)
+ : IsPreVisit(isPreVisit), Checkers(checkers), Msg(msg), Eng(eng) { }
+
+ void runChecker(CheckerManager::CheckObjCMessageFunc checkFn,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ ProgramPoint::Kind K = IsPreVisit ? ProgramPoint::PreStmtKind :
+ ProgramPoint::PostStmtKind;
+ const ProgramPoint &L =
+ ProgramPoint::getProgramPoint(Msg.getMessageExpr(),
+ K, Pred->getLocationContext(),
+ checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L);
+
+ checkFn(Msg, C);
+ }
+ };
+}
+
+/// \brief Run checkers for visiting obj-c messages.
+void CheckerManager::runCheckersForObjCMessage(bool isPreVisit,
+ ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ const ObjCMessage &msg,
+ ExprEngine &Eng) {
+ CheckObjCMessageContext C(isPreVisit,
+ isPreVisit ? PreObjCMessageCheckers
+ : PostObjCMessageCheckers,
+ msg, Eng);
+ expandGraphWithCheckers(C, Dst, Src);
+}
+
+namespace {
+ struct CheckLocationContext {
+ typedef std::vector<CheckerManager::CheckLocationFunc> CheckersTy;
+ const CheckersTy &Checkers;
+ SVal Loc;
+ bool IsLoad;
+ const Stmt *NodeEx; /* Will become a CFGStmt */
+ const Stmt *BoundEx;
+ ExprEngine &Eng;
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+ CheckLocationContext(const CheckersTy &checkers,
+ SVal loc, bool isLoad, const Stmt *NodeEx,
+ const Stmt *BoundEx,
+ ExprEngine &eng)
+ : Checkers(checkers), Loc(loc), IsLoad(isLoad), NodeEx(NodeEx),
+ BoundEx(BoundEx), Eng(eng) {}
+
+ void runChecker(CheckerManager::CheckLocationFunc checkFn,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ ProgramPoint::Kind K = IsLoad ? ProgramPoint::PreLoadKind :
+ ProgramPoint::PreStoreKind;
+ const ProgramPoint &L =
+ ProgramPoint::getProgramPoint(NodeEx, K,
+ Pred->getLocationContext(),
+ checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L);
+ checkFn(Loc, IsLoad, BoundEx, C);
+ }
+ };
+}
+
+/// \brief Run checkers for load/store of a location.
+void CheckerManager::runCheckersForLocation(ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ SVal location, bool isLoad,
+ const Stmt *NodeEx,
+ const Stmt *BoundEx,
+ ExprEngine &Eng) {
+ CheckLocationContext C(LocationCheckers, location, isLoad, NodeEx,
+ BoundEx, Eng);
+ expandGraphWithCheckers(C, Dst, Src);
+}
+
+namespace {
+ struct CheckBindContext {
+ typedef std::vector<CheckerManager::CheckBindFunc> CheckersTy;
+ const CheckersTy &Checkers;
+ SVal Loc;
+ SVal Val;
+ const Stmt *S;
+ ExprEngine &Eng;
+ ProgramPoint::Kind PointKind;
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+ CheckBindContext(const CheckersTy &checkers,
+ SVal loc, SVal val, const Stmt *s, ExprEngine &eng,
+ ProgramPoint::Kind PK)
+ : Checkers(checkers), Loc(loc), Val(val), S(s), Eng(eng), PointKind(PK) {}
+
+ void runChecker(CheckerManager::CheckBindFunc checkFn,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ const ProgramPoint &L = ProgramPoint::getProgramPoint(S, PointKind,
+ Pred->getLocationContext(), checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L);
+
+ checkFn(Loc, Val, S, C);
+ }
+ };
+}
+
+/// \brief Run checkers for binding of a value to a location.
+void CheckerManager::runCheckersForBind(ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ SVal location, SVal val,
+ const Stmt *S, ExprEngine &Eng,
+ ProgramPoint::Kind PointKind) {
+ CheckBindContext C(BindCheckers, location, val, S, Eng, PointKind);
+ expandGraphWithCheckers(C, Dst, Src);
+}
+
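+/// \brief Run checkers for the end of analysis.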
+void CheckerManager::runCheckersForEndAnalysis(ExplodedGraph &G,
+ BugReporter &BR,
+ ExprEngine &Eng) {
+ for (unsigned i = 0, e = EndAnalysisCheckers.size(); i != e; ++i)
+ EndAnalysisCheckers[i](G, BR, Eng);
+}
+
+/// \brief Run checkers for end of path.
+// Note: we do not chain the checker output (as in expandGraphWithCheckers)
+// for this callback, since end-of-path nodes are expected to be final.
+void CheckerManager::runCheckersForEndPath(NodeBuilderContext &BC,
+ ExplodedNodeSet &Dst,
+ ExprEngine &Eng) {
+ ExplodedNode *Pred = BC.Pred;
+
+  // We define the builder outside of the loop because if at least one checker
+  // creates a successor for Pred, we do not need to generate an
+  // auto-transition for it.
+ NodeBuilder Bldr(Pred, Dst, BC);
+ for (unsigned i = 0, e = EndPathCheckers.size(); i != e; ++i) {
+ CheckEndPathFunc checkFn = EndPathCheckers[i];
+
+ const ProgramPoint &L = BlockEntrance(BC.Block,
+ Pred->getLocationContext(),
+ checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L);
+ checkFn(C);
+ }
+}
+
+namespace {
+ struct CheckBranchConditionContext {
+ typedef std::vector<CheckerManager::CheckBranchConditionFunc> CheckersTy;
+ const CheckersTy &Checkers;
+ const Stmt *Condition;
+ ExprEngine &Eng;
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+ CheckBranchConditionContext(const CheckersTy &checkers,
+ const Stmt *Cond, ExprEngine &eng)
+ : Checkers(checkers), Condition(Cond), Eng(eng) {}
+
+ void runChecker(CheckerManager::CheckBranchConditionFunc checkFn,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ ProgramPoint L = PostCondition(Condition, Pred->getLocationContext(),
+ checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L);
+ checkFn(Condition, C);
+ }
+ };
+}
+
+/// \brief Run checkers for branch condition.
+void CheckerManager::runCheckersForBranchCondition(const Stmt *Condition,
+ ExplodedNodeSet &Dst,
+ ExplodedNode *Pred,
+ ExprEngine &Eng) {
+ ExplodedNodeSet Src;
+ Src.insert(Pred);
+ CheckBranchConditionContext C(BranchConditionCheckers, Condition, Eng);
+ expandGraphWithCheckers(C, Dst, Src);
+}
+
+/// \brief Run checkers for live symbols.
+void CheckerManager::runCheckersForLiveSymbols(ProgramStateRef state,
+ SymbolReaper &SymReaper) {
+ for (unsigned i = 0, e = LiveSymbolsCheckers.size(); i != e; ++i)
+ LiveSymbolsCheckers[i](state, SymReaper);
+}
+
+namespace {
+ struct CheckDeadSymbolsContext {
+ typedef std::vector<CheckerManager::CheckDeadSymbolsFunc> CheckersTy;
+ const CheckersTy &Checkers;
+ SymbolReaper &SR;
+ const Stmt *S;
+ ExprEngine &Eng;
+
+ CheckersTy::const_iterator checkers_begin() { return Checkers.begin(); }
+ CheckersTy::const_iterator checkers_end() { return Checkers.end(); }
+
+ CheckDeadSymbolsContext(const CheckersTy &checkers, SymbolReaper &sr,
+ const Stmt *s, ExprEngine &eng)
+ : Checkers(checkers), SR(sr), S(s), Eng(eng) { }
+
+ void runChecker(CheckerManager::CheckDeadSymbolsFunc checkFn,
+ NodeBuilder &Bldr, ExplodedNode *Pred) {
+ ProgramPoint::Kind K = ProgramPoint::PostPurgeDeadSymbolsKind;
+ const ProgramPoint &L = ProgramPoint::getProgramPoint(S, K,
+ Pred->getLocationContext(), checkFn.Checker);
+ CheckerContext C(Bldr, Eng, Pred, L);
+
+ checkFn(SR, C);
+ }
+ };
+}
+
+/// \brief Run checkers for dead symbols.
+void CheckerManager::runCheckersForDeadSymbols(ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ SymbolReaper &SymReaper,
+ const Stmt *S,
+ ExprEngine &Eng) {
+ CheckDeadSymbolsContext C(DeadSymbolsCheckers, SymReaper, S, Eng);
+ expandGraphWithCheckers(C, Dst, Src);
+}
+
+/// \brief True if at least one checker wants to check region changes.
+bool CheckerManager::wantsRegionChangeUpdate(ProgramStateRef state) {
+ for (unsigned i = 0, e = RegionChangesCheckers.size(); i != e; ++i)
+ if (RegionChangesCheckers[i].WantUpdateFn(state))
+ return true;
+
+ return false;
+}
+
+/// \brief Run checkers for region changes.
+ProgramStateRef
+CheckerManager::runCheckersForRegionChanges(ProgramStateRef state,
+ const StoreManager::InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) {
+ for (unsigned i = 0, e = RegionChangesCheckers.size(); i != e; ++i) {
+ // If any checker declares the state infeasible (or if it starts that way),
+ // bail out.
+ if (!state)
+ return NULL;
+ state = RegionChangesCheckers[i].CheckFn(state, invalidated,
+ ExplicitRegions, Regions, Call);
+ }
+ return state;
+}
+
+/// \brief Run checkers for handling assumptions on symbolic values.
+ProgramStateRef
+CheckerManager::runCheckersForEvalAssume(ProgramStateRef state,
+ SVal Cond, bool Assumption) {
+ for (unsigned i = 0, e = EvalAssumeCheckers.size(); i != e; ++i) {
+ // If any checker declares the state infeasible (or if it starts that way),
+ // bail out.
+ if (!state)
+ return NULL;
+ state = EvalAssumeCheckers[i](state, Cond, Assumption);
+ }
+ return state;
+}
+
+/// \brief Run checkers for evaluating a call.
+/// Only one checker will evaluate the call.
+void CheckerManager::runCheckersForEvalCall(ExplodedNodeSet &Dst,
+ const ExplodedNodeSet &Src,
+ const CallExpr *CE,
+ ExprEngine &Eng,
+ GraphExpander *defaultEval) {
+ if (EvalCallCheckers.empty() &&
+ InlineCallCheckers.empty() &&
+ defaultEval == 0) {
+ Dst.insert(Src);
+ return;
+ }
+
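+  // For each predecessor node, let the InlineCall checkers and then the
+  // EvalCall checkers try to evaluate the call; at most one checker may claim
+  // it. If none does, fall back to defaultEval (or pass the node through).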
+ for (ExplodedNodeSet::iterator
+ NI = Src.begin(), NE = Src.end(); NI != NE; ++NI) {
+
+ ExplodedNode *Pred = *NI;
+ bool anyEvaluated = false;
+
+ // First, check if any of the InlineCall callbacks can evaluate the call.
+    assert(InlineCallCheckers.size() <= 1 &&
+           "InlineCall is a special hacky callback to allow intrusive "
+           "evaluation of the call (which simulates inlining). It is "
+           "currently only used by OSAtomicChecker and should go away "
+           "at some point.");
+ for (std::vector<InlineCallFunc>::iterator
+ EI = InlineCallCheckers.begin(), EE = InlineCallCheckers.end();
+ EI != EE; ++EI) {
+ ExplodedNodeSet checkDst;
+ bool evaluated = (*EI)(CE, Eng, Pred, checkDst);
+      assert(!(evaluated && anyEvaluated)
+             && "More than one checker is evaluating the call");
+ if (evaluated) {
+ anyEvaluated = true;
+ Dst.insert(checkDst);
+#ifdef NDEBUG
+ break; // on release don't check that no other checker also evals.
+#endif
+ }
+ }
+
+#ifdef NDEBUG // on release don't check that no other checker also evals.
+ if (anyEvaluated) {
+ break;
+ }
+#endif
+
+ ExplodedNodeSet checkDst;
+ NodeBuilder B(Pred, checkDst, Eng.getBuilderContext());
+ // Next, check if any of the EvalCall callbacks can evaluate the call.
+ for (std::vector<EvalCallFunc>::iterator
+ EI = EvalCallCheckers.begin(), EE = EvalCallCheckers.end();
+ EI != EE; ++EI) {
+ ProgramPoint::Kind K = ProgramPoint::PostStmtKind;
+ const ProgramPoint &L = ProgramPoint::getProgramPoint(CE, K,
+ Pred->getLocationContext(), EI->Checker);
+ bool evaluated = false;
+      { // CheckerContext generates transitions (populates checkDst) on
+ // destruction, so introduce the scope to make sure it gets properly
+ // populated.
+ CheckerContext C(B, Eng, Pred, L);
+ evaluated = (*EI)(CE, C);
+ }
+      assert(!(evaluated && anyEvaluated)
+             && "More than one checker is evaluating the call");
+ if (evaluated) {
+ anyEvaluated = true;
+ Dst.insert(checkDst);
+#ifdef NDEBUG
+ break; // on release don't check that no other checker also evals.
+#endif
+ }
+ }
+
+ // If none of the checkers evaluated the call, ask ExprEngine to handle it.
+ if (!anyEvaluated) {
+ if (defaultEval)
+ defaultEval->expandGraph(Dst, Pred);
+ else
+ Dst.insert(Pred);
+ }
+ }
+}
+
+/// \brief Run checkers for the entire Translation Unit.
+void CheckerManager::runCheckersOnEndOfTranslationUnit(
+ const TranslationUnitDecl *TU,
+ AnalysisManager &mgr,
+ BugReporter &BR) {
+ for (unsigned i = 0, e = EndOfTranslationUnitCheckers.size(); i != e; ++i)
+ EndOfTranslationUnitCheckers[i](TU, mgr, BR);
+}
+
+void CheckerManager::runCheckersForPrintState(raw_ostream &Out,
+ ProgramStateRef State,
+ const char *NL, const char *Sep) {
+ for (llvm::DenseMap<CheckerTag, CheckerRef>::iterator
+ I = CheckerTags.begin(), E = CheckerTags.end(); I != E; ++I)
+ I->second->printState(Out, State, NL, Sep);
+}
+
+//===----------------------------------------------------------------------===//
+// Internal registration functions for AST traversing.
+//===----------------------------------------------------------------------===//
+
+void CheckerManager::_registerForDecl(CheckDeclFunc checkfn,
+ HandlesDeclFunc isForDeclFn) {
+ DeclCheckerInfo info = { checkfn, isForDeclFn };
+ DeclCheckers.push_back(info);
+}
+
+void CheckerManager::_registerForBody(CheckDeclFunc checkfn) {
+ BodyCheckers.push_back(checkfn);
+}
+
+//===----------------------------------------------------------------------===//
+// Internal registration functions for path-sensitive checking.
+//===----------------------------------------------------------------------===//
+
+void CheckerManager::_registerForPreStmt(CheckStmtFunc checkfn,
+ HandlesStmtFunc isForStmtFn) {
+ StmtCheckerInfo info = { checkfn, isForStmtFn, /*IsPreVisit*/true };
+ StmtCheckers.push_back(info);
+}
+void CheckerManager::_registerForPostStmt(CheckStmtFunc checkfn,
+ HandlesStmtFunc isForStmtFn) {
+ StmtCheckerInfo info = { checkfn, isForStmtFn, /*IsPreVisit*/false };
+ StmtCheckers.push_back(info);
+}
+
+void CheckerManager::_registerForPreObjCMessage(CheckObjCMessageFunc checkfn) {
+ PreObjCMessageCheckers.push_back(checkfn);
+}
+void CheckerManager::_registerForPostObjCMessage(CheckObjCMessageFunc checkfn) {
+ PostObjCMessageCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForLocation(CheckLocationFunc checkfn) {
+ LocationCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForBind(CheckBindFunc checkfn) {
+ BindCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForEndAnalysis(CheckEndAnalysisFunc checkfn) {
+ EndAnalysisCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForEndPath(CheckEndPathFunc checkfn) {
+ EndPathCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForBranchCondition(
+ CheckBranchConditionFunc checkfn) {
+ BranchConditionCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForLiveSymbols(CheckLiveSymbolsFunc checkfn) {
+ LiveSymbolsCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForDeadSymbols(CheckDeadSymbolsFunc checkfn) {
+ DeadSymbolsCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForRegionChanges(CheckRegionChangesFunc checkfn,
+ WantsRegionChangeUpdateFunc wantUpdateFn) {
+ RegionChangesCheckerInfo info = {checkfn, wantUpdateFn};
+ RegionChangesCheckers.push_back(info);
+}
+
+void CheckerManager::_registerForEvalAssume(EvalAssumeFunc checkfn) {
+ EvalAssumeCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForEvalCall(EvalCallFunc checkfn) {
+ EvalCallCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForInlineCall(InlineCallFunc checkfn) {
+ InlineCallCheckers.push_back(checkfn);
+}
+
+void CheckerManager::_registerForEndOfTranslationUnit(
+ CheckEndOfTranslationUnit checkfn) {
+ EndOfTranslationUnitCheckers.push_back(checkfn);
+}
+
+//===----------------------------------------------------------------------===//
+// Implementation details.
+//===----------------------------------------------------------------------===//
+
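+// Statement checkers are cached per (statement class, pre/post-visit) pair so
+// that the HandlesStmtFunc predicates only need to run once per class.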
+CheckerManager::CachedStmtCheckers *
+CheckerManager::getCachedStmtCheckersFor(const Stmt *S, bool isPreVisit) {
+ assert(S);
+
+ CachedStmtCheckersKey key(S->getStmtClass(), isPreVisit);
+ CachedStmtCheckers *checkers = 0;
+ CachedStmtCheckersMapTy::iterator CCI = CachedStmtCheckersMap.find(key);
+ if (CCI != CachedStmtCheckersMap.end()) {
+ checkers = &(CCI->second);
+ } else {
+ // Find the checkers that should run for this Stmt and cache them.
+ checkers = &CachedStmtCheckersMap[key];
+ for (unsigned i = 0, e = StmtCheckers.size(); i != e; ++i) {
+ StmtCheckerInfo &info = StmtCheckers[i];
+ if (info.IsPreVisit == isPreVisit && info.IsForStmtFn(S))
+ checkers->push_back(info.CheckFn);
+ }
+ }
+
+ assert(checkers);
+ return checkers;
+}
+
+CheckerManager::~CheckerManager() {
+ for (unsigned i = 0, e = CheckerDtors.size(); i != e; ++i)
+ CheckerDtors[i]();
+}
+
+// Anchor for the vtable.
+GraphExpander::~GraphExpander() { }
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerRegistry.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
new file mode 100644
index 0000000..9791e2ec
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CheckerRegistry.cpp
@@ -0,0 +1,150 @@
+//===--- CheckerRegistry.cpp - Maintains all available checkers -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
+#include "clang/StaticAnalyzer/Core/CheckerOptInfo.h"
+#include "llvm/ADT/SetVector.h"
+
+using namespace clang;
+using namespace ento;
+
+static const char PackageSeparator = '.';
+typedef llvm::SetVector<const CheckerRegistry::CheckerInfo *> CheckerInfoSet;
+
+
+static bool checkerNameLT(const CheckerRegistry::CheckerInfo &a,
+ const CheckerRegistry::CheckerInfo &b) {
+ return a.FullName < b.FullName;
+}
+
+static bool isInPackage(const CheckerRegistry::CheckerInfo &checker,
+ StringRef packageName) {
+ // Does the checker's full name have the package as a prefix?
+ if (!checker.FullName.startswith(packageName))
+ return false;
+
+ // Is the package actually just the name of a specific checker?
+ if (checker.FullName.size() == packageName.size())
+ return true;
+
+ // Is the checker in the package (or a subpackage)?
+ if (checker.FullName[packageName.size()] == PackageSeparator)
+ return true;
+
+ return false;
+}
+
+static void collectCheckers(const CheckerRegistry::CheckerInfoList &checkers,
+ const llvm::StringMap<size_t> &packageSizes,
+ CheckerOptInfo &opt, CheckerInfoSet &collected) {
+ // Use a binary search to find the possible start of the package.
+ CheckerRegistry::CheckerInfo packageInfo(NULL, opt.getName(), "");
+ CheckerRegistry::CheckerInfoList::const_iterator e = checkers.end();
+ CheckerRegistry::CheckerInfoList::const_iterator i =
+ std::lower_bound(checkers.begin(), e, packageInfo, checkerNameLT);
+
+ // If we didn't even find a possible package, give up.
+ if (i == e)
+ return;
+
+ // If what we found doesn't actually start the package, give up.
+ if (!isInPackage(*i, opt.getName()))
+ return;
+
+ // There is at least one checker in the package; claim the option.
+ opt.claim();
+
+ // See how large the package is.
+ // If the package doesn't exist, assume the option refers to a single checker.
+ size_t size = 1;
+ llvm::StringMap<size_t>::const_iterator packageSize =
+ packageSizes.find(opt.getName());
+ if (packageSize != packageSizes.end())
+ size = packageSize->getValue();
+
+ // Step through all the checkers in the package.
+ for (e = i+size; i != e; ++i) {
+ if (opt.isEnabled())
+ collected.insert(&*i);
+ else
+ collected.remove(&*i);
+ }
+}
+
+void CheckerRegistry::addChecker(InitializationFunction fn, StringRef name,
+ StringRef desc) {
+ Checkers.push_back(CheckerInfo(fn, name, desc));
+
+ // Record the presence of the checker in its packages.
+ StringRef packageName, leafName;
+ llvm::tie(packageName, leafName) = name.rsplit(PackageSeparator);
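+  // Each iteration of the loop below strips the last dot-separated component,
+  // so every enclosing package gets credited with this checker.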
+ while (!leafName.empty()) {
+ Packages[packageName] += 1;
+ llvm::tie(packageName, leafName) = packageName.rsplit(PackageSeparator);
+ }
+}
+
+void CheckerRegistry::initializeManager(CheckerManager &checkerMgr,
+ SmallVectorImpl<CheckerOptInfo> &opts) const {
+ // Sort checkers for efficient collection.
+ std::sort(Checkers.begin(), Checkers.end(), checkerNameLT);
+
+ // Collect checkers enabled by the options.
+ CheckerInfoSet enabledCheckers;
+ for (SmallVectorImpl<CheckerOptInfo>::iterator
+ i = opts.begin(), e = opts.end(); i != e; ++i) {
+ collectCheckers(Checkers, Packages, *i, enabledCheckers);
+ }
+
+ // Initialize the CheckerManager with all enabled checkers.
+ for (CheckerInfoSet::iterator
+ i = enabledCheckers.begin(), e = enabledCheckers.end(); i != e; ++i) {
+ (*i)->Initialize(checkerMgr);
+ }
+}
+
+void CheckerRegistry::printHelp(llvm::raw_ostream &out,
+ size_t maxNameChars) const {
+ // FIXME: Alphabetical sort puts 'experimental' in the middle.
+ // Would it be better to name it '~experimental' or something else
+ // that's ASCIIbetically last?
+ std::sort(Checkers.begin(), Checkers.end(), checkerNameLT);
+
+ // FIXME: Print available packages.
+
+ out << "CHECKERS:\n";
+
+ // Find the maximum option length.
+ size_t optionFieldWidth = 0;
+ for (CheckerInfoList::const_iterator i = Checkers.begin(), e = Checkers.end();
+ i != e; ++i) {
+ // Limit the amount of padding we are willing to give up for alignment.
+ // Package.Name Description [Hidden]
+ size_t nameLength = i->FullName.size();
+ if (nameLength <= maxNameChars)
+ optionFieldWidth = std::max(optionFieldWidth, nameLength);
+ }
+
+ const size_t initialPad = 2;
+ for (CheckerInfoList::const_iterator i = Checkers.begin(), e = Checkers.end();
+ i != e; ++i) {
+ out.indent(initialPad) << i->FullName;
+
+ int pad = optionFieldWidth - i->FullName.size();
+
+ // Break on long option names.
+ if (pad < 0) {
+ out << '\n';
+ pad = optionFieldWidth + initialPad;
+ }
+ out.indent(pad + 2) << i->Desc;
+
+ out << '\n';
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
new file mode 100644
index 0000000..eb986af
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -0,0 +1,689 @@
+//==- CoreEngine.cpp - Path-Sensitive Dataflow Engine ------------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a generic engine for intraprocedural, path-sensitive,
+// dataflow analysis via a graph reachability engine.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "CoreEngine"
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CoreEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/Index/TranslationUnit.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/StmtCXX.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Statistic.h"
+
+using namespace clang;
+using namespace ento;
+
+STATISTIC(NumReachedMaxSteps,
+ "The # of times we reached the max number of steps.");
+STATISTIC(NumPathsExplored,
+ "The # of paths explored by the analyzer.");
+
+//===----------------------------------------------------------------------===//
+// Worklist classes for exploration of reachable states.
+//===----------------------------------------------------------------------===//
+
+WorkList::Visitor::~Visitor() {}
+
+namespace {
+class DFS : public WorkList {
+ SmallVector<WorkListUnit,20> Stack;
+public:
+ virtual bool hasWork() const {
+ return !Stack.empty();
+ }
+
+ virtual void enqueue(const WorkListUnit& U) {
+ Stack.push_back(U);
+ }
+
+ virtual WorkListUnit dequeue() {
+ assert (!Stack.empty());
+ const WorkListUnit& U = Stack.back();
+ Stack.pop_back(); // This technically "invalidates" U, but we are fine.
+ return U;
+ }
+
+ virtual bool visitItemsInWorkList(Visitor &V) {
+ for (SmallVectorImpl<WorkListUnit>::iterator
+ I = Stack.begin(), E = Stack.end(); I != E; ++I) {
+ if (V.visit(*I))
+ return true;
+ }
+ return false;
+ }
+};
+
+class BFS : public WorkList {
+ std::deque<WorkListUnit> Queue;
+public:
+ virtual bool hasWork() const {
+ return !Queue.empty();
+ }
+
+ virtual void enqueue(const WorkListUnit& U) {
+ Queue.push_front(U);
+ }
+
+ virtual WorkListUnit dequeue() {
+ WorkListUnit U = Queue.front();
+ Queue.pop_front();
+ return U;
+ }
+
+ virtual bool visitItemsInWorkList(Visitor &V) {
+ for (std::deque<WorkListUnit>::iterator
+ I = Queue.begin(), E = Queue.end(); I != E; ++I) {
+ if (V.visit(*I))
+ return true;
+ }
+ return false;
+ }
+};
+
+} // end anonymous namespace
+
+// Place the destructor for WorkList here because it contains virtual member
+// functions, and we want the code for the destructor generated in one
+// compilation unit.
+WorkList::~WorkList() {}
+
+WorkList *WorkList::makeDFS() { return new DFS(); }
+WorkList *WorkList::makeBFS() { return new BFS(); }
+
+namespace {
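+  // Worklist that visits basic blocks in BFS order while processing the
+  // contents of the current block in DFS (stack) order.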
+ class BFSBlockDFSContents : public WorkList {
+ std::deque<WorkListUnit> Queue;
+ SmallVector<WorkListUnit,20> Stack;
+ public:
+ virtual bool hasWork() const {
+ return !Queue.empty() || !Stack.empty();
+ }
+
+ virtual void enqueue(const WorkListUnit& U) {
+ if (isa<BlockEntrance>(U.getNode()->getLocation()))
+ Queue.push_front(U);
+ else
+ Stack.push_back(U);
+ }
+
+ virtual WorkListUnit dequeue() {
+ // Process all basic blocks to completion.
+ if (!Stack.empty()) {
+ const WorkListUnit& U = Stack.back();
+ Stack.pop_back(); // This technically "invalidates" U, but we are fine.
+ return U;
+ }
+
+ assert(!Queue.empty());
+ // Don't use const reference. The subsequent pop_back() might make it
+ // unsafe.
+ WorkListUnit U = Queue.front();
+ Queue.pop_front();
+ return U;
+ }
+ virtual bool visitItemsInWorkList(Visitor &V) {
+ for (SmallVectorImpl<WorkListUnit>::iterator
+ I = Stack.begin(), E = Stack.end(); I != E; ++I) {
+ if (V.visit(*I))
+ return true;
+ }
+ for (std::deque<WorkListUnit>::iterator
+ I = Queue.begin(), E = Queue.end(); I != E; ++I) {
+ if (V.visit(*I))
+ return true;
+ }
+ return false;
+ }
+
+ };
+} // end anonymous namespace
+
+WorkList* WorkList::makeBFSBlockDFSContents() {
+ return new BFSBlockDFSContents();
+}
+
+//===----------------------------------------------------------------------===//
+// Core analysis engine.
+//===----------------------------------------------------------------------===//
+
+/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
+bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
+ ProgramStateRef InitState) {
+
+ if (G->num_roots() == 0) { // Initialize the analysis by constructing
+ // the root if none exists.
+
+ const CFGBlock *Entry = &(L->getCFG()->getEntry());
+
+ assert (Entry->empty() &&
+ "Entry block must be empty.");
+
+ assert (Entry->succ_size() == 1 &&
+ "Entry block must have 1 successor.");
+
+ // Mark the entry block as visited.
+ FunctionSummaries->markVisitedBasicBlock(Entry->getBlockID(),
+ L->getDecl(),
+ L->getCFG()->getNumBlockIDs());
+
+ // Get the solitary successor.
+ const CFGBlock *Succ = *(Entry->succ_begin());
+
+ // Construct an edge representing the
+ // starting location in the function.
+ BlockEdge StartLoc(Entry, Succ, L);
+
+    // Set the current block counter to the empty counter.
+ WList->setBlockCounter(BCounterFactory.GetEmptyCounter());
+
+ if (!InitState)
+ // Generate the root.
+ generateNode(StartLoc, SubEng.getInitialState(L), 0);
+ else
+ generateNode(StartLoc, InitState, 0);
+ }
+
+  // Check if we have a step limit.
+ bool UnlimitedSteps = Steps == 0;
+
+ while (WList->hasWork()) {
+ if (!UnlimitedSteps) {
+ if (Steps == 0) {
+ NumReachedMaxSteps++;
+ break;
+ }
+ --Steps;
+ }
+
+ const WorkListUnit& WU = WList->dequeue();
+
+ // Set the current block counter.
+ WList->setBlockCounter(WU.getBlockCounter());
+
+ // Retrieve the node.
+ ExplodedNode *Node = WU.getNode();
+
+ dispatchWorkItem(Node, Node->getLocation(), WU);
+ }
+ SubEng.processEndWorklist(hasWorkRemaining());
+ return WList->hasWork();
+}
+
+void CoreEngine::dispatchWorkItem(ExplodedNode* Pred, ProgramPoint Loc,
+ const WorkListUnit& WU) {
+ // Dispatch on the location type.
+ switch (Loc.getKind()) {
+ case ProgramPoint::BlockEdgeKind:
+ HandleBlockEdge(cast<BlockEdge>(Loc), Pred);
+ break;
+
+ case ProgramPoint::BlockEntranceKind:
+ HandleBlockEntrance(cast<BlockEntrance>(Loc), Pred);
+ break;
+
+ case ProgramPoint::BlockExitKind:
+      assert (false && "BlockExit locations never occur in forward analysis.");
+ break;
+
+ case ProgramPoint::CallEnterKind: {
+ CallEnter CEnter = cast<CallEnter>(Loc);
+ if (AnalyzedCallees)
+ if (const CallExpr* CE =
+ dyn_cast_or_null<CallExpr>(CEnter.getCallExpr()))
+ if (const Decl *CD = CE->getCalleeDecl())
+ AnalyzedCallees->insert(CD);
+ SubEng.processCallEnter(CEnter, Pred);
+ break;
+ }
+
+ case ProgramPoint::CallExitKind:
+ SubEng.processCallExit(Pred);
+ break;
+
+ case ProgramPoint::EpsilonKind: {
+ assert(Pred->hasSinglePred() &&
+ "Assume epsilon has exactly one predecessor by construction");
+ ExplodedNode *PNode = Pred->getFirstPred();
+ dispatchWorkItem(Pred, PNode->getLocation(), WU);
+ break;
+ }
+ default:
+ assert(isa<PostStmt>(Loc) ||
+ isa<PostInitializer>(Loc));
+ HandlePostStmt(WU.getBlock(), WU.getIndex(), Pred);
+ break;
+ }
+}
+
+bool CoreEngine::ExecuteWorkListWithInitialState(const LocationContext *L,
+ unsigned Steps,
+ ProgramStateRef InitState,
+ ExplodedNodeSet &Dst) {
+ bool DidNotFinish = ExecuteWorkList(L, Steps, InitState);
+ for (ExplodedGraph::eop_iterator I = G->eop_begin(),
+ E = G->eop_end(); I != E; ++I) {
+ Dst.Add(*I);
+ }
+ return DidNotFinish;
+}
+
+void CoreEngine::HandleBlockEdge(const BlockEdge &L, ExplodedNode *Pred) {
+
+ const CFGBlock *Blk = L.getDst();
+ NodeBuilderContext BuilderCtx(*this, Blk, Pred);
+
+ // Mark this block as visited.
+ const LocationContext *LC = Pred->getLocationContext();
+ FunctionSummaries->markVisitedBasicBlock(Blk->getBlockID(),
+ LC->getDecl(),
+ LC->getCFG()->getNumBlockIDs());
+
+ // Check if we are entering the EXIT block.
+ if (Blk == &(L.getLocationContext()->getCFG()->getExit())) {
+
+ assert (L.getLocationContext()->getCFG()->getExit().size() == 0
+ && "EXIT block cannot contain Stmts.");
+
+ // Process the final state transition.
+ SubEng.processEndOfFunction(BuilderCtx);
+
+ // This path is done. Don't enqueue any more nodes.
+ return;
+ }
+
+ // Call into the SubEngine to process entering the CFGBlock.
+ ExplodedNodeSet dstNodes;
+ BlockEntrance BE(Blk, Pred->getLocationContext());
+ NodeBuilderWithSinks nodeBuilder(Pred, dstNodes, BuilderCtx, BE);
+ SubEng.processCFGBlockEntrance(L, nodeBuilder);
+
+ // Auto-generate a node.
+ if (!nodeBuilder.hasGeneratedNodes()) {
+ nodeBuilder.generateNode(Pred->State, Pred);
+ }
+
+ // Enqueue nodes onto the worklist.
+ enqueue(dstNodes);
+}
+
+void CoreEngine::HandleBlockEntrance(const BlockEntrance &L,
+ ExplodedNode *Pred) {
+
+ // Increment the block counter.
+ const LocationContext *LC = Pred->getLocationContext();
+ unsigned BlockId = L.getBlock()->getBlockID();
+ BlockCounter Counter = WList->getBlockCounter();
+ Counter = BCounterFactory.IncrementCount(Counter, LC->getCurrentStackFrame(),
+ BlockId);
+ WList->setBlockCounter(Counter);
+
+ // Process the entrance of the block.
+ if (CFGElement E = L.getFirstElement()) {
+ NodeBuilderContext Ctx(*this, L.getBlock(), Pred);
+ SubEng.processCFGElement(E, Pred, 0, &Ctx);
+ }
+ else
+ HandleBlockExit(L.getBlock(), Pred);
+}
+
+void CoreEngine::HandleBlockExit(const CFGBlock * B, ExplodedNode *Pred) {
+
+ if (const Stmt *Term = B->getTerminator()) {
+ switch (Term->getStmtClass()) {
+ default:
+ llvm_unreachable("Analysis for this terminator not implemented.");
+
+ case Stmt::BinaryOperatorClass: // '&&' and '||'
+ HandleBranch(cast<BinaryOperator>(Term)->getLHS(), Term, B, Pred);
+ return;
+
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass:
+ HandleBranch(cast<AbstractConditionalOperator>(Term)->getCond(),
+ Term, B, Pred);
+ return;
+
+ // FIXME: Use constant-folding in CFG construction to simplify this
+ // case.
+
+ case Stmt::ChooseExprClass:
+ HandleBranch(cast<ChooseExpr>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::CXXTryStmtClass: {
+ // Generate a node for each of the successors.
+ // Our logic for EH analysis can certainly be improved.
+ for (CFGBlock::const_succ_iterator it = B->succ_begin(),
+ et = B->succ_end(); it != et; ++it) {
+ if (const CFGBlock *succ = *it) {
+ generateNode(BlockEdge(B, succ, Pred->getLocationContext()),
+ Pred->State, Pred);
+ }
+ }
+ return;
+ }
+
+ case Stmt::DoStmtClass:
+ HandleBranch(cast<DoStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::CXXForRangeStmtClass:
+ HandleBranch(cast<CXXForRangeStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::ForStmtClass:
+ HandleBranch(cast<ForStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::ContinueStmtClass:
+ case Stmt::BreakStmtClass:
+ case Stmt::GotoStmtClass:
+ break;
+
+ case Stmt::IfStmtClass:
+ HandleBranch(cast<IfStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+
+ case Stmt::IndirectGotoStmtClass: {
+ // Only 1 successor: the indirect goto dispatch block.
+ assert (B->succ_size() == 1);
+
+ IndirectGotoNodeBuilder
+ builder(Pred, B, cast<IndirectGotoStmt>(Term)->getTarget(),
+ *(B->succ_begin()), this);
+
+ SubEng.processIndirectGoto(builder);
+ return;
+ }
+
+ case Stmt::ObjCForCollectionStmtClass: {
+ // In the case of ObjCForCollectionStmt, it appears twice in a CFG:
+ //
+ // (1) inside a basic block, which represents the binding of the
+ // 'element' variable to a value.
+ // (2) in a terminator, which represents the branch.
+ //
+ // For (1), subengines will bind a value (i.e., 0 or 1) indicating
+      // whether or not the collection contains any more elements. We cannot
+ // just test to see if the element is nil because a container can
+ // contain nil elements.
+ HandleBranch(Term, Term, B, Pred);
+ return;
+ }
+
+ case Stmt::SwitchStmtClass: {
+ SwitchNodeBuilder builder(Pred, B, cast<SwitchStmt>(Term)->getCond(),
+ this);
+
+ SubEng.processSwitch(builder);
+ return;
+ }
+
+ case Stmt::WhileStmtClass:
+ HandleBranch(cast<WhileStmt>(Term)->getCond(), Term, B, Pred);
+ return;
+ }
+ }
+
+  assert (B->succ_size() == 1 &&
+          "Blocks with no terminator should have exactly 1 successor.");
+
+ generateNode(BlockEdge(B, *(B->succ_begin()), Pred->getLocationContext()),
+ Pred->State, Pred);
+}
+
+void CoreEngine::HandleBranch(const Stmt *Cond, const Stmt *Term,
+ const CFGBlock * B, ExplodedNode *Pred) {
+ assert(B->succ_size() == 2);
+ NodeBuilderContext Ctx(*this, B, Pred);
+ ExplodedNodeSet Dst;
+ SubEng.processBranch(Cond, Term, Ctx, Pred, Dst,
+ *(B->succ_begin()), *(B->succ_begin()+1));
+ // Enqueue the new frontier onto the worklist.
+ enqueue(Dst);
+}
+
+void CoreEngine::HandlePostStmt(const CFGBlock *B, unsigned StmtIdx,
+ ExplodedNode *Pred) {
+ assert(B);
+ assert(!B->empty());
+
+ if (StmtIdx == B->size())
+ HandleBlockExit(B, Pred);
+ else {
+ NodeBuilderContext Ctx(*this, B, Pred);
+ SubEng.processCFGElement((*B)[StmtIdx], Pred, StmtIdx, &Ctx);
+ }
+}
+
+/// generateNode - Utility method to generate nodes, hook up successors,
+/// and add nodes to the worklist.
+void CoreEngine::generateNode(const ProgramPoint &Loc,
+ ProgramStateRef State,
+ ExplodedNode *Pred) {
+
+ bool IsNew;
+ ExplodedNode *Node = G->getNode(Loc, State, false, &IsNew);
+
+ if (Pred)
+ Node->addPredecessor(Pred, *G); // Link 'Node' with its predecessor.
+ else {
+ assert (IsNew);
+ G->addRoot(Node); // 'Node' has no predecessor. Make it a root.
+ }
+
+ // Only add 'Node' to the worklist if it was freshly generated.
+ if (IsNew) WList->enqueue(Node);
+}
+
+void CoreEngine::enqueueStmtNode(ExplodedNode *N,
+ const CFGBlock *Block, unsigned Idx) {
+ assert(Block);
+ assert (!N->isSink());
+
+ // Check if this node entered a callee.
+ if (isa<CallEnter>(N->getLocation())) {
+ // Still use the index of the CallExpr. It's needed to create the callee
+ // StackFrameContext.
+ WList->enqueue(N, Block, Idx);
+ return;
+ }
+
+ // Do not create extra nodes. Move to the next CFG element.
+ if (isa<PostInitializer>(N->getLocation())) {
+ WList->enqueue(N, Block, Idx+1);
+ return;
+ }
+
+ if (isa<EpsilonPoint>(N->getLocation())) {
+ WList->enqueue(N, Block, Idx);
+ return;
+ }
+
+ const CFGStmt *CS = (*Block)[Idx].getAs<CFGStmt>();
+ const Stmt *St = CS ? CS->getStmt() : 0;
+ PostStmt Loc(St, N->getLocationContext());
+
+ if (Loc == N->getLocation()) {
+ // Note: 'N' should be a fresh node because otherwise it shouldn't be
+ // a member of Deferred.
+ WList->enqueue(N, Block, Idx+1);
+ return;
+ }
+
+ bool IsNew;
+ ExplodedNode *Succ = G->getNode(Loc, N->getState(), false, &IsNew);
+ Succ->addPredecessor(N, *G);
+
+ if (IsNew)
+ WList->enqueue(Succ, Block, Idx+1);
+}
+
+ExplodedNode *CoreEngine::generateCallExitNode(ExplodedNode *N) {
+ // Create a CallExit node and enqueue it.
+ const StackFrameContext *LocCtx
+ = cast<StackFrameContext>(N->getLocationContext());
+ const Stmt *CE = LocCtx->getCallSite();
+
+  // Use the callee's location context.
+ CallExit Loc(CE, LocCtx);
+
+ bool isNew;
+ ExplodedNode *Node = G->getNode(Loc, N->getState(), false, &isNew);
+ Node->addPredecessor(N, *G);
+ return isNew ? Node : 0;
+}
+
+
+void CoreEngine::enqueue(ExplodedNodeSet &Set) {
+ for (ExplodedNodeSet::iterator I = Set.begin(),
+ E = Set.end(); I != E; ++I) {
+ WList->enqueue(*I);
+ }
+}
+
+void CoreEngine::enqueue(ExplodedNodeSet &Set,
+ const CFGBlock *Block, unsigned Idx) {
+ for (ExplodedNodeSet::iterator I = Set.begin(),
+ E = Set.end(); I != E; ++I) {
+ enqueueStmtNode(*I, Block, Idx);
+ }
+}
+
+void CoreEngine::enqueueEndOfFunction(ExplodedNodeSet &Set) {
+ for (ExplodedNodeSet::iterator I = Set.begin(), E = Set.end(); I != E; ++I) {
+ ExplodedNode *N = *I;
+ // If we are in an inlined call, generate CallExit node.
+ if (N->getLocationContext()->getParent()) {
+ N = generateCallExitNode(N);
+ if (N)
+ WList->enqueue(N);
+ } else {
+ G->addEndOfPath(N);
+ NumPathsExplored++;
+ }
+ }
+}
+
+
+void NodeBuilder::anchor() { }
+
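+// Create (or find) the node for (Loc, State), link it to FromN, and update
+// the frontier. Returns 0 when an equivalent node already existed in the
+// graph, i.e. no new node was generated.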
+ExplodedNode* NodeBuilder::generateNodeImpl(const ProgramPoint &Loc,
+ ProgramStateRef State,
+ ExplodedNode *FromN,
+ bool MarkAsSink) {
+ HasGeneratedNodes = true;
+ bool IsNew;
+ ExplodedNode *N = C.Eng.G->getNode(Loc, State, MarkAsSink, &IsNew);
+ N->addPredecessor(FromN, *C.Eng.G);
+ Frontier.erase(FromN);
+
+ if (!IsNew)
+ return 0;
+
+ if (!MarkAsSink)
+ Frontier.Add(N);
+
+ return N;
+}
+
+void NodeBuilderWithSinks::anchor() { }
+
+StmtNodeBuilder::~StmtNodeBuilder() {
+ if (EnclosingBldr)
+ for (ExplodedNodeSet::iterator I = Frontier.begin(),
+ E = Frontier.end(); I != E; ++I )
+ EnclosingBldr->addNodes(*I);
+}
+
+void BranchNodeBuilder::anchor() { }
+
+ExplodedNode *BranchNodeBuilder::generateNode(ProgramStateRef State,
+ bool branch,
+ ExplodedNode *NodePred) {
+ // If the branch has been marked infeasible we should not generate a node.
+ if (!isFeasible(branch))
+ return NULL;
+
+ ProgramPoint Loc = BlockEdge(C.Block, branch ? DstT:DstF,
+ NodePred->getLocationContext());
+ ExplodedNode *Succ = generateNodeImpl(Loc, State, NodePred);
+ return Succ;
+}
+
+ExplodedNode*
+IndirectGotoNodeBuilder::generateNode(const iterator &I,
+ ProgramStateRef St,
+ bool IsSink) {
+ bool IsNew;
+ ExplodedNode *Succ = Eng.G->getNode(BlockEdge(Src, I.getBlock(),
+ Pred->getLocationContext()), St,
+ IsSink, &IsNew);
+ Succ->addPredecessor(Pred, *Eng.G);
+
+ if (!IsNew)
+ return 0;
+
+ if (!IsSink)
+ Eng.WList->enqueue(Succ);
+
+ return Succ;
+}
+
+
+ExplodedNode*
+SwitchNodeBuilder::generateCaseStmtNode(const iterator &I,
+ ProgramStateRef St) {
+
+ bool IsNew;
+ ExplodedNode *Succ = Eng.G->getNode(BlockEdge(Src, I.getBlock(),
+ Pred->getLocationContext()), St,
+ false, &IsNew);
+ Succ->addPredecessor(Pred, *Eng.G);
+ if (!IsNew)
+ return 0;
+
+ Eng.WList->enqueue(Succ);
+ return Succ;
+}
+
+
+ExplodedNode*
+SwitchNodeBuilder::generateDefaultCaseNode(ProgramStateRef St,
+ bool IsSink) {
+ // Get the block for the default case.
+ assert(Src->succ_rbegin() != Src->succ_rend());
+ CFGBlock *DefaultBlock = *Src->succ_rbegin();
+
+ // Sanity check for default blocks that are unreachable and not caught
+ // by earlier stages.
+ if (!DefaultBlock)
+ return NULL;
+
+ bool IsNew;
+ ExplodedNode *Succ = Eng.G->getNode(BlockEdge(Src, DefaultBlock,
+ Pred->getLocationContext()), St,
+ IsSink, &IsNew);
+ Succ->addPredecessor(Pred, *Eng.G);
+
+ if (!IsNew)
+ return 0;
+
+ if (!IsSink)
+ Eng.WList->enqueue(Succ);
+
+ return Succ;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp
new file mode 100644
index 0000000..b5ea3db
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Environment.cpp
@@ -0,0 +1,295 @@
+//== Environment.cpp - Map from Stmt* to Locations/Values -------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Environment and EnvironmentManager classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+using namespace clang;
+using namespace ento;
+
+SVal Environment::lookupExpr(const EnvironmentEntry &E) const {
+ const SVal* X = ExprBindings.lookup(E);
+ if (X) {
+ SVal V = *X;
+ return V;
+ }
+ return UnknownVal();
+}
+
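+// Resolve the SVal for an expression: look through wrappers that add no
+// binding of their own (parens, OpaqueValueExpr, ExprWithCleanups, ...),
+// fold most simple literals directly, and otherwise fall back to a lookup
+// in ExprBindings.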
+SVal Environment::getSVal(const EnvironmentEntry &Entry,
+ SValBuilder& svalBuilder,
+ bool useOnlyDirectBindings) const {
+
+ if (useOnlyDirectBindings) {
+ // This branch is rarely taken, but can be exercised by
+ // checkers that explicitly bind values to arbitrary
+ // expressions. It is crucial that we do not ignore any
+ // expression here, and do a direct lookup.
+ return lookupExpr(Entry);
+ }
+
+ const Stmt *E = Entry.getStmt();
+ const LocationContext *LCtx = Entry.getLocationContext();
+
+ for (;;) {
+ if (const Expr *Ex = dyn_cast<Expr>(E))
+ E = Ex->IgnoreParens();
+
+ switch (E->getStmtClass()) {
+ case Stmt::AddrLabelExprClass:
+ return svalBuilder.makeLoc(cast<AddrLabelExpr>(E));
+ case Stmt::OpaqueValueExprClass: {
+ const OpaqueValueExpr *ope = cast<OpaqueValueExpr>(E);
+ E = ope->getSourceExpr();
+ continue;
+ }
+ case Stmt::ParenExprClass:
+ case Stmt::GenericSelectionExprClass:
+ llvm_unreachable("ParenExprs and GenericSelectionExprs should "
+ "have been handled by IgnoreParens()");
+ case Stmt::CharacterLiteralClass: {
+ const CharacterLiteral* C = cast<CharacterLiteral>(E);
+ return svalBuilder.makeIntVal(C->getValue(), C->getType());
+ }
+ case Stmt::CXXBoolLiteralExprClass: {
+ const SVal *X = ExprBindings.lookup(EnvironmentEntry(E, LCtx));
+ if (X)
+ return *X;
+ else
+ return svalBuilder.makeBoolVal(cast<CXXBoolLiteralExpr>(E));
+ }
+ case Stmt::IntegerLiteralClass: {
+ // In C++, this expression may have been bound to a temporary object.
+ SVal const *X = ExprBindings.lookup(EnvironmentEntry(E, LCtx));
+ if (X)
+ return *X;
+ else
+ return svalBuilder.makeIntVal(cast<IntegerLiteral>(E));
+ }
+ case Stmt::ObjCBoolLiteralExprClass:
+ return svalBuilder.makeBoolVal(cast<ObjCBoolLiteralExpr>(E));
+
+    // For the special C++0x nullptr case, make a null pointer SVal.
+ case Stmt::CXXNullPtrLiteralExprClass:
+ return svalBuilder.makeNull();
+ case Stmt::ExprWithCleanupsClass:
+ E = cast<ExprWithCleanups>(E)->getSubExpr();
+ continue;
+ case Stmt::CXXBindTemporaryExprClass:
+ E = cast<CXXBindTemporaryExpr>(E)->getSubExpr();
+ continue;
+ case Stmt::ObjCPropertyRefExprClass:
+ return loc::ObjCPropRef(cast<ObjCPropertyRefExpr>(E));
+ case Stmt::ObjCStringLiteralClass: {
+ MemRegionManager &MRMgr = svalBuilder.getRegionManager();
+ const ObjCStringLiteral *SL = cast<ObjCStringLiteral>(E);
+ return svalBuilder.makeLoc(MRMgr.getObjCStringRegion(SL));
+ }
+ case Stmt::StringLiteralClass: {
+ MemRegionManager &MRMgr = svalBuilder.getRegionManager();
+ const StringLiteral *SL = cast<StringLiteral>(E);
+ return svalBuilder.makeLoc(MRMgr.getStringRegion(SL));
+ }
+ case Stmt::ReturnStmtClass: {
+ const ReturnStmt *RS = cast<ReturnStmt>(E);
+ if (const Expr *RE = RS->getRetValue()) {
+ E = RE;
+ continue;
+ }
+ return UndefinedVal();
+ }
+
+ // Handle all other Stmt* using a lookup.
+ default:
+ break;
+ };
+ break;
+ }
+ return lookupExpr(EnvironmentEntry(E, LCtx));
+}
+
+Environment EnvironmentManager::bindExpr(Environment Env,
+ const EnvironmentEntry &E,
+ SVal V,
+ bool Invalidate) {
+ if (V.isUnknown()) {
+ if (Invalidate)
+ return Environment(F.remove(Env.ExprBindings, E));
+ else
+ return Env;
+ }
+ return Environment(F.add(Env.ExprBindings, E, V));
+}
+
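+// Tag the low bit of the Stmt pointer to distinguish entries that record the
+// location used by a load/store from entries that record the expression's
+// value (see IsLocation below).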
+static inline EnvironmentEntry MakeLocation(const EnvironmentEntry &E) {
+ const Stmt *S = E.getStmt();
+ S = (const Stmt*) (((uintptr_t) S) | 0x1);
+ return EnvironmentEntry(S, E.getLocationContext());
+}
+
+Environment EnvironmentManager::bindExprAndLocation(Environment Env,
+ const EnvironmentEntry &E,
+ SVal location, SVal V) {
+ return Environment(F.add(F.add(Env.ExprBindings, MakeLocation(E), location),
+ E, V));
+}
+
+namespace {
+class MarkLiveCallback : public SymbolVisitor {
+ SymbolReaper &SymReaper;
+public:
+ MarkLiveCallback(SymbolReaper &symreaper) : SymReaper(symreaper) {}
+ bool VisitSymbol(SymbolRef sym) {
+ SymReaper.markLive(sym);
+ return true;
+ }
+ bool VisitMemRegion(const MemRegion *R) {
+ SymReaper.markLive(R);
+ return true;
+ }
+};
+} // end anonymous namespace
+
+// In addition to mapping from EnvironmentEntry -> SVals in the Environment,
+// we also maintain a mapping from EnvironmentEntry -> SVals (locations)
+// that were used during a load and store.
+static inline bool IsLocation(const EnvironmentEntry &E) {
+ const Stmt *S = E.getStmt();
+ return (bool) (((uintptr_t) S) & 0x1);
+}
+
+// removeDeadBindings:
+// - Remove subexpression bindings.
+// - Remove dead block expression bindings.
+// - Keep live block expression bindings:
+// - Mark their reachable symbols live in SymbolReaper,
+// see ScanReachableSymbols.
+// - Mark the region in DRoots if the binding is a loc::MemRegionVal.
+Environment
+EnvironmentManager::removeDeadBindings(Environment Env,
+ SymbolReaper &SymReaper,
+ ProgramStateRef ST) {
+
+ // We construct a new Environment object entirely, as this is cheaper than
+ // individually removing all the subexpression bindings (which will greatly
+ // outnumber block-level expression bindings).
+ Environment NewEnv = getInitialEnvironment();
+
+ SmallVector<std::pair<EnvironmentEntry, SVal>, 10> deferredLocations;
+
+ MarkLiveCallback CB(SymReaper);
+ ScanReachableSymbols RSScaner(ST, CB);
+
+ llvm::ImmutableMapRef<EnvironmentEntry,SVal>
+ EBMapRef(NewEnv.ExprBindings.getRootWithoutRetain(),
+ F.getTreeFactory());
+
+ // Iterate over the block-expr bindings.
+ for (Environment::iterator I = Env.begin(), E = Env.end();
+ I != E; ++I) {
+
+ const EnvironmentEntry &BlkExpr = I.getKey();
+ // For recorded locations (used when evaluating loads and stores), we
+ // consider them live only when their associated normal expression is
+ // also live.
+ // NOTE: This assumes that loads/stores that evaluated to UnknownVal
+ // still have an entry in the map.
+ if (IsLocation(BlkExpr)) {
+ deferredLocations.push_back(std::make_pair(BlkExpr, I.getData()));
+ continue;
+ }
+ const SVal &X = I.getData();
+
+ if (SymReaper.isLive(BlkExpr.getStmt(), BlkExpr.getLocationContext())) {
+ // Copy the binding to the new map.
+ EBMapRef = EBMapRef.add(BlkExpr, X);
+
+ // If the block expr's value is a memory region, then mark that region.
+ if (isa<loc::MemRegionVal>(X)) {
+ const MemRegion *R = cast<loc::MemRegionVal>(X).getRegion();
+ SymReaper.markLive(R);
+ }
+
+ // Mark all symbols in the block expr's value live.
+ RSScaner.scan(X);
+ continue;
+ }
+
+    // Otherwise the expression is dead, with a couple of exceptions.
+    // Do not mistakenly clean out a LogicalExpr or ConditionalOperator: it is
+    // dead at the beginning of itself, but we still need its UndefinedVal to
+    // determine its SVal.
+ if (X.isUndef() && cast<UndefinedVal>(X).getData())
+ EBMapRef = EBMapRef.add(BlkExpr, X);
+ }
+
+  // Go through the deferred locations and add them to the new environment if
+  // the corresponding Stmt* is in the map as well.
+ for (SmallVectorImpl<std::pair<EnvironmentEntry, SVal> >::iterator
+ I = deferredLocations.begin(), E = deferredLocations.end(); I != E; ++I) {
+ const EnvironmentEntry &En = I->first;
+ const Stmt *S = (Stmt*) (((uintptr_t) En.getStmt()) & (uintptr_t) ~0x1);
+ if (EBMapRef.lookup(EnvironmentEntry(S, En.getLocationContext())))
+ EBMapRef = EBMapRef.add(En, I->second);
+ }
+
+ NewEnv.ExprBindings = EBMapRef.asImmutableMap();
+ return NewEnv;
+}
+
+void Environment::print(raw_ostream &Out, const char *NL,
+ const char *Sep) const {
+ printAux(Out, false, NL, Sep);
+ printAux(Out, true, NL, Sep);
+}
+
+void Environment::printAux(raw_ostream &Out, bool printLocations,
+ const char *NL,
+ const char *Sep) const{
+
+ bool isFirst = true;
+
+ for (Environment::iterator I = begin(), E = end(); I != E; ++I) {
+ const EnvironmentEntry &En = I.getKey();
+ if (IsLocation(En)) {
+ if (!printLocations)
+ continue;
+ }
+ else {
+ if (printLocations)
+ continue;
+ }
+
+ if (isFirst) {
+ Out << NL << NL
+ << (printLocations ? "Load/Store locations:" : "Expressions:")
+ << NL;
+ isFirst = false;
+ } else {
+ Out << NL;
+ }
+
+ const Stmt *S = En.getStmt();
+ if (printLocations) {
+ S = (Stmt*) (((uintptr_t) S) & ((uintptr_t) ~0x1));
+ }
+
+ Out << " (" << (void*) En.getLocationContext() << ',' << (void*) S << ") ";
+ LangOptions LO; // FIXME.
+ S->printPretty(Out, 0, PrintingPolicy(LO));
+ Out << " : " << I.getData();
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
new file mode 100644
index 0000000..0dcbe1f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExplodedGraph.cpp
@@ -0,0 +1,405 @@
+//=-- ExplodedGraph.cpp - Local, Path-Sens. "Exploded Graph" -*- C++ -*------=//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the template classes ExplodedNode and ExplodedGraph,
+// which represent a path-sensitive, intra-procedural "exploded graph."
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/ParentMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include <vector>
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Node auditing.
+//===----------------------------------------------------------------------===//
+
+// An out of line virtual method to provide a home for the class vtable.
+ExplodedNode::Auditor::~Auditor() {}
+
+#ifndef NDEBUG
+static ExplodedNode::Auditor* NodeAuditor = 0;
+#endif
+
+void ExplodedNode::SetAuditor(ExplodedNode::Auditor* A) {
+#ifndef NDEBUG
+ NodeAuditor = A;
+#endif
+}
+
+//===----------------------------------------------------------------------===//
+// Cleanup.
+//===----------------------------------------------------------------------===//
+
+static const unsigned CounterTop = 1000;
+
+ExplodedGraph::ExplodedGraph()
+ : NumNodes(0), reclaimNodes(false), reclaimCounter(CounterTop) {}
+
+ExplodedGraph::~ExplodedGraph() {}
+
+//===----------------------------------------------------------------------===//
+// Node reclamation.
+//===----------------------------------------------------------------------===//
+
+bool ExplodedGraph::shouldCollect(const ExplodedNode *node) {
+  // Reclaim all nodes that match *all* of the following criteria:
+ //
+ // (1) 1 predecessor (that has one successor)
+ // (2) 1 successor (that has one predecessor)
+ // (3) The ProgramPoint is for a PostStmt.
+ // (4) There is no 'tag' for the ProgramPoint.
+ // (5) The 'store' is the same as the predecessor.
+ // (6) The 'GDM' is the same as the predecessor.
+ // (7) The LocationContext is the same as the predecessor.
+ // (8) The PostStmt is for a non-consumed Stmt or Expr.
+
+ // Conditions 1 and 2.
+ if (node->pred_size() != 1 || node->succ_size() != 1)
+ return false;
+
+ const ExplodedNode *pred = *(node->pred_begin());
+ if (pred->succ_size() != 1)
+ return false;
+
+ const ExplodedNode *succ = *(node->succ_begin());
+ if (succ->pred_size() != 1)
+ return false;
+
+ // Condition 3.
+ ProgramPoint progPoint = node->getLocation();
+ if (!isa<PostStmt>(progPoint) ||
+ (isa<CallEnter>(progPoint) || isa<CallExit>(progPoint)))
+ return false;
+
+ // Condition 4.
+ PostStmt ps = cast<PostStmt>(progPoint);
+ if (ps.getTag())
+ return false;
+
+ if (isa<BinaryOperator>(ps.getStmt()))
+ return false;
+
+ // Conditions 5, 6, and 7.
+ ProgramStateRef state = node->getState();
+ ProgramStateRef pred_state = pred->getState();
+ if (state->store != pred_state->store || state->GDM != pred_state->GDM ||
+ progPoint.getLocationContext() != pred->getLocationContext())
+ return false;
+
+ // Condition 8.
+ if (const Expr *Ex = dyn_cast<Expr>(ps.getStmt())) {
+ ParentMap &PM = progPoint.getLocationContext()->getParentMap();
+ if (!PM.isConsumedExpr(Ex))
+ return false;
+ }
+
+ return true;
+}
+
+void ExplodedGraph::collectNode(ExplodedNode *node) {
+ // Removing a node means:
+  // (a) changing the predecessor's successor to the successor of this node,
+  // (b) changing the successor's predecessor to the predecessor of this node,
+  // (c) putting 'node' onto FreeNodes.
+ assert(node->pred_size() == 1 || node->succ_size() == 1);
+ ExplodedNode *pred = *(node->pred_begin());
+ ExplodedNode *succ = *(node->succ_begin());
+ pred->replaceSuccessor(succ);
+ succ->replacePredecessor(pred);
+ FreeNodes.push_back(node);
+ Nodes.RemoveNode(node);
+ --NumNodes;
+ node->~ExplodedNode();
+}
+
+void ExplodedGraph::reclaimRecentlyAllocatedNodes() {
+ if (ChangedNodes.empty())
+ return;
+
+  // Only periodically reclaim nodes so that we can build up a set of
+ // nodes that meet the reclamation criteria. Freshly created nodes
+ // by definition have no successor, and thus cannot be reclaimed (see below).
+ assert(reclaimCounter > 0);
+ if (--reclaimCounter != 0)
+ return;
+ reclaimCounter = CounterTop;
+
+ for (NodeVector::iterator it = ChangedNodes.begin(), et = ChangedNodes.end();
+ it != et; ++it) {
+ ExplodedNode *node = *it;
+ if (shouldCollect(node))
+ collectNode(node);
+ }
+ ChangedNodes.clear();
+}
+
+//===----------------------------------------------------------------------===//
+// ExplodedNode.
+//===----------------------------------------------------------------------===//
+
+static inline BumpVector<ExplodedNode*>& getVector(void *P) {
+ return *reinterpret_cast<BumpVector<ExplodedNode*>*>(P);
+}
+
+void ExplodedNode::addPredecessor(ExplodedNode *V, ExplodedGraph &G) {
+ assert (!V->isSink());
+ Preds.addNode(V, G);
+ V->Succs.addNode(this, G);
+#ifndef NDEBUG
+ if (NodeAuditor) NodeAuditor->AddEdge(V, this);
+#endif
+}
+
+void ExplodedNode::NodeGroup::replaceNode(ExplodedNode *node) {
+ assert(getKind() == Size1);
+ P = reinterpret_cast<uintptr_t>(node);
+ assert(getKind() == Size1);
+}
+
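+// A NodeGroup holds either a single ExplodedNode* directly (Size1) or a
+// pointer to a BumpVector of nodes (SizeOther), using the low bits of P to
+// record which representation is in use.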
+void ExplodedNode::NodeGroup::addNode(ExplodedNode *N, ExplodedGraph &G) {
+ assert((reinterpret_cast<uintptr_t>(N) & Mask) == 0x0);
+ assert(!getFlag());
+
+ if (getKind() == Size1) {
+ if (ExplodedNode *NOld = getNode()) {
+ BumpVectorContext &Ctx = G.getNodeAllocator();
+ BumpVector<ExplodedNode*> *V =
+ G.getAllocator().Allocate<BumpVector<ExplodedNode*> >();
+ new (V) BumpVector<ExplodedNode*>(Ctx, 4);
+
+ assert((reinterpret_cast<uintptr_t>(V) & Mask) == 0x0);
+ V->push_back(NOld, Ctx);
+ V->push_back(N, Ctx);
+ P = reinterpret_cast<uintptr_t>(V) | SizeOther;
+ assert(getPtr() == (void*) V);
+ assert(getKind() == SizeOther);
+ }
+ else {
+ P = reinterpret_cast<uintptr_t>(N);
+ assert(getKind() == Size1);
+ }
+ }
+ else {
+ assert(getKind() == SizeOther);
+ getVector(getPtr()).push_back(N, G.getNodeAllocator());
+ }
+}
+
+unsigned ExplodedNode::NodeGroup::size() const {
+ if (getFlag())
+ return 0;
+
+ if (getKind() == Size1)
+ return getNode() ? 1 : 0;
+ else
+ return getVector(getPtr()).size();
+}
+
+ExplodedNode **ExplodedNode::NodeGroup::begin() const {
+ if (getFlag())
+ return NULL;
+
+ if (getKind() == Size1)
+ return (ExplodedNode**) (getPtr() ? &P : NULL);
+ else
+ return const_cast<ExplodedNode**>(&*(getVector(getPtr()).begin()));
+}
+
+ExplodedNode** ExplodedNode::NodeGroup::end() const {
+ if (getFlag())
+ return NULL;
+
+ if (getKind() == Size1)
+ return (ExplodedNode**) (getPtr() ? &P+1 : NULL);
+ else {
+ // Dereferencing end() is undefined behaviour. The vector is not empty, so
+ // we can dereference the last elem and then add 1 to the result.
+ return const_cast<ExplodedNode**>(getVector(getPtr()).end());
+ }
+}
+
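+// Nodes are uniqued on (ProgramPoint, State, IsSink); previously reclaimed
+// nodes on FreeNodes are reused before new storage is allocated.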
+ExplodedNode *ExplodedGraph::getNode(const ProgramPoint &L,
+ ProgramStateRef State,
+ bool IsSink,
+ bool* IsNew) {
+ // Profile 'State' to determine if we already have an existing node.
+ llvm::FoldingSetNodeID profile;
+ void *InsertPos = 0;
+
+ NodeTy::Profile(profile, L, State, IsSink);
+ NodeTy* V = Nodes.FindNodeOrInsertPos(profile, InsertPos);
+
+ if (!V) {
+ if (!FreeNodes.empty()) {
+ V = FreeNodes.back();
+ FreeNodes.pop_back();
+ }
+ else {
+ // Allocate a new node.
+ V = (NodeTy*) getAllocator().Allocate<NodeTy>();
+ }
+
+ new (V) NodeTy(L, State, IsSink);
+
+ if (reclaimNodes)
+ ChangedNodes.push_back(V);
+
+ // Insert the node into the node set and return it.
+ Nodes.InsertNode(V, InsertPos);
+ ++NumNodes;
+
+ if (IsNew) *IsNew = true;
+ }
+ else
+ if (IsNew) *IsNew = false;
+
+ return V;
+}
+
+std::pair<ExplodedGraph*, InterExplodedGraphMap*>
+ExplodedGraph::Trim(const NodeTy* const* NBeg, const NodeTy* const* NEnd,
+ llvm::DenseMap<const void*, const void*> *InverseMap) const {
+
+ if (NBeg == NEnd)
+ return std::make_pair((ExplodedGraph*) 0,
+ (InterExplodedGraphMap*) 0);
+
+ assert (NBeg < NEnd);
+
+ OwningPtr<InterExplodedGraphMap> M(new InterExplodedGraphMap());
+
+ ExplodedGraph* G = TrimInternal(NBeg, NEnd, M.get(), InverseMap);
+
+ return std::make_pair(static_cast<ExplodedGraph*>(G), M.take());
+}
+
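+// Trim the graph down to the nodes that can reach the given sources.  Pass 1
+// walks backwards from the sources, marking every reachable ancestor and
+// collecting the roots; pass 2 walks forward from those roots, recreating
+// only the marked nodes and their edges in a fresh graph.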
+ExplodedGraph*
+ExplodedGraph::TrimInternal(const ExplodedNode* const* BeginSources,
+ const ExplodedNode* const* EndSources,
+ InterExplodedGraphMap* M,
+ llvm::DenseMap<const void*, const void*> *InverseMap) const {
+
+ typedef llvm::DenseSet<const ExplodedNode*> Pass1Ty;
+ Pass1Ty Pass1;
+
+ typedef llvm::DenseMap<const ExplodedNode*, ExplodedNode*> Pass2Ty;
+ Pass2Ty& Pass2 = M->M;
+
+ SmallVector<const ExplodedNode*, 10> WL1, WL2;
+
+ // ===- Pass 1 (reverse DFS) -===
+ for (const ExplodedNode* const* I = BeginSources; I != EndSources; ++I) {
+ assert(*I);
+ WL1.push_back(*I);
+ }
+
+ // Process the first worklist until it is empty.  WL1 is used as a stack
+ // (push_back/pop_back), so this is a depth-first walk over the predecessors.
+ while (!WL1.empty()) {
+ const ExplodedNode *N = WL1.back();
+ WL1.pop_back();
+
+ // Have we already visited this node? If so, continue to the next one.
+ if (Pass1.count(N))
+ continue;
+
+ // Otherwise, mark this node as visited.
+ Pass1.insert(N);
+
+ // If this is a root enqueue it to the second worklist.
+ if (N->Preds.empty()) {
+ WL2.push_back(N);
+ continue;
+ }
+
+ // Visit our predecessors and enqueue them.
+ for (ExplodedNode** I=N->Preds.begin(), **E=N->Preds.end(); I!=E; ++I)
+ WL1.push_back(*I);
+ }
+
+ // We didn't hit a root? Return with a null pointer for the new graph.
+ if (WL2.empty())
+ return 0;
+
+ // Create an empty graph.
+ ExplodedGraph* G = MakeEmptyGraph();
+
+ // ===- Pass 2 (forward DFS to construct the new graph) -===
+ while (!WL2.empty()) {
+ const ExplodedNode *N = WL2.back();
+ WL2.pop_back();
+
+ // Skip this node if we have already processed it.
+ if (Pass2.find(N) != Pass2.end())
+ continue;
+
+ // Create the corresponding node in the new graph and record the mapping
+ // from the old node to the new node.
+ ExplodedNode *NewN = G->getNode(N->getLocation(), N->State, N->isSink(), 0);
+ Pass2[N] = NewN;
+
+ // Also record the reverse mapping from the new node to the old node.
+ if (InverseMap) (*InverseMap)[NewN] = N;
+
+ // If this node is a root, designate it as such in the graph.
+ if (N->Preds.empty())
+ G->addRoot(NewN);
+
+ // In the case that some of the intended predecessors of NewN have already
+ // been created, we should hook them up as predecessors.
+
+ // Walk through the predecessors of 'N' and hook up their corresponding
+ // nodes in the new graph (if any) to the freshly created node.
+ for (ExplodedNode **I=N->Preds.begin(), **E=N->Preds.end(); I!=E; ++I) {
+ Pass2Ty::iterator PI = Pass2.find(*I);
+ if (PI == Pass2.end())
+ continue;
+
+ NewN->addPredecessor(PI->second, *G);
+ }
+
+ // In the case that some of the intended successors of NewN have already
+ // been created, we should hook them up as successors. Otherwise, enqueue
+ // the new nodes from the original graph that should have nodes created
+ // in the new graph.
+ for (ExplodedNode **I=N->Succs.begin(), **E=N->Succs.end(); I!=E; ++I) {
+ Pass2Ty::iterator PI = Pass2.find(*I);
+ if (PI != Pass2.end()) {
+ PI->second->addPredecessor(NewN, *G);
+ continue;
+ }
+
+ // Enqueue nodes to the worklist that were marked during pass 1.
+ if (Pass1.count(*I))
+ WL2.push_back(*I);
+ }
+ }
+
+ return G;
+}
+
+void InterExplodedGraphMap::anchor() { }
+
+ExplodedNode*
+InterExplodedGraphMap::getMappedNode(const ExplodedNode *N) const {
+ llvm::DenseMap<const ExplodedNode*, ExplodedNode*>::const_iterator I =
+ M.find(N);
+
+ return I == M.end() ? 0 : I->second;
+}
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
new file mode 100644
index 0000000..d2da9aa
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -0,0 +1,2075 @@
+//=-- ExprEngine.cpp - Path-Sensitive Expression-Level Dataflow ---*- C++ -*-=
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a meta-engine for path-sensitive dataflow analysis that
+// is built on GREngine, but provides the boilerplate to execute transfer
+// functions and build the ExplodedGraph at the expression level.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "ExprEngine"
+
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtObjC.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/Basic/Builtins.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/PrettyStackTrace.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/ImmutableList.h"
+#include "llvm/ADT/Statistic.h"
+
+#ifndef NDEBUG
+#include "llvm/Support/GraphWriter.h"
+#endif
+
+using namespace clang;
+using namespace ento;
+using llvm::APSInt;
+
+STATISTIC(NumRemoveDeadBindings,
+ "The # of times RemoveDeadBindings is called");
+STATISTIC(NumRemoveDeadBindingsSkipped,
+ "The # of times RemoveDeadBindings is skipped");
+STATISTIC(NumMaxBlockCountReached,
+ "The # of aborted paths due to reaching the maximum block count in "
+ "a top level function");
+STATISTIC(NumMaxBlockCountReachedInInlined,
+ "The # of aborted paths due to reaching the maximum block count in "
+ "an inlined function");
+STATISTIC(NumTimesRetriedWithoutInlining,
+ "The # of times we re-evaluated a call without inlining");
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
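+// Build the Selector for a nullary (zero-argument) Objective-C message, e.g.
+// the "raise" selector used below.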
+static inline Selector GetNullarySelector(const char* name, ASTContext &Ctx) {
+ IdentifierInfo* II = &Ctx.Idents.get(name);
+ return Ctx.Selectors.getSelector(0, &II);
+}
+
+//===----------------------------------------------------------------------===//
+// Engine construction and deletion.
+//===----------------------------------------------------------------------===//
+
+ExprEngine::ExprEngine(AnalysisManager &mgr, bool gcEnabled,
+ SetOfConstDecls *VisitedCallees,
+ FunctionSummariesTy *FS)
+ : AMgr(mgr),
+ AnalysisDeclContexts(mgr.getAnalysisDeclContextManager()),
+ Engine(*this, VisitedCallees, FS),
+ G(Engine.getGraph()),
+ StateMgr(getContext(), mgr.getStoreManagerCreator(),
+ mgr.getConstraintManagerCreator(), G.getAllocator(),
+ *this),
+ SymMgr(StateMgr.getSymbolManager()),
+ svalBuilder(StateMgr.getSValBuilder()),
+ EntryNode(NULL),
+ currentStmt(NULL), currentStmtIdx(0), currentBuilderContext(0),
+ NSExceptionII(NULL), NSExceptionInstanceRaiseSelectors(NULL),
+ RaiseSel(GetNullarySelector("raise", getContext())),
+ ObjCGCEnabled(gcEnabled), BR(mgr, *this) {
+
+ if (mgr.shouldEagerlyTrimExplodedGraph()) {
+ // Enable eager node reclamation when constructing the ExplodedGraph.
+ G.enableNodeReclamation();
+ }
+}
+
+ExprEngine::~ExprEngine() {
+ BR.FlushReports();
+ delete [] NSExceptionInstanceRaiseSelectors;
+}
+
+//===----------------------------------------------------------------------===//
+// Utility methods.
+//===----------------------------------------------------------------------===//
+
+ProgramStateRef ExprEngine::getInitialState(const LocationContext *InitLoc) {
+ ProgramStateRef state = StateMgr.getInitialState(InitLoc);
+ const Decl *D = InitLoc->getDecl();
+
+ // Preconditions.
+ // FIXME: It would be nice if we had a more general mechanism to add
+ // such preconditions. Some day.
+ do {
+
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ // Precondition: the first argument of 'main' is an integer guaranteed
+ // to be > 0.
+ const IdentifierInfo *II = FD->getIdentifier();
+ if (!II || !(II->getName() == "main" && FD->getNumParams() > 0))
+ break;
+
+ const ParmVarDecl *PD = FD->getParamDecl(0);
+ QualType T = PD->getType();
+ if (!T->isIntegerType())
+ break;
+
+ const MemRegion *R = state->getRegion(PD, InitLoc);
+ if (!R)
+ break;
+
+ SVal V = state->getSVal(loc::MemRegionVal(R));
+ SVal Constraint_untested = evalBinOp(state, BO_GT, V,
+ svalBuilder.makeZeroVal(T),
+ getContext().IntTy);
+
+ DefinedOrUnknownSVal *Constraint =
+ dyn_cast<DefinedOrUnknownSVal>(&Constraint_untested);
+
+ if (!Constraint)
+ break;
+
+ if (ProgramStateRef newState = state->assume(*Constraint, true))
+ state = newState;
+ }
+ break;
+ }
+ while (0);
+
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ // Precondition: 'self' is always non-null upon entry to an Objective-C
+ // method.
+ const ImplicitParamDecl *SelfD = MD->getSelfDecl();
+ const MemRegion *R = state->getRegion(SelfD, InitLoc);
+ SVal V = state->getSVal(loc::MemRegionVal(R));
+
+ if (const Loc *LV = dyn_cast<Loc>(&V)) {
+ // Assume that the pointer value in 'self' is non-null.
+ state = state->assume(*LV, true);
+ assert(state && "'self' cannot be null");
+ }
+ }
+
+ if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
+ if (!MD->isStatic()) {
+ // Precondition: 'this' is always non-null upon entry to the
+ // top-level function. This is our starting assumption for
+ // analyzing an "open" program.
+ const StackFrameContext *SFC = InitLoc->getCurrentStackFrame();
+ if (SFC->getParent() == 0) {
+ loc::MemRegionVal L(getCXXThisRegion(MD, SFC));
+ SVal V = state->getSVal(L);
+ if (const Loc *LV = dyn_cast<Loc>(&V)) {
+ state = state->assume(*LV, true);
+ assert(state && "'this' cannot be null");
+ }
+ }
+ }
+ }
+
+ return state;
+}
+
+//===----------------------------------------------------------------------===//
+// Top-level transfer function logic (Dispatcher).
+//===----------------------------------------------------------------------===//
+
+/// processAssume - Called by ConstraintManager. Used to call checker-specific
+/// logic for handling assumptions on symbolic values.
+ProgramStateRef ExprEngine::processAssume(ProgramStateRef state,
+ SVal cond, bool assumption) {
+ return getCheckerManager().runCheckersForEvalAssume(state, cond, assumption);
+}
+
+bool ExprEngine::wantsRegionChangeUpdate(ProgramStateRef state) {
+ return getCheckerManager().wantsRegionChangeUpdate(state);
+}
+
+ProgramStateRef
+ExprEngine::processRegionChanges(ProgramStateRef state,
+ const StoreManager::InvalidatedSymbols *invalidated,
+ ArrayRef<const MemRegion *> Explicits,
+ ArrayRef<const MemRegion *> Regions,
+ const CallOrObjCMessage *Call) {
+ return getCheckerManager().runCheckersForRegionChanges(state, invalidated,
+ Explicits, Regions, Call);
+}
+
+void ExprEngine::printState(raw_ostream &Out, ProgramStateRef State,
+ const char *NL, const char *Sep) {
+ getCheckerManager().runCheckersForPrintState(Out, State, NL, Sep);
+}
+
+void ExprEngine::processEndWorklist(bool hasWorkRemaining) {
+ getCheckerManager().runCheckersForEndAnalysis(G, BR, *this);
+}
+
+void ExprEngine::processCFGElement(const CFGElement E, ExplodedNode *Pred,
+ unsigned StmtIdx, NodeBuilderContext *Ctx) {
+ currentStmtIdx = StmtIdx;
+ currentBuilderContext = Ctx;
+
+ switch (E.getKind()) {
+ case CFGElement::Invalid:
+ llvm_unreachable("Unexpected CFGElement kind.");
+ case CFGElement::Statement:
+ ProcessStmt(const_cast<Stmt*>(E.getAs<CFGStmt>()->getStmt()), Pred);
+ return;
+ case CFGElement::Initializer:
+ ProcessInitializer(E.getAs<CFGInitializer>()->getInitializer(), Pred);
+ return;
+ case CFGElement::AutomaticObjectDtor:
+ case CFGElement::BaseDtor:
+ case CFGElement::MemberDtor:
+ case CFGElement::TemporaryDtor:
+ ProcessImplicitDtor(*E.getAs<CFGImplicitDtor>(), Pred);
+ return;
+ }
+}
+
+static bool shouldRemoveDeadBindings(AnalysisManager &AMgr,
+ const CFGStmt S,
+ const ExplodedNode *Pred,
+ const LocationContext *LC) {
+
+ // Are we never purging state values?
+ if (AMgr.getPurgeMode() == PurgeNone)
+ return false;
+
+ // Is this the beginning of a basic block?
+ if (isa<BlockEntrance>(Pred->getLocation()))
+ return true;
+
+ // Is this a non-expression?
+ if (!isa<Expr>(S.getStmt()))
+ return true;
+
+ // Run before processing a call.
+ if (isa<CallExpr>(S.getStmt()))
+ return true;
+
+ // Is this an expression that is consumed by another expression? If so,
+ // postpone cleaning out the state.
+ ParentMap &PM = LC->getAnalysisDeclContext()->getParentMap();
+ return !PM.isConsumedExpr(cast<Expr>(S.getStmt()));
+}
+
+void ExprEngine::ProcessStmt(const CFGStmt S,
+ ExplodedNode *Pred) {
+ // Reclaim any unnecessary nodes in the ExplodedGraph.
+ G.reclaimRecentlyAllocatedNodes();
+
+ currentStmt = S.getStmt();
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ currentStmt->getLocStart(),
+ "Error evaluating statement");
+
+ EntryNode = Pred;
+
+ ProgramStateRef EntryState = EntryNode->getState();
+ CleanedState = EntryState;
+
+ // Create the cleaned state.
+ const LocationContext *LC = EntryNode->getLocationContext();
+ SymbolReaper SymReaper(LC, currentStmt, SymMgr, getStoreManager());
+
+ if (shouldRemoveDeadBindings(AMgr, S, Pred, LC)) {
+ NumRemoveDeadBindings++;
+ getCheckerManager().runCheckersForLiveSymbols(CleanedState, SymReaper);
+
+ const StackFrameContext *SFC = LC->getCurrentStackFrame();
+
+ // Create a state in which dead bindings are removed from the environment
+ // and the store. TODO: The function should just return new env and store,
+ // not a new state.
+ CleanedState = StateMgr.removeDeadBindings(CleanedState, SFC, SymReaper);
+ } else {
+ NumRemoveDeadBindingsSkipped++;
+ }
+
+ // Process any special transfer function for dead symbols.
+ ExplodedNodeSet Tmp;
+ // A tag to track convenience transitions, which can be removed at cleanup.
+ static SimpleProgramPointTag cleanupTag("ExprEngine : Clean Node");
+
+ if (!SymReaper.hasDeadSymbols()) {
+ // Generate a CleanedNode that has the environment and store cleaned
+ // up. Since no symbols are dead, we can optimize and not clean out
+ // the constraint manager.
+ StmtNodeBuilder Bldr(Pred, Tmp, *currentBuilderContext);
+ Bldr.generateNode(currentStmt, EntryNode, CleanedState, false, &cleanupTag);
+
+ } else {
+ // Call checkers with the non-cleaned state so that they could query the
+ // values of the soon to be dead symbols.
+ ExplodedNodeSet CheckedSet;
+ getCheckerManager().runCheckersForDeadSymbols(CheckedSet, EntryNode,
+ SymReaper, currentStmt, *this);
+
+ // For each node in CheckedSet, generate CleanedNodes that have the
+ // environment, the store, and the constraints cleaned up but have the
+ // user-supplied states as the predecessors.
+ StmtNodeBuilder Bldr(CheckedSet, Tmp, *currentBuilderContext);
+ for (ExplodedNodeSet::const_iterator
+ I = CheckedSet.begin(), E = CheckedSet.end(); I != E; ++I) {
+ ProgramStateRef CheckerState = (*I)->getState();
+
+ // The constraint manager has not been cleaned up yet, so clean up now.
+ CheckerState = getConstraintManager().removeDeadBindings(CheckerState,
+ SymReaper);
+
+ assert(StateMgr.haveEqualEnvironments(CheckerState, EntryState) &&
+ "Checkers are not allowed to modify the Environment as a part of "
+ "checkDeadSymbols processing.");
+ assert(StateMgr.haveEqualStores(CheckerState, EntryState) &&
+ "Checkers are not allowed to modify the Store as a part of "
+ "checkDeadSymbols processing.");
+
+ // Create a state based on CleanedState with CheckerState GDM and
+ // generate a transition to that state.
+ ProgramStateRef CleanedCheckerSt =
+ StateMgr.getPersistentStateWithGDM(CleanedState, CheckerState);
+ Bldr.generateNode(currentStmt, *I, CleanedCheckerSt, false, &cleanupTag,
+ ProgramPoint::PostPurgeDeadSymbolsKind);
+ }
+ }
+
+ ExplodedNodeSet Dst;
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+ ExplodedNodeSet DstI;
+ // Visit the statement.
+ Visit(currentStmt, *I, DstI);
+ Dst.insert(DstI);
+ }
+
+ // Enqueue the new nodes onto the work list.
+ Engine.enqueue(Dst, currentBuilderContext->getBlock(), currentStmtIdx);
+
+ // NULL out these variables to cleanup.
+ CleanedState = NULL;
+ EntryNode = NULL;
+ currentStmt = 0;
+}
+
+void ExprEngine::ProcessInitializer(const CFGInitializer Init,
+ ExplodedNode *Pred) {
+ ExplodedNodeSet Dst;
+
+ // We don't set EntryNode and currentStmt. And we don't clean up state.
+ const CXXCtorInitializer *BMI = Init.getInitializer();
+ const StackFrameContext *stackFrame =
+ cast<StackFrameContext>(Pred->getLocationContext());
+ const CXXConstructorDecl *decl =
+ cast<CXXConstructorDecl>(stackFrame->getDecl());
+ const CXXThisRegion *thisReg = getCXXThisRegion(decl, stackFrame);
+
+ SVal thisVal = Pred->getState()->getSVal(thisReg);
+
+ if (BMI->isAnyMemberInitializer()) {
+ // Evaluate the initializer.
+
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ ProgramStateRef state = Pred->getState();
+
+ const FieldDecl *FD = BMI->getAnyMember();
+
+ SVal FieldLoc = state->getLValue(FD, thisVal);
+ SVal InitVal = state->getSVal(BMI->getInit(), Pred->getLocationContext());
+ state = state->bindLoc(FieldLoc, InitVal);
+
+ // Use a custom node building process.
+ PostInitializer PP(BMI, stackFrame);
+ // The builder automatically adds the generated node to the deferred set,
+ // which is processed in the builder's dtor.
+ Bldr.generateNode(PP, Pred, state);
+ } else {
+ assert(BMI->isBaseInitializer());
+
+ // Get the base class declaration.
+ const CXXConstructExpr *ctorExpr = cast<CXXConstructExpr>(BMI->getInit());
+
+ // Create the base object region.
+ SVal baseVal =
+ getStoreManager().evalDerivedToBase(thisVal, ctorExpr->getType());
+ const MemRegion *baseReg = baseVal.getAsRegion();
+ assert(baseReg);
+
+ VisitCXXConstructExpr(ctorExpr, baseReg, Pred, Dst);
+ }
+
+ // Enqueue the new nodes onto the work list.
+ Engine.enqueue(Dst, currentBuilderContext->getBlock(), currentStmtIdx);
+}
+
+void ExprEngine::ProcessImplicitDtor(const CFGImplicitDtor D,
+ ExplodedNode *Pred) {
+ ExplodedNodeSet Dst;
+ switch (D.getKind()) {
+ case CFGElement::AutomaticObjectDtor:
+ ProcessAutomaticObjDtor(cast<CFGAutomaticObjDtor>(D), Pred, Dst);
+ break;
+ case CFGElement::BaseDtor:
+ ProcessBaseDtor(cast<CFGBaseDtor>(D), Pred, Dst);
+ break;
+ case CFGElement::MemberDtor:
+ ProcessMemberDtor(cast<CFGMemberDtor>(D), Pred, Dst);
+ break;
+ case CFGElement::TemporaryDtor:
+ ProcessTemporaryDtor(cast<CFGTemporaryDtor>(D), Pred, Dst);
+ break;
+ default:
+ llvm_unreachable("Unexpected dtor kind.");
+ }
+
+ // Enqueue the new nodes onto the work list.
+ Engine.enqueue(Dst, currentBuilderContext->getBlock(), currentStmtIdx);
+}
+
+void ExprEngine::ProcessAutomaticObjDtor(const CFGAutomaticObjDtor Dtor,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ ProgramStateRef state = Pred->getState();
+ const VarDecl *varDecl = Dtor.getVarDecl();
+
+ QualType varType = varDecl->getType();
+
+ if (const ReferenceType *refType = varType->getAs<ReferenceType>())
+ varType = refType->getPointeeType();
+
+ const CXXRecordDecl *recordDecl = varType->getAsCXXRecordDecl();
+ assert(recordDecl && "getAsCXXRecordDecl() failed");
+ const CXXDestructorDecl *dtorDecl = recordDecl->getDestructor();
+
+ Loc dest = state->getLValue(varDecl, Pred->getLocationContext());
+
+ VisitCXXDestructor(dtorDecl, cast<loc::MemRegionVal>(dest).getRegion(),
+ Dtor.getTriggerStmt(), Pred, Dst);
+}
+
+void ExprEngine::ProcessBaseDtor(const CFGBaseDtor D,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {}
+
+void ExprEngine::ProcessMemberDtor(const CFGMemberDtor D,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {}
+
+void ExprEngine::ProcessTemporaryDtor(const CFGTemporaryDtor D,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {}
+
+void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
+ ExplodedNodeSet &DstTop) {
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ S->getLocStart(),
+ "Error evaluating statement");
+ ExplodedNodeSet Dst;
+ StmtNodeBuilder Bldr(Pred, DstTop, *currentBuilderContext);
+
+ // Expressions to ignore.
+ if (const Expr *Ex = dyn_cast<Expr>(S))
+ S = Ex->IgnoreParens();
+
+ // FIXME: add metadata to the CFG so that we can disable
+ // this check when we KNOW that there is no block-level subexpression.
+ // The motivation is that this check requires a hashtable lookup.
+
+ if (S != currentStmt && Pred->getLocationContext()->getCFG()->isBlkExpr(S))
+ return;
+
+ switch (S->getStmtClass()) {
+ // C++ and ARC stuff we don't support yet.
+ case Expr::ObjCIndirectCopyRestoreExprClass:
+ case Stmt::CXXDependentScopeMemberExprClass:
+ case Stmt::CXXPseudoDestructorExprClass:
+ case Stmt::CXXTryStmtClass:
+ case Stmt::CXXTypeidExprClass:
+ case Stmt::CXXUuidofExprClass:
+ case Stmt::CXXUnresolvedConstructExprClass:
+ case Stmt::CXXScalarValueInitExprClass:
+ case Stmt::DependentScopeDeclRefExprClass:
+ case Stmt::UnaryTypeTraitExprClass:
+ case Stmt::BinaryTypeTraitExprClass:
+ case Stmt::TypeTraitExprClass:
+ case Stmt::ArrayTypeTraitExprClass:
+ case Stmt::ExpressionTraitExprClass:
+ case Stmt::UnresolvedLookupExprClass:
+ case Stmt::UnresolvedMemberExprClass:
+ case Stmt::CXXNoexceptExprClass:
+ case Stmt::PackExpansionExprClass:
+ case Stmt::SubstNonTypeTemplateParmPackExprClass:
+ case Stmt::SEHTryStmtClass:
+ case Stmt::SEHExceptStmtClass:
+ case Stmt::LambdaExprClass:
+ case Stmt::SEHFinallyStmtClass: {
+ const ExplodedNode *node = Bldr.generateNode(S, Pred, Pred->getState(),
+ /* sink */ true);
+ Engine.addAbortedBlock(node, currentBuilderContext->getBlock());
+ break;
+ }
+
+ // We don't handle default arguments either yet, but we can fake it
+ // for now by just skipping them.
+ case Stmt::SubstNonTypeTemplateParmExprClass:
+ case Stmt::CXXDefaultArgExprClass:
+ break;
+
+ case Stmt::ParenExprClass:
+ llvm_unreachable("ParenExprs already handled.");
+ case Stmt::GenericSelectionExprClass:
+ llvm_unreachable("GenericSelectionExprs already handled.");
+ // Cases that should never be evaluated simply because they shouldn't
+ // appear in the CFG.
+ case Stmt::BreakStmtClass:
+ case Stmt::CaseStmtClass:
+ case Stmt::CompoundStmtClass:
+ case Stmt::ContinueStmtClass:
+ case Stmt::CXXForRangeStmtClass:
+ case Stmt::DefaultStmtClass:
+ case Stmt::DoStmtClass:
+ case Stmt::ForStmtClass:
+ case Stmt::GotoStmtClass:
+ case Stmt::IfStmtClass:
+ case Stmt::IndirectGotoStmtClass:
+ case Stmt::LabelStmtClass:
+ case Stmt::NoStmtClass:
+ case Stmt::NullStmtClass:
+ case Stmt::SwitchStmtClass:
+ case Stmt::WhileStmtClass:
+ case Expr::MSDependentExistsStmtClass:
+ llvm_unreachable("Stmt should not be in analyzer evaluation loop");
+
+ case Stmt::GNUNullExprClass: {
+ // GNU __null is a pointer-width integer, not an actual pointer.
+ ProgramStateRef state = Pred->getState();
+ state = state->BindExpr(S, Pred->getLocationContext(),
+ svalBuilder.makeIntValWithPtrWidth(0, false));
+ Bldr.generateNode(S, Pred, state);
+ break;
+ }
+
+ case Stmt::ObjCAtSynchronizedStmtClass:
+ Bldr.takeNodes(Pred);
+ VisitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ // FIXME.
+ case Stmt::ObjCSubscriptRefExprClass:
+ break;
+
+ case Stmt::ObjCPropertyRefExprClass:
+ // Implicitly handled by Environment::getSVal().
+ break;
+
+ case Stmt::ImplicitValueInitExprClass: {
+ ProgramStateRef state = Pred->getState();
+ QualType ty = cast<ImplicitValueInitExpr>(S)->getType();
+ SVal val = svalBuilder.makeZeroVal(ty);
+ Bldr.generateNode(S, Pred, state->BindExpr(S, Pred->getLocationContext(),
+ val));
+ break;
+ }
+
+ case Stmt::ExprWithCleanupsClass:
+ // Handled due to fully linearised CFG.
+ break;
+
+ // Cases not handled yet; but will handle some day.
+ case Stmt::DesignatedInitExprClass:
+ case Stmt::ExtVectorElementExprClass:
+ case Stmt::ImaginaryLiteralClass:
+ case Stmt::ObjCAtCatchStmtClass:
+ case Stmt::ObjCAtFinallyStmtClass:
+ case Stmt::ObjCAtTryStmtClass:
+ case Stmt::ObjCAutoreleasePoolStmtClass:
+ case Stmt::ObjCEncodeExprClass:
+ case Stmt::ObjCIsaExprClass:
+ case Stmt::ObjCProtocolExprClass:
+ case Stmt::ObjCSelectorExprClass:
+ case Expr::ObjCNumericLiteralClass:
+ case Stmt::ParenListExprClass:
+ case Stmt::PredefinedExprClass:
+ case Stmt::ShuffleVectorExprClass:
+ case Stmt::VAArgExprClass:
+ case Stmt::CUDAKernelCallExprClass:
+ case Stmt::OpaqueValueExprClass:
+ case Stmt::AsTypeExprClass:
+ case Stmt::AtomicExprClass:
+ // Fall through.
+
+ // Currently all handling of 'throw' just falls to the CFG. We
+ // can consider doing more if necessary.
+ case Stmt::CXXThrowExprClass:
+ // Fall through.
+
+ // Cases we intentionally don't evaluate, since they don't need
+ // to be explicitly evaluated.
+ case Stmt::AddrLabelExprClass:
+ case Stmt::IntegerLiteralClass:
+ case Stmt::CharacterLiteralClass:
+ case Stmt::CXXBoolLiteralExprClass:
+ case Stmt::ObjCBoolLiteralExprClass:
+ case Stmt::FloatingLiteralClass:
+ case Stmt::SizeOfPackExprClass:
+ case Stmt::StringLiteralClass:
+ case Stmt::ObjCStringLiteralClass:
+ case Stmt::CXXBindTemporaryExprClass:
+ case Stmt::CXXNullPtrLiteralExprClass: {
+ Bldr.takeNodes(Pred);
+ ExplodedNodeSet preVisit;
+ getCheckerManager().runCheckersForPreStmt(preVisit, Pred, S, *this);
+ getCheckerManager().runCheckersForPostStmt(Dst, preVisit, S, *this);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Expr::ObjCArrayLiteralClass:
+ case Expr::ObjCDictionaryLiteralClass: {
+ Bldr.takeNodes(Pred);
+
+ ExplodedNodeSet preVisit;
+ getCheckerManager().runCheckersForPreStmt(preVisit, Pred, S, *this);
+
+ // FIXME: explicitly model with a region and the actual contents
+ // of the container. For now, conjure a symbol.
+ ExplodedNodeSet Tmp;
+ StmtNodeBuilder Bldr2(preVisit, Tmp, *currentBuilderContext);
+
+ for (ExplodedNodeSet::iterator it = preVisit.begin(), et = preVisit.end();
+ it != et; ++it) {
+ ExplodedNode *N = *it;
+ const Expr *Ex = cast<Expr>(S);
+ QualType resultType = Ex->getType();
+ const LocationContext *LCtx = N->getLocationContext();
+ SVal result =
+ svalBuilder.getConjuredSymbolVal(0, Ex, LCtx, resultType,
+ currentBuilderContext->getCurrentBlockCount());
+ ProgramStateRef state = N->getState()->BindExpr(Ex, LCtx, result);
+ Bldr2.generateNode(S, N, state);
+ }
+
+ getCheckerManager().runCheckersForPostStmt(Dst, Tmp, S, *this);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::ArraySubscriptExprClass:
+ Bldr.takeNodes(Pred);
+ VisitLvalArraySubscriptExpr(cast<ArraySubscriptExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::AsmStmtClass:
+ Bldr.takeNodes(Pred);
+ VisitAsmStmt(cast<AsmStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::BlockExprClass:
+ Bldr.takeNodes(Pred);
+ VisitBlockExpr(cast<BlockExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::BinaryOperatorClass: {
+ const BinaryOperator* B = cast<BinaryOperator>(S);
+ if (B->isLogicalOp()) {
+ Bldr.takeNodes(Pred);
+ VisitLogicalExpr(B, Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+ else if (B->getOpcode() == BO_Comma) {
+ ProgramStateRef state = Pred->getState();
+ Bldr.generateNode(B, Pred,
+ state->BindExpr(B, Pred->getLocationContext(),
+ state->getSVal(B->getRHS(),
+ Pred->getLocationContext())));
+ break;
+ }
+
+ Bldr.takeNodes(Pred);
+
+ if (AMgr.shouldEagerlyAssume() &&
+ (B->isRelationalOp() || B->isEqualityOp())) {
+ ExplodedNodeSet Tmp;
+ VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Tmp);
+ evalEagerlyAssume(Dst, Tmp, cast<Expr>(S));
+ }
+ else
+ VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst);
+
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::CallExprClass:
+ case Stmt::CXXOperatorCallExprClass:
+ case Stmt::CXXMemberCallExprClass:
+ case Stmt::UserDefinedLiteralClass: {
+ Bldr.takeNodes(Pred);
+ VisitCallExpr(cast<CallExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::CXXCatchStmtClass: {
+ Bldr.takeNodes(Pred);
+ VisitCXXCatchStmt(cast<CXXCatchStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::CXXTemporaryObjectExprClass:
+ case Stmt::CXXConstructExprClass: {
+ const CXXConstructExpr *C = cast<CXXConstructExpr>(S);
+ // For block-level CXXConstructExpr, we don't have a destination region.
+ // Let VisitCXXConstructExpr() create one.
+ Bldr.takeNodes(Pred);
+ VisitCXXConstructExpr(C, 0, Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::CXXNewExprClass: {
+ Bldr.takeNodes(Pred);
+ const CXXNewExpr *NE = cast<CXXNewExpr>(S);
+ VisitCXXNewExpr(NE, Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::CXXDeleteExprClass: {
+ Bldr.takeNodes(Pred);
+ const CXXDeleteExpr *CDE = cast<CXXDeleteExpr>(S);
+ VisitCXXDeleteExpr(CDE, Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+ // FIXME: ChooseExpr is really a constant.  We need to fix the CFG so
+ // that it does not model them as explicit control-flow.
+
+ case Stmt::ChooseExprClass: { // __builtin_choose_expr
+ Bldr.takeNodes(Pred);
+ const ChooseExpr *C = cast<ChooseExpr>(S);
+ VisitGuardedExpr(C, C->getLHS(), C->getRHS(), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::CompoundAssignOperatorClass:
+ Bldr.takeNodes(Pred);
+ VisitBinaryOperator(cast<BinaryOperator>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::CompoundLiteralExprClass:
+ Bldr.takeNodes(Pred);
+ VisitCompoundLiteralExpr(cast<CompoundLiteralExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass: { // '?' operator
+ Bldr.takeNodes(Pred);
+ const AbstractConditionalOperator *C
+ = cast<AbstractConditionalOperator>(S);
+ VisitGuardedExpr(C, C->getTrueExpr(), C->getFalseExpr(), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::CXXThisExprClass:
+ Bldr.takeNodes(Pred);
+ VisitCXXThisExpr(cast<CXXThisExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::DeclRefExprClass: {
+ Bldr.takeNodes(Pred);
+ const DeclRefExpr *DE = cast<DeclRefExpr>(S);
+ VisitCommonDeclRefExpr(DE, DE->getDecl(), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::DeclStmtClass:
+ Bldr.takeNodes(Pred);
+ VisitDeclStmt(cast<DeclStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::ImplicitCastExprClass:
+ case Stmt::CStyleCastExprClass:
+ case Stmt::CXXStaticCastExprClass:
+ case Stmt::CXXDynamicCastExprClass:
+ case Stmt::CXXReinterpretCastExprClass:
+ case Stmt::CXXConstCastExprClass:
+ case Stmt::CXXFunctionalCastExprClass:
+ case Stmt::ObjCBridgedCastExprClass: {
+ Bldr.takeNodes(Pred);
+ const CastExpr *C = cast<CastExpr>(S);
+ // Handle the previsit checks.
+ ExplodedNodeSet dstPrevisit;
+ getCheckerManager().runCheckersForPreStmt(dstPrevisit, Pred, C, *this);
+
+ // Handle the expression itself.
+ ExplodedNodeSet dstExpr;
+ for (ExplodedNodeSet::iterator i = dstPrevisit.begin(),
+ e = dstPrevisit.end(); i != e ; ++i) {
+ VisitCast(C, C->getSubExpr(), *i, dstExpr);
+ }
+
+ // Handle the postvisit checks.
+ getCheckerManager().runCheckersForPostStmt(Dst, dstExpr, C, *this);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Expr::MaterializeTemporaryExprClass: {
+ Bldr.takeNodes(Pred);
+ const MaterializeTemporaryExpr *Materialize
+ = cast<MaterializeTemporaryExpr>(S);
+ if (Materialize->getType()->isRecordType())
+ Dst.Add(Pred);
+ else
+ CreateCXXTemporaryObject(Materialize, Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::InitListExprClass:
+ Bldr.takeNodes(Pred);
+ VisitInitListExpr(cast<InitListExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::MemberExprClass:
+ Bldr.takeNodes(Pred);
+ VisitMemberExpr(cast<MemberExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::ObjCIvarRefExprClass:
+ Bldr.takeNodes(Pred);
+ VisitLvalObjCIvarRefExpr(cast<ObjCIvarRefExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::ObjCForCollectionStmtClass:
+ Bldr.takeNodes(Pred);
+ VisitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::ObjCMessageExprClass: {
+ Bldr.takeNodes(Pred);
+ // Is this a property access?
+ const ParentMap &PM = Pred->getLocationContext()->getParentMap();
+ const ObjCMessageExpr *ME = cast<ObjCMessageExpr>(S);
+ bool evaluated = false;
+
+ if (const PseudoObjectExpr *PO =
+ dyn_cast_or_null<PseudoObjectExpr>(PM.getParent(S))) {
+ const Expr *syntactic = PO->getSyntacticForm();
+ if (const ObjCPropertyRefExpr *PR =
+ dyn_cast<ObjCPropertyRefExpr>(syntactic)) {
+ bool isSetter = ME->getNumArgs() > 0;
+ VisitObjCMessage(ObjCMessage(ME, PR, isSetter), Pred, Dst);
+ evaluated = true;
+ }
+ else if (isa<BinaryOperator>(syntactic)) {
+ VisitObjCMessage(ObjCMessage(ME, 0, true), Pred, Dst);
+ }
+ }
+
+ if (!evaluated)
+ VisitObjCMessage(ME, Pred, Dst);
+
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::ObjCAtThrowStmtClass: {
+ // FIXME: This is not complete. We basically treat @throw as
+ // an abort.
+ Bldr.generateNode(S, Pred, Pred->getState());
+ break;
+ }
+
+ case Stmt::ReturnStmtClass:
+ Bldr.takeNodes(Pred);
+ VisitReturnStmt(cast<ReturnStmt>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::OffsetOfExprClass:
+ Bldr.takeNodes(Pred);
+ VisitOffsetOfExpr(cast<OffsetOfExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::UnaryExprOrTypeTraitExprClass:
+ Bldr.takeNodes(Pred);
+ VisitUnaryExprOrTypeTraitExpr(cast<UnaryExprOrTypeTraitExpr>(S),
+ Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+
+ case Stmt::StmtExprClass: {
+ const StmtExpr *SE = cast<StmtExpr>(S);
+
+ if (SE->getSubStmt()->body_empty()) {
+ // Empty statement expression.
+ assert(SE->getType() == getContext().VoidTy
+ && "Empty statement expression must have void type.");
+ break;
+ }
+
+ if (Expr *LastExpr = dyn_cast<Expr>(*SE->getSubStmt()->body_rbegin())) {
+ ProgramStateRef state = Pred->getState();
+ Bldr.generateNode(SE, Pred,
+ state->BindExpr(SE, Pred->getLocationContext(),
+ state->getSVal(LastExpr,
+ Pred->getLocationContext())));
+ }
+ break;
+ }
+
+ case Stmt::UnaryOperatorClass: {
+ Bldr.takeNodes(Pred);
+ const UnaryOperator *U = cast<UnaryOperator>(S);
+ if (AMgr.shouldEagerlyAssume() && (U->getOpcode() == UO_LNot)) {
+ ExplodedNodeSet Tmp;
+ VisitUnaryOperator(U, Pred, Tmp);
+ evalEagerlyAssume(Dst, Tmp, U);
+ }
+ else
+ VisitUnaryOperator(U, Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
+ }
+
+ case Stmt::PseudoObjectExprClass: {
+ Bldr.takeNodes(Pred);
+ ProgramStateRef state = Pred->getState();
+ const PseudoObjectExpr *PE = cast<PseudoObjectExpr>(S);
+ if (const Expr *Result = PE->getResultExpr()) {
+ SVal V = state->getSVal(Result, Pred->getLocationContext());
+ Bldr.generateNode(S, Pred,
+ state->BindExpr(S, Pred->getLocationContext(), V));
+ }
+ else
+ Bldr.generateNode(S, Pred,
+ state->BindExpr(S, Pred->getLocationContext(),
+ UnknownVal()));
+
+ Bldr.addNodes(Dst);
+ break;
+ }
+ }
+}
+
+bool ExprEngine::replayWithoutInlining(ExplodedNode *N,
+ const LocationContext *CalleeLC) {
+ const StackFrameContext *CalleeSF = CalleeLC->getCurrentStackFrame();
+ const StackFrameContext *CallerSF = CalleeSF->getParent()->getCurrentStackFrame();
+ assert(CalleeSF && CallerSF);
+ ExplodedNode *BeforeProcessingCall = 0;
+
+ // Find the first node before we started processing the call expression.
+ while (N) {
+ ProgramPoint L = N->getLocation();
+ BeforeProcessingCall = N;
+ N = N->pred_empty() ? NULL : *(N->pred_begin());
+
+ // Skip the nodes corresponding to the inlined code.
+ if (L.getLocationContext()->getCurrentStackFrame() != CallerSF)
+ continue;
+ // We reached the caller. Find the node right before we started
+ // processing the CallExpr.
+ if (isa<PostPurgeDeadSymbols>(L))
+ continue;
+ if (const StmtPoint *SP = dyn_cast<StmtPoint>(&L))
+ if (SP->getStmt() == CalleeSF->getCallSite())
+ continue;
+ break;
+ }
+
+ if (!BeforeProcessingCall)
+ return false;
+
+ // TODO: Clean up the unneeded nodes.
+
+ // Build an Epsilon node from which we will restart the analysis.
+ const Stmt *CE = CalleeSF->getCallSite();
+ ProgramPoint NewNodeLoc =
+ EpsilonPoint(BeforeProcessingCall->getLocationContext(), CE);
+ // Add the special flag to GDM to signal retrying with no inlining.
+ // Note, changing the state ensures that we are not going to cache out.
+ ProgramStateRef NewNodeState = BeforeProcessingCall->getState();
+ NewNodeState = NewNodeState->set<ReplayWithoutInlining>((void*)CE);
+
+ // Make the new node a successor of BeforeProcessingCall.
+ bool IsNew = false;
+ ExplodedNode *NewNode = G.getNode(NewNodeLoc, NewNodeState, false, &IsNew);
+ // We cached out at this point. Caching out is common because we backtrack
+ // from the inlined function, which might spawn several paths.
+ if (!IsNew)
+ return true;
+
+ NewNode->addPredecessor(BeforeProcessingCall, G);
+
+ // Add the new node to the work list.
+ Engine.enqueueStmtNode(NewNode, CalleeSF->getCallSiteBlock(),
+ CalleeSF->getIndex());
+ NumTimesRetriedWithoutInlining++;
+ return true;
+}
+
+/// Block entrance. (Update counters).
+void ExprEngine::processCFGBlockEntrance(const BlockEdge &L,
+ NodeBuilderWithSinks &nodeBuilder) {
+
+ // FIXME: Refactor this into a checker.
+ ExplodedNode *pred = nodeBuilder.getContext().getPred();
+
+ if (nodeBuilder.getContext().getCurrentBlockCount() >= AMgr.getMaxVisit()) {
+ static SimpleProgramPointTag tag("ExprEngine : Block count exceeded");
+ const ExplodedNode *Sink =
+ nodeBuilder.generateNode(pred->getState(), pred, &tag, true);
+
+ // Check if we stopped at the top level function or not.
+ // Root node should have the location context of the top most function.
+ const LocationContext *CalleeLC = pred->getLocation().getLocationContext();
+ const LocationContext *CalleeSF = CalleeLC->getCurrentStackFrame();
+ const LocationContext *RootLC =
+ (*G.roots_begin())->getLocation().getLocationContext();
+ if (RootLC->getCurrentStackFrame() != CalleeSF) {
+ Engine.FunctionSummaries->markReachedMaxBlockCount(CalleeSF->getDecl());
+
+ // Re-run the call evaluation without inlining it, by storing the
+ // no-inlining policy in the state and enqueuing the new work item on
+ // the list. Replay should almost never fail. Use the stats to catch it
+ // if it does.
+ if ((!AMgr.NoRetryExhausted && replayWithoutInlining(pred, CalleeLC)))
+ return;
+ NumMaxBlockCountReachedInInlined++;
+ } else
+ NumMaxBlockCountReached++;
+
+ // Mark sink nodes as exhausted (for stats) only if the retry failed.
+ Engine.blocksExhausted.push_back(std::make_pair(L, Sink));
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Branch processing.
+//===----------------------------------------------------------------------===//
+
+ProgramStateRef ExprEngine::MarkBranch(ProgramStateRef state,
+ const Stmt *Terminator,
+ const LocationContext *LCtx,
+ bool branchTaken) {
+
+ switch (Terminator->getStmtClass()) {
+ default:
+ return state;
+
+ case Stmt::BinaryOperatorClass: { // '&&' and '||'
+
+ const BinaryOperator* B = cast<BinaryOperator>(Terminator);
+ BinaryOperator::Opcode Op = B->getOpcode();
+
+ assert (Op == BO_LAnd || Op == BO_LOr);
+
+ // For &&, if we take the true branch, then the value of the whole
+ // expression is that of the RHS expression.
+ //
+ // For ||, if we take the false branch, then the value of the whole
+ // expression is that of the RHS expression.
+
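+ // For example, in "a && b", taking the true branch after evaluating 'a'
+ // means the value of the whole expression will be whatever 'b' evaluates
+ // to, so the whole expression is bound to a placeholder carrying the RHS.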
+ const Expr *Ex = (Op == BO_LAnd && branchTaken) ||
+ (Op == BO_LOr && !branchTaken)
+ ? B->getRHS() : B->getLHS();
+
+ return state->BindExpr(B, LCtx, UndefinedVal(Ex));
+ }
+
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass: { // ?:
+ const AbstractConditionalOperator* C
+ = cast<AbstractConditionalOperator>(Terminator);
+
+ // For ?:, if branchTaken == true then the value is either the LHS or,
+ // with the GNU binary conditional extension, the condition itself.
+
+ const Expr *Ex;
+
+ if (branchTaken)
+ Ex = C->getTrueExpr();
+ else
+ Ex = C->getFalseExpr();
+
+ return state->BindExpr(C, LCtx, UndefinedVal(Ex));
+ }
+
+ case Stmt::ChooseExprClass: { // ?:
+
+ const ChooseExpr *C = cast<ChooseExpr>(Terminator);
+
+ const Expr *Ex = branchTaken ? C->getLHS() : C->getRHS();
+ return state->BindExpr(C, LCtx, UndefinedVal(Ex));
+ }
+ }
+}
+
+/// RecoverCastedSymbol - A helper function for ProcessBranch that is used
+/// to try to recover some path-sensitivity for casts of symbolic
+/// integers that promote their values (which are currently not tracked well).
+/// This function returns the SVal bound to Condition->IgnoreCasts if all the
+/// cast(s) did was sign-extend the original value.
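+///
+/// For example, for "if ((long long)x)" where 'x' is a symbolic 'int', the
+/// cast only widens the value, so the SVal bound to 'x' can stand in for the
+/// condition; a truncating cast such as "(char)x" loses bits, and UnknownVal
+/// is returned instead.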
+static SVal RecoverCastedSymbol(ProgramStateManager& StateMgr,
+ ProgramStateRef state,
+ const Stmt *Condition,
+ const LocationContext *LCtx,
+ ASTContext &Ctx) {
+
+ const Expr *Ex = dyn_cast<Expr>(Condition);
+ if (!Ex)
+ return UnknownVal();
+
+ uint64_t bits = 0;
+ bool bitsInit = false;
+
+ while (const CastExpr *CE = dyn_cast<CastExpr>(Ex)) {
+ QualType T = CE->getType();
+
+ if (!T->isIntegerType())
+ return UnknownVal();
+
+ uint64_t newBits = Ctx.getTypeSize(T);
+ if (!bitsInit || newBits < bits) {
+ bitsInit = true;
+ bits = newBits;
+ }
+
+ Ex = CE->getSubExpr();
+ }
+
+ // We reached a non-cast. Is it a symbolic value?
+ QualType T = Ex->getType();
+
+ if (!bitsInit || !T->isIntegerType() || Ctx.getTypeSize(T) > bits)
+ return UnknownVal();
+
+ return state->getSVal(Ex, LCtx);
+}
+
+void ExprEngine::processBranch(const Stmt *Condition, const Stmt *Term,
+ NodeBuilderContext& BldCtx,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst,
+ const CFGBlock *DstT,
+ const CFGBlock *DstF) {
+ currentBuilderContext = &BldCtx;
+
+ // Check for NULL conditions; e.g. "for(;;)"
+ if (!Condition) {
+ BranchNodeBuilder NullCondBldr(Pred, Dst, BldCtx, DstT, DstF);
+ NullCondBldr.markInfeasible(false);
+ NullCondBldr.generateNode(Pred->getState(), true, Pred);
+ return;
+ }
+
+ PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
+ Condition->getLocStart(),
+ "Error evaluating branch");
+
+ ExplodedNodeSet CheckersOutSet;
+ getCheckerManager().runCheckersForBranchCondition(Condition, CheckersOutSet,
+ Pred, *this);
+ // We generated only sinks.
+ if (CheckersOutSet.empty())
+ return;
+
+ BranchNodeBuilder builder(CheckersOutSet, Dst, BldCtx, DstT, DstF);
+ for (NodeBuilder::iterator I = CheckersOutSet.begin(),
+ E = CheckersOutSet.end(); E != I; ++I) {
+ ExplodedNode *PredI = *I;
+
+ if (PredI->isSink())
+ continue;
+
+ ProgramStateRef PrevState = Pred->getState();
+ SVal X = PrevState->getSVal(Condition, Pred->getLocationContext());
+
+ if (X.isUnknownOrUndef()) {
+ // Give it a chance to recover from unknown.
+ if (const Expr *Ex = dyn_cast<Expr>(Condition)) {
+ if (Ex->getType()->isIntegerType()) {
+ // Try to recover some path-sensitivity.  Casts of symbolic integers
+ // that promote their values are currently not tracked well.  If
+ // 'Condition' is such an expression, try to recover the underlying
+ // value and use that instead.
+ SVal recovered = RecoverCastedSymbol(getStateManager(),
+ PrevState, Condition,
+ Pred->getLocationContext(),
+ getContext());
+
+ if (!recovered.isUnknown()) {
+ X = recovered;
+ }
+ }
+ }
+ }
+
+ const LocationContext *LCtx = PredI->getLocationContext();
+
+ // If the condition is still unknown, give up.
+ if (X.isUnknownOrUndef()) {
+ builder.generateNode(MarkBranch(PrevState, Term, LCtx, true),
+ true, PredI);
+ builder.generateNode(MarkBranch(PrevState, Term, LCtx, false),
+ false, PredI);
+ continue;
+ }
+
+ DefinedSVal V = cast<DefinedSVal>(X);
+
+ // Process the true branch.
+ if (builder.isFeasible(true)) {
+ if (ProgramStateRef state = PrevState->assume(V, true))
+ builder.generateNode(MarkBranch(state, Term, LCtx, true),
+ true, PredI);
+ else
+ builder.markInfeasible(true);
+ }
+
+ // Process the false branch.
+ if (builder.isFeasible(false)) {
+ if (ProgramStateRef state = PrevState->assume(V, false))
+ builder.generateNode(MarkBranch(state, Term, LCtx, false),
+ false, PredI);
+ else
+ builder.markInfeasible(false);
+ }
+ }
+ currentBuilderContext = 0;
+}
+
+/// processIndirectGoto - Called by CoreEngine. Used to generate successor
+/// nodes by processing the 'effects' of a computed goto jump.
+void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
+
+ ProgramStateRef state = builder.getState();
+ SVal V = state->getSVal(builder.getTarget(), builder.getLocationContext());
+
+ // Three possibilities:
+ //
+ // (1) We know the computed label.
+ // (2) The label is NULL (or some other constant), or Undefined.
+ // (3) We have no clue about the label. Dispatch to all targets.
+ //
+
+ typedef IndirectGotoNodeBuilder::iterator iterator;
+
+ if (isa<loc::GotoLabel>(V)) {
+ const LabelDecl *L = cast<loc::GotoLabel>(V).getLabel();
+
+ for (iterator I = builder.begin(), E = builder.end(); I != E; ++I) {
+ if (I.getLabel() == L) {
+ builder.generateNode(I, state);
+ return;
+ }
+ }
+
+ llvm_unreachable("No block with label.");
+ }
+
+ if (isa<loc::ConcreteInt>(V) || isa<UndefinedVal>(V)) {
+ // Dispatch to the first target and mark it as a sink.
+ //ExplodedNode* N = builder.generateNode(builder.begin(), state, true);
+ // FIXME: add checker visit.
+ // UndefBranches.insert(N);
+ return;
+ }
+
+ // This is really a catch-all. We don't support symbolics yet.
+ // FIXME: Implement dispatch for symbolic pointers.
+
+ for (iterator I=builder.begin(), E=builder.end(); I != E; ++I)
+ builder.generateNode(I, state);
+}
+
+/// processEndOfFunction - Called by CoreEngine. Used to generate end-of-path
+/// nodes when control reaches the end of a function.
+void ExprEngine::processEndOfFunction(NodeBuilderContext& BC) {
+ StateMgr.EndPath(BC.Pred->getState());
+ ExplodedNodeSet Dst;
+ getCheckerManager().runCheckersForEndPath(BC, Dst, *this);
+ Engine.enqueueEndOfFunction(Dst);
+}
+
+/// processSwitch - Called by CoreEngine. Used to generate successor
+/// nodes by processing the 'effects' of a switch statement.
+void ExprEngine::processSwitch(SwitchNodeBuilder& builder) {
+ typedef SwitchNodeBuilder::iterator iterator;
+ ProgramStateRef state = builder.getState();
+ const Expr *CondE = builder.getCondition();
+ SVal CondV_untested = state->getSVal(CondE, builder.getLocationContext());
+
+ if (CondV_untested.isUndef()) {
+ //ExplodedNode* N = builder.generateDefaultCaseNode(state, true);
+ // FIXME: add checker
+ //UndefBranches.insert(N);
+
+ return;
+ }
+ DefinedOrUnknownSVal CondV = cast<DefinedOrUnknownSVal>(CondV_untested);
+
+ ProgramStateRef DefaultSt = state;
+
+ iterator I = builder.begin(), EI = builder.end();
+ bool defaultIsFeasible = I == EI;
+
+ for ( ; I != EI; ++I) {
+ // Successor may be pruned out during CFG construction.
+ if (!I.getBlock())
+ continue;
+
+ const CaseStmt *Case = I.getCase();
+
+ // Evaluate the LHS of the case value.
+ llvm::APSInt V1 = Case->getLHS()->EvaluateKnownConstInt(getContext());
+ assert(V1.getBitWidth() == getContext().getTypeSize(CondE->getType()));
+
+ // Get the RHS of the case, if it exists.
+ llvm::APSInt V2;
+ if (const Expr *E = Case->getRHS())
+ V2 = E->EvaluateKnownConstInt(getContext());
+ else
+ V2 = V1;
+
+ // FIXME: Eventually we should replace the logic below with a range
+ // comparison, rather than concretize the values within the range.
+ // This should be easy once we have "ranges" for NonLVals.
+
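+ // For a GNU case range such as "case 1 ... 3:", this loop concretely
+ // tests the condition against each value in turn, generating a node for
+ // each feasible match and folding the "doesn't match" assumption into the
+ // default state.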
+ do {
+ nonloc::ConcreteInt CaseVal(getBasicVals().getValue(V1));
+ DefinedOrUnknownSVal Res = svalBuilder.evalEQ(DefaultSt ? DefaultSt : state,
+ CondV, CaseVal);
+
+ // Now "assume" that the case matches.
+ if (ProgramStateRef stateNew = state->assume(Res, true)) {
+ builder.generateCaseStmtNode(I, stateNew);
+
+ // If CondV evaluates to a constant, then we know that this
+ // is the *only* case that we can take, so stop evaluating the
+ // others.
+ if (isa<nonloc::ConcreteInt>(CondV))
+ return;
+ }
+
+ // Now "assume" that the case doesn't match. Add this state
+ // to the default state (if it is feasible).
+ if (DefaultSt) {
+ if (ProgramStateRef stateNew = DefaultSt->assume(Res, false)) {
+ defaultIsFeasible = true;
+ DefaultSt = stateNew;
+ }
+ else {
+ defaultIsFeasible = false;
+ DefaultSt = NULL;
+ }
+ }
+
+ // Concretize the next value in the range.
+ if (V1 == V2)
+ break;
+
+ ++V1;
+ assert (V1 <= V2);
+
+ } while (true);
+ }
+
+ if (!defaultIsFeasible)
+ return;
+
+ // If we have switch(enum value), the default branch is not
+ // feasible if all of the enum constants not covered by 'case:' statements
+ // are not feasible values for the switch condition.
+ //
+ // Note that this isn't as accurate as it could be.  Even if there isn't
+ // a case for a particular enum value, as long as that enum value isn't
+ // feasible it shouldn't be considered when deciding whether 'default:'
+ // is reachable.
+ const SwitchStmt *SS = builder.getSwitch();
+ const Expr *CondExpr = SS->getCond()->IgnoreParenImpCasts();
+ if (CondExpr->getType()->getAs<EnumType>()) {
+ if (SS->isAllEnumCasesCovered())
+ return;
+ }
+
+ builder.generateDefaultCaseNode(DefaultSt);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer functions: Loads and stores.
+//===----------------------------------------------------------------------===//
+
+void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
+ assert(Ex->isLValue());
+ SVal V = state->getLValue(VD, Pred->getLocationContext());
+
+ // For references, the 'lvalue' is the pointer address stored in the
+ // reference region.
+ if (VD->getType()->isReferenceType()) {
+ if (const MemRegion *R = V.getAsRegion())
+ V = state->getSVal(R);
+ else
+ V = UnknownVal();
+ }
+
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), false, 0,
+ ProgramPoint::PostLValueKind);
+ return;
+ }
+ if (const EnumConstantDecl *ED = dyn_cast<EnumConstantDecl>(D)) {
+ assert(!Ex->isLValue());
+ SVal V = svalBuilder.makeIntVal(ED->getInitVal());
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V));
+ return;
+ }
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ SVal V = svalBuilder.getFunctionPointer(FD);
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), false, 0,
+ ProgramPoint::PostLValueKind);
+ return;
+ }
+ if (isa<FieldDecl>(D)) {
+ // FIXME: Compute lvalue of fields.
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, UnknownVal()),
+ false, 0, ProgramPoint::PostLValueKind);
+ return;
+ }
+
+ assert (false &&
+ "ValueDecl support for this ValueDecl not implemented.");
+}
+
+/// VisitArraySubscriptExpr - Transfer function for array accesses
+void ExprEngine::VisitLvalArraySubscriptExpr(const ArraySubscriptExpr *A,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst){
+
+ const Expr *Base = A->getBase()->IgnoreParens();
+ const Expr *Idx = A->getIdx()->IgnoreParens();
+
+
+ ExplodedNodeSet checkerPreStmt;
+ getCheckerManager().runCheckersForPreStmt(checkerPreStmt, Pred, A, *this);
+
+ StmtNodeBuilder Bldr(checkerPreStmt, Dst, *currentBuilderContext);
+
+ for (ExplodedNodeSet::iterator it = checkerPreStmt.begin(),
+ ei = checkerPreStmt.end(); it != ei; ++it) {
+ const LocationContext *LCtx = (*it)->getLocationContext();
+ ProgramStateRef state = (*it)->getState();
+ SVal V = state->getLValue(A->getType(),
+ state->getSVal(Idx, LCtx),
+ state->getSVal(Base, LCtx));
+ assert(A->isLValue());
+ Bldr.generateNode(A, *it, state->BindExpr(A, LCtx, V),
+ false, 0, ProgramPoint::PostLValueKind);
+ }
+}
+
+/// VisitMemberExpr - Transfer function for member expressions.
+void ExprEngine::VisitMemberExpr(const MemberExpr *M, ExplodedNode *Pred,
+ ExplodedNodeSet &TopDst) {
+
+ StmtNodeBuilder Bldr(Pred, TopDst, *currentBuilderContext);
+ ExplodedNodeSet Dst;
+ Decl *member = M->getMemberDecl();
+ if (VarDecl *VD = dyn_cast<VarDecl>(member)) {
+ assert(M->isLValue());
+ Bldr.takeNodes(Pred);
+ VisitCommonDeclRefExpr(M, VD, Pred, Dst);
+ Bldr.addNodes(Dst);
+ return;
+ }
+
+ FieldDecl *field = dyn_cast<FieldDecl>(member);
+ if (!field) // FIXME: skipping member expressions for non-fields
+ return;
+
+ Expr *baseExpr = M->getBase()->IgnoreParens();
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal baseExprVal = state->getSVal(baseExpr, Pred->getLocationContext());
+ if (isa<nonloc::LazyCompoundVal>(baseExprVal) ||
+ isa<nonloc::CompoundVal>(baseExprVal) ||
+ // FIXME: This can originate by conjuring a symbol for an unknown
+ // temporary struct object, see test/Analysis/fields.c:
+ // (p = getit()).x
+ isa<nonloc::SymbolVal>(baseExprVal)) {
+ Bldr.generateNode(M, Pred, state->BindExpr(M, LCtx, UnknownVal()));
+ return;
+ }
+
+ // FIXME: Should we insert some assumption logic in here to determine
+ // if "Base" is a valid piece of memory?  Previously this assumption was
+ // made later, when using FieldOffset lvals (which we no longer have).
+
+ // For all other cases, compute an lvalue.
+ SVal L = state->getLValue(field, baseExprVal);
+ if (M->isLValue())
+ Bldr.generateNode(M, Pred, state->BindExpr(M, LCtx, L), false, 0,
+ ProgramPoint::PostLValueKind);
+ else {
+ Bldr.takeNodes(Pred);
+ evalLoad(Dst, M, M, Pred, state, L);
+ Bldr.addNodes(Dst);
+ }
+}
+
+/// evalBind - Handle the semantics of binding a value to a specific location.
+/// This method is used by evalStore and (soon) VisitDeclStmt, and others.
+void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
+ ExplodedNode *Pred,
+ SVal location, SVal Val, bool atDeclInit) {
+
+ // Do a previsit of the bind.
+ ExplodedNodeSet CheckedSet;
+ getCheckerManager().runCheckersForBind(CheckedSet, Pred, location, Val,
+ StoreE, *this,
+ ProgramPoint::PostStmtKind);
+
+ ExplodedNodeSet TmpDst;
+ StmtNodeBuilder Bldr(CheckedSet, TmpDst, *currentBuilderContext);
+
+ const LocationContext *LC = Pred->getLocationContext();
+ for (ExplodedNodeSet::iterator I = CheckedSet.begin(), E = CheckedSet.end();
+ I!=E; ++I) {
+ ExplodedNode *PredI = *I;
+ ProgramStateRef state = PredI->getState();
+
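+ // A store at a declaration's initializer always targets the declared
+ // variable's region, so it goes through bindDecl(); all other stores use
+ // the generic bindLoc() path.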
+ if (atDeclInit) {
+ const VarRegion *VR =
+ cast<VarRegion>(cast<loc::MemRegionVal>(location).getRegion());
+
+ state = state->bindDecl(VR, Val);
+ } else {
+ state = state->bindLoc(location, Val);
+ }
+
+ const MemRegion *LocReg = 0;
+ if (loc::MemRegionVal *LocRegVal = dyn_cast<loc::MemRegionVal>(&location))
+ LocReg = LocRegVal->getRegion();
+
+ const ProgramPoint L = PostStore(StoreE, LC, LocReg, 0);
+ Bldr.generateNode(L, PredI, state, false);
+ }
+
+ Dst.insert(TmpDst);
+}
+
+/// evalStore - Handle the semantics of a store via an assignment.
+/// @param Dst The node set to store generated state nodes
+/// @param AssignE The assignment expression if the store happens in an
+/// assignment.
+/// @param LocationE The location expression that is stored to.
+/// @param state The current simulation state
+/// @param location The location to store the value
+/// @param Val The value to be stored
+void ExprEngine::evalStore(ExplodedNodeSet &Dst, const Expr *AssignE,
+ const Expr *LocationE,
+ ExplodedNode *Pred,
+ ProgramStateRef state, SVal location, SVal Val,
+ const ProgramPointTag *tag) {
+ // Proceed with the store. We use AssignE as the anchor for the PostStore
+ // ProgramPoint if it is non-NULL, and LocationE otherwise.
+ const Expr *StoreE = AssignE ? AssignE : LocationE;
+
+ if (isa<loc::ObjCPropRef>(location)) {
+ assert(false);
+ }
+
+ // Evaluate the location (checks for bad dereferences).
+ ExplodedNodeSet Tmp;
+ evalLocation(Tmp, AssignE, LocationE, Pred, state, location, tag, false);
+
+ if (Tmp.empty())
+ return;
+
+ if (location.isUndef())
+ return;
+
+ for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI)
+ evalBind(Dst, StoreE, *NI, location, Val, false);
+}
+
+void ExprEngine::evalLoad(ExplodedNodeSet &Dst,
+ const Expr *NodeEx,
+ const Expr *BoundEx,
+ ExplodedNode *Pred,
+ ProgramStateRef state,
+ SVal location,
+ const ProgramPointTag *tag,
+ QualType LoadTy)
+{
+ assert(!isa<NonLoc>(location) && "location cannot be a NonLoc.");
+ assert(!isa<loc::ObjCPropRef>(location));
+
+ // Are we loading from a region? This actually results in two loads; one
+ // to fetch the address of the referenced value and one to fetch the
+ // referenced value.
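+ // e.g. loading from a region whose value type is 'int &' first fetches the
+ // stored address (as an 'int *') and then loads the referenced 'int'.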
+ if (const TypedValueRegion *TR =
+ dyn_cast_or_null<TypedValueRegion>(location.getAsRegion())) {
+
+ QualType ValTy = TR->getValueType();
+ if (const ReferenceType *RT = ValTy->getAs<ReferenceType>()) {
+ static SimpleProgramPointTag
+ loadReferenceTag("ExprEngine : Load Reference");
+ ExplodedNodeSet Tmp;
+ evalLoadCommon(Tmp, NodeEx, BoundEx, Pred, state,
+ location, &loadReferenceTag,
+ getContext().getPointerType(RT->getPointeeType()));
+
+ // Perform the load from the referenced value.
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end() ; I!=E; ++I) {
+ state = (*I)->getState();
+ location = state->getSVal(BoundEx, (*I)->getLocationContext());
+ evalLoadCommon(Dst, NodeEx, BoundEx, *I, state, location, tag, LoadTy);
+ }
+ return;
+ }
+ }
+
+ evalLoadCommon(Dst, NodeEx, BoundEx, Pred, state, location, tag, LoadTy);
+}
+
+void ExprEngine::evalLoadCommon(ExplodedNodeSet &Dst,
+ const Expr *NodeEx,
+ const Expr *BoundEx,
+ ExplodedNode *Pred,
+ ProgramStateRef state,
+ SVal location,
+ const ProgramPointTag *tag,
+ QualType LoadTy) {
+ assert(NodeEx);
+ assert(BoundEx);
+ // Evaluate the location (checks for bad dereferences).
+ ExplodedNodeSet Tmp;
+ evalLocation(Tmp, NodeEx, BoundEx, Pred, state, location, tag, true);
+ if (Tmp.empty())
+ return;
+
+ StmtNodeBuilder Bldr(Tmp, Dst, *currentBuilderContext);
+ if (location.isUndef())
+ return;
+
+ // Proceed with the load.
+ for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI!=NE; ++NI) {
+ state = (*NI)->getState();
+ const LocationContext *LCtx = (*NI)->getLocationContext();
+
+ if (location.isUnknown()) {
+ // This is important. We must nuke the old binding.
+ Bldr.generateNode(NodeEx, *NI,
+ state->BindExpr(BoundEx, LCtx, UnknownVal()),
+ false, tag,
+ ProgramPoint::PostLoadKind);
+ }
+ else {
+ if (LoadTy.isNull())
+ LoadTy = BoundEx->getType();
+ SVal V = state->getSVal(cast<Loc>(location), LoadTy);
+ Bldr.generateNode(NodeEx, *NI,
+ state->bindExprAndLocation(BoundEx, LCtx, location, V),
+ false, tag, ProgramPoint::PostLoadKind);
+ }
+ }
+}
+
+void ExprEngine::evalLocation(ExplodedNodeSet &Dst,
+ const Stmt *NodeEx,
+ const Stmt *BoundEx,
+ ExplodedNode *Pred,
+ ProgramStateRef state,
+ SVal location,
+ const ProgramPointTag *tag,
+ bool isLoad) {
+ StmtNodeBuilder BldrTop(Pred, Dst, *currentBuilderContext);
+  // Early checks for performance reasons.
+ if (location.isUnknown()) {
+ return;
+ }
+
+ ExplodedNodeSet Src;
+ BldrTop.takeNodes(Pred);
+ StmtNodeBuilder Bldr(Pred, Src, *currentBuilderContext);
+ if (Pred->getState() != state) {
+ // Associate this new state with an ExplodedNode.
+ // FIXME: If I pass null tag, the graph is incorrect, e.g for
+ // int *p;
+ // p = 0;
+ // *p = 0xDEADBEEF;
+ // "p = 0" is not noted as "Null pointer value stored to 'p'" but
+ // instead "int *p" is noted as
+ // "Variable 'p' initialized to a null pointer value"
+
+ // FIXME: why is 'tag' not used instead of etag?
+ static SimpleProgramPointTag etag("ExprEngine: Location");
+ Bldr.generateNode(NodeEx, Pred, state, false, &etag);
+ }
+ ExplodedNodeSet Tmp;
+ getCheckerManager().runCheckersForLocation(Tmp, Src, location, isLoad,
+ NodeEx, BoundEx, *this);
+ BldrTop.addNodes(Tmp);
+}
+
+std::pair<const ProgramPointTag *, const ProgramPointTag*>
+ExprEngine::getEagerlyAssumeTags() {
+ static SimpleProgramPointTag
+ EagerlyAssumeTrue("ExprEngine : Eagerly Assume True"),
+ EagerlyAssumeFalse("ExprEngine : Eagerly Assume False");
+ return std::make_pair(&EagerlyAssumeTrue, &EagerlyAssumeFalse);
+}
+
+void ExprEngine::evalEagerlyAssume(ExplodedNodeSet &Dst, ExplodedNodeSet &Src,
+ const Expr *Ex) {
+ StmtNodeBuilder Bldr(Src, Dst, *currentBuilderContext);
+
+ for (ExplodedNodeSet::iterator I=Src.begin(), E=Src.end(); I!=E; ++I) {
+ ExplodedNode *Pred = *I;
+    // Test if the previous node was at the same expression. This can happen
+ // when the expression fails to evaluate to anything meaningful and
+ // (as an optimization) we don't generate a node.
+ ProgramPoint P = Pred->getLocation();
+ if (!isa<PostStmt>(P) || cast<PostStmt>(P).getStmt() != Ex) {
+ continue;
+ }
+
+ ProgramStateRef state = Pred->getState();
+ SVal V = state->getSVal(Ex, Pred->getLocationContext());
+ nonloc::SymbolVal *SEV = dyn_cast<nonloc::SymbolVal>(&V);
+ if (SEV && SEV->isExpression()) {
+ const std::pair<const ProgramPointTag *, const ProgramPointTag*> &tags =
+ getEagerlyAssumeTags();
+
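+      // For example, for 'int b = (x > 0);' with a symbolic 'x', the state is
+      // bifurcated below: one successor binds the expression to 1 (the
+      // comparison holds), the other binds it to 0.
+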
+ // First assume that the condition is true.
+ if (ProgramStateRef StateTrue = state->assume(*SEV, true)) {
+ SVal Val = svalBuilder.makeIntVal(1U, Ex->getType());
+ StateTrue = StateTrue->BindExpr(Ex, Pred->getLocationContext(), Val);
+ Bldr.generateNode(Ex, Pred, StateTrue, false, tags.first);
+ }
+
+ // Next, assume that the condition is false.
+ if (ProgramStateRef StateFalse = state->assume(*SEV, false)) {
+ SVal Val = svalBuilder.makeIntVal(0U, Ex->getType());
+ StateFalse = StateFalse->BindExpr(Ex, Pred->getLocationContext(), Val);
+ Bldr.generateNode(Ex, Pred, StateFalse, false, tags.second);
+ }
+ }
+ }
+}
+
+void ExprEngine::VisitAsmStmt(const AsmStmt *A, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ // We have processed both the inputs and the outputs. All of the outputs
+ // should evaluate to Locs. Nuke all of their values.
+
+ // FIXME: Some day in the future it would be nice to allow a "plug-in"
+ // which interprets the inline asm and stores proper results in the
+ // outputs.
+
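+  // For example, after 'asm("..." : "=r"(x));' any existing binding for 'x'
+  // is replaced below with UnknownVal.
+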
+ ProgramStateRef state = Pred->getState();
+
+ for (AsmStmt::const_outputs_iterator OI = A->begin_outputs(),
+ OE = A->end_outputs(); OI != OE; ++OI) {
+ SVal X = state->getSVal(*OI, Pred->getLocationContext());
+ assert (!isa<NonLoc>(X)); // Should be an Lval, or unknown, undef.
+
+ if (isa<Loc>(X))
+ state = state->bindLoc(cast<Loc>(X), UnknownVal());
+ }
+
+ Bldr.generateNode(A, Pred, state);
+}
+
+//===----------------------------------------------------------------------===//
+// Visualization.
+//===----------------------------------------------------------------------===//
+
+#ifndef NDEBUG
+static ExprEngine* GraphPrintCheckerState;
+static SourceManager* GraphPrintSourceManager;
+
+namespace llvm {
+template<>
+struct DOTGraphTraits<ExplodedNode*> :
+ public DefaultDOTGraphTraits {
+
+ DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+
+ // FIXME: Since we do not cache error nodes in ExprEngine now, this does not
+ // work.
+ static std::string getNodeAttributes(const ExplodedNode *N, void*) {
+
+#if 0
+ // FIXME: Replace with a general scheme to tell if the node is
+ // an error node.
+ if (GraphPrintCheckerState->isImplicitNullDeref(N) ||
+ GraphPrintCheckerState->isExplicitNullDeref(N) ||
+ GraphPrintCheckerState->isUndefDeref(N) ||
+ GraphPrintCheckerState->isUndefStore(N) ||
+ GraphPrintCheckerState->isUndefControlFlow(N) ||
+ GraphPrintCheckerState->isUndefResult(N) ||
+ GraphPrintCheckerState->isBadCall(N) ||
+ GraphPrintCheckerState->isUndefArg(N))
+ return "color=\"red\",style=\"filled\"";
+
+ if (GraphPrintCheckerState->isNoReturnCall(N))
+ return "color=\"blue\",style=\"filled\"";
+#endif
+ return "";
+ }
+
+ static std::string getNodeLabel(const ExplodedNode *N, void*){
+
+ std::string sbuf;
+ llvm::raw_string_ostream Out(sbuf);
+
+ // Program Location.
+ ProgramPoint Loc = N->getLocation();
+
+ switch (Loc.getKind()) {
+ case ProgramPoint::BlockEntranceKind:
+ Out << "Block Entrance: B"
+ << cast<BlockEntrance>(Loc).getBlock()->getBlockID();
+ break;
+
+ case ProgramPoint::BlockExitKind:
+ assert (false);
+ break;
+
+ case ProgramPoint::CallEnterKind:
+ Out << "CallEnter";
+ break;
+
+ case ProgramPoint::CallExitKind:
+ Out << "CallExit";
+ break;
+
+ case ProgramPoint::EpsilonKind:
+ Out << "Epsilon Point";
+ break;
+
+ default: {
+ if (StmtPoint *L = dyn_cast<StmtPoint>(&Loc)) {
+ const Stmt *S = L->getStmt();
+ SourceLocation SLoc = S->getLocStart();
+
+ Out << S->getStmtClassName() << ' ' << (void*) S << ' ';
+ LangOptions LO; // FIXME.
+ S->printPretty(Out, 0, PrintingPolicy(LO));
+
+ if (SLoc.isFileID()) {
+ Out << "\\lline="
+ << GraphPrintSourceManager->getExpansionLineNumber(SLoc)
+ << " col="
+ << GraphPrintSourceManager->getExpansionColumnNumber(SLoc)
+ << "\\l";
+ }
+
+ if (isa<PreStmt>(Loc))
+ Out << "\\lPreStmt\\l;";
+ else if (isa<PostLoad>(Loc))
+ Out << "\\lPostLoad\\l;";
+ else if (isa<PostStore>(Loc))
+ Out << "\\lPostStore\\l";
+ else if (isa<PostLValue>(Loc))
+ Out << "\\lPostLValue\\l";
+
+#if 0
+ // FIXME: Replace with a general scheme to determine
+ // the name of the check.
+ if (GraphPrintCheckerState->isImplicitNullDeref(N))
+ Out << "\\|Implicit-Null Dereference.\\l";
+ else if (GraphPrintCheckerState->isExplicitNullDeref(N))
+ Out << "\\|Explicit-Null Dereference.\\l";
+ else if (GraphPrintCheckerState->isUndefDeref(N))
+            Out << "\\|Dereference of undefined value.\\l";
+ else if (GraphPrintCheckerState->isUndefStore(N))
+ Out << "\\|Store to Undefined Loc.";
+ else if (GraphPrintCheckerState->isUndefResult(N))
+ Out << "\\|Result of operation is undefined.";
+ else if (GraphPrintCheckerState->isNoReturnCall(N))
+ Out << "\\|Call to function marked \"noreturn\".";
+ else if (GraphPrintCheckerState->isBadCall(N))
+ Out << "\\|Call to NULL/Undefined.";
+ else if (GraphPrintCheckerState->isUndefArg(N))
+ Out << "\\|Argument in call is undefined";
+#endif
+
+ break;
+ }
+
+ const BlockEdge &E = cast<BlockEdge>(Loc);
+ Out << "Edge: (B" << E.getSrc()->getBlockID() << ", B"
+ << E.getDst()->getBlockID() << ')';
+
+ if (const Stmt *T = E.getSrc()->getTerminator()) {
+
+ SourceLocation SLoc = T->getLocStart();
+
+ Out << "\\|Terminator: ";
+ LangOptions LO; // FIXME.
+ E.getSrc()->printTerminator(Out, LO);
+
+ if (SLoc.isFileID()) {
+ Out << "\\lline="
+ << GraphPrintSourceManager->getExpansionLineNumber(SLoc)
+ << " col="
+ << GraphPrintSourceManager->getExpansionColumnNumber(SLoc);
+ }
+
+ if (isa<SwitchStmt>(T)) {
+ const Stmt *Label = E.getDst()->getLabel();
+
+ if (Label) {
+ if (const CaseStmt *C = dyn_cast<CaseStmt>(Label)) {
+ Out << "\\lcase ";
+ LangOptions LO; // FIXME.
+ C->getLHS()->printPretty(Out, 0, PrintingPolicy(LO));
+
+ if (const Stmt *RHS = C->getRHS()) {
+ Out << " .. ";
+ RHS->printPretty(Out, 0, PrintingPolicy(LO));
+ }
+
+ Out << ":";
+ }
+ else {
+ assert (isa<DefaultStmt>(Label));
+ Out << "\\ldefault:";
+ }
+ }
+ else
+ Out << "\\l(implicit) default:";
+ }
+ else if (isa<IndirectGotoStmt>(T)) {
+ // FIXME
+ }
+ else {
+ Out << "\\lCondition: ";
+ if (*E.getSrc()->succ_begin() == E.getDst())
+ Out << "true";
+ else
+ Out << "false";
+ }
+
+ Out << "\\l";
+ }
+
+#if 0
+ // FIXME: Replace with a general scheme to determine
+ // the name of the check.
+ if (GraphPrintCheckerState->isUndefControlFlow(N)) {
+ Out << "\\|Control-flow based on\\lUndefined value.\\l";
+ }
+#endif
+ }
+ }
+
+ ProgramStateRef state = N->getState();
+ Out << "\\|StateID: " << (void*) state.getPtr()
+ << " NodeID: " << (void*) N << "\\|";
+ state->printDOT(Out);
+
+ Out << "\\l";
+
+ if (const ProgramPointTag *tag = Loc.getTag()) {
+ Out << "\\|Tag: " << tag->getTagDescription();
+ Out << "\\l";
+ }
+ return Out.str();
+ }
+};
+} // end llvm namespace
+#endif
+
+#ifndef NDEBUG
+template <typename ITERATOR>
+ExplodedNode *GetGraphNode(ITERATOR I) { return *I; }
+
+template <> ExplodedNode*
+GetGraphNode<llvm::DenseMap<ExplodedNode*, Expr*>::iterator>
+ (llvm::DenseMap<ExplodedNode*, Expr*>::iterator I) {
+ return I->first;
+}
+#endif
+
+void ExprEngine::ViewGraph(bool trim) {
+#ifndef NDEBUG
+ if (trim) {
+ std::vector<ExplodedNode*> Src;
+
+ // Flush any outstanding reports to make sure we cover all the nodes.
+ // This does not cause them to get displayed.
+ for (BugReporter::iterator I=BR.begin(), E=BR.end(); I!=E; ++I)
+ const_cast<BugType*>(*I)->FlushReports(BR);
+
+ // Iterate through the reports and get their nodes.
+ for (BugReporter::EQClasses_iterator
+ EI = BR.EQClasses_begin(), EE = BR.EQClasses_end(); EI != EE; ++EI) {
+ ExplodedNode *N = const_cast<ExplodedNode*>(EI->begin()->getErrorNode());
+ if (N) Src.push_back(N);
+ }
+
+ ViewGraph(&Src[0], &Src[0]+Src.size());
+ }
+ else {
+ GraphPrintCheckerState = this;
+ GraphPrintSourceManager = &getContext().getSourceManager();
+
+ llvm::ViewGraph(*G.roots_begin(), "ExprEngine");
+
+ GraphPrintCheckerState = NULL;
+ GraphPrintSourceManager = NULL;
+ }
+#endif
+}
+
+void ExprEngine::ViewGraph(ExplodedNode** Beg, ExplodedNode** End) {
+#ifndef NDEBUG
+ GraphPrintCheckerState = this;
+ GraphPrintSourceManager = &getContext().getSourceManager();
+
+ std::auto_ptr<ExplodedGraph> TrimmedG(G.Trim(Beg, End).first);
+
+ if (!TrimmedG.get())
+ llvm::errs() << "warning: Trimmed ExplodedGraph is empty.\n";
+ else
+ llvm::ViewGraph(*TrimmedG->roots_begin(), "TrimmedExprEngine");
+
+ GraphPrintCheckerState = NULL;
+ GraphPrintSourceManager = NULL;
+#endif
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
new file mode 100644
index 0000000..93e598a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -0,0 +1,811 @@
+//=-- ExprEngineC.cpp - ExprEngine support for C expressions ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ExprEngine's support for C expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+
+using namespace clang;
+using namespace ento;
+using llvm::APSInt;
+
+void ExprEngine::VisitBinaryOperator(const BinaryOperator* B,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+
+ Expr *LHS = B->getLHS()->IgnoreParens();
+ Expr *RHS = B->getRHS()->IgnoreParens();
+
+ // FIXME: Prechecks eventually go in ::Visit().
+ ExplodedNodeSet CheckedSet;
+ ExplodedNodeSet Tmp2;
+ getCheckerManager().runCheckersForPreStmt(CheckedSet, Pred, B, *this);
+
+ // With both the LHS and RHS evaluated, process the operation itself.
+ for (ExplodedNodeSet::iterator it=CheckedSet.begin(), ei=CheckedSet.end();
+ it != ei; ++it) {
+
+ ProgramStateRef state = (*it)->getState();
+ const LocationContext *LCtx = (*it)->getLocationContext();
+ SVal LeftV = state->getSVal(LHS, LCtx);
+ SVal RightV = state->getSVal(RHS, LCtx);
+
+ BinaryOperator::Opcode Op = B->getOpcode();
+
+ if (Op == BO_Assign) {
+ // EXPERIMENTAL: "Conjured" symbols.
+ // FIXME: Handle structs.
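+      // For example, in 'x = foo();' where the call's return value is not
+      // modeled, RightV is Unknown; conjuring a fresh symbol keeps later
+      // constraints on 'x' meaningful.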
+ if (RightV.isUnknown()) {
+ unsigned Count = currentBuilderContext->getCurrentBlockCount();
+ RightV = svalBuilder.getConjuredSymbolVal(NULL, B->getRHS(), LCtx, Count);
+ }
+ // Simulate the effects of a "store": bind the value of the RHS
+ // to the L-Value represented by the LHS.
+ SVal ExprVal = B->isLValue() ? LeftV : RightV;
+ evalStore(Tmp2, B, LHS, *it, state->BindExpr(B, LCtx, ExprVal),
+ LeftV, RightV);
+ continue;
+ }
+
+ if (!B->isAssignmentOp()) {
+ StmtNodeBuilder Bldr(*it, Tmp2, *currentBuilderContext);
+ // Process non-assignments except commas or short-circuited
+ // logical expressions (LAnd and LOr).
+ SVal Result = evalBinOp(state, Op, LeftV, RightV, B->getType());
+ if (Result.isUnknown()) {
+ Bldr.generateNode(B, *it, state);
+ continue;
+ }
+
+ state = state->BindExpr(B, LCtx, Result);
+ Bldr.generateNode(B, *it, state);
+ continue;
+ }
+
+ assert (B->isCompoundAssignmentOp());
+
+ switch (Op) {
+ default:
+ llvm_unreachable("Invalid opcode for compound assignment.");
+ case BO_MulAssign: Op = BO_Mul; break;
+ case BO_DivAssign: Op = BO_Div; break;
+ case BO_RemAssign: Op = BO_Rem; break;
+ case BO_AddAssign: Op = BO_Add; break;
+ case BO_SubAssign: Op = BO_Sub; break;
+ case BO_ShlAssign: Op = BO_Shl; break;
+ case BO_ShrAssign: Op = BO_Shr; break;
+ case BO_AndAssign: Op = BO_And; break;
+ case BO_XorAssign: Op = BO_Xor; break;
+ case BO_OrAssign: Op = BO_Or; break;
+ }
+
+ // Perform a load (the LHS). This performs the checks for
+ // null dereferences, and so on.
+ ExplodedNodeSet Tmp;
+ SVal location = LeftV;
+ evalLoad(Tmp, B, LHS, *it, state, location);
+
+ for (ExplodedNodeSet::iterator I = Tmp.begin(), E = Tmp.end(); I != E;
+ ++I) {
+
+ state = (*I)->getState();
+ const LocationContext *LCtx = (*I)->getLocationContext();
+ SVal V = state->getSVal(LHS, LCtx);
+
+ // Get the computation type.
+ QualType CTy =
+ cast<CompoundAssignOperator>(B)->getComputationResultType();
+ CTy = getContext().getCanonicalType(CTy);
+
+ QualType CLHSTy =
+ cast<CompoundAssignOperator>(B)->getComputationLHSType();
+ CLHSTy = getContext().getCanonicalType(CLHSTy);
+
+ QualType LTy = getContext().getCanonicalType(LHS->getType());
+
+ // Promote LHS.
+ V = svalBuilder.evalCast(V, CLHSTy, LTy);
+
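+      // For example, for 'short s; s += 1;' the addition below is performed
+      // in the computation type 'int', and the value stored back to 's' is
+      // converted back to 'short'.
+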
+ // Compute the result of the operation.
+ SVal Result = svalBuilder.evalCast(evalBinOp(state, Op, V, RightV, CTy),
+ B->getType(), CTy);
+
+ // EXPERIMENTAL: "Conjured" symbols.
+ // FIXME: Handle structs.
+
+ SVal LHSVal;
+
+ if (Result.isUnknown()) {
+
+ unsigned Count = currentBuilderContext->getCurrentBlockCount();
+
+ // The symbolic value is actually for the type of the left-hand side
+ // expression, not the computation type, as this is the value the
+ // LValue on the LHS will bind to.
+ LHSVal = svalBuilder.getConjuredSymbolVal(NULL, B->getRHS(), LCtx,
+ LTy, Count);
+
+ // However, we need to convert the symbol to the computation type.
+ Result = svalBuilder.evalCast(LHSVal, CTy, LTy);
+ }
+ else {
+        // The left-hand side may bind to a different value than the
+ // computation type.
+ LHSVal = svalBuilder.evalCast(Result, LTy, CTy);
+ }
+
+ // In C++, assignment and compound assignment operators return an
+ // lvalue.
+ if (B->isLValue())
+ state = state->BindExpr(B, LCtx, location);
+ else
+ state = state->BindExpr(B, LCtx, Result);
+
+ evalStore(Tmp2, B, LHS, *I, state, location, LHSVal);
+ }
+ }
+
+ // FIXME: postvisits eventually go in ::Visit()
+ getCheckerManager().runCheckersForPostStmt(Dst, Tmp2, B, *this);
+}
+
+void ExprEngine::VisitBlockExpr(const BlockExpr *BE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+
+ CanQualType T = getContext().getCanonicalType(BE->getType());
+ SVal V = svalBuilder.getBlockPointer(BE->getBlockDecl(), T,
+ Pred->getLocationContext());
+
+ ExplodedNodeSet Tmp;
+ StmtNodeBuilder Bldr(Pred, Tmp, *currentBuilderContext);
+ Bldr.generateNode(BE, Pred,
+ Pred->getState()->BindExpr(BE, Pred->getLocationContext(),
+ V),
+ false, 0,
+ ProgramPoint::PostLValueKind);
+
+ // FIXME: Move all post/pre visits to ::Visit().
+ getCheckerManager().runCheckersForPostStmt(Dst, Tmp, BE, *this);
+}
+
+void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+
+ ExplodedNodeSet dstPreStmt;
+ getCheckerManager().runCheckersForPreStmt(dstPreStmt, Pred, CastE, *this);
+
+ if (CastE->getCastKind() == CK_LValueToRValue) {
+ for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
+ I!=E; ++I) {
+ ExplodedNode *subExprNode = *I;
+ ProgramStateRef state = subExprNode->getState();
+ const LocationContext *LCtx = subExprNode->getLocationContext();
+ evalLoad(Dst, CastE, CastE, subExprNode, state, state->getSVal(Ex, LCtx));
+ }
+ return;
+ }
+
+ // All other casts.
+ QualType T = CastE->getType();
+ QualType ExTy = Ex->getType();
+
+ if (const ExplicitCastExpr *ExCast=dyn_cast_or_null<ExplicitCastExpr>(CastE))
+ T = ExCast->getTypeAsWritten();
+
+ StmtNodeBuilder Bldr(dstPreStmt, Dst, *currentBuilderContext);
+ for (ExplodedNodeSet::iterator I = dstPreStmt.begin(), E = dstPreStmt.end();
+ I != E; ++I) {
+
+ Pred = *I;
+
+ switch (CastE->getCastKind()) {
+ case CK_LValueToRValue:
+ llvm_unreachable("LValueToRValue casts handled earlier.");
+ case CK_ToVoid:
+ continue;
+ // The analyzer doesn't do anything special with these casts,
+ // since it understands retain/release semantics already.
+ case CK_ARCProduceObject:
+ case CK_ARCConsumeObject:
+ case CK_ARCReclaimReturnedObject:
+ case CK_ARCExtendBlockObject: // Fall-through.
+ case CK_CopyAndAutoreleaseBlockObject:
+      // The analyzer can ignore atomic casts for now, although some future
+ // checkers may want to make certain that you're not modifying the same
+ // value through atomic and nonatomic pointers.
+ case CK_AtomicToNonAtomic:
+ case CK_NonAtomicToAtomic:
+ // True no-ops.
+ case CK_NoOp:
+ case CK_FunctionToPointerDecay: {
+ // Copy the SVal of Ex to CastE.
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal V = state->getSVal(Ex, LCtx);
+ state = state->BindExpr(CastE, LCtx, V);
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
+ case CK_Dependent:
+ case CK_ArrayToPointerDecay:
+ case CK_BitCast:
+ case CK_LValueBitCast:
+ case CK_IntegralCast:
+ case CK_NullToPointer:
+ case CK_IntegralToPointer:
+ case CK_PointerToIntegral:
+ case CK_PointerToBoolean:
+ case CK_IntegralToBoolean:
+ case CK_IntegralToFloating:
+ case CK_FloatingToIntegral:
+ case CK_FloatingToBoolean:
+ case CK_FloatingCast:
+ case CK_FloatingRealToComplex:
+ case CK_FloatingComplexToReal:
+ case CK_FloatingComplexToBoolean:
+ case CK_FloatingComplexCast:
+ case CK_FloatingComplexToIntegralComplex:
+ case CK_IntegralRealToComplex:
+ case CK_IntegralComplexToReal:
+ case CK_IntegralComplexToBoolean:
+ case CK_IntegralComplexCast:
+ case CK_IntegralComplexToFloatingComplex:
+ case CK_CPointerToObjCPointerCast:
+ case CK_BlockPointerToObjCPointerCast:
+ case CK_AnyPointerToBlockPointerCast:
+ case CK_ObjCObjectLValueCast: {
+ // Delegate to SValBuilder to process.
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal V = state->getSVal(Ex, LCtx);
+ V = svalBuilder.evalCast(V, T, ExTy);
+ state = state->BindExpr(CastE, LCtx, V);
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
+ case CK_DerivedToBase:
+ case CK_UncheckedDerivedToBase: {
+ // For DerivedToBase cast, delegate to the store manager.
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal val = state->getSVal(Ex, LCtx);
+ val = getStoreManager().evalDerivedToBase(val, T);
+ state = state->BindExpr(CastE, LCtx, val);
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
+ // Handle C++ dyn_cast.
+ case CK_Dynamic: {
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal val = state->getSVal(Ex, LCtx);
+
+ // Compute the type of the result.
+ QualType resultType = CastE->getType();
+ if (CastE->isLValue())
+ resultType = getContext().getPointerType(resultType);
+
+ bool Failed = false;
+
+ // Check if the value being cast evaluates to 0.
+ if (val.isZeroConstant())
+ Failed = true;
+ // Else, evaluate the cast.
+ else
+ val = getStoreManager().evalDynamicCast(val, T, Failed);
+
+ if (Failed) {
+ if (T->isReferenceType()) {
+          // A bad_cast exception is thrown if the input value is a reference.
+          // Currently, we model this by generating a sink.
+ Bldr.generateNode(CastE, Pred, state, true);
+ continue;
+ } else {
+ // If the cast fails on a pointer, bind to 0.
+ state = state->BindExpr(CastE, LCtx, svalBuilder.makeNull());
+ }
+ } else {
+ // If we don't know if the cast succeeded, conjure a new symbol.
+ if (val.isUnknown()) {
+ DefinedOrUnknownSVal NewSym = svalBuilder.getConjuredSymbolVal(NULL,
+ CastE, LCtx, resultType,
+ currentBuilderContext->getCurrentBlockCount());
+ state = state->BindExpr(CastE, LCtx, NewSym);
+ } else
+ // Else, bind to the derived region value.
+ state = state->BindExpr(CastE, LCtx, val);
+ }
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
+ // Various C++ casts that are not handled yet.
+ case CK_ToUnion:
+ case CK_BaseToDerived:
+ case CK_NullToMemberPointer:
+ case CK_BaseToDerivedMemberPointer:
+ case CK_DerivedToBaseMemberPointer:
+ case CK_ReinterpretMemberPointer:
+ case CK_UserDefinedConversion:
+ case CK_ConstructorConversion:
+ case CK_VectorSplat:
+ case CK_MemberPointerToBoolean: {
+      // Recover some path-sensitivity by conjuring a new value.
+ QualType resultType = CastE->getType();
+ if (CastE->isLValue())
+ resultType = getContext().getPointerType(resultType);
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal result = svalBuilder.getConjuredSymbolVal(NULL, CastE, LCtx,
+ resultType, currentBuilderContext->getCurrentBlockCount());
+ ProgramStateRef state = Pred->getState()->BindExpr(CastE, LCtx,
+ result);
+ Bldr.generateNode(CastE, Pred, state);
+ continue;
+ }
+ }
+ }
+}
+
+void ExprEngine::VisitCompoundLiteralExpr(const CompoundLiteralExpr *CL,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder B(Pred, Dst, *currentBuilderContext);
+
+ const InitListExpr *ILE
+ = cast<InitListExpr>(CL->getInitializer()->IgnoreParens());
+
+ ProgramStateRef state = Pred->getState();
+ SVal ILV = state->getSVal(ILE, Pred->getLocationContext());
+ const LocationContext *LC = Pred->getLocationContext();
+ state = state->bindCompoundLiteral(CL, LC, ILV);
+
+ if (CL->isLValue())
+ B.generateNode(CL, Pred, state->BindExpr(CL, LC, state->getLValue(CL, LC)));
+ else
+ B.generateNode(CL, Pred, state->BindExpr(CL, LC, ILV));
+}
+
+void ExprEngine::VisitDeclStmt(const DeclStmt *DS, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+
+ // FIXME: static variables may have an initializer, but the second
+ // time a function is called those values may not be current.
+ // This may need to be reflected in the CFG.
+
+ // Assumption: The CFG has one DeclStmt per Decl.
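+  // For example, 'int a = 1, b = 2;' is split by the CFG builder into two
+  // separate DeclStmts, one per variable.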
+ const Decl *D = *DS->decl_begin();
+
+ if (!D || !isa<VarDecl>(D)) {
+ //TODO:AZ: remove explicit insertion after refactoring is done.
+ Dst.insert(Pred);
+ return;
+ }
+
+ // FIXME: all pre/post visits should eventually be handled by ::Visit().
+ ExplodedNodeSet dstPreVisit;
+ getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, DS, *this);
+
+ StmtNodeBuilder B(dstPreVisit, Dst, *currentBuilderContext);
+ const VarDecl *VD = dyn_cast<VarDecl>(D);
+ for (ExplodedNodeSet::iterator I = dstPreVisit.begin(), E = dstPreVisit.end();
+ I!=E; ++I) {
+ ExplodedNode *N = *I;
+ ProgramStateRef state = N->getState();
+
+ // Decls without InitExpr are not initialized explicitly.
+ const LocationContext *LC = N->getLocationContext();
+
+ if (const Expr *InitEx = VD->getInit()) {
+ SVal InitVal = state->getSVal(InitEx, Pred->getLocationContext());
+
+ // We bound the temp obj region to the CXXConstructExpr. Now recover
+ // the lazy compound value when the variable is not a reference.
+ if (AMgr.getLangOpts().CPlusPlus && VD->getType()->isRecordType() &&
+ !VD->getType()->isReferenceType() && isa<loc::MemRegionVal>(InitVal)){
+ InitVal = state->getSVal(cast<loc::MemRegionVal>(InitVal).getRegion());
+ assert(isa<nonloc::LazyCompoundVal>(InitVal));
+ }
+
+ // Recover some path-sensitivity if a scalar value evaluated to
+ // UnknownVal.
+ if (InitVal.isUnknown()) {
+ QualType Ty = InitEx->getType();
+ if (InitEx->isLValue()) {
+ Ty = getContext().getPointerType(Ty);
+ }
+
+ InitVal = svalBuilder.getConjuredSymbolVal(NULL, InitEx, LC, Ty,
+ currentBuilderContext->getCurrentBlockCount());
+ }
+ B.takeNodes(N);
+ ExplodedNodeSet Dst2;
+ evalBind(Dst2, DS, N, state->getLValue(VD, LC), InitVal, true);
+ B.addNodes(Dst2);
+ }
+ else {
+ B.generateNode(DS, N,state->bindDeclWithNoInit(state->getRegion(VD, LC)));
+ }
+ }
+}
+
+void ExprEngine::VisitLogicalExpr(const BinaryOperator* B, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ assert(B->getOpcode() == BO_LAnd ||
+ B->getOpcode() == BO_LOr);
+
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal X = state->getSVal(B, LCtx);
+ assert(X.isUndef());
+
+ const Expr *Ex = (const Expr*) cast<UndefinedVal>(X).getData();
+ assert(Ex);
+
+ if (Ex == B->getRHS()) {
+ X = state->getSVal(Ex, LCtx);
+
+ // Handle undefined values.
+ if (X.isUndef()) {
+ Bldr.generateNode(B, Pred, state->BindExpr(B, LCtx, X));
+ return;
+ }
+
+ DefinedOrUnknownSVal XD = cast<DefinedOrUnknownSVal>(X);
+
+ // We took the RHS. Because the value of the '&&' or '||' expression must
+ // evaluate to 0 or 1, we must assume the value of the RHS evaluates to 0
+ // or 1. Alternatively, we could take a lazy approach, and calculate this
+ // value later when necessary. We don't have the machinery in place for
+ // this right now, and since most logical expressions are used for branches,
+ // the payoff is not likely to be large. Instead, we do eager evaluation.
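+    // For example, for 'int b = (p && q);', once 'q' has been evaluated we
+    // constrain its value so that 'b' is bound to exactly 0 or 1.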
+ if (ProgramStateRef newState = state->assume(XD, true))
+ Bldr.generateNode(B, Pred,
+ newState->BindExpr(B, LCtx,
+ svalBuilder.makeIntVal(1U, B->getType())));
+
+ if (ProgramStateRef newState = state->assume(XD, false))
+ Bldr.generateNode(B, Pred,
+ newState->BindExpr(B, LCtx,
+ svalBuilder.makeIntVal(0U, B->getType())));
+ }
+ else {
+ // We took the LHS expression. Depending on whether we are '&&' or
+ // '||' we know what the value of the expression is via properties of
+ // the short-circuiting.
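+    // For example, reaching here for 'p && q' means 'p' was false, so the
+    // expression is 0; for 'p || q' it means 'p' was true, so it is 1.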
+ X = svalBuilder.makeIntVal(B->getOpcode() == BO_LAnd ? 0U : 1U,
+ B->getType());
+ Bldr.generateNode(B, Pred, state->BindExpr(B, LCtx, X));
+ }
+}
+
+void ExprEngine::VisitInitListExpr(const InitListExpr *IE,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder B(Pred, Dst, *currentBuilderContext);
+
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ QualType T = getContext().getCanonicalType(IE->getType());
+ unsigned NumInitElements = IE->getNumInits();
+
+ if (T->isArrayType() || T->isRecordType() || T->isVectorType()) {
+ llvm::ImmutableList<SVal> vals = getBasicVals().getEmptySValList();
+
+ // Handle base case where the initializer has no elements.
+ // e.g: static int* myArray[] = {};
+ if (NumInitElements == 0) {
+ SVal V = svalBuilder.makeCompoundVal(T, vals);
+ B.generateNode(IE, Pred, state->BindExpr(IE, LCtx, V));
+ return;
+ }
+
+ for (InitListExpr::const_reverse_iterator it = IE->rbegin(),
+ ei = IE->rend(); it != ei; ++it) {
+ vals = getBasicVals().consVals(state->getSVal(cast<Expr>(*it), LCtx),
+ vals);
+ }
+
+ B.generateNode(IE, Pred,
+ state->BindExpr(IE, LCtx,
+ svalBuilder.makeCompoundVal(T, vals)));
+ return;
+ }
+
+ if (Loc::isLocType(T) || T->isIntegerType()) {
+ assert(IE->getNumInits() == 1);
+ const Expr *initEx = IE->getInit(0);
+ B.generateNode(IE, Pred, state->BindExpr(IE, LCtx,
+ state->getSVal(initEx, LCtx)));
+ return;
+ }
+
+ llvm_unreachable("unprocessed InitListExpr type");
+}
+
+void ExprEngine::VisitGuardedExpr(const Expr *Ex,
+ const Expr *L,
+ const Expr *R,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder B(Pred, Dst, *currentBuilderContext);
+
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal X = state->getSVal(Ex, LCtx);
+ assert (X.isUndef());
+ const Expr *SE = (Expr*) cast<UndefinedVal>(X).getData();
+ assert(SE);
+ X = state->getSVal(SE, LCtx);
+
+ // Make sure that we invalidate the previous binding.
+ B.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, X, true));
+}
+
+void ExprEngine::
+VisitOffsetOfExpr(const OffsetOfExpr *OOE,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+ StmtNodeBuilder B(Pred, Dst, *currentBuilderContext);
+ APSInt IV;
+ if (OOE->EvaluateAsInt(IV, getContext())) {
+ assert(IV.getBitWidth() == getContext().getTypeSize(OOE->getType()));
+ assert(OOE->getType()->isIntegerType());
+ assert(IV.isSigned() == OOE->getType()->isSignedIntegerOrEnumerationType());
+ SVal X = svalBuilder.makeIntVal(IV);
+ B.generateNode(OOE, Pred,
+ Pred->getState()->BindExpr(OOE, Pred->getLocationContext(),
+ X));
+ }
+ // FIXME: Handle the case where __builtin_offsetof is not a constant.
+}
+
+
+void ExprEngine::
+VisitUnaryExprOrTypeTraitExpr(const UnaryExprOrTypeTraitExpr *Ex,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+
+ QualType T = Ex->getTypeOfArgument();
+
+ if (Ex->getKind() == UETT_SizeOf) {
+ if (!T->isIncompleteType() && !T->isConstantSizeType()) {
+ assert(T->isVariableArrayType() && "Unknown non-constant-sized type.");
+
+ // FIXME: Add support for VLA type arguments and VLA expressions.
+ // When that happens, we should probably refactor VLASizeChecker's code.
+ return;
+ }
+ else if (T->getAs<ObjCObjectType>()) {
+      // Some code tries to take the sizeof of an ObjCObjectType, relying on
+      // the compiler having laid out its representation.  Just report Unknown
+      // for these.
+ return;
+ }
+ }
+
+ APSInt Value = Ex->EvaluateKnownConstInt(getContext());
+ CharUnits amt = CharUnits::fromQuantity(Value.getZExtValue());
+
+ ProgramStateRef state = Pred->getState();
+ state = state->BindExpr(Ex, Pred->getLocationContext(),
+ svalBuilder.makeIntVal(amt.getQuantity(),
+ Ex->getType()));
+ Bldr.generateNode(Ex, Pred, state);
+}
+
+void ExprEngine::VisitUnaryOperator(const UnaryOperator* U,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ switch (U->getOpcode()) {
+ default: {
+ Bldr.takeNodes(Pred);
+ ExplodedNodeSet Tmp;
+ VisitIncrementDecrementOperator(U, Pred, Tmp);
+ Bldr.addNodes(Tmp);
+ }
+ break;
+ case UO_Real: {
+ const Expr *Ex = U->getSubExpr()->IgnoreParens();
+
+ // FIXME: We don't have complex SValues yet.
+ if (Ex->getType()->isAnyComplexType()) {
+ // Just report "Unknown."
+ break;
+ }
+
+ // For all other types, UO_Real is an identity operation.
+ assert (U->getType() == Ex->getType());
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ Bldr.generateNode(U, Pred, state->BindExpr(U, LCtx,
+ state->getSVal(Ex, LCtx)));
+ break;
+ }
+
+ case UO_Imag: {
+ const Expr *Ex = U->getSubExpr()->IgnoreParens();
+ // FIXME: We don't have complex SValues yet.
+ if (Ex->getType()->isAnyComplexType()) {
+ // Just report "Unknown."
+ break;
+ }
+ // For all other types, UO_Imag returns 0.
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal X = svalBuilder.makeZeroVal(Ex->getType());
+ Bldr.generateNode(U, Pred, state->BindExpr(U, LCtx, X));
+ break;
+ }
+
+ case UO_Plus:
+ assert(!U->isLValue());
+ // FALL-THROUGH.
+ case UO_Deref:
+ case UO_AddrOf:
+ case UO_Extension: {
+ // FIXME: We can probably just have some magic in Environment::getSVal()
+ // that propagates values, instead of creating a new node here.
+ //
+ // Unary "+" is a no-op, similar to a parentheses. We still have places
+ // where it may be a block-level expression, so we need to
+ // generate an extra node that just propagates the value of the
+ // subexpression.
+ const Expr *Ex = U->getSubExpr()->IgnoreParens();
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ Bldr.generateNode(U, Pred, state->BindExpr(U, LCtx,
+ state->getSVal(Ex, LCtx)));
+ break;
+ }
+
+ case UO_LNot:
+ case UO_Minus:
+ case UO_Not: {
+ assert (!U->isLValue());
+ const Expr *Ex = U->getSubExpr()->IgnoreParens();
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+
+ // Get the value of the subexpression.
+ SVal V = state->getSVal(Ex, LCtx);
+
+ if (V.isUnknownOrUndef()) {
+ Bldr.generateNode(U, Pred, state->BindExpr(U, LCtx, V));
+ break;
+ }
+
+ switch (U->getOpcode()) {
+ default:
+ llvm_unreachable("Invalid Opcode.");
+ case UO_Not:
+ // FIXME: Do we need to handle promotions?
+ state = state->BindExpr(U, LCtx, evalComplement(cast<NonLoc>(V)));
+ break;
+ case UO_Minus:
+ // FIXME: Do we need to handle promotions?
+ state = state->BindExpr(U, LCtx, evalMinus(cast<NonLoc>(V)));
+ break;
+ case UO_LNot:
+ // C99 6.5.3.3: "The expression !E is equivalent to (0==E)."
+ //
+ // Note: technically we do "E == 0", but this is the same in the
+ // transfer functions as "0 == E".
+ SVal Result;
+ if (isa<Loc>(V)) {
+ Loc X = svalBuilder.makeNull();
+ Result = evalBinOp(state, BO_EQ, cast<Loc>(V), X,
+ U->getType());
+ }
+ else {
+ nonloc::ConcreteInt X(getBasicVals().getValue(0, Ex->getType()));
+ Result = evalBinOp(state, BO_EQ, cast<NonLoc>(V), X,
+ U->getType());
+ }
+
+ state = state->BindExpr(U, LCtx, Result);
+ break;
+ }
+ Bldr.generateNode(U, Pred, state);
+ break;
+ }
+ }
+
+}
+
+void ExprEngine::VisitIncrementDecrementOperator(const UnaryOperator* U,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ // Handle ++ and -- (both pre- and post-increment).
+ assert (U->isIncrementDecrementOp());
+ const Expr *Ex = U->getSubExpr()->IgnoreParens();
+
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ProgramStateRef state = Pred->getState();
+ SVal loc = state->getSVal(Ex, LCtx);
+
+ // Perform a load.
+ ExplodedNodeSet Tmp;
+ evalLoad(Tmp, U, Ex, Pred, state, loc);
+
+ ExplodedNodeSet Dst2;
+ StmtNodeBuilder Bldr(Tmp, Dst2, *currentBuilderContext);
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end();I!=E;++I) {
+
+ state = (*I)->getState();
+ assert(LCtx == (*I)->getLocationContext());
+ SVal V2_untested = state->getSVal(Ex, LCtx);
+
+ // Propagate unknown and undefined values.
+ if (V2_untested.isUnknownOrUndef()) {
+ Bldr.generateNode(U, *I, state->BindExpr(U, LCtx, V2_untested));
+ continue;
+ }
+ DefinedSVal V2 = cast<DefinedSVal>(V2_untested);
+
+ // Handle all other values.
+ BinaryOperator::Opcode Op = U->isIncrementOp() ? BO_Add : BO_Sub;
+
+ // If the UnaryOperator has non-location type, use its type to create the
+ // constant value. If the UnaryOperator has location type, create the
+ // constant with int type and pointer width.
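+    // For example, '++p' on a pointer uses an array index of 1 so that
+    // pointer arithmetic applies, while '++i' on an int uses the integer 1.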
+ SVal RHS;
+
+ if (U->getType()->isAnyPointerType())
+ RHS = svalBuilder.makeArrayIndex(1);
+ else
+ RHS = svalBuilder.makeIntVal(1, U->getType());
+
+ SVal Result = evalBinOp(state, Op, V2, RHS, U->getType());
+
+ // Conjure a new symbol if necessary to recover precision.
+ if (Result.isUnknown()){
+ DefinedOrUnknownSVal SymVal =
+ svalBuilder.getConjuredSymbolVal(NULL, Ex, LCtx,
+ currentBuilderContext->getCurrentBlockCount());
+ Result = SymVal;
+
+ // If the value is a location, ++/-- should always preserve
+ // non-nullness. Check if the original value was non-null, and if so
+ // propagate that constraint.
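+      // For example, if 'p' was known to be non-null, the conjured value for
+      // '++p' is also constrained to be non-null.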
+ if (Loc::isLocType(U->getType())) {
+ DefinedOrUnknownSVal Constraint =
+ svalBuilder.evalEQ(state, V2,svalBuilder.makeZeroVal(U->getType()));
+
+ if (!state->assume(Constraint, true)) {
+ // It isn't feasible for the original value to be null.
+ // Propagate this constraint.
+ Constraint = svalBuilder.evalEQ(state, SymVal,
+ svalBuilder.makeZeroVal(U->getType()));
+
+
+ state = state->assume(Constraint, false);
+ assert(state);
+ }
+ }
+ }
+
+ // Since the lvalue-to-rvalue conversion is explicit in the AST,
+ // we bind an l-value if the operator is prefix and an lvalue (in C++).
+ if (U->isLValue())
+ state = state->BindExpr(U, LCtx, loc);
+ else
+ state = state->BindExpr(U, LCtx, U->isPostfix() ? V2 : Result);
+
+ // Perform the store.
+ Bldr.takeNodes(*I);
+ ExplodedNodeSet Dst3;
+ evalStore(Dst3, U, U, *I, state, loc, Result);
+ Bldr.addNodes(Dst3);
+ }
+ Dst.insert(Dst2);
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
new file mode 100644
index 0000000..a14a491
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -0,0 +1,300 @@
+//===- ExprEngineCXX.cpp - ExprEngine support for C++ -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the C++ expression evaluation engine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/StmtCXX.h"
+
+using namespace clang;
+using namespace ento;
+
+const CXXThisRegion *ExprEngine::getCXXThisRegion(const CXXRecordDecl *D,
+ const StackFrameContext *SFC) {
+ const Type *T = D->getTypeForDecl();
+ QualType PT = getContext().getPointerType(QualType(T, 0));
+ return svalBuilder.getRegionManager().getCXXThisRegion(PT, SFC);
+}
+
+const CXXThisRegion *ExprEngine::getCXXThisRegion(const CXXMethodDecl *decl,
+ const StackFrameContext *frameCtx) {
+ return svalBuilder.getRegionManager().
+ getCXXThisRegion(decl->getThisType(getContext()), frameCtx);
+}
+
+void ExprEngine::CreateCXXTemporaryObject(const MaterializeTemporaryExpr *ME,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ const Expr *tempExpr = ME->GetTemporaryExpr()->IgnoreParens();
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+
+ // Bind the temporary object to the value of the expression. Then bind
+ // the expression to the location of the object.
+ SVal V = state->getSVal(tempExpr, Pred->getLocationContext());
+
+ const MemRegion *R =
+ svalBuilder.getRegionManager().getCXXTempObjectRegion(ME, LCtx);
+
+ state = state->bindLoc(loc::MemRegionVal(R), V);
+ Bldr.generateNode(ME, Pred, state->BindExpr(ME, LCtx, loc::MemRegionVal(R)));
+}
+
+void ExprEngine::VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *expr,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ VisitCXXConstructExpr(expr, 0, Pred, Dst);
+}
+
+void ExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *E,
+ const MemRegion *Dest,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &destNodes) {
+
+#if 0
+ const CXXConstructorDecl *CD = E->getConstructor();
+ assert(CD);
+#endif
+
+#if 0
+ if (!(CD->doesThisDeclarationHaveABody() && AMgr.shouldInlineCall()))
+ // FIXME: invalidate the object.
+ return;
+#endif
+
+#if 0
+ // Is the constructor elidable?
+ if (E->isElidable()) {
+ destNodes.Add(Pred);
+ return;
+ }
+#endif
+
+ // Perform the previsit of the constructor.
+ ExplodedNodeSet SrcNodes;
+ SrcNodes.Add(Pred);
+ ExplodedNodeSet TmpNodes;
+ getCheckerManager().runCheckersForPreStmt(TmpNodes, SrcNodes, E, *this);
+
+  // Evaluate the constructor. Currently we do not allow checker-specific
+  // implementations of specific constructors (as we do with ordinary
+  // function calls). We can re-evaluate this in the future.
+
+#if 0
+ // Inlining currently isn't fully implemented.
+
+ if (AMgr.shouldInlineCall()) {
+ if (!Dest)
+ Dest =
+ svalBuilder.getRegionManager().getCXXTempObjectRegion(E,
+ Pred->getLocationContext());
+
+ // The callee stack frame context used to create the 'this'
+ // parameter region.
+ const StackFrameContext *SFC =
+ AMgr.getStackFrame(CD, Pred->getLocationContext(),
+ E, currentBuilderContext->getBlock(),
+ currentStmtIdx);
+
+ // Create the 'this' region.
+ const CXXThisRegion *ThisR =
+ getCXXThisRegion(E->getConstructor()->getParent(), SFC);
+
+ CallEnter Loc(E, SFC, Pred->getLocationContext());
+
+ StmtNodeBuilder Bldr(SrcNodes, TmpNodes, *currentBuilderContext);
+ for (ExplodedNodeSet::iterator NI = SrcNodes.begin(),
+ NE = SrcNodes.end(); NI != NE; ++NI) {
+ ProgramStateRef state = (*NI)->getState();
+      // Set up the 'this' region so that the ctor is evaluated on the object
+      // pointed to by 'Dest'.
+ state = state->bindLoc(loc::MemRegionVal(ThisR), loc::MemRegionVal(Dest));
+ Bldr.generateNode(Loc, *NI, state);
+ }
+ }
+#endif
+
+ // Default semantics: invalidate all regions passed as arguments.
+ ExplodedNodeSet destCall;
+ {
+ StmtNodeBuilder Bldr(TmpNodes, destCall, *currentBuilderContext);
+ for (ExplodedNodeSet::iterator i = TmpNodes.begin(), e = TmpNodes.end();
+ i != e; ++i)
+ {
+ ExplodedNode *Pred = *i;
+ const LocationContext *LC = Pred->getLocationContext();
+ ProgramStateRef state = Pred->getState();
+
+ state = invalidateArguments(state, CallOrObjCMessage(E, state, LC), LC);
+ Bldr.generateNode(E, Pred, state);
+ }
+ }
+ // Do the post visit.
+ getCheckerManager().runCheckersForPostStmt(destNodes, destCall, E, *this);
+}
+
+void ExprEngine::VisitCXXDestructor(const CXXDestructorDecl *DD,
+ const MemRegion *Dest,
+ const Stmt *S,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ if (!(DD->doesThisDeclarationHaveABody() && AMgr.shouldInlineCall()))
+ return;
+
+ // Create the context for 'this' region.
+ const StackFrameContext *SFC =
+ AnalysisDeclContexts.getContext(DD)->
+ getStackFrame(Pred->getLocationContext(), S,
+ currentBuilderContext->getBlock(), currentStmtIdx);
+
+ const CXXThisRegion *ThisR = getCXXThisRegion(DD->getParent(), SFC);
+
+ CallEnter PP(S, SFC, Pred->getLocationContext());
+
+ ProgramStateRef state = Pred->getState();
+ state = state->bindLoc(loc::MemRegionVal(ThisR), loc::MemRegionVal(Dest));
+ Bldr.generateNode(PP, Pred, state);
+}
+
+void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+
+ unsigned blockCount = currentBuilderContext->getCurrentBlockCount();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ DefinedOrUnknownSVal symVal =
+ svalBuilder.getConjuredSymbolVal(NULL, CNE, LCtx, CNE->getType(), blockCount);
+ const MemRegion *NewReg = cast<loc::MemRegionVal>(symVal).getRegion();
+ QualType ObjTy = CNE->getType()->getAs<PointerType>()->getPointeeType();
+ const ElementRegion *EleReg =
+ getStoreManager().GetElementZeroRegion(NewReg, ObjTy);
+
+ if (CNE->isArray()) {
+ // FIXME: allocating an array requires simulating the constructors.
+ // For now, just return a symbolicated region.
+ ProgramStateRef state = Pred->getState();
+ state = state->BindExpr(CNE, Pred->getLocationContext(),
+ loc::MemRegionVal(EleReg));
+ Bldr.generateNode(CNE, Pred, state);
+ return;
+ }
+
+ // FIXME: Update for AST changes.
+#if 0
+ // Evaluate constructor arguments.
+ const FunctionProtoType *FnType = NULL;
+ const CXXConstructorDecl *CD = CNE->getConstructor();
+ if (CD)
+ FnType = CD->getType()->getAs<FunctionProtoType>();
+ ExplodedNodeSet argsEvaluated;
+ Bldr.takeNodes(Pred);
+ evalArguments(CNE->constructor_arg_begin(), CNE->constructor_arg_end(),
+ FnType, Pred, argsEvaluated);
+ Bldr.addNodes(argsEvaluated);
+
+ // Initialize the object region and bind the 'new' expression.
+ for (ExplodedNodeSet::iterator I = argsEvaluated.begin(),
+ E = argsEvaluated.end(); I != E; ++I) {
+
+ ProgramStateRef state = (*I)->getState();
+
+ // Accumulate list of regions that are invalidated.
+ // FIXME: Eventually we should unify the logic for constructor
+ // processing in one place.
+ SmallVector<const MemRegion*, 10> regionsToInvalidate;
+ for (CXXNewExpr::const_arg_iterator
+ ai = CNE->constructor_arg_begin(), ae = CNE->constructor_arg_end();
+ ai != ae; ++ai)
+ {
+ SVal val = state->getSVal(*ai, (*I)->getLocationContext());
+ if (const MemRegion *region = val.getAsRegion())
+ regionsToInvalidate.push_back(region);
+ }
+
+ if (ObjTy->isRecordType()) {
+ regionsToInvalidate.push_back(EleReg);
+ // Invalidate the regions.
+ // TODO: Pass the call to new information as the last argument, to limit
+ // the globals which will get invalidated.
+ state = state->invalidateRegions(regionsToInvalidate,
+ CNE, blockCount, 0, 0);
+
+ } else {
+ // Invalidate the regions.
+ // TODO: Pass the call to new information as the last argument, to limit
+ // the globals which will get invalidated.
+ state = state->invalidateRegions(regionsToInvalidate,
+ CNE, blockCount, 0, 0);
+
+ if (CNE->hasInitializer()) {
+ SVal V = state->getSVal(*CNE->constructor_arg_begin(),
+ (*I)->getLocationContext());
+ state = state->bindLoc(loc::MemRegionVal(EleReg), V);
+ } else {
+ // Explicitly set to undefined, because currently we retrieve symbolic
+ // value from symbolic region.
+ state = state->bindLoc(loc::MemRegionVal(EleReg), UndefinedVal());
+ }
+ }
+ state = state->BindExpr(CNE, (*I)->getLocationContext(),
+ loc::MemRegionVal(EleReg));
+ Bldr.generateNode(CNE, *I, state);
+ }
+#endif
+}
+
+void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE,
+ ExplodedNode *Pred, ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ ProgramStateRef state = Pred->getState();
+ Bldr.generateNode(CDE, Pred, state);
+}
+
+void ExprEngine::VisitCXXCatchStmt(const CXXCatchStmt *CS,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ const VarDecl *VD = CS->getExceptionDecl();
+ if (!VD) {
+ Dst.Add(Pred);
+ return;
+ }
+
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal V = svalBuilder.getConjuredSymbolVal(CS, LCtx, VD->getType(),
+ currentBuilderContext->getCurrentBlockCount());
+ ProgramStateRef state = Pred->getState();
+ state = state->bindLoc(state->getLValue(VD, LCtx), V);
+
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+ Bldr.generateNode(CS, Pred, state);
+}
+
+void ExprEngine::VisitCXXThisExpr(const CXXThisExpr *TE, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+
+ // Get the this object region from StoreManager.
+ const LocationContext *LCtx = Pred->getLocationContext();
+ const MemRegion *R =
+ svalBuilder.getRegionManager().getCXXThisRegion(
+ getContext().getCanonicalType(TE->getType()),
+ LCtx);
+
+ ProgramStateRef state = Pred->getState();
+ SVal V = state->getSVal(loc::MemRegionVal(R));
+ Bldr.generateNode(TE, Pred, state->BindExpr(TE, LCtx, V));
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
new file mode 100644
index 0000000..b99bd54
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -0,0 +1,482 @@
+//=-- ExprEngineCallAndReturn.cpp - Support for call/return -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ExprEngine's support for calls and returns.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/AST/DeclCXX.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/Support/SaveAndRestore.h"
+
+using namespace clang;
+using namespace ento;
+
+void ExprEngine::processCallEnter(CallEnter CE, ExplodedNode *Pred) {
+ // Get the entry block in the CFG of the callee.
+ const StackFrameContext *calleeCtx = CE.getCalleeContext();
+ const CFG *CalleeCFG = calleeCtx->getCFG();
+ const CFGBlock *Entry = &(CalleeCFG->getEntry());
+
+ // Validate the CFG.
+ assert(Entry->empty());
+ assert(Entry->succ_size() == 1);
+
+  // Get the solitary successor.
+ const CFGBlock *Succ = *(Entry->succ_begin());
+
+ // Construct an edge representing the starting location in the callee.
+ BlockEdge Loc(Entry, Succ, calleeCtx);
+
+ // Construct a new state which contains the mapping from actual to
+ // formal arguments.
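+  // For example, for a call 'foo(x, y)', each parameter of 'foo' is bound in
+  // the callee's stack frame to the value of the corresponding argument.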
+ const LocationContext *callerCtx = Pred->getLocationContext();
+ ProgramStateRef state = Pred->getState()->enterStackFrame(callerCtx,
+ calleeCtx);
+
+ // Construct a new node and add it to the worklist.
+ bool isNew;
+ ExplodedNode *Node = G.getNode(Loc, state, false, &isNew);
+ Node->addPredecessor(Pred, G);
+ if (isNew)
+ Engine.getWorkList()->enqueue(Node);
+}
+
+static const ReturnStmt *getReturnStmt(const ExplodedNode *Node) {
+ while (Node) {
+ const ProgramPoint &PP = Node->getLocation();
+ // Skip any BlockEdges.
+ if (isa<BlockEdge>(PP) || isa<CallExit>(PP)) {
+ assert(Node->pred_size() == 1);
+ Node = *Node->pred_begin();
+ continue;
+ }
+ if (const StmtPoint *SP = dyn_cast<StmtPoint>(&PP)) {
+ const Stmt *S = SP->getStmt();
+ return dyn_cast<ReturnStmt>(S);
+ }
+ break;
+ }
+ return 0;
+}
+
+void ExprEngine::processCallExit(ExplodedNode *Pred) {
+ ProgramStateRef state = Pred->getState();
+ const StackFrameContext *calleeCtx =
+ Pred->getLocationContext()->getCurrentStackFrame();
+ const LocationContext *callerCtx = calleeCtx->getParent();
+ const Stmt *CE = calleeCtx->getCallSite();
+
+ // If the callee returns an expression, bind its value to CallExpr.
+ if (const ReturnStmt *RS = getReturnStmt(Pred)) {
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal V = state->getSVal(RS, LCtx);
+ state = state->BindExpr(CE, callerCtx, V);
+ }
+
+ // Bind the constructed object value to CXXConstructExpr.
+ if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
+ const CXXThisRegion *ThisR =
+ getCXXThisRegion(CCE->getConstructor()->getParent(), calleeCtx);
+
+ SVal ThisV = state->getSVal(ThisR);
+ // Always bind the region to the CXXConstructExpr.
+ state = state->BindExpr(CCE, Pred->getLocationContext(), ThisV);
+ }
+
+ static SimpleProgramPointTag returnTag("ExprEngine : Call Return");
+ PostStmt Loc(CE, callerCtx, &returnTag);
+ bool isNew;
+ ExplodedNode *N = G.getNode(Loc, state, false, &isNew);
+ N->addPredecessor(Pred, G);
+ if (!isNew)
+ return;
+
+ // Perform the post-condition check of the CallExpr.
+ ExplodedNodeSet Dst;
+ NodeBuilderContext Ctx(Engine, calleeCtx->getCallSiteBlock(), N);
+ SaveAndRestore<const NodeBuilderContext*> NBCSave(currentBuilderContext,
+ &Ctx);
+ SaveAndRestore<unsigned> CBISave(currentStmtIdx, calleeCtx->getIndex());
+
+ getCheckerManager().runCheckersForPostStmt(Dst, N, CE, *this,
+ /* wasInlined */ true);
+
+ // Enqueue the next element in the block.
+ for (ExplodedNodeSet::iterator I = Dst.begin(), E = Dst.end(); I != E; ++I) {
+ Engine.getWorkList()->enqueue(*I,
+ calleeCtx->getCallSiteBlock(),
+ calleeCtx->getIndex()+1);
+ }
+}
+
+static unsigned getNumberStackFrames(const LocationContext *LCtx) {
+ unsigned count = 0;
+ while (LCtx) {
+ if (isa<StackFrameContext>(LCtx))
+ ++count;
+ LCtx = LCtx->getParent();
+ }
+ return count;
+}
+
+// Determine if we should inline the call.
+bool ExprEngine::shouldInlineDecl(const FunctionDecl *FD, ExplodedNode *Pred) {
+ AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(FD);
+ const CFG *CalleeCFG = CalleeADC->getCFG();
+
+ if (getNumberStackFrames(Pred->getLocationContext())
+ == AMgr.InlineMaxStackDepth)
+ return false;
+
+ if (Engine.FunctionSummaries->hasReachedMaxBlockCount(FD))
+ return false;
+
+ if (CalleeCFG->getNumBlockIDs() > AMgr.InlineMaxFunctionSize)
+ return false;
+
+ return true;
+}
+
+// For now, skip inlining variadic functions.
+// We also don't inline blocks.
+static bool shouldInlineCallExpr(const CallExpr *CE, ExprEngine *E) {
+ if (!E->getAnalysisManager().shouldInlineCall())
+ return false;
+ QualType callee = CE->getCallee()->getType();
+ const FunctionProtoType *FT = 0;
+ if (const PointerType *PT = callee->getAs<PointerType>())
+ FT = dyn_cast<FunctionProtoType>(PT->getPointeeType());
+ else if (const BlockPointerType *BT = callee->getAs<BlockPointerType>()) {
+ // FIXME: inline blocks.
+ // FT = dyn_cast<FunctionProtoType>(BT->getPointeeType());
+ (void) BT;
+ return false;
+ }
+ // If we have no prototype, assume the function is okay.
+ if (!FT)
+ return true;
+
+ // Skip inlining of variadic functions.
+ return !FT->isVariadic();
+}
+
+bool ExprEngine::InlineCall(ExplodedNodeSet &Dst,
+ const CallExpr *CE,
+ ExplodedNode *Pred) {
+ if (!shouldInlineCallExpr(CE, this))
+ return false;
+
+ ProgramStateRef state = Pred->getState();
+ const Expr *Callee = CE->getCallee();
+ const FunctionDecl *FD =
+ state->getSVal(Callee, Pred->getLocationContext()).getAsFunctionDecl();
+ if (!FD || !FD->hasBody(FD))
+ return false;
+
+ switch (CE->getStmtClass()) {
+ default:
+ // FIXME: Handle C++.
+ break;
+ case Stmt::CallExprClass: {
+ if (!shouldInlineDecl(FD, Pred))
+ return false;
+
+ // Construct a new stack frame for the callee.
+ AnalysisDeclContext *CalleeADC = AMgr.getAnalysisDeclContext(FD);
+ const StackFrameContext *CallerSFC =
+ Pred->getLocationContext()->getCurrentStackFrame();
+ const StackFrameContext *CalleeSFC =
+ CalleeADC->getStackFrame(CallerSFC, CE,
+ currentBuilderContext->getBlock(),
+ currentStmtIdx);
+
+ CallEnter Loc(CE, CalleeSFC, Pred->getLocationContext());
+ bool isNew;
+ if (ExplodedNode *N = G.getNode(Loc, state, false, &isNew)) {
+ N->addPredecessor(Pred, G);
+ if (isNew)
+ Engine.getWorkList()->enqueue(N);
+ }
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool isPointerToConst(const ParmVarDecl *ParamDecl) {
+ QualType PointeeTy = ParamDecl->getOriginalType()->getPointeeType();
+ if (PointeeTy != QualType() && PointeeTy.isConstQualified() &&
+ !PointeeTy->isAnyPointerType() && !PointeeTy->isReferenceType()) {
+ return true;
+ }
+ return false;
+}
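// Hypothetical parameter declarations (for exposition only) and how the predicate
// above classifies them; only arguments matching the "preserved" kind keep their
// bindings across the call (see findPtrToConstParams/invalidateArguments below).

void read_only(const int *p);      // pointee is non-pointer const   -> preserved
void by_cref(const int &r);        // reference to non-pointer const -> preserved
void writable(int *p);             // pointee not const-qualified    -> invalidated
void via_pptr(int *const *pp);     // pointee is a pointer type      -> invalidated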
+
+// Try to retrieve the function declaration and find the function parameter
+// types which are pointers/references to a non-pointer const.
+// We do not invalidate the corresponding argument regions.
+static void findPtrToConstParams(llvm::SmallSet<unsigned, 1> &PreserveArgs,
+ const CallOrObjCMessage &Call) {
+ const Decl *CallDecl = Call.getDecl();
+ if (!CallDecl)
+ return;
+
+ if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(CallDecl)) {
+ const IdentifierInfo *II = FDecl->getIdentifier();
+
+    // List the cases where the region should be invalidated even if the
+ // argument is const.
+ if (II) {
+ StringRef FName = II->getName();
+      // - 'int pthread_setspecific(pthread_key k, const void *)' stores a
+      //   value into thread local storage. The value can later be retrieved with
+      //   'void *pthread_getspecific(pthread_key)'. So even though the
+ // parameter is 'const void *', the region escapes through the call.
+ // - funopen - sets a buffer for future IO calls.
+      // - ObjC functions that end with "NoCopy" can free memory of the
+      //   passed-in buffer.
+ // - Many CF containers allow objects to escape through custom
+ // allocators/deallocators upon container construction.
+ // - NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
+ // be deallocated by NSMapRemove.
+ if (FName == "pthread_setspecific" ||
+ FName == "funopen" ||
+ FName.endswith("NoCopy") ||
+ (FName.startswith("NS") &&
+ (FName.find("Insert") != StringRef::npos)) ||
+ Call.isCFCGAllowingEscape(FName))
+ return;
+ }
+
+ for (unsigned Idx = 0, E = Call.getNumArgs(); Idx != E; ++Idx) {
+ if (FDecl && Idx < FDecl->getNumParams()) {
+ if (isPointerToConst(FDecl->getParamDecl(Idx)))
+ PreserveArgs.insert(Idx);
+ }
+ }
+ return;
+ }
+
+ if (const ObjCMethodDecl *MDecl = dyn_cast<ObjCMethodDecl>(CallDecl)) {
+ assert(MDecl->param_size() <= Call.getNumArgs());
+ unsigned Idx = 0;
+ for (clang::ObjCMethodDecl::param_const_iterator
+ I = MDecl->param_begin(), E = MDecl->param_end(); I != E; ++I, ++Idx) {
+ if (isPointerToConst(*I))
+ PreserveArgs.insert(Idx);
+ }
+ return;
+ }
+}
+
+ProgramStateRef
+ExprEngine::invalidateArguments(ProgramStateRef State,
+ const CallOrObjCMessage &Call,
+ const LocationContext *LC) {
+ SmallVector<const MemRegion *, 8> RegionsToInvalidate;
+
+ if (Call.isObjCMessage()) {
+ // Invalidate all instance variables of the receiver of an ObjC message.
+ // FIXME: We should be able to do better with inter-procedural analysis.
+ if (const MemRegion *MR = Call.getInstanceMessageReceiver(LC).getAsRegion())
+ RegionsToInvalidate.push_back(MR);
+
+ } else if (Call.isCXXCall()) {
+ // Invalidate all instance variables for the callee of a C++ method call.
+ // FIXME: We should be able to do better with inter-procedural analysis.
+ // FIXME: We can probably do better for const versus non-const methods.
+ if (const MemRegion *Callee = Call.getCXXCallee().getAsRegion())
+ RegionsToInvalidate.push_back(Callee);
+
+ } else if (Call.isFunctionCall()) {
+ // Block calls invalidate all captured-by-reference values.
+ SVal CalleeVal = Call.getFunctionCallee();
+ if (const MemRegion *Callee = CalleeVal.getAsRegion()) {
+ if (isa<BlockDataRegion>(Callee))
+ RegionsToInvalidate.push_back(Callee);
+ }
+ }
+
+ // Indexes of arguments whose values will be preserved by the call.
+ llvm::SmallSet<unsigned, 1> PreserveArgs;
+ findPtrToConstParams(PreserveArgs, Call);
+
+ for (unsigned idx = 0, e = Call.getNumArgs(); idx != e; ++idx) {
+ if (PreserveArgs.count(idx))
+ continue;
+
+ SVal V = Call.getArgSVal(idx);
+
+ // If we are passing a location wrapped as an integer, unwrap it and
+    // invalidate the values referred to by the location.
+ if (nonloc::LocAsInteger *Wrapped = dyn_cast<nonloc::LocAsInteger>(&V))
+ V = Wrapped->getLoc();
+ else if (!isa<Loc>(V))
+ continue;
+
+ if (const MemRegion *R = V.getAsRegion()) {
+ // Invalidate the value of the variable passed by reference.
+
+ // Are we dealing with an ElementRegion? If the element type is
+ // a basic integer type (e.g., char, int) and the underlying region
+ // is a variable region then strip off the ElementRegion.
+      // FIXME: We really need to think about this for the general case:
+      //   sometimes we are reasoning about arrays, and other times
+      //   about (char*), etc., which is just a form of passing raw bytes.
+      //   e.g., void *p = alloca(); foo((char*)p);
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ // Checking for 'integral type' is probably too promiscuous, but
+ // we'll leave it in for now until we have a systematic way of
+ // handling all of these cases. Eventually we need to come up
+ // with an interface to StoreManager so that this logic can be
+ // appropriately delegated to the respective StoreManagers while
+ // still allowing us to do checker-specific logic (e.g.,
+ // invalidating reference counts), probably via callbacks.
+ if (ER->getElementType()->isIntegralOrEnumerationType()) {
+ const MemRegion *superReg = ER->getSuperRegion();
+ if (isa<VarRegion>(superReg) || isa<FieldRegion>(superReg) ||
+ isa<ObjCIvarRegion>(superReg))
+ R = cast<TypedRegion>(superReg);
+ }
+ // FIXME: What about layers of ElementRegions?
+ }
+
+ // Mark this region for invalidation. We batch invalidate regions
+ // below for efficiency.
+ RegionsToInvalidate.push_back(R);
+ } else {
+ // Nuke all other arguments passed by reference.
+ // FIXME: is this necessary or correct? This handles the non-Region
+ // cases. Is it ever valid to store to these?
+ State = State->unbindLoc(cast<Loc>(V));
+ }
+ }
+
+ // Invalidate designated regions using the batch invalidation API.
+
+ // FIXME: We can have collisions on the conjured symbol if the
+  //  argument expression also creates conjured symbols. We probably want
+ // to identify conjured symbols by an expression pair: the enclosing
+ // expression (the context) and the expression itself. This should
+ // disambiguate conjured symbols.
+ unsigned Count = currentBuilderContext->getCurrentBlockCount();
+ StoreManager::InvalidatedSymbols IS;
+
+ // NOTE: Even if RegionsToInvalidate is empty, we may still invalidate
+ // global variables.
+ return State->invalidateRegions(RegionsToInvalidate,
+ Call.getOriginExpr(), Count, LC,
+ &IS, &Call);
+
+}
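// A hypothetical caller (for exposition only) showing the net effect of the
// invalidation above when the callee is not inlined: values reachable through
// non-const pointer arguments become unknown, while arguments matched by
// findPtrToConstParams() keep their bindings.

void mutate(int *p);
void inspect(const int *p);

void client() {
  int a = 1, b = 2;
  mutate(&a);    // region of 'a' is invalidated; its value is now unknown/conjured
  inspect(&b);   // region of 'b' is preserved: pointer to non-pointer const
  (void)a; (void)b;
}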
+
+static ProgramStateRef getReplayWithoutInliningState(ExplodedNode *&N,
+ const CallExpr *CE) {
+ void *ReplayState = N->getState()->get<ReplayWithoutInlining>();
+ if (!ReplayState)
+ return 0;
+ const CallExpr *ReplayCE = reinterpret_cast<const CallExpr*>(ReplayState);
+ if (CE == ReplayCE) {
+ return N->getState()->remove<ReplayWithoutInlining>();
+ }
+ return 0;
+}
+
+void ExprEngine::VisitCallExpr(const CallExpr *CE, ExplodedNode *Pred,
+ ExplodedNodeSet &dst) {
+ // Perform the previsit of the CallExpr.
+ ExplodedNodeSet dstPreVisit;
+ getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, CE, *this);
+
+ // Now evaluate the call itself.
+ class DefaultEval : public GraphExpander {
+ ExprEngine &Eng;
+ const CallExpr *CE;
+ public:
+
+ DefaultEval(ExprEngine &eng, const CallExpr *ce)
+ : Eng(eng), CE(ce) {}
+ virtual void expandGraph(ExplodedNodeSet &Dst, ExplodedNode *Pred) {
+
+ ProgramStateRef state = getReplayWithoutInliningState(Pred, CE);
+
+ // First, try to inline the call.
+ if (state == 0 && Eng.InlineCall(Dst, CE, Pred))
+ return;
+
+ // First handle the return value.
+ StmtNodeBuilder Bldr(Pred, Dst, *Eng.currentBuilderContext);
+
+ // Get the callee.
+ const Expr *Callee = CE->getCallee()->IgnoreParens();
+ if (state == 0)
+ state = Pred->getState();
+ SVal L = state->getSVal(Callee, Pred->getLocationContext());
+
+ // Figure out the result type. We do this dance to handle references.
+ QualType ResultTy;
+ if (const FunctionDecl *FD = L.getAsFunctionDecl())
+ ResultTy = FD->getResultType();
+ else
+ ResultTy = CE->getType();
+
+ if (CE->isLValue())
+ ResultTy = Eng.getContext().getPointerType(ResultTy);
+
+ // Conjure a symbol value to use as the result.
+ SValBuilder &SVB = Eng.getSValBuilder();
+ unsigned Count = Eng.currentBuilderContext->getCurrentBlockCount();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal RetVal = SVB.getConjuredSymbolVal(0, CE, LCtx, ResultTy, Count);
+
+ // Generate a new state with the return value set.
+ state = state->BindExpr(CE, LCtx, RetVal);
+
+ // Invalidate the arguments.
+ state = Eng.invalidateArguments(state, CallOrObjCMessage(CE, state, LCtx),
+ LCtx);
+
+ // And make the result node.
+ Bldr.generateNode(CE, Pred, state);
+ }
+ };
+
+  // Finally, evaluate the function call. We try each of the checkers
+  // to see if they can evaluate the function call.
+ ExplodedNodeSet dstCallEvaluated;
+ DefaultEval defEval(*this, CE);
+ getCheckerManager().runCheckersForEvalCall(dstCallEvaluated,
+ dstPreVisit,
+ CE, *this, &defEval);
+
+ // Finally, perform the post-condition check of the CallExpr and store
+ // the created nodes in 'Dst'.
+ getCheckerManager().runCheckersForPostStmt(dst, dstCallEvaluated, CE,
+ *this);
+}
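// Hypothetical analyzed code (for exposition only): when no checker evaluates the
// call and it is not inlined, DefaultEval binds the CallExpr to a freshly conjured
// symbol of the callee's result type, and the arguments are invalidated.

int compute();

void use() {
  int v = compute();   // 'v' is bound to a conjured symbol of type 'int'
  if (v > 0) {         // both branches stay feasible; the symbol gets constrained
    /* ... */
  }
}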
+
+void ExprEngine::VisitReturnStmt(const ReturnStmt *RS, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+
+ ExplodedNodeSet dstPreVisit;
+ getCheckerManager().runCheckersForPreStmt(dstPreVisit, Pred, RS, *this);
+
+ StmtNodeBuilder B(dstPreVisit, Dst, *currentBuilderContext);
+
+ if (RS->getRetValue()) {
+ for (ExplodedNodeSet::iterator it = dstPreVisit.begin(),
+ ei = dstPreVisit.end(); it != ei; ++it) {
+ B.generateNode(RS, *it, (*it)->getState());
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
new file mode 100644
index 0000000..c8ad70a
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ExprEngineObjC.cpp
@@ -0,0 +1,273 @@
+//=-- ExprEngineObjC.cpp - ExprEngine support for Objective-C ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ExprEngine's support for Objective-C expressions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/StmtObjC.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+
+using namespace clang;
+using namespace ento;
+
+void ExprEngine::VisitLvalObjCIvarRefExpr(const ObjCIvarRefExpr *Ex,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+ SVal baseVal = state->getSVal(Ex->getBase(), LCtx);
+ SVal location = state->getLValue(Ex->getDecl(), baseVal);
+
+ ExplodedNodeSet dstIvar;
+ StmtNodeBuilder Bldr(Pred, dstIvar, *currentBuilderContext);
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, location));
+
+ // Perform the post-condition check of the ObjCIvarRefExpr and store
+ // the created nodes in 'Dst'.
+ getCheckerManager().runCheckersForPostStmt(Dst, dstIvar, Ex, *this);
+}
+
+void ExprEngine::VisitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt *S,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ getCheckerManager().runCheckersForPreStmt(Dst, Pred, S, *this);
+}
+
+void ExprEngine::VisitObjCForCollectionStmt(const ObjCForCollectionStmt *S,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+
+ // ObjCForCollectionStmts are processed in two places. This method
+ // handles the case where an ObjCForCollectionStmt* occurs as one of the
+ // statements within a basic block. This transfer function does two things:
+ //
+ // (1) binds the next container value to 'element'. This creates a new
+ // node in the ExplodedGraph.
+ //
+ // (2) binds the value 0/1 to the ObjCForCollectionStmt* itself, indicating
+ // whether or not the container has any more elements. This value
+ // will be tested in ProcessBranch. We need to explicitly bind
+ // this value because a container can contain nil elements.
+ //
+ // FIXME: Eventually this logic should actually do dispatches to
+ // 'countByEnumeratingWithState:objects:count:' (NSFastEnumeration).
+ // This will require simulating a temporary NSFastEnumerationState, either
+ // through an SVal or through the use of MemRegions. This value can
+ // be affixed to the ObjCForCollectionStmt* instead of 0/1; when the loop
+  // terminates we reclaim the temporary (it goes out of scope) and we
+  // can test if the SVal is 0 or if the MemRegion is null (depending
+ // on what approach we take).
+ //
+  // For now: simulate (1) by assigning the element either a symbol (if the
+  // container has elements) or nil (if it is empty). Thus this transfer
+  // function will by default result in state splitting.
+
+ const Stmt *elem = S->getElement();
+ ProgramStateRef state = Pred->getState();
+ SVal elementV;
+ StmtNodeBuilder Bldr(Pred, Dst, *currentBuilderContext);
+
+ if (const DeclStmt *DS = dyn_cast<DeclStmt>(elem)) {
+ const VarDecl *elemD = cast<VarDecl>(DS->getSingleDecl());
+ assert(elemD->getInit() == 0);
+ elementV = state->getLValue(elemD, Pred->getLocationContext());
+ }
+ else {
+ elementV = state->getSVal(elem, Pred->getLocationContext());
+ }
+
+ ExplodedNodeSet dstLocation;
+ Bldr.takeNodes(Pred);
+ evalLocation(dstLocation, S, elem, Pred, state, elementV, NULL, false);
+ Bldr.addNodes(dstLocation);
+
+ for (ExplodedNodeSet::iterator NI = dstLocation.begin(),
+ NE = dstLocation.end(); NI!=NE; ++NI) {
+ Pred = *NI;
+ ProgramStateRef state = Pred->getState();
+ const LocationContext *LCtx = Pred->getLocationContext();
+
+ // Handle the case where the container still has elements.
+ SVal TrueV = svalBuilder.makeTruthVal(1);
+ ProgramStateRef hasElems = state->BindExpr(S, LCtx, TrueV);
+
+ // Handle the case where the container has no elements.
+ SVal FalseV = svalBuilder.makeTruthVal(0);
+ ProgramStateRef noElems = state->BindExpr(S, LCtx, FalseV);
+
+ if (loc::MemRegionVal *MV = dyn_cast<loc::MemRegionVal>(&elementV))
+ if (const TypedValueRegion *R =
+ dyn_cast<TypedValueRegion>(MV->getRegion())) {
+ // FIXME: The proper thing to do is to really iterate over the
+ // container. We will do this with dispatch logic to the store.
+ // For now, just 'conjure' up a symbolic value.
+ QualType T = R->getValueType();
+ assert(Loc::isLocType(T));
+ unsigned Count = currentBuilderContext->getCurrentBlockCount();
+ SymbolRef Sym = SymMgr.getConjuredSymbol(elem, LCtx, T, Count);
+ SVal V = svalBuilder.makeLoc(Sym);
+ hasElems = hasElems->bindLoc(elementV, V);
+
+ // Bind the location to 'nil' on the false branch.
+ SVal nilV = svalBuilder.makeIntVal(0, T);
+ noElems = noElems->bindLoc(elementV, nilV);
+ }
+
+ // Create the new nodes.
+ Bldr.generateNode(S, Pred, hasElems);
+ Bldr.generateNode(S, Pred, noElems);
+ }
+}
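// Hypothetical Objective-C source (for exposition only) driving the transfer
// function above: on each visit the element variable is bound either to a conjured
// symbol (the collection still has elements) or to nil, and the statement itself is
// bound to 1 or 0 so ProcessBranch can decide whether the loop body runs again.
//
//   for (id obj in items) {   // obj := conjured symbol, stmt := 1  (has elements)
//     use(obj);
//   }                         // obj := nil,             stmt := 0  (loop exits)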
+
+void ExprEngine::VisitObjCMessage(const ObjCMessage &msg,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+
+ // Handle the previsits checks.
+ ExplodedNodeSet dstPrevisit;
+ getCheckerManager().runCheckersForPreObjCMessage(dstPrevisit, Pred,
+ msg, *this);
+
+  // Proceed with evaluating the message expression.
+ ExplodedNodeSet dstEval;
+ StmtNodeBuilder Bldr(dstPrevisit, dstEval, *currentBuilderContext);
+
+ for (ExplodedNodeSet::iterator DI = dstPrevisit.begin(),
+ DE = dstPrevisit.end(); DI != DE; ++DI) {
+
+ ExplodedNode *Pred = *DI;
+ bool RaisesException = false;
+
+ if (const Expr *Receiver = msg.getInstanceReceiver()) {
+ ProgramStateRef state = Pred->getState();
+ SVal recVal = state->getSVal(Receiver, Pred->getLocationContext());
+ if (!recVal.isUndef()) {
+ // Bifurcate the state into nil and non-nil ones.
+ DefinedOrUnknownSVal receiverVal = cast<DefinedOrUnknownSVal>(recVal);
+
+ ProgramStateRef notNilState, nilState;
+ llvm::tie(notNilState, nilState) = state->assume(receiverVal);
+
+        // There are three cases: can be nil or non-nil, must be nil, must be
+        // non-nil. We ignore must-be-nil and merge the other two into non-nil.
+ if (nilState && !notNilState) {
+ continue;
+ }
+
+ // Check if the "raise" message was sent.
+ assert(notNilState);
+ if (msg.getSelector() == RaiseSel)
+ RaisesException = true;
+
+ // If we raise an exception, for now treat it as a sink.
+ // Eventually we will want to handle exceptions properly.
+ // Dispatch to plug-in transfer function.
+ evalObjCMessage(Bldr, msg, Pred, notNilState, RaisesException);
+ }
+ }
+ else if (const ObjCInterfaceDecl *Iface = msg.getReceiverInterface()) {
+ IdentifierInfo* ClsName = Iface->getIdentifier();
+ Selector S = msg.getSelector();
+
+ // Check for special instance methods.
+ if (!NSExceptionII) {
+ ASTContext &Ctx = getContext();
+ NSExceptionII = &Ctx.Idents.get("NSException");
+ }
+
+ if (ClsName == NSExceptionII) {
+ enum { NUM_RAISE_SELECTORS = 2 };
+
+ // Lazily create a cache of the selectors.
+ if (!NSExceptionInstanceRaiseSelectors) {
+ ASTContext &Ctx = getContext();
+ NSExceptionInstanceRaiseSelectors =
+ new Selector[NUM_RAISE_SELECTORS];
+ SmallVector<IdentifierInfo*, NUM_RAISE_SELECTORS> II;
+ unsigned idx = 0;
+
+ // raise:format:
+ II.push_back(&Ctx.Idents.get("raise"));
+ II.push_back(&Ctx.Idents.get("format"));
+ NSExceptionInstanceRaiseSelectors[idx++] =
+ Ctx.Selectors.getSelector(II.size(), &II[0]);
+
+          // raise:format:arguments:
+ II.push_back(&Ctx.Idents.get("arguments"));
+ NSExceptionInstanceRaiseSelectors[idx++] =
+ Ctx.Selectors.getSelector(II.size(), &II[0]);
+ }
+
+ for (unsigned i = 0; i < NUM_RAISE_SELECTORS; ++i)
+ if (S == NSExceptionInstanceRaiseSelectors[i]) {
+ RaisesException = true;
+ break;
+ }
+ }
+
+ // If we raise an exception, for now treat it as a sink.
+ // Eventually we will want to handle exceptions properly.
+ // Dispatch to plug-in transfer function.
+ evalObjCMessage(Bldr, msg, Pred, Pred->getState(), RaisesException);
+ }
+ }
+
+ // Finally, perform the post-condition check of the ObjCMessageExpr and store
+ // the created nodes in 'Dst'.
+ getCheckerManager().runCheckersForPostObjCMessage(Dst, dstEval, msg, *this);
+}
+
+void ExprEngine::evalObjCMessage(StmtNodeBuilder &Bldr,
+ const ObjCMessage &msg,
+ ExplodedNode *Pred,
+ ProgramStateRef state,
+ bool GenSink) {
+ // First handle the return value.
+ SVal ReturnValue = UnknownVal();
+
+ // Some method families have known return values.
+ switch (msg.getMethodFamily()) {
+ default:
+ break;
+ case OMF_autorelease:
+ case OMF_retain:
+ case OMF_self: {
+ // These methods return their receivers.
+ const Expr *ReceiverE = msg.getInstanceReceiver();
+ if (ReceiverE)
+ ReturnValue = state->getSVal(ReceiverE, Pred->getLocationContext());
+ break;
+ }
+ }
+
+ // If we failed to figure out the return value, use a conjured value instead.
+ if (ReturnValue.isUnknown()) {
+ SValBuilder &SVB = getSValBuilder();
+ QualType ResultTy = msg.getResultType(getContext());
+ unsigned Count = currentBuilderContext->getCurrentBlockCount();
+ const Expr *CurrentE = cast<Expr>(currentStmt);
+ const LocationContext *LCtx = Pred->getLocationContext();
+ ReturnValue = SVB.getConjuredSymbolVal(NULL, CurrentE, LCtx, ResultTy, Count);
+ }
+
+ // Bind the return value.
+ const LocationContext *LCtx = Pred->getLocationContext();
+ state = state->BindExpr(currentStmt, LCtx, ReturnValue);
+
+ // Invalidate the arguments (and the receiver)
+ state = invalidateArguments(state, CallOrObjCMessage(msg, state, LCtx), LCtx);
+
+ // And create the new node.
+ Bldr.generateNode(msg.getMessageExpr(), Pred, state, GenSink);
+ assert(Bldr.hasGeneratedNodes());
+}
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/FunctionSummary.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/FunctionSummary.cpp
new file mode 100644
index 0000000..c227aac
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/FunctionSummary.cpp
@@ -0,0 +1,38 @@
+//== FunctionSummary.cpp - Stores summaries of functions ----------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a summary of a function gathered/used by static analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/FunctionSummary.h"
+using namespace clang;
+using namespace ento;
+
+FunctionSummariesTy::~FunctionSummariesTy() {
+ for (MapTy::iterator I = Map.begin(), E = Map.end(); I != E; ++I) {
+ delete(I->second);
+ }
+}
+
+unsigned FunctionSummariesTy::getTotalNumBasicBlocks() {
+ unsigned Total = 0;
+ for (MapTy::iterator I = Map.begin(), E = Map.end(); I != E; ++I) {
+ Total += I->second->TotalBasicBlocks;
+ }
+ return Total;
+}
+
+unsigned FunctionSummariesTy::getTotalNumVisitedBasicBlocks() {
+ unsigned Total = 0;
+ for (MapTy::iterator I = Map.begin(), E = Map.end(); I != E; ++I) {
+ Total += I->second->VisitedBasicBlocks.count();
+ }
+ return Total;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
new file mode 100644
index 0000000..629f1ea
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/HTMLDiagnostics.cpp
@@ -0,0 +1,578 @@
+//===--- HTMLDiagnostics.cpp - HTML Diagnostics for Paths ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the HTMLDiagnostics object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Rewrite/Rewriter.h"
+#include "clang/Rewrite/HTMLRewrite.h"
+#include "clang/Lex/Lexer.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Path.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Boilerplate.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class HTMLDiagnostics : public PathDiagnosticConsumer {
+ llvm::sys::Path Directory, FilePrefix;
+ bool createdDir, noDir;
+ const Preprocessor &PP;
+public:
+ HTMLDiagnostics(const std::string& prefix, const Preprocessor &pp);
+
+ virtual ~HTMLDiagnostics() { FlushDiagnostics(NULL); }
+
+ virtual void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+ SmallVectorImpl<std::string> *FilesMade);
+
+ virtual StringRef getName() const {
+ return "HTMLDiagnostics";
+ }
+
+ unsigned ProcessMacroPiece(raw_ostream &os,
+ const PathDiagnosticMacroPiece& P,
+ unsigned num);
+
+ void HandlePiece(Rewriter& R, FileID BugFileID,
+ const PathDiagnosticPiece& P, unsigned num, unsigned max);
+
+ void HighlightRange(Rewriter& R, FileID BugFileID, SourceRange Range,
+ const char *HighlightStart = "<span class=\"mrange\">",
+ const char *HighlightEnd = "</span>");
+
+ void ReportDiag(const PathDiagnostic& D,
+ SmallVectorImpl<std::string> *FilesMade);
+};
+
+} // end anonymous namespace
+
+HTMLDiagnostics::HTMLDiagnostics(const std::string& prefix,
+ const Preprocessor &pp)
+ : Directory(prefix), FilePrefix(prefix), createdDir(false), noDir(false),
+ PP(pp) {
+ // All html files begin with "report"
+ FilePrefix.appendComponent("report");
+}
+
+PathDiagnosticConsumer*
+ento::createHTMLDiagnosticConsumer(const std::string& prefix,
+ const Preprocessor &PP) {
+ return new HTMLDiagnostics(prefix, PP);
+}
+
+//===----------------------------------------------------------------------===//
+// Report processing.
+//===----------------------------------------------------------------------===//
+
+void HTMLDiagnostics::FlushDiagnosticsImpl(
+ std::vector<const PathDiagnostic *> &Diags,
+ SmallVectorImpl<std::string> *FilesMade) {
+ for (std::vector<const PathDiagnostic *>::iterator it = Diags.begin(),
+ et = Diags.end(); it != et; ++it) {
+ ReportDiag(**it, FilesMade);
+ }
+}
+
+static void flattenPath(PathPieces &primaryPath, PathPieces &currentPath,
+ const PathPieces &oldPath) {
+ for (PathPieces::const_iterator it = oldPath.begin(), et = oldPath.end();
+ it != et; ++it ) {
+ PathDiagnosticPiece *piece = it->getPtr();
+ if (const PathDiagnosticCallPiece *call =
+ dyn_cast<PathDiagnosticCallPiece>(piece)) {
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> callEnter =
+ call->getCallEnterEvent();
+ if (callEnter)
+ currentPath.push_back(callEnter);
+ flattenPath(primaryPath, primaryPath, call->path);
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> callExit =
+ call->getCallExitEvent();
+ if (callExit)
+ currentPath.push_back(callExit);
+ continue;
+ }
+ if (PathDiagnosticMacroPiece *macro =
+ dyn_cast<PathDiagnosticMacroPiece>(piece)) {
+ currentPath.push_back(piece);
+ PathPieces newPath;
+ flattenPath(primaryPath, newPath, macro->subPieces);
+ macro->subPieces = newPath;
+ continue;
+ }
+
+ currentPath.push_back(piece);
+ }
+}
+
+void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
+ SmallVectorImpl<std::string> *FilesMade) {
+
+ // Create the HTML directory if it is missing.
+ if (!createdDir) {
+ createdDir = true;
+ std::string ErrorMsg;
+ Directory.createDirectoryOnDisk(true, &ErrorMsg);
+
+ bool IsDirectory;
+ if (llvm::sys::fs::is_directory(Directory.str(), IsDirectory) ||
+ !IsDirectory) {
+ llvm::errs() << "warning: could not create directory '"
+ << Directory.str() << "'\n"
+ << "reason: " << ErrorMsg << '\n';
+
+ noDir = true;
+
+ return;
+ }
+ }
+
+ if (noDir)
+ return;
+
+ // First flatten out the entire path to make it easier to use.
+ PathPieces path;
+ flattenPath(path, path, D.path);
+
+  // The path has already been prechecked: all parts of the path are
+  // from the same file and it is non-empty.
+ const SourceManager &SMgr = (*path.begin())->getLocation().getManager();
+ assert(!path.empty());
+ FileID FID =
+ (*path.begin())->getLocation().asLocation().getExpansionLoc().getFileID();
+ assert(!FID.isInvalid());
+
+ // Create a new rewriter to generate HTML.
+ Rewriter R(const_cast<SourceManager&>(SMgr), PP.getLangOpts());
+
+ // Process the path.
+ unsigned n = path.size();
+ unsigned max = n;
+
+ for (PathPieces::const_reverse_iterator I = path.rbegin(),
+ E = path.rend();
+ I != E; ++I, --n)
+ HandlePiece(R, FID, **I, n, max);
+
+ // Add line numbers, header, footer, etc.
+
+ // unsigned FID = R.getSourceMgr().getMainFileID();
+ html::EscapeText(R, FID);
+ html::AddLineNumbers(R, FID);
+
+ // If we have a preprocessor, relex the file and syntax highlight.
+ // We might not have a preprocessor if we come from a deserialized AST file,
+ // for example.
+
+ html::SyntaxHighlight(R, FID, PP);
+ html::HighlightMacros(R, FID, PP);
+
+ // Get the full directory name of the analyzed file.
+
+ const FileEntry* Entry = SMgr.getFileEntryForID(FID);
+
+  // This is a kludge; basically we want to prepend the current working
+  // directory to the file name if we have no directory information. This
+  // is a work in progress.
+
+ std::string DirName = "";
+
+ if (llvm::sys::path::is_relative(Entry->getName())) {
+ llvm::sys::Path P = llvm::sys::Path::GetCurrentDirectory();
+ DirName = P.str() + "/";
+ }
+
+ // Add the name of the file as an <h1> tag.
+
+ {
+ std::string s;
+ llvm::raw_string_ostream os(s);
+
+ os << "<!-- REPORTHEADER -->\n"
+ << "<h3>Bug Summary</h3>\n<table class=\"simpletable\">\n"
+ "<tr><td class=\"rowname\">File:</td><td>"
+ << html::EscapeText(DirName)
+ << html::EscapeText(Entry->getName())
+ << "</td></tr>\n<tr><td class=\"rowname\">Location:</td><td>"
+ "<a href=\"#EndPath\">line "
+ << (*path.rbegin())->getLocation().asLocation().getExpansionLineNumber()
+ << ", column "
+ << (*path.rbegin())->getLocation().asLocation().getExpansionColumnNumber()
+ << "</a></td></tr>\n"
+ "<tr><td class=\"rowname\">Description:</td><td>"
+ << D.getDescription() << "</td></tr>\n";
+
+ // Output any other meta data.
+
+ for (PathDiagnostic::meta_iterator I=D.meta_begin(), E=D.meta_end();
+ I!=E; ++I) {
+ os << "<tr><td></td><td>" << html::EscapeText(*I) << "</td></tr>\n";
+ }
+
+ os << "</table>\n<!-- REPORTSUMMARYEXTRA -->\n"
+ "<h3>Annotated Source Code</h3>\n";
+
+ R.InsertTextBefore(SMgr.getLocForStartOfFile(FID), os.str());
+ }
+
+ // Embed meta-data tags.
+ {
+ std::string s;
+ llvm::raw_string_ostream os(s);
+
+ const std::string& BugDesc = D.getDescription();
+ if (!BugDesc.empty())
+ os << "\n<!-- BUGDESC " << BugDesc << " -->\n";
+
+ const std::string& BugType = D.getBugType();
+ if (!BugType.empty())
+ os << "\n<!-- BUGTYPE " << BugType << " -->\n";
+
+ const std::string& BugCategory = D.getCategory();
+ if (!BugCategory.empty())
+ os << "\n<!-- BUGCATEGORY " << BugCategory << " -->\n";
+
+ os << "\n<!-- BUGFILE " << DirName << Entry->getName() << " -->\n";
+
+ os << "\n<!-- BUGLINE "
+ << path.back()->getLocation().asLocation().getExpansionLineNumber()
+ << " -->\n";
+
+ os << "\n<!-- BUGPATHLENGTH " << path.size() << " -->\n";
+
+ // Mark the end of the tags.
+ os << "\n<!-- BUGMETAEND -->\n";
+
+ // Insert the text.
+ R.InsertTextBefore(SMgr.getLocForStartOfFile(FID), os.str());
+ }
+
+ // Add CSS, header, and footer.
+
+ html::AddHeaderFooterInternalBuiltinCSS(R, FID, Entry->getName());
+
+ // Get the rewrite buffer.
+ const RewriteBuffer *Buf = R.getRewriteBufferFor(FID);
+
+ if (!Buf) {
+ llvm::errs() << "warning: no diagnostics generated for main file.\n";
+ return;
+ }
+
+ // Create a path for the target HTML file.
+ llvm::sys::Path F(FilePrefix);
+ F.makeUnique(false, NULL);
+
+ // Rename the file with an HTML extension.
+ llvm::sys::Path H(F);
+ H.appendSuffix("html");
+ F.renamePathOnDisk(H, NULL);
+
+ std::string ErrorMsg;
+ llvm::raw_fd_ostream os(H.c_str(), ErrorMsg);
+
+ if (!ErrorMsg.empty()) {
+ llvm::errs() << "warning: could not create file '" << F.str()
+ << "'\n";
+ return;
+ }
+
+ if (FilesMade)
+ FilesMade->push_back(llvm::sys::path::filename(H.str()));
+
+ // Emit the HTML to disk.
+ for (RewriteBuffer::iterator I = Buf->begin(), E = Buf->end(); I!=E; ++I)
+ os << *I;
+}
+
+void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
+ const PathDiagnosticPiece& P,
+ unsigned num, unsigned max) {
+
+ // For now, just draw a box above the line in question, and emit the
+ // warning.
+ FullSourceLoc Pos = P.getLocation().asLocation();
+
+ if (!Pos.isValid())
+ return;
+
+ SourceManager &SM = R.getSourceMgr();
+ assert(&Pos.getManager() == &SM && "SourceManagers are different!");
+ std::pair<FileID, unsigned> LPosInfo = SM.getDecomposedExpansionLoc(Pos);
+
+ if (LPosInfo.first != BugFileID)
+ return;
+
+ const llvm::MemoryBuffer *Buf = SM.getBuffer(LPosInfo.first);
+ const char* FileStart = Buf->getBufferStart();
+
+ // Compute the column number. Rewind from the current position to the start
+ // of the line.
+ unsigned ColNo = SM.getColumnNumber(LPosInfo.first, LPosInfo.second);
+  const char *TokInstantiationPtr = Pos.getExpansionLoc().getCharacterData();
+ const char *LineStart = TokInstantiationPtr-ColNo;
+
+ // Compute LineEnd.
+ const char *LineEnd = TokInstantiationPtr;
+ const char* FileEnd = Buf->getBufferEnd();
+ while (*LineEnd != '\n' && LineEnd != FileEnd)
+ ++LineEnd;
+
+ // Compute the margin offset by counting tabs and non-tabs.
+ unsigned PosNo = 0;
+ for (const char* c = LineStart; c != TokInstantiationPtr; ++c)
+ PosNo += *c == '\t' ? 8 : 1;
+
+ // Create the html for the message.
+
+ const char *Kind = 0;
+ switch (P.getKind()) {
+ case PathDiagnosticPiece::Call:
+ llvm_unreachable("Calls should already be handled");
+ case PathDiagnosticPiece::Event: Kind = "Event"; break;
+ case PathDiagnosticPiece::ControlFlow: Kind = "Control"; break;
+ // Setting Kind to "Control" is intentional.
+ case PathDiagnosticPiece::Macro: Kind = "Control"; break;
+ }
+
+ std::string sbuf;
+ llvm::raw_string_ostream os(sbuf);
+
+ os << "\n<tr><td class=\"num\"></td><td class=\"line\"><div id=\"";
+
+ if (num == max)
+ os << "EndPath";
+ else
+ os << "Path" << num;
+
+ os << "\" class=\"msg";
+ if (Kind)
+ os << " msg" << Kind;
+ os << "\" style=\"margin-left:" << PosNo << "ex";
+
+ // Output a maximum size.
+ if (!isa<PathDiagnosticMacroPiece>(P)) {
+    // Get the string and determine the length of its longest token.
+ const std::string& Msg = P.getString();
+ unsigned max_token = 0;
+ unsigned cnt = 0;
+ unsigned len = Msg.size();
+
+ for (std::string::const_iterator I=Msg.begin(), E=Msg.end(); I!=E; ++I)
+ switch (*I) {
+ default:
+ ++cnt;
+ continue;
+ case ' ':
+ case '\t':
+ case '\n':
+ if (cnt > max_token) max_token = cnt;
+ cnt = 0;
+ }
+
+ if (cnt > max_token)
+ max_token = cnt;
+
+ // Determine the approximate size of the message bubble in em.
+ unsigned em;
+ const unsigned max_line = 120;
+
+ if (max_token >= max_line)
+ em = max_token / 2;
+ else {
+ unsigned characters = max_line;
+ unsigned lines = len / max_line;
+
+ if (lines > 0) {
+ for (; characters > max_token; --characters)
+ if (len / characters > lines) {
+ ++characters;
+ break;
+ }
+ }
+
+ em = characters / 2;
+ }
+
+ if (em < max_line/2)
+ os << "; max-width:" << em << "em";
+ }
+ else
+ os << "; max-width:100em";
+
+ os << "\">";
+
+ if (max > 1) {
+ os << "<table class=\"msgT\"><tr><td valign=\"top\">";
+ os << "<div class=\"PathIndex";
+ if (Kind) os << " PathIndex" << Kind;
+ os << "\">" << num << "</div>";
+ os << "</td><td>";
+ }
+
+ if (const PathDiagnosticMacroPiece *MP =
+ dyn_cast<PathDiagnosticMacroPiece>(&P)) {
+
+ os << "Within the expansion of the macro '";
+
+ // Get the name of the macro by relexing it.
+ {
+ FullSourceLoc L = MP->getLocation().asLocation().getExpansionLoc();
+ assert(L.isFileID());
+ StringRef BufferInfo = L.getBufferData();
+ const char* MacroName = L.getDecomposedLoc().second + BufferInfo.data();
+ Lexer rawLexer(L, PP.getLangOpts(), BufferInfo.begin(),
+ MacroName, BufferInfo.end());
+
+ Token TheTok;
+ rawLexer.LexFromRawLexer(TheTok);
+ for (unsigned i = 0, n = TheTok.getLength(); i < n; ++i)
+ os << MacroName[i];
+ }
+
+ os << "':\n";
+
+ if (max > 1)
+ os << "</td></tr></table>";
+
+ // Within a macro piece. Write out each event.
+ ProcessMacroPiece(os, *MP, 0);
+ }
+ else {
+ os << html::EscapeText(P.getString());
+
+ if (max > 1)
+ os << "</td></tr></table>";
+ }
+
+ os << "</div></td></tr>";
+
+ // Insert the new html.
+ unsigned DisplayPos = LineEnd - FileStart;
+ SourceLocation Loc =
+ SM.getLocForStartOfFile(LPosInfo.first).getLocWithOffset(DisplayPos);
+
+ R.InsertTextBefore(Loc, os.str());
+
+ // Now highlight the ranges.
+ for (const SourceRange *I = P.ranges_begin(), *E = P.ranges_end();
+ I != E; ++I)
+ HighlightRange(R, LPosInfo.first, *I);
+
+#if 0
+ // If there is a code insertion hint, insert that code.
+ // FIXME: This code is disabled because it seems to mangle the HTML
+ // output. I'm leaving it here because it's generally the right idea,
+ // but needs some help from someone more familiar with the rewriter.
+ for (const FixItHint *Hint = P.fixit_begin(), *HintEnd = P.fixit_end();
+ Hint != HintEnd; ++Hint) {
+ if (Hint->RemoveRange.isValid()) {
+ HighlightRange(R, LPosInfo.first, Hint->RemoveRange,
+ "<span class=\"CodeRemovalHint\">", "</span>");
+ }
+ if (Hint->InsertionLoc.isValid()) {
+ std::string EscapedCode = html::EscapeText(Hint->CodeToInsert, true);
+ EscapedCode = "<span class=\"CodeInsertionHint\">" + EscapedCode
+ + "</span>";
+ R.InsertTextBefore(Hint->InsertionLoc, EscapedCode);
+ }
+ }
+#endif
+}
+
+static void EmitAlphaCounter(raw_ostream &os, unsigned n) {
+ unsigned x = n % ('z' - 'a');
+ n /= 'z' - 'a';
+
+ if (n > 0)
+ EmitAlphaCounter(os, n);
+
+ os << char('a' + x);
+}
+
+unsigned HTMLDiagnostics::ProcessMacroPiece(raw_ostream &os,
+ const PathDiagnosticMacroPiece& P,
+ unsigned num) {
+
+ for (PathPieces::const_iterator I = P.subPieces.begin(), E=P.subPieces.end();
+ I!=E; ++I) {
+
+ if (const PathDiagnosticMacroPiece *MP =
+ dyn_cast<PathDiagnosticMacroPiece>(*I)) {
+ num = ProcessMacroPiece(os, *MP, num);
+ continue;
+ }
+
+ if (PathDiagnosticEventPiece *EP = dyn_cast<PathDiagnosticEventPiece>(*I)) {
+ os << "<div class=\"msg msgEvent\" style=\"width:94%; "
+ "margin-left:5px\">"
+ "<table class=\"msgT\"><tr>"
+ "<td valign=\"top\"><div class=\"PathIndex PathIndexEvent\">";
+ EmitAlphaCounter(os, num++);
+ os << "</div></td><td valign=\"top\">"
+ << html::EscapeText(EP->getString())
+ << "</td></tr></table></div>\n";
+ }
+ }
+
+ return num;
+}
+
+void HTMLDiagnostics::HighlightRange(Rewriter& R, FileID BugFileID,
+ SourceRange Range,
+ const char *HighlightStart,
+ const char *HighlightEnd) {
+ SourceManager &SM = R.getSourceMgr();
+ const LangOptions &LangOpts = R.getLangOpts();
+
+ SourceLocation InstantiationStart = SM.getExpansionLoc(Range.getBegin());
+ unsigned StartLineNo = SM.getExpansionLineNumber(InstantiationStart);
+
+ SourceLocation InstantiationEnd = SM.getExpansionLoc(Range.getEnd());
+ unsigned EndLineNo = SM.getExpansionLineNumber(InstantiationEnd);
+
+ if (EndLineNo < StartLineNo)
+ return;
+
+ if (SM.getFileID(InstantiationStart) != BugFileID ||
+ SM.getFileID(InstantiationEnd) != BugFileID)
+ return;
+
+ // Compute the column number of the end.
+ unsigned EndColNo = SM.getExpansionColumnNumber(InstantiationEnd);
+ unsigned OldEndColNo = EndColNo;
+
+ if (EndColNo) {
+ // Add in the length of the token, so that we cover multi-char tokens.
+ EndColNo += Lexer::MeasureTokenLength(Range.getEnd(), SM, LangOpts)-1;
+ }
+
+ // Highlight the range. Make the span tag the outermost tag for the
+ // selected range.
+
+ SourceLocation E =
+ InstantiationEnd.getLocWithOffset(EndColNo - OldEndColNo);
+
+ html::HighlightRange(R, InstantiationStart, E, HighlightStart, HighlightEnd);
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
new file mode 100644
index 0000000..ed94c79
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -0,0 +1,1101 @@
+//== MemRegion.cpp - Abstract memory regions for static analysis --*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines MemRegion and its subclasses. MemRegion defines a
+// partially-typed abstraction of memory useful for path-sensitive dataflow
+// analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/Support/BumpVector.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/RecordLayout.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// MemRegion Construction.
+//===----------------------------------------------------------------------===//
+
+template<typename RegionTy> struct MemRegionManagerTrait;
+
+template <typename RegionTy, typename A1>
+RegionTy* MemRegionManager::getRegion(const A1 a1) {
+
+ const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion =
+ MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1);
+
+ llvm::FoldingSetNodeID ID;
+ RegionTy::ProfileRegion(ID, a1, superRegion);
+ void *InsertPos;
+ RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+ InsertPos));
+
+ if (!R) {
+ R = (RegionTy*) A.Allocate<RegionTy>();
+ new (R) RegionTy(a1, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+template <typename RegionTy, typename A1>
+RegionTy* MemRegionManager::getSubRegion(const A1 a1,
+ const MemRegion *superRegion) {
+ llvm::FoldingSetNodeID ID;
+ RegionTy::ProfileRegion(ID, a1, superRegion);
+ void *InsertPos;
+ RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+ InsertPos));
+
+ if (!R) {
+ R = (RegionTy*) A.Allocate<RegionTy>();
+ new (R) RegionTy(a1, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+template <typename RegionTy, typename A1, typename A2>
+RegionTy* MemRegionManager::getRegion(const A1 a1, const A2 a2) {
+
+ const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion =
+ MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1, a2);
+
+ llvm::FoldingSetNodeID ID;
+ RegionTy::ProfileRegion(ID, a1, a2, superRegion);
+ void *InsertPos;
+ RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+ InsertPos));
+
+ if (!R) {
+ R = (RegionTy*) A.Allocate<RegionTy>();
+ new (R) RegionTy(a1, a2, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+template <typename RegionTy, typename A1, typename A2>
+RegionTy* MemRegionManager::getSubRegion(const A1 a1, const A2 a2,
+ const MemRegion *superRegion) {
+
+ llvm::FoldingSetNodeID ID;
+ RegionTy::ProfileRegion(ID, a1, a2, superRegion);
+ void *InsertPos;
+ RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+ InsertPos));
+
+ if (!R) {
+ R = (RegionTy*) A.Allocate<RegionTy>();
+ new (R) RegionTy(a1, a2, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+template <typename RegionTy, typename A1, typename A2, typename A3>
+RegionTy* MemRegionManager::getSubRegion(const A1 a1, const A2 a2, const A3 a3,
+ const MemRegion *superRegion) {
+
+ llvm::FoldingSetNodeID ID;
+ RegionTy::ProfileRegion(ID, a1, a2, a3, superRegion);
+ void *InsertPos;
+ RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
+ InsertPos));
+
+ if (!R) {
+ R = (RegionTy*) A.Allocate<RegionTy>();
+ new (R) RegionTy(a1, a2, a3, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+//===----------------------------------------------------------------------===//
+// Object destruction.
+//===----------------------------------------------------------------------===//
+
+MemRegion::~MemRegion() {}
+
+MemRegionManager::~MemRegionManager() {
+ // All regions and their data are BumpPtrAllocated. No need to call
+ // their destructors.
+}
+
+//===----------------------------------------------------------------------===//
+// Basic methods.
+//===----------------------------------------------------------------------===//
+
+bool SubRegion::isSubRegionOf(const MemRegion* R) const {
+ const MemRegion* r = getSuperRegion();
+ while (r != 0) {
+ if (r == R)
+ return true;
+ if (const SubRegion* sr = dyn_cast<SubRegion>(r))
+ r = sr->getSuperRegion();
+ else
+ break;
+ }
+ return false;
+}
+
+MemRegionManager* SubRegion::getMemRegionManager() const {
+ const SubRegion* r = this;
+ do {
+ const MemRegion *superRegion = r->getSuperRegion();
+ if (const SubRegion *sr = dyn_cast<SubRegion>(superRegion)) {
+ r = sr;
+ continue;
+ }
+ return superRegion->getMemRegionManager();
+ } while (1);
+}
+
+const StackFrameContext *VarRegion::getStackFrame() const {
+ const StackSpaceRegion *SSR = dyn_cast<StackSpaceRegion>(getMemorySpace());
+ return SSR ? SSR->getStackFrame() : NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// Region extents.
+//===----------------------------------------------------------------------===//
+
+DefinedOrUnknownSVal DeclRegion::getExtent(SValBuilder &svalBuilder) const {
+ ASTContext &Ctx = svalBuilder.getContext();
+ QualType T = getDesugaredValueType(Ctx);
+
+ if (isa<VariableArrayType>(T))
+ return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
+ if (isa<IncompleteArrayType>(T))
+ return UnknownVal();
+
+ CharUnits size = Ctx.getTypeSizeInChars(T);
+ QualType sizeTy = svalBuilder.getArrayIndexType();
+ return svalBuilder.makeIntVal(size.getQuantity(), sizeTy);
+}
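// Worked example (hypothetical declarations, for exposition only), assuming a
// target where sizeof(int) == 4; these are the extents produced by the code above.

int fixed[10];            // ConstantArrayType: extent = 40 bytes
extern int open_ended[];  // IncompleteArrayType: extent = UnknownVal()
// int vla[n];            // VariableArrayType (C99 VLA): extent = fresh extent symbol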
+
+DefinedOrUnknownSVal FieldRegion::getExtent(SValBuilder &svalBuilder) const {
+ DefinedOrUnknownSVal Extent = DeclRegion::getExtent(svalBuilder);
+
+ // A zero-length array at the end of a struct often stands for dynamically-
+ // allocated extra memory.
+ if (Extent.isZeroConstant()) {
+ QualType T = getDesugaredValueType(svalBuilder.getContext());
+
+ if (isa<ConstantArrayType>(T))
+ return UnknownVal();
+ }
+
+ return Extent;
+}
+
+DefinedOrUnknownSVal AllocaRegion::getExtent(SValBuilder &svalBuilder) const {
+ return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
+}
+
+DefinedOrUnknownSVal SymbolicRegion::getExtent(SValBuilder &svalBuilder) const {
+ return nonloc::SymbolVal(svalBuilder.getSymbolManager().getExtentSymbol(this));
+}
+
+DefinedOrUnknownSVal StringRegion::getExtent(SValBuilder &svalBuilder) const {
+ return svalBuilder.makeIntVal(getStringLiteral()->getByteLength()+1,
+ svalBuilder.getArrayIndexType());
+}
+
+ObjCIvarRegion::ObjCIvarRegion(const ObjCIvarDecl *ivd, const MemRegion* sReg)
+ : DeclRegion(ivd, sReg, ObjCIvarRegionKind) {}
+
+const ObjCIvarDecl *ObjCIvarRegion::getDecl() const {
+ return cast<ObjCIvarDecl>(D);
+}
+
+QualType ObjCIvarRegion::getValueType() const {
+ return getDecl()->getType();
+}
+
+QualType CXXBaseObjectRegion::getValueType() const {
+ return QualType(decl->getTypeForDecl(), 0);
+}
+
+//===----------------------------------------------------------------------===//
+// FoldingSet profiling.
+//===----------------------------------------------------------------------===//
+
+void MemSpaceRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.AddInteger((unsigned)getKind());
+}
+
+void StackSpaceRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger((unsigned)getKind());
+ ID.AddPointer(getStackFrame());
+}
+
+void StaticGlobalSpaceRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger((unsigned)getKind());
+ ID.AddPointer(getCodeRegion());
+}
+
+void StringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const StringLiteral* Str,
+ const MemRegion* superRegion) {
+ ID.AddInteger((unsigned) StringRegionKind);
+ ID.AddPointer(Str);
+ ID.AddPointer(superRegion);
+}
+
+void ObjCStringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const ObjCStringLiteral* Str,
+ const MemRegion* superRegion) {
+ ID.AddInteger((unsigned) ObjCStringRegionKind);
+ ID.AddPointer(Str);
+ ID.AddPointer(superRegion);
+}
+
+void AllocaRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const Expr *Ex, unsigned cnt,
+ const MemRegion *) {
+ ID.AddInteger((unsigned) AllocaRegionKind);
+ ID.AddPointer(Ex);
+ ID.AddInteger(cnt);
+}
+
+void AllocaRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ ProfileRegion(ID, Ex, Cnt, superRegion);
+}
+
+void CompoundLiteralRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ CompoundLiteralRegion::ProfileRegion(ID, CL, superRegion);
+}
+
+void CompoundLiteralRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const CompoundLiteralExpr *CL,
+ const MemRegion* superRegion) {
+ ID.AddInteger((unsigned) CompoundLiteralRegionKind);
+ ID.AddPointer(CL);
+ ID.AddPointer(superRegion);
+}
+
+void CXXThisRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+ const PointerType *PT,
+ const MemRegion *sRegion) {
+ ID.AddInteger((unsigned) CXXThisRegionKind);
+ ID.AddPointer(PT);
+ ID.AddPointer(sRegion);
+}
+
+void CXXThisRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ CXXThisRegion::ProfileRegion(ID, ThisPointerTy, superRegion);
+}
+
+void ObjCIvarRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const ObjCIvarDecl *ivd,
+ const MemRegion* superRegion) {
+ DeclRegion::ProfileRegion(ID, ivd, superRegion, ObjCIvarRegionKind);
+}
+
+void DeclRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, const Decl *D,
+ const MemRegion* superRegion, Kind k) {
+ ID.AddInteger((unsigned) k);
+ ID.AddPointer(D);
+ ID.AddPointer(superRegion);
+}
+
+void DeclRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ DeclRegion::ProfileRegion(ID, D, superRegion, getKind());
+}
+
+void VarRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ VarRegion::ProfileRegion(ID, getDecl(), superRegion);
+}
+
+void SymbolicRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, SymbolRef sym,
+ const MemRegion *sreg) {
+ ID.AddInteger((unsigned) MemRegion::SymbolicRegionKind);
+ ID.Add(sym);
+ ID.AddPointer(sreg);
+}
+
+void SymbolicRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ SymbolicRegion::ProfileRegion(ID, sym, getSuperRegion());
+}
+
+void ElementRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ QualType ElementType, SVal Idx,
+ const MemRegion* superRegion) {
+ ID.AddInteger(MemRegion::ElementRegionKind);
+ ID.Add(ElementType);
+ ID.AddPointer(superRegion);
+ Idx.Profile(ID);
+}
+
+void ElementRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ ElementRegion::ProfileRegion(ID, ElementType, Index, superRegion);
+}
+
+void FunctionTextRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const FunctionDecl *FD,
+ const MemRegion*) {
+ ID.AddInteger(MemRegion::FunctionTextRegionKind);
+ ID.AddPointer(FD);
+}
+
+void FunctionTextRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ FunctionTextRegion::ProfileRegion(ID, FD, superRegion);
+}
+
+void BlockTextRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const BlockDecl *BD, CanQualType,
+ const AnalysisDeclContext *AC,
+ const MemRegion*) {
+ ID.AddInteger(MemRegion::BlockTextRegionKind);
+ ID.AddPointer(BD);
+}
+
+void BlockTextRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ BlockTextRegion::ProfileRegion(ID, BD, locTy, AC, superRegion);
+}
+
+void BlockDataRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
+ const BlockTextRegion *BC,
+ const LocationContext *LC,
+ const MemRegion *sReg) {
+ ID.AddInteger(MemRegion::BlockDataRegionKind);
+ ID.AddPointer(BC);
+ ID.AddPointer(LC);
+ ID.AddPointer(sReg);
+}
+
+void BlockDataRegion::Profile(llvm::FoldingSetNodeID& ID) const {
+ BlockDataRegion::ProfileRegion(ID, BC, LC, getSuperRegion());
+}
+
+void CXXTempObjectRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+ Expr const *Ex,
+ const MemRegion *sReg) {
+ ID.AddPointer(Ex);
+ ID.AddPointer(sReg);
+}
+
+void CXXTempObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ProfileRegion(ID, Ex, getSuperRegion());
+}
+
+void CXXBaseObjectRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
+ const CXXRecordDecl *decl,
+ const MemRegion *sReg) {
+ ID.AddPointer(decl);
+ ID.AddPointer(sReg);
+}
+
+void CXXBaseObjectRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ProfileRegion(ID, decl, superRegion);
+}
+
+//===----------------------------------------------------------------------===//
+// Region anchors.
+//===----------------------------------------------------------------------===//
+
+void GlobalsSpaceRegion::anchor() { }
+void HeapSpaceRegion::anchor() { }
+void UnknownSpaceRegion::anchor() { }
+void StackLocalsSpaceRegion::anchor() { }
+void StackArgumentsSpaceRegion::anchor() { }
+void TypedRegion::anchor() { }
+void TypedValueRegion::anchor() { }
+void CodeTextRegion::anchor() { }
+void SubRegion::anchor() { }
+
+//===----------------------------------------------------------------------===//
+// Region pretty-printing.
+//===----------------------------------------------------------------------===//
+
+void MemRegion::dump() const {
+ dumpToStream(llvm::errs());
+}
+
+std::string MemRegion::getString() const {
+ std::string s;
+ llvm::raw_string_ostream os(s);
+ dumpToStream(os);
+ return os.str();
+}
+
+void MemRegion::dumpToStream(raw_ostream &os) const {
+ os << "<Unknown Region>";
+}
+
+void AllocaRegion::dumpToStream(raw_ostream &os) const {
+ os << "alloca{" << (void*) Ex << ',' << Cnt << '}';
+}
+
+void FunctionTextRegion::dumpToStream(raw_ostream &os) const {
+ os << "code{" << getDecl()->getDeclName().getAsString() << '}';
+}
+
+void BlockTextRegion::dumpToStream(raw_ostream &os) const {
+ os << "block_code{" << (void*) this << '}';
+}
+
+void BlockDataRegion::dumpToStream(raw_ostream &os) const {
+ os << "block_data{" << BC << '}';
+}
+
+void CompoundLiteralRegion::dumpToStream(raw_ostream &os) const {
+ // FIXME: More elaborate pretty-printing.
+ os << "{ " << (void*) CL << " }";
+}
+
+void CXXTempObjectRegion::dumpToStream(raw_ostream &os) const {
+ os << "temp_object{" << getValueType().getAsString() << ','
+ << (void*) Ex << '}';
+}
+
+void CXXBaseObjectRegion::dumpToStream(raw_ostream &os) const {
+ os << "base " << decl->getName();
+}
+
+void CXXThisRegion::dumpToStream(raw_ostream &os) const {
+ os << "this";
+}
+
+void ElementRegion::dumpToStream(raw_ostream &os) const {
+ os << "element{" << superRegion << ','
+ << Index << ',' << getElementType().getAsString() << '}';
+}
+
+void FieldRegion::dumpToStream(raw_ostream &os) const {
+ os << superRegion << "->" << *getDecl();
+}
+
+void ObjCIvarRegion::dumpToStream(raw_ostream &os) const {
+ os << "ivar{" << superRegion << ',' << *getDecl() << '}';
+}
+
+void StringRegion::dumpToStream(raw_ostream &os) const {
+ Str->printPretty(os, 0, PrintingPolicy(getContext().getLangOpts()));
+}
+
+void ObjCStringRegion::dumpToStream(raw_ostream &os) const {
+ Str->printPretty(os, 0, PrintingPolicy(getContext().getLangOpts()));
+}
+
+void SymbolicRegion::dumpToStream(raw_ostream &os) const {
+ os << "SymRegion{" << sym << '}';
+}
+
+void VarRegion::dumpToStream(raw_ostream &os) const {
+ os << *cast<VarDecl>(D);
+}
+
+void RegionRawOffset::dump() const {
+ dumpToStream(llvm::errs());
+}
+
+void RegionRawOffset::dumpToStream(raw_ostream &os) const {
+ os << "raw_offset{" << getRegion() << ',' << getOffset().getQuantity() << '}';
+}
+
+void StaticGlobalSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "StaticGlobalsMemSpace{" << CR << '}';
+}
+
+void NonStaticGlobalSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "NonStaticGlobalSpaceRegion";
+}
+
+void GlobalInternalSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "GlobalInternalSpaceRegion";
+}
+
+void GlobalSystemSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "GlobalSystemSpaceRegion";
+}
+
+void GlobalImmutableSpaceRegion::dumpToStream(raw_ostream &os) const {
+ os << "GlobalImmutableSpaceRegion";
+}
+
+void MemRegion::dumpPretty(raw_ostream &os) const {
+ return;
+}
+
+void VarRegion::dumpPretty(raw_ostream &os) const {
+ os << getDecl()->getName();
+}
+
+void FieldRegion::dumpPretty(raw_ostream &os) const {
+ superRegion->dumpPretty(os);
+ os << "->" << getDecl();
+}
+
+//===----------------------------------------------------------------------===//
+// MemRegionManager methods.
+//===----------------------------------------------------------------------===//
+
+template <typename REG>
+const REG *MemRegionManager::LazyAllocate(REG*& region) {
+ if (!region) {
+ region = (REG*) A.Allocate<REG>();
+ new (region) REG(this);
+ }
+
+ return region;
+}
+
+template <typename REG, typename ARG>
+const REG *MemRegionManager::LazyAllocate(REG*& region, ARG a) {
+ if (!region) {
+ region = (REG*) A.Allocate<REG>();
+ new (region) REG(this, a);
+ }
+
+ return region;
+}
+
+const StackLocalsSpaceRegion*
+MemRegionManager::getStackLocalsRegion(const StackFrameContext *STC) {
+ assert(STC);
+ StackLocalsSpaceRegion *&R = StackLocalsSpaceRegions[STC];
+
+ if (R)
+ return R;
+
+ R = A.Allocate<StackLocalsSpaceRegion>();
+ new (R) StackLocalsSpaceRegion(this, STC);
+ return R;
+}
+
+const StackArgumentsSpaceRegion *
+MemRegionManager::getStackArgumentsRegion(const StackFrameContext *STC) {
+ assert(STC);
+ StackArgumentsSpaceRegion *&R = StackArgumentsSpaceRegions[STC];
+
+ if (R)
+ return R;
+
+ R = A.Allocate<StackArgumentsSpaceRegion>();
+ new (R) StackArgumentsSpaceRegion(this, STC);
+ return R;
+}
+
+const GlobalsSpaceRegion
+*MemRegionManager::getGlobalsRegion(MemRegion::Kind K,
+ const CodeTextRegion *CR) {
+ if (!CR) {
+ if (K == MemRegion::GlobalSystemSpaceRegionKind)
+ return LazyAllocate(SystemGlobals);
+ if (K == MemRegion::GlobalImmutableSpaceRegionKind)
+ return LazyAllocate(ImmutableGlobals);
+ assert(K == MemRegion::GlobalInternalSpaceRegionKind);
+ return LazyAllocate(InternalGlobals);
+ }
+
+ assert(K == MemRegion::StaticGlobalSpaceRegionKind);
+ StaticGlobalSpaceRegion *&R = StaticsGlobalSpaceRegions[CR];
+ if (R)
+ return R;
+
+ R = A.Allocate<StaticGlobalSpaceRegion>();
+ new (R) StaticGlobalSpaceRegion(this, CR);
+ return R;
+}
+
+const HeapSpaceRegion *MemRegionManager::getHeapRegion() {
+ return LazyAllocate(heap);
+}
+
+const MemSpaceRegion *MemRegionManager::getUnknownRegion() {
+ return LazyAllocate(unknown);
+}
+
+const MemSpaceRegion *MemRegionManager::getCodeRegion() {
+ return LazyAllocate(code);
+}
+
+//===----------------------------------------------------------------------===//
+// Constructing regions.
+//===----------------------------------------------------------------------===//
+const StringRegion* MemRegionManager::getStringRegion(const StringLiteral* Str){
+ return getSubRegion<StringRegion>(Str, getGlobalsRegion());
+}
+
+const ObjCStringRegion *
+MemRegionManager::getObjCStringRegion(const ObjCStringLiteral* Str){
+ return getSubRegion<ObjCStringRegion>(Str, getGlobalsRegion());
+}
+
+const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
+ const LocationContext *LC) {
+ const MemRegion *sReg = 0;
+
+ if (D->hasGlobalStorage() && !D->isStaticLocal()) {
+
+ // First handle the globals defined in system headers.
+ if (C.getSourceManager().isInSystemHeader(D->getLocation())) {
+ // Whitelist the system globals which often DO GET modified, and assume
+ // the rest are immutable.
+ if (D->getName().find("errno") != StringRef::npos)
+ sReg = getGlobalsRegion(MemRegion::GlobalSystemSpaceRegionKind);
+ else
+ sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
+
+ // Treat other globals as GlobalInternal unless they are constants.
+ } else {
+ QualType GQT = D->getType();
+ const Type *GT = GQT.getTypePtrOrNull();
+ // TODO: We could walk the complex types here and see if everything is
+ // constified.
+ if (GT && GQT.isConstQualified() && GT->isArithmeticType())
+ sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
+ else
+ sReg = getGlobalsRegion();
+ }
+
+ // Finally handle static locals.
+ } else {
+ // FIXME: Once we implement scope handling, we will need to properly
+ // resolve 'D' to the proper LocationContext.
+ const DeclContext *DC = D->getDeclContext();
+ const StackFrameContext *STC = LC->getStackFrameForDeclContext(DC);
+
+ if (!STC)
+ sReg = getUnknownRegion();
+ else {
+ if (D->hasLocalStorage()) {
+ sReg = isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)
+ ? static_cast<const MemRegion*>(getStackArgumentsRegion(STC))
+ : static_cast<const MemRegion*>(getStackLocalsRegion(STC));
+ }
+ else {
+ assert(D->isStaticLocal());
+ const Decl *D = STC->getDecl();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind,
+ getFunctionTextRegion(FD));
+ else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ const BlockTextRegion *BTR =
+ getBlockTextRegion(BD,
+ C.getCanonicalType(BD->getSignatureAsWritten()->getType()),
+ STC->getAnalysisDeclContext());
+ sReg = getGlobalsRegion(MemRegion::StaticGlobalSpaceRegionKind,
+ BTR);
+ }
+ else {
+ // FIXME: For ObjC-methods, we need a new CodeTextRegion. For now
+ // just use the main global memspace.
+ sReg = getGlobalsRegion();
+ }
+ }
+ }
+ }
+
+ return getSubRegion<VarRegion>(D, sReg);
+}
+
+const VarRegion *MemRegionManager::getVarRegion(const VarDecl *D,
+ const MemRegion *superR) {
+ return getSubRegion<VarRegion>(D, superR);
+}
+
+const BlockDataRegion *
+MemRegionManager::getBlockDataRegion(const BlockTextRegion *BC,
+ const LocationContext *LC) {
+ const MemRegion *sReg = 0;
+ const BlockDecl *BD = BC->getDecl();
+ if (!BD->hasCaptures()) {
+ // This handles 'static' blocks.
+ sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
+ }
+ else {
+ if (LC) {
+ // FIXME: Once we implement scope handling, we want the parent region
+ // to be the scope.
+ const StackFrameContext *STC = LC->getCurrentStackFrame();
+ assert(STC);
+ sReg = getStackLocalsRegion(STC);
+ }
+ else {
+ // We allow 'LC' to be NULL for cases where we want BlockDataRegions
+ // without context-sensitivity.
+ sReg = getUnknownRegion();
+ }
+ }
+
+ return getSubRegion<BlockDataRegion>(BC, LC, sReg);
+}
+
+const CompoundLiteralRegion*
+MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr *CL,
+ const LocationContext *LC) {
+
+ const MemRegion *sReg = 0;
+
+ if (CL->isFileScope())
+ sReg = getGlobalsRegion();
+ else {
+ const StackFrameContext *STC = LC->getCurrentStackFrame();
+ assert(STC);
+ sReg = getStackLocalsRegion(STC);
+ }
+
+ return getSubRegion<CompoundLiteralRegion>(CL, sReg);
+}
+
+const ElementRegion*
+MemRegionManager::getElementRegion(QualType elementType, NonLoc Idx,
+ const MemRegion* superRegion,
+ ASTContext &Ctx){
+
+ QualType T = Ctx.getCanonicalType(elementType).getUnqualifiedType();
+
+ llvm::FoldingSetNodeID ID;
+ ElementRegion::ProfileRegion(ID, T, Idx, superRegion);
+
+ void *InsertPos;
+ MemRegion* data = Regions.FindNodeOrInsertPos(ID, InsertPos);
+ ElementRegion* R = cast_or_null<ElementRegion>(data);
+
+ if (!R) {
+ R = (ElementRegion*) A.Allocate<ElementRegion>();
+ new (R) ElementRegion(T, Idx, superRegion);
+ Regions.InsertNode(R, InsertPos);
+ }
+
+ return R;
+}
+
+const FunctionTextRegion *
+MemRegionManager::getFunctionTextRegion(const FunctionDecl *FD) {
+ return getSubRegion<FunctionTextRegion>(FD, getCodeRegion());
+}
+
+const BlockTextRegion *
+MemRegionManager::getBlockTextRegion(const BlockDecl *BD, CanQualType locTy,
+ AnalysisDeclContext *AC) {
+ return getSubRegion<BlockTextRegion>(BD, locTy, AC, getCodeRegion());
+}
+
+
+/// getSymbolicRegion - Retrieve or create a "symbolic" memory region.
+const SymbolicRegion *MemRegionManager::getSymbolicRegion(SymbolRef sym) {
+ return getSubRegion<SymbolicRegion>(sym, getUnknownRegion());
+}
+
+const FieldRegion*
+MemRegionManager::getFieldRegion(const FieldDecl *d,
+ const MemRegion* superRegion){
+ return getSubRegion<FieldRegion>(d, superRegion);
+}
+
+const ObjCIvarRegion*
+MemRegionManager::getObjCIvarRegion(const ObjCIvarDecl *d,
+ const MemRegion* superRegion) {
+ return getSubRegion<ObjCIvarRegion>(d, superRegion);
+}
+
+const CXXTempObjectRegion*
+MemRegionManager::getCXXTempObjectRegion(Expr const *E,
+ LocationContext const *LC) {
+ const StackFrameContext *SFC = LC->getCurrentStackFrame();
+ assert(SFC);
+ return getSubRegion<CXXTempObjectRegion>(E, getStackLocalsRegion(SFC));
+}
+
+const CXXBaseObjectRegion *
+MemRegionManager::getCXXBaseObjectRegion(const CXXRecordDecl *decl,
+ const MemRegion *superRegion) {
+ return getSubRegion<CXXBaseObjectRegion>(decl, superRegion);
+}
+
+const CXXThisRegion*
+MemRegionManager::getCXXThisRegion(QualType thisPointerTy,
+ const LocationContext *LC) {
+ const StackFrameContext *STC = LC->getCurrentStackFrame();
+ assert(STC);
+ const PointerType *PT = thisPointerTy->getAs<PointerType>();
+ assert(PT);
+ return getSubRegion<CXXThisRegion>(PT, getStackArgumentsRegion(STC));
+}
+
+const AllocaRegion*
+MemRegionManager::getAllocaRegion(const Expr *E, unsigned cnt,
+ const LocationContext *LC) {
+ const StackFrameContext *STC = LC->getCurrentStackFrame();
+ assert(STC);
+ return getSubRegion<AllocaRegion>(E, cnt, getStackLocalsRegion(STC));
+}
+
+const MemSpaceRegion *MemRegion::getMemorySpace() const {
+ const MemRegion *R = this;
+ const SubRegion* SR = dyn_cast<SubRegion>(this);
+
+ while (SR) {
+ R = SR->getSuperRegion();
+ SR = dyn_cast<SubRegion>(R);
+ }
+
+ return dyn_cast<MemSpaceRegion>(R);
+}
+
+bool MemRegion::hasStackStorage() const {
+ return isa<StackSpaceRegion>(getMemorySpace());
+}
+
+bool MemRegion::hasStackNonParametersStorage() const {
+ return isa<StackLocalsSpaceRegion>(getMemorySpace());
+}
+
+bool MemRegion::hasStackParametersStorage() const {
+ return isa<StackArgumentsSpaceRegion>(getMemorySpace());
+}
+
+bool MemRegion::hasGlobalsOrParametersStorage() const {
+ const MemSpaceRegion *MS = getMemorySpace();
+ return isa<StackArgumentsSpaceRegion>(MS) ||
+ isa<GlobalsSpaceRegion>(MS);
+}
+
+// getBaseRegion strips away all element and field layers (ElementRegions,
+// FieldRegions, ObjC ivar regions, and C++ base object regions) and returns
+// the underlying base region.
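+// For example, the region for an access like 'x.f[2].g' is (roughly) a
+// FieldRegion over an ElementRegion over a FieldRegion over the VarRegion
+// for 'x'; getBaseRegion() returns that VarRegion.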
+const MemRegion *MemRegion::getBaseRegion() const {
+ const MemRegion *R = this;
+ while (true) {
+ switch (R->getKind()) {
+ case MemRegion::ElementRegionKind:
+ case MemRegion::FieldRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
+ case MemRegion::CXXBaseObjectRegionKind:
+ R = cast<SubRegion>(R)->getSuperRegion();
+ continue;
+ default:
+ break;
+ }
+ break;
+ }
+ return R;
+}
+
+//===----------------------------------------------------------------------===//
+// View handling.
+//===----------------------------------------------------------------------===//
+
+const MemRegion *MemRegion::StripCasts() const {
+ const MemRegion *R = this;
+ while (true) {
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ // FIXME: generalize. Essentially we want to strip away ElementRegions
+ // that were layered on a symbolic region because of casts. We only
+ // want to strip away ElementRegions, however, where the index is 0.
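+ // E.g. an ElementRegion with index 0 layered over a SymbolicRegion by a
+ // pointer cast is stripped back to the SymbolicRegion itself; a nonzero
+ // index denotes real pointer arithmetic and is kept.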
+ SVal index = ER->getIndex();
+ if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&index)) {
+ if (CI->getValue().getSExtValue() == 0) {
+ R = ER->getSuperRegion();
+ continue;
+ }
+ }
+ }
+ break;
+ }
+ return R;
+}
+
+// FIXME: Merge with the implementation of the same method in Store.cpp
+static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *D = RT->getDecl();
+ if (!D->getDefinition())
+ return false;
+ }
+
+ return true;
+}
+
+RegionRawOffset ElementRegion::getAsArrayOffset() const {
+ CharUnits offset = CharUnits::Zero();
+ const ElementRegion *ER = this;
+ const MemRegion *superR = NULL;
+ ASTContext &C = getContext();
+
+ // FIXME: Handle multi-dimensional arrays.
+
+ while (ER) {
+ superR = ER->getSuperRegion();
+
+ // FIXME: generalize to symbolic offsets.
+ SVal index = ER->getIndex();
+ if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&index)) {
+ // Update the offset.
+ int64_t i = CI->getValue().getSExtValue();
+
+ if (i != 0) {
+ QualType elemType = ER->getElementType();
+
+ // If we are pointing to an incomplete type, go no further.
+ if (!IsCompleteType(C, elemType)) {
+ superR = ER;
+ break;
+ }
+
+ CharUnits size = C.getTypeSizeInChars(elemType);
+ offset += (i * size);
+ }
+
+ // Go to the next ElementRegion (if any).
+ ER = dyn_cast<ElementRegion>(superR);
+ continue;
+ }
+
+ return NULL;
+ }
+
+ assert(superR && "super region cannot be NULL");
+ return RegionRawOffset(superR, offset);
+}
+
+RegionOffset MemRegion::getAsOffset() const {
+ const MemRegion *R = this;
+ int64_t Offset = 0;
+
+ while (1) {
+ switch (R->getKind()) {
+ default:
+ return RegionOffset(0);
+ case SymbolicRegionKind:
+ case AllocaRegionKind:
+ case CompoundLiteralRegionKind:
+ case CXXThisRegionKind:
+ case StringRegionKind:
+ case VarRegionKind:
+ case CXXTempObjectRegionKind:
+ goto Finish;
+ case ElementRegionKind: {
+ const ElementRegion *ER = cast<ElementRegion>(R);
+ QualType EleTy = ER->getValueType();
+
+ if (!IsCompleteType(getContext(), EleTy))
+ return RegionOffset(0);
+
+ SVal Index = ER->getIndex();
+ if (const nonloc::ConcreteInt *CI=dyn_cast<nonloc::ConcreteInt>(&Index)) {
+ int64_t i = CI->getValue().getSExtValue();
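+ // getTypeSizeInChars() is in bytes (CharUnits); the running offset is
+ // accumulated in bits, hence the multiplication by 8 below.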
+ CharUnits Size = getContext().getTypeSizeInChars(EleTy);
+ Offset += i * Size.getQuantity() * 8;
+ } else {
+ // We cannot compute offset for non-concrete index.
+ return RegionOffset(0);
+ }
+ R = ER->getSuperRegion();
+ break;
+ }
+ case FieldRegionKind: {
+ const FieldRegion *FR = cast<FieldRegion>(R);
+ const RecordDecl *RD = FR->getDecl()->getParent();
+ if (!RD->isCompleteDefinition())
+ // We cannot compute offset for incomplete type.
+ return RegionOffset(0);
+ // Get the field number.
+ unsigned idx = 0;
+ for (RecordDecl::field_iterator FI = RD->field_begin(),
+ FE = RD->field_end(); FI != FE; ++FI, ++idx)
+ if (FR->getDecl() == *FI)
+ break;
+
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+ // This is offset in bits.
+ Offset += Layout.getFieldOffset(idx);
+ R = FR->getSuperRegion();
+ break;
+ }
+ }
+ }
+
+ Finish:
+ return RegionOffset(R, Offset);
+}
+
+//===----------------------------------------------------------------------===//
+// BlockDataRegion
+//===----------------------------------------------------------------------===//
+
+void BlockDataRegion::LazyInitializeReferencedVars() {
+ if (ReferencedVars)
+ return;
+
+ AnalysisDeclContext *AC = getCodeRegion()->getAnalysisDeclContext();
+ AnalysisDeclContext::referenced_decls_iterator I, E;
+ llvm::tie(I, E) = AC->getReferencedBlockVars(BC->getDecl());
+
+ if (I == E) {
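+ // No referenced block variables: store a non-null sentinel so that the
+ // lazy initialization is not repeated on later calls.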
+ ReferencedVars = (void*) 0x1;
+ return;
+ }
+
+ MemRegionManager &MemMgr = *getMemRegionManager();
+ llvm::BumpPtrAllocator &A = MemMgr.getAllocator();
+ BumpVectorContext BC(A);
+
+ typedef BumpVector<const MemRegion*> VarVec;
+ VarVec *BV = (VarVec*) A.Allocate<VarVec>();
+ new (BV) VarVec(BC, E - I);
+
+ for ( ; I != E; ++I) {
+ const VarDecl *VD = *I;
+ const VarRegion *VR = 0;
+
+ if (!VD->getAttr<BlocksAttr>() && VD->hasLocalStorage())
+ VR = MemMgr.getVarRegion(VD, this);
+ else {
+ if (LC)
+ VR = MemMgr.getVarRegion(VD, LC);
+ else {
+ VR = MemMgr.getVarRegion(VD, MemMgr.getUnknownRegion());
+ }
+ }
+
+ assert(VR);
+ BV->push_back(VR, BC);
+ }
+
+ ReferencedVars = BV;
+}
+
+BlockDataRegion::referenced_vars_iterator
+BlockDataRegion::referenced_vars_begin() const {
+ const_cast<BlockDataRegion*>(this)->LazyInitializeReferencedVars();
+
+ BumpVector<const MemRegion*> *Vec =
+ static_cast<BumpVector<const MemRegion*>*>(ReferencedVars);
+
+ return BlockDataRegion::referenced_vars_iterator(Vec == (void*) 0x1 ?
+ NULL : Vec->begin());
+}
+
+BlockDataRegion::referenced_vars_iterator
+BlockDataRegion::referenced_vars_end() const {
+ const_cast<BlockDataRegion*>(this)->LazyInitializeReferencedVars();
+
+ BumpVector<const MemRegion*> *Vec =
+ static_cast<BumpVector<const MemRegion*>*>(ReferencedVars);
+
+ return BlockDataRegion::referenced_vars_iterator(Vec == (void*) 0x1 ?
+ NULL : Vec->end());
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ObjCMessage.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ObjCMessage.cpp
new file mode 100644
index 0000000..65cdcd9
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ObjCMessage.cpp
@@ -0,0 +1,90 @@
+//===- ObjCMessage.cpp - Wrapper for ObjC messages and dot syntax -*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines ObjCMessage which serves as a common wrapper for ObjC
+// message expressions or implicit messages for loading/storing ObjC properties.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/AST/DeclCXX.h"
+
+using namespace clang;
+using namespace ento;
+
+QualType CallOrObjCMessage::getResultType(ASTContext &ctx) const {
+ QualType resultTy;
+ bool isLVal = false;
+
+ if (isObjCMessage()) {
+ resultTy = Msg.getResultType(ctx);
+ } else if (const CXXConstructExpr *Ctor =
+ CallE.dyn_cast<const CXXConstructExpr *>()) {
+ resultTy = Ctor->getType();
+ } else {
+ const CallExpr *FunctionCall = CallE.get<const CallExpr *>();
+
+ isLVal = FunctionCall->isLValue();
+ const Expr *Callee = FunctionCall->getCallee();
+ if (const FunctionDecl *FD = State->getSVal(Callee, LCtx).getAsFunctionDecl())
+ resultTy = FD->getResultType();
+ else
+ resultTy = FunctionCall->getType();
+ }
+
+ if (isLVal)
+ resultTy = ctx.getPointerType(resultTy);
+
+ return resultTy;
+}
+
+SVal CallOrObjCMessage::getFunctionCallee() const {
+ assert(isFunctionCall());
+ assert(!isCXXCall());
+ const Expr *Fun = CallE.get<const CallExpr *>()->getCallee()->IgnoreParens();
+ return State->getSVal(Fun, LCtx);
+}
+
+SVal CallOrObjCMessage::getCXXCallee() const {
+ assert(isCXXCall());
+ const CallExpr *ActualCall = CallE.get<const CallExpr *>();
+ const Expr *callee =
+ cast<CXXMemberCallExpr>(ActualCall)->getImplicitObjectArgument();
+
+ // FIXME: Will eventually need to cope with member pointers. This is
+ // a limitation in getImplicitObjectArgument().
+ if (!callee)
+ return UnknownVal();
+
+ return State->getSVal(callee, LCtx);
+}
+
+SVal
+CallOrObjCMessage::getInstanceMessageReceiver(const LocationContext *LC) const {
+ assert(isObjCMessage());
+ return Msg.getInstanceReceiverSVal(State, LC);
+}
+
+const Decl *CallOrObjCMessage::getDecl() const {
+ if (isCXXCall()) {
+ const CXXMemberCallExpr *CE =
+ cast<CXXMemberCallExpr>(CallE.dyn_cast<const CallExpr *>());
+ assert(CE);
+ return CE->getMethodDecl();
+ } else if (isObjCMessage()) {
+ return Msg.getMethodDecl();
+ } else if (isFunctionCall()) {
+ // In the case of a C-style call, use the path-sensitive information to
+ // find the function declaration.
+ SVal CalleeVal = getFunctionCallee();
+ return CalleeVal.getAsFunctionDecl();
+ }
+ return 0;
+}
+
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
new file mode 100644
index 0000000..01dd965
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PathDiagnostic.cpp
@@ -0,0 +1,755 @@
+//===--- PathDiagnostic.cpp - Path-Specific Diagnostic Handling -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PathDiagnostic-related interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExplodedGraph.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/StmtCXX.h"
+#include "llvm/ADT/SmallString.h"
+
+using namespace clang;
+using namespace ento;
+
+bool PathDiagnosticMacroPiece::containsEvent() const {
+ for (PathPieces::const_iterator I = subPieces.begin(), E = subPieces.end();
+ I!=E; ++I) {
+ if (isa<PathDiagnosticEventPiece>(*I))
+ return true;
+ if (PathDiagnosticMacroPiece *MP = dyn_cast<PathDiagnosticMacroPiece>(*I))
+ if (MP->containsEvent())
+ return true;
+ }
+ return false;
+}
+
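+/// Strip trailing '.' characters; e.g. "Dereference of null pointer..."
+/// becomes "Dereference of null pointer", and a string consisting only of
+/// dots becomes "".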
+static StringRef StripTrailingDots(StringRef s) {
+ for (StringRef::size_type i = s.size(); i != 0; --i)
+ if (s[i - 1] != '.')
+ return s.substr(0, i);
+ return "";
+}
+
+PathDiagnosticPiece::PathDiagnosticPiece(StringRef s,
+ Kind k, DisplayHint hint)
+ : str(StripTrailingDots(s)), kind(k), Hint(hint) {}
+
+PathDiagnosticPiece::PathDiagnosticPiece(Kind k, DisplayHint hint)
+ : kind(k), Hint(hint) {}
+
+PathDiagnosticPiece::~PathDiagnosticPiece() {}
+PathDiagnosticEventPiece::~PathDiagnosticEventPiece() {}
+PathDiagnosticCallPiece::~PathDiagnosticCallPiece() {}
+PathDiagnosticControlFlowPiece::~PathDiagnosticControlFlowPiece() {}
+PathDiagnosticMacroPiece::~PathDiagnosticMacroPiece() {}
+
+
+PathPieces::~PathPieces() {}
+PathDiagnostic::~PathDiagnostic() {}
+
+PathDiagnostic::PathDiagnostic(const Decl *declWithIssue,
+ StringRef bugtype, StringRef desc,
+ StringRef category)
+ : DeclWithIssue(declWithIssue),
+ BugType(StripTrailingDots(bugtype)),
+ Desc(StripTrailingDots(desc)),
+ Category(StripTrailingDots(category)),
+ path(pathImpl) {}
+
+void PathDiagnosticConsumer::anchor() { }
+
+PathDiagnosticConsumer::~PathDiagnosticConsumer() {
+ // Delete the contents of the FoldingSet if it isn't empty already.
+ for (llvm::FoldingSet<PathDiagnostic>::iterator it =
+ Diags.begin(), et = Diags.end() ; it != et ; ++it) {
+ delete &*it;
+ }
+}
+
+void PathDiagnosticConsumer::HandlePathDiagnostic(PathDiagnostic *D) {
+ llvm::OwningPtr<PathDiagnostic> OwningD(D);
+
+ if (!D || D->path.empty())
+ return;
+
+ // We need to flatten the locations (convert Stmt* to locations) because
+ // the referenced statements may be freed by the time the diagnostics
+ // are emitted.
+ D->flattenLocations();
+
+ // If the PathDiagnosticConsumer does not support diagnostics that
+ // cross file boundaries, prune out such diagnostics now.
+ if (!supportsCrossFileDiagnostics()) {
+ // Verify that the entire path is from the same FileID.
+ FileID FID;
+ const SourceManager &SMgr = (*D->path.begin())->getLocation().getManager();
+ llvm::SmallVector<const PathPieces *, 5> WorkList;
+ WorkList.push_back(&D->path);
+
+ while (!WorkList.empty()) {
+ const PathPieces &path = *WorkList.back();
+ WorkList.pop_back();
+
+ for (PathPieces::const_iterator I = path.begin(), E = path.end();
+ I != E; ++I) {
+ const PathDiagnosticPiece *piece = I->getPtr();
+ FullSourceLoc L = piece->getLocation().asLocation().getExpansionLoc();
+
+ if (FID.isInvalid()) {
+ FID = SMgr.getFileID(L);
+ } else if (SMgr.getFileID(L) != FID)
+ return; // FIXME: Emit a warning?
+
+ // Check the source ranges.
+ for (PathDiagnosticPiece::range_iterator RI = piece->ranges_begin(),
+ RE = piece->ranges_end();
+ RI != RE; ++RI) {
+ SourceLocation L = SMgr.getExpansionLoc(RI->getBegin());
+ if (!L.isFileID() || SMgr.getFileID(L) != FID)
+ return; // FIXME: Emit a warning?
+ L = SMgr.getExpansionLoc(RI->getEnd());
+ if (!L.isFileID() || SMgr.getFileID(L) != FID)
+ return; // FIXME: Emit a warning?
+ }
+
+ if (const PathDiagnosticCallPiece *call =
+ dyn_cast<PathDiagnosticCallPiece>(piece)) {
+ WorkList.push_back(&call->path);
+ }
+ else if (const PathDiagnosticMacroPiece *macro =
+ dyn_cast<PathDiagnosticMacroPiece>(piece)) {
+ WorkList.push_back(&macro->subPieces);
+ }
+ }
+ }
+
+ if (FID.isInvalid())
+ return; // FIXME: Emit a warning?
+ }
+
+ // Profile the node to see if we already have something matching it
+ llvm::FoldingSetNodeID profile;
+ D->Profile(profile);
+ void *InsertPos = 0;
+
+ if (PathDiagnostic *orig = Diags.FindNodeOrInsertPos(profile, InsertPos)) {
+ // Keep the PathDiagnostic with the shorter path.
+ const unsigned orig_size = orig->full_size();
+ const unsigned new_size = D->full_size();
+
+ if (orig_size <= new_size) {
+ bool shouldKeepOriginal = true;
+ if (orig_size == new_size) {
+ // Here we break ties in a fairly arbitrary, but deterministic, way.
+ llvm::FoldingSetNodeID fullProfile, fullProfileOrig;
+ D->FullProfile(fullProfile);
+ orig->FullProfile(fullProfileOrig);
+ if (fullProfile.ComputeHash() < fullProfileOrig.ComputeHash())
+ shouldKeepOriginal = false;
+ }
+
+ if (shouldKeepOriginal)
+ return;
+ }
+ Diags.RemoveNode(orig);
+ delete orig;
+ }
+
+ Diags.InsertNode(OwningD.take());
+}
+
+
+namespace {
+struct CompareDiagnostics {
+ // Return true if 'X' should be ordered before ("<") 'Y'.
+ bool operator()(const PathDiagnostic *X, const PathDiagnostic *Y) const {
+ // First compare by location
+ const FullSourceLoc &XLoc = X->getLocation().asLocation();
+ const FullSourceLoc &YLoc = Y->getLocation().asLocation();
+ if (XLoc < YLoc)
+ return true;
+ if (XLoc != YLoc)
+ return false;
+
+ // Next, compare by bug type.
+ StringRef XBugType = X->getBugType();
+ StringRef YBugType = Y->getBugType();
+ if (XBugType < YBugType)
+ return true;
+ if (XBugType != YBugType)
+ return false;
+
+ // Next, compare by bug description.
+ StringRef XDesc = X->getDescription();
+ StringRef YDesc = Y->getDescription();
+ if (XDesc < YDesc)
+ return true;
+ if (XDesc != YDesc)
+ return false;
+
+ // FIXME: Further refine by comparing PathDiagnosticPieces?
+ return false;
+ }
+};
+}
+
+void
+PathDiagnosticConsumer::FlushDiagnostics(SmallVectorImpl<std::string> *Files) {
+ if (flushed)
+ return;
+
+ flushed = true;
+
+ std::vector<const PathDiagnostic *> BatchDiags;
+ for (llvm::FoldingSet<PathDiagnostic>::iterator it = Diags.begin(),
+ et = Diags.end(); it != et; ++it) {
+ BatchDiags.push_back(&*it);
+ }
+
+ // Clear out the FoldingSet.
+ Diags.clear();
+
+ // Sort the diagnostics so that they are always emitted in a deterministic
+ // order.
+ if (!BatchDiags.empty())
+ std::sort(BatchDiags.begin(), BatchDiags.end(), CompareDiagnostics());
+
+ FlushDiagnosticsImpl(BatchDiags, Files);
+
+ // Delete the flushed diagnostics.
+ for (std::vector<const PathDiagnostic *>::iterator it = BatchDiags.begin(),
+ et = BatchDiags.end(); it != et; ++it) {
+ const PathDiagnostic *D = *it;
+ delete D;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// PathDiagnosticLocation methods.
+//===----------------------------------------------------------------------===//
+
+static SourceLocation getValidSourceLocation(const Stmt* S,
+ LocationOrAnalysisDeclContext LAC) {
+ SourceLocation L = S->getLocStart();
+ assert(!LAC.isNull() && "A valid LocationContext or AnalysisDeclContext should "
+ "be passed to PathDiagnosticLocation upon creation.");
+
+ // S might be a temporary statement that does not have a location in the
+ // source code, so find an enclosing statement and use its location.
+ if (!L.isValid()) {
+
+ ParentMap *PM = 0;
+ if (LAC.is<const LocationContext*>())
+ PM = &LAC.get<const LocationContext*>()->getParentMap();
+ else
+ PM = &LAC.get<AnalysisDeclContext*>()->getParentMap();
+
+ while (!L.isValid()) {
+ S = PM->getParent(S);
+ L = S->getLocStart();
+ }
+ }
+
+ return L;
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createBegin(const Decl *D,
+ const SourceManager &SM) {
+ return PathDiagnosticLocation(D->getLocStart(), SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createBegin(const Stmt *S,
+ const SourceManager &SM,
+ LocationOrAnalysisDeclContext LAC) {
+ return PathDiagnosticLocation(getValidSourceLocation(S, LAC),
+ SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createOperatorLoc(const BinaryOperator *BO,
+ const SourceManager &SM) {
+ return PathDiagnosticLocation(BO->getOperatorLoc(), SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createMemberLoc(const MemberExpr *ME,
+ const SourceManager &SM) {
+ return PathDiagnosticLocation(ME->getMemberLoc(), SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createBeginBrace(const CompoundStmt *CS,
+ const SourceManager &SM) {
+ SourceLocation L = CS->getLBracLoc();
+ return PathDiagnosticLocation(L, SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createEndBrace(const CompoundStmt *CS,
+ const SourceManager &SM) {
+ SourceLocation L = CS->getRBracLoc();
+ return PathDiagnosticLocation(L, SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createDeclBegin(const LocationContext *LC,
+ const SourceManager &SM) {
+ // FIXME: Should handle CXXTryStmt if analyser starts supporting C++.
+ if (const CompoundStmt *CS =
+ dyn_cast_or_null<CompoundStmt>(LC->getDecl()->getBody()))
+ if (!CS->body_empty()) {
+ SourceLocation Loc = (*CS->body_begin())->getLocStart();
+ return PathDiagnosticLocation(Loc, SM, SingleLocK);
+ }
+
+ return PathDiagnosticLocation();
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createDeclEnd(const LocationContext *LC,
+ const SourceManager &SM) {
+ SourceLocation L = LC->getDecl()->getBodyRBrace();
+ return PathDiagnosticLocation(L, SM, SingleLocK);
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::create(const ProgramPoint& P,
+ const SourceManager &SMng) {
+
+ const Stmt* S = 0;
+ if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ const CFGBlock *BSrc = BE->getSrc();
+ S = BSrc->getTerminatorCondition();
+ }
+ else if (const PostStmt *PS = dyn_cast<PostStmt>(&P)) {
+ S = PS->getStmt();
+ }
+
+ return PathDiagnosticLocation(S, SMng, P.getLocationContext());
+}
+
+PathDiagnosticLocation
+ PathDiagnosticLocation::createEndOfPath(const ExplodedNode* N,
+ const SourceManager &SM) {
+ assert(N && "Cannot create a location with a null node.");
+
+ const ExplodedNode *NI = N;
+
+ while (NI) {
+ ProgramPoint P = NI->getLocation();
+ const LocationContext *LC = P.getLocationContext();
+ if (const StmtPoint *PS = dyn_cast<StmtPoint>(&P))
+ return PathDiagnosticLocation(PS->getStmt(), SM, LC);
+ else if (const BlockEdge *BE = dyn_cast<BlockEdge>(&P)) {
+ const Stmt *Term = BE->getSrc()->getTerminator();
+ if (Term) {
+ return PathDiagnosticLocation(Term, SM, LC);
+ }
+ }
+ NI = NI->succ_empty() ? 0 : *(NI->succ_begin());
+ }
+
+ return createDeclEnd(N->getLocationContext(), SM);
+}
+
+PathDiagnosticLocation PathDiagnosticLocation::createSingleLocation(
+ const PathDiagnosticLocation &PDL) {
+ FullSourceLoc L = PDL.asLocation();
+ return PathDiagnosticLocation(L, L.getManager(), SingleLocK);
+}
+
+FullSourceLoc
+ PathDiagnosticLocation::genLocation(SourceLocation L,
+ LocationOrAnalysisDeclContext LAC) const {
+ assert(isValid());
+ // Note that we want a 'switch' here so that the compiler can warn us in
+ // case we add more cases.
+ switch (K) {
+ case SingleLocK:
+ case RangeK:
+ break;
+ case StmtK:
+ // Defensive checking.
+ if (!S)
+ break;
+ return FullSourceLoc(getValidSourceLocation(S, LAC),
+ const_cast<SourceManager&>(*SM));
+ case DeclK:
+ // Defensive checking.
+ if (!D)
+ break;
+ return FullSourceLoc(D->getLocation(), const_cast<SourceManager&>(*SM));
+ }
+
+ return FullSourceLoc(L, const_cast<SourceManager&>(*SM));
+}
+
+PathDiagnosticRange
+ PathDiagnosticLocation::genRange(LocationOrAnalysisDeclContext LAC) const {
+ assert(isValid());
+ // Note that we want a 'switch' here so that the compiler can warn us in
+ // case we add more cases.
+ switch (K) {
+ case SingleLocK:
+ return PathDiagnosticRange(SourceRange(Loc,Loc), true);
+ case RangeK:
+ break;
+ case StmtK: {
+ const Stmt *S = asStmt();
+ switch (S->getStmtClass()) {
+ default:
+ break;
+ case Stmt::DeclStmtClass: {
+ const DeclStmt *DS = cast<DeclStmt>(S);
+ if (DS->isSingleDecl()) {
+ // Should always be the case, but we'll be defensive.
+ return SourceRange(DS->getLocStart(),
+ DS->getSingleDecl()->getLocation());
+ }
+ break;
+ }
+ // FIXME: Provide better range information for different
+ // terminators.
+ case Stmt::IfStmtClass:
+ case Stmt::WhileStmtClass:
+ case Stmt::DoStmtClass:
+ case Stmt::ForStmtClass:
+ case Stmt::ChooseExprClass:
+ case Stmt::IndirectGotoStmtClass:
+ case Stmt::SwitchStmtClass:
+ case Stmt::BinaryConditionalOperatorClass:
+ case Stmt::ConditionalOperatorClass:
+ case Stmt::ObjCForCollectionStmtClass: {
+ SourceLocation L = getValidSourceLocation(S, LAC);
+ return SourceRange(L, L);
+ }
+ }
+ SourceRange R = S->getSourceRange();
+ if (R.isValid())
+ return R;
+ break;
+ }
+ case DeclK:
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ return MD->getSourceRange();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (Stmt *Body = FD->getBody())
+ return Body->getSourceRange();
+ }
+ else {
+ SourceLocation L = D->getLocation();
+ return PathDiagnosticRange(SourceRange(L, L), true);
+ }
+ }
+
+ return SourceRange(Loc,Loc);
+}
+
+void PathDiagnosticLocation::flatten() {
+ if (K == StmtK) {
+ K = RangeK;
+ S = 0;
+ D = 0;
+ }
+ else if (K == DeclK) {
+ K = SingleLocK;
+ S = 0;
+ D = 0;
+ }
+}
+
+PathDiagnosticLocation PathDiagnostic::getLocation() const {
+ assert(path.size() > 0 &&
+ "getLocation() requires a non-empty PathDiagnostic.");
+
+ PathDiagnosticPiece *p = path.rbegin()->getPtr();
+
+ while (true) {
+ if (PathDiagnosticCallPiece *cp = dyn_cast<PathDiagnosticCallPiece>(p)) {
+ assert(!cp->path.empty());
+ p = cp->path.rbegin()->getPtr();
+ continue;
+ }
+ break;
+ }
+
+ return p->getLocation();
+}
+
+//===----------------------------------------------------------------------===//
+// Manipulation of PathDiagnosticCallPieces.
+//===----------------------------------------------------------------------===//
+
+static PathDiagnosticLocation getLastStmtLoc(const ExplodedNode *N,
+ const SourceManager &SM) {
+ while (N) {
+ ProgramPoint PP = N->getLocation();
+ if (const StmtPoint *SP = dyn_cast<StmtPoint>(&PP))
+ return PathDiagnosticLocation(SP->getStmt(), SM, PP.getLocationContext());
+ if (N->pred_empty())
+ break;
+ N = *N->pred_begin();
+ }
+ return PathDiagnosticLocation();
+}
+
+PathDiagnosticCallPiece *
+PathDiagnosticCallPiece::construct(const ExplodedNode *N,
+ const CallExit &CE,
+ const SourceManager &SM) {
+ const Decl *caller = CE.getLocationContext()->getParent()->getDecl();
+ PathDiagnosticLocation pos = getLastStmtLoc(N, SM);
+ return new PathDiagnosticCallPiece(caller, pos);
+}
+
+PathDiagnosticCallPiece *
+PathDiagnosticCallPiece::construct(PathPieces &path,
+ const Decl *caller) {
+ PathDiagnosticCallPiece *C = new PathDiagnosticCallPiece(path, caller);
+ path.clear();
+ path.push_front(C);
+ return C;
+}
+
+void PathDiagnosticCallPiece::setCallee(const CallEnter &CE,
+ const SourceManager &SM) {
+ const Decl *D = CE.getCalleeContext()->getDecl();
+ Callee = D;
+ callEnter = PathDiagnosticLocation(CE.getCallExpr(), SM,
+ CE.getLocationContext());
+ callEnterWithin = PathDiagnosticLocation::createBegin(D, SM);
+}
+
+IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+PathDiagnosticCallPiece::getCallEnterEvent() const {
+ if (!Callee)
+ return 0;
+ SmallString<256> buf;
+ llvm::raw_svector_ostream Out(buf);
+ if (isa<BlockDecl>(Callee))
+ Out << "Calling anonymous block";
+ else if (const NamedDecl *ND = dyn_cast<NamedDecl>(Callee))
+ Out << "Calling '" << *ND << "'";
+ StringRef msg = Out.str();
+ if (msg.empty())
+ return 0;
+ return new PathDiagnosticEventPiece(callEnter, msg);
+}
+
+IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+PathDiagnosticCallPiece::getCallEnterWithinCallerEvent() const {
+ SmallString<256> buf;
+ llvm::raw_svector_ostream Out(buf);
+ if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(Caller))
+ Out << "Entered call from '" << *ND << "'";
+ else
+ Out << "Entered call";
+ StringRef msg = Out.str();
+ if (msg.empty())
+ return 0;
+ return new PathDiagnosticEventPiece(callEnterWithin, msg);
+}
+
+IntrusiveRefCntPtr<PathDiagnosticEventPiece>
+PathDiagnosticCallPiece::getCallExitEvent() const {
+ if (NoExit)
+ return 0;
+ SmallString<256> buf;
+ llvm::raw_svector_ostream Out(buf);
+ if (!CallStackMessage.empty())
+ Out << CallStackMessage;
+ else if (const NamedDecl *ND = dyn_cast_or_null<NamedDecl>(Callee))
+ Out << "Returning from '" << *ND << "'";
+ else
+ Out << "Returning to caller";
+ return new PathDiagnosticEventPiece(callReturn, Out.str());
+}
+
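+/// Count the leaf pieces of a path, recursing into call pieces; a macro
+/// piece counts as a single piece.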
+static void compute_path_size(const PathPieces &pieces, unsigned &size) {
+ for (PathPieces::const_iterator it = pieces.begin(),
+ et = pieces.end(); it != et; ++it) {
+ const PathDiagnosticPiece *piece = it->getPtr();
+ if (const PathDiagnosticCallPiece *cp =
+ dyn_cast<PathDiagnosticCallPiece>(piece)) {
+ compute_path_size(cp->path, size);
+ }
+ else
+ ++size;
+ }
+}
+
+unsigned PathDiagnostic::full_size() {
+ unsigned size = 0;
+ compute_path_size(path, size);
+ return size;
+}
+
+//===----------------------------------------------------------------------===//
+// FoldingSet profiling methods.
+//===----------------------------------------------------------------------===//
+
+void PathDiagnosticLocation::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger(Range.getBegin().getRawEncoding());
+ ID.AddInteger(Range.getEnd().getRawEncoding());
+ ID.AddInteger(Loc.getRawEncoding());
+ return;
+}
+
+void PathDiagnosticPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger((unsigned) getKind());
+ ID.AddString(str);
+ // FIXME: Add profiling support for code hints.
+ ID.AddInteger((unsigned) getDisplayHint());
+ for (range_iterator I = ranges_begin(), E = ranges_end(); I != E; ++I) {
+ ID.AddInteger(I->getBegin().getRawEncoding());
+ ID.AddInteger(I->getEnd().getRawEncoding());
+ }
+}
+
+void PathDiagnosticCallPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ PathDiagnosticPiece::Profile(ID);
+ for (PathPieces::const_iterator it = path.begin(),
+ et = path.end(); it != et; ++it) {
+ ID.Add(**it);
+ }
+}
+
+void PathDiagnosticSpotPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ PathDiagnosticPiece::Profile(ID);
+ ID.Add(Pos);
+}
+
+void PathDiagnosticControlFlowPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ PathDiagnosticPiece::Profile(ID);
+ for (const_iterator I = begin(), E = end(); I != E; ++I)
+ ID.Add(*I);
+}
+
+void PathDiagnosticMacroPiece::Profile(llvm::FoldingSetNodeID &ID) const {
+ PathDiagnosticSpotPiece::Profile(ID);
+ for (PathPieces::const_iterator I = subPieces.begin(), E = subPieces.end();
+ I != E; ++I)
+ ID.Add(**I);
+}
+
+void PathDiagnostic::Profile(llvm::FoldingSetNodeID &ID) const {
+ if (!path.empty())
+ getLocation().Profile(ID);
+ ID.AddString(BugType);
+ ID.AddString(Desc);
+ ID.AddString(Category);
+}
+
+void PathDiagnostic::FullProfile(llvm::FoldingSetNodeID &ID) const {
+ Profile(ID);
+ for (PathPieces::const_iterator I = path.begin(), E = path.end(); I != E; ++I)
+ ID.Add(**I);
+ for (meta_iterator I = meta_begin(), E = meta_end(); I != E; ++I)
+ ID.AddString(*I);
+}
+
+StackHintGenerator::~StackHintGenerator() {}
+
+std::string StackHintGeneratorForSymbol::getMessage(const ExplodedNode *N){
+ ProgramPoint P = N->getLocation();
+ const CallExit *CExit = dyn_cast<CallExit>(&P);
+ assert(CExit && "Stack Hints should be constructed at CallExit points.");
+
+ const CallExpr *CE = dyn_cast_or_null<CallExpr>(CExit->getStmt());
+ if (!CE)
+ return "";
+
+ // Get the successor node to make sure the return statement is evaluated and
+ // CE is set to the result value.
+ N = *N->succ_begin();
+ if (!N)
+ return getMessageForSymbolNotFound();
+
+ // Check if one of the parameters is set to the interesting symbol.
+ ProgramStateRef State = N->getState();
+ const LocationContext *LCtx = N->getLocationContext();
+ unsigned ArgIndex = 0;
+ for (CallExpr::const_arg_iterator I = CE->arg_begin(),
+ E = CE->arg_end(); I != E; ++I, ++ArgIndex){
+ SVal SV = State->getSVal(*I, LCtx);
+
+ // Check if the variable corresponding to the symbol is passed by value.
+ SymbolRef AS = SV.getAsLocSymbol();
+ if (AS == Sym) {
+ return getMessageForArg(*I, ArgIndex);
+ }
+
+ // Check if the parameter is a pointer to the symbol.
+ if (const loc::MemRegionVal *Reg = dyn_cast<loc::MemRegionVal>(&SV)) {
+ SVal PSV = State->getSVal(Reg->getRegion());
+ SymbolRef AS = PSV.getAsLocSymbol();
+ if (AS == Sym) {
+ return getMessageForArg(*I, ArgIndex);
+ }
+ }
+ }
+
+ // Check if we are returning the interesting symbol.
+ SVal SV = State->getSVal(CE, LCtx);
+ SymbolRef RetSym = SV.getAsLocSymbol();
+ if (RetSym == Sym) {
+ return getMessageForReturn(CE);
+ }
+
+ return getMessageForSymbolNotFound();
+}
+
+/// TODO: This is copied from clang diagnostics. Maybe we could just move it to
+/// some common place. (Same as HandleOrdinalModifier.)
+void StackHintGeneratorForSymbol::printOrdinal(unsigned ValNo,
+ llvm::raw_svector_ostream &Out) {
+ assert(ValNo != 0 && "ValNo must be strictly positive!");
+
+ // We could use text forms for the first N ordinals, but the numeric
+ // forms are actually nicer in diagnostics because they stand out.
+ Out << ValNo;
+
+ // It is critically important that we do this perfectly for
+ // user-written sequences with over 100 elements.
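+ // E.g. 1 -> "1st", 2 -> "2nd", 3 -> "3rd", 4 -> "4th", 11-13 -> "11th"-"13th",
+ // 21 -> "21st", 111 -> "111th".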
+ switch (ValNo % 100) {
+ case 11:
+ case 12:
+ case 13:
+ Out << "th"; return;
+ default:
+ switch (ValNo % 10) {
+ case 1: Out << "st"; return;
+ case 2: Out << "nd"; return;
+ case 3: Out << "rd"; return;
+ default: Out << "th"; return;
+ }
+ }
+}
+
+std::string StackHintGeneratorForSymbol::getMessageForArg(const Expr *ArgE,
+ unsigned ArgIndex) {
+ SmallString<200> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ os << Msg << " via ";
+ // Printed parameters start at 1, not 0.
+ printOrdinal(++ArgIndex, os);
+ os << " parameter";
+
+ return os.str();
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
new file mode 100644
index 0000000..323cede
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -0,0 +1,513 @@
+//===--- PlistDiagnostics.cpp - Plist Diagnostics for Paths -----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the PlistDiagnostics object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/FileManager.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+using namespace clang;
+using namespace ento;
+
+typedef llvm::DenseMap<FileID, unsigned> FIDMap;
+
+
+namespace {
+ class PlistDiagnostics : public PathDiagnosticConsumer {
+ const std::string OutputFile;
+ const LangOptions &LangOpts;
+ OwningPtr<PathDiagnosticConsumer> SubPD;
+ bool flushed;
+ const bool SupportsCrossFileDiagnostics;
+ public:
+ PlistDiagnostics(const std::string& prefix, const LangOptions &LangOpts,
+ bool supportsMultipleFiles,
+ PathDiagnosticConsumer *subPD);
+
+ virtual ~PlistDiagnostics() {}
+
+ void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+ SmallVectorImpl<std::string> *FilesMade);
+
+ virtual StringRef getName() const {
+ return "PlistDiagnostics";
+ }
+
+ PathGenerationScheme getGenerationScheme() const;
+ bool supportsLogicalOpControlFlow() const { return true; }
+ bool supportsAllBlockEdges() const { return true; }
+ virtual bool useVerboseDescription() const { return false; }
+ virtual bool supportsCrossFileDiagnostics() const {
+ return SupportsCrossFileDiagnostics;
+ }
+ };
+} // end anonymous namespace
+
+PlistDiagnostics::PlistDiagnostics(const std::string& output,
+ const LangOptions &LO,
+ bool supportsMultipleFiles,
+ PathDiagnosticConsumer *subPD)
+ : OutputFile(output), LangOpts(LO), SubPD(subPD), flushed(false),
+ SupportsCrossFileDiagnostics(supportsMultipleFiles) {}
+
+PathDiagnosticConsumer*
+ento::createPlistDiagnosticConsumer(const std::string& s, const Preprocessor &PP,
+ PathDiagnosticConsumer *subPD) {
+ return new PlistDiagnostics(s, PP.getLangOpts(), false, subPD);
+}
+
+PathDiagnosticConsumer*
+ento::createPlistMultiFileDiagnosticConsumer(const std::string &s,
+ const Preprocessor &PP) {
+ return new PlistDiagnostics(s, PP.getLangOpts(), true, 0);
+}
+
+PathDiagnosticConsumer::PathGenerationScheme
+PlistDiagnostics::getGenerationScheme() const {
+ if (const PathDiagnosticConsumer *PD = SubPD.get())
+ return PD->getGenerationScheme();
+
+ return Extensive;
+}
+
+static void AddFID(FIDMap &FIDs, SmallVectorImpl<FileID> &V,
+ const SourceManager* SM, SourceLocation L) {
+
+ FileID FID = SM->getFileID(SM->getExpansionLoc(L));
+ FIDMap::iterator I = FIDs.find(FID);
+ if (I != FIDs.end()) return;
+ FIDs[FID] = V.size();
+ V.push_back(FID);
+}
+
+static unsigned GetFID(const FIDMap& FIDs, const SourceManager &SM,
+ SourceLocation L) {
+ FileID FID = SM.getFileID(SM.getExpansionLoc(L));
+ FIDMap::const_iterator I = FIDs.find(FID);
+ assert(I != FIDs.end());
+ return I->second;
+}
+
+static raw_ostream &Indent(raw_ostream &o, const unsigned indent) {
+ for (unsigned i = 0; i < indent; ++i) o << ' ';
+ return o;
+}
+
+static void EmitLocation(raw_ostream &o, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ SourceLocation L, const FIDMap &FM,
+ unsigned indent, bool extend = false) {
+
+ FullSourceLoc Loc(SM.getExpansionLoc(L), const_cast<SourceManager&>(SM));
+
+ // Add in the length of the token, so that we cover multi-char tokens.
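+ // E.g. for a 6-character token starting at column 10, the end location
+ // emitted below has column 15 when 'extend' is true.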
+ unsigned offset =
+ extend ? Lexer::MeasureTokenLength(Loc, SM, LangOpts) - 1 : 0;
+
+ Indent(o, indent) << "<dict>\n";
+ Indent(o, indent) << " <key>line</key><integer>"
+ << Loc.getExpansionLineNumber() << "</integer>\n";
+ Indent(o, indent) << " <key>col</key><integer>"
+ << Loc.getExpansionColumnNumber() + offset << "</integer>\n";
+ Indent(o, indent) << " <key>file</key><integer>"
+ << GetFID(FM, SM, Loc) << "</integer>\n";
+ Indent(o, indent) << "</dict>\n";
+}
+
+static void EmitLocation(raw_ostream &o, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ const PathDiagnosticLocation &L, const FIDMap& FM,
+ unsigned indent, bool extend = false) {
+ EmitLocation(o, SM, LangOpts, L.asLocation(), FM, indent, extend);
+}
+
+static void EmitRange(raw_ostream &o, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ PathDiagnosticRange R, const FIDMap &FM,
+ unsigned indent) {
+ Indent(o, indent) << "<array>\n";
+ EmitLocation(o, SM, LangOpts, R.getBegin(), FM, indent+1);
+ EmitLocation(o, SM, LangOpts, R.getEnd(), FM, indent+1, !R.isPoint);
+ Indent(o, indent) << "</array>\n";
+}
+
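+/// Emit a plist <string> element, XML-escaping '&', '<', '>', '\'' and '"'.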
+static raw_ostream &EmitString(raw_ostream &o, StringRef s) {
+ o << "<string>";
+ for (StringRef::const_iterator I = s.begin(), E = s.end(); I != E; ++I) {
+ char c = *I;
+ switch (c) {
+ default: o << c; break;
+ case '&': o << "&amp;"; break;
+ case '<': o << "&lt;"; break;
+ case '>': o << "&gt;"; break;
+ case '\'': o << "&apos;"; break;
+ case '\"': o << "&quot;"; break;
+ }
+ }
+ o << "</string>";
+ return o;
+}
+
+static void ReportControlFlow(raw_ostream &o,
+ const PathDiagnosticControlFlowPiece& P,
+ const FIDMap& FM,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent) {
+
+ Indent(o, indent) << "<dict>\n";
+ ++indent;
+
+ Indent(o, indent) << "<key>kind</key><string>control</string>\n";
+
+ // Emit edges.
+ Indent(o, indent) << "<key>edges</key>\n";
+ ++indent;
+ Indent(o, indent) << "<array>\n";
+ ++indent;
+ for (PathDiagnosticControlFlowPiece::const_iterator I=P.begin(), E=P.end();
+ I!=E; ++I) {
+ Indent(o, indent) << "<dict>\n";
+ ++indent;
+ Indent(o, indent) << "<key>start</key>\n";
+ EmitRange(o, SM, LangOpts, I->getStart().asRange(), FM, indent+1);
+ Indent(o, indent) << "<key>end</key>\n";
+ EmitRange(o, SM, LangOpts, I->getEnd().asRange(), FM, indent+1);
+ --indent;
+ Indent(o, indent) << "</dict>\n";
+ }
+ --indent;
+ Indent(o, indent) << "</array>\n";
+ --indent;
+
+ // Output any helper text.
+ const std::string& s = P.getString();
+ if (!s.empty()) {
+ Indent(o, indent) << "<key>alternate</key>";
+ EmitString(o, s) << '\n';
+ }
+
+ --indent;
+ Indent(o, indent) << "</dict>\n";
+}
+
+static void ReportEvent(raw_ostream &o, const PathDiagnosticPiece& P,
+ const FIDMap& FM,
+ const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent,
+ unsigned depth) {
+
+ Indent(o, indent) << "<dict>\n";
+ ++indent;
+
+ Indent(o, indent) << "<key>kind</key><string>event</string>\n";
+
+ // Output the location.
+ FullSourceLoc L = P.getLocation().asLocation();
+
+ Indent(o, indent) << "<key>location</key>\n";
+ EmitLocation(o, SM, LangOpts, L, FM, indent);
+
+ // Output the ranges (if any).
+ PathDiagnosticPiece::range_iterator RI = P.ranges_begin(),
+ RE = P.ranges_end();
+
+ if (RI != RE) {
+ Indent(o, indent) << "<key>ranges</key>\n";
+ Indent(o, indent) << "<array>\n";
+ ++indent;
+ for (; RI != RE; ++RI)
+ EmitRange(o, SM, LangOpts, *RI, FM, indent+1);
+ --indent;
+ Indent(o, indent) << "</array>\n";
+ }
+
+ // Output the call depth.
+ Indent(o, indent) << "<key>depth</key>"
+ << "<integer>" << depth << "</integer>\n";
+
+ // Output the text.
+ assert(!P.getString().empty());
+ Indent(o, indent) << "<key>extended_message</key>\n";
+ Indent(o, indent);
+ EmitString(o, P.getString()) << '\n';
+
+ // Output the short text.
+ // FIXME: Really use a short string.
+ Indent(o, indent) << "<key>message</key>\n";
+ EmitString(o, P.getString()) << '\n';
+
+ // Finish up.
+ --indent;
+ Indent(o, indent); o << "</dict>\n";
+}
+
+static void ReportPiece(raw_ostream &o,
+ const PathDiagnosticPiece &P,
+ const FIDMap& FM, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent,
+ unsigned depth,
+ bool includeControlFlow);
+
+static void ReportCall(raw_ostream &o,
+ const PathDiagnosticCallPiece &P,
+ const FIDMap& FM, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent,
+ unsigned depth) {
+
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> callEnter =
+ P.getCallEnterEvent();
+
+ if (callEnter)
+ ReportPiece(o, *callEnter, FM, SM, LangOpts, indent, depth, true);
+
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> callEnterWithinCaller =
+ P.getCallEnterWithinCallerEvent();
+
+ ++depth;
+
+ if (callEnterWithinCaller)
+ ReportPiece(o, *callEnterWithinCaller, FM, SM, LangOpts,
+ indent, depth, true);
+
+ for (PathPieces::const_iterator I = P.path.begin(), E = P.path.end();I!=E;++I)
+ ReportPiece(o, **I, FM, SM, LangOpts, indent, depth, true);
+
+ IntrusiveRefCntPtr<PathDiagnosticEventPiece> callExit =
+ P.getCallExitEvent();
+
+ if (callExit)
+ ReportPiece(o, *callExit, FM, SM, LangOpts, indent, depth, true);
+}
+
+static void ReportMacro(raw_ostream &o,
+ const PathDiagnosticMacroPiece& P,
+ const FIDMap& FM, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent,
+ unsigned depth) {
+
+ for (PathPieces::const_iterator I = P.subPieces.begin(), E=P.subPieces.end();
+ I!=E; ++I) {
+ ReportPiece(o, **I, FM, SM, LangOpts, indent, depth, false);
+ }
+}
+
+static void ReportDiag(raw_ostream &o, const PathDiagnosticPiece& P,
+ const FIDMap& FM, const SourceManager &SM,
+ const LangOptions &LangOpts) {
+ ReportPiece(o, P, FM, SM, LangOpts, 4, 0, true);
+}
+
+static void ReportPiece(raw_ostream &o,
+ const PathDiagnosticPiece &P,
+ const FIDMap& FM, const SourceManager &SM,
+ const LangOptions &LangOpts,
+ unsigned indent,
+ unsigned depth,
+ bool includeControlFlow) {
+ switch (P.getKind()) {
+ case PathDiagnosticPiece::ControlFlow:
+ if (includeControlFlow)
+ ReportControlFlow(o, cast<PathDiagnosticControlFlowPiece>(P), FM, SM,
+ LangOpts, indent);
+ break;
+ case PathDiagnosticPiece::Call:
+ ReportCall(o, cast<PathDiagnosticCallPiece>(P), FM, SM, LangOpts,
+ indent, depth);
+ break;
+ case PathDiagnosticPiece::Event:
+ ReportEvent(o, cast<PathDiagnosticSpotPiece>(P), FM, SM, LangOpts,
+ indent, depth);
+ break;
+ case PathDiagnosticPiece::Macro:
+ ReportMacro(o, cast<PathDiagnosticMacroPiece>(P), FM, SM, LangOpts,
+ indent, depth);
+ break;
+ }
+}
+
+void PlistDiagnostics::FlushDiagnosticsImpl(
+ std::vector<const PathDiagnostic *> &Diags,
+ SmallVectorImpl<std::string> *FilesMade) {
+ // Build up a set of FIDs that we use by scanning the locations and
+ // ranges of the diagnostics.
+ FIDMap FM;
+ SmallVector<FileID, 10> Fids;
+ const SourceManager* SM = 0;
+
+ if (!Diags.empty())
+ SM = &(*(*Diags.begin())->path.begin())->getLocation().getManager();
+
+
+ for (std::vector<const PathDiagnostic*>::iterator DI = Diags.begin(),
+ DE = Diags.end(); DI != DE; ++DI) {
+
+ const PathDiagnostic *D = *DI;
+
+ llvm::SmallVector<const PathPieces *, 5> WorkList;
+ WorkList.push_back(&D->path);
+
+ while (!WorkList.empty()) {
+ const PathPieces &path = *WorkList.back();
+ WorkList.pop_back();
+
+ for (PathPieces::const_iterator I = path.begin(), E = path.end();
+ I!=E; ++I) {
+ const PathDiagnosticPiece *piece = I->getPtr();
+ AddFID(FM, Fids, SM, piece->getLocation().asLocation());
+
+ for (PathDiagnosticPiece::range_iterator RI = piece->ranges_begin(),
+ RE= piece->ranges_end(); RI != RE; ++RI) {
+ AddFID(FM, Fids, SM, RI->getBegin());
+ AddFID(FM, Fids, SM, RI->getEnd());
+ }
+
+ if (const PathDiagnosticCallPiece *call =
+ dyn_cast<PathDiagnosticCallPiece>(piece)) {
+ WorkList.push_back(&call->path);
+ }
+ else if (const PathDiagnosticMacroPiece *macro =
+ dyn_cast<PathDiagnosticMacroPiece>(piece)) {
+ WorkList.push_back(&macro->subPieces);
+ }
+ }
+ }
+ }
+
+ // Open the file.
+ std::string ErrMsg;
+ llvm::raw_fd_ostream o(OutputFile.c_str(), ErrMsg);
+ if (!ErrMsg.empty()) {
+ llvm::errs() << "warning: could not create file: " << OutputFile << '\n';
+ return;
+ }
+
+ // Write the plist header.
+ o << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+ "<!DOCTYPE plist PUBLIC \"-//Apple Computer//DTD PLIST 1.0//EN\" "
+ "\"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n"
+ "<plist version=\"1.0\">\n";
+
+ // Write the root object: a <dict> containing...
+ // - "files", an <array> mapping from FIDs to file names
+ // - "diagnostics", an <array> containing the path diagnostics
+ o << "<dict>\n"
+ " <key>files</key>\n"
+ " <array>\n";
+
+ for (SmallVectorImpl<FileID>::iterator I=Fids.begin(), E=Fids.end();
+ I!=E; ++I) {
+ o << " ";
+ EmitString(o, SM->getFileEntryForID(*I)->getName()) << '\n';
+ }
+
+ o << " </array>\n"
+ " <key>diagnostics</key>\n"
+ " <array>\n";
+
+ for (std::vector<const PathDiagnostic*>::iterator DI=Diags.begin(),
+ DE = Diags.end(); DI!=DE; ++DI) {
+
+ o << " <dict>\n"
+ " <key>path</key>\n";
+
+ const PathDiagnostic *D = *DI;
+
+ o << " <array>\n";
+
+ for (PathPieces::const_iterator I = D->path.begin(), E = D->path.end();
+ I != E; ++I)
+ ReportDiag(o, **I, FM, *SM, LangOpts);
+
+ o << " </array>\n";
+
+ // Output the bug type and bug category.
+ o << " <key>description</key>";
+ EmitString(o, D->getDescription()) << '\n';
+ o << " <key>category</key>";
+ EmitString(o, D->getCategory()) << '\n';
+ o << " <key>type</key>";
+ EmitString(o, D->getBugType()) << '\n';
+
+ // Output information about the semantic context where
+ // the issue occurred.
+ if (const Decl *DeclWithIssue = D->getDeclWithIssue()) {
+ // FIXME: handle blocks, which have no name.
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(DeclWithIssue)) {
+ StringRef declKind;
+ switch (ND->getKind()) {
+ case Decl::CXXRecord:
+ declKind = "C++ class";
+ break;
+ case Decl::CXXMethod:
+ declKind = "C++ method";
+ break;
+ case Decl::ObjCMethod:
+ declKind = "Objective-C method";
+ break;
+ case Decl::Function:
+ declKind = "function";
+ break;
+ default:
+ break;
+ }
+ if (!declKind.empty()) {
+ const std::string &declName = ND->getDeclName().getAsString();
+ o << " <key>issue_context_kind</key>";
+ EmitString(o, declKind) << '\n';
+ o << " <key>issue_context</key>";
+ EmitString(o, declName) << '\n';
+ }
+ }
+ }
+
+ // Output the location of the bug.
+ o << " <key>location</key>\n";
+ EmitLocation(o, *SM, LangOpts, D->getLocation(), FM, 2);
+
+ // Output the diagnostic to the sub-diagnostic client, if any.
+ if (SubPD) {
+ std::vector<const PathDiagnostic *> SubDiags;
+ SubDiags.push_back(D);
+ SmallVector<std::string, 1> SubFilesMade;
+ SubPD->FlushDiagnosticsImpl(SubDiags, &SubFilesMade);
+
+ if (!SubFilesMade.empty()) {
+ o << " <key>" << SubPD->getName() << "_files</key>\n";
+ o << " <array>\n";
+ for (size_t i = 0, n = SubFilesMade.size(); i < n ; ++i)
+ o << " <string>" << SubFilesMade[i] << "</string>\n";
+ o << " </array>\n";
+ }
+ }
+
+ // Close up the entry.
+ o << " </dict>\n";
+ }
+
+ o << " </array>\n";
+
+ // Finish.
+ o << "</dict>\n</plist>";
+
+ if (FilesMade)
+ FilesMade->push_back(OutputFile);
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
new file mode 100644
index 0000000..b9cfa27
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -0,0 +1,709 @@
+//= ProgramState.cpp - Path-Sensitive "State" for tracking values --*- C++ -*--=
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements ProgramState and ProgramStateManager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/CFG.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/TaintManager.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+// Give the vtable for ConstraintManager somewhere to live.
+// FIXME: Move this elsewhere.
+ConstraintManager::~ConstraintManager() {}
+
+namespace clang { namespace ento {
+
+/// Increment the number of times this state is referenced.
+void ProgramStateRetain(const ProgramState *state) {
+ ++const_cast<ProgramState*>(state)->refCount;
+}
+
+/// Decrement the number of times this state is referenced.
+void ProgramStateRelease(const ProgramState *state) {
+ assert(state->refCount > 0);
+ ProgramState *s = const_cast<ProgramState*>(state);
+ if (--s->refCount == 0) {
+ ProgramStateManager &Mgr = s->getStateManager();
+ Mgr.StateSet.RemoveNode(s);
+ s->~ProgramState();
+ Mgr.freeStates.push_back(s);
+ }
+}
+}}
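
ProgramStateRef is used through an intrusive reference-counted smart pointer built on the retain/release pair above; once the last reference goes away, the node is removed from the uniquing set and its memory is recycled. A minimal standalone sketch of that retain/release/recycle pattern, with illustrative stand-in types rather than the analyzer's:

    #include <cassert>
    #include <vector>

    // Illustrative stand-ins; the real types are ProgramState and ProgramStateRef.
    struct State {
      unsigned refCount = 0;
    };

    static std::vector<State *> freeStates;  // recycled nodes, as in the manager

    void StateRetain(State *s) { ++s->refCount; }

    void StateRelease(State *s) {
      assert(s->refCount > 0);
      if (--s->refCount == 0)
        freeStates.push_back(s);  // the real code also removes it from the FoldingSet
    }

    int main() {
      State *s = new State();
      StateRetain(s);            // first reference
      StateRetain(s);            // second reference
      StateRelease(s);           // still referenced, stays alive
      StateRelease(s);           // last reference gone: node is recycled
      assert(freeStates.size() == 1);
      delete freeStates.back();  // the analyzer would reuse the memory instead
      return 0;
    }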
+
+ProgramState::ProgramState(ProgramStateManager *mgr, const Environment& env,
+ StoreRef st, GenericDataMap gdm)
+ : stateMgr(mgr),
+ Env(env),
+ store(st.getStore()),
+ GDM(gdm),
+ refCount(0) {
+ stateMgr->getStoreManager().incrementReferenceCount(store);
+}
+
+ProgramState::ProgramState(const ProgramState &RHS)
+ : llvm::FoldingSetNode(),
+ stateMgr(RHS.stateMgr),
+ Env(RHS.Env),
+ store(RHS.store),
+ GDM(RHS.GDM),
+ refCount(0) {
+ stateMgr->getStoreManager().incrementReferenceCount(store);
+}
+
+ProgramState::~ProgramState() {
+ if (store)
+ stateMgr->getStoreManager().decrementReferenceCount(store);
+}
+
+ProgramStateManager::~ProgramStateManager() {
+ for (GDMContextsTy::iterator I=GDMContexts.begin(), E=GDMContexts.end();
+ I!=E; ++I)
+ I->second.second(I->second.first);
+}
+
+ProgramStateRef
+ProgramStateManager::removeDeadBindings(ProgramStateRef state,
+ const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper) {
+
+ // This code essentially performs a "mark-and-sweep" of the VariableBindings.
+ // The roots are any Block-level exprs and Decls that our liveness algorithm
+ // tells us are live. We then see what Decls they may reference, and keep
+ // those around. This code more than likely can be made faster, and the
+  // frequency with which this method is called should be experimented with
+ // for optimum performance.
+ ProgramState NewState = *state;
+
+ NewState.Env = EnvMgr.removeDeadBindings(NewState.Env, SymReaper, state);
+
+ // Clean up the store.
+ StoreRef newStore = StoreMgr->removeDeadBindings(NewState.getStore(), LCtx,
+ SymReaper);
+ NewState.setStore(newStore);
+ SymReaper.setReapedStore(newStore);
+
+ return getPersistentState(NewState);
+}
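
The mark-and-sweep the comment describes boils down to: compute the set of live roots, then drop every binding that is not reachable from them. A toy version of that sweep, using plain standard containers instead of the analyzer's environment and store:

    #include <map>
    #include <set>
    #include <string>

    // Toy sweep: given the variables the liveness analysis marked live, drop every
    // binding whose variable is not in that set.  The real code applies the same
    // idea to the environment, the store, and (elsewhere) the constraints.
    static std::map<std::string, int>
    removeDead(std::map<std::string, int> bindings,
               const std::set<std::string> &live) {
      for (auto it = bindings.begin(); it != bindings.end();) {
        if (live.count(it->first))
          ++it;                      // still live: keep the binding
        else
          it = bindings.erase(it);   // dead: reclaim it
      }
      return bindings;
    }

    int main() {
      std::map<std::string, int> bindings = {{"x", 1}, {"y", 2}, {"tmp", 3}};
      std::set<std::string> live = {"x", "y"};
      return removeDead(bindings, live).count("tmp") == 0 ? 0 : 1;
    }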
+
+ProgramStateRef ProgramStateManager::MarshalState(ProgramStateRef state,
+ const StackFrameContext *InitLoc) {
+ // make up an empty state for now.
+ ProgramState State(this,
+ EnvMgr.getInitialEnvironment(),
+ StoreMgr->getInitialStore(InitLoc),
+ GDMFactory.getEmptyMap());
+
+ return getPersistentState(State);
+}
+
+ProgramStateRef ProgramState::bindCompoundLiteral(const CompoundLiteralExpr *CL,
+ const LocationContext *LC,
+ SVal V) const {
+ const StoreRef &newStore =
+ getStateManager().StoreMgr->BindCompoundLiteral(getStore(), CL, LC, V);
+ return makeWithStore(newStore);
+}
+
+ProgramStateRef ProgramState::bindDecl(const VarRegion* VR, SVal IVal) const {
+ const StoreRef &newStore =
+ getStateManager().StoreMgr->BindDecl(getStore(), VR, IVal);
+ return makeWithStore(newStore);
+}
+
+ProgramStateRef ProgramState::bindDeclWithNoInit(const VarRegion* VR) const {
+ const StoreRef &newStore =
+ getStateManager().StoreMgr->BindDeclWithNoInit(getStore(), VR);
+ return makeWithStore(newStore);
+}
+
+ProgramStateRef ProgramState::bindLoc(Loc LV, SVal V) const {
+ ProgramStateManager &Mgr = getStateManager();
+ ProgramStateRef newState = makeWithStore(Mgr.StoreMgr->Bind(getStore(),
+ LV, V));
+ const MemRegion *MR = LV.getAsRegion();
+ if (MR && Mgr.getOwningEngine())
+ return Mgr.getOwningEngine()->processRegionChange(newState, MR);
+
+ return newState;
+}
+
+ProgramStateRef ProgramState::bindDefault(SVal loc, SVal V) const {
+ ProgramStateManager &Mgr = getStateManager();
+ const MemRegion *R = cast<loc::MemRegionVal>(loc).getRegion();
+ const StoreRef &newStore = Mgr.StoreMgr->BindDefault(getStore(), R, V);
+ ProgramStateRef new_state = makeWithStore(newStore);
+ return Mgr.getOwningEngine() ?
+ Mgr.getOwningEngine()->processRegionChange(new_state, R) :
+ new_state;
+}
+
+ProgramStateRef
+ProgramState::invalidateRegions(ArrayRef<const MemRegion *> Regions,
+ const Expr *E, unsigned Count,
+ const LocationContext *LCtx,
+ StoreManager::InvalidatedSymbols *IS,
+ const CallOrObjCMessage *Call) const {
+ if (!IS) {
+ StoreManager::InvalidatedSymbols invalidated;
+ return invalidateRegionsImpl(Regions, E, Count, LCtx,
+ invalidated, Call);
+ }
+ return invalidateRegionsImpl(Regions, E, Count, LCtx, *IS, Call);
+}
+
+ProgramStateRef
+ProgramState::invalidateRegionsImpl(ArrayRef<const MemRegion *> Regions,
+ const Expr *E, unsigned Count,
+ const LocationContext *LCtx,
+ StoreManager::InvalidatedSymbols &IS,
+ const CallOrObjCMessage *Call) const {
+ ProgramStateManager &Mgr = getStateManager();
+ SubEngine* Eng = Mgr.getOwningEngine();
+
+ if (Eng && Eng->wantsRegionChangeUpdate(this)) {
+ StoreManager::InvalidatedRegions Invalidated;
+ const StoreRef &newStore
+ = Mgr.StoreMgr->invalidateRegions(getStore(), Regions, E, Count, LCtx, IS,
+ Call, &Invalidated);
+ ProgramStateRef newState = makeWithStore(newStore);
+ return Eng->processRegionChanges(newState, &IS, Regions, Invalidated, Call);
+ }
+
+ const StoreRef &newStore =
+ Mgr.StoreMgr->invalidateRegions(getStore(), Regions, E, Count, LCtx, IS,
+ Call, NULL);
+ return makeWithStore(newStore);
+}
+
+ProgramStateRef ProgramState::unbindLoc(Loc LV) const {
+ assert(!isa<loc::MemRegionVal>(LV) && "Use invalidateRegion instead.");
+
+ Store OldStore = getStore();
+ const StoreRef &newStore = getStateManager().StoreMgr->Remove(OldStore, LV);
+
+ if (newStore.getStore() == OldStore)
+ return this;
+
+ return makeWithStore(newStore);
+}
+
+ProgramStateRef
+ProgramState::enterStackFrame(const LocationContext *callerCtx,
+ const StackFrameContext *calleeCtx) const {
+ const StoreRef &new_store =
+ getStateManager().StoreMgr->enterStackFrame(this, callerCtx, calleeCtx);
+ return makeWithStore(new_store);
+}
+
+SVal ProgramState::getSValAsScalarOrLoc(const MemRegion *R) const {
+  // We only want to do fetches from regions that we can actually bind
+  // values to.  For example, SymbolicRegions of type 'id<...>' cannot
+  // have direct bindings (but there can be bindings on their subregions).
+ if (!R->isBoundable())
+ return UnknownVal();
+
+ if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
+ QualType T = TR->getValueType();
+ if (Loc::isLocType(T) || T->isIntegerType())
+ return getSVal(R);
+ }
+
+ return UnknownVal();
+}
+
+SVal ProgramState::getSVal(Loc location, QualType T) const {
+ SVal V = getRawSVal(cast<Loc>(location), T);
+
+ // If 'V' is a symbolic value that is *perfectly* constrained to
+ // be a constant value, use that value instead to lessen the burden
+  // on later analysis stages (so we have fewer symbolic values to reason
+ // about).
+ if (!T.isNull()) {
+ if (SymbolRef sym = V.getAsSymbol()) {
+ if (const llvm::APSInt *Int = getSymVal(sym)) {
+ // FIXME: Because we don't correctly model (yet) sign-extension
+ // and truncation of symbolic values, we need to convert
+ // the integer value to the correct signedness and bitwidth.
+ //
+ // This shows up in the following:
+ //
+ // char foo();
+ // unsigned x = foo();
+ // if (x == 54)
+ // ...
+ //
+ // The symbolic value stored to 'x' is actually the conjured
+ // symbol for the call to foo(); the type of that symbol is 'char',
+ // not unsigned.
+ const llvm::APSInt &NewV = getBasicVals().Convert(T, *Int);
+
+ if (isa<Loc>(V))
+ return loc::ConcreteInt(NewV);
+ else
+ return nonloc::ConcreteInt(NewV);
+ }
+ }
+ }
+
+ return V;
+}
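
The FIXME above is about signedness and bit-width: the constant known for the symbol has the symbol's own type (here 'char'), so it must be converted, via getBasicVals().Convert(), to the expression's type before it can replace the symbol. A small standalone illustration of why that conversion matters:

    #include <cstdint>
    #include <iostream>

    // The conjured symbol's value is known as a signed 8-bit 'char'; before it can
    // stand in for the symbol inside an 'unsigned' expression it has to be
    // re-extended to that type, which is the job the conversion above performs.
    int main() {
      int8_t asChar = -1;                                   // value in the symbol's type
      uint32_t asUnsigned = static_cast<uint32_t>(asChar);  // value in the expression's type
      std::cout << int(asChar) << " becomes " << asUnsigned << "\n";
      // Prints "-1 becomes 4294967295": comparing the raw 8-bit constant against an
      // unsigned literal such as 54 without the conversion would give wrong answers.
      return 0;
    }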
+
+ProgramStateRef ProgramState::BindExpr(const Stmt *S,
+ const LocationContext *LCtx,
+ SVal V, bool Invalidate) const{
+ Environment NewEnv =
+ getStateManager().EnvMgr.bindExpr(Env, EnvironmentEntry(S, LCtx), V,
+ Invalidate);
+ if (NewEnv == Env)
+ return this;
+
+ ProgramState NewSt = *this;
+ NewSt.Env = NewEnv;
+ return getStateManager().getPersistentState(NewSt);
+}
+
+ProgramStateRef
+ProgramState::bindExprAndLocation(const Stmt *S, const LocationContext *LCtx,
+ SVal location,
+ SVal V) const {
+ Environment NewEnv =
+ getStateManager().EnvMgr.bindExprAndLocation(Env,
+ EnvironmentEntry(S, LCtx),
+ location, V);
+
+ if (NewEnv == Env)
+ return this;
+
+ ProgramState NewSt = *this;
+ NewSt.Env = NewEnv;
+ return getStateManager().getPersistentState(NewSt);
+}
+
+ProgramStateRef ProgramState::assumeInBound(DefinedOrUnknownSVal Idx,
+ DefinedOrUnknownSVal UpperBound,
+ bool Assumption,
+ QualType indexTy) const {
+ if (Idx.isUnknown() || UpperBound.isUnknown())
+ return this;
+
+ // Build an expression for 0 <= Idx < UpperBound.
+ // This is the same as Idx + MIN < UpperBound + MIN, if overflow is allowed.
+ // FIXME: This should probably be part of SValBuilder.
+ ProgramStateManager &SM = getStateManager();
+ SValBuilder &svalBuilder = SM.getSValBuilder();
+ ASTContext &Ctx = svalBuilder.getContext();
+
+ // Get the offset: the minimum value of the array index type.
+ BasicValueFactory &BVF = svalBuilder.getBasicValueFactory();
+  // FIXME: This should be using ValueManager::ArrayIndexTy...somehow.
+ if (indexTy.isNull())
+ indexTy = Ctx.IntTy;
+ nonloc::ConcreteInt Min(BVF.getMinValue(indexTy));
+
+ // Adjust the index.
+ SVal newIdx = svalBuilder.evalBinOpNN(this, BO_Add,
+ cast<NonLoc>(Idx), Min, indexTy);
+ if (newIdx.isUnknownOrUndef())
+ return this;
+
+ // Adjust the upper bound.
+ SVal newBound =
+ svalBuilder.evalBinOpNN(this, BO_Add, cast<NonLoc>(UpperBound),
+ Min, indexTy);
+
+ if (newBound.isUnknownOrUndef())
+ return this;
+
+ // Build the actual comparison.
+ SVal inBound = svalBuilder.evalBinOpNN(this, BO_LT,
+ cast<NonLoc>(newIdx), cast<NonLoc>(newBound),
+ Ctx.IntTy);
+ if (inBound.isUnknownOrUndef())
+ return this;
+
+ // Finally, let the constraint manager take care of it.
+ ConstraintManager &CM = SM.getConstraintManager();
+ return CM.assume(this, cast<DefinedSVal>(inBound), Assumption);
+}
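
The rebasing step above turns the two-sided check 0 <= Idx < UpperBound into a single comparison by adding the index type's minimum value to both sides under wraparound arithmetic. For a 32-bit signed index this is equivalent to comparing the raw values as unsigned, which the following sketch (plain integers standing in for SVals) checks directly:

    #include <cassert>
    #include <cstdint>

    // assumeInBound() collapses "0 <= Idx && Idx < Bound" into one comparison by
    // adding the index type's minimum value to both sides under wraparound
    // arithmetic.  For a 32-bit signed index that is the same as comparing the
    // raw bit patterns as unsigned.
    static bool inBound(int32_t Idx, int32_t Bound) {
      return static_cast<uint32_t>(Idx) < static_cast<uint32_t>(Bound);
    }

    int main() {
      assert(inBound(0, 10));     // first valid index
      assert(inBound(9, 10));     // last valid index
      assert(!inBound(10, 10));   // one past the end
      assert(!inBound(-1, 10));   // a negative index fails the same single test
      return 0;
    }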
+
+ProgramStateRef ProgramStateManager::getInitialState(const LocationContext *InitLoc) {
+ ProgramState State(this,
+ EnvMgr.getInitialEnvironment(),
+ StoreMgr->getInitialStore(InitLoc),
+ GDMFactory.getEmptyMap());
+
+ return getPersistentState(State);
+}
+
+ProgramStateRef ProgramStateManager::getPersistentStateWithGDM(
+ ProgramStateRef FromState,
+ ProgramStateRef GDMState) {
+ ProgramState NewState(*FromState);
+ NewState.GDM = GDMState->GDM;
+ return getPersistentState(NewState);
+}
+
+ProgramStateRef ProgramStateManager::getPersistentState(ProgramState &State) {
+
+ llvm::FoldingSetNodeID ID;
+ State.Profile(ID);
+ void *InsertPos;
+
+ if (ProgramState *I = StateSet.FindNodeOrInsertPos(ID, InsertPos))
+ return I;
+
+ ProgramState *newState = 0;
+ if (!freeStates.empty()) {
+ newState = freeStates.back();
+ freeStates.pop_back();
+ }
+ else {
+ newState = (ProgramState*) Alloc.Allocate<ProgramState>();
+ }
+ new (newState) ProgramState(State);
+ StateSet.InsertNode(newState, InsertPos);
+ return newState;
+}
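
getPersistentState() is a hash-consing step: the candidate state is profiled, looked up in the FoldingSet, and a new node is created (or a recycled one reused) only if no structurally identical state already exists, so equal states always share one pointer. A minimal sketch of the same idea with standard containers instead of a FoldingSet; all names here are illustrative:

    #include <map>
    #include <memory>
    #include <utility>

    // Minimal hash-consing in the spirit of getPersistentState(): look up a
    // structurally identical node first, so equal states share one canonical
    // object.  std::map stands in for the FoldingSet and the key plays the role
    // of ProgramState::Profile().
    struct ToyState {
      int env;
      int store;
    };

    class ToyStateManager {
      std::map<std::pair<int, int>, std::unique_ptr<ToyState>> Canonical;

    public:
      const ToyState *getPersistentState(const ToyState &S) {
        auto Key = std::make_pair(S.env, S.store);
        auto It = Canonical.find(Key);
        if (It != Canonical.end())
          return It->second.get();                // reuse the existing node
        auto Owned = std::make_unique<ToyState>(S);
        const ToyState *Raw = Owned.get();
        Canonical.emplace(Key, std::move(Owned)); // remember the new canonical copy
        return Raw;
      }
    };

    int main() {
      ToyStateManager Mgr;
      const ToyState *A = Mgr.getPersistentState({1, 2});
      const ToyState *B = Mgr.getPersistentState({1, 2});
      const ToyState *C = Mgr.getPersistentState({1, 3});
      return (A == B && A != C) ? 0 : 1;          // equal contents, same pointer
    }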
+
+ProgramStateRef ProgramState::makeWithStore(const StoreRef &store) const {
+ ProgramState NewSt(*this);
+ NewSt.setStore(store);
+ return getStateManager().getPersistentState(NewSt);
+}
+
+void ProgramState::setStore(const StoreRef &newStore) {
+ Store newStoreStore = newStore.getStore();
+ if (newStoreStore)
+ stateMgr->getStoreManager().incrementReferenceCount(newStoreStore);
+ if (store)
+ stateMgr->getStoreManager().decrementReferenceCount(store);
+ store = newStoreStore;
+}
+
+//===----------------------------------------------------------------------===//
+// State pretty-printing.
+//===----------------------------------------------------------------------===//
+
+void ProgramState::print(raw_ostream &Out,
+ const char *NL, const char *Sep) const {
+ // Print the store.
+ ProgramStateManager &Mgr = getStateManager();
+ Mgr.getStoreManager().print(getStore(), Out, NL, Sep);
+
+ // Print out the environment.
+ Env.print(Out, NL, Sep);
+
+ // Print out the constraints.
+ Mgr.getConstraintManager().print(this, Out, NL, Sep);
+
+ // Print checker-specific data.
+ Mgr.getOwningEngine()->printState(Out, this, NL, Sep);
+}
+
+void ProgramState::printDOT(raw_ostream &Out) const {
+ print(Out, "\\l", "\\|");
+}
+
+void ProgramState::dump() const {
+ print(llvm::errs());
+}
+
+void ProgramState::printTaint(raw_ostream &Out,
+ const char *NL, const char *Sep) const {
+ TaintMapImpl TM = get<TaintMap>();
+
+ if (!TM.isEmpty())
+ Out <<"Tainted Symbols:" << NL;
+
+ for (TaintMapImpl::iterator I = TM.begin(), E = TM.end(); I != E; ++I) {
+ Out << I->first << " : " << I->second << NL;
+ }
+}
+
+void ProgramState::dumpTaint() const {
+ printTaint(llvm::errs());
+}
+
+//===----------------------------------------------------------------------===//
+// Generic Data Map.
+//===----------------------------------------------------------------------===//
+
+void *const* ProgramState::FindGDM(void *K) const {
+ return GDM.lookup(K);
+}
+
+void*
+ProgramStateManager::FindGDMContext(void *K,
+ void *(*CreateContext)(llvm::BumpPtrAllocator&),
+ void (*DeleteContext)(void*)) {
+
+ std::pair<void*, void (*)(void*)>& p = GDMContexts[K];
+ if (!p.first) {
+ p.first = CreateContext(Alloc);
+ p.second = DeleteContext;
+ }
+
+ return p.first;
+}
+
+ProgramStateRef ProgramStateManager::addGDM(ProgramStateRef St, void *Key, void *Data){
+ ProgramState::GenericDataMap M1 = St->getGDM();
+ ProgramState::GenericDataMap M2 = GDMFactory.add(M1, Key, Data);
+
+ if (M1 == M2)
+ return St;
+
+ ProgramState NewSt = *St;
+ NewSt.GDM = M2;
+ return getPersistentState(NewSt);
+}
+
+ProgramStateRef ProgramStateManager::removeGDM(ProgramStateRef state, void *Key) {
+ ProgramState::GenericDataMap OldM = state->getGDM();
+ ProgramState::GenericDataMap NewM = GDMFactory.remove(OldM, Key);
+
+ if (NewM == OldM)
+ return state;
+
+ ProgramState NewState = *state;
+ NewState.GDM = NewM;
+ return getPersistentState(NewState);
+}
+
+void ScanReachableSymbols::anchor() { }
+
+bool ScanReachableSymbols::scan(nonloc::CompoundVal val) {
+ for (nonloc::CompoundVal::iterator I=val.begin(), E=val.end(); I!=E; ++I)
+ if (!scan(*I))
+ return false;
+
+ return true;
+}
+
+bool ScanReachableSymbols::scan(const SymExpr *sym) {
+ unsigned &isVisited = visited[sym];
+ if (isVisited)
+ return true;
+ isVisited = 1;
+
+ if (!visitor.VisitSymbol(sym))
+ return false;
+
+ // TODO: should be rewritten using SymExpr::symbol_iterator.
+ switch (sym->getKind()) {
+ case SymExpr::RegionValueKind:
+ case SymExpr::ConjuredKind:
+ case SymExpr::DerivedKind:
+ case SymExpr::ExtentKind:
+ case SymExpr::MetadataKind:
+ break;
+ case SymExpr::CastSymbolKind:
+ return scan(cast<SymbolCast>(sym)->getOperand());
+ case SymExpr::SymIntKind:
+ return scan(cast<SymIntExpr>(sym)->getLHS());
+ case SymExpr::IntSymKind:
+ return scan(cast<IntSymExpr>(sym)->getRHS());
+ case SymExpr::SymSymKind: {
+ const SymSymExpr *x = cast<SymSymExpr>(sym);
+ return scan(x->getLHS()) && scan(x->getRHS());
+ }
+ }
+ return true;
+}
+
+bool ScanReachableSymbols::scan(SVal val) {
+ if (loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(&val))
+ return scan(X->getRegion());
+
+ if (nonloc::LocAsInteger *X = dyn_cast<nonloc::LocAsInteger>(&val))
+ return scan(X->getLoc());
+
+ if (SymbolRef Sym = val.getAsSymbol())
+ return scan(Sym);
+
+ if (const SymExpr *Sym = val.getAsSymbolicExpression())
+ return scan(Sym);
+
+ if (nonloc::CompoundVal *X = dyn_cast<nonloc::CompoundVal>(&val))
+ return scan(*X);
+
+ return true;
+}
+
+bool ScanReachableSymbols::scan(const MemRegion *R) {
+ if (isa<MemSpaceRegion>(R))
+ return true;
+
+ unsigned &isVisited = visited[R];
+ if (isVisited)
+ return true;
+ isVisited = 1;
+
+
+ if (!visitor.VisitMemRegion(R))
+ return false;
+
+ // If this is a symbolic region, visit the symbol for the region.
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R))
+ if (!visitor.VisitSymbol(SR->getSymbol()))
+ return false;
+
+ // If this is a subregion, also visit the parent regions.
+ if (const SubRegion *SR = dyn_cast<SubRegion>(R))
+ if (!scan(SR->getSuperRegion()))
+ return false;
+
+ // Now look at the binding to this region (if any).
+ if (!scan(state->getSValAsScalarOrLoc(R)))
+ return false;
+
+ // Now look at the subregions.
+ if (!SRM.get())
+ SRM.reset(state->getStateManager().getStoreManager().
+ getSubRegionMap(state->getStore()));
+
+ return SRM->iterSubRegions(R, *this);
+}
+
+bool ProgramState::scanReachableSymbols(SVal val, SymbolVisitor& visitor) const {
+ ScanReachableSymbols S(this, visitor);
+ return S.scan(val);
+}
+
+bool ProgramState::scanReachableSymbols(const SVal *I, const SVal *E,
+ SymbolVisitor &visitor) const {
+ ScanReachableSymbols S(this, visitor);
+ for ( ; I != E; ++I) {
+ if (!S.scan(*I))
+ return false;
+ }
+ return true;
+}
+
+bool ProgramState::scanReachableSymbols(const MemRegion * const *I,
+ const MemRegion * const *E,
+ SymbolVisitor &visitor) const {
+ ScanReachableSymbols S(this, visitor);
+ for ( ; I != E; ++I) {
+ if (!S.scan(*I))
+ return false;
+ }
+ return true;
+}
+
+ProgramStateRef ProgramState::addTaint(const Stmt *S,
+ const LocationContext *LCtx,
+ TaintTagType Kind) const {
+ if (const Expr *E = dyn_cast_or_null<Expr>(S))
+ S = E->IgnoreParens();
+
+ SymbolRef Sym = getSVal(S, LCtx).getAsSymbol();
+ if (Sym)
+ return addTaint(Sym, Kind);
+
+  // Otherwise, fall back to the expression's region; addTaint(R, Kind) simply
+  // returns this state unchanged when no taint can be added.
+  const MemRegion *R = getSVal(S, LCtx).getAsRegion();
+  return addTaint(R, Kind);
+}
+
+ProgramStateRef ProgramState::addTaint(const MemRegion *R,
+ TaintTagType Kind) const {
+ if (const SymbolicRegion *SR = dyn_cast_or_null<SymbolicRegion>(R))
+ return addTaint(SR->getSymbol(), Kind);
+ return this;
+}
+
+ProgramStateRef ProgramState::addTaint(SymbolRef Sym,
+ TaintTagType Kind) const {
+ // If this is a symbol cast, remove the cast before adding the taint. Taint
+ // is cast agnostic.
+ while (const SymbolCast *SC = dyn_cast<SymbolCast>(Sym))
+ Sym = SC->getOperand();
+
+ ProgramStateRef NewState = set<TaintMap>(Sym, Kind);
+ assert(NewState);
+ return NewState;
+}
+
+bool ProgramState::isTainted(const Stmt *S, const LocationContext *LCtx,
+ TaintTagType Kind) const {
+ if (const Expr *E = dyn_cast_or_null<Expr>(S))
+ S = E->IgnoreParens();
+
+ SVal val = getSVal(S, LCtx);
+ return isTainted(val, Kind);
+}
+
+bool ProgramState::isTainted(SVal V, TaintTagType Kind) const {
+ if (const SymExpr *Sym = V.getAsSymExpr())
+ return isTainted(Sym, Kind);
+ if (const MemRegion *Reg = V.getAsRegion())
+ return isTainted(Reg, Kind);
+ return false;
+}
+
+bool ProgramState::isTainted(const MemRegion *Reg, TaintTagType K) const {
+ if (!Reg)
+ return false;
+
+ // Element region (array element) is tainted if either the base or the offset
+  // is tainted.
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(Reg))
+ return isTainted(ER->getSuperRegion(), K) || isTainted(ER->getIndex(), K);
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Reg))
+ return isTainted(SR->getSymbol(), K);
+
+ if (const SubRegion *ER = dyn_cast<SubRegion>(Reg))
+ return isTainted(ER->getSuperRegion(), K);
+
+ return false;
+}
+
+bool ProgramState::isTainted(SymbolRef Sym, TaintTagType Kind) const {
+ if (!Sym)
+ return false;
+
+ // Traverse all the symbols this symbol depends on to see if any are tainted.
+ bool Tainted = false;
+ for (SymExpr::symbol_iterator SI = Sym->symbol_begin(), SE =Sym->symbol_end();
+ SI != SE; ++SI) {
+ assert(isa<SymbolData>(*SI));
+ const TaintTagType *Tag = get<TaintMap>(*SI);
+ Tainted = (Tag && *Tag == Kind);
+
+ // If this is a SymbolDerived with a tainted parent, it's also tainted.
+ if (const SymbolDerived *SD = dyn_cast<SymbolDerived>(*SI))
+ Tainted = Tainted || isTainted(SD->getParentSymbol(), Kind);
+
+ // If memory region is tainted, data is also tainted.
+ if (const SymbolRegionValue *SRV = dyn_cast<SymbolRegionValue>(*SI))
+ Tainted = Tainted || isTainted(SRV->getRegion(), Kind);
+
+    // If this is a SymbolCast from a tainted value, it's also tainted.
+ if (const SymbolCast *SC = dyn_cast<SymbolCast>(*SI))
+ Tainted = Tainted || isTainted(SC->getOperand(), Kind);
+
+ if (Tainted)
+ return true;
+ }
+
+ return Tainted;
+}
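
Putting the taint queries together: taint is recorded per atomic symbol in the generic data map, and a compound symbolic expression counts as tainted as soon as any symbol it depends on is. The toy model below (illustrative types only, not the analyzer's SymExpr hierarchy) shows that dependency walk:

    #include <set>
    #include <string>
    #include <vector>

    // Toy version of the taint query: taint is recorded only for atomic symbols,
    // and a compound expression is tainted when any symbol it depends on is (the
    // real code walks SymExpr::symbol_iterator for this).
    struct ToyExpr {
      std::string name;             // the atomic symbol this node stands for, if any
      std::vector<ToyExpr> parts;   // sub-expressions: LHS/RHS, cast operand, ...
    };

    static bool isTainted(const ToyExpr &E, const std::set<std::string> &TaintMap) {
      if (TaintMap.count(E.name))
        return true;                // the node's own symbol is tainted
      for (const ToyExpr &Sub : E.parts)
        if (isTainted(Sub, TaintMap))
          return true;              // taint propagates up from any operand
      return false;
    }

    int main() {
      std::set<std::string> TaintMap = {"conj_$1"};        // e.g. marked by a checker
      ToyExpr Sum{"", {{"conj_$1", {}}, {"reg_$2", {}}}};  // models conj_$1 + reg_$2
      ToyExpr Clean{"reg_$3", {}};
      return (isTainted(Sum, TaintMap) && !isTainted(Clean, TaintMap)) ? 0 : 1;
    }
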
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
new file mode 100644
index 0000000..98eb958
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -0,0 +1,442 @@
+//== RangeConstraintManager.cpp - Manage range constraints.------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines RangeConstraintManager, a class that tracks simple
+// equality and inequality constraints on symbolic values of ProgramState.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SimpleConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/ImmutableSet.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace { class ConstraintRange {}; }
+static int ConstraintRangeIndex = 0;
+
+/// A Range represents the closed range [from, to]. The caller must
+/// guarantee that from <= to. Note that Range is immutable, so as not
+/// to subvert RangeSet's immutability.
+namespace {
+class Range : public std::pair<const llvm::APSInt*,
+ const llvm::APSInt*> {
+public:
+ Range(const llvm::APSInt &from, const llvm::APSInt &to)
+ : std::pair<const llvm::APSInt*, const llvm::APSInt*>(&from, &to) {
+ assert(from <= to);
+ }
+ bool Includes(const llvm::APSInt &v) const {
+ return *first <= v && v <= *second;
+ }
+ const llvm::APSInt &From() const {
+ return *first;
+ }
+ const llvm::APSInt &To() const {
+ return *second;
+ }
+ const llvm::APSInt *getConcreteValue() const {
+ return &From() == &To() ? &From() : NULL;
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddPointer(&From());
+ ID.AddPointer(&To());
+ }
+};
+
+
+class RangeTrait : public llvm::ImutContainerInfo<Range> {
+public:
+ // When comparing if one Range is less than another, we should compare
+ // the actual APSInt values instead of their pointers. This keeps the order
+ // consistent (instead of comparing by pointer values) and can potentially
+ // be used to speed up some of the operations in RangeSet.
+ static inline bool isLess(key_type_ref lhs, key_type_ref rhs) {
+ return *lhs.first < *rhs.first || (!(*rhs.first < *lhs.first) &&
+ *lhs.second < *rhs.second);
+ }
+};
+
+/// RangeSet contains a set of ranges. If the set is empty, then
+/// the value of a symbol is overly constrained and there are no
+/// possible values for that symbol.
+class RangeSet {
+ typedef llvm::ImmutableSet<Range, RangeTrait> PrimRangeSet;
+ PrimRangeSet ranges; // no need to make const, since it is an
+ // ImmutableSet - this allows default operator=
+ // to work.
+public:
+ typedef PrimRangeSet::Factory Factory;
+ typedef PrimRangeSet::iterator iterator;
+
+ RangeSet(PrimRangeSet RS) : ranges(RS) {}
+
+ iterator begin() const { return ranges.begin(); }
+ iterator end() const { return ranges.end(); }
+
+ bool isEmpty() const { return ranges.isEmpty(); }
+
+ /// Construct a new RangeSet representing '{ [from, to] }'.
+ RangeSet(Factory &F, const llvm::APSInt &from, const llvm::APSInt &to)
+ : ranges(F.add(F.getEmptySet(), Range(from, to))) {}
+
+ /// Profile - Generates a hash profile of this RangeSet for use
+ /// by FoldingSet.
+ void Profile(llvm::FoldingSetNodeID &ID) const { ranges.Profile(ID); }
+
+  /// getConcreteValue - If a symbol is constrained to equal a specific integer
+ /// constant then this method returns that value. Otherwise, it returns
+ /// NULL.
+ const llvm::APSInt* getConcreteValue() const {
+ return ranges.isSingleton() ? ranges.begin()->getConcreteValue() : 0;
+ }
+
+private:
+ void IntersectInRange(BasicValueFactory &BV, Factory &F,
+ const llvm::APSInt &Lower,
+ const llvm::APSInt &Upper,
+ PrimRangeSet &newRanges,
+ PrimRangeSet::iterator &i,
+ PrimRangeSet::iterator &e) const {
+ // There are six cases for each range R in the set:
+ // 1. R is entirely before the intersection range.
+ // 2. R is entirely after the intersection range.
+ // 3. R contains the entire intersection range.
+ // 4. R starts before the intersection range and ends in the middle.
+ // 5. R starts in the middle of the intersection range and ends after it.
+ // 6. R is entirely contained in the intersection range.
+ // These correspond to each of the conditions below.
+ for (/* i = begin(), e = end() */; i != e; ++i) {
+ if (i->To() < Lower) {
+ continue;
+ }
+ if (i->From() > Upper) {
+ break;
+ }
+
+ if (i->Includes(Lower)) {
+ if (i->Includes(Upper)) {
+ newRanges = F.add(newRanges, Range(BV.getValue(Lower),
+ BV.getValue(Upper)));
+ break;
+ } else
+ newRanges = F.add(newRanges, Range(BV.getValue(Lower), i->To()));
+ } else {
+ if (i->Includes(Upper)) {
+ newRanges = F.add(newRanges, Range(i->From(), BV.getValue(Upper)));
+ break;
+ } else
+ newRanges = F.add(newRanges, *i);
+ }
+ }
+ }
+
+public:
+ // Returns a set containing the values in the receiving set, intersected with
+ // the closed range [Lower, Upper]. Unlike the Range type, this range uses
+ // modular arithmetic, corresponding to the common treatment of C integer
+ // overflow. Thus, if the Lower bound is greater than the Upper bound, the
+ // range is taken to wrap around. This is equivalent to taking the
+ // intersection with the two ranges [Min, Upper] and [Lower, Max],
+ // or, alternatively, /removing/ all integers between Upper and Lower.
+ RangeSet Intersect(BasicValueFactory &BV, Factory &F,
+ const llvm::APSInt &Lower,
+ const llvm::APSInt &Upper) const {
+ PrimRangeSet newRanges = F.getEmptySet();
+
+ PrimRangeSet::iterator i = begin(), e = end();
+ if (Lower <= Upper)
+ IntersectInRange(BV, F, Lower, Upper, newRanges, i, e);
+ else {
+ // The order of the next two statements is important!
+ // IntersectInRange() does not reset the iteration state for i and e.
+      // Therefore, the lower range must be handled first.
+ IntersectInRange(BV, F, BV.getMinValue(Upper), Upper, newRanges, i, e);
+ IntersectInRange(BV, F, Lower, BV.getMaxValue(Lower), newRanges, i, e);
+ }
+ return newRanges;
+ }
+
+ void print(raw_ostream &os) const {
+ bool isFirst = true;
+ os << "{ ";
+ for (iterator i = begin(), e = end(); i != e; ++i) {
+ if (isFirst)
+ isFirst = false;
+ else
+ os << ", ";
+
+ os << '[' << i->From().toString(10) << ", " << i->To().toString(10)
+ << ']';
+ }
+ os << " }";
+ }
+
+ bool operator==(const RangeSet &other) const {
+ return ranges == other.ranges;
+ }
+};
+} // end anonymous namespace
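
A concrete reading of the wraparound convention used by Intersect(): when Lower > Upper, the closed range [Lower, Upper] wraps past the ends of the type and is the union of [Lower, MAX] and [MIN, Upper]. The sketch below checks membership in such a range on 8-bit unsigned values:

    #include <cassert>
    #include <cstdint>

    // Membership test for the "wraparound" closed ranges described above, on 8-bit
    // unsigned values: when Lower > Upper the range wraps around the ends of the
    // type and is the union of [Lower, MAX] and [MIN, Upper].
    static bool inWrappedRange(uint8_t v, uint8_t lower, uint8_t upper) {
      if (lower <= upper)
        return lower <= v && v <= upper;  // ordinary closed range
      return v >= lower || v <= upper;    // wrapped: two pieces around the ends
    }

    int main() {
      // The wrapped range [254, 2] on uint8_t contains exactly 254, 255, 0, 1, 2.
      assert(inWrappedRange(254, 254, 2));
      assert(inWrappedRange(255, 254, 2));
      assert(inWrappedRange(0, 254, 2));
      assert(inWrappedRange(2, 254, 2));
      assert(!inWrappedRange(3, 254, 2));
      assert(!inWrappedRange(253, 254, 2));
      return 0;
    }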
+
+typedef llvm::ImmutableMap<SymbolRef,RangeSet> ConstraintRangeTy;
+
+namespace clang {
+namespace ento {
+template<>
+struct ProgramStateTrait<ConstraintRange>
+ : public ProgramStatePartialTrait<ConstraintRangeTy> {
+ static inline void *GDMIndex() { return &ConstraintRangeIndex; }
+};
+}
+}
+
+namespace {
+class RangeConstraintManager : public SimpleConstraintManager {
+ RangeSet GetRange(ProgramStateRef state, SymbolRef sym);
+public:
+ RangeConstraintManager(SubEngine &subengine)
+ : SimpleConstraintManager(subengine) {}
+
+ ProgramStateRef assumeSymNE(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
+
+ ProgramStateRef assumeSymEQ(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
+
+ ProgramStateRef assumeSymLT(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
+
+ ProgramStateRef assumeSymGT(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
+
+ ProgramStateRef assumeSymGE(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
+
+ ProgramStateRef assumeSymLE(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
+
+ const llvm::APSInt* getSymVal(ProgramStateRef St, SymbolRef sym) const;
+
+ // FIXME: Refactor into SimpleConstraintManager?
+ bool isEqual(ProgramStateRef St, SymbolRef sym, const llvm::APSInt& V) const {
+ const llvm::APSInt *i = getSymVal(St, sym);
+ return i ? *i == V : false;
+ }
+
+ ProgramStateRef removeDeadBindings(ProgramStateRef St, SymbolReaper& SymReaper);
+
+ void print(ProgramStateRef St, raw_ostream &Out,
+ const char* nl, const char *sep);
+
+private:
+ RangeSet::Factory F;
+};
+
+} // end anonymous namespace
+
+ConstraintManager* ento::CreateRangeConstraintManager(ProgramStateManager&,
+ SubEngine &subeng) {
+ return new RangeConstraintManager(subeng);
+}
+
+const llvm::APSInt* RangeConstraintManager::getSymVal(ProgramStateRef St,
+ SymbolRef sym) const {
+ const ConstraintRangeTy::data_type *T = St->get<ConstraintRange>(sym);
+ return T ? T->getConcreteValue() : NULL;
+}
+
+/// Scan all symbols referenced by the constraints. If a symbol may be dead,
+/// as reported by the SymbolReaper, remove its constraints from the map.
+ProgramStateRef
+RangeConstraintManager::removeDeadBindings(ProgramStateRef state,
+ SymbolReaper& SymReaper) {
+
+ ConstraintRangeTy CR = state->get<ConstraintRange>();
+ ConstraintRangeTy::Factory& CRFactory = state->get_context<ConstraintRange>();
+
+ for (ConstraintRangeTy::iterator I = CR.begin(), E = CR.end(); I != E; ++I) {
+ SymbolRef sym = I.getKey();
+ if (SymReaper.maybeDead(sym))
+ CR = CRFactory.remove(CR, sym);
+ }
+
+ return state->set<ConstraintRange>(CR);
+}
+
+RangeSet
+RangeConstraintManager::GetRange(ProgramStateRef state, SymbolRef sym) {
+ if (ConstraintRangeTy::data_type* V = state->get<ConstraintRange>(sym))
+ return *V;
+
+ // Lazily generate a new RangeSet representing all possible values for the
+ // given symbol type.
+ QualType T = state->getSymbolManager().getType(sym);
+ BasicValueFactory& BV = state->getBasicVals();
+ return RangeSet(F, BV.getMinValue(T), BV.getMaxValue(T));
+}
+
+//===----------------------------------------------------------------------===//
+// assumeSymX methods: public interface for RangeConstraintManager.
+//===----------------------------------------------------------------------===//
+
+// The syntax for ranges below is mathematical, using [x, y] for closed ranges
+// and (x, y) for open ranges. These ranges are modular, corresponding with
+// a common treatment of C integer overflow. This means that these methods
+// do not have to worry about overflow; RangeSet::Intersect can handle such a
+// "wraparound" range.
+// As an example, the range [UINT_MAX-1, 3) contains five values: UINT_MAX-1,
+// UINT_MAX, 0, 1, and 2.
+
+ProgramStateRef
+RangeConstraintManager::assumeSymNE(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ BasicValueFactory &BV = state->getBasicVals();
+
+ llvm::APSInt Lower = Int-Adjustment;
+ llvm::APSInt Upper = Lower;
+ --Lower;
+ ++Upper;
+
+ // [Int-Adjustment+1, Int-Adjustment-1]
+ // Notice that the lower bound is greater than the upper bound.
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, Upper, Lower);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
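
Numerically, assuming 'sym != Int' (with a zero Adjustment) intersects the symbol's range with the wrapped range [Int+1, Int-1], which contains every value of the type except Int itself. A small check of that claim on an 8-bit domain:

    #include <cassert>
    #include <cstdint>

    // Check on an 8-bit domain that intersecting with the wrapped range
    // [Int+1, Int-1] excludes exactly the single value Int.
    int main() {
      const uint8_t Int = 5;
      const uint8_t Lower = static_cast<uint8_t>(Int + 1);  // 6
      const uint8_t Upper = static_cast<uint8_t>(Int - 1);  // 4 (would wrap if Int were 0)
      for (unsigned v = 0; v <= 255; ++v) {
        bool inRange = (v >= Lower || v <= Upper);  // membership in the wrapped [6, 4]
        assert(inRange == (v != Int));              // only 5 is ruled out
      }
      return 0;
    }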
+
+ProgramStateRef
+RangeConstraintManager::assumeSymEQ(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ // [Int-Adjustment, Int-Adjustment]
+ BasicValueFactory &BV = state->getBasicVals();
+ llvm::APSInt AdjInt = Int-Adjustment;
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, AdjInt, AdjInt);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
+
+ProgramStateRef
+RangeConstraintManager::assumeSymLT(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ BasicValueFactory &BV = state->getBasicVals();
+
+ QualType T = state->getSymbolManager().getType(sym);
+ const llvm::APSInt &Min = BV.getMinValue(T);
+
+ // Special case for Int == Min. This is always false.
+ if (Int == Min)
+ return NULL;
+
+ llvm::APSInt Lower = Min-Adjustment;
+ llvm::APSInt Upper = Int-Adjustment;
+ --Upper;
+
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
+
+ProgramStateRef
+RangeConstraintManager::assumeSymGT(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ BasicValueFactory &BV = state->getBasicVals();
+
+ QualType T = state->getSymbolManager().getType(sym);
+ const llvm::APSInt &Max = BV.getMaxValue(T);
+
+ // Special case for Int == Max. This is always false.
+ if (Int == Max)
+ return NULL;
+
+ llvm::APSInt Lower = Int-Adjustment;
+ llvm::APSInt Upper = Max-Adjustment;
+ ++Lower;
+
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
+
+ProgramStateRef
+RangeConstraintManager::assumeSymGE(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ BasicValueFactory &BV = state->getBasicVals();
+
+ QualType T = state->getSymbolManager().getType(sym);
+ const llvm::APSInt &Min = BV.getMinValue(T);
+
+ // Special case for Int == Min. This is always feasible.
+ if (Int == Min)
+ return state;
+
+ const llvm::APSInt &Max = BV.getMaxValue(T);
+
+ llvm::APSInt Lower = Int-Adjustment;
+ llvm::APSInt Upper = Max-Adjustment;
+
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
+
+ProgramStateRef
+RangeConstraintManager::assumeSymLE(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ BasicValueFactory &BV = state->getBasicVals();
+
+ QualType T = state->getSymbolManager().getType(sym);
+ const llvm::APSInt &Max = BV.getMaxValue(T);
+
+ // Special case for Int == Max. This is always feasible.
+ if (Int == Max)
+ return state;
+
+ const llvm::APSInt &Min = BV.getMinValue(T);
+
+ llvm::APSInt Lower = Min-Adjustment;
+ llvm::APSInt Upper = Int-Adjustment;
+
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
+
+//===----------------------------------------------------------------------===//
+// Pretty-printing.
+//===----------------------------------------------------------------------===//
+
+void RangeConstraintManager::print(ProgramStateRef St, raw_ostream &Out,
+ const char* nl, const char *sep) {
+
+ ConstraintRangeTy Ranges = St->get<ConstraintRange>();
+
+ if (Ranges.isEmpty()) {
+ Out << nl << sep << "Ranges are empty." << nl;
+ return;
+ }
+
+ Out << nl << sep << "Ranges of symbol values:";
+ for (ConstraintRangeTy::iterator I=Ranges.begin(), E=Ranges.end(); I!=E; ++I){
+ Out << nl << ' ' << I.getKey() << " : ";
+ I.getData().print(Out);
+ }
+ Out << nl;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
new file mode 100644
index 0000000..cc3ea8c3
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -0,0 +1,2009 @@
+//== RegionStore.cpp - Field-sensitive store model --------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a basic region store model. The model is field-sensitive,
+// but it assumes nothing about the shape of the heap, so recursive data
+// structures are largely ignored; in essence this is a 1-limiting analysis.
+// Parameter pointers are assumed not to alias, and pointee objects of
+// parameters are created lazily.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ObjCMessage.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "llvm/ADT/ImmutableList.h"
+#include "llvm/ADT/ImmutableMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+using llvm::Optional;
+
+//===----------------------------------------------------------------------===//
+// Representation of binding keys.
+//===----------------------------------------------------------------------===//
+
+namespace {
+class BindingKey {
+public:
+ enum Kind { Direct = 0x0, Default = 0x1 };
+private:
+  llvm::PointerIntPair<const MemRegion*, 1> P;
+ uint64_t Offset;
+
+ explicit BindingKey(const MemRegion *r, uint64_t offset, Kind k)
+ : P(r, (unsigned) k), Offset(offset) {}
+public:
+
+ bool isDirect() const { return P.getInt() == Direct; }
+
+ const MemRegion *getRegion() const { return P.getPointer(); }
+ uint64_t getOffset() const { return Offset; }
+
+ void Profile(llvm::FoldingSetNodeID& ID) const {
+ ID.AddPointer(P.getOpaqueValue());
+ ID.AddInteger(Offset);
+ }
+
+ static BindingKey Make(const MemRegion *R, Kind k);
+
+ bool operator<(const BindingKey &X) const {
+ if (P.getOpaqueValue() < X.P.getOpaqueValue())
+ return true;
+ if (P.getOpaqueValue() > X.P.getOpaqueValue())
+ return false;
+ return Offset < X.Offset;
+ }
+
+ bool operator==(const BindingKey &X) const {
+ return P.getOpaqueValue() == X.P.getOpaqueValue() &&
+ Offset == X.Offset;
+ }
+
+ bool isValid() const {
+ return getRegion() != NULL;
+ }
+};
+} // end anonymous namespace
+
+BindingKey BindingKey::Make(const MemRegion *R, Kind k) {
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ const RegionRawOffset &O = ER->getAsArrayOffset();
+
+ // FIXME: There are some ElementRegions for which we cannot compute
+ // raw offsets yet, including regions with symbolic offsets. These will be
+ // ignored by the store.
+ return BindingKey(O.getRegion(), O.getOffset().getQuantity(), k);
+ }
+
+ return BindingKey(R, 0, k);
+}
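
The effect of BindingKey::Make() is that a concrete element access is folded into its base region plus a raw byte offset, so two ways of naming the same location end up with the same key. The simplified model below uses a std::map and illustrative stand-in types for RegionBindings and MemRegion:

    #include <cstdint>
    #include <map>

    // Simplified picture of the (region, offset, kind) keys built above: a concrete
    // element access folds into its base region plus a raw byte offset, so
    // equivalent ways of naming a location share one map entry.
    struct ToyKey {
      const void *baseRegion;
      uint64_t byteOffset;
      bool isDirect;

      bool operator<(const ToyKey &O) const {
        if (baseRegion != O.baseRegion) return baseRegion < O.baseRegion;
        if (byteOffset != O.byteOffset) return byteOffset < O.byteOffset;
        return isDirect < O.isDirect;
      }
    };

    int main() {
      int array[4] = {};
      const char *base = reinterpret_cast<const char *>(array);
      const char *elem = reinterpret_cast<const char *>(&array[2]);

      std::map<ToyKey, int> bindings;
      ToyKey byIndex{array, 2 * sizeof(int), true};                      // "array[2]" via its index
      ToyKey byOffset{array, static_cast<uint64_t>(elem - base), true};  // same location, raw offset

      bindings[byIndex] = 42;
      return bindings.count(byOffset) == 1 ? 0 : 1;  // both resolve to the same binding
    }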
+
+namespace llvm {
+ static inline
+ raw_ostream &operator<<(raw_ostream &os, BindingKey K) {
+ os << '(' << K.getRegion() << ',' << K.getOffset()
+ << ',' << (K.isDirect() ? "direct" : "default")
+ << ')';
+ return os;
+ }
+} // end llvm namespace
+
+//===----------------------------------------------------------------------===//
+// Actual Store type.
+//===----------------------------------------------------------------------===//
+
+typedef llvm::ImmutableMap<BindingKey, SVal> RegionBindings;
+
+//===----------------------------------------------------------------------===//
+// Fine-grained control of RegionStoreManager.
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct minimal_features_tag {};
+struct maximal_features_tag {};
+
+class RegionStoreFeatures {
+ bool SupportsFields;
+public:
+ RegionStoreFeatures(minimal_features_tag) :
+ SupportsFields(false) {}
+
+ RegionStoreFeatures(maximal_features_tag) :
+ SupportsFields(true) {}
+
+ void enableFields(bool t) { SupportsFields = t; }
+
+ bool supportsFields() const { return SupportsFields; }
+};
+}
+
+//===----------------------------------------------------------------------===//
+// Main RegionStore logic.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class RegionStoreSubRegionMap : public SubRegionMap {
+public:
+ typedef llvm::ImmutableSet<const MemRegion*> Set;
+ typedef llvm::DenseMap<const MemRegion*, Set> Map;
+private:
+ Set::Factory F;
+ Map M;
+public:
+ bool add(const MemRegion* Parent, const MemRegion* SubRegion) {
+ Map::iterator I = M.find(Parent);
+
+ if (I == M.end()) {
+ M.insert(std::make_pair(Parent, F.add(F.getEmptySet(), SubRegion)));
+ return true;
+ }
+
+ I->second = F.add(I->second, SubRegion);
+ return false;
+ }
+
+ void process(SmallVectorImpl<const SubRegion*> &WL, const SubRegion *R);
+
+ ~RegionStoreSubRegionMap() {}
+
+ const Set *getSubRegions(const MemRegion *Parent) const {
+ Map::const_iterator I = M.find(Parent);
+ return I == M.end() ? NULL : &I->second;
+ }
+
+ bool iterSubRegions(const MemRegion* Parent, Visitor& V) const {
+ Map::const_iterator I = M.find(Parent);
+
+ if (I == M.end())
+ return true;
+
+ Set S = I->second;
+ for (Set::iterator SI=S.begin(),SE=S.end(); SI != SE; ++SI) {
+ if (!V.Visit(Parent, *SI))
+ return false;
+ }
+
+ return true;
+ }
+};
+
+void
+RegionStoreSubRegionMap::process(SmallVectorImpl<const SubRegion*> &WL,
+ const SubRegion *R) {
+ const MemRegion *superR = R->getSuperRegion();
+ if (add(superR, R))
+ if (const SubRegion *sr = dyn_cast<SubRegion>(superR))
+ WL.push_back(sr);
+}
+
+class RegionStoreManager : public StoreManager {
+ const RegionStoreFeatures Features;
+ RegionBindings::Factory RBFactory;
+
+public:
+ RegionStoreManager(ProgramStateManager& mgr, const RegionStoreFeatures &f)
+ : StoreManager(mgr),
+ Features(f),
+ RBFactory(mgr.getAllocator()) {}
+
+ SubRegionMap *getSubRegionMap(Store store) {
+ return getRegionStoreSubRegionMap(store);
+ }
+
+ RegionStoreSubRegionMap *getRegionStoreSubRegionMap(Store store);
+
+ Optional<SVal> getDirectBinding(RegionBindings B, const MemRegion *R);
+  /// getDefaultBinding - Returns the optional default binding associated
+  /// with a region and its subregions, if one exists.
+ Optional<SVal> getDefaultBinding(RegionBindings B, const MemRegion *R);
+
+ /// setImplicitDefaultValue - Set the default binding for the provided
+ /// MemRegion to the value implicitly defined for compound literals when
+ /// the value is not specified.
+ StoreRef setImplicitDefaultValue(Store store, const MemRegion *R, QualType T);
+
+ /// ArrayToPointer - Emulates the "decay" of an array to a pointer
+ /// type. 'Array' represents the lvalue of the array being decayed
+ /// to a pointer, and the returned SVal represents the decayed
+ /// version of that lvalue (i.e., a pointer to the first element of
+ /// the array). This is called by ExprEngine when evaluating
+ /// casts from arrays to pointers.
+ SVal ArrayToPointer(Loc Array);
+
+ /// For DerivedToBase casts, create a CXXBaseObjectRegion and return it.
+ virtual SVal evalDerivedToBase(SVal derived, QualType basePtrType);
+
+  /// \brief Evaluates a C++ dynamic_cast.
+ /// The callback may result in the following 3 scenarios:
+ /// - Successful cast (ex: derived is subclass of base).
+ /// - Failed cast (ex: derived is definitely not a subclass of base).
+ /// - We don't know (base is a symbolic region and we don't have
+ /// enough info to determine if the cast will succeed at run time).
+ /// The function returns an SVal representing the derived class; it's
+ /// valid only if Failed flag is set to false.
+ virtual SVal evalDynamicCast(SVal base, QualType derivedPtrType,bool &Failed);
+
+ StoreRef getInitialStore(const LocationContext *InitLoc) {
+ return StoreRef(RBFactory.getEmptyMap().getRootWithoutRetain(), *this);
+ }
+
+ //===-------------------------------------------------------------------===//
+ // Binding values to regions.
+ //===-------------------------------------------------------------------===//
+ RegionBindings invalidateGlobalRegion(MemRegion::Kind K,
+ const Expr *Ex,
+ unsigned Count,
+ const LocationContext *LCtx,
+ RegionBindings B,
+ InvalidatedRegions *Invalidated);
+
+ StoreRef invalidateRegions(Store store, ArrayRef<const MemRegion *> Regions,
+ const Expr *E, unsigned Count,
+ const LocationContext *LCtx,
+ InvalidatedSymbols &IS,
+ const CallOrObjCMessage *Call,
+ InvalidatedRegions *Invalidated);
+
+public: // Made public for helper classes.
+
+ void RemoveSubRegionBindings(RegionBindings &B, const MemRegion *R,
+ RegionStoreSubRegionMap &M);
+
+ RegionBindings addBinding(RegionBindings B, BindingKey K, SVal V);
+
+ RegionBindings addBinding(RegionBindings B, const MemRegion *R,
+ BindingKey::Kind k, SVal V);
+
+ const SVal *lookup(RegionBindings B, BindingKey K);
+ const SVal *lookup(RegionBindings B, const MemRegion *R, BindingKey::Kind k);
+
+ RegionBindings removeBinding(RegionBindings B, BindingKey K);
+ RegionBindings removeBinding(RegionBindings B, const MemRegion *R,
+ BindingKey::Kind k);
+
+ RegionBindings removeBinding(RegionBindings B, const MemRegion *R) {
+ return removeBinding(removeBinding(B, R, BindingKey::Direct), R,
+ BindingKey::Default);
+ }
+
+public: // Part of public interface to class.
+
+ StoreRef Bind(Store store, Loc LV, SVal V);
+
+ // BindDefault is only used to initialize a region with a default value.
+ StoreRef BindDefault(Store store, const MemRegion *R, SVal V) {
+ RegionBindings B = GetRegionBindings(store);
+ assert(!lookup(B, R, BindingKey::Default));
+ assert(!lookup(B, R, BindingKey::Direct));
+ return StoreRef(addBinding(B, R, BindingKey::Default, V)
+ .getRootWithoutRetain(), *this);
+ }
+
+ StoreRef BindCompoundLiteral(Store store, const CompoundLiteralExpr *CL,
+ const LocationContext *LC, SVal V);
+
+ StoreRef BindDecl(Store store, const VarRegion *VR, SVal InitVal);
+
+ StoreRef BindDeclWithNoInit(Store store, const VarRegion *) {
+ return StoreRef(store, *this);
+ }
+
+ /// BindStruct - Bind a compound value to a structure.
+ StoreRef BindStruct(Store store, const TypedValueRegion* R, SVal V);
+
+ StoreRef BindArray(Store store, const TypedValueRegion* R, SVal V);
+
+ /// KillStruct - Set the entire struct to unknown.
+ StoreRef KillStruct(Store store, const TypedRegion* R, SVal DefaultVal);
+
+ StoreRef Remove(Store store, Loc LV);
+
+ void incrementReferenceCount(Store store) {
+ GetRegionBindings(store).manualRetain();
+ }
+
+ /// If the StoreManager supports it, decrement the reference count of
+ /// the specified Store object. If the reference count hits 0, the memory
+ /// associated with the object is recycled.
+ void decrementReferenceCount(Store store) {
+ GetRegionBindings(store).manualRelease();
+ }
+
+ bool includedInBindings(Store store, const MemRegion *region) const;
+
+ /// \brief Return the value bound to specified location in a given state.
+ ///
+ /// The high level logic for this method is this:
+ /// getBinding (L)
+ /// if L has binding
+ /// return L's binding
+ /// else if L is in killset
+ /// return unknown
+ /// else
+ /// if L is on stack or heap
+ /// return undefined
+ /// else
+ /// return symbolic
+ SVal getBinding(Store store, Loc L, QualType T = QualType());
+
+ SVal getBindingForElement(Store store, const ElementRegion *R);
+
+ SVal getBindingForField(Store store, const FieldRegion *R);
+
+ SVal getBindingForObjCIvar(Store store, const ObjCIvarRegion *R);
+
+ SVal getBindingForVar(Store store, const VarRegion *R);
+
+ SVal getBindingForLazySymbol(const TypedValueRegion *R);
+
+ SVal getBindingForFieldOrElementCommon(Store store, const TypedValueRegion *R,
+ QualType Ty, const MemRegion *superR);
+
+ SVal getLazyBinding(const MemRegion *lazyBindingRegion,
+ Store lazyBindingStore);
+
+ /// Get bindings for the values in a struct and return a CompoundVal, used
+ /// when doing struct copy:
+ /// struct s x, y;
+ /// x = y;
+ /// y's value is retrieved by this method.
+ SVal getBindingForStruct(Store store, const TypedValueRegion* R);
+
+ SVal getBindingForArray(Store store, const TypedValueRegion* R);
+
+ /// Used to lazily generate derived symbols for bindings that are defined
+ /// implicitly by default bindings in a super region.
+ Optional<SVal> getBindingForDerivedDefaultValue(RegionBindings B,
+ const MemRegion *superR,
+ const TypedValueRegion *R,
+ QualType Ty);
+
+  /// Get the store and region whose binding this region R corresponds to.
+ std::pair<Store, const MemRegion*>
+ GetLazyBinding(RegionBindings B, const MemRegion *R,
+ const MemRegion *originalRegion);
+
+ StoreRef CopyLazyBindings(nonloc::LazyCompoundVal V, Store store,
+ const TypedRegion *R);
+
+ //===------------------------------------------------------------------===//
+ // State pruning.
+ //===------------------------------------------------------------------===//
+
+ /// removeDeadBindings - Scans the RegionStore of 'state' for dead values.
+ /// It returns a new Store with these values removed.
+ StoreRef removeDeadBindings(Store store, const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper);
+
+ StoreRef enterStackFrame(ProgramStateRef state,
+ const LocationContext *callerCtx,
+ const StackFrameContext *calleeCtx);
+
+ //===------------------------------------------------------------------===//
+ // Region "extents".
+ //===------------------------------------------------------------------===//
+
+ // FIXME: This method will soon be eliminated; see the note in Store.h.
+ DefinedOrUnknownSVal getSizeInElements(ProgramStateRef state,
+ const MemRegion* R, QualType EleTy);
+
+ //===------------------------------------------------------------------===//
+ // Utility methods.
+ //===------------------------------------------------------------------===//
+
+ static inline RegionBindings GetRegionBindings(Store store) {
+ return RegionBindings(static_cast<const RegionBindings::TreeTy*>(store));
+ }
+
+ void print(Store store, raw_ostream &Out, const char* nl,
+ const char *sep);
+
+ void iterBindings(Store store, BindingsHandler& f) {
+ RegionBindings B = GetRegionBindings(store);
+ for (RegionBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) {
+ const BindingKey &K = I.getKey();
+ if (!K.isDirect())
+ continue;
+ if (const SubRegion *R = dyn_cast<SubRegion>(I.getKey().getRegion())) {
+ // FIXME: Possibly incorporate the offset?
+ if (!f.HandleBinding(*this, store, R, I.getData()))
+ return;
+ }
+ }
+ }
+};
+
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
+// RegionStore creation.
+//===----------------------------------------------------------------------===//
+
+StoreManager *ento::CreateRegionStoreManager(ProgramStateManager& StMgr) {
+ RegionStoreFeatures F = maximal_features_tag();
+ return new RegionStoreManager(StMgr, F);
+}
+
+StoreManager *
+ento::CreateFieldsOnlyRegionStoreManager(ProgramStateManager &StMgr) {
+ RegionStoreFeatures F = minimal_features_tag();
+ F.enableFields(true);
+ return new RegionStoreManager(StMgr, F);
+}
+
+
+RegionStoreSubRegionMap*
+RegionStoreManager::getRegionStoreSubRegionMap(Store store) {
+ RegionBindings B = GetRegionBindings(store);
+ RegionStoreSubRegionMap *M = new RegionStoreSubRegionMap();
+
+ SmallVector<const SubRegion*, 10> WL;
+
+ for (RegionBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I)
+ if (const SubRegion *R = dyn_cast<SubRegion>(I.getKey().getRegion()))
+ M->process(WL, R);
+
+ // We also need to record in the subregion map "intermediate" regions that
+ // don't have direct bindings but are super regions of those that do.
+ while (!WL.empty()) {
+ const SubRegion *R = WL.back();
+ WL.pop_back();
+ M->process(WL, R);
+ }
+
+ return M;
+}
+
+//===----------------------------------------------------------------------===//
+// Region Cluster analysis.
+//===----------------------------------------------------------------------===//
+
+namespace {
+template <typename DERIVED>
+class ClusterAnalysis {
+protected:
+ typedef BumpVector<BindingKey> RegionCluster;
+ typedef llvm::DenseMap<const MemRegion *, RegionCluster *> ClusterMap;
+ llvm::DenseMap<const RegionCluster*, unsigned> Visited;
+ typedef SmallVector<std::pair<const MemRegion *, RegionCluster*>, 10>
+ WorkList;
+
+ BumpVectorContext BVC;
+ ClusterMap ClusterM;
+ WorkList WL;
+
+ RegionStoreManager &RM;
+ ASTContext &Ctx;
+ SValBuilder &svalBuilder;
+
+ RegionBindings B;
+
+ const bool includeGlobals;
+
+public:
+ ClusterAnalysis(RegionStoreManager &rm, ProgramStateManager &StateMgr,
+ RegionBindings b, const bool includeGlobals)
+ : RM(rm), Ctx(StateMgr.getContext()),
+ svalBuilder(StateMgr.getSValBuilder()),
+ B(b), includeGlobals(includeGlobals) {}
+
+ RegionBindings getRegionBindings() const { return B; }
+
+ RegionCluster &AddToCluster(BindingKey K) {
+ const MemRegion *R = K.getRegion();
+ const MemRegion *baseR = R->getBaseRegion();
+ RegionCluster &C = getCluster(baseR);
+ C.push_back(K, BVC);
+ static_cast<DERIVED*>(this)->VisitAddedToCluster(baseR, C);
+ return C;
+ }
+
+ bool isVisited(const MemRegion *R) {
+ return (bool) Visited[&getCluster(R->getBaseRegion())];
+ }
+
+ RegionCluster& getCluster(const MemRegion *R) {
+ RegionCluster *&CRef = ClusterM[R];
+ if (!CRef) {
+ void *Mem = BVC.getAllocator().template Allocate<RegionCluster>();
+ CRef = new (Mem) RegionCluster(BVC, 10);
+ }
+ return *CRef;
+ }
+
+ void GenerateClusters() {
+ // Scan the entire set of bindings and make the region clusters.
+ for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){
+ RegionCluster &C = AddToCluster(RI.getKey());
+ if (const MemRegion *R = RI.getData().getAsRegion()) {
+ // Generate a cluster, but don't add the region to the cluster
+ // if there aren't any bindings.
+ getCluster(R->getBaseRegion());
+ }
+ if (includeGlobals) {
+ const MemRegion *R = RI.getKey().getRegion();
+ if (isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace()))
+ AddToWorkList(R, C);
+ }
+ }
+ }
+
+ bool AddToWorkList(const MemRegion *R, RegionCluster &C) {
+ if (unsigned &visited = Visited[&C])
+ return false;
+ else
+ visited = 1;
+
+ WL.push_back(std::make_pair(R, &C));
+ return true;
+ }
+
+ bool AddToWorkList(BindingKey K) {
+ return AddToWorkList(K.getRegion());
+ }
+
+ bool AddToWorkList(const MemRegion *R) {
+ const MemRegion *baseR = R->getBaseRegion();
+ return AddToWorkList(baseR, getCluster(baseR));
+ }
+
+ void RunWorkList() {
+ while (!WL.empty()) {
+ const MemRegion *baseR;
+ RegionCluster *C;
+ llvm::tie(baseR, C) = WL.back();
+ WL.pop_back();
+
+ // First visit the cluster.
+ static_cast<DERIVED*>(this)->VisitCluster(baseR, C->begin(), C->end());
+
+ // Next, visit the base region.
+ static_cast<DERIVED*>(this)->VisitBaseRegion(baseR);
+ }
+ }
+
+public:
+ void VisitAddedToCluster(const MemRegion *baseR, RegionCluster &C) {}
+ void VisitCluster(const MemRegion *baseR, BindingKey *I, BindingKey *E) {}
+ void VisitBaseRegion(const MemRegion *baseR) {}
+};
+}
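
ClusterAnalysis relies on the curiously recurring template pattern: the base template owns the cluster map and the worklist loop and dispatches the Visit* hooks to the derived worker at compile time, so invalidation and dead-binding removal can share the traversal without virtual calls. A pared-down sketch of that structure, with illustrative names only:

    #include <iostream>
    #include <vector>

    // The base template owns the worklist loop and statically dispatches its
    // visit hook to the derived worker, so each worker only overrides what it
    // needs.
    template <typename Derived>
    class WorklistBase {
      std::vector<int> WL;                 // stands in for (base region, cluster) pairs
    public:
      void add(int item) { WL.push_back(item); }

      void run() {
        while (!WL.empty()) {
          int item = WL.back();
          WL.pop_back();
          static_cast<Derived *>(this)->visit(item);  // compile-time dispatch
        }
      }

      void visit(int) {}                   // default no-op, like the empty Visit* hooks
    };

    class PrintingWorker : public WorklistBase<PrintingWorker> {
    public:
      void visit(int item) { std::cout << "visiting " << item << "\n"; }
    };

    int main() {
      PrintingWorker W;
      W.add(1);
      W.add(2);
      W.run();                             // visits 2 then 1 (LIFO, like the real worklist)
      return 0;
    }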
+
+//===----------------------------------------------------------------------===//
+// Binding invalidation.
+//===----------------------------------------------------------------------===//
+
+void RegionStoreManager::RemoveSubRegionBindings(RegionBindings &B,
+ const MemRegion *R,
+ RegionStoreSubRegionMap &M) {
+
+ if (const RegionStoreSubRegionMap::Set *S = M.getSubRegions(R))
+ for (RegionStoreSubRegionMap::Set::iterator I = S->begin(), E = S->end();
+ I != E; ++I)
+ RemoveSubRegionBindings(B, *I, M);
+
+ B = removeBinding(B, R);
+}
+
+namespace {
+class invalidateRegionsWorker : public ClusterAnalysis<invalidateRegionsWorker>
+{
+ const Expr *Ex;
+ unsigned Count;
+ const LocationContext *LCtx;
+ StoreManager::InvalidatedSymbols &IS;
+ StoreManager::InvalidatedRegions *Regions;
+public:
+ invalidateRegionsWorker(RegionStoreManager &rm,
+ ProgramStateManager &stateMgr,
+ RegionBindings b,
+ const Expr *ex, unsigned count,
+ const LocationContext *lctx,
+ StoreManager::InvalidatedSymbols &is,
+ StoreManager::InvalidatedRegions *r,
+ bool includeGlobals)
+ : ClusterAnalysis<invalidateRegionsWorker>(rm, stateMgr, b, includeGlobals),
+ Ex(ex), Count(count), LCtx(lctx), IS(is), Regions(r) {}
+
+ void VisitCluster(const MemRegion *baseR, BindingKey *I, BindingKey *E);
+ void VisitBaseRegion(const MemRegion *baseR);
+
+private:
+ void VisitBinding(SVal V);
+};
+}
+
+void invalidateRegionsWorker::VisitBinding(SVal V) {
+ // A symbol? Mark it touched by the invalidation.
+ if (SymbolRef Sym = V.getAsSymbol())
+ IS.insert(Sym);
+
+ if (const MemRegion *R = V.getAsRegion()) {
+ AddToWorkList(R);
+ return;
+ }
+
+ // Is it a LazyCompoundVal? All references get invalidated as well.
+ if (const nonloc::LazyCompoundVal *LCS =
+ dyn_cast<nonloc::LazyCompoundVal>(&V)) {
+
+ const MemRegion *LazyR = LCS->getRegion();
+ RegionBindings B = RegionStoreManager::GetRegionBindings(LCS->getStore());
+
+ for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){
+ const SubRegion *baseR = dyn_cast<SubRegion>(RI.getKey().getRegion());
+ if (baseR && baseR->isSubRegionOf(LazyR))
+ VisitBinding(RI.getData());
+ }
+
+ return;
+ }
+}
+
+void invalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
+ BindingKey *I, BindingKey *E) {
+ for ( ; I != E; ++I) {
+ // Get the old binding. Is it a region? If so, add it to the worklist.
+ const BindingKey &K = *I;
+ if (const SVal *V = RM.lookup(B, K))
+ VisitBinding(*V);
+
+ B = RM.removeBinding(B, K);
+ }
+}
+
+void invalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
+ // Symbolic region? Mark that symbol touched by the invalidation.
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(baseR))
+ IS.insert(SR->getSymbol());
+
+ // BlockDataRegion? If so, invalidate captured variables that are passed
+ // by reference.
+ if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(baseR)) {
+ for (BlockDataRegion::referenced_vars_iterator
+ BI = BR->referenced_vars_begin(), BE = BR->referenced_vars_end() ;
+ BI != BE; ++BI) {
+ const VarRegion *VR = *BI;
+ const VarDecl *VD = VR->getDecl();
+ if (VD->getAttr<BlocksAttr>() || !VD->hasLocalStorage())
+ AddToWorkList(VR);
+ }
+ return;
+ }
+
+ // Otherwise, we have a normal data region. Record that we touched the region.
+ if (Regions)
+ Regions->push_back(baseR);
+
+ if (isa<AllocaRegion>(baseR) || isa<SymbolicRegion>(baseR)) {
+    // Invalidate the region by setting its default value to a
+    // conjured symbol. The type of the symbol is irrelevant.
+ DefinedOrUnknownSVal V =
+ svalBuilder.getConjuredSymbolVal(baseR, Ex, LCtx, Ctx.IntTy, Count);
+ B = RM.addBinding(B, baseR, BindingKey::Default, V);
+ return;
+ }
+
+ if (!baseR->isBoundable())
+ return;
+
+ const TypedValueRegion *TR = cast<TypedValueRegion>(baseR);
+ QualType T = TR->getValueType();
+
+ // Invalidate the binding.
+ if (T->isStructureOrClassType()) {
+    // Invalidate the region by setting its default value to a
+    // conjured symbol. The type of the symbol is irrelevant.
+ DefinedOrUnknownSVal V =
+ svalBuilder.getConjuredSymbolVal(baseR, Ex, LCtx, Ctx.IntTy, Count);
+ B = RM.addBinding(B, baseR, BindingKey::Default, V);
+ return;
+ }
+
+ if (const ArrayType *AT = Ctx.getAsArrayType(T)) {
+    // Set the default value of the array to a conjured symbol.
+ DefinedOrUnknownSVal V =
+ svalBuilder.getConjuredSymbolVal(baseR, Ex, LCtx,
+ AT->getElementType(), Count);
+ B = RM.addBinding(B, baseR, BindingKey::Default, V);
+ return;
+ }
+
+ if (includeGlobals &&
+ isa<NonStaticGlobalSpaceRegion>(baseR->getMemorySpace())) {
+ // If the region is a global and we are invalidating all globals,
+ // just erase the entry. This causes all globals to be lazily
+ // symbolicated from the same base symbol.
+ B = RM.removeBinding(B, baseR);
+ return;
+ }
+
+
+ DefinedOrUnknownSVal V = svalBuilder.getConjuredSymbolVal(baseR, Ex, LCtx,
+ T,Count);
+ assert(SymbolManager::canSymbolicate(T) || V.isUnknown());
+ B = RM.addBinding(B, baseR, BindingKey::Direct, V);
+}
+
+RegionBindings RegionStoreManager::invalidateGlobalRegion(MemRegion::Kind K,
+ const Expr *Ex,
+ unsigned Count,
+ const LocationContext *LCtx,
+ RegionBindings B,
+ InvalidatedRegions *Invalidated) {
+ // Bind the globals memory space to a new symbol that we will use to derive
+ // the bindings for all globals.
+ const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion(K);
+ SVal V =
+ svalBuilder.getConjuredSymbolVal(/* SymbolTag = */ (void*) GS, Ex, LCtx,
+ /* symbol type, doesn't matter */ Ctx.IntTy,
+ Count);
+
+ B = removeBinding(B, GS);
+ B = addBinding(B, BindingKey::Make(GS, BindingKey::Default), V);
+
+ // Even if there are no bindings in the global scope, we still need to
+ // record that we touched it.
+ if (Invalidated)
+ Invalidated->push_back(GS);
+
+ return B;
+}
+
+StoreRef RegionStoreManager::invalidateRegions(Store store,
+ ArrayRef<const MemRegion *> Regions,
+ const Expr *Ex, unsigned Count,
+ const LocationContext *LCtx,
+ InvalidatedSymbols &IS,
+ const CallOrObjCMessage *Call,
+ InvalidatedRegions *Invalidated) {
+ invalidateRegionsWorker W(*this, StateMgr,
+ RegionStoreManager::GetRegionBindings(store),
+ Ex, Count, LCtx, IS, Invalidated, false);
+
+ // Scan the bindings and generate the clusters.
+ W.GenerateClusters();
+
+ // Add the regions to the worklist.
+ for (ArrayRef<const MemRegion *>::iterator
+ I = Regions.begin(), E = Regions.end(); I != E; ++I)
+ W.AddToWorkList(*I);
+
+ W.RunWorkList();
+
+ // Return the new bindings.
+ RegionBindings B = W.getRegionBindings();
+
+  // For all globals which are neither static nor immutable: determine which
+  // global regions should be invalidated and invalidate them.
+ // TODO: This could possibly be more precise with modules.
+ //
+ // System calls invalidate only system globals.
+ if (Call && Call->isInSystemHeader()) {
+ B = invalidateGlobalRegion(MemRegion::GlobalSystemSpaceRegionKind,
+ Ex, Count, LCtx, B, Invalidated);
+ // Internal calls might invalidate both system and internal globals.
+ } else {
+ B = invalidateGlobalRegion(MemRegion::GlobalSystemSpaceRegionKind,
+ Ex, Count, LCtx, B, Invalidated);
+ B = invalidateGlobalRegion(MemRegion::GlobalInternalSpaceRegionKind,
+ Ex, Count, LCtx, B, Invalidated);
+ }
+
+ return StoreRef(B.getRootWithoutRetain(), *this);
+}
+
+//===----------------------------------------------------------------------===//
+// Extents for regions.
+//===----------------------------------------------------------------------===//
+
+DefinedOrUnknownSVal
+RegionStoreManager::getSizeInElements(ProgramStateRef state,
+ const MemRegion *R,
+ QualType EleTy) {
+ SVal Size = cast<SubRegion>(R)->getExtent(svalBuilder);
+ const llvm::APSInt *SizeInt = svalBuilder.getKnownValue(state, Size);
+ if (!SizeInt)
+ return UnknownVal();
+
+ CharUnits RegionSize = CharUnits::fromQuantity(SizeInt->getSExtValue());
+
+ if (Ctx.getAsVariableArrayType(EleTy)) {
+ // FIXME: We need to track extra state to properly record the size
+ // of VLAs. Returning UnknownVal here, however, is a stop-gap so that
+ // we don't have a divide-by-zero below.
+ return UnknownVal();
+ }
+
+ CharUnits EleSize = Ctx.getTypeSizeInChars(EleTy);
+
+  // If the region is reinterpreted as a type whose size doesn't evenly divide
+  // the region's size, round the element count down.
+ // This is a signed value, since it's used in arithmetic with signed indices.
+ return svalBuilder.makeIntVal(RegionSize / EleSize, false);
+}
+
+//===----------------------------------------------------------------------===//
+// Location and region casting.
+//===----------------------------------------------------------------------===//
+
+/// ArrayToPointer - Emulates the "decay" of an array to a pointer
+/// type. 'Array' represents the lvalue of the array being decayed
+/// to a pointer, and the returned SVal represents the decayed
+/// version of that lvalue (i.e., a pointer to the first element of
+/// the array). This is called by ExprEngine when evaluating casts
+/// from arrays to pointers.
+SVal RegionStoreManager::ArrayToPointer(Loc Array) {
+ if (!isa<loc::MemRegionVal>(Array))
+ return UnknownVal();
+
+ const MemRegion* R = cast<loc::MemRegionVal>(&Array)->getRegion();
+ const TypedValueRegion* ArrayR = dyn_cast<TypedValueRegion>(R);
+
+ if (!ArrayR)
+ return UnknownVal();
+
+ // Strip off typedefs from the ArrayRegion's ValueType.
+ QualType T = ArrayR->getValueType().getDesugaredType(Ctx);
+ const ArrayType *AT = cast<ArrayType>(T);
+ T = AT->getElementType();
+
+ NonLoc ZeroIdx = svalBuilder.makeZeroArrayIndex();
+ return loc::MemRegionVal(MRMgr.getElementRegion(T, ZeroIdx, ArrayR, Ctx));
+}
+
+SVal RegionStoreManager::evalDerivedToBase(SVal derived, QualType baseType) {
+ const CXXRecordDecl *baseDecl;
+ if (baseType->isPointerType())
+ baseDecl = baseType->getCXXRecordDeclForPointerType();
+ else
+ baseDecl = baseType->getAsCXXRecordDecl();
+
+ assert(baseDecl && "not a CXXRecordDecl?");
+
+ loc::MemRegionVal *derivedRegVal = dyn_cast<loc::MemRegionVal>(&derived);
+ if (!derivedRegVal)
+ return derived;
+
+ const MemRegion *baseReg =
+ MRMgr.getCXXBaseObjectRegion(baseDecl, derivedRegVal->getRegion());
+
+ return loc::MemRegionVal(baseReg);
+}
+
+SVal RegionStoreManager::evalDynamicCast(SVal base, QualType derivedType,
+ bool &Failed) {
+ Failed = false;
+
+ loc::MemRegionVal *baseRegVal = dyn_cast<loc::MemRegionVal>(&base);
+ if (!baseRegVal)
+ return UnknownVal();
+ const MemRegion *BaseRegion = baseRegVal->stripCasts();
+
+ // Assume the derived class is a pointer or a reference to a CXX record.
+ derivedType = derivedType->getPointeeType();
+ assert(!derivedType.isNull());
+ const CXXRecordDecl *DerivedDecl = derivedType->getAsCXXRecordDecl();
+ if (!DerivedDecl && !derivedType->isVoidType())
+ return UnknownVal();
+
+ // Drill down the CXXBaseObject chains, which represent upcasts (casts from
+ // derived to base).
+ const MemRegion *SR = BaseRegion;
+ while (const TypedRegion *TSR = dyn_cast_or_null<TypedRegion>(SR)) {
+ QualType BaseType = TSR->getLocationType()->getPointeeType();
+ assert(!BaseType.isNull());
+ const CXXRecordDecl *SRDecl = BaseType->getAsCXXRecordDecl();
+ if (!SRDecl)
+ return UnknownVal();
+
+    // If we found the derived class, the cast succeeds.
+ if (SRDecl == DerivedDecl)
+ return loc::MemRegionVal(TSR);
+
+ // If the region type is a subclass of the derived type.
+ if (!derivedType->isVoidType() && SRDecl->isDerivedFrom(DerivedDecl)) {
+ // This occurs in two cases.
+ // 1) We are processing an upcast.
+ // 2) We are processing a downcast but we jumped directly from the
+ // ancestor to a child of the cast value, so conjure the
+ // appropriate region to represent value (the intermediate node).
+ return loc::MemRegionVal(MRMgr.getCXXBaseObjectRegion(DerivedDecl,
+ BaseRegion));
+ }
+
+    // If the super region is not a parent of the derived class, the cast
+    // definitely fails.
+ if (!derivedType->isVoidType() &&
+ DerivedDecl->isProvablyNotDerivedFrom(SRDecl)) {
+ Failed = true;
+ return UnknownVal();
+ }
+
+ if (const CXXBaseObjectRegion *R = dyn_cast<CXXBaseObjectRegion>(TSR))
+ // Drill down the chain to get the derived classes.
+ SR = R->getSuperRegion();
+ else {
+ // We reached the bottom of the hierarchy.
+
+ // If this is a cast to void*, return the region.
+ if (derivedType->isVoidType())
+ return loc::MemRegionVal(TSR);
+
+      // We did not find the derived class. We must be casting from base to
+      // derived, so the cast should fail.
+ Failed = true;
+ return UnknownVal();
+ }
+ }
+
+ return UnknownVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Loading values from regions.
+//===----------------------------------------------------------------------===//
+
+Optional<SVal> RegionStoreManager::getDirectBinding(RegionBindings B,
+ const MemRegion *R) {
+
+ if (const SVal *V = lookup(B, R, BindingKey::Direct))
+ return *V;
+
+ return Optional<SVal>();
+}
+
+Optional<SVal> RegionStoreManager::getDefaultBinding(RegionBindings B,
+ const MemRegion *R) {
+ if (R->isBoundable())
+ if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R))
+ if (TR->getValueType()->isUnionType())
+ return UnknownVal();
+
+ if (const SVal *V = lookup(B, R, BindingKey::Default))
+ return *V;
+
+ return Optional<SVal>();
+}
+
+SVal RegionStoreManager::getBinding(Store store, Loc L, QualType T) {
+ assert(!isa<UnknownVal>(L) && "location unknown");
+ assert(!isa<UndefinedVal>(L) && "location undefined");
+
+ // For access to concrete addresses, return UnknownVal. Checks
+ // for null dereferences (and similar errors) are done by checkers, not
+ // the Store.
+ // FIXME: We can consider lazily symbolicating such memory, but we really
+  // should defer this until we can reason easily about symbolicating arrays
+ // of bytes.
+ if (isa<loc::ConcreteInt>(L)) {
+ return UnknownVal();
+ }
+ if (!isa<loc::MemRegionVal>(L)) {
+ return UnknownVal();
+ }
+
+ const MemRegion *MR = cast<loc::MemRegionVal>(L).getRegion();
+
+ if (isa<AllocaRegion>(MR) ||
+ isa<SymbolicRegion>(MR) ||
+ isa<CodeTextRegion>(MR)) {
+ if (T.isNull()) {
+ if (const TypedRegion *TR = dyn_cast<TypedRegion>(MR))
+ T = TR->getLocationType();
+ else {
+ const SymbolicRegion *SR = cast<SymbolicRegion>(MR);
+ T = SR->getSymbol()->getType(Ctx);
+ }
+ }
+ MR = GetElementZeroRegion(MR, T);
+ }
+
+ // FIXME: Perhaps this method should just take a 'const MemRegion*' argument
+ // instead of 'Loc', and have the other Loc cases handled at a higher level.
+ const TypedValueRegion *R = cast<TypedValueRegion>(MR);
+ QualType RTy = R->getValueType();
+
+ // FIXME: We should eventually handle funny addressing. e.g.:
+ //
+ // int x = ...;
+ // int *p = &x;
+ // char *q = (char*) p;
+ // char c = *q; // returns the first byte of 'x'.
+ //
+ // Such funny addressing will occur due to layering of regions.
+
+ if (RTy->isStructureOrClassType())
+ return getBindingForStruct(store, R);
+
+ // FIXME: Handle unions.
+ if (RTy->isUnionType())
+ return UnknownVal();
+
+ if (RTy->isArrayType())
+ return getBindingForArray(store, R);
+
+ // FIXME: handle Vector types.
+ if (RTy->isVectorType())
+ return UnknownVal();
+
+ if (const FieldRegion* FR = dyn_cast<FieldRegion>(R))
+ return CastRetrievedVal(getBindingForField(store, FR), FR, T, false);
+
+ if (const ElementRegion* ER = dyn_cast<ElementRegion>(R)) {
+ // FIXME: Here we actually perform an implicit conversion from the loaded
+ // value to the element type. Eventually we want to compose these values
+ // more intelligently. For example, an 'element' can encompass multiple
+ // bound regions (e.g., several bound bytes), or could be a subset of
+ // a larger value.
+ return CastRetrievedVal(getBindingForElement(store, ER), ER, T, false);
+ }
+
+ if (const ObjCIvarRegion *IVR = dyn_cast<ObjCIvarRegion>(R)) {
+ // FIXME: Here we actually perform an implicit conversion from the loaded
+ // value to the ivar type. What we should model is stores to ivars
+ // that blow past the extent of the ivar. If the address of the ivar is
+    // reinterpreted, it is possible we stored a different value that could
+ // fit within the ivar. Either we need to cast these when storing them
+ // or reinterpret them lazily (as we do here).
+ return CastRetrievedVal(getBindingForObjCIvar(store, IVR), IVR, T, false);
+ }
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ // FIXME: Here we actually perform an implicit conversion from the loaded
+ // value to the variable type. What we should model is stores to variables
+ // that blow past the extent of the variable. If the address of the
+    // variable is reinterpreted, it is possible we stored a different value
+ // that could fit within the variable. Either we need to cast these when
+ // storing them or reinterpret them lazily (as we do here).
+ return CastRetrievedVal(getBindingForVar(store, VR), VR, T, false);
+ }
+
+ RegionBindings B = GetRegionBindings(store);
+ const SVal *V = lookup(B, R, BindingKey::Direct);
+
+ // Check if the region has a binding.
+ if (V)
+ return *V;
+
+ // The location does not have a bound value. This means that it has
+ // the value it had upon its creation and/or entry to the analyzed
+ // function/method. These are either symbolic values or 'undefined'.
+ if (R->hasStackNonParametersStorage()) {
+ // All stack variables are considered to have undefined values
+ // upon creation. All heap allocated blocks are considered to
+ // have undefined values as well unless they are explicitly bound
+ // to specific values.
+ return UndefinedVal();
+ }
+
+ // All other values are symbolic.
+ return svalBuilder.getRegionValueSymbolVal(R);
+}
+
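+// Walk from R up through its super regions looking for a default binding that
+// is a LazyCompoundVal. If one is found, return the lazily copied store along
+// with R rebuilt on top of the lazy value's region, so the same field/element
+// path can be looked up there. A null MemRegion means no lazy binding exists.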
+std::pair<Store, const MemRegion *>
+RegionStoreManager::GetLazyBinding(RegionBindings B, const MemRegion *R,
+ const MemRegion *originalRegion) {
+
+ if (originalRegion != R) {
+ if (Optional<SVal> OV = getDefaultBinding(B, R)) {
+ if (const nonloc::LazyCompoundVal *V =
+ dyn_cast<nonloc::LazyCompoundVal>(OV.getPointer()))
+ return std::make_pair(V->getStore(), V->getRegion());
+ }
+ }
+
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ const std::pair<Store, const MemRegion *> &X =
+ GetLazyBinding(B, ER->getSuperRegion(), originalRegion);
+
+ if (X.second)
+ return std::make_pair(X.first,
+ MRMgr.getElementRegionWithSuper(ER, X.second));
+ }
+ else if (const FieldRegion *FR = dyn_cast<FieldRegion>(R)) {
+ const std::pair<Store, const MemRegion *> &X =
+ GetLazyBinding(B, FR->getSuperRegion(), originalRegion);
+
+ if (X.second)
+ return std::make_pair(X.first,
+ MRMgr.getFieldRegionWithSuper(FR, X.second));
+ }
+ // C++ base object region is another kind of region that we should blast
+ // through to look for lazy compound value. It is like a field region.
+ else if (const CXXBaseObjectRegion *baseReg =
+ dyn_cast<CXXBaseObjectRegion>(R)) {
+ const std::pair<Store, const MemRegion *> &X =
+ GetLazyBinding(B, baseReg->getSuperRegion(), originalRegion);
+
+ if (X.second)
+ return std::make_pair(X.first,
+ MRMgr.getCXXBaseObjectRegionWithSuper(baseReg, X.second));
+ }
+
+  // The NULL MemRegion indicates a non-existent lazy binding. A NULL Store is
+ // possible for a valid lazy binding.
+ return std::make_pair((Store) 0, (const MemRegion *) 0);
+}
+
+SVal RegionStoreManager::getBindingForElement(Store store,
+ const ElementRegion* R) {
+ // Check if the region has a binding.
+ RegionBindings B = GetRegionBindings(store);
+ if (const Optional<SVal> &V = getDirectBinding(B, R))
+ return *V;
+
+ const MemRegion* superR = R->getSuperRegion();
+
+ // Check if the region is an element region of a string literal.
+ if (const StringRegion *StrR=dyn_cast<StringRegion>(superR)) {
+ // FIXME: Handle loads from strings where the literal is treated as
+ // an integer, e.g., *((unsigned int*)"hello")
+ QualType T = Ctx.getAsArrayType(StrR->getValueType())->getElementType();
+ if (T != Ctx.getCanonicalType(R->getElementType()))
+ return UnknownVal();
+
+ const StringLiteral *Str = StrR->getStringLiteral();
+ SVal Idx = R->getIndex();
+ if (nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&Idx)) {
+ int64_t i = CI->getValue().getSExtValue();
+      // Abort on string underrun. This can happen with arbitrary
+ // clients of getBindingForElement().
+ if (i < 0)
+ return UndefinedVal();
+ int64_t length = Str->getLength();
+ // Technically, only i == length is guaranteed to be null.
+ // However, such overflows should be caught before reaching this point;
+ // the only time such an access would be made is if a string literal was
+ // used to initialize a larger array.
+ char c = (i >= length) ? '\0' : Str->getCodeUnit(i);
+ return svalBuilder.makeIntVal(c, T);
+ }
+ }
+
+ // Check for loads from a code text region. For such loads, just give up.
+ if (isa<CodeTextRegion>(superR))
+ return UnknownVal();
+
+ // Handle the case where we are indexing into a larger scalar object.
+ // For example, this handles:
+ // int x = ...
+ // char *y = &x;
+ // return *y;
+ // FIXME: This is a hack, and doesn't do anything really intelligent yet.
+ const RegionRawOffset &O = R->getAsArrayOffset();
+
+ // If we cannot reason about the offset, return an unknown value.
+ if (!O.getRegion())
+ return UnknownVal();
+
+ if (const TypedValueRegion *baseR =
+ dyn_cast_or_null<TypedValueRegion>(O.getRegion())) {
+ QualType baseT = baseR->getValueType();
+ if (baseT->isScalarType()) {
+ QualType elemT = R->getElementType();
+ if (elemT->isScalarType()) {
+ if (Ctx.getTypeSizeInChars(baseT) >= Ctx.getTypeSizeInChars(elemT)) {
+ if (const Optional<SVal> &V = getDirectBinding(B, superR)) {
+ if (SymbolRef parentSym = V->getAsSymbol())
+ return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ if (V->isUnknownOrUndef())
+ return *V;
+ // Other cases: give up. We are indexing into a larger object
+ // that has some value, but we don't know how to handle that yet.
+ return UnknownVal();
+ }
+ }
+ }
+ }
+ }
+ return getBindingForFieldOrElementCommon(store, R, R->getElementType(),
+ superR);
+}
+
+SVal RegionStoreManager::getBindingForField(Store store,
+ const FieldRegion* R) {
+
+ // Check if the region has a binding.
+ RegionBindings B = GetRegionBindings(store);
+ if (const Optional<SVal> &V = getDirectBinding(B, R))
+ return *V;
+
+ QualType Ty = R->getValueType();
+ return getBindingForFieldOrElementCommon(store, R, Ty, R->getSuperRegion());
+}
+
+Optional<SVal>
+RegionStoreManager::getBindingForDerivedDefaultValue(RegionBindings B,
+ const MemRegion *superR,
+ const TypedValueRegion *R,
+ QualType Ty) {
+
+ if (const Optional<SVal> &D = getDefaultBinding(B, superR)) {
+ const SVal &val = D.getValue();
+ if (SymbolRef parentSym = val.getAsSymbol())
+ return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ if (val.isZeroConstant())
+ return svalBuilder.makeZeroVal(Ty);
+
+ if (val.isUnknownOrUndef())
+ return val;
+
+ // Lazy bindings are handled later.
+ if (isa<nonloc::LazyCompoundVal>(val))
+ return Optional<SVal>();
+
+ llvm_unreachable("Unknown default value");
+ }
+
+ return Optional<SVal>();
+}
+
+SVal RegionStoreManager::getLazyBinding(const MemRegion *lazyBindingRegion,
+ Store lazyBindingStore) {
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(lazyBindingRegion))
+ return getBindingForElement(lazyBindingStore, ER);
+
+ return getBindingForField(lazyBindingStore,
+ cast<FieldRegion>(lazyBindingRegion));
+}
+
+SVal RegionStoreManager::getBindingForFieldOrElementCommon(Store store,
+ const TypedValueRegion *R,
+ QualType Ty,
+ const MemRegion *superR) {
+
+ // At this point we have already checked in either getBindingForElement or
+ // getBindingForField if 'R' has a direct binding.
+ RegionBindings B = GetRegionBindings(store);
+
+  // Record whether or not we see a symbolic index; such an index can fall
+  // completely outside the scope of our lookup.
+ bool hasSymbolicIndex = false;
+
+ while (superR) {
+ if (const Optional<SVal> &D =
+ getBindingForDerivedDefaultValue(B, superR, R, Ty))
+ return *D;
+
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(superR)) {
+ NonLoc index = ER->getIndex();
+ if (!index.isConstant())
+ hasSymbolicIndex = true;
+ }
+
+ // If our super region is a field or element itself, walk up the region
+ // hierarchy to see if there is a default value installed in an ancestor.
+ if (const SubRegion *SR = dyn_cast<SubRegion>(superR)) {
+ superR = SR->getSuperRegion();
+ continue;
+ }
+ break;
+ }
+
+ // Lazy binding?
+ Store lazyBindingStore = NULL;
+ const MemRegion *lazyBindingRegion = NULL;
+ llvm::tie(lazyBindingStore, lazyBindingRegion) = GetLazyBinding(B, R, R);
+
+ if (lazyBindingRegion)
+ return getLazyBinding(lazyBindingRegion, lazyBindingStore);
+
+ if (R->hasStackNonParametersStorage()) {
+ if (isa<ElementRegion>(R)) {
+ // Currently we don't reason specially about Clang-style vectors. Check
+ // if superR is a vector and if so return Unknown.
+ if (const TypedValueRegion *typedSuperR =
+ dyn_cast<TypedValueRegion>(superR)) {
+ if (typedSuperR->getValueType()->isVectorType())
+ return UnknownVal();
+ }
+ }
+
+ // FIXME: We also need to take ElementRegions with symbolic indexes into
+    // account. This case handles both direct accesses to an ElementRegion
+    // with a symbolic offset and fields within an element that has
+    // a symbolic offset.
+ if (hasSymbolicIndex)
+ return UnknownVal();
+
+ return UndefinedVal();
+ }
+
+ // All other values are symbolic.
+ return svalBuilder.getRegionValueSymbolVal(R);
+}
+
+SVal RegionStoreManager::getBindingForObjCIvar(Store store,
+ const ObjCIvarRegion* R) {
+
+ // Check if the region has a binding.
+ RegionBindings B = GetRegionBindings(store);
+
+ if (const Optional<SVal> &V = getDirectBinding(B, R))
+ return *V;
+
+ const MemRegion *superR = R->getSuperRegion();
+
+ // Check if the super region has a default binding.
+ if (const Optional<SVal> &V = getDefaultBinding(B, superR)) {
+ if (SymbolRef parentSym = V->getAsSymbol())
+ return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ // Other cases: give up.
+ return UnknownVal();
+ }
+
+ return getBindingForLazySymbol(R);
+}
+
+SVal RegionStoreManager::getBindingForVar(Store store, const VarRegion *R) {
+
+ // Check if the region has a binding.
+ RegionBindings B = GetRegionBindings(store);
+
+ if (const Optional<SVal> &V = getDirectBinding(B, R))
+ return *V;
+
+ // Lazily derive a value for the VarRegion.
+ const VarDecl *VD = R->getDecl();
+ QualType T = VD->getType();
+ const MemSpaceRegion *MS = R->getMemorySpace();
+
+ if (isa<UnknownSpaceRegion>(MS) ||
+ isa<StackArgumentsSpaceRegion>(MS))
+ return svalBuilder.getRegionValueSymbolVal(R);
+
+ if (isa<GlobalsSpaceRegion>(MS)) {
+ if (isa<NonStaticGlobalSpaceRegion>(MS)) {
+ // Is 'VD' declared constant? If so, retrieve the constant value.
+ QualType CT = Ctx.getCanonicalType(T);
+ if (CT.isConstQualified()) {
+ const Expr *Init = VD->getInit();
+ // Do the null check first, as we want to call 'IgnoreParenCasts'.
+ if (Init)
+ if (const IntegerLiteral *IL =
+ dyn_cast<IntegerLiteral>(Init->IgnoreParenCasts())) {
+ const nonloc::ConcreteInt &V = svalBuilder.makeIntVal(IL);
+ return svalBuilder.evalCast(V, Init->getType(), IL->getType());
+ }
+ }
+
+ if (const Optional<SVal> &V
+ = getBindingForDerivedDefaultValue(B, MS, R, CT))
+ return V.getValue();
+
+ return svalBuilder.getRegionValueSymbolVal(R);
+ }
+
+ if (T->isIntegerType())
+ return svalBuilder.makeIntVal(0, T);
+ if (T->isPointerType())
+ return svalBuilder.makeNull();
+
+ return UnknownVal();
+ }
+
+ return UndefinedVal();
+}
+
+SVal RegionStoreManager::getBindingForLazySymbol(const TypedValueRegion *R) {
+ // All other values are symbolic.
+ return svalBuilder.getRegionValueSymbolVal(R);
+}
+
+SVal RegionStoreManager::getBindingForStruct(Store store,
+ const TypedValueRegion* R) {
+ assert(R->getValueType()->isStructureOrClassType());
+ return svalBuilder.makeLazyCompoundVal(StoreRef(store, *this), R);
+}
+
+SVal RegionStoreManager::getBindingForArray(Store store,
+ const TypedValueRegion * R) {
+ assert(Ctx.getAsConstantArrayType(R->getValueType()));
+ return svalBuilder.makeLazyCompoundVal(StoreRef(store, *this), R);
+}
+
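+// Return true if 'region' (normalized to its base region) occurs in the store,
+// either as the region of a binding key or as a region bound as a value.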
+bool RegionStoreManager::includedInBindings(Store store,
+ const MemRegion *region) const {
+ RegionBindings B = GetRegionBindings(store);
+ region = region->getBaseRegion();
+
+ for (RegionBindings::iterator it = B.begin(), ei = B.end(); it != ei; ++it) {
+ const BindingKey &K = it.getKey();
+ if (region == K.getRegion())
+ return true;
+ const SVal &D = it.getData();
+ if (const MemRegion *r = D.getAsRegion())
+ if (r == region)
+ return true;
+ }
+ return false;
+}
+
+//===----------------------------------------------------------------------===//
+// Binding values to regions.
+//===----------------------------------------------------------------------===//
+
+StoreRef RegionStoreManager::Remove(Store store, Loc L) {
+ if (isa<loc::MemRegionVal>(L))
+ if (const MemRegion* R = cast<loc::MemRegionVal>(L).getRegion())
+ return StoreRef(removeBinding(GetRegionBindings(store),
+ R).getRootWithoutRetain(),
+ *this);
+
+ return StoreRef(store, *this);
+}
+
+StoreRef RegionStoreManager::Bind(Store store, Loc L, SVal V) {
+ if (isa<loc::ConcreteInt>(L))
+ return StoreRef(store, *this);
+
+ // If we get here, the location should be a region.
+ const MemRegion *R = cast<loc::MemRegionVal>(L).getRegion();
+
+ // Check if the region is a struct region.
+ if (const TypedValueRegion* TR = dyn_cast<TypedValueRegion>(R))
+ if (TR->getValueType()->isStructureOrClassType())
+ return BindStruct(store, TR, V);
+
+ if (const ElementRegion *ER = dyn_cast<ElementRegion>(R)) {
+ if (ER->getIndex().isZeroConstant()) {
+ if (const TypedValueRegion *superR =
+ dyn_cast<TypedValueRegion>(ER->getSuperRegion())) {
+ QualType superTy = superR->getValueType();
+ // For now, just invalidate the fields of the struct/union/class.
+ // This is for test rdar_test_7185607 in misc-ps-region-store.m.
+ // FIXME: Precisely handle the fields of the record.
+ if (superTy->isStructureOrClassType())
+ return KillStruct(store, superR, UnknownVal());
+ }
+ }
+ }
+ else if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+ // Binding directly to a symbolic region should be treated as binding
+ // to element 0.
+ QualType T = SR->getSymbol()->getType(Ctx);
+
+ // FIXME: Is this the right way to handle symbols that are references?
+ if (const PointerType *PT = T->getAs<PointerType>())
+ T = PT->getPointeeType();
+ else
+ T = T->getAs<ReferenceType>()->getPointeeType();
+
+ R = GetElementZeroRegion(SR, T);
+ }
+
+ // Perform the binding.
+ RegionBindings B = GetRegionBindings(store);
+ return StoreRef(addBinding(B, R, BindingKey::Direct,
+ V).getRootWithoutRetain(), *this);
+}
+
+StoreRef RegionStoreManager::BindDecl(Store store, const VarRegion *VR,
+ SVal InitVal) {
+
+ QualType T = VR->getDecl()->getType();
+
+ if (T->isArrayType())
+ return BindArray(store, VR, InitVal);
+ if (T->isStructureOrClassType())
+ return BindStruct(store, VR, InitVal);
+
+ return Bind(store, svalBuilder.makeLoc(VR), InitVal);
+}
+
+// FIXME: this method should be merged into Bind().
+StoreRef RegionStoreManager::BindCompoundLiteral(Store store,
+ const CompoundLiteralExpr *CL,
+ const LocationContext *LC,
+ SVal V) {
+ return Bind(store, loc::MemRegionVal(MRMgr.getCompoundLiteralRegion(CL, LC)),
+ V);
+}
+
+StoreRef RegionStoreManager::setImplicitDefaultValue(Store store,
+ const MemRegion *R,
+ QualType T) {
+ RegionBindings B = GetRegionBindings(store);
+ SVal V;
+
+ if (Loc::isLocType(T))
+ V = svalBuilder.makeNull();
+ else if (T->isIntegerType())
+ V = svalBuilder.makeZeroVal(T);
+ else if (T->isStructureOrClassType() || T->isArrayType()) {
+ // Set the default value to a zero constant when it is a structure
+ // or array. The type doesn't really matter.
+ V = svalBuilder.makeZeroVal(Ctx.IntTy);
+ }
+ else {
+ // We can't represent values of this type, but we still need to set a value
+ // to record that the region has been initialized.
+ // If this assertion ever fires, a new case should be added above -- we
+ // should know how to default-initialize any value we can symbolicate.
+ assert(!SymbolManager::canSymbolicate(T) && "This type is representable");
+ V = UnknownVal();
+ }
+
+ return StoreRef(addBinding(B, R, BindingKey::Default,
+ V).getRootWithoutRetain(), *this);
+}
+
+StoreRef RegionStoreManager::BindArray(Store store, const TypedValueRegion* R,
+ SVal Init) {
+
+ const ArrayType *AT =cast<ArrayType>(Ctx.getCanonicalType(R->getValueType()));
+ QualType ElementTy = AT->getElementType();
+ Optional<uint64_t> Size;
+
+ if (const ConstantArrayType* CAT = dyn_cast<ConstantArrayType>(AT))
+ Size = CAT->getSize().getZExtValue();
+
+ // Check if the init expr is a string literal.
+ if (loc::MemRegionVal *MRV = dyn_cast<loc::MemRegionVal>(&Init)) {
+ const StringRegion *S = cast<StringRegion>(MRV->getRegion());
+
+ // Treat the string as a lazy compound value.
+ nonloc::LazyCompoundVal LCV =
+ cast<nonloc::LazyCompoundVal>(svalBuilder.
+ makeLazyCompoundVal(StoreRef(store, *this), S));
+ return CopyLazyBindings(LCV, store, R);
+ }
+
+ // Handle lazy compound values.
+ if (nonloc::LazyCompoundVal *LCV = dyn_cast<nonloc::LazyCompoundVal>(&Init))
+ return CopyLazyBindings(*LCV, store, R);
+
+ // Remaining case: explicit compound values.
+
+ if (Init.isUnknown())
+ return setImplicitDefaultValue(store, R, ElementTy);
+
+ nonloc::CompoundVal& CV = cast<nonloc::CompoundVal>(Init);
+ nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
+ uint64_t i = 0;
+
+ StoreRef newStore(store, *this);
+ for (; Size.hasValue() ? i < Size.getValue() : true ; ++i, ++VI) {
+ // The init list might be shorter than the array length.
+ if (VI == VE)
+ break;
+
+ const NonLoc &Idx = svalBuilder.makeArrayIndex(i);
+ const ElementRegion *ER = MRMgr.getElementRegion(ElementTy, Idx, R, Ctx);
+
+ if (ElementTy->isStructureOrClassType())
+ newStore = BindStruct(newStore.getStore(), ER, *VI);
+ else if (ElementTy->isArrayType())
+ newStore = BindArray(newStore.getStore(), ER, *VI);
+ else
+ newStore = Bind(newStore.getStore(), svalBuilder.makeLoc(ER), *VI);
+ }
+
+ // If the init list is shorter than the array length, set the
+ // array default value.
+ if (Size.hasValue() && i < Size.getValue())
+ newStore = setImplicitDefaultValue(newStore.getStore(), R, ElementTy);
+
+ return newStore;
+}
+
+StoreRef RegionStoreManager::BindStruct(Store store, const TypedValueRegion* R,
+ SVal V) {
+
+ if (!Features.supportsFields())
+ return StoreRef(store, *this);
+
+ QualType T = R->getValueType();
+ assert(T->isStructureOrClassType());
+
+ const RecordType* RT = T->getAs<RecordType>();
+ RecordDecl *RD = RT->getDecl();
+
+ if (!RD->isCompleteDefinition())
+ return StoreRef(store, *this);
+
+ // Handle lazy compound values.
+ if (const nonloc::LazyCompoundVal *LCV=dyn_cast<nonloc::LazyCompoundVal>(&V))
+ return CopyLazyBindings(*LCV, store, R);
+
+  // We may get a non-CompoundVal accidentally, due to imprecise cast logic or
+  // because we are binding a symbolic struct value. Kill the field values,
+  // and if the value is symbolic, bind it as a "default" binding.
+ if (V.isUnknown() || !isa<nonloc::CompoundVal>(V)) {
+ SVal SV = isa<nonloc::SymbolVal>(V) ? V : UnknownVal();
+ return KillStruct(store, R, SV);
+ }
+
+ nonloc::CompoundVal& CV = cast<nonloc::CompoundVal>(V);
+ nonloc::CompoundVal::iterator VI = CV.begin(), VE = CV.end();
+
+ RecordDecl::field_iterator FI, FE;
+ StoreRef newStore(store, *this);
+
+ for (FI = RD->field_begin(), FE = RD->field_end(); FI != FE; ++FI) {
+
+ if (VI == VE)
+ break;
+
+ // Skip any unnamed bitfields to stay in sync with the initializers.
+ if ((*FI)->isUnnamedBitfield())
+ continue;
+
+ QualType FTy = (*FI)->getType();
+ const FieldRegion* FR = MRMgr.getFieldRegion(*FI, R);
+
+ if (FTy->isArrayType())
+ newStore = BindArray(newStore.getStore(), FR, *VI);
+ else if (FTy->isStructureOrClassType())
+ newStore = BindStruct(newStore.getStore(), FR, *VI);
+ else
+ newStore = Bind(newStore.getStore(), svalBuilder.makeLoc(FR), *VI);
+ ++VI;
+ }
+
+  // There may be fewer values in the initializer list than fields in the struct.
+ if (FI != FE) {
+ RegionBindings B = GetRegionBindings(newStore.getStore());
+ B = addBinding(B, R, BindingKey::Default, svalBuilder.makeIntVal(0, false));
+ newStore = StoreRef(B.getRootWithoutRetain(), *this);
+ }
+
+ return newStore;
+}
+
+StoreRef RegionStoreManager::KillStruct(Store store, const TypedRegion* R,
+ SVal DefaultVal) {
+ BindingKey key = BindingKey::Make(R, BindingKey::Default);
+
+ // The BindingKey may be "invalid" if we cannot handle the region binding
+ // explicitly. One example is something like array[index], where index
+ // is a symbolic value. In such cases, we want to invalidate the entire
+ // array, as the index assignment could have been to any element. In
+ // the case of nested symbolic indices, we need to march up the region
+  // hierarchy until we reach a region whose binding we can reason about.
+ const SubRegion *subReg = R;
+
+ while (!key.isValid()) {
+ if (const SubRegion *tmp = dyn_cast<SubRegion>(subReg->getSuperRegion())) {
+ subReg = tmp;
+ key = BindingKey::Make(tmp, BindingKey::Default);
+ }
+ else
+ break;
+ }
+
+ // Remove the old bindings, using 'subReg' as the root of all regions
+ // we will invalidate.
+ RegionBindings B = GetRegionBindings(store);
+ OwningPtr<RegionStoreSubRegionMap>
+ SubRegions(getRegionStoreSubRegionMap(store));
+ RemoveSubRegionBindings(B, subReg, *SubRegions);
+
+ // Set the default value of the struct region to "unknown".
+ if (!key.isValid())
+ return StoreRef(B.getRootWithoutRetain(), *this);
+
+ return StoreRef(addBinding(B, key, DefaultVal).getRootWithoutRetain(), *this);
+}
+
+StoreRef RegionStoreManager::CopyLazyBindings(nonloc::LazyCompoundVal V,
+ Store store,
+ const TypedRegion *R) {
+
+ // Nuke the old bindings stemming from R.
+ RegionBindings B = GetRegionBindings(store);
+
+ OwningPtr<RegionStoreSubRegionMap>
+ SubRegions(getRegionStoreSubRegionMap(store));
+
+  // B is updated in place by the call to RemoveSubRegionBindings.
+ RemoveSubRegionBindings(B, R, *SubRegions.get());
+
+ // Now copy the bindings. This amounts to just binding 'V' to 'R'. This
+ // results in a zero-copy algorithm.
+ return StoreRef(addBinding(B, R, BindingKey::Default,
+ V).getRootWithoutRetain(), *this);
+}
+
+//===----------------------------------------------------------------------===//
+// "Raw" retrievals and bindings.
+//===----------------------------------------------------------------------===//
+
+
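+// An invalid BindingKey (e.g., one for a binding the store cannot represent
+// precisely, such as an access through a symbolic index) is treated as a no-op
+// by addBinding and removeBinding, and as a missing entry by lookup.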
+RegionBindings RegionStoreManager::addBinding(RegionBindings B, BindingKey K,
+ SVal V) {
+ if (!K.isValid())
+ return B;
+ return RBFactory.add(B, K, V);
+}
+
+RegionBindings RegionStoreManager::addBinding(RegionBindings B,
+ const MemRegion *R,
+ BindingKey::Kind k, SVal V) {
+ return addBinding(B, BindingKey::Make(R, k), V);
+}
+
+const SVal *RegionStoreManager::lookup(RegionBindings B, BindingKey K) {
+ if (!K.isValid())
+ return NULL;
+ return B.lookup(K);
+}
+
+const SVal *RegionStoreManager::lookup(RegionBindings B,
+ const MemRegion *R,
+ BindingKey::Kind k) {
+ return lookup(B, BindingKey::Make(R, k));
+}
+
+RegionBindings RegionStoreManager::removeBinding(RegionBindings B,
+ BindingKey K) {
+ if (!K.isValid())
+ return B;
+ return RBFactory.remove(B, K);
+}
+
+RegionBindings RegionStoreManager::removeBinding(RegionBindings B,
+ const MemRegion *R,
+ BindingKey::Kind k){
+ return removeBinding(B, BindingKey::Make(R, k));
+}
+
+//===----------------------------------------------------------------------===//
+// State pruning.
+//===----------------------------------------------------------------------===//
+
+namespace {
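+// removeDeadBindingsWorker marks the clusters reachable from live regions and
+// symbols (and from the SymbolReaper's region roots); bindings whose clusters
+// are never visited are removed from the store afterwards, in
+// removeDeadBindings below.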
+class removeDeadBindingsWorker :
+ public ClusterAnalysis<removeDeadBindingsWorker> {
+ SmallVector<const SymbolicRegion*, 12> Postponed;
+ SymbolReaper &SymReaper;
+ const StackFrameContext *CurrentLCtx;
+
+public:
+ removeDeadBindingsWorker(RegionStoreManager &rm,
+ ProgramStateManager &stateMgr,
+ RegionBindings b, SymbolReaper &symReaper,
+ const StackFrameContext *LCtx)
+ : ClusterAnalysis<removeDeadBindingsWorker>(rm, stateMgr, b,
+ /* includeGlobals = */ false),
+ SymReaper(symReaper), CurrentLCtx(LCtx) {}
+
+ // Called by ClusterAnalysis.
+ void VisitAddedToCluster(const MemRegion *baseR, RegionCluster &C);
+ void VisitCluster(const MemRegion *baseR, BindingKey *I, BindingKey *E);
+
+ void VisitBindingKey(BindingKey K);
+ bool UpdatePostponed();
+ void VisitBinding(SVal V);
+};
+}
+
+void removeDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
+ RegionCluster &C) {
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(baseR)) {
+ if (SymReaper.isLive(VR))
+ AddToWorkList(baseR, C);
+
+ return;
+ }
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(baseR)) {
+ if (SymReaper.isLive(SR->getSymbol()))
+ AddToWorkList(SR, C);
+ else
+ Postponed.push_back(SR);
+
+ return;
+ }
+
+ if (isa<NonStaticGlobalSpaceRegion>(baseR)) {
+ AddToWorkList(baseR, C);
+ return;
+ }
+
+ // CXXThisRegion in the current or parent location context is live.
+ if (const CXXThisRegion *TR = dyn_cast<CXXThisRegion>(baseR)) {
+ const StackArgumentsSpaceRegion *StackReg =
+ cast<StackArgumentsSpaceRegion>(TR->getSuperRegion());
+ const StackFrameContext *RegCtx = StackReg->getStackFrame();
+ if (RegCtx == CurrentLCtx || RegCtx->isParentOf(CurrentLCtx))
+ AddToWorkList(TR, C);
+ }
+}
+
+void removeDeadBindingsWorker::VisitCluster(const MemRegion *baseR,
+ BindingKey *I, BindingKey *E) {
+ for ( ; I != E; ++I)
+ VisitBindingKey(*I);
+}
+
+void removeDeadBindingsWorker::VisitBinding(SVal V) {
+ // Is it a LazyCompoundVal? All referenced regions are live as well.
+ if (const nonloc::LazyCompoundVal *LCS =
+ dyn_cast<nonloc::LazyCompoundVal>(&V)) {
+
+ const MemRegion *LazyR = LCS->getRegion();
+ RegionBindings B = RegionStoreManager::GetRegionBindings(LCS->getStore());
+ for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){
+ const SubRegion *baseR = dyn_cast<SubRegion>(RI.getKey().getRegion());
+ if (baseR && baseR->isSubRegionOf(LazyR))
+ VisitBinding(RI.getData());
+ }
+ return;
+ }
+
+ // If V is a region, then add it to the worklist.
+ if (const MemRegion *R = V.getAsRegion())
+ AddToWorkList(R);
+
+ // Update the set of live symbols.
+ for (SymExpr::symbol_iterator SI = V.symbol_begin(), SE = V.symbol_end();
+ SI!=SE; ++SI)
+ SymReaper.markLive(*SI);
+}
+
+void removeDeadBindingsWorker::VisitBindingKey(BindingKey K) {
+ const MemRegion *R = K.getRegion();
+
+ // Mark this region "live" by adding it to the worklist. This will cause
+  // us to visit all regions in the cluster (if we haven't visited them
+ // already).
+ if (AddToWorkList(R)) {
+ // Mark the symbol for any live SymbolicRegion as "live". This means we
+ // should continue to track that symbol.
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(R))
+ SymReaper.markLive(SymR->getSymbol());
+
+ // For BlockDataRegions, enqueue the VarRegions for variables marked
+    // with __block (passed by reference), which the block references
+    // via BlockDeclRefExprs.
+ if (const BlockDataRegion *BD = dyn_cast<BlockDataRegion>(R)) {
+ for (BlockDataRegion::referenced_vars_iterator
+ RI = BD->referenced_vars_begin(), RE = BD->referenced_vars_end();
+ RI != RE; ++RI) {
+ if ((*RI)->getDecl()->getAttr<BlocksAttr>())
+ AddToWorkList(*RI);
+ }
+
+ // No possible data bindings on a BlockDataRegion.
+ return;
+ }
+ }
+
+ // Visit the data binding for K.
+ if (const SVal *V = RM.lookup(B, K))
+ VisitBinding(*V);
+}
+
+bool removeDeadBindingsWorker::UpdatePostponed() {
+ // See if any postponed SymbolicRegions are actually live now, after
+ // having done a scan.
+ bool changed = false;
+
+ for (SmallVectorImpl<const SymbolicRegion*>::iterator
+ I = Postponed.begin(), E = Postponed.end() ; I != E ; ++I) {
+ if (const SymbolicRegion *SR = cast_or_null<SymbolicRegion>(*I)) {
+ if (SymReaper.isLive(SR->getSymbol())) {
+ changed |= AddToWorkList(SR);
+ *I = NULL;
+ }
+ }
+ }
+
+ return changed;
+}
+
+StoreRef RegionStoreManager::removeDeadBindings(Store store,
+ const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper) {
+ RegionBindings B = GetRegionBindings(store);
+ removeDeadBindingsWorker W(*this, StateMgr, B, SymReaper, LCtx);
+ W.GenerateClusters();
+
+ // Enqueue the region roots onto the worklist.
+ for (SymbolReaper::region_iterator I = SymReaper.region_begin(),
+ E = SymReaper.region_end(); I != E; ++I) {
+ W.AddToWorkList(*I);
+ }
+
+ do W.RunWorkList(); while (W.UpdatePostponed());
+
+ // We have now scanned the store, marking reachable regions and symbols
+ // as live. We now remove all the regions that are dead from the store
+  // and report to SymReaper the symbols that may now be dead.
+ for (RegionBindings::iterator I = B.begin(), E = B.end(); I != E; ++I) {
+ const BindingKey &K = I.getKey();
+
+ // If the cluster has been visited, we know the region has been marked.
+ if (W.isVisited(K.getRegion()))
+ continue;
+
+ // Remove the dead entry.
+ B = removeBinding(B, K);
+
+ // Mark all non-live symbols that this binding references as dead.
+ if (const SymbolicRegion* SymR = dyn_cast<SymbolicRegion>(K.getRegion()))
+ SymReaper.maybeDead(SymR->getSymbol());
+
+ SVal X = I.getData();
+ SymExpr::symbol_iterator SI = X.symbol_begin(), SE = X.symbol_end();
+ for (; SI != SE; ++SI)
+ SymReaper.maybeDead(*SI);
+ }
+
+ return StoreRef(B.getRootWithoutRetain(), *this);
+}
+
+
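+// When entering a stack frame, bind the values of the actual arguments at the
+// call site to the callee's parameter VarRegions. Destructor calls have no
+// call-site arguments to bind.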
+StoreRef RegionStoreManager::enterStackFrame(ProgramStateRef state,
+ const LocationContext *callerCtx,
+ const StackFrameContext *calleeCtx)
+{
+ FunctionDecl const *FD = cast<FunctionDecl>(calleeCtx->getDecl());
+ FunctionDecl::param_const_iterator PI = FD->param_begin(),
+ PE = FD->param_end();
+ StoreRef store = StoreRef(state->getStore(), *this);
+
+ if (CallExpr const *CE = dyn_cast<CallExpr>(calleeCtx->getCallSite())) {
+ CallExpr::const_arg_iterator AI = CE->arg_begin(), AE = CE->arg_end();
+
+    // Copy the argument expression values to the parameter variables. We
+    // check that PI != PE because the actual number of arguments may differ
+    // from the number of parameters in the function declaration.
+ for (; AI != AE && PI != PE; ++AI, ++PI) {
+ SVal ArgVal = state->getSVal(*AI, callerCtx);
+ store = Bind(store.getStore(),
+ svalBuilder.makeLoc(MRMgr.getVarRegion(*PI, calleeCtx)),
+ ArgVal);
+ }
+ } else if (const CXXConstructExpr *CE =
+ dyn_cast<CXXConstructExpr>(calleeCtx->getCallSite())) {
+ CXXConstructExpr::const_arg_iterator AI = CE->arg_begin(),
+ AE = CE->arg_end();
+
+    // Copy the argument expression values to the parameter variables.
+ for (; AI != AE; ++AI, ++PI) {
+ SVal ArgVal = state->getSVal(*AI, callerCtx);
+ store = Bind(store.getStore(),
+ svalBuilder.makeLoc(MRMgr.getVarRegion(*PI, calleeCtx)),
+ ArgVal);
+ }
+ } else
+ assert(isa<CXXDestructorDecl>(calleeCtx->getDecl()));
+
+ return store;
+}
+
+//===----------------------------------------------------------------------===//
+// Utility methods.
+//===----------------------------------------------------------------------===//
+
+void RegionStoreManager::print(Store store, raw_ostream &OS,
+ const char* nl, const char *sep) {
+ RegionBindings B = GetRegionBindings(store);
+ OS << "Store (direct and default bindings):" << nl;
+
+ for (RegionBindings::iterator I = B.begin(), E = B.end(); I != E; ++I)
+ OS << ' ' << I.getKey() << " : " << I.getData() << nl;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
new file mode 100644
index 0000000..9e97f5e
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -0,0 +1,386 @@
+// SValBuilder.cpp - Basic class for all SValBuilder implementations -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SValBuilder, the base class for all (complete) SValBuilder
+// implementations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ExprCXX.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/BasicValueFactory.h"
+
+using namespace clang;
+using namespace ento;
+
+//===----------------------------------------------------------------------===//
+// Basic SVal creation.
+//===----------------------------------------------------------------------===//
+
+void SValBuilder::anchor() { }
+
+DefinedOrUnknownSVal SValBuilder::makeZeroVal(QualType type) {
+ if (Loc::isLocType(type))
+ return makeNull();
+
+ if (type->isIntegerType())
+ return makeIntVal(0, type);
+
+ // FIXME: Handle floats.
+ // FIXME: Handle structs.
+ return UnknownVal();
+}
+
+NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
+ const llvm::APSInt& rhs, QualType type) {
+ // The Environment ensures we always get a persistent APSInt in
+ // BasicValueFactory, so we don't need to get the APSInt from
+ // BasicValueFactory again.
+ assert(lhs);
+ assert(!Loc::isLocType(type));
+ return nonloc::SymbolVal(SymMgr.getSymIntExpr(lhs, op, rhs, type));
+}
+
+NonLoc SValBuilder::makeNonLoc(const llvm::APSInt& lhs,
+ BinaryOperator::Opcode op, const SymExpr *rhs,
+ QualType type) {
+ assert(rhs);
+ assert(!Loc::isLocType(type));
+ return nonloc::SymbolVal(SymMgr.getIntSymExpr(lhs, op, rhs, type));
+}
+
+NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
+ const SymExpr *rhs, QualType type) {
+ assert(lhs && rhs);
+ assert(haveSameType(lhs->getType(Context), rhs->getType(Context)) == true);
+ assert(!Loc::isLocType(type));
+ return nonloc::SymbolVal(SymMgr.getSymSymExpr(lhs, op, rhs, type));
+}
+
+NonLoc SValBuilder::makeNonLoc(const SymExpr *operand,
+ QualType fromTy, QualType toTy) {
+ assert(operand);
+ assert(!Loc::isLocType(toTy));
+ return nonloc::SymbolVal(SymMgr.getCastSymbol(operand, fromTy, toTy));
+}
+
+SVal SValBuilder::convertToArrayIndex(SVal val) {
+ if (val.isUnknownOrUndef())
+ return val;
+
+ // Common case: we have an appropriately sized integer.
+ if (nonloc::ConcreteInt* CI = dyn_cast<nonloc::ConcreteInt>(&val)) {
+ const llvm::APSInt& I = CI->getValue();
+ if (I.getBitWidth() == ArrayIndexWidth && I.isSigned())
+ return val;
+ }
+
+ return evalCastFromNonLoc(cast<NonLoc>(val), ArrayIndexTy);
+}
+
+nonloc::ConcreteInt SValBuilder::makeBoolVal(const CXXBoolLiteralExpr *boolean){
+ return makeTruthVal(boolean->getValue());
+}
+
+DefinedOrUnknownSVal
+SValBuilder::getRegionValueSymbolVal(const TypedValueRegion* region) {
+ QualType T = region->getValueType();
+
+ if (!SymbolManager::canSymbolicate(T))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.getRegionValueSymbol(region);
+
+ if (Loc::isLocType(T))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+DefinedOrUnknownSVal
+SValBuilder::getConjuredSymbolVal(const void *symbolTag,
+ const Expr *expr,
+ const LocationContext *LCtx,
+ unsigned count) {
+ QualType T = expr->getType();
+ return getConjuredSymbolVal(symbolTag, expr, LCtx, T, count);
+}
+
+DefinedOrUnknownSVal
+SValBuilder::getConjuredSymbolVal(const void *symbolTag,
+ const Expr *expr,
+ const LocationContext *LCtx,
+ QualType type,
+ unsigned count) {
+ if (!SymbolManager::canSymbolicate(type))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.getConjuredSymbol(expr, LCtx, type, count, symbolTag);
+
+ if (Loc::isLocType(type))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+
+DefinedOrUnknownSVal
+SValBuilder::getConjuredSymbolVal(const Stmt *stmt,
+ const LocationContext *LCtx,
+ QualType type,
+ unsigned visitCount) {
+ if (!SymbolManager::canSymbolicate(type))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.getConjuredSymbol(stmt, LCtx, type, visitCount);
+
+ if (Loc::isLocType(type))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+DefinedSVal SValBuilder::getMetadataSymbolVal(const void *symbolTag,
+ const MemRegion *region,
+ const Expr *expr, QualType type,
+ unsigned count) {
+ assert(SymbolManager::canSymbolicate(type) && "Invalid metadata symbol type");
+
+ SymbolRef sym =
+ SymMgr.getMetadataSymbol(region, expr, type, count, symbolTag);
+
+ if (Loc::isLocType(type))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+DefinedOrUnknownSVal
+SValBuilder::getDerivedRegionValueSymbolVal(SymbolRef parentSymbol,
+ const TypedValueRegion *region) {
+ QualType T = region->getValueType();
+
+ if (!SymbolManager::canSymbolicate(T))
+ return UnknownVal();
+
+ SymbolRef sym = SymMgr.getDerivedSymbol(parentSymbol, region);
+
+ if (Loc::isLocType(T))
+ return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
+
+ return nonloc::SymbolVal(sym);
+}
+
+DefinedSVal SValBuilder::getFunctionPointer(const FunctionDecl *func) {
+ return loc::MemRegionVal(MemMgr.getFunctionTextRegion(func));
+}
+
+DefinedSVal SValBuilder::getBlockPointer(const BlockDecl *block,
+ CanQualType locTy,
+ const LocationContext *locContext) {
+ const BlockTextRegion *BC =
+ MemMgr.getBlockTextRegion(block, locTy, locContext->getAnalysisDeclContext());
+ const BlockDataRegion *BD = MemMgr.getBlockDataRegion(BC, locContext);
+ return loc::MemRegionVal(BD);
+}
+
+//===----------------------------------------------------------------------===//
+
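+// makeGenericVal builds a symbolic binary expression only when taint must be
+// propagated from one of the operands; otherwise the result is Unknown.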
+SVal SValBuilder::makeGenericVal(ProgramStateRef State,
+ BinaryOperator::Opcode Op,
+ NonLoc LHS, NonLoc RHS,
+ QualType ResultTy) {
+ // If operands are tainted, create a symbol to ensure that we propagate taint.
+ if (State->isTainted(RHS) || State->isTainted(LHS)) {
+ const SymExpr *symLHS;
+ const SymExpr *symRHS;
+
+ if (const nonloc::ConcreteInt *rInt = dyn_cast<nonloc::ConcreteInt>(&RHS)) {
+ symLHS = LHS.getAsSymExpr();
+ return makeNonLoc(symLHS, Op, rInt->getValue(), ResultTy);
+ }
+
+ if (const nonloc::ConcreteInt *lInt = dyn_cast<nonloc::ConcreteInt>(&LHS)) {
+ symRHS = RHS.getAsSymExpr();
+ return makeNonLoc(lInt->getValue(), Op, symRHS, ResultTy);
+ }
+
+ symLHS = LHS.getAsSymExpr();
+ symRHS = RHS.getAsSymExpr();
+ return makeNonLoc(symLHS, Op, symRHS, ResultTy);
+ }
+ return UnknownVal();
+}
+
+
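+// Dispatch a binary operation on the Loc/NonLoc shape of the operands.
+// Undefined operands yield Undefined, Unknown operands yield Unknown, and a
+// NonLoc + Loc pair is commuted into evalBinOpLN (valid only for addition).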
+SVal SValBuilder::evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
+ SVal lhs, SVal rhs, QualType type) {
+
+ if (lhs.isUndef() || rhs.isUndef())
+ return UndefinedVal();
+
+ if (lhs.isUnknown() || rhs.isUnknown())
+ return UnknownVal();
+
+ if (isa<Loc>(lhs)) {
+ if (isa<Loc>(rhs))
+ return evalBinOpLL(state, op, cast<Loc>(lhs), cast<Loc>(rhs), type);
+
+ return evalBinOpLN(state, op, cast<Loc>(lhs), cast<NonLoc>(rhs), type);
+ }
+
+ if (isa<Loc>(rhs)) {
+ // Support pointer arithmetic where the addend is on the left
+ // and the pointer on the right.
+ assert(op == BO_Add);
+
+ // Commute the operands.
+ return evalBinOpLN(state, op, cast<Loc>(rhs), cast<NonLoc>(lhs), type);
+ }
+
+ return evalBinOpNN(state, op, cast<NonLoc>(lhs), cast<NonLoc>(rhs), type);
+}
+
+DefinedOrUnknownSVal SValBuilder::evalEQ(ProgramStateRef state,
+ DefinedOrUnknownSVal lhs,
+ DefinedOrUnknownSVal rhs) {
+ return cast<DefinedOrUnknownSVal>(evalBinOp(state, BO_EQ, lhs, rhs,
+ Context.IntTy));
+}
+
+/// Recursively check if the pointer types are equal modulo const, volatile,
+/// and restrict qualifiers. Assumes the input types are canonical.
+/// TODO: This is based on code in SemaCast; can we reuse it?
+static bool haveSimilarTypes(ASTContext &Context, QualType T1,
+ QualType T2) {
+ while (Context.UnwrapSimilarPointerTypes(T1, T2)) {
+ Qualifiers Quals1, Quals2;
+ T1 = Context.getUnqualifiedArrayType(T1, Quals1);
+ T2 = Context.getUnqualifiedArrayType(T2, Quals2);
+
+    // Make sure that the non-CVR qualifiers (e.g., address
+ // spaces) are identical.
+ Quals1.removeCVRQualifiers();
+ Quals2.removeCVRQualifiers();
+ if (Quals1 != Quals2)
+ return false;
+ }
+
+ if (T1 != T2)
+ return false;
+
+ return true;
+}
+
+// FIXME: should rewrite according to the cast kind.
+SVal SValBuilder::evalCast(SVal val, QualType castTy, QualType originalTy) {
+ castTy = Context.getCanonicalType(castTy);
+ originalTy = Context.getCanonicalType(originalTy);
+ if (val.isUnknownOrUndef() || castTy == originalTy)
+ return val;
+
+ // For const casts, just propagate the value.
+ if (!castTy->isVariableArrayType() && !originalTy->isVariableArrayType())
+ if (haveSimilarTypes(Context, Context.getPointerType(castTy),
+ Context.getPointerType(originalTy)))
+ return val;
+
+ // Check for casts from pointers to integers.
+ if (castTy->isIntegerType() && Loc::isLocType(originalTy))
+ return evalCastFromLoc(cast<Loc>(val), castTy);
+
+ // Check for casts from integers to pointers.
+ if (Loc::isLocType(castTy) && originalTy->isIntegerType()) {
+ if (nonloc::LocAsInteger *LV = dyn_cast<nonloc::LocAsInteger>(&val)) {
+ if (const MemRegion *R = LV->getLoc().getAsRegion()) {
+ StoreManager &storeMgr = StateMgr.getStoreManager();
+ R = storeMgr.castRegion(R, castTy);
+ return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
+ }
+ return LV->getLoc();
+ }
+ return dispatchCast(val, castTy);
+ }
+
+ // Just pass through function and block pointers.
+ if (originalTy->isBlockPointerType() || originalTy->isFunctionPointerType()) {
+ assert(Loc::isLocType(castTy));
+ return val;
+ }
+
+ // Check for casts from array type to another type.
+ if (originalTy->isArrayType()) {
+ // We will always decay to a pointer.
+ val = StateMgr.ArrayToPointer(cast<Loc>(val));
+
+ // Are we casting from an array to a pointer? If so just pass on
+ // the decayed value.
+ if (castTy->isPointerType())
+ return val;
+
+ // Are we casting from an array to an integer? If so, cast the decayed
+ // pointer value to an integer.
+ assert(castTy->isIntegerType());
+
+ // FIXME: Keep these here for now in case we decide soon that we
+ // need the original decayed type.
+ // QualType elemTy = cast<ArrayType>(originalTy)->getElementType();
+ // QualType pointerTy = C.getPointerType(elemTy);
+ return evalCastFromLoc(cast<Loc>(val), castTy);
+ }
+
+ // Check for casts from a region to a specific type.
+ if (const MemRegion *R = val.getAsRegion()) {
+ // FIXME: We should handle the case where we strip off view layers to get
+ // to a desugared type.
+
+ if (!Loc::isLocType(castTy)) {
+ // FIXME: There can be gross cases where one casts the result of a function
+ // (that returns a pointer) to some other value that happens to fit
+ // within that pointer value. We currently have no good way to
+ // model such operations. When this happens, the underlying operation
+ // is that the caller is reasoning about bits. Conceptually we are
+ // layering a "view" of a location on top of those bits. Perhaps
+ // we need to be more lazy about mutual possible views, even on an
+ // SVal? This may be necessary for bit-level reasoning as well.
+ return UnknownVal();
+ }
+
+ // We get a symbolic function pointer for a dereference of a function
+ // pointer, but it is of function type. Example:
+
+ // struct FPRec {
+ // void (*my_func)(int * x);
+ // };
+ //
+ // int bar(int x);
+ //
+ // int f1_a(struct FPRec* foo) {
+ // int x;
+ // (*foo->my_func)(&x);
+ // return bar(x)+1; // no-warning
+ // }
+
+ assert(Loc::isLocType(originalTy) || originalTy->isFunctionType() ||
+ originalTy->isBlockPointerType() || castTy->isReferenceType());
+
+ StoreManager &storeMgr = StateMgr.getStoreManager();
+
+ // Delegate to store manager to get the result of casting a region to a
+ // different type. If the MemRegion* returned is NULL, this expression
+    // evaluates to UnknownVal.
+ R = storeMgr.castRegion(R, castTy);
+ return R ? SVal(loc::MemRegionVal(R)) : UnknownVal();
+ }
+
+ return dispatchCast(val, castTy);
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp
new file mode 100644
index 0000000..b94aff4
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SVals.cpp
@@ -0,0 +1,331 @@
+//== SVals.cpp - Abstract RValues for Path-Sens. Value Tracking ----*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SVal, Loc, and NonLoc, classes that represent
+// abstract r-values for use with path-sensitive value tracking.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/Basic/IdentifierTable.h"
+using namespace clang;
+using namespace ento;
+using llvm::APSInt;
+
+//===----------------------------------------------------------------------===//
+// Symbol iteration within an SVal.
+//===----------------------------------------------------------------------===//
+
+
+//===----------------------------------------------------------------------===//
+// Utility methods.
+//===----------------------------------------------------------------------===//
+
+bool SVal::hasConjuredSymbol() const {
+ if (const nonloc::SymbolVal* SV = dyn_cast<nonloc::SymbolVal>(this)) {
+ SymbolRef sym = SV->getSymbol();
+ if (isa<SymbolConjured>(sym))
+ return true;
+ }
+
+ if (const loc::MemRegionVal *RV = dyn_cast<loc::MemRegionVal>(this)) {
+ const MemRegion *R = RV->getRegion();
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R)) {
+ SymbolRef sym = SR->getSymbol();
+ if (isa<SymbolConjured>(sym))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+const FunctionDecl *SVal::getAsFunctionDecl() const {
+ if (const loc::MemRegionVal* X = dyn_cast<loc::MemRegionVal>(this)) {
+ const MemRegion* R = X->getRegion();
+ if (const FunctionTextRegion *CTR = R->getAs<FunctionTextRegion>())
+ return CTR->getDecl();
+ }
+
+ return 0;
+}
+
+/// \brief If this SVal is a location (subclasses Loc) and wraps a symbol,
+/// return that SymbolRef. Otherwise return 0.
+///
+/// Implicit casts (e.g., void* -> char*) can turn a symbolic region into an
+/// element region. If that is the case, get the underlying region.
+SymbolRef SVal::getAsLocSymbol() const {
+ // FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
+ if (const nonloc::LocAsInteger *X = dyn_cast<nonloc::LocAsInteger>(this))
+ return X->getLoc().getAsLocSymbol();
+
+ if (const loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(this)) {
+ const MemRegion *R = X->stripCasts();
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(R))
+ return SymR->getSymbol();
+ }
+ return 0;
+}
+
+/// Get the symbol in the SVal or its base region.
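+/// For example (illustrative), for a value wrapping the field region
+/// 'SymRegion{$x}.f', this walks the super-region chain and returns '$x'.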
+SymbolRef SVal::getLocSymbolInBase() const {
+ const loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(this);
+
+ if (!X)
+ return 0;
+
+ const MemRegion *R = X->getRegion();
+
+ while (const SubRegion *SR = dyn_cast<SubRegion>(R)) {
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SR))
+ return SymR->getSymbol();
+ else
+ R = SR->getSuperRegion();
+ }
+
+ return 0;
+}
+
+// TODO: The next 3 functions have to be simplified.
+
+/// \brief If this SVal wraps a symbol, return that SymbolRef.
+/// Otherwise return 0.
+SymbolRef SVal::getAsSymbol() const {
+ // FIXME: should we consider SymbolRef wrapped in CodeTextRegion?
+ if (const nonloc::SymbolVal *X = dyn_cast<nonloc::SymbolVal>(this))
+ return X->getSymbol();
+
+ return getAsLocSymbol();
+}
+
+/// getAsSymbolicExpression - If this SVal wraps a symbolic expression then
+/// return that expression. Otherwise return NULL.
+const SymExpr *SVal::getAsSymbolicExpression() const {
+ if (const nonloc::SymbolVal *X = dyn_cast<nonloc::SymbolVal>(this))
+ return X->getSymbol();
+
+ return getAsSymbol();
+}
+
+const SymExpr* SVal::getAsSymExpr() const {
+ const SymExpr* Sym = getAsSymbol();
+ if (!Sym)
+ Sym = getAsSymbolicExpression();
+ return Sym;
+}
+
+const MemRegion *SVal::getAsRegion() const {
+ if (const loc::MemRegionVal *X = dyn_cast<loc::MemRegionVal>(this))
+ return X->getRegion();
+
+ if (const nonloc::LocAsInteger *X = dyn_cast<nonloc::LocAsInteger>(this)) {
+ return X->getLoc().getAsRegion();
+ }
+
+ return 0;
+}
+
+const MemRegion *loc::MemRegionVal::stripCasts() const {
+ const MemRegion *R = getRegion();
+ return R ? R->StripCasts() : NULL;
+}
+
+const void *nonloc::LazyCompoundVal::getStore() const {
+ return static_cast<const LazyCompoundValData*>(Data)->getStore();
+}
+
+const TypedRegion *nonloc::LazyCompoundVal::getRegion() const {
+ return static_cast<const LazyCompoundValData*>(Data)->getRegion();
+}
+
+//===----------------------------------------------------------------------===//
+// Other Iterators.
+//===----------------------------------------------------------------------===//
+
+nonloc::CompoundVal::iterator nonloc::CompoundVal::begin() const {
+ return getValue()->begin();
+}
+
+nonloc::CompoundVal::iterator nonloc::CompoundVal::end() const {
+ return getValue()->end();
+}
+
+//===----------------------------------------------------------------------===//
+// Useful predicates.
+//===----------------------------------------------------------------------===//
+
+bool SVal::isConstant() const {
+ return isa<nonloc::ConcreteInt>(this) || isa<loc::ConcreteInt>(this);
+}
+
+bool SVal::isConstant(int I) const {
+ if (isa<loc::ConcreteInt>(*this))
+ return cast<loc::ConcreteInt>(*this).getValue() == I;
+ else if (isa<nonloc::ConcreteInt>(*this))
+ return cast<nonloc::ConcreteInt>(*this).getValue() == I;
+ else
+ return false;
+}
+
+bool SVal::isZeroConstant() const {
+ return isConstant(0);
+}
+
+
+//===----------------------------------------------------------------------===//
+// Transfer function dispatch for Non-Locs.
+//===----------------------------------------------------------------------===//
+
+SVal nonloc::ConcreteInt::evalBinOp(SValBuilder &svalBuilder,
+ BinaryOperator::Opcode Op,
+ const nonloc::ConcreteInt& R) const {
+ const llvm::APSInt* X =
+ svalBuilder.getBasicValueFactory().evalAPSInt(Op, getValue(), R.getValue());
+
+ if (X)
+ return nonloc::ConcreteInt(*X);
+ else
+ return UndefinedVal();
+}
+
+nonloc::ConcreteInt
+nonloc::ConcreteInt::evalComplement(SValBuilder &svalBuilder) const {
+ return svalBuilder.makeIntVal(~getValue());
+}
+
+nonloc::ConcreteInt
+nonloc::ConcreteInt::evalMinus(SValBuilder &svalBuilder) const {
+ return svalBuilder.makeIntVal(-getValue());
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function dispatch for Locs.
+//===----------------------------------------------------------------------===//
+
+SVal loc::ConcreteInt::evalBinOp(BasicValueFactory& BasicVals,
+ BinaryOperator::Opcode Op,
+ const loc::ConcreteInt& R) const {
+
+ assert (Op == BO_Add || Op == BO_Sub ||
+ (Op >= BO_LT && Op <= BO_NE));
+
+ const llvm::APSInt* X = BasicVals.evalAPSInt(Op, getValue(), R.getValue());
+
+ if (X)
+ return loc::ConcreteInt(*X);
+ else
+ return UndefinedVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Pretty-Printing.
+//===----------------------------------------------------------------------===//
+
+void SVal::dump() const { dumpToStream(llvm::errs()); }
+
+void SVal::dumpToStream(raw_ostream &os) const {
+ switch (getBaseKind()) {
+ case UnknownKind:
+ os << "Unknown";
+ break;
+ case NonLocKind:
+ cast<NonLoc>(this)->dumpToStream(os);
+ break;
+ case LocKind:
+ cast<Loc>(this)->dumpToStream(os);
+ break;
+ case UndefinedKind:
+ os << "Undefined";
+ break;
+ }
+}
+
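+// As a rough illustration of the output format: a 32-bit unsigned
+// nonloc::ConcreteInt 5 prints as "5 U32b", and a LocAsInteger prints as
+// "<loc> [as 32 bit integer]".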
+void NonLoc::dumpToStream(raw_ostream &os) const {
+ switch (getSubKind()) {
+ case nonloc::ConcreteIntKind: {
+ const nonloc::ConcreteInt& C = *cast<nonloc::ConcreteInt>(this);
+ if (C.getValue().isUnsigned())
+ os << C.getValue().getZExtValue();
+ else
+ os << C.getValue().getSExtValue();
+ os << ' ' << (C.getValue().isUnsigned() ? 'U' : 'S')
+ << C.getValue().getBitWidth() << 'b';
+ break;
+ }
+ case nonloc::SymbolValKind: {
+ os << cast<nonloc::SymbolVal>(this)->getSymbol();
+ break;
+ }
+ case nonloc::LocAsIntegerKind: {
+ const nonloc::LocAsInteger& C = *cast<nonloc::LocAsInteger>(this);
+ os << C.getLoc() << " [as " << C.getNumBits() << " bit integer]";
+ break;
+ }
+ case nonloc::CompoundValKind: {
+ const nonloc::CompoundVal& C = *cast<nonloc::CompoundVal>(this);
+ os << "compoundVal{";
+ bool first = true;
+ for (nonloc::CompoundVal::iterator I=C.begin(), E=C.end(); I!=E; ++I) {
+ if (first) {
+ os << ' '; first = false;
+ }
+ else
+ os << ", ";
+
+ (*I).dumpToStream(os);
+ }
+ os << "}";
+ break;
+ }
+ case nonloc::LazyCompoundValKind: {
+ const nonloc::LazyCompoundVal &C = *cast<nonloc::LazyCompoundVal>(this);
+ os << "lazyCompoundVal{" << const_cast<void *>(C.getStore())
+ << ',' << C.getRegion()
+ << '}';
+ break;
+ }
+ default:
+ assert (false && "Pretty-printed not implemented for this NonLoc.");
+ break;
+ }
+}
+
+void Loc::dumpToStream(raw_ostream &os) const {
+ switch (getSubKind()) {
+ case loc::ConcreteIntKind:
+ os << cast<loc::ConcreteInt>(this)->getValue().getZExtValue() << " (Loc)";
+ break;
+ case loc::GotoLabelKind:
+ os << "&&" << cast<loc::GotoLabel>(this)->getLabel()->getName();
+ break;
+ case loc::MemRegionKind:
+ os << '&' << cast<loc::MemRegionVal>(this)->getRegion()->getString();
+ break;
+ case loc::ObjCPropRefKind: {
+ const ObjCPropertyRefExpr *E = cast<loc::ObjCPropRef>(this)->getPropRefExpr();
+ os << "objc-prop{";
+ if (E->isSuperReceiver())
+ os << "super.";
+ else if (E->getBase())
+ os << "<base>.";
+
+ if (E->isImplicitProperty())
+ os << E->getImplicitPropertyGetter()->getSelector().getAsString();
+ else
+ os << E->getExplicitProperty()->getName();
+
+ os << "}";
+ break;
+ }
+ default:
+ llvm_unreachable("Pretty-printing not implemented for this Loc.");
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
new file mode 100644
index 0000000..a76a2da
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
@@ -0,0 +1,307 @@
+//== SimpleConstraintManager.cpp --------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SimpleConstraintManager, a class that holds code shared
+// between BasicConstraintManager and RangeConstraintManager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "SimpleConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+namespace clang {
+
+namespace ento {
+
+SimpleConstraintManager::~SimpleConstraintManager() {}
+
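+// Returns true if the constraint manager can bound the given value directly.
+// As a rough illustration: a plain symbol '$x' or an additive expression like
+// '$x + 1' can be constrained, while bitwise or multiplicative forms such as
+// '$x & 0xff' or '$x * 2' currently cannot (see the switch below).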
+bool SimpleConstraintManager::canReasonAbout(SVal X) const {
+ nonloc::SymbolVal *SymVal = dyn_cast<nonloc::SymbolVal>(&X);
+ if (SymVal && SymVal->isExpression()) {
+ const SymExpr *SE = SymVal->getSymbol();
+
+ if (const SymIntExpr *SIE = dyn_cast<SymIntExpr>(SE)) {
+ switch (SIE->getOpcode()) {
+ // We don't reason yet about bitwise-constraints on symbolic values.
+ case BO_And:
+ case BO_Or:
+ case BO_Xor:
+ return false;
+ // We don't reason yet about these arithmetic constraints on
+ // symbolic values.
+ case BO_Mul:
+ case BO_Div:
+ case BO_Rem:
+ case BO_Shl:
+ case BO_Shr:
+ return false;
+ // All other cases.
+ default:
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ return true;
+}
+
+ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state,
+ DefinedSVal Cond,
+ bool Assumption) {
+ if (isa<NonLoc>(Cond))
+ return assume(state, cast<NonLoc>(Cond), Assumption);
+ else
+ return assume(state, cast<Loc>(Cond), Assumption);
+}
+
+ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state, Loc cond,
+ bool assumption) {
+ state = assumeAux(state, cond, assumption);
+ return SU.processAssume(state, cond, assumption);
+}
+
+ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
+ Loc Cond, bool Assumption) {
+
+ BasicValueFactory &BasicVals = state->getBasicVals();
+
+ switch (Cond.getSubKind()) {
+ default:
+ assert (false && "'Assume' not implemented for this Loc.");
+ return state;
+
+ case loc::MemRegionKind: {
+ // FIXME: Should this go into the storemanager?
+
+ const MemRegion *R = cast<loc::MemRegionVal>(Cond).getRegion();
+ const SubRegion *SubR = dyn_cast<SubRegion>(R);
+
+ while (SubR) {
+ // FIXME: now we only find the first symbolic region.
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SubR)) {
+ const llvm::APSInt &zero = BasicVals.getZeroWithPtrWidth();
+ if (Assumption)
+ return assumeSymNE(state, SymR->getSymbol(), zero, zero);
+ else
+ return assumeSymEQ(state, SymR->getSymbol(), zero, zero);
+ }
+ SubR = dyn_cast<SubRegion>(SubR->getSuperRegion());
+ }
+
+ // FALL-THROUGH.
+ }
+
+ case loc::GotoLabelKind:
+ return Assumption ? state : NULL;
+
+ case loc::ConcreteIntKind: {
+ bool b = cast<loc::ConcreteInt>(Cond).getValue() != 0;
+ bool isFeasible = b ? Assumption : !Assumption;
+ return isFeasible ? state : NULL;
+ }
+ } // end switch
+}
+
+ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef state,
+ NonLoc cond,
+ bool assumption) {
+ state = assumeAux(state, cond, assumption);
+ return SU.processAssume(state, cond, assumption);
+}
+
+static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) {
+ // FIXME: This should probably be part of BinaryOperator, since this isn't
+ // the only place it's used. (This code was copied from SimpleSValBuilder.cpp.)
+ switch (op) {
+ default:
+ llvm_unreachable("Invalid opcode.");
+ case BO_LT: return BO_GE;
+ case BO_GT: return BO_LE;
+ case BO_LE: return BO_GT;
+ case BO_GE: return BO_LT;
+ case BO_EQ: return BO_NE;
+ case BO_NE: return BO_EQ;
+ }
+}
+
+
+ProgramStateRef SimpleConstraintManager::assumeAuxForSymbol(
+ ProgramStateRef State,
+ SymbolRef Sym,
+ bool Assumption) {
+ QualType T = State->getSymbolManager().getType(Sym);
+ const llvm::APSInt &zero = State->getBasicVals().getValue(0, T);
+ if (Assumption)
+ return assumeSymNE(State, Sym, zero, zero);
+ else
+ return assumeSymEQ(State, Sym, zero, zero);
+}
+
+ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef state,
+ NonLoc Cond,
+ bool Assumption) {
+
+ // We cannot reason about SymSymExprs, and can only reason about some
+ // SymIntExprs.
+ if (!canReasonAbout(Cond)) {
+ // Just add the constraint to the expression without trying to simplify.
+ SymbolRef sym = Cond.getAsSymExpr();
+ return assumeAuxForSymbol(state, sym, Assumption);
+ }
+
+ BasicValueFactory &BasicVals = state->getBasicVals();
+ SymbolManager &SymMgr = state->getSymbolManager();
+
+ switch (Cond.getSubKind()) {
+ default:
+ llvm_unreachable("'Assume' not implemented for this NonLoc");
+
+ case nonloc::SymbolValKind: {
+ nonloc::SymbolVal& SV = cast<nonloc::SymbolVal>(Cond);
+ SymbolRef sym = SV.getSymbol();
+ assert(sym);
+
+ // Handle SymbolData.
+ if (!SV.isExpression()) {
+ return assumeAuxForSymbol(state, sym, Assumption);
+
+ // Handle symbolic expression.
+ } else {
+ // We can only simplify expressions whose RHS is an integer.
+ const SymIntExpr *SE = dyn_cast<SymIntExpr>(sym);
+ if (!SE)
+ return assumeAuxForSymbol(state, sym, Assumption);
+
+ BinaryOperator::Opcode op = SE->getOpcode();
+ // Implicitly compare non-comparison expressions to 0.
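+      // (e.g., assuming "$x + 1" to be true is treated as "$x + 1 != 0";
+      // this is an illustrative reading of the code below.)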
+ if (!BinaryOperator::isComparisonOp(op)) {
+ QualType T = SymMgr.getType(SE);
+ const llvm::APSInt &zero = BasicVals.getValue(0, T);
+ op = (Assumption ? BO_NE : BO_EQ);
+ return assumeSymRel(state, SE, op, zero);
+ }
+ // From here on out, op is the real comparison we'll be testing.
+ if (!Assumption)
+ op = NegateComparison(op);
+
+ return assumeSymRel(state, SE->getLHS(), op, SE->getRHS());
+ }
+ }
+
+ case nonloc::ConcreteIntKind: {
+ bool b = cast<nonloc::ConcreteInt>(Cond).getValue() != 0;
+ bool isFeasible = b ? Assumption : !Assumption;
+ return isFeasible ? state : NULL;
+ }
+
+ case nonloc::LocAsIntegerKind:
+ return assumeAux(state, cast<nonloc::LocAsInteger>(Cond).getLoc(),
+ Assumption);
+ } // end switch
+}
+
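+// A rough sketch of the intent: for an LHS of the form '$sym + 3' this
+// returns +3 and rebinds Sym to '$sym'; for '$sym - 3' it returns -3. A bare
+// symbol or a '$sym1 + $sym2' expression yields an adjustment of 0.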
+static llvm::APSInt computeAdjustment(const SymExpr *LHS,
+ SymbolRef &Sym) {
+ llvm::APSInt DefaultAdjustment;
+ DefaultAdjustment = 0;
+
+ // First check if the LHS is a simple symbol reference.
+ if (isa<SymbolData>(LHS))
+ return DefaultAdjustment;
+
+ // Next, see if it's a "($sym+constant1)" expression.
+ const SymIntExpr *SE = dyn_cast<SymIntExpr>(LHS);
+
+ // We cannot simplify "($sym1+$sym2)".
+ if (!SE)
+ return DefaultAdjustment;
+
+ // Get the constant out of the expression "($sym+constant1)" or
+ // "<expr>+constant1".
+ Sym = SE->getLHS();
+ switch (SE->getOpcode()) {
+ case BO_Add:
+ return SE->getRHS();
+ case BO_Sub:
+ return -SE->getRHS();
+ default:
+ // We cannot simplify non-additive operators.
+ return DefaultAdjustment;
+ }
+}
+
+ProgramStateRef SimpleConstraintManager::assumeSymRel(ProgramStateRef state,
+ const SymExpr *LHS,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt& Int) {
+ assert(BinaryOperator::isComparisonOp(op) &&
+ "Non-comparison ops should be rewritten as comparisons to zero.");
+
+ // We only handle simple comparisons of the form "$sym == constant"
+ // or "($sym+constant1) == constant2".
+ // The adjustment is "constant1" in the above expression. It's used to
+ // "slide" the solution range around for modular arithmetic. For example,
+ // x < 4 has the solution [0, 3]. x+2 < 4 has the solution [0-2, 3-2], which
+ // in modular arithmetic is [0, 1] U [UINT_MAX-1, UINT_MAX]. It's up to
+ // the subclasses of SimpleConstraintManager to handle the adjustment.
+ SymbolRef Sym = LHS;
+ llvm::APSInt Adjustment = computeAdjustment(LHS, Sym);
+
+ // FIXME: This next section is a hack. It silently converts the integers to
+ // be of the same type as the symbol, which is not always correct. Really the
+ // comparisons should be performed using the Int's type, then mapped back to
+ // the symbol's range of values.
+ ProgramStateManager &StateMgr = state->getStateManager();
+ ASTContext &Ctx = StateMgr.getContext();
+
+ QualType T = Sym->getType(Ctx);
+ assert(T->isIntegerType() || Loc::isLocType(T));
+ unsigned bitwidth = Ctx.getTypeSize(T);
+ bool isSymUnsigned
+ = T->isUnsignedIntegerOrEnumerationType() || Loc::isLocType(T);
+
+ // Convert the adjustment.
+ Adjustment.setIsUnsigned(isSymUnsigned);
+ Adjustment = Adjustment.extOrTrunc(bitwidth);
+
+ // Convert the right-hand side integer.
+ llvm::APSInt ConvertedInt(Int, isSymUnsigned);
+ ConvertedInt = ConvertedInt.extOrTrunc(bitwidth);
+
+ switch (op) {
+ default:
+    // No logic yet for other operators. Assume the constraint is feasible.
+ return state;
+
+ case BO_EQ:
+ return assumeSymEQ(state, Sym, ConvertedInt, Adjustment);
+
+ case BO_NE:
+ return assumeSymNE(state, Sym, ConvertedInt, Adjustment);
+
+ case BO_GT:
+ return assumeSymGT(state, Sym, ConvertedInt, Adjustment);
+
+ case BO_GE:
+ return assumeSymGE(state, Sym, ConvertedInt, Adjustment);
+
+ case BO_LT:
+ return assumeSymLT(state, Sym, ConvertedInt, Adjustment);
+
+ case BO_LE:
+ return assumeSymLE(state, Sym, ConvertedInt, Adjustment);
+ } // end switch
+}
+
+} // end of namespace ento
+
+} // end of namespace clang
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
new file mode 100644
index 0000000..e082d9d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.h
@@ -0,0 +1,101 @@
+//== SimpleConstraintManager.h ----------------------------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Code shared between BasicConstraintManager and RangeConstraintManager.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_SIMPLE_CONSTRAINT_MANAGER_H
+#define LLVM_CLANG_GR_SIMPLE_CONSTRAINT_MANAGER_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/ConstraintManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+namespace clang {
+
+namespace ento {
+
+class SimpleConstraintManager : public ConstraintManager {
+ SubEngine &SU;
+public:
+ SimpleConstraintManager(SubEngine &subengine) : SU(subengine) {}
+ virtual ~SimpleConstraintManager();
+
+ //===------------------------------------------------------------------===//
+ // Common implementation for the interface provided by ConstraintManager.
+ //===------------------------------------------------------------------===//
+
+ ProgramStateRef assume(ProgramStateRef state, DefinedSVal Cond,
+ bool Assumption);
+
+ ProgramStateRef assume(ProgramStateRef state, Loc Cond, bool Assumption);
+
+ ProgramStateRef assume(ProgramStateRef state, NonLoc Cond, bool Assumption);
+
+ ProgramStateRef assumeSymRel(ProgramStateRef state,
+ const SymExpr *LHS,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt& Int);
+
+protected:
+
+ //===------------------------------------------------------------------===//
+ // Interface that subclasses must implement.
+ //===------------------------------------------------------------------===//
+
+ // Each of these is of the form "$sym+Adj <> V", where "<>" is the comparison
+ // operation for the method being invoked.
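+  //
+  // For example (illustrative), a call such as
+  //   assumeSymLT(state, $x, 4, 1)
+  // asks the subclass to constrain "$x + 1 < 4".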
+ virtual ProgramStateRef assumeSymNE(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
+
+ virtual ProgramStateRef assumeSymEQ(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
+
+ virtual ProgramStateRef assumeSymLT(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
+
+ virtual ProgramStateRef assumeSymGT(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
+
+ virtual ProgramStateRef assumeSymLE(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
+
+ virtual ProgramStateRef assumeSymGE(ProgramStateRef state, SymbolRef sym,
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
+
+ //===------------------------------------------------------------------===//
+ // Internal implementation.
+ //===------------------------------------------------------------------===//
+
+ bool canReasonAbout(SVal X) const;
+
+ ProgramStateRef assumeAux(ProgramStateRef state,
+ Loc Cond,
+ bool Assumption);
+
+ ProgramStateRef assumeAux(ProgramStateRef state,
+ NonLoc Cond,
+ bool Assumption);
+
+ ProgramStateRef assumeAuxForSymbol(ProgramStateRef State,
+ SymbolRef Sym,
+ bool Assumption);
+};
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
new file mode 100644
index 0000000..d0558f1
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -0,0 +1,973 @@
+//== SimpleSValBuilder.cpp - A basic SValBuilder ----------------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SimpleSValBuilder, a basic implementation of SValBuilder.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+class SimpleSValBuilder : public SValBuilder {
+protected:
+ virtual SVal dispatchCast(SVal val, QualType castTy);
+ virtual SVal evalCastFromNonLoc(NonLoc val, QualType castTy);
+ virtual SVal evalCastFromLoc(Loc val, QualType castTy);
+
+public:
+ SimpleSValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context,
+ ProgramStateManager &stateMgr)
+ : SValBuilder(alloc, context, stateMgr) {}
+ virtual ~SimpleSValBuilder() {}
+
+ virtual SVal evalMinus(NonLoc val);
+ virtual SVal evalComplement(NonLoc val);
+ virtual SVal evalBinOpNN(ProgramStateRef state, BinaryOperator::Opcode op,
+ NonLoc lhs, NonLoc rhs, QualType resultTy);
+ virtual SVal evalBinOpLL(ProgramStateRef state, BinaryOperator::Opcode op,
+ Loc lhs, Loc rhs, QualType resultTy);
+ virtual SVal evalBinOpLN(ProgramStateRef state, BinaryOperator::Opcode op,
+ Loc lhs, NonLoc rhs, QualType resultTy);
+
+ /// getKnownValue - evaluates a given SVal. If the SVal has only one possible
+ /// (integer) value, that value is returned. Otherwise, returns NULL.
+ virtual const llvm::APSInt *getKnownValue(ProgramStateRef state, SVal V);
+
+ SVal MakeSymIntVal(const SymExpr *LHS, BinaryOperator::Opcode op,
+ const llvm::APSInt &RHS, QualType resultTy);
+};
+} // end anonymous namespace
+
+SValBuilder *ento::createSimpleSValBuilder(llvm::BumpPtrAllocator &alloc,
+ ASTContext &context,
+ ProgramStateManager &stateMgr) {
+ return new SimpleSValBuilder(alloc, context, stateMgr);
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function for Casts.
+//===----------------------------------------------------------------------===//
+
+SVal SimpleSValBuilder::dispatchCast(SVal Val, QualType CastTy) {
+ assert(isa<Loc>(&Val) || isa<NonLoc>(&Val));
+ return isa<Loc>(Val) ? evalCastFromLoc(cast<Loc>(Val), CastTy)
+ : evalCastFromNonLoc(cast<NonLoc>(Val), CastTy);
+}
+
+SVal SimpleSValBuilder::evalCastFromNonLoc(NonLoc val, QualType castTy) {
+
+ bool isLocType = Loc::isLocType(castTy);
+
+ if (nonloc::LocAsInteger *LI = dyn_cast<nonloc::LocAsInteger>(&val)) {
+ if (isLocType)
+ return LI->getLoc();
+
+ // FIXME: Correctly support promotions/truncations.
+ unsigned castSize = Context.getTypeSize(castTy);
+ if (castSize == LI->getNumBits())
+ return val;
+ return makeLocAsInteger(LI->getLoc(), castSize);
+ }
+
+ if (const SymExpr *se = val.getAsSymbolicExpression()) {
+ QualType T = Context.getCanonicalType(se->getType(Context));
+ // If types are the same or both are integers, ignore the cast.
+ // FIXME: Remove this hack when we support symbolic truncation/extension.
+ // HACK: If both castTy and T are integers, ignore the cast. This is
+ // not a permanent solution. Eventually we want to precisely handle
+ // extension/truncation of symbolic integers. This prevents us from losing
+ // precision when we assign 'x = y' and 'y' is symbolic and x and y are
+ // different integer types.
+ if (haveSameType(T, castTy))
+ return val;
+
+ if (!isLocType)
+ return makeNonLoc(se, T, castTy);
+ return UnknownVal();
+ }
+
+  // If the value is a non-integer constant, produce unknown.
+ if (!isa<nonloc::ConcreteInt>(val))
+ return UnknownVal();
+
+ // Only handle casts from integers to integers - if val is an integer constant
+  // being cast to a non-integer type, produce unknown.
+ if (!isLocType && !castTy->isIntegerType())
+ return UnknownVal();
+
+ llvm::APSInt i = cast<nonloc::ConcreteInt>(val).getValue();
+ i.setIsUnsigned(castTy->isUnsignedIntegerOrEnumerationType() ||
+ Loc::isLocType(castTy));
+ i = i.extOrTrunc(Context.getTypeSize(castTy));
+
+ if (isLocType)
+ return makeIntLocVal(i);
+ else
+ return makeIntVal(i);
+}
+
+SVal SimpleSValBuilder::evalCastFromLoc(Loc val, QualType castTy) {
+
+ // Casts from pointers -> pointers, just return the lval.
+ //
+ // Casts from pointers -> references, just return the lval. These
+  // can be introduced by the frontend for corner cases, e.g.,
+ // casting from va_list* to __builtin_va_list&.
+ //
+ if (Loc::isLocType(castTy) || castTy->isReferenceType())
+ return val;
+
+ // FIXME: Handle transparent unions where a value can be "transparently"
+ // lifted into a union type.
+ if (castTy->isUnionType())
+ return UnknownVal();
+
+ if (castTy->isIntegerType()) {
+ unsigned BitWidth = Context.getTypeSize(castTy);
+
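+    // A symbolic or region-based location cast to an integer is wrapped as a
+    // LocAsInteger of the target width, e.g. '(long)p' keeps p's location but
+    // remembers the integer view's bit width (illustrative).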
+ if (!isa<loc::ConcreteInt>(val))
+ return makeLocAsInteger(val, BitWidth);
+
+ llvm::APSInt i = cast<loc::ConcreteInt>(val).getValue();
+ i.setIsUnsigned(castTy->isUnsignedIntegerOrEnumerationType() ||
+ Loc::isLocType(castTy));
+ i = i.extOrTrunc(BitWidth);
+ return makeIntVal(i);
+ }
+
+ // All other cases: return 'UnknownVal'. This includes casting pointers
+  // to floats, which is probably badness in itself, but this is a good
+ // intermediate solution until we do something better.
+ return UnknownVal();
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function for unary operators.
+//===----------------------------------------------------------------------===//
+
+SVal SimpleSValBuilder::evalMinus(NonLoc val) {
+ switch (val.getSubKind()) {
+ case nonloc::ConcreteIntKind:
+ return cast<nonloc::ConcreteInt>(val).evalMinus(*this);
+ default:
+ return UnknownVal();
+ }
+}
+
+SVal SimpleSValBuilder::evalComplement(NonLoc X) {
+ switch (X.getSubKind()) {
+ case nonloc::ConcreteIntKind:
+ return cast<nonloc::ConcreteInt>(X).evalComplement(*this);
+ default:
+ return UnknownVal();
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Transfer function for binary operators.
+//===----------------------------------------------------------------------===//
+
+static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) {
+ switch (op) {
+ default:
+ llvm_unreachable("Invalid opcode.");
+ case BO_LT: return BO_GE;
+ case BO_GT: return BO_LE;
+ case BO_LE: return BO_GT;
+ case BO_GE: return BO_LT;
+ case BO_EQ: return BO_NE;
+ case BO_NE: return BO_EQ;
+ }
+}
+
+static BinaryOperator::Opcode ReverseComparison(BinaryOperator::Opcode op) {
+ switch (op) {
+ default:
+ llvm_unreachable("Invalid opcode.");
+ case BO_LT: return BO_GT;
+ case BO_GT: return BO_LT;
+ case BO_LE: return BO_GE;
+ case BO_GE: return BO_LE;
+ case BO_EQ:
+ case BO_NE:
+ return op;
+ }
+}
+
+SVal SimpleSValBuilder::MakeSymIntVal(const SymExpr *LHS,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt &RHS,
+ QualType resultTy) {
+ bool isIdempotent = false;
+
+ // Check for a few special cases with known reductions first.
+ switch (op) {
+ default:
+ // We can't reduce this case; just treat it normally.
+ break;
+ case BO_Mul:
+ // a*0 and a*1
+ if (RHS == 0)
+ return makeIntVal(0, resultTy);
+ else if (RHS == 1)
+ isIdempotent = true;
+ break;
+ case BO_Div:
+ // a/0 and a/1
+ if (RHS == 0)
+ // This is also handled elsewhere.
+ return UndefinedVal();
+ else if (RHS == 1)
+ isIdempotent = true;
+ break;
+ case BO_Rem:
+ // a%0 and a%1
+ if (RHS == 0)
+ // This is also handled elsewhere.
+ return UndefinedVal();
+ else if (RHS == 1)
+ return makeIntVal(0, resultTy);
+ break;
+ case BO_Add:
+ case BO_Sub:
+ case BO_Shl:
+ case BO_Shr:
+ case BO_Xor:
+ // a+0, a-0, a<<0, a>>0, a^0
+ if (RHS == 0)
+ isIdempotent = true;
+ break;
+ case BO_And:
+ // a&0 and a&(~0)
+ if (RHS == 0)
+ return makeIntVal(0, resultTy);
+ else if (RHS.isAllOnesValue())
+ isIdempotent = true;
+ break;
+ case BO_Or:
+ // a|0 and a|(~0)
+ if (RHS == 0)
+ isIdempotent = true;
+ else if (RHS.isAllOnesValue()) {
+ const llvm::APSInt &Result = BasicVals.Convert(resultTy, RHS);
+ return nonloc::ConcreteInt(Result);
+ }
+ break;
+ }
+
+ // Idempotent ops (like a*1) can still change the type of an expression.
+ // Wrap the LHS up in a NonLoc again and let evalCastFromNonLoc do the
+ // dirty work.
+ if (isIdempotent)
+ return evalCastFromNonLoc(nonloc::SymbolVal(LHS), resultTy);
+
+ // If we reach this point, the expression cannot be simplified.
+ // Make a SymbolVal for the entire expression.
+ return makeNonLoc(LHS, op, RHS, resultTy);
+}
+
+SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
+ BinaryOperator::Opcode op,
+ NonLoc lhs, NonLoc rhs,
+ QualType resultTy) {
+ // Handle trivial case where left-side and right-side are the same.
+ if (lhs == rhs)
+ switch (op) {
+ default:
+ break;
+ case BO_EQ:
+ case BO_LE:
+ case BO_GE:
+ return makeTruthVal(true, resultTy);
+ case BO_LT:
+ case BO_GT:
+ case BO_NE:
+ return makeTruthVal(false, resultTy);
+ case BO_Xor:
+ case BO_Sub:
+ return makeIntVal(0, resultTy);
+ case BO_Or:
+ case BO_And:
+ return evalCastFromNonLoc(lhs, resultTy);
+ }
+
+ while (1) {
+ switch (lhs.getSubKind()) {
+ default:
+ return makeGenericVal(state, op, lhs, rhs, resultTy);
+ case nonloc::LocAsIntegerKind: {
+ Loc lhsL = cast<nonloc::LocAsInteger>(lhs).getLoc();
+ switch (rhs.getSubKind()) {
+ case nonloc::LocAsIntegerKind:
+ return evalBinOpLL(state, op, lhsL,
+ cast<nonloc::LocAsInteger>(rhs).getLoc(),
+ resultTy);
+ case nonloc::ConcreteIntKind: {
+ // Transform the integer into a location and compare.
+ llvm::APSInt i = cast<nonloc::ConcreteInt>(rhs).getValue();
+ i.setIsUnsigned(true);
+ i = i.extOrTrunc(Context.getTypeSize(Context.VoidPtrTy));
+ return evalBinOpLL(state, op, lhsL, makeLoc(i), resultTy);
+ }
+ default:
+ switch (op) {
+ case BO_EQ:
+ return makeTruthVal(false, resultTy);
+ case BO_NE:
+ return makeTruthVal(true, resultTy);
+ default:
+ // This case also handles pointer arithmetic.
+ return makeGenericVal(state, op, lhs, rhs, resultTy);
+ }
+ }
+ }
+ case nonloc::ConcreteIntKind: {
+ const nonloc::ConcreteInt& lhsInt = cast<nonloc::ConcreteInt>(lhs);
+
+ // Is the RHS a symbol we can simplify?
+ // FIXME: This was mostly copy/pasted from the LHS-is-a-symbol case.
+ if (const nonloc::SymbolVal *srhs = dyn_cast<nonloc::SymbolVal>(&rhs)) {
+ SymbolRef RSym = srhs->getSymbol();
+ if (RSym->getType(Context)->isIntegerType()) {
+ if (const llvm::APSInt *Constant = state->getSymVal(RSym)) {
+ // The symbol evaluates to a constant.
+ const llvm::APSInt *rhs_I;
+ if (BinaryOperator::isRelationalOp(op))
+ rhs_I = &BasicVals.Convert(lhsInt.getValue(), *Constant);
+ else
+ rhs_I = &BasicVals.Convert(resultTy, *Constant);
+
+ rhs = nonloc::ConcreteInt(*rhs_I);
+ }
+ }
+ }
+
+ if (isa<nonloc::ConcreteInt>(rhs)) {
+ return lhsInt.evalBinOp(*this, op, cast<nonloc::ConcreteInt>(rhs));
+ } else {
+ const llvm::APSInt& lhsValue = lhsInt.getValue();
+
+ // Swap the left and right sides and flip the operator if doing so
+ // allows us to better reason about the expression (this is a form
+ // of expression canonicalization).
+ // While we're at it, catch some special cases for non-commutative ops.
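+        // For example (illustrative): "3 < $x" is rewritten as "$x > 3" so
+        // that the symbol ends up on the left-hand side, which is the shape
+        // MakeSymIntVal below expects.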
+ NonLoc tmp = rhs;
+ rhs = lhs;
+ lhs = tmp;
+
+ switch (op) {
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ op = ReverseComparison(op);
+ continue;
+ case BO_EQ:
+ case BO_NE:
+ case BO_Add:
+ case BO_Mul:
+ case BO_And:
+ case BO_Xor:
+ case BO_Or:
+ continue;
+ case BO_Shr:
+ if (lhsValue.isAllOnesValue() && lhsValue.isSigned())
+ // At this point lhs and rhs have been swapped.
+ return rhs;
+ // FALL-THROUGH
+ case BO_Shl:
+ if (lhsValue == 0)
+ // At this point lhs and rhs have been swapped.
+ return rhs;
+ return makeGenericVal(state, op, rhs, lhs, resultTy);
+ default:
+ return makeGenericVal(state, op, rhs, lhs, resultTy);
+ }
+ }
+ }
+ case nonloc::SymbolValKind: {
+ nonloc::SymbolVal *selhs = cast<nonloc::SymbolVal>(&lhs);
+
+ // LHS is a symbolic expression.
+ if (selhs->isExpression()) {
+
+ // Only handle LHS of the form "$sym op constant", at least for now.
+ const SymIntExpr *symIntExpr =
+ dyn_cast<SymIntExpr>(selhs->getSymbol());
+
+ if (!symIntExpr)
+ return makeGenericVal(state, op, lhs, rhs, resultTy);
+
+ // Is this a logical not? (!x is represented as x == 0.)
+ if (op == BO_EQ && rhs.isZeroConstant()) {
+ // We know how to negate certain expressions. Simplify them here.
+
+ BinaryOperator::Opcode opc = symIntExpr->getOpcode();
+ switch (opc) {
+ default:
+ // We don't know how to negate this operation.
+ // Just handle it as if it were a normal comparison to 0.
+ break;
+ case BO_LAnd:
+ case BO_LOr:
+ llvm_unreachable("Logical operators handled by branching logic.");
+ case BO_Assign:
+ case BO_MulAssign:
+ case BO_DivAssign:
+ case BO_RemAssign:
+ case BO_AddAssign:
+ case BO_SubAssign:
+ case BO_ShlAssign:
+ case BO_ShrAssign:
+ case BO_AndAssign:
+ case BO_XorAssign:
+ case BO_OrAssign:
+ case BO_Comma:
+ llvm_unreachable("'=' and ',' operators handled by ExprEngine.");
+ case BO_PtrMemD:
+ case BO_PtrMemI:
+ llvm_unreachable("Pointer arithmetic not handled here.");
+ case BO_LT:
+ case BO_GT:
+ case BO_LE:
+ case BO_GE:
+ case BO_EQ:
+ case BO_NE:
+ // Negate the comparison and make a value.
+ opc = NegateComparison(opc);
+ assert(symIntExpr->getType(Context) == resultTy);
+ return makeNonLoc(symIntExpr->getLHS(), opc,
+ symIntExpr->getRHS(), resultTy);
+ }
+ }
+
+ // For now, only handle expressions whose RHS is a constant.
+ const nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs);
+ if (!rhsInt)
+ return makeGenericVal(state, op, lhs, rhs, resultTy);
+
+ // If both the LHS and the current expression are additive,
+ // fold their constants.
+ if (BinaryOperator::isAdditiveOp(op)) {
+ BinaryOperator::Opcode lop = symIntExpr->getOpcode();
+ if (BinaryOperator::isAdditiveOp(lop)) {
+ // resultTy may not be the best type to convert to, but it's
+ // probably the best choice in expressions with mixed type
+ // (such as x+1U+2LL). The rules for implicit conversions should
+ // choose a reasonable type to preserve the expression, and will
+ // at least match how the value is going to be used.
+ const llvm::APSInt &first =
+ BasicVals.Convert(resultTy, symIntExpr->getRHS());
+ const llvm::APSInt &second =
+ BasicVals.Convert(resultTy, rhsInt->getValue());
+ const llvm::APSInt *newRHS;
+ if (lop == op)
+ newRHS = BasicVals.evalAPSInt(BO_Add, first, second);
+ else
+ newRHS = BasicVals.evalAPSInt(BO_Sub, first, second);
+ return MakeSymIntVal(symIntExpr->getLHS(), lop, *newRHS, resultTy);
+ }
+ }
+
+ // Otherwise, make a SymbolVal out of the expression.
+ return MakeSymIntVal(symIntExpr, op, rhsInt->getValue(), resultTy);
+
+ // LHS is a simple symbol (not a symbolic expression).
+ } else {
+ nonloc::SymbolVal *slhs = cast<nonloc::SymbolVal>(&lhs);
+ SymbolRef Sym = slhs->getSymbol();
+ QualType lhsType = Sym->getType(Context);
+
+ // The conversion type is usually the result type, but not in the case
+ // of relational expressions.
+ QualType conversionType = resultTy;
+ if (BinaryOperator::isRelationalOp(op))
+ conversionType = lhsType;
+
+ // Does the symbol simplify to a constant? If so, "fold" the constant
+ // by setting 'lhs' to a ConcreteInt and try again.
+ if (lhsType->isIntegerType())
+ if (const llvm::APSInt *Constant = state->getSymVal(Sym)) {
+ // The symbol evaluates to a constant. If necessary, promote the
+ // folded constant (LHS) to the result type.
+ const llvm::APSInt &lhs_I = BasicVals.Convert(conversionType,
+ *Constant);
+ lhs = nonloc::ConcreteInt(lhs_I);
+
+ // Also promote the RHS (if necessary).
+
+ // For shifts, it is not necessary to promote the RHS.
+ if (BinaryOperator::isShiftOp(op))
+ continue;
+
+ // Other operators: do an implicit conversion. This shouldn't be
+ // necessary once we support truncation/extension of symbolic values.
+ if (nonloc::ConcreteInt *rhs_I = dyn_cast<nonloc::ConcreteInt>(&rhs)){
+ rhs = nonloc::ConcreteInt(BasicVals.Convert(conversionType,
+ rhs_I->getValue()));
+ }
+
+ continue;
+ }
+
+ // Is the RHS a symbol we can simplify?
+ if (const nonloc::SymbolVal *srhs = dyn_cast<nonloc::SymbolVal>(&rhs)) {
+ SymbolRef RSym = srhs->getSymbol();
+ if (RSym->getType(Context)->isIntegerType()) {
+ if (const llvm::APSInt *Constant = state->getSymVal(RSym)) {
+ // The symbol evaluates to a constant.
+ const llvm::APSInt &rhs_I = BasicVals.Convert(conversionType,
+ *Constant);
+ rhs = nonloc::ConcreteInt(rhs_I);
+ }
+ }
+ }
+
+ if (isa<nonloc::ConcreteInt>(rhs)) {
+ return MakeSymIntVal(slhs->getSymbol(), op,
+ cast<nonloc::ConcreteInt>(rhs).getValue(),
+ resultTy);
+ }
+
+ return makeGenericVal(state, op, lhs, rhs, resultTy);
+ }
+ }
+ }
+ }
+}
+
+// FIXME: all this logic will change if/when we have MemRegion::getLocation().
+SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
+ BinaryOperator::Opcode op,
+ Loc lhs, Loc rhs,
+ QualType resultTy) {
+ // Only comparisons and subtractions are valid operations on two pointers.
+ // See [C99 6.5.5 through 6.5.14] or [C++0x 5.6 through 5.15].
+  // However, if a pointer is cast to an integer, evalBinOpNN may end up
+ // calling this function with another operation (PR7527). We don't attempt to
+ // model this for now, but it could be useful, particularly when the
+ // "location" is actually an integer value that's been passed through a void*.
+ if (!(BinaryOperator::isComparisonOp(op) || op == BO_Sub))
+ return UnknownVal();
+
+ // Special cases for when both sides are identical.
+ if (lhs == rhs) {
+ switch (op) {
+ default:
+ llvm_unreachable("Unimplemented operation for two identical values");
+ case BO_Sub:
+ return makeZeroVal(resultTy);
+ case BO_EQ:
+ case BO_LE:
+ case BO_GE:
+ return makeTruthVal(true, resultTy);
+ case BO_NE:
+ case BO_LT:
+ case BO_GT:
+ return makeTruthVal(false, resultTy);
+ }
+ }
+
+ switch (lhs.getSubKind()) {
+ default:
+ llvm_unreachable("Ordering not implemented for this Loc.");
+
+ case loc::GotoLabelKind:
+ // The only thing we know about labels is that they're non-null.
+ if (rhs.isZeroConstant()) {
+ switch (op) {
+ default:
+ break;
+ case BO_Sub:
+ return evalCastFromLoc(lhs, resultTy);
+ case BO_EQ:
+ case BO_LE:
+ case BO_LT:
+ return makeTruthVal(false, resultTy);
+ case BO_NE:
+ case BO_GT:
+ case BO_GE:
+ return makeTruthVal(true, resultTy);
+ }
+ }
+ // There may be two labels for the same location, and a function region may
+ // have the same address as a label at the start of the function (depending
+ // on the ABI).
+ // FIXME: we can probably do a comparison against other MemRegions, though.
+ // FIXME: is there a way to tell if two labels refer to the same location?
+ return UnknownVal();
+
+ case loc::ConcreteIntKind: {
+ // If one of the operands is a symbol and the other is a constant,
+ // build an expression for use by the constraint manager.
+ if (SymbolRef rSym = rhs.getAsLocSymbol()) {
+ // We can only build expressions with symbols on the left,
+ // so we need a reversible operator.
+ if (!BinaryOperator::isComparisonOp(op))
+ return UnknownVal();
+
+ const llvm::APSInt &lVal = cast<loc::ConcreteInt>(lhs).getValue();
+ return makeNonLoc(rSym, ReverseComparison(op), lVal, resultTy);
+ }
+
+ // If both operands are constants, just perform the operation.
+ if (loc::ConcreteInt *rInt = dyn_cast<loc::ConcreteInt>(&rhs)) {
+ SVal ResultVal = cast<loc::ConcreteInt>(lhs).evalBinOp(BasicVals, op,
+ *rInt);
+ if (Loc *Result = dyn_cast<Loc>(&ResultVal))
+ return evalCastFromLoc(*Result, resultTy);
+ else
+ return UnknownVal();
+ }
+
+ // Special case comparisons against NULL.
+    // This must come after the check for whether the RHS is a symbol, which
+    // is used to build constraints. The address of any non-symbolic region is
+    // guaranteed to be non-NULL, as is any label.
+ assert(isa<loc::MemRegionVal>(rhs) || isa<loc::GotoLabel>(rhs));
+ if (lhs.isZeroConstant()) {
+ switch (op) {
+ default:
+ break;
+ case BO_EQ:
+ case BO_GT:
+ case BO_GE:
+ return makeTruthVal(false, resultTy);
+ case BO_NE:
+ case BO_LT:
+ case BO_LE:
+ return makeTruthVal(true, resultTy);
+ }
+ }
+
+ // Comparing an arbitrary integer to a region or label address is
+ // completely unknowable.
+ return UnknownVal();
+ }
+ case loc::MemRegionKind: {
+ if (loc::ConcreteInt *rInt = dyn_cast<loc::ConcreteInt>(&rhs)) {
+ // If one of the operands is a symbol and the other is a constant,
+ // build an expression for use by the constraint manager.
+ if (SymbolRef lSym = lhs.getAsLocSymbol())
+ return MakeSymIntVal(lSym, op, rInt->getValue(), resultTy);
+
+ // Special case comparisons to NULL.
+      // This must come after the check for whether the LHS is a symbol, which
+      // is used to build constraints. The address of any non-symbolic region
+      // is guaranteed to be non-NULL.
+ if (rInt->isZeroConstant()) {
+ switch (op) {
+ default:
+ break;
+ case BO_Sub:
+ return evalCastFromLoc(lhs, resultTy);
+ case BO_EQ:
+ case BO_LT:
+ case BO_LE:
+ return makeTruthVal(false, resultTy);
+ case BO_NE:
+ case BO_GT:
+ case BO_GE:
+ return makeTruthVal(true, resultTy);
+ }
+ }
+
+ // Comparing a region to an arbitrary integer is completely unknowable.
+ return UnknownVal();
+ }
+
+ // Get both values as regions, if possible.
+ const MemRegion *LeftMR = lhs.getAsRegion();
+ assert(LeftMR && "MemRegionKind SVal doesn't have a region!");
+
+ const MemRegion *RightMR = rhs.getAsRegion();
+ if (!RightMR)
+ // The RHS is probably a label, which in theory could address a region.
+ // FIXME: we can probably make a more useful statement about non-code
+ // regions, though.
+ return UnknownVal();
+
+ // If both values wrap regions, see if they're from different base regions.
+ const MemRegion *LeftBase = LeftMR->getBaseRegion();
+ const MemRegion *RightBase = RightMR->getBaseRegion();
+ if (LeftBase != RightBase &&
+ !isa<SymbolicRegion>(LeftBase) && !isa<SymbolicRegion>(RightBase)) {
+ switch (op) {
+ default:
+ return UnknownVal();
+ case BO_EQ:
+ return makeTruthVal(false, resultTy);
+ case BO_NE:
+ return makeTruthVal(true, resultTy);
+ }
+ }
+
+ // The two regions are from the same base region. See if they're both a
+ // type of region we know how to compare.
+ const MemSpaceRegion *LeftMS = LeftBase->getMemorySpace();
+ const MemSpaceRegion *RightMS = RightBase->getMemorySpace();
+
+ // Heuristic: assume that no symbolic region (whose memory space is
+ // unknown) is on the stack.
+ // FIXME: we should be able to be more precise once we can do better
+ // aliasing constraints for symbolic regions, but this is a reasonable,
+ // albeit unsound, assumption that holds most of the time.
+ if (isa<StackSpaceRegion>(LeftMS) ^ isa<StackSpaceRegion>(RightMS)) {
+ switch (op) {
+ default:
+ break;
+ case BO_EQ:
+ return makeTruthVal(false, resultTy);
+ case BO_NE:
+ return makeTruthVal(true, resultTy);
+ }
+ }
+
+ // FIXME: If/when there is a getAsRawOffset() for FieldRegions, this
+ // ElementRegion path and the FieldRegion path below should be unified.
+ if (const ElementRegion *LeftER = dyn_cast<ElementRegion>(LeftMR)) {
+ // First see if the right region is also an ElementRegion.
+ const ElementRegion *RightER = dyn_cast<ElementRegion>(RightMR);
+ if (!RightER)
+ return UnknownVal();
+
+ // Next, see if the two ERs have the same super-region and matching types.
+ // FIXME: This should do something useful even if the types don't match,
+ // though if both indexes are constant the RegionRawOffset path will
+ // give the correct answer.
+ if (LeftER->getSuperRegion() == RightER->getSuperRegion() &&
+ LeftER->getElementType() == RightER->getElementType()) {
+ // Get the left index and cast it to the correct type.
+ // If the index is unknown or undefined, bail out here.
+ SVal LeftIndexVal = LeftER->getIndex();
+ NonLoc *LeftIndex = dyn_cast<NonLoc>(&LeftIndexVal);
+ if (!LeftIndex)
+ return UnknownVal();
+ LeftIndexVal = evalCastFromNonLoc(*LeftIndex, resultTy);
+ LeftIndex = dyn_cast<NonLoc>(&LeftIndexVal);
+ if (!LeftIndex)
+ return UnknownVal();
+
+ // Do the same for the right index.
+ SVal RightIndexVal = RightER->getIndex();
+ NonLoc *RightIndex = dyn_cast<NonLoc>(&RightIndexVal);
+ if (!RightIndex)
+ return UnknownVal();
+ RightIndexVal = evalCastFromNonLoc(*RightIndex, resultTy);
+ RightIndex = dyn_cast<NonLoc>(&RightIndexVal);
+ if (!RightIndex)
+ return UnknownVal();
+
+ // Actually perform the operation.
+ // evalBinOpNN expects the two indexes to already be the right type.
+ return evalBinOpNN(state, op, *LeftIndex, *RightIndex, resultTy);
+ }
+
+ // If the element indexes aren't comparable, see if the raw offsets are.
+ RegionRawOffset LeftOffset = LeftER->getAsArrayOffset();
+ RegionRawOffset RightOffset = RightER->getAsArrayOffset();
+
+ if (LeftOffset.getRegion() != NULL &&
+ LeftOffset.getRegion() == RightOffset.getRegion()) {
+ CharUnits left = LeftOffset.getOffset();
+ CharUnits right = RightOffset.getOffset();
+
+ switch (op) {
+ default:
+ return UnknownVal();
+ case BO_LT:
+ return makeTruthVal(left < right, resultTy);
+ case BO_GT:
+ return makeTruthVal(left > right, resultTy);
+ case BO_LE:
+ return makeTruthVal(left <= right, resultTy);
+ case BO_GE:
+ return makeTruthVal(left >= right, resultTy);
+ case BO_EQ:
+ return makeTruthVal(left == right, resultTy);
+ case BO_NE:
+ return makeTruthVal(left != right, resultTy);
+ }
+ }
+
+ // If we get here, we have no way of comparing the ElementRegions.
+ return UnknownVal();
+ }
+
+ // See if both regions are fields of the same structure.
+ // FIXME: This doesn't handle nesting, inheritance, or Objective-C ivars.
+ if (const FieldRegion *LeftFR = dyn_cast<FieldRegion>(LeftMR)) {
+ // Only comparisons are meaningful here!
+ if (!BinaryOperator::isComparisonOp(op))
+ return UnknownVal();
+
+ // First see if the right region is also a FieldRegion.
+ const FieldRegion *RightFR = dyn_cast<FieldRegion>(RightMR);
+ if (!RightFR)
+ return UnknownVal();
+
+ // Next, see if the two FRs have the same super-region.
+ // FIXME: This doesn't handle casts yet, and simply stripping the casts
+ // doesn't help.
+ if (LeftFR->getSuperRegion() != RightFR->getSuperRegion())
+ return UnknownVal();
+
+ const FieldDecl *LeftFD = LeftFR->getDecl();
+ const FieldDecl *RightFD = RightFR->getDecl();
+ const RecordDecl *RD = LeftFD->getParent();
+
+ // Make sure the two FRs are from the same kind of record. Just in case!
+ // FIXME: This is probably where inheritance would be a problem.
+ if (RD != RightFD->getParent())
+ return UnknownVal();
+
+ // We know for sure that the two fields are not the same, since that
+ // would have given us the same SVal.
+ if (op == BO_EQ)
+ return makeTruthVal(false, resultTy);
+ if (op == BO_NE)
+ return makeTruthVal(true, resultTy);
+
+ // Iterate through the fields and see which one comes first.
+ // [C99 6.7.2.1.13] "Within a structure object, the non-bit-field
+ // members and the units in which bit-fields reside have addresses that
+ // increase in the order in which they are declared."
+ bool leftFirst = (op == BO_LT || op == BO_LE);
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I!=E; ++I) {
+ if (*I == LeftFD)
+ return makeTruthVal(leftFirst, resultTy);
+ if (*I == RightFD)
+ return makeTruthVal(!leftFirst, resultTy);
+ }
+
+ llvm_unreachable("Fields not found in parent record's definition");
+ }
+
+ // If we get here, we have no way of comparing the regions.
+ return UnknownVal();
+ }
+ }
+}
+
+SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
+ BinaryOperator::Opcode op,
+ Loc lhs, NonLoc rhs, QualType resultTy) {
+
+ // Special case: rhs is a zero constant.
+ if (rhs.isZeroConstant())
+ return lhs;
+
+ // Special case: 'rhs' is an integer that has the same width as a pointer and
+ // we are using the integer location in a comparison. Normally this cannot be
+  // triggered, but transfer functions like those for OSCompareAndSwapBarrier32
+ // can generate comparisons that trigger this code.
+ // FIXME: Are all locations guaranteed to have pointer width?
+ if (BinaryOperator::isComparisonOp(op)) {
+ if (nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs)) {
+ const llvm::APSInt *x = &rhsInt->getValue();
+ ASTContext &ctx = Context;
+ if (ctx.getTypeSize(ctx.VoidPtrTy) == x->getBitWidth()) {
+ // Convert the signedness of the integer (if necessary).
+ if (x->isSigned())
+ x = &getBasicValueFactory().getValue(*x, true);
+
+ return evalBinOpLL(state, op, lhs, loc::ConcreteInt(*x), resultTy);
+ }
+ }
+ }
+
+ // We are dealing with pointer arithmetic.
+
+ // Handle pointer arithmetic on constant values.
+ if (nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs)) {
+ if (loc::ConcreteInt *lhsInt = dyn_cast<loc::ConcreteInt>(&lhs)) {
+ const llvm::APSInt &leftI = lhsInt->getValue();
+ assert(leftI.isUnsigned());
+ llvm::APSInt rightI(rhsInt->getValue(), /* isUnsigned */ true);
+
+ // Convert the bitwidth of rightI. This should deal with overflow
+ // since we are dealing with concrete values.
+ rightI = rightI.extOrTrunc(leftI.getBitWidth());
+
+ // Offset the increment by the pointer size.
+ llvm::APSInt Multiplicand(rightI.getBitWidth(), /* isUnsigned */ true);
+ rightI *= Multiplicand;
+
+ // Compute the adjusted pointer.
+ switch (op) {
+ case BO_Add:
+ rightI = leftI + rightI;
+ break;
+ case BO_Sub:
+ rightI = leftI - rightI;
+ break;
+ default:
+ llvm_unreachable("Invalid pointer arithmetic operation");
+ }
+ return loc::ConcreteInt(getBasicValueFactory().getValue(rightI));
+ }
+ }
+
+ // Handle cases where 'lhs' is a region.
+ if (const MemRegion *region = lhs.getAsRegion()) {
+ rhs = cast<NonLoc>(convertToArrayIndex(rhs));
+ SVal index = UnknownVal();
+ const MemRegion *superR = 0;
+ QualType elementType;
+
+ if (const ElementRegion *elemReg = dyn_cast<ElementRegion>(region)) {
+ assert(op == BO_Add || op == BO_Sub);
+ index = evalBinOpNN(state, op, elemReg->getIndex(), rhs,
+ getArrayIndexType());
+ superR = elemReg->getSuperRegion();
+ elementType = elemReg->getElementType();
+ }
+ else if (isa<SubRegion>(region)) {
+ superR = region;
+ index = rhs;
+ if (const PointerType *PT = resultTy->getAs<PointerType>()) {
+ elementType = PT->getPointeeType();
+ }
+ else {
+ const ObjCObjectPointerType *OT =
+ resultTy->getAs<ObjCObjectPointerType>();
+ elementType = OT->getPointeeType();
+ }
+ }
+
+ if (NonLoc *indexV = dyn_cast<NonLoc>(&index)) {
+ return loc::MemRegionVal(MemMgr.getElementRegion(elementType, *indexV,
+ superR, getContext()));
+ }
+ }
+ return UnknownVal();
+}
+
+const llvm::APSInt *SimpleSValBuilder::getKnownValue(ProgramStateRef state,
+ SVal V) {
+ if (V.isUnknownOrUndef())
+ return NULL;
+
+ if (loc::ConcreteInt* X = dyn_cast<loc::ConcreteInt>(&V))
+ return &X->getValue();
+
+ if (nonloc::ConcreteInt* X = dyn_cast<nonloc::ConcreteInt>(&V))
+ return &X->getValue();
+
+ if (SymbolRef Sym = V.getAsSymbol())
+ return state->getSymVal(Sym);
+
+ // FIXME: Add support for SymExprs.
+ return NULL;
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp
new file mode 100644
index 0000000..11748ae
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/Store.cpp
@@ -0,0 +1,362 @@
+//== Store.cpp - Interface for maps from Locations to Values ----*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the types Store and StoreManager.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/AST/CharUnits.h"
+#include "clang/AST/DeclObjC.h"
+
+using namespace clang;
+using namespace ento;
+
+StoreManager::StoreManager(ProgramStateManager &stateMgr)
+ : svalBuilder(stateMgr.getSValBuilder()), StateMgr(stateMgr),
+ MRMgr(svalBuilder.getRegionManager()), Ctx(stateMgr.getContext()) {}
+
+StoreRef StoreManager::enterStackFrame(ProgramStateRef state,
+ const LocationContext *callerCtx,
+ const StackFrameContext *calleeCtx) {
+ return StoreRef(state->getStore(), *this);
+}
+
+const MemRegion *StoreManager::MakeElementRegion(const MemRegion *Base,
+ QualType EleTy, uint64_t index) {
+ NonLoc idx = svalBuilder.makeArrayIndex(index);
+ return MRMgr.getElementRegion(EleTy, idx, Base, svalBuilder.getContext());
+}
+
+// FIXME: Merge with the implementation of the same method in MemRegion.cpp
+static bool IsCompleteType(ASTContext &Ctx, QualType Ty) {
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ const RecordDecl *D = RT->getDecl();
+ if (!D->getDefinition())
+ return false;
+ }
+
+ return true;
+}
+
+StoreRef StoreManager::BindDefault(Store store, const MemRegion *R, SVal V) {
+ return StoreRef(store, *this);
+}
+
+const ElementRegion *StoreManager::GetElementZeroRegion(const MemRegion *R,
+ QualType T) {
+ NonLoc idx = svalBuilder.makeZeroArrayIndex();
+ assert(!T.isNull());
+ return MRMgr.getElementRegion(T, idx, R, Ctx);
+}
+
+const MemRegion *StoreManager::castRegion(const MemRegion *R, QualType CastToTy) {
+
+ ASTContext &Ctx = StateMgr.getContext();
+
+ // Handle casts to Objective-C objects.
+ if (CastToTy->isObjCObjectPointerType())
+ return R->StripCasts();
+
+ if (CastToTy->isBlockPointerType()) {
+ // FIXME: We may need different solutions, depending on the symbol
+    // involved. Blocks can be cast to/from 'id', as they can be treated
+    // as Objective-C objects. This could possibly be handled by enhancing
+    // our reasoning about downcasts of symbolic objects.
+ if (isa<CodeTextRegion>(R) || isa<SymbolicRegion>(R))
+ return R;
+
+ // We don't know what to make of it. Return a NULL region, which
+    // will be interpreted as UnknownVal.
+ return NULL;
+ }
+
+ // Now assume we are casting from pointer to pointer. Other cases should
+ // already be handled.
+ QualType PointeeTy = CastToTy->getPointeeType();
+ QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
+
+ // Handle casts to void*. We just pass the region through.
+ if (CanonPointeeTy.getLocalUnqualifiedType() == Ctx.VoidTy)
+ return R;
+
+ // Handle casts from compatible types.
+ if (R->isBoundable())
+ if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(R)) {
+ QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
+ if (CanonPointeeTy == ObjTy)
+ return R;
+ }
+
+ // Process region cast according to the kind of the region being cast.
+ switch (R->getKind()) {
+ case MemRegion::CXXThisRegionKind:
+ case MemRegion::GenericMemSpaceRegionKind:
+ case MemRegion::StackLocalsSpaceRegionKind:
+ case MemRegion::StackArgumentsSpaceRegionKind:
+ case MemRegion::HeapSpaceRegionKind:
+ case MemRegion::UnknownSpaceRegionKind:
+ case MemRegion::StaticGlobalSpaceRegionKind:
+ case MemRegion::GlobalInternalSpaceRegionKind:
+ case MemRegion::GlobalSystemSpaceRegionKind:
+ case MemRegion::GlobalImmutableSpaceRegionKind: {
+ llvm_unreachable("Invalid region cast");
+ }
+
+ case MemRegion::FunctionTextRegionKind:
+ case MemRegion::BlockTextRegionKind:
+ case MemRegion::BlockDataRegionKind:
+ case MemRegion::StringRegionKind:
+ // FIXME: Need to handle arbitrary downcasts.
+ case MemRegion::SymbolicRegionKind:
+ case MemRegion::AllocaRegionKind:
+ case MemRegion::CompoundLiteralRegionKind:
+ case MemRegion::FieldRegionKind:
+ case MemRegion::ObjCIvarRegionKind:
+ case MemRegion::ObjCStringRegionKind:
+ case MemRegion::VarRegionKind:
+ case MemRegion::CXXTempObjectRegionKind:
+ case MemRegion::CXXBaseObjectRegionKind:
+ return MakeElementRegion(R, PointeeTy);
+
+ case MemRegion::ElementRegionKind: {
+ // If we are casting from an ElementRegion to another type, the
+ // algorithm is as follows:
+ //
+ // (1) Compute the "raw offset" of the ElementRegion from the
+ // base region. This is done by calling 'getAsRawOffset()'.
+ //
+ // (2a) If we get a 'RegionRawOffset' after calling
+ // 'getAsRawOffset()', determine if the absolute offset
+ // can be exactly divided into chunks of the size of the
+ // casted-pointee type. If so, create a new ElementRegion with
+ // the pointee-cast type as the new ElementType and the index
+    //      being the offset divided by the chunk size. If not, create
+ // a new ElementRegion at offset 0 off the raw offset region.
+ //
+    //  (2b) If we don't get a 'RegionRawOffset' after calling
+ // 'getAsRawOffset()', it means that we are at offset 0.
+ //
+ // FIXME: Handle symbolic raw offsets.
+
+ const ElementRegion *elementR = cast<ElementRegion>(R);
+ const RegionRawOffset &rawOff = elementR->getAsArrayOffset();
+ const MemRegion *baseR = rawOff.getRegion();
+
+ // If we cannot compute a raw offset, throw up our hands and return
+ // a NULL MemRegion*.
+ if (!baseR)
+ return NULL;
+
+ CharUnits off = rawOff.getOffset();
+
+ if (off.isZero()) {
+ // Edge case: we are at 0 bytes off the beginning of baseR. We
+      // check to see if the type we are casting to is the same as the
+      // base region's value type. If so, just return the base region.
+ if (const TypedValueRegion *TR = dyn_cast<TypedValueRegion>(baseR)) {
+ QualType ObjTy = Ctx.getCanonicalType(TR->getValueType());
+ QualType CanonPointeeTy = Ctx.getCanonicalType(PointeeTy);
+ if (CanonPointeeTy == ObjTy)
+ return baseR;
+ }
+
+ // Otherwise, create a new ElementRegion at offset 0.
+ return MakeElementRegion(baseR, PointeeTy);
+ }
+
+ // We have a non-zero offset from the base region. We want to determine
+ // if the offset can be evenly divided by sizeof(PointeeTy). If so,
+ // we create an ElementRegion whose index is that value. Otherwise, we
+ // create two ElementRegions, one that reflects a raw offset and the other
+ // that reflects the cast.
+
+ // Compute the index for the new ElementRegion.
+ int64_t newIndex = 0;
+ const MemRegion *newSuperR = 0;
+
+ // We can only compute sizeof(PointeeTy) if it is a complete type.
+ if (IsCompleteType(Ctx, PointeeTy)) {
+ // Compute the size in **bytes**.
+ CharUnits pointeeTySize = Ctx.getTypeSizeInChars(PointeeTy);
+ if (!pointeeTySize.isZero()) {
+ // Is the offset a multiple of the size? If so, we can layer the
+ // ElementRegion (with elementType == PointeeTy) directly on top of
+ // the base region.
+ if (off % pointeeTySize == 0) {
+ newIndex = off / pointeeTySize;
+ newSuperR = baseR;
+ }
+ }
+ }
+
+ if (!newSuperR) {
+ // Create an intermediate ElementRegion to represent the raw byte.
+ // This will be the super region of the final ElementRegion.
+ newSuperR = MakeElementRegion(baseR, Ctx.CharTy, off.getQuantity());
+ }
+
+ return MakeElementRegion(newSuperR, PointeeTy, newIndex);
+ }
+ }
+
+ llvm_unreachable("unreachable");
+}
+
+
+/// CastRetrievedVal - Used by subclasses of StoreManager to implement
+/// implicit casts that arise from loads from regions that are reinterpreted
+/// as another region.
+SVal StoreManager::CastRetrievedVal(SVal V, const TypedValueRegion *R,
+ QualType castTy, bool performTestOnly) {
+
+ if (castTy.isNull() || V.isUnknownOrUndef())
+ return V;
+
+ ASTContext &Ctx = svalBuilder.getContext();
+
+ if (performTestOnly) {
+ // Automatically translate references to pointers.
+ QualType T = R->getValueType();
+ if (const ReferenceType *RT = T->getAs<ReferenceType>())
+ T = Ctx.getPointerType(RT->getPointeeType());
+
+ assert(svalBuilder.getContext().hasSameUnqualifiedType(castTy, T));
+ return V;
+ }
+
+ return svalBuilder.dispatchCast(V, castTy);
+}
+
+SVal StoreManager::getLValueFieldOrIvar(const Decl *D, SVal Base) {
+ if (Base.isUnknownOrUndef())
+ return Base;
+
+ Loc BaseL = cast<Loc>(Base);
+ const MemRegion* BaseR = 0;
+
+ switch (BaseL.getSubKind()) {
+ case loc::MemRegionKind:
+ BaseR = cast<loc::MemRegionVal>(BaseL).getRegion();
+ break;
+
+ case loc::GotoLabelKind:
+    // These are abnormal cases. Flag an undefined value.
+ return UndefinedVal();
+
+ case loc::ConcreteIntKind:
+ // While these seem funny, this can happen through casts.
+ // FIXME: What we should return is the field offset. For example,
+ // add the field offset to the integer value. That way funny things
+ // like this work properly: &(((struct foo *) 0xa)->f)
+ return Base;
+
+ default:
+ llvm_unreachable("Unhandled Base.");
+ }
+
+ // NOTE: We must have this check first because ObjCIvarDecl is a subclass
+ // of FieldDecl.
+ if (const ObjCIvarDecl *ID = dyn_cast<ObjCIvarDecl>(D))
+ return loc::MemRegionVal(MRMgr.getObjCIvarRegion(ID, BaseR));
+
+ return loc::MemRegionVal(MRMgr.getFieldRegion(cast<FieldDecl>(D), BaseR));
+}
+
+SVal StoreManager::getLValueIvar(const ObjCIvarDecl *decl, SVal base) {
+ return getLValueFieldOrIvar(decl, base);
+}
+
+SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
+ SVal Base) {
+
+ // If the base is an unknown or undefined value, just return it back.
+ // FIXME: For absolute pointer addresses, we just return that value back as
+ // well, although in reality we should return the offset added to that
+ // value.
+ if (Base.isUnknownOrUndef() || isa<loc::ConcreteInt>(Base))
+ return Base;
+
+ const MemRegion* BaseRegion = cast<loc::MemRegionVal>(Base).getRegion();
+
+ // Pointer of any type can be cast and used as array base.
+ const ElementRegion *ElemR = dyn_cast<ElementRegion>(BaseRegion);
+
+ // Convert the offset to the appropriate size and signedness.
+ Offset = cast<NonLoc>(svalBuilder.convertToArrayIndex(Offset));
+
+ if (!ElemR) {
+ //
+ // If the base region is not an ElementRegion, create one.
+ // This can happen in the following example:
+ //
+    //   char *p = __builtin_alloca(10);
+ // p[1] = 8;
+ //
+ // Observe that 'p' binds to an AllocaRegion.
+ //
+ return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
+ BaseRegion, Ctx));
+ }
+
+ SVal BaseIdx = ElemR->getIndex();
+
+ if (!isa<nonloc::ConcreteInt>(BaseIdx))
+ return UnknownVal();
+
+ const llvm::APSInt& BaseIdxI = cast<nonloc::ConcreteInt>(BaseIdx).getValue();
+
+ // Only allow non-integer offsets if the base region has no offset itself.
+ // FIXME: This is a somewhat arbitrary restriction. We should be using
+ // SValBuilder here to add the two offsets without checking their types.
+ if (!isa<nonloc::ConcreteInt>(Offset)) {
+ if (isa<ElementRegion>(BaseRegion->StripCasts()))
+ return UnknownVal();
+
+ return loc::MemRegionVal(MRMgr.getElementRegion(elementType, Offset,
+ ElemR->getSuperRegion(),
+ Ctx));
+ }
+
+ const llvm::APSInt& OffI = cast<nonloc::ConcreteInt>(Offset).getValue();
+ assert(BaseIdxI.isSigned());
+
+ // Compute the new index.
+ nonloc::ConcreteInt NewIdx(svalBuilder.getBasicValueFactory().getValue(BaseIdxI +
+ OffI));
+
+ // Construct the new ElementRegion.
+ const MemRegion *ArrayR = ElemR->getSuperRegion();
+ return loc::MemRegionVal(MRMgr.getElementRegion(elementType, NewIdx, ArrayR,
+ Ctx));
+}
+
+StoreManager::BindingsHandler::~BindingsHandler() {}
+
+bool StoreManager::FindUniqueBinding::HandleBinding(StoreManager& SMgr,
+ Store store,
+ const MemRegion* R,
+ SVal val) {
+ SymbolRef SymV = val.getAsLocSymbol();
+ if (!SymV || SymV != Sym)
+ return true;
+
+ if (Binding) {
+ First = false;
+ return false;
+ }
+ else
+ Binding = R;
+
+ return true;
+}
+
+void SubRegionMap::anchor() { }
+void SubRegionMap::Visitor::anchor() { }
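
Note: the long comment inside StoreManager::castRegion above describes how a raw byte offset is re-expressed in units of the casted-to pointee type. The following standalone sketch (not part of the imported file; the struct and function names are made up) isolates just that divisibility decision using clang::CharUnits, the same arithmetic the ElementRegionKind case performs.

    #include "clang/AST/CharUnits.h"

    // Mirrors step (2a) of the comment in StoreManager::castRegion: decide
    // whether 'off' is a whole number of pointee-sized elements.
    struct LayeringDecision {
      bool layerOnBase;                        // true: layer the ElementRegion directly on the base
      clang::CharUnits::QuantityType index;    // element index when layerOnBase is true
    };

    static LayeringDecision decideLayering(clang::CharUnits off,
                                           clang::CharUnits pointeeSize) {
      LayeringDecision D;
      D.layerOnBase = false;
      D.index = 0;
      if (!pointeeSize.isZero() && off % pointeeSize == 0) {
        // The offset divides evenly: layer the new ElementRegion on the base
        // region at index off / pointeeSize.
        D.layerOnBase = true;
        D.index = off / pointeeSize;
      }
      // Otherwise castRegion first creates a char-typed ElementRegion at the
      // raw byte offset and layers the final ElementRegion at index 0 on top.
      return D;
    }
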
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SubEngine.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SubEngine.cpp
new file mode 100644
index 0000000..350f4b8
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SubEngine.cpp
@@ -0,0 +1,14 @@
+//== SubEngine.cpp - Interface of the subengine of CoreEngine ------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SubEngine.h"
+
+using namespace clang::ento;
+
+void SubEngine::anchor() { }
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
new file mode 100644
index 0000000..adefb58
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -0,0 +1,540 @@
+//== SymbolManager.cpp - Management of Symbolic Values ----------*- C++ -*--==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines SymbolManager, a class that manages symbolic values
+// created for use by ExprEngine and related classes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/SymbolManager.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/MemRegion.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/Store.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+using namespace ento;
+
+void SymExpr::anchor() { }
+
+void SymExpr::dump() const {
+ dumpToStream(llvm::errs());
+}
+
+static void print(raw_ostream &os, BinaryOperator::Opcode Op) {
+ switch (Op) {
+ default:
+ llvm_unreachable("operator printing not implemented");
+ case BO_Mul: os << '*' ; break;
+ case BO_Div: os << '/' ; break;
+ case BO_Rem: os << '%' ; break;
+ case BO_Add: os << '+' ; break;
+ case BO_Sub: os << '-' ; break;
+ case BO_Shl: os << "<<" ; break;
+ case BO_Shr: os << ">>" ; break;
+ case BO_LT: os << "<" ; break;
+ case BO_GT: os << '>' ; break;
+ case BO_LE: os << "<=" ; break;
+ case BO_GE: os << ">=" ; break;
+ case BO_EQ: os << "==" ; break;
+ case BO_NE: os << "!=" ; break;
+ case BO_And: os << '&' ; break;
+ case BO_Xor: os << '^' ; break;
+ case BO_Or: os << '|' ; break;
+ }
+}
+
+void SymIntExpr::dumpToStream(raw_ostream &os) const {
+ os << '(';
+ getLHS()->dumpToStream(os);
+ os << ") ";
+ print(os, getOpcode());
+ os << ' ' << getRHS().getZExtValue();
+ if (getRHS().isUnsigned()) os << 'U';
+}
+
+void IntSymExpr::dumpToStream(raw_ostream &os) const {
+ os << ' ' << getLHS().getZExtValue();
+ if (getLHS().isUnsigned()) os << 'U';
+ print(os, getOpcode());
+ os << '(';
+ getRHS()->dumpToStream(os);
+ os << ") ";
+}
+
+void SymSymExpr::dumpToStream(raw_ostream &os) const {
+ os << '(';
+ getLHS()->dumpToStream(os);
+  os << ") ";
+  print(os, getOpcode());
+  os << " (";
+ getRHS()->dumpToStream(os);
+ os << ')';
+}
+
+void SymbolCast::dumpToStream(raw_ostream &os) const {
+ os << '(' << ToTy.getAsString() << ") (";
+ Operand->dumpToStream(os);
+ os << ')';
+}
+
+void SymbolConjured::dumpToStream(raw_ostream &os) const {
+ os << "conj_$" << getSymbolID() << '{' << T.getAsString() << '}';
+}
+
+void SymbolDerived::dumpToStream(raw_ostream &os) const {
+ os << "derived_$" << getSymbolID() << '{'
+ << getParentSymbol() << ',' << getRegion() << '}';
+}
+
+void SymbolExtent::dumpToStream(raw_ostream &os) const {
+ os << "extent_$" << getSymbolID() << '{' << getRegion() << '}';
+}
+
+void SymbolMetadata::dumpToStream(raw_ostream &os) const {
+ os << "meta_$" << getSymbolID() << '{'
+ << getRegion() << ',' << T.getAsString() << '}';
+}
+
+void SymbolData::anchor() { }
+
+void SymbolRegionValue::dumpToStream(raw_ostream &os) const {
+ os << "reg_$" << getSymbolID() << "<" << R << ">";
+}
+
+bool SymExpr::symbol_iterator::operator==(const symbol_iterator &X) const {
+ return itr == X.itr;
+}
+
+bool SymExpr::symbol_iterator::operator!=(const symbol_iterator &X) const {
+ return itr != X.itr;
+}
+
+SymExpr::symbol_iterator::symbol_iterator(const SymExpr *SE) {
+ itr.push_back(SE);
+ while (!isa<SymbolData>(itr.back())) expand();
+}
+
+SymExpr::symbol_iterator &SymExpr::symbol_iterator::operator++() {
+ assert(!itr.empty() && "attempting to iterate on an 'end' iterator");
+ assert(isa<SymbolData>(itr.back()));
+ itr.pop_back();
+ if (!itr.empty())
+ while (!isa<SymbolData>(itr.back())) expand();
+ return *this;
+}
+
+SymbolRef SymExpr::symbol_iterator::operator*() {
+ assert(!itr.empty() && "attempting to dereference an 'end' iterator");
+ return cast<SymbolData>(itr.back());
+}
+
+void SymExpr::symbol_iterator::expand() {
+ const SymExpr *SE = itr.back();
+ itr.pop_back();
+
+ switch (SE->getKind()) {
+ case SymExpr::RegionValueKind:
+ case SymExpr::ConjuredKind:
+ case SymExpr::DerivedKind:
+ case SymExpr::ExtentKind:
+ case SymExpr::MetadataKind:
+ return;
+ case SymExpr::CastSymbolKind:
+ itr.push_back(cast<SymbolCast>(SE)->getOperand());
+ return;
+ case SymExpr::SymIntKind:
+ itr.push_back(cast<SymIntExpr>(SE)->getLHS());
+ return;
+ case SymExpr::IntSymKind:
+ itr.push_back(cast<IntSymExpr>(SE)->getRHS());
+ return;
+ case SymExpr::SymSymKind: {
+ const SymSymExpr *x = cast<SymSymExpr>(SE);
+ itr.push_back(x->getLHS());
+ itr.push_back(x->getRHS());
+ return;
+ }
+ }
+ llvm_unreachable("unhandled expansion case");
+}
+
+const SymbolRegionValue*
+SymbolManager::getRegionValueSymbol(const TypedValueRegion* R) {
+ llvm::FoldingSetNodeID profile;
+ SymbolRegionValue::Profile(profile, R);
+ void *InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolRegionValue>();
+ new (SD) SymbolRegionValue(SymbolCounter, R);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolRegionValue>(SD);
+}
+
+const SymbolConjured*
+SymbolManager::getConjuredSymbol(const Stmt *E, const LocationContext *LCtx,
+ QualType T, unsigned Count,
+ const void *SymbolTag) {
+
+ llvm::FoldingSetNodeID profile;
+ SymbolConjured::Profile(profile, E, T, Count, LCtx, SymbolTag);
+ void *InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolConjured>();
+ new (SD) SymbolConjured(SymbolCounter, E, LCtx, T, Count, SymbolTag);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolConjured>(SD);
+}
+
+const SymbolDerived*
+SymbolManager::getDerivedSymbol(SymbolRef parentSymbol,
+ const TypedValueRegion *R) {
+
+ llvm::FoldingSetNodeID profile;
+ SymbolDerived::Profile(profile, parentSymbol, R);
+ void *InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolDerived>();
+ new (SD) SymbolDerived(SymbolCounter, parentSymbol, R);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolDerived>(SD);
+}
+
+const SymbolExtent*
+SymbolManager::getExtentSymbol(const SubRegion *R) {
+ llvm::FoldingSetNodeID profile;
+ SymbolExtent::Profile(profile, R);
+ void *InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolExtent>();
+ new (SD) SymbolExtent(SymbolCounter, R);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolExtent>(SD);
+}
+
+const SymbolMetadata*
+SymbolManager::getMetadataSymbol(const MemRegion* R, const Stmt *S, QualType T,
+ unsigned Count, const void *SymbolTag) {
+
+ llvm::FoldingSetNodeID profile;
+ SymbolMetadata::Profile(profile, R, S, T, Count, SymbolTag);
+ void *InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolMetadata>();
+ new (SD) SymbolMetadata(SymbolCounter, R, S, T, Count, SymbolTag);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolMetadata>(SD);
+}
+
+const SymbolCast*
+SymbolManager::getCastSymbol(const SymExpr *Op,
+ QualType From, QualType To) {
+ llvm::FoldingSetNodeID ID;
+ SymbolCast::Profile(ID, Op, From, To);
+ void *InsertPos;
+ SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+ if (!data) {
+ data = (SymbolCast*) BPAlloc.Allocate<SymbolCast>();
+ new (data) SymbolCast(Op, From, To);
+ DataSet.InsertNode(data, InsertPos);
+ }
+
+ return cast<SymbolCast>(data);
+}
+
+const SymIntExpr *SymbolManager::getSymIntExpr(const SymExpr *lhs,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt& v,
+ QualType t) {
+ llvm::FoldingSetNodeID ID;
+ SymIntExpr::Profile(ID, lhs, op, v, t);
+ void *InsertPos;
+ SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!data) {
+ data = (SymIntExpr*) BPAlloc.Allocate<SymIntExpr>();
+ new (data) SymIntExpr(lhs, op, v, t);
+ DataSet.InsertNode(data, InsertPos);
+ }
+
+ return cast<SymIntExpr>(data);
+}
+
+const IntSymExpr *SymbolManager::getIntSymExpr(const llvm::APSInt& lhs,
+ BinaryOperator::Opcode op,
+ const SymExpr *rhs,
+ QualType t) {
+ llvm::FoldingSetNodeID ID;
+ IntSymExpr::Profile(ID, lhs, op, rhs, t);
+ void *InsertPos;
+ SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!data) {
+ data = (IntSymExpr*) BPAlloc.Allocate<IntSymExpr>();
+ new (data) IntSymExpr(lhs, op, rhs, t);
+ DataSet.InsertNode(data, InsertPos);
+ }
+
+ return cast<IntSymExpr>(data);
+}
+
+const SymSymExpr *SymbolManager::getSymSymExpr(const SymExpr *lhs,
+ BinaryOperator::Opcode op,
+ const SymExpr *rhs,
+ QualType t) {
+ llvm::FoldingSetNodeID ID;
+ SymSymExpr::Profile(ID, lhs, op, rhs, t);
+ void *InsertPos;
+ SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!data) {
+ data = (SymSymExpr*) BPAlloc.Allocate<SymSymExpr>();
+ new (data) SymSymExpr(lhs, op, rhs, t);
+ DataSet.InsertNode(data, InsertPos);
+ }
+
+ return cast<SymSymExpr>(data);
+}
+
+QualType SymbolConjured::getType(ASTContext&) const {
+ return T;
+}
+
+QualType SymbolDerived::getType(ASTContext &Ctx) const {
+ return R->getValueType();
+}
+
+QualType SymbolExtent::getType(ASTContext &Ctx) const {
+ return Ctx.getSizeType();
+}
+
+QualType SymbolMetadata::getType(ASTContext&) const {
+ return T;
+}
+
+QualType SymbolRegionValue::getType(ASTContext &C) const {
+ return R->getValueType();
+}
+
+SymbolManager::~SymbolManager() {
+ for (SymbolDependTy::const_iterator I = SymbolDependencies.begin(),
+ E = SymbolDependencies.end(); I != E; ++I) {
+ delete I->second;
+ }
+
+}
+
+bool SymbolManager::canSymbolicate(QualType T) {
+ T = T.getCanonicalType();
+
+ if (Loc::isLocType(T))
+ return true;
+
+ if (T->isIntegerType())
+ return T->isScalarType();
+
+ if (T->isRecordType() && !T->isUnionType())
+ return true;
+
+ return false;
+}
+
+void SymbolManager::addSymbolDependency(const SymbolRef Primary,
+ const SymbolRef Dependent) {
+ SymbolDependTy::iterator I = SymbolDependencies.find(Primary);
+ SymbolRefSmallVectorTy *dependencies = 0;
+ if (I == SymbolDependencies.end()) {
+ dependencies = new SymbolRefSmallVectorTy();
+ SymbolDependencies[Primary] = dependencies;
+ } else {
+ dependencies = I->second;
+ }
+ dependencies->push_back(Dependent);
+}
+
+const SymbolRefSmallVectorTy *SymbolManager::getDependentSymbols(
+ const SymbolRef Primary) {
+ SymbolDependTy::const_iterator I = SymbolDependencies.find(Primary);
+ if (I == SymbolDependencies.end())
+ return 0;
+ return I->second;
+}
+
+void SymbolReaper::markDependentsLive(SymbolRef sym) {
+  // Do not mark dependents more than once.
+ SymbolMapTy::iterator LI = TheLiving.find(sym);
+ assert(LI != TheLiving.end() && "The primary symbol is not live.");
+ if (LI->second == HaveMarkedDependents)
+ return;
+ LI->second = HaveMarkedDependents;
+
+ if (const SymbolRefSmallVectorTy *Deps = SymMgr.getDependentSymbols(sym)) {
+ for (SymbolRefSmallVectorTy::const_iterator I = Deps->begin(),
+ E = Deps->end(); I != E; ++I) {
+ if (TheLiving.find(*I) != TheLiving.end())
+ continue;
+ markLive(*I);
+ }
+ }
+}
+
+void SymbolReaper::markLive(SymbolRef sym) {
+ TheLiving[sym] = NotProcessed;
+ TheDead.erase(sym);
+ markDependentsLive(sym);
+}
+
+void SymbolReaper::markLive(const MemRegion *region) {
+ RegionRoots.insert(region);
+}
+
+void SymbolReaper::markInUse(SymbolRef sym) {
+ if (isa<SymbolMetadata>(sym))
+ MetadataInUse.insert(sym);
+}
+
+bool SymbolReaper::maybeDead(SymbolRef sym) {
+ if (isLive(sym))
+ return false;
+
+ TheDead.insert(sym);
+ return true;
+}
+
+bool SymbolReaper::isLiveRegion(const MemRegion *MR) {
+ if (RegionRoots.count(MR))
+ return true;
+
+ MR = MR->getBaseRegion();
+
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(MR))
+ return isLive(SR->getSymbol());
+
+ if (const VarRegion *VR = dyn_cast<VarRegion>(MR))
+ return isLive(VR, true);
+
+ // FIXME: This is a gross over-approximation. What we really need is a way to
+ // tell if anything still refers to this region. Unlike SymbolicRegions,
+ // AllocaRegions don't have associated symbols, though, so we don't actually
+ // have a way to track their liveness.
+ if (isa<AllocaRegion>(MR))
+ return true;
+
+ if (isa<CXXThisRegion>(MR))
+ return true;
+
+ if (isa<MemSpaceRegion>(MR))
+ return true;
+
+ return false;
+}
+
+bool SymbolReaper::isLive(SymbolRef sym) {
+ if (TheLiving.count(sym)) {
+ markDependentsLive(sym);
+ return true;
+ }
+
+ if (const SymbolDerived *derived = dyn_cast<SymbolDerived>(sym)) {
+ if (isLive(derived->getParentSymbol())) {
+ markLive(sym);
+ return true;
+ }
+ return false;
+ }
+
+ if (const SymbolExtent *extent = dyn_cast<SymbolExtent>(sym)) {
+ if (isLiveRegion(extent->getRegion())) {
+ markLive(sym);
+ return true;
+ }
+ return false;
+ }
+
+ if (const SymbolMetadata *metadata = dyn_cast<SymbolMetadata>(sym)) {
+ if (MetadataInUse.count(sym)) {
+ if (isLiveRegion(metadata->getRegion())) {
+ markLive(sym);
+ MetadataInUse.erase(sym);
+ return true;
+ }
+ }
+ return false;
+ }
+
+  // Interrogate the symbol. It may derive from an input value to
+ // the analyzed function/method.
+ return isa<SymbolRegionValue>(sym);
+}
+
+bool
+SymbolReaper::isLive(const Stmt *ExprVal, const LocationContext *ELCtx) const {
+ if (LCtx != ELCtx) {
+ // If the reaper's location context is a parent of the expression's
+ // location context, then the expression value is now "out of scope".
+ if (LCtx->isParentOf(ELCtx))
+ return false;
+ return true;
+ }
+
+ return LCtx->getAnalysis<RelaxedLiveVariables>()->isLive(Loc, ExprVal);
+}
+
+bool SymbolReaper::isLive(const VarRegion *VR, bool includeStoreBindings) const{
+ const StackFrameContext *VarContext = VR->getStackFrame();
+ const StackFrameContext *CurrentContext = LCtx->getCurrentStackFrame();
+
+ if (VarContext == CurrentContext) {
+ if (LCtx->getAnalysis<RelaxedLiveVariables>()->isLive(Loc, VR->getDecl()))
+ return true;
+
+ if (!includeStoreBindings)
+ return false;
+
+ unsigned &cachedQuery =
+ const_cast<SymbolReaper*>(this)->includedRegionCache[VR];
+
+ if (cachedQuery) {
+ return cachedQuery == 1;
+ }
+
+ // Query the store to see if the region occurs in any live bindings.
+ if (Store store = reapedStore.getStore()) {
+ bool hasRegion =
+ reapedStore.getStoreManager().includedInBindings(store, VR);
+ cachedQuery = hasRegion ? 1 : 2;
+ return hasRegion;
+ }
+
+ return false;
+ }
+
+ return VarContext->isParentOf(CurrentContext);
+}
+
+SymbolVisitor::~SymbolVisitor() {}
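
Note: every SymbolManager::get*Symbol / get*Expr method above repeats the same FoldingSet uniquing idiom: profile the would-be node, look it up, and only allocate (BumpPtrAllocator plus placement new) when it is not already present. Below is a minimal sketch of that idiom in isolation; the Widget type and WidgetManager class are made up and stand in for the SymExpr subclasses, so this is not analyzer code.

    #include "llvm/ADT/FoldingSet.h"
    #include "llvm/Support/Allocator.h"
    #include <new>

    class Widget : public llvm::FoldingSetNode {
      int Key;
    public:
      explicit Widget(int K) : Key(K) {}
      void Profile(llvm::FoldingSetNodeID &ID) const { Profile(ID, Key); }
      static void Profile(llvm::FoldingSetNodeID &ID, int K) {
        ID.AddInteger(K);
      }
    };

    class WidgetManager {
      llvm::FoldingSet<Widget> Set;
      llvm::BumpPtrAllocator BPAlloc;
    public:
      // Returns the canonical Widget for 'Key', creating it on first use.
      const Widget *get(int Key) {
        llvm::FoldingSetNodeID profile;
        Widget::Profile(profile, Key);
        void *InsertPos;
        Widget *W = Set.FindNodeOrInsertPos(profile, InsertPos);
        if (!W) {
          W = (Widget*) BPAlloc.Allocate<Widget>(); // uninitialized storage
          new (W) Widget(Key);                      // placement-new, as above
          Set.InsertNode(W, InsertPos);
        }
        return W;
      }
    };
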
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp
new file mode 100644
index 0000000..fe912df
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Core/TextPathDiagnostics.cpp
@@ -0,0 +1,69 @@
+//===--- TextPathDiagnostics.cpp - Text Diagnostics for Paths ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the TextPathDiagnostics object.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace clang;
+using namespace ento;
+using namespace llvm;
+
+namespace {
+
+/// \brief Simple path diagnostic client that emits the sequence of path
+/// events as diagnostic notes.
+class TextPathDiagnostics : public PathDiagnosticConsumer {
+ const std::string OutputFile;
+ DiagnosticsEngine &Diag;
+
+public:
+ TextPathDiagnostics(const std::string& output, DiagnosticsEngine &diag)
+ : OutputFile(output), Diag(diag) {}
+
+ void FlushDiagnosticsImpl(std::vector<const PathDiagnostic *> &Diags,
+ SmallVectorImpl<std::string> *FilesMade);
+
+ virtual StringRef getName() const {
+ return "TextPathDiagnostics";
+ }
+
+ PathGenerationScheme getGenerationScheme() const { return Minimal; }
+ bool supportsLogicalOpControlFlow() const { return true; }
+ bool supportsAllBlockEdges() const { return true; }
+ virtual bool useVerboseDescription() const { return true; }
+};
+
+} // end anonymous namespace
+
+PathDiagnosticConsumer*
+ento::createTextPathDiagnosticConsumer(const std::string& out,
+ const Preprocessor &PP) {
+ return new TextPathDiagnostics(out, PP.getDiagnostics());
+}
+
+void TextPathDiagnostics::FlushDiagnosticsImpl(
+ std::vector<const PathDiagnostic *> &Diags,
+ SmallVectorImpl<std::string> *FilesMade) {
+ for (std::vector<const PathDiagnostic *>::iterator it = Diags.begin(),
+ et = Diags.end(); it != et; ++it) {
+ const PathDiagnostic *D = *it;
+ for (PathPieces::const_iterator I = D->path.begin(), E = D->path.end();
+ I != E; ++I) {
+ unsigned diagID =
+ Diag.getDiagnosticIDs()->getCustomDiagID(DiagnosticIDs::Note,
+ (*I)->getString());
+ Diag.Report((*I)->getLocation().asLocation(), diagID);
+ }
+ }
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
new file mode 100644
index 0000000..c19ebcb
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -0,0 +1,681 @@
+//===--- AnalysisConsumer.cpp - ASTConsumer for running Analyses ----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// "Meta" ASTConsumer for running different source analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#define DEBUG_TYPE "AnalysisConsumer"
+
+#include "AnalysisConsumer.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ParentMap.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/CallGraph.h"
+#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Checkers/LocalCheckers.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/PathDiagnostic.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/AnalysisManager.h"
+#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ExprEngine.h"
+#include "clang/StaticAnalyzer/Core/PathDiagnosticConsumers.h"
+
+#include "clang/Basic/FileManager.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Frontend/AnalyzerOptions.h"
+#include "clang/Lex/Preprocessor.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/ADT/DepthFirstIterator.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/Statistic.h"
+
+#include <queue>
+
+using namespace clang;
+using namespace ento;
+using llvm::SmallPtrSet;
+
+static ExplodedNode::Auditor* CreateUbiViz();
+
+STATISTIC(NumFunctionTopLevel, "The # of functions at top level.");
+STATISTIC(NumFunctionsAnalyzed, "The # of functions analyzed (as top level).");
+STATISTIC(NumBlocksInAnalyzedFunctions,
+ "The # of basic blocks in the analyzed functions.");
+STATISTIC(PercentReachableBlocks, "The % of reachable basic blocks.");
+
+//===----------------------------------------------------------------------===//
+// Special PathDiagnosticConsumers.
+//===----------------------------------------------------------------------===//
+
+static PathDiagnosticConsumer*
+createPlistHTMLDiagnosticConsumer(const std::string& prefix,
+ const Preprocessor &PP) {
+ PathDiagnosticConsumer *PD =
+ createHTMLDiagnosticConsumer(llvm::sys::path::parent_path(prefix), PP);
+ return createPlistDiagnosticConsumer(prefix, PP, PD);
+}
+
+//===----------------------------------------------------------------------===//
+// AnalysisConsumer declaration.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class AnalysisConsumer : public ASTConsumer,
+ public RecursiveASTVisitor<AnalysisConsumer> {
+ enum AnalysisMode {
+ ANALYSIS_SYNTAX,
+ ANALYSIS_PATH,
+ ANALYSIS_ALL
+ };
+
+  /// Mode of the analyses while recursively visiting Decls.
+ AnalysisMode RecVisitorMode;
+ /// Bug Reporter to use while recursively visiting Decls.
+ BugReporter *RecVisitorBR;
+
+public:
+ ASTContext *Ctx;
+ const Preprocessor &PP;
+ const std::string OutDir;
+ AnalyzerOptions Opts;
+ ArrayRef<std::string> Plugins;
+
+ /// \brief Stores the declarations from the local translation unit.
+ /// Note, we pre-compute the local declarations at parse time as an
+ /// optimization to make sure we do not deserialize everything from disk.
+ /// The local declaration to all declarations ratio might be very small when
+ /// working with a PCH file.
+ SetOfDecls LocalTUDecls;
+
+ // PD is owned by AnalysisManager.
+ PathDiagnosticConsumer *PD;
+
+ StoreManagerCreator CreateStoreMgr;
+ ConstraintManagerCreator CreateConstraintMgr;
+
+ OwningPtr<CheckerManager> checkerMgr;
+ OwningPtr<AnalysisManager> Mgr;
+
+  /// Times the analysis of each translation unit.
+ static llvm::Timer* TUTotalTimer;
+
+ /// The information about analyzed functions shared throughout the
+ /// translation unit.
+ FunctionSummariesTy FunctionSummaries;
+
+ AnalysisConsumer(const Preprocessor& pp,
+ const std::string& outdir,
+ const AnalyzerOptions& opts,
+ ArrayRef<std::string> plugins)
+ : RecVisitorMode(ANALYSIS_ALL), RecVisitorBR(0),
+ Ctx(0), PP(pp), OutDir(outdir), Opts(opts), Plugins(plugins), PD(0) {
+ DigestAnalyzerOptions();
+ if (Opts.PrintStats) {
+ llvm::EnableStatistics();
+ TUTotalTimer = new llvm::Timer("Analyzer Total Time");
+ }
+ }
+
+ ~AnalysisConsumer() {
+ if (Opts.PrintStats)
+ delete TUTotalTimer;
+ }
+
+ void DigestAnalyzerOptions() {
+ // Create the PathDiagnosticConsumer.
+ if (!OutDir.empty()) {
+ switch (Opts.AnalysisDiagOpt) {
+ default:
+#define ANALYSIS_DIAGNOSTICS(NAME, CMDFLAG, DESC, CREATEFN, AUTOCREATE) \
+ case PD_##NAME: PD = CREATEFN(OutDir, PP); break;
+#include "clang/Frontend/Analyses.def"
+ }
+ } else if (Opts.AnalysisDiagOpt == PD_TEXT) {
+ // Create the text client even without a specified output file since
+ // it just uses diagnostic notes.
+ PD = createTextPathDiagnosticConsumer("", PP);
+ }
+
+ // Create the analyzer component creators.
+ switch (Opts.AnalysisStoreOpt) {
+ default:
+ llvm_unreachable("Unknown store manager.");
+#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATEFN) \
+ case NAME##Model: CreateStoreMgr = CREATEFN; break;
+#include "clang/Frontend/Analyses.def"
+ }
+
+ switch (Opts.AnalysisConstraintsOpt) {
+ default:
+      llvm_unreachable("Unknown constraint manager.");
+#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATEFN) \
+ case NAME##Model: CreateConstraintMgr = CREATEFN; break;
+#include "clang/Frontend/Analyses.def"
+ }
+ }
+
+ void DisplayFunction(const Decl *D, AnalysisMode Mode) {
+ if (!Opts.AnalyzerDisplayProgress)
+ return;
+
+ SourceManager &SM = Mgr->getASTContext().getSourceManager();
+ PresumedLoc Loc = SM.getPresumedLoc(D->getLocation());
+ if (Loc.isValid()) {
+ llvm::errs() << "ANALYZE";
+ switch (Mode) {
+ case ANALYSIS_SYNTAX: llvm::errs() << "(Syntax)"; break;
+ case ANALYSIS_PATH: llvm::errs() << "(Path Sensitive)"; break;
+ case ANALYSIS_ALL: break;
+ };
+ llvm::errs() << ": " << Loc.getFilename();
+ if (isa<FunctionDecl>(D) || isa<ObjCMethodDecl>(D)) {
+ const NamedDecl *ND = cast<NamedDecl>(D);
+ llvm::errs() << ' ' << *ND << '\n';
+ }
+ else if (isa<BlockDecl>(D)) {
+        llvm::errs() << ' ' << "block(line:" << Loc.getLine() << ",col:"
+                     << Loc.getColumn() << ")\n";
+ }
+ else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ Selector S = MD->getSelector();
+ llvm::errs() << ' ' << S.getAsString();
+ }
+ }
+ }
+
+ virtual void Initialize(ASTContext &Context) {
+ Ctx = &Context;
+ checkerMgr.reset(createCheckerManager(Opts, PP.getLangOpts(), Plugins,
+ PP.getDiagnostics()));
+ Mgr.reset(new AnalysisManager(*Ctx, PP.getDiagnostics(),
+ PP.getLangOpts(), PD,
+ CreateStoreMgr, CreateConstraintMgr,
+ checkerMgr.get(),
+ /* Indexer */ 0,
+ Opts.MaxNodes, Opts.MaxLoop,
+ Opts.VisualizeEGDot, Opts.VisualizeEGUbi,
+ Opts.AnalysisPurgeOpt, Opts.EagerlyAssume,
+ Opts.TrimGraph,
+ Opts.UnoptimizedCFG, Opts.CFGAddImplicitDtors,
+ Opts.CFGAddInitializers,
+ Opts.EagerlyTrimEGraph,
+ Opts.IPAMode,
+ Opts.InlineMaxStackDepth,
+ Opts.InlineMaxFunctionSize,
+ Opts.InliningMode,
+ Opts.NoRetryExhausted));
+ }
+
+ /// \brief Store the top level decls in the set to be processed later on.
+ /// (Doing this pre-processing avoids deserialization of data from PCH.)
+ virtual bool HandleTopLevelDecl(DeclGroupRef D);
+ virtual void HandleTopLevelDeclInObjCContainer(DeclGroupRef D);
+
+ virtual void HandleTranslationUnit(ASTContext &C);
+
+ /// \brief Build the call graph for all the top level decls of this TU and
+ /// use it to define the order in which the functions should be visited.
+  void HandleDeclsCallGraph();
+
+  /// \brief Run analyses (syntax or path-sensitive) on the given function.
+ /// \param Mode - determines if we are requesting syntax only or path
+ /// sensitive only analysis.
+ /// \param VisitedCallees - The output parameter, which is populated with the
+ /// set of functions which should be considered analyzed after analyzing the
+ /// given root function.
+ void HandleCode(Decl *D, AnalysisMode Mode,
+ SetOfConstDecls *VisitedCallees = 0);
+
+ void RunPathSensitiveChecks(Decl *D, SetOfConstDecls *VisitedCallees);
+ void ActionExprEngine(Decl *D, bool ObjCGCEnabled,
+ SetOfConstDecls *VisitedCallees);
+
+ /// Visitors for the RecursiveASTVisitor.
+
+ /// Handle callbacks for arbitrary Decls.
+ bool VisitDecl(Decl *D) {
+ checkerMgr->runCheckersOnASTDecl(D, *Mgr, *RecVisitorBR);
+ return true;
+ }
+
+ bool VisitFunctionDecl(FunctionDecl *FD) {
+ IdentifierInfo *II = FD->getIdentifier();
+ if (II && II->getName().startswith("__inline"))
+ return true;
+
+ // We skip function template definitions, as their semantics is
+ // only determined when they are instantiated.
+ if (FD->isThisDeclarationADefinition() &&
+ !FD->isDependentContext()) {
+ HandleCode(FD, RecVisitorMode);
+ }
+ return true;
+ }
+
+ bool VisitObjCMethodDecl(ObjCMethodDecl *MD) {
+ checkerMgr->runCheckersOnASTDecl(MD, *Mgr, *RecVisitorBR);
+ if (MD->isThisDeclarationADefinition())
+ HandleCode(MD, RecVisitorMode);
+ return true;
+ }
+
+private:
+ void storeTopLevelDecls(DeclGroupRef DG);
+
+ /// \brief Check if we should skip (not analyze) the given function.
+ bool skipFunction(Decl *D);
+
+};
+} // end anonymous namespace
+
+
+//===----------------------------------------------------------------------===//
+// AnalysisConsumer implementation.
+//===----------------------------------------------------------------------===//
+llvm::Timer* AnalysisConsumer::TUTotalTimer = 0;
+
+bool AnalysisConsumer::HandleTopLevelDecl(DeclGroupRef DG) {
+ storeTopLevelDecls(DG);
+ return true;
+}
+
+void AnalysisConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef DG) {
+ storeTopLevelDecls(DG);
+}
+
+void AnalysisConsumer::storeTopLevelDecls(DeclGroupRef DG) {
+ for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I) {
+
+ // Skip ObjCMethodDecl, wait for the objc container to avoid
+ // analyzing twice.
+ if (isa<ObjCMethodDecl>(*I))
+ continue;
+
+ LocalTUDecls.insert(*I);
+ }
+}
+
+void AnalysisConsumer::HandleDeclsCallGraph() {
+  // Use the call graph to derive the order in which the functions are visited.
+ // Build the Call Graph.
+ CallGraph CG;
+ // Add all the top level declarations to the graph.
+ for (SetOfDecls::iterator I = LocalTUDecls.begin(),
+ E = LocalTUDecls.end(); I != E; ++I)
+ CG.addToCallGraph(*I);
+
+ // Find the top level nodes - children of root + the unreachable (parentless)
+ // nodes.
+ llvm::SmallVector<CallGraphNode*, 24> TopLevelFunctions;
+ for (CallGraph::nodes_iterator TI = CG.parentless_begin(),
+ TE = CG.parentless_end(); TI != TE; ++TI) {
+ TopLevelFunctions.push_back(*TI);
+ NumFunctionTopLevel++;
+ }
+ CallGraphNode *Entry = CG.getRoot();
+ for (CallGraphNode::iterator I = Entry->begin(),
+ E = Entry->end(); I != E; ++I) {
+ TopLevelFunctions.push_back(*I);
+ NumFunctionTopLevel++;
+ }
+
+  // Make sure the nodes are processed in the reverse of their definition order
+  // in the translation unit. This step is very important for performance. It
+  // ensures that we analyze the root functions before the externally available
+  // subroutines.
+ std::queue<CallGraphNode*> BFSQueue;
+ for (llvm::SmallVector<CallGraphNode*, 24>::reverse_iterator
+ TI = TopLevelFunctions.rbegin(), TE = TopLevelFunctions.rend();
+ TI != TE; ++TI)
+ BFSQueue.push(*TI);
+
+ // BFS over all of the functions, while skipping the ones inlined into
+  // the previously processed functions. Use an external Visited set, which is
+ // also modified when we inline a function.
+ SmallPtrSet<CallGraphNode*,24> Visited;
+ while(!BFSQueue.empty()) {
+ CallGraphNode *N = BFSQueue.front();
+ BFSQueue.pop();
+
+ // Skip the functions which have been processed already or previously
+ // inlined.
+ if (Visited.count(N))
+ continue;
+
+ // Analyze the function.
+ SetOfConstDecls VisitedCallees;
+ Decl *D = N->getDecl();
+ assert(D);
+ HandleCode(D, ANALYSIS_PATH,
+ (Mgr->InliningMode == All ? 0 : &VisitedCallees));
+
+ // Add the visited callees to the global visited set.
+ for (SetOfConstDecls::const_iterator I = VisitedCallees.begin(),
+ E = VisitedCallees.end(); I != E; ++I){
+ CallGraphNode *VN = CG.getNode(*I);
+ if (VN)
+ Visited.insert(VN);
+ }
+ Visited.insert(N);
+
+ // Push the children into the queue.
+ for (CallGraphNode::const_iterator CI = N->begin(),
+ CE = N->end(); CI != CE; ++CI) {
+ BFSQueue.push(*CI);
+ }
+ }
+}
+
+void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
+ // Don't run the actions if an error has occurred with parsing the file.
+ DiagnosticsEngine &Diags = PP.getDiagnostics();
+ if (Diags.hasErrorOccurred() || Diags.hasFatalErrorOccurred())
+ return;
+
+ {
+ if (TUTotalTimer) TUTotalTimer->startTimer();
+
+ // Introduce a scope to destroy BR before Mgr.
+ BugReporter BR(*Mgr);
+ TranslationUnitDecl *TU = C.getTranslationUnitDecl();
+ checkerMgr->runCheckersOnASTDecl(TU, *Mgr, BR);
+
+ // Run the AST-only checks using the order in which functions are defined.
+ // If inlining is not turned on, use the simplest function order for path
+    // sensitive analyses as well.
+ RecVisitorMode = (Mgr->shouldInlineCall() ? ANALYSIS_SYNTAX : ANALYSIS_ALL);
+ RecVisitorBR = &BR;
+
+ // Process all the top level declarations.
+ for (SetOfDecls::iterator I = LocalTUDecls.begin(),
+ E = LocalTUDecls.end(); I != E; ++I)
+ TraverseDecl(*I);
+
+ if (Mgr->shouldInlineCall())
+      HandleDeclsCallGraph();
+
+ // After all decls handled, run checkers on the entire TranslationUnit.
+ checkerMgr->runCheckersOnEndOfTranslationUnit(TU, *Mgr, BR);
+
+ RecVisitorBR = 0;
+ }
+
+ // Explicitly destroy the PathDiagnosticConsumer. This will flush its output.
+ // FIXME: This should be replaced with something that doesn't rely on
+ // side-effects in PathDiagnosticConsumer's destructor. This is required when
+ // used with option -disable-free.
+ Mgr.reset(NULL);
+
+ if (TUTotalTimer) TUTotalTimer->stopTimer();
+
+ // Count how many basic blocks we have not covered.
+ NumBlocksInAnalyzedFunctions = FunctionSummaries.getTotalNumBasicBlocks();
+ if (NumBlocksInAnalyzedFunctions > 0)
+ PercentReachableBlocks =
+ (FunctionSummaries.getTotalNumVisitedBasicBlocks() * 100) /
+ NumBlocksInAnalyzedFunctions;
+
+}
+
+static void FindBlocks(DeclContext *D, SmallVectorImpl<Decl*> &WL) {
+ if (BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ WL.push_back(BD);
+
+ for (DeclContext::decl_iterator I = D->decls_begin(), E = D->decls_end();
+ I!=E; ++I)
+ if (DeclContext *DC = dyn_cast<DeclContext>(*I))
+ FindBlocks(DC, WL);
+}
+
+static std::string getFunctionName(const Decl *D) {
+ if (const ObjCMethodDecl *ID = dyn_cast<ObjCMethodDecl>(D)) {
+ return ID->getSelector().getAsString();
+ }
+ if (const FunctionDecl *ND = dyn_cast<FunctionDecl>(D)) {
+ IdentifierInfo *II = ND->getIdentifier();
+ if (II)
+ return II->getName();
+ }
+ return "";
+}
+
+bool AnalysisConsumer::skipFunction(Decl *D) {
+ if (!Opts.AnalyzeSpecificFunction.empty() &&
+ getFunctionName(D) != Opts.AnalyzeSpecificFunction)
+ return true;
+
+ // Don't run the actions on declarations in header files unless
+ // otherwise specified.
+ SourceManager &SM = Ctx->getSourceManager();
+ SourceLocation SL = SM.getExpansionLoc(D->getLocation());
+ if (!Opts.AnalyzeAll && !SM.isFromMainFile(SL))
+ return true;
+
+ return false;
+}
+
+void AnalysisConsumer::HandleCode(Decl *D, AnalysisMode Mode,
+ SetOfConstDecls *VisitedCallees) {
+ if (skipFunction(D))
+ return;
+
+ DisplayFunction(D, Mode);
+
+ // Clear the AnalysisManager of old AnalysisDeclContexts.
+ Mgr->ClearContexts();
+
+ // Dispatch on the actions.
+ SmallVector<Decl*, 10> WL;
+ WL.push_back(D);
+
+ if (D->hasBody() && Opts.AnalyzeNestedBlocks)
+ FindBlocks(cast<DeclContext>(D), WL);
+
+ BugReporter BR(*Mgr);
+ for (SmallVectorImpl<Decl*>::iterator WI=WL.begin(), WE=WL.end();
+ WI != WE; ++WI)
+ if ((*WI)->hasBody()) {
+ if (Mode != ANALYSIS_PATH)
+ checkerMgr->runCheckersOnASTBody(*WI, *Mgr, BR);
+ if (Mode != ANALYSIS_SYNTAX && checkerMgr->hasPathSensitiveCheckers()) {
+ RunPathSensitiveChecks(*WI, VisitedCallees);
+ NumFunctionsAnalyzed++;
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Path-sensitive checking.
+//===----------------------------------------------------------------------===//
+
+void AnalysisConsumer::ActionExprEngine(Decl *D, bool ObjCGCEnabled,
+ SetOfConstDecls *VisitedCallees) {
+ // Construct the analysis engine. First check if the CFG is valid.
+ // FIXME: Inter-procedural analysis will need to handle invalid CFGs.
+ if (!Mgr->getCFG(D))
+ return;
+
+ ExprEngine Eng(*Mgr, ObjCGCEnabled, VisitedCallees, &FunctionSummaries);
+
+ // Set the graph auditor.
+ OwningPtr<ExplodedNode::Auditor> Auditor;
+ if (Mgr->shouldVisualizeUbigraph()) {
+ Auditor.reset(CreateUbiViz());
+ ExplodedNode::SetAuditor(Auditor.get());
+ }
+
+ // Execute the worklist algorithm.
+ Eng.ExecuteWorkList(Mgr->getAnalysisDeclContextManager().getStackFrame(D, 0),
+ Mgr->getMaxNodes());
+
+ // Release the auditor (if any) so that it doesn't monitor the graph
+  // created by BugReporter.
+ ExplodedNode::SetAuditor(0);
+
+ // Visualize the exploded graph.
+ if (Mgr->shouldVisualizeGraphviz())
+ Eng.ViewGraph(Mgr->shouldTrimGraph());
+
+ // Display warnings.
+ Eng.getBugReporter().FlushReports();
+}
+
+void AnalysisConsumer::RunPathSensitiveChecks(Decl *D,
+ SetOfConstDecls *Visited) {
+
+ switch (Mgr->getLangOpts().getGC()) {
+ case LangOptions::NonGC:
+ ActionExprEngine(D, false, Visited);
+ break;
+
+ case LangOptions::GCOnly:
+ ActionExprEngine(D, true, Visited);
+ break;
+
+ case LangOptions::HybridGC:
+ ActionExprEngine(D, false, Visited);
+ ActionExprEngine(D, true, Visited);
+ break;
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// AnalysisConsumer creation.
+//===----------------------------------------------------------------------===//
+
+ASTConsumer* ento::CreateAnalysisConsumer(const Preprocessor& pp,
+ const std::string& outDir,
+ const AnalyzerOptions& opts,
+ ArrayRef<std::string> plugins) {
+ // Disable the effects of '-Werror' when using the AnalysisConsumer.
+ pp.getDiagnostics().setWarningsAsErrors(false);
+
+ return new AnalysisConsumer(pp, outDir, opts, plugins);
+}
+
+//===----------------------------------------------------------------------===//
+// Ubigraph Visualization. FIXME: Move to separate file.
+//===----------------------------------------------------------------------===//
+
+namespace {
+
+class UbigraphViz : public ExplodedNode::Auditor {
+ OwningPtr<raw_ostream> Out;
+ llvm::sys::Path Dir, Filename;
+ unsigned Cntr;
+
+ typedef llvm::DenseMap<void*,unsigned> VMap;
+ VMap M;
+
+public:
+ UbigraphViz(raw_ostream *out, llvm::sys::Path& dir,
+ llvm::sys::Path& filename);
+
+ ~UbigraphViz();
+
+ virtual void AddEdge(ExplodedNode *Src, ExplodedNode *Dst);
+};
+
+} // end anonymous namespace
+
+static ExplodedNode::Auditor* CreateUbiViz() {
+ std::string ErrMsg;
+
+ llvm::sys::Path Dir = llvm::sys::Path::GetTemporaryDirectory(&ErrMsg);
+ if (!ErrMsg.empty())
+ return 0;
+
+ llvm::sys::Path Filename = Dir;
+ Filename.appendComponent("llvm_ubi");
+ Filename.makeUnique(true,&ErrMsg);
+
+ if (!ErrMsg.empty())
+ return 0;
+
+ llvm::errs() << "Writing '" << Filename.str() << "'.\n";
+
+ OwningPtr<llvm::raw_fd_ostream> Stream;
+ Stream.reset(new llvm::raw_fd_ostream(Filename.c_str(), ErrMsg));
+
+ if (!ErrMsg.empty())
+ return 0;
+
+ return new UbigraphViz(Stream.take(), Dir, Filename);
+}
+
+void UbigraphViz::AddEdge(ExplodedNode *Src, ExplodedNode *Dst) {
+
+ assert (Src != Dst && "Self-edges are not allowed.");
+
+ // Lookup the Src. If it is a new node, it's a root.
+ VMap::iterator SrcI= M.find(Src);
+ unsigned SrcID;
+
+ if (SrcI == M.end()) {
+ M[Src] = SrcID = Cntr++;
+ *Out << "('vertex', " << SrcID << ", ('color','#00ff00'))\n";
+ }
+ else
+ SrcID = SrcI->second;
+
+ // Lookup the Dst.
+ VMap::iterator DstI= M.find(Dst);
+ unsigned DstID;
+
+ if (DstI == M.end()) {
+ M[Dst] = DstID = Cntr++;
+ *Out << "('vertex', " << DstID << ")\n";
+ }
+ else {
+ // We have hit DstID before. Change its style to reflect a cache hit.
+ DstID = DstI->second;
+ *Out << "('change_vertex_style', " << DstID << ", 1)\n";
+ }
+
+ // Add the edge.
+ *Out << "('edge', " << SrcID << ", " << DstID
+ << ", ('arrow','true'), ('oriented', 'true'))\n";
+}
+
+UbigraphViz::UbigraphViz(raw_ostream *out, llvm::sys::Path& dir,
+ llvm::sys::Path& filename)
+ : Out(out), Dir(dir), Filename(filename), Cntr(0) {
+
+ *Out << "('vertex_style_attribute', 0, ('shape', 'icosahedron'))\n";
+ *Out << "('vertex_style', 1, 0, ('shape', 'sphere'), ('color', '#ffcc66'),"
+ " ('size', '1.5'))\n";
+}
+
+UbigraphViz::~UbigraphViz() {
+ Out.reset(0);
+ llvm::errs() << "Running 'ubiviz' program... ";
+ std::string ErrMsg;
+ llvm::sys::Path Ubiviz = llvm::sys::Program::FindProgramByName("ubiviz");
+ std::vector<const char*> args;
+ args.push_back(Ubiviz.c_str());
+ args.push_back(Filename.c_str());
+ args.push_back(0);
+
+ if (llvm::sys::Program::ExecuteAndWait(Ubiviz, &args[0],0,0,0,0,&ErrMsg)) {
+ llvm::errs() << "Error viewing graph: " << ErrMsg << "\n";
+ }
+
+ // Delete the directory.
+ Dir.eraseFromDisk(true);
+}
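
Note: HandleDeclsCallGraph above seeds a queue with the top-level call-graph nodes in reverse definition order and then walks it breadth-first, skipping anything an earlier analysis already covered through inlining. The sketch below isolates just that traversal; integer node ids and a plain adjacency list stand in for CallGraphNode, so nothing here is analyzer API.

    #include <queue>
    #include <set>
    #include <vector>

    typedef int NodeID; // stand-in for CallGraphNode*

    static void analyzeInReverseBFS(const std::vector<NodeID> &topLevel,
                                    const std::vector<std::vector<NodeID> > &callees,
                                    std::set<NodeID> &visited) {
      // Seed the worklist with the top-level nodes in reverse order, mirroring
      // the reverse_iterator loop over TopLevelFunctions above.
      std::queue<NodeID> work;
      for (std::vector<NodeID>::const_reverse_iterator I = topLevel.rbegin(),
                                                       E = topLevel.rend();
           I != E; ++I)
        work.push(*I);

      while (!work.empty()) {
        NodeID N = work.front();
        work.pop();

        // Skip nodes already analyzed directly or covered by inlining.
        if (!visited.insert(N).second)
          continue;

        // ... analyze N here; callees reached through inlining would be added
        // to 'visited' at this point, like VisitedCallees above ...

        // Push the children (callees) for later analysis.
        for (size_t i = 0, e = callees[N].size(); i != e; ++i)
          work.push(callees[N][i]);
      }
    }
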
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h
new file mode 100644
index 0000000..5a16bff
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.h
@@ -0,0 +1,43 @@
+//===--- AnalysisConsumer.h - Front-end Analysis Engine Hooks ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header contains the functions necessary for a front-end to run various
+// analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_GR_ANALYSISCONSUMER_H
+#define LLVM_CLANG_GR_ANALYSISCONSUMER_H
+
+#include "clang/Basic/LLVM.h"
+#include <string>
+
+namespace clang {
+
+class AnalyzerOptions;
+class ASTConsumer;
+class Preprocessor;
+class DiagnosticsEngine;
+
+namespace ento {
+class CheckerManager;
+
+/// CreateAnalysisConsumer - Creates an ASTConsumer to run various code
+/// analysis passes. (The set of analyses run is controlled by command-line
+/// options.)
+ASTConsumer* CreateAnalysisConsumer(const Preprocessor &pp,
+ const std::string &output,
+ const AnalyzerOptions& opts,
+ ArrayRef<std::string> plugins);
+
+} // end ento namespace
+
+} // end clang namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
new file mode 100644
index 0000000..c06da0d
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/CheckerRegistration.cpp
@@ -0,0 +1,133 @@
+//===--- CheckerRegistration.cpp - Registration for the Analyzer Checkers -===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the registration function for the analyzer checkers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Frontend/CheckerRegistration.h"
+#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
+#include "clang/StaticAnalyzer/Checkers/ClangCheckers.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/CheckerOptInfo.h"
+#include "clang/StaticAnalyzer/Core/CheckerRegistry.h"
+#include "clang/Frontend/AnalyzerOptions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/SmallVector.h"
+
+using namespace clang;
+using namespace ento;
+using llvm::sys::DynamicLibrary;
+
+namespace {
+class ClangCheckerRegistry : public CheckerRegistry {
+ typedef void (*RegisterCheckersFn)(CheckerRegistry &);
+
+ static bool isCompatibleAPIVersion(const char *versionString);
+ static void warnIncompatible(DiagnosticsEngine *diags, StringRef pluginPath,
+ const char *pluginAPIVersion);
+
+public:
+ ClangCheckerRegistry(ArrayRef<std::string> plugins,
+ DiagnosticsEngine *diags = 0);
+};
+
+} // end anonymous namespace
+
+ClangCheckerRegistry::ClangCheckerRegistry(ArrayRef<std::string> plugins,
+ DiagnosticsEngine *diags) {
+ registerBuiltinCheckers(*this);
+
+ for (ArrayRef<std::string>::iterator i = plugins.begin(), e = plugins.end();
+ i != e; ++i) {
+ // Get access to the plugin.
+ DynamicLibrary lib = DynamicLibrary::getPermanentLibrary(i->c_str());
+
+ // See if it's compatible with this build of clang.
+ const char *pluginAPIVersion =
+ (const char *) lib.getAddressOfSymbol("clang_analyzerAPIVersionString");
+ if (!isCompatibleAPIVersion(pluginAPIVersion)) {
+ warnIncompatible(diags, *i, pluginAPIVersion);
+ continue;
+ }
+
+ // Register its checkers.
+ RegisterCheckersFn registerPluginCheckers =
+ (RegisterCheckersFn) (intptr_t) lib.getAddressOfSymbol(
+ "clang_registerCheckers");
+ if (registerPluginCheckers)
+ registerPluginCheckers(*this);
+ }
+}
+
+bool ClangCheckerRegistry::isCompatibleAPIVersion(const char *versionString) {
+ // If the version string is null, it's not an analyzer plugin.
+ if (versionString == 0)
+ return false;
+
+ // For now, none of the static analyzer API is considered stable.
+ // Versions must match exactly.
+ if (strcmp(versionString, CLANG_ANALYZER_API_VERSION_STRING) == 0)
+ return true;
+
+ return false;
+}
+
+void ClangCheckerRegistry::warnIncompatible(DiagnosticsEngine *diags,
+ StringRef pluginPath,
+ const char *pluginAPIVersion) {
+ if (!diags)
+ return;
+ if (!pluginAPIVersion)
+ return;
+
+ diags->Report(diag::warn_incompatible_analyzer_plugin_api)
+ << llvm::sys::path::filename(pluginPath);
+ diags->Report(diag::note_incompatible_analyzer_plugin_api)
+ << CLANG_ANALYZER_API_VERSION_STRING
+ << pluginAPIVersion;
+}
+
+
+CheckerManager *ento::createCheckerManager(const AnalyzerOptions &opts,
+ const LangOptions &langOpts,
+ ArrayRef<std::string> plugins,
+ DiagnosticsEngine &diags) {
+ OwningPtr<CheckerManager> checkerMgr(new CheckerManager(langOpts));
+
+ SmallVector<CheckerOptInfo, 8> checkerOpts;
+ for (unsigned i = 0, e = opts.CheckersControlList.size(); i != e; ++i) {
+ const std::pair<std::string, bool> &opt = opts.CheckersControlList[i];
+ checkerOpts.push_back(CheckerOptInfo(opt.first.c_str(), opt.second));
+ }
+
+ ClangCheckerRegistry allCheckers(plugins, &diags);
+ allCheckers.initializeManager(*checkerMgr, checkerOpts);
+ checkerMgr->finishedCheckerRegistration();
+
+ for (unsigned i = 0, e = checkerOpts.size(); i != e; ++i) {
+ if (checkerOpts[i].isUnclaimed())
+ diags.Report(diag::warn_unknown_analyzer_checker)
+ << checkerOpts[i].getName();
+ }
+
+ return checkerMgr.take();
+}
+
+void ento::printCheckerHelp(raw_ostream &out, ArrayRef<std::string> plugins) {
+ out << "OVERVIEW: Clang Static Analyzer Checkers List\n\n";
+ out << "USAGE: -analyzer-checker <CHECKER or PACKAGE,...>\n\n";
+
+ ClangCheckerRegistry(plugins).printHelp(out);
+}
diff --git a/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/FrontendActions.cpp b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/FrontendActions.cpp
new file mode 100644
index 0000000..85a18ec
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/StaticAnalyzer/Frontend/FrontendActions.cpp
@@ -0,0 +1,23 @@
+//===--- FrontendActions.cpp ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/StaticAnalyzer/Frontend/FrontendActions.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "AnalysisConsumer.h"
+using namespace clang;
+using namespace ento;
+
+ASTConsumer *AnalysisAction::CreateASTConsumer(CompilerInstance &CI,
+ StringRef InFile) {
+ return CreateAnalysisConsumer(CI.getPreprocessor(),
+ CI.getFrontendOpts().OutputFile,
+ CI.getAnalyzerOpts(),
+ CI.getFrontendOpts().Plugins);
+}
+
diff --git a/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp b/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp
new file mode 100644
index 0000000..eea1055
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/CompilationDatabase.cpp
@@ -0,0 +1,230 @@
+//===--- CompilationDatabase.cpp - ----------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains multiple implementations for CompilationDatabases.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/CompilationDatabase.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/JSONParser.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/system_error.h"
+
+namespace clang {
+namespace tooling {
+
+namespace {
+
+/// \brief A parser for JSON escaped strings of command line arguments.
+///
+/// Assumes \-escaping for quoted arguments (see the documentation of
+/// unescapeJSONCommandLine(...)).
+class CommandLineArgumentParser {
+ public:
+ CommandLineArgumentParser(StringRef CommandLine)
+ : Input(CommandLine), Position(Input.begin()-1) {}
+
+ std::vector<std::string> parse() {
+ bool HasMoreInput = true;
+ while (HasMoreInput && nextNonWhitespace()) {
+ std::string Argument;
+ HasMoreInput = parseStringInto(Argument);
+ CommandLine.push_back(Argument);
+ }
+ return CommandLine;
+ }
+
+ private:
+ // All private methods return true if there is more input available.
+
+ bool parseStringInto(std::string &String) {
+ do {
+ if (*Position == '"') {
+ if (!parseQuotedStringInto(String)) return false;
+ } else {
+ if (!parseFreeStringInto(String)) return false;
+ }
+ } while (*Position != ' ');
+ return true;
+ }
+
+ bool parseQuotedStringInto(std::string &String) {
+ if (!next()) return false;
+ while (*Position != '"') {
+ if (!skipEscapeCharacter()) return false;
+ String.push_back(*Position);
+ if (!next()) return false;
+ }
+ return next();
+ }
+
+ bool parseFreeStringInto(std::string &String) {
+ do {
+ if (!skipEscapeCharacter()) return false;
+ String.push_back(*Position);
+ if (!next()) return false;
+ } while (*Position != ' ' && *Position != '"');
+ return true;
+ }
+
+ bool skipEscapeCharacter() {
+ if (*Position == '\\') {
+ return next();
+ }
+ return true;
+ }
+
+ bool nextNonWhitespace() {
+ do {
+ if (!next()) return false;
+ } while (*Position == ' ');
+ return true;
+ }
+
+ bool next() {
+ ++Position;
+ if (Position == Input.end()) return false;
+ // Remove the JSON escaping first. This is done unconditionally.
+ if (*Position == '\\') ++Position;
+ return Position != Input.end();
+ }
+
+ const StringRef Input;
+ StringRef::iterator Position;
+ std::vector<std::string> CommandLine;
+};
+
+std::vector<std::string> unescapeJSONCommandLine(
+ StringRef JSONEscapedCommandLine) {
+ CommandLineArgumentParser parser(JSONEscapedCommandLine);
+ return parser.parse();
+}
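+
+// For illustration (hypothetical input): given the raw JSON string value
+//   /usr/bin/clang++ -DNAME=\"two words\" -c file.cc
+// the parser above yields the arguments
+//   "/usr/bin/clang++", "-DNAME=two words", "-c", "file.cc"
+// i.e. the JSON escapes are dropped and quoted sections become part of a
+// single argument.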
+
+} // end namespace
+
+CompilationDatabase::~CompilationDatabase() {}
+
+CompilationDatabase *
+CompilationDatabase::loadFromDirectory(StringRef BuildDirectory,
+ std::string &ErrorMessage) {
+ llvm::SmallString<1024> JSONDatabasePath(BuildDirectory);
+ llvm::sys::path::append(JSONDatabasePath, "compile_commands.json");
+ llvm::OwningPtr<CompilationDatabase> Database(
+ JSONCompilationDatabase::loadFromFile(JSONDatabasePath, ErrorMessage));
+ if (!Database) {
+ return NULL;
+ }
+ return Database.take();
+}
+
+JSONCompilationDatabase *
+JSONCompilationDatabase::loadFromFile(StringRef FilePath,
+ std::string &ErrorMessage) {
+ llvm::OwningPtr<llvm::MemoryBuffer> DatabaseBuffer;
+ llvm::error_code Result =
+ llvm::MemoryBuffer::getFile(FilePath, DatabaseBuffer);
+ if (Result != 0) {
+ ErrorMessage = "Error while opening JSON database: " + Result.message();
+ return NULL;
+ }
+ llvm::OwningPtr<JSONCompilationDatabase> Database(
+ new JSONCompilationDatabase(DatabaseBuffer.take()));
+ if (!Database->parse(ErrorMessage))
+ return NULL;
+ return Database.take();
+}
+
+JSONCompilationDatabase *
+JSONCompilationDatabase::loadFromBuffer(StringRef DatabaseString,
+ std::string &ErrorMessage) {
+ llvm::OwningPtr<llvm::MemoryBuffer> DatabaseBuffer(
+ llvm::MemoryBuffer::getMemBuffer(DatabaseString));
+ llvm::OwningPtr<JSONCompilationDatabase> Database(
+ new JSONCompilationDatabase(DatabaseBuffer.take()));
+ if (!Database->parse(ErrorMessage))
+ return NULL;
+ return Database.take();
+}
+
+std::vector<CompileCommand>
+JSONCompilationDatabase::getCompileCommands(StringRef FilePath) const {
+ llvm::StringMap< std::vector<CompileCommandRef> >::const_iterator
+ CommandsRefI = IndexByFile.find(FilePath);
+ if (CommandsRefI == IndexByFile.end())
+ return std::vector<CompileCommand>();
+ const std::vector<CompileCommandRef> &CommandsRef = CommandsRefI->getValue();
+ std::vector<CompileCommand> Commands;
+ for (int I = 0, E = CommandsRef.size(); I != E; ++I) {
+ Commands.push_back(CompileCommand(
+ // FIXME: Escape correctly:
+ CommandsRef[I].first,
+ unescapeJSONCommandLine(CommandsRef[I].second)));
+ }
+ return Commands;
+}
+
+bool JSONCompilationDatabase::parse(std::string &ErrorMessage) {
+ llvm::SourceMgr SM;
+ llvm::JSONParser Parser(Database->getBuffer(), &SM);
+ llvm::JSONValue *Root = Parser.parseRoot();
+ if (Root == NULL) {
+ ErrorMessage = "Error while parsing JSON.";
+ return false;
+ }
+ llvm::JSONArray *Array = dyn_cast<llvm::JSONArray>(Root);
+ if (Array == NULL) {
+ ErrorMessage = "Expected array.";
+ return false;
+ }
+ for (llvm::JSONArray::const_iterator AI = Array->begin(), AE = Array->end();
+ AI != AE; ++AI) {
+ const llvm::JSONObject *Object = dyn_cast<llvm::JSONObject>(*AI);
+ if (Object == NULL) {
+ ErrorMessage = "Expected object.";
+ return false;
+ }
+ StringRef EntryDirectory;
+ StringRef EntryFile;
+ StringRef EntryCommand;
+ for (llvm::JSONObject::const_iterator KVI = Object->begin(),
+ KVE = Object->end();
+ KVI != KVE; ++KVI) {
+ const llvm::JSONValue *Value = (*KVI)->Value;
+ if (Value == NULL) {
+ ErrorMessage = "Expected value.";
+ return false;
+ }
+ const llvm::JSONString *ValueString =
+ dyn_cast<llvm::JSONString>(Value);
+ if (ValueString == NULL) {
+ ErrorMessage = "Expected string as value.";
+ return false;
+ }
+ if ((*KVI)->Key->getRawText() == "directory") {
+ EntryDirectory = ValueString->getRawText();
+ } else if ((*KVI)->Key->getRawText() == "file") {
+ EntryFile = ValueString->getRawText();
+ } else if ((*KVI)->Key->getRawText() == "command") {
+ EntryCommand = ValueString->getRawText();
+ } else {
+ ErrorMessage = (Twine("Unknown key: \"") +
+ (*KVI)->Key->getRawText() + "\"").str();
+ return false;
+ }
+ }
+ IndexByFile[EntryFile].push_back(
+ CompileCommandRef(EntryDirectory, EntryCommand));
+ }
+ return true;
+}
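+
+// For illustration, each entry in compile_commands.json is expected to have
+// the shape parsed above (paths here are hypothetical):
+//
+//   [ { "directory": "/home/user/build",
+//       "file": "/home/user/project/tool.cc",
+//       "command": "clang++ -Iinclude -c /home/user/project/tool.cc" } ]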
+
+} // end namespace tooling
+} // end namespace clang
+
diff --git a/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp b/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp
new file mode 100644
index 0000000..fa2374f
--- /dev/null
+++ b/contrib/llvm/tools/clang/lib/Tooling/Tooling.cpp
@@ -0,0 +1,296 @@
+//===--- Tooling.cpp - Running clang standalone tools ---------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements functions to run clang tools standalone instead
+// of running them as a plugin.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/Tooling.h"
+#include "clang/Tooling/CompilationDatabase.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Tool.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendAction.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/raw_ostream.h"
+
+namespace clang {
+namespace tooling {
+
+FrontendActionFactory::~FrontendActionFactory() {}
+
+// FIXME: This file contains structural duplication with other parts of the
+// code that sets up a compiler to run tools on it, and we should refactor
+// it to be based on the same framework.
+
+/// \brief Builds a clang driver initialized for running clang tools.
+static clang::driver::Driver *newDriver(clang::DiagnosticsEngine *Diagnostics,
+ const char *BinaryName) {
+ const std::string DefaultOutputName = "a.out";
+ clang::driver::Driver *CompilerDriver = new clang::driver::Driver(
+ BinaryName, llvm::sys::getDefaultTargetTriple(),
+ DefaultOutputName, false, *Diagnostics);
+ CompilerDriver->setTitle("clang_based_tool");
+ return CompilerDriver;
+}
+
+/// \brief Retrieves the clang CC1 specific flags out of the compilation's jobs.
+///
+/// Returns NULL on error.
+static const clang::driver::ArgStringList *getCC1Arguments(
+ clang::DiagnosticsEngine *Diagnostics,
+ clang::driver::Compilation *Compilation) {
+ // We expect to get back exactly one Command job; if we didn't, something
+ // failed. Extract that job from the Compilation.
+ const clang::driver::JobList &Jobs = Compilation->getJobs();
+ if (Jobs.size() != 1 || !isa<clang::driver::Command>(*Jobs.begin())) {
+ llvm::SmallString<256> error_msg;
+ llvm::raw_svector_ostream error_stream(error_msg);
+ Compilation->PrintJob(error_stream, Compilation->getJobs(), "; ", true);
+ Diagnostics->Report(clang::diag::err_fe_expected_compiler_job)
+ << error_stream.str();
+ return NULL;
+ }
+
+ // The one job we find should be to invoke clang again.
+ const clang::driver::Command *Cmd =
+ cast<clang::driver::Command>(*Jobs.begin());
+ if (StringRef(Cmd->getCreator().getName()) != "clang") {
+ Diagnostics->Report(clang::diag::err_fe_expected_clang_command);
+ return NULL;
+ }
+
+ return &Cmd->getArguments();
+}
+
+/// \brief Returns a clang build invocation initialized from the CC1 flags.
+static clang::CompilerInvocation *newInvocation(
+ clang::DiagnosticsEngine *Diagnostics,
+ const clang::driver::ArgStringList &CC1Args) {
+ assert(!CC1Args.empty() && "Must at least contain the program name!");
+ clang::CompilerInvocation *Invocation = new clang::CompilerInvocation;
+ clang::CompilerInvocation::CreateFromArgs(
+ *Invocation, CC1Args.data() + 1, CC1Args.data() + CC1Args.size(),
+ *Diagnostics);
+ Invocation->getFrontendOpts().DisableFree = false;
+ return Invocation;
+}
+
+bool runToolOnCode(clang::FrontendAction *ToolAction, const Twine &Code,
+ const Twine &FileName) {
+ SmallString<16> FileNameStorage;
+ StringRef FileNameRef = FileName.toNullTerminatedStringRef(FileNameStorage);
+ const char *const CommandLine[] = {
+ "clang-tool", "-fsyntax-only", FileNameRef.data()
+ };
+ FileManager Files((FileSystemOptions()));
+ ToolInvocation Invocation(
+ std::vector<std::string>(
+ CommandLine,
+ CommandLine + llvm::array_lengthof(CommandLine)),
+ ToolAction, &Files);
+
+ SmallString<1024> CodeStorage;
+ Invocation.mapVirtualFile(FileNameRef,
+ Code.toNullTerminatedStringRef(CodeStorage));
+ return Invocation.run();
+}
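+
+// For illustration, a minimal use of runToolOnCode, assuming the standard
+// SyntaxOnlyAction from clang/Frontend/FrontendActions.h:
+//
+//   bool Ok = runToolOnCode(new clang::SyntaxOnlyAction, "class X {};",
+//                           "input.cc");
+//
+// This maps the code into a virtual file and runs
+// "clang-tool -fsyntax-only input.cc" over it.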
+
+/// \brief Returns the absolute path of 'File', by prepending it with
+/// 'BaseDirectory' if 'File' is not absolute.
+///
+/// Otherwise returns 'File'.
+/// If 'File' starts with "./", the returned path will not contain the "./".
+/// Otherwise, the returned path will contain the literal path-concatenation of
+/// 'BaseDirectory' and 'File'.
+///
+/// \param File Either an absolute or relative path.
+/// \param BaseDirectory An absolute path.
+static std::string getAbsolutePath(
+ StringRef File, StringRef BaseDirectory) {
+ assert(llvm::sys::path::is_absolute(BaseDirectory));
+ if (llvm::sys::path::is_absolute(File)) {
+ return File;
+ }
+ StringRef RelativePath(File);
+ if (RelativePath.startswith("./")) {
+ RelativePath = RelativePath.substr(strlen("./"));
+ }
+ llvm::SmallString<1024> AbsolutePath(BaseDirectory);
+ llvm::sys::path::append(AbsolutePath, RelativePath);
+ return AbsolutePath.str();
+}
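+
+// For illustration (hypothetical paths): getAbsolutePath("./foo/bar.cc",
+// "/home/user") returns "/home/user/foo/bar.cc", while an absolute input
+// such as "/tmp/baz.cc" is returned unchanged.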
+
+ToolInvocation::ToolInvocation(
+ ArrayRef<std::string> CommandLine, FrontendAction *ToolAction,
+ FileManager *Files)
+ : CommandLine(CommandLine.vec()), ToolAction(ToolAction), Files(Files) {
+}
+
+void ToolInvocation::mapVirtualFile(StringRef FilePath, StringRef Content) {
+ MappedFileContents[FilePath] = Content;
+}
+
+bool ToolInvocation::run() {
+ std::vector<const char*> Argv;
+ for (int I = 0, E = CommandLine.size(); I != E; ++I)
+ Argv.push_back(CommandLine[I].c_str());
+ const char *const BinaryName = Argv[0];
+ DiagnosticOptions DefaultDiagnosticOptions;
+ TextDiagnosticPrinter DiagnosticPrinter(
+ llvm::errs(), DefaultDiagnosticOptions);
+ DiagnosticsEngine Diagnostics(llvm::IntrusiveRefCntPtr<clang::DiagnosticIDs>(
+ new DiagnosticIDs()), &DiagnosticPrinter, false);
+
+ const llvm::OwningPtr<clang::driver::Driver> Driver(
+ newDriver(&Diagnostics, BinaryName));
+ // Since the input might only be virtual, don't check whether it exists.
+ Driver->setCheckInputsExist(false);
+ const llvm::OwningPtr<clang::driver::Compilation> Compilation(
+ Driver->BuildCompilation(llvm::makeArrayRef(Argv)));
+ const clang::driver::ArgStringList *const CC1Args = getCC1Arguments(
+ &Diagnostics, Compilation.get());
+ if (CC1Args == NULL) {
+ return false;
+ }
+ llvm::OwningPtr<clang::CompilerInvocation> Invocation(
+ newInvocation(&Diagnostics, *CC1Args));
+ return runInvocation(BinaryName, Compilation.get(),
+ Invocation.take(), *CC1Args, ToolAction.take());
+}
+
+// Exists solely for the purpose of lookup of the resource path.
+static int StaticSymbol;
+
+bool ToolInvocation::runInvocation(
+ const char *BinaryName,
+ clang::driver::Compilation *Compilation,
+ clang::CompilerInvocation *Invocation,
+ const clang::driver::ArgStringList &CC1Args,
+ clang::FrontendAction *ToolAction) {
+ llvm::OwningPtr<clang::FrontendAction> ScopedToolAction(ToolAction);
+ // Show the invocation, with -v.
+ if (Invocation->getHeaderSearchOpts().Verbose) {
+ llvm::errs() << "clang Invocation:\n";
+ Compilation->PrintJob(llvm::errs(), Compilation->getJobs(), "\n", true);
+ llvm::errs() << "\n";
+ }
+
+ // Create a compiler instance to handle the actual work.
+ clang::CompilerInstance Compiler;
+ Compiler.setInvocation(Invocation);
+ Compiler.setFileManager(Files);
+ // FIXME: What about LangOpts?
+
+ // Create the compiler's actual diagnostics engine.
+ Compiler.createDiagnostics(CC1Args.size(),
+ const_cast<char**>(CC1Args.data()));
+ if (!Compiler.hasDiagnostics())
+ return false;
+
+ Compiler.createSourceManager(*Files);
+ addFileMappingsTo(Compiler.getSourceManager());
+
+ // Infer the builtin include path if unspecified.
+ if (Compiler.getHeaderSearchOpts().UseBuiltinIncludes &&
+ Compiler.getHeaderSearchOpts().ResourceDir.empty()) {
+ // This just needs to be some symbol in the binary.
+ void *const SymbolAddr = &StaticSymbol;
+ Compiler.getHeaderSearchOpts().ResourceDir =
+ clang::CompilerInvocation::GetResourcesPath(BinaryName, SymbolAddr);
+ }
+
+ const bool Success = Compiler.ExecuteAction(*ToolAction);
+
+ Compiler.resetAndLeakFileManager();
+ return Success;
+}
+
+void ToolInvocation::addFileMappingsTo(SourceManager &Sources) {
+ for (llvm::StringMap<StringRef>::const_iterator
+ It = MappedFileContents.begin(), End = MappedFileContents.end();
+ It != End; ++It) {
+ // Inject the code as the given file name into the source manager.
+ const llvm::MemoryBuffer *Input =
+ llvm::MemoryBuffer::getMemBuffer(It->getValue());
+ // FIXME: figure out what '0' stands for.
+ const FileEntry *FromFile = Files->getVirtualFile(
+ It->getKey(), Input->getBufferSize(), 0);
+ // FIXME: figure out memory management ('true').
+ Sources.overrideFileContents(FromFile, Input, true);
+ }
+}
+
+ClangTool::ClangTool(const CompilationDatabase &Compilations,
+ ArrayRef<std::string> SourcePaths)
+ : Files((FileSystemOptions())) {
+ llvm::SmallString<1024> BaseDirectory;
+ if (const char *PWD = ::getenv("PWD"))
+ BaseDirectory = PWD;
+ else
+ llvm::sys::fs::current_path(BaseDirectory);
+ for (unsigned I = 0, E = SourcePaths.size(); I != E; ++I) {
+ llvm::SmallString<1024> File(getAbsolutePath(
+ SourcePaths[I], BaseDirectory));
+
+ std::vector<CompileCommand> CompileCommands =
+ Compilations.getCompileCommands(File.str());
+ if (!CompileCommands.empty()) {
+ for (int I = 0, E = CompileCommands.size(); I != E; ++I) {
+ CompileCommand &Command = CompileCommands[I];
+ if (!Command.Directory.empty()) {
+ // FIXME: What should happen if CommandLine includes -working-directory
+ // as well?
+ Command.CommandLine.push_back(
+ "-working-directory=" + Command.Directory);
+ }
+ CommandLines.push_back(std::make_pair(File.str(), Command.CommandLine));
+ }
+ } else {
+ // FIXME: There are two use cases here: doing a fuzzy
+ // "find . -name '*.cc' |xargs tool" match, where as a user I don't care
+ // about the .cc files that were not found, and the use case where I
+ // specify all files I want to run over explicitly, where this should
+ // be an error. We'll want to add an option for this.
+ llvm::outs() << "Skipping " << File << ". Command line not found.\n";
+ }
+ }
+}
+
+void ClangTool::mapVirtualFile(StringRef FilePath, StringRef Content) {
+ MappedFileContents.push_back(std::make_pair(FilePath, Content));
+}
+
+int ClangTool::run(FrontendActionFactory *ActionFactory) {
+ bool ProcessingFailed = false;
+ for (unsigned I = 0; I < CommandLines.size(); ++I) {
+ std::string File = CommandLines[I].first;
+ std::vector<std::string> &CommandLine = CommandLines[I].second;
+ llvm::outs() << "Processing: " << File << ".\n";
+ ToolInvocation Invocation(CommandLine, ActionFactory->create(), &Files);
+ for (int I = 0, E = MappedFileContents.size(); I != E; ++I) {
+ Invocation.mapVirtualFile(MappedFileContents[I].first,
+ MappedFileContents[I].second);
+ }
+ if (!Invocation.run()) {
+ llvm::outs() << "Error while processing " << File << ".\n";
+ ProcessingFailed = true;
+ }
+ }
+ return ProcessingFailed ? 1 : 0;
+}
+
+} // end namespace tooling
+} // end namespace clang
diff --git a/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp b/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp
new file mode 100644
index 0000000..a211090
--- /dev/null
+++ b/contrib/llvm/tools/clang/tools/driver/cc1_main.cpp
@@ -0,0 +1,189 @@
+//===-- cc1_main.cpp - Clang CC1 Compiler Frontend ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the entry point to the clang -cc1 functionality, which implements the
+// core compiler functionality along with a number of additional tools for
+// demonstration and testing purposes.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/CC1Options.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/OptTable.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/CompilerInvocation.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/TextDiagnosticBuffer.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/FrontendTool/Utils.h"
+#include "llvm/ADT/Statistic.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/LinkAllPasses.h"
+#include <cstdio>
+using namespace clang;
+
+//===----------------------------------------------------------------------===//
+// Main driver
+//===----------------------------------------------------------------------===//
+
+static void LLVMErrorHandler(void *UserData, const std::string &Message) {
+ DiagnosticsEngine &Diags = *static_cast<DiagnosticsEngine*>(UserData);
+
+ Diags.Report(diag::err_fe_error_backend) << Message;
+
+ // We cannot recover from llvm errors.
+ exit(1);
+}
+
+// FIXME: Define the need for this testing away.
+static int cc1_test(DiagnosticsEngine &Diags,
+ const char **ArgBegin, const char **ArgEnd) {
+ using namespace clang::driver;
+
+ llvm::errs() << "cc1 argv:";
+ for (const char **i = ArgBegin; i != ArgEnd; ++i)
+ llvm::errs() << " \"" << *i << '"';
+ llvm::errs() << "\n";
+
+ // Parse the arguments.
+ OptTable *Opts = createCC1OptTable();
+ unsigned MissingArgIndex, MissingArgCount;
+ InputArgList *Args = Opts->ParseArgs(ArgBegin, ArgEnd,
+ MissingArgIndex, MissingArgCount);
+
+ // Check for missing argument error.
+ if (MissingArgCount)
+ Diags.Report(clang::diag::err_drv_missing_argument)
+ << Args->getArgString(MissingArgIndex) << MissingArgCount;
+
+ // Dump the parsed arguments.
+ llvm::errs() << "cc1 parsed options:\n";
+ for (ArgList::const_iterator it = Args->begin(), ie = Args->end();
+ it != ie; ++it)
+ (*it)->dump();
+
+ // Create a compiler invocation.
+ llvm::errs() << "cc1 creating invocation.\n";
+ CompilerInvocation Invocation;
+ if (!CompilerInvocation::CreateFromArgs(Invocation, ArgBegin, ArgEnd, Diags))
+ return 1;
+
+ // Convert the invocation back to argument strings.
+ std::vector<std::string> InvocationArgs;
+ Invocation.toArgs(InvocationArgs);
+
+ // Dump the converted arguments.
+ SmallVector<const char*, 32> Invocation2Args;
+ llvm::errs() << "invocation argv :";
+ for (unsigned i = 0, e = InvocationArgs.size(); i != e; ++i) {
+ Invocation2Args.push_back(InvocationArgs[i].c_str());
+ llvm::errs() << " \"" << InvocationArgs[i] << '"';
+ }
+ llvm::errs() << "\n";
+
+ // Convert those arguments to another invocation, and check that we got the
+ // same thing.
+ CompilerInvocation Invocation2;
+ if (!CompilerInvocation::CreateFromArgs(Invocation2, Invocation2Args.begin(),
+ Invocation2Args.end(), Diags))
+ return 1;
+
+ // FIXME: Implement CompilerInvocation comparison.
+ if (true) {
+ //llvm::errs() << "warning: Invocations differ!\n";
+
+ std::vector<std::string> Invocation2Args;
+ Invocation2.toArgs(Invocation2Args);
+ llvm::errs() << "invocation2 argv:";
+ for (unsigned i = 0, e = Invocation2Args.size(); i != e; ++i)
+ llvm::errs() << " \"" << Invocation2Args[i] << '"';
+ llvm::errs() << "\n";
+ }
+
+ return 0;
+}
+
+int cc1_main(const char **ArgBegin, const char **ArgEnd,
+ const char *Argv0, void *MainAddr) {
+ OwningPtr<CompilerInstance> Clang(new CompilerInstance());
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+
+ // Run clang -cc1 test.
+ if (ArgBegin != ArgEnd && StringRef(ArgBegin[0]) == "-cc1test") {
+ DiagnosticsEngine Diags(DiagID, new TextDiagnosticPrinter(llvm::errs(),
+ DiagnosticOptions()));
+ return cc1_test(Diags, ArgBegin + 1, ArgEnd);
+ }
+
+ // Initialize targets first, so that --version shows registered targets.
+ llvm::InitializeAllTargets();
+ llvm::InitializeAllTargetMCs();
+ llvm::InitializeAllAsmPrinters();
+ llvm::InitializeAllAsmParsers();
+
+ // Buffer diagnostics from argument parsing so that we can output them using a
+ // well-formed diagnostic object.
+ TextDiagnosticBuffer *DiagsBuffer = new TextDiagnosticBuffer;
+ DiagnosticsEngine Diags(DiagID, DiagsBuffer);
+ bool Success;
+ Success = CompilerInvocation::CreateFromArgs(Clang->getInvocation(),
+ ArgBegin, ArgEnd, Diags);
+
+ // Infer the builtin include path if unspecified.
+ if (Clang->getHeaderSearchOpts().UseBuiltinIncludes &&
+ Clang->getHeaderSearchOpts().ResourceDir.empty())
+ Clang->getHeaderSearchOpts().ResourceDir =
+ CompilerInvocation::GetResourcesPath(Argv0, MainAddr);
+
+ // Create the actual diagnostics engine.
+ Clang->createDiagnostics(ArgEnd - ArgBegin, const_cast<char**>(ArgBegin));
+ if (!Clang->hasDiagnostics())
+ return 1;
+
+ // Set an error handler, so that any LLVM backend diagnostics go through our
+ // error handler.
+ llvm::install_fatal_error_handler(LLVMErrorHandler,
+ static_cast<void*>(&Clang->getDiagnostics()));
+
+ DiagsBuffer->FlushDiagnostics(Clang->getDiagnostics());
+ if (!Success)
+ return 1;
+
+ // Execute the frontend actions.
+ Success = ExecuteCompilerInvocation(Clang.get());
+
+ // If any timers were active but haven't been destroyed yet, print their
+ // results now. This happens in -disable-free mode.
+ llvm::TimerGroup::printAll(llvm::errs());
+
+ // Our error handler depends on the Diagnostics object, which we're
+ // potentially about to delete. Uninstall the handler now so that any
+ // later errors use the default handling behavior instead.
+ llvm::remove_fatal_error_handler();
+
+ // When running with -disable-free, don't do any destruction or shutdown.
+ if (Clang->getFrontendOpts().DisableFree) {
+ if (llvm::AreStatisticsEnabled() || Clang->getFrontendOpts().ShowStats)
+ llvm::PrintStatistics();
+ Clang.take();
+ return !Success;
+ }
+
+ // Managed static deconstruction. Useful for making things like
+ // -time-passes usable.
+ llvm::llvm_shutdown();
+
+ return !Success;
+}
diff --git a/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp b/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
new file mode 100644
index 0000000..508d6da
--- /dev/null
+++ b/contrib/llvm/tools/clang/tools/driver/cc1as_main.cpp
@@ -0,0 +1,451 @@
+//===-- cc1as_main.cpp - Clang Assembler ---------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the entry point to the clang -cc1as functionality, which implements
+// the direct interface to the LLVM MC based assembler.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Driver/Arg.h"
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/CC1AsOptions.h"
+#include "clang/Driver/OptTable.h"
+#include "clang/Driver/Options.h"
+#include "clang/Frontend/DiagnosticOptions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/MC/MCParser/MCAsmParser.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCTargetAsmParser.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
+#include "llvm/Target/TargetData.h"
+using namespace clang;
+using namespace clang::driver;
+using namespace llvm;
+
+namespace {
+
+/// \brief Helper class for representing a single invocation of the assembler.
+struct AssemblerInvocation {
+ /// @name Target Options
+ /// @{
+
+ /// The name of the target triple to assemble for.
+ std::string Triple;
+
+ /// If given, the name of the target CPU to determine which instructions
+ /// are legal.
+ std::string CPU;
+
+ /// The list of target specific features to enable or disable -- this should
+ /// be a list of strings starting with '+' or '-'.
+ std::vector<std::string> Features;
+
+ /// @}
+ /// @name Language Options
+ /// @{
+
+ std::vector<std::string> IncludePaths;
+ unsigned NoInitialTextSection : 1;
+ unsigned SaveTemporaryLabels : 1;
+ unsigned GenDwarfForAssembly : 1;
+ std::string DwarfDebugFlags;
+
+ /// @}
+ /// @name Frontend Options
+ /// @{
+
+ std::string InputFile;
+ std::vector<std::string> LLVMArgs;
+ std::string OutputPath;
+ enum FileType {
+ FT_Asm, ///< Assembly (.s) output, transliterate mode.
+ FT_Null, ///< No output, for timing purposes.
+ FT_Obj ///< Object file output.
+ };
+ FileType OutputType;
+ unsigned ShowHelp : 1;
+ unsigned ShowVersion : 1;
+
+ /// @}
+ /// @name Transliterate Options
+ /// @{
+
+ unsigned OutputAsmVariant;
+ unsigned ShowEncoding : 1;
+ unsigned ShowInst : 1;
+
+ /// @}
+ /// @name Assembler Options
+ /// @{
+
+ unsigned RelaxAll : 1;
+ unsigned NoExecStack : 1;
+
+ /// @}
+
+public:
+ AssemblerInvocation() {
+ Triple = "";
+ NoInitialTextSection = 0;
+ InputFile = "-";
+ OutputPath = "-";
+ OutputType = FT_Asm;
+ OutputAsmVariant = 0;
+ ShowInst = 0;
+ ShowEncoding = 0;
+ RelaxAll = 0;
+ NoExecStack = 0;
+ }
+
+ static bool CreateFromArgs(AssemblerInvocation &Res, const char **ArgBegin,
+ const char **ArgEnd, DiagnosticsEngine &Diags);
+};
+
+}
+
+bool AssemblerInvocation::CreateFromArgs(AssemblerInvocation &Opts,
+ const char **ArgBegin,
+ const char **ArgEnd,
+ DiagnosticsEngine &Diags) {
+ using namespace clang::driver::cc1asoptions;
+ bool Success = true;
+
+ // Parse the arguments.
+ OwningPtr<OptTable> OptTbl(createCC1AsOptTable());
+ unsigned MissingArgIndex, MissingArgCount;
+ OwningPtr<InputArgList> Args(
+ OptTbl->ParseArgs(ArgBegin, ArgEnd,MissingArgIndex, MissingArgCount));
+
+ // Check for missing argument error.
+ if (MissingArgCount) {
+ Diags.Report(diag::err_drv_missing_argument)
+ << Args->getArgString(MissingArgIndex) << MissingArgCount;
+ Success = false;
+ }
+
+ // Issue errors on unknown arguments.
+ for (arg_iterator it = Args->filtered_begin(cc1asoptions::OPT_UNKNOWN),
+ ie = Args->filtered_end(); it != ie; ++it) {
+ Diags.Report(diag::err_drv_unknown_argument) << (*it) ->getAsString(*Args);
+ Success = false;
+ }
+
+ // Construct the invocation.
+
+ // Target Options
+ Opts.Triple = llvm::Triple::normalize(Args->getLastArgValue(OPT_triple));
+ Opts.CPU = Args->getLastArgValue(OPT_target_cpu);
+ Opts.Features = Args->getAllArgValues(OPT_target_feature);
+
+ // Use the default target triple if unspecified.
+ if (Opts.Triple.empty())
+ Opts.Triple = llvm::sys::getDefaultTargetTriple();
+
+ // Language Options
+ Opts.IncludePaths = Args->getAllArgValues(OPT_I);
+ Opts.NoInitialTextSection = Args->hasArg(OPT_n);
+ Opts.SaveTemporaryLabels = Args->hasArg(OPT_L);
+ Opts.GenDwarfForAssembly = Args->hasArg(OPT_g);
+ Opts.DwarfDebugFlags = Args->getLastArgValue(OPT_dwarf_debug_flags);
+
+ // Frontend Options
+ if (Args->hasArg(OPT_INPUT)) {
+ bool First = true;
+ for (arg_iterator it = Args->filtered_begin(OPT_INPUT),
+ ie = Args->filtered_end(); it != ie; ++it, First=false) {
+ const Arg *A = it;
+ if (First)
+ Opts.InputFile = A->getValue(*Args);
+ else {
+ Diags.Report(diag::err_drv_unknown_argument) << A->getAsString(*Args);
+ Success = false;
+ }
+ }
+ }
+ Opts.LLVMArgs = Args->getAllArgValues(OPT_mllvm);
+ if (Args->hasArg(OPT_fatal_warnings))
+ Opts.LLVMArgs.push_back("-fatal-assembler-warnings");
+ Opts.OutputPath = Args->getLastArgValue(OPT_o);
+ if (Arg *A = Args->getLastArg(OPT_filetype)) {
+ StringRef Name = A->getValue(*Args);
+ unsigned OutputType = StringSwitch<unsigned>(Name)
+ .Case("asm", FT_Asm)
+ .Case("null", FT_Null)
+ .Case("obj", FT_Obj)
+ .Default(~0U);
+ if (OutputType == ~0U) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(*Args) << Name;
+ Success = false;
+ } else
+ Opts.OutputType = FileType(OutputType);
+ }
+ Opts.ShowHelp = Args->hasArg(OPT_help);
+ Opts.ShowVersion = Args->hasArg(OPT_version);
+
+ // Transliterate Options
+ Opts.OutputAsmVariant = Args->getLastArgIntValue(OPT_output_asm_variant,
+ 0, Diags);
+ Opts.ShowEncoding = Args->hasArg(OPT_show_encoding);
+ Opts.ShowInst = Args->hasArg(OPT_show_inst);
+
+ // Assemble Options
+ Opts.RelaxAll = Args->hasArg(OPT_relax_all);
+ Opts.NoExecStack = Args->hasArg(OPT_no_exec_stack);
+
+ return Success;
+}
+
+static formatted_raw_ostream *GetOutputStream(AssemblerInvocation &Opts,
+ DiagnosticsEngine &Diags,
+ bool Binary) {
+ if (Opts.OutputPath.empty())
+ Opts.OutputPath = "-";
+
+ // Make sure that the Out file gets unlinked from the disk if we get a
+ // SIGINT.
+ if (Opts.OutputPath != "-")
+ sys::RemoveFileOnSignal(sys::Path(Opts.OutputPath));
+
+ std::string Error;
+ raw_fd_ostream *Out =
+ new raw_fd_ostream(Opts.OutputPath.c_str(), Error,
+ (Binary ? raw_fd_ostream::F_Binary : 0));
+ if (!Error.empty()) {
+ Diags.Report(diag::err_fe_unable_to_open_output)
+ << Opts.OutputPath << Error;
+ return 0;
+ }
+
+ return new formatted_raw_ostream(*Out, formatted_raw_ostream::DELETE_STREAM);
+}
+
+static bool ExecuteAssembler(AssemblerInvocation &Opts,
+ DiagnosticsEngine &Diags) {
+ // Get the target specific parser.
+ std::string Error;
+ const Target *TheTarget(TargetRegistry::lookupTarget(Opts.Triple, Error));
+ if (!TheTarget) {
+ Diags.Report(diag::err_target_unknown_triple) << Opts.Triple;
+ return false;
+ }
+
+ OwningPtr<MemoryBuffer> BufferPtr;
+ if (error_code ec = MemoryBuffer::getFileOrSTDIN(Opts.InputFile, BufferPtr)) {
+ Error = ec.message();
+ Diags.Report(diag::err_fe_error_reading) << Opts.InputFile;
+ return false;
+ }
+ MemoryBuffer *Buffer = BufferPtr.take();
+
+ SourceMgr SrcMgr;
+
+ // Tell SrcMgr about this buffer, which is what the parser will pick up.
+ SrcMgr.AddNewSourceBuffer(Buffer, SMLoc());
+
+ // Record the location of the include directories so that the lexer can find
+ // them later.
+ SrcMgr.setIncludeDirs(Opts.IncludePaths);
+
+ OwningPtr<MCAsmInfo> MAI(TheTarget->createMCAsmInfo(Opts.Triple));
+ assert(MAI && "Unable to create target asm info!");
+
+ OwningPtr<MCRegisterInfo> MRI(TheTarget->createMCRegInfo(Opts.Triple));
+ assert(MRI && "Unable to create target register info!");
+
+ bool IsBinary = Opts.OutputType == AssemblerInvocation::FT_Obj;
+ formatted_raw_ostream *Out = GetOutputStream(Opts, Diags, IsBinary);
+ if (!Out)
+ return false;
+
+ // FIXME: This is not pretty. MCContext has a ptr to MCObjectFileInfo and
+ // MCObjectFileInfo needs an MCContext reference in order to initialize itself.
+ OwningPtr<MCObjectFileInfo> MOFI(new MCObjectFileInfo());
+ MCContext Ctx(*MAI, *MRI, MOFI.get(), &SrcMgr);
+ // FIXME: Assembler behavior can change with -static.
+ MOFI->InitMCObjectFileInfo(Opts.Triple,
+ Reloc::Default, CodeModel::Default, Ctx);
+ if (Opts.SaveTemporaryLabels)
+ Ctx.setAllowTemporaryLabels(false);
+ if (Opts.GenDwarfForAssembly)
+ Ctx.setGenDwarfForAssembly(true);
+ if (!Opts.DwarfDebugFlags.empty())
+ Ctx.setDwarfDebugFlags(StringRef(Opts.DwarfDebugFlags));
+
+ // Build up the feature string from the target feature list.
+ std::string FS;
+ if (!Opts.Features.empty()) {
+ FS = Opts.Features[0];
+ for (unsigned i = 1, e = Opts.Features.size(); i != e; ++i)
+ FS += "," + Opts.Features[i];
+ }
+
+ OwningPtr<MCStreamer> Str;
+
+ OwningPtr<MCInstrInfo> MCII(TheTarget->createMCInstrInfo());
+ OwningPtr<MCSubtargetInfo>
+ STI(TheTarget->createMCSubtargetInfo(Opts.Triple, Opts.CPU, FS));
+
+ // FIXME: There is a bit of code duplication with addPassesToEmitFile.
+ if (Opts.OutputType == AssemblerInvocation::FT_Asm) {
+ MCInstPrinter *IP =
+ TheTarget->createMCInstPrinter(Opts.OutputAsmVariant, *MAI, *MCII, *MRI,
+ *STI);
+ MCCodeEmitter *CE = 0;
+ MCAsmBackend *MAB = 0;
+ if (Opts.ShowEncoding) {
+ CE = TheTarget->createMCCodeEmitter(*MCII, *STI, Ctx);
+ MAB = TheTarget->createMCAsmBackend(Opts.Triple);
+ }
+ Str.reset(TheTarget->createAsmStreamer(Ctx, *Out, /*asmverbose*/true,
+ /*useLoc*/ true,
+ /*useCFI*/ true,
+ /*useDwarfDirectory*/ true,
+ IP, CE, MAB,
+ Opts.ShowInst));
+ } else if (Opts.OutputType == AssemblerInvocation::FT_Null) {
+ Str.reset(createNullStreamer(Ctx));
+ } else {
+ assert(Opts.OutputType == AssemblerInvocation::FT_Obj &&
+ "Invalid file type!");
+ MCCodeEmitter *CE = TheTarget->createMCCodeEmitter(*MCII, *STI, Ctx);
+ MCAsmBackend *MAB = TheTarget->createMCAsmBackend(Opts.Triple);
+ Str.reset(TheTarget->createMCObjectStreamer(Opts.Triple, Ctx, *MAB, *Out,
+ CE, Opts.RelaxAll,
+ Opts.NoExecStack));
+ Str.get()->InitSections();
+ }
+
+ OwningPtr<MCAsmParser> Parser(createMCAsmParser(SrcMgr, Ctx,
+ *Str.get(), *MAI));
+ OwningPtr<MCTargetAsmParser> TAP(TheTarget->createMCAsmParser(*STI, *Parser));
+ if (!TAP) {
+ Diags.Report(diag::err_target_unknown_triple) << Opts.Triple;
+ return false;
+ }
+
+ Parser->setTargetParser(*TAP.get());
+
+ bool Success = !Parser->Run(Opts.NoInitialTextSection);
+
+ // Close the output.
+ delete Out;
+
+ // Delete output on errors.
+ if (!Success && Opts.OutputPath != "-")
+ sys::Path(Opts.OutputPath).eraseFromDisk();
+
+ return Success;
+}
+
+static void LLVMErrorHandler(void *UserData, const std::string &Message) {
+ DiagnosticsEngine &Diags = *static_cast<DiagnosticsEngine*>(UserData);
+
+ Diags.Report(diag::err_fe_error_backend) << Message;
+
+ // We cannot recover from llvm errors.
+ exit(1);
+}
+
+int cc1as_main(const char **ArgBegin, const char **ArgEnd,
+ const char *Argv0, void *MainAddr) {
+ // Print a stack trace if we signal out.
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(ArgEnd - ArgBegin, ArgBegin);
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+
+ // Initialize targets and assembly printers/parsers.
+ InitializeAllTargetInfos();
+ InitializeAllTargetMCs();
+ InitializeAllAsmParsers();
+
+ // Construct our diagnostic client.
+ TextDiagnosticPrinter *DiagClient
+ = new TextDiagnosticPrinter(errs(), DiagnosticOptions());
+ DiagClient->setPrefix("clang -cc1as");
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+ DiagnosticsEngine Diags(DiagID, DiagClient);
+
+ // Set an error handler, so that any LLVM backend diagnostics go through our
+ // error handler.
+ ScopedFatalErrorHandler FatalErrorHandler
+ (LLVMErrorHandler, static_cast<void*>(&Diags));
+
+ // Parse the arguments.
+ AssemblerInvocation Asm;
+ if (!AssemblerInvocation::CreateFromArgs(Asm, ArgBegin, ArgEnd, Diags))
+ return 1;
+
+ // Honor -help.
+ if (Asm.ShowHelp) {
+ OwningPtr<driver::OptTable> Opts(driver::createCC1AsOptTable());
+ Opts->PrintHelp(llvm::outs(), "clang -cc1as", "Clang Integrated Assembler");
+ return 0;
+ }
+
+ // Honor -version.
+ //
+ // FIXME: Use a better -version message?
+ if (Asm.ShowVersion) {
+ llvm::cl::PrintVersionMessage();
+ return 0;
+ }
+
+ // Honor -mllvm.
+ //
+ // FIXME: Remove this, one day.
+ if (!Asm.LLVMArgs.empty()) {
+ unsigned NumArgs = Asm.LLVMArgs.size();
+ const char **Args = new const char*[NumArgs + 2];
+ Args[0] = "clang (LLVM option parsing)";
+ for (unsigned i = 0; i != NumArgs; ++i)
+ Args[i + 1] = Asm.LLVMArgs[i].c_str();
+ Args[NumArgs + 1] = 0;
+ llvm::cl::ParseCommandLineOptions(NumArgs + 1, Args);
+ }
+
+ // Execute the invocation, unless there were parsing errors.
+ bool Success = false;
+ if (!Diags.hasErrorOccurred())
+ Success = ExecuteAssembler(Asm, Diags);
+
+ // If any timers were active but haven't been destroyed yet, print their
+ // results now.
+ TimerGroup::printAll(errs());
+
+ return !Success;
+}
diff --git a/contrib/llvm/tools/clang/tools/driver/driver.cpp b/contrib/llvm/tools/clang/tools/driver/driver.cpp
new file mode 100644
index 0000000..8c05fff
--- /dev/null
+++ b/contrib/llvm/tools/clang/tools/driver/driver.cpp
@@ -0,0 +1,490 @@
+//===-- driver.cpp - Clang GCC-Compatible Driver --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the entry point to the clang driver; it is a thin wrapper
+// for functionality in the Driver clang library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/ArgList.h"
+#include "clang/Driver/CC1Options.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/Option.h"
+#include "clang/Driver/OptTable.h"
+#include "clang/Frontend/CompilerInvocation.h"
+#include "clang/Frontend/DiagnosticOptions.h"
+#include "clang/Frontend/TextDiagnosticPrinter.h"
+#include "clang/Frontend/Utils.h"
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Support/system_error.h"
+#include <cctype>
+using namespace clang;
+using namespace clang::driver;
+
+llvm::sys::Path GetExecutablePath(const char *Argv0, bool CanonicalPrefixes) {
+ if (!CanonicalPrefixes)
+ return llvm::sys::Path(Argv0);
+
+ // This just needs to be some symbol in the binary; C++ doesn't
+ // allow taking the address of ::main however.
+ void *P = (void*) (intptr_t) GetExecutablePath;
+ return llvm::sys::Path::GetMainExecutable(Argv0, P);
+}
+
+static const char *SaveStringInSet(std::set<std::string> &SavedStrings,
+ StringRef S) {
+ return SavedStrings.insert(S).first->c_str();
+}
+
+/// ApplyOneQAOverride - Apply a single edit to the input argument list.
+///
+/// The override string (see ApplyQAOverride) is a space-separated list of
+/// edits to perform; each edit is applied in turn to the input argument
+/// list. Edits should be one of the following forms:
+///
+/// '#': Silence information about the changes to the command line arguments.
+///
+/// '^FOO': Add FOO as a new argument at the beginning of the command line.
+///
+/// '+FOO': Add FOO as a new argument at the end of the command line.
+///
+/// 's/XXX/YYY/': Substitute the regular expression XXX with YYY in the command
+/// line.
+///
+/// 'xOPTION': Removes all instances of the literal argument OPTION.
+///
+/// 'XOPTION': Removes all instances of the literal argument OPTION,
+/// and the following argument.
+///
+/// 'Ox': Removes all flags matching 'O' or 'O[sz0-9]' and adds 'Ox'
+/// at the end of the command line.
+///
+/// \param OS - The stream to write edit information to.
+/// \param Args - The vector of command line arguments.
+/// \param Edit - The override command to perform.
+/// \param SavedStrings - Set to use for storing string representations.
+static void ApplyOneQAOverride(raw_ostream &OS,
+ SmallVectorImpl<const char*> &Args,
+ StringRef Edit,
+ std::set<std::string> &SavedStrings) {
+ // This does not need to be efficient.
+
+ if (Edit[0] == '^') {
+ const char *Str =
+ SaveStringInSet(SavedStrings, Edit.substr(1));
+ OS << "### Adding argument " << Str << " at beginning\n";
+ Args.insert(Args.begin() + 1, Str);
+ } else if (Edit[0] == '+') {
+ const char *Str =
+ SaveStringInSet(SavedStrings, Edit.substr(1));
+ OS << "### Adding argument " << Str << " at end\n";
+ Args.push_back(Str);
+ } else if (Edit[0] == 's' && Edit[1] == '/' && Edit.endswith("/") &&
+ Edit.slice(2, Edit.size()-1).find('/') != StringRef::npos) {
+ StringRef MatchPattern = Edit.substr(2).split('/').first;
+ StringRef ReplPattern = Edit.substr(2).split('/').second;
+ ReplPattern = ReplPattern.slice(0, ReplPattern.size()-1);
+
+ for (unsigned i = 1, e = Args.size(); i != e; ++i) {
+ std::string Repl = llvm::Regex(MatchPattern).sub(ReplPattern, Args[i]);
+
+ if (Repl != Args[i]) {
+ OS << "### Replacing '" << Args[i] << "' with '" << Repl << "'\n";
+ Args[i] = SaveStringInSet(SavedStrings, Repl);
+ }
+ }
+ } else if (Edit[0] == 'x' || Edit[0] == 'X') {
+ std::string Option = Edit.substr(1, std::string::npos);
+ for (unsigned i = 1; i < Args.size();) {
+ if (Option == Args[i]) {
+ OS << "### Deleting argument " << Args[i] << '\n';
+ Args.erase(Args.begin() + i);
+ if (Edit[0] == 'X') {
+ if (i < Args.size()) {
+ OS << "### Deleting argument " << Args[i] << '\n';
+ Args.erase(Args.begin() + i);
+ } else
+ OS << "### Invalid X edit, end of command line!\n";
+ }
+ } else
+ ++i;
+ }
+ } else if (Edit[0] == 'O') {
+ for (unsigned i = 1; i < Args.size();) {
+ const char *A = Args[i];
+ if (A[0] == '-' && A[1] == 'O' &&
+ (A[2] == '\0' ||
+ (A[3] == '\0' && (A[2] == 's' || A[2] == 'z' ||
+ ('0' <= A[2] && A[2] <= '9'))))) {
+ OS << "### Deleting argument " << Args[i] << '\n';
+ Args.erase(Args.begin() + i);
+ } else
+ ++i;
+ }
+ OS << "### Adding argument " << Edit << " at end\n";
+ Args.push_back(SaveStringInSet(SavedStrings, '-' + Edit.str()));
+ } else {
+ OS << "### Unrecognized edit: " << Edit << "\n";
+ }
+}
+
+/// ApplyQAOverride - Apply a space-separated list of edits to the
+/// input argument list. See ApplyOneQAOverride.
+static void ApplyQAOverride(SmallVectorImpl<const char*> &Args,
+ const char *OverrideStr,
+ std::set<std::string> &SavedStrings) {
+ raw_ostream *OS = &llvm::errs();
+
+ if (OverrideStr[0] == '#') {
+ ++OverrideStr;
+ OS = &llvm::nulls();
+ }
+
+ *OS << "### QA_OVERRIDE_GCC3_OPTIONS: " << OverrideStr << "\n";
+
+ // This does not need to be efficient.
+
+ const char *S = OverrideStr;
+ while (*S) {
+ const char *End = ::strchr(S, ' ');
+ if (!End)
+ End = S + strlen(S);
+ if (End != S)
+ ApplyOneQAOverride(*OS, Args, std::string(S, End), SavedStrings);
+ S = End;
+ if (*S != '\0')
+ ++S;
+ }
+}
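+
+// For illustration, a (hypothetical) override string such as
+//
+//   QA_OVERRIDE_GCC3_OPTIONS="# O2 x-Werror +-g"
+//
+// silences the edit log, replaces any existing -O flags with -O2, deletes
+// every literal -Werror argument, and appends -g to the command line.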
+
+extern int cc1_main(const char **ArgBegin, const char **ArgEnd,
+ const char *Argv0, void *MainAddr);
+extern int cc1as_main(const char **ArgBegin, const char **ArgEnd,
+ const char *Argv0, void *MainAddr);
+
+static void ExpandArgsFromBuf(const char *Arg,
+ SmallVectorImpl<const char*> &ArgVector,
+ std::set<std::string> &SavedStrings) {
+ const char *FName = Arg + 1;
+ OwningPtr<llvm::MemoryBuffer> MemBuf;
+ if (llvm::MemoryBuffer::getFile(FName, MemBuf)) {
+ ArgVector.push_back(SaveStringInSet(SavedStrings, Arg));
+ return;
+ }
+
+ const char *Buf = MemBuf->getBufferStart();
+ char InQuote = ' ';
+ std::string CurArg;
+
+ for (const char *P = Buf; ; ++P) {
+ if (*P == '\0' || (isspace(*P) && InQuote == ' ')) {
+ if (!CurArg.empty()) {
+
+ if (CurArg[0] != '@') {
+ ArgVector.push_back(SaveStringInSet(SavedStrings, CurArg));
+ } else {
+ ExpandArgsFromBuf(CurArg.c_str(), ArgVector, SavedStrings);
+ }
+
+ CurArg = "";
+ }
+ if (*P == '\0')
+ break;
+ else
+ continue;
+ }
+
+ if (isspace(*P)) {
+ if (InQuote != ' ')
+ CurArg.push_back(*P);
+ continue;
+ }
+
+ if (*P == '"' || *P == '\'') {
+ if (InQuote == *P)
+ InQuote = ' ';
+ else if (InQuote == ' ')
+ InQuote = *P;
+ else
+ CurArg.push_back(*P);
+ continue;
+ }
+
+ if (*P == '\\') {
+ ++P;
+ if (*P != '\0')
+ CurArg.push_back(*P);
+ continue;
+ }
+ CurArg.push_back(*P);
+ }
+}
+
+static void ExpandArgv(int argc, const char **argv,
+ SmallVectorImpl<const char*> &ArgVector,
+ std::set<std::string> &SavedStrings) {
+ for (int i = 0; i < argc; ++i) {
+ const char *Arg = argv[i];
+ if (Arg[0] != '@') {
+ ArgVector.push_back(SaveStringInSet(SavedStrings, std::string(Arg)));
+ continue;
+ }
+
+ ExpandArgsFromBuf(Arg, ArgVector, SavedStrings);
+ }
+}
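+
+// For illustration: an argument of the form "@args.rsp" (hypothetical file
+// name) is replaced by the whitespace-separated arguments read from that
+// file; single quotes, double quotes and backslash escapes are honored, and
+// nested @file arguments are expanded recursively.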
+
+static void ParseProgName(SmallVectorImpl<const char *> &ArgVector,
+ std::set<std::string> &SavedStrings,
+ Driver &TheDriver)
+{
+ // Try to infer frontend type and default target from the program name.
+
+ // suffixes[] contains the list of known driver suffixes.
+ // Suffixes are compared against the program name in order.
+ // If there is a match, the frontend type is updated as necessary (CPP/C++).
+ // If there is no match, a second round is done after stripping the last
+ // hyphen and everything following it. This allows using something like
+ // "clang++-2.9".
+
+ // If there is a match in either the first or second round,
+ // the function tries to identify a target as prefix. E.g.
+ // "x86_64-linux-clang" is interpreted as suffix "clang" with
+ // target prefix "x86_64-linux". If such a target prefix is found,
+ // it gets added via -target as an implicit first argument.
+ static const struct {
+ const char *Suffix;
+ bool IsCXX;
+ bool IsCPP;
+ } suffixes [] = {
+ { "clang", false, false },
+ { "clang++", true, false },
+ { "clang-c++", true, false },
+ { "clang-cc", false, false },
+ { "clang-cpp", false, true },
+ { "clang-g++", true, false },
+ { "clang-gcc", false, false },
+ { "cc", false, false },
+ { "cpp", false, true },
+ { "++", true, false },
+ };
+ std::string ProgName(llvm::sys::path::stem(ArgVector[0]));
+ StringRef ProgNameRef(ProgName);
+ StringRef Prefix;
+
+ for (int Components = 2; Components; --Components) {
+ bool FoundMatch = false;
+ size_t i;
+
+ for (i = 0; i < sizeof(suffixes) / sizeof(suffixes[0]); ++i) {
+ if (ProgNameRef.endswith(suffixes[i].Suffix)) {
+ FoundMatch = true;
+ if (suffixes[i].IsCXX)
+ TheDriver.CCCIsCXX = true;
+ if (suffixes[i].IsCPP)
+ TheDriver.CCCIsCPP = true;
+ break;
+ }
+ }
+
+ if (FoundMatch) {
+ StringRef::size_type LastComponent = ProgNameRef.rfind('-',
+ ProgNameRef.size() - strlen(suffixes[i].Suffix));
+ if (LastComponent != StringRef::npos)
+ Prefix = ProgNameRef.slice(0, LastComponent);
+ break;
+ }
+
+ StringRef::size_type LastComponent = ProgNameRef.rfind('-');
+ if (LastComponent == StringRef::npos)
+ break;
+ ProgNameRef = ProgNameRef.slice(0, LastComponent);
+ }
+
+ if (Prefix.empty())
+ return;
+
+ std::string IgnoredError;
+ if (llvm::TargetRegistry::lookupTarget(Prefix, IgnoredError)) {
+ SmallVectorImpl<const char *>::iterator it = ArgVector.begin();
+ if (it != ArgVector.end())
+ ++it;
+ ArgVector.insert(it, SaveStringInSet(SavedStrings, Prefix));
+ ArgVector.insert(it,
+ SaveStringInSet(SavedStrings, std::string("-target")));
+ }
+}
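+
+// For illustration (hypothetical invocation): a program named
+// "x86_64-linux-clang++-2.9" matches no suffix in the first round, so the
+// trailing "-2.9" is stripped; "clang++" then matches (enabling C++ mode),
+// and the remaining prefix "x86_64-linux" is added to the arguments as an
+// implicit "-target x86_64-linux".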
+
+int main(int argc_, const char **argv_) {
+ llvm::sys::PrintStackTraceOnErrorSignal();
+ llvm::PrettyStackTraceProgram X(argc_, argv_);
+
+ std::set<std::string> SavedStrings;
+ SmallVector<const char*, 256> argv;
+
+ ExpandArgv(argc_, argv_, argv, SavedStrings);
+
+ // Handle -cc1 integrated tools.
+ if (argv.size() > 1 && StringRef(argv[1]).startswith("-cc1")) {
+ StringRef Tool = argv[1] + 4;
+
+ if (Tool == "")
+ return cc1_main(argv.data()+2, argv.data()+argv.size(), argv[0],
+ (void*) (intptr_t) GetExecutablePath);
+ if (Tool == "as")
+ return cc1as_main(argv.data()+2, argv.data()+argv.size(), argv[0],
+ (void*) (intptr_t) GetExecutablePath);
+
+ // Reject unknown tools.
+ llvm::errs() << "error: unknown integrated tool '" << Tool << "'\n";
+ return 1;
+ }
+
+ bool CanonicalPrefixes = true;
+ for (int i = 1, size = argv.size(); i < size; ++i) {
+ if (StringRef(argv[i]) == "-no-canonical-prefixes") {
+ CanonicalPrefixes = false;
+ break;
+ }
+ }
+
+ llvm::sys::Path Path = GetExecutablePath(argv[0], CanonicalPrefixes);
+
+ DiagnosticOptions DiagOpts;
+ {
+ // Note that ParseDiagnosticArgs() uses the cc1 option table.
+ OwningPtr<OptTable> CC1Opts(createCC1OptTable());
+ unsigned MissingArgIndex, MissingArgCount;
+ OwningPtr<InputArgList> Args(CC1Opts->ParseArgs(argv.begin()+1, argv.end(),
+ MissingArgIndex, MissingArgCount));
+ // We ignore MissingArgCount and the return value of ParseDiagnosticArgs.
+ // Any errors that would be diagnosed here will also be diagnosed later,
+ // when the DiagnosticsEngine actually exists.
+ (void) ParseDiagnosticArgs(DiagOpts, *Args);
+ }
+ // Now we can create the DiagnosticsEngine with a properly-filled-out
+ // DiagnosticOptions instance.
+ TextDiagnosticPrinter *DiagClient
+ = new TextDiagnosticPrinter(llvm::errs(), DiagOpts);
+ DiagClient->setPrefix(llvm::sys::path::stem(Path.str()));
+ IntrusiveRefCntPtr<DiagnosticIDs> DiagID(new DiagnosticIDs());
+
+ DiagnosticsEngine Diags(DiagID, DiagClient);
+ ProcessWarningOptions(Diags, DiagOpts);
+
+#ifdef CLANG_IS_PRODUCTION
+ const bool IsProduction = true;
+#else
+ const bool IsProduction = false;
+#endif
+ Driver TheDriver(Path.str(), llvm::sys::getDefaultTargetTriple(),
+ "a.out", IsProduction, Diags);
+
+ // Attempt to find the original path used to invoke the driver, to determine
+ // the installed path. We do this manually, because we want to support that
+ // path being a symlink.
+ {
+ SmallString<128> InstalledPath(argv[0]);
+
+ // Do a PATH lookup, if there are no directory components.
+ if (llvm::sys::path::filename(InstalledPath) == InstalledPath) {
+ llvm::sys::Path Tmp = llvm::sys::Program::FindProgramByName(
+ llvm::sys::path::filename(InstalledPath.str()));
+ if (!Tmp.empty())
+ InstalledPath = Tmp.str();
+ }
+ llvm::sys::fs::make_absolute(InstalledPath);
+ InstalledPath = llvm::sys::path::parent_path(InstalledPath);
+ bool exists;
+ if (!llvm::sys::fs::exists(InstalledPath.str(), exists) && exists)
+ TheDriver.setInstalledDir(InstalledPath);
+ }
+
+ llvm::InitializeAllTargets();
+ ParseProgName(argv, SavedStrings, TheDriver);
+
+ // Handle CC_PRINT_OPTIONS and CC_PRINT_OPTIONS_FILE.
+ TheDriver.CCPrintOptions = !!::getenv("CC_PRINT_OPTIONS");
+ if (TheDriver.CCPrintOptions)
+ TheDriver.CCPrintOptionsFilename = ::getenv("CC_PRINT_OPTIONS_FILE");
+
+ // Handle CC_PRINT_HEADERS and CC_PRINT_HEADERS_FILE.
+ TheDriver.CCPrintHeaders = !!::getenv("CC_PRINT_HEADERS");
+ if (TheDriver.CCPrintHeaders)
+ TheDriver.CCPrintHeadersFilename = ::getenv("CC_PRINT_HEADERS_FILE");
+
+ // Handle CC_LOG_DIAGNOSTICS and CC_LOG_DIAGNOSTICS_FILE.
+ TheDriver.CCLogDiagnostics = !!::getenv("CC_LOG_DIAGNOSTICS");
+ if (TheDriver.CCLogDiagnostics)
+ TheDriver.CCLogDiagnosticsFilename = ::getenv("CC_LOG_DIAGNOSTICS_FILE");
+
+ // Handle QA_OVERRIDE_GCC3_OPTIONS and CCC_ADD_ARGS, used for editing a
+ // command line behind the scenes.
+ if (const char *OverrideStr = ::getenv("QA_OVERRIDE_GCC3_OPTIONS")) {
+ // FIXME: Driver shouldn't take extra initial argument.
+ ApplyQAOverride(argv, OverrideStr, SavedStrings);
+ } else if (const char *Cur = ::getenv("CCC_ADD_ARGS")) {
+ // FIXME: Driver shouldn't take extra initial argument.
+ std::vector<const char*> ExtraArgs;
+
+ for (;;) {
+ const char *Next = strchr(Cur, ',');
+
+ if (Next) {
+ ExtraArgs.push_back(SaveStringInSet(SavedStrings,
+ std::string(Cur, Next)));
+ Cur = Next + 1;
+ } else {
+ if (*Cur != '\0')
+ ExtraArgs.push_back(SaveStringInSet(SavedStrings, Cur));
+ break;
+ }
+ }
+
+ argv.insert(&argv[1], ExtraArgs.begin(), ExtraArgs.end());
+ }
+
+ OwningPtr<Compilation> C(TheDriver.BuildCompilation(argv));
+ int Res = 0;
+ const Command *FailingCommand = 0;
+ if (C.get())
+ Res = TheDriver.ExecuteCompilation(*C, FailingCommand);
+
+ // If result status is < 0, then the driver command signalled an error.
+ // In this case, generate additional diagnostic information if possible.
+ if (Res < 0)
+ TheDriver.generateCompilationDiagnostics(*C, FailingCommand);
+
+ // If any timers were active but haven't been destroyed yet, print their
+ // results now. This happens in -disable-free mode.
+ llvm::TimerGroup::printAll(llvm::errs());
+
+ llvm::llvm_shutdown();
+
+ return Res;
+}
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangASTNodesEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/ClangASTNodesEmitter.cpp
new file mode 100644
index 0000000..d9d5a3c
--- /dev/null
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangASTNodesEmitter.cpp
@@ -0,0 +1,168 @@
+//=== ClangASTNodesEmitter.cpp - Generate Clang AST node tables -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang AST node tables
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangASTNodesEmitter.h"
+#include <set>
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Statement Node Tables (.inc file) generation.
+//===----------------------------------------------------------------------===//
+
+// Returns the first and last non-abstract subrecords.
+// Called recursively to ensure that nodes remain contiguous.
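+// For a concrete node this emits "<NODE>(Node, Base)"; an abstract node is
+// wrapped in ABSTRACT_<ROOT>(...), and each subtree is followed by a
+// <ROOT>_RANGE(Base, First, Last) marker (LAST_<ROOT>_RANGE for the root)
+// covering its concrete descendants.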
+std::pair<Record *, Record *> ClangASTNodesEmitter::EmitNode(
+ const ChildMap &Tree,
+ raw_ostream &OS,
+ Record *Base) {
+ std::string BaseName = macroName(Base->getName());
+
+ ChildIterator i = Tree.lower_bound(Base), e = Tree.upper_bound(Base);
+
+ Record *First = 0, *Last = 0;
+ // This might be the pseudo-node for Stmt; don't assume it has an Abstract
+ // bit
+ if (Base->getValue("Abstract") && !Base->getValueAsBit("Abstract"))
+ First = Last = Base;
+
+ for (; i != e; ++i) {
+ Record *R = i->second;
+ bool Abstract = R->getValueAsBit("Abstract");
+ std::string NodeName = macroName(R->getName());
+
+ OS << "#ifndef " << NodeName << "\n";
+ OS << "# define " << NodeName << "(Type, Base) "
+ << BaseName << "(Type, Base)\n";
+ OS << "#endif\n";
+
+ if (Abstract)
+ OS << "ABSTRACT_" << macroName(Root.getName()) << "(" << NodeName << "("
+ << R->getName() << ", " << baseName(*Base) << "))\n";
+ else
+ OS << NodeName << "(" << R->getName() << ", "
+ << baseName(*Base) << ")\n";
+
+ if (Tree.find(R) != Tree.end()) {
+ const std::pair<Record *, Record *> &Result
+ = EmitNode(Tree, OS, R);
+ if (!First && Result.first)
+ First = Result.first;
+ if (Result.second)
+ Last = Result.second;
+ } else {
+ if (!Abstract) {
+ Last = R;
+
+ if (!First)
+ First = R;
+ }
+ }
+
+ OS << "#undef " << NodeName << "\n\n";
+ }
+
+ if (First) {
+ assert (Last && "Got a first node but not a last node for a range!");
+ if (Base == &Root)
+ OS << "LAST_" << macroName(Root.getName()) << "_RANGE(";
+ else
+ OS << macroName(Root.getName()) << "_RANGE(";
+ OS << Base->getName() << ", " << First->getName() << ", "
+ << Last->getName() << ")\n\n";
+ }
+
+ return std::make_pair(First, Last);
+}
+
+void ClangASTNodesEmitter::run(raw_ostream &OS) {
+ // Write the preamble
+ OS << "#ifndef ABSTRACT_" << macroName(Root.getName()) << "\n";
+ OS << "# define ABSTRACT_" << macroName(Root.getName()) << "(Type) Type\n";
+ OS << "#endif\n";
+
+ OS << "#ifndef " << macroName(Root.getName()) << "_RANGE\n";
+ OS << "# define "
+ << macroName(Root.getName()) << "_RANGE(Base, First, Last)\n";
+ OS << "#endif\n\n";
+
+ OS << "#ifndef LAST_" << macroName(Root.getName()) << "_RANGE\n";
+ OS << "# define LAST_"
+ << macroName(Root.getName()) << "_RANGE(Base, First, Last) "
+ << macroName(Root.getName()) << "_RANGE(Base, First, Last)\n";
+ OS << "#endif\n\n";
+
+ // Emit statements
+ const std::vector<Record*> Stmts
+ = Records.getAllDerivedDefinitions(Root.getName());
+
+ ChildMap Tree;
+
+ for (unsigned i = 0, e = Stmts.size(); i != e; ++i) {
+ Record *R = Stmts[i];
+
+ if (R->getValue("Base"))
+ Tree.insert(std::make_pair(R->getValueAsDef("Base"), R));
+ else
+ Tree.insert(std::make_pair(&Root, R));
+ }
+
+ EmitNode(Tree, OS, &Root);
+
+ OS << "#undef " << macroName(Root.getName()) << "\n";
+ OS << "#undef " << macroName(Root.getName()) << "_RANGE\n";
+ OS << "#undef LAST_" << macroName(Root.getName()) << "_RANGE\n";
+ OS << "#undef ABSTRACT_" << macroName(Root.getName()) << "\n";
+}
+
+void ClangDeclContextEmitter::run(raw_ostream &OS) {
+ // FIXME: Find a .td file format to allow for this to be represented better.
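+  // Emits DECL_CONTEXT_BASE(X) for declaration contexts that other
+  // declarations derive from, and DECL_CONTEXT(X) for the remaining contexts.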
+
+ OS << "#ifndef DECL_CONTEXT\n";
+ OS << "# define DECL_CONTEXT(DECL)\n";
+ OS << "#endif\n";
+
+ OS << "#ifndef DECL_CONTEXT_BASE\n";
+ OS << "# define DECL_CONTEXT_BASE(DECL) DECL_CONTEXT(DECL)\n";
+ OS << "#endif\n";
+
+ typedef std::set<Record*> RecordSet;
+ typedef std::vector<Record*> RecordVector;
+
+ RecordVector DeclContextsVector
+ = Records.getAllDerivedDefinitions("DeclContext");
+ RecordVector Decls = Records.getAllDerivedDefinitions("Decl");
+ RecordSet DeclContexts (DeclContextsVector.begin(), DeclContextsVector.end());
+
+ for (RecordVector::iterator i = Decls.begin(), e = Decls.end(); i != e; ++i) {
+ Record *R = *i;
+
+ if (R->getValue("Base")) {
+ Record *B = R->getValueAsDef("Base");
+ if (DeclContexts.find(B) != DeclContexts.end()) {
+ OS << "DECL_CONTEXT_BASE(" << B->getName() << ")\n";
+ DeclContexts.erase(B);
+ }
+ }
+ }
+
+  // Iterate over the vector rather than the set so the output preserves the
+  // order of the definitions.
+ for (RecordVector::iterator
+ i = DeclContextsVector.begin(), e = DeclContextsVector.end();
+ i != e; ++i)
+ if (DeclContexts.find(*i) != DeclContexts.end())
+ OS << "DECL_CONTEXT(" << (*i)->getName() << ")\n";
+
+ OS << "#undef DECL_CONTEXT\n";
+ OS << "#undef DECL_CONTEXT_BASE\n";
+}
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangASTNodesEmitter.h b/contrib/llvm/tools/clang/utils/TableGen/ClangASTNodesEmitter.h
new file mode 100644
index 0000000..edd9316
--- /dev/null
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangASTNodesEmitter.h
@@ -0,0 +1,84 @@
+//===- ClangASTNodesEmitter.h - Generate Clang AST node tables -*- C++ -*--===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang AST node tables
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANGAST_EMITTER_H
+#define CLANGAST_EMITTER_H
+
+#include "llvm/TableGen/TableGenBackend.h"
+#include "llvm/TableGen/Record.h"
+#include <string>
+#include <cctype>
+#include <map>
+
+namespace llvm {
+
+/// ClangASTNodesEmitter - The top-level class emits .inc files containing
+/// declarations of Clang AST nodes.
+///
+class ClangASTNodesEmitter : public TableGenBackend {
+ // A map from a node to each of its derived nodes.
+ typedef std::multimap<Record*, Record*> ChildMap;
+ typedef ChildMap::const_iterator ChildIterator;
+
+ RecordKeeper &Records;
+ Record Root;
+ const std::string &BaseSuffix;
+
+ // Create a macro-ized version of a name
+ static std::string macroName(std::string S) {
+ for (unsigned i = 0; i < S.size(); ++i)
+ S[i] = std::toupper(S[i]);
+
+ return S;
+ }
+
+ // Return the name to be printed in the base field. Normally this is
+ // the record's name plus the base suffix, but if it is the root node and
+ // the suffix is non-empty, it's just the suffix.
+ std::string baseName(Record &R) {
+ if (&R == &Root && !BaseSuffix.empty())
+ return BaseSuffix;
+
+ return R.getName() + BaseSuffix;
+ }
+
+ std::pair<Record *, Record *> EmitNode (const ChildMap &Tree, raw_ostream& OS,
+ Record *Base);
+public:
+ explicit ClangASTNodesEmitter(RecordKeeper &R, const std::string &N,
+ const std::string &S)
+ : Records(R), Root(N, SMLoc(), R), BaseSuffix(S)
+ {}
+
+ // run - Output the .inc file contents
+ void run(raw_ostream &OS);
+};
+
+/// ClangDeclContextEmitter - Emits an addendum to a .inc file to enumerate the
+/// clang declaration contexts.
+///
+class ClangDeclContextEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+public:
+ explicit ClangDeclContextEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ // run - Output the .inc file contents
+ void run(raw_ostream &OS);
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp
new file mode 100644
index 0000000..7951fc4
--- /dev/null
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.cpp
@@ -0,0 +1,1092 @@
+//===- ClangAttrEmitter.cpp - Generate Clang attribute handling -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang attribute processing code
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangAttrEmitter.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/TableGen/Record.h"
+#include <algorithm>
+#include <cctype>
+#include <set>
+
+using namespace llvm;
+
+static const std::vector<StringRef>
+getValueAsListOfStrings(Record &R, StringRef FieldName) {
+ ListInit *List = R.getValueAsListInit(FieldName);
+ assert (List && "Got a null ListInit");
+
+ std::vector<StringRef> Strings;
+ Strings.reserve(List->getSize());
+
+ for (ListInit::const_iterator i = List->begin(), e = List->end();
+ i != e;
+ ++i) {
+ assert(*i && "Got a null element in a ListInit");
+ if (StringInit *S = dynamic_cast<StringInit *>(*i))
+ Strings.push_back(S->getValue());
+ else
+ assert(false && "Got a non-string, non-code element in a ListInit");
+ }
+
+ return Strings;
+}
+
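+// Map an argument's C++ type to the reader expression that deserializes it
+// from a PCH record; unrecognized types fall back to reading a raw record
+// value.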
+static std::string ReadPCHRecord(StringRef type) {
+ return StringSwitch<std::string>(type)
+ .EndsWith("Decl *", "GetLocalDeclAs<"
+ + std::string(type, 0, type.size()-1) + ">(F, Record[Idx++])")
+ .Case("QualType", "getLocalType(F, Record[Idx++])")
+ .Case("Expr *", "ReadSubExpr()")
+ .Case("IdentifierInfo *", "GetIdentifierInfo(F, Record, Idx)")
+ .Case("SourceLocation", "ReadSourceLocation(F, Record, Idx)")
+ .Default("Record[Idx++]");
+}
+
+// Assumes that the way to get the value is SA->get<Name>()
+static std::string WritePCHRecord(StringRef type, StringRef name) {
+ return StringSwitch<std::string>(type)
+ .EndsWith("Decl *", "AddDeclRef(" + std::string(name) +
+ ", Record);\n")
+ .Case("QualType", "AddTypeRef(" + std::string(name) + ", Record);\n")
+ .Case("Expr *", "AddStmt(" + std::string(name) + ");\n")
+ .Case("IdentifierInfo *",
+ "AddIdentifierRef(" + std::string(name) + ", Record);\n")
+ .Case("SourceLocation",
+ "AddSourceLocation(" + std::string(name) + ", Record);\n")
+ .Default("Record.push_back(" + std::string(name) + ");\n");
+}
+
+// Normalize attribute name by removing leading and trailing
+// underscores. For example, __foo, foo__, __foo__ would
+// become foo.
+static StringRef NormalizeAttrName(StringRef AttrName) {
+ if (AttrName.startswith("__"))
+ AttrName = AttrName.substr(2, AttrName.size());
+
+ if (AttrName.endswith("__"))
+ AttrName = AttrName.substr(0, AttrName.size() - 2);
+
+ return AttrName;
+}
+
+// Normalize attribute spelling only if the spelling has both leading
+// and trailing underscores. For example, __ms_struct__ will be
+// normalized to "ms_struct"; __cdecl will remain intact.
+static StringRef NormalizeAttrSpelling(StringRef AttrSpelling) {
+ if (AttrSpelling.startswith("__") && AttrSpelling.endswith("__")) {
+ AttrSpelling = AttrSpelling.substr(2, AttrSpelling.size() - 4);
+ }
+
+ return AttrSpelling;
+}
+
+namespace {
+ class Argument {
+ std::string lowerName, upperName;
+ StringRef attrName;
+
+ public:
+ Argument(Record &Arg, StringRef Attr)
+ : lowerName(Arg.getValueAsString("Name")), upperName(lowerName),
+ attrName(Attr) {
+ if (!lowerName.empty()) {
+ lowerName[0] = std::tolower(lowerName[0]);
+ upperName[0] = std::toupper(upperName[0]);
+ }
+ }
+ virtual ~Argument() {}
+
+ StringRef getLowerName() const { return lowerName; }
+ StringRef getUpperName() const { return upperName; }
+ StringRef getAttrName() const { return attrName; }
+
+ // These functions print the argument contents formatted in different ways.
+ virtual void writeAccessors(raw_ostream &OS) const = 0;
+ virtual void writeAccessorDefinitions(raw_ostream &OS) const {}
+ virtual void writeCloneArgs(raw_ostream &OS) const = 0;
+ virtual void writeTemplateInstantiationArgs(raw_ostream &OS) const = 0;
+ virtual void writeTemplateInstantiation(raw_ostream &OS) const {}
+ virtual void writeCtorBody(raw_ostream &OS) const {}
+ virtual void writeCtorInitializers(raw_ostream &OS) const = 0;
+ virtual void writeCtorParameters(raw_ostream &OS) const = 0;
+ virtual void writeDeclarations(raw_ostream &OS) const = 0;
+ virtual void writePCHReadArgs(raw_ostream &OS) const = 0;
+ virtual void writePCHReadDecls(raw_ostream &OS) const = 0;
+ virtual void writePCHWrite(raw_ostream &OS) const = 0;
+ virtual void writeValue(raw_ostream &OS) const = 0;
+ };
+
+ class SimpleArgument : public Argument {
+ std::string type;
+
+ public:
+ SimpleArgument(Record &Arg, StringRef Attr, std::string T)
+ : Argument(Arg, Attr), type(T)
+ {}
+
+ std::string getType() const { return type; }
+
+ void writeAccessors(raw_ostream &OS) const {
+ OS << " " << type << " get" << getUpperName() << "() const {\n";
+ OS << " return " << getLowerName() << ";\n";
+ OS << " }";
+ }
+ void writeCloneArgs(raw_ostream &OS) const {
+ OS << getLowerName();
+ }
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "A->get" << getUpperName() << "()";
+ }
+ void writeCtorInitializers(raw_ostream &OS) const {
+ OS << getLowerName() << "(" << getUpperName() << ")";
+ }
+ void writeCtorParameters(raw_ostream &OS) const {
+ OS << type << " " << getUpperName();
+ }
+ void writeDeclarations(raw_ostream &OS) const {
+ OS << type << " " << getLowerName() << ";";
+ }
+ void writePCHReadDecls(raw_ostream &OS) const {
+ std::string read = ReadPCHRecord(type);
+ OS << " " << type << " " << getLowerName() << " = " << read << ";\n";
+ }
+ void writePCHReadArgs(raw_ostream &OS) const {
+ OS << getLowerName();
+ }
+ void writePCHWrite(raw_ostream &OS) const {
+ OS << " " << WritePCHRecord(type, "SA->get" +
+ std::string(getUpperName()) + "()");
+ }
+ void writeValue(raw_ostream &OS) const {
+ if (type == "FunctionDecl *") {
+ OS << "\" << get" << getUpperName() << "()->getNameInfo().getAsString() << \"";
+ } else if (type == "IdentifierInfo *") {
+ OS << "\" << get" << getUpperName() << "()->getName() << \"";
+ } else if (type == "QualType") {
+ OS << "\" << get" << getUpperName() << "().getAsString() << \"";
+ } else if (type == "SourceLocation") {
+ OS << "\" << get" << getUpperName() << "().getRawEncoding() << \"";
+ } else {
+ OS << "\" << get" << getUpperName() << "() << \"";
+ }
+ }
+ };
+
+ class StringArgument : public Argument {
+ public:
+ StringArgument(Record &Arg, StringRef Attr)
+ : Argument(Arg, Attr)
+ {}
+
+ void writeAccessors(raw_ostream &OS) const {
+ OS << " llvm::StringRef get" << getUpperName() << "() const {\n";
+ OS << " return llvm::StringRef(" << getLowerName() << ", "
+ << getLowerName() << "Length);\n";
+ OS << " }\n";
+ OS << " unsigned get" << getUpperName() << "Length() const {\n";
+ OS << " return " << getLowerName() << "Length;\n";
+ OS << " }\n";
+ OS << " void set" << getUpperName()
+ << "(ASTContext &C, llvm::StringRef S) {\n";
+ OS << " " << getLowerName() << "Length = S.size();\n";
+ OS << " this->" << getLowerName() << " = new (C, 1) char ["
+ << getLowerName() << "Length];\n";
+ OS << " std::memcpy(this->" << getLowerName() << ", S.data(), "
+ << getLowerName() << "Length);\n";
+ OS << " }";
+ }
+ void writeCloneArgs(raw_ostream &OS) const {
+ OS << "get" << getUpperName() << "()";
+ }
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "A->get" << getUpperName() << "()";
+ }
+ void writeCtorBody(raw_ostream &OS) const {
+ OS << " std::memcpy(" << getLowerName() << ", " << getUpperName()
+ << ".data(), " << getLowerName() << "Length);";
+ }
+ void writeCtorInitializers(raw_ostream &OS) const {
+ OS << getLowerName() << "Length(" << getUpperName() << ".size()),"
+ << getLowerName() << "(new (Ctx, 1) char[" << getLowerName()
+ << "Length])";
+ }
+ void writeCtorParameters(raw_ostream &OS) const {
+ OS << "llvm::StringRef " << getUpperName();
+ }
+ void writeDeclarations(raw_ostream &OS) const {
+ OS << "unsigned " << getLowerName() << "Length;\n";
+ OS << "char *" << getLowerName() << ";";
+ }
+ void writePCHReadDecls(raw_ostream &OS) const {
+ OS << " std::string " << getLowerName()
+ << "= ReadString(Record, Idx);\n";
+ }
+ void writePCHReadArgs(raw_ostream &OS) const {
+ OS << getLowerName();
+ }
+ void writePCHWrite(raw_ostream &OS) const {
+ OS << " AddString(SA->get" << getUpperName() << "(), Record);\n";
+ }
+ void writeValue(raw_ostream &OS) const {
+ OS << "\\\"\" << get" << getUpperName() << "() << \"\\\"";
+ }
+ };
+
+ class AlignedArgument : public Argument {
+ public:
+ AlignedArgument(Record &Arg, StringRef Attr)
+ : Argument(Arg, Attr)
+ {}
+
+ void writeAccessors(raw_ostream &OS) const {
+ OS << " bool is" << getUpperName() << "Dependent() const;\n";
+
+ OS << " unsigned get" << getUpperName() << "(ASTContext &Ctx) const;\n";
+
+ OS << " bool is" << getUpperName() << "Expr() const {\n";
+ OS << " return is" << getLowerName() << "Expr;\n";
+ OS << " }\n";
+
+ OS << " Expr *get" << getUpperName() << "Expr() const {\n";
+ OS << " assert(is" << getLowerName() << "Expr);\n";
+ OS << " return " << getLowerName() << "Expr;\n";
+ OS << " }\n";
+
+ OS << " TypeSourceInfo *get" << getUpperName() << "Type() const {\n";
+ OS << " assert(!is" << getLowerName() << "Expr);\n";
+ OS << " return " << getLowerName() << "Type;\n";
+ OS << " }";
+ }
+ void writeAccessorDefinitions(raw_ostream &OS) const {
+ OS << "bool " << getAttrName() << "Attr::is" << getUpperName()
+ << "Dependent() const {\n";
+ OS << " if (is" << getLowerName() << "Expr)\n";
+ OS << " return " << getLowerName() << "Expr && (" << getLowerName()
+ << "Expr->isValueDependent() || " << getLowerName()
+ << "Expr->isTypeDependent());\n";
+ OS << " else\n";
+ OS << " return " << getLowerName()
+ << "Type->getType()->isDependentType();\n";
+ OS << "}\n";
+
+ // FIXME: Do not do the calculation here
+ // FIXME: Handle types correctly
+ // A null pointer means maximum alignment
+ // FIXME: Load the platform-specific maximum alignment, rather than
+ // 16, the x86 max.
+ OS << "unsigned " << getAttrName() << "Attr::get" << getUpperName()
+ << "(ASTContext &Ctx) const {\n";
+ OS << " assert(!is" << getUpperName() << "Dependent());\n";
+ OS << " if (is" << getLowerName() << "Expr)\n";
+ OS << " return (" << getLowerName() << "Expr ? " << getLowerName()
+ << "Expr->EvaluateKnownConstInt(Ctx).getZExtValue() : 16)"
+ << "* Ctx.getCharWidth();\n";
+ OS << " else\n";
+ OS << " return 0; // FIXME\n";
+ OS << "}\n";
+ }
+ void writeCloneArgs(raw_ostream &OS) const {
+ OS << "is" << getLowerName() << "Expr, is" << getLowerName()
+ << "Expr ? static_cast<void*>(" << getLowerName()
+ << "Expr) : " << getLowerName()
+ << "Type";
+ }
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ // FIXME: move the definition in Sema::InstantiateAttrs to here.
+ // In the meantime, aligned attributes are cloned.
+ }
+ void writeCtorBody(raw_ostream &OS) const {
+ OS << " if (is" << getLowerName() << "Expr)\n";
+ OS << " " << getLowerName() << "Expr = reinterpret_cast<Expr *>("
+ << getUpperName() << ");\n";
+ OS << " else\n";
+ OS << " " << getLowerName()
+ << "Type = reinterpret_cast<TypeSourceInfo *>(" << getUpperName()
+ << ");";
+ }
+ void writeCtorInitializers(raw_ostream &OS) const {
+ OS << "is" << getLowerName() << "Expr(Is" << getUpperName() << "Expr)";
+ }
+ void writeCtorParameters(raw_ostream &OS) const {
+ OS << "bool Is" << getUpperName() << "Expr, void *" << getUpperName();
+ }
+ void writeDeclarations(raw_ostream &OS) const {
+ OS << "bool is" << getLowerName() << "Expr;\n";
+ OS << "union {\n";
+ OS << "Expr *" << getLowerName() << "Expr;\n";
+ OS << "TypeSourceInfo *" << getLowerName() << "Type;\n";
+ OS << "};";
+ }
+ void writePCHReadArgs(raw_ostream &OS) const {
+ OS << "is" << getLowerName() << "Expr, " << getLowerName() << "Ptr";
+ }
+ void writePCHReadDecls(raw_ostream &OS) const {
+ OS << " bool is" << getLowerName() << "Expr = Record[Idx++];\n";
+ OS << " void *" << getLowerName() << "Ptr;\n";
+ OS << " if (is" << getLowerName() << "Expr)\n";
+ OS << " " << getLowerName() << "Ptr = ReadExpr(F);\n";
+ OS << " else\n";
+ OS << " " << getLowerName()
+ << "Ptr = GetTypeSourceInfo(F, Record, Idx);\n";
+ }
+ void writePCHWrite(raw_ostream &OS) const {
+ OS << " Record.push_back(SA->is" << getUpperName() << "Expr());\n";
+ OS << " if (SA->is" << getUpperName() << "Expr())\n";
+ OS << " AddStmt(SA->get" << getUpperName() << "Expr());\n";
+ OS << " else\n";
+ OS << " AddTypeSourceInfo(SA->get" << getUpperName()
+ << "Type(), Record);\n";
+ }
+ void writeValue(raw_ostream &OS) const {
+ OS << "\" << get" << getUpperName() << "(Ctx) << \"";
+ }
+ };
+
+ class VariadicArgument : public Argument {
+ std::string type;
+
+ public:
+ VariadicArgument(Record &Arg, StringRef Attr, std::string T)
+ : Argument(Arg, Attr), type(T)
+ {}
+
+ std::string getType() const { return type; }
+
+ void writeAccessors(raw_ostream &OS) const {
+ OS << " typedef " << type << "* " << getLowerName() << "_iterator;\n";
+ OS << " " << getLowerName() << "_iterator " << getLowerName()
+ << "_begin() const {\n";
+ OS << " return " << getLowerName() << ";\n";
+ OS << " }\n";
+ OS << " " << getLowerName() << "_iterator " << getLowerName()
+ << "_end() const {\n";
+ OS << " return " << getLowerName() << " + " << getLowerName()
+ << "Size;\n";
+ OS << " }\n";
+ OS << " unsigned " << getLowerName() << "_size() const {\n"
+ << " return " << getLowerName() << "Size;\n";
+ OS << " }";
+ }
+ void writeCloneArgs(raw_ostream &OS) const {
+ OS << getLowerName() << ", " << getLowerName() << "Size";
+ }
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ // This isn't elegant, but we have to go through public methods...
+ OS << "A->" << getLowerName() << "_begin(), "
+ << "A->" << getLowerName() << "_size()";
+ }
+ void writeCtorBody(raw_ostream &OS) const {
+ // FIXME: memcpy is not safe on non-trivial types.
+ OS << " std::memcpy(" << getLowerName() << ", " << getUpperName()
+ << ", " << getLowerName() << "Size * sizeof(" << getType() << "));\n";
+ }
+ void writeCtorInitializers(raw_ostream &OS) const {
+ OS << getLowerName() << "Size(" << getUpperName() << "Size), "
+ << getLowerName() << "(new (Ctx, 16) " << getType() << "["
+ << getLowerName() << "Size])";
+ }
+ void writeCtorParameters(raw_ostream &OS) const {
+ OS << getType() << " *" << getUpperName() << ", unsigned "
+ << getUpperName() << "Size";
+ }
+ void writeDeclarations(raw_ostream &OS) const {
+ OS << " unsigned " << getLowerName() << "Size;\n";
+ OS << " " << getType() << " *" << getLowerName() << ";";
+ }
+ void writePCHReadDecls(raw_ostream &OS) const {
+ OS << " unsigned " << getLowerName() << "Size = Record[Idx++];\n";
+ OS << " llvm::SmallVector<" << type << ", 4> " << getLowerName()
+ << ";\n";
+ OS << " " << getLowerName() << ".reserve(" << getLowerName()
+ << "Size);\n";
+ OS << " for (unsigned i = " << getLowerName() << "Size; i; --i)\n";
+
+ std::string read = ReadPCHRecord(type);
+ OS << " " << getLowerName() << ".push_back(" << read << ");\n";
+ }
+ void writePCHReadArgs(raw_ostream &OS) const {
+ OS << getLowerName() << ".data(), " << getLowerName() << "Size";
+ }
+ void writePCHWrite(raw_ostream &OS) const{
+ OS << " Record.push_back(SA->" << getLowerName() << "_size());\n";
+ OS << " for (" << getAttrName() << "Attr::" << getLowerName()
+ << "_iterator i = SA->" << getLowerName() << "_begin(), e = SA->"
+ << getLowerName() << "_end(); i != e; ++i)\n";
+ OS << " " << WritePCHRecord(type, "(*i)");
+ }
+ void writeValue(raw_ostream &OS) const {
+ OS << "\";\n";
+ OS << " bool isFirst = true;\n"
+ << " for (" << getAttrName() << "Attr::" << getLowerName()
+ << "_iterator i = " << getLowerName() << "_begin(), e = "
+ << getLowerName() << "_end(); i != e; ++i) {\n"
+ << " if (isFirst) isFirst = false;\n"
+ << " else OS << \", \";\n"
+ << " OS << *i;\n"
+ << " }\n";
+ OS << " OS << \"";
+ }
+ };
+
+ class EnumArgument : public Argument {
+ std::string type;
+ std::vector<StringRef> values, enums;
+ public:
+ EnumArgument(Record &Arg, StringRef Attr)
+ : Argument(Arg, Attr), type(Arg.getValueAsString("Type")),
+ values(getValueAsListOfStrings(Arg, "Values")),
+ enums(getValueAsListOfStrings(Arg, "Enums"))
+ {}
+
+ void writeAccessors(raw_ostream &OS) const {
+ OS << " " << type << " get" << getUpperName() << "() const {\n";
+ OS << " return " << getLowerName() << ";\n";
+ OS << " }";
+ }
+ void writeCloneArgs(raw_ostream &OS) const {
+ OS << getLowerName();
+ }
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "A->get" << getUpperName() << "()";
+ }
+ void writeCtorInitializers(raw_ostream &OS) const {
+ OS << getLowerName() << "(" << getUpperName() << ")";
+ }
+ void writeCtorParameters(raw_ostream &OS) const {
+ OS << type << " " << getUpperName();
+ }
+ void writeDeclarations(raw_ostream &OS) const {
+ // Calculate the various enum values
+ std::vector<StringRef> uniques(enums);
+ std::sort(uniques.begin(), uniques.end());
+ uniques.erase(std::unique(uniques.begin(), uniques.end()),
+ uniques.end());
+ // FIXME: Emit a proper error
+ assert(!uniques.empty());
+
+ std::vector<StringRef>::iterator i = uniques.begin(),
+ e = uniques.end();
+ // The last one needs to not have a comma.
+ --e;
+
+ OS << "public:\n";
+ OS << " enum " << type << " {\n";
+ for (; i != e; ++i)
+ OS << " " << *i << ",\n";
+ OS << " " << *e << "\n";
+ OS << " };\n";
+ OS << "private:\n";
+ OS << " " << type << " " << getLowerName() << ";";
+ }
+ void writePCHReadDecls(raw_ostream &OS) const {
+ OS << " " << getAttrName() << "Attr::" << type << " " << getLowerName()
+ << "(static_cast<" << getAttrName() << "Attr::" << type
+ << ">(Record[Idx++]));\n";
+ }
+ void writePCHReadArgs(raw_ostream &OS) const {
+ OS << getLowerName();
+ }
+ void writePCHWrite(raw_ostream &OS) const {
+ OS << "Record.push_back(SA->get" << getUpperName() << "());\n";
+ }
+ void writeValue(raw_ostream &OS) const {
+ OS << "\" << get" << getUpperName() << "() << \"";
+ }
+ };
+
+ class VersionArgument : public Argument {
+ public:
+ VersionArgument(Record &Arg, StringRef Attr)
+ : Argument(Arg, Attr)
+ {}
+
+ void writeAccessors(raw_ostream &OS) const {
+ OS << " VersionTuple get" << getUpperName() << "() const {\n";
+ OS << " return " << getLowerName() << ";\n";
+ OS << " }\n";
+ OS << " void set" << getUpperName()
+ << "(ASTContext &C, VersionTuple V) {\n";
+ OS << " " << getLowerName() << " = V;\n";
+ OS << " }";
+ }
+ void writeCloneArgs(raw_ostream &OS) const {
+ OS << "get" << getUpperName() << "()";
+ }
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "A->get" << getUpperName() << "()";
+ }
+ void writeCtorBody(raw_ostream &OS) const {
+ }
+ void writeCtorInitializers(raw_ostream &OS) const {
+ OS << getLowerName() << "(" << getUpperName() << ")";
+ }
+ void writeCtorParameters(raw_ostream &OS) const {
+ OS << "VersionTuple " << getUpperName();
+ }
+ void writeDeclarations(raw_ostream &OS) const {
+ OS << "VersionTuple " << getLowerName() << ";\n";
+ }
+ void writePCHReadDecls(raw_ostream &OS) const {
+ OS << " VersionTuple " << getLowerName()
+ << "= ReadVersionTuple(Record, Idx);\n";
+ }
+ void writePCHReadArgs(raw_ostream &OS) const {
+ OS << getLowerName();
+ }
+ void writePCHWrite(raw_ostream &OS) const {
+ OS << " AddVersionTuple(SA->get" << getUpperName() << "(), Record);\n";
+ }
+ void writeValue(raw_ostream &OS) const {
+ OS << getLowerName() << "=\" << get" << getUpperName() << "() << \"";
+ }
+ };
+
+ class ExprArgument : public SimpleArgument {
+ public:
+ ExprArgument(Record &Arg, StringRef Attr)
+ : SimpleArgument(Arg, Attr, "Expr *")
+ {}
+
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "tempInst" << getUpperName();
+ }
+
+ void writeTemplateInstantiation(raw_ostream &OS) const {
+ OS << " " << getType() << " tempInst" << getUpperName() << ";\n";
+ OS << " {\n";
+ OS << " EnterExpressionEvaluationContext "
+ << "Unevaluated(S, Sema::Unevaluated);\n";
+ OS << " ExprResult " << "Result = S.SubstExpr("
+ << "A->get" << getUpperName() << "(), TemplateArgs);\n";
+ OS << " tempInst" << getUpperName() << " = "
+ << "Result.takeAs<Expr>();\n";
+ OS << " }\n";
+ }
+ };
+
+ class VariadicExprArgument : public VariadicArgument {
+ public:
+ VariadicExprArgument(Record &Arg, StringRef Attr)
+ : VariadicArgument(Arg, Attr, "Expr *")
+ {}
+
+ void writeTemplateInstantiationArgs(raw_ostream &OS) const {
+ OS << "tempInst" << getUpperName() << ", "
+ << "A->" << getLowerName() << "_size()";
+ }
+
+ void writeTemplateInstantiation(raw_ostream &OS) const {
+ OS << " " << getType() << " *tempInst" << getUpperName()
+ << " = new (C, 16) " << getType()
+ << "[A->" << getLowerName() << "_size()];\n";
+ OS << " {\n";
+ OS << " EnterExpressionEvaluationContext "
+ << "Unevaluated(S, Sema::Unevaluated);\n";
+ OS << " " << getType() << " *TI = tempInst" << getUpperName()
+ << ";\n";
+ OS << " " << getType() << " *I = A->" << getLowerName()
+ << "_begin();\n";
+ OS << " " << getType() << " *E = A->" << getLowerName()
+ << "_end();\n";
+ OS << " for (; I != E; ++I, ++TI) {\n";
+ OS << " ExprResult Result = S.SubstExpr(*I, TemplateArgs);\n";
+ OS << " *TI = Result.takeAs<Expr>();\n";
+ OS << " }\n";
+ OS << " }\n";
+ }
+ };
+}
+
+static Argument *createArgument(Record &Arg, StringRef Attr,
+ Record *Search = 0) {
+ if (!Search)
+ Search = &Arg;
+
+ Argument *Ptr = 0;
+ llvm::StringRef ArgName = Search->getName();
+
+ if (ArgName == "AlignedArgument") Ptr = new AlignedArgument(Arg, Attr);
+ else if (ArgName == "EnumArgument") Ptr = new EnumArgument(Arg, Attr);
+ else if (ArgName == "ExprArgument") Ptr = new ExprArgument(Arg, Attr);
+ else if (ArgName == "FunctionArgument")
+ Ptr = new SimpleArgument(Arg, Attr, "FunctionDecl *");
+ else if (ArgName == "IdentifierArgument")
+ Ptr = new SimpleArgument(Arg, Attr, "IdentifierInfo *");
+ else if (ArgName == "BoolArgument") Ptr = new SimpleArgument(Arg, Attr,
+ "bool");
+ else if (ArgName == "IntArgument") Ptr = new SimpleArgument(Arg, Attr, "int");
+ else if (ArgName == "StringArgument") Ptr = new StringArgument(Arg, Attr);
+ else if (ArgName == "TypeArgument")
+ Ptr = new SimpleArgument(Arg, Attr, "QualType");
+ else if (ArgName == "UnsignedArgument")
+ Ptr = new SimpleArgument(Arg, Attr, "unsigned");
+ else if (ArgName == "SourceLocArgument")
+ Ptr = new SimpleArgument(Arg, Attr, "SourceLocation");
+ else if (ArgName == "VariadicUnsignedArgument")
+ Ptr = new VariadicArgument(Arg, Attr, "unsigned");
+ else if (ArgName == "VariadicExprArgument")
+ Ptr = new VariadicExprArgument(Arg, Attr);
+ else if (ArgName == "VersionArgument")
+ Ptr = new VersionArgument(Arg, Attr);
+
+ if (!Ptr) {
+ std::vector<Record*> Bases = Search->getSuperClasses();
+ for (std::vector<Record*>::iterator i = Bases.begin(), e = Bases.end();
+ i != e; ++i) {
+ Ptr = createArgument(Arg, Attr, *i);
+ if (Ptr)
+ break;
+ }
+ }
+ return Ptr;
+}
+
+static void writeAvailabilityValue(raw_ostream &OS) {
+ OS << "\" << getPlatform()->getName();\n"
+ << " if (!getIntroduced().empty()) OS << \", introduced=\" << getIntroduced();\n"
+ << " if (!getDeprecated().empty()) OS << \", deprecated=\" << getDeprecated();\n"
+ << " if (!getObsoleted().empty()) OS << \", obsoleted=\" << getObsoleted();\n"
+ << " if (getUnavailable()) OS << \", unavailable\";\n"
+ << " OS << \"";
+}
+
+void ClangAttrClassEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+ OS << "#ifndef LLVM_CLANG_ATTR_CLASSES_INC\n";
+ OS << "#define LLVM_CLANG_ATTR_CLASSES_INC\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+
+ for (std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end();
+ i != e; ++i) {
+ Record &R = **i;
+ const std::string &SuperName = R.getSuperClasses().back()->getName();
+
+ OS << "class " << R.getName() << "Attr : public " << SuperName << " {\n";
+
+ std::vector<Record*> ArgRecords = R.getValueAsListOfDefs("Args");
+ std::vector<Argument*> Args;
+ std::vector<Argument*>::iterator ai, ae;
+ Args.reserve(ArgRecords.size());
+
+ for (std::vector<Record*>::iterator ri = ArgRecords.begin(),
+ re = ArgRecords.end();
+ ri != re; ++ri) {
+ Record &ArgRecord = **ri;
+ Argument *Arg = createArgument(ArgRecord, R.getName());
+ assert(Arg);
+ Args.push_back(Arg);
+
+ Arg->writeDeclarations(OS);
+ OS << "\n\n";
+ }
+
+ ae = Args.end();
+
+ OS << "\n public:\n";
+ OS << " " << R.getName() << "Attr(SourceRange R, ASTContext &Ctx\n";
+
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ OS << " , ";
+ (*ai)->writeCtorParameters(OS);
+ OS << "\n";
+ }
+
+ OS << " )\n";
+ OS << " : " << SuperName << "(attr::" << R.getName() << ", R)\n";
+
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ OS << " , ";
+ (*ai)->writeCtorInitializers(OS);
+ OS << "\n";
+ }
+
+ OS << " {\n";
+
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ (*ai)->writeCtorBody(OS);
+ OS << "\n";
+ }
+ OS << " }\n\n";
+
+ OS << " virtual " << R.getName() << "Attr *clone (ASTContext &C) const;\n";
+ OS << " virtual void printPretty(llvm::raw_ostream &OS, ASTContext &Ctx) const;\n";
+
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ (*ai)->writeAccessors(OS);
+ OS << "\n\n";
+ }
+
+ OS << R.getValueAsString("AdditionalMembers");
+ OS << "\n\n";
+
+ OS << " static bool classof(const Attr *A) { return A->getKind() == "
+ << "attr::" << R.getName() << "; }\n";
+ OS << " static bool classof(const " << R.getName()
+ << "Attr *) { return true; }\n";
+
+ bool LateParsed = R.getValueAsBit("LateParsed");
+ OS << " virtual bool isLateParsed() const { return "
+ << LateParsed << "; }\n";
+
+ OS << "};\n\n";
+ }
+
+ OS << "#endif\n";
+}
+
+void ClangAttrImplEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+ std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end(), ri, re;
+ std::vector<Argument*>::iterator ai, ae;
+
+ for (; i != e; ++i) {
+ Record &R = **i;
+ std::vector<Record*> ArgRecords = R.getValueAsListOfDefs("Args");
+ std::vector<StringRef> Spellings = getValueAsListOfStrings(R, "Spellings");
+ std::vector<Argument*> Args;
+ for (ri = ArgRecords.begin(), re = ArgRecords.end(); ri != re; ++ri)
+ Args.push_back(createArgument(**ri, R.getName()));
+
+ for (ai = Args.begin(), ae = Args.end(); ai != ae; ++ai)
+ (*ai)->writeAccessorDefinitions(OS);
+
+ OS << R.getName() << "Attr *" << R.getName()
+ << "Attr::clone(ASTContext &C) const {\n";
+ OS << " return new (C) " << R.getName() << "Attr(getLocation(), C";
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ OS << ", ";
+ (*ai)->writeCloneArgs(OS);
+ }
+ OS << ");\n}\n\n";
+
+ OS << "void " << R.getName() << "Attr::printPretty("
+ << "llvm::raw_ostream &OS, ASTContext &Ctx) const {\n";
+ if (Spellings.begin() != Spellings.end()) {
+ OS << " OS << \" __attribute__((" << *Spellings.begin();
+ if (Args.size()) OS << "(";
+ if (*Spellings.begin()=="availability") {
+ writeAvailabilityValue(OS);
+ } else {
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ if (ai!=Args.begin()) OS <<", ";
+ (*ai)->writeValue(OS);
+ }
+ }
+ if (Args.size()) OS << ")";
+ OS << "))\";\n";
+ }
+ OS << "}\n\n";
+ }
+}
+
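+// Emit one "<Class>(<AttrName>)" line per record, routing the final entry
+// through the LAST_<Class> macro so the end of each list can be marked.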
+static void EmitAttrList(raw_ostream &OS, StringRef Class,
+ const std::vector<Record*> &AttrList) {
+ std::vector<Record*>::const_iterator i = AttrList.begin(), e = AttrList.end();
+
+ if (i != e) {
+ // Move the end iterator back to emit the last attribute.
+ for(--e; i != e; ++i)
+ OS << Class << "(" << (*i)->getName() << ")\n";
+
+ OS << "LAST_" << Class << "(" << (*i)->getName() << ")\n\n";
+ }
+}
+
+void ClangAttrListEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ OS << "#ifndef LAST_ATTR\n";
+ OS << "#define LAST_ATTR(NAME) ATTR(NAME)\n";
+ OS << "#endif\n\n";
+
+ OS << "#ifndef INHERITABLE_ATTR\n";
+ OS << "#define INHERITABLE_ATTR(NAME) ATTR(NAME)\n";
+ OS << "#endif\n\n";
+
+ OS << "#ifndef LAST_INHERITABLE_ATTR\n";
+ OS << "#define LAST_INHERITABLE_ATTR(NAME) INHERITABLE_ATTR(NAME)\n";
+ OS << "#endif\n\n";
+
+ OS << "#ifndef INHERITABLE_PARAM_ATTR\n";
+ OS << "#define INHERITABLE_PARAM_ATTR(NAME) ATTR(NAME)\n";
+ OS << "#endif\n\n";
+
+ OS << "#ifndef LAST_INHERITABLE_PARAM_ATTR\n";
+ OS << "#define LAST_INHERITABLE_PARAM_ATTR(NAME)"
+ " INHERITABLE_PARAM_ATTR(NAME)\n";
+ OS << "#endif\n\n";
+
+ Record *InhClass = Records.getClass("InheritableAttr");
+ Record *InhParamClass = Records.getClass("InheritableParamAttr");
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"),
+ NonInhAttrs, InhAttrs, InhParamAttrs;
+ for (std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end();
+ i != e; ++i) {
+ if ((*i)->isSubClassOf(InhParamClass))
+ InhParamAttrs.push_back(*i);
+ else if ((*i)->isSubClassOf(InhClass))
+ InhAttrs.push_back(*i);
+ else
+ NonInhAttrs.push_back(*i);
+ }
+
+ EmitAttrList(OS, "INHERITABLE_PARAM_ATTR", InhParamAttrs);
+ EmitAttrList(OS, "INHERITABLE_ATTR", InhAttrs);
+ EmitAttrList(OS, "ATTR", NonInhAttrs);
+
+ OS << "#undef LAST_ATTR\n";
+ OS << "#undef INHERITABLE_ATTR\n";
+ OS << "#undef LAST_INHERITABLE_ATTR\n";
+ OS << "#undef LAST_INHERITABLE_PARAM_ATTR\n";
+ OS << "#undef ATTR\n";
+}
+
+void ClangAttrPCHReadEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ Record *InhClass = Records.getClass("InheritableAttr");
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"),
+ ArgRecords;
+ std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end(), ai, ae;
+ std::vector<Argument*> Args;
+ std::vector<Argument*>::iterator ri, re;
+
+ OS << " switch (Kind) {\n";
+ OS << " default:\n";
+ OS << " assert(0 && \"Unknown attribute!\");\n";
+ OS << " break;\n";
+ for (; i != e; ++i) {
+ Record &R = **i;
+ OS << " case attr::" << R.getName() << ": {\n";
+ if (R.isSubClassOf(InhClass))
+ OS << " bool isInherited = Record[Idx++];\n";
+ ArgRecords = R.getValueAsListOfDefs("Args");
+ Args.clear();
+ for (ai = ArgRecords.begin(), ae = ArgRecords.end(); ai != ae; ++ai) {
+ Argument *A = createArgument(**ai, R.getName());
+ Args.push_back(A);
+ A->writePCHReadDecls(OS);
+ }
+ OS << " New = new (Context) " << R.getName() << "Attr(Range, Context";
+ for (ri = Args.begin(), re = Args.end(); ri != re; ++ri) {
+ OS << ", ";
+ (*ri)->writePCHReadArgs(OS);
+ }
+ OS << ");\n";
+ if (R.isSubClassOf(InhClass))
+ OS << " cast<InheritableAttr>(New)->setInherited(isInherited);\n";
+ OS << " break;\n";
+ OS << " }\n";
+ }
+ OS << " }\n";
+}
+
+void ClangAttrPCHWriteEmitter::run(raw_ostream &OS) {
+ Record *InhClass = Records.getClass("InheritableAttr");
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr"), Args;
+ std::vector<Record*>::iterator i = Attrs.begin(), e = Attrs.end(), ai, ae;
+
+ OS << " switch (A->getKind()) {\n";
+ OS << " default:\n";
+ OS << " llvm_unreachable(\"Unknown attribute kind!\");\n";
+ OS << " break;\n";
+ for (; i != e; ++i) {
+ Record &R = **i;
+ OS << " case attr::" << R.getName() << ": {\n";
+ Args = R.getValueAsListOfDefs("Args");
+ if (R.isSubClassOf(InhClass) || !Args.empty())
+ OS << " const " << R.getName() << "Attr *SA = cast<" << R.getName()
+ << "Attr>(A);\n";
+ if (R.isSubClassOf(InhClass))
+ OS << " Record.push_back(SA->isInherited());\n";
+ for (ai = Args.begin(), ae = Args.end(); ai != ae; ++ai)
+ createArgument(**ai, R.getName())->writePCHWrite(OS);
+ OS << " break;\n";
+ OS << " }\n";
+ }
+ OS << " }\n";
+}
+
+void ClangAttrSpellingListEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
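+  // Each known spelling becomes a ".Case("<spelling>", true)" line, meant to
+  // be spliced into a StringSwitch chain by the file including this output.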
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+
+ for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end(); I != E; ++I) {
+ Record &Attr = **I;
+
+ std::vector<StringRef> Spellings = getValueAsListOfStrings(Attr, "Spellings");
+
+ for (std::vector<StringRef>::const_iterator I = Spellings.begin(), E = Spellings.end(); I != E; ++I) {
+ StringRef Spelling = *I;
+ OS << ".Case(\"" << Spelling << "\", true)\n";
+ }
+ }
+
+}
+
+void ClangAttrLateParsedListEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+
+ for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end();
+ I != E; ++I) {
+ Record &Attr = **I;
+
+ bool LateParsed = Attr.getValueAsBit("LateParsed");
+
+ if (LateParsed) {
+ std::vector<StringRef> Spellings =
+ getValueAsListOfStrings(Attr, "Spellings");
+
+ for (std::vector<StringRef>::const_iterator I = Spellings.begin(),
+ E = Spellings.end(); I != E; ++I) {
+ OS << ".Case(\"" << (*I) << "\", " << LateParsed << ")\n";
+ }
+ }
+ }
+}
+
+
+void ClangAttrTemplateInstantiateEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+
+ OS << "namespace clang {\n"
+ << "namespace sema {\n\n"
+ << "Attr *instantiateTemplateAttribute(const Attr *At, ASTContext &C, "
+ << "Sema &S,\n"
+ << " const MultiLevelTemplateArgumentList &TemplateArgs) {\n"
+ << " switch (At->getKind()) {\n"
+ << " default:\n"
+ << " break;\n";
+
+ for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end();
+ I != E; ++I) {
+ Record &R = **I;
+
+ OS << " case attr::" << R.getName() << ": {\n";
+ OS << " const " << R.getName() << "Attr *A = cast<"
+ << R.getName() << "Attr>(At);\n";
+ bool TDependent = R.getValueAsBit("TemplateDependent");
+
+ if (!TDependent) {
+ OS << " return A->clone(C);\n";
+ OS << " }\n";
+ continue;
+ }
+
+ std::vector<Record*> ArgRecords = R.getValueAsListOfDefs("Args");
+ std::vector<Argument*> Args;
+ std::vector<Argument*>::iterator ai, ae;
+ Args.reserve(ArgRecords.size());
+
+ for (std::vector<Record*>::iterator ri = ArgRecords.begin(),
+ re = ArgRecords.end();
+ ri != re; ++ri) {
+ Record &ArgRecord = **ri;
+ Argument *Arg = createArgument(ArgRecord, R.getName());
+ assert(Arg);
+ Args.push_back(Arg);
+ }
+ ae = Args.end();
+
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ (*ai)->writeTemplateInstantiation(OS);
+ }
+ OS << " return new (C) " << R.getName() << "Attr(A->getLocation(), C";
+ for (ai = Args.begin(); ai != ae; ++ai) {
+ OS << ", ";
+ (*ai)->writeTemplateInstantiationArgs(OS);
+ }
+ OS << ");\n }\n";
+ }
+ OS << " } // end switch\n"
+ << " llvm_unreachable(\"Unknown attribute!\");\n"
+ << " return 0;\n"
+ << "}\n\n"
+ << "} // end namespace sema\n"
+ << "} // end namespace clang\n";
+}
+
+void ClangAttrParsedAttrListEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ OS << "#ifndef PARSED_ATTR\n";
+ OS << "#define PARSED_ATTR(NAME) NAME\n";
+ OS << "#endif\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+ std::set<StringRef> ProcessedAttrs;
+
+ for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end();
+ I != E; ++I) {
+ Record &Attr = **I;
+
+ bool SemaHandler = Attr.getValueAsBit("SemaHandler");
+
+ if (SemaHandler) {
+ std::vector<StringRef> Spellings =
+ getValueAsListOfStrings(Attr, "Spellings");
+
+ for (std::vector<StringRef>::const_iterator I = Spellings.begin(),
+ E = Spellings.end(); I != E; ++I) {
+ StringRef AttrName = *I;
+
+ AttrName = NormalizeAttrName(AttrName);
+        // Skip if a normalized version has already been processed.
+ if (ProcessedAttrs.find(AttrName) != ProcessedAttrs.end())
+ continue;
+ else
+ ProcessedAttrs.insert(AttrName);
+
+ OS << "PARSED_ATTR(" << AttrName << ")\n";
+ }
+ }
+ }
+}
+
+void ClangAttrParsedAttrKindsEmitter::run(raw_ostream &OS) {
+ OS << "// This file is generated by TableGen. Do not edit.\n\n";
+
+ std::vector<Record*> Attrs = Records.getAllDerivedDefinitions("Attr");
+
+ for (std::vector<Record*>::iterator I = Attrs.begin(), E = Attrs.end();
+ I != E; ++I) {
+ Record &Attr = **I;
+
+ bool SemaHandler = Attr.getValueAsBit("SemaHandler");
+
+ if (SemaHandler) {
+ std::vector<StringRef> Spellings =
+ getValueAsListOfStrings(Attr, "Spellings");
+
+ for (std::vector<StringRef>::const_iterator I = Spellings.begin(),
+ E = Spellings.end(); I != E; ++I) {
+ StringRef AttrName = *I, Spelling = *I;
+
+ AttrName = NormalizeAttrName(AttrName);
+ Spelling = NormalizeAttrSpelling(Spelling);
+
+ OS << ".Case(\"" << Spelling << "\", " << "AT_" << AttrName << ")\n";
+ }
+ }
+ }
+}
+
+
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.h b/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.h
new file mode 100644
index 0000000..d119a09
--- /dev/null
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangAttrEmitter.h
@@ -0,0 +1,153 @@
+//===- ClangAttrEmitter.h - Generate Clang attribute handling -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang attribute processing code
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANGATTR_EMITTER_H
+#define CLANGATTR_EMITTER_H
+
+#include "llvm/TableGen/TableGenBackend.h"
+
+namespace llvm {
+
+/// ClangAttrClassEmitter - class emits the class definitions for attributes
+/// for clang.
+class ClangAttrClassEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrClassEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrImplEmitter - class emits the class method definitions for
+/// attributes for clang.
+class ClangAttrImplEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrImplEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrListEmitter - class emits the enumeration list for attributes for
+/// clang.
+class ClangAttrListEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrListEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrPCHReadEmitter - class emits the code to read an attribute from
+/// a clang precompiled header.
+class ClangAttrPCHReadEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+public:
+ explicit ClangAttrPCHReadEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrPCHWriteEmitter - class emits the code to write an attribute to
+/// a clang precompiled header.
+class ClangAttrPCHWriteEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+public:
+ explicit ClangAttrPCHWriteEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrSpellingListEmitter - class emits the list of spellings for
+/// attributes for clang.
+class ClangAttrSpellingListEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrSpellingListEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrLateParsedListEmitter emits the LateParsed property for attributes
+/// for clang.
+class ClangAttrLateParsedListEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrLateParsedListEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrTemplateInstantiateEmitter emits code to instantiate dependent
+/// attributes on templates.
+class ClangAttrTemplateInstantiateEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+ public:
+ explicit ClangAttrTemplateInstantiateEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrParsedAttrListEmitter emits the list of parsed attributes
+/// for clang.
+class ClangAttrParsedAttrListEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+public:
+ explicit ClangAttrParsedAttrListEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+/// ClangAttrParsedAttrKindsEmitter emits the kind list of parsed attributes
+/// for clang.
+class ClangAttrParsedAttrKindsEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+
+public:
+ explicit ClangAttrParsedAttrKindsEmitter(RecordKeeper &R)
+ : Records(R)
+ {}
+
+ void run(raw_ostream &OS);
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
new file mode 100644
index 0000000..8a49619
--- /dev/null
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -0,0 +1,385 @@
+//=- ClangDiagnosticsEmitter.cpp - Generate Clang diagnostics tables -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang diagnostics tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangDiagnosticsEmitter.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Compiler.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/SmallString.h"
+#include <map>
+#include <algorithm>
+#include <functional>
+#include <set>
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Diagnostic category computation code.
+//===----------------------------------------------------------------------===//
+
+namespace {
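+// Builds a mapping from each DiagGroup to the groups that list it as a
+// subgroup, so a diagnostic's category can be inherited from ancestor groups.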
+class DiagGroupParentMap {
+ RecordKeeper &Records;
+ std::map<const Record*, std::vector<Record*> > Mapping;
+public:
+ DiagGroupParentMap(RecordKeeper &records) : Records(records) {
+ std::vector<Record*> DiagGroups
+ = Records.getAllDerivedDefinitions("DiagGroup");
+ for (unsigned i = 0, e = DiagGroups.size(); i != e; ++i) {
+ std::vector<Record*> SubGroups =
+ DiagGroups[i]->getValueAsListOfDefs("SubGroups");
+ for (unsigned j = 0, e = SubGroups.size(); j != e; ++j)
+ Mapping[SubGroups[j]].push_back(DiagGroups[i]);
+ }
+ }
+
+ const std::vector<Record*> &getParents(const Record *Group) {
+ return Mapping[Group];
+ }
+};
+} // end anonymous namespace.
+
+static std::string
+getCategoryFromDiagGroup(const Record *Group,
+ DiagGroupParentMap &DiagGroupParents) {
+ // If the DiagGroup has a category, return it.
+ std::string CatName = Group->getValueAsString("CategoryName");
+ if (!CatName.empty()) return CatName;
+
+  // The diag group may be a subgroup of one or more other diagnostic groups;
+  // check these for a category as well.
+ const std::vector<Record*> &Parents = DiagGroupParents.getParents(Group);
+ for (unsigned i = 0, e = Parents.size(); i != e; ++i) {
+ CatName = getCategoryFromDiagGroup(Parents[i], DiagGroupParents);
+ if (!CatName.empty()) return CatName;
+ }
+ return "";
+}
+
+/// getDiagnosticCategory - Return the category that the specified diagnostic
+/// lives in.
+static std::string getDiagnosticCategory(const Record *R,
+ DiagGroupParentMap &DiagGroupParents) {
+ // If the diagnostic is in a group, and that group has a category, use it.
+ if (DefInit *Group = dynamic_cast<DefInit*>(R->getValueInit("Group"))) {
+ // Check the diagnostic's diag group for a category.
+ std::string CatName = getCategoryFromDiagGroup(Group->getDef(),
+ DiagGroupParents);
+ if (!CatName.empty()) return CatName;
+ }
+
+ // If the diagnostic itself has a category, get it.
+ return R->getValueAsString("CategoryName");
+}
+
+namespace {
+ class DiagCategoryIDMap {
+ RecordKeeper &Records;
+ StringMap<unsigned> CategoryIDs;
+ std::vector<std::string> CategoryStrings;
+ public:
+ DiagCategoryIDMap(RecordKeeper &records) : Records(records) {
+ DiagGroupParentMap ParentInfo(Records);
+
+ // The zero'th category is "".
+ CategoryStrings.push_back("");
+ CategoryIDs[""] = 0;
+
+ std::vector<Record*> Diags =
+ Records.getAllDerivedDefinitions("Diagnostic");
+ for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
+ std::string Category = getDiagnosticCategory(Diags[i], ParentInfo);
+ if (Category.empty()) continue; // Skip diags with no category.
+
+ unsigned &ID = CategoryIDs[Category];
+ if (ID != 0) continue; // Already seen.
+
+ ID = CategoryStrings.size();
+ CategoryStrings.push_back(Category);
+ }
+ }
+
+ unsigned getID(StringRef CategoryString) {
+ return CategoryIDs[CategoryString];
+ }
+
+ typedef std::vector<std::string>::iterator iterator;
+ iterator begin() { return CategoryStrings.begin(); }
+ iterator end() { return CategoryStrings.end(); }
+ };
+
+ struct GroupInfo {
+ std::vector<const Record*> DiagsInGroup;
+ std::vector<std::string> SubGroups;
+ unsigned IDNo;
+ };
+} // end anonymous namespace.
+
+/// \brief Invert the one-to-[zero-or-one] mapping of diags to groups into a
+/// one-to-many mapping of groups to the diags they contain.
+static void groupDiagnostics(const std::vector<Record*> &Diags,
+ const std::vector<Record*> &DiagGroups,
+ std::map<std::string, GroupInfo> &DiagsInGroup) {
+ for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
+ const Record *R = Diags[i];
+ DefInit *DI = dynamic_cast<DefInit*>(R->getValueInit("Group"));
+ if (DI == 0) continue;
+ std::string GroupName = DI->getDef()->getValueAsString("GroupName");
+ DiagsInGroup[GroupName].DiagsInGroup.push_back(R);
+ }
+
+  // Add all DiagGroups to the DiagsInGroup list to make sure we pick up empty
+  // groups (these are warnings that GCC supports but clang never produces).
+ for (unsigned i = 0, e = DiagGroups.size(); i != e; ++i) {
+ Record *Group = DiagGroups[i];
+ GroupInfo &GI = DiagsInGroup[Group->getValueAsString("GroupName")];
+
+ std::vector<Record*> SubGroups = Group->getValueAsListOfDefs("SubGroups");
+ for (unsigned j = 0, e = SubGroups.size(); j != e; ++j)
+ GI.SubGroups.push_back(SubGroups[j]->getValueAsString("GroupName"));
+ }
+
+ // Assign unique ID numbers to the groups.
+ unsigned IDNo = 0;
+ for (std::map<std::string, GroupInfo>::iterator
+ I = DiagsInGroup.begin(), E = DiagsInGroup.end(); I != E; ++I, ++IDNo)
+ I->second.IDNo = IDNo;
+}
+
+//===----------------------------------------------------------------------===//
+// Warning Tables (.inc file) generation.
+//===----------------------------------------------------------------------===//
+
+void ClangDiagsDefsEmitter::run(raw_ostream &OS) {
+ // Write the #if guard
+ if (!Component.empty()) {
+ std::string ComponentName = StringRef(Component).upper();
+ OS << "#ifdef " << ComponentName << "START\n";
+ OS << "__" << ComponentName << "START = DIAG_START_" << ComponentName
+ << ",\n";
+ OS << "#undef " << ComponentName << "START\n";
+ OS << "#endif\n\n";
+ }
+
+ const std::vector<Record*> &Diags =
+ Records.getAllDerivedDefinitions("Diagnostic");
+
+ std::vector<Record*> DiagGroups
+ = Records.getAllDerivedDefinitions("DiagGroup");
+
+ std::map<std::string, GroupInfo> DiagsInGroup;
+ groupDiagnostics(Diags, DiagGroups, DiagsInGroup);
+
+ DiagCategoryIDMap CategoryIDs(Records);
+ DiagGroupParentMap DGParentMap(Records);
+
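+  // Each diagnostic becomes one DIAG(Name, Class, DefaultMapping, "Text",
+  // GroupIndex, SFINAE, AccessControl[, NoWerror, ShowInSystemHeader],
+  // CategoryID) entry in the generated .inc file.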
+ for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
+ const Record &R = *Diags[i];
+ // Filter by component.
+ if (!Component.empty() && Component != R.getValueAsString("Component"))
+ continue;
+
+ OS << "DIAG(" << R.getName() << ", ";
+ OS << R.getValueAsDef("Class")->getName();
+ OS << ", diag::" << R.getValueAsDef("DefaultMapping")->getName();
+
+ // Description string.
+ OS << ", \"";
+ OS.write_escaped(R.getValueAsString("Text")) << '"';
+
+ // Warning associated with the diagnostic. This is stored as an index into
+ // the alphabetically sorted warning table.
+ if (DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Group"))) {
+ std::map<std::string, GroupInfo>::iterator I =
+ DiagsInGroup.find(DI->getDef()->getValueAsString("GroupName"));
+ assert(I != DiagsInGroup.end());
+ OS << ", " << I->second.IDNo;
+ } else {
+ OS << ", 0";
+ }
+
+ // SFINAE bit
+ if (R.getValueAsBit("SFINAE"))
+ OS << ", true";
+ else
+ OS << ", false";
+
+ // Access control bit
+ if (R.getValueAsBit("AccessControl"))
+ OS << ", true";
+ else
+ OS << ", false";
+
+ // FIXME: This condition exists only to avoid a temporary revlock; it can
+ // be removed.
+ if (R.getValue("WarningNoWerror")) {
+ // Default warning has no Werror bit.
+ if (R.getValueAsBit("WarningNoWerror"))
+ OS << ", true";
+ else
+ OS << ", false";
+
+ // Default warning shown-in-system-header bit.
+ if (R.getValueAsBit("WarningShowInSystemHeader"))
+ OS << ", true";
+ else
+ OS << ", false";
+ }
+
+ // Category number.
+ OS << ", " << CategoryIDs.getID(getDiagnosticCategory(&R, DGParentMap));
+ OS << ")\n";
+ }
+}
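+// Illustrative sketch (not part of the original patch): each emitted record has
+// the shape below; the placeholder names are hypothetical and the optional
+// Werror/show-in-system-header bits are omitted.
+//   DIAG(<name>, <Class def name>, diag::<DefaultMapping def name>, "<text>",
+//        <group index>, <SFINAE bit>, <access-control bit>, <category index>)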
+
+//===----------------------------------------------------------------------===//
+// Warning Group Tables generation
+//===----------------------------------------------------------------------===//
+
+static std::string getDiagCategoryEnum(llvm::StringRef name) {
+ if (name.empty())
+ return "DiagCat_None";
+ SmallString<256> enumName = llvm::StringRef("DiagCat_");
+ for (llvm::StringRef::iterator I = name.begin(), E = name.end(); I != E; ++I)
+ enumName += isalnum(*I) ? *I : '_';
+ return enumName.str();
+}
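+// For example (the category name is hypothetical), the category string
+// "Lambda Issue" becomes the enumerator "DiagCat_Lambda_Issue": every
+// non-alphanumeric character is replaced by '_'.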
+
+void ClangDiagGroupsEmitter::run(raw_ostream &OS) {
+ // Compute a mapping from a DiagGroup to all of its parents.
+ DiagGroupParentMap DGParentMap(Records);
+
+ std::vector<Record*> Diags =
+ Records.getAllDerivedDefinitions("Diagnostic");
+
+ std::vector<Record*> DiagGroups
+ = Records.getAllDerivedDefinitions("DiagGroup");
+
+ std::map<std::string, GroupInfo> DiagsInGroup;
+ groupDiagnostics(Diags, DiagGroups, DiagsInGroup);
+
+ // Walk through the groups, emitting an array of the diagnostics that are
+ // mapped to each group.
+ OS << "\n#ifdef GET_DIAG_ARRAYS\n";
+ unsigned MaxLen = 0;
+ for (std::map<std::string, GroupInfo>::iterator
+ I = DiagsInGroup.begin(), E = DiagsInGroup.end(); I != E; ++I) {
+ MaxLen = std::max(MaxLen, (unsigned)I->first.size());
+
+ std::vector<const Record*> &V = I->second.DiagsInGroup;
+ if (!V.empty()) {
+ OS << "static const short DiagArray" << I->second.IDNo << "[] = { ";
+ for (unsigned i = 0, e = V.size(); i != e; ++i)
+ OS << "diag::" << V[i]->getName() << ", ";
+ OS << "-1 };\n";
+ }
+
+ const std::vector<std::string> &SubGroups = I->second.SubGroups;
+ if (!SubGroups.empty()) {
+ OS << "static const short DiagSubGroup" << I->second.IDNo << "[] = { ";
+ for (unsigned i = 0, e = SubGroups.size(); i != e; ++i) {
+ std::map<std::string, GroupInfo>::iterator RI =
+ DiagsInGroup.find(SubGroups[i]);
+ assert(RI != DiagsInGroup.end() && "Referenced without existing?");
+ OS << RI->second.IDNo << ", ";
+ }
+ OS << "-1 };\n";
+ }
+ }
+ OS << "#endif // GET_DIAG_ARRAYS\n\n";
+
+ // Emit the table now.
+ OS << "\n#ifdef GET_DIAG_TABLE\n";
+ for (std::map<std::string, GroupInfo>::iterator
+ I = DiagsInGroup.begin(), E = DiagsInGroup.end(); I != E; ++I) {
+ // Group option string.
+ OS << " { ";
+ OS << I->first.size() << ", ";
+ OS << "\"";
+ if (I->first.find_first_not_of("abcdefghijklmnopqrstuvwxyz"
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "0123456789!@#$%^*-+=:?")!=std::string::npos)
+ throw "Invalid character in diagnostic group '" + I->first + "'";
+ OS.write_escaped(I->first) << "\","
+ << std::string(MaxLen-I->first.size()+1, ' ');
+
+ // Diagnostics in the group.
+ if (I->second.DiagsInGroup.empty())
+ OS << "0, ";
+ else
+ OS << "DiagArray" << I->second.IDNo << ", ";
+
+ // Subgroups.
+ if (I->second.SubGroups.empty())
+ OS << 0;
+ else
+ OS << "DiagSubGroup" << I->second.IDNo;
+ OS << " },\n";
+ }
+ OS << "#endif // GET_DIAG_TABLE\n\n";
+
+ // Emit the category table next.
+ DiagCategoryIDMap CategoriesByID(Records);
+ OS << "\n#ifdef GET_CATEGORY_TABLE\n";
+ for (DiagCategoryIDMap::iterator I = CategoriesByID.begin(),
+ E = CategoriesByID.end(); I != E; ++I)
+ OS << "CATEGORY(\"" << *I << "\", " << getDiagCategoryEnum(*I) << ")\n";
+ OS << "#endif // GET_CATEGORY_TABLE\n\n";
+}
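+// Illustrative sketch of the generated output (names and indices hypothetical,
+// not part of the original patch):
+//   GET_DIAG_ARRAYS:    static const short DiagArray3[] = { diag::warn_foo, -1 };
+//                       static const short DiagSubGroup3[] = { 7, -1 };
+//   GET_DIAG_TABLE:     { 3, "foo",  DiagArray3, DiagSubGroup3 },
+//   GET_CATEGORY_TABLE: CATEGORY("Lambda Issue", DiagCat_Lambda_Issue)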
+
+//===----------------------------------------------------------------------===//
+// Diagnostic name index generation
+//===----------------------------------------------------------------------===//
+
+namespace {
+struct RecordIndexElement
+{
+ RecordIndexElement() {}
+ explicit RecordIndexElement(Record const &R):
+ Name(R.getName()) {}
+
+ std::string Name;
+};
+
+struct RecordIndexElementSorter :
+ public std::binary_function<RecordIndexElement, RecordIndexElement, bool> {
+
+ bool operator()(RecordIndexElement const &Lhs,
+ RecordIndexElement const &Rhs) const {
+ return Lhs.Name < Rhs.Name;
+ }
+
+};
+
+} // end anonymous namespace.
+
+void ClangDiagsIndexNameEmitter::run(raw_ostream &OS) {
+ const std::vector<Record*> &Diags =
+ Records.getAllDerivedDefinitions("Diagnostic");
+
+ std::vector<RecordIndexElement> Index;
+ Index.reserve(Diags.size());
+ for (unsigned i = 0, e = Diags.size(); i != e; ++i) {
+ const Record &R = *(Diags[i]);
+ Index.push_back(RecordIndexElement(R));
+ }
+
+ std::sort(Index.begin(), Index.end(), RecordIndexElementSorter());
+
+ for (unsigned i = 0, e = Index.size(); i != e; ++i) {
+ const RecordIndexElement &R = Index[i];
+
+ OS << "DIAG_NAME_INDEX(" << R.Name << ")\n";
+ }
+}
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.h b/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.h
new file mode 100644
index 0000000..73d3c4d
--- /dev/null
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangDiagnosticsEmitter.h
@@ -0,0 +1,54 @@
+//===- ClangDiagnosticsEmitter.h - Generate Clang diagnostics tables -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// These tablegen backends emit Clang diagnostics tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANGDIAGS_EMITTER_H
+#define CLANGDIAGS_EMITTER_H
+
+#include "llvm/TableGen/TableGenBackend.h"
+
+namespace llvm {
+
+/// ClangDiagsDefsEmitter - The top-level class emits .def files containing
+/// declarations of Clang diagnostics.
+///
+class ClangDiagsDefsEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+ const std::string& Component;
+public:
+ explicit ClangDiagsDefsEmitter(RecordKeeper &R, const std::string& component)
+ : Records(R), Component(component) {}
+
+ // run - Output the .def file contents
+ void run(raw_ostream &OS);
+};
+
+class ClangDiagGroupsEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+public:
+ explicit ClangDiagGroupsEmitter(RecordKeeper &R) : Records(R) {}
+
+ void run(raw_ostream &OS);
+};
+
+class ClangDiagsIndexNameEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+public:
+ explicit ClangDiagsIndexNameEmitter(RecordKeeper &R) : Records(R) {}
+
+ void run(raw_ostream &OS);
+};
+
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.cpp
new file mode 100644
index 0000000..423b68a
--- /dev/null
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.cpp
@@ -0,0 +1,319 @@
+//=- ClangSACheckersEmitter.cpp - Generate Clang SA checkers tables -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits Clang Static Analyzer checkers tables.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangSACheckersEmitter.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/ADT/DenseSet.h"
+#include <map>
+#include <string>
+using namespace llvm;
+
+//===----------------------------------------------------------------------===//
+// Static Analyzer Checkers Tables generation
+//===----------------------------------------------------------------------===//
+
+/// \brief Return true if the record is marked hidden, or if any parent package
+/// is marked hidden; otherwise return false.
+static bool isHidden(const Record &R) {
+ if (R.getValueAsBit("Hidden"))
+ return true;
+ // Not declared as hidden, check the parent package if it is hidden.
+ if (DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("ParentPackage")))
+ return isHidden(*DI->getDef());
+
+ return false;
+}
+
+static bool isCheckerNamed(const Record *R) {
+ return !R->getValueAsString("CheckerName").empty();
+}
+
+static std::string getPackageFullName(const Record *R);
+
+static std::string getParentPackageFullName(const Record *R) {
+ std::string name;
+ if (DefInit *DI = dynamic_cast<DefInit*>(R->getValueInit("ParentPackage")))
+ name = getPackageFullName(DI->getDef());
+ return name;
+}
+
+static std::string getPackageFullName(const Record *R) {
+ std::string name = getParentPackageFullName(R);
+ if (!name.empty()) name += ".";
+ return name + R->getValueAsString("PackageName");
+}
+
+static std::string getCheckerFullName(const Record *R) {
+ std::string name = getParentPackageFullName(R);
+ if (isCheckerNamed(R)) {
+ if (!name.empty()) name += ".";
+ name += R->getValueAsString("CheckerName");
+ }
+ return name;
+}
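+// Hedged example (package/checker names are illustrative only): a package
+// "uninitialized" whose ParentPackage is "core" gets the full name
+// "core.uninitialized", and a checker named "DivideZero" in package "core"
+// gets the full name "core.DivideZero"; an unnamed checker falls back to its
+// parent package's full name.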
+
+static std::string getStringValue(const Record &R, StringRef field) {
+ if (StringInit *
+ SI = dynamic_cast<StringInit*>(R.getValueInit(field)))
+ return SI->getValue();
+ return std::string();
+}
+
+namespace {
+struct GroupInfo {
+ llvm::DenseSet<const Record*> Checkers;
+ llvm::DenseSet<const Record *> SubGroups;
+ bool Hidden;
+ unsigned Index;
+
+ GroupInfo() : Hidden(false) { }
+};
+}
+
+static void addPackageToCheckerGroup(const Record *package, const Record *group,
+ llvm::DenseMap<const Record *, GroupInfo *> &recordGroupMap) {
+ llvm::DenseSet<const Record *> &checkers = recordGroupMap[package]->Checkers;
+ for (llvm::DenseSet<const Record *>::iterator
+ I = checkers.begin(), E = checkers.end(); I != E; ++I)
+ recordGroupMap[group]->Checkers.insert(*I);
+
+ llvm::DenseSet<const Record *> &subGroups = recordGroupMap[package]->SubGroups;
+ for (llvm::DenseSet<const Record *>::iterator
+ I = subGroups.begin(), E = subGroups.end(); I != E; ++I)
+ addPackageToCheckerGroup(*I, group, recordGroupMap);
+}
+
+void ClangSACheckersEmitter::run(raw_ostream &OS) {
+ std::vector<Record*> checkers = Records.getAllDerivedDefinitions("Checker");
+ llvm::DenseMap<const Record *, unsigned> checkerRecIndexMap;
+ for (unsigned i = 0, e = checkers.size(); i != e; ++i)
+ checkerRecIndexMap[checkers[i]] = i;
+
+ // Invert the mapping of checkers to package/group into a one-to-many
+ // mapping of packages/groups to checkers.
+ std::map<std::string, GroupInfo> groupInfoByName;
+ llvm::DenseMap<const Record *, GroupInfo *> recordGroupMap;
+
+ std::vector<Record*> packages = Records.getAllDerivedDefinitions("Package");
+ for (unsigned i = 0, e = packages.size(); i != e; ++i) {
+ Record *R = packages[i];
+ std::string fullName = getPackageFullName(R);
+ if (!fullName.empty()) {
+ GroupInfo &info = groupInfoByName[fullName];
+ info.Hidden = isHidden(*R);
+ recordGroupMap[R] = &info;
+ }
+ }
+
+ std::vector<Record*>
+ checkerGroups = Records.getAllDerivedDefinitions("CheckerGroup");
+ for (unsigned i = 0, e = checkerGroups.size(); i != e; ++i) {
+ Record *R = checkerGroups[i];
+ std::string name = R->getValueAsString("GroupName");
+ if (!name.empty()) {
+ GroupInfo &info = groupInfoByName[name];
+ recordGroupMap[R] = &info;
+ }
+ }
+
+ for (unsigned i = 0, e = checkers.size(); i != e; ++i) {
+ Record *R = checkers[i];
+ Record *package = 0;
+ if (DefInit *
+ DI = dynamic_cast<DefInit*>(R->getValueInit("ParentPackage")))
+ package = DI->getDef();
+ if (!isCheckerNamed(R) && !package)
+ throw "Checker '" + R->getName() + "' is neither named, nor in a package!";
+
+ if (isCheckerNamed(R)) {
+ // Create a pseudo-group to hold this checker.
+ std::string fullName = getCheckerFullName(R);
+ GroupInfo &info = groupInfoByName[fullName];
+ info.Hidden = R->getValueAsBit("Hidden");
+ recordGroupMap[R] = &info;
+ info.Checkers.insert(R);
+ } else {
+ recordGroupMap[package]->Checkers.insert(R);
+ }
+
+ Record *currR = isCheckerNamed(R) ? R : package;
+ // Insert the checker and its parent packages into the subgroups set of
+ // the corresponding parent package.
+ while (DefInit *DI
+ = dynamic_cast<DefInit*>(currR->getValueInit("ParentPackage"))) {
+ Record *parentPackage = DI->getDef();
+ recordGroupMap[parentPackage]->SubGroups.insert(currR);
+ currR = parentPackage;
+ }
+ // Insert the checker into the set of its group.
+ if (DefInit *DI = dynamic_cast<DefInit*>(R->getValueInit("Group")))
+ recordGroupMap[DI->getDef()]->Checkers.insert(R);
+ }
+
+ // If a package is in a group, add all of its checkers and its sub-packages'
+ // checkers to the group.
+ for (unsigned i = 0, e = packages.size(); i != e; ++i)
+ if (DefInit *DI = dynamic_cast<DefInit*>(packages[i]->getValueInit("Group")))
+ addPackageToCheckerGroup(packages[i], DI->getDef(), recordGroupMap);
+
+ typedef std::map<std::string, const Record *> SortedRecords;
+ typedef llvm::DenseMap<const Record *, unsigned> RecToSortIndex;
+
+ SortedRecords sortedGroups;
+ RecToSortIndex groupToSortIndex;
+ OS << "\n#ifdef GET_GROUPS\n";
+ {
+ for (unsigned i = 0, e = checkerGroups.size(); i != e; ++i)
+ sortedGroups[checkerGroups[i]->getValueAsString("GroupName")]
+ = checkerGroups[i];
+
+ unsigned sortIndex = 0;
+ for (SortedRecords::iterator
+ I = sortedGroups.begin(), E = sortedGroups.end(); I != E; ++I) {
+ const Record *R = I->second;
+
+ OS << "GROUP(" << "\"";
+ OS.write_escaped(R->getValueAsString("GroupName")) << "\"";
+ OS << ")\n";
+
+ groupToSortIndex[R] = sortIndex++;
+ }
+ }
+ OS << "#endif // GET_GROUPS\n\n";
+
+ OS << "\n#ifdef GET_PACKAGES\n";
+ {
+ SortedRecords sortedPackages;
+ for (unsigned i = 0, e = packages.size(); i != e; ++i)
+ sortedPackages[getPackageFullName(packages[i])] = packages[i];
+
+ for (SortedRecords::iterator
+ I = sortedPackages.begin(), E = sortedPackages.end(); I != E; ++I) {
+ const Record &R = *I->second;
+
+ OS << "PACKAGE(" << "\"";
+ OS.write_escaped(getPackageFullName(&R)) << "\", ";
+ // Group index
+ if (DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Group")))
+ OS << groupToSortIndex[DI->getDef()] << ", ";
+ else
+ OS << "-1, ";
+ // Hidden bit
+ if (isHidden(R))
+ OS << "true";
+ else
+ OS << "false";
+ OS << ")\n";
+ }
+ }
+ OS << "#endif // GET_PACKAGES\n\n";
+
+ OS << "\n#ifdef GET_CHECKERS\n";
+ for (unsigned i = 0, e = checkers.size(); i != e; ++i) {
+ const Record &R = *checkers[i];
+
+ OS << "CHECKER(" << "\"";
+ std::string name;
+ if (isCheckerNamed(&R))
+ name = getCheckerFullName(&R);
+ OS.write_escaped(name) << "\", ";
+ OS << R.getName() << ", ";
+ OS << getStringValue(R, "DescFile") << ", ";
+ OS << "\"";
+ OS.write_escaped(getStringValue(R, "HelpText")) << "\", ";
+ // Group index
+ if (DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Group")))
+ OS << groupToSortIndex[DI->getDef()] << ", ";
+ else
+ OS << "-1, ";
+ // Hidden bit
+ if (isHidden(R))
+ OS << "true";
+ else
+ OS << "false";
+ OS << ")\n";
+ }
+ OS << "#endif // GET_CHECKERS\n\n";
+
+ unsigned index = 0;
+ for (std::map<std::string, GroupInfo>::iterator
+ I = groupInfoByName.begin(), E = groupInfoByName.end(); I != E; ++I)
+ I->second.Index = index++;
+
+ // Walk through the packages/groups/checkers emitting an array for each
+ // set of checkers and an array for each set of subpackages.
+
+ OS << "\n#ifdef GET_MEMBER_ARRAYS\n";
+ unsigned maxLen = 0;
+ for (std::map<std::string, GroupInfo>::iterator
+ I = groupInfoByName.begin(), E = groupInfoByName.end(); I != E; ++I) {
+ maxLen = std::max(maxLen, (unsigned)I->first.size());
+
+ llvm::DenseSet<const Record *> &checkers = I->second.Checkers;
+ if (!checkers.empty()) {
+ OS << "static const short CheckerArray" << I->second.Index << "[] = { ";
+ // Make the output order deterministic.
+ std::map<int, const Record *> sorted;
+ for (llvm::DenseSet<const Record *>::iterator
+ I = checkers.begin(), E = checkers.end(); I != E; ++I)
+ sorted[(*I)->getID()] = *I;
+
+ for (std::map<int, const Record *>::iterator
+ I = sorted.begin(), E = sorted.end(); I != E; ++I)
+ OS << checkerRecIndexMap[I->second] << ", ";
+ OS << "-1 };\n";
+ }
+
+ llvm::DenseSet<const Record *> &subGroups = I->second.SubGroups;
+ if (!subGroups.empty()) {
+ OS << "static const short SubPackageArray" << I->second.Index << "[] = { ";
+ // Make the output order deterministic.
+ std::map<int, const Record *> sorted;
+ for (llvm::DenseSet<const Record *>::iterator
+ I = subGroups.begin(), E = subGroups.end(); I != E; ++I)
+ sorted[(*I)->getID()] = *I;
+
+ for (std::map<int, const Record *>::iterator
+ I = sorted.begin(), E = sorted.end(); I != E; ++I) {
+ OS << recordGroupMap[I->second]->Index << ", ";
+ }
+ OS << "-1 };\n";
+ }
+ }
+ OS << "#endif // GET_MEMBER_ARRAYS\n\n";
+
+ OS << "\n#ifdef GET_CHECKNAME_TABLE\n";
+ for (std::map<std::string, GroupInfo>::iterator
+ I = groupInfoByName.begin(), E = groupInfoByName.end(); I != E; ++I) {
+ // Group option string.
+ OS << " { \"";
+ OS.write_escaped(I->first) << "\","
+ << std::string(maxLen-I->first.size()+1, ' ');
+
+ if (I->second.Checkers.empty())
+ OS << "0, ";
+ else
+ OS << "CheckerArray" << I->second.Index << ", ";
+
+ // Subgroups.
+ if (I->second.SubGroups.empty())
+ OS << "0, ";
+ else
+ OS << "SubPackageArray" << I->second.Index << ", ";
+
+ OS << (I->second.Hidden ? "true" : "false");
+
+ OS << " },\n";
+ }
+ OS << "#endif // GET_CHECKNAME_TABLE\n\n";
+}
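+// Illustrative sketch of the generated entries (names, indices, and fields in
+// angle brackets are hypothetical, not part of the original patch):
+//   GET_PACKAGES:        PACKAGE("core.uninitialized", -1, false)
+//   GET_CHECKERS:        CHECKER("core.DivideZero", <record name>, <DescFile>,
+//                                "<help text>", -1, false)
+//   GET_CHECKNAME_TABLE: { "core.DivideZero",  CheckerArray4, 0, false },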
diff --git a/contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.h b/contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.h
new file mode 100644
index 0000000..5a0e148
--- /dev/null
+++ b/contrib/llvm/tools/clang/utils/TableGen/ClangSACheckersEmitter.h
@@ -0,0 +1,31 @@
+//===- ClangSACheckersEmitter.h - Generate Clang SA checkers tables -*- C++ -*-
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend emits Clang Static Analyzer checkers tables.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANGSACHECKERS_EMITTER_H
+#define CLANGSACHECKERS_EMITTER_H
+
+#include "llvm/TableGen/TableGenBackend.h"
+
+namespace llvm {
+
+class ClangSACheckersEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+public:
+ explicit ClangSACheckersEmitter(RecordKeeper &R) : Records(R) {}
+
+ void run(raw_ostream &OS);
+};
+
+} // End llvm namespace
+
+#endif
diff --git a/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp
new file mode 100644
index 0000000..e6f2e53
--- /dev/null
+++ b/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.cpp
@@ -0,0 +1,1574 @@
+//===- NeonEmitter.cpp - Generate arm_neon.h for use with clang -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting arm_neon.h, which includes
+// a declaration and definition of each function specified by the ARM NEON
+// compiler interface. See ARM document DUI0348B.
+//
+// Each NEON instruction is implemented in terms of 1 or more functions which
+// are suffixed with the element type of the input vectors. Functions may be
+// implemented in terms of generic vector operations such as +, *, -, etc. or
+// by calling a __builtin_-prefixed function which will be handled by clang's
+// CodeGen library.
+//
+// Additional validation code can be generated by this file when runHeader() is
+// called, rather than the normal run() entry point. A complete set of tests
+// for Neon intrinsics can be generated by calling the runTests() entry point.
+//
+//===----------------------------------------------------------------------===//
+
+#include "NeonEmitter.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <string>
+
+using namespace llvm;
+
+/// ParseTypes - break down a string such as "fQf" into a vector of StringRefs,
+/// with each StringRef representing a single type declared in the string.
+/// For "fQf" we would end up with 2 StringRefs, "f" and "Qf", representing
+/// 2xfloat and 4xfloat respectively.
+static void ParseTypes(Record *r, std::string &s,
+ SmallVectorImpl<StringRef> &TV) {
+ const char *data = s.data();
+ int len = 0;
+
+ for (unsigned i = 0, e = s.size(); i != e; ++i, ++len) {
+ if (data[len] == 'P' || data[len] == 'Q' || data[len] == 'U')
+ continue;
+
+ switch (data[len]) {
+ case 'c':
+ case 's':
+ case 'i':
+ case 'l':
+ case 'h':
+ case 'f':
+ break;
+ default:
+ throw TGError(r->getLoc(),
+ "Unexpected letter: " + std::string(data + len, 1));
+ }
+ TV.push_back(StringRef(data, len + 1));
+ data += len + 1;
+ len = -1;
+ }
+}
+
+/// Widen - Convert a type code into the next wider type. char -> short,
+/// short -> int, etc.
+static char Widen(const char t) {
+ switch (t) {
+ case 'c':
+ return 's';
+ case 's':
+ return 'i';
+ case 'i':
+ return 'l';
+ case 'h':
+ return 'f';
+ default: throw "unhandled type in widen!";
+ }
+}
+
+/// Narrow - Convert a type code into the next smaller type. short -> char,
+/// float -> half float, etc.
+static char Narrow(const char t) {
+ switch (t) {
+ case 's':
+ return 'c';
+ case 'i':
+ return 's';
+ case 'l':
+ return 'i';
+ case 'f':
+ return 'h';
+ default: throw "unhandled type in narrow!";
+ }
+}
+
+/// For a particular StringRef, return the base type code, and whether it has
+/// the quad-vector, polynomial, or unsigned modifiers set.
+static char ClassifyType(StringRef ty, bool &quad, bool &poly, bool &usgn) {
+ unsigned off = 0;
+
+ // remember quad.
+ if (ty[off] == 'Q') {
+ quad = true;
+ ++off;
+ }
+
+ // remember poly.
+ if (ty[off] == 'P') {
+ poly = true;
+ ++off;
+ }
+
+ // remember unsigned.
+ if (ty[off] == 'U') {
+ usgn = true;
+ ++off;
+ }
+
+ // base type to get the type string for.
+ return ty[off];
+}
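+// Hedged example: for the type string "QUs", ClassifyType() sets quad and usgn,
+// leaves poly untouched, and returns the base type code 's' (16-bit elements).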
+
+/// ModType - Transform a type code and its modifiers based on a mod code. The
+/// mod code definitions may be found at the top of arm_neon.td.
+static char ModType(const char mod, char type, bool &quad, bool &poly,
+ bool &usgn, bool &scal, bool &cnst, bool &pntr) {
+ switch (mod) {
+ case 't':
+ if (poly) {
+ poly = false;
+ usgn = true;
+ }
+ break;
+ case 'u':
+ usgn = true;
+ poly = false;
+ if (type == 'f')
+ type = 'i';
+ break;
+ case 'x':
+ usgn = false;
+ poly = false;
+ if (type == 'f')
+ type = 'i';
+ break;
+ case 'f':
+ if (type == 'h')
+ quad = true;
+ type = 'f';
+ usgn = false;
+ break;
+ case 'g':
+ quad = false;
+ break;
+ case 'w':
+ type = Widen(type);
+ quad = true;
+ break;
+ case 'n':
+ type = Widen(type);
+ break;
+ case 'i':
+ type = 'i';
+ scal = true;
+ break;
+ case 'l':
+ type = 'l';
+ scal = true;
+ usgn = true;
+ break;
+ case 's':
+ case 'a':
+ scal = true;
+ break;
+ case 'k':
+ quad = true;
+ break;
+ case 'c':
+ cnst = true;
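+ // FALLTHROUGH: 'c' is a const pointer, so the pointer cases below apply too.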
+ case 'p':
+ pntr = true;
+ scal = true;
+ break;
+ case 'h':
+ type = Narrow(type);
+ if (type == 'h')
+ quad = false;
+ break;
+ case 'e':
+ type = Narrow(type);
+ usgn = true;
+ break;
+ default:
+ break;
+ }
+ return type;
+}
+
+/// TypeString - for a modifier and type, generate the name of the typedef for
+/// that type. QUc -> uint8x8_t.
+static std::string TypeString(const char mod, StringRef typestr) {
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+ bool scal = false;
+ bool cnst = false;
+ bool pntr = false;
+
+ if (mod == 'v')
+ return "void";
+ if (mod == 'i')
+ return "int";
+
+ // base type to get the type string for.
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ // Based on the modifying character, change the type and width if necessary.
+ type = ModType(mod, type, quad, poly, usgn, scal, cnst, pntr);
+
+ SmallString<128> s;
+
+ if (usgn)
+ s.push_back('u');
+
+ switch (type) {
+ case 'c':
+ s += poly ? "poly8" : "int8";
+ if (scal)
+ break;
+ s += quad ? "x16" : "x8";
+ break;
+ case 's':
+ s += poly ? "poly16" : "int16";
+ if (scal)
+ break;
+ s += quad ? "x8" : "x4";
+ break;
+ case 'i':
+ s += "int32";
+ if (scal)
+ break;
+ s += quad ? "x4" : "x2";
+ break;
+ case 'l':
+ s += "int64";
+ if (scal)
+ break;
+ s += quad ? "x2" : "x1";
+ break;
+ case 'h':
+ s += "float16";
+ if (scal)
+ break;
+ s += quad ? "x8" : "x4";
+ break;
+ case 'f':
+ s += "float32";
+ if (scal)
+ break;
+ s += quad ? "x4" : "x2";
+ break;
+ default:
+ throw "unhandled type!";
+ }
+
+ if (mod == '2')
+ s += "x2";
+ if (mod == '3')
+ s += "x3";
+ if (mod == '4')
+ s += "x4";
+
+ // Append _t, finishing the type string typedef type.
+ s += "_t";
+
+ if (cnst)
+ s += " const";
+
+ if (pntr)
+ s += " *";
+
+ return s.str();
+}
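+// Worked examples (a sketch, not part of the original patch):
+//   TypeString('d', "Qf") == "float32x4_t"  // quad vector of float32
+//   TypeString('s', "Uc") == "uint8_t"      // scalar form of unsigned char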
+
+/// BuiltinTypeString - for a modifier and type, generate the clang
+/// BuiltinsARM.def prototype code for the function. See the top of clang's
+/// Builtins.def for a description of the type strings.
+static std::string BuiltinTypeString(const char mod, StringRef typestr,
+ ClassKind ck, bool ret) {
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+ bool scal = false;
+ bool cnst = false;
+ bool pntr = false;
+
+ if (mod == 'v')
+ return "v"; // void
+ if (mod == 'i')
+ return "i"; // int
+
+ // base type to get the type string for.
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ // Based on the modifying character, change the type and width if necessary.
+ type = ModType(mod, type, quad, poly, usgn, scal, cnst, pntr);
+
+ // All pointers are void* pointers. Change type to 'v' now.
+ if (pntr) {
+ usgn = false;
+ poly = false;
+ type = 'v';
+ }
+ // Treat half-float ('h') types as unsigned short ('s') types.
+ if (type == 'h') {
+ type = 's';
+ usgn = true;
+ }
+ usgn = usgn | poly | ((ck == ClassI || ck == ClassW) && scal && type != 'f');
+
+ if (scal) {
+ SmallString<128> s;
+
+ if (usgn)
+ s.push_back('U');
+ else if (type == 'c')
+ s.push_back('S'); // make chars explicitly signed
+
+ if (type == 'l') // 64-bit long
+ s += "LLi";
+ else
+ s.push_back(type);
+
+ if (cnst)
+ s.push_back('C');
+ if (pntr)
+ s.push_back('*');
+ return s.str();
+ }
+
+ // Since the return value must be one type, return a vector type of the
+ // appropriate width which we will bitcast. An exception is made for
+ // returning structs of 2, 3, or 4 vectors which are returned in a sret-like
+ // fashion, storing them to a pointer arg.
+ if (ret) {
+ if (mod >= '2' && mod <= '4')
+ return "vv*"; // void result with void* first argument
+ if (mod == 'f' || (ck != ClassB && type == 'f'))
+ return quad ? "V4f" : "V2f";
+ if (ck != ClassB && type == 's')
+ return quad ? "V8s" : "V4s";
+ if (ck != ClassB && type == 'i')
+ return quad ? "V4i" : "V2i";
+ if (ck != ClassB && type == 'l')
+ return quad ? "V2LLi" : "V1LLi";
+
+ return quad ? "V16Sc" : "V8Sc";
+ }
+
+ // Non-return array types are passed as individual vectors.
+ if (mod == '2')
+ return quad ? "V16ScV16Sc" : "V8ScV8Sc";
+ if (mod == '3')
+ return quad ? "V16ScV16ScV16Sc" : "V8ScV8ScV8Sc";
+ if (mod == '4')
+ return quad ? "V16ScV16ScV16ScV16Sc" : "V8ScV8ScV8ScV8Sc";
+
+ if (mod == 'f' || (ck != ClassB && type == 'f'))
+ return quad ? "V4f" : "V2f";
+ if (ck != ClassB && type == 's')
+ return quad ? "V8s" : "V4s";
+ if (ck != ClassB && type == 'i')
+ return quad ? "V4i" : "V2i";
+ if (ck != ClassB && type == 'l')
+ return quad ? "V2LLi" : "V1LLi";
+
+ return quad ? "V16Sc" : "V8Sc";
+}
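+// Hedged example: BuiltinTypeString('d', "Qf", ClassS, /*ret=*/true) yields
+// "V4f", i.e. a vector of four floats in Builtins.def notation.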
+
+/// MangleName - Append a type or width suffix to a base neon function name,
+/// and insert a 'q' in the appropriate location if the operation works on
+/// 128b rather than 64b. E.g. turn "vst2_lane" into "vst2q_lane_f32", etc.
+static std::string MangleName(const std::string &name, StringRef typestr,
+ ClassKind ck) {
+ if (name == "vcvt_f32_f16")
+ return name;
+
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ std::string s = name;
+
+ switch (type) {
+ case 'c':
+ switch (ck) {
+ case ClassS: s += poly ? "_p8" : usgn ? "_u8" : "_s8"; break;
+ case ClassI: s += "_i8"; break;
+ case ClassW: s += "_8"; break;
+ default: break;
+ }
+ break;
+ case 's':
+ switch (ck) {
+ case ClassS: s += poly ? "_p16" : usgn ? "_u16" : "_s16"; break;
+ case ClassI: s += "_i16"; break;
+ case ClassW: s += "_16"; break;
+ default: break;
+ }
+ break;
+ case 'i':
+ switch (ck) {
+ case ClassS: s += usgn ? "_u32" : "_s32"; break;
+ case ClassI: s += "_i32"; break;
+ case ClassW: s += "_32"; break;
+ default: break;
+ }
+ break;
+ case 'l':
+ switch (ck) {
+ case ClassS: s += usgn ? "_u64" : "_s64"; break;
+ case ClassI: s += "_i64"; break;
+ case ClassW: s += "_64"; break;
+ default: break;
+ }
+ break;
+ case 'h':
+ switch (ck) {
+ case ClassS:
+ case ClassI: s += "_f16"; break;
+ case ClassW: s += "_16"; break;
+ default: break;
+ }
+ break;
+ case 'f':
+ switch (ck) {
+ case ClassS:
+ case ClassI: s += "_f32"; break;
+ case ClassW: s += "_32"; break;
+ default: break;
+ }
+ break;
+ default:
+ throw "unhandled type!";
+ }
+ if (ck == ClassB)
+ s += "_v";
+
+ // Insert a 'q' before the first '_' character so that it ends up before
+ // _lane or _n on vector-scalar operations.
+ if (quad) {
+ size_t pos = s.find('_');
+ s = s.insert(pos, "q");
+ }
+ return s;
+}
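+// Worked examples (a sketch): MangleName("vadd", "Qf", ClassS) == "vaddq_f32",
+// while the generic form MangleName("vadd", "Qf", ClassB) == "vaddq_v".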
+
+/// UseMacro - Examine the prototype string to determine if the intrinsic
+/// should be defined as a preprocessor macro instead of an inline function.
+static bool UseMacro(const std::string &proto) {
+ // If this builtin takes an immediate argument, we need to #define it rather
+ // than use a standard declaration, so that SemaChecking can range check
+ // the immediate passed by the user.
+ if (proto.find('i') != std::string::npos)
+ return true;
+
+ // Pointer arguments need to use macros to avoid hiding aligned attributes
+ // from the pointer type.
+ if (proto.find('p') != std::string::npos ||
+ proto.find('c') != std::string::npos)
+ return true;
+
+ return false;
+}
+
+/// MacroArgUsedDirectly - Return true if argument i for an intrinsic that is
+/// defined as a macro should be accessed directly instead of being first
+/// assigned to a local temporary.
+static bool MacroArgUsedDirectly(const std::string &proto, unsigned i) {
+ // True for constant ints (i), pointers (p) and const pointers (c).
+ return (proto[i] == 'i' || proto[i] == 'p' || proto[i] == 'c');
+}
+
+// Generate the string "(argtype a, argtype b, ...)"
+static std::string GenArgs(const std::string &proto, StringRef typestr) {
+ bool define = UseMacro(proto);
+ char arg = 'a';
+
+ std::string s;
+ s += "(";
+
+ for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ if (define) {
+ // Some macro arguments are used directly instead of being assigned
+ // to local temporaries; prepend an underscore prefix to make their
+ // names consistent with the local temporaries.
+ if (MacroArgUsedDirectly(proto, i))
+ s += "__";
+ } else {
+ s += TypeString(proto[i], typestr) + " __";
+ }
+ s.push_back(arg);
+ if ((i + 1) < e)
+ s += ", ";
+ }
+
+ s += ")";
+ return s;
+}
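+// Hedged example: for the prototype "ddd" with type string "f" (no macro is
+// needed), GenArgs() produces "(float32x2_t __a, float32x2_t __b)".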
+
+// Macro arguments are not type-checked like inline function arguments, so
+// assign them to local temporaries to get the right type checking.
+static std::string GenMacroLocals(const std::string &proto, StringRef typestr) {
+ char arg = 'a';
+ std::string s;
+ bool generatedLocal = false;
+
+ for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ // Do not create a temporary for an immediate argument.
+ // That would defeat the whole point of using a macro!
+ if (MacroArgUsedDirectly(proto, i))
+ continue;
+ generatedLocal = true;
+
+ s += TypeString(proto[i], typestr) + " __";
+ s.push_back(arg);
+ s += " = (";
+ s.push_back(arg);
+ s += "); ";
+ }
+
+ if (generatedLocal)
+ s += "\\\n ";
+ return s;
+}
+
+// Use the vmovl builtin to sign-extend or zero-extend a vector.
+static std::string Extend(StringRef typestr, const std::string &a) {
+ std::string s;
+ s = MangleName("vmovl", typestr, ClassS);
+ s += "(" + a + ")";
+ return s;
+}
+
+static std::string Duplicate(unsigned nElts, StringRef typestr,
+ const std::string &a) {
+ std::string s;
+
+ s = "(" + TypeString('d', typestr) + "){ ";
+ for (unsigned i = 0; i != nElts; ++i) {
+ s += a;
+ if ((i + 1) < nElts)
+ s += ", ";
+ }
+ s += " }";
+
+ return s;
+}
+
+static std::string SplatLane(unsigned nElts, const std::string &vec,
+ const std::string &lane) {
+ std::string s = "__builtin_shufflevector(" + vec + ", " + vec;
+ for (unsigned i = 0; i < nElts; ++i)
+ s += ", " + lane;
+ s += ")";
+ return s;
+}
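+// Hedged examples: Duplicate(2, "f", "__b") produces
+// "(float32x2_t){ __b, __b }", and SplatLane(4, "__b", "__c") produces
+// "__builtin_shufflevector(__b, __b, __c, __c, __c, __c)".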
+
+static unsigned GetNumElements(StringRef typestr, bool &quad) {
+ quad = false;
+ bool dummy = false;
+ char type = ClassifyType(typestr, quad, dummy, dummy);
+ unsigned nElts = 0;
+ switch (type) {
+ case 'c': nElts = 8; break;
+ case 's': nElts = 4; break;
+ case 'i': nElts = 2; break;
+ case 'l': nElts = 1; break;
+ case 'h': nElts = 4; break;
+ case 'f': nElts = 2; break;
+ default:
+ throw "unhandled type!";
+ }
+ if (quad) nElts <<= 1;
+ return nElts;
+}
+
+// Generate the definition for this intrinsic, e.g. "a + b" for OpAdd.
+static std::string GenOpString(OpKind op, const std::string &proto,
+ StringRef typestr) {
+ bool quad;
+ unsigned nElts = GetNumElements(typestr, quad);
+ bool define = UseMacro(proto);
+
+ std::string ts = TypeString(proto[0], typestr);
+ std::string s;
+ if (!define) {
+ s = "return ";
+ }
+
+ switch(op) {
+ case OpAdd:
+ s += "__a + __b;";
+ break;
+ case OpAddl:
+ s += Extend(typestr, "__a") + " + " + Extend(typestr, "__b") + ";";
+ break;
+ case OpAddw:
+ s += "__a + " + Extend(typestr, "__b") + ";";
+ break;
+ case OpSub:
+ s += "__a - __b;";
+ break;
+ case OpSubl:
+ s += Extend(typestr, "__a") + " - " + Extend(typestr, "__b") + ";";
+ break;
+ case OpSubw:
+ s += "__a - " + Extend(typestr, "__b") + ";";
+ break;
+ case OpMulN:
+ s += "__a * " + Duplicate(nElts, typestr, "__b") + ";";
+ break;
+ case OpMulLane:
+ s += "__a * " + SplatLane(nElts, "__b", "__c") + ";";
+ break;
+ case OpMul:
+ s += "__a * __b;";
+ break;
+ case OpMullLane:
+ s += MangleName("vmull", typestr, ClassS) + "(__a, " +
+ SplatLane(nElts, "__b", "__c") + ");";
+ break;
+ case OpMlaN:
+ s += "__a + (__b * " + Duplicate(nElts, typestr, "__c") + ");";
+ break;
+ case OpMlaLane:
+ s += "__a + (__b * " + SplatLane(nElts, "__c", "__d") + ");";
+ break;
+ case OpMla:
+ s += "__a + (__b * __c);";
+ break;
+ case OpMlalN:
+ s += "__a + " + MangleName("vmull", typestr, ClassS) + "(__b, " +
+ Duplicate(nElts, typestr, "__c") + ");";
+ break;
+ case OpMlalLane:
+ s += "__a + " + MangleName("vmull", typestr, ClassS) + "(__b, " +
+ SplatLane(nElts, "__c", "__d") + ");";
+ break;
+ case OpMlal:
+ s += "__a + " + MangleName("vmull", typestr, ClassS) + "(__b, __c);";
+ break;
+ case OpMlsN:
+ s += "__a - (__b * " + Duplicate(nElts, typestr, "__c") + ");";
+ break;
+ case OpMlsLane:
+ s += "__a - (__b * " + SplatLane(nElts, "__c", "__d") + ");";
+ break;
+ case OpMls:
+ s += "__a - (__b * __c);";
+ break;
+ case OpMlslN:
+ s += "__a - " + MangleName("vmull", typestr, ClassS) + "(__b, " +
+ Duplicate(nElts, typestr, "__c") + ");";
+ break;
+ case OpMlslLane:
+ s += "__a - " + MangleName("vmull", typestr, ClassS) + "(__b, " +
+ SplatLane(nElts, "__c", "__d") + ");";
+ break;
+ case OpMlsl:
+ s += "__a - " + MangleName("vmull", typestr, ClassS) + "(__b, __c);";
+ break;
+ case OpQDMullLane:
+ s += MangleName("vqdmull", typestr, ClassS) + "(__a, " +
+ SplatLane(nElts, "__b", "__c") + ");";
+ break;
+ case OpQDMlalLane:
+ s += MangleName("vqdmlal", typestr, ClassS) + "(__a, __b, " +
+ SplatLane(nElts, "__c", "__d") + ");";
+ break;
+ case OpQDMlslLane:
+ s += MangleName("vqdmlsl", typestr, ClassS) + "(__a, __b, " +
+ SplatLane(nElts, "__c", "__d") + ");";
+ break;
+ case OpQDMulhLane:
+ s += MangleName("vqdmulh", typestr, ClassS) + "(__a, " +
+ SplatLane(nElts, "__b", "__c") + ");";
+ break;
+ case OpQRDMulhLane:
+ s += MangleName("vqrdmulh", typestr, ClassS) + "(__a, " +
+ SplatLane(nElts, "__b", "__c") + ");";
+ break;
+ case OpEq:
+ s += "(" + ts + ")(__a == __b);";
+ break;
+ case OpGe:
+ s += "(" + ts + ")(__a >= __b);";
+ break;
+ case OpLe:
+ s += "(" + ts + ")(__a <= __b);";
+ break;
+ case OpGt:
+ s += "(" + ts + ")(__a > __b);";
+ break;
+ case OpLt:
+ s += "(" + ts + ")(__a < __b);";
+ break;
+ case OpNeg:
+ s += " -__a;";
+ break;
+ case OpNot:
+ s += " ~__a;";
+ break;
+ case OpAnd:
+ s += "__a & __b;";
+ break;
+ case OpOr:
+ s += "__a | __b;";
+ break;
+ case OpXor:
+ s += "__a ^ __b;";
+ break;
+ case OpAndNot:
+ s += "__a & ~__b;";
+ break;
+ case OpOrNot:
+ s += "__a | ~__b;";
+ break;
+ case OpCast:
+ s += "(" + ts + ")__a;";
+ break;
+ case OpConcat:
+ s += "(" + ts + ")__builtin_shufflevector((int64x1_t)__a";
+ s += ", (int64x1_t)__b, 0, 1);";
+ break;
+ case OpHi:
+ s += "(" + ts +
+ ")__builtin_shufflevector((int64x2_t)__a, (int64x2_t)__a, 1);";
+ break;
+ case OpLo:
+ s += "(" + ts +
+ ")__builtin_shufflevector((int64x2_t)__a, (int64x2_t)__a, 0);";
+ break;
+ case OpDup:
+ s += Duplicate(nElts, typestr, "__a") + ";";
+ break;
+ case OpDupLane:
+ s += SplatLane(nElts, "__a", "__b") + ";";
+ break;
+ case OpSelect:
+ // ((0 & 1) | (~0 & 2))
+ s += "(" + ts + ")";
+ ts = TypeString(proto[1], typestr);
+ s += "((__a & (" + ts + ")__b) | ";
+ s += "(~__a & (" + ts + ")__c));";
+ break;
+ case OpRev16:
+ s += "__builtin_shufflevector(__a, __a";
+ for (unsigned i = 2; i <= nElts; i += 2)
+ for (unsigned j = 0; j != 2; ++j)
+ s += ", " + utostr(i - j - 1);
+ s += ");";
+ break;
+ case OpRev32: {
+ unsigned WordElts = nElts >> (1 + (int)quad);
+ s += "__builtin_shufflevector(__a, __a";
+ for (unsigned i = WordElts; i <= nElts; i += WordElts)
+ for (unsigned j = 0; j != WordElts; ++j)
+ s += ", " + utostr(i - j - 1);
+ s += ");";
+ break;
+ }
+ case OpRev64: {
+ unsigned DblWordElts = nElts >> (int)quad;
+ s += "__builtin_shufflevector(__a, __a";
+ for (unsigned i = DblWordElts; i <= nElts; i += DblWordElts)
+ for (unsigned j = 0; j != DblWordElts; ++j)
+ s += ", " + utostr(i - j - 1);
+ s += ");";
+ break;
+ }
+ case OpAbdl: {
+ std::string abd = MangleName("vabd", typestr, ClassS) + "(__a, __b)";
+ if (typestr[0] != 'U') {
+ // vabd results are always unsigned and must be zero-extended.
+ std::string utype = "U" + typestr.str();
+ s += "(" + TypeString(proto[0], typestr) + ")";
+ abd = "(" + TypeString('d', utype) + ")" + abd;
+ s += Extend(utype, abd) + ";";
+ } else {
+ s += Extend(typestr, abd) + ";";
+ }
+ break;
+ }
+ case OpAba:
+ s += "__a + " + MangleName("vabd", typestr, ClassS) + "(__b, __c);";
+ break;
+ case OpAbal: {
+ s += "__a + ";
+ std::string abd = MangleName("vabd", typestr, ClassS) + "(__b, __c)";
+ if (typestr[0] != 'U') {
+ // vabd results are always unsigned and must be zero-extended.
+ std::string utype = "U" + typestr.str();
+ s += "(" + TypeString(proto[0], typestr) + ")";
+ abd = "(" + TypeString('d', utype) + ")" + abd;
+ s += Extend(utype, abd) + ";";
+ } else {
+ s += Extend(typestr, abd) + ";";
+ }
+ break;
+ }
+ default:
+ throw "unknown OpKind!";
+ }
+ return s;
+}
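+// Hedged example: for OpAdd with prototype "ddd" and type string "f" (an inline
+// function rather than a macro), GenOpString() returns "return __a + __b;".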
+
+static unsigned GetNeonEnum(const std::string &proto, StringRef typestr) {
+ unsigned mod = proto[0];
+
+ if (mod == 'v' || mod == 'f')
+ mod = proto[1];
+
+ bool quad = false;
+ bool poly = false;
+ bool usgn = false;
+ bool scal = false;
+ bool cnst = false;
+ bool pntr = false;
+
+ // Base type to get the type string for.
+ char type = ClassifyType(typestr, quad, poly, usgn);
+
+ // Based on the modifying character, change the type and width if necessary.
+ type = ModType(mod, type, quad, poly, usgn, scal, cnst, pntr);
+
+ NeonTypeFlags::EltType ET;
+ switch (type) {
+ case 'c':
+ ET = poly ? NeonTypeFlags::Poly8 : NeonTypeFlags::Int8;
+ break;
+ case 's':
+ ET = poly ? NeonTypeFlags::Poly16 : NeonTypeFlags::Int16;
+ break;
+ case 'i':
+ ET = NeonTypeFlags::Int32;
+ break;
+ case 'l':
+ ET = NeonTypeFlags::Int64;
+ break;
+ case 'h':
+ ET = NeonTypeFlags::Float16;
+ break;
+ case 'f':
+ ET = NeonTypeFlags::Float32;
+ break;
+ default:
+ throw "unhandled type!";
+ }
+ NeonTypeFlags Flags(ET, usgn, quad && proto[1] != 'g');
+ return Flags.getFlags();
+}
+
+// Generate the definition for this intrinsic, e.g. __builtin_neon_cls(a)
+static std::string GenBuiltin(const std::string &name, const std::string &proto,
+ StringRef typestr, ClassKind ck) {
+ std::string s;
+
+ // If this builtin returns a struct of 2, 3, or 4 vectors, pass it as an
+ // implicit sret-like argument.
+ bool sret = (proto[0] >= '2' && proto[0] <= '4');
+
+ bool define = UseMacro(proto);
+
+ // Check if the prototype has a scalar operand with the type of the vector
+ // elements. If not, bitcasting the args will take care of arg checking.
+ // The actual signedness etc. will be taken care of with special enums.
+ if (proto.find('s') == std::string::npos)
+ ck = ClassB;
+
+ if (proto[0] != 'v') {
+ std::string ts = TypeString(proto[0], typestr);
+
+ if (define) {
+ if (sret)
+ s += ts + " r; ";
+ else
+ s += "(" + ts + ")";
+ } else if (sret) {
+ s += ts + " r; ";
+ } else {
+ s += "return (" + ts + ")";
+ }
+ }
+
+ bool splat = proto.find('a') != std::string::npos;
+
+ s += "__builtin_neon_";
+ if (splat) {
+ // Call the non-splat builtin: chop off the "_n" suffix from the name.
+ std::string vname(name, 0, name.size()-2);
+ s += MangleName(vname, typestr, ck);
+ } else {
+ s += MangleName(name, typestr, ck);
+ }
+ s += "(";
+
+ // Pass the address of the return variable as the first argument to sret-like
+ // builtins.
+ if (sret)
+ s += "&r, ";
+
+ char arg = 'a';
+ for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ std::string args = std::string(&arg, 1);
+
+ // Use the local temporaries instead of the macro arguments.
+ args = "__" + args;
+
+ bool argQuad = false;
+ bool argPoly = false;
+ bool argUsgn = false;
+ bool argScalar = false;
+ bool dummy = false;
+ char argType = ClassifyType(typestr, argQuad, argPoly, argUsgn);
+ argType = ModType(proto[i], argType, argQuad, argPoly, argUsgn, argScalar,
+ dummy, dummy);
+
+ // Handle multiple-vector values specially, emitting each subvector as an
+ // argument to the __builtin.
+ if (proto[i] >= '2' && proto[i] <= '4') {
+ // Check if an explicit cast is needed.
+ if (argType != 'c' || argPoly || argUsgn)
+ args = (argQuad ? "(int8x16_t)" : "(int8x8_t)") + args;
+
+ for (unsigned vi = 0, ve = proto[i] - '0'; vi != ve; ++vi) {
+ s += args + ".val[" + utostr(vi) + "]";
+ if ((vi + 1) < ve)
+ s += ", ";
+ }
+ if ((i + 1) < e)
+ s += ", ";
+
+ continue;
+ }
+
+ if (splat && (i + 1) == e)
+ args = Duplicate(GetNumElements(typestr, argQuad), typestr, args);
+
+ // Check if an explicit cast is needed.
+ if ((splat || !argScalar) &&
+ ((ck == ClassB && argType != 'c') || argPoly || argUsgn)) {
+ std::string argTypeStr = "c";
+ if (ck != ClassB)
+ argTypeStr = argType;
+ if (argQuad)
+ argTypeStr = "Q" + argTypeStr;
+ args = "(" + TypeString('d', argTypeStr) + ")" + args;
+ }
+
+ s += args;
+ if ((i + 1) < e)
+ s += ", ";
+ }
+
+ // Extra constant integer to hold type class enum for this function, e.g. s8
+ if (ck == ClassB)
+ s += ", " + utostr(GetNeonEnum(proto, typestr));
+
+ s += ");";
+
+ if (proto[0] != 'v' && sret) {
+ if (define)
+ s += " r;";
+ else
+ s += " return r;";
+ }
+ return s;
+}
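+// Illustrative sketch (names hypothetical; <flags> stands for the NeonTypeFlags
+// constant computed by GetNeonEnum): for a builtin with prototype "dd" and type
+// string "f", the generated body looks roughly like
+//   return (float32x2_t)__builtin_neon_<mangled name>((int8x8_t)__a, <flags>);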
+
+static std::string GenBuiltinDef(const std::string &name,
+ const std::string &proto,
+ StringRef typestr, ClassKind ck) {
+ std::string s("BUILTIN(__builtin_neon_");
+
+ // If all types are the same size, bitcasting the args will take care
+ // of arg checking. The actual signedness etc. will be taken care of with
+ // special enums.
+ if (proto.find('s') == std::string::npos)
+ ck = ClassB;
+
+ s += MangleName(name, typestr, ck);
+ s += ", \"";
+
+ for (unsigned i = 0, e = proto.size(); i != e; ++i)
+ s += BuiltinTypeString(proto[i], typestr, ck, i == 0);
+
+ // Extra constant integer to hold type class enum for this function, e.g. s8
+ if (ck == ClassB)
+ s += "i";
+
+ s += "\", \"n\")";
+ return s;
+}
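+// Hedged example: GenBuiltinDef("vadd", "ddd", "Qf", ClassS) produces
+//   BUILTIN(__builtin_neon_vaddq_v, "V16ScV16ScV16Sci", "n")
+// because a prototype without a scalar operand is demoted to ClassB.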
+
+static std::string GenIntrinsic(const std::string &name,
+ const std::string &proto,
+ StringRef outTypeStr, StringRef inTypeStr,
+ OpKind kind, ClassKind classKind) {
+ assert(!proto.empty() && "");
+ bool define = UseMacro(proto);
+ std::string s;
+
+ // static always inline + return type
+ if (define)
+ s += "#define ";
+ else
+ s += "__ai " + TypeString(proto[0], outTypeStr) + " ";
+
+ // Function name with type suffix
+ std::string mangledName = MangleName(name, outTypeStr, ClassS);
+ if (outTypeStr != inTypeStr) {
+ // If the input type is different (e.g., for vreinterpret), append a suffix
+ // for the input type. Strip off a "Q" (quad) prefix so that MangleName
+ // does not insert another "q" in the name.
+ unsigned typeStrOff = (inTypeStr[0] == 'Q' ? 1 : 0);
+ StringRef inTypeNoQuad = inTypeStr.substr(typeStrOff);
+ mangledName = MangleName(mangledName, inTypeNoQuad, ClassS);
+ }
+ s += mangledName;
+
+ // Function arguments
+ s += GenArgs(proto, inTypeStr);
+
+ // Definition.
+ if (define) {
+ s += " __extension__ ({ \\\n ";
+ s += GenMacroLocals(proto, inTypeStr);
+ } else {
+ s += " { \\\n ";
+ }
+
+ if (kind != OpNone)
+ s += GenOpString(kind, proto, outTypeStr);
+ else
+ s += GenBuiltin(name, proto, outTypeStr, classKind);
+ if (define)
+ s += " })";
+ else
+ s += " }";
+ s += "\n";
+ return s;
+}
+
+/// run - Read the records in arm_neon.td and output arm_neon.h. arm_neon.h
+/// consists of type definitions and function declarations.
+void NeonEmitter::run(raw_ostream &OS) {
+ OS <<
+ "/*===---- arm_neon.h - ARM Neon intrinsics ------------------------------"
+ "---===\n"
+ " *\n"
+ " * Permission is hereby granted, free of charge, to any person obtaining "
+ "a copy\n"
+ " * of this software and associated documentation files (the \"Software\"),"
+ " to deal\n"
+ " * in the Software without restriction, including without limitation the "
+ "rights\n"
+ " * to use, copy, modify, merge, publish, distribute, sublicense, "
+ "and/or sell\n"
+ " * copies of the Software, and to permit persons to whom the Software is\n"
+ " * furnished to do so, subject to the following conditions:\n"
+ " *\n"
+ " * The above copyright notice and this permission notice shall be "
+ "included in\n"
+ " * all copies or substantial portions of the Software.\n"
+ " *\n"
+ " * THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, "
+ "EXPRESS OR\n"
+ " * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF "
+ "MERCHANTABILITY,\n"
+ " * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT "
+ "SHALL THE\n"
+ " * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR "
+ "OTHER\n"
+ " * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, "
+ "ARISING FROM,\n"
+ " * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER "
+ "DEALINGS IN\n"
+ " * THE SOFTWARE.\n"
+ " *\n"
+ " *===--------------------------------------------------------------------"
+ "---===\n"
+ " */\n\n";
+
+ OS << "#ifndef __ARM_NEON_H\n";
+ OS << "#define __ARM_NEON_H\n\n";
+
+ OS << "#ifndef __ARM_NEON__\n";
+ OS << "#error \"NEON support not enabled\"\n";
+ OS << "#endif\n\n";
+
+ OS << "#include <stdint.h>\n\n";
+
+ // Emit NEON-specific scalar typedefs.
+ OS << "typedef float float32_t;\n";
+ OS << "typedef int8_t poly8_t;\n";
+ OS << "typedef int16_t poly16_t;\n";
+ OS << "typedef uint16_t float16_t;\n";
+
+ // Emit Neon vector typedefs.
+ std::string TypedefTypes("cQcsQsiQilQlUcQUcUsQUsUiQUiUlQUlhQhfQfPcQPcPsQPs");
+ SmallVector<StringRef, 24> TDTypeVec;
+ ParseTypes(0, TypedefTypes, TDTypeVec);
+
+ // Emit vector typedefs.
+ for (unsigned i = 0, e = TDTypeVec.size(); i != e; ++i) {
+ bool dummy, quad = false, poly = false;
+ (void) ClassifyType(TDTypeVec[i], quad, poly, dummy);
+ if (poly)
+ OS << "typedef __attribute__((neon_polyvector_type(";
+ else
+ OS << "typedef __attribute__((neon_vector_type(";
+
+ unsigned nElts = GetNumElements(TDTypeVec[i], quad);
+ OS << utostr(nElts) << "))) ";
+ if (nElts < 10)
+ OS << " ";
+
+ OS << TypeString('s', TDTypeVec[i]);
+ OS << " " << TypeString('d', TDTypeVec[i]) << ";\n";
+ }
+ OS << "\n";
+
+ // Emit struct typedefs.
+ for (unsigned vi = 2; vi != 5; ++vi) {
+ for (unsigned i = 0, e = TDTypeVec.size(); i != e; ++i) {
+ std::string ts = TypeString('d', TDTypeVec[i]);
+ std::string vs = TypeString('0' + vi, TDTypeVec[i]);
+ OS << "typedef struct " << vs << " {\n";
+ OS << " " << ts << " val";
+ OS << "[" << utostr(vi) << "]";
+ OS << ";\n} ";
+ OS << vs << ";\n\n";
+ }
+ }
+
+ OS<<"#define __ai static __attribute__((__always_inline__, __nodebug__))\n\n";
+
+ std::vector<Record*> RV = Records.getAllDerivedDefinitions("Inst");
+
+ // Emit vmovl, vmull and vabd intrinsics first so they can be used by other
+ // intrinsics. (Some of the saturating multiply instructions are also
+ // used to implement the corresponding "_lane" variants, but tablegen
+ // sorts the records into alphabetical order so that the "_lane" variants
+ // come after the intrinsics they use.)
+ emitIntrinsic(OS, Records.getDef("VMOVL"));
+ emitIntrinsic(OS, Records.getDef("VMULL"));
+ emitIntrinsic(OS, Records.getDef("VABD"));
+
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+ if (R->getName() != "VMOVL" &&
+ R->getName() != "VMULL" &&
+ R->getName() != "VABD")
+ emitIntrinsic(OS, R);
+ }
+
+ OS << "#undef __ai\n\n";
+ OS << "#endif /* __ARM_NEON_H */\n";
+}
+
+/// emitIntrinsic - Write out the arm_neon.h header file definitions for the
+/// intrinsics specified by record R.
+void NeonEmitter::emitIntrinsic(raw_ostream &OS, Record *R) {
+ std::string name = R->getValueAsString("Name");
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ OpKind kind = OpMap[R->getValueAsDef("Operand")->getName()];
+
+ ClassKind classKind = ClassNone;
+ if (R->getSuperClasses().size() >= 2)
+ classKind = ClassMap[R->getSuperClasses()[1]];
+ if (classKind == ClassNone && kind == OpNone)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ if (kind == OpReinterpret) {
+ bool outQuad = false;
+ bool dummy = false;
+ (void)ClassifyType(TypeVec[ti], outQuad, dummy, dummy);
+ for (unsigned srcti = 0, srcte = TypeVec.size();
+ srcti != srcte; ++srcti) {
+ bool inQuad = false;
+ (void)ClassifyType(TypeVec[srcti], inQuad, dummy, dummy);
+ if (srcti == ti || inQuad != outQuad)
+ continue;
+ OS << GenIntrinsic(name, Proto, TypeVec[ti], TypeVec[srcti],
+ OpCast, ClassS);
+ }
+ } else {
+ OS << GenIntrinsic(name, Proto, TypeVec[ti], TypeVec[ti],
+ kind, classKind);
+ }
+ }
+ OS << "\n";
+}
+
+static unsigned RangeFromType(const char mod, StringRef typestr) {
+ // base type to get the type string for.
+ bool quad = false, dummy = false;
+ char type = ClassifyType(typestr, quad, dummy, dummy);
+ type = ModType(mod, type, quad, dummy, dummy, dummy, dummy, dummy);
+
+ switch (type) {
+ case 'c':
+ return (8 << (int)quad) - 1;
+ case 'h':
+ case 's':
+ return (4 << (int)quad) - 1;
+ case 'f':
+ case 'i':
+ return (2 << (int)quad) - 1;
+ case 'l':
+ return (1 << (int)quad) - 1;
+ default:
+ throw "unhandled type!";
+ }
+}
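+// Hedged example: RangeFromType('d', "Qc") returns 15, the highest valid lane
+// index for a 16 x 8-bit quad vector.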
+
+/// runHeader - Emit a file with sections defining:
+/// 1. the NEON section of BuiltinsARM.def.
+/// 2. the SemaChecking code for the type overload checking.
+/// 3. the SemaChecking code for validation of intrinsic immediate arguments.
+void NeonEmitter::runHeader(raw_ostream &OS) {
+ std::vector<Record*> RV = Records.getAllDerivedDefinitions("Inst");
+
+ StringMap<OpKind> EmittedMap;
+
+ // Generate BuiltinsARM.def for NEON
+ OS << "#ifdef GET_NEON_BUILTINS\n";
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+ OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
+ if (k != OpNone)
+ continue;
+
+ std::string Proto = R->getValueAsString("Prototype");
+
+ // Functions with 'a' (the splat code) in the type prototype should not get
+ // their own builtin as they use the non-splat variant.
+ if (Proto.find('a') != std::string::npos)
+ continue;
+
+ std::string Types = R->getValueAsString("Types");
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ if (R->getSuperClasses().size() < 2)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ std::string name = R->getValueAsString("Name");
+ ClassKind ck = ClassMap[R->getSuperClasses()[1]];
+
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ // Generate the BuiltinsARM.def declaration for this builtin, ensuring
+ // that each unique BUILTIN() macro appears only once in the output
+ // stream.
+ std::string bd = GenBuiltinDef(name, Proto, TypeVec[ti], ck);
+ if (EmittedMap.count(bd))
+ continue;
+
+ EmittedMap[bd] = OpNone;
+ OS << bd << "\n";
+ }
+ }
+ OS << "#endif\n\n";
+
+ // Generate the overloaded type checking code for SemaChecking.cpp
+ OS << "#ifdef GET_NEON_OVERLOAD_CHECK\n";
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+ OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
+ if (k != OpNone)
+ continue;
+
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+ std::string name = R->getValueAsString("Name");
+
+ // Functions with 'a' (the splat code) in the type prototype should not get
+ // their own builtin as they use the non-splat variant.
+ if (Proto.find('a') != std::string::npos)
+ continue;
+
+ // Functions which have a scalar argument cannot be overloaded; there is no
+ // need to check them when emitting the type checking code.
+ if (Proto.find('s') != std::string::npos)
+ continue;
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ if (R->getSuperClasses().size() < 2)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ int si = -1, qi = -1;
+ unsigned mask = 0, qmask = 0;
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ // Generate the switch case(s) for this builtin for the type validation.
+ bool quad = false, poly = false, usgn = false;
+ (void) ClassifyType(TypeVec[ti], quad, poly, usgn);
+
+ if (quad) {
+ qi = ti;
+ qmask |= 1 << GetNeonEnum(Proto, TypeVec[ti]);
+ } else {
+ si = ti;
+ mask |= 1 << GetNeonEnum(Proto, TypeVec[ti]);
+ }
+ }
+
+ // Check if the builtin function has a pointer or const pointer argument.
+ int PtrArgNum = -1;
+ bool HasConstPtr = false;
+ for (unsigned arg = 1, arge = Proto.size(); arg != arge; ++arg) {
+ char ArgType = Proto[arg];
+ if (ArgType == 'c') {
+ HasConstPtr = true;
+ PtrArgNum = arg - 1;
+ break;
+ }
+ if (ArgType == 'p') {
+ PtrArgNum = arg - 1;
+ break;
+ }
+ }
+ // For sret builtins, adjust the pointer argument index.
+ if (PtrArgNum >= 0 && (Proto[0] >= '2' && Proto[0] <= '4'))
+ PtrArgNum += 1;
+
+ // Omit type checking for the pointer arguments of vld1_lane, vld1_dup,
+ // and vst1_lane intrinsics. Using a pointer to the vector element
+ // type with one of those operations causes codegen to select an aligned
+ // load/store instruction. If you want an unaligned operation,
+ // the pointer argument needs to have less alignment than the element type,
+ // so just accept any pointer type.
+ if (name == "vld1_lane" || name == "vld1_dup" || name == "vst1_lane") {
+ PtrArgNum = -1;
+ HasConstPtr = false;
+ }
+
+ if (mask) {
+ OS << "case ARM::BI__builtin_neon_"
+ << MangleName(name, TypeVec[si], ClassB)
+ << ": mask = " << "0x" << utohexstr(mask);
+ if (PtrArgNum >= 0)
+ OS << "; PtrArgNum = " << PtrArgNum;
+ if (HasConstPtr)
+ OS << "; HasConstPtr = true";
+ OS << "; break;\n";
+ }
+ if (qmask) {
+ OS << "case ARM::BI__builtin_neon_"
+ << MangleName(name, TypeVec[qi], ClassB)
+ << ": mask = " << "0x" << utohexstr(qmask);
+ if (PtrArgNum >= 0)
+ OS << "; PtrArgNum = " << PtrArgNum;
+ if (HasConstPtr)
+ OS << "; HasConstPtr = true";
+ OS << "; break;\n";
+ }
+ }
+ OS << "#endif\n\n";
+
+ // Generate the intrinsic range checking code for shift/lane immediates.
+ OS << "#ifdef GET_NEON_IMMEDIATE_CHECK\n";
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+
+ OpKind k = OpMap[R->getValueAsDef("Operand")->getName()];
+ if (k != OpNone)
+ continue;
+
+ std::string name = R->getValueAsString("Name");
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+
+ // Functions with 'a' (the splat code) in the type prototype should not get
+ // their own builtin as they use the non-splat variant.
+ if (Proto.find('a') != std::string::npos)
+ continue;
+
+ // Functions which do not have an immediate do not need to have range
+ // checking code emitted.
+ size_t immPos = Proto.find('i');
+ if (immPos == std::string::npos)
+ continue;
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ if (R->getSuperClasses().size() < 2)
+ throw TGError(R->getLoc(), "Builtin has no class kind");
+
+ ClassKind ck = ClassMap[R->getSuperClasses()[1]];
+
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ std::string namestr, shiftstr, rangestr;
+
+ if (R->getValueAsBit("isVCVT_N")) {
+ // VCVT between floating- and fixed-point values takes an immediate
+ // in the range 1 to 32.
+ ck = ClassB;
+ rangestr = "l = 1; u = 31"; // upper bound = l + u
+ } else if (Proto.find('s') == std::string::npos) {
+ // Builtins which are overloaded by type will need to have their upper
+ // bound computed at Sema time based on the type constant.
+ ck = ClassB;
+ if (R->getValueAsBit("isShift")) {
+ shiftstr = ", true";
+
+ // Right shifts have an 'r' in the name, left shifts do not.
+ if (name.find('r') != std::string::npos)
+ rangestr = "l = 1; ";
+ }
+ rangestr += "u = RFT(TV" + shiftstr + ")";
+ } else {
+ // The immediate generally refers to a lane in the preceding argument.
+ assert(immPos > 0 && "unexpected immediate operand");
+ rangestr = "u = " + utostr(RangeFromType(Proto[immPos-1], TypeVec[ti]));
+ }
+ // Make sure cases appear only once by uniquing them in a string map.
+ namestr = MangleName(name, TypeVec[ti], ck);
+ if (EmittedMap.count(namestr))
+ continue;
+ EmittedMap[namestr] = OpNone;
+
+ // Calculate the index of the immediate that should be range checked.
+ unsigned immidx = 0;
+
+ // Builtins that return a struct of multiple vectors have an extra
+ // leading arg for the struct return.
+ if (Proto[0] >= '2' && Proto[0] <= '4')
+ ++immidx;
+
+ // Add one to the index for each argument until we reach the immediate
+ // to be checked. Structs of vectors are passed as multiple arguments.
+ for (unsigned ii = 1, ie = Proto.size(); ii != ie; ++ii) {
+ switch (Proto[ii]) {
+ default: immidx += 1; break;
+ case '2': immidx += 2; break;
+ case '3': immidx += 3; break;
+ case '4': immidx += 4; break;
+ case 'i': ie = ii + 1; break;
+ }
+ }
+ OS << "case ARM::BI__builtin_neon_" << MangleName(name, TypeVec[ti], ck)
+ << ": i = " << immidx << "; " << rangestr << "; break;\n";
+ }
+ }
+ OS << "#endif\n\n";
+}
+
+/// GenTest - Write out a test for the intrinsic specified by the name and
+/// type strings, including the embedded patterns for FileCheck to match.
+static std::string GenTest(const std::string &name,
+ const std::string &proto,
+ StringRef outTypeStr, StringRef inTypeStr,
+ bool isShift) {
+ assert(!proto.empty() && "unexpected empty prototype");
+ std::string s;
+
+ // Function name with type suffix
+ std::string mangledName = MangleName(name, outTypeStr, ClassS);
+ if (outTypeStr != inTypeStr) {
+ // If the input type is different (e.g., for vreinterpret), append a suffix
+ // for the input type. Strip off a "Q" (quad) prefix so that MangleName
+ // does not insert another "q" in the name.
+ unsigned typeStrOff = (inTypeStr[0] == 'Q' ? 1 : 0);
+ StringRef inTypeNoQuad = inTypeStr.substr(typeStrOff);
+ mangledName = MangleName(mangledName, inTypeNoQuad, ClassS);
+ }
+
+ // Emit the FileCheck patterns.
+ s += "// CHECK: test_" + mangledName + "\n";
+ // s += "// CHECK: \n"; // FIXME: + expected instruction opcode.
+
+ // Emit the start of the test function.
+ s += TypeString(proto[0], outTypeStr) + " test_" + mangledName + "(";
+ char arg = 'a';
+ std::string comma;
+ for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ // Do not create arguments for values that must be immediate constants.
+ if (proto[i] == 'i')
+ continue;
+ s += comma + TypeString(proto[i], inTypeStr) + " ";
+ s.push_back(arg);
+ comma = ", ";
+ }
+ s += ") { \\\n ";
+
+ if (proto[0] != 'v')
+ s += "return ";
+ s += mangledName + "(";
+ arg = 'a';
+ for (unsigned i = 1, e = proto.size(); i != e; ++i, ++arg) {
+ if (proto[i] == 'i') {
+ // For immediate operands, test the maximum value.
+ if (isShift)
+ s += "1"; // FIXME
+ else
+ // The immediate generally refers to a lane in the preceding argument.
+ s += utostr(RangeFromType(proto[i-1], inTypeStr));
+ } else {
+ s.push_back(arg);
+ }
+ if ((i + 1) < e)
+ s += ", ";
+ }
+ s += ");\n}\n\n";
+ return s;
+}
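+// Editorial illustration (approximate, not actual emitter output): for a
+// binary intrinsic such as vadd_s8 the generated test looks roughly like
+//   // CHECK: test_vadd_s8
+//   int8x8_t test_vadd_s8(int8x8_t a, int8x8_t b) {
+//     return vadd_s8(a, b);
+//   }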
+
+/// runTests - Write out a complete set of tests for all of the Neon
+/// intrinsics.
+void NeonEmitter::runTests(raw_ostream &OS) {
+ OS <<
+ "// RUN: %clang_cc1 -triple thumbv7-apple-darwin \\\n"
+ "// RUN: -target-cpu cortex-a9 -ffreestanding -S -o - %s | FileCheck %s\n"
+ "\n"
+ "#include <arm_neon.h>\n"
+ "\n";
+
+ std::vector<Record*> RV = Records.getAllDerivedDefinitions("Inst");
+ for (unsigned i = 0, e = RV.size(); i != e; ++i) {
+ Record *R = RV[i];
+ std::string name = R->getValueAsString("Name");
+ std::string Proto = R->getValueAsString("Prototype");
+ std::string Types = R->getValueAsString("Types");
+ bool isShift = R->getValueAsBit("isShift");
+
+ SmallVector<StringRef, 16> TypeVec;
+ ParseTypes(R, Types, TypeVec);
+
+ OpKind kind = OpMap[R->getValueAsDef("Operand")->getName()];
+ for (unsigned ti = 0, te = TypeVec.size(); ti != te; ++ti) {
+ if (kind == OpReinterpret) {
+ bool outQuad = false;
+ bool dummy = false;
+ (void)ClassifyType(TypeVec[ti], outQuad, dummy, dummy);
+ for (unsigned srcti = 0, srcte = TypeVec.size();
+ srcti != srcte; ++srcti) {
+ bool inQuad = false;
+ (void)ClassifyType(TypeVec[srcti], inQuad, dummy, dummy);
+ if (srcti == ti || inQuad != outQuad)
+ continue;
+ OS << GenTest(name, Proto, TypeVec[ti], TypeVec[srcti], isShift);
+ }
+ } else {
+ OS << GenTest(name, Proto, TypeVec[ti], TypeVec[ti], isShift);
+ }
+ }
+ OS << "\n";
+ }
+}
+
diff --git a/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.h b/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.h
new file mode 100644
index 0000000..dec7451
--- /dev/null
+++ b/contrib/llvm/tools/clang/utils/TableGen/NeonEmitter.h
@@ -0,0 +1,210 @@
+//===- NeonEmitter.h - Generate arm_neon.h for use with clang ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tablegen backend is responsible for emitting arm_neon.h, which includes
+// a declaration and definition of each function specified by the ARM NEON
+// compiler interface. See ARM document DUI0348B.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef NEON_EMITTER_H
+#define NEON_EMITTER_H
+
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenBackend.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
+
+enum OpKind {
+ OpNone,
+ OpAdd,
+ OpAddl,
+ OpAddw,
+ OpSub,
+ OpSubl,
+ OpSubw,
+ OpMul,
+ OpMla,
+ OpMlal,
+ OpMls,
+ OpMlsl,
+ OpMulN,
+ OpMlaN,
+ OpMlsN,
+ OpMlalN,
+ OpMlslN,
+ OpMulLane,
+ OpMullLane,
+ OpMlaLane,
+ OpMlsLane,
+ OpMlalLane,
+ OpMlslLane,
+ OpQDMullLane,
+ OpQDMlalLane,
+ OpQDMlslLane,
+ OpQDMulhLane,
+ OpQRDMulhLane,
+ OpEq,
+ OpGe,
+ OpLe,
+ OpGt,
+ OpLt,
+ OpNeg,
+ OpNot,
+ OpAnd,
+ OpOr,
+ OpXor,
+ OpAndNot,
+ OpOrNot,
+ OpCast,
+ OpConcat,
+ OpDup,
+ OpDupLane,
+ OpHi,
+ OpLo,
+ OpSelect,
+ OpRev16,
+ OpRev32,
+ OpRev64,
+ OpReinterpret,
+ OpAbdl,
+ OpAba,
+ OpAbal
+};
+
+enum ClassKind {
+ ClassNone,
+ ClassI, // generic integer instruction, e.g., "i8" suffix
+ ClassS, // signed/unsigned/poly, e.g., "s8", "u8" or "p8" suffix
+ ClassW, // width-specific instruction, e.g., "8" suffix
+ ClassB // bitcast arguments with enum argument to specify type
+};
+
+/// NeonTypeFlags - Flags to identify the types for overloaded Neon
+/// builtins. These must be kept in sync with the flags in
+/// include/clang/Basic/TargetBuiltins.h.
+class NeonTypeFlags {
+ enum {
+ EltTypeMask = 0xf,
+ UnsignedFlag = 0x10,
+ QuadFlag = 0x20
+ };
+ uint32_t Flags;
+
+public:
+ enum EltType {
+ Int8,
+ Int16,
+ Int32,
+ Int64,
+ Poly8,
+ Poly16,
+ Float16,
+ Float32
+ };
+
+ NeonTypeFlags(unsigned F) : Flags(F) {}
+ NeonTypeFlags(EltType ET, bool IsUnsigned, bool IsQuad) : Flags(ET) {
+ if (IsUnsigned)
+ Flags |= UnsignedFlag;
+ if (IsQuad)
+ Flags |= QuadFlag;
+ }
+
+ uint32_t getFlags() const { return Flags; }
+};
+
+namespace llvm {
+
+ class NeonEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+ StringMap<OpKind> OpMap;
+ DenseMap<Record*, ClassKind> ClassMap;
+
+ public:
+ NeonEmitter(RecordKeeper &R) : Records(R) {
+ OpMap["OP_NONE"] = OpNone;
+ OpMap["OP_ADD"] = OpAdd;
+ OpMap["OP_ADDL"] = OpAddl;
+ OpMap["OP_ADDW"] = OpAddw;
+ OpMap["OP_SUB"] = OpSub;
+ OpMap["OP_SUBL"] = OpSubl;
+ OpMap["OP_SUBW"] = OpSubw;
+ OpMap["OP_MUL"] = OpMul;
+ OpMap["OP_MLA"] = OpMla;
+ OpMap["OP_MLAL"] = OpMlal;
+ OpMap["OP_MLS"] = OpMls;
+ OpMap["OP_MLSL"] = OpMlsl;
+ OpMap["OP_MUL_N"] = OpMulN;
+ OpMap["OP_MLA_N"] = OpMlaN;
+ OpMap["OP_MLS_N"] = OpMlsN;
+ OpMap["OP_MLAL_N"] = OpMlalN;
+ OpMap["OP_MLSL_N"] = OpMlslN;
+ OpMap["OP_MUL_LN"]= OpMulLane;
+ OpMap["OP_MULL_LN"] = OpMullLane;
+ OpMap["OP_MLA_LN"]= OpMlaLane;
+ OpMap["OP_MLS_LN"]= OpMlsLane;
+ OpMap["OP_MLAL_LN"] = OpMlalLane;
+ OpMap["OP_MLSL_LN"] = OpMlslLane;
+ OpMap["OP_QDMULL_LN"] = OpQDMullLane;
+ OpMap["OP_QDMLAL_LN"] = OpQDMlalLane;
+ OpMap["OP_QDMLSL_LN"] = OpQDMlslLane;
+ OpMap["OP_QDMULH_LN"] = OpQDMulhLane;
+ OpMap["OP_QRDMULH_LN"] = OpQRDMulhLane;
+ OpMap["OP_EQ"] = OpEq;
+ OpMap["OP_GE"] = OpGe;
+ OpMap["OP_LE"] = OpLe;
+ OpMap["OP_GT"] = OpGt;
+ OpMap["OP_LT"] = OpLt;
+ OpMap["OP_NEG"] = OpNeg;
+ OpMap["OP_NOT"] = OpNot;
+ OpMap["OP_AND"] = OpAnd;
+ OpMap["OP_OR"] = OpOr;
+ OpMap["OP_XOR"] = OpXor;
+ OpMap["OP_ANDN"] = OpAndNot;
+ OpMap["OP_ORN"] = OpOrNot;
+ OpMap["OP_CAST"] = OpCast;
+ OpMap["OP_CONC"] = OpConcat;
+ OpMap["OP_HI"] = OpHi;
+ OpMap["OP_LO"] = OpLo;
+ OpMap["OP_DUP"] = OpDup;
+ OpMap["OP_DUP_LN"] = OpDupLane;
+ OpMap["OP_SEL"] = OpSelect;
+ OpMap["OP_REV16"] = OpRev16;
+ OpMap["OP_REV32"] = OpRev32;
+ OpMap["OP_REV64"] = OpRev64;
+ OpMap["OP_REINT"] = OpReinterpret;
+ OpMap["OP_ABDL"] = OpAbdl;
+ OpMap["OP_ABA"] = OpAba;
+ OpMap["OP_ABAL"] = OpAbal;
+
+ Record *SI = R.getClass("SInst");
+ Record *II = R.getClass("IInst");
+ Record *WI = R.getClass("WInst");
+ ClassMap[SI] = ClassS;
+ ClassMap[II] = ClassI;
+ ClassMap[WI] = ClassW;
+ }
+
+ // run - Emit arm_neon.h.inc
+ void run(raw_ostream &o);
+
+ // runHeader - Emit all the __builtin prototypes used in arm_neon.h
+ void runHeader(raw_ostream &o);
+
+ // runTests - Emit tests for all the Neon intrinsics.
+ void runTests(raw_ostream &o);
+
+ private:
+ void emitIntrinsic(raw_ostream &OS, Record *R);
+ };
+
+} // End llvm namespace
+
+#endif
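The NeonTypeFlags encoding above is easiest to see with a worked value. The following standalone sketch is editorial commentary on the imported header, not part of the change itself; it restates the class's constants to show how an unsigned 128-bit (quad) Int32 operand packs into 0x32.

    #include <cassert>
    #include <cstdint>

    int main() {
      // Same layout as NeonTypeFlags: element type in the low nibble,
      // 0x10 marks unsigned elements, 0x20 marks a quad (128-bit) vector.
      const uint32_t Int32 = 2, UnsignedFlag = 0x10, QuadFlag = 0x20;
      uint32_t Flags = Int32;   // NeonTypeFlags(Int32, IsUnsigned, IsQuad)
      Flags |= UnsignedFlag;    // IsUnsigned == true
      Flags |= QuadFlag;        // IsQuad == true
      assert(Flags == 0x32);    // e.g. an unsigned 32-bit quad vector (uint32x4_t)
      return 0;
    }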
diff --git a/contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.cpp b/contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.cpp
new file mode 100644
index 0000000..dea22d3
--- /dev/null
+++ b/contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.cpp
@@ -0,0 +1,194 @@
+//===- OptParserEmitter.cpp - Table Driven Command Line Parsing -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "OptParserEmitter.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/ADT/STLExtras.h"
+using namespace llvm;
+
+static int StrCmpOptionName(const char *A, const char *B) {
+ char a = *A, b = *B;
+ while (a == b) {
+ if (a == '\0')
+ return 0;
+
+ a = *++A;
+ b = *++B;
+ }
+
+ if (a == '\0') // A is a prefix of B.
+ return 1;
+ if (b == '\0') // B is a prefix of A.
+ return -1;
+
+ // Otherwise lexicographic.
+ return (a < b) ? -1 : 1;
+}
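+// Note that this ordering places a name that is a strict prefix of another
+// name *after* the longer name; e.g. "W" sorts after "Wall".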
+
+static int CompareOptionRecords(const void *Av, const void *Bv) {
+ const Record *A = *(Record**) Av;
+ const Record *B = *(Record**) Bv;
+
+ // Sentinel options precede all others and are only ordered by precedence.
+ bool ASent = A->getValueAsDef("Kind")->getValueAsBit("Sentinel");
+ bool BSent = B->getValueAsDef("Kind")->getValueAsBit("Sentinel");
+ if (ASent != BSent)
+ return ASent ? -1 : 1;
+
+ // Compare options by name, unless they are sentinels.
+ if (!ASent)
+ if (int Cmp = StrCmpOptionName(A->getValueAsString("Name").c_str(),
+ B->getValueAsString("Name").c_str()))
+ return Cmp;
+
+ // Then order by kind precedence.
+ int APrec = A->getValueAsDef("Kind")->getValueAsInt("Precedence");
+ int BPrec = B->getValueAsDef("Kind")->getValueAsInt("Precedence");
+ assert(APrec != BPrec && "Options are equivalent!");
+ return APrec < BPrec ? -1 : 1;
+}
+
+static const std::string getOptionName(const Record &R) {
+ // Use the record name unless EnumName is defined.
+ if (dynamic_cast<UnsetInit*>(R.getValueInit("EnumName")))
+ return R.getName();
+
+ return R.getValueAsString("EnumName");
+}
+
+static raw_ostream &write_cstring(raw_ostream &OS, llvm::StringRef Str) {
+ OS << '"';
+ OS.write_escaped(Str);
+ OS << '"';
+ return OS;
+}
+
+void OptParserEmitter::run(raw_ostream &OS) {
+ // Get the option groups and options.
+ const std::vector<Record*> &Groups =
+ Records.getAllDerivedDefinitions("OptionGroup");
+ std::vector<Record*> Opts = Records.getAllDerivedDefinitions("Option");
+
+ if (GenDefs)
+ EmitSourceFileHeader("Option Parsing Definitions", OS);
+ else
+ EmitSourceFileHeader("Option Parsing Table", OS);
+
+ array_pod_sort(Opts.begin(), Opts.end(), CompareOptionRecords);
+ if (GenDefs) {
+ OS << "#ifndef OPTION\n";
+ OS << "#error \"Define OPTION prior to including this file!\"\n";
+ OS << "#endif\n\n";
+
+ OS << "/////////\n";
+ OS << "// Groups\n\n";
+ for (unsigned i = 0, e = Groups.size(); i != e; ++i) {
+ const Record &R = *Groups[i];
+
+ // Start a single option entry.
+ OS << "OPTION(";
+
+ // The option string.
+ OS << '"' << R.getValueAsString("Name") << '"';
+
+ // The option identifier name.
+ OS << ", "<< getOptionName(R);
+
+ // The option kind.
+ OS << ", Group";
+
+ // The containing option group (if any).
+ OS << ", ";
+ if (const DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Group")))
+ OS << getOptionName(*DI->getDef());
+ else
+ OS << "INVALID";
+
+ // The other option arguments (unused for groups).
+ OS << ", INVALID, 0, 0";
+
+ // The option help text.
+ if (!dynamic_cast<UnsetInit*>(R.getValueInit("HelpText"))) {
+ OS << ",\n";
+ OS << " ";
+ write_cstring(OS, R.getValueAsString("HelpText"));
+ } else
+ OS << ", 0";
+
+ // The option meta-variable name (unused).
+ OS << ", 0)\n";
+ }
+ OS << "\n";
+
+ OS << "//////////\n";
+ OS << "// Options\n\n";
+ for (unsigned i = 0, e = Opts.size(); i != e; ++i) {
+ const Record &R = *Opts[i];
+
+ // Start a single option entry.
+ OS << "OPTION(";
+
+ // The option string.
+ write_cstring(OS, R.getValueAsString("Name"));
+
+ // The option identifier name.
+ OS << ", "<< getOptionName(R);
+
+ // The option kind.
+ OS << ", " << R.getValueAsDef("Kind")->getValueAsString("Name");
+
+ // The containing option group (if any).
+ OS << ", ";
+ if (const DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Group")))
+ OS << getOptionName(*DI->getDef());
+ else
+ OS << "INVALID";
+
+ // The option alias (if any).
+ OS << ", ";
+ if (const DefInit *DI = dynamic_cast<DefInit*>(R.getValueInit("Alias")))
+ OS << getOptionName(*DI->getDef());
+ else
+ OS << "INVALID";
+
+ // The option flags.
+ const ListInit *LI = R.getValueAsListInit("Flags");
+ if (LI->empty()) {
+ OS << ", 0";
+ } else {
+ OS << ", ";
+ for (unsigned i = 0, e = LI->size(); i != e; ++i) {
+ if (i)
+ OS << " | ";
+ OS << dynamic_cast<DefInit*>(LI->getElement(i))->getDef()->getName();
+ }
+ }
+
+ // The option parameter field.
+ OS << ", " << R.getValueAsInt("NumArgs");
+
+ // The option help text.
+ if (!dynamic_cast<UnsetInit*>(R.getValueInit("HelpText"))) {
+ OS << ",\n";
+ OS << " ";
+ write_cstring(OS, R.getValueAsString("HelpText"));
+ } else
+ OS << ", 0";
+
+ // The option meta-variable name.
+ OS << ", ";
+ if (!dynamic_cast<UnsetInit*>(R.getValueInit("MetaVarName")))
+ write_cstring(OS, R.getValueAsString("MetaVarName"));
+ else
+ OS << "0";
+
+ OS << ")\n";
+ }
+ }
+}
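The emitter above produces a flat sequence of OPTION(...) entries guarded by the "#ifndef OPTION" error check, which makes the output a classic X-macro table. The sketch below shows one plausible consumer; the file name "Options.inc" and the OPT_ prefix are assumptions for illustration, not taken from this change.

    // Build an enum of option IDs from the generated table (hypothetical consumer).
    enum ID {
      OPT_INVALID = 0, // reserved sentinel
    #define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, HELPTEXT, METAVAR) \
      OPT_##ID,
    #include "Options.inc" // output of the -gen-opt-parser-defs action
      LastOption
    #undef OPTION
    };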
diff --git a/contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.h b/contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.h
new file mode 100644
index 0000000..ca667ca
--- /dev/null
+++ b/contrib/llvm/tools/clang/utils/TableGen/OptParserEmitter.h
@@ -0,0 +1,34 @@
+//===- OptParserEmitter.h - Table Driven Command Line Parsing ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef UTILS_TABLEGEN_OPTPARSEREMITTER_H
+#define UTILS_TABLEGEN_OPTPARSEREMITTER_H
+
+#include "llvm/TableGen/TableGenBackend.h"
+
+namespace llvm {
+ /// OptParserEmitter - This tablegen backend takes an input .td file
+ /// describing a list of options and emits a data structure for parsing and
+ /// working with those options when given an input command line.
+ class OptParserEmitter : public TableGenBackend {
+ RecordKeeper &Records;
+ bool GenDefs;
+
+ public:
+ OptParserEmitter(RecordKeeper &R, bool _GenDefs)
+ : Records(R), GenDefs(_GenDefs) {}
+
+ /// run - Output the option parsing information.
+ ///
+ /// If GenDefs is set, emit the definitions header describing the option
+ /// IDs; otherwise emit the option parsing table.
+ void run(raw_ostream &OS);
+ };
+}
+
+#endif
diff --git a/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp b/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp
new file mode 100644
index 0000000..5ff88db
--- /dev/null
+++ b/contrib/llvm/tools/clang/utils/TableGen/TableGen.cpp
@@ -0,0 +1,194 @@
+//===- TableGen.cpp - Top-Level TableGen implementation for Clang ---------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the main function for Clang's TableGen.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ClangASTNodesEmitter.h"
+#include "ClangAttrEmitter.h"
+#include "ClangDiagnosticsEmitter.h"
+#include "ClangSACheckersEmitter.h"
+#include "NeonEmitter.h"
+#include "OptParserEmitter.h"
+
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Main.h"
+#include "llvm/TableGen/Record.h"
+#include "llvm/TableGen/TableGenAction.h"
+
+using namespace llvm;
+
+enum ActionType {
+ GenClangAttrClasses,
+ GenClangAttrImpl,
+ GenClangAttrList,
+ GenClangAttrPCHRead,
+ GenClangAttrPCHWrite,
+ GenClangAttrSpellingList,
+ GenClangAttrLateParsedList,
+ GenClangAttrTemplateInstantiate,
+ GenClangAttrParsedAttrList,
+ GenClangAttrParsedAttrKinds,
+ GenClangDiagsDefs,
+ GenClangDiagGroups,
+ GenClangDiagsIndexName,
+ GenClangDeclNodes,
+ GenClangStmtNodes,
+ GenClangSACheckers,
+ GenOptParserDefs, GenOptParserImpl,
+ GenArmNeon,
+ GenArmNeonSema,
+ GenArmNeonTest
+};
+
+namespace {
+ cl::opt<ActionType>
+ Action(cl::desc("Action to perform:"),
+ cl::values(clEnumValN(GenOptParserDefs, "gen-opt-parser-defs",
+ "Generate option definitions"),
+ clEnumValN(GenOptParserImpl, "gen-opt-parser-impl",
+ "Generate option parser implementation"),
+ clEnumValN(GenClangAttrClasses, "gen-clang-attr-classes",
+ "Generate clang attribute clases"),
+ clEnumValN(GenClangAttrImpl, "gen-clang-attr-impl",
+ "Generate clang attribute implementations"),
+ clEnumValN(GenClangAttrList, "gen-clang-attr-list",
+ "Generate a clang attribute list"),
+ clEnumValN(GenClangAttrPCHRead, "gen-clang-attr-pch-read",
+ "Generate clang PCH attribute reader"),
+ clEnumValN(GenClangAttrPCHWrite, "gen-clang-attr-pch-write",
+ "Generate clang PCH attribute writer"),
+ clEnumValN(GenClangAttrSpellingList,
+ "gen-clang-attr-spelling-list",
+ "Generate a clang attribute spelling list"),
+ clEnumValN(GenClangAttrLateParsedList,
+ "gen-clang-attr-late-parsed-list",
+ "Generate a clang attribute LateParsed list"),
+ clEnumValN(GenClangAttrTemplateInstantiate,
+ "gen-clang-attr-template-instantiate",
+ "Generate a clang template instantiate code"),
+ clEnumValN(GenClangAttrParsedAttrList,
+ "gen-clang-attr-parsed-attr-list",
+ "Generate a clang parsed attribute list"),
+ clEnumValN(GenClangAttrParsedAttrKinds,
+ "gen-clang-attr-parsed-attr-kinds",
+ "Generate a clang parsed attribute kinds"),
+ clEnumValN(GenClangDiagsDefs, "gen-clang-diags-defs",
+ "Generate Clang diagnostics definitions"),
+ clEnumValN(GenClangDiagGroups, "gen-clang-diag-groups",
+ "Generate Clang diagnostic groups"),
+ clEnumValN(GenClangDiagsIndexName,
+ "gen-clang-diags-index-name",
+ "Generate Clang diagnostic name index"),
+ clEnumValN(GenClangDeclNodes, "gen-clang-decl-nodes",
+ "Generate Clang AST declaration nodes"),
+ clEnumValN(GenClangStmtNodes, "gen-clang-stmt-nodes",
+ "Generate Clang AST statement nodes"),
+ clEnumValN(GenClangSACheckers, "gen-clang-sa-checkers",
+ "Generate Clang Static Analyzer checkers"),
+ clEnumValN(GenArmNeon, "gen-arm-neon",
+ "Generate arm_neon.h for clang"),
+ clEnumValN(GenArmNeonSema, "gen-arm-neon-sema",
+ "Generate ARM NEON sema support for clang"),
+ clEnumValN(GenArmNeonTest, "gen-arm-neon-test",
+ "Generate ARM NEON tests for clang"),
+ clEnumValEnd));
+
+ cl::opt<std::string>
+ ClangComponent("clang-component",
+ cl::desc("Only use warnings from specified component"),
+ cl::value_desc("component"), cl::Hidden);
+
+class ClangTableGenAction : public TableGenAction {
+public:
+ bool operator()(raw_ostream &OS, RecordKeeper &Records) {
+ switch (Action) {
+ case GenClangAttrClasses:
+ ClangAttrClassEmitter(Records).run(OS);
+ break;
+ case GenClangAttrImpl:
+ ClangAttrImplEmitter(Records).run(OS);
+ break;
+ case GenClangAttrList:
+ ClangAttrListEmitter(Records).run(OS);
+ break;
+ case GenClangAttrPCHRead:
+ ClangAttrPCHReadEmitter(Records).run(OS);
+ break;
+ case GenClangAttrPCHWrite:
+ ClangAttrPCHWriteEmitter(Records).run(OS);
+ break;
+ case GenClangAttrSpellingList:
+ ClangAttrSpellingListEmitter(Records).run(OS);
+ break;
+ case GenClangAttrLateParsedList:
+ ClangAttrLateParsedListEmitter(Records).run(OS);
+ break;
+ case GenClangAttrTemplateInstantiate:
+ ClangAttrTemplateInstantiateEmitter(Records).run(OS);
+ break;
+ case GenClangAttrParsedAttrList:
+ ClangAttrParsedAttrListEmitter(Records).run(OS);
+ break;
+ case GenClangAttrParsedAttrKinds:
+ ClangAttrParsedAttrKindsEmitter(Records).run(OS);
+ break;
+ case GenClangDiagsDefs:
+ ClangDiagsDefsEmitter(Records, ClangComponent).run(OS);
+ break;
+ case GenClangDiagGroups:
+ ClangDiagGroupsEmitter(Records).run(OS);
+ break;
+ case GenClangDiagsIndexName:
+ ClangDiagsIndexNameEmitter(Records).run(OS);
+ break;
+ case GenClangDeclNodes:
+ ClangASTNodesEmitter(Records, "Decl", "Decl").run(OS);
+ ClangDeclContextEmitter(Records).run(OS);
+ break;
+ case GenClangStmtNodes:
+ ClangASTNodesEmitter(Records, "Stmt", "").run(OS);
+ break;
+ case GenClangSACheckers:
+ ClangSACheckersEmitter(Records).run(OS);
+ break;
+ case GenOptParserDefs:
+ OptParserEmitter(Records, true).run(OS);
+ break;
+ case GenOptParserImpl:
+ OptParserEmitter(Records, false).run(OS);
+ break;
+ case GenArmNeon:
+ NeonEmitter(Records).run(OS);
+ break;
+ case GenArmNeonSema:
+ NeonEmitter(Records).runHeader(OS);
+ break;
+ case GenArmNeonTest:
+ NeonEmitter(Records).runTests(OS);
+ break;
+ }
+
+ return false;
+ }
+};
+}
+
+int main(int argc, char **argv) {
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(argc, argv);
+ cl::ParseCommandLineOptions(argc, argv);
+
+ ClangTableGenAction Action;
+ return TableGenMain(argv[0], Action);
+}
diff --git a/contrib/llvm/tools/llc/llc.cpp b/contrib/llvm/tools/llc/llc.cpp
new file mode 100644
index 0000000..9e30ac1
--- /dev/null
+++ b/contrib/llvm/tools/llc/llc.cpp
@@ -0,0 +1,547 @@
+//===-- llc.cpp - Implement the LLVM Native Code Generator ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is the llc code generator driver. It provides a convenient
+// command-line interface for generating native assembly-language code
+// or C code, given LLVM bitcode.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Pass.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/IRReader.h"
+#include "llvm/CodeGen/LinkAllAsmWriterComponents.h"
+#include "llvm/CodeGen/LinkAllCodegenComponents.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/PluginLoader.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include <memory>
+using namespace llvm;
+
+// General options for llc. Other pass-specific options are specified
+// within the corresponding llc passes, and target-specific options
+// and back-end code generation options are specified with the target machine.
+//
+static cl::opt<std::string>
+InputFilename(cl::Positional, cl::desc("<input bitcode>"), cl::init("-"));
+
+static cl::opt<std::string>
+OutputFilename("o", cl::desc("Output filename"), cl::value_desc("filename"));
+
+// Determine optimization level.
+static cl::opt<char>
+OptLevel("O",
+ cl::desc("Optimization level. [-O0, -O1, -O2, or -O3] "
+ "(default = '-O2')"),
+ cl::Prefix,
+ cl::ZeroOrMore,
+ cl::init(' '));
+
+static cl::opt<std::string>
+TargetTriple("mtriple", cl::desc("Override target triple for module"));
+
+static cl::opt<std::string>
+MArch("march", cl::desc("Architecture to generate code for (see --version)"));
+
+static cl::opt<std::string>
+MCPU("mcpu",
+ cl::desc("Target a specific cpu type (-mcpu=help for details)"),
+ cl::value_desc("cpu-name"),
+ cl::init(""));
+
+static cl::list<std::string>
+MAttrs("mattr",
+ cl::CommaSeparated,
+ cl::desc("Target specific attributes (-mattr=help for details)"),
+ cl::value_desc("a1,+a2,-a3,..."));
+
+static cl::opt<Reloc::Model>
+RelocModel("relocation-model",
+ cl::desc("Choose relocation model"),
+ cl::init(Reloc::Default),
+ cl::values(
+ clEnumValN(Reloc::Default, "default",
+ "Target default relocation model"),
+ clEnumValN(Reloc::Static, "static",
+ "Non-relocatable code"),
+ clEnumValN(Reloc::PIC_, "pic",
+ "Fully relocatable, position independent code"),
+ clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic",
+ "Relocatable external references, non-relocatable code"),
+ clEnumValEnd));
+
+static cl::opt<llvm::CodeModel::Model>
+CMModel("code-model",
+ cl::desc("Choose code model"),
+ cl::init(CodeModel::Default),
+ cl::values(clEnumValN(CodeModel::Default, "default",
+ "Target default code model"),
+ clEnumValN(CodeModel::Small, "small",
+ "Small code model"),
+ clEnumValN(CodeModel::Kernel, "kernel",
+ "Kernel code model"),
+ clEnumValN(CodeModel::Medium, "medium",
+ "Medium code model"),
+ clEnumValN(CodeModel::Large, "large",
+ "Large code model"),
+ clEnumValEnd));
+
+static cl::opt<bool>
+RelaxAll("mc-relax-all",
+ cl::desc("When used with filetype=obj, "
+ "relax all fixups in the emitted object file"));
+
+cl::opt<TargetMachine::CodeGenFileType>
+FileType("filetype", cl::init(TargetMachine::CGFT_AssemblyFile),
+ cl::desc("Choose a file type (not all types are supported by all targets):"),
+ cl::values(
+ clEnumValN(TargetMachine::CGFT_AssemblyFile, "asm",
+ "Emit an assembly ('.s') file"),
+ clEnumValN(TargetMachine::CGFT_ObjectFile, "obj",
+ "Emit a native object ('.o') file [experimental]"),
+ clEnumValN(TargetMachine::CGFT_Null, "null",
+ "Emit nothing, for performance testing"),
+ clEnumValEnd));
+
+cl::opt<bool> NoVerify("disable-verify", cl::Hidden,
+ cl::desc("Do not verify input module"));
+
+cl::opt<bool> DisableDotLoc("disable-dot-loc", cl::Hidden,
+ cl::desc("Do not use .loc entries"));
+
+cl::opt<bool> DisableCFI("disable-cfi", cl::Hidden,
+ cl::desc("Do not use .cfi_* directives"));
+
+cl::opt<bool> EnableDwarfDirectory("enable-dwarf-directory", cl::Hidden,
+ cl::desc("Use .file directives with an explicit directory."));
+
+static cl::opt<bool>
+DisableRedZone("disable-red-zone",
+ cl::desc("Do not emit code that uses the red zone."),
+ cl::init(false));
+
+static cl::opt<bool>
+EnableFPMAD("enable-fp-mad",
+ cl::desc("Enable less precise MAD instructions to be generated"),
+ cl::init(false));
+
+static cl::opt<bool>
+PrintCode("print-machineinstrs",
+ cl::desc("Print generated machine code"),
+ cl::init(false));
+
+static cl::opt<bool>
+DisableFPElim("disable-fp-elim",
+ cl::desc("Disable frame pointer elimination optimization"),
+ cl::init(false));
+
+static cl::opt<bool>
+DisableFPElimNonLeaf("disable-non-leaf-fp-elim",
+ cl::desc("Disable frame pointer elimination optimization for non-leaf funcs"),
+ cl::init(false));
+
+static cl::opt<bool>
+DisableExcessPrecision("disable-excess-fp-precision",
+ cl::desc("Disable optimizations that may increase FP precision"),
+ cl::init(false));
+
+static cl::opt<bool>
+EnableUnsafeFPMath("enable-unsafe-fp-math",
+ cl::desc("Enable optimizations that may decrease FP precision"),
+ cl::init(false));
+
+static cl::opt<bool>
+EnableNoInfsFPMath("enable-no-infs-fp-math",
+ cl::desc("Enable FP math optimizations that assume no +-Infs"),
+ cl::init(false));
+
+static cl::opt<bool>
+EnableNoNaNsFPMath("enable-no-nans-fp-math",
+ cl::desc("Enable FP math optimizations that assume no NaNs"),
+ cl::init(false));
+
+static cl::opt<bool>
+EnableHonorSignDependentRoundingFPMath("enable-sign-dependent-rounding-fp-math",
+ cl::Hidden,
+ cl::desc("Force codegen to assume rounding mode can change dynamically"),
+ cl::init(false));
+
+static cl::opt<bool>
+GenerateSoftFloatCalls("soft-float",
+ cl::desc("Generate software floating point library calls"),
+ cl::init(false));
+
+static cl::opt<llvm::FloatABI::ABIType>
+FloatABIForCalls("float-abi",
+ cl::desc("Choose float ABI type"),
+ cl::init(FloatABI::Default),
+ cl::values(
+ clEnumValN(FloatABI::Default, "default",
+ "Target default float ABI type"),
+ clEnumValN(FloatABI::Soft, "soft",
+ "Soft float ABI (implied by -soft-float)"),
+ clEnumValN(FloatABI::Hard, "hard",
+ "Hard float ABI (uses FP registers)"),
+ clEnumValEnd));
+
+static cl::opt<bool>
+DontPlaceZerosInBSS("nozero-initialized-in-bss",
+ cl::desc("Don't place zero-initialized symbols into bss section"),
+ cl::init(false));
+
+static cl::opt<bool>
+EnableJITExceptionHandling("jit-enable-eh",
+ cl::desc("Emit exception handling information"),
+ cl::init(false));
+
+// In debug builds, make this default to true.
+#ifdef NDEBUG
+#define EMIT_DEBUG false
+#else
+#define EMIT_DEBUG true
+#endif
+static cl::opt<bool>
+EmitJitDebugInfo("jit-emit-debug",
+ cl::desc("Emit debug information to debugger"),
+ cl::init(EMIT_DEBUG));
+#undef EMIT_DEBUG
+
+static cl::opt<bool>
+EmitJitDebugInfoToDisk("jit-emit-debug-to-disk",
+ cl::Hidden,
+ cl::desc("Emit debug info objfiles to disk"),
+ cl::init(false));
+
+static cl::opt<bool>
+EnableGuaranteedTailCallOpt("tailcallopt",
+ cl::desc("Turn fastcc calls into tail calls by (potentially) changing ABI."),
+ cl::init(false));
+
+static cl::opt<bool>
+DisableTailCalls("disable-tail-calls",
+ cl::desc("Never emit tail calls"),
+ cl::init(false));
+
+static cl::opt<unsigned>
+OverrideStackAlignment("stack-alignment",
+ cl::desc("Override default stack alignment"),
+ cl::init(0));
+
+static cl::opt<bool>
+EnableRealignStack("realign-stack",
+ cl::desc("Realign stack if needed"),
+ cl::init(true));
+
+static cl::opt<bool>
+DisableSwitchTables(cl::Hidden, "disable-jump-tables",
+ cl::desc("Do not generate jump tables."),
+ cl::init(false));
+
+static cl::opt<std::string>
+TrapFuncName("trap-func", cl::Hidden,
+ cl::desc("Emit a call to trap function rather than a trap instruction"),
+ cl::init(""));
+
+static cl::opt<bool>
+EnablePIE("enable-pie",
+ cl::desc("Assume the creation of a position independent executable."),
+ cl::init(false));
+
+static cl::opt<bool>
+SegmentedStacks("segmented-stacks",
+ cl::desc("Use segmented stacks if possible."),
+ cl::init(false));
+
+
+// GetFileNameRoot - Helper function to get the basename of a filename.
+static inline std::string
+GetFileNameRoot(const std::string &InputFilename) {
+ std::string IFN = InputFilename;
+ std::string outputFilename;
+ int Len = IFN.length();
+ if ((Len > 2) &&
+ IFN[Len-3] == '.' &&
+ ((IFN[Len-2] == 'b' && IFN[Len-1] == 'c') ||
+ (IFN[Len-2] == 'l' && IFN[Len-1] == 'l'))) {
+ outputFilename = std::string(IFN.begin(), IFN.end()-3); // s/.bc/.s/
+ } else {
+ outputFilename = IFN;
+ }
+ return outputFilename;
+}
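+// For example, "foo.bc" and "foo.ll" both yield "foo"; any other name is
+// returned unchanged. GetOutputStream below then appends ".s", ".o"/".obj",
+// ".cbe.c"/".cpp" (for the C back ends), or ".null" depending on -filetype.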
+
+static tool_output_file *GetOutputStream(const char *TargetName,
+ Triple::OSType OS,
+ const char *ProgName) {
+ // If we don't yet have an output filename, make one.
+ if (OutputFilename.empty()) {
+ if (InputFilename == "-")
+ OutputFilename = "-";
+ else {
+ OutputFilename = GetFileNameRoot(InputFilename);
+
+ switch (FileType) {
+ case TargetMachine::CGFT_AssemblyFile:
+ if (TargetName[0] == 'c') {
+ if (TargetName[1] == 0)
+ OutputFilename += ".cbe.c";
+ else if (TargetName[1] == 'p' && TargetName[2] == 'p')
+ OutputFilename += ".cpp";
+ else
+ OutputFilename += ".s";
+ } else
+ OutputFilename += ".s";
+ break;
+ case TargetMachine::CGFT_ObjectFile:
+ if (OS == Triple::Win32)
+ OutputFilename += ".obj";
+ else
+ OutputFilename += ".o";
+ break;
+ case TargetMachine::CGFT_Null:
+ OutputFilename += ".null";
+ break;
+ }
+ }
+ }
+
+ // Decide if we need "binary" output.
+ bool Binary = false;
+ switch (FileType) {
+ case TargetMachine::CGFT_AssemblyFile:
+ break;
+ case TargetMachine::CGFT_ObjectFile:
+ case TargetMachine::CGFT_Null:
+ Binary = true;
+ break;
+ }
+
+ // Open the file.
+ std::string error;
+ unsigned OpenFlags = 0;
+ if (Binary) OpenFlags |= raw_fd_ostream::F_Binary;
+ tool_output_file *FDOut = new tool_output_file(OutputFilename.c_str(), error,
+ OpenFlags);
+ if (!error.empty()) {
+ errs() << error << '\n';
+ delete FDOut;
+ return 0;
+ }
+
+ return FDOut;
+}
+
+// main - Entry point for the llc compiler.
+//
+int main(int argc, char **argv) {
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(argc, argv);
+
+ // Enable debug stream buffering.
+ EnableDebugBuffering = true;
+
+ LLVMContext &Context = getGlobalContext();
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+
+ // Initialize targets first, so that --version shows registered targets.
+ InitializeAllTargets();
+ InitializeAllTargetMCs();
+ InitializeAllAsmPrinters();
+ InitializeAllAsmParsers();
+
+ // Register the target printer for --version.
+ cl::AddExtraVersionPrinter(TargetRegistry::printRegisteredTargetsForVersion);
+
+ cl::ParseCommandLineOptions(argc, argv, "llvm system compiler\n");
+
+ // Load the module to be compiled...
+ SMDiagnostic Err;
+ std::auto_ptr<Module> M;
+
+ M.reset(ParseIRFile(InputFilename, Err, Context));
+ if (M.get() == 0) {
+ Err.print(argv[0], errs());
+ return 1;
+ }
+ Module &mod = *M.get();
+
+ // If we are supposed to override the target triple, do so now.
+ if (!TargetTriple.empty())
+ mod.setTargetTriple(Triple::normalize(TargetTriple));
+
+ Triple TheTriple(mod.getTargetTriple());
+ if (TheTriple.getTriple().empty())
+ TheTriple.setTriple(sys::getDefaultTargetTriple());
+
+ // Allocate target machine. First, check whether the user has explicitly
+ // specified an architecture to compile for. If so we have to look it up by
+ // name, because it might be a backend that has no mapping to a target triple.
+ const Target *TheTarget = 0;
+ if (!MArch.empty()) {
+ for (TargetRegistry::iterator it = TargetRegistry::begin(),
+ ie = TargetRegistry::end(); it != ie; ++it) {
+ if (MArch == it->getName()) {
+ TheTarget = &*it;
+ break;
+ }
+ }
+
+ if (!TheTarget) {
+ errs() << argv[0] << ": error: invalid target '" << MArch << "'.\n";
+ return 1;
+ }
+
+ // Adjust the triple to match (if known), otherwise stick with the
+ // module/host triple.
+ Triple::ArchType Type = Triple::getArchTypeForLLVMName(MArch);
+ if (Type != Triple::UnknownArch)
+ TheTriple.setArch(Type);
+ } else {
+ std::string Err;
+ TheTarget = TargetRegistry::lookupTarget(TheTriple.getTriple(), Err);
+ if (TheTarget == 0) {
+ errs() << argv[0] << ": error auto-selecting target for module '"
+ << Err << "'. Please use the -march option to explicitly "
+ << "pick a target.\n";
+ return 1;
+ }
+ }
+
+ // Package up features to be passed to target/subtarget
+ std::string FeaturesStr;
+ if (MAttrs.size()) {
+ SubtargetFeatures Features;
+ for (unsigned i = 0; i != MAttrs.size(); ++i)
+ Features.AddFeature(MAttrs[i]);
+ FeaturesStr = Features.getString();
+ }
+
+ CodeGenOpt::Level OLvl = CodeGenOpt::Default;
+ switch (OptLevel) {
+ default:
+ errs() << argv[0] << ": invalid optimization level.\n";
+ return 1;
+ case ' ': break;
+ case '0': OLvl = CodeGenOpt::None; break;
+ case '1': OLvl = CodeGenOpt::Less; break;
+ case '2': OLvl = CodeGenOpt::Default; break;
+ case '3': OLvl = CodeGenOpt::Aggressive; break;
+ }
+
+ TargetOptions Options;
+ Options.LessPreciseFPMADOption = EnableFPMAD;
+ Options.PrintMachineCode = PrintCode;
+ Options.NoFramePointerElim = DisableFPElim;
+ Options.NoFramePointerElimNonLeaf = DisableFPElimNonLeaf;
+ Options.NoExcessFPPrecision = DisableExcessPrecision;
+ Options.UnsafeFPMath = EnableUnsafeFPMath;
+ Options.NoInfsFPMath = EnableNoInfsFPMath;
+ Options.NoNaNsFPMath = EnableNoNaNsFPMath;
+ Options.HonorSignDependentRoundingFPMathOption =
+ EnableHonorSignDependentRoundingFPMath;
+ Options.UseSoftFloat = GenerateSoftFloatCalls;
+ if (FloatABIForCalls != FloatABI::Default)
+ Options.FloatABIType = FloatABIForCalls;
+ Options.NoZerosInBSS = DontPlaceZerosInBSS;
+ Options.JITExceptionHandling = EnableJITExceptionHandling;
+ Options.JITEmitDebugInfo = EmitJitDebugInfo;
+ Options.JITEmitDebugInfoToDisk = EmitJitDebugInfoToDisk;
+ Options.GuaranteedTailCallOpt = EnableGuaranteedTailCallOpt;
+ Options.DisableTailCalls = DisableTailCalls;
+ Options.StackAlignmentOverride = OverrideStackAlignment;
+ Options.RealignStack = EnableRealignStack;
+ Options.DisableJumpTables = DisableSwitchTables;
+ Options.TrapFuncName = TrapFuncName;
+ Options.PositionIndependentExecutable = EnablePIE;
+ Options.EnableSegmentedStacks = SegmentedStacks;
+
+ std::auto_ptr<TargetMachine>
+ target(TheTarget->createTargetMachine(TheTriple.getTriple(),
+ MCPU, FeaturesStr, Options,
+ RelocModel, CMModel, OLvl));
+ assert(target.get() && "Could not allocate target machine!");
+ TargetMachine &Target = *target.get();
+
+ if (DisableDotLoc)
+ Target.setMCUseLoc(false);
+
+ if (DisableCFI)
+ Target.setMCUseCFI(false);
+
+ if (EnableDwarfDirectory)
+ Target.setMCUseDwarfDirectory(true);
+
+ if (GenerateSoftFloatCalls)
+ FloatABIForCalls = FloatABI::Soft;
+
+ // Disable .loc support for older OS X versions.
+ if (TheTriple.isMacOSX() &&
+ TheTriple.isMacOSXVersionLT(10, 6))
+ Target.setMCUseLoc(false);
+
+ // Figure out where we are going to send the output...
+ OwningPtr<tool_output_file> Out
+ (GetOutputStream(TheTarget->getName(), TheTriple.getOS(), argv[0]));
+ if (!Out) return 1;
+
+ // Build up all of the passes that we want to do to the module.
+ PassManager PM;
+
+ // Add the target data from the target machine, if it exists, or the module.
+ if (const TargetData *TD = Target.getTargetData())
+ PM.add(new TargetData(*TD));
+ else
+ PM.add(new TargetData(&mod));
+
+ // Override default to generate verbose assembly.
+ Target.setAsmVerbosityDefault(true);
+
+ if (RelaxAll) {
+ if (FileType != TargetMachine::CGFT_ObjectFile)
+ errs() << argv[0]
+ << ": warning: ignoring -mc-relax-all because filetype != obj\n";
+ else
+ Target.setMCRelaxAll(true);
+ }
+
+ {
+ formatted_raw_ostream FOS(Out->os());
+
+ // Ask the target to add backend passes as necessary.
+ if (Target.addPassesToEmitFile(PM, FOS, FileType, NoVerify)) {
+ errs() << argv[0] << ": target does not support generation of this"
+ << " file type!\n";
+ return 1;
+ }
+
+ // Before executing passes, print the final values of the LLVM options.
+ cl::PrintOptionValues();
+
+ PM.run(mod);
+ }
+
+ // Declare success.
+ Out->keep();
+
+ return 0;
+}
diff --git a/contrib/llvm/tools/lli/lli.cpp b/contrib/llvm/tools/lli/lli.cpp
new file mode 100644
index 0000000..efcc1f5
--- /dev/null
+++ b/contrib/llvm/tools/lli/lli.cpp
@@ -0,0 +1,315 @@
+//===- lli.cpp - LLVM Interpreter / Dynamic compiler ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This utility provides a simple wrapper around the LLVM Execution Engines,
+// which allow the direct execution of LLVM programs through a Just-In-Time
+// compiler, or through an interpreter if no JIT is available for this platform.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Type.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/CodeGen/LinkAllCodegenComponents.h"
+#include "llvm/ExecutionEngine/GenericValue.h"
+#include "llvm/ExecutionEngine/Interpreter.h"
+#include "llvm/ExecutionEngine/JIT.h"
+#include "llvm/ExecutionEngine/JITEventListener.h"
+#include "llvm/ExecutionEngine/JITMemoryManager.h"
+#include "llvm/ExecutionEngine/MCJIT.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/IRReader.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/PluginLoader.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/TargetSelect.h"
+#include <cerrno>
+
+#ifdef __CYGWIN__
+#include <cygwin/version.h>
+#if defined(CYGWIN_VERSION_DLL_MAJOR) && CYGWIN_VERSION_DLL_MAJOR<1007
+#define DO_NOTHING_ATEXIT 1
+#endif
+#endif
+
+using namespace llvm;
+
+namespace {
+ cl::opt<std::string>
+ InputFile(cl::desc("<input bitcode>"), cl::Positional, cl::init("-"));
+
+ cl::list<std::string>
+ InputArgv(cl::ConsumeAfter, cl::desc("<program arguments>..."));
+
+ cl::opt<bool> ForceInterpreter("force-interpreter",
+ cl::desc("Force interpretation: disable JIT"),
+ cl::init(false));
+
+ cl::opt<bool> UseMCJIT(
+ "use-mcjit", cl::desc("Enable use of the MC-based JIT (if available)"),
+ cl::init(false));
+
+ // Determine optimization level.
+ cl::opt<char>
+ OptLevel("O",
+ cl::desc("Optimization level. [-O0, -O1, -O2, or -O3] "
+ "(default = '-O2')"),
+ cl::Prefix,
+ cl::ZeroOrMore,
+ cl::init(' '));
+
+ cl::opt<std::string>
+ TargetTriple("mtriple", cl::desc("Override target triple for module"));
+
+ cl::opt<std::string>
+ MArch("march",
+ cl::desc("Architecture to generate assembly for (see --version)"));
+
+ cl::opt<std::string>
+ MCPU("mcpu",
+ cl::desc("Target a specific cpu type (-mcpu=help for details)"),
+ cl::value_desc("cpu-name"),
+ cl::init(""));
+
+ cl::list<std::string>
+ MAttrs("mattr",
+ cl::CommaSeparated,
+ cl::desc("Target specific attributes (-mattr=help for details)"),
+ cl::value_desc("a1,+a2,-a3,..."));
+
+ cl::opt<std::string>
+ EntryFunc("entry-function",
+ cl::desc("Specify the entry function (default = 'main') "
+ "of the executable"),
+ cl::value_desc("function"),
+ cl::init("main"));
+
+ cl::opt<std::string>
+ FakeArgv0("fake-argv0",
+ cl::desc("Override the 'argv[0]' value passed into the executing"
+ " program"), cl::value_desc("executable"));
+
+ cl::opt<bool>
+ DisableCoreFiles("disable-core-files", cl::Hidden,
+ cl::desc("Disable emission of core files if possible"));
+
+ cl::opt<bool>
+ NoLazyCompilation("disable-lazy-compilation",
+ cl::desc("Disable JIT lazy compilation"),
+ cl::init(false));
+
+ cl::opt<Reloc::Model>
+ RelocModel("relocation-model",
+ cl::desc("Choose relocation model"),
+ cl::init(Reloc::Default),
+ cl::values(
+ clEnumValN(Reloc::Default, "default",
+ "Target default relocation model"),
+ clEnumValN(Reloc::Static, "static",
+ "Non-relocatable code"),
+ clEnumValN(Reloc::PIC_, "pic",
+ "Fully relocatable, position independent code"),
+ clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic",
+ "Relocatable external references, non-relocatable code"),
+ clEnumValEnd));
+
+ cl::opt<llvm::CodeModel::Model>
+ CMModel("code-model",
+ cl::desc("Choose code model"),
+ cl::init(CodeModel::JITDefault),
+ cl::values(clEnumValN(CodeModel::JITDefault, "default",
+ "Target default JIT code model"),
+ clEnumValN(CodeModel::Small, "small",
+ "Small code model"),
+ clEnumValN(CodeModel::Kernel, "kernel",
+ "Kernel code model"),
+ clEnumValN(CodeModel::Medium, "medium",
+ "Medium code model"),
+ clEnumValN(CodeModel::Large, "large",
+ "Large code model"),
+ clEnumValEnd));
+
+}
+
+static ExecutionEngine *EE = 0;
+
+static void do_shutdown() {
+ // Cygwin-1.5 invokes DLL's dtors before atexit handler.
+#ifndef DO_NOTHING_ATEXIT
+ delete EE;
+ llvm_shutdown();
+#endif
+}
+
+//===----------------------------------------------------------------------===//
+// main Driver function
+//
+int main(int argc, char **argv, char * const *envp) {
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(argc, argv);
+
+ LLVMContext &Context = getGlobalContext();
+ atexit(do_shutdown); // Call llvm_shutdown() on exit.
+
+ // If we have a native target, initialize it to ensure it is linked in and
+ // usable by the JIT.
+ InitializeNativeTarget();
+ InitializeNativeTargetAsmPrinter();
+
+ cl::ParseCommandLineOptions(argc, argv,
+ "llvm interpreter & dynamic compiler\n");
+
+ // If the user doesn't want core files, disable them.
+ if (DisableCoreFiles)
+ sys::Process::PreventCoreFiles();
+
+ // Load the bitcode...
+ SMDiagnostic Err;
+ Module *Mod = ParseIRFile(InputFile, Err, Context);
+ if (!Mod) {
+ Err.print(argv[0], errs());
+ return 1;
+ }
+
+ // If not jitting lazily, load the whole bitcode file eagerly too.
+ std::string ErrorMsg;
+ if (NoLazyCompilation) {
+ if (Mod->MaterializeAllPermanently(&ErrorMsg)) {
+ errs() << argv[0] << ": bitcode didn't read correctly.\n";
+ errs() << "Reason: " << ErrorMsg << "\n";
+ exit(1);
+ }
+ }
+
+ EngineBuilder builder(Mod);
+ builder.setMArch(MArch);
+ builder.setMCPU(MCPU);
+ builder.setMAttrs(MAttrs);
+ builder.setRelocationModel(RelocModel);
+ builder.setCodeModel(CMModel);
+ builder.setErrorStr(&ErrorMsg);
+ builder.setJITMemoryManager(ForceInterpreter ? 0 :
+ JITMemoryManager::CreateDefaultMemManager());
+ builder.setEngineKind(ForceInterpreter
+ ? EngineKind::Interpreter
+ : EngineKind::JIT);
+
+ // If we are supposed to override the target triple, do so now.
+ if (!TargetTriple.empty())
+ Mod->setTargetTriple(Triple::normalize(TargetTriple));
+
+ // Enable MCJIT if desired.
+ if (UseMCJIT && !ForceInterpreter) {
+ builder.setUseMCJIT(true);
+ builder.setJITMemoryManager(JITMemoryManager::CreateDefaultMemManager());
+ }
+
+ CodeGenOpt::Level OLvl = CodeGenOpt::Default;
+ switch (OptLevel) {
+ default:
+ errs() << argv[0] << ": invalid optimization level.\n";
+ return 1;
+ case ' ': break;
+ case '0': OLvl = CodeGenOpt::None; break;
+ case '1': OLvl = CodeGenOpt::Less; break;
+ case '2': OLvl = CodeGenOpt::Default; break;
+ case '3': OLvl = CodeGenOpt::Aggressive; break;
+ }
+ builder.setOptLevel(OLvl);
+
+ EE = builder.create();
+ if (!EE) {
+ if (!ErrorMsg.empty())
+ errs() << argv[0] << ": error creating EE: " << ErrorMsg << "\n";
+ else
+ errs() << argv[0] << ": unknown error creating EE!\n";
+ exit(1);
+ }
+
+ // The following functions have no effect if their respective profiling
+ // support wasn't enabled in the build configuration.
+ EE->RegisterJITEventListener(
+ JITEventListener::createOProfileJITEventListener());
+ EE->RegisterJITEventListener(
+ JITEventListener::createIntelJITEventListener());
+
+ EE->DisableLazyCompilation(NoLazyCompilation);
+
+ // If the user specifically requested an argv[0] to pass into the program,
+ // do it now.
+ if (!FakeArgv0.empty()) {
+ InputFile = FakeArgv0;
+ } else {
+ // Otherwise, if there is a .bc suffix on the executable, strip it off, since
+ // it might confuse the program.
+ if (StringRef(InputFile).endswith(".bc"))
+ InputFile.erase(InputFile.length() - 3);
+ }
+
+ // Add the module's name to the start of the vector of arguments to main().
+ InputArgv.insert(InputArgv.begin(), InputFile);
+
+ // Call the main function from M as if its signature were:
+ // int main (int argc, char **argv, const char **envp)
+ // using the contents of Args to determine argc & argv, and the contents of
+ // EnvVars to determine envp.
+ //
+ Function *EntryFn = Mod->getFunction(EntryFunc);
+ if (!EntryFn) {
+ errs() << '\'' << EntryFunc << "\' function not found in module.\n";
+ return -1;
+ }
+
+ // If the program doesn't explicitly call exit, we will need the Exit
+ // function later on to make an explicit call, so get the function now.
+ Constant *Exit = Mod->getOrInsertFunction("exit", Type::getVoidTy(Context),
+ Type::getInt32Ty(Context),
+ NULL);
+
+ // Reset errno to zero on entry to main.
+ errno = 0;
+
+ // Run static constructors.
+ EE->runStaticConstructorsDestructors(false);
+
+ if (NoLazyCompilation) {
+ for (Module::iterator I = Mod->begin(), E = Mod->end(); I != E; ++I) {
+ Function *Fn = &*I;
+ if (Fn != EntryFn && !Fn->isDeclaration())
+ EE->getPointerToFunction(Fn);
+ }
+ }
+
+ // Run main.
+ int Result = EE->runFunctionAsMain(EntryFn, InputArgv, envp);
+
+ // Run static destructors.
+ EE->runStaticConstructorsDestructors(true);
+
+ // If the program didn't call exit explicitly, we should call it now.
+ // This ensures that any atexit handlers get called correctly.
+ if (Function *ExitF = dyn_cast<Function>(Exit)) {
+ std::vector<GenericValue> Args;
+ GenericValue ResultGV;
+ ResultGV.IntVal = APInt(32, Result);
+ Args.push_back(ResultGV);
+ EE->runFunction(ExitF, Args);
+ errs() << "ERROR: exit(" << Result << ") returned!\n";
+ abort();
+ } else {
+ errs() << "ERROR: exit defined with wrong prototype!\n";
+ abort();
+ }
+}
diff --git a/contrib/llvm/tools/llvm-ar/llvm-ar.cpp b/contrib/llvm/tools/llvm-ar/llvm-ar.cpp
new file mode 100644
index 0000000..c1c8b24
--- /dev/null
+++ b/contrib/llvm/tools/llvm-ar/llvm-ar.cpp
@@ -0,0 +1,781 @@
+//===-- llvm-ar.cpp - LLVM archive librarian utility ----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Builds up (relatively) standard unix archive files (.a) containing LLVM
+// bitcode or other files.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Bitcode/Archive.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Signals.h"
+#include <algorithm>
+#include <memory>
+#include <fstream>
+using namespace llvm;
+
+// Option for compatibility with AIX; it is not used, but must be accepted if present.
+static cl::opt<bool>
+X32Option ("X32_64", cl::Hidden,
+ cl::desc("Ignored option for compatibility with AIX"));
+
+// llvm-ar operation code and modifier flags. This must come first.
+static cl::opt<std::string>
+Options(cl::Positional, cl::Required, cl::desc("{operation}[modifiers]..."));
+
+// llvm-ar remaining positional arguments.
+static cl::list<std::string>
+RestOfArgs(cl::Positional, cl::OneOrMore,
+ cl::desc("[relpos] [count] <archive-file> [members]..."));
+
+// MoreHelp - Provide additional help output explaining the operations and
+// modifiers of llvm-ar. This object instructs the CommandLine library
+// to print the text given to its constructor when the --help option is used.
+static cl::extrahelp MoreHelp(
+ "\nOPERATIONS:\n"
+ " d[NsS] - delete file(s) from the archive\n"
+ " m[abiSs] - move file(s) in the archive\n"
+ " p[kN] - print file(s) found in the archive\n"
+ " q[ufsS] - quick append file(s) to the archive\n"
+ " r[abfiuzRsS] - replace or insert file(s) into the archive\n"
+ " t - display contents of archive\n"
+ " x[No] - extract file(s) from the archive\n"
+ "\nMODIFIERS (operation specific):\n"
+ " [a] - put file(s) after [relpos]\n"
+ " [b] - put file(s) before [relpos] (same as [i])\n"
+ " [f] - truncate inserted file names\n"
+ " [i] - put file(s) before [relpos] (same as [b])\n"
+ " [k] - always print bitcode files (default is to skip them)\n"
+ " [N] - use instance [count] of name\n"
+ " [o] - preserve original dates\n"
+ " [P] - use full path names when matching\n"
+ " [R] - recurse through directories when inserting\n"
+ " [s] - create an archive index (cf. ranlib)\n"
+ " [S] - do not build a symbol table\n"
+ " [u] - update only files newer than archive contents\n"
+ " [z] - compress files before inserting/extracting\n"
+ "\nMODIFIERS (generic):\n"
+ " [c] - do not warn if the library had to be created\n"
+ " [v] - be verbose about actions taken\n"
+ " [V] - be *really* verbose about actions taken\n"
+);
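+// For example, "llvm-ar rcs libfoo.a a.o b.o" combines the 'r' operation with
+// the 'c' and 's' modifiers: replace or insert a.o and b.o into libfoo.a,
+// creating the archive without a warning and building a symbol table index.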
+
+// This enumeration delineates the kinds of operations on an archive
+// that are permitted.
+enum ArchiveOperation {
+ NoOperation, ///< An operation hasn't been specified
+ Print, ///< Print the contents of the archive
+ Delete, ///< Delete the specified members
+ Move, ///< Move members to end or as given by {a,b,i} modifiers
+ QuickAppend, ///< Quickly append to end of archive
+ ReplaceOrInsert, ///< Replace or Insert members
+ DisplayTable, ///< Display the table of contents
+ Extract ///< Extract files back to file system
+};
+
+// Modifiers to follow operation to vary behavior
+bool AddAfter = false; ///< 'a' modifier
+bool AddBefore = false; ///< 'b' modifier
+bool Create = false; ///< 'c' modifier
+bool TruncateNames = false; ///< 'f' modifier
+bool InsertBefore = false; ///< 'i' modifier
+bool DontSkipBitcode = false; ///< 'k' modifier
+bool UseCount = false; ///< 'N' modifier
+bool OriginalDates = false; ///< 'o' modifier
+bool FullPath = false; ///< 'P' modifier
+bool RecurseDirectories = false; ///< 'R' modifier
+bool SymTable = true; ///< 's' & 'S' modifiers
+bool OnlyUpdate = false; ///< 'u' modifier
+bool Verbose = false; ///< 'v' modifier
+bool ReallyVerbose = false; ///< 'V' modifier
+bool Compression = false; ///< 'z' modifier
+
+// Relative Positional Argument (for insert/move). This variable holds
+// the name of the archive member to which the 'a', 'b' or 'i' modifier
+// refers. Only one of 'a', 'b' or 'i' can be specified so we only need
+// one variable.
+std::string RelPos;
+
+// Select which of multiple entries in the archive with the same name should be
+// used (specified with -N) for the delete and extract operations.
+int Count = 1;
+
+// This variable holds the name of the archive file as given on the
+// command line.
+std::string ArchiveName;
+
+// This variable holds the list of member files to process, as given
+// on the command line.
+std::vector<std::string> Members;
+
+// This variable holds the (possibly expanded) list of path objects that
+// correspond to the files we will operate on.
+std::set<sys::Path> Paths;
+
+// The Archive object to which all the editing operations will be sent.
+Archive* TheArchive = 0;
+
+// getRelPos - Extract the member filename from the command line for
+// the [relpos] argument associated with a, b, and i modifiers
+void getRelPos() {
+ if(RestOfArgs.size() > 0) {
+ RelPos = RestOfArgs[0];
+ RestOfArgs.erase(RestOfArgs.begin());
+ }
+ else
+ throw "Expected [relpos] for a, b, or i modifier";
+}
+
+// getCount - Extract the [count] argument associated with the N modifier
+// from the command line and check its value.
+void getCount() {
+ if(RestOfArgs.size() > 0) {
+ Count = atoi(RestOfArgs[0].c_str());
+ RestOfArgs.erase(RestOfArgs.begin());
+ }
+ else
+ throw "Expected [count] value with N modifier";
+
+ // Non-positive counts are not allowed
+ if (Count < 1)
+ throw "Invalid [count] value (not a positive integer)";
+}
+
+// getArchive - Get the archive file name from the command line
+void getArchive() {
+ if(RestOfArgs.size() > 0) {
+ ArchiveName = RestOfArgs[0];
+ RestOfArgs.erase(RestOfArgs.begin());
+ }
+ else
+ throw "An archive name must be specified.";
+}
+
+// getMembers - Copy over remaining items in RestOfArgs to our Members vector
+// This is just for clarity.
+void getMembers() {
+ if(RestOfArgs.size() > 0)
+ Members = std::vector<std::string>(RestOfArgs);
+}
+
+// parseCommandLine - Parse the command line options as presented and return the
+// operation specified. Process all modifiers and check to make sure that
+// constraints on modifier/operation pairs have not been violated.
+ArchiveOperation parseCommandLine() {
+
+ // Keep track of number of operations. We can only specify one
+ // per execution.
+ unsigned NumOperations = 0;
+
+ // Keep track of the number of positional modifiers (a,b,i). Only
+ // one can be specified.
+ unsigned NumPositional = 0;
+
+ // Keep track of which operation was requested
+ ArchiveOperation Operation = NoOperation;
+
+ for(unsigned i=0; i<Options.size(); ++i) {
+ switch(Options[i]) {
+ case 'd': ++NumOperations; Operation = Delete; break;
+ case 'm': ++NumOperations; Operation = Move ; break;
+ case 'p': ++NumOperations; Operation = Print; break;
+ case 'q': ++NumOperations; Operation = QuickAppend; break;
+ case 'r': ++NumOperations; Operation = ReplaceOrInsert; break;
+ case 't': ++NumOperations; Operation = DisplayTable; break;
+ case 'x': ++NumOperations; Operation = Extract; break;
+ case 'c': Create = true; break;
+ case 'f': TruncateNames = true; break;
+ case 'k': DontSkipBitcode = true; break;
+ case 'l': /* accepted but unused */ break;
+ case 'o': OriginalDates = true; break;
+ case 'P': FullPath = true; break;
+ case 'R': RecurseDirectories = true; break;
+ case 's': SymTable = true; break;
+ case 'S': SymTable = false; break;
+ case 'u': OnlyUpdate = true; break;
+ case 'v': Verbose = true; break;
+ case 'V': Verbose = ReallyVerbose = true; break;
+ case 'z': Compression = true; break;
+ case 'a':
+ getRelPos();
+ AddAfter = true;
+ NumPositional++;
+ break;
+ case 'b':
+ getRelPos();
+ AddBefore = true;
+ NumPositional++;
+ break;
+ case 'i':
+ getRelPos();
+ InsertBefore = true;
+ NumPositional++;
+ break;
+ case 'N':
+ getCount();
+ UseCount = true;
+ break;
+ default:
+ cl::PrintHelpMessage();
+ }
+ }
+
+ // At this point, the next thing on the command line must be
+ // the archive name.
+ getArchive();
+
+ // Everything on the command line at this point is a member.
+ getMembers();
+
+ // Perform various checks on the operation/modifier specification
+ // to make sure we are dealing with a legal request.
+ if (NumOperations == 0)
+ throw "You must specify at least one of the operations";
+ if (NumOperations > 1)
+ throw "Only one operation may be specified";
+ if (NumPositional > 1)
+ throw "You may only specify one of a, b, and i modifiers";
+ if (AddAfter || AddBefore || InsertBefore)
+ if (Operation != Move && Operation != ReplaceOrInsert)
+ throw "The 'a', 'b' and 'i' modifiers can only be specified with "
+ "the 'm' or 'r' operations";
+ if (RecurseDirectories && Operation != ReplaceOrInsert)
+ throw "The 'R' modifiers is only applicabe to the 'r' operation";
+ if (OriginalDates && Operation != Extract)
+ throw "The 'o' modifier is only applicable to the 'x' operation";
+ if (TruncateNames && Operation!=QuickAppend && Operation!=ReplaceOrInsert)
+ throw "The 'f' modifier is only applicable to the 'q' and 'r' operations";
+ if (OnlyUpdate && Operation != ReplaceOrInsert)
+ throw "The 'u' modifier is only applicable to the 'r' operation";
+ if (Compression && Operation!=ReplaceOrInsert && Operation!=Extract)
+ throw "The 'z' modifier is only applicable to the 'r' and 'x' operations";
+ if (Count > 1 && Members.size() > 1)
+ throw "Only one member name may be specified with the 'N' modifier";
+
+ // Return the parsed operation to the caller
+ return Operation;
+}
+
+// recurseDirectories - Implements the "R" modifier. This function recursively
+// scans the directory given by 'path' (on behalf of buildPaths, below),
+// replacing any directories it finds with all of the files they contain. It
+// uses the sys::Path::getDirectoryContents method to perform the actual
+// directory scans.
+bool
+recurseDirectories(const sys::Path& path,
+ std::set<sys::Path>& result, std::string* ErrMsg) {
+ result.clear();
+ if (RecurseDirectories) {
+ std::set<sys::Path> content;
+ if (path.getDirectoryContents(content, ErrMsg))
+ return true;
+
+ for (std::set<sys::Path>::iterator I = content.begin(), E = content.end();
+ I != E; ++I) {
+ // Make sure it exists and is a directory
+ sys::PathWithStatus PwS(*I);
+ const sys::FileStatus *Status = PwS.getFileStatus(false, ErrMsg);
+ if (!Status)
+ return true;
+ if (Status->isDir) {
+ std::set<sys::Path> moreResults;
+ if (recurseDirectories(*I, moreResults, ErrMsg))
+ return true;
+ result.insert(moreResults.begin(), moreResults.end());
+ } else {
+ result.insert(*I);
+ }
+ }
+ }
+ return false;
+}
+
+// buildPaths - Convert the strings in the Members vector to sys::Path objects
+// and make sure they are valid and exist. This check is only needed for the
+// operations that add or replace files in the archive ('q' and 'r').
+bool buildPaths(bool checkExistence, std::string* ErrMsg) {
+ for (unsigned i = 0; i < Members.size(); i++) {
+ sys::Path aPath;
+ if (!aPath.set(Members[i]))
+ throw std::string("File member name invalid: ") + Members[i];
+ if (checkExistence) {
+ bool Exists;
+ if (sys::fs::exists(aPath.str(), Exists) || !Exists)
+ throw std::string("File does not exist: ") + Members[i];
+ std::string Err;
+ sys::PathWithStatus PwS(aPath);
+ const sys::FileStatus *si = PwS.getFileStatus(false, &Err);
+ if (!si)
+ throw Err;
+ if (si->isDir) {
+ std::set<sys::Path> dirpaths;
+ if (recurseDirectories(aPath, dirpaths, ErrMsg))
+ return true;
+ Paths.insert(dirpaths.begin(),dirpaths.end());
+ } else {
+ Paths.insert(aPath);
+ }
+ } else {
+ Paths.insert(aPath);
+ }
+ }
+ return false;
+}
+
+// printSymbolTable - print out the archive's symbol table.
+void printSymbolTable() {
+ outs() << "\nArchive Symbol Table:\n";
+ const Archive::SymTabType& symtab = TheArchive->getSymbolTable();
+ for (Archive::SymTabType::const_iterator I=symtab.begin(), E=symtab.end();
+ I != E; ++I ) {
+ unsigned offset = TheArchive->getFirstFileOffset() + I->second;
+ outs() << " " << format("%9u", offset) << "\t" << I->first <<"\n";
+ }
+}
+
+// doPrint - Implements the 'p' operation. This function traverses the archive
+// looking for members that match the path list. It takes care to uncompress
+// members that need it and to skip bitcode files unless the 'k' modifier was
+// given.
+bool doPrint(std::string* ErrMsg) {
+ if (buildPaths(false, ErrMsg))
+ return true;
+ unsigned countDown = Count;
+ for (Archive::iterator I = TheArchive->begin(), E = TheArchive->end();
+ I != E; ++I ) {
+ if (Paths.empty() ||
+ (std::find(Paths.begin(), Paths.end(), I->getPath()) != Paths.end())) {
+ if (countDown == 1) {
+ const char* data = reinterpret_cast<const char*>(I->getData());
+
+ // Skip things that don't make sense to print
+ if (I->isLLVMSymbolTable() || I->isSVR4SymbolTable() ||
+ I->isBSD4SymbolTable() || (!DontSkipBitcode && I->isBitcode()))
+ continue;
+
+ if (Verbose)
+ outs() << "Printing " << I->getPath().str() << "\n";
+
+ unsigned len = I->getSize();
+ outs().write(data, len);
+ } else {
+ countDown--;
+ }
+ }
+ }
+ return false;
+}
+
+// printMode - Utility function for printing out the file mode when the 't'
+// operation is in verbose mode.
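+// For example, printMode(6) emits "rw-"; doDisplayTable calls it three times
+// (owner, group, and other bits) to build the familiar rwxrwxrwx-style string.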
+void
+printMode(unsigned mode) {
+ if (mode & 004)
+ outs() << "r";
+ else
+ outs() << "-";
+ if (mode & 002)
+ outs() << "w";
+ else
+ outs() << "-";
+ if (mode & 001)
+ outs() << "x";
+ else
+ outs() << "-";
+}
+
+// doDisplayTable - Implement the 't' operation. This function prints out just
+// the file names of each of the members. However, if verbose mode is requested
+// ('v' modifier) then the file type, permission mode, user, group, size, and
+// modification time are also printed.
+bool
+doDisplayTable(std::string* ErrMsg) {
+ if (buildPaths(false, ErrMsg))
+ return true;
+ for (Archive::iterator I = TheArchive->begin(), E = TheArchive->end();
+ I != E; ++I ) {
+ if (Paths.empty() ||
+ (std::find(Paths.begin(), Paths.end(), I->getPath()) != Paths.end())) {
+ if (Verbose) {
+ // FIXME: Output should be this format:
+ // Zrw-r--r-- 500/ 500 525 Nov 8 17:42 2004 Makefile
+ if (I->isBitcode())
+ outs() << "b";
+ else if (I->isCompressed())
+ outs() << "Z";
+ else
+ outs() << " ";
+ unsigned mode = I->getMode();
+ printMode((mode >> 6) & 007);
+ printMode((mode >> 3) & 007);
+ printMode(mode & 007);
+ outs() << " " << format("%4u", I->getUser());
+ outs() << "/" << format("%4u", I->getGroup());
+ outs() << " " << format("%8u", I->getSize());
+ outs() << " " << format("%20s", I->getModTime().str().substr(4).c_str());
+ outs() << " " << I->getPath().str() << "\n";
+ } else {
+ outs() << I->getPath().str() << "\n";
+ }
+ }
+ }
+ if (ReallyVerbose)
+ printSymbolTable();
+ return false;
+}
+
+// doExtract - Implement the 'x' operation. This function extracts files back to
+// the file system, making sure to uncompress any that were compressed.
+bool
+doExtract(std::string* ErrMsg) {
+ if (buildPaths(false, ErrMsg))
+ return true;
+ for (Archive::iterator I = TheArchive->begin(), E = TheArchive->end();
+ I != E; ++I ) {
+ if (Paths.empty() ||
+ (std::find(Paths.begin(), Paths.end(), I->getPath()) != Paths.end())) {
+
+ // Make sure the intervening directories are created
+ if (I->hasPath()) {
+ sys::Path dirs(I->getPath());
+ dirs.eraseComponent();
+ if (dirs.createDirectoryOnDisk(/*create_parents=*/true, ErrMsg))
+ return true;
+ }
+
+ // Open up a file stream for writing
+ std::ios::openmode io_mode = std::ios::out | std::ios::trunc |
+ std::ios::binary;
+ std::ofstream file(I->getPath().c_str(), io_mode);
+
+ // Get the data and its length
+ const char* data = reinterpret_cast<const char*>(I->getData());
+ unsigned len = I->getSize();
+
+ // Write the data.
+ file.write(data,len);
+ file.close();
+
+ // If we're supposed to retain the original modification times, etc. do so
+ // now.
+ if (OriginalDates)
+ I->getPath().setStatusInfoOnDisk(I->getFileStatus());
+ }
+ }
+ return false;
+}
+
+// doDelete - Implement the delete operation. This function deletes zero or more
+// members from the archive. Note that if the count is specified, there should
+// be no more than one path in the Paths list or else this algorithm breaks.
+// That check is enforced in parseCommandLine (above).
+bool
+doDelete(std::string* ErrMsg) {
+ if (buildPaths(false, ErrMsg))
+ return true;
+ if (Paths.empty())
+ return false;
+ unsigned countDown = Count;
+ for (Archive::iterator I = TheArchive->begin(), E = TheArchive->end();
+ I != E; ) {
+ if (std::find(Paths.begin(), Paths.end(), I->getPath()) != Paths.end()) {
+ if (countDown == 1) {
+ Archive::iterator J = I;
+ ++I;
+ TheArchive->erase(J);
+ } else
+ countDown--;
+ } else {
+ ++I;
+ }
+ }
+
+ // We're done editing; reconstruct the archive.
+ if (TheArchive->writeToDisk(SymTable,TruncateNames,Compression,ErrMsg))
+ return true;
+ if (ReallyVerbose)
+ printSymbolTable();
+ return false;
+}
+
+// doMove - Implement the move operation. This function just re-arranges the
+// order of the archive members so that, when the archive is written, the
+// requested move of the members is accomplished. Note the use of the RelPos
+// variable to determine where the items should be moved to.
+bool
+doMove(std::string* ErrMsg) {
+ if (buildPaths(false, ErrMsg))
+ return true;
+
+ // By default and convention the place to move members to is the end of the
+ // archive.
+ Archive::iterator moveto_spot = TheArchive->end();
+
+ // However, if the relative positioning modifiers were used, we need to scan
+ // the archive to find the member in question. If we don't find it, that is
+ // not an error; we just move the members to the end.
+ if (AddBefore || InsertBefore || AddAfter) {
+ for (Archive::iterator I = TheArchive->begin(), E= TheArchive->end();
+ I != E; ++I ) {
+ if (RelPos == I->getPath().str()) {
+ if (AddAfter) {
+ moveto_spot = I;
+ moveto_spot++;
+ } else {
+ moveto_spot = I;
+ }
+ break;
+ }
+ }
+ }
+
+ // Keep a list of the paths remaining to be moved
+ std::set<sys::Path> remaining(Paths);
+
+ // Scan the archive again, this time looking for the members to move to the
+ // moveto_spot.
+ for (Archive::iterator I = TheArchive->begin(), E= TheArchive->end();
+ I != E && !remaining.empty(); ++I ) {
+ std::set<sys::Path>::iterator found =
+ std::find(remaining.begin(),remaining.end(),I->getPath());
+ if (found != remaining.end()) {
+ if (I != moveto_spot)
+ TheArchive->splice(moveto_spot,*TheArchive,I);
+ remaining.erase(found);
+ }
+ }
+
+ // We're done editing; reconstruct the archive.
+ if (TheArchive->writeToDisk(SymTable,TruncateNames,Compression,ErrMsg))
+ return true;
+ if (ReallyVerbose)
+ printSymbolTable();
+ return false;
+}
+
+// doQuickAppend - Implements the 'q' operation. This function just
+// indiscriminately adds the members to the archive and rebuilds it.
+bool
+doQuickAppend(std::string* ErrMsg) {
+ // Get the list of paths to append.
+ if (buildPaths(true, ErrMsg))
+ return true;
+ if (Paths.empty())
+ return false;
+
+ // Append them quickly.
+ for (std::set<sys::Path>::iterator PI = Paths.begin(), PE = Paths.end();
+ PI != PE; ++PI) {
+ if (TheArchive->addFileBefore(*PI,TheArchive->end(),ErrMsg))
+ return true;
+ }
+
+ // We're done editing; reconstruct the archive.
+ if (TheArchive->writeToDisk(SymTable,TruncateNames,Compression,ErrMsg))
+ return true;
+ if (ReallyVerbose)
+ printSymbolTable();
+ return false;
+}
+
+// doReplaceOrInsert - Implements the 'r' operation. This function will replace
+// any existing files or insert new ones into the archive.
+bool
+doReplaceOrInsert(std::string* ErrMsg) {
+
+ // Build the list of files to be added/replaced.
+ if (buildPaths(true, ErrMsg))
+ return true;
+ if (Paths.empty())
+ return false;
+
+ // Keep track of the paths that remain to be inserted.
+ std::set<sys::Path> remaining(Paths);
+
+ // Default the insertion spot to the end of the archive
+ Archive::iterator insert_spot = TheArchive->end();
+
+ // Iterate over the archive contents
+ for (Archive::iterator I = TheArchive->begin(), E = TheArchive->end();
+ I != E && !remaining.empty(); ++I ) {
+
+ // Determine if this archive member matches one of the paths we're trying
+ // to replace.
+
+ std::set<sys::Path>::iterator found = remaining.end();
+ for (std::set<sys::Path>::iterator RI = remaining.begin(),
+ RE = remaining.end(); RI != RE; ++RI ) {
+ std::string compare(RI->str());
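+ // When the 'f' (truncate names) modifier is in effect, member names are
+ // stored truncated, so compare against the final path component capped at
+ // 15 characters (e.g. "lib/averylongfilename.o" compares as
+ // "averylongfilena").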
+ if (TruncateNames && compare.length() > 15) {
+ const char* nm = compare.c_str();
+ unsigned len = compare.length();
+ size_t slashpos = compare.rfind('/');
+ if (slashpos != std::string::npos) {
+ nm += slashpos + 1;
+ len -= slashpos +1;
+ }
+ if (len > 15)
+ len = 15;
+ compare.assign(nm,len);
+ }
+ if (compare == I->getPath().str()) {
+ found = RI;
+ break;
+ }
+ }
+
+ if (found != remaining.end()) {
+ std::string Err;
+ sys::PathWithStatus PwS(*found);
+ const sys::FileStatus *si = PwS.getFileStatus(false, &Err);
+ if (!si)
+ return true;
+ if (!si->isDir) {
+ if (OnlyUpdate) {
+ // Replace the item only if it is newer.
+ if (si->modTime > I->getModTime())
+ if (I->replaceWith(*found, ErrMsg))
+ return true;
+ } else {
+ // Replace the item regardless of time stamp
+ if (I->replaceWith(*found, ErrMsg))
+ return true;
+ }
+ } else {
+ // We purposefully ignore directories.
+ }
+
+ // Remove it from our "to do" list
+ remaining.erase(found);
+ }
+
+ // Determine if this is the place where we should insert
+ if ((AddBefore || InsertBefore) && RelPos == I->getPath().str())
+ insert_spot = I;
+ else if (AddAfter && RelPos == I->getPath().str()) {
+ insert_spot = I;
+ insert_spot++;
+ }
+ }
+
+ // If we didn't replace all the members, some will remain and need to be
+ // inserted at the previously computed insert-spot.
+ if (!remaining.empty()) {
+ for (std::set<sys::Path>::iterator PI = remaining.begin(),
+ PE = remaining.end(); PI != PE; ++PI) {
+ if (TheArchive->addFileBefore(*PI,insert_spot, ErrMsg))
+ return true;
+ }
+ }
+
+ // We're done editing; reconstruct the archive.
+ if (TheArchive->writeToDisk(SymTable,TruncateNames,Compression,ErrMsg))
+ return true;
+ if (ReallyVerbose)
+ printSymbolTable();
+ return false;
+}
+
+// main - Main program for llvm-ar; see the comments in the code.
+int main(int argc, char **argv) {
+ // Print a stack trace if we signal out.
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(argc, argv);
+ LLVMContext &Context = getGlobalContext();
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+
+ // Have the command line options parsed and handle things
+ // like --help and --version.
+ cl::ParseCommandLineOptions(argc, argv,
+ "LLVM Archiver (llvm-ar)\n\n"
+ " This program archives bitcode files into single libraries\n"
+ );
+
+ int exitCode = 0;
+
+ // Make sure we don't exit with "unhandled exception".
+ try {
+ // Do our own parsing of the command line because the CommandLine utility
+ // can't handle the grouped positional parameters without a dash.
+ ArchiveOperation Operation = parseCommandLine();
+
+ // Check the path name of the archive
+ sys::Path ArchivePath;
+ if (!ArchivePath.set(ArchiveName))
+ throw std::string("Archive name invalid: ") + ArchiveName;
+
+ // Create or open the archive object.
+ bool Exists;
+ if (llvm::sys::fs::exists(ArchivePath.str(), Exists) || !Exists) {
+ // Warn that we are creating the archive unless the 'c' modifier was given.
+ if (!Create)
+ errs() << argv[0] << ": creating " << ArchivePath.str() << "\n";
+ TheArchive = Archive::CreateEmpty(ArchivePath, Context);
+ TheArchive->writeToDisk();
+ } else {
+ std::string Error;
+ TheArchive = Archive::OpenAndLoad(ArchivePath, Context, &Error);
+ if (TheArchive == 0) {
+ errs() << argv[0] << ": error loading '" << ArchivePath.str() << "': "
+ << Error << "!\n";
+ return 1;
+ }
+ }
+
+ // Make sure we're not fooling ourselves.
+ assert(TheArchive && "Unable to instantiate the archive");
+
+ // Make sure we clean up the archive even on failure.
+ std::auto_ptr<Archive> AutoArchive(TheArchive);
+
+ // Perform the operation
+ std::string ErrMsg;
+ bool haveError = false;
+ switch (Operation) {
+ case Print: haveError = doPrint(&ErrMsg); break;
+ case Delete: haveError = doDelete(&ErrMsg); break;
+ case Move: haveError = doMove(&ErrMsg); break;
+ case QuickAppend: haveError = doQuickAppend(&ErrMsg); break;
+ case ReplaceOrInsert: haveError = doReplaceOrInsert(&ErrMsg); break;
+ case DisplayTable: haveError = doDisplayTable(&ErrMsg); break;
+ case Extract: haveError = doExtract(&ErrMsg); break;
+ case NoOperation:
+ errs() << argv[0] << ": No operation was selected.\n";
+ break;
+ }
+ if (haveError) {
+ errs() << argv[0] << ": " << ErrMsg << "\n";
+ return 1;
+ }
+ } catch (const char*msg) {
+ // These errors are usage errors, thrown only by the various checks in the
+ // code above.
+ errs() << argv[0] << ": " << msg << "\n\n";
+ cl::PrintHelpMessage();
+ exitCode = 1;
+ } catch (const std::string& msg) {
+ // These errors are thrown by LLVM libraries (e.g. lib System) and represent
+ // more serious errors, so we bump the exitCode and don't print the usage.
+ errs() << argv[0] << ": " << msg << "\n";
+ exitCode = 2;
+ } catch (...) {
+ // This really shouldn't happen, but just in case ....
+ errs() << argv[0] << ": An unexpected unknown exception occurred.\n";
+ exitCode = 3;
+ }
+
+ // Return result code back to operating system.
+ return exitCode;
+}
diff --git a/contrib/llvm/tools/llvm-as/llvm-as.cpp b/contrib/llvm/tools/llvm-as/llvm-as.cpp
new file mode 100644
index 0000000..1def9a4
--- /dev/null
+++ b/contrib/llvm/tools/llvm-as/llvm-as.cpp
@@ -0,0 +1,119 @@
+//===--- llvm-as.cpp - The low-level LLVM assembler -----------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This utility may be invoked in the following manner:
+// llvm-as --help - Output information about command line switches
+// llvm-as [options] - Read LLVM asm from stdin, write bitcode to stdout
+// llvm-as [options] x.ll - Read LLVM asm from the x.ll file, write bitcode
+// to the x.bc file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Assembly/Parser.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/SystemUtils.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/Signals.h"
+#include <memory>
+using namespace llvm;
+
+static cl::opt<std::string>
+InputFilename(cl::Positional, cl::desc("<input .llvm file>"), cl::init("-"));
+
+static cl::opt<std::string>
+OutputFilename("o", cl::desc("Override output filename"),
+ cl::value_desc("filename"));
+
+static cl::opt<bool>
+Force("f", cl::desc("Enable binary output on terminals"));
+
+static cl::opt<bool>
+DisableOutput("disable-output", cl::desc("Disable output"), cl::init(false));
+
+static cl::opt<bool>
+DumpAsm("d", cl::desc("Print assembly as parsed"), cl::Hidden);
+
+static cl::opt<bool>
+DisableVerify("disable-verify", cl::Hidden,
+ cl::desc("Do not run verifier on input LLVM (dangerous!)"));
+
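+// WriteOutputFile - Emit the bitcode for module M, inferring the output file
+// name when -o was not given: for example, an input of "foo.ll" produces
+// "foo.bc", and reading from stdin ("-") writes to stdout.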
+static void WriteOutputFile(const Module *M) {
+ // Infer the output filename if needed.
+ if (OutputFilename.empty()) {
+ if (InputFilename == "-") {
+ OutputFilename = "-";
+ } else {
+ std::string IFN = InputFilename;
+ int Len = IFN.length();
+ if (Len >= 3 && IFN[Len-3] == '.' && IFN[Len-2] == 'l' && IFN[Len-1] == 'l') {
+ // Source ends in .ll
+ OutputFilename = std::string(IFN.begin(), IFN.end()-3);
+ } else {
+ OutputFilename = IFN; // Append a .bc to it
+ }
+ OutputFilename += ".bc";
+ }
+ }
+
+ std::string ErrorInfo;
+ OwningPtr<tool_output_file> Out
+ (new tool_output_file(OutputFilename.c_str(), ErrorInfo,
+ raw_fd_ostream::F_Binary));
+ if (!ErrorInfo.empty()) {
+ errs() << ErrorInfo << '\n';
+ exit(1);
+ }
+
+ if (Force || !CheckBitcodeOutputToConsole(Out->os(), true))
+ WriteBitcodeToFile(M, Out->os());
+
+ // Declare success.
+ Out->keep();
+}
+
+int main(int argc, char **argv) {
+ // Print a stack trace if we signal out.
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(argc, argv);
+ LLVMContext &Context = getGlobalContext();
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+ cl::ParseCommandLineOptions(argc, argv, "llvm .ll -> .bc assembler\n");
+
+ // Parse the file now...
+ SMDiagnostic Err;
+ std::auto_ptr<Module> M(ParseAssemblyFile(InputFilename, Err, Context));
+ if (M.get() == 0) {
+ Err.print(argv[0], errs());
+ return 1;
+ }
+
+ if (!DisableVerify) {
+ std::string Err;
+ if (verifyModule(*M.get(), ReturnStatusAction, &Err)) {
+ errs() << argv[0]
+ << ": assembly parsed, but does not verify as correct!\n";
+ errs() << Err;
+ return 1;
+ }
+ }
+
+ if (DumpAsm) errs() << "Here's the assembly:\n" << *M.get();
+
+ if (!DisableOutput)
+ WriteOutputFile(M.get());
+
+ return 0;
+}
diff --git a/contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp b/contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp
new file mode 100644
index 0000000..d630087
--- /dev/null
+++ b/contrib/llvm/tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp
@@ -0,0 +1,626 @@
+//===-- llvm-bcanalyzer.cpp - Bitcode Analyzer --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tool may be invoked in the following manner:
+// llvm-bcanalyzer [options] - Read LLVM bitcode from stdin
+// llvm-bcanalyzer [options] x.bc - Read LLVM bitcode from the x.bc file
+//
+// Options:
+// --help - Output information about command line switches
+// --dump - Dump low-level bitcode structure in readable format
+//
+// This tool provides analytical information about a bitcode file. It is
+// intended as an aid to developers of bitcode reading and writing software. It
+// produces, on standard output, a summary of the bitcode file that shows various
+// statistics about the contents of the file. By default this information is
+// detailed and contains information about individual bitcode blocks and the
+// functions in the module.
+// The tool is also able to print a bitcode file in a straightforward text
+// format that shows the containment and relationships of the information in
+// the bitcode file (-dump option).
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Bitcode/BitstreamReader.h"
+#include "llvm/Bitcode/LLVMBitCodes.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/system_error.h"
+#include <cstdio>
+#include <map>
+#include <algorithm>
+using namespace llvm;
+
+static cl::opt<std::string>
+ InputFilename(cl::Positional, cl::desc("<input bitcode>"), cl::init("-"));
+
+static cl::opt<bool> Dump("dump", cl::desc("Dump low level bitcode trace"));
+
+//===----------------------------------------------------------------------===//
+// Bitcode specific analysis.
+//===----------------------------------------------------------------------===//
+
+static cl::opt<bool> NoHistogram("disable-histogram",
+ cl::desc("Do not print per-code histogram"));
+
+static cl::opt<bool>
+NonSymbolic("non-symbolic",
+ cl::desc("Emit numeric info in dump even if"
+ " symbolic info is available"));
+
+namespace {
+
+/// CurStreamTypeType - A type for CurStreamType
+enum CurStreamTypeType {
+ UnknownBitstream,
+ LLVMIRBitstream
+};
+
+}
+
+/// CurStreamType - If we can sniff the flavor of this stream, we can produce
+/// better dump info.
+static CurStreamTypeType CurStreamType;
+
+
+/// GetBlockName - Return a symbolic block name if known, otherwise return
+/// null.
+static const char *GetBlockName(unsigned BlockID,
+ const BitstreamReader &StreamFile) {
+ // Standard blocks for all bitcode files.
+ if (BlockID < bitc::FIRST_APPLICATION_BLOCKID) {
+ if (BlockID == bitc::BLOCKINFO_BLOCK_ID)
+ return "BLOCKINFO_BLOCK";
+ return 0;
+ }
+
+ // Check to see if we have a blockinfo record for this block, with a name.
+ if (const BitstreamReader::BlockInfo *Info =
+ StreamFile.getBlockInfo(BlockID)) {
+ if (!Info->Name.empty())
+ return Info->Name.c_str();
+ }
+
+
+ if (CurStreamType != LLVMIRBitstream) return 0;
+
+ switch (BlockID) {
+ default: return 0;
+ case bitc::MODULE_BLOCK_ID: return "MODULE_BLOCK";
+ case bitc::PARAMATTR_BLOCK_ID: return "PARAMATTR_BLOCK";
+ case bitc::TYPE_BLOCK_ID_NEW: return "TYPE_BLOCK_ID";
+ case bitc::CONSTANTS_BLOCK_ID: return "CONSTANTS_BLOCK";
+ case bitc::FUNCTION_BLOCK_ID: return "FUNCTION_BLOCK";
+ case bitc::VALUE_SYMTAB_BLOCK_ID: return "VALUE_SYMTAB";
+ case bitc::METADATA_BLOCK_ID: return "METADATA_BLOCK";
+ case bitc::METADATA_ATTACHMENT_ID: return "METADATA_ATTACHMENT_BLOCK";
+ case bitc::USELIST_BLOCK_ID: return "USELIST_BLOCK_ID";
+ }
+}
+
+/// GetCodeName - Return a symbolic code name if known, otherwise return
+/// null.
+static const char *GetCodeName(unsigned CodeID, unsigned BlockID,
+ const BitstreamReader &StreamFile) {
+ // Standard blocks for all bitcode files.
+ if (BlockID < bitc::FIRST_APPLICATION_BLOCKID) {
+ if (BlockID == bitc::BLOCKINFO_BLOCK_ID) {
+ switch (CodeID) {
+ default: return 0;
+ case bitc::BLOCKINFO_CODE_SETBID: return "SETBID";
+ case bitc::BLOCKINFO_CODE_BLOCKNAME: return "BLOCKNAME";
+ case bitc::BLOCKINFO_CODE_SETRECORDNAME: return "SETRECORDNAME";
+ }
+ }
+ return 0;
+ }
+
+ // Check to see if we have a blockinfo record for this record, with a name.
+ if (const BitstreamReader::BlockInfo *Info =
+ StreamFile.getBlockInfo(BlockID)) {
+ for (unsigned i = 0, e = Info->RecordNames.size(); i != e; ++i)
+ if (Info->RecordNames[i].first == CodeID)
+ return Info->RecordNames[i].second.c_str();
+ }
+
+
+ if (CurStreamType != LLVMIRBitstream) return 0;
+
+ switch (BlockID) {
+ default: return 0;
+ case bitc::MODULE_BLOCK_ID:
+ switch (CodeID) {
+ default: return 0;
+ case bitc::MODULE_CODE_VERSION: return "VERSION";
+ case bitc::MODULE_CODE_TRIPLE: return "TRIPLE";
+ case bitc::MODULE_CODE_DATALAYOUT: return "DATALAYOUT";
+ case bitc::MODULE_CODE_ASM: return "ASM";
+ case bitc::MODULE_CODE_SECTIONNAME: return "SECTIONNAME";
+ case bitc::MODULE_CODE_DEPLIB: return "DEPLIB";
+ case bitc::MODULE_CODE_GLOBALVAR: return "GLOBALVAR";
+ case bitc::MODULE_CODE_FUNCTION: return "FUNCTION";
+ case bitc::MODULE_CODE_ALIAS: return "ALIAS";
+ case bitc::MODULE_CODE_PURGEVALS: return "PURGEVALS";
+ case bitc::MODULE_CODE_GCNAME: return "GCNAME";
+ }
+ case bitc::PARAMATTR_BLOCK_ID:
+ switch (CodeID) {
+ default: return 0;
+ case bitc::PARAMATTR_CODE_ENTRY: return "ENTRY";
+ }
+ case bitc::TYPE_BLOCK_ID_NEW:
+ switch (CodeID) {
+ default: return 0;
+ case bitc::TYPE_CODE_NUMENTRY: return "NUMENTRY";
+ case bitc::TYPE_CODE_VOID: return "VOID";
+ case bitc::TYPE_CODE_FLOAT: return "FLOAT";
+ case bitc::TYPE_CODE_DOUBLE: return "DOUBLE";
+ case bitc::TYPE_CODE_LABEL: return "LABEL";
+ case bitc::TYPE_CODE_OPAQUE: return "OPAQUE";
+ case bitc::TYPE_CODE_INTEGER: return "INTEGER";
+ case bitc::TYPE_CODE_POINTER: return "POINTER";
+ case bitc::TYPE_CODE_ARRAY: return "ARRAY";
+ case bitc::TYPE_CODE_VECTOR: return "VECTOR";
+ case bitc::TYPE_CODE_X86_FP80: return "X86_FP80";
+ case bitc::TYPE_CODE_FP128: return "FP128";
+ case bitc::TYPE_CODE_PPC_FP128: return "PPC_FP128";
+ case bitc::TYPE_CODE_METADATA: return "METADATA";
+ case bitc::TYPE_CODE_STRUCT_ANON: return "STRUCT_ANON";
+ case bitc::TYPE_CODE_STRUCT_NAME: return "STRUCT_NAME";
+ case bitc::TYPE_CODE_STRUCT_NAMED: return "STRUCT_NAMED";
+ case bitc::TYPE_CODE_FUNCTION: return "FUNCTION";
+ }
+
+ case bitc::CONSTANTS_BLOCK_ID:
+ switch (CodeID) {
+ default: return 0;
+ case bitc::CST_CODE_SETTYPE: return "SETTYPE";
+ case bitc::CST_CODE_NULL: return "NULL";
+ case bitc::CST_CODE_UNDEF: return "UNDEF";
+ case bitc::CST_CODE_INTEGER: return "INTEGER";
+ case bitc::CST_CODE_WIDE_INTEGER: return "WIDE_INTEGER";
+ case bitc::CST_CODE_FLOAT: return "FLOAT";
+ case bitc::CST_CODE_AGGREGATE: return "AGGREGATE";
+ case bitc::CST_CODE_STRING: return "STRING";
+ case bitc::CST_CODE_CSTRING: return "CSTRING";
+ case bitc::CST_CODE_CE_BINOP: return "CE_BINOP";
+ case bitc::CST_CODE_CE_CAST: return "CE_CAST";
+ case bitc::CST_CODE_CE_GEP: return "CE_GEP";
+ case bitc::CST_CODE_CE_INBOUNDS_GEP: return "CE_INBOUNDS_GEP";
+ case bitc::CST_CODE_CE_SELECT: return "CE_SELECT";
+ case bitc::CST_CODE_CE_EXTRACTELT: return "CE_EXTRACTELT";
+ case bitc::CST_CODE_CE_INSERTELT: return "CE_INSERTELT";
+ case bitc::CST_CODE_CE_SHUFFLEVEC: return "CE_SHUFFLEVEC";
+ case bitc::CST_CODE_CE_CMP: return "CE_CMP";
+ case bitc::CST_CODE_INLINEASM: return "INLINEASM";
+ case bitc::CST_CODE_CE_SHUFVEC_EX: return "CE_SHUFVEC_EX";
+ case bitc::CST_CODE_BLOCKADDRESS: return "CST_CODE_BLOCKADDRESS";
+ case bitc::CST_CODE_DATA: return "DATA";
+ }
+ case bitc::FUNCTION_BLOCK_ID:
+ switch (CodeID) {
+ default: return 0;
+ case bitc::FUNC_CODE_DECLAREBLOCKS: return "DECLAREBLOCKS";
+
+ case bitc::FUNC_CODE_INST_BINOP: return "INST_BINOP";
+ case bitc::FUNC_CODE_INST_CAST: return "INST_CAST";
+ case bitc::FUNC_CODE_INST_GEP: return "INST_GEP";
+ case bitc::FUNC_CODE_INST_INBOUNDS_GEP: return "INST_INBOUNDS_GEP";
+ case bitc::FUNC_CODE_INST_SELECT: return "INST_SELECT";
+ case bitc::FUNC_CODE_INST_EXTRACTELT: return "INST_EXTRACTELT";
+ case bitc::FUNC_CODE_INST_INSERTELT: return "INST_INSERTELT";
+ case bitc::FUNC_CODE_INST_SHUFFLEVEC: return "INST_SHUFFLEVEC";
+ case bitc::FUNC_CODE_INST_CMP: return "INST_CMP";
+
+ case bitc::FUNC_CODE_INST_RET: return "INST_RET";
+ case bitc::FUNC_CODE_INST_BR: return "INST_BR";
+ case bitc::FUNC_CODE_INST_SWITCH: return "INST_SWITCH";
+ case bitc::FUNC_CODE_INST_INVOKE: return "INST_INVOKE";
+ case bitc::FUNC_CODE_INST_UNREACHABLE: return "INST_UNREACHABLE";
+
+ case bitc::FUNC_CODE_INST_PHI: return "INST_PHI";
+ case bitc::FUNC_CODE_INST_ALLOCA: return "INST_ALLOCA";
+ case bitc::FUNC_CODE_INST_LOAD: return "INST_LOAD";
+ case bitc::FUNC_CODE_INST_VAARG: return "INST_VAARG";
+ case bitc::FUNC_CODE_INST_STORE: return "INST_STORE";
+ case bitc::FUNC_CODE_INST_EXTRACTVAL: return "INST_EXTRACTVAL";
+ case bitc::FUNC_CODE_INST_INSERTVAL: return "INST_INSERTVAL";
+ case bitc::FUNC_CODE_INST_CMP2: return "INST_CMP2";
+ case bitc::FUNC_CODE_INST_VSELECT: return "INST_VSELECT";
+ case bitc::FUNC_CODE_DEBUG_LOC_AGAIN: return "DEBUG_LOC_AGAIN";
+ case bitc::FUNC_CODE_INST_CALL: return "INST_CALL";
+ case bitc::FUNC_CODE_DEBUG_LOC: return "DEBUG_LOC";
+ }
+ case bitc::VALUE_SYMTAB_BLOCK_ID:
+ switch (CodeID) {
+ default: return 0;
+ case bitc::VST_CODE_ENTRY: return "ENTRY";
+ case bitc::VST_CODE_BBENTRY: return "BBENTRY";
+ }
+ case bitc::METADATA_ATTACHMENT_ID:
+ switch(CodeID) {
+ default:return 0;
+ case bitc::METADATA_ATTACHMENT: return "METADATA_ATTACHMENT";
+ }
+ case bitc::METADATA_BLOCK_ID:
+ switch(CodeID) {
+ default:return 0;
+ case bitc::METADATA_STRING: return "METADATA_STRING";
+ case bitc::METADATA_NAME: return "METADATA_NAME";
+ case bitc::METADATA_KIND: return "METADATA_KIND";
+ case bitc::METADATA_NODE: return "METADATA_NODE";
+ case bitc::METADATA_FN_NODE: return "METADATA_FN_NODE";
+ case bitc::METADATA_NAMED_NODE: return "METADATA_NAMED_NODE";
+ }
+ case bitc::USELIST_BLOCK_ID:
+ switch(CodeID) {
+ default:return 0;
+ case bitc::USELIST_CODE_ENTRY: return "USELIST_CODE_ENTRY";
+ }
+ }
+}
+
+struct PerRecordStats {
+ unsigned NumInstances;
+ unsigned NumAbbrev;
+ uint64_t TotalBits;
+
+ PerRecordStats() : NumInstances(0), NumAbbrev(0), TotalBits(0) {}
+};
+
+struct PerBlockIDStats {
+ /// NumInstances - This is the number of times this block ID has been seen.
+ unsigned NumInstances;
+
+ /// NumBits - The total size in bits of all of these blocks.
+ uint64_t NumBits;
+
+ /// NumSubBlocks - The total number of blocks these blocks contain.
+ unsigned NumSubBlocks;
+
+ /// NumAbbrevs - The total number of abbreviations.
+ unsigned NumAbbrevs;
+
+ /// NumRecords - The total number of records these blocks contain, and the
+ /// number that are abbreviated.
+ unsigned NumRecords, NumAbbreviatedRecords;
+
+ /// CodeFreq - Keep track of the number of times we see each code.
+ std::vector<PerRecordStats> CodeFreq;
+
+ PerBlockIDStats()
+ : NumInstances(0), NumBits(0),
+ NumSubBlocks(0), NumAbbrevs(0), NumRecords(0), NumAbbreviatedRecords(0) {}
+};
+
+static std::map<unsigned, PerBlockIDStats> BlockIDStats;
+
+
+
+/// Error - All bitcode analysis errors go through this function, making this a
+/// good place to set a breakpoint when debugging.
+static bool Error(const std::string &Err) {
+ errs() << Err << "\n";
+ return true;
+}
+
+/// ParseBlock - Read a block, updating statistics, etc.
+static bool ParseBlock(BitstreamCursor &Stream, unsigned IndentLevel) {
+ std::string Indent(IndentLevel*2, ' ');
+ uint64_t BlockBitStart = Stream.GetCurrentBitNo();
+ unsigned BlockID = Stream.ReadSubBlockID();
+
+ // Get the statistics for this BlockID.
+ PerBlockIDStats &BlockStats = BlockIDStats[BlockID];
+
+ BlockStats.NumInstances++;
+
+ // BLOCKINFO is a special part of the stream.
+ if (BlockID == bitc::BLOCKINFO_BLOCK_ID) {
+ if (Dump) outs() << Indent << "<BLOCKINFO_BLOCK/>\n";
+ if (Stream.ReadBlockInfoBlock())
+ return Error("Malformed BlockInfoBlock");
+ uint64_t BlockBitEnd = Stream.GetCurrentBitNo();
+ BlockStats.NumBits += BlockBitEnd-BlockBitStart;
+ return false;
+ }
+
+ unsigned NumWords = 0;
+ if (Stream.EnterSubBlock(BlockID, &NumWords))
+ return Error("Malformed block record");
+
+ const char *BlockName = 0;
+ if (Dump) {
+ outs() << Indent << "<";
+ if ((BlockName = GetBlockName(BlockID, *Stream.getBitStreamReader())))
+ outs() << BlockName;
+ else
+ outs() << "UnknownBlock" << BlockID;
+
+ if (NonSymbolic && BlockName)
+ outs() << " BlockID=" << BlockID;
+
+ outs() << " NumWords=" << NumWords
+ << " BlockCodeSize=" << Stream.GetAbbrevIDWidth() << ">\n";
+ }
+
+ SmallVector<uint64_t, 64> Record;
+
+ // Read all the records for this block.
+ while (1) {
+ if (Stream.AtEndOfStream())
+ return Error("Premature end of bitstream");
+
+ uint64_t RecordStartBit = Stream.GetCurrentBitNo();
+
+ // Read the code for this record.
+ unsigned AbbrevID = Stream.ReadCode();
+ switch (AbbrevID) {
+ case bitc::END_BLOCK: {
+ if (Stream.ReadBlockEnd())
+ return Error("Error at end of block");
+ uint64_t BlockBitEnd = Stream.GetCurrentBitNo();
+ BlockStats.NumBits += BlockBitEnd-BlockBitStart;
+ if (Dump) {
+ outs() << Indent << "</";
+ if (BlockName)
+ outs() << BlockName << ">\n";
+ else
+ outs() << "UnknownBlock" << BlockID << ">\n";
+ }
+ return false;
+ }
+ case bitc::ENTER_SUBBLOCK: {
+ uint64_t SubBlockBitStart = Stream.GetCurrentBitNo();
+ if (ParseBlock(Stream, IndentLevel+1))
+ return true;
+ ++BlockStats.NumSubBlocks;
+ uint64_t SubBlockBitEnd = Stream.GetCurrentBitNo();
+
+ // Don't include subblock sizes in the size of this block.
+ BlockBitStart += SubBlockBitEnd-SubBlockBitStart;
+ break;
+ }
+ case bitc::DEFINE_ABBREV:
+ Stream.ReadAbbrevRecord();
+ ++BlockStats.NumAbbrevs;
+ break;
+ default:
+ Record.clear();
+
+ ++BlockStats.NumRecords;
+ if (AbbrevID != bitc::UNABBREV_RECORD)
+ ++BlockStats.NumAbbreviatedRecords;
+
+ const char *BlobStart = 0;
+ unsigned BlobLen = 0;
+ unsigned Code = Stream.ReadRecord(AbbrevID, Record, BlobStart, BlobLen);
+
+
+
+ // Increment the # occurrences of this code.
+ if (BlockStats.CodeFreq.size() <= Code)
+ BlockStats.CodeFreq.resize(Code+1);
+ BlockStats.CodeFreq[Code].NumInstances++;
+ BlockStats.CodeFreq[Code].TotalBits +=
+ Stream.GetCurrentBitNo()-RecordStartBit;
+ if (AbbrevID != bitc::UNABBREV_RECORD)
+ BlockStats.CodeFreq[Code].NumAbbrev++;
+
+ if (Dump) {
+ outs() << Indent << " <";
+ if (const char *CodeName =
+ GetCodeName(Code, BlockID, *Stream.getBitStreamReader()))
+ outs() << CodeName;
+ else
+ outs() << "UnknownCode" << Code;
+ if (NonSymbolic &&
+ GetCodeName(Code, BlockID, *Stream.getBitStreamReader()))
+ outs() << " codeid=" << Code;
+ if (AbbrevID != bitc::UNABBREV_RECORD)
+ outs() << " abbrevid=" << AbbrevID;
+
+ for (unsigned i = 0, e = Record.size(); i != e; ++i)
+ outs() << " op" << i << "=" << (int64_t)Record[i];
+
+ outs() << "/>";
+
+ if (BlobStart) {
+ outs() << " blob data = ";
+ bool BlobIsPrintable = true;
+ for (unsigned i = 0; i != BlobLen; ++i)
+ if (!isprint(BlobStart[i])) {
+ BlobIsPrintable = false;
+ break;
+ }
+
+ if (BlobIsPrintable)
+ outs() << "'" << std::string(BlobStart, BlobStart+BlobLen) <<"'";
+ else
+ outs() << "unprintable, " << BlobLen << " bytes.";
+ }
+
+ outs() << "\n";
+ }
+
+ break;
+ }
+ }
+}
+
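+// PrintSize - Print a size as bits, bytes, and 32-bit words; for example, a
+// value of 64 bits is reported as "64b/8.00B/2W".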
+static void PrintSize(double Bits) {
+ fprintf(stderr, "%.2f/%.2fB/%luW", Bits, Bits/8,(unsigned long)(Bits/32));
+}
+static void PrintSize(uint64_t Bits) {
+ fprintf(stderr, "%lub/%.2fB/%luW", (unsigned long)Bits,
+ (double)Bits/8, (unsigned long)(Bits/32));
+}
+
+
+/// AnalyzeBitcode - Analyze the bitcode file specified by InputFilename.
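+/// Print a summary of the file: total size, detected stream type, per-block
+/// statistics, and, unless -disable-histogram is given, a per-record-code
+/// histogram.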
+static int AnalyzeBitcode() {
+ // Read the input file.
+ OwningPtr<MemoryBuffer> MemBuf;
+
+ if (error_code ec =
+ MemoryBuffer::getFileOrSTDIN(InputFilename.c_str(), MemBuf))
+ return Error("Error reading '" + InputFilename + "': " + ec.message());
+
+ if (MemBuf->getBufferSize() & 3)
+ return Error("Bitcode stream should be a multiple of 4 bytes in length");
+
+ const unsigned char *BufPtr = (unsigned char *)MemBuf->getBufferStart();
+ const unsigned char *EndBufPtr = BufPtr+MemBuf->getBufferSize();
+
+ // If we have a wrapper header, parse it and ignore the non-bc file contents.
+ // The magic number is 0x0B17C0DE stored in little endian.
+ if (isBitcodeWrapper(BufPtr, EndBufPtr))
+ if (SkipBitcodeWrapperHeader(BufPtr, EndBufPtr, true))
+ return Error("Invalid bitcode wrapper header");
+
+ BitstreamReader StreamFile(BufPtr, EndBufPtr);
+ BitstreamCursor Stream(StreamFile);
+ StreamFile.CollectBlockInfoNames();
+
+ // Read the stream signature.
+ char Signature[6];
+ Signature[0] = Stream.Read(8);
+ Signature[1] = Stream.Read(8);
+ Signature[2] = Stream.Read(4);
+ Signature[3] = Stream.Read(4);
+ Signature[4] = Stream.Read(4);
+ Signature[5] = Stream.Read(4);
+
+ // Autodetect the file contents, if it is one we know.
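+ // (An LLVM IR bitstream starts with the magic "BC" followed by 0xC0DE,
+ // emitted as the four 4-bit fields 0x0, 0xC, 0xE, 0xD read back above.)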
+ CurStreamType = UnknownBitstream;
+ if (Signature[0] == 'B' && Signature[1] == 'C' &&
+ Signature[2] == 0x0 && Signature[3] == 0xC &&
+ Signature[4] == 0xE && Signature[5] == 0xD)
+ CurStreamType = LLVMIRBitstream;
+
+ unsigned NumTopBlocks = 0;
+
+ // Parse the top-level structure. We only allow blocks at the top-level.
+ while (!Stream.AtEndOfStream()) {
+ unsigned Code = Stream.ReadCode();
+ if (Code != bitc::ENTER_SUBBLOCK)
+ return Error("Invalid record at top-level");
+
+ if (ParseBlock(Stream, 0))
+ return true;
+ ++NumTopBlocks;
+ }
+
+ if (Dump) outs() << "\n\n";
+
+ uint64_t BufferSizeBits = (EndBufPtr-BufPtr)*CHAR_BIT;
+ // Print a summary of the read file.
+ outs() << "Summary of " << InputFilename << ":\n";
+ outs() << " Total size: ";
+ PrintSize(BufferSizeBits);
+ outs() << "\n";
+ outs() << " Stream type: ";
+ switch (CurStreamType) {
+ case UnknownBitstream: outs() << "unknown\n"; break;
+ case LLVMIRBitstream: outs() << "LLVM IR\n"; break;
+ }
+ outs() << " # Toplevel Blocks: " << NumTopBlocks << "\n";
+ outs() << "\n";
+
+ // Emit per-block stats.
+ outs() << "Per-block Summary:\n";
+ for (std::map<unsigned, PerBlockIDStats>::iterator I = BlockIDStats.begin(),
+ E = BlockIDStats.end(); I != E; ++I) {
+ outs() << " Block ID #" << I->first;
+ if (const char *BlockName = GetBlockName(I->first, StreamFile))
+ outs() << " (" << BlockName << ")";
+ outs() << ":\n";
+
+ const PerBlockIDStats &Stats = I->second;
+ outs() << " Num Instances: " << Stats.NumInstances << "\n";
+ outs() << " Total Size: ";
+ PrintSize(Stats.NumBits);
+ outs() << "\n";
+ double pct = (Stats.NumBits * 100.0) / BufferSizeBits;
+ errs() << " Percent of file: " << format("%2.4f%%", pct) << "\n";
+ if (Stats.NumInstances > 1) {
+ outs() << " Average Size: ";
+ PrintSize(Stats.NumBits/(double)Stats.NumInstances);
+ outs() << "\n";
+ outs() << " Tot/Avg SubBlocks: " << Stats.NumSubBlocks << "/"
+ << Stats.NumSubBlocks/(double)Stats.NumInstances << "\n";
+ outs() << " Tot/Avg Abbrevs: " << Stats.NumAbbrevs << "/"
+ << Stats.NumAbbrevs/(double)Stats.NumInstances << "\n";
+ outs() << " Tot/Avg Records: " << Stats.NumRecords << "/"
+ << Stats.NumRecords/(double)Stats.NumInstances << "\n";
+ } else {
+ outs() << " Num SubBlocks: " << Stats.NumSubBlocks << "\n";
+ outs() << " Num Abbrevs: " << Stats.NumAbbrevs << "\n";
+ outs() << " Num Records: " << Stats.NumRecords << "\n";
+ }
+ if (Stats.NumRecords) {
+ double pct = (Stats.NumAbbreviatedRecords * 100.0) / Stats.NumRecords;
+ outs() << " Percent Abbrevs: " << format("%2.4f%%", pct) << "\n";
+ }
+ outs() << "\n";
+
+ // Print a histogram of the codes we see.
+ if (!NoHistogram && !Stats.CodeFreq.empty()) {
+ std::vector<std::pair<unsigned, unsigned> > FreqPairs; // <freq,code>
+ for (unsigned i = 0, e = Stats.CodeFreq.size(); i != e; ++i)
+ if (unsigned Freq = Stats.CodeFreq[i].NumInstances)
+ FreqPairs.push_back(std::make_pair(Freq, i));
+ std::stable_sort(FreqPairs.begin(), FreqPairs.end());
+ std::reverse(FreqPairs.begin(), FreqPairs.end());
+
+ outs() << "\tRecord Histogram:\n";
+ fprintf(stderr, "\t\t Count # Bits %% Abv Record Kind\n");
+ for (unsigned i = 0, e = FreqPairs.size(); i != e; ++i) {
+ const PerRecordStats &RecStats = Stats.CodeFreq[FreqPairs[i].second];
+
+ fprintf(stderr, "\t\t%7d %9lu ", RecStats.NumInstances,
+ (unsigned long)RecStats.TotalBits);
+
+ if (RecStats.NumAbbrev)
+ fprintf(stderr, "%7.2f ",
+ (double)RecStats.NumAbbrev/RecStats.NumInstances*100);
+ else
+ fprintf(stderr, " ");
+
+ if (const char *CodeName =
+ GetCodeName(FreqPairs[i].second, I->first, StreamFile))
+ fprintf(stderr, "%s\n", CodeName);
+ else
+ fprintf(stderr, "UnknownCode%d\n", FreqPairs[i].second);
+ }
+ outs() << "\n";
+
+ }
+ }
+ return 0;
+}
+
+
+int main(int argc, char **argv) {
+ // Print a stack trace if we signal out.
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(argc, argv);
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+ cl::ParseCommandLineOptions(argc, argv, "llvm-bcanalyzer file analyzer\n");
+
+ return AnalyzeBitcode();
+}
diff --git a/contrib/llvm/tools/llvm-diff/DiffConsumer.cpp b/contrib/llvm/tools/llvm-diff/DiffConsumer.cpp
new file mode 100644
index 0000000..0528039
--- /dev/null
+++ b/contrib/llvm/tools/llvm-diff/DiffConsumer.cpp
@@ -0,0 +1,215 @@
+//===-- DiffConsumer.cpp - Difference Consumer ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LLVM difference consumer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "DiffConsumer.h"
+
+#include "llvm/Module.h"
+#include "llvm/Instructions.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace llvm;
+
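+// ComputeNumbering - Assign sequential slot numbers to the unnamed values in F
+// (arguments first, then basic blocks and their non-void instructions in
+// order), mirroring the %0, %1, ... numbering used by the IR printer.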
+static void ComputeNumbering(Function *F, DenseMap<Value*,unsigned> &Numbering){
+ unsigned IN = 0;
+
+ // Arguments get the first numbers.
+ for (Function::arg_iterator
+ AI = F->arg_begin(), AE = F->arg_end(); AI != AE; ++AI)
+ if (!AI->hasName())
+ Numbering[&*AI] = IN++;
+
+ // Walk the basic blocks in order.
+ for (Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI) {
+ if (!FI->hasName())
+ Numbering[&*FI] = IN++;
+
+ // Walk the instructions in order.
+ for (BasicBlock::iterator BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
+ // void instructions don't get numbers.
+ if (!BI->hasName() && !BI->getType()->isVoidTy())
+ Numbering[&*BI] = IN++;
+ }
+
+ assert(!Numbering.empty() && "asked for numbering but numbering was no-op");
+}
+
+
+void Consumer::anchor() { }
+
+void DiffConsumer::printValue(Value *V, bool isL) {
+ if (V->hasName()) {
+ out << (isa<GlobalValue>(V) ? '@' : '%') << V->getName();
+ return;
+ }
+ if (V->getType()->isVoidTy()) {
+ if (isa<StoreInst>(V)) {
+ out << "store to ";
+ printValue(cast<StoreInst>(V)->getPointerOperand(), isL);
+ } else if (isa<CallInst>(V)) {
+ out << "call to ";
+ printValue(cast<CallInst>(V)->getCalledValue(), isL);
+ } else if (isa<InvokeInst>(V)) {
+ out << "invoke to ";
+ printValue(cast<InvokeInst>(V)->getCalledValue(), isL);
+ } else {
+ out << *V;
+ }
+ return;
+ }
+ if (isa<Constant>(V)) {
+ out << *V;
+ return;
+ }
+
+ unsigned N = contexts.size();
+ while (N > 0) {
+ --N;
+ DiffContext &ctxt = contexts[N];
+ if (!ctxt.IsFunction) continue;
+ if (isL) {
+ if (ctxt.LNumbering.empty())
+ ComputeNumbering(cast<Function>(ctxt.L), ctxt.LNumbering);
+ out << '%' << ctxt.LNumbering[V];
+ return;
+ } else {
+ if (ctxt.RNumbering.empty())
+ ComputeNumbering(cast<Function>(ctxt.R), ctxt.RNumbering);
+ out << '%' << ctxt.RNumbering[V];
+ return;
+ }
+ }
+
+ out << "<anonymous>";
+}
+
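+// header - Lazily print the enclosing function/block/instruction headers the
+// first time a difference is reported within those contexts.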
+void DiffConsumer::header() {
+ if (contexts.empty()) return;
+ for (SmallVectorImpl<DiffContext>::iterator
+ I = contexts.begin(), E = contexts.end(); I != E; ++I) {
+ if (I->Differences) continue;
+ if (isa<Function>(I->L)) {
+ // Extra newline between functions.
+ if (Differences) out << "\n";
+
+ Function *L = cast<Function>(I->L);
+ Function *R = cast<Function>(I->R);
+ if (L->getName() != R->getName())
+ out << "in function " << L->getName()
+ << " / " << R->getName() << ":\n";
+ else
+ out << "in function " << L->getName() << ":\n";
+ } else if (isa<BasicBlock>(I->L)) {
+ BasicBlock *L = cast<BasicBlock>(I->L);
+ BasicBlock *R = cast<BasicBlock>(I->R);
+ if (L->hasName() && R->hasName() && L->getName() == R->getName())
+ out << " in block %" << L->getName() << ":\n";
+ else {
+ out << " in block ";
+ printValue(L, true);
+ out << " / ";
+ printValue(R, false);
+ out << ":\n";
+ }
+ } else if (isa<Instruction>(I->L)) {
+ out << " in instruction ";
+ printValue(I->L, true);
+ out << " / ";
+ printValue(I->R, false);
+ out << ":\n";
+ }
+
+ I->Differences = true;
+ }
+}
+
+void DiffConsumer::indent() {
+ unsigned N = Indent;
+ while (N--) out << ' ';
+}
+
+bool DiffConsumer::hadDifferences() const {
+ return Differences;
+}
+
+void DiffConsumer::enterContext(Value *L, Value *R) {
+ contexts.push_back(DiffContext(L, R));
+ Indent += 2;
+}
+
+void DiffConsumer::exitContext() {
+ Differences |= contexts.back().Differences;
+ contexts.pop_back();
+ Indent -= 2;
+}
+
+void DiffConsumer::log(StringRef text) {
+ header();
+ indent();
+ out << text << '\n';
+}
+
+void DiffConsumer::logf(const LogBuilder &Log) {
+ header();
+ indent();
+
+ unsigned arg = 0;
+
+ StringRef format = Log.getFormat();
+ while (true) {
+ size_t percent = format.find('%');
+ if (percent == StringRef::npos) {
+ out << format;
+ break;
+ }
+ assert(format[percent] == '%');
+
+ if (percent > 0) out << format.substr(0, percent);
+
+ switch (format[percent+1]) {
+ case '%': out << '%'; break;
+ case 'l': printValue(Log.getArgument(arg++), true); break;
+ case 'r': printValue(Log.getArgument(arg++), false); break;
+ default: llvm_unreachable("unknown format character");
+ }
+
+ format = format.substr(percent+2);
+ }
+
+ out << '\n';
+}
+
+void DiffConsumer::logd(const DiffLogBuilder &Log) {
+ header();
+
+ for (unsigned I = 0, E = Log.getNumLines(); I != E; ++I) {
+ indent();
+ switch (Log.getLineKind(I)) {
+ case DC_match:
+ out << " ";
+ Log.getLeft(I)->dump();
+ //printValue(Log.getLeft(I), true);
+ break;
+ case DC_left:
+ out << "< ";
+ Log.getLeft(I)->dump();
+ //printValue(Log.getLeft(I), true);
+ break;
+ case DC_right:
+ out << "> ";
+ Log.getRight(I)->dump();
+ //printValue(Log.getRight(I), false);
+ break;
+ }
+ //out << "\n";
+ }
+}
diff --git a/contrib/llvm/tools/llvm-diff/DiffConsumer.h b/contrib/llvm/tools/llvm-diff/DiffConsumer.h
new file mode 100644
index 0000000..2060fe1
--- /dev/null
+++ b/contrib/llvm/tools/llvm-diff/DiffConsumer.h
@@ -0,0 +1,93 @@
+//===-- DiffConsumer.h - Difference Consumer --------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines the interface to the LLVM difference Consumer
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LLVM_DIFFCONSUMER_H_
+#define _LLVM_DIFFCONSUMER_H_
+
+#include "DiffLog.h"
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Casting.h"
+
+namespace llvm {
+ class Module;
+ class Value;
+ class Function;
+
+ /// The interface for consumers of difference data.
+ class Consumer {
+ virtual void anchor();
+ public:
+ /// Record that a local context has been entered. Left and
+ /// Right are IR "containers" of some sort which are being
+ /// considered for structural equivalence: global variables,
+ /// functions, blocks, instructions, etc.
+ virtual void enterContext(Value *Left, Value *Right) = 0;
+
+ /// Record that a local context has been exited.
+ virtual void exitContext() = 0;
+
+ /// Record a difference within the current context.
+ virtual void log(StringRef Text) = 0;
+
+ /// Record a formatted difference within the current context.
+ virtual void logf(const LogBuilder &Log) = 0;
+
+ /// Record a line-by-line instruction diff.
+ virtual void logd(const DiffLogBuilder &Log) = 0;
+
+ protected:
+ virtual ~Consumer() {}
+ };
+
+ class DiffConsumer : public Consumer {
+ private:
+ struct DiffContext {
+ DiffContext(Value *L, Value *R)
+ : L(L), R(R), Differences(false), IsFunction(isa<Function>(L)) {}
+ Value *L;
+ Value *R;
+ bool Differences;
+ bool IsFunction;
+ DenseMap<Value*,unsigned> LNumbering;
+ DenseMap<Value*,unsigned> RNumbering;
+ };
+
+ raw_ostream &out;
+ Module *LModule;
+ Module *RModule;
+ SmallVector<DiffContext, 5> contexts;
+ bool Differences;
+ unsigned Indent;
+
+ void printValue(Value *V, bool isL);
+ void header();
+ void indent();
+
+ public:
+ DiffConsumer(Module *L, Module *R)
+ : out(errs()), LModule(L), RModule(R), Differences(false), Indent(0) {}
+
+ bool hadDifferences() const;
+ void enterContext(Value *L, Value *R);
+ void exitContext();
+ void log(StringRef text);
+ void logf(const LogBuilder &Log);
+ void logd(const DiffLogBuilder &Log);
+ };
+}
+
+#endif
diff --git a/contrib/llvm/tools/llvm-diff/DiffLog.cpp b/contrib/llvm/tools/llvm-diff/DiffLog.cpp
new file mode 100644
index 0000000..9cc0c88
--- /dev/null
+++ b/contrib/llvm/tools/llvm-diff/DiffLog.cpp
@@ -0,0 +1,53 @@
+//===-- DiffLog.cpp - Difference Log Builder and accessories ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LLVM difference log builder.
+//
+//===----------------------------------------------------------------------===//
+
+#include "DiffLog.h"
+#include "DiffConsumer.h"
+
+#include "llvm/Instructions.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+
+using namespace llvm;
+
+LogBuilder::~LogBuilder() {
+ consumer.logf(*this);
+}
+
+StringRef LogBuilder::getFormat() const { return Format; }
+
+unsigned LogBuilder::getNumArguments() const { return Arguments.size(); }
+Value *LogBuilder::getArgument(unsigned I) const { return Arguments[I]; }
+
+DiffLogBuilder::~DiffLogBuilder() { consumer.logd(*this); }
+
+void DiffLogBuilder::addMatch(Instruction *L, Instruction *R) {
+ Diff.push_back(DiffRecord(L, R));
+}
+void DiffLogBuilder::addLeft(Instruction *L) {
+ // HACK: VS 2010 has a bug in the stdlib that requires this.
+ Diff.push_back(DiffRecord(L, DiffRecord::second_type(0)));
+}
+void DiffLogBuilder::addRight(Instruction *R) {
+ // HACK: VS 2010 has a bug in the stdlib that requires this.
+ Diff.push_back(DiffRecord(DiffRecord::first_type(0), R));
+}
+
+unsigned DiffLogBuilder::getNumLines() const { return Diff.size(); }
+
+DiffChange DiffLogBuilder::getLineKind(unsigned I) const {
+ return (Diff[I].first ? (Diff[I].second ? DC_match : DC_left)
+ : DC_right);
+}
+Instruction *DiffLogBuilder::getLeft(unsigned I) const { return Diff[I].first; }
+Instruction *DiffLogBuilder::getRight(unsigned I) const { return Diff[I].second; }
diff --git a/contrib/llvm/tools/llvm-diff/DiffLog.h b/contrib/llvm/tools/llvm-diff/DiffLog.h
new file mode 100644
index 0000000..43e318a
--- /dev/null
+++ b/contrib/llvm/tools/llvm-diff/DiffLog.h
@@ -0,0 +1,80 @@
+//===-- DiffLog.h - Difference Log Builder and accessories ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines the interface to the LLVM difference log builder.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LLVM_DIFFLOG_H_
+#define _LLVM_DIFFLOG_H_
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+
+namespace llvm {
+ class Instruction;
+ class Value;
+ class Consumer;
+
+  /// The kind of a line in an instruction diff: present on both sides,
+  /// only on the left, or only on the right.
+ enum DiffChange { DC_match, DC_left, DC_right };
+
+ /// A temporary-object class for building up log messages.
+ class LogBuilder {
+ Consumer &consumer;
+
+ /// The use of a stored StringRef here is okay because
+ /// LogBuilder should be used only as a temporary, and as a
+ /// temporary it will be destructed before whatever temporary
+ /// might be initializing this format.
+ StringRef Format;
+
+ SmallVector<Value*, 4> Arguments;
+
+ public:
+ LogBuilder(Consumer &c, StringRef Format)
+ : consumer(c), Format(Format) {}
+
+ LogBuilder &operator<<(Value *V) {
+ Arguments.push_back(V);
+ return *this;
+ }
+
+ ~LogBuilder();
+
+ StringRef getFormat() const;
+ unsigned getNumArguments() const;
+ Value *getArgument(unsigned I) const;
+ };
+
+ /// A temporary-object class for building up diff messages.
+ class DiffLogBuilder {
+ typedef std::pair<Instruction*,Instruction*> DiffRecord;
+ SmallVector<DiffRecord, 20> Diff;
+
+ Consumer &consumer;
+
+ public:
+ DiffLogBuilder(Consumer &c) : consumer(c) {}
+ ~DiffLogBuilder();
+
+ void addMatch(Instruction *L, Instruction *R);
+ // HACK: VS 2010 has a bug in the stdlib that requires this.
+ void addLeft(Instruction *L);
+ void addRight(Instruction *R);
+
+ unsigned getNumLines() const;
+ DiffChange getLineKind(unsigned I) const;
+ Instruction *getLeft(unsigned I) const;
+ Instruction *getRight(unsigned I) const;
+ };
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/llvm-diff/DifferenceEngine.cpp b/contrib/llvm/tools/llvm-diff/DifferenceEngine.cpp
new file mode 100644
index 0000000..a5a99f5
--- /dev/null
+++ b/contrib/llvm/tools/llvm-diff/DifferenceEngine.cpp
@@ -0,0 +1,683 @@
+//===-- DifferenceEngine.cpp - Structural function/module comparison ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the LLVM difference engine, which structurally
+// compares global values within a module.
+//
+//===----------------------------------------------------------------------===//
+
+#include "DifferenceEngine.h"
+
+#include "llvm/Constants.h"
+#include "llvm/Function.h"
+#include "llvm/Instructions.h"
+#include "llvm/Module.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/type_traits.h"
+
+#include <utility>
+
+using namespace llvm;
+
+namespace {
+
+/// A priority queue, implemented as a heap.
+template <class T, class Sorter, unsigned InlineCapacity>
+class PriorityQueue {
+ Sorter Precedes;
+ llvm::SmallVector<T, InlineCapacity> Storage;
+
+public:
+ PriorityQueue(const Sorter &Precedes) : Precedes(Precedes) {}
+
+ /// Checks whether the heap is empty.
+ bool empty() const { return Storage.empty(); }
+
+ /// Insert a new value on the heap.
+ void insert(const T &V) {
+ unsigned Index = Storage.size();
+ Storage.push_back(V);
+ if (Index == 0) return;
+
+ T *data = Storage.data();
+ while (true) {
+ unsigned Target = (Index + 1) / 2 - 1;
+ if (!Precedes(data[Index], data[Target])) return;
+ std::swap(data[Index], data[Target]);
+ if (Target == 0) return;
+ Index = Target;
+ }
+ }
+
+ /// Remove the minimum value in the heap. Only valid on a non-empty heap.
+ T remove_min() {
+ assert(!empty());
+ T tmp = Storage[0];
+
+ unsigned NewSize = Storage.size() - 1;
+ if (NewSize) {
+ // Move the slot at the end to the beginning.
+ if (isPodLike<T>::value)
+ Storage[0] = Storage[NewSize];
+ else
+ std::swap(Storage[0], Storage[NewSize]);
+
+ // Bubble the root up as necessary.
+ unsigned Index = 0;
+ while (true) {
+ // With a 1-based index, the children would be Index*2 and Index*2+1.
+ unsigned R = (Index + 1) * 2;
+ unsigned L = R - 1;
+
+ // If R is out of bounds, we're done after this in any case.
+ if (R >= NewSize) {
+ // If L is also out of bounds, we're done immediately.
+ if (L >= NewSize) break;
+
+ // Otherwise, test whether we should swap L and Index.
+ if (Precedes(Storage[L], Storage[Index]))
+ std::swap(Storage[L], Storage[Index]);
+ break;
+ }
+
+ // Otherwise, we need to compare with the smaller of L and R.
+ // Prefer R because it's closer to the end of the array.
+ unsigned IndexToTest = (Precedes(Storage[L], Storage[R]) ? L : R);
+
+ // If Index is >= the min of L and R, then heap ordering is restored.
+ if (!Precedes(Storage[IndexToTest], Storage[Index]))
+ break;
+
+ // Otherwise, keep bubbling up.
+ std::swap(Storage[IndexToTest], Storage[Index]);
+ Index = IndexToTest;
+ }
+ }
+ Storage.pop_back();
+
+ return tmp;
+ }
+};
+
+/// A function-scope difference engine.
+class FunctionDifferenceEngine {
+ DifferenceEngine &Engine;
+
+ /// The current mapping from old local values to new local values.
+ DenseMap<Value*, Value*> Values;
+
+ /// The current mapping from old blocks to new blocks.
+ DenseMap<BasicBlock*, BasicBlock*> Blocks;
+
+ DenseSet<std::pair<Value*, Value*> > TentativeValues;
+
+ unsigned getUnprocPredCount(BasicBlock *Block) const {
+ unsigned Count = 0;
+ for (pred_iterator I = pred_begin(Block), E = pred_end(Block); I != E; ++I)
+ if (!Blocks.count(*I)) Count++;
+ return Count;
+ }
+
+ typedef std::pair<BasicBlock*, BasicBlock*> BlockPair;
+
+ /// A type which sorts a priority queue by the number of unprocessed
+ /// predecessor blocks it has remaining.
+ ///
+ /// This is actually really expensive to calculate.
+ struct QueueSorter {
+ const FunctionDifferenceEngine &fde;
+ explicit QueueSorter(const FunctionDifferenceEngine &fde) : fde(fde) {}
+
+ bool operator()(const BlockPair &Old, const BlockPair &New) {
+ return fde.getUnprocPredCount(Old.first)
+ < fde.getUnprocPredCount(New.first);
+ }
+ };
+
+ /// A queue of unified blocks to process.
+ PriorityQueue<BlockPair, QueueSorter, 20> Queue;
+
+ /// Try to unify the given two blocks. Enqueues them for processing
+ /// if they haven't already been processed.
+ ///
+ /// Returns true if there was a problem unifying them.
+ bool tryUnify(BasicBlock *L, BasicBlock *R) {
+ BasicBlock *&Ref = Blocks[L];
+
+ if (Ref) {
+ if (Ref == R) return false;
+
+ Engine.logf("successor %l cannot be equivalent to %r; "
+ "it's already equivalent to %r")
+ << L << R << Ref;
+ return true;
+ }
+
+ Ref = R;
+ Queue.insert(BlockPair(L, R));
+ return false;
+ }
+
+ /// Unifies two instructions, given that they're known not to have
+ /// structural differences.
+ void unify(Instruction *L, Instruction *R) {
+ DifferenceEngine::Context C(Engine, L, R);
+
+ bool Result = diff(L, R, true, true);
+ assert(!Result && "structural differences second time around?");
+ (void) Result;
+ if (!L->use_empty())
+ Values[L] = R;
+ }
+
+ void processQueue() {
+ while (!Queue.empty()) {
+ BlockPair Pair = Queue.remove_min();
+ diff(Pair.first, Pair.second);
+ }
+ }
+
+ void diff(BasicBlock *L, BasicBlock *R) {
+ DifferenceEngine::Context C(Engine, L, R);
+
+ BasicBlock::iterator LI = L->begin(), LE = L->end();
+ BasicBlock::iterator RI = R->begin();
+
+ llvm::SmallVector<std::pair<Instruction*,Instruction*>, 20> TentativePairs;
+
+ do {
+ assert(LI != LE && RI != R->end());
+ Instruction *LeftI = &*LI, *RightI = &*RI;
+
+ // If the instructions differ, start the more sophisticated diff
+ // algorithm at the start of the block.
+ if (diff(LeftI, RightI, false, false)) {
+ TentativeValues.clear();
+ return runBlockDiff(L->begin(), R->begin());
+ }
+
+ // Otherwise, tentatively unify them.
+ if (!LeftI->use_empty())
+ TentativeValues.insert(std::make_pair(LeftI, RightI));
+
+ ++LI, ++RI;
+ } while (LI != LE); // This is sufficient: we can't get equality of
+ // terminators if there are residual instructions.
+
+ // Unify everything in the block, non-tentatively this time.
+ TentativeValues.clear();
+ for (LI = L->begin(), RI = R->begin(); LI != LE; ++LI, ++RI)
+ unify(&*LI, &*RI);
+ }
+
+ bool matchForBlockDiff(Instruction *L, Instruction *R);
+ void runBlockDiff(BasicBlock::iterator LI, BasicBlock::iterator RI);
+
+ bool diffCallSites(CallSite L, CallSite R, bool Complain) {
+ // FIXME: call attributes
+ if (!equivalentAsOperands(L.getCalledValue(), R.getCalledValue())) {
+ if (Complain) Engine.log("called functions differ");
+ return true;
+ }
+ if (L.arg_size() != R.arg_size()) {
+ if (Complain) Engine.log("argument counts differ");
+ return true;
+ }
+ for (unsigned I = 0, E = L.arg_size(); I != E; ++I)
+ if (!equivalentAsOperands(L.getArgument(I), R.getArgument(I))) {
+ if (Complain)
+ Engine.logf("arguments %l and %r differ")
+ << L.getArgument(I) << R.getArgument(I);
+ return true;
+ }
+ return false;
+ }
+
+ bool diff(Instruction *L, Instruction *R, bool Complain, bool TryUnify) {
+ // FIXME: metadata (if Complain is set)
+
+ // Different opcodes always imply different operations.
+ if (L->getOpcode() != R->getOpcode()) {
+ if (Complain) Engine.log("different instruction types");
+ return true;
+ }
+
+ if (isa<CmpInst>(L)) {
+ if (cast<CmpInst>(L)->getPredicate()
+ != cast<CmpInst>(R)->getPredicate()) {
+ if (Complain) Engine.log("different predicates");
+ return true;
+ }
+ } else if (isa<CallInst>(L)) {
+ return diffCallSites(CallSite(L), CallSite(R), Complain);
+ } else if (isa<PHINode>(L)) {
+ // FIXME: implement.
+
+ // This is really weird; type uniquing is broken?
+ if (L->getType() != R->getType()) {
+ if (!L->getType()->isPointerTy() || !R->getType()->isPointerTy()) {
+ if (Complain) Engine.log("different phi types");
+ return true;
+ }
+ }
+ return false;
+
+ // Terminators.
+ } else if (isa<InvokeInst>(L)) {
+ InvokeInst *LI = cast<InvokeInst>(L);
+ InvokeInst *RI = cast<InvokeInst>(R);
+ if (diffCallSites(CallSite(LI), CallSite(RI), Complain))
+ return true;
+
+ if (TryUnify) {
+ tryUnify(LI->getNormalDest(), RI->getNormalDest());
+ tryUnify(LI->getUnwindDest(), RI->getUnwindDest());
+ }
+ return false;
+
+ } else if (isa<BranchInst>(L)) {
+ BranchInst *LI = cast<BranchInst>(L);
+ BranchInst *RI = cast<BranchInst>(R);
+ if (LI->isConditional() != RI->isConditional()) {
+ if (Complain) Engine.log("branch conditionality differs");
+ return true;
+ }
+
+ if (LI->isConditional()) {
+ if (!equivalentAsOperands(LI->getCondition(), RI->getCondition())) {
+ if (Complain) Engine.log("branch conditions differ");
+ return true;
+ }
+ if (TryUnify) tryUnify(LI->getSuccessor(1), RI->getSuccessor(1));
+ }
+ if (TryUnify) tryUnify(LI->getSuccessor(0), RI->getSuccessor(0));
+ return false;
+
+ } else if (isa<SwitchInst>(L)) {
+ SwitchInst *LI = cast<SwitchInst>(L);
+ SwitchInst *RI = cast<SwitchInst>(R);
+ if (!equivalentAsOperands(LI->getCondition(), RI->getCondition())) {
+ if (Complain) Engine.log("switch conditions differ");
+ return true;
+ }
+ if (TryUnify) tryUnify(LI->getDefaultDest(), RI->getDefaultDest());
+
+ bool Difference = false;
+
+ DenseMap<ConstantInt*,BasicBlock*> LCases;
+
+ for (SwitchInst::CaseIt I = LI->case_begin(), E = LI->case_end();
+ I != E; ++I)
+ LCases[I.getCaseValue()] = I.getCaseSuccessor();
+
+ for (SwitchInst::CaseIt I = RI->case_begin(), E = RI->case_end();
+ I != E; ++I) {
+ ConstantInt *CaseValue = I.getCaseValue();
+ BasicBlock *LCase = LCases[CaseValue];
+ if (LCase) {
+ if (TryUnify) tryUnify(LCase, I.getCaseSuccessor());
+ LCases.erase(CaseValue);
+ } else if (Complain || !Difference) {
+ if (Complain)
+ Engine.logf("right switch has extra case %r") << CaseValue;
+ Difference = true;
+ }
+ }
+ if (!Difference)
+ for (DenseMap<ConstantInt*,BasicBlock*>::iterator
+ I = LCases.begin(), E = LCases.end(); I != E; ++I) {
+ if (Complain)
+ Engine.logf("left switch has extra case %l") << I->first;
+ Difference = true;
+ }
+ return Difference;
+ } else if (isa<UnreachableInst>(L)) {
+ return false;
+ }
+
+ if (L->getNumOperands() != R->getNumOperands()) {
+ if (Complain) Engine.log("instructions have different operand counts");
+ return true;
+ }
+
+ for (unsigned I = 0, E = L->getNumOperands(); I != E; ++I) {
+ Value *LO = L->getOperand(I), *RO = R->getOperand(I);
+ if (!equivalentAsOperands(LO, RO)) {
+ if (Complain) Engine.logf("operands %l and %r differ") << LO << RO;
+ return true;
+ }
+ }
+
+ return false;
+ }
+
+ bool equivalentAsOperands(Constant *L, Constant *R) {
+ // Use equality as a preliminary filter.
+ if (L == R)
+ return true;
+
+ if (L->getValueID() != R->getValueID())
+ return false;
+
+ // Ask the engine about global values.
+ if (isa<GlobalValue>(L))
+ return Engine.equivalentAsOperands(cast<GlobalValue>(L),
+ cast<GlobalValue>(R));
+
+ // Compare constant expressions structurally.
+ if (isa<ConstantExpr>(L))
+ return equivalentAsOperands(cast<ConstantExpr>(L),
+ cast<ConstantExpr>(R));
+
+ // Nulls of the "same type" don't always actually have the same
+ // type; I don't know why. Just white-list them.
+ if (isa<ConstantPointerNull>(L))
+ return true;
+
+ // Block addresses only match if we've already encountered the
+ // block. FIXME: tentative matches?
+ if (isa<BlockAddress>(L))
+ return Blocks[cast<BlockAddress>(L)->getBasicBlock()]
+ == cast<BlockAddress>(R)->getBasicBlock();
+
+ return false;
+ }
+
+ bool equivalentAsOperands(ConstantExpr *L, ConstantExpr *R) {
+ if (L == R)
+ return true;
+ if (L->getOpcode() != R->getOpcode())
+ return false;
+
+ switch (L->getOpcode()) {
+ case Instruction::ICmp:
+ case Instruction::FCmp:
+ if (L->getPredicate() != R->getPredicate())
+ return false;
+ break;
+
+ case Instruction::GetElementPtr:
+ // FIXME: inbounds?
+ break;
+
+ default:
+ break;
+ }
+
+ if (L->getNumOperands() != R->getNumOperands())
+ return false;
+
+ for (unsigned I = 0, E = L->getNumOperands(); I != E; ++I)
+ if (!equivalentAsOperands(L->getOperand(I), R->getOperand(I)))
+ return false;
+
+ return true;
+ }
+
+ bool equivalentAsOperands(Value *L, Value *R) {
+ // Fall out if the values have different kind.
+ // This possibly shouldn't take priority over oracles.
+ if (L->getValueID() != R->getValueID())
+ return false;
+
+ // Value subtypes: Argument, Constant, Instruction, BasicBlock,
+ // InlineAsm, MDNode, MDString, PseudoSourceValue
+
+ if (isa<Constant>(L))
+ return equivalentAsOperands(cast<Constant>(L), cast<Constant>(R));
+
+ if (isa<Instruction>(L))
+ return Values[L] == R || TentativeValues.count(std::make_pair(L, R));
+
+ if (isa<Argument>(L))
+ return Values[L] == R;
+
+ if (isa<BasicBlock>(L))
+      return Blocks[cast<BasicBlock>(L)] == R;
+
+ // Pretend everything else is identical.
+ return true;
+ }
+
+ // Avoid a gcc warning about accessing 'this' in an initializer.
+ FunctionDifferenceEngine *this_() { return this; }
+
+public:
+ FunctionDifferenceEngine(DifferenceEngine &Engine) :
+ Engine(Engine), Queue(QueueSorter(*this_())) {}
+
+ void diff(Function *L, Function *R) {
+ if (L->arg_size() != R->arg_size())
+ Engine.log("different argument counts");
+
+ // Map the arguments.
+ for (Function::arg_iterator
+ LI = L->arg_begin(), LE = L->arg_end(),
+ RI = R->arg_begin(), RE = R->arg_end();
+ LI != LE && RI != RE; ++LI, ++RI)
+ Values[&*LI] = &*RI;
+
+ tryUnify(&*L->begin(), &*R->begin());
+ processQueue();
+ }
+};
+
+struct DiffEntry {
+ DiffEntry() : Cost(0) {}
+
+ unsigned Cost;
+  llvm::SmallVector<char, 8> Path; // actually of DiffChange
+};
+
+bool FunctionDifferenceEngine::matchForBlockDiff(Instruction *L,
+ Instruction *R) {
+ return !diff(L, R, false, false);
+}
+
+void FunctionDifferenceEngine::runBlockDiff(BasicBlock::iterator LStart,
+ BasicBlock::iterator RStart) {
+ BasicBlock::iterator LE = LStart->getParent()->end();
+ BasicBlock::iterator RE = RStart->getParent()->end();
+
+ unsigned NL = std::distance(LStart, LE);
+
+ SmallVector<DiffEntry, 20> Paths1(NL+1);
+ SmallVector<DiffEntry, 20> Paths2(NL+1);
+
+ DiffEntry *Cur = Paths1.data();
+ DiffEntry *Next = Paths2.data();
+
+ const unsigned LeftCost = 2;
+ const unsigned RightCost = 2;
+ const unsigned MatchCost = 0;
+
+ assert(TentativeValues.empty());
+
+ // Initialize the first column.
+ for (unsigned I = 0; I != NL+1; ++I) {
+ Cur[I].Cost = I * LeftCost;
+ for (unsigned J = 0; J != I; ++J)
+ Cur[I].Path.push_back(DC_left);
+ }
+
+ for (BasicBlock::iterator RI = RStart; RI != RE; ++RI) {
+ // Initialize the first row.
+ Next[0] = Cur[0];
+ Next[0].Cost += RightCost;
+ Next[0].Path.push_back(DC_right);
+
+ unsigned Index = 1;
+ for (BasicBlock::iterator LI = LStart; LI != LE; ++LI, ++Index) {
+ if (matchForBlockDiff(&*LI, &*RI)) {
+ Next[Index] = Cur[Index-1];
+ Next[Index].Cost += MatchCost;
+ Next[Index].Path.push_back(DC_match);
+ TentativeValues.insert(std::make_pair(&*LI, &*RI));
+ } else if (Next[Index-1].Cost <= Cur[Index].Cost) {
+ Next[Index] = Next[Index-1];
+ Next[Index].Cost += LeftCost;
+ Next[Index].Path.push_back(DC_left);
+ } else {
+ Next[Index] = Cur[Index];
+ Next[Index].Cost += RightCost;
+ Next[Index].Path.push_back(DC_right);
+ }
+ }
+
+ std::swap(Cur, Next);
+ }
+
+ // We don't need the tentative values anymore; everything from here
+ // on out should be non-tentative.
+ TentativeValues.clear();
+
+ SmallVectorImpl<char> &Path = Cur[NL].Path;
+ BasicBlock::iterator LI = LStart, RI = RStart;
+
+ DiffLogBuilder Diff(Engine.getConsumer());
+
+ // Drop trailing matches.
+ while (Path.back() == DC_match)
+ Path.pop_back();
+
+ // Skip leading matches.
+ SmallVectorImpl<char>::iterator
+ PI = Path.begin(), PE = Path.end();
+ while (PI != PE && *PI == DC_match) {
+ unify(&*LI, &*RI);
+ ++PI, ++LI, ++RI;
+ }
+
+ for (; PI != PE; ++PI) {
+ switch (static_cast<DiffChange>(*PI)) {
+ case DC_match:
+ assert(LI != LE && RI != RE);
+ {
+ Instruction *L = &*LI, *R = &*RI;
+ unify(L, R);
+ Diff.addMatch(L, R);
+ }
+ ++LI; ++RI;
+ break;
+
+ case DC_left:
+ assert(LI != LE);
+ Diff.addLeft(&*LI);
+ ++LI;
+ break;
+
+ case DC_right:
+ assert(RI != RE);
+ Diff.addRight(&*RI);
+ ++RI;
+ break;
+ }
+ }
+
+  // Finish unifying and complaining about the tails of the block,
+ // which should be matches all the way through.
+ while (LI != LE) {
+ assert(RI != RE);
+ unify(&*LI, &*RI);
+ ++LI, ++RI;
+ }
+
+ // If the terminators have different kinds, but one is an invoke and the
+ // other is an unconditional branch immediately following a call, unify
+ // the results and the destinations.
+ TerminatorInst *LTerm = LStart->getParent()->getTerminator();
+ TerminatorInst *RTerm = RStart->getParent()->getTerminator();
+ if (isa<BranchInst>(LTerm) && isa<InvokeInst>(RTerm)) {
+ if (cast<BranchInst>(LTerm)->isConditional()) return;
+ BasicBlock::iterator I = LTerm;
+ if (I == LStart->getParent()->begin()) return;
+ --I;
+ if (!isa<CallInst>(*I)) return;
+ CallInst *LCall = cast<CallInst>(&*I);
+ InvokeInst *RInvoke = cast<InvokeInst>(RTerm);
+ if (!equivalentAsOperands(LCall->getCalledValue(), RInvoke->getCalledValue()))
+ return;
+ if (!LCall->use_empty())
+ Values[LCall] = RInvoke;
+ tryUnify(LTerm->getSuccessor(0), RInvoke->getNormalDest());
+ } else if (isa<InvokeInst>(LTerm) && isa<BranchInst>(RTerm)) {
+ if (cast<BranchInst>(RTerm)->isConditional()) return;
+ BasicBlock::iterator I = RTerm;
+ if (I == RStart->getParent()->begin()) return;
+ --I;
+ if (!isa<CallInst>(*I)) return;
+ CallInst *RCall = cast<CallInst>(I);
+ InvokeInst *LInvoke = cast<InvokeInst>(LTerm);
+ if (!equivalentAsOperands(LInvoke->getCalledValue(), RCall->getCalledValue()))
+ return;
+ if (!LInvoke->use_empty())
+ Values[LInvoke] = RCall;
+ tryUnify(LInvoke->getNormalDest(), RTerm->getSuccessor(0));
+ }
+}
+
+}
+
+void DifferenceEngine::Oracle::anchor() { }
+
+void DifferenceEngine::diff(Function *L, Function *R) {
+ Context C(*this, L, R);
+
+ // FIXME: types
+ // FIXME: attributes and CC
+ // FIXME: parameter attributes
+
+ // If both are declarations, we're done.
+ if (L->empty() && R->empty())
+ return;
+ else if (L->empty())
+ log("left function is declaration, right function is definition");
+ else if (R->empty())
+ log("right function is declaration, left function is definition");
+ else
+ FunctionDifferenceEngine(*this).diff(L, R);
+}
+
+void DifferenceEngine::diff(Module *L, Module *R) {
+ StringSet<> LNames;
+ SmallVector<std::pair<Function*,Function*>, 20> Queue;
+
+ for (Module::iterator I = L->begin(), E = L->end(); I != E; ++I) {
+ Function *LFn = &*I;
+ LNames.insert(LFn->getName());
+
+ if (Function *RFn = R->getFunction(LFn->getName()))
+ Queue.push_back(std::make_pair(LFn, RFn));
+ else
+ logf("function %l exists only in left module") << LFn;
+ }
+
+ for (Module::iterator I = R->begin(), E = R->end(); I != E; ++I) {
+ Function *RFn = &*I;
+ if (!LNames.count(RFn->getName()))
+ logf("function %r exists only in right module") << RFn;
+ }
+
+ for (SmallVectorImpl<std::pair<Function*,Function*> >::iterator
+ I = Queue.begin(), E = Queue.end(); I != E; ++I)
+ diff(I->first, I->second);
+}
+
+bool DifferenceEngine::equivalentAsOperands(GlobalValue *L, GlobalValue *R) {
+ if (globalValueOracle) return (*globalValueOracle)(L, R);
+ return L->getName() == R->getName();
+}
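
runBlockDiff above is a two-column dynamic-programming edit distance over the instruction sequences of the two blocks: a matched pair costs 0, an unmatched left or right instruction costs 2, and only the current and previous columns are kept. The standalone sketch below (a hypothetical helper, not llvm-diff code) applies the same recurrence to plain characters to make the cost structure concrete.

    // Two-column edit-distance sketch mirroring runBlockDiff's costs:
    // MatchCost = 0, LeftCost = RightCost = 2 (illustration only).
    #include <algorithm>
    #include <cassert>
    #include <string>
    #include <vector>

    static unsigned diffCost(const std::string &L, const std::string &R) {
      const unsigned LeftCost = 2, RightCost = 2, MatchCost = 0;
      std::vector<unsigned> Cur(L.size() + 1), Next(L.size() + 1);

      // First column: everything on the left is unmatched.
      for (unsigned I = 0; I <= L.size(); ++I)
        Cur[I] = I * LeftCost;

      for (unsigned J = 0; J < R.size(); ++J) {
        Next[0] = Cur[0] + RightCost;          // first row of this column
        for (unsigned I = 1; I <= L.size(); ++I) {
          if (L[I - 1] == R[J])                // the "instructions" match
            Next[I] = Cur[I - 1] + MatchCost;
          else                                 // drop one from either side
            Next[I] = std::min(Next[I - 1] + LeftCost, Cur[I] + RightCost);
        }
        std::swap(Cur, Next);
      }
      return Cur[L.size()];
    }

    int main() {
      assert(diffCost("abc", "abc") == 0);     // identical blocks
      assert(diffCost("abc", "abd") == 4);     // one left line + one right line
      return 0;
    }

The real code additionally records, in each DiffEntry's Path, which choice was made at every cell, so the winning path can be replayed into the DiffLogBuilder as match/left/right lines.
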
diff --git a/contrib/llvm/tools/llvm-diff/DifferenceEngine.h b/contrib/llvm/tools/llvm-diff/DifferenceEngine.h
new file mode 100644
index 0000000..7ea79e4
--- /dev/null
+++ b/contrib/llvm/tools/llvm-diff/DifferenceEngine.h
@@ -0,0 +1,93 @@
+//===-- DifferenceEngine.h - Module comparator ------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This header defines the interface to the LLVM difference engine,
+// which structurally compares functions within a module.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LLVM_DIFFERENCE_ENGINE_H_
+#define _LLVM_DIFFERENCE_ENGINE_H_
+
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "DiffLog.h"
+#include "DiffConsumer.h"
+
+#include <utility>
+
+namespace llvm {
+ class Function;
+ class GlobalValue;
+ class Instruction;
+ class LLVMContext;
+ class Module;
+ class Twine;
+ class Value;
+
+ /// A class for performing structural comparisons of LLVM assembly.
+ class DifferenceEngine {
+ public:
+ /// A RAII object for recording the current context.
+ struct Context {
+ Context(DifferenceEngine &Engine, Value *L, Value *R) : Engine(Engine) {
+ Engine.consumer.enterContext(L, R);
+ }
+
+ ~Context() {
+ Engine.consumer.exitContext();
+ }
+
+ private:
+ DifferenceEngine &Engine;
+ };
+
+ /// An oracle for answering whether two values are equivalent as
+ /// operands.
+ class Oracle {
+ virtual void anchor();
+ public:
+ virtual bool operator()(Value *L, Value *R) = 0;
+
+ protected:
+ virtual ~Oracle() {}
+ };
+
+ DifferenceEngine(LLVMContext &context, Consumer &consumer)
+ : context(context), consumer(consumer), globalValueOracle(0) {}
+
+ void diff(Module *L, Module *R);
+ void diff(Function *L, Function *R);
+ void log(StringRef text) {
+ consumer.log(text);
+ }
+ LogBuilder logf(StringRef text) {
+ return LogBuilder(consumer, text);
+ }
+ Consumer& getConsumer() const { return consumer; }
+
+ /// Installs an oracle to decide whether two global values are
+ /// equivalent as operands. Without an oracle, global values are
+ /// considered equivalent as operands precisely when they have the
+ /// same name.
+ void setGlobalValueOracle(Oracle *oracle) {
+ globalValueOracle = oracle;
+ }
+
+ /// Determines whether two global values are equivalent.
+ bool equivalentAsOperands(GlobalValue *L, GlobalValue *R);
+
+ private:
+ LLVMContext &context;
+ Consumer &consumer;
+ Oracle *globalValueOracle;
+ };
+}
+
+#endif
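
setGlobalValueOracle lets a client replace the default name-equality test for globals. As an illustration written against this header, the sketch below installs a hypothetical oracle that compares names only up to the first '.', so a global and a renamed clone (for example foo and foo.1) would be treated as equivalent; the suffix rule and the class name are assumptions for illustration, not part of the patch.

    // Hypothetical oracle: names are compared after dropping everything
    // from the first '.' onward (assumed convention, illustration only).
    #include "DifferenceEngine.h"
    #include "llvm/GlobalValue.h"

    namespace {
    struct IgnoreSuffixOracle : llvm::DifferenceEngine::Oracle {
      virtual bool operator()(llvm::Value *L, llvm::Value *R) {
        llvm::StringRef LN = llvm::cast<llvm::GlobalValue>(L)->getName();
        llvm::StringRef RN = llvm::cast<llvm::GlobalValue>(R)->getName();
        return LN.split('.').first == RN.split('.').first;
      }
    };
    }

    // Installed before diffing, e.g. in a driver:
    //   IgnoreSuffixOracle IgnoreSuffix;
    //   Engine.setGlobalValueOracle(&IgnoreSuffix);
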
diff --git a/contrib/llvm/tools/llvm-diff/llvm-diff.cpp b/contrib/llvm/tools/llvm-diff/llvm-diff.cpp
new file mode 100644
index 0000000..774169b
--- /dev/null
+++ b/contrib/llvm/tools/llvm-diff/llvm-diff.cpp
@@ -0,0 +1,98 @@
+//===-- llvm-diff.cpp - Module comparator command-line driver ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the command-line driver for the difference engine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "DiffLog.h"
+#include "DifferenceEngine.h"
+
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Type.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/IRReader.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/SourceMgr.h"
+
+#include <string>
+#include <utility>
+
+
+using namespace llvm;
+
+/// Reads a module from a file. On error, messages are written to stderr
+/// and null is returned.
+static Module *ReadModule(LLVMContext &Context, StringRef Name) {
+ SMDiagnostic Diag;
+ Module *M = ParseIRFile(Name, Diag, Context);
+ if (!M)
+ Diag.print("llvm-diff", errs());
+ return M;
+}
+
+static void diffGlobal(DifferenceEngine &Engine, Module *L, Module *R,
+ StringRef Name) {
+ // Drop leading sigils from the global name.
+ if (Name.startswith("@")) Name = Name.substr(1);
+
+ Function *LFn = L->getFunction(Name);
+ Function *RFn = R->getFunction(Name);
+ if (LFn && RFn)
+ Engine.diff(LFn, RFn);
+ else if (!LFn && !RFn)
+ errs() << "No function named @" << Name << " in either module\n";
+ else if (!LFn)
+ errs() << "No function named @" << Name << " in left module\n";
+ else
+ errs() << "No function named @" << Name << " in right module\n";
+}
+
+static cl::opt<std::string> LeftFilename(cl::Positional,
+ cl::desc("<first file>"),
+ cl::Required);
+static cl::opt<std::string> RightFilename(cl::Positional,
+ cl::desc("<second file>"),
+ cl::Required);
+static cl::list<std::string> GlobalsToCompare(cl::Positional,
+ cl::desc("<globals to compare>"));
+
+int main(int argc, char **argv) {
+ cl::ParseCommandLineOptions(argc, argv);
+
+ LLVMContext Context;
+
+ // Load both modules. Die if that fails.
+ Module *LModule = ReadModule(Context, LeftFilename);
+ Module *RModule = ReadModule(Context, RightFilename);
+ if (!LModule || !RModule) return 1;
+
+ DiffConsumer Consumer(LModule, RModule);
+ DifferenceEngine Engine(Context, Consumer);
+
+ // If any global names were given, just diff those.
+ if (!GlobalsToCompare.empty()) {
+ for (unsigned I = 0, E = GlobalsToCompare.size(); I != E; ++I)
+ diffGlobal(Engine, LModule, RModule, GlobalsToCompare[I]);
+
+ // Otherwise, diff everything in the module.
+ } else {
+ Engine.diff(LModule, RModule);
+ }
+
+ delete LModule;
+ delete RModule;
+
+ return Consumer.hadDifferences();
+}
diff --git a/contrib/llvm/tools/llvm-dis/llvm-dis.cpp b/contrib/llvm/tools/llvm-dis/llvm-dis.cpp
new file mode 100644
index 0000000..6450ea6
--- /dev/null
+++ b/contrib/llvm/tools/llvm-dis/llvm-dis.cpp
@@ -0,0 +1,193 @@
+//===-- llvm-dis.cpp - The low-level LLVM disassembler --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This utility may be invoked in the following manner:
+// llvm-dis [options] - Read LLVM bitcode from stdin, write asm to stdout
+// llvm-dis [options] x.bc - Read LLVM bitcode from the x.bc file, write asm
+// to the x.ll file.
+// Options:
+// --help - Output information about command line switches
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Type.h"
+#include "llvm/IntrinsicInst.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Assembly/AssemblyAnnotationWriter.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/DataStream.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/system_error.h"
+using namespace llvm;
+
+static cl::opt<std::string>
+InputFilename(cl::Positional, cl::desc("<input bitcode>"), cl::init("-"));
+
+static cl::opt<std::string>
+OutputFilename("o", cl::desc("Override output filename"),
+ cl::value_desc("filename"));
+
+static cl::opt<bool>
+Force("f", cl::desc("Enable binary output on terminals"));
+
+static cl::opt<bool>
+DontPrint("disable-output", cl::desc("Don't output the .ll file"), cl::Hidden);
+
+static cl::opt<bool>
+ShowAnnotations("show-annotations",
+ cl::desc("Add informational comments to the .ll file"));
+
+namespace {
+
+static void printDebugLoc(const DebugLoc &DL, formatted_raw_ostream &OS) {
+ OS << DL.getLine() << ":" << DL.getCol();
+ if (MDNode *N = DL.getInlinedAt(getGlobalContext())) {
+ DebugLoc IDL = DebugLoc::getFromDILocation(N);
+ if (!IDL.isUnknown()) {
+ OS << "@";
+ printDebugLoc(IDL,OS);
+ }
+ }
+}
+class CommentWriter : public AssemblyAnnotationWriter {
+public:
+ void emitFunctionAnnot(const Function *F,
+ formatted_raw_ostream &OS) {
+ OS << "; [#uses=" << F->getNumUses() << ']'; // Output # uses
+ OS << '\n';
+ }
+ void printInfoComment(const Value &V, formatted_raw_ostream &OS) {
+ bool Padded = false;
+ if (!V.getType()->isVoidTy()) {
+ OS.PadToColumn(50);
+ Padded = true;
+ OS << "; [#uses=" << V.getNumUses() << " type=" << *V.getType() << "]"; // Output # uses and type
+ }
+ if (const Instruction *I = dyn_cast<Instruction>(&V)) {
+ const DebugLoc &DL = I->getDebugLoc();
+ if (!DL.isUnknown()) {
+ if (!Padded) {
+ OS.PadToColumn(50);
+ Padded = true;
+ OS << ";";
+ }
+ OS << " [debug line = ";
+ printDebugLoc(DL,OS);
+ OS << "]";
+ }
+ if (const DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(I)) {
+ DIVariable Var(DDI->getVariable());
+ if (!Padded) {
+ OS.PadToColumn(50);
+ Padded = true;
+ OS << ";";
+ }
+ OS << " [debug variable = " << Var.getName() << "]";
+ }
+ else if (const DbgValueInst *DVI = dyn_cast<DbgValueInst>(I)) {
+ DIVariable Var(DVI->getVariable());
+ if (!Padded) {
+ OS.PadToColumn(50);
+ Padded = true;
+ OS << ";";
+ }
+ OS << " [debug variable = " << Var.getName() << "]";
+ }
+ }
+ }
+};
+
+} // end anon namespace
+
+int main(int argc, char **argv) {
+ // Print a stack trace if we signal out.
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(argc, argv);
+
+ LLVMContext &Context = getGlobalContext();
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+
+
+ cl::ParseCommandLineOptions(argc, argv, "llvm .bc -> .ll disassembler\n");
+
+ std::string ErrorMessage;
+ std::auto_ptr<Module> M;
+
+ // Use the bitcode streaming interface
+ DataStreamer *streamer = getDataFileStreamer(InputFilename, &ErrorMessage);
+ if (streamer) {
+ std::string DisplayFilename;
+ if (InputFilename == "-")
+ DisplayFilename = "<stdin>";
+ else
+ DisplayFilename = InputFilename;
+ M.reset(getStreamedBitcodeModule(DisplayFilename, streamer, Context,
+ &ErrorMessage));
+    if (M.get() != 0 && M->MaterializeAllPermanently(&ErrorMessage)) {
+ M.reset();
+ }
+ }
+
+ if (M.get() == 0) {
+ errs() << argv[0] << ": ";
+ if (ErrorMessage.size())
+ errs() << ErrorMessage << "\n";
+ else
+ errs() << "bitcode didn't read correctly.\n";
+ return 1;
+ }
+
+ // Just use stdout. We won't actually print anything on it.
+ if (DontPrint)
+ OutputFilename = "-";
+
+ if (OutputFilename.empty()) { // Unspecified output, infer it.
+ if (InputFilename == "-") {
+ OutputFilename = "-";
+ } else {
+ const std::string &IFN = InputFilename;
+ int Len = IFN.length();
+ // If the source ends in .bc, strip it off.
+      if (Len > 3 &&
+          IFN[Len-3] == '.' && IFN[Len-2] == 'b' && IFN[Len-1] == 'c')
+ OutputFilename = std::string(IFN.begin(), IFN.end()-3)+".ll";
+ else
+ OutputFilename = IFN+".ll";
+ }
+ }
+
+ std::string ErrorInfo;
+ OwningPtr<tool_output_file>
+ Out(new tool_output_file(OutputFilename.c_str(), ErrorInfo,
+ raw_fd_ostream::F_Binary));
+ if (!ErrorInfo.empty()) {
+ errs() << ErrorInfo << '\n';
+ return 1;
+ }
+
+ OwningPtr<AssemblyAnnotationWriter> Annotator;
+ if (ShowAnnotations)
+ Annotator.reset(new CommentWriter());
+
+ // All that llvm-dis does is write the assembly to a file.
+ if (!DontPrint)
+ M->print(Out->os(), Annotator.get());
+
+ // Declare success.
+ Out->keep();
+
+ return 0;
+}
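
The output-name inference in main above has three cases: "-" maps to "-", a name ending in ".bc" has that suffix replaced by ".ll", and anything else simply gets ".ll" appended. A small hypothetical helper (not part of llvm-dis) restating the rule in one place:

    // Hypothetical helper mirroring llvm-dis's output-name inference.
    #include <string>

    static std::string inferOutputName(const std::string &In) {
      if (In == "-")
        return "-";                                  // stdin -> stdout
      if (In.size() > 3 && In.compare(In.size() - 3, 3, ".bc") == 0)
        return In.substr(0, In.size() - 3) + ".ll";  // x.bc -> x.ll
      return In + ".ll";                             // anything else: append
    }
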
diff --git a/contrib/llvm/tools/llvm-extract/llvm-extract.cpp b/contrib/llvm/tools/llvm-extract/llvm-extract.cpp
new file mode 100644
index 0000000..2ed11c5
--- /dev/null
+++ b/contrib/llvm/tools/llvm-extract/llvm-extract.cpp
@@ -0,0 +1,238 @@
+//===- llvm-extract.cpp - LLVM function extraction utility ----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This utility changes the input module to only contain a single function,
+// which is primarily used for debugging transformations.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/IRReader.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/SystemUtils.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SetVector.h"
+#include <memory>
+using namespace llvm;
+
+// InputFilename - The filename to read from.
+static cl::opt<std::string>
+InputFilename(cl::Positional, cl::desc("<input bitcode file>"),
+ cl::init("-"), cl::value_desc("filename"));
+
+static cl::opt<std::string>
+OutputFilename("o", cl::desc("Specify output filename"),
+ cl::value_desc("filename"), cl::init("-"));
+
+static cl::opt<bool>
+Force("f", cl::desc("Enable binary output on terminals"));
+
+static cl::opt<bool>
+DeleteFn("delete", cl::desc("Delete specified Globals from Module"));
+
+// ExtractFuncs - The functions to extract from the module.
+static cl::list<std::string>
+ExtractFuncs("func", cl::desc("Specify function to extract"),
+ cl::ZeroOrMore, cl::value_desc("function"));
+
+// ExtractRegExpFuncs - The functions, matched via regular expression, to
+// extract from the module.
+static cl::list<std::string>
+ExtractRegExpFuncs("rfunc", cl::desc("Specify function(s) to extract using a "
+ "regular expression"),
+ cl::ZeroOrMore, cl::value_desc("rfunction"));
+
+// ExtractGlobals - The globals to extract from the module.
+static cl::list<std::string>
+ExtractGlobals("glob", cl::desc("Specify global to extract"),
+ cl::ZeroOrMore, cl::value_desc("global"));
+
+// ExtractRegExpGlobals - The globals, matched via regular expression, to
+// extract from the module...
+static cl::list<std::string>
+ExtractRegExpGlobals("rglob", cl::desc("Specify global(s) to extract using a "
+ "regular expression"),
+ cl::ZeroOrMore, cl::value_desc("rglobal"));
+
+static cl::opt<bool>
+OutputAssembly("S",
+ cl::desc("Write output as LLVM assembly"), cl::Hidden);
+
+int main(int argc, char **argv) {
+ // Print a stack trace if we signal out.
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(argc, argv);
+
+ LLVMContext &Context = getGlobalContext();
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+ cl::ParseCommandLineOptions(argc, argv, "llvm extractor\n");
+
+ // Use lazy loading, since we only care about selected global values.
+ SMDiagnostic Err;
+ std::auto_ptr<Module> M;
+ M.reset(getLazyIRFileModule(InputFilename, Err, Context));
+
+ if (M.get() == 0) {
+ Err.print(argv[0], errs());
+ return 1;
+ }
+
+ // Use SetVector to avoid duplicates.
+ SetVector<GlobalValue *> GVs;
+
+ // Figure out which globals we should extract.
+ for (size_t i = 0, e = ExtractGlobals.size(); i != e; ++i) {
+ GlobalValue *GV = M->getNamedGlobal(ExtractGlobals[i]);
+ if (!GV) {
+ errs() << argv[0] << ": program doesn't contain global named '"
+ << ExtractGlobals[i] << "'!\n";
+ return 1;
+ }
+ GVs.insert(GV);
+ }
+
+ // Extract globals via regular expression matching.
+ for (size_t i = 0, e = ExtractRegExpGlobals.size(); i != e; ++i) {
+ std::string Error;
+ Regex RegEx(ExtractRegExpGlobals[i]);
+ if (!RegEx.isValid(Error)) {
+ errs() << argv[0] << ": '" << ExtractRegExpGlobals[i] << "' "
+ "invalid regex: " << Error;
+ }
+ bool match = false;
+ for (Module::global_iterator GV = M->global_begin(),
+ E = M->global_end(); GV != E; GV++) {
+ if (RegEx.match(GV->getName())) {
+ GVs.insert(&*GV);
+ match = true;
+ }
+ }
+ if (!match) {
+      errs() << argv[0] << ": program doesn't contain a global matching '"
+ << ExtractRegExpGlobals[i] << "'!\n";
+ return 1;
+ }
+ }
+
+ // Figure out which functions we should extract.
+ for (size_t i = 0, e = ExtractFuncs.size(); i != e; ++i) {
+ GlobalValue *GV = M->getFunction(ExtractFuncs[i]);
+ if (!GV) {
+ errs() << argv[0] << ": program doesn't contain function named '"
+ << ExtractFuncs[i] << "'!\n";
+ return 1;
+ }
+ GVs.insert(GV);
+ }
+ // Extract functions via regular expression matching.
+ for (size_t i = 0, e = ExtractRegExpFuncs.size(); i != e; ++i) {
+ std::string Error;
+ StringRef RegExStr = ExtractRegExpFuncs[i];
+ Regex RegEx(RegExStr);
+ if (!RegEx.isValid(Error)) {
+ errs() << argv[0] << ": '" << ExtractRegExpFuncs[i] << "' "
+ "invalid regex: " << Error;
+ }
+ bool match = false;
+ for (Module::iterator F = M->begin(), E = M->end(); F != E;
+ F++) {
+ if (RegEx.match(F->getName())) {
+ GVs.insert(&*F);
+ match = true;
+ }
+ }
+ if (!match) {
+      errs() << argv[0] << ": program doesn't contain a function matching '"
+ << ExtractRegExpFuncs[i] << "'!\n";
+ return 1;
+ }
+ }
+
+ // Materialize requisite global values.
+ if (!DeleteFn)
+ for (size_t i = 0, e = GVs.size(); i != e; ++i) {
+ GlobalValue *GV = GVs[i];
+ if (GV->isMaterializable()) {
+ std::string ErrInfo;
+ if (GV->Materialize(&ErrInfo)) {
+ errs() << argv[0] << ": error reading input: " << ErrInfo << "\n";
+ return 1;
+ }
+ }
+ }
+ else {
+ // Deleting. Materialize every GV that's *not* in GVs.
+ SmallPtrSet<GlobalValue *, 8> GVSet(GVs.begin(), GVs.end());
+ for (Module::global_iterator I = M->global_begin(), E = M->global_end();
+ I != E; ++I) {
+ GlobalVariable *G = I;
+ if (!GVSet.count(G) && G->isMaterializable()) {
+ std::string ErrInfo;
+ if (G->Materialize(&ErrInfo)) {
+ errs() << argv[0] << ": error reading input: " << ErrInfo << "\n";
+ return 1;
+ }
+ }
+ }
+ for (Module::iterator I = M->begin(), E = M->end(); I != E; ++I) {
+ Function *F = I;
+ if (!GVSet.count(F) && F->isMaterializable()) {
+ std::string ErrInfo;
+ if (F->Materialize(&ErrInfo)) {
+ errs() << argv[0] << ": error reading input: " << ErrInfo << "\n";
+ return 1;
+ }
+ }
+ }
+ }
+
+ // In addition to deleting all other functions, we also want to spiff it
+ // up a little bit. Do this now.
+ PassManager Passes;
+ Passes.add(new TargetData(M.get())); // Use correct TargetData
+
+ std::vector<GlobalValue*> Gvs(GVs.begin(), GVs.end());
+
+ Passes.add(createGVExtractionPass(Gvs, DeleteFn));
+ if (!DeleteFn)
+ Passes.add(createGlobalDCEPass()); // Delete unreachable globals
+ Passes.add(createStripDeadDebugInfoPass()); // Remove dead debug info
+ Passes.add(createStripDeadPrototypesPass()); // Remove dead func decls
+
+ std::string ErrorInfo;
+ tool_output_file Out(OutputFilename.c_str(), ErrorInfo,
+ raw_fd_ostream::F_Binary);
+ if (!ErrorInfo.empty()) {
+ errs() << ErrorInfo << '\n';
+ return 1;
+ }
+
+ if (OutputAssembly)
+ Passes.add(createPrintModulePass(&Out.os()));
+ else if (Force || !CheckBitcodeOutputToConsole(Out.os(), true))
+ Passes.add(createBitcodeWriterPass(Out.os()));
+
+ Passes.run(*M.get());
+
+ // Declare success.
+ Out.keep();
+
+ return 0;
+}
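
The requested globals and functions are collected in a SetVector so that values selected more than once (say, by both -func and a matching -rfunc pattern) are extracted only once, while the order of first selection is preserved for the extraction pass. A minimal standalone sketch of that property (illustration only, not part of the tool):

    // Why SetVector: duplicates are ignored, first-insertion order is kept.
    #include "llvm/ADT/SetVector.h"
    #include <cassert>

    int main() {
      llvm::SetVector<int> GVs;
      GVs.insert(30);
      GVs.insert(10);
      GVs.insert(30);                        // duplicate: ignored
      assert(GVs.size() == 2);
      assert(GVs[0] == 30 && GVs[1] == 10);  // insertion order preserved
      return 0;
    }
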
diff --git a/contrib/llvm/tools/llvm-ld/Optimize.cpp b/contrib/llvm/tools/llvm-ld/Optimize.cpp
new file mode 100644
index 0000000..7f3f900
--- /dev/null
+++ b/contrib/llvm/tools/llvm-ld/Optimize.cpp
@@ -0,0 +1,130 @@
+//===- Optimize.cpp - Optimize a complete program -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements all optimization of the linked module for llvm-ld.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/DynamicLibrary.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/PassNameParser.h"
+#include "llvm/Support/PluginLoader.h"
+#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/IPO/PassManagerBuilder.h"
+#include "llvm/Transforms/Scalar.h"
+using namespace llvm;
+
+// Pass Name Options as generated by the PassNameParser
+static cl::list<const PassInfo*, bool, PassNameParser>
+ OptimizationList(cl::desc("Optimizations available:"));
+
+// Don't verify at the end.
+static cl::opt<bool> DontVerify("disable-verify", cl::ReallyHidden);
+
+static cl::opt<bool> DisableInline("disable-inlining",
+ cl::desc("Do not run the inliner pass"));
+
+static cl::opt<bool>
+DisableOptimizations("disable-opt",
+ cl::desc("Do not run any optimization passes"));
+
+static cl::opt<bool> DisableInternalize("disable-internalize",
+ cl::desc("Do not mark all symbols as internal"));
+
+static cl::opt<bool> VerifyEach("verify-each",
+ cl::desc("Verify intermediate results of all passes"));
+
+static cl::alias ExportDynamic("export-dynamic",
+ cl::aliasopt(DisableInternalize),
+ cl::desc("Alias for -disable-internalize"));
+
+static cl::opt<bool> Strip("strip-all",
+ cl::desc("Strip all symbol info from executable"));
+
+static cl::alias A0("s", cl::desc("Alias for --strip-all"),
+ cl::aliasopt(Strip));
+
+static cl::opt<bool> StripDebug("strip-debug",
+ cl::desc("Strip debugger symbol info from executable"));
+
+static cl::alias A1("S", cl::desc("Alias for --strip-debug"),
+ cl::aliasopt(StripDebug));
+
+// A utility function that adds a pass to the pass manager but will also add
+// a verifier pass after if we're supposed to verify.
+static inline void addPass(PassManager &PM, Pass *P) {
+ // Add the pass to the pass manager...
+ PM.add(P);
+
+ // If we are verifying all of the intermediate steps, add the verifier...
+ if (VerifyEach)
+ PM.add(createVerifierPass());
+}
+
+namespace llvm {
+/// Optimize - Perform link time optimizations. This will run the scalar
+/// optimizations, any loaded plugin-optimization modules, and then the
+/// inter-procedural optimizations if applicable.
+void Optimize(Module *M) {
+
+ // Instantiate the pass manager to organize the passes.
+ PassManager Passes;
+
+ // If we're verifying, start off with a verification pass.
+ if (VerifyEach)
+ Passes.add(createVerifierPass());
+
+ // Add an appropriate TargetData instance for this module...
+ addPass(Passes, new TargetData(M));
+
+ if (!DisableOptimizations)
+ PassManagerBuilder().populateLTOPassManager(Passes, !DisableInternalize,
+ !DisableInline);
+
+ // If the -s or -S command line options were specified, strip the symbols out
+ // of the resulting program to make it smaller. -s and -S are GNU ld options
+ // that we are supporting; they alias -strip-all and -strip-debug.
+ if (Strip || StripDebug)
+ addPass(Passes, createStripSymbolsPass(StripDebug && !Strip));
+
+ // Create a new optimization pass for each one specified on the command line
+ std::auto_ptr<TargetMachine> target;
+ for (unsigned i = 0; i < OptimizationList.size(); ++i) {
+ const PassInfo *Opt = OptimizationList[i];
+ if (Opt->getNormalCtor())
+ addPass(Passes, Opt->getNormalCtor()());
+ else
+ errs() << "llvm-ld: cannot create pass: " << Opt->getPassName()
+ << "\n";
+ }
+
+  // The user's passes may leave cruft around. Clean up after them, but only
+  // if DisableOptimizations is not set.
+ if (!DisableOptimizations) {
+ addPass(Passes, createInstructionCombiningPass());
+ addPass(Passes, createCFGSimplificationPass());
+ addPass(Passes, createAggressiveDCEPass());
+ addPass(Passes, createGlobalDCEPass());
+ }
+
+ // Make sure everything is still good.
+ if (!DontVerify)
+ Passes.add(createVerifierPass());
+
+ // Run our queue of passes all at once now, efficiently.
+ Passes.run(*M);
+}
+
+}
diff --git a/contrib/llvm/tools/llvm-ld/llvm-ld.cpp b/contrib/llvm/tools/llvm-ld/llvm-ld.cpp
new file mode 100644
index 0000000..ecf0476
--- /dev/null
+++ b/contrib/llvm/tools/llvm-ld/llvm-ld.cpp
@@ -0,0 +1,732 @@
+//===- llvm-ld.cpp - LLVM 'ld' compatible linker --------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This utility is intended to be compatible with GCC, and follows standard
+// system 'ld' conventions. As such, the default output file is ./a.out.
+// Additionally, this program outputs a shell script that is used to invoke LLI
+// to execute the program. In this manner, the generated executable (a.out for
+// example), is directly executable, whereas the bitcode file actually lives in
+// the a.out.bc file generated by this program.
+//
+// Note that if someone (or a script) deletes the executable program generated,
+// the .bc file will be left around. Considering that this is a temporary hack,
+// I'm not too worried about this.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LinkAllVMCore.h"
+#include "llvm/Linker.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FileUtilities.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/SystemUtils.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/Signals.h"
+#include <memory>
+#include <cstring>
+using namespace llvm;
+
+// Rightly this should go in a header file but it just seems such a waste.
+namespace llvm {
+extern void Optimize(Module*);
+}
+
+// Input/Output Options
+static cl::list<std::string> InputFilenames(cl::Positional, cl::OneOrMore,
+ cl::desc("<input bitcode files>"));
+
+static cl::opt<std::string> OutputFilename("o", cl::init("a.out"),
+ cl::desc("Override output filename"),
+ cl::value_desc("filename"));
+
+static cl::opt<std::string> BitcodeOutputFilename("b", cl::init(""),
+ cl::desc("Override bitcode output filename"),
+ cl::value_desc("filename"));
+
+static cl::opt<bool> Verbose("v",
+ cl::desc("Print information about actions taken"));
+
+static cl::list<std::string> LibPaths("L", cl::Prefix,
+ cl::desc("Specify a library search path"),
+ cl::value_desc("directory"));
+
+static cl::list<std::string> FrameworkPaths("F", cl::Prefix,
+ cl::desc("Specify a framework search path"),
+ cl::value_desc("directory"));
+
+static cl::list<std::string> Libraries("l", cl::Prefix,
+ cl::desc("Specify libraries to link to"),
+ cl::value_desc("library prefix"));
+
+static cl::list<std::string> Frameworks("framework",
+ cl::desc("Specify frameworks to link to"),
+ cl::value_desc("framework"));
+
+// Options to control the linking, optimization, and code gen processes
+static cl::opt<bool> LinkAsLibrary("link-as-library",
+ cl::desc("Link the .bc files together as a library, not an executable"));
+
+static cl::alias Relink("r", cl::aliasopt(LinkAsLibrary),
+ cl::desc("Alias for -link-as-library"));
+
+static cl::opt<bool> Native("native",
+ cl::desc("Generate a native binary instead of a shell script"));
+
+static cl::opt<bool> NativeCBE("native-cbe",
+ cl::desc("Generate a native binary with the C backend and GCC"));
+
+static cl::list<std::string> PostLinkOpts("post-link-opts",
+ cl::value_desc("path"),
+ cl::desc("Run one or more optimization programs after linking"));
+
+static cl::list<std::string> XLinker("Xlinker", cl::value_desc("option"),
+ cl::desc("Pass options to the system linker"));
+
+// Compatibility options that llvm-ld ignores but are supported for
+// compatibility with LD
+static cl::opt<std::string> CO3("soname", cl::Hidden,
+ cl::desc("Compatibility option: ignored"));
+
+static cl::opt<std::string> CO4("version-script", cl::Hidden,
+ cl::desc("Compatibility option: ignored"));
+
+static cl::opt<bool> CO5("eh-frame-hdr", cl::Hidden,
+ cl::desc("Compatibility option: ignored"));
+
+static cl::opt<std::string> CO6("h", cl::Hidden,
+ cl::desc("Compatibility option: ignored"));
+
+static cl::opt<bool> CO7("start-group", cl::Hidden,
+ cl::desc("Compatibility option: ignored"));
+
+static cl::opt<bool> CO8("end-group", cl::Hidden,
+ cl::desc("Compatibility option: ignored"));
+
+static cl::opt<std::string> CO9("m", cl::Hidden,
+ cl::desc("Compatibility option: ignored"));
+
+/// This is just for convenience so it doesn't have to be passed around
+/// everywhere.
+static std::string progname;
+
+/// FileRemover objects to clean up output files in the event of an error.
+static FileRemover OutputRemover;
+static FileRemover BitcodeOutputRemover;
+
+/// PrintAndExit - Prints a message to standard error and exits with error code
+///
+/// Inputs:
+/// Message - The message to print to standard error.
+///
+static void PrintAndExit(const std::string &Message, Module *M, int errcode = 1) {
+ errs() << progname << ": " << Message << "\n";
+ delete M;
+ llvm_shutdown();
+ exit(errcode);
+}
+
+static void PrintCommand(const std::vector<const char*> &args) {
+ std::vector<const char*>::const_iterator I = args.begin(), E = args.end();
+ for (; I != E; ++I)
+ if (*I)
+ errs() << "'" << *I << "'" << " ";
+ errs() << "\n";
+}
+
+/// CopyEnv - This function takes an array of environment variables and makes a
+/// copy of it. This copy can then be manipulated any way the caller likes
+/// without affecting the process's real environment.
+///
+/// Inputs:
+/// envp - An array of C strings containing an environment.
+///
+/// Return value:
+/// NULL - An error occurred.
+///
+/// Otherwise, a pointer to a new array of C strings is returned. Every string
+/// in the array is a duplicate of the one in the original array (i.e. we do
+/// not copy the char *'s from one array to another).
+///
+static char ** CopyEnv(char ** const envp) {
+  // Count the number of entries in the old list.
+ unsigned entries; // The number of entries in the old environment list
+ for (entries = 0; envp[entries] != NULL; entries++)
+ /*empty*/;
+
+ // Add one more entry for the NULL pointer that ends the list.
+ ++entries;
+
+ // If there are no entries at all, just return NULL.
+ if (entries == 0)
+ return NULL;
+
+ // Allocate a new environment list.
+ char **newenv = new char* [entries];
+ if (newenv == NULL)
+ return NULL;
+
+ // Make a copy of the list. Don't forget the NULL that ends the list.
+ entries = 0;
+ while (envp[entries] != NULL) {
+ size_t len = strlen(envp[entries]) + 1;
+ newenv[entries] = new char[len];
+ memcpy(newenv[entries], envp[entries], len);
+ ++entries;
+ }
+ newenv[entries] = NULL;
+
+ return newenv;
+}
+
+
+/// RemoveEnv - Remove the specified environment variable from the environment
+/// array.
+///
+/// Inputs:
+/// name - The name of the variable to remove. It cannot be NULL.
+/// envp - The array of environment variables. It cannot be NULL.
+///
+/// Notes:
+/// This is mainly done because functions to remove items from the environment
+/// are not available across all platforms. In particular, Solaris does not
+/// seem to have an unsetenv() function or a setenv() function (or they are
+/// undocumented if they do exist).
+///
+static void RemoveEnv(const char * name, char ** const envp) {
+ for (unsigned index=0; envp[index] != NULL; index++) {
+ // Find the first equals sign in the array and make it an EOS character.
+ char *p = strchr (envp[index], '=');
+ if (p == NULL)
+ continue;
+ else
+ *p = '\0';
+
+ // Compare the two strings. If they are equal, zap this string.
+ // Otherwise, restore it.
+ if (!strcmp(name, envp[index]))
+ *envp[index] = '\0';
+ else
+ *p = '=';
+ }
+
+ return;
+}
+
+/// GenerateBitcode - generates a bitcode file from the module provided
+void GenerateBitcode(Module* M, const std::string& FileName) {
+
+ if (Verbose)
+ errs() << "Generating Bitcode To " << FileName << '\n';
+
+ // Create the output file.
+ std::string ErrorInfo;
+ tool_output_file Out(FileName.c_str(), ErrorInfo,
+ raw_fd_ostream::F_Binary);
+ if (!ErrorInfo.empty()) {
+ PrintAndExit(ErrorInfo, M);
+ return;
+ }
+
+ // Write it out
+ WriteBitcodeToFile(M, Out.os());
+ Out.keep();
+}
+
+/// GenerateAssembly - generates a native assembly language source file from the
+/// specified bitcode file.
+///
+/// Inputs:
+/// InputFilename - The name of the input bitcode file.
+/// OutputFilename - The name of the file to generate.
+/// llc - The pathname to use for LLC.
+/// envp - The environment to use when running LLC.
+///
+/// Return non-zero value on error.
+///
+static int GenerateAssembly(const std::string &OutputFilename,
+ const std::string &InputFilename,
+ const sys::Path &llc,
+ std::string &ErrMsg ) {
+ // Run LLC to convert the bitcode file into assembly code.
+ std::vector<const char*> args;
+ args.push_back(llc.c_str());
+ // We will use GCC to assemble the program so set the assembly syntax to AT&T,
+ // regardless of what the target in the bitcode file is.
+ args.push_back("-x86-asm-syntax=att");
+ args.push_back("-o");
+ args.push_back(OutputFilename.c_str());
+ args.push_back(InputFilename.c_str());
+ args.push_back(0);
+
+ if (Verbose) {
+ errs() << "Generating Assembly With: \n";
+ PrintCommand(args);
+ }
+
+ return sys::Program::ExecuteAndWait(llc, &args[0], 0, 0, 0, 0, &ErrMsg);
+}
+
+/// GenerateCFile - generates a C source file from the specified bitcode file.
+static int GenerateCFile(const std::string &OutputFile,
+ const std::string &InputFile,
+ const sys::Path &llc,
+ std::string& ErrMsg) {
+ // Run LLC to convert the bitcode file into C.
+ std::vector<const char*> args;
+ args.push_back(llc.c_str());
+ args.push_back("-march=c");
+ args.push_back("-o");
+ args.push_back(OutputFile.c_str());
+ args.push_back(InputFile.c_str());
+ args.push_back(0);
+
+ if (Verbose) {
+ errs() << "Generating C Source With: \n";
+ PrintCommand(args);
+ }
+
+ return sys::Program::ExecuteAndWait(llc, &args[0], 0, 0, 0, 0, &ErrMsg);
+}
+
+/// GenerateNative - generates a native object file from the
+/// specified bitcode file.
+///
+/// Inputs:
+/// InputFilename - The name of the input bitcode file.
+/// OutputFilename - The name of the file to generate.
+/// NativeLinkItems - The native libraries, files, code with which to link
+/// LibPaths - The list of directories in which to find libraries.
+/// FrameworksPaths - The list of directories in which to find frameworks.
+/// Frameworks - The list of frameworks (dynamic libraries)
+///  gcc - The pathname to use for GCC.
+/// envp - A copy of the process's current environment.
+///
+/// Outputs:
+/// None.
+///
+/// Returns non-zero value on error.
+///
+static int GenerateNative(const std::string &OutputFilename,
+ const std::string &InputFilename,
+ const Linker::ItemList &LinkItems,
+ const sys::Path &gcc, char ** const envp,
+ std::string& ErrMsg) {
+ // Remove these environment variables from the environment of the
+ // programs that we will execute. It appears that GCC sets these
+ // environment variables so that the programs it uses can configure
+ // themselves identically.
+ //
+ // However, when we invoke GCC below, we want it to use its normal
+ // configuration. Hence, we must sanitize its environment.
+ char ** clean_env = CopyEnv(envp);
+ if (clean_env == NULL)
+ return 1;
+ RemoveEnv("LIBRARY_PATH", clean_env);
+ RemoveEnv("COLLECT_GCC_OPTIONS", clean_env);
+ RemoveEnv("GCC_EXEC_PREFIX", clean_env);
+ RemoveEnv("COMPILER_PATH", clean_env);
+ RemoveEnv("COLLECT_GCC", clean_env);
+
+ // Run GCC to assemble and link the program into native code.
+ //
+ // Note:
+ // We can't just assemble and link the file with the system assembler
+ // and linker because we don't know where to put the _start symbol.
+ // GCC mysteriously knows how to do it.
+ std::vector<std::string> args;
+ args.push_back(gcc.c_str());
+ args.push_back("-fno-strict-aliasing");
+ args.push_back("-O3");
+ args.push_back("-o");
+ args.push_back(OutputFilename);
+ args.push_back(InputFilename);
+
+ // Add in the library and framework paths
+ for (unsigned index = 0; index < LibPaths.size(); index++) {
+ args.push_back("-L" + LibPaths[index]);
+ }
+ for (unsigned index = 0; index < FrameworkPaths.size(); index++) {
+ args.push_back("-F" + FrameworkPaths[index]);
+ }
+
+ // Add the requested options
+ for (unsigned index = 0; index < XLinker.size(); index++)
+ args.push_back(XLinker[index]);
+
+ // Add in the libraries to link.
+ for (unsigned index = 0; index < LinkItems.size(); index++)
+ if (LinkItems[index].first != "crtend") {
+ if (LinkItems[index].second)
+ args.push_back("-l" + LinkItems[index].first);
+ else
+ args.push_back(LinkItems[index].first);
+ }
+
+ // Add in frameworks to link.
+ for (unsigned index = 0; index < Frameworks.size(); index++) {
+ args.push_back("-framework");
+ args.push_back(Frameworks[index]);
+ }
+
+ // Now that "args" owns all the std::strings for the arguments, call the c_str
+ // method to get the underlying character arrays. We do this so that the
+ // std::string array is guaranteed to outlive the const char* array.
+ std::vector<const char *> Args;
+ for (unsigned i = 0, e = args.size(); i != e; ++i)
+ Args.push_back(args[i].c_str());
+ Args.push_back(0);
+
+ if (Verbose) {
+ errs() << "Generating Native Executable With:\n";
+ PrintCommand(Args);
+ }
+
+ // Run the compiler to assemble and link the program.
+ int R = sys::Program::ExecuteAndWait(
+ gcc, &Args[0], const_cast<const char **>(clean_env), 0, 0, 0, &ErrMsg);
+ delete [] clean_env;
+ return R;
+}
+
+/// EmitShellScript - Output the wrapper file that invokes the JIT on the LLVM
+/// bitcode file for the program.
+static void EmitShellScript(char **argv, Module *M) {
+ if (Verbose)
+ errs() << "Emitting Shell Script\n";
+#if defined(_WIN32)
+ // Windows doesn't support #!/bin/sh style shell scripts in .exe files. To
+ // support windows systems, we copy the llvm-stub.exe executable from the
+ // build tree to the destination file.
+ std::string ErrMsg;
+ sys::Path llvmstub = PrependMainExecutablePath("llvm-stub", argv[0],
+ (void *)(intptr_t)&Optimize);
+ if (llvmstub.isEmpty())
+ PrintAndExit("Could not find llvm-stub.exe executable!", M);
+
+ if (0 != sys::CopyFile(sys::Path(OutputFilename), llvmstub, &ErrMsg))
+ PrintAndExit(ErrMsg, M);
+
+ return;
+#else
+
+ // Output the script to start the program...
+ std::string ErrorInfo;
+ tool_output_file Out2(OutputFilename.c_str(), ErrorInfo);
+ if (!ErrorInfo.empty())
+ PrintAndExit(ErrorInfo, M);
+
+ Out2.os() << "#!/bin/sh\n";
+ // Allow user to setenv LLVMINTERP if lli is not in their PATH.
+ Out2.os() << "lli=${LLVMINTERP-lli}\n";
+ Out2.os() << "exec $lli \\\n";
+ // gcc accepts -l<lib> and implicitly searches /lib and /usr/lib.
+ LibPaths.push_back("/lib");
+ LibPaths.push_back("/usr/lib");
+ LibPaths.push_back("/usr/X11R6/lib");
+ // We don't need to link in libc! In fact, /usr/lib/libc.so may not be a
+ // shared object at all; on Red Hat 8 it is a plain-text linker script.
+ std::vector<std::string>::iterator libc =
+ std::find(Libraries.begin(), Libraries.end(), "c");
+ if (libc != Libraries.end()) Libraries.erase(libc);
+ // List all the shared object (native) libraries this executable will need
+ // on the command line, so that we don't have to do this manually!
+ for (std::vector<std::string>::iterator i = Libraries.begin(),
+ e = Libraries.end(); i != e; ++i) {
+ // try explicit -L arguments first:
+ sys::Path FullLibraryPath;
+ for (cl::list<std::string>::const_iterator P = LibPaths.begin(),
+ E = LibPaths.end(); P != E; ++P) {
+ FullLibraryPath = *P;
+ FullLibraryPath.appendComponent("lib" + *i);
+ FullLibraryPath.appendSuffix(sys::Path::GetDLLSuffix());
+ if (!FullLibraryPath.isEmpty()) {
+ if (!FullLibraryPath.isDynamicLibrary()) {
+ // Not a native shared library; mark as invalid
+ FullLibraryPath = sys::Path();
+ } else break;
+ }
+ }
+ if (FullLibraryPath.isEmpty())
+ FullLibraryPath = sys::Path::FindLibrary(*i);
+ if (!FullLibraryPath.isEmpty())
+ Out2.os() << " -load=" << FullLibraryPath.str() << " \\\n";
+ }
+ Out2.os() << " " << BitcodeOutputFilename << " ${1+\"$@\"}\n";
+ Out2.keep();
+#endif
+}
+
+// BuildLinkItems - This function generates a Linker::ItemList for the
+// LinkInItems linker function by combining the Files and Libraries in the
+// order they were declared on the command line.
+static void BuildLinkItems(
+ Linker::ItemList& Items,
+ const cl::list<std::string>& Files,
+ const cl::list<std::string>& Libraries) {
+
+ // Build the list of linkage items for LinkItems.
+
+ cl::list<std::string>::const_iterator fileIt = Files.begin();
+ cl::list<std::string>::const_iterator libIt = Libraries.begin();
+
+ int libPos = -1, filePos = -1;
+ while ( libIt != Libraries.end() || fileIt != Files.end() ) {
+ if (libIt != Libraries.end())
+ libPos = Libraries.getPosition(libIt - Libraries.begin());
+ else
+ libPos = -1;
+ if (fileIt != Files.end())
+ filePos = Files.getPosition(fileIt - Files.begin());
+ else
+ filePos = -1;
+
+ if (filePos != -1 && (libPos == -1 || filePos < libPos)) {
+ // Add a source file
+ Items.push_back(std::make_pair(*fileIt++, false));
+ } else if (libPos != -1 && (filePos == -1 || libPos < filePos)) {
+ // Add a library
+ Items.push_back(std::make_pair(*libIt++, true));
+ }
+ }
+}
+
+int main(int argc, char **argv, char **envp) {
+ // Print a stack trace if we signal out.
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(argc, argv);
+
+ LLVMContext &Context = getGlobalContext();
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+
+ // Initialize passes
+ PassRegistry &Registry = *PassRegistry::getPassRegistry();
+ initializeCore(Registry);
+ initializeScalarOpts(Registry);
+ initializeIPO(Registry);
+ initializeAnalysis(Registry);
+ initializeIPA(Registry);
+ initializeTransformUtils(Registry);
+ initializeInstCombine(Registry);
+ initializeTarget(Registry);
+
+ // Initialize the global variable above for convenient printing of the program name.
+ progname = sys::path::stem(argv[0]);
+
+ // Parse the command line options
+ cl::ParseCommandLineOptions(argc, argv, "llvm linker\n");
+
+#if defined(_WIN32) || defined(__CYGWIN__)
+ if (!LinkAsLibrary) {
+ // Default to "a.exe" instead of "a.out".
+ if (OutputFilename.getNumOccurrences() == 0)
+ OutputFilename = "a.exe";
+
+ // If there is no suffix add an "exe" one.
+ if (sys::path::extension(OutputFilename).empty())
+ OutputFilename.append(".exe");
+ }
+#endif
+
+ // Generate the bitcode for the optimized module.
+ // If -b wasn't specified, use the name specified
+ // with -o to construct BitcodeOutputFilename.
+ if (BitcodeOutputFilename.empty()) {
+ BitcodeOutputFilename = OutputFilename;
+ if (!LinkAsLibrary) BitcodeOutputFilename += ".bc";
+ }
+
+ // Arrange for the bitcode output file to be deleted on any errors.
+ BitcodeOutputRemover.setFile(BitcodeOutputFilename);
+ sys::RemoveFileOnSignal(sys::Path(BitcodeOutputFilename));
+
+ // Arrange for the output file to be deleted on any errors.
+ if (!LinkAsLibrary) {
+ OutputRemover.setFile(OutputFilename);
+ sys::RemoveFileOnSignal(sys::Path(OutputFilename));
+ }
+
+ // Construct a Linker (now that Verbose is set)
+ Linker TheLinker(progname, OutputFilename, Context, Verbose);
+
+ // Keep track of the native link items (versus the bitcode items)
+ Linker::ItemList NativeLinkItems;
+
+ // Add library paths to the linker
+ TheLinker.addPaths(LibPaths);
+ TheLinker.addSystemPaths();
+
+ // Remove any consecutive duplicates of the same library...
+ Libraries.erase(std::unique(Libraries.begin(), Libraries.end()),
+ Libraries.end());
+
+ if (LinkAsLibrary) {
+ std::vector<sys::Path> Files;
+ for (unsigned i = 0; i < InputFilenames.size(); ++i )
+ Files.push_back(sys::Path(InputFilenames[i]));
+ if (TheLinker.LinkInFiles(Files))
+ return 1; // Error already printed
+
+ // The libraries aren't linked in but are noted as "dependent" in the
+ // module.
+ for (cl::list<std::string>::const_iterator I = Libraries.begin(),
+ E = Libraries.end(); I != E ; ++I) {
+ TheLinker.getModule()->addLibrary(*I);
+ }
+ } else {
+ // Build a list of the items from our command line
+ Linker::ItemList Items;
+ BuildLinkItems(Items, InputFilenames, Libraries);
+
+ // Link all the items together
+ if (TheLinker.LinkInItems(Items, NativeLinkItems) )
+ return 1; // Error already printed
+ }
+
+ std::auto_ptr<Module> Composite(TheLinker.releaseModule());
+
+ // Optimize the module
+ Optimize(Composite.get());
+
+ // Generate the bitcode output.
+ GenerateBitcode(Composite.get(), BitcodeOutputFilename);
+
+ // If we are not linking a library, generate either a native executable
+ // or a JIT shell script, depending upon what the user wants.
+ if (!LinkAsLibrary) {
+ // If the user wants to run a post-link optimization, run it now.
+ if (!PostLinkOpts.empty()) {
+ std::vector<std::string> opts = PostLinkOpts;
+ for (std::vector<std::string>::iterator I = opts.begin(),
+ E = opts.end(); I != E; ++I) {
+ sys::Path prog(*I);
+ if (!prog.canExecute()) {
+ prog = sys::Program::FindProgramByName(*I);
+ if (prog.isEmpty())
+ PrintAndExit(std::string("Optimization program '") + *I +
+ "' is not found or not executable.", Composite.get());
+ }
+ // Get the program arguments
+ sys::Path tmp_output("opt_result");
+ std::string ErrMsg;
+ if (tmp_output.createTemporaryFileOnDisk(true, &ErrMsg))
+ PrintAndExit(ErrMsg, Composite.get());
+
+ const char* args[4];
+ args[0] = I->c_str();
+ args[1] = BitcodeOutputFilename.c_str();
+ args[2] = tmp_output.c_str();
+ args[3] = 0;
+ if (0 == sys::Program::ExecuteAndWait(prog, args, 0,0,0,0, &ErrMsg)) {
+ if (tmp_output.isBitcodeFile()) {
+ sys::Path target(BitcodeOutputFilename);
+ target.eraseFromDisk();
+ if (tmp_output.renamePathOnDisk(target, &ErrMsg))
+ PrintAndExit(ErrMsg, Composite.get(), 2);
+ } else
+ PrintAndExit("Post-link optimization output is not bitcode",
+ Composite.get());
+ } else {
+ PrintAndExit(ErrMsg, Composite.get());
+ }
+ }
+ }
+
+ // If the user wants to generate a native executable, compile it from the
+ // bitcode file.
+ //
+ // Otherwise, create a script that will run the bitcode through the JIT.
+ if (Native) {
+ // Name of the Assembly Language output file
+ sys::Path AssemblyFile ( OutputFilename);
+ AssemblyFile.appendSuffix("s");
+
+ // Mark the output files for removal.
+ FileRemover AssemblyFileRemover(AssemblyFile.str());
+ sys::RemoveFileOnSignal(AssemblyFile);
+
+ // Determine the locations of the llc and gcc programs.
+ sys::Path llc = PrependMainExecutablePath("llc", argv[0],
+ (void *)(intptr_t)&Optimize);
+ if (llc.isEmpty())
+ PrintAndExit("Failed to find llc", Composite.get());
+
+ sys::Path gcc = sys::Program::FindProgramByName("gcc");
+ if (gcc.isEmpty())
+ PrintAndExit("Failed to find gcc", Composite.get());
+
+ // Generate an assembly language file for the bitcode.
+ std::string ErrMsg;
+ if (0 != GenerateAssembly(AssemblyFile.str(), BitcodeOutputFilename,
+ llc, ErrMsg))
+ PrintAndExit(ErrMsg, Composite.get());
+
+ if (0 != GenerateNative(OutputFilename, AssemblyFile.str(),
+ NativeLinkItems, gcc, envp, ErrMsg))
+ PrintAndExit(ErrMsg, Composite.get());
+ } else if (NativeCBE) {
+ sys::Path CFile (OutputFilename);
+ CFile.appendSuffix("cbe.c");
+
+ // Mark the output files for removal.
+ FileRemover CFileRemover(CFile.str());
+ sys::RemoveFileOnSignal(CFile);
+
+ // Determine the locations of the llc and gcc programs.
+ sys::Path llc = PrependMainExecutablePath("llc", argv[0],
+ (void *)(intptr_t)&Optimize);
+ if (llc.isEmpty())
+ PrintAndExit("Failed to find llc", Composite.get());
+
+ sys::Path gcc = sys::Program::FindProgramByName("gcc");
+ if (gcc.isEmpty())
+ PrintAndExit("Failed to find gcc", Composite.get());
+
+ // Generate a C source file for the bitcode.
+ std::string ErrMsg;
+ if (GenerateCFile(CFile.str(), BitcodeOutputFilename, llc, ErrMsg))
+ PrintAndExit(ErrMsg, Composite.get());
+
+ if (GenerateNative(OutputFilename, CFile.str(),
+ NativeLinkItems, gcc, envp, ErrMsg))
+ PrintAndExit(ErrMsg, Composite.get());
+ } else {
+ EmitShellScript(argv, Composite.get());
+ }
+
+ // Make the script executable...
+ std::string ErrMsg;
+ if (sys::Path(OutputFilename).makeExecutableOnDisk(&ErrMsg))
+ PrintAndExit(ErrMsg, Composite.get());
+
+ // Make the bitcode file readable and directly executable in LLEE as well
+ if (sys::Path(BitcodeOutputFilename).makeExecutableOnDisk(&ErrMsg))
+ PrintAndExit(ErrMsg, Composite.get());
+
+ if (sys::Path(BitcodeOutputFilename).makeReadableOnDisk(&ErrMsg))
+ PrintAndExit(ErrMsg, Composite.get());
+ }
+
+ // Operations which may fail are now complete.
+ BitcodeOutputRemover.releaseFile();
+ if (!LinkAsLibrary)
+ OutputRemover.releaseFile();
+
+ // Graceful exit
+ return 0;
+}
diff --git a/contrib/llvm/tools/llvm-link/llvm-link.cpp b/contrib/llvm/tools/llvm-link/llvm-link.cpp
new file mode 100644
index 0000000..378a833
--- /dev/null
+++ b/contrib/llvm/tools/llvm-link/llvm-link.cpp
@@ -0,0 +1,142 @@
+//===- llvm-link.cpp - Low-level LLVM linker ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This utility may be invoked in the following manner:
+// llvm-link a.bc b.bc c.bc -o x.bc
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Linker.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/SystemUtils.h"
+#include "llvm/Support/IRReader.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/Path.h"
+#include <memory>
+using namespace llvm;
+
+static cl::list<std::string>
+InputFilenames(cl::Positional, cl::OneOrMore,
+ cl::desc("<input bitcode files>"));
+
+static cl::opt<std::string>
+OutputFilename("o", cl::desc("Override output filename"), cl::init("-"),
+ cl::value_desc("filename"));
+
+static cl::opt<bool>
+Force("f", cl::desc("Enable binary output on terminals"));
+
+static cl::opt<bool>
+OutputAssembly("S",
+ cl::desc("Write output as LLVM assembly"), cl::Hidden);
+
+static cl::opt<bool>
+Verbose("v", cl::desc("Print information about actions taken"));
+
+static cl::opt<bool>
+DumpAsm("d", cl::desc("Print assembly as linked"), cl::Hidden);
+
+// LoadFile - Read the specified bitcode or IR file and return it, printing an
+// error and returning a null module if the file cannot be parsed.
+//
+static inline std::auto_ptr<Module> LoadFile(const char *argv0,
+ const std::string &FN,
+ LLVMContext& Context) {
+ sys::Path Filename;
+ if (!Filename.set(FN)) {
+ errs() << "Invalid file name: '" << FN << "'\n";
+ return std::auto_ptr<Module>();
+ }
+
+ SMDiagnostic Err;
+ if (Verbose) errs() << "Loading '" << Filename.c_str() << "'\n";
+ Module* Result = 0;
+
+ const std::string &FNStr = Filename.str();
+ Result = ParseIRFile(FNStr, Err, Context);
+ if (Result) return std::auto_ptr<Module>(Result); // Load successful!
+
+ Err.print(argv0, errs());
+ return std::auto_ptr<Module>();
+}
+
+int main(int argc, char **argv) {
+ // Print a stack trace if we signal out.
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(argc, argv);
+
+ LLVMContext &Context = getGlobalContext();
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+ cl::ParseCommandLineOptions(argc, argv, "llvm linker\n");
+
+ unsigned BaseArg = 0;
+ std::string ErrorMessage;
+
+ std::auto_ptr<Module> Composite(LoadFile(argv[0],
+ InputFilenames[BaseArg], Context));
+ if (Composite.get() == 0) {
+ errs() << argv[0] << ": error loading file '"
+ << InputFilenames[BaseArg] << "'\n";
+ return 1;
+ }
+
+ for (unsigned i = BaseArg+1; i < InputFilenames.size(); ++i) {
+ std::auto_ptr<Module> M(LoadFile(argv[0],
+ InputFilenames[i], Context));
+ if (M.get() == 0) {
+ errs() << argv[0] << ": error loading file '" <<InputFilenames[i]<< "'\n";
+ return 1;
+ }
+
+ if (Verbose) errs() << "Linking in '" << InputFilenames[i] << "'\n";
+
+ if (Linker::LinkModules(Composite.get(), M.get(), Linker::DestroySource,
+ &ErrorMessage)) {
+ errs() << argv[0] << ": link error in '" << InputFilenames[i]
+ << "': " << ErrorMessage << "\n";
+ return 1;
+ }
+ }
+
+ // TODO: Iterate over the -l list and link in any modules containing
+ // global symbols that have not been resolved so far.
+
+ if (DumpAsm) errs() << "Here's the assembly:\n" << *Composite;
+
+ std::string ErrorInfo;
+ tool_output_file Out(OutputFilename.c_str(), ErrorInfo,
+ raw_fd_ostream::F_Binary);
+ if (!ErrorInfo.empty()) {
+ errs() << ErrorInfo << '\n';
+ return 1;
+ }
+
+ if (verifyModule(*Composite)) {
+ errs() << argv[0] << ": linked module is broken!\n";
+ return 1;
+ }
+
+ if (Verbose) errs() << "Writing bitcode...\n";
+ if (OutputAssembly) {
+ Out.os() << *Composite;
+ } else if (Force || !CheckBitcodeOutputToConsole(Out.os(), true))
+ WriteBitcodeToFile(Composite.get(), Out.os());
+
+ // Declare success.
+ Out.keep();
+
+ return 0;
+}
diff --git a/contrib/llvm/tools/llvm-mc/Disassembler.cpp b/contrib/llvm/tools/llvm-mc/Disassembler.cpp
new file mode 100644
index 0000000..a8cd7c1
--- /dev/null
+++ b/contrib/llvm/tools/llvm-mc/Disassembler.cpp
@@ -0,0 +1,382 @@
+//===- Disassembler.cpp - Disassembler for hex strings --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class implements the disassembler of strings of bytes written in
+// hexadecimal, from standard input or from a file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "Disassembler.h"
+#include "../../lib/MC/MCDisassembler/EDDisassembler.h"
+#include "../../lib/MC/MCDisassembler/EDInst.h"
+#include "../../lib/MC/MCDisassembler/EDOperand.h"
+#include "../../lib/MC/MCDisassembler/EDToken.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/MemoryObject.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+typedef std::vector<std::pair<unsigned char, const char*> > ByteArrayTy;
+
+namespace {
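+// VectorMemoryObject - Wraps the parsed (byte, source location) pairs in a
+// MemoryObject so the MCDisassembler can read from them by index.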
+class VectorMemoryObject : public MemoryObject {
+private:
+ const ByteArrayTy &Bytes;
+public:
+ VectorMemoryObject(const ByteArrayTy &bytes) : Bytes(bytes) {}
+
+ uint64_t getBase() const { return 0; }
+ uint64_t getExtent() const { return Bytes.size(); }
+
+ int readByte(uint64_t Addr, uint8_t *Byte) const {
+ if (Addr >= getExtent())
+ return -1;
+ *Byte = Bytes[Addr].first;
+ return 0;
+ }
+};
+}
+
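+// PrintInsts - Disassemble every instruction in Bytes and print it, reporting
+// invalid or questionable encodings through the SourceMgr.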
+static bool PrintInsts(const MCDisassembler &DisAsm,
+ MCInstPrinter &Printer, const ByteArrayTy &Bytes,
+ SourceMgr &SM, raw_ostream &Out) {
+ // Wrap the vector in a MemoryObject.
+ VectorMemoryObject memoryObject(Bytes);
+
+ // Disassemble it to strings.
+ uint64_t Size;
+ uint64_t Index;
+
+ for (Index = 0; Index < Bytes.size(); Index += Size) {
+ MCInst Inst;
+
+ MCDisassembler::DecodeStatus S;
+ S = DisAsm.getInstruction(Inst, Size, memoryObject, Index,
+ /*REMOVE*/ nulls(), nulls());
+ switch (S) {
+ case MCDisassembler::Fail:
+ SM.PrintMessage(SMLoc::getFromPointer(Bytes[Index].second),
+ SourceMgr::DK_Warning,
+ "invalid instruction encoding");
+ if (Size == 0)
+ Size = 1; // skip illegible bytes
+ break;
+
+ case MCDisassembler::SoftFail:
+ SM.PrintMessage(SMLoc::getFromPointer(Bytes[Index].second),
+ SourceMgr::DK_Warning,
+ "potentially undefined instruction encoding");
+ // Fall through
+
+ case MCDisassembler::Success:
+ Printer.printInst(&Inst, Out, "");
+ Out << "\n";
+ break;
+ }
+ }
+
+ return false;
+}
+
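+// ByteArrayFromString - Parse whitespace- and comment-separated byte tokens
+// from Str into ByteArray, remembering where each byte came from so later
+// diagnostics can point back at the input.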
+static bool ByteArrayFromString(ByteArrayTy &ByteArray,
+ StringRef &Str,
+ SourceMgr &SM) {
+ while (!Str.empty()) {
+ // Strip horizontal whitespace.
+ if (size_t Pos = Str.find_first_not_of(" \t\r")) {
+ Str = Str.substr(Pos);
+ continue;
+ }
+
+ // If this is the end of a line or start of a comment, remove the rest of
+ // the line.
+ if (Str[0] == '\n' || Str[0] == '#') {
+ // Strip to the end of line if we already processed any bytes on this
+ // line. This strips the comment and/or the \n.
+ if (Str[0] == '\n') {
+ Str = Str.substr(1);
+ } else {
+ Str = Str.substr(Str.find_first_of('\n'));
+ if (!Str.empty())
+ Str = Str.substr(1);
+ }
+ continue;
+ }
+
+ // Get the current token.
+ size_t Next = Str.find_first_of(" \t\n\r#");
+ StringRef Value = Str.substr(0, Next);
+
+ // Convert to a byte and add to the byte vector.
+ unsigned ByteVal;
+ if (Value.getAsInteger(0, ByteVal) || ByteVal > 255) {
+ // If we have an error, print it and skip to the end of line.
+ SM.PrintMessage(SMLoc::getFromPointer(Value.data()), SourceMgr::DK_Error,
+ "invalid input token");
+ Str = Str.substr(Str.find('\n'));
+ ByteArray.clear();
+ continue;
+ }
+
+ ByteArray.push_back(std::make_pair((unsigned char)ByteVal, Value.data()));
+ Str = Str.substr(Next);
+ }
+
+ return false;
+}
+
+int Disassembler::disassemble(const Target &T,
+ const std::string &Triple,
+ const std::string &Cpu,
+ const std::string &FeaturesStr,
+ MemoryBuffer &Buffer,
+ raw_ostream &Out) {
+ // Set up disassembler.
+ OwningPtr<const MCAsmInfo> AsmInfo(T.createMCAsmInfo(Triple));
+
+ if (!AsmInfo) {
+ errs() << "error: no assembly info for target " << Triple << "\n";
+ return -1;
+ }
+
+ OwningPtr<const MCSubtargetInfo> STI(T.createMCSubtargetInfo(Triple, Cpu,
+ FeaturesStr));
+ if (!STI) {
+ errs() << "error: no subtarget info for target " << Triple << "\n";
+ return -1;
+ }
+
+ OwningPtr<const MCDisassembler> DisAsm(T.createMCDisassembler(*STI));
+ if (!DisAsm) {
+ errs() << "error: no disassembler for target " << Triple << "\n";
+ return -1;
+ }
+
+ OwningPtr<const MCRegisterInfo> MRI(T.createMCRegInfo(Triple));
+ if (!MRI) {
+ errs() << "error: no register info for target " << Triple << "\n";
+ return -1;
+ }
+
+ OwningPtr<const MCInstrInfo> MII(T.createMCInstrInfo());
+ if (!MII) {
+ errs() << "error: no instruction info for target " << Triple << "\n";
+ return -1;
+ }
+
+ int AsmPrinterVariant = AsmInfo->getAssemblerDialect();
+ OwningPtr<MCInstPrinter> IP(T.createMCInstPrinter(AsmPrinterVariant, *AsmInfo,
+ *MII, *MRI, *STI));
+ if (!IP) {
+ errs() << "error: no instruction printer for target " << Triple << '\n';
+ return -1;
+ }
+
+ bool ErrorOccurred = false;
+
+ SourceMgr SM;
+ SM.AddNewSourceBuffer(&Buffer, SMLoc());
+
+ // Convert the input to a vector for disassembly.
+ ByteArrayTy ByteArray;
+ StringRef Str = Buffer.getBuffer();
+
+ ErrorOccurred |= ByteArrayFromString(ByteArray, Str, SM);
+
+ if (!ByteArray.empty())
+ ErrorOccurred |= PrintInsts(*DisAsm, *IP, ByteArray, SM, Out);
+
+ return ErrorOccurred;
+}
+
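+// byteArrayReader - Byte-fetch callback handed to EDDisassembler::createInst;
+// reads the byte at offset A from the ByteArrayTy passed through Arg.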
+static int byteArrayReader(uint8_t *B, uint64_t A, void *Arg) {
+ ByteArrayTy &ByteArray = *((ByteArrayTy*)Arg);
+
+ if (A >= ByteArray.size())
+ return -1;
+
+ *B = ByteArray[A].first;
+
+ return 0;
+}
+
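+// verboseEvaluator - Register-read callback for EDOperand::evaluate; it
+// annotates the register in the output stream and reports a value of zero.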
+static int verboseEvaluator(uint64_t *V, unsigned R, void *Arg) {
+ EDDisassembler &disassembler = *(EDDisassembler *)((void **)Arg)[0];
+ raw_ostream &Out = *(raw_ostream *)((void **)Arg)[1];
+
+ if (const char *regName = disassembler.nameWithRegisterID(R))
+ Out << "[" << regName << "/" << R << "]";
+
+ if (disassembler.registerIsStackPointer(R))
+ Out << "(sp)";
+ if (disassembler.registerIsProgramCounter(R))
+ Out << "(pc)";
+
+ *V = 0;
+ return 0;
+}
+
+int Disassembler::disassembleEnhanced(const std::string &TS,
+ MemoryBuffer &Buffer,
+ raw_ostream &Out) {
+ ByteArrayTy ByteArray;
+ StringRef Str = Buffer.getBuffer();
+ SourceMgr SM;
+
+ SM.AddNewSourceBuffer(&Buffer, SMLoc());
+
+ if (ByteArrayFromString(ByteArray, Str, SM)) {
+ return -1;
+ }
+
+ Triple T(TS);
+ EDDisassembler::AssemblySyntax AS;
+
+ switch (T.getArch()) {
+ default:
+ errs() << "error: no default assembly syntax for " << TS.c_str() << "\n";
+ return -1;
+ case Triple::arm:
+ case Triple::thumb:
+ AS = EDDisassembler::kEDAssemblySyntaxARMUAL;
+ break;
+ case Triple::x86:
+ case Triple::x86_64:
+ AS = EDDisassembler::kEDAssemblySyntaxX86ATT;
+ break;
+ }
+
+ OwningPtr<EDDisassembler>
+ disassembler(EDDisassembler::getDisassembler(TS.c_str(), AS));
+
+ if (disassembler == 0) {
+ errs() << "error: couldn't get disassembler for " << TS << '\n';
+ return -1;
+ }
+
+ while (ByteArray.size()) {
+ OwningPtr<EDInst>
+ inst(disassembler->createInst(byteArrayReader, 0, &ByteArray));
+
+ if (inst == 0) {
+ errs() << "error: Didn't get an instruction\n";
+ return -1;
+ }
+
+ ByteArray.erase (ByteArray.begin(), ByteArray.begin() + inst->byteSize());
+
+ unsigned numTokens = inst->numTokens();
+ if ((int)numTokens < 0) {
+ errs() << "error: couldn't count the instruction's tokens\n";
+ return -1;
+ }
+
+ for (unsigned tokenIndex = 0; tokenIndex != numTokens; ++tokenIndex) {
+ EDToken *token;
+
+ if (inst->getToken(token, tokenIndex)) {
+ errs() << "error: Couldn't get token\n";
+ return -1;
+ }
+
+ const char *buf;
+ if (token->getString(buf)) {
+ errs() << "error: Couldn't get string for token\n";
+ return -1;
+ }
+
+ Out << '[';
+ int operandIndex = token->operandID();
+
+ if (operandIndex >= 0)
+ Out << operandIndex << "-";
+
+ switch (token->type()) {
+ case EDToken::kTokenWhitespace: Out << "w"; break;
+ case EDToken::kTokenPunctuation: Out << "p"; break;
+ case EDToken::kTokenOpcode: Out << "o"; break;
+ case EDToken::kTokenLiteral: Out << "l"; break;
+ case EDToken::kTokenRegister: Out << "r"; break;
+ }
+
+ Out << ":" << buf;
+
+ if (token->type() == EDToken::kTokenLiteral) {
+ Out << "=";
+ if (token->literalSign())
+ Out << "-";
+ uint64_t absoluteValue;
+ if (token->literalAbsoluteValue(absoluteValue)) {
+ errs() << "error: Couldn't get the value of a literal token\n";
+ return -1;
+ }
+ Out << absoluteValue;
+ } else if (token->type() == EDToken::kTokenRegister) {
+ Out << "=";
+ unsigned regID;
+ if (token->registerID(regID)) {
+ errs() << "error: Couldn't get the ID of a register token\n";
+ return -1;
+ }
+ Out << "r" << regID;
+ }
+
+ Out << "]";
+ }
+
+ Out << " ";
+
+ if (inst->isBranch())
+ Out << "<br> ";
+ if (inst->isMove())
+ Out << "<mov> ";
+
+ unsigned numOperands = inst->numOperands();
+
+ if ((int)numOperands < 0) {
+ errs() << "error: Couldn't count operands\n";
+ return -1;
+ }
+
+ for (unsigned operandIndex = 0; operandIndex != numOperands;
+ ++operandIndex) {
+ Out << operandIndex << ":";
+
+ EDOperand *operand;
+ if (inst->getOperand(operand, operandIndex)) {
+ errs() << "error: couldn't get operand\n";
+ return -1;
+ }
+
+ uint64_t evaluatedResult;
+ void *Arg[] = { disassembler.get(), &Out };
+ if (operand->evaluate(evaluatedResult, verboseEvaluator, Arg)) {
+ errs() << "error: Couldn't evaluate an operand\n";
+ return -1;
+ }
+ Out << "=" << evaluatedResult << " ";
+ }
+
+ Out << '\n';
+ }
+
+ return 0;
+}
diff --git a/contrib/llvm/tools/llvm-mc/Disassembler.h b/contrib/llvm/tools/llvm-mc/Disassembler.h
new file mode 100644
index 0000000..e8cd92d
--- /dev/null
+++ b/contrib/llvm/tools/llvm-mc/Disassembler.h
@@ -0,0 +1,42 @@
+//===- Disassembler.h - Text File Disassembler ----------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This class implements the disassembler of strings of bytes written in
+// hexadecimal, from standard input or from a file.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef DISASSEMBLER_H
+#define DISASSEMBLER_H
+
+#include <string>
+
+namespace llvm {
+
+class MemoryBuffer;
+class Target;
+class raw_ostream;
+
+class Disassembler {
+public:
+ static int disassemble(const Target &target,
+ const std::string &tripleString,
+ const std::string &Cpu,
+ const std::string &FeaturesStr,
+ MemoryBuffer &buffer,
+ raw_ostream &Out);
+
+ static int disassembleEnhanced(const std::string &tripleString,
+ MemoryBuffer &buffer,
+ raw_ostream &Out);
+};
+
+} // namespace llvm
+
+#endif
diff --git a/contrib/llvm/tools/llvm-mc/llvm-mc.cpp b/contrib/llvm/tools/llvm-mc/llvm-mc.cpp
new file mode 100644
index 0000000..d882e01
--- /dev/null
+++ b/contrib/llvm/tools/llvm-mc/llvm-mc.cpp
@@ -0,0 +1,532 @@
+//===-- llvm-mc.cpp - Machine Code Hacking Driver -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This utility is a simple driver that allows command line hacking on machine
+// code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/MC/MCParser/AsmLexer.h"
+#include "llvm/MC/MCParser/MCAsmLexer.h"
+#include "llvm/MC/MCAsmBackend.h"
+#include "llvm/MC/MCContext.h"
+#include "llvm/MC/MCCodeEmitter.h"
+#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCObjectFileInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSectionMachO.h"
+#include "llvm/MC/MCStreamer.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/MC/MCTargetAsmParser.h"
+#include "llvm/MC/SubtargetFeature.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FileUtilities.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Support/system_error.h"
+#include "Disassembler.h"
+using namespace llvm;
+
+static cl::opt<std::string>
+InputFilename(cl::Positional, cl::desc("<input file>"), cl::init("-"));
+
+static cl::opt<std::string>
+OutputFilename("o", cl::desc("Output filename"),
+ cl::value_desc("filename"));
+
+static cl::opt<bool>
+ShowEncoding("show-encoding", cl::desc("Show instruction encodings"));
+
+static cl::opt<bool>
+ShowInst("show-inst", cl::desc("Show internal instruction representation"));
+
+static cl::opt<bool>
+ShowInstOperands("show-inst-operands",
+ cl::desc("Show instructions operands as parsed"));
+
+static cl::opt<unsigned>
+OutputAsmVariant("output-asm-variant",
+ cl::desc("Syntax variant to use for output printing"));
+
+static cl::opt<bool>
+RelaxAll("mc-relax-all", cl::desc("Relax all fixups"));
+
+static cl::opt<bool>
+NoExecStack("mc-no-exec-stack", cl::desc("File doesn't need an exec stack"));
+
+enum OutputFileType {
+ OFT_Null,
+ OFT_AssemblyFile,
+ OFT_ObjectFile
+};
+static cl::opt<OutputFileType>
+FileType("filetype", cl::init(OFT_AssemblyFile),
+ cl::desc("Choose an output file type:"),
+ cl::values(
+ clEnumValN(OFT_AssemblyFile, "asm",
+ "Emit an assembly ('.s') file"),
+ clEnumValN(OFT_Null, "null",
+ "Don't emit anything (for timing purposes)"),
+ clEnumValN(OFT_ObjectFile, "obj",
+ "Emit a native object ('.o') file"),
+ clEnumValEnd));
+
+static cl::list<std::string>
+IncludeDirs("I", cl::desc("Directory of include files"),
+ cl::value_desc("directory"), cl::Prefix);
+
+static cl::opt<std::string>
+ArchName("arch", cl::desc("Target arch to assemble for, "
+ "see -version for available targets"));
+
+static cl::opt<std::string>
+TripleName("triple", cl::desc("Target triple to assemble for, "
+ "see -version for available targets"));
+
+static cl::opt<std::string>
+MCPU("mcpu",
+ cl::desc("Target a specific cpu type (-mcpu=help for details)"),
+ cl::value_desc("cpu-name"),
+ cl::init(""));
+
+static cl::list<std::string>
+MAttrs("mattr",
+ cl::CommaSeparated,
+ cl::desc("Target specific attributes (-mattr=help for details)"),
+ cl::value_desc("a1,+a2,-a3,..."));
+
+static cl::opt<Reloc::Model>
+RelocModel("relocation-model",
+ cl::desc("Choose relocation model"),
+ cl::init(Reloc::Default),
+ cl::values(
+ clEnumValN(Reloc::Default, "default",
+ "Target default relocation model"),
+ clEnumValN(Reloc::Static, "static",
+ "Non-relocatable code"),
+ clEnumValN(Reloc::PIC_, "pic",
+ "Fully relocatable, position independent code"),
+ clEnumValN(Reloc::DynamicNoPIC, "dynamic-no-pic",
+ "Relocatable external references, non-relocatable code"),
+ clEnumValEnd));
+
+static cl::opt<llvm::CodeModel::Model>
+CMModel("code-model",
+ cl::desc("Choose code model"),
+ cl::init(CodeModel::Default),
+ cl::values(clEnumValN(CodeModel::Default, "default",
+ "Target default code model"),
+ clEnumValN(CodeModel::Small, "small",
+ "Small code model"),
+ clEnumValN(CodeModel::Kernel, "kernel",
+ "Kernel code model"),
+ clEnumValN(CodeModel::Medium, "medium",
+ "Medium code model"),
+ clEnumValN(CodeModel::Large, "large",
+ "Large code model"),
+ clEnumValEnd));
+
+static cl::opt<bool>
+NoInitialTextSection("n", cl::desc("Don't assume assembly file starts "
+ "in the text section"));
+
+static cl::opt<bool>
+SaveTempLabels("L", cl::desc("Don't discard temporary labels"));
+
+static cl::opt<bool>
+GenDwarfForAssembly("g", cl::desc("Generate dwarf debugging info for assembly "
+ "source files"));
+
+enum ActionType {
+ AC_AsLex,
+ AC_Assemble,
+ AC_Disassemble,
+ AC_EDisassemble
+};
+
+static cl::opt<ActionType>
+Action(cl::desc("Action to perform:"),
+ cl::init(AC_Assemble),
+ cl::values(clEnumValN(AC_AsLex, "as-lex",
+ "Lex tokens from a .s file"),
+ clEnumValN(AC_Assemble, "assemble",
+ "Assemble a .s file (default)"),
+ clEnumValN(AC_Disassemble, "disassemble",
+ "Disassemble strings of hex bytes"),
+ clEnumValN(AC_EDisassemble, "edis",
+ "Enhanced disassembly of strings of hex bytes"),
+ clEnumValEnd));
+
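+// GetTarget - Resolve the -arch and -triple options (defaulting to the host
+// triple) to a registered Target, updating TripleName to the normalized triple.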
+static const Target *GetTarget(const char *ProgName) {
+ // Figure out the target triple.
+ if (TripleName.empty())
+ TripleName = sys::getDefaultTargetTriple();
+ Triple TheTriple(Triple::normalize(TripleName));
+
+ const Target *TheTarget = 0;
+ if (!ArchName.empty()) {
+ for (TargetRegistry::iterator it = TargetRegistry::begin(),
+ ie = TargetRegistry::end(); it != ie; ++it) {
+ if (ArchName == it->getName()) {
+ TheTarget = &*it;
+ break;
+ }
+ }
+
+ if (!TheTarget) {
+ errs() << ProgName << ": error: invalid target '" << ArchName << "'.\n";
+ return 0;
+ }
+
+ // Adjust the triple to match (if known), otherwise stick with the
+ // module/host triple.
+ Triple::ArchType Type = Triple::getArchTypeForLLVMName(ArchName);
+ if (Type != Triple::UnknownArch)
+ TheTriple.setArch(Type);
+ } else {
+ // Get the target specific parser.
+ std::string Error;
+ TheTarget = TargetRegistry::lookupTarget(TheTriple.getTriple(), Error);
+ if (TheTarget == 0) {
+ errs() << ProgName << ": error: unable to get target for '"
+ << TheTriple.getTriple()
+ << "', see --version and --triple.\n";
+ return 0;
+ }
+ }
+
+ TripleName = TheTriple.getTriple();
+ return TheTarget;
+}
+
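+// GetOutputStream - Open the -o output file (or stdout) in binary mode,
+// returning null and printing an error on failure.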
+static tool_output_file *GetOutputStream() {
+ if (OutputFilename == "")
+ OutputFilename = "-";
+
+ std::string Err;
+ tool_output_file *Out = new tool_output_file(OutputFilename.c_str(), Err,
+ raw_fd_ostream::F_Binary);
+ if (!Err.empty()) {
+ errs() << Err << '\n';
+ delete Out;
+ return 0;
+ }
+
+ return Out;
+}
+
+static std::string DwarfDebugFlags;
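+// setDwarfDebugFlags - If RC_DEBUG_OPTIONS is set in the environment, capture
+// the full command line in DwarfDebugFlags for MCContext::setDwarfDebugFlags.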
+static void setDwarfDebugFlags(int argc, char **argv) {
+ if (!getenv("RC_DEBUG_OPTIONS"))
+ return;
+ for (int i = 0; i < argc; i++) {
+ DwarfDebugFlags += argv[i];
+ if (i + 1 < argc)
+ DwarfDebugFlags += " ";
+ }
+}
+
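+// AsLexInput - Lex the input file and print one line per token to the output.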
+static int AsLexInput(const char *ProgName) {
+ OwningPtr<MemoryBuffer> BufferPtr;
+ if (error_code ec = MemoryBuffer::getFileOrSTDIN(InputFilename, BufferPtr)) {
+ errs() << ProgName << ": " << ec.message() << '\n';
+ return 1;
+ }
+ MemoryBuffer *Buffer = BufferPtr.take();
+
+ SourceMgr SrcMgr;
+
+ // Tell SrcMgr about this buffer, which is what the lexer will pick up.
+ SrcMgr.AddNewSourceBuffer(Buffer, SMLoc());
+
+ // Record the location of the include directories so that the lexer can find
+ // them later.
+ SrcMgr.setIncludeDirs(IncludeDirs);
+
+ const Target *TheTarget = GetTarget(ProgName);
+ if (!TheTarget)
+ return 1;
+
+ llvm::OwningPtr<MCAsmInfo> MAI(TheTarget->createMCAsmInfo(TripleName));
+ assert(MAI && "Unable to create target asm info!");
+
+ AsmLexer Lexer(*MAI);
+ Lexer.setBuffer(SrcMgr.getMemoryBuffer(0));
+
+ OwningPtr<tool_output_file> Out(GetOutputStream());
+ if (!Out)
+ return 1;
+
+ bool Error = false;
+ while (Lexer.Lex().isNot(AsmToken::Eof)) {
+ AsmToken Tok = Lexer.getTok();
+
+ switch (Tok.getKind()) {
+ default:
+ SrcMgr.PrintMessage(Lexer.getLoc(), SourceMgr::DK_Warning,
+ "unknown token");
+ Error = true;
+ break;
+ case AsmToken::Error:
+ Error = true; // error already printed.
+ break;
+ case AsmToken::Identifier:
+ Out->os() << "identifier: " << Lexer.getTok().getString();
+ break;
+ case AsmToken::Integer:
+ Out->os() << "int: " << Lexer.getTok().getString();
+ break;
+ case AsmToken::Real:
+ Out->os() << "real: " << Lexer.getTok().getString();
+ break;
+ case AsmToken::Register:
+ Out->os() << "register: " << Lexer.getTok().getRegVal();
+ break;
+ case AsmToken::String:
+ Out->os() << "string: " << Lexer.getTok().getString();
+ break;
+
+ case AsmToken::Amp: Out->os() << "Amp"; break;
+ case AsmToken::AmpAmp: Out->os() << "AmpAmp"; break;
+ case AsmToken::At: Out->os() << "At"; break;
+ case AsmToken::Caret: Out->os() << "Caret"; break;
+ case AsmToken::Colon: Out->os() << "Colon"; break;
+ case AsmToken::Comma: Out->os() << "Comma"; break;
+ case AsmToken::Dollar: Out->os() << "Dollar"; break;
+ case AsmToken::Dot: Out->os() << "Dot"; break;
+ case AsmToken::EndOfStatement: Out->os() << "EndOfStatement"; break;
+ case AsmToken::Eof: Out->os() << "Eof"; break;
+ case AsmToken::Equal: Out->os() << "Equal"; break;
+ case AsmToken::EqualEqual: Out->os() << "EqualEqual"; break;
+ case AsmToken::Exclaim: Out->os() << "Exclaim"; break;
+ case AsmToken::ExclaimEqual: Out->os() << "ExclaimEqual"; break;
+ case AsmToken::Greater: Out->os() << "Greater"; break;
+ case AsmToken::GreaterEqual: Out->os() << "GreaterEqual"; break;
+ case AsmToken::GreaterGreater: Out->os() << "GreaterGreater"; break;
+ case AsmToken::Hash: Out->os() << "Hash"; break;
+ case AsmToken::LBrac: Out->os() << "LBrac"; break;
+ case AsmToken::LCurly: Out->os() << "LCurly"; break;
+ case AsmToken::LParen: Out->os() << "LParen"; break;
+ case AsmToken::Less: Out->os() << "Less"; break;
+ case AsmToken::LessEqual: Out->os() << "LessEqual"; break;
+ case AsmToken::LessGreater: Out->os() << "LessGreater"; break;
+ case AsmToken::LessLess: Out->os() << "LessLess"; break;
+ case AsmToken::Minus: Out->os() << "Minus"; break;
+ case AsmToken::Percent: Out->os() << "Percent"; break;
+ case AsmToken::Pipe: Out->os() << "Pipe"; break;
+ case AsmToken::PipePipe: Out->os() << "PipePipe"; break;
+ case AsmToken::Plus: Out->os() << "Plus"; break;
+ case AsmToken::RBrac: Out->os() << "RBrac"; break;
+ case AsmToken::RCurly: Out->os() << "RCurly"; break;
+ case AsmToken::RParen: Out->os() << "RParen"; break;
+ case AsmToken::Slash: Out->os() << "Slash"; break;
+ case AsmToken::Star: Out->os() << "Star"; break;
+ case AsmToken::Tilde: Out->os() << "Tilde"; break;
+ }
+
+ // Print the token string.
+ Out->os() << " (\"";
+ Out->os().write_escaped(Tok.getString());
+ Out->os() << "\")\n";
+ }
+
+ // Keep output if no errors.
+ if (Error == 0) Out->keep();
+
+ return Error;
+}
+
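+// AssembleInput - Parse the input with the target's assembly parser and stream
+// the result out as assembly, an object file, or nothing, as selected by
+// -filetype.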
+static int AssembleInput(const char *ProgName) {
+ const Target *TheTarget = GetTarget(ProgName);
+ if (!TheTarget)
+ return 1;
+
+ OwningPtr<MemoryBuffer> BufferPtr;
+ if (error_code ec = MemoryBuffer::getFileOrSTDIN(InputFilename, BufferPtr)) {
+ errs() << ProgName << ": " << ec.message() << '\n';
+ return 1;
+ }
+ MemoryBuffer *Buffer = BufferPtr.take();
+
+ SourceMgr SrcMgr;
+
+ // Tell SrcMgr about this buffer, which is what the parser will pick up.
+ SrcMgr.AddNewSourceBuffer(Buffer, SMLoc());
+
+ // Record the location of the include directories so that the lexer can find
+ // them later.
+ SrcMgr.setIncludeDirs(IncludeDirs);
+
+
+ llvm::OwningPtr<MCAsmInfo> MAI(TheTarget->createMCAsmInfo(TripleName));
+ assert(MAI && "Unable to create target asm info!");
+
+ llvm::OwningPtr<MCRegisterInfo> MRI(TheTarget->createMCRegInfo(TripleName));
+ assert(MRI && "Unable to create target register info!");
+
+ // FIXME: This is not pretty. MCContext has a ptr to MCObjectFileInfo and
+ // MCObjectFileInfo needs a MCContext reference in order to initialize itself.
+ OwningPtr<MCObjectFileInfo> MOFI(new MCObjectFileInfo());
+ MCContext Ctx(*MAI, *MRI, MOFI.get(), &SrcMgr);
+ MOFI->InitMCObjectFileInfo(TripleName, RelocModel, CMModel, Ctx);
+
+ if (SaveTempLabels)
+ Ctx.setAllowTemporaryLabels(false);
+
+ Ctx.setGenDwarfForAssembly(GenDwarfForAssembly);
+ if (!DwarfDebugFlags.empty())
+ Ctx.setDwarfDebugFlags(StringRef(DwarfDebugFlags));
+
+ // Package up features to be passed to target/subtarget
+ std::string FeaturesStr;
+ if (MAttrs.size()) {
+ SubtargetFeatures Features;
+ for (unsigned i = 0; i != MAttrs.size(); ++i)
+ Features.AddFeature(MAttrs[i]);
+ FeaturesStr = Features.getString();
+ }
+
+ OwningPtr<tool_output_file> Out(GetOutputStream());
+ if (!Out)
+ return 1;
+
+ formatted_raw_ostream FOS(Out->os());
+ OwningPtr<MCStreamer> Str;
+
+ OwningPtr<MCInstrInfo> MCII(TheTarget->createMCInstrInfo());
+ OwningPtr<MCSubtargetInfo>
+ STI(TheTarget->createMCSubtargetInfo(TripleName, MCPU, FeaturesStr));
+
+ // FIXME: There is a bit of code duplication with addPassesToEmitFile.
+ if (FileType == OFT_AssemblyFile) {
+ MCInstPrinter *IP =
+ TheTarget->createMCInstPrinter(OutputAsmVariant, *MAI, *MCII, *MRI, *STI);
+ MCCodeEmitter *CE = 0;
+ MCAsmBackend *MAB = 0;
+ if (ShowEncoding) {
+ CE = TheTarget->createMCCodeEmitter(*MCII, *STI, Ctx);
+ MAB = TheTarget->createMCAsmBackend(TripleName);
+ }
+ Str.reset(TheTarget->createAsmStreamer(Ctx, FOS, /*asmverbose*/true,
+ /*useLoc*/ true,
+ /*useCFI*/ true,
+ /*useDwarfDirectory*/ true,
+ IP, CE, MAB, ShowInst));
+
+ } else if (FileType == OFT_Null) {
+ Str.reset(createNullStreamer(Ctx));
+ } else {
+ assert(FileType == OFT_ObjectFile && "Invalid file type!");
+ MCCodeEmitter *CE = TheTarget->createMCCodeEmitter(*MCII, *STI, Ctx);
+ MCAsmBackend *MAB = TheTarget->createMCAsmBackend(TripleName);
+ Str.reset(TheTarget->createMCObjectStreamer(TripleName, Ctx, *MAB,
+ FOS, CE, RelaxAll,
+ NoExecStack));
+ }
+
+ OwningPtr<MCAsmParser> Parser(createMCAsmParser(SrcMgr, Ctx,
+ *Str.get(), *MAI));
+ OwningPtr<MCTargetAsmParser> TAP(TheTarget->createMCAsmParser(*STI, *Parser));
+ if (!TAP) {
+ errs() << ProgName
+ << ": error: this target does not support assembly parsing.\n";
+ return 1;
+ }
+
+ Parser->setShowParsedOperands(ShowInstOperands);
+ Parser->setTargetParser(*TAP.get());
+
+ int Res = Parser->Run(NoInitialTextSection);
+
+ // Keep output if no errors.
+ if (Res == 0) Out->keep();
+
+ return Res;
+}
+
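+// DisassembleInput - Disassemble the hex-byte input with either the standard
+// or the enhanced disassembler.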
+static int DisassembleInput(const char *ProgName, bool Enhanced) {
+ const Target *TheTarget = GetTarget(ProgName);
+ if (!TheTarget)
+ return 0;
+
+ OwningPtr<MemoryBuffer> Buffer;
+ if (error_code ec = MemoryBuffer::getFileOrSTDIN(InputFilename, Buffer)) {
+ errs() << ProgName << ": " << ec.message() << '\n';
+ return 1;
+ }
+
+ OwningPtr<tool_output_file> Out(GetOutputStream());
+ if (!Out)
+ return 1;
+
+ int Res;
+ if (Enhanced) {
+ Res =
+ Disassembler::disassembleEnhanced(TripleName, *Buffer.take(), Out->os());
+ } else {
+ // Package up features to be passed to target/subtarget
+ std::string FeaturesStr;
+ if (MAttrs.size()) {
+ SubtargetFeatures Features;
+ for (unsigned i = 0; i != MAttrs.size(); ++i)
+ Features.AddFeature(MAttrs[i]);
+ FeaturesStr = Features.getString();
+ }
+
+ Res = Disassembler::disassemble(*TheTarget, TripleName, MCPU, FeaturesStr,
+ *Buffer.take(), Out->os());
+ }
+
+ // Keep output if no errors.
+ if (Res == 0) Out->keep();
+
+ return Res;
+}
+
+
+int main(int argc, char **argv) {
+ // Print a stack trace if we signal out.
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(argc, argv);
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+
+ // Initialize targets and assembly printers/parsers.
+ llvm::InitializeAllTargetInfos();
+ llvm::InitializeAllTargetMCs();
+ llvm::InitializeAllAsmParsers();
+ llvm::InitializeAllDisassemblers();
+
+ // Register the target printer for --version.
+ cl::AddExtraVersionPrinter(TargetRegistry::printRegisteredTargetsForVersion);
+
+ cl::ParseCommandLineOptions(argc, argv, "llvm machine code playground\n");
+ TripleName = Triple::normalize(TripleName);
+ setDwarfDebugFlags(argc, argv);
+
+ switch (Action) {
+ case AC_AsLex:
+ return AsLexInput(argv[0]);
+ case AC_Assemble:
+ return AssembleInput(argv[0]);
+ case AC_Disassemble:
+ return DisassembleInput(argv[0], false);
+ case AC_EDisassemble:
+ return DisassembleInput(argv[0], true);
+ }
+}
diff --git a/contrib/llvm/tools/llvm-nm/llvm-nm.cpp b/contrib/llvm/tools/llvm-nm/llvm-nm.cpp
new file mode 100644
index 0000000..8d9e51e
--- /dev/null
+++ b/contrib/llvm/tools/llvm-nm/llvm-nm.cpp
@@ -0,0 +1,412 @@
+//===-- llvm-nm.cpp - Symbol table dumping utility for llvm ---------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This program is a utility that works like traditional Unix "nm",
+// that is, it prints out the names of symbols in a bitcode file,
+// along with some information about each symbol.
+//
+// This "nm" does not print symbols' addresses. It supports many of
+// the features of GNU "nm", including its different output formats.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/Bitcode/Archive.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/Program.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/system_error.h"
+#include <algorithm>
+#include <cctype>
+#include <cerrno>
+#include <cstring>
+#include <vector>
+using namespace llvm;
+using namespace object;
+
+namespace {
+ enum OutputFormatTy { bsd, sysv, posix };
+ cl::opt<OutputFormatTy>
+ OutputFormat("format",
+ cl::desc("Specify output format"),
+ cl::values(clEnumVal(bsd, "BSD format"),
+ clEnumVal(sysv, "System V format"),
+ clEnumVal(posix, "POSIX.2 format"),
+ clEnumValEnd), cl::init(bsd));
+ cl::alias OutputFormat2("f", cl::desc("Alias for --format"),
+ cl::aliasopt(OutputFormat));
+
+ cl::list<std::string>
+ InputFilenames(cl::Positional, cl::desc("<input bitcode files>"),
+ cl::ZeroOrMore);
+
+ cl::opt<bool> UndefinedOnly("undefined-only",
+ cl::desc("Show only undefined symbols"));
+ cl::alias UndefinedOnly2("u", cl::desc("Alias for --undefined-only"),
+ cl::aliasopt(UndefinedOnly));
+
+ cl::opt<bool> DynamicSyms("dynamic",
+ cl::desc("Display the dynamic symbols instead "
+ "of normal symbols."));
+ cl::alias DynamicSyms2("D", cl::desc("Alias for --dynamic"),
+ cl::aliasopt(DynamicSyms));
+
+ cl::opt<bool> DefinedOnly("defined-only",
+ cl::desc("Show only defined symbols"));
+
+ cl::opt<bool> ExternalOnly("extern-only",
+ cl::desc("Show only external symbols"));
+ cl::alias ExternalOnly2("g", cl::desc("Alias for --extern-only"),
+ cl::aliasopt(ExternalOnly));
+
+ cl::opt<bool> BSDFormat("B", cl::desc("Alias for --format=bsd"));
+ cl::opt<bool> POSIXFormat("P", cl::desc("Alias for --format=posix"));
+
+ cl::opt<bool> PrintFileName("print-file-name",
+ cl::desc("Precede each symbol with the object file it came from"));
+
+ cl::alias PrintFileNameA("A", cl::desc("Alias for --print-file-name"),
+ cl::aliasopt(PrintFileName));
+ cl::alias PrintFileNameo("o", cl::desc("Alias for --print-file-name"),
+ cl::aliasopt(PrintFileName));
+
+ cl::opt<bool> DebugSyms("debug-syms",
+ cl::desc("Show all symbols, even debugger only"));
+ cl::alias DebugSymsa("a", cl::desc("Alias for --debug-syms"),
+ cl::aliasopt(DebugSyms));
+
+ cl::opt<bool> NumericSort("numeric-sort",
+ cl::desc("Sort symbols by address"));
+ cl::alias NumericSortn("n", cl::desc("Alias for --numeric-sort"),
+ cl::aliasopt(NumericSort));
+ cl::alias NumericSortv("v", cl::desc("Alias for --numeric-sort"),
+ cl::aliasopt(NumericSort));
+
+ cl::opt<bool> NoSort("no-sort",
+ cl::desc("Show symbols in order encountered"));
+ cl::alias NoSortp("p", cl::desc("Alias for --no-sort"),
+ cl::aliasopt(NoSort));
+
+ cl::opt<bool> PrintSize("print-size",
+ cl::desc("Show symbol size instead of address"));
+ cl::alias PrintSizeS("S", cl::desc("Alias for --print-size"),
+ cl::aliasopt(PrintSize));
+
+ cl::opt<bool> SizeSort("size-sort", cl::desc("Sort symbols by size"));
+
+ bool PrintAddress = true;
+
+ bool MultipleFiles = false;
+
+ std::string ToolName;
+}
+
+
+static void error(Twine message, Twine path = Twine()) {
+ errs() << ToolName << ": " << path << ": " << message << ".\n";
+}
+
+static bool error(error_code ec, Twine path = Twine()) {
+ if (ec) {
+ error(ec.message(), path);
+ return true;
+ }
+ return false;
+}
+
+namespace {
+ struct NMSymbol {
+ uint64_t Address;
+ uint64_t Size;
+ char TypeChar;
+ StringRef Name;
+ };
+
+ static bool CompareSymbolAddress(const NMSymbol &a, const NMSymbol &b) {
+ if (a.Address < b.Address)
+ return true;
+ else if (a.Address == b.Address && a.Name < b.Name)
+ return true;
+ else
+ return false;
+
+ }
+
+ static bool CompareSymbolSize(const NMSymbol &a, const NMSymbol &b) {
+ if (a.Size < b.Size)
+ return true;
+ else if (a.Size == b.Size && a.Name < b.Name)
+ return true;
+ else
+ return false;
+ }
+
+ static bool CompareSymbolName(const NMSymbol &a, const NMSymbol &b) {
+ return a.Name < b.Name;
+ }
+
+ StringRef CurrentFilename;
+ typedef std::vector<NMSymbol> SymbolListT;
+ SymbolListT SymbolList;
+}
+
+static void SortAndPrintSymbolList() {
+ if (!NoSort) {
+ if (NumericSort)
+ std::sort(SymbolList.begin(), SymbolList.end(), CompareSymbolAddress);
+ else if (SizeSort)
+ std::sort(SymbolList.begin(), SymbolList.end(), CompareSymbolSize);
+ else
+ std::sort(SymbolList.begin(), SymbolList.end(), CompareSymbolName);
+ }
+
+ if (OutputFormat == posix && MultipleFiles) {
+ outs() << '\n' << CurrentFilename << ":\n";
+ } else if (OutputFormat == bsd && MultipleFiles) {
+ outs() << "\n" << CurrentFilename << ":\n";
+ } else if (OutputFormat == sysv) {
+ outs() << "\n\nSymbols from " << CurrentFilename << ":\n\n"
+ << "Name Value Class Type"
+ << " Size Line Section\n";
+ }
+
+ for (SymbolListT::iterator i = SymbolList.begin(),
+ e = SymbolList.end(); i != e; ++i) {
+ if ((i->TypeChar != 'U') && UndefinedOnly)
+ continue;
+ if ((i->TypeChar == 'U') && DefinedOnly)
+ continue;
+ if (SizeSort && !PrintAddress && i->Size == UnknownAddressOrSize)
+ continue;
+
+ char SymbolAddrStr[10] = "";
+ char SymbolSizeStr[10] = "";
+
+ if (OutputFormat == sysv || i->Address == object::UnknownAddressOrSize)
+ strcpy(SymbolAddrStr, " ");
+ if (OutputFormat == sysv)
+ strcpy(SymbolSizeStr, " ");
+
+ if (i->Address != object::UnknownAddressOrSize)
+ format("%08" PRIx64, i->Address).print(SymbolAddrStr,
+ sizeof(SymbolAddrStr));
+ if (i->Size != object::UnknownAddressOrSize)
+ format("%08" PRIx64, i->Size).print(SymbolSizeStr, sizeof(SymbolSizeStr));
+
+ if (OutputFormat == posix) {
+ outs() << i->Name << " " << i->TypeChar << " "
+ << SymbolAddrStr << SymbolSizeStr << "\n";
+ } else if (OutputFormat == bsd) {
+ if (PrintAddress)
+ outs() << SymbolAddrStr << ' ';
+ if (PrintSize) {
+ outs() << SymbolSizeStr;
+ if (i->Size != object::UnknownAddressOrSize)
+ outs() << ' ';
+ }
+ outs() << i->TypeChar << " " << i->Name << "\n";
+ } else if (OutputFormat == sysv) {
+ std::string PaddedName (i->Name);
+ while (PaddedName.length () < 20)
+ PaddedName += " ";
+ outs() << PaddedName << "|" << SymbolAddrStr << "| "
+ << i->TypeChar
+ << " | |" << SymbolSizeStr << "| |\n";
+ }
+ }
+
+ SymbolList.clear();
+}
+
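+// TypeCharForSymbol - Map a GlobalValue to the nm-style single-character
+// symbol type.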
+static char TypeCharForSymbol(GlobalValue &GV) {
+ if (GV.isDeclaration()) return 'U';
+ if (GV.hasLinkOnceLinkage()) return 'C';
+ if (GV.hasCommonLinkage()) return 'C';
+ if (GV.hasWeakLinkage()) return 'W';
+ if (isa<Function>(GV) && GV.hasInternalLinkage()) return 't';
+ if (isa<Function>(GV)) return 'T';
+ if (isa<GlobalVariable>(GV) && GV.hasInternalLinkage()) return 'd';
+ if (isa<GlobalVariable>(GV)) return 'D';
+ if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(&GV)) {
+ const GlobalValue *AliasedGV = GA->getAliasedGlobal();
+ if (isa<Function>(AliasedGV)) return 'T';
+ if (isa<GlobalVariable>(AliasedGV)) return 'D';
+ }
+ return '?';
+}
+
+static void DumpSymbolNameForGlobalValue(GlobalValue &GV) {
+ // Private linkage and available_externally linkage don't exist in symtab.
+ if (GV.hasPrivateLinkage() ||
+ GV.hasLinkerPrivateLinkage() ||
+ GV.hasLinkerPrivateWeakLinkage() ||
+ GV.hasLinkerPrivateWeakDefAutoLinkage() ||
+ GV.hasAvailableExternallyLinkage())
+ return;
+ char TypeChar = TypeCharForSymbol(GV);
+ if (GV.hasLocalLinkage () && ExternalOnly)
+ return;
+
+ NMSymbol s;
+ s.Address = object::UnknownAddressOrSize;
+ s.Size = object::UnknownAddressOrSize;
+ s.TypeChar = TypeChar;
+ s.Name = GV.getName();
+ SymbolList.push_back(s);
+}
+
+static void DumpSymbolNamesFromModule(Module *M) {
+ CurrentFilename = M->getModuleIdentifier();
+ std::for_each (M->begin(), M->end(), DumpSymbolNameForGlobalValue);
+ std::for_each (M->global_begin(), M->global_end(),
+ DumpSymbolNameForGlobalValue);
+ std::for_each (M->alias_begin(), M->alias_end(),
+ DumpSymbolNameForGlobalValue);
+
+ SortAndPrintSymbolList();
+}
+
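+// DumpSymbolNamesFromObject - Collect the object file's symbol table (or its
+// dynamic symbol table with -dynamic) and print it.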
+static void DumpSymbolNamesFromObject(ObjectFile *obj) {
+ error_code ec;
+ symbol_iterator ibegin = obj->begin_symbols();
+ symbol_iterator iend = obj->end_symbols();
+ if (DynamicSyms) {
+ ibegin = obj->begin_dynamic_symbols();
+ iend = obj->end_dynamic_symbols();
+ }
+ for (symbol_iterator i = ibegin; i != iend; i.increment(ec)) {
+ if (error(ec)) break;
+ uint32_t symflags;
+ if (error(i->getFlags(symflags))) break;
+ if (!DebugSyms && (symflags & SymbolRef::SF_FormatSpecific))
+ continue;
+ NMSymbol s;
+ s.Size = object::UnknownAddressOrSize;
+ s.Address = object::UnknownAddressOrSize;
+ if (PrintSize || SizeSort) {
+ if (error(i->getSize(s.Size))) break;
+ }
+ if (PrintAddress)
+ if (error(i->getAddress(s.Address))) break;
+ if (error(i->getNMTypeChar(s.TypeChar))) break;
+ if (error(i->getName(s.Name))) break;
+ SymbolList.push_back(s);
+ }
+
+ CurrentFilename = obj->getFileName();
+ SortAndPrintSymbolList();
+}
+
+static void DumpSymbolNamesFromFile(std::string &Filename) {
+ if (Filename != "-" && !sys::fs::exists(Filename)) {
+ errs() << ToolName << ": '" << Filename << "': " << "No such file\n";
+ return;
+ }
+
+ OwningPtr<MemoryBuffer> Buffer;
+ if (error(MemoryBuffer::getFileOrSTDIN(Filename, Buffer), Filename))
+ return;
+
+ sys::fs::file_magic magic = sys::fs::identify_magic(Buffer->getBuffer());
+
+ LLVMContext &Context = getGlobalContext();
+ std::string ErrorMessage;
+ if (magic == sys::fs::file_magic::bitcode) {
+ Module *Result = 0;
+ Result = ParseBitcodeFile(Buffer.get(), Context, &ErrorMessage);
+ if (Result) {
+ DumpSymbolNamesFromModule(Result);
+ delete Result;
+ } else {
+ error(ErrorMessage, Filename);
+ return;
+ }
+ } else if (magic == sys::fs::file_magic::archive) {
+ OwningPtr<Binary> arch;
+ if (error(object::createBinary(Buffer.take(), arch), Filename))
+ return;
+
+ if (object::Archive *a = dyn_cast<object::Archive>(arch.get())) {
+ for (object::Archive::child_iterator i = a->begin_children(),
+ e = a->end_children(); i != e; ++i) {
+ OwningPtr<Binary> child;
+ if (i->getAsBinary(child)) {
+ // Try opening it as a bitcode file.
+ OwningPtr<MemoryBuffer> buff(i->getBuffer());
+ Module *Result = 0;
+ if (buff)
+ Result = ParseBitcodeFile(buff.get(), Context, &ErrorMessage);
+
+ if (Result) {
+ DumpSymbolNamesFromModule(Result);
+ delete Result;
+ }
+ continue;
+ }
+ if (object::ObjectFile *o = dyn_cast<ObjectFile>(child.get())) {
+ outs() << o->getFileName() << ":\n";
+ DumpSymbolNamesFromObject(o);
+ }
+ }
+ }
+ } else if (magic.is_object()) {
+ OwningPtr<Binary> obj;
+ if (error(object::createBinary(Buffer.take(), obj), Filename))
+ return;
+ if (object::ObjectFile *o = dyn_cast<ObjectFile>(obj.get()))
+ DumpSymbolNamesFromObject(o);
+ } else {
+ errs() << ToolName << ": " << Filename << ": "
+ << "unrecognizable file type\n";
+ return;
+ }
+}
+
+int main(int argc, char **argv) {
+ // Print a stack trace if we signal out.
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(argc, argv);
+
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+ cl::ParseCommandLineOptions(argc, argv, "llvm symbol table dumper\n");
+
+ // llvm-nm only reads binary files.
+ if (error(sys::Program::ChangeStdinToBinary()))
+ return 1;
+
+ ToolName = argv[0];
+ if (BSDFormat) OutputFormat = bsd;
+ if (POSIXFormat) OutputFormat = posix;
+
+ // The relative order of these is important. If you pass --size-sort it should
+ // only print out the size. However, if you pass -S --size-sort, it should
+ // print out both the size and address.
+ if (SizeSort && !PrintSize) PrintAddress = false;
+ if (OutputFormat == sysv || SizeSort) PrintSize = true;
+
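+  // With no inputs, read from stdin; with more than one, remember that
+  // multiple files were given.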
+ switch (InputFilenames.size()) {
+ case 0: InputFilenames.push_back("-");
+ case 1: break;
+ default: MultipleFiles = true;
+ }
+
+ std::for_each(InputFilenames.begin(), InputFilenames.end(),
+ DumpSymbolNamesFromFile);
+ return 0;
+}
diff --git a/contrib/llvm/tools/llvm-objdump/MCFunction.cpp b/contrib/llvm/tools/llvm-objdump/MCFunction.cpp
new file mode 100644
index 0000000..5c67f1b
--- /dev/null
+++ b/contrib/llvm/tools/llvm-objdump/MCFunction.cpp
@@ -0,0 +1,138 @@
+//===-- MCFunction.cpp ----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the algorithm to break down a region of machine code
+// into basic blocks and try to reconstruct a CFG from it.
+//
+//===----------------------------------------------------------------------===//
+
+#include "MCFunction.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCInstrAnalysis.h"
+#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/Support/MemoryObject.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
+#include <set>
+using namespace llvm;
+
+MCFunction
+MCFunction::createFunctionFromMC(StringRef Name, const MCDisassembler *DisAsm,
+ const MemoryObject &Region, uint64_t Start,
+ uint64_t End, const MCInstrAnalysis *Ana,
+ raw_ostream &DebugOut,
+ SmallVectorImpl<uint64_t> &Calls) {
+ std::vector<MCDecodedInst> Instructions;
+ std::set<uint64_t> Splits;
+ Splits.insert(Start);
+ uint64_t Size;
+
+ MCFunction f(Name);
+
+ {
+ DenseSet<uint64_t> VisitedInsts;
+ SmallVector<uint64_t, 16> WorkList;
+ WorkList.push_back(Start);
+ // Disassemble code and gather basic block split points.
+ while (!WorkList.empty()) {
+ uint64_t Index = WorkList.pop_back_val();
+ if (VisitedInsts.find(Index) != VisitedInsts.end())
+ continue; // Already visited this location.
+
+ for (;Index < End; Index += Size) {
+ VisitedInsts.insert(Index);
+
+ MCInst Inst;
+ if (DisAsm->getInstruction(Inst, Size, Region, Index, DebugOut, nulls())){
+ Instructions.push_back(MCDecodedInst(Index, Size, Inst));
+ if (Ana->isBranch(Inst)) {
+ uint64_t targ = Ana->evaluateBranch(Inst, Index, Size);
+ if (targ != -1ULL && targ == Index+Size)
+ continue; // Skip nop jumps.
+
+ // If we could determine the branch target, make a note to start a
+ // new basic block there and add the target to the worklist.
+ if (targ != -1ULL) {
+ Splits.insert(targ);
+ WorkList.push_back(targ);
+ WorkList.push_back(Index+Size);
+ }
+ Splits.insert(Index+Size);
+ break;
+ } else if (Ana->isReturn(Inst)) {
+ // Return instruction. This basic block ends here.
+ Splits.insert(Index+Size);
+ break;
+ } else if (Ana->isCall(Inst)) {
+ uint64_t targ = Ana->evaluateBranch(Inst, Index, Size);
+ // Add the call to the call list if the destination is known.
+ if (targ != -1ULL && targ != Index+Size)
+ Calls.push_back(targ);
+ }
+ } else {
+ errs().write_hex(Index) << ": warning: invalid instruction encoding\n";
+ if (Size == 0)
+ Size = 1; // skip illegible bytes
+ }
+ }
+ }
+ }
+
+ // Make sure the instruction list is sorted.
+ std::sort(Instructions.begin(), Instructions.end());
+
+ // Create basic blocks.
+ unsigned ii = 0, ie = Instructions.size();
+ for (std::set<uint64_t>::iterator spi = Splits.begin(),
+ spe = llvm::prior(Splits.end()); spi != spe; ++spi) {
+ MCBasicBlock BB;
+ uint64_t BlockEnd = *llvm::next(spi);
+ // Add instructions to the BB.
+ for (; ii != ie; ++ii) {
+ if (Instructions[ii].Address < *spi ||
+ Instructions[ii].Address >= BlockEnd)
+ break;
+ BB.addInst(Instructions[ii]);
+ }
+ f.addBlock(*spi, BB);
+ }
+
+ std::sort(f.Blocks.begin(), f.Blocks.end());
+
+ // Calculate successors of each block.
+ for (MCFunction::iterator i = f.begin(), e = f.end(); i != e; ++i) {
+ MCBasicBlock &BB = const_cast<MCBasicBlock&>(i->second);
+ if (BB.getInsts().empty()) continue;
+ const MCDecodedInst &Inst = BB.getInsts().back();
+
+ if (Ana->isBranch(Inst.Inst)) {
+ uint64_t targ = Ana->evaluateBranch(Inst.Inst, Inst.Address, Inst.Size);
+ if (targ == -1ULL) {
+      // Indirect branch. Bail out and add every block of the function as a
+      // possible successor.
+ for (MCFunction::iterator i = f.begin(), e = f.end(); i != e; ++i)
+ BB.addSucc(i->first);
+ } else if (targ != Inst.Address+Inst.Size)
+ BB.addSucc(targ);
+ // Conditional branches can also fall through to the next block.
+ if (Ana->isConditionalBranch(Inst.Inst) && llvm::next(i) != e)
+ BB.addSucc(llvm::next(i)->first);
+ } else {
+ // No branch. Fall through to the next block.
+ if (!Ana->isReturn(Inst.Inst) && llvm::next(i) != e)
+ BB.addSucc(llvm::next(i)->first);
+ }
+ }
+
+ return f;
+}
diff --git a/contrib/llvm/tools/llvm-objdump/MCFunction.h b/contrib/llvm/tools/llvm-objdump/MCFunction.h
new file mode 100644
index 0000000..6d3a548
--- /dev/null
+++ b/contrib/llvm/tools/llvm-objdump/MCFunction.h
@@ -0,0 +1,100 @@
+//===-- MCFunction.h ------------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the data structures to hold a CFG reconstructed from
+// machine code.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJECTDUMP_MCFUNCTION_H
+#define LLVM_OBJECTDUMP_MCFUNCTION_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/MC/MCInst.h"
+#include <map>
+
+namespace llvm {
+
+class MCDisassembler;
+class MCInstrAnalysis;
+class MemoryObject;
+class raw_ostream;
+
+/// MCDecodedInst - Small container to hold an MCInst and associated info like
+/// address and size.
+struct MCDecodedInst {
+ uint64_t Address;
+ uint64_t Size;
+ MCInst Inst;
+
+ MCDecodedInst() {}
+ MCDecodedInst(uint64_t Address, uint64_t Size, MCInst Inst)
+ : Address(Address), Size(Size), Inst(Inst) {}
+
+ bool operator<(const MCDecodedInst &RHS) const {
+ return Address < RHS.Address;
+ }
+};
+
+/// MCBasicBlock - Consists of multiple MCDecodedInsts and a list of successor
+/// MCBasicBlocks.
+class MCBasicBlock {
+ std::vector<MCDecodedInst> Insts;
+ typedef DenseSet<uint64_t> SetTy;
+ SetTy Succs;
+public:
+ ArrayRef<MCDecodedInst> getInsts() const { return Insts; }
+
+ typedef SetTy::const_iterator succ_iterator;
+ succ_iterator succ_begin() const { return Succs.begin(); }
+ succ_iterator succ_end() const { return Succs.end(); }
+
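+  // Returns true if this block has a successor that starts at address Addr.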
+ bool contains(uint64_t Addr) const { return Succs.count(Addr); }
+
+ void addInst(const MCDecodedInst &Inst) { Insts.push_back(Inst); }
+ void addSucc(uint64_t Addr) { Succs.insert(Addr); }
+
+ bool operator<(const MCBasicBlock &RHS) const {
+ return Insts.size() < RHS.Insts.size();
+ }
+};
+
+/// MCFunction - Represents a named function in machine code, containing
+/// multiple MCBasicBlocks.
+class MCFunction {
+ const StringRef Name;
+ // Keep BBs sorted by address.
+ typedef std::vector<std::pair<uint64_t, MCBasicBlock> > MapTy;
+ MapTy Blocks;
+public:
+ MCFunction(StringRef Name) : Name(Name) {}
+
+ // Create an MCFunction from a region of binary machine code.
+ static MCFunction
+ createFunctionFromMC(StringRef Name, const MCDisassembler *DisAsm,
+ const MemoryObject &Region, uint64_t Start, uint64_t End,
+ const MCInstrAnalysis *Ana, raw_ostream &DebugOut,
+ SmallVectorImpl<uint64_t> &Calls);
+
+ typedef MapTy::const_iterator iterator;
+ iterator begin() const { return Blocks.begin(); }
+ iterator end() const { return Blocks.end(); }
+
+ StringRef getName() const { return Name; }
+
+ MCBasicBlock &addBlock(uint64_t Address, const MCBasicBlock &BB) {
+ Blocks.push_back(std::make_pair(Address, BB));
+ return Blocks.back().second;
+ }
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/llvm-objdump/MachODump.cpp b/contrib/llvm/tools/llvm-objdump/MachODump.cpp
new file mode 100644
index 0000000..0e7f3fd
--- /dev/null
+++ b/contrib/llvm/tools/llvm-objdump/MachODump.cpp
@@ -0,0 +1,639 @@
+//===-- MachODump.cpp - Object file dumping utility for llvm --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the MachO-specific dumper for llvm-objdump.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-objdump.h"
+#include "MCFunction.h"
+#include "llvm/Support/MachO.h"
+#include "llvm/Object/MachO.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/DebugInfo/DIContext.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCInstrAnalysis.h"
+#include "llvm/MC/MCInstrDesc.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/GraphWriter.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
+#include <algorithm>
+#include <cstring>
+using namespace llvm;
+using namespace object;
+
+static cl::opt<bool>
+ CFG("cfg", cl::desc("Create a CFG for every symbol in the object file and"
+ "write it to a graphviz file (MachO-only)"));
+
+static cl::opt<bool>
+ UseDbg("g", cl::desc("Print line information from debug info if available"));
+
+static cl::opt<std::string>
+ DSYMFile("dsym", cl::desc("Use .dSYM file for debug info"));
+
+static const Target *GetTarget(const MachOObject *MachOObj) {
+ // Figure out the target triple.
+ if (TripleName.empty()) {
+ llvm::Triple TT("unknown-unknown-unknown");
+ switch (MachOObj->getHeader().CPUType) {
+ case llvm::MachO::CPUTypeI386:
+ TT.setArch(Triple::ArchType(Triple::x86));
+ break;
+ case llvm::MachO::CPUTypeX86_64:
+ TT.setArch(Triple::ArchType(Triple::x86_64));
+ break;
+ case llvm::MachO::CPUTypeARM:
+ TT.setArch(Triple::ArchType(Triple::arm));
+ break;
+ case llvm::MachO::CPUTypePowerPC:
+ TT.setArch(Triple::ArchType(Triple::ppc));
+ break;
+ case llvm::MachO::CPUTypePowerPC64:
+ TT.setArch(Triple::ArchType(Triple::ppc64));
+ break;
+ }
+ TripleName = TT.str();
+ }
+
+ // Get the target specific parser.
+ std::string Error;
+ const Target *TheTarget = TargetRegistry::lookupTarget(TripleName, Error);
+ if (TheTarget)
+ return TheTarget;
+
+ errs() << "llvm-objdump: error: unable to get target for '" << TripleName
+ << "', see --version and --triple.\n";
+ return 0;
+}
+
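+// Order SymbolRefs by address; non-function symbols are treated as having
+// address 0 so they sort to the front.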
+struct SymbolSorter {
+ bool operator()(const SymbolRef &A, const SymbolRef &B) {
+ SymbolRef::Type AType, BType;
+ A.getType(AType);
+ B.getType(BType);
+
+ uint64_t AAddr, BAddr;
+ if (AType != SymbolRef::ST_Function)
+ AAddr = 0;
+ else
+ A.getAddress(AAddr);
+ if (BType != SymbolRef::ST_Function)
+ BAddr = 0;
+ else
+ B.getAddress(BAddr);
+ return AAddr < BAddr;
+ }
+};
+
+// Print additional information about an address, if available.
+static void DumpAddress(uint64_t Address, ArrayRef<SectionRef> Sections,
+ MachOObject *MachOObj, raw_ostream &OS) {
+ for (unsigned i = 0; i != Sections.size(); ++i) {
+ uint64_t SectAddr = 0, SectSize = 0;
+ Sections[i].getAddress(SectAddr);
+ Sections[i].getSize(SectSize);
+ uint64_t addr = SectAddr;
+ if (SectAddr <= Address &&
+ SectAddr + SectSize > Address) {
+ StringRef bytes, name;
+ Sections[i].getContents(bytes);
+ Sections[i].getName(name);
+ // Print constant strings.
+ if (!name.compare("__cstring"))
+ OS << '"' << bytes.substr(addr, bytes.find('\0', addr)) << '"';
+ // Print constant CFStrings.
+ if (!name.compare("__cfstring"))
+ OS << "@\"" << bytes.substr(addr, bytes.find('\0', addr)) << '"';
+ }
+ }
+}
+
+typedef std::map<uint64_t, MCFunction*> FunctionMapTy;
+typedef SmallVector<MCFunction, 16> FunctionListTy;
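+// Disassemble the given range into an MCFunction, register it in the function
+// map, and record any call targets so they can be disassembled later.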
+static void createMCFunctionAndSaveCalls(StringRef Name,
+ const MCDisassembler *DisAsm,
+ MemoryObject &Object, uint64_t Start,
+ uint64_t End,
+ MCInstrAnalysis *InstrAnalysis,
+ uint64_t Address,
+ raw_ostream &DebugOut,
+ FunctionMapTy &FunctionMap,
+ FunctionListTy &Functions) {
+ SmallVector<uint64_t, 16> Calls;
+ MCFunction f =
+ MCFunction::createFunctionFromMC(Name, DisAsm, Object, Start, End,
+ InstrAnalysis, DebugOut, Calls);
+ Functions.push_back(f);
+ FunctionMap[Address] = &Functions.back();
+
+ // Add the gathered callees to the map.
+ for (unsigned i = 0, e = Calls.size(); i != e; ++i)
+ FunctionMap.insert(std::make_pair(Calls[i], (MCFunction*)0));
+}
+
+// Write a graphviz file for the CFG inside an MCFunction.
+static void emitDOTFile(const char *FileName, const MCFunction &f,
+ MCInstPrinter *IP) {
+ // Start a new dot file.
+ std::string Error;
+ raw_fd_ostream Out(FileName, Error);
+ if (!Error.empty()) {
+ errs() << "llvm-objdump: warning: " << Error << '\n';
+ return;
+ }
+
+ Out << "digraph " << f.getName() << " {\n";
+ Out << "graph [ rankdir = \"LR\" ];\n";
+ for (MCFunction::iterator i = f.begin(), e = f.end(); i != e; ++i) {
+ bool hasPreds = false;
+ // Only print blocks that have predecessors.
+ // FIXME: Slow.
+ for (MCFunction::iterator pi = f.begin(), pe = f.end(); pi != pe;
+ ++pi)
+ if (pi->second.contains(i->first)) {
+ hasPreds = true;
+ break;
+ }
+
+ if (!hasPreds && i != f.begin())
+ continue;
+
+ Out << '"' << i->first << "\" [ label=\"<a>";
+ // Print instructions.
+ for (unsigned ii = 0, ie = i->second.getInsts().size(); ii != ie;
+ ++ii) {
+ // Escape special chars and print the instruction in mnemonic form.
+ std::string Str;
+ raw_string_ostream OS(Str);
+ IP->printInst(&i->second.getInsts()[ii].Inst, OS, "");
+ Out << DOT::EscapeString(OS.str()) << '|';
+ }
+ Out << "<o>\" shape=\"record\" ];\n";
+
+ // Add edges.
+ for (MCBasicBlock::succ_iterator si = i->second.succ_begin(),
+ se = i->second.succ_end(); si != se; ++si)
+ Out << i->first << ":o -> " << *si <<":a\n";
+ }
+ Out << "}\n";
+}
+
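+// Collect all sections and symbols from a Mach-O file, along with any
+// function start addresses recorded in a function starts load command.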
+static void getSectionsAndSymbols(const macho::Header &Header,
+ MachOObjectFile *MachOObj,
+ InMemoryStruct<macho::SymtabLoadCommand> *SymtabLC,
+ std::vector<SectionRef> &Sections,
+ std::vector<SymbolRef> &Symbols,
+ SmallVectorImpl<uint64_t> &FoundFns) {
+ error_code ec;
+ for (symbol_iterator SI = MachOObj->begin_symbols(),
+ SE = MachOObj->end_symbols(); SI != SE; SI.increment(ec))
+ Symbols.push_back(*SI);
+
+ for (section_iterator SI = MachOObj->begin_sections(),
+ SE = MachOObj->end_sections(); SI != SE; SI.increment(ec)) {
+ SectionRef SR = *SI;
+ StringRef SectName;
+ SR.getName(SectName);
+ Sections.push_back(*SI);
+ }
+
+ for (unsigned i = 0; i != Header.NumLoadCommands; ++i) {
+ const MachOObject::LoadCommandInfo &LCI =
+ MachOObj->getObject()->getLoadCommandInfo(i);
+ if (LCI.Command.Type == macho::LCT_FunctionStarts) {
+      // We found a function starts load command; parse the addresses for
+      // later consumption.
+ InMemoryStruct<macho::LinkeditDataLoadCommand> LLC;
+ MachOObj->getObject()->ReadLinkeditDataLoadCommand(LCI, LLC);
+
+ MachOObj->getObject()->ReadULEB128s(LLC->DataOffset, FoundFns);
+ }
+ }
+}
+
+void llvm::DisassembleInputMachO(StringRef Filename) {
+ OwningPtr<MemoryBuffer> Buff;
+
+ if (error_code ec = MemoryBuffer::getFileOrSTDIN(Filename, Buff)) {
+ errs() << "llvm-objdump: " << Filename << ": " << ec.message() << "\n";
+ return;
+ }
+
+ OwningPtr<MachOObjectFile> MachOOF(static_cast<MachOObjectFile*>(
+ ObjectFile::createMachOObjectFile(Buff.take())));
+ MachOObject *MachOObj = MachOOF->getObject();
+
+ const Target *TheTarget = GetTarget(MachOObj);
+ if (!TheTarget) {
+ // GetTarget prints out stuff.
+ return;
+ }
+ OwningPtr<const MCInstrInfo> InstrInfo(TheTarget->createMCInstrInfo());
+ OwningPtr<MCInstrAnalysis>
+ InstrAnalysis(TheTarget->createMCInstrAnalysis(InstrInfo.get()));
+
+ // Set up disassembler.
+ OwningPtr<const MCAsmInfo> AsmInfo(TheTarget->createMCAsmInfo(TripleName));
+ OwningPtr<const MCSubtargetInfo>
+ STI(TheTarget->createMCSubtargetInfo(TripleName, "", ""));
+ OwningPtr<const MCDisassembler> DisAsm(TheTarget->createMCDisassembler(*STI));
+ OwningPtr<const MCRegisterInfo> MRI(TheTarget->createMCRegInfo(TripleName));
+ int AsmPrinterVariant = AsmInfo->getAssemblerDialect();
+ OwningPtr<MCInstPrinter>
+ IP(TheTarget->createMCInstPrinter(AsmPrinterVariant, *AsmInfo, *InstrInfo,
+ *MRI, *STI));
+
+ if (!InstrAnalysis || !AsmInfo || !STI || !DisAsm || !IP) {
+ errs() << "error: couldn't initialize disassembler for target "
+ << TripleName << '\n';
+ return;
+ }
+
+ outs() << '\n' << Filename << ":\n\n";
+
+ const macho::Header &Header = MachOObj->getHeader();
+
+ const MachOObject::LoadCommandInfo *SymtabLCI = 0;
+ // First, find the symbol table segment.
+ for (unsigned i = 0; i != Header.NumLoadCommands; ++i) {
+ const MachOObject::LoadCommandInfo &LCI = MachOObj->getLoadCommandInfo(i);
+ if (LCI.Command.Type == macho::LCT_Symtab) {
+ SymtabLCI = &LCI;
+ break;
+ }
+ }
+
+ // Read and register the symbol table data.
+ InMemoryStruct<macho::SymtabLoadCommand> SymtabLC;
+ MachOObj->ReadSymtabLoadCommand(*SymtabLCI, SymtabLC);
+ MachOObj->RegisterStringTable(*SymtabLC);
+
+ std::vector<SectionRef> Sections;
+ std::vector<SymbolRef> Symbols;
+ SmallVector<uint64_t, 8> FoundFns;
+
+ getSectionsAndSymbols(Header, MachOOF.get(), &SymtabLC, Sections, Symbols,
+ FoundFns);
+
+ // Make a copy of the unsorted symbol list. FIXME: duplication
+ std::vector<SymbolRef> UnsortedSymbols(Symbols);
+ // Sort the symbols by address, just in case they didn't come in that way.
+ std::sort(Symbols.begin(), Symbols.end(), SymbolSorter());
+
+#ifndef NDEBUG
+ raw_ostream &DebugOut = DebugFlag ? dbgs() : nulls();
+#else
+ raw_ostream &DebugOut = nulls();
+#endif
+
+ StringRef DebugAbbrevSection, DebugInfoSection, DebugArangesSection,
+ DebugLineSection, DebugStrSection;
+ OwningPtr<DIContext> diContext;
+ OwningPtr<MachOObjectFile> DSYMObj;
+ MachOObject *DbgInfoObj = MachOObj;
+ // Try to find debug info and set up the DIContext for it.
+ if (UseDbg) {
+ ArrayRef<SectionRef> DebugSections = Sections;
+ std::vector<SectionRef> DSYMSections;
+
+    // If a separate .dSYM file path was specified, parse it as a Mach-O file,
+    // get its sections, and supply them to the section-name parsing below.
+ if (!DSYMFile.empty()) {
+ OwningPtr<MemoryBuffer> Buf;
+ if (error_code ec = MemoryBuffer::getFileOrSTDIN(DSYMFile.c_str(), Buf)) {
+ errs() << "llvm-objdump: " << Filename << ": " << ec.message() << '\n';
+ return;
+ }
+ DSYMObj.reset(static_cast<MachOObjectFile*>(
+ ObjectFile::createMachOObjectFile(Buf.take())));
+ const macho::Header &Header = DSYMObj->getObject()->getHeader();
+
+ std::vector<SymbolRef> Symbols;
+ SmallVector<uint64_t, 8> FoundFns;
+ getSectionsAndSymbols(Header, DSYMObj.get(), 0, DSYMSections, Symbols,
+ FoundFns);
+ DebugSections = DSYMSections;
+ DbgInfoObj = DSYMObj.get()->getObject();
+ }
+
+ // Find the named debug info sections.
+ for (unsigned SectIdx = 0; SectIdx != DebugSections.size(); SectIdx++) {
+ StringRef SectName;
+ if (!DebugSections[SectIdx].getName(SectName)) {
+ if (SectName.equals("__DWARF,__debug_abbrev"))
+ DebugSections[SectIdx].getContents(DebugAbbrevSection);
+ else if (SectName.equals("__DWARF,__debug_info"))
+ DebugSections[SectIdx].getContents(DebugInfoSection);
+ else if (SectName.equals("__DWARF,__debug_aranges"))
+ DebugSections[SectIdx].getContents(DebugArangesSection);
+ else if (SectName.equals("__DWARF,__debug_line"))
+ DebugSections[SectIdx].getContents(DebugLineSection);
+ else if (SectName.equals("__DWARF,__debug_str"))
+ DebugSections[SectIdx].getContents(DebugStrSection);
+ }
+ }
+
+    // Set up the DIContext.
+ diContext.reset(DIContext::getDWARFContext(DbgInfoObj->isLittleEndian(),
+ DebugInfoSection,
+ DebugAbbrevSection,
+ DebugArangesSection,
+ DebugLineSection,
+ DebugStrSection));
+ }
+
+ FunctionMapTy FunctionMap;
+ FunctionListTy Functions;
+
+ for (unsigned SectIdx = 0; SectIdx != Sections.size(); SectIdx++) {
+ StringRef SectName;
+ if (Sections[SectIdx].getName(SectName) ||
+ SectName.compare("__TEXT,__text"))
+ continue; // Skip non-text sections
+
+ // Insert the functions from the function starts segment into our map.
+ uint64_t VMAddr;
+ Sections[SectIdx].getAddress(VMAddr);
+ for (unsigned i = 0, e = FoundFns.size(); i != e; ++i) {
+ StringRef SectBegin;
+ Sections[SectIdx].getContents(SectBegin);
+ uint64_t Offset = (uint64_t)SectBegin.data();
+ FunctionMap.insert(std::make_pair(VMAddr + FoundFns[i]-Offset,
+ (MCFunction*)0));
+ }
+
+ StringRef Bytes;
+ Sections[SectIdx].getContents(Bytes);
+ StringRefMemoryObject memoryObject(Bytes);
+ bool symbolTableWorked = false;
+
+ // Parse relocations.
+ std::vector<std::pair<uint64_t, SymbolRef> > Relocs;
+ error_code ec;
+ for (relocation_iterator RI = Sections[SectIdx].begin_relocations(),
+ RE = Sections[SectIdx].end_relocations(); RI != RE; RI.increment(ec)) {
+ uint64_t RelocOffset, SectionAddress;
+ RI->getAddress(RelocOffset);
+ Sections[SectIdx].getAddress(SectionAddress);
+ RelocOffset -= SectionAddress;
+
+ SymbolRef RelocSym;
+ RI->getSymbol(RelocSym);
+
+ Relocs.push_back(std::make_pair(RelocOffset, RelocSym));
+ }
+ array_pod_sort(Relocs.begin(), Relocs.end());
+
+ // Disassemble symbol by symbol.
+ for (unsigned SymIdx = 0; SymIdx != Symbols.size(); SymIdx++) {
+ StringRef SymName;
+ Symbols[SymIdx].getName(SymName);
+
+ SymbolRef::Type ST;
+ Symbols[SymIdx].getType(ST);
+ if (ST != SymbolRef::ST_Function)
+ continue;
+
+ // Make sure the symbol is defined in this section.
+ bool containsSym = false;
+ Sections[SectIdx].containsSymbol(Symbols[SymIdx], containsSym);
+ if (!containsSym)
+ continue;
+
+ // Start at the address of the symbol relative to the section's address.
+ uint64_t SectionAddress = 0;
+ uint64_t Start = 0;
+ Sections[SectIdx].getAddress(SectionAddress);
+ Symbols[SymIdx].getAddress(Start);
+ Start -= SectionAddress;
+
+ // Stop disassembling either at the beginning of the next symbol or at
+ // the end of the section.
+ bool containsNextSym = true;
+ uint64_t NextSym = 0;
+ uint64_t NextSymIdx = SymIdx+1;
+ while (Symbols.size() > NextSymIdx) {
+ SymbolRef::Type NextSymType;
+ Symbols[NextSymIdx].getType(NextSymType);
+ if (NextSymType == SymbolRef::ST_Function) {
+ Sections[SectIdx].containsSymbol(Symbols[NextSymIdx],
+ containsNextSym);
+ Symbols[NextSymIdx].getAddress(NextSym);
+ NextSym -= SectionAddress;
+ break;
+ }
+ ++NextSymIdx;
+ }
+
+ uint64_t SectSize;
+ Sections[SectIdx].getSize(SectSize);
+ uint64_t End = containsNextSym ? NextSym : SectSize;
+ uint64_t Size;
+
+ symbolTableWorked = true;
+
+ if (!CFG) {
+ // Normal disassembly, print addresses, bytes and mnemonic form.
+ StringRef SymName;
+ Symbols[SymIdx].getName(SymName);
+
+ outs() << SymName << ":\n";
+ DILineInfo lastLine;
+ for (uint64_t Index = Start; Index < End; Index += Size) {
+ MCInst Inst;
+
+ if (DisAsm->getInstruction(Inst, Size, memoryObject, Index,
+ DebugOut, nulls())) {
+ uint64_t SectAddress = 0;
+ Sections[SectIdx].getAddress(SectAddress);
+ outs() << format("%8" PRIx64 ":\t", SectAddress + Index);
+
+ DumpBytes(StringRef(Bytes.data() + Index, Size));
+ IP->printInst(&Inst, outs(), "");
+
+ // Print debug info.
+ if (diContext) {
+ DILineInfo dli =
+ diContext->getLineInfoForAddress(SectAddress + Index);
+ // Print valid line info if it changed.
+ if (dli != lastLine && dli.getLine() != 0)
+ outs() << "\t## " << dli.getFileName() << ':'
+ << dli.getLine() << ':' << dli.getColumn();
+ lastLine = dli;
+ }
+ outs() << "\n";
+ } else {
+ errs() << "llvm-objdump: warning: invalid instruction encoding\n";
+ if (Size == 0)
+ Size = 1; // skip illegible bytes
+ }
+ }
+ } else {
+ // Create CFG and use it for disassembly.
+ StringRef SymName;
+ Symbols[SymIdx].getName(SymName);
+ createMCFunctionAndSaveCalls(
+ SymName, DisAsm.get(), memoryObject, Start, End,
+ InstrAnalysis.get(), Start, DebugOut, FunctionMap, Functions);
+ }
+ }
+
+ if (CFG) {
+ if (!symbolTableWorked) {
+        // Reading the symbol table didn't work; create one big __TEXT symbol instead.
+ uint64_t SectSize = 0, SectAddress = 0;
+ Sections[SectIdx].getSize(SectSize);
+ Sections[SectIdx].getAddress(SectAddress);
+ createMCFunctionAndSaveCalls("__TEXT", DisAsm.get(), memoryObject,
+ 0, SectSize,
+ InstrAnalysis.get(),
+ SectAddress, DebugOut,
+ FunctionMap, Functions);
+ }
+ for (std::map<uint64_t, MCFunction*>::iterator mi = FunctionMap.begin(),
+ me = FunctionMap.end(); mi != me; ++mi)
+ if (mi->second == 0) {
+          // Create functions for the remaining gathered callees for which we
+          // did not find a name.
+ uint64_t SectSize = 0;
+ Sections[SectIdx].getSize(SectSize);
+
+ SmallVector<uint64_t, 16> Calls;
+ MCFunction f =
+ MCFunction::createFunctionFromMC("unknown", DisAsm.get(),
+ memoryObject, mi->first,
+ SectSize,
+ InstrAnalysis.get(), DebugOut,
+ Calls);
+ Functions.push_back(f);
+ mi->second = &Functions.back();
+ for (unsigned i = 0, e = Calls.size(); i != e; ++i) {
+ std::pair<uint64_t, MCFunction*> p(Calls[i], (MCFunction*)0);
+ if (FunctionMap.insert(p).second)
+ mi = FunctionMap.begin();
+ }
+ }
+
+ DenseSet<uint64_t> PrintedBlocks;
+ for (unsigned ffi = 0, ffe = Functions.size(); ffi != ffe; ++ffi) {
+ MCFunction &f = Functions[ffi];
+ for (MCFunction::iterator fi = f.begin(), fe = f.end(); fi != fe; ++fi){
+ if (!PrintedBlocks.insert(fi->first).second)
+ continue; // We already printed this block.
+
+ // We assume a block has predecessors when it's the first block after
+ // a symbol.
+ bool hasPreds = FunctionMap.find(fi->first) != FunctionMap.end();
+
+ // See if this block has predecessors.
+ // FIXME: Slow.
+ for (MCFunction::iterator pi = f.begin(), pe = f.end(); pi != pe;
+ ++pi)
+ if (pi->second.contains(fi->first)) {
+ hasPreds = true;
+ break;
+ }
+
+ uint64_t SectSize = 0, SectAddress;
+ Sections[SectIdx].getSize(SectSize);
+ Sections[SectIdx].getAddress(SectAddress);
+
+          // No predecessors: this is a data block. Print it as .byte directives.
+ if (!hasPreds) {
+ uint64_t End = llvm::next(fi) == fe ? SectSize :
+ llvm::next(fi)->first;
+ outs() << "# " << End-fi->first << " bytes of data:\n";
+ for (unsigned pos = fi->first; pos != End; ++pos) {
+ outs() << format("%8x:\t", SectAddress + pos);
+ DumpBytes(StringRef(Bytes.data() + pos, 1));
+ outs() << format("\t.byte 0x%02x\n", (uint8_t)Bytes[pos]);
+ }
+ continue;
+ }
+
+ if (fi->second.contains(fi->first)) // Print a header for simple loops
+ outs() << "# Loop begin:\n";
+
+ DILineInfo lastLine;
+ // Walk over the instructions and print them.
+ for (unsigned ii = 0, ie = fi->second.getInsts().size(); ii != ie;
+ ++ii) {
+ const MCDecodedInst &Inst = fi->second.getInsts()[ii];
+
+ // If there's a symbol at this address, print its name.
+ if (FunctionMap.find(SectAddress + Inst.Address) !=
+ FunctionMap.end())
+              outs() << FunctionMap[SectAddress + Inst.Address]->getName()
+ << ":\n";
+
+ outs() << format("%8" PRIx64 ":\t", SectAddress + Inst.Address);
+ DumpBytes(StringRef(Bytes.data() + Inst.Address, Inst.Size));
+
+ if (fi->second.contains(fi->first)) // Indent simple loops.
+ outs() << '\t';
+
+ IP->printInst(&Inst.Inst, outs(), "");
+
+            // Look for relocations inside this instruction; if there is one,
+            // print its target and any additional information available.
+ for (unsigned j = 0; j != Relocs.size(); ++j)
+ if (Relocs[j].first >= SectAddress + Inst.Address &&
+ Relocs[j].first < SectAddress + Inst.Address + Inst.Size) {
+ StringRef SymName;
+ uint64_t Addr;
+ Relocs[j].second.getAddress(Addr);
+ Relocs[j].second.getName(SymName);
+
+ outs() << "\t# " << SymName << ' ';
+ DumpAddress(Addr, Sections, MachOObj, outs());
+ }
+
+            // If this instruction contains an address, see if we can evaluate
+            // it and print additional information.
+ uint64_t targ = InstrAnalysis->evaluateBranch(Inst.Inst,
+ Inst.Address,
+ Inst.Size);
+ if (targ != -1ULL)
+ DumpAddress(targ, Sections, MachOObj, outs());
+
+ // Print debug info.
+ if (diContext) {
+ DILineInfo dli =
+ diContext->getLineInfoForAddress(SectAddress + Inst.Address);
+ // Print valid line info if it changed.
+ if (dli != lastLine && dli.getLine() != 0)
+ outs() << "\t## " << dli.getFileName() << ':'
+ << dli.getLine() << ':' << dli.getColumn();
+ lastLine = dli;
+ }
+
+ outs() << '\n';
+ }
+ }
+
+ emitDOTFile((f.getName().str() + ".dot").c_str(), f, IP.get());
+ }
+ }
+ }
+}
diff --git a/contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp b/contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp
new file mode 100644
index 0000000..5a6f94a
--- /dev/null
+++ b/contrib/llvm/tools/llvm-objdump/llvm-objdump.cpp
@@ -0,0 +1,657 @@
+//===-- llvm-objdump.cpp - Object file dumping utility for llvm -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This program is a utility that works like binutils "objdump", that is, it
+// dumps out a plethora of information about an object file depending on the
+// flags.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm-objdump.h"
+#include "MCFunction.h"
+#include "llvm/Object/Archive.h"
+#include "llvm/Object/COFF.h"
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/MC/MCAsmInfo.h"
+#include "llvm/MC/MCDisassembler.h"
+#include "llvm/MC/MCInst.h"
+#include "llvm/MC/MCInstPrinter.h"
+#include "llvm/MC/MCInstrInfo.h"
+#include "llvm/MC/MCRegisterInfo.h"
+#include "llvm/MC/MCSubtargetInfo.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/GraphWriter.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/MemoryObject.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/SourceMgr.h"
+#include "llvm/Support/TargetRegistry.h"
+#include "llvm/Support/TargetSelect.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
+#include <algorithm>
+#include <cctype>
+#include <cstring>
+using namespace llvm;
+using namespace object;
+
+static cl::list<std::string>
+InputFilenames(cl::Positional, cl::desc("<input object files>"),cl::ZeroOrMore);
+
+static cl::opt<bool>
+Disassemble("disassemble",
+ cl::desc("Display assembler mnemonics for the machine instructions"));
+static cl::alias
+Disassembled("d", cl::desc("Alias for --disassemble"),
+ cl::aliasopt(Disassemble));
+
+static cl::opt<bool>
+Relocations("r", cl::desc("Display the relocation entries in the file"));
+
+static cl::opt<bool>
+SectionContents("s", cl::desc("Display the content of each section"));
+
+static cl::opt<bool>
+SymbolTable("t", cl::desc("Display the symbol table"));
+
+static cl::opt<bool>
+MachO("macho", cl::desc("Use MachO specific object file parser"));
+static cl::alias
+MachOm("m", cl::desc("Alias for --macho"), cl::aliasopt(MachO));
+
+cl::opt<std::string>
+llvm::TripleName("triple", cl::desc("Target triple to disassemble for, "
+ "see -version for available targets"));
+
+cl::opt<std::string>
+llvm::ArchName("arch", cl::desc("Target arch to disassemble for, "
+ "see -version for available targets"));
+
+static cl::opt<bool>
+SectionHeaders("section-headers", cl::desc("Display summaries of the headers "
+ "for each section."));
+static cl::alias
+SectionHeadersShort("headers", cl::desc("Alias for --section-headers"),
+ cl::aliasopt(SectionHeaders));
+static cl::alias
+SectionHeadersShorter("h", cl::desc("Alias for --section-headers"),
+ cl::aliasopt(SectionHeaders));
+
+static StringRef ToolName;
+
+static bool error(error_code ec) {
+ if (!ec) return false;
+
+ outs() << ToolName << ": error reading file: " << ec.message() << ".\n";
+ outs().flush();
+ return true;
+}
+
+static const Target *GetTarget(const ObjectFile *Obj = NULL) {
+ // Figure out the target triple.
+ llvm::Triple TT("unknown-unknown-unknown");
+ if (TripleName.empty()) {
+ if (Obj)
+ TT.setArch(Triple::ArchType(Obj->getArch()));
+ } else
+ TT.setTriple(Triple::normalize(TripleName));
+
+ if (!ArchName.empty())
+ TT.setArchName(ArchName);
+
+ TripleName = TT.str();
+
+ // Get the target specific parser.
+ std::string Error;
+ const Target *TheTarget = TargetRegistry::lookupTarget(TripleName, Error);
+ if (TheTarget)
+ return TheTarget;
+
+ errs() << ToolName << ": error: unable to get target for '" << TripleName
+ << "', see --version and --triple.\n";
+ return 0;
+}
+
+void llvm::StringRefMemoryObject::anchor() { }
+
+void llvm::DumpBytes(StringRef bytes) {
+ static const char hex_rep[] = "0123456789abcdef";
+ // FIXME: The real way to do this is to figure out the longest instruction
+ // and align to that size before printing. I'll fix this when I get
+ // around to outputting relocations.
+ // 15 is the longest x86 instruction
+ // 3 is for the hex rep of a byte + a space.
+ // 1 is for the null terminator.
+ enum { OutputSize = (15 * 3) + 1 };
+ char output[OutputSize];
+
+ assert(bytes.size() <= 15
+ && "DumpBytes only supports instructions of up to 15 bytes");
+ memset(output, ' ', sizeof(output));
+ unsigned index = 0;
+ for (StringRef::iterator i = bytes.begin(),
+ e = bytes.end(); i != e; ++i) {
+ output[index] = hex_rep[(*i & 0xF0) >> 4];
+ output[index + 1] = hex_rep[*i & 0xF];
+ index += 3;
+ }
+
+ output[sizeof(output) - 1] = 0;
+ outs() << output;
+}
+
+static bool RelocAddressLess(RelocationRef a, RelocationRef b) {
+ uint64_t a_addr, b_addr;
+ if (error(a.getAddress(a_addr))) return false;
+ if (error(b.getAddress(b_addr))) return false;
+ return a_addr < b_addr;
+}
+
+static void DisassembleObject(const ObjectFile *Obj, bool InlineRelocs) {
+ const Target *TheTarget = GetTarget(Obj);
+ if (!TheTarget) {
+ // GetTarget prints out stuff.
+ return;
+ }
+
+ error_code ec;
+ for (section_iterator i = Obj->begin_sections(),
+ e = Obj->end_sections();
+ i != e; i.increment(ec)) {
+ if (error(ec)) break;
+ bool text;
+ if (error(i->isText(text))) break;
+ if (!text) continue;
+
+ uint64_t SectionAddr;
+ if (error(i->getAddress(SectionAddr))) break;
+
+ // Make a list of all the symbols in this section.
+ std::vector<std::pair<uint64_t, StringRef> > Symbols;
+ for (symbol_iterator si = Obj->begin_symbols(),
+ se = Obj->end_symbols();
+ si != se; si.increment(ec)) {
+ bool contains;
+ if (!error(i->containsSymbol(*si, contains)) && contains) {
+ uint64_t Address;
+ if (error(si->getAddress(Address))) break;
+ Address -= SectionAddr;
+
+ StringRef Name;
+ if (error(si->getName(Name))) break;
+ Symbols.push_back(std::make_pair(Address, Name));
+ }
+ }
+
+ // Sort the symbols by address, just in case they didn't come in that way.
+ array_pod_sort(Symbols.begin(), Symbols.end());
+
+ // Make a list of all the relocations for this section.
+ std::vector<RelocationRef> Rels;
+ if (InlineRelocs) {
+ for (relocation_iterator ri = i->begin_relocations(),
+ re = i->end_relocations();
+ ri != re; ri.increment(ec)) {
+ if (error(ec)) break;
+ Rels.push_back(*ri);
+ }
+ }
+
+ // Sort relocations by address.
+ std::sort(Rels.begin(), Rels.end(), RelocAddressLess);
+
+ StringRef name;
+ if (error(i->getName(name))) break;
+ outs() << "Disassembly of section " << name << ':';
+
+    // If the section has no symbols, just insert a dummy one and disassemble
+ // the whole section.
+ if (Symbols.empty())
+ Symbols.push_back(std::make_pair(0, name));
+
+ // Set up disassembler.
+ OwningPtr<const MCAsmInfo> AsmInfo(TheTarget->createMCAsmInfo(TripleName));
+
+ if (!AsmInfo) {
+ errs() << "error: no assembly info for target " << TripleName << "\n";
+ return;
+ }
+
+ OwningPtr<const MCSubtargetInfo> STI(
+ TheTarget->createMCSubtargetInfo(TripleName, "", ""));
+
+ if (!STI) {
+ errs() << "error: no subtarget info for target " << TripleName << "\n";
+ return;
+ }
+
+ OwningPtr<const MCDisassembler> DisAsm(
+ TheTarget->createMCDisassembler(*STI));
+ if (!DisAsm) {
+ errs() << "error: no disassembler for target " << TripleName << "\n";
+ return;
+ }
+
+ OwningPtr<const MCRegisterInfo> MRI(TheTarget->createMCRegInfo(TripleName));
+ if (!MRI) {
+ errs() << "error: no register info for target " << TripleName << "\n";
+ return;
+ }
+
+ OwningPtr<const MCInstrInfo> MII(TheTarget->createMCInstrInfo());
+ if (!MII) {
+ errs() << "error: no instruction info for target " << TripleName << "\n";
+ return;
+ }
+
+ int AsmPrinterVariant = AsmInfo->getAssemblerDialect();
+ OwningPtr<MCInstPrinter> IP(TheTarget->createMCInstPrinter(
+ AsmPrinterVariant, *AsmInfo, *MII, *MRI, *STI));
+ if (!IP) {
+ errs() << "error: no instruction printer for target " << TripleName
+ << '\n';
+ return;
+ }
+
+ StringRef Bytes;
+ if (error(i->getContents(Bytes))) break;
+ StringRefMemoryObject memoryObject(Bytes);
+ uint64_t Size;
+ uint64_t Index;
+ uint64_t SectSize;
+ if (error(i->getSize(SectSize))) break;
+
+ std::vector<RelocationRef>::const_iterator rel_cur = Rels.begin();
+ std::vector<RelocationRef>::const_iterator rel_end = Rels.end();
+ // Disassemble symbol by symbol.
+ for (unsigned si = 0, se = Symbols.size(); si != se; ++si) {
+ uint64_t Start = Symbols[si].first;
+ uint64_t End;
+ // The end is either the size of the section or the beginning of the next
+ // symbol.
+ if (si == se - 1)
+ End = SectSize;
+ // Make sure this symbol takes up space.
+ else if (Symbols[si + 1].first != Start)
+ End = Symbols[si + 1].first - 1;
+ else
+ // This symbol has the same address as the next symbol. Skip it.
+ continue;
+
+ outs() << '\n' << Symbols[si].second << ":\n";
+
+#ifndef NDEBUG
+ raw_ostream &DebugOut = DebugFlag ? dbgs() : nulls();
+#else
+ raw_ostream &DebugOut = nulls();
+#endif
+
+ for (Index = Start; Index < End; Index += Size) {
+ MCInst Inst;
+
+ if (DisAsm->getInstruction(Inst, Size, memoryObject, Index,
+ DebugOut, nulls())) {
+ outs() << format("%8" PRIx64 ":\t", SectionAddr + Index);
+ DumpBytes(StringRef(Bytes.data() + Index, Size));
+ IP->printInst(&Inst, outs(), "");
+ outs() << "\n";
+ } else {
+ errs() << ToolName << ": warning: invalid instruction encoding\n";
+ if (Size == 0)
+ Size = 1; // skip illegible bytes
+ }
+
+ // Print relocation for instruction.
+ while (rel_cur != rel_end) {
+ bool hidden = false;
+ uint64_t addr;
+ SmallString<16> name;
+ SmallString<32> val;
+
+ // If this relocation is hidden, skip it.
+ if (error(rel_cur->getHidden(hidden))) goto skip_print_rel;
+ if (hidden) goto skip_print_rel;
+
+ if (error(rel_cur->getAddress(addr))) goto skip_print_rel;
+ // Stop when rel_cur's address is past the current instruction.
+ if (addr >= Index + Size) break;
+ if (error(rel_cur->getTypeName(name))) goto skip_print_rel;
+ if (error(rel_cur->getValueString(val))) goto skip_print_rel;
+
+ outs() << format("\t\t\t%8" PRIx64 ": ", SectionAddr + addr) << name
+ << "\t" << val << "\n";
+
+ skip_print_rel:
+ ++rel_cur;
+ }
+ }
+ }
+ }
+}
+
+static void PrintRelocations(const ObjectFile *o) {
+ error_code ec;
+ for (section_iterator si = o->begin_sections(), se = o->end_sections();
+ si != se; si.increment(ec)){
+ if (error(ec)) return;
+ if (si->begin_relocations() == si->end_relocations())
+ continue;
+ StringRef secname;
+ if (error(si->getName(secname))) continue;
+ outs() << "RELOCATION RECORDS FOR [" << secname << "]:\n";
+ for (relocation_iterator ri = si->begin_relocations(),
+ re = si->end_relocations();
+ ri != re; ri.increment(ec)) {
+ if (error(ec)) return;
+
+ bool hidden;
+ uint64_t address;
+ SmallString<32> relocname;
+ SmallString<32> valuestr;
+ if (error(ri->getHidden(hidden))) continue;
+ if (hidden) continue;
+ if (error(ri->getTypeName(relocname))) continue;
+ if (error(ri->getAddress(address))) continue;
+ if (error(ri->getValueString(valuestr))) continue;
+ outs() << address << " " << relocname << " " << valuestr << "\n";
+ }
+ outs() << "\n";
+ }
+}
+
+static void PrintSectionHeaders(const ObjectFile *o) {
+ outs() << "Sections:\n"
+ "Idx Name Size Address Type\n";
+ error_code ec;
+ unsigned i = 0;
+ for (section_iterator si = o->begin_sections(), se = o->end_sections();
+ si != se; si.increment(ec)) {
+ if (error(ec)) return;
+ StringRef Name;
+ if (error(si->getName(Name))) return;
+ uint64_t Address;
+ if (error(si->getAddress(Address))) return;
+ uint64_t Size;
+ if (error(si->getSize(Size))) return;
+ bool Text, Data, BSS;
+ if (error(si->isText(Text))) return;
+ if (error(si->isData(Data))) return;
+ if (error(si->isBSS(BSS))) return;
+ std::string Type = (std::string(Text ? "TEXT " : "") +
+ (Data ? "DATA " : "") + (BSS ? "BSS" : ""));
+ outs() << format("%3d %-13s %09" PRIx64 " %017" PRIx64 " %s\n",
+ i, Name.str().c_str(), Size, Address, Type.c_str());
+ ++i;
+ }
+}
+
+static void PrintSectionContents(const ObjectFile *o) {
+ error_code ec;
+ for (section_iterator si = o->begin_sections(),
+ se = o->end_sections();
+ si != se; si.increment(ec)) {
+ if (error(ec)) return;
+ StringRef Name;
+ StringRef Contents;
+ uint64_t BaseAddr;
+ if (error(si->getName(Name))) continue;
+ if (error(si->getContents(Contents))) continue;
+ if (error(si->getAddress(BaseAddr))) continue;
+
+ outs() << "Contents of section " << Name << ":\n";
+
+ // Dump out the content as hex and printable ascii characters.
+ for (std::size_t addr = 0, end = Contents.size(); addr < end; addr += 16) {
+ outs() << format(" %04" PRIx64 " ", BaseAddr + addr);
+ // Dump line of hex.
+ for (std::size_t i = 0; i < 16; ++i) {
+ if (i != 0 && i % 4 == 0)
+ outs() << ' ';
+ if (addr + i < end)
+ outs() << hexdigit((Contents[addr + i] >> 4) & 0xF, true)
+ << hexdigit(Contents[addr + i] & 0xF, true);
+ else
+ outs() << " ";
+ }
+ // Print ascii.
+ outs() << " ";
+ for (std::size_t i = 0; i < 16 && addr + i < end; ++i) {
+ if (std::isprint(Contents[addr + i] & 0xFF))
+ outs() << Contents[addr + i];
+ else
+ outs() << ".";
+ }
+ outs() << "\n";
+ }
+ }
+}
+
+static void PrintCOFFSymbolTable(const COFFObjectFile *coff) {
+ const coff_file_header *header;
+ if (error(coff->getHeader(header))) return;
+ int aux_count = 0;
+ const coff_symbol *symbol = 0;
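+  // Walk the symbol table; a symbol entry may be followed by auxiliary
+  // records, which are consumed by counting aux_count down.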
+ for (int i = 0, e = header->NumberOfSymbols; i != e; ++i) {
+ if (aux_count--) {
+ // Figure out which type of aux this is.
+ if (symbol->StorageClass == COFF::IMAGE_SYM_CLASS_STATIC
+ && symbol->Value == 0) { // Section definition.
+ const coff_aux_section_definition *asd;
+ if (error(coff->getAuxSymbol<coff_aux_section_definition>(i, asd)))
+ return;
+ outs() << "AUX "
+ << format("scnlen 0x%x nreloc %d nlnno %d checksum 0x%x "
+ , unsigned(asd->Length)
+ , unsigned(asd->NumberOfRelocations)
+ , unsigned(asd->NumberOfLinenumbers)
+ , unsigned(asd->CheckSum))
+ << format("assoc %d comdat %d\n"
+ , unsigned(asd->Number)
+ , unsigned(asd->Selection));
+ } else {
+ outs() << "AUX Unknown\n";
+ }
+ } else {
+ StringRef name;
+ if (error(coff->getSymbol(i, symbol))) return;
+ if (error(coff->getSymbolName(symbol, name))) return;
+ outs() << "[" << format("%2d", i) << "]"
+ << "(sec " << format("%2d", int(symbol->SectionNumber)) << ")"
+ << "(fl 0x00)" // Flag bits, which COFF doesn't have.
+ << "(ty " << format("%3x", unsigned(symbol->Type)) << ")"
+ << "(scl " << format("%3x", unsigned(symbol->StorageClass)) << ") "
+ << "(nx " << unsigned(symbol->NumberOfAuxSymbols) << ") "
+ << "0x" << format("%08x", unsigned(symbol->Value)) << " "
+ << name << "\n";
+ aux_count = symbol->NumberOfAuxSymbols;
+ }
+ }
+}
+
+static void PrintSymbolTable(const ObjectFile *o) {
+ outs() << "SYMBOL TABLE:\n";
+
+ if (const COFFObjectFile *coff = dyn_cast<const COFFObjectFile>(o))
+ PrintCOFFSymbolTable(coff);
+ else {
+ error_code ec;
+ for (symbol_iterator si = o->begin_symbols(),
+ se = o->end_symbols(); si != se; si.increment(ec)) {
+ if (error(ec)) return;
+ StringRef Name;
+ uint64_t Address;
+ SymbolRef::Type Type;
+ uint64_t Size;
+ uint32_t Flags;
+ section_iterator Section = o->end_sections();
+ if (error(si->getName(Name))) continue;
+ if (error(si->getAddress(Address))) continue;
+ if (error(si->getFlags(Flags))) continue;
+ if (error(si->getType(Type))) continue;
+ if (error(si->getSize(Size))) continue;
+ if (error(si->getSection(Section))) continue;
+
+ bool Global = Flags & SymbolRef::SF_Global;
+ bool Weak = Flags & SymbolRef::SF_Weak;
+ bool Absolute = Flags & SymbolRef::SF_Absolute;
+
+ if (Address == UnknownAddressOrSize)
+ Address = 0;
+ if (Size == UnknownAddressOrSize)
+ Size = 0;
+ char GlobLoc = ' ';
+ if (Type != SymbolRef::ST_Unknown)
+ GlobLoc = Global ? 'g' : 'l';
+ char Debug = (Type == SymbolRef::ST_Debug || Type == SymbolRef::ST_File)
+ ? 'd' : ' ';
+ char FileFunc = ' ';
+ if (Type == SymbolRef::ST_File)
+ FileFunc = 'f';
+ else if (Type == SymbolRef::ST_Function)
+ FileFunc = 'F';
+
+ outs() << format("%08" PRIx64, Address) << " "
+ << GlobLoc // Local -> 'l', Global -> 'g', Neither -> ' '
+ << (Weak ? 'w' : ' ') // Weak?
+ << ' ' // Constructor. Not supported yet.
+ << ' ' // Warning. Not supported yet.
+ << ' ' // Indirect reference to another symbol.
+ << Debug // Debugging (d) or dynamic (D) symbol.
+ << FileFunc // Name of function (F), file (f) or object (O).
+ << ' ';
+ if (Absolute)
+ outs() << "*ABS*";
+ else if (Section == o->end_sections())
+ outs() << "*UND*";
+ else {
+ StringRef SectionName;
+ if (error(Section->getName(SectionName)))
+ SectionName = "";
+ outs() << SectionName;
+ }
+ outs() << '\t'
+ << format("%08" PRIx64 " ", Size)
+ << Name
+ << '\n';
+ }
+ }
+}
+
+static void DumpObject(const ObjectFile *o) {
+ outs() << '\n';
+ outs() << o->getFileName()
+ << ":\tfile format " << o->getFileFormatName() << "\n\n";
+
+ if (Disassemble)
+ DisassembleObject(o, Relocations);
+ if (Relocations && !Disassemble)
+ PrintRelocations(o);
+ if (SectionHeaders)
+ PrintSectionHeaders(o);
+ if (SectionContents)
+ PrintSectionContents(o);
+ if (SymbolTable)
+ PrintSymbolTable(o);
+}
+
+/// @brief Dump each object file in \a a.
+static void DumpArchive(const Archive *a) {
+ for (Archive::child_iterator i = a->begin_children(),
+ e = a->end_children(); i != e; ++i) {
+ OwningPtr<Binary> child;
+ if (error_code ec = i->getAsBinary(child)) {
+ // Ignore non-object files.
+ if (ec != object_error::invalid_file_type)
+ errs() << ToolName << ": '" << a->getFileName() << "': " << ec.message()
+ << ".\n";
+ continue;
+ }
+ if (ObjectFile *o = dyn_cast<ObjectFile>(child.get()))
+ DumpObject(o);
+ else
+ errs() << ToolName << ": '" << a->getFileName() << "': "
+ << "Unrecognized file type.\n";
+ }
+}
+
+/// @brief Open file and figure out how to dump it.
+static void DumpInput(StringRef file) {
+ // If file isn't stdin, check that it exists.
+ if (file != "-" && !sys::fs::exists(file)) {
+ errs() << ToolName << ": '" << file << "': " << "No such file\n";
+ return;
+ }
+
+ if (MachO && Disassemble) {
+ DisassembleInputMachO(file);
+ return;
+ }
+
+ // Attempt to open the binary.
+ OwningPtr<Binary> binary;
+ if (error_code ec = createBinary(file, binary)) {
+ errs() << ToolName << ": '" << file << "': " << ec.message() << ".\n";
+ return;
+ }
+
+ if (Archive *a = dyn_cast<Archive>(binary.get())) {
+ DumpArchive(a);
+ } else if (ObjectFile *o = dyn_cast<ObjectFile>(binary.get())) {
+ DumpObject(o);
+ } else {
+ errs() << ToolName << ": '" << file << "': " << "Unrecognized file type.\n";
+ }
+}
+
+int main(int argc, char **argv) {
+ // Print a stack trace if we signal out.
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(argc, argv);
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+
+ // Initialize targets and assembly printers/parsers.
+ llvm::InitializeAllTargetInfos();
+ llvm::InitializeAllTargetMCs();
+ llvm::InitializeAllAsmParsers();
+ llvm::InitializeAllDisassemblers();
+
+ cl::ParseCommandLineOptions(argc, argv, "llvm object file dumper\n");
+ TripleName = Triple::normalize(TripleName);
+
+ ToolName = argv[0];
+
+  // Default to a.out if no filenames were specified.
+ if (InputFilenames.size() == 0)
+ InputFilenames.push_back("a.out");
+
+ if (!Disassemble
+ && !Relocations
+ && !SectionHeaders
+ && !SectionContents
+ && !SymbolTable) {
+ cl::PrintHelpMessage();
+ return 2;
+ }
+
+ std::for_each(InputFilenames.begin(), InputFilenames.end(),
+ DumpInput);
+
+ return 0;
+}
diff --git a/contrib/llvm/tools/llvm-objdump/llvm-objdump.h b/contrib/llvm/tools/llvm-objdump/llvm-objdump.h
new file mode 100644
index 0000000..aa71b77
--- /dev/null
+++ b/contrib/llvm/tools/llvm-objdump/llvm-objdump.h
@@ -0,0 +1,46 @@
+//===-- llvm-objdump.h ----------------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_OBJDUMP_H
+#define LLVM_OBJDUMP_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/DataTypes.h"
+#include "llvm/Support/MemoryObject.h"
+
+namespace llvm {
+
+extern cl::opt<std::string> TripleName;
+extern cl::opt<std::string> ArchName;
+
+// Various helper functions.
+void DumpBytes(StringRef bytes);
+void DisassembleInputMachO(StringRef Filename);
+
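+/// StringRefMemoryObject - Adapts a StringRef of raw bytes to the
+/// MemoryObject interface the disassembler reads from.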
+class StringRefMemoryObject : public MemoryObject {
+ virtual void anchor();
+ StringRef Bytes;
+public:
+ StringRefMemoryObject(StringRef bytes) : Bytes(bytes) {}
+
+ uint64_t getBase() const { return 0; }
+ uint64_t getExtent() const { return Bytes.size(); }
+
+ int readByte(uint64_t Addr, uint8_t *Byte) const {
+ if (Addr >= getExtent())
+ return -1;
+ *Byte = Bytes[Addr];
+ return 0;
+ }
+};
+
+}
+
+#endif
diff --git a/contrib/llvm/tools/llvm-prof/llvm-prof.cpp b/contrib/llvm/tools/llvm-prof/llvm-prof.cpp
new file mode 100644
index 0000000..d9b6713
--- /dev/null
+++ b/contrib/llvm/tools/llvm-prof/llvm-prof.cpp
@@ -0,0 +1,293 @@
+//===- llvm-prof.cpp - Read in and process llvmprof.out data files --------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tool is meant for use with the various LLVM profiling instrumentation
+// passes. It reads in the data file produced by executing an instrumented
+// program, and outputs a nice report.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/InstrTypes.h"
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Assembly/AssemblyAnnotationWriter.h"
+#include "llvm/Analysis/ProfileInfo.h"
+#include "llvm/Analysis/ProfileInfoLoader.h"
+#include "llvm/Analysis/Passes.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/system_error.h"
+#include <algorithm>
+#include <iomanip>
+#include <map>
+#include <set>
+
+using namespace llvm;
+
+namespace {
+ cl::opt<std::string>
+ BitcodeFile(cl::Positional, cl::desc("<program bitcode file>"),
+ cl::Required);
+
+ cl::opt<std::string>
+ ProfileDataFile(cl::Positional, cl::desc("<llvmprof.out file>"),
+ cl::Optional, cl::init("llvmprof.out"));
+
+ cl::opt<bool>
+ PrintAnnotatedLLVM("annotated-llvm",
+ cl::desc("Print LLVM code with frequency annotations"));
+ cl::alias PrintAnnotated2("A", cl::desc("Alias for --annotated-llvm"),
+ cl::aliasopt(PrintAnnotatedLLVM));
+ cl::opt<bool>
+ PrintAllCode("print-all-code",
+ cl::desc("Print annotated code for the entire program"));
+}
+
+// PairSecondSortReverse - A sorting predicate that orders pairs by their
+// second element, largest first.
+template<class T>
+struct PairSecondSortReverse
+ : public std::binary_function<std::pair<T, double>,
+ std::pair<T, double>, bool> {
+ bool operator()(const std::pair<T, double> &LHS,
+ const std::pair<T, double> &RHS) const {
+ return LHS.second > RHS.second;
+ }
+};
+
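+// Treat a missing profile value as an execution count of zero.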
+static double ignoreMissing(double w) {
+ if (w == ProfileInfo::MissingValue) return 0;
+ return w;
+}
+
+namespace {
+ class ProfileAnnotator : public AssemblyAnnotationWriter {
+ ProfileInfo &PI;
+ public:
+ ProfileAnnotator(ProfileInfo &pi) : PI(pi) {}
+
+ virtual void emitFunctionAnnot(const Function *F,
+ formatted_raw_ostream &OS) {
+ double w = PI.getExecutionCount(F);
+ if (w != ProfileInfo::MissingValue) {
+ OS << ";;; %" << F->getName() << " called "<<(unsigned)w
+ <<" times.\n;;;\n";
+ }
+ }
+ virtual void emitBasicBlockStartAnnot(const BasicBlock *BB,
+ formatted_raw_ostream &OS) {
+ double w = PI.getExecutionCount(BB);
+ if (w != ProfileInfo::MissingValue) {
+ if (w != 0) {
+ OS << "\t;;; Basic block executed " << (unsigned)w << " times.\n";
+ } else {
+ OS << "\t;;; Never executed!\n";
+ }
+ }
+ }
+
+ virtual void emitBasicBlockEndAnnot(const BasicBlock *BB,
+ formatted_raw_ostream &OS) {
+ // Figure out how many times each successor executed.
+ std::vector<std::pair<ProfileInfo::Edge, double> > SuccCounts;
+
+ const TerminatorInst *TI = BB->getTerminator();
+ for (unsigned s = 0, e = TI->getNumSuccessors(); s != e; ++s) {
+ BasicBlock* Succ = TI->getSuccessor(s);
+ double w = ignoreMissing(PI.getEdgeWeight(std::make_pair(BB, Succ)));
+ if (w != 0)
+ SuccCounts.push_back(std::make_pair(std::make_pair(BB, Succ), w));
+ }
+ if (!SuccCounts.empty()) {
+ OS << "\t;;; Out-edge counts:";
+ for (unsigned i = 0, e = SuccCounts.size(); i != e; ++i)
+ OS << " [" << (SuccCounts[i]).second << " -> "
+ << (SuccCounts[i]).first.second->getName() << "]";
+ OS << "\n";
+ }
+ }
+ };
+}
+
+namespace {
+ /// ProfileInfoPrinterPass - Helper pass to dump the profile information for
+ /// a module.
+ //
+ // FIXME: This should move elsewhere.
+ class ProfileInfoPrinterPass : public ModulePass {
+ ProfileInfoLoader &PIL;
+ public:
+ static char ID; // Class identification, replacement for typeinfo.
+ explicit ProfileInfoPrinterPass(ProfileInfoLoader &_PIL)
+ : ModulePass(ID), PIL(_PIL) {}
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequired<ProfileInfo>();
+ }
+
+ bool runOnModule(Module &M);
+ };
+}
+
+char ProfileInfoPrinterPass::ID = 0;
+
+bool ProfileInfoPrinterPass::runOnModule(Module &M) {
+ ProfileInfo &PI = getAnalysis<ProfileInfo>();
+ std::map<const Function *, unsigned> FuncFreqs;
+ std::map<const BasicBlock*, unsigned> BlockFreqs;
+ std::map<ProfileInfo::Edge, unsigned> EdgeFreqs;
+
+  // Output a report. Eventually, there will be multiple reports selectable on
+  // the command line; for now, just keep things simple.
+
+ // Emit the most frequent function table...
+ std::vector<std::pair<Function*, double> > FunctionCounts;
+ std::vector<std::pair<BasicBlock*, double> > Counts;
+ for (Module::iterator FI = M.begin(), FE = M.end(); FI != FE; ++FI) {
+ if (FI->isDeclaration()) continue;
+ double w = ignoreMissing(PI.getExecutionCount(FI));
+ FunctionCounts.push_back(std::make_pair(FI, w));
+ for (Function::iterator BB = FI->begin(), BBE = FI->end();
+ BB != BBE; ++BB) {
+ double w = ignoreMissing(PI.getExecutionCount(BB));
+ Counts.push_back(std::make_pair(BB, w));
+ }
+ }
+
+ // Sort by the frequency, backwards.
+ sort(FunctionCounts.begin(), FunctionCounts.end(),
+ PairSecondSortReverse<Function*>());
+
+ double TotalExecutions = 0;
+ for (unsigned i = 0, e = FunctionCounts.size(); i != e; ++i)
+ TotalExecutions += FunctionCounts[i].second;
+
+ outs() << "===" << std::string(73, '-') << "===\n"
+ << "LLVM profiling output for execution";
+ if (PIL.getNumExecutions() != 1) outs() << "s";
+ outs() << ":\n";
+
+ for (unsigned i = 0, e = PIL.getNumExecutions(); i != e; ++i) {
+ outs() << " ";
+ if (e != 1) outs() << i+1 << ". ";
+ outs() << PIL.getExecution(i) << "\n";
+ }
+
+ outs() << "\n===" << std::string(73, '-') << "===\n";
+ outs() << "Function execution frequencies:\n\n";
+
+ // Print out the function frequencies...
+ outs() << " ## Frequency\n";
+ for (unsigned i = 0, e = FunctionCounts.size(); i != e; ++i) {
+ if (FunctionCounts[i].second == 0) {
+ outs() << "\n NOTE: " << e-i << " function"
+ << (e-i-1 ? "s were" : " was") << " never executed!\n";
+ break;
+ }
+
+ outs() << format("%3d", i+1) << ". "
+ << format("%5.2g", FunctionCounts[i].second) << "/"
+ << format("%g", TotalExecutions) << " "
+ << FunctionCounts[i].first->getName() << "\n";
+ }
+
+ std::set<Function*> FunctionsToPrint;
+
+ TotalExecutions = 0;
+ for (unsigned i = 0, e = Counts.size(); i != e; ++i)
+ TotalExecutions += Counts[i].second;
+
+ // Sort by the frequency, backwards.
+ sort(Counts.begin(), Counts.end(),
+ PairSecondSortReverse<BasicBlock*>());
+
+ outs() << "\n===" << std::string(73, '-') << "===\n";
+ outs() << "Top 20 most frequently executed basic blocks:\n\n";
+
+  // Print out the basic block frequencies...
+ outs() <<" ## %% \tFrequency\n";
+ unsigned BlocksToPrint = Counts.size();
+ if (BlocksToPrint > 20) BlocksToPrint = 20;
+ for (unsigned i = 0; i != BlocksToPrint; ++i) {
+ if (Counts[i].second == 0) break;
+ Function *F = Counts[i].first->getParent();
+ outs() << format("%3d", i+1) << ". "
+ << format("%5g", Counts[i].second/(double)TotalExecutions*100)<<"% "
+ << format("%5.0f", Counts[i].second) << "/"
+ << format("%g", TotalExecutions) << "\t"
+ << F->getName() << "() - "
+ << Counts[i].first->getName() << "\n";
+ FunctionsToPrint.insert(F);
+ }
+
+ if (PrintAnnotatedLLVM || PrintAllCode) {
+ outs() << "\n===" << std::string(73, '-') << "===\n";
+ outs() << "Annotated LLVM code for the module:\n\n";
+
+ ProfileAnnotator PA(PI);
+
+ if (FunctionsToPrint.empty() || PrintAllCode)
+ M.print(outs(), &PA);
+ else
+ // Print just a subset of the functions.
+ for (std::set<Function*>::iterator I = FunctionsToPrint.begin(),
+ E = FunctionsToPrint.end(); I != E; ++I)
+ (*I)->print(outs(), &PA);
+ }
+
+ return false;
+}
+
+int main(int argc, char **argv) {
+ // Print a stack trace if we signal out.
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(argc, argv);
+
+ LLVMContext &Context = getGlobalContext();
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+
+ cl::ParseCommandLineOptions(argc, argv, "llvm profile dump decoder\n");
+
+ // Read in the bitcode file...
+ std::string ErrorMessage;
+ OwningPtr<MemoryBuffer> Buffer;
+ error_code ec;
+ Module *M = 0;
+ if (!(ec = MemoryBuffer::getFileOrSTDIN(BitcodeFile, Buffer))) {
+ M = ParseBitcodeFile(Buffer.get(), Context, &ErrorMessage);
+ } else
+ ErrorMessage = ec.message();
+ if (M == 0) {
+ errs() << argv[0] << ": " << BitcodeFile << ": "
+ << ErrorMessage << "\n";
+ return 1;
+ }
+
+ // Read the profiling information. This is redundant since we load it again
+ // using the standard profile info provider pass, but for now this gives us
+ // access to additional information not exposed via the ProfileInfo
+ // interface.
+ ProfileInfoLoader PIL(argv[0], ProfileDataFile, *M);
+
+ // Run the printer pass.
+ PassManager PassMgr;
+ PassMgr.add(createProfileLoaderPass(ProfileDataFile));
+ PassMgr.add(new ProfileInfoPrinterPass(PIL));
+ PassMgr.run(*M);
+
+ return 0;
+}
diff --git a/contrib/llvm/tools/llvm-ranlib/llvm-ranlib.cpp b/contrib/llvm/tools/llvm-ranlib/llvm-ranlib.cpp
new file mode 100644
index 0000000..64f795f
--- /dev/null
+++ b/contrib/llvm/tools/llvm-ranlib/llvm-ranlib.cpp
@@ -0,0 +1,101 @@
+//===-- llvm-ranlib.cpp - LLVM archive index generator --------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Adds or updates an index (symbol table) for an LLVM archive file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/Bitcode/Archive.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/Signals.h"
+#include <memory>
+using namespace llvm;
+
+// llvm-ar operation code and modifier flags
+static cl::opt<std::string>
+ArchiveName(cl::Positional, cl::Optional, cl::desc("<archive-file>"));
+
+static cl::opt<bool>
+Verbose("verbose",cl::Optional,cl::init(false),
+ cl::desc("Print the symbol table"));
+
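+// Illustrative use of the options above (the archive name is hypothetical):
+//
+//   llvm-ranlib -verbose libfoo.a
+//
+// main() below rewrites the archive's symbol table in place and, when
+// -verbose is given, prints the resulting table via printSymbolTable().
+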
+// printSymbolTable - print out the archive's symbol table.
+void printSymbolTable(Archive* TheArchive) {
+ outs() << "\nArchive Symbol Table:\n";
+ const Archive::SymTabType& symtab = TheArchive->getSymbolTable();
+ for (Archive::SymTabType::const_iterator I=symtab.begin(), E=symtab.end();
+ I != E; ++I ) {
+ unsigned offset = TheArchive->getFirstFileOffset() + I->second;
+ outs() << " " << format("%9u", offset) << "\t" << I->first <<"\n";
+ }
+}
+
+int main(int argc, char **argv) {
+ // Print a stack trace if we signal out.
+ llvm::sys::PrintStackTraceOnErrorSignal();
+ llvm::PrettyStackTraceProgram X(argc, argv);
+
+ LLVMContext &Context = getGlobalContext();
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+
+ // Have the command line options parsed and handle things
+ // like --help and --version.
+ cl::ParseCommandLineOptions(argc, argv,
+ "LLVM Archive Index Generator (llvm-ranlib)\n\n"
+ " This program adds or updates an index of bitcode symbols\n"
+ " to an LLVM archive file."
+ );
+
+ int exitCode = 0;
+
+ // Make sure we don't exit with "unhandled exception".
+ try {
+
+ // Check the path name of the archive
+ sys::Path ArchivePath;
+ if (!ArchivePath.set(ArchiveName))
+ throw std::string("Archive name invalid: ") + ArchiveName;
+
+ // Make sure it exists, we don't create empty archives
+ bool Exists;
+ if (llvm::sys::fs::exists(ArchivePath.str(), Exists) || !Exists)
+ throw std::string("Archive file does not exist");
+
+ std::string err_msg;
+ std::auto_ptr<Archive>
+ AutoArchive(Archive::OpenAndLoad(ArchivePath, Context, &err_msg));
+ Archive* TheArchive = AutoArchive.get();
+ if (!TheArchive)
+ throw err_msg;
+
+ if (TheArchive->writeToDisk(true, false, false, &err_msg ))
+ throw err_msg;
+
+ if (Verbose)
+ printSymbolTable(TheArchive);
+
+ } catch (const char* msg) {
+ errs() << argv[0] << ": " << msg << "\n\n";
+ exitCode = 1;
+ } catch (const std::string& msg) {
+ errs() << argv[0] << ": " << msg << "\n";
+ exitCode = 2;
+ } catch (...) {
+ errs() << argv[0] << ": An unexpected unknown exception occurred.\n";
+ exitCode = 3;
+ }
+ return exitCode;
+}
diff --git a/contrib/llvm/tools/llvm-readobj/llvm-readobj.cpp b/contrib/llvm/tools/llvm-readobj/llvm-readobj.cpp
new file mode 100644
index 0000000..3be1289
--- /dev/null
+++ b/contrib/llvm/tools/llvm-readobj/llvm-readobj.cpp
@@ -0,0 +1,218 @@
+//===- llvm-readobj.cpp - Dump contents of an Object File -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This program is a utility that works like traditional Unix "readelf",
+// except that it can handle any type of object file recognized by lib/Object.
+//
+// It makes use of the generic ObjectFile interface.
+//
+// Caution: This utility is new, experimental, unsupported, and incomplete.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Object/ObjectFile.h"
+#include "llvm/Object/ELF.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/FormattedStream.h"
+
+using namespace llvm;
+using namespace llvm::object;
+
+static cl::opt<std::string>
+InputFilename(cl::Positional, cl::desc("<input object>"), cl::init(""));
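+
+// Illustrative use (the object file name is hypothetical):
+//
+//   llvm-readobj foo.o
+//
+// main() below prints the file headers, the regular and dynamic symbol
+// tables, and the list of needed libraries for any format lib/Object
+// recognizes.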
+
+void DumpSymbolHeader() {
+ outs() << format(" %-32s", (const char*)"Name")
+ << format(" %-4s", (const char*)"Type")
+ << format(" %-16s", (const char*)"Address")
+ << format(" %-16s", (const char*)"Size")
+ << format(" %-16s", (const char*)"FileOffset")
+ << format(" %-26s", (const char*)"Flags")
+ << "\n";
+}
+
+const char *GetTypeStr(SymbolRef::Type Type) {
+ switch (Type) {
+ case SymbolRef::ST_Unknown: return "?";
+ case SymbolRef::ST_Data: return "DATA";
+ case SymbolRef::ST_Debug: return "DBG";
+ case SymbolRef::ST_File: return "FILE";
+ case SymbolRef::ST_Function: return "FUNC";
+ case SymbolRef::ST_Other: return "-";
+ }
+ return "INV";
+}
+
+std::string GetFlagStr(uint32_t Flags) {
+ std::string result;
+ if (Flags & SymbolRef::SF_Undefined)
+ result += "undef,";
+ if (Flags & SymbolRef::SF_Global)
+ result += "global,";
+ if (Flags & SymbolRef::SF_Weak)
+ result += "weak,";
+ if (Flags & SymbolRef::SF_Absolute)
+ result += "absolute,";
+ if (Flags & SymbolRef::SF_ThreadLocal)
+ result += "threadlocal,";
+ if (Flags & SymbolRef::SF_Common)
+ result += "common,";
+ if (Flags & SymbolRef::SF_FormatSpecific)
+ result += "formatspecific,";
+
+ // Remove trailing comma
+ if (result.size() > 0) {
+ result.erase(result.size() - 1);
+ }
+ return result;
+}
+
+void DumpSymbol(const SymbolRef &Sym, const ObjectFile *obj, bool IsDynamic) {
+ StringRef Name;
+ SymbolRef::Type Type;
+ uint32_t Flags;
+ uint64_t Address;
+ uint64_t Size;
+ uint64_t FileOffset;
+ Sym.getName(Name);
+ Sym.getAddress(Address);
+ Sym.getSize(Size);
+ Sym.getFileOffset(FileOffset);
+ Sym.getType(Type);
+ Sym.getFlags(Flags);
+ std::string FullName = Name;
+
+ // If this is a dynamic symbol from an ELF object, append
+ // the symbol's version to the name.
+ if (IsDynamic && obj->isELF()) {
+ StringRef Version;
+ bool IsDefault;
+ GetELFSymbolVersion(obj, Sym, Version, IsDefault);
+ if (!Version.empty()) {
+ FullName += (IsDefault ? "@@" : "@");
+ FullName += Version;
+ }
+ }
+
+ // format() can't handle StringRefs
+ outs() << format(" %-32s", FullName.c_str())
+ << format(" %-4s", GetTypeStr(Type))
+ << format(" %16" PRIx64, Address)
+ << format(" %16" PRIx64, Size)
+ << format(" %16" PRIx64, FileOffset)
+ << " " << GetFlagStr(Flags)
+ << "\n";
+}
+
+
+// Iterate through the normal symbols in the ObjectFile
+void DumpSymbols(const ObjectFile *obj) {
+ error_code ec;
+ uint32_t count = 0;
+ outs() << "Symbols:\n";
+ symbol_iterator it = obj->begin_symbols();
+ symbol_iterator ie = obj->end_symbols();
+ while (it != ie) {
+ DumpSymbol(*it, obj, false);
+ it.increment(ec);
+ if (ec)
+ report_fatal_error("Symbol iteration failed");
+ ++count;
+ }
+ outs() << " Total: " << count << "\n\n";
+}
+
+// Iterate through the dynamic symbols in the ObjectFile.
+void DumpDynamicSymbols(const ObjectFile *obj) {
+ error_code ec;
+ uint32_t count = 0;
+ outs() << "Dynamic Symbols:\n";
+ symbol_iterator it = obj->begin_dynamic_symbols();
+ symbol_iterator ie = obj->end_dynamic_symbols();
+ while (it != ie) {
+ DumpSymbol(*it, obj, true);
+ it.increment(ec);
+ if (ec)
+ report_fatal_error("Symbol iteration failed");
+ ++count;
+ }
+ outs() << " Total: " << count << "\n\n";
+}
+
+void DumpLibrary(const LibraryRef &lib) {
+ StringRef path;
+ lib.getPath(path);
+ outs() << " " << path << "\n";
+}
+
+// Iterate through needed libraries
+void DumpLibrariesNeeded(const ObjectFile *obj) {
+ error_code ec;
+ uint32_t count = 0;
+ library_iterator it = obj->begin_libraries_needed();
+ library_iterator ie = obj->end_libraries_needed();
+ outs() << "Libraries needed:\n";
+ while (it != ie) {
+ DumpLibrary(*it);
+ it.increment(ec);
+ if (ec)
+ report_fatal_error("Needed libraries iteration failed");
+ ++count;
+ }
+ outs() << " Total: " << count << "\n\n";
+}
+
+void DumpHeaders(const ObjectFile *obj) {
+ outs() << "File Format : " << obj->getFileFormatName() << "\n";
+ outs() << "Arch : "
+ << Triple::getArchTypeName((llvm::Triple::ArchType)obj->getArch())
+ << "\n";
+ outs() << "Address Size: " << (8*obj->getBytesInAddress()) << " bits\n";
+ outs() << "Load Name : " << obj->getLoadName() << "\n";
+ outs() << "\n";
+}
+
+int main(int argc, char** argv) {
+ error_code ec;
+ sys::PrintStackTraceOnErrorSignal();
+ PrettyStackTraceProgram X(argc, argv);
+
+ cl::ParseCommandLineOptions(argc, argv,
+ "LLVM Object Reader\n");
+
+ if (InputFilename.empty()) {
+ errs() << "Please specify an input filename\n";
+ return 1;
+ }
+
+ // Open the object file
+ OwningPtr<MemoryBuffer> File;
+ if (MemoryBuffer::getFile(InputFilename, File)) {
+ errs() << InputFilename << ": Open failed\n";
+ return 1;
+ }
+
+ ObjectFile *obj = ObjectFile::createObjectFile(File.take());
+ if (!obj) {
+    errs() << InputFilename << ": Object type not recognized\n";
+    return 1;
+  }
+
+ DumpHeaders(obj);
+ DumpSymbols(obj);
+ DumpDynamicSymbols(obj);
+ DumpLibrariesNeeded(obj);
+ return 0;
+}
+
diff --git a/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp b/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
new file mode 100644
index 0000000..01a7d15
--- /dev/null
+++ b/contrib/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
@@ -0,0 +1,156 @@
+//===-- llvm-rtdyld.cpp - MCJIT Testing Tool ------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a testing tool for use with the MC-JIT LLVM components.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/OwningPtr.h"
+#include "llvm/ExecutionEngine/RuntimeDyld.h"
+#include "llvm/Object/MachOObject.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/Memory.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
+using namespace llvm;
+using namespace llvm::object;
+
+static cl::list<std::string>
+InputFileList(cl::Positional, cl::ZeroOrMore,
+ cl::desc("<input file>"));
+
+enum ActionType {
+ AC_Execute
+};
+
+static cl::opt<ActionType>
+Action(cl::desc("Action to perform:"),
+ cl::init(AC_Execute),
+ cl::values(clEnumValN(AC_Execute, "execute",
+ "Load, link, and execute the inputs."),
+ clEnumValEnd));
+
+static cl::opt<std::string>
+EntryPoint("entry",
+ cl::desc("Function to call as entry point."),
+ cl::init("_main"));
+
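+// Illustrative use (the object file names are hypothetical):
+//
+//   llvm-rtdyld -entry=_main foo.o bar.o
+//
+// executeInput() below loads each input through RuntimeDyld, resolves
+// relocations, and then calls the named entry point with the first input's
+// path as argv[0].
+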
+/* *** */
+
+// A trivial memory manager that doesn't do anything fancy, just uses the
+// support library allocation routines directly.
+class TrivialMemoryManager : public RTDyldMemoryManager {
+public:
+ SmallVector<sys::MemoryBlock, 16> FunctionMemory;
+ SmallVector<sys::MemoryBlock, 16> DataMemory;
+
+ uint8_t *allocateCodeSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID);
+ uint8_t *allocateDataSection(uintptr_t Size, unsigned Alignment,
+ unsigned SectionID);
+
+ virtual void *getPointerToNamedFunction(const std::string &Name,
+ bool AbortOnFailure = true) {
+ return 0;
+ }
+
+};
+
+uint8_t *TrivialMemoryManager::allocateCodeSection(uintptr_t Size,
+ unsigned Alignment,
+ unsigned SectionID) {
+ return (uint8_t*)sys::Memory::AllocateRWX(Size, 0, 0).base();
+}
+
+uint8_t *TrivialMemoryManager::allocateDataSection(uintptr_t Size,
+ unsigned Alignment,
+ unsigned SectionID) {
+ return (uint8_t*)sys::Memory::AllocateRWX(Size, 0, 0).base();
+}
+
+static const char *ProgramName;
+
+static void Message(const char *Type, const Twine &Msg) {
+ errs() << ProgramName << ": " << Type << ": " << Msg << "\n";
+}
+
+static int Error(const Twine &Msg) {
+ Message("error", Msg);
+ return 1;
+}
+
+/* *** */
+
+static int executeInput() {
+ // Instantiate a dynamic linker.
+ TrivialMemoryManager *MemMgr = new TrivialMemoryManager;
+ RuntimeDyld Dyld(MemMgr);
+
+ // If we don't have any input files, read from stdin.
+ if (!InputFileList.size())
+ InputFileList.push_back("-");
+ for(unsigned i = 0, e = InputFileList.size(); i != e; ++i) {
+ // Load the input memory buffer.
+ OwningPtr<MemoryBuffer> InputBuffer;
+ if (error_code ec = MemoryBuffer::getFileOrSTDIN(InputFileList[i],
+ InputBuffer))
+ return Error("unable to read input: '" + ec.message() + "'");
+
+ // Load the object file into it.
+ if (Dyld.loadObject(InputBuffer.take())) {
+ return Error(Dyld.getErrorString());
+ }
+ }
+
+ // Resolve all the relocations we can.
+ Dyld.resolveRelocations();
+
+ // FIXME: Error out if there are unresolved relocations.
+
+ // Get the address of the entry point (_main by default).
+ void *MainAddress = Dyld.getSymbolAddress(EntryPoint);
+ if (MainAddress == 0)
+ return Error("no definition for '" + EntryPoint + "'");
+
+ // Invalidate the instruction cache for each loaded function.
+ for (unsigned i = 0, e = MemMgr->FunctionMemory.size(); i != e; ++i) {
+ sys::MemoryBlock &Data = MemMgr->FunctionMemory[i];
+ // Make sure the memory is executable.
+ std::string ErrorStr;
+ sys::Memory::InvalidateInstructionCache(Data.base(), Data.size());
+ if (!sys::Memory::setExecutable(Data, &ErrorStr))
+ return Error("unable to mark function executable: '" + ErrorStr + "'");
+ }
+
+ // Dispatch to _main().
+ errs() << "loaded '" << EntryPoint << "' at: " << (void*)MainAddress << "\n";
+
+ int (*Main)(int, const char**) =
+ (int(*)(int,const char**)) uintptr_t(MainAddress);
+ const char **Argv = new const char*[2];
+ // Use the name of the first input object module as argv[0] for the target.
+ Argv[0] = InputFileList[0].c_str();
+ Argv[1] = 0;
+ return Main(1, Argv);
+}
+
+int main(int argc, char **argv) {
+ ProgramName = argv[0];
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+
+ cl::ParseCommandLineOptions(argc, argv, "llvm MC-JIT tool\n");
+
+ switch (Action) {
+ case AC_Execute:
+ return executeInput();
+ }
+}
diff --git a/contrib/llvm/tools/llvm-stress/llvm-stress.cpp b/contrib/llvm/tools/llvm-stress/llvm-stress.cpp
new file mode 100644
index 0000000..d284ea5
--- /dev/null
+++ b/contrib/llvm/tools/llvm-stress/llvm-stress.cpp
@@ -0,0 +1,702 @@
+//===-- llvm-stress.cpp - Generate random LL files to stress-test LLVM ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This program is a utility that generates random .ll files to stress-test
+// different components in LLVM.
+//
+//===----------------------------------------------------------------------===//
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Constants.h"
+#include "llvm/Instruction.h"
+#include "llvm/CallGraphSCCPass.h"
+#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Support/PassNameParser.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/PluginLoader.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include <memory>
+#include <sstream>
+#include <set>
+#include <vector>
+#include <algorithm>
+using namespace llvm;
+
+static cl::opt<unsigned> SeedCL("seed",
+ cl::desc("Seed used for randomness"), cl::init(0));
+static cl::opt<unsigned> SizeCL("size",
+ cl::desc("The estimated size of the generated function (# of instrs)"),
+ cl::init(100));
+static cl::opt<std::string>
+OutputFilename("o", cl::desc("Override output filename"),
+ cl::value_desc("filename"));
+
+static cl::opt<bool> GenHalfFloat("generate-half-float",
+ cl::desc("Generate half-length floating-point values"), cl::init(false));
+static cl::opt<bool> GenX86FP80("generate-x86-fp80",
+ cl::desc("Generate 80-bit X86 floating-point values"), cl::init(false));
+static cl::opt<bool> GenFP128("generate-fp128",
+ cl::desc("Generate 128-bit floating-point values"), cl::init(false));
+static cl::opt<bool> GenPPCFP128("generate-ppc-fp128",
+ cl::desc("Generate 128-bit PPC floating-point values"), cl::init(false));
+static cl::opt<bool> GenX86MMX("generate-x86-mmx",
+ cl::desc("Generate X86 MMX floating-point values"), cl::init(false));
+
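+// Illustrative invocation (the output file name is hypothetical):
+//
+//   llvm-stress -seed=42 -size=500 -o stress.ll
+//
+// The exotic scalar types (half, x86_fp80, fp128, ppc_fp128, x86_mmx) are
+// only produced when the corresponding -generate-* flag above is set.
+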
+/// A utility class to provide a pseudo-random number generator which is
+/// the same across all platforms. This is somewhat close to the libc
+/// implementation. Note: This is not a cryptographically secure pseudorandom
+/// number generator.
+class Random {
+public:
+ /// C'tor
+ Random(unsigned _seed):Seed(_seed) {}
+
+ /// Return a random integer, up to a
+ /// maximum of 2**19 - 1.
+ uint32_t Rand() {
+ uint32_t Val = Seed + 0x000b07a1;
+ Seed = (Val * 0x3c7c0ac1);
+ // Only lowest 19 bits are random-ish.
+ return Seed & 0x7ffff;
+ }
+
+ /// Return a random 32 bit integer.
+ uint32_t Rand32() {
+ uint32_t Val = Rand();
+ Val &= 0xffff;
+ return Val | (Rand() << 16);
+ }
+
+ /// Return a random 64 bit integer.
+ uint64_t Rand64() {
+ uint64_t Val = Rand32();
+ return Val | (uint64_t(Rand32()) << 32);
+ }
+private:
+ unsigned Seed;
+};
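+
+// A sketch of how the generator composes, mirroring the code above: every
+// Rand() call advances the state with Seed = (Seed + 0x000b07a1) * 0x3c7c0ac1
+// and exposes only the low 19 bits; Rand32() stitches together the low 16
+// bits of two calls, and Rand64() stitches together two Rand32() results, so
+// a given -seed value reproduces the same module on every platform.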
+
+/// Generate an empty function with a default argument list.
+Function *GenEmptyFunction(Module *M) {
+ // Type Definitions
+ std::vector<Type*> ArgsTy;
+ // Define a few arguments
+ LLVMContext &Context = M->getContext();
+ ArgsTy.push_back(PointerType::get(IntegerType::getInt8Ty(Context), 0));
+ ArgsTy.push_back(PointerType::get(IntegerType::getInt32Ty(Context), 0));
+ ArgsTy.push_back(PointerType::get(IntegerType::getInt64Ty(Context), 0));
+ ArgsTy.push_back(IntegerType::getInt32Ty(Context));
+ ArgsTy.push_back(IntegerType::getInt64Ty(Context));
+ ArgsTy.push_back(IntegerType::getInt8Ty(Context));
+
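+  // The resulting signature is therefore (shown for the default seed of 0):
+  //   void @autogen_SD0(i8*, i32*, i64*, i32, i64, i8)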
+ FunctionType *FuncTy = FunctionType::get(Type::getVoidTy(Context), ArgsTy, 0);
+  // Pick a unique function name that encodes the random seed.
+ std::stringstream ss;
+ ss<<"autogen_SD"<<SeedCL;
+ Function *Func = Function::Create(FuncTy, GlobalValue::ExternalLinkage,
+ ss.str(), M);
+
+ Func->setCallingConv(CallingConv::C);
+ return Func;
+}
+
+/// A base class, implementing utilities needed for
+/// modifying and adding new random instructions.
+struct Modifier {
+ /// Used to store the randomly generated values.
+ typedef std::vector<Value*> PieceTable;
+
+public:
+ /// C'tor
+ Modifier(BasicBlock *Block, PieceTable *PT, Random *R):
+ BB(Block),PT(PT),Ran(R),Context(BB->getContext()) {}
+ /// Add a new instruction.
+ virtual void Act() = 0;
+  /// Add N new instructions.
+ virtual void ActN(unsigned n) {
+ for (unsigned i=0; i<n; ++i)
+ Act();
+ }
+
+protected:
+ /// Return a random value from the list of known values.
+ Value *getRandomVal() {
+ assert(PT->size());
+ return PT->at(Ran->Rand() % PT->size());
+ }
+
+ Constant *getRandomConstant(Type *Tp) {
+ if (Tp->isIntegerTy()) {
+ if (Ran->Rand() & 1)
+ return ConstantInt::getAllOnesValue(Tp);
+ return ConstantInt::getNullValue(Tp);
+ } else if (Tp->isFloatingPointTy()) {
+ if (Ran->Rand() & 1)
+ return ConstantFP::getAllOnesValue(Tp);
+ return ConstantFP::getNullValue(Tp);
+ }
+ return UndefValue::get(Tp);
+ }
+
+ /// Return a random value with a known type.
+ Value *getRandomValue(Type *Tp) {
+ unsigned index = Ran->Rand();
+ for (unsigned i=0; i<PT->size(); ++i) {
+ Value *V = PT->at((index + i) % PT->size());
+ if (V->getType() == Tp)
+ return V;
+ }
+
+ // If the requested type was not found, generate a constant value.
+ if (Tp->isIntegerTy()) {
+ if (Ran->Rand() & 1)
+ return ConstantInt::getAllOnesValue(Tp);
+ return ConstantInt::getNullValue(Tp);
+ } else if (Tp->isFloatingPointTy()) {
+ if (Ran->Rand() & 1)
+ return ConstantFP::getAllOnesValue(Tp);
+ return ConstantFP::getNullValue(Tp);
+ } else if (Tp->isVectorTy()) {
+ VectorType *VTp = cast<VectorType>(Tp);
+
+ std::vector<Constant*> TempValues;
+ TempValues.reserve(VTp->getNumElements());
+ for (unsigned i = 0; i < VTp->getNumElements(); ++i)
+ TempValues.push_back(getRandomConstant(VTp->getScalarType()));
+
+ ArrayRef<Constant*> VectorValue(TempValues);
+ return ConstantVector::get(VectorValue);
+ }
+
+ return UndefValue::get(Tp);
+ }
+
+ /// Return a random value of any pointer type.
+ Value *getRandomPointerValue() {
+ unsigned index = Ran->Rand();
+ for (unsigned i=0; i<PT->size(); ++i) {
+ Value *V = PT->at((index + i) % PT->size());
+ if (V->getType()->isPointerTy())
+ return V;
+ }
+ return UndefValue::get(pickPointerType());
+ }
+
+ /// Return a random value of any vector type.
+ Value *getRandomVectorValue() {
+ unsigned index = Ran->Rand();
+ for (unsigned i=0; i<PT->size(); ++i) {
+ Value *V = PT->at((index + i) % PT->size());
+ if (V->getType()->isVectorTy())
+ return V;
+ }
+ return UndefValue::get(pickVectorType());
+ }
+
+ /// Pick a random type.
+ Type *pickType() {
+ return (Ran->Rand() & 1 ? pickVectorType() : pickScalarType());
+ }
+
+ /// Pick a random pointer type.
+ Type *pickPointerType() {
+ Type *Ty = pickType();
+ return PointerType::get(Ty, 0);
+ }
+
+ /// Pick a random vector type.
+ Type *pickVectorType(unsigned len = (unsigned)-1) {
+    // Pick a random vector width in the range 2**0 to 2**4.
+    // By adding two randoms we generate a normal-like distribution of widths
+    // centered around 2**2.
+ unsigned width = 1<<((Ran->Rand() % 3) + (Ran->Rand() % 3));
+ Type *Ty;
+
+ // Vectors of x86mmx are illegal; keep trying till we get something else.
+ do {
+ Ty = pickScalarType();
+ } while (Ty->isX86_MMXTy());
+
+ if (len != (unsigned)-1)
+ width = len;
+ return VectorType::get(Ty, width);
+ }
+
+ /// Pick a random scalar type.
+ Type *pickScalarType() {
+ Type *t = 0;
+ do {
+ switch (Ran->Rand() % 30) {
+ case 0: t = Type::getInt1Ty(Context); break;
+ case 1: t = Type::getInt8Ty(Context); break;
+ case 2: t = Type::getInt16Ty(Context); break;
+ case 3: case 4:
+ case 5: t = Type::getFloatTy(Context); break;
+ case 6: case 7:
+ case 8: t = Type::getDoubleTy(Context); break;
+ case 9: case 10:
+ case 11: t = Type::getInt32Ty(Context); break;
+ case 12: case 13:
+ case 14: t = Type::getInt64Ty(Context); break;
+ case 15: case 16:
+ case 17: if (GenHalfFloat) t = Type::getHalfTy(Context); break;
+ case 18: case 19:
+ case 20: if (GenX86FP80) t = Type::getX86_FP80Ty(Context); break;
+ case 21: case 22:
+ case 23: if (GenFP128) t = Type::getFP128Ty(Context); break;
+ case 24: case 25:
+ case 26: if (GenPPCFP128) t = Type::getPPC_FP128Ty(Context); break;
+ case 27: case 28:
+ case 29: if (GenX86MMX) t = Type::getX86_MMXTy(Context); break;
+ default: llvm_unreachable("Invalid scalar value");
+ }
+ } while (t == 0);
+
+ return t;
+ }
+
+ /// Basic block to populate
+ BasicBlock *BB;
+ /// Value table
+ PieceTable *PT;
+ /// Random number generator
+ Random *Ran;
+ /// Context
+ LLVMContext &Context;
+};
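+
+// A note on how these modifiers are driven (see FillFunction() below): each
+// concrete Modifier appends the values it creates to the shared PieceTable,
+// so later modifiers can pick them up through getRandomVal() and friends;
+// FillFunction() then calls Act() on every modifier in a round-robin loop
+// until roughly SizeCL instructions have been emitted.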
+
+struct LoadModifier: public Modifier {
+ LoadModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R) {}
+ virtual void Act() {
+    // Try to use a predefined pointer. If none exists, use an undef pointer value.
+ Value *Ptr = getRandomPointerValue();
+ Value *V = new LoadInst(Ptr, "L", BB->getTerminator());
+ PT->push_back(V);
+ }
+};
+
+struct StoreModifier: public Modifier {
+ StoreModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R) {}
+ virtual void Act() {
+    // Try to use a predefined pointer. If none exists, use an undef pointer value.
+ Value *Ptr = getRandomPointerValue();
+ Type *Tp = Ptr->getType();
+ Value *Val = getRandomValue(Tp->getContainedType(0));
+ Type *ValTy = Val->getType();
+
+ // Do not store vectors of i1s because they are unsupported
+ // by the codegen.
+ if (ValTy->isVectorTy() && ValTy->getScalarSizeInBits() == 1)
+ return;
+
+ new StoreInst(Val, Ptr, BB->getTerminator());
+ }
+};
+
+struct BinModifier: public Modifier {
+ BinModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R) {}
+
+ virtual void Act() {
+ Value *Val0 = getRandomVal();
+ Value *Val1 = getRandomValue(Val0->getType());
+
+ // Don't handle pointer types.
+ if (Val0->getType()->isPointerTy() ||
+ Val1->getType()->isPointerTy())
+ return;
+
+ // Don't handle i1 types.
+ if (Val0->getType()->getScalarSizeInBits() == 1)
+ return;
+
+
+ bool isFloat = Val0->getType()->getScalarType()->isFloatingPointTy();
+ Instruction* Term = BB->getTerminator();
+ unsigned R = Ran->Rand() % (isFloat ? 7 : 13);
+ Instruction::BinaryOps Op;
+
+ switch (R) {
+ default: llvm_unreachable("Invalid BinOp");
+ case 0:{Op = (isFloat?Instruction::FAdd : Instruction::Add); break; }
+ case 1:{Op = (isFloat?Instruction::FSub : Instruction::Sub); break; }
+ case 2:{Op = (isFloat?Instruction::FMul : Instruction::Mul); break; }
+ case 3:{Op = (isFloat?Instruction::FDiv : Instruction::SDiv); break; }
+ case 4:{Op = (isFloat?Instruction::FDiv : Instruction::UDiv); break; }
+ case 5:{Op = (isFloat?Instruction::FRem : Instruction::SRem); break; }
+ case 6:{Op = (isFloat?Instruction::FRem : Instruction::URem); break; }
+ case 7: {Op = Instruction::Shl; break; }
+ case 8: {Op = Instruction::LShr; break; }
+ case 9: {Op = Instruction::AShr; break; }
+ case 10:{Op = Instruction::And; break; }
+ case 11:{Op = Instruction::Or; break; }
+ case 12:{Op = Instruction::Xor; break; }
+ }
+
+ PT->push_back(BinaryOperator::Create(Op, Val0, Val1, "B", Term));
+ }
+};
+
+/// Generate constant values.
+struct ConstModifier: public Modifier {
+ ConstModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R) {}
+ virtual void Act() {
+ Type *Ty = pickType();
+
+ if (Ty->isVectorTy()) {
+ switch (Ran->Rand() % 2) {
+ case 0: if (Ty->getScalarType()->isIntegerTy())
+ return PT->push_back(ConstantVector::getAllOnesValue(Ty));
+ case 1: if (Ty->getScalarType()->isIntegerTy())
+ return PT->push_back(ConstantVector::getNullValue(Ty));
+ }
+ }
+
+ if (Ty->isFloatingPointTy()) {
+ // Generate 128 random bits, the size of the (currently)
+ // largest floating-point types.
+ uint64_t RandomBits[2];
+ for (unsigned i = 0; i < 2; ++i)
+ RandomBits[i] = Ran->Rand64();
+
+ APInt RandomInt(Ty->getPrimitiveSizeInBits(), makeArrayRef(RandomBits));
+
+ bool isIEEE = !Ty->isX86_FP80Ty() && !Ty->isPPC_FP128Ty();
+ APFloat RandomFloat(RandomInt, isIEEE);
+
+ if (Ran->Rand() & 1)
+ return PT->push_back(ConstantFP::getNullValue(Ty));
+ return PT->push_back(ConstantFP::get(Ty->getContext(), RandomFloat));
+ }
+
+ if (Ty->isIntegerTy()) {
+ switch (Ran->Rand() % 7) {
+ case 0: if (Ty->isIntegerTy())
+ return PT->push_back(ConstantInt::get(Ty,
+ APInt::getAllOnesValue(Ty->getPrimitiveSizeInBits())));
+ case 1: if (Ty->isIntegerTy())
+ return PT->push_back(ConstantInt::get(Ty,
+ APInt::getNullValue(Ty->getPrimitiveSizeInBits())));
+ case 2: case 3: case 4: case 5:
+ case 6: if (Ty->isIntegerTy())
+ PT->push_back(ConstantInt::get(Ty, Ran->Rand()));
+ }
+ }
+
+ }
+};
+
+struct AllocaModifier: public Modifier {
+ AllocaModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R){}
+
+ virtual void Act() {
+ Type *Tp = pickType();
+ PT->push_back(new AllocaInst(Tp, "A", BB->getFirstNonPHI()));
+ }
+};
+
+struct ExtractElementModifier: public Modifier {
+ ExtractElementModifier(BasicBlock *BB, PieceTable *PT, Random *R):
+ Modifier(BB, PT, R) {}
+
+ virtual void Act() {
+ Value *Val0 = getRandomVectorValue();
+ Value *V = ExtractElementInst::Create(Val0,
+ ConstantInt::get(Type::getInt32Ty(BB->getContext()),
+ Ran->Rand() % cast<VectorType>(Val0->getType())->getNumElements()),
+ "E", BB->getTerminator());
+ return PT->push_back(V);
+ }
+};
+
+struct ShuffModifier: public Modifier {
+ ShuffModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R) {}
+ virtual void Act() {
+
+ Value *Val0 = getRandomVectorValue();
+ Value *Val1 = getRandomValue(Val0->getType());
+
+ unsigned Width = cast<VectorType>(Val0->getType())->getNumElements();
+ std::vector<Constant*> Idxs;
+
+ Type *I32 = Type::getInt32Ty(BB->getContext());
+ for (unsigned i=0; i<Width; ++i) {
+ Constant *CI = ConstantInt::get(I32, Ran->Rand() % (Width*2));
+ // Pick some undef values.
+ if (!(Ran->Rand() % 5))
+ CI = UndefValue::get(I32);
+ Idxs.push_back(CI);
+ }
+
+ Constant *Mask = ConstantVector::get(Idxs);
+
+ Value *V = new ShuffleVectorInst(Val0, Val1, Mask, "Shuff",
+ BB->getTerminator());
+ PT->push_back(V);
+ }
+};
+
+struct InsertElementModifier: public Modifier {
+ InsertElementModifier(BasicBlock *BB, PieceTable *PT, Random *R):
+ Modifier(BB, PT, R) {}
+
+ virtual void Act() {
+ Value *Val0 = getRandomVectorValue();
+ Value *Val1 = getRandomValue(Val0->getType()->getScalarType());
+
+ Value *V = InsertElementInst::Create(Val0, Val1,
+ ConstantInt::get(Type::getInt32Ty(BB->getContext()),
+ Ran->Rand() % cast<VectorType>(Val0->getType())->getNumElements()),
+ "I", BB->getTerminator());
+ return PT->push_back(V);
+ }
+
+};
+
+struct CastModifier: public Modifier {
+ CastModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R) {}
+ virtual void Act() {
+
+ Value *V = getRandomVal();
+ Type *VTy = V->getType();
+ Type *DestTy = pickScalarType();
+
+    // If the source is a vector, pick a destination vector type with the same
+    // number of elements.
+ if (VTy->isVectorTy()) {
+ VectorType *VecTy = cast<VectorType>(VTy);
+ DestTy = pickVectorType(VecTy->getNumElements());
+ }
+
+    // No need to cast if the source and destination types already match.
+ if (VTy == DestTy) return;
+
+ // Pointers:
+ if (VTy->isPointerTy()) {
+ if (!DestTy->isPointerTy())
+ DestTy = PointerType::get(DestTy, 0);
+ return PT->push_back(
+ new BitCastInst(V, DestTy, "PC", BB->getTerminator()));
+ }
+
+ // Generate lots of bitcasts.
+ if ((Ran->Rand() & 1) &&
+ VTy->getPrimitiveSizeInBits() == DestTy->getPrimitiveSizeInBits()) {
+ return PT->push_back(
+ new BitCastInst(V, DestTy, "BC", BB->getTerminator()));
+ }
+
+ // Both types are integers:
+ if (VTy->getScalarType()->isIntegerTy() &&
+ DestTy->getScalarType()->isIntegerTy()) {
+ if (VTy->getScalarType()->getPrimitiveSizeInBits() >
+ DestTy->getScalarType()->getPrimitiveSizeInBits()) {
+ return PT->push_back(
+ new TruncInst(V, DestTy, "Tr", BB->getTerminator()));
+ } else {
+ if (Ran->Rand() & 1)
+ return PT->push_back(
+ new ZExtInst(V, DestTy, "ZE", BB->getTerminator()));
+ return PT->push_back(new SExtInst(V, DestTy, "Se", BB->getTerminator()));
+ }
+ }
+
+ // Fp to int.
+ if (VTy->getScalarType()->isFloatingPointTy() &&
+ DestTy->getScalarType()->isIntegerTy()) {
+ if (Ran->Rand() & 1)
+ return PT->push_back(
+ new FPToSIInst(V, DestTy, "FC", BB->getTerminator()));
+ return PT->push_back(new FPToUIInst(V, DestTy, "FC", BB->getTerminator()));
+ }
+
+ // Int to fp.
+ if (VTy->getScalarType()->isIntegerTy() &&
+ DestTy->getScalarType()->isFloatingPointTy()) {
+ if (Ran->Rand() & 1)
+ return PT->push_back(
+ new SIToFPInst(V, DestTy, "FC", BB->getTerminator()));
+ return PT->push_back(new UIToFPInst(V, DestTy, "FC", BB->getTerminator()));
+
+ }
+
+ // Both floats.
+ if (VTy->getScalarType()->isFloatingPointTy() &&
+ DestTy->getScalarType()->isFloatingPointTy()) {
+ if (VTy->getScalarType()->getPrimitiveSizeInBits() >
+ DestTy->getScalarType()->getPrimitiveSizeInBits()) {
+ return PT->push_back(
+ new FPTruncInst(V, DestTy, "Tr", BB->getTerminator()));
+ } else {
+ return PT->push_back(
+ new FPExtInst(V, DestTy, "ZE", BB->getTerminator()));
+ }
+ }
+ }
+
+};
+
+struct SelectModifier: public Modifier {
+ SelectModifier(BasicBlock *BB, PieceTable *PT, Random *R):
+ Modifier(BB, PT, R) {}
+
+ virtual void Act() {
+    // Try a bunch of different select configurations until a valid one is found.
+ Value *Val0 = getRandomVal();
+ Value *Val1 = getRandomValue(Val0->getType());
+
+ Type *CondTy = Type::getInt1Ty(Context);
+
+    // If the value type is a vector, generate a vector select in roughly 50%
+    // of the cases.
+    if (Val0->getType()->isVectorTy() && (Ran->Rand() & 1)) {
+ unsigned NumElem = cast<VectorType>(Val0->getType())->getNumElements();
+ CondTy = VectorType::get(CondTy, NumElem);
+ }
+
+ Value *Cond = getRandomValue(CondTy);
+ Value *V = SelectInst::Create(Cond, Val0, Val1, "Sl", BB->getTerminator());
+ return PT->push_back(V);
+ }
+};
+
+
+struct CmpModifier: public Modifier {
+ CmpModifier(BasicBlock *BB, PieceTable *PT, Random *R):Modifier(BB, PT, R) {}
+ virtual void Act() {
+
+ Value *Val0 = getRandomVal();
+ Value *Val1 = getRandomValue(Val0->getType());
+
+ if (Val0->getType()->isPointerTy()) return;
+ bool fp = Val0->getType()->getScalarType()->isFloatingPointTy();
+
+ int op;
+ if (fp) {
+ op = Ran->Rand() %
+ (CmpInst::LAST_FCMP_PREDICATE - CmpInst::FIRST_FCMP_PREDICATE) +
+ CmpInst::FIRST_FCMP_PREDICATE;
+ } else {
+ op = Ran->Rand() %
+ (CmpInst::LAST_ICMP_PREDICATE - CmpInst::FIRST_ICMP_PREDICATE) +
+ CmpInst::FIRST_ICMP_PREDICATE;
+ }
+
+ Value *V = CmpInst::Create(fp ? Instruction::FCmp : Instruction::ICmp,
+ op, Val0, Val1, "Cmp", BB->getTerminator());
+ return PT->push_back(V);
+ }
+};
+
+void FillFunction(Function *F) {
+ // Create a legal entry block.
+ BasicBlock *BB = BasicBlock::Create(F->getContext(), "BB", F);
+ ReturnInst::Create(F->getContext(), BB);
+
+ // Create the value table.
+ Modifier::PieceTable PT;
+ // Pick an initial seed value
+ Random R(SeedCL);
+
+ // Consider arguments as legal values.
+ for (Function::arg_iterator it = F->arg_begin(), e = F->arg_end();
+ it != e; ++it)
+ PT.push_back(it);
+
+ // List of modifiers which add new random instructions.
+ std::vector<Modifier*> Modifiers;
+ std::auto_ptr<Modifier> LM(new LoadModifier(BB, &PT, &R));
+ std::auto_ptr<Modifier> SM(new StoreModifier(BB, &PT, &R));
+ std::auto_ptr<Modifier> EE(new ExtractElementModifier(BB, &PT, &R));
+ std::auto_ptr<Modifier> SHM(new ShuffModifier(BB, &PT, &R));
+ std::auto_ptr<Modifier> IE(new InsertElementModifier(BB, &PT, &R));
+ std::auto_ptr<Modifier> BM(new BinModifier(BB, &PT, &R));
+ std::auto_ptr<Modifier> CM(new CastModifier(BB, &PT, &R));
+ std::auto_ptr<Modifier> SLM(new SelectModifier(BB, &PT, &R));
+ std::auto_ptr<Modifier> PM(new CmpModifier(BB, &PT, &R));
+ Modifiers.push_back(LM.get());
+ Modifiers.push_back(SM.get());
+ Modifiers.push_back(EE.get());
+ Modifiers.push_back(SHM.get());
+ Modifiers.push_back(IE.get());
+ Modifiers.push_back(BM.get());
+ Modifiers.push_back(CM.get());
+ Modifiers.push_back(SLM.get());
+ Modifiers.push_back(PM.get());
+
+ // Generate the random instructions
+ AllocaModifier AM(BB, &PT, &R); AM.ActN(5); // Throw in a few allocas
+ ConstModifier COM(BB, &PT, &R); COM.ActN(40); // Throw in a few constants
+
+ for (unsigned i=0; i< SizeCL / Modifiers.size(); ++i)
+ for (std::vector<Modifier*>::iterator it = Modifiers.begin(),
+ e = Modifiers.end(); it != e; ++it) {
+ (*it)->Act();
+ }
+
+ SM->ActN(5); // Throw in a few stores.
+}
+
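+/// Collect the i1-valued instructions in the entry block, split the function
+/// at each of them, and (except in the entry block itself) replace the
+/// unconditional fall-through with a conditional branch on that value, so the
+/// generated code contains real control flow rather than one straight-line
+/// block.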
+void IntroduceControlFlow(Function *F) {
+ std::set<Instruction*> BoolInst;
+ for (BasicBlock::iterator it = F->begin()->begin(),
+ e = F->begin()->end(); it != e; ++it) {
+ if (it->getType() == IntegerType::getInt1Ty(F->getContext()))
+ BoolInst.insert(it);
+ }
+
+ for (std::set<Instruction*>::iterator it = BoolInst.begin(),
+ e = BoolInst.end(); it != e; ++it) {
+ Instruction *Instr = *it;
+ BasicBlock *Curr = Instr->getParent();
+ BasicBlock::iterator Loc= Instr;
+ BasicBlock *Next = Curr->splitBasicBlock(Loc, "CF");
+ Instr->moveBefore(Curr->getTerminator());
+ if (Curr != &F->getEntryBlock()) {
+ BranchInst::Create(Curr, Next, Instr, Curr->getTerminator());
+ Curr->getTerminator()->eraseFromParent();
+ }
+ }
+}
+
+int main(int argc, char **argv) {
+ // Init LLVM, call llvm_shutdown() on exit, parse args, etc.
+ llvm::PrettyStackTraceProgram X(argc, argv);
+ cl::ParseCommandLineOptions(argc, argv, "llvm codegen stress-tester\n");
+ llvm_shutdown_obj Y;
+
+ std::auto_ptr<Module> M(new Module("/tmp/autogen.bc", getGlobalContext()));
+ Function *F = GenEmptyFunction(M.get());
+ FillFunction(F);
+ IntroduceControlFlow(F);
+
+ // Figure out what stream we are supposed to write to...
+ OwningPtr<tool_output_file> Out;
+ // Default to standard output.
+ if (OutputFilename.empty())
+ OutputFilename = "-";
+
+ std::string ErrorInfo;
+ Out.reset(new tool_output_file(OutputFilename.c_str(), ErrorInfo,
+ raw_fd_ostream::F_Binary));
+ if (!ErrorInfo.empty()) {
+ errs() << ErrorInfo << '\n';
+ return 1;
+ }
+
+ PassManager Passes;
+ Passes.add(createVerifierPass());
+ Passes.add(createPrintModulePass(&Out->os()));
+ Passes.run(*M.get());
+ Out->keep();
+
+ return 0;
+}
diff --git a/contrib/llvm/tools/llvm-stub/llvm-stub.c b/contrib/llvm/tools/llvm-stub/llvm-stub.c
new file mode 100644
index 0000000..69cd6ed
--- /dev/null
+++ b/contrib/llvm/tools/llvm-stub/llvm-stub.c
@@ -0,0 +1,77 @@
+/*===- llvm-stub.c - Stub executable to run llvm bitcode files ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This tool is used by the gccld program to enable transparent execution of
+// bitcode files by the user. Specifically, gccld outputs two files when asked
+// to compile a <program> file:
+// 1. It outputs the LLVM bitcode file to <program>.bc
+// 2. It outputs a stub executable that runs lli on <program>.bc
+//
+// This allows the end user to just say ./<program> and have the JIT executed
+// automatically. On unix, the stub executable emitted is actually a bourne
+// shell script that does the forwarding. Windows does not like #!/bin/sh
+// programs in .exe files, so we make it an actual program, defined here.
+//
+//===----------------------------------------------------------------------===*/
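+
+/* A behavior sketch derived from main() below: with LLVMINTERP unset, a stub
+ * built for <program> ends up executing
+ *
+ *   lli <program>.bc <original arguments...>
+ *
+ * so running ./<program> transparently JIT-executes the bitcode sitting next
+ * to it.
+ */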
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "llvm/Config/config.h"
+
+#if defined(HAVE_UNISTD_H) && !defined(_MSC_VER)
+#include <unistd.h>
+#endif
+
+#ifdef _WIN32
+#include <process.h>
+#include <io.h>
+#endif
+
+int main(int argc, char** argv) {
+ const char *Interp = getenv("LLVMINTERP");
+ const char **Args;
+ if (Interp == 0) Interp = "lli";
+
+ /* Set up the command line options to pass to the JIT. */
+ Args = (const char**)malloc(sizeof(char*) * (argc+2));
+ /* argv[0] is the JIT */
+ Args[0] = Interp;
+
+#ifdef LLVM_ON_WIN32
+ {
+ int len = strlen(argv[0]);
+ if (len < 4 || strcmp(argv[0] + len - 4, ".exe") != 0) {
+ /* .exe suffix is stripped off of argv[0] if the executable was run on the
+ * command line without one. Put it back on.
+ */
+ argv[0] = strcat(strcpy((char*)malloc(len + 5), argv[0]), ".exe");
+ }
+ }
+#endif
+
+ /* argv[1] is argv[0] + ".bc". */
+ Args[1] = strcat(strcpy((char*)malloc(strlen(argv[0])+4), argv[0]), ".bc");
+
+ /* The rest of the args are as before. */
+ memcpy((char **)Args+2, argv+1, sizeof(char*)*argc);
+
+ /* Run the JIT. */
+#if !defined(_WIN32) || defined(__MINGW64__)
+ execvp(Interp, (char **)Args); /* POSIX execvp takes a char *const[]. */
+#else
+ execvp(Interp, Args); /* windows execvp takes a const char *const *. */
+#endif
+  /* If execvp returns, the JIT could not be started. */
+ fprintf(stderr, "Could not execute the LLVM JIT. Either add 'lli' to your"
+ " path, or set the\ninterpreter you want to use in the LLVMINTERP "
+ "environment variable.\n");
+ return 1;
+}
diff --git a/contrib/llvm/tools/macho-dump/macho-dump.cpp b/contrib/llvm/tools/macho-dump/macho-dump.cpp
new file mode 100644
index 0000000..2b22c3b
--- /dev/null
+++ b/contrib/llvm/tools/macho-dump/macho-dump.cpp
@@ -0,0 +1,400 @@
+//===-- macho-dump.cpp - Mach Object Dumping Tool -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This is a testing tool for use with the MC/Mach-O LLVM components.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Object/MachOObject.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/Format.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Support/system_error.h"
+using namespace llvm;
+using namespace llvm::object;
+
+static cl::opt<std::string>
+InputFile(cl::Positional, cl::desc("<input file>"), cl::init("-"));
+
+static cl::opt<bool>
+ShowSectionData("dump-section-data", cl::desc("Dump the contents of sections"),
+ cl::init(false));
+
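+// Illustrative use (the object file name is hypothetical):
+//
+//   macho-dump -dump-section-data foo.o
+//
+// The output is a nested ('key', value) listing of the Mach-O header and
+// each load command, plus hex dumps of section contents when the flag above
+// is given.
+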
+///
+
+static const char *ProgramName;
+
+static void Message(const char *Type, const Twine &Msg) {
+ errs() << ProgramName << ": " << Type << ": " << Msg << "\n";
+}
+
+static int Error(const Twine &Msg) {
+ Message("error", Msg);
+ return 1;
+}
+
+static void Warning(const Twine &Msg) {
+ Message("warning", Msg);
+}
+
+///
+
+static void DumpSegmentCommandData(StringRef Name,
+ uint64_t VMAddr, uint64_t VMSize,
+ uint64_t FileOffset, uint64_t FileSize,
+ uint32_t MaxProt, uint32_t InitProt,
+ uint32_t NumSections, uint32_t Flags) {
+ outs() << " ('segment_name', '";
+ outs().write_escaped(Name, /*UseHexEscapes=*/true) << "')\n";
+ outs() << " ('vm_addr', " << VMAddr << ")\n";
+ outs() << " ('vm_size', " << VMSize << ")\n";
+ outs() << " ('file_offset', " << FileOffset << ")\n";
+ outs() << " ('file_size', " << FileSize << ")\n";
+ outs() << " ('maxprot', " << MaxProt << ")\n";
+ outs() << " ('initprot', " << InitProt << ")\n";
+ outs() << " ('num_sections', " << NumSections << ")\n";
+ outs() << " ('flags', " << Flags << ")\n";
+}
+
+static int DumpSectionData(MachOObject &Obj, unsigned Index, StringRef Name,
+ StringRef SegmentName, uint64_t Address,
+ uint64_t Size, uint32_t Offset,
+ uint32_t Align, uint32_t RelocationTableOffset,
+ uint32_t NumRelocationTableEntries,
+ uint32_t Flags, uint32_t Reserved1,
+ uint32_t Reserved2, uint64_t Reserved3 = ~0ULL) {
+ outs() << " # Section " << Index << "\n";
+ outs() << " (('section_name', '";
+ outs().write_escaped(Name, /*UseHexEscapes=*/true) << "')\n";
+ outs() << " ('segment_name', '";
+ outs().write_escaped(SegmentName, /*UseHexEscapes=*/true) << "')\n";
+ outs() << " ('address', " << Address << ")\n";
+ outs() << " ('size', " << Size << ")\n";
+ outs() << " ('offset', " << Offset << ")\n";
+ outs() << " ('alignment', " << Align << ")\n";
+ outs() << " ('reloc_offset', " << RelocationTableOffset << ")\n";
+ outs() << " ('num_reloc', " << NumRelocationTableEntries << ")\n";
+ outs() << " ('flags', " << format("0x%x", Flags) << ")\n";
+ outs() << " ('reserved1', " << Reserved1 << ")\n";
+ outs() << " ('reserved2', " << Reserved2 << ")\n";
+ if (Reserved3 != ~0ULL)
+ outs() << " ('reserved3', " << Reserved3 << ")\n";
+ outs() << " ),\n";
+
+ // Dump the relocation entries.
+ int Res = 0;
+ outs() << " ('_relocations', [\n";
+ for (unsigned i = 0; i != NumRelocationTableEntries; ++i) {
+ InMemoryStruct<macho::RelocationEntry> RE;
+ Obj.ReadRelocationEntry(RelocationTableOffset, i, RE);
+ if (!RE) {
+ Res = Error("unable to read relocation table entry '" + Twine(i) + "'");
+ break;
+ }
+
+ outs() << " # Relocation " << i << "\n";
+ outs() << " (('word-0', " << format("0x%x", RE->Word0) << "),\n";
+ outs() << " ('word-1', " << format("0x%x", RE->Word1) << ")),\n";
+ }
+ outs() << " ])\n";
+
+ // Dump the section data, if requested.
+ if (ShowSectionData) {
+ outs() << " ('_section_data', '";
+ StringRef Data = Obj.getData(Offset, Size);
+ for (unsigned i = 0; i != Data.size(); ++i) {
+ if (i && (i % 4) == 0)
+ outs() << ' ';
+ outs() << hexdigit((Data[i] >> 4) & 0xF, /*LowerCase=*/true);
+ outs() << hexdigit((Data[i] >> 0) & 0xF, /*LowerCase=*/true);
+ }
+ outs() << "')\n";
+ }
+
+ return Res;
+}
+
+static int DumpSegmentCommand(MachOObject &Obj,
+ const MachOObject::LoadCommandInfo &LCI) {
+ InMemoryStruct<macho::SegmentLoadCommand> SLC;
+ Obj.ReadSegmentLoadCommand(LCI, SLC);
+ if (!SLC)
+ return Error("unable to read segment load command");
+
+ DumpSegmentCommandData(StringRef(SLC->Name, 16), SLC->VMAddress,
+ SLC->VMSize, SLC->FileOffset, SLC->FileSize,
+ SLC->MaxVMProtection, SLC->InitialVMProtection,
+ SLC->NumSections, SLC->Flags);
+
+ // Dump the sections.
+ int Res = 0;
+ outs() << " ('sections', [\n";
+ for (unsigned i = 0; i != SLC->NumSections; ++i) {
+ InMemoryStruct<macho::Section> Sect;
+ Obj.ReadSection(LCI, i, Sect);
+ if (!SLC) {
+ Res = Error("unable to read section '" + Twine(i) + "'");
+ break;
+ }
+
+ if ((Res = DumpSectionData(Obj, i, StringRef(Sect->Name, 16),
+ StringRef(Sect->SegmentName, 16), Sect->Address,
+ Sect->Size, Sect->Offset, Sect->Align,
+ Sect->RelocationTableOffset,
+ Sect->NumRelocationTableEntries, Sect->Flags,
+ Sect->Reserved1, Sect->Reserved2)))
+ break;
+ }
+ outs() << " ])\n";
+
+ return Res;
+}
+
+static int DumpSegment64Command(MachOObject &Obj,
+ const MachOObject::LoadCommandInfo &LCI) {
+ InMemoryStruct<macho::Segment64LoadCommand> SLC;
+ Obj.ReadSegment64LoadCommand(LCI, SLC);
+ if (!SLC)
+ return Error("unable to read segment load command");
+
+ DumpSegmentCommandData(StringRef(SLC->Name, 16), SLC->VMAddress,
+ SLC->VMSize, SLC->FileOffset, SLC->FileSize,
+ SLC->MaxVMProtection, SLC->InitialVMProtection,
+ SLC->NumSections, SLC->Flags);
+
+ // Dump the sections.
+ int Res = 0;
+ outs() << " ('sections', [\n";
+ for (unsigned i = 0; i != SLC->NumSections; ++i) {
+ InMemoryStruct<macho::Section64> Sect;
+ Obj.ReadSection64(LCI, i, Sect);
+ if (!SLC) {
+ Res = Error("unable to read section '" + Twine(i) + "'");
+ break;
+ }
+
+ if ((Res = DumpSectionData(Obj, i, StringRef(Sect->Name, 16),
+ StringRef(Sect->SegmentName, 16), Sect->Address,
+ Sect->Size, Sect->Offset, Sect->Align,
+ Sect->RelocationTableOffset,
+ Sect->NumRelocationTableEntries, Sect->Flags,
+ Sect->Reserved1, Sect->Reserved2,
+ Sect->Reserved3)))
+ break;
+ }
+ outs() << " ])\n";
+
+ return 0;
+}
+
+static void DumpSymbolTableEntryData(MachOObject &Obj,
+ unsigned Index, uint32_t StringIndex,
+ uint8_t Type, uint8_t SectionIndex,
+ uint16_t Flags, uint64_t Value) {
+ outs() << " # Symbol " << Index << "\n";
+ outs() << " (('n_strx', " << StringIndex << ")\n";
+ outs() << " ('n_type', " << format("0x%x", Type) << ")\n";
+ outs() << " ('n_sect', " << uint32_t(SectionIndex) << ")\n";
+ outs() << " ('n_desc', " << Flags << ")\n";
+ outs() << " ('n_value', " << Value << ")\n";
+ outs() << " ('_string', '" << Obj.getStringAtIndex(StringIndex) << "')\n";
+ outs() << " ),\n";
+}
+
+static int DumpSymtabCommand(MachOObject &Obj,
+ const MachOObject::LoadCommandInfo &LCI) {
+ InMemoryStruct<macho::SymtabLoadCommand> SLC;
+ Obj.ReadSymtabLoadCommand(LCI, SLC);
+ if (!SLC)
+    return Error("unable to read symtab load command");
+
+ outs() << " ('symoff', " << SLC->SymbolTableOffset << ")\n";
+ outs() << " ('nsyms', " << SLC->NumSymbolTableEntries << ")\n";
+ outs() << " ('stroff', " << SLC->StringTableOffset << ")\n";
+ outs() << " ('strsize', " << SLC->StringTableSize << ")\n";
+
+ // Cache the string table data.
+ Obj.RegisterStringTable(*SLC);
+
+ // Dump the string data.
+ outs() << " ('_string_data', '";
+ outs().write_escaped(Obj.getStringTableData(),
+ /*UseHexEscapes=*/true) << "')\n";
+
+ // Dump the symbol table.
+ int Res = 0;
+ outs() << " ('_symbols', [\n";
+ for (unsigned i = 0; i != SLC->NumSymbolTableEntries; ++i) {
+ if (Obj.is64Bit()) {
+ InMemoryStruct<macho::Symbol64TableEntry> STE;
+ Obj.ReadSymbol64TableEntry(SLC->SymbolTableOffset, i, STE);
+ if (!STE) {
+ Res = Error("unable to read symbol: '" + Twine(i) + "'");
+ break;
+ }
+
+ DumpSymbolTableEntryData(Obj, i, STE->StringIndex, STE->Type,
+ STE->SectionIndex, STE->Flags, STE->Value);
+ } else {
+ InMemoryStruct<macho::SymbolTableEntry> STE;
+ Obj.ReadSymbolTableEntry(SLC->SymbolTableOffset, i, STE);
+      if (!STE) {
+ Res = Error("unable to read symbol: '" + Twine(i) + "'");
+ break;
+ }
+
+ DumpSymbolTableEntryData(Obj, i, STE->StringIndex, STE->Type,
+ STE->SectionIndex, STE->Flags, STE->Value);
+ }
+ }
+ outs() << " ])\n";
+
+ return Res;
+}
+
+static int DumpDysymtabCommand(MachOObject &Obj,
+ const MachOObject::LoadCommandInfo &LCI) {
+ InMemoryStruct<macho::DysymtabLoadCommand> DLC;
+ Obj.ReadDysymtabLoadCommand(LCI, DLC);
+ if (!DLC)
+    return Error("unable to read dysymtab load command");
+
+ outs() << " ('ilocalsym', " << DLC->LocalSymbolsIndex << ")\n";
+ outs() << " ('nlocalsym', " << DLC->NumLocalSymbols << ")\n";
+ outs() << " ('iextdefsym', " << DLC->ExternalSymbolsIndex << ")\n";
+ outs() << " ('nextdefsym', " << DLC->NumExternalSymbols << ")\n";
+ outs() << " ('iundefsym', " << DLC->UndefinedSymbolsIndex << ")\n";
+ outs() << " ('nundefsym', " << DLC->NumUndefinedSymbols << ")\n";
+ outs() << " ('tocoff', " << DLC->TOCOffset << ")\n";
+ outs() << " ('ntoc', " << DLC->NumTOCEntries << ")\n";
+ outs() << " ('modtaboff', " << DLC->ModuleTableOffset << ")\n";
+ outs() << " ('nmodtab', " << DLC->NumModuleTableEntries << ")\n";
+ outs() << " ('extrefsymoff', " << DLC->ReferenceSymbolTableOffset << ")\n";
+ outs() << " ('nextrefsyms', "
+ << DLC->NumReferencedSymbolTableEntries << ")\n";
+ outs() << " ('indirectsymoff', " << DLC->IndirectSymbolTableOffset << ")\n";
+ outs() << " ('nindirectsyms', "
+ << DLC->NumIndirectSymbolTableEntries << ")\n";
+ outs() << " ('extreloff', " << DLC->ExternalRelocationTableOffset << ")\n";
+ outs() << " ('nextrel', " << DLC->NumExternalRelocationTableEntries << ")\n";
+ outs() << " ('locreloff', " << DLC->LocalRelocationTableOffset << ")\n";
+ outs() << " ('nlocrel', " << DLC->NumLocalRelocationTableEntries << ")\n";
+
+ // Dump the indirect symbol table.
+ int Res = 0;
+ outs() << " ('_indirect_symbols', [\n";
+ for (unsigned i = 0; i != DLC->NumIndirectSymbolTableEntries; ++i) {
+ InMemoryStruct<macho::IndirectSymbolTableEntry> ISTE;
+ Obj.ReadIndirectSymbolTableEntry(*DLC, i, ISTE);
+ if (!ISTE) {
+      Res = Error("unable to read indirect symbol table entry");
+ break;
+ }
+
+ outs() << " # Indirect Symbol " << i << "\n";
+ outs() << " (('symbol_index', "
+ << format("0x%x", ISTE->Index) << "),),\n";
+ }
+ outs() << " ])\n";
+
+ return Res;
+}
+
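+// Dumps the linkedit-data style load commands dispatched from DumpLoadCommand
+// below (code signature, segment split info, function starts): the payload's
+// offset and size are printed, and the data is additionally read back via
+// ReadULEB128s and printed as a list of addresses.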
+static int DumpLinkeditDataCommand(MachOObject &Obj,
+ const MachOObject::LoadCommandInfo &LCI) {
+ InMemoryStruct<macho::LinkeditDataLoadCommand> LLC;
+ Obj.ReadLinkeditDataLoadCommand(LCI, LLC);
+ if (!LLC)
+ return Error("unable to read segment load command");
+
+ outs() << " ('dataoff', " << LLC->DataOffset << ")\n"
+ << " ('datasize', " << LLC->DataSize << ")\n"
+ << " ('_addresses', [\n";
+
+ SmallVector<uint64_t, 8> Addresses;
+ Obj.ReadULEB128s(LLC->DataOffset, Addresses);
+ for (unsigned i = 0, e = Addresses.size(); i != e; ++i)
+ outs() << " # Address " << i << '\n'
+ << " ('address', " << format("0x%x", Addresses[i]) << "),\n";
+
+ outs() << " ])\n";
+
+ return 0;
+}
+
+
+static int DumpLoadCommand(MachOObject &Obj, unsigned Index) {
+ const MachOObject::LoadCommandInfo &LCI = Obj.getLoadCommandInfo(Index);
+ int Res = 0;
+
+ outs() << " # Load Command " << Index << "\n"
+ << " (('command', " << LCI.Command.Type << ")\n"
+ << " ('size', " << LCI.Command.Size << ")\n";
+ switch (LCI.Command.Type) {
+ case macho::LCT_Segment:
+ Res = DumpSegmentCommand(Obj, LCI);
+ break;
+ case macho::LCT_Segment64:
+ Res = DumpSegment64Command(Obj, LCI);
+ break;
+ case macho::LCT_Symtab:
+ Res = DumpSymtabCommand(Obj, LCI);
+ break;
+ case macho::LCT_Dysymtab:
+ Res = DumpDysymtabCommand(Obj, LCI);
+ break;
+ case macho::LCT_CodeSignature:
+ case macho::LCT_SegmentSplitInfo:
+ case macho::LCT_FunctionStarts:
+ Res = DumpLinkeditDataCommand(Obj, LCI);
+ break;
+ default:
+ Warning("unknown load command: " + Twine(LCI.Command.Type));
+ break;
+ }
+ outs() << " ),\n";
+
+ return Res;
+}
+
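+// Usage sketch (illustrative; the binary name "macho-dump" and the sample
+// file name are assumptions): the input path comes from the InputFile option
+// declared earlier in this file and is read via MemoryBuffer::getFileOrSTDIN,
+// so either a file name or "-" for stdin is accepted; the header and the load
+// commands are dumped to stdout as Python-style tuples/lists, e.g.:
+//
+//   macho-dump foo.o > foo.dump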
+int main(int argc, char **argv) {
+ ProgramName = argv[0];
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+
+ cl::ParseCommandLineOptions(argc, argv, "llvm Mach-O dumping tool\n");
+
+ // Load the input file.
+ std::string ErrorStr;
+ OwningPtr<MemoryBuffer> InputBuffer;
+ if (error_code ec = MemoryBuffer::getFileOrSTDIN(InputFile, InputBuffer))
+ return Error("unable to read input: '" + ec.message() + "'");
+
+ // Construct the Mach-O wrapper object.
+ OwningPtr<MachOObject> InputObject(
+ MachOObject::LoadFromBuffer(InputBuffer.take(), &ErrorStr));
+ if (!InputObject)
+ return Error("unable to load object: '" + ErrorStr + "'");
+
+ // Print the header
+ InputObject->printHeader(outs());
+
+ // Print the load commands.
+ int Res = 0;
+ outs() << "('load_commands', [\n";
+ for (unsigned i = 0; i != InputObject->getHeader().NumLoadCommands; ++i)
+ if ((Res = DumpLoadCommand(*InputObject, i)))
+ break;
+ outs() << "])\n";
+
+ return Res;
+}
diff --git a/contrib/llvm/tools/opt/AnalysisWrappers.cpp b/contrib/llvm/tools/opt/AnalysisWrappers.cpp
new file mode 100644
index 0000000..a2b57bb
--- /dev/null
+++ b/contrib/llvm/tools/opt/AnalysisWrappers.cpp
@@ -0,0 +1,94 @@
+//===- AnalysisWrappers.cpp - Wrappers around non-pass analyses -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines pass wrappers around LLVM analyses that do not make sense
+// as passes in their own right. It provides a standard pass interface to these
+// classes so that they can be printed out by analyze.
+//
+// These classes are kept separate from analyze.cpp to make it clearer which
+// code is integral to the analyze tool and which code merely makes additional
+// passes available.
+//
+//===----------------------------------------------------------------------===//
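+
+// A minimal usage sketch (assuming the pass names registered at the bottom of
+// this file and opt's -disable-output flag): both wrappers print to stderr
+// only, so they are typically run with bitcode output disabled, e.g.:
+//
+//   opt -print-externalfnconstants -disable-output input.bc
+//   opt -print-callgraph -disable-output input.bc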
+
+#include "llvm/Module.h"
+#include "llvm/Pass.h"
+#include "llvm/Support/CallSite.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Support/raw_ostream.h"
+using namespace llvm;
+
+namespace {
+ /// ExternalFunctionsPassedConstants - This pass prints out call sites to
+ /// external functions that are called with constant arguments. This can be
+ /// useful when looking for standard library functions we should constant fold
+ /// or handle in alias analyses.
+ struct ExternalFunctionsPassedConstants : public ModulePass {
+ static char ID; // Pass ID, replacement for typeid
+ ExternalFunctionsPassedConstants() : ModulePass(ID) {}
+ virtual bool runOnModule(Module &M) {
+ for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
+ if (!I->isDeclaration()) continue;
+
+ bool PrintedFn = false;
+ for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
+ UI != E; ++UI) {
+ Instruction *User = dyn_cast<Instruction>(*UI);
+ if (!User) continue;
+
+ CallSite CS(cast<Value>(User));
+ if (!CS) continue;
+
+ for (CallSite::arg_iterator AI = CS.arg_begin(),
+ E = CS.arg_end(); AI != E; ++AI) {
+ if (!isa<Constant>(*AI)) continue;
+
+ if (!PrintedFn) {
+ errs() << "Function '" << I->getName() << "':\n";
+ PrintedFn = true;
+ }
+ errs() << *User;
+ break;
+ }
+ }
+ }
+
+ return false;
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ }
+ };
+}
+
+char ExternalFunctionsPassedConstants::ID = 0;
+static RegisterPass<ExternalFunctionsPassedConstants>
+ P1("print-externalfnconstants",
+ "Print external fn callsites passed constants");
+
+namespace {
+ struct CallGraphPrinter : public ModulePass {
+ static char ID; // Pass ID, replacement for typeid
+ CallGraphPrinter() : ModulePass(ID) {}
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequiredTransitive<CallGraph>();
+ }
+ virtual bool runOnModule(Module &M) {
+ getAnalysis<CallGraph>().print(errs(), &M);
+ return false;
+ }
+ };
+}
+
+char CallGraphPrinter::ID = 0;
+static RegisterPass<CallGraphPrinter>
+ P2("print-callgraph", "Print a call graph");
diff --git a/contrib/llvm/tools/opt/GraphPrinters.cpp b/contrib/llvm/tools/opt/GraphPrinters.cpp
new file mode 100644
index 0000000..30361f5
--- /dev/null
+++ b/contrib/llvm/tools/opt/GraphPrinters.cpp
@@ -0,0 +1,118 @@
+//===- GraphPrinters.cpp - DOT printers for various graph types -----------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines several printers for the various types of graphs used
+// by the LLVM infrastructure. It uses the generic graph interface to convert
+// the graph into a .dot graph. These graphs can then be processed with the
+// "dot" tool to convert them to postscript or some other suitable format.
+//
+//===----------------------------------------------------------------------===//
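+
+// A minimal workflow sketch (assuming Graphviz's "dot" tool is installed and
+// the pass name registered below): -dot-callgraph writes "callgraph.dot" into
+// the current directory, which can then be rendered externally, e.g.:
+//
+//   opt -dot-callgraph -disable-output input.bc
+//   dot -Tps callgraph.dot -o callgraph.ps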
+
+#include "llvm/Support/GraphWriter.h"
+#include "llvm/Pass.h"
+#include "llvm/Value.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Analysis/Dominators.h"
+#include "llvm/Support/ToolOutputFile.h"
+using namespace llvm;
+
+template<typename GraphType>
+static void WriteGraphToFile(raw_ostream &O, const std::string &GraphName,
+ const GraphType &GT) {
+ std::string Filename = GraphName + ".dot";
+ O << "Writing '" << Filename << "'...";
+ std::string ErrInfo;
+ tool_output_file F(Filename.c_str(), ErrInfo);
+
+ if (ErrInfo.empty()) {
+ WriteGraph(F.os(), GT);
+ F.os().close();
+ if (!F.os().has_error()) {
+ O << "\n";
+ F.keep();
+ return;
+ }
+ }
+ O << " error opening file for writing!\n";
+ F.os().clear_error();
+}
+
+
+//===----------------------------------------------------------------------===//
+// Call Graph Printer
+//===----------------------------------------------------------------------===//
+
+namespace llvm {
+ template<>
+ struct DOTGraphTraits<CallGraph*> : public DefaultDOTGraphTraits {
+
+ DOTGraphTraits (bool isSimple=false) : DefaultDOTGraphTraits(isSimple) {}
+
+ static std::string getGraphName(CallGraph *F) {
+ return "Call Graph";
+ }
+
+ static std::string getNodeLabel(CallGraphNode *Node, CallGraph *Graph) {
+ if (Node->getFunction())
+ return ((Value*)Node->getFunction())->getName();
+ return "external node";
+ }
+ };
+}
+
+
+namespace {
+ struct CallGraphPrinter : public ModulePass {
+ static char ID; // Pass ID, replacement for typeid
+ CallGraphPrinter() : ModulePass(ID) {}
+
+ virtual bool runOnModule(Module &M) {
+ WriteGraphToFile(llvm::errs(), "callgraph", &getAnalysis<CallGraph>());
+ return false;
+ }
+
+ void print(raw_ostream &OS, const llvm::Module*) const {}
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequired<CallGraph>();
+ AU.setPreservesAll();
+ }
+ };
+}
+
+char CallGraphPrinter::ID = 0;
+static RegisterPass<CallGraphPrinter> P2("dot-callgraph",
+ "Print Call Graph to 'dot' file");
+
+//===----------------------------------------------------------------------===//
+// DomInfoPrinter Pass
+//===----------------------------------------------------------------------===//
+
+namespace {
+ class DomInfoPrinter : public FunctionPass {
+ public:
+ static char ID; // Pass identification, replacement for typeid
+ DomInfoPrinter() : FunctionPass(ID) {}
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequired<DominatorTree>();
+
+ }
+
+ virtual bool runOnFunction(Function &F) {
+ getAnalysis<DominatorTree>().dump();
+ return false;
+ }
+ };
+}
+
+char DomInfoPrinter::ID = 0;
+static RegisterPass<DomInfoPrinter>
+DIP("print-dom-info", "Dominator Info Printer", true, true);
diff --git a/contrib/llvm/tools/opt/PrintSCC.cpp b/contrib/llvm/tools/opt/PrintSCC.cpp
new file mode 100644
index 0000000..11efdcd
--- /dev/null
+++ b/contrib/llvm/tools/opt/PrintSCC.cpp
@@ -0,0 +1,112 @@
+//===- PrintSCC.cpp - Enumerate SCCs in some key graphs -------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file provides passes to print out SCCs in a CFG or a CallGraph.
+// Normally, you would not use these passes; instead, you would use the
+// scc_iterator directly to enumerate SCCs and process them in some way. These
+// passes serve three purposes:
+//
+// (1) As a reference for how to use the scc_iterator.
+// (2) To print out the SCCs for a CFG or a CallGraph:
+// analyze -print-cfg-sccs to print the SCCs in each CFG of a module.
+// analyze -print-cfg-sccs -stats to print the #SCCs and the maximum SCC size.
+// analyze -print-cfg-sccs -debug > /dev/null to watch the algorithm in action.
+//
+// and similarly:
+// analyze -print-callgraph-sccs [-stats] [-debug] to print SCCs in the CallGraph
+//
+// (3) To test the scc_iterator.
+//
+//===----------------------------------------------------------------------===//
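+
+// When these passes are linked into opt (as they are in this directory),
+// roughly the same invocations work there too; a hedged example, assuming the
+// pass names registered below and opt's -disable-output flag:
+//
+//   opt -print-cfg-sccs -disable-output input.bc
+//   opt -print-callgraph-sccs -stats -disable-output input.bc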
+
+#include "llvm/Pass.h"
+#include "llvm/Module.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Support/CFG.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/SCCIterator.h"
+using namespace llvm;
+
+namespace {
+ struct CFGSCC : public FunctionPass {
+ static char ID; // Pass identification, replacement for typeid
+ CFGSCC() : FunctionPass(ID) {}
+ bool runOnFunction(Function& func);
+
+ void print(raw_ostream &O, const Module* = 0) const { }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ }
+ };
+
+ struct CallGraphSCC : public ModulePass {
+ static char ID; // Pass identification, replacement for typeid
+ CallGraphSCC() : ModulePass(ID) {}
+
+ // run - Print out SCCs in the call graph for the specified module.
+ bool runOnModule(Module &M);
+
+ void print(raw_ostream &O, const Module* = 0) const { }
+
+ // getAnalysisUsage - This pass requires the CallGraph.
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ AU.addRequired<CallGraph>();
+ }
+ };
+}
+
+char CFGSCC::ID = 0;
+static RegisterPass<CFGSCC>
+Y("print-cfg-sccs", "Print SCCs of each function CFG");
+
+char CallGraphSCC::ID = 0;
+static RegisterPass<CallGraphSCC>
+Z("print-callgraph-sccs", "Print SCCs of the Call Graph");
+
+bool CFGSCC::runOnFunction(Function &F) {
+ unsigned sccNum = 0;
+ errs() << "SCCs for Function " << F.getName() << " in PostOrder:";
+ for (scc_iterator<Function*> SCCI = scc_begin(&F),
+ E = scc_end(&F); SCCI != E; ++SCCI) {
+ std::vector<BasicBlock*> &nextSCC = *SCCI;
+ errs() << "\nSCC #" << ++sccNum << " : ";
+ for (std::vector<BasicBlock*>::const_iterator I = nextSCC.begin(),
+ E = nextSCC.end(); I != E; ++I)
+ errs() << (*I)->getName() << ", ";
+ if (nextSCC.size() == 1 && SCCI.hasLoop())
+ errs() << " (Has self-loop).";
+ }
+ errs() << "\n";
+
+ return true;
+}
+
+
+// run - Print out SCCs in the call graph for the specified module.
+bool CallGraphSCC::runOnModule(Module &M) {
+ CallGraphNode* rootNode = getAnalysis<CallGraph>().getRoot();
+ unsigned sccNum = 0;
+ errs() << "SCCs for the program in PostOrder:";
+ for (scc_iterator<CallGraphNode*> SCCI = scc_begin(rootNode),
+ E = scc_end(rootNode); SCCI != E; ++SCCI) {
+ const std::vector<CallGraphNode*> &nextSCC = *SCCI;
+ errs() << "\nSCC #" << ++sccNum << " : ";
+ for (std::vector<CallGraphNode*>::const_iterator I = nextSCC.begin(),
+ E = nextSCC.end(); I != E; ++I)
+ errs() << ((*I)->getFunction() ? (*I)->getFunction()->getName()
+ : "external node") << ", ";
+ if (nextSCC.size() == 1 && SCCI.hasLoop())
+ errs() << " (Has self-loop).";
+ }
+ errs() << "\n";
+
+ return true;
+}
diff --git a/contrib/llvm/tools/opt/opt.cpp b/contrib/llvm/tools/opt/opt.cpp
new file mode 100644
index 0000000..30da863
--- /dev/null
+++ b/contrib/llvm/tools/opt/opt.cpp
@@ -0,0 +1,716 @@
+//===- opt.cpp - The LLVM Modular Optimizer -------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Optimizations may be specified an arbitrary number of times on the command
+// line; they are run in the order specified.
+//
+//===----------------------------------------------------------------------===//
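+
+// A representative invocation (illustrative only; -S and -o are declared
+// below, and pass names such as -instcombine and -gvn come from the pass
+// registry): run two passes in the given order and emit textual IR:
+//
+//   opt -S -instcombine -gvn input.ll -o output.ll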
+
+#include "llvm/LLVMContext.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/CallGraphSCCPass.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/Analysis/DebugInfo.h"
+#include "llvm/Analysis/Verifier.h"
+#include "llvm/Analysis/LoopPass.h"
+#include "llvm/Analysis/RegionPass.h"
+#include "llvm/Analysis/CallGraph.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetLibraryInfo.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/PassNameParser.h"
+#include "llvm/Support/Signals.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/IRReader.h"
+#include "llvm/Support/ManagedStatic.h"
+#include "llvm/Support/PluginLoader.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/SystemUtils.h"
+#include "llvm/Support/ToolOutputFile.h"
+#include "llvm/LinkAllPasses.h"
+#include "llvm/LinkAllVMCore.h"
+#include "llvm/Transforms/IPO/PassManagerBuilder.h"
+#include <memory>
+#include <algorithm>
+using namespace llvm;
+
+// The OptimizationList is automatically populated with registered Passes by the
+// PassNameParser.
+//
+static cl::list<const PassInfo*, bool, PassNameParser>
+PassList(cl::desc("Optimizations available:"));
+
+// Other command line options...
+//
+static cl::opt<std::string>
+InputFilename(cl::Positional, cl::desc("<input bitcode file>"),
+ cl::init("-"), cl::value_desc("filename"));
+
+static cl::opt<std::string>
+OutputFilename("o", cl::desc("Override output filename"),
+ cl::value_desc("filename"));
+
+static cl::opt<bool>
+Force("f", cl::desc("Enable binary output on terminals"));
+
+static cl::opt<bool>
+PrintEachXForm("p", cl::desc("Print module after each transformation"));
+
+static cl::opt<bool>
+NoOutput("disable-output",
+ cl::desc("Do not write result bitcode file"), cl::Hidden);
+
+static cl::opt<bool>
+OutputAssembly("S", cl::desc("Write output as LLVM assembly"));
+
+static cl::opt<bool>
+NoVerify("disable-verify", cl::desc("Do not verify result module"), cl::Hidden);
+
+static cl::opt<bool>
+VerifyEach("verify-each", cl::desc("Verify after each transform"));
+
+static cl::opt<bool>
+StripDebug("strip-debug",
+ cl::desc("Strip debugger symbol info from translation unit"));
+
+static cl::opt<bool>
+DisableInline("disable-inlining", cl::desc("Do not run the inliner pass"));
+
+static cl::opt<bool>
+DisableOptimizations("disable-opt",
+ cl::desc("Do not run any optimization passes"));
+
+static cl::opt<bool>
+DisableInternalize("disable-internalize",
+ cl::desc("Do not mark all symbols as internal"));
+
+static cl::opt<bool>
+StandardCompileOpts("std-compile-opts",
+ cl::desc("Include the standard compile time optimizations"));
+
+static cl::opt<bool>
+StandardLinkOpts("std-link-opts",
+ cl::desc("Include the standard link time optimizations"));
+
+static cl::opt<bool>
+OptLevelO1("O1",
+ cl::desc("Optimization level 1. Similar to llvm-gcc -O1"));
+
+static cl::opt<bool>
+OptLevelO2("O2",
+ cl::desc("Optimization level 2. Similar to llvm-gcc -O2"));
+
+static cl::opt<bool>
+OptLevelO3("O3",
+ cl::desc("Optimization level 3. Similar to llvm-gcc -O3"));
+
+static cl::opt<bool>
+UnitAtATime("funit-at-a-time",
+ cl::desc("Enable IPO. This is same as llvm-gcc's -funit-at-a-time"),
+ cl::init(true));
+
+static cl::opt<bool>
+DisableSimplifyLibCalls("disable-simplify-libcalls",
+ cl::desc("Disable simplify-libcalls"));
+
+static cl::opt<bool>
+Quiet("q", cl::desc("Obsolete option"), cl::Hidden);
+
+static cl::alias
+QuietA("quiet", cl::desc("Alias for -q"), cl::aliasopt(Quiet));
+
+static cl::opt<bool>
+AnalyzeOnly("analyze", cl::desc("Only perform analysis, no optimization"));
+
+static cl::opt<bool>
+PrintBreakpoints("print-breakpoints-for-testing",
+ cl::desc("Print select breakpoints location for testing"));
+
+static cl::opt<std::string>
+DefaultDataLayout("default-data-layout",
+ cl::desc("data layout string to use if not specified by module"),
+ cl::value_desc("layout-string"), cl::init(""));
+
+// ---------- Define Printers for module and function passes ------------
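+//
+// Each *PassPrinter below is only instantiated in -analyze mode (see the
+// PassKind switch in main): it declares the pass to be printed as a required
+// analysis and then forwards to that pass's print() method at the matching IR
+// granularity (module, call-graph SCC, function, loop, region, basic block).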
+namespace {
+
+struct CallGraphSCCPassPrinter : public CallGraphSCCPass {
+ static char ID;
+ const PassInfo *PassToPrint;
+ raw_ostream &Out;
+ std::string PassName;
+
+ CallGraphSCCPassPrinter(const PassInfo *PI, raw_ostream &out) :
+ CallGraphSCCPass(ID), PassToPrint(PI), Out(out) {
+ std::string PassToPrintName = PassToPrint->getPassName();
+ PassName = "CallGraphSCCPass Printer: " + PassToPrintName;
+ }
+
+ virtual bool runOnSCC(CallGraphSCC &SCC) {
+ if (!Quiet)
+ Out << "Printing analysis '" << PassToPrint->getPassName() << "':\n";
+
+ // Get and print pass...
+ for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
+ Function *F = (*I)->getFunction();
+ if (F)
+ getAnalysisID<Pass>(PassToPrint->getTypeInfo()).print(Out,
+ F->getParent());
+ }
+ return false;
+ }
+
+ virtual const char *getPassName() const { return PassName.c_str(); }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequiredID(PassToPrint->getTypeInfo());
+ AU.setPreservesAll();
+ }
+};
+
+char CallGraphSCCPassPrinter::ID = 0;
+
+struct ModulePassPrinter : public ModulePass {
+ static char ID;
+ const PassInfo *PassToPrint;
+ raw_ostream &Out;
+ std::string PassName;
+
+ ModulePassPrinter(const PassInfo *PI, raw_ostream &out)
+ : ModulePass(ID), PassToPrint(PI), Out(out) {
+ std::string PassToPrintName = PassToPrint->getPassName();
+ PassName = "ModulePass Printer: " + PassToPrintName;
+ }
+
+ virtual bool runOnModule(Module &M) {
+ if (!Quiet)
+ Out << "Printing analysis '" << PassToPrint->getPassName() << "':\n";
+
+ // Get and print pass...
+ getAnalysisID<Pass>(PassToPrint->getTypeInfo()).print(Out, &M);
+ return false;
+ }
+
+ virtual const char *getPassName() const { return PassName.c_str(); }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequiredID(PassToPrint->getTypeInfo());
+ AU.setPreservesAll();
+ }
+};
+
+char ModulePassPrinter::ID = 0;
+struct FunctionPassPrinter : public FunctionPass {
+ const PassInfo *PassToPrint;
+ raw_ostream &Out;
+ static char ID;
+ std::string PassName;
+
+ FunctionPassPrinter(const PassInfo *PI, raw_ostream &out)
+ : FunctionPass(ID), PassToPrint(PI), Out(out) {
+ std::string PassToPrintName = PassToPrint->getPassName();
+ PassName = "FunctionPass Printer: " + PassToPrintName;
+ }
+
+ virtual bool runOnFunction(Function &F) {
+ if (!Quiet)
+ Out << "Printing analysis '" << PassToPrint->getPassName()
+ << "' for function '" << F.getName() << "':\n";
+
+ // Get and print pass...
+ getAnalysisID<Pass>(PassToPrint->getTypeInfo()).print(Out,
+ F.getParent());
+ return false;
+ }
+
+ virtual const char *getPassName() const { return PassName.c_str(); }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequiredID(PassToPrint->getTypeInfo());
+ AU.setPreservesAll();
+ }
+};
+
+char FunctionPassPrinter::ID = 0;
+
+struct LoopPassPrinter : public LoopPass {
+ static char ID;
+ const PassInfo *PassToPrint;
+ raw_ostream &Out;
+ std::string PassName;
+
+ LoopPassPrinter(const PassInfo *PI, raw_ostream &out) :
+ LoopPass(ID), PassToPrint(PI), Out(out) {
+ std::string PassToPrintName = PassToPrint->getPassName();
+ PassName = "LoopPass Printer: " + PassToPrintName;
+ }
+
+
+ virtual bool runOnLoop(Loop *L, LPPassManager &LPM) {
+ if (!Quiet)
+ Out << "Printing analysis '" << PassToPrint->getPassName() << "':\n";
+
+ // Get and print pass...
+ getAnalysisID<Pass>(PassToPrint->getTypeInfo()).print(Out,
+ L->getHeader()->getParent()->getParent());
+ return false;
+ }
+
+ virtual const char *getPassName() const { return PassName.c_str(); }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequiredID(PassToPrint->getTypeInfo());
+ AU.setPreservesAll();
+ }
+};
+
+char LoopPassPrinter::ID = 0;
+
+struct RegionPassPrinter : public RegionPass {
+ static char ID;
+ const PassInfo *PassToPrint;
+ raw_ostream &Out;
+ std::string PassName;
+
+ RegionPassPrinter(const PassInfo *PI, raw_ostream &out) : RegionPass(ID),
+ PassToPrint(PI), Out(out) {
+ std::string PassToPrintName = PassToPrint->getPassName();
+ PassName = "RegionPass Printer: " + PassToPrintName;
+ }
+
+ virtual bool runOnRegion(Region *R, RGPassManager &RGM) {
+ if (!Quiet) {
+ Out << "Printing analysis '" << PassToPrint->getPassName() << "' for "
+ << "region: '" << R->getNameStr() << "' in function '"
+ << R->getEntry()->getParent()->getName() << "':\n";
+ }
+ // Get and print pass...
+ getAnalysisID<Pass>(PassToPrint->getTypeInfo()).print(Out,
+ R->getEntry()->getParent()->getParent());
+ return false;
+ }
+
+ virtual const char *getPassName() const { return PassName.c_str(); }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequiredID(PassToPrint->getTypeInfo());
+ AU.setPreservesAll();
+ }
+};
+
+char RegionPassPrinter::ID = 0;
+
+struct BasicBlockPassPrinter : public BasicBlockPass {
+ const PassInfo *PassToPrint;
+ raw_ostream &Out;
+ static char ID;
+ std::string PassName;
+
+ BasicBlockPassPrinter(const PassInfo *PI, raw_ostream &out)
+ : BasicBlockPass(ID), PassToPrint(PI), Out(out) {
+ std::string PassToPrintName = PassToPrint->getPassName();
+ PassName = "BasicBlockPass Printer: " + PassToPrintName;
+ }
+
+ virtual bool runOnBasicBlock(BasicBlock &BB) {
+ if (!Quiet)
+ Out << "Printing Analysis info for BasicBlock '" << BB.getName()
+ << "': Pass " << PassToPrint->getPassName() << ":\n";
+
+ // Get and print pass...
+ getAnalysisID<Pass>(PassToPrint->getTypeInfo()).print(Out,
+ BB.getParent()->getParent());
+ return false;
+ }
+
+ virtual const char *getPassName() const { return PassName.c_str(); }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.addRequiredID(PassToPrint->getTypeInfo());
+ AU.setPreservesAll();
+ }
+};
+
+char BasicBlockPassPrinter::ID = 0;
+
+struct BreakpointPrinter : public ModulePass {
+ raw_ostream &Out;
+ static char ID;
+
+ BreakpointPrinter(raw_ostream &out)
+ : ModulePass(ID), Out(out) {
+ }
+
+ void getContextName(DIDescriptor Context, std::string &N) {
+ if (Context.isNameSpace()) {
+ DINameSpace NS(Context);
+ if (!NS.getName().empty()) {
+ getContextName(NS.getContext(), N);
+ N = N + NS.getName().str() + "::";
+ }
+ } else if (Context.isType()) {
+ DIType TY(Context);
+ if (!TY.getName().empty()) {
+ getContextName(TY.getContext(), N);
+ N = N + TY.getName().str() + "::";
+ }
+ }
+ }
+
+ virtual bool runOnModule(Module &M) {
+ StringSet<> Processed;
+ if (NamedMDNode *NMD = M.getNamedMetadata("llvm.dbg.sp"))
+ for (unsigned i = 0, e = NMD->getNumOperands(); i != e; ++i) {
+ std::string Name;
+ DISubprogram SP(NMD->getOperand(i));
+ if (SP.Verify())
+ getContextName(SP.getContext(), Name);
+ Name = Name + SP.getDisplayName().str();
+ if (!Name.empty() && Processed.insert(Name)) {
+ Out << Name << "\n";
+ }
+ }
+ return false;
+ }
+
+ virtual void getAnalysisUsage(AnalysisUsage &AU) const {
+ AU.setPreservesAll();
+ }
+};
+
+} // anonymous namespace
+
+char BreakpointPrinter::ID = 0;
+
+static inline void addPass(PassManagerBase &PM, Pass *P) {
+ // Add the pass to the pass manager...
+ PM.add(P);
+
+ // If we are verifying all of the intermediate steps, add the verifier...
+ if (VerifyEach) PM.add(createVerifierPass());
+}
+
+/// AddOptimizationPasses - This routine adds optimization passes based on the
+/// selected optimization level, OptLevel. It duplicates llvm-gcc behaviour.
+///
+/// OptLevel - Optimization Level
+static void AddOptimizationPasses(PassManagerBase &MPM,FunctionPassManager &FPM,
+ unsigned OptLevel) {
+ FPM.add(createVerifierPass()); // Verify that input is correct
+
+ PassManagerBuilder Builder;
+ Builder.OptLevel = OptLevel;
+
+ if (DisableInline) {
+ // No inlining pass
+ } else if (OptLevel > 1) {
+ unsigned Threshold = 225;
+ if (OptLevel > 2)
+ Threshold = 275;
+ Builder.Inliner = createFunctionInliningPass(Threshold);
+ } else {
+ Builder.Inliner = createAlwaysInlinerPass();
+ }
+ Builder.DisableUnitAtATime = !UnitAtATime;
+ Builder.DisableUnrollLoops = OptLevel == 0;
+ Builder.DisableSimplifyLibCalls = DisableSimplifyLibCalls;
+
+ Builder.populateFunctionPassManager(FPM);
+ Builder.populateModulePassManager(MPM);
+}
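+
+// Note on the values above: 225 and 275 are the inliner thresholds used for
+// -O2 and -O3 respectively (mirroring llvm-gcc, per the comment on
+// AddOptimizationPasses); at lower levels the always-inliner is used unless
+// -disable-inlining is given. main() calls this helper once per -O flag, in
+// command-line order, so e.g. "opt -O2 input.bc -o output.bc" reaches it with
+// OptLevel == 2.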
+
+static void AddStandardCompilePasses(PassManagerBase &PM) {
+ PM.add(createVerifierPass()); // Verify that input is correct
+
+ // If the -strip-debug command line option was specified, do it.
+ if (StripDebug)
+ addPass(PM, createStripSymbolsPass(true));
+
+ if (DisableOptimizations) return;
+
+ // -std-compile-opts adds the same module passes as -O3.
+ PassManagerBuilder Builder;
+ if (!DisableInline)
+ Builder.Inliner = createFunctionInliningPass();
+ Builder.OptLevel = 3;
+ Builder.DisableSimplifyLibCalls = DisableSimplifyLibCalls;
+ Builder.populateModulePassManager(PM);
+}
+
+static void AddStandardLinkPasses(PassManagerBase &PM) {
+ PM.add(createVerifierPass()); // Verify that input is correct
+
+ // If the -strip-debug command line option was specified, do it.
+ if (StripDebug)
+ addPass(PM, createStripSymbolsPass(true));
+
+ if (DisableOptimizations) return;
+
+ PassManagerBuilder Builder;
+ Builder.populateLTOPassManager(PM, /*Internalize=*/ !DisableInternalize,
+ /*RunInliner=*/ !DisableInline);
+}
+
+
+//===----------------------------------------------------------------------===//
+// main for opt
+//
+int main(int argc, char **argv) {
+ sys::PrintStackTraceOnErrorSignal();
+ llvm::PrettyStackTraceProgram X(argc, argv);
+
+ // Enable debug stream buffering.
+ EnableDebugBuffering = true;
+
+ llvm_shutdown_obj Y; // Call llvm_shutdown() on exit.
+ LLVMContext &Context = getGlobalContext();
+
+ // Initialize passes
+ PassRegistry &Registry = *PassRegistry::getPassRegistry();
+ initializeCore(Registry);
+ initializeScalarOpts(Registry);
+ initializeVectorization(Registry);
+ initializeIPO(Registry);
+ initializeAnalysis(Registry);
+ initializeIPA(Registry);
+ initializeTransformUtils(Registry);
+ initializeInstCombine(Registry);
+ initializeInstrumentation(Registry);
+ initializeTarget(Registry);
+
+ cl::ParseCommandLineOptions(argc, argv,
+ "llvm .bc -> .bc modular optimizer and analysis printer\n");
+
+ if (AnalyzeOnly && NoOutput) {
+ errs() << argv[0] << ": analyze mode conflicts with no-output mode.\n";
+ return 1;
+ }
+
+ // Allocate a full target machine description only if necessary.
+ // FIXME: The choice of target should be controllable on the command line.
+ std::auto_ptr<TargetMachine> target;
+
+ SMDiagnostic Err;
+
+ // Load the input module...
+ std::auto_ptr<Module> M;
+ M.reset(ParseIRFile(InputFilename, Err, Context));
+
+ if (M.get() == 0) {
+ Err.print(argv[0], errs());
+ return 1;
+ }
+
+ // Figure out what stream we are supposed to write to...
+ OwningPtr<tool_output_file> Out;
+ if (NoOutput) {
+ if (!OutputFilename.empty())
+ errs() << "WARNING: The -o (output filename) option is ignored when\n"
+ "the --disable-output option is used.\n";
+ } else {
+ // Default to standard output.
+ if (OutputFilename.empty())
+ OutputFilename = "-";
+
+ std::string ErrorInfo;
+ Out.reset(new tool_output_file(OutputFilename.c_str(), ErrorInfo,
+ raw_fd_ostream::F_Binary));
+ if (!ErrorInfo.empty()) {
+ errs() << ErrorInfo << '\n';
+ return 1;
+ }
+ }
+
+ // If the output is set to be emitted to standard out, and standard out is a
+ // console, print out a warning message and refuse to do it. We don't
+ // impress anyone by spewing tons of binary goo to a terminal.
+ if (!Force && !NoOutput && !AnalyzeOnly && !OutputAssembly)
+ if (CheckBitcodeOutputToConsole(Out->os(), !Quiet))
+ NoOutput = true;
+
+ // Create a PassManager to hold and optimize the collection of passes we are
+ // about to build.
+ //
+ PassManager Passes;
+
+ // Add an appropriate TargetLibraryInfo pass for the module's triple.
+ TargetLibraryInfo *TLI = new TargetLibraryInfo(Triple(M->getTargetTriple()));
+
+  // The -disable-simplify-libcalls flag actually disables all builtin
+  // optimizations.
+ if (DisableSimplifyLibCalls)
+ TLI->disableAllFunctions();
+ Passes.add(TLI);
+
+ // Add an appropriate TargetData instance for this module.
+ TargetData *TD = 0;
+ const std::string &ModuleDataLayout = M.get()->getDataLayout();
+ if (!ModuleDataLayout.empty())
+ TD = new TargetData(ModuleDataLayout);
+ else if (!DefaultDataLayout.empty())
+ TD = new TargetData(DefaultDataLayout);
+
+ if (TD)
+ Passes.add(TD);
+
+ OwningPtr<FunctionPassManager> FPasses;
+ if (OptLevelO1 || OptLevelO2 || OptLevelO3) {
+ FPasses.reset(new FunctionPassManager(M.get()));
+ if (TD)
+ FPasses->add(new TargetData(*TD));
+ }
+
+ if (PrintBreakpoints) {
+ // Default to standard output.
+ if (!Out) {
+ if (OutputFilename.empty())
+ OutputFilename = "-";
+
+ std::string ErrorInfo;
+ Out.reset(new tool_output_file(OutputFilename.c_str(), ErrorInfo,
+ raw_fd_ostream::F_Binary));
+ if (!ErrorInfo.empty()) {
+ errs() << ErrorInfo << '\n';
+ return 1;
+ }
+ }
+ Passes.add(new BreakpointPrinter(Out->os()));
+ NoOutput = true;
+ }
+
+ // If the -strip-debug command line option was specified, add it. If
+ // -std-compile-opts was also specified, it will handle StripDebug.
+ if (StripDebug && !StandardCompileOpts)
+ addPass(Passes, createStripSymbolsPass(true));
+
+ // Create a new optimization pass for each one specified on the command line
+ for (unsigned i = 0; i < PassList.size(); ++i) {
+ // Check to see if -std-compile-opts was specified before this option. If
+ // so, handle it.
+ if (StandardCompileOpts &&
+ StandardCompileOpts.getPosition() < PassList.getPosition(i)) {
+ AddStandardCompilePasses(Passes);
+ StandardCompileOpts = false;
+ }
+
+ if (StandardLinkOpts &&
+ StandardLinkOpts.getPosition() < PassList.getPosition(i)) {
+ AddStandardLinkPasses(Passes);
+ StandardLinkOpts = false;
+ }
+
+ if (OptLevelO1 && OptLevelO1.getPosition() < PassList.getPosition(i)) {
+ AddOptimizationPasses(Passes, *FPasses, 1);
+ OptLevelO1 = false;
+ }
+
+ if (OptLevelO2 && OptLevelO2.getPosition() < PassList.getPosition(i)) {
+ AddOptimizationPasses(Passes, *FPasses, 2);
+ OptLevelO2 = false;
+ }
+
+ if (OptLevelO3 && OptLevelO3.getPosition() < PassList.getPosition(i)) {
+ AddOptimizationPasses(Passes, *FPasses, 3);
+ OptLevelO3 = false;
+ }
+
+ const PassInfo *PassInf = PassList[i];
+ Pass *P = 0;
+ if (PassInf->getNormalCtor())
+ P = PassInf->getNormalCtor()();
+ else
+ errs() << argv[0] << ": cannot create pass: "
+ << PassInf->getPassName() << "\n";
+ if (P) {
+ PassKind Kind = P->getPassKind();
+ addPass(Passes, P);
+
+ if (AnalyzeOnly) {
+ switch (Kind) {
+ case PT_BasicBlock:
+ Passes.add(new BasicBlockPassPrinter(PassInf, Out->os()));
+ break;
+ case PT_Region:
+ Passes.add(new RegionPassPrinter(PassInf, Out->os()));
+ break;
+ case PT_Loop:
+ Passes.add(new LoopPassPrinter(PassInf, Out->os()));
+ break;
+ case PT_Function:
+ Passes.add(new FunctionPassPrinter(PassInf, Out->os()));
+ break;
+ case PT_CallGraphSCC:
+ Passes.add(new CallGraphSCCPassPrinter(PassInf, Out->os()));
+ break;
+ default:
+ Passes.add(new ModulePassPrinter(PassInf, Out->os()));
+ break;
+ }
+ }
+ }
+
+ if (PrintEachXForm)
+ Passes.add(createPrintModulePass(&errs()));
+ }
+
+ // If -std-compile-opts was specified at the end of the pass list, add them.
+ if (StandardCompileOpts) {
+ AddStandardCompilePasses(Passes);
+ StandardCompileOpts = false;
+ }
+
+ if (StandardLinkOpts) {
+ AddStandardLinkPasses(Passes);
+ StandardLinkOpts = false;
+ }
+
+ if (OptLevelO1)
+ AddOptimizationPasses(Passes, *FPasses, 1);
+
+ if (OptLevelO2)
+ AddOptimizationPasses(Passes, *FPasses, 2);
+
+ if (OptLevelO3)
+ AddOptimizationPasses(Passes, *FPasses, 3);
+
+ if (OptLevelO1 || OptLevelO2 || OptLevelO3) {
+ FPasses->doInitialization();
+ for (Module::iterator F = M->begin(), E = M->end(); F != E; ++F)
+ FPasses->run(*F);
+ FPasses->doFinalization();
+ }
+
+ // Check that the module is well formed on completion of optimization
+ if (!NoVerify && !VerifyEach)
+ Passes.add(createVerifierPass());
+
+ // Write bitcode or assembly to the output as the last step...
+ if (!NoOutput && !AnalyzeOnly) {
+ if (OutputAssembly)
+ Passes.add(createPrintModulePass(&Out->os()));
+ else
+ Passes.add(createBitcodeWriterPass(Out->os()));
+ }
+
+ // Before executing passes, print the final values of the LLVM options.
+ cl::PrintOptionValues();
+
+ // Now that we have all of the passes ready, run them.
+ Passes.run(*M.get());
+
+ // Declare success.
+ if (!NoOutput || PrintBreakpoints)
+ Out->keep();
+
+ return 0;
+}